-rw-r--r--.gitignore5
-rw-r--r--.mailmap1
-rw-r--r--Documentation/00-INDEX28
-rw-r--r--Documentation/ABI/stable/sysfs-class-tpm22
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uvc265
-rw-r--r--Documentation/ABI/testing/sysfs-bus-amba20
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-events6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x722
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio200
-rw-r--r--Documentation/ABI/testing/sysfs-class-cxl67
-rw-r--r--Documentation/ABI/testing/sysfs-class-mei15
-rw-r--r--Documentation/ABI/testing/sysfs-class-power42
-rw-r--r--Documentation/ABI/testing/sysfs-driver-input-axp-pek11
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs6
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-livepatch44
-rw-r--r--Documentation/ABI/testing/sysfs-platform-dell-laptop60
-rw-r--r--Documentation/Changes6
-rw-r--r--Documentation/CodingStyle1
-rw-r--r--Documentation/DocBook/80211.tmpl5
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/crypto-API.tmpl2
-rw-r--r--Documentation/DocBook/drm.tmpl126
-rw-r--r--Documentation/DocBook/kgdb.tmpl81
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml11
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml99
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb12.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml1
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml8
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml8
-rw-r--r--Documentation/DocBook/uio-howto.tmpl14
-rw-r--r--Documentation/RCU/stallwarn.txt16
-rw-r--r--Documentation/RCU/trace.txt32
-rw-r--r--Documentation/SubmittingPatches452
-rw-r--r--Documentation/acpi/enumeration.txt2
-rw-r--r--Documentation/arm/00-INDEX12
-rw-r--r--Documentation/arm/Samsung-S3C24XX/DMA.txt46
-rw-r--r--Documentation/arm64/legacy_instructions.txt12
-rw-r--r--Documentation/blackfin/Makefile2
-rw-r--r--Documentation/cachetlb.txt8
-rw-r--r--Documentation/cgroups/00-INDEX2
-rw-r--r--Documentation/cgroups/unified-hierarchy.txt79
-rw-r--r--Documentation/cpu-freq/intel-pstate.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/arm-boards2
-rw-r--r--Documentation/devicetree/bindings/arm/brcm-brcmstb.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/fw-cfg.txt72
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/l2cc.txt10
-rw-r--r--Documentation/devicetree/bindings/arm/msm/timer.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sprd.txt11
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/versatile-sysreg.txt10
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt9
-rw-r--r--Documentation/devicetree/bindings/ata/cavium-compact-flash.txt2
-rw-r--r--Documentation/devicetree/bindings/ata/tegra-sata.txt4
-rw-r--r--Documentation/devicetree/bindings/c6x/dscr.txt2
-rw-r--r--Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt110
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt2
-rw-r--r--Documentation/devicetree/bindings/drm/atmel/hlcdc-dc.txt53
-rw-r--r--Documentation/devicetree/bindings/drm/bridge/dw_hdmi.txt50
-rw-r--r--Documentation/devicetree/bindings/drm/msm/hdmi.txt2
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-socfpga-fpga-mgr.txt17
-rw-r--r--Documentation/devicetree/bindings/fuse/nvidia,tegra20-fuse.txt10
-rw-r--r--Documentation/devicetree/bindings/gpio/fujitsu,mb86s70-gpio.txt20
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-max732x.txt59
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-sx150x.txt40
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt32
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt5
-rw-r--r--Documentation/devicetree/bindings/gpio/mrvl-gpio.txt4
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt8
-rw-r--r--Documentation/devicetree/bindings/gpu/st,stih4xx.txt29
-rw-r--r--Documentation/devicetree/bindings/graph.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-st.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt10
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/cc10001_adc.txt22
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt129
-rw-r--r--Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/sensorhub.txt25
-rw-r--r--Documentation/devicetree/bindings/input/e3x0-button.txt25
-rw-r--r--Documentation/devicetree/bindings/input/regulator-haptic.txt21
-rw-r--r--Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt62
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/sun4i.txt4
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt15
-rw-r--r--Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt17
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt21
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt5
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt28
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt41
-rw-r--r--Documentation/devicetree/bindings/leds/common.txt30
-rw-r--r--Documentation/devicetree/bindings/mailbox/altera-mailbox.txt49
-rw-r--r--Documentation/devicetree/bindings/media/atmel-isi.txt2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/nokia,smia.txt63
-rw-r--r--Documentation/devicetree/bindings/media/sunxi-ir.txt4
-rw-r--r--Documentation/devicetree/bindings/media/ti-am437x-vpfe.txt61
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.txt5
-rw-r--r--Documentation/devicetree/bindings/mfd/max77686.txt14
-rw-r--r--Documentation/devicetree/bindings/mfd/max77693.txt45
-rw-r--r--Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt9
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt62
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt6
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt30
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-pxa.txt15
-rw-r--r--Documentation/devicetree/bindings/mtd/fsmc-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe-phy.txt21
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-systemport.txt2
-rw-r--r--Documentation/devicetree/bindings/net/davicom-dm9000.txt4
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt3
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt11
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt88
-rw-r--r--Documentation/devicetree/bindings/net/keystone-netcp.txt197
-rw-r--r--Documentation/devicetree/bindings/net/nfc/st21nfca.txt11
-rw-r--r--Documentation/devicetree/bindings/net/nfc/st21nfcb.txt4
-rw-r--r--Documentation/devicetree/bindings/net/rockchip-dwmac.txt68
-rw-r--r--Documentation/devicetree/bindings/net/sti-dwmac.txt14
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt30
-rw-r--r--Documentation/devicetree/bindings/panel/avic,tm070ddh03.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/giantplus,gpg482739qs5.txt7
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt8
-rw-r--r--Documentation/devicetree/bindings/pci/versatile.txt59
-rw-r--r--Documentation/devicetree/bindings/phy/phy-miphy28lp.txt43
-rw-r--r--Documentation/devicetree/bindings/phy/phy-miphy365x.txt15
-rw-r--r--Documentation/devicetree/bindings/phy/phy-stih407-usb.txt10
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt37
-rw-r--r--Documentation/devicetree/bindings/phy/samsung-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt186
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt9
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt12
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt35
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.txt104
-rw-r--r--Documentation/devicetree/bindings/power/ltc2941.txt27
-rw-r--r--Documentation/devicetree/bindings/power/reset/ltc2952-poweroff.txt13
-rw-r--r--Documentation/devicetree/bindings/power/rockchip-io-domain.txt2
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/fman.txt70
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/lbc.txt18
-rw-r--r--Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt7
-rw-r--r--Documentation/devicetree/bindings/regulator/da9211.txt7
-rw-r--r--Documentation/devicetree/bindings/regulator/isl9305.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6397-regulator.txt217
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt94
-rw-r--r--Documentation/devicetree/bindings/rtc/armada-380-rtc.txt22
-rw-r--r--Documentation/devicetree/bindings/rtc/isil,isl12057.txt78
-rw-r--r--Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt4
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt16
-rw-r--r--Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt36
-rw-r--r--Documentation/devicetree/bindings/serial/digicolor-usart.txt27
-rw-r--r--Documentation/devicetree/bindings/serial/of-serial.txt5
-rw-r--r--Documentation/devicetree/bindings/serial/sirf-uart.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/sprd-uart.txt7
-rw-r--r--Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt23
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/bman.txt12
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/qman.txt14
-rw-r--r--Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/designware-i2s.txt31
-rw-r--r--Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/max98357a.txt14
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt67
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-ahub.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/pcm512x.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/samsung-i2s.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/simple-card.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/st,sta32x.txt92
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic3x.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/ts3a227e.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/wm8904.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt16
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sirf.txt41
-rw-r--r--Documentation/devicetree/bindings/spi/spi-st-ssc.txt40
-rw-r--r--Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt4
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.txt23
-rw-r--r--Documentation/devicetree/bindings/thermal/tegra-soctherm.txt4
-rw-r--r--Documentation/devicetree/bindings/timer/digicolor-timer.txt18
-rw-r--r--Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt4
-rw-r--r--Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt18
-rw-r--r--Documentation/devicetree/bindings/unittest.txt59
-rw-r--r--Documentation/devicetree/bindings/usb/atmel-usb.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ehci.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt10
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt22
-rw-r--r--Documentation/devicetree/bindings/video/bridge/ps8622.txt31
-rw-r--r--Documentation/devicetree/bindings/video/bridge/ptn3460.txt (renamed from Documentation/devicetree/bindings/drm/bridge/ptn3460.txt)16
-rw-r--r--Documentation/devicetree/bindings/video/dw_hdmi-rockchip.txt46
-rw-r--r--Documentation/devicetree/bindings/video/exynos7-decon.txt68
-rw-r--r--Documentation/devicetree/bindings/video/exynos_dp.txt12
-rw-r--r--Documentation/devicetree/bindings/video/exynos_mixer.txt1
-rw-r--r--Documentation/devicetree/bindings/video/renesas,du.txt4
-rw-r--r--Documentation/devicetree/bindings/video/ti,dra7-dss.txt69
-rw-r--r--Documentation/devicetree/bindings/video/ti,opa362.txt38
-rw-r--r--Documentation/devicetree/overlay-notes.txt4
-rw-r--r--Documentation/dmaengine/00-INDEX8
-rw-r--r--Documentation/driver-model/bus.txt2
-rw-r--r--Documentation/driver-model/devres.txt2
-rw-r--r--Documentation/filesystems/00-INDEX5
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/dax.txt94
-rw-r--r--Documentation/filesystems/ext2.txt5
-rw-r--r--Documentation/filesystems/ext4.txt4
-rw-r--r--Documentation/filesystems/f2fs.txt6
-rw-r--r--Documentation/filesystems/fiemap.txt3
-rw-r--r--Documentation/filesystems/inotify.txt197
-rw-r--r--Documentation/filesystems/nfs/nfs41-server.txt23
-rw-r--r--Documentation/filesystems/nfs/pnfs-block-server.txt37
-rw-r--r--Documentation/filesystems/nfs/pnfs.txt13
-rw-r--r--Documentation/filesystems/ocfs2.txt4
-rw-r--r--Documentation/filesystems/proc.txt62
-rw-r--r--Documentation/filesystems/seq_file.txt12
-rw-r--r--Documentation/filesystems/vfs.txt7
-rw-r--r--Documentation/filesystems/xfs.txt22
-rw-r--r--Documentation/filesystems/xip.txt68
-rw-r--r--Documentation/futex-requeue-pi.txt8
-rw-r--r--Documentation/gpio/board.txt2
-rw-r--r--Documentation/hwmon/ina2xx23
-rw-r--r--Documentation/kasan.txt170
-rw-r--r--Documentation/kernel-parameters.txt30
-rw-r--r--Documentation/kprobes.txt3
-rw-r--r--Documentation/locking/00-INDEX16
-rw-r--r--Documentation/locking/lockdep-design.txt2
-rw-r--r--Documentation/locking/lockstat.txt5
-rw-r--r--Documentation/memory-barriers.txt46
-rw-r--r--Documentation/misc-devices/mei/mei-client-bus.txt17
-rw-r--r--Documentation/misc-devices/mei/mei.txt126
-rw-r--r--Documentation/networking/00-INDEX8
-rw-r--r--Documentation/networking/can.txt2
-rw-r--r--Documentation/networking/filter.txt4
-rw-r--r--Documentation/networking/ip-sysctl.txt31
-rw-r--r--Documentation/networking/netlink_mmap.txt13
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt3
-rw-r--r--Documentation/networking/openvswitch.txt13
-rw-r--r--Documentation/networking/timestamping.txt21
-rw-r--r--Documentation/networking/timestamping/txtimestamp.c38
-rw-r--r--Documentation/nommu-mmap.txt8
-rw-r--r--Documentation/oops-tracing.txt2
-rw-r--r--Documentation/power/s2ram.txt4
-rw-r--r--Documentation/rfkill.txt3
-rw-r--r--Documentation/s390/Debugging390.txt493
-rw-r--r--Documentation/scheduler/completion.txt236
-rw-r--r--Documentation/security/keys.txt2
-rw-r--r--Documentation/stable_kernel_rules.txt44
-rw-r--r--Documentation/sysctl/kernel.txt1
-rw-r--r--Documentation/sysctl/net.txt8
-rw-r--r--Documentation/sysctl/vm.txt14
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py49
-rw-r--r--Documentation/thermal/cpu-cooling-api.txt15
-rw-r--r--Documentation/trace/coresight.txt4
-rw-r--r--Documentation/trace/ftrace.txt2
-rw-r--r--Documentation/usb/gadget-testing.txt728
-rw-r--r--Documentation/usb/gadget_serial.txt8
-rw-r--r--Documentation/usb/usbmon.txt4
-rw-r--r--Documentation/video4linux/CQcam.txt205
-rw-r--r--Documentation/video4linux/README.tlg230047
-rw-r--r--Documentation/video4linux/v4l2-framework.txt25
-rw-r--r--Documentation/video4linux/v4l2-pci-skeleton.c2
-rw-r--r--Documentation/video4linux/w9966.txt33
-rw-r--r--Documentation/virtual/kvm/api.txt35
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt37
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt59
-rw-r--r--Documentation/vm/cleancache.txt2
-rw-r--r--Documentation/vm/pagemap.txt8
-rw-r--r--Documentation/vm/remap_file_pages.txt7
-rw-r--r--Documentation/x86/entry_64.txt18
-rw-r--r--Documentation/x86/x86_64/kernel-stacks8
-rw-r--r--Documentation/x86/x86_64/mm.txt2
-rw-r--r--MAINTAINERS364
-rw-r--r--Makefile11
-rw-r--r--arch/alpha/include/asm/pgtable.h9
-rw-r--r--arch/alpha/include/asm/thread_info.h5
-rw-r--r--arch/alpha/kernel/pci.c8
-rw-r--r--arch/alpha/kernel/signal.c2
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/include/asm/pgtable.h18
-rw-r--r--arch/arc/include/asm/processor.h3
-rw-r--r--arch/arc/include/asm/serial.h23
-rw-r--r--arch/arc/include/asm/thread_info.h4
-rw-r--r--arch/arc/kernel/devtree.c24
-rw-r--r--arch/arc/kernel/entry.S14
-rw-r--r--arch/arc/kernel/setup.c7
-rw-r--r--arch/arc/kernel/signal.c2
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arc/mm/fault.c2
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Kconfig.debug101
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/compressed/head.S41
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts1
-rw-r--r--arch/arm/boot/dts/am3517.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn102.dts1
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn104.dts1
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi7
-rw-r--r--arch/arm/boot/dts/armada-xp-netgear-rn2120.dts1
-rw-r--r--arch/arm/boot/dts/berlin2q-marvell-dmp.dts2
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi63
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts10
-rw-r--r--arch/arm/boot/dts/dra7.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi13
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts1
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts1
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts1
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts1
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi9
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi31
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidu3.dts8
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidx2.dts8
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts1
-rw-r--r--arch/arm/boot/dts/exynos4412-smdk4412.dts1
-rw-r--r--arch/arm/boot/dts/exynos4412-tiny4412.dts4
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts1
-rw-r--r--arch/arm/boot/dts/exynos4x12.dtsi14
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts4
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi6
-rw-r--r--arch/arm/boot/dts/imx25.dtsi10
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts22
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dts15
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/r8a7790-lager.dts6
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-evb-rk808.dts23
-rw-r--r--arch/arm/boot/dts/rk3288-evb.dtsi49
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi54
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts8
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi9
-rw-r--r--arch/arm/boot/dts/stih410.dtsi70
-rw-r--r--arch/arm/boot/dts/stih415.dtsi12
-rw-r--r--arch/arm/boot/dts/stih416.dtsi22
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi20
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts6
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi8
-rw-r--r--arch/arm/boot/dts/sun5i-a13-hsg-h702.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi9
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi.dts6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-hummingbird.dts8
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts3
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi8
-rw-r--r--arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-a23.dtsi9
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts5
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi10
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts2
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts5
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts37
-rw-r--r--arch/arm/boot/dts/vf610-twr.dts15
-rw-r--r--arch/arm/boot/dts/zynq-parallella.dts2
-rw-r--r--arch/arm/configs/exynos_defconfig18
-rw-r--r--arch/arm/configs/iop32x_defconfig1
-rw-r--r--arch/arm/configs/iop33x_defconfig1
-rw-r--r--arch/arm/configs/ixp4xx_defconfig1
-rw-r--r--arch/arm/configs/lager_defconfig150
-rw-r--r--arch/arm/configs/lpc32xx_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/mv78xx0_defconfig1
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/orion5x_defconfig1
-rw-r--r--arch/arm/configs/rpc_defconfig1
-rw-r--r--arch/arm/configs/shmobile_defconfig1
-rw-r--r--arch/arm/include/asm/bitrev.h20
-rw-r--r--arch/arm/include/asm/compiler.h15
-rw-r--r--arch/arm/include/asm/insn.h (renamed from arch/arm/kernel/insn.h)0
-rw-r--r--arch/arm/include/asm/kprobes.h33
-rw-r--r--arch/arm/include/asm/kvm_asm.h1
-rw-r--r--arch/arm/include/asm/kvm_emulate.h15
-rw-r--r--arch/arm/include/asm/kvm_host.h9
-rw-r--r--arch/arm/include/asm/kvm_mmio.h1
-rw-r--r--arch/arm/include/asm/kvm_mmu.h98
-rw-r--r--arch/arm/include/asm/mach/pci.h6
-rw-r--r--arch/arm/include/asm/outercache.h3
-rw-r--r--arch/arm/include/asm/patch.h (renamed from arch/arm/kernel/patch.h)0
-rw-r--r--arch/arm/include/asm/pci.h7
-rw-r--r--arch/arm/include/asm/pgtable-2level.h3
-rw-r--r--arch/arm/include/asm/pgtable-3level.h7
-rw-r--r--arch/arm/include/asm/pgtable-nommu.h4
-rw-r--r--arch/arm/include/asm/pgtable.h20
-rw-r--r--arch/arm/include/asm/probes.h15
-rw-r--r--arch/arm/include/asm/thread_info.h4
-rw-r--r--arch/arm/include/asm/xen/page.h2
-rw-r--r--arch/arm/include/debug/ks8695.S (renamed from arch/arm/mach-ks8695/include/mach/debug-macro.S)10
-rw-r--r--arch/arm/include/debug/netx.S (renamed from arch/arm/mach-netx/include/mach/debug-macro.S)22
-rw-r--r--arch/arm/include/uapi/asm/kvm.h2
-rw-r--r--arch/arm/kernel/Makefile16
-rw-r--r--arch/arm/kernel/bios32.c8
-rw-r--r--arch/arm/kernel/entry-armv.S3
-rw-r--r--arch/arm/kernel/entry-header.S13
-rw-r--r--arch/arm/kernel/entry-v7m.S2
-rw-r--r--arch/arm/kernel/ftrace.c3
-rw-r--r--arch/arm/kernel/head.S9
-rw-r--r--arch/arm/kernel/irq.c3
-rw-r--r--arch/arm/kernel/jump_label.c5
-rw-r--r--arch/arm/kernel/kgdb.c3
-rw-r--r--arch/arm/kernel/module.c2
-rw-r--r--arch/arm/kernel/patch.c3
-rw-r--r--arch/arm/kernel/perf_event.c10
-rw-r--r--arch/arm/kernel/setup.c7
-rw-r--r--arch/arm/kernel/signal.c4
-rw-r--r--arch/arm/kernel/suspend.c4
-rw-r--r--arch/arm/kvm/Kconfig3
-rw-r--r--arch/arm/kvm/Makefile1
-rw-r--r--arch/arm/kvm/arm.c68
-rw-r--r--arch/arm/kvm/coproc.c70
-rw-r--r--arch/arm/kvm/coproc.h6
-rw-r--r--arch/arm/kvm/coproc_a15.c2
-rw-r--r--arch/arm/kvm/coproc_a7.c2
-rw-r--r--arch/arm/kvm/handle_exit.c8
-rw-r--r--arch/arm/kvm/interrupts.S11
-rw-r--r--arch/arm/kvm/mmu.c435
-rw-r--r--arch/arm/kvm/psci.c17
-rw-r--r--arch/arm/kvm/trace.h50
-rw-r--r--arch/arm/lib/Makefile15
-rw-r--r--arch/arm/lib/uaccess.S564
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c92
-rw-r--r--arch/arm/mach-davinci/Makefile2
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c31
-rw-r--r--arch/arm/mach-davinci/cdce949.c295
-rw-r--r--arch/arm/mach-davinci/include/mach/cdce949.h19
-rw-r--r--arch/arm/mach-davinci/serial.c10
-rw-r--r--arch/arm/mach-exynos/exynos.c39
-rw-r--r--arch/arm/mach-exynos/firmware.c50
-rw-r--r--arch/arm/mach-exynos/include/mach/dma.h26
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h3
-rw-r--r--arch/arm/mach-exynos/pm.c3
-rw-r--r--arch/arm/mach-exynos/regs-sys.h22
-rw-r--r--arch/arm/mach-exynos/sleep.S46
-rw-r--r--arch/arm/mach-exynos/suspend.c7
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c2
-rw-r--r--arch/arm/mach-imx/clk-imx6sx.c3
-rw-r--r--arch/arm/mach-integrator/pci_v3.c62
-rw-r--r--arch/arm/mach-ks8695/pci.c77
-rw-r--r--arch/arm/mach-mvebu/coherency.c7
-rw-r--r--arch/arm/mach-omap1/include/mach/debug-macro.S101
-rw-r--r--arch/arm/mach-omap1/irq.c5
-rw-r--r--arch/arm/mach-omap1/timer32k.c5
-rw-r--r--arch/arm/mach-omap2/Kconfig12
-rw-r--r--arch/arm/mach-omap2/Makefile9
-rw-r--r--arch/arm/mach-omap2/am35xx-emac.c114
-rw-r--r--arch/arm/mach-omap2/am35xx-emac.h15
-rw-r--r--arch/arm/mach-omap2/am35xx.h46
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c150
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c373
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c335
-rw-r--r--arch/arm/mach-omap2/board-generic.c24
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c14
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c6
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_apll.c142
-rw-r--r--arch/arm/mach-omap2/clock.h1
-rw-r--r--arch/arm/mach-omap2/clock2xxx.h11
-rw-r--r--arch/arm/mach-omap2/cm2xxx.c10
-rw-r--r--arch/arm/mach-omap2/cm2xxx.h2
-rw-r--r--arch/arm/mach-omap2/cm33xx.c21
-rw-r--r--arch/arm/mach-omap2/common.h10
-rw-r--r--arch/arm/mach-omap2/control.h4
-rw-r--r--arch/arm/mach-omap2/dpll44xx.c20
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S21
-rw-r--r--arch/arm/mach-omap2/omap-pm-noop.c196
-rw-r--r--arch/arm/mach-omap2/omap-pm.h192
-rw-r--r--arch/arm/mach-omap2/omap-smp.c13
-rw-r--r--arch/arm/mach-omap2/omap4-common.c48
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c246
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c33
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c35
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-omap2/pmu.c5
-rw-r--r--arch/arm/mach-omap2/powerdomain.c82
-rw-r--r--arch/arm/mach-omap2/powerdomain.h5
-rw-r--r--arch/arm/mach-omap2/prcm-common.h1
-rw-r--r--arch/arm/mach-omap2/prm3xxx.h1
-rw-r--r--arch/arm/mach-omap2/prm44xx.c5
-rw-r--r--arch/arm/mach-omap2/prm44xx_54xx.h1
-rw-r--r--arch/arm/mach-omap2/prm_common.c14
-rw-r--r--arch/arm/mach-omap2/timer.c44
-rw-r--r--arch/arm/mach-omap2/twl-common.c7
-rw-r--r--arch/arm/mach-omap2/usb-musb.c12
-rw-r--r--arch/arm/mach-omap2/usb.h2
-rw-r--r--arch/arm/mach-omap2/voltage.c110
-rw-r--r--arch/arm/mach-omap2/voltage.h13
-rw-r--r--arch/arm/mach-prima2/common.h15
-rw-r--r--arch/arm/mach-pxa/Kconfig6
-rw-r--r--arch/arm/mach-pxa/corgi.c3
-rw-r--r--arch/arm/mach-pxa/devices.c2
-rw-r--r--arch/arm/mach-pxa/hx4700.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h10
-rw-r--r--arch/arm/mach-pxa/poodle.c2
-rw-r--r--arch/arm/mach-pxa/spitz.c2
-rw-r--r--arch/arm/mach-qcom/platsmp.c4
-rw-r--r--arch/arm/mach-realview/realview_eb.c3
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c2
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c2
-rw-r--r--arch/arm/mach-realview/realview_pba8.c2
-rw-r--r--arch/arm/mach-realview/realview_pbx.c2
-rw-r--r--arch/arm/mach-rockchip/Kconfig1
-rw-r--r--arch/arm/mach-rockchip/rockchip.c27
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig42
-rw-r--r--arch/arm/mach-s3c24xx/Makefile7
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2410.c182
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2412.c150
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2440.c193
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2443.c179
-rw-r--r--arch/arm/mach-s3c24xx/dma.c1465
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h159
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/dma.h15
-rw-r--r--arch/arm/mach-sa1100/Makefile2
-rw-r--r--arch/arm/mach-sa1100/assabet.c2
-rw-r--r--arch/arm/mach-sa1100/clock.c12
-rw-r--r--arch/arm/mach-sa1100/collie.c5
-rw-r--r--arch/arm/mach-sa1100/generic.c6
-rw-r--r--arch/arm/mach-sa1100/h3100.c2
-rw-r--r--arch/arm/mach-sa1100/h3600.c2
-rw-r--r--arch/arm/mach-sa1100/include/mach/irqs.h73
-rw-r--r--arch/arm/mach-sa1100/irq.c203
-rw-r--r--arch/arm/mach-sa1100/pci-nanoengine.c94
-rw-r--r--arch/arm/mach-sa1100/pm.c1
-rw-r--r--arch/arm/mach-sa1100/time.c139
-rw-r--r--arch/arm/mach-shmobile/Kconfig20
-rw-r--r--arch/arm/mach-shmobile/Makefile3
-rw-r--r--arch/arm/mach-shmobile/Makefile.boot1
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm.c20
-rw-r--r--arch/arm/mach-shmobile/board-lager-reference.c39
-rw-r--r--arch/arm/mach-shmobile/board-lager.c827
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7790.c459
-rw-r--r--arch/arm/mach-shmobile/r8a7790.h28
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c9
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c9
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c9
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7790.c284
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c2
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c5
-rw-r--r--arch/arm/mach-shmobile/timer.c12
-rw-r--r--arch/arm/mach-versatile/core.c37
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/cache-l2x0.c443
-rw-r--r--arch/arm/mm/context.c26
-rw-r--r--arch/arm/mm/dma-mapping.c56
-rw-r--r--arch/arm/mm/hugetlbpage.c6
-rw-r--r--arch/arm/mm/init.c5
-rw-r--r--arch/arm/mm/pgd.c4
-rw-r--r--arch/arm/mm/proc-macros.S2
-rw-r--r--arch/arm/plat-iop/pmu.c2
-rw-r--r--arch/arm/plat-omap/dma.c8
-rw-r--r--arch/arm/plat-samsung/Kconfig15
-rw-r--r--arch/arm/plat-samsung/Makefile6
-rw-r--r--arch/arm/plat-samsung/dma-ops.c146
-rw-r--r--arch/arm/plat-samsung/dma.c84
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-core.h22
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-ops.h69
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-pl330.h121
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-s3c24xx.h73
-rw-r--r--arch/arm/plat-samsung/include/plat/dma.h130
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-dma.h151
-rw-r--r--arch/arm/plat-samsung/s3c-dma-ops.c146
-rw-r--r--arch/arm/probes/Makefile7
-rw-r--r--arch/arm/probes/decode-arm.c (renamed from arch/arm/kernel/probes-arm.c)18
-rw-r--r--arch/arm/probes/decode-arm.h (renamed from arch/arm/kernel/probes-arm.h)9
-rw-r--r--arch/arm/probes/decode-thumb.c (renamed from arch/arm/kernel/probes-thumb.c)16
-rw-r--r--arch/arm/probes/decode-thumb.h (renamed from arch/arm/kernel/probes-thumb.h)10
-rw-r--r--arch/arm/probes/decode.c (renamed from arch/arm/kernel/probes.c)81
-rw-r--r--arch/arm/probes/decode.h (renamed from arch/arm/kernel/probes.h)13
-rw-r--r--arch/arm/probes/kprobes/Makefile12
-rw-r--r--arch/arm/probes/kprobes/actions-arm.c (renamed from arch/arm/kernel/kprobes-arm.c)11
-rw-r--r--arch/arm/probes/kprobes/actions-common.c (renamed from arch/arm/kernel/kprobes-common.c)4
-rw-r--r--arch/arm/probes/kprobes/actions-thumb.c (renamed from arch/arm/kernel/kprobes-thumb.c)10
-rw-r--r--arch/arm/probes/kprobes/checkers-arm.c192
-rw-r--r--arch/arm/probes/kprobes/checkers-common.c101
-rw-r--r--arch/arm/probes/kprobes/checkers-thumb.c110
-rw-r--r--arch/arm/probes/kprobes/checkers.h55
-rw-r--r--arch/arm/probes/kprobes/core.c (renamed from arch/arm/kernel/kprobes.c)49
-rw-r--r--arch/arm/probes/kprobes/core.h (renamed from arch/arm/kernel/kprobes.h)12
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c370
-rw-r--r--arch/arm/probes/kprobes/test-arm.c (renamed from arch/arm/kernel/kprobes-test-arm.c)40
-rw-r--r--arch/arm/probes/kprobes/test-core.c (renamed from arch/arm/kernel/kprobes-test.c)46
-rw-r--r--arch/arm/probes/kprobes/test-core.h (renamed from arch/arm/kernel/kprobes-test.h)35
-rw-r--r--arch/arm/probes/kprobes/test-thumb.c (renamed from arch/arm/kernel/kprobes-test-thumb.c)20
-rw-r--r--arch/arm/probes/uprobes/Makefile1
-rw-r--r--arch/arm/probes/uprobes/actions-arm.c (renamed from arch/arm/kernel/uprobes-arm.c)8
-rw-r--r--arch/arm/probes/uprobes/core.c (renamed from arch/arm/kernel/uprobes.c)8
-rw-r--r--arch/arm/probes/uprobes/core.h (renamed from arch/arm/kernel/uprobes.h)0
-rw-r--r--arch/arm/xen/enlighten.c4
-rw-r--r--arch/arm/xen/mm.c2
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/Kconfig20
-rw-r--r--arch/arm64/Kconfig.debug23
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/boot/dts/Makefile2
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts2
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--arch/arm64/include/asm/bitrev.h19
-rw-r--r--arch/arm64/include/asm/cacheflush.h5
-rw-r--r--arch/arm64/include/asm/cachetype.h29
-rw-r--r--arch/arm64/include/asm/compat.h1
-rw-r--r--arch/arm64/include/asm/cpu_ops.h8
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/cpuidle.h6
-rw-r--r--arch/arm64/include/asm/cputype.h17
-rw-r--r--arch/arm64/include/asm/dma-mapping.h11
-rw-r--r--arch/arm64/include/asm/efi.h30
-rw-r--r--arch/arm64/include/asm/esr.h118
-rw-r--r--arch/arm64/include/asm/fixmap.h1
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h43
-rw-r--r--arch/arm64/include/asm/hardirq.h2
-rw-r--r--arch/arm64/include/asm/io.h5
-rw-r--r--arch/arm64/include/asm/kvm_arm.h73
-rw-r--r--arch/arm64/include/asm/kvm_asm.h1
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h50
-rw-r--r--arch/arm64/include/asm/kvm_host.h10
-rw-r--r--arch/arm64/include/asm/kvm_mmio.h1
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h55
-rw-r--r--arch/arm64/include/asm/memory.h10
-rw-r--r--arch/arm64/include/asm/mmu.h5
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h32
-rw-r--r--arch/arm64/include/asm/ptrace.h7
-rw-r--r--arch/arm64/include/asm/suspend.h2
-rw-r--r--arch/arm64/include/asm/syscalls.h30
-rw-r--r--arch/arm64/include/asm/thread_info.h4
-rw-r--r--arch/arm64/include/asm/unistd.h5
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/arm64/include/uapi/asm/ucontext.h (renamed from arch/arm64/include/asm/ucontext.h)8
-rw-r--r--arch/arm64/kernel/Makefile6
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c205
-rw-r--r--arch/arm64/kernel/asm-offsets.c3
-rw-r--r--arch/arm64/kernel/cacheinfo.c128
-rw-r--r--arch/arm64/kernel/cpuidle.c20
-rw-r--r--arch/arm64/kernel/cpuinfo.c34
-rw-r--r--arch/arm64/kernel/efi-stub.c14
-rw-r--r--arch/arm64/kernel/efi.c356
-rw-r--r--arch/arm64/kernel/entry.S66
-rw-r--r--arch/arm64/kernel/entry32.S (renamed from arch/arm64/kernel/sys32.S)34
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c2
-rw-r--r--arch/arm64/kernel/insn.c47
-rw-r--r--arch/arm64/kernel/module.c4
-rw-r--r--arch/arm64/kernel/pci.c22
-rw-r--r--arch/arm64/kernel/psci.c2
-rw-r--r--arch/arm64/kernel/setup.c22
-rw-r--r--arch/arm64/kernel/signal.c2
-rw-r--r--arch/arm64/kernel/signal32.c11
-rw-r--r--arch/arm64/kernel/smp.c10
-rw-r--r--arch/arm64/kernel/suspend.c21
-rw-r--r--arch/arm64/kernel/sys.c5
-rw-r--r--arch/arm64/kernel/sys32.c51
-rw-r--r--arch/arm64/kernel/traps.c50
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S17
-rw-r--r--arch/arm64/kvm/Kconfig3
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/emulate.c5
-rw-r--r--arch/arm64/kvm/handle_exit.c50
-rw-r--r--arch/arm64/kvm/hyp.S40
-rw-r--r--arch/arm64/kvm/inject_fault.c14
-rw-r--r--arch/arm64/kvm/reset.c1
-rw-r--r--arch/arm64/kvm/sys_regs.c122
-rw-r--r--arch/arm64/kvm/trace.h55
-rw-r--r--arch/arm64/kvm/vgic-v3-switch.S14
-rw-r--r--arch/arm64/mm/dma-mapping.c116
-rw-r--r--arch/arm64/mm/dump.c31
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/arm64/mm/hugetlbpage.c6
-rw-r--r--arch/arm64/mm/init.c33
-rw-r--r--arch/arm64/mm/ioremap.c1
-rw-r--r--arch/arm64/mm/mm.h2
-rw-r--r--arch/arm64/mm/mmu.c342
-rw-r--r--arch/arm64/mm/proc.S14
-rw-r--r--arch/avr32/configs/atngw100_defconfig30
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig30
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig30
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig23
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig30
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig30
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig30
-rw-r--r--arch/avr32/configs/atstk1002_defconfig30
-rw-r--r--arch/avr32/configs/atstk1003_defconfig26
-rw-r--r--arch/avr32/configs/atstk1004_defconfig27
-rw-r--r--arch/avr32/configs/atstk1006_defconfig30
-rw-r--r--arch/avr32/configs/favr-32_defconfig26
-rw-r--r--arch/avr32/configs/hammerhead_defconfig22
-rw-r--r--arch/avr32/configs/merisc_defconfig23
-rw-r--r--arch/avr32/configs/mimc200_defconfig18
-rw-r--r--arch/avr32/include/asm/pgtable.h27
-rw-r--r--arch/avr32/include/asm/thread_info.h4
-rw-r--r--arch/avr32/include/asm/unistd.h2
-rw-r--r--arch/avr32/include/uapi/asm/unistd.h41
-rw-r--r--arch/avr32/kernel/asm-offsets.c1
-rw-r--r--arch/avr32/kernel/module.c13
-rw-r--r--arch/avr32/kernel/signal.c2
-rw-r--r--arch/avr32/kernel/syscall-stubs.S36
-rw-r--r--arch/avr32/kernel/syscall_table.S37
-rw-r--r--arch/avr32/mach-at32ap/include/mach/cpu.h26
-rw-r--r--arch/avr32/mm/fault.c2
-rw-r--r--arch/blackfin/include/asm/pgtable.h5
-rw-r--r--arch/blackfin/include/asm/thread_info.h4
-rw-r--r--arch/blackfin/kernel/signal.c2
-rw-r--r--arch/blackfin/mach-common/ints-priority.c8
-rw-r--r--arch/c6x/include/asm/pgtable.h5
-rw-r--r--arch/c6x/include/asm/thread_info.h4
-rw-r--r--arch/c6x/kernel/signal.c2
-rw-r--r--arch/cris/Kconfig4
-rw-r--r--arch/cris/arch-v10/kernel/signal.c2
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c6
-rw-r--r--arch/cris/arch-v32/kernel/signal.c2
-rw-r--r--arch/cris/arch-v32/kernel/time.c4
-rw-r--r--arch/cris/include/arch-v10/arch/mmu.h3
-rw-r--r--arch/cris/include/arch-v32/arch/mmu.h3
-rw-r--r--arch/cris/include/asm/pgtable.h6
-rw-r--r--arch/cris/include/asm/thread_info.h4
-rw-r--r--arch/cris/include/asm/uaccess.h170
-rw-r--r--arch/cris/kernel/crisksyms.c1
-rw-r--r--arch/cris/kernel/module.c2
-rw-r--r--arch/cris/mm/fault.c6
-rw-r--r--arch/frv/include/asm/pgtable.h29
-rw-r--r--arch/frv/include/asm/string.h1
-rw-r--r--arch/frv/include/asm/thread_info.h4
-rw-r--r--arch/frv/kernel/asm-offsets.c1
-rw-r--r--arch/frv/kernel/signal.c2
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.c2
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c4
-rw-r--r--arch/frv/mm/extable.c23
-rw-r--r--arch/frv/mm/fault.c2
-rw-r--r--arch/hexagon/include/asm/pgtable.h62
-rw-r--r--arch/hexagon/include/asm/thread_info.h4
-rw-r--r--arch/hexagon/kernel/signal.c2
-rw-r--r--arch/ia64/include/asm/pgtable.h27
-rw-r--r--arch/ia64/include/asm/thread_info.h4
-rw-r--r--arch/ia64/include/uapi/asm/Kbuild1
-rw-r--r--arch/ia64/kernel/acpi-ext.c6
-rw-r--r--arch/ia64/kernel/acpi.c6
-rw-r--r--arch/ia64/kernel/module.c6
-rw-r--r--arch/ia64/kernel/signal.c2
-rw-r--r--arch/ia64/kernel/topology.c6
-rw-r--r--arch/ia64/mm/fault.c2
-rw-r--r--arch/ia64/mm/hugetlbpage.c6
-rw-r--r--arch/ia64/pci/pci.c62
-rw-r--r--arch/m32r/include/asm/pgtable-2level.h4
-rw-r--r--arch/m32r/include/asm/pgtable.h13
-rw-r--r--arch/m32r/include/asm/thread_info.h5
-rw-r--r--arch/m32r/kernel/signal.c2
-rw-r--r--arch/m32r/mm/fault.c2
-rw-r--r--arch/m68k/68360/commproc.c8
-rw-r--r--arch/m68k/68360/config.c13
-rw-r--r--arch/m68k/atari/atakeyb.c72
-rw-r--r--arch/m68k/atari/stdma.c2
-rw-r--r--arch/m68k/atari/time.c3
-rw-r--r--arch/m68k/configs/amiga_defconfig73
-rw-r--r--arch/m68k/configs/apollo_defconfig73
-rw-r--r--arch/m68k/configs/atari_defconfig78
-rw-r--r--arch/m68k/configs/bvme6000_defconfig73
-rw-r--r--arch/m68k/configs/hp300_defconfig73
-rw-r--r--arch/m68k/configs/mac_defconfig72
-rw-r--r--arch/m68k/configs/multi_defconfig78
-rw-r--r--arch/m68k/configs/mvme147_defconfig73
-rw-r--r--arch/m68k/configs/mvme16x_defconfig72
-rw-r--r--arch/m68k/configs/q40_defconfig73
-rw-r--r--arch/m68k/configs/sun3_defconfig72
-rw-r--r--arch/m68k/configs/sun3x_defconfig73
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/atariints.h5
-rw-r--r--arch/m68k/include/asm/commproc.h24
-rw-r--r--arch/m68k/include/asm/futex.h94
-rw-r--r--arch/m68k/include/asm/macintosh.h2
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h23
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h15
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h2
-rw-r--r--arch/m68k/include/asm/pgtable_no.h2
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h15
-rw-r--r--arch/m68k/include/asm/thread_info.h4
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/signal.c4
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/m68k/mac/config.c32
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/m68k/mvme147/config.c46
-rw-r--r--arch/m68k/mvme16x/rtc.c2
-rw-r--r--arch/metag/include/asm/pgtable.h6
-rw-r--r--arch/metag/include/asm/thread_info.h6
-rw-r--r--arch/metag/kernel/signal.c2
-rw-r--r--arch/metag/mm/fault.c2
-rw-r--r--arch/metag/mm/hugetlbpage.c6
-rw-r--r--arch/microblaze/boot/Makefile3
-rw-r--r--arch/microblaze/boot/dts/Makefile2
-rw-r--r--arch/microblaze/include/asm/delay.h4
-rw-r--r--arch/microblaze/include/asm/kgdb.h3
-rw-r--r--arch/microblaze/include/asm/linkage.h16
-rw-r--r--arch/microblaze/include/asm/pgalloc.h14
-rw-r--r--arch/microblaze/include/asm/pgtable.h15
-rw-r--r--arch/microblaze/include/asm/syscall.h2
-rw-r--r--arch/microblaze/include/asm/thread_info.h4
-rw-r--r--arch/microblaze/include/asm/uaccess.h6
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/include/uapi/asm/unistd.h1
-rw-r--r--arch/microblaze/kernel/Makefile2
-rw-r--r--arch/microblaze/kernel/cpu/cache.c6
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c2
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c2
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c7
-rw-r--r--arch/microblaze/kernel/intc.c8
-rw-r--r--arch/microblaze/kernel/kgdb.c10
-rw-r--r--arch/microblaze/kernel/prom_parse.c35
-rw-r--r--arch/microblaze/kernel/ptrace.c4
-rw-r--r--arch/microblaze/kernel/reset.c1
-rw-r--r--arch/microblaze/kernel/signal.c6
-rw-r--r--arch/microblaze/kernel/syscall_table.S1
-rw-r--r--arch/microblaze/kernel/timer.c1
-rw-r--r--arch/microblaze/kernel/unwind.c2
-rw-r--r--arch/microblaze/mm/fault.c2
-rw-r--r--arch/microblaze/pci/pci-common.c13
-rw-r--r--arch/mips/Kconfig23
-rw-r--r--arch/mips/boot/elf2ecoff.c64
-rw-r--r--arch/mips/cavium-octeon/Makefile1
-rw-r--r--arch/mips/cavium-octeon/crypto/Makefile7
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.c66
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.h75
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-md5.c216
-rw-r--r--arch/mips/cavium-octeon/executive/octeon-model.c6
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c152
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/configs/malta_defconfig16
-rw-r--r--arch/mips/include/asm/fpu.h43
-rw-r--r--arch/mips/include/asm/fw/arc/hinv.h6
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/include/asm/mips-cm.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h15
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h17
-rw-r--r--arch/mips/include/asm/octeon/octeon.h5
-rw-r--r--arch/mips/include/asm/pgtable-32.h38
-rw-r--r--arch/mips/include/asm/pgtable-64.h9
-rw-r--r--arch/mips/include/asm/pgtable-bits.h9
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/include/asm/syscall.h8
-rw-r--r--arch/mips/include/asm/thread_info.h5
-rw-r--r--arch/mips/include/uapi/asm/unistd.h15
-rw-r--r--arch/mips/jz4740/irq.c3
-rw-r--r--arch/mips/kernel/asm-offsets.c1
-rw-r--r--arch/mips/kernel/elf.c8
-rw-r--r--arch/mips/kernel/irq_cpu.c4
-rw-r--r--arch/mips/kernel/module.c2
-rw-r--r--arch/mips/kernel/process.c36
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/signal.c2
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp-cmp.c4
-rw-r--r--arch/mips/kernel/smp-mt.c3
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/locore.S2
-rw-r--r--arch/mips/kvm/mips.c23
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mips/mm/gup.c8
-rw-r--r--arch/mips/mm/hugetlbpage.c18
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mips/net/bpf_jit.c2
-rw-r--r--arch/mips/netlogic/common/smp.c13
-rw-r--r--arch/mips/pci/pci-bcm1480.c4
-rw-r--r--arch/mips/pci/pci-octeon.c4
-rw-r--r--arch/mips/pci/pcie-octeon.c12
-rw-r--r--arch/mn10300/include/asm/cacheflush.h7
-rw-r--r--arch/mn10300/include/asm/pgtable.h19
-rw-r--r--arch/mn10300/include/asm/thread_info.h4
-rw-r--r--arch/mn10300/kernel/asm-offsets.c1
-rw-r--r--arch/mn10300/kernel/signal.c2
-rw-r--r--arch/mn10300/mm/fault.c2
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c2
-rw-r--r--arch/mn10300/unit-asb2305/pci.c51
-rw-r--r--arch/nios2/include/asm/pgtable-bits.h1
-rw-r--r--arch/nios2/include/asm/pgtable.h12
-rw-r--r--arch/nios2/kernel/module.c2
-rw-r--r--arch/nios2/kernel/signal.c2
-rw-r--r--arch/nios2/mm/fault.c10
-rw-r--r--arch/openrisc/include/asm/pgtable.h10
-rw-r--r--arch/openrisc/include/asm/thread_info.h4
-rw-r--r--arch/openrisc/kernel/head.S5
-rw-r--r--arch/openrisc/kernel/signal.c2
-rw-r--r--arch/openrisc/mm/fault.c2
-rw-r--r--arch/parisc/include/asm/pgtable.h12
-rw-r--r--arch/parisc/include/asm/thread_info.h4
-rw-r--r--arch/parisc/kernel/module.c8
-rw-r--r--arch/parisc/kernel/signal.c2
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi6
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi90
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-bman1.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-qman1-portals.dtsi101
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-qman1.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-qman3.dtsi41
-rw-r--r--arch/powerpc/boot/dts/mvme2500.dts280
-rw-r--r--arch/powerpc/boot/dts/t4240emu.dts281
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig21
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig18
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig27
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig4
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig20
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig13
-rw-r--r--arch/powerpc/configs/40x/virtex_defconfig21
-rw-r--r--arch/powerpc/configs/40x/walnut_defconfig18
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig7
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig23
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig15
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig16
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig26
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig13
-rw-r--r--arch/powerpc/configs/44x/ebony_defconfig17
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig26
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig20
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig16
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig15
-rw-r--r--arch/powerpc/configs/44x/rainier_defconfig15
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig26
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig19
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig21
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig16
-rw-r--r--arch/powerpc/configs/44x/virtex5_defconfig21
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig23
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig19
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig20
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig22
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig21
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig22
-rw-r--r--arch/powerpc/configs/83xx/asp8347_defconfig22
-rw-r--r--arch/powerpc/configs/83xx/kmeter1_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig25
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig22
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_mds_defconfig21
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_rdb_defconfig23
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itx_defconfig20
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig20
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_mds_defconfig23
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_mds_defconfig22
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_rdk_defconfig16
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_mds_defconfig15
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig17
-rw-r--r--arch/powerpc/configs/83xx/sbc834x_defconfig16
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig16
-rw-r--r--arch/powerpc/configs/85xx/kmp204x_defconfig3
-rw-r--r--arch/powerpc/configs/85xx/ksi8560_defconfig17
-rw-r--r--arch/powerpc/configs/85xx/mpc8540_ads_defconfig19
-rw-r--r--arch/powerpc/configs/85xx/mpc8560_ads_defconfig19
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig21
-rw-r--r--arch/powerpc/configs/85xx/ppa8548_defconfig62
-rw-r--r--arch/powerpc/configs/85xx/sbc8548_defconfig40
-rw-r--r--arch/powerpc/configs/85xx/socrates_defconfig24
-rw-r--r--arch/powerpc/configs/85xx/stx_gp3_defconfig15
-rw-r--r--arch/powerpc/configs/85xx/tqm8540_defconfig17
-rw-r--r--arch/powerpc/configs/85xx/tqm8541_defconfig17
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig22
-rw-r--r--arch/powerpc/configs/85xx/tqm8555_defconfig17
-rw-r--r--arch/powerpc/configs/85xx/tqm8560_defconfig17
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig34
-rw-r--r--arch/powerpc/configs/86xx/gef_ppc9a_defconfig36
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc310_defconfig35
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc610_defconfig53
-rw-r--r--arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig32
-rw-r--r--arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig33
-rw-r--r--arch/powerpc/configs/86xx/sbc8641d_defconfig54
-rw-r--r--arch/powerpc/configs/adder875_defconfig27
-rw-r--r--arch/powerpc/configs/amigaone_defconfig36
-rw-r--r--arch/powerpc/configs/c2k_defconfig82
-rw-r--r--arch/powerpc/configs/cell_defconfig30
-rw-r--r--arch/powerpc/configs/celleb_defconfig21
-rw-r--r--arch/powerpc/configs/chrp32_defconfig37
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig21
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig6
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig17
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig29
-rw-r--r--arch/powerpc/configs/g5_defconfig63
-rw-r--r--arch/powerpc/configs/gamecube_defconfig15
-rw-r--r--arch/powerpc/configs/holly_defconfig15
-rw-r--r--arch/powerpc/configs/linkstation_defconfig28
-rw-r--r--arch/powerpc/configs/maple_defconfig26
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig12
-rw-r--r--arch/powerpc/configs/mpc512x_defconfig2
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig18
-rw-r--r--arch/powerpc/configs/mpc7448_hpc2_defconfig18
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig24
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig7
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig72
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig63
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig22
-rw-r--r--arch/powerpc/configs/mpc86xx_defconfig39
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig29
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig9
-rw-r--r--arch/powerpc/configs/pasemi_defconfig3
-rw-r--r--arch/powerpc/configs/pmac32_defconfig65
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig17
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig14
-rw-r--r--arch/powerpc/configs/ppc64_defconfig16
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig4
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig348
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig28
-rw-r--r--arch/powerpc/configs/ps3_defconfig18
-rw-r--r--arch/powerpc/configs/pseries_defconfig29
-rw-r--r--arch/powerpc/configs/pseries_le_defconfig31
-rw-r--r--arch/powerpc/configs/storcenter_defconfig12
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig32
-rw-r--r--arch/powerpc/configs/wii_defconfig18
-rw-r--r--arch/powerpc/crypto/sha1.c1
-rw-r--r--arch/powerpc/include/asm/cacheflush.h7
-rw-r--r--arch/powerpc/include/asm/cputable.h2
-rw-r--r--arch/powerpc/include/asm/eeh.h13
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h3
-rw-r--r--arch/powerpc/include/asm/fsl_pamu_stash.h4
-rw-r--r--arch/powerpc/include/asm/kprobes.h63
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/opal.h10
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h4
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h34
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h7
-rw-r--r--arch/powerpc/include/asm/pgtable.h62
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h4
-rw-r--r--arch/powerpc/include/asm/pte-40x.h1
-rw-r--r--arch/powerpc/include/asm/pte-44x.h5
-rw-r--r--arch/powerpc/include/asm/pte-8xx.h10
-rw-r--r--arch/powerpc/include/asm/pte-book3e.h1
-rw-r--r--arch/powerpc/include/asm/pte-common.h30
-rw-r--r--arch/powerpc/include/asm/pte-fsl-booke.h3
-rw-r--r--arch/powerpc/include/asm/pte-hash32.h1
-rw-r--r--arch/powerpc/include/asm/pte-hash64.h7
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/smp.h1
-rw-r--r--arch/powerpc/include/asm/thread_info.h19
-rw-r--r--arch/powerpc/kernel/asm-offsets.c15
-rw-r--r--arch/powerpc/kernel/cacheinfo.c15
-rw-r--r--arch/powerpc/kernel/cputable.c1
-rw-r--r--arch/powerpc/kernel/eeh.c24
-rw-r--r--arch/powerpc/kernel/eeh_driver.c4
-rw-r--r--arch/powerpc/kernel/eeh_pe.c20
-rw-r--r--arch/powerpc/kernel/entry_32.S83
-rw-r--r--arch/powerpc/kernel/entry_64.S37
-rw-r--r--arch/powerpc/kernel/head_8xx.S123
-rw-r--r--arch/powerpc/kernel/paca.c8
-rw-r--r--arch/powerpc/kernel/pci-common.c12
-rw-r--r--arch/powerpc/kernel/pci_dn.c4
-rw-r--r--arch/powerpc/kernel/prom.c3
-rw-r--r--arch/powerpc/kernel/rtas.c22
-rw-r--r--arch/powerpc/kernel/signal_32.c4
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c14
-rw-r--r--arch/powerpc/kernel/syscalls.c14
-rw-r--r--arch/powerpc/kernel/time.c32
-rw-r--r--arch/powerpc/kernel/traps.c15
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S26
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S28
-rw-r--r--arch/powerpc/kvm/book3s_xics.c16
-rw-r--r--arch/powerpc/kvm/booke.c1
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/powerpc/lib/Makefile27
-rw-r--r--arch/powerpc/lib/memcmp_64.S233
-rw-r--r--arch/powerpc/lib/string.S2
-rw-r--r--arch/powerpc/mm/copro_fault.c10
-rw-r--r--arch/powerpc/mm/fault.c27
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c12
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c43
-rw-r--r--arch/powerpc/mm/pgtable.c11
-rw-r--r--arch/powerpc/mm/pgtable_32.c19
-rw-r--r--arch/powerpc/mm/pgtable_64.c5
-rw-r--r--arch/powerpc/mm/slice.c29
-rw-r--r--arch/powerpc/mm/subpage-prot.c6
-rw-r--r--arch/powerpc/mm/tlb_nohash.c9
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c2
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c10
-rw-r--r--arch/powerpc/perf/hv-24x7-catalog.h25
-rw-r--r--arch/powerpc/perf/hv-24x7-domains.h28
-rw-r--r--arch/powerpc/perf/hv-24x7.c793
-rw-r--r--arch/powerpc/perf/hv-24x7.h12
-rw-r--r--arch/powerpc/perf/hv-common.c10
-rw-r--r--arch/powerpc/perf/hv-common.h10
-rw-r--r--arch/powerpc/perf/hv-gpci-requests.h261
-rw-r--r--arch/powerpc/perf/hv-gpci.c23
-rw-r--r--arch/powerpc/perf/hv-gpci.h37
-rw-r--r--arch/powerpc/perf/req-gen/_begin.h13
-rw-r--r--arch/powerpc/perf/req-gen/_clear.h5
-rw-r--r--arch/powerpc/perf/req-gen/_end.h4
-rw-r--r--arch/powerpc/perf/req-gen/_request-begin.h15
-rw-r--r--arch/powerpc/perf/req-gen/_request-end.h8
-rw-r--r--arch/powerpc/perf/req-gen/perf.h155
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/83xx/usb.c3
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig6
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/mvme2500.c74
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c4
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c11
-rw-r--r--arch/powerpc/platforms/powermac/pci.c209
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c11
-rw-r--r--arch/powerpc/platforms/powernv/opal-power.c65
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S1
-rw-r--r--arch/powerpc/platforms/powernv/opal.c72
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c41
-rw-r--r--arch/powerpc/platforms/powernv/pci.c30
-rw-r--r--arch/powerpc/platforms/powernv/setup.c2
-rw-r--r--arch/powerpc/platforms/ps3/mm.c77
-rw-r--r--arch/powerpc/platforms/ps3/platform.h13
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig20
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c2
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c22
-rw-r--r--arch/powerpc/platforms/pseries/ras.c2
-rw-r--r--arch/powerpc/sysdev/axonram.c19
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c57
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c25
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_hsta_msi.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c6
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c6
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c7
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c2
-rw-r--r--arch/powerpc/xmon/xmon.c15
-rw-r--r--arch/s390/Kconfig42
-rw-r--r--arch/s390/Makefile12
-rw-r--r--arch/s390/boot/compressed/misc.c3
-rw-r--r--arch/s390/configs/default_defconfig2
-rw-r--r--arch/s390/configs/gcov_defconfig1
-rw-r--r--arch/s390/configs/performance_defconfig1
-rw-r--r--arch/s390/configs/zfcpdump_defconfig1
-rw-r--r--arch/s390/crypto/aes_s390.c4
-rw-r--r--arch/s390/defconfig7
-rw-r--r--arch/s390/hypfs/Makefile1
-rw-r--r--arch/s390/hypfs/hypfs.h7
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c49
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c139
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/hypfs/inode.c9
-rw-r--r--arch/s390/include/asm/cacheflush.h4
-rw-r--r--arch/s390/include/asm/cpu_mf.h14
-rw-r--r--arch/s390/include/asm/elf.h8
-rw-r--r--arch/s390/include/asm/ftrace.h15
-rw-r--r--arch/s390/include/asm/irqflags.h2
-rw-r--r--arch/s390/include/asm/jump_label.h7
-rw-r--r--arch/s390/include/asm/kvm_host.h56
-rw-r--r--arch/s390/include/asm/pgtable.h35
-rw-r--r--arch/s390/include/asm/processor.h5
-rw-r--r--arch/s390/include/asm/reset.h3
-rw-r--r--arch/s390/include/asm/sclp.h11
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/sigp.h1
-rw-r--r--arch/s390/include/asm/smp.h4
-rw-r--r--arch/s390/include/asm/string.h1
-rw-r--r--arch/s390/include/asm/sysinfo.h30
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/asm/timex.h10
-rw-r--r--arch/s390/include/asm/topology.h4
-rw-r--r--arch/s390/include/uapi/asm/hypfs.h35
-rw-r--r--arch/s390/include/uapi/asm/kvm.h37
-rw-r--r--arch/s390/include/uapi/asm/unistd.h3
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/base.S3
-rw-r--r--arch/s390/kernel/cache.c391
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/dis.c9
-rw-r--r--arch/s390/kernel/early.c18
-rw-r--r--arch/s390/kernel/entry.h4
-rw-r--r--arch/s390/kernel/ftrace.c108
-rw-r--r--arch/s390/kernel/head.S4
-rw-r--r--arch/s390/kernel/ipl.c11
-rw-r--r--arch/s390/kernel/jump_label.c63
-rw-r--r--arch/s390/kernel/kprobes.c3
-rw-r--r--arch/s390/kernel/machine_kexec.c19
-rw-r--r--arch/s390/kernel/mcount.S2
-rw-r--r--arch/s390/kernel/module.c12
-rw-r--r--arch/s390/kernel/process.c18
-rw-r--r--arch/s390/kernel/processor.c10
-rw-r--r--arch/s390/kernel/sclp.S3
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c261
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/sysinfo.c37
-rw-r--r--arch/s390/kernel/topology.c63
-rw-r--r--arch/s390/kernel/uprobes.c69
-rw-r--r--arch/s390/kernel/vtime.c60
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/s390/kvm/gaccess.c4
-rw-r--r--arch/s390/kvm/intercept.c41
-rw-r--r--arch/s390/kvm/interrupt.c191
-rw-r--r--arch/s390/kvm/kvm-s390.c596
-rw-r--r--arch/s390/kvm/kvm-s390.h19
-rw-r--r--arch/s390/kvm/priv.c13
-rw-r--r--arch/s390/kvm/sigp.c160
-rw-r--r--arch/s390/kvm/trace-s390.h14
-rw-r--r--arch/s390/lib/spinlock.c52
-rw-r--r--arch/s390/mm/fault.c10
-rw-r--r--arch/s390/mm/gup.c6
-rw-r--r--arch/s390/mm/hugetlbpage.c20
-rw-r--r--arch/s390/mm/init.c9
-rw-r--r--arch/s390/mm/mmap.c142
-rw-r--r--arch/s390/mm/pgtable.c11
-rw-r--r--arch/s390/net/bpf_jit.S28
-rw-r--r--arch/s390/net/bpf_jit_comp.c17
-rw-r--r--arch/s390/pci/pci_mmio.c4
-rw-r--r--arch/score/include/asm/pgtable-bits.h1
-rw-r--r--arch/score/include/asm/pgtable.h20
-rw-r--r--arch/score/include/asm/thread_info.h4
-rw-r--r--arch/score/kernel/asm-offsets.c1
-rw-r--r--arch/score/kernel/signal.c2
-rw-r--r--arch/score/mm/fault.c2
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c3
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c3
-rw-r--r--arch/sh/include/asm/pgtable.h2
-rw-r--r--arch/sh/include/asm/pgtable_32.h30
-rw-r--r--arch/sh/include/asm/pgtable_64.h9
-rw-r--r--arch/sh/include/asm/thread_info.h4
-rw-r--r--arch/sh/kernel/asm-offsets.c1
-rw-r--r--arch/sh/kernel/signal_32.c4
-rw-r--r--arch/sh/kernel/signal_64.c4
-rw-r--r--arch/sh/mm/fault.c2
-rw-r--r--arch/sh/mm/gup.c8
-rw-r--r--arch/sh/mm/hugetlbpage.c12
-rw-r--r--arch/sparc/crypto/aes_glue.c2
-rw-r--r--arch/sparc/crypto/camellia_glue.c2
-rw-r--r--arch/sparc/crypto/des_glue.c1
-rw-r--r--arch/sparc/crypto/md5_glue.c2
-rw-r--r--arch/sparc/include/asm/cacheflush_64.h5
-rw-r--r--arch/sparc/include/asm/pgtable_32.h29
-rw-r--r--arch/sparc/include/asm/pgtable_64.h42
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h14
-rw-r--r--arch/sparc/include/asm/thread_info_32.h6
-rw-r--r--arch/sparc/include/asm/thread_info_64.h12
-rw-r--r--arch/sparc/kernel/module.c2
-rw-r--r--arch/sparc/kernel/pci.c5
-rw-r--r--arch/sparc/kernel/signal32.c4
-rw-r--r--arch/sparc/kernel/signal_32.c2
-rw-r--r--arch/sparc/kernel/signal_64.c2
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/sparc/mm/fault_32.c2
-rw-r--r--arch/sparc/mm/fault_64.c2
-rw-r--r--arch/sparc/mm/gup.c6
-rw-r--r--arch/sparc/mm/hugetlbpage.c12
-rw-r--r--arch/sparc/net/bpf_jit_comp.c4
-rw-r--r--arch/tile/include/asm/pgtable.h13
-rw-r--r--arch/tile/include/asm/thread_info.h4
-rw-r--r--arch/tile/include/asm/uaccess.h37
-rw-r--r--arch/tile/include/uapi/asm/byteorder.h4
-rw-r--r--arch/tile/kernel/hardwall.c5
-rw-r--r--arch/tile/kernel/module.c4
-rw-r--r--arch/tile/kernel/pci.c4
-rw-r--r--arch/tile/kernel/proc.c5
-rw-r--r--arch/tile/kernel/setup.c13
-rw-r--r--arch/tile/kernel/signal.c2
-rw-r--r--arch/tile/kvm/Kconfig1
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/tile/mm/homecache.c16
-rw-r--r--arch/tile/mm/hugetlbpage.c28
-rw-r--r--arch/tile/mm/init.c18
-rw-r--r--arch/um/include/asm/pgtable-2level.h11
-rw-r--r--arch/um/include/asm/pgtable-3level.h22
-rw-r--r--arch/um/include/asm/pgtable.h9
-rw-r--r--arch/um/include/asm/thread_info.h4
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/unicore32/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/unicore32/include/asm/pgtable.h14
-rw-r--r--arch/unicore32/include/asm/thread_info.h4
-rw-r--r--arch/unicore32/kernel/module.c2
-rw-r--r--arch/unicore32/kernel/signal.c2
-rw-r--r--arch/unicore32/mm/pgd.c3
-rw-r--r--arch/x86/Kconfig26
-rw-r--r--arch/x86/Makefile1
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile4
-rw-r--r--arch/x86/boot/compressed/eboot.c3
-rw-r--r--arch/x86/boot/compressed/misc.c9
-rw-r--r--arch/x86/boot/compressed/misc.h1
-rw-r--r--arch/x86/boot/ctype.h5
-rw-r--r--arch/x86/boot/early_serial_console.c6
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S343
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c34
-rw-r--r--arch/x86/crypto/des3_ede_glue.c2
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c2
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/ia32/ia32entry.S4
-rw-r--r--arch/x86/include/asm/acpi.h1
-rw-r--r--arch/x86/include/asm/apic.h58
-rw-r--r--arch/x86/include/asm/calling.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/debugreg.h5
-rw-r--r--arch/x86/include/asm/desc.h20
-rw-r--r--arch/x86/include/asm/fpu-internal.h10
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h1
-rw-r--r--arch/x86/include/asm/i387.h6
-rw-r--r--arch/x86/include/asm/intel-mid.h3
-rw-r--r--arch/x86/include/asm/io_apic.h5
-rw-r--r--arch/x86/include/asm/irq_remapping.h4
-rw-r--r--arch/x86/include/asm/kasan.h31
-rw-r--r--arch/x86/include/asm/kvm_emulate.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h59
-rw-r--r--arch/x86/include/asm/livepatch.h46
-rw-r--r--arch/x86/include/asm/mce.h1
-rw-r--r--arch/x86/include/asm/mmu.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h53
-rw-r--r--arch/x86/include/asm/page_64_types.h12
-rw-r--r--arch/x86/include/asm/paravirt.h6
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/pgtable-2level.h38
-rw-r--r--arch/x86/include/asm/pgtable-3level.h12
-rw-r--r--arch/x86/include/asm/pgtable.h66
-rw-r--r--arch/x86/include/asm/pgtable_64.h11
-rw-r--r--arch/x86/include/asm/pgtable_types.h46
-rw-r--r--arch/x86/include/asm/pmc_atom.h22
-rw-r--r--arch/x86/include/asm/processor.h33
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h68
-rw-r--r--arch/x86/include/asm/special_insns.h6
-rw-r--r--arch/x86/include/asm/spinlock.h4
-rw-r--r--arch/x86/include/asm/string_64.h18
-rw-r--r--arch/x86/include/asm/thread_info.h19
-rw-r--r--arch/x86/include/asm/tlbflush.h77
-rw-r--r--arch/x86/include/asm/traps.h6
-rw-r--r--arch/x86/include/asm/virtext.h5
-rw-r--r--arch/x86/include/asm/vmx.h4
-rw-r--r--arch/x86/include/asm/xen/page.h20
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h11
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h12
-rw-r--r--arch/x86/include/uapi/asm/vmx.h6
-rw-r--r--arch/x86/kernel/Makefile5
-rw-r--r--arch/x86/kernel/acpi/boot.c44
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/apb_timer.c8
-rw-r--r--arch/x86/kernel/apic/apic.c455
-rw-r--r--arch/x86/kernel/apic/io_apic.c13
-rw-r--r--arch/x86/kernel/cpu/amd.c19
-rw-r--r--arch/x86/kernel/cpu/common.c32
-rw-r--r--arch/x86/kernel/cpu/intel.c6
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c26
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c140
-rw-r--r--arch/x86/kernel/cpu/mcheck/p5.c9
-rw-r--r--arch/x86/kernel/cpu/mcheck/winchip.c8
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/cyrix.c6
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event.c76
-rw-r--r--arch/x86/kernel/cpu/perf_event.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c46
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h18
-rw-r--r--arch/x86/kernel/dumpstack.c5
-rw-r--r--arch/x86/kernel/e820.c26
-rw-r--r--arch/x86/kernel/early_printk.c187
-rw-r--r--arch/x86/kernel/entry_64.S317
-rw-r--r--arch/x86/kernel/ftrace.c2
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c11
-rw-r--r--arch/x86/kernel/head_64.S30
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c45
-rw-r--r--arch/x86/kernel/i387.c42
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/irq_32.c13
-rw-r--r--arch/x86/kernel/kprobes/core.c20
-rw-r--r--arch/x86/kernel/kprobes/opt.c3
-rw-r--r--arch/x86/kernel/livepatch.c90
-rw-r--r--arch/x86/kernel/module.c14
-rw-r--r--arch/x86/kernel/pmc_atom.c81
-rw-r--r--arch/x86/kernel/process.c5
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/rtc.c6
-rw-r--r--arch/x86/kernel/setup.c13
-rw-r--r--arch/x86/kernel/signal.c8
-rw-r--r--arch/x86/kernel/smpboot.c113
-rw-r--r--arch/x86/kernel/tls.c25
-rw-r--r--arch/x86/kernel/traps.c131
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c10
-rw-r--r--arch/x86/kernel/xsave.c3
-rw-r--r--arch/x86/kvm/Kconfig2
-rw-r--r--arch/x86/kvm/emulate.c261
-rw-r--r--arch/x86/kvm/ioapic.h2
-rw-r--r--arch/x86/kvm/iommu.c4
-rw-r--r--arch/x86/kvm/lapic.c150
-rw-r--r--arch/x86/kvm/lapic.h6
-rw-r--r--arch/x86/kvm/mmu.c351
-rw-r--r--arch/x86/kvm/mmu.h17
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/trace.h38
-rw-r--r--arch/x86/kvm/vmx.c1096
-rw-r--r--arch/x86/kvm/x86.c209
-rw-r--r--arch/x86/kvm/x86.h3
-rw-r--r--arch/x86/lib/memcpy_64.S6
-rw-r--r--arch/x86/lib/memmove_64.S4
-rw-r--r--arch/x86/lib/memset_64.S10
-rw-r--r--arch/x86/mm/Makefile3
-rw-r--r--arch/x86/mm/fault.c4
-rw-r--r--arch/x86/mm/gup.c13
-rw-r--r--arch/x86/mm/hugetlbpage.c31
-rw-r--r--arch/x86/mm/init.c19
-rw-r--r--arch/x86/mm/kasan_init_64.c206
-rw-r--r--arch/x86/mm/mpx.c6
-rw-r--r--arch/x86/mm/numa.c6
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/mm/pgtable.c14
-rw-r--r--arch/x86/mm/tlb.c3
-rw-r--r--arch/x86/pci/acpi.c293
-rw-r--r--arch/x86/pci/bus_numa.c4
-rw-r--r--arch/x86/pci/common.c50
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/pci/intel_mid_pci.c5
-rw-r--r--arch/x86/pci/irq.c15
-rw-r--r--arch/x86/pci/mmconfig-shared.c34
-rw-r--r--arch/x86/pci/xen.c53
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_max3111.c35
-rw-r--r--arch/x86/platform/intel-mid/early_printk_intel_mid.c220
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_vrtc.c2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c25
-rw-r--r--arch/x86/power/cpu.c11
-rw-r--r--arch/x86/realmode/Makefile2
-rw-r--r--arch/x86/realmode/init.c2
-rw-r--r--arch/x86/realmode/rm/Makefile1
-rw-r--r--arch/x86/tools/calc_run_size.pl39
-rw-r--r--arch/x86/tools/calc_run_size.sh42
-rw-r--r--arch/x86/um/signal.c2
-rw-r--r--arch/x86/vdso/Makefile3
-rw-r--r--arch/x86/xen/enlighten.c26
-rw-r--r--arch/x86/xen/mmu.c17
-rw-r--r--arch/x86/xen/p2m.c289
-rw-r--r--arch/x86/xen/setup.c79
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/time.c22
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/include/asm/pgtable.h12
-rw-r--r--arch/xtensa/include/asm/thread_info.h5
-rw-r--r--arch/xtensa/kernel/setup.c7
-rw-r--r--arch/xtensa/kernel/signal.c2
-rw-r--r--arch/xtensa/mm/fault.c2
-rw-r--r--block/bio.c426
-rw-r--r--block/blk-core.c34
-rw-r--r--block/blk-lib.c30
-rw-r--r--block/blk-map.c172
-rw-r--r--block/blk-merge.c41
-rw-r--r--block/blk-mq-tag.c95
-rw-r--r--block/blk-mq-tag.h5
-rw-r--r--block/blk-mq.c117
-rw-r--r--block/blk-mq.h3
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-tag.c33
-rw-r--r--block/blk-timeout.c3
-rw-r--r--block/bsg.c72
-rw-r--r--block/cfq-iosched.c16
-rw-r--r--block/ioctl.c2
-rw-r--r--block/partitions/check.c12
-rw-r--r--block/partitions/efi.c2
-rw-r--r--block/scsi_ioctl.c17
-rw-r--r--crypto/Kconfig18
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/ablkcipher.c7
-rw-r--r--crypto/aead.c3
-rw-r--r--crypto/aes_generic.c1
-rw-r--r--crypto/af_alg.c51
-rw-r--r--crypto/ahash.c3
-rw-r--r--crypto/algapi.c1
-rw-r--r--crypto/algif_hash.c45
-rw-r--r--crypto/algif_rng.c192
-rw-r--r--crypto/algif_skcipher.c81
-rw-r--r--crypto/ansi_cprng.c1
-rw-r--r--crypto/blowfish_generic.c1
-rw-r--r--crypto/camellia_generic.c1
-rw-r--r--crypto/cast5_generic.c1
-rw-r--r--crypto/cast6_generic.c1
-rw-r--r--crypto/crc32c_generic.c1
-rw-r--r--crypto/crct10dif_generic.c1
-rw-r--r--crypto/cts.c5
-rw-r--r--crypto/des_generic.c7
-rw-r--r--crypto/drbg.c34
-rw-r--r--crypto/ghash-generic.c1
-rw-r--r--crypto/krng.c1
-rw-r--r--crypto/salsa20_generic.c1
-rw-r--r--crypto/scatterwalk.c6
-rw-r--r--crypto/seqiv.c12
-rw-r--r--crypto/serpent_generic.c1
-rw-r--r--crypto/sha1_generic.c1
-rw-r--r--crypto/sha256_generic.c2
-rw-r--r--crypto/sha512_generic.c2
-rw-r--r--crypto/tcrypt.c37
-rw-r--r--crypto/tea.c1
-rw-r--r--crypto/testmgr.c58
-rw-r--r--crypto/tgr192.c1
-rw-r--r--crypto/twofish_generic.c1
-rw-r--r--crypto/wp512.c1
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/acpi/Kconfig6
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_apd.c150
-rw-r--r--drivers/acpi/acpi_lpss.c47
-rw-r--r--drivers/acpi/acpi_memhotplug.c8
-rw-r--r--drivers/acpi/acpi_platform.c4
-rw-r--r--drivers/acpi/acpica/acapps.h4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c164
-rw-r--r--drivers/acpi/acpica/evgpeblk.c10
-rw-r--r--drivers/acpi/acpica/evgpeinit.c10
-rw-r--r--drivers/acpi/acpica/evgpeutil.c61
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c132
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c123
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c10
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c46
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psobject.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c11
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c61
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c12
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c41
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c6
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uterror.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utfileio.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstring.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/apei-base.c32
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/ec.c453
-rw-r--r--drivers/acpi/event.c7
-rw-r--r--drivers/acpi/int340x_thermal.c11
-rw-r--r--drivers/acpi/internal.h11
-rw-r--r--drivers/acpi/ioapic.c229
-rw-r--r--drivers/acpi/numa.c12
-rw-r--r--drivers/acpi/pci_irq.c10
-rw-r--r--drivers/acpi/pci_root.c9
-rw-r--r--drivers/acpi/processor_core.c123
-rw-r--r--drivers/acpi/processor_idle.c182
-rw-r--r--drivers/acpi/resource.c353
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/acpi/video.c18
-rw-r--r--drivers/amba/bus.c47
-rw-r--r--drivers/android/binder.c26
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/ahci_da850.c11
-rw-r--r--drivers/ata/ahci_imx.c25
-rw-r--r--drivers/ata/ahci_mvebu.c11
-rw-r--r--drivers/ata/ahci_platform.c11
-rw-r--r--drivers/ata/ahci_st.c11
-rw-r--r--drivers/ata/ahci_sunxi.c11
-rw-r--r--drivers/ata/ahci_tegra.c11
-rw-r--r--drivers/ata/ahci_xgene.c197
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libahci_platform.c249
-rw-r--r--drivers/ata/libata-core.c125
-rw-r--r--drivers/ata/libata-eh.c22
-rw-r--r--drivers/ata/libata-scsi.c47
-rw-r--r--drivers/ata/libata-sff.c12
-rw-r--r--drivers/ata/libata.h5
-rw-r--r--drivers/ata/pata_cs5530.c6
-rw-r--r--drivers/ata/pata_of_platform.c10
-rw-r--r--drivers/ata/pata_pdc2027x.c10
-rw-r--r--drivers/ata/pata_platform.c8
-rw-r--r--drivers/ata/sata_dwc_460ex.c142
-rw-r--r--drivers/ata/sata_mv.c6
-rw-r--r--drivers/ata/sata_rcar.c25
-rw-r--r--drivers/ata/sata_sil24.c3
-rw-r--r--drivers/atm/eni.c33
-rw-r--r--drivers/atm/fore200e.c22
-rw-r--r--drivers/atm/he.c125
-rw-r--r--drivers/atm/he.h4
-rw-r--r--drivers/atm/horizon.c24
-rw-r--r--drivers/atm/idt77252.c107
-rw-r--r--drivers/atm/iphase.c54
-rw-r--r--drivers/atm/lanai.c23
-rw-r--r--drivers/atm/nicstar.c60
-rw-r--r--drivers/atm/solos-pci.c26
-rw-r--r--drivers/atm/zatm.c17
-rw-r--r--drivers/base/core.c29
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/firmware_class.c34
-rw-r--r--drivers/base/node.c3
-rw-r--r--drivers/base/power/clock_ops.c4
-rw-r--r--drivers/base/power/common.c18
-rw-r--r--drivers/base/power/domain.c157
-rw-r--r--drivers/base/power/opp.c202
-rw-r--r--drivers/base/power/qos.c4
-rw-r--r--drivers/base/regmap/internal.h10
-rw-r--r--drivers/base/regmap/regmap-ac97.c4
-rw-r--r--drivers/base/regmap/regmap-i2c.c46
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/bcma/bcma_private.h18
-rw-r--r--drivers/bcma/driver_chipcommon.c20
-rw-r--r--drivers/bcma/driver_pci.c68
-rw-r--r--drivers/bcma/host_pci.c6
-rw-r--r--drivers/bcma/host_soc.c2
-rw-r--r--drivers/bcma/main.c76
-rw-r--r--drivers/bcma/scan.c67
-rw-r--r--drivers/bcma/sprom.c3
-rw-r--r--drivers/block/Kconfig13
-rw-r--r--drivers/block/brd.c137
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/floppy.c17
-rw-r--r--drivers/block/loop.c416
-rw-r--r--drivers/block/loop.h18
-rw-r--r--drivers/block/null_blk.c4
-rw-r--r--drivers/block/nvme-core.c305
-rw-r--r--drivers/block/osdblk.c2
-rw-r--r--drivers/block/rbd.c25
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/xen-blkback/blkback.c177
-rw-r--r--drivers/block/xen-blkback/common.h12
-rw-r--r--drivers/block/xen-blkback/xenbus.c4
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/block/zram/zram_drv.c227
-rw-r--r--drivers/block/zram/zram_drv.h21
-rw-r--r--drivers/bluetooth/ath3k.c10
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/btmrvl_drv.h5
-rw-r--r--drivers/bluetooth/btmrvl_main.c32
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c6
-rw-r--r--drivers/bluetooth/btusb.c710
-rw-r--r--drivers/bluetooth/hci_ath.c15
-rw-r--r--drivers/bus/arm-cci.c7
-rw-r--r--drivers/char/Kconfig9
-rw-r--r--drivers/char/agp/agp.h5
-rw-r--r--drivers/char/agp/generic.c11
-rw-r--r--drivers/char/agp/intel-gtt.c14
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hw_random/core.c215
-rw-r--r--drivers/char/hw_random/virtio-rng.c1
-rw-r--r--drivers/char/i8k.c316
-rw-r--r--drivers/char/mem.c86
-rw-r--r--drivers/char/random.c8
-rw-r--r--drivers/char/raw.c4
-rw-r--r--drivers/char/tpm/Kconfig15
-rw-r--r--drivers/char/tpm/Makefile5
-rw-r--r--drivers/char/tpm/tpm-chip.c256
-rw-r--r--drivers/char/tpm/tpm-dev.c42
-rw-r--r--drivers/char/tpm/tpm-interface.c263
-rw-r--r--drivers/char/tpm/tpm-sysfs.c29
-rw-r--r--drivers/char/tpm/tpm.h124
-rw-r--r--drivers/char/tpm/tpm2-cmd.c617
-rw-r--r--drivers/char/tpm/tpm_atmel.c25
-rw-r--r--drivers/char/tpm/tpm_crb.c354
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c52
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c43
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c69
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c666
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c27
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h2
-rw-r--r--drivers/char/tpm/tpm_infineon.c51
-rw-r--r--drivers/char/tpm/tpm_nsc.c34
-rw-r--r--drivers/char/tpm/tpm_of.c2
-rw-r--r--drivers/char/tpm/tpm_ppi.c141
-rw-r--r--drivers/char/tpm/tpm_tis.c276
-rw-r--r--drivers/char/tpm/xen-tpmfront.c14
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/at91/clk-slow.c27
-rw-r--r--drivers/clk/berlin/bg2q.c1
-rw-r--r--drivers/clk/clk-ppc-corenet.c2
-rw-r--r--drivers/clk/clk.c14
-rw-r--r--drivers/clk/rockchip/clk-cpu.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c27
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c42
-rw-r--r--drivers/clocksource/Kconfig24
-rw-r--r--drivers/clocksource/Makefile7
-rw-r--r--drivers/clocksource/arm_arch_timer.c1
-rw-r--r--drivers/clocksource/asm9260_timer.c220
-rw-r--r--drivers/clocksource/bcm_kona_timer.c9
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/clocksource/rockchip_timer.c180
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/clocksource/timer-atlas7.c (renamed from drivers/clocksource/timer-marco.c)15
-rw-r--r--drivers/clocksource/timer-digicolor.c199
-rw-r--r--drivers/clocksource/versatile.c4
-rw-r--r--drivers/coresight/coresight-etb10.c14
-rw-r--r--drivers/coresight/coresight-etm3x.c24
-rw-r--r--drivers/coresight/coresight-funnel.c12
-rw-r--r--drivers/coresight/coresight-priv.h2
-rw-r--r--drivers/coresight/coresight-replicator.c2
-rw-r--r--drivers/coresight/coresight-tmc.c12
-rw-r--r--drivers/coresight/coresight-tpiu.c12
-rw-r--r--drivers/coresight/coresight.c11
-rw-r--r--drivers/coresight/of_coresight.c22
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig.x8610
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/cpufreq.c174
-rw-r--r--drivers/cpufreq/cpufreq_stats.c219
-rw-r--r--drivers/cpufreq/intel_pstate.c55
-rw-r--r--drivers/cpufreq/ls1x-cpufreq.c1
-rw-r--r--drivers/cpufreq/sfi-cpufreq.c136
-rw-r--r--drivers/cpufreq/speedstep-lib.c3
-rw-r--r--drivers/cpufreq/speedstep-smi.c12
-rw-r--r--drivers/cpuidle/Kconfig.arm641
-rw-r--r--drivers/cpuidle/cpuidle-arm64.c1
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c4
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.c23
-rw-r--r--drivers/crypto/atmel-aes.c2
-rw-r--r--drivers/crypto/atmel-sha.c50
-rw-r--r--drivers/crypto/atmel-tdes.c2
-rw-r--r--drivers/crypto/bfin_crc.c4
-rw-r--r--drivers/crypto/caam/caamalg.c14
-rw-r--r--drivers/crypto/caam/ctrl.c6
-rw-r--r--drivers/crypto/caam/error.c13
-rw-r--r--drivers/crypto/caam/jr.c37
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h8
-rw-r--r--drivers/crypto/ccp/ccp-dev.c1
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/crypto/nx/nx.c6
-rw-r--r--drivers/crypto/omap-aes.c4
-rw-r--r--drivers/crypto/omap-des.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h6
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c24
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c7
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c98
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_internal.h1
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw.h2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c642
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.h16
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c19
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c42
-rw-r--r--drivers/crypto/qce/dma.c6
-rw-r--r--drivers/crypto/qce/sha.c2
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c8
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c10
-rw-r--r--drivers/devfreq/Kconfig13
-rw-r--r--drivers/devfreq/Makefile5
-rw-r--r--drivers/devfreq/devfreq-event.c494
-rw-r--r--drivers/devfreq/event/Kconfig25
-rw-r--r--drivers/devfreq/event/Makefile2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c374
-rw-r--r--drivers/devfreq/event/exynos-ppmu.h93
-rw-r--r--drivers/devfreq/tegra-devfreq.c718
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/acpi-dma.c10
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/dw/platform.c5
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/edac_mc_sysfs.c51
-rw-r--r--drivers/edac/i5100_edac.c5
-rw-r--r--drivers/edac/mce_amd_inj.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.h2
-rw-r--r--drivers/edac/mv64x60_edac.c3
-rw-r--r--drivers/edac/synopsys_edac.c535
-rw-r--r--drivers/extcon/extcon-adc-jack.c1
-rw-r--r--drivers/extcon/extcon-class.c1
-rw-r--r--drivers/extcon/extcon-max77693.c2
-rw-r--r--drivers/firmware/efi/Kconfig4
-rw-r--r--drivers/firmware/efi/efi.c73
-rw-r--r--drivers/firmware/efi/efivars.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile15
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c67
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c43
-rw-r--r--drivers/firmware/efi/libstub/efistub.h8
-rw-r--r--drivers/firmware/efi/libstub/fdt.c62
-rw-r--r--drivers/firmware/efi/runtime-map.c2
-rw-r--r--drivers/gpio/Kconfig35
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-amd8111.c6
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-dln2.c157
-rw-r--r--drivers/gpio/gpio-dwapb.c53
-rw-r--r--drivers/gpio/gpio-ge.c98
-rw-r--r--drivers/gpio/gpio-generic.c76
-rw-r--r--drivers/gpio/gpio-grgpio.c5
-rw-r--r--drivers/gpio/gpio-max732x.c225
-rw-r--r--drivers/gpio/gpio-mb86s7x.c232
-rw-r--r--drivers/gpio/gpio-mcp23s08.c17
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c42
-rw-r--r--drivers/gpio/gpio-moxart.c101
-rw-r--r--drivers/gpio/gpio-mpc5200.c23
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c70
-rw-r--r--drivers/gpio/gpio-mvebu.c100
-rw-r--r--drivers/gpio/gpio-omap.c39
-rw-r--r--drivers/gpio/gpio-pxa.c55
-rw-r--r--drivers/gpio/gpio-rcar.c75
-rw-r--r--drivers/gpio/gpio-sa1100.c199
-rw-r--r--drivers/gpio/gpio-sch.c89
-rw-r--r--drivers/gpio/gpio-stmpe.c23
-rw-r--r--drivers/gpio/gpio-sx150x.c251
-rw-r--r--drivers/gpio/gpio-tc3589x.c15
-rw-r--r--drivers/gpio/gpio-tz1090-pdc.c2
-rw-r--r--drivers/gpio/gpio-tz1090.c2
-rw-r--r--drivers/gpio/gpio-vf610.c1
-rw-r--r--drivers/gpio/gpio-vx855.c44
-rw-r--r--drivers/gpio/gpio-xgene-sb.c160
-rw-r--r--drivers/gpio/gpio-xilinx.c284
-rw-r--r--drivers/gpio/gpio-zevio.c12
-rw-r--r--drivers/gpio/gpiolib-of.c27
-rw-r--r--drivers/gpio/gpiolib-sysfs.c95
-rw-r--r--drivers/gpio/gpiolib.c66
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/Makefile5
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_regs.h13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c39
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c263
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c500
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c135
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c64
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c176
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c111
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c33
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c321
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c450
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c33
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h95
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c50
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c155
-rw-r--r--drivers/gpu/drm/amd/include/cik_structs.h293
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h49
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c5
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c21
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig11
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Makefile7
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c406
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c579
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h213
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c667
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h398
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c319
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c856
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c14
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c5
-rw-r--r--drivers/gpu/drm/bridge/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi.c (renamed from drivers/gpu/drm/imx/imx-hdmi.c)717
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi.h (renamed from drivers/gpu/drm/imx/imx-hdmi.h)4
-rw-r--r--drivers/gpu/drm/bridge/ptn3460.c310
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c9
-rw-r--r--drivers/gpu/drm/drm_atomic.c751
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c321
-rw-r--r--drivers/gpu/drm/drm_bridge.c91
-rw-r--r--drivers/gpu/drm/drm_cache.c13
-rw-r--r--drivers/gpu/drm/drm_crtc.c651
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h6
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c31
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c40
-rw-r--r--drivers/gpu/drm/drm_fops.c89
-rw-r--r--drivers/gpu/drm/drm_info.c24
-rw-r--r--drivers/gpu/drm/drm_internal.h1
-rw-r--r--drivers/gpu/drm/drm_ioctl.c13
-rw-r--r--drivers/gpu/drm/drm_irq.c22
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/drm_modes.c183
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c42
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c97
-rw-r--r--drivers/gpu/drm/drm_sysfs.c132
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c3
-rw-r--r--drivers/gpu/drm/exynos/Kconfig25
-rw-r--r--drivers/gpu/drm/exynos/Makefile4
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c990
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c67
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c247
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c192
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c152
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c132
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c174
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c22
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c3
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c52
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Makefile5
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c131
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c442
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c49
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c47
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h443
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c696
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c137
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c114
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c145
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c170
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c93
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c181
-rw-r--r--drivers/gpu/drm/i915/i915_params.c14
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h410
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c133
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h69
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c237
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c246
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c112
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c45
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h25
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c19
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c130
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2173
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c276
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c19
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h204
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c835
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h75
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c437
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h78
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c322
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c12
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c23
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c701
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c15
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c2
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c51
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c385
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h43
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c19
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c46
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c18
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1097
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c308
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen6.c25
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen7.c25
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen8.c25
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen9.c25
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c289
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h37
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c73
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c34
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c30
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c394
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c13
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c1148
-rw-r--r--drivers/gpu/drm/imx/Kconfig3
-rw-r--r--drivers/gpu/drm/imx/Makefile2
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c258
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c87
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c8
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c28
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c78
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c12
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile9
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h6
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h248
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h420
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h41
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h11
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h11
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h11
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c208
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h85
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h292
-rw-r--r--drivers/gpu/drm/msm/edp/edp_aux.c268
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c120
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c161
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c1373
-rw-r--r--drivers/gpu/drm/msm/edp/edp_phy.c106
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c145
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h9
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h106
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h55
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c65
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c119
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c34
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h19
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c154
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c104
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h245
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c230
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c127
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c56
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h19
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c216
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h28
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_format.c108
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h24
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c50
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h14
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c13
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h5
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild66
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/Makefile400
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c323
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c176
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c475
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c357
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c324
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h252
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h202
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h270
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c70
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h91
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h184
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h54
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engine.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h24
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h71
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/namedb.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/option.h20
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h62
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ramht.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/falcon.h83
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h126
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h86
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/mpeg.h63
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/perfmon.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/xtensa.h38
l---------drivers/gpu/drm/nouveau/core/include/nvif/class.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/event.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/unpack.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h28
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bus.h53
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h166
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h159
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fuse.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h47
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h136
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ibus.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h52
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltc.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mxm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h83
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h135
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/volt.h61
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c149
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c220
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/priv.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h20
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pll.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h67
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/base.c272
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c201
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h159
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c483
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild (renamed from drivers/gpu/drm/nouveau/dispnv04/Makefile)0
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c20
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h (renamed from drivers/gpu/drm/nouveau/nvif/class.h)23
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h (renamed from drivers/gpu/drm/nouveau/nvif/client.h)6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h61
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h (renamed from drivers/gpu/drm/nouveau/nvif/driver.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/event.h (renamed from drivers/gpu/drm/nouveau/nvif/event.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h (renamed from drivers/gpu/drm/nouveau/nvif/ioctl.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/list.h (renamed from drivers/gpu/drm/nouveau/nvif/list.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/notify.h (renamed from drivers/gpu/drm/nouveau/nvif/notify.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h (renamed from drivers/gpu/drm/nouveau/nvif/object.h)2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h (renamed from drivers/gpu/drm/nouveau/core/os.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/unpack.h (renamed from drivers/gpu/drm/nouveau/nvif/unpack.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h55
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/debug.h (renamed from drivers/gpu/drm/nouveau/core/include/core/debug.h)6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h101
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h56
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/event.h (renamed from drivers/gpu/drm/nouveau/core/include/core/event.h)27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/handle.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/notify.h (renamed from drivers/gpu/drm/nouveau/core/include/core/notify.h)5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h (renamed from drivers/gpu/drm/nouveau/core/include/core/object.h)109
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/option.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/parent.h58
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/printk.h (renamed from drivers/gpu/drm/nouveau/core/include/core/printk.h)13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h (renamed from drivers/gpu/drm/nouveau/core/include/core/subdev.h)55
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/device.h (renamed from drivers/gpu/drm/nouveau/core/include/engine/device.h)3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h81
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h126
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h50
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h)12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h)4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h)10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h)10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h)16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h)9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h)4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h)10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h)10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h)4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h)8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h)6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h)18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h)10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h)12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h)8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h)15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h23
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h)5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h50
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h161
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h154
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h135
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h104
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h61
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/vga.h)0
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c126
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c102
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c66
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c165
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c8
l---------drivers/gpu/drm/nouveau/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/Kbuild17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c (renamed from drivers/gpu/drm/nouveau/core/core/client.c)93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engctx.c (renamed from drivers/gpu/drm/nouveau/core/core/engctx.c)130
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c (renamed from drivers/gpu/drm/nouveau/core/core/engine.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/enum.c (renamed from drivers/gpu/drm/nouveau/core/core/enum.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/event.c (renamed from drivers/gpu/drm/nouveau/core/core/event.c)3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c316
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/handle.c (renamed from drivers/gpu/drm/nouveau/core/core/handle.c)97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c (renamed from drivers/gpu/drm/nouveau/core/core/ioctl.c)170
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/mm.c (renamed from drivers/gpu/drm/nouveau/core/core/mm.c)71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/namedb.c (renamed from drivers/gpu/drm/nouveau/core/core/namedb.c)108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/notify.c (renamed from drivers/gpu/drm/nouveau/core/core/notify.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c (renamed from drivers/gpu/drm/nouveau/core/core/object.c)106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/option.c (renamed from drivers/gpu/drm/nouveau/core/core/option.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/parent.c (renamed from drivers/gpu/drm/nouveau/core/core/parent.c)56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/printk.c (renamed from drivers/gpu/drm/nouveau/core/core/printk.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ramht.c (renamed from drivers/gpu/drm/nouveau/core/core/ramht.c)46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c (renamed from drivers/gpu/drm/nouveau/core/core/subdev.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c (renamed from drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c)63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc (renamed from drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc)58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c166
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c173
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c (renamed from drivers/gpu/drm/nouveau/core/engine/copy/nva3.c)112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c (renamed from drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c)136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/acpi.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/base.c)287
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/ctrl.c)66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c326
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/gm100.c)97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/nv04.c)32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/nv10.c)90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/nv20.c)52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/nv30.c)62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c (renamed from drivers/gpu/drm/nouveau/core/engine/device/nv40.c)208
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c478
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/base.c)84
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/conn.c)49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h (renamed from drivers/gpu/drm/nouveau/core/engine/disp/conn.h)28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/dport.c)30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h (renamed from drivers/gpu/drm/nouveau/core/engine/disp/dport.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nv84.c)72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nv94.c)69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c)289
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nve0.c)96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c)76
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/gm107.c)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/gm204.c)65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nva0.c)56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nva3.c)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c)12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nv04.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/nv50.c)260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h226
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/outp.c)53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h (renamed from drivers/gpu/drm/nouveau/core/engine/disp/outp.h)31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c)53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h (renamed from drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c)42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c)74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c)63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c)31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c (renamed from drivers/gpu/drm/nouveau/core/engine/disp/vga.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c (renamed from drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c)45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c)57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c)57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c (renamed from drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c)44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c)32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c (renamed from drivers/gpu/drm/nouveau/core/engine/falcon.c)51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/base.c)115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c)328
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c)506
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c)537
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c)17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c)17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c)170
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h)39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c)77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c)87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c)101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c)223
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c)330
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h199
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c)73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c)175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c)101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c)186
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c)177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c)244
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c)185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c)55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c)177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c)49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c)138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c)69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctx.h)38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c)324
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/com.fuc (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc)2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc)2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc)2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5 (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c)633
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h250
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c)101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c)115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c)107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c)131
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c)129
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nve4.c)193
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c)171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c)97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv108.c)163
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/gm107.c)207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv04.c)742
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv10.c)432
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv20.c)191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv25.c)114
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c)80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv30.c)130
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv34.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv35.c)116
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv40.c)249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv40.h)16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nv50.c)326
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h (renamed from drivers/gpu/drm/nouveau/core/engine/graph/regs.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c (renamed from drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c)62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c (renamed from drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c)111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h (renamed from drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c (renamed from drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c (renamed from drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c)78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c)91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c (renamed from drivers/gpu/drm/nouveau/core/engine/vp/nv98.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/vp/nve0.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c (renamed from drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c (renamed from drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c)85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c (renamed from drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c (renamed from drivers/gpu/drm/nouveau/core/engine/perfmon/base.c)205
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c (renamed from drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c)31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c)108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c (renamed from drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c)69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c (renamed from drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c)85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s (renamed from drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc)104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h (renamed from drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c (renamed from drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c)113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c (renamed from drivers/gpu/drm/nouveau/core/engine/software/nvc0.c)92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c (renamed from drivers/gpu/drm/nouveau/core/engine/software/nv04.c)97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c (renamed from drivers/gpu/drm/nouveau/core/engine/software/nv10.c)86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c (renamed from drivers/gpu/drm/nouveau/core/engine/software/nv50.c)153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c (renamed from drivers/gpu/drm/nouveau/core/engine/vp/nv84.c)63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c (renamed from drivers/gpu/drm/nouveau/core/engine/xtensa.c)70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c219
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c)26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c)136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c)12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/base.c)91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/bit.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/boost.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/conn.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/disp.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/dp.c)22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/fan.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/image.c)5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/init.c)194
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/npde.c)5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c)5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/perf.c)18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/pll.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/priv.h)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c)7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/therm.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/timing.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/volt.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c)17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c)26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h)24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/base.c)204
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c)132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c)142
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c)114
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c)153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c)90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c)87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c)88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h (renamed from drivers/gpu/drm/nouveau/core/subdev/clock/seq.h)3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/base.c)43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c)22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c)28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c)51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c)22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c)44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c)28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c)16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c)16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c)39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/base.c)58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c)26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c)17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c)98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c)178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c)241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c)33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c)202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c)46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv10.c)24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv1a.c)25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv20.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c)41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c)111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h)3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h)5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c)3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c)3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fuse/base.c)25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c)24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c)41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/base.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c)33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c)70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/base.c)300
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c)89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c)72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c)17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c)58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h)32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c)5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h (renamed from drivers/gpu/drm/nouveau/core/subdev/i2c/port.h)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c)47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c)53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c)25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/instmem/base.c)75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c)97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c)51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c)66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltc/base.c)30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c)7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c)19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h (renamed from drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h)46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/base.c)72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c)39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c)25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c)13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c)11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c)25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c480
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c)95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c)70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c (renamed from drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c)74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c (renamed from drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c)80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c)97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mxm/base.c)49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c)20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c268
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/arith.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3 (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5 (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3 (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/i2c_.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/idle.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/perf.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c)62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c229
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c200
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/base.c)199
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/fan.c)89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c)15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c)52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c)144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c)66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c)51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/ic.c)22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c)53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c)49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c (renamed from drivers/gpu/drm/nouveau/core/subdev/therm/temp.c)139
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/timer/base.c)23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c)9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c)48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h (renamed from drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h (renamed from drivers/gpu/drm/nouveau/core/subdev/timer/priv.h)2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/volt/base.c)65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c (renamed from drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c)36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c (renamed from drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c)16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c)21
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c10
-rw-r--r--drivers/gpu/drm/panel/Kconfig2
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c33
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c63
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c22
-rw-r--r--drivers/gpu/drm/radeon/Makefile8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c29
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c57
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c77
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h167
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c40
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c264
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c218
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c76
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c478
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h15
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c19
-rw-r--r--drivers/gpu/drm/radeon/ni.c28
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c7
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/nid.h24
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r300.c16
-rw-r--r--drivers/gpu/drm/radeon/r600.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c399
-rw-r--r--drivers/gpu/drm/radeon/radeon.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h33
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c766
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h84
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c285
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c168
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c12
-rw-r--r--drivers/gpu/drm/radeon/rs400.c14
-rw-r--r--drivers/gpu/drm/radeon/rs600.c21
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c15
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c114
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/sid.h28
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c95
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c34
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c21
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c18
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c1
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig12
-rw-r--r--drivers/gpu/drm/rockchip/Makefile2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c341
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig4
-rw-r--r--drivers/gpu/drm/sti/Kconfig3
-rw-r--r--drivers/gpu/drm/sti/Makefile4
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c182
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.h34
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.c6
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c560
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c11
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c11
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c190
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c118
-rw-r--r--drivers/gpu/drm/tegra/dc.c954
-rw-r--r--drivers/gpu/drm/tegra/drm.c140
-rw-r--r--drivers/gpu/drm/tegra/drm.h91
-rw-r--r--drivers/gpu/drm/tegra/dsi.c578
-rw-r--r--drivers/gpu/drm/tegra/fb.c25
-rw-r--r--drivers/gpu/drm/tegra/gem.c39
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c327
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.c25
-rw-r--r--drivers/gpu/drm/tegra/output.c168
-rw-r--r--drivers/gpu/drm/tegra/rgb.c218
-rw-r--r--drivers/gpu/drm/tegra/sor.c759
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c22
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c9
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/host1x/bus.c201
-rw-r--r--drivers/gpu/host1x/bus.h4
-rw-r--r--drivers/gpu/host1x/dev.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c30
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c121
-rw-r--r--drivers/hid/Kconfig12
-rw-r--r--drivers/hid/Makefile50
-rw-r--r--drivers/hid/hid-betopff.c160
-rw-r--r--drivers/hid/hid-core.c32
-rw-r--r--drivers/hid/hid-hyperv.c2
-rw-r--r--drivers/hid/hid-ids.h10
-rw-r--r--drivers/hid/hid-input.c26
-rw-r--r--drivers/hid/hid-lenovo.c79
-rw-r--r--drivers/hid/hid-lg4ff.c11
-rw-r--r--drivers/hid/hid-logitech-hidpp.c81
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-rmi.c118
-rw-r--r--drivers/hid/usbhid/Makefile12
-rw-r--r--drivers/hid/usbhid/hid-pidff.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_sys.c9
-rw-r--r--drivers/hid/wacom_wac.c239
-rw-r--r--drivers/hid/wacom_wac.h17
-rw-r--r--drivers/hsi/clients/nokia-modem.c1
-rw-r--r--drivers/hv/channel.c54
-rw-r--r--drivers/hv/channel_mgmt.c54
-rw-r--r--drivers/hv/connection.c6
-rw-r--r--drivers/hv/hv.c78
-rw-r--r--drivers/hv/hv_balloon.c88
-rw-r--r--drivers/hv/hv_fcopy.c27
-rw-r--r--drivers/hv/hyperv_vmbus.h21
-rw-r--r--drivers/hv/vmbus_drv.c41
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abx500.c6
-rw-r--r--drivers/hwmon/ad7314.c5
-rw-r--r--drivers/hwmon/adc128d818.c3
-rw-r--r--drivers/hwmon/ads7828.c102
-rw-r--r--drivers/hwmon/i5500_temp.c149
-rw-r--r--drivers/hwmon/ina2xx.c334
-rw-r--r--drivers/hwmon/jc42.c15
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/tmp102.c15
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c23
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c12
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/idle/intel_idle.c1
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/iio/accel/Kconfig31
-rw-r--r--drivers/iio/accel/Makefile6
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c8
-rw-r--r--drivers/iio/accel/kxcjk-1013.c52
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/accel/mma9551.c637
-rw-r--r--drivers/iio/accel/mma9551_core.c798
-rw-r--r--drivers/iio/accel/mma9551_core.h81
-rw-r--r--drivers/iio/accel/mma9553.c1334
-rw-r--r--drivers/iio/accel/ssp_accel_sensor.c169
-rw-r--r--drivers/iio/adc/Kconfig25
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad799x.c15
-rw-r--r--drivers/iio/adc/cc10001_adc.c423
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c1016
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c16
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c75
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h5
-rw-r--r--drivers/iio/common/ssp_sensors/Kconfig26
-rw-r--r--drivers/iio/common/ssp_sensors/Makefile8
-rw-r--r--drivers/iio/common/ssp_sensors/ssp.h257
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c712
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c107
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio_sensor.h71
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c608
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c2
-rw-r--r--drivers/iio/frequency/ad9523.c2
-rw-r--r--drivers/iio/frequency/adf4350.c7
-rw-r--r--drivers/iio/gyro/Makefile2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c8
-rw-r--r--drivers/iio/gyro/ssp_gyro_sensor.c168
-rw-r--r--drivers/iio/iio_core.h9
-rw-r--r--drivers/iio/imu/Kconfig11
-rw-r--r--drivers/iio/imu/Makefile2
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c124
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c39
-rw-r--r--drivers/iio/imu/kmx61.c1595
-rw-r--r--drivers/iio/industrialio-buffer.c410
-rw-r--r--drivers/iio/industrialio-core.c57
-rw-r--r--drivers/iio/industrialio-event.c15
-rw-r--r--drivers/iio/industrialio-triggered-buffer.c13
-rw-r--r--drivers/iio/inkern.c33
-rw-r--r--drivers/iio/kfifo_buf.c87
-rw-r--r--drivers/iio/light/Kconfig24
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm3232.c403
-rw-r--r--drivers/iio/light/hid-sensor-als.c9
-rw-r--r--drivers/iio/light/hid-sensor-prox.c10
-rw-r--r--drivers/iio/light/jsa1212.c471
-rw-r--r--drivers/iio/light/lm3533-als.c2
-rw-r--r--drivers/iio/light/tcs3414.c4
-rw-r--r--drivers/iio/magnetometer/Kconfig15
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/ak09911.c326
-rw-r--r--drivers/iio/magnetometer/ak8975.c505
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c9
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c9
-rw-r--r--drivers/iio/pressure/bmp280.c150
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c9
-rw-r--r--drivers/iio/proximity/Kconfig17
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c18
-rw-r--r--drivers/iio/proximity/sx9500.c752
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c2
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c137
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c118
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c60
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c62
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h126
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h812
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c1
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c57
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c3
-rw-r--r--drivers/infiniband/hw/mlx4/main.c249
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h26
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c6
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c90
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c8
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c27
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c239
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c22
-rw-r--r--drivers/input/evdev.c71
-rw-r--r--drivers/input/input-mt.c31
-rw-r--r--drivers/input/input.c38
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/atakbd.c2
-rw-r--r--drivers/input/keyboard/atkbd.c4
-rw-r--r--drivers/input/keyboard/cap11xx.c1
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/keyboard/imx_keypad.c3
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c5
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c286
-rw-r--r--drivers/input/misc/Kconfig43
-rw-r--r--drivers/input/misc/Makefile4
-rw-r--r--drivers/input/misc/axp20x-pek.c290
-rw-r--r--drivers/input/misc/drv260x.c1
-rw-r--r--drivers/input/misc/drv2667.c1
-rw-r--r--drivers/input/misc/e3x0-button.c157
-rw-r--r--drivers/input/misc/regulator-haptic.c266
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c126
-rw-r--r--drivers/input/mouse/Kconfig22
-rw-r--r--drivers/input/mouse/Makefile3
-rw-r--r--drivers/input/mouse/alps.c42
-rw-r--r--drivers/input/mouse/bcm5974.c2
-rw-r--r--drivers/input/mouse/cyapa.c1710
-rw-r--r--drivers/input/mouse/cyapa.h301
-rw-r--r--drivers/input/mouse/cyapa_gen3.c1247
-rw-r--r--drivers/input/mouse/cyapa_gen5.c2777
-rw-r--r--drivers/input/mouse/cypress_ps2.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h6
-rw-r--r--drivers/input/mouse/elan_i2c_core.c21
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c1
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c3
-rw-r--r--drivers/input/mouse/elantech.c18
-rw-r--r--drivers/input/mouse/focaltech.c408
-rw-r--r--drivers/input/mouse/focaltech.h2
-rw-r--r--drivers/input/mouse/psmouse-base.c32
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/synaptics.c445
-rw-r--r--drivers/input/mouse/synaptics.h18
-rw-r--r--drivers/input/serio/Kconfig10
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/gscps2.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h47
-rw-r--r--drivers/input/serio/i8042.c14
-rw-r--r--drivers/input/serio/sun4i-ps2.c340
-rw-r--r--drivers/input/tablet/gtco.c20
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c78
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c197
-rw-r--r--drivers/iommu/Kconfig51
-rw-r--r--drivers/iommu/Makefile5
-rw-r--r--drivers/iommu/amd_iommu.c15
-rw-r--r--drivers/iommu/amd_iommu_init.c17
-rw-r--r--drivers/iommu/amd_iommu_proto.h3
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/amd_iommu_v2.c35
-rw-r--r--drivers/iommu/arm-smmu.c935
-rw-r--r--drivers/iommu/fsl_pamu.c216
-rw-r--r--drivers/iommu/fsl_pamu.h15
-rw-r--r--drivers/iommu/fsl_pamu_domain.c173
-rw-r--r--drivers/iommu/intel-iommu.c45
-rw-r--r--drivers/iommu/intel_irq_remapping.c96
-rw-r--r--drivers/iommu/io-pgtable-arm.c986
-rw-r--r--drivers/iommu/io-pgtable.c82
-rw-r--r--drivers/iommu/io-pgtable.h143
-rw-r--r--drivers/iommu/iommu.c7
-rw-r--r--drivers/iommu/iova.c53
-rw-r--r--drivers/iommu/ipmmu-vmsa.c674
-rw-r--r--drivers/iommu/irq_remapping.c74
-rw-r--r--drivers/iommu/irq_remapping.h7
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c3
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c4
-rw-r--r--drivers/irqchip/irq-digicolor.c120
-rw-r--r--drivers/irqchip/irq-gic-common.c18
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c22
-rw-r--r--drivers/irqchip/irq-gic.c9
-rw-r--r--drivers/irqchip/irq-hip04.c11
-rw-r--r--drivers/irqchip/irq-mips-gic.c71
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c18
-rw-r--r--drivers/irqchip/irq-omap-intc.c48
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c50
-rw-r--r--drivers/isdn/hardware/eicon/message.c4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c12
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c6
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c21
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c64
-rw-r--r--drivers/isdn/sc/init.c15
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class-flash.c486
-rw-r--r--drivers/leds/led-class.c9
-rw-r--r--drivers/leds/leds-gpio.c3
-rw-r--r--drivers/leds/leds-mc13783.c4
-rw-r--r--drivers/leds/leds-netxbig.c12
-rw-r--r--drivers/leds/leds.h3
-rw-r--r--drivers/lguest/x86/core.c5
-rw-r--r--drivers/mailbox/Kconfig6
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/mailbox-altera.c388
-rw-r--r--drivers/mailbox/mailbox.c2
-rw-r--r--drivers/mailbox/pcc.c8
-rw-r--r--drivers/mcb/mcb-internal.h1
-rw-r--r--drivers/mcb/mcb-pci.c45
-rw-r--r--drivers/md/Kconfig6
-rw-r--r--drivers/md/bitmap.c28
-rw-r--r--drivers/md/dm-bufio.c3
-rw-r--r--drivers/md/dm-cache-metadata.c104
-rw-r--r--drivers/md/dm-cache-target.c94
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-log-userspace-base.c5
-rw-r--r--drivers/md/dm-mpath.c87
-rw-r--r--drivers/md/dm-raid.c24
-rw-r--r--drivers/md/dm-snap-persistent.c14
-rw-r--r--drivers/md/dm-table.c72
-rw-r--r--drivers/md/dm-target.c15
-rw-r--r--drivers/md/dm-thin-metadata.c9
-rw-r--r--drivers/md/dm-thin-metadata.h2
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm.c355
-rw-r--r--drivers/md/dm.h11
-rw-r--r--drivers/md/faulty.c8
-rw-r--r--drivers/md/linear.c67
-rw-r--r--drivers/md/md.c816
-rw-r--r--drivers/md/md.h57
-rw-r--r--drivers/md/multipath.c22
-rw-r--r--drivers/md/raid0.c29
-rw-r--r--drivers/md/raid1.c52
-rw-r--r--drivers/md/raid1.h3
-rw-r--r--drivers/md/raid10.c49
-rw-r--r--drivers/md/raid10.h3
-rw-r--r--drivers/md/raid5.c339
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/media/common/Kconfig4
-rw-r--r--drivers/media/common/Makefile1
-rw-r--r--drivers/media/common/btcx-risc.h6
-rw-r--r--drivers/media/dvb-core/dvb_net.c88
-rw-r--r--drivers/media/dvb-frontends/Kconfig4
-rw-r--r--drivers/media/dvb-frontends/au8522.h5
-rw-r--r--drivers/media/dvb-frontends/dib8000.c3
-rw-r--r--drivers/media/dvb-frontends/hd29l2.c10
-rw-r--r--drivers/media/dvb-frontends/lg2160.c6
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c23
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.h6
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c6
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.h6
-rw-r--r--drivers/media/dvb-frontends/lgdt330x_priv.h6
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c4
-rw-r--r--drivers/media/dvb-frontends/mn88472.h6
-rw-r--r--drivers/media/dvb-frontends/nxt200x.h6
-rw-r--r--drivers/media/dvb-frontends/or51132.c6
-rw-r--r--drivers/media/dvb-frontends/or51132.h6
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c944
-rw-r--r--drivers/media/dvb-frontends/rtl2830.h79
-rw-r--r--drivers/media/dvb-frontends/rtl2830_priv.h24
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c1336
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h99
-rw-r--r--drivers/media/dvb-frontends/rtl2832_priv.h32
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c1189
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.h57
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1409.h5
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c5
-rw-r--r--drivers/media/dvb-frontends/s5h1411.h5
-rw-r--r--drivers/media/dvb-frontends/si2168.c317
-rw-r--r--drivers/media/dvb-frontends/si2168.h6
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h3
-rw-r--r--drivers/media/dvb-frontends/stb0899_algo.c5
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c7
-rw-r--r--drivers/media/dvb-frontends/tc90522.c1
-rw-r--r--drivers/media/i2c/Kconfig9
-rw-r--r--drivers/media/i2c/adv7180.c1010
-rw-r--r--drivers/media/i2c/adv7604.c76
-rw-r--r--drivers/media/i2c/adv7842.c184
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c9
-rw-r--r--drivers/media/i2c/msp3400-driver.c8
-rw-r--r--drivers/media/i2c/mt9m032.c42
-rw-r--r--drivers/media/i2c/mt9p031.c41
-rw-r--r--drivers/media/i2c/mt9t001.c41
-rw-r--r--drivers/media/i2c/mt9v032.c43
-rw-r--r--drivers/media/i2c/s5k4ecgx.c11
-rw-r--r--drivers/media/i2c/s5k5baf.c13
-rw-r--r--drivers/media/i2c/s5k6aa.c46
-rw-r--r--drivers/media/i2c/smiapp-pll.c7
-rw-r--r--drivers/media/i2c/smiapp-pll.h8
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c386
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.c6
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.h6
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c14
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.h24
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg-defs.h6
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h6
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c6
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.h6
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h7
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c82
-rw-r--r--drivers/media/i2c/ths8200.c10
-rw-r--r--drivers/media/mmc/siano/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/Kconfig4
-rw-r--r--drivers/media/pci/bt8xx/Makefile2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c6
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c (renamed from drivers/media/common/btcx-risc.c)36
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.h26
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c324
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c44
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-if.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c7
-rw-r--r--drivers/media/pci/bt8xx/bttv.h5
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h20
-rw-r--r--drivers/media/pci/cx23885/Kconfig1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c66
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c387
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/Kconfig3
-rw-r--r--drivers/media/pci/cx25821/Makefile3
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c113
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c112
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c685
-rw-r--r--drivers/media/pci/cx25821/cx25821.h48
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c3
-rw-r--r--drivers/media/pci/cx88/cx88-core.c7
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c4
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c7
-rw-r--r--drivers/media/pci/cx88/cx88-tvaudio.c7
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c22
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c6
-rw-r--r--drivers/media/pci/mantis/mantis_core.c23
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c5
-rw-r--r--drivers/media/pci/smipcie/smipcie.c12
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-eeprom.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-enc.c6
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-jpeg.h4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-tw28.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c48
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h6
-rw-r--r--drivers/media/pci/sta2x11/Kconfig1
-rw-r--r--drivers/media/pci/ttpci/av7110.c5
-rw-r--r--drivers/media/pci/ttpci/budget-core.c89
-rw-r--r--drivers/media/pci/tw68/tw68.h1
-rw-r--r--drivers/media/platform/Kconfig11
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/am437x/Kconfig11
-rw-r--r--drivers/media/platform/am437x/Makefile3
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2776
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h283
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h140
-rw-r--r--drivers/media/platform/coda/coda-bit.c25
-rw-r--r--drivers/media/platform/coda/coda-common.c165
-rw-r--r--drivers/media/platform/coda/coda.h2
-rw-r--r--drivers/media/platform/coda/coda_regs.h4
-rw-r--r--drivers/media/platform/davinci/Kconfig6
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h12
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c6
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig3
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c1
-rw-r--r--drivers/media/platform/omap3isp/isp.c3
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c7
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c17
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c23
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c21
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c6
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c21
-rw-r--r--drivers/media/platform/sh_veu.c35
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c12
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c10
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c98
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c11
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c18
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c162
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c4
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c10
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1.h14
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c81
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c5
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c13
-rw-r--r--drivers/media/radio/radio-aimslab.c4
-rw-r--r--drivers/media/radio/tea575x.c41
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c16
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.h1
-rw-r--r--drivers/media/rc/img-ir/Kconfig15
-rw-r--r--drivers/media/rc/img-ir/Makefile2
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c84
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h24
-rw-r--r--drivers/media/rc/img-ir/img-ir-jvc.c8
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c24
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc5.c88
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc6.c117
-rw-r--r--drivers/media/rc/img-ir/img-ir-sanyo.c8
-rw-r--r--drivers/media/rc/img-ir/img-ir-sharp.c8
-rw-r--r--drivers/media/rc/img-ir/img-ir-sony.c12
-rw-r--r--drivers/media/rc/lirc_dev.c6
-rw-r--r--drivers/media/rc/rc-main.c14
-rw-r--r--drivers/media/rc/sunxi-cir.c46
-rw-r--r--drivers/media/tuners/mt20xx.c8
-rw-r--r--drivers/media/tuners/mt2131.c5
-rw-r--r--drivers/media/tuners/mt2131.h5
-rw-r--r--drivers/media/tuners/mt2131_priv.h5
-rw-r--r--drivers/media/tuners/mxl5007t.c8
-rw-r--r--drivers/media/tuners/mxl5007t.h9
-rw-r--r--drivers/media/tuners/si2157.c189
-rw-r--r--drivers/media/tuners/si2157_priv.h3
-rw-r--r--drivers/media/tuners/tda18271-fe.c8
-rw-r--r--drivers/media/tuners/tda18271-maps.c8
-rw-r--r--drivers/media/tuners/tda18271-priv.h8
-rw-r--r--drivers/media/tuners/tda827x.c8
-rw-r--r--drivers/media/tuners/tda8290.c8
-rw-r--r--drivers/media/tuners/tda9887.c8
-rw-r--r--drivers/media/tuners/tuner-simple.c8
-rw-r--r--drivers/media/usb/au0828/Kconfig2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c2
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c122
-rw-r--r--drivers/media/usb/au0828/au0828-video.c976
-rw-r--r--drivers/media/usb/au0828/au0828.h61
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h10
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c336
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h9
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h6
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c940
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h27
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c8
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c14
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c9
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c7
-rw-r--r--drivers/media/usb/gspca/Kconfig10
-rw-r--r--drivers/media/usb/gspca/Makefile2
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/ov534.c10
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c4
-rw-r--r--drivers/media/usb/gspca/touptek.c731
-rw-r--r--drivers/media/usb/gspca/vc032x.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-audio.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-audio.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.h9
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c12
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debug.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-devattr.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-devattr.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c50
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.h13
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ioread.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ioread.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-main.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-util.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c34
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-wm8775.c12
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-wm8775.h10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2.h10
-rw-r--r--drivers/media/usb/pwc/pwc-if.c12
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/siano/Kconfig2
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c5
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c1
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c13
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c8
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c8
-rw-r--r--drivers/media/usb/usbvision/usbvision.h8
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c5
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c19
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c1
-rw-r--r--drivers/media/usb/uvc/uvc_video.c6
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h3
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c35
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c8
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c15
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c19
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c9
-rw-r--r--drivers/memory/fsl-corenet-cf.c36
-rw-r--r--drivers/message/Makefile1
-rw-r--r--drivers/mfd/da9052-core.c3
-rw-r--r--drivers/mfd/rtsx_usb.c12
-rw-r--r--drivers/mfd/tps65218.c12
-rw-r--r--drivers/misc/ad525x_dpot-spi.c4
-rw-r--r--drivers/misc/ad525x_dpot.c7
-rw-r--r--drivers/misc/cxl/Makefile5
-rw-r--r--drivers/misc/cxl/context.c82
-rw-r--r--drivers/misc/cxl/cxl.h22
-rw-r--r--drivers/misc/cxl/fault.c11
-rw-r--r--drivers/misc/cxl/file.c21
-rw-r--r--drivers/misc/cxl/irq.c7
-rw-r--r--drivers/misc/cxl/main.c2
-rw-r--r--drivers/misc/cxl/native.c39
-rw-r--r--drivers/misc/cxl/pci.c123
-rw-r--r--drivers/misc/cxl/sysfs.c236
-rw-r--r--drivers/misc/cxl/trace.c13
-rw-r--r--drivers/misc/cxl/trace.h459
-rw-r--r--drivers/misc/enclosure.c108
-rw-r--r--drivers/misc/genwqe/card_base.h1
-rw-r--r--drivers/misc/genwqe/card_sysfs.c1
-rw-r--r--drivers/misc/ioc4.c31
-rw-r--r--drivers/misc/mei/amthif.c14
-rw-r--r--drivers/misc/mei/bus.c69
-rw-r--r--drivers/misc/mei/client.c156
-rw-r--r--drivers/misc/mei/client.h17
-rw-r--r--drivers/misc/mei/debugfs.c32
-rw-r--r--drivers/misc/mei/hbm.c34
-rw-r--r--drivers/misc/mei/hw-me.c15
-rw-r--r--drivers/misc/mei/main.c22
-rw-r--r--drivers/misc/mei/mei_dev.h8
-rw-r--r--drivers/misc/mei/nfc.c2
-rw-r--r--drivers/misc/mei/wd.c1
-rw-r--r--drivers/misc/ti-st/st_core.c24
-rw-r--r--drivers/misc/ti-st/st_kim.c144
-rw-r--r--drivers/misc/ti-st/st_ll.c17
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c13
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c16
-rw-r--r--drivers/mmc/card/block.c6
-rw-r--r--drivers/mmc/card/mmc_test.c18
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/core.c152
-rw-r--r--drivers/mmc/core/core.h7
-rw-r--r--drivers/mmc/core/host.c65
-rw-r--r--drivers/mmc/core/mmc.c101
-rw-r--r--drivers/mmc/core/mmc_ops.c30
-rw-r--r--drivers/mmc/core/pwrseq.c112
-rw-r--r--drivers/mmc/core/pwrseq.h43
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c101
-rw-r--r--drivers/mmc/core/pwrseq_simple.c145
-rw-r--r--drivers/mmc/core/sd.c27
-rw-r--r--drivers/mmc/core/sdio.c14
-rw-r--r--drivers/mmc/core/sdio_bus.c11
-rw-r--r--drivers/mmc/core/slot-gpio.c182
-rw-r--r--drivers/mmc/core/slot-gpio.h13
-rw-r--r--drivers/mmc/host/Kconfig15
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c113
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.h56
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c124
-rw-r--r--drivers/mmc/host/dw_mmc.h9
-rw-r--r--drivers/mmc/host/mmci.c7
-rw-r--r--drivers/mmc/host/moxart-mmc.c24
-rw-r--r--drivers/mmc/host/mvsdio.c10
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c150
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c542
-rw-r--r--drivers/mmc/host/sdhci-acpi.c15
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c4
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c17
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c20
-rw-r--r--drivers/mmc/host/sdhci-pci.c32
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c128
-rw-r--r--drivers/mmc/host/sdhci-s3c.c12
-rw-r--r--drivers/mmc/host/sdhci-sirf.c74
-rw-r--r--drivers/mmc/host/sdhci-st.c4
-rw-r--r--drivers/mmc/host/sdhci-tegra.c30
-rw-r--r--drivers/mmc/host/sdhci.c205
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c237
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c117
-rw-r--r--drivers/mmc/host/sunxi-mmc.c29
-rw-r--r--drivers/mmc/host/tmio_mmc.c15
-rw-r--r--drivers/mmc/host/tmio_mmc.h43
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c38
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c61
-rw-r--r--drivers/mmc/host/toshsd.c17
-rw-r--r--drivers/mmc/host/vub300.c4
-rw-r--r--drivers/mtd/mtdchar.c72
-rw-r--r--drivers/mtd/mtdconcat.c10
-rw-r--r--drivers/mtd/mtdcore.c81
-rw-r--r--drivers/mtd/mtdpart.c1
-rw-r--r--drivers/mtd/ubi/block.c247
-rw-r--r--drivers/mtd/ubi/build.c6
-rw-r--r--drivers/mtd/ubi/cdev.c19
-rw-r--r--drivers/mtd/ubi/eba.c56
-rw-r--r--drivers/mtd/ubi/fastmap.c13
-rw-r--r--drivers/mtd/ubi/io.c3
-rw-r--r--drivers/mtd/ubi/kapi.c110
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/ubi.h19
-rw-r--r--drivers/mtd/ubi/vtbl.c7
-rw-r--r--drivers/mtd/ubi/wl.c10
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/bonding/bond_3ad.c55
-rw-r--r--drivers/net/bonding/bond_main.c121
-rw-r--r--drivers/net/bonding/bond_options.c6
-rw-r--r--drivers/net/caif/caif_hsi.c1
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c5
-rw-r--r--drivers/net/can/c_can/c_can_platform.c29
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/dev.c13
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/janz-ican3.c7
-rw-r--r--drivers/net/can/m_can/m_can.c6
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/rcar_can.c1
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/can/spi/mcp251x.c1
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/Kconfig22
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c766
-rw-r--r--drivers/net/can/usb/peak_usb/Makefile2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h222
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c83
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h26
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c1095
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h13
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c88
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h4
-rw-r--r--drivers/net/dsa/mv88e6131.c3
-rw-r--r--drivers/net/dsa/mv88e6352.c13
-rw-r--r--drivers/net/dsa/mv88e6xxx.c9
-rw-r--r--drivers/net/ethernet/3com/typhoon.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c8
-rw-r--r--drivers/net/ethernet/amd/Kconfig6
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c4
-rw-r--r--drivers/net/ethernet/amd/atarilance.c8
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h9
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c32
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c70
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c82
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c203
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c29
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c12
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h31
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c94
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c111
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c24
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c9
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c14
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c52
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c10
-rw-r--r--drivers/net/ethernet/cadence/macb.c84
-rw-r--r--drivers/net/ethernet/cadence/macb.h631
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/mc5.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c317
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h169
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c100
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c1917
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1003
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c270
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1543
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h367
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h3392
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h124
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h101
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h48
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c45
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig3
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h16
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c56
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c21
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c181
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_stats.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c40
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dnet.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h203
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c231
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h218
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c16
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h240
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c993
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/fec.h5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c155
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c95
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c130
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig9
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c971
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_mdio.c186
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig22
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c41
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c44
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c15
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h152
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c136
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c149
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c113
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c34
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h108
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c44
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c112
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c157
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c267
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c24
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c120
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c90
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h36
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c503
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h10
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c59
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c422
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c144
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c492
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c12
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c3
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c50
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c32
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c22
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c249
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h34
-rw-r--r--drivers/net/ethernet/rocker/rocker.c177
-rw-r--r--drivers/net/ethernet/rocker/rocker.h21
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c68
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c8
-rw-r--r--drivers/net/ethernet/smsc/Kconfig10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c437
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h1
-rw-r--r--drivers/net/ethernet/sun/niu.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c91
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c4
-rw-r--r--drivers/net/ethernet/ti/Kconfig25
-rw-r--r--drivers/net/ethernet/ti/Makefile11
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c55
-rw-r--r--drivers/net/ethernet/ti/cpsw.c171
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c36
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c5
-rw-r--r--drivers/net/ethernet/ti/cpts.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c152
-rw-r--r--drivers/net/ethernet/ti/netcp.h229
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2149
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c2159
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c131
-rw-r--r--drivers/net/ethernet/ti/netcp_xgbepcsr.c501
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/tile/tilegx.c5
-rw-r--r--drivers/net/ethernet/tile/tilepro.c5
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/fddi/skfp/smt.c12
-rw-r--r--drivers/net/hyperv/netvsc.c22
-rw-r--r--drivers/net/hyperv/rndis_filter.c24
-rw-r--r--drivers/net/ieee802154/at86rf230.c82
-rw-r--r--drivers/net/ieee802154/cc2520.c37
-rw-r--r--drivers/net/ieee802154/mrf24j40.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c8
-rw-r--r--drivers/net/irda/ali-ircc.c11
-rw-r--r--drivers/net/irda/ali-ircc.h5
-rw-r--r--drivers/net/irda/au1k_ir.c3
-rw-r--r--drivers/net/irda/irda-usb.c10
-rw-r--r--drivers/net/irda/irda-usb.h5
-rw-r--r--drivers/net/irda/kingsun-sir.c3
-rw-r--r--drivers/net/irda/ks959-sir.c3
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/mcs7780.h1
-rw-r--r--drivers/net/irda/nsc-ircc.c7
-rw-r--r--drivers/net/irda/nsc-ircc.h5
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/stir4200.c16
-rw-r--r--drivers/net/irda/via-ircc.h4
-rw-r--r--drivers/net/irda/vlsi_ir.c46
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/macvtap.c22
-rw-r--r--drivers/net/mii.c12
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c981
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c22
-rw-r--r--drivers/net/ppp/ppp_deflate.c2
-rw-r--r--drivers/net/team/team.c28
-rw-r--r--drivers/net/tun.c62
-rw-r--r--drivers/net/usb/hso.c106
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/r8152.c259
-rw-r--r--drivers/net/usb/sr9700.c36
-rw-r--r--drivers/net/usb/sr9700.h66
-rw-r--r--drivers/net/usb/usbnet.c17
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c30
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h3
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c54
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c29
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h6
-rw-r--r--drivers/net/vxlan.c450
-rw-r--r--drivers/net/wan/Kconfig6
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c322
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h61
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c122
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c243
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h87
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c402
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c99
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c58
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h136
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c666
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c170
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c244
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h58
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h68
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h1064
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2696
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h1444
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2238
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h449
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c80
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c315
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar956x_initvals.h1046
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h15
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c263
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c90
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h129
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_wow.h128
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c228
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c83
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c12
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c24
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c73
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c83
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h9
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig9
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c179
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c164
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c46
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c109
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c205
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c65
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c277
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c151
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h158
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h183
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform_msm.c257
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c239
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h70
-rw-r--r--drivers/net/wireless/atmel.c12
-rw-r--r--drivers/net/wireless/b43/Kconfig9
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h3
-rw-r--r--drivers/net/wireless/b43/main.c71
-rw-r--r--drivers/net/wireless/b43/phy_ac.c92
-rw-r--r--drivers/net/wireless/b43/phy_ac.h38
-rw-r--r--drivers/net/wireless/b43/phy_common.c9
-rw-r--r--drivers/net/wireless/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/b43legacy/radio.c19
-rw-r--r--drivers/net/wireless/b43legacy/radio.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c90
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bus.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c227
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/common.c34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/common.h (renamed from drivers/net/wireless/ath/wil6210/wil_platform_msm.h)24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/commonring.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.c42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.h34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h55
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/pcie.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c178
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c32
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h12
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h4
-rw-r--r--drivers/net/wireless/cw1200/fwio.c40
-rw-r--r--drivers/net/wireless/cw1200/main.c6
-rw-r--r--drivers/net/wireless/cw1200/pm.c5
-rw-r--r--drivers/net/wireless/cw1200/queue.c4
-rw-r--r--drivers/net/wireless/cw1200/scan.c8
-rw-r--r--drivers/net/wireless/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c31
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scd.h41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h50
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h35
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c51
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c247
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h9
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h277
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h39
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h301
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c117
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c342
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h88
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c83
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c551
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h53
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c140
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c44
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tdls.c63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c81
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h18
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c78
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c100
-rw-r--r--drivers/net/wireless/libertas/cfg.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c33
-rw-r--r--drivers/net/wireless/mwifiex/11h.c198
-rw-r--r--drivers/net/wireless/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/mwifiex/11n.h14
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c15
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c16
-rw-r--r--drivers/net/wireless/mwifiex/Makefile2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c951
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c22
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c46
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c281
-rw-r--r--drivers/net/wireless/mwifiex/decl.h55
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c16
-rw-r--r--drivers/net/wireless/mwifiex/fw.h61
-rw-r--r--drivers/net/wireless/mwifiex/ie.c89
-rw-r--r--drivers/net/wireless/mwifiex/init.c35
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/mwifiex/main.c147
-rw-r--r--drivers/net/wireless/mwifiex/main.h84
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c7
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h3
-rw-r--r--drivers/net/wireless/mwifiex/scan.c16
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c111
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h49
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c24
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c7
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c18
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c38
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c9
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c28
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c35
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c70
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c50
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c28
-rw-r--r--drivers/net/wireless/mwifiex/usb.c27
-rw-r--r--drivers/net/wireless/mwifiex/usb.h11
-rw-r--r--drivers/net/wireless/mwifiex/util.c222
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/mwl8k.c12
-rw-r--r--drivers/net/wireless/orinoco/Kconfig3
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c4
-rw-r--r--drivers/net/wireless/p54/eeprom.c6
-rw-r--r--drivers/net/wireless/p54/fwio.c9
-rw-r--r--drivers/net/wireless/p54/main.c10
-rw-r--r--drivers/net/wireless/p54/p54pci.c7
-rw-r--r--drivers/net/wireless/p54/txrx.c12
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c18
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c18
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c8
-rw-r--r--drivers/net/wireless/rtlwifi/base.c156
-rw-r--r--drivers/net/wireless/rtlwifi/base.h4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c72
-rw-r--r--drivers/net/wireless/rtlwifi/core.h42
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c63
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c45
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c165
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c27
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/dm.c55
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/dm.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/hw.c166
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/reg.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/trx.c200
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/trx.h12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.h28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c42
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.c55
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.h33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/def.h54
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.c58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/sw.c74
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/trx.c232
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h99
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c88
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c93
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.h27
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h23
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c43
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c21
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h14
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c37
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h7
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h48
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c405
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h12
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h7
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c11
-rw-r--r--drivers/net/xen-netback/netback.c216
-rw-r--r--drivers/net/xen-netfront.c329
-rw-r--r--drivers/nfc/microread/microread.c3
-rw-r--r--drivers/nfc/pn544/i2c.c133
-rw-r--r--drivers/nfc/pn544/pn544.c3
-rw-r--r--drivers/nfc/st21nfca/Makefile2
-rw-r--r--drivers/nfc/st21nfca/i2c.c23
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c186
-rw-r--r--drivers/nfc/st21nfca/st21nfca.h21
-rw-r--r--drivers/nfc/st21nfca/st21nfca_se.c411
-rw-r--r--drivers/nfc/st21nfca/st21nfca_se.h63
-rw-r--r--drivers/nfc/st21nfcb/Makefile2
-rw-r--r--drivers/nfc/st21nfcb/i2c.c19
-rw-r--r--drivers/nfc/st21nfcb/ndlc.c3
-rw-r--r--drivers/nfc/st21nfcb/st21nfcb.c11
-rw-r--r--drivers/nfc/st21nfcb/st21nfcb.h2
-rw-r--r--drivers/nfc/st21nfcb/st21nfcb_se.c707
-rw-r--r--drivers/nfc/st21nfcb/st21nfcb_se.h61
-rw-r--r--drivers/of/Kconfig1
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/of_pci.c4
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/of/platform.c11
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi149
-rw-r--r--drivers/of/unittest.c624
-rw-r--r--drivers/parisc/lba_pci.c5
-rw-r--r--drivers/parport/parport_atari.c4
-rw-r--r--drivers/pci/access.c87
-rw-r--r--drivers/pci/bus.c61
-rw-r--r--drivers/pci/host-bridge.c8
-rw-r--r--drivers/pci/host/Kconfig4
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-host-generic.c55
-rw-r--r--drivers/pci/host/pci-keystone.c4
-rw-r--r--drivers/pci/host/pci-layerscape.c1
-rw-r--r--drivers/pci/host/pci-mvebu.c15
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c51
-rw-r--r--drivers/pci/host/pci-tegra.c68
-rw-r--r--drivers/pci/host/pci-versatile.c237
-rw-r--r--drivers/pci/host/pci-xgene.c156
-rw-r--r--drivers/pci/host/pcie-designware.c6
-rw-r--r--drivers/pci/host/pcie-rcar.c7
-rw-r--r--drivers/pci/host/pcie-xilinx.c96
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c3
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c13
-rw-r--r--drivers/pci/msi.c5
-rw-r--r--drivers/pci/pci-acpi.c17
-rw-r--r--drivers/pci/pci-driver.c13
-rw-r--r--drivers/pci/pci.c117
-rw-r--r--drivers/pci/pci.h7
-rw-r--r--drivers/pci/pcie/aspm.c12
-rw-r--r--drivers/pci/probe.c10
-rw-r--r--drivers/pci/quirks.c118
-rw-r--r--drivers/pci/rom.c7
-rw-r--r--drivers/pci/setup-bus.c56
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/cistpl.c31
-rw-r--r--drivers/pcmcia/cs_internal.h6
-rw-r--r--drivers/pcmcia/ds.c3
-rw-r--r--drivers/pcmcia/rsrc_mgr.c5
-rw-r--r--drivers/pcmcia/rsrc_pci.c173
-rw-r--r--drivers/phy/Kconfig14
-rw-r--r--drivers/phy/Makefile4
-rw-r--r--drivers/phy/phy-armada375-usb2.c4
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c89
-rw-r--r--drivers/phy/phy-miphy28lp.c64
-rw-r--r--drivers/phy/phy-miphy365x.c29
-rw-r--r--drivers/phy/phy-omap-control.c7
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h159
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.c201
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.h177
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.c257
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.h235
-rw-r--r--drivers/phy/phy-qcom-ufs.c745
-rw-r--r--drivers/phy/phy-rockchip-usb.c158
-rw-r--r--drivers/phy/phy-stih407-usb.c25
-rw-r--r--drivers/phy/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/phy-ti-pipe3.c153
-rw-r--r--drivers/pinctrl/Kconfig18
-rw-r--r--drivers/pinctrl/Makefile4
-rw-r--r--drivers/pinctrl/core.c5
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c276
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c122
-rw-r--r--drivers/pinctrl/meson/Makefile2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c761
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h209
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c1089
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c206
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinconf.h22
-rw-r--r--drivers/pinctrl/pinctrl-at91.c108
-rw-r--r--drivers/pinctrl/pinctrl-bcm281xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c3
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c95
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c2
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c2
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c1180
-rw-r--r--drivers/pinctrl/qcom/Kconfig8
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c1005
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c129
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c29
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig10
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c18
-rw-r--r--drivers/pinctrl/sh-pfc/core.h2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-emev2.c1711
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c13
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c21
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c2645
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c2
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c51
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.h1
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c5
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c815
-rw-r--r--drivers/platform/x86/dell-laptop.c1055
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c45
-rw-r--r--drivers/pnp/pnpbios/core.c3
-rw-r--r--drivers/power/88pm860x_charger.c1
-rw-r--r--drivers/power/Kconfig25
-rw-r--r--drivers/power/Makefile3
-rw-r--r--drivers/power/ab8500_fg.c14
-rw-r--r--drivers/power/bq24190_charger.c4
-rw-r--r--drivers/power/bq27x00_battery.c19
-rw-r--r--drivers/power/charger-manager.c289
-rw-r--r--drivers/power/collie_battery.c24
-rw-r--r--drivers/power/gpio-charger.c4
-rw-r--r--drivers/power/ltc2941-battery-gauge.c547
-rw-r--r--drivers/power/max17042_battery.c2
-rw-r--r--drivers/power/max77693_charger.c753
-rw-r--r--drivers/power/reset/Kconfig26
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/arm-versatile-reboot.c49
-rw-r--r--drivers/power/reset/at91-poweroff.c7
-rw-r--r--drivers/power/reset/at91-reset.c28
-rw-r--r--drivers/power/reset/brcmstb-reboot.c41
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c290
-rw-r--r--drivers/power/reset/restart-poweroff.c1
-rw-r--r--drivers/power/reset/rmobile-reset.c91
-rw-r--r--drivers/power/reset/st-poweroff.c17
-rw-r--r--drivers/power/reset/sun6i-reboot.c85
-rw-r--r--drivers/power/reset/vexpress-poweroff.c17
-rw-r--r--drivers/power/rt5033_battery.c177
-rw-r--r--drivers/power/test_power.c33
-rw-r--r--drivers/ps3/ps3-vuart.c5
-rw-r--r--drivers/ps3/sys-manager-core.c6
-rw-r--r--drivers/ps3/vuart.h16
-rw-r--r--drivers/rapidio/devices/tsi721.c2
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/Kconfig17
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c93
-rw-r--r--drivers/regulator/core.c379
-rw-r--r--drivers/regulator/da9211-regulator.c16
-rw-r--r--drivers/regulator/fan53555.c4
-rw-r--r--drivers/regulator/internal.h2
-rw-r--r--drivers/regulator/isl9305.c6
-rw-r--r--drivers/regulator/lp872x.c24
-rw-r--r--drivers/regulator/max14577.c62
-rw-r--r--drivers/regulator/max77686.c70
-rw-r--r--drivers/regulator/max77843.c227
-rw-r--r--drivers/regulator/max8649.c4
-rw-r--r--drivers/regulator/mt6397-regulator.c332
-rw-r--r--drivers/regulator/of_regulator.c11
-rw-r--r--drivers/regulator/pfuze100-regulator.c134
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c15
-rw-r--r--drivers/regulator/rk808-regulator.c6
-rw-r--r--drivers/regulator/rt5033-regulator.c8
-rw-r--r--drivers/regulator/s2mps11.c42
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/reset/reset-sunxi.c4
-rw-r--r--drivers/rtc/Kconfig111
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/hctosys.c18
-rw-r--r--drivers/rtc/interface.c22
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c1035
-rw-r--r--drivers/rtc/rtc-armada38x.c320
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-dev.c8
-rw-r--r--drivers/rtc/rtc-ds1685.c2252
-rw-r--r--drivers/rtc/rtc-efi.c1
-rw-r--r--drivers/rtc/rtc-imxdi.c50
-rw-r--r--drivers/rtc/rtc-isl12022.c3
-rw-r--r--drivers/rtc/rtc-isl12057.c351
-rw-r--r--drivers/rtc/rtc-pcf2123.c10
-rw-r--r--drivers/rtc/rtc-rk808.c10
-rw-r--r--drivers/rtc/rtc-s5m.c1
-rw-r--r--drivers/rtc/systohc.c6
-rw-r--r--drivers/s390/block/dasd.c102
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dasd_proc.c21
-rw-r--r--drivers/s390/block/dcssblk.c36
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c6
-rw-r--r--drivers/s390/char/hmcdrv_mod.c1
-rw-r--r--drivers/s390/char/sclp_early.c57
-rw-r--r--drivers/s390/char/tape_34xx.c12
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/idset.c20
-rw-r--r--drivers/s390/cio/idset.h2
-rw-r--r--drivers/s390/crypto/ap_bus.c140
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/zcrypt_api.h1
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c33
-rw-r--r--drivers/s390/net/claw.c6
-rw-r--r--drivers/s390/net/ctcm_fsms.c18
-rw-r--r--drivers/s390/net/ctcm_main.c4
-rw-r--r--drivers/s390/net/ctcm_main.h2
-rw-r--r--drivers/s390/net/ctcm_sysfs.c4
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c15
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c117
-rw-r--r--drivers/s390/net/qeth_core_sys.c45
-rw-r--r--drivers/s390/net/qeth_l2_main.c226
-rw-r--r--drivers/s390/net/qeth_l3_main.c65
-rw-r--r--drivers/s390/net/qeth_l3_sys.c45
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/BusLogic.c10
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/Makefile6
-rw-r--r--drivers/scsi/NCR5380.c20
-rw-r--r--drivers/scsi/advansys.c142
-rw-r--r--drivers/scsi/aha152x.c295
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c38
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c24
-rw-r--r--drivers/scsi/arm/fas216.c6
-rw-r--r--drivers/scsi/atari_NCR5380.c4
-rw-r--r--drivers/scsi/atp870u.c5
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/ch.c6
-rw-r--r--drivers/scsi/constants.c275
-rw-r--r--drivers/scsi/csiostor/Makefile2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c1175
-rw-r--r--drivers/scsi/csiostor/csio_hw.h49
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h65
-rw-r--r--drivers/scsi/csiostor/csio_hw_t4.c404
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c150
-rw-r--r--drivers/scsi/csiostor/csio_init.c6
-rw-r--r--drivers/scsi/csiostor/csio_isr.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2
-rw-r--r--drivers/scsi/csiostor/csio_mb.c56
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c4
-rw-r--r--drivers/scsi/csiostor/csio_wr.c157
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c41
-rw-r--r--drivers/scsi/dc395x.c79
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c3
-rw-r--r--drivers/scsi/dpt_i2o.c2
-rw-r--r--drivers/scsi/eata_pio.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c5
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/esp_scsi.c2
-rw-r--r--drivers/scsi/gdth_proc.c24
-rw-r--r--drivers/scsi/hpsa.c1798
-rw-r--r--drivers/scsi/hpsa.h61
-rw-r--r--drivers/scsi/hpsa_cmd.h334
-rw-r--r--drivers/scsi/in2000.c18
-rw-r--r--drivers/scsi/ipr.c92
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/ips.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c9
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h66
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c188
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c17
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c109
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h9
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h51
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h4
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h6
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c52
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h16
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c39
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c3
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h3
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h3
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c47
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c53
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debug.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c42
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h3
-rw-r--r--drivers/scsi/nsp32.c41
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c50
-rw-r--r--drivers/scsi/pmcraid.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c8
-rw-r--r--drivers/scsi/scsi.c24
-rw-r--r--drivers/scsi/scsi_debug.c138
-rw-r--r--drivers/scsi/scsi_error.c50
-rw-r--r--drivers/scsi/scsi_lib.c17
-rw-r--r--drivers/scsi/scsi_logging.c485
-rw-r--r--drivers/scsi/scsi_proc.c22
-rw-r--r--drivers/scsi/scsi_scan.c57
-rw-r--r--drivers/scsi/scsi_trace.c6
-rw-r--r--drivers/scsi/sd.c13
-rw-r--r--drivers/scsi/ses.c148
-rw-r--r--drivers/scsi/sg.c32
-rw-r--r--drivers/scsi/sr_ioctl.c11
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/storvsc_drv.c190
-rw-r--r--drivers/scsi/ufs/Kconfig13
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c1004
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h170
-rw-r--r--drivers/scsi/ufs/ufshcd.c6
-rw-r--r--drivers/scsi/wd33c93.c18
-rw-r--r--drivers/scsi/wd7000.c41
-rw-r--r--drivers/sfi/sfi_core.c4
-rw-r--r--drivers/spi/Kconfig24
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-atmel.c12
-rw-r--r--drivers/spi/spi-au1550.c4
-rw-r--r--drivers/spi/spi-bcm2835.c4
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-bitbang.c4
-rw-r--r--drivers/spi/spi-butterfly.c4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c5
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dln2.c881
-rw-r--r--drivers/spi/spi-dw-mid.c16
-rw-r--r--drivers/spi/spi-dw-pci.c38
-rw-r--r--drivers/spi/spi-dw.c15
-rw-r--r--drivers/spi/spi-falcon.c12
-rw-r--r--drivers/spi/spi-fsl-cpm.c9
-rw-r--r--drivers/spi/spi-fsl-dspi.c163
-rw-r--r--drivers/spi/spi-fsl-lib.c16
-rw-r--r--drivers/spi/spi-fsl-lib.h4
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-img-spfi.c49
-rw-r--r--drivers/spi/spi-imx.c32
-rw-r--r--drivers/spi/spi-lm70llp.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c2
-rw-r--r--drivers/spi/spi-mxs.c5
-rw-r--r--drivers/spi/spi-omap-100k.c5
-rw-r--r--drivers/spi/spi-omap-uwire.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c5
-rw-r--r--drivers/spi/spi-orion.c88
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c17
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c34
-rw-r--r--drivers/spi/spi-pxa2xx.c209
-rw-r--r--drivers/spi/spi-pxa2xx.h34
-rw-r--r--drivers/spi/spi-qup.c11
-rw-r--r--drivers/spi/spi-rockchip.c6
-rw-r--r--drivers/spi/spi-rspi.c5
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sc18is602.c4
-rw-r--r--drivers/spi/spi-sh-hspi.c5
-rw-r--r--drivers/spi/spi-sh-msiof.c93
-rw-r--r--drivers/spi/spi-sh.c5
-rw-r--r--drivers/spi/spi-sirf.c1
-rw-r--r--drivers/spi/spi-st-ssc4.c504
-rw-r--r--drivers/spi/spi-ti-qspi.c14
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-xilinx.c298
-rw-r--r--drivers/spi/spi.c120
-rw-r--r--drivers/spi/spidev.c125
-rw-r--r--drivers/ssb/main.c19
-rw-r--r--drivers/staging/Kconfig10
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/android/Kconfig26
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/alarm-dev.c446
-rw-r--r--drivers/staging/android/android_alarm.h41
-rw-r--r--drivers/staging/android/ashmem.c12
-rw-r--r--drivers/staging/android/ion/ion.c3
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c20
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/logger.c808
-rw-r--r--drivers/staging/android/logger.h89
-rw-r--r--drivers/staging/android/lowmemorykiller.c7
-rw-r--r--drivers/staging/android/sync_debug.c3
-rw-r--r--drivers/staging/android/uapi/android_alarm.h62
-rw-r--r--drivers/staging/board/board.c3
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c5
-rw-r--r--drivers/staging/comedi/Kconfig21
-rw-r--r--drivers/staging/comedi/comedi_compat32.c99
-rw-r--r--drivers/staging/comedi/comedi_compat32.h38
-rw-r--r--drivers/staging/comedi/comedi_fops.c467
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c5
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.h55
-rw-r--r--drivers/staging/comedi/comedi_usb.c3
-rw-r--r--drivers/staging/comedi/comedi_usb.h50
-rw-r--r--drivers/staging/comedi/comedidev.h103
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/8253.h92
-rw-r--r--drivers/staging/comedi/drivers/8255.c174
-rw-r--r--drivers/staging/comedi/drivers/8255.h32
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c65
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c2365
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c3
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c78
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c844
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c3
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c3
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c62
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c4
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c665
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c2
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c240
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c124
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c10
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c1
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c262
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.h116
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c2
-rw-r--r--drivers/staging/comedi/drivers/dac02.c2
-rw-r--r--drivers/staging/comedi/drivers/das08.c17
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/das08_isa.c18
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/das16.c469
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c2
-rw-r--r--drivers/staging/comedi/drivers/das1800.c340
-rw-r--r--drivers/staging/comedi/drivers/das6402.c4
-rw-r--r--drivers/staging/comedi/drivers/das800.c2
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c4
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c231
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c2
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c3
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c35
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c4
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c2
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h5
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c2
-rw-r--r--drivers/staging/comedi/drivers/me4000.c4
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c5
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c2
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c30
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c192
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c16
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h8
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c15
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.c149
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.h23
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c43
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c43
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c3
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c6
-rw-r--r--drivers/staging/comedi/drivers/pcl724.c12
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c7
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c19
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c258
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c240
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c422
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c3
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c2
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c2
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c3
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c12
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c16
-rw-r--r--drivers/staging/comedi/drivers/rti800.c3
-rw-r--r--drivers/staging/comedi/drivers/rti802.c2
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c6
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c5
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c9
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c80
-rw-r--r--drivers/staging/comedi/drivers/z8536.h202
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c2
-rw-r--r--drivers/staging/comedi/range.c56
-rw-r--r--drivers/staging/cptm1217/Kconfig12
-rw-r--r--drivers/staging/cptm1217/Makefile2
-rw-r--r--drivers/staging/cptm1217/TODO5
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c665
-rw-r--r--drivers/staging/cptm1217/cp_tm1217.h8
-rw-r--r--drivers/staging/dgap/dgap.c38
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c40
-rw-r--r--drivers/staging/dgnc/dgnc_utils.c2
-rw-r--r--drivers/staging/dgnc/digi.h60
-rw-r--r--drivers/staging/dgnc/dpacompat.h12
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c60
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h1
-rw-r--r--drivers/staging/fbtft/Kconfig169
-rw-r--r--drivers/staging/fbtft/Makefile34
-rw-r--r--drivers/staging/fbtft/README32
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c462
-rw-r--r--drivers/staging/fbtft/fb_bd663474.c193
-rw-r--r--drivers/staging/fbtft/fb_hx8340bn.c229
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c181
-rw-r--r--drivers/staging/fbtft/fb_hx8353d.c166
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c234
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c291
-rw-r--r--drivers/staging/fbtft/fb_ili9340.c163
-rw-r--r--drivers/staging/fbtft/fb_ili9341.c179
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c117
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c121
-rw-r--r--drivers/staging/fbtft/fb_pcd8544.c177
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c331
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c168
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c208
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c206
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c229
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c205
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c258
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c195
-rw-r--r--drivers/staging/fbtft/fb_tinylcd.c124
-rw-r--r--drivers/staging/fbtft/fb_tls8204.c176
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c210
-rw-r--r--drivers/staging/fbtft/fb_upd161704.c206
-rw-r--r--drivers/staging/fbtft/fb_watterott.c324
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c256
-rw-r--r--drivers/staging/fbtft/fbtft-core.c1521
-rw-r--r--drivers/staging/fbtft/fbtft-io.c239
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c222
-rw-r--r--drivers/staging/fbtft/fbtft.h447
-rw-r--r--drivers/staging/fbtft/fbtft_device.c1444
-rw-r--r--drivers/staging/fbtft/flexfb.c592
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c111
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c8
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c3
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c9
-rw-r--r--drivers/staging/i2o/Kconfig (renamed from drivers/message/i2o/Kconfig)1
-rw-r--r--drivers/staging/i2o/Makefile (renamed from drivers/message/i2o/Makefile)0
-rw-r--r--drivers/staging/i2o/README (renamed from drivers/message/i2o/README)0
-rw-r--r--drivers/staging/i2o/README.ioctl (renamed from drivers/message/i2o/README.ioctl)0
-rw-r--r--drivers/staging/i2o/bus-osm.c (renamed from drivers/message/i2o/bus-osm.c)2
-rw-r--r--drivers/staging/i2o/config-osm.c (renamed from drivers/message/i2o/config-osm.c)2
-rw-r--r--drivers/staging/i2o/core.h (renamed from drivers/message/i2o/core.h)0
-rw-r--r--drivers/staging/i2o/debug.c (renamed from drivers/message/i2o/debug.c)2
-rw-r--r--drivers/staging/i2o/device.c (renamed from drivers/message/i2o/device.c)2
-rw-r--r--drivers/staging/i2o/driver.c (renamed from drivers/message/i2o/driver.c)2
-rw-r--r--drivers/staging/i2o/exec-osm.c (renamed from drivers/message/i2o/exec-osm.c)2
-rw-r--r--drivers/staging/i2o/i2o.h (renamed from include/linux/i2o.h)0
-rw-r--r--drivers/staging/i2o/i2o_block.c (renamed from drivers/message/i2o/i2o_block.c)2
-rw-r--r--drivers/staging/i2o/i2o_block.h (renamed from drivers/message/i2o/i2o_block.h)0
-rw-r--r--drivers/staging/i2o/i2o_config.c (renamed from drivers/message/i2o/i2o_config.c)0
-rw-r--r--drivers/staging/i2o/i2o_proc.c (renamed from drivers/message/i2o/i2o_proc.c)2
-rw-r--r--drivers/staging/i2o/i2o_scsi.c (renamed from drivers/message/i2o/i2o_scsi.c)2
-rw-r--r--drivers/staging/i2o/iop.c (renamed from drivers/message/i2o/iop.c)2
-rw-r--r--drivers/staging/i2o/memory.c (renamed from drivers/message/i2o/memory.c)2
-rw-r--r--drivers/staging/i2o/pci.c (renamed from drivers/message/i2o/pci.c)2
-rw-r--r--drivers/staging/iio/Documentation/iio_event_monitor.c23
-rw-r--r--drivers/staging/iio/Documentation/ring.txt8
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c13
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c45
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c29
-rw-r--r--drivers/staging/iio/adc/ad7192.c2
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c45
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c17
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.h6
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c210
-rw-r--r--drivers/staging/iio/iio_simple_dummy.h13
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c12
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c66
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c53
-rw-r--r--drivers/staging/iio/light/isl29028.c4
-rw-r--r--drivers/staging/iio/light/tsl2583.c2
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c2
-rw-r--r--drivers/staging/iio/meter/ade7758.h1
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c15
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c7
-rw-r--r--drivers/staging/iio/meter/ade7759.c2
-rw-r--r--drivers/staging/line6/Kconfig38
-rw-r--r--drivers/staging/line6/Makefile14
-rw-r--r--drivers/staging/line6/audio.c71
-rw-r--r--drivers/staging/line6/audio.h21
-rw-r--r--drivers/staging/line6/capture.c432
-rw-r--r--drivers/staging/line6/driver.c1162
-rw-r--r--drivers/staging/line6/pcm.c576
-rw-r--r--drivers/staging/line6/pcm.h382
-rw-r--r--drivers/staging/line6/pod.h102
-rw-r--r--drivers/staging/line6/podhd.c154
-rw-r--r--drivers/staging/line6/podhd.h30
-rw-r--r--drivers/staging/line6/revision.h4
-rw-r--r--drivers/staging/line6/toneport.h52
-rw-r--r--drivers/staging/line6/usbdefs.h116
-rw-r--r--drivers/staging/line6/variax.h72
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h3
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h28
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h36
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h8
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c14
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c13
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h14
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c28
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c36
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c49
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c40
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h2
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c30
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c50
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h12
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c11
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c16
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_update.h189
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c7
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c12
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c18
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_string.c16
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c7
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c24
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c8
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c233
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c11
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c141
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c87
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c20
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c9
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c7
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c62
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c31
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c26
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c5
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c77
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c12
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c12
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c41
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c23
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c2
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile4
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c69
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c10
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c118
-rw-r--r--drivers/staging/media/mn88472/mn88472.c63
-rw-r--r--drivers/staging/media/mn88472/mn88472_priv.h1
-rw-r--r--drivers/staging/media/omap4iss/iss.c111
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c43
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.h2
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c22
-rw-r--r--drivers/staging/media/omap4iss/iss_regs.h2
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c18
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c16
-rw-r--r--drivers/staging/media/parport/Kconfig69
-rw-r--r--drivers/staging/media/parport/Makefile4
-rw-r--r--drivers/staging/media/parport/bw-qcam.c1177
-rw-r--r--drivers/staging/media/parport/c-qcam.c882
-rw-r--r--drivers/staging/media/parport/pms.c1156
-rw-r--r--drivers/staging/media/parport/w9966.c980
-rw-r--r--drivers/staging/media/tlg2300/Kconfig20
-rw-r--r--drivers/staging/media/tlg2300/Makefile9
-rw-r--r--drivers/staging/media/tlg2300/pd-alsa.c337
-rw-r--r--drivers/staging/media/tlg2300/pd-common.h271
-rw-r--r--drivers/staging/media/tlg2300/pd-dvb.c597
-rw-r--r--drivers/staging/media/tlg2300/pd-main.c553
-rw-r--r--drivers/staging/media/tlg2300/pd-radio.c339
-rw-r--r--drivers/staging/media/tlg2300/pd-video.c1570
-rw-r--r--drivers/staging/media/tlg2300/vendorcmds.h243
-rw-r--r--drivers/staging/media/vino/Kconfig24
-rw-r--r--drivers/staging/media/vino/Makefile3
-rw-r--r--drivers/staging/media/vino/indycam.c378
-rw-r--r--drivers/staging/media/vino/indycam.h93
-rw-r--r--drivers/staging/media/vino/saa7191.c649
-rw-r--r--drivers/staging/media/vino/saa7191.h245
-rw-r--r--drivers/staging/media/vino/vino.c4345
-rw-r--r--drivers/staging/media/vino/vino.h138
-rw-r--r--drivers/staging/mt29f_spinand/Kconfig2
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c17
-rw-r--r--drivers/staging/netlogic/xlr_net.c2
-rw-r--r--drivers/staging/nvec/nvec.c11
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c2
-rw-r--r--drivers/staging/octeon/ethernet-rx.c2
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c2
-rw-r--r--drivers/staging/panel/panel.c104
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c67
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c22
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c36
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c14
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h4
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c27
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c29
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c14
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c127
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c28
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c2309
-rw-r--r--drivers/staging/rtl8712/drv_types.h6
-rw-r--r--drivers/staging/rtl8712/osdep_service.h9
-rw-r--r--drivers/staging/rtl8712/recv_linux.c14
-rw-r--r--drivers/staging/rtl8712/recv_osdep.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h12
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c525
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h46
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h22
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c2
-rw-r--r--drivers/staging/rtl8712/sta_info.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c5
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c10
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c61
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c32
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c8
-rw-r--r--drivers/staging/rtl8723au/hal/odm.c87
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c8
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c62
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c35
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_xmit.c4
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c14
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723APhyCfg.h55
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723PwrSeq.h46
-rw-r--r--drivers/staging/rtl8723au/include/osdep_intf.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h177
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_recv.h4
-rw-r--r--drivers/staging/rtl8723au/include/rtw_cmd.h10
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8723au/include/wifi.h14
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c13
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c170
-rw-r--r--drivers/staging/rts5208/ms.c12
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c12
-rw-r--r--drivers/staging/skein/skein_block.c17
-rw-r--r--drivers/staging/skein/skein_generic.c1
-rw-r--r--drivers/staging/sm7xxfb/Kconfig13
-rw-r--r--drivers/staging/sm7xxfb/Makefile1
-rw-r--r--drivers/staging/sm7xxfb/TODO12
-rw-r--r--drivers/staging/sm7xxfb/sm7xx.h779
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c1024
-rw-r--r--drivers/staging/speakup/i18n.h2
-rw-r--r--drivers/staging/speakup/kobjects.c2
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c7
-rw-r--r--drivers/staging/speakup/synth.c6
-rw-r--r--drivers/staging/unisys/Kconfig1
-rw-r--r--drivers/staging/unisys/Makefile1
-rw-r--r--drivers/staging/unisys/channels/Kconfig10
-rw-r--r--drivers/staging/unisys/channels/Makefile11
-rw-r--r--drivers/staging/unisys/channels/channel.c219
-rw-r--r--drivers/staging/unisys/channels/chanstub.c75
-rw-r--r--drivers/staging/unisys/channels/chanstub.h23
-rw-r--r--drivers/staging/unisys/common-spar/include/version.h1
-rw-r--r--drivers/staging/unisys/include/timskmod.h4
-rw-r--r--drivers/staging/unisys/uislib/Kconfig2
-rw-r--r--drivers/staging/unisys/uislib/uislib.c755
-rw-r--r--drivers/staging/unisys/uislib/uisqueue.c215
-rw-r--r--drivers/staging/unisys/uislib/uisthread.c1
-rw-r--r--drivers/staging/unisys/uislib/uisutils.c99
-rw-r--r--drivers/staging/unisys/virthba/Kconfig2
-rw-r--r--drivers/staging/unisys/virthba/virthba.c353
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.c61
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel.h69
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_funcs.c206
-rw-r--r--drivers/staging/unisys/visorchipset/file.c122
-rw-r--r--drivers/staging/unisys/visorchipset/file.h3
-rw-r--r--drivers/staging/unisys/visorchipset/globals.h2
-rw-r--r--drivers/staging/unisys/visorchipset/testing.h43
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset.h55
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_main.c5
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_umode.h2
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.c2
-rw-r--r--drivers/staging/unisys/visorutil/procobjecttree.c21
-rw-r--r--drivers/staging/vt6655/baseband.c345
-rw-r--r--drivers/staging/vt6655/baseband.h17
-rw-r--r--drivers/staging/vt6655/card.c9
-rw-r--r--drivers/staging/vt6655/channel.c26
-rw-r--r--drivers/staging/vt6655/channel.h2
-rw-r--r--drivers/staging/vt6655/device.h25
-rw-r--r--drivers/staging/vt6655/device_main.c185
-rw-r--r--drivers/staging/vt6655/dpc.c15
-rw-r--r--drivers/staging/vt6655/mac.c25
-rw-r--r--drivers/staging/vt6655/mac.h684
-rw-r--r--drivers/staging/vt6655/power.c26
-rw-r--r--drivers/staging/vt6655/rf.c27
-rw-r--r--drivers/staging/vt6655/rf.h8
-rw-r--r--drivers/staging/vt6655/rxtx.c8
-rw-r--r--drivers/staging/vt6655/upc.h8
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/vt6656/device.h6
-rw-r--r--drivers/staging/vt6656/dpc.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c109
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c4
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h6
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c17
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c2
-rw-r--r--drivers/staging/wlan-ng/p80211req.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c4
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c10
-rw-r--r--drivers/target/iscsi/iscsi_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h4
-rw-r--r--drivers/target/target_core_device.c54
-rw-r--r--drivers/target/target_core_file.c12
-rw-r--r--drivers/target/target_core_iblock.c3
-rw-r--r--drivers/target/target_core_pr.c12
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c15
-rw-r--r--drivers/target/target_core_spc.c5
-rw-r--r--drivers/target/target_core_user.c5
-rw-r--r--drivers/thermal/imx_thermal.c2
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c16
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c2
-rw-r--r--drivers/thermal/of-thermal.c2
-rw-r--r--drivers/thermal/rcar_thermal.c17
-rw-r--r--drivers/thermal/thermal_core.c6
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/ehv_bytechan.c1
-rw-r--r--drivers/tty/hvc/hvc_iucv.c31
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/n_tty.c263
-rw-r--r--drivers/tty/pty.c31
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c230
-rw-r--r--drivers/tty/serial/8250/8250_dma.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c11
-rw-r--r--drivers/tty/serial/8250/8250_early.c13
-rw-r--r--drivers/tty/serial/8250/8250_omap.c88
-rw-r--r--drivers/tty/serial/8250/8250_pci.c33
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c8
-rw-r--r--drivers/tty/serial/8250/Kconfig19
-rw-r--r--drivers/tty/serial/Kconfig54
-rw-r--r--drivers/tty/serial/Makefile4
-rw-r--r--drivers/tty/serial/altera_jtaguart.c1
-rw-r--r--drivers/tty/serial/altera_uart.c1
-rw-r--r--drivers/tty/serial/atmel_serial.c153
-rw-r--r--drivers/tty/serial/digicolor-usart.c560
-rw-r--r--drivers/tty/serial/etraxfs-uart.c996
-rw-r--r--drivers/tty/serial/fsl_lpuart.c135
-rw-r--r--drivers/tty/serial/imx.c190
-rw-r--r--drivers/tty/serial/mcf.c7
-rw-r--r--drivers/tty/serial/men_z135_uart.c155
-rw-r--r--drivers/tty/serial/mrst_max3110.c909
-rw-r--r--drivers/tty/serial/mrst_max3110.h61
-rw-r--r--drivers/tty/serial/msm_serial.c27
-rw-r--r--drivers/tty/serial/mxs-auart.c55
-rw-r--r--drivers/tty/serial/of_serial.c13
-rw-r--r--drivers/tty/serial/omap-serial.c37
-rw-r--r--drivers/tty/serial/samsung.c736
-rw-r--r--drivers/tty/serial/samsung.h42
-rw-r--r--drivers/tty/serial/serial_core.c115
-rw-r--r--drivers/tty/serial/sh-sci.c25
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c40
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h4
-rw-r--r--drivers/tty/serial/sprd_serial.c793
-rw-r--r--drivers/tty/serial/xilinx_uartps.c10
-rw-r--r--drivers/tty/sysrq.c23
-rw-r--r--drivers/tty/tty_buffer.c6
-rw-r--r--drivers/tty/tty_io.c7
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/tty/tty_mutex.c18
-rw-r--r--drivers/tty/vt/vt.c18
-rw-r--r--drivers/uio/Kconfig20
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c499
-rw-r--r--drivers/uio/uio_pci_generic.c3
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c3
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/host.c1
-rw-r--r--drivers/usb/chipidea/udc.c18
-rw-r--r--drivers/usb/class/cdc-acm.c54
-rw-r--r--drivers/usb/core/buffer.c26
-rw-r--r--drivers/usb/core/devio.c63
-rw-r--r--drivers/usb/core/driver.c29
-rw-r--r--drivers/usb/core/hcd.c16
-rw-r--r--drivers/usb/core/hub.c94
-rw-r--r--drivers/usb/core/message.c23
-rw-r--r--drivers/usb/core/otg_whitelist.h5
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/usb.c1
-rw-r--r--drivers/usb/dwc2/Kconfig4
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/core.h60
-rw-r--r--drivers/usb/dwc2/core_intr.c6
-rw-r--r--drivers/usb/dwc2/gadget.c1197
-rw-r--r--drivers/usb/dwc2/hcd.c100
-rw-r--r--drivers/usb/dwc2/hw.h2
-rw-r--r--drivers/usb/dwc2/platform.c36
-rw-r--r--drivers/usb/dwc3/Kconfig6
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h5
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c182
-rw-r--r--drivers/usb/dwc3/ep0.c2
-rw-r--r--drivers/usb/dwc3/gadget.c88
-rw-r--r--drivers/usb/dwc3/trace.h10
-rw-r--r--drivers/usb/gadget/Kconfig11
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/function/Makefile2
-rw-r--r--drivers/usb/gadget/function/f_fs.c119
-rw-r--r--drivers/usb/gadget/function/f_hid.c7
-rw-r--r--drivers/usb/gadget/function/f_midi.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c20
-rw-r--r--drivers/usb/gadget/function/f_uac1.c18
-rw-r--r--drivers/usb/gadget/function/f_uvc.c136
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/function/u_fs.h25
-rw-r--r--drivers/usb/gadget/function/u_uac1.c3
-rw-r--r--drivers/usb/gadget/function/u_uac1.h1
-rw-r--r--drivers/usb/gadget/function/u_uvc.h52
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c2468
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h22
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c8
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h1
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c166
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h9
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c13
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c9
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c5
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c12
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/net2272.c7
-rw-r--r--drivers/usb/gadget/udc/net2272.h1
-rw-r--r--drivers/usb/gadget/udc/net2280.c533
-rw-r--r--drivers/usb/gadget/udc/net2280.h24
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c21
-rw-r--r--drivers/usb/gadget/udc/udc-core.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/host/Kconfig22
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/ehci-atmel.c102
-rw-r--r--drivers/usb/host/ehci-fsl.c1
-rw-r--r--drivers/usb/host/ehci-grlib.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c6
-rw-r--r--drivers/usb/host/ehci-hub.c8
-rw-r--r--drivers/usb/host/ehci-pci.c27
-rw-r--r--drivers/usb/host/ehci-platform.c92
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c1
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-sched.c14
-rw-r--r--drivers/usb/host/ehci-sead3.c1
-rw-r--r--drivers/usb/host/ehci-sh.c1
-rw-r--r--drivers/usb/host/ehci-tegra.c2
-rw-r--r--drivers/usb/host/ehci-tilegx.c1
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c1
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/fhci-hub.c9
-rw-r--r--drivers/usb/host/fotg210-hcd.c4
-rw-r--r--drivers/usb/host/fusbh200-hcd.c4
-rw-r--r--drivers/usb/host/imx21-hcd.c5
-rw-r--r--drivers/usb/host/isp116x-hcd.c5
-rw-r--r--drivers/usb/host/isp1362-hcd.c8
-rw-r--r--drivers/usb/host/isp1760-hcd.h208
-rw-r--r--drivers/usb/host/isp1760-if.c477
-rw-r--r--drivers/usb/host/max3421-hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c132
-rw-r--r--drivers/usb/host/ohci-da8xx.c1
-rw-r--r--drivers/usb/host/ohci-hub.c10
-rw-r--r--drivers/usb/host/ohci-jz4740.c1
-rw-r--r--drivers/usb/host/ohci-platform.c83
-rw-r--r--drivers/usb/host/ohci-ppc-of.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c6
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-tilegx.c1
-rw-r--r--drivers/usb/host/ohci-tmio.c1
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c10
-rw-r--r--drivers/usb/host/pci-quirks.c64
-rw-r--r--drivers/usb/host/r8a66597-hcd.c13
-rw-r--r--drivers/usb/host/sl811-hcd.c10
-rw-r--r--drivers/usb/host/u132-hcd.c10
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/uhci-hub.c5
-rw-r--r--drivers/usb/host/uhci-platform.c1
-rw-r--r--drivers/usb/host/whci/debug.c7
-rw-r--r--drivers/usb/host/xhci-dbg.c2
-rw-r--r--drivers/usb/host/xhci-mem.c31
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c12
-rw-r--r--drivers/usb/host/xhci.c87
-rw-r--r--drivers/usb/host/xhci.h12
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/isp1760/Kconfig59
-rw-r--r--drivers/usb/isp1760/Makefile5
-rw-r--r--drivers/usb/isp1760/isp1760-core.c177
-rw-r--r--drivers/usb/isp1760/isp1760-core.h68
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c (renamed from drivers/usb/host/isp1760-hcd.c)291
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.h102
-rw-r--r--drivers/usb/isp1760/isp1760-if.c309
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h230
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c1498
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h106
-rw-r--r--drivers/usb/misc/uss720.c12
-rw-r--r--drivers/usb/musb/Kconfig11
-rw-r--r--drivers/usb/musb/blackfin.c4
-rw-r--r--drivers/usb/musb/musb_core.c7
-rw-r--r--drivers/usb/musb/musb_cppi41.c28
-rw-r--r--drivers/usb/musb/musb_debugfs.c36
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c2
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/musb/musb_virthub.c7
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c35
-rw-r--r--drivers/usb/phy/phy-generic.c150
-rw-r--r--drivers/usb/phy/phy-generic.h10
-rw-r--r--drivers/usb/phy/phy-mv-usb.c5
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c86
-rw-r--r--drivers/usb/phy/phy.c14
-rw-r--r--drivers/usb/renesas_usbhs/common.c25
-rw-r--r--drivers/usb/renesas_usbhs/common.h3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c25
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c3
-rw-r--r--drivers/usb/serial/console.c16
-rw-r--r--drivers/usb/serial/cp210x.c5
-rw-r--r--drivers/usb/serial/generic.c4
-rw-r--r--drivers/usb/serial/keyspan.c20
-rw-r--r--drivers/usb/serial/mos7840.c60
-rw-r--r--drivers/usb/serial/option.c69
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/uas-detect.h33
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/unusual_uas.h53
-rw-r--r--drivers/usb/usbip/vhci_hcd.c3
-rw-r--r--drivers/usb/wusbcore/reservation.c5
-rw-r--r--drivers/usb/wusbcore/rh.c4
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c5
-rw-r--r--drivers/usb/wusbcore/wusbhc.c7
-rw-r--r--drivers/uwb/drp.c2
-rw-r--r--drivers/uwb/lc-dev.c7
-rw-r--r--drivers/uwb/uwb-debug.c16
-rw-r--r--drivers/vhost/net.c85
-rw-r--r--drivers/vhost/scsi.c26
-rw-r--r--drivers/vhost/vhost.c6
-rw-r--r--drivers/video/console/Kconfig16
-rw-r--r--drivers/video/console/dummycon.c5
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/fbdev/Kconfig5
-rw-r--r--drivers/video/fbdev/atafb.c3
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c9
-rw-r--r--drivers/video/fbdev/broadsheetfb.c8
-rw-r--r--drivers/video/fbdev/core/fbcvt.c6
-rw-r--r--drivers/video/fbdev/core/fbmon.c103
-rw-r--r--drivers/video/fbdev/core/modedb.c111
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c137
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c6
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c6
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c6
-rw-r--r--drivers/video/fbdev/hgafb.c3
-rw-r--r--drivers/video/fbdev/mmp/Makefile4
-rw-r--r--drivers/video/fbdev/mmp/fb/Kconfig2
-rw-r--r--drivers/video/fbdev/ocfb.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/Kconfig6
-rw-r--r--drivers/video/fbdev/omap2/displays-new/Makefile1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-opa362.c285
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c57
-rw-r--r--drivers/video/fbdev/omap2/dss/Makefile2
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.c54
-rw-r--r--drivers/video/fbdev/omap2/dss/dpi.c26
-rw-r--r--drivers/video/fbdev/omap2/dss/dsi.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.c219
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.h22
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.c3
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_phy.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c6
-rw-r--r--drivers/video/fbdev/omap2/dss/omapdss-boot-init.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/pll.c10
-rw-r--r--drivers/video/fbdev/omap2/dss/video-pll.c211
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c7
-rw-r--r--drivers/video/fbdev/pvr2fb.c6
-rw-r--r--drivers/video/fbdev/savage/savagefb.h12
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c86
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c4
-rw-r--r--drivers/video/hdmi.c822
-rw-r--r--drivers/video/vgastate.c82
-rw-r--r--drivers/vme/vme.c2
-rw-r--r--drivers/watchdog/cadence_wdt.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c40
-rw-r--r--drivers/watchdog/meson_wdt.c1
-rw-r--r--drivers/xen/balloon.c86
-rw-r--r--drivers/xen/gntdev.c143
-rw-r--r--drivers/xen/grant-table.c120
-rw-r--r--drivers/xen/manage.c8
-rw-r--r--drivers/xen/tmem.c2
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c8
-rw-r--r--drivers/xen/xen-scsiback.c7
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c11
-rw-r--r--fs/9p/v9fs.c2
-rw-r--r--fs/9p/vfs_file.c2
-rw-r--r--fs/Kconfig24
-rw-r--r--fs/Makefile1
-rw-r--r--fs/afs/rxrpc.c14
-rw-r--r--fs/afs/volume.c2
-rw-r--r--fs/aio.c20
-rw-r--r--fs/block_dev.c77
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/btrfs/ctree.h1
-rw-r--r--fs/btrfs/disk-io.c6
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/btrfs/extent_io.c4
-rw-r--r--fs/btrfs/file.c3
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/btrfs/scrub.c4
-rw-r--r--fs/btrfs/super.c14
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/tree-log.c1
-rw-r--r--fs/ceph/addr.c1
-rw-r--r--fs/ceph/file.c2
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/locks.c64
-rw-r--r--fs/ceph/mds_client.c4
-rw-r--r--fs/ceph/super.c20
-rw-r--r--fs/char_dev.c24
-rw-r--r--fs/cifs/cifs_debug.c6
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c41
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/ioctl.c21
-rw-r--r--fs/cifs/smbencrypt.c2
-rw-r--r--fs/coda/inode.c2
-rw-r--r--fs/configfs/configfs_internal.h2
-rw-r--r--fs/configfs/inode.c17
-rw-r--r--fs/configfs/mount.c11
-rw-r--r--fs/dax.c534
-rw-r--r--fs/dcache.c40
-rw-r--r--fs/dlm/netlink.c7
-rw-r--r--fs/drop_caches.c14
-rw-r--r--fs/ecryptfs/inode.c1
-rw-r--r--fs/ecryptfs/main.c2
-rw-r--r--fs/efivarfs/Kconfig1
-rw-r--r--fs/efivarfs/super.c2
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/exofs/inode.c3
-rw-r--r--fs/exofs/super.c2
-rw-r--r--fs/ext2/Kconfig11
-rw-r--r--fs/ext2/Makefile1
-rw-r--r--fs/ext2/ext2.h10
-rw-r--r--fs/ext2/file.c44
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c38
-rw-r--r--fs/ext2/namei.c13
-rw-r--r--fs/ext2/super.c53
-rw-r--r--fs/ext2/xip.c91
-rw-r--r--fs/ext2/xip.h26
-rw-r--r--fs/ext3/super.c2
-rw-r--r--fs/ext4/ext4.h6
-rw-r--r--fs/ext4/file.c50
-rw-r--r--fs/ext4/indirect.c18
-rw-r--r--fs/ext4/inode.c89
-rw-r--r--fs/ext4/namei.c10
-rw-r--r--fs/ext4/super.c83
-rw-r--r--fs/f2fs/Kconfig10
-rw-r--r--fs/f2fs/Makefile1
-rw-r--r--fs/f2fs/acl.c6
-rw-r--r--fs/f2fs/checkpoint.c95
-rw-r--r--fs/f2fs/data.c218
-rw-r--r--fs/f2fs/debug.c59
-rw-r--r--fs/f2fs/dir.c3
-rw-r--r--fs/f2fs/f2fs.h120
-rw-r--r--fs/f2fs/file.c101
-rw-r--r--fs/f2fs/gc.c38
-rw-r--r--fs/f2fs/gc.h33
-rw-r--r--fs/f2fs/inline.c32
-rw-r--r--fs/f2fs/inode.c37
-rw-r--r--fs/f2fs/namei.c2
-rw-r--r--fs/f2fs/node.c154
-rw-r--r--fs/f2fs/node.h45
-rw-r--r--fs/f2fs/recovery.c11
-rw-r--r--fs/f2fs/segment.c194
-rw-r--r--fs/f2fs/segment.h29
-rw-r--r--fs/f2fs/super.c75
-rw-r--r--fs/f2fs/trace.c159
-rw-r--r--fs/f2fs/trace.h46
-rw-r--r--fs/fs-writeback.c14
-rw-r--r--fs/fuse/dev.c51
-rw-r--r--fs/fuse/dir.c31
-rw-r--r--fs/fuse/file.c11
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c6
-rw-r--r--fs/gfs2/acl.c2
-rw-r--r--fs/gfs2/aops.c2
-rw-r--r--fs/gfs2/dir.c3
-rw-r--r--fs/gfs2/file.c1
-rw-r--r--fs/gfs2/glock.c14
-rw-r--r--fs/gfs2/inode.c3
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/quota.c60
-rw-r--r--fs/gfs2/recovery.c2
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/gfs2/sys.c2
-rw-r--r--fs/hugetlbfs/inode.c13
-rw-r--r--fs/inode.c32
-rw-r--r--fs/internal.h7
-rw-r--r--fs/ioctl.c5
-rw-r--r--fs/isofs/util.c18
-rw-r--r--fs/jfs/endian24.h49
-rw-r--r--fs/jfs/jfs_dtree.c4
-rw-r--r--fs/jfs/jfs_types.h55
-rw-r--r--fs/jfs/jfs_xtree.h25
-rw-r--r--fs/jfs/super.c3
-rw-r--r--fs/kernfs/dir.c36
-rw-r--r--fs/kernfs/file.c4
-rw-r--r--fs/kernfs/inode.c13
-rw-r--r--fs/kernfs/kernfs-internal.h1
-rw-r--r--fs/kernfs/mount.c1
-rw-r--r--fs/lockd/mon.c13
-rw-r--r--fs/lockd/svc.c8
-rw-r--r--fs/lockd/svclock.c4
-rw-r--r--fs/lockd/svcsubs.c26
-rw-r--r--fs/lockd/xdr.c8
-rw-r--r--fs/locks.c593
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/ncpfs/inode.c3
-rw-r--r--fs/nfs/Kconfig5
-rw-r--r--fs/nfs/Makefile3
-rw-r--r--fs/nfs/blocklayout/blocklayout.c2
-rw-r--r--fs/nfs/callback.c8
-rw-r--r--fs/nfs/delegation.c43
-rw-r--r--fs/nfs/direct.c118
-rw-r--r--fs/nfs/file.c1
-rw-r--r--fs/nfs/filelayout/filelayout.c317
-rw-r--r--fs/nfs/filelayout/filelayout.h40
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c469
-rw-r--r--fs/nfs/flexfilelayout/Makefile5
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1574
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h155
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c552
-rw-r--r--fs/nfs/idmap.c3
-rw-r--r--fs/nfs/inode.c13
-rw-r--r--fs/nfs/internal.h56
-rw-r--r--fs/nfs/nfs2xdr.c10
-rw-r--r--fs/nfs/nfs3_fs.h2
-rw-r--r--fs/nfs/nfs3client.c41
-rw-r--r--fs/nfs/nfs3proc.c9
-rw-r--r--fs/nfs/nfs3super.c2
-rw-r--r--fs/nfs/nfs3xdr.c3
-rw-r--r--fs/nfs/nfs4_fs.h9
-rw-r--r--fs/nfs/nfs4client.c51
-rw-r--r--fs/nfs/nfs4proc.c336
-rw-r--r--fs/nfs/nfs4state.c101
-rw-r--r--fs/nfs/nfs4super.c4
-rw-r--r--fs/nfs/nfs4xdr.c113
-rw-r--r--fs/nfs/nfsroot.c4
-rw-r--r--fs/nfs/objlayout/objio_osd.c5
-rw-r--r--fs/nfs/pagelist.c300
-rw-r--r--fs/nfs/pnfs.c471
-rw-r--r--fs/nfs/pnfs.h135
-rw-r--r--fs/nfs/pnfs_nfs.c840
-rw-r--r--fs/nfs/read.c33
-rw-r--r--fs/nfs/super.c33
-rw-r--r--fs/nfs/write.c99
-rw-r--r--fs/nfsd/Kconfig10
-rw-r--r--fs/nfsd/Makefile8
-rw-r--r--fs/nfsd/blocklayout.c189
-rw-r--r--fs/nfsd/blocklayoutxdr.c157
-rw-r--r--fs/nfsd/blocklayoutxdr.h62
-rw-r--r--fs/nfsd/export.c8
-rw-r--r--fs/nfsd/export.h2
-rw-r--r--fs/nfsd/nfs4callback.c99
-rw-r--r--fs/nfsd/nfs4layouts.c721
-rw-r--r--fs/nfsd/nfs4proc.c310
-rw-r--r--fs/nfsd/nfs4state.c97
-rw-r--r--fs/nfsd/nfs4xdr.c362
-rw-r--r--fs/nfsd/nfsctl.c9
-rw-r--r--fs/nfsd/nfsd.h16
-rw-r--r--fs/nfsd/nfsfh.h18
-rw-r--r--fs/nfsd/nfssvc.c1
-rw-r--r--fs/nfsd/pnfs.h81
-rw-r--r--fs/nfsd/state.h43
-rw-r--r--fs/nfsd/trace.c5
-rw-r--r--fs/nfsd/trace.h54
-rw-r--r--fs/nfsd/xdr4.h59
-rw-r--r--fs/nfsd/xdr4cb.h7
-rw-r--r--fs/nilfs2/file.c1
-rw-r--r--fs/nilfs2/gcinode.c1
-rw-r--r--fs/nilfs2/mdt.c6
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/nilfs2/page.c4
-rw-r--r--fs/nilfs2/page.h3
-rw-r--r--fs/nilfs2/segment.c44
-rw-r--r--fs/nilfs2/segment.h5
-rw-r--r--fs/nilfs2/super.c6
-rw-r--r--fs/notify/Kconfig1
-rw-r--r--fs/notify/fanotify/fanotify.c2
-rw-r--r--fs/notify/fanotify/fanotify_user.c35
-rw-r--r--fs/ntfs/file.c3
-rw-r--r--fs/ocfs2/acl.c14
-rw-r--r--fs/ocfs2/alloc.c18
-rw-r--r--fs/ocfs2/aops.c242
-rw-r--r--fs/ocfs2/cluster/tcp.c3
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h12
-rw-r--r--fs/ocfs2/dir.c10
-rw-r--r--fs/ocfs2/dlm/dlmast.c6
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c4
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c14
-rw-r--r--fs/ocfs2/dlm/dlmdomain.h1
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c7
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c14
-rw-r--r--fs/ocfs2/dlmglue.c3
-rw-r--r--fs/ocfs2/file.c80
-rw-r--r--fs/ocfs2/file.h9
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/ocfs2/journal.c111
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/mmap.c1
-rw-r--r--fs/ocfs2/namei.c284
-rw-r--r--fs/ocfs2/namei.h8
-rw-r--r--fs/ocfs2/ocfs2.h25
-rw-r--r--fs/ocfs2/ocfs2_fs.h14
-rw-r--r--fs/ocfs2/quota.h1
-rw-r--r--fs/ocfs2/quota_local.c20
-rw-r--r--fs/ocfs2/refcounttree.c2
-rw-r--r--fs/ocfs2/reservations.c2
-rw-r--r--fs/ocfs2/super.c51
-rw-r--r--fs/ocfs2/xattr.c10
-rw-r--r--fs/open.c5
-rw-r--r--fs/proc/array.c44
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/page.c16
-rw-r--r--fs/proc/task_mmu.c250
-rw-r--r--fs/pstore/Kconfig10
-rw-r--r--fs/pstore/Makefile2
-rw-r--r--fs/pstore/inode.c26
-rw-r--r--fs/pstore/internal.h6
-rw-r--r--fs/pstore/platform.c5
-rw-r--r--fs/pstore/pmsg.c114
-rw-r--r--fs/pstore/ram.c53
-rw-r--r--fs/quota/Kconfig1
-rw-r--r--fs/quota/dquot.c186
-rw-r--r--fs/quota/quota.c214
-rw-r--r--fs/quota/quota_v1.c4
-rw-r--r--fs/quota/quota_v2.c16
-rw-r--r--fs/ramfs/file-nommu.c7
-rw-r--r--fs/ramfs/inode.c21
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/romfs/mmap-nommu.c10
-rw-r--r--fs/romfs/super.c3
-rw-r--r--fs/select.c2
-rw-r--r--fs/seq_file.c32
-rw-r--r--fs/super.c59
-rw-r--r--fs/sysfs/file.c2
-rw-r--r--fs/sysfs/group.c2
-rw-r--r--fs/ubifs/debug.c4
-rw-r--r--fs/ubifs/dir.c18
-rw-r--r--fs/ubifs/file.c5
-rw-r--r--fs/ubifs/replay.c19
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/ubifs/xattr.c112
-rw-r--r--fs/udf/Kconfig10
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c32
-rw-r--r--fs/udf/super.c5
-rw-r--r--fs/xfs/kmem.c10
-rw-r--r--fs/xfs/kmem.h5
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c20
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h33
-rw-r--r--fs/xfs/libxfs/xfs_format.h24
-rw-r--r--fs/xfs/libxfs/xfs_fs.h (renamed from fs/xfs/xfs_fs.h)0
-rw-r--r--fs/xfs/libxfs/xfs_sb.c320
-rw-r--r--fs/xfs/libxfs/xfs_sb.h11
-rw-r--r--fs/xfs/libxfs/xfs_shared.h33
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c14
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.h1
-rw-r--r--fs/xfs/libxfs/xfs_types.h (renamed from fs/xfs/xfs_types.h)0
-rw-r--r--fs/xfs/xfs_aops.c149
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_bmap_util.h37
-rw-r--r--fs/xfs/xfs_buf.c13
-rw-r--r--fs/xfs/xfs_buf_item.c6
-rw-r--r--fs/xfs/xfs_dquot.h2
-rw-r--r--fs/xfs/xfs_file.c67
-rw-r--r--fs/xfs/xfs_fsops.c34
-rw-r--r--fs/xfs/xfs_inode.c136
-rw-r--r--fs/xfs/xfs_inode.h11
-rw-r--r--fs/xfs/xfs_ioctl.c501
-rw-r--r--fs/xfs/xfs_ioctl32.c2
-rw-r--r--fs/xfs/xfs_iomap.c2
-rw-r--r--fs/xfs/xfs_iomap.h2
-rw-r--r--fs/xfs/xfs_iops.c21
-rw-r--r--fs/xfs/xfs_log.c28
-rw-r--r--fs/xfs/xfs_mount.c107
-rw-r--r--fs/xfs/xfs_mount.h5
-rw-r--r--fs/xfs/xfs_qm.c55
-rw-r--r--fs/xfs/xfs_qm.h5
-rw-r--r--fs/xfs/xfs_qm_syscalls.c244
-rw-r--r--fs/xfs/xfs_quotaops.c67
-rw-r--r--fs/xfs/xfs_super.c27
-rw-r--r--fs/xfs/xfs_sysctl.c18
-rw-r--r--fs/xfs/xfs_trans.c1
-rw-r--r--fs/xfs/xfs_trans_buf.c5
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h8
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h18
-rw-r--r--include/acpi/acrestyp.h42
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actbl3.h2
-rw-r--r--include/acpi/actypes.h14
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/asm-generic/4level-fixup.h1
-rw-r--r--include/asm-generic/pgtable.h168
-rw-r--r--include/asm-generic/tlb.h8
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/clocksource/arm_arch_timer.h2
-rw-r--r--include/crypto/if_alg.h4
-rw-r--r--include/crypto/scatterwalk.h10
-rw-r--r--include/drm/bridge/dw_hdmi.h61
-rw-r--r--include/drm/bridge/ptn3460.h8
-rw-r--r--include/drm/drmP.h8
-rw-r--r--include/drm/drm_atomic.h13
-rw-r--r--include/drm/drm_atomic_helper.h43
-rw-r--r--include/drm/drm_crtc.h120
-rw-r--r--include/drm/drm_crtc_helper.h59
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/drm/drm_fb_helper.h2
-rw-r--r--include/drm/drm_modes.h11
-rw-r--r--include/drm/drm_plane_helper.h5
-rw-r--r--include/drm/i915_component.h38
-rw-r--r--include/drm/i915_powerwell.h37
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h3
-rw-r--r--include/dt-bindings/gpio/meson8-gpio.h157
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h119
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h4
-rw-r--r--include/dt-bindings/sound/samsung-i2s.h8
-rw-r--r--include/kvm/arm_vgic.h43
-rw-r--r--include/linux/acpi.h22
-rw-r--r--include/linux/ahci_platform.h6
-rw-r--r--include/linux/amba/bus.h13
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/ata_platform.h5
-rw-r--r--include/linux/audit.h1
-rw-r--r--include/linux/backing-dev.h53
-rw-r--r--include/linux/bcma/bcma.h1
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/bcma/bcma_regs.h2
-rw-r--r--include/linux/bcma/bcma_soc.h2
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/bitmap.h59
-rw-r--r--include/linux/bitrev.h77
-rw-r--r--include/linux/blk-mq.h18
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h25
-rw-r--r--include/linux/cdev.h2
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/cgroup_subsys.h8
-rw-r--r--include/linux/clocksource.h102
-rw-r--r--include/linux/compaction.h86
-rw-r--r--include/linux/compat.h9
-rw-r--r--include/linux/compiler-gcc.h1
-rw-r--r--include/linux/compiler-gcc4.h4
-rw-r--r--include/linux/compiler-gcc5.h2
-rw-r--r--include/linux/compiler.h39
-rw-r--r--include/linux/coresight.h22
-rw-r--r--include/linux/cpufreq.h10
-rw-r--r--include/linux/cpumask.h71
-rw-r--r--include/linux/crypto.h11
-rw-r--r--include/linux/cryptohash.h2
-rw-r--r--include/linux/devfreq-event.h196
-rw-r--r--include/linux/device-mapper.h10
-rw-r--r--include/linux/device.h62
-rw-r--r--include/linux/dqblk_v1.h3
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/enclosure.h13
-rw-r--r--include/linux/etherdevice.h4
-rw-r--r--include/linux/exportfs.h23
-rw-r--r--include/linux/f2fs_fs.h7
-rw-r--r--include/linux/fb.h12
-rw-r--r--include/linux/fec.h1
-rw-r--r--include/linux/fs.h142
-rw-r--r--include/linux/fsnotify.h6
-rw-r--r--include/linux/ftrace_event.h6
-rw-r--r--include/linux/genetlink.h4
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/gpio/consumer.h17
-rw-r--r--include/linux/hdmi.h37
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/host1x.h18
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/huge_mm.h12
-rw-r--r--include/linux/hugetlb.h10
-rw-r--r--include/linux/hw_random.h4
-rw-r--r--include/linux/hyperv.h38
-rw-r--r--include/linux/i2c.h6
-rw-r--r--include/linux/ieee80211.h27
-rw-r--r--include/linux/if_bridge.h18
-rw-r--r--include/linux/if_vlan.h74
-rw-r--r--include/linux/iio/buffer.h76
-rw-r--r--include/linux/iio/common/ssp_sensors.h82
-rw-r--r--include/linux/iio/consumer.h12
-rw-r--r--include/linux/iio/iio.h11
-rw-r--r--include/linux/iio/kfifo_buf.h5
-rw-r--r--include/linux/iio/types.h14
-rw-r--r--include/linux/init_task.h11
-rw-r--r--include/linux/input/mt.h3
-rw-r--r--include/linux/iopoll.h144
-rw-r--r--include/linux/iova.h41
-rw-r--r--include/linux/ipv6.h13
-rw-r--r--include/linux/irqchip/arm-gic-v3.h44
-rw-r--r--include/linux/irqchip/irq-omap-intc.h2
-rw-r--r--include/linux/jbd.h9
-rw-r--r--include/linux/jbd2.h9
-rw-r--r--include/linux/kasan.h89
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/kernfs.h7
-rw-r--r--include/linux/kprobes.h3
-rw-r--r--include/linux/ktime.h17
-rw-r--r--include/linux/kvm_host.h28
-rw-r--r--include/linux/led-class-flash.h207
-rw-r--r--include/linux/leds.h3
-rw-r--r--include/linux/libata.h13
-rw-r--r--include/linux/list_lru.h82
-rw-r--r--include/linux/list_nulls.h6
-rw-r--r--include/linux/livepatch.h133
-rw-r--r--include/linux/mei_cl_bus.h4
-rw-r--r--include/linux/memcontrol.h92
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h1
-rw-r--r--include/linux/mfd/max77693-private.h108
-rw-r--r--include/linux/mfd/samsung/s2mps13.h2
-rw-r--r--include/linux/mfd/stmpe.h16
-rw-r--r--include/linux/mfd/syscon/exynos4-pmu.h21
-rw-r--r--include/linux/mfd/tc3589x.h12
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h3
-rw-r--r--include/linux/mfd/tmio.h28
-rw-r--r--include/linux/migrate.h4
-rw-r--r--include/linux/mlx4/cmd.h16
-rw-r--r--include/linux/mlx4/device.h63
-rw-r--r--include/linux/mlx4/driver.h19
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/mm.h156
-rw-r--r--include/linux/mm_types.h23
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/mmc/host.h4
-rw-r--r--include/linux/mmc/mmc.h10
-rw-r--r--include/linux/mmc/sdhci.h11
-rw-r--r--include/linux/mmc/sdio_ids.h6
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h15
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmzone.h15
-rw-r--r--include/linux/mod_devicetable.h9
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/moduleloader.h4
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/ubi.h53
-rw-r--r--include/linux/mutex.h1
-rw-r--r--include/linux/netdev_features.h6
-rw-r--r--include/linux/netdevice.h94
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nfs_fs_sb.h12
-rw-r--r--include/linux/nfs_idmap.h2
-rw-r--r--include/linux/nfs_page.h22
-rw-r--r--include/linux/nfs_xdr.h19
-rw-r--r--include/linux/nodemask.h67
-rw-r--r--include/linux/nvme.h3
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/oom.h23
-rw-r--r--include/linux/osq_lock.h12
-rw-r--r--include/linux/page-flags.h5
-rw-r--r--include/linux/page_counter.h3
-rw-r--r--include/linux/page_ext.h2
-rw-r--r--include/linux/pci.h27
-rw-r--r--include/linux/percpu-refcount.h34
-rw-r--r--include/linux/perf_event.h53
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/phy/omap_control_phy.h6
-rw-r--r--include/linux/phy/phy-qcom-ufs.h59
-rw-r--r--include/linux/pinctrl/consumer.h6
-rw-r--r--include/linux/pinctrl/pinconf-generic.h29
-rw-r--r--include/linux/pinctrl/pinctrl.h12
-rw-r--r--include/linux/platform_data/ipmmu-vmsa.h24
-rw-r--r--include/linux/platform_data/irda-sa11x0.h (renamed from arch/arm/include/asm/mach/irda.h)0
-rw-r--r--include/linux/platform_data/mmc-omap.h4
-rw-r--r--include/linux/platform_data/regulator-haptic.h29
-rw-r--r--include/linux/platform_data/st21nfca.h2
-rw-r--r--include/linux/platform_data/st21nfcb.h4
-rw-r--r--include/linux/platform_data/tpm_stm_st33.h (renamed from drivers/char/tpm/tpm_i2c_stm_st33.h)38
-rw-r--r--include/linux/platform_data/vsp1.h27
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h4
-rw-r--r--include/linux/pnp.h12
-rw-r--r--include/linux/power/charger-manager.h32
-rw-r--r--include/linux/printk.h21
-rw-r--r--include/linux/pstore.h1
-rw-r--r--include/linux/pstore_ram.h1
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/quota.h69
-rw-r--r--include/linux/quotaops.h7
-rw-r--r--include/linux/rculist.h16
-rw-r--r--include/linux/rcupdate.h13
-rw-r--r--include/linux/rcutiny.h45
-rw-r--r--include/linux/rcutree.h11
-rw-r--r--include/linux/regmap.h2
-rw-r--r--include/linux/regulator/da9211.h2
-rw-r--r--include/linux/regulator/driver.h13
-rw-r--r--include/linux/regulator/machine.h13
-rw-r--r--include/linux/regulator/mt6397-regulator.h49
-rw-r--r--include/linux/regulator/pfuze100.h14
-rw-r--r--include/linux/resource_ext.h77
-rw-r--r--include/linux/rhashtable.h308
-rw-r--r--include/linux/rmap.h4
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/rtc/ds1685.h375
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/security.h58
-rw-r--r--include/linux/seq_buf.h3
-rw-r--r--include/linux/seq_file.h25
-rw-r--r--include/linux/serial_8250.h4
-rw-r--r--include/linux/serial_core.h22
-rw-r--r--include/linux/serial_s3c.h28
-rw-r--r--include/linux/shrinker.h6
-rw-r--r--include/linux/skbuff.h44
-rw-r--r--include/linux/slab.h45
-rw-r--r--include/linux/slab_def.h2
-rw-r--r--include/linux/slub_def.h21
-rw-r--r--include/linux/smp.h7
-rw-r--r--include/linux/socket.h7
-rw-r--r--include/linux/spi/at86rf230.h4
-rw-r--r--include/linux/spi/l4f00242t03.h4
-rw-r--r--include/linux/spi/lms283gf05.h4
-rw-r--r--include/linux/spi/mxs-spi.h4
-rw-r--r--include/linux/spi/pxa2xx_spi.h5
-rw-r--r--include/linux/spi/rspi.h5
-rw-r--r--include/linux/spi/sh_hspi.h4
-rw-r--r--include/linux/spi/sh_msiof.h2
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spi/tle62x0.h4
-rw-r--r--include/linux/spi/tsc2005.h5
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/srcu.h14
-rw-r--r--include/linux/ssb/ssb_regs.h1
-rw-r--r--include/linux/string.h6
-rw-r--r--include/linux/string_helpers.h4
-rw-r--r--include/linux/sunrpc/clnt.h3
-rw-r--r--include/linux/sunrpc/metrics.h4
-rw-r--r--include/linux/sunrpc/rpc_rdma.h14
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_rdma.h15
-rw-r--r--include/linux/sunrpc/xprt.h6
-rw-r--r--include/linux/swap.h15
-rw-r--r--include/linux/swapops.h8
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/tcp.h6
-rw-r--r--include/linux/ti_wilink_st.h13
-rw-r--r--include/linux/time.h13
-rw-r--r--include/linux/timecounter.h139
-rw-r--r--include/linux/timekeeping.h21
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/tty.h25
-rw-r--r--include/linux/types.h8
-rw-r--r--include/linux/udp.h16
-rw-r--r--include/linux/uio.h6
-rw-r--r--include/linux/usb.h7
-rw-r--r--include/linux/usb/ehci_pdriver.h4
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/linux/usb/phy.h4
-rw-r--r--include/linux/usb/usb_phy_generic.h2
-rw-r--r--include/linux/vmalloc.h13
-rw-r--r--include/linux/vmw_vmci_api.h2
-rw-r--r--include/linux/vt_buffer.h4
-rw-r--r--include/linux/wait.h42
-rw-r--r--include/linux/workqueue.h8
-rw-r--r--include/linux/zpool.h5
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/media/smiapp.h10
-rw-r--r--include/media/tea575x.h5
-rw-r--r--include/media/v4l2-dev.h3
-rw-r--r--include/media/v4l2-ioctl.h15
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf-dma-sg.h8
-rw-r--r--include/media/videobuf-dvb.h6
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h83
-rw-r--r--include/net/bluetooth/hci_core.h52
-rw-r--r--include/net/bluetooth/l2cap.h1
-rw-r--r--include/net/bluetooth/mgmt.h4
-rw-r--r--include/net/bluetooth/rfcomm.h2
-rw-r--r--include/net/bond_3ad.h1
-rw-r--r--include/net/bonding.h18
-rw-r--r--include/net/cfg80211.h293
-rw-r--r--include/net/cfg802154.h10
-rw-r--r--include/net/cipso_ipv4.h25
-rw-r--r--include/net/flow_keys.h6
-rw-r--r--include/net/genetlink.h28
-rw-r--r--include/net/geneve.h7
-rw-r--r--include/net/gro_cells.h29
-rw-r--r--include/net/ieee802154_netdev.h4
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/inet_sock.h29
-rw-r--r--include/net/ip.h20
-rw-r--r--include/net/ip6_fib.h10
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h50
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/ipv6.h24
-rw-r--r--include/net/mac80211.h100
-rw-r--r--include/net/mac802154.h5
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netlink.h10
-rw-r--r--include/net/netns/ipv4.h6
-rw-r--r--include/net/nfc/hci.h25
-rw-r--r--include/net/nfc/nci.h97
-rw-r--r--include/net/nfc/nci_core.h137
-rw-r--r--include/net/nfc/nfc.h27
-rw-r--r--include/net/nl802154.h45
-rw-r--r--include/net/ping.h2
-rw-r--r--include/net/pkt_sched.h12
-rw-r--r--include/net/regulatory.h19
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sch_generic.h13
-rw-r--r--include/net/sock.h71
-rw-r--r--include/net/switchdev.h79
-rw-r--r--include/net/tc_act/tc_bpf.h25
-rw-r--r--include/net/tc_act/tc_connmark.h14
-rw-r--r--include/net/tcp.h76
-rw-r--r--include/net/udp_tunnel.h16
-rw-r--r--include/net/udplite.h3
-rw-r--r--include/net/vxlan.h103
-rw-r--r--include/rdma/ib_verbs.h5
-rw-r--r--include/scsi/scsi.h3
-rw-r--r--include/scsi/scsi_dbg.h74
-rw-r--r--include/scsi/scsi_device.h22
-rw-r--r--include/scsi/scsi_host.h3
-rw-r--r--include/scsi/scsi_tcq.h3
-rw-r--r--include/sound/ad1816a.h5
-rw-r--r--include/sound/ak4113.h11
-rw-r--r--include/sound/ak4114.h11
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/control.h11
-rw-r--r--include/sound/core.h44
-rw-r--r--include/sound/emu10k1.h16
-rw-r--r--include/sound/es1688.h3
-rw-r--r--include/sound/gus.h6
-rw-r--r--include/sound/hwdep.h3
-rw-r--r--include/sound/pcm.h26
-rw-r--r--include/sound/pcm_params.h96
-rw-r--r--include/sound/rawmidi.h4
-rw-r--r--include/sound/rcar_snd.h1
-rw-r--r--include/sound/rt5677.h3
-rw-r--r--include/sound/sb.h8
-rw-r--r--include/sound/seq_kernel.h9
-rw-r--r--include/sound/simple_card.h1
-rw-r--r--include/sound/soc-dapm.h4
-rw-r--r--include/sound/soc.h4
-rw-r--r--include/sound/sta32x.h18
-rw-r--r--include/sound/wss.h6
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_backend_configfs.h2
-rw-r--r--include/target/target_core_base.h3
-rw-r--r--include/trace/events/compaction.h209
-rw-r--r--include/trace/events/f2fs.h148
-rw-r--r--include/trace/events/iommu.h31
-rw-r--r--include/trace/events/kmem.h7
-rw-r--r--include/trace/events/kvm.h35
-rw-r--r--include/trace/events/net.h8
-rw-r--r--include/trace/events/tlb.h4
-rw-r--r--include/trace/events/writeback.h12
-rw-r--r--include/trace/ftrace.h16
-rw-r--r--include/uapi/drm/drm.h8
-rw-r--r--include/uapi/drm/drm_fourcc.h3
-rw-r--r--include/uapi/drm/drm_mode.h38
-rw-r--r--include/uapi/drm/i915_drm.h30
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/am437x-vpfe.h122
-rw-r--r--include/uapi/linux/can/netlink.h1
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h4
-rw-r--r--include/uapi/linux/in.h1
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/ipv6.h7
-rw-r--r--include/uapi/linux/kernel-page-flags.h1
-rw-r--r--include/uapi/linux/kvm.h9
-rw-r--r--include/uapi/linux/l2tp.h1
-rw-r--r--include/uapi/linux/libc-compat.h6
-rw-r--r--include/uapi/linux/mempolicy.h2
-rw-r--r--include/uapi/linux/msdos_fs.h4
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/net_namespace.h23
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/nfc.h1
-rw-r--r--include/uapi/linux/nfsd/debug.h1
-rw-r--r--include/uapi/linux/nfsd/export.h4
-rw-r--r--include/uapi/linux/nl80211.h207
-rw-r--r--include/uapi/linux/openvswitch.h57
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/pkt_sched.h2
-rw-r--r--include/uapi/linux/quota.h14
-rw-r--r--include/uapi/linux/rtnetlink.h8
-rw-r--r--include/uapi/linux/serial_core.h12
-rw-r--r--include/uapi/linux/serial_reg.h3
-rw-r--r--include/uapi/linux/snmp.h6
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h31
-rw-r--r--include/uapi/linux/tc_act/tc_connmark.h22
-rw-r--r--include/uapi/linux/tipc_config.h20
-rw-r--r--include/uapi/linux/uinput.h4
-rw-r--r--include/uapi/linux/usb/functionfs.h1
-rw-r--r--include/uapi/linux/usbdevice_fs.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h4
-rw-r--r--include/uapi/linux/videodev2.h17
-rw-r--r--include/uapi/rdma/ib_user_verbs.h27
-rw-r--r--include/uapi/sound/asound.h1
-rw-r--r--include/uapi/sound/usb_stream.h76
-rw-r--r--include/video/exynos7_decon.h349
-rw-r--r--include/video/imx-ipu-v3.h21
-rw-r--r--include/video/omapdss.h1
-rw-r--r--include/xen/grant_table.h43
-rw-r--r--include/xen/interface/features.h6
-rw-r--r--include/xen/interface/grant_table.h7
-rw-r--r--include/xen/interface/nmi.h51
-rw-r--r--include/xen/page.h5
-rw-r--r--init/Kconfig34
-rw-r--r--init/main.c18
-rw-r--r--kernel/Kconfig.locks4
-rw-r--r--kernel/Makefile7
-rw-r--r--kernel/auditfilter.c2
-rw-r--r--kernel/bpf/core.c2
-rw-r--r--kernel/bpf/syscall.c25
-rw-r--r--kernel/cgroup.c14
-rw-r--r--kernel/compat.c5
-rw-r--r--kernel/cpu.c56
-rw-r--r--kernel/cpuset.c44
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/events/Makefile2
-rw-r--r--kernel/events/core.c505
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/exit.c3
-rw-r--r--kernel/fork.c17
-rw-r--r--kernel/futex.c8
-rw-r--r--kernel/irq/manage.c3
-rw-r--r--kernel/irq/proc.c11
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/kprobes.c24
-rw-r--r--kernel/livepatch/Kconfig18
-rw-r--r--kernel/livepatch/Makefile3
-rw-r--r--kernel/livepatch/core.c1015
-rw-r--r--kernel/locking/Makefile11
-rw-r--r--kernel/locking/mcs_spinlock.h16
-rw-r--r--kernel/locking/mutex.c62
-rw-r--r--kernel/locking/osq_lock.c (renamed from kernel/locking/mcs_spinlock.c)9
-rw-r--r--kernel/locking/rtmutex.c7
-rw-r--r--kernel/locking/rwsem-spinlock.c2
-rw-r--r--kernel/locking/rwsem-xadd.c3
-rw-r--r--kernel/locking/spinlock.c8
-rw-r--r--kernel/module.c140
-rw-r--r--kernel/notifier.c3
-rw-r--r--kernel/padata.c11
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/params.c3
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/power/process.c75
-rw-r--r--kernel/power/qos.c91
-rw-r--r--kernel/power/snapshot.c11
-rw-r--r--kernel/printk/printk.c12
-rw-r--r--kernel/profile.c3
-rw-r--r--kernel/range.c10
-rw-r--r--kernel/rcu/Makefile3
-rw-r--r--kernel/rcu/rcu.h6
-rw-r--r--kernel/rcu/rcutorture.c66
-rw-r--r--kernel/rcu/srcu.c2
-rw-r--r--kernel/rcu/tiny.c113
-rw-r--r--kernel/rcu/tiny_plugin.h9
-rw-r--r--kernel/rcu/tree.c355
-rw-r--r--kernel/rcu/tree.h62
-rw-r--r--kernel/rcu/tree_plugin.h276
-rw-r--r--kernel/rcu/tree_trace.c8
-rw-r--r--kernel/resource.c25
-rw-r--r--kernel/sched/Makefile2
-rw-r--r--kernel/sched/clock.c13
-rw-r--r--kernel/sched/completion.c18
-rw-r--r--kernel/sched/core.c160
-rw-r--r--kernel/sched/cpudeadline.c27
-rw-r--r--kernel/sched/cpudeadline.h2
-rw-r--r--kernel/sched/deadline.c54
-rw-r--r--kernel/sched/debug.c1
-rw-r--r--kernel/sched/fair.c9
-rw-r--r--kernel/sched/idle.c3
-rw-r--r--kernel/sched/rt.c26
-rw-r--r--kernel/sched/sched.h22
-rw-r--r--kernel/sched/stats.c11
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/smpboot.c2
-rw-r--r--kernel/softirq.c9
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/taskstats.c13
-rw-r--r--kernel/time/Makefile2
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/clocksource.c76
-rw-r--r--kernel/time/hrtimer.c116
-rw-r--r--kernel/time/ntp.c11
-rw-r--r--kernel/time/posix-cpu-timers.c3
-rw-r--r--kernel/time/tick-sched.c11
-rw-r--r--kernel/time/time.c4
-rw-r--r--kernel/time/timecounter.c112
-rw-r--r--kernel/time/timekeeping.c12
-rw-r--r--kernel/trace/Makefile4
-rw-r--r--kernel/trace/ftrace.c55
-rw-r--r--kernel/trace/power-traces.c1
-rw-r--r--kernel/trace/ring_buffer.c42
-rw-r--r--kernel/trace/ring_buffer_benchmark.c18
-rw-r--r--kernel/trace/trace.c195
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_branch.c1
-rw-r--r--kernel/trace/trace_event_perf.c4
-rw-r--r--kernel/trace/trace_events.c71
-rw-r--r--kernel/trace/trace_export.c2
-rw-r--r--kernel/trace/trace_functions_graph.c2
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_kprobe.c6
-rw-r--r--kernel/trace/trace_nop.c2
-rw-r--r--kernel/trace/trace_output.c44
-rw-r--r--kernel/trace/trace_printk.c4
-rw-r--r--kernel/trace/trace_sched_switch.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_seq.c2
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/trace/trace_stat.c2
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/trace/trace_uprobe.c4
-rw-r--r--kernel/watchdog.c2
-rw-r--r--kernel/workqueue.c30
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug12
-rw-r--r--lib/Kconfig.kasan54
-rw-r--r--lib/Makefile14
-rw-r--r--lib/bitmap.c240
-rw-r--r--lib/bitrev.c17
-rw-r--r--lib/checksum.c12
-rw-r--r--lib/dynamic_debug.c2
-rw-r--r--lib/dynamic_queue_limits.c4
-rw-r--r--lib/gen_crc32table.c6
-rw-r--r--lib/genalloc.c5
-rw-r--r--lib/halfmd4.c2
-rw-r--r--lib/hexdump.c105
-rw-r--r--lib/idr.c1
-rw-r--r--lib/interval_tree.c4
-rw-r--r--lib/iovec.c87
-rw-r--r--lib/kobject_uevent.c1
-rw-r--r--lib/lcm.c2
-rw-r--r--lib/list_sort.c7
-rw-r--r--lib/llist.c1
-rw-r--r--lib/md5.c2
-rw-r--r--lib/mpi/mpi-cmp.c10
-rw-r--r--lib/mpi/mpi-internal.h2
-rw-r--r--lib/nlattr.c1
-rw-r--r--lib/percpu_ida.c3
-rw-r--r--lib/plist.c1
-rw-r--r--lib/radix-tree.c2
-rw-r--r--lib/raid6/algos.c2
-rw-r--r--lib/raid6/recov_avx2.c2
-rw-r--r--lib/raid6/recov_ssse3.c6
-rw-r--r--lib/rhashtable.c1170
-rw-r--r--lib/seq_buf.c36
-rw-r--r--lib/show_mem.c1
-rw-r--r--lib/sort.c6
-rw-r--r--lib/stmp_device.c3
-rw-r--r--lib/string.c25
-rw-r--r--lib/string_helpers.c26
-rw-r--r--lib/strncpy_from_user.c3
-rw-r--r--lib/test-hexdump.c180
-rw-r--r--lib/test_kasan.c277
-rw-r--r--lib/test_rhashtable.c227
-rw-r--r--lib/vsprintf.c106
-rw-r--r--mm/Kconfig11
-rw-r--r--mm/Makefile7
-rw-r--r--mm/backing-dev.c107
-rw-r--r--mm/cleancache.c2
-rw-r--r--mm/cma.c2
-rw-r--r--mm/compaction.c181
-rw-r--r--mm/debug.c4
-rw-r--r--mm/fadvise.c10
-rw-r--r--mm/filemap.c30
-rw-r--r--mm/filemap_xip.c478
-rw-r--r--mm/fremap.c283
-rw-r--r--mm/gup.c246
-rw-r--r--mm/huge_memory.c156
-rw-r--r--mm/hugetlb.c160
-rw-r--r--mm/hugetlb_cgroup.c2
-rw-r--r--mm/internal.h28
-rw-r--r--mm/interval_tree.c34
-rw-r--r--mm/kasan/Makefile8
-rw-r--r--mm/kasan/kasan.c516
-rw-r--r--mm/kasan/kasan.h75
-rw-r--r--mm/kasan/report.c269
-rw-r--r--mm/kmemleak.c6
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/list_lru.c467
-rw-r--r--mm/madvise.c32
-rw-r--r--mm/memcontrol.c1079
-rw-r--r--mm/memory-failure.c13
-rw-r--r--mm/memory.c365
-rw-r--r--mm/mempolicy.c286
-rw-r--r--mm/migrate.c45
-rw-r--r--mm/mincore.c175
-rw-r--r--mm/mm_init.c4
-rw-r--r--mm/mmap.c100
-rw-r--r--mm/mmzone.c4
-rw-r--r--mm/mprotect.c50
-rw-r--r--mm/mremap.c2
-rw-r--r--mm/msync.c5
-rw-r--r--mm/nommu.c115
-rw-r--r--mm/oom_kill.c169
-rw-r--r--mm/page-writeback.c46
-rw-r--r--mm/page_alloc.c526
-rw-r--r--mm/page_counter.c7
-rw-r--r--mm/page_owner.c26
-rw-r--r--mm/pagewalk.c235
-rw-r--r--mm/percpu.c6
-rw-r--r--mm/pgtable-generic.c2
-rw-r--r--mm/process_vm_access.c7
-rw-r--r--mm/readahead.c4
-rw-r--r--mm/rmap.c237
-rw-r--r--mm/shmem.c29
-rw-r--r--mm/slab.c17
-rw-r--r--mm/slab.h67
-rw-r--r--mm/slab_common.c323
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c232
-rw-r--r--mm/swap.c6
-rw-r--r--mm/swap_state.c6
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/util.c48
-rw-r--r--mm/vmalloc.c16
-rw-r--r--mm/vmscan.c123
-rw-r--r--mm/vmstat.c130
-rw-r--r--mm/workingset.c9
-rw-r--r--mm/zbud.c3
-rw-r--r--mm/zpool.c6
-rw-r--r--mm/zsmalloc.c239
-rw-r--r--mm/zswap.c5
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_netlink.c8
-rw-r--r--net/batman-adv/Kconfig1
-rw-r--r--net/batman-adv/bat_iv_ogm.c15
-rw-r--r--net/batman-adv/bitarray.c1
-rw-r--r--net/batman-adv/bitarray.h3
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c17
-rw-r--r--net/batman-adv/debugfs.c2
-rw-r--r--net/batman-adv/distributed-arp-table.c1
-rw-r--r--net/batman-adv/distributed-arp-table.h4
-rw-r--r--net/batman-adv/fragmentation.c1
-rw-r--r--net/batman-adv/fragmentation.h3
-rw-r--r--net/batman-adv/gateway_client.c1
-rw-r--r--net/batman-adv/main.c10
-rw-r--r--net/batman-adv/main.h15
-rw-r--r--net/batman-adv/multicast.h3
-rw-r--r--net/batman-adv/network-coding.c3
-rw-r--r--net/batman-adv/originator.c1
-rw-r--r--net/batman-adv/originator.h1
-rw-r--r--net/batman-adv/packet.h5
-rw-r--r--net/batman-adv/routing.c3
-rw-r--r--net/batman-adv/soft-interface.c1
-rw-r--r--net/batman-adv/sysfs.c1
-rw-r--r--net/batman-adv/translation-table.c8
-rw-r--r--net/batman-adv/types.h4
-rw-r--r--net/bluetooth/6lowpan.c66
-rw-r--r--net/bluetooth/Kconfig27
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/core.c7
-rw-r--r--net/bluetooth/cmtp/capi.c6
-rw-r--r--net/bluetooth/hci_conn.c21
-rw-r--r--net/bluetooth/hci_core.c1895
-rw-r--r--net/bluetooth/hci_debugfs.c1056
-rw-r--r--net/bluetooth/hci_debugfs.h26
-rw-r--r--net/bluetooth/hci_event.c248
-rw-r--r--net/bluetooth/hci_request.c556
-rw-r--r--net/bluetooth/hci_request.h54
-rw-r--r--net/bluetooth/hci_sock.c107
-rw-r--r--net/bluetooth/l2cap_core.c55
-rw-r--r--net/bluetooth/l2cap_sock.c11
-rw-r--r--net/bluetooth/mgmt.c617
-rw-r--r--net/bluetooth/rfcomm/core.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c11
-rw-r--r--net/bluetooth/sco.c10
-rw-r--r--net/bluetooth/selftest.c244
-rw-r--r--net/bluetooth/selftest.h45
-rw-r--r--net/bluetooth/smp.c468
-rw-r--r--net/bluetooth/smp.h13
-rw-r--r--net/bridge/br.c52
-rw-r--r--net/bridge/br_fdb.c60
-rw-r--r--net/bridge/br_if.c11
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/bridge/br_mdb.c5
-rw-r--r--net/bridge/br_netfilter.c12
-rw-r--r--net/bridge/br_netlink.c300
-rw-r--r--net/bridge/br_private.h12
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/ebt_vlan.c4
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c29
-rw-r--r--net/caif/chnl_net.c1
-rw-r--r--net/can/gw.c3
-rw-r--r--net/ceph/pagevec.c6
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c284
-rw-r--r--net/core/ethtool.c45
-rw-r--r--net/core/fib_rules.c3
-rw-r--r--net/core/flow.c2
-rw-r--r--net/core/flow_dissector.c21
-rw-r--r--net/core/iovec.c137
-rw-r--r--net/core/neighbour.c64
-rw-r--r--net/core/net-sysfs.c28
-rw-r--r--net/core/net_namespace.c213
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c16
-rw-r--r--net/core/rtnetlink.c160
-rw-r--r--net/core/skbuff.c59
-rw-r--r--net/core/sock.c3
-rw-r--r--net/core/sysctl_net_core.c15
-rw-r--r--net/decnet/dn_dev.c3
-rw-r--r--net/decnet/dn_fib.c3
-rw-r--r--net/decnet/dn_route.c8
-rw-r--r--net/decnet/dn_table.c7
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/dsa/slave.c14
-rw-r--r--net/ethernet/eth.c92
-rw-r--r--net/ieee802154/6lowpan/6lowpan_i.h72
-rw-r--r--net/ieee802154/6lowpan/Kconfig5
-rw-r--r--net/ieee802154/6lowpan/Makefile3
-rw-r--r--net/ieee802154/6lowpan/core.c304
-rw-r--r--net/ieee802154/6lowpan/reassembly.c (renamed from net/ieee802154/reassembly.c)2
-rw-r--r--net/ieee802154/6lowpan/rx.c171
-rw-r--r--net/ieee802154/6lowpan/tx.c271
-rw-r--r--net/ieee802154/6lowpan_rtnl.c729
-rw-r--r--net/ieee802154/Kconfig18
-rw-r--r--net/ieee802154/Makefile8
-rw-r--r--net/ieee802154/af802154.h33
-rw-r--r--net/ieee802154/af_ieee802154.c369
-rw-r--r--net/ieee802154/dgram.c549
-rw-r--r--net/ieee802154/netlink.c12
-rw-r--r--net/ieee802154/nl-mac.c7
-rw-r--r--net/ieee802154/nl-phy.c3
-rw-r--r--net/ieee802154/nl802154.c52
-rw-r--r--net/ieee802154/raw.c270
-rw-r--r--net/ieee802154/rdev-ops.h7
-rw-r--r--net/ieee802154/reassembly.h41
-rw-r--r--net/ieee802154/socket.c1125
-rw-r--r--net/ieee802154/sysfs.c2
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/cipso_ipv4.c51
-rw-r--r--net/ipv4/devinet.c16
-rw-r--r--net/ipv4/fib_frontend.c29
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_rules.c22
-rw-r--r--net/ipv4/fib_semantics.c35
-rw-r--r--net/ipv4/fib_trie.c1960
-rw-r--r--net/ipv4/fou.c32
-rw-r--r--net/ipv4/geneve.c211
-rw-r--r--net/ipv4/icmp.c17
-rw-r--r--net/ipv4/inet_diag.c9
-rw-r--r--net/ipv4/ip_forward.c3
-rw-r--r--net/ipv4/ip_gre.c15
-rw-r--r--net/ipv4/ip_output.c35
-rw-r--r--net/ipv4/ip_sockglue.c123
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/ip_vti.c1
-rw-r--r--net/ipv4/ipconfig.c6
-rw-r--r--net/ipv4/ipip.c13
-rw-r--r--net/ipv4/ipmr.c3
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c8
-rw-r--r--net/ipv4/ping.c22
-rw-r--r--net/ipv4/proc.c6
-rw-r--r--net/ipv4/raw.c7
-rw-r--r--net/ipv4/route.c63
-rw-r--r--net/ipv4/sysctl_net_ipv4.c35
-rw-r--r--net/ipv4/tcp.c233
-rw-r--r--net/ipv4/tcp_bic.c2
-rw-r--r--net/ipv4/tcp_cong.c153
-rw-r--r--net/ipv4/tcp_cubic.c39
-rw-r--r--net/ipv4/tcp_fastopen.c13
-rw-r--r--net/ipv4/tcp_input.c88
-rw-r--r--net/ipv4/tcp_ipv4.c40
-rw-r--r--net/ipv4/tcp_memcontrol.c6
-rw-r--r--net/ipv4/tcp_metrics.c3
-rw-r--r--net/ipv4/tcp_minisocks.c66
-rw-r--r--net/ipv4/tcp_output.c50
-rw-r--r--net/ipv4/tcp_scalable.c3
-rw-r--r--net/ipv4/tcp_timer.c7
-rw-r--r--net/ipv4/tcp_veno.c2
-rw-r--r--net/ipv4/tcp_yeah.c2
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv4/udp_diag.c4
-rw-r--r--net/ipv4/udp_offload.c7
-rw-r--r--net/ipv4/udp_tunnel.c14
-rw-r--r--net/ipv6/addrconf.c82
-rw-r--r--net/ipv6/addrlabel.c5
-rw-r--r--net/ipv6/datagram.c13
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_fib.c112
-rw-r--r--net/ipv6/ip6_gre.c6
-rw-r--r--net/ipv6/ip6_output.c374
-rw-r--r--net/ipv6/ip6_tunnel.c9
-rw-r--r--net/ipv6/ip6_udp_tunnel.c12
-rw-r--r--net/ipv6/ip6_vti.c1
-rw-r--r--net/ipv6/ip6mr.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c8
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c8
-rw-r--r--net/ipv6/output_core.c40
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/raw.c7
-rw-r--r--net/ipv6/route.c90
-rw-r--r--net/ipv6/sit.c9
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/udp.c93
-rw-r--r--net/ipv6/udp_offload.c10
-rw-r--r--net/ipv6/xfrm6_policy.c10
-rw-r--r--net/irda/irlap.c8
-rw-r--r--net/l2tp/l2tp_netlink.c107
-rw-r--r--net/llc/sysctl_net_llc.c8
-rw-r--r--net/mac80211/Kconfig1
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/aes_ccm.c21
-rw-r--r--net/mac80211/aes_ccm.h10
-rw-r--r--net/mac80211/aes_cmac.c34
-rw-r--r--net/mac80211/aes_cmac.h5
-rw-r--r--net/mac80211/aes_gcm.c95
-rw-r--r--net/mac80211/aes_gcm.h22
-rw-r--r--net/mac80211/aes_gmac.c84
-rw-r--r--net/mac80211/aes_gmac.h20
-rw-r--r--net/mac80211/cfg.c90
-rw-r--r--net/mac80211/chan.c41
-rw-r--r--net/mac80211/debugfs.c2
-rw-r--r--net/mac80211/debugfs_key.c55
-rw-r--r--net/mac80211/driver-ops.h30
-rw-r--r--net/mac80211/ethtool.c26
-rw-r--r--net/mac80211/ibss.c11
-rw-r--r--net/mac80211/ieee80211_i.h44
-rw-r--r--net/mac80211/iface.c14
-rw-r--r--net/mac80211/key.c194
-rw-r--r--net/mac80211/key.h18
-rw-r--r--net/mac80211/main.c111
-rw-r--r--net/mac80211/mlme.c98
-rw-r--r--net/mac80211/offchannel.c4
-rw-r--r--net/mac80211/pm.c31
-rw-r--r--net/mac80211/rc80211_minstrel.c6
-rw-r--r--net/mac80211/rc80211_minstrel.h15
-rw-r--r--net/mac80211/rx.c71
-rw-r--r--net/mac80211/scan.c13
-rw-r--r--net/mac80211/spectmgmt.c4
-rw-r--r--net/mac80211/sta_info.c189
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/status.c26
-rw-r--r--net/mac80211/tdls.c69
-rw-r--r--net/mac80211/trace.h33
-rw-r--r--net/mac80211/tx.c25
-rw-r--r--net/mac80211/util.c83
-rw-r--r--net/mac80211/vht.c73
-rw-r--r--net/mac80211/wpa.c443
-rw-r--r--net/mac80211/wpa.h19
-rw-r--r--net/mac802154/cfg.c26
-rw-r--r--net/mac802154/driver-ops.h5
-rw-r--r--net/mac802154/iface.c100
-rw-r--r--net/mac802154/mac_cmd.c6
-rw-r--r--net/mpls/mpls_gso.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c33
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c10
-rw-r--r--net/netfilter/nf_conntrack_core.c37
-rw-r--r--net/netfilter/nf_conntrack_netlink.c89
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c6
-rw-r--r--net/netfilter/nf_log.c3
-rw-r--r--net/netfilter/nf_tables_api.c60
-rw-r--r--net/netfilter/nfnetlink.c20
-rw-r--r--net/netfilter/nfnetlink_cthelper.c4
-rw-r--r--net/netfilter/nft_hash.c145
-rw-r--r--net/netfilter/nft_masq.c26
-rw-r--r--net/netfilter/nft_nat.c48
-rw-r--r--net/netfilter/nft_redir.c25
-rw-r--r--net/netfilter/xt_osf.c169
-rw-r--r--net/netlabel/netlabel_cipso_v4.c6
-rw-r--r--net/netlabel/netlabel_kapi.c15
-rw-r--r--net/netlabel/netlabel_mgmt.c56
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlink/af_netlink.c278
-rw-r--r--net/netlink/af_netlink.h3
-rw-r--r--net/netlink/diag.c15
-rw-r--r--net/netlink/genetlink.c24
-rw-r--r--net/nfc/core.c23
-rw-r--r--net/nfc/hci/command.c23
-rw-r--r--net/nfc/hci/core.c97
-rw-r--r--net/nfc/hci/hci.h10
-rw-r--r--net/nfc/hci/hcp.c11
-rw-r--r--net/nfc/nci/Makefile2
-rw-r--r--net/nfc/nci/core.c169
-rw-r--r--net/nfc/nci/data.c56
-rw-r--r--net/nfc/nci/hci.c694
-rw-r--r--net/nfc/nci/ntf.c77
-rw-r--r--net/nfc/nci/rsp.c120
-rw-r--r--net/nfc/netlink.c59
-rw-r--r--net/nfc/nfc.h2
-rw-r--r--net/openvswitch/actions.c377
-rw-r--r--net/openvswitch/datapath.c240
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/flow.h42
-rw-r--r--net/openvswitch/flow_netlink.c547
-rw-r--r--net/openvswitch/flow_netlink.h13
-rw-r--r--net/openvswitch/flow_table.c228
-rw-r--r--net/openvswitch/flow_table.h8
-rw-r--r--net/openvswitch/vport-geneve.c32
-rw-r--r--net/openvswitch/vport-gre.c14
-rw-r--r--net/openvswitch/vport-vxlan.c110
-rw-r--r--net/openvswitch/vport-vxlan.h11
-rw-r--r--net/openvswitch/vport.c12
-rw-r--r--net/openvswitch/vport.h18
-rw-r--r--net/packet/af_packet.c16
-rw-r--r--net/packet/diag.c3
-rw-r--r--net/phonet/pn_netlink.c20
-rw-r--r--net/rds/ib_send.c4
-rw-r--r--net/rds/iw_cm.c4
-rw-r--r--net/rds/iw_send.c4
-rw-r--r--net/rds/message.c8
-rw-r--r--net/rds/sysctl.c4
-rw-r--r--net/rfkill/rfkill-gpio.c1
-rw-r--r--net/rxrpc/ar-error.c5
-rw-r--r--net/rxrpc/ar-output.c46
-rw-r--r--net/sched/Kconfig24
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/act_bpf.c208
-rw-r--r--net/sched/act_connmark.c192
-rw-r--r--net/sched/act_csum.c2
-rw-r--r--net/sched/cls_api.c7
-rw-r--r--net/sched/cls_basic.c7
-rw-r--r--net/sched/cls_bpf.c33
-rw-r--r--net/sched/cls_flow.c8
-rw-r--r--net/sched/em_ipset.c2
-rw-r--r--net/sched/em_meta.c4
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sched/sch_dsmark.c6
-rw-r--r--net/sched/sch_fq.c43
-rw-r--r--net/sched/sch_teql.c11
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/socket.c126
-rw-r--r--net/sunrpc/clnt.c15
-rw-r--r--net/sunrpc/rpcb_clnt.c8
-rw-r--r--net/sunrpc/sched.c7
-rw-r--r--net/sunrpc/stats.c26
-rw-r--r--net/sunrpc/svc.c4
-rw-r--r--net/sunrpc/svc_xprt.c3
-rw-r--r--net/sunrpc/xprt.c38
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c108
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c16
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c244
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c46
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c47
-rw-r--r--net/sunrpc/xprtrdma/transport.c182
-rw-r--r--net/sunrpc/xprtrdma/verbs.c411
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h112
-rw-r--r--net/sunrpc/xprtsock.c238
-rw-r--r--net/switchdev/switchdev.c175
-rw-r--r--net/tipc/Kconfig12
-rw-r--r--net/tipc/Makefile6
-rw-r--r--net/tipc/addr.c45
-rw-r--r--net/tipc/addr.h45
-rw-r--r--net/tipc/bcast.c504
-rw-r--r--net/tipc/bcast.h115
-rw-r--r--net/tipc/bearer.c205
-rw-r--r--net/tipc/bearer.h43
-rw-r--r--net/tipc/config.c342
-rw-r--r--net/tipc/config.h67
-rw-r--r--net/tipc/core.c154
-rw-r--r--net/tipc/core.h171
-rw-r--r--net/tipc/discover.c90
-rw-r--r--net/tipc/discover.h8
-rw-r--r--net/tipc/link.c881
-rw-r--r--net/tipc/link.h47
-rw-r--r--net/tipc/log.c55
-rw-r--r--net/tipc/msg.c153
-rw-r--r--net/tipc/msg.h143
-rw-r--r--net/tipc/name_distr.c145
-rw-r--r--net/tipc/name_distr.h16
-rw-r--r--net/tipc/name_table.c398
-rw-r--r--net/tipc/name_table.h49
-rw-r--r--net/tipc/net.c56
-rw-r--r--net/tipc/net.h4
-rw-r--r--net/tipc/netlink.c64
-rw-r--r--net/tipc/netlink.h7
-rw-r--r--net/tipc/netlink_compat.c1084
-rw-r--r--net/tipc/node.c336
-rw-r--r--net/tipc/node.h53
-rw-r--r--net/tipc/server.c6
-rw-r--r--net/tipc/server.h17
-rw-r--r--net/tipc/socket.c1015
-rw-r--r--net/tipc/socket.h20
-rw-r--r--net/tipc/subscr.c131
-rw-r--r--net/tipc/subscr.h14
-rw-r--r--net/unix/af_unix.c73
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/vmw_vsock/vmci_transport.c3
-rw-r--r--net/wireless/core.c34
-rw-r--r--net/wireless/core.h11
-rw-r--r--net/wireless/nl80211.c735
-rw-r--r--net/wireless/nl80211.h16
-rw-r--r--net/wireless/reg.c216
-rw-r--r--net/wireless/reg.h1
-rw-r--r--net/wireless/scan.c13
-rw-r--r--net/wireless/trace.h31
-rw-r--r--net/wireless/util.c103
-rw-r--r--net/wireless/wext-compat.c10
-rw-r--r--net/xfrm/xfrm_algo.c5
-rw-r--r--net/xfrm/xfrm_user.c27
-rw-r--r--samples/Kconfig7
-rw-r--r--samples/Makefile2
-rw-r--r--samples/bpf/test_maps.c4
-rw-r--r--samples/livepatch/Makefile1
-rw-r--r--samples/livepatch/livepatch-sample.c91
-rw-r--r--samples/trace_events/trace-events-sample.c80
-rw-r--r--samples/trace_events/trace-events-sample.h328
-rw-r--r--scripts/Makefile.build5
-rw-r--r--scripts/Makefile.kasan25
-rw-r--r--scripts/Makefile.lib10
-rw-r--r--scripts/asn1_compiler.c30
-rwxr-xr-xscripts/checkpatch.pl147
-rwxr-xr-xscripts/diffconfig1
-rw-r--r--scripts/module-common.lds23
-rwxr-xr-xscripts/recordmcount.pl10
-rw-r--r--security/capability.c27
-rw-r--r--security/integrity/ima/Kconfig1
-rw-r--r--security/keys/Kconfig18
-rw-r--r--security/keys/proc.c8
-rw-r--r--security/security.c36
-rw-r--r--security/selinux/avc.c5
-rw-r--r--security/selinux/hooks.c102
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/selinux/ss/policydb.c8
-rw-r--r--security/smack/Kconfig12
-rw-r--r--security/smack/Makefile1
-rw-r--r--security/smack/smack.h11
-rw-r--r--security/smack/smack_lsm.c199
-rw-r--r--security/smack/smack_netfilter.c96
-rw-r--r--security/tomoyo/Kconfig1
-rw-r--r--sound/aoa/soundbus/i2sbus/control.c2
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c19
-rw-r--r--sound/aoa/soundbus/i2sbus/pcm.c7
-rw-r--r--sound/arm/aaci.c4
-rw-r--r--sound/atmel/ac97c.c51
-rw-r--r--sound/core/compress_offload.c26
-rw-r--r--sound/core/control.c68
-rw-r--r--sound/core/hwdep.c88
-rw-r--r--sound/core/init.c54
-rw-r--r--sound/core/memory.c4
-rw-r--r--sound/core/oss/pcm_oss.c6
-rw-r--r--sound/core/pcm.c70
-rw-r--r--sound/core/pcm_lib.c100
-rw-r--r--sound/core/pcm_memory.c2
-rw-r--r--sound/core/pcm_native.c21
-rw-r--r--sound/core/rawmidi.c47
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c6
-rw-r--r--sound/core/seq/seq_clientmgr.c17
-rw-r--r--sound/core/seq/seq_dummy.c31
-rw-r--r--sound/core/seq/seq_midi.c3
-rw-r--r--sound/core/seq/seq_ports.c9
-rw-r--r--sound/core/seq/seq_ports.h1
-rw-r--r--sound/core/sound.c116
-rw-r--r--sound/core/timer.c46
-rw-r--r--sound/drivers/aloop.c3
-rw-r--r--sound/drivers/dummy.c10
-rw-r--r--sound/drivers/ml403-ac97cr.c12
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c13
-rw-r--r--sound/drivers/mtpav.c12
-rw-r--r--sound/drivers/opl3/opl3_lib.c2
-rw-r--r--sound/drivers/opl3/opl3_midi.c11
-rw-r--r--sound/drivers/opl3/opl3_seq.c4
-rw-r--r--sound/drivers/opl4/opl4_lib.c2
-rw-r--r--sound/drivers/opl4/opl4_synth.c2
-rw-r--r--sound/drivers/pcsp/pcsp.c2
-rw-r--r--sound/drivers/pcsp/pcsp_input.c2
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/drivers/serial-u16550.c11
-rw-r--r--sound/drivers/vx/vx_core.c2
-rw-r--r--sound/firewire/amdtp.c71
-rw-r--r--sound/firewire/amdtp.h5
-rw-r--r--sound/firewire/bebob/bebob_stream.c7
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c5
-rw-r--r--sound/i2c/other/ak4113.c36
-rw-r--r--sound/i2c/other/ak4114.c46
-rw-r--r--sound/i2c/other/ak4117.c10
-rw-r--r--sound/i2c/other/ak4xxx-adda.c2
-rw-r--r--sound/isa/ad1816a/ad1816a.c5
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c11
-rw-r--r--sound/isa/ad1848/ad1848.c7
-rw-r--r--sound/isa/als100.c2
-rw-r--r--sound/isa/azt2320.c6
-rw-r--r--sound/isa/cmi8328.c4
-rw-r--r--sound/isa/cs423x/cs4231.c9
-rw-r--r--sound/isa/cs423x/cs4236.c13
-rw-r--r--sound/isa/cs423x/cs4236_lib.c11
-rw-r--r--sound/isa/es1688/es1688.c7
-rw-r--r--sound/isa/es1688/es1688_lib.c8
-rw-r--r--sound/isa/es18xx.c12
-rw-r--r--sound/isa/galaxy/galaxy.c4
-rw-r--r--sound/isa/gus/gus_instr.c172
-rw-r--r--sound/isa/gus/gus_pcm.c6
-rw-r--r--sound/isa/gus/gus_uart.c6
-rw-r--r--sound/isa/gus/gusclassic.c4
-rw-r--r--sound/isa/gus/gusextreme.c4
-rw-r--r--sound/isa/gus/gusmax.c8
-rw-r--r--sound/isa/gus/interwave.c14
-rw-r--r--sound/isa/msnd/msnd.c6
-rw-r--r--sound/isa/msnd/msnd.h2
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c5
-rw-r--r--sound/isa/opl3sa2.c7
-rw-r--r--sound/isa/opti9xx/miro.c14
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c15
-rw-r--r--sound/isa/sb/emu8000.c37
-rw-r--r--sound/isa/sb/emu8000_patch.c2
-rw-r--r--sound/isa/sb/emu8000_pcm.c10
-rw-r--r--sound/isa/sb/emu8000_synth.c3
-rw-r--r--sound/isa/sb/jazz16.c2
-rw-r--r--sound/isa/sb/sb16.c2
-rw-r--r--sound/isa/sb/sb16_main.c10
-rw-r--r--sound/isa/sb/sb8.c4
-rw-r--r--sound/isa/sb/sb8_main.c8
-rw-r--r--sound/isa/sb/sb8_midi.c20
-rw-r--r--sound/isa/sb/sb_common.c2
-rw-r--r--sound/isa/sb/sb_mixer.c2
-rw-r--r--sound/isa/sc6000.c2
-rw-r--r--sound/isa/sscape.c6
-rw-r--r--sound/isa/wavefront/wavefront.c4
-rw-r--r--sound/isa/wavefront/wavefront_fx.c2
-rw-r--r--sound/isa/wavefront/wavefront_midi.c14
-rw-r--r--sound/isa/wavefront/wavefront_synth.c2
-rw-r--r--sound/isa/wss/wss_lib.c10
-rw-r--r--sound/oss/dmasound/dmasound_atari.c2
-rw-r--r--sound/oss/msnd_pinnacle.c4
-rw-r--r--sound/oss/pss.c2
-rw-r--r--sound/oss/swarm_cs4297a.c2
-rw-r--r--sound/oss/trix.c2
-rw-r--r--sound/parisc/harmony.c6
-rw-r--r--sound/pci/Kconfig9
-rw-r--r--sound/pci/ad1889.c18
-rw-r--r--sound/pci/ali5451/ali5451.c17
-rw-r--r--sound/pci/als300.c18
-rw-r--r--sound/pci/als4000.c17
-rw-r--r--sound/pci/asihpi/asihpi.c22
-rw-r--r--sound/pci/asihpi/hpi6000.c7
-rw-r--r--sound/pci/asihpi/hpioctl.c10
-rw-r--r--sound/pci/atiixp.c20
-rw-r--r--sound/pci/atiixp_modem.c20
-rw-r--r--sound/pci/au88x0/au88x0.h4
-rw-r--r--sound/pci/aw2/aw2-alsa.c4
-rw-r--r--sound/pci/aw2/aw2-saa7146.c2
-rw-r--r--sound/pci/azt3328.c17
-rw-r--r--sound/pci/bt87x.c5
-rw-r--r--sound/pci/ca0106/ca0106_main.c16
-rw-r--r--sound/pci/ca0106/ca0106_mixer.c2
-rw-r--r--sound/pci/ca0106/ca0106_proc.c2
-rw-r--r--sound/pci/cmipci.c17
-rw-r--r--sound/pci/cs4281.c41
-rw-r--r--sound/pci/cs46xx/cs46xx.c10
-rw-r--r--sound/pci/cs46xx/cs46xx.h10
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c62
-rw-r--r--sound/pci/cs46xx/dsp_spos.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c2
-rw-r--r--sound/pci/cs5530.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pm.c18
-rw-r--r--sound/pci/ctxfi/cthw20k1.c14
-rw-r--r--sound/pci/ctxfi/cthw20k2.c17
-rw-r--r--sound/pci/echoaudio/darla20.c2
-rw-r--r--sound/pci/echoaudio/darla24.c2
-rw-r--r--sound/pci/echoaudio/echo3g.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.c11
-rw-r--r--sound/pci/echoaudio/gina20.c2
-rw-r--r--sound/pci/echoaudio/gina24.c2
-rw-r--r--sound/pci/echoaudio/indigo.c2
-rw-r--r--sound/pci/echoaudio/indigodj.c2
-rw-r--r--sound/pci/echoaudio/indigoio.c2
-rw-r--r--sound/pci/echoaudio/layla20.c2
-rw-r--r--sound/pci/echoaudio/layla24.c2
-rw-r--r--sound/pci/echoaudio/mia.c2
-rw-r--r--sound/pci/echoaudio/midi.c5
-rw-r--r--sound/pci/echoaudio/mona.c2
-rw-r--r--sound/pci/emu10k1/emu10k1.c27
-rw-r--r--sound/pci/emu10k1/emu10k1x.c19
-rw-r--r--sound/pci/emu10k1/emufx.c7
-rw-r--r--sound/pci/emu10k1/emupcm.c33
-rw-r--r--sound/pci/emu10k1/p16v.c14
-rw-r--r--sound/pci/ens1370.c54
-rw-r--r--sound/pci/es1938.c15
-rw-r--r--sound/pci/es1968.c18
-rw-r--r--sound/pci/fm801.c66
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/hda_auto_parser.c18
-rw-r--r--sound/pci/hda/hda_controller.c8
-rw-r--r--sound/pci/hda/hda_hwdep.c7
-rw-r--r--sound/pci/hda/hda_i915.c154
-rw-r--r--sound/pci/hda/hda_i915.h37
-rw-r--r--sound/pci/hda/hda_intel.c76
-rw-r--r--sound/pci/hda/hda_intel.h71
-rw-r--r--sound/pci/hda/hda_priv.h2
-rw-r--r--sound/pci/hda/patch_analog.c33
-rw-r--r--sound/pci/hda/patch_realtek.c95
-rw-r--r--sound/pci/hda/patch_sigmatel.c20
-rw-r--r--sound/pci/ice1712/ak4xxx.c2
-rw-r--r--sound/pci/ice1712/ice1712.c42
-rw-r--r--sound/pci/ice1712/ice1724.c16
-rw-r--r--sound/pci/ice1712/juli.c4
-rw-r--r--sound/pci/ice1712/wm8766.c16
-rw-r--r--sound/pci/ice1712/wm8766.h2
-rw-r--r--sound/pci/ice1712/wm8776.c15
-rw-r--r--sound/pci/ice1712/wm8776.h3
-rw-r--r--sound/pci/intel8x0.c17
-rw-r--r--sound/pci/intel8x0m.c14
-rw-r--r--sound/pci/korg1212/korg1212.c14
-rw-r--r--sound/pci/lola/lola.c6
-rw-r--r--sound/pci/maestro3.c17
-rw-r--r--sound/pci/mixart/mixart.c7
-rw-r--r--sound/pci/mixart/mixart_core.c2
-rw-r--r--sound/pci/mixart/mixart_hwdep.c2
-rw-r--r--sound/pci/nm256/nm256.c22
-rw-r--r--sound/pci/oxygen/Makefile2
-rw-r--r--sound/pci/oxygen/oxygen.h2
-rw-r--r--sound/pci/oxygen/oxygen_io.c2
-rw-r--r--sound/pci/oxygen/oxygen_lib.c44
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c36
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c53
-rw-r--r--sound/pci/oxygen/se6x.c160
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c2
-rw-r--r--sound/pci/pcxhr/pcxhr_hwdep.c2
-rw-r--r--sound/pci/riptide/riptide.c52
-rw-r--r--sound/pci/rme32.c5
-rw-r--r--sound/pci/rme96.c22
-rw-r--r--sound/pci/rme9652/hdsp.c20
-rw-r--r--sound/pci/rme9652/hdspm.c20
-rw-r--r--sound/pci/rme9652/rme9652.c5
-rw-r--r--sound/pci/sis7019.c18
-rw-r--r--sound/pci/sonicvibes.c10
-rw-r--r--sound/pci/trident/trident.c6
-rw-r--r--sound/pci/trident/trident.h6
-rw-r--r--sound/pci/trident/trident_main.c39
-rw-r--r--sound/pci/trident/trident_memory.c2
-rw-r--r--sound/pci/via82xx.c16
-rw-r--r--sound/pci/via82xx_modem.c17
-rw-r--r--sound/pci/vx222/vx222.c17
-rw-r--r--sound/pci/vx222/vx222_ops.c2
-rw-r--r--sound/pci/ymfpci/ymfpci.c8
-rw-r--r--sound/pci/ymfpci/ymfpci.h8
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c55
-rw-r--r--sound/ppc/awacs.c2
-rw-r--r--sound/ppc/beep.c2
-rw-r--r--sound/ppc/burgundy.c2
-rw-r--r--sound/ppc/pmac.c17
-rw-r--r--sound/ppc/snd_ps3.c4
-rw-r--r--sound/ppc/tumbler.c2
-rw-r--r--sound/sh/aica.c10
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/adi/axi-i2s.c2
-rw-r--r--sound/soc/atmel/Kconfig2
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c12
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c132
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c31
-rw-r--r--sound/soc/au1x/db1200.c19
-rw-r--r--sound/soc/au1x/dbdma2.c6
-rw-r--r--sound/soc/au1x/dma.c6
-rw-r--r--sound/soc/codecs/88pm860x-codec.c4
-rw-r--r--sound/soc/codecs/Kconfig10
-rw-r--r--sound/soc/codecs/Makefile2
-rw-r--r--sound/soc/codecs/ad193x.c4
-rw-r--r--sound/soc/codecs/ak4671.c2
-rw-r--r--sound/soc/codecs/alc5623.c8
-rw-r--r--sound/soc/codecs/alc5632.c12
-rw-r--r--sound/soc/codecs/arizona.c78
-rw-r--r--sound/soc/codecs/arizona.h5
-rw-r--r--sound/soc/codecs/bt-sco.c2
-rw-r--r--sound/soc/codecs/cs35l32.c4
-rw-r--r--sound/soc/codecs/cs42l52.c4
-rw-r--r--sound/soc/codecs/cs42l56.c4
-rw-r--r--sound/soc/codecs/cs42l73.c4
-rw-r--r--sound/soc/codecs/da732x.c4
-rw-r--r--sound/soc/codecs/max98357a.c138
-rw-r--r--sound/soc/codecs/mc13783.c10
-rw-r--r--sound/soc/codecs/pcm3008.c4
-rw-r--r--sound/soc/codecs/pcm512x-i2c.c4
-rw-r--r--sound/soc/codecs/pcm512x-spi.c4
-rw-r--r--sound/soc/codecs/pcm512x.c934
-rw-r--r--sound/soc/codecs/pcm512x.h109
-rw-r--r--sound/soc/codecs/rt286.c88
-rw-r--r--sound/soc/codecs/rt286.h7
-rw-r--r--sound/soc/codecs/rt5631.c28
-rw-r--r--sound/soc/codecs/rt5640.c12
-rw-r--r--sound/soc/codecs/rt5645.c255
-rw-r--r--sound/soc/codecs/rt5645.h87
-rw-r--r--sound/soc/codecs/rt5651.c18
-rw-r--r--sound/soc/codecs/rt5670.c138
-rw-r--r--sound/soc/codecs/rt5670.h80
-rw-r--r--sound/soc/codecs/rt5677.c261
-rw-r--r--sound/soc/codecs/sgtl5000.c27
-rw-r--r--sound/soc/codecs/sn95031.c33
-rw-r--r--sound/soc/codecs/sta32x.c534
-rw-r--r--sound/soc/codecs/sta32x.h2
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c9
-rw-r--r--sound/soc/codecs/tlv320aic3x.c349
-rw-r--r--sound/soc/codecs/tlv320dac33.c9
-rw-r--r--sound/soc/codecs/ts3a227e.c41
-rw-r--r--sound/soc/codecs/twl4030.c55
-rw-r--r--sound/soc/codecs/twl6040.c4
-rw-r--r--sound/soc/codecs/wm2000.c2
-rw-r--r--sound/soc/codecs/wm5100.c5
-rw-r--r--sound/soc/codecs/wm5102.c23
-rw-r--r--sound/soc/codecs/wm5110.c20
-rw-r--r--sound/soc/codecs/wm8350.c2
-rw-r--r--sound/soc/codecs/wm8400.c9
-rw-r--r--sound/soc/codecs/wm8731.c5
-rw-r--r--sound/soc/codecs/wm8750.c2
-rw-r--r--sound/soc/codecs/wm8770.c8
-rw-r--r--sound/soc/codecs/wm8804.c4
-rw-r--r--sound/soc/codecs/wm8900.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/codecs/wm8904.c58
-rw-r--r--sound/soc/codecs/wm8955.c2
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c2
-rw-r--r--sound/soc/codecs/wm8960.c53
-rw-r--r--sound/soc/codecs/wm8961.c4
-rw-r--r--sound/soc/codecs/wm8962.c6
-rw-r--r--sound/soc/codecs/wm8988.c6
-rw-r--r--sound/soc/codecs/wm8990.c9
-rw-r--r--sound/soc/codecs/wm8991.c9
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c23
-rw-r--r--sound/soc/codecs/wm8995.c20
-rw-r--r--sound/soc/codecs/wm8996.c8
-rw-r--r--sound/soc/codecs/wm8997.c11
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/codecs/wm9090.c2
-rw-r--r--sound/soc/codecs/wm9705.c16
-rw-r--r--sound/soc/codecs/wm9712.c12
-rw-r--r--sound/soc/codecs/wm9713.c14
-rw-r--r--sound/soc/codecs/wm_adsp.c6
-rw-r--r--sound/soc/codecs/wm_hubs.c10
-rw-r--r--sound/soc/davinci/Kconfig3
-rw-r--r--sound/soc/davinci/davinci-evm.c6
-rw-r--r--sound/soc/davinci/davinci-mcasp.c103
-rw-r--r--sound/soc/dwc/Kconfig1
-rw-r--r--sound/soc/dwc/designware_i2s.c311
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c23
-rw-r--r--sound/soc/fsl/fsl_asrc.c5
-rw-r--r--sound/soc/fsl/fsl_asrc.h3
-rw-r--r--sound/soc/fsl/fsl_esai.c2
-rw-r--r--sound/soc/fsl/fsl_esai.h2
-rw-r--r--sound/soc/fsl/fsl_sai.c2
-rw-r--r--sound/soc/fsl/fsl_spdif.c17
-rw-r--r--sound/soc/fsl/fsl_ssi.c10
-rw-r--r--sound/soc/fsl/fsl_utils.c27
-rw-r--r--sound/soc/fsl/fsl_utils.h3
-rw-r--r--sound/soc/fsl/imx-mc13783.c5
-rw-r--r--sound/soc/fsl/imx-spdif.c1
-rw-r--r--sound/soc/fsl/imx-ssi.c5
-rw-r--r--sound/soc/fsl/imx-wm8962.c1
-rw-r--r--sound/soc/fsl/mx27vis-aic32x4.c12
-rw-r--r--sound/soc/fsl/wm1133-ev1.c16
-rw-r--r--sound/soc/generic/simple-card.c41
-rw-r--r--sound/soc/intel/Kconfig15
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/broadwell.c10
-rw-r--r--sound/soc/intel/byt-rt5640.c12
-rw-r--r--sound/soc/intel/bytcr_dpcm_rt5640.c1
-rw-r--r--sound/soc/intel/cht_bsw_rt5645.c326
-rw-r--r--sound/soc/intel/cht_bsw_rt5672.c15
-rw-r--r--sound/soc/intel/sst-baytrail-pcm.c6
-rw-r--r--sound/soc/intel/sst-dsp.c3
-rw-r--r--sound/soc/intel/sst-firmware.c16
-rw-r--r--sound/soc/intel/sst-haswell-dsp.c17
-rw-r--r--sound/soc/intel/sst-haswell-ipc.c211
-rw-r--r--sound/soc/intel/sst-haswell-ipc.h36
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c232
-rw-r--r--sound/soc/intel/sst-mfld-platform-pcm.c7
-rw-r--r--sound/soc/intel/sst/sst.h3
-rw-r--r--sound/soc/intel/sst/sst_acpi.c11
-rw-r--r--sound/soc/intel/sst/sst_loader.c3
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c21
-rw-r--r--sound/soc/mxs/mxs-saif.c10
-rw-r--r--sound/soc/mxs/mxs-saif.h1
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c27
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c6
-rw-r--r--sound/soc/omap/ams-delta.c18
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c1
-rw-r--r--sound/soc/omap/omap-mcbsp.c2
-rw-r--r--sound/soc/omap/omap-twl4030.c20
-rw-r--r--sound/soc/omap/rx51.c8
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/pxa/corgi.c16
-rw-r--r--sound/soc/pxa/e740_wm9705.c20
-rw-r--r--sound/soc/pxa/e750_wm9705.c20
-rw-r--r--sound/soc/pxa/hx4700.c8
-rw-r--r--sound/soc/pxa/magician.c21
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c2
-rw-r--r--sound/soc/pxa/palm27x.c15
-rw-r--r--sound/soc/pxa/raumfeld.c35
-rw-r--r--sound/soc/pxa/spitz.c23
-rw-r--r--sound/soc/pxa/ttc-dkb.c4
-rw-r--r--sound/soc/pxa/zylonite.c12
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c5
-rw-r--r--sound/soc/samsung/Kconfig15
-rw-r--r--sound/soc/samsung/Makefile2
-rw-r--r--sound/soc/samsung/arndale_rt5631.c1
-rw-r--r--sound/soc/samsung/goni_wm8994.c304
-rw-r--r--sound/soc/samsung/h1940_uda1380.c15
-rw-r--r--sound/soc/samsung/i2s.c362
-rw-r--r--sound/soc/samsung/jive_wm8750.c34
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c25
-rw-r--r--sound/soc/samsung/odroidx2_max98090.c6
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c15
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c20
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c12
-rw-r--r--sound/soc/samsung/smartq_wm8987.c16
-rw-r--r--sound/soc/samsung/smdk_wm8580.c26
-rw-r--r--sound/soc/samsung/smdk_wm8580pcm.c19
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c16
-rw-r--r--sound/soc/sh/dma-sh7760.c6
-rw-r--r--sound/soc/sh/fsi.c15
-rw-r--r--sound/soc/sh/migor.c12
-rw-r--r--sound/soc/sh/rcar/adg.c14
-rw-r--r--sound/soc/sh/rcar/core.c111
-rw-r--r--sound/soc/sh/rcar/dvc.c63
-rw-r--r--sound/soc/sh/rcar/gen.c15
-rw-r--r--sound/soc/sh/rcar/rsnd.h81
-rw-r--r--sound/soc/sh/rcar/src.c269
-rw-r--r--sound/soc/sh/rcar/ssi.c100
-rw-r--r--sound/soc/sh/siu_pcm.c1
-rw-r--r--sound/soc/soc-ac97.c36
-rw-r--r--sound/soc/soc-compress.c9
-rw-r--r--sound/soc/soc-core.c209
-rw-r--r--sound/soc/soc-dapm.c125
-rw-r--r--sound/soc/soc-devres.c2
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c15
-rw-r--r--sound/soc/soc-pcm.c33
-rw-r--r--sound/soc/tegra/Kconfig10
-rw-r--r--sound/soc/tegra/Makefile2
-rw-r--r--sound/soc/tegra/tegra_rt5677.c347
-rw-r--r--sound/soc/txx9/txx9aclc.c6
-rw-r--r--sound/soc/ux500/mop500_ab8500.c16
-rw-r--r--sound/soc/xtensa/Kconfig7
-rw-r--r--sound/soc/xtensa/Makefile3
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c675
-rw-r--r--sound/sparc/amd7930.c2
-rw-r--r--sound/synth/emux/emux.c10
-rw-r--r--sound/synth/emux/emux_hwdep.c2
-rw-r--r--sound/synth/emux/emux_oss.c2
-rw-r--r--sound/synth/emux/emux_synth.c6
-rw-r--r--sound/synth/emux/soundfont.c2
-rw-r--r--sound/usb/Kconfig2
-rw-r--r--sound/usb/Makefile1
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/line6/Kconfig42
-rw-r--r--sound/usb/line6/Makefile18
-rw-r--r--sound/usb/line6/capture.c275
-rw-r--r--sound/usb/line6/capture.h (renamed from drivers/staging/line6/capture.h)8
-rw-r--r--sound/usb/line6/driver.c666
-rw-r--r--sound/usb/line6/driver.h (renamed from drivers/staging/line6/driver.h)168
-rw-r--r--sound/usb/line6/midi.c (renamed from drivers/staging/line6/midi.c)97
-rw-r--r--sound/usb/line6/midi.h (renamed from drivers/staging/line6/midi.h)41
-rw-r--r--sound/usb/line6/midibuf.c (renamed from drivers/staging/line6/midibuf.c)22
-rw-r--r--sound/usb/line6/midibuf.h (renamed from drivers/staging/line6/midibuf.h)5
-rw-r--r--sound/usb/line6/pcm.c588
-rw-r--r--sound/usb/line6/pcm.h197
-rw-r--r--sound/usb/line6/playback.c (renamed from drivers/staging/line6/playback.c)337
-rw-r--r--sound/usb/line6/playback.h (renamed from drivers/staging/line6/playback.h)8
-rw-r--r--sound/usb/line6/pod.c (renamed from drivers/staging/line6/pod.c)308
-rw-r--r--sound/usb/line6/podhd.c192
-rw-r--r--sound/usb/line6/toneport.c (renamed from drivers/staging/line6/toneport.c)460
-rw-r--r--sound/usb/line6/variax.c (renamed from drivers/staging/line6/variax.c)153
-rw-r--r--sound/usb/midi.c5
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--sound/usb/pcm.c9
-rw-r--r--sound/usb/quirks-table.h22
-rw-r--r--sound/usb/usx2y/usb_stream.h78
-rw-r--r--tools/hv/Makefile4
-rw-r--r--tools/hv/hv_fcopy_daemon.c10
-rw-r--r--tools/hv/hv_kvp_daemon.c41
-rw-r--r--tools/include/asm-generic/bitops.h2
-rw-r--r--tools/include/asm-generic/bitops/arch_hweight.h1
-rw-r--r--tools/include/asm-generic/bitops/const_hweight.h1
-rw-r--r--tools/include/asm-generic/bitops/hweight.h7
-rw-r--r--tools/include/linux/bitops.h7
-rw-r--r--tools/lib/api/fs/debugfs.c45
-rw-r--r--tools/lib/api/fs/debugfs.h3
-rw-r--r--tools/lib/api/fs/fs.c2
-rw-r--r--tools/lib/lockdep/.gitignore1
-rw-r--r--tools/lib/lockdep/Makefile2
-rw-r--r--tools/lib/traceevent/event-parse.c328
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt2
-rw-r--r--tools/perf/Documentation/perf-list.txt13
-rw-r--r--tools/perf/Documentation/perf-mem.txt9
-rw-r--r--tools/perf/Documentation/perf-record.txt19
-rw-r--r--tools/perf/Documentation/perf-script.txt28
-rw-r--r--tools/perf/Documentation/perf-stat.txt20
-rw-r--r--tools/perf/MANIFEST6
-rw-r--r--tools/perf/Makefile.perf11
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c19
-rw-r--r--tools/perf/bench/futex.h13
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-diff.c248
-rw-r--r--tools/perf/builtin-inject.c5
-rw-r--r--tools/perf/builtin-mem.c131
-rw-r--r--tools/perf/builtin-record.c70
-rw-r--r--tools/perf/builtin-report.c16
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-top.c3
-rw-r--r--tools/perf/builtin-trace.c106
-rw-r--r--tools/perf/config/Makefile8
-rw-r--r--tools/perf/config/Makefile.arch26
-rw-r--r--tools/perf/config/feature-checks/Makefile4
-rw-r--r--tools/perf/config/feature-checks/test-all.c5
-rw-r--r--tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c14
-rw-r--r--tools/perf/perf-sys.h1
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Context.c5
-rw-r--r--tools/perf/tests/attr.py1
-rw-r--r--tools/perf/tests/dwarf-unwind.c36
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/make1
-rw-r--r--tools/perf/tests/parse-events.c60
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/ui/browsers/annotate.c3
-rw-r--r--tools/perf/ui/hist.c12
-rw-r--r--tools/perf/ui/progress.h4
-rw-r--r--tools/perf/ui/tui/helpline.c3
-rw-r--r--tools/perf/ui/tui/setup.c3
-rw-r--r--tools/perf/util/annotate.c20
-rw-r--r--tools/perf/util/annotate.h8
-rw-r--r--tools/perf/util/cache.h2
-rw-r--r--tools/perf/util/color.c126
-rw-r--r--tools/perf/util/color.h2
-rw-r--r--tools/perf/util/dso.c6
-rw-r--r--tools/perf/util/dso.h1
-rw-r--r--tools/perf/util/evlist.c27
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/evsel.c4
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c48
-rw-r--r--tools/perf/util/hist.h11
-rw-r--r--tools/perf/util/hweight.c31
-rw-r--r--tools/perf/util/include/asm/hweight.h8
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/map.h16
-rw-r--r--tools/perf/util/parse-events.c27
-rw-r--r--tools/perf/util/parse-events.h3
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/parse-events.y26
-rw-r--r--tools/perf/util/parse-options.c2
-rw-r--r--tools/perf/util/pmu.c102
-rw-r--r--tools/perf/util/probe-event.c38
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/session.c8
-rw-r--r--tools/perf/util/session.h3
-rw-r--r--tools/perf/util/sort.c37
-rw-r--r--tools/perf/util/symbol-elf.c13
-rw-r--r--tools/perf/util/symbol.c33
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/unwind-libunwind.c59
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslibcfs.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/power/x86/turbostat/turbostat.8126
-rw-r--r--tools/power/x86/turbostat/turbostat.c662
-rwxr-xr-xtools/testing/ktest/ktest.pl259
-rw-r--r--tools/testing/selftests/exec/execveat.c19
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c3
-rw-r--r--tools/testing/selftests/powerpc/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/copyloops/.gitignore4
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile9
-rw-r--r--tools/testing/selftests/powerpc/mm/subpage_prot.c220
-rw-r--r--tools/testing/selftests/powerpc/pmu/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore22
-rw-r--r--tools/testing/selftests/powerpc/primitives/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile20
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h7
-rw-r--r--tools/testing/selftests/powerpc/stringloops/memcmp.c103
l---------tools/testing/selftests/powerpc/stringloops/memcmp_64.S1
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh18
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh9
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh20
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh2
-rw-r--r--tools/testing/selftests/vm/Makefile2
-rw-r--r--tools/usb/ffs-aio-example/multibuff/host_app/test.c14
-rw-r--r--tools/usb/ffs-aio-example/simple/device_app/aio_simple.c2
-rw-r--r--tools/usb/ffs-aio-example/simple/host_app/test.c17
-rw-r--r--tools/vm/page-types.c1
-rw-r--r--virt/kvm/Kconfig10
-rw-r--r--virt/kvm/arm/arch_timer.c3
-rw-r--r--virt/kvm/arm/vgic-v2-emul.c847
-rw-r--r--virt/kvm/arm/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic-v3-emul.c1036
-rw-r--r--virt/kvm/arm/vgic-v3.c82
-rw-r--r--virt/kvm/arm/vgic.c1127
-rw-r--r--virt/kvm/arm/vgic.h123
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/kvm_main.c194
7581 files changed, 306552 insertions, 173724 deletions
diff --git a/.gitignore b/.gitignore
index ce57b79670a5..9ac91060ea64 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,6 +53,11 @@ Module.symvers
/debian/
#
+# tar directory (make tar*-pkg)
+#
+/tar-install/
+
+#
# git files that we don't want to ignore even it they are dot-files
#
!.gitignore
diff --git a/.mailmap b/.mailmap
index d357e1bd2a43..0d971cfb0772 100644
--- a/.mailmap
+++ b/.mailmap
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1750fcef1ab4..cd077ca0e1b8 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -29,8 +29,6 @@ DMA-ISA-LPC.txt
- How to do DMA with ISA (and LPC) devices.
DMA-attributes.txt
- listing of the various possible attributes a DMA region can have
-dmatest.txt
- - how to compile, configure and use the dmatest system.
DocBook/
- directory with DocBook templates etc. for kernel documentation.
EDID/
@@ -163,8 +161,6 @@ digsig.txt
-info on the Digital Signature Verification API
dma-buf-sharing.txt
- the DMA Buffer Sharing API Guide
-dmaengine.txt
- -the DMA Engine API Guide
dontdiff
- file containing a list of files that should never be diff'ed.
driver-model/
@@ -209,6 +205,8 @@ hid/
- directory with information on human interface devices
highuid.txt
- notes on the change from 16 bit to 32 bit user/group IDs.
+hsi.txt
+ - HSI subsystem overview.
hwspinlock.txt
- hardware spinlock provides hardware assistance for synchronization
timers/
@@ -277,6 +275,8 @@ kprobes.txt
- documents the kernel probes debugging feature.
kref.txt
- docs on adding reference counters (krefs) to kernel objects.
+kselftest.txt
+ - small unittests for (some) individual codepaths in the kernel.
laptops/
- directory with laptop related info and laptop driver documentation.
ldm.txt
@@ -285,22 +285,22 @@ leds/
- directory with info about LED handling under Linux.
local_ops.txt
- semantics and behavior of local atomic operations.
-lockdep-design.txt
- - documentation on the runtime locking correctness validator.
locking/
- directory with info about kernel locking primitives
-lockstat.txt
- - info on collecting statistics on locks (and contention).
lockup-watchdogs.txt
- info on soft and hard lockup detectors (aka nmi_watchdog).
logo.gif
- full colour GIF image of Linux logo (penguin - Tux).
logo.txt
- info on creator of above logo & site to get additional images from.
+lzo.txt
+ - kernel LZO decompressor input formats
m68k/
- directory with info about Linux on Motorola 68k architecture.
magic-number.txt
- list of magic numbers used to mark/protect kernel data structures.
+mailbox.txt
+ - How to write drivers for the common mailbox framework (IPC).
md.txt
- info on boot arguments for the multiple devices driver.
media-framework.txt
@@ -327,8 +327,6 @@ mtd/
- directory with info about memory technology devices (flash)
mono.txt
- how to execute Mono-based .NET binaries with the help of BINFMT_MISC.
-mutex-design.txt
- - info on the generic mutex subsystem.
namespaces/
- directory with various information about namespaces
netlabel/
@@ -395,10 +393,6 @@ robust-futexes.txt
- a description of what robust futexes are.
rpmsg.txt
- info on the Remote Processor Messaging (rpmsg) Framework
-rt-mutex-design.txt
- - description of the RealTime mutex implementation design.
-rt-mutex.txt
- - desc. of RT-mutex subsystem with PI (Priority Inheritance) support.
rtc.txt
- notes on how to use the Real Time Clock (aka CMOS clock) driver.
s390/
@@ -425,8 +419,6 @@ sparse.txt
- info on how to obtain and use the sparse tool for typechecking.
spi/
- overview of Linux kernel Serial Peripheral Interface (SPI) support.
-spinlocks.txt
- - info on using spinlocks to provide exclusive access in kernel.
stable_api_nonsense.txt
- info on why the kernel does not have a stable in-kernel api or abi.
stable_kernel_rules.txt
@@ -483,10 +475,10 @@ wimax/
- directory with info about Intel Wireless Wimax Connections
workqueue.txt
- information on the Concurrency Managed Workqueue implementation
-ww-mutex-design.txt
- - Intro to Mutex wait/would deadlock handling.s
x86/x86_64/
- directory with info on Linux support for AMD x86-64 (Hammer) machines.
+xillybus.txt
+ - Overview and basic ui of xillybus driver
xtensa/
- directory with documents relating to arch/xtensa port/implementation
xz.txt
diff --git a/Documentation/ABI/stable/sysfs-class-tpm b/Documentation/ABI/stable/sysfs-class-tpm
index a60b45e2493b..9f790eebb5d2 100644
--- a/Documentation/ABI/stable/sysfs-class-tpm
+++ b/Documentation/ABI/stable/sysfs-class-tpm
@@ -1,4 +1,4 @@
-What: /sys/class/misc/tpmX/device/
+What: /sys/class/tpm/tpmX/device/
Date: April 2005
KernelVersion: 2.6.12
Contact: tpmdd-devel@lists.sf.net
@@ -6,7 +6,7 @@ Description: The device/ directory under a specific TPM instance exposes
the properties of that TPM chip
-What: /sys/class/misc/tpmX/device/active
+What: /sys/class/tpm/tpmX/device/active
Date: April 2006
KernelVersion: 2.6.17
Contact: tpmdd-devel@lists.sf.net
@@ -18,7 +18,7 @@ Description: The "active" property prints a '1' if the TPM chip is accepting
section 17 for more information on which commands are
available.
-What: /sys/class/misc/tpmX/device/cancel
+What: /sys/class/tpm/tpmX/device/cancel
Date: June 2005
KernelVersion: 2.6.13
Contact: tpmdd-devel@lists.sf.net
@@ -26,7 +26,7 @@ Description: The "cancel" property allows you to cancel the currently
pending TPM command. Writing any value to cancel will call the
TPM vendor specific cancel operation.
-What: /sys/class/misc/tpmX/device/caps
+What: /sys/class/tpm/tpmX/device/caps
Date: April 2005
KernelVersion: 2.6.12
Contact: tpmdd-devel@lists.sf.net
@@ -43,7 +43,7 @@ Description: The "caps" property contains TPM manufacturer and version info.
the chip supports. Firmware version is that of the chip and
is manufacturer specific.
-What: /sys/class/misc/tpmX/device/durations
+What: /sys/class/tpm/tpmX/device/durations
Date: March 2011
KernelVersion: 3.1
Contact: tpmdd-devel@lists.sf.net
@@ -66,7 +66,7 @@ Description: The "durations" property shows the 3 vendor-specific values
scaled to be displayed in usecs. In this case "[adjusted]"
will be displayed in place of "[original]".
-What: /sys/class/misc/tpmX/device/enabled
+What: /sys/class/tpm/tpmX/device/enabled
Date: April 2006
KernelVersion: 2.6.17
Contact: tpmdd-devel@lists.sf.net
@@ -75,7 +75,7 @@ Description: The "enabled" property prints a '1' if the TPM chip is enabled,
may be visible but produce a '0' after some operation that
disables the TPM.
-What: /sys/class/misc/tpmX/device/owned
+What: /sys/class/tpm/tpmX/device/owned
Date: April 2006
KernelVersion: 2.6.17
Contact: tpmdd-devel@lists.sf.net
@@ -83,7 +83,7 @@ Description: The "owned" property produces a '1' if the TPM_TakeOwnership
ordinal has been executed successfully in the chip. A '0'
indicates that ownership hasn't been taken.
-What: /sys/class/misc/tpmX/device/pcrs
+What: /sys/class/tpm/tpmX/device/pcrs
Date: April 2005
KernelVersion: 2.6.12
Contact: tpmdd-devel@lists.sf.net
@@ -106,7 +106,7 @@ Description: The "pcrs" property will dump the current value of all Platform
1.2 chips, PCRs represent SHA-1 hashes, which are 20 bytes
long. Use the "caps" property to determine TPM version.
-What: /sys/class/misc/tpmX/device/pubek
+What: /sys/class/tpm/tpmX/device/pubek
Date: April 2005
KernelVersion: 2.6.12
Contact: tpmdd-devel@lists.sf.net
@@ -158,7 +158,7 @@ Description: The "pubek" property will return the TPM's public endorsement
Modulus Length: 256 (bytes)
Modulus: The 256 byte Endorsement Key modulus
-What: /sys/class/misc/tpmX/device/temp_deactivated
+What: /sys/class/tpm/tpmX/device/temp_deactivated
Date: April 2006
KernelVersion: 2.6.17
Contact: tpmdd-devel@lists.sf.net
@@ -167,7 +167,7 @@ Description: The "temp_deactivated" property returns a '1' if the chip has
cycle. Whether a warm boot (reboot) will clear a TPM chip
from a temp_deactivated state is platform specific.
-What: /sys/class/misc/tpmX/device/timeouts
+What: /sys/class/tpm/tpmX/device/timeouts
Date: March 2011
KernelVersion: 3.1
Contact: tpmdd-devel@lists.sf.net
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
new file mode 100644
index 000000000000..2f4a0051b32d
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -0,0 +1,265 @@
+What: /config/usb-gadget/gadget/functions/uvc.name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: UVC function directory
+
+ streaming_maxburst - 0..15 (ss only)
+ streaming_maxpacket - 1..1023 (fs), 1..3072 (hs/ss)
+ streaming_interval - 1..16
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Control descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/class
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Super speed control class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Full speed control class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Terminal descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Output terminal descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Default output terminal descriptors
+
+ All attributes read only:
+ iTerminal - index of string descriptor
+ bSourceID - id of the terminal to which this terminal
+ is connected
+ bAssocTerminal - id of the input terminal to which this output
+ terminal is associated
+ wTerminalType - terminal type
+ bTerminalID - a non-zero id of this terminal
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Camera terminal descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Default camera terminal descriptors
+
+ All attributes read only:
+ bmControls - bitmap specifying which controls are
+ supported for the video stream
+ wOcularFocalLength - the value of Locular
+ wObjectiveFocalLengthMax- the value of Lmin
+ wObjectiveFocalLengthMin- the value of Lmax
+ iTerminal - index of string descriptor
+ bAssocTerminal - id of the output terminal to which
+ this terminal is connected
+ wTerminalType - terminal type
+ bTerminalID - a non-zero id of this terminal
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/processing
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Processing unit descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Default processing unit descriptors
+
+ All attributes read only:
+ iProcessing - index of string descriptor
+ bmControls - bitmap specifying which controls are
+ supported for the video stream
+ wMaxMultiplier - maximum digital magnification x100
+ bSourceID - id of the terminal to which this unit is
+ connected
+ bUnitID - a non-zero id of this unit
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/header
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Control header descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Specific control header descriptors
+
+dwClockFrequency
+bcdUVC
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Streaming descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Streaming class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Super speed streaming class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
+Date: Dec 2014
+KernelVersion: 3.20
+Description: High speed streaming class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Full speed streaming class descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Color matching descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Default color matching descriptors
+
+ All attributes read only:
+ bMatrixCoefficients - matrix used to compute luma and
+ chroma values from the color primaries
+ bTransferCharacteristics- optoelectronic transfer
+ characteristic of the source picture,
+ also called the gamma function
+ bColorPrimaries - color primaries and the reference
+ white
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
+Date: Dec 2014
+KernelVersion: 3.20
+Description: MJPEG format descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Specific MJPEG format descriptors
+
+ All attributes read only,
+ except bmaControls and bDefaultFrameIndex:
+ bmaControls - this format's data for bmaControls in
+ the streaming header
+ bmInterfaceFlags - specifies interlace information,
+ read-only
+ bAspectRatioY - the X dimension of the picture aspect
+ ratio, read-only
+ bAspectRatioX - the Y dimension of the picture aspect
+ ratio, read-only
+ bmFlags - characteristics of this format,
+ read-only
+ bDefaultFrameIndex - optimum frame index for this stream
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Specific MJPEG frame descriptors
+
+ dwFrameInterval - indicates how frame interval can be
+ programmed; a number of values
+ separated by newline can be specified
+ dwDefaultFrameInterval - the frame interval the device would
+ like to use as default
+ dwMaxVideoFrameBufferSize- the maximum number of bytes the
+ compressor will produce for a video
+ frame or still image
+ dwMaxBitRate - the maximum bit rate at the shortest
+ frame interval in bps
+ dwMinBitRate - the minimum bit rate at the longest
+ frame interval in bps
+ wHeight - height of decoded bitmap frame in px
+ wWidth - width of decoded bitmap frame in px
+ bmCapabilities - still image support, fixed frame-rate
+ support
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Uncompressed format descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Specific uncompressed format descriptors
+
+ bmaControls - this format's data for bmaControls in
+ the streaming header
+ bmInterfaceFlags - specifies interlace information,
+ read-only
+ bAspectRatioY - the X dimension of the picture aspect
+ ratio, read-only
+ bAspectRatioX - the Y dimension of the picture aspect
+ ratio, read-only
+ bDefaultFrameIndex - optimum frame index for this stream
+ bBitsPerPixel - number of bits per pixel used to
+ specify color in the decoded video
+ frame
+ guidFormat - globally unique id used to identify
+ stream-encoding format
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Specific uncompressed frame descriptors
+
+ dwFrameInterval - indicates how frame interval can be
+ programmed; a number of values
+ separated by newline can be specified
+ dwDefaultFrameInterval - the frame interval the device would
+ like to use as default
+ dwMaxVideoFrameBufferSize- the maximum number of bytes the
+ compressor will produce for a video
+ frame or still image
+ dwMaxBitRate - the maximum bit rate at the shortest
+ frame interval in bps
+ dwMinBitRate - the minimum bit rate at the longest
+ frame interval in bps
+ wHeight - height of decoded bitmap frame in px
+ wWidth - width of decoded bitmap frame in px
+ bmCapabilities - still image support, fixed frame-rate
+ support
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Streaming header descriptors
+
+What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
+Date: Dec 2014
+KernelVersion: 3.20
+Description: Specific streaming header descriptors
+
+ All attributes read only:
+ bTriggerUsage - how the host software will respond to
+ a hardware trigger interrupt event
+ bTriggerSupport - flag specifying if hardware
+ triggering is supported
+ bStillCaptureMethod - method of still image capture
+ supported
+ bTerminalLink - id of the output terminal to which
+ the video endpoint of this interface
+ is connected
+ bmInfo - capabilities of this video streaming
+ interface
diff --git a/Documentation/ABI/testing/sysfs-bus-amba b/Documentation/ABI/testing/sysfs-bus-amba
new file mode 100644
index 000000000000..e7b54677cfbe
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-amba
@@ -0,0 +1,20 @@
+What: /sys/bus/amba/devices/.../driver_override
+Date: September 2014
+Contact: Antonios Motakis <a.motakis@virtualopensystems.com>
+Description:
+ This file allows the driver for a device to be specified which
+ will override standard OF, ACPI, ID table, and name matching.
+ When specified, only a driver with a name matching the value
+ written to driver_override will have an opportunity to bind to
+ the device. The override is specified by writing a string to the
+ driver_override file (echo vfio-amba > driver_override) and may
+ be cleared with an empty string (echo > driver_override).
+ This returns the device to standard matching rules binding.
+ Writing to driver_override does not automatically unbind the
+ device from its current driver or make any attempt to
+ automatically load the specified driver. If no driver with a
+ matching name is currently loaded in the kernel, the device will
+ not bind to any driver. This also allows devices to opt-out of
+ driver binding using a driver_override name such as "none".
+ Only a single driver may be specified in the override, there is
+ no support for parsing delimiters.
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
index 20979f8b3edb..505f080d20a1 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
@@ -52,12 +52,18 @@ Description: Per-pmu performance monitoring events specific to the running syste
event=0x2abc
event=0x423,inv,cmask=0x3
domain=0x1,offset=0x8,starting_index=0xffff
+ domain=0x1,offset=0x8,core=?
Each of the assignments indicates a value to be assigned to a
particular set of bits (as defined by the format file
corresponding to the <term>) in the perf_event structure passed
to the perf_open syscall.
+ In the case of the last example, a value replacing "?" would
+ need to be provided by the user selecting the particular event.
+ This is referred to as "event parameterization". Event
+ parameters have the format 'param=?'.
+
What: /sys/bus/event_source/devices/<pmu>/events/<event>.unit
Date: 2014/02/24
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
index 32f3f5f8bba2..f893337570c1 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
@@ -21,3 +21,25 @@ Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
Exposes the "version" field of the 24x7 catalog. This is also
extractable from the provided binary "catalog" sysfs entry.
+
+What: /sys/bus/event_source/devices/hv_24x7/event_descs/<event-name>
+Date: February 2014
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
+Description:
+ Provides the description of a particular event as provided by
+ the firmware. If firmware does not provide a description, no
+ file will be created.
+
+ Note that the event-name lacks the domain suffix appended for
+ events in the events/ dir.
+
+What: /sys/bus/event_source/devices/hv_24x7/event_long_descs/<event-name>
+Date: February 2014
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
+Description:
+ Provides the "long" description of a particular event as
+ provided by the firmware. If firmware does not provide a
+ description, no file will be created.
+
+ Note that the event-name lacks the domain suffix appended for
+ events in the events/ dir.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 117521dbf2b3..9a70c31619ea 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -92,6 +92,18 @@ Description:
is required is a consistent labeling. Units after application
of scale and offset are millivolts.
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_supply_raw
+KernelVersion: 3.17
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw (unscaled no bias removal etc.) current measurement from
+ channel Y. In special cases where the channel does not
+ correspond to externally available input one of the named
+ versions may be used. The number must always be specified and
+ unique to allow association with event codes. Units after
+ application of scale and offset are milliamps.
+
What: /sys/bus/iio/devices/iio:deviceX/in_capacitanceY_raw
KernelVersion: 3.2
Contact: linux-iio@vger.kernel.org
@@ -234,6 +246,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_offset
What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_offset
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_offset
What: /sys/bus/iio/devices/iio:deviceX/in_voltage_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_current_offset
What: /sys/bus/iio/devices/iio:deviceX/in_tempY_offset
What: /sys/bus/iio/devices/iio:deviceX/in_temp_offset
What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_offset
@@ -262,9 +276,14 @@ What: /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_scale
What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_supply_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_current_scale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_energy_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_distance_scale
What: /sys/bus/iio/devices/iio:deviceX/in_magn_scale
What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_scale
What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_scale
@@ -276,6 +295,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_rot_from_north_true_tilt_comp_scale
What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale
What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -323,6 +343,44 @@ Description:
production inaccuracies). If shared across all channels,
<type>_calibscale is used.
+What: /sys/bus/iio/devices/iio:deviceX/in_activity_calibgender
+What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibgender
+What: /sys/bus/iio/devices/iio:deviceX/in_distance_calibgender
+What: /sys/bus/iio/devices/iio:deviceX/in_velocity_calibgender
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Gender of the user (e.g.: male, female) used by some pedometers
+ to compute the stride length, distance, speed and activity
+ type.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_activity_calibgender_available
+What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibgender_available
+What: /sys/bus/iio/devices/iio:deviceX/in_distance_calibgender_available
+What: /sys/bus/iio/devices/iio:deviceX/in_velocity_calibgender_available
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Lists all available gender values (e.g.: male, female).
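+
+ A minimal usage sketch (iio:device0 is a placeholder device name):
+
+   cat /sys/bus/iio/devices/iio:device0/in_activity_calibgender_available
+   echo male > /sys/bus/iio/devices/iio:device0/in_activity_calibgender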
+
+What: /sys/bus/iio/devices/iio:deviceX/in_activity_calibheight
+What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibheight
+What: /sys/bus/iio/devices/iio:deviceX/in_distance_calibheight
+What: /sys/bus/iio/devices/iio:deviceX/in_velocity_calibheight
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ Height of the user (in meters) used by some pedometers
+ to compute the stride length, distance, speed and activity
+ type.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibweight
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Weight of the user (in kg). It is needed by some pedometers
+ to compute the calories burnt by the user.
+
What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
What: /sys/.../iio:deviceX/in_voltageX_scale_available
What: /sys/.../iio:deviceX/in_voltage-voltage_scale_available
@@ -783,6 +841,14 @@ What: /sys/.../events/in_tempY_roc_falling_period
What: /sys/.../events/in_accel_x&y&z_mag_falling_period
What: /sys/.../events/in_intensity0_thresh_period
What: /sys/.../events/in_proximity0_thresh_period
+What: /sys/.../events/in_activity_still_thresh_rising_period
+What: /sys/.../events/in_activity_still_thresh_falling_period
+What: /sys/.../events/in_activity_walking_thresh_rising_period
+What: /sys/.../events/in_activity_walking_thresh_falling_period
+What: /sys/.../events/in_activity_jogging_thresh_rising_period
+What: /sys/.../events/in_activity_jogging_thresh_falling_period
+What: /sys/.../events/in_activity_running_thresh_rising_period
+What: /sys/.../events/in_activity_running_thresh_falling_period
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -790,6 +856,40 @@ Description:
met before an event is generated. If direction is not
specified then this period applies to both directions.
+What: /sys/.../events/in_activity_still_thresh_rising_en
+What: /sys/.../events/in_activity_still_thresh_falling_en
+What: /sys/.../events/in_activity_walking_thresh_rising_en
+What: /sys/.../events/in_activity_walking_thresh_falling_en
+What: /sys/.../events/in_activity_jogging_thresh_rising_en
+What: /sys/.../events/in_activity_jogging_thresh_falling_en
+What: /sys/.../events/in_activity_running_thresh_rising_en
+What: /sys/.../events/in_activity_running_thresh_falling_en
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ Enables or disables activity events. Depending on direction,
+ an event is generated when the sensor ENTERS or LEAVES a given state.
+
+What: /sys/.../events/in_activity_still_thresh_rising_value
+What: /sys/.../events/in_activity_still_thresh_falling_value
+What: /sys/.../events/in_activity_walking_thresh_rising_value
+What: /sys/.../events/in_activity_walking_thresh_falling_value
+What: /sys/.../events/in_activity_jogging_thresh_rising_value
+What: /sys/.../events/in_activity_jogging_thresh_falling_value
+What: /sys/.../events/in_activity_running_thresh_rising_value
+What: /sys/.../events/in_activity_running_thresh_falling_value
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ Confidence value (expressed as a percentage) to be used
+ for deciding when an event should be generated. E.g. for
+ running: if the confidence value reported by the sensor
+ is greater than in_activity_running_thresh_rising_value
+ then the sensor ENTERS the running state. Conversely, if the
+ confidence value reported by the sensor is lower than
+ in_activity_running_thresh_falling_value then the sensor
+ LEAVES the running state.
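+
+ As an illustrative sketch (the 70 percent threshold is an
+ arbitrary example value), a rising "running" event could be
+ configured from the device's sysfs directory with:
+
+   echo 70 > events/in_activity_running_thresh_rising_value
+   echo 1 > events/in_activity_running_thresh_rising_en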
+
What: /sys/.../iio:deviceX/events/in_accel_mag_en
What: /sys/.../iio:deviceX/events/in_accel_mag_rising_en
What: /sys/.../iio:deviceX/events/in_accel_mag_falling_en
@@ -822,6 +922,25 @@ Description:
number or direction is not specified, applies to all channels of
this type.
+What: /sys/.../events/in_steps_change_en
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Event generated when channel passes a threshold on the absolute
+ change in value. E.g. for steps: a step change event is
+ generated each time the user takes N steps, where N is set using
+ in_steps_change_value.
+
+What: /sys/.../events/in_steps_change_value
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the value of the change threshold that the
+ device is comparing against for the events enabled by
+ <type>[Y][_name]_change_en. E.g. for steps:
+ if set to 3, a step change event will be generated every 3
+ steps.
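+
+ For example, matching the description above, a step change
+ event every 3 steps could be requested from the device's
+ sysfs directory with:
+
+   echo 3 > events/in_steps_change_value
+   echo 1 > events/in_steps_change_en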
+
What: /sys/bus/iio/devices/iio:deviceX/trigger/current_trigger
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
@@ -956,6 +1075,16 @@ Description:
and the relevant _type attributes to establish the data storage
format.
+What: /sys/.../iio:deviceX/in_activity_still_input
+What: /sys/.../iio:deviceX/in_activity_walking_input
+What: /sys/.../iio:deviceX/in_activity_jogging_input
+What: /sys/.../iio:deviceX/in_activity_running_input
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to read the confidence for an activity,
+ expressed as a percentage.
+
What: /sys/.../iio:deviceX/in_anglvel_z_quadrature_correction_raw
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
@@ -973,6 +1102,24 @@ Description:
For a list of available output power modes read
in_accel_power_mode_available.
+What: /sys/.../iio:deviceX/in_energy_input
+What: /sys/.../iio:deviceX/in_energy_raw
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to read the energy value reported by the
+ device (e.g.: human activity sensors report energy burnt by the
+ user). Units after application of scale are Joules.
+
+What: /sys/.../iio:deviceX/in_distance_input
+What: /sys/.../iio:deviceX/in_distance_raw
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to read the distance covered by the user
+ since the last reboot while activated. Units after application
+ of scale are meters.
+
What: /sys/bus/iio/devices/iio:deviceX/store_eeprom
KernelVersion: 3.4.0
Contact: linux-iio@vger.kernel.org
@@ -992,7 +1139,9 @@ Description:
reflectivity of infrared or ultrasound emitted.
Often these sensors are unit less and as such conversion
to SI units is not possible. Where it is, the units should
- be meters.
+ be meters. If such a conversion is not possible, the reported
+ values should behave in the same way as a distance, i.e. lower
+ values indicate something is closer to the sensor.
What: /sys/.../iio:deviceX/in_illuminanceY_input
What: /sys/.../iio:deviceX/in_illuminanceY_raw
@@ -1024,6 +1173,12 @@ Description:
This attribute is used to get/set the integration time in
seconds.
+What: /sys/.../iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_integration_time
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Number of seconds in which to compute speed.
+
What: /sys/bus/iio/devices/iio:deviceX/in_rot_quaternion_raw
KernelVersion: 3.15
Contact: linux-iio@vger.kernel.org
@@ -1051,3 +1206,46 @@ Description:
after application of scale and offset. If no offset or scale is
present, output should be considered as processed with the
unit in milliamps.
+
+What: /sys/.../iio:deviceX/in_energy_en
+What: /sys/.../iio:deviceX/in_distance_en
+What: /sys/.../iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_en
+What: /sys/.../iio:deviceX/in_steps_en
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ Activates a device feature that runs in firmware/hardware.
+ E.g. for steps: the pedometer saves power while not used;
+ when activated, it will count the steps taken by the user in
+ firmware and export them through in_steps_input.
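+
+ A minimal sketch of the intended flow, assuming a pedometer
+ exposed as iio:device0 (placeholder name):
+
+   echo 1 > /sys/bus/iio/devices/iio:device0/in_steps_en
+   cat /sys/bus/iio/devices/iio:device0/in_steps_input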
+
+What: /sys/.../iio:deviceX/in_steps_input
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to read the number of steps taken by the user
+ since the last reboot while activated.
+
+What: /sys/.../iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_input
+What: /sys/.../iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_raw
+KernelVersion: 3.19
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to read the current speed value of the
+ user (which is the norm or magnitude of the velocity vector).
+ Units after application of scale are m/s.
+
+What: /sys/.../iio:deviceX/in_steps_debounce_count
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the number of steps that must occur within
+ in_steps_debounce_time for the pedometer to decide the
+ consumer is making steps.
+
+What: /sys/.../iio:deviceX/in_steps_debounce_time
+KernelVersion: 3.20
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the number of seconds over which steps are counted
+ in order to decide whether the consumer is making steps.
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 554405ec1955..3680364b4048 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -1,3 +1,9 @@
+Note: Attributes that are shared between devices are stored in the directory
+pointed to by the symlink device/.
+Example: The real path of the attribute /sys/class/cxl/afu0.0s/irqs_max is
+/sys/class/cxl/afu0.0s/device/irqs_max, i.e. /sys/class/cxl/afu0.0/irqs_max.
+
+
Slave contexts (eg. /sys/class/cxl/afu0.0s):
What: /sys/class/cxl/<afu>/irqs_max
@@ -67,7 +73,7 @@ Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the current version of the kernel/user API.
-What: /sys/class/cxl/<afu>/api_version_com
+What: /sys/class/cxl/<afu>/api_version_compatible
Date: September 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
@@ -75,6 +81,42 @@ Description: read only
this kernel supports.
+AFU configuration records (eg. /sys/class/cxl/afu0.0/cr0):
+
+An AFU may optionally export one or more PCIe like configuration records, known
+as AFU configuration records, which will show up here (if present).
+
+What: /sys/class/cxl/<afu>/cr<config num>/vendor
+Date: February 2015
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Hexadecimal value of the vendor ID found in this AFU
+ configuration record.
+
+What: /sys/class/cxl/<afu>/cr<config num>/device
+Date: February 2015
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Hexadecimal value of the device ID found in this AFU
+ configuration record.
+
+What: /sys/class/cxl/<afu>/cr<config num>/class
+Date: February 2015
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Hexadecimal value of the class code found in this AFU
+ configuration record.
+
+What: /sys/class/cxl/<afu>/cr<config num>/config
+Date: February 2015
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ This binary file provides raw access to the AFU configuration
+ record. The format is expected to match either the standard
+ or extended configuration space defined by the PCIe
+ specification.
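+
+ For example, using the afu0.0/cr0 path from the heading above,
+ the raw record can be inspected with a generic hex dump tool:
+
+   hexdump -C /sys/class/cxl/afu0.0/cr0/config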
+
+
Master contexts (eg. /sys/class/cxl/afu0.0m)
@@ -106,7 +148,7 @@ Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Identifies the CAIA Version the card implements.
-What: /sys/class/cxl/<card>/psl_version
+What: /sys/class/cxl/<card>/psl_revision
Date: September 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
@@ -127,3 +169,24 @@ Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Will return "user" or "factory" depending on the image loaded
onto the card.
+
+What: /sys/class/cxl/<card>/load_image_on_perst
+Date: December 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+ Valid entries are "none", "user", and "factory".
+ "none" means PERST will not cause image to be loaded to the
+ card. A power cycle is required to load the image.
+ "none" could be useful for debugging because the trace arrays
+ are preserved.
+ "user" and "factory" mean PERST will cause either the user
+ or factory image to be loaded.
+ Default is to reload on PERST whichever image the card has
+ loaded.
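+
+ For example (card0 is a placeholder for the card name),
+ selecting the user image for the next PERST:
+
+   echo user > /sys/class/cxl/card0/load_image_on_perst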
+
+What: /sys/class/cxl/<card>/reset
+Date: October 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: write only
+ Writing 1 will issue a PERST to card which may cause the card
+ to reload the FPGA depending on load_image_on_perst.
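+
+ For example (again with card0 as a placeholder card name):
+
+   echo 1 > /sys/class/cxl/card0/reset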
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 0ec8b8178c41..80d9888a8ece 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -14,3 +14,18 @@ Description:
The /sys/class/mei/meiN directory is created for
each probed mei device
+What: /sys/class/mei/meiN/fw_status
+Date: Nov 2014
+KernelVersion: 3.19
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display fw status registers content
+
+ The ME FW writes its status information into fw status
+ registers for BIOS and OS to monitor fw health.
+
+ The registers contain the running state, power management
+ state, error codes, and other information. The way the
+ registers are decoded depends on the PCH or SoC generation,
+ and the number of registers varies between 1 and 6
+ depending on the generation.
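+
+ For example (mei0 is a placeholder device name), the registers
+ can be dumped with:
+
+   cat /sys/class/mei/mei0/fw_status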
+
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 909e7602c717..369d2a2d7d3e 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -32,3 +32,45 @@ Description:
Valid values:
- 5, 6 or 7 (hours),
- 0: disabled.
+
+What: /sys/class/power_supply/max77693-charger/device/fast_charge_timer
+Date: January 2015
+KernelVersion: 3.19.0
+Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Description:
+ This entry shows and sets the maximum time the max77693
+ charger operates in fast-charge mode. When the timer expires
+ the device will terminate fast-charge mode (charging current
+ will drop to 0 A) and will trigger an interrupt.
+
+ Valid values:
+ - 4 - 16 (hours), step by 2 (rounded down)
+ - 0: disabled.
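+
+ A minimal example (6 hours is an arbitrary in-range value):
+
+   echo 6 > /sys/class/power_supply/max77693-charger/device/fast_charge_timer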
+
+What: /sys/class/power_supply/max77693-charger/device/top_off_threshold_current
+Date: January 2015
+KernelVersion: 3.19.0
+Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Description:
+ This entry shows and sets the charging current threshold for
+ entering top-off charging mode. When charging current in fast
+ charge mode drops below this value, the charger will trigger
+ an interrupt and start top-off charging mode.
+
+ Valid values:
+ - 100000 - 200000 (microamps), step by 25000 (rounded down)
+ - 200000 - 350000 (microamps), step by 50000 (rounded down)
+ - 0: disabled.
+
+What: /sys/class/power_supply/max77693-charger/device/top_off_timer
+Date: January 2015
+KernelVersion: 3.19.0
+Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Description:
+ This entry shows and sets the maximum time the max77693
+ charger operates in top-off charge mode. When the timer expires
+ the device will terminate top-off charge mode (charging current
+ will drop to 0 A) and will trigger an interrupt.
+
+ Valid values:
+ - 0 - 70 (minutes), step by 10 (rounded down)
diff --git a/Documentation/ABI/testing/sysfs-driver-input-axp-pek b/Documentation/ABI/testing/sysfs-driver-input-axp-pek
new file mode 100644
index 000000000000..a5e671b9fa79
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-input-axp-pek
@@ -0,0 +1,11 @@
+What: /sys/class/input/input(x)/device/startup
+Date: March 2014
+Contact: Carlo Caione <carlo@caione.org>
+Description: Startup time in us. The board is powered on if the button is pressed
+ for more than <startup_time>.
+
+What: /sys/class/input/input(x)/device/shutdown
+Date: March 2014
+Contact: Carlo Caione <carlo@caione.org>
+Description: Shutdown time in us. The board is powered off if the button is pressed
+ for more than <shutdown_time>.
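+
+ An illustrative read-back sketch (input0 is a placeholder for
+ the actual input device):
+
+   cat /sys/class/input/input0/device/startup
+   cat /sys/class/input/input0/device/shutdown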
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 6f9157f16725..2c4cc42006e8 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -74,3 +74,9 @@ Date: March 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
Controls the memory footprint used by f2fs.
+
+What: /sys/fs/f2fs/<disk>/trim_sections
+Date: February 2015
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Controls the trimming rate in batch mode.
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
new file mode 100644
index 000000000000..5bf42a840b22
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -0,0 +1,44 @@
+What: /sys/kernel/livepatch
+Date: Nov 2014
+KernelVersion: 3.19.0
+Contact: live-patching@vger.kernel.org
+Description:
+ Interface for kernel live patching
+
+ The /sys/kernel/livepatch directory contains subdirectories for
+ each loaded live patch module.
+
+What: /sys/kernel/livepatch/<patch>
+Date: Nov 2014
+KernelVersion: 3.19.0
+Contact: live-patching@vger.kernel.org
+Description:
+ The patch directory contains subdirectories for each kernel
+ object (vmlinux or a module) in which it patched functions.
+
+What: /sys/kernel/livepatch/<patch>/enabled
+Date: Nov 2014
+KernelVersion: 3.19.0
+Contact: live-patching@vger.kernel.org
+Description:
+ A writable attribute that indicates whether the patched
+ code is currently applied. Writing 0 will disable the patch
+ while writing 1 will re-enable the patch.
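+
+ A minimal usage sketch (<patch> stands for the name of a
+ loaded patch module):
+
+   echo 0 > /sys/kernel/livepatch/<patch>/enabled
+   echo 1 > /sys/kernel/livepatch/<patch>/enabled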
+
+What: /sys/kernel/livepatch/<patch>/<object>
+Date: Nov 2014
+KernelVersion: 3.19.0
+Contact: live-patching@vger.kernel.org
+Description:
+ The object directory contains subdirectories for each function
+ that is patched within the object.
+
+What: /sys/kernel/livepatch/<patch>/<object>/<function>
+Date: Nov 2014
+KernelVersion: 3.19.0
+Contact: live-patching@vger.kernel.org
+Description:
+ The function directory contains attributes regarding the
+ properties and state of the patched function.
+
+ There are currently no such attributes.
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
deleted file mode 100644
index 7969443ef0ef..000000000000
--- a/Documentation/ABI/testing/sysfs-platform-dell-laptop
+++ /dev/null
@@ -1,60 +0,0 @@
-What: /sys/class/leds/dell::kbd_backlight/als_setting
-Date: December 2014
-KernelVersion: 3.19
-Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
- Pali Rohár <pali.rohar@gmail.com>
-Description:
- This file allows to control the automatic keyboard
- illumination mode on some systems that have an ambient
- light sensor. Write 1 to this file to enable the auto
- mode, 0 to disable it.
-
-What: /sys/class/leds/dell::kbd_backlight/start_triggers
-Date: December 2014
-KernelVersion: 3.19
-Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
- Pali Rohár <pali.rohar@gmail.com>
-Description:
- This file allows to control the input triggers that
- turn on the keyboard backlight illumination that is
- disabled because of inactivity.
- Read the file to see the triggers available. The ones
- enabled are preceded by '+', those disabled by '-'.
-
- To enable a trigger, write its name preceded by '+' to
- this file. To disable a trigger, write its name preceded
- by '-' instead.
-
- For example, to enable the keyboard as trigger run:
- echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
- To disable it:
- echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-
- Note that not all the available triggers can be configured.
-
-What: /sys/class/leds/dell::kbd_backlight/stop_timeout
-Date: December 2014
-KernelVersion: 3.19
-Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
- Pali Rohár <pali.rohar@gmail.com>
-Description:
- This file allows to specify the interval after which the
- keyboard illumination is disabled because of inactivity.
- The timeouts are expressed in seconds, minutes, hours and
- days, for which the symbols are 's', 'm', 'h' and 'd'
- respectively.
-
- To configure the timeout, write to this file a value along
- with any the above units. If no unit is specified, the value
- is assumed to be expressed in seconds.
-
- For example, to set the timeout to 10 minutes run:
- echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
-
- Note that when this file is read, the returned value might be
- expressed in a different unit than the one used when the timeout
- was set.
-
- Also note that only some timeouts are supported and that
- some systems might fall back to a specific timeout in case
- an invalid timeout is written to this file.
diff --git a/Documentation/Changes b/Documentation/Changes
index 74bdda9272a4..646cdaa6e9d1 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -21,8 +21,8 @@ running a Linux kernel. Also, not all tools are necessary on all
systems; obviously, if you don't have any ISDN hardware, for example,
you probably needn't concern yourself with isdn4k-utils.
-o Gnu C 3.2 # gcc --version
-o Gnu make 3.80 # make --version
+o GNU C 3.2 # gcc --version
+o GNU make 3.80 # make --version
o binutils 2.12 # ld -v
o util-linux 2.10o # fdformat --version
o module-init-tools 0.9.10 # depmod -V
@@ -57,7 +57,7 @@ computer.
Make
----
-You will need Gnu make 3.80 or later to build the kernel.
+You will need GNU make 3.80 or later to build the kernel.
Binutils
--------
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 618a33c940df..449a8a19fc21 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -527,6 +527,7 @@ values. To do the latter, you can stick the following in your .emacs file:
(string-match (expand-file-name "~/src/linux-trees")
filename))
(setq indent-tabs-mode t)
+ (setq show-trailing-whitespace t)
(c-set-style "linux-tabs-only")))))
This will make emacs go better with the kernel coding style for C
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index d9b9416c989f..aac9357d4866 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -113,7 +113,6 @@
!Finclude/net/cfg80211.h cfg80211_beacon_data
!Finclude/net/cfg80211.h cfg80211_ap_settings
!Finclude/net/cfg80211.h station_parameters
-!Finclude/net/cfg80211.h station_info_flags
!Finclude/net/cfg80211.h rate_info_flags
!Finclude/net/cfg80211.h rate_info
!Finclude/net/cfg80211.h station_info
@@ -435,7 +434,6 @@
<section id="ps-client">
<title>support for powersaving clients</title>
!Pinclude/net/mac80211.h AP support for powersaving clients
- </section>
!Finclude/net/mac80211.h ieee80211_get_buffered_bc
!Finclude/net/mac80211.h ieee80211_beacon_get
!Finclude/net/mac80211.h ieee80211_sta_eosp
@@ -444,6 +442,7 @@
!Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
!Finclude/net/mac80211.h ieee80211_sta_set_buffered
!Finclude/net/mac80211.h ieee80211_sta_block_awake
+ </section>
</chapter>
<chapter id="multi-iface">
@@ -488,8 +487,8 @@
<title>RX A-MPDU aggregation</title>
!Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
!Cnet/mac80211/agg-rx.c
- </sect1>
!Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
+ </sect1>
</chapter>
<chapter id="smps">
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 9c7d92d03f62..b6a6a2e0dd3b 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -56,7 +56,7 @@ htmldocs: $(HTML)
MAN := $(patsubst %.xml, %.9, $(BOOKS))
mandocs: $(MAN)
- $(if $(wildcard $(obj)/man/*.9),gzip -f $(obj)/man/*.9)
+ find $(obj)/man -name '*.9' | xargs gzip -f
installmandocs: mandocs
mkdir -p /usr/local/man/man9/
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index c763d30f4893..04a8c24ead47 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -111,7 +111,7 @@
<para>
This specification is intended for consumers of the kernel crypto
API as well as for developers implementing ciphers. This API
- specification, however, does not discusses all API calls available
+ specification, however, does not discuss all API calls available
to data transformation implementations (i.e. implementations of
ciphers and other transformations (such as CRC or even compression
algorithms) that can register with the kernel crypto API).
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 4b592ffbafee..03f1985a4bd1 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -239,6 +239,14 @@
Driver supports dedicated render nodes.
</para></listitem>
</varlistentry>
+ <varlistentry>
+ <term>DRIVER_ATOMIC</term>
+ <listitem><para>
+ Driver supports atomic properties. In this case the driver
+ must implement appropriate obj->atomic_get_property() vfuncs
+ for any modeset objects with driver specific properties.
+ </para></listitem>
+ </varlistentry>
</variablelist>
</sect3>
<sect3>
@@ -1377,7 +1385,7 @@ int max_width, max_height;</synopsis>
<itemizedlist>
<listitem>
DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
- planes are the planes operated upon by by CRTC modesetting and flipping
+ planes are the planes operated upon by CRTC modesetting and flipping
operations described in <xref linkend="drm-kms-crtcops"/>.
</listitem>
<listitem>
@@ -2362,6 +2370,7 @@ void intel_crt_init(struct drm_device *dev)
</sect2>
<sect2>
<title>Modeset Helper Functions Reference</title>
+!Iinclude/drm/drm_crtc_helper.h
!Edrivers/gpu/drm/drm_crtc_helper.c
!Pdrivers/gpu/drm/drm_crtc_helper.c overview
</sect2>
@@ -2564,8 +2573,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
- <td rowspan="25" valign="top" >DRM</td>
- <td rowspan="4" valign="top" >Generic</td>
+ <td rowspan="36" valign="top" >DRM</td>
+ <td rowspan="5" valign="top" >Connector</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
<td valign="top" >0</td>
@@ -2594,7 +2603,14 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Contains tiling information for a connector.</td>
</tr>
<tr>
- <td rowspan="1" valign="top" >Plane</td>
+ <td valign="top" >“CRTC_ID”</td>
+ <td valign="top" >OBJECT</td>
+ <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >CRTC that connector is attached to (atomic)</td>
+ </tr>
+ <tr>
+ <td rowspan="11" valign="top" >Plane</td>
<td valign="top" >“type”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
<td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
@@ -2602,6 +2618,76 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Plane type</td>
</tr>
<tr>
+ <td valign="top" >“SRC_X”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“SRC_Y”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“SRC_W”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“SRC_H”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_X”</td>
+ <td valign="top" >SIGNED_RANGE</td>
+ <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_Y”</td>
+ <td valign="top" >SIGNED_RANGE</td>
+ <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_W”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) width (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_H”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) height (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“FB_ID”</td>
+ <td valign="top" >OBJECT</td>
+ <td valign="top" >DRM_MODE_OBJECT_FB</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout framebuffer (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_ID”</td>
+ <td valign="top" >OBJECT</td>
+ <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >CRTC that plane is attached to (atomic)</td>
+ </tr>
+ <tr>
<td rowspan="2" valign="top" >DVI-I</td>
<td valign="top" >“subconnector”</td>
<td valign="top" >ENUM</td>
@@ -3883,6 +3969,7 @@ int num_ioctls;</synopsis>
<title>Runtime Power Management</title>
!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
!Idrivers/gpu/drm/i915/intel_runtime_pm.c
+!Idrivers/gpu/drm/i915/intel_uncore.c
</sect2>
<sect2>
<title>Interrupt Handling</title>
@@ -3932,6 +4019,11 @@ int num_ioctls;</synopsis>
</para>
</sect2>
<sect2>
+ <title>Atomic Plane Helpers</title>
+!Pdrivers/gpu/drm/i915/intel_atomic_plane.c atomic plane helpers
+!Idrivers/gpu/drm/i915/intel_atomic_plane.c
+ </sect2>
+ <sect2>
<title>Output Probing</title>
<para>
This section covers output probing and related infrastructure like the
@@ -3951,6 +4043,11 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/intel_psr.c
</sect2>
<sect2>
+ <title>Frame Buffer Compression (FBC)</title>
+!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
+!Idrivers/gpu/drm/i915/intel_fbc.c
+ </sect2>
+ <sect2>
<title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
<table id="dpiox2">
@@ -4054,10 +4151,31 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
+ <title>Batchbuffer Pools</title>
+!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
+!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
+ </sect2>
+ <sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
+ <sect2>
+ <title>Global GTT views</title>
+!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
+!Idrivers/gpu/drm/i915/i915_gem_gtt.c
+ </sect2>
+ <sect2>
+ <title>Buffer Object Eviction</title>
+ <para>
+ This section documents the interface function for evicting buffer
+ objects to make space available in the virtual gpu address spaces.
+ Note that this is mostly orthogonal to shrinking buffer object
+ caches, whose goal is to make main memory (shared with the gpu
+ through the unified memory architecture) available.
+ </para>
+!Idrivers/gpu/drm/i915/i915_gem_evict.c
+ </sect2>
</sect1>
<sect1>
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index f77358f96930..2428cc04dbc8 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -75,7 +75,7 @@
a development machine and the other is the target machine. The
kernel to be debugged runs on the target machine. The development
machine runs an instance of gdb against the vmlinux file which
- contains the symbols (not boot image such as bzImage, zImage,
+ contains the symbols (not a boot image such as bzImage, zImage,
uImage...). In gdb the developer specifies the connection
parameters and connects to kgdb. The type of connection a
developer makes with gdb depends on the availability of kgdb I/O
@@ -95,7 +95,7 @@
<title>Kernel config options for kgdb</title>
<para>
To enable <symbol>CONFIG_KGDB</symbol> you should look under
- "Kernel debugging" and select "KGDB: kernel debugger".
+ "Kernel hacking" / "Kernel debugging" and select "KGDB: kernel debugger".
</para>
<para>
While it is not a hard requirement that you have symbols in your
@@ -105,7 +105,7 @@
kernel with debug info" in the config menu.
</para>
<para>
- It is advised, but not required that you turn on the
+ It is advised, but not required, that you turn on the
<symbol>CONFIG_FRAME_POINTER</symbol> kernel option which is called "Compile the
kernel with frame pointers" in the config menu. This option
inserts code into the compiled executable which saves the frame
@@ -181,7 +181,7 @@
<para>This section describes the various runtime kernel
parameters that affect the configuration of the kernel debugger.
The following chapter covers using kdb and kgdb as well as
- provides some examples of the configuration parameters.</para>
+ providing some examples of the configuration parameters.</para>
<sect1 id="kgdboc">
<title>Kernel parameter: kgdboc</title>
<para>The kgdboc driver was originally an abbreviation meant to
@@ -219,8 +219,8 @@
<listitem><para>kbd = Keyboard</para></listitem>
</itemizedlist>
</para>
- <para>You can configure kgdboc to use the keyboard, and or a serial
- device depending on if you are using kdb and or kgdb, in one of the
+ <para>You can configure kgdboc to use the keyboard, and/or a serial
+ device depending on if you are using kdb and/or kgdb, in one of the
following scenarios. The order listed above must be observed if
you use any of the optional configurations together. Using kms +
only gdb is generally not a useful combination.</para>
@@ -261,11 +261,8 @@
</sect3>
<sect3 id="kgdbocArgs3">
<title>More examples</title>
- <para>You can configure kgdboc to use the keyboard, and or a serial
- device depending on if you are using kdb and or kgdb, in one of the
- following scenarios.</para>
- <para>You can configure kgdboc to use the keyboard, and or a serial device
- depending on if you are using kdb and or kgdb, in one of the
+ <para>You can configure kgdboc to use the keyboard, and/or a serial device
+ depending on if you are using kdb and/or kgdb, in one of the
following scenarios.
<orderedlist>
<listitem><para>kdb and kgdb over only a serial port</para>
@@ -315,7 +312,7 @@
<para>
The Kernel command line option <constant>kgdbwait</constant> makes
kgdb wait for a debugger connection during booting of a kernel. You
- can only use this option you compiled a kgdb I/O driver into the
+ can only use this option if you compiled a kgdb I/O driver into the
kernel and you specified the I/O driver configuration as a kernel
command line option. The kgdbwait parameter should always follow the
configuration parameter for the kgdb I/O driver in the kernel
@@ -354,7 +351,7 @@
</listitem>
</orderedlist>
<para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
- active system console. An example incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
+ active system console. An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
</para>
<para>It is possible to use this option with kgdboc on a tty that is not a system console.
</para>
@@ -386,12 +383,12 @@
<title>Quick start for kdb on a serial port</title>
<para>This is a quick example of how to use kdb.</para>
<para><orderedlist>
- <listitem><para>Boot kernel with arguments:
+ <listitem><para>Configure kgdboc at boot using kernel parameters:
<itemizedlist>
<listitem><para><constant>console=ttyS0,115200 kgdboc=ttyS0,115200</constant></para></listitem>
</itemizedlist></para>
<para>OR</para>
- <para>Configure kgdboc after the kernel booted; assuming you are using a serial port console:
+ <para>Configure kgdboc after the kernel has booted; assuming you are using a serial port console:
<itemizedlist>
<listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
</itemizedlist>
@@ -442,12 +439,12 @@
<title>Quick start for kdb using a keyboard connected console</title>
<para>This is a quick example of how to use kdb with a keyboard.</para>
<para><orderedlist>
- <listitem><para>Boot kernel with arguments:
+ <listitem><para>Configure kgdboc at boot using kernel parameters:
<itemizedlist>
<listitem><para><constant>kgdboc=kbd</constant></para></listitem>
</itemizedlist></para>
<para>OR</para>
- <para>Configure kgdboc after the kernel booted:
+ <para>Configure kgdboc after the kernel has booted:
<itemizedlist>
<listitem><para><constant>echo kbd &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
</itemizedlist>
@@ -501,12 +498,12 @@
<title>Connecting with gdb to a serial port</title>
<orderedlist>
<listitem><para>Configure kgdboc</para>
- <para>Boot kernel with arguments:
+ <para>Configure kgdboc at boot using kernel parameters:
<itemizedlist>
<listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
</itemizedlist></para>
<para>OR</para>
- <para>Configure kgdboc after the kernel booted:
+ <para>Configure kgdboc after the kernel has booted:
<itemizedlist>
<listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
</itemizedlist></para>
@@ -536,7 +533,7 @@
</para>
</listitem>
<listitem>
- <para>Connect from from gdb</para>
+ <para>Connect from gdb</para>
<para>
Example (using a directly connected port):
</para>
@@ -584,7 +581,7 @@
<para>
There are two ways to switch from kgdb to kdb: you can use gdb to
issue a maintenance packet, or you can blindly type the command $3#33.
- Whenever kernel debugger stops in kgdb mode it will print the
+ Whenever the kernel debugger stops in kgdb mode it will print the
message <constant>KGDB or $3#33 for KDB</constant>. It is important
to note that you have to type the sequence correctly in one pass.
You cannot type a backspace or delete because kgdb will interpret
@@ -704,7 +701,7 @@ Task Addr Pid Parent [*] cpu State Thread Command
<listitem><para>Registration and unregistration of architecture specific trap hooks</para></listitem>
<listitem><para>Any special exception handling and cleanup</para></listitem>
<listitem><para>NMI exception handling and cleanup</para></listitem>
- <listitem><para>(optional)HW breakpoints</para></listitem>
+ <listitem><para>(optional) HW breakpoints</para></listitem>
</itemizedlist>
</para>
</listitem>
@@ -760,7 +757,7 @@ Task Addr Pid Parent [*] cpu State Thread Command
a kgdb I/O driver for characters when it needs input. The I/O
driver is expected to return immediately if there is no data
available. Doing so allows for the future possibility to touch
- watch dog hardware in such a way as to have a target system not
+ watchdog hardware in such a way as to have a target system not
reset when these are enabled.
</para>
</listitem>
@@ -779,21 +776,25 @@ Task Addr Pid Parent [*] cpu State Thread Command
their &lt;asm/kgdb.h&gt; file. These are:
<itemizedlist>
<listitem>
- <para>
- NUMREGBYTES: The size in bytes of all of the registers, so
- that we can ensure they will all fit into a packet.
- </para>
- <para>
- BUFMAX: The size in bytes of the buffer GDB will read into.
- This must be larger than NUMREGBYTES.
- </para>
- <para>
- CACHE_FLUSH_IS_SAFE: Set to 1 if it is always safe to call
- flush_cache_range or flush_icache_range. On some architectures,
- these functions may not be safe to call on SMP since we keep other
- CPUs in a holding pattern.
- </para>
- </listitem>
+ <para>
+ NUMREGBYTES: The size in bytes of all of the registers, so
+ that we can ensure they will all fit into a packet.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ BUFMAX: The size in bytes of the buffer GDB will read into.
+ This must be larger than NUMREGBYTES.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ CACHE_FLUSH_IS_SAFE: Set to 1 if it is always safe to call
+ flush_cache_range or flush_icache_range. On some architectures,
+ these functions may not be safe to call on SMP since we keep other
+ CPUs in a holding pattern.
+ </para>
+ </listitem>
</itemizedlist>
</para>
<para>
@@ -812,8 +813,8 @@ Task Addr Pid Parent [*] cpu State Thread Command
<para>
The kgdboc driver is actually a very thin driver that relies on the
underlying low level to the hardware driver having "polling hooks"
- which the to which the tty driver is attached. In the initial
- implementation of kgdboc it the serial_core was changed to expose a
+ to which the tty driver is attached. In the initial
+ implementation of kgdboc the serial_core was changed to expose a
low level UART hook for doing polled mode reading and writing of a
single character while in an atomic context. When kgdb makes an I/O
request to the debugger, kgdboc invokes a callback in the serial
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index e013e4bf244c..4e9462f1ab4c 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -2692,12 +2692,11 @@ in the S5P family of SoCs by Samsung.
<row><entry></entry></row>
<row>
<entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">If the display delay is enabled then the decoder has to return a
-CAPTURE buffer after processing a certain number of OUTPUT buffers. If this number is low, then it may result in
-buffers not being dequeued in display order. In addition hardware may still use those buffers as reference, thus
-application should not write to those buffers. This feature can be used for example for generating thumbnails of videos.
-Applicable to the H264 decoder.
+ <entry>boolean</entry>
+ </row><row><entry spanname="descr">If the display delay is enabled then the decoder is forced to return a
+CAPTURE buffer (decoded frame) after processing a certain number of OUTPUT buffers. The delay can be set through
+<constant>V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY</constant>. This feature can be used for example
+for generating thumbnails of videos. Applicable to the H264 decoder.
</entry>
</row>
<row><entry></entry></row>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
index c1c62a9acc2a..f34d03ebda3a 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
@@ -17,7 +17,7 @@
<refsect1>
<title>Description</title>
- <para>The following four pixel formats are raw sRGB / Bayer formats with
+ <para>These four pixel formats are raw sRGB / Bayer formats with
10 bits per colour. Each colour component is stored in a 16-bit word, with 6
unused high bits filled with zeros. Each n-pixel row contains n/2 green samples
and n/2 blue or red samples, with alternating red and blue rows. Bytes are
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
index 29acc2098cc2..d2e5845e57fb 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
@@ -25,7 +25,7 @@
</refnamediv>
<refsect1>
<title>Description</title>
- <para>The following four pixel formats are raw sRGB / Bayer
+ <para>These four pixel formats are raw sRGB / Bayer
formats with 10 bits per color compressed to 8 bits each,
using the A-LAW algorithm. Each color component consumes 8
bits of memory. In other respects this format is similar to
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
index 2d3f0b1aefe0..bde89878c5c5 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
@@ -18,7 +18,7 @@
<refsect1>
<title>Description</title>
- <para>The following four pixel formats are raw sRGB / Bayer formats
+ <para>These four pixel formats are raw sRGB / Bayer formats
with 10 bits per colour compressed to 8 bits each, using DPCM
compression. DPCM, differential pulse-code modulation, is lossy.
Each colour component consumes 8 bits of memory. In other respects
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
new file mode 100644
index 000000000000..30aa63581fe3
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
@@ -0,0 +1,99 @@
+ <refentry id="pixfmt-srggb10p">
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_SRGGB10P ('pRAA'),
+ V4L2_PIX_FMT_SGRBG10P ('pgAA'),
+ V4L2_PIX_FMT_SGBRG10P ('pGAA'),
+ V4L2_PIX_FMT_SBGGR10P ('pBAA'),
+ </refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-SRGGB10P"><constant>V4L2_PIX_FMT_SRGGB10P</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGRBG10P"><constant>V4L2_PIX_FMT_SGRBG10P</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGBRG10P"><constant>V4L2_PIX_FMT_SGBRG10P</constant></refname>
+ <refname id="V4L2-PIX-FMT-SBGGR10P"><constant>V4L2_PIX_FMT_SBGGR10P</constant></refname>
+ <refpurpose>10-bit packed Bayer formats</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>These four pixel formats are packed raw sRGB /
+ Bayer formats with 10 bits per colour. Every four consecutive
+ colour components are packed into 5 bytes. Each of the first 4
+ bytes contains the 8 high order bits of one pixel, and the
+ fifth byte contains the two least significant bits of each
+ pixel, in the same order.</para>
+
+ <para>Each n-pixel row contains n/2 green samples and n/2 blue
+ or red samples, with alternating green-red and green-blue
+ rows. They are conventionally described as GRGR... BGBG...,
+ RGRG... GBGB..., etc. Below is an example of one of these
+ formats:</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_SBGGR10P</constant> 4 &times; 4
+ pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="topbot" colsep="1" rowsep="1">
+ <tgroup cols="5" align="center" border="1">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>B<subscript>00high</subscript></entry>
+ <entry>G<subscript>01high</subscript></entry>
+ <entry>B<subscript>02high</subscript></entry>
+ <entry>G<subscript>03high</subscript></entry>
+ <entry>B<subscript>00low</subscript>(bits 7--6)
+ G<subscript>01low</subscript>(bits 5--4)
+ B<subscript>02low</subscript>(bits 3--2)
+ G<subscript>03low</subscript>(bits 1--0)
+ </entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;5:</entry>
+ <entry>G<subscript>10high</subscript></entry>
+ <entry>R<subscript>11high</subscript></entry>
+ <entry>G<subscript>12high</subscript></entry>
+ <entry>R<subscript>13high</subscript></entry>
+ <entry>G<subscript>10low</subscript>(bits 7--6)
+ R<subscript>11low</subscript>(bits 5--4)
+ G<subscript>12low</subscript>(bits 3--2)
+ R<subscript>13low</subscript>(bits 1--0)
+ </entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;10:</entry>
+ <entry>B<subscript>20high</subscript></entry>
+ <entry>G<subscript>21high</subscript></entry>
+ <entry>B<subscript>22high</subscript></entry>
+ <entry>G<subscript>23high</subscript></entry>
+ <entry>B<subscript>20low</subscript>(bits 7--6)
+ G<subscript>21low</subscript>(bits 5--4)
+ B<subscript>22low</subscript>(bits 3--2)
+ G<subscript>23low</subscript>(bits 1--0)
+ </entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;15:</entry>
+ <entry>G<subscript>30high</subscript></entry>
+ <entry>R<subscript>31high</subscript></entry>
+ <entry>G<subscript>32high</subscript></entry>
+ <entry>R<subscript>33high</subscript></entry>
+ <entry>G<subscript>30low</subscript>(bits 7--6)
+ R<subscript>31low</subscript>(bits 5--4)
+ G<subscript>32low</subscript>(bits 3--2)
+ R<subscript>33low</subscript>(bits 1--0)
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml
index 96947f17fca1..0c8e4adf417f 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml
@@ -17,7 +17,7 @@
<refsect1>
<title>Description</title>
- <para>The following four pixel formats are raw sRGB / Bayer formats with
+ <para>These four pixel formats are raw sRGB / Bayer formats with
12 bits per colour. Each colour component is stored in a 16-bit word, with 4
unused high bits filled with zeros. Each n-pixel row contains n/2 green samples
and n/2 blue or red samples, with alternating red and blue rows. Bytes are
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index d5eca4b8f74b..5e0352c50324 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -1405,6 +1405,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
&sub-srggb8;
&sub-sbggr16;
&sub-srggb10;
+ &sub-srggb10p;
&sub-srggb10alaw8;
&sub-srggb10dpcm8;
&sub-srggb12;
diff --git a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
index 28a8c1e1c705..a2017bfcaed2 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
@@ -212,11 +212,3 @@ standards set in the <structfield>standards</structfield> field.
&return-value;
</refsect1>
</refentry>
-
-<!--
-Local Variables:
-mode: sgml
-sgml-parent-document: "v4l2.sgml"
-indent-tabs-mode: nil
-End:
--->
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
index b9fdfeacdbcb..6e3cadd4e1f9 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
@@ -131,11 +131,3 @@ is out of bounds or the <structfield>pad</structfield> number is invalid.</para>
</variablelist>
</refsect1>
</refentry>
-
-<!--
-Local Variables:
-mode: sgml
-sgml-parent-document: "v4l2.sgml"
-indent-tabs-mode: nil
-End:
--->
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 1fdc246e4256..cd0e452dfed5 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -719,7 +719,7 @@ framework to set up sysfs files for this region. Simply leave it alone.
</para>
</sect1>
-<sect1 id="using uio_dmem_genirq">
+<sect1 id="using-uio_dmem_genirq">
<title>Using uio_dmem_genirq for platform devices</title>
<para>
In addition to statically allocated memory ranges, they may also be
@@ -746,16 +746,16 @@ framework to set up sysfs files for this region. Simply leave it alone.
following elements:
</para>
<itemizedlist>
- <listitem><varname>struct uio_info uioinfo</varname>: The same
+ <listitem><para><varname>struct uio_info uioinfo</varname>: The same
structure used as the <varname>uio_pdrv_genirq</varname> platform
- data</listitem>
- <listitem><varname>unsigned int *dynamic_region_sizes</varname>:
+ data</para></listitem>
+ <listitem><para><varname>unsigned int *dynamic_region_sizes</varname>:
Pointer to list of sizes of dynamic memory regions to be mapped into
user space.
- </listitem>
- <listitem><varname>unsigned int num_dynamic_regions</varname>:
+ </para></listitem>
+ <listitem><para><varname>unsigned int num_dynamic_regions</varname>:
Number of elements in <varname>dynamic_region_sizes</varname> array.
- </listitem>
+ </para></listitem>
</itemizedlist>
<para>
The dynamic regions defined in the platform data will be appended to
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index ed186a902d31..b57c0c1cdac6 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
21 seconds.
This configuration parameter may be changed at runtime via the
- /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however
+ /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however
this parameter is checked only at the beginning of a cycle.
So if you are 10 seconds into a 40-second stall, setting this
sysfs parameter to (say) five will shorten the timeout for the
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
"D" indicates that dyntick-idle processing is enabled ("." is printed
otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
+If the relevant grace-period kthread has been unable to run prior to
+the stall warning, the following additional line is printed:
+
+ rcu_preempt kthread starved for 2023 jiffies!
+
+Starving the grace-period kthreads of CPU time can of course result in
+RCU CPU stall warnings even when all CPUs and tasks have passed through
+the required quiescent states.
+
Multiple Warnings From One Stall
@@ -187,6 +196,11 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
behavior, you might need to replace some of the cond_resched()
calls with calls to cond_resched_rcu_qs().
+o Anything that prevents RCU's grace-period kthreads from running.
+ This can result in the "All QSes seen" console-log message.
+ This message will include information on when the kthread last
+ ran and how often it should be expected to run.
+
o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
happen to preempt a low-priority task in the middle of an RCU
read-side critical section. This is especially damaging if
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index b63b9bb3bc0c..08651da15448 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -56,14 +56,14 @@ rcuboost:
The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
- 0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
- 1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
- 2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
- 3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
- 4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
- 5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
- 6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
- 7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+ 0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+ 1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+ 2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+ 3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+ 4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+ 5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+ 6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+ 7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
This file has one line per CPU, or eight for this 8-CPU system.
The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
Kernels compiled with CONFIG_RCU_BOOST=y display the following from
/debug/rcu/rcu_preempt/rcudata:
- 0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
- 1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
- 2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
- 3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
- 4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
- 5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
- 6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
- 7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+ 0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+ 1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+ 2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+ 3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+ 4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+ 5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+ 6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+ 7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
This is similar to the output discussed above, but contains the following
additional fields:
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 1fa1caa198eb..447671bd2927 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -10,27 +10,49 @@ kernel, the process can sometimes be daunting if you're not familiar
with "the system." This text is a collection of suggestions which
can greatly increase the chances of your change being accepted.
-Read Documentation/SubmitChecklist for a list of items to check
-before submitting code. If you are submitting a driver, also read
-Documentation/SubmittingDrivers.
+This document contains a large number of suggestions in a relatively terse
+format. For detailed information on how the kernel development process
+works, see Documentation/development-process. Also, read
+Documentation/SubmitChecklist for a list of items to check before
+submitting code. If you are submitting a driver, also read
+Documentation/SubmittingDrivers; for device tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.txt.
Many of these steps describe the default behavior of the git version
control system; if you use git to prepare your patches, you'll find much
of the mechanical work done for you, though you'll still need to prepare
-and document a sensible set of patches.
+and document a sensible set of patches. In general, use of git will make
+your life as a kernel developer easier.
--------------------------------------------
SECTION 1 - CREATING AND SENDING YOUR CHANGE
--------------------------------------------
+0) Obtain a current source tree
+-------------------------------
+
+If you do not have a repository with the current kernel source handy, use
+git to obtain one. You'll want to start with the mainline repository,
+which can be grabbed with:
+
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+Note, however, that you may not want to develop against the mainline tree
+directly. Most subsystem maintainers run their own trees and want to see
+patches prepared against those trees. See the "T:" entry for the subsystem
+in the MAINTAINERS file to find that tree, or simply ask the maintainer if
+the tree is not listed there.
+
+It is still possible to download kernel releases via tarballs (as described
+in the next section), but that is the hard way to do kernel development.
1) "diff -up"
------------
-Use "diff -up" or "diff -uprN" to create patches. git generates patches
-in this form by default; if you're using git, you can skip this section
-entirely.
+If you must generate your patches by hand, use "diff -up" or "diff -uprN"
+to create patches. Git generates patches in this form by default; if
+you're using git, you can skip this section entirely.
All changes to the Linux kernel occur in the form of patches, as
generated by diff(1). When creating your patch, make sure to create it
@@ -42,7 +64,7 @@ not in any lower subdirectory.
To create a patch for a single file, it is often sufficient to do:
- SRCTREE= linux-2.6
+ SRCTREE= linux
MYFILE= drivers/net/mydriver.c
cd $SRCTREE
@@ -55,17 +77,16 @@ To create a patch for multiple files, you should unpack a "vanilla",
or unmodified kernel source tree, and generate a diff against your
own source tree. For example:
- MYSRC= /devel/linux-2.6
+ MYSRC= /devel/linux
- tar xvfz linux-2.6.12.tar.gz
- mv linux-2.6.12 linux-2.6.12-vanilla
- diff -uprN -X linux-2.6.12-vanilla/Documentation/dontdiff \
- linux-2.6.12-vanilla $MYSRC > /tmp/patch
+ tar xvfz linux-3.19.tar.gz
+ mv linux-3.19 linux-3.19-vanilla
+ diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
+ linux-3.19-vanilla $MYSRC > /tmp/patch
"dontdiff" is a list of files which are generated by the kernel during
the build process, and should be ignored in any diff(1)-generated
-patch. The "dontdiff" file is included in the kernel tree in
-2.6.12 and later.
+patch.
Make sure your patch does not include any extra files which do not
belong in a patch submission. Make sure to review your patch -after-
@@ -83,6 +104,7 @@ is another popular alternative.
2) Describe your changes.
+-------------------------
Describe your problem. Whether your patch is a one-line bug fix or
5000 lines of a new feature, there must be an underlying problem that
@@ -124,10 +146,10 @@ See #3, next.
When you submit or resubmit a patch or patch series, include the
complete patch description and justification for it. Don't just
say that this is version N of the patch (series). Don't expect the
-patch merger to refer back to earlier patch versions or referenced
+subsystem maintainer to refer back to earlier patch versions or referenced
URLs to find the patch description and put that into the patch.
I.e., the patch (series) and its description should be self-contained.
-This benefits both the patch merger(s) and reviewers. Some reviewers
+This benefits both the maintainers and reviewers. Some reviewers
probably didn't even receive earlier versions of the patch.
Describe your changes in imperative mood, e.g. "make xyzzy do frotz"
@@ -156,10 +178,15 @@ Example:
platform_set_drvdata(), but left the variable "dev" unused,
delete it.
+You should also be sure to use at least the first twelve characters of the
+SHA-1 ID. The kernel repository holds a *lot* of objects, making
+collisions with shorter IDs a real possibility. Bear in mind that, even if
+there is no collision with your six-character ID now, that condition may
+change five years from now.
+
If your patch fixes a bug in a specific commit, e.g. you found an issue using
git-bisect, please use the 'Fixes:' tag with the first 12 characters of the
-SHA-1 ID, and the one line summary.
-Example:
+SHA-1 ID, and the one line summary. For example:
Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
@@ -172,8 +199,9 @@ outputting the above style in the git log or git show commands
fixes = Fixes: %h (\"%s\")
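+
+With such a configuration in place, a correctly formatted Fixes: line for a
+given commit can then be generated with something like (reusing the example
+commit ID above):
+
+	git log -1 --pretty=fixes e21d2170f366
+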
3) Separate your changes.
+-------------------------
-Separate _logical changes_ into a single patch file.
+Separate each _logical change_ into a separate patch.
For example, if your changes include both bug fixes and performance
enhancements for a single driver, separate those changes into two
@@ -184,90 +212,116 @@ On the other hand, if you make a single change to numerous files,
group those changes into a single patch. Thus a single logical change
is contained within a single patch.
+The point to remember is that each patch should make an easily understood
+change that can be verified by reviewers. Each patch should be justifiable
+on its own merits.
+
If one patch depends on another patch in order for a change to be
complete, that is OK. Simply note "this patch depends on patch X"
in your patch description.
+When dividing your change into a series of patches, take special care to
+ensure that the kernel builds and runs properly after each patch in the
+series. Developers using "git bisect" to track down a problem can end up
+splitting your patch series at any point; they will not thank you if you
+introduce bugs in the middle.
+
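+One way to verify this, assuming your series is the set of commits sitting
+on top of its base branch, is to rebuild the tree at every step with
+something like (here <base-branch> is just a placeholder for wherever your
+series starts):
+
+	git rebase -i --exec "make -j$(nproc)" <base-branch>
+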
If you cannot condense your patch set into a smaller set of patches,
then only post say 15 or so at a time and wait for review and integration.
-4) Style check your changes.
+4) Style-check your changes.
+----------------------------
Check your patch for basic style violations, details of which can be
found in Documentation/CodingStyle. Failure to do so simply wastes
the reviewers time and will get your patch rejected, probably
without even being read.
-At a minimum you should check your patches with the patch style
-checker prior to submission (scripts/checkpatch.pl). You should
-be able to justify all violations that remain in your patch.
-
-
+One significant exception is when moving code from one file to
+another -- in this case you should not modify the moved code at all in
+the same patch which moves it. This clearly delineates the act of
+moving the code and your changes. This greatly aids review of the
+actual differences and allows tools to better track the history of
+the code itself.
-5) Select e-mail destination.
+Check your patches with the patch style checker prior to submission
+(scripts/checkpatch.pl). Note, though, that the style checker should be
+viewed as a guide, not as a replacement for human judgment. If your code
+looks better with a violation, then it's probably best left alone.
-Look through the MAINTAINERS file and the source code, and determine
-if your change applies to a specific subsystem of the kernel, with
-an assigned maintainer. If so, e-mail that person. The script
-scripts/get_maintainer.pl can be very useful at this step.
+The checker reports at three levels:
+ - ERROR: things that are very likely to be wrong
+ - WARNING: things requiring careful review
+ - CHECK: things requiring thought
-If no maintainer is listed, or the maintainer does not respond, send
-your patch to the primary Linux kernel developer's mailing list,
-linux-kernel@vger.kernel.org. Most kernel developers monitor this
-e-mail list, and can comment on your changes.
+You should be able to justify all violations that remain in your
+patch.
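+
+For example, to check a patch file you have already generated (the file
+name here is purely illustrative; adding --strict also shows the
+CHECK-level messages):
+
+	./scripts/checkpatch.pl 0001-foo-fix-the-frobnicator.patch
+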
-Do not send more than 15 patches at once to the vger mailing lists!!!
+5) Select the recipients for your patch.
+----------------------------------------
+You should always copy the appropriate subsystem maintainer(s) on any patch
+to code that they maintain; look through the MAINTAINERS file and the
+source code revision history to see who those maintainers are. The
+script scripts/get_maintainer.pl can be very useful at this step. If you
+cannot find a maintainer for the subsystem you are working on, Andrew
+Morton (akpm@linux-foundation.org) serves as a maintainer of last resort.
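+
+For example, to see the maintainers and mailing lists for a patch you have
+already generated (the file name is purely illustrative):
+
+	./scripts/get_maintainer.pl 0001-foo-fix-the-frobnicator.patch
+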
-Linus Torvalds is the final arbiter of all changes accepted into the
-Linux kernel. His e-mail address is <torvalds@linux-foundation.org>.
-He gets a lot of e-mail, so typically you should do your best to -avoid-
-sending him e-mail.
+You should also normally choose at least one mailing list to receive a copy
+of your patch set. linux-kernel@vger.kernel.org functions as a list of
+last resort, but the volume on that list has caused a number of developers
+to tune it out. Look in the MAINTAINERS file for a subsystem-specific
+list; your patch will probably get more attention there. Please do not
+spam unrelated lists, though.
-Patches which are bug fixes, are "obvious" changes, or similarly
-require little discussion should be sent or CC'd to Linus. Patches
-which require discussion or do not have a clear advantage should
-usually be sent first to linux-kernel. Only after the patch is
-discussed should the patch then be submitted to Linus.
+Many kernel-related lists are hosted on vger.kernel.org; you can find a
+list of them at http://vger.kernel.org/vger-lists.html. There are
+kernel-related lists hosted elsewhere as well, though.
+Do not send more than 15 patches at once to the vger mailing lists!!!
+Linus Torvalds is the final arbiter of all changes accepted into the
+Linux kernel. His e-mail address is <torvalds@linux-foundation.org>.
+He gets a lot of e-mail, and, at this point, very few patches go through
+Linus directly, so typically you should do your best to -avoid-
+sending him e-mail.
-6) Select your CC (e-mail carbon copy) list.
+If you have a patch that fixes an exploitable security bug, send that patch
+to security@kernel.org. For severe bugs, a short embargo may be considered
+to allow distributors to get the patch out to users; in such cases,
+obviously, the patch should not be sent to any public lists.
-Unless you have a reason NOT to do so, CC linux-kernel@vger.kernel.org.
+Patches that fix a severe bug in a released kernel should be directed
+toward the stable maintainers by putting a line like this:
-Other kernel developers besides Linus need to be aware of your change,
-so that they may comment on it and offer code review and suggestions.
-linux-kernel is the primary Linux kernel developer mailing list.
-Other mailing lists are available for specific subsystems, such as
-USB, framebuffer devices, the VFS, the SCSI subsystem, etc. See the
-MAINTAINERS file for a mailing list that relates specifically to
-your change.
+ Cc: stable@vger.kernel.org
-Majordomo lists of VGER.KERNEL.ORG at:
- <http://vger.kernel.org/vger-lists.html>
+into your patch.
-If changes affect userland-kernel interfaces, please send
-the MAN-PAGES maintainer (as listed in the MAINTAINERS file)
-a man-pages patch, or at least a notification of the change,
-so that some information makes its way into the manual pages.
+Note, however, that some subsystem maintainers want to come to their own
+conclusions on which patches should go to the stable trees. The networking
+maintainer, in particular, would rather not see individual developers
+adding lines like the above to their patches.
-Even if the maintainer did not respond in step #5, make sure to ALWAYS
-copy the maintainer when you change their code.
+If changes affect userland-kernel interfaces, please send the MAN-PAGES
+maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
+least a notification of the change, so that some information makes its way
+into the manual pages. User-space API changes should also be copied to
+linux-api@vger.kernel.org.
For small patches you may want to CC the Trivial Patch Monkey
trivial@kernel.org which collects "trivial" patches. Have a look
into the MAINTAINERS file for its current manager.
Trivial patches must qualify for one of the following rules:
Spelling fixes in documentation
- Spelling fixes which could break grep(1)
+ Spelling fixes for errors which could break grep(1)
Warning fixes (cluttering with useless warnings is bad)
Compilation fixes (only if they are actually correct)
Runtime fixes (only if they actually fix things)
- Removing use of deprecated functions/macros (eg. check_region)
+ Removing use of deprecated functions/macros
Contact detail and documentation fixes
Non-portable code replaced by portable code (even in arch-specific,
since people copy, as long as it's trivial)
@@ -276,7 +330,8 @@ Trivial patches must qualify for one of the following rules:
-7) No MIME, no links, no compression, no attachments. Just plain text.
+6) No MIME, no links, no compression, no attachments. Just plain text.
+-----------------------------------------------------------------------
Linus and other kernel developers need to be able to read and comment
on the changes you are submitting. It is important for a kernel
@@ -299,54 +354,48 @@ you to re-send them using MIME.
See Documentation/email-clients.txt for hints about configuring
your e-mail client so that it sends your patches untouched.
-8) E-mail size.
-
-When sending patches to Linus, always follow step #7.
+7) E-mail size.
+---------------
Large changes are not appropriate for mailing lists, and some
maintainers. If your patch, uncompressed, exceeds 300 kB in size,
it is preferred that you store your patch on an Internet-accessible
-server, and provide instead a URL (link) pointing to your patch.
+server, and provide instead a URL (link) pointing to your patch. But note
+that if your patch exceeds 300 kB, it almost certainly needs to be broken up
+anyway.
+8) Respond to review comments.
+------------------------------
+Your patch will almost certainly get comments from reviewers on ways in
+which the patch can be improved. You must respond to those comments;
+ignoring reviewers is a good way to get ignored in return. Review comments
+or questions that do not lead to a code change should almost certainly
+bring about a comment or changelog entry so that the next reviewer better
+understands what is going on.
-9) Name your kernel version.
+Be sure to tell the reviewers what changes you are making and to thank them
+for their time. Code review is a tiring and time-consuming process, and
+reviewers sometimes get grumpy. Even in that case, though, respond
+politely and address the problems they have pointed out.
-It is important to note, either in the subject line or in the patch
-description, the kernel version to which this patch applies.
-If the patch does not apply cleanly to the latest kernel version,
-Linus will not apply it.
+9) Don't get discouraged - or impatient.
+----------------------------------------
+After you have submitted your change, be patient and wait. Reviewers are
+busy people and may not get to your patch right away.
+Once upon a time, patches used to disappear into the void without comment,
+but the development process works more smoothly than that now. You should
+receive comments within a week or so; if that does not happen, make sure
+that you have sent your patches to the right place. Wait for a minimum of
+one week before resubmitting or pinging reviewers - possibly longer during
+busy times like merge windows.
-10) Don't get discouraged. Re-submit.
-After you have submitted your change, be patient and wait. If Linus
-likes your change and applies it, it will appear in the next version
-of the kernel that he releases.
-
-However, if your change doesn't appear in the next version of the
-kernel, there could be any number of reasons. It's YOUR job to
-narrow down those reasons, correct what was wrong, and submit your
-updated change.
-
-It is quite common for Linus to "drop" your patch without comment.
-That's the nature of the system. If he drops your patch, it could be
-due to
-* Your patch did not apply cleanly to the latest kernel version.
-* Your patch was not sufficiently discussed on linux-kernel.
-* A style issue (see section 2).
-* An e-mail formatting issue (re-read this section).
-* A technical problem with your change.
-* He gets tons of e-mail, and yours got lost in the shuffle.
-* You are being annoying.
-
-When in doubt, solicit comments on linux-kernel mailing list.
-
-
-
-11) Include PATCH in the subject
+10) Include PATCH in the subject
+--------------------------------
Due to high e-mail traffic to Linus, and to linux-kernel, it is common
convention to prefix your subject line with [PATCH]. This lets Linus
@@ -355,7 +404,8 @@ e-mail discussions.
-12) Sign your work
+11) Sign your work
+------------------
To improve tracking of who did what, especially with patches that can
percolate to their final resting place in the kernel through several
@@ -387,11 +437,11 @@ can certify the below:
person who certified (a), (b) or (c) and I have not modified
it.
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
then you just add a line saying
@@ -401,7 +451,7 @@ using your real name (sorry, no pseudonyms or anonymous contributions.)
Some people also put extra tags at the end. They'll just be ignored for
now, but you can do this to mark internal company procedures or just
-point out some special detail about the sign-off.
+point out some special detail about the sign-off.
If you are a subsystem or branch maintainer, sometimes you need to slightly
modify patches you receive in order to merge them, because the code is not
@@ -429,15 +479,15 @@ which appears in the changelog.
Special note to back-porters: It seems to be a common and useful practice
to insert an indication of the origin of a patch at the top of the commit
message (just after the subject line) to facilitate tracking. For instance,
-here's what we see in 2.6-stable :
+here's what we see in a 3.x-stable release:
- Date: Tue May 13 19:10:30 2008 +0000
+Date: Tue Oct 7 07:26:38 2014 -0400
- SCSI: libiscsi regression in 2.6.25: fix nop timer handling
+ libata: Un-break ATA blacklist
- commit 4cf1043593db6a337f10e006c23c69e5fc93e722 upstream
+ commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
-And here's what appears in 2.4 :
+And here's what might appear in an older kernel once a patch is backported:
Date: Tue May 13 22:12:27 2008 +0200
@@ -446,18 +496,19 @@ And here's what appears in 2.4 :
[backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
Whatever the format, this information provides a valuable help to people
-tracking your trees, and to people trying to trouble-shoot bugs in your
+tracking your trees, and to people trying to troubleshoot bugs in your
tree.
-13) When to use Acked-by: and Cc:
+12) When to use Acked-by: and Cc:
+---------------------------------
The Signed-off-by: tag indicates that the signer was involved in the
development of the patch, or that he/she was in the patch's delivery path.
If a person was not directly involved in the preparation or handling of a
patch but wishes to signify and record their approval of it then they can
-arrange to have an Acked-by: line added to the patch's changelog.
+ask to have an Acked-by: line added to the patch's changelog.
Acked-by: is often used by the maintainer of the affected code when that
maintainer neither contributed to nor forwarded the patch.
@@ -465,7 +516,8 @@ maintainer neither contributed to nor forwarded the patch.
Acked-by: is not as formal as Signed-off-by:. It is a record that the acker
has at least reviewed the patch and has indicated acceptance. Hence patch
mergers will sometimes manually convert an acker's "yep, looks good to me"
-into an Acked-by:.
+into an Acked-by: (but note that it is usually better to ask for an
+explicit ack).
Acked-by: does not necessarily indicate acknowledgement of the entire patch.
For example, if a patch affects multiple subsystems and has an Acked-by: from
@@ -477,11 +529,13 @@ list archives.
If a person has had the opportunity to comment on a patch, but has not
provided such comments, you may optionally add a "Cc:" tag to the patch.
This is the only tag which might be added without an explicit action by the
-person it names. This tag documents that potentially interested parties
-have been included in the discussion
+person it names - but it should indicate that this person was copied on the
+patch. This tag documents that potentially interested parties
+have been included in the discussion.
-14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
+13) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
+--------------------------------------------------------------------------
The Reported-by tag gives credit to people who find bugs and report them and it
hopefully inspires them to help us again in the future. Please note that if
@@ -541,7 +595,13 @@ which stable kernel versions should receive your fix. This is the preferred
method for indicating a bug fixed by the patch. See #2 above for more details.
-15) The canonical patch format
+14) The canonical patch format
+------------------------------
+
+This section describes how the patch itself should be formatted. Note
+that, if you have your patches stored in a git repository, proper patch
+formatting can be had with "git format-patch". The tools cannot create
+the necessary text, though, so read the instructions below anyway.
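+
+For example, a numbered series with a cover letter for the commits on top
+of "master" can be generated with something like (the branch and output
+directory names are only illustrative):
+
+	git format-patch --cover-letter -o outgoing/ master..my-feature
+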
The canonical patch subject line is:
@@ -549,7 +609,8 @@ The canonical patch subject line is:
The canonical patch message body contains the following:
- - A "from" line specifying the patch author.
+ - A "from" line specifying the patch author (only needed if the person
+ sending the patch is not the author).
- An empty line.
@@ -656,128 +717,63 @@ See more details on the proper patch format in the following
references.
-16) Sending "git pull" requests (from Linus emails)
-
-Please write the git repo address and branch name alone on the same line
-so that I can't even by mistake pull from the wrong branch, and so
-that a triple-click just selects the whole thing.
-
-So the proper format is something along the lines of:
-
- "Please pull from
-
- git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
-
- to get these changes:"
-
-so that I don't have to hunt-and-peck for the address and inevitably
-get it wrong (actually, I've only gotten it wrong a few times, and
-checking against the diffstat tells me when I get it wrong, but I'm
-just a lot more comfortable when I don't have to "look for" the right
-thing to pull, and double-check that I have the right branch-name).
-
-
-Please use "git diff -M --stat --summary" to generate the diffstat:
-the -M enables rename detection, and the summary enables a summary of
-new/deleted or renamed files.
-
-With rename detection, the statistics are rather different [...]
-because git will notice that a fair number of the changes are renames.
-
------------------------------------
-SECTION 2 - HINTS, TIPS, AND TRICKS
------------------------------------
-
-This section lists many of the common "rules" associated with code
-submitted to the kernel. There are always exceptions... but you must
-have a really good reason for doing so. You could probably call this
-section Linus Computer Science 101.
-
-
-
-1) Read Documentation/CodingStyle
-
-Nuff said. If your code deviates too much from this, it is likely
-to be rejected without further review, and without comment.
-
-One significant exception is when moving code from one file to
-another -- in this case you should not modify the moved code at all in
-the same patch which moves it. This clearly delineates the act of
-moving the code and your changes. This greatly aids review of the
-actual differences and allows tools to better track the history of
-the code itself.
-
-Check your patches with the patch style checker prior to submission
-(scripts/checkpatch.pl). The style checker should be viewed as
-a guide not as the final word. If your code looks better with
-a violation then its probably best left alone.
-
-The checker reports at three levels:
- - ERROR: things that are very likely to be wrong
- - WARNING: things requiring careful review
- - CHECK: things requiring thought
-
-You should be able to justify all violations that remain in your
-patch.
-
-
-
-2) #ifdefs are ugly
-
-Code cluttered with ifdefs is difficult to read and maintain. Don't do
-it. Instead, put your ifdefs in a header, and conditionally define
-'static inline' functions, or macros, which are used in the code.
-Let the compiler optimize away the "no-op" case.
-
-Simple example, of poor code:
-
- dev = alloc_etherdev (sizeof(struct funky_private));
- if (!dev)
- return -ENODEV;
- #ifdef CONFIG_NET_FUNKINESS
- init_funky_net(dev);
- #endif
-
-Cleaned-up example:
-
-(in header)
- #ifndef CONFIG_NET_FUNKINESS
- static inline void init_funky_net (struct net_device *d) {}
- #endif
+15) Sending "git pull" requests
+-------------------------------
-(in the code itself)
- dev = alloc_etherdev (sizeof(struct funky_private));
- if (!dev)
- return -ENODEV;
- init_funky_net(dev);
+If you have a series of patches, it may be most convenient to have the
+maintainer pull them directly into the subsystem repository with a
+"git pull" operation. Note, however, that pulling patches from a developer
+requires a higher degree of trust than taking patches from a mailing list.
+As a result, many subsystem maintainers are reluctant to take pull
+requests, especially from new, unknown developers. If in doubt you can use
+the pull request as the cover letter for a normal posting of the patch
+series, giving the maintainer the option of using either.
+A pull request should have [GIT] or [PULL] in the subject line. The
+request itself should include the repository name and the branch of
+interest on a single line; it should look something like:
+ Please pull from
-3) 'static inline' is better than a macro
+ git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
-Static inline functions are greatly preferred over macros.
-They provide type safety, have no length limitations, no formatting
-limitations, and under gcc they are as cheap as macros.
+ to get these changes:
-Macros should only be used for cases where a static inline is clearly
-suboptimal [there are a few, isolated cases of this in fast paths],
-or where it is impossible to use a static inline function [such as
-string-izing].
+A pull request should also include an overall message saying what will be
+included in the request, a "git shortlog" listing of the patches
+themselves, and a diffstat showing the overall effect of the patch series.
+The easiest way to get all this information together is, of course, to let
+git do it for you with the "git request-pull" command.
-'static inline' is preferred over 'static __inline__', 'extern inline',
-and 'extern __inline__'.
+Some maintainers (including Linus) want to see pull requests from signed
+commits; that increases their confidence that the request actually came
+from you. Linus, in particular, will not pull from public hosting sites
+like GitHub in the absence of a signed tag.
+The first step toward creating such tags is to make a GNUPG key and get it
+signed by one or more core kernel developers. This step can be hard for
+new developers, but there is no way around it. Attending conferences can
+be a good way to find developers who can sign your key.
+Once you have prepared a patch series in git that you wish to have somebody
+pull, create a signed tag with "git tag -s". This will create a new tag
+identifying the last commit in the series and containing a signature
+created with your private key. You will also have the opportunity to add a
+changelog-style message to the tag; this is an ideal place to describe the
+effects of the pull request as a whole.
-4) Don't over-design.
+If the tree the maintainer will be pulling from is not the repository you
+are working from, don't forget to push the signed tag explicitly to the
+public tree.
-Don't try to anticipate nebulous future cases which may or may not
-be useful: "Make it as simple as you can, and no simpler."
+When generating your pull request, use the signed tag as the target. A
+command like this will do the trick:
+ git request-pull master git://my.public.tree/linux.git my-signed-tag
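+
+The signed tag itself can be created and published beforehand with
+something like (the tag and remote names are only illustrative):
+
+	git tag -s my-signed-tag -m "Summary of the series" HEAD
+	git push my-public-remote my-signed-tag
+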
----------------------
-SECTION 3 - REFERENCES
+SECTION 2 - REFERENCES
----------------------
Andrew Morton, "The perfect patch" (tpp).
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index b60d2ab69497..9b121a569ab4 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -243,7 +243,7 @@ input driver:
.owner = THIS_MODULE,
.pm = &mpu3050_pm,
.of_match_table = mpu3050_of_match,
- .acpi_match_table ACPI_PTR(mpu3050_acpi_match),
+ .acpi_match_table = ACPI_PTR(mpu3050_acpi_match),
},
.probe = mpu3050_probe,
.remove = mpu3050_remove,
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 3b08bc2b04cf..8edb9007844e 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -2,11 +2,15 @@
- this file
Booting
- requirements for booting
+CCN.txt
+ - Cache Coherent Network ring-bus and perf PMU driver.
Interrupts
- ARM Interrupt subsystem documentation
IXP4xx
- Intel IXP4xx Network processor.
-msm
+Makefile
+ - Build source files as part of the Documentation build for arm
+msm/
- MSM specific documentation
Netwinder
- Netwinder specific documentation
@@ -18,11 +22,9 @@ README
- General ARM documentation
SA1100/
- SA1100 documentation
-Samsung-S3C24XX
+Samsung-S3C24XX/
- S3C24XX ARM Linux Overview
-Sharp-LH
- - Linux on Sharp LH79524 and LH7A40X System On a Chip (SOC)
-SPEAr
+SPEAr/
- ST SPEAr platform Linux Overview
VFP/
- Release notes for Linux Kernel Vector Floating Point support code
diff --git a/Documentation/arm/Samsung-S3C24XX/DMA.txt b/Documentation/arm/Samsung-S3C24XX/DMA.txt
deleted file mode 100644
index 3ed82383efea..000000000000
--- a/Documentation/arm/Samsung-S3C24XX/DMA.txt
+++ /dev/null
@@ -1,46 +0,0 @@
- S3C2410 DMA
- ===========
-
-Introduction
-------------
-
- The kernel provides an interface to manage DMA transfers
- using the DMA channels in the CPU, so that the central
- duty of managing channel mappings, and programming the
- channel generators is in one place.
-
-
-DMA Channel Ordering
---------------------
-
- Many of the range do not have connections for the DMA
- channels to all sources, which means that some devices
- have a restricted number of channels that can be used.
-
- To allow flexibility for each CPU type and board, the
- DMA code can be given a DMA ordering structure which
- allows the order of channel search to be specified, as
- well as allowing the prohibition of certain claims.
-
- struct s3c24xx_dma_order has a list of channels, and
- each channel within has a slot for a list of DMA
- channel numbers. The slots are searched in order for
- the presence of a DMA channel number with DMA_CH_VALID
- or-ed in.
-
- If the order has the flag DMA_CH_NEVER set, then after
- checking the channel list, the system will return no
- found channel, thus denying the request.
-
- A board support file can call s3c24xx_dma_order_set()
- to register a complete ordering set. The routine will
- copy the data, so the original can be discarded with
- __initdata.
-
-
-Authour
--------
-
-Ben Dooks,
-Copyright (c) 2007 Ben Dooks, Simtec Electronics
-Licensed under the GPL v2
diff --git a/Documentation/arm64/legacy_instructions.txt b/Documentation/arm64/legacy_instructions.txt
index a3b3da2ec6ed..01bf3d9fac85 100644
--- a/Documentation/arm64/legacy_instructions.txt
+++ b/Documentation/arm64/legacy_instructions.txt
@@ -32,6 +32,9 @@ The default mode depends on the status of the instruction in the
architecture. Deprecated instructions should default to emulation
while obsolete instructions must be undefined by default.
+Note: Instruction emulation may not be possible in all cases. See
+individual instruction notes for further information.
+
Supported legacy instructions
-----------------------------
* SWP{B}
@@ -43,3 +46,12 @@ Default: Undef (0)
Node: /proc/sys/abi/cp15_barrier
Status: Deprecated
Default: Emulate (1)
+
+* SETEND
+Node: /proc/sys/abi/setend
+Status: Deprecated
+Default: Emulate (1)*
+Note: All the cpus on the system must have mixed endian support at EL0
+for this feature to be enabled. If a new CPU - which doesn't support mixed
+endian - is hotplugged in after this feature has been enabled, there could
+be unexpected results in the application.
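+
+The current handling of each instruction can be inspected by reading the
+corresponding node, for example:
+
+	$ cat /proc/sys/abi/setend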
diff --git a/Documentation/blackfin/Makefile b/Documentation/blackfin/Makefile
index c7e6c99bad81..03f78059d6f5 100644
--- a/Documentation/blackfin/Makefile
+++ b/Documentation/blackfin/Makefile
@@ -1,3 +1,5 @@
ifneq ($(CONFIG_BLACKFIN),)
+ifneq ($(CONFIG_BFIN_GPTIMERS),)
obj-m := gptimers-example.o
endif
+endif
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index d79b008e4a32..3f9f808b5119 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -317,10 +317,10 @@ maps this page at its virtual address.
about doing this.
The idea is, first at flush_dcache_page() time, if
- page->mapping->i_mmap is an empty tree and ->i_mmap_nonlinear
- an empty list, just mark the architecture private page flag bit.
- Later, in update_mmu_cache(), a check is made of this flag bit,
- and if set the flush is done and the flag bit is cleared.
+ page->mapping->i_mmap is an empty tree, just mark the architecture
+ private page flag bit. Later, in update_mmu_cache(), a check is
+ made of this flag bit, and if set the flush is done and the flag
+ bit is cleared.
IMPORTANT NOTE: It is often important, if you defer the flush,
that the actual flush occurs on the same CPU
diff --git a/Documentation/cgroups/00-INDEX b/Documentation/cgroups/00-INDEX
index bc461b6425a7..96ce071a3633 100644
--- a/Documentation/cgroups/00-INDEX
+++ b/Documentation/cgroups/00-INDEX
@@ -24,3 +24,5 @@ net_prio.txt
- Network priority cgroups details and usages.
resource_counter.txt
- Resource Counter API.
+unified-hierarchy.txt
+ - Description of the new/next cgroup interface.
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 4f4563277864..71daa35ec2d9 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -327,6 +327,85 @@ supported and the interface files "release_agent" and
- use_hierarchy is on by default and the cgroup file for the flag is
not created.
+- The original lower boundary, the soft limit, is defined as a limit
+ that is unset by default. As a result, the set of cgroups that
+ global reclaim prefers is opt-in, rather than opt-out. The costs
+ for optimizing these mostly negative lookups are so high that the
+ implementation, despite its enormous size, does not even provide the
+ basic desirable behavior. First off, the soft limit has no
+ hierarchical meaning. All configured groups are organized in a
+ global rbtree and treated like equal peers, regardless where they
+ are located in the hierarchy. This makes subtree delegation
+ impossible. Second, the soft limit reclaim pass is so aggressive
+ that it not just introduces high allocation latencies into the
+ system, but also impacts system performance due to overreclaim, to
+ the point where the feature becomes self-defeating.
+
+ The memory.low boundary on the other hand is a top-down allocated
+ reserve. A cgroup enjoys reclaim protection when it and all its
+ ancestors are below their low boundaries, which makes delegation of
+ subtrees possible. Secondly, new cgroups have no reserve by
+ default and in the common case most cgroups are eligible for the
+ preferred reclaim pass. This allows the new low boundary to be
+ efficiently implemented with just a minor addition to the generic
+ reclaim code, without the need for out-of-band data structures and
+ reclaim passes. Because the generic reclaim code considers all
+ cgroups except for the ones running low in the preferred first
+ reclaim pass, overreclaim of individual groups is eliminated as
+ well, resulting in much better overall workload performance.
+
+- The original high boundary, the hard limit, is defined as a strict
+ limit that can not budge, even if the OOM killer has to be called.
+ But this generally goes against the goal of making the most out of
+ the available memory. The memory consumption of workloads varies
+ during runtime, and that requires users to overcommit. But doing
+ that with a strict upper limit requires either a fairly accurate
+ prediction of the working set size or adding slack to the limit.
+ Since working set size estimation is hard and error prone, and
+ getting it wrong results in OOM kills, most users tend to err on the
+ side of a looser limit and end up wasting precious resources.
+
+ The memory.high boundary on the other hand can be set much more
+ conservatively. When hit, it throttles allocations by forcing them
+ into direct reclaim to work off the excess, but it never invokes the
+ OOM killer. As a result, a high boundary that is chosen too
+ aggressively will not terminate the processes, but instead it will
+ lead to gradual performance degradation. The user can monitor this
+ and make corrections until the minimal memory footprint that still
+ gives acceptable performance is found.
+
+ In extreme cases, with many concurrent allocations and a complete
+ breakdown of reclaim progress within the group, the high boundary
+ can be exceeded. But even then it's mostly better to satisfy the
+ allocation from the slack available in other groups or the rest of
+ the system than killing the group. Otherwise, memory.max is there
+ to limit this type of spillover and ultimately contain buggy or even
+ malicious applications.
+
+- The original control file names are unwieldy and inconsistent in
+ many different ways. For example, the upper boundary hit count is
+ exported in the memory.failcnt file, but an OOM event count has to
+ be manually counted by listening to memory.oom_control events, and
+ lower boundary / soft limit events have to be counted by first
+ setting a threshold for that value and then counting those events.
+ Also, usage and limit files encode their units in the filename.
+ That makes the filenames very long, even though this is not
+ information that a user needs to be reminded of every time they type
+ out those names.
+
+ To address these naming issues, as well as to signal clearly that
+ the new interface carries a new configuration model, the naming
+ conventions in it necessarily differ from the old interface.
+
+- The original limit files indicate the state of an unset limit with a
+ Very High Number, and a configured limit can be unset by echoing -1
+ into those files. But that very high number is implementation and
+ architecture dependent and not very descriptive. And while -1 can
+ be understood as an underflow into the highest possible value, -2 or
+ -10M etc. do not work, so it's not consistent.
+
+ memory.low, memory.high, and memory.max will use the string
+ "infinity" to indicate and set the highest possible value.
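+
+ For example, assuming the unified hierarchy is mounted at /sys/fs/cgroup
+ and a child group named "mygroup" exists, a previously configured high
+ boundary can be returned to its unset state with:
+
+	echo infinity > /sys/fs/cgroup/mygroup/memory.high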
5. Planned Changes
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index 765d7fc0e692..655750743fb0 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -37,6 +37,14 @@ controlling P state selection. These files have been added to
no_turbo: limits the driver to selecting P states below the turbo
frequency range.
+ turbo_pct: displays the percentage of the total performance that
+ is supported by hardware that is in the turbo range. This number
+ is independent of whether turbo has been disabled or not.
+
+ num_pstates: displays the number of pstates that are supported
+ by hardware. This number is independent of whether turbo has
+ been disabled or not.
+
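+ These values can simply be read from sysfs; for example, assuming the
+ driver's usual location under /sys/devices/system/cpu:
+
+	$ cat /sys/devices/system/cpu/intel_pstate/turbo_pct
+	$ cat /sys/devices/system/cpu/intel_pstate/num_pstates
+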
For contemporary Intel processors, the frequency is controlled by the
processor itself and the P-states exposed to software are related to
performance levels. The idea that frequency can be set to a single
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index 556c8665fdbf..b78564b2b201 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -23,7 +23,7 @@ Required nodes:
range of 0x200 bytes.
- syscon: the root node of the Integrator platforms must have a
- system controller node pointong to the control registers,
+ system controller node pointing to the control registers,
with the compatible string
"arm,integrator-ap-syscon"
"arm,integrator-cp-syscon"
diff --git a/Documentation/devicetree/bindings/arm/brcm-brcmstb.txt b/Documentation/devicetree/bindings/arm/brcm-brcmstb.txt
index 3c436cc4f35d..430608ec09f0 100644
--- a/Documentation/devicetree/bindings/arm/brcm-brcmstb.txt
+++ b/Documentation/devicetree/bindings/arm/brcm-brcmstb.txt
@@ -79,7 +79,9 @@ reboot
Required properties
- compatible
- The string property "brcm,brcmstb-reboot".
+ The string property "brcm,brcmstb-reboot" for 40nm/28nm chips with
+ the new SYS_CTRL interface, or "brcm,bcm7038-reboot" for 65nm
+ chips with the old SUN_TOP_CTRL interface.
- syscon
A phandle / integer array that points to the syscon node which describes
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d790f49066f3..a3089359aaa6 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -38,8 +38,6 @@ its hardware characteristcs.
AMBA markee):
- "arm,coresight-replicator"
- * id: a unique number that will identify this replicator.
-
* port or ports: same as above.
* Optional properties for ETM/PTMs:
@@ -94,8 +92,6 @@ Example:
* AMBA bus. As such no need to add "arm,primecell".
*/
compatible = "arm,coresight-replicator";
- /* this will show up in debugfs as "0.replicator" */
- id = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index b2aacbe16ed9..8b9e0a95de31 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -175,6 +175,7 @@ nodes to be present and contain the properties described below.
"marvell,pj4a"
"marvell,pj4b"
"marvell,sheeva-v5"
+ "nvidia,tegra132-denver"
"qcom,krait"
"qcom,scorpion"
- enable-method
diff --git a/Documentation/devicetree/bindings/arm/fw-cfg.txt b/Documentation/devicetree/bindings/arm/fw-cfg.txt
new file mode 100644
index 000000000000..953fb640d9c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/fw-cfg.txt
@@ -0,0 +1,72 @@
+* QEMU Firmware Configuration bindings for ARM
+
+QEMU's arm-softmmu and aarch64-softmmu emulation / virtualization targets
+provide the following Firmware Configuration interface on the "virt" machine
+type:
+
+- A write-only, 16-bit wide selector (or control) register,
+- a read-write, 64-bit wide data register.
+
+QEMU exposes the control and data register to ARM guests as memory mapped
+registers; their location is communicated to the guest's UEFI firmware in the
+DTB that QEMU places at the bottom of the guest's DRAM.
+
+The guest writes a selector value (a key) to the selector register, and then
+can read the corresponding data (produced by QEMU) via the data register. If
+the selected entry is writable, the guest can rewrite it through the data
+register.
+
+The selector register takes keys in big endian byte order.
+
+The data register allows accesses with 8, 16, 32 and 64-bit width (only at
+offset 0 of the register). Accesses larger than a byte are interpreted as
+arrays, bundled together only for better performance. The bytes constituting
+such a word, in increasing address order, correspond to the bytes that would
+have been transferred by byte-wide accesses in chronological order.
+
+The interface allows guest firmware to download various parameters and blobs
+that affect how the firmware works and what tables it installs for the guest
+OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and
+initrd images for direct kernel booting, virtual machine UUID, SMP information,
+virtual NUMA topology, and so on.
+
+The authoritative registry of the valid selector values and their meanings is
+the QEMU source code; the structure of the data blobs corresponding to the
+individual key values is also defined in the QEMU source code.
+
+The presence of the registers can be verified by selecting the "signature" blob
+with key 0x0000, and reading four bytes from the data register. The returned
+signature is "QEMU".
+
+The outermost protocol (involving the write / read sequences of the control and
+data registers) is expected to be versioned, and/or described by feature bits.
+The interface revision / feature bitmap can be retrieved with key 0x0001. The
+blob to be read from the data register has size 4, and it is to be interpreted
+as a uint32_t value in little endian byte order. The current value
+(corresponding to the above outer protocol) is zero.
+
+The guest kernel is not expected to use these registers (although it is
+certainly allowed to); the device tree bindings are documented here because
+this is where device tree bindings reside in general.
+
+Required properties:
+
+- compatible: "qemu,fw-cfg-mmio".
+
+- reg: the MMIO region used by the device.
+ * Bytes 0x0 to 0x7 cover the data register.
+ * Bytes 0x8 to 0x9 cover the selector register.
+ * Further registers may be appended to the region in case of future interface
+ revisions / feature bits.
+
+Example:
+
+/ {
+ #size-cells = <0x2>;
+ #address-cells = <0x2>;
+
+ fw-cfg@9020000 {
+ compatible = "qemu,fw-cfg-mmio";
+ reg = <0x0 0x9020000 0x0 0xa>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 8112d0c3675a..c97484b73e72 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -32,12 +32,16 @@ Main node required properties:
The 3rd cell is the flags, encoded as follows:
bits[3:0] trigger type and level flags.
1 = low-to-high edge triggered
- 2 = high-to-low edge triggered
+ 2 = high-to-low edge triggered (invalid for SPIs)
4 = active high level-sensitive
- 8 = active low level-sensitive
+ 8 = active low level-sensitive (invalid for SPIs).
bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of
the 8 possible cpus attached to the GIC. A bit set to '1' indicated
the interrupt is wired to that CPU. Only valid for PPI interrupts.
+ Also note that the configurability of PPI interrupts is IMPLEMENTATION
+ DEFINED and as such not guaranteed to be present (most SoCs available
+ in 2014 seem to ignore the setting of this flag and use the hardware
+ default value).
- reg : Specifies base physical address(s) and size of the GIC registers. The
first region is the GIC distributor register base and size. The 2nd region is
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 292ef7ca3058..0dbabe9a6b0a 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -57,6 +57,16 @@ Optional properties:
- cache-id-part: cache id part number to be used if it is not present
on hardware
- wt-override: If present then L2 is forced to Write through mode
+- arm,double-linefill : Override double linefill enable setting. Enable if
+ non-zero, disable if zero.
+- arm,double-linefill-incr : Override double linefill on INCR read. Enable
+ if non-zero, disable if zero.
+- arm,double-linefill-wrap : Override double linefill on WRAP read. Enable
+ if non-zero, disable if zero.
+- arm,prefetch-drop : Override prefetch drop enable setting. Enable if non-zero,
+ disable if zero.
+- arm,prefetch-offset : Override prefetch offset value. Valid values are
+ 0-7, 15, 23, and 31.
Example:
diff --git a/Documentation/devicetree/bindings/arm/msm/timer.txt b/Documentation/devicetree/bindings/arm/msm/timer.txt
index c6ef8f13dc7e..74607b6c1117 100644
--- a/Documentation/devicetree/bindings/arm/msm/timer.txt
+++ b/Documentation/devicetree/bindings/arm/msm/timer.txt
@@ -8,7 +8,7 @@ Properties:
"qcom,kpss-timer" - krait subsystem
"qcom,scss-timer" - scorpion subsystem
-- interrupts : Interrupts for the the debug timer, the first general purpose
+- interrupts : Interrupts for the debug timer, the first general purpose
timer, and optionally a second general purpose timer in that
order.
diff --git a/Documentation/devicetree/bindings/arm/sprd.txt b/Documentation/devicetree/bindings/arm/sprd.txt
new file mode 100644
index 000000000000..31a629dc75b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sprd.txt
@@ -0,0 +1,11 @@
+Spreadtrum SoC Platforms Device Tree Bindings
+----------------------------------------------------
+
+Sharkl64 is a Spreadtrum SoC platform which is based
+on an ARM 64-bit processor.
+
+An SC9836 openphone board with an SC9836 SoC based on the
+Sharkl64 platform shall have the following properties.
+
+Required root node properties:
+ - compatible = "sprd,sc9836-openphone", "sprd,sc9836";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
index 234406d41c12..067c9790062f 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -1,7 +1,10 @@
NVIDIA Tegra AHB
Required properties:
-- compatible : "nvidia,tegra20-ahb" or "nvidia,tegra30-ahb"
+- compatible : For Tegra20, must contain "nvidia,tegra20-ahb". For
+ Tegra30, must contain "nvidia,tegra30-ahb". Otherwise, must contain
+ '"nvidia,<chip>-ahb", "nvidia,tegra30-ahb"' where <chip> is tegra124,
+ tegra132, or tegra210.
- reg : Should contain 1 register ranges(address and length)
Example:
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
index 2fd50511ab4b..02c27004d4a8 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
@@ -6,7 +6,11 @@ modes. It provides power-gating controllers for SoC and CPU power-islands.
Required properties:
- name : Should be pmc
-- compatible : Should contain "nvidia,tegra<chip>-pmc".
+- compatible : For Tegra20, must contain "nvidia,tegra20-pmc". For Tegra30,
+ must contain "nvidia,tegra30-pmc". For Tegra114, must contain
+ "nvidia,tegra114-pmc". For Tegra124, must contain "nvidia,tegra124-pmc".
+ Otherwise, must contain "nvidia,<chip>-pmc", plus at least one of the
+ above, where <chip> is tegra132.
- reg : Offset and length of the register set for the device
- clocks : Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
diff --git a/Documentation/devicetree/bindings/arm/versatile-sysreg.txt b/Documentation/devicetree/bindings/arm/versatile-sysreg.txt
new file mode 100644
index 000000000000..a4f15262d717
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/versatile-sysreg.txt
@@ -0,0 +1,10 @@
+ARM Versatile system registers
+--------------------------------------
+
+This is a system control registers block, providing multiple low level
+platform functions like board detection and identification, software
+interrupt generation, MMC and NOR Flash control etc.
+
+Required node properties:
+- compatible value : = "arm,versatile-sysreg", "syscon"
+- reg : physical base address and the size of the registers window
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 4ab09f2202d4..c2340eeeb97f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -37,9 +37,10 @@ Required properties when using sub-nodes:
Sub-nodes required properties:
-- reg : the port number
-- phys : reference to the SATA PHY node
-
+- reg : the port number
+And at least one of the following properties:
+- phys : reference to the SATA PHY node
+- target-supply : regulator for SATA target power
Examples:
sata@ffe08000 {
@@ -68,10 +69,12 @@ With sub-nodes:
sata0: sata-port@0 {
reg = <0>;
phys = <&sata_phy 0>;
+ target-supply = <&reg_sata0>;
};
sata1: sata-port@1 {
reg = <1>;
phys = <&sata_phy 1>;
+			target-supply = <&reg_sata1>;
};
};
diff --git a/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt b/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
index 93986a5a8018..3bacc8e0931e 100644
--- a/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
+++ b/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
@@ -9,7 +9,7 @@ Properties:
Compatibility with many Cavium evaluation boards.
-- reg: The base address of the the CF chip select banks. Depending on
+- reg: The base address of the CF chip select banks. Depending on
the device configuration, there may be one or two banks.
- cavium,bus-width: The width of the connection to the CF devices. Valid
diff --git a/Documentation/devicetree/bindings/ata/tegra-sata.txt b/Documentation/devicetree/bindings/ata/tegra-sata.txt
index 946f2072570b..66c83c3e8915 100644
--- a/Documentation/devicetree/bindings/ata/tegra-sata.txt
+++ b/Documentation/devicetree/bindings/ata/tegra-sata.txt
@@ -1,7 +1,9 @@
Tegra124 SoC SATA AHCI controller
Required properties :
-- compatible : "nvidia,tegra124-ahci".
+- compatible : For Tegra124, must contain "nvidia,tegra124-ahci". Otherwise,
+ must contain '"nvidia,<chip>-ahci", "nvidia,tegra124-ahci"', where <chip>
+ is tegra132.
- reg : Should contain 2 entries:
- AHCI register set (SATA BAR5)
- SATA register set
diff --git a/Documentation/devicetree/bindings/c6x/dscr.txt b/Documentation/devicetree/bindings/c6x/dscr.txt
index b0e97144cfb1..92672235de57 100644
--- a/Documentation/devicetree/bindings/c6x/dscr.txt
+++ b/Documentation/devicetree/bindings/c6x/dscr.txt
@@ -12,7 +12,7 @@ configuration register for writes. These configuration register may be used to
enable (and disable in some cases) SoC pin drivers, select peripheral clock
sources (internal or pin), etc. In some cases, a configuration register is
write once or the individual bits are write once. In addition to device config,
-the DSCR block may provide registers which which are used to reset peripherals,
+the DSCR block may provide registers which are used to reset peripherals,
provide device ID information, provide ethernet MAC addresses, as well as other
miscellaneous functions.
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
new file mode 100644
index 000000000000..b54bf3a2ff57
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -0,0 +1,110 @@
+
+* Samsung Exynos PPMU (Platform Performance Monitoring Unit) device
+
+The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
+each IP. PPMU provides the primitive values to get performance data. These
+PPMU events provide information about the SoC's behavior, which you may
+use to analyze system performance, to make behavior visible and to count
+usages of each IP (DMC, CPU, RIGHTBUS, LEFTBUS, CAM interface, LCD, G3D, MFC).
+The Exynos PPMU driver uses the devfreq-event class to provide event data
+to various devfreq devices. The devfreq devices use the event data when
+determining the current state of each IP.
+
+Required properties:
+- compatible: Should be "samsung,exynos-ppmu".
+- reg: physical base address of each PPMU and length of memory mapped region.
+
+Optional properties:
+- clock-names : the name of clock used by the PPMU, "ppmu"
+- clocks : phandles for clock specified in "clock-names" property
+- #clock-cells: should be 1.
+
+Example1 : PPMU nodes in exynos3250.dtsi are listed below.
+
+ ppmu_dmc0: ppmu_dmc0@106a0000 {
+ compatible = "samsung,exynos-ppmu";
+ reg = <0x106a0000 0x2000>;
+ status = "disabled";
+ };
+
+ ppmu_dmc1: ppmu_dmc1@106b0000 {
+ compatible = "samsung,exynos-ppmu";
+ reg = <0x106b0000 0x2000>;
+ status = "disabled";
+ };
+
+ ppmu_cpu: ppmu_cpu@106c0000 {
+ compatible = "samsung,exynos-ppmu";
+ reg = <0x106c0000 0x2000>;
+ status = "disabled";
+ };
+
+ ppmu_rightbus: ppmu_rightbus@112a0000 {
+ compatible = "samsung,exynos-ppmu";
+ reg = <0x112a0000 0x2000>;
+ clocks = <&cmu CLK_PPMURIGHT>;
+ clock-names = "ppmu";
+ status = "disabled";
+ };
+
+ ppmu_leftbus: ppmu_leftbus0@116a0000 {
+ compatible = "samsung,exynos-ppmu";
+ reg = <0x116a0000 0x2000>;
+ clocks = <&cmu CLK_PPMULEFT>;
+ clock-names = "ppmu";
+ status = "disabled";
+ };
+
+Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
+
+ &ppmu_dmc0 {
+ status = "okay";
+
+ events {
+ ppmu_dmc0_3: ppmu-event3-dmc0 {
+ event-name = "ppmu-event3-dmc0";
+ };
+
+ ppmu_dmc0_2: ppmu-event2-dmc0 {
+ event-name = "ppmu-event2-dmc0";
+ };
+
+ ppmu_dmc0_1: ppmu-event1-dmc0 {
+ event-name = "ppmu-event1-dmc0";
+ };
+
+ ppmu_dmc0_0: ppmu-event0-dmc0 {
+ event-name = "ppmu-event0-dmc0";
+ };
+ };
+ };
+
+ &ppmu_dmc1 {
+ status = "okay";
+
+ events {
+ ppmu_dmc1_3: ppmu-event3-dmc1 {
+ event-name = "ppmu-event3-dmc1";
+ };
+ };
+ };
+
+ &ppmu_leftbus {
+ status = "okay";
+
+ events {
+ ppmu_leftbus_3: ppmu-event3-leftbus {
+ event-name = "ppmu-event3-leftbus";
+ };
+ };
+ };
+
+ &ppmu_rightbus {
+ status = "okay";
+
+ events {
+ ppmu_rightbus_3: ppmu-event3-rightbus {
+ event-name = "ppmu-event3-rightbus";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index df0f48bcf75a..f7e21b1c2a05 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -1,6 +1,6 @@
* Renesas R-Car DMA Controller Device Tree bindings
-Renesas R-Car Generation 2 SoCs have have multiple multi-channel DMA
+Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
controller instances named DMAC capable of serving multiple clients. Channels
can be dedicated to specific clients or shared between a large number of
clients.
diff --git a/Documentation/devicetree/bindings/drm/atmel/hlcdc-dc.txt b/Documentation/devicetree/bindings/drm/atmel/hlcdc-dc.txt
new file mode 100644
index 000000000000..ebc1a914bda3
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/atmel/hlcdc-dc.txt
@@ -0,0 +1,53 @@
+Device-Tree bindings for Atmel's HLCDC (High LCD Controller) DRM driver
+
+The Atmel HLCDC Display Controller is a subdevice of the HLCDC MFD device.
+See ../mfd/atmel-hlcdc.txt for more details.
+
+Required properties:
+ - compatible: value should be "atmel,hlcdc-display-controller"
+ - pinctrl-names: the pin control state names. Should contain "default".
+ - pinctrl-0: should contain the default pinctrl states.
+ - #address-cells: should be set to 1.
+ - #size-cells: should be set to 0.
+
+Required children nodes:
+  Child nodes encode the available output ports and their connections
+  to external devices using the OF graph representation (see ../graph.txt).
+ At least one port node is required.
+
+Example:
+
+ hlcdc: hlcdc@f0030000 {
+ compatible = "atmel,sama5d3-hlcdc";
+ reg = <0xf0030000 0x2000>;
+ interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+		clock-names = "periph_clk", "sys_clk", "slow_clk";
+ status = "disabled";
+
+ hlcdc-display-controller {
+ compatible = "atmel,hlcdc-display-controller";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_base &pinctrl_lcd_rgb888>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ hlcdc_panel_output: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_input>;
+ };
+ };
+ };
+
+ hlcdc_pwm: hlcdc-pwm {
+ compatible = "atmel,hlcdc-pwm";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_pwm>;
+ #pwm-cells = <3>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/bridge/dw_hdmi.txt b/Documentation/devicetree/bindings/drm/bridge/dw_hdmi.txt
new file mode 100644
index 000000000000..a905c1413558
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/bridge/dw_hdmi.txt
@@ -0,0 +1,50 @@
+DesignWare HDMI bridge bindings
+
+Required properties:
+- compatible: platform specific such as:
+ * "snps,dw-hdmi-tx"
+ * "fsl,imx6q-hdmi"
+ * "fsl,imx6dl-hdmi"
+ * "rockchip,rk3288-dw-hdmi"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The HDMI interrupt number
+- clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks,
+  as described in Documentation/devicetree/bindings/clock/clock-bindings.txt.
+  The clocks are SoC specific; the clock-names should be "iahb" and "isfr".
+-port@[X]: SoC specific port nodes with endpoint definitions as defined
+ in Documentation/devicetree/bindings/media/video-interfaces.txt,
+ please refer to the SoC specific binding document:
+ * Documentation/devicetree/bindings/drm/imx/hdmi.txt
+ * Documentation/devicetree/bindings/video/dw_hdmi-rockchip.txt
+
+Optional properties
+- reg-io-width: the width (in bytes) of the register accesses: 1 or 4; defaults to 1 if not present
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
+
+Example:
+ hdmi: hdmi@0120000 {
+ compatible = "fsl,imx6q-hdmi";
+ reg = <0x00120000 0x9000>;
+ interrupts = <0 115 0x04>;
+ gpr = <&gpr>;
+ clocks = <&clks 123>, <&clks 124>;
+ clock-names = "iahb", "isfr";
+ ddc-i2c-bus = <&i2c2>;
+
+ port@0 {
+ reg = <0>;
+
+ hdmi_mux_0: endpoint {
+ remote-endpoint = <&ipu1_di0_hdmi>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ hdmi_mux_1: endpoint {
+ remote-endpoint = <&ipu1_di1_hdmi>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
index aca917fe2ba7..a29a55f3d937 100644
--- a/Documentation/devicetree/bindings/drm/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -2,6 +2,8 @@ Qualcomm adreno/snapdragon hdmi output
Required properties:
- compatible: one of the following
+ * "qcom,hdmi-tx-8084"
+ * "qcom,hdmi-tx-8074"
* "qcom,hdmi-tx-8660"
* "qcom,hdmi-tx-8960"
- reg: Physical base address and length of the controller's registers
diff --git a/Documentation/devicetree/bindings/fpga/altera-socfpga-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/altera-socfpga-fpga-mgr.txt
new file mode 100644
index 000000000000..9b027a615486
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-socfpga-fpga-mgr.txt
@@ -0,0 +1,17 @@
+Altera SOCFPGA FPGA Manager
+
+Required properties:
+- compatible : should contain "altr,socfpga-fpga-mgr"
+- reg : base address and size for memory mapped io.
+ - The first index is for FPGA manager register access.
+ - The second index is for writing FPGA configuration data.
+- interrupts : interrupt for the FPGA Manager device.
+
+Example:
+
+ hps_0_fpgamgr: fpgamgr@0xff706000 {
+ compatible = "altr,socfpga-fpga-mgr";
+ reg = <0xFF706000 0x1000
+ 0xFFB90000 0x1000>;
+ interrupts = <0 175 4>;
+ };
diff --git a/Documentation/devicetree/bindings/fuse/nvidia,tegra20-fuse.txt b/Documentation/devicetree/bindings/fuse/nvidia,tegra20-fuse.txt
index d8c98c7614d0..23e1d3194174 100644
--- a/Documentation/devicetree/bindings/fuse/nvidia,tegra20-fuse.txt
+++ b/Documentation/devicetree/bindings/fuse/nvidia,tegra20-fuse.txt
@@ -1,11 +1,11 @@
NVIDIA Tegra20/Tegra30/Tegra114/Tegra124 fuse block.
Required properties:
-- compatible : should be:
- "nvidia,tegra20-efuse"
- "nvidia,tegra30-efuse"
- "nvidia,tegra114-efuse"
- "nvidia,tegra124-efuse"
+- compatible : For Tegra20, must contain "nvidia,tegra20-efuse". For Tegra30,
+ must contain "nvidia,tegra30-efuse". For Tegra114, must contain
+ "nvidia,tegra114-efuse". For Tegra124, must contain "nvidia,tegra124-efuse".
+ Otherwise, must contain "nvidia,<chip>-efuse", plus one of the above, where
+ <chip> is tegra132.
Details:
nvidia,tegra20-efuse: Tegra20 requires using APB DMA to read the fuse data
due to a hardware bug. Tegra20 also lacks certain information which is
diff --git a/Documentation/devicetree/bindings/gpio/fujitsu,mb86s70-gpio.txt b/Documentation/devicetree/bindings/gpio/fujitsu,mb86s70-gpio.txt
new file mode 100644
index 000000000000..bef353f370d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/fujitsu,mb86s70-gpio.txt
@@ -0,0 +1,20 @@
+Fujitsu MB86S7x GPIO Controller
+-------------------------------
+
+Required properties:
+- compatible: Should be "fujitsu,mb86s70-gpio"
+- reg: Base address and length of register space
+- clocks: Specify the clock
+- gpio-controller: Marks the device node as a gpio controller.
+- #gpio-cells: Should be <2>. The first cell is the pin number and the
+ second cell is used to specify optional parameters:
+ - bit 0 specifies polarity (0 for normal, 1 for inverted).
+
+Examples:
+ gpio0: gpio@31000000 {
+ compatible = "fujitsu,mb86s70-gpio";
+ reg = <0 0x31000000 0x10000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ clocks = <&clk 0 2 1>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-max732x.txt b/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
new file mode 100644
index 000000000000..5fdc843b4542
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
@@ -0,0 +1,59 @@
+* MAX732x-compatible I/O expanders
+
+Required properties:
+ - compatible: Should be one of the following:
+ - "maxim,max7319": For the Maxim MAX7319
+ - "maxim,max7320": For the Maxim MAX7320
+ - "maxim,max7321": For the Maxim MAX7321
+ - "maxim,max7322": For the Maxim MAX7322
+ - "maxim,max7323": For the Maxim MAX7323
+ - "maxim,max7324": For the Maxim MAX7324
+ - "maxim,max7325": For the Maxim MAX7325
+ - "maxim,max7326": For the Maxim MAX7326
+ - "maxim,max7327": For the Maxim MAX7327
+ - reg: I2C slave address for this device.
+ - gpio-controller: Marks the device node as a GPIO controller.
+ - #gpio-cells: Should be 2.
+ - first cell is the GPIO number
+ - second cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
+ Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
+
+Optional properties:
+
+ The I/O expander can detect input state changes, and thus optionally act as
+ an interrupt controller. When the expander interrupt line is connected all the
+ following properties must be set. For more information please see the
+ interrupt controller device tree bindings documentation available at
+ Documentation/devicetree/bindings/interrupt-controller/interrupts.txt.
+
+ - interrupt-controller: Identifies the node as an interrupt controller.
+ - #interrupt-cells: Number of cells to encode an interrupt source, shall be 2.
+ - first cell is the pin number
+ - second cell is used to specify flags
+ - interrupt-parent: phandle of the parent interrupt controller.
+ - interrupts: Interrupt specifier for the controller's interrupt.
+
+Please refer to gpio.txt in this directory for details of the common GPIO
+bindings used by client devices.
+
+Example 1. MAX7325 with interrupt support enabled (CONFIG_GPIO_MAX732X_IRQ=y):
+
+ expander: max7325@6d {
+ compatible = "maxim,max7325";
+ reg = <0x6d>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+Example 2. MAX7325 with interrupt support disabled (CONFIG_GPIO_MAX732X_IRQ=n):
+
+ expander: max7325@6d {
+ compatible = "maxim,max7325";
+ reg = <0x6d>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
index d63194a2c848..ada4e2973323 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
@@ -39,7 +39,7 @@ Optional Properties:
- lines-initial-states: Bitmask that specifies the initial state of each
line. When a bit is set to zero, the corresponding line will be initialized to
the input (pulled-up) state. When the bit is set to one, the line will be
- initialized the the low-level output state. If the property is not specified
+  initialized to the low-level output state. If the property is not specified
all lines will be initialized to the input state.
The I/O expander can detect input state changes, and thus optionally act as
diff --git a/Documentation/devicetree/bindings/gpio/gpio-sx150x.txt b/Documentation/devicetree/bindings/gpio/gpio-sx150x.txt
new file mode 100644
index 000000000000..ba2bb84eeac3
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-sx150x.txt
@@ -0,0 +1,40 @@
+SEMTECH SX150x GPIO expander bindings
+
+
+Required properties:
+
+- compatible: should be "semtech,sx1506q",
+ "semtech,sx1508q",
+ "semtech,sx1509q".
+
+- reg: The I2C slave address for this device.
+
+- interrupt-parent: phandle of the parent interrupt controller.
+
+- interrupts: Interrupt specifier for the controller's interrupt.
+
+- #gpio-cells: Should be 2. The first cell is the GPIO number and the
+ second cell is used to specify optional parameters:
+ bit 0: polarity (0: normal, 1: inverted)
+
+- gpio-controller: Marks the device as a GPIO controller.
+
+- interrupt-controller: Marks the device as an interrupt controller.
+
+The GPIO expander can optionally be used as an interrupt controller, in
+which case it uses the default two cell specifier as described in
+Documentation/devicetree/bindings/interrupt-controller/interrupts.txt.
+
+Example:
+
+ i2c_gpio_expander@20{
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "semtech,sx1506q";
+ reg = <0x20>;
+ interrupt-parent = <&gpio_1>;
+ interrupts = <16 0>;
+
+ gpio-controller;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt b/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
new file mode 100644
index 000000000000..dae130060537
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
@@ -0,0 +1,32 @@
+APM X-Gene Standby GPIO controller bindings
+
+This is a gpio controller in the standby domain.
+
+There are 20 GPIO pins, numbered 0..21 (there is no GPIO_DS14 or GPIO_DS15).
+Only GPIO_DS8..GPIO_DS13 support interrupts. The IRQ mapping
+is currently 1-to-1 on interrupts 0x28 through 0x2d.
+
+Required properties:
+- compatible: "apm,xgene-gpio-sb" for the X-Gene Standby GPIO controller
+- reg: Physical base address and size of the controller's registers
+- #gpio-cells: Should be two.
+ - first cell is the pin number
+ - second cell is used to specify the gpio polarity:
+ 0 = active high
+ 1 = active low
+- gpio-controller: Marks the device node as a GPIO controller.
+- interrupts: Shall contain exactly 6 interrupts.
+
+Example:
+ sbgpio: sbgpio@17001000 {
+ compatible = "apm,xgene-gpio-sb";
+ reg = <0x0 0x17001000 0x0 0x400>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupts = <0x0 0x28 0x1>,
+ <0x0 0x29 0x1>,
+ <0x0 0x2a 0x1>,
+ <0x0 0x2b 0x1>,
+ <0x0 0x2c 0x1>,
+ <0x0 0x2d 0x1>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index b9bd1d64cfa6..f7a158d85862 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -69,7 +69,8 @@ GPIO pin number, and GPIO flags as accepted by the "qe_pio_e" gpio-controller.
----------------------------------
A gpio-specifier should contain a flag indicating the GPIO polarity; active-
-high or active-low. If it does, the follow best practices should be followed:
+high or active-low. If it does, the following best practices should be
+followed:
The gpio-specifier's polarity flag should represent the physical level at the
GPIO controller that achieves (or represents, for inputs) a logically asserted
@@ -147,7 +148,7 @@ contains information structures as follows:
numeric-gpio-range ::=
<pinctrl-phandle> <gpio-base> <pinctrl-base> <count>
named-gpio-range ::= <pinctrl-phandle> <gpio-base> '<0 0>'
- gpio-phandle : phandle to pin controller node.
+ pinctrl-phandle : phandle to pin controller node
gpio-base : Base GPIO ID in the GPIO controller
pinctrl-base : Base pinctrl pin ID in the pin controller
count : The number of GPIOs/pins in this range
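As an illustration of the numeric-gpio-range form, the sketch below uses
placeholder pin controller phandles and numbers: GPIOs 0..15 map to pins
20..35 of pinctrl_a and GPIOs 16..25 map to pins 50..59 of pinctrl_b:

	gpio_a: gpio-controller@1000 {
		gpio-controller;
		#gpio-cells = <2>;
		gpio-ranges = <&pinctrl_a 0 20 16>, <&pinctrl_b 16 50 10>;
	};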
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
index b2afdb27adeb..67a2e4e414a5 100644
--- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
@@ -3,8 +3,8 @@
Required properties:
- compatible : Should be "intel,pxa25x-gpio", "intel,pxa26x-gpio",
"intel,pxa27x-gpio", "intel,pxa3xx-gpio",
- "marvell,pxa93x-gpio", "marvell,mmp-gpio" or
- "marvell,mmp2-gpio".
+ "marvell,pxa93x-gpio", "marvell,mmp-gpio",
+		"marvell,mmp2-gpio" or "marvell,pxa1928-gpio".
- reg : Address and length of the register set for the device
- interrupts : Should be the port interrupt shared by all gpio pins.
There're three gpio interrupts in arch-pxa, and they're gpio0,
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index 4c32ef0b7db8..009f4bfa1590 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -197,7 +197,9 @@ of the following host1x client modules:
- sor: serial output resource
Required properties:
- - compatible: "nvidia,tegra124-sor"
+ - compatible: For Tegra124, must contain "nvidia,tegra124-sor". Otherwise,
+ must contain '"nvidia,<chip>-sor", "nvidia,tegra124-sor"', where <chip>
+ is tegra132.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.
@@ -222,7 +224,9 @@ of the following host1x client modules:
   - nvidia,dpaux: phandle to a DisplayPort AUX interface
- dpaux: DisplayPort AUX interface
- - compatible: "nvidia,tegra124-dpaux"
+ - compatible: For Tegra124, must contain "nvidia,tegra124-dpaux". Otherwise,
+ must contain '"nvidia,<chip>-dpaux", "nvidia,tegra124-dpaux"', where
+ <chip> is tegra132.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index c99eb34e640b..6b1d75f1a529 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -83,6 +83,22 @@ sti-hda:
- clock-names: names of the clocks listed in clocks property in the same
order.
+sti-dvo:
+  must be a child of sti-tvout
+  Required properties:
+ - compatible: "st,stih<chip>-dvo"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - reg-names: names of the mapped memory regions listed in regs property in
+ the same order.
+  - clocks: from common clock binding: handles to the clocks needed by the
+    hardware IP; the number of clocks may depend on the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+ - pinctrl-0: pin control handle
+  - pinctrl-names: names of the pin control states to use
+ - sti,panel: phandle of the panel connected to the DVO output
+
sti-hqvdp:
must be a child of sti-display-subsystem
Required properties:
@@ -198,6 +214,19 @@ Example:
clock-names = "pix", "hddac";
clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
};
+
+ sti-dvo@8d00400 {
+ compatible = "st,stih407-dvo";
+ reg = <0x8d00400 0x200>;
+ reg-names = "dvo-reg";
+ clock-names = "dvo_pix", "dvo",
+ "main_parent", "aux_parent";
+ clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
+ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dvo>;
+ sti,panel = <&panel_dvo>;
+ };
};
sti-hqvdp@9c000000 {
diff --git a/Documentation/devicetree/bindings/graph.txt b/Documentation/devicetree/bindings/graph.txt
index 1a69c078adf2..fcb1c6a4787b 100644
--- a/Documentation/devicetree/bindings/graph.txt
+++ b/Documentation/devicetree/bindings/graph.txt
@@ -19,7 +19,7 @@ type of the connections, they just map their existence. Specific properties
may be described by specialized bindings depending on the type of connection.
To see how this binding applies to video pipelines, for example, see
-Documentation/device-tree/bindings/media/video-interfaces.txt.
+Documentation/devicetree/bindings/media/video-interfaces.txt.
Here the ports describe data interfaces, and the links between them are
the connecting data buses. A single port with multiple connections can
correspond to multiple devices being connected to the same physical bus.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt
index 437e0db3823c..4c26fda3844a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
compatible = "st,comms-ssc4-i2c";
reg = <0xfed40000 0x110>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&CLK_S_ICN_REG_0>;
+ clocks = <&clk_s_a0_ls CLK_ICN_REG>;
clock-names = "ssc";
clock-frequency = <400000>;
pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
index 87507e9ce6db..656716b72cc4 100644
--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
@@ -1,11 +1,11 @@
NVIDIA Tegra20/Tegra30/Tegra114 I2C controller driver.
Required properties:
-- compatible : should be:
- "nvidia,tegra114-i2c"
- "nvidia,tegra30-i2c"
- "nvidia,tegra20-i2c"
- "nvidia,tegra20-i2c-dvc"
+- compatible : For Tegra20, must be one of "nvidia,tegra20-i2c-dvc" or
+ "nvidia,tegra20-i2c". For Tegra30, must be "nvidia,tegra30-i2c".
+ For Tegra114, must be "nvidia,tegra114-i2c". Otherwise, must be
+ "nvidia,<chip>-i2c", plus at least one of the above, where <chip> is
+ tegra124, tegra132, or tegra210.
Details of compatible are as follows:
nvidia,tegra20-i2c-dvc: Tegra20 has specific I2C controller called as DVC I2C
controller. This only support master mode of I2C communication. Register
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 9f4e3824e71e..4dcd88d5f7ca 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -9,6 +9,7 @@ document for it just like any other devices.
Compatible Vendor / Chip
========== =============
+abracon,abb5zes3 AB-RTCMC-32.768kHz-B5ZE-S3: Real Time Clock/Calendar Module with I2C Interface
ad,ad7414 SMBus/I2C Digital Temperature Sensor in 6-Pin SOT with SMBus Alert and Over Temperature Pin
ad,adm9240 ADM9240: Complete System Hardware Monitor for uProcessor-Based Systems
adi,adt7461 +/-1C TDM Extended Temp Range I.C
@@ -34,6 +35,7 @@ atmel,24c512 i2c serial eeprom (24cxx)
atmel,24c1024 i2c serial eeprom (24cxx)
atmel,at97sc3204t i2c trusted platform module (TPM)
capella,cm32181 CM32181: Ambient Light Sensor
+capella,cm3232 CM3232: Ambient Light Sensor
catalyst,24c32 i2c serial eeprom
cirrus,cs42l51 Cirrus Logic CS42L51 audio codec
dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
@@ -47,6 +49,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O
dallas,ds75 Digital Thermometer and Thermostat
dlg,da9053 DA9053: flexible system level PMIC with multicore support
+dlg,da9063 DA9063: system PMIC for quad-core application processors
epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
diff --git a/Documentation/devicetree/bindings/iio/adc/cc10001_adc.txt b/Documentation/devicetree/bindings/iio/adc/cc10001_adc.txt
new file mode 100644
index 000000000000..904f76de9055
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/cc10001_adc.txt
@@ -0,0 +1,22 @@
+* Cosmic Circuits - Analog to Digital Converter (CC-10001-ADC)
+
+Required properties:
+ - compatible: Should be "cosmic,10001-adc"
+ - reg: Should contain adc registers location and length.
+ - clock-names: Should contain "adc".
+ - clocks: Should contain a clock specifier for each entry in clock-names
+ - vref-supply: The regulator supplying the ADC reference voltage.
+
+Optional properties:
+ - adc-reserved-channels: Bitmask of reserved channels,
+ i.e. channels that cannot be used by the OS.
+
+Example:
+adc: adc@18101600 {
+ compatible = "cosmic,10001-adc";
+ reg = <0x18101600 0x24>;
+ adc-reserved-channels = <0x2>;
+ clocks = <&adc_clk>;
+ clock-names = "adc";
+ vref-supply = <&reg_1v8>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt
new file mode 100644
index 000000000000..0fb46137f936
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt
@@ -0,0 +1,129 @@
+Qualcomm's SPMI PMIC voltage ADC
+
+SPMI PMIC voltage ADC (VADC) provides interface to clients to read
+voltage. The VADC is a 15-bit sigma-delta ADC.
+
+VADC node:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should contain "qcom,spmi-vadc".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: VADC base address and length in the SPMI PMIC register map.
+
+- #address-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be one. Child node 'reg' property should define ADC
+ channel number.
+
+- #size-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be zero.
+
+- #io-channel-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be one. For details about IIO bindings see:
+ Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+- interrupts:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: End of conversion interrupt.
+
+Channel node properties:
+
+- reg:
+ Usage: required
+ Value type: <u32>
+ Definition: ADC channel number.
+ See include/dt-bindings/iio/qcom,spmi-vadc.h
+
+- qcom,decimation:
+ Usage: optional
+ Value type: <u32>
+ Definition: This parameter is used to decrease ADC sampling rate.
+ Quicker measurements can be made by reducing decimation ratio.
+ Valid values are 512, 1024, 2048, 4096.
+ If property is not found, default value of 512 will be used.
+
+- qcom,pre-scaling:
+ Usage: optional
+ Value type: <u32 array>
+ Definition: Used for scaling the channel input signal before the signal is
+		    fed to the VADC. This property records the pre-determined
+		    ratio so that it can be used for post scaling. Select one from
+ the following options.
+ <1 1>, <1 3>, <1 4>, <1 6>, <1 20>, <1 8>, <10 81>, <1 10>
+ If property is not found default value depending on chip will be used.
+
+- qcom,ratiometric:
+ Usage: optional
+ Value type: <empty>
+ Definition: Channel calibration type. If this property is specified
+ VADC will use the VDD reference (1.8V) and GND for channel
+ calibration. If property is not found, channel will be
+ calibrated with 0.625V and 1.25V reference channels, also
+ known as absolute calibration.
+
+- qcom,hw-settle-time:
+ Usage: optional
+ Value type: <u32>
+ Definition: Time between AMUX getting configured and the ADC starting
+ conversion. Delay = 100us * (value) for value < 11, and
+ 2ms * (value - 10) otherwise.
+ Valid values are: 0, 100, 200, 300, 400, 500, 600, 700, 800,
+ 900 us and 1, 2, 4, 6, 8, 10 ms
+ If property is not found, channel will use 0us.
+
+- qcom,avg-samples:
+ Usage: optional
+ Value type: <u32>
+ Definition: Number of samples to be used for measurement.
+ Averaging provides the option to obtain a single measurement
+ from the ADC that is an average of multiple samples. The value
+ selected is 2^(value).
+ Valid values are: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512
+ If property is not found, 1 sample will be used.
+
+NOTE:
+
+The following channels, also known as reference point channels, are used for
+result calibration, and their channel configuration nodes should be defined:
+VADC_REF_625MV and/or VADC_SPARE1 (based on PMIC version), VADC_REF_1250MV,
+VADC_GND_REF and VADC_VDD_VADC.
+
+Example:
+
+ /* VADC node */
+ pmic_vadc: vadc@3100 {
+ compatible = "qcom,spmi-vadc";
+ reg = <0x3100 0x100>;
+ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+
+ /* Channel node */
+ usb_id_nopull {
+ reg = <VADC_LR_MUX10_USB_ID>;
+ qcom,decimation = <512>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,avg-samples = <1>;
+ qcom,pre-scaling = <1 3>;
+ };
+ };
+
+ /* IIO client node */
+ usb {
+ io-channels = <&pmic_vadc VADC_LR_MUX10_USB_ID>;
+ io-channel-names = "vadc";
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index d9ee909d2b78..d71258e2d456 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -59,7 +59,7 @@ Optional properties:
Each child node represents one channel and has the following
properties:
Required properties:
- * reg: Pair of pins the the channel is connected to.
+ * reg: Pair of pins the channel is connected to.
0: VP/VN
1: VAUXP[0]/VAUXN[0]
2: VAUXP[1]/VAUXN[1]
diff --git a/Documentation/devicetree/bindings/iio/sensorhub.txt b/Documentation/devicetree/bindings/iio/sensorhub.txt
new file mode 100644
index 000000000000..8d57571d5c0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/sensorhub.txt
@@ -0,0 +1,25 @@
+Samsung Sensorhub driver
+
+Sensorhub is an MCU which manages several sensors and also plays the role
+of a virtual sensor device.
+
+Required properties:
+- compatible: "samsung,sensorhub-rinato" or "samsung,sensorhub-thermostat"
+- spi-max-frequency: max SPI clock frequency
+- interrupt-parent: interrupt parent
+- interrupts: communication interrupt
+- ap-mcu-gpios: [out] ap to sensorhub line - used during communication
+- mcu-ap-gpios: [in] sensorhub to ap - used during communication
+- mcu-reset-gpios: [out] sensorhub reset
+
+Example:
+
+ shub_spi: shub {
+ compatible = "samsung,sensorhub-rinato";
+ spi-max-frequency = <5000000>;
+ interrupt-parent = <&gpx0>;
+ interrupts = <2 0>;
+ ap-mcu-gpios = <&gpx0 0 0>;
+ mcu-ap-gpios = <&gpx0 4 0>;
+ mcu-reset-gpios = <&gpx0 5 0>;
+ };
diff --git a/Documentation/devicetree/bindings/input/e3x0-button.txt b/Documentation/devicetree/bindings/input/e3x0-button.txt
new file mode 100644
index 000000000000..751665e8e47a
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/e3x0-button.txt
@@ -0,0 +1,25 @@
+National Instruments Ettus Research USRP E3x0 button driver
+
+This module is part of the NI Ettus Research USRP E3x0 SDR.
+
+This module provides a simple power button event via two interrupts.
+
+Required properties:
+- compatible: should be one of the following
+ - "ettus,e3x0-button": For devices such as the NI Ettus Research USRP E3x0
+- interrupt-parent:
+ - a phandle to the interrupt controller that it is attached to.
+- interrupts: should be one of the following
+ - <0 30 1>, <0 31 1>: For devices such as the NI Ettus Research USRP E3x0
+- interrupt-names: should be one of the following
+ - "press", "release": For devices such as the NI Ettus Research USRP E3x0
+
+Note: Interrupt numbers might vary depending on the FPGA configuration.
+
+Example:
+ button {
+ compatible = "ettus,e3x0-button";
+ interrupt-parent = <&intc>;
+ interrupts = <0 30 1>, <0 31 1>;
+ interrupt-names = "press", "release";
+	};
diff --git a/Documentation/devicetree/bindings/input/regulator-haptic.txt b/Documentation/devicetree/bindings/input/regulator-haptic.txt
new file mode 100644
index 000000000000..3ed1c7eb2f97
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/regulator-haptic.txt
@@ -0,0 +1,21 @@
+* Regulator Haptic Device Tree Bindings
+
+Required Properties:
+ - compatible : Should be "regulator-haptic"
+ - haptic-supply : Power supply to the haptic motor.
+	[*] refer to Documentation/devicetree/bindings/regulator/regulator.txt
+
+ - max-microvolt : The maximum voltage value supplied to the haptic motor.
+		[The unit of the voltage is microvolts]
+
+ - min-microvolt : The minimum voltage value supplied to the haptic motor.
+		[The unit of the voltage is microvolts]
+
+Example:
+
+ haptics {
+ compatible = "regulator-haptic";
+ haptic-supply = <&motor_regulator>;
+ max-microvolt = <2700000>;
+ min-microvolt = <1100000>;
+ };
diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
new file mode 100644
index 000000000000..b9c32f6fd687
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
@@ -0,0 +1,62 @@
+Allwinner sun4i low res adc attached tablet keys
+------------------------------------------------
+
+Required properties:
+ - compatible: "allwinner,sun4i-a10-lradc-keys"
+ - reg: mmio address range of the chip
+ - interrupts: interrupt to which the chip is connected
+ - vref-supply: power supply for the lradc reference voltage
+
+Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
+
+Required subnode-properties:
+ - label: Descriptive name of the key.
+ - linux,code: Keycode to emit.
+ - channel: Channel this key is attached to, must be 0 or 1.
+ - voltage: Voltage in µV at lradc input when this key is pressed.
+
+Example:
+
+#include <dt-bindings/input/input.h>
+
+ lradc: lradc@01c22800 {
+ compatible = "allwinner,sun4i-a10-lradc-keys";
+ reg = <0x01c22800 0x100>;
+ interrupts = <31>;
+ vref-supply = <&reg_vcc3v0>;
+
+ button@191 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <191274>;
+ };
+
+ button@392 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <392644>;
+ };
+
+ button@601 {
+ label = "Menu";
+ linux,code = <KEY_MENU>;
+ channel = <0>;
+ voltage = <601151>;
+ };
+
+ button@795 {
+ label = "Enter";
+ linux,code = <KEY_ENTER>;
+ channel = <0>;
+ voltage = <795090>;
+ };
+
+ button@987 {
+ label = "Home";
+ linux,code = <KEY_HOMEPAGE>;
+ channel = <0>;
+ voltage = <987387>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt b/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt
index aef57791f40b..433332d3b2ba 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt
@@ -2,9 +2,10 @@ sun4i resistive touchscreen controller
--------------------------------------
Required properties:
- - compatible: "allwinner,sun4i-a10-ts"
+ - compatible: "allwinner,sun4i-a10-ts" or "allwinner,sun6i-a31-ts"
- reg: mmio address range of the chip
- interrupts: interrupt to which the chip is connected
+ - #thermal-sensor-cells: shall be 0
Optional properties:
- allwinner,ts-attached: boolean indicating that an actual touchscreen is
@@ -17,4 +18,5 @@ Example:
reg = <0x01c25000 0x100>;
interrupts = <29>;
allwinner,ts-attached;
+ #thermal-sensor-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
index 878549ba814d..6c4fb34823d3 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
@@ -28,6 +28,20 @@ Required properties:
ti,adc-channels: List of analog inputs available for ADC.
AIN0 = 0, AIN1 = 1 and so on till AIN7 = 7.
+Optional properties:
+- child "tsc"
+ ti,charge-delay: Length of touch screen charge delay step in terms of
+ ADC clock cycles. Charge delay value should be large
+ in order to avoid false pen-up events. This value
+				in order to avoid false pen-up events. This value
+				affects the overall sampling speed, hence it needs to be
+				kept as low as possible while avoiding false pen-up
+				events. Start from a lower value, say 0x400, and
+ The pen-up detection happens immediately after the
+ charge step, so this does in fact function as a
+ hardware knob for adjusting the amount of "settling
+ time".
+
Example:
tscadc: tscadc@44e0d000 {
compatible = "ti,am3359-tscadc";
@@ -36,6 +50,7 @@ Example:
ti,x-plate-resistance = <200>;
ti,coordiante-readouts = <5>;
ti,wire-config = <0x00 0x11 0x22 0x33>;
+ ti,charge-delay = <0x400>;
};
adc {
diff --git a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
new file mode 100644
index 000000000000..e30e0b93f2b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
@@ -0,0 +1,17 @@
+Texas Instruments TPS65218 power button
+
+This driver provides a simple power button event via an Interrupt.
+
+Required properties:
+- compatible: should be "ti,tps65218-pwrbutton"
+- interrupts: should be one of the following
+ - <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
+
+Example:
+
+&tps {
+ power-button {
+ compatible = "ti,tps65218-pwrbutton";
+ interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt
new file mode 100644
index 000000000000..42d41ec84c7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/digicolor-ic.txt
@@ -0,0 +1,21 @@
+Conexant Digicolor Interrupt Controller
+
+Required properties:
+
+- compatible : should be "cnxt,cx92755-ic"
+- reg : Specifies base physical address and size of the interrupt controller
+ registers (IC) area
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value shall be 1.
+- syscon: A phandle to the syscon node describing UC registers
+
+Example:
+
+ intc: interrupt-controller@f0000040 {
+ compatible = "cnxt,cx92755-ic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0xf0000040 0x40>;
+ syscon = <&uc_regs>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt
index c73acd060093..4f7946ae8adc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt
@@ -9,6 +9,11 @@ Required properties:
- "renesas,intc-irqpin-r8a7778" (R-Car M1A)
- "renesas,intc-irqpin-r8a7779" (R-Car H1)
- "renesas,intc-irqpin-sh73a0" (SH-Mobile AG5)
+
+- reg: Base address and length of each register bank used by the external
+ IRQ pins driven by the interrupt controller hardware module. The base
+  addresses, lengths and number of required register banks vary with the SoC type.
+
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt
new file mode 100644
index 000000000000..38ce5d037722
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,omap-intc-irq.txt
@@ -0,0 +1,28 @@
+Omap2/3 intc controller
+
+On TI omap2 and 3 the intc interrupt controller can provide
+96 or 128 IRQ signals to the ARM host depending on the SoC.
+
+Required Properties:
+- compatible: should be one of
+ "ti,omap2-intc"
+ "ti,omap3-intc"
+ "ti,dm814-intc"
+ "ti,dm816-intc"
+ "ti,am33xx-intc"
+
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode interrupt
+ source, should be 1 for intc
+- interrupts: interrupt reference to primary interrupt controller
+
+Please refer to interrupts.txt in this directory for details of the common
+interrupt controller bindings used by client devices.
+
+Example:
+ intc: interrupt-controller@48200000 {
+ compatible = "ti,omap3-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x48200000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
new file mode 100644
index 000000000000..cd29083e16ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -0,0 +1,41 @@
+* Renesas VMSA-Compatible IOMMU
+
+The IPMMU is an IOMMU implementation compatible with the ARM VMSA page tables.
+It provides address translation for bus masters outside of the CPU, each
+connected to the IPMMU through a port called micro-TLB.
+
+
+Required Properties:
+
+ - compatible: Must contain "renesas,ipmmu-vmsa".
+ - reg: Base address and size of the IPMMU registers.
+ - interrupts: Specifiers for the MMU fault interrupts. For instances that
+ support secure mode two interrupts must be specified, for non-secure and
+ secure mode, in that order. For instances that don't support secure mode a
+ single interrupt must be specified.
+
+ - #iommu-cells: Must be 1.
+
+Each bus master connected to an IPMMU must reference the IPMMU in its device
+node with the following property:
+
+ - iommus: A reference to the IPMMU in two cells. The first cell is a phandle
+ to the IPMMU and the second cell the number of the micro-TLB that the
+ device is connected to.
+
+
+Example: R8A7791 IPMMU-MX and VSP1-D0 bus master
+
+ ipmmu_mx: mmu@fe951000 {
+		compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xfe951000 0 0x1000>;
+ interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ };
+
+ vsp1@fe928000 {
+ ...
+ iommus = <&ipmmu_mx 13>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 2d88816dd550..34811c57db69 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -1,6 +1,19 @@
Common leds properties.
+LED and flash LED devices provide the same basic functionality as current
+regulators, but extended with LED and flash LED specific features like
+blinking patterns, flash timeout, flash faults and external flash strobe mode.
+
+Many LED devices expose more than one current output that can be connected
+to one or more discrete LED components. Since the arrangement of connections
+can influence the way the LED device is initialized, the LED components
+have to be tightly coupled with the LED device binding. They are represented
+by child nodes of the parent LED device binding.
+
Optional properties for child nodes:
+- led-sources : List of device current outputs the LED is connected to. The
+ outputs are identified by the numbers that must be defined
+ in the LED device binding documentation.
- label : The label for this LED. If omitted, the label is
taken from the node name (excluding the unit address).
@@ -14,6 +27,15 @@ Optional properties for child nodes:
"ide-disk" - LED indicates disk activity
"timer" - LED flashes at a fixed, configurable rate
+- max-microamp : maximum intensity in microamperes of the LED
+ (torch LED for flash devices)
+- flash-max-microamp : maximum intensity in microamperes of the
+ flash LED; it is mandatory if the LED should
+ support the flash mode
+- flash-timeout-us : timeout in microseconds after which the flash
+ LED is turned off
+
+
Examples:
system-status {
@@ -21,3 +43,11 @@ system-status {
linux,default-trigger = "heartbeat";
...
};
+
+camera-flash {
+ label = "Flash";
+ led-sources = <0>, <1>;
+ max-microamp = <50000>;
+ flash-max-microamp = <320000>;
+ flash-timeout-us = <500000>;
+};
diff --git a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
new file mode 100644
index 000000000000..c2619797ce0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
@@ -0,0 +1,49 @@
+Altera Mailbox Driver
+=====================
+
+Required properties:
+- compatible : "altr,mailbox-1.0".
+- reg : physical base address of the mailbox and length of
+ memory mapped region.
+- #mbox-cells: Common mailbox binding property to identify the number
+ of cells required for the mailbox specifier. Should be 1.
+
+Optional properties:
+- interrupt-parent : interrupt source phandle.
+- interrupts : interrupt number. The interrupt specifier format
+ depends on the interrupt controller parent.
+
+Example:
+ mbox_tx: mailbox@0x100 {
+ compatible = "altr,mailbox-1.0";
+ reg = <0x100 0x8>;
+ interrupt-parent = < &gic_0 >;
+ interrupts = <5>;
+ #mbox-cells = <1>;
+ };
+
+ mbox_rx: mailbox@0x200 {
+ compatible = "altr,mailbox-1.0";
+ reg = <0x200 0x8>;
+ interrupt-parent = < &gic_0 >;
+ interrupts = <6>;
+ #mbox-cells = <1>;
+ };
+
+Mailbox client
+===============
+"mboxes" and the optional "mbox-names" (please see
+Documentation/devicetree/bindings/mailbox/mailbox.txt for details). Each value
+of the mboxes property should contain a phandle to the mailbox controller
+device node and second argument is the channel index. It must be 0 (hardware
+support only one channel).The equivalent "mbox-names" property value can be
+used to give a name to the communication channel to be used by the client user.
+
+Example:
+ mclient0: mclient0@0x400 {
+ compatible = "client-1.0";
+ reg = <0x400 0x10>;
+ mbox-names = "mbox-tx", "mbox-rx";
+ mboxes = <&mbox_tx 0>,
+ <&mbox_rx 0>;
+ };
diff --git a/Documentation/devicetree/bindings/media/atmel-isi.txt b/Documentation/devicetree/bindings/media/atmel-isi.txt
index 17e71b7b44c6..251f008f220c 100644
--- a/Documentation/devicetree/bindings/media/atmel-isi.txt
+++ b/Documentation/devicetree/bindings/media/atmel-isi.txt
@@ -38,7 +38,7 @@ Example:
i2c1: i2c@f0018000 {
ov2640: camera@0x30 {
- compatible = "omnivision,ov2640";
+ compatible = "ovti,ov2640";
reg = <0x30>;
port {
diff --git a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
new file mode 100644
index 000000000000..855e1faf73e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
@@ -0,0 +1,63 @@
+SMIA/SMIA++ sensor
+
+SMIA (Standard Mobile Imaging Architecture) is an image sensor standard
+defined jointly by Nokia and ST. SMIA++, defined by Nokia, is an extension
+of that. These definitions are valid for both types of sensors.
+
+More detailed documentation can be found in
+Documentation/devicetree/bindings/media/video-interfaces.txt .
+
+
+Mandatory properties
+--------------------
+
+- compatible: "nokia,smia"
+- reg: I2C address (0x10, or an alternative address)
+- vana-supply: Analogue voltage supply (VANA), typically 2.8 volts (sensor
+ dependent).
+- clocks: External clock to the sensor
+- clock-frequency: Frequency of the external clock to the sensor
+- link-frequencies: List of allowed data link frequencies. An array of
+ 64-bit elements.
+
+
+Optional properties
+-------------------
+
+- nokia,nvm-size: The size of the NVM, in bytes. If the size is not given,
+ the NVM contents will not be read.
+- reset-gpios: XSHUTDOWN GPIO
+
+
+Endpoint node mandatory properties
+----------------------------------
+
+- clock-lanes: <0>
+- data-lanes: <1..n>
+- remote-endpoint: A phandle to the bus receiver's endpoint node.
+
+
+Example
+-------
+
+&i2c2 {
+ clock-frequency = <400000>;
+
+ smiapp_1: camera@10 {
+ compatible = "nokia,smia";
+ reg = <0x10>;
+ reset-gpios = <&gpio3 20 0>;
+ vana-supply = <&vaux3>;
+ clocks = <&omap3_isp 0>;
+ clock-frequency = <9600000>;
+ nokia,nvm-size = <512>; /* 8 * 64 */
+ link-frequencies = /bits/ 64 <199200000 210000000 499200000>;
+ port {
+ smiapp_1_1: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&csi2a_ep>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/sunxi-ir.txt b/Documentation/devicetree/bindings/media/sunxi-ir.txt
index 23dd5ad07b7c..1811a067c72c 100644
--- a/Documentation/devicetree/bindings/media/sunxi-ir.txt
+++ b/Documentation/devicetree/bindings/media/sunxi-ir.txt
@@ -1,7 +1,7 @@
Device-Tree bindings for SUNXI IR controller found in sunXi SoC family
Required properties:
-- compatible : should be "allwinner,sun4i-a10-ir";
+- compatible : "allwinner,sun4i-a10-ir" or "allwinner,sun5i-a13-ir"
- clocks : list of clock specifiers, corresponding to
entries in clock-names property;
- clock-names : should contain "apb" and "ir" entries;
@@ -10,6 +10,7 @@ Required properties:
Optional properties:
- linux,rc-map-name : Remote control map name.
+- resets : phandle + reset specifier pair
Example:
@@ -17,6 +18,7 @@ ir0: ir@01c21800 {
compatible = "allwinner,sun4i-a10-ir";
clocks = <&apb0_gates 6>, <&ir0_clk>;
clock-names = "apb", "ir";
+ resets = <&apb0_rst 1>;
interrupts = <0 5 1>;
reg = <0x01C21800 0x40>;
linux,rc-map-name = "rc-rc6-mce";
diff --git a/Documentation/devicetree/bindings/media/ti-am437x-vpfe.txt b/Documentation/devicetree/bindings/media/ti-am437x-vpfe.txt
new file mode 100644
index 000000000000..3932e766553a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/ti-am437x-vpfe.txt
@@ -0,0 +1,61 @@
+Texas Instruments AM437x CAMERA (VPFE)
+--------------------------------------
+
+The Video Processing Front End (VPFE) is a key component for image capture
+applications. The capture module provides the system interface and the
+processing capability to connect RAW image-sensor modules and video decoders
+to the AM437x device.
+
+Required properties:
+- compatible: must be "ti,am437x-vpfe"
+- reg: physical base address and length of the registers set for the device;
+- interrupts: should contain IRQ line for the VPFE;
+- ti,am437x-vpfe-interface: can be one of the following,
+ 0 - Raw Bayer Interface.
+ 1 - 8 Bit BT656 Interface.
+ 2 - 10 Bit BT656 Interface.
+ 3 - YCbCr 8 Bit Interface.
+ 4 - YCbCr 16 Bit Interface.
+
+VPFE supports a single port node with parallel bus. It should contain one
+'port' child node with child 'endpoint' node. Please refer to the bindings
+defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+ vpfe: vpfe@f0034000 {
+ compatible = "ti,am437x-vpfe";
+ reg = <0x48328000 0x2000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&vpfe_pins_default>;
+ pinctrl-1 = <&vpfe_pins_sleep>;
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vpfe0_ep: endpoint {
+ remote-endpoint = <&ov2659_1>;
+ ti,am437x-vpfe-interface = <0>;
+ bus-width = <8>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ };
+ };
+ };
+
+ i2c1: i2c@4802a000 {
+
+ ov2659@30 {
+ compatible = "ti,ov2659";
+ reg = <0x30>;
+
+ port {
+ ov2659_1: endpoint {
+ remote-endpoint = <&vpfe0_ep>;
+ bus-width = <8>;
+ mclk-frequency = <12000000>;
+ };
+ };
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index ce719f89dd1c..571b4c60665f 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -103,6 +103,9 @@ Optional endpoint properties
array contains only one entry.
- clock-noncontinuous: a boolean property to allow MIPI CSI-2 non-continuous
clock mode.
+- link-frequencies: Allowed data bus frequencies. For MIPI CSI-2, for
+ instance, this is the actual frequency of the bus, not bits per clock per
+ lane value. An array of 64-bit unsigned integers.
Example
@@ -159,7 +162,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
i2c0: i2c@0xfff20000 {
...
ov772x_1: camera@0x21 {
- compatible = "omnivision,ov772x";
+ compatible = "ovti,ov772x";
reg = <0x21>;
vddio-supply = <&regulator1>;
vddcore-supply = <&regulator2>;
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 75fdfaf41831..e39f0bc1f55e 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow:
-BUCKn : 1-4.
Use standard regulator bindings for it ('regulator-off-in-suspend').
+ LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured for GPIO enable
+ control. To turn this feature on, this property must be added to the regulator
+ sub-node:
+ - maxim,ena-gpios : one GPIO specifier for enable control (the GPIO
+ flags are ignored and ACTIVE_HIGH is
+ always used)
Example:
@@ -65,4 +71,12 @@ Example:
regulator-always-on;
regulator-boot-on;
};
+
+ buck9_reg {
+ regulator-compatible = "BUCK9";
+ regulator-name = "CAM_ISP_CORE_1.2V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1200000>;
+ maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+ };
}
diff --git a/Documentation/devicetree/bindings/mfd/max77693.txt b/Documentation/devicetree/bindings/mfd/max77693.txt
index 01e9f30fe678..38e64405e98d 100644
--- a/Documentation/devicetree/bindings/mfd/max77693.txt
+++ b/Documentation/devicetree/bindings/mfd/max77693.txt
@@ -41,6 +41,41 @@ Optional properties:
To get more information, please refer to the documentation.
[*] refer Documentation/devicetree/bindings/pwm/pwm.txt
+- charger : Node configuring the charger driver.
+ If present, required properties:
+ - compatible : Must be "maxim,max77693-charger".
+
+ Optional properties (if not set, defaults will be used):
+ - maxim,constant-microvolt : Battery constant voltage in uV. The charger
+ will operate in fast charge constant current mode till battery voltage
+ reaches this level. Then the charger will switch to fast charge constant
+ voltage mode. Also vsys (system voltage) will be set to this value when
+ DC power is supplied but charger is not enabled.
+ Valid values: 3650000 - 4400000, step by 25000 (rounded down)
+ Default: 4200000
+
+ - maxim,min-system-microvolt : Minimal system voltage in uV.
+ Valid values: 3000000 - 3700000, step by 100000 (rounded down)
+ Default: 3600000
+
+ - maxim,thermal-regulation-celsius : Temperature in Celsius for entering
+ high temperature charging mode. If die temperature exceeds this value
+ the charging current will be reduced by 105 mA/Celsius.
+ Valid values: 70, 85, 100, 115
+ Default: 100
+
+ - maxim,battery-overcurrent-microamp : Overcurrent protection threshold
+ in uA (current from battery to system).
+ Valid values: 2000000 - 3500000, step by 250000 (rounded down)
+ Default: 3500000
+
+ - maxim,charge-input-threshold-microvolt : Threshold voltage in uV for
+ triggering input voltage regulation loop. If input voltage decreases
+ below this value, the input current will be reduced to reach the
+ threshold voltage.
+ Valid values: 4300000, 4700000, 4800000, 4900000
+ Default: 4300000
+
Example:
max77693@66 {
compatible = "maxim,max77693";
@@ -73,4 +108,14 @@ Example:
pwms = <&pwm 0 40000 0>;
pwm-names = "haptic";
};
+
+ charger {
+ compatible = "maxim,max77693-charger";
+
+ maxim,constant-microvolt = <4200000>;
+ maxim,min-system-microvolt = <3600000>;
+ maxim,thermal-regulation-celsius = <75>;
+ maxim,battery-overcurrent-microamp = <3000000>;
+ maxim,charge-input-threshold-microvolt = <4300000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt b/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt
index b97b8bef1fe5..47b205cc9cc7 100644
--- a/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt
+++ b/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt
@@ -1,11 +1,10 @@
NVIDIA Tegra20/Tegra30/Tegra114/Tegra124 apbmisc block
Required properties:
-- compatible : should be:
- "nvidia,tegra20-apbmisc"
- "nvidia,tegra30-apbmisc"
- "nvidia,tegra114-apbmisc"
- "nvidia,tegra124-apbmisc"
+- compatible : For Tegra20, must be "nvidia,tegra20-apbmisc". For Tegra30,
+ must be "nvidia,tegra30-apbmisc". Otherwise, must contain
+ "nvidia,<chip>-apbmisc", plus one of the above, where <chip> is tegra114,
+ tegra124, tegra132.
- reg: Should contain 2 entries: the first entry gives the physical address
and length of the registers which contain revision and debug features.
The second entry gives the physical address and length of the
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.txt
new file mode 100644
index 000000000000..0cb827bf9435
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.txt
@@ -0,0 +1,25 @@
+* The simple eMMC hardware reset provider
+
+The purpose of this driver is to perform the standard eMMC hw reset
+procedure, as described by the JEDEC 4.4 specification. This procedure is
+performed just after the MMC core has enabled power to the given mmc host (to
+fix possible issues if the bootloader has left the eMMC card in an initialized
+or unknown state), and before performing a complete system reboot (also in
+case of an emergency reboot call). The latter is needed on boards which don't
+have hardware reset logic connected to the emmc card and whose (limited or
+broken) ROM bootloaders are unable to read the second stage from the emmc
+card if the card is left in an unknown or already initialized state.
+
+Required properties:
+- compatible : contains "mmc-pwrseq-emmc".
+- reset-gpios : contains a GPIO specifier. The reset GPIO is asserted
+ and then deasserted to perform eMMC card reset. To perform
+ reset procedure as described in Jedec 4.4 specification, the
+ gpio line should be defined as GPIO_ACTIVE_LOW.
+
+Example:
+
+ sdhci0_pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
new file mode 100644
index 000000000000..a462c50f19a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
@@ -0,0 +1,25 @@
+* The simple MMC power sequence provider
+
+The purpose of the simple MMC power sequence provider is to support a set of
+common properties shared between various SoC designs. It thus enables us to use
+the same provider for several SoC designs.
+
+Required properties:
+- compatible : contains "mmc-pwrseq-simple".
+
+Optional properties:
+- reset-gpios : contains a list of GPIO specifiers. The reset GPIOs are asserted
+ at initialization and prior to starting the power up procedure of the card.
+ They will be de-asserted right after the power has been provided to the
+ card.
+- clocks : Must contain an entry for the entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entry:
+ "ext_clock" (External clock provided to the card).
+
+Example:
+
+ sdhci0_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio1 12 0>;
+ };
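
A variant sketch using the optional external clock (the clock provider label
is an assumption):

    wifi_pwrseq: sdhci1_pwrseq {
        compatible = "mmc-pwrseq-simple";
        reset-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
        clocks = <&ext_osc>;        /* placeholder clock provider */
        clock-names = "ext_clock";
    };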
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index b52628b18a53..438899e8829b 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -64,7 +64,43 @@ Optional SDIO properties:
- keep-power-in-suspend: Preserves card power during a suspend/resume cycle
- enable-sdio-wakeup: Enables wake up of host system on SDIO IRQ assertion
-Example:
+
+MMC power sequences:
+--------------------
+
+System on chip designs may specify a specific MMC power sequence. To
+successfully detect an (e)MMC/SD/SDIO card, that power sequence must be
+maintained while initializing the card.
+
+Optional property:
+- mmc-pwrseq: phandle to the MMC power sequence node. See "mmc-pwrseq-*"
+ for documentation of MMC power sequence bindings.
+
+
+Use of Function subnodes
+------------------------
+
+On embedded systems the cards connected to a host may need additional
+properties. These can be specified in subnodes to the host controller node.
+The subnodes are identified by the standard 'reg' property.
+Exactly which information can be specified depends on the bindings for the
+SDIO function driver for the subnode, as specified by the compatible string.
+
+Required host node properties when using function subnodes:
+- #address-cells: should be one. The cell is the slot id.
+- #size-cells: should be zero.
+
+Required function subnode properties:
+- compatible: name of SDIO function following generic names recommended practice
+- reg: Must contain the SDIO function number of the function this subnode
+ describes. A value of 0 denotes the memory SD function, values from
+ 1 to 7 denote the SDIO functions.
+
+
+Examples
+--------
+
+Basic example:
sdhci@ab000000 {
compatible = "sdhci";
@@ -77,4 +113,28 @@ sdhci@ab000000 {
max-frequency = <50000000>;
keep-power-in-suspend;
enable-sdio-wakeup;
+ mmc-pwrseq = <&sdhci0_pwrseq>;
}
+
+Example with sdio function subnode:
+
+mmc3: mmc@01c12000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins_a>;
+ vmmc-supply = <&reg_vmmc3>;
+ bus-width = <4>;
+ non-removable;
+ mmc-pwrseq = <&sdhci0_pwrseq>;
+ status = "okay";
+
+ brcmf: bcrmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm43xx-fmac";
+ interrupt-parent = <&pio>;
+ interrupts = <10 8>; /* PH10 / EINT10 */
+ interrupt-names = "host-wake";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index f357c16ea815..15b8368ee1f2 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -7,7 +7,11 @@ This file documents differences between the core properties described
by mmc.txt and the properties used by the sdhci-tegra driver.
Required properties:
-- compatible : Should be "nvidia,<chip>-sdhci"
+- compatible : For Tegra20, must contain "nvidia,tegra20-sdhci".
+ For Tegra30, must contain "nvidia,tegra30-sdhci". For Tegra114,
+ must contain "nvidia,tegra114-sdhci". For Tegra124, must contain
+ "nvidia,tegra124-sdhci". Otherwise, must contain "nvidia,<chip>-sdhci",
+ plus one of the above, where <chip> is tegra132 or tegra210.
- clocks : Must contain one entry, for the module clock.
See ../clocks/clock-bindings.txt for details.
- resets : Must contain an entry for each entry in reset-names.
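
A minimal sketch of the fallback-compatible pattern described above (the
register address, interrupt and clock/reset specifiers are placeholders):

    sdhci@700b0000 {
        compatible = "nvidia,tegra132-sdhci", "nvidia,tegra124-sdhci";
        reg = <0x700b0000 0x200>;
        interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car 14>;    /* module clock */
        resets = <&tegra_car 14>;
        reset-names = "sdhci";
    };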
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt b/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
new file mode 100644
index 000000000000..de2c53cff4f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
@@ -0,0 +1,30 @@
+* Fujitsu SDHCI controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci_f_sdh30 driver.
+
+Required properties:
+- compatible: "fujitsu,mb86s70-sdhci-3.0"
+- clocks: Must contain an entry for each entry in clock-names. It is a
+ list of phandles and clock-specifier pairs.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Should contain the following two entries:
+ "iface" - clock used for sdhci interface
+ "core" - core clock for sdhci controller
+
+Optional properties:
+- vqmmc-supply: phandle to the regulator device tree node, mentioned
+ as the VCCQ/VDD_IO supply in the eMMC/SD specs.
+
+Example:
+
+ sdhci1: mmc@36600000 {
+ compatible = "fujitsu,mb86s70-sdhci-3.0";
+ reg = <0 0x36600000 0x1000>;
+ interrupts = <0 172 0x4>,
+ <0 173 0x4>;
+ bus-width = <4>;
+ vqmmc-supply = <&vccq_sdhci1>;
+ clocks = <&clock 2 2 0>, <&clock 2 3 0>;
+ clock-names = "iface", "core";
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt b/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
index 4dd6deb90719..3d1b449d6097 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
@@ -9,9 +9,13 @@ Required properties:
- reg:
* for "mrvl,pxav2-mmc" and "mrvl,pxav3-mmc", one register area for
the SDHCI registers.
- * for "marvell,armada-380-sdhci", two register areas. The first one
- for the SDHCI registers themselves, and the second one for the
- AXI/Mbus bridge registers of the SDHCI unit.
+
+ * for "marvell,armada-380-sdhci", three register areas. The first
+ one for the SDHCI registers themselves, the second one for the
+ AXI/Mbus bridge registers of the SDHCI unit, the third one for the
+ SDIO3 Configuration register
+- reg-names: should be "sdhci", "mbus", "conf-sdio3". Only mandatory
+ for "marvell,armada-380-sdhci".
- clocks: Array of clocks required for SDHCI; requires at least one for
I/O clock.
- clock-names: Array of names corresponding to clocks property; shall be
@@ -35,7 +39,10 @@ sdhci@d4280800 {
sdhci@d8000 {
compatible = "marvell,armada-380-sdhci";
- reg = <0xd8000 0x1000>, <0xdc000 0x100>;
+ reg-names = "sdhci", "mbus", "conf-sdio3";
+ reg = <0xd8000 0x1000>,
+ <0xdc000 0x100>,
+ <0x18454 0x4>;
interrupts = <0 25 0x4>;
clocks = <&gateclk 17>;
clock-names = "io";
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
index ec42935f3908..5235cbc551b0 100644
--- a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -9,7 +9,7 @@ Required properties:
Optional properties:
- bank-width : Width (in bytes) of the device. If not present, the width
defaults to 1 byte
-- nand-skip-bbtscan: Indicates the the BBT scanning should be skipped
+- nand-skip-bbtscan: Indicates the BBT scanning should be skipped
- timings: array of 6 bytes for NAND timings. The meanings of these bytes
are:
byte 0 TCLR : CLE to RE delay in number of AHB clock cycles, only 4 bits
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index 42409bfe04c4..33df3932168e 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -7,17 +7,38 @@ Required properties:
- SerDes Rx/Tx registers
- SerDes integration registers (1/2)
- SerDes integration registers (2/2)
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupts: Should contain the amd-xgbe-phy interrupt.
Optional properties:
- amd,speed-set: Speed capabilities of the device
0 - 1GbE and 10GbE (default)
1 - 2.5GbE and 10GbE
+The following optional properties are represented by an array with each
+value corresponding to a particular speed. The first array value represents
+the setting for the 1GbE speed, the second value for the 2.5GbE speed and
+the third value for the 10GbE speed. All three values are required if the
+property is used.
+- amd,serdes-blwc: Baseline wandering correction enablement
+ 0 - Off
+ 1 - On
+- amd,serdes-cdr-rate: CDR rate speed selection
+- amd,serdes-pq-skew: PQ (data sampling) skew
+- amd,serdes-tx-amp: TX amplitude boost
+
Example:
xgbe_phy@e1240800 {
compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
reg = <0 0xe1240800 0 0x00400>,
<0 0xe1250000 0 0x00060>,
<0 0xe1250080 0 0x00004>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 323 4>;
amd,speed-set = <0>;
+ amd,serdes-blwc = <1>, <1>, <0>;
+ amd,serdes-cdr-rate = <2>, <2>, <7>;
+ amd,serdes-pq-skew = <10>, <10>, <30>;
+ amd,serdes-tx-amp = <15>, <15>, <10>;
};
diff --git a/Documentation/devicetree/bindings/net/broadcom-systemport.txt b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
index aa7ad622259d..877da34145b0 100644
--- a/Documentation/devicetree/bindings/net/broadcom-systemport.txt
+++ b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
- reg: address and length of the register set for the device.
-- interrupts: interrupts for the device, first cell must be for the the rx
+- interrupts: interrupts for the device, first cell must be for the rx
interrupts, and the second cell should be for the transmit queues. An
optional third interrupt cell for Wake-on-LAN can be specified
- local-mac-address: Ethernet MAC address (48 bits) of this adapter
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
index 28767ed7c1bd..5224bf05f6f8 100644
--- a/Documentation/devicetree/bindings/net/davicom-dm9000.txt
+++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
@@ -11,6 +11,8 @@ Required properties:
Optional properties:
- davicom,no-eeprom : Configuration EEPROM is not available
- davicom,ext-phy : Use external PHY
+- reset-gpios : phandle of the gpio that will be used to reset the chip during probe
+- vcc-supply : phandle of the regulator that will be used to enable power to the chip
Example:
@@ -21,4 +23,6 @@ Example:
interrupts = <7 4>;
local-mac-address = [00 00 de ad be ef];
davicom,no-eeprom;
+ reset-gpios = <&gpf 12 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&eth0_power>;
};
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 032808843f90..24c5cdaba8d2 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,8 @@ This file provides information, what the device node
for the davinci_emac interface contains.
Required properties:
-- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
+- compatible: "ti,davinci-dm6467-emac", "ti,am3517-emac" or
+ "ti,dm816-emac"
- reg: Offset and length of the register set for the device
- ti,davinci-ctrl-reg-offset: offset to control register
- ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 0c8775c45798..a9eb611bee68 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -22,6 +22,8 @@ Optional properties:
- fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
hw multi queues. Should specify the rx queue number, otherwise set rx queue
number to 1.
+- fsl,magic-packet : If present, indicates that the hardware supports waking
+ up via magic packet.
Optional subnodes:
- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
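
A minimal FEC node sketch showing where the new property sits (the compatible,
register address, interrupt and PHY label are placeholders):

    ethernet@2188000 {
        compatible = "fsl,imx6q-fec";
        reg = <0x02188000 0x4000>;
        interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
        phy-mode = "rgmii";
        phy-handle = <&ethphy0>;
        fsl,magic-packet;
    };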
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index be6ea8960f20..1e97532a0b79 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -8,7 +8,16 @@ of how to define a PHY.
Required properties:
- reg : Offset and length of the register set for the device
- compatible : Should define the compatible device type for the
- mdio. Currently, this is most likely to be "fsl,gianfar-mdio"
+ mdio. Currently supported strings/devices are:
+ - "fsl,gianfar-tbi"
+ - "fsl,gianfar-mdio"
+ - "fsl,etsec2-tbi"
+ - "fsl,etsec2-mdio"
+ - "fsl,ucc-mdio"
+ - "fsl,fman-mdio"
+ When device_type is "mdio", the following strings are also considered:
+ - "gianfar"
+ - "ucc_geth_phy"
Example:
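For illustration, a minimal mdio node using one of the compatibles listed
above (the register offset and size are placeholders):

    mdio@24520 {
        compatible = "fsl,gianfar-mdio";
        reg = <0x24520 0x20>;
        #address-cells = <1>;
        #size-cells = <0>;

        ethernet-phy@0 {
            reg = <0>;
        };
    };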
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
new file mode 100644
index 000000000000..988fc694b663
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
@@ -0,0 +1,88 @@
+Hisilicon hip04 Ethernet Controller
+
+* Ethernet controller node
+
+Required properties:
+- compatible: should be "hisilicon,hip04-mac".
+- reg: address and length of the register set for the device.
+- interrupts: interrupt for the device.
+- port-handle: <phandle port channel>
+ phandle, specifies a reference to the syscon ppe node
+ port, port number connected to the controller
+ channel, recv channel start from channel * number (RX_DESC_NUM)
+- phy-mode: see ethernet.txt [1].
+
+Optional properties:
+- phy-handle: see ethernet.txt [1].
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+
+
+* Ethernet ppe node:
+Controls the rx & tx fifos of all ethernet controllers.
+It has 2048 recv channels shared by all ethernet controllers, provided they do not overlap.
+Each controller's recv channels start from channel * number (RX_DESC_NUM).
+
+Required properties:
+- compatible: "hisilicon,hip04-ppe", "syscon".
+- reg: address and length of the register set for the device.
+
+
+* MDIO bus node:
+
+Required properties:
+
+- compatible: should be "hisilicon,hip04-mdio".
+- Inherits from MDIO bus node binding [2]
+[2] Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+ mdio {
+ compatible = "hisilicon,hip04-mdio";
+ reg = <0x28f1000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ marvell,reg-init = <18 0x14 0 0x8001>;
+ };
+
+ phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ marvell,reg-init = <18 0x14 0 0x8001>;
+ };
+ };
+
+ ppe: ppe@28c0000 {
+ compatible = "hisilicon,hip04-ppe", "syscon";
+ reg = <0x28c0000 0x10000>;
+ };
+
+ fe: ethernet@28b0000 {
+ compatible = "hisilicon,hip04-mac";
+ reg = <0x28b0000 0x10000>;
+ interrupts = <0 413 4>;
+ phy-mode = "mii";
+ port-handle = <&ppe 31 0>;
+ };
+
+ ge0: ethernet@2800000 {
+ compatible = "hisilicon,hip04-mac";
+ reg = <0x2800000 0x10000>;
+ interrupts = <0 402 4>;
+ phy-mode = "sgmii";
+ port-handle = <&ppe 0 1>;
+ phy-handle = <&phy0>;
+ };
+
+ ge8: ethernet@2880000 {
+ compatible = "hisilicon,hip04-mac";
+ reg = <0x2880000 0x10000>;
+ interrupts = <0 410 4>;
+ phy-mode = "sgmii";
+ port-handle = <&ppe 8 2>;
+ phy-handle = <&phy1>;
+ };
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
new file mode 100644
index 000000000000..f9c07710478d
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -0,0 +1,197 @@
+This document describes the device tree bindings associated with the
+keystone network coprocessor(NetCP) driver support.
+
+The network coprocessor (NetCP) is a hardware accelerator that processes
+Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsystem with an ethernet
+switch sub-module to send and receive packets. NetCP also includes a packet
+accelerator (PA) module to perform packet classification operations such as
+header matching, and packet modification operations such as checksum
+generation. NetCP can also optionally include a Security Accelerator (SA)
+capable of performing IPSec operations on ingress/egress packets.
+
+Keystone II SoCs also have a 10 Gigabit Ethernet Subsystem (XGbE) which
+includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates
+per Ethernet port.
+
+The Keystone NetCP driver has a plug-in module architecture where each of the
+NetCP sub-modules exists as a loadable kernel module which plugs in to the
+netcp core. These sub-modules are represented as "netcp-devices" in the dts
+bindings. It is mandatory to have the ethernet switch sub-module for the
+ethernet interface to be operational. Any other sub-module, like the PA, is
+optional.
+
+NetCP Ethernet SubSystem Layout:
+
+-----------------------------
+ NetCP subsystem(10G or 1G)
+-----------------------------
+ |
+ |-> NetCP Devices -> |
+ | |-> GBE/XGBE Switch
+ | |
+ | |-> Packet Accelerator
+ | |
+ | |-> Security Accelerator
+ |
+ |
+ |
+ |-> NetCP Interfaces -> |
+ |-> Ethernet Port 0
+ |
+ |-> Ethernet Port 1
+ |
+ |-> Ethernet Port 2
+ |
+ |-> Ethernet Port 3
+
+
+NetCP subsystem properties:
+Required properties:
+- compatible: Should be "ti,netcp-1.0"
+- clocks: phandle to the reference clocks for the subsystem.
+- dma-id: Navigator packet dma instance id.
+
+Optional properties:
+- reg: register location and the size for the following register
+ regions in the specified order.
+ - Efuse MAC address register
+- dma-coherent: Present if dma operations are coherent
+- big-endian: Keystone devices can be operated in a mode where the DSP is in
+ the big endian mode. In such cases enable this option. This
+ option should also be enabled if the ARM is operated in
+ big endian mode with the DSP in little endian.
+
+NetCP device properties: Device specification for NetCP sub-modules.
+1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
+Required properties:
+- label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
+- reg: register location and the size for the following register
+ regions in the specified order.
+ - subsystem registers
+ - serdes registers
+- tx-channel: the navigator packet dma channel name for tx.
+- tx-queue: the navigator queue number associated with the tx dma channel.
+- interfaces: specification for each of the switch ports to be registered as a
+ network interface in the stack.
+-- slave-port: Switch port number, 0 based numbering.
+-- link-interface: type of link interface, supported options are
+ - mac<->mac auto negotiate mode: 0
+ - mac<->phy mode: 1
+ - mac<->mac forced mode: 2
+ - mac<->fiber mode: 3
+ - mac<->phy mode with no mdio: 4
+ - 10Gb mac<->phy mode : 10
+ - 10Gb mac<->mac forced mode : 11
+----phy-handle: phandle to PHY device
+
+Optional properties:
+- enable-ale: The NetCP driver keeps the address learning feature in the ethernet
+ switch module disabled. This attribute enables the address
+ learning.
+- secondary-slave-ports: specification for each of the switch ports not to be
+ registered as a network interface. The NetCP driver
+ will only initialize these ports and attach a PHY
+ driver to them if needed.
+
+NetCP interface properties: Interface specification for NetCP sub-modules.
+Required properties:
+- rx-channel: the navigator packet dma channel name for rx.
+- rx-queue: the navigator queue number associated with rx dma channel.
+- rx-pool: specifies the number of descriptors to be used & the region-id
+ for creating the rx descriptor pool.
+- tx-pool: specifies the number of descriptors to be used & the region-id
+ for creating the tx descriptor pool.
+- rx-queue-depth: number of descriptors in each of the free descriptor
+ queue (FDQ) for the pktdma Rx flow. There can be at
+ present a maximum of 4 queues per Rx flow.
+- rx-buffer-size: the buffer size for each of the Rx flow FDQ.
+- tx-completion-queue: the navigator queue number where the descriptors are
+ recycled after Tx DMA completion.
+
+Optional properties:
+- efuse-mac: If this is 1, then the MAC address for the interface is
+ obtained from the device efuse mac address register
+- local-mac-address: the driver is designed to use the of_get_mac_address api
+ only if efuse-mac is 0. When efuse-mac is 0, the MAC
+ address is obtained from local-mac-address. If this
+ attribute is not present, then the driver will use a
+ random MAC address.
+- "netcp-device label": phandle to the device specification for each of NetCP
+ sub-module attached to this interface.
+
+Example binding:
+
+netcp: netcp@2090000 {
+ reg = <0x2620110 0x8>;
+ reg-names = "efuse";
+ compatible = "ti,netcp-1.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
+ dma-coherent;
+ /* big-endian; */
+ dma-id = <0>;
+
+ netcp-devices {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ gbe@0x2090000 {
+ label = "netcp-gbe";
+ reg = <0x2090000 0xf00>;
+ /* enable-ale; */
+ tx-queue = <648>;
+ tx-channel = <8>;
+
+ interfaces {
+ gbe0: interface-0 {
+ slave-port = <0>;
+ link-interface = <4>;
+ };
+ gbe1: interface-1 {
+ slave-port = <1>;
+ link-interface = <4>;
+ };
+ };
+
+ secondary-slave-ports {
+ port-2 {
+ slave-port = <2>;
+ link-interface = <2>;
+ };
+ port-3 {
+ slave-port = <3>;
+ link-interface = <2>;
+ };
+ };
+ };
+ };
+
+ netcp-interfaces {
+ interface-0 {
+ rx-channel = <22>;
+ rx-pool = <1024 12>;
+ tx-pool = <1024 12>;
+ rx-queue-depth = <128 128 0 0>;
+ rx-buffer-size = <1518 4096 0 0>;
+ rx-queue = <8704>;
+ tx-completion-queue = <8706>;
+ efuse-mac = <1>;
+ netcp-gbe = <&gbe0>;
+
+ };
+ interface-1 {
+ rx-channel = <23>;
+ rx-pool = <1024 12>;
+ tx-pool = <1024 12>;
+ rx-queue-depth = <128 128 0 0>;
+ rx-buffer-size = <1518 4096 0 0>;
+ rx-queue = <8705>;
+ tx-completion-queue = <8707>;
+ efuse-mac = <0>;
+ local-mac-address = [02 18 31 7e 3e 6f];
+ netcp-gbe = <&gbe1>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
index e4faa2e8dfeb..7bb2e213d6f9 100644
--- a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
@@ -1,7 +1,7 @@
* STMicroelectronics SAS. ST21NFCA NFC Controller
Required properties:
-- compatible: Should be "st,st21nfca_i2c".
+- compatible: Should be "st,st21nfca-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
- interrupt-parent: phandle for the interrupt gpio controller
@@ -11,6 +11,10 @@ Required properties:
Optional SoC Specific Properties:
- pinctrl-names: Contains only one value - "default".
- pintctrl-0: Specifies the pin control groups used for this controller.
+- ese-present: Specifies that an eSE is physically connected to the NFC
+controller.
+- uicc-present: Specifies that the UICC SWP signal can be physically
+connected to the NFC controller.
Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
@@ -20,7 +24,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
st21nfca: st21nfca@1 {
- compatible = "st,st21nfca_i2c";
+ compatible = "st,st21nfca-i2c";
reg = <0x01>;
clock-frequency = <400000>;
@@ -29,5 +33,8 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
enable-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+
+ ese-present;
+ uicc-present;
};
};
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt b/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
index 9005608cbbd1..bb237072dbe9 100644
--- a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
@@ -1,7 +1,7 @@
* STMicroelectronics SAS. ST21NFCB NFC Controller
Required properties:
-- compatible: Should be "st,st21nfcb_i2c".
+- compatible: Should be "st,st21nfcb-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
- interrupt-parent: phandle for the interrupt gpio controller
@@ -20,7 +20,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
st21nfcb: st21nfcb@8 {
- compatible = "st,st21nfcb_i2c";
+ compatible = "st,st21nfcb-i2c";
reg = <0x08>;
clock-frequency = <400000>;
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
new file mode 100644
index 000000000000..21fd199e89b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -0,0 +1,68 @@
+Rockchip SoC RK3288 10/100/1000 Ethernet driver (GMAC)
+
+The device node has following properties.
+
+Required properties:
+ - compatible: Can be "rockchip,rk3288-gmac".
+ - reg: addresses and length of the register sets for the device.
+ - interrupts: Should contain the GMAC interrupts.
+ - interrupt-names: Should contain the interrupt names "macirq".
+ - rockchip,grf: phandle to the syscon grf used to control speed and mode.
+ - clocks: <&cru SCLK_MAC>: clock selector for main clock, from PLL or PHY.
+ <&cru SCLK_MAC_PLL>: PLL clock for SCLK_MAC
+ <&cru SCLK_MAC_RX>: clock gate for RX
+ <&cru SCLK_MAC_TX>: clock gate for TX
+ <&cru SCLK_MACREF>: clock gate for RMII reference clock
+ <&cru SCLK_MACREF_OUT> clock gate for RMII reference clock output
+ <&cru ACLK_GMAC>: AXI clock gate for GMAC
+ <&cru PCLK_GMAC>: APB clock gate for GMAC
+ - clock-names: One name for each entry in the clocks property.
+ - phy-mode: See ethernet.txt file in the same directory.
+ - pinctrl-names: Names corresponding to the numbered pinctrl states.
+ - pinctrl-0: pin-control mode. can be <&rgmii_pins> or <&rmii_pins>.
+ - clock_in_out: For RGMII, it must be "input", meaning the main clock (125MHz)
+ is not sourced from the SoC's PLL but is input from the PHY; for RMII, "input"
+ means the PHY provides the reference clock (50MHz), and "output" means the GMAC
+ provides the reference clock.
+ - snps,reset-gpio: gpio number for phy reset.
+ - snps,reset-active-low: boolean flag to indicate if phy reset is active low.
+ - assigned-clocks: main clock, should be <&cru SCLK_MAC>;
+ - assigned-clock-parents: parent of main clock.
+ can be <&ext_gmac> or <&cru SCLK_MAC_PLL>.
+
+Optional properties:
+ - tx_delay: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default.
+ - rx_delay: Delay value for RXD timing. Range value is 0~0x7F, 0x10 as default.
+ - phy-supply: phandle to a regulator if the PHY needs one
+
+Example:
+
+gmac: ethernet@ff290000 {
+ compatible = "rockchip,rk3288-gmac";
+ reg = <0xff290000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ rockchip,grf = <&grf>;
+ clocks = <&cru SCLK_MAC>,
+ <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
+ <&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
+ <&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
+ clock-names = "stmmaceth",
+ "mac_clk_rx", "mac_clk_tx",
+ "clk_mac_ref", "clk_mac_refout",
+ "aclk_mac", "pclk_mac";
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins /*&rmii_pins*/>;
+
+ clock_in_out = "input";
+ snps,reset-gpio = <&gpio4 7 0>;
+ snps,reset-active-low;
+
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+
+ status = "ok";
+};
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
index 6762a6b5da7e..d05c1e1fd9b6 100644
--- a/Documentation/devicetree/bindings/net/sti-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -9,14 +9,10 @@ The device node has following properties.
Required properties:
- compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac",
"st,stih407-dwmac", "st,stid127-dwmac".
- - reg : Offset of the glue configuration register map in system
- configuration regmap pointed by st,syscon property and size.
- - st,syscon : Should be phandle to system configuration node which
- encompases this glue registers.
+ - st,syscon : Should be a phandle/offset pair: the phandle to the syscon node which
+ encompasses the glue register, and the offset of the control register.
- st,gmac_en: this is to enable the gmac into a dedicated sysctl control
register available on STiH407 SoC.
- - sti-ethconf: this is the gmac glue logic register to enable the GMAC,
- select among the different modes and program the clk retiming.
- pinctrl-0: pin-control for all the MII mode supported.
Optional properties:
@@ -40,10 +36,10 @@ ethernet0: dwmac@9630000 {
device_type = "network";
status = "disabled";
compatible = "st,stih407-dwmac", "snps,dwmac", "snps,dwmac-3.710";
- reg = <0x9630000 0x8000>, <0x80 0x4>;
- reg-names = "stmmaceth", "sti-ethconf";
+ reg = <0x9630000 0x8000>;
+ reg-names = "stmmaceth";
- st,syscon = <&syscfg_sbc_reg>;
+ st,syscon = <&syscfg_sbc_reg 0x80>;
st,gmac_en;
resets = <&softreset STIH407_ETH1_SOFTRESET>;
reset-names = "stmmaceth";
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index c41afd963edf..8ca65cec52ae 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -43,6 +43,7 @@ Optional properties:
available this clock is used for programming the Timestamp Addend Register.
If not passed then the system clock will be used and this is fine on some
platforms.
+- snps,burst_len: The AXI burst length value of the AXI BUS MODE register.
Examples:
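
A minimal node sketch showing where snps,burst_len would sit (the compatible,
addresses and the burst length value are illustrative):

    gmac0: ethernet@e0800000 {
        compatible = "st,spear600-gmac";
        reg = <0xe0800000 0x8000>;
        interrupts = <23 22>;
        interrupt-names = "macirq", "eth_wake_irq";
        phy-mode = "gmii";
        snps,burst_len = <8>;
    };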
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
new file mode 100644
index 000000000000..edefc26c6204
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -0,0 +1,30 @@
+* Qualcomm Atheros ath10k wireless devices
+
+For ath10k devices the calibration data can be provided through Device
+Tree. The node is a child node of the PCI controller.
+
+Required properties:
+- compatible : Should be "qcom,ath10k"
+
+Optional properties:
+- qcom,ath10k-calibration-data : calibration data as an array, the
+ length can vary between hw versions
+
+
+Example:
+
+pci {
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+
+ ath10k@0,0 {
+ reg = <0 0 0 0 0>;
+ device_type = "pci";
+ qcom,ath10k-calibration-data = [ 01 02 03 ... ];
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/panel/avic,tm070ddh03.txt b/Documentation/devicetree/bindings/panel/avic,tm070ddh03.txt
new file mode 100644
index 000000000000..b6f2f3e8f44e
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/avic,tm070ddh03.txt
@@ -0,0 +1,7 @@
+Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "avic,tm070ddh03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
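
A minimal panel node sketch using this compatible together with the optional
simple-panel properties (the supply and backlight labels are assumptions):

    panel {
        compatible = "avic,tm070ddh03";
        power-supply = <&vcc_lcd>;
        backlight = <&backlight>;
    };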
diff --git a/Documentation/devicetree/bindings/panel/giantplus,gpg482739qs5.txt b/Documentation/devicetree/bindings/panel/giantplus,gpg482739qs5.txt
new file mode 100644
index 000000000000..24b0b624434b
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/giantplus,gpg482739qs5.txt
@@ -0,0 +1,7 @@
+GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "giantplus,gpg48273qs5"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index d763e047c6ae..75321ae23c08 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -1,10 +1,10 @@
NVIDIA Tegra PCIe controller
Required properties:
-- compatible: Must be one of:
- - "nvidia,tegra20-pcie"
- - "nvidia,tegra30-pcie"
- - "nvidia,tegra124-pcie"
+- compatible: For Tegra20, must contain "nvidia,tegra20-pcie". For Tegra30,
+ "nvidia,tegra30-pcie". For Tegra124, must contain "nvidia,tegra124-pcie".
+ Otherwise, must contain "nvidia,<chip>-pcie", plus one of the above, where
+ <chip> is tegra132 or tegra210.
- device_type: Must be "pci"
- reg: A list of physical base address and length for each set of controller
registers. Must contain an entry for each entry in the reg-names property.
diff --git a/Documentation/devicetree/bindings/pci/versatile.txt b/Documentation/devicetree/bindings/pci/versatile.txt
new file mode 100644
index 000000000000..ebd1e7d0403e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/versatile.txt
@@ -0,0 +1,59 @@
+* ARM Versatile Platform Baseboard PCI interface
+
+PCI host controller found on the ARM Versatile PB board's FPGA.
+
+Required properties:
+- compatible: should contain "arm,versatile-pci" to identify the Versatile PCI
+ controller.
+- reg: base addresses and lengths of the pci controller. There must be 3
+ entries:
+ - Versatile-specific registers
+ - Self Config space
+ - Config space
+- #address-cells: set to <3>
+- #size-cells: set to <2>
+- device_type: set to "pci"
+- bus-range: set to <0 0xff>
+- ranges: ranges for the PCI memory and I/O regions
+- #interrupt-cells: set to <1>
+- interrupt-map-mask and interrupt-map: standard PCI properties to define
+ the mapping of the PCI interface to interrupt numbers.
+
+Example:
+
+pci-controller@10001000 {
+ compatible = "arm,versatile-pci";
+ device_type = "pci";
+ reg = <0x10001000 0x1000
+ 0x41000000 0x10000
+ 0x42000000 0x100000>;
+ bus-range = <0 0xff>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000 /* downstream I/O */
+ 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
+ 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
+
+ interrupt-map-mask = <0x1800 0 0 7>;
+ interrupt-map = <0x1800 0 0 1 &sic 28
+ 0x1800 0 0 2 &sic 29
+ 0x1800 0 0 3 &sic 30
+ 0x1800 0 0 4 &sic 27
+
+ 0x1000 0 0 1 &sic 27
+ 0x1000 0 0 2 &sic 28
+ 0x1000 0 0 3 &sic 29
+ 0x1000 0 0 4 &sic 30
+
+ 0x0800 0 0 1 &sic 30
+ 0x0800 0 0 2 &sic 27
+ 0x0800 0 0 3 &sic 28
+ 0x0800 0 0 4 &sic 29
+
+ 0x0000 0 0 1 &sic 29
+ 0x0000 0 0 2 &sic 30
+ 0x0000 0 0 3 &sic 27
+ 0x0000 0 0 4 &sic 28>;
+};
diff --git a/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt b/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt
index 46a135dae6b3..89caa885d08c 100644
--- a/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt
+++ b/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt
@@ -26,6 +26,7 @@ Required properties (port (child) node):
filled in "reg". It can also contain the offset of the system configuration
registers used as glue-logic to setup the device for SATA/PCIe or USB3
devices.
+- st,syscfg : Offset of the parent configuration register.
- resets : phandle to the parent reset controller.
- reset-names : Associated name must be "miphy-sw-rst".
@@ -54,18 +55,12 @@ example:
phy_port0: port@9b22000 {
reg = <0x9b22000 0xff>,
<0x9b09000 0xff>,
- <0x9b04000 0xff>,
- <0x114 0x4>, /* sysctrl MiPHY cntrl */
- <0x818 0x4>, /* sysctrl MiPHY status*/
- <0xe0 0x4>, /* sysctrl PCIe */
- <0xec 0x4>; /* sysctrl SATA */
+ <0x9b04000 0xff>;
reg-names = "sata-up",
"pcie-up",
- "pipew",
- "miphy-ctrl-glue",
- "miphy-status-glue",
- "pcie-glue",
- "sata-glue";
+ "pipew";
+
+ st,syscfg = <0x114 0x818 0xe0 0xec>;
#phy-cells = <1>;
st,osc-rdy;
reset-names = "miphy-sw-rst";
@@ -75,18 +70,13 @@ example:
phy_port1: port@9b2a000 {
reg = <0x9b2a000 0xff>,
<0x9b19000 0xff>,
- <0x9b14000 0xff>,
- <0x118 0x4>,
- <0x81c 0x4>,
- <0xe4 0x4>,
- <0xf0 0x4>;
+ <0x9b14000 0xff>;
reg-names = "sata-up",
"pcie-up",
- "pipew",
- "miphy-ctrl-glue",
- "miphy-status-glue",
- "pcie-glue",
- "sata-glue";
+ "pipew";
+
+ st,syscfg = <0x118 0x81c 0xe4 0xf0>;
+
#phy-cells = <1>;
st,osc-force-ext;
reset-names = "miphy-sw-rst";
@@ -95,13 +85,12 @@ example:
phy_port2: port@8f95000 {
reg = <0x8f95000 0xff>,
- <0x8f90000 0xff>,
- <0x11c 0x4>,
- <0x820 0x4>;
+ <0x8f90000 0xff>;
reg-names = "pipew",
- "usb3-up",
- "miphy-ctrl-glue",
- "miphy-status-glue";
+ "usb3-up";
+
+ st,syscfg = <0x11c 0x820>;
+
#phy-cells = <1>;
reset-names = "miphy-sw-rst";
resets = <&softreset STIH407_MIPHY2_SOFTRESET>;
@@ -125,4 +114,4 @@ example:
Macro definitions for the supported miphy configuration can be found in:
-include/dt-bindings/phy/phy-miphy28lp.h
+include/dt-bindings/phy/phy.h
diff --git a/Documentation/devicetree/bindings/phy/phy-miphy365x.txt b/Documentation/devicetree/bindings/phy/phy-miphy365x.txt
index 42c880886cf7..9802d5d911aa 100644
--- a/Documentation/devicetree/bindings/phy/phy-miphy365x.txt
+++ b/Documentation/devicetree/bindings/phy/phy-miphy365x.txt
@@ -6,8 +6,10 @@ for SATA and PCIe.
Required properties (controller (parent) node):
- compatible : Should be "st,miphy365x-phy"
-- st,syscfg : Should be a phandle of the system configuration register group
- which contain the SATA, PCIe mode setting bits
+- st,syscfg : Phandle / integer array property. The phandle points to the sysconfig
+ group containing the miphy registers, and the integer array should
+ contain an entry for each port sub-node, specifying the control
+ register offset inside the sysconfig group.
Required nodes : A sub-node is required for each channel the controller
provides. Address range information including the usual
@@ -26,7 +28,6 @@ Required properties (port (child) node):
registers filled in "reg":
- sata: For SATA devices
- pcie: For PCIe devices
- - syscfg: To specify the syscfg based config register
Optional properties (port (child) node):
- st,sata-gen : Generation of locally attached SATA IP. Expected values
@@ -39,20 +40,20 @@ Example:
miphy365x_phy: miphy365x@fe382000 {
compatible = "st,miphy365x-phy";
- st,syscfg = <&syscfg_rear>;
+ st,syscfg = <&syscfg_rear 0x824 0x828>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
phy_port0: port@fe382000 {
- reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
- reg-names = "sata", "pcie", "syscfg";
+ reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
+ reg-names = "sata", "pcie";
#phy-cells = <1>;
st,sata-gen = <3>;
};
phy_port1: port@fe38a000 {
- reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;;
+ reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
reg-names = "sata", "pcie", "syscfg";
#phy-cells = <1>;
st,pcie-tx-pol-inv;
diff --git a/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt b/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
index 1ef8228db73b..de6a706abcdb 100644
--- a/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
@@ -5,10 +5,7 @@ host controllers (when controlling usb2/1.1 devices) available on STiH407 SoC fa
Required properties:
- compatible : should be "st,stih407-usb2-phy"
-- reg : contain the offset and length of the system configuration registers
- used as glue logic to control & parameter phy
-- reg-names : the names of the system configuration registers in "reg", should be "param" and "reg"
-- st,syscfg : sysconfig register to manage phy parameter at driver level
+- st,syscfg : phandle of sysconfig bank plus integer array containing phyparam and phyctrl register offsets
- resets : list of phandle and reset specifier pairs. There should be two entries, one
for the whole phy and one for the port
- reset-names : list of reset signal names. Should be "global" and "port"
@@ -19,11 +16,8 @@ Example:
usb2_picophy0: usbpicophy@f8 {
compatible = "st,stih407-usb2-phy";
- reg = <0xf8 0x04>, /* syscfg 5062 */
- <0xf4 0x04>; /* syscfg 5061 */
- reg-names = "param", "ctrl";
#phy-cells = <0>;
- st,syscfg = <&syscfg_core>;
+ st,syscfg = <&syscfg_core 0x100 0xf4>;
resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
<&picophyreset STIH407_PICOPHY0_RESET>;
reset-names = "global", "port";
diff --git a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
new file mode 100644
index 000000000000..826454ac43bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
@@ -0,0 +1,37 @@
+ROCKCHIP USB2 PHY
+
+Required properties:
+ - compatible: rockchip,rk3288-usb-phy
+ - rockchip,grf : phandle to the syscon managing the "general
+ register files"
+ - #address-cells: should be 1
+ - #size-cells: should be 0
+
+Sub-nodes:
+Each PHY should be represented as a sub-node.
+
+Sub-nodes
+required properties:
+- #phy-cells: should be 0
+- reg: PHY configuration register address offset in GRF
+ "0x320" - for PHY attach to OTG controller
+ "0x334" - for PHY attach to HOST0 controller
+ "0x348" - for PHY attach to HOST1 controller
+
+Optional Properties:
+- clocks : phandle + clock specifier for the phy clocks
+- clock-names: string, clock name, must be "phyclk"
+
+Example:
+
+usbphy: phy {
+ compatible = "rockchip,rk3288-usb-phy";
+ rockchip,grf = <&grf>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbphy0: usb-phy0 {
+ #phy-cells = <0>;
+ reg = <0x320>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt
index d5bad920827f..91e38cfe1f8f 100644
--- a/Documentation/devicetree/bindings/phy/samsung-phy.txt
+++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt
@@ -3,8 +3,8 @@ Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY
Required properties:
- compatible : should be "samsung,s5pv210-mipi-video-phy";
-- reg : offset and length of the MIPI DPHY register set;
- #phy-cells : from the generic phy bindings, must be 1;
+- syscon : phandle to the PMU system controller;
For "samsung,s5pv210-mipi-video-phy" compatible PHYs the second cell in
the PHY specifier identifies the PHY and its meaning is as follows:
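
For illustration, a minimal PHY provider node using the new syscon property
(the PMU node label is an assumption):

    mipi_phy: video-phy {
        compatible = "samsung,s5pv210-mipi-video-phy";
        syscon = <&pmu_system_controller>;
        #phy-cells = <1>;
    };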
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 93ce12eb422a..fdd8046e650a 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -11,6 +11,7 @@ Required properties:
"allwinner,sun5i-a10s-pinctrl"
"allwinner,sun5i-a13-pinctrl"
"allwinner,sun6i-a31-pinctrl"
+ "allwinner,sun6i-a31s-pinctrl"
"allwinner,sun6i-a31-r-pinctrl"
"allwinner,sun7i-a20-pinctrl"
"allwinner,sun8i-a23-pinctrl"
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
index 189814e7cdc7..ecb5c0d25218 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
@@ -6,7 +6,8 @@ nvidia,tegra30-pinmux.txt. In fact, this document assumes that binding as
a baseline, and only documents the differences between the two bindings.
Required properties:
-- compatible: "nvidia,tegra124-pinmux"
+- compatible: For Tegra124, must contain "nvidia,tegra124-pinmux". For
+ Tegra132, must contain '"nvidia,tegra132-pinmux", "nvidia,tegra124-pinmux"'.
- reg: Should contain a list of base address and size pairs for:
-- first entry - the drive strength and pad control registers.
-- second entry - the pinmux registers
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
index 2f9c0bd66457..30676ded85bb 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
@@ -13,7 +13,9 @@ how to describe and reference PHYs in device trees.
Required properties:
--------------------
-- compatible: should be "nvidia,tegra124-xusb-padctl"
+- compatible: For Tegra124, must contain "nvidia,tegra124-xusb-padctl".
+ Otherwise, must contain '"nvidia,<chip>-xusb-padctl",
+ "nvidia-tegra124-xusb-padctl"', where <chip> is tegra132 or tegra210.
- reg: Physical base address and length of the controller's registers.
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
new file mode 100644
index 000000000000..498caff6029e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
@@ -0,0 +1,186 @@
+Qualcomm MSM8916 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSM8916 platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,msm8916-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are:
+ gpio0-gpio121,
+ sdc1_clk,
+ sdc1_cmd,
+ sdc1_data
+ sdc2_clk,
+ sdc2_cmd,
+ sdc2_data,
+ qdsd_cmd,
+ qdsd_data0,
+ qdsd_data1,
+ qdsd_data2,
+ qdsd_data3
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+ adsp_ext, alsp_int, atest_bbrx0, atest_bbrx1, atest_char, atest_char0,
+ atest_char1, atest_char2, atest_char3, atest_combodac, atest_gpsadc0,
+ atest_gpsadc1, atest_tsens, atest_wlan0, atest_wlan1, backlight_en,
+ bimc_dte0,bimc_dte1, blsp_i2c1, blsp_i2c2, blsp_i2c3, blsp_i2c4,
+ blsp_i2c5, blsp_i2c6, blsp_spi1, blsp_spi1_cs1, blsp_spi1_cs2,
+ blsp_spi1_cs3, blsp_spi2, blsp_spi2_cs1, blsp_spi2_cs2, blsp_spi2_cs3,
+ blsp_spi3, blsp_spi3_cs1, blsp_spi3_cs2, blsp_spi3_cs3, blsp_spi4,
+ blsp_spi5, blsp_spi6, blsp_uart1, blsp_uart2, blsp_uim1, blsp_uim2,
+ cam1_rst, cam1_standby, cam_mclk0, cam_mclk1, cci_async, cci_i2c,
+ cci_timer0, cci_timer1, cci_timer2, cdc_pdm0, codec_mad, dbg_out,
+ display_5v, dmic0_clk, dmic0_data, dsi_rst, ebi0_wrcdc, euro_us,
+ ext_lpass, flash_strobe, gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a,
+ gcc_gp2_clk_b, gcc_gp3_clk_a, gcc_gp3_clk_b, gpio, gsm0_tx0, gsm0_tx1,
+ gsm1_tx0, gsm1_tx1, gyro_accl, kpsns0, kpsns1, kpsns2, ldo_en,
+ ldo_update, mag_int, mdp_vsync, modem_tsync, m_voc, nav_pps, nav_tsync,
+ pa_indicator, pbs0, pbs1, pbs2, pri_mi2s, pri_mi2s_ws, prng_rosc,
+ pwr_crypto_enabled_a, pwr_crypto_enabled_b, pwr_modem_enabled_a,
+ pwr_modem_enabled_b, pwr_nav_enabled_a, pwr_nav_enabled_b,
+ qdss_ctitrig_in_a0, qdss_ctitrig_in_a1, qdss_ctitrig_in_b0,
+ qdss_ctitrig_in_b1, qdss_ctitrig_out_a0, qdss_ctitrig_out_a1,
+ qdss_ctitrig_out_b0, qdss_ctitrig_out_b1, qdss_traceclk_a,
+ qdss_traceclk_b, qdss_tracectl_a, qdss_tracectl_b, qdss_tracedata_a,
+ qdss_tracedata_b, reset_n, sd_card, sd_write, sec_mi2s, smb_int,
+ ssbi_wtr0, ssbi_wtr1, uim1, uim2, uim3, uim_batt, wcss_bt, wcss_fm,
+ wcss_wlan, webcam1_rst
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,msm8916-pinctrl";
+ reg = <0x1000000 0x300000>;
+ interrupts = <0 208 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart2: uart2-default {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ };
+
+ tx {
+ pins = "gpio4";
+ drive-strength = <4>;
+ bias-disable;
+ };
+
+ rx {
+ pins = "gpio5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index daef6fad6a5f..bfe72ec055e3 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -1,7 +1,7 @@
* Renesas Pin Function Controller (GPIO and Pin Mux/Config)
-The Pin Function Controller (PFC) is a Pin Mux/Config controller. On SH7372,
-SH73A0, R8A73A4 and R8A7740 it also acts as a GPIO controller.
+The Pin Function Controller (PFC) is a Pin Mux/Config controller. On SH73A0,
+R8A73A4 and R8A7740 it also acts as a GPIO controller.
Pin Control
@@ -10,13 +10,13 @@ Pin Control
Required Properties:
- compatible: should be one of the following.
+ - "renesas,pfc-emev2": for EMEV2 (EMMA Mobile EV2) compatible pin-controller.
- "renesas,pfc-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible pin-controller.
- "renesas,pfc-r8a7740": for R8A7740 (R-Mobile A1) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Mobile M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
- "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
- "renesas,pfc-r8a7791": for R8A7791 (R-Car M2) compatible pin-controller.
- - "renesas,pfc-sh7372": for SH7372 (SH-Mobile AP4) compatible pin-controller.
- "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
- reg: Base address and length of each memory resource used by the pin
@@ -75,8 +75,7 @@ bias-disable, bias-pull-up and bias-pull-down.
GPIO
----
-On SH7372, SH73A0, R8A73A4 and R8A7740 the PFC node is also a GPIO controller
-node.
+On SH73A0, R8A73A4 and R8A7740 the PFC node is also a GPIO controller node.
Required Properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 8425838a6dff..9d2a995293e6 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -171,6 +171,18 @@ Aliases:
All the pin controller nodes should be represented in the aliases node using
the following format 'pinctrl{n}' where n is a unique number for the alias.
+Aliases for controllers compatible with "samsung,exynos7-pinctrl":
+- pinctrl0: pin controller of ALIVE block,
+- pinctrl1: pin controller of BUS0 block,
+- pinctrl2: pin controller of NFC block,
+- pinctrl3: pin controller of TOUCH block,
+- pinctrl4: pin controller of FF block,
+- pinctrl5: pin controller of ESE block,
+- pinctrl6: pin controller of FSYS0 block,
+- pinctrl7: pin controller of FSYS1 block,
+- pinctrl8: pin controller of BUS1 block,
+- pinctrl9: pin controller of AUDIO block,
+
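+For example, the board or SoC dts for such a controller could provide (a
+minimal sketch; the referenced node labels are illustrative only):
+
+	aliases {
+		pinctrl0 = &pinctrl_alive;
+		pinctrl1 = &pinctrl_bus0;
+		pinctrl9 = &pinctrl_audio;
+	};
+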
Example: A pin-controller node with pin banks:
pinctrl_0: pinctrl@11400000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
index 6b33b9f18e88..f63fcb3ed352 100644
--- a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
@@ -16,17 +16,22 @@ mux function to select on those pin(s)/group(s), and various pin configuration
parameters, such as input, output, pull up, pull down...
The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
+and processed purely based on their content. The subnodes use the generic
+pin multiplexing node layout from the standard pin control bindings
+(see pinctrl-bindings.txt):
-Required subnode-properties:
-- ste,pins : An array of strings. Each string contains the name of a pin or
- group.
-
-Optional subnode-properties:
-- ste,function: A string containing the name of the function to mux to the
+Required pin multiplexing subnode properties:
+- function: A string containing the name of the function to mux to the
pin or group.
+- groups : An array of strings. Each string contains the name of a pin
+ group that will be combined with the function to form a multiplexing
+ set-up.
-- ste,config: Handle of pin configuration node (e.g. ste,config = <&slpm_in_wkup_pdis>)
+Required pin configuration subnode properties:
+- pins: A string array describing the pins affected by the configuration
+ in the node.
+- ste,config: Handle of pin configuration node
+ (e.g. ste,config = <&slpm_in_wkup_pdis>)
- ste,input : <0/1/2>
0: input with no pull
@@ -97,32 +102,32 @@ Example board file extract:
uart0 {
uart0_default_mux: uart0_mux {
u0_default_mux {
- ste,function = "u0";
- ste,pins = "u0_a_1";
+ function = "u0";
+ pins = "u0_a_1";
};
};
uart0_default_mode: uart0_default {
uart0_default_cfg1 {
- ste,pins = "GPIO0", "GPIO2";
+ pins = "GPIO0", "GPIO2";
ste,input = <1>;
};
uart0_default_cfg2 {
- ste,pins = "GPIO1", "GPIO3";
+ pins = "GPIO1", "GPIO3";
ste,output = <1>;
};
};
uart0_sleep_mode: uart0_sleep {
uart0_sleep_cfg1 {
- ste,pins = "GPIO0", "GPIO2";
+ pins = "GPIO0", "GPIO2";
ste,config = <&slpm_in_wkup_pdis>;
};
uart0_sleep_cfg2 {
- ste,pins = "GPIO1";
+ pins = "GPIO1";
ste,config = <&slpm_out_hi_wkup_pdis>;
};
uart0_sleep_cfg3 {
- ste,pins = "GPIO3";
+ pins = "GPIO3";
ste,config = <&slpm_out_wkup_pdis>;
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.txt
new file mode 100644
index 000000000000..b7b55a964f65
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.txt
@@ -0,0 +1,104 @@
+ Binding for Xilinx Zynq Pinctrl
+
+Required properties:
+- compatible: "xlnx,zynq-pinctrl"
+- syscon: phandle to SLCR
+- reg: Offset and length of pinctrl space in SLCR
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Zynq's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, slew rate, etc.
+
+Each configuration node can consist of multiple nodes describing the pinmux and
+pinconf options. Those nodes can be pinmux nodes or pinconf nodes.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Required properties for pinmux nodes are:
+ - groups: A list of pinmux groups.
+ - function: The name of a pinmux function to activate for the specified set
+ of groups.
+
+Required properties for configuration nodes:
+One of:
+ - pins: a list of pin names
+ - groups: A list of pinmux groups.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinmux subnode:
+ groups, function
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinconf subnode:
+ groups, pins, bias-disable, bias-high-impedance, bias-pull-up, slew-rate,
+ low-power-disable, low-power-enable
+
+ Valid arguments for 'slew-rate' are '0' and '1' to select between slow and fast
+ respectively.
+
+ Valid values for groups are:
+ ethernet0_0_grp, ethernet1_0_grp, mdio0_0_grp, mdio1_0_grp,
+ qspi0_0_grp, qspi1_0_grp, qspi_fbclk, qspi_cs1_grp, spi0_0_grp,
+ spi0_1_grp - spi0_2_grp, spi1_0_grp - spi1_3_grp, sdio0_0_grp - sdio0_2_grp,
+ sdio1_0_grp - sdio1_3_grp, sdio0_emio_wp, sdio0_emio_cd, sdio1_emio_wp,
+ sdio1_emio_cd, smc0_nor, smc0_nor_cs1_grp, smc0_nor_addr25_grp, smc0_nand,
+ can0_0_grp - can0_10_grp, can1_0_grp - can1_11_grp, uart0_0_grp - uart0_10_grp,
+ uart1_0_grp - uart1_11_grp, i2c0_0_grp - i2c0_10_grp, i2c1_0_grp - i2c1_10_grp,
+ ttc0_0_grp - ttc0_2_grp, ttc1_0_grp - ttc1_2_grp, swdt0_0_grp - swdt0_4_grp,
+ gpio0_0_grp - gpio0_53_grp, usb0_0_grp, usb1_0_grp
+
+ Valid values for pins are:
+ MIO0 - MIO53
+
+ Valid values for function are:
+ ethernet0, ethernet1, mdio0, mdio1, qspi0, qspi1, qspi_fbclk, qspi_cs1,
+ spi0, spi1, sdio0, sdio0_pc, sdio0_cd, sdio0_wp,
+ sdio1, sdio1_pc, sdio1_cd, sdio1_wp,
+ smc0_nor, smc0_nor_cs1, smc0_nor_addr25, smc0_nand, can0, can1, uart0, uart1,
+ i2c0, i2c1, ttc0, ttc1, swdt0, gpio0, usb0, usb1
+
+The following driver-specific properties as defined here are valid to specify in
+a pin configuration subnode:
+ - io-standard: Configure the pin to use the selected IO standard according to
+ this mapping:
+ 1: LVCMOS18
+ 2: LVCMOS25
+ 3: LVCMOS33
+ 4: HSTL
+
+Example:
+ pinctrl0: pinctrl@700 {
+ compatible = "xlnx,pinctrl-zynq";
+ reg = <0x700 0x200>;
+ syscon = <&slcr>;
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_10_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_10_grp";
+ slew-rate = <0>;
+ io-standard = <1>;
+ };
+
+ conf-rx {
+ pins = "MIO49";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO48";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/ltc2941.txt b/Documentation/devicetree/bindings/power/ltc2941.txt
new file mode 100644
index 000000000000..ea42ae12d924
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/ltc2941.txt
@@ -0,0 +1,27 @@
+binding for LTC2941 and LTC2943 battery gauges
+
+Both the LTC2941 and LTC2943 measure battery capacity.
+The LTC2943 is compatible with the LTC2941; it adds voltage and
+temperature monitoring, and uses a slightly different conversion
+formula for the charge counter.
+
+Required properties:
+- compatible: Should contain "ltc2941" or "ltc2943" which also indicates the
+ type of I2C chip attached.
+- reg: The 7-bit I2C address.
+- lltc,resistor-sense: The sense resistor value in milli-ohms. Can be a 32-bit
+ negative value when the battery has been connected to the wrong end of the
+ resistor.
+- lltc,prescaler-exponent: The prescaler exponent as explained in the datasheet.
+ This determines the range and accuracy of the gauge. The value is programmed
+ into the chip only if it differs from the current setting. The setting is
+ lost when the battery is disconnected.
+
+Example from the Topic Miami Florida board:
+
+ fuelgauge: ltc2943@64 {
+ compatible = "ltc2943";
+ reg = <0x64>;
+ lltc,resistor-sense = <15>;
+ lltc,prescaler-exponent = <5>; /* 2^(2*5) = 1024 */
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/ltc2952-poweroff.txt b/Documentation/devicetree/bindings/power/reset/ltc2952-poweroff.txt
index 0c94c637f63b..cd2d7f58a9d7 100644
--- a/Documentation/devicetree/bindings/power/reset/ltc2952-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/ltc2952-poweroff.txt
@@ -1,20 +1,23 @@
Binding for the LTC2952 PowerPath controller
This chip is used to externally trigger a system shut down. Once the trigger has
-been sent, the chips' watchdog has to be reset to gracefully shut down.
-If the Linux systems decides to shut down it powers off the platform via the
-kill signal.
+been sent, the chip's watchdog has to be reset to gracefully shut down.
+A full powerdown can be triggered via the kill signal.
Required properties:
- compatible: Must contain: "lltc,ltc2952"
-- trigger-gpios: phandle + gpio-specifier for the GPIO connected to the
- chip's trigger line
- watchdog-gpios: phandle + gpio-specifier for the GPIO connected to the
chip's watchdog line
- kill-gpios: phandle + gpio-specifier for the GPIO connected to the
chip's kill line
+Optional properties:
+- trigger-gpios: phandle + gpio-specifier for the GPIO connected to the
+ chip's trigger line. If this property is not set, the
+ trigger function is ignored and the chip is kept alive
+ until an explicit kill signal is received
+
Example:
ltc2952 {
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index 6fbf6e7ecde6..8b70db103ca7 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -37,7 +37,7 @@ Required properties:
You specify supplies using the standard regulator bindings by including
-a phandle the the relevant regulator. All specified supplies must be able
+a phandle to the relevant regulator. All specified supplies must be able
to report their voltage. The IO Voltage Domain for any non-specified
supplies will not be touched.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
index edeea160ca39..edda55f74004 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
@@ -7,6 +7,7 @@ CONTENTS
- FMan MURAM Node
- FMan dTSEC/XGEC/mEMAC Node
- FMan IEEE 1588 Node
+ - FMan MDIO Node
- Example
=============================================================================
@@ -357,6 +358,69 @@ ptp-timer@fe000 {
};
=============================================================================
+FMan MDIO Node
+
+DESCRIPTION
+
+The MDIO is a bus to which the PHY devices are connected.
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: A standard property.
+ Must include "fsl,fman-mdio" for 1 Gb/s MDIO from FMan v2.
+ Must include "fsl,fman-xmdio" for 10 Gb/s MDIO from FMan v2.
+ Must include "fsl,fman-memac-mdio" for 1/10 Gb/s MDIO from
+ FMan v3.
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property.
+
+- bus-frequency
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the external MDIO bus clock speed to
+ be used, if different from the standard 2.5 MHz.
+ This may be due to the standard speed being unsupported (e.g.
+ due to a hardware problem), or to advertise that all relevant
+ components in the system support a faster speed.
+
+- interrupts
+ Usage: required for external MDIO
+ Value type: <prop-encoded-array>
+ Definition: Event interrupt of external MDIO controller.
+
+- fsl,fman-internal-mdio
+ Usage: required for internal MDIO
+ Value type: boolean
+ Definition: FMan has internal MDIO for internal PCS (Physical
+ Coding Sublayer) PHYs and external MDIO for external PHYs.
+ The settings and programming routines for internal/external
+ MDIO are different. Must be included for internal MDIO.
+
+EXAMPLE
+
+Example for FMan v2 external MDIO:
+
+mdio@f1000 {
+ compatible = "fsl,fman-xmdio";
+ reg = <0xf1000 0x1000>;
+ interrupts = <101 2 0 0>;
+};
+
+Example for FMan v3 internal MDIO:
+
+mdio@f1000 {
+ compatible = "fsl,fman-memac-mdio";
+ reg = <0xf1000 0x1000>;
+ fsl,fman-internal-mdio;
+};
+
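+Example for FMan v2 external MDIO with a non-default bus frequency (a sketch
+only; it assumes bus-frequency is expressed in Hz, and the 5 MHz value shown
+is purely illustrative):
+
+mdio@f1000 {
+	compatible = "fsl,fman-xmdio";
+	reg = <0xf1000 0x1000>;
+	interrupts = <101 2 0 0>;
+	bus-frequency = <5000000>;
+};
+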
+=============================================================================
Example
fman@400000 {
@@ -531,4 +595,10 @@ fman@400000 {
compatible = "fsl,fman-ptp-timer";
reg = <0xfe000 0x1000>;
};
+
+ mdio@f1000 {
+ compatible = "fsl,fman-xmdio";
+ reg = <0xf1000 0x1000>;
+ interrupts = <101 2 0 0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
index 3300fec501c5..1c80fcedebb5 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
@@ -16,20 +16,28 @@ Example:
"fsl,pq2-localbus";
#address-cells = <2>;
#size-cells = <1>;
- reg = <f0010100 40>;
+ reg = <0xf0010100 0x40>;
- ranges = <0 0 fe000000 02000000
- 1 0 f4500000 00008000>;
+ ranges = <0x0 0x0 0xfe000000 0x02000000
+ 0x1 0x0 0xf4500000 0x00008000
+ 0x2 0x0 0xfd810000 0x00010000>;
flash@0,0 {
compatible = "jedec-flash";
- reg = <0 0 2000000>;
+ reg = <0x0 0x0 0x2000000>;
bank-width = <4>;
device-width = <1>;
};
board-control@1,0 {
- reg = <1 0 20>;
+ reg = <0x1 0x0 0x20>;
compatible = "fsl,mpc8272ads-bcsr";
};
+
+ simple-periph@2,0 {
+ compatible = "fsl,elbc-gpcm-uio";
+ reg = <0x2 0x0 0x10000>;
+ elbc-gpcm-br = <0xfd810800>;
+ elbc-gpcm-or = <0xffff09f7>;
+ };
};
diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
index c7ea9d4a988b..c52f03b5032f 100644
--- a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
@@ -1,9 +1,10 @@
Tegra SoC PWFM controller
Required properties:
-- compatible: should be one of:
- - "nvidia,tegra20-pwm"
- - "nvidia,tegra30-pwm"
+- compatible: For Tegra20, must contain "nvidia,tegra20-pwm". For Tegra30,
+ must contain "nvidia,tegra30-pwm". Otherwise, must contain
+ "nvidia,<chip>-pwm", plus one of the above, where <chip> is tegra114,
+ tegra124, tegra132, or tegra210.
- reg: physical base address and length of the controller's registers
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index 240019a82f9a..eb618907c7de 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -11,6 +11,7 @@ Required properties:
BUCKA and BUCKB.
Optional properties:
+- enable-gpios: platform gpio for control of BUCKA/BUCKB.
- Any optional property defined in regulator.txt
Example 1) DA9211
@@ -27,6 +28,7 @@ Example 1) DA9211
regulator-max-microvolt = <1570000>;
regulator-min-microamp = <2000000>;
regulator-max-microamp = <5000000>;
+ enable-gpios = <&gpio 27 0>;
};
BUCKB {
regulator-name = "VBUCKB";
@@ -34,11 +36,12 @@ Example 1) DA9211
regulator-max-microvolt = <1570000>;
regulator-min-microamp = <2000000>;
regulator-max-microamp = <5000000>;
+ enable-gpios = <&gpio 17 0>;
};
};
};
-Example 2) DA92113
+Example 2) DA9213
pmic: da9213@68 {
compatible = "dlg,da9213";
reg = <0x68>;
@@ -51,6 +54,7 @@ Example 2) DA92113
regulator-max-microvolt = <1570000>;
regulator-min-microamp = <3000000>;
regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 27 0>;
};
BUCKB {
regulator-name = "VBUCKB";
@@ -58,6 +62,7 @@ Example 2) DA92113
regulator-max-microvolt = <1570000>;
regulator-min-microamp = <3000000>;
regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 17 0>;
};
};
};
diff --git a/Documentation/devicetree/bindings/regulator/isl9305.txt b/Documentation/devicetree/bindings/regulator/isl9305.txt
index a626fc1bbf0d..d6e7c9ec9413 100644
--- a/Documentation/devicetree/bindings/regulator/isl9305.txt
+++ b/Documentation/devicetree/bindings/regulator/isl9305.txt
@@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator
Required properties:
-- compatible: "isl,isl9305" or "isl,isl9305h"
+- compatible: "isil,isl9305" or "isil,isl9305h"
- reg: I2C slave address, usually 0x68.
- regulators: A node that houses a sub-node for each regulator within the
device. Each sub-node is identified using the node's name, with valid
@@ -19,7 +19,7 @@ Optional properties:
Example
pmic: isl9305@68 {
- compatible = "isl,isl9305";
+ compatible = "isil,isl9305";
reg = <0x68>;
VINDCD1-supply = <&system_power>;
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
new file mode 100644
index 000000000000..a42b1d6e9863
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
@@ -0,0 +1,217 @@
+Mediatek MT6397 Regulator Driver
+
+Required properties:
+- compatible: "mediatek,mt6397-regulator"
+- mt6397regulator: List of regulators provided by this controller. It is named
+ according to its regulator type, buck_<name> and ldo_<name>.
+ The definition for each of these nodes is defined using the standard binding
+ for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+ buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
+ buck_vdrm, buck_vio18
+LDO:
+ ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
+ ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
+ ldo_vibr
+
+Example:
+ pmic {
+ compatible = "mediatek,mt6397";
+
+ mt6397regulator: mt6397regulator {
+ compatible = "mediatek,mt6397-regulator";
+
+ mt6397_vpca15_reg: buck_vpca15 {
+ regulator-compatible = "buck_vpca15";
+ regulator-name = "vpca15";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <200>;
+ };
+
+ mt6397_vpca7_reg: buck_vpca7 {
+ regulator-compatible = "buck_vpca7";
+ regulator-name = "vpca7";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vsramca15_reg: buck_vsramca15 {
+ regulator-compatible = "buck_vsramca15";
+ regulator-name = "vsramca15";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+
+ };
+
+ mt6397_vsramca7_reg: buck_vsramca7 {
+ regulator-compatible = "buck_vsramca7";
+ regulator-name = "vsramca7";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+
+ };
+
+ mt6397_vcore_reg: buck_vcore {
+ regulator-compatible = "buck_vcore";
+ regulator-name = "vcore";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vgpu_reg: buck_vgpu {
+ regulator-compatible = "buck_vgpu";
+ regulator-name = "vgpu";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vdrm_reg: buck_vdrm {
+ regulator-compatible = "buck_vdrm";
+ regulator-name = "vdrm";
+ regulator-min-microvolt = < 800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <500>;
+ };
+
+ mt6397_vio18_reg: buck_vio18 {
+ regulator-compatible = "buck_vio18";
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2120000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <500>;
+ };
+
+ mt6397_vtcxo_reg: ldo_vtcxo {
+ regulator-compatible = "ldo_vtcxo";
+ regulator-name = "vtcxo";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <90>;
+ };
+
+ mt6397_va28_reg: ldo_va28 {
+ regulator-compatible = "ldo_va28";
+ regulator-name = "va28";
+ /* fixed output 2.8 V */
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vcama_reg: ldo_vcama {
+ regulator-compatible = "ldo_vcama";
+ regulator-name = "vcama";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vio28_reg: ldo_vio28 {
+ regulator-compatible = "ldo_vio28";
+ regulator-name = "vio28";
+ /* fixed output 2.8 V */
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_usb_reg: ldo_vusb {
+ regulator-compatible = "ldo_vusb";
+ regulator-name = "vusb";
+ /* fixed output 3.3 V */
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmc_reg: ldo_vmc {
+ regulator-compatible = "ldo_vmc";
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmch_reg: ldo_vmch {
+ regulator-compatible = "ldo_vmch";
+ regulator-name = "vmch";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ regulator-compatible = "ldo_vemc3v3";
+ regulator-name = "vemc_3v3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp1_reg: ldo_vgp1 {
+ regulator-compatible = "ldo_vgp1";
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <1220000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_vgp2_reg: ldo_vgp2 {
+ regulator-compatible = "ldo_vgp2";
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp3_reg: ldo_vgp3 {
+ regulator-compatible = "ldo_vgp3";
+ regulator-name = "vcamaf";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp4_reg: ldo_vgp4 {
+ regulator-compatible = "ldo_vgp4";
+ regulator-name = "vgp4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp5_reg: ldo_vgp5 {
+ regulator-compatible = "ldo_vgp5";
+ regulator-name = "vgp5";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp6_reg: ldo_vgp6 {
+ regulator-compatible = "ldo_vgp6";
+ regulator-name = "vgp6";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vibr_reg: ldo_vibr {
+ regulator-compatible = "ldo_vibr";
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index 34ef5d16d0f1..9b40db88f637 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -1,7 +1,7 @@
PFUZE100 family of regulators
Required properties:
-- compatible: "fsl,pfuze100" or "fsl,pfuze200"
+- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
- reg: I2C slave address
Required child node:
@@ -14,6 +14,8 @@ Required child node:
sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
--PFUZE200
sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
+ --PFUZE3000
+ sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
Each regulator is defined using the standard binding for regulators.
@@ -205,3 +207,93 @@ Example 2: PFUZE200
};
};
};
+
+Example 3: PFUZE3000
+
+ pmic: pfuze3000@08 {
+ compatible = "fsl,pfuze3000";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1a {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1475000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+ /* use sw1c_reg to align with pfuze100/pfuze200 */
+ sw1c_reg: sw1b {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1475000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vldo2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vccsd {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: v33 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vgen5_reg: vldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt b/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt
new file mode 100644
index 000000000000..2eb9d4ee7dc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt
@@ -0,0 +1,22 @@
+* Real Time Clock of the Armada 38x SoCs
+
+RTC controller for the Armada 38x SoCs
+
+Required properties:
+- compatible : Should be "marvell,armada-380-rtc"
+- reg: a list of base address and size pairs, one for each entry in
+ reg-names
+- reg-names: should contain:
+ * "rtc" for the RTC registers
+ * "rtc-soc" for the SoC related registers and among them the one
+ related to the interrupt.
+- interrupts: IRQ line for the RTC.
+
+Example:
+
+rtc@a3800 {
+ compatible = "marvell,armada-380-rtc";
+ reg = <0xa3800 0x20>, <0x184a0 0x0c>;
+ reg-names = "rtc", "rtc-soc";
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
new file mode 100644
index 000000000000..501c39ceae79
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
@@ -0,0 +1,78 @@
+Intersil ISL12057 I2C RTC/Alarm chip
+
+ISL12057 is a trivial I2C device (it has simple device tree bindings,
+consisting of a compatible field, an address and possibly an interrupt
+line).
+
+Nonetheless, it also supports an optional boolean property
+("isil,irq2-can-wakeup-machine") to handle the specific use-case found
+on at least three in-tree users of the chip (NETGEAR ReadyNAS 102, 104
+and 2120 ARM-based NAS). On those devices, the IRQ#2 pin of the chip
+(associated with the alarm supported by the driver) is not connected
+to the SoC but to a PMIC, which allows the device to be powered up
+when the RTC alarm rings. In order to mark the device as a wakeup
+source and get access to the 'wakealarm' sysfs entry, this specific
+property can be set when the IRQ#2 pin of the chip is not connected
+to the SoC but can wake up the device.
+
+Required properties supported by the device:
+
+ - "compatible": must be "isil,isl12057"
+ - "reg": I2C bus address of the device
+
+Optional properties:
+
+ - "isil,irq2-can-wakeup-machine": mark the chip as a wakeup source,
+ independently of the availability of an IRQ line connected to the
+ SoC.
+
+ - "interrupt-parent", "interrupts": for passing the interrupt line
+ of the SoC connected to IRQ#2 of the RTC chip.
+
+
+Example isl12057 node without IRQ#2 pin connected (no alarm support):
+
+ isl12057: isl12057@68 {
+ compatible = "isil,isl12057";
+ reg = <0x68>;
+ };
+
+
+Example isl12057 node with IRQ#2 pin connected to main SoC via MPP6 (note
+that the pinctrl-related properties below are given for completeness and
+may not be required, or may be different, depending on your system, SoC
+and the main function of the MPP used as the IRQ line; "interrupt-parent"
+and "interrupts" are usually sufficient):
+
+ pinctrl {
+ ...
+
+ rtc_alarm_pin: rtc_alarm_pin {
+ marvell,pins = "mpp6";
+ marvell,function = "gpio";
+ };
+
+ ...
+
+ };
+
+ ...
+
+ isl12057: isl12057@68 {
+ compatible = "isil,isl12057";
+ reg = <0x68>;
+ pinctrl-0 = <&rtc_alarm_pin>;
+ pinctrl-names = "default";
+ interrupt-parent = <&gpio0>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+
+Example isl12057 node without IRQ#2 pin connected to the SoC but to a
+PMIC, allowing the device to be started based on a configured alarm:
+
+ isl12057: isl12057@68 {
+ compatible = "isil,isl12057";
+ reg = <0x68>;
+ isil,irq2-can-wakeup-machine;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt b/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt
index 652d1ff2e8be..b7d98ed3e098 100644
--- a/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt
@@ -6,7 +6,9 @@ state.
Required properties:
-- compatible : should be "nvidia,tegra20-rtc".
+- compatible : For Tegra20, must contain "nvidia,tegra20-rtc". Otherwise,
+ must contain '"nvidia,<chip>-rtc", "nvidia,tegra20-rtc"', where <chip>
+ can be tegra30, tegra114, tegra124, or tegra132.
- reg : Specifies base physical address and size of the registers.
- interrupts : A single interrupt specifier.
- clocks : Must contain one entry, for the module clock.
diff --git a/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt b/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
new file mode 100644
index 000000000000..5cbc0b145a61
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
@@ -0,0 +1,16 @@
+NXP PCF2123 SPI Real Time Clock
+
+Required properties:
+- compatible: should be: "nxp,rtc-pcf2123"
+- reg: should be the SPI slave chipselect address
+
+Optional properties:
+- spi-cs-high: PCF2123 needs chipselect high
+
+Example:
+
+rtc: nxp,rtc-pcf2123@3 {
+ compatible = "nxp,rtc-pcf2123"
+ reg = <3>
+ spi-cs-high;
+};
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
new file mode 100644
index 000000000000..3ad115efed1e
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
@@ -0,0 +1,36 @@
+* STMicroelectronics SAS. ST33ZP24 TPM SoC
+
+Required properties:
+- compatible: Should be "st,st33zp24-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+
+Optional ST33ZP24 Properties:
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
+If set, power must be present when the platform is going into sleep/hibernate mode.
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST33ZP24 on I2C2):
+
+&i2c2 {
+
+ status = "okay";
+
+ st33zp24: st33zp24@13 {
+
+ compatible = "st,st33zp24-i2c";
+
+ reg = <0x13>;
+ clock-frequency = <400000>;
+
+ interrupt-parent = <&gpio5>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+
+ lpcpd-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/serial/digicolor-usart.txt b/Documentation/devicetree/bindings/serial/digicolor-usart.txt
new file mode 100644
index 000000000000..2d3ede66889d
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/digicolor-usart.txt
@@ -0,0 +1,27 @@
+Binding for Conexant Digicolor USART
+
+Note: this binding is only applicable for using the USART peripheral as
+UART. The USART also supports synchronous serial protocols such as SPI and
+I2S; use the binding that matches the wiring of your system.
+
+Required properties:
+- compatible : should be "cnxt,cx92755-usart".
+- reg: Should contain USART controller registers location and length.
+- interrupts: Should contain a single USART controller interrupt.
+- clocks: Must contain a phandle to the USART clock.
+ See ../clocks/clock-bindings.txt for details.
+
+Note: Each UART port should have an alias correctly numbered
+in "aliases" node.
+
+Example:
+ aliases {
+ serial0 = &uart0;
+ };
+
+ uart0: uart@f0000740 {
+ compatible = "cnxt,cx92755-usart";
+ reg = <0xf0000740 0x20>;
+ clocks = <&main_clk>;
+ interrupts = <44>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/of-serial.txt
index b52b98234b9b..bea60ef6cdc5 100644
--- a/Documentation/devicetree/bindings/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/of-serial.txt
@@ -8,7 +8,10 @@ Required properties:
- "ns16550"
- "ns16750"
- "ns16850"
- - "nvidia,tegra20-uart"
+ - For Tegra20, must contain "nvidia,tegra20-uart"
+ - For other Tegra, must contain '"nvidia,<chip>-uart",
+ "nvidia,tegra20-uart"' where <chip> is tegra30, tegra114, tegra124,
+ tegra132, or tegra210.
- "nxp,lpc3220-uart"
- "ralink,rt2880-uart"
- "ibm,qpace-nwp-serial"
diff --git a/Documentation/devicetree/bindings/serial/sirf-uart.txt b/Documentation/devicetree/bindings/serial/sirf-uart.txt
index 3acdd969edf1..f0c39261c5d4 100644
--- a/Documentation/devicetree/bindings/serial/sirf-uart.txt
+++ b/Documentation/devicetree/bindings/serial/sirf-uart.txt
@@ -2,7 +2,7 @@
Required properties:
- compatible : Should be "sirf,prima2-uart", "sirf, prima2-usp-uart",
- "sirf,marco-uart" or "sirf,marco-bt-uart" which means
+ "sirf,atlas7-uart" or "sirf,atlas7-bt-uart" which means
uart located in BT module and used for BT.
- reg : Offset and length of the register set for the device
- interrupts : Should contain uart interrupt
@@ -37,7 +37,7 @@ usp@b0090000 {
for uart use in BT module,
uart6: uart@11000000 {
cell-index = <6>;
- compatible = "sirf,marco-bt-uart", "sirf,marco-uart";
+ compatible = "sirf,atlas7-bt-uart", "sirf,atlas7-uart";
reg = <0x11000000 0x1000>;
interrupts = <0 100 0>;
clocks = <&clks 138>, <&clks 140>, <&clks 141>;
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.txt b/Documentation/devicetree/bindings/serial/sprd-uart.txt
new file mode 100644
index 000000000000..2aff0f22c9fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/sprd-uart.txt
@@ -0,0 +1,7 @@
+* Spreadtrum serial UART
+
+Required properties:
+- compatible: must be "sprd,sc9836-uart"
+- reg: offset and length of the register set for the device
+- interrupts: exactly one interrupt specifier
+- clocks: phandles to input clocks.
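+
+Example (a minimal sketch; the unit address, interrupt specifier and clock
+phandle below are placeholders and must match the actual SoC):
+
+	uart0: serial@70000000 {
+		compatible = "sprd,sc9836-uart";
+		reg = <0x70000000 0x100>;
+		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&ext_26m>;
+	};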
diff --git a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
new file mode 100644
index 000000000000..362a76925bcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
@@ -0,0 +1,23 @@
+* Device tree bindings for Allwinner A10, A20 PS2 host controller
+
+The A20 PS2 is a dual-role controller (PS2 host and PS2 device). These
+bindings are for the A10/A20 PS2 host controller; IBM-compatible PS2 and
+AT-compatible keyboards and mice can be connected to it.
+
+Required properties:
+
+ - reg : Offset and length of the register set for the device.
+ - compatible : Should be one of the following:
+ - "allwinner,sun4i-a10-ps2"
+ - interrupts : The interrupt line connected to the PS2.
+ - clocks : The gate clk connected to the PS2.
+
+
+Example:
+ ps20: ps2@0x01c2a000 {
+ compatible = "allwinner,sun4i-a10-ps2";
+ reg = <0x01c2a000 0x400>;
+ interrupts = <0 62 4>;
+ clocks = <&apb1_gates 6>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/soc/fsl/bman.txt b/Documentation/devicetree/bindings/soc/fsl/bman.txt
index 9f80bf8709ac..47ac834414d8 100644
--- a/Documentation/devicetree/bindings/soc/fsl/bman.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/bman.txt
@@ -36,6 +36,11 @@ are located at offsets 0xbf8 and 0xbfc
Value type: <prop-encoded-array>
Definition: Standard property. The error interrupt
+- fsl,bman-portals
+ Usage: Required
+ Value type: <phandle>
+ Definition: Phandle to this BMan instance's portals
+
- fsl,liodn
Usage: See pamu.txt
Value type: <prop-encoded-array>
@@ -96,7 +101,7 @@ The example below shows a BMan FBPR dynamic allocation memory node
bman_fbpr: bman-fbpr {
compatible = "fsl,bman-fbpr";
- alloc-ranges = <0 0 0xf 0xffffffff>;
+ alloc-ranges = <0 0 0x10 0>;
size = <0 0x1000000>;
alignment = <0 0x1000000>;
};
@@ -104,6 +109,10 @@ The example below shows a BMan FBPR dynamic allocation memory node
The example below shows a (P4080) BMan CCSR-space node
+ bportals: bman-portals@ff4000000 {
+ ...
+ };
+
crypto@300000 {
...
fsl,bman = <&bman, 2>;
@@ -115,6 +124,7 @@ The example below shows a (P4080) BMan CCSR-space node
reg = <0x31a000 0x1000>;
interrupts = <16 2 1 2>;
fsl,liodn = <0x17>;
+ fsl,bman-portals = <&bportals>;
memory-region = <&bman_fbpr>;
};
diff --git a/Documentation/devicetree/bindings/soc/fsl/qman.txt b/Documentation/devicetree/bindings/soc/fsl/qman.txt
index 063e3a0b9d04..556ebb8be75d 100644
--- a/Documentation/devicetree/bindings/soc/fsl/qman.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/qman.txt
@@ -38,6 +38,11 @@ are located at offsets 0xbf8 and 0xbfc
Value type: <prop-encoded-array>
Definition: Standard property. The error interrupt
+- fsl,qman-portals
+ Usage: Required
+ Value type: <phandle>
+ Definition: Phandle to this QMan instance's portals
+
- fsl,liodn
Usage: See pamu.txt
Value type: <prop-encoded-array>
@@ -113,13 +118,13 @@ The example below shows a QMan FQD and a PFDR dynamic allocation memory nodes
qman_fqd: qman-fqd {
compatible = "fsl,qman-fqd";
- alloc-ranges = <0 0 0xf 0xffffffff>;
+ alloc-ranges = <0 0 0x10 0>;
size = <0 0x400000>;
alignment = <0 0x400000>;
};
qman_pfdr: qman-pfdr {
compatible = "fsl,qman-pfdr";
- alloc-ranges = <0 0 0xf 0xffffffff>;
+ alloc-ranges = <0 0 0x10 0>;
size = <0 0x2000000>;
alignment = <0 0x2000000>;
};
@@ -127,6 +132,10 @@ The example below shows a QMan FQD and a PFDR dynamic allocation memory nodes
The example below shows a (P4080) QMan CCSR-space node
+ qportals: qman-portals@ff4200000 {
+ ...
+ };
+
clockgen: global-utilities@e1000 {
...
sysclk: sysclk {
@@ -154,6 +163,7 @@ The example below shows a (P4080) QMan CCSR-space node
reg = <0x318000 0x1000>;
interrupts = <16 2 1 3>
fsl,liodn = <0x16>;
+ fsl,qman-portals = <&qportals>;
memory-region = <&qman_fqd &qman_pfdr>;
clocks = <&platform_pll 1>;
};
diff --git a/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt b/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt
new file mode 100644
index 000000000000..befd125d18bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt
@@ -0,0 +1,18 @@
+Bindings for I2S controller built into xtfpga Xtensa bitstreams.
+
+Required properties:
+- compatible: shall be "cdns,xtfpga-i2s".
+- reg: memory region (address and length) with device registers.
+- interrupts: interrupt for the device.
+- clocks: phandle to the clk used as master clock. I2S bus clock
+ is derived from it.
+
+Examples:
+
+ i2s0: xtfpga-i2s@0d080000 {
+ #sound-dai-cells = <0>;
+ compatible = "cdns,xtfpga-i2s";
+ reg = <0x0d080000 0x40>;
+ interrupts = <2 1>;
+ clocks = <&cdce706 4>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/designware-i2s.txt b/Documentation/devicetree/bindings/sound/designware-i2s.txt
new file mode 100644
index 000000000000..7bb54247f8e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/designware-i2s.txt
@@ -0,0 +1,31 @@
+DesignWare I2S controller
+
+Required properties:
+ - compatible : Must be "snps,designware-i2s"
+ - reg : Must contain the I2S core's registers location and length
+ - clocks : Pairs of phandle and specifier referencing the controller's
+ clocks. The controller expects one clock: the clock used as the sampling
+ rate reference.
+ - clock-names : "i2sclk" for the sample rate reference clock.
+ - dmas: Pairs of phandle and specifier for the DMA channels that are used by
+ the core. The core expects one or two dma channels: one for transmit and
+ one for receive.
+ - dma-names : "tx" for the transmit channel, "rx" for the receive channel.
+
+For more details on the 'dmas', 'dma-names', 'clocks' and 'clock-names'
+properties please check:
+ * resource-names.txt
+ * clock/clock-bindings.txt
+ * dma/dma.txt
+
+Example:
+
+ soc_i2s: i2s@7ff90000 {
+ compatible = "snps,designware-i2s";
+ reg = <0x0 0x7ff90000 0x0 0x1000>;
+ clocks = <&scpi_i2sclk 0>;
+ clock-names = "i2sclk";
+ #sound-dai-cells = <0>;
+ dmas = <&dma0 5>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
new file mode 100644
index 000000000000..b41433386e2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
@@ -0,0 +1,23 @@
+Ingenic JZ4740 I2S controller
+
+Required properties:
+- compatible : "ingenic,jz4740-i2s"
+- reg : I2S registers location and length
+- clocks : AIC and I2S PLL clock specifiers.
+- clock-names: "aic" and "i2s"
+- dmas: DMA controller phandle and DMA request line for I2S Tx and Rx channels
+- dma-names: Must be "tx" and "rx"
+
+Example:
+
+i2s: i2s@10020000 {
+ compatible = "ingenic,jz4740-i2s";
+ reg = <0x10020000 0x94>;
+
+ clocks = <&cgu JZ4740_CLK_AIC>, <&cgu JZ4740_CLK_I2SPLL>;
+ clock-names = "aic", "i2s";
+
+ dmas = <&dma 2>, <&dma 3>;
+ dma-names = "tx", "rx";
+
+};
diff --git a/Documentation/devicetree/bindings/sound/max98357a.txt b/Documentation/devicetree/bindings/sound/max98357a.txt
new file mode 100644
index 000000000000..a7a149a236e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98357a.txt
@@ -0,0 +1,14 @@
+Maxim MAX98357A audio DAC
+
+This node models the Maxim MAX98357A DAC.
+
+Required properties:
+- compatible : "maxim,max98357a"
+- sdmode-gpios : GPIO specifier for the GPIO -> DAC SDMODE pin
+
+Example:
+
+max98357a {
+ compatible = "maxim,max98357a";
+ sdmode-gpios = <&qcom_pinmux 25 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt
new file mode 100644
index 000000000000..a4589cda214e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt
@@ -0,0 +1,67 @@
+NVIDIA Tegra audio complex, with RT5677 CODEC
+
+Required properties:
+- compatible : "nvidia,tegra-audio-rt5677"
+- clocks : Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+ - pll_a
+ - pll_a_out0
+ - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
+- nvidia,model : The user-visible name of this sound complex.
+- nvidia,audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the RT5677's pins (as documented in its binding), and the jacks
+ on the board:
+
+ * Headphone
+ * Speaker
+ * Headset Mic
+ * Internal Mic 1
+ * Internal Mic 2
+
+- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
+ connected to the CODEC.
+- nvidia,audio-codec : The phandle of the RT5677 audio codec. This binding
+ assumes that AIF1 on the CODEC is connected to Tegra.
+
+Optional properties:
+- nvidia,hp-det-gpios : The GPIO used to detect that headphones are plugged in
+- nvidia,hp-en-gpios : The GPIO that enables the headphone amplifier
+- nvidia,mic-present-gpios : The GPIO used to detect that the mic jack is plugged in
+- nvidia,dmic-clk-en-gpios : The GPIO that gates the DMIC clock signal
+
+Example:
+
+sound {
+ compatible = "nvidia,tegra-audio-rt5677-ryu",
+ "nvidia,tegra-audio-rt5677";
+ nvidia,model = "NVIDIA Tegra Ryu";
+
+ nvidia,audio-routing =
+ "Headphone", "LOUT2",
+ "Headphone", "LOUT1",
+ "Headset Mic", "MICBIAS1",
+ "IN1P", "Headset Mic",
+ "IN1N", "Headset Mic",
+ "DMIC L1", "Internal Mic 1",
+ "DMIC R1", "Internal Mic 1",
+ "DMIC L2", "Internal Mic 2",
+ "DMIC R2", "Internal Mic 2",
+ "Speaker", "PDM1L",
+ "Speaker", "PDM1R";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&rt5677>;
+
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_HIGH>;
+ nvidia,mic-present-gpios = <&gpio TEGRA_GPIO(O, 5) GPIO_ACTIVE_LOW>;
+ nvidia,hp-en-gpios = <&rt5677 1 GPIO_ACTIVE_HIGH>;
+ nvidia,dmic-clk-en-gpios = <&rt5677 2 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
+ <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-ahub.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-ahub.txt
index 946e2ac46091..0e9a1895d7fb 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-ahub.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-ahub.txt
@@ -1,7 +1,10 @@
NVIDIA Tegra30 AHUB (Audio Hub)
Required properties:
-- compatible : "nvidia,tegra30-ahub", "nvidia,tegra114-ahub", etc.
+- compatible : For Tegra30, must contain "nvidia,tegra30-ahub". For Tegra114,
+ must contain "nvidia,tegra114-ahub". For Tegra124, must contain
+ "nvidia,tegra124-ahub". Otherwise, must contain "nvidia,<chip>-ahub",
+ plus at least one of the above, where <chip> is tegra132.
- reg : Should contain the register physical address and length for each of
the AHUB's register blocks.
- Tegra30 requires 2 entries, for the APBIF and AHUB/AUDIO register blocks.
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
index b4730c2822bc..13e2ef496724 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
@@ -1,7 +1,9 @@
NVIDIA Tegra30 HDA controller
Required properties:
-- compatible : "nvidia,tegra30-hda"
+- compatible : For Tegra30, must contain "nvidia,tegra30-hda". Otherwise,
+ must contain '"nvidia,<chip>-hda", "nvidia,tegra30-hda"', where <chip> is
+ tegra114, tegra124, or tegra132.
- reg : Should contain the HDA registers location and length.
- interrupts : The interrupt from the HDA controller.
- clocks : Must contain an entry for each required entry in clock-names.
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt
index 0c113ffe3814..38caa936f6f8 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt
@@ -1,7 +1,10 @@
NVIDIA Tegra30 I2S controller
Required properties:
-- compatible : "nvidia,tegra30-i2s"
+- compatible : For Tegra30, must contain "nvidia,tegra30-i2s". For Tegra124,
+ must contain "nvidia,tegra124-i2s". Otherwise, must contain
+ "nvidia,<chip>-i2s" plus at least one of the above, where <chip> is
+ tegra114 or tegra132.
- reg : Should contain I2S registers location and length
- clocks : Must contain one entry, for the module clock.
See ../clocks/clock-bindings.txt for details.
diff --git a/Documentation/devicetree/bindings/sound/pcm512x.txt b/Documentation/devicetree/bindings/sound/pcm512x.txt
index faff75e64573..3aae3b41bd8e 100644
--- a/Documentation/devicetree/bindings/sound/pcm512x.txt
+++ b/Documentation/devicetree/bindings/sound/pcm512x.txt
@@ -5,7 +5,8 @@ on the board).
Required properties:
- - compatible : One of "ti,pcm5121" or "ti,pcm5122"
+ - compatible : One of "ti,pcm5121", "ti,pcm5122", "ti,pcm5141" or
+ "ti,pcm5142"
- reg : the I2C address of the device for I2C, the chip select
number for SPI.
@@ -16,9 +17,16 @@ Required properties:
Optional properties:
- clocks : A clock specifier for the clock connected as SCLK. If this
- is absent the device will be configured to clock from BCLK.
+ is absent the device will be configured to clock from BCLK. If pll-in
+ and pll-out are specified in addition to a clock, the device is
+ configured to accept clock input on a specified gpio pin.
-Example:
+ - pll-in, pll-out : gpio pins used to connect the pll using <1>
+ through <6>. The device will be configured for clock input on the
+ given pll-in pin and PLL output on the given pll-out pin. An
+ external connection from the pll-out pin to the SCLK pin is assumed.
+
+Examples:
pcm5122: pcm5122@4c {
compatible = "ti,pcm5122";
@@ -28,3 +36,17 @@ Example:
DVDD-supply = <&reg_1v8>;
CPVDD-supply = <&reg_3v3>;
};
+
+
+ pcm5142: pcm5142@4c {
+ compatible = "ti,pcm5142";
+ reg = <0x4c>;
+
+ AVDD-supply = <&reg_3v3_analog>;
+ DVDD-supply = <&reg_1v8>;
+ CPVDD-supply = <&reg_3v3>;
+
+ clocks = <&sck>;
+ pll-in = <3>;
+ pll-out = <6>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.txt b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
index d188296bb6ec..09e0e18591ae 100644
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
@@ -33,6 +33,25 @@ Required SoC Specific Properties:
"iis" is the i2s bus clock and i2s_opclk0, i2s_opclk1 are sources of the root
clk. i2s0 has internal mux to select the source of root clk and i2s1 and i2s2
doesn't have any such mux.
+- #clock-cells: should be 1; this property must be present if the I2S device
+ is a clock provider in terms of the common clock bindings, described in
+ ../clock/clock-bindings.txt.
+- clock-output-names: from the common clock bindings, names of the CDCLK
+ I2S output clocks, suggested values are "i2s_cdclk0", "i2s_cdclk1",
+ "i2s_cdclk3" for the I2S0, I2S1, I2S2 devices recpectively.
+
+There are following clocks available at the I2S device nodes:
+ CLK_I2S_CDCLK - the CDCLK (CODECLKO) gate clock,
+ CLK_I2S_RCLK_PSR - the RCLK prescaler divider clock (corresponding to the
+ IISPSR register),
+ CLK_I2S_RCLK_SRC - the RCLKSRC mux clock (corresponding to RCLKSRC bit in
+ IISMOD register).
+
+Refer to the SoC datasheet for availability of the above clocks.
+The CLK_I2S_RCLK_PSR and CLK_I2S_RCLK_SRC clocks are usually only available
+in the IIS Multi Audio Interface (I2S0).
+Note: Old DTs may not have the #clock-cells and clock-output-names properties
+and thus cannot use the I2S node as a clock supplier.
Optional SoC Specific Properties:
@@ -41,6 +60,7 @@ Optional SoC Specific Properties:
- pinctrl-0: Should specify pin control groups used for this controller.
- pinctrl-names: Should contain only one value - "default".
+
Example:
i2s0: i2s@03830000 {
@@ -54,6 +74,8 @@ i2s0: i2s@03830000 {
<&clock_audss EXYNOS_I2S_BUS>,
<&clock_audss EXYNOS_SCLK_I2S>;
clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk0";
samsung,idma-addr = <0x03000000>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index c3cba600bf11..73bf314f7240 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -75,6 +75,11 @@ Optional CPU/CODEC subnodes properties:
it can be specified via "clocks" if system has
clock node (= common clock), or "system-clock-frequency"
(if system doesn't support common clock)
+ If a clock is specified, it is
+ enabled with clk_prepare_enable()
+ in dai startup() and disabled with
+ clk_disable_unprepare() in dai
+ shutdown().
Example 1 - single DAI link:
diff --git a/Documentation/devicetree/bindings/sound/st,sta32x.txt b/Documentation/devicetree/bindings/sound/st,sta32x.txt
new file mode 100644
index 000000000000..255de3ae5b2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,sta32x.txt
@@ -0,0 +1,92 @@
+STA32X audio CODEC
+
+The driver for this device only supports I2C.
+
+Required properties:
+
+ - compatible: "st,sta32x"
+ - reg: the I2C address of the device for I2C
+ - reset-gpios: a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the codec starts.
+
+ - power-down-gpios: a GPIO spec for the power down pin. If specified,
+ it will be deasserted before communication to the codec
+ starts.
+
+ - Vdda-supply: regulator spec, providing 3.3V
+ - Vdd3-supply: regulator spec, providing 3.3V
+ - Vcc-supply: regulator spec, providing 5V - 26V
+
+Optional properties:
+
+ - st,output-conf: number, selects the output configuration:
+ 0: 2-channel (full-bridge) power, 2-channel data-out
+ 1: 2-channel (half-bridge), 1-channel (full-bridge) on-board power
+ 2: 2-channel (full-bridge) power, 1-channel FFX
+ 3: 1-channel mono-parallel
+ If the parameter is missing, mode 0 will be enabled.
+ This property has to be specified as a '/bits/ 8' value.
+
+ - st,ch1-output-mapping: Channel 1 output mapping
+ - st,ch2-output-mapping: Channel 2 output mapping
+ - st,ch3-output-mapping: Channel 3 output mapping
+ 0: Channel 1
+ 1: Channel 2
+ 2: Channel 3
+ If a parameter is missing, channel 1 is chosen.
+ These properties have to be specified as '/bits/ 8' values.
+
+ - st,thermal-warning-recover:
+ If present, thermal warning recovery is enabled.
+
+ - st,thermal-warning-adjustment:
+ If present, thermal warning adjustment is enabled.
+
+ - st,fault-detect-recovery:
+ If present, then fault recovery will be enabled.
+
+ - st,drop-compensation-ns: number
+ Only required for "st,ffx-power-output-mode" ==
+ "variable-drop-compensation".
+ Specifies the drop compensation in nanoseconds.
+ The value must be in the range of 0..300, and only
+ multiples of 20 are allowed. Default is 140ns.
+
+ - st,max-power-use-mpcc:
+ If present, then MPCC bits are used for MPC coefficients,
+ otherwise standard MPC coefficients are used.
+
+ - st,max-power-corr:
+ If present, power bridge correction for THD reduction near maximum
+ power output is enabled.
+
+ - st,am-reduction-mode:
+ If present, FFX mode runs in AM reduction mode, otherwise normal
+ FFX mode is used.
+
+ - st,odd-pwm-speed-mode:
+	If present, PWM runs in odd speed mode (341.3 kHz) on all
+	channels. If not present, normal PWM speed mode (384 kHz) will be used.
+
+ - st,invalid-input-detect-mute:
+ If present, automatic invalid input detect mute is enabled.
+
+Example:
+
+codec: sta32x@38 {
+ compatible = "st,sta32x";
+ reg = <0x1c>;
+ reset-gpios = <&gpio1 19 0>;
+ power-down-gpios = <&gpio1 16 0>;
+	st,output-conf = /bits/ 8 <0x0>; // set output to 2-channel
+ // (full-bridge) power,
+ // 2-channel data-out
+ st,ch1-output-mapping = /bits/ 8 <0>; // set channel 1 output ch 1
+ st,ch2-output-mapping = /bits/ 8 <0>; // set channel 2 output ch 1
+ st,ch3-output-mapping = /bits/ 8 <0>; // set channel 3 output ch 1
+ st,max-power-correction; // enables power bridge
+ // correction for THD reduction
+ // near maximum power output
+ st,invalid-input-detect-mute; // mute if no valid digital
+ // audio signal is provided.
+};
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index 5e6040c2c2e9..47a213c411ce 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -9,6 +9,7 @@ Required properties:
"ti,tlv320aic33" - TLV320AIC33
"ti,tlv320aic3007" - TLV320AIC3007
"ti,tlv320aic3106" - TLV320AIC3106
+ "ti,tlv320aic3104" - TLV320AIC3104
- reg - <int> - I2C slave address
@@ -18,6 +19,7 @@ Optional properties:
- gpio-reset - gpio pin number used for codec reset
- ai3x-gpio-func - <array of 2 int> - AIC3X_GPIO1 & AIC3X_GPIO2 Functionality
+ - Not supported on tlv320aic3104
- ai3x-micbias-vg - MicBias Voltage required.
1 - MICBIAS output is powered to 2.0V,
2 - MICBIAS output is powered to 2.5V,
@@ -36,7 +38,13 @@ CODEC output pins:
* HPLCOM
* HPRCOM
-CODEC input pins:
+CODEC input pins for TLV320AIC3104:
+ * MIC2L
+ * MIC2R
+ * LINE1L
+ * LINE1R
+
+CODEC input pins for other compatible codecs:
* MIC3L
* MIC3R
* LINE1L
diff --git a/Documentation/devicetree/bindings/sound/ts3a227e.txt b/Documentation/devicetree/bindings/sound/ts3a227e.txt
index e8bf23eb1803..a836881d9608 100644
--- a/Documentation/devicetree/bindings/sound/ts3a227e.txt
+++ b/Documentation/devicetree/bindings/sound/ts3a227e.txt
@@ -13,6 +13,11 @@ Required properties:
- interrupt-parent: The parent interrupt controller
- interrupts: Interrupt number for /INT pin from the 227e
+Optional properties:
+ - ti,micbias: Intended MICBIAS voltage (datasheet section 9.6.7).
+		Select 0/1/2/3/4/5/6/7 to specify a MICBIAS voltage of
+		2.1V/2.2V/2.3V/2.4V/2.5V/2.6V/2.7V/2.8V.
+		Default value is "1" (2.2V). See the sketch below.
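+
+A sketch using the new property; the I2C address, interrupt routing and the
+chosen MICBIAS value are assumptions for illustration only:
+
+	ts3a227e@3b {
+		compatible = "ti,ts3a227e";
+		reg = <0x3b>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+		ti,micbias = <3>;	/* MICBIAS = 2.4V */
+	};
+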
Examples:
diff --git a/Documentation/devicetree/bindings/sound/wm8904.txt b/Documentation/devicetree/bindings/sound/wm8904.txt
index e99f4097c83c..66bf261423b9 100644
--- a/Documentation/devicetree/bindings/sound/wm8904.txt
+++ b/Documentation/devicetree/bindings/sound/wm8904.txt
@@ -3,7 +3,7 @@ WM8904 audio CODEC
This device supports I2C only.
Required properties:
- - compatible: "wlf,wm8904"
+ - compatible: "wlf,wm8904" or "wlf,wm8912"
- reg: the I2C address of the device.
- clock-names: "mclk"
- clocks: reference to
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
index 7ea701e07dc2..b785976fe98a 100644
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
@@ -1,7 +1,9 @@
NVIDIA Tegra114 SPI controller.
Required properties:
-- compatible : should be "nvidia,tegra114-spi".
+- compatible : For Tegra114, must contain "nvidia,tegra114-spi".
+ Otherwise, must contain '"nvidia,<chip>-spi", "nvidia,tegra114-spi"' where
+ <chip> is tegra124, tegra132, or tegra210.
- reg: Should contain SPI registers location and length.
- interrupts: Should contain SPI interrupts.
- clock-names : Must include the following entries:
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index d11c3721e7cd..4c388bb2f0a2 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -30,6 +30,22 @@ Optional properties:
specifiers, one for transmission, and one for
reception.
- dma-names : Must contain a list of two DMA names, "tx" and "rx".
+- renesas,dtdl : delay sync signal (setup) in transmit mode.
+ Must contain one of the following values:
+ 0 (no bit delay)
+ 50 (0.5-clock-cycle delay)
+ 100 (1-clock-cycle delay)
+ 150 (1.5-clock-cycle delay)
+ 200 (2-clock-cycle delay)
+
+- renesas,syncdl : delay sync signal (hold) in transmit mode.
+ Must contain one of the following values:
+ 0 (no bit delay)
+ 50 (0.5-clock-cycle delay)
+ 100 (1-clock-cycle delay)
+ 150 (1.5-clock-cycle delay)
+ 200 (2-clock-cycle delay)
+ 300 (3-clock-cycle delay)
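+
+An illustrative fragment using the new delay properties; the node name,
+compatible string, register range and interrupt below are assumptions:
+
+	msiof0: spi@e6e20000 {
+		compatible = "renesas,msiof-r8a7790";
+		reg = <0xe6e20000 0x0064>;
+		interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+		renesas,dtdl = <200>;	/* 2-clock-cycle setup delay */
+		renesas,syncdl = <300>;	/* 3-clock-cycle hold delay */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+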
Optional properties, deprecated for soctype-specific bindings:
- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
new file mode 100644
index 000000000000..4c7adb8f777c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sirf.txt
@@ -0,0 +1,41 @@
+* CSR SiRFprimaII Serial Peripheral Interface
+
+Required properties:
+- compatible : Should be "sirf,prima2-spi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain SPI interrupt
+- resets: phandle to the reset controller asserting this device in
+ reset
+ See ../reset/reset.txt for details.
+- dmas : Must contain an entry for each entry in dma-names.
+ See ../dma/dma.txt for details.
+- dma-names : Must include the following entries:
+ - rx
+ - tx
+- clocks : Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+
+- #address-cells: Number of cells required to define a chip select
+ address on the SPI bus. Should be set to 1.
+- #size-cells: Should be zero.
+
+Optional properties:
+- spi-max-frequency: Specifies the maximum SPI clock frequency, in Hz.
+                     Definition as per
+                     Documentation/devicetree/bindings/spi/spi-bus.txt
+- cs-gpios: should specify GPIOs used for chipselects.
+
+Example:
+
+spi0: spi@b00d0000 {
+ compatible = "sirf,prima2-spi";
+ reg = <0xb00d0000 0x10000>;
+ interrupts = <15>;
+ dmas = <&dmac1 9>,
+ <&dmac1 4>;
+ dma-names = "rx", "tx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clks 19>;
+ resets = <&rstc 26>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
new file mode 100644
index 000000000000..fe54959ec957
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
@@ -0,0 +1,40 @@
+STMicroelectronics SSC (SPI) Controller
+---------------------------------------
+
+Required properties:
+- compatible : "st,comms-ssc4-spi"
+- reg : Offset and length of the device's register set
+- interrupts : The interrupt specifier
+- clock-names : Must contain "ssc"
+- clocks : Must contain an entry for each name in clock-names
+ See ../clk/*
+- pinctrl-names : Uses "default", can use "sleep" if provided
+ See ../pinctrl/pinctrl-binding.txt
+
+Optional properties:
+- cs-gpios : List of GPIO chip selects
+ See ../spi/spi-bus.txt
+
+Child nodes represent devices on the SPI bus
+ See ../spi/spi-bus.txt
+
+Example:
+ spi@9840000 {
+ compatible = "st,comms-ssc4-spi";
+ reg = <0x9840000 0x110>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+ clock-names = "ssc";
+ pinctrl-0 = <&pinctrl_spi0_default>;
+ pinctrl-names = "default";
+ cs-gpios = <&pio17 5 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ st95hf@0{
+ compatible = "st,st95hf";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt b/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
index ee05dc390694..307537787574 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
+++ b/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
@@ -12,9 +12,9 @@ Optional properties:
property is not present, then the touchscreen is
disabled. 5 wires is valid for i.MX28 SoC only.
- fsl,ave-ctrl: number of samples per direction to calculate an average value.
- Allowed value is 1 ... 31, default is 4
+ Allowed value is 1 ... 32, default is 4
- fsl,ave-delay: delay between consecutive samples. Allowed value is
- 1 ... 2047. It is used if 'fsl,ave-ctrl' > 1, counts at
+ 2 ... 2048. It is used if 'fsl,ave-ctrl' > 1, counts at
2 kHz and its default is 2 (= 1 ms)
- fsl,settling: delay between plate switch to next sample. Allowed value is
1 ... 2047. It counts at 2 kHz and its default is
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index b7ba01ad1426..56742bc70218 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -15,6 +15,29 @@ I. For patch submitters
3) The Documentation/ portion of the patch should come in the series before
the code implementing the binding.
+ 4) Any compatible strings used in a chip or board DTS file must be
+ previously documented in the corresponding DT binding text file
+ in Documentation/devicetree/bindings. This rule applies even if
+ the Linux device driver does not yet match on the compatible
+ string. [ checkpatch will emit warnings if this step is not
+ followed as of commit bff5da4335256513497cc8c79f9a9d1665e09864
+ ("checkpatch: add DT compatible string documentation checks"). ]
+
+ 5) The wildcard "<chip>" may be used in compatible strings, as in
+ the following example:
+
+ - compatible: Must contain '"nvidia,<chip>-pcie",
+ "nvidia,tegra20-pcie"' where <chip> is tegra30, tegra132, ...
+
+ As in the above example, the known values of "<chip>" should be
+ documented if it is used.
+
+ 6) If a documented compatible string is not yet matched by the
+ driver, the documentation should also include a compatible
+ string that is matched by the driver (as in the "nvidia,tegra20-pcie"
+ example above).
+
+
II. For kernel maintainers
1) If you aren't comfortable reviewing a given binding, reply to it and ask
diff --git a/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
index ecf3ed76cd46..6b68cd150405 100644
--- a/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
+++ b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
@@ -7,7 +7,9 @@ notifications. It is also used to manage emergency shutdown in an
overheating situation.
Required properties :
-- compatible : "nvidia,tegra124-soctherm".
+- compatible : For Tegra124, must contain "nvidia,tegra124-soctherm".
+ For Tegra132, must contain "nvidia,tegra132-soctherm".
+ For Tegra210, must contain "nvidia,tegra210-soctherm".
- reg : Should contain 1 entry:
- SOCTHERM register set
- interrupts : Defines the interrupt used by SOCTHERM
diff --git a/Documentation/devicetree/bindings/timer/digicolor-timer.txt b/Documentation/devicetree/bindings/timer/digicolor-timer.txt
new file mode 100644
index 000000000000..d1b659bbc29f
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/digicolor-timer.txt
@@ -0,0 +1,18 @@
+Conexant Digicolor SoCs Timer Controller
+
+Required properties:
+
+- compatible : should be "cnxt,cx92755-timer"
+- reg : Specifies base physical address and size of the "Agent Communication"
+ timer registers
+- interrupts : Contains 8 interrupts, one for each timer
+- clocks: phandle to the main clock
+
+Example:
+
+ timer@f0000fc0 {
+ compatible = "cnxt,cx92755-timer";
+ reg = <0xf0000fc0 0x40>;
+ interrupts = <19>, <31>, <34>, <35>, <52>, <53>, <54>, <55>;
+ clocks = <&main_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt b/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt
index b5082a1cf461..1761f53ee36f 100644
--- a/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt
+++ b/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt
@@ -6,7 +6,9 @@ trigger a legacy watchdog reset.
Required properties:
-- compatible : should be "nvidia,tegra30-timer", "nvidia,tegra20-timer".
+- compatible : For Tegra30, must contain "nvidia,tegra30-timer". Otherwise,
+ must contain '"nvidia,<chip>-timer", "nvidia,tegra30-timer"' where
+ <chip> is tegra124 or tegra132.
- reg : Specifies base physical address and size of the registers.
- interrupts : A list of 6 interrupts; one per each of timer channels 1
through 5, and one for the shared interrupt for the remaining channels.
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt b/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt
new file mode 100644
index 000000000000..87f0b0042bae
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt
@@ -0,0 +1,18 @@
+Rockchip rk3288 timer
+
+Required properties:
+- compatible: shall be "rockchip,rk3288-timer"
+- reg: base address of the timer register starting with TIMERS CONTROL register
+- interrupts: should contain the interrupts for Timer0
+- clocks : must contain an entry for each entry in clock-names
+- clock-names : must include the following entries:
+ "timer", "pclk"
+
+Example:
+ timer: timer@ff810000 {
+ compatible = "rockchip,rk3288-timer";
+ reg = <0xff810000 0x20>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&xin24m>, <&cru PCLK_TIMER>;
+ clock-names = "timer", "pclk";
+ };
diff --git a/Documentation/devicetree/bindings/unittest.txt b/Documentation/devicetree/bindings/unittest.txt
index 0f92a22fddfa..8933211f32f9 100644
--- a/Documentation/devicetree/bindings/unittest.txt
+++ b/Documentation/devicetree/bindings/unittest.txt
@@ -1,4 +1,4 @@
-* OF selftest platform device
+1) OF selftest platform device
** selftest
@@ -12,3 +12,60 @@ Example:
compatible = "selftest";
status = "okay";
};
+
+2) OF selftest i2c adapter platform device
+
+** platform device unittest adapter
+
+Required properties:
+- compatible: must be selftest-i2c-bus
+
+Child nodes contain selftest i2c devices.
+
+Example:
+ selftest-i2c-bus {
+ compatible = "selftest-i2c-bus";
+ status = "okay";
+ };
+
+3) OF selftest i2c device
+
+** I2C selftest device
+
+Required properties:
+- compatible: must be selftest-i2c-dev
+
+All other properties are optional
+
+Example:
+ selftest-i2c-dev {
+ compatible = "selftest-i2c-dev";
+ status = "okay";
+ };
+
+4) OF selftest i2c mux device
+
+** I2C selftest mux
+
+Required properties:
+- compatible: must be selftest-i2c-mux
+
+Child nodes contain one selftest i2c bus node per channel.
+
+Example:
+ selftest-i2c-mux {
+ compatible = "selftest-i2c-mux";
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel-0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-dev {
+ reg = <8>;
+ compatible = "selftest-i2c-dev";
+ status = "okay";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
index bc2222ca3f2a..38fee0f66c12 100644
--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
+++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
@@ -51,7 +51,10 @@ usb1: gadget@fffa4000 {
Atmel High-Speed USB device controller
Required properties:
- - compatible: Should be "atmel,at91sam9rl-udc"
+ - compatible: Should be one of the following
+	       "atmel,at91sam9rl-udc"
+	       "atmel,at91sam9g45-udc"
+	       "atmel,sama5d3-udc"
- reg: Address and length of the register set for the device
- interrupts: Should contain usba interrupt
- ep childnode: To specify the number of endpoints and their properties.
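+
+A minimal sketch of a high-speed UDC node; the register window, interrupt
+specifier and the omitted ep child nodes are assumptions for illustration only:
+
+	usb2: gadget@fff78000 {
+		compatible = "atmel,sama5d3-udc";
+		reg = <0xfff78000 0x4000>;
+		interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
+		/* ep child nodes describing the endpoints would follow here */
+	};
+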
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 482f815363ef..fd132cbee70e 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -20,6 +20,10 @@ Optional properties:
Refer to phy/phy-bindings.txt for generic phy consumer properties
- dr_mode: shall be one of "host", "peripheral" and "otg"
Refer to usb/generic.txt
+- g-use-dma: enable DMA usage in the gadget driver.
+- g-rx-fifo-size: size of the rx fifo in gadget mode.
+- g-np-tx-fifo-size: size of the non-periodic tx fifo in gadget mode.
+- g-tx-fifo-size: size of the periodic tx fifo per endpoint (except ep0) in gadget mode.
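+
+A sketch of a node using the new gadget properties; the addresses, interrupt
+and FIFO sizes below are assumptions for illustration only:
+
+	usb@101c0000 {
+		compatible = "snps,dwc2";
+		reg = <0x101c0000 0x40000>;
+		interrupts = <18>;
+		g-use-dma;
+		g-rx-fifo-size = <275>;
+		g-np-tx-fifo-size = <32>;
+		g-tx-fifo-size = <256 128 128 64>;
+	};
+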
Example:
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
index 3dc9140e3dfb..f60785f73d3d 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
@@ -6,7 +6,10 @@ Practice : Universal Serial Bus" with the following modifications
and additions :
Required properties :
- - compatible : Should be "nvidia,tegra20-ehci".
+ - compatible : For Tegra20, must contain "nvidia,tegra20-ehci".
+ For Tegra30, must contain "nvidia,tegra30-ehci". Otherwise, must contain
+ "nvidia,<chip>-ehci" plus at least one of the above, where <chip> is
+ tegra114, tegra124, tegra132, or tegra210.
- nvidia,phy : phandle of the PHY that the controller is connected to.
- clocks : Must contain one entry, for the module clock.
See ../clocks/clock-bindings.txt for details.
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
index c9205fbf26e2..a9aa79fb90ed 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
@@ -3,7 +3,10 @@ Tegra SOC USB PHY
The device node for Tegra SOC USB PHY:
Required properties :
- - compatible : Should be "nvidia,tegra<chip>-usb-phy".
+ - compatible : For Tegra20, must contain "nvidia,tegra20-usb-phy".
+ For Tegra30, must contain "nvidia,tegra30-usb-phy". Otherwise, must contain
+ "nvidia,<chip>-usb-phy" plus at least one of the above, where <chip> is
+ tegra114, tegra124, tegra132, or tegra210.
- reg : Defines the following set of registers, in the order listed:
- The PHY's own register set.
Always present.
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index b08c903f8668..61b045b6d50e 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -14,6 +14,8 @@ Optional properties:
function should be enabled
- phys: phandle + phy specifier pair
- phy-names: must be "usb"
+ - dmas: Must contain a list of references to DMA specifiers.
+ - dma-names : Must contain a list of DMA names, "tx" or "rx".
Example:
usbhs: usb@e6590000 {
diff --git a/Documentation/devicetree/bindings/usb/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
index 43c1a4e06767..0b04fdff9d5a 100644
--- a/Documentation/devicetree/bindings/usb/usb-ehci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ehci.txt
@@ -12,6 +12,7 @@ Optional properties:
- big-endian-regs : boolean, set this for hcds with big-endian registers
- big-endian-desc : boolean, set this for hcds with big-endian descriptors
- big-endian : boolean, for hcds with big-endian-regs + big-endian-desc
+ - needs-reset-on-resume : boolean, set this to force EHCI reset after resume
- clocks : a list of phandle + clock specifier pairs
- phys : phandle + phy specifier pair
- phy-names : "usb"
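+
+A sketch showing the new property on an EHCI node; the compatible value,
+addresses and phandles are assumptions for illustration only:
+
+	ehci@1c14000 {
+		compatible = "generic-ehci";
+		reg = <0x01c14000 0x100>;
+		interrupts = <39>;
+		needs-reset-on-resume;
+		clocks = <&ahb_gates 1>;
+		phys = <&usbphy 1>;
+		phy-names = "usb";
+	};
+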
diff --git a/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt b/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt
index 1bd37faba05b..5be01c859b7a 100644
--- a/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt
+++ b/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt
@@ -13,10 +13,15 @@ Optional properties:
- clock-frequency: the clock frequency (in Hz) that the PHY clock must
be configured to.
-- vcc-supply: phandle to the regulator that provides RESET to the PHY.
+- vcc-supply: phandle to the regulator that provides power to the PHY.
- reset-gpios: Should specify the GPIO for reset.
+- vbus-detect-gpio: should specify the GPIO detecting a VBus insertion
+ (see Documentation/devicetree/bindings/gpio/gpio.txt)
+- vbus-regulator : should specify the regulator supplying current drawn from
+ the VBus line (see Documentation/devicetree/bindings/regulator/regulator.txt).
+
Example:
hsusb1_phy {
@@ -26,8 +31,11 @@ Example:
clock-names = "main_clk";
vcc-supply = <&hsusb1_vcc_regulator>;
reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ vbus-detect-gpio = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+ vbus-regulator = <&vbus_regulator>;
};
hsusb1_phy is a NOP USB PHY device that gets its clock from an oscillator
and expects that clock to be configured to 19.2MHz by the NOP PHY driver.
hsusb1_vcc_regulator provides power to the PHY and GPIO 7 controls RESET.
+GPIO 13 detects VBus insertion, and accordingly notifies the vbus-regulator.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index b1df0ad1306c..0229b71dc74b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -4,12 +4,13 @@ This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.
abilis Abilis Systems
+abcn Abracon Corporation
active-semi Active-Semi International Inc
ad Avionic Design GmbH
adapteva Adapteva, Inc.
+adh AD Holdings Plc.
adi Analog Devices, Inc.
aeroflexgaisler Aeroflex Gaisler AB
-ak Asahi Kasei Corp.
allwinner Allwinner Technology Co., Ltd.
altr Altera Corp.
amcc	Applied Micro Circuits Corporation (APM, formerly AMCC)
@@ -20,9 +21,11 @@ amstaos AMS-Taos Inc.
apm Applied Micro Circuits Corporation (APM)
arm ARM Ltd.
armadeus ARMadeus Systems SARL
+asahi-kasei Asahi Kasei Corp.
atmel Atmel Corporation
auo AU Optronics Corporation
avago Avago Technologies
+avic Shanghai AVIC Optoelectronics Co., Ltd.
bosch Bosch Sensortec GmbH
brcm Broadcom Corporation
buffalo Buffalo, Inc.
@@ -35,7 +38,9 @@ chrp Common Hardware Reference Platform
chunghwa Chunghwa Picture Tubes Ltd.
cirrus Cirrus Logic, Inc.
cnm Chips&Media, Inc.
+cnxt Conexant Systems, Inc.
cortina Cortina Systems, Inc.
+cosmic Cosmic Circuits
crystalfontz Crystalfontz America, Inc.
dallas Maxim Integrated Products (formerly Dallas Semiconductor)
davicom DAVICOM Semiconductor, Inc.
@@ -54,14 +59,17 @@ epcos EPCOS AG
epfl Ecole Polytechnique Fédérale de Lausanne
epson Seiko Epson Corp.
est ESTeem Wireless Modems
+ettus NI Ettus Research
eukrea Eukréa Electromatique
everest Everest Semiconductor Co. Ltd.
+everspin Everspin Technologies, Inc.
excito Excito
fcs Fairchild Semiconductor
fsl Freescale Semiconductor
GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
geniatech Geniatech, Inc.
+giantplus Giantplus Technology Co., Ltd.
globalscale Globalscale Technologies, Inc.
gmt Global Mixed-mode Technology, Inc.
google Google, Inc.
@@ -69,6 +77,7 @@ gumstix Gumstix, Inc.
gw Gateworks Corporation
hannstar HannStar Display Corporation
haoyu Haoyu Microelectronic Co. Ltd.
+himax Himax Technologies, Inc.
hisilicon Hisilicon Limited.
hit Hitachi Ltd.
honeywell Honeywell
@@ -82,8 +91,7 @@ innolux Innolux Corporation
intel Intel Corporation
intercontrol Inter Control Group
isee ISEE 2007 S.L.
-isil Intersil (deprecated, use isl)
-isl Intersil
+isil Intersil
karo Ka-Ro electronics GmbH
keymile Keymile GmbH
lacie LaCie
@@ -118,7 +126,9 @@ nvidia NVIDIA
nxp NXP Semiconductors
onnn ON Semiconductor Corp.
opencores OpenCores.org
+ovti OmniVision Technologies
panasonic Panasonic Corporation
+parade Parade Technologies Inc.
pericom Pericom Technology Inc.
phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
@@ -127,6 +137,7 @@ pixcir PIXCIR MICROELECTRONICS Co., Ltd
powervr PowerVR (deprecated, use img)
qca Qualcomm Atheros, Inc.
qcom Qualcomm Technologies, Inc
+qemu QEMU, a generic and open source machine emulator and virtualizer
qnap QNAP Systems, Inc.
radxa Radxa
raidsonic RaidSonic Technology GmbH
@@ -141,8 +152,10 @@ sandisk Sandisk Corporation
sbs Smart Battery System
schindler Schindler
seagate Seagate Technology PLC
+semtech Semtech Corporation
sil Silicon Image
silabs Silicon Laboratories
+siliconmitus Silicon Mitus, Inc.
simtek
sii Seiko Instruments, Inc.
silergy Silergy Corp.
@@ -153,6 +166,7 @@ snps Synopsys, Inc.
solidrun SolidRun
sony Sony Corporation
spansion Spansion Inc.
+sprd Spreadtrum Communications Inc.
st STMicroelectronics
ste ST-Ericsson
stericsson ST-Ericsson
@@ -164,10 +178,12 @@ tlm Trusted Logic Mobility
toradex Toradex AG
toshiba Toshiba Corporation
toumaz Toumaz
+truly Truly Semiconductors Limited
usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor
variscite Variscite Ltd.
via VIA Technologies, Inc.
+virtio Virtual I/O Device Specification, developed by the OASIS consortium
voipac Voipac Technologies s.r.o.
winbond Winbond Electronics corp.
wlf Wolfson Microelectronics
diff --git a/Documentation/devicetree/bindings/video/bridge/ps8622.txt b/Documentation/devicetree/bindings/video/bridge/ps8622.txt
new file mode 100644
index 000000000000..c989c3807f2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/ps8622.txt
@@ -0,0 +1,31 @@
+ps8622-bridge bindings
+
+Required properties:
+ - compatible: "parade,ps8622" or "parade,ps8625"
+ - reg: first i2c address of the bridge
+ - sleep-gpios: OF device-tree gpio specification for PD_ pin.
+ - reset-gpios: OF device-tree gpio specification for RST_ pin.
+
+Optional properties:
+ - lane-count: number of DP lanes to use
+ - use-external-pwm: backlight will be controlled by an external PWM
+ - video interfaces: Device node can contain video interface port
+ nodes for panel according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ lvds-bridge@48 {
+ compatible = "parade,ps8622";
+ reg = <0x48>;
+ sleep-gpios = <&gpc3 6 1 0 0>;
+ reset-gpios = <&gpc3 1 1 0 0>;
+ lane-count = <1>;
+ ports {
+ port@0 {
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/bridge/ptn3460.txt b/Documentation/devicetree/bindings/video/bridge/ptn3460.txt
index 52b93b2c6748..361971ba104d 100644
--- a/Documentation/devicetree/bindings/drm/bridge/ptn3460.txt
+++ b/Documentation/devicetree/bindings/video/bridge/ptn3460.txt
@@ -3,8 +3,8 @@ ptn3460 bridge bindings
Required properties:
- compatible: "nxp,ptn3460"
- reg: i2c address of the bridge
- - powerdown-gpio: OF device-tree gpio specification
- - reset-gpio: OF device-tree gpio specification
+ - powerdown-gpio: OF device-tree gpio specification for PD_N pin.
+ - reset-gpio: OF device-tree gpio specification for RST_N pin.
- edid-emulation: The EDID emulation entry to use
+-------+------------+------------------+
| Value | Resolution | Description |
@@ -17,6 +17,11 @@ Required properties:
| 6 | 1600x900 | ChiMei M215HGE |
+-------+------------+------------------+
+ - video interfaces: Device node can contain video interface port
+ nodes for panel according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
Example:
lvds-bridge@20 {
compatible = "nxp,ptn3460";
@@ -24,4 +29,11 @@ Example:
powerdown-gpio = <&gpy2 5 1 0 0>;
reset-gpio = <&gpx1 5 1 0 0>;
edid-emulation = <5>;
+ ports {
+ port@0 {
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/video/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/video/dw_hdmi-rockchip.txt
new file mode 100644
index 000000000000..668091f27674
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/dw_hdmi-rockchip.txt
@@ -0,0 +1,46 @@
+Rockchip specific extensions to the Synopsys Designware HDMI
+================================
+
+Required properties:
+- compatible: "rockchip,rk3288-dw-hdmi";
+- reg: Physical base address and length of the controller's registers.
+- clocks: phandle to hdmi iahb and isfr clocks.
+- clock-names: should be "iahb", "isfr"
+- rockchip,grf: phandle to the GRF used to select which VOP (vopl/vopb) feeds
+  the HDMI controller.
+- interrupts: HDMI interrupt number
+- ports: contain a port node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt. For
+  vopb, set reg = <0>; for vopl, set reg = <1>.
+- reg-io-width: width of the register access (1 or 4); the value should be 4
+  on the rk3288 platform
+
+Optional properties
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
+
+Example:
+hdmi: hdmi@ff980000 {
+ compatible = "rockchip,rk3288-dw-hdmi";
+ reg = <0xff980000 0x20000>;
+ reg-io-width = <4>;
+ ddc-i2c-bus = <&i2c5>;
+ rockchip,grf = <&grf>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>;
+ clock-names = "iahb", "isfr";
+ status = "disabled";
+ ports {
+ hdmi_in: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ hdmi_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_hdmi>;
+ };
+ hdmi_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_hdmi>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/video/exynos7-decon.txt b/Documentation/devicetree/bindings/video/exynos7-decon.txt
new file mode 100644
index 000000000000..f5f9c8d4a55a
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/exynos7-decon.txt
@@ -0,0 +1,68 @@
+Device-Tree bindings for Samsung Exynos7 SoC display controller (DECON)
+
+DECON (Display and Enhancement Controller) is the Display Controller for the
+Exynos7 series of SoCs which transfers the image data from a video memory
+buffer to an external LCD interface.
+
+Required properties:
+- compatible: value should be "samsung,exynos7-decon";
+
+- reg: physical base address and length of the DECON registers set.
+
+- interrupt-parent: should be the phandle of the decon controller's
+ parent interrupt controller.
+
+- interrupts: should contain a list of all DECON IP block interrupts in the
+ order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier
+ format depends on the interrupt controller used.
+
+- interrupt-names: should contain the interrupt names: "fifo", "vsync",
+ "lcd_sys", in the same order as they were listed in the interrupts
+ property.
+
+- pinctrl-0: pin control group to be used for this controller.
+
+- pinctrl-names: must contain a "default" entry.
+
+- clocks: must include clock specifiers corresponding to entries in the
+ clock-names property.
+
+- clock-names: list of clock names sorted in the same order as the clocks
+ property. Must contain "pclk_decon0", "aclk_decon0",
+ "decon0_eclk", "decon0_vclk".
+- i80-if-timings: timing configuration for lcd i80 interface support.
+
+Optional Properties:
+- samsung,power-domain: a phandle to DECON power domain node.
+- display-timings: timing settings for DECON, as described in document [1].
+ Can be used in case timings cannot be provided otherwise
+ or to override timings provided by the panel.
+
+[1]: Documentation/devicetree/bindings/video/display-timing.txt
+
+Example:
+
+SoC specific DT entry:
+
+ decon@13930000 {
+ compatible = "samsung,exynos7-decon";
+ interrupt-parent = <&combiner>;
+ reg = <0x13930000 0x1000>;
+ interrupt-names = "lcd_sys", "vsync", "fifo";
+ interrupts = <0 188 0>, <0 189 0>, <0 190 0>;
+ clocks = <&clock_disp PCLK_DECON_INT>,
+ <&clock_disp ACLK_DECON_INT>,
+ <&clock_disp SCLK_DECON_INT_ECLK>,
+ <&clock_disp SCLK_DECON_INT_EXTCLKPLL>;
+ clock-names = "pclk_decon0", "aclk_decon0", "decon0_eclk",
+ "decon0_vclk";
+ status = "disabled";
+ };
+
+Board specific DT entry:
+
+ decon@13930000 {
+ pinctrl-0 = <&lcd_clk &pwm1_out>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/video/exynos_dp.txt b/Documentation/devicetree/bindings/video/exynos_dp.txt
index 53dbccfa80ca..7a3a9cdb86ab 100644
--- a/Documentation/devicetree/bindings/video/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dp.txt
@@ -66,6 +66,10 @@ Optional properties for dp-controller:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug
detection
+ -video interfaces: Device node can contain video interface port
+ nodes according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
@@ -105,4 +109,12 @@ Board Specific portion:
vsync-len = <6>;
};
};
+
+ ports {
+ port@0 {
+ dp_out: endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/video/exynos_mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 08b394b1edbf..3e38128f866b 100644
--- a/Documentation/devicetree/bindings/video/exynos_mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
@@ -15,6 +15,7 @@ Required properties:
a) mixer: Gate of Mixer IP bus clock.
b) sclk_hdmi: HDMI Special clock, one of the two possible inputs of
mixer mux.
+ c) hdmi: Gate of HDMI IP bus clock, needed together with sclk_hdmi.
Example:
diff --git a/Documentation/devicetree/bindings/video/renesas,du.txt b/Documentation/devicetree/bindings/video/renesas,du.txt
index 5102830f2760..c902323928f7 100644
--- a/Documentation/devicetree/bindings/video/renesas,du.txt
+++ b/Documentation/devicetree/bindings/video/renesas,du.txt
@@ -26,6 +26,10 @@ Required Properties:
per LVDS encoder. The functional clocks must be named "du.x" with "x"
being the channel numerical index. The LVDS clocks must be named
"lvds.x" with "x" being the LVDS encoder numerical index.
+ - In addition to the functional and encoder clocks, all DU versions also
+ support externally supplied pixel clocks. Those clocks are optional.
+ When supplied they must be named "dclkin.x" with "x" being the input
+     clock numerical index (see the illustrative fragment below).
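+
+     A hypothetical clocks fragment for a two-channel DU with one LVDS
+     encoder and one external pixel clock input; the phandles and clock
+     macros are assumptions for illustration only:
+
+	du: display@feb00000 {
+		...
+		clocks = <&mstp7_clks R8A7790_CLK_DU0>,
+			 <&mstp7_clks R8A7790_CLK_DU1>,
+			 <&mstp7_clks R8A7790_CLK_LVDS0>,
+			 <&x13_clk>;
+		clock-names = "du.0", "du.1", "lvds.0", "dclkin.0";
+	};
+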
Required nodes:
diff --git a/Documentation/devicetree/bindings/video/ti,dra7-dss.txt b/Documentation/devicetree/bindings/video/ti,dra7-dss.txt
new file mode 100644
index 000000000000..f33a05137b0e
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/ti,dra7-dss.txt
@@ -0,0 +1,69 @@
+Texas Instruments DRA7x Display Subsystem
+=========================================
+
+See Documentation/devicetree/bindings/video/ti,omap-dss.txt for generic
+description about OMAP Display Subsystem bindings.
+
+DSS Core
+--------
+
+Required properties:
+- compatible: "ti,dra7-dss"
+- reg: address and length of the register spaces for 'dss'
+- ti,hwmods: "dss_core"
+- clocks: handle to fclk
+- clock-names: "fck"
+- syscon: phandle to control module core syscon node
+
+Optional properties:
+
+Some DRA7xx SoCs have one dedicated video PLL, some have two. These properties
+can be used to describe the video PLLs:
+
+- reg: address and length of the register spaces for 'pll1_clkctrl',
+ 'pll1', 'pll2_clkctrl', 'pll2'
+- clocks: handle to video1 pll clock and video2 pll clock
+- clock-names: "video1_clk" and "video2_clk"
+
+Required nodes:
+- DISPC
+
+Optional nodes:
+- DSS Submodules: HDMI
+- Video port for DPI output
+
+DPI Endpoint required properties:
+- data-lines: number of lines used
+
+
+DISPC
+-----
+
+Required properties:
+- compatible: "ti,dra7-dispc"
+- reg: address and length of the register space
+- ti,hwmods: "dss_dispc"
+- interrupts: the DISPC interrupt
+- clocks: handle to fclk
+- clock-names: "fck"
+
+HDMI
+----
+
+Required properties:
+- compatible: "ti,dra7-hdmi"
+- reg: addresses and lengths of the register spaces for 'wp', 'pll', 'phy',
+ 'core'
+- reg-names: "wp", "pll", "phy", "core"
+- interrupts: the HDMI interrupt line
+- ti,hwmods: "dss_hdmi"
+- vdda-supply: vdda power supply
+- clocks: handles to fclk and pll clock
+- clock-names: "fck", "sys_clk"
+
+Optional nodes:
+- Video port for HDMI output
+
+HDMI Endpoint optional properties:
+- lanes: list of 8 pin numbers for the HDMI lanes: CLK+, CLK-, D0+, D0-,
+ D1+, D1-, D2+, D2-. (default: 0,1,2,3,4,5,6,7)
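+
+Example (a rough sketch; all unit addresses, register sizes, interrupt
+numbers and clock handles below are assumptions for illustration only):
+
+dss: dss@58000000 {
+	compatible = "ti,dra7-dss";
+	reg = <0x58000000 0x80>;
+	ti,hwmods = "dss_core";
+	clocks = <&dss_dss_clk>;
+	clock-names = "fck";
+	syscon = <&scm_conf>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges;
+
+	dispc@58001000 {
+		compatible = "ti,dra7-dispc";
+		reg = <0x58001000 0x1000>;
+		ti,hwmods = "dss_dispc";
+		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&dss_dss_clk>;
+		clock-names = "fck";
+	};
+};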
diff --git a/Documentation/devicetree/bindings/video/ti,opa362.txt b/Documentation/devicetree/bindings/video/ti,opa362.txt
new file mode 100644
index 000000000000..f96083c0bd17
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/ti,opa362.txt
@@ -0,0 +1,38 @@
+OPA362 analog video amplifier
+
+Required properties:
+- compatible: "ti,opa362"
+- enable-gpios: enable/disable output gpio
+
+Required node:
+- Video port 0 for opa362 input
+- Video port 1 for opa362 output
+
+Example:
+
+tv_amp: opa362 {
+ compatible = "ti,opa362";
+ enable-gpios = <&gpio1 23 0>; /* GPIO to enable video out amplifier */
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ opa_in: endpoint@0 {
+ remote-endpoint = <&venc_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ opa_out: endpoint@0 {
+ remote-endpoint = <&tv_connector_in>;
+ };
+ };
+ };
+};
+
+
+
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
index 30ae758e3eef..d418a6ce9812 100644
--- a/Documentation/devicetree/overlay-notes.txt
+++ b/Documentation/devicetree/overlay-notes.txt
@@ -10,7 +10,7 @@ How overlays work
-----------------
A Device Tree's overlay purpose is to modify the kernel's live tree, and
-have the modification affecting the state of the the kernel in a way that
+have the modification affecting the state of the kernel in a way that
is reflecting the changes.
Since the kernel mainly deals with devices, any new device node that result
in an active device should have it created while if the device node is either
@@ -80,7 +80,7 @@ result in foo+bar.dts
};
---- foo+bar.dts -------------------------------------------------------------
-As a result of the the overlay, a new device node (bar) has been created
+As a result of the overlay, a new device node (bar) has been created
so a bar platform device will be registered and if a matching device driver
is loaded the device will be created as expected.
diff --git a/Documentation/dmaengine/00-INDEX b/Documentation/dmaengine/00-INDEX
new file mode 100644
index 000000000000..07de6573d22b
--- /dev/null
+++ b/Documentation/dmaengine/00-INDEX
@@ -0,0 +1,8 @@
+00-INDEX
+ - this file.
+client.txt
+ - the DMA Engine API Guide.
+dmatest.txt
+ - how to compile, configure and use the dmatest system.
+provider.txt
+ - the DMA controller API. \ No newline at end of file
diff --git a/Documentation/driver-model/bus.txt b/Documentation/driver-model/bus.txt
index 6754b2df8aa1..b577a45b93ea 100644
--- a/Documentation/driver-model/bus.txt
+++ b/Documentation/driver-model/bus.txt
@@ -45,7 +45,7 @@ them are inherently bus-specific. Drivers typically declare an array
of device IDs of devices they support that reside in a bus-specific
driver structure.
-The purpose of the match callback is provide the bus an opportunity to
+The purpose of the match callback is to give the bus an opportunity to
determine if a particular driver supports a particular device by
comparing the device IDs the driver supports with the device ID of a
particular device, without sacrificing bus-specific functionality or
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index b5ab416cd53a..6d1e8eeb5990 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -258,6 +258,8 @@ IIO
devm_iio_device_free()
devm_iio_device_register()
devm_iio_device_unregister()
+ devm_iio_kfifo_allocate()
+ devm_iio_kfifo_free()
devm_iio_trigger_alloc()
devm_iio_trigger_free()
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index ac28149aede4..9922939e7d99 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -34,6 +34,9 @@ configfs/
- directory containing configfs documentation and example code.
cramfs.txt
- info on the cram filesystem for small storage (ROMs etc).
+dax.txt
+ - info on avoiding the page cache for files stored on CPU-addressable
+ storage devices.
debugfs.txt
- info on the debugfs filesystem.
devpts.txt
@@ -154,5 +157,3 @@ xfs-self-describing-metadata.txt
- info on XFS Self Describing Metadata.
xfs.txt
- info and mount options for the XFS filesystem.
-xip.txt
- - info on execute-in-place for file mappings.
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index b30753cbf431..2ca3d17eee56 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -199,8 +199,6 @@ prototypes:
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
- int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
- unsigned long *);
int (*migratepage)(struct address_space *, struct page *, struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
@@ -225,7 +223,6 @@ invalidatepage: yes
releasepage: yes
freepage: yes
direct_IO:
-get_xip_mem: maybe
migratepage: yes (both)
launder_page: yes
is_partially_uptodate: yes
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
new file mode 100644
index 000000000000..baf41118660d
--- /dev/null
+++ b/Documentation/filesystems/dax.txt
@@ -0,0 +1,94 @@
+Direct Access for files
+-----------------------
+
+Motivation
+----------
+
+The page cache is usually used to buffer reads and writes to files.
+It is also used to provide the pages which are mapped into userspace
+by a call to mmap.
+
+For block devices that are memory-like, the page cache pages would be
+unnecessary copies of the original storage. The DAX code removes the
+extra copy by performing reads and writes directly to the storage device.
+For file mappings, the storage device is mapped directly into userspace.
+
+
+Usage
+-----
+
+If you have a block device which supports DAX, you can make a filesystem
+on it as usual. When mounting it, use the -o dax option manually
+or add 'dax' to the options in /etc/fstab.
+
+
+Implementation Tips for Block Driver Writers
+--------------------------------------------
+
+To support DAX in your block driver, implement the 'direct_access'
+block device operation. It is used to translate the sector number
+(expressed in units of 512-byte sectors) to a page frame number (pfn)
+that identifies the physical page for the memory. It also returns a
+kernel virtual address that can be used to access the memory.
+
+The direct_access method takes a 'size' parameter that indicates the
+number of bytes being requested. The function should return the number
+of bytes that can be contiguously accessed at that offset. It may also
+return a negative errno if an error occurs.
+
+In order to support this method, the storage must be byte-accessible by
+the CPU at all times. If your device uses paging techniques to expose
+a large amount of memory through a smaller window, then you cannot
+implement direct_access. Equally, if your device can occasionally
+stall the CPU for an extended period, you should also not attempt to
+implement direct_access.
+
+These block devices may be used for inspiration:
+- axonram: Axon DDR2 device driver
+- brd: RAM backed block device driver
+- dcssblk: s390 dcss block device driver
+
+
+Implementation Tips for Filesystem Writers
+------------------------------------------
+
+Filesystem support consists of
+- adding support to mark inodes as being DAX by setting the S_DAX flag in
+ i_flags
+- implementing the direct_IO address space operation, and calling
+ dax_do_io() instead of blockdev_direct_IO() if S_DAX is set
+- implementing an mmap file operation for DAX files which sets the
+ VM_MIXEDMAP flag on the VMA, and setting the vm_ops to include handlers
+ for fault and page_mkwrite (which should probably call dax_fault() and
+ dax_mkwrite(), passing the appropriate get_block() callback)
+- calling dax_truncate_page() instead of block_truncate_page() for DAX files
+- calling dax_zero_page_range() instead of zero_user() for DAX files
+- ensuring that there is sufficient locking between reads, writes,
+ truncates and page faults
+
+The get_block() callback passed to the DAX functions may return
+uninitialised extents. If it does, it must ensure that simultaneous
+calls to get_block() (for example by a page-fault racing with a read()
+or a write()) work correctly.
+
+These filesystems may be used for inspiration:
+- ext2: the second extended filesystem, see Documentation/filesystems/ext2.txt
+- ext4: the fourth extended filesystem, see Documentation/filesystems/ext4.txt
+
+
+Shortcomings
+------------
+
+Even if the kernel or its modules are stored on a filesystem that supports
+DAX on a block device that supports DAX, they will still be copied into RAM.
+
+The DAX code does not work correctly on architectures which have virtually
+mapped caches such as ARM, MIPS and SPARC.
+
+Calling get_user_pages() on a range of user memory that has been mmapped
+from a DAX file will fail as there are no 'struct page' structures to describe
+those pages. This problem is being worked on. That means that O_DIRECT
+reads/writes to those memory ranges from a non-DAX file will fail (note
+that O_DIRECT reads/writes _of a DAX file_ do work, it is the memory
+that is being accessed that is key here). Other things that will not
+work include RDMA, sendfile() and splice().
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index 67639f905f10..b9714569e472 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -20,6 +20,9 @@ minixdf Makes `df' act like Minix.
check=none, nocheck (*) Don't do extra checking of bitmaps on mount
(check=normal and check=strict options removed)
+dax Use direct access (no page cache). See
+ Documentation/filesystems/dax.txt.
+
debug Extra debugging information is sent to the
kernel syslog. Useful for developers.
@@ -56,8 +59,6 @@ noacl Don't support POSIX ACLs.
nobh Do not attach buffer_heads to file pagecache.
-xip Use execute in place (no caching) if possible
-
grpquota,noquota,quota,usrquota Quota options are silently ignored by ext2.
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 919a3293aaa4..6c0108eb0137 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -386,6 +386,10 @@ max_dir_size_kb=n This limits the size of directories so that any
i_version Enable 64-bit inode version support. This option is
off by default.
+dax Use direct access (no page cache). See
+ Documentation/filesystems/dax.txt. Note that
+ this option is incompatible with data=journal.
+
Data Mode
=========
There are 3 different data modes:
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e0950c483c22..dac11d7fef27 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -106,6 +106,8 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
+norecovery Disable the roll-forward recovery routine, mounted read-
+ only (i.e., -o ro,disable_roll_forward)
discard Issue discard/TRIM commands when a segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
segments for data from the beginning of main area, while
@@ -197,6 +199,10 @@ Files in /sys/fs/f2fs/<devname>
checkpoint is triggered, and issued during the
checkpoint. By default, it is disabled with 0.
+ trim_sections                This parameter controls the number of sections
+                              to be trimmed out in batch mode when FITRIM
+                              is issued. The default is 32 sections.
+
ipu_policy This parameter controls the policy of in-place
updates in f2fs. There are five policies:
0x01: F2FS_IPU_FORCE, 0x02: F2FS_IPU_SSR,
diff --git a/Documentation/filesystems/fiemap.txt b/Documentation/filesystems/fiemap.txt
index 1b805a0efbb0..f6d9c99103a4 100644
--- a/Documentation/filesystems/fiemap.txt
+++ b/Documentation/filesystems/fiemap.txt
@@ -196,7 +196,8 @@ struct fiemap_extent_info {
};
It is intended that the file system should not need to access any of this
-structure directly.
+structure directly. Filesystem handlers should be tolerant of signals and return
+EINTR once a fatal signal is received.
Flag checking should be done at the beginning of the ->fiemap callback via the
diff --git a/Documentation/filesystems/inotify.txt b/Documentation/filesystems/inotify.txt
index cfd02712b83e..51f61db787fb 100644
--- a/Documentation/filesystems/inotify.txt
+++ b/Documentation/filesystems/inotify.txt
@@ -4,201 +4,10 @@
Document started 15 Mar 2005 by Robert Love <rml@novell.com>
+Document updated 4 Jan 2015 by Zhang Zhen <zhenzhang.zhang@huawei.com>
+	--Deleted obsolete interface; refer to the man pages for the user interface.
-
-(i) User Interface
-
-Inotify is controlled by a set of three system calls and normal file I/O on a
-returned file descriptor.
-
-First step in using inotify is to initialise an inotify instance:
-
- int fd = inotify_init ();
-
-Each instance is associated with a unique, ordered queue.
-
-Change events are managed by "watches". A watch is an (object,mask) pair where
-the object is a file or directory and the mask is a bit mask of one or more
-inotify events that the application wishes to receive. See <linux/inotify.h>
-for valid events. A watch is referenced by a watch descriptor, or wd.
-
-Watches are added via a path to the file.
-
-Watches on a directory will return events on any files inside of the directory.
-
-Adding a watch is simple:
-
- int wd = inotify_add_watch (fd, path, mask);
-
-Where "fd" is the return value from inotify_init(), path is the path to the
-object to watch, and mask is the watch mask (see <linux/inotify.h>).
-
-You can update an existing watch in the same manner, by passing in a new mask.
-
-An existing watch is removed via
-
- int ret = inotify_rm_watch (fd, wd);
-
-Events are provided in the form of an inotify_event structure that is read(2)
-from a given inotify instance. The filename is of dynamic length and follows
-the struct. It is of size len. The filename is padded with null bytes to
-ensure proper alignment. This padding is reflected in len.
-
-You can slurp multiple events by passing a large buffer, for example
-
- size_t len = read (fd, buf, BUF_LEN);
-
-Where "buf" is a pointer to an array of "inotify_event" structures at least
-BUF_LEN bytes in size. The above example will return as many events as are
-available and fit in BUF_LEN.
-
-Each inotify instance fd is also select()- and poll()-able.
-
-You can find the size of the current event queue via the standard FIONREAD
-ioctl on the fd returned by inotify_init().
-
-All watches are destroyed and cleaned up on close.
-
-
-(ii)
-
-Prototypes:
-
- int inotify_init (void);
- int inotify_add_watch (int fd, const char *path, __u32 mask);
- int inotify_rm_watch (int fd, __u32 mask);
-
-
-(iii) Kernel Interface
-
-Inotify's kernel API consists a set of functions for managing watches and an
-event callback.
-
-To use the kernel API, you must first initialize an inotify instance with a set
-of inotify_operations. You are given an opaque inotify_handle, which you use
-for any further calls to inotify.
-
- struct inotify_handle *ih = inotify_init(my_event_handler);
-
-You must provide a function for processing events and a function for destroying
-the inotify watch.
-
- void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
- u32 cookie, const char *name, struct inode *inode)
-
- watch - the pointer to the inotify_watch that triggered this call
- wd - the watch descriptor
- mask - describes the event that occurred
- cookie - an identifier for synchronizing events
- name - the dentry name for affected files in a directory-based event
- inode - the affected inode in a directory-based event
-
- void destroy_watch(struct inotify_watch *watch)
-
-You may add watches by providing a pre-allocated and initialized inotify_watch
-structure and specifying the inode to watch along with an inotify event mask.
-You must pin the inode during the call. You will likely wish to embed the
-inotify_watch structure in a structure of your own which contains other
-information about the watch. Once you add an inotify watch, it is immediately
-subject to removal depending on filesystem events. You must grab a reference if
-you depend on the watch hanging around after the call.
-
- inotify_init_watch(&my_watch->iwatch);
- inotify_get_watch(&my_watch->iwatch); // optional
- s32 wd = inotify_add_watch(ih, &my_watch->iwatch, inode, mask);
- inotify_put_watch(&my_watch->iwatch); // optional
-
-You may use the watch descriptor (wd) or the address of the inotify_watch for
-other inotify operations. You must not directly read or manipulate data in the
-inotify_watch. Additionally, you must not call inotify_add_watch() more than
-once for a given inotify_watch structure, unless you have first called either
-inotify_rm_watch() or inotify_rm_wd().
-
-To determine if you have already registered a watch for a given inode, you may
-call inotify_find_watch(), which gives you both the wd and the watch pointer for
-the inotify_watch, or an error if the watch does not exist.
-
- wd = inotify_find_watch(ih, inode, &watchp);
-
-You may use container_of() on the watch pointer to access your own data
-associated with a given watch. When an existing watch is found,
-inotify_find_watch() bumps the refcount before releasing its locks. You must
-put that reference with:
-
- put_inotify_watch(watchp);
-
-Call inotify_find_update_watch() to update the event mask for an existing watch.
-inotify_find_update_watch() returns the wd of the updated watch, or an error if
-the watch does not exist.
-
- wd = inotify_find_update_watch(ih, inode, mask);
-
-An existing watch may be removed by calling either inotify_rm_watch() or
-inotify_rm_wd().
-
- int ret = inotify_rm_watch(ih, &my_watch->iwatch);
- int ret = inotify_rm_wd(ih, wd);
-
-A watch may be removed while executing your event handler with the following:
-
- inotify_remove_watch_locked(ih, iwatch);
-
-Call inotify_destroy() to remove all watches from your inotify instance and
-release it. If there are no outstanding references, inotify_destroy() will call
-your destroy_watch op for each watch.
-
- inotify_destroy(ih);
-
-When inotify removes a watch, it sends an IN_IGNORED event to your callback.
-You may use this event as an indication to free the watch memory. Note that
-inotify may remove a watch due to filesystem events, as well as by your request.
-If you use IN_ONESHOT, inotify will remove the watch after the first event, at
-which point you may call the final inotify_put_watch.
-
-(iv) Kernel Interface Prototypes
-
- struct inotify_handle *inotify_init(struct inotify_operations *ops);
-
- inotify_init_watch(struct inotify_watch *watch);
-
- s32 inotify_add_watch(struct inotify_handle *ih,
- struct inotify_watch *watch,
- struct inode *inode, u32 mask);
-
- s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
- struct inotify_watch **watchp);
-
- s32 inotify_find_update_watch(struct inotify_handle *ih,
- struct inode *inode, u32 mask);
-
- int inotify_rm_wd(struct inotify_handle *ih, u32 wd);
-
- int inotify_rm_watch(struct inotify_handle *ih,
- struct inotify_watch *watch);
-
- void inotify_remove_watch_locked(struct inotify_handle *ih,
- struct inotify_watch *watch);
-
- void inotify_destroy(struct inotify_handle *ih);
-
- void get_inotify_watch(struct inotify_watch *watch);
- void put_inotify_watch(struct inotify_watch *watch);
-
-
-(v) Internal Kernel Implementation
-
-Each inotify instance is represented by an inotify_handle structure.
-Inotify's userspace consumers also have an inotify_device which is
-associated with the inotify_handle, and on which events are queued.
-
-Each watch is associated with an inotify_watch structure. Watches are chained
-off of each associated inotify_handle and each associated inode.
-
-See fs/notify/inotify/inotify_fsnotify.c and fs/notify/inotify/inotify_user.c
-for the locking and lifetime rules.
-
-
-(vi) Rationale
+(i) Rationale
Q: What is the design decision behind not tying the watch to the open fd of
the watched object?
diff --git a/Documentation/filesystems/nfs/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt
index c49cd7e796e7..682a59fabe3f 100644
--- a/Documentation/filesystems/nfs/nfs41-server.txt
+++ b/Documentation/filesystems/nfs/nfs41-server.txt
@@ -24,11 +24,6 @@ focuses on the mandatory-to-implement NFSv4.1 Sessions, providing
"exactly once" semantics and better control and throttling of the
resources allocated for each client.
-Other NFSv4.1 features, Parallel NFS operations in particular,
-are still under development out of tree.
-See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design
-for more information.
-
The table below, taken from the NFSv4.1 document, lists
the operations that are mandatory to implement (REQ), optional
(OPT), and NFSv4.0 operations that are required not to implement (MNI)
@@ -43,9 +38,7 @@ The OPTIONAL features identified and their abbreviations are as follows:
The following abbreviations indicate the linux server implementation status.
I Implemented NFSv4.1 operations.
NS Not Supported.
- NS* unimplemented optional feature.
- P pNFS features implemented out of tree.
- PNS pNFS features that are not supported yet (out of tree).
+ NS* Unimplemented optional feature.
Operations
@@ -70,13 +63,13 @@ I | DESTROY_SESSION | REQ | | Section 18.37 |
I | EXCHANGE_ID | REQ | | Section 18.35 |
I | FREE_STATEID | REQ | | Section 18.38 |
| GETATTR | REQ | | Section 18.7 |
-P | GETDEVICEINFO | OPT | pNFS (REQ) | Section 18.40 |
-P | GETDEVICELIST | OPT | pNFS (OPT) | Section 18.41 |
+I | GETDEVICEINFO | OPT | pNFS (REQ) | Section 18.40 |
+NS*| GETDEVICELIST | OPT | pNFS (OPT) | Section 18.41 |
| GETFH | REQ | | Section 18.8 |
NS*| GET_DIR_DELEGATION | OPT | DDELG (REQ) | Section 18.39 |
-P | LAYOUTCOMMIT | OPT | pNFS (REQ) | Section 18.42 |
-P | LAYOUTGET | OPT | pNFS (REQ) | Section 18.43 |
-P | LAYOUTRETURN | OPT | pNFS (REQ) | Section 18.44 |
+I | LAYOUTCOMMIT | OPT | pNFS (REQ) | Section 18.42 |
+I | LAYOUTGET | OPT | pNFS (REQ) | Section 18.43 |
+I | LAYOUTRETURN | OPT | pNFS (REQ) | Section 18.44 |
| LINK | OPT | | Section 18.9 |
| LOCK | REQ | | Section 18.10 |
| LOCKT | REQ | | Section 18.11 |
@@ -122,9 +115,9 @@ Callback Operations
| | MNI | or OPT) | |
+-------------------------+-----------+-------------+---------------+
| CB_GETATTR | OPT | FDELG (REQ) | Section 20.1 |
-P | CB_LAYOUTRECALL | OPT | pNFS (REQ) | Section 20.3 |
+I | CB_LAYOUTRECALL | OPT | pNFS (REQ) | Section 20.3 |
NS*| CB_NOTIFY | OPT | DDELG (REQ) | Section 20.4 |
-P | CB_NOTIFY_DEVICEID | OPT | pNFS (OPT) | Section 20.12 |
+NS*| CB_NOTIFY_DEVICEID | OPT | pNFS (OPT) | Section 20.12 |
NS*| CB_NOTIFY_LOCK | OPT | | Section 20.11 |
NS*| CB_PUSH_DELEG | OPT | FDELG (OPT) | Section 20.5 |
| CB_RECALL | OPT | FDELG, | Section 20.2 |
diff --git a/Documentation/filesystems/nfs/pnfs-block-server.txt b/Documentation/filesystems/nfs/pnfs-block-server.txt
new file mode 100644
index 000000000000..2143673cf154
--- /dev/null
+++ b/Documentation/filesystems/nfs/pnfs-block-server.txt
@@ -0,0 +1,37 @@
+pNFS block layout server user guide
+
+The Linux NFS server now supports the pNFS block layout extension. In this
+case the NFS server acts as Metadata Server (MDS) for pNFS, which in addition
+to handling all the metadata access to the NFS export also hands out layouts
+to the clients to directly access the underlying block devices that are
+shared with the client.
+
+To use pNFS block layouts with the Linux NFS server, the exported file
+system needs to support the pNFS block layouts (currently just XFS), and the
+file system must sit on shared storage (typically iSCSI) that is accessible
+to the clients in addition to the MDS. As of now the file system needs to
+sit directly on the exported volume; striping or concatenation of
+volumes on the MDS and clients is not supported yet.
+
+On the server, pNFS block volume support is enabled automatically if the
+file system supports it.  On the client, make sure the kernel has the
+CONFIG_PNFS_BLOCK option enabled, the blkmapd daemon from nfs-utils is
+running, and the file system is mounted using the NFSv4.1 protocol version
+(mount -o vers=4.1).
+
+If the nfsd server needs to fence a non-responding client, it calls
+/sbin/nfsd-recall-failed with the first argument set to the IP address of
+the client, and the second argument set to the device node without the /dev
+prefix for the file system to be fenced. Below is an example file that shows
+how to translate the device into a serial number from SCSI EVPD 0x80:
+
+cat > /sbin/nfsd-recall-failed << EOF
+#!/bin/sh
+
+CLIENT="$1"
+DEV="/dev/$2"
+EVPD=`sg_inq --page=0x80 ${DEV} | \
+ grep "Unit serial number:" | \
+ awk -F ': ' '{print $2}'`
+
+echo "fencing client ${CLIENT} serial ${EVPD}" >> /var/log/pnfsd-fence.log
+EOF
diff --git a/Documentation/filesystems/nfs/pnfs.txt b/Documentation/filesystems/nfs/pnfs.txt
index adc81a35fe2d..44a9f2493a88 100644
--- a/Documentation/filesystems/nfs/pnfs.txt
+++ b/Documentation/filesystems/nfs/pnfs.txt
@@ -57,15 +57,16 @@ bit is set, preventing any new lsegs from being added.
layout drivers
--------------
-PNFS utilizes what is called layout drivers. The STD defines 3 basic
-layout types: "files" "objects" and "blocks". For each of these types
-there is a layout-driver with a common function-vectors table which
-are called by the nfs-client pnfs-core to implement the different layout
-types.
+PNFS utilizes what is called layout drivers. The STD defines 4 basic
+layout types: "files", "objects", "blocks", and "flexfiles". For each
+of these types there is a layout-driver with a common function-vectors
+table which are called by the nfs-client pnfs-core to implement the
+different layout types.
-Files-layout-driver code is in: fs/nfs/nfs4filelayout.c && nfs4filelayoutdev.c
+Files-layout-driver code is in: fs/nfs/filelayout/.. directory
Objects-layout-deriver code is in: fs/nfs/objlayout/.. directory
Blocks-layout-deriver code is in: fs/nfs/blocklayout/.. directory
+Flexfiles-layout-driver code is in: fs/nfs/flexfilelayout/.. directory
objects-layout setup
--------------------
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 7618a287aa41..28f8c08201e2 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -100,3 +100,7 @@ coherency=full (*) Disallow concurrent O_DIRECT writes, cluster inode
coherency=buffered Allow concurrent O_DIRECT writes without EX lock among
nodes, which gains high performance at risk of getting
stale data on other nodes.
+journal_async_commit Commit block can be written to disk without waiting
+ for descriptor blocks. If enabled older kernels cannot
+ mount the device. This will enable 'journal_checksum'
+ internally.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index aae9dd13c91f..a07ba61662ed 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -28,7 +28,7 @@ Table of Contents
1.6 Parallel port info in /proc/parport
1.7 TTY info in /proc/tty
1.8 Miscellaneous kernel statistics in /proc/stat
- 1.9 Ext4 file system parameters
+ 1.9 Ext4 file system parameters
2 Modifying System Parameters
@@ -42,6 +42,7 @@ Table of Contents
3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
3.7 /proc/<pid>/task/<tid>/children - Information about task children
3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file
+ 3.9 /proc/<pid>/map_files - Information about memory mapped files
4 Configuring procfs
4.1 Mount options
@@ -144,6 +145,8 @@ Table 1-1: Process specific entries in /proc
stack Report full stack trace, enable via CONFIG_STACKTRACE
smaps a extension based on maps, showing the memory consumption of
each mapping and flags associated with it
+ numa_maps an extension based on maps, showing the memory locality and
+ binding policy as well as mem usage (in pages) of each mapping.
..............................................................................
For example, to get the status information of a process, all you have to do is
@@ -488,12 +491,47 @@ To clear the bits for the file mapped pages associated with the process
To clear the soft-dirty bit
> echo 4 > /proc/PID/clear_refs
+To reset the peak resident set size ("high water mark") to the process's
+current value:
+ > echo 5 > /proc/PID/clear_refs
+
Any other value written to /proc/PID/clear_refs will have no effect.
The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
using /proc/kpageflags and number of times a page is mapped using
/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
+The /proc/pid/numa_maps is an extension based on maps, showing the memory
+locality and binding policy, as well as the memory usage (in pages) of
+each mapping. The output follows a general format where mapping details are
+summarized, separated by blank spaces, one mapping per line:
+
+address policy mapping details
+
+00400000 default file=/usr/local/bin/app mapped=1 active=0 N3=1 kernelpagesize_kB=4
+00600000 default file=/usr/local/bin/app anon=1 dirty=1 N3=1 kernelpagesize_kB=4
+3206000000 default file=/lib64/ld-2.12.so mapped=26 mapmax=6 N0=24 N3=2 kernelpagesize_kB=4
+320621f000 default file=/lib64/ld-2.12.so anon=1 dirty=1 N3=1 kernelpagesize_kB=4
+3206220000 default file=/lib64/ld-2.12.so anon=1 dirty=1 N3=1 kernelpagesize_kB=4
+3206221000 default anon=1 dirty=1 N3=1 kernelpagesize_kB=4
+3206800000 default file=/lib64/libc-2.12.so mapped=59 mapmax=21 active=55 N0=41 N3=18 kernelpagesize_kB=4
+320698b000 default file=/lib64/libc-2.12.so
+3206b8a000 default file=/lib64/libc-2.12.so anon=2 dirty=2 N3=2 kernelpagesize_kB=4
+3206b8e000 default file=/lib64/libc-2.12.so anon=1 dirty=1 N3=1 kernelpagesize_kB=4
+3206b8f000 default anon=3 dirty=3 active=1 N3=3 kernelpagesize_kB=4
+7f4dc10a2000 default anon=3 dirty=3 N3=3 kernelpagesize_kB=4
+7f4dc10b4000 default anon=2 dirty=2 active=1 N3=2 kernelpagesize_kB=4
+7f4dc1200000 default file=/anon_hugepage\040(deleted) huge anon=1 dirty=1 N3=1 kernelpagesize_kB=2048
+7fff335f0000 default stack anon=3 dirty=3 N3=3 kernelpagesize_kB=4
+7fff3369d000 default mapped=1 mapmax=35 active=0 N3=1 kernelpagesize_kB=4
+
+Where:
+"address" is the starting address for the mapping;
+"policy" reports the NUMA memory policy set for the mapping (see vm/numa_memory_policy.txt);
+"mapping details" summarizes mapping data such as mapping type, page usage counters,
+node locality page counters (N0 == node0, N1 == node1, ...) and the kernel page
+size, in KB, that is backing the mapping up.
+
1.2 Kernel data
---------------
@@ -1763,6 +1801,28 @@ pair provide additional information particular to the objects they represent.
with TIMER_ABSTIME option which will be shown in 'settime flags', but 'it_value'
still exhibits timer's remaining time.
+3.9 /proc/<pid>/map_files - Information about memory mapped files
+---------------------------------------------------------------------
+This directory contains symbolic links which represent memory mapped files
+the process is maintaining. Example output:
+
+ | lr-------- 1 root root 64 Jan 27 11:24 333c600000-333c620000 -> /usr/lib64/ld-2.18.so
+ | lr-------- 1 root root 64 Jan 27 11:24 333c81f000-333c820000 -> /usr/lib64/ld-2.18.so
+ | lr-------- 1 root root 64 Jan 27 11:24 333c820000-333c821000 -> /usr/lib64/ld-2.18.so
+ | ...
+ | lr-------- 1 root root 64 Jan 27 11:24 35d0421000-35d0422000 -> /usr/lib64/libselinux.so.1
+ | lr-------- 1 root root 64 Jan 27 11:24 400000-41a000 -> /usr/bin/ls
+
+The name of a link represents the virtual memory bounds of a mapping, i.e.
+vm_area_struct::vm_start-vm_area_struct::vm_end.
+
+The main purpose of map_files is to retrieve a set of memory mapped files
+in a fast way instead of parsing /proc/<pid>/maps or /proc/<pid>/smaps,
+both of which contain many more records.  At the same time one can open(2)
+the mappings from the listings of two processes and compare their inode
+numbers to figure out which anonymous memory areas are actually shared.
+
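+A minimal user-space sketch of that comparison (the program name, the
+command-line handling and the messages are made up for illustration;
+reading another process's map_files entries requires appropriate
+privileges):
+
+/* cmp_map.c: report whether two /proc/<pid>/map_files entries are backed
+ * by the same object, e.g.
+ *	./cmp_map /proc/123/map_files/400000-41a000 \
+ *		  /proc/456/map_files/400000-41a000
+ */
+#include <stdio.h>
+#include <sys/stat.h>
+
+int main(int argc, char **argv)
+{
+	struct stat a, b;
+
+	if (argc != 3) {
+		fprintf(stderr, "usage: %s <entry> <entry>\n", argv[0]);
+		return 1;
+	}
+	if (stat(argv[1], &a) || stat(argv[2], &b)) {
+		perror("stat");
+		return 1;
+	}
+	/* stat() follows the symlink, so st_dev/st_ino describe the mapped
+	 * object itself; equal values mean the memory area is shared. */
+	if (a.st_dev == b.st_dev && a.st_ino == b.st_ino)
+		printf("mappings share the same backing object\n");
+	else
+		printf("mappings are distinct\n");
+	return 0;
+}
+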
------------------------------------------------------------------------------
Configuring procfs
------------------------------------------------------------------------------
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index b797ed38de46..9de4303201e1 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -194,16 +194,16 @@ which is in the string esc will be represented in octal form in the output.
There are also a pair of functions for printing filenames:
- int seq_path(struct seq_file *m, struct path *path, char *esc);
- int seq_path_root(struct seq_file *m, struct path *path,
- struct path *root, char *esc)
+ int seq_path(struct seq_file *m, const struct path *path,
+ const char *esc);
+ int seq_path_root(struct seq_file *m, const struct path *path,
+ const struct path *root, const char *esc)
Here, path indicates the file of interest, and esc is a set of characters
which should be escaped in the output. A call to seq_path() will output
the path relative to the current process's filesystem root. If a different
-root is desired, it can be used with seq_path_root(). Note that, if it
-turns out that path cannot be reached from root, the value of root will be
-changed in seq_file_root() to a root which *does* work.
+root is desired, it can be used with seq_path_root(). If it turns out that
+path cannot be reached from root, seq_path_root() returns SEQ_SKIP.
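+
+As a rough illustration (the use of ->private and the escape set below are
+hypothetical, not taken from any particular filesystem), a show() method
+might print a path like this:
+
+	static int example_show(struct seq_file *m, void *v)
+	{
+		struct file *file = m->private;	/* stashed at open time */
+
+		seq_puts(m, "path: ");
+		/* escape whitespace so the output stays easy to parse */
+		seq_path(m, &file->f_path, " \t\n\\");
+		seq_putc(m, '\n');
+		return 0;
+	}
+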
A function producing complicated output may want to check
bool seq_has_overflowed(struct seq_file *m);
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 43ce0507ee25..966b22829f3b 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -591,8 +591,6 @@ struct address_space_operations {
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
- struct page* (*get_xip_page)(struct address_space *, sector_t,
- int);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct page *, struct page *);
int (*launder_page) (struct page *);
@@ -748,11 +746,6 @@ struct address_space_operations {
and transfer data directly between the storage and the
application's address space.
- get_xip_page: called by the VM to translate a block number to a page.
- The page is valid until the corresponding filesystem is unmounted.
- Filesystems that want to use execute-in-place (XIP) need to implement
- it. An example implementation can be found in fs/ext2/xip.c.
-
migrate_page: This is used to compact the physical memory usage.
If the VM wants to relocate a page (maybe off a memory card
that is signalling imminent failure) it will pass a new page
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 5be51fd888bd..0bfafe108357 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -287,9 +287,9 @@ The following sysctls are available for the XFS filesystem:
XFS_ERRLEVEL_LOW: 1
XFS_ERRLEVEL_HIGH: 5
- fs.xfs.panic_mask (Min: 0 Default: 0 Max: 127)
+ fs.xfs.panic_mask (Min: 0 Default: 0 Max: 255)
Causes certain error conditions to call BUG(). Value is a bitmask;
- AND together the tags which represent errors which should cause panics:
+ OR together the tags which represent errors which should cause panics:
XFS_NO_PTAG 0
XFS_PTAG_IFLUSH 0x00000001
@@ -299,6 +299,7 @@ The following sysctls are available for the XFS filesystem:
XFS_PTAG_SHUTDOWN_CORRUPT 0x00000010
XFS_PTAG_SHUTDOWN_IOERROR 0x00000020
XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040
+ XFS_PTAG_FSBLOCK_ZERO 0x00000080
This option is intended for debugging only.
@@ -348,16 +349,13 @@ The following sysctls are available for the XFS filesystem:
Deprecated Sysctls
==================
- fs.xfs.xfsbufd_centisecs (Min: 50 Default: 100 Max: 3000)
- Dirty metadata is now tracked by the log subsystem and
- flushing is driven by log space and idling demands. The
- xfsbufd no longer exists, so this syctl does nothing.
+None at present.
- Due for removal in 3.14.
- fs.xfs.age_buffer_centisecs (Min: 100 Default: 1500 Max: 720000)
- Dirty metadata is now tracked by the log subsystem and
- flushing is driven by log space and idling demands. The
- xfsbufd no longer exists, so this syctl does nothing.
+Removed Sysctls
+===============
- Due for removal in 3.14.
+ Name Removed
+ ---- -------
+ fs.xfs.xfsbufd_centisec v3.20
+ fs.xfs.age_buffer_centisecs v3.20
diff --git a/Documentation/filesystems/xip.txt b/Documentation/filesystems/xip.txt
deleted file mode 100644
index 0466ee569278..000000000000
--- a/Documentation/filesystems/xip.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-Execute-in-place for file mappings
-----------------------------------
-
-Motivation
-----------
-File mappings are performed by mapping page cache pages to userspace. In
-addition, read&write type file operations also transfer data from/to the page
-cache.
-
-For memory backed storage devices that use the block device interface, the page
-cache pages are in fact copies of the original storage. Various approaches
-exist to work around the need for an extra copy. The ramdisk driver for example
-does read the data into the page cache, keeps a reference, and discards the
-original data behind later on.
-
-Execute-in-place solves this issue the other way around: instead of keeping
-data in the page cache, the need to have a page cache copy is eliminated
-completely. With execute-in-place, read&write type operations are performed
-directly from/to the memory backed storage device. For file mappings, the
-storage device itself is mapped directly into userspace.
-
-This implementation was initially written for shared memory segments between
-different virtual machines on s390 hardware to allow multiple machines to
-share the same binaries and libraries.
-
-Implementation
---------------
-Execute-in-place is implemented in three steps: block device operation,
-address space operation, and file operations.
-
-A block device operation named direct_access is used to retrieve a
-reference (pointer) to a block on-disk. The reference is supposed to be
-cpu-addressable, physical address and remain valid until the release operation
-is performed. A struct block_device reference is used to address the device,
-and a sector_t argument is used to identify the individual block. As an
-alternative, memory technology devices can be used for this.
-
-The block device operation is optional, these block devices support it as of
-today:
-- dcssblk: s390 dcss block device driver
-
-An address space operation named get_xip_mem is used to retrieve references
-to a page frame number and a kernel address. To obtain these values a reference
-to an address_space is provided. This function assigns values to the kmem and
-pfn parameters. The third argument indicates whether the function should allocate
-blocks if needed.
-
-This address space operation is mutually exclusive with readpage&writepage that
-do page cache read/write operations.
-The following filesystems support it as of today:
-- ext2: the second extended filesystem, see Documentation/filesystems/ext2.txt
-
-A set of file operations that do utilize get_xip_page can be found in
-mm/filemap_xip.c . The following file operation implementations are provided:
-- aio_read/aio_write
-- readv/writev
-- sendfile
-
-The generic file operations do_sync_read/do_sync_write can be used to implement
-classic synchronous IO calls.
-
-Shortcomings
-------------
-This implementation is limited to storage devices that are cpu addressable at
-all times (no highmem or such). It works well on rom/ram, but enhancements are
-needed to make it work with flash in read+write mode.
-Putting the Linux kernel and/or its modules on a xip filesystem does not mean
-they are not copied.
diff --git a/Documentation/futex-requeue-pi.txt b/Documentation/futex-requeue-pi.txt
index 31b16610c416..77b36f59d16b 100644
--- a/Documentation/futex-requeue-pi.txt
+++ b/Documentation/futex-requeue-pi.txt
@@ -98,7 +98,7 @@ rt_mutex_start_proxy_lock() and rt_mutex_finish_proxy_lock(), which
allow the requeue code to acquire an uncontended rt_mutex on behalf
of the waiter and to enqueue the waiter on a contended rt_mutex.
Two new system calls provide the kernel<->user interface to
-requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_REQUEUE_CMP_PI.
+requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI.
FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait()
and pthread_cond_timedwait()) to block on the initial futex and wait
@@ -107,7 +107,7 @@ result of a high-speed collision between futex_wait() and
futex_lock_pi(), with some extra logic to check for the additional
wake-up scenarios.
-FUTEX_REQUEUE_CMP_PI is called by the waker
+FUTEX_CMP_REQUEUE_PI is called by the waker
(pthread_cond_broadcast() and pthread_cond_signal()) to requeue and
possibly wake the waiting tasks. Internally, this system call is
still handled by futex_requeue (by passing requeue_pi=1). Before
@@ -120,12 +120,12 @@ task as a waiter on the underlying rt_mutex. It is possible that
the lock can be acquired at this stage as well, if so, the next
waiter is woken to finish the acquisition of the lock.
-FUTEX_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
+FUTEX_CMP_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
their sum is all that really matters. futex_requeue() will wake or
requeue up to nr_wake + nr_requeue tasks. It will wake only as many
tasks as it can acquire the lock for, which in the majority of cases
should be 0 as good programming practice dictates that the caller of
either pthread_cond_broadcast() or pthread_cond_signal() acquire the
-mutex prior to making the call. FUTEX_REQUEUE_PI requires that
+mutex prior to making the call. FUTEX_CMP_REQUEUE_PI requires that
nr_wake=1. nr_requeue should be INT_MAX for broadcast and 0 for
signal.
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 4452786225b8..8b35f51fe7b6 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -31,7 +31,7 @@ through gpiod_get(). For example:
<&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
<&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
- power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
+ power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
};
This property will make GPIOs 15, 16 and 17 available to the driver under the
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 4223c2d3b508..cfd31d94c872 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -26,6 +26,12 @@ Supported chips:
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
+ * Texas Instruments INA231
+ Prefix: 'ina231'
+ Addresses: I2C 0x40 - 0x4f
+ Datasheet: Publicly available at the Texas Instruments website
+ http://www.ti.com/
+
Author: Lothar Felten <l-felten@ti.com>
Description
@@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage.
The INA226 is a current shunt and power monitor with an I2C interface.
The INA226 monitors both a shunt voltage drop and bus supply voltage.
-The INA230 is a high or low side current shunt and power monitor with an I2C
-interface. The INA230 monitors both a shunt voltage drop and bus supply voltage.
+INA230 and INA231 are high or low side current shunt and power monitors
+with an I2C interface. The chips monitor both a shunt voltage drop and
+bus supply voltage.
-The shunt value in micro-ohms can be set via platform data or device tree.
-Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
+The shunt value in micro-ohms can be set via platform data or device tree at
+compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
+refer to Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
if the device tree is used.
+
+Additionally, the ina226 supports the update_interval attribute as described
+in Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
+bus and shunt voltage conversion times multiplied by the averaging rate. We
+don't touch the conversion times and only modify the number of averages. The
+lower limit of the update_interval is 2 ms; the upper limit is 2253 ms.
+The actual programmed interval may vary from the desired value.
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
new file mode 100644
index 000000000000..092fc10961fe
--- /dev/null
+++ b/Documentation/kasan.txt
@@ -0,0 +1,170 @@
+Kernel address sanitizer
+========================
+
+0. Overview
+===========
+
+Kernel Address sanitizer (KASan) is a dynamic memory error detector. It provides
+a fast and comprehensive solution for finding use-after-free and out-of-bounds
+bugs.
+
+KASan uses compile-time instrumentation for checking every memory access;
+therefore you will need GCC version 4.9.2 or later.
+
+Currently KASan is supported only on the x86_64 architecture and requires that the
+kernel be built with the SLUB allocator.
+
+1. Usage
+=========
+
+To enable KASAN, configure the kernel with:
+
+ CONFIG_KASAN = y
+
+and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline and
+inline are the two compiler instrumentation types. The former produces a
+smaller binary while the latter is 1.1 - 2 times faster. Inline instrumentation
+requires GCC 5.0 or later.
+
+Currently KASAN works only with the SLUB memory allocator.
+For better bug detection and nicer reports, enable CONFIG_STACKTRACE and put
+at least 'slub_debug=U' in the boot cmdline.
+
+To disable instrumentation for specific files or directories, add a line
+similar to the following to the respective kernel Makefile:
+
+ For a single file (e.g. main.o):
+ KASAN_SANITIZE_main.o := n
+
+ For all files in one directory:
+ KASAN_SANITIZE := n
+
+1.1 Error reports
+=================
+
+A typical out-of-bounds access report looks like this:
+
+==================================================================
+BUG: AddressSanitizer: out of bounds access in kmalloc_oob_right+0x65/0x75 [test_kasan] at addr ffff8800693bc5d3
+Write of size 1 by task modprobe/1689
+=============================================================================
+BUG kmalloc-128 (Not tainted): kasan error
+-----------------------------------------------------------------------------
+
+Disabling lock debugging due to kernel taint
+INFO: Allocated in kmalloc_oob_right+0x3d/0x75 [test_kasan] age=0 cpu=0 pid=1689
+ __slab_alloc+0x4b4/0x4f0
+ kmem_cache_alloc_trace+0x10b/0x190
+ kmalloc_oob_right+0x3d/0x75 [test_kasan]
+ init_module+0x9/0x47 [test_kasan]
+ do_one_initcall+0x99/0x200
+ load_module+0x2cb3/0x3b20
+ SyS_finit_module+0x76/0x80
+ system_call_fastpath+0x12/0x17
+INFO: Slab 0xffffea0001a4ef00 objects=17 used=7 fp=0xffff8800693bd728 flags=0x100000000004080
+INFO: Object 0xffff8800693bc558 @offset=1368 fp=0xffff8800693bc720
+
+Bytes b4 ffff8800693bc548: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+Object ffff8800693bc558: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc568: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc578: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc588: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc598: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc5a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc5b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+Object ffff8800693bc5c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
+Redzone ffff8800693bc5d8: cc cc cc cc cc cc cc cc ........
+Padding ffff8800693bc718: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+CPU: 0 PID: 1689 Comm: modprobe Tainted: G B 3.18.0-rc1-mm1+ #98
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
+ ffff8800693bc000 0000000000000000 ffff8800693bc558 ffff88006923bb78
+ ffffffff81cc68ae 00000000000000f3 ffff88006d407600 ffff88006923bba8
+ ffffffff811fd848 ffff88006d407600 ffffea0001a4ef00 ffff8800693bc558
+Call Trace:
+ [<ffffffff81cc68ae>] dump_stack+0x46/0x58
+ [<ffffffff811fd848>] print_trailer+0xf8/0x160
+ [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
+ [<ffffffff811ff0f5>] object_err+0x35/0x40
+ [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
+ [<ffffffff8120b9fa>] kasan_report_error+0x38a/0x3f0
+ [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
+ [<ffffffff8120b344>] ? kasan_unpoison_shadow+0x14/0x40
+ [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
+ [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
+ [<ffffffff8120a995>] __asan_store1+0x75/0xb0
+ [<ffffffffa0002601>] ? kmem_cache_oob+0x1d/0xc3 [test_kasan]
+ [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
+ [<ffffffffa0002065>] kmalloc_oob_right+0x65/0x75 [test_kasan]
+ [<ffffffffa00026b0>] init_module+0x9/0x47 [test_kasan]
+ [<ffffffff810002d9>] do_one_initcall+0x99/0x200
+ [<ffffffff811e4e5c>] ? __vunmap+0xec/0x160
+ [<ffffffff81114f63>] load_module+0x2cb3/0x3b20
+ [<ffffffff8110fd70>] ? m_show+0x240/0x240
+ [<ffffffff81115f06>] SyS_finit_module+0x76/0x80
+ [<ffffffff81cd3129>] system_call_fastpath+0x12/0x17
+Memory state around the buggy address:
+ ffff8800693bc300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc380: fc fc 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
+ ffff8800693bc400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc500: fc fc fc fc fc fc fc fc fc fc fc 00 00 00 00 00
+>ffff8800693bc580: 00 00 00 00 00 00 00 00 00 00 03 fc fc fc fc fc
+ ^
+ ffff8800693bc600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc700: fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8800693bc780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8800693bc800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+The first sections of the report describe the slub object where the bad
+access happened.
+See 'SLUB Debug output' section in Documentation/vm/slub.txt for details.
+
+In the last section the report shows memory state around the accessed address.
+Reading this part requires some more understanding of how KASAN works.
+
+Each 8 bytes of memory are encoded in one shadow byte as accessible,
+partially accessible, freed, or part of a redzone.
+We use the following encoding for each shadow byte: 0 means that all 8 bytes
+of the corresponding memory region are accessible; a number N (1 <= N <= 7) means
+that the first N bytes are accessible, and the other (8 - N) bytes are not;
+any negative value indicates that the entire 8-byte word is inaccessible.
+We use different negative values to distinguish between different kinds of
+inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
+
+In the report above the arrow points to the shadow byte 03, which means that
+the accessed address is partially accessible.
+
+
+2. Implementation details
+========================
+
+From a high level, our approach to memory error detection is similar to that
+of kmemcheck: use shadow memory to record whether each byte of memory is safe
+to access, and use compile-time instrumentation to check shadow memory on each
+memory access.
+
+AddressSanitizer dedicates 1/8 of kernel memory to its shadow memory
+(e.g. 16TB to cover 128TB on x86_64) and uses direct mapping with a scale and
+offset to translate a memory address to its corresponding shadow address.
+
+Here is the function which translates an address to its corresponding shadow
+address:
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ return ((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+}
+
+where KASAN_SHADOW_SCALE_SHIFT = 3.
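+
+For example, with KASAN_SHADOW_OFFSET assumed to be 0xdffffc0000000000 (the
+usual x86_64 value), the address 0xffff8800693bc5d3 from the report above is
+translated to the shadow byte at
+(0xffff8800693bc5d3 >> 3) + 0xdffffc0000000000 = 0xffffed000d2778ba.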
+
+Compile-time instrumentation is used for checking memory accesses. The compiler
+inserts function calls (__asan_load*(addr), __asan_store*(addr)) before each
+memory access of size 1, 2, 4, 8 or 16. These functions check whether the
+memory access is valid by checking the corresponding shadow memory.
+
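+As a rough, illustrative sketch (the helper names below are made up; the
+real checks live in mm/kasan/), the check inserted for a 1-byte access
+boils down to something like:
+
+static void check_1_byte_access(unsigned long addr)
+{
+	s8 shadow = *(s8 *)kasan_mem_to_shadow((void *)addr);
+
+	/* shadow == 0: all 8 bytes of the granule are accessible;
+	 * shadow == N (1..7): only the first N bytes are accessible;
+	 * shadow < 0: the whole granule is poisoned (redzone, freed, ...). */
+	if (unlikely(shadow && (s8)(addr & 7) >= shadow))
+		report_bad_access(addr);	/* placeholder for the report path */
+}
+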
+GCC 5.0 can also perform inline instrumentation. Instead of making function
+calls, GCC directly inserts the code to check the shadow memory. This option
+significantly enlarges the kernel, but gives a 1.1x-2x performance boost over
+an outline-instrumented kernel.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4df73da11adc..bfcb1a62a7b4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -970,6 +970,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
smh Use ARM semihosting calls for early console.
+ s3c2410,<addr>
+ s3c2412,<addr>
+ s3c2440,<addr>
+ s3c6400,<addr>
+ s5pv210,<addr>
+ exynos4210,<addr>
+ Use early console provided by serial driver available
+ on Samsung SoCs, requires selecting proper type and
+ a correct base address of the selected UART port. The
+ serial port must already be setup and configured.
+ Options are not yet supported.
+
earlyprintk= [X86,SH,BLACKFIN,ARM,M68k]
earlyprintk=vga
earlyprintk=efi
@@ -1277,6 +1289,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
i8042.notimeout [HW] Ignore timeout condition signalled by controller
i8042.reset [HW] Reset the controller during init and cleanup
i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port
i810= [HW,DRM]
@@ -1469,6 +1482,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
no_hwp
Do not enable hardware P state control (HWP)
if available.
+ hwp_only
+ Only load intel_pstate on systems which support
+ hardware P state control (HWP) if available.
intremap= [X86-64, Intel-IOMMU]
on enable Interrupt Remapping (default)
@@ -1493,6 +1509,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
forcesac
soft
pt [x86, IA-64]
+ nobypass [PPC/POWERNV]
+ Disable IOMMU bypass, using IOMMU for PCI devices.
io7= [HW] IO7 for Marvel based alpha systems
@@ -3206,6 +3224,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
retain_initrd [RAM] Keep initrd memory after extraction
+ rfkill.default_state=
+ 0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
+ etc. communication is blocked by default.
+ 1 Unblocked.
+
+ rfkill.master_switch_mode=
+ 0 The "airplane mode" button does nothing.
+ 1 The "airplane mode" button toggles between everything
+ blocked and the previous configuration.
+ 2 The "airplane mode" button toggles between everything
+ blocked and everything unblocked.
+
rhash_entries= [KNL,NET]
Set number of hash buckets for route cache
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 4227ec2e3ab2..1488b6525eb6 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -702,7 +702,8 @@ a virtual address that is no longer valid (module init sections, module
virtual addresses that correspond to modules that've been unloaded),
such probes are marked with [GONE]. If the probe is temporarily disabled,
such probes are marked with [DISABLED]. If the probe is optimized, it is
-marked with [OPTIMIZED].
+marked with [OPTIMIZED]. If the probe is ftrace-based, it is marked with
+[FTRACE].
/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
diff --git a/Documentation/locking/00-INDEX b/Documentation/locking/00-INDEX
new file mode 100644
index 000000000000..c256c9bee2a4
--- /dev/null
+++ b/Documentation/locking/00-INDEX
@@ -0,0 +1,16 @@
+00-INDEX
+ - this file.
+lockdep-design.txt
+ - documentation on the runtime locking correctness validator.
+lockstat.txt
+ - info on collecting statistics on locks (and contention).
+mutex-design.txt
+ - info on the generic mutex subsystem.
+rt-mutex-design.txt
+ - description of the RealTime mutex implementation design.
+rt-mutex.txt
+ - desc. of RT-mutex subsystem with PI (Priority Inheritance) support.
+spinlocks.txt
+ - info on using spinlocks to provide exclusive access in kernel.
+ww-mutex-design.txt
+	- Intro to Mutex wait/wound deadlock handling.
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 5dbc99c04f6e..5001280e9d82 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -34,7 +34,7 @@ The validator tracks lock-class usage history into 4n + 1 separate state bits:
- 'ever held with STATE enabled'
- 'ever held as readlock with STATE enabled'
-Where STATE can be either one of (kernel/lockdep_states.h)
+Where STATE can be either one of (kernel/locking/lockdep_states.h)
- hardirq
- softirq
- reclaim_fs
diff --git a/Documentation/locking/lockstat.txt b/Documentation/locking/lockstat.txt
index 7428773a1e69..568bbbacee91 100644
--- a/Documentation/locking/lockstat.txt
+++ b/Documentation/locking/lockstat.txt
@@ -121,6 +121,11 @@ show the header with column descriptions. Lines 05-18 and 20-31 show the actual
statistics. These statistics come in two parts; the actual stats separated by a
short separator (line 08, 13) from the contention points.
+Lines 09-12 show the first 4 recorded contention points (the code
+which tries to get the lock) and lines 14-17 show the first 4 recorded
+contended points (the lock holder). It is possible that the max
+con-bounces point is missing in the statistics.
+
The first lock (05-18) is a read/write lock, and shows two lines above the
short separator. The contention points don't match the column descriptors,
they have two: contentions and [<IP>] symbol. The second set of contention
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 70a09f8a0383..ca2387ef27ab 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed:
STORE *(A + 4) = Y; STORE *A = X;
STORE {*A, *(A + 4) } = {X, Y};
+And there are anti-guarantees:
+
+ (*) These guarantees do not apply to bitfields, because compilers often
+ generate code to modify these using non-atomic read-modify-write
+ sequences. Do not attempt to use bitfields to synchronize parallel
+ algorithms.
+
+ (*) Even in cases where bitfields are protected by locks, all fields
+ in a given bitfield must be protected by one lock. If two fields
+ in a given bitfield are protected by different locks, the compiler's
+ non-atomic read-modify-write sequences can cause an update to one
+ field to corrupt the value of an adjacent field.
+
+ (*) These guarantees apply only to properly aligned and sized scalar
+ variables. "Properly sized" currently means variables that are
+ the same size as "char", "short", "int" and "long". "Properly
+ aligned" means the natural alignment, thus no constraints for
+ "char", two-byte alignment for "short", four-byte alignment for
+ "int", and either four-byte or eight-byte alignment for "long",
+ on 32-bit and 64-bit systems, respectively. Note that these
+ guarantees were introduced into the C11 standard, so beware when
+ using older pre-C11 compilers (for example, gcc 4.6). The portion
+ of the standard containing this guarantee is Section 3.14, which
+ defines "memory location" as follows:
+
+ memory location
+ either an object of scalar type, or a maximal sequence
+ of adjacent bit-fields all having nonzero width
+
+ NOTE 1: Two threads of execution can update and access
+ separate memory locations without interfering with
+ each other.
+
+ NOTE 2: A bit-field and an adjacent non-bit-field member
+ are in separate memory locations. The same applies
+ to two bit-fields, if one is declared inside a nested
+ structure declaration and the other is not, or if the two
+ are separated by a zero-length bit-field declaration,
+ or if they are separated by a non-bit-field member
+ declaration. It is not safe to concurrently update two
+ bit-fields in the same structure if all members declared
+ between them are also bit-fields, no matter what the
+ sizes of those intervening bit-fields happen to be.
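+
+As a small illustration of the bit-field points above (the structure and
+lock names are invented for the example):
+
+	struct flags {
+		int a:1;	/* nominally protected by lock_a */
+		int b:1;	/* nominally protected by lock_b */
+	};
+
+Here 'a' and 'b' share one memory location, so the compiler may implement a
+store to either one as a read-modify-write of the containing word; an update
+of 'a' under lock_a can then silently overwrite a concurrent update of 'b'
+made under lock_b.  Making each field a full "int", or separating them with
+a non-bit-field member or a zero-length bit-field, places them in separate
+memory locations and avoids the problem.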
+
=========================
WHAT ARE MEMORY BARRIERS?
@@ -750,7 +794,7 @@ In summary:
However, they do -not- guarantee any other sort of ordering:
Not prior loads against later loads, nor prior stores against
later anything. If you need these other forms of ordering,
- use smb_rmb(), smp_wmb(), or, in the case of prior stores and
+ use smp_rmb(), smp_wmb(), or, in the case of prior stores and
later loads, smp_mb().
(*) If both legs of the "if" statement begin with identical stores
diff --git a/Documentation/misc-devices/mei/mei-client-bus.txt b/Documentation/misc-devices/mei/mei-client-bus.txt
index f83910a8ce76..743be4ec8989 100644
--- a/Documentation/misc-devices/mei/mei-client-bus.txt
+++ b/Documentation/misc-devices/mei/mei-client-bus.txt
@@ -1,9 +1,10 @@
Intel(R) Management Engine (ME) Client bus API
-===============================================
+==============================================
Rationale
=========
+
MEI misc character device is useful for dedicated applications to send and receive
data to the many FW appliance found in Intel's ME from the user space.
However for some of the ME functionalities it make sense to leverage existing software
@@ -17,7 +18,8 @@ the existing code.
MEI CL bus API
-===========
+==============
+
A driver implementation for an MEI Client is very similar to existing bus
based device drivers. The driver registers itself as an MEI CL bus driver through
the mei_cl_driver structure:
@@ -55,6 +57,7 @@ received buffers.
Example
=======
+
As a theoretical example let's pretend the ME comes with a "contact" NFC IP.
The driver init and exit routines for this device would look like:
@@ -69,11 +72,11 @@ static struct mei_cl_device_id contact_mei_cl_tbl[] = {
MODULE_DEVICE_TABLE(mei_cl, contact_mei_cl_tbl);
static struct mei_cl_driver contact_driver = {
- .id_table = contact_mei_tbl,
- .name = CONTACT_DRIVER_NAME,
+ .id_table = contact_mei_tbl,
+ .name = CONTACT_DRIVER_NAME,
- .probe = contact_probe,
- .remove = contact_remove,
+ .probe = contact_probe,
+ .remove = contact_remove,
};
static int contact_init(void)
@@ -109,7 +112,7 @@ int contact_probe(struct mei_cl_device *dev, struct mei_cl_device_id *id)
mei_cl_register_event_cb(dev, contact_event_cb, contact);
return 0;
- }
+}
In the probe routine the driver first enable the MEI device and then registers
an ME bus event handler which is as close as it can get to registering a
diff --git a/Documentation/misc-devices/mei/mei.txt b/Documentation/misc-devices/mei/mei.txt
index 15bba1aeba9a..8d47501bba0a 100644
--- a/Documentation/misc-devices/mei/mei.txt
+++ b/Documentation/misc-devices/mei/mei.txt
@@ -1,8 +1,8 @@
Intel(R) Management Engine Interface (Intel(R) MEI)
-=======================
+===================================================
Introduction
-=======================
+============
The Intel Management Engine (Intel ME) is an isolated and protected computing
resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
@@ -19,7 +19,7 @@ each client has its own protocol. The protocol is message-based with a
header and payload up to 512 bytes.
Prominent usage of the Intel ME Interface is to communicate with Intel(R)
-Active Management Technology (Intel AMT)implemented in firmware running on
+Active Management Technology (Intel AMT) implemented in firmware running on
the Intel ME.
Intel AMT provides the ability to manage a host remotely out-of-band (OOB)
@@ -44,8 +44,9 @@ HTTP/S that are received from a remote management console application.
For more information about Intel AMT:
http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+
Intel MEI Driver
-=======================
+================
The driver exposes a misc device called /dev/mei.
@@ -91,8 +92,10 @@ A code snippet for an application communicating with Intel AMTHI client:
[...]
close(fd);
-IOCTL:
-======
+
+IOCTL
+=====
+
The Intel MEI Driver supports the following IOCTL command:
IOCTL_MEI_CONNECT_CLIENT Connect to firmware Feature (client).
@@ -122,58 +125,61 @@ The Intel MEI Driver supports the following IOCTL command:
data that can be sent or received. (e.g. if MTU=2K, can send
requests up to bytes 2k and received responses up to 2k bytes).
-Intel ME Applications:
-==============
-
-1) Intel Local Management Service (Intel LMS)
-
- Applications running locally on the platform communicate with Intel AMT Release
- 2.0 and later releases in the same way that network applications do via SOAP
- over HTTP (deprecated starting with Release 6.0) or with WS-Management over
- SOAP over HTTP. This means that some Intel AMT features can be accessed from a
- local application using the same network interface as a remote application
- communicating with Intel AMT over the network.
-
- When a local application sends a message addressed to the local Intel AMT host
- name, the Intel LMS, which listens for traffic directed to the host name,
- intercepts the message and routes it to the Intel MEI.
- For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
- Under "About Intel AMT" => "Local Access"
-
- For downloading Intel LMS:
- http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
-
- The Intel LMS opens a connection using the Intel MEI driver to the Intel LMS
- firmware feature using a defined UUID and then communicates with the feature
- using a protocol called Intel AMT Port Forwarding Protocol(Intel APF protocol).
- The protocol is used to maintain multiple sessions with Intel AMT from a
- single application.
-
- See the protocol specification in the Intel AMT Software Development Kit(SDK)
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
- Under "SDK Resources" => "Intel(R) vPro(TM) Gateway(MPS)"
- => "Information for Intel(R) vPro(TM) Gateway Developers"
- => "Description of the Intel AMT Port Forwarding (APF)Protocol"
-
- 2) Intel AMT Remote configuration using a Local Agent
- A Local Agent enables IT personnel to configure Intel AMT out-of-the-box
- without requiring installing additional data to enable setup. The remote
- configuration process may involve an ISV-developed remote configuration
- agent that runs on the host.
- For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
- Under "Setup and Configuration of Intel AMT" =>
- "SDK Tools Supporting Setup and Configuration" =>
- "Using the Local Agent Sample"
-
- An open source Intel AMT configuration utility, implementing a local agent
- that accesses the Intel MEI driver, can be found here:
- http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
-
-
-Intel AMT OS Health Watchdog:
-=============================
+
+Intel ME Applications
+=====================
+
+ 1) Intel Local Management Service (Intel LMS)
+
+ Applications running locally on the platform communicate with Intel AMT Release
+ 2.0 and later releases in the same way that network applications do via SOAP
+ over HTTP (deprecated starting with Release 6.0) or with WS-Management over
+ SOAP over HTTP. This means that some Intel AMT features can be accessed from a
+ local application using the same network interface as a remote application
+ communicating with Intel AMT over the network.
+
+ When a local application sends a message addressed to the local Intel AMT host
+ name, the Intel LMS, which listens for traffic directed to the host name,
+ intercepts the message and routes it to the Intel MEI.
+ For more information:
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "About Intel AMT" => "Local Access"
+
+ For downloading Intel LMS:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
+
+ The Intel LMS opens a connection using the Intel MEI driver to the Intel LMS
+ firmware feature using a defined UUID and then communicates with the feature
+ using a protocol called Intel AMT Port Forwarding Protocol (Intel APF protocol).
+ The protocol is used to maintain multiple sessions with Intel AMT from a
+ single application.
+
+ See the protocol specification in the Intel AMT Software Development Kit (SDK)
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "SDK Resources" => "Intel(R) vPro(TM) Gateway (MPS)"
+ => "Information for Intel(R) vPro(TM) Gateway Developers"
+ => "Description of the Intel AMT Port Forwarding (APF) Protocol"
+
+ 2) Intel AMT Remote configuration using a Local Agent
+
+ A Local Agent enables IT personnel to configure Intel AMT out-of-the-box
+ without requiring installing additional data to enable setup. The remote
+ configuration process may involve an ISV-developed remote configuration
+ agent that runs on the host.
+ For more information:
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "Setup and Configuration of Intel AMT" =>
+ "SDK Tools Supporting Setup and Configuration" =>
+ "Using the Local Agent Sample"
+
+ An open source Intel AMT configuration utility, implementing a local agent
+ that accesses the Intel MEI driver, can be found here:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
+
+
+Intel AMT OS Health Watchdog
+============================
+
The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
Whenever the OS hangs or crashes, Intel AMT will send an event
to any subscriber to this event. This mechanism means that
@@ -192,8 +198,10 @@ watchdog is 120 seconds.
If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
the Intel MEI driver will disable the sending of heartbeats.
-Supported Chipsets:
+
+Supported Chipsets
==================
+
7 Series Chipset Family
6 Series Chipset Family
5 Series Chipset Family
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 557b6ef70c26..df27a1a50776 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -1,7 +1,5 @@
00-INDEX
- this file
-3c505.txt
- - information on the 3Com EtherLink Plus (3c505) driver.
3c509.txt
- information on the 3Com Etherlink III Series Ethernet cards.
6pack.txt
@@ -24,6 +22,8 @@ README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
alias.txt
- info on using alias network devices.
+altera_tse.txt
+ - Altera Triple-Speed Ethernet controller.
arcnet-hardware.txt
- tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
arcnet.txt
@@ -42,6 +42,8 @@ bridge.txt
- where to get user space programs for ethernet bridging with Linux.
can.txt
- documentation on CAN protocol family.
+cdc_mbim.txt
+ - 3G/LTE USB modem (Mobile Broadband Interface Model)
cops.txt
- info on the COPS LocalTalk Linux driver
cs89x0.txt
@@ -54,6 +56,8 @@ cxgb.txt
- Release Notes for the Chelsio N210 Linux device driver.
dccp.txt
- the Datagram Congestion Control Protocol (DCCP) (RFC 4340..42).
+dctcp.txt
+ - DataCenter TCP congestion control
de4x5.txt
- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
decnet.txt
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 2236d6dcb7da..0a2859a8ee7e 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -234,7 +234,7 @@ solution for a couple of reasons:
mechanisms. Inside this filter definition the (interested) type of
errors may be selected. The reception of error messages is disabled
by default. The format of the CAN error message frame is briefly
- described in the Linux header file "include/linux/can/error.h".
+ described in the Linux header file "include/uapi/linux/can/error.h".
4. How to use SocketCAN
------------------------
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 58d08f8d8d80..9930ecfbb465 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -279,8 +279,8 @@ Possible BPF extensions are shown in the following table:
hatype skb->dev->type
rxhash skb->hash
cpu raw_smp_processor_id()
- vlan_tci vlan_tx_tag_get(skb)
- vlan_pr vlan_tx_tag_present(skb)
+ vlan_tci skb_vlan_tag_get(skb)
+ vlan_pr skb_vlan_tag_present(skb)
rand prandom_u32()
These extensions can also be prefixed with '#'.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9bffdfc648dc..1b8c964b0d17 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
route/max_size - INTEGER
Maximum number of routes allowed in the kernel. Increase
this when using large numbers of interfaces and/or routes.
+	From Linux kernel 3.6 onwards, this is deprecated for IPv4
+	as the route cache is no longer used.
neigh/default/gc_thresh1 - INTEGER
Minimum number of entries to keep. Garbage collector will not
@@ -288,6 +290,28 @@ tcp_frto - INTEGER
By default it's enabled with a non-zero value. 0 disables F-RTO.
+tcp_invalid_ratelimit - INTEGER
+ Limit the maximal rate for sending duplicate acknowledgments
+ in response to incoming TCP packets that are for an existing
+ connection but that are invalid due to any of these reasons:
+
+ (a) out-of-window sequence number,
+ (b) out-of-window acknowledgment number, or
+ (c) PAWS (Protection Against Wrapped Sequence numbers) check failure
+
+ This can help mitigate simple "ack loop" DoS attacks, wherein
+ a buggy or malicious middlebox or man-in-the-middle can
+	rewrite TCP header fields in a manner that causes each endpoint
+ to think that the other is sending invalid TCP segments, thus
+ causing each side to send an unterminating stream of duplicate
+ acknowledgments for invalid segments.
+
+ Using 0 disables rate-limiting of dupacks in response to
+ invalid segments; otherwise this value specifies the minimal
+ space between sending such dupacks, in milliseconds.
+
+ Default: 500 (milliseconds).
+
tcp_keepalive_time - INTEGER
How often TCP sends out keepalive messages when keepalive is enabled.
Default: 2hours.
@@ -1285,6 +1309,13 @@ accept_ra_rtr_pref - BOOLEAN
Functional default: enabled if accept_ra is enabled.
disabled if accept_ra is disabled.
+accept_ra_mtu - BOOLEAN
+ Apply the MTU value specified in RA option 5 (RFC4861). If
+ disabled, the MTU specified in the RA will be ignored.
+
+ Functional default: enabled if accept_ra is enabled.
+ disabled if accept_ra is disabled.
+
accept_redirects - BOOLEAN
Accept Redirects.
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index c6af4bac5aa8..54f10478e8e3 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -199,16 +199,9 @@ frame header.
TX limitations
--------------
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015 the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
Example
-------
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 70da5086153d..f55599c62c9d 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -11,7 +11,8 @@ nf_conntrack_buckets - INTEGER (read-only)
Size of hash table. If not specified as parameter during module
loading, the default size is calculated by dividing total memory
by 16384 to determine the number of buckets but the hash table will
- never have fewer than 32 or more than 16384 buckets.
+	never have fewer than 32 and is limited to 16384 buckets.  For systems
+	with more than 4GB of memory it will be 65536 buckets.
nf_conntrack_checksum - BOOLEAN
0 - disabled
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
index 37c20ee2455e..b3b9ac61d29d 100644
--- a/Documentation/networking/openvswitch.txt
+++ b/Documentation/networking/openvswitch.txt
@@ -131,6 +131,19 @@ performs best-effort detection of overlapping wildcarded flows and may reject
some but not all of them. However, this behavior may change in future versions.
+Unique flow identifiers
+-----------------------
+
+An alternative to using the original match portion of a key as the handle for
+flow identification is a unique flow identifier, or "UFID". UFIDs are optional
+for both the kernel and user space program.
+
+User space programs that support UFID are expected to provide it during flow
+setup in addition to the flow, then refer to the flow using the UFID for all
+future operations. The kernel is not required to index flows by the original
+flow key if a UFID is specified.
+
+
Basic rule for evolving flow keys
---------------------------------
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index a5c784c89312..5f0922613f1a 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -162,6 +162,27 @@ SOF_TIMESTAMPING_OPT_CMSG:
option IP_PKTINFO simultaneously.
+SOF_TIMESTAMPING_OPT_TSONLY:
+
+ Applies to transmit timestamps only. Makes the kernel return the
+ timestamp as a cmsg alongside an empty packet, as opposed to
+ alongside the original packet. This reduces the amount of memory
+ charged to the socket's receive budget (SO_RCVBUF) and delivers
+ the timestamp even if sysctl net.core.tstamp_allow_data is 0.
+ This option disables SOF_TIMESTAMPING_OPT_CMSG.
+
+
+New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
+disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
+regardless of the setting of sysctl net.core.tstamp_allow_data.
+
+An exception is when a process needs additional cmsg data, for
+instance SOL_IP/IP_PKTINFO to detect the egress network interface.
+Then pass option SOF_TIMESTAMPING_OPT_CMSG. This option depends on
+having access to the contents of the original packet, so cannot be
+combined with SOF_TIMESTAMPING_OPT_TSONLY.
+
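+A minimal sketch, assuming a datagram socket fd created and connected
+elsewhere (as in txtimestamp.c below), of enabling the recommended
+combination; error handling is elided and the return value of setsockopt()
+should be checked in real code:
+
+  #include <linux/net_tstamp.h>
+  #include <sys/socket.h>
+
+  int val = SOF_TIMESTAMPING_TX_SOFTWARE |
+            SOF_TIMESTAMPING_SOFTWARE |
+            SOF_TIMESTAMPING_OPT_ID |
+            SOF_TIMESTAMPING_OPT_TSONLY;
+
+  setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+
+The timestamps are then read from the socket's error queue with
+recvmsg() and MSG_ERRQUEUE; with OPT_TSONLY set the returned packet
+payload is empty.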
+
1.4 Bytestream Timestamps
The SO_TIMESTAMPING interface supports timestamping of bytes in a
diff --git a/Documentation/networking/timestamping/txtimestamp.c b/Documentation/networking/timestamping/txtimestamp.c
index 876f71c5625a..8217510d3842 100644
--- a/Documentation/networking/timestamping/txtimestamp.c
+++ b/Documentation/networking/timestamping/txtimestamp.c
@@ -30,6 +30,8 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#define _GNU_SOURCE
+
#include <arpa/inet.h>
#include <asm/types.h>
#include <error.h>
@@ -59,14 +61,6 @@
#include <time.h>
#include <unistd.h>
-/* ugly hack to work around netinet/in.h and linux/ipv6.h conflicts */
-#ifndef in6_pktinfo
-struct in6_pktinfo {
- struct in6_addr ipi6_addr;
- int ipi6_ifindex;
-};
-#endif
-
/* command line parameters */
static int cfg_proto = SOCK_STREAM;
static int cfg_ipproto = IPPROTO_TCP;
@@ -76,6 +70,7 @@ static int do_ipv6 = 1;
static int cfg_payload_len = 10;
static bool cfg_show_payload;
static bool cfg_do_pktinfo;
+static bool cfg_loop_nodata;
static uint16_t dest_port = 9000;
static struct sockaddr_in daddr;
@@ -147,6 +142,9 @@ static void print_payload(char *data, int len)
{
int i;
+ if (!len)
+ return;
+
if (len > 70)
len = 70;
@@ -183,6 +181,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
struct sock_extended_err *serr = NULL;
struct scm_timestamping *tss = NULL;
struct cmsghdr *cm;
+ int batch = 0;
for (cm = CMSG_FIRSTHDR(msg);
cm && cm->cmsg_len;
@@ -215,10 +214,18 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
} else
fprintf(stderr, "unknown cmsg %d,%d\n",
cm->cmsg_level, cm->cmsg_type);
+
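+		/* one recvmsg() may batch several error-queue cmsgs: print a
+		 * timestamp as soon as both its sock_extended_err and
+		 * scm_timestamping cmsgs have been seen, then reset for the next */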
+ if (serr && tss) {
+ print_timestamp(tss, serr->ee_info, serr->ee_data,
+ payload_len);
+ serr = NULL;
+ tss = NULL;
+ batch++;
+ }
}
- if (serr && tss)
- print_timestamp(tss, serr->ee_info, serr->ee_data, payload_len);
+ if (batch > 1)
+ fprintf(stderr, "batched %d timestamps\n", batch);
}
static int recv_errmsg(int fd)
@@ -250,7 +257,7 @@ static int recv_errmsg(int fd)
if (ret == -1 && errno != EAGAIN)
error(1, errno, "recvmsg");
- if (ret > 0) {
+ if (ret >= 0) {
__recv_errmsg_cmsg(&msg, ret);
if (cfg_show_payload)
print_payload(data, cfg_payload_len);
@@ -315,6 +322,9 @@ static void do_test(int family, unsigned int opt)
opt |= SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_OPT_CMSG |
SOF_TIMESTAMPING_OPT_ID;
+ if (cfg_loop_nodata)
+ opt |= SOF_TIMESTAMPING_OPT_TSONLY;
+
if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
(char *) &opt, sizeof(opt)))
error(1, 0, "setsockopt timestamping");
@@ -384,6 +394,7 @@ static void __attribute__((noreturn)) usage(const char *filepath)
" -h: show this message\n"
" -I: request PKTINFO\n"
" -l N: send N bytes at a time\n"
+ " -n: set no-payload option\n"
" -r: use raw\n"
" -R: use raw (IP_HDRINCL)\n"
" -p N: connect to port N\n"
@@ -398,7 +409,7 @@ static void parse_opt(int argc, char **argv)
int proto_count = 0;
char c;
- while ((c = getopt(argc, argv, "46hIl:p:rRux")) != -1) {
+ while ((c = getopt(argc, argv, "46hIl:np:rRux")) != -1) {
switch (c) {
case '4':
do_ipv6 = 0;
@@ -409,6 +420,9 @@ static void parse_opt(int argc, char **argv)
case 'I':
cfg_do_pktinfo = true;
break;
+ case 'n':
+ cfg_loop_nodata = true;
+ break;
case 'r':
proto_count++;
cfg_proto = SOCK_RAW;
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index 8e1ddec2c78a..ae57b9ea0d41 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -43,12 +43,12 @@ and it's also much more restricted in the latter case:
even if this was created by another process.
- If possible, the file mapping will be directly on the backing device
- if the backing device has the BDI_CAP_MAP_DIRECT capability and
+ if the backing device has the NOMMU_MAP_DIRECT capability and
appropriate mapping protection capabilities. Ramfs, romfs, cramfs
and mtd might all permit this.
- If the backing device device can't or won't permit direct sharing,
- but does have the BDI_CAP_MAP_COPY capability, then a copy of the
+ but does have the NOMMU_MAP_COPY capability, then a copy of the
appropriate bit of the file will be read into a contiguous bit of
memory and any extraneous space beyond the EOF will be cleared
@@ -220,7 +220,7 @@ directly (can't be copied).
The file->f_op->mmap() operation will be called to actually inaugurate the
mapping. It can be rejected at that point. Returning the ENOSYS error will
-cause the mapping to be copied instead if BDI_CAP_MAP_COPY is specified.
+cause the mapping to be copied instead if NOMMU_MAP_COPY is specified.
The vm_ops->close() routine will be invoked when the last mapping on a chardev
is removed. An existing mapping will be shared, partially or not, if possible
@@ -232,7 +232,7 @@ want to handle it, despite the fact it's got an operation. For instance, it
might try directing the call to a secondary driver which turns out not to
implement it. Such is the case for the framebuffer driver which attempts to
direct the call to the device-specific driver. Under such circumstances, the
-mapping request will be rejected if BDI_CAP_MAP_COPY is not specified, and a
+mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a
copy mapped otherwise.
IMPORTANT NOTE:
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index beefb9f82902..f3ac05cc23e4 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -270,6 +270,8 @@ characters, each representing a particular tainted value.
15: 'L' if a soft lockup has previously occurred on the system.
+ 16: 'K' if the kernel has been live patched.
+
The primary reason for the 'Tainted: ' string is to tell kernel
debuggers if this is a clean kernel or if anything unusual has
occurred. Tainting is permanent: even if an offending module is
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
index 1bdfa0443773..4685aee197fd 100644
--- a/Documentation/power/s2ram.txt
+++ b/Documentation/power/s2ram.txt
@@ -69,6 +69,10 @@ Reason for this is that the RTC is the only reliably available piece of
hardware during resume operations where a value can be set that will
survive a reboot.
+pm_trace is not compatible with asynchronous suspend, so it turns
+asynchronous suspend off (which may work around timing or
+ordering-sensitive bugs).
+
Consequence is that after a resume (even if it is successful) your system
clock will have a value corresponding to the magic number instead of the
correct date/time! It is therefore advisable to use a program like ntp-date
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 427e89712f4a..2ee6ef9a6554 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -25,6 +25,9 @@ whether they can be changed or not:
- soft block: writable radio block (need not be readable) that is set by
the system software.
+The rfkill subsystem has two parameters, rfkill.default_state and
+rfkill.master_switch_mode, which are documented in kernel-parameters.txt.
+
2. Implementation details
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 08911b5c6b0e..3df8babcdc41 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -1,14 +1,14 @@
-
- Debugging on Linux for s/390 & z/Architecture
- by
- Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
- Copyright (C) 2000-2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- Best viewed with fixed width fonts
+
+ Debugging on Linux for s/390 & z/Architecture
+ by
+ Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ Copyright (C) 2000-2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ Best viewed with fixed width fonts
Overview of Document:
=====================
-This document is intended to give a good overview of how to debug
-Linux for s/390 & z/Architecture. It isn't intended as a complete reference & not a
+This document is intended to give a good overview of how to debug Linux for
+s/390 and z/Architecture. It is not intended as a complete reference and not a
tutorial on the fundamentals of C & assembly. It doesn't go into
390 IO in any detail. It is intended to complement the documents in the
reference section below & any other worthwhile references you get.
@@ -35,7 +35,6 @@ Examining core dumps
ldd
Debugging modules
The proc file system
-Starting points for debugging scripting languages etc.
SysRq
References
Special Thanks
@@ -44,18 +43,20 @@ Register Set
============
The current architectures have the following registers.
-16 General propose registers, 32 bit on s/390 64 bit on z/Architecture, r0-r15 or gpr0-gpr15 used for arithmetic & addressing.
-
-16 Control registers, 32 bit on s/390 64 bit on z/Architecture, ( cr0-cr15 kernel usage only ) used for memory management,
-interrupt control,debugging control etc.
-
-16 Access registers ( ar0-ar15 ) 32 bit on s/390 & z/Architecture
-not used by normal programs but potentially could
-be used as temporary storage. Their main purpose is their 1 to 1
-association with general purpose registers and are used in
-the kernel for copying data between kernel & user address spaces.
-Access register 0 ( & access register 1 on z/Architecture ( needs 64 bit
-pointer ) ) is currently used by the pthread library as a pointer to
+16 General purpose registers, 32 bit on s/390 and 64 bit on z/Architecture,
+r0-r15 (or gpr0-gpr15), used for arithmetic and addressing.
+
+16 Control registers, 32 bit on s/390 and 64 bit on z/Architecture, cr0-cr15,
+kernel usage only, used for memory management, interrupt control, debugging
+control etc.
+
+16 Access registers (ar0-ar15), 32 bit on both s/390 and z/Architecture,
+normally not used by normal programs but potentially could be used as
+temporary storage. These registers have a 1:1 association with general
+purpose registers and are designed to be used in the so-called access
+register mode to select different address spaces.
+Access register 0 (and access register 1 on z/Architecture, which needs a
+64 bit pointer) is currently used by the pthread library as a pointer to
the current running threads private area.
16 64 bit floating point registers (fp0-fp15 ) IEEE & HFP floating
@@ -90,18 +91,19 @@ s/390 z/Architecture
6 6 Input/Output interrupt Mask
-7 7 External interrupt Mask used primarily for interprocessor signalling &
- clock interrupts.
+7 7 External interrupt Mask used primarily for interprocessor
+ signalling and clock interrupts.
-8-11 8-11 PSW Key used for complex memory protection mechanism not used under linux
+8-11 8-11 PSW Key used for complex memory protection mechanism
+ (not used under linux)
12 12 1 on s/390 0 on z/Architecture
13 13 Machine Check Mask 1=enable machine check interrupts
-14 14 Wait State set this to 1 to stop the processor except for interrupts & give
- time to other LPARS used in CPU idle in the kernel to increase overall
- usage of processor resources.
+14 14 Wait State. Set this to 1 to stop the processor except for
+ interrupts and give time to other LPARS. Used in CPU idle in
+ the kernel to increase overall usage of processor resources.
15 15 Problem state ( if set to 1 certain instructions are disabled )
all linux user programs run with this bit 1
@@ -165,21 +167,23 @@ s/390 z/Architecture
when loading the address with LPSWE otherwise a
specification exception occurs, LPSW is fully backward
compatible.
-
-
+
+
Prefix Page(s)
---------------
+--------------
This per cpu memory area is too intimately tied to the processor not to mention.
-It exists between the real addresses 0-4096 on s/390 & 0-8192 z/Architecture & is exchanged
-with a 1 page on s/390 or 2 pages on z/Architecture in absolute storage by the set
-prefix instruction in linux'es startup.
-This page is mapped to a different prefix for each processor in an SMP configuration
-( assuming the os designer is sane of course :-) ).
-Bytes 0-512 ( 200 hex ) on s/390 & 0-512,4096-4544,4604-5119 currently on z/Architecture
-are used by the processor itself for holding such information as exception indications &
-entry points for exceptions.
-Bytes after 0xc00 hex are used by linux for per processor globals on s/390 & z/Architecture
-( there is a gap on z/Architecture too currently between 0xc00 & 1000 which linux uses ).
+It exists between the real addresses 0-4096 on s/390 and between 0-8192 on
+z/Architecture and is exchanged with one page on s/390 or two pages on
+z/Architecture in absolute storage by the set prefix instruction during Linux
+startup.
+This page is mapped to a different prefix for each processor in an SMP
+configuration (assuming the OS designer is sane of course).
+Bytes 0-512 (200 hex) on s/390 and 0-512, 4096-4544, 4604-5119 currently on
+z/Architecture are used by the processor itself for holding such information
+as exception indications and entry points for exceptions.
+Bytes after 0xc00 hex are used by linux for per processor globals on s/390 and
+z/Architecture (there is a gap on z/Architecture currently between 0xc00 and
+0x1000, too, which is used by Linux).
The closest thing to this on traditional architectures is the interrupt
vector table. This is a good thing & does simplify some of the kernel coding
however it means that we now cannot catch stray NULL pointers in the
@@ -192,26 +196,26 @@ Address Spaces on Intel Linux
The traditional Intel Linux is approximately mapped as follows forgive
the ascii art.
-0xFFFFFFFF 4GB Himem *****************
- * *
- * Kernel Space *
- * *
- ***************** ****************
-User Space Himem (typically 0xC0000000 3GB )* User Stack * * *
- ***************** * *
- * Shared Libs * * Next Process *
- ***************** * to *
- * * <== * Run * <==
- * User Program * * *
- * Data BSS * * *
- * Text * * *
- * Sections * * *
-0x00000000 ***************** ****************
-
-Now it is easy to see that on Intel it is quite easy to recognise a kernel address
-as being one greater than user space himem ( in this case 0xC0000000).
-& addresses of less than this are the ones in the current running program on this
-processor ( if an smp box ).
+0xFFFFFFFF 4GB Himem *****************
+ * *
+ * Kernel Space *
+ * *
+ ***************** ****************
+User Space Himem * User Stack * * *
+(typically 0xC0000000 3GB ) ***************** * *
+ * Shared Libs * * Next Process *
+ ***************** * to *
+ * * <== * Run * <==
+ * User Program * * *
+ * Data BSS * * *
+ * Text * * *
+ * Sections * * *
+0x00000000 ***************** ****************
+
+Now it is easy to see that on Intel it is quite easy to recognise a kernel
+address as being one greater than user space himem (in this case 0xC0000000),
+and addresses of less than this are the ones in the current running program on
+this processor (if an smp box).
If using the virtual machine ( VM ) as a debugger it is quite difficult to
know which user process is running as the address space you are looking at
could be from any process in the run queue.
@@ -247,8 +251,8 @@ Our addressing scheme is basically as follows:
Himem 0x7fffffff 2GB on s/390 ***************** ****************
currently 0x3ffffffffff (2^42)-1 * User Stack * * *
on z/Architecture. ***************** * *
- * Shared Libs * * *
- ***************** * *
+ * Shared Libs * * *
+ ***************** * *
* * * Kernel *
* User Program * * *
* Data BSS * * *
@@ -301,10 +305,10 @@ Virtual Addresses on s/390 & z/Architecture
===========================================
A virtual address on s/390 is made up of 3 parts
-The SX ( segment index, roughly corresponding to the PGD & PMD in linux terminology )
-being bits 1-11.
-The PX ( page index, corresponding to the page table entry (pte) in linux terminology )
-being bits 12-19.
+The SX (segment index, roughly corresponding to the PGD & PMD in Linux
+terminology) being bits 1-11.
+The PX (page index, corresponding to the page table entry (pte) in Linux
+terminology) being bits 12-19.
The remaining bits BX (the byte index are the offset in the page )
i.e. bits 20 to 31.
@@ -368,9 +372,9 @@ each processor as follows.
* ( 8K ) *
16K aligned ************************
-What this means is that we don't need to dedicate any register or global variable
-to point to the current running process & can retrieve it with the following
-very simple construct for s/390 & one very similar for z/Architecture.
+What this means is that we don't need to dedicate any register or global
+variable to point to the current running process & can retrieve it with the
+following very simple construct for s/390 & one very similar for z/Architecture.
static inline struct task_struct * get_current(void)
{
@@ -403,8 +407,8 @@ Note: To follow stackframes requires a knowledge of C or Pascal &
limited knowledge of one assembly language.
It should be noted that there are some differences between the
-s/390 & z/Architecture stack layouts as the z/Architecture stack layout didn't have
-to maintain compatibility with older linkage formats.
+s/390 and z/Architecture stack layouts as the z/Architecture stack layout
+didn't have to maintain compatibility with older linkage formats.
Glossary:
---------
@@ -440,7 +444,7 @@ The code generated by the compiler to return to the caller.
frameless-function
A frameless function in Linux for s390 & z/Architecture is one which doesn't
-need more than the register save area ( 96 bytes on s/390, 160 on z/Architecture )
+need more than the register save area (96 bytes on s/390, 160 on z/Architecture)
given to it by the caller.
A frameless function never:
1) Sets up a back chain.
@@ -588,8 +592,8 @@ A sample program with comments.
Comments on the function test
-----------------------------
-1) It didn't need to set up a pointer to the constant pool gpr13 as it isn't used
-( :-( ).
+1) It didn't need to set up a pointer to the constant pool gpr13 as it is not
+used ( :-( ).
2) This is a frameless function & no stack is bought.
3) The compiler was clever enough to recognise that it could return the
value in r2 as well as use it for the passed in parameter ( :-) ).
@@ -743,35 +747,34 @@ Debugging under VM
Notes
-----
Addresses & values in the VM debugger are always hex never decimal
-Address ranges are of the format <HexValue1>-<HexValue2> or <HexValue1>.<HexValue2>
-e.g. The address range 0x2000 to 0x3000 can be described as 2000-3000 or 2000.1000
+Address ranges are of the format <HexValue1>-<HexValue2> or
+<HexValue1>.<HexValue2>
+For example, the address range 0x2000 to 0x3000 can be described as 2000-3000
+or 2000.1000
The VM Debugger is case insensitive.
-VM's strengths are usually other debuggers weaknesses you can get at any resource
-no matter how sensitive e.g. memory management resources,change address translation
-in the PSW. For kernel hacking you will reap dividends if you get good at it.
-
-The VM Debugger displays operators but not operands, probably because some
-of it was written when memory was expensive & the programmer was probably proud that
-it fitted into 2k of memory & the programmers & didn't want to shock hardcore VM'ers by
-changing the interface :-), also the debugger displays useful information on the same line &
-the author of the code probably felt that it was a good idea not to go over
-the 80 columns on the screen.
-
-As some of you are probably in a panic now this isn't as unintuitive as it may seem
-as the 390 instructions are easy to decode mentally & you can make a good guess at a lot
-of them as all the operands are nibble ( half byte aligned ) & if you have an objdump listing
-also it is quite easy to follow, if you don't have an objdump listing keep a copy of
-the s/390 Reference Summary & look at between pages 2 & 7 or alternatively the
-s/390 principles of operation.
+VM's strengths are usually other debuggers' weaknesses: you can get at any
+resource no matter how sensitive, e.g. memory management resources, change
+address translation in the PSW. For kernel hacking you will reap dividends if
+you get good at it.
+
+The VM Debugger displays operators but not operands. It also displays other
+useful information on the same line, as the author of the code probably
+felt that it was a good idea not to go over 80 columns on the screen.
+This isn't as unintuitive as it may seem as the s/390 instructions are easy to
+decode mentally and you can make a good guess at a lot of them as all the
+operands are nibble (half byte) aligned.
+So if you have an objdump listing by hand, it is quite easy to follow, and if
+you don't have an objdump listing keep a copy of the s/390 Reference Summary
+or alternatively the s/390 principles of operation next to you.
e.g. even I can guess that
0001AFF8' LR 180F CC 0
is a ( load register ) lr r0,r15
-Also it is very easy to tell the length of a 390 instruction from the 2 most significant
-bits in the instruction ( not that this info is really useful except if you are trying to
-make sense of a hexdump of code ).
+Also it is very easy to tell the length of a 390 instruction from the 2 most
+significant bits in the instruction (not that this info is really useful except
+if you are trying to make sense of a hexdump of code).
Here is a table
Bits Instruction Length
------------------------------------------
@@ -780,9 +783,6 @@ Bits Instruction Length
10 4 Bytes
11 6 Bytes
-
-
-
The debugger also displays other useful info on the same line such as the
addresses being operated on destination addresses of branches & condition codes.
e.g.
@@ -853,8 +853,8 @@ Displaying & modifying Registers
--------------------------------
D G will display all the gprs
Adding a extra G to all the commands is necessary to access the full 64 bit
-content in VM on z/Architecture obviously this isn't required for access registers
-as these are still 32 bit.
+content in VM on z/Architecture. Obviously this isn't required for access
+registers as these are still 32 bit.
e.g. DGG instead of DG
D X will display all the control registers
D AR will display all the access registers
@@ -870,10 +870,11 @@ Displaying Memory
-----------------
To display memory mapped using the current PSW's mapping try
D <range>
-To make VM display a message each time it hits a particular address & continue try
+To make VM display a message each time it hits a particular address and
+continue try
D I<range> will disassemble/display a range of instructions.
ST addr 32 bit word will store a 32 bit aligned address
-D T<range> will display the EBCDIC in an address ( if you are that way inclined )
+D T<range> will display the EBCDIC in an address (if you are that way inclined)
D R<range> will display real addresses ( without DAT ) but with prefixing.
There are other complex options to display if you need to get at say home space
but are in primary space the easiest thing to do is to temporarily
@@ -884,8 +885,8 @@ restore it.
Hints
-----
-If you want to issue a debugger command without halting your virtual machine with the
-PA1 key try prefixing the command with #CP e.g.
+If you want to issue a debugger command without halting your virtual machine
+with the PA1 key try prefixing the command with #CP e.g.
#cp tr i pswa 2000
also suffixing most debugger commands with RUN will cause them not
to stop just display the mnemonic at the current instruction on the console.
@@ -903,9 +904,10 @@ This sends a message to your own console each time do_signal is entered.
script with breakpoints on every kernel procedure, this isn't a good idea
because there are thousands of these routines & VM can only set 255 breakpoints
at a time so you nearly had to spend as long pruning the file down as you would
-entering the msg's by hand ),however, the trick might be useful for a single object file.
-On linux'es 3270 emulator x3270 there is a very useful option under the file ment
-Save Screens In File this is very good of keeping a copy of traces.
+entering the msgs by hand), however, the trick might be useful for a single
+object file. In the 3270 terminal emulator x3270 there is a very useful option
+in the file menu called "Save Screen In File" - this is very good for keeping a
+copy of traces.
From CMS help <command name> will give you online help on a particular command.
e.g.
@@ -920,7 +922,8 @@ SET PF9 IMM B
This does a single step in VM on pressing F8.
SET PF10 ^
This sets up the ^ key.
-which can be used for ^c (ctrl-c),^z (ctrl-z) which can't be typed directly into some 3270 consoles.
+which can be used for ^c (ctrl-c),^z (ctrl-z) which can't be typed directly
+into some 3270 consoles.
SET PF11 ^-
This types the starting keystrokes for a sysrq see SysRq below.
SET PF12 RETRIEVE
@@ -1014,8 +1017,8 @@ Tracing Program Exceptions
--------------------------
If you get a crash which says something like
illegal operation or specification exception followed by a register dump
-You can restart linux & trace these using the tr prog <range or value> trace option.
-
+You can restart linux & trace these using the tr prog <range or value> trace
+option.
The most common ones you will normally be tracing for is
@@ -1057,9 +1060,10 @@ TR GOTO INITIAL
Tracing linux syscalls under VM
-------------------------------
-Syscalls are implemented on Linux for S390 by the Supervisor call instruction (SVC) there 256
-possibilities of these as the instruction is made up of a 0xA opcode & the second byte being
-the syscall number. They are traced using the simple command.
+Syscalls are implemented on Linux for S390 by the Supervisor call instruction
+(SVC). There are 256 possibilities of these as the instruction is made up of a 0xA
+opcode and the second byte being the syscall number. They are traced using the
+simple command:
TR SVC <Optional value or range>
the syscalls are defined in linux/arch/s390/include/asm/unistd.h
e.g. to trace all file opens just do
@@ -1070,12 +1074,12 @@ SMP Specific commands
---------------------
To find out how many cpus you have
Q CPUS displays all the CPU's available to your virtual machine
-To find the cpu that the current cpu VM debugger commands are being directed at do
-Q CPU to change the current cpu VM debugger commands are being directed at do
+To find the cpu that the current VM debugger commands are being directed at
+do Q CPU. To change the cpu that VM debugger commands are being directed at do
CPU <desired cpu no>
-On a SMP guest issue a command to all CPUs try prefixing the command with cpu all.
-To issue a command to a particular cpu try cpu <cpu number> e.g.
+On an SMP guest, to issue a command to all CPUs try prefixing the command with
+cpu all. To issue a command to a particular cpu try cpu <cpu number> e.g.
CPU 01 TR I R 2000.3000
If you are running on a guest with several cpus & you have a IO related problem
& cannot follow the flow of code but you know it isn't smp related.
@@ -1101,10 +1105,10 @@ D TX0.100
Alternatively
=============
-Under older VM debuggers ( I love EBDIC too ) you can use this little program I wrote which
-will convert a command line of hex digits to ascii text which can be compiled under linux &
-you can copy the hex digits from your x3270 terminal to your xterm if you are debugging
-from a linuxbox.
+Under older VM debuggers (I love EBCDIC too) you can use the following little
+program which converts a command line of hex digits to ascii text. It can be
+compiled under linux and you can copy the hex digits from your x3270 terminal
+to your xterm if you are debugging from a linuxbox.
This is quite useful when looking at a parameter passed in as a text string
under VM ( unless you are good at decoding ASCII in your head ).
@@ -1114,14 +1118,14 @@ TR SVC 5
We have stopped at a breakpoint
000151B0' SVC 0A05 -> 0001909A' CC 0
-D 20.8 to check the SVC old psw in the prefix area & see was it from userspace
-( for the layout of the prefix area consult P18 of the s/390 390 Reference Summary
-if you have it available ).
+D 20.8 to check the SVC old psw in the prefix area and see was it from userspace
+(for the layout of the prefix area consult the "Fixed Storage Locations"
+chapter of the s/390 Reference Summary if you have it available).
V00000020 070C2000 800151B2
The problem state bit wasn't set & it's also too early in the boot sequence
for it to be a userspace SVC if it was we would have to temporarily switch the
-psw to user space addressing so we could get at the first parameter of the open in
-gpr2.
+psw to user space addressing so we could get at the first parameter of the open
+in gpr2.
Next do a
D G2
GPR 2 = 00014CB4
@@ -1208,9 +1212,9 @@ Here are the tricks I use 9 out of 10 times it works pretty well,
When your backchain reaches a dead end
--------------------------------------
-This can happen when an exception happens in the kernel & the kernel is entered twice
-if you reach the NULL pointer at the end of the back chain you should be
-able to sniff further back if you follow the following tricks.
+This can happen when an exception happens in the kernel and the kernel is
+entered twice. If you reach the NULL pointer at the end of the back chain you
+should be able to sniff further back if you follow the following tricks.
1) A kernel address should be easy to recognise since it is in
primary space & the problem state bit isn't set & also
The Hi bit of the address is set.
@@ -1260,8 +1264,8 @@ V000FFFD0 00010400 80010802 8001085A 000FFFA0
our 3rd return address is 8001085A
-as the 04B52002 looks suspiciously like rubbish it is fair to assume that the kernel entry routines
-for the sake of optimisation don't set up a backchain.
+as the 04B52002 looks suspiciously like rubbish it is fair to assume that the
+kernel entry routines for the sake of optimisation don't set up a backchain.
now look at System.map to see if the addresses make any sense.
@@ -1289,67 +1293,75 @@ Congrats you've done your first backchain.
s/390 & z/Architecture IO Overview
==================================
-I am not going to give a course in 390 IO architecture as this would take me quite a
-while & I'm no expert. Instead I'll give a 390 IO architecture summary for Dummies if you have
-the s/390 principles of operation available read this instead. If nothing else you may find a few
-useful keywords in here & be able to use them on a web search engine like altavista to find
-more useful information.
+I am not going to give a course in 390 IO architecture as this would take me
+quite a while and I'm no expert. Instead I'll give a 390 IO architecture
+summary for Dummies. If you have the s/390 principles of operation available
+read this instead. If nothing else you may find a few useful keywords in here
+and be able to use them on a web search engine to find more useful information.
Unlike other bus architectures modern 390 systems do their IO using mostly
-fibre optics & devices such as tapes & disks can be shared between several mainframes,
-also S390 can support up to 65536 devices while a high end PC based system might be choking
-with around 64. Here is some of the common IO terminology
+fibre optics and devices such as tapes and disks can be shared between several
+mainframes. Also S390 can support up to 65536 devices while a high end PC based
+system might be choking with around 64.
-Subchannel:
-This is the logical number most IO commands use to talk to an IO device there can be up to
-0x10000 (65536) of these in a configuration typically there is a few hundred. Under VM
-for simplicity they are allocated contiguously, however on the native hardware they are not
-they typically stay consistent between boots provided no new hardware is inserted or removed.
-Under Linux for 390 we use these as IRQ's & also when issuing an IO command (CLEAR SUBCHANNEL,
-HALT SUBCHANNEL,MODIFY SUBCHANNEL,RESUME SUBCHANNEL,START SUBCHANNEL,STORE SUBCHANNEL &
-TEST SUBCHANNEL ) we use this as the ID of the device we wish to talk to, the most
-important of these instructions are START SUBCHANNEL ( to start IO ), TEST SUBCHANNEL ( to check
-whether the IO completed successfully ), & HALT SUBCHANNEL ( to kill IO ), a subchannel
-can have up to 8 channel paths to a device this offers redundancy if one is not available.
+Here is some of the common IO terminology:
+Subchannel:
+This is the logical number most IO commands use to talk to an IO device. There
+can be up to 0x10000 (65536) of these in a configuration, typically there are a
+few hundred. Under VM for simplicity they are allocated contiguously, however
+on the native hardware they are not. They typically stay consistent between
+boots provided no new hardware is inserted or removed.
+Under Linux for s390 we use these as IRQ's and also when issuing an IO command
+(CLEAR SUBCHANNEL, HALT SUBCHANNEL, MODIFY SUBCHANNEL, RESUME SUBCHANNEL,
+START SUBCHANNEL, STORE SUBCHANNEL and TEST SUBCHANNEL). We use this as the ID
+of the device we wish to talk to. The most important of these instructions are
+START SUBCHANNEL (to start IO), TEST SUBCHANNEL (to check whether the IO
+completed successfully) and HALT SUBCHANNEL (to kill IO). A subchannel can have
+up to 8 channel paths to a device, this offers redundancy if one is not
+available.
Device Number:
-This number remains static & Is closely tied to the hardware, there are 65536 of these
-also they are made up of a CHPID ( Channel Path ID, the most significant 8 bits )
-& another lsb 8 bits. These remain static even if more devices are inserted or removed
-from the hardware, there is a 1 to 1 mapping between Subchannels & Device Numbers provided
-devices aren't inserted or removed.
+This number remains static and is closely tied to the hardware. There are 65536
+of these, made up of a CHPID (Channel Path ID, the most significant 8 bits) and
+another lsb 8 bits. These remain static even if more devices are inserted or
+removed from the hardware. There is a 1 to 1 mapping between subchannels and
+device numbers, provided devices aren't inserted or removed.
Channel Control Words:
-CCWS are linked lists of instructions initially pointed to by an operation request block (ORB),
-which is initially given to Start Subchannel (SSCH) command along with the subchannel number
-for the IO subsystem to process while the CPU continues executing normal code.
-These come in two flavours, Format 0 ( 24 bit for backward )
-compatibility & Format 1 ( 31 bit ). These are typically used to issue read & write
-( & many other instructions ) they consist of a length field & an absolute address field.
-For each IO typically get 1 or 2 interrupts one for channel end ( primary status ) when the
-channel is idle & the second for device end ( secondary status ) sometimes you get both
-concurrently, you check how the IO went on by issuing a TEST SUBCHANNEL at each interrupt,
-from which you receive an Interruption response block (IRB). If you get channel & device end
-status in the IRB without channel checks etc. your IO probably went okay. If you didn't you
-probably need a doctor to examine the IRB & extended status word etc.
+CCWs are linked lists of instructions initially pointed to by an operation
+request block (ORB), which is initially given to Start Subchannel (SSCH)
+command along with the subchannel number for the IO subsystem to process
+while the CPU continues executing normal code.
+CCWs come in two flavours, Format 0 (24 bit for backward compatibility) and
+Format 1 (31 bit). These are typically used to issue read and write (and many
+other) instructions. They consist of a length field and an absolute address
+field.
+Each IO typically gets 1 or 2 interrupts, one for channel end (primary status)
+when the channel is idle, and the second for device end (secondary status).
+Sometimes you get both concurrently. You check how the IO went on by issuing a
+TEST SUBCHANNEL at each interrupt, from which you receive an Interruption
+response block (IRB). If you get channel and device end status in the IRB
+without channel checks etc. your IO probably went okay. If you didn't you
+probably need to examine the IRB, extended status word etc.
If an error occurs, more sophisticated control units have a facility known as
-concurrent sense this means that if an error occurs Extended sense information will
-be presented in the Extended status word in the IRB if not you have to issue a
-subsequent SENSE CCW command after the test subchannel.
+concurrent sense. This means that if an error occurs Extended sense information
+will be presented in the Extended status word in the IRB. If not you have to
+issue a subsequent SENSE CCW command after the test subchannel.
-TPI( Test pending interrupt) can also be used for polled IO but in multitasking multiprocessor
-systems it isn't recommended except for checking special cases ( i.e. non looping checks for
-pending IO etc. ).
+TPI (Test pending interrupt) can also be used for polled IO, but in
+multitasking multiprocessor systems it isn't recommended except for
+checking special cases (i.e. non looping checks for pending IO etc.).
-Store Subchannel & Modify Subchannel can be used to examine & modify operating characteristics
-of a subchannel ( e.g. channel paths ).
+Store Subchannel and Modify Subchannel can be used to examine and modify
+operating characteristics of a subchannel (e.g. channel paths).
Other IO related Terms:
Sysplex: S390's Clustering Technology
-QDIO: S390's new high speed IO architecture to support devices such as gigabit ethernet,
-this architecture is also designed to be forward compatible with up & coming 64 bit machines.
+QDIO: S390's new high speed IO architecture to support devices such as gigabit
+ethernet, this architecture is also designed to be forward compatible with
+upcoming 64 bit machines.
General Concepts
@@ -1406,37 +1418,40 @@ sometimes called Bus-and Tag & sometimes Original Equipment Manufacturers
Interface (OEMI).
This byte wide Parallel channel path/bus has parity & data on the "Bus" cable
-& control lines on the "Tag" cable. These can operate in byte multiplex mode for
-sharing between several slow devices or burst mode & monopolize the channel for the
-whole burst. Up to 256 devices can be addressed on one of these cables. These cables are
-about one inch in diameter. The maximum unextended length supported by these cables is
-125 Meters but this can be extended up to 2km with a fibre optic channel extended
-such as a 3044. The maximum burst speed supported is 4.5 megabytes per second however
-some really old processors support only transfer rates of 3.0, 2.0 & 1.0 MB/sec.
+and control lines on the "Tag" cable. These can operate in byte multiplex mode
+for sharing between several slow devices or burst mode and monopolize the
+channel for the whole burst. Up to 256 devices can be addressed on one of these
+cables. These cables are about one inch in diameter. The maximum unextended
+length supported by these cables is 125 Meters but this can be extended up to
+2km with a fibre optic channel extender such as a 3044. The maximum burst speed
+supported is 4.5 megabytes per second. However, some really old processors
+support only transfer rates of 3.0, 2.0 & 1.0 MB/sec.
One of these paths can be daisy chained to up to 8 control units.
ESCON if fibre optic it is also called FICON
-Was introduced by IBM in 1990. Has 2 fibre optic cables & uses either leds or lasers
-for communication at a signaling rate of up to 200 megabits/sec. As 10bits are transferred
-for every 8 bits info this drops to 160 megabits/sec & to 18.6 Megabytes/sec once
-control info & CRC are added. ESCON only operates in burst mode.
+Was introduced by IBM in 1990. Has 2 fibre optic cables and uses either leds or
+lasers for communication at a signaling rate of up to 200 megabits/sec. As
+10bits are transferred for every 8 bits info this drops to 160 megabits/sec
+and to 18.6 Megabytes/sec once control info and CRC are added. ESCON only
+operates in burst mode.
-ESCONs typical max cable length is 3km for the led version & 20km for the laser version
-known as XDF ( extended distance facility ). This can be further extended by using an
-ESCON director which triples the above mentioned ranges. Unlike Bus & Tag as ESCON is
-serial it uses a packet switching architecture the standard Bus & Tag control protocol
-is however present within the packets. Up to 256 devices can be attached to each control
-unit that uses one of these interfaces.
+ESCONs typical max cable length is 3km for the led version and 20km for the
+laser version known as XDF (extended distance facility). This can be further
+extended by using an ESCON director which triples the above mentioned ranges.
+Unlike Bus & Tag as ESCON is serial it uses a packet switching architecture,
+the standard Bus & Tag control protocol is however present within the packets.
+Up to 256 devices can be attached to each control unit that uses one of these
+interfaces.
Common 390 Devices include:
Network adapters typically OSA2,3172's,2116's & OSA-E gigabit ethernet adapters,
-Consoles 3270 & 3215 ( a teletype emulated under linux for a line mode console ).
+Consoles 3270 & 3215 (a teletype emulated under linux for a line mode console).
DASD's direct access storage devices ( otherwise known as hard disks ).
Tape Drives.
CTC ( Channel to Channel Adapters ),
ESCON or Parallel Cables used as a very high speed serial link
-between 2 machines. We use 2 cables under linux to do a bi-directional serial link.
+between 2 machines.
Debugging IO on s/390 & z/Architecture under VM
@@ -1475,9 +1490,9 @@ or the halt subchannels
or TR HSCH 7C08-7C09
MSCH's ,STSCH's I think you can guess the rest
-Ingo's favourite trick is tracing all the IO's & CCWS & spooling them into the reader of another
-VM guest so he can ftp the logfile back to his own machine.I'll do a small bit of this & give you
- a look at the output.
+A good trick is tracing all the IO's and CCWs and spooling them into the reader
+of another VM guest so you can ftp the logfile back to your own machine. I'll do
+a small bit of this and give you a look at the output.
1) Spool stdout to VM reader
SP PRT TO (another vm guest ) or * for the local vm guest
@@ -1593,8 +1608,8 @@ undisplay : undo's display's
info breakpoints: shows all current breakpoints
-info stack: shows stack back trace ( if this doesn't work too well, I'll show you the
-stacktrace by hand below ).
+info stack: shows stack back trace (if this doesn't work too well, I'll show
+you the stacktrace by hand below).
info locals: displays local variables.
@@ -1619,7 +1634,8 @@ next: like step except this will not step into subroutines
stepi: steps a single machine code instruction.
e.g. stepi 100
-nexti: steps a single machine code instruction but will not step into subroutines.
+nexti: steps a single machine code instruction but will not step into
+subroutines.
finish: will run until exit of the current routine
@@ -1721,7 +1737,8 @@ e.g.
outputs:
$1 = 11
-You might now be thinking that the line above didn't work, something extra had to be done.
+You might now be thinking that the line above didn't work, something extra had
+to be done.
(gdb) call fflush(stdout)
hello world$2 = 0
As an aside the debugger also calls malloc & free under the hood
@@ -1804,26 +1821,17 @@ man gdb or info gdb.
core dumps
----------
What a core dump ?,
-A core dump is a file generated by the kernel ( if allowed ) which contains the registers,
-& all active pages of the program which has crashed.
-From this file gdb will allow you to look at the registers & stack trace & memory of the
-program as if it just crashed on your system, it is usually called core & created in the
-current working directory.
-This is very useful in that a customer can mail a core dump to a technical support department
-& the technical support department can reconstruct what happened.
-Provided they have an identical copy of this program with debugging symbols compiled in &
-the source base of this build is available.
-In short it is far more useful than something like a crash log could ever hope to be.
-
-In theory all that is missing to restart a core dumped program is a kernel patch which
-will do the following.
-1) Make a new kernel task structure
-2) Reload all the dumped pages back into the kernel's memory management structures.
-3) Do the required clock fixups
-4) Get all files & network connections for the process back into an identical state ( really difficult ).
-5) A few more difficult things I haven't thought of.
-
-
+A core dump is a file generated by the kernel (if allowed) which contains the
+registers and all active pages of the program which has crashed.
+From this file gdb will allow you to look at the registers, stack trace and
+memory of the program as if it just crashed on your system. It is usually
+called core and created in the current working directory.
+This is very useful in that a customer can mail a core dump to a technical
+support department and the technical support department can reconstruct what
+happened, provided they have an identical copy of this program with debugging
+symbols compiled in and the source base of this build is available.
+In short it is far more useful than something like a crash log could ever hope
+to be.
Why have I never seen one ?.
Probably because you haven't used the command
@@ -1868,7 +1876,7 @@ Breakpoint 2 at 0x4d87a4: file top.c, line 2609.
#3 0x5167e6 in readline_internal_char () at readline.c:454
#4 0x5168ee in readline_internal_charloop () at readline.c:507
#5 0x51692c in readline_internal () at readline.c:521
-#6 0x5164fe in readline (prompt=0x7ffff810 "\177ÂÿÂøx\177ÂÿÂ÷ÂØ\177ÂÿÂøxÂÀ")
+#6 0x5164fe in readline (prompt=0x7ffff810)
at readline.c:349
#7 0x4d7a8a in command_line_input (prompt=0x564420 "(gdb) ", repeat=1,
annotation_suffix=0x4d6b44 "prompt") at top.c:2091
@@ -1929,8 +1937,8 @@ cat /proc/sys/net/ipv4/ip_forward
On my machine now outputs
1
IP forwarding is on.
-There is a lot of useful info in here best found by going in & having a look around,
-so I'll take you through some entries I consider important.
+There is a lot of useful info in here best found by going in and having a look
+around, so I'll take you through some entries I consider important.
All the processes running on the machine have their own entry defined by
/proc/<pid>
@@ -2060,7 +2068,8 @@ if the device doesn't say up
try
/etc/rc.d/init.d/network start
( this starts the network stack & hopefully calls ifconfig tr0 up ).
-ifconfig looks at the output of /proc/net/dev & presents it in a more presentable form
+ifconfig looks at the output of /proc/net/dev and presents it in a more
+presentable form.
Now ping the device from a machine in the same subnet.
if the RX packets count & TX packets counts don't increment you probably
have problems.
@@ -2086,34 +2095,6 @@ of the device.
See the manpage chandev.8 &type cat /proc/chandev for more info.
-
-Starting points for debugging scripting languages etc.
-======================================================
-
-bash/sh
-
-bash -x <scriptname>
-e.g. bash -x /usr/bin/bashbug
-displays the following lines as it executes them.
-+ MACHINE=i586
-+ OS=linux-gnu
-+ CC=gcc
-+ CFLAGS= -DPROGRAM='bash' -DHOSTTYPE='i586' -DOSTYPE='linux-gnu' -DMACHTYPE='i586-pc-linux-gnu' -DSHELL -DHAVE_CONFIG_H -I. -I. -I./lib -O2 -pipe
-+ RELEASE=2.01
-+ PATCHLEVEL=1
-+ RELSTATUS=release
-+ MACHTYPE=i586-pc-linux-gnu
-
-perl -d <scriptname> runs the perlscript in a fully interactive debugger
-<like gdb>.
-Type 'h' in the debugger for help.
-
-for debugging java type
-jdb <filename> another fully interactive gdb style debugger.
-& type ? in the debugger for help.
-
-
-
SysRq
=====
This is now supported by linux for s/390 & z/Architecture.
diff --git a/Documentation/scheduler/completion.txt b/Documentation/scheduler/completion.txt
new file mode 100644
index 000000000000..f77651eca31e
--- /dev/null
+++ b/Documentation/scheduler/completion.txt
@@ -0,0 +1,236 @@
+completions - wait for completion handling
+==========================================
+
+This document was originally written based on 3.18.0 (linux-next)
+
+Introduction:
+-------------
+
+If you have one or more threads of execution that must wait for some process
+to have reached a point or a specific state, completions can provide a race
+free solution to this problem. Semantically they are somewhat like a
+pthread_barrier and have similar use-cases.
+
+Completions are a code synchronization mechanism that is preferable to any
+misuse of locks. Any time you think of using yield() or some quirky
+msleep(1); loop to allow something else to proceed, you probably want to
+look into using one of the wait_for_completion*() calls instead. The
+advantage of using completions is clear intent of the code but also more
+efficient code as both threads can continue until the result is actually
+needed.
+
+Completions are built on top of the generic event infrastructure in Linux,
+with the event reduced to a simple flag appropriately called "done" in
+struct completion, that tells the waiting threads of execution if they
+can continue safely.
+
+As completions are scheduling related the code is found in
+kernel/sched/completion.c - for details on completion design and
+implementation see completions-design.txt
+
+
+Usage:
+------
+
+There are three parts to using completions: the initialization of the
+struct completion, the waiting part through a call to one of the variants of
+wait_for_completion() and the signaling side through a call to complete(),
+or complete_all(). Further there are some helper functions for checking the
+state of completions.
+
+To use completions one needs to include <linux/completion.h> and
+create a variable of type struct completion. The structure used for
+handling of completions is:
+
+ struct completion {
+ unsigned int done;
+ wait_queue_head_t wait;
+ };
+
+providing the wait queue to place tasks on for waiting and the flag for
+indicating the state of affairs.
+
+Completions should be named to convey the intent of the waiter. A good
+example is:
+
+ wait_for_completion(&early_console_added);
+
+ complete(&early_console_added);
+
+Good naming (as always) helps code readability.
+
+
+Initializing completions:
+-------------------------
+
+Initialization of dynamically allocated completions, often embedded in
+other structures, is done with:
+
+ init_completion(&done);
+
+Initialization is accomplished by initializing the wait queue and setting
+the default state to "not available", that is, "done" is set to 0.
+
+The re-initialization function, reinit_completion(), simply resets the
+done element to "not available", thus again to 0, without touching the
+wait queue. Calling init_completion() on the same completion object is
+most likely a bug as it re-initializes the queue to an empty queue and
+enqueued tasks could get "lost" - use reinit_completion() in that case.
+
+For static declaration and initialization, macros are available. These are:
+
+ static DECLARE_COMPLETION(setup_done)
+
+used for static declarations in file scope. Within functions the static
+initialization should always use:
+
+ DECLARE_COMPLETION_ONSTACK(setup_done)
+
+suitable for automatic/local variables on the stack and will make lockdep
+happy. Note also that one needs to make *sure* the completion passed to
+work threads remains in scope, and that no references remain to on-stack data
+when the initiating function returns.
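+
+A minimal sketch combining both forms (all foo_* names below are purely
+illustrative, not existing kernel API):
+
+	#include <linux/completion.h>
+	#include <linux/workqueue.h>
+
+	static DECLARE_COMPLETION(foo_module_ready);	/* file scope */
+
+	struct foo_device {
+		struct work_struct setup_work;
+		struct completion setup_done;	/* embedded, initialized at runtime */
+	};
+
+	static void foo_device_init(struct foo_device *dev)
+	{
+		init_completion(&dev->setup_done);	/* done = 0, empty wait queue */
+	}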
+
+
+Waiting for completions:
+------------------------
+
+For a thread of execution to wait for some concurrent work to finish, it
+calls wait_for_completion() on the initialized completion structure.
+A typical usage scenario is:
+
+ struct completion setup_done;
+ init_completion(&setup_done);
+ initialize_work(...,&setup_done,...)
+
+ /* run non-dependent code */ /* do setup */
+
+ wait_for_completion(&setup_done); complete(&setup_done)
+
+This is not implying any temporal order of wait_for_completion() and the
+call to complete() - if the call to complete() happened before the call
+to wait_for_completion() then the waiting side simply will continue
+immediately as all dependencies are satisfied.
+
+Note that wait_for_completion() is calling spin_lock_irq/spin_unlock_irq
+so it can only be called safely when you know that interrupts are enabled.
+Calling it from hard-irq context will result in hard to detect spurious
+enabling of interrupts.
+
+wait_for_completion():
+
+ void wait_for_completion(struct completion *done):
+
+The default behavior is to wait without a timeout and mark the task as
+uninterruptible. wait_for_completion() and its variants are only safe
+in process context (as they can sleep), but not in atomic or hard-irq context.
+As all variants of wait_for_completion() can (obviously) block for a long
+time, you probably don't want to call this with held locks - see also
+try_wait_for_completion() below.
+
+
+Variants available:
+-------------------
+
+The below variants all return status and this status should be checked in
+most(/all) cases - in cases where the status is deliberately not checked you
+probably want to make a note explaining this (e.g. see
+arch/arm/kernel/smp.c:__cpu_up()).
+
+A common problem that occurs is to have unclean assignment of return types,
+so care should be taken with assigning return-values to variables of proper
+type. Checking for the specific meaning of return values also has been found
+to be quite inaccurate e.g. constructs like
+if(!wait_for_completion_interruptible_timeout(...)) would execute the same
+code path for successful completion and for the interrupted case - which is
+probably not what you want.
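+
+A minimal sketch of handling all three outcomes explicitly, assuming the
+setup_done completion from the usage scenario above and a 100ms timeout:
+
+	long left = wait_for_completion_interruptible_timeout(&setup_done,
+						msecs_to_jiffies(100));
+	if (left < 0)
+		return left;		/* -ERESTARTSYS: interrupted by a signal */
+	if (left == 0)
+		return -ETIMEDOUT;	/* timed out, completion never arrived */
+	/* success: 'left' is the unused remainder of the timeout, >= 1 */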
+
+ int wait_for_completion_interruptible(struct completion *done)
+
+marking the task TASK_INTERRUPTIBLE. If a signal was received while waiting
+it will return -ERESTARTSYS; otherwise it returns 0.
+
+ unsigned long wait_for_completion_timeout(struct completion *done,
+ unsigned long timeout)
+
+The task is marked as TASK_UNINTERRUPTIBLE and will wait at most timeout
+(in jiffies). If a timeout occurs it returns 0, else the remaining time in
+jiffies (but at least 1). Timeouts are preferably passed by msecs_to_jiffies()
+or usecs_to_jiffies(). If the returned timeout value is deliberately ignored
+a comment should probably explain why (e.g. see drivers/mfd/wm8350-core.c
+wm8350_read_auxadc())
+
+ long wait_for_completion_interruptible_timeout(
+ struct completion *done, unsigned long timeout)
+
+passing a timeout in jiffies and marking the task as TASK_INTERRUPTIBLE. If a
+signal was received it will return -ERESTARTSYS, 0 if completion timed-out and
+the remaining time in jiffies if completion occurred.
+
+Further variants include _killable which passes TASK_KILLABLE as the
+designated task state and will return -ERESTARTSYS if interrupted, or
+else 0 if completion was achieved; a _timeout variant is also available.
+
+ long wait_for_completion_killable(struct completion *done)
+ long wait_for_completion_killable_timeout(struct completion *done,
+ unsigned long timeout)
+
+The _io variants wait_for_completion_io() behave the same as the non-_io
+variants, except that waiting time is accounted as waiting on IO, which has
+an impact on how the task is accounted in scheduling stats.
+
+ void wait_for_completion_io(struct completion *done)
+ unsigned long wait_for_completion_io_timeout(struct completion *done,
+ unsigned long timeout)
+
+
+Signaling completions:
+----------------------
+
+A thread of execution that wants to signal that the conditions for
+continuation have been achieved calls complete() to signal exactly one
+of the waiters that it can continue.
+
+ void complete(struct completion *done)
+
+or calls complete_all to signal all current and future waiters.
+
+ void complete_all(struct completion *done)
+
+The signaling will work as expected even if completions are signaled before
+a thread starts waiting. This is achieved by the waiter "consuming"
+(decrementing) the done element of struct completion. Waiting threads are
+woken up in the order in which they were enqueued (FIFO order).
+
+If complete() is called multiple times then this will allow for that number
+of waiters to continue - each call to complete() will simply increment the
+done element. Calling complete_all() multiple times is a bug though. Both
+complete() and complete_all() can be called in hard-irq context safely.
+
+There can only be one thread calling complete() or complete_all() on a
+particular struct completion at any time - serialized through the wait
+queue spinlock. Any such concurrent calls to complete() or complete_all()
+are probably a design bug.
+
+Signaling completion from hard-irq context is fine as it will appropriately
+lock with spin_lock_irqsave/spin_unlock_irqrestore.
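+
+As an illustration only - my_dev, cmd_done and my_irq_handler are made-up
+names, not an existing driver API - completing from an interrupt handler
+(irqreturn_t and IRQ_HANDLED come from <linux/interrupt.h>) can be as
+simple as:
+
+ static irqreturn_t my_irq_handler(int irq, void *data)
+ {
+         struct my_dev *dev = data;
+
+         /* the hardware is done; wake the thread blocked in
+          * wait_for_completion(&dev->cmd_done) */
+         complete(&dev->cmd_done);
+
+         return IRQ_HANDLED;
+ }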
+
+
+try_wait_for_completion()/completion_done():
+--------------------------------------------
+
+The try_wait_for_completion() function will not put the thread on the wait
+queue but rather returns false if it would need to enqueue (block) the
+thread; else it consumes one posted completion and returns true.
+
+ bool try_wait_for_completion(struct completion *done)
+
+Finally, to check the state of a completion without changing it in any way,
+use completion_done(), which returns false if there are no posted completions
+that were not yet consumed by waiters (implying that there are waiters), and
+true otherwise.
+
+ bool completion_done(struct completion *done)
+
+Both try_wait_for_completion() and completion_done() are safe to be called in
+hard-irq context.
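+
+A minimal, purely illustrative sketch of a non-blocking check (reusing
+setup_done from the earlier example):
+
+ if (try_wait_for_completion(&setup_done)) {
+         /* a posted completion was consumed; proceed without sleeping */
+ } else {
+         /* proceeding would block; defer the work or poll again later */
+ }
+
+completion_done(&setup_done), by contrast, would only report whether a posted
+completion is available without consuming it.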
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 821c936e1a63..c9e7f4f223a5 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -323,8 +323,6 @@ about the status of the key service:
U Under construction by callback to userspace
N Negative key
- This file must be enabled at kernel configuration time as it allows anyone
- to list the keys database.
(*) /proc/key-users
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index aee73e78c7d4..02f8331edb8b 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -32,18 +32,42 @@ Procedure for submitting patches to the -stable tree:
- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
Documentation/networking/netdev-FAQ.txt
- - Send the patch, after verifying that it follows the above rules, to
- stable@vger.kernel.org. You must note the upstream commit ID in the
- changelog of your submission, as well as the kernel version you wish
- it to be applied to.
- - To have the patch automatically included in the stable tree, add the tag
+ - Security patches should not be handled (solely) by the -stable review
+ process but should follow the procedures in Documentation/SecurityBugs.
+
+For all other submissions, choose one of the following procedures:
+
+ --- Option 1 ---
+
+ To have the patch automatically included in the stable tree, add the tag
Cc: stable@vger.kernel.org
in the sign-off area. Once the patch is merged it will be applied to
the stable tree without anything else needing to be done by the author
or subsystem maintainer.
- - If the patch requires other patches as prerequisites which can be
- cherry-picked, then this can be specified in the following format in
- the sign-off area:
+
+ --- Option 2 ---
+
+ After the patch has been merged to Linus' tree, send an email to
+ stable@vger.kernel.org containing the subject of the patch, the commit ID,
+ why you think it should be applied, and what kernel version you wish it to
+ be applied to.
+
+ --- Option 3 ---
+
+ Send the patch, after verifying that it follows the above rules, to
+ stable@vger.kernel.org. You must note the upstream commit ID in the
+ changelog of your submission, as well as the kernel version you wish
+ it to be applied to.
+
+Option 1 is probably the easiest and most common. Options 2 and 3 are more
+useful if the patch isn't deemed worthy at the time it is applied to a public
+git tree (for instance, because it deserves more regression testing first).
+Option 3 is especially useful if the patch needs some special handling to apply
+to an older kernel (e.g., if APIs have changed in the meantime).
+
+Additionally, some patches submitted via Option 1 may have prerequisite
+patches which can be cherry-picked. This can be specified in the following
+format in the sign-off area:
Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
@@ -57,13 +81,13 @@ Procedure for submitting patches to the -stable tree:
git cherry-pick fd21073
git cherry-pick <this commit>
+Following the submission:
+
- The sender will receive an ACK when the patch has been accepted into the
queue, or a NAK if the patch is rejected. This response might take a few
days, according to the developer's schedules.
- If accepted, the patch will be added to the -stable queue, for review by
other developers and by the relevant subsystem maintainer.
- - Security patches should not be sent to this alias, but instead to the
- documented security@kernel.org address.
Review cycle:
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 75511efefc64..83ab25660fc9 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -843,6 +843,7 @@ can be ORed together:
8192 - An unsigned module has been loaded in a kernel supporting module
signature.
16384 - A soft lockup has previously occurred on the system.
+32768 - The kernel has been live patched.
==============================================================
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 666594b43cff..6294b5186ae5 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -97,6 +97,14 @@ rmem_max
The maximum receive socket buffer size in bytes.
+tstamp_allow_data
+-----------------
+Allow processes to receive tx timestamps looped together with the original
+packet contents. If disabled, transmit timestamp requests from unprivileged
+processes are dropped unless socket option SOF_TIMESTAMPING_OPT_TSONLY is set.
+Default: 1 (on)
+
+
wmem_default
------------
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 4415aa915681..902b4574acfb 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -555,12 +555,12 @@ this is causing problems for your system/application.
oom_dump_tasks
-Enables a system-wide task dump (excluding kernel threads) to be
-produced when the kernel performs an OOM-killing and includes such
-information as pid, uid, tgid, vm size, rss, nr_ptes, swapents,
-oom_score_adj score, and name. This is helpful to determine why the
-OOM killer was invoked, to identify the rogue task that caused it,
-and to determine why the OOM killer chose the task it did to kill.
+Enables a system-wide task dump (excluding kernel threads) to be produced
+when the kernel performs an OOM-killing and includes such information as
+pid, uid, tgid, vm size, rss, nr_ptes, nr_pmds, swapents, oom_score_adj
+score, and name. This is helpful to determine why the OOM killer was
+invoked, to identify the rogue task that caused it, and to determine why
+the OOM killer chose the task it did to kill.
If this is set to zero, this information is suppressed. On very
large systems with thousands of tasks it may not be feasible to dump
@@ -728,7 +728,7 @@ The default value is 60.
- user_reserve_kbytes
-When overcommit_memory is set to 2, "never overommit" mode, reserve
+When overcommit_memory is set to 2, "never overcommit" mode, reserve
min(3% of current process size, user_reserve_kbytes) of free memory.
This is intended to prevent a user from starting a single memory hogging
process, such that they cannot recover (kill the hog).
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 230ce71f4d75..2b47704f75cb 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
- buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
- buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
- buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
- buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+ buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
- buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+ buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
- buf += " return \"" + fabric_mod_name[4:] + "\";\n"
+ buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
- if re.search('stop_session\)\(', fo):
- buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
- if re.search('fall_back_to_erl0\)\(', fo):
- buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
- if re.search('sess_logged_in\)\(', fo):
- buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
- buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+ buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+ bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
- if re.search('is_state_remove\)\(', fo):
- buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+ if re.search('aborted_task\)\(', fo):
+ buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+ bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index fca24c931ec8..753e47cc2e20 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -3,7 +3,7 @@ CPU cooling APIs How To
Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
-Updated: 12 May 2012
+Updated: 6 Jan 2015
Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
clip_cpus: cpumask of cpus where the frequency constraints will happen.
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+ struct device_node *np, const struct cpumask *clip_cpus)
+
+ This interface function registers the cpufreq cooling device with
+ the name "thermal-cpufreq-%x" linking it with a device tree node, in
+ order to bind it via the thermal DT code. This api can support multiple
+ instances of cpufreq cooling devices.
+
+ np: pointer to the cooling device device tree node
+ clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
This interface function unregisters the "thermal-cpufreq-%x" cooling device.
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
index bba7dbfc49ed..02361552a3ea 100644
--- a/Documentation/trace/coresight.txt
+++ b/Documentation/trace/coresight.txt
@@ -46,7 +46,7 @@ At typical coresight system would look like this:
| | . | ! | | . | ! | ! . | | SWD/
| | . | ! | | . | ! | ! . | | JTAG
*****************************************************************<-|
- *************************** AMBA Debug ABP ************************
+ *************************** AMBA Debug APB ************************
*****************************************************************
| . ! . ! ! . |
| . * . * * . |
@@ -79,7 +79,7 @@ At typical coresight system would look like this:
To trace port TPIU= Trace Port Interface Unit
SWD = Serial Wire Debug
-While on target configuration of the components is done via the ABP bus,
+While on target configuration of the components is done via the APB bus,
all trace data are carried out-of-band on the ATB bus. The CTM provides
a way to aggregate and distribute signals between CoreSight components.
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 8408e040f06f..572ca923631a 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1740,7 +1740,7 @@ no pid
yum-updatesd-3111 [003] 1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
yum-updatesd-3111 [003] 1637.254685: fget_light <-do_sys_poll
yum-updatesd-3111 [003] 1637.254686: pipe_poll <-do_sys_poll
-# echo -1 > set_ftrace_pid
+# echo > set_ftrace_pid
# cat trace |head
# tracer: function
#
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
new file mode 100644
index 000000000000..076ac7ba7f93
--- /dev/null
+++ b/Documentation/usb/gadget-testing.txt
@@ -0,0 +1,728 @@
+This file summarizes information on basic testing of USB functions
+provided by gadgets.
+
+1. ACM function
+2. ECM function
+3. ECM subset function
+4. EEM function
+5. FFS function
+6. HID function
+7. LOOPBACK function
+8. MASS STORAGE function
+9. MIDI function
+10. NCM function
+11. OBEX function
+12. PHONET function
+13. RNDIS function
+14. SERIAL function
+15. SOURCESINK function
+16. UAC1 function
+17. UAC2 function
+18. UVC function
+
+
+1. ACM function
+===============
+
+The function is provided by usb_f_acm.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "acm".
+The ACM function provides just one attribute in its function directory:
+
+ port_num
+
+The attribute is read-only.
+
+There can be at most 4 ACM/generic serial/OBEX ports in the system.
+
+
+Testing the ACM function
+------------------------
+
+On the host: cat > /dev/ttyACM<X>
+On the device : cat /dev/ttyGS<Y>
+
+then the other way round
+
+On the device: cat > /dev/ttyGS<Y>
+On the host: cat /dev/ttyACM<X>
+
+2. ECM function
+===============
+
+The function is provided by usb_f_ecm.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "ecm".
+The ECM function provides these attributes in its function directory:
+
+ ifname - network device interface name associated with this
+ function instance
+ qmult - queue length multiplier for high and super speed
+ host_addr - MAC address of host's end of this
+ Ethernet over USB link
+ dev_addr - MAC address of device's end of this
+ Ethernet over USB link
+
+and after creating the functions/ecm.<instance name> they contain default
+values: qmult is 5, dev_addr and host_addr are randomly selected.
+Except for ifname they can be written to until the function is linked to a
+configuration. The ifname is read-only and contains the name of the interface
+which was assigned by the net core, e. g. usb0.
+
+Testing the ECM function
+------------------------
+
+Configure IP addresses of the device and the host. Then:
+
+On the device: ping <host's IP>
+On the host: ping <device's IP>
+
+3. ECM subset function
+======================
+
+The function is provided by usb_f_ecm_subset.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "geth".
+The ECM subset function provides these attributes in its function directory:
+
+ ifname - network device interface name associated with this
+ function instance
+ qmult - queue length multiplier for high and super speed
+ host_addr - MAC address of host's end of this
+ Ethernet over USB link
+ dev_addr - MAC address of device's end of this
+ Ethernet over USB link
+
+and after creating the functions/geth.<instance name> they contain default
+values: qmult is 5, dev_addr and host_addr are randomly selected.
+Except for ifname they can be written to until the function is linked to a
+configuration. The ifname is read-only and contains the name of the interface
+which was assigned by the net core, e. g. usb0.
+
+Testing the ECM subset function
+-------------------------------
+
+Configure IP addresses of the device and the host. Then:
+
+On the device: ping <host's IP>
+On the host: ping <device's IP>
+
+4. EEM function
+===============
+
+The function is provided by usb_f_eem.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "eem".
+The EEM function provides these attributes in its function directory:
+
+ ifname - network device interface name associated with this
+ function instance
+ qmult - queue length multiplier for high and super speed
+ host_addr - MAC address of host's end of this
+ Ethernet over USB link
+ dev_addr - MAC address of device's end of this
+ Ethernet over USB link
+
+and after creating the functions/eem.<instance name> they contain default
+values: qmult is 5, dev_addr and host_addr are randomly selected.
+Except for ifname they can be written to until the function is linked to a
+configuration. The ifname is read-only and contains the name of the interface
+which was assigned by the net core, e. g. usb0.
+
+Testing the EEM function
+------------------------
+
+Configure IP addresses of the device and the host. Then:
+
+On the device: ping <host's IP>
+On the host: ping <device's IP>
+
+5. FFS function
+===============
+
+The function is provided by usb_f_fs.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "ffs".
+The function directory is intentionally empty and not modifiable.
+
+After creating the directory there is a new instance (a "device") of FunctionFS
+available in the system. Once a "device" is available, the user should follow
+the standard procedure for using FunctionFS (mount it, run the userspace
+process which implements the function proper). The gadget should be enabled
+by writing a suitable string to usb_gadget/<gadget>/UDC.
+
+Testing the FFS function
+------------------------
+
+On the device: start the function's userspace daemon, enable the gadget
+On the host: use the USB function provided by the device
+
+6. HID function
+===============
+
+The function is provided by usb_f_hid.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "hid".
+The HID function provides these attributes in its function directory:
+
+ protocol - HID protocol to use
+ report_desc - data to be used in HID reports, except data
+ passed with /dev/hidg<X>
+ report_length - HID report length
+ subclass - HID subclass to use
+
+For a keyboard the protocol and the subclass are 1, the report_length is 8,
+while the report_desc is:
+
+$ hd my_report_desc
+00000000 05 01 09 06 a1 01 05 07 19 e0 29 e7 15 00 25 01 |..........)...%.|
+00000010 75 01 95 08 81 02 95 01 75 08 81 03 95 05 75 01 |u.......u.....u.|
+00000020 05 08 19 01 29 05 91 02 95 01 75 03 91 03 95 06 |....).....u.....|
+00000030 75 08 15 00 25 65 05 07 19 00 29 65 81 00 c0 |u...%e....)e...|
+0000003f
+
+Such a sequence of bytes can be stored to the attribute with echo:
+
+$ echo -ne \\x05\\x01\\x09\\x06\\xa1.....
+
+Testing the HID function
+------------------------
+
+Device:
+- create the gadget
+- connect the gadget to a host, preferably not the one used
+to control the gadget
+- run a program which writes to /dev/hidg<N>, e.g.
+a userspace program found in Documentation/usb/gadget_hid.txt:
+
+$ ./hid_gadget_test /dev/hidg0 keyboard
+
+Host:
+- observe the keystrokes from the gadget
+
+7. LOOPBACK function
+====================
+
+The function is provided by usb_f_ss_lb.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "Loopback".
+The LOOPBACK function provides these attributes in its function directory:
+
+ qlen - depth of loopback queue
+ bulk_buflen - buffer length
+
+Testing the LOOPBACK function
+-----------------------------
+
+device: run the gadget
+host: test-usb
+
+http://www.linux-usb.org/usbtest/testusb.c
+
+8. MASS STORAGE function
+========================
+
+The function is provided by usb_f_mass_storage.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "mass_storage".
+The MASS STORAGE function provides these attribute files in its function
+directory:
+
+ stall - Set to permit function to halt bulk endpoints.
+ Disabled on some USB devices known not to work
+ correctly. You should set it to true.
+ num_buffers - Number of pipeline buffers. Valid numbers
+ are 2..4. Available only if
+ CONFIG_USB_GADGET_DEBUG_FILES is set.
+
+and a default lun.0 directory corresponding to SCSI LUN #0.
+
+A new lun can be added with mkdir:
+
+$ mkdir functions/mass_storage.0/partition.5
+
+Lun numbering does not have to be continuous, except for lun #0 which is
+created by default. A maximum of 8 luns can be specified and they all must be
+named following the <name>.<number> scheme. The numbers can be 0..8.
+Probably a good convention is to name the luns "lun.<number>",
+although it is not mandatory.
+
+In each lun directory there are the following attribute files:
+
+ file - The path to the backing file for the LUN.
+ Required if LUN is not marked as removable.
+ ro - Flag specifying access to the LUN shall be
+ read-only. This is implied if CD-ROM emulation
+ is enabled as well as when it was impossible
+ to open "filename" in R/W mode.
+ removable - Flag specifying that LUN shall be indicated as
+ being removable.
+ cdrom - Flag specifying that LUN shall be reported as
+ being a CD-ROM.
+ nofua - Flag specifying that the FUA flag
+ in SCSI WRITE(10,12) commands
+ should be ignored
+
+Testing the MASS STORAGE function
+---------------------------------
+
+device: connect the gadget, enable it
+host: dmesg, see the USB drive appear (and get mounted, if the system is
+configured to automount)
+
+9. MIDI function
+================
+
+The function is provided by usb_f_midi.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "midi".
+The MIDI function provides these attributes in its function directory:
+
+ buflen - MIDI buffer length
+ id - ID string for the USB MIDI adapter
+ in_ports - number of MIDI input ports
+ index - index value for the USB MIDI adapter
+ out_ports - number of MIDI output ports
+ qlen - USB read request queue length
+
+Testing the MIDI function
+-------------------------
+
+There are two cases: playing a .mid file from the gadget to
+the host and playing a .mid file from the host to the gadget.
+
+1) Playing a .mid file from the gadget to the host
+host)
+
+$ arecordmidi -l
+ Port Client name Port name
+ 14:0 Midi Through Midi Through Port-0
+ 24:0 MIDI Gadget MIDI Gadget MIDI 1
+$ arecordmidi -p 24:0 from_gadget.mid
+
+gadget)
+
+$ aplaymidi -l
+ Port Client name Port name
+ 20:0 f_midi f_midi
+
+$ aplaymidi -p 20:0 to_host.mid
+
+2) Playing a .mid file from the host to the gadget
+gadget)
+
+$ arecordmidi -l
+ Port Client name Port name
+ 20:0 f_midi f_midi
+
+$ arecordmidi -p 20:0 from_host.mid
+
+host)
+
+$ aplaymidi -l
+ Port Client name Port name
+ 14:0 Midi Through Midi Through Port-0
+ 24:0 MIDI Gadget MIDI Gadget MIDI 1
+
+$ aplaymidi -p24:0 to_gadget.mid
+
+The from_gadget.mid should sound identical to the to_host.mid.
+The from_host.mid should sound identical to the to_gadget.mid.
+
+MIDI files can be played to speakers/headphones with e.g. timidity installed
+
+$ aplaymidi -l
+ Port Client name Port name
+ 14:0 Midi Through Midi Through Port-0
+ 24:0 MIDI Gadget MIDI Gadget MIDI 1
+128:0 TiMidity TiMidity port 0
+128:1 TiMidity TiMidity port 1
+128:2 TiMidity TiMidity port 2
+128:3 TiMidity TiMidity port 3
+
+$ aplaymidi -p 128:0 file.mid
+
+MIDI ports can be logically connected using the aconnect utility, e.g.:
+
+$ aconnect 24:0 128:0 # try it on the host
+
+After the gadget's MIDI port is connected to timidity's MIDI port,
+whatever is played at the gadget side with aplaymidi is audible
+in the host's speakers/headphones.
+
+10. NCM function
+================
+
+The function is provided by usb_f_ncm.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "ncm".
+The NCM function provides these attributes in its function directory:
+
+ ifname - network device interface name associated with this
+ function instance
+ qmult - queue length multiplier for high and super speed
+ host_addr - MAC address of host's end of this
+ Ethernet over USB link
+ dev_addr - MAC address of device's end of this
+ Ethernet over USB link
+
+and after creating the functions/ncm.<instance name> they contain default
+values: qmult is 5, dev_addr and host_addr are randomly selected.
+Except for ifname they can be written to until the function is linked to a
+configuration. The ifname is read-only and contains the name of the interface
+which was assigned by the net core, e. g. usb0.
+
+Testing the NCM function
+------------------------
+
+Configure IP addresses of the device and the host. Then:
+
+On the device: ping <host's IP>
+On the host: ping <device's IP>
+
+11. OBEX function
+=================
+
+The function is provided by usb_f_obex.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "obex".
+The OBEX function provides just one attribute in its function directory:
+
+ port_num
+
+The attribute is read-only.
+
+There can be at most 4 ACM/generic serial/OBEX ports in the system.
+
+Testing the OBEX function
+-------------------------
+
+On device: seriald -f /dev/ttyGS<Y> -s 1024
+On host: serialc -v <vendorID> -p <productID> -i<interface#> -a1 -s1024 \
+ -t<out endpoint addr> -r<in endpoint addr>
+
+where seriald and serialc are Felipe's utilities found here:
+
+https://git.gitorious.org/usb/usb-tools.git master
+
+12. PHONET function
+===================
+
+The function is provided by usb_f_phonet.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "phonet".
+The PHONET function provides just one attribute in its function directory:
+
+ ifname - network device interface name associated with this
+ function instance
+
+Testing the PHONET function
+---------------------------
+
+It is not possible to test the SOCK_STREAM protocol without a specific piece
+of hardware, so only SOCK_DGRAM has been tested. For the latter to work,
+in the past I had to apply the patch mentioned here:
+
+http://www.spinics.net/lists/linux-usb/msg85689.html
+
+These tools are required:
+
+git://git.gitorious.org/meego-cellular/phonet-utils.git
+
+On the host:
+
+$ ./phonet -a 0x10 -i usbpn0
+$ ./pnroute add 0x6c usbpn0
+$ ./pnroute add 0x10 usbpn0
+$ ifconfig usbpn0 up
+
+On the device:
+
+$ ./phonet -a 0x6c -i upnlink0
+$ ./pnroute add 0x10 upnlink0
+$ ifconfig upnlink0 up
+
+Then a test program can be used:
+
+http://www.spinics.net/lists/linux-usb/msg85690.html
+
+On the device:
+
+$ ./pnxmit -a 0x6c -r
+
+On the host:
+
+$ ./pnxmit -a 0x10 -s 0x6c
+
+As a result some data should be sent from host to device.
+Then the other way round:
+
+On the host:
+
+$ ./pnxmit -a 0x10 -r
+
+On the device:
+
+$ ./pnxmit -a 0x6c -s 0x10
+
+13. RNDIS function
+==================
+
+The function is provided by usb_f_rndis.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "rndis".
+The RNDIS function provides these attributes in its function directory:
+
+ ifname - network device interface name associated with this
+ function instance
+ qmult - queue length multiplier for high and super speed
+ host_addr - MAC address of host's end of this
+ Ethernet over USB link
+ dev_addr - MAC address of device's end of this
+ Ethernet over USB link
+
+and after creating the functions/rndis.<instance name> they contain default
+values: qmult is 5, dev_addr and host_addr are randomly selected.
+Except for ifname they can be written to until the function is linked to a
+configuration. The ifname is read-only and contains the name of the interface
+which was assigned by the net core, e. g. usb0.
+
+By default there can be only 1 RNDIS interface in the system.
+
+Testing the RNDIS function
+--------------------------
+
+Configure IP addresses of the device and the host. Then:
+
+On the device: ping <host's IP>
+On the host: ping <device's IP>
+
+14. SERIAL function
+===================
+
+The function is provided by usb_f_gser.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "gser".
+The SERIAL function provides just one attribute in its function directory:
+
+ port_num
+
+The attribute is read-only.
+
+There can be at most 4 ACM/generic serial/OBEX ports in the system.
+
+Testing the SERIAL function
+---------------------------
+
+On host: insmod usbserial
+ echo VID PID >/sys/bus/usb-serial/drivers/generic/new_id
+On host: cat > /dev/ttyUSB<X>
+On target: cat /dev/ttyGS<Y>
+
+then the other way round
+
+On target: cat > /dev/ttyGS<Y>
+On host: cat /dev/ttyUSB<X>
+
+15. SOURCESINK function
+=======================
+
+The function is provided by usb_f_ss_lb.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "SourceSink".
+The SOURCESINK function provides these attributes in its function directory:
+
+ pattern - 0 (all zeros), 1 (mod63), 2 (none)
+ isoc_interval - 1..16
+ isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss)
+ isoc_mult - 0..2 (hs/ss only)
+ isoc_maxburst - 0..15 (ss only)
+ bulk_buflen - buffer length
+
+Testing the SOURCESINK function
+-------------------------------
+
+device: run the gadget
+host: test-usb
+
+http://www.linux-usb.org/usbtest/testusb.c
+
+16. UAC1 function
+=================
+
+The function is provided by usb_f_uac1.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "uac1".
+The uac1 function provides these attributes in its function directory:
+
+ audio_buf_size - audio buffer size
+ fn_cap - capture pcm device file name
+ fn_cntl - control device file name
+ fn_play - playback pcm device file name
+ req_buf_size - ISO OUT endpoint request buffer size
+ req_count - ISO OUT endpoint request count
+
+The attributes have sane default values.
+
+Testing the UAC1 function
+-------------------------
+
+device: run the gadget
+host: aplay -l # should list our USB Audio Gadget
+
+17. UAC2 function
+=================
+
+The function is provided by usb_f_uac2.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "uac2".
+The uac2 function provides these attributes in its function directory:
+
+ chmask - capture channel mask
+ c_srate - capture sampling rate
+ c_ssize - capture sample size (bytes)
+ p_chmask - playback channel mask
+ p_srate - playback sampling rate
+ p_ssize - playback sample size (bytes)
+
+The attributes have sane default values.
+
+Testing the UAC2 function
+-------------------------
+
+device: run the gadget
+host: aplay -l # should list our USB Audio Gadget
+
+This function does not require real hardware support, it just
+sends a stream of audio data to/from the host. In order to
+actually hear something at the device side, a command similar
+to the following must be used there:
+
+$ arecord -f dat -t wav -D hw:2,0 | aplay -D hw:0,0 &
+
+e.g.:
+
+$ arecord -f dat -t wav -D hw:CARD=UAC2Gadget,DEV=0 | \
+aplay -D default:CARD=OdroidU3
+
+18. UVC function
+================
+
+The function is provided by usb_f_uvc.ko module.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "uvc".
+The uvc function provides these attributes in its function directory:
+
+ streaming_interval - interval for polling endpoint for data transfers
+ streaming_maxburst - bMaxBurst for super speed companion descriptor
+ streaming_maxpacket - maximum packet size this endpoint is capable of
+ sending or receiving when this configuration is
+ selected
+
+There are also "control" and "streaming" subdirectories, each of which
+contains a number of subdirectories. Some sane defaults are provided, but
+the user must provide the following:
+
+ control header - create in control/header, link from control/class/fs
+ and/or control/class/ss
+ streaming header - create in streaming/header, link from
+ streaming/class/fs and/or streaming/class/hs and/or
+ streaming/class/ss
+ format description - create in streaming/mjpeg and/or
+ streaming/uncompressed
+ frame description - create in streaming/mjpeg/<format> and/or in
+ streaming/uncompressed/<format>
+
+Each frame description contains a frame interval specification, and each
+such specification consists of a number of lines with an interval value
+on each line. The rules stated above are best illustrated with an example:
+
+# mkdir functions/uvc.usb0/control/header/h
+# cd functions/uvc.usb0/control/header/h
+# ln -s header/h class/fs
+# ln -s header/h class/ss
+# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
+# cat <<EOF > functions/uvc.usb0/streaming/uncompressed/u/360p/dwFrameInterval
+666666
+1000000
+5000000
+EOF
+# cd $GADGET_CONFIGFS_ROOT
+# mkdir functions/uvc.usb0/streaming/header/h
+# cd functions/uvc.usb0/streaming/header/h
+# ln -s ../../uncompressed/u
+# cd ../../class/fs
+# ln -s ../../header/h
+# cd ../../class/hs
+# ln -s ../../header/h
+# cd ../../class/ss
+# ln -s ../../header/h
+
+
+Testing the UVC function
+------------------------
+
+device: run the gadget, modprobe vivid
+
+# uvc-gadget -u /dev/video<uvc video node #> -v /dev/video<vivid video node #>
+
+where uvc-gadget is this program:
+http://git.ideasonboard.org/uvc-gadget.git
+
+with these patches:
+http://www.spinics.net/lists/linux-usb/msg99220.html
+
+host: luvcview -f yuv
diff --git a/Documentation/usb/gadget_serial.txt b/Documentation/usb/gadget_serial.txt
index 61e67f6a20a0..6b4a88a8c8e3 100644
--- a/Documentation/usb/gadget_serial.txt
+++ b/Documentation/usb/gadget_serial.txt
@@ -236,8 +236,12 @@ I: If#= 0 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=serial
E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
-You must explicitly load the usbserial driver with parameters to
-configure it to recognize the gadget serial device, like this:
+You must load the usbserial driver and explicitly set its parameters
+to configure it to recognize the gadget serial device, like this:
+
+ echo 0x0525 0xA4A6 >/sys/bus/usb-serial/drivers/generic/new_id
+
+The legacy way is to use module parameters:
modprobe usbserial vendor=0x0525 product=0xA4A6
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index c42bb9cd3b43..28425f736756 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -72,7 +72,7 @@ to listen on a single bus, otherwise, to listen on all buses, type:
# cat /sys/kernel/debug/usb/usbmon/0u > /tmp/1.mon.out
-This process will be reading until killed. Naturally, the output can be
+This process will read until it is killed. Naturally, the output can be
redirected to a desirable location. This is preferred, because it is going
to be quite long.
@@ -231,7 +231,7 @@ number. Number zero (/dev/usbmon0) is special and means "all buses".
Note that specific naming policy is set by your Linux distribution.
If you create /dev/usbmon0 by hand, make sure that it is owned by root
-and has mode 0600. Otherwise, unpriviledged users will be able to snoop
+and has mode 0600. Otherwise, unprivileged users will be able to snoop
keyboard traffic.
The following ioctl calls are available, with MON_IOC_MAGIC 0x92:
diff --git a/Documentation/video4linux/CQcam.txt b/Documentation/video4linux/CQcam.txt
deleted file mode 100644
index 0b69e4ee8e31..000000000000
--- a/Documentation/video4linux/CQcam.txt
+++ /dev/null
@@ -1,205 +0,0 @@
-c-qcam - Connectix Color QuickCam video4linux kernel driver
-
-Copyright (C) 1999 Dave Forrest <drf5n@virginia.edu>
- released under GNU GPL.
-
-1999-12-08 Dave Forrest, written with kernel version 2.2.12 in mind
-
-
-Table of Contents
-
-1.0 Introduction
-2.0 Compilation, Installation, and Configuration
-3.0 Troubleshooting
-4.0 Future Work / current work arounds
-9.0 Sample Program, v4lgrab
-10.0 Other Information
-
-
-1.0 Introduction
-
- The file ../../drivers/media/parport/c-qcam.c is a device driver for
-the Logitech (nee Connectix) parallel port interface color CCD camera.
-This is a fairly inexpensive device for capturing images. Logitech
-does not currently provide information for developers, but many people
-have engineered several solutions for non-Microsoft use of the Color
-Quickcam.
-
-1.1 Motivation
-
- I spent a number of hours trying to get my camera to work, and I
-hope this document saves you some time. My camera will not work with
-the 2.2.13 kernel as distributed, but with a few patches to the
-module, I was able to grab some frames. See 4.0, Future Work.
-
-
-
-2.0 Compilation, Installation, and Configuration
-
- The c-qcam depends on parallel port support, video4linux, and the
-Color Quickcam. It is also nice to have the parallel port readback
-support enabled. I enabled these as modules during the kernel
-configuration. The appropriate flags are:
-
- CONFIG_PRINTER M for lp.o, parport.o parport_pc.o modules
- CONFIG_PNP_PARPORT M for autoprobe.o IEEE1284 readback module
- CONFIG_PRINTER_READBACK M for parport_probe.o IEEE1284 readback module
- CONFIG_VIDEO_DEV M for videodev.o video4linux module
- CONFIG_VIDEO_CQCAM M for c-qcam.o Color Quickcam module
-
- With these flags, the kernel should compile and install the modules.
-To record and monitor the compilation, I use:
-
- (make zlilo ; \
- make modules; \
- make modules_install ;
- depmod -a ) &>log &
- less log # then a capital 'F' to watch the progress
-
-But that is my personal preference.
-
-2.2 Configuration
-
- The configuration requires module configuration and device
-configuration. The following sections detail these procedures.
-
-
-2.1 Module Configuration
-
- Using modules requires a bit of work to install and pass the
-parameters. Understand that entries in /etc/modprobe.d/*.conf of:
-
- alias parport_lowlevel parport_pc
- options parport_pc io=0x378 irq=none
- alias char-major-81 videodev
- alias char-major-81-0 c-qcam
-
-2.2 Device Configuration
-
- At this point, we need to ensure that the device files exist.
-Video4linux used the /dev/video* files, and we want to attach the
-Quickcam to one of these.
-
- ls -lad /dev/video* # should produce a list of the video devices
-
-If the video devices do not exist, you can create them with:
-
- su
- cd /dev
- for ii in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 ; do
- mknod video$ii c 81 $ii # char-major-81-[0-16]
- chown root.root video$ii # owned by root
- chmod 600 video$ii # read/writable by root only
- done
-
- Lots of people connect video0 to video and bttv, but you might want
-your c-qcam to mean something more:
-
- ln -s video0 c-qcam # make /dev/c-qcam a working file
- ln -s c-qcam video # make /dev/c-qcam your default video source
-
- But these are conveniences. The important part is to make the proper
-special character files with the right major and minor numbers. All
-of the special device files are listed in ../devices.txt. If you
-would like the c-qcam readable by non-root users, you will need to
-change the permissions.
-
-3.0 Troubleshooting
-
- If the sample program below, v4lgrab, gives you output then
-everything is working.
-
- v4lgrab | wc # should give you a count of characters
-
- Otherwise, you have some problem.
-
- The c-qcam is IEEE1284 compatible, so if you are using the proc file
-system (CONFIG_PROC_FS), the parallel printer support
-(CONFIG_PRINTER), the IEEE 1284 system,(CONFIG_PRINTER_READBACK), you
-should be able to read some identification from your quickcam with
-
- modprobe -v parport
- modprobe -v parport_probe
- cat /proc/parport/PORTNUMBER/autoprobe
-Returns:
- CLASS:MEDIA;
- MODEL:Color QuickCam 2.0;
- MANUFACTURER:Connectix;
-
- A good response to this indicates that your color quickcam is alive
-and well. A common problem is that the current driver does not
-reliably detect a c-qcam, even though one is attached. In this case,
-
- modprobe -v c-qcam
-or
- insmod -v c-qcam
-
- Returns a message saying "Device or resource busy" Development is
-currently underway, but a workaround is to patch the module to skip
-the detection code and attach to a defined port. Check the
-video4linux mailing list and archive for more current information.
-
-3.1 Checklist:
-
- Can you get an image?
- v4lgrab >qcam.ppm ; wc qcam.ppm ; xv qcam.ppm
-
- Is a working c-qcam connected to the port?
- grep ^ /proc/parport/?/autoprobe
-
- Do the /dev/video* files exist?
- ls -lad /dev/video
-
- Is the c-qcam module loaded?
- modprobe -v c-qcam ; lsmod
-
- Does the camera work with alternate programs? cqcam, etc?
-
-
-
-
-4.0 Future Work / current workarounds
-
- It is hoped that this section will soon become obsolete, but if it
-isn't, you might try patching the c-qcam module to add a parport=xxx
-option as in the bw-qcam module so you can specify the parallel port:
-
- insmod -v c-qcam parport=0
-
-And bypass the detection code, see ../../drivers/char/c-qcam.c and
-look for the 'qc_detect' code and call.
-
- Note that there is work in progress to change the video4linux API,
-this work is documented at the video4linux2 site listed below.
-
-
-9.0 --- A sample program using v4lgrabber,
-
-v4lgrab is a simple image grabber that will copy a frame from the
-first video device, /dev/video0 to standard output in portable pixmap
-format (.ppm) To produce .jpg output, you can use it like this:
-'v4lgrab | convert - c-qcam.jpg'
-
-
-10.0 --- Other Information
-
-Use the ../../Maintainers file, particularly the VIDEO FOR LINUX and PARALLEL
-PORT SUPPORT sections
-
-The video4linux page:
- http://linuxtv.org
-
-The V4L2 API spec:
- http://v4l2spec.bytesex.org/
-
-Some web pages about the quickcams:
- http://www.pingouin-land.com/howto/QuickCam-HOWTO.html
-
- http://www.crynwr.com/qcpc/ QuickCam Third-Party Drivers
- http://www.crynwr.com/qcpc/re.html Some Reverse Engineering
- http://www.wirelesscouch.net/software/gqcam/ v4l client
- http://phobos.illtel.denver.co.us/pub/qcread/ doesn't use v4l
- ftp://ftp.cs.unm.edu/pub/chris/quickcam/ Has lots of drivers
- http://www.cs.duke.edu/~reynolds/quickcam/ Has lots of information
-
-
diff --git a/Documentation/video4linux/README.tlg2300 b/Documentation/video4linux/README.tlg2300
deleted file mode 100644
index 416ccb93d8c9..000000000000
--- a/Documentation/video4linux/README.tlg2300
+++ /dev/null
@@ -1,47 +0,0 @@
-tlg2300 release notes
-====================
-
-This is a v4l2/dvb device driver for the tlg2300 chip.
-
-
-current status
-==============
-
-video
- - support mmap and read().(no overlay)
-
-audio
- - The driver will register a ALSA card for the audio input.
-
-vbi
- - Works for almost TV norms.
-
-dvb-t
- - works for DVB-T
-
-FM
- - Works for radio.
-
----------------------------------------------------------------------------
-TESTED APPLICATIONS:
-
--VLC1.0.4 test the video and dvb. The GUI is friendly to use.
-
--Mplayer test the video.
-
--Mplayer test the FM. The mplayer should be compiled with --enable-radio and
- --enable-radio-capture.
- The command runs as this(The alsa audio registers to card 1):
- #mplayer radio://103.7/capture/ -radio adevice=hw=1,0:arate=48000 \
- -rawaudio rate=48000:channels=2
-
----------------------------------------------------------------------------
-KNOWN PROBLEMS:
-about preemphasis:
- You can set the preemphasis for radio by the following command:
- #v4l2-ctl -d /dev/radio0 --set-ctrl=pre_emphasis_settings=1
-
- "pre_emphasis_settings=1" means that you select the 50us. If you want
- to select the 75us, please use "pre_emphasis_settings=2"
-
-
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index a11dff07ef71..f586e29ce221 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -793,8 +793,10 @@ video_register_device_no_warn() instead.
Whenever a device node is created some attributes are also created for you.
If you look in /sys/class/video4linux you see the devices. Go into e.g.
-video0 and you will see 'name' and 'index' attributes. The 'name' attribute
-is the 'name' field of the video_device struct.
+video0 and you will see 'name', 'debug' and 'index' attributes. The 'name'
+attribute is the 'name' field of the video_device struct. The 'debug' attribute
+can be used to enable core debugging. See the next section for more detailed
+information on this.
The 'index' attribute is the index of the device node: for each call to
video_register_device() the index is just increased by 1. The first video
@@ -816,6 +818,25 @@ video_device was embedded in it. The vdev->release() callback will never
be called if the registration failed, nor should you ever attempt to
unregister the device if the registration failed.
+video device debugging
+----------------------
+
+The 'debug' attribute that is created for each video, vbi, radio or swradio
+device in /sys/class/video4linux/<devX>/ allows you to enable logging of
+file operations.
+
+It is a bitmask and the following bits can be set:
+
+0x01: Log the ioctl name and error code. VIDIOC_(D)QBUF ioctls are only logged
+ if bit 0x08 is also set.
+0x02: Log the ioctl name arguments and error code. VIDIOC_(D)QBUF ioctls are
+ only logged if bit 0x08 is also set.
+0x04: Log the file operations open, release, read, write, mmap and
+ get_unmapped_area. The read and write operations are only logged if
+ bit 0x08 is also set.
+0x08: Log the read and write file operations and the VIDIOC_QBUF and
+ VIDIOC_DQBUF ioctls.
+0x10: Log the poll file operation.
video_device cleanup
--------------------
diff --git a/Documentation/video4linux/v4l2-pci-skeleton.c b/Documentation/video4linux/v4l2-pci-skeleton.c
index 006721e43b2a..7bd1b975bfd2 100644
--- a/Documentation/video4linux/v4l2-pci-skeleton.c
+++ b/Documentation/video4linux/v4l2-pci-skeleton.c
@@ -42,7 +42,6 @@
MODULE_DESCRIPTION("V4L2 PCI Skeleton Driver");
MODULE_AUTHOR("Hans Verkuil");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, skeleton_pci_tbl);
/**
* struct skeleton - All internal data for one instance of device
@@ -95,6 +94,7 @@ static const struct pci_device_id skeleton_pci_tbl[] = {
/* { PCI_DEVICE(PCI_VENDOR_ID_, PCI_DEVICE_ID_) }, */
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, skeleton_pci_tbl);
/*
* HDTV: this structure has the capabilities of the HDTV receiver.
diff --git a/Documentation/video4linux/w9966.txt b/Documentation/video4linux/w9966.txt
deleted file mode 100644
index 855024525fd2..000000000000
--- a/Documentation/video4linux/w9966.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-W9966 Camera driver, written by Jakob Kemi (jakob.kemi@telia.com)
-
-After a lot of work in softice & wdasm, reading .pdf-files and tiresome
-trial-and-error work I've finally got everything to work. I needed vision for a
-robotics project so I borrowed this camera from a friend and started hacking.
-Anyway I've converted my original code from the AVR 8bit RISC C/ASM code into
-a working Linux driver.
-
-To get it working simply configure your kernel to support
-parport, ieee1284, video4linux and w9966
-
-If w9966 is statically linked it will always perform aggressive probing for
-the camera. If built as a module you'll have more configuration options.
-
-Options:
- modprobe w9966.o pardev=parport0(or whatever) parmode=0 (0=auto, 1=ecp, 2=epp)
-voila!
-
-you can also type 'modinfo -p w9966.o' for option usage
-(or checkout w9966.c)
-
-The only thing to keep in mind is that the image format is in Y-U-Y-V format
-where every two pixels take 4 bytes. In SDL (www.libsdl.org) this format
-is called VIDEO_PALETTE_YUV422 (16 bpp).
-
-A minimal test application (with source) is available from:
- http://www.slackwaresupport.com/howtos/Webcam-HOWTO
-
-The slow framerate is due to missing DMA ECP read support in the
-parport drivers. I might add working EPP support later.
-
-Good luck!
- /Jakob Kemi
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 0007fef4ed81..b112efc816f1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -612,11 +612,14 @@ Type: vm ioctl
Parameters: none
Returns: 0 on success, -1 on error
-Creates an interrupt controller model in the kernel. On x86, creates a virtual
-ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
-local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC. On ARM/arm64, a GIC is
-created. On s390, a dummy irq routing table is created.
+Creates an interrupt controller model in the kernel.
+On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up
+future vcpus to have a local APIC. IRQ routing for GSIs 0-15 is set to both
+PIC and IOAPIC; GSI 16-23 only go to the IOAPIC.
+On ARM/arm64, a GICv2 is created. Any other GIC versions require the usage of
+KVM_CREATE_DEVICE, which also supports creating a GICv2. Using
+KVM_CREATE_DEVICE is preferred over KVM_CREATE_IRQCHIP for GICv2.
+On s390, a dummy irq routing table is created.
Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled
before KVM_CREATE_IRQCHIP can be used.
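+
+As a sketch only (vm_fd is assumed to be an open VM file descriptor obtained
+with KVM_CREATE_VM; error handling omitted), creating a GICv2 on ARM/arm64
+via the preferred KVM_CREATE_DEVICE path looks roughly like this:
+
+ struct kvm_create_device cd = {
+         .type = KVM_DEV_TYPE_ARM_VGIC_V2,
+ };
+
+ ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
+ /* on success, cd.fd is a file descriptor for the new in-kernel GIC */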
@@ -2312,7 +2315,7 @@ struct kvm_s390_interrupt {
type can be one of the following:
-KVM_S390_SIGP_STOP (vcpu) - sigp restart
+KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm
KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
KVM_S390_RESTART (vcpu) - restart
@@ -3225,3 +3228,23 @@ userspace from doing that.
If the hcall number specified is not one that has an in-kernel
implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
error.
+
+7.2 KVM_CAP_S390_USER_SIGP
+
+Architectures: s390
+Parameters: none
+
+This capability controls which SIGP orders will be handled completely in user
+space. With this capability enabled, all fast orders will be handled completely
+in the kernel:
+- SENSE
+- SENSE RUNNING
+- EXTERNAL CALL
+- EMERGENCY SIGNAL
+- CONDITIONAL EMERGENCY SIGNAL
+
+All other orders will be handled completely in user space.
+
+Only privileged operation exceptions will be checked for in the kernel (or even
+in the hardware prior to interception). If this capability is not enabled, the
+old way of handling SIGP orders is used (partially in kernel and user space).
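+
+As an illustrative sketch (vm_fd is assumed to be an open VM file descriptor;
+error handling omitted), enabling this capability follows the generic
+KVM_ENABLE_CAP vm ioctl pattern:
+
+ struct kvm_enable_cap cap = {
+         .cap = KVM_CAP_S390_USER_SIGP,
+ };
+
+ ioctl(vm_fd, KVM_ENABLE_CAP, &cap);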
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index df8b0c7540b6..3fb905429e8a 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -3,22 +3,42 @@ ARM Virtual Generic Interrupt Controller (VGIC)
Device types supported:
KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0
+ KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0
Only one VGIC instance may be instantiated through either this API or the
legacy KVM_CREATE_IRQCHIP api. The created VGIC will act as the VM interrupt
controller, requiring emulated user-space devices to inject interrupts to the
VGIC instead of directly to CPUs.
+Creating a guest GICv3 device requires a host GICv3 as well.
+GICv3 implementations with hardware compatibility support allow a guest GICv2
+as well.
+
Groups:
KVM_DEV_ARM_VGIC_GRP_ADDR
Attributes:
KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit)
Base address in the guest physical address space of the GIC distributor
- register mappings.
+ register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
+ This address needs to be 4K aligned and the region covers 4 KByte.
KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit)
Base address in the guest physical address space of the GIC virtual cpu
- interface register mappings.
+ interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
+ This address needs to be 4K aligned and the region covers 4 KByte.
+
+ KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
+ Base address in the guest physical address space of the GICv3 distributor
+ register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+ This address needs to be 64K aligned and the region covers 64 KByte.
+
+ KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit)
+ Base address in the guest physical address space of the GICv3
+ redistributor register mappings. There are two 64K pages for each
+ VCPU and all of the redistributor pages are contiguous.
+ Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+ This address needs to be 64K aligned.
+
KVM_DEV_ARM_VGIC_GRP_DIST_REGS
Attributes:
@@ -36,6 +56,7 @@ Groups:
the register.
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
+ - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
Errors:
-ENODEV: Getting or setting this register is not yet supported
-EBUSY: One or more VCPUs are running
@@ -68,6 +89,7 @@ Groups:
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
+ - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
Errors:
-ENODEV: Getting or setting this register is not yet supported
-EBUSY: One or more VCPUs are running
@@ -81,3 +103,14 @@ Groups:
-EINVAL: Value set is out of the expected range
-EBUSY: Value has already been set, or GIC has already been initialized
with default values.
+
+ KVM_DEV_ARM_VGIC_GRP_CTRL
+ Attributes:
+ KVM_DEV_ARM_VGIC_CTRL_INIT
+ request the initialization of the VGIC, no additional parameter in
+ kvm_device_attr.addr.
+ Errors:
+ -ENXIO: VGIC not properly configured as required prior to calling
+ this attribute
+ -ENODEV: no online VCPU
+ -ENOMEM: memory shortage when allocating vgic internal data
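As an illustrative sketch (not part of the patch), a VMM might drive the GICv3 path described above roughly as follows; the constants are assumed to come from the kvm uapi headers, and dist_base/redist_base are hypothetical 64K-aligned guest physical addresses chosen by the caller:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static int create_vgic_v3(int vm_fd, uint64_t dist_base, uint64_t redist_base)
    {
            struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
            struct kvm_device_attr attr;

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                    return -1;                  /* device fd comes back in cd.fd */

            /* distributor base: 64K aligned, one 64K region */
            attr = (struct kvm_device_attr) {
                    .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                    .attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
                    .addr  = (uint64_t)(uintptr_t)&dist_base,
            };
            if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;

            /* redistributors: two contiguous 64K pages per VCPU */
            attr.attr = KVM_VGIC_V3_ADDR_TYPE_REDIST;
            attr.addr = (uint64_t)(uintptr_t)&redist_base;
            if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;

            /* finally request initialization of the VGIC */
            attr = (struct kvm_device_attr) {
                    .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                    .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
            };
            return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
    }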
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index d426fc87fe93..5542c4641a3c 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -24,3 +24,62 @@ Returns: 0
Clear the CMMA status for all guest pages, so any pages the guest marked
as unused are again used and may not be reclaimed by the host.
+
+1.3. ATTRIBUTE KVM_S390_VM_MEM_LIMIT_SIZE
+Parameters: in attr->addr the address for the new limit of guest memory
+Returns: -EFAULT if the given address is not accessible
+ -EINVAL if the virtual machine is of type UCONTROL
+ -E2BIG if the given guest memory is too big for that machine
+ -EBUSY if a vcpu is already defined
+ -ENOMEM if not enough memory is available for a new shadow guest mapping
+ 0 otherwise
+
+Allows userspace to query the actual limit and set a new limit for
+the maximum guest memory size. The limit will be rounded up to
+2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
+the number of page table levels.
+
+2. GROUP: KVM_S390_VM_CPU_MODEL
+Architectures: s390
+
+2.1. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE (r/o)
+
+Allows user space to retrieve machine and kvm specific cpu related information:
+
+struct kvm_s390_vm_cpu_machine {
+ __u64 cpuid; # CPUID of host
+ __u32 ibc; # IBC level range offered by host
+ __u8 pad[4];
+ __u64 fac_mask[256]; # set of cpu facilities enabled by KVM
+ __u64 fac_list[256]; # set of cpu facilities offered by host
+}
+
+Parameters: address of buffer to store the machine related cpu data
+ of type struct kvm_s390_vm_cpu_machine*
+Returns: -EFAULT if the given address is not accessible from kernel space
+ -ENOMEM if not enough memory is available to process the ioctl
+ 0 in case of success
+
+2.2. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR (r/w)
+
+Allows user space to retrieve or request to change cpu related information for a vcpu:
+
+struct kvm_s390_vm_cpu_processor {
+ __u64 cpuid; # CPUID currently (to be) used by this vcpu
+ __u16 ibc; # IBC level currently (to be) used by this vcpu
+ __u8 pad[6];
+ __u64 fac_list[256]; # set of cpu facilities currently (to be) used
+ # by this vcpu
+}
+
+KVM does not enforce or limit the cpu model data in any form. Take the information
+retrieved by means of KVM_S390_VM_CPU_MACHINE as a hint for reasonable configuration
+setups. Instruction interceptions triggered by additionally set facility bits that
+are not handled by KVM need to be implemented in the VM driver code.
+
+Parameters: address of buffer to store/set the processor related cpu
+ data of type struct kvm_s390_vm_cpu_processor*.
+Returns: -EBUSY in case 1 or more vcpus are already activated (only in write case)
+ -EFAULT if the given address is not accessible from kernel space
+ -ENOMEM if not enough memory is available to process the ioctl
+ 0 in case of success
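A hedged user-space sketch of reading the KVM_S390_VM_CPU_MACHINE attribute described above; it assumes the vm file descriptor accepts the KVM_GET_DEVICE_ATTR ioctl for this group and that the constants and struct are exported by the s390 kvm uapi headers:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static int query_cpu_machine(int vm_fd, struct kvm_s390_vm_cpu_machine *mach)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_S390_VM_CPU_MODEL,
                    .attr  = KVM_S390_VM_CPU_MACHINE,
                    .addr  = (uint64_t)(uintptr_t)mach,   /* buffer described above */
            };

            return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
    }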
diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt
index 142fbb0f325a..01d76282444e 100644
--- a/Documentation/vm/cleancache.txt
+++ b/Documentation/vm/cleancache.txt
@@ -85,7 +85,7 @@ lock the page to ensure serial behavior.
CLEANCACHE PERFORMANCE METRICS
If properly configured, monitoring of cleancache is done via debugfs in
-the /sys/kernel/debug/mm/cleancache directory. The effectiveness of cleancache
+the /sys/kernel/debug/cleancache directory. The effectiveness of cleancache
can be measured (across all filesystems) with:
succ_gets - number of gets that were successful
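A trivial sketch of reading the counter from the relocated directory, assuming debugfs is mounted at /sys/kernel/debug and cleancache is active:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long succ_gets = 0;
            FILE *f = fopen("/sys/kernel/debug/cleancache/succ_gets", "r");

            if (!f)
                    return 1;   /* debugfs not mounted, or cleancache not enabled */
            if (fscanf(f, "%llu", &succ_gets) != 1)
                    succ_gets = 0;
            fclose(f);
            printf("cleancache successful gets: %llu\n", succ_gets);
            return 0;
    }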
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 5948e455c4d2..6fbd55ef6b45 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -62,6 +62,8 @@ There are three components to pagemap:
20. NOPAGE
21. KSM
22. THP
+ 23. BALLOON
+ 24. ZERO_PAGE
Short descriptions to the page flags:
@@ -102,6 +104,12 @@ Short descriptions to the page flags:
22. THP
contiguous pages which construct transparent hugepages
+23. BALLOON
+ balloon compaction page
+
+24. ZERO_PAGE
+ zero page for pfn_zero or huge_zero page
+
[IO related page flags]
1. ERROR IO error occurred
3. UPTODATE page has up-to-date data
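For illustration, a small sketch that tests the two new bits for a given PFN via /proc/kpageflags (one 64-bit flag word per page); the KPF_* names below are local defines matching the bit numbers listed above, not necessarily exported by a uapi header:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define KPF_BALLOON   23
    #define KPF_ZERO_PAGE 24

    static int dump_new_flags(uint64_t pfn)
    {
            uint64_t flags;
            int fd = open("/proc/kpageflags", O_RDONLY);

            if (fd < 0)
                    return -1;
            if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
                    close(fd);
                    return -1;
            }
            close(fd);

            printf("pfn %llu: balloon=%d zero_page=%d\n",
                   (unsigned long long)pfn,
                   !!(flags & (1ULL << KPF_BALLOON)),
                   !!(flags & (1ULL << KPF_ZERO_PAGE)));
            return 0;
    }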
diff --git a/Documentation/vm/remap_file_pages.txt b/Documentation/vm/remap_file_pages.txt
index 560e4363a55d..f609142f406a 100644
--- a/Documentation/vm/remap_file_pages.txt
+++ b/Documentation/vm/remap_file_pages.txt
@@ -18,10 +18,9 @@ on 32-bit systems to map files bigger than can linearly fit into 32-bit
virtual address space. This use-case is not critical anymore since 64-bit
systems are widely available.
-The plan is to deprecate the syscall and replace it with an emulation.
-The emulation will create new VMAs instead of nonlinear mappings. It's
-going to work slower for rare users of remap_file_pages() but ABI is
-preserved.
+The syscall is now deprecated and replaced with an emulation. The
+emulation creates new VMAs instead of nonlinear mappings. It will be
+slower for rare users of remap_file_pages(), but the ABI is preserved.
One side effect of emulation (apart from performance) is that user can hit
vm.max_map_count limit more easily due to additional VMAs. See comment for
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.txt
index 4a1c5c2dc5a9..9132b86176a3 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.txt
@@ -78,9 +78,6 @@ The expensive (paranoid) way is to read back the MSR_GS_BASE value
xorl %ebx,%ebx
1: ret
-and the whole paranoid non-paranoid macro complexity is about whether
-to suffer that RDMSR cost.
-
If we are at an interrupt or user-trap/gate-alike boundary then we can
use the faster check: the stack will be a reliable indicator of
whether SWAPGS was already done: if we see that we are a secondary
@@ -93,6 +90,15 @@ which might have triggered right after a normal entry wrote CS to the
stack but before we executed SWAPGS, then the only safe way to check
for GS is the slower method: the RDMSR.
-So we try only to mark those entry methods 'paranoid' that absolutely
-need the more expensive check for the GS base - and we generate all
-'normal' entry points with the regular (faster) entry macros.
+Therefore, super-atomic entries (except NMI, which is handled separately)
+must use idtentry with paranoid=1 to handle gsbase correctly. This
+triggers three main behavior changes:
+
+ - Interrupt entry will use the slower gsbase check.
+ - Interrupt entry from user mode will switch off the IST stack.
+ - Interrupt exit to kernel mode will not attempt to reschedule.
+
+We try to only use IST entries and the paranoid entry code for vectors
+that absolutely need the more expensive check for the GS base - and we
+generate all 'normal' entry points with the regular (faster) paranoid=0
+variant.
diff --git a/Documentation/x86/x86_64/kernel-stacks b/Documentation/x86/x86_64/kernel-stacks
index a01eec5d1d0b..e3c8a49d1a2f 100644
--- a/Documentation/x86/x86_64/kernel-stacks
+++ b/Documentation/x86/x86_64/kernel-stacks
@@ -40,9 +40,11 @@ An IST is selected by a non-zero value in the IST field of an
interrupt-gate descriptor. When an interrupt occurs and the hardware
loads such a descriptor, the hardware automatically sets the new stack
pointer based on the IST value, then invokes the interrupt handler. If
-software wants to allow nested IST interrupts then the handler must
-adjust the IST values on entry to and exit from the interrupt handler.
-(This is occasionally done, e.g. for debug exceptions.)
+the interrupt came from user mode, then the interrupt handler prologue
+will switch back to the per-thread stack. If software wants to allow
+nested IST interrupts then the handler must adjust the IST values on
+entry to and exit from the interrupt handler. (This is occasionally
+done, e.g. for debug exceptions.)
Events with different IST codes (i.e. with different stacks) can be
nested. For example, a debug interrupt can safely be interrupted by an
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 052ee643a32e..05712ac83e38 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
... unused hole ...
+ffffec0000000000 - fffffc0000000000 (=44 bits) kasan shadow memory (16TB)
+... unused hole ...
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
... unused hole ...
ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
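The size of the new window is consistent with the usual KASAN scheme of shadowing one byte per eight bytes of address space. A quick check, assuming the conventional x86_64 shadow offset of 0xdffffc0000000000 and shadow(addr) = (addr >> 3) + offset (an assumption of this sketch, not something stated in the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long offset = 0xdffffc0000000000ULL;  /* assumed */
            unsigned long long lo = 0xffff800000000000ULL;      /* start of kernel half */
            unsigned long long hi = 0xffffffffffffffffULL;      /* end of kernel half   */

            /* prints ffffec0000000000 and fffffbffffffffff, i.e. the 16TB region above */
            printf("shadow start: %llx\n", (lo >> 3) + offset);
            printf("shadow end:   %llx\n", (hi >> 3) + offset);
            return 0;
    }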
diff --git a/MAINTAINERS b/MAINTAINERS
index a087a6ddce36..85024e23309f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -34,7 +34,7 @@ trivial patch so apply some common sense.
generalized kernel feature ready for next time.
PLEASE check your patch with the automated style checker
- (scripts/checkpatch.pl) to catch trival style violations.
+ (scripts/checkpatch.pl) to catch trivial style violations.
See Documentation/CodingStyle for guidance here.
PLEASE CC: the maintainers and mailing lists that are generated
@@ -270,12 +270,12 @@ F: drivers/acpi/
F: drivers/pnp/pnpacpi/
F: include/linux/acpi.h
F: include/acpi/
-F: Documentation/acpi
+F: Documentation/acpi/
F: Documentation/ABI/testing/sysfs-bus-acpi
F: drivers/pci/*acpi*
F: drivers/pci/*/*acpi*
F: drivers/pci/*/*/*acpi*
-F: tools/power/acpi
+F: tools/power/acpi/
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
@@ -563,6 +563,12 @@ S: Odd Fixes
L: linux-alpha@vger.kernel.org
F: arch/alpha/
+ALTERA MAILBOX DRIVER
+M: Ley Foon Tan <lftan@altera.com>
+L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/mailbox/mailbox-altera.c
+
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Vince Bridgers <vbridger@opensource.altera.com>
L: netdev@vger.kernel.org
@@ -624,6 +630,8 @@ L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~gabbayo/linux.git
S: Supported
F: drivers/gpu/drm/amd/amdkfd/
+F: drivers/gpu/drm/amd/include/cik_structs.h
+F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
F: drivers/gpu/drm/radeon/radeon_kfd.c
F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
@@ -659,6 +667,13 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/ad9389b*
+ANALOG DEVICES INC ADV7180 DRIVER
+M: Lars-Peter Clausen <lars@metafoo.de>
+L: linux-media@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/media/i2c/adv7180.c
+
ANALOG DEVICES INC ADV7511 DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@@ -696,7 +711,7 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://blackfin.uclinux.org/
S: Supported
F: sound/soc/blackfin/*
-
+
ANALOG DEVICES INC IIO DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
M: Michael Hennerich <Michael.Hennerich@analog.com>
@@ -708,6 +723,16 @@ X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
F: staging/iio/trigger/iio-trig-bfin-timer.c
+ANDROID DRIVERS
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Arve Hjønnevåg <arve@android.com>
+M: Riley Andrews <riandrews@android.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+L: devel@driverdev.osuosl.org
+S: Supported
+F: drivers/android/
+F: drivers/staging/android/
+
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg <johannes@sipsolutions.net>
L: linuxppc-dev@lists.ozlabs.org
@@ -754,13 +779,6 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/aptina-pll.*
-ARASAN COMPACT FLASH PATA CONTROLLER
-M: Viresh Kumar <viresh.linux@gmail.com>
-L: linux-ide@vger.kernel.org
-S: Maintained
-F: include/linux/pata_arasan_cf_data.h
-F: drivers/ata/pata_arasan_cf.c
-
ARC FRAMEBUFFER DRIVER
M: Jaya Kumar <jayalk@intworks.biz>
S: Maintained
@@ -959,7 +977,7 @@ S: Maintained
F: arch/arm/mach-prima2/
F: drivers/clk/sirf/
F: drivers/clocksource/timer-prima2.c
-F: drivers/clocksource/timer-marco.c
+F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
ARM/EBSA110 MACHINE SUPPORT
@@ -1158,6 +1176,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
+F: drivers/rtc/armada38x-rtc
ARM/Marvell Berlin SoC support
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1399,7 +1418,6 @@ F: arch/arm/configs/ape6evm_defconfig
F: arch/arm/configs/armadillo800eva_defconfig
F: arch/arm/configs/bockw_defconfig
F: arch/arm/configs/kzm9g_defconfig
-F: arch/arm/configs/lager_defconfig
F: arch/arm/configs/mackerel_defconfig
F: arch/arm/configs/marzen_defconfig
F: arch/arm/configs/shmobile_defconfig
@@ -1587,12 +1605,14 @@ N: xilinx
F: drivers/clocksource/cadence_ttc_timer.c
F: drivers/i2c/busses/i2c-cadence.c
F: drivers/mmc/host/sdhci-of-arasan.c
+F: drivers/edac/synopsys_edac.c
ARM SMMU DRIVER
M: Will Deacon <will.deacon@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/iommu/arm-smmu.c
+F: drivers/iommu/io-pgtable-arm.c
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>
@@ -1661,7 +1681,6 @@ M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
L: linux-wireless@vger.kernel.org
-L: ath5k-devel@lists.ath5k.org
W: http://wireless.kernel.org/en/users/Drivers/ath5k
S: Maintained
F: drivers/net/wireless/ath/ath5k/
@@ -2128,7 +2147,7 @@ F: arch/arm/boot/dts/bcm470*
BROADCOM BCM63XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
L: linux-arm-kernel@lists.infradead.org
-T: git git://git.github.com/brcm/linux.git
+T: git git://github.com/broadcom/arm-bcm63xx.git
S: Maintained
F: arch/arm/mach-bcm/bcm63xx.c
F: arch/arm/include/debug/bcm63xx.S
@@ -2145,6 +2164,7 @@ M: Brian Norris <computersforpeace@gmail.com>
M: Gregory Fong <gregory.0xf0@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T: git git://github.com/broadcom/stblinux.git
S: Maintained
F: arch/arm/mach-bcm/*brcmstb*
F: arch/arm/boot/dts/bcm7*.dts*
@@ -2154,6 +2174,7 @@ BROADCOM BMIPS MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: linux-mips@linux-mips.org
+T: git git://github.com/broadcom/stblinux.git
S: Maintained
F: arch/mips/bmips/*
F: arch/mips/include/asm/mach-bmips/*
@@ -2196,7 +2217,7 @@ M: Ray Jui <rjui@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: bcm-kernel-feedback-list@broadcom.com
-T: git git://git.github.com/brcm/linux.git
+T: git git://github.com/broadcom/cygnus-linux.git
S: Maintained
N: iproc
N: cygnus
@@ -2350,7 +2371,8 @@ CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
L: linux-can@vger.kernel.org
W: http://gitorious.org/linux-can
-T: git git://gitorious.org/linux-can/linux-can-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
F: Documentation/networking/can.txt
F: net/can/
@@ -2365,7 +2387,8 @@ M: Wolfgang Grandegger <wg@grandegger.com>
M: Marc Kleine-Budde <mkl@pengutronix.de>
L: linux-can@vger.kernel.org
W: http://gitorious.org/linux-can
-T: git git://gitorious.org/linux-can/linux-can-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
F: drivers/net/can/
F: include/linux/can/dev.h
@@ -2383,6 +2406,12 @@ F: security/capability.c
F: security/commoncap.c
F: kernel/capability.c
+CAPELLA MICROSYSTEMS LIGHT SENSOR DRIVER
+M: Kevin Tsai <ktsai@capellamicro.com>
+S: Maintained
+F: drivers/iio/light/cm*
+F: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
@@ -2941,6 +2970,12 @@ S: Supported
F: drivers/input/touchscreen/cyttsp*
F: include/linux/input/cyttsp.h
+DALLAS/MAXIM DS1685-FAMILY REAL TIME CLOCK
+M: Joshua Kinard <kumba@gentoo.org>
+S: Maintained
+F: drivers/rtc/rtc-ds1685.c
+F: include/linux/rtc/ds1685.h
+
DAMA SLAVE for AX.25
M: Joerg Reuter <jreuter@yaina.de>
W: http://yaina.de/jreuter/
@@ -3017,6 +3052,7 @@ F: drivers/platform/x86/dell-laptop.c
DELL LAPTOP SMM DRIVER
M: Guenter Roeck <linux@roeck-us.net>
+S: Maintained
F: drivers/char/i8k.c
F: include/uapi/linux/i8k.h
@@ -3032,7 +3068,7 @@ S: Maintained
F: drivers/platform/x86/dell-wmi.c
DESIGNWARE USB2 DRD IP DRIVER
-M: Paul Zimmerman <paulz@synopsys.com>
+M: John Youn <johnyoun@synopsys.com>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
@@ -3128,6 +3164,12 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-diolan-u2c.c
+DIRECT ACCESS (DAX)
+M: Matthew Wilcox <willy@linux.intel.com>
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+F: fs/dax.c
+
DIRECTORY NOTIFICATION (DNOTIFY)
M: Eric Paris <eparis@parisplace.org>
S: Maintained
@@ -3187,7 +3229,7 @@ L: dmaengine@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Maintained
F: drivers/dma/
-F: include/linux/dma*
+F: include/linux/dmaengine.h
F: Documentation/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
@@ -3212,6 +3254,7 @@ F: Documentation/
X: Documentation/ABI/
X: Documentation/devicetree/
X: Documentation/[a-z][a-z]_[A-Z][A-Z]/
+T: git git://git.lwn.net/linux-2.6.git docs-next
DOUBLETALK DRIVER
M: "James R. Van Zandt" <jrv@vanzandt.mv.com>
@@ -3471,6 +3514,14 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
F: drivers/tty/serial/dz.*
+E3X0 POWER BUTTON DRIVER
+M: Moritz Fischer <moritz.fischer@ettus.com>
+L: usrp-users@lists.ettus.com
+W: http://www.ettus.com
+S: Supported
+F: drivers/input/misc/e3x0-button.c
+F: Documentation/devicetree/bindings/input/e3x0-button.txt
+
E4000 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -3512,6 +3563,8 @@ M: Borislav Petkov <bp@alien8.de>
M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
+T: git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git#for-next
+T: git://git.kernel.org/pub/linux/kernel/git/mchehab/linux-edac.git#linux_next
S: Supported
F: Documentation/edac.txt
F: drivers/edac/
@@ -3876,6 +3929,12 @@ S: Supported
F: Documentation/fault-injection/
F: lib/fault-inject.c
+FBTFT Framebuffer drivers
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+M: Noralf Trønnes <noralf@tronnes.org>
+S: Maintained
+F: drivers/staging/fbtft/
+
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M: Robert Love <robert.w.love@intel.com>
L: fcoe-devel@open-fcoe.org
@@ -4407,6 +4466,7 @@ F: include/linux/hwmon*.h
HARDWARE RANDOM NUMBER GENERATOR CORE
M: Matt Mackall <mpm@selenic.com>
M: Herbert Xu <herbert@gondor.apana.org.au>
+L: linux-crypto@vger.kernel.org
S: Odd fixes
F: Documentation/hw_random.txt
F: drivers/char/hw_random/
@@ -4753,20 +4813,20 @@ S: Supported
F: drivers/scsi/ipr.*
IBM Power Virtual Ethernet Device Driver
-M: Santiago Leon <santil@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
IBM Power Virtual SCSI Device Drivers
-M: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvscsi*
F: drivers/scsi/ibmvscsi/viosrp.h
IBM Power Virtual FC Device Drivers
-M: Brian King <brking@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvfc*
@@ -4892,7 +4952,7 @@ F: drivers/ipack/
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-M: Dmitry Kasatkin <d.kasatkin@samsung.com>
+M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
L: linux-ima-devel@lists.sourceforge.net
L: linux-ima-user@lists.sourceforge.net
L: linux-security-module@vger.kernel.org
@@ -4934,7 +4994,6 @@ F: include/uapi/linux/inotify.h
INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS
M: Dmitry Torokhov <dmitry.torokhov@gmail.com>
-M: Dmitry Torokhov <dtor@mail.ru>
L: linux-input@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-input/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
@@ -4953,10 +5012,19 @@ F: Documentation/input/multi-touch-protocol.txt
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_
+INTEL ASoC BDW/HSW DRIVERS
+M: Jie Yang <yang.jie@linux.intel.com>
+L: alsa-devel@alsa-project.org
+S: Supported
+F: sound/soc/intel/sst-haswell*
+F: sound/soc/intel/sst-dsp*
+F: sound/soc/intel/sst-firmware.c
+F: sound/soc/intel/broadwell.c
+F: sound/soc/intel/haswell.c
+
INTEL C600 SERIES SAS CONTROLLER DRIVER
M: Intel SCU Linux support <intel-linux-scu@intel.com>
M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
-M: Dave Jiang <dave.jiang@intel.com>
L: linux-scsi@vger.kernel.org
T: git git://git.code.sf.net/p/intel-sas/isci
S: Supported
@@ -5284,6 +5352,15 @@ W: www.open-iscsi.org
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/ulp/iser/
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M: Sagi Grimberg <sagig@mellanox.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L: linux-rdma@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+W: http://www.linux-iscsi.org
+F: drivers/infiniband/ulp/isert
+
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -5698,6 +5775,49 @@ F: drivers/lguest/
F: include/linux/lguest*.h
F: tools/lguest/
+LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/
+F: include/linux/ata.h
+F: include/linux/libata.h
+
+LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
+M: Viresh Kumar <viresh.linux@gmail.com>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: include/linux/pata_arasan_cf_data.h
+F: drivers/ata/pata_arasan_cf.c
+
+LIBATA PATA DRIVERS
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/pata_*.c
+F: drivers/ata/ata_generic.c
+
+LIBATA SATA AHCI PLATFORM devices support
+M: Hans de Goede <hdegoede@redhat.com>
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/ahci_platform.c
+F: drivers/ata/libahci_platform.c
+F: include/linux/ahci_platform.h
+
+LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
+M: Mikael Pettersson <mikpelinux@gmail.com>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/sata_promise.*
+
LIBLOCKDEP
M: Sasha Levin <sasha.levin@oracle.com>
S: Maintained
@@ -5789,6 +5909,21 @@ F: Documentation/misc-devices/lis3lv02d
F: drivers/misc/lis3lv02d/
F: drivers/platform/x86/hp_accel.c
+LIVE PATCHING
+M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Seth Jennings <sjenning@redhat.com>
+M: Jiri Kosina <jkosina@suse.cz>
+M: Vojtech Pavlik <vojtech@suse.cz>
+S: Maintained
+F: kernel/livepatch/
+F: include/linux/livepatch.h
+F: arch/x86/include/asm/livepatch.h
+F: arch/x86/kernel/livepatch.c
+F: Documentation/ABI/testing/sysfs-kernel-livepatch
+F: samples/livepatch/
+L: live-patching@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
+
LLC (802.2)
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
S: Maintained
@@ -6087,6 +6222,33 @@ F: Documentation/devicetree/bindings/i2c/max6697.txt
F: drivers/hwmon/max6697.c
F: include/linux/platform_data/max6697.h
+MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
+M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: drivers/power/max14577_charger.c
+F: drivers/power/max77693_charger.c
+
+MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
+M: Chanwoo Choi <cw00.choi@samsung.com>
+M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: drivers/*/max14577.c
+F: drivers/*/max77686.c
+F: drivers/*/max77693.c
+F: drivers/extcon/extcon-max14577.c
+F: drivers/extcon/extcon-max77693.c
+F: drivers/rtc/rtc-max77686.c
+F: drivers/clk/clk-max77686.c
+F: Documentation/devicetree/bindings/mfd/max14577.txt
+F: Documentation/devicetree/bindings/mfd/max77686.txt
+F: Documentation/devicetree/bindings/mfd/max77693.txt
+F: Documentation/devicetree/bindings/clock/maxim,max77686.txt
+F: include/linux/mfd/max14577*.h
+F: include/linux/mfd/max77686*.h
+F: include/linux/mfd/max77693*.h
+
MAXIRADIO FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -6117,14 +6279,6 @@ F: include/uapi/linux/meye.h
F: include/uapi/linux/ivtv*
F: include/uapi/linux/uvcvideo.h
-MEDIAVISION PRO MOVIE STUDIO DRIVER
-M: Hans Verkuil <hverkuil@xs4all.nl>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: http://linuxtv.org
-S: Odd Fixes
-F: drivers/media/parport/pms*
-
MEGARAID SCSI/SAS DRIVERS
M: Kashyap Desai <kashyap.desai@avagotech.com>
M: Sumit Saxena <sumit.saxena@avagotech.com>
@@ -6542,9 +6696,10 @@ F: include/uapi/linux/netrom.h
F: net/netrom/
NETWORK BLOCK DEVICE (NBD)
-M: Paul Clements <Paul.Clements@steeleye.com>
+M: Markus Pargmann <mpa@pengutronix.de>
S: Maintained
L: nbd-general@lists.sourceforge.net
+T: git git://git.pengutronix.de/git/mpa/linux-nbd.git
F: Documentation/blockdev/nbd.txt
F: drivers/block/nbd.c
F: include/linux/nbd.h
@@ -6573,6 +6728,7 @@ F: include/linux/netdevice.h
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
+F: include/uapi/linux/net_namespace.h
F: tools/net/
F: tools/testing/selftests/net/
F: lib/random32.c
@@ -6677,6 +6833,7 @@ F: Documentation/devicetree/bindings/net/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@primarydata.com>
+M: Anna Schumaker <anna.schumaker@netapp.com>
L: linux-nfs@vger.kernel.org
W: http://client.linux-nfs.org
T: git git://git.linux-nfs.org/projects/trondmy/linux-nfs.git
@@ -6892,6 +7049,12 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
+OMAP HWMOD DATA
+M: Paul Walmsley <paul@pwsan.com>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: arch/arm/mach-omap2/omap_hwmod*data*
+
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
M: Benoît Cousson <bcousson@baylibre.com>
L: linux-omap@vger.kernel.org
@@ -6982,14 +7145,12 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@linaro.org>
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
-W: http://fdt.secretlab.ca
-T: git git://git.secretlab.ca/git/linux-2.6.git
+W: http://www.devicetree.org/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
S: Maintained
F: drivers/of/
F: include/linux/of*.h
F: scripts/dtc/
-K: of_get_property
-K: of_match_table
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
@@ -7013,11 +7174,12 @@ F: arch/openrisc/
OPENVSWITCH
M: Pravin Shelar <pshelar@nicira.com>
+L: netdev@vger.kernel.org
L: dev@openvswitch.org
W: http://openvswitch.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
S: Maintained
F: net/openvswitch/
+F: include/uapi/linux/openvswitch.h
OPL4 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
@@ -7215,6 +7377,14 @@ F: include/linux/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+PCI DRIVER FOR ARM VERSATILE PLATFORM
+M: Rob Herring <robh@kernel.org>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/versatile.txt
+F: drivers/pci/host/pci-versatile.c
+
PCI DRIVER FOR APPLIEDMICRO XGENE
M: Tanmay Inamdar <tinamdar@apm.com>
L: linux-pci@vger.kernel.org
@@ -7234,7 +7404,7 @@ S: Maintained
F: drivers/pci/host/*layerscape*
PCI DRIVER FOR IMX6
-M: Richard Zhu <r65037@freescale.com>
+M: Richard Zhu <Richard.Zhu@freescale.com>
M: Lucas Stach <l.stach@pengutronix.de>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -7404,6 +7574,7 @@ F: drivers/crypto/picoxcell*
PIN CONTROL SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-gpio@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S: Maintained
F: drivers/pinctrl/
F: include/linux/pinctrl/
@@ -7571,12 +7742,6 @@ W: http://wireless.kernel.org/en/users/Drivers/p54
S: Obsolete
F: drivers/net/wireless/prism54/
-PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M: Mikael Pettersson <mikpelinux@gmail.com>
-L: linux-ide@vger.kernel.org
-S: Maintained
-F: drivers/ata/sata_promise.*
-
PS3 NETWORK SUPPORT
M: Geoff Levand <geoff@infradead.org>
L: netdev@vger.kernel.org
@@ -7742,8 +7907,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
QLOGIC QLA4XXX iSCSI DRIVER
-M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
-M: iscsi-driver@qlogic.com
+M: QLogic-Storage-Upstream@qlogic.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/LICENSE.qla4xxx
@@ -7822,14 +7986,6 @@ T: git git://github.com/KrasnikovEugene/wcn36xx.git
S: Supported
F: drivers/net/wireless/ath/wcn36xx/
-QUICKCAM PARALLEL PORT WEBCAMS
-M: Hans Verkuil <hverkuil@xs4all.nl>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: http://linuxtv.org
-S: Odd Fixes
-F: drivers/media/parport/*-qcam*
-
RADOS BLOCK DEVICE (RBD)
M: Yehuda Sadeh <yehuda@inktank.com>
M: Sage Weil <sage@inktank.com>
@@ -8018,6 +8174,13 @@ S: Maintained
F: Documentation/rfkill.txt
F: net/rfkill/
+RHASHTABLE
+M: Thomas Graf <tgraf@suug.ch>
+L: netdev@vger.kernel.org
+S: Maintained
+F: lib/rhashtable.c
+F: include/linux/rhashtable.h
+
RICOH SMARTMEDIA/XD DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
@@ -8360,12 +8523,6 @@ F: kernel/time/clocksource.c
F: kernel/time/time*.c
F: kernel/time/ntp.c
-TLG2300 VIDEO4LINUX-2 DRIVER
-M: Huang Shijie <shijie8@gmail.com>
-M: Hans Verkuil <hverkuil@xs4all.nl>
-S: Odd Fixes
-F: drivers/media/usb/tlg2300/
-
SC1200 WDT DRIVER
M: Zwane Mwaikambo <zwanem@gmail.com>
S: Maintained
@@ -8551,25 +8708,6 @@ S: Maintained
F: drivers/misc/phantom.c
F: include/uapi/linux/phantom.h
-SERIAL ATA (SATA) SUBSYSTEM
-M: Tejun Heo <tj@kernel.org>
-L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S: Supported
-F: drivers/ata/
-F: include/linux/ata.h
-F: include/linux/libata.h
-
-SERIAL ATA AHCI PLATFORM devices support
-M: Hans de Goede <hdegoede@redhat.com>
-M: Tejun Heo <tj@kernel.org>
-L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S: Supported
-F: drivers/ata/ahci_platform.c
-F: drivers/ata/libahci_platform.c
-F: include/linux/ahci_platform.h
-
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
L: linux-scsi@vger.kernel.org
@@ -8750,6 +8888,15 @@ S: Maintained
F: drivers/media/platform/davinci/
F: include/media/davinci/
+TI AM437X VPFE DRIVER
+M: Lad, Prabhakar <prabhakar.csengg@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+S: Maintained
+F: drivers/media/platform/am437x/
+
SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -8831,6 +8978,8 @@ F: drivers/media/i2c/smiapp/
F: include/media/smiapp.h
F: drivers/media/i2c/smiapp-pll.c
F: drivers/media/i2c/smiapp-pll.h
+F: include/uapi/linux/smiapp.h
+F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
@@ -8897,6 +9046,7 @@ SOFTLOGIC 6x10 MPEG CODEC
M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
M: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+M: Ismael Luceno <ismael@iodev.co.uk>
L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/pci/solo6x10/
@@ -9169,6 +9319,14 @@ L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/staging/rtl8723au/
+STAGING - SILICON MOTION SM7XX FRAME BUFFER DRIVER
+M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+M: Teddy Wang <teddy.wang@siliconmotion.com>
+M: Sudip Mukherjee <sudip@vectorindia.org>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: drivers/staging/sm7xxfb/
+
STAGING - SLICOSS
M: Lior Dotan <liodot@gmail.com>
M: Christopher Harrer <charrer@alacritech.com>
@@ -9209,6 +9367,13 @@ F: arch/m68k/sun3*/
F: arch/m68k/include/asm/sun3*
F: drivers/net/ethernet/i825xx/sun3*
+SUN4I LOW RES ADC ATTACHED TABLET KEYS DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
+F: drivers/input/keyboard/sun4i-lradc-keys.c
+
SUNDANCE NETWORK DRIVER
M: Denis Kirjanov <kda@linux-powerpc.org>
L: netdev@vger.kernel.org
@@ -9217,7 +9382,6 @@ F: drivers/net/ethernet/dlink/sundance.c
SUPERH
L: linux-sh@vger.kernel.org
-W: http://www.linux-sh.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
S: Orphan
F: Documentation/sh/
@@ -9538,7 +9702,8 @@ F: drivers/platform/x86/thinkpad_acpi.c
TI BANDGAP AND THERMAL DRIVER
M: Eduardo Valentin <edubezval@gmail.com>
L: linux-pm@vger.kernel.org
-S: Supported
+L: linux-omap@vger.kernel.org
+S: Maintained
F: drivers/thermal/ti-soc-thermal/
TI CLOCK DRIVER
@@ -9595,6 +9760,13 @@ F: drivers/power/lp8788-charger.c
F: drivers/regulator/lp8788-*.c
F: include/linux/mfd/lp8788*.h
+TI NETCP ETHERNET DRIVER
+M: Wingman Kwok <w-kwok2@ti.com>
+M: Murali Karicheri <m-karicheri2@ti.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/ti/netcp*
+
TI TWL4030 SERIES SOC CODEC DRIVER
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -9622,7 +9794,7 @@ F: net/tipc/
TILE ARCHITECTURE
M: Chris Metcalf <cmetcalf@ezchip.com>
-W: http://www.tilera.com/scm/
+W: http://www.ezchip.com/scm/
S: Supported
F: arch/tile/
F: drivers/char/tile-srom.c
@@ -9715,13 +9887,21 @@ F: drivers/media/pci/tw68/
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
-M: Ashley Lai <ashley@ashleylai.com>
M: Marcel Selhorst <tpmdd@selhorst.net>
W: http://tpmdd.sourceforge.net
L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
+Q: git git://github.com/PeterHuewe/linux-tpmdd.git
+T: https://github.com/PeterHuewe/linux-tpmdd
S: Maintained
F: drivers/char/tpm/
+TPM IBM_VTPM DEVICE DRIVER
+M: Ashley Lai <ashleydlai@gmail.com>
+W: http://tpmdd.sourceforge.net
+L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
+S: Maintained
+F: drivers/char/tpm/tpm_ibmvtpm*
+
TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Ingo Molnar <mingo@redhat.com>
@@ -9876,20 +10056,15 @@ F: drivers/scsi/ufs/
UNSORTED BLOCK IMAGES (UBI)
M: Artem Bityutskiy <dedekind1@gmail.com>
+M: Richard Weinberger <richard@nod.at>
W: http://www.linux-mtd.infradead.org/
L: linux-mtd@lists.infradead.org
T: git git://git.infradead.org/ubifs-2.6.git
-S: Maintained
+S: Supported
F: drivers/mtd/ubi/
F: include/linux/mtd/ubi.h
F: include/uapi/mtd/ubi-user.h
-UNSORTED BLOCK IMAGES (UBI) Fastmap
-M: Richard Weinberger <richard@nod.at>
-L: linux-mtd@lists.infradead.org
-S: Maintained
-F: drivers/mtd/ubi/fastmap.c
-
USB ACM DRIVER
M: Oliver Neukum <oliver@neukum.org>
L: linux-usb@vger.kernel.org
@@ -10151,6 +10326,7 @@ USERSPACE I/O (UIO)
M: "Hans J. Koch" <hjk@hansjkoch.de>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/DocBook/uio-howto.tmpl
F: drivers/uio/
F: include/linux/uio*.h
@@ -10578,6 +10754,7 @@ F: drivers/pci/*xen*
XEN BLOCK SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Roger Pau Monné <roger.pau@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: drivers/block/xen-blkback/*
@@ -10633,6 +10810,7 @@ M: Max Filippov <jcmvbkbc@gmail.com>
L: linux-xtensa@linux-xtensa.org
S: Maintained
F: drivers/spi/spi-xtensa-xtfpga.c
+F: sound/soc/xtensa/xtfpga-i2s.c
YAM DRIVER FOR AX.25
M: Jean-Paul Roubelat <jpr@f6fbb.org>
diff --git a/Makefile b/Makefile
index e41a3356abee..33cb15efd257 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
NAME = Diseased Newt
# *DOCUMENTATION*
@@ -423,7 +423,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -726,10 +726,14 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
endif
ifdef CONFIG_FUNCTION_TRACER
+ifndef CC_FLAGS_FTRACE
+CC_FLAGS_FTRACE := -pg
+endif
+export CC_FLAGS_FTRACE
ifdef CONFIG_HAVE_FENTRY
CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
endif
-KBUILD_CFLAGS += -pg $(CC_USING_FENTRY)
+KBUILD_CFLAGS += $(CC_FLAGS_FTRACE) $(CC_USING_FENTRY)
KBUILD_AFLAGS += $(CC_USING_FENTRY)
ifdef CONFIG_DYNAMIC_FTRACE
ifdef CONFIG_HAVE_C_RECORDMCOUNT
@@ -777,6 +781,7 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
endif
+include $(srctree)/scripts/Makefile.kasan
include $(srctree)/scripts/Makefile.extrawarn
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e89234..a9a119592372 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -45,7 +45,7 @@ struct vm_area_struct;
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
@@ -73,7 +73,6 @@ struct vm_area_struct;
/* .. and these are ours ... */
#define _PAGE_DIRTY 0x20000
#define _PAGE_ACCESSED 0x40000
-#define _PAGE_FILE 0x80000 /* set:pagecache, unset:swap */
/*
* NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
@@ -268,7 +267,6 @@ extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
extern inline int pte_special(pte_t pte) { return 0; }
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
@@ -345,11 +343,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define pte_to_pgoff(pte) (pte_val(pte) >> 32)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
-
-#define PTE_FILE_MAX_BITS 32
-
#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr) (1)
#endif
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 48bbea6898b3..d5b98ab514bb 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -27,8 +27,6 @@ struct thread_info {
int bpt_nsaved;
unsigned long bpt_addr[2]; /* breakpoint handling */
unsigned int bpt_insn[2];
-
- struct restart_block restart_block;
};
/*
@@ -40,9 +38,6 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 076c35cd6cde..98a1525fa164 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -285,8 +285,12 @@ pcibios_claim_one_bus(struct pci_bus *b)
if (r->parent || !r->start || !r->flags)
continue;
if (pci_has_flag(PCI_PROBE_ONLY) ||
- (r->flags & IORESOURCE_PCI_FIXED))
- pci_claim_resource(dev, i);
+ (r->flags & IORESOURCE_PCI_FIXED)) {
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
+ }
}
}
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 6cec2881acbf..8dbfb15f1745 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -150,7 +150,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
struct switch_stack *sw = (struct switch_stack *)regs - 1;
long i, err = __get_user(regs->pc, &sc->sc_pc);
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
sw->r26 = (unsigned long) ret_from_sys_call;
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 6b0b7f7ef783..9615fe1701c6 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -61,7 +61,6 @@
#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
#define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */
-#define _PAGE_FILE (1<<7) /* page cache/ swap (S) */
#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
#define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */
@@ -73,7 +72,6 @@
#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
#define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */
#define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */
-#define _PAGE_FILE (1<<6) /* page cache/ swap (S) */
#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr
@@ -213,7 +211,7 @@
* No special requirements for lowest virtual address we permit any user space
* mapping to be mapped at.
*/
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/****************************************************************
@@ -259,7 +257,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
#define pte_page(x) (mem_map + \
- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
+ PAGE_SHIFT)))
#define mk_pte(page, pgprot) \
({ \
@@ -268,15 +267,6 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
pte; \
})
-/* TBD: Non linear mapping stuff */
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & _PAGE_FILE;
-}
-
-#define PTE_FILE_MAX_BITS 30
-#define pgoff_to_pte(x) __pte(x)
-#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -364,7 +354,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* Encode swap {type,off} tuple into PTE
* We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
- * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ * PAGE_PRESENT is zero in a PTE holding swap "identifier"
*/
#define __swp_entry(type, off) ((swp_entry_t) { \
((type) & 0x1f) | ((off) << 13) })
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 210fe97464c3..4e547296831d 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -56,9 +56,6 @@ unsigned long thread_saved_pc(struct task_struct *t);
/* Free all resources held by a thread */
#define release_thread(thread) do { } while (0)
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
/*
* A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
* get optimised away by gcc
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
index 602b0970a764..744a6ae15754 100644
--- a/arch/arc/include/asm/serial.h
+++ b/arch/arc/include/asm/serial.h
@@ -10,26 +10,13 @@
#define _ASM_ARC_SERIAL_H
/*
- * early-8250 requires BASE_BAUD to be defined and includes this header.
- * We put in a typical value:
- * (core clk / 16) - i.e. UART samples 16 times per sec.
- * Athough in multi-platform-image this might not work, specially if the
- * clk driving the UART is different.
- * We can't use DeviceTree as this is typically for early serial.
+ * early 8250 (now earlycon) requires BASE_BAUD to be defined in this header.
+ * However to still determine it dynamically (for multi-platform images)
+ * we do this in a helper by parsing the FDT early
*/
-#include <asm/clk.h>
+extern unsigned int __init arc_early_base_baud(void);
-#define BASE_BAUD (arc_get_core_freq() / 16)
-
-/*
- * This is definitely going to break early 8250 consoles on multi-platform
- * images but hey, it won't add any code complexity for a debug feature of
- * one broken driver.
- */
-#ifdef CONFIG_ARC_PLAT_TB10X
-#undef BASE_BAUD
-#define BASE_BAUD (arc_get_core_freq() / 16 / 3)
-#endif
+#define BASE_BAUD arc_early_base_baud()
#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 02bc5ec0fb2e..1163a1838ac1 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -46,7 +46,6 @@ struct thread_info {
struct exec_domain *exec_domain;/* execution domain */
__u32 cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
- struct restart_block restart_block;
};
/*
@@ -62,9 +61,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index fffdb5e41b20..e32b54abff51 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -17,6 +17,28 @@
#include <asm/clk.h>
#include <asm/mach_desc.h>
+#ifdef CONFIG_SERIAL_EARLYCON
+
+static unsigned int __initdata arc_base_baud;
+
+unsigned int __init arc_early_base_baud(void)
+{
+ return arc_base_baud/16;
+}
+
+static void __init arc_set_early_base_baud(unsigned long dt_root)
+{
+ unsigned int core_clk = arc_get_core_freq();
+
+ if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x"))
+ arc_base_baud = core_clk/3;
+ else
+ arc_base_baud = core_clk;
+}
+#else
+#define arc_set_early_base_baud(dt_root)
+#endif
+
static const void * __init arch_get_next_mach(const char *const **match)
{
static const struct machine_desc *mdesc = __arch_info_begin;
@@ -56,5 +78,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
if (clk)
arc_set_core_freq(of_read_ulong(clk, len/4));
+ arc_set_early_base_baud(dt_root);
+
return mdesc;
}
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 83a046a7cd06..d868289c5a26 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -736,16 +736,20 @@ ENTRY(ret_from_fork)
; put last task in scheduler queue
bl @schedule_tail
- ; If kernel thread, jump to its entry-point
ld r9, [sp, PT_status32]
brne r9, 0, 1f
- jl.d [r14]
- mov r0, r13 ; arg to payload
+ jl.d [r14] ; kernel thread entry point
+ mov r0, r13 ; (see PF_KTHREAD block in copy_thread)
1:
- ; special case of kernel_thread entry point returning back due to
- ; kernel_execve() - pretend return from syscall to ret to userland
+ ; Return to user space
+ ; 1. Any forked task (Reach here via BRne above)
+ ; 2. First ever init task (Reach here via return from JL above)
+ ; This is the historic "kernel_execve" use-case, to return to init
+ ; user mode, in a round about way since that is always done from
+ ; a kernel thread which is executed via JL above but always returns
+ ; out whenever kernel_execve (now inline do_fork()) is involved
b ret_from_exception
END(ret_from_fork)
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 252bf603db9c..900f68a70088 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -412,6 +412,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
char *str;
int cpu_id = ptr_to_cpu(v);
+ if (!cpu_online(cpu_id)) {
+ seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
+ goto done;
+ }
+
str = (char *)__get_free_page(GFP_TEMPORARY);
if (!str)
goto done;
@@ -429,7 +434,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
free_page((unsigned long)str);
done:
- seq_printf(m, "\n\n");
+ seq_printf(m, "\n");
return 0;
}
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index cb3142a2d40b..114234e83caa 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -104,7 +104,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
struct pt_regs *regs = current_pt_regs();
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* Since we stacked the signal on a word boundary,
* then 'sp' should be word aligned here. If it's
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 20ebb602ea2f..6a400b1b0b62 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -221,7 +221,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
* and read back old value
*/
do {
- new = old = *ipi_data_ptr;
+ new = old = ACCESS_ONCE(*ipi_data_ptr);
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f5dd6e970f53..9f1f09a2bc9b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -29,6 +29,7 @@ config ARM
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
@@ -60,6 +61,7 @@ config ARM
select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
+ select HAVE_OPTPROBES if !THUMB2_KERNEL
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -1281,6 +1283,9 @@ config PCI_DOMAINS
bool
depends on PCI
+config PCI_DOMAINS_GENERIC
+ def_bool PCI_DOMAINS
+
config PCI_NANOENGINE
bool "BSE nanoEngine PCI support"
depends on SA1100_NANOENGINE
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index b6a073de1559..970de7518341 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -418,6 +418,13 @@ choice
Say Y here if you want the debug print routines to direct
their output to UART1 serial port on KEYSTONE2 devices.
+ config DEBUG_KS8695_UART
+ bool "KS8695 Debug UART"
+ depends on ARCH_KS8695
+ help
+ Say Y here if you want kernel low-level debugging support
+ on KS8695.
+
config DEBUG_MESON_UARTAO
bool "Kernel low-level debugging via Meson6 UARTAO"
depends on ARCH_MESON
@@ -533,6 +540,13 @@ choice
Say Y here if you want kernel low-level debugging support
on Vybrid based platforms.
+ config DEBUG_NETX_UART
+ bool "Kernel low-level debugging messages via NetX UART"
+ depends on ARCH_NETX
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Hilscher NetX based platforms.
+
config DEBUG_NOMADIK_UART
bool "Kernel low-level debugging messages via NOMADIK UART"
depends on ARCH_NOMADIK
@@ -557,6 +571,30 @@ choice
Say Y here if you want kernel low-level debugging support
on TI-NSPIRE CX models.
+ config DEBUG_OMAP1UART1
+ bool "Kernel low-level debugging via OMAP1 UART1"
+ depends on ARCH_OMAP1
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on OMAP1 based platforms (except OMAP730) on the UART1.
+
+ config DEBUG_OMAP1UART2
+ bool "Kernel low-level debugging via OMAP1 UART2"
+ depends on ARCH_OMAP1
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on OMAP1 based platforms (except OMAP730) on the UART2.
+
+ config DEBUG_OMAP1UART3
+ bool "Kernel low-level debugging via OMAP1 UART3"
+ depends on ARCH_OMAP1
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on OMAP1 based platforms (except OMAP730) on the UART3.
+
config DEBUG_OMAP2UART1
bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
depends on ARCH_OMAP2PLUS
@@ -599,6 +637,30 @@ choice
depends on ARCH_OMAP2PLUS
select DEBUG_OMAP2PLUS_UART
+ config DEBUG_OMAP7XXUART1
+ bool "Kernel low-level debugging via OMAP730 UART1"
+ depends on ARCH_OMAP730
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on OMAP730 based platforms on the UART1.
+
+ config DEBUG_OMAP7XXUART2
+ bool "Kernel low-level debugging via OMAP730 UART2"
+ depends on ARCH_OMAP730
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on OMAP730 based platforms on the UART2.
+
+ config DEBUG_OMAP7XXUART3
+ bool "Kernel low-level debugging via OMAP730 UART3"
+ depends on ARCH_OMAP730
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on OMAP730 based platforms on the UART3.
+
config DEBUG_TI81XXUART1
bool "Kernel low-level debugging messages via TI81XX UART1 (ti8148evm)"
depends on ARCH_OMAP2PLUS
@@ -1080,15 +1142,6 @@ choice
This option selects UART0 on VIA/Wondermedia System-on-a-chip
devices, including VT8500, WM8505, WM8650 and WM8850.
- config DEBUG_LL_UART_NONE
- bool "No low-level debugging UART"
- depends on !ARCH_MULTIPLATFORM
- help
- Say Y here if your platform doesn't provide a UART option
- above. This relies on your platform choosing the right UART
- definition internally in order for low-level debugging to
- work.
-
config DEBUG_ICEDCC
bool "Kernel low-level debugging via EmbeddedICE DCC channel"
help
@@ -1241,7 +1294,9 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX6Q_UART || \
DEBUG_IMX6SL_UART || \
DEBUG_IMX6SX_UART
+ default "debug/ks8695.S" if DEBUG_KS8695_UART
default "debug/msm.S" if DEBUG_MSM_UART || DEBUG_QCOM_UARTDM
+ default "debug/netx.S" if DEBUG_NETX_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2
default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF0
@@ -1267,12 +1322,7 @@ config DEBUG_LL_INCLUDE
# Compatibility options for PL01x
config DEBUG_UART_PL01X
- def_bool ARCH_EP93XX || \
- ARCH_INTEGRATOR || \
- ARCH_SPEAR3XX || \
- ARCH_SPEAR6XX || \
- ARCH_SPEAR13XX || \
- ARCH_VERSATILE
+ bool
# Compatibility options for 8250
config DEBUG_UART_8250
@@ -1288,6 +1338,7 @@ config DEBUG_UART_BCM63XX
config DEBUG_UART_PHYS
hex "Physical base address of debug UART"
+ default 0x00100a00 if DEBUG_NETX_UART
default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0
default 0x01c28000 if DEBUG_SUNXI_UART0
default 0x01c28400 if DEBUG_SUNXI_UART1
@@ -1331,7 +1382,6 @@ config DEBUG_UART_PHYS
DEBUG_S3C2410_UART2)
default 0x78000000 if DEBUG_CNS3XXX
default 0x7c0003f8 if FOOTBRIDGE
- default 0x78000000 if DEBUG_CNS3XXX
default 0x80010000 if DEBUG_ASM9260_UART
default 0x80070000 if DEBUG_IMX23_UART
default 0x80074000 if DEBUG_IMX28_UART
@@ -1374,12 +1424,17 @@ config DEBUG_UART_PHYS
default 0xffe40000 if DEBUG_RCAR_GEN1_SCIF0
default 0xffe42000 if DEBUG_RCAR_GEN1_SCIF2
default 0xfff36000 if DEBUG_HIGHBANK_UART
+ default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
default 0xfffe8600 if DEBUG_UART_BCM63XX
default 0xfffff700 if ARCH_IOP33X
- depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+ depends on ARCH_EP93XX || \
+ DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
- DEBUG_MSM_UART || DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \
+ DEBUG_MSM_UART || DEBUG_NETX_UART || \
+ DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \
DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \
DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
@@ -1389,6 +1444,7 @@ config DEBUG_UART_PHYS
config DEBUG_UART_VIRT
hex "Virtual base address of debug UART"
+ default 0xe0000a00 if DEBUG_NETX_UART
default 0xe0010fe0 if ARCH_RPC
default 0xe1000000 if DEBUG_MSM_UART
default 0xf0000be0 if ARCH_EBSA110
@@ -1461,20 +1517,25 @@ config DEBUG_UART_VIRT
default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
default 0xfef36000 if DEBUG_HIGHBANK_UART
+ default 0xfefb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ default 0xfefb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ default 0xfefb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
default 0xfefff700 if ARCH_IOP33X
default 0xff003000 if DEBUG_U300_UART
default 0xffd01000 if DEBUG_HIP01_UART
default DEBUG_UART_PHYS if !MMU
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
- DEBUG_MSM_UART || DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+ DEBUG_MSM_UART || DEBUG_NETX_UART || \
+ DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
config DEBUG_UART_8250_SHIFT
int "Register offset shift for the 8250 debug UART"
depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
- default 0 if FOOTBRIDGE || ARCH_IOP32X || DEBUG_BCM_5301X
+ default 0 if FOOTBRIDGE || ARCH_IOP32X || DEBUG_BCM_5301X || \
+ DEBUG_OMAP7XXUART1 || DEBUG_OMAP7XXUART2 || DEBUG_OMAP7XXUART3
default 2
config DEBUG_UART_8250_WORD
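Note on the shift defaults above: the new OMAP730 entries join FOOTBRIDGE, IOP32X and BCM_5301X in using a shift of 0, i.e. byte-spaced 8250 registers, while everything else keeps the usual 4-byte register stride (shift 2). A sketch of how such a shift is applied when addressing the port, purely illustrative and not a copy of debug/8250.S:

    /* Debug UART register address = base + (register offset << shift). */
    static volatile unsigned char *
    dbg_uart_reg(unsigned long base, unsigned int offset, unsigned int shift)
    {
            return (volatile unsigned char *)(base + (offset << shift));
    }
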
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c1785eec2cf7..7f99cd652203 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -266,6 +266,7 @@ core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
+core-y += arch/arm/probes/
core-y += arch/arm/net/
core-y += arch/arm/crypto/
core-y += arch/arm/firmware/
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 68be9017593d..c41a793b519c 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -178,7 +178,7 @@ not_angel:
/*
* Set up a page table only if it won't overwrite ourself.
- * That means r4 < pc && r4 - 16k page directory > &_end.
+ * That means r4 < pc || r4 - 16k page directory > &_end.
* Given that r4 > &_end is most unfrequent, we add a rough
* additional 1MB of room for a possible appended DTB.
*/
@@ -263,16 +263,37 @@ restart: adr r0, LC0
* OK... Let's do some funky business here.
* If we do have a DTB appended to zImage, and we do have
* an ATAG list around, we want the later to be translated
- * and folded into the former here. To be on the safe side,
- * let's temporarily move the stack away into the malloc
- * area. No GOT fixup has occurred yet, but none of the
- * code we're about to call uses any global variable.
+ * and folded into the former here. No GOT fixup has occurred
+ * yet, but none of the code we're about to call uses any
+ * global variable.
*/
- add sp, sp, #0x10000
+
+ /* Get the initial DTB size */
+ ldr r5, [r6, #4]
+#ifndef __ARMEB__
+ /* convert to little endian */
+ eor r1, r5, r5, ror #16
+ bic r1, r1, #0x00ff0000
+ mov r5, r5, ror #8
+ eor r5, r5, r1, lsr #8
+#endif
+ /* 50% DTB growth should be good enough */
+ add r5, r5, r5, lsr #1
+ /* preserve 64-bit alignment */
+ add r5, r5, #7
+ bic r5, r5, #7
+ /* clamp to 32KB min and 1MB max */
+ cmp r5, #(1 << 15)
+ movlo r5, #(1 << 15)
+ cmp r5, #(1 << 20)
+ movhi r5, #(1 << 20)
+ /* temporarily relocate the stack past the DTB work space */
+ add sp, sp, r5
+
stmfd sp!, {r0-r3, ip, lr}
mov r0, r8
mov r1, r6
- sub r2, sp, r6
+ mov r2, r5
bl atags_to_fdt
/*
@@ -285,11 +306,11 @@ restart: adr r0, LC0
bic r0, r0, #1
add r0, r0, #0x100
mov r1, r6
- sub r2, sp, r6
+ mov r2, r5
bleq atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
- sub sp, sp, #0x10000
+ sub sp, sp, r5
#endif
mov r8, r6 @ use the appended device tree
@@ -306,7 +327,7 @@ restart: adr r0, LC0
subs r1, r5, r1
addhi r9, r9, r1
- /* Get the dtb's size */
+ /* Get the current DTB size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert r5 (dtb size) to little endian */
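For reference, the DTB workspace sizing introduced in the hunk above reduces to: read the FDT totalsize (the big-endian 32-bit field at offset 4 of the header, which is what the ldr/eor sequence byte-swaps on little-endian builds), add 50% headroom for ATAG folding, round up to 8 bytes, and clamp to the 32 KiB to 1 MiB range before moving the stack past it. A minimal C sketch of the same arithmetic, with an illustrative function name that is not taken from the tree:

    #include <stdint.h>

    /* Mirror of the assembly above: totalsize + 50%, 8-byte aligned,
     * clamped to [32 KiB, 1 MiB]. */
    static uint32_t dtb_workspace_size(const uint8_t *fdt)
    {
            uint32_t totalsize = ((uint32_t)fdt[4] << 24) |
                                 ((uint32_t)fdt[5] << 16) |
                                 ((uint32_t)fdt[6] << 8)  |
                                  (uint32_t)fdt[7];        /* big-endian field */
            uint32_t size = totalsize + (totalsize >> 1);  /* 50% growth */

            size = (size + 7) & ~7u;                       /* 64-bit alignment */
            if (size < (1u << 15))
                    size = 1u << 15;                       /* 32 KiB minimum */
            if (size > (1u << 20))
                    size = 1u << 20;                       /* 1 MiB maximum */
            return size;
    }
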
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 91bd5bd62857..19cb6fcecf89 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -410,7 +410,6 @@ dtb-$(CONFIG_ARCH_SHMOBILE_LEGACY) += \
r8a7778-bockw.dtb \
r8a7778-bockw-reference.dtb \
r8a7779-marzen.dtb \
- r8a7790-lager.dtb \
sh7372-mackerel.dtb \
sh73a0-kzm9g.dtb \
sh73a0-kzm9g-reference.dtb
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 54f118c08db8..66342515df20 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -648,6 +648,7 @@
ti,x-plate-resistance = <200>;
ti,coordinate-readouts = <5>;
ti,wire-config = <0x00 0x11 0x22 0x33>;
+ ti,charge-delay = <0x400>;
};
adc {
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 5a452fdd7c5d..c90724bded10 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -31,6 +31,7 @@
status = "disabled";
reg = <0x5c000000 0x30000>;
interrupts = <67 68 69 70>;
+ syscon = <&omap3_scm_general>;
ti,davinci-ctrl-reg-offset = <0x10000>;
ti,davinci-ctrl-mod-reg-offset = <0>;
ti,davinci-ctrl-ram-offset = <0x20000>;
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
index 4e24932c6e30..1c83b7ce0982 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
@@ -87,6 +87,7 @@
isl12057: isl12057@68 {
compatible = "isil,isl12057";
reg = <0x68>;
+ isil,irq2-can-wakeup-machine;
};
g762: g762@3e {
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
index 30586e47986a..5fbfe02964dc 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
@@ -93,6 +93,7 @@
isl12057: isl12057@68 {
compatible = "isil,isl12057";
reg = <0x68>;
+ isil,irq2-can-wakeup-machine;
};
g762: g762@3e {
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 74391dace9e7..2a9f4caac643 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -381,6 +381,13 @@
clocks = <&gateclk 4>;
};
+ rtc@a3800 {
+ compatible = "marvell,armada-380-rtc";
+ reg = <0xa3800 0x20>, <0x184a0 0x0c>;
+ reg-names = "rtc", "rtc-soc";
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
sata@a8000 {
compatible = "marvell,armada-380-ahci";
reg = <0xa8000 0x2000>;
diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
index d81430aa4ab3..fc8bdfcd2348 100644
--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
@@ -100,6 +100,7 @@
isl12057: isl12057@68 {
compatible = "isil,isl12057";
reg = <0x68>;
+ isil,irq2-can-wakeup-machine;
};
/* Controller for rear fan #1 of 3 (Protechnic
diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index 28e7e2060c33..a98ac1bd8f65 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -65,6 +65,8 @@
};
&sdhci2 {
+ broken-cd;
+ bus-width = <8>;
non-removable;
status = "okay";
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 35253c947a7c..e2f61f27944e 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -83,7 +83,8 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&chip CLKID_SDIO1XIN>;
+ clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
+ clock-names = "io", "core";
status = "disabled";
};
@@ -348,36 +349,6 @@
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
};
-
- gpio4: gpio@5000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x5000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- porte: gpio-port@4 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
-
- gpio5: gpio@c000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0xc000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- portf: gpio-port@5 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
};
chip: chip-control@ea0000 {
@@ -466,6 +437,21 @@
ranges = <0 0xfc0000 0x10000>;
interrupt-parent = <&sic>;
+ sm_gpio1: gpio@5000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x5000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ portf: gpio-port@5 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
i2c2: i2c@7000 {
compatible = "snps,designware-i2c";
#address-cells = <1>;
@@ -516,6 +502,21 @@
status = "disabled";
};
+ sm_gpio0: gpio@c000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xc000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ porte: gpio-port@4 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
sysctrl: pin-controller@d000 {
compatible = "marvell,berlin2q-system-ctrl";
reg = <0xd000 0x100>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 10b725c7bfc0..ad4118f7e1a6 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -499,23 +499,23 @@
};
partition@5 {
label = "QSPI.u-boot-spl-os";
- reg = <0x00140000 0x00010000>;
+ reg = <0x00140000 0x00080000>;
};
partition@6 {
label = "QSPI.u-boot-env";
- reg = <0x00150000 0x00010000>;
+ reg = <0x001c0000 0x00010000>;
};
partition@7 {
label = "QSPI.u-boot-env.backup1";
- reg = <0x00160000 0x0010000>;
+ reg = <0x001d0000 0x0010000>;
};
partition@8 {
label = "QSPI.kernel";
- reg = <0x00170000 0x0800000>;
+ reg = <0x001e0000 0x0800000>;
};
partition@9 {
label = "QSPI.file-system";
- reg = <0x00970000 0x01690000>;
+ reg = <0x009e0000 0x01620000>;
};
};
};
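For reference, the resized QSPI partitions above remain contiguous and still end at 0x02000000 (32 MiB), as the old layout did:

    QSPI.u-boot-spl-os       0x00140000 + 0x00080000 = 0x001c0000
    QSPI.u-boot-env          0x001c0000 + 0x00010000 = 0x001d0000
    QSPI.u-boot-env.backup1  0x001d0000 + 0x00010000 = 0x001e0000
    QSPI.kernel              0x001e0000 + 0x00800000 = 0x009e0000
    QSPI.file-system         0x009e0000 + 0x01620000 = 0x02000000
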
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 22771bc1643a..63f8b007bdc5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1257,6 +1257,8 @@
tx-fifo-resize;
maximum-speed = "super-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -1278,6 +1280,8 @@
tx-fifo-resize;
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -1299,6 +1303,8 @@
tx-fifo-resize;
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index b8168f1f8139..cb6001085f1a 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -61,9 +61,12 @@
reg = <0x03830000 0x100>;
clocks = <&clock_audss EXYNOS_I2S_BUS>;
clock-names = "iis";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk0";
dmas = <&pdma0 12>, <&pdma0 11>, <&pdma0 10>;
dma-names = "tx", "rx", "tx-sec";
samsung,idma-addr = <0x03000000>;
+ #sound-dai-cells = <1>;
status = "disabled";
};
@@ -368,22 +371,28 @@
};
i2s1: i2s@13960000 {
- compatible = "samsung,s5pv210-i2s";
+ compatible = "samsung,s3c6410-i2s";
reg = <0x13960000 0x100>;
clocks = <&clock CLK_I2S1>;
clock-names = "iis";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk1";
dmas = <&pdma1 12>, <&pdma1 11>;
dma-names = "tx", "rx";
+ #sound-dai-cells = <1>;
status = "disabled";
};
i2s2: i2s@13970000 {
- compatible = "samsung,s5pv210-i2s";
+ compatible = "samsung,s3c6410-i2s";
reg = <0x13970000 0x100>;
clocks = <&clock CLK_I2S2>;
clock-names = "iis";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk2";
dmas = <&pdma0 14>, <&pdma0 13>;
dma-names = "tx", "rx";
+ #sound-dai-cells = <1>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index f767c425d0b5..b81146141402 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -31,6 +31,7 @@
chosen {
bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
+ stdout-path = &serial_2;
};
regulators {
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
index 676e6e0c8cf3..86216fff1b4f 100644
--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -27,6 +27,7 @@
chosen {
bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc";
+ stdout-path = &serial_1;
};
sdhci@12530000 {
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 720836205546..a406df3d6df8 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -28,6 +28,7 @@
chosen {
bootargs = "console=ttySAC2,115200N8 root=/dev/mmcblk0p5 rootwait earlyprintk panic=5";
+ stdout-path = &serial_2;
};
regulators {
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index aaf0cae4f5e8..6effb13f98a6 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -26,6 +26,7 @@
chosen {
bootargs = "console=ttySAC2,115200N8 root=/dev/mmcblk0p5 rw rootwait earlyprintk panic=5 maxcpus=1";
+ stdout-path = &serial_2;
};
sysram@02020000 {
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index bcc9e63c8070..8e45ea44317e 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -81,6 +81,15 @@
reg = <0x10023CA0 0x20>;
};
+ l2c: l2-cache-controller@10502000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x10502000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ arm,tag-latency = <2 2 1>;
+ arm,data-latency = <2 2 1>;
+ };
+
gic: interrupt-controller@10490000 {
cpu-offset = <0x8000>;
};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 3fbf588682b9..2c43d1859308 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -7,10 +7,15 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/sound/samsung-i2s.h>
#include <dt-bindings/input/input.h>
#include "exynos4412.dtsi"
/ {
+ chosen {
+ stdout-path = &serial_1;
+ };
+
firmware@0204F000 {
compatible = "samsung,secure-firmware";
reg = <0x0204F000 0x1000>;
@@ -37,14 +42,13 @@
pinctrl-names = "default";
status = "okay";
clocks = <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_DOUT_AUD_BUS>;
- clock-names = "iis", "i2s_opclk0";
+ <&clock_audss EXYNOS_DOUT_AUD_BUS>,
+ <&clock_audss EXYNOS_SCLK_I2S>;
+ clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
};
sound: sound {
- compatible = "samsung,odroidx2-audio";
- samsung,i2s-controller = <&i2s0>;
- samsung,audio-codec = <&max98090>;
+ compatible = "simple-audio-card";
assigned-clocks = <&clock_audss EXYNOS_MOUT_AUDSS>,
<&clock_audss EXYNOS_MOUT_I2S>,
<&clock_audss EXYNOS_DOUT_SRP>,
@@ -55,6 +59,20 @@
<0>,
<192000000>,
<19200000>;
+
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&link0_codec>;
+ simple-audio-card,frame-master = <&link0_codec>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0 0>;
+ system-clock-frequency = <19200000>;
+ };
+
+ link0_codec: simple-audio-card,codec {
+ sound-dai = <&max98090>;
+ clocks = <&i2s0 CLK_I2S_CDCLK>;
+ };
};
mmc@12550000 {
@@ -373,6 +391,9 @@
reg = <0x10>;
interrupt-parent = <&gpx0>;
interrupts = <0 0>;
+ clocks = <&i2s0 CLK_I2S_CDCLK>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-odroidu3.dts b/arch/arm/boot/dts/exynos4412-odroidu3.dts
index c8a64be55d07..44684e57ead1 100644
--- a/arch/arm/boot/dts/exynos4412-odroidu3.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidu3.dts
@@ -49,9 +49,11 @@
};
&sound {
- compatible = "samsung,odroidu3-audio";
- samsung,model = "Odroid-U3";
- samsung,audio-routing =
+ simple-audio-card,name = "Odroid-U3";
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack",
+ "Speakers", "Speakers";
+ simple-audio-card,routing =
"Headphone Jack", "HPL",
"Headphone Jack", "HPR",
"Headphone Jack", "MICBIAS",
diff --git a/arch/arm/boot/dts/exynos4412-odroidx2.dts b/arch/arm/boot/dts/exynos4412-odroidx2.dts
index 96b43f4497cc..6e33678562ae 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx2.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx2.dts
@@ -23,8 +23,12 @@
};
&sound {
- samsung,model = "Odroid-X2";
- samsung,audio-routing =
+ simple-audio-card,name = "Odroid-X2";
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack",
+ "Microphone", "Mic Jack",
+ "Microphone", "DMIC";
+ simple-audio-card,routing =
"Headphone Jack", "HPL",
"Headphone Jack", "HPR",
"IN1", "Mic Jack",
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index de15114fd07c..bd8b73077d41 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -26,6 +26,7 @@
chosen {
bootargs ="console=ttySAC2,115200";
+ stdout-path = &serial_2;
};
firmware@0203F000 {
diff --git a/arch/arm/boot/dts/exynos4412-smdk4412.dts b/arch/arm/boot/dts/exynos4412-smdk4412.dts
index ded0b70f7644..b9256afbcc68 100644
--- a/arch/arm/boot/dts/exynos4412-smdk4412.dts
+++ b/arch/arm/boot/dts/exynos4412-smdk4412.dts
@@ -25,6 +25,7 @@
chosen {
bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc";
+ stdout-path = &serial_1;
};
g2d@10800000 {
diff --git a/arch/arm/boot/dts/exynos4412-tiny4412.dts b/arch/arm/boot/dts/exynos4412-tiny4412.dts
index ea6929d9c621..d46fd4c2aeaa 100644
--- a/arch/arm/boot/dts/exynos4412-tiny4412.dts
+++ b/arch/arm/boot/dts/exynos4412-tiny4412.dts
@@ -18,6 +18,10 @@
model = "FriendlyARM TINY4412 board based on Exynos4412";
compatible = "friendlyarm,tiny4412", "samsung,exynos4412", "samsung,exynos4";
+ chosen {
+ stdout-path = &serial_0;
+ };
+
memory {
reg = <0x40000000 0x40000000>;
};
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 29231b452643..5fbb01335a0f 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -32,6 +32,7 @@
chosen {
bootargs = "console=ttySAC2,115200N8 root=/dev/mmcblk0p5 rootwait earlyprintk panic=5";
+ stdout-path = &serial_2;
};
firmware@0204F000 {
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index 93b70402e943..8bc97c415c9a 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -54,6 +54,20 @@
reg = <0x10023CA0 0x20>;
};
+ l2c: l2-cache-controller@10502000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x10502000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ arm,tag-latency = <2 2 1>;
+ arm,data-latency = <3 2 1>;
+ arm,double-linefill = <1>;
+ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <1>;
+ arm,prefetch-drop = <1>;
+ arm,prefetch-offset = <7>;
+ };
+
clock: clock-controller@10030000 {
compatible = "samsung,exynos4412-clock";
reg = <0x10030000 0x20000>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0a229fcd7acf..d75c89d7666a 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -736,7 +736,7 @@
dp_phy: video-phy@10040720 {
compatible = "samsung,exynos5250-dp-video-phy";
- reg = <0x10040720 4>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index aa7a7d727a7e..db2c1c4cd900 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -372,3 +372,7 @@
&usbdrd_dwc3_1 {
dr_mode = "host";
};
+
+&cci {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 517e50f6760b..6d38f8bfd0e6 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -120,7 +120,7 @@
};
};
- cci@10d20000 {
+ cci: cci@10d20000 {
compatible = "arm,cci-400";
#address-cells = <1>;
#size-cells = <1>;
@@ -503,8 +503,8 @@
};
dp_phy: video-phy@10040728 {
- compatible = "samsung,exynos5250-dp-video-phy";
- reg = <0x10040728 4>;
+ compatible = "samsung,exynos5420-dp-video-phy";
+ samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 58d3c3cf2923..e4d3aecc4ed2 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -162,7 +162,7 @@
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x43fa4000 0x4000>;
- clocks = <&clks 62>, <&clks 62>;
+ clocks = <&clks 78>, <&clks 78>;
clock-names = "ipg", "per";
interrupts = <14>;
status = "disabled";
@@ -369,7 +369,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fa0000 0x4000>;
- clocks = <&clks 106>, <&clks 36>;
+ clocks = <&clks 106>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <36>;
};
@@ -388,7 +388,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fa8000 0x4000>;
- clocks = <&clks 107>, <&clks 36>;
+ clocks = <&clks 107>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <41>;
};
@@ -429,7 +429,7 @@
pwm4: pwm@53fc8000 {
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
reg = <0x53fc8000 0x4000>;
- clocks = <&clks 108>, <&clks 36>;
+ clocks = <&clks 108>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <42>;
};
@@ -476,7 +476,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fe0000 0x4000>;
- clocks = <&clks 105>, <&clks 36>;
+ clocks = <&clks 105>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <26>;
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 56569cecaa78..649befeb2cf9 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -127,24 +127,12 @@
#address-cells = <1>;
#size-cells = <0>;
- reg_usbh1_vbus: regulator@0 {
- compatible = "regulator-fixed";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1reg>;
- reg = <0>;
- regulator-name = "usbh1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_usbotg_vbus: regulator@1 {
+ reg_hub_reset: regulator@0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotgreg>;
- reg = <1>;
- regulator-name = "usbotg_vbus";
+ reg = <0>;
+ regulator-name = "hub_reset";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
@@ -176,6 +164,7 @@
reg = <0>;
clocks = <&clks IMX5_CLK_DUMMY>;
clock-names = "main_clk";
+ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
};
};
};
@@ -419,7 +408,7 @@
&usbh1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh1>;
- vbus-supply = <&reg_usbh1_vbus>;
+ vbus-supply = <&reg_hub_reset>;
fsl,usbphy = <&usbh1phy>;
phy_type = "ulpi";
status = "okay";
@@ -429,7 +418,6 @@
dr_mode = "otg";
disable-over-current;
phy_type = "utmi_wide";
- vbus-supply = <&reg_usbotg_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4fc03b7f1cee..2109d0763c1b 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -335,8 +335,8 @@
vpu: vpu@02040000 {
compatible = "cnm,coda960";
reg = <0x02040000 0x3c000>;
- interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
- <0 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 3 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "bit", "jpeg";
clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
<&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 1e6e5cc1c14c..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -159,13 +159,28 @@
pinctrl-0 = <&pinctrl_enet1>;
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy1>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
};
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 657da14cb4b5..c70bb27ac65a 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -142,6 +142,7 @@
scfg: scfg@1570000 {
compatible = "fsl,ls1021a-scfg", "syscon";
reg = <0x0 0x1570000 0x0 0x10000>;
+ big-endian;
};
clockgen: clocking@1ee1000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 53f3ca064140..b550c41b46f1 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -700,11 +700,9 @@
};
};
+ /* Ethernet is on some early development boards and qemu */
ethernet@gpmc {
compatible = "smsc,lan91c94";
-
- status = "disabled";
-
interrupt-parent = <&gpio2>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */
reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 636d53bb87a2..4118030f366d 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -47,12 +47,12 @@
compatible = "renesas,lager", "renesas,r8a7790";
aliases {
- serial6 = &scifa0;
- serial7 = &scifa1;
+ serial0 = &scifa0;
+ serial1 = &scifa1;
};
chosen {
- bootargs = "console=ttySC6,115200 ignore_loglevel rw root=/dev/nfs ip=dhcp";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = &scifa0;
};
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 990af167c551..bf58c79a6554 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -48,8 +48,8 @@
compatible = "renesas,koelsch", "renesas,r8a7791";
aliases {
- serial6 = &scif0;
- serial7 = &scif1;
+ serial0 = &scif0;
+ serial1 = &scif1;
};
chosen {
diff --git a/arch/arm/boot/dts/rk3288-evb-rk808.dts b/arch/arm/boot/dts/rk3288-evb-rk808.dts
index d8c775e6d5fe..e1d3eeb8f094 100644
--- a/arch/arm/boot/dts/rk3288-evb-rk808.dts
+++ b/arch/arm/boot/dts/rk3288-evb-rk808.dts
@@ -15,6 +15,13 @@
/ {
compatible = "rockchip,rk3288-evb-rk808", "rockchip,rk3288";
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ #clock-cells = <0>;
+ };
};
&cpu0 {
@@ -152,3 +159,19 @@
};
};
};
+
+&gmac {
+ phy-supply = <&vcc_phy>;
+ phy-mode = "rgmii";
+ clock_in_out = "input";
+ snps,reset-gpio = <&gpio4 7 0>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 3e067dd65d0c..1c08eb0ecdb9 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -90,6 +90,19 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vcc_phy: vcc-phy-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth_phy_pwr>;
+ regulator-name = "vcc_phy";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
&emmc {
@@ -155,6 +168,15 @@
};
&pinctrl {
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
backlight {
bl_en: bl-en {
rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -173,11 +195,38 @@
};
};
+ sdmmc {
+ /*
+ * Default drive strength isn't enough to achieve even
+ * high-speed mode on EVB board so bump up to 8ma.
+ */
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+ };
+
usb {
host_vbus_drv: host-vbus-drv {
rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ eth_phy {
+ eth_phy_pwr: eth-phy-pwr {
+ rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&usb_host0_ehci {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index fd19f00784bd..910dcad2088a 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -380,6 +380,22 @@
status = "disabled";
};
+ gmac: ethernet@ff290000 {
+ compatible = "rockchip,rk3288-gmac";
+ reg = <0xff290000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ rockchip,grf = <&grf>;
+ clocks = <&cru SCLK_MAC>,
+ <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
+ <&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
+ <&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
+ clock-names = "stmmaceth",
+ "mac_clk_rx", "mac_clk_tx",
+ "clk_mac_ref", "clk_mac_refout",
+ "aclk_mac", "pclk_mac";
+ };
+
usb_host0_ehci: usb@ff500000 {
compatible = "generic-ehci";
reg = <0xff500000 0x100>;
@@ -725,6 +741,11 @@
bias-disable;
};
+ pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+ bias-disable;
+ drive-strength = <12>;
+ };
+
i2c0 {
i2c0_xfer: i2c0-xfer {
rockchip,pins = <0 15 RK_FUNC_1 &pcfg_pull_none>,
@@ -1068,5 +1089,38 @@
rockchip,pins = <7 23 3 &pcfg_pull_none>;
};
};
+
+ gmac {
+ rgmii_pins: rgmii-pins {
+ rockchip,pins = <3 30 3 &pcfg_pull_none>,
+ <3 31 3 &pcfg_pull_none>,
+ <3 26 3 &pcfg_pull_none>,
+ <3 27 3 &pcfg_pull_none>,
+ <3 28 3 &pcfg_pull_none_12ma>,
+ <3 29 3 &pcfg_pull_none_12ma>,
+ <3 24 3 &pcfg_pull_none_12ma>,
+ <3 25 3 &pcfg_pull_none_12ma>,
+ <4 0 3 &pcfg_pull_none>,
+ <4 5 3 &pcfg_pull_none>,
+ <4 6 3 &pcfg_pull_none>,
+ <4 9 3 &pcfg_pull_none_12ma>,
+ <4 4 3 &pcfg_pull_none_12ma>,
+ <4 1 3 &pcfg_pull_none>,
+ <4 3 3 &pcfg_pull_none>;
+ };
+
+ rmii_pins: rmii-pins {
+ rockchip,pins = <3 30 3 &pcfg_pull_none>,
+ <3 31 3 &pcfg_pull_none>,
+ <3 28 3 &pcfg_pull_none>,
+ <3 29 3 &pcfg_pull_none>,
+ <4 0 3 &pcfg_pull_none>,
+ <4 5 3 &pcfg_pull_none>,
+ <4 4 3 &pcfg_pull_none>,
+ <4 1 3 &pcfg_pull_none>,
+ <4 2 3 &pcfg_pull_none>,
+ <4 3 3 &pcfg_pull_none>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index a8c00ee7522a..3d0b8755caee 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -25,11 +25,11 @@
stmpe2401_1 {
stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
nhk_cfg1 {
- ste,pins = "GPIO76_B20"; // IRQ line
+ pins = "GPIO76_B20"; // IRQ line
ste,input = <0>;
};
nhk_cfg2 {
- ste,pins = "GPIO77_B8"; // reset line
+ pins = "GPIO77_B8"; // reset line
ste,output = <1>;
};
};
@@ -37,11 +37,11 @@
stmpe2401_2 {
stmpe2401_2_nhk_mode: stmpe2401_2_nhk {
nhk_cfg1 {
- ste,pins = "GPIO78_A8"; // IRQ line
+ pins = "GPIO78_A8"; // IRQ line
ste,input = <0>;
};
nhk_cfg2 {
- ste,pins = "GPIO79_C9"; // reset line
+ pins = "GPIO79_C9"; // reset line
ste,output = <1>;
};
};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 3e31d32133b8..d4a8f843cdc8 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -274,5 +274,14 @@
status = "disabled";
};
+
+ usb2_picophy0: phy1 {
+ compatible = "st,stih407-usb2-phy";
+ #phy-cells = <0>;
+ st,syscfg = <&syscfg_core 0x100 0xf4>;
+ resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+ <&picophyreset STIH407_PICOPHY0_RESET>;
+ reset-names = "global", "port";
+ };
};
};
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index c05627eb717d..37995f4739d2 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -10,5 +10,75 @@
#include "stih407-family.dtsi"
#include "stih410-pinctrl.dtsi"
/ {
+ soc {
+ usb2_picophy1: phy2 {
+ compatible = "st,stih407-usb2-phy";
+ #phy-cells = <0>;
+ st,syscfg = <&syscfg_core 0xf8 0xf4>;
+ resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+ <&picophyreset STIH407_PICOPHY0_RESET>;
+ reset-names = "global", "port";
+ };
+ usb2_picophy2: phy3 {
+ compatible = "st,stih407-usb2-phy";
+ #phy-cells = <0>;
+ st,syscfg = <&syscfg_core 0xfc 0xf4>;
+ resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+ <&picophyreset STIH407_PICOPHY1_RESET>;
+ reset-names = "global", "port";
+ };
+
+ ohci0: usb@9a03c00 {
+ compatible = "st,st-ohci-300x";
+ reg = <0x9a03c00 0x100>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+ reset-names = "power", "softreset";
+ phys = <&usb2_picophy1>;
+ phy-names = "usb";
+ };
+
+ ehci0: usb@9a03e00 {
+ compatible = "st,st-ehci-300x";
+ reg = <0x9a03e00 0x100>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+ reset-names = "power", "softreset";
+ phys = <&usb2_picophy1>;
+ phy-names = "usb";
+ };
+
+ ohci1: usb@9a83c00 {
+ compatible = "st,st-ohci-300x";
+ reg = <0x9a83c00 0x100>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+ reset-names = "power", "softreset";
+ phys = <&usb2_picophy2>;
+ phy-names = "usb";
+ };
+
+ ehci1: usb@9a83e00 {
+ compatible = "st,st-ehci-300x";
+ reg = <0x9a83e00 0x100>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+ reset-names = "power", "softreset";
+ phys = <&usb2_picophy2>;
+ phy-names = "usb";
+ };
+ };
};
diff --git a/arch/arm/boot/dts/stih415.dtsi b/arch/arm/boot/dts/stih415.dtsi
index 9198c12765ea..19b019b5f30e 100644
--- a/arch/arm/boot/dts/stih415.dtsi
+++ b/arch/arm/boot/dts/stih415.dtsi
@@ -153,8 +153,8 @@
compatible = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
status = "disabled";
- reg = <0xfe810000 0x8000>, <0x148 0x4>;
- reg-names = "stmmaceth", "sti-ethconf";
+ reg = <0xfe810000 0x8000>;
+ reg-names = "stmmaceth";
interrupts = <0 147 0>, <0 148 0>, <0 149 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
@@ -165,7 +165,7 @@
snps,mixed-burst;
snps,force_sf_dma_mode;
- st,syscon = <&syscfg_rear>;
+ st,syscon = <&syscfg_rear 0x148>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mii0>;
@@ -177,8 +177,8 @@
device_type = "network";
compatible = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
status = "disabled";
- reg = <0xfef08000 0x8000>, <0x74 0x4>;
- reg-names = "stmmaceth", "sti-ethconf";
+ reg = <0xfef08000 0x8000>;
+ reg-names = "stmmaceth";
interrupts = <0 150 0>, <0 151 0>, <0 152 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
@@ -186,7 +186,7 @@
snps,mixed-burst;
snps,force_sf_dma_mode;
- st,syscon = <&syscfg_sbc>;
+ st,syscon = <&syscfg_sbc 0x74>;
resets = <&softreset STIH415_ETH1_SOFTRESET>;
reset-names = "stmmaceth";
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index fad9073ddeed..ea28ebadab1a 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -163,8 +163,8 @@
device_type = "network";
compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
status = "disabled";
- reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
- reg-names = "stmmaceth", "sti-ethconf";
+ reg = <0xfe810000 0x8000>;
+ reg-names = "stmmaceth";
interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
@@ -172,7 +172,7 @@
snps,pbl = <32>;
snps,mixed-burst;
- st,syscon = <&syscfg_rear>;
+ st,syscon = <&syscfg_rear 0x8bc>;
resets = <&softreset STIH416_ETH0_SOFTRESET>;
reset-names = "stmmaceth";
pinctrl-names = "default";
@@ -185,15 +185,15 @@
device_type = "network";
compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
status = "disabled";
- reg = <0xfef08000 0x8000>, <0x7f0 0x4>;
- reg-names = "stmmaceth", "sti-ethconf";
+ reg = <0xfef08000 0x8000>;
+ reg-names = "stmmaceth";
interrupts = <0 136 0>, <0 137 0>, <0 138 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
snps,pbl = <32>;
snps,mixed-burst;
- st,syscon = <&syscfg_sbc>;
+ st,syscon = <&syscfg_sbc 0x7f0>;
resets = <&softreset STIH416_ETH1_SOFTRESET>;
reset-names = "stmmaceth";
@@ -283,21 +283,21 @@
miphy365x_phy: phy@fe382000 {
compatible = "st,miphy365x-phy";
- st,syscfg = <&syscfg_rear>;
+ st,syscfg = <&syscfg_rear 0x824 0x828>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
phy_port0: port@fe382000 {
#phy-cells = <1>;
- reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
- reg-names = "sata", "pcie", "syscfg";
+ reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
+ reg-names = "sata", "pcie";
};
phy_port1: port@fe38a000 {
#phy-cells = <1>;
- reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;
- reg-names = "sata", "pcie", "syscfg";
+ reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
+ reg-names = "sata", "pcie";
};
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 7b4099fcf817..d5c4669224b1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -17,14 +17,6 @@
aliases {
ethernet0 = &emac;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial7 = &uart7;
};
chosen {
@@ -39,6 +31,14 @@
<&ahb_gates 44>;
status = "disabled";
};
+
+ framebuffer@1 {
+ compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+ allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+ clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+ <&ahb_gates 44>, <&ahb_gates 46>;
+ status = "disabled";
+ };
};
cpus {
@@ -438,8 +438,8 @@
reg-names = "phy_ctrl", "pmu1", "pmu2";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
- resets = <&usb_clk 1>, <&usb_clk 2>;
- reset-names = "usb1_reset", "usb2_reset";
+ resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+ reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index fe3c559ca6a8..bfa742817690 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -55,6 +55,12 @@
model = "Olimex A10s-Olinuxino Micro";
compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ };
+
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 1b76667f3182..2e7d8263799d 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -18,10 +18,6 @@
aliases {
ethernet0 = &emac;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
};
chosen {
@@ -390,8 +386,8 @@
reg-names = "phy_ctrl", "pmu1";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
- resets = <&usb_clk 1>;
- reset-names = "usb1_reset";
+ resets = <&usb_clk 0>, <&usb_clk 1>;
+ reset-names = "usb0_reset", "usb1_reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index eeed1f236ee8..c7be3abd9fcc 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -53,6 +53,10 @@
model = "HSG H702";
compatible = "hsg,h702", "allwinner,sun5i-a13";
+ aliases {
+ serial0 = &uart1;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 916ee8bb826f..3decefb3c37a 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -54,6 +54,10 @@
model = "Olimex A13-Olinuxino Micro";
compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
+ aliases {
+ serial0 = &uart1;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index e31d291d14cb..b421f7fa197b 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -55,6 +55,10 @@
model = "Olimex A13-Olinuxino";
compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
+ aliases {
+ serial0 = &uart1;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index c35217ea1f64..c556688f8b8b 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -16,11 +16,6 @@
/ {
interrupt-parent = <&intc>;
- aliases {
- serial0 = &uart1;
- serial1 = &uart3;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -349,8 +344,8 @@
reg-names = "phy_ctrl", "pmu1";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
- resets = <&usb_clk 1>;
- reset-names = "usb1_reset";
+ resets = <&usb_clk 0>, <&usb_clk 1>;
+ reset-names = "usb0_reset", "usb1_reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f47156b6572b..1e7e7bcf8307 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -53,12 +53,6 @@
interrupt-parent = <&gic>;
aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
ethernet0 = &gmac;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 1cf1214cc068..bd7b15add697 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -55,6 +55,12 @@
model = "LeMaker Banana Pi";
compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart3;
+ serial2 = &uart7;
+ };
+
soc@01c00000 {
spi0: spi@01c05000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index 0e4bfa3b2b85..0bcefcbbb756 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -19,6 +19,14 @@
model = "Merrii A20 Hummingbird";
compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 9d669cdf031d..66cc77707198 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -20,6 +20,9 @@
compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
aliases {
+ serial0 = &uart0;
+ serial1 = &uart6;
+ serial2 = &uart7;
spi0 = &spi1;
spi1 = &spi2;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e21ce5992d56..89749ce34a84 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -54,14 +54,6 @@
aliases {
ethernet0 = &gmac;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial7 = &uart7;
};
chosen {
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
index 7f2117ce6985..32ad80804dbb 100644
--- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
+++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
@@ -55,6 +55,10 @@
model = "Ippo Q8H Dual Core Tablet (v5)";
compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
+ aliases {
+ serial0 = &r_uart;
+ };
+
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 0746cd1024d7..86584fcf5e32 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -52,15 +52,6 @@
/ {
interrupt-parent = <&gic>;
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &r_uart;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 506948f582ee..11ec71072e81 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -54,6 +54,11 @@
model = "Merrii A80 Optimus Board";
compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart4;
+ };
+
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 494714f67b57..9ef4438206a9 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -52,16 +52,6 @@
/ {
interrupt-parent = <&gic>;
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &r_uart;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index ea282c7c0ca5..e2fed2712249 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -406,7 +406,7 @@
clock-frequency = <400000>;
magnetometer@c {
- compatible = "ak,ak8975";
+ compatible = "asahi-kasei,ak8975";
reg = <0xc>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(N, 5) IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 27d0d9c8adf3..01f40197ea13 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -252,6 +252,11 @@
#size-cells = <1>;
ranges = <0 0x10000000 0x10000>;
+ sysreg@0 {
+ compatible = "arm,versatile-sysreg", "syscon";
+ reg = <0x00000 0x1000>;
+ };
+
aaci@4000 {
compatible = "arm,primecell";
reg = <0x4000 0x1000>;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index e36c1e82fea7..b83137f66034 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -29,6 +29,43 @@
clock-names = "apb_pclk";
};
+ pci-controller@10001000 {
+ compatible = "arm,versatile-pci";
+ device_type = "pci";
+ reg = <0x10001000 0x1000
+ 0x41000000 0x10000
+ 0x42000000 0x100000>;
+ bus-range = <0 0xff>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000 /* downstream I/O */
+ 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
+ 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
+
+ interrupt-map-mask = <0x1800 0 0 7>;
+ interrupt-map = <0x1800 0 0 1 &sic 28
+ 0x1800 0 0 2 &sic 29
+ 0x1800 0 0 3 &sic 30
+ 0x1800 0 0 4 &sic 27
+
+ 0x1000 0 0 1 &sic 27
+ 0x1000 0 0 2 &sic 28
+ 0x1000 0 0 3 &sic 29
+ 0x1000 0 0 4 &sic 30
+
+ 0x0800 0 0 1 &sic 30
+ 0x0800 0 0 2 &sic 27
+ 0x0800 0 0 3 &sic 28
+ 0x0800 0 0 4 &sic 29
+
+ 0x0000 0 0 1 &sic 29
+ 0x0000 0 0 2 &sic 30
+ 0x0000 0 0 3 &sic 27
+ 0x0000 0 0 4 &sic 28>;
+ };
+
fpga {
uart@9000 {
compatible = "arm,pl011", "arm,primecell";
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index a0f762159cb2..f2b64b1b00fa 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -129,13 +129,28 @@
&fec0 {
phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec0>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
&fec1 {
phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
status = "okay";
diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts
index ab1dc0a56cdd..174571232ea5 100644
--- a/arch/arm/boot/dts/zynq-parallella.dts
+++ b/arch/arm/boot/dts/zynq-parallella.dts
@@ -58,7 +58,7 @@
status = "okay";
isl9305: isl9305@68 {
- compatible = "isl,isl9305";
+ compatible = "isil,isl9305";
reg = <0x68>;
regulators {
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 5ef14de00a29..3d0c5d65c741 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -84,7 +84,8 @@ CONFIG_DEBUG_GPIO=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_SBS=y
CONFIG_CHARGER_TPS65090=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
+CONFIG_SENSORS_LM90=y
CONFIG_THERMAL=y
CONFIG_EXYNOS_THERMAL=y
CONFIG_EXYNOS_THERMAL_CORE=y
@@ -109,11 +110,26 @@ CONFIG_REGULATOR_S2MPA01=y
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS65090=y
+CONFIG_DRM=y
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PTN3460=y
+CONFIG_DRM_PS8622=y
+CONFIG_DRM_EXYNOS=y
+CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_DP=y
+CONFIG_DRM_PANEL=y
+CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_SIMPLE=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_7x14=y
diff --git a/arch/arm/configs/iop32x_defconfig b/arch/arm/configs/iop32x_defconfig
index 4f2ec3ac138e..c3058da631da 100644
--- a/arch/arm/configs/iop32x_defconfig
+++ b/arch/arm/configs/iop32x_defconfig
@@ -106,6 +106,7 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_CRYPTO_NULL=y
diff --git a/arch/arm/configs/iop33x_defconfig b/arch/arm/configs/iop33x_defconfig
index aa36128abca2..713faeee8cf4 100644
--- a/arch/arm/configs/iop33x_defconfig
+++ b/arch/arm/configs/iop33x_defconfig
@@ -87,5 +87,6 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 1af665e847d1..24636cfdf6df 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -202,3 +202,4 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_ERRORS=y
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
diff --git a/arch/arm/configs/lager_defconfig b/arch/arm/configs/lager_defconfig
deleted file mode 100644
index a82afc916a89..000000000000
--- a/arch/arm/configs/lager_defconfig
+++ /dev/null
@@ -1,150 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE_LEGACY=y
-CONFIG_ARCH_R8A7790=y
-CONFIG_MACH_LAGER=y
-# CONFIG_SH_TIMER_TMU is not set
-# CONFIG_EM_TIMER_STI is not set
-CONFIG_ARM_ERRATA_430973=y
-CONFIG_ARM_ERRATA_458693=y
-CONFIG_ARM_ERRATA_460075=y
-CONFIG_ARM_ERRATA_743622=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_PCI=y
-CONFIG_PCI_RCAR_GEN2=y
-CONFIG_PCI_RCAR_GEN2_PCIE=y
-CONFIG_HAVE_ARM_ARCH_TIMER=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_KEXEC=y
-CONFIG_AUTO_ZRELADDR=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_SATA_RCAR=y
-CONFIG_NETDEVICES=y
-# CONFIG_NET_CORE is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_FARADAY is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-CONFIG_SH_ETH=y
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=10
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C_GPIO=y
-CONFIG_I2C_SH_MOBILE=y
-CONFIG_I2C_RCAR=y
-CONFIG_SPI=y
-CONFIG_SPI_RSPI=y
-CONFIG_SPI_SH_MSIOF=y
-CONFIG_GPIO_SH_PFC=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_RCAR=y
-# CONFIG_HWMON is not set
-CONFIG_THERMAL=y
-CONFIG_RCAR_THERMAL=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_REGULATOR_DA9210=y
-CONFIG_REGULATOR_GPIO=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_SOC_CAMERA=y
-CONFIG_SOC_CAMERA_PLATFORM=y
-CONFIG_VIDEO_RCAR_VIN=y
-# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
-CONFIG_VIDEO_ADV7180=y
-CONFIG_DRM=y
-CONFIG_DRM_RCAR_DU=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_RCAR=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_SDHI=y
-CONFIG_MMC_SH_MMCIF=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_RTC_CLASS=y
-CONFIG_DMADEVICES=y
-CONFIG_SH_DMAE=y
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_ARM_UNWIND is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 9f56ca3985ae..c100b7df5441 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -204,6 +204,7 @@ CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
# CONFIG_ARM_UNWIND is not set
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index bc393b7e5ece..444685c44055 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -456,6 +456,7 @@ CONFIG_OMAP_USB2=y
CONFIG_TI_PIPE3=y
CONFIG_PHY_MIPHY365X=y
CONFIG_PHY_STIH41X_USB=y
+CONFIG_PHY_STIH407_USB=y
CONFIG_PHY_SUN4I_USB=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index 0dae1c1f007a..85d10d2e3d66 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -132,6 +132,7 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 627accea72fb..2400b9f52403 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -112,6 +112,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S35390A=y
CONFIG_RTC_DRV_MV=y
+CONFIG_RTC_DRV_ARMADA38X=y
CONFIG_DMADEVICES=y
CONFIG_MV_XOR=y
# CONFIG_IOMMU_SUPPORT is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c2c3a852af9f..667d9d52aa01 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
CONFIG_CPU_IDLE=y
CONFIG_BINFMT_MISC=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 952430d9e2d9..855143fac6bd 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -156,6 +156,7 @@ CONFIG_LATENCYTOP=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index 00515ef9782d..89631795a915 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -131,3 +131,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 3df6ca0c1d1f..e4a6c5e9174d 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -17,7 +17,6 @@ CONFIG_ARCH_R8A7779=y
CONFIG_ARCH_R8A7790=y
CONFIG_ARCH_R8A7791=y
CONFIG_ARCH_R8A7794=y
-CONFIG_MACH_LAGER=y
CONFIG_MACH_MARZEN=y
# CONFIG_SWP_EMULATE is not set
CONFIG_CPU_BPREDICT_DISABLE=y
diff --git a/arch/arm/include/asm/bitrev.h b/arch/arm/include/asm/bitrev.h
new file mode 100644
index 000000000000..ec291c350ea3
--- /dev/null
+++ b/arch/arm/include/asm/bitrev.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ __asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
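
For reference, a portable user-space sketch of what the rbit-based helpers above compute; this is illustrative only (not kernel code), but it shows how the 16- and 8-bit variants fall out of the 32-bit reversal by shifting.

#include <stdint.h>
#include <stdio.h>

/* Reverse the bit order of a 32-bit word, as the ARM 'rbit' instruction does. */
static uint32_t bitrev32(uint32_t x)
{
        uint32_t r = 0;

        for (int i = 0; i < 32; i++) {
                r = (r << 1) | (x & 1);
                x >>= 1;
        }
        return r;
}

int main(void)
{
        printf("%08x\n", bitrev32(0x00000001));   /* 80000000 */
        printf("%04x\n", bitrev32(0x0001) >> 16); /* 8000, cf. __arch_bitrev16 */
        printf("%02x\n", bitrev32(0x01) >> 24);   /* 80,   cf. __arch_bitrev8 */
        return 0;
}
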
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
index 8155db2f7fa1..29fe85e59439 100644
--- a/arch/arm/include/asm/compiler.h
+++ b/arch/arm/include/asm/compiler.h
@@ -8,8 +8,21 @@
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
* (for details, see gcc PR 15089)
+ * For compatibility with clang, we have to specifically take the equivalence
+ * of 'r11' <-> 'fp' and 'r12' <-> 'ip' into account as well.
*/
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#define __asmeq(x, y) \
+ ".ifnc " x "," y "; " \
+ ".ifnc " x y ",fpr11; " \
+ ".ifnc " x y ",r11fp; " \
+ ".ifnc " x y ",ipr12; " \
+ ".ifnc " x y ",r12ip; " \
+ ".err; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif\n\t"
#endif /* __ASM_ARM_COMPILER_H */
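
A minimal sketch of how the .ifnc/.err trick is used in practice, assuming an ARM gcc or clang target; the macro name and wrapper below are local to this example. It uses the pre-existing simple form, while the version added above additionally tolerates the fp/r11 and ip/r12 spellings that clang emits for those two registers.

/* Build-time check that an inline-asm operand landed in the expected register. */
#define ASMEQ(x, y) ".ifnc " x "," y "; .err; .endif\n\t"

static inline int add_one(int v)
{
        register int ret asm("r0") = v;         /* pin the operand to r0 */

        asm volatile(ASMEQ("%0", "r0")          /* assembly fails if %0 is not r0 */
                     "add %0, %0, #1"
                     : "+r" (ret));
        return ret;
}
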
diff --git a/arch/arm/kernel/insn.h b/arch/arm/include/asm/insn.h
index e96065da4dae..e96065da4dae 100644
--- a/arch/arm/kernel/insn.h
+++ b/arch/arm/include/asm/insn.h
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 49fa0dfaad33..3ea9be559726 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -22,7 +22,6 @@
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
-#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
@@ -51,5 +50,37 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
+/* optinsn template addresses */
+extern __visible kprobe_opcode_t optprobe_template_entry;
+extern __visible kprobe_opcode_t optprobe_template_val;
+extern __visible kprobe_opcode_t optprobe_template_call;
+extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_sub_sp;
+extern __visible kprobe_opcode_t optprobe_template_add_sp;
+extern __visible kprobe_opcode_t optprobe_template_restore_begin;
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
+extern __visible kprobe_opcode_t optprobe_template_restore_end;
+
+#define MAX_OPTIMIZED_LENGTH 4
+#define MAX_OPTINSN_SIZE \
+ ((unsigned long)&optprobe_template_end - \
+ (unsigned long)&optprobe_template_entry)
+#define RELATIVEJUMP_SIZE 4
+
+struct arch_optimized_insn {
+ /*
+ * copy of the original instructions.
+ * Different from x86, ARM kprobe_opcode_t is u32.
+ */
+#define MAX_COPIED_INSN DIV_ROUND_UP(RELATIVEJUMP_SIZE, sizeof(kprobe_opcode_t))
+ kprobe_opcode_t copied_insn[MAX_COPIED_INSN];
+ /* detour code buffer */
+ kprobe_opcode_t *insn;
+ /*
+ * We always copy one instruction on ARM,
+ * so size will always be 4, and unlike x86, there is no
+ * need for a size field.
+ */
+};
#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 3a67bec72d0c..25410b2d8bc1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -96,6 +96,7 @@ extern char __kvm_hyp_code_end[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..a9c80a2ea1a7 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -23,6 +23,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
+#include <asm/cputype.h>
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
@@ -38,6 +39,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr = HCR_GUEST_MASK;
}
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr = hcr;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
@@ -167,9 +178,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cp15[c0_MPIDR];
+ return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..41008cd7c53f 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -68,6 +68,7 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
+ int max_vcpus;
};
#define KVM_NR_MEM_OBJS 40
@@ -125,9 +126,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest on this vcpu */
bool pause;
@@ -147,6 +145,7 @@ struct kvm_vm_stat {
};
struct kvm_vcpu_stat {
+ u32 halt_successful_poll;
u32 halt_wakeup;
};
@@ -234,6 +233,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
+
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index adcc0d7d3175..3f83db2f6cf0 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -37,6 +37,7 @@ struct kvm_exit_mmio {
u8 data[8];
u32 len;
bool is_write;
+ void *private;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..37ca2a4c6f09 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
@@ -114,6 +115,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= L_PMD_S2_RDWR;
}
+static inline void kvm_set_s2pte_readonly(pte_t *pte)
+{
+ pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
+}
+
+static inline bool kvm_s2pte_readonly(pte_t *pte)
+{
+ return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
+}
+
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+{
+ pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
+}
+
+static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+{
+ return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
+}
+
+
/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end) \
({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
@@ -161,13 +183,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
-
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +198,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
*
* VIVT caches are tagged using both the ASID and the VMID and don't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ *
+ * We need to do this through a kernel mapping (using the
+ * user-space mapping has proved to be the wrong
+ * solution). For that, we need to kmap one page at a time,
+ * and iterate over the range.
*/
- if (icache_is_pipt()) {
- __cpuc_coherent_user_range(hva, hva + size);
- } else if (!icache_is_vivt_asid_tagged()) {
+
+ bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+ VM_BUG_ON(size & ~PAGE_MASK);
+
+ if (!need_flush && !icache_is_pipt())
+ goto vipt_cache;
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ if (need_flush)
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ if (icache_is_pipt())
+ __cpuc_coherent_user_range((unsigned long)va,
+ (unsigned long)va + PAGE_SIZE);
+
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
+
+vipt_cache:
+ if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ void *va = kmap_atomic(pte_page(pte));
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ unsigned long size = PMD_SIZE;
+ pfn_t pfn = pmd_pfn(pmd);
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ pfn++;
+ size -= PAGE_SIZE;
+
+ kunmap_atomic(va);
+ }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* !__ASSEMBLY__ */
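
The new read-only helpers above only manipulate the stage-2 HAP[2:1] access-permission bits. A small stand-alone sketch of that bit arithmetic (constant names are local to this example; the values follow the L_PTE_S2_* definitions used by the patch):

#include <stdint.h>
#include <stdio.h>

#define S2_RDONLY (UINT64_C(1) << 6)    /* HAP[1] */
#define S2_RDWR   (UINT64_C(3) << 6)    /* HAP[2:1] */

static uint64_t s2_set_readonly(uint64_t desc)
{
        return (desc & ~S2_RDWR) | S2_RDONLY;
}

static int s2_is_readonly(uint64_t desc)
{
        return (desc & S2_RDWR) == S2_RDONLY;
}

int main(void)
{
        uint64_t desc = 0x40001000 | S2_RDWR;   /* writable descriptor */

        desc = s2_set_readonly(desc);
        printf("read-only: %d\n", s2_is_readonly(desc));        /* prints 1 */
        return 0;
}
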
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 8292b5f81e23..28b9bb35949e 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -19,9 +19,6 @@ struct pci_bus;
struct device;
struct hw_pci {
-#ifdef CONFIG_PCI_DOMAINS
- int domain;
-#endif
#ifdef CONFIG_PCI_MSI
struct msi_controller *msi_ctrl;
#endif
@@ -45,9 +42,6 @@ struct hw_pci {
* Per-controller structure
*/
struct pci_sys_data {
-#ifdef CONFIG_PCI_DOMAINS
- int domain;
-#endif
#ifdef CONFIG_PCI_MSI
struct msi_controller *msi_ctrl;
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 891a56b35bcf..563b92fc2f41 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -23,6 +23,8 @@
#include <linux/types.h>
+struct l2x0_regs;
+
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
@@ -36,6 +38,7 @@ struct outer_cache_fns {
/* This is an ARM L2C thing */
void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
};
extern struct outer_cache_fns outer_cache;
diff --git a/arch/arm/kernel/patch.h b/arch/arm/include/asm/patch.h
index 77e054c2f6cd..77e054c2f6cd 100644
--- a/arch/arm/kernel/patch.h
+++ b/arch/arm/include/asm/patch.h
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 7e95d8535e24..585dc33a7a24 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void)
}
#ifdef CONFIG_PCI_DOMAINS
-static inline int pci_domain_nr(struct pci_bus *bus)
-{
- struct pci_sys_data *root = bus->sysdata;
-
- return root->domain;
-}
-
static inline int pci_proc_domain(struct pci_bus *bus)
{
return pci_domain_nr(bus);
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f0279411847d..bfd662e49a25 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,6 +10,8 @@
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
+#define __PAGETABLE_PMD_FOLDED
+
/*
* Hardware-wise, we have a two level page table structure, where the first
* level has 4096 entries, and the second level has 256 entries. Each entry
@@ -118,7 +120,6 @@
#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
#define L_PTE_USER (_AT(pteval_t, 1) << 8)
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a31ecdad4b59..a745a2a53853 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -77,7 +77,6 @@
*/
#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
@@ -130,6 +129,7 @@
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
@@ -258,7 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
-#define pmd_mknotpresent(pmd) (__pmd(0))
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ return __pmd(0);
+}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 0642228ff785..add094d09e3e 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -54,8 +54,6 @@
typedef pte_t *pte_addr_t;
-static inline int pte_file(pte_t pte) { return 0; }
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -87,7 +85,7 @@ extern unsigned int kobjsize(const void *objp);
#define VMALLOC_START 0UL
#define VMALLOC_END 0xffffffffUL
-#define FIRST_USER_ADDRESS (0)
+#define FIRST_USER_ADDRESS 0UL
#include <asm-generic/pgtable.h>
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index d5cac545ba33..f40354198bad 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -318,12 +318,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset ----------------------> < type -> 0 0 0
+ * <--------------- offset ------------------------> < type -> 0 0
*
- * This gives us up to 31 swap files and 64GB per swap file. Note that
+ * This gives us up to 31 swap files and 128GB per swap file. Note that
* the offset field is always non-zero.
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -342,20 +342,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/*
- * Encode and decode a file entry. File entries are stored in the Linux
- * page tables as follows:
- *
- * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <----------------------- offset ------------------------> 1 0 0
- */
-#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 29
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
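
A worked user-space sketch of the swap-entry layout described above (2 zero bits, 5 type bits, 25 offset bits in a 32-bit PTE); the names are local to this example, and the 128GB figure follows from 2^25 offsets of 4KiB pages.

#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT   2
#define SWP_TYPE_BITS    5
#define SWP_TYPE_MASK    ((1u << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)

static uint32_t swp_entry(uint32_t type, uint32_t offset)
{
        return (offset << SWP_OFFSET_SHIFT) |
               ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT);
}

int main(void)
{
        uint64_t offsets = UINT64_C(1) << (32 - SWP_OFFSET_SHIFT);

        /* 2^25 offsets * 4KiB pages = 128 GiB per swap file */
        printf("max swap file: %llu GiB\n",
               (unsigned long long)(offsets * 4096 >> 30));
        printf("entry(type=3, offset=42) = 0x%08x\n", swp_entry(3, 42));
        return 0;
}
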
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index 806cfe622a9e..1e5b9bb92270 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -19,6 +19,8 @@
#ifndef _ASM_PROBES_H
#define _ASM_PROBES_H
+#ifndef __ASSEMBLY__
+
typedef u32 probes_opcode_t;
struct arch_probes_insn;
@@ -38,6 +40,19 @@ struct arch_probes_insn {
probes_check_cc *insn_check_cc;
probes_insn_singlestep_t *insn_singlestep;
probes_insn_fn_t *insn_fn;
+ int stack_space;
+ unsigned long register_usage_flags;
+ bool kprobe_direct_exec;
};
+#endif /* __ASSEMBLY__ */
+
+/*
+ * We assume one instruction can consume at most 64 bytes of stack, which is
+ * 'push {r0-r15}'. Instructions that consume more or an unknown amount of
+ * stack space, like 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', must not be
+ * probed.
+ * Both kprobe and jprobe use this macro.
+ */
+#define MAX_STACK_SIZE 64
+
#endif
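
A trivial compile-time check of the 64-byte budget reasoned about in the comment above ('push {r0-r15}' stores 16 registers of 4 bytes each); purely illustrative.

#define MAX_STACK_SIZE 64

/* Worst case assumed above: one 'push {r0-r15}', i.e. 16 x 4 bytes. */
_Static_assert(16 * 4 == MAX_STACK_SIZE, "push {r0-r15} exactly fills the budget");
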
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d890e41f5520..72812a1f3d1c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -68,7 +68,6 @@ struct thread_info {
#ifdef CONFIG_ARM_THUMBEE
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
- struct restart_block restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -81,9 +80,6 @@ struct thread_info {
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 68c739b3fdf4..2f7e6ff67d51 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
diff --git a/arch/arm/mach-ks8695/include/mach/debug-macro.S b/arch/arm/include/debug/ks8695.S
index a79e48981202..961da1f32ab3 100644
--- a/arch/arm/mach-ks8695/include/mach/debug-macro.S
+++ b/arch/arm/include/debug/ks8695.S
@@ -1,5 +1,5 @@
/*
- * arch/arm/mach-ks8695/include/mach/debug-macro.S
+ * arch/arm/include/debug/ks8695.S
*
* Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
* Copyright (C) 2006 Simtec Electronics
@@ -11,8 +11,12 @@
* published by the Free Software Foundation.
*/
-#include <mach/hardware.h>
-#include <mach/regs-uart.h>
+#define KS8695_UART_PA 0x03ffe000
+#define KS8695_UART_VA 0xf00fe000
+#define KS8695_URTH (0x04)
+#define KS8695_URLS (0x14)
+#define URLS_URTE (1 << 6)
+#define URLS_URTHRE (1 << 5)
.macro addruart, rp, rv, tmp
ldr \rp, =KS8695_UART_PA @ physical base address
diff --git a/arch/arm/mach-netx/include/mach/debug-macro.S b/arch/arm/include/debug/netx.S
index 247781e096e2..81e1b2af70f7 100644
--- a/arch/arm/mach-netx/include/mach/debug-macro.S
+++ b/arch/arm/include/debug/netx.S
@@ -1,5 +1,4 @@
-/* arch/arm/mach-netx/include/mach/debug-macro.S
- *
+/*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
@@ -11,26 +10,27 @@
*
*/
-#include "hardware.h"
+#define UART_DATA 0
+#define UART_FLAG 0x18
+#define UART_FLAG_BUSY (1 << 3)
.macro addruart, rp, rv, tmp
- mov \rp, #0x00000a00
- orr \rv, \rp, #io_p2v(0x00100000) @ virtual
- orr \rp, \rp, #0x00100000 @ physical
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT
.endm
.macro senduart,rd,rx
- str \rd, [\rx, #0]
+ str \rd, [\rx, #UART_DATA]
.endm
.macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #0x18]
- tst \rd, #(1 << 3)
+1002: ldr \rd, [\rx, #UART_FLAG]
+ tst \rd, #UART_FLAG_BUSY
bne 1002b
.endm
.macro waituart,rd,rx
-1001: ldr \rd, [\rx, #0x18]
- tst \rd, #(1 << 3)
+1001: ldr \rd, [\rx, #UART_FLAG]
+ tst \rd, #UART_FLAG_BUSY
bne 1001b
.endm
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 09ee408c1a67..0db25bc32864 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -175,6 +175,8 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
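
A hedged user-space sketch of how a VMM could use the new control group and init attribute added above; struct kvm_device_attr and KVM_SET_DEVICE_ATTR are the standard KVM device API, and vgic_fd is assumed to come from a prior KVM_CREATE_DEVICE call.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Ask KVM to perform explicit VGIC initialisation for this VM. */
static int vgic_request_init(int vgic_fd)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
        };

        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}
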
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index fb2b71ebe3f2..902397dd1000 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -51,20 +51,8 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_UPROBES) += probes.o probes-arm.o uprobes.o uprobes-arm.o
-obj-$(CONFIG_KPROBES) += probes.o kprobes.o kprobes-common.o patch.o
-ifdef CONFIG_THUMB2_KERNEL
-obj-$(CONFIG_KPROBES) += kprobes-thumb.o probes-thumb.o
-else
-obj-$(CONFIG_KPROBES) += kprobes-arm.o probes-arm.o
-endif
-obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
-test-kprobes-objs := kprobes-test.o
-ifdef CONFIG_THUMB2_KERNEL
-test-kprobes-objs += kprobes-test-thumb.o
-else
-test-kprobes-objs += kprobes-test-arm.o
-endif
+# The main kprobes code now lives in arch/arm/probes/ .
+obj-$(CONFIG_KPROBES) += patch.o insn.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o patch.o
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a4effd6d8f2f..ab19b7c03423 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -422,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
{
int ret;
- struct pci_host_bridge_window *window;
+ struct resource_entry *window;
if (list_empty(&sys->resources)) {
pci_add_resource_offset(&sys->resources,
&iomem_resource, sys->mem_offset);
}
- list_for_each_entry(window, &sys->resources, list) {
+ resource_list_for_each_entry(window, &sys->resources)
if (resource_type(window->res) == IORESOURCE_IO)
return 0;
- }
sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
sys->io_res.end = (busnr + 1) * SZ_64K - 1;
@@ -463,9 +462,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
if (!sys)
panic("PCI: unable to allocate sys data!");
-#ifdef CONFIG_PCI_DOMAINS
- sys->domain = hw->domain;
-#endif
#ifdef CONFIG_PCI_MSI
sys->msi_ctrl = hw->msi_ctrl;
#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2f5555d307b3..672b21942fff 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -31,6 +31,7 @@
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
+#include <asm/probes.h>
/*
* Interrupt handling.
@@ -249,7 +250,7 @@ __und_svc:
@ If a kprobe is about to simulate a "stmdb sp..." instruction,
@ it obviously needs free stack space which then will belong to
@ the saved context.
- svc_entry 64
+ svc_entry MAX_STACK_SIZE
#else
svc_entry
#endif
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4176df721bf0..1a0045abead7 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
.endm
.macro restore_user_regs, fast = 0, offset = 0
- ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
- ldr lr, [sp, #\offset + S_PC]! @ get pc
+ mov r2, sp
+ ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [r2, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r1, r2, [sp] @ clear the exclusive monitor
+ strex r1, r2, [r2] @ clear the exclusive monitor
#endif
.if \fast
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ ldmdb r2, {r1 - lr}^ @ get calling r1 - lr
.else
- ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ ldmdb r2, {r0 - lr}^ @ get calling r0 - lr
.endif
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
- add sp, sp, #S_FRAME_SIZE - S_PC
+ add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
.endm
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 2260f1855820..8944f4991c3c 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -22,10 +22,12 @@
__invalid_entry:
v7m_exception_entry
+#ifdef CONFIG_PRINTK
adr r0, strerr
mrs r1, ipsr
mov r2, lr
bl printk
+#endif
mov r0, sp
bl show_regs
1: b 1b
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index b8c75e45a950..709ee1d6d4df 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -20,8 +20,7 @@
#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
-
-#include "insn.h"
+#include <asm/insn.h>
#ifdef CONFIG_THUMB2_KERNEL
#define NOP 0xf85deb04 /* pop.w {lr} */
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 664eee8c4a26..01963273c07a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -346,6 +346,12 @@ __turn_mmu_on_loc:
#if defined(CONFIG_SMP)
.text
+ENTRY(secondary_startup_arm)
+ .arm
+ THUMB( adr r9, BSYM(1f) ) @ Kernel is entered in ARM.
+ THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
+ THUMB( .thumb ) @ switch to Thumb now.
+ THUMB(1: )
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
@@ -385,6 +391,7 @@ ENTRY(secondary_startup)
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( ret r12 )
ENDPROC(secondary_startup)
+ENDPROC(secondary_startup_arm)
/*
* r6 = &secondary_data
@@ -586,7 +593,7 @@ __fixup_pv_table:
add r5, r5, r3 @ adjust table end address
add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
add r7, r7, r3 @ adjust __pv_offset address
- mov r0, r8, lsr #12 @ convert to PFN
+ mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN
str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ad857bada96c..350f188c92d2 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -109,7 +109,8 @@ void __init init_IRQ(void)
if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
(machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
- outer_cache.write_sec = machine_desc->l2c_write_sec;
+ if (!outer_cache.write_sec)
+ outer_cache.write_sec = machine_desc->l2c_write_sec;
ret = l2x0_of_init(machine_desc->l2c_aux_val,
machine_desc->l2c_aux_mask);
if (ret)
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index afeeb9ea6f43..e39cbf488cfe 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -1,8 +1,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
-
-#include "insn.h"
-#include "patch.h"
+#include <asm/patch.h>
+#include <asm/insn.h>
#ifdef HAVE_JUMP_LABEL
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 07db2f8a1b45..a6ad93c9bce3 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -14,10 +14,9 @@
#include <linux/kgdb.h>
#include <linux/uaccess.h>
+#include <asm/patch.h>
#include <asm/traps.h>
-#include "patch.h"
-
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index bea7db9e5b80..2e11961f65ae 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -41,7 +41,7 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 5038960e3c55..69bda1a5707e 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -8,8 +8,7 @@
#include <asm/fixmap.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
-
-#include "patch.h"
+#include <asm/patch.h>
struct patch {
void *addr;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428..557e128e4df0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
ret = 1;
}
- if (left > (s64)armpmu->max_period)
- left = armpmu->max_period;
+ /*
+ * Limit the maximum period to prevent the counter value
+ * from overtaking the one we are about to program. In
+ * effect we are reducing max_period to account for
+ * interrupt latency (and we are being very conservative).
+ */
+ if (left > (armpmu->max_period >> 1))
+ left = armpmu->max_period >> 1;
local64_set(&hwc->prev_count, (u64)-left);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 715ae19bc7c8..e55408e96559 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
/*
* Ensure that start/size are aligned to a page boundary.
- * Size is appropriately rounded down, start is rounded up.
+ * Size is rounded down, start is rounded up.
*/
- size -= start & ~PAGE_MASK;
aligned_start = PAGE_ALIGN(start);
+ if (aligned_start > start + size)
+ size = 0;
+ else
+ size -= aligned_start - start;
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
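
A stand-alone sketch of the rounding now performed above: the start is rounded up to a page boundary, the size is trimmed by the same amount (shrinking to zero if the region ends below the boundary), and the size is then rounded down to a whole number of pages. Names are local to this example.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static void align_region(uint64_t *start, uint64_t *size)
{
        uint64_t aligned_start = PAGE_ALIGN(*start);

        if (aligned_start > *start + *size)
                *size = 0;                              /* region ends below the boundary */
        else
                *size -= aligned_start - *start;        /* trim the unaligned head */
        *size &= ~(PAGE_SIZE - 1);                      /* round the size down */
        *start = aligned_start;
}

int main(void)
{
        uint64_t start = 0x80000123, size = 0x10000;

        align_region(&start, &size);
        printf("start=0x%llx size=0x%llx\n",
               (unsigned long long)start, (unsigned long long)size);
        return 0;
}
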
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 8aa6f1b87c9e..023ac905e4c3 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -191,7 +191,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
struct sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -221,7 +221,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 2835d35234ca..9a2f882a0a2d 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -14,10 +14,6 @@ extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
#ifdef CONFIG_MMU
-/*
- * Hide the first two arguments to __cpu_suspend - these are an implementation
- * detail which platform code shouldn't have to know about.
- */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..338ace78ed18 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -21,8 +21,11 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
+ select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select SRCU
depends on ARM_VIRT_EXT && ARM_LPAE
---help---
Support hosting virtualized guest machines. You will also
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f7057ed045b6..443b8bea43e9 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -22,4 +22,5 @@ obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d6d91001062..07e7eb1d7ab6 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -132,6 +132,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
+ /* The maximum number of VCPUs is limited by the host's GIC model */
+ kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
+
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
@@ -218,6 +221,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
goto out;
}
+ if (id >= kvm->arch.max_vcpus) {
+ err = -EINVAL;
+ goto out;
+ }
+
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
@@ -241,9 +249,8 @@ out:
return ERR_PTR(err);
}
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
- return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
@@ -281,15 +288,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
- /*
- * Check whether this vcpu requires the cache to be flushed on
- * this physical CPU. This is a consequence of doing dcache
- * operations by set/way on this vcpu. We do it here to be in
- * a non-preemptible section.
- */
- if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
- flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
kvm_arm_set_running_vcpu(vcpu);
}
@@ -541,7 +539,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
- vcpu->arch.last_pcpu = smp_processor_id();
kvm_guest_exit();
trace_kvm_exit(*vcpu_pc(vcpu));
/*
@@ -787,9 +784,39 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
}
+/**
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
+ *
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
+ * always flush the TLB (step 4) even if a previous step failed and the dirty
+ * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
+ * API does not preclude a subsequent dirty log read by user space. Flushing
+ * the TLB ensures writes will be marked dirty for the next log read.
+ *
+ * 1. Take a snapshot of the bit and clear it if needed.
+ * 2. Write protect the corresponding page.
+ * 3. Copy the snapshot to the userspace.
+ * 4. Flush TLB's if needed.
+ */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- return -EINVAL;
+ bool is_dirty = false;
+ int r;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+
+ if (is_dirty)
+ kvm_flush_remote_tlbs(kvm);
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
}
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
@@ -821,7 +848,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
if (vgic_present)
- return kvm_vgic_create(kvm);
+ return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
else
return -ENXIO;
}
@@ -1045,6 +1072,19 @@ static void check_kvm_target_cpu(void *ret)
*(int *)ret = kvm_target_cpu();
}
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ mpidr &= MPIDR_HWID_BITMASK;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
+ return vcpu;
+ }
+ return NULL;
+}
+
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
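
For context, a hedged user-space sketch of how a VMM might consume the dirty log that the new kvm_vm_ioctl_get_dirty_log() above produces; vm_fd, slot and npages are assumptions supplied by the caller, and the bitmap sizing follows the usual KVM convention of one bit per page, 64-bit aligned.

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int read_dirty_log(int vm_fd, uint32_t slot, uint64_t npages)
{
        size_t bytes = ((npages + 63) / 64) * 8;
        uint64_t *bitmap = calloc(1, bytes);
        struct kvm_dirty_log log = { .slot = slot };
        int ret;

        if (!bitmap)
                return -1;
        log.dirty_bitmap = bitmap;

        ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);    /* steps 1-4 above */
        if (!ret) {
                uint64_t dirty = 0;

                for (uint64_t i = 0; i < npages; i++)
                        dirty += !!(bitmap[i / 64] & (UINT64_C(1) << (i % 64)));
                ret = (int)dirty;       /* pages written since the last call */
        }
        free(bitmap);
        return ret;
}
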
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
return true;
}
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt1);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
- break;
-
- case 10: /* DCCSW */
- asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
*/
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
{
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
BUG_ON(!p->is_write);
vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
if (p->is_64bit)
vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
- return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- access_vm_reg(vcpu, p, r);
-
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
-
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 1a44bbe39643..88d24a3a9778 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define is64 .is_64 = true
#define is32 .is_64 = false
-bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index e6f4ae48bda9..a7136757d373 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+ access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
index 17fc7cd479d3..b19e46d1b2c0 100644
--- a/arch/arm/kvm/coproc_a7.c
+++ b/arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+ access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index a96a8043277c..95f12b2ccdcb 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -87,11 +87,13 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- trace_kvm_wfi(*vcpu_pc(vcpu));
- if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
+ if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
+ trace_kvm_wfx(*vcpu_pc(vcpu), true);
kvm_vcpu_on_spin(vcpu);
- else
+ } else {
+ trace_kvm_wfx(*vcpu_pc(vcpu), false);
kvm_vcpu_block(vcpu);
+ }
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 01dcb0e752d9..79caf79b304a 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -66,6 +66,17 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
bx lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
+/**
+ * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
+ *
+ * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing the address
+ * parameter.
+ */
+
+ENTRY(__kvm_tlb_flush_vmid)
+ b __kvm_tlb_flush_vmid_ipa
+ENDPROC(__kvm_tlb_flush_vmid)
+
/********************************************************************
* Flush TLBs and instruction caches of all CPUs inside the inner-shareable
* domain, for all VMIDs
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1dc9778a00af..3e6859bc3e11 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -45,6 +45,26 @@ static phys_addr_t hyp_idmap_vector;
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
+#define kvm_pud_huge(_x) pud_huge(_x)
+
+#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
+#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
+
+static bool memslot_is_logging(struct kvm_memory_slot *memslot)
+{
+ return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
+}
+
+/**
+ * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
+ * @kvm: pointer to kvm structure.
+ *
+ * Interface to HYP function to flush all VM TLB entries
+ */
+void kvm_flush_remote_tlbs(struct kvm *kvm)
+{
+ kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
@@ -58,6 +78,45 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+ __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+ __kvm_flush_dcache_pud(pud);
+}
+
+/**
+ * stage2_dissolve_pmd() - clear and flush huge PMD entry
+ * @kvm: pointer to kvm structure.
+ * @addr: IPA
+ * @pmd: pmd pointer for IPA
+ *
+ * The function clears a PMD entry and flushes the 1st and 2nd stage TLBs for
+ * addr, marking all pages in the range dirty.
+ */
+static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
+{
+ if (!kvm_pmd_huge(*pmd))
+ return;
+
+ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ put_page(virt_to_page(pmd));
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
int min, int max)
{
@@ -119,6 +178,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
put_page(virt_to_page(pmd));
}
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM. However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@@ -128,9 +207,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
start_pte = pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte)) {
+ pte_t old_pte = *pte;
+
kvm_set_pte(pte, __pte(0));
- put_page(virt_to_page(pte));
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ /* No need to invalidate the cache for device mappings */
+ if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ kvm_flush_dcache_pte(old_pte);
+
+ put_page(virt_to_page(pte));
}
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -149,8 +235,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
if (kvm_pmd_huge(*pmd)) {
+ pmd_t old_pmd = *pmd;
+
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ kvm_flush_dcache_pmd(old_pmd);
+
put_page(virt_to_page(pmd));
} else {
unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +264,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
if (pud_huge(*pud)) {
+ pud_t old_pud = *pud;
+
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ kvm_flush_dcache_pud(old_pud);
+
put_page(virt_to_page(pud));
} else {
unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +305,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
- if (!pte_none(*pte)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
- }
+ if (!pte_none(*pte) &&
+ (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -226,12 +321,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
do {
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
- if (kvm_pmd_huge(*pmd)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
- } else {
+ if (kvm_pmd_huge(*pmd))
+ kvm_flush_dcache_pmd(*pmd);
+ else
stage2_flush_ptes(kvm, pmd, addr, next);
- }
}
} while (pmd++, addr = next, addr != end);
}
@@ -246,12 +339,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
do {
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
- if (pud_huge(*pud)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
- } else {
+ if (pud_huge(*pud))
+ kvm_flush_dcache_pud(*pud);
+ else
stage2_flush_pmds(kvm, pud, addr, next);
- }
}
} while (pud++, addr = next, addr != end);
}
@@ -278,7 +369,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
* Go through the stage 2 page tables and invalidate any cache lines
* backing memory already mapped to the VM.
*/
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -767,10 +858,15 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr, const pte_t *new_pte, bool iomap)
+ phys_addr_t addr, const pte_t *new_pte,
+ unsigned long flags)
{
pmd_t *pmd;
pte_t *pte, old_pte;
+ bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
+ bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
+
+ VM_BUG_ON(logging_active && !cache);
/* Create stage-2 page table mapping - Levels 0 and 1 */
pmd = stage2_get_pmd(kvm, cache, addr);
@@ -782,6 +878,13 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
return 0;
}
+ /*
+ * While dirty page logging - dissolve huge PMD, then continue on to
+ * allocate page.
+ */
+ if (logging_active)
+ stage2_dissolve_pmd(kvm, addr, pmd);
+
/* Create stage-2 page mappings - Level 2 */
if (pmd_none(*pmd)) {
if (!cache)
@@ -838,7 +941,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
- ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
+ ret = stage2_set_pte(kvm, &cache, addr, &pte,
+ KVM_S2PTE_FLAG_IS_IOMAP);
spin_unlock(&kvm->mmu_lock);
if (ret)
goto out;
@@ -905,6 +1009,171 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_valid(pfn);
}
+/**
+ * stage2_wp_ptes - write protect PMD range
+ * @pmd: pointer to pmd entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
+{
+ pte_t *pte;
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ if (!pte_none(*pte)) {
+ if (!kvm_s2pte_readonly(pte))
+ kvm_set_s2pte_readonly(pte);
+ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+/**
+ * stage2_wp_pmds - write protect PUD range
+ * @pud: pointer to pud entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+{
+ pmd_t *pmd;
+ phys_addr_t next;
+
+ pmd = pmd_offset(pud, addr);
+
+ do {
+ next = kvm_pmd_addr_end(addr, end);
+ if (!pmd_none(*pmd)) {
+ if (kvm_pmd_huge(*pmd)) {
+ if (!kvm_s2pmd_readonly(pmd))
+ kvm_set_s2pmd_readonly(pmd);
+ } else {
+ stage2_wp_ptes(pmd, addr, next);
+ }
+ }
+ } while (pmd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_wp_puds - write protect PGD range
+ * @pgd: pointer to pgd entry
+ * @addr: range start address
+ * @end: range end address
+ *
+ * Process PUD entries; huge PUDs are not supported yet, so we BUG() on them.
+ */
+static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+{
+ pud_t *pud;
+ phys_addr_t next;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = kvm_pud_addr_end(addr, end);
+ if (!pud_none(*pud)) {
+ /* TODO:PUD not supported, revisit later if supported */
+ BUG_ON(kvm_pud_huge(*pud));
+ stage2_wp_pmds(pud, addr, next);
+ }
+ } while (pud++, addr = next, addr != end);
+}
+
+/**
+ * stage2_wp_range() - write protect stage2 memory region range
+ * @kvm: The KVM pointer
+ * @addr: Start address of range
+ * @end: End address of range
+ */
+static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+ pgd_t *pgd;
+ phys_addr_t next;
+
+ pgd = kvm->arch.pgd + pgd_index(addr);
+ do {
+ /*
+ * Release kvm_mmu_lock periodically if the memory region is
+ * large. Otherwise, we may see kernel panics with
+ * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
+ * CONFIG_LOCKDEP. Additionally, holding the lock too long
+ * will also starve other vCPUs.
+ */
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+ cond_resched_lock(&kvm->mmu_lock);
+
+ next = kvm_pgd_addr_end(addr, end);
+ if (pgd_present(*pgd))
+ stage2_wp_puds(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
+ * @kvm: The KVM pointer
+ * @slot: The memory slot to write protect
+ *
+ * Called to start logging dirty pages when the KVM_MEM_LOG_DIRTY_PAGES
+ * flag is set on a memory region. After this function returns, all
+ * present PMDs and PTEs in the memory region are write protected, and
+ * the dirty page log can then be read.
+ *
+ * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
+ * serializing operations for VM memory regions.
+ */
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+{
+ struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
+ phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+ phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+ spin_lock(&kvm->mmu_lock);
+ stage2_wp_range(kvm, start, end);
+ spin_unlock(&kvm->mmu_lock);
+ kvm_flush_remote_tlbs(kvm);
+}
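For context, the write protection installed here is driven from userspace: a VMM enables dirty logging on a slot and later harvests the log. A minimal sketch of that flow, assuming an already-created VM fd (vm_fd), a host buffer host_mem of mem_size bytes and a caller-allocated bitmap; these names and the helper itself are hypothetical, only the ioctl interface is standard KVM API:

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable dirty logging on slot 0, then read back the dirty bitmap
 * (one bit per guest page). */
static int harvest_dirty_log(int vm_fd, void *host_mem, size_t mem_size,
			     unsigned long *bitmap)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.flags = KVM_MEM_LOG_DIRTY_PAGES,	/* start logging */
		.guest_phys_addr = 0,
		.memory_size = mem_size,
		.userspace_addr = (unsigned long)host_mem,
	};
	struct kvm_dirty_log log = {
		.slot = 0,
		.dirty_bitmap = bitmap,
	};

	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
		return -1;
	/* ... let the guest run for a while ... */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}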
+
+/**
+ * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
+ * @kvm: The KVM pointer
+ * @slot: The memory slot associated with mask
+ * @gfn_offset: The gfn offset in memory slot
+ * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
+ * slot to be write protected
+ *
+ * Walks the bits set in mask and write protects the associated PTEs. Caller must
+ * acquire kvm_mmu_lock.
+ */
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+ phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
+ phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+ stage2_wp_range(kvm, start, end);
+}
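As a worked example of the range computation above (numbers purely illustrative): with gfn_offset = 64 and mask = 0x3c (bits 2..5 set), base_gfn = slot->base_gfn + 64, __ffs(mask) = 2 and __fls(mask) = 5, so

	start = (slot->base_gfn + 66) << PAGE_SHIFT
	end   = (slot->base_gfn + 70) << PAGE_SHIFT

and stage2_wp_range() write protects exactly the four pages slot->base_gfn + 66 .. 69, one per set bit in the mask.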
+
+/*
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * dirty pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
+ * enable dirty logging for them.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
+
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size, bool uncached)
+{
+ __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -919,6 +1188,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
bool fault_ipa_uncached;
+ bool logging_active = memslot_is_logging(memslot);
+ unsigned long flags = 0;
write_fault = kvm_is_write_fault(vcpu);
if (fault_status == FSC_PERM && !write_fault) {
@@ -935,7 +1206,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma)) {
+ if (is_vm_hugetlb_page(vma) && !logging_active) {
hugetlb = true;
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
} else {
@@ -976,12 +1247,30 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (is_error_pfn(pfn))
return -EFAULT;
- if (kvm_is_device_pfn(pfn))
+ if (kvm_is_device_pfn(pfn)) {
mem_type = PAGE_S2_DEVICE;
+ flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+ } else if (logging_active) {
+ /*
+ * Faults on pages in a memslot with logging enabled
+ * should not be mapped with huge pages (it introduces churn
+ * and performance degradation), so force a pte mapping.
+ */
+ force_pte = true;
+ flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
+
+ /*
+ * Only actually map the page as writable if this was a write
+ * fault.
+ */
+ if (!write_fault)
+ writable = false;
+ }
spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
+
if (!hugetlb && !force_pte)
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
@@ -994,22 +1283,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pmd_writable(&new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
- fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
+
if (writable) {
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
+ mark_page_dirty(kvm, gfn);
}
- coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
- fault_ipa_uncached);
- ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
- pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
+ coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
}
-
out_unlock:
spin_unlock(&kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
@@ -1159,7 +1446,14 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
pte_t *pte = (pte_t *)data;
- stage2_set_pte(kvm, NULL, gpa, pte, false);
+ /*
+ * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
+ * flag clear because MMU notifiers will have unmapped a huge PMD before
+ * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
+ * therefore stage2_set_pte() never needs to clear out a huge PMD
+ * through this calling path.
+ */
+ stage2_set_pte(kvm, NULL, gpa, pte, 0);
}
@@ -1292,6 +1586,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{
+ /*
+ * At this point the memslot has been committed and there is an
+ * allocated dirty_bitmap[]; dirty pages will be tracked while the
+ * memory slot is write protected.
+ */
+ if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
+ kvm_mmu_wp_memory_region(kvm, mem->slot);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1304,7 +1605,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
bool writable = !(mem->flags & KVM_MEM_READONLY);
int ret = 0;
- if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
+ if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
+ change != KVM_MR_FLAGS_ONLY)
return 0;
/*
@@ -1355,6 +1657,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
vm_start - vma->vm_start;
+ /* IO region dirty page logging not allowed */
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+ return -EINVAL;
+
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
writable);
@@ -1364,6 +1670,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
hva = vm_end;
} while (hva < reg_end);
+ if (change == KVM_MR_FLAGS_ONLY)
+ return ret;
+
spin_lock(&kvm->mmu_lock);
if (ret)
unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
@@ -1411,3 +1720,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
unmap_stage2_range(kvm, gpa, size);
spin_unlock(&kvm->mmu_lock);
}
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap an S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches whenever the caches are turned on or off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+ unsigned long hcr = vcpu_get_hcr(vcpu);
+
+ /*
+ * If this is the first time we do an S/W operation
+ * (i.e. HCR_TVM not set), flush the whole memory and enable
+ * VM trapping.
+ *
+ * Otherwise, rely on the VM trapping to wait for the MMU +
+ * Caches to be turned off. At that point, we'll be able to
+ * clean the caches again.
+ */
+ if (!(hcr & HCR_TVM)) {
+ trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+ vcpu_has_cache_enabled(vcpu));
+ stage2_flush_vm(vcpu->kvm);
+ vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+ }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+ bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+ /*
+ * If switching the MMU+caches on, we need to invalidate the caches.
+ * If switching them off, we need to clean the caches.
+ * Clean + invalidate always does the trick.
+ */
+ if (now_enabled != was_enabled)
+ stage2_flush_vm(vcpu->kvm);
+
+ /* Caches are now on, stop trapping VM ops (until a S/W op) */
+ if (now_enabled)
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+ trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 58cb3248d277..02fa8eff6ae1 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -22,6 +22,7 @@
#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
+#include <asm/kvm_host.h>
/*
* This is an implementation of the Power State Coordination Interface
@@ -66,25 +67,17 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu = NULL, *tmp;
+ struct kvm_vcpu *vcpu = NULL;
wait_queue_head_t *wq;
unsigned long cpu_id;
unsigned long context_id;
- unsigned long mpidr;
phys_addr_t target_pc;
- int i;
- cpu_id = *vcpu_reg(source_vcpu, 1);
+ cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
- kvm_for_each_vcpu(i, tmp, kvm) {
- mpidr = kvm_vcpu_get_mpidr(tmp);
- if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
- vcpu = tmp;
- break;
- }
- }
+ vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
/*
* Make sure the caller requested a valid CPU and that the CPU is
@@ -155,7 +148,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
* then ON else OFF
*/
kvm_for_each_vcpu(i, tmp, kvm) {
- mpidr = kvm_vcpu_get_mpidr(tmp);
+ mpidr = kvm_vcpu_get_mpidr_aff(tmp);
if (((mpidr & target_affinity_mask) == target_affinity) &&
!tmp->arch.pause) {
return PSCI_0_2_AFFINITY_LEVEL_ON;
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index b1d640f78623..881874b1a036 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -140,19 +140,22 @@ TRACE_EVENT(kvm_emulate_cp15_imp,
__entry->CRm, __entry->Op2)
);
-TRACE_EVENT(kvm_wfi,
- TP_PROTO(unsigned long vcpu_pc),
- TP_ARGS(vcpu_pc),
+TRACE_EVENT(kvm_wfx,
+ TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
+ TP_ARGS(vcpu_pc, is_wfe),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
+ __field( bool, is_wfe )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
+ __entry->is_wfe = is_wfe;
),
- TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("guest executed wf%c at: 0x%08lx",
+ __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
TRACE_EVENT(kvm_unmap_hva,
@@ -223,6 +226,45 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
+TRACE_EVENT(kvm_set_way_flush,
+ TP_PROTO(unsigned long vcpu_pc, bool cache),
+ TP_ARGS(vcpu_pc, cache),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( bool, cache )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->cache = cache;
+ ),
+
+ TP_printk("S/W flush at 0x%016lx (cache %s)",
+ __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+ TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+ TP_ARGS(vcpu_pc, was, now),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( bool, was )
+ __field( bool, now )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->was = was;
+ __entry->now = now;
+ ),
+
+ TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+ __entry->vcpu_pc, __entry->was ? "on" : "off",
+ __entry->now ? "on" : "off")
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 0573faab96ad..d8a780799506 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -15,19 +15,8 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o \
call_with_stack.o bswapsdi2.o
-mmu-y := clear_user.o copy_page.o getuser.o putuser.o
-
-# the code in uaccess.S is not preemption safe and
-# probably faster on ARMv3 only
-ifeq ($(CONFIG_PREEMPT),y)
- mmu-y += copy_from_user.o copy_to_user.o
-else
-ifneq ($(CONFIG_CPU_32v3),y)
- mmu-y += copy_from_user.o copy_to_user.o
-else
- mmu-y += uaccess.o
-endif
-endif
+mmu-y := clear_user.o copy_page.o getuser.o putuser.o \
+ copy_from_user.o copy_to_user.o
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
deleted file mode 100644
index e50520904b76..000000000000
--- a/arch/arm/lib/uaccess.S
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * linux/arch/arm/lib/uaccess.S
- *
- * Copyright (C) 1995, 1996,1997,1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Routines to block copy data to/from user memory
- * These are highly optimised both for the 4k page size
- * and for various alignments.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-#include <asm/domain.h>
-
- .text
-
-#define PAGE_SHIFT 12
-
-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
- * Purpose : copy a block to user memory from kernel memory
- * Params : to - user memory
- * : from - kernel memory
- * : n - number of bytes to copy
- * Returns : Number of bytes NOT copied.
- */
-
-.Lc2u_dest_not_aligned:
- rsb ip, ip, #4
- cmp ip, #2
- ldrb r3, [r1], #1
-USER( TUSER( strb) r3, [r0], #1) @ May fault
- ldrgeb r3, [r1], #1
-USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #1
-USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- sub r2, r2, ip
- b .Lc2u_dest_aligned
-
-ENTRY(__copy_to_user)
- stmfd sp!, {r2, r4 - r7, lr}
- cmp r2, #4
- blt .Lc2u_not_enough
- ands ip, r0, #3
- bne .Lc2u_dest_not_aligned
-.Lc2u_dest_aligned:
-
- ands ip, r1, #3
- bne .Lc2u_src_not_aligned
-/*
- * Seeing as there has to be at least 8 bytes to copy, we can
- * copy one word, and force a user-mode page fault...
- */
-
-.Lc2u_0fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lc2u_0nowords
- ldr r3, [r1], #4
-USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lc2u_0fupi
-/*
- * ip = max no. of bytes to copy before needing another "strt" insn
- */
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #32
- blt .Lc2u_0rem8lp
-
-.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6}
- stmia r0!, {r3 - r6} @ Shouldnt fault
- ldmia r1!, {r3 - r6}
- subs ip, ip, #32
- stmia r0!, {r3 - r6} @ Shouldnt fault
- bpl .Lc2u_0cpy8lp
-
-.Lc2u_0rem8lp: cmn ip, #16
- ldmgeia r1!, {r3 - r6}
- stmgeia r0!, {r3 - r6} @ Shouldnt fault
- tst ip, #8
- ldmneia r1!, {r3 - r4}
- stmneia r0!, {r3 - r4} @ Shouldnt fault
- tst ip, #4
- ldrne r3, [r1], #4
- TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_0fupi
-.Lc2u_0nowords: teq ip, #0
- beq .Lc2u_finished
-.Lc2u_nowords: cmp ip, #2
- ldrb r3, [r1], #1
-USER( TUSER( strb) r3, [r0], #1) @ May fault
- ldrgeb r3, [r1], #1
-USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #1
-USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-
-.Lc2u_not_enough:
- movs ip, r2
- bne .Lc2u_nowords
-.Lc2u_finished: mov r0, #0
- ldmfd sp!, {r2, r4 - r7, pc}
-
-.Lc2u_src_not_aligned:
- bic r1, r1, #3
- ldr r7, [r1], #4
- cmp ip, #2
- bgt .Lc2u_3fupi
- beq .Lc2u_2fupi
-.Lc2u_1fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lc2u_1nowords
- mov r3, r7, lspull #8
- ldr r7, [r1], #4
- orr r3, r3, r7, lspush #24
-USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lc2u_1fupi
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #16
- blt .Lc2u_1rem8lp
-
-.Lc2u_1cpy8lp: mov r3, r7, lspull #8
- ldmia r1!, {r4 - r7}
- subs ip, ip, #16
- orr r3, r3, r4, lspush #24
- mov r4, r4, lspull #8
- orr r4, r4, r5, lspush #24
- mov r5, r5, lspull #8
- orr r5, r5, r6, lspush #24
- mov r6, r6, lspull #8
- orr r6, r6, r7, lspush #24
- stmia r0!, {r3 - r6} @ Shouldnt fault
- bpl .Lc2u_1cpy8lp
-
-.Lc2u_1rem8lp: tst ip, #8
- movne r3, r7, lspull #8
- ldmneia r1!, {r4, r7}
- orrne r3, r3, r4, lspush #24
- movne r4, r4, lspull #8
- orrne r4, r4, r7, lspush #24
- stmneia r0!, {r3 - r4} @ Shouldnt fault
- tst ip, #4
- movne r3, r7, lspull #8
- ldrne r7, [r1], #4
- orrne r3, r3, r7, lspush #24
- TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_1fupi
-.Lc2u_1nowords: mov r3, r7, get_byte_1
- teq ip, #0
- beq .Lc2u_finished
- cmp ip, #2
-USER( TUSER( strb) r3, [r0], #1) @ May fault
- movge r3, r7, get_byte_2
-USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- movgt r3, r7, get_byte_3
-USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-
-.Lc2u_2fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lc2u_2nowords
- mov r3, r7, lspull #16
- ldr r7, [r1], #4
- orr r3, r3, r7, lspush #16
-USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lc2u_2fupi
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #16
- blt .Lc2u_2rem8lp
-
-.Lc2u_2cpy8lp: mov r3, r7, lspull #16
- ldmia r1!, {r4 - r7}
- subs ip, ip, #16
- orr r3, r3, r4, lspush #16
- mov r4, r4, lspull #16
- orr r4, r4, r5, lspush #16
- mov r5, r5, lspull #16
- orr r5, r5, r6, lspush #16
- mov r6, r6, lspull #16
- orr r6, r6, r7, lspush #16
- stmia r0!, {r3 - r6} @ Shouldnt fault
- bpl .Lc2u_2cpy8lp
-
-.Lc2u_2rem8lp: tst ip, #8
- movne r3, r7, lspull #16
- ldmneia r1!, {r4, r7}
- orrne r3, r3, r4, lspush #16
- movne r4, r4, lspull #16
- orrne r4, r4, r7, lspush #16
- stmneia r0!, {r3 - r4} @ Shouldnt fault
- tst ip, #4
- movne r3, r7, lspull #16
- ldrne r7, [r1], #4
- orrne r3, r3, r7, lspush #16
- TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_2fupi
-.Lc2u_2nowords: mov r3, r7, get_byte_2
- teq ip, #0
- beq .Lc2u_finished
- cmp ip, #2
-USER( TUSER( strb) r3, [r0], #1) @ May fault
- movge r3, r7, get_byte_3
-USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #0
-USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-
-.Lc2u_3fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lc2u_3nowords
- mov r3, r7, lspull #24
- ldr r7, [r1], #4
- orr r3, r3, r7, lspush #8
-USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lc2u_3fupi
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #16
- blt .Lc2u_3rem8lp
-
-.Lc2u_3cpy8lp: mov r3, r7, lspull #24
- ldmia r1!, {r4 - r7}
- subs ip, ip, #16
- orr r3, r3, r4, lspush #8
- mov r4, r4, lspull #24
- orr r4, r4, r5, lspush #8
- mov r5, r5, lspull #24
- orr r5, r5, r6, lspush #8
- mov r6, r6, lspull #24
- orr r6, r6, r7, lspush #8
- stmia r0!, {r3 - r6} @ Shouldnt fault
- bpl .Lc2u_3cpy8lp
-
-.Lc2u_3rem8lp: tst ip, #8
- movne r3, r7, lspull #24
- ldmneia r1!, {r4, r7}
- orrne r3, r3, r4, lspush #8
- movne r4, r4, lspull #24
- orrne r4, r4, r7, lspush #8
- stmneia r0!, {r3 - r4} @ Shouldnt fault
- tst ip, #4
- movne r3, r7, lspull #24
- ldrne r7, [r1], #4
- orrne r3, r3, r7, lspush #8
- TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_3fupi
-.Lc2u_3nowords: mov r3, r7, get_byte_3
- teq ip, #0
- beq .Lc2u_finished
- cmp ip, #2
-USER( TUSER( strb) r3, [r0], #1) @ May fault
- ldrgeb r3, [r1], #1
-USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #0
-USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-ENDPROC(__copy_to_user)
-
- .pushsection .fixup,"ax"
- .align 0
-9001: ldmfd sp!, {r0, r4 - r7, pc}
- .popsection
-
-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
- * Purpose : copy a block from user memory to kernel memory
- * Params : to - kernel memory
- * : from - user memory
- * : n - number of bytes to copy
- * Returns : Number of bytes NOT copied.
- */
-.Lcfu_dest_not_aligned:
- rsb ip, ip, #4
- cmp ip, #2
-USER( TUSER( ldrb) r3, [r1], #1) @ May fault
- strb r3, [r0], #1
-USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
- strgeb r3, [r0], #1
-USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
- strgtb r3, [r0], #1
- sub r2, r2, ip
- b .Lcfu_dest_aligned
-
-ENTRY(__copy_from_user)
- stmfd sp!, {r0, r2, r4 - r7, lr}
- cmp r2, #4
- blt .Lcfu_not_enough
- ands ip, r0, #3
- bne .Lcfu_dest_not_aligned
-.Lcfu_dest_aligned:
- ands ip, r1, #3
- bne .Lcfu_src_not_aligned
-
-/*
- * Seeing as there has to be at least 8 bytes to copy, we can
- * copy one word, and force a user-mode page fault...
- */
-
-.Lcfu_0fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lcfu_0nowords
-USER( TUSER( ldr) r3, [r1], #4)
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lcfu_0fupi
-/*
- * ip = max no. of bytes to copy before needing another "strt" insn
- */
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #32
- blt .Lcfu_0rem8lp
-
-.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault
- stmia r0!, {r3 - r6}
- ldmia r1!, {r3 - r6} @ Shouldnt fault
- subs ip, ip, #32
- stmia r0!, {r3 - r6}
- bpl .Lcfu_0cpy8lp
-
-.Lcfu_0rem8lp: cmn ip, #16
- ldmgeia r1!, {r3 - r6} @ Shouldnt fault
- stmgeia r0!, {r3 - r6}
- tst ip, #8
- ldmneia r1!, {r3 - r4} @ Shouldnt fault
- stmneia r0!, {r3 - r4}
- tst ip, #4
- TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
- strne r3, [r0], #4
- ands ip, ip, #3
- beq .Lcfu_0fupi
-.Lcfu_0nowords: teq ip, #0
- beq .Lcfu_finished
-.Lcfu_nowords: cmp ip, #2
-USER( TUSER( ldrb) r3, [r1], #1) @ May fault
- strb r3, [r0], #1
-USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
- strgeb r3, [r0], #1
-USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
- strgtb r3, [r0], #1
- b .Lcfu_finished
-
-.Lcfu_not_enough:
- movs ip, r2
- bne .Lcfu_nowords
-.Lcfu_finished: mov r0, #0
- add sp, sp, #8
- ldmfd sp!, {r4 - r7, pc}
-
-.Lcfu_src_not_aligned:
- bic r1, r1, #3
-USER( TUSER( ldr) r7, [r1], #4) @ May fault
- cmp ip, #2
- bgt .Lcfu_3fupi
- beq .Lcfu_2fupi
-.Lcfu_1fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lcfu_1nowords
- mov r3, r7, lspull #8
-USER( TUSER( ldr) r7, [r1], #4) @ May fault
- orr r3, r3, r7, lspush #24
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lcfu_1fupi
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #16
- blt .Lcfu_1rem8lp
-
-.Lcfu_1cpy8lp: mov r3, r7, lspull #8
- ldmia r1!, {r4 - r7} @ Shouldnt fault
- subs ip, ip, #16
- orr r3, r3, r4, lspush #24
- mov r4, r4, lspull #8
- orr r4, r4, r5, lspush #24
- mov r5, r5, lspull #8
- orr r5, r5, r6, lspush #24
- mov r6, r6, lspull #8
- orr r6, r6, r7, lspush #24
- stmia r0!, {r3 - r6}
- bpl .Lcfu_1cpy8lp
-
-.Lcfu_1rem8lp: tst ip, #8
- movne r3, r7, lspull #8
- ldmneia r1!, {r4, r7} @ Shouldnt fault
- orrne r3, r3, r4, lspush #24
- movne r4, r4, lspull #8
- orrne r4, r4, r7, lspush #24
- stmneia r0!, {r3 - r4}
- tst ip, #4
- movne r3, r7, lspull #8
-USER( TUSER( ldrne) r7, [r1], #4) @ May fault
- orrne r3, r3, r7, lspush #24
- strne r3, [r0], #4
- ands ip, ip, #3
- beq .Lcfu_1fupi
-.Lcfu_1nowords: mov r3, r7, get_byte_1
- teq ip, #0
- beq .Lcfu_finished
- cmp ip, #2
- strb r3, [r0], #1
- movge r3, r7, get_byte_2
- strgeb r3, [r0], #1
- movgt r3, r7, get_byte_3
- strgtb r3, [r0], #1
- b .Lcfu_finished
-
-.Lcfu_2fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lcfu_2nowords
- mov r3, r7, lspull #16
-USER( TUSER( ldr) r7, [r1], #4) @ May fault
- orr r3, r3, r7, lspush #16
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lcfu_2fupi
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #16
- blt .Lcfu_2rem8lp
-
-
-.Lcfu_2cpy8lp: mov r3, r7, lspull #16
- ldmia r1!, {r4 - r7} @ Shouldnt fault
- subs ip, ip, #16
- orr r3, r3, r4, lspush #16
- mov r4, r4, lspull #16
- orr r4, r4, r5, lspush #16
- mov r5, r5, lspull #16
- orr r5, r5, r6, lspush #16
- mov r6, r6, lspull #16
- orr r6, r6, r7, lspush #16
- stmia r0!, {r3 - r6}
- bpl .Lcfu_2cpy8lp
-
-.Lcfu_2rem8lp: tst ip, #8
- movne r3, r7, lspull #16
- ldmneia r1!, {r4, r7} @ Shouldnt fault
- orrne r3, r3, r4, lspush #16
- movne r4, r4, lspull #16
- orrne r4, r4, r7, lspush #16
- stmneia r0!, {r3 - r4}
- tst ip, #4
- movne r3, r7, lspull #16
-USER( TUSER( ldrne) r7, [r1], #4) @ May fault
- orrne r3, r3, r7, lspush #16
- strne r3, [r0], #4
- ands ip, ip, #3
- beq .Lcfu_2fupi
-.Lcfu_2nowords: mov r3, r7, get_byte_2
- teq ip, #0
- beq .Lcfu_finished
- cmp ip, #2
- strb r3, [r0], #1
- movge r3, r7, get_byte_3
- strgeb r3, [r0], #1
-USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
- strgtb r3, [r0], #1
- b .Lcfu_finished
-
-.Lcfu_3fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lcfu_3nowords
- mov r3, r7, lspull #24
-USER( TUSER( ldr) r7, [r1], #4) @ May fault
- orr r3, r3, r7, lspush #8
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
- beq .Lcfu_3fupi
- cmp r2, ip
- movlt ip, r2
- sub r2, r2, ip
- subs ip, ip, #16
- blt .Lcfu_3rem8lp
-
-.Lcfu_3cpy8lp: mov r3, r7, lspull #24
- ldmia r1!, {r4 - r7} @ Shouldnt fault
- orr r3, r3, r4, lspush #8
- mov r4, r4, lspull #24
- orr r4, r4, r5, lspush #8
- mov r5, r5, lspull #24
- orr r5, r5, r6, lspush #8
- mov r6, r6, lspull #24
- orr r6, r6, r7, lspush #8
- stmia r0!, {r3 - r6}
- subs ip, ip, #16
- bpl .Lcfu_3cpy8lp
-
-.Lcfu_3rem8lp: tst ip, #8
- movne r3, r7, lspull #24
- ldmneia r1!, {r4, r7} @ Shouldnt fault
- orrne r3, r3, r4, lspush #8
- movne r4, r4, lspull #24
- orrne r4, r4, r7, lspush #8
- stmneia r0!, {r3 - r4}
- tst ip, #4
- movne r3, r7, lspull #24
-USER( TUSER( ldrne) r7, [r1], #4) @ May fault
- orrne r3, r3, r7, lspush #8
- strne r3, [r0], #4
- ands ip, ip, #3
- beq .Lcfu_3fupi
-.Lcfu_3nowords: mov r3, r7, get_byte_3
- teq ip, #0
- beq .Lcfu_finished
- cmp ip, #2
- strb r3, [r0], #1
-USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
- strgeb r3, [r0], #1
-USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
- strgtb r3, [r0], #1
- b .Lcfu_finished
-ENDPROC(__copy_from_user)
-
- .pushsection .fixup,"ax"
- .align 0
- /*
- * We took an exception. r0 contains a pointer to
- * the byte not copied.
- */
-9001: ldr r2, [sp], #4 @ void *to
- sub r2, r0, r2 @ bytes copied
- ldr r1, [sp], #4 @ unsigned long count
- subs r4, r1, r2 @ bytes left to copy
- movne r1, r4
- blne __memzero
- mov r0, r4
- ldmfd sp!, {r4 - r7, pc}
- .popsection
-
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 45d6bd09e6ef..c622c306c390 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -30,18 +30,15 @@ struct cns3xxx_pcie {
unsigned int irqs[2];
struct resource res_io;
struct resource res_mem;
- struct hw_pci hw_pci;
-
+ int port;
bool linked;
};
-static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
-
static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
{
struct pci_sys_data *root = sysdata;
- return &cns3xxx_pcie[root->domain];
+ return root->private_data;
}
static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
@@ -54,8 +51,8 @@ static struct cns3xxx_pcie *pbus_to_cnspci(struct pci_bus *bus)
return sysdata_to_cnspci(bus->sysdata);
}
-static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
- unsigned int devfn, int where)
+static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus);
int busno = bus->number;
@@ -91,55 +88,22 @@ static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- u32 v;
- void __iomem *base;
+ int ret;
u32 mask = (0x1ull << (size * 8)) - 1;
int shift = (where % 4) * 8;
- base = cns3xxx_pci_cfg_base(bus, devfn, where);
- if (!base) {
- *val = 0xffffffff;
- return PCIBIOS_SUCCESSFUL;
- }
-
- v = __raw_readl(base);
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
- if (bus->number == 0 && devfn == 0 &&
- (where & 0xffc) == PCI_CLASS_REVISION) {
+ if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
+ (where & 0xffc) == PCI_CLASS_REVISION)
/*
* RC's class is 0xb, but Linux PCI driver needs 0x604
* for a PCIe bridge. So we must fixup the class code
* to 0x604 here.
*/
- v &= 0xff;
- v |= 0x604 << 16;
- }
+ *val = ((((*val << shift) & 0xff) | (0x604 << 16)) >> shift) & mask;
- *val = (v >> shift) & mask;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int cns3xxx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
-{
- u32 v;
- void __iomem *base;
- u32 mask = (0x1ull << (size * 8)) - 1;
- int shift = (where % 4) * 8;
-
- base = cns3xxx_pci_cfg_base(bus, devfn, where);
- if (!base)
- return PCIBIOS_SUCCESSFUL;
-
- v = __raw_readl(base);
-
- v &= ~(mask << shift);
- v |= (val & mask) << shift;
-
- __raw_writel(v, base);
-
- return PCIBIOS_SUCCESSFUL;
+ return ret;
}
static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
@@ -158,8 +122,9 @@ static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
}
static struct pci_ops cns3xxx_pcie_ops = {
+ .map_bus = cns3xxx_pci_map_bus,
.read = cns3xxx_pci_read_config,
- .write = cns3xxx_pci_write_config,
+ .write = pci_generic_config_write,
};
static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -192,13 +157,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
.flags = IORESOURCE_MEM,
},
.irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
- .hw_pci = {
- .domain = 0,
- .nr_controllers = 1,
- .ops = &cns3xxx_pcie_ops,
- .setup = cns3xxx_pci_setup,
- .map_irq = cns3xxx_pcie_map_irq,
- },
+ .port = 0,
},
[1] = {
.host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
@@ -217,19 +176,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
.flags = IORESOURCE_MEM,
},
.irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
- .hw_pci = {
- .domain = 1,
- .nr_controllers = 1,
- .ops = &cns3xxx_pcie_ops,
- .setup = cns3xxx_pci_setup,
- .map_irq = cns3xxx_pcie_map_irq,
- },
+ .port = 1,
},
};
static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
{
- int port = cnspci->hw_pci.domain;
+ int port = cnspci->port;
u32 reg;
unsigned long time;
@@ -260,9 +213,9 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
{
- int port = cnspci->hw_pci.domain;
+ int port = cnspci->port;
struct pci_sys_data sd = {
- .domain = port,
+ .private_data = cnspci,
};
struct pci_bus bus = {
.number = 0,
@@ -323,6 +276,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
void __init cns3xxx_pcie_init_late(void)
{
int i;
+ void *private_data;
+ struct hw_pci hw_pci = {
+ .nr_controllers = 1,
+ .ops = &cns3xxx_pcie_ops,
+ .setup = cns3xxx_pci_setup,
+ .map_irq = cns3xxx_pcie_map_irq,
+ .private_data = &private_data,
+ };
pcibios_min_io = 0;
pcibios_min_mem = 0;
@@ -335,7 +296,8 @@ void __init cns3xxx_pcie_init_late(void)
cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
- pci_common_init(&cns3xxx_pcie[i].hw_pci);
+ private_data = &cns3xxx_pcie[i];
+ pci_common_init(&hw_pci);
}
pci_assign_unassigned_resources();
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 2204239ed243..2e3464b8bab4 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_MACH_SFFSDR) += board-sffsdr.o
obj-$(CONFIG_MACH_NEUROS_OSD2) += board-neuros-osd2.o
obj-$(CONFIG_MACH_DAVINCI_DM355_EVM) += board-dm355-evm.o
obj-$(CONFIG_MACH_DM355_LEOPARD) += board-dm355-leopard.o
-obj-$(CONFIG_MACH_DAVINCI_DM6467_EVM) += board-dm646x-evm.o cdce949.o
+obj-$(CONFIG_MACH_DAVINCI_DM6467_EVM) += board-dm646x-evm.o
obj-$(CONFIG_MACH_DAVINCI_DM365_EVM) += board-dm365-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA830_EVM) += board-da830-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA850_EVM) += board-da850-evm.o
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index ae129bc49273..846a84ddc28e 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -45,7 +45,6 @@
#include <mach/irqs.h>
#include <mach/serial.h>
#include <mach/clock.h>
-#include <mach/cdce949.h>
#include "davinci.h"
#include "clock.h"
@@ -399,9 +398,6 @@ static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("cpld_video", 0x3b),
},
- {
- I2C_BOARD_INFO("cdce949", 0x6c),
- },
};
static struct davinci_i2c_platform_data i2c_pdata = {
@@ -715,31 +711,6 @@ static void __init evm_init_i2c(void)
evm_init_video();
}
-#define CDCE949_XIN_RATE 27000000
-
-/* CDCE949 support - "lpsc" field is overridden to work as clock number */
-static struct clk cdce_clk_in = {
- .name = "cdce_xin",
- .rate = CDCE949_XIN_RATE,
-};
-
-static struct clk_lookup cdce_clks[] = {
- CLK(NULL, "xin", &cdce_clk_in),
- CLK(NULL, NULL, NULL),
-};
-
-static void __init cdce_clk_init(void)
-{
- struct clk_lookup *c;
- struct clk *clk;
-
- for (c = cdce_clks; c->clk; c++) {
- clk = c->clk;
- clkdev_add(c);
- clk_register(clk);
- }
-}
-
#define DM6467T_EVM_REF_FREQ 33000000
static void __init davinci_map_io(void)
@@ -748,8 +719,6 @@ static void __init davinci_map_io(void)
if (machine_is_davinci_dm6467tevm())
davinci_set_refclk_rate(DM6467T_EVM_REF_FREQ);
-
- cdce_clk_init();
}
#define DM646X_EVM_PHY_ID "davinci_mdio-0:01"
diff --git a/arch/arm/mach-davinci/cdce949.c b/arch/arm/mach-davinci/cdce949.c
deleted file mode 100644
index abafb92031c0..000000000000
--- a/arch/arm/mach-davinci/cdce949.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * TI CDCE949 clock synthesizer driver
- *
- * Note: This implementation assumes an input of 27MHz to the CDCE.
- * This is by no means constrained by CDCE hardware although the datasheet
- * does use this as an example for all illustrations and more importantly:
- * that is the crystal input on boards it is currently used on.
- *
- * Copyright (C) 2009 Texas Instruments Incorporated. http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-
-#include <mach/clock.h>
-#include <mach/cdce949.h>
-
-#include "clock.h"
-
-static struct i2c_client *cdce_i2c_client;
-static DEFINE_MUTEX(cdce_mutex);
-
-/* CDCE register descriptor */
-struct cdce_reg {
- u8 addr;
- u8 val;
-};
-
-/* Per-Output (Y1, Y2 etc.) frequency descriptor */
-struct cdce_freq {
- /* Frequency in KHz */
- unsigned long frequency;
- /*
- * List of registers to program to obtain a particular frequency.
- * 0x0 in register address and value is the end of list marker.
- */
- struct cdce_reg *reglist;
-};
-
-#define CDCE_FREQ_TABLE_ENTRY(line, out) \
-{ \
- .reglist = cdce_y ##line## _ ##out, \
- .frequency = out, \
-}
-
-/* List of CDCE outputs */
-struct cdce_output {
- /* List of frequencies on this output */
- struct cdce_freq *freq_table;
- /* Number of possible frequencies */
- int size;
-};
-
-/*
- * Finding out the values to program into CDCE949 registers for a particular
- * frequency output is not a simple calculation. Have a look at the datasheet
- * for the details. There is desktop software available to help users with
- * the calculations. Here, we just depend on the output of that software
- * (or hand calculations) instead trying to runtime calculate the register
- * values and inflicting misery on ourselves.
- */
-static struct cdce_reg cdce_y1_148500[] = {
- { 0x13, 0x00 },
- /* program PLL1_0 multiplier */
- { 0x18, 0xaf },
- { 0x19, 0x50 },
- { 0x1a, 0x02 },
- { 0x1b, 0xc9 },
- /* program PLL1_11 multiplier */
- { 0x1c, 0x00 },
- { 0x1d, 0x40 },
- { 0x1e, 0x02 },
- { 0x1f, 0xc9 },
- /* output state selection */
- { 0x15, 0x00 },
- { 0x14, 0xef },
- /* switch MUX to PLL1 output */
- { 0x14, 0x6f },
- { 0x16, 0x06 },
- /* set P2DIV divider, P3DIV and input crystal */
- { 0x17, 0x06 },
- { 0x01, 0x00 },
- { 0x05, 0x48 },
- { 0x02, 0x80 },
- /* enable and disable PLL */
- { 0x02, 0xbc },
- { 0x03, 0x01 },
- { },
-};
-
-static struct cdce_reg cdce_y1_74250[] = {
- { 0x13, 0x00 },
- { 0x18, 0xaf },
- { 0x19, 0x50 },
- { 0x1a, 0x02 },
- { 0x1b, 0xc9 },
- { 0x1c, 0x00 },
- { 0x1d, 0x40 },
- { 0x1e, 0x02 },
- { 0x1f, 0xc9 },
- /* output state selection */
- { 0x15, 0x00 },
- { 0x14, 0xef },
- /* switch MUX to PLL1 output */
- { 0x14, 0x6f },
- { 0x16, 0x06 },
- /* set P2DIV divider, P3DIV and input crystal */
- { 0x17, 0x06 },
- { 0x01, 0x00 },
- { 0x05, 0x48 },
- { 0x02, 0x80 },
- /* enable and disable PLL */
- { 0x02, 0xbc },
- { 0x03, 0x02 },
- { },
-};
-
-static struct cdce_reg cdce_y1_27000[] = {
- { 0x13, 0x00 },
- { 0x18, 0x00 },
- { 0x19, 0x40 },
- { 0x1a, 0x02 },
- { 0x1b, 0x08 },
- { 0x1c, 0x00 },
- { 0x1d, 0x40 },
- { 0x1e, 0x02 },
- { 0x1f, 0x08 },
- { 0x15, 0x02 },
- { 0x14, 0xed },
- { 0x16, 0x01 },
- { 0x17, 0x01 },
- { 0x01, 0x00 },
- { 0x05, 0x50 },
- { 0x02, 0xb4 },
- { 0x03, 0x01 },
- { },
-};
-
-static struct cdce_freq cdce_y1_freqs[] = {
- CDCE_FREQ_TABLE_ENTRY(1, 148500),
- CDCE_FREQ_TABLE_ENTRY(1, 74250),
- CDCE_FREQ_TABLE_ENTRY(1, 27000),
-};
-
-static struct cdce_reg cdce_y5_13500[] = {
- { 0x27, 0x08 },
- { 0x28, 0x00 },
- { 0x29, 0x40 },
- { 0x2a, 0x02 },
- { 0x2b, 0x08 },
- { 0x24, 0x6f },
- { },
-};
-
-static struct cdce_reg cdce_y5_16875[] = {
- { 0x27, 0x08 },
- { 0x28, 0x9f },
- { 0x29, 0xb0 },
- { 0x2a, 0x02 },
- { 0x2b, 0x89 },
- { 0x24, 0x6f },
- { },
-};
-
-static struct cdce_reg cdce_y5_27000[] = {
- { 0x27, 0x04 },
- { 0x28, 0x00 },
- { 0x29, 0x40 },
- { 0x2a, 0x02 },
- { 0x2b, 0x08 },
- { 0x24, 0x6f },
- { },
-};
-static struct cdce_reg cdce_y5_54000[] = {
- { 0x27, 0x04 },
- { 0x28, 0xff },
- { 0x29, 0x80 },
- { 0x2a, 0x02 },
- { 0x2b, 0x07 },
- { 0x24, 0x6f },
- { },
-};
-
-static struct cdce_reg cdce_y5_81000[] = {
- { 0x27, 0x02 },
- { 0x28, 0xbf },
- { 0x29, 0xa0 },
- { 0x2a, 0x03 },
- { 0x2b, 0x0a },
- { 0x24, 0x6f },
- { },
-};
-
-static struct cdce_freq cdce_y5_freqs[] = {
- CDCE_FREQ_TABLE_ENTRY(5, 13500),
- CDCE_FREQ_TABLE_ENTRY(5, 16875),
- CDCE_FREQ_TABLE_ENTRY(5, 27000),
- CDCE_FREQ_TABLE_ENTRY(5, 54000),
- CDCE_FREQ_TABLE_ENTRY(5, 81000),
-};
-
-
-static struct cdce_output output_list[] = {
- [1] = { cdce_y1_freqs, ARRAY_SIZE(cdce_y1_freqs) },
- [5] = { cdce_y5_freqs, ARRAY_SIZE(cdce_y5_freqs) },
-};
-
-int cdce_set_rate(struct clk *clk, unsigned long rate)
-{
- int i, ret = 0;
- struct cdce_freq *freq_table = output_list[clk->lpsc].freq_table;
- struct cdce_reg *regs = NULL;
-
- if (!cdce_i2c_client)
- return -ENODEV;
-
- if (!freq_table)
- return -EINVAL;
-
- for (i = 0; i < output_list[clk->lpsc].size; i++) {
- if (freq_table[i].frequency == rate / 1000) {
- regs = freq_table[i].reglist;
- break;
- }
- }
-
- if (!regs)
- return -EINVAL;
-
- mutex_lock(&cdce_mutex);
- for (i = 0; regs[i].addr; i++) {
- ret = i2c_smbus_write_byte_data(cdce_i2c_client,
- regs[i].addr | 0x80, regs[i].val);
- if (ret)
- break;
- }
- mutex_unlock(&cdce_mutex);
-
- if (!ret)
- clk->rate = rate;
-
- return ret;
-}
-
-static int cdce_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- cdce_i2c_client = client;
- return 0;
-}
-
-static int cdce_remove(struct i2c_client *client)
-{
- cdce_i2c_client = NULL;
- return 0;
-}
-
-static const struct i2c_device_id cdce_id[] = {
- {"cdce949", 0},
- {},
-};
-MODULE_DEVICE_TABLE(i2c, cdce_id);
-
-static struct i2c_driver cdce_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "cdce949",
- },
- .probe = cdce_probe,
- .remove = cdce_remove,
- .id_table = cdce_id,
-};
-
-static int __init cdce_init(void)
-{
- return i2c_add_driver(&cdce_driver);
-}
-subsys_initcall(cdce_init);
-
-static void __exit cdce_exit(void)
-{
- i2c_del_driver(&cdce_driver);
-}
-module_exit(cdce_exit);
-
-MODULE_AUTHOR("Texas Instruments");
-MODULE_DESCRIPTION("CDCE949 clock synthesizer driver");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-davinci/include/mach/cdce949.h b/arch/arm/mach-davinci/include/mach/cdce949.h
deleted file mode 100644
index c73331fae341..000000000000
--- a/arch/arm/mach-davinci/include/mach/cdce949.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * TI CDCE949 off-chip clock synthesizer support
- *
- * 2009 (C) Texas Instruments, Inc. http://www.ti.com/
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef _MACH_DAVINCI_CDCE949_H
-#define _MACH_DAVINCI_CDCE949_H
-
-#include <linux/clk.h>
-
-#include <mach/clock.h>
-
-int cdce_set_rate(struct clk *clk, unsigned long rate);
-
-#endif
diff --git a/arch/arm/mach-davinci/serial.c b/arch/arm/mach-davinci/serial.c
index 5e93a734c858..951b620bfa73 100644
--- a/arch/arm/mach-davinci/serial.c
+++ b/arch/arm/mach-davinci/serial.c
@@ -31,16 +31,6 @@
#include <mach/serial.h>
#include <mach/cputype.h>
-static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
- int offset)
-{
- offset <<= up->regshift;
-
- WARN_ONCE(!up->membase, "unmapped read: uart[%d]\n", offset);
-
- return (unsigned int)__raw_readl(up->membase + offset);
-}
-
static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
int value)
{
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index b343a1a626d5..78eca99b98d1 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -27,20 +27,16 @@
#include <asm/mach/map.h>
#include <asm/memory.h>
+#include <mach/map.h>
+
#include "common.h"
#include "mfc.h"
#include "regs-pmu.h"
-#include "regs-sys.h"
void __iomem *pmu_base_addr;
static struct map_desc exynos4_iodesc[] __initdata = {
{
- .virtual = (unsigned long)S3C_VA_SYS,
- .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_SROMC,
.pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
.length = SZ_4K,
@@ -70,11 +66,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
static struct map_desc exynos5_iodesc[] __initdata = {
{
- .virtual = (unsigned long)S3C_VA_SYS,
- .pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_SROMC,
.pfn = __phys_to_pfn(EXYNOS5_PA_SROMC),
.length = SZ_4K,
@@ -213,32 +204,6 @@ static void __init exynos_init_irq(void)
static void __init exynos_dt_machine_init(void)
{
- struct device_node *i2c_np;
- const char *i2c_compat = "samsung,s3c2440-i2c";
- unsigned int tmp;
- int id;
-
- /*
- * Exynos5's legacy i2c controller and new high speed i2c
- * controller have muxed interrupt sources. By default the
- * interrupts for 4-channel HS-I2C controller are enabled.
- * If node for first four channels of legacy i2c controller
- * are available then re-configure the interrupts via the
- * system register.
- */
- if (soc_is_exynos5()) {
- for_each_compatible_node(i2c_np, NULL, i2c_compat) {
- if (of_device_is_available(i2c_np)) {
- id = of_alias_get_id(i2c_np, "i2c");
- if (id < 4) {
- tmp = readl(EXYNOS5_SYS_I2C_CFG);
- writel(tmp & ~(0x1 << id),
- EXYNOS5_SYS_I2C_CFG);
- }
- }
- }
- }
-
/*
* This is called from smp_prepare_cpus if we've built for SMP, but
* we still need to set it up for PM and firmware ops if not.
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 766f57d2f029..4791a3cc00f9 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -17,6 +17,7 @@
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
#include <asm/suspend.h>
#include <mach/map.h>
@@ -136,6 +137,43 @@ static const struct firmware_ops exynos_firmware_ops = {
.resume = IS_ENABLED(CONFIG_EXYNOS_CPU_SUSPEND) ? exynos_resume : NULL,
};
+static void exynos_l2_write_sec(unsigned long val, unsigned reg)
+{
+ static int l2cache_enabled;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (val & L2X0_CTRL_EN) {
+ /*
+ * Before the cache can be enabled, due to firmware
+ * design, SMC_CMD_L2X0INVALL must be called.
+ */
+ if (!l2cache_enabled) {
+ exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0);
+ l2cache_enabled = 1;
+ }
+ } else {
+ l2cache_enabled = 0;
+ }
+ exynos_smc(SMC_CMD_L2X0CTRL, val, 0, 0);
+ break;
+
+ case L2X0_DEBUG_CTRL:
+ exynos_smc(SMC_CMD_L2X0DEBUG, val, 0, 0);
+ break;
+
+ default:
+ WARN_ONCE(1, "%s: ignoring write to reg 0x%x\n", __func__, reg);
+ }
+}
+
+static void exynos_l2_configure(const struct l2x0_regs *regs)
+{
+ exynos_smc(SMC_CMD_L2X0SETUP1, regs->tag_latency, regs->data_latency,
+ regs->prefetch_ctrl);
+ exynos_smc(SMC_CMD_L2X0SETUP2, regs->pwr_ctrl, regs->aux_ctrl, 0);
+}
+
void __init exynos_firmware_init(void)
{
struct device_node *nd;
@@ -155,4 +193,16 @@ void __init exynos_firmware_init(void)
pr_info("Running under secure firmware.\n");
register_firmware_ops(&exynos_firmware_ops);
+
+ /*
+ * Exynos 4 SoCs (based on Cortex A9 and equipped with L2C-310),
+ * running under secure firmware, require certain registers of the
+ * L2 cache controller to be written in secure mode. Here the
+ * .write_sec callback is provided to perform the necessary SMC calls.
+ */
+ if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+ read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
+ outer_cache.write_sec = exynos_l2_write_sec;
+ outer_cache.configure = exynos_l2_configure;
+ }
}
diff --git a/arch/arm/mach-exynos/include/mach/dma.h b/arch/arm/mach-exynos/include/mach/dma.h
deleted file mode 100644
index 201842a3769e..000000000000
--- a/arch/arm/mach-exynos/include/mach/dma.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MACH_DMA_H
-#define __MACH_DMA_H
-
-/* This platform uses the common DMA API driver for PL330 */
-#include <plat/dma-pl330.h>
-
-#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 1ad3f496ef56..de3ae59e1cfb 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -24,9 +24,6 @@
#define EXYNOS_PA_CHIPID 0x10000000
-#define EXYNOS4_PA_SYSCON 0x10010000
-#define EXYNOS5_PA_SYSCON 0x10050100
-
#define EXYNOS4_PA_CMU 0x10030000
#define EXYNOS5_PA_CMU 0x10010000
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 86f3ecd88f78..dfc8594e5b1f 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -23,12 +23,13 @@
#include <asm/smp_scu.h>
#include <asm/suspend.h>
+#include <mach/map.h>
+
#include <plat/pm-common.h>
#include "common.h"
#include "exynos-pmu.h"
#include "regs-pmu.h"
-#include "regs-sys.h"
static inline void __iomem *exynos_boot_vector_addr(void)
{
diff --git a/arch/arm/mach-exynos/regs-sys.h b/arch/arm/mach-exynos/regs-sys.h
deleted file mode 100644
index 84332b0dd7a6..000000000000
--- a/arch/arm/mach-exynos/regs-sys.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - system register definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SYS_H
-#define __ASM_ARCH_REGS_SYS_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_SYSREG(x) (S3C_VA_SYS + (x))
-
-/* For EXYNOS5 */
-#define EXYNOS5_SYS_I2C_CFG S5P_SYSREG(0x0234)
-
-#endif /* __ASM_ARCH_REGS_SYS_H */
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
index e3c373082bbe..31d25834b9c4 100644
--- a/arch/arm/mach-exynos/sleep.S
+++ b/arch/arm/mach-exynos/sleep.S
@@ -16,6 +16,8 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/hardware/cache-l2x0.h>
#include "smc.h"
#define CPU_MASK 0xff0ffff0
@@ -74,6 +76,45 @@ ENTRY(exynos_cpu_resume_ns)
mov r0, #SMC_CMD_C15RESUME
dsb
smc #0
+#ifdef CONFIG_CACHE_L2X0
+ adr r0, 1f
+ ldr r2, [r0]
+ add r0, r2, r0
+
+ /* Check that the address has been initialised. */
+ ldr r1, [r0, #L2X0_R_PHY_BASE]
+ teq r1, #0
+ beq skip_l2x0
+
+ /* Check if controller has been enabled. */
+ ldr r2, [r1, #L2X0_CTRL]
+ tst r2, #0x1
+ bne skip_l2x0
+
+ ldr r1, [r0, #L2X0_R_TAG_LATENCY]
+ ldr r2, [r0, #L2X0_R_DATA_LATENCY]
+ ldr r3, [r0, #L2X0_R_PREFETCH_CTRL]
+ mov r0, #SMC_CMD_L2X0SETUP1
+ smc #0
+
+ /* Reload saved regs pointer because smc corrupts registers. */
+ adr r0, 1f
+ ldr r2, [r0]
+ add r0, r2, r0
+
+ ldr r1, [r0, #L2X0_R_PWR_CTRL]
+ ldr r2, [r0, #L2X0_R_AUX_CTRL]
+ mov r0, #SMC_CMD_L2X0SETUP2
+ smc #0
+
+ mov r0, #SMC_CMD_L2X0INVALL
+ smc #0
+
+ mov r1, #1
+ mov r0, #SMC_CMD_L2X0CTRL
+ smc #0
+skip_l2x0:
+#endif /* CONFIG_CACHE_L2X0 */
skip_cp15:
b cpu_resume
ENDPROC(exynos_cpu_resume_ns)
@@ -83,3 +124,8 @@ cp15_save_diag:
.globl cp15_save_power
cp15_save_power:
.long 0 @ cp15 power control
+
+#ifdef CONFIG_CACHE_L2X0
+ .align
+1: .long l2x0_saved_regs - .
+#endif /* CONFIG_CACHE_L2X0 */
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index d6feef31b468..82e6b6fba23f 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -34,7 +34,6 @@
#include "common.h"
#include "regs-pmu.h"
-#include "regs-sys.h"
#include "exynos-pmu.h"
#define S5P_CHECK_SLEEP 0x00000BAD
@@ -53,10 +52,6 @@ struct exynos_wkup_irq {
u32 mask;
};
-static struct sleep_save exynos5_sys_save[] = {
- SAVE_ITEM(EXYNOS5_SYS_I2C_CFG),
-};
-
static struct sleep_save exynos_core_save[] = {
/* SROM side */
SAVE_ITEM(S5P_SROM_BW),
@@ -571,8 +566,6 @@ static const struct exynos_pm_data exynos5250_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
.release_ret_regs = exynos_release_ret_regs,
- .extra_save = exynos5_sys_save,
- .num_extra_save = ARRAY_SIZE(exynos5_sys_save),
.pm_suspend = exynos_pm_suspend,
.pm_resume = exynos_pm_resume,
.pm_prepare = exynos_pm_prepare,
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 108b80bb048a..d04a430607b8 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -144,7 +144,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
post_div_table[1].div = 1;
post_div_table[2].div = 1;
video_div_table[1].div = 1;
- video_div_table[2].div = 1;
+ video_div_table[3].div = 1;
}
clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/arch/arm/mach-imx/clk-imx6sx.c b/arch/arm/mach-imx/clk-imx6sx.c
index 17354a11356f..5a3e5a159e70 100644
--- a/arch/arm/mach-imx/clk-imx6sx.c
+++ b/arch/arm/mach-imx/clk-imx6sx.c
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
}
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index c186a17c2cff..2565f0e7b5cf 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -356,7 +356,6 @@ static u64 pre_mem_pci_sz;
* 7:2 register number
*
*/
-static DEFINE_RAW_SPINLOCK(v3_lock);
#undef V3_LB_BASE_PREFETCH
#define V3_LB_BASE_PREFETCH 0
@@ -457,67 +456,21 @@ static void v3_close_config_window(void)
static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
- void __iomem *addr;
- unsigned long flags;
- u32 v;
-
- raw_spin_lock_irqsave(&v3_lock, flags);
- addr = v3_open_config_window(bus, devfn, where);
-
- switch (size) {
- case 1:
- v = __raw_readb(addr);
- break;
-
- case 2:
- v = __raw_readw(addr);
- break;
-
- default:
- v = __raw_readl(addr);
- break;
- }
-
+ int ret = pci_generic_config_read(bus, devfn, where, size, val);
v3_close_config_window();
- raw_spin_unlock_irqrestore(&v3_lock, flags);
-
- *val = v;
- return PCIBIOS_SUCCESSFUL;
+ return ret;
}
static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
- void __iomem *addr;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&v3_lock, flags);
- addr = v3_open_config_window(bus, devfn, where);
-
- switch (size) {
- case 1:
- __raw_writeb((u8)val, addr);
- __raw_readb(addr);
- break;
-
- case 2:
- __raw_writew((u16)val, addr);
- __raw_readw(addr);
- break;
-
- case 4:
- __raw_writel(val, addr);
- __raw_readl(addr);
- break;
- }
-
+ int ret = pci_generic_config_write(bus, devfn, where, size, val);
v3_close_config_window();
- raw_spin_unlock_irqrestore(&v3_lock, flags);
-
- return PCIBIOS_SUCCESSFUL;
+ return ret;
}
static struct pci_ops pci_v3_ops = {
+ .map_bus = v3_open_config_window,
.read = v3_read_config,
.write = v3_write_config,
};
@@ -658,7 +611,6 @@ static int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
*/
static void __init pci_v3_preinit(void)
{
- unsigned long flags;
unsigned int temp;
phys_addr_t io_address = pci_pio_to_address(io_mem.start);
@@ -672,8 +624,6 @@ static void __init pci_v3_preinit(void)
hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
- raw_spin_lock_irqsave(&v3_lock, flags);
-
/*
* Unlock V3 registers, but only if they were previously locked.
*/
@@ -736,8 +686,6 @@ static void __init pci_v3_preinit(void)
v3_writew(V3_LB_CFG, v3_readw(V3_LB_CFG) | (1 << 10));
v3_writeb(V3_LB_IMASK, 0x28);
__raw_writel(3, ap_syscon_base + INTEGRATOR_SC_PCIENABLE_OFFSET);
-
- raw_spin_unlock_irqrestore(&v3_lock, flags);
}
static void __init pci_v3_postinit(void)
diff --git a/arch/arm/mach-ks8695/pci.c b/arch/arm/mach-ks8695/pci.c
index bb18193b4bac..c1bc4c3716ed 100644
--- a/arch/arm/mach-ks8695/pci.c
+++ b/arch/arm/mach-ks8695/pci.c
@@ -38,8 +38,6 @@
static int pci_dbg;
-static int pci_cfg_dbg;
-
static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsigned int where)
{
@@ -59,75 +57,11 @@ static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsi
}
}
-
-/*
- * The KS8695 datasheet prohibits anything other than 32bit accesses
- * to the IO registers, so all our configuration must be done with
- * 32bit operations, and the correct bit masking and shifting.
- */
-
-static int ks8695_pci_readconfig(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *value)
-{
- ks8695_pci_setupconfig(bus->number, devfn, where);
-
- *value = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
-
- switch (size) {
- case 4:
- break;
- case 2:
- *value = *value >> ((where & 2) * 8);
- *value &= 0xffff;
- break;
- case 1:
- *value = *value >> ((where & 3) * 8);
- *value &= 0xff;
- break;
- }
-
- if (pci_cfg_dbg) {
- printk("read: %d,%08x,%02x,%d: %08x (%08x)\n",
- bus->number, devfn, where, size, *value,
- __raw_readl(KS8695_PCI_VA + KS8695_PBCD));
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int ks8695_pci_writeconfig(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 value)
+static void __iomem *ks8695_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
{
- unsigned long tmp;
-
- if (pci_cfg_dbg) {
- printk("write: %d,%08x,%02x,%d: %08x\n",
- bus->number, devfn, where, size, value);
- }
-
ks8695_pci_setupconfig(bus->number, devfn, where);
-
- switch (size) {
- case 4:
- __raw_writel(value, KS8695_PCI_VA + KS8695_PBCD);
- break;
- case 2:
- tmp = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
- tmp &= ~(0xffff << ((where & 2) * 8));
- tmp |= value << ((where & 2) * 8);
-
- __raw_writel(tmp, KS8695_PCI_VA + KS8695_PBCD);
- break;
- case 1:
- tmp = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
- tmp &= ~(0xff << ((where & 3) * 8));
- tmp |= value << ((where & 3) * 8);
-
- __raw_writel(tmp, KS8695_PCI_VA + KS8695_PBCD);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
+ return KS8695_PCI_VA + KS8695_PBCD;
}
static void ks8695_local_writeconfig(int where, u32 value)
@@ -137,8 +71,9 @@ static void ks8695_local_writeconfig(int where, u32 value)
}
static struct pci_ops ks8695_pci_ops = {
- .read = ks8695_pci_readconfig,
- .write = ks8695_pci_writeconfig,
+ .map_bus = ks8695_pci_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
static struct resource pci_mem = {
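
The constraint spelled out in the removed comment (the KS8695 tolerates only 32-bit accesses to its config data register) is preserved by switching to pci_generic_config_read32()/write32(): those helpers always touch the bus with 32-bit operations and do the same sub-word masking and shifting that ks8695_pci_readconfig()/writeconfig() used to do by hand. A sketch of the 32-bit write path under that assumption (illustrative, not the exact kernel source):

static int pci_generic_config_write32_sketch(struct pci_bus *bus,
					     unsigned int devfn,
					     int where, int size, u32 val)
{
	void __iomem *addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	u32 mask, tmp;

	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/* 8- and 16-bit writes: read-modify-write the enclosing 32-bit word */
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

ks8695_pci_map_bus() only has to program the address phase via ks8695_pci_setupconfig() and return the fixed data-register address, so the driver-private pci_cfg_dbg tracing disappears along with the open-coded accessors.
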
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 440799ba664a..b5895f040caa 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -143,6 +143,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
/*
+ * We should switch the PL310 to I/O coherency mode only if
+ * I/O coherency is actually enabled.
+ */
+ if (!coherency_available())
+ return;
+
+ /*
* Add the PL310 property "arm,io-coherent". This makes sure the
* outer sync operation is not used, which allows to
* workaround the system erratum that causes deadlocks when
diff --git a/arch/arm/mach-omap1/include/mach/debug-macro.S b/arch/arm/mach-omap1/include/mach/debug-macro.S
deleted file mode 100644
index 5c1a26c9f490..000000000000
--- a/arch/arm/mach-omap1/include/mach/debug-macro.S
+++ /dev/null
@@ -1,101 +0,0 @@
-/* arch/arm/mach-omap1/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-#include <linux/serial_reg.h>
-
-#include "serial.h"
-
- .pushsection .data
-omap_uart_phys: .word 0x0
-omap_uart_virt: .word 0x0
- .popsection
-
- /*
- * Note that this code won't work if the bootloader passes
- * a wrong machine ID number in r1. To debug, just hardcode
- * the desired UART phys and virt addresses temporarily into
- * the omap_uart_phys and omap_uart_virt above.
- */
- .macro addruart, rp, rv, tmp
-
- /* Use omap_uart_phys/virt if already configured */
-9: adr \rp, 99f @ get effective addr of 99f
- ldr \rv, [\rp] @ get absolute addr of 99f
- sub \rv, \rv, \rp @ offset between the two
- ldr \rp, [\rp, #4] @ abs addr of omap_uart_phys
- sub \tmp, \rp, \rv @ make it effective
- ldr \rp, [\tmp, #0] @ omap_uart_phys
- ldr \rv, [\tmp, #4] @ omap_uart_virt
- cmp \rp, #0 @ is port configured?
- cmpne \rv, #0
- bne 100f @ already configured
-
- /* Check the debug UART configuration set in uncompress.h */
- and \rp, pc, #0xff000000
- ldr \rv, =OMAP_UART_INFO_OFS
- ldr \rp, [\rp, \rv]
-
- /* Select the UART to use based on the UART1 scratchpad value */
-10: cmp \rp, #0 @ no port configured?
- beq 11f @ if none, try to use UART1
- cmp \rp, #OMAP1UART1
- beq 11f @ configure OMAP1UART1
- cmp \rp, #OMAP1UART2
- beq 12f @ configure OMAP1UART2
- cmp \rp, #OMAP1UART3
- beq 13f @ configure OMAP2UART3
-
- /* Configure the UART offset from the phys/virt base */
-11: mov \rp, #0x00fb0000 @ OMAP1UART1
- b 98f
-12: mov \rp, #0x00fb0000 @ OMAP1UART1
- orr \rp, \rp, #0x00000800 @ OMAP1UART2
- b 98f
-13: mov \rp, #0x00fb0000 @ OMAP1UART1
- orr \rp, \rp, #0x00000800 @ OMAP1UART2
- orr \rp, \rp, #0x00009000 @ OMAP1UART3
-
- /* Store both phys and virt address for the uart */
-98: add \rp, \rp, #0xff000000 @ phys base
- str \rp, [\tmp, #0] @ omap_uart_phys
- sub \rp, \rp, #0xff000000 @ phys base
- add \rp, \rp, #0xfe000000 @ virt base
- str \rp, [\tmp, #4] @ omap_uart_virt
- b 9b
-
- .align
-99: .word .
- .word omap_uart_phys
- .ltorg
-
-100:
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro busyuart,rd,rx
-1001: ldrb \rd, [\rx, #(UART_LSR << OMAP_PORT_SHIFT)]
- and \rd, \rd, #(UART_LSR_TEMT | UART_LSR_THRE)
- teq \rd, #(UART_LSR_TEMT | UART_LSR_THRE)
- beq 1002f
- ldrb \rd, [\rx, #(UART_LSR << OMAP7XX_PORT_SHIFT)]
- and \rd, \rd, #(UART_LSR_TEMT | UART_LSR_THRE)
- teq \rd, #(UART_LSR_TEMT | UART_LSR_THRE)
- bne 1001b
-1002:
- .endm
-
- .macro waituart,rd,rx
- .endm
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 122ef67939a2..a8a533df24e1 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -64,11 +64,6 @@ u32 omap_irq_flags;
static unsigned int irq_bank_count;
static struct omap_irq_bank *irq_banks;
-static inline unsigned int irq_bank_readl(int bank, int offset)
-{
- return omap_readl(irq_banks[bank].base_reg + offset);
-}
-
static inline void irq_bank_writel(unsigned long value, int bank, int offset)
{
omap_writel(value, irq_banks[bank].base_reg + offset);
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 107e7ab3edba..36bf174b3fac 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -91,11 +91,6 @@ static inline void omap_32k_timer_write(int val, int reg)
omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
}
-static inline unsigned long omap_32k_timer_read(int reg)
-{
- return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
-}
-
static inline void omap_32k_timer_start(unsigned long load_val)
{
if (!load_val)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 6ab656cc4f16..2b8e47788062 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -217,12 +217,6 @@ config MACH_OMAP3517EVM
bool "OMAP3517/ AM3517 EVM board"
depends on ARCH_OMAP3
default y
- select OMAP_PACKAGE_CBB
-
-config MACH_CRANEBOARD
- bool "AM3517/05 CRANE board"
- depends on ARCH_OMAP3
- select OMAP_PACKAGE_CBB
config MACH_OMAP3_PANDORA
bool "OMAP3 Pandora"
@@ -263,12 +257,6 @@ config MACH_CM_T35
select MACH_CM_T3730
select OMAP_PACKAGE_CUS
-config MACH_CM_T3517
- bool "CompuLab CM-T3517 module"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
-
config MACH_CM_T3730
bool
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 08ed2fe6366c..00d5d8f9f150 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -185,7 +185,6 @@ obj-$(CONFIG_SOC_DRA7XX) += clockdomains7xx_data.o
obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o
obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o
obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o
-obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_apll.o
obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o
obj-$(CONFIG_SOC_OMAP2430) += clock2430.o
obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o
@@ -255,13 +254,8 @@ obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
-obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
-obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
-
-obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
-
obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o
# Platform specific device init code
@@ -291,7 +285,4 @@ ifneq ($(CONFIG_HWSPINLOCK_OMAP),)
obj-y += hwspinlock.o
endif
-emac-$(CONFIG_TI_DAVINCI_EMAC) := am35xx-emac.o
-obj-y += $(emac-m) $(emac-y)
-
obj-y += common-board-devices.o twl-common.o dss-common.o
diff --git a/arch/arm/mach-omap2/am35xx-emac.c b/arch/arm/mach-omap2/am35xx-emac.c
deleted file mode 100644
index 6a6935caac1e..000000000000
--- a/arch/arm/mach-omap2/am35xx-emac.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2011 Ilya Yanok, Emcraft Systems
- *
- * Based on mach-omap2/board-am3517evm.c
- * Copyright (C) 2009 Texas Instruments Incorporated
- * Author: Ranjith Lohithakshan <ranjithl@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/err.h>
-#include <linux/davinci_emac.h>
-#include "omap_device.h"
-#include "am35xx.h"
-#include "control.h"
-#include "am35xx-emac.h"
-
-static void am35xx_enable_emac_int(void)
-{
- u32 v;
-
- v = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
- v |= (AM35XX_CPGMAC_C0_RX_PULSE_CLR | AM35XX_CPGMAC_C0_TX_PULSE_CLR |
- AM35XX_CPGMAC_C0_MISC_PULSE_CLR | AM35XX_CPGMAC_C0_RX_THRESH_CLR);
- omap_ctrl_writel(v, AM35XX_CONTROL_LVL_INTR_CLEAR);
- omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); /* OCP barrier */
-}
-
-static void am35xx_disable_emac_int(void)
-{
- u32 v;
-
- v = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
- v |= (AM35XX_CPGMAC_C0_RX_PULSE_CLR | AM35XX_CPGMAC_C0_TX_PULSE_CLR);
- omap_ctrl_writel(v, AM35XX_CONTROL_LVL_INTR_CLEAR);
- omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); /* OCP barrier */
-}
-
-static struct emac_platform_data am35xx_emac_pdata = {
- .ctrl_reg_offset = AM35XX_EMAC_CNTRL_OFFSET,
- .ctrl_mod_reg_offset = AM35XX_EMAC_CNTRL_MOD_OFFSET,
- .ctrl_ram_offset = AM35XX_EMAC_CNTRL_RAM_OFFSET,
- .ctrl_ram_size = AM35XX_EMAC_CNTRL_RAM_SIZE,
- .hw_ram_addr = AM35XX_EMAC_HW_RAM_ADDR,
- .version = EMAC_VERSION_2,
- .interrupt_enable = am35xx_enable_emac_int,
- .interrupt_disable = am35xx_disable_emac_int,
-};
-
-static struct mdio_platform_data am35xx_mdio_pdata;
-
-static int __init omap_davinci_emac_dev_init(struct omap_hwmod *oh,
- void *pdata, int pdata_len)
-{
- struct platform_device *pdev;
-
- pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len);
- if (IS_ERR(pdev)) {
- WARN(1, "Can't build omap_device for %s:%s.\n",
- oh->class->name, oh->name);
- return PTR_ERR(pdev);
- }
-
- return 0;
-}
-
-void __init am35xx_emac_init(unsigned long mdio_bus_freq, u8 rmii_en)
-{
- struct omap_hwmod *oh;
- u32 v;
- int ret;
-
- oh = omap_hwmod_lookup("davinci_mdio");
- if (!oh) {
- pr_err("Could not find davinci_mdio hwmod\n");
- return;
- }
-
- am35xx_mdio_pdata.bus_freq = mdio_bus_freq;
-
- ret = omap_davinci_emac_dev_init(oh, &am35xx_mdio_pdata,
- sizeof(am35xx_mdio_pdata));
- if (ret) {
- pr_err("Could not build davinci_mdio hwmod device\n");
- return;
- }
-
- oh = omap_hwmod_lookup("davinci_emac");
- if (!oh) {
- pr_err("Could not find davinci_emac hwmod\n");
- return;
- }
-
- am35xx_emac_pdata.rmii_en = rmii_en;
-
- ret = omap_davinci_emac_dev_init(oh, &am35xx_emac_pdata,
- sizeof(am35xx_emac_pdata));
- if (ret) {
- pr_err("Could not build davinci_emac hwmod device\n");
- return;
- }
-
- v = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
- v &= ~AM35XX_CPGMACSS_SW_RST;
- omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
- omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
-}
diff --git a/arch/arm/mach-omap2/am35xx-emac.h b/arch/arm/mach-omap2/am35xx-emac.h
deleted file mode 100644
index 15c6f9ce59a2..000000000000
--- a/arch/arm/mach-omap2/am35xx-emac.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (C) 2011 Ilya Yanok, Emcraft Systems
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define AM35XX_DEFAULT_MDIO_FREQUENCY 1000000
-
-#if defined(CONFIG_TI_DAVINCI_EMAC) || defined(CONFIG_TI_DAVINCI_EMAC_MODULE)
-void am35xx_emac_init(unsigned long mdio_bus_freq, u8 rmii_en);
-#else
-static inline void am35xx_emac_init(unsigned long mdio_bus_freq, u8 rmii_en) {}
-#endif
diff --git a/arch/arm/mach-omap2/am35xx.h b/arch/arm/mach-omap2/am35xx.h
deleted file mode 100644
index 95594495fcf6..000000000000
--- a/arch/arm/mach-omap2/am35xx.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*:
- * Address mappings and base address for AM35XX specific interconnects
- * and peripherals.
- *
- * Copyright (C) 2009 Texas Instruments
- *
- * Author: Sriramakrishnan <srk@ti.com>
- * Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARCH_AM35XX_H
-#define __ASM_ARCH_AM35XX_H
-
-/*
- * Base addresses
- * Note: OMAP3430 IVA2 memory space is being used for AM35xx IPSS modules
- */
-#define AM35XX_IPSS_EMAC_BASE 0x5C000000
-#define AM35XX_IPSS_USBOTGSS_BASE 0x5C040000
-#define AM35XX_IPSS_HECC_BASE 0x5C050000
-#define AM35XX_IPSS_VPFE_BASE 0x5C060000
-
-
-/* HECC module specifc offset definitions */
-#define AM35XX_HECC_SCC_HECC_OFFSET (0x0)
-#define AM35XX_HECC_SCC_RAM_OFFSET (0x3000)
-#define AM35XX_HECC_RAM_OFFSET (0x3000)
-#define AM35XX_HECC_MBOX_OFFSET (0x2000)
-#define AM35XX_HECC_INT_LINE (0x0)
-#define AM35XX_HECC_VERSION (0x1)
-
-#define AM35XX_EMAC_CNTRL_OFFSET (0x10000)
-#define AM35XX_EMAC_CNTRL_MOD_OFFSET (0x0)
-#define AM35XX_EMAC_CNTRL_RAM_OFFSET (0x20000)
-#define AM35XX_EMAC_MDIO_OFFSET (0x30000)
-#define AM35XX_IPSS_MDIO_BASE (AM35XX_IPSS_EMAC_BASE + \
- AM35XX_EMAC_MDIO_OFFSET)
-#define AM35XX_EMAC_CNTRL_RAM_SIZE (0x2000)
-#define AM35XX_EMAC_RAM_ADDR (AM3517_EMAC_BASE + \
- AM3517_EMAC_CNTRL_RAM_OFFSET)
-#define AM35XX_EMAC_HW_RAM_ADDR (0x01E20000)
-
-#endif /* __ASM_ARCH_AM35XX_H */
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
deleted file mode 100644
index 8168ddabaeda..000000000000
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Support for AM3517/05 Craneboard
- * http://www.mistralsolutions.com/products/craneboard.php
- *
- * Copyright (C) 2010 Mistral Solutions Pvt Ltd. <www.mistralsolutions.com>
- * Author: R.Srinath <srinath@mistralsolutions.com>
- *
- * Based on mach-omap2/board-am3517evm.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65910.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/omap-gpmc.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "common-board-devices.h"
-#include "board-flash.h"
-
-#include "am35xx-emac.h"
-#include "mux.h"
-#include "control.h"
-
-#define GPIO_USB_POWER 35
-#define GPIO_USB_NRESET 38
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 1,
- .reset_gpio = GPIO_USB_NRESET,
- .vcc_gpio = GPIO_USB_POWER,
- .vcc_polarity = 1,
- },
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-static struct mtd_partition crane_nand_partitions[] = {
- {
- .name = "X-Loader",
- .offset = 0,
- .size = 4 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE,
- },
- {
- .name = "U-Boot",
- .offset = MTDPART_OFS_APPEND,
- .size = 14 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE,
- },
- {
- .name = "U-Boot Env",
- .offset = MTDPART_OFS_APPEND,
- .size = 2 * NAND_BLOCK_SIZE,
- },
- {
- .name = "Kernel",
- .offset = MTDPART_OFS_APPEND,
- .size = 40 * NAND_BLOCK_SIZE,
- },
- {
- .name = "File System",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct tps65910_board tps65910_pdata = {
- .irq = 7 + OMAP_INTC_START,
- .en_ck32k_xtal = true,
-};
-
-static struct i2c_board_info __initdata tps65910_board_info[] = {
- {
- I2C_BOARD_INFO("tps65910", 0x2d),
- .platform_data = &tps65910_pdata,
- },
-};
-
-static void __init am3517_crane_i2c_init(void)
-{
- omap_register_i2c_bus(1, 2600, tps65910_board_info,
- ARRAY_SIZE(tps65910_board_info));
-}
-
-static void __init am3517_crane_init(void)
-{
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_serial_init();
- omap_sdrc_init(NULL, NULL);
- board_nand_init(crane_nand_partitions,
- ARRAY_SIZE(crane_nand_partitions), 0,
- NAND_BUSWIDTH_16, NULL);
- am3517_crane_i2c_init();
-
- /* Configure GPIO for EHCI port */
- if (omap_mux_init_gpio(GPIO_USB_NRESET, OMAP_PIN_OUTPUT)) {
- pr_err("Can not configure mux for GPIO_USB_NRESET %d\n",
- GPIO_USB_NRESET);
- return;
- }
-
- if (omap_mux_init_gpio(GPIO_USB_POWER, OMAP_PIN_OUTPUT)) {
- pr_err("Can not configure mux for GPIO_USB_POWER %d\n",
- GPIO_USB_POWER);
- return;
- }
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
- am35xx_emac_init(AM35XX_DEFAULT_MDIO_FREQUENCY, 1);
-}
-
-MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = am35xx_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = am3517_crane_init,
- .init_late = am35xx_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
deleted file mode 100644
index 1c091b3fa312..000000000000
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-am3517evm.c
- *
- * Copyright (C) 2009 Texas Instruments Incorporated
- * Author: Ranjith Lohithakshan <ranjithl@ti.com>
- *
- * Based on mach-omap2/board-omap3evm.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/pca953x.h>
-#include <linux/can/platform/ti_hecc.h>
-#include <linux/davinci_emac.h>
-#include <linux/mmc/host.h>
-#include <linux/usb/musb.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include "am35xx.h"
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include "am35xx-emac.h"
-#include "mux.h"
-#include "control.h"
-#include "hsmmc.h"
-
-#define LCD_PANEL_PWR 176
-#define LCD_PANEL_BKLIGHT_PWR 182
-#define LCD_PANEL_PWM 181
-
-static struct i2c_board_info __initdata am3517evm_i2c1_boardinfo[] = {
- {
- I2C_BOARD_INFO("s35390a", 0x30),
- },
-};
-
-/*
- * RTC - S35390A
- */
-#define GPIO_RTCS35390A_IRQ 55
-
-static void __init am3517_evm_rtc_init(void)
-{
- int r;
-
- omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP);
-
- r = gpio_request_one(GPIO_RTCS35390A_IRQ, GPIOF_IN, "rtcs35390a-irq");
- if (r < 0) {
- printk(KERN_WARNING "failed to request GPIO#%d\n",
- GPIO_RTCS35390A_IRQ);
- return;
- }
-
- am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
-}
-
-/*
- * I2C GPIO Expander - TCA6416
- */
-
-/* Mounted on Base-Board */
-static struct pca953x_platform_data am3517evm_gpio_expander_info_0 = {
- .gpio_base = OMAP_MAX_GPIO_LINES,
-};
-static struct i2c_board_info __initdata am3517evm_i2c2_boardinfo[] = {
- {
- I2C_BOARD_INFO("tlv320aic23", 0x1A),
- },
- {
- I2C_BOARD_INFO("tca6416", 0x21),
- .platform_data = &am3517evm_gpio_expander_info_0,
- },
-};
-
-/* Mounted on UI Card */
-static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_1 = {
- .gpio_base = OMAP_MAX_GPIO_LINES + 16,
-};
-static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_2 = {
- .gpio_base = OMAP_MAX_GPIO_LINES + 32,
-};
-static struct i2c_board_info __initdata am3517evm_i2c3_boardinfo[] = {
- {
- I2C_BOARD_INFO("tca6416", 0x20),
- .platform_data = &am3517evm_ui_gpio_expander_info_1,
- },
- {
- I2C_BOARD_INFO("tca6416", 0x21),
- .platform_data = &am3517evm_ui_gpio_expander_info_2,
- },
-};
-
-static int __init am3517_evm_i2c_init(void)
-{
- omap_register_i2c_bus(1, 400, NULL, 0);
- omap_register_i2c_bus(2, 400, am3517evm_i2c2_boardinfo,
- ARRAY_SIZE(am3517evm_i2c2_boardinfo));
- omap_register_i2c_bus(3, 400, am3517evm_i2c3_boardinfo,
- ARRAY_SIZE(am3517evm_i2c3_boardinfo));
-
- return 0;
-}
-
-static const struct display_timing am3517_evm_lcd_videomode = {
- .pixelclock = { 0, 9000000, 0 },
-
- .hactive = { 0, 480, 0 },
- .hfront_porch = { 0, 3, 0 },
- .hback_porch = { 0, 2, 0 },
- .hsync_len = { 0, 42, 0 },
-
- .vactive = { 0, 272, 0 },
- .vfront_porch = { 0, 3, 0 },
- .vback_porch = { 0, 2, 0 },
- .vsync_len = { 0, 11, 0 },
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_PIXDATA_POSEDGE,
-};
-
-static struct panel_dpi_platform_data am3517_evm_lcd_pdata = {
- .name = "lcd",
- .source = "dpi.0",
-
- .data_lines = 16,
-
- .display_timing = &am3517_evm_lcd_videomode,
-
- .enable_gpio = LCD_PANEL_PWR,
- .backlight_gpio = LCD_PANEL_BKLIGHT_PWR,
-};
-
-static struct platform_device am3517_evm_lcd_device = {
- .name = "panel-dpi",
- .id = 0,
- .dev.platform_data = &am3517_evm_lcd_pdata,
-};
-
-static struct connector_dvi_platform_data am3517_evm_dvi_connector_pdata = {
- .name = "dvi",
- .source = "tfp410.0",
- .i2c_bus_num = -1,
-};
-
-static struct platform_device am3517_evm_dvi_connector_device = {
- .name = "connector-dvi",
- .id = 0,
- .dev.platform_data = &am3517_evm_dvi_connector_pdata,
-};
-
-static struct encoder_tfp410_platform_data am3517_evm_tfp410_pdata = {
- .name = "tfp410.0",
- .source = "dpi.0",
- .data_lines = 24,
- .power_down_gpio = -1,
-};
-
-static struct platform_device am3517_evm_tfp410_device = {
- .name = "tfp410",
- .id = 0,
- .dev.platform_data = &am3517_evm_tfp410_pdata,
-};
-
-static struct connector_atv_platform_data am3517_evm_tv_pdata = {
- .name = "tv",
- .source = "venc.0",
- .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
- .invert_polarity = false,
-};
-
-static struct platform_device am3517_evm_tv_connector_device = {
- .name = "connector-analog-tv",
- .id = 0,
- .dev.platform_data = &am3517_evm_tv_pdata,
-};
-
-static struct omap_dss_board_info am3517_evm_dss_data = {
- .default_display_name = "lcd",
-};
-
-static void __init am3517_evm_display_init(void)
-{
- gpio_request_one(LCD_PANEL_PWM, GPIOF_OUT_INIT_HIGH, "lcd panel pwm");
-
- omap_display_init(&am3517_evm_dss_data);
-
- platform_device_register(&am3517_evm_tfp410_device);
- platform_device_register(&am3517_evm_dvi_connector_device);
- platform_device_register(&am3517_evm_lcd_device);
- platform_device_register(&am3517_evm_tv_connector_device);
-}
-
-/*
- * Board initialization
- */
-
-static struct omap_musb_board_data musb_board_data = {
- .interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_OTG,
- .power = 500,
- .set_phy_power = am35x_musb_phy_power,
- .clear_irq = am35x_musb_clear_irq,
- .set_mode = am35x_set_mode,
- .reset = am35x_musb_reset,
-};
-
-static __init void am3517_evm_musb_init(void)
-{
- u32 devconf2;
-
- /*
- * Set up USB clock/mode in the DEVCONF2 register.
- */
- devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
-
- /* USB2.0 PHY reference clock is 13 MHz */
- devconf2 &= ~(CONF2_REFFREQ | CONF2_OTGMODE | CONF2_PHY_GPIOMODE);
- devconf2 |= CONF2_REFFREQ_13MHZ | CONF2_SESENDEN | CONF2_VBDTCTEN
- | CONF2_DATPOL;
-
- omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
-
- usb_musb_init(&musb_board_data);
-}
-
-static __init void am3517_evm_mcbsp1_init(void)
-{
- u32 devconf0;
-
- /* McBSP1 CLKR/FSR signal to be connected to CLKX/FSX pin */
- devconf0 = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- devconf0 |= OMAP2_MCBSP1_CLKR_MASK | OMAP2_MCBSP1_FSR_MASK;
- omap_ctrl_writel(devconf0, OMAP2_CONTROL_DEVCONF0);
-}
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 1,
- .reset_gpio = 57,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- /* USB OTG DRVVBUS offset = 0x212 */
- OMAP3_MUX(SAD2D_MCAD23, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-
-static struct resource am3517_hecc_resources[] = {
- {
- .start = AM35XX_IPSS_HECC_BASE,
- .end = AM35XX_IPSS_HECC_BASE + 0x3FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 24 + OMAP_INTC_START,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device am3517_hecc_device = {
- .name = "ti_hecc",
- .id = -1,
- .num_resources = ARRAY_SIZE(am3517_hecc_resources),
- .resource = am3517_hecc_resources,
-};
-
-static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
- .scc_hecc_offset = AM35XX_HECC_SCC_HECC_OFFSET,
- .scc_ram_offset = AM35XX_HECC_SCC_RAM_OFFSET,
- .hecc_ram_offset = AM35XX_HECC_RAM_OFFSET,
- .mbx_offset = AM35XX_HECC_MBOX_OFFSET,
- .int_line = AM35XX_HECC_INT_LINE,
- .version = AM35XX_HECC_VERSION,
-};
-
-static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
-{
- am3517_hecc_device.dev.platform_data = pdata;
- platform_device_register(&am3517_hecc_device);
-}
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = 127,
- .gpio_wp = 126,
- },
- {
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = 128,
- .gpio_wp = 129,
- },
- {} /* Terminator */
-};
-
-static void __init am3517_evm_init(void)
-{
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-
- am3517_evm_i2c_init();
-
- am3517_evm_display_init();
-
- omap_serial_init();
- omap_sdrc_init(NULL, NULL);
-
- /* Configure GPIO for EHCI port */
- omap_mux_init_gpio(57, OMAP_PIN_OUTPUT);
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
- am3517_evm_hecc_init(&am3517_evm_hecc_pdata);
-
- /* RTC - S35390A */
- am3517_evm_rtc_init();
-
- i2c_register_board_info(1, am3517evm_i2c1_boardinfo,
- ARRAY_SIZE(am3517evm_i2c1_boardinfo));
- /*Ethernet*/
- am35xx_emac_init(AM35XX_DEFAULT_MDIO_FREQUENCY, 1);
-
- /* MUSB */
- am3517_evm_musb_init();
-
- /* McBSP1 */
- am3517_evm_mcbsp1_init();
-
- /* MMC init function */
- omap_hsmmc_init(mmc);
-}
-
-MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = am35xx_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = am3517_evm_init,
- .init_late = am35xx_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
deleted file mode 100644
index 794756df8529..000000000000
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-cm-t3517.c
- *
- * Support for the CompuLab CM-T3517 modules
- *
- * Copyright (C) 2010 CompuLab, Ltd.
- * Author: Igor Grinberg <grinberg@compulab.co.il>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/omap-gpmc.h>
-#include <linux/rtc-v3020.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mmc/host.h>
-#include <linux/can/platform/ti_hecc.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include "am35xx.h"
-
-#include "mux.h"
-#include "control.h"
-#include "hsmmc.h"
-#include "common-board-devices.h"
-#include "am35xx-emac.h"
-
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
-static struct gpio_led cm_t3517_leds[] = {
- [0] = {
- .gpio = 186,
- .name = "cm-t3517:green",
- .default_trigger = "heartbeat",
- .active_low = 0,
- },
-};
-
-static struct gpio_led_platform_data cm_t3517_led_pdata = {
- .num_leds = ARRAY_SIZE(cm_t3517_leds),
- .leds = cm_t3517_leds,
-};
-
-static struct platform_device cm_t3517_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &cm_t3517_led_pdata,
- },
-};
-
-static void __init cm_t3517_init_leds(void)
-{
- platform_device_register(&cm_t3517_led_device);
-}
-#else
-static inline void cm_t3517_init_leds(void) {}
-#endif
-
-#if defined(CONFIG_CAN_TI_HECC) || defined(CONFIG_CAN_TI_HECC_MODULE)
-static struct resource cm_t3517_hecc_resources[] = {
- {
- .start = AM35XX_IPSS_HECC_BASE,
- .end = AM35XX_IPSS_HECC_BASE + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 24 + OMAP_INTC_START,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct ti_hecc_platform_data cm_t3517_hecc_pdata = {
- .scc_hecc_offset = AM35XX_HECC_SCC_HECC_OFFSET,
- .scc_ram_offset = AM35XX_HECC_SCC_RAM_OFFSET,
- .hecc_ram_offset = AM35XX_HECC_RAM_OFFSET,
- .mbx_offset = AM35XX_HECC_MBOX_OFFSET,
- .int_line = AM35XX_HECC_INT_LINE,
- .version = AM35XX_HECC_VERSION,
-};
-
-static struct platform_device cm_t3517_hecc_device = {
- .name = "ti_hecc",
- .id = 1,
- .num_resources = ARRAY_SIZE(cm_t3517_hecc_resources),
- .resource = cm_t3517_hecc_resources,
- .dev = {
- .platform_data = &cm_t3517_hecc_pdata,
- },
-};
-
-static void cm_t3517_init_hecc(void)
-{
- platform_device_register(&cm_t3517_hecc_device);
-}
-#else
-static inline void cm_t3517_init_hecc(void) {}
-#endif
-
-#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-static struct omap2_hsmmc_info cm_t3517_mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = 144,
- .gpio_wp = 59,
- },
- {
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- },
- {} /* Terminator */
-};
-#else
-#define cm_t3517_mmc NULL
-#endif
-
-#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
-#define RTC_IO_GPIO (153)
-#define RTC_WR_GPIO (154)
-#define RTC_RD_GPIO (53)
-#define RTC_CS_GPIO (163)
-#define RTC_CS_EN_GPIO (160)
-
-struct v3020_platform_data cm_t3517_v3020_pdata = {
- .use_gpio = 1,
- .gpio_cs = RTC_CS_GPIO,
- .gpio_wr = RTC_WR_GPIO,
- .gpio_rd = RTC_RD_GPIO,
- .gpio_io = RTC_IO_GPIO,
-};
-
-static struct platform_device cm_t3517_rtc_device = {
- .name = "v3020",
- .id = -1,
- .dev = {
- .platform_data = &cm_t3517_v3020_pdata,
- }
-};
-
-static void __init cm_t3517_init_rtc(void)
-{
- int err;
-
- err = gpio_request_one(RTC_CS_EN_GPIO, GPIOF_OUT_INIT_HIGH,
- "rtc cs en");
- if (err) {
- pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
- return;
- }
-
- platform_device_register(&cm_t3517_rtc_device);
-}
-#else
-static inline void cm_t3517_init_rtc(void) {}
-#endif
-
-#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_EHCI_HCD_MODULE)
-#define HSUSB1_RESET_GPIO (146)
-#define HSUSB2_RESET_GPIO (147)
-#define USB_HUB_RESET_GPIO (152)
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 1,
- .reset_gpio = HSUSB1_RESET_GPIO,
- .vcc_gpio = -EINVAL,
- },
- {
- .port = 2,
- .reset_gpio = HSUSB2_RESET_GPIO,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_omap_platform_data cm_t3517_ehci_pdata __initdata = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-static int __init cm_t3517_init_usbh(void)
-{
- int err;
-
- err = gpio_request_one(USB_HUB_RESET_GPIO, GPIOF_OUT_INIT_LOW,
- "usb hub rst");
- if (err) {
- pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err);
- } else {
- udelay(10);
- gpio_set_value(USB_HUB_RESET_GPIO, 1);
- msleep(1);
- }
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&cm_t3517_ehci_pdata);
-
- return 0;
-}
-#else
-static inline int cm_t3517_init_usbh(void)
-{
- return 0;
-}
-#endif
-
-#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
-static struct mtd_partition cm_t3517_nand_partitions[] = {
- {
- .name = "xloader",
- .offset = 0, /* Offset = 0x00000 */
- .size = 4 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE
- },
- {
- .name = "uboot",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
- .size = 15 * NAND_BLOCK_SIZE,
- },
- {
- .name = "uboot environment",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
- .size = 2 * NAND_BLOCK_SIZE,
- },
- {
- .name = "linux",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x2A0000 */
- .size = 32 * NAND_BLOCK_SIZE,
- },
- {
- .name = "rootfs",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x6A0000 */
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct omap_nand_platform_data cm_t3517_nand_data = {
- .parts = cm_t3517_nand_partitions,
- .nr_parts = ARRAY_SIZE(cm_t3517_nand_partitions),
- .cs = 0,
-};
-
-static void __init cm_t3517_init_nand(void)
-{
- if (gpmc_nand_init(&cm_t3517_nand_data, NULL) < 0)
- pr_err("CM-T3517: NAND initialization failed\n");
-}
-#else
-static inline void cm_t3517_init_nand(void) {}
-#endif
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- /* GPIO186 - Green LED */
- OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-
- /* RTC GPIOs: */
- /* IO - GPIO153 */
- OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- /* WR# - GPIO154 */
- OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- /* RD# - GPIO53 */
- OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- /* CS# - GPIO163 */
- OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- /* CS EN - GPIO160 */
- OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-
- /* HSUSB1 RESET */
- OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- /* HSUSB2 RESET */
- OMAP3_MUX(UART2_RX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- /* CM-T3517 USB HUB nRESET */
- OMAP3_MUX(MCBSP4_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-
- /* CD - GPIO144 and WP - GPIO59 for MMC1 - SB-T35 */
- OMAP3_MUX(UART2_CTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(GPMC_CLK, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
-
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static void __init cm_t3517_init(void)
-{
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_serial_init();
- omap_sdrc_init(NULL, NULL);
- cm_t3517_init_leds();
- cm_t3517_init_nand();
- cm_t3517_init_rtc();
- cm_t3517_init_usbh();
- cm_t3517_init_hecc();
- am35xx_emac_init(AM35XX_DEFAULT_MDIO_FREQUENCY, 1);
- omap_hsmmc_init(cm_t3517_mmc);
-}
-
-MACHINE_START(CM_T3517, "Compulab CM-T3517")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = am35xx_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = cm_t3517_init,
- .init_late = am35xx_init_late,
- .init_time = omap3_gptimer_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 359fc5dcbba4..34ff14b7beab 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -77,6 +77,24 @@ MACHINE_END
#endif
#ifdef CONFIG_ARCH_OMAP3
+/* Some boards need board name for legacy userspace in /proc/cpuinfo */
+static const char *const n900_boards_compat[] __initconst = {
+ "nokia,omap3-n900",
+ NULL,
+};
+
+DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
+ .reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
+ .init_time = omap3_sync32k_timer_init,
+ .dt_compat = n900_boards_compat,
+ .restart = omap3xxx_restart,
+MACHINE_END
+
+/* Generic omap3 boards, most boards can use these */
static const char *const omap3_boards_compat[] __initconst = {
"ti,omap3430",
"ti,omap3",
@@ -207,6 +225,9 @@ static const char *const omap4_boards_compat[] __initconst = {
};
DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
+ .l2c_aux_val = OMAP_L2C_AUX_CTRL,
+ .l2c_aux_mask = 0xcf9fffff,
+ .l2c_write_sec = omap4_l2c310_write_sec,
.reserve = omap_reserve,
.smp = smp_ops(omap4_smp_ops),
.map_io = omap4_map_io,
@@ -250,6 +271,9 @@ static const char *const am43_boards_compat[] __initconst = {
};
DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
+ .l2c_aux_val = OMAP_L2C_AUX_CTRL,
+ .l2c_aux_mask = 0xcf9fffff,
+ .l2c_write_sec = omap4_l2c310_write_sec,
.map_io = am33xx_map_io,
.init_early = am43xx_init_early,
.init_late = am43xx_init_late,
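
The dedicated RX-51 entry added above exists only so that legacy userspace keeps seeing a familiar board name: the string passed to DT_MACHINE_START() is what the ARM setup code reports in the "Hardware" line of /proc/cpuinfo, roughly along these lines (a sketch of the reporting path, not quoted verbatim):

	/* arch/arm/kernel/setup.c, c_show(), approximately: */
	seq_printf(m, "Hardware\t: %s\n", machine_name);	/* "Nokia RX-51 board" */

Apart from the name and the nokia,omap3-n900 .dt_compat entry, the callbacks are the standard OMAP3 ones shown in the generic descriptor, so no board file is reintroduced.
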
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 7f1708738c30..969e1003dd92 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -254,12 +254,14 @@ static void pandora_wl1251_init_card(struct mmc_card *card)
* We have TI wl1251 attached to MMC3. Pass this information to
* SDIO core because it can't be probed by normal methods.
*/
- card->quirks |= MMC_QUIRK_NONSTD_SDIO;
- card->cccr.wide_bus = 1;
- card->cis.vendor = 0x104c;
- card->cis.device = 0x9066;
- card->cis.blksize = 512;
- card->cis.max_dtr = 20000000;
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 20000000;
+ }
}
static struct omap2_hsmmc_info omap3pandora_mmc[] = {
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 644ff3231bb8..e79c80bbc755 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -3634,10 +3634,6 @@ int __init omap3xxx_clk_init(void)
omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
- } else if (soc_is_am33xx()) {
- cpu_mask = RATE_IN_AM33XX;
- } else if (cpu_is_ti814x()) {
- cpu_mask = RATE_IN_TI814X;
} else if (cpu_is_omap34xx()) {
if (omap_rev() == OMAP3430_REV_ES1_0) {
cpu_mask = RATE_IN_3430ES1;
@@ -3681,7 +3677,7 @@ int __init omap3xxx_clk_init(void)
* Lock DPLL5 -- here only until other device init code can
* handle this
*/
- if (!cpu_is_ti81xx() && (omap_rev() >= OMAP3430_REV_ES2_0))
+ if (omap_rev() >= OMAP3430_REV_ES2_0)
omap3_clk_lock_dpll5();
/* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
diff --git a/arch/arm/mach-omap2/clkt2xxx_apll.c b/arch/arm/mach-omap2/clkt2xxx_apll.c
deleted file mode 100644
index c78e893eba7d..000000000000
--- a/arch/arm/mach-omap2/clkt2xxx_apll.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * OMAP2xxx APLL clock control functions
- *
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2010 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
- * Gordon McNutt and RidgeRun, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-
-#include "clock.h"
-#include "clock2xxx.h"
-#include "cm2xxx.h"
-#include "cm-regbits-24xx.h"
-
-/* CM_CLKEN_PLL.EN_{54,96}M_PLL options (24XX) */
-#define EN_APLL_STOPPED 0
-#define EN_APLL_LOCKED 3
-
-/* CM_CLKSEL1_PLL.APLLS_CLKIN options (24XX) */
-#define APLLS_CLKIN_19_2MHZ 0
-#define APLLS_CLKIN_13MHZ 2
-#define APLLS_CLKIN_12MHZ 3
-
-/* Private functions */
-
-/**
- * omap2xxx_clk_apll_locked - is the APLL locked?
- * @hw: struct clk_hw * of the APLL to check
- *
- * If the APLL IP block referred to by @hw indicates that it's locked,
- * return true; otherwise, return false.
- */
-static bool omap2xxx_clk_apll_locked(struct clk_hw *hw)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- u32 r, apll_mask;
-
- apll_mask = EN_APLL_LOCKED << clk->enable_bit;
-
- r = omap2xxx_cm_get_pll_status();
-
- return ((r & apll_mask) == apll_mask) ? true : false;
-}
-
-int omap2_clk_apll96_enable(struct clk_hw *hw)
-{
- return omap2xxx_cm_apll96_enable();
-}
-
-int omap2_clk_apll54_enable(struct clk_hw *hw)
-{
- return omap2xxx_cm_apll54_enable();
-}
-
-static void _apll96_allow_idle(struct clk_hw_omap *clk)
-{
- omap2xxx_cm_set_apll96_auto_low_power_stop();
-}
-
-static void _apll96_deny_idle(struct clk_hw_omap *clk)
-{
- omap2xxx_cm_set_apll96_disable_autoidle();
-}
-
-static void _apll54_allow_idle(struct clk_hw_omap *clk)
-{
- omap2xxx_cm_set_apll54_auto_low_power_stop();
-}
-
-static void _apll54_deny_idle(struct clk_hw_omap *clk)
-{
- omap2xxx_cm_set_apll54_disable_autoidle();
-}
-
-void omap2_clk_apll96_disable(struct clk_hw *hw)
-{
- omap2xxx_cm_apll96_disable();
-}
-
-void omap2_clk_apll54_disable(struct clk_hw *hw)
-{
- omap2xxx_cm_apll54_disable();
-}
-
-unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return (omap2xxx_clk_apll_locked(hw)) ? 54000000 : 0;
-}
-
-unsigned long omap2_clk_apll96_recalc(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return (omap2xxx_clk_apll_locked(hw)) ? 96000000 : 0;
-}
-
-/* Public data */
-const struct clk_hw_omap_ops clkhwops_apll54 = {
- .allow_idle = _apll54_allow_idle,
- .deny_idle = _apll54_deny_idle,
-};
-
-const struct clk_hw_omap_ops clkhwops_apll96 = {
- .allow_idle = _apll96_allow_idle,
- .deny_idle = _apll96_deny_idle,
-};
-
-/* Public functions */
-
-u32 omap2xxx_get_apll_clkin(void)
-{
- u32 aplls, srate = 0;
-
- aplls = omap2xxx_cm_get_pll_config();
- aplls &= OMAP24XX_APLLS_CLKIN_MASK;
- aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT;
-
- if (aplls == APLLS_CLKIN_19_2MHZ)
- srate = 19200000;
- else if (aplls == APLLS_CLKIN_13MHZ)
- srate = 13000000;
- else if (aplls == APLLS_CLKIN_12MHZ)
- srate = 12000000;
-
- return srate;
-}
-
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index a4282e79143e..1cf9dd85248a 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -177,7 +177,6 @@ struct clksel {
u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
-int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk);
void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk);
void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk);
diff --git a/arch/arm/mach-omap2/clock2xxx.h b/arch/arm/mach-omap2/clock2xxx.h
index a090225ceeba..125c37614848 100644
--- a/arch/arm/mach-omap2/clock2xxx.h
+++ b/arch/arm/mach-omap2/clock2xxx.h
@@ -22,12 +22,7 @@ unsigned long omap2xxx_sys_clk_recalc(struct clk_hw *clk,
unsigned long omap2_osc_clk_recalc(struct clk_hw *clk,
unsigned long parent_rate);
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
-unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-unsigned long omap2_clk_apll96_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
unsigned long omap2xxx_clk_get_core_rate(void);
-u32 omap2xxx_get_apll_clkin(void);
u32 omap2xxx_get_sysclkdiv(void);
void omap2xxx_clk_prepare_for_reboot(void);
void omap2xxx_clkt_vps_check_bootloader_rates(void);
@@ -46,11 +41,5 @@ int omap2430_clk_init(void);
#endif
extern struct clk_hw *dclk_hw;
-int omap2_enable_osc_ck(struct clk_hw *hw);
-void omap2_disable_osc_ck(struct clk_hw *hw);
-int omap2_clk_apll96_enable(struct clk_hw *hw);
-int omap2_clk_apll54_enable(struct clk_hw *hw);
-void omap2_clk_apll96_disable(struct clk_hw *hw);
-void omap2_clk_apll54_disable(struct clk_hw *hw);
#endif
diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c
index a96d901b1d5d..ef62ac9dcd05 100644
--- a/arch/arm/mach-omap2/cm2xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx.c
@@ -370,16 +370,6 @@ u32 omap2xxx_cm_get_core_pll_config(void)
return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
}
-u32 omap2xxx_cm_get_pll_config(void)
-{
- return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
-}
-
-u32 omap2xxx_cm_get_pll_status(void)
-{
- return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
-}
-
void omap2xxx_cm_set_mod_dividers(u32 mpu, u32 dsp, u32 gfx, u32 core, u32 mdm)
{
u32 tmp;
diff --git a/arch/arm/mach-omap2/cm2xxx.h b/arch/arm/mach-omap2/cm2xxx.h
index c89502b168ae..83b6c597b0e1 100644
--- a/arch/arm/mach-omap2/cm2xxx.h
+++ b/arch/arm/mach-omap2/cm2xxx.h
@@ -60,8 +60,6 @@ extern int omap2xxx_cm_fclks_active(void);
extern int omap2xxx_cm_mpu_retention_allowed(void);
extern u32 omap2xxx_cm_get_core_clk_src(void);
extern u32 omap2xxx_cm_get_core_pll_config(void);
-extern u32 omap2xxx_cm_get_pll_config(void);
-extern u32 omap2xxx_cm_get_pll_status(void);
extern void omap2xxx_cm_set_mod_dividers(u32 mpu, u32 dsp, u32 gfx, u32 core,
u32 mdm);
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index b9ad463a368a..cc5aac784278 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -72,27 +72,6 @@ static inline u32 am33xx_cm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx)
return v;
}
-static inline u32 am33xx_cm_set_reg_bits(u32 bits, s16 inst, s16 idx)
-{
- return am33xx_cm_rmw_reg_bits(bits, bits, inst, idx);
-}
-
-static inline u32 am33xx_cm_clear_reg_bits(u32 bits, s16 inst, s16 idx)
-{
- return am33xx_cm_rmw_reg_bits(bits, 0x0, inst, idx);
-}
-
-static inline u32 am33xx_cm_read_reg_bits(u16 inst, s16 idx, u32 mask)
-{
- u32 v;
-
- v = am33xx_cm_read_reg(inst, idx);
- v &= mask;
- v >>= __ffs(mask);
-
- return v;
-}
-
/**
* _clkctrl_idlest - read a CM_*_CLKCTRL register; mask & shift IDLEST bitfield
* @inst: CM instance register offset (*_INST macro)
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 65b4371b3361..46e24581d624 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -35,6 +35,7 @@
#include <linux/irqchip/irq-omap-intc.h>
#include <asm/proc-fns.h>
+#include <asm/hardware/cache-l2x0.h>
#include "i2c.h"
#include "serial.h"
@@ -94,11 +95,18 @@ extern void omap3_gptimer_timer_init(void);
extern void omap4_local_timer_init(void);
#ifdef CONFIG_CACHE_L2X0
int omap_l2_cache_init(void);
+#define OMAP_L2C_AUX_CTRL (L2C_AUX_CTRL_SHARED_OVERRIDE | \
+ L310_AUX_CTRL_DATA_PREFETCH | \
+ L310_AUX_CTRL_INSTR_PREFETCH)
+void omap4_l2c310_write_sec(unsigned long val, unsigned reg);
#else
static inline int omap_l2_cache_init(void)
{
return 0;
}
+
+#define OMAP_L2C_AUX_CTRL 0
+#define omap4_l2c310_write_sec NULL
#endif
extern void omap5_realtime_timer_init(void);
@@ -220,6 +228,7 @@ extern struct device *omap2_get_iva_device(void);
extern struct device *omap2_get_l3_device(void);
extern struct device *omap4_get_dsp_device(void);
+unsigned int omap4_xlate_irq(unsigned int hwirq);
void omap_gic_of_init(void);
#ifdef CONFIG_CACHE_L2X0
@@ -258,6 +267,7 @@ extern void omap4_cpu_die(unsigned int cpu);
extern struct smp_operations omap4_smp_ops;
extern void omap5_secondary_startup(void);
+extern void omap5_secondary_hyp_startup(void);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
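
For context, the OMAP_L2C_AUX_CTRL value defined here and the 0xcf9fffff mask added to the OMAP4/AM43 machine descriptors earlier in this diff are consumed by the generic L2C-310 init code, which combines them with the hardware default before enabling the outer cache, roughly as follows (a sketch of the usual l2x0 init flow; variable names illustrative):

	u32 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= l2c_aux_mask;	/* 0xcf9fffff: clear prefetch/shared-override bits */
	aux |= l2c_aux_val;	/* OMAP_L2C_AUX_CTRL: set the desired ones */
	/*
	 * The result is written back through .l2c_write_sec (here
	 * omap4_l2c310_write_sec), since the auxiliary control register is
	 * secure-only on OMAP4-class parts.
	 */

On !CONFIG_CACHE_L2X0 builds the macro collapses to 0 and the write hook to NULL, which is why the #else branch above gains the two stub definitions.
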
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index 0fba6d1130a9..b8a487181210 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -290,6 +290,10 @@
#define OMAP5XXX_CONTROL_STATUS 0x134
#define OMAP5_DEVICETYPE_MASK (0x7 << 6)
+/* DRA7XX CONTROL CORE BOOTSTRAP */
+#define DRA7_CTRL_CORE_BOOTSTRAP 0x6c4
+#define DRA7_SPEEDSELECT_MASK (0x3 << 8)
+
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index 0e58e5a85d53..fc712240e5fd 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -36,26 +36,6 @@
/* Static rate multiplier for OMAP4 REGM4XEN clocks */
#define OMAP4430_REGM4XEN_MULT 4
-/* Supported only on OMAP4 */
-int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk)
-{
- u32 v;
- u32 mask;
-
- if (!clk || !clk->clksel_reg)
- return -EINVAL;
-
- mask = clk->flags & CLOCK_CLKOUTX2 ?
- OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
- OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
-
- v = omap2_clk_readl(clk, clk->clksel_reg);
- v &= mask;
- v >>= __ffs(mask);
-
- return v;
-}
-
void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
{
u32 v;
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4993d4bfe9b2..6d1dffca6c7b 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -22,6 +22,7 @@
/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
+#define API_HYP_ENTRY 0x102
/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -41,6 +42,26 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
b secondary_startup
ENDPROC(omap5_secondary_startup)
/*
+ * Same as omap5_secondary_startup except we call into the ROM to
+ * enable HYP mode first. This is called instead of
+ * omap5_secondary_startup if the primary CPU was put into HYP mode by
+ * the boot loader.
+ */
+ENTRY(omap5_secondary_hyp_startup)
+wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
+ ldr r0, [r2]
+ mov r0, r0, lsr #5
+ mrc p15, 0, r4, c0, c0, 5
+ and r4, r4, #0x0f
+ cmp r0, r4
+ bne wait_2
+ ldr r12, =API_HYP_ENTRY
+ adr r0, hyp_boot
+ smc #0
+hyp_boot:
+ b secondary_startup
+ENDPROC(omap5_secondary_hyp_startup)
+/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
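
Selecting between the two OMAP5 entry points is up to the primary CPU's SMP bring-up code: it should hand the ROM the HYP-aware address only when the boot loader left the kernel in HYP mode. A hypothetical sketch of that decision (names illustrative; the real selection lives in the OMAP SMP boot code, not in this file):

static phys_addr_t omap5_secondary_entry_phys(void)
{
	/*
	 * If the primary CPU was entered in HYP mode, the secondary must be
	 * routed through the ROM's API_HYP_ENTRY service as well, so point
	 * AuxCoreBoot at omap5_secondary_hyp_startup instead.
	 */
	if (is_hyp_mode_available())
		return virt_to_phys(omap5_secondary_hyp_startup);

	return virt_to_phys(omap5_secondary_startup);
}
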
diff --git a/arch/arm/mach-omap2/omap-pm-noop.c b/arch/arm/mach-omap2/omap-pm-noop.c
index 6a3be2bebddb..a1ee8066958e 100644
--- a/arch/arm/mach-omap2/omap-pm-noop.c
+++ b/arch/arm/mach-omap2/omap-pm-noop.c
@@ -86,200 +86,10 @@ int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r)
return 0;
}
-int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
- long t)
-{
- if (!req_dev || !dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- }
-
- if (t == -1)
- pr_debug("OMAP PM: remove max device latency constraint: dev %s\n",
- dev_name(dev));
- else
- pr_debug("OMAP PM: add max device latency constraint: dev %s, t = %ld usec\n",
- dev_name(dev), t);
-
- /*
- * For current Linux, this needs to map the device to a
- * powerdomain, then go through the list of current max lat
- * constraints on that powerdomain and find the smallest. If
- * the latency constraint has changed, the code should
- * recompute the state to enter for the next powerdomain
- * state. Conceivably, this code should also determine
- * whether to actually disable the device clocks or not,
- * depending on how long it takes to re-enable the clocks.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_max_sdma_lat(struct device *dev, long t)
-{
- if (!dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- }
-
- if (t == -1)
- pr_debug("OMAP PM: remove max DMA latency constraint: dev %s\n",
- dev_name(dev));
- else
- pr_debug("OMAP PM: add max DMA latency constraint: dev %s, t = %ld usec\n",
- dev_name(dev), t);
-
- /*
- * For current Linux PM QOS params, this code should scan the
- * list of maximum CPU and DMA latencies and select the
- * smallest, then set cpu_dma_latency pm_qos_param
- * accordingly.
- *
- * For future Linux PM QOS params, with separate CPU and DMA
- * latency params, this code should just set the dma_latency param.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_min_clk_rate(struct device *dev, struct clk *c, long r)
-{
- if (!dev || !c || r < 0) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- }
-
- if (r == 0)
- pr_debug("OMAP PM: remove min clk rate constraint: dev %s\n",
- dev_name(dev));
- else
- pr_debug("OMAP PM: add min clk rate constraint: dev %s, rate = %ld Hz\n",
- dev_name(dev), r);
-
- /*
- * Code in a real implementation should keep track of these
- * constraints on the clock, and determine the highest minimum
- * clock rate. It should iterate over each OPP and determine
- * whether the OPP will result in a clock rate that would
- * satisfy this constraint (and any other PM constraint in effect
- * at that time). Once it finds the lowest-voltage OPP that
- * meets those conditions, it should switch to it, or return
- * an error if the code is not capable of doing so.
- */
-
- return 0;
-}
-
/*
* DSP Bridge-specific constraints
*/
-const struct omap_opp *omap_pm_dsp_get_opp_table(void)
-{
- pr_debug("OMAP PM: DSP request for OPP table\n");
-
- /*
- * Return DSP frequency table here: The final item in the
- * array should have .rate = .opp_id = 0.
- */
-
- return NULL;
-}
-
-void omap_pm_dsp_set_min_opp(u8 opp_id)
-{
- if (opp_id == 0) {
- WARN_ON(1);
- return;
- }
-
- pr_debug("OMAP PM: DSP requests minimum VDD1 OPP to be %d\n", opp_id);
-
- /*
- *
- * For l-o dev tree, our VDD1 clk is keyed on OPP ID, so we
- * can just test to see which is higher, the CPU's desired OPP
- * ID or the DSP's desired OPP ID, and use whichever is
- * highest.
- *
- * In CDP12.14+, the VDD1 OPP custom clock that controls the DSP
- * rate is keyed on MPU speed, not the OPP ID. So we need to
- * map the OPP ID to the MPU speed for use with clk_set_rate()
- * if it is higher than the current OPP clock rate.
- *
- */
-}
-
-
-u8 omap_pm_dsp_get_opp(void)
-{
- pr_debug("OMAP PM: DSP requests current DSP OPP ID\n");
-
- /*
- * For l-o dev tree, call clk_get_rate() on VDD1 OPP clock
- *
- * CDP12.14+:
- * Call clk_get_rate() on the OPP custom clock, map that to an
- * OPP ID using the tables defined in board-*.c/chip-*.c files.
- */
-
- return 0;
-}
-
-/*
- * CPUFreq-originated constraint
- *
- * In the future, this should be handled by custom OPP clocktype
- * functions.
- */
-
-struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void)
-{
- pr_debug("OMAP PM: CPUFreq request for frequency table\n");
-
- /*
- * Return CPUFreq frequency table here: loop over
- * all VDD1 clkrates, pull out the mpu_ck frequencies, build
- * table
- */
-
- return NULL;
-}
-
-void omap_pm_cpu_set_freq(unsigned long f)
-{
- if (f == 0) {
- WARN_ON(1);
- return;
- }
-
- pr_debug("OMAP PM: CPUFreq requests CPU frequency to be set to %lu\n",
- f);
-
- /*
- * For l-o dev tree, determine whether MPU freq or DSP OPP id
- * freq is higher. Find the OPP ID corresponding to the
- * higher frequency. Call clk_round_rate() and clk_set_rate()
- * on the OPP custom clock.
- *
- * CDP should just be able to set the VDD1 OPP clock rate here.
- */
-}
-
-unsigned long omap_pm_cpu_get_freq(void)
-{
- pr_debug("OMAP PM: CPUFreq requests current CPU frequency\n");
-
- /*
- * Call clk_get_rate() on the mpu_ck.
- */
-
- return 0;
-}
/**
* omap_pm_enable_off_mode - notify OMAP PM that off-mode is enabled
@@ -363,9 +173,3 @@ int __init omap_pm_if_init(void)
{
return 0;
}
-
-void omap_pm_if_exit(void)
-{
- /* Deallocate CPUFreq frequency table here */
-}
-
diff --git a/arch/arm/mach-omap2/omap-pm.h b/arch/arm/mach-omap2/omap-pm.h
index 1d777e63e05c..109bef5538eb 100644
--- a/arch/arm/mach-omap2/omap-pm.h
+++ b/arch/arm/mach-omap2/omap-pm.h
@@ -50,14 +50,6 @@ int __init omap_pm_if_early_init(void);
*/
int __init omap_pm_if_init(void);
-/**
- * omap_pm_if_exit - OMAP PM exit code
- *
- * Exit code; currently unused. The "_if_" is to avoid name
- * collisions with the PM idle-loop code.
- */
-void omap_pm_if_exit(void);
-
/*
* Device-driver-originated constraints (via board-*.c files, platform_data)
*/
@@ -132,163 +124,6 @@ int omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t);
int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r);
-/**
- * omap_pm_set_max_dev_wakeup_lat - set the maximum device enable latency
- * @req_dev: struct device * requesting the constraint, or NULL if none
- * @dev: struct device * to set the constraint on
- * @t: maximum device wakeup latency in microseconds
- *
- * Request that the maximum amount of time necessary for a device @dev
- * to become accessible after its clocks are enabled should be no
- * greater than @t microseconds. Specifically, this represents the
- * time from when a device driver enables device clocks with
- * clk_enable(), to when the register reads and writes on the device
- * will succeed. This function should be called before clk_disable()
- * is called, since the power state transition decision may be made
- * during clk_disable().
- *
- * It is intended that underlying PM code will use this information to
- * determine what power state to put the powerdomain enclosing this
- * device into.
- *
- * Multiple calls to omap_pm_set_max_dev_wakeup_lat() will replace the
- * previous wakeup latency values for this device. To remove the
- * wakeup latency restriction for this device, call with t = -1.
- *
- * Returns -EINVAL for an invalid argument, -ERANGE if the constraint
- * is not satisfiable, or 0 upon success.
- */
-int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
- long t);
-
-
-/**
- * omap_pm_set_max_sdma_lat - set the maximum system DMA transfer start latency
- * @dev: struct device *
- * @t: maximum DMA transfer start latency in microseconds
- *
- * Request that the maximum system DMA transfer start latency for this
- * device 'dev' should be no greater than 't' microseconds. "DMA
- * transfer start latency" here is defined as the elapsed time from
- * when a device (e.g., McBSP) requests that a system DMA transfer
- * start or continue, to the time at which data starts to flow into
- * that device from the system DMA controller.
- *
- * It is intended that underlying PM code will use this information to
- * determine what power state to put the CORE powerdomain into.
- *
- * Since system DMA transfers may not involve the MPU, this function
- * will not affect MPU wakeup latency. Use set_max_cpu_lat() to do
- * so. Similarly, this function will not affect device wakeup latency
- * -- use set_max_dev_wakeup_lat() to affect that.
- *
- * Multiple calls to set_max_sdma_lat() will replace the previous t
- * value for this device. To remove the maximum DMA latency for this
- * device, call with t = -1.
- *
- * Returns -EINVAL for an invalid argument, -ERANGE if the constraint
- * is not satisfiable, or 0 upon success.
- */
-int omap_pm_set_max_sdma_lat(struct device *dev, long t);
-
-
-/**
- * omap_pm_set_min_clk_rate - set minimum clock rate requested by @dev
- * @dev: struct device * requesting the constraint
- * @clk: struct clk * to set the minimum rate constraint on
- * @r: minimum rate in Hz
- *
- * Request that the minimum clock rate on the device @dev's clk @clk
- * be no less than @r Hz.
- *
- * It is expected that the OMAP PM code will use this information to
- * find an OPP or clock setting that will satisfy this clock rate
- * constraint, along with any other applicable system constraints on
- * the clock rate or corresponding voltage, etc.
- *
- * omap_pm_set_min_clk_rate() differs from the clock code's
- * clk_set_rate() in that it considers other constraints before taking
- * any hardware action, and may change a system OPP rather than just a
- * clock rate. clk_set_rate() is intended to be a low-level
- * interface.
- *
- * omap_pm_set_min_clk_rate() is easily open to abuse. A better API
- * would be something like "omap_pm_set_min_dev_performance()";
- * however, there is no easily-generalizable concept of performance
- * that applies to all devices. Only a device (and possibly the
- * device subsystem) has both the subsystem-specific knowledge, and
- * the hardware IP block-specific knowledge, to translate a constraint
- * on "touchscreen sampling accuracy" or "number of pixels or polygons
- * rendered per second" to a clock rate. This translation can be
- * dependent on the hardware IP block's revision, or firmware version,
- * and the driver is the only code on the system that has this
- * information and can know how to translate that into a clock rate.
- *
- * The intended use-case for this function is for userspace or other
- * kernel code to communicate a particular performance requirement to
- * a subsystem; then for the subsystem to communicate that requirement
- * to something that is meaningful to the device driver; then for the
- * device driver to convert that requirement to a clock rate, and to
- * then call omap_pm_set_min_clk_rate().
- *
- * Users of this function (such as device drivers) should not simply
- * call this function with some high clock rate to ensure "high
- * performance." Rather, the device driver should take a performance
- * constraint from its subsystem, such as "render at least X polygons
- * per second," and use some formula or table to convert that into a
- * clock rate constraint given the hardware type and hardware
- * revision. Device drivers or subsystems should not assume that they
- * know how to make a power/performance tradeoff - some device use
- * cases may tolerate a lower-fidelity device function for lower power
- * consumption; others may demand a higher-fidelity device function,
- * no matter what the power consumption.
- *
- * Multiple calls to omap_pm_set_min_clk_rate() will replace the
- * previous rate value for the device @dev. To remove the minimum clock
- * rate constraint for the device, call with r = 0.
- *
- * Returns -EINVAL for an invalid argument, -ERANGE if the constraint
- * is not satisfiable, or 0 upon success.
- */
-int omap_pm_set_min_clk_rate(struct device *dev, struct clk *c, long r);
-
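The call flow intended by the paragraph above, sketched with a made-up helper and frame-rate figure purely for illustration (panel_min_pixclk_for_fps() and pix_clk are not part of this interface):

	/* a display driver turning "sustain 30 fps" into a clock-rate floor */
	long rate = panel_min_pixclk_for_fps(30);	/* hypothetical helper */
	int ret = omap_pm_set_min_clk_rate(dev, pix_clk, rate);
	if (ret)
		dev_warn(dev, "min clk rate constraint not satisfiable\n");

	/* later, when the use case ends: r = 0 removes the constraint */
	omap_pm_set_min_clk_rate(dev, pix_clk, 0);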
-/*
- * DSP Bridge-specific constraints
- */
-
-/**
- * omap_pm_dsp_get_opp_table - get OPP->DSP clock frequency table
- *
- * Intended for use by DSPBridge. Returns an array of OPP->DSP clock
- * frequency entries. The final item in the array should have .rate =
- * .opp_id = 0.
- */
-const struct omap_opp *omap_pm_dsp_get_opp_table(void);
-
-/**
- * omap_pm_dsp_set_min_opp - receive desired OPP target ID from DSP Bridge
- * @opp_id: target DSP OPP ID
- *
- * Set a minimum OPP ID for the DSP. This is intended to be called
- * only from the DSP Bridge MPU-side driver. Unfortunately, the only
- * information that code receives from the DSP/BIOS load estimator is the
- * target OPP ID; hence, this interface. No return value.
- */
-void omap_pm_dsp_set_min_opp(u8 opp_id);
-
-/**
- * omap_pm_dsp_get_opp - report the current DSP OPP ID
- *
- * Report the current OPP for the DSP. Since on OMAP3, the DSP and
- * MPU share a single voltage domain, the OPP ID returned back may
- * represent a higher DSP speed than the OPP requested via
- * omap_pm_dsp_set_min_opp().
- *
- * Returns the current VDD1 OPP ID, or 0 upon error.
- */
-u8 omap_pm_dsp_get_opp(void);
-
-
/*
* CPUFreq-originated constraint
*
@@ -296,33 +131,6 @@ u8 omap_pm_dsp_get_opp(void);
* functions.
*/
-/**
- * omap_pm_cpu_get_freq_table - return a cpufreq_frequency_table array ptr
- *
- * Provide a frequency table usable by CPUFreq for the current chip/board.
- * Returns a pointer to a struct cpufreq_frequency_table array or NULL
- * upon error.
- */
-struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void);
-
-/**
- * omap_pm_cpu_set_freq - set the current minimum MPU frequency
- * @f: MPU frequency in Hz
- *
- * Set the current minimum CPU frequency. The actual CPU frequency
- * used could end up higher if the DSP requested a higher OPP.
- * Intended to be called by plat-omap/cpu_omap.c:omap_target(). No
- * return value.
- */
-void omap_pm_cpu_set_freq(unsigned long f);
-
-/**
- * omap_pm_cpu_get_freq - report the current CPU frequency
- *
- * Returns the current MPU frequency, or 0 upon error.
- */
-unsigned long omap_pm_cpu_get_freq(void);
-
/*
* Device context loss tracking
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 256e84ef0f67..5305ec7341ec 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -22,6 +22,7 @@
#include <linux/irqchip/arm-gic.h>
#include <asm/smp_scu.h>
+#include <asm/virt.h>
#include "omap-secure.h"
#include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (omap_secure_apis_support())
omap_auxcoreboot_addr(virt_to_phys(startup_addr));
else
- writel_relaxed(virt_to_phys(omap5_secondary_startup),
- base + OMAP_AUX_CORE_BOOT_1);
+ /*
+ * If the boot CPU is in HYP mode then start secondary
+ * CPU in HYP mode as well.
+ */
+ if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+ writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
+ else
+ writel_relaxed(virt_to_phys(omap5_secondary_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
}
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b7cb44abe49b..2418bdf28ca2 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -166,7 +166,7 @@ void __iomem *omap4_get_l2cache_base(void)
return l2cache_base;
}
-static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
+void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
{
unsigned smc_op;
@@ -201,24 +201,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
int __init omap_l2_cache_init(void)
{
- u32 aux_ctrl;
-
/* Static mapping, never released */
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
if (WARN_ON(!l2cache_base))
return -ENOMEM;
-
- /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
- aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
- L310_AUX_CTRL_DATA_PREFETCH |
- L310_AUX_CTRL_INSTR_PREFETCH;
-
- outer_cache.write_sec = omap4_l2c310_write_sec;
- if (of_have_populated_dt())
- l2x0_of_init(aux_ctrl, 0xcf9fffff);
- else
- l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
-
return 0;
}
#endif
@@ -256,6 +242,38 @@ static int __init omap4_sar_ram_init(void)
}
omap_early_initcall(omap4_sar_ram_init);
+static struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int omap4_xlate_irq(unsigned int hwirq)
+{
+ struct of_phandle_args irq_data;
+ unsigned int irq;
+
+ if (!gic_node)
+ gic_node = of_find_matching_node(NULL, gic_match);
+
+ if (WARN_ON(!gic_node))
+ return hwirq;
+
+ irq_data.np = gic_node;
+ irq_data.args_count = 3;
+ irq_data.args[0] = 0;
+ irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
+ irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ irq = irq_create_of_mapping(&irq_data);
+ if (WARN_ON(!irq))
+ irq = hwirq;
+
+ return irq;
+}
+
void __init omap_gic_of_init(void)
{
struct device_node *np;
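The omap4_xlate_irq() helper added above converts a legacy GIC hardware IRQ number (offset by OMAP44XX_IRQ_GIC_START) into a Linux virq through the GIC's irqdomain. A usage sketch in the style of the callers later in this series; the choice of hwirq 7 (the PMIC line) is just an example:

	/* translate the legacy number before handing it to the IRQ core */
	unsigned int virq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
	/* virq is now valid for request_irq(), platform resources, etc. */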
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d7e6d5c8d171..92afb723dcfc 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2155,8 +2155,8 @@ static int _enable(struct omap_hwmod *oh)
if (soc_ops.disable_module)
soc_ops.disable_module(oh);
_disable_clocks(oh);
- pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
- oh->name, r);
+ pr_err("omap_hwmod: %s: _wait_target_ready failed: %d\n",
+ oh->name, r);
if (oh->clkdm)
clkdm_hwmod_disable(oh->clkdm, oh);
@@ -3384,91 +3384,6 @@ int omap_hwmod_shutdown(struct omap_hwmod *oh)
return 0;
}
-/**
- * omap_hwmod_enable_clocks - enable main_clk, all interface clocks
- * @oh: struct omap_hwmod *oh
- *
- * Intended to be called by the omap_device code.
- */
-int omap_hwmod_enable_clocks(struct omap_hwmod *oh)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&oh->_lock, flags);
- _enable_clocks(oh);
- spin_unlock_irqrestore(&oh->_lock, flags);
-
- return 0;
-}
-
-/**
- * omap_hwmod_disable_clocks - disable main_clk, all interface clocks
- * @oh: struct omap_hwmod *oh
- *
- * Intended to be called by the omap_device code.
- */
-int omap_hwmod_disable_clocks(struct omap_hwmod *oh)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&oh->_lock, flags);
- _disable_clocks(oh);
- spin_unlock_irqrestore(&oh->_lock, flags);
-
- return 0;
-}
-
-/**
- * omap_hwmod_ocp_barrier - wait for posted writes against the hwmod to complete
- * @oh: struct omap_hwmod *oh
- *
- * Intended to be called by drivers and core code when all posted
- * writes to a device must complete before continuing further
- * execution (for example, after clearing some device IRQSTATUS
- * register bits)
- *
- * XXX what about targets with multiple OCP threads?
- */
-void omap_hwmod_ocp_barrier(struct omap_hwmod *oh)
-{
- BUG_ON(!oh);
-
- if (!oh->class->sysc || !oh->class->sysc->sysc_flags) {
- WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n",
- oh->name);
- return;
- }
-
- /*
- * Forces posted writes to complete on the OCP thread handling
- * register writes
- */
- omap_hwmod_read(oh, oh->class->sysc->sysc_offs);
-}
-
-/**
- * omap_hwmod_reset - reset the hwmod
- * @oh: struct omap_hwmod *
- *
- * Under some conditions, a driver may wish to reset the entire device.
- * Called from omap_device code. Returns -EINVAL on error or passes along
- * the return value from _reset().
- */
-int omap_hwmod_reset(struct omap_hwmod *oh)
-{
- int r;
- unsigned long flags;
-
- if (!oh)
- return -EINVAL;
-
- spin_lock_irqsave(&oh->_lock, flags);
- r = _reset(oh);
- spin_unlock_irqrestore(&oh->_lock, flags);
-
- return r;
-}
-
/*
* IP block data retrieval functions
*/
@@ -3534,9 +3449,15 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
mpu_irqs_cnt = _count_mpu_irqs(oh);
for (i = 0; i < mpu_irqs_cnt; i++) {
+ unsigned int irq;
+
+ if (oh->xlate_irq)
+ irq = oh->xlate_irq((oh->mpu_irqs + i)->irq);
+ else
+ irq = (oh->mpu_irqs + i)->irq;
(res + r)->name = (oh->mpu_irqs + i)->name;
- (res + r)->start = (oh->mpu_irqs + i)->irq;
- (res + r)->end = (oh->mpu_irqs + i)->irq;
+ (res + r)->start = irq;
+ (res + r)->end = irq;
(res + r)->flags = IORESOURCE_IRQ;
r++;
}
@@ -3723,52 +3644,12 @@ void __iomem *omap_hwmod_get_mpu_rt_va(struct omap_hwmod *oh)
return oh->_mpu_rt_va;
}
-/**
- * omap_hwmod_add_initiator_dep - add sleepdep from @init_oh to @oh
- * @oh: struct omap_hwmod *
- * @init_oh: struct omap_hwmod * (initiator)
- *
- * Add a sleep dependency between the initiator @init_oh and @oh.
- * Intended to be called by DSP/Bridge code via platform_data for the
- * DSP case; and by the DMA code in the sDMA case. DMA code, *Bridge
- * code needs to add/del initiator dependencies dynamically
- * before/after accessing a device. Returns the return value from
- * _add_initiator_dep().
- *
- * XXX Keep a usecount in the clockdomain code
- */
-int omap_hwmod_add_initiator_dep(struct omap_hwmod *oh,
- struct omap_hwmod *init_oh)
-{
- return _add_initiator_dep(oh, init_oh);
-}
-
/*
* XXX what about functions for drivers to save/restore ocp_sysconfig
* for context save/restore operations?
*/
/**
- * omap_hwmod_del_initiator_dep - remove sleepdep from @init_oh to @oh
- * @oh: struct omap_hwmod *
- * @init_oh: struct omap_hwmod * (initiator)
- *
- * Remove a sleep dependency between the initiator @init_oh and @oh.
- * Intended to be called by DSP/Bridge code via platform_data for the
- * DSP case; and by the DMA code in the sDMA case. DMA code, *Bridge
- * code needs to add/del initiator dependencies dynamically
- * before/after accessing a device. Returns the return value from
- * _del_initiator_dep().
- *
- * XXX Keep a usecount in the clockdomain code
- */
-int omap_hwmod_del_initiator_dep(struct omap_hwmod *oh,
- struct omap_hwmod *init_oh)
-{
- return _del_initiator_dep(oh, init_oh);
-}
-
-/**
* omap_hwmod_enable_wakeup - allow device to wake up the system
* @oh: struct omap_hwmod *
*
@@ -3889,33 +3770,6 @@ int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name)
}
/**
- * omap_hwmod_read_hardreset - read the HW reset line state of submodules
- * contained in the hwmod module
- * @oh: struct omap_hwmod *
- * @name: name of the reset line to look up and read
- *
- * Return the current state of the hwmod @oh's reset line named @name:
- * returns -EINVAL upon parameter error or if this operation
- * is unsupported on the current OMAP; otherwise, passes along the return
- * value from _read_hardreset().
- */
-int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name)
-{
- int ret;
- unsigned long flags;
-
- if (!oh)
- return -EINVAL;
-
- spin_lock_irqsave(&oh->_lock, flags);
- ret = _read_hardreset(oh, name);
- spin_unlock_irqrestore(&oh->_lock, flags);
-
- return ret;
-}
-
-
-/**
* omap_hwmod_for_each_by_class - call @fn for each hwmod of class @classname
* @classname: struct omap_hwmod_class name to search for
* @fn: callback function pointer to call for each hwmod in class @classname
@@ -4025,86 +3879,6 @@ int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh)
}
/**
- * omap_hwmod_no_setup_reset - prevent a hwmod from being reset upon setup
- * @oh: struct omap_hwmod *
- *
- * Prevent the hwmod @oh from being reset during the setup process.
- * Intended for use by board-*.c files on boards with devices that
- * cannot tolerate being reset. Must be called before the hwmod has
- * been set up. Returns 0 upon success or negative error code upon
- * failure.
- */
-int omap_hwmod_no_setup_reset(struct omap_hwmod *oh)
-{
- if (!oh)
- return -EINVAL;
-
- if (oh->_state != _HWMOD_STATE_REGISTERED) {
- pr_err("omap_hwmod: %s: cannot prevent setup reset; in wrong state\n",
- oh->name);
- return -EINVAL;
- }
-
- oh->flags |= HWMOD_INIT_NO_RESET;
-
- return 0;
-}
-
-/**
- * omap_hwmod_pad_route_irq - route an I/O pad wakeup to a particular MPU IRQ
- * @oh: struct omap_hwmod * containing hwmod mux entries
- * @pad_idx: array index in oh->mux of the hwmod mux entry to route wakeup
- * @irq_idx: the hwmod mpu_irqs array index of the IRQ to trigger on wakeup
- *
- * When an I/O pad wakeup arrives for the dynamic or wakeup hwmod mux
- * entry number @pad_idx for the hwmod @oh, trigger the interrupt
- * service routine for the hwmod's mpu_irqs array index @irq_idx. If
- * this function is not called for a given pad_idx, then the ISR
- * associated with @oh's first MPU IRQ will be triggered when an I/O
- * pad wakeup occurs on that pad. Note that @pad_idx is the index of
- * the _dynamic or wakeup_ entry: if there are other entries not
- * marked with OMAP_DEVICE_PAD_WAKEUP or OMAP_DEVICE_PAD_REMUX, these
- * entries are NOT COUNTED in the dynamic pad index. This function
- * must be called separately for each pad that requires its interrupt
- * to be re-routed this way. Returns -EINVAL if there is an argument
- * problem or if @oh does not have hwmod mux entries or MPU IRQs;
- * returns -ENOMEM if memory cannot be allocated; or 0 upon success.
- *
- * XXX This function interface is fragile. Rather than using array
- * indexes, which are subject to unpredictable change, it should be
- * using hwmod IRQ names, and some other stable key for the hwmod mux
- * pad records.
- */
-int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx)
-{
- int nr_irqs;
-
- might_sleep();
-
- if (!oh || !oh->mux || !oh->mpu_irqs || pad_idx < 0 ||
- pad_idx >= oh->mux->nr_pads_dynamic)
- return -EINVAL;
-
- /* Check the number of available mpu_irqs */
- for (nr_irqs = 0; oh->mpu_irqs[nr_irqs].irq >= 0; nr_irqs++)
- ;
-
- if (irq_idx >= nr_irqs)
- return -EINVAL;
-
- if (!oh->mux->irqs) {
- /* XXX What frees this? */
- oh->mux->irqs = kzalloc(sizeof(int) * oh->mux->nr_pads_dynamic,
- GFP_KERNEL);
- if (!oh->mux->irqs)
- return -ENOMEM;
- }
- oh->mux->irqs[pad_idx] = irq_idx;
-
- return 0;
-}
-
-/**
* omap_hwmod_init - initialize the hwmod code
*
* Sets up some function pointers needed by the hwmod code to operate on the
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 4b070b42a15c..9d4bec6ee742 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -676,6 +676,7 @@ struct omap_hwmod {
spinlock_t _lock;
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
+ unsigned int (*xlate_irq)(unsigned int);
u16 flags;
u8 mpu_rt_idx;
u8 response_lat;
@@ -702,13 +703,6 @@ int omap_hwmod_shutdown(struct omap_hwmod *oh);
int omap_hwmod_assert_hardreset(struct omap_hwmod *oh, const char *name);
int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name);
-int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name);
-
-int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
-int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
-
-int omap_hwmod_reset(struct omap_hwmod *oh);
-void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs);
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs);
@@ -723,11 +717,6 @@ int omap_hwmod_get_resource_byname(struct omap_hwmod *oh, unsigned int type,
struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh);
void __iomem *omap_hwmod_get_mpu_rt_va(struct omap_hwmod *oh);
-int omap_hwmod_add_initiator_dep(struct omap_hwmod *oh,
- struct omap_hwmod *init_oh);
-int omap_hwmod_del_initiator_dep(struct omap_hwmod *oh,
- struct omap_hwmod *init_oh);
-
int omap_hwmod_enable_wakeup(struct omap_hwmod *oh);
int omap_hwmod_disable_wakeup(struct omap_hwmod *oh);
@@ -739,10 +728,6 @@ int omap_hwmod_for_each_by_class(const char *classname,
int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state);
int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
-int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
-
-int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx);
-
extern void __init omap_hwmod_init(void);
const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 11468eea3871..4e8e93c398db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -29,8 +29,6 @@
#include <linux/platform_data/mailbox-omap.h>
#include <plat/dmtimer.h>
-#include "am35xx.h"
-
#include "soc.h"
#include "omap_hwmod.h"
#include "omap_hwmod_common_data.h"
@@ -50,6 +48,8 @@
* elsewhere.
*/
+#define AM35XX_IPSS_USBOTGSS_BASE 0x5C040000
+
/*
* IP blocks
*/
@@ -3459,15 +3459,6 @@ static struct omap_hwmod_ocp_if am35xx_mdio__l3 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_addr_space am35xx_mdio_addrs[] = {
- {
- .pa_start = AM35XX_IPSS_MDIO_BASE,
- .pa_end = AM35XX_IPSS_MDIO_BASE + SZ_4K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
/* l4_core -> davinci mdio */
/*
* XXX Should be connected to an IPSS hwmod, not the L4_CORE directly;
@@ -3478,25 +3469,15 @@ static struct omap_hwmod_ocp_if am35xx_l4_core__mdio = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &am35xx_mdio_hwmod,
.clk = "emac_fck",
- .addr = am35xx_mdio_addrs,
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_irq_info am35xx_emac_mpu_irqs[] = {
- { .name = "rxthresh", .irq = 67 + OMAP_INTC_START, },
- { .name = "rx_pulse", .irq = 68 + OMAP_INTC_START, },
- { .name = "tx_pulse", .irq = 69 + OMAP_INTC_START },
- { .name = "misc_pulse", .irq = 70 + OMAP_INTC_START },
- { .irq = -1 },
-};
-
static struct omap_hwmod_class am35xx_emac_class = {
.name = "davinci_emac",
};
static struct omap_hwmod am35xx_emac_hwmod = {
.name = "davinci_emac",
- .mpu_irqs = am35xx_emac_mpu_irqs,
.class = &am35xx_emac_class,
/*
* According to Mark Greer, the MPU will not return from WFI
@@ -3519,15 +3500,6 @@ static struct omap_hwmod_ocp_if am35xx_emac__l3 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_addr_space am35xx_emac_addrs[] = {
- {
- .pa_start = AM35XX_IPSS_EMAC_BASE,
- .pa_end = AM35XX_IPSS_EMAC_BASE + 0x30000 - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
/* l4_core -> davinci emac */
/*
* XXX Should be connected to an IPSS hwmod, not the L4_CORE directly;
@@ -3538,7 +3510,6 @@ static struct omap_hwmod_ocp_if am35xx_l4_core__emac = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &am35xx_emac_hwmod,
.clk = "emac_ick",
- .addr = am35xx_emac_addrs,
.user = OCP_USER_MPU,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index 5c6c8410160e..8eb85925e444 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -498,6 +498,7 @@ static struct omap_hwmod am43xx_dss_dispc_hwmod = {
},
},
.dev_attr = &am43xx_dss_dispc_dev_attr,
+ .parent_hwmod = &am43xx_dss_core_hwmod,
};
/* rfbi */
@@ -512,6 +513,7 @@ static struct omap_hwmod am43xx_dss_rfbi_hwmod = {
.clkctrl_offs = AM43XX_CM_PER_DSS_CLKCTRL_OFFSET,
},
},
+ .parent_hwmod = &am43xx_dss_core_hwmod,
};
/* Interfaces */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index c314b3c31117..f5e68a782025 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -479,6 +479,7 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
.class = &omap44xx_dma_hwmod_class,
.clkdm_name = "l3_dma_clkdm",
.mpu_irqs = omap44xx_dma_system_irqs,
+ .xlate_irq = omap4_xlate_irq,
.main_clk = "l3_div_ck",
.prcm = {
.omap4 = {
@@ -640,6 +641,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.class = &omap44xx_dispc_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dispc_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dispc_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -693,6 +695,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
.class = &omap44xx_dsi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dsi1_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dsi1_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -726,6 +729,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
.class = &omap44xx_dsi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dsi2_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dsi2_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -784,6 +788,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
*/
.flags = HWMOD_SWSUP_SIDLE,
.mpu_irqs = omap44xx_dss_hdmi_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
.main_clk = "dss_48mhz_clk",
.prcm = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 3e9523084b2a..7c3fac035e93 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -288,6 +288,7 @@ static struct omap_hwmod omap54xx_dma_system_hwmod = {
.class = &omap54xx_dma_hwmod_class,
.clkdm_name = "dma_clkdm",
.mpu_irqs = omap54xx_dma_system_irqs,
+ .xlate_irq = omap4_xlate_irq,
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index ffd6604cd546..e8692e7675b8 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -819,7 +819,8 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
.name = "gpmc",
.class = &dra7xx_gpmc_hwmod_class,
.clkdm_name = "l3main1_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
+ HWMOD_SWSUP_SIDLE),
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {
@@ -2017,7 +2018,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = {
.class = &dra7xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.main_clk = "uart3_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 1a19fa096bab..8e903564ede2 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -152,38 +152,3 @@ void am35x_set_mode(u8 musb_mode)
omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
}
-
-void ti81xx_musb_phy_power(u8 on)
-{
- void __iomem *scm_base = NULL;
- u32 usbphycfg;
-
- scm_base = ioremap(TI81XX_SCM_BASE, SZ_2K);
- if (!scm_base) {
- pr_err("system control module ioremap failed\n");
- return;
- }
-
- usbphycfg = readl_relaxed(scm_base + USBCTRL0);
-
- if (on) {
- if (cpu_is_ti816x()) {
- usbphycfg |= TI816X_USBPHY0_NORMAL_MODE;
- usbphycfg &= ~TI816X_USBPHY_REFCLK_OSC;
- } else if (cpu_is_ti814x()) {
- usbphycfg &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN
- | USBPHY_DPINPUT | USBPHY_DMINPUT);
- usbphycfg |= (USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN
- | USBPHY_DPOPBUFCTL | USBPHY_DMOPBUFCTL);
- }
- } else {
- if (cpu_is_ti816x())
- usbphycfg &= ~TI816X_USBPHY0_NORMAL_MODE;
- else if (cpu_is_ti814x())
- usbphycfg |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;
-
- }
- writel_relaxed(usbphycfg, scm_base + USBCTRL0);
-
- iounmap(scm_base);
-}
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d7eee1d3cfa..190fa43e7479 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -19,7 +19,6 @@
#include <linux/platform_data/pinctrl-single.h>
#include <linux/platform_data/iommu-omap.h>
-#include "am35xx.h"
#include "common.h"
#include "common-board-devices.h"
#include "dss-common.h"
diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c
index 33c8846b4193..a69e9a33cb6d 100644
--- a/arch/arm/mach-omap2/pmu.c
+++ b/arch/arm/mach-omap2/pmu.c
@@ -13,7 +13,7 @@
*/
#include <linux/of.h>
-#include <asm/pmu.h>
+#include <asm/system_info.h>
#include "soc.h"
#include "omap_hwmod.h"
@@ -37,7 +37,8 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[])
{
int i;
struct omap_hwmod *oh[3];
- char *dev_name = "arm-pmu";
+ char *dev_name = cpu_architecture() == CPU_ARCH_ARMv6 ?
+ "armv6-pmu" : "armv7-pmu";
if ((!oh_num) || (oh_num > 3))
return -EINVAL;
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 7fb033eca0a5..78af6d8cf2e2 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -115,7 +115,6 @@ static int _pwrdm_register(struct powerdomain *pwrdm)
}
pwrdm->voltdm.ptr = voltdm;
INIT_LIST_HEAD(&pwrdm->voltdm_node);
- voltdm_add_pwrdm(voltdm, pwrdm);
skip_voltdm:
spin_lock_init(&pwrdm->_lock);
@@ -484,87 +483,6 @@ pac_exit:
}
/**
- * pwrdm_del_clkdm - remove a clockdomain from a powerdomain
- * @pwrdm: struct powerdomain * to add the clockdomain to
- * @clkdm: struct clockdomain * to associate with a powerdomain
- *
- * Dissociate the clockdomain @clkdm from the powerdomain
- * @pwrdm. Returns -EINVAL if presented with invalid pointers; -ENOENT
- * if @clkdm was not associated with the powerdomain, or 0 upon
- * success.
- */
-int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm)
-{
- int ret = -EINVAL;
- int i;
-
- if (!pwrdm || !clkdm)
- return -EINVAL;
-
- pr_debug("powerdomain: %s: dissociating clockdomain %s\n",
- pwrdm->name, clkdm->name);
-
- for (i = 0; i < PWRDM_MAX_CLKDMS; i++)
- if (pwrdm->pwrdm_clkdms[i] == clkdm)
- break;
-
- if (i == PWRDM_MAX_CLKDMS) {
- pr_debug("powerdomain: %s: clkdm %s not associated?!\n",
- pwrdm->name, clkdm->name);
- ret = -ENOENT;
- goto pdc_exit;
- }
-
- pwrdm->pwrdm_clkdms[i] = NULL;
-
- ret = 0;
-
-pdc_exit:
- return ret;
-}
-
-/**
- * pwrdm_for_each_clkdm - call function on each clkdm in a pwrdm
- * @pwrdm: struct powerdomain * to iterate over
- * @fn: callback function *
- *
- * Call the supplied function @fn for each clockdomain in the powerdomain
- * @pwrdm. The callback function can return anything but 0 to bail
- * out early from the iterator. Returns -EINVAL if presented with
- * invalid pointers; or passes along the last return value of the
- * callback function, which should be 0 for success or anything else
- * to indicate failure.
- */
-int pwrdm_for_each_clkdm(struct powerdomain *pwrdm,
- int (*fn)(struct powerdomain *pwrdm,
- struct clockdomain *clkdm))
-{
- int ret = 0;
- int i;
-
- if (!fn)
- return -EINVAL;
-
- for (i = 0; i < PWRDM_MAX_CLKDMS && !ret; i++)
- if (pwrdm->pwrdm_clkdms[i])
- ret = (*fn)(pwrdm, pwrdm->pwrdm_clkdms[i]);
-
- return ret;
-}
-
-/**
- * pwrdm_get_voltdm - return a ptr to the voltdm that this pwrdm resides in
- * @pwrdm: struct powerdomain *
- *
- * Return a pointer to the struct voltagedomain that the specified powerdomain
- * @pwrdm exists in.
- */
-struct voltagedomain *pwrdm_get_voltdm(struct powerdomain *pwrdm)
-{
- return pwrdm->voltdm.ptr;
-}
-
-/**
* pwrdm_get_mem_bank_count - get number of memory banks in this powerdomain
* @pwrdm: struct powerdomain *
*
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
index 11bd4dd7d8d6..28a796ce07d7 100644
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -212,11 +212,6 @@ int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
void *user);
int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
-int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
-int pwrdm_for_each_clkdm(struct powerdomain *pwrdm,
- int (*fn)(struct powerdomain *pwrdm,
- struct clockdomain *clkdm));
-struct voltagedomain *pwrdm_get_voltdm(struct powerdomain *pwrdm);
int pwrdm_get_mem_bank_count(struct powerdomain *pwrdm);
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index a8e4b582c527..6163d66102a3 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -498,6 +498,7 @@ struct omap_prcm_irq_setup {
u8 nr_irqs;
const struct omap_prcm_irq *irqs;
int irq;
+ unsigned int (*xlate_irq)(unsigned int);
void (*read_pending_irqs)(unsigned long *events);
void (*ocp_barrier)(void);
void (*save_and_clear_irqen)(u32 *saved_mask);
diff --git a/arch/arm/mach-omap2/prm3xxx.h b/arch/arm/mach-omap2/prm3xxx.h
index cfde3f4a03cc..ed8a3d8b739a 100644
--- a/arch/arm/mach-omap2/prm3xxx.h
+++ b/arch/arm/mach-omap2/prm3xxx.h
@@ -145,7 +145,6 @@ extern void omap3_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
extern int __init omap3xxx_prm_init(void);
-extern u32 omap3xxx_prm_get_reset_sources(void);
int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits);
void omap3xxx_prm_iva_idle(void);
void omap3_prm_reset_modem(void);
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index cc170fb81ff7..408c64efb807 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -49,6 +49,7 @@ static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
.irqs = omap4_prcm_irqs,
.nr_irqs = ARRAY_SIZE(omap4_prcm_irqs),
.irq = 11 + OMAP44XX_IRQ_GIC_START,
+ .xlate_irq = omap4_xlate_irq,
.read_pending_irqs = &omap44xx_prm_read_pending_irqs,
.ocp_barrier = &omap44xx_prm_ocp_barrier,
.save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen,
@@ -751,8 +752,10 @@ static int omap44xx_prm_late_init(void)
}
/* Once OMAP4 DT is filled as well */
- if (irq_num >= 0)
+ if (irq_num >= 0) {
omap4_prcm_irq_setup.irq = irq_num;
+ omap4_prcm_irq_setup.xlate_irq = NULL;
+ }
}
omap44xx_prm_enable_io_wakeup();
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index f7512515fde5..714329565b90 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -39,7 +39,6 @@ extern void omap4_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
extern int __init omap44xx_prm_init(void);
-extern u32 omap44xx_prm_get_reset_sources(void);
#endif
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index b6587854bca5..264b5e29404d 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -187,6 +187,7 @@ int omap_prcm_event_to_irq(const char *name)
*/
void omap_prcm_irq_cleanup(void)
{
+ unsigned int irq;
int i;
if (!prcm_irq_setup) {
@@ -211,7 +212,11 @@ void omap_prcm_irq_cleanup(void)
kfree(prcm_irq_setup->priority_mask);
prcm_irq_setup->priority_mask = NULL;
- irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+ if (prcm_irq_setup->xlate_irq)
+ irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
+ else
+ irq = prcm_irq_setup->irq;
+ irq_set_chained_handler(irq, NULL);
if (prcm_irq_setup->base_irq > 0)
irq_free_descs(prcm_irq_setup->base_irq,
@@ -259,6 +264,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
int offset, i;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ unsigned int irq;
if (!irq_setup)
return -EINVAL;
@@ -298,7 +304,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
1 << (offset & 0x1f);
}
- irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+ if (irq_setup->xlate_irq)
+ irq = irq_setup->xlate_irq(irq_setup->irq);
+ else
+ irq = irq_setup->irq;
+ irq_set_chained_handler(irq, omap_prcm_irq_handler);
irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
0);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 376b099ba84b..cef67af9e9b8 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -54,6 +54,7 @@
#include "soc.h"
#include "common.h"
+#include "control.h"
#include "powerdomain.h"
#include "omap-secure.h"
@@ -498,7 +499,8 @@ static void __init realtime_counter_init(void)
void __iomem *base;
static struct clk *sys_clk;
unsigned long rate;
- unsigned int reg, num, den;
+ unsigned int reg;
+ unsigned long long num, den;
base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
if (!base) {
@@ -513,13 +515,42 @@ static void __init realtime_counter_init(void)
}
rate = clk_get_rate(sys_clk);
+
+ if (soc_is_dra7xx()) {
+ /*
+ * Errata i856 says the 32.768KHz crystal does not start at
+ * power on, so the CPU falls back to an emulated 32KHz clock
+ * based on sysclk / 610 instead. This causes the master counter
+ * frequency to not be 6.144MHz but at sysclk / 610 * 375 / 2
+ * (OR sysclk * 75 / 244)
+ *
+ * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
+ * Of course any board built without a populated 32.768KHz
+ * crystal would also need this fix even if the CPU is fixed
+ * later.
+ *
+ * Either case can be detected by using the two speedselect bits
+ * If they are not 0, then the 32.768KHz clock driving the
+ * coarse counter that corrects the fine counter every time it
+ * ticks is actually rate/610 rather than 32.768KHz and we
+ * should compensate to avoid the 570ppm (at 20MHz, much worse
+ * at other rates) too fast system time.
+ */
+ reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
+ if (reg & DRA7_SPEEDSELECT_MASK) {
+ num = 75;
+ den = 244;
+ goto sysclk1_based;
+ }
+ }
+
/* Numerator/denumerator values refer to the TRM Realtime Counter section */
switch (rate) {
- case 1200000:
+ case 12000000:
num = 64;
den = 125;
break;
- case 1300000:
+ case 13000000:
num = 768;
den = 1625;
break;
@@ -531,11 +562,11 @@ static void __init realtime_counter_init(void)
num = 192;
den = 625;
break;
- case 2600000:
+ case 26000000:
num = 384;
den = 1625;
break;
- case 2700000:
+ case 27000000:
num = 256;
den = 1125;
break;
@@ -547,6 +578,7 @@ static void __init realtime_counter_init(void)
break;
}
+sysclk1_based:
/* Program numerator and denumerator registers */
reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
NUMERATOR_DENUMERATOR_MASK;
@@ -558,7 +590,7 @@ static void __init realtime_counter_init(void)
reg |= den;
writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
- arch_timer_freq = (rate / den) * num;
+ arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
set_cntfreq();
iounmap(base);
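A worked example of why num/den become unsigned long long and why the final division now rounds, with the arithmetic done here from the constants above rather than taken from the TRM:

	/*
	 * DRA7xx errata path at 20 MHz sys_clk:
	 *   old form: (rate / den) * num = (20000000 / 244) * 75 = 6147525 Hz
	 *   new form: DIV_ROUND_UP_ULL(20000000ULL * 75, 244) = 6147541 Hz
	 * For the 27 MHz table entry, rate * num = 27000000 * 256 = 6912000000,
	 * which would overflow a 32-bit intermediate if num stayed unsigned int;
	 * hence the wider types plus the 64-bit rounding helper.
	 */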
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 4457e731f7a4..292eca0e78ed 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -66,19 +66,24 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
+#ifdef CONFIG_ARCH_OMAP4
void __init omap4_pmic_init(const char *pmic_type,
struct twl4030_platform_data *pmic_data,
struct i2c_board_info *devices, int nr_devices)
{
/* PMIC part*/
+ unsigned int irq;
+
omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
- omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
+ irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
+ omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
/* Register additional devices on i2c1 bus if needed */
if (devices)
i2c_register_board_info(1, devices, nr_devices);
}
+#endif
void __init omap_pmic_late_init(void)
{
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index bc897231bd10..e4562b2b973b 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -82,16 +82,8 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
- if (soc_is_am35xx()) {
- oh_name = "am35x_otg_hs";
- name = "musb-am35x";
- } else if (cpu_is_ti81xx()) {
- oh_name = "usb_otg_hs";
- name = "musb-ti81xx";
- } else {
- oh_name = "usb_otg_hs";
- name = "musb-omap2430";
- }
+ oh_name = "usb_otg_hs";
+ name = "musb-omap2430";
oh = omap_hwmod_lookup(oh_name);
if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
diff --git a/arch/arm/mach-omap2/usb.h b/arch/arm/mach-omap2/usb.h
index 4ba2ae759895..3395365ef1db 100644
--- a/arch/arm/mach-omap2/usb.h
+++ b/arch/arm/mach-omap2/usb.h
@@ -68,5 +68,3 @@ extern void am35x_musb_reset(void);
extern void am35x_musb_phy_power(u8 on);
extern void am35x_musb_clear_irq(void);
extern void am35x_set_mode(u8 musb_mode);
-extern void ti81xx_musb_phy_power(u8 on);
-
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index 3783b8625f0f..cba8cada8c81 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -224,37 +224,6 @@ int omap_voltage_register_pmic(struct voltagedomain *voltdm,
}
/**
- * omap_change_voltscale_method() - API to change the voltage scaling method.
- * @voltdm: pointer to the VDD whose voltage scaling method
- * has to be changed.
- * @voltscale_method: the method to be used for voltage scaling.
- *
- * This API can be used by the board files to change the method of voltage
- * scaling between vpforceupdate and vcbypass. The parameter values are
- * defined in voltage.h
- */
-void omap_change_voltscale_method(struct voltagedomain *voltdm,
- int voltscale_method)
-{
- if (!voltdm || IS_ERR(voltdm)) {
- pr_warn("%s: VDD specified does not exist!\n", __func__);
- return;
- }
-
- switch (voltscale_method) {
- case VOLTSCALE_VPFORCEUPDATE:
- voltdm->scale = omap_vp_forceupdate_scale;
- return;
- case VOLTSCALE_VCBYPASS:
- voltdm->scale = omap_vc_bypass_scale;
- return;
- default:
- pr_warn("%s: Trying to change the method of voltage scaling to an unsupported one!\n",
- __func__);
- }
-}
-
-/**
* omap_voltage_late_init() - Init the various voltage parameters
*
* This API is to be called in the later stages of the
@@ -316,90 +285,11 @@ static struct voltagedomain *_voltdm_lookup(const char *name)
return voltdm;
}
-/**
- * voltdm_add_pwrdm - add a powerdomain to a voltagedomain
- * @voltdm: struct voltagedomain * to add the powerdomain to
- * @pwrdm: struct powerdomain * to associate with a voltagedomain
- *
- * Associate the powerdomain @pwrdm with a voltagedomain @voltdm. This
- * enables the use of voltdm_for_each_pwrdm(). Returns -EINVAL if
- * presented with invalid pointers; -ENOMEM if memory could not be allocated;
- * or 0 upon success.
- */
-int voltdm_add_pwrdm(struct voltagedomain *voltdm, struct powerdomain *pwrdm)
-{
- if (!voltdm || !pwrdm)
- return -EINVAL;
-
- pr_debug("voltagedomain: %s: associating powerdomain %s\n",
- voltdm->name, pwrdm->name);
-
- list_add(&pwrdm->voltdm_node, &voltdm->pwrdm_list);
-
- return 0;
-}
-
-/**
- * voltdm_for_each_pwrdm - call function for each pwrdm in a voltdm
- * @voltdm: struct voltagedomain * to iterate over
- * @fn: callback function *
- *
- * Call the supplied function @fn for each powerdomain in the
- * voltagedomain @voltdm. Returns -EINVAL if presented with invalid
- * pointers; or passes along the last return value of the callback
- * function, which should be 0 for success or anything else to
- * indicate failure.
- */
-int voltdm_for_each_pwrdm(struct voltagedomain *voltdm,
- int (*fn)(struct voltagedomain *voltdm,
- struct powerdomain *pwrdm))
-{
- struct powerdomain *pwrdm;
- int ret = 0;
-
- if (!fn)
- return -EINVAL;
-
- list_for_each_entry(pwrdm, &voltdm->pwrdm_list, voltdm_node)
- ret = (*fn)(voltdm, pwrdm);
-
- return ret;
-}
-
-/**
- * voltdm_for_each - call function on each registered voltagedomain
- * @fn: callback function *
- *
- * Call the supplied function @fn for each registered voltagedomain.
- * The callback function @fn can return anything but 0 to bail out
- * early from the iterator. Returns the last return value of the
- * callback function, which should be 0 for success or anything else
- * to indicate failure; or -EINVAL if the function pointer is null.
- */
-int voltdm_for_each(int (*fn)(struct voltagedomain *voltdm, void *user),
- void *user)
-{
- struct voltagedomain *temp_voltdm;
- int ret = 0;
-
- if (!fn)
- return -EINVAL;
-
- list_for_each_entry(temp_voltdm, &voltdm_list, node) {
- ret = (*fn)(temp_voltdm, user);
- if (ret)
- break;
- }
-
- return ret;
-}
-
static int _voltdm_register(struct voltagedomain *voltdm)
{
if (!voltdm || !voltdm->name)
return -EINVAL;
- INIT_LIST_HEAD(&voltdm->pwrdm_list);
list_add(&voltdm->node, &voltdm_list);
pr_debug("voltagedomain: registered %s\n", voltdm->name);
diff --git a/arch/arm/mach-omap2/voltage.h b/arch/arm/mach-omap2/voltage.h
index f7f2879b31b0..e64550321510 100644
--- a/arch/arm/mach-omap2/voltage.h
+++ b/arch/arm/mach-omap2/voltage.h
@@ -23,10 +23,6 @@
struct powerdomain;
-/* XXX document */
-#define VOLTSCALE_VPFORCEUPDATE 1
-#define VOLTSCALE_VCBYPASS 2
-
/*
* OMAP3 GENERIC setup times. Revisit to see if these need to be
* passed from board or PMIC file
@@ -55,7 +51,6 @@ struct omap_vfsm_instance {
* @name: Name of the voltage domain which can be used as a unique identifier.
* @scalable: Whether or not this voltage domain is scalable
* @node: list_head linking all voltage domains
- * @pwrdm_list: list_head linking all powerdomains in this voltagedomain
* @vc: pointer to VC channel associated with this voltagedomain
* @vp: pointer to VP associated with this voltagedomain
* @read: read a VC/VP register
@@ -71,7 +66,6 @@ struct voltagedomain {
char *name;
bool scalable;
struct list_head node;
- struct list_head pwrdm_list;
struct omap_vc_channel *vc;
const struct omap_vfsm_instance *vfsm;
struct omap_vp_instance *vp;
@@ -163,8 +157,6 @@ struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
unsigned long volt);
int omap_voltage_register_pmic(struct voltagedomain *voltdm,
struct omap_voltdm_pmic *pmic);
-void omap_change_voltscale_method(struct voltagedomain *voltdm,
- int voltscale_method);
int omap_voltage_late_init(void);
extern void omap2xxx_voltagedomains_init(void);
@@ -175,11 +167,6 @@ extern void omap54xx_voltagedomains_init(void);
struct voltagedomain *voltdm_lookup(const char *name);
void voltdm_init(struct voltagedomain **voltdm_list);
int voltdm_add_pwrdm(struct voltagedomain *voltdm, struct powerdomain *pwrdm);
-int voltdm_for_each(int (*fn)(struct voltagedomain *voltdm, void *user),
- void *user);
-int voltdm_for_each_pwrdm(struct voltagedomain *voltdm,
- int (*fn)(struct voltagedomain *voltdm,
- struct powerdomain *pwrdm));
int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt);
void voltdm_reset(struct voltagedomain *voltdm);
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm);
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index 07d3e5ed9264..3916a6665100 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -15,9 +15,6 @@
#include <asm/mach/time.h>
#include <asm/exception.h>
-#define SIRFSOC_VA_BASE _AC(0xFEC00000, UL)
-#define SIRFSOC_VA(x) (SIRFSOC_VA_BASE + ((x) & 0x00FFF000))
-
extern struct smp_operations sirfsoc_smp_ops;
extern void sirfsoc_secondary_startup(void);
extern void sirfsoc_cpu_die(unsigned int cpu);
@@ -25,18 +22,6 @@ extern void sirfsoc_cpu_die(unsigned int cpu);
extern void __init sirfsoc_of_irq_init(void);
extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs);
-#ifndef CONFIG_DEBUG_LL
-static inline void sirfsoc_map_lluart(void) {}
-#else
-extern void __init sirfsoc_map_lluart(void);
-#endif
-
-#ifndef CONFIG_SMP
-static inline void sirfsoc_map_scu(void) {}
-#else
-extern void sirfsoc_map_scu(void);
-#endif
-
#ifdef CONFIG_SUSPEND
extern int sirfsoc_pm_init(void);
#else
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 83efe914bf7d..8896e71586f5 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -6,7 +6,6 @@ comment "Intel/Marvell Dev Platforms (sorted by hardware release time)"
config MACH_PXA27X_DT
bool "Support PXA27x platforms from device tree"
- select CPU_PXA27x
select POWER_SUPPLY
select PXA27x
select USE_OF
@@ -84,14 +83,12 @@ config ARCH_VIPER
select I2C_GPIO if I2C=y
select ISA
select PXA25x
- select PXA_HAVE_ISA_IRQS
config MACH_ARCOM_ZEUS
bool "Arcom/Eurotech ZEUS SBC"
select ARCOM_PCMCIA
select ISA
select PXA27x
- select PXA_HAVE_ISA_IRQS
config MACH_BALLOON3
bool "Balloon 3 board"
@@ -691,9 +688,6 @@ config SHARPSL_PM_MAX1111
select SPI
select SPI_MASTER
-config PXA_HAVE_ISA_IRQS
- bool
-
config PXA310_ULPI
bool
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 06022b235730..89f790dda93e 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/io.h>
+#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
@@ -752,6 +753,8 @@ static void __init corgi_init(void)
sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ regulator_has_full_constraints();
}
static void __init fixup_corgi(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index ac7b3eabbd85..35434662dc7c 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -40,7 +40,7 @@ static struct resource pxa_resource_pmu = {
};
struct platform_device pxa_device_pmu = {
- .name = "arm-pmu",
+ .name = "xscale-pmu",
.id = -1,
.resource = &pxa_resource_pmu,
.num_resources = 1,
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index c66ad4edc5e3..5fb41ad6e3bc 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -893,6 +893,8 @@ static void __init hx4700_init(void)
mdelay(10);
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
mdelay(10);
+
+ regulator_has_full_constraints();
}
MACHINE_START(H4700, "HP iPAQ HX4700")
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 48c2fd851686..7e3ea351f3c7 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -12,14 +12,10 @@
#ifndef __ASM_MACH_IRQS_H
#define __ASM_MACH_IRQS_H
-#ifdef CONFIG_PXA_HAVE_ISA_IRQS
-#define PXA_ISA_IRQ(x) (x)
-#define PXA_ISA_IRQ_NUM (16)
-#else
-#define PXA_ISA_IRQ_NUM (0)
-#endif
+#include <asm/irq.h>
-#define PXA_IRQ(x) (PXA_ISA_IRQ_NUM + (x))
+#define PXA_ISA_IRQ(x) (x)
+#define PXA_IRQ(x) (NR_IRQS_LEGACY + (x))
#define IRQ_SSP3 PXA_IRQ(0) /* SSP3 service request */
#define IRQ_MSL PXA_IRQ(1) /* MSL Interface interrupt */
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 29019beae591..195b1121c8f1 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
+#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -455,6 +456,7 @@ static void __init poodle_init(void)
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
poodle_init_spi();
+ regulator_has_full_constraints();
}
static void __init fixup_poodle(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 962a7f31f596..f4e2e2719580 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -979,6 +979,8 @@ static void __init spitz_init(void)
spitz_nand_init();
spitz_i2c_init();
spitz_audio_init();
+
+ regulator_has_full_constraints();
}
static void __init spitz_fixup(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index d6908569ecaf..09cffed4c0a4 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -44,7 +44,7 @@
#define APCS_SAW2_VCTL 0x14
#define APCS_SAW2_2_VCTL 0x1c
-extern void secondary_startup(void);
+extern void secondary_startup_arm(void);
static DEFINE_SPINLOCK(boot_lock);
@@ -337,7 +337,7 @@ static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
flags |= cold_boot_flags[map];
}
- if (scm_set_boot_addr(virt_to_phys(secondary_startup), flags)) {
+ if (scm_set_boot_addr(virt_to_phys(secondary_startup_arm), flags)) {
for_each_present_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 739d4f113097..64c88d657f9e 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -37,6 +37,7 @@
#include <asm/pgtable.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/smp_twd.h>
+#include <asm/system_info.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -296,7 +297,6 @@ static struct resource pmu_resources[] = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
.id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
@@ -451,6 +451,7 @@ static void __init realview_eb_init(void)
*/
l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif
+ pmu_device.name = core_tile_a9mp() ? "armv7-pmu" : "armv6-pmu";
platform_device_register(&pmu_device);
}
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index b0e0dcaed944..ce92c1823494 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -280,7 +280,7 @@ static struct resource pmu_resource = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "armv6-pmu",
.id = -1,
.num_resources = 1,
.resource = &pmu_resource,
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 47bf55fdbf27..15c45e25095f 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -262,7 +262,7 @@ static struct resource pmu_resources[] = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "armv6-pmu",
.id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index 4e57a8599265..4c64662f5437 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -240,7 +240,7 @@ static struct resource pmu_resource = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "armv7-pmu",
.id = -1,
.num_resources = 1,
.resource = &pmu_resource,
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index d89eb4023467..9a22b864219f 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -280,7 +280,7 @@ static struct resource pmu_resources[] = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "armv7-pmu",
.id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index ac5803cac98d..5078932c1683 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -11,6 +11,7 @@ config ARCH_ROCKCHIP
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select DW_APB_TIMER_OF
+ select ROCKCHIP_TIMER
select ARM_GLOBAL_TIMER
select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
help
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index f70861174cbb..d360ec044b66 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -19,12 +19,38 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/hardware/cache-l2x0.h>
#include "core.h"
#include "pm.h"
+#define RK3288_GRF_SOC_CON0 0x244
+
+static void __init rockchip_timer_init(void)
+{
+ if (of_machine_is_compatible("rockchip,rk3288")) {
+ struct regmap *grf;
+
+ /*
+ * Disable auto jtag/sdmmc switching that causes issues
+ * with the mmc controllers making them unreliable
+ */
+ grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf");
+ if (!IS_ERR(grf))
+ regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000);
+ else
+ pr_err("rockchip: could not get grf syscon\n");
+ }
+
+ of_clk_init(NULL);
+ clocksource_of_init();
+}
+
static void __init rockchip_dt_init(void)
{
rockchip_suspend_init();
@@ -44,6 +70,7 @@ static const char * const rockchip_board_dt_compat[] = {
DT_MACHINE_START(ROCKCHIP_DT, "Rockchip (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
+ .init_time = rockchip_timer_init,
.dt_compat = rockchip_board_dt_compat,
.init_machine = rockchip_dt_init,
MACHINE_END
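
The GRF write above relies on Rockchip's "hiword mask" register convention: the upper 16 bits of the written value are per-bit write enables for the lower 16 bits, so a single regmap_write() of 0x10000000 clears exactly one control bit (bit 12 of GRF_SOC_CON0, the jtag/sdmmc auto-switch the comment describes) without a read-modify-write. A hedged sketch of that convention; the macro and helper names are made up for illustration:

#include <linux/regmap.h>

/* write-enable bits live 16 positions above the value bits */
#define HIWORD_UPDATE(val, mask, shift) \
        (((val) << (shift)) | ((mask) << ((shift) + 16)))

/* HIWORD_UPDATE(0, 0x1, 12) == 0x10000000, i.e. "clear bit 12":
 * the same value the machine code above writes to RK3288_GRF_SOC_CON0 */
static int grf_disable_jtag_switching(struct regmap *grf)
{
        return regmap_write(grf, 0x244 /* RK3288_GRF_SOC_CON0 */,
                            HIWORD_UPDATE(0, 0x1, 12));
}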
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 9eb22297cbe1..79c49ff77f6e 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -29,7 +29,6 @@ config CPU_S3C2410
default y
select CPU_ARM920T
select S3C2410_COMMON_CLK
- select S3C2410_DMA if S3C24XX_DMA
select ARM_S3C2410_CPUFREQ if ARM_S3C24XX_CPUFREQ
select S3C2410_PM if PM
help
@@ -40,7 +39,6 @@ config CPU_S3C2412
bool "SAMSUNG S3C2412"
select CPU_ARM926T
select S3C2412_COMMON_CLK
- select S3C2412_DMA if S3C24XX_DMA
select S3C2412_PM if PM
help
Support for the S3C2412 and S3C2413 SoCs from the S3C24XX line
@@ -50,7 +48,6 @@ config CPU_S3C2416
select CPU_ARM926T
select S3C2416_PM if PM
select S3C2443_COMMON_CLK
- select S3C2443_DMA if S3C24XX_DMA
help
Support for the S3C2416 SoC from the S3C24XX line
@@ -59,7 +56,6 @@ config CPU_S3C2440
select CPU_ARM920T
select S3C2410_COMMON_CLK
select S3C2410_PM if PM
- select S3C2440_DMA if S3C24XX_DMA
help
Support for S3C2440 Samsung Mobile CPU based systems.
@@ -67,7 +63,6 @@ config CPU_S3C2442
bool "SAMSUNG S3C2442"
select CPU_ARM920T
select S3C2410_COMMON_CLK
- select S3C2410_DMA if S3C24XX_DMA
select S3C2410_PM if PM
help
Support for S3C2442 Samsung Mobile CPU based systems.
@@ -80,7 +75,6 @@ config CPU_S3C2443
bool "SAMSUNG S3C2443"
select CPU_ARM920T
select S3C2443_COMMON_CLK
- select S3C2443_DMA if S3C24XX_DMA
help
Support for the S3C2443 SoC from the S3C24XX line
@@ -114,27 +108,6 @@ config S3C24XX_SETUP_TS
help
Compile in platform device definition for Samsung TouchScreen.
-config S3C24XX_DMA
- bool "S3C2410 DMA support (deprecated)"
- select S3C_DMA
- help
- S3C2410 DMA support. This is needed for drivers like sound which
- use the S3C2410's DMA system to move data to and from the
- peripheral blocks.
-
-config S3C2410_DMA_DEBUG
- bool "S3C2410 DMA support debug"
- depends on S3C2410_DMA
- help
- Enable debugging output for the DMA code. This option sends info
- to the kernel log, at priority KERN_DEBUG.
-
-config S3C2410_DMA
- bool
- depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442)
- help
- DMA device selection for S3C2410 and compatible CPUs
-
config S3C2410_PM
bool
help
@@ -325,11 +298,6 @@ config CPU_S3C2412_ONLY
!CPU_S3C2442 && !CPU_S3C2443
default y
-config S3C2412_DMA
- bool
- help
- Internal config node for S3C2412 DMA support
-
config S3C2412_PM
bool
select S3C2412_PM_SLEEP
@@ -438,11 +406,6 @@ endif # CPU_S3C2416
if CPU_S3C2440
-config S3C2440_DMA
- bool
- help
- Support for S3C2440 specific DMA code5A
-
config S3C2440_XTAL_12000000
bool
help
@@ -601,11 +564,6 @@ endif # CPU_S3C2442
if CPU_S3C2443 || CPU_S3C2416
-config S3C2443_DMA
- bool
- help
- Internal config node for S3C2443 DMA support
-
config S3C2443_SETUP_SPI
bool
help
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index b92071638733..b40a22fe082a 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -12,12 +12,10 @@
obj-y += common.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
-obj-$(CONFIG_S3C2410_DMA) += dma-s3c2410.o
obj-$(CONFIG_S3C2410_PLL) += pll-s3c2410.o
obj-$(CONFIG_S3C2410_PM) += pm-s3c2410.o sleep-s3c2410.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
-obj-$(CONFIG_S3C2412_DMA) += dma-s3c2412.o
obj-$(CONFIG_S3C2412_PM) += pm-s3c2412.o
obj-$(CONFIG_S3C2412_PM_SLEEP) += sleep-s3c2412.o
@@ -27,7 +25,6 @@ obj-$(CONFIG_S3C2416_PM) += pm-s3c2416.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
-obj-$(CONFIG_S3C2440_DMA) += dma-s3c2440.o
obj-$(CONFIG_S3C2440_PLL_12000000) += pll-s3c2440-12000000.o
obj-$(CONFIG_S3C2440_PLL_16934400) += pll-s3c2440-16934400.o
@@ -39,15 +36,11 @@ obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
# common code
-obj-$(CONFIG_S3C24XX_DMA) += dma.o
-
obj-$(CONFIG_S3C2410_CPUFREQ_UTILS) += cpufreq-utils.o
obj-$(CONFIG_S3C2410_IOTIMING) += iotiming-s3c2410.o
obj-$(CONFIG_S3C2412_IOTIMING) += iotiming-s3c2412.o
-obj-$(CONFIG_S3C2443_DMA) += dma-s3c2443.o
-
#
# machine support
# following is ordered alphabetically by option text.
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2410.c b/arch/arm/mach-s3c24xx/dma-s3c2410.c
deleted file mode 100644
index 09aa12da1789..000000000000
--- a/arch/arm/mach-s3c24xx/dma-s3c2410.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/* linux/arch/arm/mach-s3c2410/dma.c
- *
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 DMA selection
- *
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/serial_core.h>
-#include <linux/serial_s3c.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include <plat/cpu.h>
-#include <plat/dma-s3c24xx.h>
-
-#include <mach/regs-gpio.h>
-#include <plat/regs-dma.h>
-#include <mach/regs-lcd.h>
-#include <plat/regs-spi.h>
-
-static struct s3c24xx_dma_map __initdata s3c2410_dma_mappings[] = {
- [DMACH_XD0] = {
- .name = "xdreq0",
- .channels[0] = S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID,
- },
- [DMACH_XD1] = {
- .name = "xdreq1",
- .channels[1] = S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID,
- },
- [DMACH_SDI] = {
- .name = "sdi",
- .channels[0] = S3C2410_DCON_CH0_SDI | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_SDI | DMA_CH_VALID,
- .channels[3] = S3C2410_DCON_CH3_SDI | DMA_CH_VALID,
- },
- [DMACH_SPI0] = {
- .name = "spi0",
- .channels[1] = S3C2410_DCON_CH1_SPI | DMA_CH_VALID,
- },
- [DMACH_SPI1] = {
- .name = "spi1",
- .channels[3] = S3C2410_DCON_CH3_SPI | DMA_CH_VALID,
- },
- [DMACH_UART0] = {
- .name = "uart0",
- .channels[0] = S3C2410_DCON_CH0_UART0 | DMA_CH_VALID,
- },
- [DMACH_UART1] = {
- .name = "uart1",
- .channels[1] = S3C2410_DCON_CH1_UART1 | DMA_CH_VALID,
- },
- [DMACH_UART2] = {
- .name = "uart2",
- .channels[3] = S3C2410_DCON_CH3_UART2 | DMA_CH_VALID,
- },
- [DMACH_TIMER] = {
- .name = "timer",
- .channels[0] = S3C2410_DCON_CH0_TIMER | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_TIMER | DMA_CH_VALID,
- .channels[3] = S3C2410_DCON_CH3_TIMER | DMA_CH_VALID,
- },
- [DMACH_I2S_IN] = {
- .name = "i2s-sdi",
- .channels[1] = S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID,
- },
- [DMACH_I2S_OUT] = {
- .name = "i2s-sdo",
- .channels[2] = S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID,
- },
- [DMACH_USB_EP1] = {
- .name = "usb-ep1",
- .channels[0] = S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID,
- },
- [DMACH_USB_EP2] = {
- .name = "usb-ep2",
- .channels[1] = S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID,
- },
- [DMACH_USB_EP3] = {
- .name = "usb-ep3",
- .channels[2] = S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID,
- },
- [DMACH_USB_EP4] = {
- .name = "usb-ep4",
- .channels[3] =S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID,
- },
-};
-
-static void s3c2410_dma_select(struct s3c2410_dma_chan *chan,
- struct s3c24xx_dma_map *map)
-{
- chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID;
-}
-
-static struct s3c24xx_dma_selection __initdata s3c2410_dma_sel = {
- .select = s3c2410_dma_select,
- .dcon_mask = 7 << 24,
- .map = s3c2410_dma_mappings,
- .map_size = ARRAY_SIZE(s3c2410_dma_mappings),
-};
-
-static struct s3c24xx_dma_order __initdata s3c2410_dma_order = {
- .channels = {
- [DMACH_SDI] = {
- .list = {
- [0] = 3 | DMA_CH_VALID,
- [1] = 2 | DMA_CH_VALID,
- [2] = 0 | DMA_CH_VALID,
- },
- },
- [DMACH_I2S_IN] = {
- .list = {
- [0] = 1 | DMA_CH_VALID,
- [1] = 2 | DMA_CH_VALID,
- },
- },
- },
-};
-
-static int __init s3c2410_dma_add(struct device *dev,
- struct subsys_interface *sif)
-{
- s3c2410_dma_init();
- s3c24xx_dma_order_set(&s3c2410_dma_order);
- return s3c24xx_dma_init_map(&s3c2410_dma_sel);
-}
-
-#if defined(CONFIG_CPU_S3C2410)
-static struct subsys_interface s3c2410_dma_interface = {
- .name = "s3c2410_dma",
- .subsys = &s3c2410_subsys,
- .add_dev = s3c2410_dma_add,
-};
-
-static int __init s3c2410_dma_drvinit(void)
-{
- return subsys_interface_register(&s3c2410_dma_interface);
-}
-
-arch_initcall(s3c2410_dma_drvinit);
-
-static struct subsys_interface s3c2410a_dma_interface = {
- .name = "s3c2410a_dma",
- .subsys = &s3c2410a_subsys,
- .add_dev = s3c2410_dma_add,
-};
-
-static int __init s3c2410a_dma_drvinit(void)
-{
- return subsys_interface_register(&s3c2410a_dma_interface);
-}
-
-arch_initcall(s3c2410a_dma_drvinit);
-#endif
-
-#if defined(CONFIG_CPU_S3C2442)
-/* S3C2442 DMA contains the same selection table as the S3C2410 */
-static struct subsys_interface s3c2442_dma_interface = {
- .name = "s3c2442_dma",
- .subsys = &s3c2442_subsys,
- .add_dev = s3c2410_dma_add,
-};
-
-static int __init s3c2442_dma_drvinit(void)
-{
- return subsys_interface_register(&s3c2442_dma_interface);
-}
-
-arch_initcall(s3c2442_dma_drvinit);
-#endif
-
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2412.c b/arch/arm/mach-s3c24xx/dma-s3c2412.c
deleted file mode 100644
index 0c0106d1a4d1..000000000000
--- a/arch/arm/mach-s3c24xx/dma-s3c2412.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/* linux/arch/arm/mach-s3c2412/dma.c
- *
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2412 DMA selection
- *
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/serial_core.h>
-#include <linux/serial_s3c.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-
-#include <plat/dma-s3c24xx.h>
-#include <plat/cpu.h>
-
-#include <mach/regs-gpio.h>
-#include <plat/regs-dma.h>
-#include <mach/regs-lcd.h>
-#include <plat/regs-spi.h>
-
-#define MAP(x) { (x)| DMA_CH_VALID, (x)| DMA_CH_VALID, (x)| DMA_CH_VALID, (x)| DMA_CH_VALID }
-
-static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = {
- [DMACH_XD0] = {
- .name = "xdreq0",
- .channels = MAP(S3C2412_DMAREQSEL_XDREQ0),
- },
- [DMACH_XD1] = {
- .name = "xdreq1",
- .channels = MAP(S3C2412_DMAREQSEL_XDREQ1),
- },
- [DMACH_SDI] = {
- .name = "sdi",
- .channels = MAP(S3C2412_DMAREQSEL_SDI),
- },
- [DMACH_SPI0_RX] = {
- .name = "spi0-rx",
- .channels = MAP(S3C2412_DMAREQSEL_SPI0RX),
- },
- [DMACH_SPI0_TX] = {
- .name = "spi0-tx",
- .channels = MAP(S3C2412_DMAREQSEL_SPI0TX),
- },
- [DMACH_SPI1_RX] = {
- .name = "spi1-rx",
- .channels = MAP(S3C2412_DMAREQSEL_SPI1RX),
- },
- [DMACH_SPI1_TX] = {
- .name = "spi1-tx",
- .channels = MAP(S3C2412_DMAREQSEL_SPI1TX),
- },
- [DMACH_UART0] = {
- .name = "uart0",
- .channels = MAP(S3C2412_DMAREQSEL_UART0_0),
- },
- [DMACH_UART1] = {
- .name = "uart1",
- .channels = MAP(S3C2412_DMAREQSEL_UART1_0),
- },
- [DMACH_UART2] = {
- .name = "uart2",
- .channels = MAP(S3C2412_DMAREQSEL_UART2_0),
- },
- [DMACH_UART0_SRC2] = {
- .name = "uart0",
- .channels = MAP(S3C2412_DMAREQSEL_UART0_1),
- },
- [DMACH_UART1_SRC2] = {
- .name = "uart1",
- .channels = MAP(S3C2412_DMAREQSEL_UART1_1),
- },
- [DMACH_UART2_SRC2] = {
- .name = "uart2",
- .channels = MAP(S3C2412_DMAREQSEL_UART2_1),
- },
- [DMACH_TIMER] = {
- .name = "timer",
- .channels = MAP(S3C2412_DMAREQSEL_TIMER),
- },
- [DMACH_I2S_IN] = {
- .name = "i2s-sdi",
- .channels = MAP(S3C2412_DMAREQSEL_I2SRX),
- },
- [DMACH_I2S_OUT] = {
- .name = "i2s-sdo",
- .channels = MAP(S3C2412_DMAREQSEL_I2STX),
- },
- [DMACH_USB_EP1] = {
- .name = "usb-ep1",
- .channels = MAP(S3C2412_DMAREQSEL_USBEP1),
- },
- [DMACH_USB_EP2] = {
- .name = "usb-ep2",
- .channels = MAP(S3C2412_DMAREQSEL_USBEP2),
- },
- [DMACH_USB_EP3] = {
- .name = "usb-ep3",
- .channels = MAP(S3C2412_DMAREQSEL_USBEP3),
- },
- [DMACH_USB_EP4] = {
- .name = "usb-ep4",
- .channels = MAP(S3C2412_DMAREQSEL_USBEP4),
- },
-};
-
-static void s3c2412_dma_select(struct s3c2410_dma_chan *chan,
- struct s3c24xx_dma_map *map)
-{
- unsigned long chsel = map->channels[0] & (~DMA_CH_VALID);
- writel(chsel | S3C2412_DMAREQSEL_HW,
- chan->regs + S3C2412_DMA_DMAREQSEL);
-}
-
-static struct s3c24xx_dma_selection __initdata s3c2412_dma_sel = {
- .select = s3c2412_dma_select,
- .dcon_mask = 0,
- .map = s3c2412_dma_mappings,
- .map_size = ARRAY_SIZE(s3c2412_dma_mappings),
-};
-
-static int __init s3c2412_dma_add(struct device *dev,
- struct subsys_interface *sif)
-{
- s3c2410_dma_init();
- return s3c24xx_dma_init_map(&s3c2412_dma_sel);
-}
-
-static struct subsys_interface s3c2412_dma_interface = {
- .name = "s3c2412_dma",
- .subsys = &s3c2412_subsys,
- .add_dev = s3c2412_dma_add,
-};
-
-static int __init s3c2412_dma_init(void)
-{
- return subsys_interface_register(&s3c2412_dma_interface);
-}
-
-arch_initcall(s3c2412_dma_init);
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2440.c b/arch/arm/mach-s3c24xx/dma-s3c2440.c
deleted file mode 100644
index 2f8e8a3017df..000000000000
--- a/arch/arm/mach-s3c24xx/dma-s3c2440.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/* linux/arch/arm/mach-s3c2440/dma.c
- *
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2440 DMA selection
- *
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/serial_core.h>
-#include <linux/serial_s3c.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include <plat/dma-s3c24xx.h>
-#include <plat/cpu.h>
-
-#include <mach/regs-gpio.h>
-#include <plat/regs-dma.h>
-#include <mach/regs-lcd.h>
-#include <plat/regs-spi.h>
-
-static struct s3c24xx_dma_map __initdata s3c2440_dma_mappings[] = {
- [DMACH_XD0] = {
- .name = "xdreq0",
- .channels[0] = S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID,
- },
- [DMACH_XD1] = {
- .name = "xdreq1",
- .channels[1] = S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID,
- },
- [DMACH_SDI] = {
- .name = "sdi",
- .channels[0] = S3C2410_DCON_CH0_SDI | DMA_CH_VALID,
- .channels[1] = S3C2440_DCON_CH1_SDI | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_SDI | DMA_CH_VALID,
- .channels[3] = S3C2410_DCON_CH3_SDI | DMA_CH_VALID,
- },
- [DMACH_SPI0] = {
- .name = "spi0",
- .channels[1] = S3C2410_DCON_CH1_SPI | DMA_CH_VALID,
- },
- [DMACH_SPI1] = {
- .name = "spi1",
- .channels[3] = S3C2410_DCON_CH3_SPI | DMA_CH_VALID,
- },
- [DMACH_UART0] = {
- .name = "uart0",
- .channels[0] = S3C2410_DCON_CH0_UART0 | DMA_CH_VALID,
- },
- [DMACH_UART1] = {
- .name = "uart1",
- .channels[1] = S3C2410_DCON_CH1_UART1 | DMA_CH_VALID,
- },
- [DMACH_UART2] = {
- .name = "uart2",
- .channels[3] = S3C2410_DCON_CH3_UART2 | DMA_CH_VALID,
- },
- [DMACH_TIMER] = {
- .name = "timer",
- .channels[0] = S3C2410_DCON_CH0_TIMER | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_TIMER | DMA_CH_VALID,
- .channels[3] = S3C2410_DCON_CH3_TIMER | DMA_CH_VALID,
- },
- [DMACH_I2S_IN] = {
- .name = "i2s-sdi",
- .channels[1] = S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID,
- },
- [DMACH_I2S_OUT] = {
- .name = "i2s-sdo",
- .channels[0] = S3C2440_DCON_CH0_I2SSDO | DMA_CH_VALID,
- .channels[2] = S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID,
- },
- [DMACH_PCM_IN] = {
- .name = "pcm-in",
- .channels[0] = S3C2440_DCON_CH0_PCMIN | DMA_CH_VALID,
- .channels[2] = S3C2440_DCON_CH2_PCMIN | DMA_CH_VALID,
- },
- [DMACH_PCM_OUT] = {
- .name = "pcm-out",
- .channels[1] = S3C2440_DCON_CH1_PCMOUT | DMA_CH_VALID,
- .channels[3] = S3C2440_DCON_CH3_PCMOUT | DMA_CH_VALID,
- },
- [DMACH_MIC_IN] = {
- .name = "mic-in",
- .channels[2] = S3C2440_DCON_CH2_MICIN | DMA_CH_VALID,
- .channels[3] = S3C2440_DCON_CH3_MICIN | DMA_CH_VALID,
- },
- [DMACH_USB_EP1] = {
- .name = "usb-ep1",
- .channels[0] = S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID,
- },
- [DMACH_USB_EP2] = {
- .name = "usb-ep2",
- .channels[1] = S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID,
- },
- [DMACH_USB_EP3] = {
- .name = "usb-ep3",
- .channels[2] = S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID,
- },
- [DMACH_USB_EP4] = {
- .name = "usb-ep4",
- .channels[3] = S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID,
- },
-};
-
-static void s3c2440_dma_select(struct s3c2410_dma_chan *chan,
- struct s3c24xx_dma_map *map)
-{
- chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID;
-}
-
-static struct s3c24xx_dma_selection __initdata s3c2440_dma_sel = {
- .select = s3c2440_dma_select,
- .dcon_mask = 7 << 24,
- .map = s3c2440_dma_mappings,
- .map_size = ARRAY_SIZE(s3c2440_dma_mappings),
-};
-
-static struct s3c24xx_dma_order __initdata s3c2440_dma_order = {
- .channels = {
- [DMACH_SDI] = {
- .list = {
- [0] = 3 | DMA_CH_VALID,
- [1] = 2 | DMA_CH_VALID,
- [2] = 1 | DMA_CH_VALID,
- [3] = 0 | DMA_CH_VALID,
- },
- },
- [DMACH_I2S_IN] = {
- .list = {
- [0] = 1 | DMA_CH_VALID,
- [1] = 2 | DMA_CH_VALID,
- },
- },
- [DMACH_I2S_OUT] = {
- .list = {
- [0] = 2 | DMA_CH_VALID,
- [1] = 1 | DMA_CH_VALID,
- },
- },
- [DMACH_PCM_IN] = {
- .list = {
- [0] = 2 | DMA_CH_VALID,
- [1] = 1 | DMA_CH_VALID,
- },
- },
- [DMACH_PCM_OUT] = {
- .list = {
- [0] = 1 | DMA_CH_VALID,
- [1] = 3 | DMA_CH_VALID,
- },
- },
- [DMACH_MIC_IN] = {
- .list = {
- [0] = 3 | DMA_CH_VALID,
- [1] = 2 | DMA_CH_VALID,
- },
- },
- },
-};
-
-static int __init s3c2440_dma_add(struct device *dev,
- struct subsys_interface *sif)
-{
- s3c2410_dma_init();
- s3c24xx_dma_order_set(&s3c2440_dma_order);
- return s3c24xx_dma_init_map(&s3c2440_dma_sel);
-}
-
-static struct subsys_interface s3c2440_dma_interface = {
- .name = "s3c2440_dma",
- .subsys = &s3c2440_subsys,
- .add_dev = s3c2440_dma_add,
-};
-
-static int __init s3c2440_dma_init(void)
-{
- return subsys_interface_register(&s3c2440_dma_interface);
-}
-
-arch_initcall(s3c2440_dma_init);
-
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2443.c b/arch/arm/mach-s3c24xx/dma-s3c2443.c
deleted file mode 100644
index f4096ec0700a..000000000000
--- a/arch/arm/mach-s3c24xx/dma-s3c2443.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/* linux/arch/arm/mach-s3c2443/dma.c
- *
- * Copyright (c) 2007 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2443 DMA selection
- *
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/serial_core.h>
-#include <linux/serial_s3c.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-
-#include <plat/dma-s3c24xx.h>
-#include <plat/cpu.h>
-
-#include <mach/regs-gpio.h>
-#include <plat/regs-dma.h>
-#include <mach/regs-lcd.h>
-#include <plat/regs-spi.h>
-
-#define MAP(x) { \
- [0] = (x) | DMA_CH_VALID, \
- [1] = (x) | DMA_CH_VALID, \
- [2] = (x) | DMA_CH_VALID, \
- [3] = (x) | DMA_CH_VALID, \
- [4] = (x) | DMA_CH_VALID, \
- [5] = (x) | DMA_CH_VALID, \
- }
-
-static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = {
- [DMACH_XD0] = {
- .name = "xdreq0",
- .channels = MAP(S3C2443_DMAREQSEL_XDREQ0),
- },
- [DMACH_XD1] = {
- .name = "xdreq1",
- .channels = MAP(S3C2443_DMAREQSEL_XDREQ1),
- },
- [DMACH_SDI] = { /* only on S3C2443 */
- .name = "sdi",
- .channels = MAP(S3C2443_DMAREQSEL_SDI),
- },
- [DMACH_SPI0_RX] = {
- .name = "spi0-rx",
- .channels = MAP(S3C2443_DMAREQSEL_SPI0RX),
- },
- [DMACH_SPI0_TX] = {
- .name = "spi0-tx",
- .channels = MAP(S3C2443_DMAREQSEL_SPI0TX),
- },
- [DMACH_SPI1_RX] = { /* only on S3C2443/S3C2450 */
- .name = "spi1-rx",
- .channels = MAP(S3C2443_DMAREQSEL_SPI1RX),
- },
- [DMACH_SPI1_TX] = { /* only on S3C2443/S3C2450 */
- .name = "spi1-tx",
- .channels = MAP(S3C2443_DMAREQSEL_SPI1TX),
- },
- [DMACH_UART0] = {
- .name = "uart0",
- .channels = MAP(S3C2443_DMAREQSEL_UART0_0),
- },
- [DMACH_UART1] = {
- .name = "uart1",
- .channels = MAP(S3C2443_DMAREQSEL_UART1_0),
- },
- [DMACH_UART2] = {
- .name = "uart2",
- .channels = MAP(S3C2443_DMAREQSEL_UART2_0),
- },
- [DMACH_UART3] = {
- .name = "uart3",
- .channels = MAP(S3C2443_DMAREQSEL_UART3_0),
- },
- [DMACH_UART0_SRC2] = {
- .name = "uart0",
- .channels = MAP(S3C2443_DMAREQSEL_UART0_1),
- },
- [DMACH_UART1_SRC2] = {
- .name = "uart1",
- .channels = MAP(S3C2443_DMAREQSEL_UART1_1),
- },
- [DMACH_UART2_SRC2] = {
- .name = "uart2",
- .channels = MAP(S3C2443_DMAREQSEL_UART2_1),
- },
- [DMACH_UART3_SRC2] = {
- .name = "uart3",
- .channels = MAP(S3C2443_DMAREQSEL_UART3_1),
- },
- [DMACH_TIMER] = {
- .name = "timer",
- .channels = MAP(S3C2443_DMAREQSEL_TIMER),
- },
- [DMACH_I2S_IN] = {
- .name = "i2s-sdi",
- .channels = MAP(S3C2443_DMAREQSEL_I2SRX),
- },
- [DMACH_I2S_OUT] = {
- .name = "i2s-sdo",
- .channels = MAP(S3C2443_DMAREQSEL_I2STX),
- },
- [DMACH_PCM_IN] = {
- .name = "pcm-in",
- .channels = MAP(S3C2443_DMAREQSEL_PCMIN),
- },
- [DMACH_PCM_OUT] = {
- .name = "pcm-out",
- .channels = MAP(S3C2443_DMAREQSEL_PCMOUT),
- },
- [DMACH_MIC_IN] = {
- .name = "mic-in",
- .channels = MAP(S3C2443_DMAREQSEL_MICIN),
- },
-};
-
-static void s3c2443_dma_select(struct s3c2410_dma_chan *chan,
- struct s3c24xx_dma_map *map)
-{
- unsigned long chsel = map->channels[0] & (~DMA_CH_VALID);
- writel(chsel | S3C2443_DMAREQSEL_HW,
- chan->regs + S3C2443_DMA_DMAREQSEL);
-}
-
-static struct s3c24xx_dma_selection __initdata s3c2443_dma_sel = {
- .select = s3c2443_dma_select,
- .dcon_mask = 0,
- .map = s3c2443_dma_mappings,
- .map_size = ARRAY_SIZE(s3c2443_dma_mappings),
-};
-
-static int __init s3c2443_dma_add(struct device *dev,
- struct subsys_interface *sif)
-{
- s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100);
- return s3c24xx_dma_init_map(&s3c2443_dma_sel);
-}
-
-#ifdef CONFIG_CPU_S3C2416
-/* S3C2416 DMA contains the same selection table as the S3C2443 */
-static struct subsys_interface s3c2416_dma_interface = {
- .name = "s3c2416_dma",
- .subsys = &s3c2416_subsys,
- .add_dev = s3c2443_dma_add,
-};
-
-static int __init s3c2416_dma_init(void)
-{
- return subsys_interface_register(&s3c2416_dma_interface);
-}
-
-arch_initcall(s3c2416_dma_init);
-#endif
-
-#ifdef CONFIG_CPU_S3C2443
-static struct subsys_interface s3c2443_dma_interface = {
- .name = "s3c2443_dma",
- .subsys = &s3c2443_subsys,
- .add_dev = s3c2443_dma_add,
-};
-
-static int __init s3c2443_dma_init(void)
-{
- return subsys_interface_register(&s3c2443_dma_interface);
-}
-
-arch_initcall(s3c2443_dma_init);
-#endif
diff --git a/arch/arm/mach-s3c24xx/dma.c b/arch/arm/mach-s3c24xx/dma.c
deleted file mode 100644
index a8dafc174fe3..000000000000
--- a/arch/arm/mach-s3c24xx/dma.c
+++ /dev/null
@@ -1,1465 +0,0 @@
-/*
- * Copyright 2003-2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 DMA core
- *
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-
-#ifdef CONFIG_S3C2410_DMA_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/syscore_ops.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <mach/dma.h>
-#include <mach/map.h>
-
-#include <plat/dma-s3c24xx.h>
-#include <plat/regs-dma.h>
-
-/* io map for dma */
-static void __iomem *dma_base;
-static struct kmem_cache *dma_kmem;
-
-static int dma_channels;
-
-static struct s3c24xx_dma_selection dma_sel;
-
-
-/* debugging functions */
-
-#define BUF_MAGIC (0xcafebabe)
-
-#define dmawarn(fmt...) printk(KERN_DEBUG fmt)
-
-#define dma_regaddr(chan, reg) ((chan)->regs + (reg))
-
-#if 1
-#define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg))
-#else
-static inline void
-dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val)
-{
- pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg);
- writel(val, dma_regaddr(chan, reg));
-}
-#endif
-
-#define dma_rdreg(chan, reg) readl((chan)->regs + (reg))
-
-/* captured register state for debug */
-
-struct s3c2410_dma_regstate {
- unsigned long dcsrc;
- unsigned long disrc;
- unsigned long dstat;
- unsigned long dcon;
- unsigned long dmsktrig;
-};
-
-#ifdef CONFIG_S3C2410_DMA_DEBUG
-
-/* dmadbg_showregs
- *
- * simple debug routine to print the current state of the dma registers
-*/
-
-static void
-dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs)
-{
- regs->dcsrc = dma_rdreg(chan, S3C2410_DMA_DCSRC);
- regs->disrc = dma_rdreg(chan, S3C2410_DMA_DISRC);
- regs->dstat = dma_rdreg(chan, S3C2410_DMA_DSTAT);
- regs->dcon = dma_rdreg(chan, S3C2410_DMA_DCON);
- regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
-}
-
-static void
-dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan,
- struct s3c2410_dma_regstate *regs)
-{
- printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
- chan->number, fname, line,
- regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig,
- regs->dcon);
-}
-
-static void
-dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan)
-{
- struct s3c2410_dma_regstate state;
-
- dmadbg_capture(chan, &state);
-
- printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n",
- chan->number, fname, line, chan->load_state,
- chan->curr, chan->next, chan->end);
-
- dmadbg_dumpregs(fname, line, chan, &state);
-}
-
-static void
-dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan)
-{
- struct s3c2410_dma_regstate state;
-
- dmadbg_capture(chan, &state);
- dmadbg_dumpregs(fname, line, chan, &state);
-}
-
-#define dbg_showregs(chan) dmadbg_showregs(__func__, __LINE__, (chan))
-#define dbg_showchan(chan) dmadbg_showchan(__func__, __LINE__, (chan))
-#else
-#define dbg_showregs(chan) do { } while(0)
-#define dbg_showchan(chan) do { } while(0)
-#endif /* CONFIG_S3C2410_DMA_DEBUG */
-
-/* s3c2410_dma_stats_timeout
- *
- * Update DMA stats from timeout info
-*/
-
-static void
-s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val)
-{
- if (stats == NULL)
- return;
-
- if (val > stats->timeout_longest)
- stats->timeout_longest = val;
- if (val < stats->timeout_shortest)
- stats->timeout_shortest = val;
-
- stats->timeout_avg += val;
-}
-
-/* s3c2410_dma_waitforload
- *
- * wait for the DMA engine to load a buffer, and update the state accordingly
-*/
-
-static int
-s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line)
-{
- int timeout = chan->load_timeout;
- int took;
-
- if (chan->load_state != S3C2410_DMALOAD_1LOADED) {
- printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line);
- return 0;
- }
-
- if (chan->stats != NULL)
- chan->stats->loads++;
-
- while (--timeout > 0) {
- if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) {
- took = chan->load_timeout - timeout;
-
- s3c2410_dma_stats_timeout(chan->stats, took);
-
- switch (chan->load_state) {
- case S3C2410_DMALOAD_1LOADED:
- chan->load_state = S3C2410_DMALOAD_1RUNNING;
- break;
-
- default:
- printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state);
- }
-
- return 1;
- }
- }
-
- if (chan->stats != NULL) {
- chan->stats->timeout_failed++;
- }
-
- return 0;
-}
-
-/* s3c2410_dma_loadbuffer
- *
- * load a buffer, and update the channel state
-*/
-
-static inline int
-s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan,
- struct s3c2410_dma_buf *buf)
-{
- unsigned long reload;
-
- if (buf == NULL) {
- dmawarn("buffer is NULL\n");
- return -EINVAL;
- }
-
- pr_debug("s3c2410_chan_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n",
- buf, (unsigned long)buf->data, buf->size);
-
- /* check the state of the channel before we do anything */
-
- if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
- dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n");
- }
-
- if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) {
- dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n");
- }
-
- /* it would seem sensible if we are the last buffer to not bother
- * with the auto-reload bit, so that the DMA engine will not try
- * and load another transfer after this one has finished...
- */
- if (chan->load_state == S3C2410_DMALOAD_NONE) {
- pr_debug("load_state is none, checking for noreload (next=%p)\n",
- buf->next);
- reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
- } else {
- //pr_debug("load_state is %d => autoreload\n", chan->load_state);
- reload = S3C2410_DCON_AUTORELOAD;
- }
-
- if ((buf->data & 0xf0000000) != 0x30000000) {
- dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
- }
-
- writel(buf->data, chan->addr_reg);
-
- dma_wrreg(chan, S3C2410_DMA_DCON,
- chan->dcon | reload | (buf->size/chan->xfer_unit));
-
- chan->next = buf->next;
-
- /* update the state of the channel */
-
- switch (chan->load_state) {
- case S3C2410_DMALOAD_NONE:
- chan->load_state = S3C2410_DMALOAD_1LOADED;
- break;
-
- case S3C2410_DMALOAD_1RUNNING:
- chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING;
- break;
-
- default:
- dmawarn("dmaload: unknown state %d in loadbuffer\n",
- chan->load_state);
- break;
- }
-
- return 0;
-}
-
-/* s3c2410_dma_call_op
- *
- * small routine to call the op routine with the given op if it has been
- * registered
-*/
-
-static void
-s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op)
-{
- if (chan->op_fn != NULL) {
- (chan->op_fn)(chan, op);
- }
-}
-
-/* s3c2410_dma_buffdone
- *
- * small wrapper to check if callback routine needs to be called, and
- * if so, call it
-*/
-
-static inline void
-s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf,
- enum s3c2410_dma_buffresult result)
-{
-#if 0
- pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n",
- chan->callback_fn, buf, buf->id, buf->size, result);
-#endif
-
- if (chan->callback_fn != NULL) {
- (chan->callback_fn)(chan, buf->id, buf->size, result);
- }
-}
-
-/* s3c2410_dma_start
- *
- * start a dma channel going
-*/
-
-static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
-{
- unsigned long tmp;
- unsigned long flags;
-
- pr_debug("s3c2410_start_dma: channel=%d\n", chan->number);
-
- local_irq_save(flags);
-
- if (chan->state == S3C2410_DMA_RUNNING) {
- pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state);
- local_irq_restore(flags);
- return 0;
- }
-
- chan->state = S3C2410_DMA_RUNNING;
-
- /* check whether there is anything to load, and if not, see
- * if we can find anything to load
- */
-
- if (chan->load_state == S3C2410_DMALOAD_NONE) {
- if (chan->next == NULL) {
- printk(KERN_ERR "dma%d: channel has nothing loaded\n",
- chan->number);
- chan->state = S3C2410_DMA_IDLE;
- local_irq_restore(flags);
- return -EINVAL;
- }
-
- s3c2410_dma_loadbuffer(chan, chan->next);
- }
-
- dbg_showchan(chan);
-
- /* enable the channel */
-
- if (!chan->irq_enabled) {
- enable_irq(chan->irq);
- chan->irq_enabled = 1;
- }
-
- /* start the channel going */
-
- tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
- tmp &= ~S3C2410_DMASKTRIG_STOP;
- tmp |= S3C2410_DMASKTRIG_ON;
- dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
-
- pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
-
-#if 0
- /* the dma buffer loads should take care of clearing the AUTO
- * reloading feature */
- tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
- tmp &= ~S3C2410_DCON_NORELOAD;
- dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
-#endif
-
- s3c2410_dma_call_op(chan, S3C2410_DMAOP_START);
-
- dbg_showchan(chan);
-
- /* if we've only loaded one buffer onto the channel, then chec
- * to see if we have another, and if so, try and load it so when
- * the first buffer is finished, the new one will be loaded onto
- * the channel */
-
- if (chan->next != NULL) {
- if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
-
- if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
- pr_debug("%s: buff not yet loaded, no more todo\n",
- __func__);
- } else {
- chan->load_state = S3C2410_DMALOAD_1RUNNING;
- s3c2410_dma_loadbuffer(chan, chan->next);
- }
-
- } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
- s3c2410_dma_loadbuffer(chan, chan->next);
- }
- }
-
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-/* s3c2410_dma_canload
- *
- * work out if we can queue another buffer into the DMA engine
-*/
-
-static int
-s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
-{
- if (chan->load_state == S3C2410_DMALOAD_NONE ||
- chan->load_state == S3C2410_DMALOAD_1RUNNING)
- return 1;
-
- return 0;
-}
-
-/* s3c2410_dma_enqueue
- *
- * queue an given buffer for dma transfer.
- *
- * id the device driver's id information for this buffer
- * data the physical address of the buffer data
- * size the size of the buffer in bytes
- *
- * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART
- * is checked, and if set, the channel is started. If this flag isn't set,
- * then an error will be returned.
- *
- * It is possible to queue more than one DMA buffer onto a channel at
- * once, and the code will deal with the re-loading of the next buffer
- * when necessary.
-*/
-
-int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
- dma_addr_t data, int size)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
- struct s3c2410_dma_buf *buf;
- unsigned long flags;
-
- if (chan == NULL)
- return -EINVAL;
-
- pr_debug("%s: id=%p, data=%08x, size=%d\n",
- __func__, id, (unsigned int)data, size);
-
- buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
- if (buf == NULL) {
- pr_debug("%s: out of memory (%ld alloc)\n",
- __func__, (long)sizeof(*buf));
- return -ENOMEM;
- }
-
- //pr_debug("%s: new buffer %p\n", __func__, buf);
- //dbg_showchan(chan);
-
- buf->next = NULL;
- buf->data = buf->ptr = data;
- buf->size = size;
- buf->id = id;
- buf->magic = BUF_MAGIC;
-
- local_irq_save(flags);
-
- if (chan->curr == NULL) {
- /* we've got nothing loaded... */
- pr_debug("%s: buffer %p queued onto empty channel\n",
- __func__, buf);
-
- chan->curr = buf;
- chan->end = buf;
- chan->next = NULL;
- } else {
- pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
- chan->number, __func__, buf);
-
- if (chan->end == NULL) {
- pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
- chan->number, __func__, chan);
- } else {
- chan->end->next = buf;
- chan->end = buf;
- }
- }
-
- /* if necessary, update the next buffer field */
- if (chan->next == NULL)
- chan->next = buf;
-
- /* check to see if we can load a buffer */
- if (chan->state == S3C2410_DMA_RUNNING) {
- if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) {
- if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
- printk(KERN_ERR "dma%d: loadbuffer:"
- "timeout loading buffer\n",
- chan->number);
- dbg_showchan(chan);
- local_irq_restore(flags);
- return -EINVAL;
- }
- }
-
- while (s3c2410_dma_canload(chan) && chan->next != NULL) {
- s3c2410_dma_loadbuffer(chan, chan->next);
- }
- } else if (chan->state == S3C2410_DMA_IDLE) {
- if (chan->flags & S3C2410_DMAF_AUTOSTART) {
- s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
- S3C2410_DMAOP_START);
- }
- }
-
- local_irq_restore(flags);
- return 0;
-}
-
-EXPORT_SYMBOL(s3c2410_dma_enqueue);
-
-static inline void
-s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf)
-{
- int magicok = (buf->magic == BUF_MAGIC);
-
- buf->magic = -1;
-
- if (magicok) {
- kmem_cache_free(dma_kmem, buf);
- } else {
- printk("s3c2410_dma_freebuf: buff %p with bad magic\n", buf);
- }
-}
-
-/* s3c2410_dma_lastxfer
- *
- * called when the system is out of buffers, to ensure that the channel
- * is prepared for shutdown.
-*/
-
-static inline void
-s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan)
-{
-#if 0
- pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n",
- chan->number, chan->load_state);
-#endif
-
- switch (chan->load_state) {
- case S3C2410_DMALOAD_NONE:
- break;
-
- case S3C2410_DMALOAD_1LOADED:
- if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
- /* flag error? */
- printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
- chan->number, __func__);
- return;
- }
- break;
-
- case S3C2410_DMALOAD_1LOADED_1RUNNING:
- /* I believe in this case we do not have anything to do
- * until the next buffer comes along, and we turn off the
- * reload */
- return;
-
- default:
- pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
- chan->number, chan->load_state);
- return;
-
- }
-
- /* hopefully this'll shut the damned thing up after the transfer... */
- dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD);
-}
-
-
-#define dmadbg2(x...)
-
-static irqreturn_t
-s3c2410_dma_irq(int irq, void *devpw)
-{
- struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
- struct s3c2410_dma_buf *buf;
-
- buf = chan->curr;
-
- dbg_showchan(chan);
-
- /* modify the channel state */
-
- switch (chan->load_state) {
- case S3C2410_DMALOAD_1RUNNING:
- /* TODO - if we are running only one buffer, we probably
- * want to reload here, and then worry about the buffer
- * callback */
-
- chan->load_state = S3C2410_DMALOAD_NONE;
- break;
-
- case S3C2410_DMALOAD_1LOADED:
- /* iirc, we should go back to NONE loaded here, we
- * had a buffer, and it was never verified as being
- * loaded.
- */
-
- chan->load_state = S3C2410_DMALOAD_NONE;
- break;
-
- case S3C2410_DMALOAD_1LOADED_1RUNNING:
- /* we'll worry about checking to see if another buffer is
- * ready after we've called back the owner. This should
- * ensure we do not wait around too long for the DMA
- * engine to start the next transfer
- */
-
- chan->load_state = S3C2410_DMALOAD_1LOADED;
- break;
-
- case S3C2410_DMALOAD_NONE:
- printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
- chan->number);
- break;
-
- default:
- printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
- chan->number, chan->load_state);
- break;
- }
-
- if (buf != NULL) {
- /* update the chain to make sure that if we load any more
- * buffers when we call the callback function, things should
- * work properly */
-
- chan->curr = buf->next;
- buf->next = NULL;
-
- if (buf->magic != BUF_MAGIC) {
- printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n",
- chan->number, __func__, buf);
- return IRQ_HANDLED;
- }
-
- s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK);
-
- /* free resouces */
- s3c2410_dma_freebuf(buf);
- } else {
- }
-
- /* only reload if the channel is still running... our buffer done
- * routine may have altered the state by requesting the dma channel
- * to stop or shutdown... */
-
- /* todo: check that when the channel is shut-down from inside this
- * function, we cope with unsetting reload, etc */
-
- if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
- unsigned long flags;
-
- switch (chan->load_state) {
- case S3C2410_DMALOAD_1RUNNING:
- /* don't need to do anything for this state */
- break;
-
- case S3C2410_DMALOAD_NONE:
- /* can load buffer immediately */
- break;
-
- case S3C2410_DMALOAD_1LOADED:
- if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
- /* flag error? */
- printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
- chan->number, __func__);
- return IRQ_HANDLED;
- }
-
- break;
-
- case S3C2410_DMALOAD_1LOADED_1RUNNING:
- goto no_load;
-
- default:
- printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n",
- chan->number, chan->load_state);
- return IRQ_HANDLED;
- }
-
- local_irq_save(flags);
- s3c2410_dma_loadbuffer(chan, chan->next);
- local_irq_restore(flags);
- } else {
- s3c2410_dma_lastxfer(chan);
-
- /* see if we can stop this channel.. */
- if (chan->load_state == S3C2410_DMALOAD_NONE) {
- pr_debug("dma%d: end of transfer, stopping channel (%ld)\n",
- chan->number, jiffies);
- s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
- S3C2410_DMAOP_STOP);
- }
- }
-
- no_load:
- return IRQ_HANDLED;
-}
-
-static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
-
-/* s3c2410_request_dma
- *
- * get control of an dma channel
-*/
-
-int s3c2410_dma_request(enum dma_ch channel,
- struct s3c2410_dma_client *client,
- void *dev)
-{
- struct s3c2410_dma_chan *chan;
- unsigned long flags;
- int err;
-
- pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
- channel, client->name, dev);
-
- local_irq_save(flags);
-
- chan = s3c2410_dma_map_channel(channel);
- if (chan == NULL) {
- local_irq_restore(flags);
- return -EBUSY;
- }
-
- dbg_showchan(chan);
-
- chan->client = client;
- chan->in_use = 1;
-
- if (!chan->irq_claimed) {
- pr_debug("dma%d: %s : requesting irq %d\n",
- channel, __func__, chan->irq);
-
- chan->irq_claimed = 1;
- local_irq_restore(flags);
-
- err = request_irq(chan->irq, s3c2410_dma_irq, 0,
- client->name, (void *)chan);
-
- local_irq_save(flags);
-
- if (err) {
- chan->in_use = 0;
- chan->irq_claimed = 0;
- local_irq_restore(flags);
-
- printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
- client->name, chan->irq, chan->number);
- return err;
- }
-
- chan->irq_enabled = 1;
- }
-
- local_irq_restore(flags);
-
- /* need to setup */
-
- pr_debug("%s: channel initialised, %p\n", __func__, chan);
-
- return chan->number | DMACH_LOW_LEVEL;
-}
-
-EXPORT_SYMBOL(s3c2410_dma_request);
-
-/* s3c2410_dma_free
- *
- * release the given channel back to the system, will stop and flush
- * any outstanding transfers, and ensure the channel is ready for the
- * next claimant.
- *
- * Note, although a warning is currently printed if the freeing client
- * info is not the same as the registrant's client info, the free is still
- * allowed to go through.
-*/
-
-int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
- unsigned long flags;
-
- if (chan == NULL)
- return -EINVAL;
-
- local_irq_save(flags);
-
- if (chan->client != client) {
- printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
- channel, chan->client, client);
- }
-
- /* sort out stopping and freeing the channel */
-
- if (chan->state != S3C2410_DMA_IDLE) {
- pr_debug("%s: need to stop dma channel %p\n",
- __func__, chan);
-
- /* possibly flush the channel */
- s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP);
- }
-
- chan->client = NULL;
- chan->in_use = 0;
-
- if (chan->irq_claimed)
- free_irq(chan->irq, (void *)chan);
-
- chan->irq_claimed = 0;
-
- if (!(channel & DMACH_LOW_LEVEL))
- s3c_dma_chan_map[channel] = NULL;
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-EXPORT_SYMBOL(s3c2410_dma_free);
-
-static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan)
-{
- unsigned long flags;
- unsigned long tmp;
-
- pr_debug("%s:\n", __func__);
-
- dbg_showchan(chan);
-
- local_irq_save(flags);
-
- s3c2410_dma_call_op(chan, S3C2410_DMAOP_STOP);
-
- tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
- tmp |= S3C2410_DMASKTRIG_STOP;
- //tmp &= ~S3C2410_DMASKTRIG_ON;
- dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
-
-#if 0
- /* should also clear interrupts, according to WinCE BSP */
- tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
- tmp |= S3C2410_DCON_NORELOAD;
- dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
-#endif
-
- /* should stop do this, or should we wait for flush? */
- chan->state = S3C2410_DMA_IDLE;
- chan->load_state = S3C2410_DMALOAD_NONE;
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-static void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan)
-{
- unsigned long tmp;
- unsigned int timeout = 0x10000;
-
- while (timeout-- > 0) {
- tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
-
- if (!(tmp & S3C2410_DMASKTRIG_ON))
- return;
- }
-
- pr_debug("dma%d: failed to stop?\n", chan->number);
-}
-
-
-/* s3c2410_dma_flush
- *
- * stop the channel, and remove all current and pending transfers
-*/
-
-static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
-{
- struct s3c2410_dma_buf *buf, *next;
- unsigned long flags;
-
- pr_debug("%s: chan %p (%d)\n", __func__, chan, chan->number);
-
- dbg_showchan(chan);
-
- local_irq_save(flags);
-
- if (chan->state != S3C2410_DMA_IDLE) {
- pr_debug("%s: stopping channel...\n", __func__ );
- s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
- }
-
- buf = chan->curr;
- if (buf == NULL)
- buf = chan->next;
-
- chan->curr = chan->next = chan->end = NULL;
-
- if (buf != NULL) {
- for ( ; buf != NULL; buf = next) {
- next = buf->next;
-
- pr_debug("%s: free buffer %p, next %p\n",
- __func__, buf, buf->next);
-
- s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT);
- s3c2410_dma_freebuf(buf);
- }
- }
-
- dbg_showregs(chan);
-
- s3c2410_dma_waitforstop(chan);
-
-#if 0
- /* should also clear interrupts, according to WinCE BSP */
- {
- unsigned long tmp;
-
- tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
- tmp |= S3C2410_DCON_NORELOAD;
- dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
- }
-#endif
-
- dbg_showregs(chan);
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- dbg_showchan(chan);
-
- /* if we've only loaded one buffer onto the channel, then chec
- * to see if we have another, and if so, try and load it so when
- * the first buffer is finished, the new one will be loaded onto
- * the channel */
-
- if (chan->next != NULL) {
- if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
-
- if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
- pr_debug("%s: buff not yet loaded, no more todo\n",
- __func__);
- } else {
- chan->load_state = S3C2410_DMALOAD_1RUNNING;
- s3c2410_dma_loadbuffer(chan, chan->next);
- }
-
- } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
- s3c2410_dma_loadbuffer(chan, chan->next);
- }
- }
-
-
- local_irq_restore(flags);
-
- return 0;
-
-}
-
-int
-s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
-
- if (chan == NULL)
- return -EINVAL;
-
- switch (op) {
- case S3C2410_DMAOP_START:
- return s3c2410_dma_start(chan);
-
- case S3C2410_DMAOP_STOP:
- return s3c2410_dma_dostop(chan);
-
- case S3C2410_DMAOP_PAUSE:
- case S3C2410_DMAOP_RESUME:
- return -ENOENT;
-
- case S3C2410_DMAOP_FLUSH:
- return s3c2410_dma_flush(chan);
-
- case S3C2410_DMAOP_STARTED:
- return s3c2410_dma_started(chan);
-
- case S3C2410_DMAOP_TIMEOUT:
- return 0;
-
- }
-
- return -ENOENT; /* unknown, don't bother */
-}
-
-EXPORT_SYMBOL(s3c2410_dma_ctrl);
-
-/* DMA configuration for each channel
- *
- * DISRCC -> source of the DMA (AHB,APB)
- * DISRC -> source address of the DMA
- * DIDSTC -> destination of the DMA (AHB,APD)
- * DIDST -> destination address of the DMA
-*/
-
-/* s3c2410_dma_config
- *
- * xfersize: size of unit in bytes (1,2,4)
-*/
-
-int s3c2410_dma_config(enum dma_ch channel,
- int xferunit)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
- unsigned int dcon;
-
- pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit);
-
- if (chan == NULL)
- return -EINVAL;
-
- dcon = chan->dcon & dma_sel.dcon_mask;
- pr_debug("%s: dcon is %08x\n", __func__, dcon);
-
- switch (chan->req_ch) {
- case DMACH_I2S_IN:
- case DMACH_I2S_OUT:
- case DMACH_PCM_IN:
- case DMACH_PCM_OUT:
- case DMACH_MIC_IN:
- default:
- dcon |= S3C2410_DCON_HANDSHAKE;
- dcon |= S3C2410_DCON_SYNC_PCLK;
- break;
-
- case DMACH_SDI:
- /* note, ensure if need HANDSHAKE or not */
- dcon |= S3C2410_DCON_SYNC_PCLK;
- break;
-
- case DMACH_XD0:
- case DMACH_XD1:
- dcon |= S3C2410_DCON_HANDSHAKE;
- dcon |= S3C2410_DCON_SYNC_HCLK;
- break;
- }
-
- switch (xferunit) {
- case 1:
- dcon |= S3C2410_DCON_BYTE;
- break;
-
- case 2:
- dcon |= S3C2410_DCON_HALFWORD;
- break;
-
- case 4:
- dcon |= S3C2410_DCON_WORD;
- break;
-
- default:
- pr_debug("%s: bad transfer size %d\n", __func__, xferunit);
- return -EINVAL;
- }
-
- dcon |= S3C2410_DCON_HWTRIG;
- dcon |= S3C2410_DCON_INTREQ;
-
- pr_debug("%s: dcon now %08x\n", __func__, dcon);
-
- chan->dcon = dcon;
- chan->xfer_unit = xferunit;
-
- return 0;
-}
-
-EXPORT_SYMBOL(s3c2410_dma_config);
-
-
-/* s3c2410_dma_devconfig
- *
- * configure the dma source/destination hardware type and address
- *
- * source: DMA_FROM_DEVICE: source is hardware
- * DMA_TO_DEVICE: source is memory
- *
- * devaddr: physical address of the source
-*/
-
-int s3c2410_dma_devconfig(enum dma_ch channel,
- enum dma_data_direction source,
- unsigned long devaddr)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
- unsigned int hwcfg;
-
- if (chan == NULL)
- return -EINVAL;
-
- pr_debug("%s: source=%d, devaddr=%08lx\n",
- __func__, (int)source, devaddr);
-
- chan->source = source;
- chan->dev_addr = devaddr;
-
- switch (chan->req_ch) {
- case DMACH_XD0:
- case DMACH_XD1:
- hwcfg = 0; /* AHB */
- break;
-
- default:
- hwcfg = S3C2410_DISRCC_APB;
- }
-
- /* always assume our peripheral desintation is a fixed
- * address in memory. */
- hwcfg |= S3C2410_DISRCC_INC;
-
- switch (source) {
- case DMA_FROM_DEVICE:
- /* source is hardware */
- pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
- __func__, devaddr, hwcfg);
- dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3);
- dma_wrreg(chan, S3C2410_DMA_DISRC, devaddr);
- dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0));
-
- chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
- break;
-
- case DMA_TO_DEVICE:
- /* source is memory */
- pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
- __func__, devaddr, hwcfg);
- dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0));
- dma_wrreg(chan, S3C2410_DMA_DIDST, devaddr);
- dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3);
-
- chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC);
- break;
-
- default:
- printk(KERN_ERR "dma%d: invalid source type (%d)\n",
- channel, source);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(s3c2410_dma_devconfig);
-
-/* s3c2410_dma_getposition
- *
- * returns the current transfer points for the dma source and destination
-*/
-
-int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
-
- if (chan == NULL)
- return -EINVAL;
-
- if (src != NULL)
- *src = dma_rdreg(chan, S3C2410_DMA_DCSRC);
-
- if (dst != NULL)
- *dst = dma_rdreg(chan, S3C2410_DMA_DCDST);
-
- return 0;
-}
-
-EXPORT_SYMBOL(s3c2410_dma_getposition);
-
-/* system core operations */
-
-#ifdef CONFIG_PM
-
-static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp)
-{
- printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
-
- if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) {
- /* the dma channel is still working, which is probably
- * a bad thing to do over suspend/resume. We stop the
- * channel and assume that the client is either going to
- * retry after resume, or that it is broken.
- */
-
- printk(KERN_INFO "dma: stopping channel %d due to suspend\n",
- cp->number);
-
- s3c2410_dma_dostop(cp);
- }
-}
-
-static int s3c2410_dma_suspend(void)
-{
- struct s3c2410_dma_chan *cp = s3c2410_chans;
- int channel;
-
- for (channel = 0; channel < dma_channels; cp++, channel++)
- s3c2410_dma_suspend_chan(cp);
-
- return 0;
-}
-
-static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
-{
- unsigned int no = cp->number | DMACH_LOW_LEVEL;
-
- /* restore channel's hardware configuration */
-
- if (!cp->in_use)
- return;
-
- printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);
-
- s3c2410_dma_config(no, cp->xfer_unit);
- s3c2410_dma_devconfig(no, cp->source, cp->dev_addr);
-
- /* re-select the dma source for this channel */
-
- if (cp->map != NULL)
- dma_sel.select(cp, cp->map);
-}
-
-static void s3c2410_dma_resume(void)
-{
- struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
- int channel;
-
- for (channel = dma_channels - 1; channel >= 0; cp--, channel--)
- s3c2410_dma_resume_chan(cp);
-}
-
-#else
-#define s3c2410_dma_suspend NULL
-#define s3c2410_dma_resume NULL
-#endif /* CONFIG_PM */
-
-struct syscore_ops dma_syscore_ops = {
- .suspend = s3c2410_dma_suspend,
- .resume = s3c2410_dma_resume,
-};
-
-/* kmem cache implementation */
-
-static void s3c2410_dma_cache_ctor(void *p)
-{
- memset(p, 0, sizeof(struct s3c2410_dma_buf));
-}
-
-/* initialisation code */
-
-static int __init s3c24xx_dma_syscore_init(void)
-{
- register_syscore_ops(&dma_syscore_ops);
-
- return 0;
-}
-
-late_initcall(s3c24xx_dma_syscore_init);
-
-int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq,
- unsigned int stride)
-{
- struct s3c2410_dma_chan *cp;
- int channel;
- int ret;
-
- printk("S3C24XX DMA Driver, Copyright 2003-2006 Simtec Electronics\n");
-
- dma_channels = channels;
-
- dma_base = ioremap(S3C24XX_PA_DMA, stride * channels);
- if (dma_base == NULL) {
- printk(KERN_ERR "dma failed to remap register block\n");
- return -ENOMEM;
- }
-
- dma_kmem = kmem_cache_create("dma_desc",
- sizeof(struct s3c2410_dma_buf), 0,
- SLAB_HWCACHE_ALIGN,
- s3c2410_dma_cache_ctor);
-
- if (dma_kmem == NULL) {
- printk(KERN_ERR "dma failed to make kmem cache\n");
- ret = -ENOMEM;
- goto err;
- }
-
- for (channel = 0; channel < channels; channel++) {
- cp = &s3c2410_chans[channel];
-
- memset(cp, 0, sizeof(struct s3c2410_dma_chan));
-
- /* dma channel irqs are in order.. */
- cp->number = channel;
- cp->irq = channel + irq;
- cp->regs = dma_base + (channel * stride);
-
- /* point current stats somewhere */
- cp->stats = &cp->stats_store;
- cp->stats_store.timeout_shortest = LONG_MAX;
-
- /* basic channel configuration */
-
- cp->load_timeout = 1<<18;
-
- printk("DMA channel %d at %p, irq %d\n",
- cp->number, cp->regs, cp->irq);
- }
-
- return 0;
-
- err:
- kmem_cache_destroy(dma_kmem);
- iounmap(dma_base);
- dma_base = NULL;
- return ret;
-}
-
-int __init s3c2410_dma_init(void)
-{
- return s3c24xx_dma_init(4, IRQ_DMA0, 0x40);
-}
-
-static inline int is_channel_valid(unsigned int channel)
-{
- return (channel & DMA_CH_VALID);
-}
-
-static struct s3c24xx_dma_order *dma_order;
-
-
-/* s3c2410_dma_map_channel()
- *
- * turn the virtual channel number into a real, and un-used hardware
- * channel.
- *
- * first, try the dma ordering given to us by either the relevant
- * dma code, or the board. Then just find the first usable free
- * channel
-*/
-
-static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel)
-{
- struct s3c24xx_dma_order_ch *ord = NULL;
- struct s3c24xx_dma_map *ch_map;
- struct s3c2410_dma_chan *dmach;
- int ch;
-
- if (dma_sel.map == NULL || channel > dma_sel.map_size)
- return NULL;
-
- ch_map = dma_sel.map + channel;
-
- /* first, try the board mapping */
-
- if (dma_order) {
- ord = &dma_order->channels[channel];
-
- for (ch = 0; ch < dma_channels; ch++) {
- int tmp;
- if (!is_channel_valid(ord->list[ch]))
- continue;
-
- tmp = ord->list[ch] & ~DMA_CH_VALID;
- if (s3c2410_chans[tmp].in_use == 0) {
- ch = tmp;
- goto found;
- }
- }
-
- if (ord->flags & DMA_CH_NEVER)
- return NULL;
- }
-
- /* second, search the channel map for first free */
-
- for (ch = 0; ch < dma_channels; ch++) {
- if (!is_channel_valid(ch_map->channels[ch]))
- continue;
-
- if (s3c2410_chans[ch].in_use == 0) {
- printk("mapped channel %d to %d\n", channel, ch);
- break;
- }
- }
-
- if (ch >= dma_channels)
- return NULL;
-
- /* update our channel mapping */
-
- found:
- dmach = &s3c2410_chans[ch];
- dmach->map = ch_map;
- dmach->req_ch = channel;
- s3c_dma_chan_map[channel] = dmach;
-
- /* select the channel */
-
- (dma_sel.select)(dmach, ch_map);
-
- return dmach;
-}
-
-static int s3c24xx_dma_check_entry(struct s3c24xx_dma_map *map, int ch)
-{
- return 0;
-}
-
-int __init s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel)
-{
- struct s3c24xx_dma_map *nmap;
- size_t map_sz = sizeof(*nmap) * sel->map_size;
- int ptr;
-
- nmap = kmemdup(sel->map, map_sz, GFP_KERNEL);
- if (nmap == NULL)
- return -ENOMEM;
-
- memcpy(&dma_sel, sel, sizeof(*sel));
-
- dma_sel.map = nmap;
-
- for (ptr = 0; ptr < sel->map_size; ptr++)
- s3c24xx_dma_check_entry(nmap+ptr, ptr);
-
- return 0;
-}
-
-int __init s3c24xx_dma_order_set(struct s3c24xx_dma_order *ord)
-{
- struct s3c24xx_dma_order *nord = dma_order;
-
- if (nord == NULL)
- nord = kmalloc(sizeof(struct s3c24xx_dma_order), GFP_KERNEL);
-
- if (nord == NULL) {
- printk(KERN_ERR "no memory to store dma channel order\n");
- return -ENOMEM;
- }
-
- dma_order = nord;
- memcpy(nord, ord, sizeof(struct s3c24xx_dma_order));
- return 0;
-}
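The removed s3c2410_dma_map_channel() above implements a two-phase lookup: honour the board-supplied channel ordering first, then fall back to the first free entry in the SoC's channel map. Below is a minimal standalone C sketch of that strategy; the structures and the CH_VALID flag are simplified stand-ins for the removed s3c24xx_dma_order/s3c24xx_dma_map types, not kernel APIs.

/*
 * Standalone sketch of the two-phase channel lookup, using simplified
 * stand-ins for the removed structures. Illustrative only.
 */
#include <stdio.h>

#define NCHANS   4
#define CH_VALID (1u << 31)	/* order-list entry is usable */

static struct { int in_use; } chans[NCHANS];

/* ord: optional board preference list of (hw channel | CH_VALID) entries */
static int map_channel(const unsigned int *ord, int nord)
{
	int i;

	/* phase 1: board-preferred ordering */
	for (i = 0; ord && i < nord; i++) {
		unsigned int hw = ord[i];

		if (!(hw & CH_VALID))
			continue;
		hw &= ~CH_VALID;
		if (!chans[hw].in_use) {
			chans[hw].in_use = 1;
			return (int)hw;
		}
	}

	/* phase 2: first free hardware channel in the map */
	for (i = 0; i < NCHANS; i++) {
		if (!chans[i].in_use) {
			chans[i].in_use = 1;
			return i;
		}
	}

	return -1;	/* nothing available */
}

int main(void)
{
	unsigned int prefer[] = { 2 | CH_VALID, 0 | CH_VALID };

	printf("%d\n", map_channel(prefer, 2));	/* 2: first preference free */
	printf("%d\n", map_channel(prefer, 2));	/* 0: falls to next preference */
	printf("%d\n", map_channel(NULL, 0));	/* 1: first free in the map */
	return 0;
}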
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index b55da1d8cd8f..9e8117198e0c 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -15,8 +15,6 @@
#include <linux/device.h>
-#define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */
-
/* We use `virtual` dma channels to hide the fact we have only a limited
 * number of DMA channels, and that not all of them (depending on the device)
 * can be attached to any DMA source. We therefore let the DMA core handle
@@ -54,161 +52,4 @@ enum dma_ch {
DMACH_MAX, /* the end entry */
};
-static inline bool samsung_dma_has_circular(void)
-{
- return false;
-}
-
-static inline bool samsung_dma_is_dmadev(void)
-{
- return false;
-}
-
-#include <plat/dma.h>
-
-#define DMACH_LOW_LEVEL	(1<<28)	/* use this to specify hardware channel no */
-
-/* we have 4 dma channels (6 on the S3C2443 and S3C2416) */
-#if !defined(CONFIG_CPU_S3C2443) && !defined(CONFIG_CPU_S3C2416)
-#define S3C_DMA_CHANNELS (4)
-#else
-#define S3C_DMA_CHANNELS (6)
-#endif
-
-/* types */
-
-enum s3c2410_dma_state {
- S3C2410_DMA_IDLE,
- S3C2410_DMA_RUNNING,
- S3C2410_DMA_PAUSED
-};
-
-/* enum s3c2410_dma_loadst
- *
- * This represents the state of the DMA engine with respect to the loaded /
- * running transfers. Since we have no way of querying the hardware for the
- * exact state of its transfers, we track that state ourselves so that we
- * can decide whether another buffer may be loaded or started.
- *
- * S3C2410_DMA_NONE
- *
- * There are no buffers loaded (the channel should be inactive)
- *
- * S3C2410_DMA_1LOADED
- *
- * There is one buffer loaded; however, it has not been confirmed to be
- * loaded by the DMA engine. This may be because the channel is not
- * yet running, or the DMA driver decided that it was too costly to
- * sit and wait for it to happen.
- *
- * S3C2410_DMA_1RUNNING
- *
- * The buffer has been confirmed running, and has not yet finished
- *
- * S3C2410_DMA_1LOADED_1RUNNING
- *
- * There is a buffer waiting to be loaded by the DMA engine, and one
- * currently running.
-*/
-
-enum s3c2410_dma_loadst {
- S3C2410_DMALOAD_NONE,
- S3C2410_DMALOAD_1LOADED,
- S3C2410_DMALOAD_1RUNNING,
- S3C2410_DMALOAD_1LOADED_1RUNNING,
-};
-
-
-/* flags */
-
-#define S3C2410_DMAF_SLOW (1<<0) /* slow, so don't worry about
- * waiting for reloads */
-#define S3C2410_DMAF_AUTOSTART (1<<1) /* auto-start if buffer queued */
-
-#define S3C2410_DMAF_CIRCULAR (1 << 2) /* no circular dma support */
-
-/* dma buffer */
-
-struct s3c2410_dma_buf;
-
-/* s3c2410_dma_buf
- *
- * internally used buffer structure to describe a queued or running
- * buffer.
-*/
-
-struct s3c2410_dma_buf {
- struct s3c2410_dma_buf *next;
- int magic; /* magic */
- int size; /* buffer size in bytes */
- dma_addr_t data; /* start of DMA data */
- dma_addr_t ptr; /* where the DMA got to [1] */
- void *id; /* client's id */
-};
-
-/* [1] is this updated for both recv/send modes? */
-
-struct s3c2410_dma_stats {
- unsigned long loads;
- unsigned long timeout_longest;
- unsigned long timeout_shortest;
- unsigned long timeout_avg;
- unsigned long timeout_failed;
-};
-
-struct s3c2410_dma_map;
-
-/* struct s3c2410_dma_chan
- *
- * full state information for each DMA channel
-*/
-
-struct s3c2410_dma_chan {
- /* channel state flags and information */
- unsigned char number; /* number of this dma channel */
- unsigned char in_use; /* channel allocated */
- unsigned char irq_claimed; /* irq claimed for channel */
- unsigned char irq_enabled; /* irq enabled for channel */
-	unsigned char		 xfer_unit;	/* size of a transfer */
-
- /* channel state */
-
- enum s3c2410_dma_state state;
- enum s3c2410_dma_loadst load_state;
- struct s3c2410_dma_client *client;
-
- /* channel configuration */
- enum dma_data_direction source;
- enum dma_ch req_ch;
- unsigned long dev_addr;
- unsigned long load_timeout;
- unsigned int flags; /* channel flags */
-
- struct s3c24xx_dma_map *map; /* channel hw maps */
-
- /* channel's hardware position and configuration */
-	void __iomem		*regs;		/* channel's registers */
- void __iomem *addr_reg; /* data address register */
- unsigned int irq; /* channel irq */
- unsigned long dcon; /* default value of DCON */
-
- /* driver handles */
- s3c2410_dma_cbfn_t callback_fn; /* buffer done callback */
- s3c2410_dma_opfn_t op_fn; /* channel op callback */
-
- /* stats gathering */
- struct s3c2410_dma_stats *stats;
- struct s3c2410_dma_stats stats_store;
-
- /* buffer list and information */
- struct s3c2410_dma_buf *curr; /* current dma buffer */
- struct s3c2410_dma_buf *next; /* next buffer to load */
- struct s3c2410_dma_buf *end; /* end of queue */
-
- /* system device */
- struct device dev;
-};
-
-typedef unsigned long dma_device_t;
-
#endif /* __ASM_ARCH_DMA_H */
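The s3c2410_dma_loadst comment removed above documents a small buffer-occupancy state machine. Below is an illustrative, simplified standalone model of those four states; the event helpers (on_load/on_confirm/on_done) are invented for the example and do not correspond to kernel functions.

/* Simplified model of the documented load states. Illustrative only. */
#include <assert.h>

enum loadst { LOAD_NONE, LOAD_1LOADED, LOAD_1RUNNING, LOAD_1LOADED_1RUNNING };

static enum loadst on_load(enum loadst s)	/* a buffer was written to hw */
{
	switch (s) {
	case LOAD_NONE:     return LOAD_1LOADED;
	case LOAD_1RUNNING: return LOAD_1LOADED_1RUNNING;
	default:            return s;	/* no free load slot */
	}
}

static enum loadst on_confirm(enum loadst s)	/* hw confirmed the load started */
{
	return s == LOAD_1LOADED ? LOAD_1RUNNING : s;
}

static enum loadst on_done(enum loadst s)	/* running buffer completed */
{
	switch (s) {
	case LOAD_1RUNNING:         return LOAD_NONE;
	case LOAD_1LOADED_1RUNNING: return LOAD_1RUNNING;
	default:                    return s;
	}
}

int main(void)
{
	enum loadst s = LOAD_NONE;

	s = on_load(s);      assert(s == LOAD_1LOADED);
	s = on_confirm(s);   assert(s == LOAD_1RUNNING);
	s = on_load(s);      assert(s == LOAD_1LOADED_1RUNNING);
	s = on_done(s);      assert(s == LOAD_1RUNNING);
	s = on_done(s);      assert(s == LOAD_NONE);
	return 0;
}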
diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
index 059b1fc85037..096e14073bd9 100644
--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
@@ -51,21 +51,6 @@ enum dma_ch {
DMACH_MAX = 32
};
-struct s3c2410_dma_client {
- char *name;
-};
-
-static inline bool samsung_dma_has_circular(void)
-{
- return true;
-}
-
-static inline bool samsung_dma_is_dmadev(void)
-{
- return true;
-}
-
#include <linux/amba/pl08x.h>
-#include <plat/dma-ops.h>
#endif /* __ASM_ARCH_IRQ_H */
diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile
index f1114d11fe13..61ff91e76e0a 100644
--- a/arch/arm/mach-sa1100/Makefile
+++ b/arch/arm/mach-sa1100/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := clock.o generic.o irq.o time.o #nmi-oopser.o
+obj-y := clock.o generic.o irq.o #nmi-oopser.o
# Specific board support
obj-$(CONFIG_SA1100_ASSABET) += assabet.o
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 7dd894ece9ae..d28ecb9ef172 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -37,7 +37,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
#include <asm/mach/map.h>
#include <mach/assabet.h>
#include <linux/platform_data/mfd-mcp-sa11x0.h>
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index 03c75a811cb0..cbf53bb9c814 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -119,6 +119,17 @@ static DEFINE_CLK(gpio27, &clk_gpio27_ops);
static DEFINE_CLK(cpu, &clk_cpu_ops);
+static unsigned long clk_36864_get_rate(struct clk *clk)
+{
+ return 3686400;
+}
+
+static struct clkops clk_36864_ops = {
+ .get_rate = clk_36864_get_rate,
+};
+
+static DEFINE_CLK(36864, &clk_36864_ops);
+
static struct clk_lookup sa11xx_clkregs[] = {
CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27),
CLKDEV_INIT("sa1100-rtc", NULL, NULL),
@@ -126,6 +137,7 @@ static struct clk_lookup sa11xx_clkregs[] = {
CLKDEV_INIT("sa11x0-pcmcia", NULL, &clk_cpu),
/* sa1111 names devices using internal offsets, PCMCIA is at 0x1800 */
CLKDEV_INIT("1800", NULL, &clk_cpu),
+ CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864),
};
static int __init sa11xx_clk_init(void)
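The hunk above adds a fixed 3.6864 MHz clock and exposes it through clkdev under the connection id "OSTIMER0". A hypothetical consumer-side sketch (not part of this patch) of how such a clock would be looked up and its rate read; ostimer_rate() is an invented helper:

/*
 * Hypothetical consumer-side sketch: with the
 * CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864) entry added above, a driver
 * can look the clock up by connection id and read its fixed rate.
 */
#include <linux/clk.h>
#include <linux/err.h>

static unsigned long ostimer_rate(void)
{
	struct clk *clk = clk_get(NULL, "OSTIMER0");
	unsigned long rate;

	if (IS_ERR(clk))
		return 3686400;		/* fall back to the known fixed rate */

	rate = clk_get_rate(clk);	/* 3686400, from clk_36864_get_rate() */
	clk_put(clk);
	return rate;
}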
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index b90c7d828391..3cc2b71e16f0 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -43,7 +43,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
#include <asm/hardware/scoop.h>
#include <asm/mach/sharpsl_param.h>
@@ -371,8 +371,7 @@ static void __init collie_init(void)
PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS |
PPC_TXD1 | PPC_TXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM;
- PWER = _COLLIE_GPIO_AC_IN | _COLLIE_GPIO_CO | _COLLIE_GPIO_ON_KEY |
- _COLLIE_GPIO_WAKEUP | _COLLIE_GPIO_nREMOCON_INT | PWER_RTC;
+ PWER = 0;
PGSR = _COLLIE_GPIO_nREMOCON_ON;
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index d4ea142c4edd..40e0d8619a2d 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -33,6 +33,7 @@
#include <mach/irqs.h>
#include "generic.h"
+#include <clocksource/pxa.h>
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);
@@ -369,6 +370,11 @@ void __init sa1100_map_io(void)
iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc));
}
+void __init sa1100_timer_init(void)
+{
+ pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x90000000), 3686400);
+}
+
/*
* Disable the memory bus request/grant signals on the SA1110 to
* ensure that we don't receive spurious memory requests. We set
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index 3c43219bc881..c6b412054a3c 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -18,7 +18,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index 5be54c214c7c..118338efd790 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -18,7 +18,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-sa1100/include/mach/irqs.h b/arch/arm/mach-sa1100/include/mach/irqs.h
index de0983494c7e..734e30e406a3 100644
--- a/arch/arm/mach-sa1100/include/mach/irqs.h
+++ b/arch/arm/mach-sa1100/include/mach/irqs.h
@@ -8,17 +8,17 @@
* 2001/11/14 RMK Cleaned up and standardised a lot of the IRQs.
*/
-#define IRQ_GPIO0 1
-#define IRQ_GPIO1 2
-#define IRQ_GPIO2 3
-#define IRQ_GPIO3 4
-#define IRQ_GPIO4 5
-#define IRQ_GPIO5 6
-#define IRQ_GPIO6 7
-#define IRQ_GPIO7 8
-#define IRQ_GPIO8 9
-#define IRQ_GPIO9 10
-#define IRQ_GPIO10 11
+#define IRQ_GPIO0_SC 1
+#define IRQ_GPIO1_SC 2
+#define IRQ_GPIO2_SC 3
+#define IRQ_GPIO3_SC 4
+#define IRQ_GPIO4_SC 5
+#define IRQ_GPIO5_SC 6
+#define IRQ_GPIO6_SC 7
+#define IRQ_GPIO7_SC 8
+#define IRQ_GPIO8_SC 9
+#define IRQ_GPIO9_SC 10
+#define IRQ_GPIO10_SC 11
#define IRQ_GPIO11_27 12
#define IRQ_LCD 13 /* LCD controller */
#define IRQ_Ser0UDC 14 /* Ser. port 0 UDC */
@@ -41,32 +41,43 @@
#define IRQ_RTC1Hz 31 /* RTC 1 Hz clock */
#define IRQ_RTCAlrm 32 /* RTC Alarm */
-#define IRQ_GPIO11 33
-#define IRQ_GPIO12 34
-#define IRQ_GPIO13 35
-#define IRQ_GPIO14 36
-#define IRQ_GPIO15 37
-#define IRQ_GPIO16 38
-#define IRQ_GPIO17 39
-#define IRQ_GPIO18 40
-#define IRQ_GPIO19 41
-#define IRQ_GPIO20 42
-#define IRQ_GPIO21 43
-#define IRQ_GPIO22 44
-#define IRQ_GPIO23 45
-#define IRQ_GPIO24 46
-#define IRQ_GPIO25 47
-#define IRQ_GPIO26 48
-#define IRQ_GPIO27 49
+#define IRQ_GPIO0 33
+#define IRQ_GPIO1 34
+#define IRQ_GPIO2 35
+#define IRQ_GPIO3 36
+#define IRQ_GPIO4 37
+#define IRQ_GPIO5 38
+#define IRQ_GPIO6 39
+#define IRQ_GPIO7 40
+#define IRQ_GPIO8 41
+#define IRQ_GPIO9 42
+#define IRQ_GPIO10 43
+#define IRQ_GPIO11 44
+#define IRQ_GPIO12 45
+#define IRQ_GPIO13 46
+#define IRQ_GPIO14 47
+#define IRQ_GPIO15 48
+#define IRQ_GPIO16 49
+#define IRQ_GPIO17 50
+#define IRQ_GPIO18 51
+#define IRQ_GPIO19 52
+#define IRQ_GPIO20 53
+#define IRQ_GPIO21 54
+#define IRQ_GPIO22 55
+#define IRQ_GPIO23 56
+#define IRQ_GPIO24 57
+#define IRQ_GPIO25 58
+#define IRQ_GPIO26 59
+#define IRQ_GPIO27 60
/*
* The next 16 interrupts are for board specific purposes. Since
* the kernel can only run on one machine at a time, we can re-use
* these. If you need more, increase IRQ_BOARD_END, but keep it
- * within sensible limits. IRQs 49 to 64 are available.
+ * within sensible limits. IRQs 61 to 76 are available.
*/
-#define IRQ_BOARD_START 50
-#define IRQ_BOARD_END 66
+#define IRQ_BOARD_START 61
+#define IRQ_BOARD_END 77
/*
* Figure out the MAX IRQ number.
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 63e2901db416..65aebfa66fe5 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -80,170 +80,6 @@ static struct irq_domain_ops sa1100_normal_irqdomain_ops = {
static struct irq_domain *sa1100_normal_irqdomain;
-/*
- * SA1100 GPIO edge detection for IRQs:
- * IRQs are generated on Falling-Edge, Rising-Edge, or both.
- * Use this instead of directly setting GRER/GFER.
- */
-static int GPIO_IRQ_rising_edge;
-static int GPIO_IRQ_falling_edge;
-static int GPIO_IRQ_mask = (1 << 11) - 1;
-
-static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
-{
- unsigned int mask;
-
- mask = BIT(d->hwirq);
-
- if (type == IRQ_TYPE_PROBE) {
- if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
- return 0;
- type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
- }
-
- if (type & IRQ_TYPE_EDGE_RISING) {
- GPIO_IRQ_rising_edge |= mask;
- } else
- GPIO_IRQ_rising_edge &= ~mask;
- if (type & IRQ_TYPE_EDGE_FALLING) {
- GPIO_IRQ_falling_edge |= mask;
- } else
- GPIO_IRQ_falling_edge &= ~mask;
-
- GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
- GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
-
- return 0;
-}
-
-/*
- * GPIO IRQs must be acknowledged.
- */
-static void sa1100_gpio_ack(struct irq_data *d)
-{
- GEDR = BIT(d->hwirq);
-}
-
-static int sa1100_gpio_wake(struct irq_data *d, unsigned int on)
-{
- if (on)
- PWER |= BIT(d->hwirq);
- else
- PWER &= ~BIT(d->hwirq);
- return 0;
-}
-
-/*
- * This is for IRQs from 0 to 10.
- */
-static struct irq_chip sa1100_low_gpio_chip = {
- .name = "GPIO-l",
- .irq_ack = sa1100_gpio_ack,
- .irq_mask = sa1100_mask_irq,
- .irq_unmask = sa1100_unmask_irq,
- .irq_set_type = sa1100_gpio_type,
- .irq_set_wake = sa1100_gpio_wake,
-};
-
-static int sa1100_low_gpio_irqdomain_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip,
- handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-
- return 0;
-}
-
-static struct irq_domain_ops sa1100_low_gpio_irqdomain_ops = {
- .map = sa1100_low_gpio_irqdomain_map,
- .xlate = irq_domain_xlate_onetwocell,
-};
-
-static struct irq_domain *sa1100_low_gpio_irqdomain;
-
-/*
- * IRQ11 (GPIO11 through 27) handler. We enter here with the
- * irq_controller_lock held, and IRQs disabled. Decode the IRQ
- * and call the handler.
- */
-static void
-sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned int mask;
-
- mask = GEDR & 0xfffff800;
- do {
- /*
- * clear down all currently active IRQ sources.
- * We will be processing them all.
- */
- GEDR = mask;
-
- irq = IRQ_GPIO11;
- mask >>= 11;
- do {
- if (mask & 1)
- generic_handle_irq(irq);
- mask >>= 1;
- irq++;
- } while (mask);
-
- mask = GEDR & 0xfffff800;
- } while (mask);
-}
-
-/*
- * Like GPIO0 to 10, GPIO11-27 IRQs need to be handled specially.
- * In addition, the IRQs are all collected up into one bit in the
- * interrupt controller registers.
- */
-static void sa1100_high_gpio_mask(struct irq_data *d)
-{
- unsigned int mask = BIT(d->hwirq);
-
- GPIO_IRQ_mask &= ~mask;
-
- GRER &= ~mask;
- GFER &= ~mask;
-}
-
-static void sa1100_high_gpio_unmask(struct irq_data *d)
-{
- unsigned int mask = BIT(d->hwirq);
-
- GPIO_IRQ_mask |= mask;
-
- GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
- GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
-}
-
-static struct irq_chip sa1100_high_gpio_chip = {
- .name = "GPIO-h",
- .irq_ack = sa1100_gpio_ack,
- .irq_mask = sa1100_high_gpio_mask,
- .irq_unmask = sa1100_high_gpio_unmask,
- .irq_set_type = sa1100_gpio_type,
- .irq_set_wake = sa1100_gpio_wake,
-};
-
-static int sa1100_high_gpio_irqdomain_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip,
- handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-
- return 0;
-}
-
-static struct irq_domain_ops sa1100_high_gpio_irqdomain_ops = {
- .map = sa1100_high_gpio_irqdomain_map,
- .xlate = irq_domain_xlate_onetwocell,
-};
-
-static struct irq_domain *sa1100_high_gpio_irqdomain;
-
static struct resource irq_resource =
DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs");
@@ -270,17 +106,6 @@ static int sa1100irq_suspend(void)
IC_GPIO6|IC_GPIO5|IC_GPIO4|IC_GPIO3|IC_GPIO2|
IC_GPIO1|IC_GPIO0);
- /*
- * Set the appropriate edges for wakeup.
- */
- GRER = PWER & GPIO_IRQ_rising_edge;
- GFER = PWER & GPIO_IRQ_falling_edge;
-
- /*
- * Clear any pending GPIO interrupts.
- */
- GEDR = GEDR;
-
return 0;
}
@@ -292,9 +117,6 @@ static void sa1100irq_resume(void)
ICCR = st->iccr;
ICLR = st->iclr;
- GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
- GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
-
ICMR = st->icmr;
}
}
@@ -325,7 +147,8 @@ sa1100_handle_irq(struct pt_regs *regs)
if (mask == 0)
break;
- handle_IRQ(ffs(mask) - 1 + IRQ_GPIO0, regs);
+ handle_domain_irq(sa1100_normal_irqdomain,
+ ffs(mask) - 1, regs);
} while (1);
}
@@ -339,34 +162,16 @@ void __init sa1100_init_irq(void)
/* all IRQs are IRQ, not FIQ */
ICLR = 0;
- /* clear all GPIO edge detects */
- GFER = 0;
- GRER = 0;
- GEDR = -1;
-
/*
* Whatever the doc says, this has to be set for the wait-on-irq
* instruction to work... on a SA1100 rev 9 at least.
*/
ICCR = 1;
- sa1100_low_gpio_irqdomain = irq_domain_add_legacy(NULL,
- 11, IRQ_GPIO0, 0,
- &sa1100_low_gpio_irqdomain_ops, NULL);
-
- sa1100_normal_irqdomain = irq_domain_add_legacy(NULL,
- 21, IRQ_GPIO11_27, 11,
+ sa1100_normal_irqdomain = irq_domain_add_simple(NULL,
+ 32, IRQ_GPIO0_SC,
&sa1100_normal_irqdomain_ops, NULL);
- sa1100_high_gpio_irqdomain = irq_domain_add_legacy(NULL,
- 17, IRQ_GPIO11, 11,
- &sa1100_high_gpio_irqdomain_ops, NULL);
-
- /*
- * Install handler for GPIO 11-27 edge detect interrupts
- */
- irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);
-
set_handle_irq(sa1100_handle_irq);
sa1100_init_gpio();
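The removed code above decoded GPIO 11..27 interrupts from a single summary IRQ by walking the pending-edge bits, while the replacement routes everything through one 32-entry irq domain and handle_domain_irq(). Below is a standalone sketch of the chained-decode pattern that was removed; read_pending()/ack_pending()/dispatch() are stand-ins, not kernel APIs.

/* Standalone sketch of the chained GPIO 11..27 decode loop. */
#include <stdint.h>
#include <stdio.h>

#define FIRST_HW_BIT 11
#define PENDING_MASK 0xfffff800u	/* edge-detect bits for GPIO 11 and up */

static uint32_t pending = (1u << 13) | (1u << 27);	/* simulated GEDR */

static uint32_t read_pending(void) { return pending & PENDING_MASK; }
static void ack_pending(uint32_t m) { pending &= ~m; }
static void dispatch(unsigned int gpio) { printf("gpio %u fired\n", gpio); }

static void chained_handler(void)
{
	uint32_t mask;

	while ((mask = read_pending()) != 0) {
		unsigned int gpio = FIRST_HW_BIT;

		ack_pending(mask);	/* clear everything we are about to service */
		mask >>= FIRST_HW_BIT;
		for (; mask; mask >>= 1, gpio++)
			if (mask & 1)
				dispatch(gpio);
	}
}

int main(void)
{
	chained_handler();	/* prints gpio 13, then gpio 27 */
	return 0;
}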
diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c
index b704433c529c..d7ae8d50f6d8 100644
--- a/arch/arm/mach-sa1100/pci-nanoengine.c
+++ b/arch/arm/mach-sa1100/pci-nanoengine.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
@@ -30,97 +29,20 @@
#include <mach/nanoengine.h>
#include <mach/hardware.h>
-static DEFINE_SPINLOCK(nano_lock);
-
-static int nanoengine_get_pci_address(struct pci_bus *bus,
- unsigned int devfn, int where, void __iomem **address)
+static void __iomem *nanoengine_pci_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
- int ret = PCIBIOS_DEVICE_NOT_FOUND;
- unsigned int busnr = bus->number;
+ if (bus->number != 0 || (devfn >> 3) != 0)
+ return NULL;
- *address = (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT +
+ return (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT +
((bus->number << 16) | (devfn << 8) | (where & ~3));
-
- ret = (busnr > 255 || devfn > 255 || where > 255) ?
- PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
- return ret;
-}
-
-static int nanoengine_read_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *val)
-{
- int ret;
- void __iomem *address;
- unsigned long flags;
- u32 v;
-
- /* nanoEngine PCI bridge does not return -1 for a non-existing
- * device. We must fake the answer. We know that the only valid
- * device is device zero at bus 0, which is the network chip. */
- if (bus->number != 0 || (devfn >> 3) != 0) {
- v = -1;
- nanoengine_get_pci_address(bus, devfn, where, &address);
- goto exit_function;
- }
-
- spin_lock_irqsave(&nano_lock, flags);
-
- ret = nanoengine_get_pci_address(bus, devfn, where, &address);
- if (ret != PCIBIOS_SUCCESSFUL)
- return ret;
- v = __raw_readl(address);
-
- spin_unlock_irqrestore(&nano_lock, flags);
-
- v >>= ((where & 3) * 8);
- v &= (unsigned long)(-1) >> ((4 - size) * 8);
-
-exit_function:
- *val = v;
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int nanoengine_write_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 val)
-{
- int ret;
- void __iomem *address;
- unsigned long flags;
- unsigned shift;
- u32 v;
-
- shift = (where & 3) * 8;
-
- spin_lock_irqsave(&nano_lock, flags);
-
- ret = nanoengine_get_pci_address(bus, devfn, where, &address);
- if (ret != PCIBIOS_SUCCESSFUL)
- return ret;
- v = __raw_readl(address);
- switch (size) {
- case 1:
- v &= ~(0xFF << shift);
- v |= val << shift;
- break;
- case 2:
- v &= ~(0xFFFF << shift);
- v |= val << shift;
- break;
- case 4:
- v = val;
- break;
- }
- __raw_writel(v, address);
-
- spin_unlock_irqrestore(&nano_lock, flags);
-
- return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_nano_ops = {
- .read = nanoengine_read_config,
- .write = nanoengine_write_config,
+ .map_bus = nanoengine_pci_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot,
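The conversion above drops the hand-rolled config accessors in favour of a map_bus() hook plus the PCI core's generic 32-bit accessors. Below is a rough standalone model of the shift-and-mask those generic helpers perform on top of map_bus(); fake_config_readl() is a stand-in, and the details of the real generic accessors in the PCI core may differ.

/* Rough model of a 32-bit-only config read narrowed to byte/word. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_config_readl(unsigned int aligned_where)
{
	(void)aligned_where;
	return 0x12345678;	/* pretend register contents */
}

static uint32_t generic_read32(int where, int size)
{
	uint32_t v = fake_config_readl(where & ~3);	/* one aligned read */

	v >>= (where & 3) * 8;				/* select the byte lane */
	if (size < 4)
		v &= (1u << (size * 8)) - 1;		/* mask to requested width */
	return v;
}

int main(void)
{
	printf("0x%x\n", generic_read32(0x01, 1));	/* byte at offset 1 -> 0x56 */
	printf("0x%x\n", generic_read32(0x02, 2));	/* word at offset 2 -> 0x1234 */
	return 0;
}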
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 6645d1e31f14..34853d5dfda2 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
/*
* Ensure not to come back here if it wasn't intended
*/
+ RCSR = RCSR_SMR;
PSPR = 0;
/*
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
deleted file mode 100644
index 1dea6cfafb31..000000000000
--- a/arch/arm/mach-sa1100/time.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * linux/arch/arm/mach-sa1100/time.c
- *
- * Copyright (C) 1998 Deborah Wallach.
- * Twiddles (C) 1999 Hugo Fiennes <hugo@empeg.com>
- *
- * 2000/03/29 (C) Nicolas Pitre <nico@fluxnic.net>
- * Rewritten: big cleanup, much simpler, better HZ accuracy.
- *
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/timex.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-
-#include <asm/mach/time.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-#define SA1100_CLOCK_FREQ 3686400
-#define SA1100_LATCH DIV_ROUND_CLOSEST(SA1100_CLOCK_FREQ, HZ)
-
-static u64 notrace sa1100_read_sched_clock(void)
-{
- return readl_relaxed(OSCR);
-}
-
-#define MIN_OSCR_DELTA 2
-
-static irqreturn_t sa1100_ost0_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *c = dev_id;
-
- /* Disarm the compare/match, signal the event. */
- writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
- writel_relaxed(OSSR_M0, OSSR);
- c->event_handler(c);
-
- return IRQ_HANDLED;
-}
-
-static int
-sa1100_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c)
-{
- unsigned long next, oscr;
-
- writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
- next = readl_relaxed(OSCR) + delta;
- writel_relaxed(next, OSMR0);
- oscr = readl_relaxed(OSCR);
-
- return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
-}
-
-static void
-sa1100_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
- writel_relaxed(OSSR_M0, OSSR);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- case CLOCK_EVT_MODE_PERIODIC:
- break;
- }
-}
-
-#ifdef CONFIG_PM
-unsigned long osmr[4], oier;
-
-static void sa1100_timer_suspend(struct clock_event_device *cedev)
-{
- osmr[0] = readl_relaxed(OSMR0);
- osmr[1] = readl_relaxed(OSMR1);
- osmr[2] = readl_relaxed(OSMR2);
- osmr[3] = readl_relaxed(OSMR3);
- oier = readl_relaxed(OIER);
-}
-
-static void sa1100_timer_resume(struct clock_event_device *cedev)
-{
- writel_relaxed(0x0f, OSSR);
- writel_relaxed(osmr[0], OSMR0);
- writel_relaxed(osmr[1], OSMR1);
- writel_relaxed(osmr[2], OSMR2);
- writel_relaxed(osmr[3], OSMR3);
- writel_relaxed(oier, OIER);
-
- /*
- * OSMR0 is the system timer: make sure OSCR is sufficiently behind
- */
- writel_relaxed(OSMR0 - SA1100_LATCH, OSCR);
-}
-#else
-#define sa1100_timer_suspend NULL
-#define sa1100_timer_resume NULL
-#endif
-
-static struct clock_event_device ckevt_sa1100_osmr0 = {
- .name = "osmr0",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_next_event = sa1100_osmr0_set_next_event,
- .set_mode = sa1100_osmr0_set_mode,
- .suspend = sa1100_timer_suspend,
- .resume = sa1100_timer_resume,
-};
-
-static struct irqaction sa1100_timer_irq = {
- .name = "ost0",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = sa1100_ost0_interrupt,
- .dev_id = &ckevt_sa1100_osmr0,
-};
-
-void __init sa1100_timer_init(void)
-{
- writel_relaxed(0, OIER);
- writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
-
- sched_clock_register(sa1100_read_sched_clock, 32, 3686400);
-
- ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
-
- setup_irq(IRQ_OST0, &sa1100_timer_irq);
-
- clocksource_mmio_init(OSCR, "oscr", SA1100_CLOCK_FREQ, 200, 32,
- clocksource_mmio_readl_up);
- clockevents_config_and_register(&ckevt_sa1100_osmr0, 3686400,
- MIN_OSCR_DELTA * 2, 0x7fffffff);
-}
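The removed sa1100_osmr0_set_next_event() above uses the usual match-register race check: arm the comparator, re-read the free-running counter, and return -ETIME if the counter has already caught up, so the clockevents core retries with a larger delta. A standalone sketch of that check with a simulated counter follows; read_counter()/write_match() are stand-ins, not hardware accessors.

/* Standalone sketch of the set-next-event race check. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_OSCR_DELTA 2

static uint32_t oscr;						/* simulated free-running counter */
static uint32_t read_counter(void) { return oscr += 3; }	/* advances while we work */
static void write_match(uint32_t v) { (void)v; }

static int set_next_event(unsigned long delta)
{
	uint32_t next = read_counter() + delta;

	write_match(next);
	/* signed difference: has the counter already caught up to the match? */
	return (int32_t)(next - read_counter()) <= MIN_OSCR_DELTA ? -ETIME : 0;
}

int main(void)
{
	printf("delta 1000 -> %d\n", set_next_event(1000));	/* 0: armed in time */
	printf("delta 2    -> %d\n", set_next_event(2));	/* -ETIME: too close */
	return 0;
}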
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 894c68760060..2f36c85eec4b 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -80,11 +80,6 @@ config ARCH_R8A7794
comment "Renesas ARM SoCs Board Type"
-config MACH_LAGER
- bool "Lager board"
- depends on ARCH_R8A7790
- select MICREL_PHY if SH_ETH
-
config MACH_MARZEN
bool "MARZEN board"
depends on ARCH_R8A7779
@@ -139,14 +134,6 @@ config ARCH_R8A7779
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_GIC
-config ARCH_R8A7790
- bool "R-Car H2 (R8A77900)"
- select ARCH_RCAR_GEN2
- select ARCH_WANT_OPTIONAL_GPIOLIB
- select ARM_GIC
- select MIGHT_HAVE_PCI
- select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
-
comment "Renesas ARM SoCs Board Type"
config MACH_APE6EVM
@@ -214,13 +201,6 @@ config MACH_MARZEN
select REGULATOR_FIXED_VOLTAGE if REGULATOR
select USE_OF
-config MACH_LAGER
- bool "Lager board"
- depends on ARCH_R8A7790
- select USE_OF
- select MICREL_PHY if SH_ETH
- select SND_SOC_AK4642 if SND_SIMPLE_CARD
-
config MACH_KZM9G
bool "KZM-A9-GT board"
depends on ARCH_SH73A0
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index b55cac0e5b2b..d53996e6da97 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_ARCH_R8A73A4) += clock-r8a73a4.o
obj-$(CONFIG_ARCH_R8A7740) += clock-r8a7740.o
obj-$(CONFIG_ARCH_R8A7778) += clock-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clock-r8a7779.o
-obj-$(CONFIG_ARCH_R8A7790) += clock-r8a7790.o
endif
# CPU reset vector handling objects
@@ -57,7 +56,6 @@ obj-$(CONFIG_ARCH_SH7372) += entry-intc.o sleep-sh7372.o
# Board objects
ifdef CONFIG_ARCH_SHMOBILE_MULTI
-obj-$(CONFIG_MACH_LAGER) += board-lager-reference.o
obj-$(CONFIG_MACH_MARZEN) += board-marzen-reference.o
else
obj-$(CONFIG_MACH_APE6EVM) += board-ape6evm.o
@@ -66,7 +64,6 @@ obj-$(CONFIG_MACH_MACKEREL) += board-mackerel.o
obj-$(CONFIG_MACH_BOCKW) += board-bockw.o
obj-$(CONFIG_MACH_BOCKW_REFERENCE) += board-bockw-reference.o
obj-$(CONFIG_MACH_MARZEN) += board-marzen.o
-obj-$(CONFIG_MACH_LAGER) += board-lager.o
obj-$(CONFIG_MACH_ARMADILLO800EVA) += board-armadillo800eva.o
obj-$(CONFIG_MACH_KZM9G) += board-kzm9g.o
obj-$(CONFIG_MACH_KZM9G_REFERENCE) += board-kzm9g-reference.o
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index 57d00ed6ec0c..02532bea5300 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -7,7 +7,6 @@ loadaddr-$(CONFIG_MACH_BOCKW) += 0x60008000
loadaddr-$(CONFIG_MACH_BOCKW_REFERENCE) += 0x60008000
loadaddr-$(CONFIG_MACH_KZM9G) += 0x41008000
loadaddr-$(CONFIG_MACH_KZM9G_REFERENCE) += 0x41008000
-loadaddr-$(CONFIG_MACH_LAGER) += 0x40008000
loadaddr-$(CONFIG_MACH_MACKEREL) += 0x40008000
loadaddr-$(CONFIG_MACH_MARZEN) += 0x60008000
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 66f67816a844..444f22d370f0 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -18,6 +18,8 @@
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
sizeof(ape6evm_leds_pdata));
}
+static void __init ape6evm_legacy_init_time(void)
+{
+ /* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+ void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+ /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
+
static const char *ape6evm_boards_compat_dt[] __initdata = {
"renesas,ape6evm",
NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
DT_MACHINE_START(APE6EVM_DT, "ape6evm")
.init_early = shmobile_init_delay,
+ .init_irq = ape6evm_legacy_init_irq,
.init_machine = ape6evm_add_standard_devices,
.init_late = shmobile_init_late,
.dt_compat = ape6evm_boards_compat_dt,
+ .init_time = ape6evm_legacy_init_time,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager-reference.c b/arch/arm/mach-shmobile/board-lager-reference.c
deleted file mode 100644
index fa06bdba61df..000000000000
--- a/arch/arm/mach-shmobile/board-lager-reference.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Lager board support - Reference DT implementation
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Simon Horman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "r8a7790.h"
-#include "rcar-gen2.h"
-
-static const char *lager_boards_compat_dt[] __initdata = {
- "renesas,lager",
- "renesas,lager-reference",
- NULL,
-};
-
-DT_MACHINE_START(LAGER_DT, "lager")
- .smp = smp_ops(r8a7790_smp_ops),
- .init_early = shmobile_init_delay,
- .init_time = rcar_gen2_timer_init,
- .init_late = shmobile_init_late,
- .reserve = rcar_gen2_reserve,
- .dt_compat = lager_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
deleted file mode 100644
index f8197eb6e566..000000000000
--- a/arch/arm/mach-shmobile/board-lager.c
+++ /dev/null
@@ -1,827 +0,0 @@
-/*
- * Lager board support
- *
- * Copyright (C) 2013-2014 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- * Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/i2c.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/mtd.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/platform_data/camera-rcar.h>
-#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/usb-rcar-gen2-phy.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/gpio-regulator.h>
-#include <linux/regulator/machine.h>
-#include <linux/sh_eth.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/rspi.h>
-#include <linux/spi/spi.h>
-#include <linux/usb/phy.h>
-#include <linux/usb/renesas_usbhs.h>
-
-#include <media/soc_camera.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <sound/rcar_snd.h>
-#include <sound/simple_card.h>
-
-#include "common.h"
-#include "irqs.h"
-#include "r8a7790.h"
-#include "rcar-gen2.h"
-
-/*
- * SSI-AK4643
- *
- * SW1: 1: AK4643
- * 2: CN22
- * 3: ADV7511
- *
- * this command is required for playback.
- *
- * # amixer set "LINEOUT Mixer DACL" on
- */
-
-/*
- * SDHI0 (CN8)
- *
- * JP3: pin1
- * SW20: pin1
-
- * GP5_24: 1: VDD 3.3V (default)
- * 0: VDD 0.0V
- * GP5_29: 1: VccQ 3.3V (default)
- * 0: VccQ 1.8V
- *
- */
-
-/* LEDS */
-static struct gpio_led lager_leds[] = {
- {
- .name = "led8",
- .gpio = RCAR_GP_PIN(5, 17),
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- }, {
- .name = "led7",
- .gpio = RCAR_GP_PIN(4, 23),
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- }, {
- .name = "led6",
- .gpio = RCAR_GP_PIN(4, 22),
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
-};
-
-static const struct gpio_led_platform_data lager_leds_pdata __initconst = {
- .leds = lager_leds,
- .num_leds = ARRAY_SIZE(lager_leds),
-};
-
-/* GPIO KEY */
-#define GPIO_KEY(c, g, d, ...) \
- { .code = c, .gpio = g, .desc = d, .active_low = 1, \
- .wakeup = 1, .debounce_interval = 20 }
-
-static struct gpio_keys_button gpio_buttons[] = {
- GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
- GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
- GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
- GPIO_KEY(KEY_1, RCAR_GP_PIN(1, 14), "SW2-pin1"),
-};
-
-static const struct gpio_keys_platform_data lager_keys_pdata __initconst = {
- .buttons = gpio_buttons,
- .nbuttons = ARRAY_SIZE(gpio_buttons),
-};
-
-/* Fixed 3.3V regulator to be used by MMCIF */
-static struct regulator_consumer_supply fixed3v3_power_consumers[] =
-{
- REGULATOR_SUPPLY("vmmc", "sh_mmcif.1"),
-};
-
-/*
- * SDHI regulator macro
- *
- ** FIXME**
- * The Lager board's vqmmc is supplied by the DA9063 PMIC, and we should
- * use the ${LINK}/drivers/mfd/da9063-* driver for it, but that driver
- * has no regulator support at this point, so gpio-regulator is used
- * for vqmmc as a quick hack.
- */
-#define SDHI_REGULATOR(idx, vdd_pin, vccq_pin) \
-static struct regulator_consumer_supply vcc_sdhi##idx##_consumer = \
- REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi." #idx); \
- \
-static struct regulator_init_data vcc_sdhi##idx##_init_data = { \
- .constraints = { \
- .valid_ops_mask = REGULATOR_CHANGE_STATUS, \
- }, \
- .consumer_supplies = &vcc_sdhi##idx##_consumer, \
- .num_consumer_supplies = 1, \
-}; \
- \
-static const struct fixed_voltage_config vcc_sdhi##idx##_info __initconst = {\
- .supply_name = "SDHI" #idx "Vcc", \
- .microvolts = 3300000, \
- .gpio = vdd_pin, \
- .enable_high = 1, \
- .init_data = &vcc_sdhi##idx##_init_data, \
-}; \
- \
-static struct regulator_consumer_supply vccq_sdhi##idx##_consumer = \
- REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi." #idx); \
- \
-static struct regulator_init_data vccq_sdhi##idx##_init_data = { \
- .constraints = { \
- .input_uV = 3300000, \
- .min_uV = 1800000, \
- .max_uV = 3300000, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
- REGULATOR_CHANGE_STATUS, \
- }, \
- .consumer_supplies = &vccq_sdhi##idx##_consumer, \
- .num_consumer_supplies = 1, \
-}; \
- \
-static struct gpio vccq_sdhi##idx##_gpio = \
- { vccq_pin, GPIOF_OUT_INIT_HIGH, "vccq-sdhi" #idx }; \
- \
-static struct gpio_regulator_state vccq_sdhi##idx##_states[] = { \
- { .value = 1800000, .gpios = 0 }, \
- { .value = 3300000, .gpios = 1 }, \
-}; \
- \
-static const struct gpio_regulator_config vccq_sdhi##idx##_info __initconst = {\
- .supply_name = "vqmmc", \
- .gpios = &vccq_sdhi##idx##_gpio, \
- .nr_gpios = 1, \
- .states = vccq_sdhi##idx##_states, \
- .nr_states = ARRAY_SIZE(vccq_sdhi##idx##_states), \
- .type = REGULATOR_VOLTAGE, \
- .init_data = &vccq_sdhi##idx##_init_data, \
-};
-
-SDHI_REGULATOR(0, RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 29));
-SDHI_REGULATOR(2, RCAR_GP_PIN(5, 25), RCAR_GP_PIN(5, 30));
-
-/* MMCIF */
-static const struct sh_mmcif_plat_data mmcif1_pdata __initconst = {
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .clk_ctrl2_present = true,
- .ccs_unsupported = true,
-};
-
-static const struct resource mmcif1_resources[] __initconst = {
- DEFINE_RES_MEM(0xee220000, 0x80),
- DEFINE_RES_IRQ(gic_spi(170)),
-};
-
-/* Ether */
-static const struct sh_eth_plat_data ether_pdata __initconst = {
- .phy = 0x1,
- .phy_irq = irq_pin(0),
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
- .phy_interface = PHY_INTERFACE_MODE_RMII,
- .ether_link_active_low = 1,
-};
-
-static const struct resource ether_resources[] __initconst = {
- DEFINE_RES_MEM(0xee700000, 0x400),
- DEFINE_RES_IRQ(gic_spi(162)),
-};
-
-static const struct platform_device_info ether_info __initconst = {
- .name = "r8a7790-ether",
- .id = -1,
- .res = ether_resources,
- .num_res = ARRAY_SIZE(ether_resources),
- .data = &ether_pdata,
- .size_data = sizeof(ether_pdata),
- .dma_mask = DMA_BIT_MASK(32),
-};
-
-/* SPI Flash memory (Spansion S25FL512SAGMFIG11 64Mb) */
-static struct mtd_partition spi_flash_part[] = {
- /* Reserved for user loader program, read-only */
- {
- .name = "loader",
- .offset = 0,
- .size = SZ_256K,
- .mask_flags = MTD_WRITEABLE,
- },
- /* Reserved for user program, read-only */
- {
- .name = "user",
- .offset = MTDPART_OFS_APPEND,
- .size = SZ_4M,
- .mask_flags = MTD_WRITEABLE,
- },
- /* All else is writable (e.g. JFFS2) */
- {
- .name = "flash",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- .mask_flags = 0,
- },
-};
-
-static const struct flash_platform_data spi_flash_data = {
- .name = "m25p80",
- .parts = spi_flash_part,
- .nr_parts = ARRAY_SIZE(spi_flash_part),
- .type = "s25fl512s",
-};
-
-static const struct rspi_plat_data qspi_pdata __initconst = {
- .num_chipselect = 1,
-};
-
-static const struct spi_board_info spi_info[] __initconst = {
- {
- .modalias = "m25p80",
- .platform_data = &spi_flash_data,
- .mode = SPI_MODE_0 | SPI_TX_QUAD | SPI_RX_QUAD,
- .max_speed_hz = 30000000,
- .bus_num = 0,
- .chip_select = 0,
- },
-};
-
-/* QSPI resource */
-static const struct resource qspi_resources[] __initconst = {
- DEFINE_RES_MEM(0xe6b10000, 0x1000),
- DEFINE_RES_IRQ_NAMED(gic_spi(184), "mux"),
-};
-
-/* VIN */
-static const struct resource vin_resources[] __initconst = {
- /* VIN0 */
- DEFINE_RES_MEM(0xe6ef0000, 0x1000),
- DEFINE_RES_IRQ(gic_spi(188)),
- /* VIN1 */
- DEFINE_RES_MEM(0xe6ef1000, 0x1000),
- DEFINE_RES_IRQ(gic_spi(189)),
-};
-
-static void __init lager_add_vin_device(unsigned idx,
- struct rcar_vin_platform_data *pdata)
-{
- struct platform_device_info vin_info = {
- .name = "r8a7790-vin",
- .id = idx,
- .res = &vin_resources[idx * 2],
- .num_res = 2,
- .dma_mask = DMA_BIT_MASK(32),
- .data = pdata,
- .size_data = sizeof(*pdata),
- };
-
- BUG_ON(idx > 1);
-
- platform_device_register_full(&vin_info);
-}
-
-#define LAGER_CAMERA(idx, name, addr, pdata, flag) \
-static struct i2c_board_info i2c_cam##idx##_device = { \
- I2C_BOARD_INFO(name, addr), \
-}; \
- \
-static struct rcar_vin_platform_data vin##idx##_pdata = { \
- .flags = flag, \
-}; \
- \
-static struct soc_camera_link cam##idx##_link = { \
- .bus_id = idx, \
- .board_info = &i2c_cam##idx##_device, \
- .i2c_adapter_id = 2, \
- .module_name = name, \
- .priv = pdata, \
-}
-
-/* Camera 0 is not currently supported due to missing adv7612 support */
-LAGER_CAMERA(1, "adv7180", 0x20, NULL, RCAR_VIN_BT656);
-
-static void __init lager_add_camera1_device(void)
-{
- platform_device_register_data(NULL, "soc-camera-pdrv", 1,
- &cam1_link, sizeof(cam1_link));
- lager_add_vin_device(1, &vin1_pdata);
-}
-
-/* SATA1 */
-static const struct resource sata1_resources[] __initconst = {
- DEFINE_RES_MEM(0xee500000, 0x2000),
- DEFINE_RES_IRQ(gic_spi(106)),
-};
-
-static const struct platform_device_info sata1_info __initconst = {
- .name = "sata-r8a7790",
- .id = 1,
- .res = sata1_resources,
- .num_res = ARRAY_SIZE(sata1_resources),
- .dma_mask = DMA_BIT_MASK(32),
-};
-
-/* USBHS */
-static const struct resource usbhs_resources[] __initconst = {
- DEFINE_RES_MEM(0xe6590000, 0x100),
- DEFINE_RES_IRQ(gic_spi(107)),
-};
-
-struct usbhs_private {
- struct renesas_usbhs_platform_info info;
- struct usb_phy *phy;
-};
-
-#define usbhs_get_priv(pdev) \
- container_of(renesas_usbhs_get_info(pdev), struct usbhs_private, info)
-
-static int usbhs_power_ctrl(struct platform_device *pdev,
- void __iomem *base, int enable)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- if (!priv->phy)
- return -ENODEV;
-
- if (enable) {
- int retval = usb_phy_init(priv->phy);
-
- if (!retval)
- retval = usb_phy_set_suspend(priv->phy, 0);
- return retval;
- }
-
- usb_phy_set_suspend(priv->phy, 1);
- usb_phy_shutdown(priv->phy);
- return 0;
-}
-
-static int usbhs_hardware_init(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
- struct usb_phy *phy;
- int ret;
-
- /* USB0 Function - use PWEN as GPIO input to detect DIP Switch SW5
- * setting to avoid VBUS short circuit due to wrong cable.
- * PWEN should be pulled up high if USB Function is selected by SW5
- */
- gpio_request_one(RCAR_GP_PIN(5, 18), GPIOF_IN, NULL); /* USB0_PWEN */
- if (!gpio_get_value(RCAR_GP_PIN(5, 18))) {
- pr_warn("Error: USB Function not selected - check SW5 + SW6\n");
- ret = -ENOTSUPP;
- goto error;
- }
-
- phy = usb_get_phy_dev(&pdev->dev, 0);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- goto error;
- }
-
- priv->phy = phy;
- return 0;
- error:
- gpio_free(RCAR_GP_PIN(5, 18));
- return ret;
-}
-
-static int usbhs_hardware_exit(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- if (!priv->phy)
- return 0;
-
- usb_put_phy(priv->phy);
- priv->phy = NULL;
-
- gpio_free(RCAR_GP_PIN(5, 18));
- return 0;
-}
-
-static int usbhs_get_id(struct platform_device *pdev)
-{
- return USBHS_GADGET;
-}
-
-static u32 lager_usbhs_pipe_type[] = {
- USB_ENDPOINT_XFER_CONTROL,
- USB_ENDPOINT_XFER_ISOC,
- USB_ENDPOINT_XFER_ISOC,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_INT,
- USB_ENDPOINT_XFER_INT,
- USB_ENDPOINT_XFER_INT,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usbhs_private usbhs_priv __initdata = {
- .info = {
- .platform_callback = {
- .power_ctrl = usbhs_power_ctrl,
- .hardware_init = usbhs_hardware_init,
- .hardware_exit = usbhs_hardware_exit,
- .get_id = usbhs_get_id,
- },
- .driver_param = {
- .buswait_bwait = 4,
- .pipe_type = lager_usbhs_pipe_type,
- .pipe_size = ARRAY_SIZE(lager_usbhs_pipe_type),
- },
- }
-};
-
-static void __init lager_register_usbhs(void)
-{
- usb_bind_phy("renesas_usbhs", 0, "usb_phy_rcar_gen2");
- platform_device_register_resndata(NULL,
- "renesas_usbhs", -1,
- usbhs_resources,
- ARRAY_SIZE(usbhs_resources),
- &usbhs_priv.info,
- sizeof(usbhs_priv.info));
-}
-
-/* USBHS PHY */
-static const struct rcar_gen2_phy_platform_data usbhs_phy_pdata __initconst = {
- .chan0_pci = 0, /* Channel 0 is USBHS */
- .chan2_pci = 1, /* Channel 2 is PCI USB */
-};
-
-static const struct resource usbhs_phy_resources[] __initconst = {
- DEFINE_RES_MEM(0xe6590100, 0x100),
-};
-
-/* I2C */
-static struct i2c_board_info i2c2_devices[] = {
- {
- I2C_BOARD_INFO("ak4643", 0x12),
- }
-};
-
-/* Sound */
-static struct resource rsnd_resources[] __initdata = {
- [RSND_GEN2_SCU] = DEFINE_RES_MEM(0xec500000, 0x1000),
- [RSND_GEN2_ADG] = DEFINE_RES_MEM(0xec5a0000, 0x100),
- [RSND_GEN2_SSIU] = DEFINE_RES_MEM(0xec540000, 0x1000),
- [RSND_GEN2_SSI] = DEFINE_RES_MEM(0xec541000, 0x1280),
-};
-
-static struct rsnd_ssi_platform_info rsnd_ssi[] = {
- RSND_SSI(0, gic_spi(370), 0),
- RSND_SSI(0, gic_spi(371), RSND_SSI_CLK_PIN_SHARE),
-};
-
-static struct rsnd_src_platform_info rsnd_src[2] = {
- /* no member at this point */
-};
-
-static struct rsnd_dai_platform_info rsnd_dai = {
- .playback = { .ssi = &rsnd_ssi[0], },
- .capture = { .ssi = &rsnd_ssi[1], },
-};
-
-static struct rcar_snd_info rsnd_info = {
- .flags = RSND_GEN2,
- .ssi_info = rsnd_ssi,
- .ssi_info_nr = ARRAY_SIZE(rsnd_ssi),
- .src_info = rsnd_src,
- .src_info_nr = ARRAY_SIZE(rsnd_src),
- .dai_info = &rsnd_dai,
- .dai_info_nr = 1,
-};
-
-static struct asoc_simple_card_info rsnd_card_info = {
- .name = "AK4643",
- .card = "SSI01-AK4643",
- .codec = "ak4642-codec.2-0012",
- .platform = "rcar_sound",
- .daifmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM,
- .cpu_dai = {
- .name = "rcar_sound",
- },
- .codec_dai = {
- .name = "ak4642-hifi",
- .sysclk = 11289600,
- },
-};
-
-static void __init lager_add_rsnd_device(void)
-{
- struct platform_device_info cardinfo = {
- .name = "asoc-simple-card",
- .id = -1,
- .data = &rsnd_card_info,
- .size_data = sizeof(struct asoc_simple_card_info),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- i2c_register_board_info(2, i2c2_devices,
- ARRAY_SIZE(i2c2_devices));
-
- platform_device_register_resndata(
- NULL, "rcar_sound", -1,
- rsnd_resources, ARRAY_SIZE(rsnd_resources),
- &rsnd_info, sizeof(rsnd_info));
-
- platform_device_register_full(&cardinfo);
-}
-
-/* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
- MMC_CAP_POWER_OFF_CARD,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
- TMIO_MMC_WRPROTECT_DISABLE,
-};
-
-static struct resource sdhi0_resources[] __initdata = {
- DEFINE_RES_MEM(0xee100000, 0x200),
- DEFINE_RES_IRQ(gic_spi(165)),
-};
-
-/* SDHI2 */
-static struct sh_mobile_sdhi_info sdhi2_info __initdata = {
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
- MMC_CAP_POWER_OFF_CARD,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
- TMIO_MMC_WRPROTECT_DISABLE,
-};
-
-static struct resource sdhi2_resources[] __initdata = {
- DEFINE_RES_MEM(0xee140000, 0x100),
- DEFINE_RES_IRQ(gic_spi(167)),
-};
-
-/* Internal PCI1 */
-static const struct resource pci1_resources[] __initconst = {
- DEFINE_RES_MEM(0xee0b0000, 0x10000), /* CFG */
- DEFINE_RES_MEM(0xee0a0000, 0x10000), /* MEM */
- DEFINE_RES_IRQ(gic_spi(112)),
-};
-
-static const struct platform_device_info pci1_info __initconst = {
- .name = "pci-rcar-gen2",
- .id = 1,
- .res = pci1_resources,
- .num_res = ARRAY_SIZE(pci1_resources),
- .dma_mask = DMA_BIT_MASK(32),
-};
-
-static void __init lager_add_usb1_device(void)
-{
- platform_device_register_full(&pci1_info);
-}
-
-/* Internal PCI2 */
-static const struct resource pci2_resources[] __initconst = {
- DEFINE_RES_MEM(0xee0d0000, 0x10000), /* CFG */
- DEFINE_RES_MEM(0xee0c0000, 0x10000), /* MEM */
- DEFINE_RES_IRQ(gic_spi(113)),
-};
-
-static const struct platform_device_info pci2_info __initconst = {
- .name = "pci-rcar-gen2",
- .id = 2,
- .res = pci2_resources,
- .num_res = ARRAY_SIZE(pci2_resources),
- .dma_mask = DMA_BIT_MASK(32),
-};
-
-static void __init lager_add_usb2_device(void)
-{
- platform_device_register_full(&pci2_info);
-}
-
-static const struct pinctrl_map lager_pinctrl_map[] = {
- /* DU (CN10: ARGB0, CN13: LVDS) */
- PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7790", "pfc-r8a7790",
- "du_rgb666", "du"),
- PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7790", "pfc-r8a7790",
- "du_sync_1", "du"),
- PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7790", "pfc-r8a7790",
- "du_clk_out_0", "du"),
- /* I2C2 */
- PIN_MAP_MUX_GROUP_DEFAULT("i2c-rcar.2", "pfc-r8a7790",
- "i2c2", "i2c2"),
- /* QSPI */
- PIN_MAP_MUX_GROUP_DEFAULT("qspi.0", "pfc-r8a7790",
- "qspi_ctrl", "qspi"),
- PIN_MAP_MUX_GROUP_DEFAULT("qspi.0", "pfc-r8a7790",
- "qspi_data4", "qspi"),
- /* SCIF0 (CN19: DEBUG SERIAL0) */
- PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.6", "pfc-r8a7790",
- "scif0_data", "scif0"),
- /* SCIF1 (CN20: DEBUG SERIAL1) */
- PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.7", "pfc-r8a7790",
- "scif1_data", "scif1"),
- /* SDHI0 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7790",
- "sdhi0_data4", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7790",
- "sdhi0_ctrl", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7790",
- "sdhi0_cd", "sdhi0"),
- /* SDHI2 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-r8a7790",
- "sdhi2_data4", "sdhi2"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-r8a7790",
- "sdhi2_ctrl", "sdhi2"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-r8a7790",
- "sdhi2_cd", "sdhi2"),
- /* SSI (CN17: sound) */
- PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7790",
- "ssi0129_ctrl", "ssi"),
- PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7790",
- "ssi0_data", "ssi"),
- PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7790",
- "ssi1_data", "ssi"),
- PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7790",
- "audio_clk_a", "audio_clk"),
- /* MMCIF1 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.1", "pfc-r8a7790",
- "mmc1_data8", "mmc1"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.1", "pfc-r8a7790",
- "mmc1_ctrl", "mmc1"),
- /* Ether */
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-ether", "pfc-r8a7790",
- "eth_link", "eth"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-ether", "pfc-r8a7790",
- "eth_mdio", "eth"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-ether", "pfc-r8a7790",
- "eth_rmii", "eth"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-ether", "pfc-r8a7790",
- "intc_irq0", "intc"),
- /* VIN0 */
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.0", "pfc-r8a7790",
- "vin0_data24", "vin0"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.0", "pfc-r8a7790",
- "vin0_sync", "vin0"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.0", "pfc-r8a7790",
- "vin0_field", "vin0"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.0", "pfc-r8a7790",
- "vin0_clkenb", "vin0"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.0", "pfc-r8a7790",
- "vin0_clk", "vin0"),
- /* VIN1 */
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.1", "pfc-r8a7790",
- "vin1_data8", "vin1"),
- PIN_MAP_MUX_GROUP_DEFAULT("r8a7790-vin.1", "pfc-r8a7790",
- "vin1_clk", "vin1"),
- /* USB0 */
- PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-r8a7790",
- "usb0_ovc_vbus", "usb0"),
- /* USB1 */
- PIN_MAP_MUX_GROUP_DEFAULT("pci-rcar-gen2.1", "pfc-r8a7790",
- "usb1", "usb1"),
- /* USB2 */
- PIN_MAP_MUX_GROUP_DEFAULT("pci-rcar-gen2.2", "pfc-r8a7790",
- "usb2", "usb2"),
-};
-
-static void __init lager_add_standard_devices(void)
-{
- int fixed_regulator_idx = 0;
- int gpio_regulator_idx = 0;
-
- r8a7790_clock_init();
-
- pinctrl_register_mappings(lager_pinctrl_map,
- ARRAY_SIZE(lager_pinctrl_map));
- r8a7790_pinmux_init();
-
- r8a7790_add_standard_devices();
- platform_device_register_data(NULL, "leds-gpio", -1,
- &lager_leds_pdata,
- sizeof(lager_leds_pdata));
- platform_device_register_data(NULL, "gpio-keys", -1,
- &lager_keys_pdata,
- sizeof(lager_keys_pdata));
- regulator_register_always_on(fixed_regulator_idx++,
- "fixed-3.3V", fixed3v3_power_consumers,
- ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
- platform_device_register_resndata(NULL, "sh_mmcif", 1,
- mmcif1_resources, ARRAY_SIZE(mmcif1_resources),
- &mmcif1_pdata, sizeof(mmcif1_pdata));
-
- platform_device_register_full(&ether_info);
-
- platform_device_register_resndata(NULL, "qspi", 0,
- qspi_resources,
- ARRAY_SIZE(qspi_resources),
- &qspi_pdata, sizeof(qspi_pdata));
- spi_register_board_info(spi_info, ARRAY_SIZE(spi_info));
-
- platform_device_register_data(NULL, "reg-fixed-voltage", fixed_regulator_idx++,
- &vcc_sdhi0_info, sizeof(struct fixed_voltage_config));
- platform_device_register_data(NULL, "reg-fixed-voltage", fixed_regulator_idx++,
- &vcc_sdhi2_info, sizeof(struct fixed_voltage_config));
-
- platform_device_register_data(NULL, "gpio-regulator", gpio_regulator_idx++,
- &vccq_sdhi0_info, sizeof(struct gpio_regulator_config));
- platform_device_register_data(NULL, "gpio-regulator", gpio_regulator_idx++,
- &vccq_sdhi2_info, sizeof(struct gpio_regulator_config));
-
- lager_add_camera1_device();
-
- platform_device_register_full(&sata1_info);
-
- platform_device_register_resndata(NULL, "usb_phy_rcar_gen2",
- -1, usbhs_phy_resources,
- ARRAY_SIZE(usbhs_phy_resources),
- &usbhs_phy_pdata,
- sizeof(usbhs_phy_pdata));
- lager_register_usbhs();
- lager_add_usb1_device();
- lager_add_usb2_device();
-
- lager_add_rsnd_device();
-
- platform_device_register_resndata(NULL, "sh_mobile_sdhi", 0,
- sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
- &sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
- platform_device_register_resndata(NULL, "sh_mobile_sdhi", 2,
- sdhi2_resources, ARRAY_SIZE(sdhi2_resources),
- &sdhi2_info, sizeof(struct sh_mobile_sdhi_info));
-}
-
-/*
- * Ether LEDs on the Lager board are named LINK and ACTIVE, which correspond
- * to the non-default 01 setting of Micrel KSZ8041 PHY control register 1 bits
- * 14-15. We have to set them back to 01 from the default 00 value each time
- * the PHY is reset. It's also important because the PHY's LED0 signal is
- * connected to SoC's ETH_LINK signal and in the PHY's default mode it will
- * bounce on and off after each packet, which we apparently want to avoid.
- */
-static int lager_ksz8041_fixup(struct phy_device *phydev)
-{
- u16 phyctrl1 = phy_read(phydev, 0x1e);
-
- phyctrl1 &= ~0xc000;
- phyctrl1 |= 0x4000;
- return phy_write(phydev, 0x1e, phyctrl1);
-}
-
-static void __init lager_init(void)
-{
- lager_add_standard_devices();
-
- irq_set_irq_type(irq_pin(0), IRQ_TYPE_LEVEL_LOW);
-
- if (IS_ENABLED(CONFIG_PHYLIB))
- phy_register_fixup_for_id("r8a7790-ether-ff:01",
- lager_ksz8041_fixup);
-}
-
-static const char * const lager_boards_compat_dt[] __initconst = {
- "renesas,lager",
- NULL,
-};
-
-DT_MACHINE_START(LAGER_DT, "lager")
- .smp = smp_ops(r8a7790_smp_ops),
- .init_early = shmobile_init_delay,
- .init_time = rcar_gen2_timer_init,
- .init_machine = lager_init,
- .init_late = shmobile_init_late,
- .reserve = rcar_gen2_reserve,
- .dt_compat = lager_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c
deleted file mode 100644
index f9bbc5f0a9a1..000000000000
--- a/arch/arm/mach-shmobile/clock-r8a7790.c
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * r8a7790 clock framework support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/sh_clk.h>
-#include <linux/clkdev.h>
-
-#include "clock.h"
-#include "common.h"
-#include "r8a7790.h"
-#include "rcar-gen2.h"
-
-/*
- * MD EXTAL PLL0 PLL1 PLL3
- * 14 13 19 (MHz) *1 *1
- *---------------------------------------------------
- * 0 0 0 15 x 1 x172/2 x208/2 x106
- * 0 0 1 15 x 1 x172/2 x208/2 x88
- * 0 1 0 20 x 1 x130/2 x156/2 x80
- * 0 1 1 20 x 1 x130/2 x156/2 x66
- * 1 0 0 26 / 2 x200/2 x240/2 x122
- * 1 0 1 26 / 2 x200/2 x240/2 x102
- * 1 1 0 30 / 2 x172/2 x208/2 x106
- * 1 1 1 30 / 2 x172/2 x208/2 x88
- *
- * *1 : Table 7.6 indicates VCO output (PLLx = VCO/2)
- *      see "p1 / 2" in R8A7790_CLOCK_ROOT() below
- */
-
-#define CPG_BASE 0xe6150000
-#define CPG_LEN 0x1000
-
-#define SMSTPCR1 0xe6150134
-#define SMSTPCR2 0xe6150138
-#define SMSTPCR3 0xe615013c
-#define SMSTPCR5 0xe6150144
-#define SMSTPCR7 0xe615014c
-#define SMSTPCR8 0xe6150990
-#define SMSTPCR9 0xe6150994
-#define SMSTPCR10 0xe6150998
-
-#define MSTPSR1 IOMEM(0xe6150038)
-#define MSTPSR2 IOMEM(0xe6150040)
-#define MSTPSR3 IOMEM(0xe6150048)
-#define MSTPSR5 IOMEM(0xe615003c)
-#define MSTPSR7 IOMEM(0xe61501c4)
-#define MSTPSR8 IOMEM(0xe61509a0)
-#define MSTPSR9 IOMEM(0xe61509a4)
-#define MSTPSR10 IOMEM(0xe61509a8)
-
-#define SDCKCR 0xE6150074
-#define SD2CKCR 0xE6150078
-#define SD3CKCR 0xE615026C
-#define MMC0CKCR 0xE6150240
-#define MMC1CKCR 0xE6150244
-#define SSPCKCR 0xE6150248
-#define SSPRSCKCR 0xE615024C
-
-static struct clk_mapping cpg_mapping = {
- .phys = CPG_BASE,
- .len = CPG_LEN,
-};
-
-static struct clk extal_clk = {
- /* .rate will be updated in r8a7790_clock_init() */
- .mapping = &cpg_mapping,
-};
-
-static struct sh_clk_ops followparent_clk_ops = {
- .recalc = followparent_recalc,
-};
-
-static struct clk main_clk = {
- /* .parent will be set in r8a7790_clock_init() */
- .ops = &followparent_clk_ops,
-};
-
-static struct clk audio_clk_a = {
-};
-
-static struct clk audio_clk_b = {
-};
-
-static struct clk audio_clk_c = {
-};
-
-/*
- * the clock ratios of these clocks will be updated
- * in r8a7790_clock_init()
- */
-SH_FIXED_RATIO_CLK_SET(pll1_clk, main_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(pll3_clk, main_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(lb_clk, pll1_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(qspi_clk, pll1_clk, 1, 1);
-
-/* fixed ratio clock */
-SH_FIXED_RATIO_CLK_SET(extal_div2_clk, extal_clk, 1, 2);
-SH_FIXED_RATIO_CLK_SET(cp_clk, extal_clk, 1, 2);
-
-SH_FIXED_RATIO_CLK_SET(pll1_div2_clk, pll1_clk, 1, 2);
-SH_FIXED_RATIO_CLK_SET(zg_clk, pll1_clk, 1, 3);
-SH_FIXED_RATIO_CLK_SET(zx_clk, pll1_clk, 1, 3);
-SH_FIXED_RATIO_CLK_SET(zs_clk, pll1_clk, 1, 6);
-SH_FIXED_RATIO_CLK_SET(hp_clk, pll1_clk, 1, 12);
-SH_FIXED_RATIO_CLK_SET(i_clk, pll1_clk, 1, 2);
-SH_FIXED_RATIO_CLK_SET(b_clk, pll1_clk, 1, 12);
-SH_FIXED_RATIO_CLK_SET(p_clk, pll1_clk, 1, 24);
-SH_FIXED_RATIO_CLK_SET(cl_clk, pll1_clk, 1, 48);
-SH_FIXED_RATIO_CLK_SET(m2_clk, pll1_clk, 1, 8);
-SH_FIXED_RATIO_CLK_SET(imp_clk, pll1_clk, 1, 4);
-SH_FIXED_RATIO_CLK_SET(rclk_clk, pll1_clk, 1, (48 * 1024));
-SH_FIXED_RATIO_CLK_SET(oscclk_clk, pll1_clk, 1, (12 * 1024));
-
-SH_FIXED_RATIO_CLK_SET(zb3_clk, pll3_clk, 1, 4);
-SH_FIXED_RATIO_CLK_SET(zb3d2_clk, pll3_clk, 1, 8);
-SH_FIXED_RATIO_CLK_SET(ddr_clk, pll3_clk, 1, 8);
-SH_FIXED_RATIO_CLK_SET(mp_clk, pll1_div2_clk, 1, 15);
-
-static struct clk *main_clks[] = {
- &audio_clk_a,
- &audio_clk_b,
- &audio_clk_c,
- &extal_clk,
- &extal_div2_clk,
- &main_clk,
- &pll1_clk,
- &pll1_div2_clk,
- &pll3_clk,
- &lb_clk,
- &qspi_clk,
- &zg_clk,
- &zx_clk,
- &zs_clk,
- &hp_clk,
- &i_clk,
- &b_clk,
- &p_clk,
- &cl_clk,
- &m2_clk,
- &imp_clk,
- &rclk_clk,
- &oscclk_clk,
- &zb3_clk,
- &zb3d2_clk,
- &ddr_clk,
- &mp_clk,
- &cp_clk,
-};
-
-/* SDHI (DIV4) clock */
-static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18, 24, 0, 36, 48, 10 };
-
-static struct clk_div_mult_table div4_div_mult_table = {
- .divisors = divisors,
- .nr_divisors = ARRAY_SIZE(divisors),
-};
-
-static struct clk_div4_table div4_table = {
- .div_mult_table = &div4_div_mult_table,
-};
-
-enum {
- DIV4_SDH, DIV4_SD0, DIV4_SD1, DIV4_NR
-};
-
-static struct clk div4_clks[DIV4_NR] = {
- [DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT),
- [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT),
- [DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1df0, CLK_ENABLE_ON_INIT),
-};
-
-/* DIV6 clocks */
-enum {
- DIV6_SD2, DIV6_SD3,
- DIV6_MMC0, DIV6_MMC1,
- DIV6_SSP, DIV6_SSPRS,
- DIV6_NR
-};
-
-static struct clk div6_clks[DIV6_NR] = {
- [DIV6_SD2] = SH_CLK_DIV6(&pll1_div2_clk, SD2CKCR, 0),
- [DIV6_SD3] = SH_CLK_DIV6(&pll1_div2_clk, SD3CKCR, 0),
- [DIV6_MMC0] = SH_CLK_DIV6(&pll1_div2_clk, MMC0CKCR, 0),
- [DIV6_MMC1] = SH_CLK_DIV6(&pll1_div2_clk, MMC1CKCR, 0),
- [DIV6_SSP] = SH_CLK_DIV6(&pll1_div2_clk, SSPCKCR, 0),
- [DIV6_SSPRS] = SH_CLK_DIV6(&pll1_div2_clk, SSPRSCKCR, 0),
-};
-
-/* MSTP */
-enum {
- MSTP1017, /* parent of SCU */
-
- MSTP1031, MSTP1030,
- MSTP1029, MSTP1028, MSTP1027, MSTP1026, MSTP1025, MSTP1024, MSTP1023, MSTP1022,
- MSTP1015, MSTP1014, MSTP1013, MSTP1012, MSTP1011, MSTP1010,
- MSTP1009, MSTP1008, MSTP1007, MSTP1006, MSTP1005,
- MSTP931, MSTP930, MSTP929, MSTP928,
- MSTP917,
- MSTP815, MSTP814,
- MSTP813,
- MSTP811, MSTP810, MSTP809, MSTP808,
- MSTP726, MSTP725, MSTP724, MSTP723, MSTP722, MSTP721, MSTP720,
- MSTP717, MSTP716,
- MSTP704, MSTP703,
- MSTP522,
- MSTP502, MSTP501,
- MSTP315, MSTP314, MSTP313, MSTP312, MSTP311, MSTP305, MSTP304,
- MSTP216, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202,
- MSTP124,
- MSTP_NR
-};
-
-static struct clk mstp_clks[MSTP_NR] = {
- [MSTP1031] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 31, MSTPSR10, 0), /* SCU0 */
- [MSTP1030] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 30, MSTPSR10, 0), /* SCU1 */
- [MSTP1029] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 29, MSTPSR10, 0), /* SCU2 */
- [MSTP1028] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 28, MSTPSR10, 0), /* SCU3 */
- [MSTP1027] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 27, MSTPSR10, 0), /* SCU4 */
- [MSTP1026] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 26, MSTPSR10, 0), /* SCU5 */
- [MSTP1025] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 25, MSTPSR10, 0), /* SCU6 */
- [MSTP1024] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 24, MSTPSR10, 0), /* SCU7 */
- [MSTP1023] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 23, MSTPSR10, 0), /* SCU8 */
- [MSTP1022] = SH_CLK_MSTP32_STS(&mstp_clks[MSTP1017], SMSTPCR10, 22, MSTPSR10, 0), /* SCU9 */
- [MSTP1017] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 17, MSTPSR10, 0), /* SCU */
- [MSTP1015] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 15, MSTPSR10, 0), /* SSI0 */
- [MSTP1014] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 14, MSTPSR10, 0), /* SSI1 */
- [MSTP1013] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 13, MSTPSR10, 0), /* SSI2 */
- [MSTP1012] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 12, MSTPSR10, 0), /* SSI3 */
- [MSTP1011] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 11, MSTPSR10, 0), /* SSI4 */
- [MSTP1010] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 10, MSTPSR10, 0), /* SSI5 */
- [MSTP1009] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 9, MSTPSR10, 0), /* SSI6 */
- [MSTP1008] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 8, MSTPSR10, 0), /* SSI7 */
- [MSTP1007] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 7, MSTPSR10, 0), /* SSI8 */
- [MSTP1006] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 6, MSTPSR10, 0), /* SSI9 */
- [MSTP1005] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR10, 5, MSTPSR10, 0), /* SSI ALL */
- [MSTP931] = SH_CLK_MSTP32_STS(&hp_clk, SMSTPCR9, 31, MSTPSR9, 0), /* I2C0 */
- [MSTP930] = SH_CLK_MSTP32_STS(&hp_clk, SMSTPCR9, 30, MSTPSR9, 0), /* I2C1 */
- [MSTP929] = SH_CLK_MSTP32_STS(&hp_clk, SMSTPCR9, 29, MSTPSR9, 0), /* I2C2 */
- [MSTP928] = SH_CLK_MSTP32_STS(&hp_clk, SMSTPCR9, 28, MSTPSR9, 0), /* I2C3 */
- [MSTP917] = SH_CLK_MSTP32_STS(&qspi_clk, SMSTPCR9, 17, MSTPSR9, 0), /* QSPI */
- [MSTP815] = SH_CLK_MSTP32_STS(&zs_clk, SMSTPCR8, 15, MSTPSR8, 0), /* SATA0 */
- [MSTP814] = SH_CLK_MSTP32_STS(&zs_clk, SMSTPCR8, 14, MSTPSR8, 0), /* SATA1 */
- [MSTP813] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR8, 13, MSTPSR8, 0), /* Ether */
- [MSTP811] = SH_CLK_MSTP32_STS(&zg_clk, SMSTPCR8, 11, MSTPSR8, 0), /* VIN0 */
- [MSTP810] = SH_CLK_MSTP32_STS(&zg_clk, SMSTPCR8, 10, MSTPSR8, 0), /* VIN1 */
- [MSTP809] = SH_CLK_MSTP32_STS(&zg_clk, SMSTPCR8, 9, MSTPSR8, 0), /* VIN2 */
- [MSTP808] = SH_CLK_MSTP32_STS(&zg_clk, SMSTPCR8, 8, MSTPSR8, 0), /* VIN3 */
- [MSTP726] = SH_CLK_MSTP32_STS(&zx_clk, SMSTPCR7, 26, MSTPSR7, 0), /* LVDS0 */
- [MSTP725] = SH_CLK_MSTP32_STS(&zx_clk, SMSTPCR7, 25, MSTPSR7, 0), /* LVDS1 */
- [MSTP724] = SH_CLK_MSTP32_STS(&zx_clk, SMSTPCR7, 24, MSTPSR7, 0), /* DU0 */
- [MSTP723] = SH_CLK_MSTP32_STS(&zx_clk, SMSTPCR7, 23, MSTPSR7, 0), /* DU1 */
- [MSTP722] = SH_CLK_MSTP32_STS(&zx_clk, SMSTPCR7, 22, MSTPSR7, 0), /* DU2 */
- [MSTP721] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR7, 21, MSTPSR7, 0), /* SCIF0 */
- [MSTP720] = SH_CLK_MSTP32_STS(&p_clk, SMSTPCR7, 20, MSTPSR7, 0), /* SCIF1 */
- [MSTP717] = SH_CLK_MSTP32_STS(&zs_clk, SMSTPCR7, 17, MSTPSR7, 0), /* HSCIF0 */
- [MSTP716] = SH_CLK_MSTP32_STS(&zs_clk, SMSTPCR7, 16, MSTPSR7, 0), /* HSCIF1 */
- [MSTP704] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR7, 4, MSTPSR7, 0), /* HSUSB */
- [MSTP703] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR7, 3, MSTPSR7, 0), /* EHCI */
- [MSTP522] = SH_CLK_MSTP32_STS(&extal_clk, SMSTPCR5, 22, MSTPSR5, 0), /* Thermal */
- [MSTP502] = SH_CLK_MSTP32_STS(&zs_clk, SMSTPCR5, 2, MSTPSR5, 0), /* Audio-DMAC low */
- [MSTP501] = SH_CLK_MSTP32_STS(&zs_clk, SMSTPCR5, 1, MSTPSR5, 0), /* Audio-DMAC hi */
- [MSTP315] = SH_CLK_MSTP32_STS(&div6_clks[DIV6_MMC0], SMSTPCR3, 15, MSTPSR3, 0), /* MMC0 */
- [MSTP314] = SH_CLK_MSTP32_STS(&div4_clks[DIV4_SD0], SMSTPCR3, 14, MSTPSR3, 0), /* SDHI0 */
- [MSTP313] = SH_CLK_MSTP32_STS(&div4_clks[DIV4_SD1], SMSTPCR3, 13, MSTPSR3, 0), /* SDHI1 */
- [MSTP312] = SH_CLK_MSTP32_STS(&div6_clks[DIV6_SD2], SMSTPCR3, 12, MSTPSR3, 0), /* SDHI2 */
- [MSTP311] = SH_CLK_MSTP32_STS(&div6_clks[DIV6_SD3], SMSTPCR3, 11, MSTPSR3, 0), /* SDHI3 */
- [MSTP305] = SH_CLK_MSTP32_STS(&div6_clks[DIV6_MMC1], SMSTPCR3, 5, MSTPSR3, 0), /* MMC1 */
- [MSTP304] = SH_CLK_MSTP32_STS(&cp_clk, SMSTPCR3, 4, MSTPSR3, 0), /* TPU0 */
- [MSTP216] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR2, 16, MSTPSR2, 0), /* SCIFB2 */
- [MSTP207] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR2, 7, MSTPSR2, 0), /* SCIFB1 */
- [MSTP206] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR2, 6, MSTPSR2, 0), /* SCIFB0 */
- [MSTP204] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR2, 4, MSTPSR2, 0), /* SCIFA0 */
- [MSTP203] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR2, 3, MSTPSR2, 0), /* SCIFA1 */
- [MSTP202] = SH_CLK_MSTP32_STS(&mp_clk, SMSTPCR2, 2, MSTPSR2, 0), /* SCIFA2 */
- [MSTP124] = SH_CLK_MSTP32_STS(&rclk_clk, SMSTPCR1, 24, MSTPSR1, 0), /* CMT0 */
-};
-
-static struct clk_lookup lookups[] = {
-
- /* main clocks */
- CLKDEV_CON_ID("extal", &extal_clk),
- CLKDEV_CON_ID("extal_div2", &extal_div2_clk),
- CLKDEV_CON_ID("main", &main_clk),
- CLKDEV_CON_ID("pll1", &pll1_clk),
- CLKDEV_CON_ID("pll1_div2", &pll1_div2_clk),
- CLKDEV_CON_ID("pll3", &pll3_clk),
- CLKDEV_CON_ID("zg", &zg_clk),
- CLKDEV_CON_ID("zx", &zx_clk),
- CLKDEV_CON_ID("zs", &zs_clk),
- CLKDEV_CON_ID("hp", &hp_clk),
- CLKDEV_CON_ID("i", &i_clk),
- CLKDEV_CON_ID("b", &b_clk),
- CLKDEV_CON_ID("lb", &lb_clk),
- CLKDEV_CON_ID("p", &p_clk),
- CLKDEV_CON_ID("cl", &cl_clk),
- CLKDEV_CON_ID("m2", &m2_clk),
- CLKDEV_CON_ID("imp", &imp_clk),
- CLKDEV_CON_ID("rclk", &rclk_clk),
- CLKDEV_CON_ID("oscclk", &oscclk_clk),
- CLKDEV_CON_ID("zb3", &zb3_clk),
- CLKDEV_CON_ID("zb3d2", &zb3d2_clk),
- CLKDEV_CON_ID("ddr", &ddr_clk),
- CLKDEV_CON_ID("mp", &mp_clk),
- CLKDEV_CON_ID("qspi", &qspi_clk),
- CLKDEV_CON_ID("cp", &cp_clk),
-
- /* DIV4 */
- CLKDEV_CON_ID("sdh", &div4_clks[DIV4_SDH]),
-
- /* DIV6 */
- CLKDEV_CON_ID("ssp", &div6_clks[DIV6_SSP]),
- CLKDEV_CON_ID("ssprs", &div6_clks[DIV6_SSPRS]),
-
- /* MSTP */
- CLKDEV_DEV_ID("rcar_sound", &mstp_clks[MSTP1005]),
- CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]),
- CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]),
- CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP206]),
- CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP207]),
- CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP216]),
- CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP202]),
- CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP721]),
- CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP720]),
- CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP717]),
- CLKDEV_DEV_ID("sh-sci.9", &mstp_clks[MSTP716]),
- CLKDEV_DEV_ID("i2c-rcar_gen2.0", &mstp_clks[MSTP931]),
- CLKDEV_DEV_ID("i2c-rcar_gen2.1", &mstp_clks[MSTP930]),
- CLKDEV_DEV_ID("i2c-rcar_gen2.2", &mstp_clks[MSTP929]),
- CLKDEV_DEV_ID("i2c-rcar_gen2.3", &mstp_clks[MSTP928]),
- CLKDEV_DEV_ID("r8a7790-ether", &mstp_clks[MSTP813]),
- CLKDEV_DEV_ID("r8a7790-vin.0", &mstp_clks[MSTP811]),
- CLKDEV_DEV_ID("r8a7790-vin.1", &mstp_clks[MSTP810]),
- CLKDEV_DEV_ID("r8a7790-vin.2", &mstp_clks[MSTP809]),
- CLKDEV_DEV_ID("r8a7790-vin.3", &mstp_clks[MSTP808]),
- CLKDEV_DEV_ID("rcar_thermal", &mstp_clks[MSTP522]),
- CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP502]),
- CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP501]),
- CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP315]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP312]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.3", &mstp_clks[MSTP311]),
- CLKDEV_DEV_ID("sh_mmcif.1", &mstp_clks[MSTP305]),
- CLKDEV_DEV_ID("qspi.0", &mstp_clks[MSTP917]),
- CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP704]),
- CLKDEV_DEV_ID("pci-rcar-gen2.0", &mstp_clks[MSTP703]),
- CLKDEV_DEV_ID("pci-rcar-gen2.1", &mstp_clks[MSTP703]),
- CLKDEV_DEV_ID("pci-rcar-gen2.2", &mstp_clks[MSTP703]),
- CLKDEV_DEV_ID("sata-r8a7790.0", &mstp_clks[MSTP815]),
- CLKDEV_DEV_ID("sata-r8a7790.1", &mstp_clks[MSTP814]),
-
- /* ICK */
- CLKDEV_ICK_ID("fck", "sh-cmt-48-gen2.0", &mstp_clks[MSTP124]),
- CLKDEV_ICK_ID("usbhs", "usb_phy_rcar_gen2", &mstp_clks[MSTP704]),
- CLKDEV_ICK_ID("lvds.0", "rcar-du-r8a7790", &mstp_clks[MSTP726]),
- CLKDEV_ICK_ID("lvds.1", "rcar-du-r8a7790", &mstp_clks[MSTP725]),
- CLKDEV_ICK_ID("du.0", "rcar-du-r8a7790", &mstp_clks[MSTP724]),
- CLKDEV_ICK_ID("du.1", "rcar-du-r8a7790", &mstp_clks[MSTP723]),
- CLKDEV_ICK_ID("du.2", "rcar-du-r8a7790", &mstp_clks[MSTP722]),
- CLKDEV_ICK_ID("clk_a", "rcar_sound", &audio_clk_a),
- CLKDEV_ICK_ID("clk_b", "rcar_sound", &audio_clk_b),
- CLKDEV_ICK_ID("clk_c", "rcar_sound", &audio_clk_c),
- CLKDEV_ICK_ID("clk_i", "rcar_sound", &m2_clk),
- CLKDEV_ICK_ID("src.0", "rcar_sound", &mstp_clks[MSTP1031]),
- CLKDEV_ICK_ID("src.1", "rcar_sound", &mstp_clks[MSTP1030]),
- CLKDEV_ICK_ID("src.2", "rcar_sound", &mstp_clks[MSTP1029]),
- CLKDEV_ICK_ID("src.3", "rcar_sound", &mstp_clks[MSTP1028]),
- CLKDEV_ICK_ID("src.4", "rcar_sound", &mstp_clks[MSTP1027]),
- CLKDEV_ICK_ID("src.5", "rcar_sound", &mstp_clks[MSTP1026]),
- CLKDEV_ICK_ID("src.6", "rcar_sound", &mstp_clks[MSTP1025]),
- CLKDEV_ICK_ID("src.7", "rcar_sound", &mstp_clks[MSTP1024]),
- CLKDEV_ICK_ID("src.8", "rcar_sound", &mstp_clks[MSTP1023]),
- CLKDEV_ICK_ID("src.9", "rcar_sound", &mstp_clks[MSTP1022]),
- CLKDEV_ICK_ID("ssi.0", "rcar_sound", &mstp_clks[MSTP1015]),
- CLKDEV_ICK_ID("ssi.1", "rcar_sound", &mstp_clks[MSTP1014]),
- CLKDEV_ICK_ID("ssi.2", "rcar_sound", &mstp_clks[MSTP1013]),
- CLKDEV_ICK_ID("ssi.3", "rcar_sound", &mstp_clks[MSTP1012]),
- CLKDEV_ICK_ID("ssi.4", "rcar_sound", &mstp_clks[MSTP1011]),
- CLKDEV_ICK_ID("ssi.5", "rcar_sound", &mstp_clks[MSTP1010]),
- CLKDEV_ICK_ID("ssi.6", "rcar_sound", &mstp_clks[MSTP1009]),
- CLKDEV_ICK_ID("ssi.7", "rcar_sound", &mstp_clks[MSTP1008]),
- CLKDEV_ICK_ID("ssi.8", "rcar_sound", &mstp_clks[MSTP1007]),
- CLKDEV_ICK_ID("ssi.9", "rcar_sound", &mstp_clks[MSTP1006]),
-
-};
-
-#define R8A7790_CLOCK_ROOT(e, m, p0, p1, p30, p31) \
- extal_clk.rate = e * 1000 * 1000; \
- main_clk.parent = m; \
- SH_CLK_SET_RATIO(&pll1_clk_ratio, p1 / 2, 1); \
- if (mode & MD(19)) \
- SH_CLK_SET_RATIO(&pll3_clk_ratio, p31, 1); \
- else \
- SH_CLK_SET_RATIO(&pll3_clk_ratio, p30, 1)
-
-
-void __init r8a7790_clock_init(void)
-{
- u32 mode = rcar_gen2_read_mode_pins();
- int k, ret = 0;
-
- switch (mode & (MD(14) | MD(13))) {
- case 0:
- R8A7790_CLOCK_ROOT(15, &extal_clk, 172, 208, 106, 88);
- break;
- case MD(13):
- R8A7790_CLOCK_ROOT(20, &extal_clk, 130, 156, 80, 66);
- break;
- case MD(14):
- R8A7790_CLOCK_ROOT(26 / 2, &extal_div2_clk, 200, 240, 122, 102);
- break;
- case MD(13) | MD(14):
- R8A7790_CLOCK_ROOT(30 / 2, &extal_div2_clk, 172, 208, 106, 88);
- break;
- }
-
- if (mode & (MD(18)))
- SH_CLK_SET_RATIO(&lb_clk_ratio, 1, 36);
- else
- SH_CLK_SET_RATIO(&lb_clk_ratio, 1, 24);
-
- if ((mode & (MD(3) | MD(2) | MD(1))) == MD(2))
- SH_CLK_SET_RATIO(&qspi_clk_ratio, 1, 16);
- else
- SH_CLK_SET_RATIO(&qspi_clk_ratio, 1, 20);
-
- for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
- ret = clk_register(main_clks[k]);
-
- if (!ret)
- ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
-
- if (!ret)
- ret = sh_clk_div6_register(div6_clks, DIV6_NR);
-
- if (!ret)
- ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- if (!ret)
- shmobile_clk_init();
- else
- panic("failed to setup r8a7790 clocks\n");
-}
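
[Editor's note: as a cross-check of the MD[14:13]/MD19 table and R8A7790_CLOCK_ROOT() in the deleted file above, here is a small standalone program (plain C, not kernel code) that reproduces the resulting main/PLL1/PLL3 rates. The helper name and the two sample mode settings are chosen purely for illustration.]

#include <stdio.h>

/* Mirrors R8A7790_CLOCK_ROOT(e, m, p0, p1, p30, p31): the main clock is EXTAL
 * (optionally divided by 2), PLL1 runs at main * p1 / 2 (VCO / 2), and PLL3
 * at main * p30 or main * p31 depending on MD19. */
static void clock_root(unsigned int extal_mhz, int extal_div2,
                       unsigned int p1, unsigned int p3)
{
        unsigned long long main_hz = extal_mhz * 1000000ULL / (extal_div2 ? 2 : 1);

        printf("main %llu Hz, pll1 %llu Hz, pll3 %llu Hz\n",
               main_hz, main_hz * p1 / 2, main_hz * p3);
}

int main(void)
{
        clock_root(20, 0, 156, 80);     /* MD14=0 MD13=1 MD19=0: PLL1 = 1560 MHz */
        clock_root(26, 1, 240, 102);    /* MD14=1 MD13=0 MD19=1: PLL1 = 1560 MHz */
        return 0;
}
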
diff --git a/arch/arm/mach-shmobile/r8a7790.h b/arch/arm/mach-shmobile/r8a7790.h
index 388f0514d931..bf73a850aaed 100644
--- a/arch/arm/mach-shmobile/r8a7790.h
+++ b/arch/arm/mach-shmobile/r8a7790.h
@@ -1,34 +1,6 @@
#ifndef __ASM_R8A7790_H__
#define __ASM_R8A7790_H__
-/* DMA slave IDs */
-enum {
- RCAR_DMA_SLAVE_INVALID,
- AUDIO_DMAC_SLAVE_SSI0_TX,
- AUDIO_DMAC_SLAVE_SSI0_RX,
- AUDIO_DMAC_SLAVE_SSI1_TX,
- AUDIO_DMAC_SLAVE_SSI1_RX,
- AUDIO_DMAC_SLAVE_SSI2_TX,
- AUDIO_DMAC_SLAVE_SSI2_RX,
- AUDIO_DMAC_SLAVE_SSI3_TX,
- AUDIO_DMAC_SLAVE_SSI3_RX,
- AUDIO_DMAC_SLAVE_SSI4_TX,
- AUDIO_DMAC_SLAVE_SSI4_RX,
- AUDIO_DMAC_SLAVE_SSI5_TX,
- AUDIO_DMAC_SLAVE_SSI5_RX,
- AUDIO_DMAC_SLAVE_SSI6_TX,
- AUDIO_DMAC_SLAVE_SSI6_RX,
- AUDIO_DMAC_SLAVE_SSI7_TX,
- AUDIO_DMAC_SLAVE_SSI7_RX,
- AUDIO_DMAC_SLAVE_SSI8_TX,
- AUDIO_DMAC_SLAVE_SSI8_RX,
- AUDIO_DMAC_SLAVE_SSI9_TX,
- AUDIO_DMAC_SLAVE_SSI9_RX,
-};
-
-void r8a7790_add_standard_devices(void);
-void r8a7790_clock_init(void);
-void r8a7790_pinmux_init(void);
void r8a7790_pm_init(void);
extern struct smp_operations r8a7790_smp_ops;
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 79ad93dfdae4..dd64caf79216 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -656,7 +656,7 @@ static struct resource pmu_resources[] = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "armv7-pmu",
.id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
@@ -800,7 +800,14 @@ void __init r8a7740_init_irq_of(void)
void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
+
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
+#endif
/* route signals to GIC */
iowrite32(0x0, pfc_inta_ctrl);
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 170bd146ba17..cef8895a9b82 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -576,11 +576,18 @@ void __init r8a7778_init_irq_extpin(int irlm)
void __init r8a7778_init_irq_dt(void)
{
void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xfe438000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xfe430000, 0x1000);
+#endif
BUG_ON(!base);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
-
+#endif
/* route all interrupts to ARM */
__raw_writel(0x73ffffff, base + INT2NTSR0);
__raw_writel(0xffffffff, base + INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 6156d172cf31..27dceaf9e688 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -720,10 +720,17 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
void __init r8a7779_init_irq_dt(void)
{
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
+#endif
gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
-
+#endif
/* route all interrupts to ARM */
__raw_writel(0xffffffff, INT2NTSR0);
__raw_writel(0x3fffffff, INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
index ec7d97dca4de..3a18af4922b4 100644
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ b/arch/arm/mach-shmobile/setup-r8a7790.c
@@ -14,295 +14,14 @@
* GNU General Public License for more details.
*/
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/irq-renesas-irqc.h>
-#include <linux/serial_sci.h>
-#include <linux/sh_dma.h>
-#include <linux/sh_timer.h>
+#include <linux/init.h>
#include <asm/mach/arch.h>
#include "common.h"
-#include "dma-register.h"
-#include "irqs.h"
#include "r8a7790.h"
#include "rcar-gen2.h"
-/* Audio-DMAC */
-#define AUDIO_DMAC_SLAVE(_id, _addr, t, r) \
-{ \
- .slave_id = AUDIO_DMAC_SLAVE_## _id ##_TX, \
- .addr = _addr + 0x8, \
- .chcr = CHCR_TX(XMIT_SZ_32BIT), \
- .mid_rid = t, \
-}, { \
- .slave_id = AUDIO_DMAC_SLAVE_## _id ##_RX, \
- .addr = _addr + 0xc, \
- .chcr = CHCR_RX(XMIT_SZ_32BIT), \
- .mid_rid = r, \
-}
-
-static const struct sh_dmae_slave_config r8a7790_audio_dmac_slaves[] = {
- AUDIO_DMAC_SLAVE(SSI0, 0xec241000, 0x01, 0x02),
- AUDIO_DMAC_SLAVE(SSI1, 0xec241040, 0x03, 0x04),
- AUDIO_DMAC_SLAVE(SSI2, 0xec241080, 0x05, 0x06),
- AUDIO_DMAC_SLAVE(SSI3, 0xec2410c0, 0x07, 0x08),
- AUDIO_DMAC_SLAVE(SSI4, 0xec241100, 0x09, 0x0a),
- AUDIO_DMAC_SLAVE(SSI5, 0xec241140, 0x0b, 0x0c),
- AUDIO_DMAC_SLAVE(SSI6, 0xec241180, 0x0d, 0x0e),
- AUDIO_DMAC_SLAVE(SSI7, 0xec2411c0, 0x0f, 0x10),
- AUDIO_DMAC_SLAVE(SSI8, 0xec241200, 0x11, 0x12),
- AUDIO_DMAC_SLAVE(SSI9, 0xec241240, 0x13, 0x14),
-};
-
-#define DMAE_CHANNEL(a, b) \
-{ \
- .offset = (a) - 0x20, \
- .dmars = (a) - 0x20 + 0x40, \
- .chclr_bit = (b), \
- .chclr_offset = 0x80 - 0x20, \
-}
-
-static const struct sh_dmae_channel r8a7790_audio_dmac_channels[] = {
- DMAE_CHANNEL(0x8000, 0),
- DMAE_CHANNEL(0x8080, 1),
- DMAE_CHANNEL(0x8100, 2),
- DMAE_CHANNEL(0x8180, 3),
- DMAE_CHANNEL(0x8200, 4),
- DMAE_CHANNEL(0x8280, 5),
- DMAE_CHANNEL(0x8300, 6),
- DMAE_CHANNEL(0x8380, 7),
- DMAE_CHANNEL(0x8400, 8),
- DMAE_CHANNEL(0x8480, 9),
- DMAE_CHANNEL(0x8500, 10),
- DMAE_CHANNEL(0x8580, 11),
- DMAE_CHANNEL(0x8600, 12),
-};
-
-static struct sh_dmae_pdata r8a7790_audio_dmac_platform_data = {
- .slave = r8a7790_audio_dmac_slaves,
- .slave_num = ARRAY_SIZE(r8a7790_audio_dmac_slaves),
- .channel = r8a7790_audio_dmac_channels,
- .channel_num = ARRAY_SIZE(r8a7790_audio_dmac_channels),
- .ts_low_shift = TS_LOW_SHIFT,
- .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
- .ts_high_shift = TS_HI_SHIFT,
- .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
- .ts_shift = dma_ts_shift,
- .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
- .dmaor_init = DMAOR_DME,
- .chclr_present = 1,
- .chclr_bitwise = 1,
-};
-
-static struct resource r8a7790_audio_dmac_resources[] = {
- /* Channel registers and DMAOR for low */
- DEFINE_RES_MEM(0xec700020, 0x8663 - 0x20),
- DEFINE_RES_IRQ(gic_spi(346)),
- DEFINE_RES_NAMED(gic_spi(320), 13, NULL, IORESOURCE_IRQ),
-
- /* Channel registers and DMAOR for hi */
- DEFINE_RES_MEM(0xec720020, 0x8663 - 0x20), /* hi */
- DEFINE_RES_IRQ(gic_spi(347)),
- DEFINE_RES_NAMED(gic_spi(333), 13, NULL, IORESOURCE_IRQ),
-};
-
-#define r8a7790_register_audio_dmac(id) \
- platform_device_register_resndata( \
- NULL, "sh-dma-engine", id, \
- &r8a7790_audio_dmac_resources[id * 3], 3, \
- &r8a7790_audio_dmac_platform_data, \
- sizeof(r8a7790_audio_dmac_platform_data))
-
-static const struct resource pfc_resources[] __initconst = {
- DEFINE_RES_MEM(0xe6060000, 0x250),
-};
-
-#define r8a7790_register_pfc() \
- platform_device_register_simple("pfc-r8a7790", -1, pfc_resources, \
- ARRAY_SIZE(pfc_resources))
-
-#define R8A7790_GPIO(idx) \
-static const struct resource r8a7790_gpio##idx##_resources[] __initconst = { \
- DEFINE_RES_MEM(0xe6050000 + 0x1000 * (idx), 0x50), \
- DEFINE_RES_IRQ(gic_spi(4 + (idx))), \
-}; \
- \
-static const struct gpio_rcar_config \
-r8a7790_gpio##idx##_platform_data __initconst = { \
- .gpio_base = 32 * (idx), \
- .irq_base = 0, \
- .number_of_pins = 32, \
- .pctl_name = "pfc-r8a7790", \
- .has_both_edge_trigger = 1, \
-}; \
-
-R8A7790_GPIO(0);
-R8A7790_GPIO(1);
-R8A7790_GPIO(2);
-R8A7790_GPIO(3);
-R8A7790_GPIO(4);
-R8A7790_GPIO(5);
-
-#define r8a7790_register_gpio(idx) \
- platform_device_register_resndata(NULL, "gpio_rcar", idx, \
- r8a7790_gpio##idx##_resources, \
- ARRAY_SIZE(r8a7790_gpio##idx##_resources), \
- &r8a7790_gpio##idx##_platform_data, \
- sizeof(r8a7790_gpio##idx##_platform_data))
-
-static struct resource i2c_resources[] __initdata = {
- /* I2C0 */
- DEFINE_RES_MEM(0xE6508000, 0x40),
- DEFINE_RES_IRQ(gic_spi(287)),
- /* I2C1 */
- DEFINE_RES_MEM(0xE6518000, 0x40),
- DEFINE_RES_IRQ(gic_spi(288)),
- /* I2C2 */
- DEFINE_RES_MEM(0xE6530000, 0x40),
- DEFINE_RES_IRQ(gic_spi(286)),
- /* I2C3 */
- DEFINE_RES_MEM(0xE6540000, 0x40),
- DEFINE_RES_IRQ(gic_spi(290)),
-
-};
-
-#define r8a7790_register_i2c(idx) \
- platform_device_register_simple( \
- "i2c-rcar_gen2", idx, \
- i2c_resources + (2 * idx), 2); \
-
-void __init r8a7790_pinmux_init(void)
-{
- r8a7790_register_pfc();
- r8a7790_register_gpio(0);
- r8a7790_register_gpio(1);
- r8a7790_register_gpio(2);
- r8a7790_register_gpio(3);
- r8a7790_register_gpio(4);
- r8a7790_register_gpio(5);
-}
-
-#define __R8A7790_SCIF(scif_type, _scscr, index, baseaddr, irq) \
-static struct plat_sci_port scif##index##_platform_data = { \
- .type = scif_type, \
- .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, \
- .scscr = _scscr, \
-}; \
- \
-static struct resource scif##index##_resources[] = { \
- DEFINE_RES_MEM(baseaddr, 0x100), \
- DEFINE_RES_IRQ(irq), \
-}
-
-#define R8A7790_SCIF(index, baseaddr, irq) \
- __R8A7790_SCIF(PORT_SCIF, SCSCR_RE | SCSCR_TE, \
- index, baseaddr, irq)
-
-#define R8A7790_SCIFA(index, baseaddr, irq) \
- __R8A7790_SCIF(PORT_SCIFA, SCSCR_RE | SCSCR_TE | SCSCR_CKE0, \
- index, baseaddr, irq)
-
-#define R8A7790_SCIFB(index, baseaddr, irq) \
- __R8A7790_SCIF(PORT_SCIFB, SCSCR_RE | SCSCR_TE, \
- index, baseaddr, irq)
-
-#define R8A7790_HSCIF(index, baseaddr, irq) \
- __R8A7790_SCIF(PORT_HSCIF, SCSCR_RE | SCSCR_TE, \
- index, baseaddr, irq)
-
-R8A7790_SCIFA(0, 0xe6c40000, gic_spi(144)); /* SCIFA0 */
-R8A7790_SCIFA(1, 0xe6c50000, gic_spi(145)); /* SCIFA1 */
-R8A7790_SCIFB(2, 0xe6c20000, gic_spi(148)); /* SCIFB0 */
-R8A7790_SCIFB(3, 0xe6c30000, gic_spi(149)); /* SCIFB1 */
-R8A7790_SCIFB(4, 0xe6ce0000, gic_spi(150)); /* SCIFB2 */
-R8A7790_SCIFA(5, 0xe6c60000, gic_spi(151)); /* SCIFA2 */
-R8A7790_SCIF(6, 0xe6e60000, gic_spi(152)); /* SCIF0 */
-R8A7790_SCIF(7, 0xe6e68000, gic_spi(153)); /* SCIF1 */
-R8A7790_HSCIF(8, 0xe62c0000, gic_spi(154)); /* HSCIF0 */
-R8A7790_HSCIF(9, 0xe62c8000, gic_spi(155)); /* HSCIF1 */
-
-#define r8a7790_register_scif(index) \
- platform_device_register_resndata(NULL, "sh-sci", index, \
- scif##index##_resources, \
- ARRAY_SIZE(scif##index##_resources), \
- &scif##index##_platform_data, \
- sizeof(scif##index##_platform_data))
-
-static const struct renesas_irqc_config irqc0_data __initconst = {
- .irq_base = irq_pin(0), /* IRQ0 -> IRQ3 */
-};
-
-static const struct resource irqc0_resources[] __initconst = {
- DEFINE_RES_MEM(0xe61c0000, 0x200), /* IRQC Event Detector Block_0 */
- DEFINE_RES_IRQ(gic_spi(0)), /* IRQ0 */
- DEFINE_RES_IRQ(gic_spi(1)), /* IRQ1 */
- DEFINE_RES_IRQ(gic_spi(2)), /* IRQ2 */
- DEFINE_RES_IRQ(gic_spi(3)), /* IRQ3 */
-};
-
-#define r8a7790_register_irqc(idx) \
- platform_device_register_resndata(NULL, "renesas_irqc", \
- idx, irqc##idx##_resources, \
- ARRAY_SIZE(irqc##idx##_resources), \
- &irqc##idx##_data, \
- sizeof(struct renesas_irqc_config))
-
-static const struct resource thermal_resources[] __initconst = {
- DEFINE_RES_MEM(0xe61f0000, 0x14),
- DEFINE_RES_MEM(0xe61f0100, 0x38),
- DEFINE_RES_IRQ(gic_spi(69)),
-};
-
-#define r8a7790_register_thermal() \
- platform_device_register_simple("rcar_thermal", -1, \
- thermal_resources, \
- ARRAY_SIZE(thermal_resources))
-
-static struct sh_timer_config cmt0_platform_data = {
- .channels_mask = 0x60,
-};
-
-static struct resource cmt0_resources[] = {
- DEFINE_RES_MEM(0xffca0000, 0x1004),
- DEFINE_RES_IRQ(gic_spi(142)),
-};
-
-#define r8a7790_register_cmt(idx) \
- platform_device_register_resndata(NULL, "sh-cmt-48-gen2", \
- idx, cmt##idx##_resources, \
- ARRAY_SIZE(cmt##idx##_resources), \
- &cmt##idx##_platform_data, \
- sizeof(struct sh_timer_config))
-
-void __init r8a7790_add_standard_devices(void)
-{
- r8a7790_register_scif(0);
- r8a7790_register_scif(1);
- r8a7790_register_scif(2);
- r8a7790_register_scif(3);
- r8a7790_register_scif(4);
- r8a7790_register_scif(5);
- r8a7790_register_scif(6);
- r8a7790_register_scif(7);
- r8a7790_register_scif(8);
- r8a7790_register_scif(9);
- r8a7790_register_cmt(0);
- r8a7790_register_irqc(0);
- r8a7790_register_thermal();
- r8a7790_register_i2c(0);
- r8a7790_register_i2c(1);
- r8a7790_register_i2c(2);
- r8a7790_register_i2c(3);
- r8a7790_register_audio_dmac(0);
- r8a7790_register_audio_dmac(1);
-}
-
-#ifdef CONFIG_USE_OF
-
static const char * const r8a7790_boards_compat_dt[] __initconst = {
"renesas,r8a7790",
NULL,
@@ -316,4 +35,3 @@ DT_MACHINE_START(R8A7790_DT, "Generic R8A7790 (Flattened Device Tree)")
.reserve = rcar_gen2_reserve,
.dt_compat = r8a7790_boards_compat_dt,
MACHINE_END
-#endif /* CONFIG_USE_OF */
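
[Editor's note: the deleted board and SoC files above lean heavily on platform_device_register_resndata() to pair MMIO/IRQ resources with a platform-data blob. A minimal sketch of that idiom follows; the device name, addresses, IRQ number and pdata struct are hypothetical and exist only to show the call shape.]

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Hypothetical resources: one register window and one interrupt. */
static const struct resource demo_resources[] = {
        DEFINE_RES_MEM(0xe6000000, 0x100),
        DEFINE_RES_IRQ(42),
};

/* Hypothetical platform data consumed by an imaginary "demo-device" driver. */
struct demo_platform_data {
        unsigned int flags;
};

static const struct demo_platform_data demo_pdata = {
        .flags = 0,
};

static int __init demo_register(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_resndata(NULL, "demo-device", -1,
                                                 demo_resources,
                                                 ARRAY_SIZE(demo_resources),
                                                 &demo_pdata,
                                                 sizeof(demo_pdata));
        return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(demo_register);
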
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index c35b91d92190..d1fa625e61f5 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -131,7 +131,9 @@ void __init rcar_gen2_timer_init(void)
#ifdef CONFIG_COMMON_CLK
rcar_gen2_clocks_init(mode);
#endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
clocksource_of_init();
+#endif
}
struct memory_reserve_config {
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 93ebe3430bfe..a8593d1241be 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -563,7 +563,7 @@ static struct resource pmu_resources[] = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "armv7-pmu",
.id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
@@ -595,6 +595,7 @@ static struct platform_device ipmmu_device = {
static struct renesas_intc_irqpin_config irqpin0_platform_data = {
.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+ .control_parent = true,
};
static struct resource irqpin0_resources[] = {
@@ -656,6 +657,7 @@ static struct platform_device irqpin1_device = {
static struct renesas_intc_irqpin_config irqpin2_platform_data = {
.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+ .control_parent = true,
};
static struct resource irqpin2_resources[] = {
@@ -686,6 +688,7 @@ static struct platform_device irqpin2_device = {
static struct renesas_intc_irqpin_config irqpin3_platform_data = {
.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+ .control_parent = true,
};
static struct resource irqpin3_resources[] = {
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index f1d027aa7a81..0edf2a6d2bbe 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
if (!max_freq)
return;
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ /* Non-multiplatform r8a73a4 SoC cannot use arch timer due
+ * to GIC being initialized from C and arch timer via DT */
+ if (of_machine_is_compatible("renesas,r8a73a4"))
+ has_arch_timer = false;
+
+ /* Non-multiplatform r8a7790 SoC cannot use arch timer due
+ * to GIC being initialized from C and arch timer via DT */
+ if (of_machine_is_compatible("renesas,r8a7790"))
+ has_arch_timer = false;
+#endif
+
if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
if (is_a7_a8_a9)
shmobile_setup_delay_hz(max_freq, 1, 3);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 08fb8c89f414..6ea09fe53426 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -728,43 +728,6 @@ struct of_dev_auxdata versatile_auxdata_lookup[] __initdata = {
};
#endif
-#ifdef CONFIG_LEDS
-#define VA_LEDS_BASE (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LED_OFFSET)
-
-static void versatile_leds_event(led_event_t ledevt)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = readl(VA_LEDS_BASE);
-
- switch (ledevt) {
- case led_idle_start:
- val = val & ~VERSATILE_SYS_LED0;
- break;
-
- case led_idle_end:
- val = val | VERSATILE_SYS_LED0;
- break;
-
- case led_timer:
- val = val ^ VERSATILE_SYS_LED1;
- break;
-
- case led_halted:
- val = 0;
- break;
-
- default:
- break;
- }
-
- writel(val, VA_LEDS_BASE);
- local_irq_restore(flags);
-}
-#endif /* CONFIG_LEDS */
-
void versatile_restart(enum reboot_mode mode, const char *cmd)
{
void __iomem *sys = __io_address(VERSATILE_SYS_BASE);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 03823e784f63..c43c71455566 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
config ARM_KERNMEM_PERMS
bool "Restrict kernel memory permissions"
+ depends on MMU
help
If this is set, kernel memory other than kernel text (and rodata)
will be made non-executable. The tradeoff is that each region is
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 5e65ca8dea62..c6c7696b8db9 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
+ * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
*
* Copyright (C) 2007 ARM Limited
*
@@ -41,12 +41,14 @@ struct l2c_init_data {
void (*enable)(void __iomem *, u32, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
+ void (*configure)(void __iomem *);
struct outer_cache_fns outer_cache;
};
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
+static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
@@ -106,6 +108,19 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
}
}
+static void l2c_configure(void __iomem *base)
+{
+ if (outer_cache.configure) {
+ outer_cache.configure(&l2x0_saved_regs);
+ return;
+ }
+
+ if (l2x0_data->configure)
+ l2x0_data->configure(base);
+
+ l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
+}
+
/*
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
@@ -114,7 +129,12 @@ static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
unsigned long flags;
- l2c_write_sec(aux, base, L2X0_AUX_CTRL);
+ /* Do not touch the controller if already enabled. */
+ if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
+ return;
+
+ l2x0_saved_regs.aux_ctrl = aux;
+ l2c_configure(base);
l2c_unlock(base, num_lock);
@@ -136,76 +156,14 @@ static void l2c_disable(void)
dsb(st);
}
-#ifdef CONFIG_CACHE_PL310
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
-{
- /* cache operations by line are atomic on PL310 */
-}
-#else
-#define cache_wait l2c_wait_mask
-#endif
-
-static inline void cache_sync(void)
-{
- void __iomem *base = l2x0_base;
-
- writel_relaxed(0, base + sync_reg_offset);
- cache_wait(base + L2X0_CACHE_SYNC, 1);
-}
-
-#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
-static inline void debug_writel(unsigned long val)
-{
- l2c_set_debug(l2x0_base, val);
-}
-#else
-/* Optimised out for non-errata case */
-static inline void debug_writel(unsigned long val)
-{
-}
-#endif
-
-static void l2x0_cache_sync(void)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- cache_sync();
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-}
-
-static void __l2x0_flush_all(void)
-{
- debug_writel(0x03);
- __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
- cache_sync();
- debug_writel(0x00);
-}
-
-static void l2x0_flush_all(void)
-{
- unsigned long flags;
-
- /* clean all ways */
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-}
-
-static void l2x0_disable(void)
+static void l2c_save(void __iomem *base)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
- l2c_write_sec(0, l2x0_base, L2X0_CTRL);
- dsb(st);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
-static void l2c_save(void __iomem *base)
+static void l2c_resume(void)
{
- l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+ l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
}
/*
@@ -288,14 +246,6 @@ static void l2c210_sync(void)
__l2c210_cache_sync(l2x0_base);
}
-static void l2c210_resume(void)
-{
- void __iomem *base = l2x0_base;
-
- if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
- l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
-}
-
static const struct l2c_init_data l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
@@ -309,7 +259,7 @@ static const struct l2c_init_data l2c210_data __initconst = {
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -466,7 +416,7 @@ static const struct l2c_init_data l2c220_data = {
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -615,39 +565,29 @@ static void __init l2c310_save(void __iomem *base)
L310_POWER_CTRL);
}
-static void l2c310_resume(void)
+static void l2c310_configure(void __iomem *base)
{
- void __iomem *base = l2x0_base;
+ unsigned revision;
- if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
- unsigned revision;
-
- /* restore pl310 setup */
- writel_relaxed(l2x0_saved_regs.tag_latency,
- base + L310_TAG_LATENCY_CTRL);
- writel_relaxed(l2x0_saved_regs.data_latency,
- base + L310_DATA_LATENCY_CTRL);
- writel_relaxed(l2x0_saved_regs.filter_end,
- base + L310_ADDR_FILTER_END);
- writel_relaxed(l2x0_saved_regs.filter_start,
- base + L310_ADDR_FILTER_START);
-
- revision = readl_relaxed(base + L2X0_CACHE_ID) &
- L2X0_CACHE_ID_RTL_MASK;
-
- if (revision >= L310_CACHE_ID_RTL_R2P0)
- l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
- L310_PREFETCH_CTRL);
- if (revision >= L310_CACHE_ID_RTL_R3P0)
- l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
- L310_POWER_CTRL);
-
- l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
-
- /* Re-enable full-line-of-zeros for Cortex-A9 */
- if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
- set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
- }
+ /* restore pl310 setup */
+ l2c_write_sec(l2x0_saved_regs.tag_latency, base,
+ L310_TAG_LATENCY_CTRL);
+ l2c_write_sec(l2x0_saved_regs.data_latency, base,
+ L310_DATA_LATENCY_CTRL);
+ l2c_write_sec(l2x0_saved_regs.filter_end, base,
+ L310_ADDR_FILTER_END);
+ l2c_write_sec(l2x0_saved_regs.filter_start, base,
+ L310_ADDR_FILTER_START);
+
+ revision = readl_relaxed(base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ if (revision >= L310_CACHE_ID_RTL_R2P0)
+ l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
+ L310_PREFETCH_CTRL);
+ if (revision >= L310_CACHE_ID_RTL_R3P0)
+ l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
+ L310_POWER_CTRL);
}
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
@@ -699,6 +639,23 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
+ /* r3p0 or later has power control register */
+ if (rev >= L310_CACHE_ID_RTL_R3P0)
+ l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
+ L310_STNDBY_MODE_EN;
+
+ /*
+ * Always enable non-secure access to the lockdown registers -
+ * we write to them as part of the L2C enable sequence so they
+ * need to be accessible.
+ */
+ aux |= L310_AUX_CTRL_NS_LOCKDOWN;
+
+ l2c_enable(base, aux, num_lock);
+
+ /* Read back resulting AUX_CTRL value as it could have been altered. */
+ aux = readl_relaxed(base + L2X0_AUX_CTRL);
+
if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
@@ -712,23 +669,12 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
if (rev >= L310_CACHE_ID_RTL_R3P0) {
u32 power_ctrl;
- l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
- base, L310_POWER_CTRL);
power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
- /*
- * Always enable non-secure access to the lockdown registers -
- * we write to them as part of the L2C enable sequence so they
- * need to be accessible.
- */
- aux |= L310_AUX_CTRL_NS_LOCKDOWN;
-
- l2c_enable(base, aux, num_lock);
-
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
cpu_notifier(l2c310_cpu_enable_flz, 0);
@@ -760,11 +706,11 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
- u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
+ u32 val = l2x0_saved_regs.prefetch_ctrl;
/* I don't think bit23 is required here... but iMX6 does so */
if (val & (BIT(30) | BIT(23))) {
val &= ~(BIT(30) | BIT(23));
- l2c_write_sec(val, base, L310_PREFETCH_CTRL);
+ l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
}
@@ -800,6 +746,15 @@ static void l2c310_disable(void)
l2c_disable();
}
+static void l2c310_resume(void)
+{
+ l2c_resume();
+
+ /* Re-enable full-line-of-zeros for Cortex-A9 */
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
+ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+}
+
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
@@ -807,6 +762,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -818,14 +774,22 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
},
};
-static void __init __l2c_init(const struct l2c_init_data *data,
- u32 aux_val, u32 aux_mask, u32 cache_id)
+static int __init __l2c_init(const struct l2c_init_data *data,
+ u32 aux_val, u32 aux_mask, u32 cache_id)
{
struct outer_cache_fns fns;
unsigned way_size_bits, ways;
u32 aux, old_aux;
/*
+ * Save the pointer globally so that callbacks which do not receive
+ * context from callers can access the structure.
+ */
+ l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+ if (!l2x0_data)
+ return -ENOMEM;
+
+ /*
* Sanity check the aux values. aux_mask is the bits we preserve
* from reading the hardware register, and aux_val is the bits we
* set.
@@ -884,6 +848,7 @@ static void __init __l2c_init(const struct l2c_init_data *data,
fns = data->outer_cache;
fns.write_sec = outer_cache.write_sec;
+ fns.configure = outer_cache.configure;
if (data->fixup)
data->fixup(l2x0_base, cache_id, &fns);
@@ -910,6 +875,8 @@ static void __init __l2c_init(const struct l2c_init_data *data,
data->type, ways, l2x0_size >> 10);
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
+
+ return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
@@ -936,6 +903,10 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
break;
}
+ /* Read back current (default) hardware configuration */
+ if (data->save)
+ data->save(l2x0_base);
+
__l2c_init(data, aux_val, aux_mask, cache_id);
}
@@ -979,7 +950,7 @@ static int __init l2x0_cache_size_of_parse(const struct device_node *np,
/* All these l2 caches have the same line = block size actually */
if (!line_size) {
if (block_size) {
- /* If linesize if not given, it is equal to blocksize */
+ /* If linesize is not given, it is equal to blocksize */
line_size = block_size;
} else {
/* Fall back to known size */
@@ -1102,7 +1073,7 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -1120,7 +1091,7 @@ static const struct l2c_init_data of_l2c220_data __initconst = {
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
- .resume = l2c210_resume,
+ .resume = l2c_resume,
},
};
@@ -1131,32 +1102,32 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
+ u32 prefetch;
+ u32 val;
int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
- writel_relaxed(
+ l2x0_saved_regs.tag_latency =
L310_LATENCY_CTRL_RD(tag[0] - 1) |
L310_LATENCY_CTRL_WR(tag[1] - 1) |
- L310_LATENCY_CTRL_SETUP(tag[2] - 1),
- l2x0_base + L310_TAG_LATENCY_CTRL);
+ L310_LATENCY_CTRL_SETUP(tag[2] - 1);
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1] && data[2])
- writel_relaxed(
+ l2x0_saved_regs.data_latency =
L310_LATENCY_CTRL_RD(data[0] - 1) |
L310_LATENCY_CTRL_WR(data[1] - 1) |
- L310_LATENCY_CTRL_SETUP(data[2] - 1),
- l2x0_base + L310_DATA_LATENCY_CTRL);
+ L310_LATENCY_CTRL_SETUP(data[2] - 1);
of_property_read_u32_array(np, "arm,filter-ranges",
filter, ARRAY_SIZE(filter));
if (filter[1]) {
- writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
- l2x0_base + L310_ADDR_FILTER_END);
- writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
- l2x0_base + L310_ADDR_FILTER_START);
+ l2x0_saved_regs.filter_end =
+ ALIGN(filter[0] + filter[1], SZ_1M);
+ l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
+ | L310_ADDR_FILTER_EN;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
@@ -1178,6 +1149,58 @@ static void __init l2c310_of_parse(const struct device_node *np,
assoc);
break;
}
+
+ prefetch = l2x0_saved_regs.prefetch_ctrl;
+
+ ret = of_property_read_u32(np, "arm,double-linefill", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
+ if (ret == 0) {
+ if (!val)
+ prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
+ if (ret == 0) {
+ prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
+ prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
+ }
+
+ l2x0_saved_regs.prefetch_ctrl = prefetch;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
@@ -1188,6 +1211,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1216,6 +1240,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1231,7 +1256,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
-static unsigned long calc_range_end(unsigned long start, unsigned long end)
+static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
/*
* Limit the number of cache lines processed at once,
@@ -1250,25 +1275,13 @@ static unsigned long calc_range_end(unsigned long start, unsigned long end)
return end;
}
-/*
- * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
- * and range operations only do a TLB lookup on the start address.
- */
static void aurora_pa_range(unsigned long start, unsigned long end,
- unsigned long offset)
+ unsigned long offset)
{
+ void __iomem *base = l2x0_base;
+ unsigned long range_end;
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
- writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
- writel_relaxed(end, l2x0_base + offset);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-
- cache_sync();
-}
-
-static void aurora_inv_range(unsigned long start, unsigned long end)
-{
/*
 * round start and end addresses up to cache line size
*/
@@ -1276,15 +1289,24 @@ static void aurora_inv_range(unsigned long start, unsigned long end)
end = ALIGN(end, CACHE_LINE_SIZE);
/*
- * Invalidate all full cache lines between 'start' and 'end'.
+ * perform operation on all full cache lines between 'start' and 'end'
*/
while (start < end) {
- unsigned long range_end = calc_range_end(start, end);
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_INVAL_RANGE_REG);
+ range_end = aurora_range_end(start, end);
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
+ writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+ writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
}
}
+static void aurora_inv_range(unsigned long start, unsigned long end)
+{
+ aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
+}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
@@ -1292,52 +1314,53 @@ static void aurora_clean_range(unsigned long start, unsigned long end)
* If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
- if (!l2_wt_override) {
- start &= ~(CACHE_LINE_SIZE - 1);
- end = ALIGN(end, CACHE_LINE_SIZE);
- while (start != end) {
- unsigned long range_end = calc_range_end(start, end);
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_CLEAN_RANGE_REG);
- start = range_end;
- }
- }
+ if (!l2_wt_override)
+ aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
- start &= ~(CACHE_LINE_SIZE - 1);
- end = ALIGN(end, CACHE_LINE_SIZE);
- while (start != end) {
- unsigned long range_end = calc_range_end(start, end);
- /*
- * If L2 is forced to WT, the L2 will always be clean and we
- * just need to invalidate.
- */
- if (l2_wt_override)
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_INVAL_RANGE_REG);
- else
- aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
- AURORA_FLUSH_RANGE_REG);
- start = range_end;
- }
+ if (l2_wt_override)
+ aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
+ else
+ aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}
-static void aurora_save(void __iomem *base)
+static void aurora_flush_all(void)
{
- l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
- l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
+ /* clean all ways */
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+
+ writel_relaxed(0, base + AURORA_SYNC_REG);
}
-static void aurora_resume(void)
+static void aurora_cache_sync(void)
+{
+ writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
+}
+
+static void aurora_disable(void)
{
void __iomem *base = l2x0_base;
+ unsigned long flags;
- if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
- writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
- writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
- }
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
+ writel_relaxed(0, base + AURORA_SYNC_REG);
+ l2c_write_sec(0, base, L2X0_CTRL);
+ dsb(st);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void aurora_save(void __iomem *base)
+{
+ l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
+ l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
/*
@@ -1398,10 +1421,10 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
.flush_range = aurora_flush_range,
- .flush_all = l2x0_flush_all,
- .disable = l2x0_disable,
- .sync = l2x0_cache_sync,
- .resume = aurora_resume,
+ .flush_all = aurora_flush_all,
+ .disable = aurora_disable,
+ .sync = aurora_cache_sync,
+ .resume = l2c_resume,
},
};
@@ -1414,7 +1437,7 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.fixup = aurora_fixup,
.save = aurora_save,
.outer_cache = {
- .resume = aurora_resume,
+ .resume = l2c_resume,
},
};
@@ -1562,6 +1585,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.save = l2c310_save,
+ .configure = l2c310_configure,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
@@ -1583,18 +1607,12 @@ static void __init tauros3_save(void __iomem *base)
readl_relaxed(base + L310_PREFETCH_CTRL);
}
-static void tauros3_resume(void)
+static void tauros3_configure(void __iomem *base)
{
- void __iomem *base = l2x0_base;
-
- if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
- writel_relaxed(l2x0_saved_regs.aux2_ctrl,
- base + TAUROS3_AUX2_CTRL);
- writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
- base + L310_PREFETCH_CTRL);
-
- l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
- }
+ writel_relaxed(l2x0_saved_regs.aux2_ctrl,
+ base + TAUROS3_AUX2_CTRL);
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+ base + L310_PREFETCH_CTRL);
}
static const struct l2c_init_data of_tauros3_data __initconst = {
@@ -1603,9 +1621,10 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
.num_lock = 8,
.enable = l2c_enable,
.save = tauros3_save,
+ .configure = tauros3_configure,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
- .resume = tauros3_resume,
+ .resume = l2c_resume,
},
};
@@ -1661,6 +1680,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
if (!of_property_read_bool(np, "cache-unified"))
pr_err("L2C: device tree omits to specify unified cache\n");
+ /* Read back current (default) hardware configuration */
+ if (data->save)
+ data->save(l2x0_base);
+
/* L2 configuration can only be changed if the cache is disabled */
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
if (data->of_parse)
@@ -1671,8 +1694,6 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
- __l2c_init(data, aux_val, aux_mask, cache_id);
-
- return 0;
+ return __l2c_init(data, aux_val, aux_mask, cache_id);
}
#endif
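
[Editor's note: the cache-l2x0.c rework above splits register programming out of the per-variant resume paths: DT/board values are staged in l2x0_saved_regs, and a common resume re-enables the controller through a configure() hook only when it is found disabled. The following plain-C sketch (not the kernel driver; all names are illustrative) shows that control flow in isolation.]

#include <stdbool.h>
#include <stdio.h>

struct l2c_saved {
        unsigned int aux_ctrl;
        unsigned int tag_latency;
};

struct l2c_variant {
        const char *type;
        void (*configure)(const struct l2c_saved *regs);
};

static bool cache_enabled;      /* stands in for the L2X0_CTRL_EN bit */
static struct l2c_saved saved = { 0x02020000, 0x111 };

static void demo_configure(const struct l2c_saved *regs)
{
        printf("programming aux=%#x tag=%#x\n",
               regs->aux_ctrl, regs->tag_latency);
}

/* Common resume path: never touch a controller that is already enabled,
 * otherwise replay the staged settings through the variant hook. */
static void l2c_resume(const struct l2c_variant *v)
{
        if (cache_enabled)
                return;
        v->configure(&saved);
        cache_enabled = true;
}

int main(void)
{
        const struct l2c_variant v = { "L2C-310-like", demo_configure };

        l2c_resume(&v);         /* programs and enables */
        l2c_resume(&v);         /* second call is a no-op */
        return 0;
}
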
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 91892569710f..845769e41332 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
for_each_possible_cpu(i) {
- if (i == cpu) {
- asid = 0;
- } else {
- asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
- /*
- * If this CPU has already been through a
- * rollover, but hasn't run another task in
- * the meantime, we must preserve its reserved
- * ASID, as this is the only trace we have of
- * the process it is still running.
- */
- if (asid == 0)
- asid = per_cpu(reserved_asids, i);
- __set_bit(asid & ~ASID_MASK, asid_map);
- }
+ asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * ASID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, i);
+ __set_bit(asid & ~ASID_MASK, asid_map);
per_cpu(reserved_asids, i) = asid;
}
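
The rollover above reduces to: atomically steal each CPU's active ASID, and if it was already zero (that CPU rolled over earlier and has not scheduled since) fall back to its reserved ASID so the bitmap still records it. A plain userspace sketch of that decision, with made-up arrays (active_asids, reserved_asids, asid_map) standing in for the per-CPU variables and only small ASID values so the toy bitmap fits in one word:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		4
#define ASID_BITS	0xff	/* illustrative: low 8 bits hold the ASID */

static _Atomic uint64_t active_asids[NR_CPUS];
static uint64_t reserved_asids[NR_CPUS];
static unsigned long asid_map;	/* toy bitmap, one bit per ASID (< 64 here) */

static void flush_context_sketch(void)
{
	for (int i = 0; i < NR_CPUS; i++) {
		/* Steal whatever this CPU was running with. */
		uint64_t asid = atomic_exchange(&active_asids[i], 0);

		/* Rolled over and idle since then: keep its reserved ASID. */
		if (asid == 0)
			asid = reserved_asids[i];

		asid_map |= 1UL << (asid & ASID_BITS);
		reserved_asids[i] = asid;
	}
}

int main(void)
{
	atomic_store(&active_asids[0], 0x105);	/* generation 1, ASID 5 */
	reserved_asids[1] = 0x203;		/* CPU1 idle since last rollover */
	flush_context_sketch();
	printf("asid_map = %#lx\n", asid_map);	/* 0x29: ASIDs 0, 3 and 5 */
	return 0;
}
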
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7864797609b3..903dba064a03 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
+static int __arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+
+ pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+
/**
* arm_iommu_attach_device
* @dev: valid struct device pointer
* @mapping: io address space mapping structure (returned from
* arm_iommu_create_mapping)
*
- * Attaches specified io address space mapping to the provided device,
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
* More than one client might be attached to the same io address space
* mapping.
*/
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev,
{
int err;
- err = iommu_attach_device(mapping->domain, dev);
+ err = __arm_iommu_attach_device(dev, mapping);
if (err)
return err;
- kref_get(&mapping->kref);
- dev->archdata.mapping = mapping;
-
- pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ set_dma_ops(dev, &iommu_ops);
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- */
-void arm_iommu_detach_device(struct device *dev)
+static void __arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ __arm_iommu_detach_device(dev);
+ set_dma_ops(dev, NULL);
+}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
return false;
}
- if (arm_iommu_attach_device(dev, mapping)) {
+ if (__arm_iommu_attach_device(dev, mapping)) {
pr_warn("Failed to attached device %s to IOMMU_mapping\n",
dev_name(dev));
arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
- arm_iommu_detach_device(dev);
+ if (!mapping)
+ return;
+
+ __arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
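
With the hunks above, arm_iommu_attach_device() now also installs the IOMMU-aware dma_map_ops and arm_iommu_detach_device() clears them, while the internal __arm_iommu_* helpers leave the ops alone for the automatic arch-setup path. A rough sketch of the exported, driver-facing usage (example_bind_iommu/example_unbind_iommu are made-up names; the create/release signatures are assumed from asm/dma-iommu.h of this kernel):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_bind_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* 1 GiB of IO virtual address space starting at 0x80000000. */
	mapping = arm_iommu_create_mapping(dev->bus, 0x80000000, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Attaches the IOMMU domain *and* installs iommu_ops as dma_map_ops. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}
	return 0;
}

static void example_unbind_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/* Detaches and voids the device's dma_map_ops, per the change above. */
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}
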
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 66781bf34077..c72412415093 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -36,12 +36,6 @@
* of type casting from pmd_t * to pte_t *.
*/
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pud_huge(pud_t pud)
{
return 0;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2495c8cb47ba..1609b022a72f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -319,10 +319,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
early_init_fdt_scan_reserved_mem();
- /*
- * reserve memory for DMA contigouos allocations,
- * must come from DMA area inside low memory
- */
+ /* reserve memory for DMA contiguous allocations */
dma_contiguous_reserve(arm_dma_limit);
arm_memblock_steal_permitted = false;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 249379535be2..a3681f11dd9f 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -97,6 +97,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
no_pte:
pmd_free(mm, new_pmd);
+ mm_dec_nr_pmds(mm);
no_pmd:
pud_free(mm, new_pud);
no_pud:
@@ -130,9 +131,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
+ atomic_long_dec(&mm->nr_ptes);
no_pmd:
pud_clear(pud);
pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
no_pud:
pgd_clear(pgd);
pud_free(mm, pud);
@@ -152,6 +155,7 @@ no_pgd:
pmd = pmd_offset(pud, 0);
pud_clear(pud);
pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
pgd_clear(pgd);
pud_free(mm, pud);
}
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index ba1196c968d8..082b9f2f7e90 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -98,7 +98,7 @@
#endif
#if !defined (CONFIG_ARM_LPAE) && \
(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
- L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+ L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif /* CONFIG_MMU */
diff --git a/arch/arm/plat-iop/pmu.c b/arch/arm/plat-iop/pmu.c
index ad9f9744a82d..c6d979ace524 100644
--- a/arch/arm/plat-iop/pmu.c
+++ b/arch/arm/plat-iop/pmu.c
@@ -24,7 +24,7 @@ static struct resource pmu_resource = {
};
static struct platform_device pmu_device = {
- .name = "arm-pmu",
+ .name = "xscale-pmu",
.id = -1,
.resource = &pmu_resource,
.num_resources = 1,
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 24770e5a5081..6416e03b4482 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -151,14 +151,6 @@ static int omap_dma_in_1510_mode(void)
#endif
#ifdef CONFIG_ARCH_OMAP1
-static inline int get_gdma_dev(int req)
-{
- u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
- int shift = ((req - 1) % 5) * 6;
-
- return ((omap_readl(reg) >> shift) & 0x3f) + 1;
-}
-
static inline void set_gdma_dev(int req, int dev)
{
u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 9bd2776e7d05..cb8e3d655d1a 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -236,13 +236,6 @@ config S3C_SETUP_CAMIF
help
Compile in common setup code for S3C CAMIF devices
-# DMA
-
-config S3C_DMA
- bool
- help
- Internal configuration for S3C DMA core
-
config SAMSUNG_PM_GPIO
bool
default y if GPIO_SAMSUNG && PM
@@ -250,14 +243,6 @@ config SAMSUNG_PM_GPIO
Include legacy GPIO power management code for platforms not using
pinctrl-samsung driver.
-config SAMSUNG_DMADEV
- bool "Use legacy Samsung DMA abstraction"
- depends on CPU_S5PV210 || ARCH_S3C64XX
- select DMADEVICES
- default y
- help
- Use DMA device engine for PL330 DMAC.
-
endif
config S5P_DEV_MFC
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 87746c37f030..1a29ab1f446d 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -26,12 +26,6 @@ obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o
obj-$(CONFIG_S3C_SETUP_CAMIF) += setup-camif.o
-# DMA support
-
-obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o
-
-obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
-
# PM support
obj-$(CONFIG_PM_SLEEP) += pm-common.o
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
deleted file mode 100644
index 886326ee6f6c..000000000000
--- a/arch/arm/plat-samsung/dma-ops.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/* linux/arch/arm/plat-samsung/dma-ops.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung DMA Operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/amba/pl330.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
-
-#include <mach/dma.h>
-
-#if defined(CONFIG_PL330_DMA)
-#define dma_filter pl330_filter
-#elif defined(CONFIG_S3C64XX_PL080)
-#define dma_filter pl08x_filter_id
-#endif
-
-static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
- struct samsung_dma_req *param,
- struct device *dev, char *ch_name)
-{
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(param->cap, mask);
-
- if (dev->of_node)
- return (unsigned)dma_request_slave_channel(dev, ch_name);
- else
- return (unsigned)dma_request_channel(mask, dma_filter,
- (void *)dma_ch);
-}
-
-static int samsung_dmadev_release(unsigned ch, void *param)
-{
- dma_release_channel((struct dma_chan *)ch);
-
- return 0;
-}
-
-static int samsung_dmadev_config(unsigned ch,
- struct samsung_dma_config *param)
-{
- struct dma_chan *chan = (struct dma_chan *)ch;
- struct dma_slave_config slave_config;
-
- if (param->direction == DMA_DEV_TO_MEM) {
- memset(&slave_config, 0, sizeof(struct dma_slave_config));
- slave_config.direction = param->direction;
- slave_config.src_addr = param->fifo;
- slave_config.src_addr_width = param->width;
- slave_config.src_maxburst = 1;
- dmaengine_slave_config(chan, &slave_config);
- } else if (param->direction == DMA_MEM_TO_DEV) {
- memset(&slave_config, 0, sizeof(struct dma_slave_config));
- slave_config.direction = param->direction;
- slave_config.dst_addr = param->fifo;
- slave_config.dst_addr_width = param->width;
- slave_config.dst_maxburst = 1;
- dmaengine_slave_config(chan, &slave_config);
- } else {
- pr_warn("unsupported direction\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int samsung_dmadev_prepare(unsigned ch,
- struct samsung_dma_prep *param)
-{
- struct scatterlist sg;
- struct dma_chan *chan = (struct dma_chan *)ch;
- struct dma_async_tx_descriptor *desc;
-
- switch (param->cap) {
- case DMA_SLAVE:
- sg_init_table(&sg, 1);
- sg_dma_len(&sg) = param->len;
- sg_set_page(&sg, pfn_to_page(PFN_DOWN(param->buf)),
- param->len, offset_in_page(param->buf));
- sg_dma_address(&sg) = param->buf;
-
- desc = dmaengine_prep_slave_sg(chan,
- &sg, 1, param->direction, DMA_PREP_INTERRUPT);
- break;
- case DMA_CYCLIC:
- desc = dmaengine_prep_dma_cyclic(chan, param->buf,
- param->len, param->period, param->direction,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- break;
- default:
- dev_err(&chan->dev->device, "unsupported format\n");
- return -EFAULT;
- }
-
- if (!desc) {
- dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
- return -EFAULT;
- }
-
- desc->callback = param->fp;
- desc->callback_param = param->fp_param;
-
- dmaengine_submit((struct dma_async_tx_descriptor *)desc);
-
- return 0;
-}
-
-static inline int samsung_dmadev_trigger(unsigned ch)
-{
- dma_async_issue_pending((struct dma_chan *)ch);
-
- return 0;
-}
-
-static inline int samsung_dmadev_flush(unsigned ch)
-{
- return dmaengine_terminate_all((struct dma_chan *)ch);
-}
-
-static struct samsung_dma_ops dmadev_ops = {
- .request = samsung_dmadev_request,
- .release = samsung_dmadev_release,
- .config = samsung_dmadev_config,
- .prepare = samsung_dmadev_prepare,
- .trigger = samsung_dmadev_trigger,
- .started = NULL,
- .flush = samsung_dmadev_flush,
- .stop = samsung_dmadev_flush,
-};
-
-void *samsung_dmadev_get_ops(void)
-{
- return &dmadev_ops;
-}
-EXPORT_SYMBOL(samsung_dmadev_get_ops);
diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c
deleted file mode 100644
index 6143aa147688..000000000000
--- a/arch/arm/plat-samsung/dma.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/* linux/arch/arm/plat-samsung/dma.c
- *
- * Copyright (c) 2003-2009 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C DMA core
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-struct s3c2410_dma_buf;
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-
-#include <mach/dma.h>
-#include <mach/irqs.h>
-
-/* dma channel state information */
-struct s3c2410_dma_chan s3c2410_chans[S3C_DMA_CHANNELS];
-struct s3c2410_dma_chan *s3c_dma_chan_map[DMACH_MAX];
-
-/* s3c_dma_lookup_channel
- *
- * change the dma channel number given into a real dma channel id
-*/
-
-struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
-{
- if (channel & DMACH_LOW_LEVEL)
- return &s3c2410_chans[channel & ~DMACH_LOW_LEVEL];
- else
- return s3c_dma_chan_map[channel];
-}
-
-/* do we need to protect the settings of the fields from
- * irq?
-*/
-
-int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
-
- if (chan == NULL)
- return -EINVAL;
-
- pr_debug("%s: chan=%p, op rtn=%p\n", __func__, chan, rtn);
-
- chan->op_fn = rtn;
-
- return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_set_opfn);
-
-int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
-
- if (chan == NULL)
- return -EINVAL;
-
- pr_debug("%s: chan=%p, callback rtn=%p\n", __func__, chan, rtn);
-
- chan->callback_fn = rtn;
-
- return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
-
-int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
-{
- struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
-
- if (chan == NULL)
- return -EINVAL;
-
- chan->flags = flags;
- return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_setflags);
diff --git a/arch/arm/plat-samsung/include/plat/dma-core.h b/arch/arm/plat-samsung/include/plat/dma-core.h
deleted file mode 100644
index 32ff2a92cb3c..000000000000
--- a/arch/arm/plat-samsung/include/plat/dma-core.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/plat-s3c/include/plat/dma.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Samsung S3C DMA core support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-extern struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel);
-
-extern struct s3c2410_dma_chan *s3c_dma_chan_map[];
-
-/* the currently allocated channel information */
-extern struct s3c2410_dma_chan s3c2410_chans[];
-
-
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
deleted file mode 100644
index ce6d7634b6cb..000000000000
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* arch/arm/plat-samsung/include/plat/dma-ops.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung DMA support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SAMSUNG_DMA_OPS_H_
-#define __SAMSUNG_DMA_OPS_H_ __FILE__
-
-#include <linux/dmaengine.h>
-#include <mach/dma.h>
-
-struct samsung_dma_req {
- enum dma_transaction_type cap;
- struct s3c2410_dma_client *client;
-};
-
-struct samsung_dma_prep {
- enum dma_transaction_type cap;
- enum dma_transfer_direction direction;
- dma_addr_t buf;
- unsigned long period;
- unsigned long len;
- void (*fp)(void *data);
- void *fp_param;
-};
-
-struct samsung_dma_config {
- enum dma_transfer_direction direction;
- enum dma_slave_buswidth width;
- dma_addr_t fifo;
-};
-
-struct samsung_dma_ops {
- unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param,
- struct device *dev, char *ch_name);
- int (*release)(unsigned ch, void *param);
- int (*config)(unsigned ch, struct samsung_dma_config *param);
- int (*prepare)(unsigned ch, struct samsung_dma_prep *param);
- int (*trigger)(unsigned ch);
- int (*started)(unsigned ch);
- int (*flush)(unsigned ch);
- int (*stop)(unsigned ch);
-};
-
-extern void *samsung_dmadev_get_ops(void);
-extern void *s3c_dma_get_ops(void);
-
-static inline void *__samsung_dma_get_ops(void)
-{
- if (samsung_dma_is_dmadev())
- return samsung_dmadev_get_ops();
- else
- return s3c_dma_get_ops();
-}
-
-/*
- * samsung_dma_get_ops
- * get the set of samsung dma operations
- */
-#define samsung_dma_get_ops() __samsung_dma_get_ops()
-
-#endif /* __SAMSUNG_DMA_OPS_H_ */
diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
deleted file mode 100644
index abe07fae71db..000000000000
--- a/arch/arm/plat-samsung/include/plat/dma-pl330.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __DMA_PL330_H_
-#define __DMA_PL330_H_ __FILE__
-
-/*
- * PL330 can assign any channel to communicate with
- * any of the peripherals attched to the DMAC.
- * For the sake of consistency across client drivers,
- * We keep the channel names unchanged and only add
- * missing peripherals are added.
- * Order is not important since DMA PL330 API driver
- * use these just as IDs.
- */
-enum dma_ch {
- DMACH_UART0_RX = 0,
- DMACH_UART0_TX,
- DMACH_UART1_RX,
- DMACH_UART1_TX,
- DMACH_UART2_RX,
- DMACH_UART2_TX,
- DMACH_UART3_RX,
- DMACH_UART3_TX,
- DMACH_UART4_RX,
- DMACH_UART4_TX,
- DMACH_UART5_RX,
- DMACH_UART5_TX,
- DMACH_USI_RX,
- DMACH_USI_TX,
- DMACH_IRDA,
- DMACH_I2S0_RX,
- DMACH_I2S0_TX,
- DMACH_I2S0S_TX,
- DMACH_I2S1_RX,
- DMACH_I2S1_TX,
- DMACH_I2S2_RX,
- DMACH_I2S2_TX,
- DMACH_SPI0_RX,
- DMACH_SPI0_TX,
- DMACH_SPI1_RX,
- DMACH_SPI1_TX,
- DMACH_SPI2_RX,
- DMACH_SPI2_TX,
- DMACH_AC97_MICIN,
- DMACH_AC97_PCMIN,
- DMACH_AC97_PCMOUT,
- DMACH_EXTERNAL,
- DMACH_PWM,
- DMACH_SPDIF,
- DMACH_HSI_RX,
- DMACH_HSI_TX,
- DMACH_PCM0_TX,
- DMACH_PCM0_RX,
- DMACH_PCM1_TX,
- DMACH_PCM1_RX,
- DMACH_PCM2_TX,
- DMACH_PCM2_RX,
- DMACH_MSM_REQ3,
- DMACH_MSM_REQ2,
- DMACH_MSM_REQ1,
- DMACH_MSM_REQ0,
- DMACH_SLIMBUS0_RX,
- DMACH_SLIMBUS0_TX,
- DMACH_SLIMBUS0AUX_RX,
- DMACH_SLIMBUS0AUX_TX,
- DMACH_SLIMBUS1_RX,
- DMACH_SLIMBUS1_TX,
- DMACH_SLIMBUS2_RX,
- DMACH_SLIMBUS2_TX,
- DMACH_SLIMBUS3_RX,
- DMACH_SLIMBUS3_TX,
- DMACH_SLIMBUS4_RX,
- DMACH_SLIMBUS4_TX,
- DMACH_SLIMBUS5_RX,
- DMACH_SLIMBUS5_TX,
- DMACH_MIPI_HSI0,
- DMACH_MIPI_HSI1,
- DMACH_MIPI_HSI2,
- DMACH_MIPI_HSI3,
- DMACH_MIPI_HSI4,
- DMACH_MIPI_HSI5,
- DMACH_MIPI_HSI6,
- DMACH_MIPI_HSI7,
- DMACH_DISP1,
- DMACH_MTOM_0,
- DMACH_MTOM_1,
- DMACH_MTOM_2,
- DMACH_MTOM_3,
- DMACH_MTOM_4,
- DMACH_MTOM_5,
- DMACH_MTOM_6,
- DMACH_MTOM_7,
- /* END Marker, also used to denote a reserved channel */
- DMACH_MAX,
-};
-
-struct s3c2410_dma_client {
- char *name;
-};
-
-static inline bool samsung_dma_has_circular(void)
-{
- return true;
-}
-
-static inline bool samsung_dma_is_dmadev(void)
-{
- return true;
-}
-
-#include <plat/dma-ops.h>
-
-#endif /* __DMA_PL330_H_ */
diff --git a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
deleted file mode 100644
index bd3a6db14cbb..000000000000
--- a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
- *
- * Copyright (C) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Samsung S3C24XX DMA support - per SoC functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <plat/dma-core.h>
-
-extern struct bus_type dma_subsys;
-extern struct s3c2410_dma_chan s3c2410_chans[S3C_DMA_CHANNELS];
-
-#define DMA_CH_VALID (1<<31)
-#define DMA_CH_NEVER (1<<30)
-
-/* struct s3c24xx_dma_map
- *
- * this holds the mapping information for the channel selected
- * to be connected to the specified device
-*/
-
-struct s3c24xx_dma_map {
- const char *name;
-
- unsigned long channels[S3C_DMA_CHANNELS];
-};
-
-struct s3c24xx_dma_selection {
- struct s3c24xx_dma_map *map;
- unsigned long map_size;
- unsigned long dcon_mask;
-
- void (*select)(struct s3c2410_dma_chan *chan,
- struct s3c24xx_dma_map *map);
-};
-
-extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel);
-
-/* struct s3c24xx_dma_order_ch
- *
- * channel map for one of the `enum dma_ch` dma channels. the list
- * entry contains a set of low-level channel numbers, orred with
- * DMA_CH_VALID, which are checked in the order in the array.
-*/
-
-struct s3c24xx_dma_order_ch {
- unsigned int list[S3C_DMA_CHANNELS]; /* list of channels */
- unsigned int flags; /* flags */
-};
-
-/* struct s3c24xx_dma_order
- *
- * information provided by either the core or the board to give the
- * dma system a hint on how to allocate channels
-*/
-
-struct s3c24xx_dma_order {
- struct s3c24xx_dma_order_ch channels[DMACH_MAX];
-};
-
-extern int s3c24xx_dma_order_set(struct s3c24xx_dma_order *map);
-
-/* DMA init code, called from the cpu support code */
-
-extern int s3c2410_dma_init(void);
-
-extern int s3c24xx_dma_init(unsigned int channels, unsigned int irq,
- unsigned int stride);
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
deleted file mode 100644
index 7b02143ccd9a..000000000000
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* arch/arm/plat-samsung/include/plat/dma.h
- *
- * Copyright (C) 2003-2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Samsung S3C DMA support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_DMA_H
-#define __PLAT_DMA_H
-
-#include <linux/dma-mapping.h>
-
-enum s3c2410_dma_buffresult {
- S3C2410_RES_OK,
- S3C2410_RES_ERR,
- S3C2410_RES_ABORT
-};
-
-/* enum s3c2410_chan_op
- *
- * operation codes passed to the DMA code by the user, and also used
- * to inform the current channel owner of any changes to the system state
-*/
-
-enum s3c2410_chan_op {
- S3C2410_DMAOP_START,
- S3C2410_DMAOP_STOP,
- S3C2410_DMAOP_PAUSE,
- S3C2410_DMAOP_RESUME,
- S3C2410_DMAOP_FLUSH,
- S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */
- S3C2410_DMAOP_STARTED, /* indicate channel started */
-};
-
-struct s3c2410_dma_client {
- char *name;
-};
-
-struct s3c2410_dma_chan;
-enum dma_ch;
-
-/* s3c2410_dma_cbfn_t
- *
- * buffer callback routine type
-*/
-
-typedef void (*s3c2410_dma_cbfn_t)(struct s3c2410_dma_chan *,
- void *buf, int size,
- enum s3c2410_dma_buffresult result);
-
-typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
- enum s3c2410_chan_op );
-
-
-
-/* s3c2410_dma_request
- *
- * request a dma channel exclusivley
-*/
-
-extern int s3c2410_dma_request(enum dma_ch channel,
- struct s3c2410_dma_client *, void *dev);
-
-
-/* s3c2410_dma_ctrl
- *
- * change the state of the dma channel
-*/
-
-extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
-
-/* s3c2410_dma_setflags
- *
- * set the channel's flags to a given state
-*/
-
-extern int s3c2410_dma_setflags(enum dma_ch channel,
- unsigned int flags);
-
-/* s3c2410_dma_free
- *
- * free the dma channel (will also abort any outstanding operations)
-*/
-
-extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
-
-/* s3c2410_dma_enqueue
- *
- * place the given buffer onto the queue of operations for the channel.
- * The buffer must be allocated from dma coherent memory, or the Dcache/WB
- * drained before the buffer is given to the DMA system.
-*/
-
-extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
- dma_addr_t data, int size);
-
-/* s3c2410_dma_config
- *
- * configure the dma channel
-*/
-
-extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
-
-/* s3c2410_dma_devconfig
- *
- * configure the device we're talking to
-*/
-
-extern int s3c2410_dma_devconfig(enum dma_ch channel,
- enum dma_data_direction source, unsigned long devaddr);
-
-/* s3c2410_dma_getposition
- *
- * get the position that the dma transfer is currently at
-*/
-
-extern int s3c2410_dma_getposition(enum dma_ch channel,
- dma_addr_t *src, dma_addr_t *dest);
-
-extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
-extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
-
-#include <plat/dma-ops.h>
-
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/regs-dma.h b/arch/arm/plat-samsung/include/plat/regs-dma.h
deleted file mode 100644
index a7d622ef16af..000000000000
--- a/arch/arm/plat-samsung/include/plat/regs-dma.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* arch/arm/plat-samsung/include/plat/regs-dma.h
- *
- * Copyright (C) 2003-2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Samsung S3C24XX DMA support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_PLAT_REGS_DMA_H
-#define __ASM_PLAT_REGS_DMA_H __FILE__
-
-#define S3C2410_DMA_DISRC (0x00)
-#define S3C2410_DMA_DISRCC (0x04)
-#define S3C2410_DMA_DIDST (0x08)
-#define S3C2410_DMA_DIDSTC (0x0C)
-#define S3C2410_DMA_DCON (0x10)
-#define S3C2410_DMA_DSTAT (0x14)
-#define S3C2410_DMA_DCSRC (0x18)
-#define S3C2410_DMA_DCDST (0x1C)
-#define S3C2410_DMA_DMASKTRIG (0x20)
-#define S3C2412_DMA_DMAREQSEL (0x24)
-#define S3C2443_DMA_DMAREQSEL (0x24)
-
-#define S3C2410_DISRCC_INC (1 << 0)
-#define S3C2410_DISRCC_APB (1 << 1)
-
-#define S3C2410_DMASKTRIG_STOP (1 << 2)
-#define S3C2410_DMASKTRIG_ON (1 << 1)
-#define S3C2410_DMASKTRIG_SWTRIG (1 << 0)
-
-#define S3C2410_DCON_DEMAND (0 << 31)
-#define S3C2410_DCON_HANDSHAKE (1 << 31)
-#define S3C2410_DCON_SYNC_PCLK (0 << 30)
-#define S3C2410_DCON_SYNC_HCLK (1 << 30)
-
-#define S3C2410_DCON_INTREQ (1 << 29)
-
-#define S3C2410_DCON_CH0_XDREQ0 (0 << 24)
-#define S3C2410_DCON_CH0_UART0 (1 << 24)
-#define S3C2410_DCON_CH0_SDI (2 << 24)
-#define S3C2410_DCON_CH0_TIMER (3 << 24)
-#define S3C2410_DCON_CH0_USBEP1 (4 << 24)
-
-#define S3C2410_DCON_CH1_XDREQ1 (0 << 24)
-#define S3C2410_DCON_CH1_UART1 (1 << 24)
-#define S3C2410_DCON_CH1_I2SSDI (2 << 24)
-#define S3C2410_DCON_CH1_SPI (3 << 24)
-#define S3C2410_DCON_CH1_USBEP2 (4 << 24)
-
-#define S3C2410_DCON_CH2_I2SSDO (0 << 24)
-#define S3C2410_DCON_CH2_I2SSDI (1 << 24)
-#define S3C2410_DCON_CH2_SDI (2 << 24)
-#define S3C2410_DCON_CH2_TIMER (3 << 24)
-#define S3C2410_DCON_CH2_USBEP3 (4 << 24)
-
-#define S3C2410_DCON_CH3_UART2 (0 << 24)
-#define S3C2410_DCON_CH3_SDI (1 << 24)
-#define S3C2410_DCON_CH3_SPI (2 << 24)
-#define S3C2410_DCON_CH3_TIMER (3 << 24)
-#define S3C2410_DCON_CH3_USBEP4 (4 << 24)
-
-#define S3C2410_DCON_SRCSHIFT (24)
-#define S3C2410_DCON_SRCMASK (7 << 24)
-
-#define S3C2410_DCON_BYTE (0 << 20)
-#define S3C2410_DCON_HALFWORD (1 << 20)
-#define S3C2410_DCON_WORD (2 << 20)
-
-#define S3C2410_DCON_AUTORELOAD (0 << 22)
-#define S3C2410_DCON_NORELOAD (1 << 22)
-#define S3C2410_DCON_HWTRIG (1 << 23)
-
-#ifdef CONFIG_CPU_S3C2440
-
-#define S3C2440_DIDSTC_CHKINT (1 << 2)
-
-#define S3C2440_DCON_CH0_I2SSDO (5 << 24)
-#define S3C2440_DCON_CH0_PCMIN (6 << 24)
-
-#define S3C2440_DCON_CH1_PCMOUT (5 << 24)
-#define S3C2440_DCON_CH1_SDI (6 << 24)
-
-#define S3C2440_DCON_CH2_PCMIN (5 << 24)
-#define S3C2440_DCON_CH2_MICIN (6 << 24)
-
-#define S3C2440_DCON_CH3_MICIN (5 << 24)
-#define S3C2440_DCON_CH3_PCMOUT (6 << 24)
-#endif /* CONFIG_CPU_S3C2440 */
-
-#ifdef CONFIG_CPU_S3C2412
-
-#define S3C2412_DMAREQSEL_SRC(x) ((x) << 1)
-
-#define S3C2412_DMAREQSEL_HW (1)
-
-#define S3C2412_DMAREQSEL_SPI0TX S3C2412_DMAREQSEL_SRC(0)
-#define S3C2412_DMAREQSEL_SPI0RX S3C2412_DMAREQSEL_SRC(1)
-#define S3C2412_DMAREQSEL_SPI1TX S3C2412_DMAREQSEL_SRC(2)
-#define S3C2412_DMAREQSEL_SPI1RX S3C2412_DMAREQSEL_SRC(3)
-#define S3C2412_DMAREQSEL_I2STX S3C2412_DMAREQSEL_SRC(4)
-#define S3C2412_DMAREQSEL_I2SRX S3C2412_DMAREQSEL_SRC(5)
-#define S3C2412_DMAREQSEL_TIMER S3C2412_DMAREQSEL_SRC(9)
-#define S3C2412_DMAREQSEL_SDI S3C2412_DMAREQSEL_SRC(10)
-#define S3C2412_DMAREQSEL_USBEP1 S3C2412_DMAREQSEL_SRC(13)
-#define S3C2412_DMAREQSEL_USBEP2 S3C2412_DMAREQSEL_SRC(14)
-#define S3C2412_DMAREQSEL_USBEP3 S3C2412_DMAREQSEL_SRC(15)
-#define S3C2412_DMAREQSEL_USBEP4 S3C2412_DMAREQSEL_SRC(16)
-#define S3C2412_DMAREQSEL_XDREQ0 S3C2412_DMAREQSEL_SRC(17)
-#define S3C2412_DMAREQSEL_XDREQ1 S3C2412_DMAREQSEL_SRC(18)
-#define S3C2412_DMAREQSEL_UART0_0 S3C2412_DMAREQSEL_SRC(19)
-#define S3C2412_DMAREQSEL_UART0_1 S3C2412_DMAREQSEL_SRC(20)
-#define S3C2412_DMAREQSEL_UART1_0 S3C2412_DMAREQSEL_SRC(21)
-#define S3C2412_DMAREQSEL_UART1_1 S3C2412_DMAREQSEL_SRC(22)
-#define S3C2412_DMAREQSEL_UART2_0 S3C2412_DMAREQSEL_SRC(23)
-#define S3C2412_DMAREQSEL_UART2_1 S3C2412_DMAREQSEL_SRC(24)
-#endif /* CONFIG_CPU_S3C2412 */
-
-#if defined(CONFIG_CPU_S3C2416) || defined(CONFIG_CPU_S3C2443)
-
-#define S3C2443_DMAREQSEL_SRC(x) ((x) << 1)
-
-#define S3C2443_DMAREQSEL_HW (1)
-
-#define S3C2443_DMAREQSEL_SPI0TX S3C2443_DMAREQSEL_SRC(0)
-#define S3C2443_DMAREQSEL_SPI0RX S3C2443_DMAREQSEL_SRC(1)
-#define S3C2443_DMAREQSEL_SPI1TX S3C2443_DMAREQSEL_SRC(2)
-#define S3C2443_DMAREQSEL_SPI1RX S3C2443_DMAREQSEL_SRC(3)
-#define S3C2443_DMAREQSEL_I2STX S3C2443_DMAREQSEL_SRC(4)
-#define S3C2443_DMAREQSEL_I2SRX S3C2443_DMAREQSEL_SRC(5)
-#define S3C2443_DMAREQSEL_TIMER S3C2443_DMAREQSEL_SRC(9)
-#define S3C2443_DMAREQSEL_SDI S3C2443_DMAREQSEL_SRC(10)
-#define S3C2443_DMAREQSEL_XDREQ0 S3C2443_DMAREQSEL_SRC(17)
-#define S3C2443_DMAREQSEL_XDREQ1 S3C2443_DMAREQSEL_SRC(18)
-#define S3C2443_DMAREQSEL_UART0_0 S3C2443_DMAREQSEL_SRC(19)
-#define S3C2443_DMAREQSEL_UART0_1 S3C2443_DMAREQSEL_SRC(20)
-#define S3C2443_DMAREQSEL_UART1_0 S3C2443_DMAREQSEL_SRC(21)
-#define S3C2443_DMAREQSEL_UART1_1 S3C2443_DMAREQSEL_SRC(22)
-#define S3C2443_DMAREQSEL_UART2_0 S3C2443_DMAREQSEL_SRC(23)
-#define S3C2443_DMAREQSEL_UART2_1 S3C2443_DMAREQSEL_SRC(24)
-#define S3C2443_DMAREQSEL_UART3_0 S3C2443_DMAREQSEL_SRC(25)
-#define S3C2443_DMAREQSEL_UART3_1 S3C2443_DMAREQSEL_SRC(26)
-#define S3C2443_DMAREQSEL_PCMOUT S3C2443_DMAREQSEL_SRC(27)
-#define S3C2443_DMAREQSEL_PCMIN S3C2443_DMAREQSEL_SRC(28)
-#define S3C2443_DMAREQSEL_MICIN S3C2443_DMAREQSEL_SRC(29)
-#endif /* CONFIG_CPU_S3C2443 */
-
-#endif /* __ASM_PLAT_REGS_DMA_H */
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
deleted file mode 100644
index 98b10ba67dc7..000000000000
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/* linux/arch/arm/plat-samsung/s3c-dma-ops.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung S3C-DMA Operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/export.h>
-
-#include <mach/dma.h>
-
-struct cb_data {
- void (*fp) (void *);
- void *fp_param;
- unsigned ch;
- struct list_head node;
-};
-
-static LIST_HEAD(dma_list);
-
-static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
- int size, enum s3c2410_dma_buffresult res)
-{
- struct cb_data *data = param;
-
- data->fp(data->fp_param);
-}
-
-static unsigned s3c_dma_request(enum dma_ch dma_ch,
- struct samsung_dma_req *param,
- struct device *dev, char *ch_name)
-{
- struct cb_data *data;
-
- if (s3c2410_dma_request(dma_ch, param->client, NULL) < 0) {
- s3c2410_dma_free(dma_ch, param->client);
- return 0;
- }
-
- if (param->cap == DMA_CYCLIC)
- s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
-
- data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
- data->ch = dma_ch;
- list_add_tail(&data->node, &dma_list);
-
- return (unsigned)dma_ch;
-}
-
-static int s3c_dma_release(unsigned ch, void *param)
-{
- struct cb_data *data;
-
- list_for_each_entry(data, &dma_list, node)
- if (data->ch == ch)
- break;
- list_del(&data->node);
-
- s3c2410_dma_free(ch, param);
- kfree(data);
-
- return 0;
-}
-
-static int s3c_dma_config(unsigned ch, struct samsung_dma_config *param)
-{
- s3c2410_dma_devconfig(ch, param->direction, param->fifo);
- s3c2410_dma_config(ch, param->width);
-
- return 0;
-}
-
-static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param)
-{
- struct cb_data *data;
- dma_addr_t pos = param->buf;
- dma_addr_t end = param->buf + param->len;
-
- list_for_each_entry(data, &dma_list, node)
- if (data->ch == ch)
- break;
-
- if (!data->fp) {
- s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
- data->fp = param->fp;
- data->fp_param = param->fp_param;
- }
-
- if (param->cap != DMA_CYCLIC) {
- s3c2410_dma_enqueue(ch, (void *)data, param->buf, param->len);
- return 0;
- }
-
- while (pos < end) {
- s3c2410_dma_enqueue(ch, (void *)data, pos, param->period);
- pos += param->period;
- }
-
- return 0;
-}
-
-static inline int s3c_dma_trigger(unsigned ch)
-{
- return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
-}
-
-static inline int s3c_dma_started(unsigned ch)
-{
- return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
-}
-
-static inline int s3c_dma_flush(unsigned ch)
-{
- return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
-}
-
-static inline int s3c_dma_stop(unsigned ch)
-{
- return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
-}
-
-static struct samsung_dma_ops s3c_dma_ops = {
- .request = s3c_dma_request,
- .release = s3c_dma_release,
- .config = s3c_dma_config,
- .prepare = s3c_dma_prepare,
- .trigger = s3c_dma_trigger,
- .started = s3c_dma_started,
- .flush = s3c_dma_flush,
- .stop = s3c_dma_stop,
-};
-
-void *s3c_dma_get_ops(void)
-{
- return &s3c_dma_ops;
-}
-EXPORT_SYMBOL(s3c_dma_get_ops);
diff --git a/arch/arm/probes/Makefile b/arch/arm/probes/Makefile
new file mode 100644
index 000000000000..aa1f8590dcdd
--- /dev/null
+++ b/arch/arm/probes/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_UPROBES) += decode.o decode-arm.o uprobes/
+obj-$(CONFIG_KPROBES) += decode.o kprobes/
+ifdef CONFIG_THUMB2_KERNEL
+obj-$(CONFIG_KPROBES) += decode-thumb.o
+else
+obj-$(CONFIG_KPROBES) += decode-arm.o
+endif
diff --git a/arch/arm/kernel/probes-arm.c b/arch/arm/probes/decode-arm.c
index 8eaef81d8344..f72c33a2dcfb 100644
--- a/arch/arm/kernel/probes-arm.c
+++ b/arch/arm/probes/decode-arm.c
@@ -1,5 +1,6 @@
/*
- * arch/arm/kernel/probes-arm.c
+ *
+ * arch/arm/probes/decode-arm.c
*
* Some code moved here from arch/arm/kernel/kprobes-arm.c
*
@@ -20,8 +21,8 @@
#include <linux/stddef.h>
#include <linux/ptrace.h>
-#include "probes.h"
-#include "probes-arm.h"
+#include "decode.h"
+#include "decode-arm.h"
#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
@@ -369,17 +370,17 @@ static const union decode_item arm_cccc_001x_table[] = {
/* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
/* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
- DECODE_EMULATEX (0x0fb00000, 0x03000000, PROBES_DATA_PROCESSING_IMM,
+ DECODE_EMULATEX (0x0fb00000, 0x03000000, PROBES_MOV_HALFWORD,
REGS(0, NOPC, 0, 0, 0)),
/* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
DECODE_OR (0x0fff00ff, 0x03200001),
/* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
- DECODE_EMULATE (0x0fff00ff, 0x03200004, PROBES_EMULATE_NONE),
+ DECODE_EMULATE (0x0fff00ff, 0x03200004, PROBES_SEV),
/* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
/* WFE cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
/* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
- DECODE_SIMULATE (0x0fff00fc, 0x03200000, PROBES_SIMULATE_NOP),
+ DECODE_SIMULATE (0x0fff00fc, 0x03200000, PROBES_WFE),
/* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
/* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
/* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
@@ -725,10 +726,11 @@ static void __kprobes arm_singlestep(probes_opcode_t insn,
*/
enum probes_insn __kprobes
arm_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
- bool emulate, const union decode_action *actions)
+ bool emulate, const union decode_action *actions,
+ const struct decode_checker *checkers[])
{
asi->insn_singlestep = arm_singlestep;
asi->insn_check_cc = probes_condition_checks[insn>>28];
return probes_decode_insn(insn, asi, probes_decode_arm_table, false,
- emulate, actions);
+ emulate, actions, checkers);
}
diff --git a/arch/arm/kernel/probes-arm.h b/arch/arm/probes/decode-arm.h
index ace6572f6e26..b3b80f6d414b 100644
--- a/arch/arm/kernel/probes-arm.h
+++ b/arch/arm/probes/decode-arm.h
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/probes-arm.h
+ * arch/arm/probes/decode-arm.h
*
* Copyright 2013 Linaro Ltd.
* Written by: David A. Long
@@ -15,9 +15,9 @@
#ifndef _ARM_KERNEL_PROBES_ARM_H
#define _ARM_KERNEL_PROBES_ARM_H
+#include "decode.h"
+
enum probes_arm_action {
- PROBES_EMULATE_NONE,
- PROBES_SIMULATE_NOP,
PROBES_PRELOAD_IMM,
PROBES_PRELOAD_REG,
PROBES_BRANCH_IMM,
@@ -68,6 +68,7 @@ extern const union decode_item probes_decode_arm_table[];
enum probes_insn arm_probes_decode_insn(probes_opcode_t,
struct arch_probes_insn *, bool emulate,
- const union decode_action *actions);
+ const union decode_action *actions,
+ const struct decode_checker *checkers[]);
#endif
diff --git a/arch/arm/kernel/probes-thumb.c b/arch/arm/probes/decode-thumb.c
index 4131351e812f..985e7dd4cac6 100644
--- a/arch/arm/kernel/probes-thumb.c
+++ b/arch/arm/probes/decode-thumb.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/probes-thumb.c
+ * arch/arm/probes/decode-thumb.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -12,8 +12,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "probes.h"
-#include "probes-thumb.h"
+#include "decode.h"
+#include "decode-thumb.h"
static const union decode_item t32_table_1110_100x_x0xx[] = {
@@ -863,20 +863,22 @@ static void __kprobes thumb32_singlestep(probes_opcode_t opcode,
enum probes_insn __kprobes
thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
- bool emulate, const union decode_action *actions)
+ bool emulate, const union decode_action *actions,
+ const struct decode_checker *checkers[])
{
asi->insn_singlestep = thumb16_singlestep;
asi->insn_check_cc = thumb_check_cc;
return probes_decode_insn(insn, asi, probes_decode_thumb16_table, true,
- emulate, actions);
+ emulate, actions, checkers);
}
enum probes_insn __kprobes
thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
- bool emulate, const union decode_action *actions)
+ bool emulate, const union decode_action *actions,
+ const struct decode_checker *checkers[])
{
asi->insn_singlestep = thumb32_singlestep;
asi->insn_check_cc = thumb_check_cc;
return probes_decode_insn(insn, asi, probes_decode_thumb32_table, true,
- emulate, actions);
+ emulate, actions, checkers);
}
diff --git a/arch/arm/kernel/probes-thumb.h b/arch/arm/probes/decode-thumb.h
index 7c6f6ebe514f..8457add0a2d8 100644
--- a/arch/arm/kernel/probes-thumb.h
+++ b/arch/arm/probes/decode-thumb.h
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/probes-thumb.h
+ * arch/arm/probes/decode-thumb.h
*
* Copyright 2013 Linaro Ltd.
* Written by: David A. Long
@@ -15,6 +15,8 @@
#ifndef _ARM_KERNEL_PROBES_THUMB_H
#define _ARM_KERNEL_PROBES_THUMB_H
+#include "decode.h"
+
/*
* True if current instruction is in an IT block.
*/
@@ -89,9 +91,11 @@ extern const union decode_item probes_decode_thumb16_table[];
enum probes_insn __kprobes
thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
- bool emulate, const union decode_action *actions);
+ bool emulate, const union decode_action *actions,
+ const struct decode_checker *checkers[]);
enum probes_insn __kprobes
thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
- bool emulate, const union decode_action *actions);
+ bool emulate, const union decode_action *actions,
+ const struct decode_checker *checkers[]);
#endif
diff --git a/arch/arm/kernel/probes.c b/arch/arm/probes/decode.c
index a8ab540d7e73..880ebe0cdf19 100644
--- a/arch/arm/kernel/probes.c
+++ b/arch/arm/probes/decode.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/probes.c
+ * arch/arm/probes/decode.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -17,7 +17,7 @@
#include <asm/ptrace.h>
#include <linux/bug.h>
-#include "probes.h"
+#include "decode.h"
#ifndef find_str_pc_offset
@@ -342,6 +342,31 @@ static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
[DECODE_TYPE_REJECT] = sizeof(struct decode_reject)
};
+static int run_checkers(const struct decode_checker *checkers[],
+ int action, probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ const struct decode_checker **p;
+
+ if (!checkers)
+ return INSN_GOOD;
+
+ p = checkers;
+ while (*p != NULL) {
+ int retval;
+ probes_check_t *checker_func = (*p)[action].checker;
+
+ retval = INSN_GOOD;
+ if (checker_func)
+ retval = checker_func(insn, asi, h);
+ if (retval == INSN_REJECTED)
+ return retval;
+ p++;
+ }
+ return INSN_GOOD;
+}
+
/*
* probes_decode_insn operates on data tables in order to decode an ARM
* architecture instruction onto which a kprobe has been placed.
@@ -388,11 +413,34 @@ static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
int __kprobes
probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
const union decode_item *table, bool thumb,
- bool emulate, const union decode_action *actions)
+ bool emulate, const union decode_action *actions,
+ const struct decode_checker *checkers[])
{
const struct decode_header *h = (struct decode_header *)table;
const struct decode_header *next;
bool matched = false;
+ /*
+ * @insn can be modified by decode_regs. Save its original
+ * value for checkers.
+ */
+ probes_opcode_t origin_insn = insn;
+
+ /*
+ * stack_space is initialized to 0 here. Checker functions
+ * should update its value if they find this is a stack store
+ * instruction: a positive value means bytes of stack usage, a
+ * negative value means the stack usage cannot be determined
+ * statically. For an instruction that does not store to the
+ * stack, the checkers leave it untouched.
+ */
+ asi->stack_space = 0;
+
+ /*
+ * Similarly to stack_space, register_usage_flags is filled by
+ * checkers. Its default value is set to ~0, which is 'all
+ * registers are used', to prevent any potential optimization.
+ */
+ asi->register_usage_flags = ~0UL;
if (emulate)
insn = prepare_emulated_insn(insn, asi, thumb);
@@ -422,24 +470,41 @@ probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
}
case DECODE_TYPE_CUSTOM: {
+ int err;
struct decode_custom *d = (struct decode_custom *)h;
- return actions[d->decoder.action].decoder(insn, asi, h);
+ int action = d->decoder.action;
+
+ err = run_checkers(checkers, action, origin_insn, asi, h);
+ if (err == INSN_REJECTED)
+ return INSN_REJECTED;
+ return actions[action].decoder(insn, asi, h);
}
case DECODE_TYPE_SIMULATE: {
+ int err;
struct decode_simulate *d = (struct decode_simulate *)h;
- asi->insn_handler = actions[d->handler.action].handler;
+ int action = d->handler.action;
+
+ err = run_checkers(checkers, action, origin_insn, asi, h);
+ if (err == INSN_REJECTED)
+ return INSN_REJECTED;
+ asi->insn_handler = actions[action].handler;
return INSN_GOOD_NO_SLOT;
}
case DECODE_TYPE_EMULATE: {
+ int err;
struct decode_emulate *d = (struct decode_emulate *)h;
+ int action = d->handler.action;
+
+ err = run_checkers(checkers, action, origin_insn, asi, h);
+ if (err == INSN_REJECTED)
+ return INSN_REJECTED;
if (!emulate)
- return actions[d->handler.action].decoder(insn,
- asi, h);
+ return actions[action].decoder(insn, asi, h);
- asi->insn_handler = actions[d->handler.action].handler;
+ asi->insn_handler = actions[action].handler;
set_emulated_insn(insn, asi, thumb);
return INSN_GOOD;
}
diff --git a/arch/arm/kernel/probes.h b/arch/arm/probes/decode.h
index dba9f2466a93..f9b08ba7fe73 100644
--- a/arch/arm/kernel/probes.h
+++ b/arch/arm/probes/decode.h
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/probes.h
+ * arch/arm/probes/decode.h
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -314,6 +314,14 @@ union decode_action {
probes_custom_decode_t *decoder;
};
+typedef enum probes_insn (probes_check_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ const struct decode_header *);
+
+struct decode_checker {
+ probes_check_t *checker;
+};
+
#define DECODE_END \
{.bits = DECODE_TYPE_END}
@@ -402,6 +410,7 @@ probes_insn_handler_t probes_emulate_none;
int __kprobes
probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
const union decode_item *table, bool thumb, bool emulate,
- const union decode_action *actions);
+ const union decode_action *actions,
+ const struct decode_checker **checkers);
#endif
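
To tie the pieces together: a checker table has one struct decode_checker slot per action value and is passed as a NULL-terminated list of tables, exactly as run_checkers() walks it. A minimal sketch against the declarations above (MY_ACTION, my_checker and my_checkers are made-up names for illustration):

#include "decode.h"

enum { MY_ACTION, NUM_MY_ACTIONS };	/* hypothetical action space */

static enum probes_insn my_checker(probes_opcode_t insn,
				   struct arch_probes_insn *asi,
				   const struct decode_header *h)
{
	/* Reject anything this table cannot handle; accept the rest. */
	return (insn & 0xf) == 0xf ? INSN_REJECTED : INSN_GOOD;
}

static const struct decode_checker my_checker_table[NUM_MY_ACTIONS] = {
	[MY_ACTION] = {.checker = my_checker},
};

/* NULL-terminated, in the style of kprobes_arm_checkers[] further down. */
static const struct decode_checker *my_checkers[] = {
	my_checker_table,
	NULL,
};
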
diff --git a/arch/arm/probes/kprobes/Makefile b/arch/arm/probes/kprobes/Makefile
new file mode 100644
index 000000000000..76a36bf102b7
--- /dev/null
+++ b/arch/arm/probes/kprobes/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_KPROBES) += core.o actions-common.o checkers-common.o
+obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
+test-kprobes-objs := test-core.o
+
+ifdef CONFIG_THUMB2_KERNEL
+obj-$(CONFIG_KPROBES) += actions-thumb.o checkers-thumb.o
+test-kprobes-objs += test-thumb.o
+else
+obj-$(CONFIG_KPROBES) += actions-arm.o checkers-arm.o
+obj-$(CONFIG_OPTPROBES) += opt-arm.o
+test-kprobes-objs += test-arm.o
+endif
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/probes/kprobes/actions-arm.c
index ac300c60d656..b9056d649607 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/probes/kprobes/actions-arm.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/kprobes-decode.c
+ * arch/arm/probes/kprobes/actions-arm.c
*
* Copyright (C) 2006, 2007 Motorola Inc.
*
@@ -62,8 +62,9 @@
#include <linux/kprobes.h>
#include <linux/ptrace.h>
-#include "kprobes.h"
-#include "probes-arm.h"
+#include "../decode-arm.h"
+#include "core.h"
+#include "checkers.h"
#if __LINUX_ARM_ARCH__ >= 6
#define BLX(reg) "blx "reg" \n\t"
@@ -302,8 +303,6 @@ emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(probes_opcode_t insn,
}
const union decode_action kprobes_arm_actions[NUM_PROBES_ARM_ACTIONS] = {
- [PROBES_EMULATE_NONE] = {.handler = probes_emulate_none},
- [PROBES_SIMULATE_NOP] = {.handler = probes_simulate_nop},
[PROBES_PRELOAD_IMM] = {.handler = probes_simulate_nop},
[PROBES_PRELOAD_REG] = {.handler = probes_simulate_nop},
[PROBES_BRANCH_IMM] = {.handler = simulate_blx1},
@@ -341,3 +340,5 @@ const union decode_action kprobes_arm_actions[NUM_PROBES_ARM_ACTIONS] = {
[PROBES_BRANCH] = {.handler = simulate_bbl},
[PROBES_LDMSTM] = {.decoder = kprobe_decode_ldmstm}
};
+
+const struct decode_checker *kprobes_arm_checkers[] = {arm_stack_checker, arm_regs_checker, NULL};
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/probes/kprobes/actions-common.c
index 0bf5d64eba1d..bd20a71cd34a 100644
--- a/arch/arm/kernel/kprobes-common.c
+++ b/arch/arm/probes/kprobes/actions-common.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/kprobes-common.c
+ * arch/arm/probes/kprobes/actions-common.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -15,7 +15,7 @@
#include <linux/kprobes.h>
#include <asm/opcodes.h>
-#include "kprobes.h"
+#include "core.h"
static void __kprobes simulate_ldm1stm1(probes_opcode_t insn,
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/probes/kprobes/actions-thumb.c
index 9495d7f3516f..07cfd9bef340 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/probes/kprobes/actions-thumb.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/kprobes-thumb.c
+ * arch/arm/probes/kprobes/actions-thumb.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -13,8 +13,9 @@
#include <linux/ptrace.h>
#include <linux/kprobes.h>
-#include "kprobes.h"
-#include "probes-thumb.h"
+#include "../decode-thumb.h"
+#include "core.h"
+#include "checkers.h"
/* These emulation encodings are functionally equivalent... */
#define t32_emulate_rd8rn16rm0ra12_noflags \
@@ -664,3 +665,6 @@ const union decode_action kprobes_t32_actions[NUM_PROBES_T32_ACTIONS] = {
[PROBES_T32_MUL_ADD_LONG] = {
.handler = t32_emulate_rdlo12rdhi8rn16rm0_noflags},
};
+
+const struct decode_checker *kprobes_t32_checkers[] = {t32_stack_checker, NULL};
+const struct decode_checker *kprobes_t16_checkers[] = {t16_stack_checker, NULL};
diff --git a/arch/arm/probes/kprobes/checkers-arm.c b/arch/arm/probes/kprobes/checkers-arm.c
new file mode 100644
index 000000000000..7b9817333b68
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers-arm.c
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/probes/kprobes/checkers-arm.c
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "../decode.h"
+#include "../decode-arm.h"
+#include "checkers.h"
+
+static enum probes_insn __kprobes arm_check_stack(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ /*
+ * PROBES_LDRSTRD, PROBES_LDMSTM, PROBES_STORE,
+ * PROBES_STORE_EXTRA may get here. Simply mark all normal
+ * insns as STACK_USE_NONE.
+ */
+ static const union decode_item table[] = {
+ /*
+ * 'STR{,D,B,H}, Rt, [Rn, Rm]' should be marked as UNKNOWN
+ * if Rn or Rm is SP.
+ * x
+ * STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx
+ * STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_OR (0x0e10000f, 0x0600000d),
+ DECODE_OR (0x0e1f0000, 0x060d0000),
+
+ /*
+ * x
+ * STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx
+ * STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx
+ */
+ DECODE_OR (0x0e5000bf, 0x000000bd),
+ DECODE_CUSTOM (0x0e5f00b0, 0x000d00b0, STACK_USE_UNKNOWN),
+
+ /*
+ * For PROBES_LDMSTM, only stmdx sp, [...] needs to be examined.
+ *
+ * Bit B/A (bit 24) encodes arithmetic operation order. 1 means
+ * before, 0 means after.
+ * Bit I/D (bit 23) encodes arithmetic operation. 1 means
+ * increment, 0 means decrement.
+ *
+ * So:
+ * B I
+ * / /
+ * A D | Rn |
+ * STMDX SP, [...] cccc 100x 00x0 xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_CUSTOM (0x0edf0000, 0x080d0000, STACK_USE_STMDX),
+
+ /* P U W | Rn | Rt | imm12 |*/
+ /* STR (immediate) cccc 010x x0x0 1101 xxxx xxxx xxxx xxxx */
+ /* STRB (immediate) cccc 010x x1x0 1101 xxxx xxxx xxxx xxxx */
+ /* P U W | Rn | Rt |imm4| |imm4|*/
+ /* STRD (immediate) cccc 000x x1x0 1101 xxxx xxxx 1111 xxxx */
+ /* STRH (immediate) cccc 000x x1x0 1101 xxxx xxxx 1011 xxxx */
+ /*
+ * index = (P == '1'); add = (U == '1').
+ * Above insns with:
+ * index == 0 (str{,d,h} rx, [sp], #+/-imm) or
+ * add == 1 (str{,d,h} rx, [sp, #+<imm>])
+ * should be STACK_USE_NONE.
+ * Only str{,b,d,h} rx,[sp,#-n] (P == 1 and U == 0) are
+ * required to be examined.
+ */
+ /* STR{,B} Rt,[SP,#-n] cccc 0101 0xx0 1101 xxxx xxxx xxxx xxxx */
+ DECODE_CUSTOM (0x0f9f0000, 0x050d0000, STACK_USE_FIXED_XXX),
+
+ /* STR{D,H} Rt,[SP,#-n] cccc 0001 01x0 1101 xxxx xxxx 1x11 xxxx */
+ DECODE_CUSTOM (0x0fdf00b0, 0x014d00b0, STACK_USE_FIXED_X0X),
+
+ /* fall through */
+ DECODE_CUSTOM (0, 0, STACK_USE_NONE),
+ DECODE_END
+ };
+
+ return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
+}
+
+const struct decode_checker arm_stack_checker[NUM_PROBES_ARM_ACTIONS] = {
+ [PROBES_LDRSTRD] = {.checker = arm_check_stack},
+ [PROBES_STORE_EXTRA] = {.checker = arm_check_stack},
+ [PROBES_STORE] = {.checker = arm_check_stack},
+ [PROBES_LDMSTM] = {.checker = arm_check_stack},
+};
+
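
A quick standalone check of the 'STR{,B} Rt,[SP,#-n]' entry above: str r0, [sp, #-8] matches the (0x0f9f0000, 0x050d0000) mask/value pair, and its imm12 field is the byte count that the STACK_USE_FIXED_XXX checker reports as stack usage.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t insn = 0xe50d0008;	/* str r0, [sp, #-8] */

	/* DECODE_CUSTOM (0x0f9f0000, 0x050d0000, STACK_USE_FIXED_XXX) */
	assert((insn & 0x0f9f0000) == 0x050d0000);

	/* The imm12 field is the stack usage in bytes. */
	printf("stack_space = %u bytes\n", (unsigned)(insn & 0xfff));
	return 0;
}
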
+static enum probes_insn __kprobes arm_check_regs_nouse(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ asi->register_usage_flags = 0;
+ return INSN_GOOD;
+}
+
+static enum probes_insn arm_check_regs_normal(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
+ int i;
+
+ asi->register_usage_flags = 0;
+ for (i = 0; i < 5; regs >>= 4, insn >>= 4, i++)
+ if (regs & 0xf)
+ asi->register_usage_flags |= 1 << (insn & 0xf);
+
+ return INSN_GOOD;
+}
+
+
+static enum probes_insn arm_check_regs_ldmstm(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ unsigned int reglist = insn & 0xffff;
+ unsigned int rn = (insn >> 16) & 0xf;
+ asi->register_usage_flags = reglist | (1 << rn);
+ return INSN_GOOD;
+}
+
+static enum probes_insn arm_check_regs_mov_ip_sp(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ /* Instruction is 'mov ip, sp' i.e. 'mov r12, r13' */
+ asi->register_usage_flags = (1 << 12) | (1<< 13);
+ return INSN_GOOD;
+}
+
+/*
+ * | Rn |Rt/d| | Rm |
+ * LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx
+ * STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx
+ * | Rn |Rt/d| |imm4L|
+ * LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx
+ * STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx
+ *
+ * Such instructions access Rt/d and the register after it, so
+ * unlike the other cases a specific checker is required to handle
+ * this extra implicit register usage.
+ */
+static enum probes_insn arm_check_regs_ldrdstrd(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int rdt = (insn >> 12) & 0xf;
+ arm_check_regs_normal(insn, asi, h);
+ asi->register_usage_flags |= 1 << (rdt + 1);
+ return INSN_GOOD;
+}
+
+
+const struct decode_checker arm_regs_checker[NUM_PROBES_ARM_ACTIONS] = {
+ [PROBES_MRS] = {.checker = arm_check_regs_normal},
+ [PROBES_SATURATING_ARITHMETIC] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL1] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL2] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL_ADD_LONG] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL_ADD] = {.checker = arm_check_regs_normal},
+ [PROBES_LOAD] = {.checker = arm_check_regs_normal},
+ [PROBES_LOAD_EXTRA] = {.checker = arm_check_regs_normal},
+ [PROBES_STORE] = {.checker = arm_check_regs_normal},
+ [PROBES_STORE_EXTRA] = {.checker = arm_check_regs_normal},
+ [PROBES_DATA_PROCESSING_REG] = {.checker = arm_check_regs_normal},
+ [PROBES_DATA_PROCESSING_IMM] = {.checker = arm_check_regs_normal},
+ [PROBES_SEV] = {.checker = arm_check_regs_nouse},
+ [PROBES_WFE] = {.checker = arm_check_regs_nouse},
+ [PROBES_SATURATE] = {.checker = arm_check_regs_normal},
+ [PROBES_REV] = {.checker = arm_check_regs_normal},
+ [PROBES_MMI] = {.checker = arm_check_regs_normal},
+ [PROBES_PACK] = {.checker = arm_check_regs_normal},
+ [PROBES_EXTEND] = {.checker = arm_check_regs_normal},
+ [PROBES_EXTEND_ADD] = {.checker = arm_check_regs_normal},
+ [PROBES_BITFIELD] = {.checker = arm_check_regs_normal},
+ [PROBES_LDMSTM] = {.checker = arm_check_regs_ldmstm},
+ [PROBES_MOV_IP_SP] = {.checker = arm_check_regs_mov_ip_sp},
+ [PROBES_LDRSTRD] = {.checker = arm_check_regs_ldrdstrd},
+};
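
A standalone rerun of the arm_check_regs_normal() loop above for one instruction: add r1, r2, r3 has register fields in bits 0-3, 12-15 and 16-19, so with a hand-written nibble mask (0x11001, standing in for the REGS() bits from the decode tables) the computed usage flags cover exactly r1-r3.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t insn = 0xe0821003;	/* add r1, r2, r3 */
	uint32_t regs = 0x11001;	/* one nibble per field, low nibble first */
	uint32_t flags = 0;

	for (int i = 0; i < 5; regs >>= 4, insn >>= 4, i++)
		if (regs & 0xf)
			flags |= 1u << (insn & 0xf);

	printf("register_usage_flags = %#x\n", (unsigned)flags);	/* 0xe: r1, r2, r3 */
	return 0;
}
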
diff --git a/arch/arm/probes/kprobes/checkers-common.c b/arch/arm/probes/kprobes/checkers-common.c
new file mode 100644
index 000000000000..971119c29474
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers-common.c
@@ -0,0 +1,101 @@
+/*
+ * arch/arm/probes/kprobes/checkers-common.c
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "../decode.h"
+#include "../decode-arm.h"
+#include "checkers.h"
+
+enum probes_insn checker_stack_use_none(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ asi->stack_space = 0;
+ return INSN_GOOD_NO_SLOT;
+}
+
+enum probes_insn checker_stack_use_unknown(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ asi->stack_space = -1;
+ return INSN_GOOD_NO_SLOT;
+}
+
+#ifdef CONFIG_THUMB2_KERNEL
+enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = insn & 0xff;
+ asi->stack_space = imm;
+ return INSN_GOOD_NO_SLOT;
+}
+
+/*
+ * Unlike other insns that use imm8, the real addressing offset of
+ * STRD in T32 encoding is imm8 * 4. See the ARM ARM description.
+ */
+enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = insn & 0xff;
+ asi->stack_space = imm << 2;
+ return INSN_GOOD_NO_SLOT;
+}
+#else
+enum probes_insn checker_stack_use_imm_x0x(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = ((insn & 0xf00) >> 4) + (insn & 0xf);
+ asi->stack_space = imm;
+ return INSN_GOOD_NO_SLOT;
+}
+#endif
+
+enum probes_insn checker_stack_use_imm_xxx(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = insn & 0xfff;
+ asi->stack_space = imm;
+ return INSN_GOOD_NO_SLOT;
+}
+
+enum probes_insn checker_stack_use_stmdx(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ unsigned int reglist = insn & 0xffff;
+ int pbit = insn & (1 << 24);
+ asi->stack_space = (hweight32(reglist) - (!pbit ? 1 : 0)) * 4;
+
+ return INSN_GOOD_NO_SLOT;
+}
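The store-multiple rule above can be tried out in isolation. This is a userspace sketch (not kernel code) using __builtin_popcount() as a stand-in for hweight32(), with an assumed 'stmdb sp!, {r4 - r11, lr}' encoding.

#include <stdio.h>
#include <stdint.h>

/* Bytes consumed below SP by a store-multiple-decrement: with P == 1
 * (decrement-before) all n words land below SP, with P == 0
 * (decrement-after) the highest word is stored at SP itself, so only
 * n - 1 words fall below it. */
static int stmdx_stack_space(uint32_t insn)
{
        unsigned int reglist = insn & 0xffff;
        int pbit = insn & (1 << 24);
        int n = __builtin_popcount(reglist);    /* stand-in for hweight32() */

        return (n - (!pbit ? 1 : 0)) * 4;
}

int main(void)
{
        /* Assumed example: stmdb sp!, {r4 - r11, lr} = 0xe92d4ff0 */
        printf("%d bytes\n", stmdx_stack_space(0xe92d4ff0));    /* 36 bytes */
        return 0;
}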
+
+const union decode_action stack_check_actions[] = {
+ [STACK_USE_NONE] = {.decoder = checker_stack_use_none},
+ [STACK_USE_UNKNOWN] = {.decoder = checker_stack_use_unknown},
+#ifdef CONFIG_THUMB2_KERNEL
+ [STACK_USE_FIXED_0XX] = {.decoder = checker_stack_use_imm_0xx},
+ [STACK_USE_T32STRD] = {.decoder = checker_stack_use_t32strd},
+#else
+ [STACK_USE_FIXED_X0X] = {.decoder = checker_stack_use_imm_x0x},
+#endif
+ [STACK_USE_FIXED_XXX] = {.decoder = checker_stack_use_imm_xxx},
+ [STACK_USE_STMDX] = {.decoder = checker_stack_use_stmdx},
+};
diff --git a/arch/arm/probes/kprobes/checkers-thumb.c b/arch/arm/probes/kprobes/checkers-thumb.c
new file mode 100644
index 000000000000..d608e3b9017a
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers-thumb.c
@@ -0,0 +1,110 @@
+/*
+ * arch/arm/probes/kprobes/checkers-thumb.c
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "../decode.h"
+#include "../decode-thumb.h"
+#include "checkers.h"
+
+static enum probes_insn __kprobes t32_check_stack(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ /*
+ * PROBES_T32_LDMSTM, PROBES_T32_LDRDSTRD and PROBES_T32_LDRSTR
+ * may get here. Simply mark all normal insns as STACK_USE_NONE.
+ */
+ static const union decode_item table[] = {
+
+ /*
+ * First, filter out all ldr insns to make our life easier.
+ * The following load insns may come here:
+ * LDM, LDRD, LDR.
+ * In T32 encoding, bit 20 is enough to distinguish
+ * load from store: all load insns have this bit set,
+ * while all store insns have it clear.
+ */
+ DECODE_CUSTOM (0x00100000, 0x00100000, STACK_USE_NONE),
+
+ /*
+ * Mark all 'STR{,B,H}, Rt, [Rn, Rm]' as STACK_USE_UNKNOWN
+ * if Rn or Rm is SP. T32 doesn't encode STRD.
+ */
+ /* xx | Rn | Rt | | Rm |*/
+ /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
+ /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
+ /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
+ /* INVALID INSN 1111 1000 0110 xxxx xxxx 0000 00xx xxxx */
+ /* By introducing the INVALID INSN, bits 21 and 22 can be ignored. */
+ DECODE_OR (0xff9f0fc0, 0xf80d0000),
+ DECODE_CUSTOM (0xff900fcf, 0xf800000d, STACK_USE_UNKNOWN),
+
+
+ /* xx | Rn | Rt | PUW| imm8 |*/
+ /* STR (imm 8) 1111 1000 0100 1101 xxxx 110x xxxx xxxx */
+ /* STRB (imm 8) 1111 1000 0000 1101 xxxx 110x xxxx xxxx */
+ /* STRH (imm 8) 1111 1000 0010 1101 xxxx 110x xxxx xxxx */
+ /* INVALID INSN 1111 1000 0110 1101 xxxx 110x xxxx xxxx */
+ /* Only consider U == 0 and P == 1: strx rx, [sp, #-<imm>] */
+ DECODE_CUSTOM (0xff9f0e00, 0xf80d0c00, STACK_USE_FIXED_0XX),
+
+ /* For STR{,B,H} (imm 12), offset is always positive, so ignore them. */
+
+ /* P U W | Rn | Rt | Rt2| imm8 |*/
+ /* STRD (immediate) 1110 1001 01x0 1101 xxxx xxxx xxxx xxxx */
+ /*
+ * Only consider U == 0 and P == 1.
+ * Also note that STRD in T32 encoding is special:
+ * imm = ZeroExtend(imm8:'00', 32)
+ */
+ DECODE_CUSTOM (0xffdf0000, 0xe94d0000, STACK_USE_T32STRD),
+
+ /* | Rn | */
+ /* STMDB 1110 1001 00x0 1101 xxxx xxxx xxxx xxxx */
+ DECODE_CUSTOM (0xffdf0000, 0xe90d0000, STACK_USE_STMDX),
+
+ /* fall through */
+ DECODE_CUSTOM (0, 0, STACK_USE_NONE),
+ DECODE_END
+ };
+
+ return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
+}
+
+const struct decode_checker t32_stack_checker[NUM_PROBES_T32_ACTIONS] = {
+ [PROBES_T32_LDMSTM] = {.checker = t32_check_stack},
+ [PROBES_T32_LDRDSTRD] = {.checker = t32_check_stack},
+ [PROBES_T32_LDRSTR] = {.checker = t32_check_stack},
+};
+
+/*
+ * See the comments following this function: this insn must be 'push'.
+ */
+static enum probes_insn __kprobes t16_check_stack(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ unsigned int reglist = insn & 0x1ff;
+ asi->stack_space = hweight32(reglist) * 4;
+ return INSN_GOOD;
+}
+
+/*
+ * T16 encoding is simple: only the 'push' insn can need extra stack space.
+ * Other insns, like str, can only use r0-r7 as Rn.
+ */
+const struct decode_checker t16_stack_checker[NUM_PROBES_T16_ACTIONS] = {
+ [PROBES_T16_PUSH] = {.checker = t16_check_stack},
+};
diff --git a/arch/arm/probes/kprobes/checkers.h b/arch/arm/probes/kprobes/checkers.h
new file mode 100644
index 000000000000..cf6c9e74d666
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers.h
@@ -0,0 +1,55 @@
+/*
+ * arch/arm/probes/kprobes/checkers.h
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _ARM_KERNEL_PROBES_CHECKERS_H
+#define _ARM_KERNEL_PROBES_CHECKERS_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "../decode.h"
+
+extern probes_check_t checker_stack_use_none;
+extern probes_check_t checker_stack_use_unknown;
+#ifdef CONFIG_THUMB2_KERNEL
+extern probes_check_t checker_stack_use_imm_0xx;
+#else
+extern probes_check_t checker_stack_use_imm_x0x;
+#endif
+extern probes_check_t checker_stack_use_imm_xxx;
+extern probes_check_t checker_stack_use_stmdx;
+
+enum {
+ STACK_USE_NONE,
+ STACK_USE_UNKNOWN,
+#ifdef CONFIG_THUMB2_KERNEL
+ STACK_USE_FIXED_0XX,
+ STACK_USE_T32STRD,
+#else
+ STACK_USE_FIXED_X0X,
+#endif
+ STACK_USE_FIXED_XXX,
+ STACK_USE_STMDX,
+ NUM_STACK_USE_TYPES
+};
+
+extern const union decode_action stack_check_actions[];
+
+#ifndef CONFIG_THUMB2_KERNEL
+extern const struct decode_checker arm_stack_checker[];
+extern const struct decode_checker arm_regs_checker[];
+#else
+#endif
+extern const struct decode_checker t32_stack_checker[];
+extern const struct decode_checker t16_stack_checker[];
+#endif
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/probes/kprobes/core.c
index 6d644202c8dc..a4ec240ee7ba 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -30,11 +30,11 @@
#include <asm/cacheflush.h>
#include <linux/percpu.h>
#include <linux/bug.h>
+#include <asm/patch.h>
-#include "kprobes.h"
-#include "probes-arm.h"
-#include "probes-thumb.h"
-#include "patch.h"
+#include "../decode-arm.h"
+#include "../decode-thumb.h"
+#include "core.h"
#define MIN_STACK_SIZE(addr) \
min((unsigned long)MAX_STACK_SIZE, \
@@ -61,6 +61,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
kprobe_decode_insn_t *decode_insn;
const union decode_action *actions;
int is;
+ const struct decode_checker **checkers;
if (in_exception_text(addr))
return -EINVAL;
@@ -74,9 +75,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
insn = __opcode_thumb32_compose(insn, inst2);
decode_insn = thumb32_probes_decode_insn;
actions = kprobes_t32_actions;
+ checkers = kprobes_t32_checkers;
} else {
decode_insn = thumb16_probes_decode_insn;
actions = kprobes_t16_actions;
+ checkers = kprobes_t16_checkers;
}
#else /* !CONFIG_THUMB2_KERNEL */
thumb = false;
@@ -85,12 +88,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
insn = __mem_to_opcode_arm(*p->addr);
decode_insn = arm_probes_decode_insn;
actions = kprobes_arm_actions;
+ checkers = kprobes_arm_checkers;
#endif
p->opcode = insn;
p->ainsn.insn = tmp_insn;
- switch ((*decode_insn)(insn, &p->ainsn, true, actions)) {
+ switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
case INSN_REJECTED: /* not supported */
return -EINVAL;
@@ -111,6 +115,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
break;
}
+ /*
+ * Never instrument insns like 'str r0, [sp, +/-r1]'. Also, insns like
+ * 'str r0, [sp, #-68]' must be prohibited.
+ * See __und_svc.
+ */
+ if ((p->ainsn.stack_space < 0) ||
+ (p->ainsn.stack_space > MAX_STACK_SIZE))
+ return -EINVAL;
+
return 0;
}
@@ -150,19 +163,31 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
* memory. It is also needed to atomically set the two half-words of a 32-bit
* Thumb breakpoint.
*/
-int __kprobes __arch_disarm_kprobe(void *p)
-{
- struct kprobe *kp = p;
- void *addr = (void *)((uintptr_t)kp->addr & ~1);
-
- __patch_text(addr, kp->opcode);
+struct patch {
+ void *addr;
+ unsigned int insn;
+};
+static int __kprobes_remove_breakpoint(void *data)
+{
+ struct patch *p = data;
+ __patch_text(p->addr, p->insn);
return 0;
}
+void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
+{
+ struct patch p = {
+ .addr = addr,
+ .insn = insn,
+ };
+ stop_machine(__kprobes_remove_breakpoint, &p, cpu_online_mask);
+}
+
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
+ kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
+ p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/probes/kprobes/core.h
index 9a2712ecefc3..ec5d1f20a085 100644
--- a/arch/arm/kernel/kprobes.h
+++ b/arch/arm/probes/kprobes/core.h
@@ -19,7 +19,8 @@
#ifndef _ARM_KERNEL_KPROBES_H
#define _ARM_KERNEL_KPROBES_H
-#include "probes.h"
+#include <asm/kprobes.h>
+#include "../decode.h"
/*
* These undefined instructions must be unique and
@@ -29,6 +30,8 @@
#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18
#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018
+extern void kprobes_remove_breakpoint(void *addr, unsigned int insn);
+
enum probes_insn __kprobes
kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_probes_insn *asi,
const struct decode_header *h);
@@ -36,16 +39,19 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_probes_insn *asi,
typedef enum probes_insn (kprobe_decode_insn_t)(probes_opcode_t,
struct arch_probes_insn *,
bool,
- const union decode_action *);
+ const union decode_action *,
+ const struct decode_checker *[]);
#ifdef CONFIG_THUMB2_KERNEL
extern const union decode_action kprobes_t32_actions[];
extern const union decode_action kprobes_t16_actions[];
-
+extern const struct decode_checker *kprobes_t32_checkers[];
+extern const struct decode_checker *kprobes_t16_checkers[];
#else /* !CONFIG_THUMB2_KERNEL */
extern const union decode_action kprobes_arm_actions[];
+extern const struct decode_checker *kprobes_arm_checkers[];
#endif
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
new file mode 100644
index 000000000000..bcdecc25461b
--- /dev/null
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -0,0 +1,370 @@
+/*
+ * Kernel Probes Jump Optimization (Optprobes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) Huawei Inc., 2014
+ */
+
+#include <linux/kprobes.h>
+#include <linux/jump_label.h>
+#include <asm/kprobes.h>
+#include <asm/cacheflush.h>
+/* for arm_gen_branch */
+#include <asm/insn.h>
+/* for patch_text */
+#include <asm/patch.h>
+
+#include "core.h"
+
+/*
+ * See register_usage_flags. If the probed instruction doesn't use PC,
+ * we can copy it into the template and have it executed directly without
+ * simulation or emulation.
+ */
+#define ARM_REG_PC 15
+#define can_kprobe_direct_exec(m) (!test_bit(ARM_REG_PC, &(m)))
+
+/*
+ * NOTE: the first sub and add instructions will be modified according
+ * to the stack cost of the probed instruction.
+ */
+asm (
+ ".global optprobe_template_entry\n"
+ "optprobe_template_entry:\n"
+ ".global optprobe_template_sub_sp\n"
+ "optprobe_template_sub_sp:"
+ " sub sp, sp, #0xff\n"
+ " stmia sp, {r0 - r14} \n"
+ ".global optprobe_template_add_sp\n"
+ "optprobe_template_add_sp:"
+ " add r3, sp, #0xff\n"
+ " str r3, [sp, #52]\n"
+ " mrs r4, cpsr\n"
+ " str r4, [sp, #64]\n"
+ " mov r1, sp\n"
+ " ldr r0, 1f\n"
+ " ldr r2, 2f\n"
+ /*
+ * The AEABI requires an 8-byte aligned stack. If
+ * SP % 8 != 0 (SP % 4 == 0 is guaranteed),
+ * allocate more bytes here.
+ */
+ " and r4, sp, #4\n"
+ " sub sp, sp, r4\n"
+#if __LINUX_ARM_ARCH__ >= 5
+ " blx r2\n"
+#else
+ " mov lr, pc\n"
+ " mov pc, r2\n"
+#endif
+ " add sp, sp, r4\n"
+ " ldr r1, [sp, #64]\n"
+ " tst r1, #"__stringify(PSR_T_BIT)"\n"
+ " ldrne r2, [sp, #60]\n"
+ " orrne r2, #1\n"
+ " strne r2, [sp, #60] @ set bit0 of PC for thumb\n"
+ " msr cpsr_cxsf, r1\n"
+ ".global optprobe_template_restore_begin\n"
+ "optprobe_template_restore_begin:\n"
+ " ldmia sp, {r0 - r15}\n"
+ ".global optprobe_template_restore_orig_insn\n"
+ "optprobe_template_restore_orig_insn:\n"
+ " nop\n"
+ ".global optprobe_template_restore_end\n"
+ "optprobe_template_restore_end:\n"
+ " nop\n"
+ ".global optprobe_template_val\n"
+ "optprobe_template_val:\n"
+ "1: .long 0\n"
+ ".global optprobe_template_call\n"
+ "optprobe_template_call:\n"
+ "2: .long 0\n"
+ ".global optprobe_template_end\n"
+ "optprobe_template_end:\n");
+
+#define TMPL_VAL_IDX \
+ ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
+#define TMPL_CALL_IDX \
+ ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
+#define TMPL_END_IDX \
+ ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
+#define TMPL_ADD_SP \
+ ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
+#define TMPL_SUB_SP \
+ ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
+#define TMPL_RESTORE_BEGIN \
+ ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
+#define TMPL_RESTORE_ORIGN_INSN \
+ ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
+#define TMPL_RESTORE_END \
+ ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
+
+/*
+ * When using the ARM ISA, an instruction can always be optimized, except
+ * for instructions like 'str r0, [sp, r1]' which store to the stack and
+ * whose stack space consumption cannot be determined statically.
+ */
+int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
+{
+ return optinsn->insn != NULL;
+}
+
+/*
+ * In the ARM ISA, kprobe opt always replaces one instruction (4 bytes
+ * long and 4-byte aligned), so it is impossible to encounter another
+ * kprobe in the address range. Always return 0.
+ */
+int arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+ return 0;
+}
+
+/* Caller must ensure addr & 3 == 0 */
+static int can_optimize(struct kprobe *kp)
+{
+ if (kp->ainsn.stack_space < 0)
+ return 0;
+ /*
+ * 255 is the biggest immediate that can be used in 'sub r0, r0, #<imm>'.
+ * Numbers larger than 255 need special encoding.
+ */
+ if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
+ return 0;
+ return 1;
+}
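For reference, the cap checked above exists because the template's 'sub sp, sp, #imm' is later patched by OR-ing the immediate into the low byte of the opcode (see arch_prepare_optimized_kprobe() below). A rough userspace sketch of that constraint, with the insn stack cost assumed for illustration:

#include <stdio.h>
#include <stdint.h>

/* Patch 'sub sp, sp, #imm' only if imm fits the plain 8-bit immediate;
 * larger values would need ARM's rotated-immediate encoding. */
static int encode_sub_sp(unsigned int imm, uint32_t *out)
{
        if (imm > 255)
                return -1;
        *out = 0xe24dd000 | imm;        /* sub sp, sp, #imm */
        return 0;
}

int main(void)
{
        uint32_t insn;

        /* e.g. 72 bytes of ARM pt_regs plus an assumed 16-byte insn stack cost */
        if (encode_sub_sp(72 + 16, &insn) == 0)
                printf("%#x\n", (unsigned)insn);        /* 0xe24dd058 */
        return 0;
}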
+
+/* Free optimized instruction slot */
+static void
+__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+ if (op->optinsn.insn) {
+ free_optinsn_slot(op->optinsn.insn, dirty);
+ op->optinsn.insn = NULL;
+ }
+}
+
+extern void kprobe_handler(struct pt_regs *regs);
+
+static void
+optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+{
+ unsigned long flags;
+ struct kprobe *p = &op->kp;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ /* Save skipped registers */
+ regs->ARM_pc = (unsigned long)op->kp.addr;
+ regs->ARM_ORIG_r0 = ~0UL;
+
+ local_irq_save(flags);
+
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(&op->kp);
+ } else {
+ __this_cpu_write(current_kprobe, &op->kp);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ opt_pre_handler(&op->kp, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ /*
+ * We singlestep the replaced instruction only when it can't be
+ * executed directly during restore.
+ */
+ if (!p->ainsn.kprobe_direct_exec)
+ op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
+
+ local_irq_restore(flags);
+}
+
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
+{
+ kprobe_opcode_t *code;
+ unsigned long rel_chk;
+ unsigned long val;
+ unsigned long stack_protect = sizeof(struct pt_regs);
+
+ if (!can_optimize(orig))
+ return -EILSEQ;
+
+ code = get_optinsn_slot();
+ if (!code)
+ return -ENOMEM;
+
+ /*
+ * Verify that the address gap is within the 32MiB range, because
+ * this uses a relative jump.
+ *
+ * kprobe opt uses a 'b' instruction to branch to optinsn.insn.
+ * According to the ARM manual, the branch instruction is:
+ *
+ * 31 28 27 24 23 0
+ * +------+---+---+---+---+----------------+
+ * | cond | 1 | 0 | 1 | 0 | imm24 |
+ * +------+---+---+---+---+----------------+
+ *
+ * imm24 is a signed 24-bit integer. The real branch offset is computed
+ * by: imm32 = SignExtend(imm24:'00', 32);
+ *
+ * So the maximum forward branch should be:
+ * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
+ * The maximum backward branch should be:
+ * (0xff800000 << 2) = 0xfe000000 = -0x2000000
+ *
+ * We can simply check (rel & 0xfe000003):
+ * if rel is positive, (rel & 0xfe000000) should be 0;
+ * if rel is negative, (rel & 0xfe000000) should be 0xfe000000;
+ * the last '3' is used for alignment checking.
+ */
+ rel_chk = (unsigned long)((long)code -
+ (long)orig->addr + 8) & 0xfe000003;
+
+ if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
+ /*
+ * Unlike x86, we free the code buffer directly instead of
+ * calling __arch_remove_optimized_kprobe() because
+ * we have not filled in any fields of op yet.
+ */
+ free_optinsn_slot(code, 0);
+ return -ERANGE;
+ }
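The mask trick described in the comment above can be checked on its own. A standalone userspace sketch, assuming rel already holds the signed byte offset that would be encoded as imm24:'00':

#include <stdio.h>

/* Offset must be word aligned and within [-0x02000000, 0x01fffffc]. */
static int imm24_reachable(long rel)
{
        unsigned long chk = (unsigned long)rel & 0xfe000003;

        return chk == 0 || chk == 0xfe000000;
}

int main(void)
{
        printf("%d\n", imm24_reachable(0x01fffffc));    /* 1: max forward */
        printf("%d\n", imm24_reachable(-0x02000000));   /* 1: max backward */
        printf("%d\n", imm24_reachable(0x02000000));    /* 0: out of range */
        printf("%d\n", imm24_reachable(6));             /* 0: not word aligned */
        return 0;
}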
+
+ /* Copy arch-dep-instance from template. */
+ memcpy(code, &optprobe_template_entry,
+ TMPL_END_IDX * sizeof(kprobe_opcode_t));
+
+ /* Adjust buffer according to instruction. */
+ BUG_ON(orig->ainsn.stack_space < 0);
+
+ stack_protect += orig->ainsn.stack_space;
+
+ /* Should have been filtered by can_optimize(). */
+ BUG_ON(stack_protect > 255);
+
+ /* Create a 'sub sp, sp, #<stack_protect>' */
+ code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
+ /* Create a 'add r3, sp, #<stack_protect>' */
+ code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);
+
+ /* Set probe information */
+ val = (unsigned long)op;
+ code[TMPL_VAL_IDX] = val;
+
+ /* Set probe function call */
+ val = (unsigned long)optimized_callback;
+ code[TMPL_CALL_IDX] = val;
+
+ /* If possible, copy insn and have it executed during restore */
+ orig->ainsn.kprobe_direct_exec = false;
+ if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
+ kprobe_opcode_t final_branch = arm_gen_branch(
+ (unsigned long)(&code[TMPL_RESTORE_END]),
+ (unsigned long)(op->kp.addr) + 4);
+ if (final_branch != 0) {
+ /*
+ * Replace the original 'ldmia sp, {r0 - r15}' with
+ * 'ldmia sp, {r0 - r14}' to restore all registers except pc.
+ */
+ code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);
+
+ /* The original probed instruction */
+ code[TMPL_RESTORE_ORIGN_INSN] = __opcode_to_mem_arm(orig->opcode);
+
+ /* Jump back to next instruction */
+ code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
+ orig->ainsn.kprobe_direct_exec = true;
+ }
+ }
+
+ flush_icache_range((unsigned long)code,
+ (unsigned long)(&code[TMPL_END_IDX]));
+
+ /* Setting op->optinsn.insn means this kprobe is prepared. */
+ op->optinsn.insn = code;
+ return 0;
+}
+
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+ struct optimized_kprobe *op, *tmp;
+
+ list_for_each_entry_safe(op, tmp, oplist, list) {
+ unsigned long insn;
+ WARN_ON(kprobe_disabled(&op->kp));
+
+ /*
+ * Back up the instructions which will be replaced
+ * by the jump.
+ */
+ memcpy(op->optinsn.copied_insn, op->kp.addr,
+ RELATIVEJUMP_SIZE);
+
+ insn = arm_gen_branch((unsigned long)op->kp.addr,
+ (unsigned long)op->optinsn.insn);
+ BUG_ON(insn == 0);
+
+ /*
+ * Make it a conditional branch if the replaced insn
+ * is conditional.
+ */
+ insn = (__mem_to_opcode_arm(
+ op->optinsn.copied_insn[0]) & 0xf0000000) |
+ (insn & 0x0fffffff);
+
+ /*
+ * As in __arch_disarm_kprobe, operations which
+ * remove breakpoints must be wrapped in stop_machine()
+ * to avoid races.
+ */
+ kprobes_remove_breakpoint(op->kp.addr, insn);
+
+ list_del_init(&op->list);
+ }
+}
+
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+ arch_arm_kprobe(&op->kp);
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * The caller must hold kprobe_mutex.
+ */
+void arch_unoptimize_kprobes(struct list_head *oplist,
+ struct list_head *done_list)
+{
+ struct optimized_kprobe *op, *tmp;
+
+ list_for_each_entry_safe(op, tmp, oplist, list) {
+ arch_unoptimize_kprobe(op);
+ list_move(&op->list, done_list);
+ }
+}
+
+int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+ unsigned long addr)
+{
+ return ((unsigned long)op->kp.addr <= addr &&
+ (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+}
+
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+ __arch_remove_optimized_kprobe(op, 1);
+}
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/probes/kprobes/test-arm.c
index cb1424240ff6..8866aedfdea2 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/probes/kprobes/test-arm.c
@@ -12,8 +12,9 @@
#include <linux/module.h>
#include <asm/system_info.h>
#include <asm/opcodes.h>
+#include <asm/probes.h>
-#include "kprobes-test.h"
+#include "test-core.h"
#define TEST_ISA "32"
@@ -203,9 +204,9 @@ void kprobe_arm_test_cases(void)
#endif
TEST_GROUP("Miscellaneous instructions")
- TEST("mrs r0, cpsr")
- TEST("mrspl r7, cpsr")
- TEST("mrs r14, cpsr")
+ TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr")
+ TEST_RMASKED("mrspl r",7,~PSR_IGNORE_BITS,", cpsr")
+ TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr")
TEST_UNSUPPORTED(__inst_arm(0xe10ff000) " @ mrs r15, cpsr")
TEST_UNSUPPORTED("mrs r0, spsr")
TEST_UNSUPPORTED("mrs lr, spsr")
@@ -214,9 +215,12 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED("msr cpsr_f, lr")
TEST_UNSUPPORTED("msr spsr, r0")
+#if __LINUX_ARM_ARCH__ >= 5 || \
+ (__LINUX_ARM_ARCH__ == 4 && !defined(CONFIG_CPU_32v4))
TEST_BF_R("bx r",0,2f,"")
TEST_BB_R("bx r",7,2f,"")
TEST_BF_R("bxeq r",14,2f,"")
+#endif
#if __LINUX_ARM_ARCH__ >= 5
TEST_R("clz r0, r",0, 0x0,"")
@@ -476,7 +480,9 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Extra load/store instructions")
TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]")
- TEST_RPR( "streqh r",14,VAL2,", [r",13,0, ", r",12, 48,"]")
+ TEST_RPR( "streqh r",14,VAL2,", [r",11,0, ", r",12, 48,"]")
+ TEST_UNSUPPORTED( "streqh r14, [r13, r12]")
+ TEST_UNSUPPORTED( "streqh r14, [r12, r13]")
TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!")
TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
@@ -501,6 +507,9 @@ void kprobe_arm_test_cases(void)
TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!")
TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48")
TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48")
+ TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
+ TEST_UNSUPPORTED("strh r3, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_RP( "strh r",4, VAL1,", [r",14,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
TEST_UNSUPPORTED(__inst_arm(0xe1efc3b0) " @ strh r12, [pc, #48]!")
TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48")
@@ -565,7 +574,9 @@ void kprobe_arm_test_cases(void)
#if __LINUX_ARM_ARCH__ >= 5
TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
- TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
+ TEST_RPR( "strccd r",8, VAL2,", [r",11,0, ", r",12,48,"]")
+ TEST_UNSUPPORTED( "strccd r8, [r13, r12]")
+ TEST_UNSUPPORTED( "strccd r8, [r12, r13]")
TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
@@ -589,6 +600,9 @@ void kprobe_arm_test_cases(void)
TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!")
TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48")
TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48")
+ TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
+ TEST_UNSUPPORTED("strd r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_RP( "strd r",4, VAL1,", [r",12,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!")
TEST_P( "ldrd r0, [r",0, 24,", #-8]")
@@ -637,14 +651,20 @@ void kprobe_arm_test_cases(void)
TEST_RP( "str"byte" r",12,VAL2,", [r",11,24,", #-4]!") \
TEST_RP( "str"byte" r",2, VAL1,", [r",3, 24,"], #48") \
TEST_RP( "str"byte" r",10,VAL2,", [r",9, 64,"], #-48") \
+ TEST_RP( "str"byte" r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \
+ TEST_UNSUPPORTED("str"byte" r3, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \
+ TEST_RP( "str"byte" r",4, VAL1,", [r",10,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \
TEST_RPR("str"byte" r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") \
- TEST_RPR("str"byte" r",14,VAL2,", [r",13,0, ", r",12, 48,"]") \
+ TEST_RPR("str"byte" r",14,VAL2,", [r",11,0, ", r",12, 48,"]") \
+ TEST_UNSUPPORTED("str"byte" r14, [r13, r12]") \
+ TEST_UNSUPPORTED("str"byte" r14, [r12, r13]") \
TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") \
TEST_RPR("str"byte" r",12,VAL2,", [r",11,48,", -r",10,24,"]!") \
TEST_RPR("str"byte" r",2, VAL1,", [r",3, 24,"], r",4, 48,"") \
TEST_RPR("str"byte" r",10,VAL2,", [r",9, 48,"], -r",11,24,"") \
TEST_RPR("str"byte" r",0, VAL1,", [r",1, 24,", r",2, 32,", asl #1]")\
- TEST_RPR("str"byte" r",14,VAL2,", [r",13,0, ", r",12, 32,", lsr #2]")\
+ TEST_RPR("str"byte" r",14,VAL2,", [r",11,0, ", r",12, 32,", lsr #2]")\
+ TEST_UNSUPPORTED("str"byte" r14, [r13, r12, lsr #2]") \
TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 32,", asr #3]!")\
TEST_RPR("str"byte" r",12,VAL2,", [r",11,24,", r",10, 4,", ror #31]!")\
TEST_P( "ldr"byte" r0, [r",0, 24,", #-2]") \
@@ -668,12 +688,12 @@ void kprobe_arm_test_cases(void)
LOAD_STORE("")
TEST_P( "str pc, [r",0,0,", #15*4]")
- TEST_R( "str pc, [sp, r",2,15*4,"]")
+ TEST_UNSUPPORTED( "str pc, [sp, r2]")
TEST_BF( "ldr pc, [sp, #15*4]")
TEST_BF_R("ldr pc, [sp, r",2,15*4,"]")
TEST_P( "str sp, [r",0,0,", #13*4]")
- TEST_R( "str sp, [sp, r",2,13*4,"]")
+ TEST_UNSUPPORTED( "str sp, [sp, r2]")
TEST_BF( "ldr sp, [sp, #13*4]")
TEST_BF_R("ldr sp, [sp, r",2,13*4,"]")
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/probes/kprobes/test-core.c
index b206d7790c77..9775de22e2ff 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -209,10 +209,10 @@
#include <linux/bug.h>
#include <asm/opcodes.h>
-#include "kprobes.h"
-#include "probes-arm.h"
-#include "probes-thumb.h"
-#include "kprobes-test.h"
+#include "core.h"
+#include "test-core.h"
+#include "../decode-arm.h"
+#include "../decode-thumb.h"
#define BENCHMARKING 1
@@ -236,6 +236,8 @@ static int tests_failed;
#ifndef CONFIG_THUMB2_KERNEL
+#define RET(reg) "mov pc, "#reg
+
long arm_func(long r0, long r1);
static void __used __naked __arm_kprobes_test_func(void)
@@ -245,7 +247,7 @@ static void __used __naked __arm_kprobes_test_func(void)
".type arm_func, %%function \n\t"
"arm_func: \n\t"
"adds r0, r0, r1 \n\t"
- "bx lr \n\t"
+ "mov pc, lr \n\t"
".code "NORMAL_ISA /* Back to Thumb if necessary */
: : : "r0", "r1", "cc"
);
@@ -253,6 +255,8 @@ static void __used __naked __arm_kprobes_test_func(void)
#else /* CONFIG_THUMB2_KERNEL */
+#define RET(reg) "bx "#reg
+
long thumb16_func(long r0, long r1);
long thumb32even_func(long r0, long r1);
long thumb32odd_func(long r0, long r1);
@@ -494,7 +498,7 @@ static void __naked benchmark_nop(void)
{
__asm__ __volatile__ (
"nop \n\t"
- "bx lr"
+ RET(lr)" \n\t"
);
}
@@ -977,7 +981,7 @@ void __naked __kprobes_test_case_start(void)
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"
"bl kprobes_test_case_start \n\t"
- "bx r0 \n\t"
+ RET(r0)" \n\t"
);
}
@@ -1056,15 +1060,6 @@ static int test_case_run_count;
static bool test_case_is_thumb;
static int test_instance;
-/*
- * We ignore the state of the imprecise abort disable flag (CPSR.A) because this
- * can change randomly as the kernel doesn't take care to preserve or initialise
- * this across context switches. Also, with Security Extentions, the flag may
- * not be under control of the kernel; for this reason we ignore the state of
- * the FIQ disable flag CPSR.F as well.
- */
-#define PSR_IGNORE_BITS (PSR_A_BIT | PSR_F_BIT)
-
static unsigned long test_check_cc(int cc, unsigned long cpsr)
{
int ret = arm_check_condition(cc << 28, cpsr);
@@ -1196,6 +1191,13 @@ static void setup_test_context(struct pt_regs *regs)
regs->uregs[arg->reg] =
(unsigned long)current_stack + arg->val;
memory_needs_checking = true;
+ /*
+ * Test memory at an address below SP is in danger of
+ * being altered by an interrupt pushing data onto the
+ * stack. Disable interrupts to prevent this.
+ */
+ if (arg->reg == 13)
+ regs->ARM_cpsr |= PSR_I_BIT;
break;
}
case ARG_TYPE_MEM: {
@@ -1264,14 +1266,26 @@ test_case_pre_handler(struct kprobe *p, struct pt_regs *regs)
static int __kprobes
test_after_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
+ struct test_arg *args;
+
if (container_of(p, struct test_probe, kprobe)->hit == test_instance)
return 0; /* Already run for this test instance */
result_regs = *regs;
+
+ /* Mask out results which are indeterminate */
result_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
+ for (args = current_args; args[0].type != ARG_TYPE_END; ++args)
+ if (args[0].type == ARG_TYPE_REG_MASKED) {
+ struct test_arg_regptr *arg =
+ (struct test_arg_regptr *)args;
+ result_regs.uregs[arg->reg] &= arg->val;
+ }
/* Undo any changes done to SP by the test case */
regs->ARM_sp = (unsigned long)current_stack;
+ /* Enable interrupts in case setup_test_context disabled them */
+ regs->ARM_cpsr &= ~PSR_I_BIT;
container_of(p, struct test_probe, kprobe)->hit = test_instance;
return 0;
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/probes/kprobes/test-core.h
index 4430990e90e7..94285203e9f7 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/probes/kprobes/test-core.h
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/kprobes-test.h
+ * arch/arm/probes/kprobes/test-core.h
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -45,10 +45,11 @@ extern int kprobe_test_cc_position;
*
*/
-#define ARG_TYPE_END 0
-#define ARG_TYPE_REG 1
-#define ARG_TYPE_PTR 2
-#define ARG_TYPE_MEM 3
+#define ARG_TYPE_END 0
+#define ARG_TYPE_REG 1
+#define ARG_TYPE_PTR 2
+#define ARG_TYPE_MEM 3
+#define ARG_TYPE_REG_MASKED 4
#define ARG_FLAG_UNSUPPORTED 0x01
#define ARG_FLAG_SUPPORTED 0x02
@@ -61,7 +62,7 @@ struct test_arg {
};
struct test_arg_regptr {
- u8 type; /* ARG_TYPE_REG or ARG_TYPE_PTR */
+ u8 type; /* ARG_TYPE_REG or ARG_TYPE_PTR or ARG_TYPE_REG_MASKED */
u8 reg;
u8 _padding[2];
u32 val;
@@ -138,6 +139,12 @@ struct test_arg_end {
".short 0 \n\t" \
".word "#val" \n\t"
+#define TEST_ARG_REG_MASKED(reg, val) \
+ ".byte "__stringify(ARG_TYPE_REG_MASKED)" \n\t" \
+ ".byte "#reg" \n\t" \
+ ".short 0 \n\t" \
+ ".word "#val" \n\t"
+
#define TEST_ARG_END(flags) \
".byte "__stringify(ARG_TYPE_END)" \n\t" \
".byte "TEST_ISA flags" \n\t" \
@@ -395,6 +402,22 @@ struct test_arg_end {
" "codex" \n\t" \
TESTCASE_END
+#define TEST_RMASKED(code1, reg, mask, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG_MASKED(reg, mask) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg code2) \
+ TESTCASE_END
+
+/*
+ * We ignore the state of the imprecise abort disable flag (CPSR.A) because this
+ * can change randomly as the kernel doesn't take care to preserve or initialise
+ * this across context switches. Also, with Security Extensions, the flag may
+ * not be under control of the kernel; for this reason we ignore the state of
+ * the FIQ disable flag CPSR.F as well.
+ */
+#define PSR_IGNORE_BITS (PSR_A_BIT | PSR_F_BIT)
+
/*
* Macros for defining space directives spread over multiple lines.
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c
index 844dd10d8593..b683b4517458 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/probes/kprobes/test-thumb.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/kernel/kprobes-test-thumb.c
+ * arch/arm/probes/kprobes/test-thumb.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
@@ -11,8 +11,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/opcodes.h>
+#include <asm/probes.h>
-#include "kprobes-test.h"
+#include "test-core.h"
#define TEST_ISA "16"
@@ -416,6 +417,9 @@ void kprobe_thumb32_test_cases(void)
TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16")
+ TEST_RRP("strd r",6, VAL1,", r",7, VAL2,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
+ TEST_UNSUPPORTED("strd r6, r7, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_RRP("strd r",4, VAL1,", r",5, VAL2,", [r",14, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
TEST_UNSUPPORTED(__inst_thumb32(0xe9efec04) " @ strd r14, r12, [pc, #16]!")
TEST_UNSUPPORTED(__inst_thumb32(0xe8efec04) " @ strd r14, r12, [pc], #16")
@@ -774,8 +778,8 @@ CONDITION_INSTRUCTIONS(22,
TEST_UNSUPPORTED("subs pc, lr, #4")
- TEST("mrs r0, cpsr")
- TEST("mrs r14, cpsr")
+ TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr")
+ TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8d00) " @ mrs sp, spsr")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8f00) " @ mrs pc, spsr")
TEST_UNSUPPORTED("mrs r0, spsr")
@@ -821,14 +825,22 @@ CONDITION_INSTRUCTIONS(22,
TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!") \
TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]") \
TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \
+ TEST_UNSUPPORTED("str"size" r0, [r13, r1]") \
TEST_R( "str"size".w r",7, VAL1,", [sp, #24]") \
TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]") \
+ TEST_RP( "str"size" r",6, VAL1,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \
+ TEST_UNSUPPORTED("str"size" r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \
+ TEST_RP( "str"size" r",4, VAL2,", [r",12, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \
TEST_UNSUPPORTED("str"size"t r0, [r1, #4]")
SINGLE_STORE("b")
SINGLE_STORE("h")
SINGLE_STORE("")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf801000d) " @ strb r0, [r1, r13]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf821000d) " @ strh r0, [r1, r13]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf841000d) " @ str r0, [r1, r13]")
+
TEST("str sp, [sp]")
TEST_UNSUPPORTED(__inst_thumb32(0xf8cfe000) " @ str r14, [pc]")
TEST_UNSUPPORTED(__inst_thumb32(0xf8cef000) " @ str pc, [r14]")
diff --git a/arch/arm/probes/uprobes/Makefile b/arch/arm/probes/uprobes/Makefile
new file mode 100644
index 000000000000..e1dc3d0f6d5a
--- /dev/null
+++ b/arch/arm/probes/uprobes/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_UPROBES) += core.o actions-arm.o
diff --git a/arch/arm/kernel/uprobes-arm.c b/arch/arm/probes/uprobes/actions-arm.c
index d3b655ff17da..76eb44972ebe 100644
--- a/arch/arm/kernel/uprobes-arm.c
+++ b/arch/arm/probes/uprobes/actions-arm.c
@@ -13,9 +13,9 @@
#include <linux/uprobes.h>
#include <linux/module.h>
-#include "probes.h"
-#include "probes-arm.h"
-#include "uprobes.h"
+#include "../decode.h"
+#include "../decode-arm.h"
+#include "core.h"
static int uprobes_substitute_pc(unsigned long *pinsn, u32 oregs)
{
@@ -195,8 +195,6 @@ uprobe_decode_ldmstm(probes_opcode_t insn,
}
const union decode_action uprobes_probes_actions[] = {
- [PROBES_EMULATE_NONE] = {.handler = probes_simulate_nop},
- [PROBES_SIMULATE_NOP] = {.handler = probes_simulate_nop},
[PROBES_PRELOAD_IMM] = {.handler = probes_simulate_nop},
[PROBES_PRELOAD_REG] = {.handler = probes_simulate_nop},
[PROBES_BRANCH_IMM] = {.handler = simulate_blx1},
diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/probes/uprobes/core.c
index 56adf9c1fde0..d1329f1ba4e4 100644
--- a/arch/arm/kernel/uprobes.c
+++ b/arch/arm/probes/uprobes/core.c
@@ -17,9 +17,9 @@
#include <asm/opcodes.h>
#include <asm/traps.h>
-#include "probes.h"
-#include "probes-arm.h"
-#include "uprobes.h"
+#include "../decode.h"
+#include "../decode-arm.h"
+#include "core.h"
#define UPROBE_TRAP_NR UINT_MAX
@@ -88,7 +88,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
auprobe->ixol[1] = __opcode_to_mem_arm(UPROBE_SS_ARM_INSN);
ret = arm_probes_decode_insn(insn, &auprobe->asi, false,
- uprobes_probes_actions);
+ uprobes_probes_actions, NULL);
switch (ret) {
case INSN_REJECTED:
return -EINVAL;
diff --git a/arch/arm/kernel/uprobes.h b/arch/arm/probes/uprobes/core.h
index 1d0c12dfbd03..1d0c12dfbd03 100644
--- a/arch/arm/kernel/uprobes.h
+++ b/arch/arm/probes/uprobes/core.h
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c7ca936ebd99..263a2044c65b 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -29,10 +29,10 @@
struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
-EXPORT_SYMBOL_GPL(xen_start_info);
+EXPORT_SYMBOL(xen_start_info);
enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
+EXPORT_SYMBOL(xen_domain_type);
struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 351b24a979d4..793551d15f1d 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -149,7 +149,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
struct dma_map_ops *xen_dma_ops;
-EXPORT_SYMBOL_GPL(xen_dma_ops);
+EXPORT_SYMBOL(xen_dma_ops);
static struct dma_map_ops xen_swiotlb_dma_ops = {
.mapping_error = xen_swiotlb_dma_mapping_error,
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 054857776254..cb7a14c5cd69 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
int i;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1f9a20a3677..676454a65af8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -39,6 +39,7 @@ config ARM64
select HARDIRQS_SW_RESEND
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
@@ -349,7 +350,6 @@ config ARM64_VA_BITS_42
config ARM64_VA_BITS_48
bool "48-bit"
- depends on !ARM_SMMU
endchoice
@@ -540,6 +540,21 @@ config CP15_BARRIER_EMULATION
If unsure, say Y
+config SETEND_EMULATION
+ bool "Emulate SETEND instruction"
+ help
+ The SETEND instruction alters the data endianness at
+ AArch32 EL0, and is deprecated in ARMv8.
+
+ Say Y here to enable software emulation of the instruction
+ for AArch32 userspace code.
+
+ Note: All the CPUs on the system must have mixed endian support at EL0
+ for this feature to be enabled. If a new CPU which doesn't support mixed
+ endian is hotplugged in after this feature has been enabled, applications
+ could see unexpected results.
+
+ If unsure, say Y
endif
endmenu
@@ -627,9 +642,6 @@ source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
def_bool y
-config ARM64_CPU_SUSPEND
- def_bool PM_SLEEP
-
endmenu
menu "CPU Power Management"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 5fdd6dce8061..4a8741073c90 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -66,4 +66,27 @@ config DEBUG_SET_MODULE_RONX
against certain classes of kernel exploits.
If in doubt, say "N".
+config DEBUG_RODATA
+ bool "Make kernel text and rodata read-only"
+ help
+ If this is set, kernel text and rodata will be made read-only. This
+ is to help catch accidental or malicious attempts to change the
+ kernel's executable code. It additionally splits rodata from kernel
+ text so that it can be made explicitly non-executable.
+
+ If in doubt, say Y
+
+config DEBUG_ALIGN_RODATA
+ depends on DEBUG_RODATA && !ARM64_64K_PAGES
+ bool "Align linker sections up to SECTION_SIZE"
+ help
+ If this option is enabled, sections that may potentially be marked as
+ read only or non-executable will be aligned up to the section size of
+ the kernel. This prevents sections from being split into pages and
+ avoids a potential TLB penalty. The downside is an increase in
+ alignment and potentially wasted space. Turn on this option if
+ performance is more important than memory pressure.
+
+ If in doubt, say N
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1c43cec971b5..69ceedc982a5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,8 +15,6 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
@@ -50,7 +48,6 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
-libs-y += $(LIBGCC)
libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
# Default target when executing plain make
@@ -85,6 +82,7 @@ vdso_install:
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+ $(Q)$(MAKE) $(clean)=$(boot)/dts
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 3b8d427c3985..c62b0f4d9ef6 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -3,6 +3,4 @@ dts-dirs += apm
dts-dirs += arm
dts-dirs += cavium
-always := $(dtb-y)
subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index cb3073e4e7a8..d429129ecb3d 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -22,7 +22,7 @@
};
chosen {
- stdout-path = &soc_uart0;
+ stdout-path = "serial0:115200n8";
};
psci {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5376d908eabe..66b6cacc3251 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -45,6 +45,8 @@ CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM64_CPUIDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm64/include/asm/bitrev.h b/arch/arm64/include/asm/bitrev.h
new file mode 100644
index 000000000000..a5a0c3660137
--- /dev/null
+++ b/arch/arm64/include/asm/bitrev.h
@@ -0,0 +1,19 @@
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ __asm__ ("rbit %w0, %w1" : "=r" (x) : "r" (x));
+ return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
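A portable userspace sketch of what the rbit-based helper above returns, for illustration only; the 16- and 8-bit variants simply shift the reversed word down.

#include <stdio.h>
#include <stdint.h>

/* Portable equivalent of the rbit-based 32-bit reversal. */
static uint32_t bitrev32(uint32_t x)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
                r |= ((x >> i) & 1u) << (31 - i);
        return r;
}

int main(void)
{
        printf("%#x\n", (unsigned)bitrev32(0x1));               /* 0x80000000 */
        printf("%#x\n", (unsigned)bitrev32(0x12345678));        /* 0x1e6a2c48 */
        printf("%#x\n", (unsigned)(bitrev32(0xa5) >> 24));      /* 8-bit variant: 0xa5 */
        return 0;
}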
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 7ae31a2cc6c0..67d309cc3b6b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 4c631a0a3609..da2fc9e3cedd 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,24 +39,41 @@
extern unsigned long __icache_flags;
+/*
+ * NumSets, bits[27:13] - (Number of sets in cache) - 1
+ * Associativity, bits[12:3] - (Associativity of cache) - 1
+ * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
+ */
+#define CCSIDR_EL1_WRITE_THROUGH BIT(31)
+#define CCSIDR_EL1_WRITE_BACK BIT(30)
+#define CCSIDR_EL1_READ_ALLOCATE BIT(29)
+#define CCSIDR_EL1_WRITE_ALLOCATE BIT(28)
#define CCSIDR_EL1_LINESIZE_MASK 0x7
#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
-
+#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT 3
+#define CCSIDR_EL1_ASSOCIATIVITY_MASK 0x3ff
+#define CCSIDR_EL1_ASSOCIATIVITY(x) \
+ (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT 13
-#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS_MASK 0x7fff
#define CCSIDR_EL1_NUMSETS(x) \
- (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+ (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)
+
+#define CACHE_LINESIZE(x) (16 << CCSIDR_EL1_LINESIZE(x))
+#define CACHE_NUMSETS(x) (CCSIDR_EL1_NUMSETS(x) + 1)
+#define CACHE_ASSOCIATIVITY(x) (CCSIDR_EL1_ASSOCIATIVITY(x) + 1)
-extern u64 __attribute_const__ icache_get_ccsidr(void);
+extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr);
+/* Helpers for the Level 1 Instruction cache (csselr = 1L) */
static inline int icache_get_linesize(void)
{
- return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+ return CACHE_LINESIZE(cache_get_ccsidr(1L));
}
static inline int icache_get_numsets(void)
{
- return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+ return CACHE_NUMSETS(cache_get_ccsidr(1L));
}
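A standalone userspace sketch of decoding a CCSIDR_EL1 value with the field layout documented above; the sample register value is invented for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Invented sample: 128 sets, 4 ways, 64-byte lines. */
        uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;

        printf("line size: %u bytes\n", 16u << (ccsidr & 0x7));
        printf("sets     : %u\n", (unsigned)(((ccsidr >> 13) & 0x7fff) + 1));
        printf("ways     : %u\n", (unsigned)(((ccsidr >> 3) & 0x3ff) + 1));
        return 0;
}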
/*
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 3fb053fa6e98..7fbed6919b54 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -161,7 +161,6 @@ typedef struct compat_siginfo {
int si_code;
union {
- /* The padding is the same size as AArch64. */
int _pad[128/sizeof(int) - 3];
/* kill() */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 6f8e2ef9094a..da301ee7395c 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,8 +28,6 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- * devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -42,6 +40,8 @@ struct device_node;
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ * devicetree, for a given cpu node and proposed logical id.
* @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
* to wrong parameters or error conditions. Called from the
* CPU being suspended. Must be called with IRQs disabled.
@@ -49,7 +49,6 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
- int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -58,7 +57,8 @@ struct cpu_operations {
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_IDLE
+ int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_suspend)(unsigned long);
#endif
};
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 07547ccc1f2b..b6c16d5f622f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -52,6 +52,8 @@ static inline void cpus_set_cap(unsigned int num)
}
void check_local_cpu_errata(void);
+bool cpu_supports_mixed_endian_el0(void);
+bool system_supports_mixed_endian_el0(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index b52a9932e2b1..0710654631e7 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -3,11 +3,17 @@
#ifdef CONFIG_CPU_IDLE
extern int cpu_init_idle(unsigned int cpu);
+extern int cpu_suspend(unsigned long arg);
#else
static inline int cpu_init_idle(unsigned int cpu)
{
return -EOPNOTSUPP;
}
+
+static inline int cpu_suspend(unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 8adb986a3086..a84ec605bed8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -72,6 +72,18 @@
#define APM_CPU_PART_POTENZA 0x000
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
+#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \
+ (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGEND_SHIFT 8
+#define ID_AA64MMFR0_BIGEND_MASK (0xf << ID_AA64MMFR0_BIGEND_SHIFT)
+#define ID_AA64MMFR0_BIGEND(mmfr0) \
+ (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
+
+#define SCTLR_EL1_CP15BEN (0x1 << 5)
+#define SCTLR_EL1_SED (0x1 << 8)
+
#ifndef __ASSEMBLY__
/*
@@ -104,6 +116,11 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
return read_cpuid(CTR_EL0);
}
+static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
+{
+ return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
+ (ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
+}
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9ce3e680ae1c..6932bb57dba0 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops coherent_swiotlb_dma_ops;
-extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
@@ -47,23 +45,18 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
- dev->archdata.dma_ops = ops;
-}
-
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
dev->archdata.dma_coherent = coherent;
- if (coherent)
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
+ if (!dev)
+ return false;
return dev->archdata.dma_coherent;
}
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a34fd3b12e2b..ef572206f1c3 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,29 +6,33 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efi_idmap_init(void);
#else
#define efi_init()
-#define efi_idmap_init()
#endif
#define efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f = efi.systab->runtime->f; \
+ efi_##f##_t *__f; \
efi_status_t __s; \
\
kernel_neon_begin(); \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
__s = __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
kernel_neon_end(); \
__s; \
})
#define __efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f = efi.systab->runtime->f; \
+ efi_##f##_t *__f; \
\
kernel_neon_begin(); \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
__f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
kernel_neon_end(); \
})
@@ -44,4 +48,22 @@ extern void efi_idmap_init(void);
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define EFI_ALLOC_ALIGN SZ_64K
+
+/*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in two
+ * distinct stages:
+ * - The stub retrieves the final version of the memory map from UEFI, populates
+ * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
+ * service to communicate the new mapping to the firmware (Note that the new
+ * mapping is not live at this time)
+ * - During an early initcall(), the EFI system table is permanently remapped
+ * and the virtual remapping of the UEFI Runtime Services regions is loaded
+ * into a private set of page tables. If this all succeeds, the Runtime
+ * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
+ */
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 72674f4c3871..92bbae381598 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,40 +18,90 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H
-#define ESR_EL1_WRITE (1 << 6)
-#define ESR_EL1_CM (1 << 8)
-#define ESR_EL1_IL (1 << 25)
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_CP10_ID (0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_HVC32 (0x12)
+#define ESR_ELx_EC_SMC32 (0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_HVC64 (0x16)
+#define ESR_ELx_EC_SMC64 (0x17)
+#define ESR_ELx_EC_SYS64 (0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f)
+#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_PC_ALIGN (0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_SP_ALIGN (0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32 (0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64 (0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32 (0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32 (0x3A)
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64 (0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX (0x3F)
-#define ESR_EL1_EC_SHIFT (26)
-#define ESR_EL1_EC_UNKNOWN (0x00)
-#define ESR_EL1_EC_WFI (0x01)
-#define ESR_EL1_EC_CP15_32 (0x03)
-#define ESR_EL1_EC_CP15_64 (0x04)
-#define ESR_EL1_EC_CP14_MR (0x05)
-#define ESR_EL1_EC_CP14_LS (0x06)
-#define ESR_EL1_EC_FP_ASIMD (0x07)
-#define ESR_EL1_EC_CP10_ID (0x08)
-#define ESR_EL1_EC_CP14_64 (0x0C)
-#define ESR_EL1_EC_ILL_ISS (0x0E)
-#define ESR_EL1_EC_SVC32 (0x11)
-#define ESR_EL1_EC_SVC64 (0x15)
-#define ESR_EL1_EC_SYS64 (0x18)
-#define ESR_EL1_EC_IABT_EL0 (0x20)
-#define ESR_EL1_EC_IABT_EL1 (0x21)
-#define ESR_EL1_EC_PC_ALIGN (0x22)
-#define ESR_EL1_EC_DABT_EL0 (0x24)
-#define ESR_EL1_EC_DABT_EL1 (0x25)
-#define ESR_EL1_EC_SP_ALIGN (0x26)
-#define ESR_EL1_EC_FP_EXC32 (0x28)
-#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERROR (0x2F)
-#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
-#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
-#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
-#define ESR_EL1_EC_SOFTSTP_EL1 (0x33)
-#define ESR_EL1_EC_WATCHPT_EL0 (0x34)
-#define ESR_EL1_EC_WATCHPT_EL1 (0x35)
-#define ESR_EL1_EC_BKPT32 (0x38)
-#define ESR_EL1_EC_BRK64 (0x3C)
+#define ESR_ELx_EC_SHIFT (26)
+#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
+
+#define ESR_ELx_IL (UL(1) << 25)
+#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
+#define ESR_ELx_ISV (UL(1) << 24)
+#define ESR_ELx_SAS_SHIFT (22)
+#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE (UL(1) << 21)
+#define ESR_ELx_SRT_SHIFT (16)
+#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF (UL(1) << 15)
+#define ESR_ELx_AR (UL(1) << 14)
+#define ESR_ELx_EA (UL(1) << 9)
+#define ESR_ELx_CM (UL(1) << 8)
+#define ESR_ELx_S1PTW (UL(1) << 7)
+#define ESR_ELx_WNR (UL(1) << 6)
+#define ESR_ELx_FSC (0x3F)
+#define ESR_ELx_FSC_TYPE (0x3C)
+#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_FAULT (0x04)
+#define ESR_ELx_FSC_PERM (0x0C)
+#define ESR_ELx_CV (UL(1) << 24)
+#define ESR_ELx_COND_SHIFT (20)
+#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
+#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+const char *esr_get_class_string(u32 esr);
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_ESR_H */
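
The renamed ESR_ELx_* constants describe a single syndrome layout shared by ESR_EL1 and ESR_EL2. As a standalone illustration (constants restated from the hunk above, the sample syndrome value is made up), this is how the class and data-abort fields compose:

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT    26
#define ESR_ELx_EC_MASK     (0x3FULL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL          (1ULL << 25)
#define ESR_ELx_WNR         (1ULL << 6)
#define ESR_ELx_FSC         0x3F
#define ESR_ELx_EC_DABT_CUR 0x25

int main(void)
{
	uint64_t esr = 0x96000045;	/* made-up data abort taken at EL1 */
	uint64_t ec  = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;

	printf("EC=0x%02llx (DABT_CUR: %s), 32-bit instruction: %s\n",
	       (unsigned long long)ec,
	       ec == ESR_ELx_EC_DABT_CUR ? "yes" : "no",
	       (esr & ESR_ELx_IL) ? "yes" : "no");
	printf("write: %s, fault status code: 0x%02llx\n",
	       (esr & ESR_ELx_WNR) ? "yes" : "no",
	       (unsigned long long)(esr & ESR_ELx_FSC));
	return 0;
}
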
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 9ef6eca905ca..defa0ff98250 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -49,6 +49,7 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ FIX_TEXT_POKE0,
__end_of_fixed_addresses
};
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 007618b8188c..a2daf1293028 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -76,7 +76,6 @@
fpsimd_restore_fpcr x\tmpnr, \state
.endm
-.altmacro
.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
mrs x\tmpnr1, fpsr
str w\numnr, [\state, #8]
@@ -86,11 +85,22 @@
add \state, \state, x\numnr, lsl #4
sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
br x\tmpnr1
- .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
- .irp qb, %(qa + 1)
- stp q\qa, q\qb, [\state, # -16 * \qa - 16]
- .endr
- .endr
+ stp q30, q31, [\state, #-16 * 30 - 16]
+ stp q28, q29, [\state, #-16 * 28 - 16]
+ stp q26, q27, [\state, #-16 * 26 - 16]
+ stp q24, q25, [\state, #-16 * 24 - 16]
+ stp q22, q23, [\state, #-16 * 22 - 16]
+ stp q20, q21, [\state, #-16 * 20 - 16]
+ stp q18, q19, [\state, #-16 * 18 - 16]
+ stp q16, q17, [\state, #-16 * 16 - 16]
+ stp q14, q15, [\state, #-16 * 14 - 16]
+ stp q12, q13, [\state, #-16 * 12 - 16]
+ stp q10, q11, [\state, #-16 * 10 - 16]
+ stp q8, q9, [\state, #-16 * 8 - 16]
+ stp q6, q7, [\state, #-16 * 6 - 16]
+ stp q4, q5, [\state, #-16 * 4 - 16]
+ stp q2, q3, [\state, #-16 * 2 - 16]
+ stp q0, q1, [\state, #-16 * 0 - 16]
0:
.endm
@@ -103,10 +113,21 @@
add \state, \state, x\tmpnr2, lsl #4
sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
br x\tmpnr1
- .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
- .irp qb, %(qa + 1)
- ldp q\qa, q\qb, [\state, # -16 * \qa - 16]
- .endr
- .endr
+ ldp q30, q31, [\state, #-16 * 30 - 16]
+ ldp q28, q29, [\state, #-16 * 28 - 16]
+ ldp q26, q27, [\state, #-16 * 26 - 16]
+ ldp q24, q25, [\state, #-16 * 24 - 16]
+ ldp q22, q23, [\state, #-16 * 22 - 16]
+ ldp q20, q21, [\state, #-16 * 20 - 16]
+ ldp q18, q19, [\state, #-16 * 18 - 16]
+ ldp q16, q17, [\state, #-16 * 16 - 16]
+ ldp q14, q15, [\state, #-16 * 14 - 16]
+ ldp q12, q13, [\state, #-16 * 12 - 16]
+ ldp q10, q11, [\state, #-16 * 10 - 16]
+ ldp q8, q9, [\state, #-16 * 8 - 16]
+ ldp q6, q7, [\state, #-16 * 6 - 16]
+ ldp q4, q5, [\state, #-16 * 4 - 16]
+ ldp q2, q3, [\state, #-16 * 2 - 16]
+ ldp q0, q1, [\state, #-16 * 0 - 16]
0:
.endm
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index e8a3268a891c..6aae421f4d73 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 949c406d4df4..540f7c0aea82 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -26,6 +26,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
+#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
@@ -145,8 +146,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define arch_has_dev_port() (1)
-#define IO_SPACE_LIMIT (SZ_32M - 1)
-#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
+#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
+#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
/*
* String version of I/O memory access operations.
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8afb863f5a9e..94674eb7e7bb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__
+#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/types.h>
@@ -184,77 +185,11 @@
#define MDCR_EL2_TPMCR (1 << 5)
#define MDCR_EL2_HPMN_MASK (0x1F)
-/* Exception Syndrome Register (ESR) bits */
-#define ESR_EL2_EC_SHIFT (26)
-#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL (UL(1) << 25)
-#define ESR_EL2_ISS (ESR_EL2_IL - 1)
-#define ESR_EL2_ISV_SHIFT (24)
-#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
-#define ESR_EL2_SAS_SHIFT (22)
-#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
-#define ESR_EL2_SSE (1 << 21)
-#define ESR_EL2_SRT_SHIFT (16)
-#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
-#define ESR_EL2_SF (1 << 15)
-#define ESR_EL2_AR (1 << 14)
-#define ESR_EL2_EA (1 << 9)
-#define ESR_EL2_CM (1 << 8)
-#define ESR_EL2_S1PTW (1 << 7)
-#define ESR_EL2_WNR (1 << 6)
-#define ESR_EL2_FSC (0x3f)
-#define ESR_EL2_FSC_TYPE (0x3c)
-
-#define ESR_EL2_CV_SHIFT (24)
-#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
-#define ESR_EL2_COND_SHIFT (20)
-#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
-
-
-#define FSC_FAULT (0x04)
-#define FSC_PERM (0x0c)
+/* For compatibility with fault code shared with 32-bit */
+#define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_PERM ESR_ELx_FSC_PERM
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
-#define ESR_EL2_EC_UNKNOWN (0x00)
-#define ESR_EL2_EC_WFI (0x01)
-#define ESR_EL2_EC_CP15_32 (0x03)
-#define ESR_EL2_EC_CP15_64 (0x04)
-#define ESR_EL2_EC_CP14_MR (0x05)
-#define ESR_EL2_EC_CP14_LS (0x06)
-#define ESR_EL2_EC_FP_ASIMD (0x07)
-#define ESR_EL2_EC_CP10_ID (0x08)
-#define ESR_EL2_EC_CP14_64 (0x0C)
-#define ESR_EL2_EC_ILL_ISS (0x0E)
-#define ESR_EL2_EC_SVC32 (0x11)
-#define ESR_EL2_EC_HVC32 (0x12)
-#define ESR_EL2_EC_SMC32 (0x13)
-#define ESR_EL2_EC_SVC64 (0x15)
-#define ESR_EL2_EC_HVC64 (0x16)
-#define ESR_EL2_EC_SMC64 (0x17)
-#define ESR_EL2_EC_SYS64 (0x18)
-#define ESR_EL2_EC_IABT (0x20)
-#define ESR_EL2_EC_IABT_HYP (0x21)
-#define ESR_EL2_EC_PC_ALIGN (0x22)
-#define ESR_EL2_EC_DABT (0x24)
-#define ESR_EL2_EC_DABT_HYP (0x25)
-#define ESR_EL2_EC_SP_ALIGN (0x26)
-#define ESR_EL2_EC_FP_EXC32 (0x28)
-#define ESR_EL2_EC_FP_EXC64 (0x2C)
-#define ESR_EL2_EC_SERROR (0x2F)
-#define ESR_EL2_EC_BREAKPT (0x30)
-#define ESR_EL2_EC_BREAKPT_HYP (0x31)
-#define ESR_EL2_EC_SOFTSTP (0x32)
-#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
-#define ESR_EL2_EC_WATCHPT (0x34)
-#define ESR_EL2_EC_WATCHPT_HYP (0x35)
-#define ESR_EL2_EC_BKPT32 (0x38)
-#define ESR_EL2_EC_VECTOR32 (0x3A)
-#define ESR_EL2_EC_BRK64 (0x3C)
-
-#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
-
-#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
-
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 483842180f8f..4f7310fa77f0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -126,6 +126,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..17e92f05b1fe 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -23,10 +23,13 @@
#define __ARM64_KVM_EMULATE_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
+
+#include <asm/esr.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
+#include <asm/cputype.h>
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -41,6 +44,18 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
+}
+
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr_el2 = hcr;
}
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
@@ -126,70 +141,75 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
+static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+}
+
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
- return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
- return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+ return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+ return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}
static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu_sys_reg(vcpu, MPIDR_EL1);
+ return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..8ac3c70fe3c6 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -59,6 +59,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The maximum number of vCPUs depends on the used GIC model */
+ int max_vcpus;
+
/* Interrupt controller */
struct vgic_dist vgic;
@@ -116,9 +119,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest */
bool pause;
@@ -162,6 +162,7 @@ struct kvm_vm_stat {
};
struct kvm_vcpu_stat {
+ u32 halt_successful_poll;
u32 halt_wakeup;
};
@@ -199,6 +200,7 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
@@ -206,6 +208,8 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index fc2f689c0694..9f52beb7cb13 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -40,6 +40,7 @@ struct kvm_exit_mmio {
u8 data[8];
u32 len;
bool is_write;
+ void *private;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..6458b5373142 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -118,6 +118,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= PMD_S2_RDWR;
}
+static inline void kvm_set_s2pte_readonly(pte_t *pte)
+{
+ pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
+}
+
+static inline bool kvm_s2pte_readonly(pte_t *pte)
+{
+ return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+}
+
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+{
+ pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
+}
+
+static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+{
+ return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
+}
+
+
#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
@@ -243,24 +264,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
+ void *va = page_address(pfn_to_page(pfn));
+
if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
+ kvm_flush_dcache_to_poc(va, size);
if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range(hva, hva + size);
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+ struct page *page = pud_page(pud);
+ kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
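
The new helpers only manipulate the stage-2 HAP[2:1] access-permission bits. A minimal arithmetic sketch (userspace, with the HAP encodings restated from pgtable-hwdef.h; the sample descriptor is made up):

#include <stdint.h>
#include <stdio.h>

#define PTE_S2_RDONLY (1ULL << 6)	/* HAP[2:1] = 01: read-only  */
#define PTE_S2_RDWR   (3ULL << 6)	/* HAP[2:1] = 11: read/write */

static uint64_t set_s2_readonly(uint64_t pte)
{
	return (pte & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static int is_s2_readonly(uint64_t pte)
{
	return (pte & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

int main(void)
{
	uint64_t pte = 0x00000000400007c3ULL;	/* made-up descriptor, HAP = RW */

	printf("before: read-only=%d\n", is_s2_readonly(pte));
	pte = set_s2_readonly(pte);
	printf("after:  read-only=%d\n", is_s2_readonly(pte));
	return 0;
}
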
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6486b2bfd562..f800d45ea226 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,6 +33,12 @@
#define UL(x) _AC(x, UL)
/*
+ * Size of the PCI I/O space. This must remain a power of two so that
+ * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
+ */
+#define PCI_IO_SIZE SZ_16M
+
+/*
* PAGE_OFFSET - the virtual address of the start of the kernel image (top
* (VA_BITS - 1))
* VA_BITS - the maximum number of bits for virtual addresses.
@@ -45,7 +51,9 @@
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
-#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE)
+#define PCI_IO_END (MODULES_VADDR - SZ_2M)
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
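
Because PCI_IO_SIZE is a power of two, IO_SPACE_LIMIT doubles as a bit mask, and the I/O window simply slots into the gap below the module area. A worked sketch of the resulting layout, assuming VA_BITS = 39 (the concrete addresses printed here are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define SZ_2M   0x0200000ULL
#define SZ_16M  0x1000000ULL
#define SZ_64M  0x4000000ULL

int main(void)
{
	unsigned va_bits       = 39;	/* assumed kernel configuration */
	uint64_t page_offset   = ~(uint64_t)0 << (va_bits - 1);
	uint64_t modules_end   = page_offset;
	uint64_t modules_vaddr = modules_end - SZ_64M;
	uint64_t pci_io_end    = modules_vaddr - SZ_2M;
	uint64_t pci_io_start  = pci_io_end - SZ_16M;	/* PCI_IO_SIZE */
	uint64_t fixaddr_top   = pci_io_start - SZ_2M;

	printf("PCI_IOBASE     = 0x%016llx\n", (unsigned long long)pci_io_start);
	printf("FIXADDR_TOP    = 0x%016llx\n", (unsigned long long)fixaddr_top);
	printf("IO_SPACE_LIMIT = 0x%llx\n", (unsigned long long)(SZ_16M - 1));
	return 0;
}
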
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index c2f006c48bdb..3d311761e3c2 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -31,7 +31,8 @@ extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
-/* create an identity mapping for memory (or io if map_io is true) */
-extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot);
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 88174e0bfafe..5f930cc9ea83 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -119,6 +119,7 @@
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[2:1] */
#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 210d632aa5ad..16449c535e50 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,7 +25,6 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
#define PTE_WRITE (_AT(pteval_t, 1) << 57)
@@ -46,7 +45,7 @@
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
@@ -264,6 +263,11 @@ static inline pmd_t pte_pmd(pte_t pte)
return __pmd(pte_val(pte));
}
+static inline pgprot_t mk_sect_prot(pgprot_t prot)
+{
+ return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+}
+
/*
* THP definitions.
*/
@@ -337,9 +341,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud) (0)
+#define pud_table(pud) (1)
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
+#define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
+ PUD_TYPE_TABLE)
#endif
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
@@ -469,13 +476,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-8: swap type
- * bits 9-57: swap offset
+ * bits 2-7: swap type
+ * bits 8-57: swap offset
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
-#define __SWP_OFFSET_BITS 49
+#define __SWP_OFFSET_BITS 50
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
@@ -493,18 +499,6 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/*
- * Encode and decode a file entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-57: file offset / PAGE_SIZE
- */
-#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 55
-
extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
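
With PTE_FILE gone, the swap type drops to bits 2-7 and the offset gains a bit. A standalone sketch of the new encoding (the compose/extract arithmetic below restates the shifts; it is not the kernel's __swp_* macro set):

#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT   2
#define SWP_TYPE_BITS    6
#define SWP_OFFSET_BITS  50
#define SWP_TYPE_MASK    ((1ULL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)
#define SWP_OFFSET_MASK  ((1ULL << SWP_OFFSET_BITS) - 1)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
	return ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT) |
	       ((offset & SWP_OFFSET_MASK) << SWP_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t e = swp_entry(3, 0x12345);

	printf("entry=0x%llx type=%llu offset=0x%llx present bits=%llu\n",
	       (unsigned long long)e,
	       (unsigned long long)((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK),
	       (unsigned long long)((e >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK),
	       (unsigned long long)(e & 3));	/* bits 0-1 must stay zero */
	return 0;
}
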
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41ed9e13795e..d6dd9fdbc3be 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -58,6 +58,13 @@
#define COMPAT_PSR_Z_BIT 0x40000000
#define COMPAT_PSR_N_BIT 0x80000000
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
+#else
+#define COMPAT_PSR_ENDSTATE 0
+#endif
+
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
* process is located in memory.
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 456d67c1f0fa..003802f58963 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -23,6 +23,4 @@ struct sleep_save_sp {
extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
-extern int cpu_suspend(unsigned long);
-
#endif
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
deleted file mode 100644
index 48fe7c600e98..000000000000
--- a/arch/arm64/include/asm/syscalls.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SYSCALLS_H
-#define __ASM_SYSCALLS_H
-
-#include <linux/linkage.h>
-#include <linux/compiler.h>
-#include <linux/signal.h>
-
-/*
- * System call wrappers implemented in kernel/entry.S.
- */
-asmlinkage long sys_rt_sigreturn_wrapper(void);
-
-#include <asm-generic/syscalls.h>
-
-#endif /* __ASM_SYSCALLS_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 459bf8e53208..702e1e6a0d80 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -48,7 +48,6 @@ struct thread_info {
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
- struct restart_block restart_block;
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};
@@ -60,9 +59,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b780c6c76eec..3bc498c250dc 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,10 +44,13 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 387
+#define __NR_compat_syscalls 388
#endif
#define __ARCH_WANT_SYS_CLONE
+
+#ifndef __COMPAT_SYSCALL_NR
#include <uapi/asm/unistd.h>
+#endif
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 8893cebcea5b..27224426e0bf 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
__SYSCALL(__NR_memfd_create, sys_memfd_create)
#define __NR_bpf 386
__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 942376d37d22..825b0fe51c2b 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -18,4 +18,5 @@ header-y += siginfo.h
header-y += signal.h
header-y += stat.h
header-y += statfs.h
+header-y += ucontext.h
header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 8e38878c87c6..3ef77a466018 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -78,6 +78,13 @@ struct kvm_regs {
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000
+/* Supported VGICv3 address types */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+
+#define KVM_VGIC_V3_DIST_SIZE SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
@@ -161,6 +168,8 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/uapi/asm/ucontext.h
index 42e04c877428..791de8e89e35 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/uapi/asm/ucontext.h
@@ -13,8 +13,10 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_UCONTEXT_H
-#define __ASM_UCONTEXT_H
+#ifndef _UAPI__ASM_UCONTEXT_H
+#define _UAPI__ASM_UCONTEXT_H
+
+#include <linux/types.h>
struct ucontext {
unsigned long uc_flags;
@@ -27,4 +29,4 @@ struct ucontext {
struct sigcontext uc_mcontext;
};
-#endif /* __ASM_UCONTEXT_H */
+#endif /* _UAPI__ASM_UCONTEXT_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index eaa77ed7766a..bef04afd6031 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -16,10 +16,10 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
- cpuinfo.o cpu_errata.o alternative.o
+ cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o \
+ sys_compat.o entry32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
@@ -27,7 +27,7 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
-arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c363671d7509..7922c2e710ca 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -19,6 +19,7 @@
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
+#include <asm/cpufeature.h>
#define CREATE_TRACE_POINTS
#include "trace-events-emulation.h"
@@ -85,6 +86,57 @@ static void remove_emulation_hooks(struct insn_emulation_ops *ops)
pr_notice("Removed %s emulation handler\n", ops->name);
}
+static void enable_insn_hw_mode(void *data)
+{
+ struct insn_emulation *insn = (struct insn_emulation *)data;
+ if (insn->ops->set_hw_mode)
+ insn->ops->set_hw_mode(true);
+}
+
+static void disable_insn_hw_mode(void *data)
+{
+ struct insn_emulation *insn = (struct insn_emulation *)data;
+ if (insn->ops->set_hw_mode)
+ insn->ops->set_hw_mode(false);
+}
+
+/* Run set_hw_mode(mode) on all active CPUs */
+static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+{
+ if (!insn->ops->set_hw_mode)
+ return -EINVAL;
+ if (enable)
+ on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+ else
+ on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
+ return 0;
+}
+
+/*
+ * Run set_hw_mode for all insns on a starting CPU.
+ * Returns:
+ * 0 - If all the hooks ran successfully.
+ * -EINVAL - At least one hook is not supported by the CPU.
+ */
+static int run_all_insn_set_hw_mode(unsigned long cpu)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct insn_emulation *insn;
+
+ raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+ list_for_each_entry(insn, &insn_emulation, node) {
+ bool enable = (insn->current_mode == INSN_HW);
+ if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
+ pr_warn("CPU[%ld] cannot support the emulation of %s\n",
+ cpu, insn->ops->name);
+ rc = -EINVAL;
+ }
+ }
+ raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+ return rc;
+}
+
static int update_insn_emulation_mode(struct insn_emulation *insn,
enum insn_emulation_mode prev)
{
@@ -97,10 +149,8 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
remove_emulation_hooks(insn->ops);
break;
case INSN_HW:
- if (insn->ops->set_hw_mode) {
- insn->ops->set_hw_mode(false);
+ if (!run_all_cpu_set_hw_mode(insn, false))
pr_notice("Disabled %s support\n", insn->ops->name);
- }
break;
}
@@ -111,10 +161,9 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
register_emulation_hooks(insn->ops);
break;
case INSN_HW:
- if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
+ ret = run_all_cpu_set_hw_mode(insn, true);
+ if (!ret)
pr_notice("Enabled %s support\n", insn->ops->name);
- else
- ret = -EINVAL;
break;
}
@@ -133,6 +182,8 @@ static void register_insn_emulation(struct insn_emulation_ops *ops)
switch (ops->status) {
case INSN_DEPRECATED:
insn->current_mode = INSN_EMULATE;
+ /* Disable the HW mode if it was turned on at early boot time */
+ run_all_cpu_set_hw_mode(insn, false);
insn->max = INSN_HW;
break;
case INSN_OBSOLETE:
@@ -453,8 +504,6 @@ ret:
return 0;
}
-#define SCTLR_EL1_CP15BEN (1 << 5)
-
static inline void config_sctlr_el1(u32 clear, u32 set)
{
u32 val;
@@ -465,48 +514,13 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
asm volatile("msr sctlr_el1, %0" : : "r" (val));
}
-static void enable_cp15_ben(void *info)
-{
- config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
-}
-
-static void disable_cp15_ben(void *info)
-{
- config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
-}
-
-static int cpu_hotplug_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
-{
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- enable_cp15_ben(NULL);
- return NOTIFY_DONE;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- disable_cp15_ben(NULL);
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
- .notifier_call = cpu_hotplug_notify,
-};
-
static int cp15_barrier_set_hw_mode(bool enable)
{
- if (enable) {
- register_cpu_notifier(&cpu_hotplug_notifier);
- on_each_cpu(enable_cp15_ben, NULL, true);
- } else {
- unregister_cpu_notifier(&cpu_hotplug_notifier);
- on_each_cpu(disable_cp15_ben, NULL, true);
- }
-
- return true;
+ if (enable)
+ config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+ else
+ config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+ return 0;
}
static struct undef_hook cp15_barrier_hooks[] = {
@@ -534,6 +548,93 @@ static struct insn_emulation_ops cp15_barrier_ops = {
.set_hw_mode = cp15_barrier_set_hw_mode,
};
+static int setend_set_hw_mode(bool enable)
+{
+ if (!cpu_supports_mixed_endian_el0())
+ return -EINVAL;
+
+ if (enable)
+ config_sctlr_el1(SCTLR_EL1_SED, 0);
+ else
+ config_sctlr_el1(0, SCTLR_EL1_SED);
+ return 0;
+}
+
+static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
+{
+ char *insn;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ if (big_endian) {
+ insn = "setend be";
+ regs->pstate |= COMPAT_PSR_E_BIT;
+ } else {
+ insn = "setend le";
+ regs->pstate &= ~COMPAT_PSR_E_BIT;
+ }
+
+ trace_instruction_emulation(insn, regs->pc);
+ pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ return 0;
+}
+
+static int a32_setend_handler(struct pt_regs *regs, u32 instr)
+{
+ int rc = compat_setend_handler(regs, (instr >> 9) & 1);
+ regs->pc += 4;
+ return rc;
+}
+
+static int t16_setend_handler(struct pt_regs *regs, u32 instr)
+{
+ int rc = compat_setend_handler(regs, (instr >> 3) & 1);
+ regs->pc += 2;
+ return rc;
+}
+
+static struct undef_hook setend_hooks[] = {
+ {
+ .instr_mask = 0xfffffdff,
+ .instr_val = 0xf1010000,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = a32_setend_handler,
+ },
+ {
+ /* Thumb mode */
+ .instr_mask = 0x0000fff7,
+ .instr_val = 0x0000b650,
+ .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
+ .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
+ .fn = t16_setend_handler,
+ },
+ {}
+};
+
+static struct insn_emulation_ops setend_ops = {
+ .name = "setend",
+ .status = INSN_DEPRECATED,
+ .hooks = setend_hooks,
+ .set_hw_mode = setend_set_hw_mode,
+};
+
+static int insn_cpu_hotplug_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int rc = 0;
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
+ rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
+
+ return notifier_from_errno(rc);
+}
+
+static struct notifier_block insn_cpu_hotplug_notifier = {
+ .notifier_call = insn_cpu_hotplug_notify,
+};
+
/*
* Invoked as late_initcall, since not needed before init spawned.
*/
@@ -545,6 +646,14 @@ static int __init armv8_deprecated_init(void)
if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
register_insn_emulation(&cp15_barrier_ops);
+ if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
+ if (system_supports_mixed_endian_el0())
+ register_insn_emulation(&setend_ops);
+ else
+ pr_info("setend instruction emulation is not supported on the system\n");
+ }
+
+ register_cpu_notifier(&insn_cpu_hotplug_notifier);
register_insn_emulation_sysctl(ctl_abi);
return 0;
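
Each setend hook matches both the LE and BE encodings with a single mask/value pair: the only don't-care bit is the one the handler reads back afterwards (bit 9 for A32, bit 3 for Thumb). A small standalone check of that matching logic, with the encodings mirrored from the hook tables above:

#include <stdint.h>
#include <stdio.h>

struct hook { uint32_t mask, val; };

static const struct hook a32_setend = { 0xfffffdff, 0xf1010000 };
static const struct hook t16_setend = { 0x0000fff7, 0x0000b650 };

static int matches(const struct hook *h, uint32_t instr)
{
	return (instr & h->mask) == h->val;
}

int main(void)
{
	printf("A32 setend le: %d\n", matches(&a32_setend, 0xf1010000));
	printf("A32 setend be: %d\n", matches(&a32_setend, 0xf1010200));
	printf("T16 setend le: %d\n", matches(&t16_setend, 0x0000b650));
	printf("T16 setend be: %d\n", matches(&t16_setend, 0x0000b658));
	printf("not a setend : %d\n", matches(&a32_setend, 0xe1a00000));
	return 0;
}
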
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 9a9fce090d58..f7fa65d4c352 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -140,6 +140,7 @@ int main(void)
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
@@ -152,7 +153,7 @@ int main(void)
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
new file mode 100644
index 000000000000..b8629d52fba9
--- /dev/null
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -0,0 +1,128 @@
+/*
+ * ARM64 cacheinfo support
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/compiler.h>
+#include <linux/of.h>
+
+#include <asm/cachetype.h>
+#include <asm/processor.h>
+
+#define MAX_CACHE_LEVEL 7 /* Max 7 levels supported */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+static inline enum cache_type get_cache_type(int level)
+{
+ u64 clidr;
+
+ if (level > MAX_CACHE_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+ asm volatile ("mrs %x0, clidr_el1" : "=r" (clidr));
+ return CLIDR_CTYPE(clidr, level);
+}
+
+/*
+ * Cache Size Selection Register(CSSELR) selects which Cache Size ID
+ * Register(CCSIDR) is accessible by specifying the required cache
+ * level and the cache type. We need to ensure that no one else changes
+ * CSSELR by calling this in non-preemptible context.
+ */
+u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
+{
+ u64 ccsidr;
+
+ WARN_ON(preemptible());
+
+ /* Put value into CSSELR */
+ asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
+ isb();
+ /* Read result out of CCSIDR */
+ asm volatile("mrs %x0, ccsidr_el1" : "=r" (ccsidr));
+
+ return ccsidr;
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ enum cache_type type, unsigned int level)
+{
+ bool is_icache = type & CACHE_TYPE_INST;
+ u64 tmp = cache_get_ccsidr((level - 1) << 1 | is_icache);
+
+ this_leaf->level = level;
+ this_leaf->type = type;
+ this_leaf->coherency_line_size = CACHE_LINESIZE(tmp);
+ this_leaf->number_of_sets = CACHE_NUMSETS(tmp);
+ this_leaf->ways_of_associativity = CACHE_ASSOCIATIVITY(tmp);
+ this_leaf->size = this_leaf->number_of_sets *
+ this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
+ this_leaf->attributes =
+ ((tmp & CCSIDR_EL1_WRITE_THROUGH) ? CACHE_WRITE_THROUGH : 0) |
+ ((tmp & CCSIDR_EL1_WRITE_BACK) ? CACHE_WRITE_BACK : 0) |
+ ((tmp & CCSIDR_EL1_READ_ALLOCATE) ? CACHE_READ_ALLOCATE : 0) |
+ ((tmp & CCSIDR_EL1_WRITE_ALLOCATE) ? CACHE_WRITE_ALLOCATE : 0);
+}
+
+static int __init_cache_level(unsigned int cpu)
+{
+ unsigned int ctype, level, leaves;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+ ctype = get_cache_type(level);
+ if (ctype == CACHE_TYPE_NOCACHE) {
+ level--;
+ break;
+ }
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ }
+
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+static int __populate_cache_leaves(unsigned int cpu)
+{
+ unsigned int level, idx;
+ enum cache_type type;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ type = get_cache_type(level);
+ if (type == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, type, level);
+ }
+ }
+ return 0;
+}
+
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
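
ci_leaf_init() derives each leaf's geometry from a CCSIDR_EL1 value. The standalone sketch below restates the architectural (pre-CCIDX) field layout that the CACHE_LINESIZE/CACHE_NUMSETS/CACHE_ASSOCIATIVITY accessors are assumed to decode; the sample register value is made up:

#include <stdint.h>
#include <stdio.h>

static void decode_ccsidr(uint64_t ccsidr)
{
	unsigned line = 16u << (ccsidr & 0x7);		  /* bytes per line */
	unsigned ways = ((ccsidr >> 3) & 0x3ff) + 1;	  /* associativity  */
	unsigned sets = ((ccsidr >> 13) & 0x7fff) + 1;	  /* number of sets */

	printf("line=%uB ways=%u sets=%u size=%uKiB\n",
	       line, ways, sets, line * ways * sets / 1024);
}

int main(void)
{
	decode_ccsidr(0x601fe01aULL);	/* made up: 64B line, 4-way, 256 sets */
	return 0;
}
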
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 19d17f51db37..5c0896647fd1 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -29,3 +29,23 @@ int cpu_init_idle(unsigned int cpu)
of_node_put(cpu_node);
return ret;
}
+
+/**
+ * cpu_suspend() - function to enter a low-power idle state
+ * @arg: argument to pass to CPU suspend operations
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the CPU suspend hook is not
+ * initialized, or the CPU operations back-end error code otherwise.
+ */
+int cpu_suspend(unsigned long arg)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * If cpu_ops have not been registered or suspend
+ * has not been initialized, cpu_suspend call fails early.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ return -EOPNOTSUPP;
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 07d435cf2eea..929855691dae 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -35,6 +35,7 @@
*/
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
+static bool mixed_endian_el0 = true;
static char *icache_policy_str[] = {
[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
@@ -68,6 +69,26 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
+bool cpu_supports_mixed_endian_el0(void)
+{
+ return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+}
+
+bool system_supports_mixed_endian_el0(void)
+{
+ return mixed_endian_el0;
+}
+
+static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
+{
+ mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
+}
+
+static void update_cpu_features(struct cpuinfo_arm64 *info)
+{
+ update_mixed_endian_el0_support(info);
+}
+
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
if ((boot & mask) == (cur & mask))
@@ -215,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
+ update_cpu_features(info);
}
void cpuinfo_store_cpu(void)
@@ -231,15 +253,3 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
}
-
-u64 __attribute_const__ icache_get_ccsidr(void)
-{
- u64 ccsidr;
-
- WARN_ON(preemptible());
-
- /* Select L1 I-cache and read its size ID register */
- asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
- : "=r"(ccsidr) : "r"(1L));
- return ccsidr;
-}
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index d27dd982ff26..f5374065ad53 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,13 +13,13 @@
#include <asm/efi.h>
#include <asm/sections.h>
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
- unsigned long *image_size,
- unsigned long *reserve_addr,
- unsigned long *reserve_size,
- unsigned long dram_base,
- efi_loaded_image_t *image)
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2bb4347d0edf..b42c7b480e1e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,27 +11,46 @@
*
*/
+#include <linux/atomic.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
struct efi_memory_map memmap;
-static efi_runtime_services_t *runtime;
-
static u64 efi_system_table;
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+ .mm_rb = RB_ROOT,
+ .pgd = efi_pgd,
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+ .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ INIT_MM_CONTEXT(efi_mm)
+};
+
static int uefi_debug __initdata;
static int __init uefi_debug_setup(char *str)
{
@@ -48,30 +67,33 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
return 0;
}
-static void __init efi_setup_idmap(void)
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
{
- struct memblock_region *r;
efi_memory_desc_t *md;
- u64 paddr, npages, size;
- for_each_memblock(memory, r)
- create_id_mapping(r->base, r->size, 0);
-
- /* map runtime io spaces */
for_each_efi_memory_desc(&memmap, md) {
- if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
- create_id_mapping(paddr, size, 1);
+ if (md->virt_addr == 0)
+ /* no virtual mapping has been installed by the stub */
+ break;
+ if (md->virt_addr <= addr &&
+ (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+ return md->phys_addr + addr - md->virt_addr;
}
+ return addr;
}
static int __init uefi_init(void)
{
efi_char16_t *c16;
+ void *config_tables;
+ u64 table_size;
char vendor[100] = "unknown";
int i, retval;
@@ -99,7 +121,7 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
- c16 = early_memremap(efi.systab->fw_vendor,
+ c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -112,8 +134,14 @@ static int __init uefi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- retval = efi_config_init(NULL);
+ table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+ config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+ table_size);
+ retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+ sizeof(efi_config_table_64_t), NULL);
+
+ early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
return retval;
@@ -163,9 +191,7 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
- if (is_reserve_region(md) ||
- md->type == EFI_BOOT_SERVICES_CODE ||
- md->type == EFI_BOOT_SERVICES_DATA) {
+ if (is_reserve_region(md)) {
memblock_reserve(paddr, size);
if (uefi_debug)
pr_cont("*");
@@ -178,123 +204,6 @@ static __init void reserve_regions(void)
set_bit(EFI_MEMMAP, &efi.flags);
}
-
-static u64 __init free_one_region(u64 start, u64 end)
-{
- u64 size = end - start;
-
- if (uefi_debug)
- pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
-
- free_bootmem_late(start, size);
- return size;
-}
-
-static u64 __init free_region(u64 start, u64 end)
-{
- u64 map_start, map_end, total = 0;
-
- if (end <= start)
- return total;
-
- map_start = (u64)memmap.phys_map;
- map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
- map_start &= PAGE_MASK;
-
- if (start < map_end && end > map_start) {
- /* region overlaps UEFI memmap */
- if (start < map_start)
- total += free_one_region(start, map_start);
-
- if (map_end < end)
- total += free_one_region(map_end, end);
- } else
- total += free_one_region(start, end);
-
- return total;
-}
-
-static void __init free_boot_services(void)
-{
- u64 total_freed = 0;
- u64 keep_end, free_start, free_end;
- efi_memory_desc_t *md;
-
- /*
- * If kernel uses larger pages than UEFI, we have to be careful
- * not to inadvertantly free memory we want to keep if there is
- * overlap at the kernel page size alignment. We do not want to
- * free is_reserve_region() memory nor the UEFI memmap itself.
- *
- * The memory map is sorted, so we keep track of the end of
- * any previous region we want to keep, remember any region
- * we want to free and defer freeing it until we encounter
- * the next region we want to keep. This way, before freeing
- * it, we can clip it as needed to avoid freeing memory we
- * want to keep for UEFI.
- */
-
- keep_end = 0;
- free_start = 0;
-
- for_each_efi_memory_desc(&memmap, md) {
- u64 paddr, npages, size;
-
- if (is_reserve_region(md)) {
- /*
- * We don't want to free any memory from this region.
- */
- if (free_start) {
- /* adjust free_end then free region */
- if (free_end > md->phys_addr)
- free_end -= PAGE_SIZE;
- total_freed += free_region(free_start, free_end);
- free_start = 0;
- }
- keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
- continue;
- }
-
- if (md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA) {
- /* no need to free this region */
- continue;
- }
-
- /*
- * We want to free memory from this region.
- */
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
-
- if (free_start) {
- if (paddr <= free_end)
- free_end = paddr + size;
- else {
- total_freed += free_region(free_start, free_end);
- free_start = paddr;
- free_end = paddr + size;
- }
- } else {
- free_start = paddr;
- free_end = paddr + size;
- }
- if (free_start < keep_end) {
- free_start += PAGE_SIZE;
- if (free_start >= free_end)
- free_start = 0;
- }
- }
- if (free_start)
- total_freed += free_region(free_start, free_end);
-
- if (total_freed)
- pr_info("Freed 0x%llx bytes of EFI boot services memory",
- total_freed);
-}
-
void __init efi_init(void)
{
struct efi_fdt_params params;
@@ -317,159 +226,100 @@ void __init efi_init(void)
return;
reserve_regions();
+ early_memunmap(memmap.map, params.mmap_size);
}
-void __init efi_idmap_init(void)
+static bool __init efi_virtmap_init(void)
{
- if (!efi_enabled(EFI_BOOT))
- return;
-
- /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
- efi_setup_idmap();
- early_memunmap(memmap.map, memmap.map_end - memmap.map);
-}
-
-static int __init remap_region(efi_memory_desc_t *md, void **new)
-{
- u64 paddr, vaddr, npages, size;
-
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
+ efi_memory_desc_t *md;
- if (is_normal_ram(md))
- vaddr = (__force u64)ioremap_cache(paddr, size);
- else
- vaddr = (__force u64)ioremap(paddr, size);
+ for_each_efi_memory_desc(&memmap, md) {
+ u64 paddr, npages, size;
+ pgprot_t prot;
- if (!vaddr) {
- pr_err("Unable to remap 0x%llx pages @ %p\n",
- npages, (void *)paddr);
- return 0;
- }
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
- /* adjust for any rounding when EFI and system pagesize differs */
- md->virt_addr = vaddr + (md->phys_addr - paddr);
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
- if (uefi_debug)
- pr_info(" EFI remap 0x%012llx => %p\n",
+ pr_info(" EFI remap 0x%016llx => %p\n",
md->phys_addr, (void *)md->virt_addr);
- memcpy(*new, md, memmap.desc_size);
- *new += memmap.desc_size;
-
- return 1;
+ /*
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable, everything else can be mapped with the XN bits
+ * set.
+ */
+ if (!is_normal_ram(md))
+ prot = __pgprot(PROT_DEVICE_nGnRE);
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ prot = PAGE_KERNEL_EXEC;
+ else
+ prot = PAGE_KERNEL;
+
+ create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+ }
+ return true;
}
/*
- * Switch UEFI from an identity map to a kernel virtual map
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
*/
-static int __init arm64_enter_virtual_mode(void)
+static int __init arm64_enable_runtime_services(void)
{
- efi_memory_desc_t *md;
- phys_addr_t virtmap_phys;
- void *virtmap, *virt_md;
- efi_status_t status;
u64 mapsize;
- int count = 0;
- unsigned long flags;
if (!efi_enabled(EFI_BOOT)) {
pr_info("EFI services will not be available.\n");
return -1;
}
- mapsize = memmap.map_end - memmap.map;
-
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return -1;
}
pr_info("Remapping and enabling EFI services.\n");
- /* replace early memmap mapping with permanent mapping */
+
+ mapsize = memmap.map_end - memmap.map;
memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
mapsize);
- memmap.map_end = memmap.map + mapsize;
-
- efi.memmap = &memmap;
-
- /* Map the runtime regions */
- virtmap = kmalloc(mapsize, GFP_KERNEL);
- if (!virtmap) {
- pr_err("Failed to allocate EFI virtual memmap\n");
+ if (!memmap.map) {
+ pr_err("Failed to remap EFI memory map\n");
return -1;
}
- virtmap_phys = virt_to_phys(virtmap);
- virt_md = virtmap;
-
- for_each_efi_memory_desc(&memmap, md) {
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
- continue;
- if (!remap_region(md, &virt_md))
- goto err_unmap;
- ++count;
- }
+ memmap.map_end = memmap.map + mapsize;
+ efi.memmap = &memmap;
- efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+ efi.systab = (__force void *)ioremap_cache(efi_system_table,
+ sizeof(efi_system_table_t));
if (!efi.systab) {
- /*
- * If we have no virtual mapping for the System Table at this
- * point, the memory map doesn't cover the physical offset where
- * it resides. This means the System Table will be inaccessible
- * to Runtime Services themselves once the virtual mapping is
- * installed.
- */
- pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
- goto err_unmap;
+ pr_err("Failed to remap EFI System Table\n");
+ return -1;
}
set_bit(EFI_SYSTEM_TABLES, &efi.flags);
- local_irq_save(flags);
- cpu_switch_mm(idmap_pg_dir, &init_mm);
-
- /* Call SetVirtualAddressMap with the physical address of the map */
- runtime = efi.systab->runtime;
- efi.set_virtual_address_map = runtime->set_virtual_address_map;
-
- status = efi.set_virtual_address_map(count * memmap.desc_size,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)virtmap_phys);
- cpu_set_reserved_ttbr0();
- flush_tlb_all();
- local_irq_restore(flags);
-
- kfree(virtmap);
-
- free_boot_services();
-
- if (status != EFI_SUCCESS) {
- pr_err("Failed to set EFI virtual address map! [%lx]\n",
- status);
+ if (!efi_virtmap_init()) {
+ pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
return -1;
}
/* Set up runtime services function pointers */
- runtime = efi.systab->runtime;
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
efi.runtime_version = efi.systab->hdr.revision;
return 0;
-
-err_unmap:
- /* unmap all mappings that succeeded: there are 'count' of those */
- for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
- md = virt_md;
- iounmap((__force void __iomem *)md->virt_addr);
- }
- kfree(virtmap);
- return -1;
}
-early_initcall(arm64_enter_virtual_mode);
+early_initcall(arm64_enable_runtime_services);
static int __init arm64_dmi_init(void)
{
@@ -484,3 +334,23 @@ static int __init arm64_dmi_init(void)
return 0;
}
core_initcall(arm64_dmi_init);
+
+static void efi_set_pgd(struct mm_struct *mm)
+{
+ cpu_switch_mm(mm->pgd, mm);
+ flush_tlb_all();
+ if (icache_is_aivivt())
+ __flush_icache_all();
+}
+
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+ efi_set_pgd(current->active_mm);
+ preempt_enable();
+}
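
The efi_set_pgd()/efi_virtmap_load()/efi_virtmap_unload() helpers added above are intended to bracket every UEFI runtime-service call so the firmware runs with efi_mm's page tables installed instead of the kernel's. A minimal sketch of that calling pattern, assuming a hypothetical wrapper (the real bracketing is expected to happen in the generic runtime-wrapper machinery, and locking is omitted here):

/*
 * Sketch only -- efi_example_get_time() is a hypothetical caller, not part
 * of the patch. It shows the intended bracketing: switch TTBR0 to efi_mm
 * for the duration of the firmware call, then restore the interrupted mm.
 */
static efi_status_t efi_example_get_time(efi_time_t *tm)
{
	efi_status_t status;

	efi_virtmap_load();			/* preempt_disable() + efi_mm */
	status = efi.systab->runtime->get_time(tm, NULL);
	efi_virtmap_unload();			/* active_mm + preempt_enable() */

	return status;
}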
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index fd4fa374e5d2..cf21bb3bf752 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -269,18 +269,18 @@ ENDPROC(el1_error_invalid)
el1_sync:
kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
- lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1
+ lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
- cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
- cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1
+ cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_da:
@@ -318,7 +318,7 @@ el1_dbg:
/*
* Debug exception handling
*/
- cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
@@ -375,26 +375,26 @@ el1_preempt:
el0_sync:
kernel_entry 0
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
b.eq el0_svc
- cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
+ cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
- cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
+ cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
- cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
+ cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception
+ cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
- cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
@@ -403,37 +403,37 @@ el0_sync:
el0_sync_compat:
kernel_entry 0, 32
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state
b.eq el0_svc_compat
- cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
+ cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
- cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
+ cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
- cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
+ cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception
+ cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap
+ cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap
+ cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap
+ cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap
+ cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap
+ cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
el0_svc_compat:
/*
* AArch32 syscall handling
*/
- adr stbl, compat_sys_call_table // load compat syscall table pointer
+ adrp stbl, compat_sys_call_table // load compat syscall table pointer
uxtw scno, w7 // syscall number in w7 (r7)
mov sc_nr, #__NR_compat_syscalls
b el0_svc_naked
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/entry32.S
index 423a5b3fc2be..9a8f6ae2530e 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -27,26 +27,26 @@
* System call wrappers for the AArch32 compatibility layer.
*/
-compat_sys_sigreturn_wrapper:
+ENTRY(compat_sys_sigreturn_wrapper)
mov x0, sp
mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_sigreturn
ENDPROC(compat_sys_sigreturn_wrapper)
-compat_sys_rt_sigreturn_wrapper:
+ENTRY(compat_sys_rt_sigreturn_wrapper)
mov x0, sp
mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_rt_sigreturn
ENDPROC(compat_sys_rt_sigreturn_wrapper)
-compat_sys_statfs64_wrapper:
+ENTRY(compat_sys_statfs64_wrapper)
mov w3, #84
cmp w1, #88
csel w1, w3, w1, eq
b compat_sys_statfs64
ENDPROC(compat_sys_statfs64_wrapper)
-compat_sys_fstatfs64_wrapper:
+ENTRY(compat_sys_fstatfs64_wrapper)
mov w3, #84
cmp w1, #88
csel w1, w3, w1, eq
@@ -58,33 +58,33 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* in registers or that take 32-bit parameters which require sign
* extension.
*/
-compat_sys_pread64_wrapper:
+ENTRY(compat_sys_pread64_wrapper)
regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
-compat_sys_pwrite64_wrapper:
+ENTRY(compat_sys_pwrite64_wrapper)
regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
-compat_sys_truncate64_wrapper:
+ENTRY(compat_sys_truncate64_wrapper)
regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
-compat_sys_ftruncate64_wrapper:
+ENTRY(compat_sys_ftruncate64_wrapper)
regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
-compat_sys_readahead_wrapper:
+ENTRY(compat_sys_readahead_wrapper)
regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
-compat_sys_fadvise64_64_wrapper:
+ENTRY(compat_sys_fadvise64_64_wrapper)
mov w6, w1
regs_to_64 x1, x2, x3
regs_to_64 x2, x4, x5
@@ -92,24 +92,14 @@ compat_sys_fadvise64_64_wrapper:
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
-compat_sys_sync_file_range2_wrapper:
+ENTRY(compat_sys_sync_file_range2_wrapper)
regs_to_64 x2, x2, x3
regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
-compat_sys_fallocate_wrapper:
+ENTRY(compat_sys_fallocate_wrapper)
regs_to_64 x2, x2, x3
regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
-
-#undef __SYSCALL
-#define __SYSCALL(x, y) .quad y // x
-
-/*
- * The system calls table must be 4KB aligned.
- */
- .align 12
-ENTRY(compat_sys_call_table)
-#include <asm/unistd32.h>
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index df1cf15377b4..98bbe06e469c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -894,7 +894,7 @@ static struct notifier_block hw_breakpoint_reset_nb = {
.notifier_call = hw_breakpoint_reset_notify,
};
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7e9327a0986d..27d4864577e5 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -17,14 +17,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/stop_machine.h>
+#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/fixmap.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
@@ -72,6 +77,29 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool module = !core_kernel_text(uintaddr);
+ struct page *page;
+
+ if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+ page = vmalloc_to_page(addr);
+ else
+ page = virt_to_page(addr);
+
+ BUG_ON(!page);
+ set_fixmap(fixmap, page_to_phys(page));
+
+ return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
/*
* In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
* little-endian.
@@ -88,10 +116,27 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
return ret;
}
+static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
+{
+ void *waddr = addr;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
+
+ patch_unmap(FIX_TEXT_POKE0);
+ spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
insn = cpu_to_le32(insn);
- return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+ return __aarch64_insn_write(addr, insn);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
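
The patch_map()/patch_unmap() pair above lets __aarch64_insn_write() poke kernel or module text through a temporary FIX_TEXT_POKE0 alias, so the write still works when CONFIG_DEBUG_SET_MODULE_RONX has made the primary mapping read-only. A minimal caller sketch, assuming aarch64_insn_gen_nop(), flush_icache_range() and AARCH64_INSN_SIZE are available as elsewhere in the tree:

/* Sketch: overwrite one instruction with a NOP and resync the I-cache. */
static int example_nop_out(void *addr)
{
	u32 nop = aarch64_insn_gen_nop();
	int ret = aarch64_insn_write(addr, nop);	/* goes via FIX_TEXT_POKE0 */

	if (!ret)
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + AARCH64_INSN_SIZE);
	return ret;
}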
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 9b6f71db2709..67bf4107f6ef 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -35,8 +35,8 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
- __builtin_return_address(0));
+ GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
enum aarch64_reloc_op {
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index ce5836c14ec1..6f93c24ca801 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
-
-
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
-static bool dt_domain_found = false;
-
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
-{
- int domain = of_get_pci_domain_nr(parent->of_node);
-
- if (domain >= 0) {
- dt_domain_found = true;
- } else if (dt_domain_found == true) {
- dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
- parent->of_node->full_name);
- return;
- } else {
- domain = pci_get_new_domain_nr();
- }
-
- bus->domain_nr = domain;
-}
-#endif
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index f1dbca7d5c96..3425f311c49e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -540,8 +540,6 @@ const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
-#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 20fe2932ad0c..e8420f635bd4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -40,6 +40,7 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
+#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
@@ -322,25 +323,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
-/*
- * Limit the memory size that was specified via FDT.
- */
-static int __init early_mem(char *p)
-{
- phys_addr_t limit;
-
- if (!p)
- return 1;
-
- limit = memparse(p, &p) & PAGE_MASK;
- pr_notice("Memory limited to %lldMB\n", limit >> 20);
-
- memblock_enforce_memory_limit(limit);
-
- return 0;
-}
-early_param("mem", early_mem);
-
static void __init request_standard_resources(void)
{
struct memblock_region *region;
@@ -401,7 +383,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
request_standard_resources();
- efi_idmap_init();
early_ioremap_reset();
unflatten_device_tree();
@@ -425,6 +406,7 @@ void __init setup_arch(char **cmdline_p)
static int __init arm64_device_init(void)
{
+ of_iommu_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6fa792137eda..660ccf9f7524 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -131,7 +131,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 128-bit boundary, then 'sp' should
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 5a1ba6e80d4e..c20a300e2213 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -347,7 +347,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
struct compat_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -381,7 +381,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
struct compat_rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -440,7 +440,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
{
compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
compat_ulong_t retcode;
- compat_ulong_t spsr = regs->pstate & ~PSR_f;
+ compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT);
int thumb;
/* Check if the handler is written for ARM or Thumb */
@@ -454,6 +454,9 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* The IT state must be cleared for both ARM and Thumb-2 */
spsr &= ~COMPAT_PSR_IT_MASK;
+ /* Restore the original endianness */
+ spsr |= COMPAT_PSR_ENDSTATE;
+
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
@@ -501,7 +504,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
/* set the compat FSR WnR */
- __put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
+ __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 7ae6ee085261..328b8ce4b007 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -65,7 +65,6 @@ struct secondary_data secondary_data;
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
- IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
@@ -483,7 +482,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
@@ -527,7 +525,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
void arch_send_call_function_single_ipi(int cpu)
{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_IRQ_WORK
@@ -585,12 +583,6 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
- case IPI_CALL_FUNC_SINGLE:
- irq_enter();
- generic_smp_call_function_single_interrupt();
- irq_exit();
- break;
-
case IPI_CPU_STOP:
irq_enter();
ipi_cpu_stop(cpu);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 2d6b6065fe7f..d7daf45ae7a2 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,7 +1,6 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
-#include <asm/cpu_ops.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
@@ -51,26 +50,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
hw_breakpoint_restore = hw_bp_restore;
}
-/**
- * cpu_suspend() - function to enter a low-power state
- * @arg: argument to pass to CPU suspend operations
- *
- * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
- * operations back-end error code otherwise.
- */
-int cpu_suspend(unsigned long arg)
-{
- int cpu = smp_processor_id();
-
- /*
- * If cpu_ops have not been registered or suspend
- * has not been initialized, cpu_suspend call fails early.
- */
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
- return -EOPNOTSUPP;
- return cpu_ops[cpu]->cpu_suspend(arg);
-}
-
/*
* __cpu_suspend
*
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 3fa98ff14f0e..75151aaf1a52 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -39,10 +39,9 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
/*
* Wrappers to pass the pt_regs argument.
*/
+asmlinkage long sys_rt_sigreturn_wrapper(void);
#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
-#include <asm/syscalls.h>
-
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
@@ -50,7 +49,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
* The sys_call_table array must be 4K aligned to be accessible from
* kernel/entry.S.
*/
-void *sys_call_table[__NR_syscalls] __aligned(4096) = {
+void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
new file mode 100644
index 000000000000..2d5ab3c90b82
--- /dev/null
+++ b/arch/arm64/kernel/sys32.c
@@ -0,0 +1,51 @@
+/*
+ * arch/arm64/kernel/sys32.c
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Needed to avoid conflicting __NR_* macros between uapi/asm/unistd.h and
+ * asm/unistd32.h.
+ */
+#define __COMPAT_SYSCALL_NR
+
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+
+asmlinkage long compat_sys_sigreturn_wrapper(void);
+asmlinkage long compat_sys_rt_sigreturn_wrapper(void);
+asmlinkage long compat_sys_statfs64_wrapper(void);
+asmlinkage long compat_sys_fstatfs64_wrapper(void);
+asmlinkage long compat_sys_pread64_wrapper(void);
+asmlinkage long compat_sys_pwrite64_wrapper(void);
+asmlinkage long compat_sys_truncate64_wrapper(void);
+asmlinkage long compat_sys_ftruncate64_wrapper(void);
+asmlinkage long compat_sys_readahead_wrapper(void);
+asmlinkage long compat_sys_fadvise64_64_wrapper(void);
+asmlinkage long compat_sys_sync_file_range2_wrapper(void);
+asmlinkage long compat_sys_fallocate_wrapper(void);
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = sym,
+
+/*
+ * The sys_call_table array must be 4K aligned to be accessible from
+ * kernel/entry.S.
+ */
+void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = {
+ [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd32.h>
+};
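
The compat_sys_call_table above combines a GNU C designated-initializer range, which defaults every slot to sys_ni_syscall, with the __SYSCALL(nr, sym) macro that asm/unistd32.h expands once per syscall to override individual slots. A reduced, standalone illustration of the same pattern (the names and numbers below are invented for the example):

/* Illustration only -- not part of the patch. Compile as GNU C. */
typedef long (*syscall_fn_t)(void);

static long example_ni_syscall(void)	{ return -38; /* ENOSYS */ }
static long example_sys_foo(void)	{ return 1; }
static long example_sys_bar(void)	{ return 2; }

#define EXAMPLE_SYSCALL(nr, sym)	[nr] = (syscall_fn_t)sym,

static const syscall_fn_t example_table[16] = {
	[0 ... 15] = (syscall_fn_t)example_ni_syscall,	/* default */
	EXAMPLE_SYSCALL(3, example_sys_foo)		/* overrides slot 3 */
	EXAMPLE_SYSCALL(7, example_sys_bar)		/* overrides slot 7 */
};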
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 0a801e3743d5..1ef2940df13c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -33,6 +33,7 @@
#include <asm/atomic.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -373,6 +374,51 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
return sys_ni_syscall();
}
+static const char *esr_class_str[] = {
+ [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
+ [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
+ [ESR_ELx_EC_WFx] = "WFI/WFE",
+ [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
+ [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
+ [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
+ [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
+ [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
+ [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
+ [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
+ [ESR_ELx_EC_ILL] = "PSTATE.IL",
+ [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
+ [ESR_ELx_EC_HVC32] = "HVC (AArch32)",
+ [ESR_ELx_EC_SMC32] = "SMC (AArch32)",
+ [ESR_ELx_EC_SVC64] = "SVC (AArch64)",
+ [ESR_ELx_EC_HVC64] = "HVC (AArch64)",
+ [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
+ [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
+ [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
+ [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
+ [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
+ [ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
+ [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
+ [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
+ [ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
+ [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
+ [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
+ [ESR_ELx_EC_SERROR] = "SError",
+ [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
+ [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
+ [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
+ [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
+ [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
+ [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
+ [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
+ [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
+ [ESR_ELx_EC_BRK64] = "BRK (AArch64)",
+};
+
+const char *esr_get_class_string(u32 esr)
+{
+ return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+}
+
/*
* bad_mode handles the impossible case in the exception vector.
*/
@@ -382,8 +428,8 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
- pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
- handler[reason], esr);
+ pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
+ handler[reason], esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
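
The new esr_class_str[] table is indexed by the exception-class field of ESR_ELx (bits [31:26]), which is why bad_mode() here -- and the KVM exit path further down -- can now print a human-readable class name. A small worked example, assuming the ESR_ELx_EC_* encodings from asm/esr.h (0x25 being a data abort taken from the current EL):

/* Sketch: decode a typical kernel data-abort syndrome value. */
static void example_esr_decode(void)
{
	u32 esr = 0x96000045;			/* example syndrome */
	u32 ec  = esr >> ESR_ELx_EC_SHIFT;	/* bits [31:26] -> 0x25 */

	/* 0x25 is ESR_ELx_EC_DABT_CUR, so this prints "DABT (current EL)". */
	pr_info("EC %#x -- %s\n", ec, esr_get_class_string(esr));
}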
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9965ec87cbec..5d9d2dca530d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -8,6 +8,7 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include "image.h"
@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200;
#define PECOFF_EDATA_PADDING
#endif
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
+#define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT);
+#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
+#else
+#define ALIGN_DEBUG_RO
+#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
+#endif
+
SECTIONS
{
/*
@@ -71,6 +80,7 @@ SECTIONS
_text = .;
HEAD_TEXT
}
+ ALIGN_DEBUG_RO
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -87,19 +97,22 @@ SECTIONS
*(.got) /* Global offset table */
}
+ ALIGN_DEBUG_RO
RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(8)
NOTES
+ ALIGN_DEBUG_RO
_etext = .; /* End of text and rodata section */
- . = ALIGN(PAGE_SIZE);
+ ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
__init_begin = .;
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
- . = ALIGN(16);
+
+ ALIGN_DEBUG_RO_MIN(16)
.init.data : {
INIT_DATA
INIT_SETUP(16)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ba85e9ea388..f5590c81d95f 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -22,10 +22,13 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
select KVM_ARM_VGIC
select KVM_ARM_TIMER
+ select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select SRCU
---help---
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 32a096174b94..4e6e09ee4033 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -21,7 +21,9 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
index 124418d17049..f87d8fbaa48d 100644
--- a/arch/arm64/kvm/emulate.c
+++ b/arch/arm64/kvm/emulate.c
@@ -22,6 +22,7 @@
*/
#include <linux/kvm_host.h>
+#include <asm/esr.h>
#include <asm/kvm_emulate.h>
/*
@@ -55,8 +56,8 @@ static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
- if (esr & ESR_EL2_CV)
- return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+ if (esr & ESR_ELx_CV)
+ return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
return -1;
}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 34b8bd0711e9..524fa25671fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -21,17 +21,25 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
+
+#include <asm/esr.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
+ trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+ kvm_vcpu_hvc_get_imm(vcpu));
+
ret = kvm_psci_call(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
@@ -61,10 +69,13 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+ if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+ trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
kvm_vcpu_on_spin(vcpu);
- else
+ } else {
+ trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
kvm_vcpu_block(vcpu);
+ }
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
@@ -72,29 +83,30 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
static exit_handle_fn arm_exit_handlers[] = {
- [ESR_EL2_EC_WFI] = kvm_handle_wfx,
- [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
- [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
- [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
- [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
- [ESR_EL2_EC_HVC32] = handle_hvc,
- [ESR_EL2_EC_SMC32] = handle_smc,
- [ESR_EL2_EC_HVC64] = handle_hvc,
- [ESR_EL2_EC_SMC64] = handle_smc,
- [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
- [ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
- [ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_WFx] = kvm_handle_wfx,
+ [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
+ [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
+ [ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32,
+ [ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
+ [ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
+ [ESR_ELx_EC_HVC32] = handle_hvc,
+ [ESR_ELx_EC_SMC32] = handle_smc,
+ [ESR_ELx_EC_HVC64] = handle_hvc,
+ [ESR_ELx_EC_SMC64] = handle_smc,
+ [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
- u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x\n",
- (unsigned int)kvm_vcpu_get_hsr(vcpu));
+ kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
+ hsr, esr_get_class_string(hsr));
BUG();
}
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..5befd010e232 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,15 +17,16 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/memory.h>
#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/memory.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1014,6 +1015,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
+ lsr x1, x1, #12
tlbi ipas2e1is, x1
/*
* We have to ensure completion of the invalidation at Stage-2,
@@ -1030,6 +1032,28 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
+/**
+ * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
+ * @struct kvm *kvm - pointer to kvm structure
+ *
+ * Invalidates all Stage 1 and 2 TLB entries for current VMID.
+ */
+ENTRY(__kvm_tlb_flush_vmid)
+ dsb ishst
+
+ kern_hyp_va x0
+ ldr x2, [x0, #KVM_VTTBR]
+ msr vttbr_el2, x2
+ isb
+
+ tlbi vmalls12e1is
+ dsb ish
+ isb
+
+ msr vttbr_el2, xzr
+ ret
+ENDPROC(__kvm_tlb_flush_vmid)
+
ENTRY(__kvm_flush_vm_context)
dsb ishst
tlbi alle1is
@@ -1140,9 +1164,9 @@ el1_sync: // Guest trapped into EL2
push x2, x3
mrs x1, esr_el2
- lsr x2, x1, #ESR_EL2_EC_SHIFT
+ lsr x2, x1, #ESR_ELx_EC_SHIFT
- cmp x2, #ESR_EL2_EC_HVC64
+ cmp x2, #ESR_ELx_EC_HVC64
b.ne el1_trap
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
@@ -1177,13 +1201,13 @@ el1_trap:
* x1: ESR
* x2: ESR_EC
*/
- cmp x2, #ESR_EL2_EC_DABT
- mov x0, #ESR_EL2_EC_IABT
+ cmp x2, #ESR_ELx_EC_DABT_LOW
+ mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
b.ne 1f // Not an abort we care about
/* This is an abort. Check for permission fault */
- and x2, x1, #ESR_EL2_FSC_TYPE
+ and x2, x1, #ESR_ELx_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 81a02a8762b0..f02530e726f6 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -118,27 +118,27 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
* instruction set. Report an external synchronous abort.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
- esr |= ESR_EL1_IL;
+ esr |= ESR_ELx_IL;
/*
* Here, the guest runs in AArch64 mode when in EL1. If we get
* an AArch32 fault, it means we managed to trap an EL0 fault.
*/
if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
- esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+ esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
else
- esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+ esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
if (!is_iabt)
- esr |= ESR_EL1_EC_DABT_EL0;
+ esr |= ESR_ELx_EC_DABT_LOW;
- vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+ vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
static void inject_undef64(struct kvm_vcpu *vcpu)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+ u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
*vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
@@ -151,7 +151,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
* set.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
- esr |= ESR_EL1_IL;
+ esr |= ESR_ELx_IL;
vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
- vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..c370b4014799 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,17 +20,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/mm.h>
#include <linux/kvm_host.h>
+#include <linux/mm.h>
#include <linux/uaccess.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
+
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
#include <trace/events/kvm.h>
#include "sys_regs.h"
@@ -69,68 +72,31 @@ static u32 get_ccsidr(u32 csselr)
return ccsidr;
}
-static void do_dc_cisw(u32 val)
-{
- asm volatile("dc cisw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
- asm volatile("dc csw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- do_dc_cisw(val);
- break;
-
- case 10: /* DCCSW */
- do_dc_csw(val);
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
@@ -143,24 +109,27 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
}
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
+ * Trap handler for the GICv3 SGI generation system register.
+ * Forward the request to the VGIC emulation.
+ * The cp15_64 code makes sure this automatically works
+ * for both AArch64 and AArch32 accesses.
*/
-static bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
- access_vm_reg(vcpu, p, r);
+ u64 val;
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr_el2 &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ val = *vcpu_reg(vcpu, p->Rt);
+ vgic_v3_dispatch_sgi(vcpu, val);
return true;
}
@@ -252,10 +221,19 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
+ u64 mpidr;
+
/*
- * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+ * Map the vcpu_id into the first three affinity level fields of
+ * the MPIDR. We limit the number of VCPUs in level 0 due to a
+ * limitation to 16 CPUs in that level in the ICC_SGIxR registers
+ * of the GICv3 to be able to address each CPU directly when
+ * sending IPIs.
*/
- vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+ mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+ vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
@@ -377,7 +355,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
@@ -425,6 +403,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
NULL, reset_val, VBAR_EL1, 0 },
+ /* ICC_SGI1R_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
+ access_gic_sgi },
/* ICC_SRE_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
trap_raz_wi },
@@ -657,7 +638,9 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
- { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+ { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
+
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
@@ -704,6 +687,7 @@ static const struct sys_reg_desc cp15_regs[] = {
static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
@@ -815,12 +799,12 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
int cp;
switch(hsr_ec) {
- case ESR_EL2_EC_CP15_32:
- case ESR_EL2_EC_CP15_64:
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
cp = 15;
break;
- case ESR_EL2_EC_CP14_MR:
- case ESR_EL2_EC_CP14_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_64:
cp = 14;
break;
default:
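
The new reset_mpidr() packs the VCPU number into the MPIDR affinity fields -- Aff0 in bits [7:0] (limited to 16 values), Aff1 in bits [15:8] and Aff2 in bits [23:16] -- so GICv3 ICC_SGI1R targeting can address each VCPU directly. A small worked example of the packing, under the assumption that MPIDR_LEVEL_SHIFT(n) evaluates to 0, 8 and 16 for n = 0, 1, 2:

/*
 * Illustration: vcpu_id 0x153 (339) becomes
 *   Aff0 = 0x153 & 0x0f         = 0x03
 *   Aff1 = (0x153 >> 4) & 0xff  = 0x15
 *   Aff2 = (0x153 >> 12) & 0xff = 0x00
 * so MPIDR_EL1 reads (1ULL << 31) | 0x00001503.
 */
static u64 example_mpidr(unsigned int vcpu_id)
{
	u64 mpidr;

	mpidr  = (u64)(vcpu_id & 0x0f);			/* Aff0 */
	mpidr |= (u64)((vcpu_id >> 4) & 0xff) << 8;	/* Aff1 */
	mpidr |= (u64)((vcpu_id >> 12) & 0xff) << 16;	/* Aff2 */

	return (1ULL << 31) | mpidr;			/* RES1 bit 31 */
}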
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
new file mode 100644
index 000000000000..157416e963f2
--- /dev/null
+++ b/arch/arm64/kvm/trace.h
@@ -0,0 +1,55 @@
+#if !defined(_TRACE_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM64_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+TRACE_EVENT(kvm_wfx_arm64,
+ TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
+ TP_ARGS(vcpu_pc, is_wfe),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, vcpu_pc)
+ __field(bool, is_wfe)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->is_wfe = is_wfe;
+ ),
+
+ TP_printk("guest executed wf%c at: 0x%08lx",
+ __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_hvc_arm64,
+ TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
+ TP_ARGS(vcpu_pc, r0, imm),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, vcpu_pc)
+ __field(unsigned long, r0)
+ __field(unsigned long, imm)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->r0 = r0;
+ __entry->imm = imm;
+ ),
+
+ TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ __entry->vcpu_pc, __entry->r0, __entry->imm)
+);
+
+#endif /* _TRACE_ARM64_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
index d16046999e06..617a012a0107 100644
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -148,17 +148,18 @@
* x0: Register pointing to VCPU struct
*/
.macro restore_vgic_v3_state
- // Disable SRE_EL1 access. Necessary, otherwise
- // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
- msr_s ICC_SRE_EL1, xzr
- isb
-
// Compute the address of struct vgic_cpu
add x3, x0, #VCPU_VGIC_CPU
// Restore all interesting registers
ldr w4, [x3, #VGIC_V3_CPU_HCR]
ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+ ldr w25, [x3, #VGIC_V3_CPU_SRE]
+
+ msr_s ICC_SRE_EL1, x25
+
+ // make sure SRE is valid before writing the other registers
+ isb
msr_s ICH_HCR_EL2, x4
msr_s ICH_VMCR_EL2, x5
@@ -244,9 +245,12 @@
dsb sy
// Prevent the guest from touching the GIC system registers
+ // if SRE isn't enabled for GICv3 emulation
+ cbnz x25, 1f
mrs_s x5, ICC_SRE_EL2
and x5, x5, #~ICC_SRE_EL2_ENABLE
msr_s ICC_SRE_EL2, x5
+1:
.endm
ENTRY(__save_vgic_v3_state)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d92094203913..0a24b9b8c698 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -134,16 +134,17 @@ static void __dma_free_coherent(struct device *dev, size_t size,
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+static void *__dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
struct page *page;
void *ptr, *coherent_ptr;
+ bool coherent = is_device_dma_coherent(dev);
size = PAGE_ALIGN(size);
- if (!(flags & __GFP_WAIT)) {
+ if (!coherent && !(flags & __GFP_WAIT)) {
struct page *page = NULL;
void *addr = __alloc_from_pool(size, &page);
@@ -151,13 +152,16 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
*dma_handle = phys_to_dma(dev, page_to_phys(page));
return addr;
-
}
ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
+ /* no need for non-cacheable mapping if coherent */
+ if (coherent)
+ return ptr;
+
/* remove any dirty cache lines on the kernel alias */
__dma_flush_range(ptr, ptr + size);
@@ -179,15 +183,17 @@ no_mem:
return NULL;
}
-static void __dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static void __dma_free(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
- if (__free_from_pool(vaddr, size))
- return;
- vunmap(vaddr);
+ if (!is_device_dma_coherent(dev)) {
+ if (__free_from_pool(vaddr, size))
+ return;
+ vunmap(vaddr);
+ }
__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
@@ -199,7 +205,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
dma_addr_t dev_addr;
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
return dev_addr;
}
@@ -209,7 +216,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
@@ -221,9 +229,10 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int i, ret;
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- for_each_sg(sgl, sg, ret, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, ret, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
return ret;
}
@@ -236,9 +245,10 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
@@ -246,7 +256,8 @@ static void __swiotlb_sync_single_for_cpu(struct device *dev,
dma_addr_t dev_addr, size_t size,
enum dma_data_direction dir)
{
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
@@ -255,7 +266,8 @@ static void __swiotlb_sync_single_for_device(struct device *dev,
enum dma_data_direction dir)
{
swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
@@ -265,9 +277,10 @@ static void __swiotlb_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
@@ -279,9 +292,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
int i;
swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
- for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
}
/* vma->vm_page_prot must be set appropriately before calling this function */
@@ -308,28 +322,20 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-static int __swiotlb_mmap_noncoherent(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
-{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
- return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-static int __swiotlb_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+static int __swiotlb_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
- /* Just use whatever page_prot attributes were specified */
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-struct dma_map_ops noncoherent_swiotlb_dma_ops = {
- .alloc = __dma_alloc_noncoherent,
- .free = __dma_free_noncoherent,
- .mmap = __swiotlb_mmap_noncoherent,
+static struct dma_map_ops swiotlb_dma_ops = {
+ .alloc = __dma_alloc,
+ .free = __dma_free,
+ .mmap = __swiotlb_mmap,
.map_page = __swiotlb_map_page,
.unmap_page = __swiotlb_unmap_page,
.map_sg = __swiotlb_map_sg_attrs,
@@ -341,24 +347,6 @@ struct dma_map_ops noncoherent_swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
-EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
-
-struct dma_map_ops coherent_swiotlb_dma_ops = {
- .alloc = __dma_alloc_coherent,
- .free = __dma_free_coherent,
- .mmap = __swiotlb_mmap_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
extern int swiotlb_late_init_with_default_size(size_t default_size);
@@ -427,7 +415,7 @@ static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
- dma_ops = &noncoherent_swiotlb_dma_ops;
+ dma_ops = &swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index cf33f33333cc..74c256744b25 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -14,13 +14,18 @@
* of the License.
*/
#include <linux/debugfs.h>
+#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
+#include <asm/memory.h>
#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
#define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS)
@@ -36,10 +41,10 @@ enum address_markers_idx {
VMEMMAP_START_NR,
VMEMMAP_END_NR,
#endif
- PCI_START_NR,
- PCI_END_NR,
FIXADDR_START_NR,
FIXADDR_END_NR,
+ PCI_START_NR,
+ PCI_END_NR,
MODULES_START_NR,
MODUELS_END_NR,
KERNEL_SPACE_NR,
@@ -52,10 +57,10 @@ static struct addr_marker address_markers[] = {
{ 0, "vmemmap start" },
{ 0, "vmemmap end" },
#endif
- { (unsigned long) PCI_IOBASE, "PCI I/O start" },
- { (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" },
{ FIXADDR_START, "Fixmap start" },
{ FIXADDR_TOP, "Fixmap end" },
+ { PCI_IO_START, "PCI I/O start" },
+ { PCI_IO_END, "PCI I/O end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ PAGE_OFFSET, "Kernel Mapping" },
@@ -245,10 +250,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
- if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd))
+ if (pmd_none(*pmd) || pmd_sect(*pmd)) {
note_page(st, addr, 3, pmd_val(*pmd));
- else
+ } else {
+ BUG_ON(pmd_bad(*pmd));
walk_pte(st, pmd, addr);
+ }
}
}
@@ -260,10 +267,12 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
- if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud))
+ if (pud_none(*pud) || pud_sect(*pud)) {
note_page(st, addr, 2, pud_val(*pud));
- else
+ } else {
+ BUG_ON(pud_bad(*pud));
walk_pmd(st, pud, addr);
+ }
}
}
@@ -275,10 +284,12 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long st
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = start + i * PGDIR_SIZE;
- if (pgd_none(*pgd) || pgd_bad(*pgd))
+ if (pgd_none(*pgd)) {
note_page(st, addr, 1, pgd_val(*pgd));
- else
+ } else {
+ BUG_ON(pgd_bad(*pgd));
walk_pud(st, pgd, addr);
+ }
}
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c11cd27ca8f5..96da13167d4a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -219,7 +219,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (esr & ESR_LNX_EXEC) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
+ } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 023747bf4dd7..2de9d2e59d96 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
}
#endif
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bac492c12fcc..71145f952070 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@
#include <linux/efi.h>
#include <asm/fixmap.h>
+#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -136,10 +137,29 @@ static void arm64_memory_present(void)
}
#endif
+static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
+
+/*
+ * Limit the memory size that was specified via FDT.
+ */
+static int __init early_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = memparse(p, &p) & PAGE_MASK;
+ pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);
+
+ return 0;
+}
+early_param("mem", early_mem);
+
void __init arm64_memblock_init(void)
{
phys_addr_t dma_phys_limit = 0;
+ memblock_enforce_memory_limit(memory_limit);
+
/*
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
@@ -277,8 +297,8 @@ void __init mem_init(void)
" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
" 0x%16lx - 0x%16lx (%6ld MB actual)\n"
#endif
- " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
+ " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
" modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
" memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
" .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
@@ -291,8 +311,8 @@ void __init mem_init(void)
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif
- MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M),
MLK(FIXADDR_START, FIXADDR_TOP),
+ MLM(PCI_IO_START, PCI_IO_END),
MLM(MODULES_VADDR, MODULES_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory),
MLK_ROUNDUP(__init_begin, __init_end),
@@ -325,6 +345,7 @@ void __init mem_init(void)
void free_initmem(void)
{
+ fixup_init();
free_initmem_default(0);
free_alternatives_memory();
}
@@ -335,14 +356,8 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
+ if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
- }
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index cbb99c8f1e04..01e88c8bcab0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -62,6 +62,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
if (!area)
return NULL;
addr = (unsigned long)area->addr;
+ area->phys_addr = phys_addr;
err = ioremap_page_range(addr, addr + size, phys_addr, prot);
if (err) {
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 50c3351df9c7..ef47d99b5cbc 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1 +1,3 @@
extern void __init bootmem_init(void);
+
+void fixup_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6032f3e3056a..c6daaf6c6f97 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,8 @@
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
@@ -45,80 +47,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-struct cachepolicy {
- const char policy[16];
- u64 mair;
- u64 tcr;
-};
-
-static struct cachepolicy cache_policies[] __initdata = {
- {
- .policy = "uncached",
- .mair = 0x44, /* inner, outer non-cacheable */
- .tcr = TCR_IRGN_NC | TCR_ORGN_NC,
- }, {
- .policy = "writethrough",
- .mair = 0xaa, /* inner, outer write-through, read-allocate */
- .tcr = TCR_IRGN_WT | TCR_ORGN_WT,
- }, {
- .policy = "writeback",
- .mair = 0xee, /* inner, outer write-back, read-allocate */
- .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
- }
-};
-
-/*
- * These are useful for identifying cache coherency problems by allowing the
- * cache or the cache and writebuffer to be turned off. It changes the Normal
- * memory caching attributes in the MAIR_EL1 register.
- */
-static int __init early_cachepolicy(char *p)
-{
- int i;
- u64 tmp;
-
- for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
- int len = strlen(cache_policies[i].policy);
-
- if (memcmp(p, cache_policies[i].policy, len) == 0)
- break;
- }
- if (i == ARRAY_SIZE(cache_policies)) {
- pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
- return 0;
- }
-
- flush_cache_all();
-
- /*
- * Modify MT_NORMAL attributes in MAIR_EL1.
- */
- asm volatile(
- " mrs %0, mair_el1\n"
- " bfi %0, %1, %2, #8\n"
- " msr mair_el1, %0\n"
- " isb\n"
- : "=&r" (tmp)
- : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
-
- /*
- * Modify TCR PTW cacheability attributes.
- */
- asm volatile(
- " mrs %0, tcr_el1\n"
- " bic %0, %0, %2\n"
- " orr %0, %0, %1\n"
- " msr tcr_el1, %0\n"
- " isb\n"
- : "=&r" (tmp)
- : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
-
- flush_cache_all();
-
- return 0;
-}
-early_param("cachepolicy", early_cachepolicy);
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -133,19 +61,42 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
void *ptr = __va(memblock_alloc(sz, sz));
+ BUG_ON(!ptr);
memset(ptr, 0, sz);
return ptr;
}
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+/*
+ * remap a PMD into pages
+ */
+static void split_pmd(pmd_t *pmd, pte_t *pte)
+{
+ unsigned long pfn = pmd_pfn(*pmd);
+ int i = 0;
+
+ do {
+ /*
+ * Need to have the least restrictive permissions available;
+ * permissions will be fixed up later.
+ */
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ pfn++;
+ } while (pte++, i++, i < PTRS_PER_PTE);
+}
+
+static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
- pgprot_t prot)
+ pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
pte_t *pte;
- if (pmd_none(*pmd)) {
- pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ if (pmd_none(*pmd) || pmd_sect(*pmd)) {
+ pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+ if (pmd_sect(*pmd))
+ split_pmd(pmd, pte);
__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+ flush_tlb_all();
}
BUG_ON(pmd_bad(*pmd));
@@ -156,30 +107,42 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- int map_io)
+void split_pud(pud_t *old_pud, pmd_t *pmd)
+{
+ unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
+ pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
+ int i = 0;
+
+ do {
+ set_pmd(pmd, __pmd(addr | prot));
+ addr += PMD_SIZE;
+ } while (pmd++, i++, i < PTRS_PER_PMD);
+}
+
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
pmd_t *pmd;
unsigned long next;
- pmdval_t prot_sect;
- pgprot_t prot_pte;
-
- if (map_io) {
- prot_sect = PROT_SECT_DEVICE_nGnRE;
- prot_pte = __pgprot(PROT_DEVICE_nGnRE);
- } else {
- prot_sect = PROT_SECT_NORMAL_EXEC;
- prot_pte = PAGE_KERNEL_EXEC;
- }
/*
* Check for initial section mappings in the pgd/pud and remove them.
*/
- if (pud_none(*pud) || pud_bad(*pud)) {
- pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
- pud_populate(&init_mm, pud, pmd);
+ if (pud_none(*pud) || pud_sect(*pud)) {
+ pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+ if (pud_sect(*pud)) {
+ /*
+ * Need to have the existing 1G of mappings continue
+ * to be present.
+ */
+ split_pud(pud, pmd);
+ }
+ pud_populate(mm, pud, pmd);
+ flush_tlb_all();
}
+ BUG_ON(pud_bad(*pud));
pmd = pmd_offset(pud, addr);
do {
@@ -187,31 +150,51 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd = *pmd;
- set_pmd(pmd, __pmd(phys | prot_sect));
+ set_pmd(pmd, __pmd(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
*/
- if (!pmd_none(old_pmd))
+ if (!pmd_none(old_pmd)) {
flush_tlb_all();
+ if (pmd_table(old_pmd)) {
+ phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+ if (!WARN_ON_ONCE(slab_is_available()))
+ memblock_free(table, PAGE_SIZE);
+ }
+ }
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot_pte);
+ prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- int map_io)
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+ unsigned long phys)
+{
+ if (PAGE_SHIFT != 12)
+ return false;
+
+ if (((addr | next | phys) & ~PUD_MASK) != 0)
+ return false;
+
+ return true;
+}
+
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
- pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
- pgd_populate(&init_mm, pgd, pud);
+ pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
+ pgd_populate(mm, pgd, pud);
}
BUG_ON(pgd_bad(*pgd));
@@ -222,10 +205,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (!map_io && (PAGE_SHIFT == 12) &&
- ((addr | next | phys) & ~PUD_MASK) == 0) {
+ if (use_1G_block(addr, next, phys)) {
pud_t old_pud = *pud;
- set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+ set_pud(pud, __pud(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
@@ -235,12 +218,15 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Look up the old pmd table and free it.
*/
if (!pud_none(old_pud)) {
- phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
- memblock_free(table, PAGE_SIZE);
flush_tlb_all();
+ if (pud_table(old_pud)) {
+ phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+ if (!WARN_ON_ONCE(slab_is_available()))
+ memblock_free(table, PAGE_SIZE);
+ }
}
} else {
- alloc_init_pmd(pud, addr, next, phys, map_io);
+ alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -250,9 +236,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- int map_io)
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+ phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
unsigned long addr, length, end, next;
@@ -262,31 +249,95 @@ static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, map_io);
+ alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
- phys_addr_t size)
+static void *late_alloc(unsigned long size)
+{
+ void *ptr;
+
+ BUG_ON(size > PAGE_SIZE);
+ ptr = (void *)__get_free_page(PGALLOC_GFP);
+ BUG_ON(!ptr);
+ return ptr;
+}
+
+static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
- __create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+ __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+ size, prot, early_alloc);
+}
+
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot)
+{
+ __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+ late_alloc);
}
-void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
- if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
- pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
return;
}
- __create_mapping(&idmap_pg_dir[pgd_index(addr)],
- addr, addr, size, map_io);
+
+ return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+ phys, virt, size, prot, late_alloc);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+ /*
+ * Set up the executable regions using the existing section mappings
+ * for now. This will get more fine-grained later, once all memory
+ * is mapped.
+ */
+ unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+
+ if (end < kernel_x_start) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL);
+ } else if (start >= kernel_x_end) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL);
+ } else {
+ if (start < kernel_x_start)
+ create_mapping(start, __phys_to_virt(start),
+ kernel_x_start - start,
+ PAGE_KERNEL);
+ create_mapping(kernel_x_start,
+ __phys_to_virt(kernel_x_start),
+ kernel_x_end - kernel_x_start,
+ PAGE_KERNEL_EXEC);
+ if (kernel_x_end < end)
+ create_mapping(kernel_x_end,
+ __phys_to_virt(kernel_x_end),
+ end - kernel_x_end,
+ PAGE_KERNEL);
+ }
+
}
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+ create_mapping(start, __phys_to_virt(start), end - start,
+ PAGE_KERNEL_EXEC);
+}
+#endif
static void __init map_mem(void)
{
@@ -332,14 +383,53 @@ static void __init map_mem(void)
memblock_set_current_limit(limit);
}
#endif
-
- create_mapping(start, __phys_to_virt(start), end - start);
+ __map_memblock(start, end);
}
/* Limit no longer required. */
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
+void __init fixup_executable(void)
+{
+#ifdef CONFIG_DEBUG_RODATA
+ /* Now that we are actually fully mapped, make the start/end more fine-grained. */
+ if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+ unsigned long aligned_start = round_down(__pa(_stext),
+ SECTION_SIZE);
+
+ create_mapping(aligned_start, __phys_to_virt(aligned_start),
+ __pa(_stext) - aligned_start,
+ PAGE_KERNEL);
+ }
+
+ if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+ unsigned long aligned_end = round_up(__pa(__init_end),
+ SECTION_SIZE);
+ create_mapping(__pa(__init_end), (unsigned long)__init_end,
+ aligned_end - __pa(__init_end),
+ PAGE_KERNEL);
+ }
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ create_mapping_late(__pa(_stext), (unsigned long)_stext,
+ (unsigned long)_etext - (unsigned long)_stext,
+ PAGE_KERNEL_EXEC | PTE_RDONLY);
+
+}
+#endif
+
+void fixup_init(void)
+{
+ create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+ (unsigned long)__init_end - (unsigned long)__init_begin,
+ PAGE_KERNEL);
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps and sets up the zero page.
@@ -349,13 +439,7 @@ void __init paging_init(void)
void *zero_page;
map_mem();
-
- /*
- * Finally flush the caches and tlb to ensure that we're in a
- * consistent state.
- */
- flush_cache_all();
- flush_tlb_all();
+ fixup_executable();
/* allocate the zero page. */
zero_page = early_alloc(PAGE_SIZE);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4e778b13291b..28eebfb6af76 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -102,7 +102,7 @@ ENTRY(cpu_do_idle)
ret
ENDPROC(cpu_do_idle)
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
/**
* cpu_do_suspend - save CPU registers context
*
@@ -244,14 +244,18 @@ ENTRY(__cpu_setup)
ENDPROC(__cpu_setup)
/*
+ * We set the desired values explicitly, including those of the
+ * reserved bits. The values of bits EE & E0E were set early in
+ * el2_setup and are left untouched below.
+ *
* n n T
* U E WT T UD US IHBS
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
- * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
+ * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
+ * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x000802e2 // clear
- .word 0x0405d11d // set
+ .word 0xfcffffff // clear
+ .word 0x34d5d91d // set
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 4733e38e7ae6..ce0030020c25 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -1,33 +1,28 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATNGW100_MKI=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -52,7 +47,6 @@ CONFIG_INET6_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -60,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -70,28 +63,24 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -128,7 +117,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -140,7 +128,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
@@ -149,8 +136,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 1be0ee31bd91..01ff632249c0 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -1,35 +1,30 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATNGW100_MKI=y
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -54,7 +49,6 @@ CONFIG_INET6_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -62,7 +56,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -72,21 +65,17 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -94,9 +83,9 @@ CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_WM97XX=m
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -144,7 +133,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -156,7 +144,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
@@ -165,8 +152,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 796e536f7bc4..c4021dfd5347 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -1,34 +1,29 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATNGW100_MKI=y
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -53,7 +48,6 @@ CONFIG_INET6_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -61,7 +55,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -71,21 +64,17 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -93,9 +82,9 @@ CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_WM97XX=m
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -143,7 +132,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -155,7 +143,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
@@ -164,8 +151,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 6838781e966f..ffcc28df9dbc 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -1,14 +1,11 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_SLUB_DEBUG is not set
CONFIG_MODULES=y
@@ -17,8 +14,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_OWNERSHIP_TRACE is not set
-CONFIG_PM=y
# CONFIG_SUSPEND is not set
+CONFIG_PM=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -37,7 +34,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_HIDP=m
@@ -49,7 +45,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -57,10 +52,7 @@ CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_DATAFLASH=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -70,9 +62,9 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=m
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -84,9 +76,7 @@ CONFIG_WATCHDOG=y
CONFIG_AT32AP700X_WDT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_MIXER_OSS=m
@@ -129,10 +119,7 @@ CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_CIFS_STATS=y
CONFIG_CIFS_WEAK_PW_HASH=y
@@ -142,10 +129,8 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRC_CCITT=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 97fe1b399b06..04962641c936 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -1,33 +1,28 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATNGW100_MKII=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -52,7 +47,6 @@ CONFIG_INET6_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -60,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -72,28 +65,24 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -130,7 +119,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -142,7 +130,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
@@ -151,8 +138,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index a176d24467e9..89c2cda573da 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -1,36 +1,31 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATNGW100_MKII=y
CONFIG_BOARD_ATNGW100_MKII_LCD=y
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -55,7 +50,6 @@ CONFIG_INET6_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -63,7 +57,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -75,21 +68,17 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -97,9 +86,9 @@ CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_WM97XX=m
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -147,7 +136,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -159,7 +147,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
@@ -168,8 +155,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index d1bf6dcfc47d..1b4d4a87a356 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -1,35 +1,30 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATNGW100_MKII=y
CONFIG_BOARD_ATNGW100_MKII_LCD=y
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -54,7 +49,6 @@ CONFIG_INET6_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -62,7 +56,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -74,21 +67,17 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -96,9 +85,9 @@ CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_WM97XX=m
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -146,7 +135,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -158,7 +146,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
@@ -167,8 +154,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index b056820eef33..9b8b52e54b79 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -1,32 +1,27 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -54,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -63,7 +57,6 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=m
# CONFIG_SCSI_PROC_FS is not set
@@ -75,14 +68,11 @@ CONFIG_ATA=m
CONFIG_PATA_AT32=m
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
CONFIG_INPUT=m
CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
@@ -91,10 +81,10 @@ CONFIG_KEYBOARD_GPIO=m
CONFIG_MOUSE_GPIO=m
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -108,10 +98,8 @@ CONFIG_WATCHDOG=y
CONFIG_AT32AP700X_WDT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_LTV350QV=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_MIXER_OSS=m
@@ -119,7 +107,6 @@ CONFIG_SND_PCM_OSS=m
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_AT73C213=m
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
@@ -147,7 +134,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -159,15 +145,13 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 0cd23a303da1..ccce1a0b7917 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -1,33 +1,28 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATSTK1003=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,7 +38,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -52,7 +46,6 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=m
# CONFIG_SCSI_PROC_FS is not set
@@ -63,12 +56,10 @@ CONFIG_ATA=m
# CONFIG_SATA_PMP is not set
CONFIG_PATA_AT32=m
CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
CONFIG_INPUT=m
CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
@@ -77,10 +68,10 @@ CONFIG_KEYBOARD_GPIO=m
CONFIG_MOUSE_GPIO=m
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -98,7 +89,6 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_AT73C213=m
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
@@ -126,7 +116,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -142,8 +131,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index ac1041f5f85a..e64288fc794d 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -1,33 +1,28 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATSTK1004=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,7 +38,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -52,7 +46,6 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=m
# CONFIG_SCSI_PROC_FS is not set
@@ -63,12 +56,10 @@ CONFIG_ATA=m
# CONFIG_SATA_PMP is not set
CONFIG_PATA_AT32=m
CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
CONFIG_INPUT=m
CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
@@ -77,10 +68,10 @@ CONFIG_KEYBOARD_GPIO=m
CONFIG_MOUSE_GPIO=m
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -94,10 +85,8 @@ CONFIG_WATCHDOG=y
CONFIG_AT32AP700X_WDT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_LTV350QV=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
@@ -125,7 +114,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -141,8 +129,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index ea4f670cb995..7d669f79ff74 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -1,33 +1,28 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_ATSTK1006=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_AVR32_AT32AP_CPUFREQ=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -55,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -66,7 +60,6 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=m
# CONFIG_SCSI_PROC_FS is not set
@@ -78,14 +71,11 @@ CONFIG_ATA=m
CONFIG_PATA_AT32=m
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
CONFIG_INPUT=m
CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
@@ -94,10 +84,10 @@ CONFIG_KEYBOARD_GPIO=m
CONFIG_MOUSE_GPIO=m
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -111,10 +101,8 @@ CONFIG_WATCHDOG=y
CONFIG_AT32AP700X_WDT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_LTV350QV=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_MIXER_OSS=m
@@ -122,7 +110,6 @@ CONFIG_SND_PCM_OSS=m
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_AT73C213=m
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
@@ -150,7 +137,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-# CONFIG_EXT4_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_MSDOS_FS=m
@@ -162,15 +148,13 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index b3eb67dc05ac..560c52f87d45 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -1,28 +1,23 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_FAVR_32=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
@@ -37,7 +32,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
@@ -59,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -70,14 +63,11 @@ CONFIG_BLK_DEV_RAM=m
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
CONFIG_INPUT_MOUSEDEV=m
CONFIG_INPUT_EVDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
@@ -88,10 +78,10 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=m
# CONFIG_SERIO is not set
# CONFIG_CONSOLE_TRANSLATIONS is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -105,12 +95,10 @@ CONFIG_WATCHDOG=y
CONFIG_AT32AP700X_WDT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_PWM=m
CONFIG_SOUND=m
CONFIG_SOUND_PRIME=m
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB_GADGET=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
@@ -145,13 +133,11 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
# CONFIG_JFFS2_FS_WRITEBUFFER is not set
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index 4912f0aadaa1..d57fadb9e6b6 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -1,26 +1,22 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
-# CONFIG_KPROBES is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_HAMMERHEAD=y
CONFIG_BOARD_HAMMERHEAD_USB=y
CONFIG_BOARD_HAMMERHEAD_LCD=y
@@ -53,13 +49,11 @@ CONFIG_INET_IPCOMP=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NETFILTER_XTABLES=y
-# CONFIG_IP_NF_TARGET_ULOG is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -70,16 +64,13 @@ CONFIG_ATMEL_TCLIB=y
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_INPUT_FF_MEMLESS=m
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
@@ -114,10 +105,8 @@ CONFIG_HID_MONTEREY=m
CONFIG_HID_PANTHERLORD=m
CONFIG_HID_PETALYNX=m
CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
CONFIG_USB=m
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=m
CONFIG_USB_ISP116X_HCD=m
CONFIG_USB_STORAGE=m
@@ -140,16 +129,13 @@ CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_ARC4=m
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index b9ef4cc85d08..e6a9cb7d574e 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -1,22 +1,19 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_MERISC=y
CONFIG_AP700X_32_BIT_SMC=y
# CONFIG_OWNERSHIP_TRACE is not set
@@ -39,14 +36,10 @@ CONFIG_INET_IPCOMP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -60,10 +53,7 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -74,11 +64,10 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO is not set
# CONFIG_CONSOLE_TRANSLATIONS is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -89,13 +78,9 @@ CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
-CONFIG_DISPLAY_SUPPORT=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_MMC=y
@@ -121,12 +106,10 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_WBUF_VERIFY=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index d630e089dd32..49c7e890af7b 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -1,25 +1,21 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_BASE_FULL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BOARD_MIMC200=y
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_PM=y
CONFIG_CPU_FREQ=y
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
@@ -50,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -60,17 +55,14 @@ CONFIG_ATMEL_TCLIB=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -109,15 +101,13 @@ CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_FRAME_POINTER=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_ARC4=y
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index 4beff97e2033..35800664076e 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -30,7 +30,7 @@
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -86,9 +86,6 @@ extern struct page *empty_zero_page;
#define _PAGE_BIT_PRESENT 10
#define _PAGE_BIT_ACCESSED 11 /* software: page was accessed */
-/* The following flags are only valid when !PRESENT */
-#define _PAGE_BIT_FILE 0 /* software: pagecache or swap? */
-
#define _PAGE_WT (1 << _PAGE_BIT_WT)
#define _PAGE_DIRTY (1 << _PAGE_BIT_DIRTY)
#define _PAGE_EXECUTE (1 << _PAGE_BIT_EXECUTE)
@@ -101,7 +98,6 @@ extern struct page *empty_zero_page;
/* Software flags */
#define _PAGE_ACCESSED (1 << _PAGE_BIT_ACCESSED)
#define _PAGE_PRESENT (1 << _PAGE_BIT_PRESENT)
-#define _PAGE_FILE (1 << _PAGE_BIT_FILE)
/*
* Page types, i.e. sizes. _PAGE_TYPE_NONE corresponds to what is
@@ -210,14 +206,6 @@ static inline int pte_special(pte_t pte)
return 0;
}
-/*
- * The following only work if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & _PAGE_FILE;
-}
-
/* Mutator functions for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -329,7 +317,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
* Encode and decode a swap entry
*
* Constraints:
- * _PAGE_FILE at bit 0
* _PAGE_TYPE_* at bits 2-3 (for emulating _PAGE_PROTNONE)
* _PAGE_PRESENT at bit 10
*
@@ -346,18 +333,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Encode and decode a nonlinear file mapping entry. We have to
- * preserve _PAGE_FILE and _PAGE_PRESENT here. _PAGE_TYPE_* isn't
- * necessary, since _PAGE_FILE implies !_PAGE_PROTNONE (?)
- */
-#define PTE_FILE_MAX_BITS 30
-#define pte_to_pgoff(pte) (((pte_val(pte) >> 1) & 0x1ff) \
- | ((pte_val(pte) >> 11) << 9))
-#define pgoff_to_pte(off) ((pte_t) { ((((off) & 0x1ff) << 1) \
- | (((off) >> 9) << 11) \
- | _PAGE_FILE) })
-
typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index a978f3fe7c25..d56afa99a514 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -30,7 +30,6 @@ struct thread_info {
saved by debug handler
when setting up
trampoline */
- struct restart_block restart_block;
__u8 supervisor_stack[0];
};
@@ -41,9 +40,6 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall \
- } \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index c1eb080e45fe..2011bee3f252 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -10,7 +10,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 284
+#define NR_syscalls 321
/* Old stuff */
#define __IGNORE_uselib
diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h
index 8822bf46ddc6..bbe2fba565cd 100644
--- a/arch/avr32/include/uapi/asm/unistd.h
+++ b/arch/avr32/include/uapi/asm/unistd.h
@@ -222,7 +222,6 @@
#define __NR_epoll_wait 207
#define __NR_remap_file_pages 208
#define __NR_set_tid_address 209
-
#define __NR_timer_create 210
#define __NR_timer_settime 211
#define __NR_timer_gettime 212
@@ -238,7 +237,6 @@
/* 222 reserved for tux */
#define __NR_utimes 223
#define __NR_fadvise64_64 224
-
#define __NR_cacheflush 225
#define __NR_vserver 226
@@ -281,7 +279,6 @@
#define __NR_tee 263
#define __NR_vmsplice 264
#define __NR_epoll_pwait 265
-
#define __NR_msgget 266
#define __NR_msgsnd 267
#define __NR_msgrcv 268
@@ -294,11 +291,47 @@
#define __NR_shmget 275
#define __NR_shmdt 276
#define __NR_shmctl 277
-
#define __NR_utimensat 278
#define __NR_signalfd 279
/* 280 was __NR_timerfd */
#define __NR_eventfd 281
#define __NR_setns 283
+#define __NR_pread64 284
+#define __NR_pwrite64 285
+#define __NR_timerfd_create 286
+#define __NR_fallocate 287
+#define __NR_timerfd_settime 288
+#define __NR_timerfd_gettime 289
+#define __NR_signalfd4 290
+#define __NR_eventfd2 291
+#define __NR_epoll_create1 292
+#define __NR_dup3 293
+#define __NR_pipe2 294
+#define __NR_inotify_init1 295
+#define __NR_preadv 296
+#define __NR_pwritev 297
+#define __NR_rt_tgsigqueueinfo 298
+#define __NR_perf_event_open 299
+#define __NR_recvmmsg 300
+#define __NR_fanotify_init 301
+#define __NR_fanotify_mark 302
+#define __NR_prlimit64 303
+#define __NR_name_to_handle_at 304
+#define __NR_open_by_handle_at 305
+#define __NR_clock_adjtime 306
+#define __NR_syncfs 307
+#define __NR_sendmmsg 308
+#define __NR_process_vm_readv 309
+#define __NR_process_vm_writev 310
+#define __NR_kcmp 311
+#define __NR_finit_module 312
+#define __NR_sched_setattr 313
+#define __NR_sched_getattr 314
+#define __NR_renameat2 315
+#define __NR_seccomp 316
+#define __NR_getrandom 317
+#define __NR_memfd_create 318
+#define __NR_bpf 319
+#define __NR_execveat 320
#endif /* _UAPI__ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
index d6a8193a1d2f..e41c84516e5d 100644
--- a/arch/avr32/kernel/asm-offsets.c
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -18,7 +18,6 @@ void foo(void)
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_rar_saved, thread_info, rar_saved);
OFFSET(TI_rsr_saved, thread_info, rsr_saved);
- OFFSET(TI_restart_block, thread_info, restart_block);
BLANK();
OFFSET(TSK_active_mm, task_struct, active_mm);
BLANK();
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 2c9412908024..164efa009e5b 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -19,12 +19,10 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
-
- vfree(module_region);
}
static inline int check_rela(Elf32_Rela *rela, struct module *module,
@@ -291,12 +289,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return ret;
}
-
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *module)
-{
- vfree(module->arch.syminfo);
- module->arch.syminfo = NULL;
-
- return 0;
-}
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index d309fbcc3bd6..8f1c63b9b983 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -69,7 +69,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
frame = (struct rt_sigframe __user *)regs->sp;
pr_debug("SIG return: frame = %p\n", frame);
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index b5fc927cd398..f9c68fab0e2f 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -88,3 +88,39 @@ __sys_sync_file_range:
call sys_sync_file_range
sub sp, -4
popm pc
+
+ .global __sys_fallocate
+ .type __sys_fallocate,@function
+__sys_fallocate:
+ pushm lr
+ st.w --sp, ARG6
+ call sys_fallocate
+ sub sp, -4
+ popm pc
+
+ .global __sys_fanotify_mark
+ .type __sys_fanotify_mark,@function
+__sys_fanotify_mark:
+ pushm lr
+ st.w --sp, ARG6
+ call sys_fanotify_mark
+ sub sp, -4
+ popm pc
+
+ .global __sys_process_vm_readv
+ .type __sys_process_vm_readv,@function
+__sys_process_vm_readv:
+ pushm lr
+ st.w --sp, ARG6
+ call sys_process_vm_readv
+ sub sp, -4
+ popm pc
+
+ .global __sys_process_vm_writev
+ .type __sys_process_vm_writev,@function
+__sys_process_vm_writev:
+ pushm lr
+ st.w --sp, ARG6
+ call sys_process_vm_writev
+ sub sp, -4
+ popm pc
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 017a904180c8..c3b593bfc3b3 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -297,4 +297,41 @@ sys_call_table:
.long sys_eventfd
.long sys_recvmmsg
.long sys_setns
+ .long sys_pread64
+ .long sys_pwrite64 /* 285 */
+ .long sys_timerfd_create
+ .long __sys_fallocate
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
+ .long sys_signalfd4 /* 290 */
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3
+ .long sys_pipe2
+ .long sys_inotify_init1 /* 295 */
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_event_open
+ .long sys_recvmmsg /* 300 */
+ .long sys_fanotify_init
+ .long __sys_fanotify_mark
+ .long sys_prlimit64
+ .long sys_name_to_handle_at
+ .long sys_open_by_handle_at /* 305 */
+ .long sys_clock_adjtime
+ .long sys_syncfs
+ .long sys_sendmmsg
+ .long __sys_process_vm_readv
+ .long __sys_process_vm_writev /* 310 */
+ .long sys_kcmp
+ .long sys_finit_module
+ .long sys_sched_setattr
+ .long sys_sched_getattr
+ .long sys_renameat2 /* 315 */
+ .long sys_seccomp
+ .long sys_getrandom
+ .long sys_memfd_create
+ .long sys_bpf
+ .long sys_execveat /* 320 */
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/mach-at32ap/include/mach/cpu.h b/arch/avr32/mach-at32ap/include/mach/cpu.h
index 16a24b14146c..4181086f4ddc 100644
--- a/arch/avr32/mach-at32ap/include/mach/cpu.h
+++ b/arch/avr32/mach-at32ap/include/mach/cpu.h
@@ -1,5 +1,5 @@
/*
- * AVR32 and (fake) AT91 CPU identification
+ * AVR32 CPU identification
*
* Copyright (C) 2007 Atmel Corporation
*
@@ -20,28 +20,4 @@
# define cpu_is_at32ap7000() (0)
#endif
-/*
- * Since this is AVR32, we will never run on any AT91 CPU. But these
- * definitions may reduce clutter in common drivers.
- */
-#define cpu_is_at91rm9200() (0)
-#define cpu_is_at91sam9xe() (0)
-#define cpu_is_at91sam9260() (0)
-#define cpu_is_at91sam9261() (0)
-#define cpu_is_at91sam9263() (0)
-#define cpu_is_at91sam9rl() (0)
-#define cpu_is_at91sam9g10() (0)
-#define cpu_is_at91sam9g20() (0)
-#define cpu_is_at91sam9g45() (0)
-#define cpu_is_at91sam9g45es() (0)
-#define cpu_is_at91sam9m10() (0)
-#define cpu_is_at91sam9g46() (0)
-#define cpu_is_at91sam9m11() (0)
-#define cpu_is_at91sam9x5() (0)
-#define cpu_is_at91sam9g15() (0)
-#define cpu_is_at91sam9g35() (0)
-#define cpu_is_at91sam9x35() (0)
-#define cpu_is_at91sam9g25() (0)
-#define cpu_is_at91sam9x25() (0)
-
#endif /* __ASM_ARCH_CPU_H */
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/blackfin/include/asm/pgtable.h b/arch/blackfin/include/asm/pgtable.h
index 0b049019eba7..b88a1558b0b9 100644
--- a/arch/blackfin/include/asm/pgtable.h
+++ b/arch/blackfin/include/asm/pgtable.h
@@ -45,11 +45,6 @@ extern void paging_init(void);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_file(pte_t pte)
-{
- return 0;
-}
-
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 55f473bdad36..57c3a8bd583d 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -42,7 +42,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* address limit */
- struct restart_block restart_block;
#ifndef CONFIG_SMP
struct l1_scratch_task_info l1_task_info;
#endif
@@ -58,9 +57,6 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index ef275571d885..f2a8b5493bd3 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -44,7 +44,7 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p
int err = 0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x)
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index dd2af74aff80..7236bdfc71e6 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -429,14 +429,6 @@ static void init_software_driven_irq(void)
bfin_sec_enable_ssi(37);
}
-void bfin_sec_resume(void)
-{
- bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
- udelay(100);
- bfin_write_SEC_GCTL(SEC_GCTL_EN);
- bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
-}
-
void handle_sec_sfi_fault(uint32_t gstat)
{
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index c0eed5b18860..78d4483ba40c 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -50,11 +50,6 @@ extern void paging_init(void);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_file(pte_t pte)
-{
- return 0;
-}
-
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
index d4e9ef87076d..584e253f3217 100644
--- a/arch/c6x/include/asm/thread_info.h
+++ b/arch/c6x/include/asm/thread_info.h
@@ -45,7 +45,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 = preemptable, <0 = BUG */
mm_segment_t addr_limit; /* thread address space */
- struct restart_block restart_block;
};
/*
@@ -61,9 +60,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index fe68226f6c4d..3c4bb5a5c382 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -68,7 +68,7 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a dword boundary,
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 52731e221851..4a03911053ab 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -57,6 +57,10 @@ config HZ
int
default 100
+config NR_CPUS
+ int
+ default "1"
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index 9b32d338838b..74d7ba35120d 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -67,7 +67,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
unsigned long old_usp;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore the regs from &sc->regs (same as sc, since regs is first)
* (sc is already checked for VERIFY_READ since the sigframe was
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 08a313fc2241..4dda9bd6b8fb 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -604,7 +604,7 @@ static ssize_t __sync_serial_read(struct file *file,
struct timespec *ts)
{
unsigned long flags;
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
int avail;
struct sync_port *port;
unsigned char *start;
@@ -1286,7 +1286,7 @@ static void start_dma_out(struct sync_port *port, const char *data, int count)
tr_cfg.tr_en = regk_sser_yes;
REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
- DEBUGTRDMA(pr_info(KERN_INFO "dma s\n"););
+ DEBUGTRDMA(pr_info("dma s\n"););
} else {
DMA_CONTINUE_DATA(port->regi_dmaout);
DEBUGTRDMA(pr_info("dma c\n"););
@@ -1443,7 +1443,7 @@ static inline void handle_rx_packet(struct sync_port *port)
reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
unsigned long flags;
- DEBUGRXINT(pr_info(KERN_INFO "!"));
+ DEBUGRXINT(pr_info("!"));
spin_lock_irqsave(&port->lock, flags);
/* If we overrun the user experience is crap regardless if we
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 78ce3b1c9bcb..870e3e069318 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -59,7 +59,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
unsigned long old_usp;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Restore the registers from &sc->regs. sc is already checked
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index eb74dabbeb96..c17b01abdc3b 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -107,8 +107,10 @@ static short int watchdog_key = 42; /* arbitrary 7 bit number */
* is used though, so set this really low. */
#define WATCHDOG_MIN_FREE_PAGES 8
+#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
/* for reliable NICE_DOGGY behaviour */
static int bite_in_progress;
+#endif
void reset_watchdog(void)
{
@@ -155,7 +157,9 @@ void handle_watchdog_bite(struct pt_regs *regs)
nmi_enter();
oops_in_progress = 1;
+#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
bite_in_progress = 1;
+#endif
printk(KERN_WARNING "Watchdog bite\n");
/* Check if forced restart or unexpected watchdog */
diff --git a/arch/cris/include/arch-v10/arch/mmu.h b/arch/cris/include/arch-v10/arch/mmu.h
index e829e5a37bbe..47a5dd21749d 100644
--- a/arch/cris/include/arch-v10/arch/mmu.h
+++ b/arch/cris/include/arch-v10/arch/mmu.h
@@ -58,7 +58,6 @@ typedef struct
/* Bits the HW doesn't care about but the kernel uses them in SW */
#define _PAGE_PRESENT (1<<4) /* page present in memory */
-#define _PAGE_FILE (1<<5) /* set: pagecache, unset: swap (when !PRESENT) */
#define _PAGE_ACCESSED (1<<5) /* simulated in software using valid bit */
#define _PAGE_MODIFIED (1<<6) /* simulated in software using we bit */
#define _PAGE_READ (1<<7) /* read-enabled */
@@ -105,6 +104,4 @@ typedef struct
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
-#define PTE_FILE_MAX_BITS 26
-
#endif
diff --git a/arch/cris/include/arch-v32/arch/mmu.h b/arch/cris/include/arch-v32/arch/mmu.h
index c1a13e05e963..e6db1616dee5 100644
--- a/arch/cris/include/arch-v32/arch/mmu.h
+++ b/arch/cris/include/arch-v32/arch/mmu.h
@@ -53,7 +53,6 @@ typedef struct
* software.
*/
#define _PAGE_PRESENT (1 << 5) /* Page is present in memory. */
-#define _PAGE_FILE (1 << 6) /* 1=pagecache, 0=swap (when !present) */
#define _PAGE_ACCESSED (1 << 6) /* Simulated in software using valid bit. */
#define _PAGE_MODIFIED (1 << 7) /* Simulated in software using we bit. */
#define _PAGE_READ (1 << 8) /* Read enabled. */
@@ -108,6 +107,4 @@ typedef struct
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
-#define PTE_FILE_MAX_BITS 25
-
#endif /* _ASM_CRIS_ARCH_MMU_H */
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 8b8c86793225..ceefc314d64d 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -67,7 +67,7 @@ extern void paging_init(void);
*/
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/* zero page used for uninitialized stuff */
#ifndef __ASSEMBLY__
@@ -114,7 +114,6 @@ extern unsigned long empty_zero_page;
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte)
@@ -290,9 +289,6 @@ static inline void update_mmu_cache(struct vm_area_struct * vma,
*/
#define pgtable_cache_init() do { } while (0)
-#define pte_to_pgoff(x) (pte_val(x) >> 6)
-#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
-
typedef pte_t *pte_addr_t;
#endif /* __ASSEMBLY__ */
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 55dede18c032..7286db5ed90e 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -38,7 +38,6 @@ struct thread_info {
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
__u8 supervisor_stack[0];
};
@@ -56,9 +55,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
index 914540801c5e..e3530d0f13ee 100644
--- a/arch/cris/include/asm/uaccess.h
+++ b/arch/cris/include/asm/uaccess.h
@@ -1,4 +1,4 @@
-/*
+/*
* Authors: Bjorn Wesen (bjornw@axis.com)
* Hans-Peter Nilsson (hp@axis.com)
*/
@@ -35,7 +35,7 @@
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
/* addr_limit is the maximum accessible address for the task. we misuse
- * the KERNEL_DS and USER_DS values to both assign and compare the
+ * the KERNEL_DS and USER_DS values to both assign and compare the
* addr_limit values through the equally misnamed get/set_fs macros.
* (see above)
*/
@@ -47,12 +47,13 @@
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
-#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
-#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+#define __user_ok(addr, size) \
+ (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
#include <arch/uaccess.h>
@@ -69,8 +70,7 @@
* on our cache or tlb entries.
*/
-struct exception_table_entry
-{
+struct exception_table_entry {
unsigned long insn, fixup;
};
@@ -92,56 +92,74 @@ struct exception_table_entry
* CRIS, we can just do these as direct assignments. (Of course, the
* exception handling means that it's no longer "just"...)
*/
-#define get_user(x,ptr) \
- __get_user_check((x),(ptr),sizeof(*(ptr)))
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
extern long __put_user_bad(void);
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \
- case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \
- case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \
- case 8: __put_user_asm_64(x,ptr,retval); break; \
- default: __put_user_bad(); \
- } \
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ __put_user_asm(x, ptr, retval, "move.b"); \
+ break; \
+ case 2: \
+ __put_user_asm(x, ptr, retval, "move.w"); \
+ break; \
+ case 4: \
+ __put_user_asm(x, ptr, retval, "move.d"); \
+ break; \
+ case 8: \
+ __put_user_asm_64(x, ptr, retval); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
} while (0)
-#define __get_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \
- case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \
- case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \
- case 8: __get_user_asm_64(x,ptr,retval); break; \
- default: (x) = __get_user_bad(); \
- } \
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ __get_user_asm(x, ptr, retval, "move.b"); \
+ break; \
+ case 2: \
+ __get_user_asm(x, ptr, retval, "move.w"); \
+ break; \
+ case 4: \
+ __get_user_asm(x, ptr, retval, "move.d"); \
+ break; \
+ case 8: \
+ __get_user_asm_64(x, ptr, retval); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
+ } \
} while (0)
-#define __put_user_nocheck(x,ptr,size) \
+#define __put_user_nocheck(x, ptr, size) \
({ \
long __pu_err; \
- __put_user_size((x),(ptr),(size),__pu_err); \
+ __put_user_size((x), (ptr), (size), __pu_err); \
__pu_err; \
})
-#define __put_user_check(x,ptr,size) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
- __put_user_size((x),__pu_addr,(size),__pu_err); \
- __pu_err; \
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
})
struct __large_struct { unsigned long buf[100]; };
@@ -149,21 +167,21 @@ struct __large_struct { unsigned long buf[100]; };
-#define __get_user_nocheck(x,ptr,size) \
+#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
- __get_user_size(__gu_val,(ptr),(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
-#define __get_user_check(x,ptr,size) \
+#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
- if (access_ok(VERIFY_READ,__gu_addr,size)) \
- __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -180,7 +198,7 @@ static inline unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- return __copy_user(to,from,n);
+ return __copy_user(to, from, n);
return n;
}
@@ -188,7 +206,7 @@ static inline unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
- return __copy_user_zeroing(to,from,n);
+ return __copy_user_zeroing(to, from, n);
return n;
}
@@ -196,7 +214,7 @@ static inline unsigned long
__generic_clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- return __do_clear_user(to,n);
+ return __do_clear_user(to, n);
return n;
}
@@ -210,6 +228,7 @@ static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
+
if (access_ok(VERIFY_READ, src, 1))
res = __do_strncpy_from_user(dst, src, count);
return res;
@@ -223,6 +242,7 @@ static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long ret = 0;
+
if (n == 0)
;
else if (n == 1)
@@ -273,6 +293,7 @@ static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned long ret = 0;
+
if (n == 0)
;
else if (n == 1)
@@ -323,6 +344,7 @@ static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
unsigned long ret = 0;
+
if (n == 0)
;
else if (n == 1)
@@ -350,20 +372,20 @@ __constant_clear_user(void __user *to, unsigned long n)
}
-#define clear_user(to, n) \
-(__builtin_constant_p(n) ? \
- __constant_clear_user(to, n) : \
- __generic_clear_user(to, n))
+#define clear_user(to, n) \
+ (__builtin_constant_p(n) ? \
+ __constant_clear_user(to, n) : \
+ __generic_clear_user(to, n))
-#define copy_from_user(to, from, n) \
-(__builtin_constant_p(n) ? \
- __constant_copy_from_user(to, from, n) : \
- __generic_copy_from_user(to, from, n))
+#define copy_from_user(to, from, n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_from_user(to, from, n) : \
+ __generic_copy_from_user(to, from, n))
-#define copy_to_user(to, from, n) \
-(__builtin_constant_p(n) ? \
- __constant_copy_to_user(to, from, n) : \
- __generic_copy_to_user(to, from, n))
+#define copy_to_user(to, from, n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_to_user(to, from, n) : \
+ __generic_copy_to_user(to, from, n))
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
@@ -373,29 +395,31 @@ static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void __user *from,
unsigned long n)
{
- return __copy_user_zeroing(to,from,n);
+ return __copy_user_zeroing(to, from, n);
}
static inline unsigned long
__generic_copy_to_user_nocheck(void __user *to, const void *from,
unsigned long n)
{
- return __copy_user(to,from,n);
+ return __copy_user(to, from, n);
}
static inline unsigned long
__generic_clear_user_nocheck(void __user *to, unsigned long n)
{
- return __do_clear_user(to,n);
+ return __do_clear_user(to, n);
}
/* without checking */
-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user(to, from, n) \
+ __generic_copy_to_user_nocheck((to), (from), (n))
+#define __copy_from_user(to, from, n) \
+ __generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
-#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
+#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 3908b942fd4c..e704f81f85cc 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -67,3 +67,4 @@ EXPORT_SYMBOL(schedule_usleep);
#endif
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index 51123f985eb5..af04cb6b6dc9 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
kfree(module_region);
}
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..83f12f2ed9e3 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
@@ -217,6 +219,9 @@ retry:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
+#ifdef CONFIG_NO_SEGFAULT_TERMINATION
+ DECLARE_WAIT_QUEUE_HEAD(wq);
+#endif
printk(KERN_NOTICE "%s (pid %d) segfaults for page "
"address %08lx at pc %08lx\n",
tsk->comm, tsk->pid,
@@ -227,7 +232,6 @@ retry:
show_registers(regs);
#ifdef CONFIG_NO_SEGFAULT_TERMINATION
- DECLARE_WAIT_QUEUE_HEAD(wq);
wait_event_interruptible(wq, 0 == 1);
#else
info.si_signo = SIGSEGV;
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index eb0110acd19b..93bcf2abd1a1 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -62,10 +62,6 @@ typedef pte_t *pte_addr_t;
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef __ASSEMBLY__
-static inline int pte_file(pte_t pte) { return 0; }
-#endif
-
#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
#define swapper_pg_dir ((pgd_t *) NULL)
@@ -144,7 +140,7 @@ extern unsigned long empty_zero_page;
#define PTRS_PER_PTE 4096
#define USER_PGDS_IN_LAST_PML4 (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
@@ -298,7 +294,6 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
#define _PAGE_RESERVED_MASK (xAMPRx_RESERVED8 | xAMPRx_RESERVED13)
-#define _PAGE_FILE 0x002 /* set:pagecache unset:swap */
#define _PAGE_PROTNONE 0x000 /* If not present */
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -463,27 +458,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* Handle swap and file entries
* - the PTE is encoded in the following format:
* bit 0: Must be 0 (!_PAGE_PRESENT)
- * bit 1: Type: 0 for swap, 1 for file (_PAGE_FILE)
- * bits 2-7: Swap type
- * bits 8-31: Swap offset
- * bits 2-31: File pgoff
+ * bits 1-6: Swap type
+ * bits 7-31: Swap offset
*/
-#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
+#define __swp_type(x) (((x).val >> 1) & 0x1f)
+#define __swp_offset(x) ((x).val >> 7)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
#define __pte_to_swp_entry(_pte) ((swp_entry_t) { (_pte).pte })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_file(pte_t pte)
-{
- return pte.pte & _PAGE_FILE;
-}
-
-#define PTE_FILE_MAX_BITS 29
-
-#define pte_to_pgoff(PTE) ((PTE).pte >> 2)
-#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
diff --git a/arch/frv/include/asm/string.h b/arch/frv/include/asm/string.h
index 5ed310f64b7e..1f6c35990439 100644
--- a/arch/frv/include/asm/string.h
+++ b/arch/frv/include/asm/string.h
@@ -33,7 +33,6 @@ extern void *memcpy(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_STRNCAT 1
#define __HAVE_ARCH_STRCMP 1
#define __HAVE_ARCH_STRNCMP 1
-#define __HAVE_ARCH_STRNICMP 1
#define __HAVE_ARCH_STRCHR 1
#define __HAVE_ARCH_STRRCHR 1
#define __HAVE_ARCH_STRSTR 1
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index af29e17c0181..6b917f1c2955 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -41,7 +41,6 @@ struct thread_info {
* 0-0xBFFFFFFF for user-thead
* 0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
__u8 supervisor_stack[0];
};
@@ -65,9 +64,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c
index 9de96843a278..446e89d500cc 100644
--- a/arch/frv/kernel/asm-offsets.c
+++ b/arch/frv/kernel/asm-offsets.c
@@ -40,7 +40,6 @@ void foo(void)
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
- OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
BLANK();
/* offsets into register file storage */
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index dc3d59de0870..336713ab4745 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -62,7 +62,7 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
unsigned long tbr, psr;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
tbr = user->i.tbr;
psr = user->i.psr;
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 67b1d1685759..0635bd6c2af3 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -94,7 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->start)
continue;
- pci_claim_resource(dev, idx);
+ pci_claim_bridge_resource(dev, idx);
}
}
pcibios_allocate_bus_resources(&bus->children);
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index efa5d65b0007..b073f4d771a5 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -168,8 +168,8 @@ static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int whe
}
static struct pci_ops pci_direct_frv = {
- pci_frv_read_config,
- pci_frv_write_config,
+ .read = pci_frv_read_config,
+ .write = pci_frv_write_config,
};
/*
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
index 2fb9b3ab57b9..8863d6c1df6e 100644
--- a/arch/frv/mm/extable.c
+++ b/arch/frv/mm/extable.c
@@ -10,29 +10,6 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand
extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
extern spinlock_t modlist_lock;
-/*****************************************************************************/
-/*
- *
- */
-static inline unsigned long search_one_table(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
- unsigned long value)
-{
- while (first <= last) {
- const struct exception_table_entry __attribute__((aligned(8))) *mid;
- long diff;
-
- mid = (last - first) / 2 + first;
- diff = mid->insn - value;
- if (diff == 0)
- return mid->fixup;
- else if (diff < 0)
- first = mid + 1;
- else
- last = mid - 1;
- }
- return 0;
-} /* end search_one_table() */
/*****************************************************************************/
/*
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index d8bd54fa431e..49eab8136ec3 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -62,13 +62,6 @@ extern unsigned long zero_page_mask;
#define _PAGE_ACCESSED (1<<2)
/*
- * _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
- * _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
- * So we can overload the bit...
- */
-#define _PAGE_FILE _PAGE_DIRTY /* set: pagecache, unset = swap */
-
-/*
* For now, let's say that Valid and Present are the same thing.
* Alternatively, we could say that it's the "or" of R, W, and X
* permissions.
@@ -178,7 +171,7 @@ extern unsigned long _dflt_cache_att;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
@@ -456,57 +449,36 @@ static inline int pte_exec(pte_t pte)
#define pgtable_cache_init() do { } while (0)
/*
- * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the
- * PTE is interpreted as swap information. Depending on the _PAGE_FILE
- * bit, the remaining free bits are eitehr interpreted as a file offset
- * or a swap type/offset tuple. Rather than have the TLB fill handler
- * test _PAGE_PRESENT, we're going to reserve the permissions bits
- * and set them to all zeros for swap entries, which speeds up the
- * miss handler at the cost of 3 bits of offset. That trade-off can
- * be revisited if necessary, but Hexagon processor architecture and
- * target applications suggest a lot of TLB misses and not much swap space.
+ * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
+ * interpreted as swap information. The remaining free bits are interpreted as
+ * swap type/offset tuple. Rather than have the TLB fill handler test
+ * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
+ * all zeros for swap entries, which speeds up the miss handler at the cost of
+ * 3 bits of offset. That trade-off can be revisited if necessary, but Hexagon
+ * processor architecture and target applications suggest a lot of TLB misses
+ * and not much swap space.
*
* Format of swap PTE:
* bit 0: Present (zero)
- * bit 1: _PAGE_FILE (zero)
- * bits 2-6: swap type (arch independent layer uses 5 bits max)
- * bits 7-9: bits 2:0 of offset
- * bits 10-12: effectively _PAGE_PROTNONE (all zero)
- * bits 13-31: bits 21:3 of swap offset
- *
- * Format of file PTE:
- * bit 0: Present (zero)
- * bit 1: _PAGE_FILE (zero)
- * bits 2-9: bits 7:0 of offset
- * bits 10-12: effectively _PAGE_PROTNONE (all zero)
- * bits 13-31: bits 26:8 of swap offset
+ * bits 1-5: swap type (arch independent layer uses 5 bits max)
+ * bits 6-9: bits 3:0 of offset
+ * bits 10-12: effectively _PAGE_PROTNONE (all zero)
+ * bits 13-31: bits 22:4 of swap offset
*
* The split offset makes some of the following macros a little gnarly,
* but there's plenty of precedent for this sort of thing.
*/
-#define PTE_FILE_MAX_BITS 27
/* Used for swap PTEs */
-#define __swp_type(swp_pte) (((swp_pte).val >> 2) & 0x1f)
+#define __swp_type(swp_pte) (((swp_pte).val >> 1) & 0x1f)
#define __swp_offset(swp_pte) \
- ((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
+ ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
#define __swp_entry(type, offset) \
((swp_entry_t) { \
- ((type << 2) | \
- ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
-
-/* Used for file PTEs */
-#define pte_file(pte) \
- ((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)
-
-#define pte_to_pgoff(pte) \
- (((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2)\
- | _PAGE_FILE) })
+ ((type << 1) | \
+ ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
/* Oh boy. There are a lot of possible arch overrides found in this file. */
#include <asm-generic/pgtable.h>
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
index a59dad3b3695..bacd3d6030c5 100644
--- a/arch/hexagon/include/asm/thread_info.h
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -56,7 +56,6 @@ struct thread_info {
* used for syscalls somehow;
* seems to have a function pointer and four arguments
*/
- struct restart_block restart_block;
/* Points to the current pt_regs frame */
struct pt_regs *regs;
/*
@@ -83,9 +82,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
.sp = 0, \
.regs = NULL, \
}
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index eadd70e47e7e..b039a624c170 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -239,7 +239,7 @@ asmlinkage int sys_rt_sigreturn(void)
sigset_t blocked;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
frame = (struct rt_sigframe __user *)pt_psp(regs);
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7935115398a6..7b6f8801df57 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -57,9 +57,6 @@
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
-/* Valid only for a PTE with the present bit cleared: */
-#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
-
#define _PFN_MASK _PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
@@ -130,7 +127,7 @@
#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/*
* All the normal masks have the "page accessed" bits on, as any time
@@ -300,7 +297,6 @@ extern unsigned long VMALLOC_END;
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
-#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
#define pte_special(pte) 0
/*
@@ -472,27 +468,16 @@ extern void paging_init (void);
*
* Format of swap pte:
* bit 0 : present bit (must be zero)
- * bit 1 : _PAGE_FILE (must be zero)
- * bits 2- 8: swap-type
- * bits 9-62: swap offset
- * bit 63 : _PAGE_PROTNONE bit
- *
- * Format of file pte:
- * bit 0 : present bit (must be zero)
- * bit 1 : _PAGE_FILE (must be one)
- * bits 2-62: file_offset/PAGE_SIZE
+ * bits 1- 7: swap-type
+ * bits 8-62: swap offset
* bit 63 : _PAGE_PROTNONE bit
*/
-#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
-#define __swp_offset(entry) (((entry).val << 1) >> 10)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
+#define __swp_type(entry) (((entry).val >> 1) & 0x7f)
+#define __swp_offset(entry) (((entry).val << 1) >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define PTE_FILE_MAX_BITS 61
-#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 5b17418b4223..c16f21a068ff 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -27,7 +27,6 @@ struct thread_info {
__u32 status; /* Thread synchronous flags */
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
- struct restart_block restart_block;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
__u64 ac_stamp;
__u64 ac_leave;
@@ -46,9 +45,6 @@ struct thread_info {
.cpu = 0, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#ifndef ASM_OFFSETS_C
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 1b3f5eb5fcdb..891002bbb995 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -18,7 +18,6 @@ header-y += intrinsics.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
-header-y += kvm.h
header-y += kvm_para.h
header-y += mman.h
header-y += msgbuf.h
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 8b9318d311a0..bd09bf74f187 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -69,10 +69,10 @@ static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
status = acpi_resource_to_address64(resource, &addr);
if (ACPI_SUCCESS(status) &&
addr.resource_type == ACPI_MEMORY_RANGE &&
- addr.address_length &&
+ addr.address.address_length &&
addr.producer_consumer == ACPI_CONSUMER) {
- space->base = addr.minimum;
- space->length = addr.address_length;
+ space->base = addr.address.minimum;
+ space->length = addr.address.address_length;
return AE_CTRL_TERMINATE;
}
return AE_OK; /* keep looking */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index e795cb848154..2c4498919d3c 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -380,9 +380,6 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
- if (!table)
- return -EINVAL;
-
acpi_madt = (struct acpi_table_madt *)table;
acpi_madt_rev = acpi_madt->header.revision;
@@ -645,9 +642,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
struct acpi_table_header *fadt_header;
struct acpi_table_fadt *fadt;
- if (!table)
- return -EINVAL;
-
fadt_header = (struct acpi_table_header *)table;
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 24603be24c14..29754aae5177 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -305,14 +305,12 @@ plt_target (struct plt_entry *plt)
#endif /* !USE_BRL */
void
-module_free (struct module *mod, void *module_region)
+module_arch_freeing_init (struct module *mod)
{
- if (mod && mod->arch.init_unw_table &&
- module_region == mod->module_init) {
+ if (mod->arch.init_unw_table) {
unw_remove_unwind_table(mod->arch.init_unw_table);
mod->arch.init_unw_table = NULL;
}
- vfree(module_region);
}
/* Have we already seen one of these relocations? */
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 6d92170be457..b3a124da71e5 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -46,7 +46,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
long err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore scratch that always needs gets updated during signal delivery: */
err = __get_user(flags, &sc->sc_flags);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index f295f9abba4b..965ab42fabb0 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -217,14 +217,12 @@ static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
- ssize_t len;
cpumask_t shared_cpu_map;
cpumask_and(&shared_cpu_map,
&this_leaf->shared_cpu_map, cpu_online_mask);
- len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
- len += sprintf(buf+len, "\n");
- return len;
+ return scnprintf(buf, PAGE_SIZE, "%*pb\n",
+ cpumask_pr_args(&shared_cpu_map));
}
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
*/
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto bad_area;
} else if (fault & VM_FAULT_SIGBUS) {
signal = SIGBUS;
goto bad_area;
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 76069c18ee42..52b7604b5215 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
return 0;
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
-{
- return NULL;
-}
-
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 291a582777cf..48cc65705db4 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -188,12 +188,12 @@ static u64 add_io_space(struct pci_root_info *info,
name = (char *)(iospace + 1);
- min = addr->minimum;
- max = min + addr->address_length - 1;
+ min = addr->address.minimum;
+ max = min + addr->address.address_length - 1;
if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
sparse = 1;
- space_nr = new_space(addr->translation_offset, sparse);
+ space_nr = new_space(addr->address.translation_offset, sparse);
if (space_nr == ~0)
goto free_resource;
@@ -247,7 +247,7 @@ static acpi_status resource_to_window(struct acpi_resource *resource,
if (ACPI_SUCCESS(status) &&
(addr->resource_type == ACPI_MEMORY_RANGE ||
addr->resource_type == ACPI_IO_RANGE) &&
- addr->address_length &&
+ addr->address.address_length &&
addr->producer_consumer == ACPI_PRODUCER)
return AE_OK;
@@ -284,7 +284,7 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
if (addr.resource_type == ACPI_MEMORY_RANGE) {
flags = IORESOURCE_MEM;
root = &iomem_resource;
- offset = addr.translation_offset;
+ offset = addr.address.translation_offset;
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
root = &ioport_resource;
@@ -297,8 +297,8 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
resource = &info->res[info->res_num];
resource->name = info->name;
resource->flags = flags;
- resource->start = addr.minimum + offset;
- resource->end = resource->start + addr.address_length - 1;
+ resource->start = addr.address.minimum + offset;
+ resource->end = resource->start + addr.address.address_length - 1;
info->res_offset[info->res_num] = offset;
if (insert_resource(root, resource)) {
@@ -487,45 +487,39 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
-static int is_valid_resource(struct pci_dev *dev, int idx)
+void pcibios_fixup_device_resources(struct pci_dev *dev)
{
- unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- struct resource *devr = &dev->resource[idx], *busr;
+ int idx;
if (!dev->bus)
- return 0;
-
- pci_bus_for_each_resource(dev->bus, busr, i) {
- if (!busr || ((busr->flags ^ devr->flags) & type_mask))
- continue;
- if ((devr->start) && (devr->start >= busr->start) &&
- (devr->end <= busr->end))
- return 1;
- }
- return 0;
-}
+ return;
-static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
-{
- int i;
+ for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
- for (i = start; i < limit; i++) {
- if (!dev->resource[i].flags)
+ if (!r->flags || r->parent || !r->start)
continue;
- if ((is_valid_resource(dev, i)))
- pci_claim_resource(dev, i);
- }
-}
-void pcibios_fixup_device_resources(struct pci_dev *dev)
-{
- pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+ pci_claim_resource(dev, idx);
+ }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
- pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+ int idx;
+
+ if (!dev->bus)
+ return;
+
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ if (!r->flags || r->parent || !r->start)
+ continue;
+
+ pci_claim_bridge_resource(dev, idx);
+ }
}
/*
diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h
index 9cdaf7350ef6..8fd8ee70266a 100644
--- a/arch/m32r/include/asm/pgtable-2level.h
+++ b/arch/m32r/include/asm/pgtable-2level.h
@@ -70,9 +70,5 @@ static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define PTE_FILE_MAX_BITS 29
-#define pte_to_pgoff(pte) (((pte_val(pte) >> 2) & 0x7f) | (((pte_val(pte) >> 10)) << 7))
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7f) << 2) | (((off) >> 7) << 10) | _PAGE_FILE })
-
#endif /* __KERNEL__ */
#endif /* _ASM_M32R_PGTABLE_2LEVEL_H */
diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index 103ce6710f07..8c1fb902a9ce 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -53,7 +53,7 @@ extern unsigned long empty_zero_page[1024];
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -80,8 +80,6 @@ extern unsigned long empty_zero_page[1024];
*/
#define _PAGE_BIT_DIRTY 0 /* software: page changed */
-#define _PAGE_BIT_FILE 0 /* when !present: nonlinear file
- mapping */
#define _PAGE_BIT_PRESENT 1 /* Valid: page is valid */
#define _PAGE_BIT_GLOBAL 2 /* Global */
#define _PAGE_BIT_LARGE 3 /* Large */
@@ -93,7 +91,6 @@ extern unsigned long empty_zero_page[1024];
#define _PAGE_BIT_PROTNONE 9 /* software: if not present */
#define _PAGE_DIRTY (1UL << _PAGE_BIT_DIRTY)
-#define _PAGE_FILE (1UL << _PAGE_BIT_FILE)
#define _PAGE_PRESENT (1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL (1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE (1UL << _PAGE_BIT_LARGE)
@@ -206,14 +203,6 @@ static inline int pte_write(pte_t pte)
return pte_val(pte) & _PAGE_WRITE;
}
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & _PAGE_FILE;
-}
-
static inline int pte_special(pte_t pte)
{
return 0;
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 00171703402f..32422d0211c3 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -34,7 +34,6 @@ struct thread_info {
0-0xBFFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
__u8 supervisor_stack[0];
};
@@ -49,7 +48,6 @@ struct thread_info {
#define TI_CPU 0x00000010
#define TI_PRE_COUNT 0x00000014
#define TI_ADDR_LIMIT 0x00000018
-#define TI_RESTART_BLOCK 0x000001C
#endif
@@ -68,9 +66,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 95408b8f130a..7736c6660a15 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -48,7 +48,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(r4);
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/m68k/68360/commproc.c b/arch/m68k/68360/commproc.c
index 315727b7ff40..14d7f35cd37b 100644
--- a/arch/m68k/68360/commproc.c
+++ b/arch/m68k/68360/commproc.c
@@ -64,15 +64,15 @@ QUICC *pquicc;
/* CPM interrupt vector functions. */
struct cpm_action {
- void (*handler)(void *);
- void *dev_id;
+ irq_handler_t handler;
+ void *dev_id;
};
static struct cpm_action cpm_vecs[CPMVEC_NR];
static void cpm_interrupt(int irq, void * dev, struct pt_regs * regs);
static void cpm_error_interrupt(void *);
/* prototypes: */
-void cpm_install_handler(int vec, void (*handler)(), void *dev_id);
+void cpm_install_handler(int vec, irq_handler_t handler, void *dev_id);
void m360_cpm_reset(void);
@@ -208,7 +208,7 @@ cpm_error_interrupt(void *dev)
/* Install a CPM interrupt handler.
*/
void
-cpm_install_handler(int vec, void (*handler)(), void *dev_id)
+cpm_install_handler(int vec, irq_handler_t handler, void *dev_id)
{
request_irq(vec, handler, 0, "timer", dev_id);
diff --git a/arch/m68k/68360/config.c b/arch/m68k/68360/config.c
index 17ec416fed9d..fd1f948c7129 100644
--- a/arch/m68k/68360/config.c
+++ b/arch/m68k/68360/config.c
@@ -106,19 +106,6 @@ void hw_timer_init(irq_handler_t handler)
pquicc->timer_tgcr = tgcr_save;
}
-int BSP_set_clock_mmss(unsigned long nowtime)
-{
-#if 0
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
- tod->second1 = real_seconds / 10;
- tod->second2 = real_seconds % 10;
- tod->minute1 = real_minutes / 10;
- tod->minute2 = real_minutes % 10;
-#endif
- return 0;
-}
-
void BSP_reset (void)
{
local_irq_disable();
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 95022b04b62d..264db1126803 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -170,7 +170,6 @@ repeat:
if (acia_stat & ACIA_RDRF) {
/* received a character */
scancode = acia.key_data; /* get it or reset the ACIA, I'll get it! */
- tasklet_schedule(&keyboard_tasklet);
interpret_scancode:
switch (kb_state.state) {
case KEYBOARD:
@@ -430,14 +429,6 @@ void ikbd_mouse_y0_top(void)
}
EXPORT_SYMBOL(ikbd_mouse_y0_top);
-/* Resume */
-void ikbd_resume(void)
-{
- static const char cmd[1] = { 0x11 };
-
- ikbd_write(cmd, 1);
-}
-
/* Disable mouse */
void ikbd_mouse_disable(void)
{
@@ -447,14 +438,6 @@ void ikbd_mouse_disable(void)
}
EXPORT_SYMBOL(ikbd_mouse_disable);
-/* Pause output */
-void ikbd_pause(void)
-{
- static const char cmd[1] = { 0x13 };
-
- ikbd_write(cmd, 1);
-}
-
/* Set joystick event reporting */
void ikbd_joystick_event_on(void)
{
@@ -502,56 +485,6 @@ void ikbd_joystick_disable(void)
ikbd_write(cmd, 1);
}
-/* Time-of-day clock set */
-void ikbd_clock_set(int year, int month, int day, int hour, int minute, int second)
-{
- char cmd[7] = { 0x1B, year, month, day, hour, minute, second };
-
- ikbd_write(cmd, 7);
-}
-
-/* Interrogate time-of-day clock */
-void ikbd_clock_get(int *year, int *month, int *day, int *hour, int *minute, int second)
-{
- static const char cmd[1] = { 0x1C };
-
- ikbd_write(cmd, 1);
-}
-
-/* Memory load */
-void ikbd_mem_write(int address, int size, char *data)
-{
- panic("Attempt to write data into keyboard memory");
-}
-
-/* Memory read */
-void ikbd_mem_read(int address, char data[6])
-{
- char cmd[3] = { 0x21, address>>8, address&0xFF };
-
- ikbd_write(cmd, 3);
-
- /* receive data and put it in data */
-}
-
-/* Controller execute */
-void ikbd_exec(int address)
-{
- char cmd[3] = { 0x22, address>>8, address&0xFF };
-
- ikbd_write(cmd, 3);
-}
-
-/* Status inquiries (0x87-0x9A) not yet implemented */
-
-/* Set the state of the caps lock led. */
-void atari_kbd_leds(unsigned int leds)
-{
- char cmd[6] = {32, 0, 4, 1, 254 + ((leds & 4) != 0), 0};
-
- ikbd_write(cmd, 6);
-}
-
/*
* The original code sometimes left the interrupt line of
* the ACIAs low forever. I hope, it is fixed now.
@@ -571,9 +504,8 @@ int atari_keyb_init(void)
kb_state.state = KEYBOARD;
kb_state.len = 0;
- error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
- IRQ_TYPE_SLOW, "keyboard,mouse,MIDI",
- atari_keyboard_interrupt);
+ error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 0,
+ "keyboard,mouse,MIDI", atari_keyboard_interrupt);
if (error)
return error;
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index e5a66596b116..ba65f942d0c7 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(stdma_islocked);
void __init stdma_init(void)
{
stdma_isr = NULL;
- if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
+ if (request_irq(IRQ_MFP_FDC, stdma_int, IRQF_SHARED,
"ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
pr_err("Couldn't register ST-DMA interrupt\n");
}
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index da8f981c36d6..c549b48174ec 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -32,8 +32,7 @@ atari_sched_init(irq_handler_t timer_routine)
/* start timer C, div = 1:100 */
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
- if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
- "timer", timer_routine))
+ if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
pr_err("Couldn't register timer interrupt\n");
}
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 399df883c8bb..1a10a08ebec7 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -36,6 +36,7 @@ CONFIG_AMIGA_PCMCIA=y
CONFIG_ZORRO_NAMES=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -55,6 +56,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -96,6 +99,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -142,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -163,6 +169,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -170,9 +177,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -181,8 +191,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -197,6 +206,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -213,17 +224,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_AMIGA=m
@@ -299,6 +337,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -316,6 +357,8 @@ CONFIG_ARIADNE=y
CONFIG_HYDRA=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
@@ -371,6 +414,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MSM6242=m
@@ -392,6 +436,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -407,6 +452,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -476,10 +522,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -514,13 +568,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index be16740c0749..7859a738c81e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -34,6 +34,7 @@ CONFIG_M68060=y
CONFIG_APOLLO=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -291,6 +332,8 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -332,6 +375,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -350,6 +394,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -365,6 +410,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -434,10 +480,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -472,13 +526,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 391e185d73be..372593a3b398 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -31,8 +31,10 @@ CONFIG_M68030=y
CONFIG_M68040=y
CONFIG_M68060=y
CONFIG_ATARI=y
+CONFIG_ATARI_ROM_ISA=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -52,6 +54,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -93,6 +97,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -139,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -229,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_ATARI=m
@@ -289,6 +328,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -299,8 +341,12 @@ CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_NE2000=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
@@ -345,6 +391,7 @@ CONFIG_DMASOUND_ATARI=m
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
+# CONFIG_HID_PLANTRONICS is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
@@ -354,6 +401,8 @@ CONFIG_NATFEAT=y
CONFIG_NFBLOCK=y
CONFIG_NFCON=y
CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
CONFIG_ATARI_DSP56K=m
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
@@ -367,6 +416,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -382,6 +432,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -451,10 +502,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -489,13 +548,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index d0e705d1a063..f3bd35e76ea4 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -32,6 +32,7 @@ CONFIG_VME=y
CONFIG_BVME6000=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index fdc7e9672249..9f9793fb2b73 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -34,6 +34,7 @@ CONFIG_M68060=y
CONFIG_HP300=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -292,6 +333,8 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -335,6 +378,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -352,6 +396,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -367,6 +412,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -436,10 +482,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -474,13 +528,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3d345641d5a0..89f225c01a0b 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -33,6 +33,7 @@ CONFIG_M68KFPU_EMU=y
CONFIG_MAC=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -52,6 +53,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -93,6 +96,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -139,6 +144,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +166,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +174,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +188,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +203,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,20 +221,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_SWIM=m
CONFIG_BLK_DEV_LOOP=y
@@ -297,6 +335,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -310,6 +351,8 @@ CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_MACSONIC=y
CONFIG_MAC8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
@@ -357,6 +400,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -374,6 +418,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -389,6 +434,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -458,11 +504,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -497,13 +550,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 59aa42096000..d3cdb5447a2c 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -39,9 +39,11 @@ CONFIG_SUN3X=y
CONFIG_Q40=y
CONFIG_ZORRO=y
CONFIG_AMIGA_PCMCIA=y
+CONFIG_ATARI_ROM_ISA=y
CONFIG_ZORRO_NAMES=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -61,6 +63,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -102,6 +106,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -148,6 +154,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -169,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -176,9 +184,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -187,8 +198,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -203,6 +213,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -219,20 +231,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -241,9 +279,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -329,6 +368,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -352,11 +394,14 @@ CONFIG_MVME16x_NET=y
CONFIG_MACSONIC=y
CONFIG_HYDRA=y
CONFIG_MAC8390=y
-CONFIG_NE2000=m
+CONFIG_NE2000=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
@@ -423,6 +468,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MSM6242=m
@@ -435,6 +481,8 @@ CONFIG_NATFEAT=y
CONFIG_NFBLOCK=y
CONFIG_NFCON=y
CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
CONFIG_ATARI_DSP56K=m
CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
@@ -450,6 +498,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -465,6 +514,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -534,11 +584,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -573,13 +630,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 066b24af095e..b4c76640973e 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -31,6 +31,7 @@ CONFIG_VME=y
CONFIG_MVME147=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -50,6 +51,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -91,6 +94,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -137,6 +142,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -158,6 +164,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -165,9 +172,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -176,8 +186,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -192,6 +201,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -208,17 +219,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -227,9 +264,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -279,6 +317,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9326ea664a5b..0d4a26f9b58c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -32,6 +32,7 @@ CONFIG_VME=y
CONFIG_MVME16x=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -427,11 +473,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index d7d1101e31b5..5d581c503fa3 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -32,6 +32,7 @@ CONFIG_M68060=y
CONFIG_Q40=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -286,6 +324,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -300,6 +341,8 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_NE2000=m
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
@@ -347,6 +390,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -365,6 +409,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -380,6 +425,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -449,10 +495,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -487,13 +541,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 98522e8fb852..c6b49a4a887c 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -287,6 +328,8 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -428,10 +474,17 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5128a8c3f4e3..b65785eaff8d 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3X=y
# CONFIG_COMPACTION is not set
CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
+CONFIG_BRIDGE=m
CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_NET_MPLS_GSO=m
# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
CONFIG_NET_TEAM_MODE_RANDOM=m
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -288,6 +329,8 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
@@ -428,10 +474,18 @@ CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -466,13 +520,10 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9b6c691874bd..1517ed1c6471 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += futex.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 953e0ac6855e..6321c4495620 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -40,11 +40,6 @@
/* convert irq_handler index to vector number */
#define IRQ_SOURCE_TO_VECTOR(i) ((i) + ((i) < 8 ? 0x18 : (0x40-8)))
-/* interrupt service types */
-#define IRQ_TYPE_SLOW 0
-#define IRQ_TYPE_FAST 1
-#define IRQ_TYPE_PRIO 2
-
/* ST-MFP interrupts */
#define IRQ_MFP_BUSY (8)
#define IRQ_MFP_DCD (9)
diff --git a/arch/m68k/include/asm/commproc.h b/arch/m68k/include/asm/commproc.h
index 66a36bd51aa1..f41c96863e98 100644
--- a/arch/m68k/include/asm/commproc.h
+++ b/arch/m68k/include/asm/commproc.h
@@ -480,28 +480,6 @@ typedef struct scc_enet {
#define SICR_ENET_CLKRT ((uint)0x0000003d)
#endif
-#ifdef CONFIG_BSEIP
-/* This ENET stuff is for the MPC823 with ethernet on SCC2.
- * This is unique to the BSE ip-Engine board.
- */
-#define PA_ENET_RXD ((ushort)0x0004)
-#define PA_ENET_TXD ((ushort)0x0008)
-#define PA_ENET_TCLK ((ushort)0x0100)
-#define PA_ENET_RCLK ((ushort)0x0200)
-#define PB_ENET_TENA ((uint)0x00002000)
-#define PC_ENET_CLSN ((ushort)0x0040)
-#define PC_ENET_RENA ((ushort)0x0080)
-
-/* BSE uses port B and C bits for PHY control also.
-*/
-#define PB_BSE_POWERUP ((uint)0x00000004)
-#define PB_BSE_FDXDIS ((uint)0x00008000)
-#define PC_BSE_LOOPBACK ((ushort)0x0800)
-
-#define SICR_ENET_MASK ((uint)0x0000ff00)
-#define SICR_ENET_CLKRT ((uint)0x00002c00)
-#endif
-
/* SCC Event register as used by Ethernet.
*/
#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
@@ -671,7 +649,7 @@ typedef struct scc_trans {
/* #define CPMVEC_PIO_PC4 ((ushort)0x01) */
/* #define CPMVEC_ERROR ((ushort)0x00) */
-extern void cpm_install_handler(int vec, void (*handler)(void *), void *dev_id);
+extern void cpm_install_handler(int vec, irq_handler_t handler, void *dev_id);
/* CPM interrupt configuration vector.
*/
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index bc868af10c96..000000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef _ASM_M68K_FUTEX_H
-#define _ASM_M68K_FUTEX_H
-
-#ifdef __KERNEL__
-#if !defined(CONFIG_MMU)
-#include <asm-generic/futex.h>
-#else /* CONFIG_MMU */
-
-#include <linux/futex.h>
-#include <linux/uaccess.h>
-#include <asm/errno.h>
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- u32 val;
-
- if (unlikely(get_user(val, uaddr) != 0))
- return -EFAULT;
-
- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
- return -EFAULT;
-
- *uval = val;
-
- return 0;
-}
-
-static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval, ret;
- u32 tmp;
-
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- pagefault_disable(); /* implies preempt_disable() */
-
- ret = -EFAULT;
- if (unlikely(get_user(oldval, uaddr) != 0))
- goto out_pagefault_enable;
-
- ret = 0;
- tmp = oldval;
-
- switch (op) {
- case FUTEX_OP_SET:
- tmp = oparg;
- break;
- case FUTEX_OP_ADD:
- tmp += oparg;
- break;
- case FUTEX_OP_OR:
- tmp |= oparg;
- break;
- case FUTEX_OP_ANDN:
- tmp &= ~oparg;
- break;
- case FUTEX_OP_XOR:
- tmp ^= oparg;
- break;
- default:
- ret = -ENOSYS;
- }
-
- if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
- ret = -EFAULT;
-
-out_pagefault_enable:
- pagefault_enable(); /* subsumes preempt_enable() */
-
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif /* CONFIG_MMU */
-#endif /* __KERNEL__ */
-#endif /* _ASM_M68K_FUTEX_H */
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 29c7c6c3a5f2..42235e7fbeed 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -55,7 +55,7 @@ struct mac_model
#define MAC_SCSI_QUADRA3 4
#define MAC_SCSI_IIFX 5
#define MAC_SCSI_DUO 6
-#define MAC_SCSI_CCL 7
+#define MAC_SCSI_LC 7
#define MAC_SCSI_LATE 8
#define MAC_IDE_NONE 0
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 3c793682e5d9..2500ce04fcc4 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -35,7 +35,6 @@
* hitting hardware.
*/
#define CF_PAGE_DIRTY 0x00000001
-#define CF_PAGE_FILE 0x00000200
#define CF_PAGE_ACCESSED 0x00001000
#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
@@ -243,11 +242,6 @@ static inline int pte_young(pte_t pte)
return pte_val(pte) & CF_PAGE_ACCESSED;
}
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & CF_PAGE_FILE;
-}
-
static inline int pte_special(pte_t pte)
{
return 0;
@@ -391,26 +385,13 @@ static inline void cache_page(void *vaddr)
*ptep = pte_mkcache(*ptep);
}
-#define PTE_FILE_MAX_BITS 21
-#define PTE_FILE_SHIFT 11
-
-static inline unsigned long pte_to_pgoff(pte_t pte)
-{
- return pte_val(pte) >> PTE_FILE_SHIFT;
-}
-
-static inline pte_t pgoff_to_pte(unsigned pgoff)
-{
- return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
-}
-
/*
* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
*/
#define __swp_type(x) ((x).val & 0xFF)
-#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
+#define __swp_offset(x) ((x).val >> 11)
#define __swp_entry(typ, off) ((swp_entry_t) { (typ) | \
- (off << PTE_FILE_SHIFT) })
+ (off << 11) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) (__pte((x).val))
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index e0fdd4d08075..0085aab80e5a 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -28,7 +28,6 @@
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
#define _PAGE_PROTNONE 0x004
-#define _PAGE_FILE 0x008 /* pagecache or swap? */
#ifndef __ASSEMBLY__
@@ -168,7 +167,6 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
@@ -266,19 +264,6 @@ static inline void cache_page(void *vaddr)
}
}
-#define PTE_FILE_MAX_BITS 28
-
-static inline unsigned long pte_to_pgoff(pte_t pte)
-{
- return pte.pte >> 4;
-}
-
-static inline pte_t pgoff_to_pte(unsigned off)
-{
- pte_t pte = { (off << 4) + _PAGE_FILE };
- return pte;
-}
-
/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define __swp_type(x) (((x).val >> 4) & 0xff)
#define __swp_offset(x) ((x).val >> 12)
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 9f5abbda1ea7..28a145bfbb71 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -66,7 +66,7 @@
#define PTRS_PER_PGD 128
#endif
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index 11859b86b1f9..ac7d87a02335 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -37,8 +37,6 @@ extern void paging_init(void);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_file(pte_t pte) { return 0; }
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index f55aa04161e8..48657f9fdece 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -38,8 +38,6 @@
#define _PAGE_PRESENT (SUN3_PAGE_VALID)
#define _PAGE_ACCESSED (SUN3_PAGE_ACCESSED)
-#define PTE_FILE_MAX_BITS 28
-
/* Compound page protection values. */
//todo: work out which ones *should* have SUN3_PAGE_NOCACHE and fix...
// is it just PAGE_KERNEL and PAGE_SHARED?
@@ -168,7 +166,6 @@ static inline void pgd_clear (pgd_t *pgdp) {}
static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
@@ -202,18 +199,6 @@ static inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
return (pmd_t *) pgd;
}
-static inline unsigned long pte_to_pgoff(pte_t pte)
-{
- return pte.pte & SUN3_PAGE_PGNUM_MASK;
-}
-
-static inline pte_t pgoff_to_pte(unsigned off)
-{
- pte_t pte = { off + SUN3_PAGE_ACCESSED };
- return pte;
-}
-
-
/* Find an entry in the third-level pagetable. */
#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 21a4784ca5a1..c54256e69e64 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
unsigned long tp_value; /* thread pointer */
- struct restart_block restart_block;
};
#endif /* __ASSEMBLY__ */
@@ -41,9 +40,6 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_stack (init_thread_union.stack)
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 75e75d7b1702..244e0dbe45db 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 355
+#define NR_syscalls 356
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 2c1bec9a14b6..61fb6cb9d2ae 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -360,5 +360,6 @@
#define __NR_getrandom 352
#define __NR_memfd_create 353
#define __NR_bpf 354
+#define __NR_execveat 355
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 967a8b7e1527..d7179281e74a 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -655,7 +655,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
int err = 0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
@@ -693,7 +693,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2ca219e184cd..a0ec4303f2c8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
.long sys_getrandom
.long sys_memfd_create
.long sys_bpf
+ .long sys_execveat /* 355 */
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e9c3756139fc..689b47d292ac 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -296,7 +296,7 @@ static struct mac_model mac_data_table[] = {
.name = "IIvi",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -305,7 +305,7 @@ static struct mac_model mac_data_table[] = {
.name = "IIvx",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -320,7 +320,7 @@ static struct mac_model mac_data_table[] = {
.name = "Classic II",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = {
.name = "Color Classic",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_CCL,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = {
.name = "Color Classic II",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_CCL,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -353,7 +353,7 @@ static struct mac_model mac_data_table[] = {
.name = "LC",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -362,7 +362,7 @@ static struct mac_model mac_data_table[] = {
.name = "LC II",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -371,7 +371,7 @@ static struct mac_model mac_data_table[] = {
.name = "LC III",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -499,7 +499,7 @@ static struct mac_model mac_data_table[] = {
.name = "Performa 460",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = {
.name = "Performa 520",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_CCL,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = {
.name = "Performa 550",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_CCL,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = {
.name = "TV",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_CCL,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -576,7 +576,7 @@ static struct mac_model mac_data_table[] = {
.name = "Performa 600",
.adb_type = MAC_ADB_IISI,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -1109,8 +1109,10 @@ int __init mac_platform_init(void)
platform_device_register_simple("mac_scsi", 0,
mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
break;
- case MAC_SCSI_CCL:
- /* Addresses from the Color Classic Developer Note.
+ case MAC_SCSI_LC:
+ /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
+ * Also from the Developer Notes for Classic II, LC III,
+ * Color Classic and IIvx.
* $50F0 6000 - $50F0 7FFF: SCSI handshake
* $50F1 0000 - $50F1 1FFF: SCSI
* $50F1 2000 - $50F1 3FFF: SCSI DMA
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto map_err;
else if (fault & VM_FAULT_SIGBUS)
goto bus_err;
BUG();
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 1bb3ce6634d3..e6a3b56c6481 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -168,49 +168,3 @@ int mvme147_set_clock_mmss (unsigned long nowtime)
{
return 0;
}
-
-/*------------------- Serial console stuff ------------------------*/
-
-static void scc_delay (void)
-{
- int n;
- volatile int trash;
-
- for (n = 0; n < 20; n++)
- trash = n;
-}
-
-static void scc_write (char ch)
-{
- volatile char *p = (volatile char *)M147_SCC_A_ADDR;
-
- do {
- scc_delay();
- }
- while (!(*p & 4));
- scc_delay();
- *p = 8;
- scc_delay();
- *p = ch;
-}
-
-
-void m147_scc_write (struct console *co, const char *str, unsigned count)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- while (count--)
- {
- if (*str == '\n')
- scc_write ('\r');
- scc_write (*str++);
- }
- local_irq_restore(flags);
-}
-
-void mvme147_init_console_port (struct console *co, int cflag)
-{
- co->write = m147_scc_write;
-}
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 6ef7a81a3b12..1755e2f7137d 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -161,4 +161,4 @@ static int __init rtc_MK48T08_init(void)
printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
return misc_register(&rtc_dev);
}
-module_init(rtc_MK48T08_init);
+device_initcall(rtc_MK48T08_init);
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index 0d9dc5487296..d0604c0a8702 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -47,7 +47,6 @@
*/
#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
-#define _PAGE_FILE _PAGE_ALWAYS_ZERO_3
/* Pages owned, and protected by, the kernel. */
#define _PAGE_KERNEL _PAGE_PRIV
@@ -219,7 +218,6 @@ extern unsigned long empty_zero_page;
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
@@ -327,10 +325,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define PTE_FILE_MAX_BITS 22
-#define pte_to_pgoff(x) (pte_val(x) >> 10)
-#define pgoff_to_pte(x) __pte(((x) << 10) | _PAGE_FILE)
-
#define kern_addr_valid(addr) (1)
/*
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 47711336119e..afb3ca4776d1 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -35,9 +35,8 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* thread address space */
- struct restart_block restart_block;
- u8 supervisor_stack[0];
+ u8 supervisor_stack[0] __aligned(8);
};
#else /* !__ASSEMBLY__ */
@@ -74,9 +73,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c
index 0d100d5c1407..ce49d429c74a 100644
--- a/arch/metag/kernel/signal.c
+++ b/arch/metag/kernel/signal.c
@@ -48,7 +48,7 @@ static int restore_sigcontext(struct pt_regs *regs,
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err = metag_gp_regs_copyin(regs, 0, sizeof(struct user_gp_regs), NULL,
&sc->regs);
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 3c32075d2945..7ca80ac42ed5 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-struct page *follow_huge_addr(struct mm_struct *mm,
- unsigned long address, int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return pmd_page_shift(pmd) > PAGE_SHIFT;
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 8e211cc28dac..91d2068da1b9 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -34,5 +34,4 @@ $(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,strip)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
-
-clean-files += simpleImage.*.unstrip linux.bin.ub
+clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile
index c4982d16e555..a3d2e42c3c97 100644
--- a/arch/microblaze/boot/dts/Makefile
+++ b/arch/microblaze/boot/dts/Makefile
@@ -16,5 +16,3 @@ quiet_cmd_cp = CP $< $@$2
# Rule to build device tree blobs
DTC_FLAGS := -p 1024
-
-clean-files += *.dtb
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 60cb39deb533..ea2a9cd9b159 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -15,7 +15,7 @@
#include <linux/param.h>
-extern inline void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
{
asm volatile ("# __delay \n\t" \
"1: addi %0, %0, -1\t\n" \
@@ -43,7 +43,7 @@ extern inline void __delay(unsigned long loops)
extern unsigned long loops_per_jiffy;
-extern inline void __udelay(unsigned int x)
+static inline void __udelay(unsigned int x)
{
unsigned long long tmp =
diff --git a/arch/microblaze/include/asm/kgdb.h b/arch/microblaze/include/asm/kgdb.h
index 78b17d40b235..ad27acb2b15f 100644
--- a/arch/microblaze/include/asm/kgdb.h
+++ b/arch/microblaze/include/asm/kgdb.h
@@ -23,6 +23,9 @@ static inline void arch_kgdb_breakpoint(void)
__asm__ __volatile__("brki r16, 0x18;");
}
+struct pt_regs;
+asmlinkage void microblaze_kgdb_break(struct pt_regs *regs);
+
#endif /* __ASSEMBLY__ */
#endif /* __MICROBLAZE_KGDB_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/microblaze/include/asm/linkage.h b/arch/microblaze/include/asm/linkage.h
index 3a8e36d057eb..0540bbaad897 100644
--- a/arch/microblaze/include/asm/linkage.h
+++ b/arch/microblaze/include/asm/linkage.h
@@ -1,15 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_LINKAGE_H
-#define _ASM_MICROBLAZE_LINKAGE_H
-
-#define __ALIGN .align 4
-#define __ALIGN_STR ".align 4"
-
-#endif /* _ASM_MICROBLAZE_LINKAGE_H */
+#include <asm-generic/linkage.h>
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 7fdf7fabc7d7..61436d69775c 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -60,7 +60,7 @@ extern unsigned long get_zero_page_fast(void);
extern void __bad_pte(pmd_t *pmd);
-extern inline pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd_slow(void)
{
pgd_t *ret;
@@ -70,7 +70,7 @@ extern inline pgd_t *get_pgd_slow(void)
return ret;
}
-extern inline pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
@@ -84,14 +84,14 @@ extern inline pgd_t *get_pgd_fast(void)
return (pgd_t *)ret;
}
-extern inline void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long **)pgd = pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
-extern inline void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
@@ -146,19 +146,19 @@ static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
return (pte_t *)ret;
}
-extern inline void pte_free_fast(pte_t *pte)
+static inline void pte_free_fast(pte_t *pte)
{
*(unsigned long **)pte = pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
-extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-extern inline void pte_free_slow(struct page *ptepage)
+static inline void pte_free_slow(struct page *ptepage)
{
__free_page(ptepage);
}
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index df19d0c47be8..e53b8532353c 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -40,10 +40,6 @@ extern int mem_init_done;
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef __ASSEMBLY__
-static inline int pte_file(pte_t pte) { return 0; }
-#endif /* __ASSEMBLY__ */
-
#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
#define swapper_pg_dir ((pgd_t *) NULL)
@@ -65,6 +61,8 @@ static inline int pte_file(pte_t pte) { return 0; }
#include <asm-generic/4level-fixup.h>
+#define __PAGETABLE_PMD_FOLDED
+
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -74,7 +72,7 @@ static inline int pte_file(pte_t pte) { return 0; }
#include <asm/mmu.h>
#include <asm/page.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
@@ -207,7 +205,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
-#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
@@ -337,7 +334,6 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -499,11 +495,6 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
#define pte_unmap(pte) kunmap_atomic(pte)
-/* Encode and decode a nonlinear file mapping entry */
-#define PTE_FILE_MAX_BITS 29
-#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/*
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 53cfaf34c343..04a5bece8168 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -97,7 +97,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
microblaze_set_syscall_arg(regs, i++, *args++);
}
-asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
+asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
static inline int syscall_get_arch(void)
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 8c9d36591a03..b699fbd7de4a 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -71,7 +71,6 @@ struct thread_info {
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
mm_segment_t addr_limit; /* thread address space */
- struct restart_block restart_block;
struct cpu_context cpu_context;
};
@@ -87,9 +86,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
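The restart_block member leaves the per-arch thread_info because it now lives in the generic struct task_struct, so restart state is reached through current rather than current_thread_info(). A one-line sketch of the new idiom, mirroring the signal.c hunk later in this series (kernel context assumed):

    /* Old: current_thread_info()->restart_block.fn = do_no_restart_syscall; */
    current->restart_block.fn = do_no_restart_syscall;  /* pending restart -> -EINTR */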
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 59a89a64a865..62942fd12672 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -220,7 +220,7 @@ extern long __user_bad(void);
} else { \
__gu_err = -EFAULT; \
} \
- x = (typeof(*(ptr)))__gu_val; \
+ x = (__force typeof(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -242,7 +242,7 @@ extern long __user_bad(void);
default: \
/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
} \
- x = (__typeof__(*(ptr))) __gu_val; \
+ x = (__force __typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
@@ -306,7 +306,7 @@ extern long __user_bad(void);
#define __put_user_check(x, ptr, size) \
({ \
- typeof(*(ptr)) volatile __pu_val = x; \
+ typeof(*(ptr)) volatile __pu_val = x; \
typeof(*(ptr)) __user *__pu_addr = (ptr); \
int __pu_err = 0; \
\
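The added __force annotations are for sparse only: they mark the integer-to-typeof(*(ptr)) cast as intentional, and they expand to nothing under gcc, so generated code is unchanged. A hedged illustration of the mechanism (the __CHECKER__ definitions below are paraphrased from include/linux/compiler.h of this era and are an assumption, not quoted from this patch; sketch_assign is mine):

    #ifdef __CHECKER__
    # define __force __attribute__((force))   /* sparse-only: suppress cast warnings */
    #else
    # define __force                          /* no effect when building with gcc    */
    #endif

    #define sketch_assign(x, ptr, raw) \
            ((x) = (__force typeof(*(ptr)))(raw))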
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 0a53362d5548..76ed17b56fea 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 388
+#define __NR_syscalls 389
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index c712677f8a2a..32850c73be09 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -403,5 +403,6 @@
#define __NR_getrandom 385
#define __NR_memfd_create 386
#define __NR_bpf 387
+#define __NR_execveat 388
#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
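__NR_execveat is now wired up (388 here; the MIPS o32/64/n32 numbers appear further down), so userspace can reach it through the generic syscall(2) wrapper until libc grows a dedicated stub. A small hedged userspace example; the directory and program name are placeholders, and the hard-coded 388 is the microblaze number taken from the hunk above:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_execveat
    #define __NR_execveat 388            /* microblaze; value is arch-specific */
    #endif

    int main(void)
    {
            char *argv[] = { "true", NULL };
            char *envp[] = { NULL };
            int dirfd = open("/bin", O_RDONLY);   /* directory fd for the *at call */

            if (dirfd < 0 ||
                syscall(__NR_execveat, dirfd, "true", argv, envp, 0) < 0)
                    perror("execveat");
            return 1;                    /* reached only if the exec failed */
    }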
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 08d50cc55e7d..f08bacaf8a95 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -16,7 +16,7 @@ extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
hw_exception_handler.o intc.o irq.o \
- platform.o process.o prom.o prom_parse.o ptrace.o \
+ platform.o process.o prom.o ptrace.o \
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
obj-y += cpu/
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index a6e44410672d..0bde47e4fa69 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -140,10 +140,10 @@ do { \
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int volatile temp = 0; \
- int align = ~(line_length - 1); \
+ unsigned int volatile temp = 0; \
+ unsigned int align = ~(line_length - 1); \
end = ((end & align) == end) ? end - line_length : end & align; \
- WARN_ON(end - start < 0); \
+ WARN_ON(end < start); \
\
__asm__ __volatile__ (" 1: " #op " %1, r0;" \
"cmpu %0, %1, %2;" \
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 93c26cf50de5..a32daec96c12 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -33,7 +33,7 @@
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{
struct pvr_s pvr;
- int temp; /* for saving temp value */
+ u32 temp; /* for saving temp value */
get_pvr(&pvr);
CI(ver_code, VERSION);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 4854285b26e7..85dbda4a08a8 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -22,7 +22,7 @@ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
{
- int i = 0;
+ u32 i = 0;
ci->use_instr =
(fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 234acad79b9e..d1dd6e83d59b 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -41,8 +41,12 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"8.40.a", 0x18},
{"8.40.b", 0x19},
{"8.50.a", 0x1a},
+ {"8.50.b", 0x1c},
+ {"8.50.c", 0x1e},
{"9.0", 0x1b},
{"9.1", 0x1d},
+ {"9.2", 0x1f},
+ {"9.3", 0x20},
{NULL, 0},
};
@@ -61,11 +65,14 @@ const struct family_string_key family_string_lookup[] = {
{"spartan3adsp", 0xc},
{"spartan6", 0xd},
{"virtex6", 0xe},
+ {"virtex7", 0xf},
/* FIXME There is no key code defined for spartan2 */
{"spartan2", 0xf0},
{"kintex7", 0x10},
{"artix7", 0x11},
{"zynq7000", 0x12},
+ {"UltraScale Virtex", 0x13},
+ {"UltraScale Kintex", 0x14},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 15c7c12ea0e7..719feee1e043 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -148,17 +148,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
if (ret < 0) {
pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
- return -EINVAL;
+ return ret;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
if (ret < 0) {
pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
- return -EINVAL;
+ return ret;
}
- if (intr_mask > (u32)((1ULL << nr_irq) - 1))
- pr_info(" ERROR: Mismatch in kind-of-intr param\n");
+ if (intr_mask >> nr_irq)
+ pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
pr_info("%s: num_irq=%d, edge=0x%x\n",
intc->full_name, nr_irq, intr_mask);
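Besides propagating the real of_property_read_u32() error codes, the hunk replaces the (1ULL << nr_irq) - 1 comparison with a shift: intr_mask >> nr_irq is non-zero exactly when the mask has bits at or above the declared number of interrupt inputs, with no 64-bit constant or cast needed. A standalone sketch of the check (widened to 64 bits here only so the demo shift stays well-defined for any nr_irq up to 63; mask_too_wide is my name):

    #include <stdio.h>
    #include <stdint.h>

    /* Non-zero when intr_mask uses bits >= nr_irq, i.e. more inputs than declared. */
    static int mask_too_wide(uint64_t intr_mask, unsigned int nr_irq)
    {
            return (intr_mask >> nr_irq) != 0;
    }

    int main(void)
    {
            printf("%d\n", mask_too_wide(0xf, 3));  /* 1: bit 3 set, only 3 inputs */
            printf("%d\n", mask_too_wide(0x7, 3));  /* 0: mask fits                */
            return 0;
    }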
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index 09a5e8286137..8736af5806ae 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
+#include <asm/kgdb.h>
#include <asm/pvr.h>
#define GDB_REG 0
@@ -35,9 +36,10 @@ struct pvr_s pvr;
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
- int i;
+ unsigned int i;
unsigned long *pt_regb = (unsigned long *)regs;
int temp;
+
/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
gdb_regs[i] = pt_regb[i];
@@ -67,7 +69,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
- int i;
+ unsigned int i;
unsigned long *pt_regb = (unsigned long *)regs;
/* pt_regs and gdb_regs have the same 37 values.
@@ -77,7 +79,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
pt_regb[i] = gdb_regs[i];
}
-void microblaze_kgdb_break(struct pt_regs *regs)
+asmlinkage void microblaze_kgdb_break(struct pt_regs *regs)
{
if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return;
@@ -91,7 +93,7 @@ void microblaze_kgdb_break(struct pt_regs *regs)
/* untested */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
- int i;
+ unsigned int i;
unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
deleted file mode 100644
index 068762f55fd6..000000000000
--- a/arch/microblaze/kernel/prom_parse.c
+++ /dev/null
@@ -1,35 +0,0 @@
-#undef DEBUG
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/etherdevice.h>
-#include <linux/of_address.h>
-#include <asm/prom.h>
-
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
- unsigned long *busno, unsigned long *phys, unsigned long *size)
-{
- const u32 *dma_window;
- u32 cells;
- const unsigned char *prop;
-
- dma_window = dma_window_prop;
-
- /* busno is always one cell */
- *busno = *(dma_window++);
-
- prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
- if (!prop)
- prop = of_get_property(dn, "#address-cells", NULL);
-
- cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
- *phys = of_read_number(dma_window, cells);
-
- dma_window += cells;
-
- prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
- cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
- *size = of_read_number(dma_window, cells);
-}
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index bb10637ce688..8cfa98cadf3d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -132,9 +132,9 @@ long arch_ptrace(struct task_struct *child, long request,
return rval;
}
-asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
+ unsigned long ret = 0;
secure_computing_strict(regs->r12);
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index fbe58c6554a8..bab4c8330ef4 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
/* Trigger specific functions */
#ifdef CONFIG_GPIOLIB
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 8955a3829cf0..a1cbaf90e2ea 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -89,7 +89,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
int rval;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -158,7 +158,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
- int signal;
+ unsigned long signal;
unsigned long address = 0;
#ifdef CONFIG_MMU
pmd_t *pmdp;
@@ -174,7 +174,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
+ : (unsigned long)sig;
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 0166e890486c..29c8568ec55c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -388,3 +388,4 @@ ENTRY(sys_call_table)
.long sys_getrandom /* 385 */
.long sys_memfd_create
.long sys_bpf
+ .long sys_execveat
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index dd96f0e4bfa2..c8977450e28c 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -17,6 +17,7 @@
#include <linux/clockchips.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/timecounter.h>
#include <asm/cpuinfo.h>
static void __iomem *timer_baseaddr;
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 1f7b8d449668..61c04eed14d5 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -59,7 +59,7 @@ struct stack_trace;
*
* Return - Number of stack bytes the instruction reserves or reclaims
*/
-inline long get_frame_size(unsigned long instr)
+static inline long get_frame_size(unsigned long instr)
{
return abs((s16)(instr & 0xFFFF));
}
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index b30e41c0c033..48528fb81eff 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1026,6 +1026,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ struct pci_dev *dev = bus->self;
+
if (request_resource(pr, res) == 0)
continue;
/*
@@ -1035,6 +1037,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
*/
if (reparent_resources(pr, res) == 0)
continue;
+
+ if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+ pci_claim_bridge_resource(dev,
+ i + PCI_BRIDGE_RESOURCES) == 0)
+ continue;
+
}
pr_warn("PCI: Cannot allocate resource region ");
pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1227,7 +1235,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
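Both hunks add pci_claim_bridge_resource() as a last-resort fallback (a helper introduced in the PCI core around the same time): if a bridge window can be neither claimed verbatim nor reparented, the core may still be able to clip it to what the parent bus provides before this code gives up and remaps. The resulting claim order for a bridge resource i, condensed from the first hunk (kernel context assumed):

    if (request_resource(pr, res) == 0)
            continue;                               /* claimed as-is             */
    if (reparent_resources(pr, res) == 0)
            continue;                               /* nested under a sibling    */
    if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
        pci_claim_bridge_resource(dev, i + PCI_BRIDGE_RESOURCES) == 0)
            continue;                               /* clipped to fit the parent */
    /* otherwise fall through to the "Cannot allocate resource region" warning */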
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3289969ee423..843713c05b79 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2656,27 +2656,21 @@ config TRAD_SIGNALS
bool
config MIPS32_COMPAT
- bool "Kernel support for Linux/MIPS 32-bit binary compatibility"
- depends on 64BIT
- help
- Select this option if you want Linux/MIPS 32-bit binary
- compatibility. Since all software available for Linux/MIPS is
- currently 32-bit you should say Y here.
+ bool
config COMPAT
bool
- depends on MIPS32_COMPAT
- select ARCH_WANT_OLD_COMPAT_IPC
- default y
config SYSVIPC_COMPAT
bool
- depends on COMPAT && SYSVIPC
- default y
config MIPS32_O32
bool "Kernel support for o32 binaries"
- depends on MIPS32_COMPAT
+ depends on 64BIT
+ select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
help
Select this option if you want to run o32 binaries. These are pure
32-bit binaries as used by the 32-bit Linux/MIPS port. Most of
@@ -2686,7 +2680,10 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
- depends on MIPS32_COMPAT
+ depends on 64BIT
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
help
Select this option if you want to run n32 binaries. These are
64-bit binaries using 32-bit quantities for addressing and certain
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 8585078ae50e..2a4c52e27f41 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -49,7 +49,8 @@
/*
* Some extra ELF definitions
*/
-#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
+#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
+#define PT_MIPS_ABIFLAGS 0x70000003 /* Records ABI related flags */
/* -------------------------------------------------------------------- */
@@ -349,39 +350,46 @@ int main(int argc, char *argv[])
for (i = 0; i < ex.e_phnum; i++) {
/* Section types we can ignore... */
- if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE ||
- ph[i].p_type == PT_PHDR
- || ph[i].p_type == PT_MIPS_REGINFO)
+ switch (ph[i].p_type) {
+ case PT_NULL:
+ case PT_NOTE:
+ case PT_PHDR:
+ case PT_MIPS_REGINFO:
+ case PT_MIPS_ABIFLAGS:
continue;
- /* Section types we can't handle... */
- else if (ph[i].p_type != PT_LOAD) {
- fprintf(stderr,
- "Program header %d type %d can't be converted.\n",
- ex.e_phnum, ph[i].p_type);
- exit(1);
- }
- /* Writable (data) segment? */
- if (ph[i].p_flags & PF_W) {
- struct sect ndata, nbss;
- ndata.vaddr = ph[i].p_vaddr;
- ndata.len = ph[i].p_filesz;
- nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
- nbss.len = ph[i].p_memsz - ph[i].p_filesz;
+ case PT_LOAD:
+ /* Writable (data) segment? */
+ if (ph[i].p_flags & PF_W) {
+ struct sect ndata, nbss;
+
+ ndata.vaddr = ph[i].p_vaddr;
+ ndata.len = ph[i].p_filesz;
+ nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
+ nbss.len = ph[i].p_memsz - ph[i].p_filesz;
- combine(&data, &ndata, 0);
- combine(&bss, &nbss, 1);
- } else {
- struct sect ntxt;
+ combine(&data, &ndata, 0);
+ combine(&bss, &nbss, 1);
+ } else {
+ struct sect ntxt;
- ntxt.vaddr = ph[i].p_vaddr;
- ntxt.len = ph[i].p_filesz;
+ ntxt.vaddr = ph[i].p_vaddr;
+ ntxt.len = ph[i].p_filesz;
- combine(&text, &ntxt, 0);
+ combine(&text, &ntxt, 0);
+ }
+ /* Remember the lowest segment start address. */
+ if (ph[i].p_vaddr < cur_vma)
+ cur_vma = ph[i].p_vaddr;
+ break;
+
+ default:
+ /* Section types we can't handle... */
+ fprintf(stderr,
+ "Program header %d type %d can't be converted.\n",
+ ex.e_phnum, ph[i].p_type);
+ exit(1);
}
- /* Remember the lowest segment start address. */
- if (ph[i].p_vaddr < cur_vma)
- cur_vma = ph[i].p_vaddr;
}
/* Sections must be in order to be converted... */
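One subtlety in the switch conversion above: the continue under the ignorable segment types (PT_NULL, PT_NOTE, PT_PHDR, PT_MIPS_REGINFO, PT_MIPS_ABIFLAGS) applies to the enclosing for loop, not to the switch (only break is switch-local), which is exactly what lets those cases skip the rest of the loop body. A standalone reminder of that rule:

    #include <stdio.h>

    int main(void)
    {
            for (int i = 0; i < 4; i++) {
                    switch (i) {
                    case 1:
                    case 2:
                            continue;       /* jumps to the next i, like the
                                               ignorable PT_* cases above    */
                    default:
                            break;          /* leaves only the switch        */
                    }
                    printf("handled %d\n", i);      /* prints 0 and 3 */
            }
            return 0;
    }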
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 42f5f1a4b40a..69a8a8dabc2b 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -16,6 +16,7 @@ obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
obj-y += dma-octeon.o
obj-y += octeon-memcpy.o
obj-y += executive/
+obj-y += crypto/
obj-$(CONFIG_MTD) += flash_setup.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/cavium-octeon/crypto/Makefile b/arch/mips/cavium-octeon/crypto/Makefile
new file mode 100644
index 000000000000..a74f76d85a2f
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/Makefile
@@ -0,0 +1,7 @@
+#
+# OCTEON-specific crypto modules.
+#
+
+obj-y += octeon-crypto.o
+
+obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
new file mode 100644
index 000000000000..7c82ff463b65
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -0,0 +1,66 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2012 Cavium Networks
+ */
+
+#include <asm/cop2.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "octeon-crypto.h"
+
+/**
+ * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
+ * crypto operations in calls to octeon_crypto_enable/disable in order to make
+ * sure the state of COP2 isn't corrupted if userspace is also performing
+ * hardware crypto operations. Allocate the state parameter on the stack.
+ * Preemption must be disabled to prevent context switches.
+ *
+ * @state: Pointer to state structure to store current COP2 state in.
+ *
+ * Returns: Flags to be passed to octeon_crypto_disable()
+ */
+unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
+{
+ int status;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ if (KSTK_STATUS(current) & ST0_CU2) {
+ octeon_cop2_save(&(current->thread.cp2));
+ KSTK_STATUS(current) &= ~ST0_CU2;
+ status &= ~ST0_CU2;
+ } else if (status & ST0_CU2) {
+ octeon_cop2_save(state);
+ }
+ local_irq_restore(flags);
+ return status & ST0_CU2;
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_enable);
+
+/**
+ * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
+ * called after an octeon_crypto_enable() before any context switch or return to
+ * userspace.
+ *
+ * @state: Pointer to COP2 state to restore
+ * @flags: Return value from octeon_crypto_enable()
+ */
+void octeon_crypto_disable(struct octeon_cop2_state *state,
+ unsigned long crypto_flags)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (crypto_flags & ST0_CU2)
+ octeon_cop2_restore(state);
+ else
+ write_c0_status(read_c0_status() & ~ST0_CU2);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_disable);
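octeon_crypto_enable()/octeon_crypto_disable() must bracket every kernel use of the COP2 hash unit: enable saves whichever COP2 context is currently live (the task's or the caller-supplied state) and returns a flag word that disable later uses to restore it. The octeon-md5.c driver added below is the first user; its calling pattern, condensed (kernel context assumed, not compilable on its own):

    struct octeon_cop2_state state;
    unsigned long flags;

    preempt_disable();                      /* no context switch while we own COP2 */
    flags = octeon_crypto_enable(&state);

    /* ... issue COP2 hash operations here ... */

    octeon_crypto_disable(&state, flags);
    preempt_enable();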
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
new file mode 100644
index 000000000000..e2a4aece9c24
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -0,0 +1,75 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
+ *
+ * MD5 instruction definitions added by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ */
+#ifndef __LINUX_OCTEON_CRYPTO_H
+#define __LINUX_OCTEON_CRYPTO_H
+
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+
+#define OCTEON_CR_OPCODE_PRIORITY 300
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+ unsigned long flags);
+
+/*
+ * Macros needed to implement MD5:
+ */
+
+/*
+ * The index can be 0-1.
+ */
+#define write_octeon_64bit_hash_dword(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0048+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The index can be 0-1.
+ */
+#define read_octeon_64bit_hash_dword(index) \
+({ \
+ u64 __value; \
+ \
+ __asm__ __volatile__ ( \
+ "dmfc2 %[rt],0x0048+" STR(index) \
+ : [rt] "=d" (__value) \
+ : ); \
+ \
+ __value; \
+})
+
+/*
+ * The index can be 0-6.
+ */
+#define write_octeon_64bit_block_dword(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0040+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_md5_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x4047" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+#endif /* __LINUX_OCTEON_CRYPTO_H */
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
new file mode 100644
index 000000000000..b909881ba6c1
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -0,0 +1,216 @@
+/*
+ * Cryptographic API.
+ *
+ * MD5 Message Digest Algorithm (RFC1321).
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/md5.c, which is:
+ *
+ * Derived from cryptoapi implementation, originally based on the
+ * public domain implementation written by Colin Plumb in 1993.
+ *
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/md5.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <linux/cryptohash.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_md5_store_hash(struct md5_state *ctx)
+{
+ u64 *hash = (u64 *)ctx->hash;
+
+ write_octeon_64bit_hash_dword(hash[0], 0);
+ write_octeon_64bit_hash_dword(hash[1], 1);
+}
+
+static void octeon_md5_read_hash(struct md5_state *ctx)
+{
+ u64 *hash = (u64 *)ctx->hash;
+
+ hash[0] = read_octeon_64bit_hash_dword(0);
+ hash[1] = read_octeon_64bit_hash_dword(1);
+}
+
+static void octeon_md5_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_md5_start(block[7]);
+}
+
+static int octeon_md5_init(struct shash_desc *desc)
+{
+ struct md5_state *mctx = shash_desc_ctx(desc);
+
+ mctx->hash[0] = cpu_to_le32(0x67452301);
+ mctx->hash[1] = cpu_to_le32(0xefcdab89);
+ mctx->hash[2] = cpu_to_le32(0x98badcfe);
+ mctx->hash[3] = cpu_to_le32(0x10325476);
+ mctx->byte_count = 0;
+
+ return 0;
+}
+
+static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct md5_state *mctx = shash_desc_ctx(desc);
+ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ mctx->byte_count += len;
+
+ if (avail > len) {
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+ data, len);
+ return 0;
+ }
+
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
+ avail);
+
+ local_bh_disable();
+ preempt_disable();
+ flags = octeon_crypto_enable(&state);
+ octeon_md5_store_hash(mctx);
+
+ octeon_md5_transform(mctx->block);
+ data += avail;
+ len -= avail;
+
+ while (len >= sizeof(mctx->block)) {
+ octeon_md5_transform(data);
+ data += sizeof(mctx->block);
+ len -= sizeof(mctx->block);
+ }
+
+ octeon_md5_read_hash(mctx);
+ octeon_crypto_disable(&state, flags);
+ preempt_enable();
+ local_bh_enable();
+
+ memcpy(mctx->block, data, len);
+
+ return 0;
+}
+
+static int octeon_md5_final(struct shash_desc *desc, u8 *out)
+{
+ struct md5_state *mctx = shash_desc_ctx(desc);
+ const unsigned int offset = mctx->byte_count & 0x3f;
+ char *p = (char *)mctx->block + offset;
+ int padding = 56 - (offset + 1);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ *p++ = 0x80;
+
+ local_bh_disable();
+ preempt_disable();
+ flags = octeon_crypto_enable(&state);
+ octeon_md5_store_hash(mctx);
+
+ if (padding < 0) {
+ memset(p, 0x00, padding + sizeof(u64));
+ octeon_md5_transform(mctx->block);
+ p = (char *)mctx->block;
+ padding = 56;
+ }
+
+ memset(p, 0, padding);
+ mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
+ mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
+ octeon_md5_transform(mctx->block);
+
+ octeon_md5_read_hash(mctx);
+ octeon_crypto_disable(&state, flags);
+ preempt_enable();
+ local_bh_enable();
+
+ memcpy(out, mctx->hash, sizeof(mctx->hash));
+ memset(mctx, 0, sizeof(*mctx));
+
+ return 0;
+}
+
+static int octeon_md5_export(struct shash_desc *desc, void *out)
+{
+ struct md5_state *ctx = shash_desc_ctx(desc);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int octeon_md5_import(struct shash_desc *desc, const void *in)
+{
+ struct md5_state *ctx = shash_desc_ctx(desc);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .init = octeon_md5_init,
+ .update = octeon_md5_update,
+ .final = octeon_md5_final,
+ .export = octeon_md5_export,
+ .import = octeon_md5_import,
+ .descsize = sizeof(struct md5_state),
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name= "octeon-md5",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init md5_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shash(&alg);
+}
+
+static void __exit md5_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(md5_mod_init);
+module_exit(md5_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
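Because the driver registers a plain shash with cra_name "md5" at OCTEON_CR_OPCODE_PRIORITY, existing crypto API users pick up the accelerated transform by name, with the generic crypto/md5.c as fallback. A hedged sketch of a kernel caller using only long-standing shash helpers (error handling omitted; the desc->flags assignment reflects the shash_desc layout of kernels of this vintage):

    struct crypto_shash *tfm = crypto_alloc_shash("md5", 0, 0);
    SHASH_DESC_ON_STACK(desc, tfm);
    u8 digest[MD5_DIGEST_SIZE];

    desc->tfm = tfm;
    desc->flags = 0;                                /* caller may not sleep  */
    crypto_shash_digest(desc, data, len, digest);   /* init + update + final */
    crypto_free_shash(tfm);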
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index e15b049b3bd7..b2104bd9ab3b 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -27,6 +27,9 @@
#include <asm/octeon/octeon.h>
+enum octeon_feature_bits __octeon_feature_bits __read_mostly;
+EXPORT_SYMBOL_GPL(__octeon_feature_bits);
+
/**
* Read a byte of fuse data
* @byte_addr: address to read
@@ -103,6 +106,9 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
else
suffix = "NSP";
+ if (!fus_dat2.s.nocrypto)
+ __octeon_feature_bits |= OCTEON_HAS_CRYPTO;
+
/*
* Assume pass number is encoded using <5:3><2:0>. Exceptions
* will be fixed later.
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index b67ddf0f8bcd..12410a2788d8 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -77,7 +77,7 @@ static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
-static void octeon2_usb_clocks_start(void)
+static void octeon2_usb_clocks_start(struct device *dev)
{
u64 div;
union cvmx_uctlx_if_ena if_ena;
@@ -86,6 +86,8 @@ static void octeon2_usb_clocks_start(void)
union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
int i;
unsigned long io_clk_64_to_ns;
+ u32 clock_rate = 12000000;
+ bool is_crystal_clock = false;
mutex_lock(&octeon2_usb_clocks_mutex);
@@ -96,6 +98,28 @@ static void octeon2_usb_clocks_start(void)
io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
+ if (dev->of_node) {
+ struct device_node *uctl_node;
+ const char *clock_type;
+
+ uctl_node = of_get_parent(dev->of_node);
+ if (!uctl_node) {
+ dev_err(dev, "No UCTL device node\n");
+ goto exit;
+ }
+ i = of_property_read_u32(uctl_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No UCTL \"refclk-frequency\"\n");
+ goto exit;
+ }
+ i = of_property_read_string(uctl_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+ }
+
/*
* Step 1: Wait for voltages stable. That surely happened
* before starting the kernel.
@@ -126,9 +150,22 @@ static void octeon2_usb_clocks_start(void)
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 3b */
- /* 12MHz crystal. */
- clk_rst_ctl.s.p_refclk_sel = 0;
- clk_rst_ctl.s.p_refclk_div = 0;
+ clk_rst_ctl.s.p_refclk_sel = is_crystal_clock ? 0 : 1;
+ switch (clock_rate) {
+ default:
+ pr_err("Invalid UCTL clock rate of %u, using 12000000 instead\n",
+ clock_rate);
+ /* Fall through */
+ case 12000000:
+ clk_rst_ctl.s.p_refclk_div = 0;
+ break;
+ case 24000000:
+ clk_rst_ctl.s.p_refclk_div = 1;
+ break;
+ case 48000000:
+ clk_rst_ctl.s.p_refclk_div = 2;
+ break;
+ }
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 3c */
@@ -259,7 +296,7 @@ static void octeon2_usb_clocks_stop(void)
static int octeon_ehci_power_on(struct platform_device *pdev)
{
- octeon2_usb_clocks_start();
+ octeon2_usb_clocks_start(&pdev->dev);
return 0;
}
@@ -273,15 +310,16 @@ static struct usb_ehci_pdata octeon_ehci_pdata = {
#ifdef __BIG_ENDIAN
.big_endian_mmio = 1,
#endif
+ .dma_mask_64 = 1,
.power_on = octeon_ehci_power_on,
.power_off = octeon_ehci_power_off,
};
-static void __init octeon_ehci_hw_start(void)
+static void __init octeon_ehci_hw_start(struct device *dev)
{
union cvmx_uctlx_ehci_ctl ehci_ctl;
- octeon2_usb_clocks_start();
+ octeon2_usb_clocks_start(dev);
ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
/* Use 64-bit addressing. */
@@ -294,64 +332,30 @@ static void __init octeon_ehci_hw_start(void)
octeon2_usb_clocks_stop();
}
-static u64 octeon_ehci_dma_mask = DMA_BIT_MASK(64);
-
static int __init octeon_ehci_device_init(void)
{
struct platform_device *pd;
+ struct device_node *ehci_node;
int ret = 0;
- struct resource usb_resources[] = {
- {
- .flags = IORESOURCE_MEM,
- }, {
- .flags = IORESOURCE_IRQ,
- }
- };
-
- /* Only Octeon2 has ehci/ohci */
- if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ ehci_node = of_find_node_by_name(NULL, "ehci");
+ if (!ehci_node)
return 0;
- if (octeon_is_simulation() || usb_disabled())
- return 0; /* No USB in the simulator. */
-
- pd = platform_device_alloc("ehci-platform", 0);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- usb_resources[0].start = 0x00016F0000000000ULL;
- usb_resources[0].end = usb_resources[0].start + 0x100;
-
- usb_resources[1].start = OCTEON_IRQ_USB0;
- usb_resources[1].end = OCTEON_IRQ_USB0;
-
- ret = platform_device_add_resources(pd, usb_resources,
- ARRAY_SIZE(usb_resources));
- if (ret)
- goto fail;
+ pd = of_find_device_by_node(ehci_node);
+ if (!pd)
+ return 0;
- pd->dev.dma_mask = &octeon_ehci_dma_mask;
pd->dev.platform_data = &octeon_ehci_pdata;
- octeon_ehci_hw_start();
+ octeon_ehci_hw_start(&pd->dev);
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
-
- return ret;
-fail:
- platform_device_put(pd);
-out:
return ret;
}
device_initcall(octeon_ehci_device_init);
static int octeon_ohci_power_on(struct platform_device *pdev)
{
- octeon2_usb_clocks_start();
+ octeon2_usb_clocks_start(&pdev->dev);
return 0;
}
@@ -369,11 +373,11 @@ static struct usb_ohci_pdata octeon_ohci_pdata = {
.power_off = octeon_ohci_power_off,
};
-static void __init octeon_ohci_hw_start(void)
+static void __init octeon_ohci_hw_start(struct device *dev)
{
union cvmx_uctlx_ohci_ctl ohci_ctl;
- octeon2_usb_clocks_start();
+ octeon2_usb_clocks_start(dev);
ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
ohci_ctl.s.l2c_addr_msb = 0;
@@ -387,57 +391,27 @@ static void __init octeon_ohci_hw_start(void)
static int __init octeon_ohci_device_init(void)
{
struct platform_device *pd;
+ struct device_node *ohci_node;
int ret = 0;
- struct resource usb_resources[] = {
- {
- .flags = IORESOURCE_MEM,
- }, {
- .flags = IORESOURCE_IRQ,
- }
- };
-
- /* Only Octeon2 has ehci/ohci */
- if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ ohci_node = of_find_node_by_name(NULL, "ohci");
+ if (!ohci_node)
return 0;
- if (octeon_is_simulation() || usb_disabled())
- return 0; /* No USB in the simulator. */
-
- pd = platform_device_alloc("ohci-platform", 0);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- usb_resources[0].start = 0x00016F0000000400ULL;
- usb_resources[0].end = usb_resources[0].start + 0x100;
-
- usb_resources[1].start = OCTEON_IRQ_USB0;
- usb_resources[1].end = OCTEON_IRQ_USB0;
-
- ret = platform_device_add_resources(pd, usb_resources,
- ARRAY_SIZE(usb_resources));
- if (ret)
- goto fail;
+ pd = of_find_device_by_node(ohci_node);
+ if (!pd)
+ return 0;
pd->dev.platform_data = &octeon_ohci_pdata;
- octeon_ohci_hw_start();
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
+ octeon_ohci_hw_start(&pd->dev);
return ret;
-fail:
- platform_device_put(pd);
-out:
- return ret;
}
device_initcall(octeon_ohci_device_init);
#endif /* CONFIG_USB */
+
static struct of_device_id __initdata octeon_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "cavium,octeon-6335-uctl", },
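The USB clock bring-up no longer assumes a 12 MHz crystal: the UCTL parent node's "refclk-frequency" property selects the divider (12, 24 or 48 MHz), "refclk-type" selects crystal versus external reference, and the EHCI/OHCI platform devices are now looked up from the device tree instead of being created with hard-coded resources. The DT query pattern, condensed from the hunk above (kernel context assumed; the property names are the ones this patch introduces):

    uctl_node = of_get_parent(dev->of_node);
    if (of_property_read_u32(uctl_node, "refclk-frequency", &clock_rate))
            return;                         /* the property is now required */
    if (!of_property_read_string(uctl_node, "refclk-type", &clock_type) &&
        strcmp("crystal", clock_type) == 0)
            is_crystal_clock = true;        /* p_refclk_sel = 0, else 1 */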
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index ecd903dd1c45..8b1eeffa12ed 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -240,9 +240,7 @@ static int octeon_cpu_disable(void)
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
- local_irq_disable();
octeon_fixup_irqs();
- local_irq_enable();
flush_cache_all();
local_flush_tlb_all();
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index f57b96dcf7df..61a4460d67d3 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_SCTP=m
CONFIG_BRIDGE=m
@@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_RESET_DELAY_MS=15000
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
@@ -340,6 +332,7 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
@@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 994d21939676..affebb78f5d6 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -64,7 +64,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
return SIGFPE;
/* set FRE */
- write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE);
+ set_c0_config5(MIPS_CONF5_FRE);
goto fr_common;
case FPU_64BIT:
@@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode)
#endif
/* fall through */
case FPU_32BIT:
- /* clear FRE */
- write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE);
+ if (cpu_has_fre) {
+ /* clear FRE */
+ clear_c0_config5(MIPS_CONF5_FRE);
+ }
fr_common:
/* set CU1 & change FR appropriately */
fr = (int)mode & FPU_FR_MASK;
@@ -182,25 +184,32 @@ static inline int init_fpu(void)
int ret = 0;
if (cpu_has_fpu) {
+ unsigned int config5;
+
ret = __own_fpu();
- if (!ret) {
- unsigned int config5 = read_c0_config5();
-
- /*
- * Ensure FRE is clear whilst running _init_fpu, since
- * single precision FP instructions are used. If FRE
- * was set then we'll just end up initialising all 32
- * 64b registers.
- */
- write_c0_config5(config5 & ~MIPS_CONF5_FRE);
- enable_fpu_hazard();
+ if (ret)
+ return ret;
+ if (!cpu_has_fre) {
_init_fpu();
- /* Restore FRE */
- write_c0_config5(config5);
- enable_fpu_hazard();
+ return 0;
}
+
+ /*
+ * Ensure FRE is clear whilst running _init_fpu, since
+ * single precision FP instructions are used. If FRE
+ * was set then we'll just end up initialising all 32
+ * 64b registers.
+ */
+ config5 = clear_c0_config5(MIPS_CONF5_FRE);
+ enable_fpu_hazard();
+
+ _init_fpu();
+
+ /* Restore FRE */
+ write_c0_config5(config5);
+ enable_fpu_hazard();
} else
fpu_emulator_init_fpu();
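The FRE handling switches from open-coded read_c0_config5()/write_c0_config5() pairs to the generated set_c0_config5()/clear_c0_config5() helpers; clear_c0_config5() hands back the previous register value, which is what lets init_fpu() restore Config5 after _init_fpu() has run. A simplified sketch of that read-modify-write-and-return shape (illustrative only; the real helpers come from the __BUILD_SET_C0 macro and operate on a coprocessor register, not memory):

    static inline unsigned int sketch_clear_bits(volatile unsigned int *reg,
                                                 unsigned int mask)
    {
            unsigned int old = *reg;        /* read_c0_config5()             */

            *reg = old & ~mask;             /* write_c0_config5(old & ~mask) */
            return old;                     /* caller restores this later    */
    }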
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
index f8d37d1df5de..9fac64a26353 100644
--- a/arch/mips/include/asm/fw/arc/hinv.h
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -119,7 +119,7 @@ union key_u {
#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */
#endif
-typedef struct component {
+typedef struct {
CONFIGCLASS Class;
CONFIGTYPE Type;
IDENTIFIERFLAG Flags;
@@ -140,7 +140,7 @@ struct cfgdata {
};
/* System ID */
-typedef struct systemid {
+typedef struct {
CHAR VendorId[8];
CHAR ProductId[8];
} SYSTEMID;
@@ -166,7 +166,7 @@ typedef enum memorytype {
#endif /* _NT_PROM */
} MEMORYTYPE;
-typedef struct memorydescriptor {
+typedef struct {
MEMORYTYPE Type;
LONG BasePage;
LONG PageCount;
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f2c249796ea8..ac4fc716062b 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -120,6 +120,7 @@ struct kvm_vcpu_stat {
u32 resvd_inst_exits;
u32 break_inst_exits;
u32 flush_dcache_exits;
+ u32 halt_successful_poll;
u32 halt_wakeup;
};
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b95a827d763e..59c0901bdd84 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void)
/* Macros to ease the creation of register access functions */
#define BUILD_CM_R_(name, off) \
-static inline u32 *addr_gcr_##name(void) \
+static inline u32 __iomem *addr_gcr_##name(void) \
{ \
- return (u32 *)(mips_cm_base + (off)); \
+ return (u32 __iomem *)(mips_cm_base + (off)); \
} \
\
static inline u32 read_gcr_##name(void) \
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5e4aef304b02..5b720d8c2745 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1386,12 +1386,27 @@ do { \
__res; \
})
+#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set reorder \n" \
+ " "STR(gas_hardfloat)" \n" \
+ " ctc1 %0,"STR(dest)" \n" \
+ " .set pop \n" \
+ : : "r" (val)); \
+} while (0)
+
#ifdef GAS_HAS_SET_HARDFLOAT
#define read_32bit_cp1_register(source) \
_read_32bit_cp1_register(source, .set hardfloat)
+#define write_32bit_cp1_register(dest, val) \
+ _write_32bit_cp1_register(dest, val, .set hardfloat)
#else
#define read_32bit_cp1_register(source) \
_read_32bit_cp1_register(source, )
+#define write_32bit_cp1_register(dest, val) \
+ _write_32bit_cp1_register(dest, val, )
#endif
#ifdef HAVE_AS_DSP
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index c4fe81f47f53..8ebd3f579b84 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -46,8 +46,6 @@ enum octeon_feature {
OCTEON_FEATURE_SAAD,
/* Does this Octeon support the ZIP offload engine? */
OCTEON_FEATURE_ZIP,
- /* Does this Octeon support crypto acceleration using COP2? */
- OCTEON_FEATURE_CRYPTO,
OCTEON_FEATURE_DORM_CRYPTO,
/* Does this Octeon support PCI express? */
OCTEON_FEATURE_PCIE,
@@ -86,6 +84,21 @@ enum octeon_feature {
OCTEON_MAX_FEATURE
};
+enum octeon_feature_bits {
+ OCTEON_HAS_CRYPTO = 0x0001, /* Crypto acceleration using COP2 */
+};
+extern enum octeon_feature_bits __octeon_feature_bits;
+
+/**
+ * octeon_has_crypto() - Check if this OCTEON has crypto acceleration support.
+ *
+ * Returns: Non-zero if the feature exists. Zero if the feature does not exist.
+ */
+static inline int octeon_has_crypto(void)
+{
+ return __octeon_feature_bits & OCTEON_HAS_CRYPTO;
+}
+
/**
* Determine if the current Octeon supports a specific feature. These
* checks have been optimized to be fairly quick, but they should still
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index d781f9e66884..6dfefd2d5cdf 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -44,11 +44,6 @@ extern int octeon_get_boot_num_arguments(void);
extern const char *octeon_get_boot_argument(int arg);
extern void octeon_hal_setup_reserved32(void);
extern void octeon_user_io_init(void);
-struct octeon_cop2_state;
-extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
-extern void octeon_crypto_disable(struct octeon_cop2_state *state,
- unsigned long flags);
-extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
extern void octeon_init_cvmcount(void);
extern void octeon_setup_delays(void);
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 68984b612f9d..a6be006b6f75 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START MAP_BASE
@@ -161,22 +161,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Encode and decode a nonlinear file mapping entry
- */
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
- (((_pte).pte >> 2 ) & 0x38) | \
- (((_pte).pte >> 10) << 6 ))
-
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
- (((off) & 0x38) << 2 ) | \
- (((off) >> 6 ) << 10) | \
- _PAGE_FILE })
-
-/*
- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS 28
#else
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -188,13 +172,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
-/*
- * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
- */
-#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
-
-#define PTE_FILE_MAX_BITS 30
#else
/*
* Constraints:
@@ -209,19 +186,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Encode and decode a nonlinear file mapping entry
- */
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
- (((_pte).pte >> 2) & 0x8) | \
- (((_pte).pte >> 8) << 4))
-
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
- (((off) & 0x8) << 2) | \
- (((off) >> 4) << 8) | \
- _PAGE_FILE })
-
-#define PTE_FILE_MAX_BITS 28
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index e1c49a96807d..1659bb91ae21 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -291,13 +291,4 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
- * make things easier, and only use the upper 56 bits for the page offset...
- */
-#define PTE_FILE_MAX_BITS 56
-
-#define pte_to_pgoff(_pte) ((_pte).pte >> 8)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE })
-
#endif /* _ASM_PGTABLE_64_H */
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index ca11f14f40a3..fc807aa5ec8d 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -48,8 +48,6 @@
/*
* The following bits are implemented in software
- *
- * _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
@@ -64,7 +62,6 @@
#define _PAGE_SILENT_READ _PAGE_VALID
#define _PAGE_SILENT_WRITE _PAGE_DIRTY
-#define _PAGE_FILE _PAGE_MODIFIED
#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
@@ -72,8 +69,6 @@
/*
* The following are implemented by software
- *
- * _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
@@ -85,8 +80,6 @@
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT 4
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE_SHIFT 4
-#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT)
/*
* And these are the hardware TLB bits
@@ -116,7 +109,6 @@
* The following bits are implemented in software
*
* _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
- * _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT (0)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
@@ -128,7 +120,6 @@
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE (_PAGE_MODIFIED)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 62a6ba383d4f..583ff4215479 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -231,7 +231,6 @@ extern pgd_t swapper_pg_dir[];
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -287,7 +286,6 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index bb7963753730..6499d93ae68d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -29,13 +29,7 @@
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((config_enabled(CONFIG_32BIT) ||
- test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
- (regs->regs[2] == __NR_syscall))
- return regs->regs[4];
- else
- return regs->regs[2];
+ return current_thread_info()->syscall;
}
static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 99eea59604e9..9e1295f874f0 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -34,8 +34,8 @@ struct thread_info {
* 0x7fffffff for user-thead
* 0xffffffff for kernel-thread
*/
- struct restart_block restart_block;
struct pt_regs *regs;
+ long syscall; /* syscall number */
};
/*
@@ -49,9 +49,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index d001bb1ad177..c03088f9f514 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -376,16 +376,17 @@
#define __NR_getrandom (__NR_Linux + 353)
#define __NR_memfd_create (__NR_Linux + 354)
#define __NR_bpf (__NR_Linux + 355)
+#define __NR_execveat (__NR_Linux + 356)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 355
+#define __NR_Linux_syscalls 356
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 355
+#define __NR_O32_Linux_syscalls 356
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -709,16 +710,17 @@
#define __NR_getrandom (__NR_Linux + 313)
#define __NR_memfd_create (__NR_Linux + 314)
#define __NR_bpf (__NR_Linux + 315)
+#define __NR_execveat (__NR_Linux + 316)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 315
+#define __NR_Linux_syscalls 316
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 315
+#define __NR_64_Linux_syscalls 316
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1046,15 +1048,16 @@
#define __NR_getrandom (__NR_Linux + 317)
#define __NR_memfd_create (__NR_Linux + 318)
#define __NR_bpf (__NR_Linux + 319)
+#define __NR_execveat (__NR_Linux + 320)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 319
+#define __NR_Linux_syscalls 320
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 319
+#define __NR_N32_Linux_syscalls 320
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c
index 2531da1d3add..97206b3deb97 100644
--- a/arch/mips/jz4740/irq.c
+++ b/arch/mips/jz4740/irq.c
@@ -30,6 +30,9 @@
#include <asm/irq_cpu.h>
#include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/irq.h>
+
+#include "irq.h"
static void __iomem *jz_intc_base;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index b1d84bd4efb3..3b2dfdb4865f 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -98,7 +98,6 @@ void output_thread_info_defines(void)
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
- OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c92b15df6893..a5b5b56485c1 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -19,8 +19,8 @@ enum {
int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
bool is_interp, struct arch_elf_state *state)
{
- struct elfhdr *ehdr = _ehdr;
- struct elf_phdr *phdr = _phdr;
+ struct elf32_hdr *ehdr = _ehdr;
+ struct elf32_phdr *phdr = _phdr;
struct mips_elf_abiflags_v0 abiflags;
int ret;
@@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
return 0;
}
-static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
{
/* If the ABI requirement is provided, simply return that */
if (in_abi != -1)
@@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
int arch_check_elf(void *_ehdr, bool has_interpreter,
struct arch_elf_state *state)
{
- struct elfhdr *ehdr = _ehdr;
+ struct elf32_hdr *ehdr = _ehdr;
unsigned fp_abi, interp_fp_abi, abi0, abi1;
/* Ignore non-O32 binaries */
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 590c2c980fd3..6eb7a3f515fc 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = {
.irq_mask_ack = mask_mips_irq,
.irq_unmask = unmask_mips_irq,
.irq_eoi = unmask_mips_irq,
+ .irq_disable = mask_mips_irq,
+ .irq_enable = unmask_mips_irq,
};
/*
@@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
.irq_mask_ack = mips_mt_cpu_irq_ack,
.irq_unmask = unmask_mips_irq,
.irq_eoi = unmask_mips_irq,
+ .irq_disable = mask_mips_irq,
+ .irq_enable = unmask_mips_irq,
};
asmlinkage void __weak plat_irq_dispatch(void)
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 2a52568dbcd6..1833f5171ccd 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(dbe_lock);
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
- GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb76434828e8..85bff5d513e5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -82,6 +82,30 @@ void flush_thread(void)
{
}
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled())
+ save_msa(current);
+ else if (is_fpu_owner())
+ _save_fp(current);
+
+ save_dsp(current);
+
+ preempt_enable();
+
+ *dst = *src;
+ return 0;
+}
+
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
@@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
- preempt_disable();
-
- if (is_msa_enabled())
- save_msa(p);
- else if (is_fpu_owner())
- save_fp(p);
-
- if (cpu_has_dsp)
- save_dsp(p);
-
- preempt_enable();
-
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9d1487d83293..510452812594 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
long ret = 0;
user_exit();
+ current_thread_info()->syscall = syscall;
+
if (secure_computing() == -1)
return -1;
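syscall_trace_enter() now stashes the syscall number in thread_info->syscall before seccomp runs, and the simplified syscall_get_nr() earlier in this series reads it back, replacing the O32 special-casing of the indirect syscall() path. The producer/consumer pair, taken from the two hunks (kernel context assumed):

    /* entry path (kernel/ptrace.c) */
    current_thread_info()->syscall = syscall;

    /* consumer (asm/syscall.h) */
    static inline long syscall_get_nr(struct task_struct *task,
                                      struct pt_regs *regs)
    {
            return current_thread_info()->syscall;
    }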
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 00cad1005a16..6e8de80bb446 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -181,6 +181,7 @@ illegal_syscall:
sll t1, t0, 2
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
+ sw a0, PT_R2(sp) # call routine directly on restart
/* Some syscalls like execve get their arguments from struct pt_regs
and claim zero arguments in the syscall table. Thus we have to
@@ -580,3 +581,4 @@ EXPORT(sys_call_table)
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
+ PTR sys_execveat
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 5251565e344b..ad4d44635c76 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -435,4 +435,5 @@ EXPORT(sys_call_table)
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 5315 */
+ PTR sys_execveat
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 77e74398b828..446cc654da56 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf
+ PTR compat_sys_execveat /* 6320 */
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6f8db9f728e8..d07b210fbeff 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -186,6 +186,7 @@ LEAF(sys32_syscall)
dsll t1, t0, 3
beqz v0, einval
ld t2, sys32_call_table(t1) # syscall routine
+ sd a0, PT_R2(sp) # call routine directly on restart
move a0, a1 # shift argument registers
move a1, a2
@@ -565,4 +566,5 @@ EXPORT(sys32_call_table)
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
+ PTR compat_sys_execveat
.size sys32_call_table,.-sys32_call_table
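The four syscall-table hunks above wire up execveat for each MIPS ABI; from the table positions and comments, o32 gets number 4356 (one past sys_bpf at 4355), n64 gets 5316, and n32 gets 6320. A hypothetical userspace smoke test, assuming the inferred o32 number when the libc headers do not yet define __NR_execveat:

#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_execveat
#define __NR_execveat 4356	/* assumption: o32 number inferred from the table above */
#endif

int main(void)
{
	char *argv[] = { "/bin/true", NULL };
	char *envp[] = { NULL };

	/* On success the process image is replaced and this never returns. */
	syscall(__NR_execveat, AT_FDCWD, "/bin/true", argv, envp, 0);
	perror("execveat");	/* reached only if the kernel lacks the syscall */
	return 1;
}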
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 545bf11bd2ed..6a28c792d862 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -243,7 +243,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
int i;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index d69179c0d49d..19a7705f2a01 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -220,7 +220,7 @@ static int restore_sigcontext32(struct pt_regs *regs,
int i;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
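The two signal.c hunks above (and the matching mn10300, openrisc, and parisc hunks further down) follow the generic move of restart_block from struct thread_info into struct task_struct, so sigreturn now clears the restart hook through current instead of current_thread_info(). For reference, a condensed sketch of how a restartable syscall uses that field after the move (paraphrased; the handler name is hypothetical):

/* Condensed restart pattern after the move (paraphrased, not verbatim): */
static long some_interruptible_syscall(int interrupted)
{
	struct restart_block *restart = &current->restart_block;
				/* was current_thread_info()->restart_block */
	if (interrupted) {
		restart->fn = my_restart_handler;	/* hypothetical handler */
		return -ERESTART_RESTARTBLOCK;
	}
	return 0;
}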
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 1e0a93c5a3e7..e36a859af666 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -44,8 +44,8 @@ static void cmp_init_secondary(void)
struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
/* Assume GIC is present */
- change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
- STATUSF_IP7);
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
/* Enable per-cpu interrupts: platform specific */
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index ad86951b73bd..17ea705f6c40 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -161,7 +161,8 @@ static void vsmp_init_secondary(void)
#ifdef CONFIG_MIPS_GIC
/* This is Malta specific: IPI,performance and timer interrupts */
if (gic_present)
- change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
STATUSF_IP6 | STATUSF_IP7);
else
#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c94c4e92e17d..1c0d8c50b7e1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -123,10 +123,10 @@ asmlinkage void start_secondary(void)
unsigned int cpu;
cpu_probe();
- cpu_report();
per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
+ cpu_report();
/*
* XXX parity protection should be folded in here when it's converted
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ad3d2031c327..c3b41e24c05a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa)
/* Restore the scalar FP control & status register */
if (!was_fpu_owner)
- asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+ write_32bit_cp1_register(CP1_STATUS,
+ current->thread.fpu.fcr31);
}
out:
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 30e334e823bd..2ae12825529f 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_MMIO
+ select SRCU
---help---
Support for hosting Guest kernels.
Currently supported on MIPS32 processors.
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index d7279c03c517..4a68b176d6e4 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -434,7 +434,7 @@ __kvm_mips_return_to_guest:
/* Setup status register for running guest in UM */
.set at
or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
- and v1, v1, ~ST0_CU0
+ and v1, v1, ~(ST0_CU0 | ST0_MX)
.set noat
mtc0 v1, CP0_STATUS
ehb
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index e3b21e51ff7e..c9eccf5df912 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -15,9 +15,11 @@
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
+#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
#include <linux/kvm_host.h>
@@ -47,6 +49,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+ { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
{NULL}
};
@@ -378,6 +381,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu->mmio_needed = 0;
}
+ lose_fpu(1);
+
local_irq_disable();
/* Check if we have any exceptions/interrupts pending */
kvm_mips_deliver_interrupts(vcpu,
@@ -385,8 +390,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_guest_enter();
+ /* Disable hardware page table walking while in guest */
+ htw_stop();
+
r = __kvm_mips_vcpu_run(run, vcpu);
+ /* Re-enable HTW before enabling interrupts */
+ htw_start();
+
kvm_guest_exit();
local_irq_enable();
@@ -832,9 +843,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
- return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
@@ -980,9 +990,6 @@ static void kvm_mips_set_c0_status(void)
{
uint32_t status = read_c0_status();
- if (cpu_has_fpu)
- status |= (ST0_CU1);
-
if (cpu_has_dsp)
status |= (ST0_MX);
@@ -1002,6 +1009,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
+ /* re-enable HTW before enabling interrupts */
+ htw_start();
+
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
@@ -1136,6 +1146,9 @@ skip_emul:
}
}
+ /* Disable HTW before returning to guest or host */
+ htw_stop();
+
return ret;
}
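The mips.c hunks above bracket guest execution with htw_stop()/htw_start(), keeping the hardware page table walker off while guest context is live and re-enabling it before host interrupts are allowed again; lose_fpu(1) likewise drops host FPU state before entry. Simplified, the run path now looks roughly like this (condensed, not the verbatim ordering in kvm_arch_vcpu_ioctl_run):

/* Simplified bracketing (condensed sketch, not verbatim): */
lose_fpu(1);			/* flush host FPU state before entering the guest */
local_irq_disable();
htw_stop();			/* no hardware walks while guest mappings are live */
r = __kvm_mips_vcpu_run(run, vcpu);
htw_start();			/* back on before interrupts are re-enabled */
local_irq_enable();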
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
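This three-line hunk recurs for mn10300, nios2, openrisc, and parisc below: the core mm can now return VM_FAULT_SIGSEGV (introduced in this cycle for cases such as failed stack expansion), and a fault handler that does not map it to a segfault falls through to BUG(). After the change the error dispatch reads:

if (unlikely(fault & VM_FAULT_ERROR)) {
	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;		/* new: raise SIGSEGV instead of hitting BUG() */
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;
	BUG();
}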
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 70795a67a276..349995d19c7f 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -301,11 +301,9 @@ slow_irqon:
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT,
- write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ (end - start) >> PAGE_SHIFT,
+ write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0) {
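The gup.c hunk swaps an open-coded lock/get_user_pages/unlock sequence for the new get_user_pages_unlocked() helper, which takes and drops mmap_sem itself. Roughly, the helper stands in for the removed sequence (it may also drop and retake mmap_sem internally around faults that need to sleep):

down_read(&mm->mmap_sem);
ret = get_user_pages(current, mm, start, (end - start) >> PAGE_SHIFT,
		     write, 0, pages, NULL);
up_read(&mm->mmap_sem);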
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 4ec8ee10d371..06e0f421b41b 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -68,12 +68,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
return 0;
}
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;
@@ -83,15 +77,3 @@ int pud_huge(pud_t pud)
{
return (pud_val(pud) & _PAGE_HUGE) != 0;
}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
- return page;
-}
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index e90b2e899291..30639a6e9b8c 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -489,6 +489,8 @@ static void r4k_tlb_configure(void)
#ifdef CONFIG_64BIT
pg |= PG_ELPA;
#endif
+ if (cpu_has_rixiex)
+ pg |= PG_IEC;
write_c0_pagegrain(pg);
}
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 9fd6834a2172..5d6139390bf8 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1388,7 +1388,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
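module_free(mod, addr) is replaced tree-wide by module_memfree(addr); the struct module argument was unused by every implementation, and the BPF JIT was already passing NULL for it. The generic fallback is simply (paraphrased from kernel/module.c):

void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}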
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 4fde7ac76cc9..e743bdd6e20c 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -162,7 +162,6 @@ void __init nlm_smp_setup(void)
unsigned int boot_cpu;
int num_cpus, i, ncore, node;
volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
- char buf[64];
boot_cpu = hard_smp_processor_id();
cpumask_clear(&phys_cpu_present_mask);
@@ -189,10 +188,10 @@ void __init nlm_smp_setup(void)
}
}
- cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
- pr_info("Physical CPU mask: %s\n", buf);
- cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
- pr_info("Possible CPU mask: %s\n", buf);
+ pr_info("Physical CPU mask: %*pb\n",
+ cpumask_pr_args(&phys_cpu_present_mask));
+ pr_info("Possible CPU mask: %*pb\n",
+ cpumask_pr_args(cpu_possible_mask));
/* check with the cores we have woken up */
for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
@@ -209,7 +208,6 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
uint32_t core0_thr_mask, core_thr_mask;
int threadmode, i, j;
- char buf[64];
core0_thr_mask = 0;
for (i = 0; i < NLM_THREADS_PER_CORE; i++)
@@ -244,8 +242,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
return threadmode;
unsupp:
- cpumask_scnprintf(buf, ARRAY_SIZE(buf), wakeup_mask);
- panic("Unsupported CPU mask %s", buf);
+ panic("Unsupported CPU mask %*pb", cpumask_pr_args(wakeup_mask));
return 0;
}
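The netlogic hunks drop the cpumask_scnprintf() temporary buffer in favour of the %*pb printk format, which prints a bitmap directly; cpumask_pr_args(mask) expands to the field-width/pointer pair the format expects. A minimal usage sketch:

/* %*pb prints the mask as hex words, %*pbl as a range list (e.g. "0-3,8"). */
pr_info("online CPUs: %*pb\n",  cpumask_pr_args(cpu_online_mask));
pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));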
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 5ec2a7bae02c..f2355e3e65a1 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
}
struct pci_ops bcm1480_pci_ops = {
- bcm1480_pcibios_read,
- bcm1480_pcibios_write,
+ .read = bcm1480_pcibios_read,
+ .write = bcm1480_pcibios_write,
};
static struct resource bcm1480_mem_resource = {
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index d07e04121cc6..bedb72bd3a27 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
static struct pci_ops octeon_pci_ops = {
- octeon_read_config,
- octeon_write_config,
+ .read = octeon_read_config,
+ .write = octeon_write_config,
};
static struct resource octeon_pci_mem_resource = {
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 5e36c33e5543..eb4a17ba4a53 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
}
static struct pci_ops octeon_pcie0_ops = {
- octeon_pcie0_read_config,
- octeon_pcie0_write_config,
+ .read = octeon_pcie0_read_config,
+ .write = octeon_pcie0_write_config,
};
static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
};
static struct pci_ops octeon_pcie1_ops = {
- octeon_pcie1_read_config,
- octeon_pcie1_write_config,
+ .read = octeon_pcie1_read_config,
+ .write = octeon_pcie1_write_config,
};
static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
};
static struct pci_ops octeon_dummy_ops = {
- octeon_dummy_read_config,
- octeon_dummy_write_config,
+ .read = octeon_dummy_read_config,
+ .write = octeon_dummy_write_config,
};
static struct resource octeon_dummy_mem_resource = {
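The struct pci_ops conversions above (and the mn10300 pci_direct_ampci one further down) switch from positional to designated initializers, which keeps the initializers correct and self-documenting even if struct pci_ops gains or reorders members. A stand-alone illustration of the difference (hypothetical types, not kernel code):

#include <stdio.h>

struct ops {
	int (*read)(int reg);
	int (*write)(int reg);
};

static int demo_read(int reg)  { return reg; }
static int demo_write(int reg) { return -reg; }

/* Positional form depends on field order and breaks silently if the
 * structure is ever extended or reordered... */
static const struct ops positional = { demo_read, demo_write };

/* ...designated initializers name each field explicitly. */
static const struct ops designated = {
	.read  = demo_read,
	.write = demo_write,
};

int main(void)
{
	printf("%d %d\n", positional.read(3), designated.write(3));
	return 0;
}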
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index faed90240ded..6d6df839948f 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
-/*
- * Internal debugging function
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 2ddaa67e7983..afab728ab65e 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -65,7 +65,7 @@ extern void paging_init(void);
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
@@ -134,7 +134,6 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#define _PAGE_NX 0 /* no-execute bit */
/* If _PAGE_VALID is clear, we use these: */
-#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
#define _PAGE_PROTNONE 0x000 /* If not present */
#define __PAGE_PROT_UWAUX 0x010
@@ -241,11 +240,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
static inline int pte_special(pte_t pte){ return 0; }
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-
static inline pte_t pte_rdprotect(pte_t pte)
{
pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX); return pte;
@@ -338,16 +332,11 @@ static inline int pte_exec_kernel(pte_t pte)
return 1;
}
-#define PTE_FILE_MAX_BITS 30
-
-#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
-#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
-
/* Encode and de-code a swap entry */
-#define __swp_type(x) (((x).val >> 2) & 0x3f)
-#define __swp_offset(x) ((x).val >> 8)
+#define __swp_type(x) (((x).val >> 1) & 0x3f)
+#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type, offset) \
- ((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
+ ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) __pte((x).val)
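With _PAGE_FILE and the nonlinear pte_to_pgoff()/pgoff_to_pte() encodings gone (remap_file_pages() is now emulated generically), mn10300 reclaims one low software bit: the swap type moves from bits 2-7 down to bits 1-6 and the offset gains a bit by starting at bit 7 instead of bit 8. A small stand-alone check of the new arithmetic (local helper names, not kernel symbols):

#include <assert.h>
#include <stdio.h>

/* Mirrors the new __swp_entry() macro above: type << 1 | offset << 7. */
static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return (type << 1) | (offset << 7);
}

int main(void)
{
	unsigned long e = swp_entry(0x2a, 12345);

	assert(((e >> 1) & 0x3f) == 0x2a);	/* __swp_type()   */
	assert((e >> 7) == 12345);		/* __swp_offset() */
	printf("type in bits 1-6, offset from bit 7: round-trips OK\n");
	return 0;
}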
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index bf280eaccd36..c1c374f0ec12 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -50,7 +50,6 @@ struct thread_info {
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
__u8 supervisor_stack[0];
};
@@ -80,9 +79,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index 47b3bb0c04ff..d780670cbaf3 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -28,7 +28,6 @@ void foo(void)
OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_addr_limit, thread_info, addr_limit);
- OFFSET(TI_restart_block, thread_info, restart_block);
BLANK();
OFFSET(REG_D0, pt_regs, d0);
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index a6c0858592c3..8609845f12c5 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -40,7 +40,7 @@ static int restore_sigcontext(struct pt_regs *regs,
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (is_using_fpu(current))
fpu_kill_state(current);
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index febb9cd83177..b5b036f64275 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -106,7 +106,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
if (!r->flags)
continue;
if (!r->start ||
- pci_claim_resource(dev, idx) < 0) {
+ pci_claim_bridge_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of bridge %s\n",
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6b4339f8c9c2..613ca1e55b4b 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -228,8 +228,8 @@ static int pci_ampci_write_config(struct pci_bus *bus, unsigned int devfn,
}
static struct pci_ops pci_direct_ampci = {
- pci_ampci_read_config,
- pci_ampci_write_config,
+ .read = pci_ampci_read_config,
+ .write = pci_ampci_write_config,
};
/*
@@ -281,42 +281,37 @@ static int __init pci_check_direct(void)
return -ENODEV;
}
-static int is_valid_resource(struct pci_dev *dev, int idx)
+static void pcibios_fixup_device_resources(struct pci_dev *dev)
{
- unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- struct resource *devr = &dev->resource[idx], *busr;
-
- if (dev->bus) {
- pci_bus_for_each_resource(dev->bus, busr, i) {
- if (!busr || (busr->flags ^ devr->flags) & type_mask)
- continue;
-
- if (devr->start &&
- devr->start >= busr->start &&
- devr->end <= busr->end)
- return 1;
- }
- }
+ int idx;
- return 0;
+ if (!dev->bus)
+ return;
+
+ for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ if (!r->flags || r->parent || !r->start)
+ continue;
+
+ pci_claim_resource(dev, idx);
+ }
}
-static void pcibios_fixup_device_resources(struct pci_dev *dev)
+static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
- int limit, i;
+ int idx;
- if (dev->bus->number != 0)
+ if (!dev->bus)
return;
- limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
- PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
- for (i = 0; i < limit; i++) {
- if (!dev->resource[i].flags)
+ if (!r->flags || r->parent || !r->start)
continue;
- if (is_valid_resource(dev, i))
- pci_claim_resource(dev, i);
+ pci_claim_bridge_resource(dev, idx);
}
}
@@ -330,7 +325,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
if (bus->self) {
pci_read_bridge_bases(bus);
- pcibios_fixup_device_resources(bus->self);
+ pcibios_fixup_bridge_resources(bus->self);
}
list_for_each_entry(dev, &bus->devices, bus_list)
diff --git a/arch/nios2/include/asm/pgtable-bits.h b/arch/nios2/include/asm/pgtable-bits.h
index ce9e7069aa96..bfddff383e89 100644
--- a/arch/nios2/include/asm/pgtable-bits.h
+++ b/arch/nios2/include/asm/pgtable-bits.h
@@ -30,6 +30,5 @@
#define _PAGE_PRESENT (1<<25) /* PTE contains a translation */
#define _PAGE_ACCESSED (1<<26) /* page referenced */
#define _PAGE_DIRTY (1<<27) /* dirty page */
-#define _PAGE_FILE (1<<28) /* PTE used for file mapping or swap */
#endif /* _ASM_NIOS2_PGTABLE_BITS_H */
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index ccbaffd47671..a213e8c9aad0 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -24,7 +24,7 @@
#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
@@ -112,8 +112,6 @@ static inline int pte_dirty(pte_t pte) \
{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) \
{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) \
- { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
#define pgprot_noncached pgprot_noncached
@@ -272,8 +270,7 @@ static inline void pte_clear(struct mm_struct *mm,
__FILE__, __LINE__, pgd_val(e))
/*
- * Encode and decode a swap entry (must be !pte_none(pte) && !pte_present(pte)
- * && !pte_file(pte)):
+ * Encode and decode a swap entry (must be !pte_none(pte) && !pte_present(pte)):
*
* 31 30 29 28 27 26 25 24 23 22 21 20 19 18 ... 1 0
* 0 0 0 0 type. 0 0 0 0 0 0 offset.........
@@ -290,11 +287,6 @@ static inline void pte_clear(struct mm_struct *mm,
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-/* Encode and decode a nonlinear file mapping entry */
-#define PTE_FILE_MAX_BITS 25
-#define pte_to_pgoff(pte) (pte_val(pte) & 0x1ffffff)
-#define pgoff_to_pte(off) __pte(((off) & 0x1ffffff) | _PAGE_FILE)
-
#define kern_addr_valid(addr) (1)
#include <asm-generic/pgtable.h>
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index cc924a38f22a..e2e3f13f98d5 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
kfree(module_region);
}
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index f9d27883a714..2d0ea25be171 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -200,7 +200,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Set up to return from userspace; jump to fixed address sigreturn
trampoline on kuser page. */
- regs->ra = (unsigned long) (0x1040);
+ regs->ra = (unsigned long) (0x1044);
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 15a0bb5fc06d..d194c0427b26 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -135,6 +135,8 @@ survive:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
@@ -157,9 +159,11 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
- pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
- "cause %ld\n", current->comm, SIGSEGV, address, cause);
- show_regs(regs);
+ if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
+ pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
+ "cause %ld\n", current->comm, SIGSEGV, address, cause);
+ show_regs(regs);
+ }
_exception(SIGSEGV, regs, code, address);
return;
}
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 37bf6a3ef8f4..69c7df0e1420 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -77,7 +77,7 @@ extern void paging_init(void);
*/
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/*
* Kernels own virtual memory area.
@@ -125,7 +125,6 @@ extern void paging_init(void);
#define _PAGE_CC 0x001 /* software: pte contains a translation */
#define _PAGE_CI 0x002 /* cache inhibit */
#define _PAGE_WBC 0x004 /* write back cache */
-#define _PAGE_FILE 0x004 /* set: pagecache, unset: swap (when !PRESENT) */
#define _PAGE_WOM 0x008 /* weakly ordered memory */
#define _PAGE_A 0x010 /* accessed */
@@ -240,7 +239,6 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
@@ -438,12 +436,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/* Encode and decode a nonlinear file mapping entry */
-
-#define PTE_FILE_MAX_BITS 26
-#define pte_to_pgoff(x) (pte_val(x) >> 6)
-#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
-
#define kern_addr_valid(addr) (1)
#include <asm-generic/pgtable.h>
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index d797acc901e4..875f0845a707 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -57,7 +57,6 @@ struct thread_info {
0-0x7FFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
__u8 supervisor_stack[0];
/* saved context data */
@@ -79,9 +78,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
.ksp = 0, \
}
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 1d3c9c28ac25..f14793306b03 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -754,11 +754,6 @@ _dc_enable:
/* ===============================================[ page table masks ]=== */
-/* bit 4 is used in hardware as write back cache bit. we never use this bit
- * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when
- * writing into hardware pte's
- */
-
#define DTLB_UP_CONVERT_MASK 0x3fa
#define ITLB_UP_CONVERT_MASK 0x3a
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 7d1b8235bf90..4112175bf803 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -46,7 +46,7 @@ static int restore_sigcontext(struct pt_regs *regs,
int err = 0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Restore the regs from &sc->regs.
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 22b89d1edba7..8c966b2270aa 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -134,7 +134,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
* pgd entries used up by user/kernel:
*/
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
@@ -146,7 +146,6 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */
-#define _PAGE_FILE_BIT _PAGE_DIRTY_BIT /* overload this bit */
#define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
@@ -167,13 +166,6 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT 12
-
-/* this is how many bits may be used by the file functions */
-#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT)
-
-#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })
-
#define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
@@ -186,7 +178,6 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
-#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -344,7 +335,6 @@ static inline void pgd_clear(pgd_t * pgdp) { }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index a84611835549..fb13e3865563 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -14,7 +14,6 @@ struct thread_info {
mm_segment_t addr_limit; /* user-level address space limit */
__u32 cpu; /* current CPU */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
- struct restart_block restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -25,9 +24,6 @@ struct thread_info {
.cpu = 0, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall \
- } \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 50dfafc3f2c1..3c63a820fcda 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -219,7 +219,7 @@ void *module_alloc(unsigned long size)
* init_data correctly */
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL | __GFP_HIGHMEM,
- PAGE_KERNEL_RWX, NUMA_NO_NODE,
+ PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
}
#endif
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
kfree(mod->arch.section);
mod->arch.section = NULL;
-
- vfree(module_region);
}
/* Additional bytes needed in front of individual sections */
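Here the parisc-specific module_free() override disappears entirely: freeing the region is left to the generic module_memfree() (see the note after the bpf_jit.c hunk), while the arch-private section table is torn down from the new module_arch_freeing_init() hook. A condensed view of where that hook fires (paraphrased from kernel/module.c):

/*
 * module_deallocate() / free_module()
 *   -> module_arch_freeing_init(mod);     // arch tears down private data
 *   -> module_memfree(mod->module_init);  // core frees the mappings itself
 *   -> module_memfree(mod->module_core);
 */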
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 012d4fa63d97..9b910a0251b8 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -99,7 +99,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* Unwind the user stack to get the rt_sigframe structure. */
frame = (struct rt_sigframe __user *)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a2a168e2dfe7..22b0940494bb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -256,6 +256,7 @@ config PPC_OF_PLATFORM_PCI
default n
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ depends on PPC32 || PPC_STD_MMU_64
def_bool y
config ARCH_SUPPORTS_UPROBES
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
index 1382fec9e8c5..7fcb1ac0f232 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
@@ -50,6 +50,7 @@ ethernet@b0000 {
fsl,num_tx_queues = <0x8>;
fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
+ ranges;
queue-group@b0000 {
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
index 221cd2ea5b31..9f25427c1527 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
@@ -50,6 +50,7 @@ ethernet@b1000 {
fsl,num_tx_queues = <0x8>;
fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
+ ranges;
queue-group@b1000 {
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
index 61456c317609..cd7c318ab131 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
@@ -49,6 +49,7 @@ ethernet@b2000 {
fsl,num_tx_queues = <0x8>;
fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
+ ranges;
queue-group@b2000 {
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
index 72a3ef5945c1..a1b48546b02d 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
@@ -1,5 +1,5 @@
/*
- * PQ3 GPIO device tree stub [ controller @ offset 0xf000 ]
+ * PQ3 GPIO device tree stub [ controller @ offset 0xfc00 ]
*
* Copyright 2011 Freescale Semiconductor Inc.
*
@@ -32,10 +32,10 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-gpio-controller@f000 {
+gpio-controller@fc00 {
#gpio-cells = <2>;
compatible = "fsl,pq3-gpio";
- reg = <0xf000 0x100>;
+ reg = <0xfc00 0x100>;
interrupts = <47 0x2 0 0>;
gpio-controller;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi
new file mode 100644
index 000000000000..5022432ebaa9
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi
@@ -0,0 +1,90 @@
+/*
+ * QorIQ BMan Portal device tree stub for 10 portals
+ *
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&bportals {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x100000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x102000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x103000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x104000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x105000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x106000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x107000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x108000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x109000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-bman1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-bman1.dtsi
new file mode 100644
index 000000000000..3b5e3504acb7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-bman1.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ BMan device tree stub [ controller @ offset 0x31a000 ]
+ *
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+bman: bman@31a000 {
+ compatible = "fsl,bman";
+ reg = <0x31a000 0x1000>;
+ interrupts = <16 2 1 2>;
+ fsl,bman-portals = <&bportals>;
+ memory-region = <&bman_fbpr>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-qman1-portals.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-qman1-portals.dtsi
new file mode 100644
index 000000000000..05d51acafa67
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-qman1-portals.dtsi
@@ -0,0 +1,101 @@
+/*
+ * QorIQ QMan Portal device tree stub for 10 portals & 15 pool channels
+ *
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&qportals {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ qportal0: qman-portal@0 {
+ compatible = "fsl,qman-portal";
+ reg = <0x0 0x4000>, <0x100000 0x1000>;
+ interrupts = <104 2 0 0>;
+ fsl,qman-channel-id = <0x0>;
+ };
+ qportal1: qman-portal@4000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <106 2 0 0>;
+ fsl,qman-channel-id = <1>;
+ };
+ qportal2: qman-portal@8000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x8000 0x4000>, <0x102000 0x1000>;
+ interrupts = <108 2 0 0>;
+ fsl,qman-channel-id = <2>;
+ };
+ qportal3: qman-portal@c000 {
+ compatible = "fsl,qman-portal";
+ reg = <0xc000 0x4000>, <0x103000 0x1000>;
+ interrupts = <110 2 0 0>;
+ fsl,qman-channel-id = <3>;
+ };
+ qportal4: qman-portal@10000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x10000 0x4000>, <0x104000 0x1000>;
+ interrupts = <112 2 0 0>;
+ fsl,qman-channel-id = <4>;
+ };
+ qportal5: qman-portal@14000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x14000 0x4000>, <0x105000 0x1000>;
+ interrupts = <114 2 0 0>;
+ fsl,qman-channel-id = <5>;
+ };
+ qportal6: qman-portal@18000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x18000 0x4000>, <0x106000 0x1000>;
+ interrupts = <116 2 0 0>;
+ fsl,qman-channel-id = <6>;
+ };
+
+ qportal7: qman-portal@1c000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x1c000 0x4000>, <0x107000 0x1000>;
+ interrupts = <118 2 0 0>;
+ fsl,qman-channel-id = <7>;
+ };
+ qportal8: qman-portal@20000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x20000 0x4000>, <0x108000 0x1000>;
+ interrupts = <120 2 0 0>;
+ fsl,qman-channel-id = <8>;
+ };
+ qportal9: qman-portal@24000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x24000 0x4000>, <0x109000 0x1000>;
+ interrupts = <122 2 0 0>;
+ fsl,qman-channel-id = <9>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-qman1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-qman1.dtsi
new file mode 100644
index 000000000000..0695778c4386
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-qman1.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ QMan device tree stub [ controller @ offset 0x318000 ]
+ *
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+qman: qman@318000 {
+ compatible = "fsl,qman";
+ reg = <0x318000 0x1000>;
+ interrupts = <16 2 1 3>;
+ fsl,qman-portals = <&qportals>;
+ memory-region = <&qman_fqd &qman_pfdr>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-qman3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-qman3.dtsi
new file mode 100644
index 000000000000..b379abd1439d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-qman3.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ QMan rev3 device tree stub [ controller @ offset 0x318000 ]
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+qman: qman@318000 {
+ compatible = "fsl,qman";
+ reg = <0x318000 0x2000>;
+ interrupts = <16 2 1 3>;
+ fsl,qman-portals = <&qportals>;
+ memory-region = <&qman_fqd &qman_pfdr>;
+};
diff --git a/arch/powerpc/boot/dts/mvme2500.dts b/arch/powerpc/boot/dts/mvme2500.dts
new file mode 100644
index 000000000000..67714cf0f745
--- /dev/null
+++ b/arch/powerpc/boot/dts/mvme2500.dts
@@ -0,0 +1,280 @@
+/*
+ * Device tree source for the Emerson/Artesyn MVME2500
+ *
+ * Copyright 2014 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Based on: P2020 DS Device Tree Source
+ * Copyright 2009 Freescale Semiconductor Inc.
+ */
+
+/include/ "fsl/p2020si-pre.dtsi"
+
+/ {
+ model = "MVME2500";
+ compatible = "artesyn,MVME2500";
+
+ aliases {
+ serial2 = &serial2;
+ serial3 = &serial3;
+ serial4 = &serial4;
+ serial5 = &serial5;
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0 0xffe00000 0x100000>;
+
+ i2c@3000 {
+ hwmon@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ interrupts = <8 1 0 0>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c512";
+ reg = <0x52>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c512";
+ reg = <0x53>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
+
+ };
+
+ spi0: spi@7000 {
+ fsl,espi-num-chipselects = <2>;
+
+ flash@0 {
+ compatible = "atmel,at25df641";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ };
+ flash@1 {
+ compatible = "atmel,at25df641";
+ reg = <1>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ usb@22000 {
+ dr_mode = "host";
+ phy_type = "ulpi";
+ };
+
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@24520 {
+ phy1: ethernet-phy@1 {
+ compatible = "brcm,bcm54616S";
+ interrupts = <6 1 0 0>;
+ reg = <0x1>;
+ };
+
+ phy2: ethernet-phy@2 {
+ compatible = "brcm,bcm54616S";
+ interrupts = <6 1 0 0>;
+ reg = <0x2>;
+ };
+
+ phy3: ethernet-phy@3 {
+ compatible = "brcm,bcm54616S";
+ interrupts = <5 1 0 0>;
+ reg = <0x3>;
+ };
+
+ phy7: ethernet-phy@7 {
+ compatible = "brcm,bcm54616S";
+ interrupts = <7 1 0 0>;
+ reg = <0x7>;
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet1: ethernet@25000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy7>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet2: ethernet@26000 {
+ tbi-handle = <&tbi2>;
+ phy-handle = <&phy3>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xfff00000 0x00080000
+ 0x1 0x0 0x0 0xffc40000 0x00010000
+ 0x2 0x0 0x0 0xffc50000 0x00010000
+ 0x3 0x0 0x0 0xffc60000 0x00010000
+ 0x4 0x0 0x0 0xffc70000 0x00010000
+ 0x6 0x0 0x0 0xffc80000 0x00010000
+ 0x5 0x0 0x0 0xffdf0000 0x00008000>;
+
+ serial2: serial@1,0 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x1 0x0 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 2 0 0>;
+ };
+
+ serial3: serial@2,0 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x2 0x0 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <1 2 0 0>;
+ };
+
+ serial4: serial@3,0 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x3 0x0 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <2 2 0 0>;
+ };
+
+ serial5: serial@4,0 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4 0x0 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <3 2 0 0>;
+ };
+
+ mram@0,0 {
+ compatible = "everspin,mram", "mtd-ram";
+ reg = <0x0 0x0 0x80000>;
+ bank-width = <2>;
+ };
+
+ board-control@5,0 {
+ compatible = "artesyn,mvme2500-fpga";
+ reg = <0x5 0x0 0x01000>;
+ };
+
+ cpld@6,0 {
+ compatible = "artesyn,mvme2500-cpld";
+ reg = <0x6 0x0 0x10000>;
+ interrupts = <9 1 0 0>;
+ };
+ };
+
+ pci0: pcie@ffe08000 {
+ reg = <0 0xffe08000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ };
+ };
+
+ pci1: pcie@ffe09000 {
+ reg = <0 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ };
+
+ };
+
+ pci2: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ };
+ };
+};
+
+/include/ "fsl/p2020si-post.dtsi"
+
+/ {
+ soc@ffe00000 {
+ serial@4600 {
+ status = "disabled";
+ };
+
+ i2c@3100 {
+ status = "disabled";
+ };
+
+ sdhc@2e000 {
+ compatible = "fsl,p2020-esdhc", "fsl,esdhc";
+ non-removable;
+ };
+
+ };
+
+};
diff --git a/arch/powerpc/boot/dts/t4240emu.dts b/arch/powerpc/boot/dts/t4240emu.dts
deleted file mode 100644
index decaf357db9c..000000000000
--- a/arch/powerpc/boot/dts/t4240emu.dts
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * T4240 emulator Device Tree Source
- *
- * Copyright 2013 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/include/ "fsl/e6500_power_isa.dtsi"
-/ {
- compatible = "fsl,T4240";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- dma0 = &dma0;
- dma1 = &dma1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e6500@0 {
- device_type = "cpu";
- reg = <0 1>;
- next-level-cache = <&L2_1>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu1: PowerPC,e6500@2 {
- device_type = "cpu";
- reg = <2 3>;
- next-level-cache = <&L2_1>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu2: PowerPC,e6500@4 {
- device_type = "cpu";
- reg = <4 5>;
- next-level-cache = <&L2_1>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu3: PowerPC,e6500@6 {
- device_type = "cpu";
- reg = <6 7>;
- next-level-cache = <&L2_1>;
- fsl,portid-mapping = <0x80000000>;
- };
-
- cpu4: PowerPC,e6500@8 {
- device_type = "cpu";
- reg = <8 9>;
- next-level-cache = <&L2_2>;
- fsl,portid-mapping = <0x40000000>;
- };
- cpu5: PowerPC,e6500@10 {
- device_type = "cpu";
- reg = <10 11>;
- next-level-cache = <&L2_2>;
- fsl,portid-mapping = <0x40000000>;
- };
- cpu6: PowerPC,e6500@12 {
- device_type = "cpu";
- reg = <12 13>;
- next-level-cache = <&L2_2>;
- fsl,portid-mapping = <0x40000000>;
- };
- cpu7: PowerPC,e6500@14 {
- device_type = "cpu";
- reg = <14 15>;
- next-level-cache = <&L2_2>;
- fsl,portid-mapping = <0x40000000>;
- };
-
- cpu8: PowerPC,e6500@16 {
- device_type = "cpu";
- reg = <16 17>;
- next-level-cache = <&L2_3>;
- fsl,portid-mapping = <0x20000000>;
- };
- cpu9: PowerPC,e6500@18 {
- device_type = "cpu";
- reg = <18 19>;
- next-level-cache = <&L2_3>;
- fsl,portid-mapping = <0x20000000>;
- };
- cpu10: PowerPC,e6500@20 {
- device_type = "cpu";
- reg = <20 21>;
- next-level-cache = <&L2_3>;
- fsl,portid-mapping = <0x20000000>;
- };
- cpu11: PowerPC,e6500@22 {
- device_type = "cpu";
- reg = <22 23>;
- next-level-cache = <&L2_3>;
- fsl,portid-mapping = <0x20000000>;
- };
- };
-};
-
-/ {
- model = "fsl,T4240QDS";
- compatible = "fsl,T4240EMU", "fsl,T4240QDS";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- ifc: localbus@ffe124000 {
- reg = <0xf 0xfe124000 0 0x2000>;
- ranges = <0 0 0xf 0xe8000000 0x08000000
- 2 0 0xf 0xff800000 0x00010000
- 3 0 0xf 0xffdf0000 0x00008000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
-
- bank-width = <2>;
- device-width = <1>;
- };
-
- };
-
- memory {
- device_type = "memory";
- };
-
- soc: soc@ffe000000 {
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- };
-};
-
-&ifc {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
- interrupts = <25 2 0 0>;
-};
-
-&soc {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr1: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.7",
- "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- ddr2: memory-controller@9000 {
- compatible = "fsl,qoriq-memory-controller-v4.7",
- "fsl,qoriq-memory-controller";
- reg = <0x9000 0x1000>;
- interrupts = <16 2 1 22>;
- };
-
- ddr3: memory-controller@a000 {
- compatible = "fsl,qoriq-memory-controller-v4.7",
- "fsl,qoriq-memory-controller";
- reg = <0xa000 0x1000>;
- interrupts = <16 2 1 21>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,t4240-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000
- 0x12000 0x1000>;
- interrupts = <16 2 1 27
- 16 2 1 26
- 16 2 1 25>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet2-cf", "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x6000>;
- fsl,portid-mapping = <0x8000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
-/include/ "fsl/qoriq-mpic.dtsi"
-
- guts: global-utilities@e0000 {
- compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- fsl,liodn-bits = <12>;
- };
-
-/include/ "fsl/qoriq-clockgen2.dtsi"
- global-utilities@e1000 {
- compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
- };
-
-/include/ "fsl/qoriq-dma-0.dtsi"
-/include/ "fsl/qoriq-dma-1.dtsi"
-
-/include/ "fsl/qoriq-i2c-0.dtsi"
-/include/ "fsl/qoriq-i2c-1.dtsi"
-/include/ "fsl/qoriq-duart-0.dtsi"
-/include/ "fsl/qoriq-duart-1.dtsi"
-
- L2_1: l2-cache-controller@c20000 {
- compatible = "fsl,t4240-l2-cache-controller";
- reg = <0xc20000 0x40000>;
- next-level-cache = <&cpc>;
- };
- L2_2: l2-cache-controller@c60000 {
- compatible = "fsl,t4240-l2-cache-controller";
- reg = <0xc60000 0x40000>;
- next-level-cache = <&cpc>;
- };
- L2_3: l2-cache-controller@ca0000 {
- compatible = "fsl,t4240-l2-cache-controller";
- reg = <0xca0000 0x40000>;
- next-level-cache = <&cpc>;
- };
-};
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 69e06eeae6a6..9110a5cb1bb7 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -1,19 +1,15 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ACADIA=y
# CONFIG_WALNUT is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -31,27 +27,18 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
-CONFIG_MII=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
CONFIG_IBM_EMAC_DEBUG=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -65,20 +52,14 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_THERMAL=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index e9d84b5d0ab6..790366652ba3 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -1,19 +1,15 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_EP405=y
# CONFIG_WALNUT is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -30,19 +26,14 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -55,27 +46,20 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 5ff338f6443f..01bd71bac027 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -1,21 +1,17 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_KILAUEA=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_WALNUT is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -33,8 +29,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -42,21 +36,12 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
-CONFIG_PROC_DEVICETREE=y
-CONFIG_PM=y
-CONFIG_SUSPEND=y
-CONFIG_PPC4xx_CPM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -75,20 +60,14 @@ CONFIG_THERMAL=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index c0d228dc73dc..e2036b7c7edb 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -1,5 +1,4 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
@@ -14,10 +13,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_APM8018X=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MATH_EMULATION=y
-# CONFIG_MIGRATION is not set
# CONFIG_SUSPEND is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_SCSI=y
@@ -51,5 +48,4 @@ CONFIG_AVERAGE=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_FTRACE is not set
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 84505e3aa0fb..efd51701fb4d 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -1,19 +1,15 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MAKALU=y
# CONFIG_WALNUT is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -30,25 +26,17 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -62,20 +50,14 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_THERMAL=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 91c110dad2d6..5ded3dcdf60a 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -1,7 +1,8 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
@@ -11,8 +12,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_WALNUT is not set
CONFIG_OBS600=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_MATH_EMULATION=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,8 +29,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -39,7 +36,6 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
@@ -68,13 +64,10 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
index 0a81e1f7dd59..bcb0c4d854db 100644
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ b/arch/powerpc/configs/40x/virtex_defconfig
@@ -1,5 +1,4 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -7,7 +6,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -35,16 +33,11 @@ CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_MANGLE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_XILINX_SYSACE=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_XILINX_XPS_PS2=y
CONFIG_SERIAL_8250=y
@@ -60,15 +53,9 @@ CONFIG_GPIO_XILINX=y
CONFIG_FB=y
CONFIG_FB_XILINX=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -76,16 +63,16 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
CONFIG_CRC_CCITT=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DEBUG_KERNEL=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index 0a19f4386ee9..37c838f26e53 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -1,17 +1,13 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -28,19 +24,14 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -53,22 +44,15 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 7e2530cd9d30..ea4ef02a0578 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -4,9 +4,6 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_CPU_PARTIAL is not set
@@ -15,7 +12,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_POWERNV_MSI is not set
CONFIG_PPC_47x=y
# CONFIG_EBONY is not set
CONFIG_AKEBONO=y
@@ -26,7 +22,6 @@ CONFIG_IRQ_ALL_CPUS=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
# CONFIG_SUSPEND is not set
-CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -48,7 +43,6 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_SCSI_PROC_FS is not set
@@ -62,7 +56,6 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 44355c53cd30..95494209c124 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -1,19 +1,16 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_ARCHES=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -32,24 +29,16 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -64,21 +53,15 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IBM_IIC=y
CONFIG_SENSORS_AD7414=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index cef7d62560c4..a046f08413fd 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -1,17 +1,14 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BAMBOO=y
# CONFIG_EBONY is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -28,12 +25,9 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -47,23 +41,16 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index ca7f1f32f2b2..a326b773ac05 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -1,19 +1,16 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_PCI_QUIRKS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_BLUESTONE=y
# CONFIG_EBONY is not set
-# CONFIG_KVM_GUEST is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -27,18 +24,13 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
@@ -53,7 +45,6 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IBM_IIC=y
CONFIG_SENSORS_AD7414=y
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
@@ -63,6 +54,5 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 9919a91add12..d939e71fff7d 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -1,19 +1,16 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_CANYONLANDS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -32,29 +29,18 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
-CONFIG_PROC_DEVICETREE=y
-CONFIG_PM=y
-CONFIG_SUSPEND=y
-CONFIG_PPC4xx_CPM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -76,21 +62,15 @@ CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 47de68261443..5aa312a158dd 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -1,9 +1,7 @@
CONFIG_44x=y
CONFIG_SMP=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
@@ -39,12 +37,10 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_SCSI_PROC_FS is not set
@@ -56,7 +52,6 @@ CONFIG_SATA_SIL24=y
# CONFIG_ATA_SFF is not set
CONFIG_NETDEVICES=y
CONFIG_E1000E=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -84,22 +79,18 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NLS_DEFAULT="n"
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x10000000
CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x200
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index 31b58b0d52e2..5909e016c37d 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -1,18 +1,14 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -28,19 +24,14 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -55,21 +46,15 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index faccaf65f394..57499d25c877 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -1,19 +1,15 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_EIGER=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_PCIEPORTBUS=y
@@ -33,20 +29,15 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
@@ -54,13 +45,10 @@ CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
CONFIG_I2O=y
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
CONFIG_E1000E=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -80,25 +68,18 @@ CONFIG_I2C_DEBUG_CORE=y
CONFIG_I2C_DEBUG_ALGO=y
CONFIG_I2C_DEBUG_BUS=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_SUPPORT is not set
CONFIG_DMADEVICES=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CRYPTD=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_CCM=y
@@ -116,7 +97,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=y
CONFIG_CRYPTO_DES=y
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 05782c145141..5d52185d8f5a 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -1,19 +1,14 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_ICON=y
CONFIG_HIGHMEM=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_PCIEPORTBUS=y
@@ -34,17 +29,13 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_XILINX_SYSACE=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
@@ -55,11 +46,7 @@ CONFIG_FUSION_SAS=y
CONFIG_FUSION_CTL=y
CONFIG_FUSION_LOGGING=y
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -79,7 +66,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IBM_IIC=y
# CONFIG_HWMON is not set
CONFIG_MFD_SM501=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_SM501=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -92,25 +78,19 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 49a1518a4e69..0ad3e449526e 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -1,15 +1,11 @@
CONFIG_44x=y
CONFIG_SMP=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
@@ -25,7 +21,6 @@ CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="root=/dev/issblk0"
# CONFIG_PCI is not set
CONFIG_ADVANCED_OPTIONS=y
-CONFIG_NONSTATIC_KERNEL=y
CONFIG_DYNAMIC_MEMSTART=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -42,13 +37,10 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_INPUT is not set
@@ -72,15 +64,11 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index f1137972ed41..a042335971da 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -1,17 +1,14 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_KATMAI=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -30,19 +27,14 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -55,22 +47,15 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index 4b91a44c4c32..91c2aff9bd55 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -1,10 +1,8 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -12,7 +10,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_EBONY is not set
CONFIG_RAINIER=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -31,14 +28,11 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_MACINTOSH_DRIVERS=y
@@ -55,26 +49,19 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0xef600300
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index b7113e114a14..7fddf3fe275c 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -1,19 +1,15 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_REDWOOD=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_PCIEPORTBUS=y
@@ -33,18 +29,13 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
@@ -52,14 +43,11 @@ CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
CONFIG_I2O=y
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
CONFIG_IBM_EMAC_TXB=256
CONFIG_IBM_EMAC_DEBUG=y
CONFIG_E1000E=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -79,25 +67,18 @@ CONFIG_I2C_DEBUG_CORE=y
CONFIG_I2C_DEBUG_ALGO=y
CONFIG_I2C_DEBUG_BUS=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_SUPPORT is not set
CONFIG_DMADEVICES=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CRYPTD=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_CCM=y
@@ -115,7 +96,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=y
CONFIG_CRYPTO_DES=y
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 9622eb2a3e37..6928012f3813 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -1,20 +1,19 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_EBONY is not set
CONFIG_SAM440EP=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -31,11 +30,9 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-# CONFIG_MISC_DEVICES is not set
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
@@ -44,11 +41,7 @@ CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_INPUT_FF_MEMLESS=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -59,7 +52,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C_IBM_IIC=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_RADEON=y
CONFIG_LCD_CLASS_DEVICE=y
@@ -80,10 +72,8 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
@@ -99,7 +89,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_REISERFS_FS=y
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
@@ -111,11 +100,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_AMIGA_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 9642d99b47f1..c294369cc39f 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -1,19 +1,16 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_SEQUOIA=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -32,8 +29,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -41,12 +36,9 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -60,24 +52,17 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index 09e3075030bf..e779228d6cd6 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -1,17 +1,14 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_TAISHAN=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
@@ -30,17 +27,13 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -54,23 +47,16 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
index 1eb3caf828a5..53d0300b3390 100644
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ b/arch/powerpc/configs/44x/virtex5_defconfig
@@ -1,5 +1,4 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -7,7 +6,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -34,16 +32,11 @@ CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_MANGLE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_XILINX_SYSACE=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_XILINX_XPS_PS2=y
CONFIG_SERIAL_8250=y
@@ -59,15 +52,9 @@ CONFIG_GPIO_XILINX=y
CONFIG_FB=y
CONFIG_FB_XILINX=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -75,16 +62,16 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
CONFIG_CRC_CCITT=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DEBUG_KERNEL=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 551e50a0be5e..ee434375fc24 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -1,5 +1,4 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-pika"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
@@ -7,7 +6,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -16,7 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_WARP=y
CONFIG_PPC4xx_GPIO=y
CONFIG_HZ_1000=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="ip=on"
# CONFIG_PCI is not set
@@ -35,8 +32,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -44,21 +39,14 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_MTD_UBI=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
-CONFIG_MII=y
CONFIG_IBM_EMAC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -72,7 +60,6 @@ CONFIG_I2C_IBM_IIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_AD7414=y
CONFIG_THERMAL=y
-CONFIG_THERMAL_HWMON=y
CONFIG_WATCHDOG=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -83,14 +70,12 @@ CONFIG_MMC=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
-# CONFIG_LEDS_GPIO_PLATFORM is not set
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -99,7 +84,6 @@ CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
@@ -110,13 +94,10 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 0dc99e141035..19fad0e0016e 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -1,18 +1,15 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -31,26 +28,20 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_LXT_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FEC_MPC52xx=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_LXT_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -64,7 +55,6 @@ CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=y
@@ -73,7 +63,6 @@ CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -81,17 +70,13 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 104a332e79ab..5f40ba92a39a 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -1,10 +1,9 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
CONFIG_MODULES=y
@@ -15,10 +14,6 @@ CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
CONFIG_PPC_LITE5200=y
# CONFIG_PPC_PMAC is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -33,7 +28,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -42,9 +36,8 @@ CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_PATA_MPC52xx=y
CONFIG_NETDEVICES=y
-CONFIG_LXT_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FEC_MPC52xx=y
+CONFIG_LXT_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -57,23 +50,18 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index c936fab9ec4a..909e185a88d1 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -1,18 +1,15 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -30,24 +27,21 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ROM=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_ATA=y
CONFIG_PATA_MPC52xx=y
CONFIG_NETDEVICES=y
+CONFIG_FEC_MPC52xx=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_QSEMI_PHY=y
@@ -58,11 +52,6 @@ CONFIG_SMSC_PHY=y
CONFIG_BROADCOM_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_MDIO_BITBANG=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_FEC_MPC52xx=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -86,7 +75,6 @@ CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -94,18 +82,14 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 1d03c35540c7..649a01a0044d 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -1,15 +1,14 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="trunk"
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -21,11 +20,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
CONFIG_PREEMPT=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -45,40 +41,31 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_PROC_DEVICETREE=y
# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=m
CONFIG_PATA_MPC52xx=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_FEC_MPC52xx=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=m
-# CONFIG_USB_OHCI_HCD_PPC_SOC is not set
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=m
@@ -95,8 +82,6 @@ CONFIG_FAT_DEFAULT_CODEPAGE=850
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index ca83ec88b114..efab8388ea92 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -1,17 +1,14 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
+CONFIG_EMBEDDED=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
@@ -34,17 +31,13 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ROM=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_PLATRAM=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -54,12 +47,9 @@ CONFIG_ATA=y
CONFIG_PATA_MPC52xx=y
CONFIG_PATA_PLATFORM=y
CONFIG_NETDEVICES=y
+CONFIG_FEC_MPC52xx=y
CONFIG_LXT_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FEC_MPC52xx=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
@@ -75,7 +65,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_SM501=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
@@ -95,17 +84,14 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index 985f95c7280a..bcdfb07921fc 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -1,21 +1,20 @@
CONFIG_FSL_EMB_PERFMON=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_ASP834x=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -34,22 +33,16 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -63,8 +56,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
@@ -74,17 +65,12 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index e12e60c3b9a2..11a959283149 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -36,7 +35,6 @@ CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -45,10 +43,8 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_PHRAM=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_MII=y
CONFIG_TUN=y
CONFIG_UCC_GETH=y
CONFIG_MARVELL_PHY=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index 4b4a2a9133a5..b47a41f77836 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -1,20 +1,18 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC831x_RDB=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,15 +28,12 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -51,12 +46,10 @@ CONFIG_MD_LINEAR=y
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
+CONFIG_GIANFAR=y
+CONFIG_E100=y
CONFIG_CICADA_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
-CONFIG_GIANFAR=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -71,7 +64,6 @@ CONFIG_I2C_MPC=y
CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -82,7 +74,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_NET2280=y
CONFIG_USB_ETH=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
@@ -90,18 +81,12 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index 5871395573c5..e28c83f320c1 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -1,20 +1,18 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC831x_RDB=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,13 +28,11 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -50,10 +46,8 @@ CONFIG_MD_LINEAR=y
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
CONFIG_GIANFAR=y
+CONFIG_E100=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -68,7 +62,6 @@ CONFIG_I2C_MPC=y
CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -79,7 +72,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_NET2280=y
CONFIG_USB_ETH=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
@@ -87,18 +79,12 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index a5699a1f7d0a..e84d35b848c0 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -1,22 +1,21 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC832x_MDS=y
CONFIG_QUICC_ENGINE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -31,16 +30,13 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_SCSI=y
CONFIG_NETDEVICES=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_UCC_GETH=y
+CONFIG_DAVICOM_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -53,23 +49,16 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 5adc4cea42d3..ae145f410590 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -1,22 +1,21 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_LDM_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC832x_RDB=y
CONFIG_QUICC_ENGINE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -31,18 +30,15 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_E1000=y
CONFIG_UCC_GETH=y
+CONFIG_E1000=y
+CONFIG_ICPLUS_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -58,7 +54,6 @@ CONFIG_I2C_MPC=y
CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -71,24 +66,18 @@ CONFIG_MMC_SPI=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_LDM_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ISO8859_8=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index 82b6b6c88d6a..2a5fdcbabcdd 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -1,20 +1,19 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC834x_ITX=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,11 +29,9 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -52,9 +49,9 @@ CONFIG_MD_LINEAR=y
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
+CONFIG_GIANFAR=y
CONFIG_CICADA_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_GIANFAR=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -69,7 +66,6 @@ CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
@@ -82,19 +78,13 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index f8b228aaa03a..9a2ff25a2e98 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -1,20 +1,19 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC834x_ITX=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,11 +29,9 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -43,8 +40,8 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_NETDEVICES=y
-CONFIG_CICADA_PHY=y
CONFIG_GIANFAR=y
+CONFIG_CICADA_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -59,7 +56,6 @@ CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
@@ -72,19 +68,13 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index 99660c062191..e44edc575549 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -1,20 +1,19 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC834x_MDS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,16 +29,13 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
CONFIG_GIANFAR=y
+CONFIG_E100=y
+CONFIG_MARVELL_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -52,23 +48,16 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index 05710bbfd2ef..94a7d85f1603 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -1,21 +1,20 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC836x_MDS=y
CONFIG_QUICC_ENGINE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -32,21 +31,17 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_SCSI=y
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_UCC_GETH=y
+CONFIG_MARVELL_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -59,23 +54,16 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 0540d673a052..761ed8ea0729 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -1,20 +1,19 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC836x_RDK=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,20 +29,17 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
-CONFIG_BROADCOM_PHY=y
CONFIG_UCC_GETH=y
-# CONFIG_NETDEV_10000 is not set
+CONFIG_BROADCOM_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -71,17 +67,11 @@ CONFIG_LOGO=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_PPC_EARLY_DEBUG=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index f367985be6f7..bcf1b48cc9e6 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -1,18 +1,16 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC837x_MDS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -28,7 +26,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -37,10 +34,8 @@ CONFIG_CHR_DEV_SG=y
CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
+CONFIG_MARVELL_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -54,21 +49,15 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index 414eda381591..f0f0ebf75125 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -1,18 +1,16 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC837x_RDB=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,7 +28,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -43,12 +40,9 @@ CONFIG_BLK_DEV_MD=y
CONFIG_MD_RAID1=y
CONFIG_MD_RAID456=y
CONFIG_NETDEVICES=y
+CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -63,7 +57,6 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -78,7 +71,6 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -87,18 +79,13 @@ CONFIG_USB_EHCI_FSL=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index 4ae385894c64..d2e4d82de14d 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -1,8 +1,6 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
@@ -13,7 +11,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_SBC834x=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,15 +27,11 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -47,11 +40,8 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
+CONFIG_BROADCOM_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -78,15 +68,11 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index b4c4b469e320..b0939dd9ad6f 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -1,17 +1,15 @@
CONFIG_PPC_85xx=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_SPARSE_IRQ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_SYSFS_DEPRECATED=y
@@ -28,7 +26,6 @@ CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_CPM2=y
CONFIG_HIGHMEM=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -71,8 +68,6 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -81,13 +76,11 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -125,7 +118,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -141,7 +133,6 @@ CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
CONFIG_GEF_WDT=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
@@ -149,7 +140,6 @@ CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_HID_SMARTJOYPLUS=y
@@ -172,7 +162,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RX8581=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
-# CONFIG_NET_DMA is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -194,7 +183,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
@@ -244,10 +232,8 @@ CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/kmp204x_defconfig b/arch/powerpc/configs/85xx/kmp204x_defconfig
index e362d588dfbf..e94d3eb4a8c1 100644
--- a/arch/powerpc/configs/85xx/kmp204x_defconfig
+++ b/arch/powerpc/configs/85xx/kmp204x_defconfig
@@ -94,7 +94,6 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_PHRAM=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ECC_BCH=y
@@ -111,7 +110,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_NETDEVICES=y
@@ -120,7 +118,6 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index aee0d17a9551..3be85c5f1a2a 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -1,17 +1,16 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_KSI8560=y
CONFIG_CPM2=y
CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -27,8 +26,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -39,12 +36,11 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_IDE=y
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
CONFIG_FS_ENET_MDIO_FCC=y
CONFIG_GIANFAR=y
+CONFIG_MARVELL_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -53,22 +49,15 @@ CONFIG_GIANFAR=y
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 11662c217ac0..e38c373f2edf 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -1,17 +1,16 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_MPC8540_ADS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -27,13 +26,10 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -44,20 +40,13 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index ebe9b30b0721..48fc8e3a7be0 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -1,15 +1,14 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_MPC8560_ADS=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_PCI_DEBUG=y
@@ -30,13 +29,12 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
-CONFIG_E1000=y
CONFIG_GIANFAR=y
+CONFIG_E1000=y
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -45,20 +43,13 @@ CONFIG_GIANFAR=y
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index eb25229b387a..ecb0c3bf8796 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -1,17 +1,16 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_MPC85xx_CDS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_NET=y
@@ -28,7 +27,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -36,10 +34,8 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_E1000=y
CONFIG_GIANFAR=y
+CONFIG_E1000=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -49,20 +45,13 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/ppa8548_defconfig b/arch/powerpc/configs/85xx/ppa8548_defconfig
index e80bb9b21eac..190978a5b7d5 100644
--- a/arch/powerpc/configs/85xx/ppa8548_defconfig
+++ b/arch/powerpc/configs/85xx/ppa8548_defconfig
@@ -1,11 +1,16 @@
CONFIG_PPC_85xx=y
-CONFIG_PPA8548=y
-CONFIG_DTC=y
-CONFIG_DEFAULT_UIMAGE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-# CONFIG_PCI is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_PPA8548=y
+CONFIG_FSL_LBC=y
+CONFIG_RAPIDIO=y
+CONFIG_FSL_RIO=y
+CONFIG_RAPIDIO_DMA_ENGINE=y
+CONFIG_RAPIDIO_ENUM_BASIC=y
+CONFIG_RAPIDIO_TSI57X=y
+CONFIG_RAPIDIO_CPS_XX=y
+CONFIG_RAPIDIO_TSI568=y
+CONFIG_RAPIDIO_CPS_GEN2=y
CONFIG_ADVANCED_OPTIONS=y
CONFIG_LOWMEM_SIZE_BOOL=y
CONFIG_LOWMEM_SIZE=0x40000000
@@ -14,51 +19,28 @@ CONFIG_LOWMEM_CAM_NUM=4
CONFIG_PAGE_OFFSET_BOOL=y
CONFIG_PAGE_OFFSET=0xb0000000
CONFIG_KERNEL_START_BOOL=y
-CONFIG_KERNEL_START=0xb0000000
-# CONFIG_PHYSICAL_START_BOOL is not set
-CONFIG_PHYSICAL_START=0x00000000
-CONFIG_PHYSICAL_ALIGN=0x04000000
CONFIG_TASK_SIZE_BOOL=y
CONFIG_TASK_SIZE=0xb0000000
-
-CONFIG_FSL_LBC=y
-CONFIG_FSL_DMA=y
-CONFIG_FSL_RIO=y
-
-CONFIG_RAPIDIO=y
-CONFIG_RAPIDIO_DMA_ENGINE=y
-CONFIG_RAPIDIO_TSI57X=y
-CONFIG_RAPIDIO_TSI568=y
-CONFIG_RAPIDIO_CPS_XX=y
-CONFIG_RAPIDIO_CPS_GEN2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_PROC_DEVICETREE=y
-
+CONFIG_NET=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
CONFIG_MTD=y
-CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CONCAT=y
+CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-
+CONFIG_NETDEVICES=y
+CONFIG_GIANFAR=y
+CONFIG_MARVELL_PHY=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_I2C=y
CONFIG_I2C_MPC=y
-CONFIG_I2C_CHARDEV
+# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
CONFIG_RTC_DRV_ISL1208=y
-
-CONFIG_NET=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_NETDEVICES=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-CONFIG_MARVELL_PHY=y
+CONFIG_FSL_DMA=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index 008a7a47b89b..72b7ccfbe2c2 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -1,16 +1,13 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_SBC8548=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_NET=y
@@ -27,14 +24,19 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
+CONFIG_BROADCOM_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -44,33 +46,9 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_NOSWAP=y
-CONFIG_MTD_CFI_GEOMETRY=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-CONFIG_MTD_CFI_I4=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_UTIL=y
-CONFIG_MTD_PHYSMAP_OF=y
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index 435fd408eef1..0ad7bd5ee6b6 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -1,20 +1,17 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_SOCRATES=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -28,13 +25,8 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -49,11 +41,8 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
+CONFIG_MARVELL_PHY=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
CONFIG_INPUT_EVDEV=y
@@ -65,7 +54,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -82,8 +70,6 @@ CONFIG_FB_MB862XX=y
CONFIG_FB_MB862XX_LIME=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x16=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
@@ -95,15 +81,11 @@ CONFIG_RTC_CLASS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_FONTS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index 5d4db154bf59..f66d16ba8c58 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -1,9 +1,7 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
@@ -12,7 +10,6 @@ CONFIG_STX_GP3=y
CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -28,7 +25,6 @@ CONFIG_IP_NF_FILTER=m
CONFIG_NET_PKTGEN=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
CONFIG_BLK_DEV_LOOP=m
@@ -42,12 +38,10 @@ CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_GIANFAR=y
+CONFIG_MARVELL_PHY=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1280
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1024
CONFIG_INPUT_JOYDEV=m
@@ -63,8 +57,6 @@ CONFIG_SOUND=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
@@ -73,16 +65,11 @@ CONFIG_VFAT_FS=m
CONFIG_TMPFS=y
CONFIG_CRAMFS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_SMB_FS=m
CONFIG_NLS=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=m
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BDI_SWITCH=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 5a800e6e38e3..4daaf2943b44 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -1,17 +1,15 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8540=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -25,9 +23,7 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -38,10 +34,8 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
CONFIG_GIANFAR=y
+CONFIG_E100=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -58,15 +52,10 @@ CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index 2d936697d69e..bb402b3cf786 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -1,17 +1,15 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8541=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -25,9 +23,7 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -38,10 +34,8 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
CONFIG_GIANFAR=y
+CONFIG_E100=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -60,15 +54,10 @@ CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index ce8a67e89473..685d0fb132d6 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -1,20 +1,19 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8548=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
@@ -34,22 +33,16 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND_ECC_SMC=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_UPM=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -63,22 +56,15 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_SENSORS_LM75=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index a4e12971ccac..02a931d4e954 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -1,17 +1,15 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8555=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -25,9 +23,7 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -38,10 +34,8 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
CONFIG_GIANFAR=y
+CONFIG_E100=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -60,15 +54,10 @@ CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 341abe18a74d..633d5b759a36 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -1,17 +1,15 @@
CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8560=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -25,9 +23,7 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -38,10 +34,8 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_E100=y
CONFIG_GIANFAR=y
+CONFIG_E100=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -60,15 +54,10 @@ CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 72df8ab8449e..34f3ea1729e0 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -1,29 +1,25 @@
CONFIG_PPC_85xx=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_XES_MPC85xx=y
-CONFIG_GPIO_MPC8XXX=y
CONFIG_HIGHMEM=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
@@ -52,12 +48,9 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
-CONFIG_NET_IPGRE=y
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -67,8 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -79,7 +70,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_UPM=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
@@ -88,19 +78,15 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_E1000=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
+CONFIG_E1000=y
+CONFIG_BROADCOM_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -109,7 +95,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -118,25 +103,19 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCA953X=y
CONFIG_SENSORS_DS1621=y
CONFIG_SENSORS_LM90=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
CONFIG_LEDS_PCA955X=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
@@ -147,7 +126,6 @@ CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -159,17 +137,13 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_SUMMARY=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 7cb9719abf3d..9792a2cb9b20 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -1,8 +1,8 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -10,7 +10,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -21,11 +20,9 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PPC_86xx=y
CONFIG_GEF_PPC9A=y
CONFIG_HIGHMEM=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=m
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
@@ -52,8 +49,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -69,9 +64,6 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -83,7 +75,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
@@ -93,35 +84,32 @@ CONFIG_BLK_DEV_SR=y
CONFIG_ATA=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
@@ -133,7 +121,6 @@ CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
CONFIG_GEF_WDT=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -148,10 +135,8 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
@@ -160,7 +145,6 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_INTF_PROC is not set
CONFIG_RTC_DRV_RX8581=y
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_VME_BUS=y
CONFIG_VME_TSI148=y
CONFIG_EXT2_FS=y
@@ -169,7 +153,6 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -182,7 +165,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
@@ -230,7 +212,5 @@ CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=y
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index ecabf625d249..cadc36682bb4 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -1,8 +1,8 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -10,7 +10,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -21,11 +20,9 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PPC_86xx=y
CONFIG_GEF_SBC310=y
CONFIG_HIGHMEM=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
@@ -52,8 +49,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -69,9 +64,6 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -83,7 +75,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
@@ -94,35 +85,32 @@ CONFIG_ATA=y
CONFIG_SATA_SIL24=y
# CONFIG_ATA_SFF is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
@@ -134,7 +122,6 @@ CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
CONFIG_GEF_WDT=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -149,10 +136,8 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
@@ -166,7 +151,6 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -179,7 +163,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
@@ -227,7 +210,5 @@ CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=y
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 4a4a86fb0d3d..2aa7d9737e43 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -1,8 +1,8 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -10,7 +10,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -21,11 +20,9 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PPC_86xx=y
CONFIG_GEF_SBC610=y
CONFIG_HIGHMEM=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=m
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
@@ -46,8 +43,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -62,22 +57,17 @@ CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -85,7 +75,6 @@ CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
@@ -98,7 +87,6 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_WAN_ROUTER=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
@@ -122,9 +110,6 @@ CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -136,7 +121,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -144,37 +128,34 @@ CONFIG_BLK_DEV_SR=y
CONFIG_ATA=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
@@ -186,7 +167,6 @@ CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
CONFIG_GEF_WDT=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -201,10 +181,8 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
@@ -213,7 +191,6 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_INTF_PROC is not set
CONFIG_RTC_DRV_RX8581=y
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_VME_BUS=y
CONFIG_VME_TSI148=y
CONFIG_EXT2_FS=y
@@ -222,14 +199,12 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
@@ -273,28 +248,20 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 9b192bb6bd3d..e32207de2b77 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -1,27 +1,25 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_ELF_CORE is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_LDM_PARTITION=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_86xx=y
CONFIG_MPC8610_HPCD=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
-CONFIG_SPARSE_IRQ=y
CONFIG_FORCE_MAX_ZONEORDER=12
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
@@ -42,14 +40,12 @@ CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
@@ -61,15 +57,13 @@ CONFIG_SATA_AHCI=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
CONFIG_NET_TULIP=y
CONFIG_ULI526X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
@@ -79,19 +73,14 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_MIXER_OSS=y
@@ -108,17 +97,14 @@ CONFIG_EXT3_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_LDM_PARTITION=y
CONFIG_NLS=y
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index 76f43df3dec7..a36e11ddaebd 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -1,33 +1,31 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_86xx=y
CONFIG_MPC8641_HPCN=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_BINFMT_MISC=m
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -45,12 +43,9 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
-CONFIG_NET_IPGRE=y
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -58,28 +53,23 @@ CONFIG_ARPD=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
+CONFIG_VITESSE_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -99,7 +89,6 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_MIXER_OSS=y
@@ -120,7 +109,6 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -134,7 +122,6 @@ CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -158,18 +145,12 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig
index 99ea8746bbaf..db79bdee844b 100644
--- a/arch/powerpc/configs/86xx/sbc8641d_defconfig
+++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig
@@ -1,8 +1,8 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -10,7 +10,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -20,10 +19,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_86xx=y
CONFIG_SBC8641D=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=m
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
@@ -43,8 +40,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -59,22 +54,17 @@ CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -82,7 +72,6 @@ CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
@@ -95,7 +84,6 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_WAN_ROUTER=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
@@ -119,8 +107,6 @@ CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -145,28 +131,25 @@ CONFIG_DM_SNAPSHOT=y
CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
CONFIG_TUN=m
-CONFIG_BROADCOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
-# CONFIG_NETDEV_10000 is not set
+CONFIG_BROADCOM_PHY=y
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -180,7 +163,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
CONFIG_SOFT_WATCHDOG=m
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -190,20 +172,14 @@ CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_OCFS2_FS=m
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
@@ -245,29 +221,21 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 15b1ff5d96e7..d89ff40d39b7 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -1,21 +1,18 @@
CONFIG_PPC_8xx=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_PPC_ADDER875=y
CONFIG_8xx_COPYBACK=y
CONFIG_HZ_1000=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -32,42 +29,30 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_DNOTIFY is not set
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_CRC32 is not set
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_CRC32_SLICEBY4=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 8c66b13e59fc..84f1b4140579 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -1,8 +1,9 @@
CONFIG_ALTIVEC=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
@@ -12,14 +13,13 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_AMIGAONE=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
-# CONFIG_MIGRATION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,11 +37,9 @@ CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
# CONFIG_NETFILTER_XT_MATCH_STATE is not set
-# CONFIG_IP_NF_TARGET_ULOG is not set
# CONFIG_IP_NF_MANGLE is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_FIFO=y
@@ -65,24 +63,19 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
# CONFIG_SCSI_SYM53C8XX_MMIO is not set
CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
-CONFIG_NET_PCI=y
CONFIG_8139CP=y
CONFIG_8139TOO=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PHYLIB=y
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=y
@@ -96,7 +89,6 @@ CONFIG_FIRMWARE_EDID=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_RADEON=y
CONFIG_FB_3DFX=y
-CONFIG_DISPLAY_SUPPORT=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID_GYRATION=y
@@ -104,7 +96,6 @@ CONFIG_HID_NTRIG=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_USB=y
@@ -117,27 +108,20 @@ CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_AMIGA_PARTITION=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index 8a08d6dcb0b4..91862292cd55 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -1,17 +1,23 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
@@ -24,7 +30,6 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_HIGHMEM=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
CONFIG_PCI_MSI=y
CONFIG_HOTPLUG_PCI=y
@@ -43,8 +48,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -52,22 +55,17 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_RAW=m
@@ -81,7 +79,6 @@ CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
@@ -133,8 +130,6 @@ CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_IND=y
CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -149,9 +144,6 @@ CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=m
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -162,7 +154,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
@@ -182,7 +173,6 @@ CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
CONFIG_AIC7XXX_RESET_DELAY_MS=15000
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC7XXX_OLD=m
CONFIG_SCSI_AIC79XX=m
CONFIG_AIC79XX_CMDS_PER_DEVICE=4
CONFIG_AIC79XX_RESET_DELAY_MS=15000
@@ -199,18 +189,14 @@ CONFIG_SCSI_IPS=m
CONFIG_SCSI_INITIO=m
CONFIG_SCSI_SYM53C8XX_2=m
CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_LPFC=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
CONFIG_TUN=m
-CONFIG_VITESSE_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MV643XX_ETH=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_ATM_DRIVERS is not set
-CONFIG_NETCONSOLE=m
+CONFIG_MV643XX_ETH=y
+CONFIG_VITESSE_PHY=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -218,10 +204,10 @@ CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_MPSC=y
CONFIG_SERIAL_MPSC_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_NVRAM=m
CONFIG_GEN_RTC=m
CONFIG_RAW_DRIVER=y
@@ -236,7 +222,7 @@ CONFIG_SENSORS_ADM1026=m
CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_DS1621=m
CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM77=m
CONFIG_SENSORS_LM78=m
@@ -245,23 +231,17 @@ CONFIG_SENSORS_LM83=m
CONFIG_SENSORS_LM85=m
CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_SMSC47M1=m
-CONFIG_SENSORS_SMSC47B397=m
CONFIG_SENSORS_VIA686A=m
CONFIG_SENSORS_W83781D=m
CONFIG_SENSORS_W83L785TS=m
-CONFIG_SENSORS_W83627HF=m
CONFIG_WATCHDOG=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_PCIPCWATCHDOG=m
CONFIG_WDTPCI=m
CONFIG_USBPCWATCHDOG=m
# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB=m
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_EHCI_ROOT_HUB_TT=y
@@ -350,24 +330,12 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
@@ -409,21 +377,16 @@ CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=m
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_HIGHMEM=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_PPC_EARLY_DEBUG_BOOTX=y
-CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -431,16 +394,11 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 7a7b3c879f96..9788b3c2d563 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -3,9 +3,10 @@ CONFIG_TUNE_CELL=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
@@ -17,7 +18,7 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
@@ -34,8 +35,6 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
@@ -63,7 +62,6 @@ CONFIG_INET6_IPCOMP=m
# CONFIG_IPV6_SIT is not set
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -94,16 +92,12 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -112,7 +106,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
@@ -144,26 +137,24 @@ CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_MACVLAN=m
CONFIG_TUN=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
+CONFIG_TIGON3=y
CONFIG_E1000=m
CONFIG_SKGE=m
CONFIG_SKY2=m
-CONFIG_TIGON3=y
-CONFIG_SPIDER_NET=y
CONFIG_GELIC_NET=m
CONFIG_GELIC_WIRELESS=y
+CONFIG_SPIDER_NET=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_TXX9_NR_UARTS=2
CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HVC_RTAS=y
CONFIG_HVC_BEAT=y
CONFIG_IPMI_HANDLER=m
@@ -174,9 +165,7 @@ CONFIG_IPMI_POWEROFF=m
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_I2C=y
-# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_VGA_CONSOLE is not set
CONFIG_HID=m
# CONFIG_USB_HID is not set
@@ -199,7 +188,6 @@ CONFIG_EDAC_CELL=y
CONFIG_UIO=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
@@ -210,11 +198,8 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
@@ -227,11 +212,10 @@ CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_CRYPTO_ECB=m
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
index acccbfde8a50..ff454dcd2dd3 100644
--- a/arch/powerpc/configs/celleb_defconfig
+++ b/arch/powerpc/configs/celleb_defconfig
@@ -3,9 +3,10 @@ CONFIG_TUNE_CELL=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
@@ -15,6 +16,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
@@ -23,8 +25,6 @@ CONFIG_SPU_FS=y
# CONFIG_CBE_THERM is not set
CONFIG_UDBG_RTAS_CONSOLE=y
# CONFIG_RTAS_PROC is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
CONFIG_NUMA=y
@@ -40,9 +40,7 @@ CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
@@ -55,7 +53,6 @@ CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
@@ -68,17 +65,15 @@ CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_SPIDER_NET=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_TXX9_NR_UARTS=3
CONFIG_SERIAL_TXX9_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HVC_RTAS=y
CONFIG_HVC_BEAT=y
# CONFIG_HW_RANDOM is not set
@@ -89,7 +84,6 @@ CONFIG_WATCHDOG=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
@@ -103,7 +97,6 @@ CONFIG_EXT2_FS_XIP=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
@@ -113,13 +106,10 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
@@ -132,11 +122,10 @@ CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_LIBCRC32C=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_CRYPTO_NULL=m
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index db5b30857e1c..253a9f200097 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -1,25 +1,24 @@
CONFIG_SMP=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_PMAC is not set
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
CONFIG_IRQ_ALL_CPUS=y
-# CONFIG_MIGRATION is not set
CONFIG_ISA=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -37,11 +36,9 @@ CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
# CONFIG_NETFILTER_XT_MATCH_STATE is not set
-# CONFIG_IP_NF_TARGET_ULOG is not set
# CONFIG_IP_NF_MANGLE is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -60,31 +57,28 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+CONFIG_PCNET32=y
CONFIG_NET_TULIP=y
CONFIG_DE4X5=y
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
+CONFIG_MV643XX_ETH=y
CONFIG_8139CP=y
CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_VIA_RHINE=y
-CONFIG_MV643XX_ETH=y
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_BRIQ_PANEL=m
# CONFIG_HW_RANDOM is not set
CONFIG_NVRAM=y
CONFIG_GEN_RTC=y
@@ -101,14 +95,12 @@ CONFIG_FB_ATY=y
CONFIG_FB_ATY_CT=y
CONFIG_FB_ATY_GX=y
CONFIG_FB_3DFX=y
-CONFIG_DISPLAY_SUPPORT=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -120,26 +112,19 @@ CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 611efe99faeb..51866f170684 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -1,13 +1,12 @@
CONFIG_PPC_85xx=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -57,7 +56,6 @@ CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -69,17 +67,15 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_PROC_DEVICETREE=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
@@ -87,7 +83,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_ATA=y
@@ -98,10 +93,11 @@ CONFIG_SATA_SIL=y
CONFIG_PATA_SIL680=y
CONFIG_NETDEVICES=y
CONFIG_FSL_PQ_MDIO=y
+CONFIG_FSL_XGMAC_MDIO=y
CONFIG_E1000=y
CONFIG_E1000E=y
-CONFIG_VITESSE_PHY=y
CONFIG_AT803X_PHY=y
+CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -123,7 +119,6 @@ CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -143,10 +138,10 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_UIO=y
-CONFIG_STAGING=y
-CONFIG_MEMORY=y
CONFIG_VIRT_DRIVERS=y
CONFIG_FSL_HV_MANAGER=y
+CONFIG_STAGING=y
+CONFIG_FSL_CORENET_CF=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -170,17 +165,15 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
CONFIG_RCU_TRACE=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_FSL_CORENET_CF=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index be24a18c0d96..d6c0c8198952 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -69,6 +69,7 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -79,7 +80,11 @@ CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_FSL_XGMAC_MDIO=y
CONFIG_E1000E=y
+CONFIG_VITESSE_PHY=y
+CONFIG_FIXED_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -118,7 +123,6 @@ CONFIG_FSL_DMA=y
CONFIG_VIRT_DRIVERS=y
CONFIG_FSL_HV_MANAGER=y
CONFIG_FSL_CORENET_CF=y
-CONFIG_MEMORY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_ISO9660_FS=m
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index fceffb3cffbe..7c137041f1d6 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -5,13 +5,13 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_82xx=y
CONFIG_EP8248E=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_NET=y
@@ -28,7 +28,6 @@ CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -38,14 +37,11 @@ CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_CFI_I1 is not set
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
+CONFIG_DAVICOM_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -57,27 +53,20 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-# CONFIG_CRC32 is not set
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BDI_SWITCH=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index b8a79d7ee89f..ee96be889dac 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -1,24 +1,20 @@
CONFIG_PPC_8xx=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_PPC_EP88XC=y
CONFIG_8xx_COPYBACK=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
-CONFIG_8XX_MINIMAL_FPEMU=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -35,27 +31,21 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_LXT_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_LXT_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
@@ -63,13 +53,8 @@ CONFIG_GEN_RTC=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_CRC32 is not set
+CONFIG_CRC32_SLICEBY4=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 6fab06f7f411..1d9ad8500909 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -2,12 +2,14 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@@ -16,17 +18,15 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_PMAC64=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
-# CONFIG_MIGRATION is not set
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -52,9 +52,9 @@ CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -69,7 +69,6 @@ CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_ATA=y
@@ -87,33 +86,29 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
-CONFIG_IEEE1394=y
-CONFIG_IEEE1394_OHCI1394=y
-CONFIG_IEEE1394_SBP2=m
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_RAWIO=y
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
CONFIG_ADB_PMU=y
CONFIG_PMAC_SMU=y
CONFIG_MAC_EMUMOUSEBTN=y
-CONFIG_THERM_PM72=y
CONFIG_WINDFARM=y
CONFIG_WINDFARM_PM81=y
CONFIG_WINDFARM_PM91=y
CONFIG_WINDFARM_PM112=y
CONFIG_WINDFARM_PM121=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_SUNGEM=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_E1000=y
CONFIG_TIGON3=y
+CONFIG_E1000=y
+CONFIG_SUNGEM=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -123,12 +118,6 @@ CONFIG_USB_USBNET=m
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
@@ -140,10 +129,8 @@ CONFIG_INPUT_EVDEV=y
CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
-# CONFIG_HWMON is not set
CONFIG_AGP=m
CONFIG_AGP_UNINORTH=m
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_TILEBLITTING=y
@@ -167,15 +154,14 @@ CONFIG_SND_AOA_ONYX=m
CONFIG_SND_AOA_TAS=m
CONFIG_SND_AOA_TOONIE=m
CONFIG_SND_USB_AUDIO=m
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
CONFIG_HID_GYRATION=y
CONFIG_LOGITECH_FF=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
@@ -245,8 +231,6 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -260,14 +244,12 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_1250=y
CONFIG_NLS_CODEPAGE_1251=y
@@ -276,30 +258,21 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 9ef2cc13e1b4..6c6c60f1aba4 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -1,11 +1,9 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-gcn"
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
CONFIG_PERF_EVENTS=y
@@ -22,7 +20,6 @@ CONFIG_GAMECUBE=y
CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
-# CONFIG_MIGRATION is not set
# CONFIG_SECCOMP is not set
CONFIG_ADVANCED_OPTIONS=y
CONFIG_NET=y
@@ -42,15 +39,11 @@ CONFIG_IP_PNP_RARP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
@@ -61,8 +54,8 @@ CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_JOYSTICK=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
@@ -86,7 +79,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=y
@@ -95,20 +87,15 @@ CONFIG_PROC_KCORE=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SCHED_TRACER=y
CONFIG_DMA_API_DEBUG=y
CONFIG_PPC_EARLY_DEBUG=y
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index 94ebfee188db..5e0f2551e5c7 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -1,17 +1,16 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
CONFIG_PPC_HOLLY=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
@@ -30,18 +29,15 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
CONFIG_TSI108_ETH=y
+CONFIG_PHYLIB=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -57,14 +53,11 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_XMON=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index b5e684640fdf..62ae92956d05 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -1,6 +1,7 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -13,10 +14,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
CONFIG_LINKSTATION=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,12 +41,8 @@ CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -58,10 +52,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -72,27 +63,23 @@ CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_CFI_I2 is not set
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_ATA=y
CONFIG_PATA_IT821X=y
CONFIG_PATA_SIL680=y
CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=y
CONFIG_TULIP_MMIO=y
CONFIG_R8169=y
-CONFIG_NETCONSOLE=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -106,7 +93,6 @@ CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_VGA_CONSOLE is not set
CONFIG_HID=m
# CONFIG_USB_HID is not set
@@ -126,7 +112,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=m
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -137,7 +122,6 @@ CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
@@ -153,15 +137,9 @@ CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index fbd9e4163311..ac9666f8abf1 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -1,10 +1,11 @@
CONFIG_PPC64=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_COMPAT_BRK is not set
@@ -16,16 +17,15 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_MAPLE=y
CONFIG_UDBG_RTAS_CONSOLE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
-# CONFIG_MIGRATION is not set
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_IDE=y
@@ -52,11 +51,9 @@ CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_IPR=y
CONFIG_ATA=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_AMD8111_ETH=y
-CONFIG_E1000=y
CONFIG_TIGON3=y
+CONFIG_E1000=y
CONFIG_USB_PEGASUS=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
@@ -72,13 +69,11 @@ CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
-# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -111,7 +106,6 @@ CONFIG_EXT2_FS_XIP=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -119,28 +113,22 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_NLS_DEFAULT="utf-8"
CONFIG_NLS_UTF8=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LATENCYTOP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 8fa84f156ef3..666922c5b572 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -1,7 +1,7 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -16,7 +16,6 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_82xx=y
CONFIG_MGCOGE=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -36,8 +35,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_GEOMETRY=y
@@ -45,7 +42,6 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
@@ -68,7 +64,6 @@ CONFIG_USB_GADGET=y
CONFIG_USB_FSL_USB2=y
CONFIG_USB_G_SERIAL=y
CONFIG_UIO=y
-CONFIG_UIO_PDRV=y
CONFIG_EXT2_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
@@ -77,16 +72,15 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
CONFIG_BDI_SWITCH=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index ee853a1b1b2c..59b85cb95259 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -50,7 +50,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_MPC5121_NFC=y
CONFIG_MTD_UBI=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -81,7 +80,6 @@ CONFIG_MDIO_BITBANG=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 69fd8adf9f5e..9fd041bfd778 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -1,6 +1,6 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
@@ -16,8 +16,6 @@ CONFIG_PPC_MPC5200_BUGFIX=y
CONFIG_PPC_MPC5200_LPBFIFO=m
# CONFIG_PPC_PMAC is not set
CONFIG_SIMPLE_GPIO=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -33,8 +31,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -42,7 +38,6 @@ CONFIG_MTD_ROM=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_PLATRAM=y
CONFIG_MTD_UBI=m
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -77,7 +72,6 @@ CONFIG_SENSORS_LM87=m
CONFIG_WATCHDOG=y
CONFIG_MFD_SM501=m
CONFIG_DRM=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_RADEON=y
@@ -94,9 +88,6 @@ CONFIG_SND=y
# CONFIG_SND_SPI is not set
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_MPC5200_I2S=y
-CONFIG_SND_MPC52xx_SOC_PCM030=y
-CONFIG_SND_MPC52xx_SOC_EFIKA=y
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
@@ -104,7 +95,6 @@ CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_HID_SMARTJOYPLUS=y
@@ -112,7 +102,6 @@ CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
@@ -135,13 +124,12 @@ CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index 75f0bbf0f6e8..e2647d5bb605 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -1,19 +1,17 @@
CONFIG_ALTIVEC=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
CONFIG_MPC7448HPC2=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -29,7 +27,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
@@ -37,13 +34,11 @@ CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_E100=y
CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_TSI108_ETH=y
+CONFIG_PHYLIB=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -53,17 +48,12 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index 6a22400f73c1..825b052176af 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -1,17 +1,17 @@
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_82xx=y
CONFIG_MPC8272_ADS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -27,7 +27,6 @@ CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -39,52 +38,41 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_I4=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
CONFIG_FS_ENET_MDIO_FCC=y
+CONFIG_DAVICOM_PHY=y
CONFIG_PPP=y
+CONFIG_PPP_DEFLATE=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
-CONFIG_PPP_DEFLATE=y
CONFIG_INPUT_EVDEV=y
-# CONFIG_SERIO_I8042 is not set
# CONFIG_VT is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
# CONFIG_HWMON is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BDI_SWITCH=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 23fec79964cf..671e220a9a98 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
@@ -45,14 +44,12 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
@@ -62,7 +59,6 @@ CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_UCC_GETH=y
CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
@@ -82,7 +78,6 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -97,7 +92,6 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -119,6 +113,5 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 02395fab19bd..8535c343dd57 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
+CONFIG_C293_PCIE=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -27,7 +28,6 @@ CONFIG_MPC85xx_MDS=y
CONFIG_MPC8536_DS=y
CONFIG_MPC85xx_DS=y
CONFIG_MPC85xx_RDB=y
-CONFIG_C293_PCIE=y
CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
@@ -42,6 +42,7 @@ CONFIG_TQM8548=y
CONFIG_TQM8555=y
CONFIG_TQM8560=y
CONFIG_SBC8548=y
+CONFIG_MVME2500=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
@@ -49,6 +50,8 @@ CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
CONFIG_NET=y
@@ -70,7 +73,6 @@ CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
@@ -80,33 +82,21 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_CFI_UTIL=y
-CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_RESERVE=1
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
@@ -117,7 +107,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
@@ -133,11 +122,12 @@ CONFIG_GIANFAR=y
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IGB=y
+CONFIG_AT803X_PHY=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
-CONFIG_AT803X_PHY=y
+CONFIG_BROADCOM_PHY=y
CONFIG_FIXED_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
@@ -146,8 +136,8 @@ CONFIG_INPUT_FF_MEMLESS=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -160,15 +150,12 @@ CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
-# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_HWMON=m
+CONFIG_SENSORS_LM90=m
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -192,7 +179,6 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -208,6 +194,7 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_MPC85XX=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
@@ -215,8 +202,6 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
-CONFIG_MEMORY=y
-# CONFIG_NET_DMA is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -227,21 +212,9 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-CONFIG_UBIFS_FS=y
-CONFIG_UBIFS_FS_XATTR=y
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -249,6 +222,9 @@ CONFIG_HFSPLUS_FS=m
CONFIG_BEFS_FS=m
CONFIG_BFS_FS=m
CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_VXFS_FS=m
CONFIG_HPFS_FS=m
@@ -259,16 +235,16 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
-CONFIG_CRC16=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index b5d1b82a1b43..c45ad2e01b0c 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -2,14 +2,13 @@ CONFIG_PPC_85xx=y
CONFIG_PHYS_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -23,6 +22,7 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
+CONFIG_C293_PCIE=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -30,7 +30,6 @@ CONFIG_MPC85xx_MDS=y
CONFIG_MPC8536_DS=y
CONFIG_MPC85xx_DS=y
CONFIG_MPC85xx_RDB=y
-CONFIG_C293_PCIE=y
CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
@@ -73,7 +72,6 @@ CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
@@ -83,33 +81,18 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_CFI_UTIL=y
-CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_RESERVE=1
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
@@ -120,7 +103,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
@@ -133,11 +115,11 @@ CONFIG_FS_ENET=y
CONFIG_UCC_GETH=y
CONFIG_GIANFAR=y
CONFIG_E1000E=y
+CONFIG_AT803X_PHY=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
-CONFIG_AT803X_PHY=y
CONFIG_FIXED_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
@@ -162,14 +144,10 @@ CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -193,7 +171,6 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -216,8 +193,6 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
-CONFIG_MEMORY=y
-# CONFIG_NET_DMA is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -228,21 +203,9 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-CONFIG_UBIFS_FS=y
-CONFIG_UBIFS_FS_XATTR=y
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -250,6 +213,9 @@ CONFIG_HFSPLUS_FS=m
CONFIG_BEFS_FS=m
CONFIG_BFS_FS=m
CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_VXFS_FS=m
CONFIG_HPFS_FS=m
@@ -260,20 +226,19 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
-CONFIG_CRC16=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_FSL_CAAM=y
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index d954e80c286a..321412c5dae4 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -1,25 +1,21 @@
CONFIG_PPC_8xx=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_BUG is not set
# CONFIG_BASE_FULL is not set
# CONFIG_EPOLL is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_MPC86XADS=y
CONFIG_8xx_COPYBACK=y
CONFIG_8xx_CPU6=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -32,27 +28,21 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_NETDEVICES=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
+CONFIG_FIXED_PHY=y
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_GEN_RTC=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_CCITT=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC32_SLICEBY4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index fc58aa8a89e4..a4572563681c 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -1,23 +1,24 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_86xx=y
@@ -26,11 +27,8 @@ CONFIG_SBC8641D=y
CONFIG_MPC8610_HPCD=y
CONFIG_GEF_SBC610=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_BINFMT_MISC=m
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -48,12 +46,9 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
-CONFIG_NET_IPGRE=y
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -61,28 +56,23 @@ CONFIG_ARPD=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_GIANFAR=y
+CONFIG_VITESSE_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -102,7 +92,6 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_MIXER_OSS=y
@@ -123,7 +112,6 @@ CONFIG_HID_MONTEREY=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -137,7 +125,6 @@ CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -145,9 +132,6 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_ADFS_FS=m
@@ -164,18 +148,15 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 3f47d00a10c0..2a10f98d4ee5 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -1,23 +1,19 @@
CONFIG_PPC_8xx=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_8xx_COPYBACK=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
-CONFIG_8XX_MINIMAL_FPEMU=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -34,7 +30,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -46,22 +41,17 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_I4=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
@@ -69,13 +59,8 @@ CONFIG_GEN_RTC=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_CRC32 is not set
+CONFIG_CRC32_SLICEBY4=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index 93c7752e2dbb..525a2cb500a7 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -61,7 +61,6 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_LAPB=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
@@ -88,13 +87,10 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_VME_BUS=m
-CONFIG_VME_CA91CX42=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -122,11 +118,6 @@ CONFIG_NLS_UTF8=m
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 3e72c8c06a0d..8f94782eb907 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -54,7 +54,6 @@ CONFIG_MTD_SLRAM=y
CONFIG_MTD_PHRAM=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_PASEMI=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
@@ -69,7 +68,6 @@ CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
@@ -134,7 +132,6 @@ CONFIG_HID_NTRIG=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_HID_SMARTJOYPLUS=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 0351b5ffdfef..ea8705ffcd76 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -1,19 +1,20 @@
CONFIG_ALTIVEC=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -21,13 +22,9 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_PMAC=y
CONFIG_PPC601_SYNC_FIX=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
-# CONFIG_MIGRATION is not set
-CONFIG_PM=y
-CONFIG_PM_DEBUG=y
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_APM_EMULATION=y
CONFIG_PCCARD=m
CONFIG_YENTA=m
@@ -46,7 +43,6 @@ CONFIG_INET_ESP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_IRC=m
@@ -84,18 +80,11 @@ CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -112,8 +101,6 @@ CONFIG_IRDA_CACHE_LAST_LSAP=y
CONFIG_IRDA_FAST_RR=y
CONFIG_IRTTY_SIR=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -128,10 +115,8 @@ CONFIG_MAC80211_LEDS=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_MAC_FLOPPY=m
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_UB=m
CONFIG_BLK_DEV_RAM=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=m
@@ -152,7 +137,6 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-CONFIG_SCSI_AIC7XXX_OLD=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_MESH=y
@@ -170,12 +154,6 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
-CONFIG_IEEE1394=m
-CONFIG_IEEE1394_OHCI1394=m
-CONFIG_IEEE1394_SBP2=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
CONFIG_ADB=y
CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
@@ -193,27 +171,22 @@ CONFIG_PMAC_RACKMETER=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
+CONFIG_PCNET32=y
CONFIG_MACE=y
CONFIG_BMAC=y
CONFIG_SUNGEM=y
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_PRISM54=m
-CONFIG_B43=m
-CONFIG_B43LEGACY=m
-CONFIG_HERMES=m
-CONFIG_APPLE_AIRPORT=m
-CONFIG_PCMCIA_HERMES=m
-CONFIG_P54_COMMON=m
-CONFIG_USB_USBNET=m
-# CONFIG_USB_NET_CDC_SUBSET is not set
CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=m
+CONFIG_USB_USBNET=m
+# CONFIG_USB_NET_CDC_SUBSET is not set
+CONFIG_PRISM54=m
+CONFIG_B43=m
+CONFIG_B43LEGACY=m
+CONFIG_P54_COMMON=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
@@ -226,7 +199,6 @@ CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_NVRAM=y
CONFIG_GEN_RTC=y
CONFIG_I2C_CHARDEV=m
-CONFIG_POWER_SUPPLY=y
CONFIG_APM_POWER=y
CONFIG_BATTERY_PMU=y
CONFIG_HWMON=m
@@ -253,7 +225,6 @@ CONFIG_FB_ATY=y
CONFIG_FB_ATY_CT=y
CONFIG_FB_ATY_GX=y
CONFIG_FB_3DFX=y
-CONFIG_DISPLAY_SUPPORT=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -314,7 +285,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
@@ -328,34 +298,27 @@ CONFIG_TMPFS=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index 52908c7897d9..a43bf6ea10fb 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -1,13 +1,10 @@
CONFIG_40x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -18,7 +15,6 @@ CONFIG_HOTFOOT=y
CONFIG_KILAUEA=y
CONFIG_MAKALU=y
CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -34,8 +30,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -43,13 +37,10 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_XILINX_SYSACE=m
CONFIG_NETDEVICES=y
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
CONFIG_SERIO=m
@@ -73,13 +64,11 @@ CONFIG_I2C_IBM_IIC=m
CONFIG_GPIO_XILINX=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=m
CONFIG_FB_XILINX=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
@@ -87,16 +76,12 @@ CONFIG_JFFS2_FS=m
CONFIG_UBIFS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 924e10df1844..bbc7f76d52c8 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -1,13 +1,10 @@
CONFIG_44x=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -28,7 +25,6 @@ CONFIG_YOSEMITE=y
CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y
CONFIG_PPC4xx_GPIO=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -44,8 +40,6 @@ CONFIG_BRIDGE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
@@ -55,7 +49,6 @@ CONFIG_MTD_NAND=m
CONFIG_MTD_NAND_NDFC=m
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_XILINX_SYSACE=m
@@ -64,8 +57,6 @@ CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_ETHERNET=y
-CONFIG_NET_VENDOR_IBM=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
CONFIG_SERIO=m
@@ -105,23 +96,18 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=m
-CONFIG_UBIFS_FS_XATTR=y
CONFIG_LOGFS=m
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZO=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
CONFIG_CRC_T10DIF=m
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5830d735c5c3..3315c9f0828a 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,6 +1,4 @@
CONFIG_PPC64=y
-CONFIG_ALTIVEC=y
-CONFIG_VSX=y
CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -42,10 +40,8 @@ CONFIG_PPC_CELLEB=y
CONFIG_PPC_CELL_QPACE=y
CONFIG_RTAS_FLASH=m
CONFIG_IBMEBUS=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_PMAC64=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
@@ -53,6 +49,7 @@ CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
CONFIG_SCHED_SMT=y
CONFIG_PCCARD=y
CONFIG_ELECTRA_CF=y
@@ -82,7 +79,6 @@ CONFIG_BPF_JIT=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
@@ -101,7 +97,6 @@ CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_CXGB3_ISCSI=m
@@ -122,6 +117,7 @@ CONFIG_SCSI_DH=m
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
CONFIG_SATA_SIL24=y
CONFIG_SATA_MV=y
CONFIG_SATA_SVW=y
@@ -145,7 +141,6 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_UEVENT=y
CONFIG_ADB_PMU=y
CONFIG_PMAC_SMU=y
-CONFIG_THERM_PM72=y
CONFIG_WINDFARM=y
CONFIG_WINDFARM_PM81=y
CONFIG_WINDFARM_PM91=y
@@ -209,7 +204,6 @@ CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
CONFIG_I2C_PASEMI=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_OF=y
@@ -296,6 +290,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
@@ -326,7 +321,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
@@ -362,4 +356,4 @@ CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
-CONFIG_KVM_BOOK3S_64_HV=y
+CONFIG_KVM_BOOK3S_64_HV=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 67885b2d70aa..ddf9773458cf 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -22,7 +22,6 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_CORENET_GENERIC=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -55,7 +54,6 @@ CONFIG_BRIDGE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
@@ -70,7 +68,6 @@ CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
@@ -131,7 +128,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_OF=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index ad6d6b5af7d7..e5d2c3dc07f1 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1,26 +1,22 @@
CONFIG_FSL_EMB_PERFMON=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
CONFIG_USER_NS=y
-CONFIG_PID_NS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
@@ -29,10 +25,19 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_EFIKA=y
CONFIG_PPC_MPC5200_BUGFIX=y
-CONFIG_PPC_MPC5200_GPIO=y
CONFIG_PPC_82xx=y
CONFIG_MPC8272_ADS=y
CONFIG_PQ2FADS=y
@@ -56,7 +61,6 @@ CONFIG_SBC8641D=y
CONFIG_MPC8610_HPCD=y
CONFIG_GEF_SBC610=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEBUG=y
CONFIG_CPU_FREQ_STAT=m
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
@@ -69,19 +73,13 @@ CONFIG_TAU=y
CONFIG_TAU_AVERAGE=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
-CONFIG_PPC_BESTCOMM=y
-CONFIG_GPIO_MPC8XXX=y
CONFIG_MCU_MPC8349EMITX=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BINFMT_MISC=y
-# CONFIG_MIGRATION is not set
-CONFIG_PM=y
-CONFIG_PM_DEBUG=y
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_ISA=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
@@ -106,8 +104,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -128,7 +124,6 @@ CONFIG_TCP_CONG_VENO=m
CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -144,7 +139,6 @@ CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -159,7 +153,6 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
@@ -204,21 +197,12 @@ CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -229,7 +213,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -240,7 +223,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -266,7 +248,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
CONFIG_NET_DCCPPROBE=m
@@ -284,8 +265,6 @@ CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
-CONFIG_WAN_ROUTER=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
@@ -349,8 +328,6 @@ CONFIG_VLSI_FIR=m
CONFIG_VIA_FIR=m
CONFIG_MCS_FIR=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -380,7 +357,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
CONFIG_PARPORT_SERIAL=m
@@ -396,7 +372,6 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_CDROM_PKTCDVD=m
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_HD=y
-CONFIG_MISC_DEVICES=y
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_SENSORS_TSL2550=m
CONFIG_EEPROM_AT24=m
@@ -419,7 +394,6 @@ CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -436,8 +410,8 @@ CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_SATA_FSL=m
CONFIG_PDC_ADMA=m
-CONFIG_PATA_MPC52xx=m
CONFIG_ATA_PIIX=m
+CONFIG_PATA_MPC52xx=m
CONFIG_PATA_OPTIDMA=m
CONFIG_PATA_SCH=m
CONFIG_PATA_VIA=m
@@ -479,37 +453,44 @@ CONFIG_THERM_WINDTUNNEL=m
CONFIG_THERM_ADT746X=m
CONFIG_WINDFARM=y
CONFIG_PMAC_RACKMETER=m
+CONFIG_SENSORS_AMS=m
CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_NETCONSOLE=m
CONFIG_TUN=m
CONFIG_VETH=m
-CONFIG_NET_SB1000=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_REALTEK_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MACE=m
-CONFIG_BMAC=m
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_CASSINI=m
-CONFIG_NET_VENDOR_3COM=y
+CONFIG_VIRTIO_NET=m
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+CONFIG_ATM_NICSTAR=m
+CONFIG_ATM_IDT77252=m
+CONFIG_ATM_HE=m
CONFIG_EL3=m
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
CONFIG_VORTEX=m
CONFIG_TYPHOON=m
-CONFIG_NET_VENDOR_SMC=y
-CONFIG_ULTRA=m
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_ACENIC=m
+CONFIG_AMD8111_ETH=m
+CONFIG_PCNET32=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_MACE=m
+CONFIG_BMAC=m
+CONFIG_ATL1=m
+CONFIG_B44=m
+CONFIG_BNX2=m
+CONFIG_TIGON3=m
+CONFIG_BNX2X=m
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
@@ -519,69 +500,84 @@ CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
CONFIG_PCMCIA_XIRCOM=m
-CONFIG_NET_ISA=y
-CONFIG_EWRK3=m
-CONFIG_NE2000=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
-CONFIG_AMD8111_ETH=m
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_B44=m
-CONFIG_FORCEDETH=m
+CONFIG_DL2K=m
+CONFIG_SUNDANCE=m
+CONFIG_S2IO=m
+CONFIG_FEC_MPC52xx=m
+CONFIG_GIANFAR=m
+CONFIG_PCMCIA_FMVJ18X=m
CONFIG_E100=m
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IXGB=m
+CONFIG_IXGBE=m
+CONFIG_IP1000=m
+CONFIG_MV643XX_ETH=m
+CONFIG_SKGE=m
+CONFIG_SKY2=m
+CONFIG_MYRI10GE=m
CONFIG_FEALNX=m
CONFIG_NATSEMI=m
+CONFIG_NS83820=m
+CONFIG_PCMCIA_AXNET=m
+CONFIG_NE2000=m
CONFIG_NE2K_PCI=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_ULTRA=m
+CONFIG_FORCEDETH=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_QLA3XXX=m
+CONFIG_QLGE=m
+CONFIG_NETXEN_NIC=m
CONFIG_8139CP=m
CONFIG_8139TOO=m
# CONFIG_8139TOO_PIO is not set
CONFIG_8139TOO_8129=y
+CONFIG_R8169=m
CONFIG_R6040=m
+CONFIG_SC92031=m
CONFIG_SIS900=m
+CONFIG_SIS190=m
+CONFIG_SFC=m
+CONFIG_PCMCIA_SMC91C92=m
CONFIG_EPIC100=m
-CONFIG_SUNDANCE=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_CASSINI=m
+CONFIG_NIU=m
+CONFIG_TEHUTI=m
CONFIG_TLAN=m
CONFIG_VIA_RHINE=m
CONFIG_VIA_RHINE_MMIO=y
-CONFIG_SC92031=m
-CONFIG_NET_POCKET=y
-CONFIG_DE600=m
-CONFIG_DE620=m
-CONFIG_FEC_MPC52xx=m
-CONFIG_ACENIC=m
-CONFIG_DL2K=m
-CONFIG_E1000=m
-CONFIG_E1000E=m
-CONFIG_IP1000=m
-CONFIG_IGB=m
-CONFIG_NS83820=m
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_R8169=m
-CONFIG_R8169_VLAN=y
-CONFIG_SIS190=m
-CONFIG_SKGE=m
-CONFIG_SKY2=m
CONFIG_VIA_VELOCITY=m
-CONFIG_TIGON3=m
-CONFIG_BNX2=m
-CONFIG_GIANFAR=m
-CONFIG_MV643XX_ETH=m
-CONFIG_QLA3XXX=m
-CONFIG_ATL1=m
-CONFIG_CHELSIO_T1=m
-CONFIG_CHELSIO_T1_1G=y
-CONFIG_CHELSIO_T3=m
-CONFIG_IXGBE=m
-CONFIG_IXGB=m
-CONFIG_S2IO=m
-CONFIG_MYRI10GE=m
-CONFIG_NETXEN_NIC=m
-CONFIG_NIU=m
-CONFIG_TEHUTI=m
-CONFIG_BNX2X=m
-CONFIG_QLGE=m
-CONFIG_SFC=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_FDDI=y
+CONFIG_SKFP=m
+CONFIG_NET_SB1000=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_PLIP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -597,39 +593,6 @@ CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_PCMCIA_AXNET=m
-CONFIG_ATM_TCP=m
-CONFIG_ATM_LANAI=m
-CONFIG_ATM_ENI=m
-CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_IDT77252=m
-CONFIG_ATM_HE=m
-CONFIG_FDDI=y
-CONFIG_SKFP=m
-CONFIG_PLIP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_NET_FC=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_VIRTIO_NET=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
@@ -670,10 +633,8 @@ CONFIG_TABLET_USB_ACECAD=m
CONFIG_TABLET_USB_AIPTEK=m
CONFIG_TABLET_USB_GTCO=m
CONFIG_TABLET_USB_KBTAB=m
-CONFIG_TABLET_USB_WACOM=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
-CONFIG_INPUT_ATI_REMOTE=m
CONFIG_INPUT_ATI_REMOTE2=m
CONFIG_INPUT_KEYSPAN_REMOTE=m
CONFIG_INPUT_POWERMATE=m
@@ -685,16 +646,16 @@ CONFIG_GAMEPORT_NS558=m
CONFIG_GAMEPORT_L4=m
CONFIG_GAMEPORT_EMU10K1=m
CONFIG_GAMEPORT_FM801=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_ROCKETPORT=m
CONFIG_CYCLADES=m
CONFIG_SYNCLINK=m
CONFIG_SYNCLINKMP=m
CONFIG_SYNCLINK_GT=m
-CONFIG_N_HDLC=m
CONFIG_NOZOMI=m
+CONFIG_N_HDLC=m
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_CS=m
@@ -711,8 +672,6 @@ CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
CONFIG_SERIAL_JSM=m
CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_BRIQ_PANEL=m
CONFIG_PRINTER=m
CONFIG_LP_CONSOLE=y
CONFIG_PPDEV=m
@@ -735,6 +694,7 @@ CONFIG_I2C_TINY_USB=m
CONFIG_I2C_PCA_ISA=m
CONFIG_I2C_STUB=m
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_MPC8XXX=y
CONFIG_W1=m
CONFIG_W1_MASTER_DS2490=m
CONFIG_W1_MASTER_DS2482=m
@@ -754,15 +714,13 @@ CONFIG_SENSORS_ADM1029=m
CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_ADM9240=m
CONFIG_SENSORS_ADT7470=m
-CONFIG_SENSORS_AMS=m
CONFIG_SENSORS_ATXP1=m
CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_F71805F=m
-CONFIG_SENSORS_F71882FG=m
CONFIG_SENSORS_F75375S=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX6650=m
CONFIG_SENSORS_LM63=m
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM77=m
@@ -774,20 +732,12 @@ CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM92=m
CONFIG_SENSORS_LM93=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_MAX6650=m
-CONFIG_SENSORS_PC87360=m
-CONFIG_SENSORS_PC87427=m
CONFIG_SENSORS_PCF8591=m
CONFIG_SENSORS_SIS5595=m
-CONFIG_SENSORS_DME1737=m
-CONFIG_SENSORS_SMSC47M1=m
CONFIG_SENSORS_SMSC47M192=m
-CONFIG_SENSORS_SMSC47B397=m
CONFIG_SENSORS_ADS7828=m
CONFIG_SENSORS_THMC50=m
CONFIG_SENSORS_VIA686A=m
-CONFIG_SENSORS_VT1211=m
CONFIG_SENSORS_VT8231=m
CONFIG_SENSORS_W83781D=m
CONFIG_SENSORS_W83791D=m
@@ -795,8 +745,6 @@ CONFIG_SENSORS_W83792D=m
CONFIG_SENSORS_W83793=m
CONFIG_SENSORS_W83L785TS=m
CONFIG_SENSORS_W83L786NG=m
-CONFIG_SENSORS_W83627HF=m
-CONFIG_SENSORS_W83627EHF=m
CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SOFT_WATCHDOG=m
@@ -815,7 +763,6 @@ CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_DRM_VIA=m
CONFIG_DRM_SAVAGE=m
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_CIRRUS=m
CONFIG_FB_OF=y
@@ -850,10 +797,8 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_DISPLAY_SUPPORT=m
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -897,7 +842,6 @@ CONFIG_SND_CMIPCI=m
CONFIG_SND_OXYGEN=m
CONFIG_SND_CS4281=m
CONFIG_SND_CS46XX=m
-CONFIG_SND_CS5530=m
CONFIG_SND_DARLA20=m
CONFIG_SND_GINA20=m
CONFIG_SND_LAYLA20=m
@@ -919,7 +863,6 @@ CONFIG_SND_ES1968=m
CONFIG_SND_FM801=m
CONFIG_SND_HDSP=m
CONFIG_SND_HDSPM=m
-CONFIG_SND_HIFIER=m
CONFIG_SND_ICE1712=m
CONFIG_SND_ICE1724=m
CONFIG_SND_KORG1212=m
@@ -949,8 +892,6 @@ CONFIG_SND_USB_CAIAQ=m
CONFIG_SND_USB_CAIAQ_INPUT=y
# CONFIG_SND_PCMCIA is not set
CONFIG_HIDRAW=y
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
CONFIG_HID_GYRATION=y
CONFIG_LOGITECH_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
@@ -960,12 +901,12 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_EHCI_FSL=y
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
@@ -1001,7 +942,6 @@ CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_FUNSOFT=m
CONFIG_USB_SERIAL_VISOR=m
CONFIG_USB_SERIAL_IPAQ=m
CONFIG_USB_SERIAL_IR=m
@@ -1016,12 +956,10 @@ CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
-CONFIG_USB_SERIAL_MOTOROLA=m
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_OTI6858=m
CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
@@ -1083,12 +1021,14 @@ CONFIG_RTC_DRV_M48T35=m
CONFIG_RTC_DRV_M48T59=m
CONFIG_RTC_DRV_V3020=m
CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_AUXDISPLAY=y
CONFIG_KS0108=m
CONFIG_UIO=m
CONFIG_UIO_CIF=m
-CONFIG_UIO_PDRV=m
CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT2_FS=m
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -1111,12 +1051,7 @@ CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_GFS2_FS=m
-CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
@@ -1145,20 +1080,17 @@ CONFIG_ROMFS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_EXPERIMENTAL=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_NCPFS_IOCTL_LOCKING=y
@@ -1170,17 +1102,6 @@ CONFIG_NCPFS_NLS=y
CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_9P_FS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
@@ -1219,29 +1140,27 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_DLM=m
-CONFIG_DLM_DEBUG=y
+CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_MAGIC_SYSRQ=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_HIGHMEM=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-CONFIG_DEBUG_HIGHMEM=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_VM=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
CONFIG_FAIL_PAGE_ALLOC=y
@@ -1250,17 +1169,12 @@ CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_XMON=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_PPC_EARLY_DEBUG_BOOTX=y
-CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -1268,9 +1182,7 @@ CONFIG_SECURITY_NETWORK_XFRM=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
@@ -1278,14 +1190,12 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -1306,5 +1216,3 @@ CONFIG_CRYPTO_DEV_HIFN_795X=m
CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
CONFIG_CRYPTO_DEV_TALITOS=m
CONFIG_VIRTUALIZATION=y
-CONFIG_VIRTIO_PCI=m
-CONFIG_VIRTIO_BALLOON=m
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index baad8db21b61..3e336ee8bb4a 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -1,20 +1,19 @@
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_82xx=y
CONFIG_PQ2FADS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
-# CONFIG_8260_PCI9 is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -29,7 +28,6 @@ CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,55 +39,43 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_I4=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
CONFIG_FS_ENET_MDIO_FCC=y
+CONFIG_DAVICOM_PHY=y
CONFIG_PPP=y
+CONFIG_PPP_DEFLATE=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
-CONFIG_PPP_DEFLATE=y
CONFIG_INPUT_EVDEV=y
-# CONFIG_SERIO_I8042 is not set
# CONFIG_VT is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_M66592=y
+CONFIG_USB_ETH=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BDI_SWITCH=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 879de5efb073..adc14e813a49 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -8,7 +8,6 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_LZMA=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_PERF_EVENTS is not set
@@ -22,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_PS3=y
+CONFIG_PS3_ADVANCED=y
+CONFIG_PS3_REPOSITORY_WRITE=y
CONFIG_PS3_DISK=y
CONFIG_PS3_ROM=y
CONFIG_PS3_FLASH=y
@@ -64,11 +65,9 @@ CONFIG_BT_HCIBTUSB=m
CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65535
@@ -76,7 +75,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_MD=y
CONFIG_BLK_DEV_DM=m
@@ -107,7 +105,6 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_PS3=y
# CONFIG_VGA_CONSOLE is not set
@@ -130,8 +127,6 @@ CONFIG_HID_TWINHAN=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_LOGITECH_DJ=m
CONFIG_HID_MICROSOFT=m
-CONFIG_HID_PS3REMOTE=m
-CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
CONFIG_HID_SMARTJOYPLUS=m
CONFIG_USB_HIDDEV=y
@@ -169,18 +164,17 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PROVE_LOCKING=y
CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1f97364017c7..c2e39f66b182 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -1,6 +1,4 @@
CONFIG_PPC64=y
-CONFIG_ALTIVEC=y
-CONFIG_VSX=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
@@ -17,11 +15,18 @@ CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@@ -40,6 +45,7 @@ CONFIG_DTL=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m
CONFIG_IBMEBUS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
@@ -47,6 +53,8 @@ CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
@@ -69,10 +77,10 @@ CONFIG_INET_IPCOMP=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
CONFIG_BLK_DEV_FD=m
@@ -90,7 +98,6 @@ CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_CXGB3_ISCSI=m
@@ -111,6 +118,7 @@ CONFIG_SCSI_DH=m
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
# CONFIG_ATA_SFF is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
@@ -124,6 +132,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
@@ -132,8 +141,12 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_UEVENT=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
CONFIG_NETCONSOLE=y
CONFIG_TUN=m
+CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
CONFIG_VORTEX=y
@@ -166,6 +179,7 @@ CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
# CONFIG_SERIO_SERPORT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_ICOM=m
@@ -242,6 +256,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
@@ -270,7 +285,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
@@ -304,7 +318,4 @@ CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
-CONFIG_KVM_BOOK3S_64_HV=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_KVM_BOOK3S_64_HV=m
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
index ac7ca5852827..09bc96e792cd 100644
--- a/arch/powerpc/configs/pseries_le_defconfig
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -1,6 +1,4 @@
CONFIG_PPC64=y
-CONFIG_ALTIVEC=y
-CONFIG_VSX=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2048
CONFIG_CPU_LITTLE_ENDIAN=y
@@ -18,11 +16,18 @@ CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@@ -41,6 +46,7 @@ CONFIG_DTL=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m
CONFIG_IBMEBUS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
@@ -48,6 +54,8 @@ CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
@@ -70,10 +78,10 @@ CONFIG_INET_IPCOMP=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
CONFIG_BLK_DEV_FD=m
@@ -91,7 +99,6 @@ CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_CXGB3_ISCSI=m
@@ -112,6 +119,7 @@ CONFIG_SCSI_DH=m
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
# CONFIG_ATA_SFF is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
@@ -125,6 +133,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
@@ -133,8 +142,12 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_UEVENT=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
CONFIG_NETCONSOLE=y
CONFIG_TUN=m
+CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
CONFIG_VORTEX=y
@@ -167,6 +180,7 @@ CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
# CONFIG_SERIO_SERPORT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_ICOM=m
@@ -243,6 +257,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
@@ -271,7 +286,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
@@ -300,11 +314,6 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_NX=y
-CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
-CONFIG_KVM_BOOK3S_64_HV=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_KVM_BOOK3S_64_HV=m
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 60ad2c08caa6..b5db7dffe86d 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
@@ -6,13 +5,13 @@ CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
CONFIG_STORCENTER=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_SECCOMP is not set
@@ -29,11 +28,8 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_NFTL=y
@@ -41,7 +37,6 @@ CONFIG_NFTL_RW=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_IDE=y
CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_SCSI=y
@@ -57,7 +52,6 @@ CONFIG_MD_RAID456=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_R8169=y
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -81,12 +75,10 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=m
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
@@ -94,5 +86,3 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 7fe277a7b422..4c973c5321c6 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -1,12 +1,10 @@
CONFIG_PPC_8xx=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
@@ -15,15 +13,12 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_TQM8XX=y
CONFIG_8xx_COPYBACK=y
# CONFIG_8xx_CPU15 is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
-CONFIG_8XX_MINIMAL_FPEMU=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -40,32 +35,24 @@ CONFIG_SYN_COOKIES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_PROC_DEVICETREE=y
# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
+CONFIG_FS_ENET=y
CONFIG_DAVICOM_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
@@ -74,13 +61,8 @@ CONFIG_GEN_RTC=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_CRC32 is not set
+CONFIG_CRC32_SLICEBY4=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 1e2b7d062aa4..34eaf528fa87 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-wii"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
@@ -6,7 +5,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
CONFIG_PERF_EVENTS=y
@@ -22,7 +20,6 @@ CONFIG_WII=y
CONFIG_PREEMPT=y
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
-# CONFIG_MIGRATION is not set
# CONFIG_SECCOMP is not set
CONFIG_ADVANCED_OPTIONS=y
CONFIG_NET=y
@@ -39,7 +36,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_BT=y
-CONFIG_BT_L2CAP=y
CONFIG_BT_RFCOMM=y
CONFIG_BT_BNEP=y
CONFIG_BT_BNEP_MC_FILTER=y
@@ -49,18 +45,12 @@ CONFIG_MAC80211=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_B43=y
CONFIG_B43_SDIO=y
# CONFIG_B43_PHY_LP is not set
@@ -78,8 +68,8 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
CONFIG_NVRAM=y
CONFIG_I2C=y
@@ -109,7 +99,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
@@ -119,7 +108,6 @@ CONFIG_PROC_KCORE=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
@@ -127,13 +115,9 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DMA_API_DEBUG=y
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index d3feba5a275f..c154cebc1041 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 5b9312220e84..30b35fff2dea 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
-
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 22d5a7da9e68..5cf5a6d10685 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -165,7 +165,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000)
-#define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000)
+/* Free LONG_ASM_CONST(0x0000001000000000) */
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 0652ebe117af..55abfd09e47f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -38,8 +38,9 @@ struct device_node;
#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
-#define EEH_ENABLE_IO_FOR_LOG 0x10 /* Enable IO for log */
-#define EEH_EARLY_DUMP_LOG 0x20 /* Dump log immediately */
+#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
+#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
/*
* Delay for PE reset, all in ms
@@ -77,6 +78,7 @@ struct device_node;
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
+#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
@@ -216,6 +218,7 @@ struct eeh_ops {
};
extern int eeh_subsystem_flags;
+extern int eeh_max_freezes;
extern struct eeh_ops *eeh_ops;
extern raw_spinlock_t confirm_error_lock;
@@ -253,12 +256,6 @@ static inline void eeh_serialize_unlock(unsigned long flags)
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}
-/*
- * Max number of EEH freezes allowed before we consider the device
- * to be permanently disabled.
- */
-#define EEH_MAX_ALLOWED_FREEZES 5
-
typedef void *(*eeh_traverse_func)(void *data, void *flag);
void eeh_set_pe_aux_size(int size);
int eeh_phb_pe_create(struct pci_controller *phb);
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 067fb0dca549..c7240a024b96 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -95,6 +95,9 @@ struct fsl_lbc_bank {
#define OR_FCM_TRLX_SHIFT 2
#define OR_FCM_EHTR 0x00000002
#define OR_FCM_EHTR_SHIFT 1
+
+#define OR_GPCM_AM 0xFFFF8000
+#define OR_GPCM_AM_SHIFT 15
};
struct fsl_lbc_regs {
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
index caa1b21c25cd..38311c98eed9 100644
--- a/arch/powerpc/include/asm/fsl_pamu_stash.h
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -32,8 +32,8 @@ enum pamu_stash_target {
*/
struct pamu_stash_attribute {
- u32 cpu; /* cpu number */
- u32 cache; /* cache to stash to: L1,L2,L3 */
+ u32 cpu; /* cpu number */
+ u32 cache; /* cache to stash to: L1,L2,L3 */
};
#endif /* __FSL_PAMU_STASH_H */
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index af15d4d8d604..039b583db029 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -41,34 +41,59 @@ typedef ppc_opcode_t kprobe_opcode_t;
#define MAX_INSN_SIZE 1
#ifdef CONFIG_PPC64
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+/* PPC64 ABIv2 needs local entry point */
+#define kprobe_lookup_name(name, addr) \
+{ \
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
+ if (addr) \
+ addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
+}
+#else
/*
- * 64bit powerpc uses function descriptors.
- * Handle cases where:
- * - User passes a <.symbol> or <module:.symbol>
- * - User passes a <symbol> or <module:symbol>
- * - User passes a non-existent symbol, kallsyms_lookup_name
- * returns 0. Don't deref the NULL pointer in that case
+ * 64bit powerpc ABIv1 uses function descriptors:
+ * - Check for the dot variant of the symbol first.
+ * - If that fails, try looking up the symbol provided.
+ *
+ * This ensures we always get to the actual symbol and not the descriptor.
+ * Also handle <module:symbol> format.
*/
#define kprobe_lookup_name(name, addr) \
{ \
- addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
- if (addr) { \
- char *colon; \
- if ((colon = strchr(name, ':')) != NULL) { \
- colon++; \
- if (*colon != '\0' && *colon != '.') \
- addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
- } else if (name[0] != '.') \
- addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
- } else { \
- char dot_name[KSYM_NAME_LEN]; \
+ char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN]; \
+ char *modsym; \
+ bool dot_appended = false; \
+ if ((modsym = strchr(name, ':')) != NULL) { \
+ modsym++; \
+ if (*modsym != '\0' && *modsym != '.') { \
+ /* Convert to <module:.symbol> */ \
+ strncpy(dot_name, name, modsym - name); \
+ dot_name[modsym - name] = '.'; \
+ dot_name[modsym - name + 1] = '\0'; \
+ strncat(dot_name, modsym, \
+ sizeof(dot_name) - (modsym - name) - 2);\
+ dot_appended = true; \
+ } else { \
+ dot_name[0] = '\0'; \
+ strncat(dot_name, name, sizeof(dot_name) - 1); \
+ } \
+ } else if (name[0] != '.') { \
dot_name[0] = '.'; \
dot_name[1] = '\0'; \
strncat(dot_name, name, KSYM_NAME_LEN - 2); \
- addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
+ dot_appended = true; \
+ } else { \
+ dot_name[0] = '\0'; \
+ strncat(dot_name, name, KSYM_NAME_LEN - 1); \
+ } \
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
+ if (!addr && dot_appended) { \
+ /* Let's try the original non-dot symbol lookup */ \
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
} \
}
-#endif
+#endif /* defined(_CALL_ELF) && _CALL_ELF == 2 */
+#endif /* CONFIG_PPC64 */
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 7efd666a3fa7..8ef05121d3cd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -107,6 +107,7 @@ struct kvm_vcpu_stat {
u32 emulated_inst_exits;
u32 dec_exits;
u32 ext_intr_exits;
+ u32 halt_successful_poll;
u32 halt_wakeup;
u32 dbell_exits;
u32 gdbell_exits;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index eb95b675109b..9ee0a30a02ce 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -304,7 +304,7 @@ enum OpalMessageType {
*/
OPAL_MSG_MEM_ERR,
OPAL_MSG_EPOW,
- OPAL_MSG_SHUTDOWN,
+ OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
OPAL_MSG_HMI_EVT,
OPAL_MSG_TYPE_MAX,
};
@@ -595,6 +595,14 @@ enum {
OPAL_PHB3_NUM_PEST_REGS = 256
};
+/* CAPI modes for PHB */
+enum {
+ OPAL_PHB_CAPI_MODE_PCIE = 0,
+ OPAL_PHB_CAPI_MODE_CAPI = 1,
+ OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
+ OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
+};
+
struct OpalIoPhbErrorCommon {
__be32 version;
__be32 ioType;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 725247beebec..546d036fe925 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -119,6 +119,10 @@ extern void setup_indirect_pci(struct pci_controller* hose,
extern int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val);
+extern int __indirect_read_config(struct pci_controller *hose,
+ unsigned char bus_number, unsigned int devfn,
+ int offset, int len, u32 *val);
+
extern int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val);
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 234e07c47803..64b52b1cf542 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -45,7 +45,7 @@ extern int icache_44x_need_flush;
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -178,12 +178,11 @@ static inline unsigned long pte_update(pte_t *p,
andc %1,%0,%5\n\
or %1,%1,%6\n\
/* 0x200 == Extended encoding, bit 22 */ \
- /* Bit 22 has to be 1 if neither _PAGE_USER nor _PAGE_RW are set */ \
- rlwimi %1,%1,32-2,0x200\n /* get _PAGE_USER */ \
- rlwinm %3,%1,32-1,0x200\n /* get _PAGE_RW */ \
- or %1,%3,%1\n\
- xori %1,%1,0x200\n"
-" stwcx. %1,0,%4\n\
+ /* Bit 22 has to be 1 when _PAGE_USER is unset and _PAGE_RO is set */ \
+ rlwimi %1,%1,32-1,0x200\n /* get _PAGE_RO */ \
+ rlwinm %3,%1,32-2,0x200\n /* get _PAGE_USER */ \
+ andc %1,%1,%3\n\
+ stwcx. %1,0,%4\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
: "r" (p), "r" (clr), "r" (set), "m" (*p)
@@ -275,7 +274,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+ pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
@@ -286,9 +285,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
- unsigned long bits = pte_val(entry) &
+ unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- pte_update(ptep, 0, bits);
+ unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+ pte_update(ptep, clr, set);
}
#define __HAVE_ARCH_PTE_SAME
@@ -332,8 +333,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
- *_PAGE_HASHPTE bit (if used). -- paulus
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ * -- paulus
*/
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
@@ -341,15 +342,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-/* Encode and decode a nonlinear file mapping entry */
-#define PTE_FILE_MAX_BITS 29
-#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
-
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
+#endif
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index b9dcc936e2d1..43e6ad424c7f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -12,7 +12,7 @@
#endif
#include <asm/barrier.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
/*
* Size of EA range mapped by our pagetables.
@@ -352,9 +352,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
-#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
-#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
@@ -389,7 +386,7 @@ void pgtable_cache_init(void);
* The last three bits are intentionally left to zero. This memory location
* are also used as normal page PTE pointers. So if we have any pointers
* left around while we collapse a hugepage, we need to make sure
- * _PAGE_PRESENT and _PAGE_FILE bits of that are zero when we look at them
+ * _PAGE_PRESENT bit of that is zero when we look at them
*/
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index a8805fee0df9..9835ac4173b7 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -30,72 +30,36 @@ struct mm_struct;
#include <asm/tlbflush.h>
/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_write(pte_t pte)
+{ return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
#ifdef CONFIG_NUMA_BALANCING
-static inline int pte_present(pte_t pte)
-{
- return pte_val(pte) & _PAGE_NUMA_MASK;
-}
-
-#define pte_present_nonuma pte_present_nonuma
-static inline int pte_present_nonuma(pte_t pte)
-{
- return pte_val(pte) & (_PAGE_PRESENT);
-}
-
-#define ptep_set_numa ptep_set_numa
-static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
- VM_BUG_ON(1);
-
- pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
- return;
-}
-
-#define pmdp_set_numa pmdp_set_numa
-static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp)
-{
- if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
- VM_BUG_ON(1);
-
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
- return;
-}
-
/*
- * Generic NUMA pte helpers expect pteval_t and pmdval_t types to exist
- * which was inherited from x86. For the purposes of powerpc pte_basic_t and
- * pmd_t are equivalent
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
*/
-#define pteval_t pte_basic_t
-#define pmdval_t pmd_t
-static inline pteval_t ptenuma_flags(pte_t pte)
+static inline int pte_protnone(pte_t pte)
{
- return pte_val(pte) & _PAGE_NUMA_MASK;
+ return (pte_val(pte) &
+ (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}
-static inline pmdval_t pmdnuma_flags(pmd_t pmd)
+static inline int pmd_protnone(pmd_t pmd)
{
- return pmd_val(pmd) & _PAGE_NUMA_MASK;
+ return pte_protnone(pmd_pte(pmd));
}
-
-# else
+#endif /* CONFIG_NUMA_BALANCING */
static inline int pte_present(pte_t pte)
{
return pte_val(pte) & _PAGE_PRESENT;
}
-#endif /* CONFIG_NUMA_BALANCING */
/* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -115,12 +79,14 @@ static inline unsigned long pte_pfn(pte_t pte) {
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE);
+ pte_val(pte) |= _PAGE_RO; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_RO;
pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
pte_val(pte) |= _PAGE_DIRTY; return pte; }
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index f09a22fa1bd7..f9b498292a5c 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -13,13 +13,13 @@
#include <linux/pci.h>
#include <misc/cxl.h>
-int pnv_phb_to_cxl(struct pci_dev *dev);
+int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
unsigned int virq);
int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
int pnv_cxl_get_irq_count(struct pci_dev *dev);
-struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev);
+struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
#ifdef CONFIG_CXL_BASE
int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
index ec0b0b0d1df9..486b1ef81338 100644
--- a/arch/powerpc/include/asm/pte-40x.h
+++ b/arch/powerpc/include/asm/pte-40x.h
@@ -38,7 +38,6 @@
*/
#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
-#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/pte-44x.h
index 4192b9bad901..36f75fab23f5 100644
--- a/arch/powerpc/include/asm/pte-44x.h
+++ b/arch/powerpc/include/asm/pte-44x.h
@@ -44,9 +44,6 @@
* - PRESENT *must* be in the bottom three bits because swap cache
* entries use the top 29 bits for TLB2.
*
- * - FILE *must* be in the bottom three bits because swap cache
- * entries use the top 29 bits for TLB2.
- *
* - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
* because it doesn't support SMP. However, some later 460 variants
* have -some- form of SMP support and so I keep the bit there for
@@ -68,7 +65,6 @@
*
* There are three protection bits available for SWAP entry:
* _PAGE_PRESENT
- * _PAGE_FILE
* _PAGE_HASHPTE (if HW has)
*
* So those three bits have to be inside of 0-2nd LSB of PTE.
@@ -77,7 +73,6 @@
#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
#define _PAGE_RW 0x00000002 /* S: Write permission */
-#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
#define _PAGE_EXEC 0x00000004 /* H: Execute permission */
#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index daa4616e61c4..97bae64afdaa 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -29,7 +29,6 @@
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT 0x0001 /* Page is valid */
-#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
@@ -46,9 +45,9 @@
* require a TLB exception handler change. It is assumed unused bits
* are always zero.
*/
-#define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */
+#define _PAGE_RO 0x0400 /* lsb PP bits */
#define _PAGE_USER 0x0800 /* msb PP bits */
-/* set when neither _PAGE_USER nor _PAGE_RW are set */
+/* set when _PAGE_USER is unset and _PAGE_RO is set */
#define _PAGE_KNLRO 0x0200
#define _PMD_PRESENT 0x0001
@@ -62,9 +61,8 @@
#define PTE_ATOMIC_UPDATES 1
/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_KNLRO)
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_KNLRO)
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO | _PAGE_KNLRO)
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO | _PAGE_KNLRO)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 576ad88104cb..91a704952ca1 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -10,7 +10,6 @@
/* Architected bits */
#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
-#define _PAGE_FILE 0x000002 /* (!present only) software: pte holds file offset */
#define _PAGE_SW1 0x000002
#define _PAGE_BAP_SR 0x000004
#define _PAGE_BAP_UR 0x000008
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index e040c3595129..c5a755ef7011 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -34,6 +34,12 @@
#ifndef _PAGE_PSIZE
#define _PAGE_PSIZE 0
#endif
+/* _PAGE_RO and _PAGE_RW shall not be defined at the same time */
+#ifndef _PAGE_RO
+#define _PAGE_RO 0
+#else
+#define _PAGE_RW 0
+#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK _PMD_PRESENT
#endif
@@ -42,10 +48,10 @@
#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
#endif
#ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_RO (_PAGE_RO)
#endif
#ifndef _PAGE_KERNEL_ROX
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO)
#endif
#ifndef _PAGE_KERNEL_RW
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
@@ -95,14 +101,9 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/* Mask of bits returned by pte_pgprot() */
#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
_PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
- _PAGE_USER | _PAGE_ACCESSED | \
+ _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | \
_PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
-#ifdef CONFIG_NUMA_BALANCING
-/* Mask of bits that distinguish present and numa ptes */
-#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PRESENT)
-#endif
-
/*
* We define 2 sets of base prot bits, one for basic pages (ie,
* cacheable kernel and user pages) and one for non cacheable
@@ -128,11 +129,14 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
*/
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+ _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+ _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+ _PAGE_EXEC)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
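A standalone sketch of how the new _PAGE_RO/_PAGE_RW fallback in pte-common.h plays out, not part of the patch and using simplified, assumed bit values: on a platform that defines _PAGE_RO (8xx after this series), _PAGE_RW collapses to 0, so read-only protections differ from writable ones by the presence of _PAGE_RO rather than the absence of _PAGE_RW.

#include <stdio.h>

#define _PAGE_RO 0x0400		/* pretend we are an 8xx-style platform header */

/* The pte-common.h fallback: the two bits are mutually exclusive. */
#ifndef _PAGE_RO
#define _PAGE_RO 0
#else
#define _PAGE_RW 0
#endif

#define _PAGE_BASE 0x0001	/* assumed stand-in for the present/cache bits */
#define _PAGE_USER 0x0800

#define PAGE_SHARED   (_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_READONLY (_PAGE_BASE | _PAGE_USER | _PAGE_RO)

int main(void)
{
	/* _PAGE_RW is 0 here, so the two protections differ only by _PAGE_RO. */
	printf("PAGE_SHARED   = 0x%04x\n", PAGE_SHARED);
	printf("PAGE_READONLY = 0x%04x\n", PAGE_READONLY);
	return 0;
}

On a platform that defines neither bit the same macros still expand sensibly, because _PAGE_RO then defaults to 0 and _PAGE_RW keeps its platform value.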
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
index e84dd7ed505e..9f5c3d04a1a3 100644
--- a/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -13,14 +13,11 @@
- PRESENT *must* be in the bottom three bits because swap cache
entries use the top 29 bits.
- - FILE *must* be in the bottom three bits because swap cache
- entries use the top 29 bits.
*/
/* Definitions for FSL Book-E Cores */
#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
-#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
#define _PAGE_EXEC 0x00010 /* H: SX permission */
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
index 4aad4132d0a8..62cfb0c663bb 100644
--- a/arch/powerpc/include/asm/pte-hash32.h
+++ b/arch/powerpc/include/asm/pte-hash32.h
@@ -18,7 +18,6 @@
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
#define _PAGE_USER 0x004 /* usermode access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 2505d8eab15c..fc852f7e7b3a 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -16,7 +16,6 @@
*/
#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
#define _PAGE_USER 0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED 0x0008
/* We can derive Memory coherence from _PAGE_NO_CACHE */
@@ -27,12 +26,6 @@
#define _PAGE_RW 0x0200 /* software: user write access allowed */
#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
-/*
- * Used for tracking numa faults
- */
-#define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */
-
-
/* No separate kernel read-only */
#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index b390f55b0df1..2e23e92a4372 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -327,7 +327,7 @@ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_online_cpus_mask(cpumask_var_t cpus);
extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
-extern int rtas_ibm_suspend_me(struct rtas_args *);
+extern int rtas_ibm_suspend_me(u64 handle, int *vasi_return);
struct rtc_time;
extern unsigned long rtas_get_boot_time(void);
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 5a6614a7f0b2..d607df5081a7 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -64,7 +64,6 @@ DECLARE_PER_CPU(unsigned int, cpu_pvr);
extern void migrate_irqs(void);
int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
-void generic_mach_cpu_die(void);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..72489799cf02 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
#else
-#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
#endif
#ifndef __ASSEMBLY__
@@ -43,7 +43,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
- struct restart_block restart_block;
unsigned long local_flags; /* private flags for thread */
/* low level flags - has atomic operations done on it */
@@ -59,9 +58,6 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
.flags = 0, \
}
@@ -71,12 +67,13 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
- /* gcc4, at least, is smart enough to turn this into a single
- * rlwinm for ppc32 and clrrdi for ppc64 */
- return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+ unsigned long val;
+
+ asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+ return (struct thread_info *)val;
}
#endif /* __ASSEMBLY__ */
@@ -124,7 +121,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
-#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
_TIF_NOHZ)
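For reference, the thread_info.h change keeps the same underlying arithmetic: struct thread_info sits at the base of the THREAD_SIZE-aligned kernel stack, so CURRENT_THREAD_INFO (clrrdi/rlwinm) just rounds the stack pointer down. A small sketch, not part of the patch, with an assumed THREAD_SHIFT and a hypothetical stack pointer:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT 14			/* assumed: 16KB kernel stacks */
#define THREAD_SIZE  (1ULL << THREAD_SHIFT)

/* clrrdi dest, sp, THREAD_SHIFT clears the low THREAD_SHIFT bits of the
 * stack pointer, i.e. rounds it down to the base of the stack, which is
 * where struct thread_info lives. */
static uint64_t thread_info_base(uint64_t sp)
{
	return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
	uint64_t sp = 0xc000000001237f10ULL;	/* hypothetical kernel r1 */

	printf("sp          = %#llx\n", (unsigned long long)sp);
	printf("thread_info = %#llx\n", (unsigned long long)thread_info_base(sp));
	return 0;
}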
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e624f9646350..4717859fdd04 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -644,8 +644,19 @@ int main(void)
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
- HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
- HSTATE_FIELD(HSTATE_PMC, host_pmc);
+ HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
+ HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
+ HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]);
+ HSTATE_FIELD(HSTATE_SIAR, host_mmcr[3]);
+ HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
+ HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
+ HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
+ HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
+ HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
+ HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
+ HSTATE_FIELD(HSTATE_PMC4, host_pmc[3]);
+ HSTATE_FIELD(HSTATE_PMC5, host_pmc[4]);
+ HSTATE_FIELD(HSTATE_PMC6, host_pmc[5]);
HSTATE_FIELD(HSTATE_PURR, host_purr);
HSTATE_FIELD(HSTATE_SPURR, host_spurr);
HSTATE_FIELD(HSTATE_DSCR, host_dscr);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 40198d50b4c2..ae77b7e59889 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -607,19 +607,16 @@ static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *att
{
struct cache_index_dir *index;
struct cache *cache;
- int len;
- int n = 0;
+ int ret;
index = kobj_to_cache_index_dir(k);
cache = index->cache;
- len = PAGE_SIZE - 2;
- if (len > 1) {
- n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
- buf[n++] = '\n';
- buf[n] = '\0';
- }
- return n;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
+ cpumask_pr_args(&cache->shared_cpu_map));
+ buf[ret++] = '\n';
+ buf[ret] = '\0';
+ return ret;
}
static struct kobj_attribute cache_shared_cpu_map_attr =
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 808405906336..f337666768a7 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1133,6 +1133,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index e1b6d8e17289..3b2252e7731b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -104,6 +104,13 @@
int eeh_subsystem_flags;
EXPORT_SYMBOL(eeh_subsystem_flags);
+/*
+ * Maximum number of frozen events allowed by EEH. If one particular
+ * PE's frozen count in the last hour exceeds this limit, the PE will
+ * be forced offline permanently.
+ */
+int eeh_max_freezes = 5;
+
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
@@ -1652,8 +1659,22 @@ static int eeh_enable_dbgfs_get(void *data, u64 *val)
return 0;
}
+static int eeh_freeze_dbgfs_set(void *data, u64 val)
+{
+ eeh_max_freezes = val;
+ return 0;
+}
+
+static int eeh_freeze_dbgfs_get(void *data, u64 *val)
+{
+ *val = eeh_max_freezes;
+ return 0;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
eeh_enable_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get,
+ eeh_freeze_dbgfs_set, "0x%llx\n");
#endif
static int __init eeh_init_proc(void)
@@ -1664,6 +1685,9 @@ static int __init eeh_init_proc(void)
debugfs_create_file("eeh_enable", 0600,
powerpc_debugfs_root, NULL,
&eeh_enable_dbgfs_ops);
+ debugfs_create_file("eeh_max_freezes", 0600,
+ powerpc_debugfs_root, NULL,
+ &eeh_freeze_dbgfs_ops);
#endif
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index b17e793ba67e..d099540c0f56 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -667,7 +667,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
eeh_pe_update_time_stamp(pe);
pe->freeze_count++;
- if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
+ if (pe->freeze_count > eeh_max_freezes)
goto excess_failures;
pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
pe->freeze_count);
@@ -806,7 +806,7 @@ perm_error:
eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
/* Mark the PE to be removed permanently */
- pe->freeze_count = EEH_MAX_ALLOWED_FREEZES + 1;
+ eeh_pe_state_mark(pe, EEH_PE_REMOVED);
/*
* Shut down the device drivers for good. We mark
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 5a63e2b0f65b..1e4946c36f9e 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -239,10 +239,18 @@ static void *__eeh_pe_get(void *data, void *flag)
if (pe->type & EEH_PE_PHB)
return NULL;
- /* We prefer PE address */
- if (edev->pe_config_addr &&
- (edev->pe_config_addr == pe->addr))
+ /*
+	 * We prefer the PE address. In most cases, we should
+	 * have a non-zero PE address.
+ */
+ if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
+ if (edev->pe_config_addr == pe->addr)
+ return pe;
+ } else {
+ if (edev->pe_config_addr &&
+ (edev->pe_config_addr == pe->addr))
return pe;
+ }
/* Try BDF address */
if (edev->config_addr &&
@@ -518,8 +526,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
struct pci_dev *pdev;
/* Keep the state of permanently removed PE intact */
- if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) &&
- (state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING)))
+ if (pe->state & EEH_PE_REMOVED)
return NULL;
pe->state |= state;
@@ -592,8 +599,7 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
struct pci_dev *pdev;
/* Keep the state of permanently removed PE intact */
- if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) &&
- (state & EEH_PE_ISOLATED))
+ if (pe->state & EEH_PE_REMOVED)
return NULL;
pe->state &= ~state;
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 10a093579191..46fc0f4d8982 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,9 +33,6 @@
#include <asm/ftrace.h>
#include <asm/ptrace.h>
-#undef SHOW_SYSCALLS
-#undef SHOW_SYSCALLS_TASK
-
/*
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
*/
@@ -307,9 +304,6 @@ _GLOBAL(DoSyscall)
lwz r11,_CCR(r1) /* Clear SO bit in CR */
rlwinm r11,r11,0,4,2
stw r11,_CCR(r1)
-#ifdef SHOW_SYSCALLS
- bl do_show_syscall
-#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Return from syscalls can (and generally will) hard enable
* interrupts. You aren't supposed to call a syscall with
@@ -337,7 +331,7 @@ _GLOBAL(DoSyscall)
#endif /* CONFIG_TRACE_IRQFLAGS */
CURRENT_THREAD_INFO(r10, r1)
lwz r11,TI_FLAGS(r10)
- andi. r11,r11,_TIF_SYSCALL_T_OR_A
+ andi. r11,r11,_TIF_SYSCALL_DOTRACE
bne- syscall_dotrace
syscall_dotrace_cont:
cmplwi 0,r0,NR_syscalls
@@ -352,9 +346,6 @@ syscall_dotrace_cont:
blrl /* Call handler */
.globl ret_from_syscall
ret_from_syscall:
-#ifdef SHOW_SYSCALLS
- bl do_show_syscall_exit
-#endif
mr r6,r3
CURRENT_THREAD_INFO(r12, r1)
/* disable interrupts so current_thread_info()->flags can't change */
@@ -364,7 +355,7 @@ ret_from_syscall:
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
li r8,-_LAST_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
@@ -501,7 +492,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything which requires enabling interrupts? */
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
beq ret_from_except
/* Re-enable interrupts. There is no need to trace that with
@@ -523,74 +514,6 @@ syscall_exit_work:
bl do_syscall_trace_leave
b ret_from_except_full
-#ifdef SHOW_SYSCALLS
-do_show_syscall:
-#ifdef SHOW_SYSCALLS_TASK
- lis r11,show_syscalls_task@ha
- lwz r11,show_syscalls_task@l(r11)
- cmp 0,r2,r11
- bnelr
-#endif
- stw r31,GPR31(r1)
- mflr r31
- lis r3,7f@ha
- addi r3,r3,7f@l
- lwz r4,GPR0(r1)
- lwz r5,GPR3(r1)
- lwz r6,GPR4(r1)
- lwz r7,GPR5(r1)
- lwz r8,GPR6(r1)
- lwz r9,GPR7(r1)
- bl printk
- lis r3,77f@ha
- addi r3,r3,77f@l
- lwz r4,GPR8(r1)
- mr r5,r2
- bl printk
- lwz r0,GPR0(r1)
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- mtlr r31
- lwz r31,GPR31(r1)
- blr
-
-do_show_syscall_exit:
-#ifdef SHOW_SYSCALLS_TASK
- lis r11,show_syscalls_task@ha
- lwz r11,show_syscalls_task@l(r11)
- cmp 0,r2,r11
- bnelr
-#endif
- stw r31,GPR31(r1)
- mflr r31
- stw r3,RESULT(r1) /* Save result */
- mr r4,r3
- lis r3,79f@ha
- addi r3,r3,79f@l
- bl printk
- lwz r3,RESULT(r1)
- mtlr r31
- lwz r31,GPR31(r1)
- blr
-
-7: .string "syscall %d(%x, %x, %x, %x, %x, "
-77: .string "%x), current=%p\n"
-79: .string " -> %x\n"
- .align 2,0
-
-#ifdef SHOW_SYSCALLS_TASK
- .data
- .globl show_syscalls_task
-show_syscalls_task:
- .long -1
- .text
-#endif
-#endif /* SHOW_SYSCALLS */
-
/*
* The fork/clone functions need to copy the full register set into
* the child process. Therefore we need to save all the nonvolatile
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 194e46dcf08d..d180caf2d6de 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -49,8 +49,6 @@ exception_marker:
.section ".text"
.align 7
-#undef SHOW_SYSCALLS
-
.globl system_call_common
system_call_common:
andi. r10,r12,MSR_PR
@@ -142,16 +140,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
li r10,1
std r10,SOFTE(r1)
-#ifdef SHOW_SYSCALLS
- bl do_show_syscall
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
-#endif
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
- andi. r11,r10,_TIF_SYSCALL_T_OR_A
+ andi. r11,r10,_TIF_SYSCALL_DOTRACE
bne syscall_dotrace
.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
@@ -178,12 +169,8 @@ system_call: /* label this so stack traces look sane */
mtctr r12
bctrl /* Call handler */
-syscall_exit:
+.Lsyscall_exit:
std r3,RESULT(r1)
-#ifdef SHOW_SYSCALLS
- bl do_show_syscall_exit
- ld r3,RESULT(r1)
-#endif
CURRENT_THREAD_INFO(r12, r1)
ld r8,_MSR(r1)
@@ -214,7 +201,7 @@ syscall_exit:
ld r9,TI_FLAGS(r12)
li r11,-_LAST_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
cmpld r3,r11
ld r5,_CCR(r1)
@@ -270,7 +257,7 @@ syscall_dotrace:
syscall_enosys:
li r3,-ENOSYS
- b syscall_exit
+ b .Lsyscall_exit
syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
@@ -307,7 +294,7 @@ syscall_exit_work:
4: /* Anything else left to do? */
SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
beq ret_from_except_lite
/* Re-enable interrupts */
@@ -347,33 +334,33 @@ _GLOBAL(save_nvgprs)
_GLOBAL(ppc_fork)
bl save_nvgprs
bl sys_fork
- b syscall_exit
+ b .Lsyscall_exit
_GLOBAL(ppc_vfork)
bl save_nvgprs
bl sys_vfork
- b syscall_exit
+ b .Lsyscall_exit
_GLOBAL(ppc_clone)
bl save_nvgprs
bl sys_clone
- b syscall_exit
+ b .Lsyscall_exit
_GLOBAL(ppc32_swapcontext)
bl save_nvgprs
bl compat_sys_swapcontext
- b syscall_exit
+ b .Lsyscall_exit
_GLOBAL(ppc64_swapcontext)
bl save_nvgprs
bl sys_swapcontext
- b syscall_exit
+ b .Lsyscall_exit
_GLOBAL(ret_from_fork)
bl schedule_tail
REST_NVGPRS(r1)
li r3,0
- b syscall_exit
+ b .Lsyscall_exit
_GLOBAL(ret_from_kernel_thread)
bl schedule_tail
@@ -385,7 +372,7 @@ _GLOBAL(ret_from_kernel_thread)
#endif
blrl
li r3,0
- b syscall_exit
+ b .Lsyscall_exit
/*
* This routine switches between two different tasks. The process
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index d99aac0d69f1..9b53fe139bf6 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -319,36 +319,29 @@ InstructionTLBMiss:
* pin the first 8MB of kernel memory */
andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
#endif
- mfspr r11, SPRN_M_TW /* Get level 1 table base address */
+ mfspr r11, SPRN_M_TW /* Get level 1 table */
#ifdef CONFIG_MODULES
beq 3f
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
- ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
#endif
- /* Extract level 1 index */
- rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
- lwzx r11, r10, r11 /* Get the level 1 entry */
- rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
- beq 2f /* If zero, don't try to find a pte */
-
- /* We have a pte table, so load the MI_TWC with the attributes
- * for this "segment."
- */
+ /* Insert level 1 index */
+ rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+
+ /* Load the MI_TWC with the attributes for this "segment." */
MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */
- mfspr r11, SPRN_SRR0 /* Get effective address of fault */
+ rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Extract level 2 index */
- rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+ rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
lwzx r10, r10, r11 /* Get the pte */
#ifdef CONFIG_SWAP
- andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
- cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
- li r11, RPN_PATTERN
- bne- cr0, 2f
-#else
- li r11, RPN_PATTERN
+ rlwinm r11, r10, 32-5, _PAGE_PRESENT
+ and r11, r11, r10
+ rlwimi r10, r11, 0, _PAGE_PRESENT
#endif
+ li r11, RPN_PATTERN
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 21 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
@@ -366,21 +359,6 @@ InstructionTLBMiss:
mfspr r10, SPRN_SPRG_SCRATCH2
EXCEPTION_EPILOG_0
rfi
-2:
- mfspr r10, SPRN_SRR1
- /* clear all error bits as TLB Miss
- * sets a few unconditionally
- */
- rlwinm r10, r10, 0, 0xffff
- mtspr SPRN_SRR1, r10
-
- /* Restore registers */
-#ifdef CONFIG_8xx_CPU6
- mfspr r3, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
-#endif
- mfspr r10, SPRN_SPRG_SCRATCH2
- b InstructionTLBError1
. = 0x1200
DataStoreTLBMiss:
@@ -395,20 +373,16 @@ DataStoreTLBMiss:
* kernel page tables.
*/
andis. r11, r10, 0x8000
- mfspr r11, SPRN_M_TW /* Get level 1 table base address */
+ mfspr r11, SPRN_M_TW /* Get level 1 table */
beq 3f
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
- ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
- /* Extract level 1 index */
- rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
- lwzx r11, r10, r11 /* Get the level 1 entry */
- rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
- beq 2f /* If zero, don't try to find a pte */
+ /* Insert level 1 index */
+ rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
	/* We have a pte table, so fetch the pte from the table.
*/
- mfspr r10, SPRN_MD_EPN /* Get address of fault */
/* Extract level 2 index */
rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
@@ -441,16 +415,13 @@ DataStoreTLBMiss:
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
#endif
- /* invert RW */
- xori r10, r10, _PAGE_RW
-
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
-2: li r11, RPN_PATTERN
+ li r11, RPN_PATTERN
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
@@ -469,10 +440,7 @@ DataStoreTLBMiss:
*/
. = 0x1300
InstructionTLBError:
- EXCEPTION_PROLOG_0
-InstructionTLBError1:
- EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2
+ EXCEPTION_PROLOG
mr r4,r12
mr r5,r9
andis. r10,r5,0x4000
@@ -532,30 +500,21 @@ DARFixed:/* Return from dcbx instruction bug workaround */
/* define if you don't want to use self modifying code */
#define NO_SELF_MODIFYING_CODE
FixupDAR:/* Entry point for dcbx workaround. */
-#ifdef CONFIG_8xx_CPU6
- mtspr SPRN_DAR, r3
-#endif
mtspr SPRN_SPRG_SCRATCH2, r10
/* fetch instruction from memory. */
mfspr r10, SPRN_SRR0
andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
- mfspr r11, SPRN_M_TW /* Get level 1 table base address */
- beq- 3f /* Branch if user space */
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
- ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
- /* Extract level 1 index */
-3: rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
- lwzx r11, r10, r11 /* Get the level 1 entry */
- rlwinm r10, r11,0,0,19 /* Extract page descriptor page address */
- mfspr r11, SPRN_SRR0 /* Get effective address of fault */
- /* Extract level 2 index */
- rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
- lwzx r11, r10, r11 /* Get the pte */
-#ifdef CONFIG_8xx_CPU6
- mfspr r3, SPRN_DAR
-#endif
+ mfspr r11, SPRN_M_TW /* Get level 1 table */
+ beq 3f
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ /* Insert level 1 index */
+3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+ rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
+ /* Insert level 2 index */
+ rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+ lwz r11, 0(r11) /* Get the pte */
/* concat physical page address(r11) and page offset(r10) */
- mfspr r10, SPRN_SRR0
rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
lwz r11,0(r11)
/* Check if it really is a dcbx instruction. */
@@ -705,8 +664,7 @@ start_here:
* init's THREAD like the context switch code does, but this is
* easier......until someone changes init's static structures.
*/
- lis r6, swapper_pg_dir@h
- ori r6, r6, swapper_pg_dir@l
+ lis r6, swapper_pg_dir@ha
tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
lis r4, cpu6_errata_word@h
@@ -885,23 +843,28 @@ _GLOBAL(set_context)
stw r4, 0x4(r5)
#endif
+	/* Register M_TW will contain the base address of the level 1 table minus
+	 * the lower part of the kernel PGDIR base address, so that all accesses
+	 * to the level 1 table are done relative to the lower part of the kernel
+	 * PGDIR base address.
+ */
+ li r5, (swapper_pg_dir-PAGE_OFFSET)@l
+ sub r4, r4, r5
+ tophys (r4, r4)
#ifdef CONFIG_8xx_CPU6
lis r6, cpu6_errata_word@h
ori r6, r6, cpu6_errata_word@l
- tophys (r4, r4)
li r7, 0x3f80
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr SPRN_M_TW, r4 /* Update MMU base address */
+#endif
+	mtspr	SPRN_M_TW, r4		/* Update pointer to level 1 table */
+#ifdef CONFIG_8xx_CPU6
li r7, 0x3380
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr SPRN_M_CASID, r3 /* Update context */
-#else
- mtspr SPRN_M_CASID,r3 /* Update context */
- tophys (r4, r4)
- mtspr SPRN_M_TW, r4 /* and pgd */
#endif
+ mtspr SPRN_M_CASID, r3 /* Update context */
SYNC
blr
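A back-of-the-envelope sketch of the M_TW biasing described in the set_context() comment above. It is not part of the patch, the addresses are made up, and the rlwimi index merge is modelled as a plain add (which matches for a suitably aligned PGD): because M_TW is pre-biased by the low 16 bits of swapper_pg_dir, the fixed (swapper_pg_dir-PAGE_OFFSET)@l displacement used by the TLB miss handlers lands back on the current level 1 table.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values, chosen only to make the arithmetic visible. */
	uint32_t swapper_low = 0x4000;		/* (swapper_pg_dir - PAGE_OFFSET)@l */
	uint32_t pgdir_phys  = 0x01ff0000;	/* physical address of the current PGD */
	uint32_t l1_offset   = 0x2a0;		/* level 1 index already scaled by 4 */

	/* What set_context() now loads into SPRN_M_TW: */
	uint32_t m_tw = pgdir_phys - swapper_low;

	/* What the miss handler's
	 *     lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)
	 * effectively computes once the index is merged into r11: */
	uint32_t ea = m_tw + l1_offset + swapper_low;

	assert(ea == pgdir_phys + l1_offset);
	printf("level 1 entry fetched from %#x\n", ea);
	return 0;
}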
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index d6e195e8cd4c..5a23b69f8129 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -115,6 +115,14 @@ static struct slb_shadow * __init init_slb_shadow(int cpu)
{
struct slb_shadow *s = &slb_shadow[cpu];
+ /*
+ * When we come through here to initialise boot_paca, the slb_shadow
+ * buffers are not allocated yet. That's OK, we'll get one later in
+ * boot, but make sure we don't corrupt memory at 0.
+ */
+ if (!slb_shadow)
+ return NULL;
+
s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
s->buffer_length = cpu_to_be32(sizeof(*s));
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 37d512d35943..2a525c938158 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1184,6 +1184,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ struct pci_dev *dev = bus->self;
+
if (request_resource(pr, res) == 0)
continue;
/*
@@ -1193,6 +1195,11 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
*/
if (reparent_resources(pr, res) == 0)
continue;
+
+ if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+ pci_claim_bridge_resource(dev,
+ i + PCI_BRIDGE_RESOURCES) == 0)
+ continue;
}
pr_warning("PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1401,7 +1408,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 1f61fab59d9b..83df3075d3df 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -147,10 +147,8 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
/* PHB nodes themselves must not match */
update_dn_pci_info(dn, phb);
pdn = dn->data;
- if (pdn) {
+ if (pdn)
pdn->devfn = pdn->busno = -1;
- pdn->phb = phb;
- }
/* Update dn->phb ptrs for new phb and children devices */
traverse_pci_devices(dn, update_dn_pci_info, phb);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6a799b3cc6b4..b8e15c678960 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -652,9 +652,6 @@ void __init early_init_devtree(void *params)
if (!early_init_dt_verify(params))
panic("BUG: Failed verifying flat device tree, bad version?");
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
#ifdef CONFIG_PPC_RTAS
/* Some machines might need RTAS info for debugging, grab it now. */
of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 4af905e81ab0..21c45a2d0706 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -897,7 +897,7 @@ int rtas_offline_cpus_mask(cpumask_var_t cpus)
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);
-int rtas_ibm_suspend_me(struct rtas_args *args)
+int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
{
long state;
long rc;
@@ -911,8 +911,7 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
return -ENOSYS;
/* Make sure the state is valid */
- rc = plpar_hcall(H_VASI_STATE, retbuf,
- ((u64)args->args[0] << 32) | args->args[1]);
+ rc = plpar_hcall(H_VASI_STATE, retbuf, handle);
state = retbuf[0];
@@ -920,12 +919,12 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
return rc;
} else if (state == H_VASI_ENABLED) {
- args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
+ *vasi_return = RTAS_NOT_SUSPENDABLE;
return 0;
} else if (state != H_VASI_SUSPENDING) {
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
state);
- args->args[args->nargs] = -1;
+ *vasi_return = -1;
return 0;
}
@@ -973,7 +972,7 @@ out:
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
-int rtas_ibm_suspend_me(struct rtas_args *args)
+int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
{
return -ENOSYS;
}
@@ -1053,7 +1052,16 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
/* Need to handle ibm,suspend_me call specially */
if (token == ibm_suspend_me_token) {
- rc = rtas_ibm_suspend_me(&args);
+
+ /*
+		 * rtas_ibm_suspend_me assumes the args are in CPU endianness, or
+		 * at least the hcall within it requires it.
+ */
+ int vasi_rc = 0;
+ u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
+ | be32_to_cpu(args.args[1]);
+ rc = rtas_ibm_suspend_me(handle, &vasi_rc);
+ args.rets[0] = cpu_to_be32(vasi_rc);
if (rc)
return rc;
goto copy_return;
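A small worked example of the handle reconstruction ppc_rtas() now performs before calling rtas_ibm_suspend_me(); it is a standalone sketch with made-up argument values, not part of the patch. The two 32-bit RTAS arguments arrive big-endian, are converted with be32_to_cpu(), and are then joined into the 64-bit H_VASI_STATE handle:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values after be32_to_cpu(): */
	uint32_t hi = 0x00000001u;	/* args.args[0] in CPU endianness */
	uint32_t lo = 0xdeadbeefu;	/* args.args[1] in CPU endianness */

	/* Same combination as the new ppc_rtas() code: */
	uint64_t handle = ((uint64_t)hi << 32) | lo;

	assert(handle == 0x00000001deadbeefULL);
	printf("H_VASI_STATE handle = %#llx\n", (unsigned long long)handle);
	return 0;
}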
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index b171001698ff..d3a831ac0f92 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1231,7 +1231,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
int tm_restore = 0;
#endif
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
rt_sf = (struct rt_sigframe __user *)
(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
@@ -1504,7 +1504,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
#endif
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
sc = &sf->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2cb0c94cafa5..c7c24d2e2bdb 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -666,7 +666,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
#endif
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
goto badframe;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8b2d2dc8ef10..6e19afa35a15 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -434,20 +434,6 @@ void generic_cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
-void generic_mach_cpu_die(void)
-{
- unsigned int cpu;
-
- local_irq_disable();
- idle_task_exit();
- cpu = smp_processor_id();
- printk(KERN_DEBUG "CPU%d offline\n", cpu);
- __this_cpu_write(cpu_state, CPU_DEAD);
- smp_wmb();
- while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
- cpu_relax();
-}
-
void generic_set_cpu_dead(unsigned int cpu)
{
per_cpu(cpu_state, cpu) = CPU_DEAD;
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index cd9be9aa016d..b2702e87db0d 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,17 +121,3 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
(u64)len_high << 32 | len_low, advice);
}
-
-void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7, unsigned long r8,
- struct pt_regs *regs)
-{
- printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) regs=%p current=%p"
- " cpu=%d\n", regs->gpr[0], r3, r4, r5, r6, r7, r8, regs,
- current, smp_processor_id());
-}
-
-void do_show_syscall_exit(unsigned long r3)
-{
- printk(" -> %lx, current=%p cpu=%d\n", r3, current, smp_processor_id());
-}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index fa7c4f12104f..7316dd15278a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -621,6 +621,38 @@ unsigned long long sched_clock(void)
return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
+
+#ifdef CONFIG_PPC_PSERIES
+
+/*
+ * Running clock - attempts to give a view of time passing for a virtualised
+ * kernel.
+ * Uses the VTB register if available, otherwise the next best guess.
+ */
+unsigned long long running_clock(void)
+{
+ /*
+	 * Don't read the VTB as a host, since KVM does not switch the host
+	 * timebase into the VTB when it takes a guest off the CPU; reading the
+	 * VTB would return the last switched-out guest's VTB.
+ *
+	 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled, so
+	 * it would be unsafe to rely only on the #ifdef above.
+ */
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S))
+ return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
+
+ /*
+	 * This is the next best approximation without a VTB.
+	 * On a host running bare metal there should never be any stolen
+	 * time, and on a host which doesn't do any virtualisation TB *should*
+	 * equal VTB, so it makes no difference anyway.
+ */
+ return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]);
+}
+#endif
+
static int __init get_freq(char *name, int cells, unsigned long *val)
{
struct device_node *cpu;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index e6595b72269b..19e4744b6eba 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1707,21 +1707,6 @@ void altivec_assist_exception(struct pt_regs *regs)
}
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-void vsx_assist_exception(struct pt_regs *regs)
-{
- if (!user_mode(regs)) {
- printk(KERN_EMERG "VSX assist exception in kernel mode"
- " at %lx\n", regs->nip);
- die("Kernel VSX assist exception", regs, SIGILL);
- }
-
- flush_vsx_to_thread(current);
- printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-}
-#endif /* CONFIG_VSX */
-
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f5769f19ae25..11850f310fb4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_EVENTFD
+ select SRCU
config KVM_BOOK3S_HANDLER
bool
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 888bf466d8c6..cfbcdc654201 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -52,6 +52,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "queue_intr", VCPU_STAT(queue_intr) },
+ { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "pf_storage", VCPU_STAT(pf_storage) },
{ "sp_storage", VCPU_STAT(sp_storage) },
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 36540a99d178..0fdc4a28970b 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -93,15 +93,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r5, SPRN_MMCR1
mfspr r9, SPRN_SIAR
mfspr r10, SPRN_SDAR
- std r7, HSTATE_MMCR(r13)
- std r5, HSTATE_MMCR + 8(r13)
- std r6, HSTATE_MMCR + 16(r13)
- std r9, HSTATE_MMCR + 24(r13)
- std r10, HSTATE_MMCR + 32(r13)
+ std r7, HSTATE_MMCR0(r13)
+ std r5, HSTATE_MMCR1(r13)
+ std r6, HSTATE_MMCRA(r13)
+ std r9, HSTATE_SIAR(r13)
+ std r10, HSTATE_SDAR(r13)
BEGIN_FTR_SECTION
mfspr r9, SPRN_SIER
- std r8, HSTATE_MMCR + 40(r13)
- std r9, HSTATE_MMCR + 48(r13)
+ std r8, HSTATE_MMCR2(r13)
+ std r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r3, SPRN_PMC1
mfspr r5, SPRN_PMC2
@@ -109,12 +109,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_PMC4
mfspr r8, SPRN_PMC5
mfspr r9, SPRN_PMC6
- stw r3, HSTATE_PMC(r13)
- stw r5, HSTATE_PMC + 4(r13)
- stw r6, HSTATE_PMC + 8(r13)
- stw r7, HSTATE_PMC + 12(r13)
- stw r8, HSTATE_PMC + 16(r13)
- stw r9, HSTATE_PMC + 20(r13)
+ stw r3, HSTATE_PMC1(r13)
+ stw r5, HSTATE_PMC2(r13)
+ stw r6, HSTATE_PMC3(r13)
+ stw r7, HSTATE_PMC4(r13)
+ stw r8, HSTATE_PMC5(r13)
+ stw r9, HSTATE_PMC6(r13)
31:
/*
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 510bdfbc4073..625407e4d3b0 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -212,7 +212,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Look up the Linux PTE for the backing page */
pte_size = psize;
pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
- if (pte_present(pte) && !pte_numa(pte)) {
+ if (pte_present(pte) && !pte_protnone(pte)) {
if (writing && !pte_write(pte))
/* make the actual HPTE be read-only */
ptel = hpte_make_readonly(ptel);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 7b066f6b02ad..7c22997de906 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* in virtual mode.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Down_CPPR */
new_state.cppr = new_cppr;
@@ -211,7 +211,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
* pending priority
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
if (!old_state.xisr)
@@ -277,7 +277,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* whenever the MFRR is made less favored.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Set_MFRR */
new_state.mfrr = mfrr;
@@ -352,7 +352,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
icp_rm_clr_vcpu_irq(icp->vcpu);
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
reject = 0;
new_state.cppr = cppr;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 10554df13852..bb94e6f20c81 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -83,35 +83,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
cmpwi r4, 0
beq 23f /* skip if not */
BEGIN_FTR_SECTION
- ld r3, HSTATE_MMCR(r13)
+ ld r3, HSTATE_MMCR0(r13)
andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
cmpwi r4, MMCR0_PMAO
beql kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
- lwz r3, HSTATE_PMC(r13)
- lwz r4, HSTATE_PMC + 4(r13)
- lwz r5, HSTATE_PMC + 8(r13)
- lwz r6, HSTATE_PMC + 12(r13)
- lwz r8, HSTATE_PMC + 16(r13)
- lwz r9, HSTATE_PMC + 20(r13)
+ lwz r3, HSTATE_PMC1(r13)
+ lwz r4, HSTATE_PMC2(r13)
+ lwz r5, HSTATE_PMC3(r13)
+ lwz r6, HSTATE_PMC4(r13)
+ lwz r8, HSTATE_PMC5(r13)
+ lwz r9, HSTATE_PMC6(r13)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r4
mtspr SPRN_PMC3, r5
mtspr SPRN_PMC4, r6
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
- ld r3, HSTATE_MMCR(r13)
- ld r4, HSTATE_MMCR + 8(r13)
- ld r5, HSTATE_MMCR + 16(r13)
- ld r6, HSTATE_MMCR + 24(r13)
- ld r7, HSTATE_MMCR + 32(r13)
+ ld r3, HSTATE_MMCR0(r13)
+ ld r4, HSTATE_MMCR1(r13)
+ ld r5, HSTATE_MMCRA(r13)
+ ld r6, HSTATE_SIAR(r13)
+ ld r7, HSTATE_SDAR(r13)
mtspr SPRN_MMCR1, r4
mtspr SPRN_MMCRA, r5
mtspr SPRN_SIAR, r6
mtspr SPRN_SDAR, r7
BEGIN_FTR_SECTION
- ld r8, HSTATE_MMCR + 40(r13)
- ld r9, HSTATE_MMCR + 48(r13)
+ ld r8, HSTATE_MMCR2(r13)
+ ld r9, HSTATE_SIER(r13)
mtspr SPRN_MMCR2, r8
mtspr SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 807351f76f84..a4a8d9f0dcb7 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
icp->server_num);
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
*reject = 0;
@@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* in virtual mode.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Down_CPPR */
new_state.cppr = new_cppr;
@@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
* pending priority
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
if (!old_state.xisr)
@@ -634,7 +634,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* whenever the MFRR is made less favored.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Set_MFRR */
new_state.mfrr = mfrr;
@@ -679,7 +679,7 @@ static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
if (!icp)
return H_PARAMETER;
}
- state = ACCESS_ONCE(icp->state);
+ state = READ_ONCE(icp->state);
kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
kvmppc_set_gpr(vcpu, 5, state.mfrr);
return H_SUCCESS;
@@ -721,7 +721,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
reject = 0;
new_state.cppr = cppr;
@@ -885,7 +885,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
if (!icp)
continue;
- state.raw = ACCESS_ONCE(icp->state.raw);
+ state.raw = READ_ONCE(icp->state.raw);
seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
icp->server_num, state.xisr,
state.pending_pri, state.cppr, state.mfrr,
@@ -1082,7 +1082,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
* the ICS states before the ICP states.
*/
do {
- old_state = ACCESS_ONCE(icp->state);
+ old_state = READ_ONCE(icp->state);
if (new_state.mfrr <= old_state.mfrr) {
resend = false;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 9b55dec2d6cc..6c1316a15a27 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
+ { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "doorbell", VCPU_STAT(dbell_exits) },
{ "guest doorbell", VCPU_STAT(gdbell_exits) },
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index c45eaab752b0..27c0face86f4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -623,9 +623,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
return vcpu;
}
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
- return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 597562f69b2d..7902802a19a5 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,33 +9,30 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
-obj-y := string.o alloc.o \
- crtsavres.o ppc_ksyms.o
+obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \
+ feature-fixups.o
+
obj-$(CONFIG_PPC32) += div64.o copy_32.o
-obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
- usercopy_64.o mem_64.o string.o \
- hweight_64.o \
- copyuser_power7.o string_64.o copypage_power7.o
+obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
+ copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
+ memcpy_64.o memcmp_64.o
+
+obj64-$(CONFIG_SMP) += locks.o
+obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
+
ifeq ($(CONFIG_GENERIC_CSUM),)
obj-y += checksum_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC64) += checksum_wrappers_64.o
endif
-obj-$(CONFIG_PPC64) += memcpy_power7.o memcpy_64.o
-
obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
-ifeq ($(CONFIG_PPC64),y)
-obj-$(CONFIG_SMP) += locks.o
-obj-$(CONFIG_ALTIVEC) += vmx-helper.o
-endif
-
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
-obj-y += code-patching.o
-obj-y += feature-fixups.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
obj-$(CONFIG_ALTIVEC) += xor_vmx.o
CFLAGS_xor_vmx.o += -maltivec -mabi=altivec
+
+obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
new file mode 100644
index 000000000000..8953d2382a65
--- /dev/null
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -0,0 +1,233 @@
+/*
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ * Copyright 2015 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc_asm.h>
+
+#define off8 r6
+#define off16 r7
+#define off24 r8
+
+#define rA r9
+#define rB r10
+#define rC r11
+#define rD r27
+#define rE r28
+#define rF r29
+#define rG r30
+#define rH r31
+
+#ifdef __LITTLE_ENDIAN__
+#define LD ldbrx
+#else
+#define LD ldx
+#endif
+
+_GLOBAL(memcmp)
+ cmpdi cr1,r5,0
+
+ /* Use the short loop if both strings are not 8B aligned */
+ or r6,r3,r4
+ andi. r6,r6,7
+
+ /* Use the short loop if length is less than 32B */
+ cmpdi cr6,r5,31
+
+ beq cr1,.Lzero
+ bne .Lshort
+ bgt cr6,.Llong
+
+.Lshort:
+ mtctr r5
+
+1: lbz rA,0(r3)
+ lbz rB,0(r4)
+ subf. rC,rB,rA
+ bne .Lnon_zero
+ bdz .Lzero
+
+ lbz rA,1(r3)
+ lbz rB,1(r4)
+ subf. rC,rB,rA
+ bne .Lnon_zero
+ bdz .Lzero
+
+ lbz rA,2(r3)
+ lbz rB,2(r4)
+ subf. rC,rB,rA
+ bne .Lnon_zero
+ bdz .Lzero
+
+ lbz rA,3(r3)
+ lbz rB,3(r4)
+ subf. rC,rB,rA
+ bne .Lnon_zero
+
+ addi r3,r3,4
+ addi r4,r4,4
+
+ bdnz 1b
+
+.Lzero:
+ li r3,0
+ blr
+
+.Lnon_zero:
+ mr r3,rC
+ blr
+
+.Llong:
+ li off8,8
+ li off16,16
+ li off24,24
+
+ std r31,-8(r1)
+ std r30,-16(r1)
+ std r29,-24(r1)
+ std r28,-32(r1)
+ std r27,-40(r1)
+
+ srdi r0,r5,5
+ mtctr r0
+ andi. r5,r5,31
+
+ LD rA,0,r3
+ LD rB,0,r4
+
+ LD rC,off8,r3
+ LD rD,off8,r4
+
+ LD rE,off16,r3
+ LD rF,off16,r4
+
+ LD rG,off24,r3
+ LD rH,off24,r4
+ cmpld cr0,rA,rB
+
+ addi r3,r3,32
+ addi r4,r4,32
+
+ bdz .Lfirst32
+
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr1,rC,rD
+
+ LD rC,off8,r3
+ LD rD,off8,r4
+ cmpld cr6,rE,rF
+
+ LD rE,off16,r3
+ LD rF,off16,r4
+ cmpld cr7,rG,rH
+ bne cr0,.LcmpAB
+
+ LD rG,off24,r3
+ LD rH,off24,r4
+ cmpld cr0,rA,rB
+ bne cr1,.LcmpCD
+
+ addi r3,r3,32
+ addi r4,r4,32
+
+ bdz .Lsecond32
+
+ .balign 16
+
+1: LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr1,rC,rD
+ bne cr6,.LcmpEF
+
+ LD rC,off8,r3
+ LD rD,off8,r4
+ cmpld cr6,rE,rF
+ bne cr7,.LcmpGH
+
+ LD rE,off16,r3
+ LD rF,off16,r4
+ cmpld cr7,rG,rH
+ bne cr0,.LcmpAB
+
+ LD rG,off24,r3
+ LD rH,off24,r4
+ cmpld cr0,rA,rB
+ bne cr1,.LcmpCD
+
+ addi r3,r3,32
+ addi r4,r4,32
+
+ bdnz 1b
+
+.Lsecond32:
+ cmpld cr1,rC,rD
+ bne cr6,.LcmpEF
+
+ cmpld cr6,rE,rF
+ bne cr7,.LcmpGH
+
+ cmpld cr7,rG,rH
+ bne cr0,.LcmpAB
+
+ bne cr1,.LcmpCD
+ bne cr6,.LcmpEF
+ bne cr7,.LcmpGH
+
+.Ltail:
+ ld r31,-8(r1)
+ ld r30,-16(r1)
+ ld r29,-24(r1)
+ ld r28,-32(r1)
+ ld r27,-40(r1)
+
+ cmpdi r5,0
+ beq .Lzero
+ b .Lshort
+
+.Lfirst32:
+ cmpld cr1,rC,rD
+ cmpld cr6,rE,rF
+ cmpld cr7,rG,rH
+
+ bne cr0,.LcmpAB
+ bne cr1,.LcmpCD
+ bne cr6,.LcmpEF
+ bne cr7,.LcmpGH
+
+ b .Ltail
+
+.LcmpAB:
+ li r3,1
+ bgt cr0,.Lout
+ li r3,-1
+ b .Lout
+
+.LcmpCD:
+ li r3,1
+ bgt cr1,.Lout
+ li r3,-1
+ b .Lout
+
+.LcmpEF:
+ li r3,1
+ bgt cr6,.Lout
+ li r3,-1
+ b .Lout
+
+.LcmpGH:
+ li r3,1
+ bgt cr7,.Lout
+ li r3,-1
+
+.Lout:
+ ld r31,-8(r1)
+ ld r30,-16(r1)
+ ld r29,-24(r1)
+ ld r28,-32(r1)
+ ld r27,-40(r1)
+ blr
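For readers following the new memcmp_64.S: a plain C reference of the semantics it implements, as a standalone sketch outside the patch. The assembly's long path compares eight bytes at a time, loaded most-significant-byte first (ldx on big-endian, ldbrx on little-endian), so an unsigned doubleword compare orders two buffers the same way a byte-by-byte loop does; it then returns +1/-1 rather than the raw byte difference, which memcmp() semantics permit.

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Byte-wise reference: compare as unsigned chars, return the sign of the
 * first difference (or 0). */
static int ref_memcmp(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;

	while (n--) {
		if (*pa != *pb)
			return *pa - *pb;
		pa++;
		pb++;
	}
	return 0;
}

int main(void)
{
	assert(ref_memcmp("abcdefgh", "abcdefgh", 8) == 0);
	assert((ref_memcmp("abcdefgA", "abcdefgZ", 8) < 0) ==
	       (memcmp("abcdefgA", "abcdefgZ", 8) < 0));
	return 0;
}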
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 1b5a0a09d609..c80fb49ce607 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -93,6 +93,7 @@ _GLOBAL(strlen)
subf r3,r3,r4
blr
+#ifdef CONFIG_PPC32
_GLOBAL(memcmp)
PPC_LCMPI 0,r5,0
beq- 2f
@@ -106,6 +107,7 @@ _GLOBAL(memcmp)
blr
2: li r3,0
blr
+#endif
_GLOBAL(memchr)
PPC_LCMPI 0,r5,0
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..f031a47d7701 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -64,10 +64,14 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock;
} else {
- if (dsisr & DSISR_PROTFAULT)
- goto out_unlock;
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto out_unlock;
+ /*
+		 * A protection fault should only happen because we temporarily
+		 * mapped a region read-only. PROT_NONE is also covered by the
+		 * VMA check above.
+ */
+ WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
}
ret = 0;
@@ -76,7 +80,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
goto out_unlock;
- } else if (*flt & VM_FAULT_SIGBUS) {
+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
ret = -EFAULT;
goto out_unlock;
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb79907f34fa..b396868d2aa7 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -389,19 +389,6 @@ good_area:
#endif /* CONFIG_8xx */
if (is_exec) {
-#ifdef CONFIG_PPC_STD_MMU
- /* Protection fault on exec go straight to failure on
- * Hash based MMUs as they either don't support per-page
- * execute permission, or if they do, it's handled already
- * at the hash level. This test would probably have to
- * be removed if we change the way this works to make hash
- * processors use the same I/D cache coherency mechanism
- * as embedded.
- */
- if (error_code & DSISR_PROTFAULT)
- goto bad_area;
-#endif /* CONFIG_PPC_STD_MMU */
-
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
@@ -416,6 +403,14 @@ good_area:
(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
+#ifdef CONFIG_PPC_STD_MMU
+ /*
+ * protfault should only happen due to us
+ * mapping a region readonly temporarily. PROT_NONE
+ * is also covered by the VMA check above.
+ */
+ WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
+#endif /* CONFIG_PPC_STD_MMU */
/* a write */
} else if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
@@ -423,11 +418,9 @@ good_area:
flags |= FAULT_FLAG_WRITE;
/* a read */
} else {
- /* protection fault */
- if (error_code & 0x08000000)
- goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
+ WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
/*
@@ -437,6 +430,8 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 94cd728166d3..b46912fee7cd 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -67,8 +67,6 @@ struct tlbcamrange {
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
-extern unsigned int tlbcam_index;
-
unsigned long tlbcam_sz(int idx)
{
return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5ff4e07d920a..7e408bfc7948 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -714,6 +714,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+{
+ BUG();
+ return NULL;
+}
+
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
{
@@ -978,7 +986,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*/
pdshift = PUD_SHIFT;
pudp = pud_offset(&pgd, ea);
- pud = ACCESS_ONCE(*pudp);
+ pud = READ_ONCE(*pudp);
if (pud_none(pud))
return NULL;
@@ -990,7 +998,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
else {
pdshift = PMD_SHIFT;
pmdp = pmd_offset(&pud, ea);
- pmd = ACCESS_ONCE(*pmdp);
+ pmd = READ_ONCE(*pmdp);
/*
* A hugepage collapse is captured by pmd_none, because
* it mark the pmd none and do a hpte invalidate.
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 9cba6cba2e50..986afbc22c76 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -52,12 +52,15 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include "mmu_decl.h"
+
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
+static bool no_selective_tlbil;
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -133,6 +136,38 @@ static unsigned int steal_context_smp(unsigned int id)
}
#endif /* CONFIG_SMP */
+static unsigned int steal_all_contexts(void)
+{
+ struct mm_struct *mm;
+ int cpu = smp_processor_id();
+ unsigned int id;
+
+ for (id = first_context; id <= last_context; id++) {
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ pr_hardcont(" | steal %d from 0x%p", id, mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+ if (id != first_context) {
+ context_mm[id] = NULL;
+ __clear_bit(id, context_map);
+#ifdef DEBUG_MAP_CONSISTENCY
+ mm->context.active = 0;
+#endif
+ }
+ __clear_bit(id, stale_map[cpu]);
+ }
+
+ /* Flush the TLB for all contexts (not to be used on SMP) */
+ _tlbil_all();
+
+ nr_free_contexts = last_context - first_context;
+
+ return first_context;
+}
+
/* Note that this will also be called on SMP if all other CPUs are
* offlined, which means that it may be called for cpu != 0. For
* this to work, we somewhat assume that CPUs that are onlined
@@ -241,7 +276,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
goto stolen;
}
#endif /* CONFIG_SMP */
- id = steal_context_up(id);
+ if (no_selective_tlbil)
+ id = steal_all_contexts();
+ else
+ id = steal_context_up(id);
goto stolen;
}
nr_free_contexts--;
@@ -407,12 +445,15 @@ void __init mmu_context_init(void)
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0;
last_context = 15;
+ no_selective_tlbil = true;
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;
last_context = 65535;
+ no_selective_tlbil = false;
} else {
first_context = 1;
last_context = 255;
+ no_selective_tlbil = false;
}
#ifdef DEBUG_CLAMP_LAST_CONTEXT
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index c90e602677c9..83dfcb55ffef 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -172,9 +172,14 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte)
{
-#ifdef CONFIG_DEBUG_VM
- WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
-#endif
+ /*
+ * When handling numa faults, we already have the pte marked
+ * _PAGE_PRESENT, but we can be sure that it is not in hpte.
+ * Hence we can use set_pte_at for them.
+ */
+ VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
+ (_PAGE_PRESENT | _PAGE_USER));
+
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
* is called.
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 50fad3801f30..03b1a3b0fbd5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -63,7 +63,6 @@ void setbat(int index, unsigned long virt, phys_addr_t phys,
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
-extern unsigned int tlbcam_index;
extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#else /* !HAVE_TLBCAM */
@@ -73,13 +72,25 @@ extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
+#ifndef CONFIG_PPC_4K_PAGES
+static struct kmem_cache *pgtable_cache;
+
+void pgtable_cache_init(void)
+{
+ pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
+ 1 << PGDIR_ORDER, 0, NULL);
+ if (pgtable_cache == NULL)
+ panic("Couldn't allocate pgtable caches");
+}
+#endif
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
- ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+ ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
@@ -90,7 +101,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
- kfree((void *)pgd);
+ kmem_cache_free(pgtable_cache, (void *)pgd);
#else
free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
@@ -147,7 +158,7 @@ void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
/* writeable implies dirty for kernel addresses */
- if (flags & _PAGE_RW)
+ if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 4fe5f64cc179..6957cc1ca0a7 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -718,7 +718,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
- WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
+ WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
+ (_PAGE_PRESENT | _PAGE_USER));
assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd));
#endif
@@ -781,7 +782,7 @@ pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
pmd_t pmd;
/*
- * For a valid pte, we would have _PAGE_PRESENT or _PAGE_FILE always
+ * For a valid pte, we would have _PAGE_PRESENT always
* set. We use this to check THP page at pmd level.
* leaf pte for huge page, bottom two bits != 00
*/
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ded0ea1afde4..0f432a702870 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -645,35 +645,6 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
spin_unlock_irqrestore(&slice_convert_lock, flags);
}
-void slice_set_psize(struct mm_struct *mm, unsigned long address,
- unsigned int psize)
-{
- unsigned char *hpsizes;
- unsigned long i, flags;
- u64 *lpsizes;
-
- spin_lock_irqsave(&slice_convert_lock, flags);
- if (address < SLICE_LOW_TOP) {
- i = GET_LOW_SLICE_INDEX(address);
- lpsizes = &mm->context.low_slices_psize;
- *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
- ((unsigned long) psize << (i * 4));
- } else {
- int index, mask_index;
- i = GET_HIGH_SLICE_INDEX(address);
- hpsizes = mm->context.high_slices_psize;
- mask_index = i & 0x1;
- index = i >> 1;
- hpsizes[index] = (hpsizes[index] &
- ~(0xf << (mask_index * 4))) |
- (((unsigned long)psize) << (mask_index * 4));
- }
-
- spin_unlock_irqrestore(&slice_convert_lock, flags);
-
- copro_flush_all_slbs(mm);
-}
-
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
{
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 6c0b1f5f8d2c..fa9fb5b4c66c 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -134,7 +134,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->private;
+ struct vm_area_struct *vma = walk->vma;
split_huge_page_pmd(vma, addr, pmd);
return 0;
}
@@ -163,9 +163,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
if (vma->vm_start >= (addr + len))
break;
vma->vm_flags |= VM_NOHUGEPAGE;
- subpage_proto_walk.private = vma;
- walk_page_range(vma->vm_start, vma->vm_end,
- &subpage_proto_walk);
+ walk_page_vma(vma, &subpage_proto_walk);
vma = vma->vm_next;
}
}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index f38ea4df6a85..cbd3d069897f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -284,8 +284,15 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
struct cpumask *cpu_mask;
unsigned int pid;
+ /*
+ * This function as well as __local_flush_tlb_page() must only be called
+ * for user contexts.
+ */
+ if (unlikely(WARN_ON(!mm)))
+ return;
+
preempt_disable();
- pid = mm ? mm->context.id : 0;
+ pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
cpu_mask = mm_cpumask(mm);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 1ca125b9c226..d1916b577f2c 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -699,7 +699,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 4acaea01fe03..5d747b4cb8ee 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -330,9 +330,11 @@ static int fsl_emb_pmu_add(struct perf_event *event, int flags)
}
local64_set(&event->hw.prev_count, val);
- if (!(flags & PERF_EF_START)) {
+ if (unlikely(!(flags & PERF_EF_START))) {
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
val = 0;
+ } else {
+ event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
}
write_pmc(i, val);
@@ -389,6 +391,7 @@ static void fsl_emb_pmu_del(struct perf_event *event, int flags)
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
+ unsigned long val;
s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
@@ -405,7 +408,10 @@ static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
- write_pmc(event->hw.idx, left);
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ write_pmc(event->hw.idx, val);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
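The preload written by the new code in fsl_emb_pmu_start() appears chosen so that the PMC reaches the 0x80000000 overflow threshold after roughly `left` more events: with left = 1000, for example, the counter is preloaded to 0x80000000 - 1000 = 0x7ffffc18, leaving about 1000 counted events before the threshold; values of left at or above 0x80000000 leave the preload at 0, as in the code above.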
diff --git a/arch/powerpc/perf/hv-24x7-catalog.h b/arch/powerpc/perf/hv-24x7-catalog.h
index 21b19dd86d9c..69e2e1faf902 100644
--- a/arch/powerpc/perf/hv-24x7-catalog.h
+++ b/arch/powerpc/perf/hv-24x7-catalog.h
@@ -30,4 +30,29 @@ struct hv_24x7_catalog_page_0 {
__u8 reserved6[2];
} __packed;
+struct hv_24x7_event_data {
+ __be16 length; /* in bytes, must be a multiple of 16 */
+ __u8 reserved1[2];
+ __u8 domain; /* Chip = 1, Core = 2 */
+ __u8 reserved2[1];
+ __be16 event_group_record_offs; /* in bytes, must be 8 byte aligned */
+ __be16 event_group_record_len; /* in bytes */
+
+ /* in bytes, offset from event_group_record */
+ __be16 event_counter_offs;
+
+ /* verified_state, unverified_state, caveat_state, broken_state, ... */
+ __be32 flags;
+
+ __be16 primary_group_ix;
+ __be16 group_count;
+ __be16 event_name_len;
+ __u8 remainder[];
+ /* __u8 event_name[event_name_len - 2]; */
+ /* __be16 event_description_len; */
+ /* __u8 event_desc[event_description_len - 2]; */
+ /* __be16 detailed_desc_len; */
+ /* __u8 detailed_desc[detailed_desc_len - 2]; */
+} __packed;
+
#endif
diff --git a/arch/powerpc/perf/hv-24x7-domains.h b/arch/powerpc/perf/hv-24x7-domains.h
new file mode 100644
index 000000000000..49c1efd50045
--- /dev/null
+++ b/arch/powerpc/perf/hv-24x7-domains.h
@@ -0,0 +1,28 @@
+
+/*
+ * DOMAIN(name, num, index_kind, is_physical)
+ *
+ * @name: An all caps token, suitable for use in generating an enum
+ * member and appending to an event name in sysfs.
+ *
+ * @num: The number corresponding to the domain as given in
+ * documentation. We assume the catalog domain and the hcall
+ * domain have the same numbering (so far they do), but this
+ * may need to be changed in the future.
+ *
+ * @index_kind: A stringifiable token describing the meaning of the index
+ * within the given domain. Must fit the parsing rules of the
+ * perf sysfs api.
+ *
+ * @is_physical: True if the domain is physical, false otherwise (if virtual).
+ *
+ * Note: The terms PHYS_CHIP, PHYS_CORE, VCPU correspond to physical chip,
+ * physical core and virtual processor in 24x7 Counters specifications.
+ */
+
+DOMAIN(PHYS_CHIP, 0x01, chip, true)
+DOMAIN(PHYS_CORE, 0x02, core, true)
+DOMAIN(VCPU_HOME_CORE, 0x03, vcpu, false)
+DOMAIN(VCPU_HOME_CHIP, 0x04, vcpu, false)
+DOMAIN(VCPU_HOME_NODE, 0x05, vcpu, false)
+DOMAIN(VCPU_REMOTE_NODE, 0x06, vcpu, false)
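For illustration, when hv-24x7.h (further down in this patch) includes this header with "#define DOMAIN(n, v, x, c) HV_PERF_DOMAIN_##n = v,", the list above expands to roughly the following enum; the other DOMAIN() expansions in hv-24x7.c work the same way, substituting the suffix string, the index kind, or the is_physical flag instead:

    enum hv_perf_domains {
            HV_PERF_DOMAIN_PHYS_CHIP        = 0x01,
            HV_PERF_DOMAIN_PHYS_CORE        = 0x02,
            HV_PERF_DOMAIN_VCPU_HOME_CORE   = 0x03,
            HV_PERF_DOMAIN_VCPU_HOME_CHIP   = 0x04,
            HV_PERF_DOMAIN_VCPU_HOME_NODE   = 0x05,
            HV_PERF_DOMAIN_VCPU_REMOTE_NODE = 0x06,
    };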
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index f162d0b8eea3..9445a824819e 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -13,16 +13,66 @@
#define pr_fmt(fmt) "hv-24x7: " fmt
#include <linux/perf_event.h>
+#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>
+#include <linux/byteorder/generic.h>
#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"
+static const char *event_domain_suffix(unsigned domain)
+{
+ switch (domain) {
+#define DOMAIN(n, v, x, c) \
+ case HV_PERF_DOMAIN_##n: \
+ return "__" #n;
+#include "hv-24x7-domains.h"
+#undef DOMAIN
+ default:
+ WARN(1, "unknown domain %d\n", domain);
+ return "__UNKNOWN_DOMAIN_SUFFIX";
+ }
+}
+
+static bool domain_is_valid(unsigned domain)
+{
+ switch (domain) {
+#define DOMAIN(n, v, x, c) \
+ case HV_PERF_DOMAIN_##n: \
+ /* fall through */
+#include "hv-24x7-domains.h"
+#undef DOMAIN
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_physical_domain(unsigned domain)
+{
+ switch (domain) {
+#define DOMAIN(n, v, x, c) \
+ case HV_PERF_DOMAIN_##n: \
+ return c;
+#include "hv-24x7-domains.h"
+#undef DOMAIN
+ default:
+ return false;
+ }
+}
+
+static bool catalog_entry_domain_is_valid(unsigned domain)
+{
+ return is_physical_domain(domain);
+}
+
/*
* TODO: Merging events:
* - Think of the hcall as an interface to a 4d array of counters:
@@ -44,13 +94,14 @@
/*
* Example usage:
- * perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
+ * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
*/
/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
-EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
+EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
+EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
@@ -63,7 +114,8 @@ EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
static struct attribute *format_attrs[] = {
&format_attr_domain.attr,
&format_attr_offset.attr,
- &format_attr_starting_index.attr,
+ &format_attr_core.attr,
+ &format_attr_vcpu.attr,
&format_attr_lpar.attr,
NULL,
};
@@ -73,8 +125,115 @@ static struct attribute_group format_group = {
.attrs = format_attrs,
};
+static struct attribute_group event_group = {
+ .name = "events",
+ /* .attrs is set in init */
+};
+
+static struct attribute_group event_desc_group = {
+ .name = "event_descs",
+ /* .attrs is set in init */
+};
+
+static struct attribute_group event_long_desc_group = {
+ .name = "event_long_descs",
+ /* .attrs is set in init */
+};
+
static struct kmem_cache *hv_page_cache;
+static char *event_name(struct hv_24x7_event_data *ev, int *len)
+{
+ *len = be16_to_cpu(ev->event_name_len) - 2;
+ return (char *)ev->remainder;
+}
+
+static char *event_desc(struct hv_24x7_event_data *ev, int *len)
+{
+ unsigned nl = be16_to_cpu(ev->event_name_len);
+ __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
+ *len = be16_to_cpu(*desc_len) - 2;
+ return (char *)ev->remainder + nl;
+}
+
+static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
+{
+ unsigned nl = be16_to_cpu(ev->event_name_len);
+ __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
+ unsigned desc_len = be16_to_cpu(*desc_len_);
+ __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
+ *len = be16_to_cpu(*long_desc_len) - 2;
+ return (char *)ev->remainder + nl + desc_len;
+}
+
+static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
+ void *end)
+{
+ void *start = ev;
+
+ return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
+}
+
+/*
+ * Things we don't check:
+ * - padding for desc, name, and long/detailed desc is required to be '\0'
+ * bytes.
+ *
+ * Return NULL if we pass end; otherwise return the address of the byte
+ * just following the event.
+ */
+static void *event_end(struct hv_24x7_event_data *ev, void *end)
+{
+ void *start = ev;
+ __be16 *dl_, *ldl_;
+ unsigned dl, ldl;
+ unsigned nl = be16_to_cpu(ev->event_name_len);
+
+ if (nl < 2) {
+ pr_debug("%s: name length too short: %d", __func__, nl);
+ return NULL;
+ }
+
+ if (start + nl > end) {
+ pr_debug("%s: start=%p + nl=%u > end=%p",
+ __func__, start, nl, end);
+ return NULL;
+ }
+
+ dl_ = (__be16 *)(ev->remainder + nl - 2);
+ if (!IS_ALIGNED((uintptr_t)dl_, 2))
+ pr_warn("desc len not aligned %p", dl_);
+ dl = be16_to_cpu(*dl_);
+ if (dl < 2) {
+ pr_debug("%s: desc len too short: %d", __func__, dl);
+ return NULL;
+ }
+
+ if (start + nl + dl > end) {
+ pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
+ __func__, start, nl, dl, start + nl + dl, end);
+ return NULL;
+ }
+
+ ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
+ if (!IS_ALIGNED((uintptr_t)ldl_, 2))
+ pr_warn("long desc len not aligned %p", ldl_);
+ ldl = be16_to_cpu(*ldl_);
+ if (ldl < 2) {
+ pr_debug("%s: long desc len too short (ldl=%u)",
+ __func__, ldl);
+ return NULL;
+ }
+
+ if (start + nl + dl + ldl > end) {
+ pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
+ __func__, start, nl, dl, ldl, end);
+ return NULL;
+ }
+
+ return start + nl + dl + ldl;
+}
+
static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
unsigned long version,
unsigned long index)
@@ -97,6 +256,609 @@ static unsigned long h_get_24x7_catalog_page(char page[],
version, index);
}
+static unsigned core_domains[] = {
+ HV_PERF_DOMAIN_PHYS_CORE,
+ HV_PERF_DOMAIN_VCPU_HOME_CORE,
+ HV_PERF_DOMAIN_VCPU_HOME_CHIP,
+ HV_PERF_DOMAIN_VCPU_HOME_NODE,
+ HV_PERF_DOMAIN_VCPU_REMOTE_NODE,
+};
+/* chip event data always yields a single event, core yields multiple */
+#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains)
+
+static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
+{
+ const char *sindex;
+ const char *lpar;
+
+ if (is_physical_domain(domain)) {
+ lpar = "0x0";
+ sindex = "core";
+ } else {
+ lpar = "?";
+ sindex = "vcpu";
+ }
+
+ return kasprintf(GFP_KERNEL,
+ "domain=0x%x,offset=0x%x,%s=?,lpar=%s",
+ domain,
+ be16_to_cpu(event->event_counter_offs) +
+ be16_to_cpu(event->event_group_record_offs),
+ sindex,
+ lpar);
+}
+
+/* Avoid trusting fw to NUL terminate strings */
+static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
+{
+ return kasprintf(gfp, "%.*s", max_len, maybe_str);
+}
+
+static ssize_t device_show_string(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *d;
+
+ d = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *)d->var);
+}
+
+static struct attribute *device_str_attr_create_(char *name, char *str)
+{
+ struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+
+ if (!attr)
+ return NULL;
+
+ attr->var = str;
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = device_show_string;
+ return &attr->attr.attr;
+}
+
+static struct attribute *device_str_attr_create(char *name, int name_max,
+ int name_nonce,
+ char *str, size_t str_max)
+{
+ char *n;
+ char *s = memdup_to_str(str, str_max, GFP_KERNEL);
+ struct attribute *a;
+
+ if (!s)
+ return NULL;
+
+ if (!name_nonce)
+ n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
+ else
+ n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
+ name_nonce);
+ if (!n)
+ goto out_s;
+
+ a = device_str_attr_create_(n, s);
+ if (!a)
+ goto out_n;
+
+ return a;
+out_n:
+ kfree(n);
+out_s:
+ kfree(s);
+ return NULL;
+}
+
+static void device_str_attr_destroy(struct attribute *attr)
+{
+ struct dev_ext_attribute *d;
+
+ d = container_of(attr, struct dev_ext_attribute, attr.attr);
+ kfree(d->var);
+ kfree(d->attr.attr.name);
+ kfree(d);
+}
+
+static struct attribute *event_to_attr(unsigned ix,
+ struct hv_24x7_event_data *event,
+ unsigned domain,
+ int nonce)
+{
+ int event_name_len;
+ char *ev_name, *a_ev_name, *val;
+ const char *ev_suffix;
+ struct attribute *attr;
+
+ if (!domain_is_valid(domain)) {
+ pr_warn("catalog event %u has invalid domain %u\n",
+ ix, domain);
+ return NULL;
+ }
+
+ val = event_fmt(event, domain);
+ if (!val)
+ return NULL;
+
+ ev_suffix = event_domain_suffix(domain);
+ ev_name = event_name(event, &event_name_len);
+ if (!nonce)
+ a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s",
+ (int)event_name_len, ev_name, ev_suffix);
+ else
+ a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
+ (int)event_name_len, ev_name, ev_suffix, nonce);
+
+
+ if (!a_ev_name)
+ goto out_val;
+
+ attr = device_str_attr_create_(a_ev_name, val);
+ if (!attr)
+ goto out_name;
+
+ return attr;
+out_name:
+ kfree(a_ev_name);
+out_val:
+ kfree(val);
+ return NULL;
+}
+
+static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
+ int nonce)
+{
+ int nl, dl;
+ char *name = event_name(event, &nl);
+ char *desc = event_desc(event, &dl);
+
+ /* If there isn't a description, don't create the sysfs file */
+ if (!dl)
+ return NULL;
+
+ return device_str_attr_create(name, nl, nonce, desc, dl);
+}
+
+static struct attribute *
+event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
+{
+ int nl, dl;
+ char *name = event_name(event, &nl);
+ char *desc = event_long_desc(event, &dl);
+
+ /* If there isn't a description, don't create the sysfs file */
+ if (!dl)
+ return NULL;
+
+ return device_str_attr_create(name, nl, nonce, desc, dl);
+}
+
+static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
+ struct hv_24x7_event_data *event, int nonce)
+{
+ unsigned i;
+
+ switch (event->domain) {
+ case HV_PERF_DOMAIN_PHYS_CHIP:
+ *attrs = event_to_attr(ix, event, event->domain, nonce);
+ return 1;
+ case HV_PERF_DOMAIN_PHYS_CORE:
+ for (i = 0; i < ARRAY_SIZE(core_domains); i++) {
+ attrs[i] = event_to_attr(ix, event, core_domains[i],
+ nonce);
+ if (!attrs[i]) {
+ pr_warn("catalog event %u: individual attr %u "
+ "creation failure\n", ix, i);
+ for (; i; i--)
+ device_str_attr_destroy(attrs[i - 1]);
+ return -1;
+ }
+ }
+ return i;
+ default:
+ pr_warn("catalog event %u: domain %u is not allowed in the "
+ "catalog\n", ix, event->domain);
+ return -1;
+ }
+}
+
+static size_t event_to_attr_ct(struct hv_24x7_event_data *event)
+{
+ switch (event->domain) {
+ case HV_PERF_DOMAIN_PHYS_CHIP:
+ return 1;
+ case HV_PERF_DOMAIN_PHYS_CORE:
+ return ARRAY_SIZE(core_domains);
+ default:
+ return 0;
+ }
+}
+
+static unsigned long vmalloc_to_phys(void *v)
+{
+ struct page *p = vmalloc_to_page(v);
+
+ BUG_ON(!p);
+ return page_to_phys(p) + offset_in_page(v);
+}
+
+/* */
+struct event_uniq {
+ struct rb_node node;
+ const char *name;
+ int nl;
+ unsigned ct;
+ unsigned domain;
+};
+
+static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
+{
+ if (s1 < s2)
+ return 1;
+	if (s2 < s1)
+ return -1;
+
+ return memcmp(d1, d2, s1);
+}
+
+static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
+ size_t s2, unsigned d2)
+{
+ int r = memord(v1, s1, v2, s2);
+
+ if (r)
+ return r;
+ if (d1 > d2)
+ return 1;
+ if (d2 > d1)
+ return -1;
+ return 0;
+}
+
+static int event_uniq_add(struct rb_root *root, const char *name, int nl,
+ unsigned domain)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct event_uniq *data;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct event_uniq *it;
+ int result;
+
+ it = container_of(*new, struct event_uniq, node);
+ result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
+ it->domain);
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else {
+ it->ct++;
+ pr_info("found a duplicate event %.*s, ct=%u\n", nl,
+ name, it->ct);
+ return it->ct;
+ }
+ }
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ *data = (struct event_uniq) {
+ .name = name,
+ .nl = nl,
+ .ct = 0,
+ .domain = domain,
+ };
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ /* data->ct */
+ return 0;
+}
+
+static void event_uniq_destroy(struct rb_root *root)
+{
+ /*
+ * the strings we point to are in the giant block of memory filled by
+ * the catalog, and are freed separately.
+ */
+ struct event_uniq *pos, *n;
+
+ rbtree_postorder_for_each_entry_safe(pos, n, root, node)
+ kfree(pos);
+}
+
+
+/*
+ * ensure the event structure's sizes are self consistent and don't cause us to
+ * read outside of the event
+ *
+ * On success, return the event length in bytes.
+ * Otherwise, return -1 (and print as appropriate).
+ */
+static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
+ size_t event_idx,
+ size_t event_data_bytes,
+ size_t event_entry_count,
+ size_t offset, void *end)
+{
+ ssize_t ev_len;
+ void *ev_end, *calc_ev_end;
+
+ if (offset >= event_data_bytes)
+ return -1;
+
+ if (event_idx >= event_entry_count) {
+ pr_devel("catalog event data has %zu bytes of padding after last event\n",
+ event_data_bytes - offset);
+ return -1;
+ }
+
+ if (!event_fixed_portion_is_within(event, end)) {
+ pr_warn("event %zu fixed portion is not within range\n",
+ event_idx);
+ return -1;
+ }
+
+ ev_len = be16_to_cpu(event->length);
+
+ if (ev_len % 16)
+ pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
+ event_idx, ev_len, event);
+
+ ev_end = (__u8 *)event + ev_len;
+ if (ev_end > end) {
+ pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
+ event_idx, ev_len, ev_end, end,
+ offset);
+ return -1;
+ }
+
+ calc_ev_end = event_end(event, end);
+ if (!calc_ev_end) {
+ pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
+ event_idx, event_data_bytes, event, end,
+ offset);
+ return -1;
+ }
+
+ if (calc_ev_end > ev_end) {
+		pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
+ event_idx, event, ev_end, offset, calc_ev_end);
+ return -1;
+ }
+
+ return ev_len;
+}
+
+#define MAX_4K (SIZE_MAX / 4096)
+
+static void create_events_from_catalog(struct attribute ***events_,
+ struct attribute ***event_descs_,
+ struct attribute ***event_long_descs_)
+{
+ unsigned long hret;
+ size_t catalog_len, catalog_page_len, event_entry_count,
+ event_data_len, event_data_offs,
+ event_data_bytes, junk_events, event_idx, event_attr_ct, i,
+ attr_max, event_idx_last, desc_ct, long_desc_ct;
+ ssize_t ct, ev_len;
+ uint32_t catalog_version_num;
+ struct attribute **events, **event_descs, **event_long_descs;
+ struct hv_24x7_catalog_page_0 *page_0 =
+ kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
+ void *page = page_0;
+ void *event_data, *end;
+ struct hv_24x7_event_data *event;
+ struct rb_root ev_uniq = RB_ROOT;
+
+ if (!page)
+ goto e_out;
+
+ hret = h_get_24x7_catalog_page(page, 0, 0);
+ if (hret)
+ goto e_free;
+
+ catalog_version_num = be64_to_cpu(page_0->version);
+ catalog_page_len = be32_to_cpu(page_0->length);
+
+ if (MAX_4K < catalog_page_len) {
+ pr_err("invalid page count: %zu\n", catalog_page_len);
+ goto e_free;
+ }
+
+ catalog_len = catalog_page_len * 4096;
+
+ event_entry_count = be16_to_cpu(page_0->event_entry_count);
+ event_data_offs = be16_to_cpu(page_0->event_data_offs);
+ event_data_len = be16_to_cpu(page_0->event_data_len);
+
+ pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n",
+ (size_t)catalog_version_num, catalog_len,
+ event_entry_count, event_data_offs, event_data_len);
+
+ if ((MAX_4K < event_data_len)
+ || (MAX_4K < event_data_offs)
+ || (MAX_4K - event_data_offs < event_data_len)) {
+ pr_err("invalid event data offs %zu and/or len %zu\n",
+ event_data_offs, event_data_len);
+ goto e_free;
+ }
+
+ if ((event_data_offs + event_data_len) > catalog_page_len) {
+ pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
+ event_data_offs,
+ event_data_offs + event_data_len,
+ catalog_page_len);
+ goto e_free;
+ }
+
+ if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
+ pr_err("event_entry_count %zu is invalid\n",
+ event_entry_count);
+ goto e_free;
+ }
+
+ event_data_bytes = event_data_len * 4096;
+
+ /*
+ * event data can span several pages, events can cross between these
+ * pages. Use vmalloc to make this easier.
+ */
+ event_data = vmalloc(event_data_bytes);
+ if (!event_data) {
+ pr_err("could not allocate event data\n");
+ goto e_free;
+ }
+
+ end = event_data + event_data_bytes;
+
+ /*
+ * using vmalloc_to_phys() like this only works if PAGE_SIZE is
+ * divisible by 4096
+ */
+ BUILD_BUG_ON(PAGE_SIZE % 4096);
+
+ for (i = 0; i < event_data_len; i++) {
+ hret = h_get_24x7_catalog_page_(
+ vmalloc_to_phys(event_data + i * 4096),
+ catalog_version_num,
+ i + event_data_offs);
+ if (hret) {
+ pr_err("failed to get event data in page %zu\n",
+ i + event_data_offs);
+ goto e_event_data;
+ }
+ }
+
+ /*
+ * scan the catalog to determine the number of attributes we need, and
+ * verify it at the same time.
+ */
+ for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
+ ;
+ event_idx++, event = (void *)event + ev_len) {
+ size_t offset = (void *)event - (void *)event_data;
+ char *name;
+ int nl;
+
+ ev_len = catalog_event_len_validate(event, event_idx,
+ event_data_bytes,
+ event_entry_count,
+ offset, end);
+ if (ev_len < 0)
+ break;
+
+ name = event_name(event, &nl);
+
+ if (event->event_group_record_len == 0) {
+ pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
+ event_idx, nl, name);
+ junk_events++;
+ continue;
+ }
+
+ if (!catalog_entry_domain_is_valid(event->domain)) {
+ pr_info("event %zu (%.*s) has invalid domain %d\n",
+ event_idx, nl, name, event->domain);
+ junk_events++;
+ continue;
+ }
+
+ attr_max += event_to_attr_ct(event);
+ }
+
+ event_idx_last = event_idx;
+ if (event_idx_last != event_entry_count)
+ pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
+ event_idx_last, event_entry_count, junk_events);
+
+ events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
+ if (!events)
+ goto e_event_data;
+
+ event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
+ GFP_KERNEL);
+ if (!event_descs)
+ goto e_event_attrs;
+
+ event_long_descs = kmalloc_array(event_idx + 1,
+ sizeof(*event_long_descs), GFP_KERNEL);
+ if (!event_long_descs)
+ goto e_event_descs;
+
+ /* Iterate over the catalog filling in the attribute vector */
+ for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
+ event = event_data, event_idx = 0;
+ event_idx < event_idx_last;
+ event_idx++, ev_len = be16_to_cpu(event->length),
+ event = (void *)event + ev_len) {
+ char *name;
+ int nl;
+ int nonce;
+ /*
+ * these are the only "bad" events that are intermixed and that
+ * we can ignore without issue. make sure to skip them here
+ */
+ if (event->event_group_record_len == 0)
+ continue;
+ if (!catalog_entry_domain_is_valid(event->domain))
+ continue;
+
+ name = event_name(event, &nl);
+ nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
+ ct = event_data_to_attrs(event_idx, events + event_attr_ct,
+ event, nonce);
+ if (ct <= 0) {
+ pr_warn("event %zu (%.*s) creation failure, skipping\n",
+ event_idx, nl, name);
+ junk_events++;
+ } else {
+ event_attr_ct += ct;
+ event_descs[desc_ct] = event_to_desc_attr(event, nonce);
+ if (event_descs[desc_ct])
+ desc_ct++;
+ event_long_descs[long_desc_ct] =
+ event_to_long_desc_attr(event, nonce);
+ if (event_long_descs[long_desc_ct])
+ long_desc_ct++;
+ }
+ }
+
+ pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
+ event_idx, event_attr_ct, junk_events, desc_ct);
+
+ events[event_attr_ct] = NULL;
+ event_descs[desc_ct] = NULL;
+ event_long_descs[long_desc_ct] = NULL;
+
+ event_uniq_destroy(&ev_uniq);
+ vfree(event_data);
+ kmem_cache_free(hv_page_cache, page);
+
+ *events_ = events;
+ *event_descs_ = event_descs;
+ *event_long_descs_ = event_long_descs;
+ return;
+
+e_event_descs:
+ kfree(event_descs);
+e_event_attrs:
+ kfree(events);
+e_event_data:
+ vfree(event_data);
+e_free:
+ kmem_cache_free(hv_page_cache, page);
+e_out:
+ *events_ = NULL;
+ *event_descs_ = NULL;
+ *event_long_descs_ = NULL;
+}
+
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
@@ -207,16 +969,13 @@ static struct attribute_group if_group = {
static const struct attribute_group *attr_groups[] = {
&format_group,
+ &event_group,
+ &event_desc_group,
+ &event_long_desc_group,
&if_group,
NULL,
};
-static bool is_physical_domain(int domain)
-{
- return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
- domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
-}
-
DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
@@ -291,9 +1050,17 @@ out:
static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
bool success_expected)
{
+ u16 idx;
+ unsigned domain = event_get_domain(event);
+
+ if (is_physical_domain(domain))
+ idx = event_get_core(event);
+ else
+ idx = event_get_vcpu(event);
+
return single_24x7_request(event_get_domain(event),
event_get_offset(event),
- event_get_starting_index(event),
+ idx,
event_get_lpar(event),
res,
success_expected);
@@ -356,7 +1123,7 @@ static int h_24x7_event_init(struct perf_event *event)
return -EIO;
}
- /* PHYSICAL domains & other lpars require extra capabilities */
+ /* Physical domains & other lpars require extra capabilities */
if (!caps.collect_privileged && (is_physical_domain(domain) ||
(event_get_lpar(event) != event_get_lpar_max()))) {
pr_devel("hv permisions disallow: is_physical_domain:%d, lpar=0x%llx\n",
@@ -452,6 +1219,10 @@ static int hv_24x7_init(void)
/* sampling not supported */
h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ create_events_from_catalog(&event_group.attrs,
+ &event_desc_group.attrs,
+ &event_long_desc_group.attrs);
+
r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
if (r)
return r;
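To make event_fmt() concrete: a hypothetical catalog event named PM_FOO in the PHYS_CORE domain (0x2) with a combined group/counter offset of 0x8 would yield one sysfs attribute per entry of core_domains[], roughly:

    events/PM_FOO__PHYS_CORE        domain=0x2,offset=0x8,core=?,lpar=0x0
    events/PM_FOO__VCPU_HOME_CORE   domain=0x3,offset=0x8,vcpu=?,lpar=?
    ...

so the user supplies the core or vcpu index (and, for the virtual domains, the lpar) on the perf command line, as in the usage example near the top of the file.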
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 720ebce4b435..69cd4e690f58 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -3,14 +3,14 @@
#include <linux/types.h>
+enum hv_perf_domains {
+#define DOMAIN(n, v, x, c) HV_PERF_DOMAIN_##n = v,
+#include "hv-24x7-domains.h"
+#undef DOMAIN
+};
+
struct hv_24x7_request {
/* PHYSICAL domains require enabling via phyp/hmc. */
-#define HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP 0x01
-#define HV_24X7_PERF_DOMAIN_PHYSICAL_CORE 0x02
-#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_HOME_CORE 0x03
-#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_HOME_CHIP 0x04
-#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_HOME_NODE 0x05
-#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_REMOTE_NODE 0x06
__u8 performance_domain;
__u8 reserved[0x1];
diff --git a/arch/powerpc/perf/hv-common.c b/arch/powerpc/perf/hv-common.c
index 47e02b366f58..7dce8f109967 100644
--- a/arch/powerpc/perf/hv-common.c
+++ b/arch/powerpc/perf/hv-common.c
@@ -9,13 +9,13 @@ unsigned long hv_perf_caps_get(struct hv_perf_caps *caps)
unsigned long r;
struct p {
struct hv_get_perf_counter_info_params params;
- struct cv_system_performance_capabilities caps;
+ struct hv_gpci_system_performance_capabilities caps;
} __packed __aligned(sizeof(uint64_t));
struct p arg = {
.params = {
.counter_request = cpu_to_be32(
- CIR_SYSTEM_PERFORMANCE_CAPABILITIES),
+ HV_GPCI_system_performance_capabilities),
.starting_index = cpu_to_be32(-1),
.counter_info_version_in = 0,
}
@@ -31,9 +31,9 @@ unsigned long hv_perf_caps_get(struct hv_perf_caps *caps)
caps->version = arg.params.counter_info_version_out;
caps->collect_privileged = !!arg.caps.perf_collect_privileged;
- caps->ga = !!(arg.caps.capability_mask & CV_CM_GA);
- caps->expanded = !!(arg.caps.capability_mask & CV_CM_EXPANDED);
- caps->lab = !!(arg.caps.capability_mask & CV_CM_LAB);
+ caps->ga = !!(arg.caps.capability_mask & HV_GPCI_CM_GA);
+ caps->expanded = !!(arg.caps.capability_mask & HV_GPCI_CM_EXPANDED);
+ caps->lab = !!(arg.caps.capability_mask & HV_GPCI_CM_LAB);
return r;
}
diff --git a/arch/powerpc/perf/hv-common.h b/arch/powerpc/perf/hv-common.h
index 5d79cecbd73d..349aaba4d2d1 100644
--- a/arch/powerpc/perf/hv-common.h
+++ b/arch/powerpc/perf/hv-common.h
@@ -20,6 +20,16 @@ unsigned long hv_perf_caps_get(struct hv_perf_caps *caps);
PMU_FORMAT_ATTR(name, #attr_var ":" #bit_start "-" #bit_end); \
EVENT_DEFINE_RANGE(name, attr_var, bit_start, bit_end)
+/*
+ * The EVENT_DEFINE_RANGE_FORMAT() macro above includes helper functions
+ * for the fields (eg: event_get_starting_index()). For some fields we
+ * need the bit-range definition, but not the helper functions. Define a
+ * lite version of the above macro without the helpers, to avoid
+ * compiler warnings about unused static functions.
+ */
+#define EVENT_DEFINE_RANGE_FORMAT_LITE(name, attr_var, bit_start, bit_end) \
+PMU_FORMAT_ATTR(name, #attr_var ":" #bit_start "-" #bit_end);
+
#define EVENT_DEFINE_RANGE(name, attr_var, bit_start, bit_end) \
static u64 event_get_##name##_max(void) \
{ \
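For instance, EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63), as used by hv-gpci.c further down, expands to just PMU_FORMAT_ATTR(phys_processor_idx, "config:32-63"), giving the sysfs format attribute without the event_get_phys_processor_idx()/event_get_phys_processor_idx_max() helpers that the full macro would also generate.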
diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
new file mode 100644
index 000000000000..acd17648cd18
--- /dev/null
+++ b/arch/powerpc/perf/hv-gpci-requests.h
@@ -0,0 +1,261 @@
+
+#include "req-gen/_begin.h"
+
+/*
+ * Based on the document "getPerfCountInfo v1.07"
+ */
+
+/*
+ * #define REQUEST_NAME counter_request_name
+ * #define REQUEST_NUM r_num
+ * #define REQUEST_IDX_KIND starting_index_kind
+ * #include I(REQUEST_BEGIN)
+ * REQUEST(
+ * __field(...)
+ * __field(...)
+ * __array(...)
+ * __count(...)
+ * )
+ * #include I(REQUEST_END)
+ *
+ * - starting_index_kind is one of the following, depending on the event:
+ *
+ * hw_chip_id: hardware chip id or -1 for current hw chip
+ * partition_id
+ * sibling_part_id,
+ * phys_processor_idx:
+ *	0xffffffffffffffff: or -1, which means it is irrelevant for the event
+ *
+ * __count(offset, bytes, name):
+ * a counter that should be exposed via perf
+ * __field(offset, bytes, name)
+ * a normal field
+ * __array(offset, bytes, name)
+ * an array of bytes
+ *
+ *
+ * @bytes for __count and __field _must_ be a numeral token
+ * in decimal, not an expression and not in hex.
+ *
+ *
+ * TODO:
+ * - expose secondary index (if any counter ever uses it, only 0xA0
+ * appears to use it right now, and it doesn't have any counters)
+ * - embed versioning info
+ * - include counter descriptions
+ */
+#define REQUEST_NAME dispatch_timebase_by_processor
+#define REQUEST_NUM 0x10
+#define REQUEST_IDX_KIND "phys_processor_idx=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__count(0, 8, processor_time_in_timebase_cycles)
+ __field(0x8, 4, hw_processor_id)
+ __field(0xC, 2, owning_part_id)
+ __field(0xE, 1, processor_state)
+ __field(0xF, 1, version)
+ __field(0x10, 4, hw_chip_id)
+ __field(0x14, 4, phys_module_id)
+ __field(0x18, 4, primary_affinity_domain_idx)
+ __field(0x1C, 4, secondary_affinity_domain_idx)
+ __field(0x20, 4, processor_version)
+ __field(0x24, 2, logical_processor_idx)
+ __field(0x26, 2, reserved)
+ __field(0x28, 4, processor_id_register)
+ __field(0x2C, 4, phys_processor_idx)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME entitled_capped_uncapped_donated_idle_timebase_by_partition
+#define REQUEST_NUM 0x20
+#define REQUEST_IDX_KIND "sibling_part_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 8, partition_id)
+ __count(0x8, 8, entitled_cycles)
+ __count(0x10, 8, consumed_capped_cycles)
+ __count(0x18, 8, consumed_uncapped_cycles)
+ __count(0x20, 8, cycles_donated)
+ __count(0x28, 8, purr_idle_cycles)
+)
+#include I(REQUEST_END)
+
+/*
+ * Not available for counter_info_version >= 0x8, use
+ * run_instruction_cycles_by_partition(0x100) instead.
+ */
+#define REQUEST_NAME run_instructions_run_cycles_by_partition
+#define REQUEST_NUM 0x30
+#define REQUEST_IDX_KIND "sibling_part_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 8, partition_id)
+ __count(0x8, 8, instructions_completed)
+ __count(0x10, 8, cycles)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME system_performance_capabilities
+#define REQUEST_NUM 0x40
+#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 1, perf_collect_privileged)
+ __field(0x1, 1, capability_mask)
+ __array(0x2, 0xE, reserved)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME processor_bus_utilization_abc_links
+#define REQUEST_NUM 0x50
+#define REQUEST_IDX_KIND "hw_chip_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 4, hw_chip_id)
+ __array(0x4, 0xC, reserved1)
+ __count(0x10, 8, total_link_cycles)
+ __count(0x18, 8, idle_cycles_for_a_link)
+ __count(0x20, 8, idle_cycles_for_b_link)
+ __count(0x28, 8, idle_cycles_for_c_link)
+ __array(0x30, 0x20, reserved2)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME processor_bus_utilization_wxyz_links
+#define REQUEST_NUM 0x60
+#define REQUEST_IDX_KIND "hw_chip_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 4, hw_chip_id)
+ __array(0x4, 0xC, reserved1)
+ __count(0x10, 8, total_link_cycles)
+ __count(0x18, 8, idle_cycles_for_w_link)
+ __count(0x20, 8, idle_cycles_for_x_link)
+ __count(0x28, 8, idle_cycles_for_y_link)
+ __count(0x30, 8, idle_cycles_for_z_link)
+ __array(0x38, 0x28, reserved2)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME processor_bus_utilization_gx_links
+#define REQUEST_NUM 0x70
+#define REQUEST_IDX_KIND "hw_chip_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 4, hw_chip_id)
+ __array(0x4, 0xC, reserved1)
+ __count(0x10, 8, gx0_in_address_cycles)
+ __count(0x18, 8, gx0_in_data_cycles)
+ __count(0x20, 8, gx0_in_retries)
+ __count(0x28, 8, gx0_in_bus_cycles)
+ __count(0x30, 8, gx0_in_cycles_total)
+ __count(0x38, 8, gx0_out_address_cycles)
+ __count(0x40, 8, gx0_out_data_cycles)
+ __count(0x48, 8, gx0_out_retries)
+ __count(0x50, 8, gx0_out_bus_cycles)
+ __count(0x58, 8, gx0_out_cycles_total)
+ __count(0x60, 8, gx1_in_address_cycles)
+ __count(0x68, 8, gx1_in_data_cycles)
+ __count(0x70, 8, gx1_in_retries)
+ __count(0x78, 8, gx1_in_bus_cycles)
+ __count(0x80, 8, gx1_in_cycles_total)
+ __count(0x88, 8, gx1_out_address_cycles)
+ __count(0x90, 8, gx1_out_data_cycles)
+ __count(0x98, 8, gx1_out_retries)
+ __count(0xA0, 8, gx1_out_bus_cycles)
+ __count(0xA8, 8, gx1_out_cycles_total)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME processor_bus_utilization_mc_links
+#define REQUEST_NUM 0x80
+#define REQUEST_IDX_KIND "hw_chip_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 4, hw_chip_id)
+ __array(0x4, 0xC, reserved1)
+ __count(0x10, 8, mc0_frames)
+ __count(0x18, 8, mc0_reads)
+ __count(0x20, 8, mc0_write)
+ __count(0x28, 8, mc0_total_cycles)
+ __count(0x30, 8, mc1_frames)
+ __count(0x38, 8, mc1_reads)
+ __count(0x40, 8, mc1_writes)
+ __count(0x48, 8, mc1_total_cycles)
+)
+#include I(REQUEST_END)
+
+/* Processor_config (0x90) skipped, no counters */
+/* Current_processor_frequency (0x91) skipped, no counters */
+
+#define REQUEST_NAME processor_core_utilization
+#define REQUEST_NUM 0x94
+#define REQUEST_IDX_KIND "phys_processor_idx=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 4, phys_processor_idx)
+ __field(0x4, 4, hw_processor_id)
+ __count(0x8, 8, cycles_across_any_thread)
+ __count(0x10, 8, timebase_at_collection)
+ __count(0x18, 8, purr_cycles)
+ __count(0x20, 8, sum_of_cycles_across_all_threads)
+ __count(0x28, 8, instructions_completed)
+)
+#include I(REQUEST_END)
+
+/* Processor_core_power_mode (0x95) skipped, no counters */
+/* Affinity_domain_information_by_virtual_processor (0xA0) skipped,
+ * no counters */
+/* Affinity_domain_information_by_domain (0xB0) skipped, no counters */
+/* Affinity_domain_information_by_partition (0xB1) skipped, no counters */
+/* Physical_memory_info (0xC0) skipped, no counters */
+/* Processor_bus_topology (0xD0) skipped, no counters */
+
+#define REQUEST_NAME partition_hypervisor_queuing_times
+#define REQUEST_NUM 0xE0
+#define REQUEST_IDX_KIND "partition_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 2, partition_id)
+ __array(0x2, 6, reserved1)
+ __count(0x8, 8, time_waiting_for_entitlement)
+ __count(0x10, 8, times_waited_for_entitlement)
+ __count(0x18, 8, time_waiting_for_phys_processor)
+ __count(0x20, 8, times_waited_for_phys_processor)
+ __count(0x28, 8, dispatches_on_home_core)
+ __count(0x30, 8, dispatches_on_home_primary_affinity_domain)
+ __count(0x38, 8, dispatches_on_home_secondary_affinity_domain)
+ __count(0x40, 8, dispatches_off_home_secondary_affinity_domain)
+ __count(0x48, 8, dispatches_on_dedicated_processor_donating_cycles)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME system_hypervisor_times
+#define REQUEST_NUM 0xF0
+#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#include I(REQUEST_BEGIN)
+REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
+ __count(0x8, 8, time_spent_processing_virtual_processor_timers)
+ __count(0x10, 8, time_spent_managing_partitions_over_entitlement)
+ __count(0x18, 8, time_spent_on_system_management)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME system_tlbie_count_and_time
+#define REQUEST_NUM 0xF4
+#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#include I(REQUEST_BEGIN)
+REQUEST(__count(0, 8, tlbie_instructions_issued)
+ /*
+ * FIXME: The spec says the offset here is 0x10, which I suspect
+ * is wrong.
+ */
+ __count(0x8, 8, time_spent_issuing_tlbies)
+)
+#include I(REQUEST_END)
+
+#define REQUEST_NAME partition_instruction_count_and_time
+#define REQUEST_NUM 0x100
+#define REQUEST_IDX_KIND "partition_id=?"
+#include I(REQUEST_BEGIN)
+REQUEST(__field(0, 2, partition_id)
+ __array(0x2, 0x6, reserved1)
+ __count(0x8, 8, instructions_performed)
+ __count(0x10, 8, time_collected)
+)
+#include I(REQUEST_END)
+
+/* set_mmcrh (0x80001000) skipped, no counters */
+/* retrieve_hpmcx (0x80002000) skipped, no counters */
+
+#include "req-gen/_end.h"
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index a051fe946c63..856fe6e03c2a 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -31,7 +31,18 @@
/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
+/*
+ * Note that starting_index, phys_processor_idx, sibling_part_id,
+ * hw_chip_id, partition_id all refer to the same bit range. They
+ * are basically aliases for the starting_index. The specific alias
+ * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
+ */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
+EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
+EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
+EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
+EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);
+
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
@@ -44,6 +55,10 @@ EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
static struct attribute *format_attrs[] = {
&format_attr_request.attr,
&format_attr_starting_index.attr,
+ &format_attr_phys_processor_idx.attr,
+ &format_attr_sibling_part_id.attr,
+ &format_attr_hw_chip_id.attr,
+ &format_attr_partition_id.attr,
&format_attr_secondary_index.attr,
&format_attr_counter_info_version.attr,
@@ -57,6 +72,11 @@ static struct attribute_group format_group = {
.attrs = format_attrs,
};
+static struct attribute_group event_group = {
+ .name = "events",
+ .attrs = hv_gpci_event_attrs,
+};
+
#define HV_CAPS_ATTR(_name, _format) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -102,6 +122,7 @@ static struct attribute_group interface_group = {
static const struct attribute_group *attr_groups[] = {
&format_group,
+ &event_group,
&interface_group,
NULL,
};
@@ -265,6 +286,8 @@ static int hv_gpci_init(void)
unsigned long hret;
struct hv_perf_caps caps;
+ hv_gpci_assert_offsets_correct();
+
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h
index b25f460c9cce..86ede8275961 100644
--- a/arch/powerpc/perf/hv-gpci.h
+++ b/arch/powerpc/perf/hv-gpci.h
@@ -42,32 +42,19 @@ struct hv_get_perf_counter_info_params {
*/
#define COUNTER_INFO_VERSION_CURRENT 0x8
-/*
- * These determine the counter_value[] layout and the meaning of starting_index
- * and secondary_index.
- *
- * Unless otherwise noted, @secondary_index is unused and ignored.
- */
-enum counter_info_requests {
-
- /* GENERAL */
-
- /* @starting_index: must be -1 (to refer to the current partition)
- */
- CIR_SYSTEM_PERFORMANCE_CAPABILITIES = 0X40,
+/* capability_mask bit masks. */
+enum {
+ HV_GPCI_CM_GA = (1 << 7),
+ HV_GPCI_CM_EXPANDED = (1 << 6),
+ HV_GPCI_CM_LAB = (1 << 5)
};
-struct cv_system_performance_capabilities {
- /* If != 0, allowed to collect data from other partitions */
- __u8 perf_collect_privileged;
-
- /* These following are only valid if counter_info_version >= 0x3 */
-#define CV_CM_GA (1 << 7)
-#define CV_CM_EXPANDED (1 << 6)
-#define CV_CM_LAB (1 << 5)
- /* remaining bits are reserved */
- __u8 capability_mask;
- __u8 reserved[0xE];
-} __packed;
+#define REQUEST_FILE "../hv-gpci-requests.h"
+#define NAME_LOWER hv_gpci
+#define NAME_UPPER HV_GPCI
+#include "req-gen/perf.h"
+#undef REQUEST_FILE
+#undef NAME_LOWER
+#undef NAME_UPPER
#endif
diff --git a/arch/powerpc/perf/req-gen/_begin.h b/arch/powerpc/perf/req-gen/_begin.h
new file mode 100644
index 000000000000..acfb17a55c16
--- /dev/null
+++ b/arch/powerpc/perf/req-gen/_begin.h
@@ -0,0 +1,13 @@
+/* Include paths to be used in interface defining headers */
+#ifndef POWERPC_PERF_REQ_GEN_H_
+#define POWERPC_PERF_REQ_GEN_H_
+
+#define CAT2_STR_(t, s) __stringify(t/s)
+#define CAT2_STR(t, s) CAT2_STR_(t, s)
+#define I(...) __VA_ARGS__
+
+#endif
+
+#define REQ_GEN_PREFIX req-gen
+#define REQUEST_BEGIN CAT2_STR(REQ_GEN_PREFIX, _request-begin.h)
+#define REQUEST_END CAT2_STR(REQ_GEN_PREFIX, _request-end.h)
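Concretely, with REQ_GEN_PREFIX set to req-gen, REQUEST_BEGIN stringifies to "req-gen/_request-begin.h" and REQUEST_END to "req-gen/_request-end.h"; these are the paths the #include I(REQUEST_BEGIN) / #include I(REQUEST_END) pairs in hv-gpci-requests.h resolve to.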
diff --git a/arch/powerpc/perf/req-gen/_clear.h b/arch/powerpc/perf/req-gen/_clear.h
new file mode 100644
index 000000000000..422974f89573
--- /dev/null
+++ b/arch/powerpc/perf/req-gen/_clear.h
@@ -0,0 +1,5 @@
+
+#undef __field_
+#undef __count_
+#undef __array_
+#undef REQUEST_
diff --git a/arch/powerpc/perf/req-gen/_end.h b/arch/powerpc/perf/req-gen/_end.h
new file mode 100644
index 000000000000..8a406980b6bf
--- /dev/null
+++ b/arch/powerpc/perf/req-gen/_end.h
@@ -0,0 +1,4 @@
+
+#undef REQ_GEN_PREFIX
+#undef REQUEST_BEGIN
+#undef REQUEST_END
diff --git a/arch/powerpc/perf/req-gen/_request-begin.h b/arch/powerpc/perf/req-gen/_request-begin.h
new file mode 100644
index 000000000000..f6d98642cf1d
--- /dev/null
+++ b/arch/powerpc/perf/req-gen/_request-begin.h
@@ -0,0 +1,15 @@
+
+#define REQUEST(r_contents) \
+ REQUEST_(REQUEST_NAME, REQUEST_NUM, REQUEST_IDX_KIND, I(r_contents))
+
+#define __field(f_offset, f_bytes, f_name) \
+ __field_(REQUEST_NAME, REQUEST_NUM, REQUEST_IDX_KIND, \
+ f_offset, f_bytes, f_name)
+
+#define __array(f_offset, f_bytes, f_name) \
+ __array_(REQUEST_NAME, REQUEST_NUM, REQUEST_IDX_KIND, \
+ f_offset, f_bytes, f_name)
+
+#define __count(f_offset, f_bytes, f_name) \
+ __count_(REQUEST_NAME, REQUEST_NUM, REQUEST_IDX_KIND, \
+ f_offset, f_bytes, f_name)
diff --git a/arch/powerpc/perf/req-gen/_request-end.h b/arch/powerpc/perf/req-gen/_request-end.h
new file mode 100644
index 000000000000..5573be6c3588
--- /dev/null
+++ b/arch/powerpc/perf/req-gen/_request-end.h
@@ -0,0 +1,8 @@
+#undef REQUEST
+#undef __field
+#undef __array
+#undef __count
+
+#undef REQUEST_NAME
+#undef REQUEST_NUM
+#undef REQUEST_IDX_KIND
diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
new file mode 100644
index 000000000000..1b122469323d
--- /dev/null
+++ b/arch/powerpc/perf/req-gen/perf.h
@@ -0,0 +1,155 @@
+#ifndef LINUX_POWERPC_PERF_REQ_GEN_PERF_H_
+#define LINUX_POWERPC_PERF_REQ_GEN_PERF_H_
+
+#include <linux/perf_event.h>
+
+#ifndef REQUEST_FILE
+#error "REQUEST_FILE must be defined before including"
+#endif
+
+#ifndef NAME_LOWER
+#error "NAME_LOWER must be defined before including"
+#endif
+
+#ifndef NAME_UPPER
+#error "NAME_UPPER must be defined before including"
+#endif
+
+#define BE_TYPE_b1 __u8
+#define BE_TYPE_b2 __be16
+#define BE_TYPE_b4 __be32
+#define BE_TYPE_b8 __be64
+
+#define BYTES_TO_BE_TYPE(bytes) \
+ BE_TYPE_b##bytes
+
+#define CAT2_(a, b) a ## b
+#define CAT2(a, b) CAT2_(a, b)
+#define CAT3_(a, b, c) a ## b ## c
+#define CAT3(a, b, c) CAT3_(a, b, c)
+
+/*
+ * enumerate the request values as
+ * <NAME_UPPER>_<request name> = <request value>
+ */
+#define REQUEST_VALUE__(name_upper, r_name) name_upper ## _ ## r_name
+#define REQUEST_VALUE_(name_upper, r_name) REQUEST_VALUE__(name_upper, r_name)
+#define REQUEST_VALUE(r_name) REQUEST_VALUE_(NAME_UPPER, r_name)
+
+#include "_clear.h"
+#define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
+ REQUEST_VALUE(r_name) = r_value,
+enum CAT2(NAME_LOWER, _requests) {
+#include REQUEST_FILE
+};
+
+/*
+ * For each request:
+ * struct <NAME_LOWER>_<request name> {
+ * r_fields
+ * };
+ */
+#include "_clear.h"
+#define STRUCT_NAME__(name_lower, r_name) name_lower ## _ ## r_name
+#define STRUCT_NAME_(name_lower, r_name) STRUCT_NAME__(name_lower, r_name)
+#define STRUCT_NAME(r_name) STRUCT_NAME_(NAME_LOWER, r_name)
+#define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
+struct STRUCT_NAME(r_name) { \
+ r_fields \
+};
+#define __field_(r_name, r_value, r_idx_1, f_offset, f_bytes, f_name) \
+ BYTES_TO_BE_TYPE(f_bytes) f_name;
+#define __count_(r_name, r_value, r_idx_1, f_offset, f_bytes, f_name) \
+ __field_(r_name, r_value, r_idx_1, f_offset, f_bytes, f_name)
+#define __array_(r_name, r_value, r_idx_1, a_offset, a_bytes, a_name) \
+ __u8 a_name[a_bytes];
+
+#include REQUEST_FILE
+
+/*
+ * Generate a check of the field offsets
+ * <NAME_LOWER>_assert_offsets_correct()
+ */
+#include "_clear.h"
+#define REQUEST_(r_name, r_value, index, r_fields) \
+r_fields
+#define __field_(r_name, r_value, r_idx_1, f_offset, f_size, f_name) \
+ BUILD_BUG_ON(offsetof(struct STRUCT_NAME(r_name), f_name) != f_offset);
+#define __count_(r_name, r_value, r_idx_1, c_offset, c_size, c_name) \
+ __field_(r_name, r_value, r_idx_1, c_offset, c_size, c_name)
+#define __array_(r_name, r_value, r_idx_1, a_offset, a_size, a_name) \
+ __field_(r_name, r_value, r_idx_1, a_offset, a_size, a_name)
+
+static inline void CAT2(NAME_LOWER, _assert_offsets_correct)(void)
+{
+#include REQUEST_FILE
+}
+
+/*
+ * Generate event attributes:
+ * PMU_EVENT_ATTR_STRING(<request name>_<field name>,
+ * <NAME_LOWER>_event_attr_<request name>_<field name>,
+ * "request=<request value>"
+ * "starting_index=<starting index type>"
+ * "counter_info_version=CURRENT_COUNTER_INFO_VERSION"
+ * "length=<f_size>"
+ * "offset=<f_offset>")
+ *
+ * TODO: counter_info_version may need to vary; we should interpret the
+ * value to some extent
+ */
+#define EVENT_ATTR_NAME__(name, r_name, c_name) \
+ name ## _event_attr_ ## r_name ## _ ## c_name
+#define EVENT_ATTR_NAME_(name, r_name, c_name) \
+ EVENT_ATTR_NAME__(name, r_name, c_name)
+#define EVENT_ATTR_NAME(r_name, c_name) \
+ EVENT_ATTR_NAME_(NAME_LOWER, r_name, c_name)
+
+#include "_clear.h"
+#define __field_(r_name, r_value, r_idx_1, f_offset, f_size, f_name)
+#define __array_(r_name, r_value, r_idx_1, a_offset, a_size, a_name)
+#define __count_(r_name, r_value, r_idx_1, c_offset, c_size, c_name) \
+PMU_EVENT_ATTR_STRING( \
+ CAT3(r_name, _, c_name), \
+ EVENT_ATTR_NAME(r_name, c_name), \
+ "request=" __stringify(r_value) "," \
+ r_idx_1 "," \
+ "counter_info_version=" \
+ __stringify(COUNTER_INFO_VERSION_CURRENT) "," \
+ "length=" #c_size "," \
+ "offset=" #c_offset)
+#define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
+ r_fields
+
+#include REQUEST_FILE
+
+/*
+ * Define event attribute array
+ * static struct attribute *hv_gpci_event_attrs[] = {
+ * &<NAME_LOWER>_event_attr_<request name>_<field name>.attr,
+ * };
+ */
+#include "_clear.h"
+#define __field_(r_name, r_value, r_idx_1, f_offset, f_size, f_name)
+#define __count_(r_name, r_value, r_idx_1, c_offset, c_size, c_name) \
+ &EVENT_ATTR_NAME(r_name, c_name).attr.attr,
+#define __array_(r_name, r_value, r_idx_1, a_offset, a_size, a_name)
+#define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
+ r_fields
+
+static __maybe_unused struct attribute *hv_gpci_event_attrs[] = {
+#include REQUEST_FILE
+ NULL
+};
+
+/* cleanup */
+#include "_clear.h"
+#undef EVENT_ATTR_NAME
+#undef EVENT_ATTR_NAME_
+#undef BIT_NAME
+#undef BIT_NAME_
+#undef STRUCT_NAME
+#undef REQUEST_VALUE
+#undef REQUEST_VALUE_
+
+#endif
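A note on how the generator above is meant to be read: req-gen/perf.h is an X-macro header, re-including REQUEST_FILE once per pass with REQUEST_()/__field_()/__count_()/__array_() redefined so that each pass emits a different artifact. As an illustration only -- the names NAME_LOWER=hv_gpci, NAME_UPPER=HV_GPCI, the request example_req with value 0x10, and its single 8-byte counter at offset 0 are assumed here, not taken from the patch -- the successive passes expand to roughly:

/* Pass 1: request value enumeration */
enum hv_gpci_requests {
	HV_GPCI_example_req = 0x10,
};

/* Pass 2: per-request structure, big-endian field types from BYTES_TO_BE_TYPE() */
struct hv_gpci_example_req {
	__be64 dispatch_count;		/* from __count(0x0, 8, dispatch_count) */
};

/* Pass 3: compile-time offset checks */
static inline void hv_gpci_assert_offsets_correct(void)
{
	BUILD_BUG_ON(offsetof(struct hv_gpci_example_req, dispatch_count) != 0x0);
}

The remaining passes emit a PMU_EVENT_ATTR_STRING() named hv_gpci_event_attr_example_req_dispatch_count, whose string encodes request=0x10, the index kind, counter_info_version, length=8 and offset=0x0, plus a matching pointer in hv_gpci_event_attrs[].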
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index d2ac1c116454..5538e57c36c1 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -214,7 +214,6 @@ config AKEBONO
select ETHERNET
select NET_VENDOR_IBM
select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII_WOL
select USB if USB_SUPPORT
select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 1ad748bb39b4..5c31d8292d3b 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -162,8 +162,7 @@ int mpc831x_usb_cfg(void)
iounmap(immap);
- if (immr_node)
- of_node_put(immr_node);
+ of_node_put(immr_node);
/* Map USB SOC space */
ret = of_address_to_resource(np, 0, &res);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index f22635a71d01..2fb4b24368a6 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -241,6 +241,12 @@ config SGY_CTS1000
help
Enable this to support functionality in Servergy's CTS-1000 systems.
+config MVME2500
+ bool "Artesyn MVME2500"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the Emerson/Artesyn MVME2500 board.
+
endif # PPC32
config PPC_QEMU_E500
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 730326046625..1fe7fb95175a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o
obj-$(CONFIG_GE_IMP3A) += ge_imp3a.o
obj-$(CONFIG_PPC_QEMU_E500) += qemu_e500.o
obj-$(CONFIG_SGY_CTS1000) += sgy_cts1000.o
+obj-$(CONFIG_MVME2500) += mvme2500.o
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
new file mode 100644
index 000000000000..1233050560ae
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -0,0 +1,74 @@
+/*
+ * Board setup routines for the Emerson/Artesyn MVME2500
+ *
+ * Copyright 2014 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Based on earlier code by:
+ *
+ * Xianghua Xiao (x.xiao@freescale.com)
+ * Tom Armistead (tom.armistead@emerson.com)
+ * Copyright 2012 Emerson
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Author Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ */
+
+#include <linux/pci.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+#include "mpc85xx.h"
+
+void __init mvme2500_pic_init(void)
+{
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init mvme2500_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("mvme2500_setup_arch()", 0);
+ fsl_pci_assign_primary();
+ pr_info("MVME2500 board from Artesyn\n");
+}
+
+machine_arch_initcall(mvme2500, mpc85xx_common_publish_devices);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mvme2500_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "artesyn,MVME2500");
+}
+
+define_machine(mvme2500) {
+ .name = "MVME2500",
+ .probe = mvme2500_probe,
+ .setup_arch = mvme2500_setup_arch,
+ .init_IRQ = mvme2500_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index f22387598040..94170e4f2ce7 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
}
static struct pci_ops scc_pciex_pci_ops = {
- scc_pciex_read_config,
- scc_pciex_write_config,
+ .read = scc_pciex_read_config,
+ .write = scc_pciex_write_config,
};
static void pciex_clear_intr_all(unsigned int __iomem *base)
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index c8017a7bcabd..b64e7ead752f 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -106,7 +106,7 @@ static int __init smp_iic_probe(void)
{
iic_request_IPIs();
- return cpumask_weight(cpu_possible_mask);
+ return num_possible_cpus();
}
static void smp_cell_setup_cpu(int cpu)
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index beeaf4a173e1..df4ad95f183e 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -156,17 +156,6 @@ void mpc7448_hpc2_restart(char *cmd)
for (;;) ; /* Spin until reset happens */
}
-void mpc7448_hpc2_power_off(void)
-{
- local_irq_disable();
- for (;;) ; /* No way to shut power off with software */
-}
-
-void mpc7448_hpc2_halt(void)
-{
- mpc7448_hpc2_power_off();
-}
-
/*
* Called very early, device-tree isn't unflattened
*/
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 04702db35d45..f4071a67ad00 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -133,17 +133,23 @@ static void __init fixup_bus_range(struct device_node *bridge)
|(((unsigned int)(off)) & 0xFCUL) \
|1UL)
-static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
- u8 bus, u8 dev_fn, u8 offset)
+static void __iomem *macrisc_cfg_map_bus(struct pci_bus *bus,
+ unsigned int dev_fn,
+ int offset)
{
unsigned int caddr;
+ struct pci_controller *hose;
- if (bus == hose->first_busno) {
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return NULL;
+
+ if (bus->number == hose->first_busno) {
if (dev_fn < (11 << 3))
return NULL;
caddr = MACRISC_CFA0(dev_fn, offset);
} else
- caddr = MACRISC_CFA1(bus, dev_fn, offset);
+ caddr = MACRISC_CFA1(bus->number, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
@@ -154,129 +160,46 @@ static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
return hose->cfg_data + offset;
}
-static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
-{
- struct pci_controller *hose;
- volatile void __iomem *addr;
-
- hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset >= 0x100)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- /*
- * Note: the caller has already checked that offset is
- * suitably aligned and that len is 1, 2 or 4.
- */
- switch (len) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_le16(addr);
- break;
- default:
- *val = in_le32(addr);
- break;
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 val)
-{
- struct pci_controller *hose;
- volatile void __iomem *addr;
-
- hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset >= 0x100)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- /*
- * Note: the caller has already checked that offset is
- * suitably aligned and that len is 1, 2 or 4.
- */
- switch (len) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_le16(addr, val);
- break;
- default:
- out_le32(addr, val);
- break;
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
static struct pci_ops macrisc_pci_ops =
{
- .read = macrisc_read_config,
- .write = macrisc_write_config,
+ .map_bus = macrisc_cfg_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
#ifdef CONFIG_PPC32
/*
* Verify that a specific (bus, dev_fn) exists on chaos
*/
-static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
+static void __iomem *chaos_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int offset)
{
struct device_node *np;
const u32 *vendor, *device;
if (offset >= 0x100)
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ return NULL;
np = of_pci_find_child_device(bus->dev.of_node, devfn);
if (np == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
+ return NULL;
vendor = of_get_property(np, "vendor-id", NULL);
device = of_get_property(np, "device-id", NULL);
if (vendor == NULL || device == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
+ return NULL;
if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
&& (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
+ return NULL;
-static int
-chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
- int len, u32 *val)
-{
- int result = chaos_validate_dev(bus, devfn, offset);
- if (result == PCIBIOS_BAD_REGISTER_NUMBER)
- *val = ~0U;
- if (result != PCIBIOS_SUCCESSFUL)
- return result;
- return macrisc_read_config(bus, devfn, offset, len, val);
-}
-
-static int
-chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
- int len, u32 val)
-{
- int result = chaos_validate_dev(bus, devfn, offset);
- if (result != PCIBIOS_SUCCESSFUL)
- return result;
- return macrisc_write_config(bus, devfn, offset, len, val);
+ return macrisc_cfg_map_bus(bus, devfn, offset);
}
static struct pci_ops chaos_pci_ops =
{
- .read = chaos_read_config,
- .write = chaos_write_config,
+ .map_bus = chaos_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
static void __init setup_chaos(struct pci_controller *hose,
@@ -471,15 +394,24 @@ static struct pci_ops u3_ht_pci_ops =
|(((unsigned int)(off)) & 0xfcU) \
|1UL)
-static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
- u8 bus, u8 dev_fn, int offset)
+static void __iomem *u4_pcie_cfg_map_bus(struct pci_bus *bus,
+ unsigned int dev_fn,
+ int offset)
{
+ struct pci_controller *hose;
unsigned int caddr;
- if (bus == hose->first_busno) {
+ if (offset >= 0x1000)
+ return NULL;
+
+ hose = pci_bus_to_host(bus);
+ if (!hose)
+ return NULL;
+
+ if (bus->number == hose->first_busno) {
caddr = U4_PCIE_CFA0(dev_fn, offset);
} else
- caddr = U4_PCIE_CFA1(bus, dev_fn, offset);
+ caddr = U4_PCIE_CFA1(bus->number, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
@@ -490,74 +422,11 @@ static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
return hose->cfg_data + offset;
}
-static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
-{
- struct pci_controller *hose;
- volatile void __iomem *addr;
-
- hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset >= 0x1000)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- /*
- * Note: the caller has already checked that offset is
- * suitably aligned and that len is 1, 2 or 4.
- */
- switch (len) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_le16(addr);
- break;
- default:
- *val = in_le32(addr);
- break;
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 val)
-{
- struct pci_controller *hose;
- volatile void __iomem *addr;
-
- hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset >= 0x1000)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- /*
- * Note: the caller has already checked that offset is
- * suitably aligned and that len is 1, 2 or 4.
- */
- switch (len) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_le16(addr, val);
- break;
- default:
- out_le32(addr, val);
- break;
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
static struct pci_ops u4_pcie_pci_ops =
{
- .read = u4_pcie_read_config,
- .write = u4_pcie_write_config,
+ .map_bus = u4_pcie_cfg_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index f241accc053d..6f3c5d33c3af 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,7 +1,7 @@
obj-y += setup.o opal-wrappers.o opal.o opal-async.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
-obj-y += opal-msglog.o opal-hmi.o
+obj-y += opal-msglog.o opal-hmi.o opal-power.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 1d19e7917d7f..e261869adc86 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -68,6 +68,17 @@ static int powernv_eeh_init(void)
if (phb->model == PNV_PHB_MODEL_P7IOC)
eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
+
+ /*
+ * PE#0 should be regarded as valid by EEH core
+ * if it's not the reserved one. Currently, we
+ * have the reserved PE#0 and PE#127 for PHB3
+ * and P7IOC separately. So we should regard
+ * PE#0 as valid for P7IOC.
+ */
+ if (phb->ioda.reserved_pe != 0)
+ eeh_add_flag(EEH_VALID_PE_ZERO);
+
break;
}
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
new file mode 100644
index 000000000000..48bf5b080bcf
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -0,0 +1,65 @@
+/*
+ * PowerNV OPAL power control for graceful shutdown handling
+ *
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+
+#include <asm/opal.h>
+#include <asm/machdep.h>
+
+#define SOFT_OFF 0x00
+#define SOFT_REBOOT 0x01
+
+static int opal_power_control_event(struct notifier_block *nb,
+ unsigned long msg_type, void *msg)
+{
+ struct opal_msg *power_msg = msg;
+ uint64_t type;
+
+ type = be64_to_cpu(power_msg->params[0]);
+
+ switch (type) {
+ case SOFT_REBOOT:
+ /* Fall through. The service processor is responsible for
+ * bringing the machine back up */
+ case SOFT_OFF:
+ pr_info("OPAL: poweroff requested\n");
+ orderly_poweroff(true);
+ break;
+ default:
+ pr_err("OPAL: power control type unexpected %016llx\n", type);
+ }
+
+ return 0;
+}
+
+static struct notifier_block opal_power_control_nb = {
+ .notifier_call = opal_power_control_event,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int __init opal_power_control_init(void)
+{
+ int ret;
+
+ ret = opal_message_notifier_register(OPAL_MSG_SHUTDOWN,
+ &opal_power_control_nb);
+ if (ret) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+machine_subsys_initcall(powernv, opal_power_control_init);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r12,opal_tracepoint_refcount@toc(r2); \
- std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
1:
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index f10b9ec8c1f5..18fd4e71c9c1 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -208,7 +208,7 @@ static int __init opal_register_exception_handlers(void)
* start catching/handling HMI directly in Linux.
*/
if (!opal_check_token(OPAL_HANDLE_HMI)) {
- pr_info("opal: Old firmware detected, OPAL handles HMIs.\n");
+ pr_info("Old firmware detected, OPAL handles HMIs.\n");
opal_register_exception_handler(
OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
0, glue);
@@ -667,7 +667,13 @@ static void __init opal_dump_region_init(void)
/* Register kernel log buffer */
addr = log_buf_addr_get();
+ if (addr == NULL)
+ return;
+
size = log_buf_len_get();
+ if (size == 0)
+ return;
+
rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
__pa(addr), size);
/* Don't warn if this is just an older OPAL that doesn't
@@ -695,15 +701,54 @@ static void opal_i2c_create_devs(void)
of_platform_device_create(np, NULL, NULL);
}
+static void __init opal_irq_init(struct device_node *dn)
+{
+ const __be32 *irqs;
+ int i, irqlen;
+
+ /* Get interrupt property */
+ irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
+ opal_irq_count = irqs ? (irqlen / 4) : 0;
+ pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
+ if (!opal_irq_count)
+ return;
+
+ /* Install interrupt handlers */
+ opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
+ for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
+ unsigned int irq, virq;
+ int rc;
+
+ /* Get hardware and virtual IRQ */
+ irq = be32_to_cpup(irqs);
+ virq = irq_create_mapping(NULL, irq);
+ if (virq == NO_IRQ) {
+ pr_warn("Failed to map irq 0x%x\n", irq);
+ continue;
+ }
+
+ /* Install interrupt handler */
+ rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
+ if (rc) {
+ irq_dispose_mapping(virq);
+ pr_warn("Error %d requesting irq %d (0x%x)\n",
+ rc, virq, irq);
+ continue;
+ }
+
+ /* Cache IRQ */
+ opal_irqs[i] = virq;
+ }
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
- const __be32 *irqs;
- int rc, i, irqlen;
+ int rc;
opal_node = of_find_node_by_path("/ibm,opal");
if (!opal_node) {
- pr_warn("opal: Node not found\n");
+ pr_warn("Device node not found\n");
return -ENODEV;
}
@@ -725,24 +770,7 @@ static int __init opal_init(void)
opal_i2c_create_devs();
/* Find all OPAL interrupts and request them */
- irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
- pr_debug("opal: Found %d interrupts reserved for OPAL\n",
- irqs ? (irqlen / 4) : 0);
- opal_irq_count = irqlen / 4;
- opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
- for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
- unsigned int hwirq = be32_to_cpup(irqs);
- unsigned int irq = irq_create_mapping(NULL, hwirq);
- if (irq == NO_IRQ) {
- pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
- continue;
- }
- rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
- if (rc)
- pr_warning("opal: Error %d requesting irq %d"
- " (0x%x)\n", rc, irq, hwirq);
- opal_irqs[i] = irq;
- }
+ opal_irq_init(opal_node);
/* Create "opal" kobject under /sys/firmware */
rc = opal_sysfs_init();
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index fac88ed8a915..6c9ff2b95119 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -75,6 +75,28 @@ static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
#define pe_info(pe, fmt, ...) \
pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
+static bool pnv_iommu_bypass_disabled __read_mostly;
+
+static int __init iommu_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
+ if (!strncmp(str, "nobypass", 8)) {
+ pnv_iommu_bypass_disabled = true;
+ pr_info("PowerNV: IOMMU bypass window disabled.\n");
+ break;
+ }
+ str += strcspn(str, ",");
+ if (*str == ',')
+ str++;
+ }
+
+ return 0;
+}
+early_param("iommu", iommu_setup);
+
/*
* stdcix is only supposed to be used in hypervisor real mode as per
* the architecture spec
@@ -357,6 +379,9 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
phb->ioda.m64_base = pci_addr;
+ pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
+ res->start, res->end, pci_addr);
+
/* Use last M64 BAR to cover M64 window */
phb->ioda.m64_bar_idx = 15;
phb->init_m64 = pnv_ioda2_init_m64;
@@ -1348,7 +1373,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
/* Also create a bypass window */
- pnv_pci_ioda2_setup_bypass_pe(phb, pe);
+ if (!pnv_iommu_bypass_disabled)
+ pnv_pci_ioda2_setup_bypass_pe(phb, pe);
+
return;
fail:
if (pe->tce32_seg >= 0)
@@ -1460,15 +1487,15 @@ static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
#ifdef CONFIG_CXL_BASE
-struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev)
+struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- return hose->dn;
+ return of_node_get(hose->dn);
}
-EXPORT_SYMBOL(pnv_pci_to_phb_node);
+EXPORT_SYMBOL(pnv_pci_get_phb_node);
-int pnv_phb_to_cxl(struct pci_dev *dev)
+int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -1481,13 +1508,13 @@ int pnv_phb_to_cxl(struct pci_dev *dev)
pe_info(pe, "Switching PHB to CXL\n");
- rc = opal_pci_set_phb_cxl_mode(phb->opal_id, 1, pe->pe_number);
+ rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
if (rc)
dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
return rc;
}
-EXPORT_SYMBOL(pnv_phb_to_cxl);
+EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
/* Find PHB for cxl dev and allocate MSI hwirqs?
* Returns the absolute hardware IRQ number
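Usage note for the new early_param in this file: the parser walks the comma-separated iommu= suboptions looking for "nobypass" and ignores the rest, so the IODA2 bypass window can be disabled from the boot loader with a kernel command line such as:

	iommu=nobypass

which sets pnv_iommu_bypass_disabled and skips pnv_pci_ioda2_setup_bypass_pe() in the hunk that follows.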
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 4945e87f12dc..e69142f4af08 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -781,35 +781,6 @@ static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
-static int pnv_pci_probe_mode(struct pci_bus *bus)
-{
- struct pci_controller *hose = pci_bus_to_host(bus);
- const __be64 *tstamp;
- u64 now, target;
-
-
- /* We hijack this as a way to ensure we have waited long
- * enough since the reset was lifted on the PCI bus
- */
- if (bus != hose->bus)
- return PCI_PROBE_NORMAL;
- tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
- if (!tstamp || !*tstamp)
- return PCI_PROBE_NORMAL;
-
- now = mftb() / tb_ticks_per_usec;
- target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
- + PCI_RESET_DELAY_US;
-
- pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
- hose->global_number, target, now);
-
- if (now < target)
- msleep((target - now + 999) / 1000);
-
- return PCI_PROBE_NORMAL;
-}
-
void __init pnv_pci_init(void)
{
struct device_node *np;
@@ -856,7 +827,6 @@ void __init pnv_pci_init(void)
ppc_md.tce_build_rm = pnv_tce_build_rm;
ppc_md.tce_free_rm = pnv_tce_free_rm;
ppc_md.tce_get = pnv_tce_get;
- ppc_md.pci_probe_mode = pnv_pci_probe_mode;
set_pci_dma_ops(&dma_iommu_ops);
/* Configure MSIs */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index b700a329c31d..d2de7d5d7574 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
* all cpus at boot. Get these reg values of current cpu and use the
* same across all cpus.
*/
- uint64_t lpcr_val = mfspr(SPRN_LPCR);
+ uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
uint64_t hid0_val = mfspr(SPRN_HID0);
uint64_t hid1_val = mfspr(SPRN_HID1);
uint64_t hid4_val = mfspr(SPRN_HID4);
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 0c9f643d9e2a..b0f34663b1ae 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -223,6 +223,44 @@ void ps3_mm_vas_destroy(void)
}
}
+static int ps3_mm_get_repository_highmem(struct mem_region *r)
+{
+ int result;
+
+ /* Assume a single highmem region. */
+
+ result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
+
+ if (result)
+ goto zero_region;
+
+ if (!r->base || !r->size) {
+ result = -1;
+ goto zero_region;
+ }
+
+ r->offset = r->base - map.rm.size;
+
+ DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
+ __func__, __LINE__, r->base, r->size);
+
+ return 0;
+
+zero_region:
+ DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
+
+ r->size = r->base = r->offset = 0;
+ return result;
+}
+
+static int ps3_mm_set_repository_highmem(const struct mem_region *r)
+{
+ /* Assume a single highmem region. */
+
+ return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
+ ps3_repository_write_highmem_info(0, 0, 0);
+}
+
/**
* ps3_mm_region_create - create a memory region in the vas
* @r: pointer to a struct mem_region to accept initialized values
@@ -291,36 +329,7 @@ static void ps3_mm_region_destroy(struct mem_region *r)
r->size = r->base = r->offset = 0;
map.total = map.rm.size;
}
-}
-
-static int ps3_mm_get_repository_highmem(struct mem_region *r)
-{
- int result;
-
- /* Assume a single highmem region. */
-
- result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
-
- if (result)
- goto zero_region;
-
- if (!r->base || !r->size) {
- result = -1;
- goto zero_region;
- }
-
- r->offset = r->base - map.rm.size;
-
- DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
- __func__, __LINE__, r->base, r->size);
-
- return 0;
-
-zero_region:
- DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
-
- r->size = r->base = r->offset = 0;
- return result;
+ ps3_mm_set_repository_highmem(NULL);
}
/*============================================================================*/
@@ -1210,8 +1219,12 @@ void __init ps3_mm_init(void)
/* Check if we got the highmem region from an earlier boot step */
- if (ps3_mm_get_repository_highmem(&map.r1))
- ps3_mm_region_create(&map.r1, map.total - map.rm.size);
+ if (ps3_mm_get_repository_highmem(&map.r1)) {
+ result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);
+
+ if (!result)
+ ps3_mm_set_repository_highmem(&map.r1);
+ }
/* correct map.total for the real total amount of memory we use */
map.total = map.rm.size + map.r1.size;
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index d71329a8e325..1809cfc562ee 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -196,6 +196,7 @@ int ps3_repository_read_highmem_size(unsigned int region_index,
int ps3_repository_read_highmem_info(unsigned int region_index,
u64 *highmem_base, u64 *highmem_size);
+#if defined (CONFIG_PS3_REPOSITORY_WRITE)
int ps3_repository_write_highmem_region_count(unsigned int region_count);
int ps3_repository_write_highmem_base(unsigned int region_index,
u64 highmem_base);
@@ -204,6 +205,18 @@ int ps3_repository_write_highmem_size(unsigned int region_index,
int ps3_repository_write_highmem_info(unsigned int region_index,
u64 highmem_base, u64 highmem_size);
int ps3_repository_delete_highmem_info(unsigned int region_index);
+#else
+static inline int ps3_repository_write_highmem_region_count(
+ unsigned int region_count) {return 0;}
+static inline int ps3_repository_write_highmem_base(unsigned int region_index,
+ u64 highmem_base) {return 0;}
+static inline int ps3_repository_write_highmem_size(unsigned int region_index,
+ u64 highmem_size) {return 0;}
+static inline int ps3_repository_write_highmem_info(unsigned int region_index,
+ u64 highmem_base, u64 highmem_size) {return 0;}
+static inline int ps3_repository_delete_highmem_info(unsigned int region_index)
+ {return 0;}
+#endif
/* repository pme info */
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 756b482f819a..a758a9c3bbba 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -34,6 +34,16 @@ config PPC_SPLPAR
processors, that is, which share physical processors between
two or more partitions.
+config DTL
+ bool "Dispatch Trace Log"
+ depends on PPC_SPLPAR && DEBUG_FS
+ help
+ SPLPAR machines can log hypervisor preempt & dispatch events to a
+ kernel buffer. Saying Y here will enable logging these events,
+ which are accessible through a debugfs file.
+
+ Say N if you are unsure.
+
config PSERIES_MSI
bool
depends on PCI_MSI && PPC_PSERIES && EEH
@@ -123,13 +133,3 @@ config HV_PERF_CTRS
systems. 24x7 is available on Power 8 systems.
If unsure, select Y.
-
-config DTL
- bool "Dispatch Trace Log"
- depends on PPC_SPLPAR && DEBUG_FS
- help
- SPLPAR machines can log hypervisor preempt & dispatch events to a
- kernel buffer. Saying Y here will enable logging these events,
- which are accessible through a debugfs file.
-
- Say N if you are unsure.
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index f30cf4d136a4..62475440fd45 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -272,7 +272,7 @@ static int pseries_add_processor(struct device_node *np)
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
- cpumask_weight(cpu_possible_mask));
+ num_possible_cpus());
goto out_unlock;
}
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index e7cb6d4a871a..90cf3dcbd9f2 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -316,34 +316,24 @@ void post_mobility_fixup(void)
static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
- struct rtas_args args;
u64 streamid;
int rc;
+ int vasi_rc = 0;
rc = kstrtou64(buf, 0, &streamid);
if (rc)
return rc;
- memset(&args, 0, sizeof(args));
- args.token = rtas_token("ibm,suspend-me");
- args.nargs = 2;
- args.nret = 1;
-
- args.args[0] = streamid >> 32 ;
- args.args[1] = streamid & 0xffffffff;
- args.rets = &args.args[args.nargs];
-
do {
- args.rets[0] = 0;
- rc = rtas_ibm_suspend_me(&args);
- if (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE)
+ rc = rtas_ibm_suspend_me(streamid, &vasi_rc);
+ if (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE)
ssleep(1);
- } while (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE);
+ } while (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE);
if (rc)
return rc;
- else if (args.rets[0])
- return args.rets[0];
+ if (vasi_rc)
+ return vasi_rc;
post_mobility_fixup();
return count;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index c3b2a7e81ddb..02e4a1745516 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -89,6 +89,8 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of power reported by firmware, system is "
"running on UPS/battery");
+ pr_emerg("Check RTAS error log for details");
+ orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index f532c92bf99d..ee90db17b097 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -139,26 +139,17 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
* axon_ram_direct_access - direct_access() method for block device
* @device, @sector, @data: see block_device_operations method
*/
-static int
+static long
axon_ram_direct_access(struct block_device *device, sector_t sector,
- void **kaddr, unsigned long *pfn)
+ void **kaddr, unsigned long *pfn, long size)
{
struct axon_ram_bank *bank = device->bd_disk->private_data;
- loff_t offset;
-
- offset = sector;
- if (device->bd_part != NULL)
- offset += device->bd_part->start_sect;
- offset <<= AXON_RAM_SECTOR_SHIFT;
- if (offset >= bank->size) {
- dev_err(&bank->device->dev, "Access outside of address space\n");
- return -ERANGE;
- }
+ loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
*kaddr = (void *)(bank->ph_addr + offset);
- *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
+ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
- return 0;
+ return bank->size - offset;
}
static const struct block_device_operations axon_ram_devops = {
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 6455c1eada1a..4b74c276e427 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -68,13 +68,10 @@ static int fsl_pcie_check_link(struct pci_controller *hose)
u32 val = 0;
if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
- if (hose->ops->read == fsl_indirect_read_config) {
- struct pci_bus bus;
- bus.number = hose->first_busno;
- bus.sysdata = hose;
- bus.ops = hose->ops;
- indirect_read_config(&bus, 0, PCIE_LTSSM, 4, &val);
- } else
+ if (hose->ops->read == fsl_indirect_read_config)
+ __indirect_read_config(hose, hose->first_busno, 0,
+ PCIE_LTSSM, 4, &val);
+ else
early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
if (val < PCIE_LTSSM_L0)
return 1;
@@ -645,61 +642,21 @@ mapped:
return pcie->cfg_type1 + offset;
}
-static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
-{
- void __iomem *cfg_addr;
-
- cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
- if (!cfg_addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- switch (len) {
- case 1:
- *val = in_8(cfg_addr);
- break;
- case 2:
- *val = in_le16(cfg_addr);
- break;
- default:
- *val = in_le32(cfg_addr);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
- void __iomem *cfg_addr;
-
- cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
- if (!cfg_addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
val &= 0xffffff00;
- switch (len) {
- case 1:
- out_8(cfg_addr, val);
- break;
- case 2:
- out_le16(cfg_addr, val);
- break;
- default:
- out_le32(cfg_addr, val);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
+ return pci_generic_config_write(bus, devfn, offset, len, val);
}
static struct pci_ops mpc83xx_pcie_ops = {
- .read = mpc83xx_pcie_read_config,
+ .map_bus = mpc83xx_pcie_remap_cfg,
+ .read = pci_generic_config_read,
.write = mpc83xx_pcie_write_config,
};
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 1f6c570d66d4..692de9dbc680 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -20,31 +20,31 @@
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
-int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
+int __indirect_read_config(struct pci_controller *hose,
+ unsigned char bus_number, unsigned int devfn,
+ int offset, int len, u32 *val)
{
- struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
- if (bus->number != hose->first_busno)
+ if (bus_number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (ppc_md.pci_exclude_device)
- if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
+ if (ppc_md.pci_exclude_device(hose, bus_number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
- if (bus->number != hose->first_busno)
+ if (bus_number != hose->first_busno)
cfg_type = 1;
- bus_no = (bus->number == hose->first_busno) ?
- hose->self_busno : bus->number;
+ bus_no = (bus_number == hose->first_busno) ?
+ hose->self_busno : bus_number;
if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
@@ -77,6 +77,15 @@ int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
+int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+
+ return __indirect_read_config(hose, bus->number, devfn, offset, len,
+ val);
+}
+
int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index c4648ad5c1f3..bbfbbf2025fd 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1929,7 +1929,7 @@ int __init smp_mpic_probe(void)
DBG("smp_mpic_probe()...\n");
- nr_cpus = cpumask_weight(cpu_possible_mask);
+ nr_cpus = num_possible_cpus();
DBG("nr_cpus: %d\n", nr_cpus);
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index ed9970ff8d94..f366d2d4c079 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -145,7 +145,7 @@ static int hsta_msi_probe(struct platform_device *pdev)
ppc4xx_hsta_msi.address = mem->start;
ppc4xx_hsta_msi.data = ioremap(mem->start, resource_size(mem));
ppc4xx_hsta_msi.irq_count = irq_count;
- if (IS_ERR(ppc4xx_hsta_msi.data)) {
+ if (!ppc4xx_hsta_msi.data) {
dev_err(dev, "Unable to map memory\n");
return -ENOMEM;
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 1f29cee8da7b..c2518cdb7ddb 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -497,7 +497,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
* saved microcode information and put in the new.
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
- strcpy(qe_firmware_info.id, firmware->id);
+ strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
qe_firmware_info.extended_modes = firmware->extended_modes;
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));
@@ -583,8 +583,8 @@ struct qe_firmware_info *qe_get_firmware_info(void)
/* Copy the data into qe_firmware_info*/
sprop = of_get_property(fw, "id", NULL);
if (sprop)
- strncpy(qe_firmware_info.id, sprop,
- sizeof(qe_firmware_info.id) - 1);
+ strlcpy(qe_firmware_info.id, sprop,
+ sizeof(qe_firmware_info.id));
prop = of_find_property(fw, "extended-modes", NULL);
if (prop && (prop->length == sizeof(u64))) {
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 4ba554ec8eaf..68c7e5cc98e0 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -131,10 +131,8 @@ static int ics_opal_set_affinity(struct irq_data *d,
wanted_server = xics_get_irq_server(d->irq, cpumask, 1);
if (wanted_server < 0) {
- char cpulist[128];
- cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
- pr_warning("%s: No online cpus in the mask %s for irq %d\n",
- __func__, cpulist, d->irq);
+ pr_warning("%s: No online cpus in the mask %*pb for irq %d\n",
+ __func__, cpumask_pr_args(cpumask), d->irq);
return -1;
}
server = ics_opal_mangle_server(wanted_server);
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index bc81335b2cbc..0af97deb83f3 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -140,11 +140,8 @@ static int ics_rtas_set_affinity(struct irq_data *d,
irq_server = xics_get_irq_server(d->irq, cpumask, 1);
if (irq_server == -1) {
- char cpulist[128];
- cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
- printk(KERN_WARNING
- "%s: No online cpus in the mask %s for irq %d\n",
- __func__, cpulist, d->irq);
+ pr_warning("%s: No online cpus in the mask %*pb for irq %d\n",
+ __func__, cpumask_pr_args(cpumask), d->irq);
return -1;
}
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 365249cd346b..125743b58c70 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -148,7 +148,7 @@ int __init xics_smp_probe(void)
/* Register all the IPIs */
xics_request_ipi();
- return cpumask_weight(cpu_possible_mask);
+ return num_possible_cpus();
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5b150f0c5df9..e599259d84fc 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/bug.h>
#include <linux/nmi.h>
+#include <linux/ctype.h>
#include <asm/ptrace.h>
#include <asm/string.h>
@@ -183,14 +184,6 @@ extern void xmon_leave(void);
#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
#endif
-#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'f') \
- || ('A' <= (c) && (c) <= 'F'))
-#define isalnum(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'z') \
- || ('A' <= (c) && (c) <= 'Z'))
-#define isspace(c) (c == ' ' || c == '\t' || c == 10 || c == 13 || c == 0)
-
static char *help_string = "\
Commands:\n\
b show breakpoints\n\
@@ -337,6 +330,7 @@ static inline void disable_surveillance(void)
args.token = rtas_token("set-indicator");
if (args.token == RTAS_UNKNOWN_SERVICE)
return;
+ args.token = cpu_to_be32(args.token);
args.nargs = cpu_to_be32(3);
args.nret = cpu_to_be32(1);
args.rets = &args.args[3];
@@ -2164,9 +2158,6 @@ static void dump_pacas(void)
}
#endif
-#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'f') \
- || ('A' <= (c) && (c) <= 'F'))
static void
dump(void)
{
@@ -2569,7 +2560,7 @@ scanhex(unsigned long *vp)
int i;
for (i=0; i<63; i++) {
c = inchar();
- if (isspace(c)) {
+ if (isspace(c) || c == '\0') {
termch = c;
break;
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 68b68d755fdf..373cd5badf1c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -66,6 +66,7 @@ config S390
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
@@ -116,7 +117,6 @@ config S390
select HAVE_BPF_JIT if 64BIT && PACK_STACK
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
- select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE if 64BIT
select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
@@ -151,7 +151,6 @@ config S390
select TTY
select VIRT_CPU_ACCOUNTING
select VIRT_TO_BUS
- select ARCH_HAS_SG_CHAIN
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -185,6 +184,10 @@ config HAVE_MARCH_ZEC12_FEATURES
def_bool n
select HAVE_MARCH_Z196_FEATURES
+config HAVE_MARCH_Z13_FEATURES
+ def_bool n
+ select HAVE_MARCH_ZEC12_FEATURES
+
choice
prompt "Processor type"
default MARCH_G5
@@ -244,6 +247,14 @@ config MARCH_ZEC12
2827 series). The kernel will be slightly faster but will not work on
older machines.
+config MARCH_Z13
+ bool "IBM z13"
+ select HAVE_MARCH_Z13_FEATURES if 64BIT
+ help
+ Select this to enable optimizations for IBM z13 (2964 series).
+ The kernel will be slightly faster but will not work on older
+ machines.
+
endchoice
config MARCH_G5_TUNE
@@ -267,6 +278,9 @@ config MARCH_Z196_TUNE
config MARCH_ZEC12_TUNE
def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+config MARCH_Z13_TUNE
+ def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -305,6 +319,9 @@ config TUNE_Z196
config TUNE_ZEC12
bool "IBM zBC12 and zEC12"
+config TUNE_Z13
+ bool "IBM z13"
+
endchoice
config 64BIT
@@ -356,14 +373,14 @@ config SMP
Even if you don't know what to do here, say Y.
config NR_CPUS
- int "Maximum number of CPUs (2-256)"
- range 2 256
+ int "Maximum number of CPUs (2-512)"
+ range 2 512
depends on SMP
default "32" if !64BIT
default "64" if 64BIT
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 256 and the
+ kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
@@ -378,17 +395,26 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_SMT
+ def_bool n
+
config SCHED_MC
def_bool n
config SCHED_BOOK
+ def_bool n
+
+config SCHED_TOPOLOGY
def_bool y
- prompt "Book scheduler support"
+ prompt "Topology scheduler support"
depends on SMP
+ select SCHED_SMT
select SCHED_MC
+ select SCHED_BOOK
help
- Book scheduler support improves the CPU scheduler's decision making
- when dealing with machines that have several books.
+ Topology scheduler support improves the CPU scheduler's decision
+ making when dealing with machines that have multi-threading,
+ multiple cores or multiple books.
source kernel/Kconfig.preempt
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 878e67973151..acb6859c6a95 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
+mflags-$(CONFIG_MARCH_Z13) := -march=z13
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
@@ -53,6 +54,7 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
+cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
@@ -85,6 +87,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_FUNCTION_TRACER
+# make use of hotpatch feature if the compiler supports it
+cc_hotpatch := -mhotpatch=0,3
+ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
+CC_FLAGS_FTRACE := $(cc_hotpatch)
+KBUILD_AFLAGS += -DCC_USING_HOTPATCH
+KBUILD_CFLAGS += -DCC_USING_HOTPATCH
+endif
+endif
+
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_AFLAGS += $(aflags-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 57cbaff1f397..42506b371b74 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -8,6 +8,7 @@
#include <asm/uaccess.h>
#include <asm/page.h>
+#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sizes.h"
@@ -63,8 +64,6 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_unxz.c"
#endif
-extern _sclp_print_early(const char *);
-
static int puts(const char *s)
{
_sclp_print_early(s);
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 9432d0f202ef..64707750c780 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -555,7 +555,6 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
-CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_RB=y
@@ -563,6 +562,7 @@ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 219dca6ea926..5c3097272cd8 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -540,6 +540,7 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 822c2f2e0c25..bda70f1ffd2c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -537,6 +537,7 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 9d63051ebec4..1b0184a0f7f2 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -71,6 +71,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 1f272b24fc0b..5566ce80abdb 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -134,7 +134,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(need_fallback(sctx->key_len))) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
@@ -159,7 +159,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(need_fallback(sctx->key_len))) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 785c5f24d6f9..83ef702d2403 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -14,7 +14,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
@@ -22,12 +21,8 @@ CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
-CONFIG_RD_LZO=y
-CONFIG_RD_LZ4=y
CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 06f8d95a16cd..2ee25ba252d6 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
+s390_hypfs-objs += hypfs_diag0c.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index b34b5ab90a31..eecde500ed49 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -37,6 +37,10 @@ extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct dentry *root);
+/* VM diagnose 0c */
+int hypfs_diag0c_init(void);
+void hypfs_diag0c_exit(void);
+
/* Set Partition-Resource Parameter */
int hypfs_sprp_init(void);
void hypfs_sprp_exit(void);
@@ -49,7 +53,6 @@ struct hypfs_dbfs_data {
void *buf_free_ptr;
size_t size;
struct hypfs_dbfs_file *dbfs_file;
- struct kref kref;
};
struct hypfs_dbfs_file {
@@ -61,8 +64,6 @@ struct hypfs_dbfs_file {
unsigned long);
/* Private data for hypfs_dbfs.c */
- struct hypfs_dbfs_data *data;
- struct delayed_work data_free_work;
struct mutex lock;
struct dentry *dentry;
};
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 47fe1055c714..752f6df3e697 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -17,33 +17,16 @@ static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
- kref_init(&data->kref);
data->dbfs_file = f;
return data;
}
-static void hypfs_dbfs_data_free(struct kref *kref)
+static void hypfs_dbfs_data_free(struct hypfs_dbfs_data *data)
{
- struct hypfs_dbfs_data *data;
-
- data = container_of(kref, struct hypfs_dbfs_data, kref);
data->dbfs_file->data_free(data->buf_free_ptr);
kfree(data);
}
-static void data_free_delayed(struct work_struct *work)
-{
- struct hypfs_dbfs_data *data;
- struct hypfs_dbfs_file *df;
-
- df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
- mutex_lock(&df->lock);
- data = df->data;
- df->data = NULL;
- mutex_unlock(&df->lock);
- kref_put(&data->kref, hypfs_dbfs_data_free);
-}
-
static ssize_t dbfs_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
@@ -56,28 +39,21 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
df = file_inode(file)->i_private;
mutex_lock(&df->lock);
- if (!df->data) {
- data = hypfs_dbfs_data_alloc(df);
- if (!data) {
- mutex_unlock(&df->lock);
- return -ENOMEM;
- }
- rc = df->data_create(&data->buf, &data->buf_free_ptr,
- &data->size);
- if (rc) {
- mutex_unlock(&df->lock);
- kfree(data);
- return rc;
- }
- df->data = data;
- schedule_delayed_work(&df->data_free_work, HZ);
+ data = hypfs_dbfs_data_alloc(df);
+ if (!data) {
+ mutex_unlock(&df->lock);
+ return -ENOMEM;
+ }
+ rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size);
+ if (rc) {
+ mutex_unlock(&df->lock);
+ kfree(data);
+ return rc;
}
- data = df->data;
- kref_get(&data->kref);
mutex_unlock(&df->lock);
rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
- kref_put(&data->kref, hypfs_dbfs_data_free);
+ hypfs_dbfs_data_free(data);
return rc;
}
@@ -108,7 +84,6 @@ int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
if (IS_ERR(df->dentry))
return PTR_ERR(df->dentry);
mutex_init(&df->lock);
- INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
return 0;
}
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
new file mode 100644
index 000000000000..d4c0d3717543
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -0,0 +1,139 @@
+/*
+ * Hypervisor filesystem for Linux on s390
+ *
+ * Diag 0C implementation
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/hypfs.h>
+#include "hypfs.h"
+
+#define DBFS_D0C_HDR_VERSION 0
+
+/*
+ * Execute diagnose 0c in 31 bit mode
+ */
+static void diag0c(struct hypfs_diag0c_entry *entry)
+{
+ asm volatile (
+#ifdef CONFIG_64BIT
+ " sam31\n"
+ " diag %0,%0,0x0c\n"
+ " sam64\n"
+#else
+ " diag %0,%0,0x0c\n"
+#endif
+ : /* no output register */
+ : "a" (entry)
+ : "memory");
+}
+
+/*
+ * Get hypfs_diag0c_entry from CPU vector and store diag0c data
+ */
+static void diag0c_fn(void *data)
+{
+ diag0c(((void **) data)[smp_processor_id()]);
+}
+
+/*
+ * Allocate buffer and store diag 0c data
+ */
+static void *diag0c_store(unsigned int *count)
+{
+ struct hypfs_diag0c_data *diag0c_data;
+ unsigned int cpu_count, cpu, i;
+ void **cpu_vec;
+
+ get_online_cpus();
+ cpu_count = num_online_cpus();
+ cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
+ if (!cpu_vec)
+ goto fail_put_online_cpus;
+ /* Note: Diag 0c needs 8 byte alignment and real storage */
+ diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) +
+ cpu_count * sizeof(struct hypfs_diag0c_entry),
+ GFP_KERNEL | GFP_DMA);
+ if (!diag0c_data)
+ goto fail_kfree_cpu_vec;
+ i = 0;
+ /* Fill CPU vector for each online CPU */
+ for_each_online_cpu(cpu) {
+ diag0c_data->entry[i].cpu = cpu;
+ cpu_vec[cpu] = &diag0c_data->entry[i++];
+ }
+ /* Collect data on all CPUs */
+ on_each_cpu(diag0c_fn, cpu_vec, 1);
+ *count = cpu_count;
+ kfree(cpu_vec);
+ put_online_cpus();
+ return diag0c_data;
+
+fail_kfree_cpu_vec:
+ kfree(cpu_vec);
+fail_put_online_cpus:
+ put_online_cpus();
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Hypfs DBFS callback: Free diag 0c data
+ */
+static void dbfs_diag0c_free(const void *data)
+{
+ kfree(data);
+}
+
+/*
+ * Hypfs DBFS callback: Create diag 0c data
+ */
+static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
+{
+ struct hypfs_diag0c_data *diag0c_data;
+ unsigned int count;
+
+ diag0c_data = diag0c_store(&count);
+ if (IS_ERR(diag0c_data))
+ return PTR_ERR(diag0c_data);
+ memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
+ get_tod_clock_ext(diag0c_data->hdr.tod_ext);
+ diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
+ diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
+ diag0c_data->hdr.count = count;
+ *data = diag0c_data;
+ *data_free_ptr = diag0c_data;
+ *size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
+ return 0;
+}
+
+/*
+ * Hypfs DBFS file structure
+ */
+static struct hypfs_dbfs_file dbfs_file_0c = {
+ .name = "diag_0c",
+ .data_create = dbfs_diag0c_create,
+ .data_free = dbfs_diag0c_free,
+};
+
+/*
+ * Initialize diag 0c interface for z/VM
+ */
+int __init hypfs_diag0c_init(void)
+{
+ if (!MACHINE_IS_VM)
+ return 0;
+ return hypfs_dbfs_create_file(&dbfs_file_0c);
+}
+
+/*
+ * Shutdown diag 0c interface for z/VM
+ */
+void hypfs_diag0c_exit(void)
+{
+ if (!MACHINE_IS_VM)
+ return;
+ hypfs_dbfs_remove_file(&dbfs_file_0c);
+}
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 32040ace00ea..afbe07907c10 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
- char tod_ext[16]; /* TOD clock for d2fc */
+ char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c952b981e4f2..4c8008dd938e 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -482,10 +482,14 @@ static int __init hypfs_init(void)
rc = -ENODATA;
goto fail_hypfs_vm_exit;
}
+ if (hypfs_diag0c_init()) {
+ rc = -ENODATA;
+ goto fail_hypfs_sprp_exit;
+ }
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
if (!s390_kobj) {
rc = -ENOMEM;
- goto fail_hypfs_sprp_exit;
+ goto fail_hypfs_diag0c_exit;
}
rc = register_filesystem(&hypfs_type);
if (rc)
@@ -494,6 +498,8 @@ static int __init hypfs_init(void)
fail_filesystem:
kobject_put(s390_kobj);
+fail_hypfs_diag0c_exit:
+ hypfs_diag0c_exit();
fail_hypfs_sprp_exit:
hypfs_sprp_exit();
fail_hypfs_vm_exit:
@@ -510,6 +516,7 @@ static void __exit hypfs_exit(void)
{
unregister_filesystem(&hypfs_type);
kobject_put(s390_kobj);
+ hypfs_diag0c_exit();
hypfs_sprp_exit();
hypfs_vm_exit();
hypfs_diag_exit();
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 3e20383d0921..58fae7d098cf 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -4,10 +4,6 @@
/* Caches aren't brain-dead on the s390. */
#include <asm-generic/cacheflush.h>
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index cb700d54bd83..5243a8679a1d 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val)
return cc;
}
+/* Store CPU counter multiple for the MT utilization counter set */
+static inline int stcctm5(u64 num, u64 *val)
+{
+ typedef struct { u64 _[num]; } addrtype;
+ int cc;
+
+ asm volatile (
+ " .insn rsy,0xeb0000000017,%2,5,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
+ return cc;
+}
+
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f6e43d39e3d8..c9df40b5c0ac 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE randomize_et_dyn()
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do { \
} while (0)
#endif /* CONFIG_COMPAT */
-#define STACK_RND_MASK 0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK (mmap_rnd_mask)
#define ARCH_DLINFO \
do { \
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index abb618f1ead2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -3,8 +3,12 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifdef CC_USING_HOTPATCH
+#define MCOUNT_INSN_SIZE 6
+#else
#define MCOUNT_INSN_SIZE 24
#define MCOUNT_RETURN_FIXUP 18
+#endif
#ifndef __ASSEMBLY__
@@ -37,18 +41,29 @@ struct ftrace_insn {
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
/* jg .+24 */
insn->opc = 0xc0f4;
insn->disp = MCOUNT_INSN_SIZE / 2;
#endif
+#endif
}
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ if (insn->disp == 0)
+ return 1;
+#else
if (insn->disp == MCOUNT_INSN_SIZE / 2)
return 1;
#endif
+#endif
return 0;
}
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
static inline notrace unsigned long arch_local_save_flags(void)
{
- return __arch_local_irq_stosm(0x00);
+ return __arch_local_irq_stnsm(0xff);
}
static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 346b1c85ffb4..58642fd29c87 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
+#define JUMP_LABEL_NOP_OFFSET 2
#ifdef CONFIG_64BIT
#define ASM_PTR ".quad"
@@ -13,9 +14,13 @@
#define ASM_ALIGN ".balign 4"
#endif
+/*
+ * We use a brcl 0,2 instruction for jump labels at compile time so it
+ * can be easily distinguished from a hotpatch generated instruction.
+ */
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm_volatile_goto("0: brcl 0,0\n"
+ asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9cba74d5d853..d84559e31f32 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,11 +35,13 @@
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
-#define SIGP_CTRL_C 0x00800000
+#define SIGP_CTRL_C 0x80
+#define SIGP_CTRL_SCN_MASK 0x3f
struct sca_entry {
- atomic_t ctrl;
- __u32 reserved;
+ __u8 reserved0;
+ __u8 sigp_ctrl;
+ __u16 reserved[3];
__u64 sda;
__u64 reserved2[2];
} __attribute__((packed));
@@ -87,7 +89,8 @@ struct kvm_s390_sie_block {
atomic_t cpuflags; /* 0x0000 */
__u32 : 1; /* 0x0004 */
__u32 prefix : 18;
- __u32 : 13;
+ __u32 : 1;
+ __u32 ibc : 12;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
@@ -132,7 +135,9 @@ struct kvm_s390_sie_block {
__u8 reserved60; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
- __u8 reserved63[1]; /* 0x0063 */
+#define ECB3_AES 0x04
+#define ECB3_DEA 0x08
+ __u8 ecb3; /* 0x0063 */
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
@@ -159,6 +164,7 @@ struct kvm_s390_sie_block {
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
@@ -192,6 +198,7 @@ struct kvm_vcpu_stat {
u32 exit_stop_request;
u32 exit_validity;
u32 exit_instruction;
+ u32 halt_successful_poll;
u32 halt_wakeup;
u32 instruction_lctl;
u32 instruction_lctlg;
@@ -378,14 +385,11 @@ struct kvm_s390_interrupt_info {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
};
};
-/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP (1<<0)
-#define ACTION_STOP_ON_STOP (1<<1)
-
struct kvm_s390_irq_payload {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
@@ -393,6 +397,7 @@ struct kvm_s390_irq_payload {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
};
@@ -401,7 +406,6 @@ struct kvm_s390_local_interrupt {
struct kvm_s390_float_interrupt *float_int;
wait_queue_head_t *wq;
atomic_t *cpuflags;
- unsigned int action_bits;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_irq_payload irq;
unsigned long pending_irqs;
@@ -470,7 +474,6 @@ struct kvm_vcpu_arch {
};
struct gmap *gmap;
struct kvm_guestdbg_info_arch guestdbg;
-#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
unsigned long pfault_token;
unsigned long pfault_select;
unsigned long pfault_compare;
@@ -504,13 +507,39 @@ struct s390_io_adapter {
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256
+/* maximum size of facilities and facility mask is 2k bytes */
+#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
+#define S390_ARCH_FAC_LIST_SIZE_U64 \
+ (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
+#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
+#define S390_ARCH_FAC_MASK_SIZE_U64 \
+ (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
+
+struct s390_model_fac {
+ /* facilities used in SIE context */
+ __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* subset enabled by kvm */
+ __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+};
+
+struct kvm_s390_cpu_model {
+ struct s390_model_fac *fac;
+ struct cpuid cpu_id;
+ unsigned short ibc;
+};
+
struct kvm_s390_crypto {
struct kvm_s390_crypto_cb *crycb;
__u32 crycbd;
+ __u8 aes_kw;
+ __u8 dea_kw;
};
struct kvm_s390_crypto_cb {
- __u8 reserved00[128]; /* 0x0000 */
+ __u8 reserved00[72]; /* 0x0000 */
+ __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
+ __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
+ __u8 reserved80[128]; /* 0x0080 */
};
struct kvm_arch{
@@ -523,12 +552,15 @@ struct kvm_arch{
int use_irqchip;
int use_cmma;
int user_cpu_state_ctrl;
+ int user_sigp;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
spinlock_t start_stop_lock;
+ struct kvm_s390_cpu_model model;
struct kvm_s390_crypto crypto;
+ u64 epoch;
};
#define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5e102422c9ab..fbb5ee3ae57c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -99,7 +99,7 @@ extern unsigned long zero_page_mask;
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
@@ -249,10 +249,10 @@ static inline int is_module_addr(void *addr)
_PAGE_YOUNG)
/*
- * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
- * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
- * is used to distinguish present from not-present ptes. It is changed only
- * with the page table lock held.
+ * handle_pte_fault uses pte_present and pte_none to find out the pte type
+ * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
+ * distinguish present from not-present ptes. It is changed only with the page
+ * table lock held.
*
* The following table gives the different possible bit combinations for
* the pte hardware and software bits in the last 12 bits of a pte:
@@ -279,7 +279,6 @@ static inline int is_module_addr(void *addr)
*
* pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
* pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
- * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
* pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
*/
@@ -671,13 +670,6 @@ static inline int pte_swap(pte_t pte)
== (_PAGE_INVALID | _PAGE_TYPE);
}
-static inline int pte_file(pte_t pte)
-{
- /* Bit pattern: (pte & 0x601) == 0x600 */
- return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
- == (_PAGE_INVALID | _PAGE_PROTECT);
-}
-
static inline int pte_special(pte_t pte)
{
return (pte_val(pte) & _PAGE_SPECIAL);
@@ -1756,19 +1748,6 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef CONFIG_64BIT
-# define PTE_FILE_MAX_BITS 26
-#else /* CONFIG_64BIT */
-# define PTE_FILE_MAX_BITS 59
-#endif /* CONFIG_64BIT */
-
-#define pte_to_pgoff(__pte) \
- ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
-
-#define pgoff_to_pte(__off) \
- ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
- | _PAGE_INVALID | _PAGE_PROTECT })
-
#endif /* !__ASSEMBLY__ */
#define kern_addr_valid(addr) (1)
@@ -1779,6 +1758,10 @@ extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
/*
* No page table caches to initialise
*/
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bed05ea7ec27..e7cbbdcdee13 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -215,10 +215,7 @@ static inline unsigned short stap(void)
/*
* Give up the time slice of the virtual PU.
*/
-static inline void cpu_relax(void)
-{
- barrier();
-}
+void cpu_relax(void);
#define cpu_relax_lowlatency() barrier()
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index 804578587a7a..72786067b300 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -15,5 +15,6 @@ struct reset_call {
extern void register_reset_call(struct reset_call *reset);
extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void (*func)(void *), void *data);
+extern void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data);
#endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1aba89b53cb9..f1096bab5199 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -27,11 +27,12 @@ struct sclp_ipl_info {
};
struct sclp_cpu_entry {
- u8 address;
+ u8 core_id;
u8 reserved0[2];
u8 : 3;
u8 siif : 1;
- u8 : 4;
+ u8 sigpif : 1;
+ u8 : 3;
u8 reserved2[10];
u8 type;
u8 reserved1;
@@ -51,6 +52,9 @@ int sclp_cpu_deconfigure(u8 cpu);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
unsigned int sclp_get_max_cpu(void);
+unsigned int sclp_get_mtid(u8 cpu_type);
+unsigned int sclp_get_mtid_max(void);
+unsigned int sclp_get_mtid_prev(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
@@ -66,6 +70,9 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
unsigned long sclp_get_hsa_size(void);
void sclp_early_detect(void);
int sclp_has_siif(void);
+int sclp_has_sigpif(void);
unsigned int sclp_get_ibc(void);
+long _sclp_print_early(const char *);
+
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7736fdd72595..b8d1e54b4733 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -57,6 +57,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_FLAG_VX (1UL << 18)
+#define MACHINE_FLAG_CAD (1UL << 19)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -80,6 +81,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_TLB_LC (0)
#define MACHINE_HAS_VX (0)
+#define MACHINE_HAS_CAD (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -93,6 +95,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
+#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
#endif /* CONFIG_64BIT */
/*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index fad4ae23ece0..ec60cf7fa0a2 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -16,6 +16,7 @@
#define SIGP_SET_ARCHITECTURE 18
#define SIGP_COND_EMERGENCY_SIGNAL 19
#define SIGP_SENSE_RUNNING 21
+#define SIGP_SET_MULTI_THREADING 22
#define SIGP_STORE_ADDITIONAL_STATUS 23
/* SIGP condition codes */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 762d4f88af5a..b3bd0282dd98 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -16,6 +16,8 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
+extern unsigned int smp_cpu_mt_shift;
+extern unsigned int smp_cpu_mtid;
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
@@ -35,6 +37,8 @@ extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
+#define smp_cpu_mtid 0
+
static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
func(data);
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 7e2dcd7c57ef..8662f5c8e17f 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -44,7 +44,6 @@ extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
-#undef __HAVE_ARCH_STRNICMP
#undef __HAVE_ARCH_STRPBRK
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index f92428e459f8..f7054a892d9e 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -15,6 +15,7 @@
#define __ASM_S390_SYSINFO_H
#include <asm/bitsperlong.h>
+#include <linux/uuid.h>
struct sysinfo_1_1_1 {
unsigned char p:1;
@@ -90,7 +91,11 @@ struct sysinfo_2_2_2 {
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
- char reserved_2[16];
+ char reserved_2[8];
+ unsigned char mt_installed;
+ unsigned char mt_general;
+ unsigned char mt_psmtid;
+ char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
};
@@ -112,34 +117,39 @@ struct sysinfo_3_2_2 {
char name[8];
unsigned int caf;
char cpi[16];
- char reserved_1[24];
-
+ char reserved_1[3];
+ char ext_name_encoding;
+ unsigned int reserved_2;
+ uuid_be uuid;
} vm[8];
- char reserved_544[3552];
+ char reserved_3[1504];
+ char ext_names[8][256];
};
extern int topology_max_mnest;
-#define TOPOLOGY_CPU_BITS 64
+#define TOPOLOGY_CORE_BITS 64
#define TOPOLOGY_NR_MAG 6
-struct topology_cpu {
- unsigned char reserved0[4];
+struct topology_core {
+ unsigned char nl;
+ unsigned char reserved0[3];
unsigned char :6;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
- unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
+ unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
};
struct topology_container {
- unsigned char reserved[7];
+ unsigned char nl;
+ unsigned char reserved[6];
unsigned char id;
};
union topology_entry {
unsigned char nl;
- struct topology_cpu cpu;
+ struct topology_core cpu;
struct topology_container container;
};
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 4d62fd5b56e5..ef1df718642d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -39,7 +39,6 @@ struct thread_info {
unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
- struct restart_block restart_block;
unsigned int system_call;
__u64 user_timer;
__u64 system_timer;
@@ -56,9 +55,6 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
set_clock_comparator(S390_lowcore.clock_comparator);
}
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
{
- typedef struct { char _[sizeof(clk)]; } addrtype;
+ typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[16];
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
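
The old prototype took the buffer as "char clk[16]", but an array parameter in C decays to a pointer, so sizeof(clk) inside the function evaluates to the pointer size rather than 16; the asm output constraint therefore only declared part of the 16 bytes that stcke stores as written, which is presumably part of the motivation for spelling the size out as STORE_CLOCK_EXT_SIZE. A small stand-alone demonstration of the decay (plain C, independent of the kernel code):

#include <stdio.h>

static void get_clock_ext(char clk[16])
{
	/* "clk" is really a char * here, so this prints sizeof(char *). */
	printf("inside the function: sizeof(clk) = %zu\n", sizeof(clk));
}

int main(void)
{
	char clk[16];

	printf("at the call site:    sizeof(clk) = %zu\n", sizeof(clk)); /* 16 */
	get_clock_ext(clk);		/* typically 8 on 64-bit targets */
	return 0;
}
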
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 56af53093d24..c4fbb9527c5c 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,9 +9,11 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
struct cpu_topology_s390 {
+ unsigned short thread_id;
unsigned short core_id;
unsigned short socket_id;
unsigned short book_id;
+ cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
};
@@ -19,6 +21,8 @@ struct cpu_topology_s390 {
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h
index 37998b449531..b3fe12d8dd87 100644
--- a/arch/s390/include/uapi/asm/hypfs.h
+++ b/arch/s390/include/uapi/asm/hypfs.h
@@ -1,16 +1,19 @@
/*
- * IOCTL interface for hypfs
+ * Structures for hypfs interface
*
* Copyright IBM Corp. 2013
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#ifndef _ASM_HYPFS_CTL_H
-#define _ASM_HYPFS_CTL_H
+#ifndef _ASM_HYPFS_H
+#define _ASM_HYPFS_H
#include <linux/types.h>
+/*
+ * IOCTL for binary interface /sys/kernel/debug/diag_304
+ */
struct hypfs_diag304 {
__u32 args[2];
__u64 data;
@@ -22,4 +25,30 @@ struct hypfs_diag304 {
#define HYPFS_DIAG304 \
_IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304)
+/*
+ * Structures for binary interface /sys/kernel/debug/diag_0c
+ */
+struct hypfs_diag0c_hdr {
+ __u64 len; /* Length of diag0c buffer without header */
+ __u16 version; /* Version of header */
+ char reserved1[6]; /* Reserved */
+ char tod_ext[16]; /* TOD clock for diag0c */
+ __u64 count; /* Number of entries (CPUs) in diag0c array */
+ char reserved2[24]; /* Reserved */
+};
+
+struct hypfs_diag0c_entry {
+ char date[8]; /* MM/DD/YY in EBCDIC */
+ char time[8]; /* HH:MM:SS in EBCDIC */
+ __u64 virtcpu; /* Virtual time consumed by the virt CPU (us) */
+ __u64 totalproc; /* Total of virtual and simulation time (us) */
+ __u32 cpu; /* Linux logical CPU number */
+ __u32 reserved; /* Align to 8 byte */
+};
+
+struct hypfs_diag0c_data {
+ struct hypfs_diag0c_hdr hdr; /* 64 byte header */
+ struct hypfs_diag0c_entry entry[]; /* diag0c entry array */
+};
+
#endif
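
The two structures above describe the blob produced by the diag_0c debugfs file that hypfs_diag0c.c (earlier in this diff) creates: a 64-byte header followed by hdr.count 40-byte per-CPU entries. A minimal userspace reader sketch; it mirrors the uapi layout and takes the file path as a command-line argument rather than hard-coding a location under /sys/kernel/debug:

#include <stdio.h>
#include <stdint.h>

/* Mirrors struct hypfs_diag0c_hdr / hypfs_diag0c_entry above. */
struct d0c_hdr {
	uint64_t len;
	uint16_t version;
	char	 reserved1[6];
	char	 tod_ext[16];
	uint64_t count;
	char	 reserved2[24];
};

struct d0c_entry {
	char	 date[8];	/* EBCDIC */
	char	 time[8];	/* EBCDIC */
	uint64_t virtcpu;	/* microseconds */
	uint64_t totalproc;	/* microseconds */
	uint32_t cpu;
	uint32_t reserved;
};

int main(int argc, char **argv)
{
	struct d0c_hdr hdr;
	struct d0c_entry e;
	uint64_t i;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&hdr, sizeof(hdr), 1, f) != 1)
		return 1;
	printf("version %u, %llu CPUs\n", hdr.version,
	       (unsigned long long) hdr.count);
	for (i = 0; i < hdr.count; i++) {
		if (fread(&e, sizeof(e), 1, f) != 1)
			break;
		printf("cpu %u: virtcpu %llu us, total %llu us\n", e.cpu,
		       (unsigned long long) e.virtcpu,
		       (unsigned long long) e.totalproc);
	}
	fclose(f);
	return 0;
}
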
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 48eda3ab4944..9c77e60b9a26 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -57,10 +57,44 @@ struct kvm_s390_io_adapter_req {
/* kvm attr_group on vm fd */
#define KVM_S390_VM_MEM_CTRL 0
+#define KVM_S390_VM_TOD 1
+#define KVM_S390_VM_CRYPTO 2
+#define KVM_S390_VM_CPU_MODEL 3
/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA 0
#define KVM_S390_VM_MEM_CLR_CMMA 1
+#define KVM_S390_VM_MEM_LIMIT_SIZE 2
+
+/* kvm attributes for KVM_S390_VM_TOD */
+#define KVM_S390_VM_TOD_LOW 0
+#define KVM_S390_VM_TOD_HIGH 1
+
+/* kvm attributes for KVM_S390_VM_CPU_MODEL */
+/* processor related attributes are r/w */
+#define KVM_S390_VM_CPU_PROCESSOR 0
+struct kvm_s390_vm_cpu_processor {
+ __u64 cpuid;
+ __u16 ibc;
+ __u8 pad[6];
+ __u64 fac_list[256];
+};
+
+/* machine related attributes are r/o */
+#define KVM_S390_VM_CPU_MACHINE 1
+struct kvm_s390_vm_cpu_machine {
+ __u64 cpuid;
+ __u32 ibc;
+ __u8 pad[4];
+ __u64 fac_mask[256];
+ __u64 fac_list[256];
+};
+
+/* kvm attributes for crypto */
+#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
+#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
+#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
+#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
@@ -107,6 +141,9 @@ struct kvm_guest_debug_arch {
struct kvm_hw_breakpoint __user *hw_bp;
};
+/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
+#define KVM_S390_PFAULT_TOKEN_INVALID 0xffffffffffffffffULL
+
#define KVM_SYNC_PREFIX (1UL << 0)
#define KVM_SYNC_GPRS (1UL << 1)
#define KVM_SYNC_ACRS (1UL << 2)
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 2b446cf0cc65..67878af257a0 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -289,7 +289,8 @@
#define __NR_bpf 351
#define __NR_s390_pci_mmio_write 352
#define __NR_s390_pci_mmio_read 353
-#define NR_syscalls 354
+#define __NR_execveat 354
+#define NR_syscalls 355
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 204c43a4c245..31fab2676fe9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,8 +4,8 @@
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = -pg
-CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
#
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a2275..f74a53d339b0 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
- diag %r1,%r1,0x308
+ lghi %r0,0
+ diag %r0,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index c0b03c28d157..632fa06ea162 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -5,37 +5,11 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/notifier.h>
#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/cacheinfo.h>
#include <asm/facility.h>
-struct cache {
- unsigned long size;
- unsigned int line_size;
- unsigned int associativity;
- unsigned int nr_sets;
- unsigned int level : 3;
- unsigned int type : 2;
- unsigned int private : 1;
- struct list_head list;
-};
-
-struct cache_dir {
- struct kobject *kobj;
- struct cache_index_dir *index;
-};
-
-struct cache_index_dir {
- struct kobject kobj;
- int cpu;
- struct cache *cache;
- struct cache_index_dir *next;
-};
-
enum {
CACHE_SCOPE_NOTEXISTS,
CACHE_SCOPE_PRIVATE,
@@ -44,10 +18,10 @@ enum {
};
enum {
- CACHE_TYPE_SEPARATE,
- CACHE_TYPE_DATA,
- CACHE_TYPE_INSTRUCTION,
- CACHE_TYPE_UNIFIED,
+ CTYPE_SEPARATE,
+ CTYPE_DATA,
+ CTYPE_INSTRUCTION,
+ CTYPE_UNIFIED,
};
enum {
@@ -70,37 +44,60 @@ struct cache_info {
};
#define CACHE_MAX_LEVEL 8
-
union cache_topology {
struct cache_info ci[CACHE_MAX_LEVEL];
unsigned long long raw;
};
static const char * const cache_type_string[] = {
- "Data",
+ "",
"Instruction",
+ "Data",
+ "",
"Unified",
};
-static struct cache_dir *cache_dir_cpu[NR_CPUS];
-static LIST_HEAD(cache_list);
+static const enum cache_type cache_type_map[] = {
+ [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
+ [CTYPE_DATA] = CACHE_TYPE_DATA,
+ [CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
+ [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
void show_cacheinfo(struct seq_file *m)
{
- struct cache *cache;
- int index = 0;
+ struct cpu_cacheinfo *this_cpu_ci;
+ struct cacheinfo *cache;
+ int idx;
- list_for_each_entry(cache, &cache_list, list) {
- seq_printf(m, "cache%-11d: ", index);
+ get_online_cpus();
+ this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
+ for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+ cache = this_cpu_ci->info_list + idx;
+ seq_printf(m, "cache%-11d: ", idx);
seq_printf(m, "level=%d ", cache->level);
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
- seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
- seq_printf(m, "size=%luK ", cache->size >> 10);
- seq_printf(m, "line_size=%u ", cache->line_size);
- seq_printf(m, "associativity=%d", cache->associativity);
+ seq_printf(m, "scope=%s ",
+ cache->disable_sysfs ? "Shared" : "Private");
+ seq_printf(m, "size=%dK ", cache->size >> 10);
+ seq_printf(m, "line_size=%u ", cache->coherency_line_size);
+ seq_printf(m, "associativity=%d", cache->ways_of_associativity);
seq_puts(m, "\n");
- index++;
}
+ put_online_cpus();
+}
+
+static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
+{
+ if (level >= CACHE_MAX_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+
+ ci += level;
+
+ if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
+ return CACHE_TYPE_NOCACHE;
+
+ return cache_type_map[ci->type];
}
static inline unsigned long ecag(int ai, int li, int ti)
@@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
return val;
}
-static int __init cache_add(int level, int private, int type)
+static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
+ enum cache_type type, unsigned int level)
{
- struct cache *cache;
- int ti;
+ int ti, num_sets;
+ int cpu = smp_processor_id();
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
- return -ENOMEM;
- if (type == CACHE_TYPE_INSTRUCTION)
+ if (type == CACHE_TYPE_INST)
ti = CACHE_TI_INSTRUCTION;
else
ti = CACHE_TI_UNIFIED;
- cache->size = ecag(EXTRACT_SIZE, level, ti);
- cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
- cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
- cache->nr_sets = cache->size / cache->associativity;
- cache->nr_sets /= cache->line_size;
- cache->private = private;
- cache->level = level + 1;
- cache->type = type - 1;
- list_add_tail(&cache->list, &cache_list);
- return 0;
-}
-
-static void __init cache_build_info(void)
-{
- struct cache *cache, *next;
- union cache_topology ct;
- int level, private, rc;
-
- ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
- for (level = 0; level < CACHE_MAX_LEVEL; level++) {
- switch (ct.ci[level].scope) {
- case CACHE_SCOPE_SHARED:
- private = 0;
- break;
- case CACHE_SCOPE_PRIVATE:
- private = 1;
- break;
- default:
- return;
- }
- if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
- rc = cache_add(level, private, CACHE_TYPE_DATA);
- rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
- } else {
- rc = cache_add(level, private, ct.ci[level].type);
- }
- if (rc)
- goto error;
- }
- return;
-error:
- list_for_each_entry_safe(cache, next, &cache_list, list) {
- list_del(&cache->list);
- kfree(cache);
- }
-}
-
-static struct cache_dir *cache_create_cache_dir(int cpu)
-{
- struct cache_dir *cache_dir;
- struct kobject *kobj = NULL;
- struct device *dev;
-
- dev = get_cpu_device(cpu);
- if (!dev)
- goto out;
- kobj = kobject_create_and_add("cache", &dev->kobj);
- if (!kobj)
- goto out;
- cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
- if (!cache_dir)
- goto out;
- cache_dir->kobj = kobj;
- cache_dir_cpu[cpu] = cache_dir;
- return cache_dir;
-out:
- kobject_put(kobj);
- return NULL;
-}
-
-static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
-{
- return container_of(kobj, struct cache_index_dir, kobj);
-}
-
-static void cache_index_release(struct kobject *kobj)
-{
- struct cache_index_dir *index;
-
- index = kobj_to_cache_index_dir(kobj);
- kfree(index);
-}
-
-static ssize_t cache_index_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct kobj_attribute *kobj_attr;
-
- kobj_attr = container_of(attr, struct kobj_attribute, attr);
- return kobj_attr->show(kobj, kobj_attr, buf);
-}
-
-#define DEFINE_CACHE_ATTR(_name, _format, _value) \
-static ssize_t cache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *buf) \
-{ \
- struct cache_index_dir *index; \
- \
- index = kobj_to_cache_index_dir(kobj); \
- return sprintf(buf, _format, _value); \
-} \
-static struct kobj_attribute cache_##_name##_attr = \
- __ATTR(_name, 0444, cache_##_name##_show, NULL);
-DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
-DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
-DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
-DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
-DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
-DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
+ this_leaf->level = level + 1;
+ this_leaf->type = type;
+ this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
+ this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
+ level, ti);
+ this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
-{
- struct cache_index_dir *index;
- int len;
-
- index = kobj_to_cache_index_dir(kobj);
- len = type ?
- cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
- cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
- len += sprintf(&buf[len], "\n");
- return len;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return shared_cpu_map_func(kobj, 0, buf);
+ num_sets = this_leaf->size / this_leaf->coherency_line_size;
+ num_sets /= this_leaf->ways_of_associativity;
+ this_leaf->number_of_sets = num_sets;
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ if (!private)
+ this_leaf->disable_sysfs = true;
}
-static struct kobj_attribute cache_shared_cpu_map_attr =
- __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
-static ssize_t shared_cpu_list_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+int init_cache_level(unsigned int cpu)
{
- return shared_cpu_map_func(kobj, 1, buf);
-}
-static struct kobj_attribute cache_shared_cpu_list_attr =
- __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
-
-static struct attribute *cache_index_default_attrs[] = {
- &cache_type_attr.attr,
- &cache_size_attr.attr,
- &cache_number_of_sets_attr.attr,
- &cache_ways_of_associativity_attr.attr,
- &cache_level_attr.attr,
- &cache_coherency_line_size_attr.attr,
- &cache_shared_cpu_map_attr.attr,
- &cache_shared_cpu_list_attr.attr,
- NULL,
-};
-
-static const struct sysfs_ops cache_index_ops = {
- .show = cache_index_show,
-};
-
-static struct kobj_type cache_index_type = {
- .sysfs_ops = &cache_index_ops,
- .release = cache_index_release,
- .default_attrs = cache_index_default_attrs,
-};
-
-static int cache_create_index_dir(struct cache_dir *cache_dir,
- struct cache *cache, int index, int cpu)
-{
- struct cache_index_dir *index_dir;
- int rc;
-
- index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
- if (!index_dir)
- return -ENOMEM;
- index_dir->cache = cache;
- index_dir->cpu = cpu;
- rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
- cache_dir->kobj, "index%d", index);
- if (rc)
- goto out;
- index_dir->next = cache_dir->index;
- cache_dir->index = index_dir;
- return 0;
-out:
- kfree(index_dir);
- return rc;
-}
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int level = 0, leaves = 0;
+ union cache_topology ct;
+ enum cache_type ctype;
-static int cache_add_cpu(int cpu)
-{
- struct cache_dir *cache_dir;
- struct cache *cache;
- int rc, index = 0;
+ if (!this_cpu_ci)
+ return -EINVAL;
- if (list_empty(&cache_list))
- return 0;
- cache_dir = cache_create_cache_dir(cpu);
- if (!cache_dir)
- return -ENOMEM;
- list_for_each_entry(cache, &cache_list, list) {
- if (!cache->private)
+ ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+ do {
+ ctype = get_cache_type(&ct.ci[0], level);
+ if (ctype == CACHE_TYPE_NOCACHE)
break;
- rc = cache_create_index_dir(cache_dir, cache, index, cpu);
- if (rc)
- return rc;
- index++;
- }
- return 0;
-}
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ } while (++level < CACHE_MAX_LEVEL);
-static void cache_remove_cpu(int cpu)
-{
- struct cache_index_dir *index, *next;
- struct cache_dir *cache_dir;
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
- cache_dir = cache_dir_cpu[cpu];
- if (!cache_dir)
- return;
- index = cache_dir->index;
- while (index) {
- next = index->next;
- kobject_put(&index->kobj);
- index = next;
- }
- kobject_put(cache_dir->kobj);
- kfree(cache_dir);
- cache_dir_cpu[cpu] = NULL;
+ return 0;
}
-static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+int populate_cache_leaves(unsigned int cpu)
{
- int cpu = (long)hcpu;
- int rc = 0;
+ unsigned int level, idx, pvt;
+ union cache_topology ct;
+ enum cache_type ctype;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = cache_add_cpu(cpu);
- if (rc)
- cache_remove_cpu(cpu);
- break;
- case CPU_DEAD:
- cache_remove_cpu(cpu);
- break;
+ ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+ for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ if (!this_leaf)
+ return -EINVAL;
+
+ pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
+ ctype = get_cache_type(&ct.ci[0], level);
+ if (ctype == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, pvt, ctype, level);
+ }
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static int __init cache_init(void)
-{
- int cpu;
-
- if (!test_facility(34))
- return 0;
- cache_build_info();
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cache_add_cpu(cpu);
- __hotcpu_notifier(cache_hotplug, 0);
- cpu_notifier_register_done();
return 0;
}
-device_initcall(cache_init);
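
With this conversion the arch code only reports the cache topology through init_cache_level() and populate_cache_leaves(); the generic cacheinfo driver owns the sysfs representation, so the attributes that the removed code exported by hand remain visible under the standard /sys/devices/system/cpu/cpuN/cache/indexM/ files (that path comes from the generic driver, not from this patch). A small reader sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *attrs[] = { "level", "type", "size" };
	char path[128], buf[64];
	unsigned int idx, i;
	FILE *f;

	for (idx = 0; ; idx++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cache/index%u/level", idx);
		f = fopen(path, "r");
		if (!f)
			break;			/* no more cache leaves */
		fclose(f);
		printf("index%u:", idx);
		for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			snprintf(path, sizeof(path),
				 "/sys/devices/system/cpu/cpu0/cache/index%u/%s",
				 idx, attrs[i]);
			f = fopen(path, "r");
			if (!f)
				continue;
			if (fgets(buf, sizeof(buf), f)) {
				buf[strcspn(buf, "\n")] = '\0';
				printf(" %s=%s", attrs[i], buf);
			}
			fclose(f);
		}
		printf("\n");
	}
	return 0;
}
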
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 34d5fa7b01b5..bc1df12dd4f8 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -209,7 +209,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
int i;
/* Always make any pending restarted system call return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
return -EFAULT;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f3762937dd82..533430307da8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -137,7 +137,7 @@ enum {
INSTR_RSI_RRP,
INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
- INSTR_RSY_RDRM,
+ INSTR_RSY_RDRM, INSTR_RSY_RMRD,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RS_RURD,
INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
@@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
[U16_32] = { 16, 32, 0 },
[J16_16] = { 16, 16, OPERAND_PCREL },
[J16_32] = { 16, 32, OPERAND_PCREL },
- [I16_32] = { 16, 32, OPERAND_SIGNED },
[I24_24] = { 24, 24, OPERAND_SIGNED },
[J32_16] = { 32, 16, OPERAND_PCREL },
[I32_16] = { 32, 16, OPERAND_SIGNED },
@@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
[INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
+ [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
@@ -451,7 +451,8 @@ enum {
LONG_INSN_VERLLV,
LONG_INSN_VESRAV,
LONG_INSN_VESRLV,
- LONG_INSN_VSBCBI
+ LONG_INSN_VSBCBI,
+ LONG_INSN_STCCTM
};
static char *long_insn_name[] = {
@@ -531,6 +532,7 @@ static char *long_insn_name[] = {
[LONG_INSN_VESRAV] = "vesrav",
[LONG_INSN_VESRLV] = "vesrlv",
[LONG_INSN_VSBCBI] = "vsbcbi",
+ [LONG_INSN_STCCTM] = "stcctm",
};
static struct s390_insn opcode[] = {
@@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
{ "lric", 0x60, INSTR_RSY_RDRM },
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
+ { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 302ac1f7f8e7..70a329450901 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
+ if (test_facility(128))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
#endif
}
+static int __init nocad_setup(char *str)
+{
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+ return 0;
+}
+early_param("nocad", nocad_setup);
+
+static int __init cad_init(void)
+{
+ if (MACHINE_HAS_CAD)
+ /* Enable problem state CAD. */
+ __ctl_set_bit(2, 3);
+ return 0;
+}
+early_initcall(cad_init);
+
static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 8e61393c8275..834df047d35f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
struct fadvise64_64_args;
struct old_sigaction;
+long sys_rt_sigreturn(void);
+long sys_sigreturn(void);
+
long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
-
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b86bb8823f15..82c19899574f 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -46,6 +46,13 @@
* lg %r14,8(%r15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
+ * In case we use gcc's hotpatch feature, the original and also the disabled
+ * function prologue contain only a single six byte instruction and look
+ * like this:
+ * > brcl 0,0 # offset 0
+ * To enable ftrace the code gets patched like above and afterwards looks
+ * like this:
+ * > brasl %r0,ftrace_caller # offset 0
*/
unsigned long ftrace_plt;
@@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- struct ftrace_insn insn;
- unsigned short op;
- void *from, *to;
- size_t size;
-
- ftrace_generate_nop_insn(&insn);
- size = sizeof(insn);
- from = &insn;
- to = (void *) rec->ip;
- if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- /*
- * If we find a breakpoint instruction, a kprobe has been placed
- * at the beginning of the function. We write the constant
- * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
- * instruction so that the kprobes handler can execute a nop, if it
- * reaches this breakpoint.
- */
- if (op == BREAKPOINT_INSTRUCTION) {
- size -= 2;
- from += 2;
- to += 2;
- insn.disp = KPROBE_ON_FTRACE_NOP;
+ if (addr == MCOUNT_ADDR) {
+ /* Initial code replacement */
+#ifdef CC_USING_HOTPATCH
+ /* We expect to see brcl 0,0 */
+ ftrace_generate_nop_insn(&orig);
+#else
+ /* We expect to see stg r14,8(r15) */
+ orig.opc = 0xe3e0;
+ orig.disp = 0xf0080024;
+#endif
+ ftrace_generate_nop_insn(&new);
+ } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+ /*
+ * If we find a breakpoint instruction, a kprobe has been
+ * placed at the beginning of the function. We write the
+ * constant KPROBE_ON_FTRACE_NOP into the remaining four
+ * bytes of the original instruction so that the kprobes
+ * handler can execute a nop, if it reaches this breakpoint.
+ */
+ new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+ orig.disp = KPROBE_ON_FTRACE_CALL;
+ new.disp = KPROBE_ON_FTRACE_NOP;
+ } else {
+ /* Replace ftrace call with a nop. */
+ ftrace_generate_call_insn(&orig, rec->ip);
+ ftrace_generate_nop_insn(&new);
}
- if (probe_kernel_write(to, from, size))
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
return -EPERM;
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- struct ftrace_insn insn;
- unsigned short op;
- void *from, *to;
- size_t size;
-
- ftrace_generate_call_insn(&insn, rec->ip);
- size = sizeof(insn);
- from = &insn;
- to = (void *) rec->ip;
- if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- /*
- * If we find a breakpoint instruction, a kprobe has been placed
- * at the beginning of the function. We write the constant
- * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
- * instruction so that the kprobes handler can execute a brasl if it
- * reaches this breakpoint.
- */
- if (op == BREAKPOINT_INSTRUCTION) {
- size -= 2;
- from += 2;
- to += 2;
- insn.disp = KPROBE_ON_FTRACE_CALL;
+ if (old.opc == BREAKPOINT_INSTRUCTION) {
+ /*
+ * If we find a breakpoint instruction, a kprobe has been
+ * placed at the beginning of the function. We write the
+ * constant KPROBE_ON_FTRACE_CALL into the remaining four
+ * bytes of the original instruction so that the kprobes
+ * handler can execute a brasl if it reaches this breakpoint.
+ */
+ new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+ orig.disp = KPROBE_ON_FTRACE_NOP;
+ new.disp = KPROBE_ON_FTRACE_CALL;
+ } else {
+ /* Replace nop with an ftrace call. */
+ ftrace_generate_nop_insn(&orig);
+ ftrace_generate_call_insn(&new, rec->ip);
}
- if (probe_kernel_write(to, from, size))
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
return -EPERM;
return 0;
}
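
Both ftrace_make_nop() and ftrace_make_call() now follow the same verify-then-write pattern: read the six bytes at rec->ip, build the instruction that should currently be there plus its replacement, and refuse to patch with -EINVAL if the on-site bytes differ. A stripped-down model of that control flow, with plain memcmp/memcpy standing in for probe_kernel_read/write and a purely illustrative branch encoding, not the kernel helpers themselves:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct insn6 {			/* 6-byte instruction image: opcode + displacement */
	uint16_t opc;
	uint32_t disp;
} __attribute__((packed));

/* Patch only if the current bytes match what we expect to find there. */
static int patch_insn(struct insn6 *site, const struct insn6 *expected,
		      const struct insn6 *replacement)
{
	if (memcmp(site, expected, sizeof(*site)))
		return -EINVAL;		/* unexpected code at the patch site */
	memcpy(site, replacement, sizeof(*site));
	return 0;
}

int main(void)
{
	struct insn6 site = { 0xc004, 0 };	/* brcl 0,0 (hotpatch-style nop) */
	struct insn6 nop  = { 0xc004, 0 };
	struct insn6 call = { 0xc0f4, 0x100 };	/* brcl 15,... illustrative only */

	return patch_insn(&site, &nop, &call);	/* 0 on success */
}
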
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d62eee11f0b5..132f4c9ade60 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -436,7 +436,9 @@ ENTRY(startup_kdump)
# followed by the facility words.
#if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_ZEC12)
+#if defined(CONFIG_MARCH_Z13)
+ .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
+#elif defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100eff2, 0xf46c0000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39badb9ca0b3..5c8651f36509 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
u32 dump_prefix_page;
-void s390_reset_system(void (*func)(void *), void *data)
+void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data)
{
struct _lowcore *lc;
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
/* Store status at absolute zero */
store_status();
+ /* Call function before reset */
+ if (fn_pre)
+ fn_pre();
do_reset_calls();
- if (func)
- func(data);
+ /* Call function after reset */
+ if (fn_post)
+ fn_post(data);
}
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index b987ab2c1541..cb2d51e779df 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -22,31 +22,66 @@ struct insn_args {
enum jump_label_type type;
};
+static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
+{
+ /* brcl 0,0 */
+ insn->opcode = 0xc004;
+ insn->offset = 0;
+}
+
+static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
+{
+ /* brcl 15,offset */
+ insn->opcode = 0xc0f4;
+ insn->offset = (entry->target - entry->code) >> 1;
+}
+
+static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+{
+ unsigned char *ipc = (unsigned char *)entry->code;
+ unsigned char *ipe = (unsigned char *)insn;
+
+ pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+ pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
+ ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
+ pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
+ ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ panic("Corrupted kernel text");
+}
+
+static struct insn orignop = {
+ .opcode = 0xc004,
+ .offset = JUMP_LABEL_NOP_OFFSET >> 1,
+};
+
static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
+ enum jump_label_type type,
+ int init)
{
- struct insn insn;
- int rc;
+ struct insn old, new;
if (type == JUMP_LABEL_ENABLE) {
- /* brcl 15,offset */
- insn.opcode = 0xc0f4;
- insn.offset = (entry->target - entry->code) >> 1;
+ jump_label_make_nop(entry, &old);
+ jump_label_make_branch(entry, &new);
} else {
- /* brcl 0,0 */
- insn.opcode = 0xc004;
- insn.offset = 0;
+ jump_label_make_branch(entry, &old);
+ jump_label_make_nop(entry, &new);
}
-
- rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
- WARN_ON_ONCE(rc < 0);
+ if (init) {
+ if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+ jump_label_bug(entry, &old);
+ } else {
+ if (memcmp((void *)entry->code, &old, sizeof(old)))
+ jump_label_bug(entry, &old);
+ }
+ probe_kernel_write((void *)entry->code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
{
struct insn_args *args = data;
- __jump_label_transform(args->entry, args->type);
+ __jump_label_transform(args->entry, args->type, 0);
return 0;
}
@@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
- __jump_label_transform(entry, type);
+ __jump_label_transform(entry, type, 1);
}
#endif
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 1e4c710dfb92..f516edc1fbe3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
/*
* If kprobes patches the instruction that is morphed by
* ftrace make sure that kprobes always sees the branch
- * "jg .+24" that skips the mcount block
+ * "jg .+24" that skips the mcount block or the "brcl 0,0"
+ * in case of hotpatch.
*/
ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
p->ainsn.is_ftrace_insn = 1;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 4685337fa7c6..fb0901ec4306 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
return 0;
}
arch_initcall(machine_kdump_pm_init);
-#endif
/*
* Start kdump: We expect here that a store status has been done on our CPU
*/
static void __do_machine_kdump(void *image)
{
-#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
- setup_regs();
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
start_kdump(1);
-#endif
}
+#endif
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
*/
static void __machine_kexec(void *data)
{
- struct kimage *image = data;
-
__arch_local_irq_stosm(0x04); /* enable DAT */
pfault_fini();
tracing_off();
debug_locks_off();
- if (image->type == KEXEC_TYPE_CRASH) {
+#ifdef CONFIG_CRASH_DUMP
+ if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
+
lgr_info_log();
- s390_reset_system(__do_machine_kdump, data);
- } else {
- s390_reset_system(__do_machine_kexec, data);
- }
+ s390_reset_system(setup_regs, __do_machine_kdump, data);
+ } else
+#endif
+ s390_reset_system(NULL, __do_machine_kexec, data);
disabled_wait((unsigned long) __builtin_return_address(0));
}
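
The new s390_reset_system() signature splits the callback in two: fn_pre runs before the reset calls (the kdump path uses it for setup_regs(), i.e. while the registers still reflect the crashed context), fn_post runs afterwards with the payload pointer, and plain kexec simply passes NULL for fn_pre. A userspace model of that ordering; the stub below only mimics the call sequence taken from the ipl.c hunk above, while the real function also stores status and runs the registered reset calls:

#include <stdio.h>

static void model_reset_system(void (*fn_pre)(void),
			       void (*fn_post)(void *), void *data)
{
	if (fn_pre)
		fn_pre();		/* before the reset calls */
	puts("(reset calls run here)");
	if (fn_post)
		fn_post(data);		/* after the reset */
}

static void pre(void)		{ puts("pre: save state (kdump: setup_regs)"); }
static void post(void *data)	{ printf("post: start new kernel %p\n", data); }

int main(void)
{
	int image;

	model_reset_system(pre, post, &image);	/* kdump-style caller */
	model_reset_system(NULL, post, &image);	/* plain kexec caller */
	return 0;
}
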
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index b6dfc5bfcb89..e499370fbccb 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
+#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
+#endif
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b89b59158b95..36154a2f1814 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -50,19 +50,15 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
- if (mod) {
- vfree(mod->arch.syminfo);
- mod->arch.syminfo = NULL;
- }
- vfree(module_region);
+ vfree(mod->arch.syminfo);
+ mod->arch.syminfo = NULL;
}
static void check_rela(Elf_Rela *rela, struct module *me)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index aa7a83948c7b..13fc0978ca7e 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
{
}
+#ifdef CONFIG_64BIT
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ if (tsk->thread.vxrs)
+ kfree(tsk->thread.vxrs);
+}
+#endif
+
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
@@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
- unsigned long ret;
-
- if (!(current->flags & PF_RANDOMIZE))
- return base;
- ret = PAGE_ALIGN(base + brk_rnd());
- return (ret > base) ? ret : base;
-}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index dbdd33ee0102..26108232fcaa 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -8,16 +8,24 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
+#include <asm/smp.h>
static DEFINE_PER_CPU(struct cpuid, cpu_id);
+void cpu_relax(void)
+{
+ if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
+ asm volatile("diag 0,0,0x44");
+ barrier();
+}
+EXPORT_SYMBOL(cpu_relax);
+
/*
* cpu_init - initializes state that is per-CPU.
*/
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index a41f2c99dcc8..7e77e03378f3 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
- lmh %r6,%r15,96(%r15) # store upper register halves
+ lgfr %r2,%r2 # sign extend return value
+ lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
#endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4e532c67832f..bfac77ada4f2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
+ case 0x2964:
+ strcpy(elf_platform, "z13");
+ break;
}
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6a2ac257d98f..b3ae6f70c6d6 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -162,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
_sigregs user_sregs;
/* Always make any pending restarted system call return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
return -EFAULT;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b499f5cbe19..a668993ff577 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -71,9 +71,30 @@ struct pcpu {
};
static u8 boot_cpu_type;
-static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];
+unsigned int smp_cpu_mt_shift;
+EXPORT_SYMBOL(smp_cpu_mt_shift);
+
+unsigned int smp_cpu_mtid;
+EXPORT_SYMBOL(smp_cpu_mtid);
+
+static unsigned int smp_max_threads __initdata = -1U;
+
+static int __init early_nosmt(char *s)
+{
+ smp_max_threads = 1;
+ return 0;
+}
+early_param("nosmt", early_nosmt);
+
+static int __init early_smt(char *s)
+{
+ get_option(&s, &smp_max_threads);
+ return 0;
+}
+early_param("smt", early_smt);
+
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
* member of a pcpu data structure within the pcpu_devices array.
@@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
/*
* Find struct pcpu by cpu address.
*/
-static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
+static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
int cpu;
@@ -299,6 +320,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
}
/*
+ * Enable additional logical cpus for multi-threading.
+ */
+static int pcpu_set_smt(unsigned int mtid)
+{
+ register unsigned long reg1 asm ("1") = (unsigned long) mtid;
+ int cc;
+
+ if (smp_cpu_mtid == mtid)
+ return 0;
+ asm volatile(
+ " sigp %1,0,%2 # sigp set multi-threading\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
+ : "cc");
+ if (cc == 0) {
+ smp_cpu_mtid = mtid;
+ smp_cpu_mt_shift = 0;
+ while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
+ smp_cpu_mt_shift++;
+ pcpu_devices[0].address = stap();
+ }
+ return cc;
+}
+
+/*
* Call function on an online CPU.
*/
void smp_call_online_cpu(void (*func)(void *), void *data)
@@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_CRASH_DUMP
-static void __init smp_get_save_area(int cpu, u16 address)
+static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
{
void *lc = pcpu_devices[0].lowcore;
struct save_area_ext *sa_ext;
unsigned long vx_sa;
- if (is_kdump_kernel())
- return;
- if (!OLDMEM_BASE && (address == boot_cpu_address ||
- ipl_info.type != IPL_TYPE_FCP_DUMP))
- return;
sa_ext = dump_save_area_create(cpu);
if (!sa_ext)
panic("could not allocate memory for save area\n");
- if (address == boot_cpu_address) {
- /* Copy the registers of the boot cpu. */
+ if (is_boot_cpu) {
+ /* Copy the registers of the boot CPU. */
copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
SAVE_AREA_BASE - PAGE_SIZE, 0);
if (MACHINE_HAS_VX)
@@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
free_page(vx_sa);
}
+/*
+ * Collect CPU state of the previous, crashed system.
+ * There are four cases:
+ * 1) standard zfcp dump
+ * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The boot CPU state is located in
+ * the absolute lowcore of the memory stored in the HSA. The zcore code
+ * will allocate the save area and copy the boot CPU state from the HSA.
+ * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
+ * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The firmware or the boot-loader
+ * stored the registers of the boot CPU in the absolute lowcore in the
+ * memory of the old system.
+ * 3) kdump and the old kernel did not store the CPU state,
+ * or stand-alone kdump for DASD
+ * condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The kexec code or the boot-loader
+ * stored the registers of the boot CPU in the memory of the old system.
+ * 4) kdump and the old kernel stored the CPU state
+ * condition: OLDMEM_BASE != NULL && is_kdump_kernel()
+ * The state of all CPUs is stored in ELF sections in the memory of the
+ * old system. The ELF sections are picked up by the crash_dump code
+ * via elfcorehdr_addr.
+ */
+static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+{
+ unsigned int cpu, address, i, j;
+ int is_boot_cpu;
+
+ if (is_kdump_kernel())
+ /* Previous system stored the CPU states. Nothing to do. */
+ return;
+ if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+ /* No previous system present, normal boot. */
+ return;
+ /* Set multi-threading state to the previous system. */
+ pcpu_set_smt(sclp_get_mtid_prev());
+ /* Collect CPU states. */
+ cpu = 0;
+ for (i = 0; i < info->configured; i++) {
+ /* Skip CPUs with different CPU type. */
+ if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+ continue;
+ for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
+ address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
+ is_boot_cpu = (address == pcpu_devices[0].address);
+ if (is_boot_cpu && !OLDMEM_BASE)
+ /* Skip boot CPU for standard zfcp dump. */
+ continue;
+ /* Get state for this CPU. */
+ __smp_store_cpu_state(cpu, address, is_boot_cpu);
+ }
+ }
+}
+
int smp_store_status(int cpu)
{
unsigned long vx_sa;
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
return 0;
}
-#else /* CONFIG_CRASH_DUMP */
-
-static inline void smp_get_save_area(int cpu, u16 address) { }
-
#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
use_sigp_detection = 1;
- for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
+ for (address = 0; address <= MAX_CPU_ADDRESS;
+ address += (1U << smp_cpu_mt_shift)) {
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
SIGP_CC_NOT_OPERATIONAL)
continue;
- info->cpu[info->configured].address = address;
+ info->cpu[info->configured].core_id =
+ address >> smp_cpu_mt_shift;
info->configured++;
}
info->combined = info->configured;
@@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
- int cpu, nr, i;
+ int cpu, nr, i, j;
+ u16 address;
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
continue;
- if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = info->cpu[i].address;
- pcpu->state = (i >= info->configured) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
+ address = info->cpu[i].core_id << smp_cpu_mt_shift;
+ for (j = 0; j <= smp_cpu_mtid; j++) {
+ if (pcpu_find_address(cpu_present_mask, address + j))
+ continue;
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + j;
+ pcpu->state =
+ (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
+ CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (sysfs_add && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpu = cpumask_next(cpu, &avail);
+ if (cpu >= nr_cpu_ids)
+ break;
+ }
}
return nr;
}
static void __init smp_detect_cpus(void)
{
- unsigned int cpu, c_cpus, s_cpus;
+ unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_cpu_info *info;
+ u16 address;
+ /* Get CPU information */
info = smp_get_cpu_info();
if (!info)
panic("smp_detect_cpus failed to allocate memory\n");
+
+ /* Find boot CPU type */
if (info->has_cpu_type) {
- for (cpu = 0; cpu < info->combined; cpu++) {
- if (info->cpu[cpu].address != boot_cpu_address)
- continue;
- /* The boot cpu dictates the cpu type. */
- boot_cpu_type = info->cpu[cpu].type;
- break;
- }
+ address = stap();
+ for (cpu = 0; cpu < info->combined; cpu++)
+ if (info->cpu[cpu].core_id == address) {
+ /* The boot cpu dictates the cpu type. */
+ boot_cpu_type = info->cpu[cpu].type;
+ break;
+ }
+ if (cpu >= info->combined)
+ panic("Could not find boot CPU type");
}
+
+#ifdef CONFIG_CRASH_DUMP
+ /* Collect CPU state of previous system */
+ smp_store_cpu_states(info);
+#endif
+
+ /* Set multi-threading state for the current system */
+ mtid = sclp_get_mtid(boot_cpu_type);
+ mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
+ pcpu_set_smt(mtid);
+
+ /* Print number of CPUs */
c_cpus = s_cpus = 0;
for (cpu = 0; cpu < info->combined; cpu++) {
if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
continue;
- if (cpu < info->configured) {
- smp_get_save_area(c_cpus, info->cpu[cpu].address);
- c_cpus++;
- } else
- s_cpus++;
+ if (cpu < info->configured)
+ c_cpus += smp_cpu_mtid + 1;
+ else
+ s_cpus += smp_cpu_mtid + 1;
}
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
+
+ /* Add CPUs present at boot */
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct pcpu *pcpu;
- int rc;
+ int base, i, rc;
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+ base = cpu - (cpu % (smp_cpu_mtid + 1));
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (base + i < nr_cpu_ids)
+ if (cpu_online(base + i))
+ break;
+ }
+ /*
+ * If this is the first CPU of the core to get online
+ * do an initial CPU reset.
+ */
+ if (i > smp_cpu_mtid &&
+ pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
{
unsigned int possible, sclp, cpu;
- sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
+ sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
+ sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
possible = min(possible, sclp);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
@@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
{
struct pcpu *pcpu = pcpu_devices;
- boot_cpu_address = stap();
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->address = boot_cpu_address;
+ pcpu->address = stap();
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
@@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
const char *buf, size_t count)
{
struct pcpu *pcpu;
- int cpu, val, rc;
+ int cpu, val, rc, i;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
@@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
- if (cpu_online(cpu) || cpu == 0)
+ cpu -= cpu % (smp_cpu_mtid + 1);
+ if (cpu == 0)
goto out;
+ for (i = 0; i <= smp_cpu_mtid; i++)
+ if (cpu_online(cpu + i))
+ goto out;
pcpu = pcpu_devices + cpu;
rc = 0;
switch (val) {
case 0:
if (pcpu->state != CPU_STATE_CONFIGURED)
break;
- rc = sclp_cpu_deconfigure(pcpu->address);
+ rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
- pcpu->state = CPU_STATE_STANDBY;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+ continue;
+ pcpu[i].state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu + i,
+ POLARIZATION_UNKNOWN);
+ }
topology_expect_change();
break;
case 1:
if (pcpu->state != CPU_STATE_STANDBY)
break;
- rc = sclp_cpu_configure(pcpu->address);
+ rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
- pcpu->state = CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+ continue;
+ pcpu[i].state = CPU_STATE_CONFIGURED;
+ smp_cpu_set_polarization(cpu + i,
+ POLARIZATION_UNKNOWN);
+ }
topology_expect_change();
break;
default:
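Nearly every smp.c hunk above relies on the same addressing scheme: a physical CPU address is the core id shifted left by smp_cpu_mt_shift plus a thread number between 0 and smp_cpu_mtid, and the first logical CPU of a core is found by rounding down with cpu - cpu % (smp_cpu_mtid + 1) (the rescan loop places the threads of a core on adjacent logical CPU numbers, so the same rounding works there). A small standalone sketch of that arithmetic, assuming example values (mt_shift = 1, mtid = 1) instead of anything probed via SCLP:

#include <stdio.h>

static unsigned int mt_shift = 1;	/* example: 2 addresses per core */
static unsigned int mtid = 1;		/* example: highest thread id in a core */

static unsigned int cpu_address(unsigned int core_id, unsigned int thread)
{
	return (core_id << mt_shift) + thread;
}

static unsigned int core_base(unsigned int cpu)
{
	return cpu - (cpu % (mtid + 1));	/* first CPU of this core */
}

int main(void)
{
	for (unsigned int core = 0; core < 3; core++)
		for (unsigned int t = 0; t <= mtid; t++) {
			unsigned int addr = cpu_address(core, t);
			printf("core %u thread %u -> address %u, core base %u\n",
			       core, t, addr, core_base(addr));
		}
	return 0;
}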
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a2987243bc76..939ec474b1dd 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 811f542b8ed4..99babea026ca 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -194,6 +194,41 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
+ if (info->mt_installed & 0x80) {
+ seq_printf(m, "LPAR CPUs G-MTID: %d\n",
+ info->mt_general & 0x1f);
+ seq_printf(m, "LPAR CPUs S-MTID: %d\n",
+ info->mt_installed & 0x1f);
+ seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
+ info->mt_psmtid & 0x1f);
+ }
+}
+
+static void print_ext_name(struct seq_file *m, int lvl,
+ struct sysinfo_3_2_2 *info)
+{
+ if (info->vm[lvl].ext_name_encoding == 0)
+ return;
+ if (info->ext_names[lvl][0] == 0)
+ return;
+ switch (info->vm[lvl].ext_name_encoding) {
+ case 1: /* EBCDIC */
+ EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
+ break;
+ case 2: /* UTF-8 */
+ break;
+ default:
+ return;
+ }
+ seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
+ info->ext_names[lvl]);
+}
+
+static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
+{
+ if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+ return;
+ seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid);
}
static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
@@ -213,6 +248,8 @@ static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby);
seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved);
+ print_ext_name(m, i, info);
+ print_uuid(m, i, info);
}
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index b93bed76ea94..24ee33f1af24 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
return mask;
}
-static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+static cpumask_t cpu_thread_map(unsigned int cpu)
+{
+ cpumask_t mask;
+ int i;
+
+ cpumask_copy(&mask, cpumask_of(cpu));
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ return mask;
+ cpu -= cpu % (smp_cpu_mtid + 1);
+ for (i = 0; i <= smp_cpu_mtid; i++)
+ if (cpu_present(cpu + i))
+ cpumask_set_cpu(cpu + i, &mask);
+ return mask;
+}
+
+static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
struct mask_info *book,
struct mask_info *socket,
int one_socket_per_cpu)
{
- unsigned int cpu;
+ unsigned int core;
- for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
- unsigned int rcpu;
- int lcpu;
+ for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
+ unsigned int rcore;
+ int lcpu, i;
- rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
- lcpu = smp_find_processor_id(rcpu);
+ rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
+ lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
if (lcpu < 0)
continue;
- cpumask_set_cpu(lcpu, &book->mask);
- cpu_topology[lcpu].book_id = book->id;
- cpumask_set_cpu(lcpu, &socket->mask);
- cpu_topology[lcpu].core_id = rcpu;
- if (one_socket_per_cpu) {
- cpu_topology[lcpu].socket_id = rcpu;
- socket = socket->next;
- } else {
- cpu_topology[lcpu].socket_id = socket->id;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ cpu_topology[lcpu + i].book_id = book->id;
+ cpu_topology[lcpu + i].core_id = rcore;
+ cpu_topology[lcpu + i].thread_id = lcpu + i;
+ cpumask_set_cpu(lcpu + i, &book->mask);
+ cpumask_set_cpu(lcpu + i, &socket->mask);
+ if (one_socket_per_cpu)
+ cpu_topology[lcpu + i].socket_id = rcore;
+ else
+ cpu_topology[lcpu + i].socket_id = socket->id;
+ smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
- smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+ if (one_socket_per_cpu)
+ socket = socket->next;
}
return socket;
}
@@ -108,7 +126,7 @@ static void clear_masks(void)
static union topology_entry *next_tle(union topology_entry *tle)
{
if (!tle->nl)
- return (union topology_entry *)((struct topology_cpu *)tle + 1);
+ return (union topology_entry *)((struct topology_core *)tle + 1);
return (union topology_entry *)((struct topology_container *)tle + 1);
}
@@ -231,9 +249,11 @@ static void update_cpu_masks(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
+ cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
+ cpu_topology[cpu].thread_id = cpu;
cpu_topology[cpu].core_id = cpu;
cpu_topology[cpu].socket_id = cpu;
cpu_topology[cpu].book_id = cpu;
@@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
+const struct cpumask *cpu_thread_mask(int cpu)
+{
+ return &cpu_topology[cpu].thread_mask;
+}
+
+
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_mask;
@@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
}
static struct sched_domain_topology_level s390_topology[] = {
+ { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
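The topology.c changes feed the new SMT scheduling level with cpu_thread_map(): round the CPU down to its core base, then set the bit of every present thread of that core. The same idea with a plain 64-bit bitmap instead of a cpumask_t, as a hedged sketch with invented example data:

#include <stdio.h>
#include <stdint.h>

#define MTID 1	/* example: two threads per core */

static uint64_t present = 0x37;	/* example: cpus 0,1,2,4,5 present */

static uint64_t thread_siblings(unsigned int cpu)
{
	unsigned int base = cpu - cpu % (MTID + 1);
	uint64_t mask = 1ULL << cpu;		/* always contains the cpu itself */

	for (unsigned int i = 0; i <= MTID; i++)
		if (present & (1ULL << (base + i)))
			mask |= 1ULL << (base + i);
	return mask;
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < 6; cpu++)
		printf("cpu %u -> sibling mask 0x%llx\n", cpu,
		       (unsigned long long)thread_siblings(cpu));
	return 0;
}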
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index f6b3cd056ec2..cc7328080b60 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
return false;
}
+static int check_per_event(unsigned short cause, unsigned long control,
+ struct pt_regs *regs)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return 0;
+ /* user space single step */
+ if (control == 0)
+ return 1;
+ /* over indication for storage alteration */
+ if ((control & 0x20200000) && (cause & 0x2000))
+ return 1;
+ if (cause & 0x8000) {
+ /* all branches */
+ if ((control & 0x80800000) == 0x80000000)
+ return 1;
+ /* branch into selected range */
+ if (((control & 0x80800000) == 0x80800000) &&
+ regs->psw.addr >= current->thread.per_user.start &&
+ regs->psw.addr <= current->thread.per_user.end)
+ return 1;
+ }
+ return 0;
+}
+
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
- /* If per tracing was active generate trap */
- if (regs->psw.mask & PSW_MASK_PER)
- do_per_trap(regs);
+ if (check_per_event(current->thread.per_event.cause,
+ current->thread.per_user.control, regs)) {
+ /* fix per address */
+ current->thread.per_event.address = utask->vaddr;
+ /* trigger per event */
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+ }
return 0;
}
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
+ current->thread.per_event.address = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__rc; \
})
-#define emu_store_ril(ptr, input) \
+#define emu_store_ril(regs, ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
+ __typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)ptr & mask) \
+ else if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
- else if (put_user(*(input), ptr)) \
+ else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
+ if (__rc == 0) \
+ sim_stor_event(regs, __ptr, mask + 1); \
__rc; \
})
@@ -198,6 +230,25 @@ union split_register {
};
/*
+ * If user per registers are set up to trace storage alterations and an
+ * emulated store took place on a fitting address, a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return;
+ if (!(current->thread.per_user.control & PER_EVENT_STORE))
+ return;
+ if ((void *)current->thread.per_user.start > (addr + len))
+ return;
+ if ((void *)current->thread.per_user.end < addr)
+ return;
+ current->thread.per_event.address = regs->psw.addr;
+ current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
+/*
* pc relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
- rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+ rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
- rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+ rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
- rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+ rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
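check_per_event() and sim_stor_event() above come down to bit tests against the PER control word plus a range check of the address against per_user.start and per_user.end. A reduced userspace model of just the storage-alteration case; the PER_EVENT_STORE value and the per_user struct are assumptions made for the sketch, not the kernel definitions:

#include <stdio.h>
#include <stdint.h>

#define PER_EVENT_STORE 0x00200000U	/* assumed value for the example */

struct per_user {
	uint32_t control;
	uint64_t start;
	uint64_t end;
};

static int store_hits_per_range(const struct per_user *per,
				uint64_t addr, uint64_t len)
{
	if (!(per->control & PER_EVENT_STORE))
		return 0;			/* storage tracing disabled */
	if (per->start > addr + len)
		return 0;			/* store ends before the range */
	if (per->end < addr)
		return 0;			/* store starts after the range */
	return 1;				/* would trigger a PER trap */
}

int main(void)
{
	struct per_user per = { PER_EVENT_STORE, 0x1000, 0x1fff };

	printf("%d\n", store_hits_per_range(&per, 0x0ff8, 8));	/* 1: touches 0x1000 */
	printf("%d\n", store_hits_per_range(&per, 0x2000, 4));	/* 0: past the range */
	return 0;
}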
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7f0089d9a4aa..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
+#include <asm/cpu_mf.h>
+#include <asm/smp.h>
static void virt_timer_expire(void);
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
+static DEFINE_PER_CPU(u64, mt_cycles[32]);
+static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
+static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
+
static inline u64 get_vtimer(void)
{
u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
+ u64 user_scaled, system_scaled;
+ int i;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+ /* Do MT utilization calculation */
+ if (smp_cpu_mtid) {
+ u64 cycles_new[32], *cycles_old;
+ u64 delta, mult, div;
+
+ cycles_old = this_cpu_ptr(mt_cycles);
+ if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+ mult = div = 0;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ delta = cycles_new[i] - cycles_old[i];
+ mult += delta;
+ div += (i + 1) * delta;
+ }
+ if (mult > 0) {
+ /* Update scaling factor */
+ __this_cpu_write(mt_scaling_mult, mult);
+ __this_cpu_write(mt_scaling_div, div);
+ memcpy(cycles_old, cycles_new,
+ sizeof(u64) * (smp_cpu_mtid + 1));
+ }
+ }
+ }
+
user = S390_lowcore.user_timer - ti->user_timer;
S390_lowcore.steal_timer -= user;
ti->user_timer = S390_lowcore.user_timer;
- account_user_time(tsk, user, user);
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, hardirq_offset, system, system);
+
+ user_scaled = user;
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ user_scaled = (user_scaled * mult) / div;
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_user_time(tsk, user, user_scaled);
+ account_system_time(tsk, hardirq_offset, system, system_scaled);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
@@ -126,9 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
- u64 timer, system;
-
- WARN_ON_ONCE(!irqs_disabled());
+ u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
@@ -137,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, 0, system, system);
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_system_time(tsk, 0, system, system_scaled);
virt_timer_forward(system);
}
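The MT utilization scaling above weights the accounted CPU time by how busy the core's threads really were: mult is the sum of the per-thread cycle deltas, div additionally weights the delta of thread i by (i + 1), and user/system time is multiplied by mult/div. A worked example with invented counter values (stcctm5() and the real per-cpu state are not modelled):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta[2] = { 800, 200 };	/* cycle deltas of thread 0 and 1 */
	uint64_t mult = 0, div = 0;

	for (int i = 0; i < 2; i++) {
		mult += delta[i];			/* 800 + 200         = 1000 */
		div  += (uint64_t)(i + 1) * delta[i];	/* 800 + 2*200       = 1200 */
	}

	uint64_t raw = 6000;			/* e.g. raw cpu time units */
	uint64_t scaled = raw * mult / div;	/* 6000 * 1000 / 1200 = 5000 */

	printf("raw %llu -> scaled %llu\n",
	       (unsigned long long)raw, (unsigned long long)scaled);
	return 0;
}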
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 646db9c467d1..5fce52cf0e57 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
+ select SRCU
---help---
Support hosting paravirtualized guest machines using the SIE
virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 8a1be9017730..267523cac6de 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -357,8 +357,8 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
union asce asce;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
- edat1 = ctlreg0.edat && test_vfacility(8);
- edat2 = edat1 && test_vfacility(78);
+ edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
+ edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
asce.val = get_vcpu_asce(vcpu);
if (asce.r)
goto real_address;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 81c77ab8102e..bebd2157edd0 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -68,18 +68,27 @@ static int handle_noop(struct kvm_vcpu *vcpu)
static int handle_stop(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
- unsigned int action_bits;
+ uint8_t flags, stop_pending;
vcpu->stat.exit_stop_request++;
- trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
- action_bits = vcpu->arch.local_int.action_bits;
+ /* delay the stop if any non-stop irq is pending */
+ if (kvm_s390_vcpu_has_irq(vcpu, 1))
+ return 0;
+
+ /* avoid races with the injection/SIGP STOP code */
+ spin_lock(&li->lock);
+ flags = li->irq.stop.flags;
+ stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
+ spin_unlock(&li->lock);
- if (!(action_bits & ACTION_STOP_ON_STOP))
+ trace_kvm_s390_stop_request(stop_pending, flags);
+ if (!stop_pending)
return 0;
- if (action_bits & ACTION_STORE_ON_STOP) {
+ if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
rc = kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_NOADDR);
if (rc)
@@ -279,11 +288,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
irq.type = KVM_S390_INT_CPU_TIMER;
break;
case EXT_IRQ_EXTERNAL_CALL:
- if (kvm_s390_si_ext_call_pending(vcpu))
- return 0;
irq.type = KVM_S390_INT_EXTERNAL_CALL;
irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
- break;
+ rc = kvm_s390_inject_vcpu(vcpu, &irq);
+ /* ignore if another external call is already pending */
+ if (rc == -EBUSY)
+ return 0;
+ return rc;
default:
return -EOPNOTSUPP;
}
@@ -307,17 +318,19 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
/* Make sure that the source is paged-in */
- srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
- if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
+ &srcaddr, 0);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
if (rc != 0)
return rc;
/* Make sure that the destination is paged-in */
- dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
- if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
+ &dstaddr, 1);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
if (rc != 0)
return rc;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f00f31e66cd8..073b5f387d1d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -19,6 +19,7 @@
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
+#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -159,6 +160,12 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ /*
+ * STOP irqs will never be actively delivered. They are triggered via
+ * intercept requests and cleared when the stop intercept is performed.
+ */
+ __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
+
return active_mask;
}
@@ -186,9 +193,6 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
LCTL_CR10 | LCTL_CR11);
vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
}
-
- if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
- atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -216,11 +220,18 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->lctl |= LCTL_CR14;
}
+static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
+{
+ if (kvm_s390_is_stop_irq_pending(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+}
+
/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
set_intercept_indicators_ext(vcpu);
set_intercept_indicators_mchk(vcpu);
+ set_intercept_indicators_stop(vcpu);
}
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
@@ -392,18 +403,6 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
-{
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
- vcpu->stat.deliver_stop_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
- 0, 0);
-
- __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
- clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
- return 0;
-}
-
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -705,7 +704,6 @@ static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
[IRQ_PEND_RESTART] = __deliver_restart,
- [IRQ_PEND_SIGP_STOP] = __deliver_stop,
[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
};
@@ -738,21 +736,20 @@ static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
return rc;
}
-/* Check whether SIGP interpretation facility has an external call pending */
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
+/* Check whether an external call is pending (deliverable or not) */
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
- atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
- if (!psw_extint_disabled(vcpu) &&
- (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
- (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
- (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
- return 1;
+ if (!sclp_has_sigpif())
+ return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
- return 0;
+ return (sigp_ctrl & SIGP_CTRL_C) &&
+ (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
struct kvm_s390_interrupt_info *inti;
@@ -773,7 +770,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
if (!rc && kvm_cpu_has_pending_timer(vcpu))
rc = 1;
- if (!rc && kvm_s390_si_ext_call_pending(vcpu))
+ /* external call pending and deliverable */
+ if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+ !psw_extint_disabled(vcpu) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+ rc = 1;
+
+ if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
rc = 1;
return rc;
@@ -804,14 +807,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP; /* disabled wait */
}
- __set_cpu_idle(vcpu);
if (!ckc_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+ __set_cpu_idle(vcpu);
goto no_timer;
}
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+ /* underflow */
+ if (vcpu->arch.sie_block->ckc < now)
+ return 0;
+
+ __set_cpu_idle(vcpu);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
@@ -820,7 +829,7 @@ no_timer:
__unset_cpu_idle(vcpu);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
+ hrtimer_cancel(&vcpu->arch.ckc_timer);
return 0;
}
@@ -840,10 +849,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
+ u64 now, sltime;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
- kvm_s390_vcpu_wakeup(vcpu);
+ now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+ /*
+ * If the monotonic clock runs faster than the tod clock we might be
+ * woken up too early and have to go back to sleep to avoid deadlocks.
+ */
+ if (vcpu->arch.sie_block->ckc > now &&
+ hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+ return HRTIMER_RESTART;
+ kvm_s390_vcpu_wakeup(vcpu);
return HRTIMER_NORESTART;
}
@@ -859,8 +878,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
/* clear pending external calls set by sigp interpretation facility */
atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
- atomic_clear_mask(SIGP_CTRL_C,
- &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
+ vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@@ -984,18 +1002,43 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
return 0;
}
-int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
+{
+ unsigned char new_val, old_val;
+ uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+ new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
+ old_val = *sigp_ctrl & ~SIGP_CTRL_C;
+ if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
+ /* another external call is pending */
+ return -EBUSY;
+ }
+ atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+ return 0;
+}
+
+static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+ uint16_t src_id = irq->u.extcall.code;
VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
- irq->u.extcall.code);
+ src_id);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
- irq->u.extcall.code, 0, 2);
+ src_id, 0, 2);
+
+ /* sending vcpu invalid */
+ if (src_id >= KVM_MAX_VCPUS ||
+ kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+ return -EINVAL;
+ if (sclp_has_sigpif())
+ return __inject_extcall_sigpif(vcpu, src_id);
+
+ if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+ return -EBUSY;
*extcall = irq->u.extcall;
- set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1006,23 +1049,41 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
- prefix->address);
+ irq->u.prefix.address);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
- prefix->address, 0, 2);
+ irq->u.prefix.address, 0, 2);
+
+ if (!is_vcpu_stopped(vcpu))
+ return -EBUSY;
*prefix = irq->u.prefix;
set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
return 0;
}
+#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_stop_info *stop = &li->irq.stop;
+ int rc = 0;
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
- li->action_bits |= ACTION_STOP_ON_STOP;
- set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
+ return -EINVAL;
+
+ if (is_vcpu_stopped(vcpu)) {
+ if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
+ rc = kvm_s390_store_status_unloaded(vcpu,
+ KVM_S390_STORE_STATUS_NOADDR);
+ return rc;
+ }
+
+ if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
+ return -EBUSY;
+ stop->flags = irq->u.stop.flags;
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
return 0;
}
@@ -1042,14 +1103,13 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
irq->u.emerg.code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
- emerg->code, 0, 2);
+ irq->u.emerg.code, 0, 2);
- set_bit(emerg->code, li->sigp_emerg_pending);
+ set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
@@ -1061,9 +1121,9 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
- mchk->mcic);
+ irq->u.mchk.mcic);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
- mchk->mcic, 2);
+ irq->u.mchk.mcic, 2);
/*
* Because repressible machine checks can be indicated along with
@@ -1121,7 +1181,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
if ((!schid && !cr6) || (schid && cr6))
return NULL;
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
inti = NULL;
@@ -1149,7 +1208,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
if (list_empty(&fi->list))
atomic_set(&fi->active, 0);
spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
return inti;
}
@@ -1162,7 +1220,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
int sigcpu;
int rc = 0;
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
@@ -1187,6 +1244,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
list_add_tail(&inti->list, &iter->list);
}
atomic_set(&fi->active, 1);
+ if (atomic_read(&kvm->online_vcpus) == 0)
+ goto unlock_fi;
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
do {
@@ -1213,7 +1272,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
return rc;
}
@@ -1221,6 +1279,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int)
{
struct kvm_s390_interrupt_info *inti;
+ int rc;
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
@@ -1239,7 +1298,6 @@ int kvm_s390_inject_vm(struct kvm *kvm,
inti->ext.ext_params = s390int->parm;
break;
case KVM_S390_INT_PFAULT_DONE:
- inti->type = s390int->type;
inti->ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_MCHK:
@@ -1268,7 +1326,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2);
- return __inject_vm(kvm, inti);
+ rc = __inject_vm(kvm, inti);
+ if (rc)
+ kfree(inti);
+ return rc;
}
void kvm_s390_reinject_io_int(struct kvm *kvm,
@@ -1290,13 +1351,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
case KVM_S390_SIGP_SET_PREFIX:
irq->u.prefix.address = s390int->parm;
break;
+ case KVM_S390_SIGP_STOP:
+ irq->u.stop.flags = s390int->parm;
+ break;
case KVM_S390_INT_EXTERNAL_CALL:
- if (irq->u.extcall.code & 0xffff0000)
+ if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.extcall.code = s390int->parm;
break;
case KVM_S390_INT_EMERGENCY:
- if (irq->u.emerg.code & 0xffff0000)
+ if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.emerg.code = s390int->parm;
break;
@@ -1307,6 +1371,23 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
return 0;
}
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+}
+
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ spin_lock(&li->lock);
+ li->irq.stop.flags = 0;
+ clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ spin_unlock(&li->lock);
+}
+
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1363,7 +1444,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_interrupt_info *n, *inti = NULL;
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
list_for_each_entry_safe(inti, n, &fi->list, list) {
@@ -1373,7 +1453,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
fi->irq_count = 0;
atomic_set(&fi->active, 0);
spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
}
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
@@ -1413,7 +1492,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
int ret = 0;
int n = 0;
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
@@ -1432,7 +1510,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
}
spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
return ret < 0 ? ret : n;
}
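Among the interrupt.c changes, __inject_extcall_sigpif() enforces "at most one pending external call" with a single compare-and-swap on the per-VCPU sigp control byte: the exchange only succeeds if the C (call pending) bit was clear, otherwise the caller gets -EBUSY. A toy model with C11 atomics standing in for the kernel cmpxchg; the numeric values of SIGP_CTRL_C and SIGP_CTRL_SCN_MASK are assumptions made for the example:

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

#define SIGP_CTRL_C		0x80	/* assumed bit layout for the sketch */
#define SIGP_CTRL_SCN_MASK	0x3f

static _Atomic uint8_t sigp_ctrl;	/* per-VCPU control byte in the sketch */

static int inject_extcall(uint8_t src_id)
{
	uint8_t new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	uint8_t old_val = atomic_load(&sigp_ctrl) & ~SIGP_CTRL_C;

	/* Only succeeds if no call was pending (C bit clear). */
	if (!atomic_compare_exchange_strong(&sigp_ctrl, &old_val, new_val))
		return -1;		/* -EBUSY in the kernel */
	return 0;
}

int main(void)
{
	printf("first inject:  %d\n", inject_extcall(3));	/* 0 */
	printf("second inject: %d\n", inject_extcall(5));	/* -1: already pending */
	return 0;
}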
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3e09801e3104..0c3623927563 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -22,6 +22,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
@@ -29,7 +30,6 @@
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
-#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -50,6 +50,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exit_instruction", VCPU_STAT(exit_instruction) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+ { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -98,15 +99,20 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-unsigned long *vfacilities;
-static struct gmap_notifier gmap_notifier;
+/* upper facilities limit for kvm */
+unsigned long kvm_s390_fac_list_mask[] = {
+ 0xff82fffbf4fc2000UL,
+ 0x005c000000000000UL,
+};
-/* test availability of vfacility */
-int test_vfacility(unsigned long nr)
+unsigned long kvm_s390_fac_list_mask_size(void)
{
- return __test_facility(nr, (void *) vfacilities);
+ BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
+ return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
+static struct gmap_notifier gmap_notifier;
+
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
@@ -166,6 +172,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_S390_USER_SIGP:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
@@ -254,6 +261,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.use_irqchip = 1;
r = 0;
break;
+ case KVM_CAP_S390_USER_SIGP:
+ kvm->arch.user_sigp = 1;
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -261,7 +272,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
return r;
}
-static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_MEM_LIMIT_SIZE:
+ ret = 0;
+ if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
unsigned int idx;
@@ -283,6 +311,36 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
mutex_unlock(&kvm->lock);
ret = 0;
break;
+ case KVM_S390_VM_MEM_LIMIT_SIZE: {
+ unsigned long new_limit;
+
+ if (kvm_is_ucontrol(kvm))
+ return -EINVAL;
+
+ if (get_user(new_limit, (u64 __user *)attr->addr))
+ return -EFAULT;
+
+ if (new_limit > kvm->arch.gmap->asce_end)
+ return -E2BIG;
+
+ ret = -EBUSY;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus) == 0) {
+ /* gmap_alloc will round the limit up */
+ struct gmap *new = gmap_alloc(current->mm, new_limit);
+
+ if (!new) {
+ ret = -ENOMEM;
+ } else {
+ gmap_free(kvm->arch.gmap);
+ new->private = kvm;
+ kvm->arch.gmap = new;
+ ret = 0;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ break;
+ }
default:
ret = -ENXIO;
break;
@@ -290,13 +348,276 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
+
+static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ if (!test_kvm_facility(kvm, 76))
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+ switch (attr->attr) {
+ case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ get_random_bytes(
+ kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ kvm->arch.crypto.aes_kw = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ get_random_bytes(
+ kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+ kvm->arch.crypto.dea_kw = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ kvm->arch.crypto.aes_kw = 0;
+ memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+ kvm->arch.crypto.dea_kw = 0;
+ memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+ break;
+ default:
+ mutex_unlock(&kvm->lock);
+ return -ENXIO;
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvm_s390_vcpu_crypto_setup(vcpu);
+ exit_sie(vcpu);
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ u8 gtod_high;
+
+ if (copy_from_user(&gtod_high, (void __user *)attr->addr,
+ sizeof(gtod_high)))
+ return -EFAULT;
+
+ if (gtod_high != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_vcpu *cur_vcpu;
+ unsigned int vcpu_idx;
+ u64 host_tod, gtod;
+ int r;
+
+ if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+ return -EFAULT;
+
+ r = store_tod_clock(&host_tod);
+ if (r)
+ return r;
+
+ mutex_lock(&kvm->lock);
+ kvm->arch.epoch = gtod - host_tod;
+ kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+ cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+ exit_sie(cur_vcpu);
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ if (attr->flags)
+ return -EINVAL;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_TOD_HIGH:
+ ret = kvm_s390_set_tod_high(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD_LOW:
+ ret = kvm_s390_set_tod_low(kvm, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ u8 gtod_high = 0;
+
+ if (copy_to_user((void __user *)attr->addr, &gtod_high,
+ sizeof(gtod_high)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ u64 host_tod, gtod;
+ int r;
+
+ r = store_tod_clock(&host_tod);
+ if (r)
+ return r;
+
+ gtod = host_tod + kvm->arch.epoch;
+ if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ if (attr->flags)
+ return -EINVAL;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_TOD_HIGH:
+ ret = kvm_s390_get_tod_high(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD_LOW:
+ ret = kvm_s390_get_tod_low(kvm, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_processor *proc;
+ int ret = 0;
+
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (!proc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!copy_from_user(proc, (void __user *)attr->addr,
+ sizeof(*proc))) {
+ memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
+ sizeof(struct cpuid));
+ kvm->arch.model.ibc = proc->ibc;
+ memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ } else
+ ret = -EFAULT;
+ kfree(proc);
+out:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_CPU_PROCESSOR:
+ ret = kvm_s390_set_processor(kvm, attr);
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_processor *proc;
+ int ret = 0;
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (!proc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
+ proc->ibc = kvm->arch.model.ibc;
+ memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+ if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
+ ret = -EFAULT;
+ kfree(proc);
+out:
+ return ret;
+}
+
+static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_machine *mach;
+ int ret = 0;
+
+ mach = kzalloc(sizeof(*mach), GFP_KERNEL);
+ if (!mach) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ get_cpu_id((struct cpuid *) &mach->cpuid);
+ mach->ibc = sclp_get_ibc();
+ memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
+ kvm_s390_fac_list_mask_size() * sizeof(u64));
+ memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_U64);
+ if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
+ ret = -EFAULT;
+ kfree(mach);
+out:
+ return ret;
+}
+
+static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_CPU_PROCESSOR:
+ ret = kvm_s390_get_processor(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MACHINE:
+ ret = kvm_s390_get_machine(kvm, attr);
+ break;
+ }
+ return ret;
+}
+
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_S390_VM_MEM_CTRL:
- ret = kvm_s390_mem_control(kvm, attr);
+ ret = kvm_s390_set_mem_control(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD:
+ ret = kvm_s390_set_tod(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MODEL:
+ ret = kvm_s390_set_cpu_model(kvm, attr);
+ break;
+ case KVM_S390_VM_CRYPTO:
+ ret = kvm_s390_vm_set_crypto(kvm, attr);
break;
default:
ret = -ENXIO;
@@ -308,7 +629,24 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
- return -ENXIO;
+ int ret;
+
+ switch (attr->group) {
+ case KVM_S390_VM_MEM_CTRL:
+ ret = kvm_s390_get_mem_control(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD:
+ ret = kvm_s390_get_tod(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MODEL:
+ ret = kvm_s390_get_cpu_model(kvm, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -320,6 +658,42 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
case KVM_S390_VM_MEM_CLR_CMMA:
+ case KVM_S390_VM_MEM_LIMIT_SIZE:
+ ret = 0;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case KVM_S390_VM_TOD:
+ switch (attr->attr) {
+ case KVM_S390_VM_TOD_LOW:
+ case KVM_S390_VM_TOD_HIGH:
+ ret = 0;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case KVM_S390_VM_CPU_MODEL:
+ switch (attr->attr) {
+ case KVM_S390_VM_CPU_PROCESSOR:
+ case KVM_S390_VM_CPU_MACHINE:
+ ret = 0;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case KVM_S390_VM_CRYPTO:
+ switch (attr->attr) {
+ case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
default:
@@ -401,9 +775,61 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
+static int kvm_s390_query_ap_config(u8 *config)
+{
+ u32 fcn_code = 0x04000000UL;
+ u32 cc;
+
+ asm volatile(
+ "lgr 0,%1\n"
+ "lgr 2,%2\n"
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "ipm %0\n"
+ "srl %0,28\n"
+ : "=r" (cc)
+ : "r" (fcn_code), "r" (config)
+ : "cc", "0", "2", "memory"
+ );
+
+ return cc;
+}
+
+static int kvm_s390_apxa_installed(void)
+{
+ u8 config[128];
+ int cc;
+
+ if (test_facility(2) && test_facility(12)) {
+ cc = kvm_s390_query_ap_config(config);
+
+ if (cc)
+ pr_err("PQAP(QCI) failed with cc=%d", cc);
+ else
+ return config[0] & 0x40;
+ }
+
+ return 0;
+}
+
+static void kvm_s390_set_crycb_format(struct kvm *kvm)
+{
+ kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+
+ if (kvm_s390_apxa_installed())
+ kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
+ else
+ kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
+}
+
+static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
+{
+ get_cpu_id(cpu_id);
+ cpu_id->version = 0xff;
+}
+
static int kvm_s390_crypto_init(struct kvm *kvm)
{
- if (!test_vfacility(76))
+ if (!test_kvm_facility(kvm, 76))
return 0;
kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
@@ -411,15 +837,18 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
if (!kvm->arch.crypto.crycb)
return -ENOMEM;
- kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
- CRYCB_FORMAT1;
+ kvm_s390_set_crycb_format(kvm);
+
+ /* Disable AES/DEA protected key functions by default */
+ kvm->arch.crypto.aes_kw = 0;
+ kvm->arch.crypto.dea_kw = 0;
return 0;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int rc;
+ int i, rc;
char debug_name[16];
static unsigned long sca_offset;
@@ -454,6 +883,46 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.dbf)
goto out_nodbf;
+ /*
+ * The architectural maximum number of facility bits is 16 kbit, which
+ * takes 2 kbyte of memory to store. We therefore need a full page to
+ * hold both the active copy (arch.model.fac->sie) and the current
+ * facility set (arch.model.fac->kvm). The page address must fit into
+ * 31 bits and be word aligned.
+ */
+ kvm->arch.model.fac =
+ (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!kvm->arch.model.fac)
+ goto out_nofac;
+
+ memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_U64);
+
+ /*
+ * If this KVM host does *not* run in an LPAR, relax the facility bits
+ * of the kvm facility mask by all facilities the host is missing. The
+ * remaining facilities can then be used to determine the right CPU
+ * model. Live guest migration must prohibit migrating a KVM guest
+ * running in an LPAR to a non-LPAR host.
+ */
+ if (!MACHINE_IS_LPAR)
+ for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
+ kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
+
+ /*
+ * Apply the kvm facility mask to limit the kvm supported/tolerated
+ * facility list.
+ */
+ for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
+ if (i < kvm_s390_fac_list_mask_size())
+ kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+ else
+ kvm->arch.model.fac->kvm[i] = 0UL;
+ }
+
+ kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
+ kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
+
if (kvm_s390_crypto_init(kvm) < 0)
goto out_crypto;
@@ -477,6 +946,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.css_support = 0;
kvm->arch.use_irqchip = 0;
+ kvm->arch.epoch = 0;
spin_lock_init(&kvm->arch.start_stop_lock);
@@ -484,6 +954,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_nogmap:
kfree(kvm->arch.crypto.crycb);
out_crypto:
+ free_page((unsigned long)kvm->arch.model.fac);
+out_nofac:
debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
@@ -536,6 +1008,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);
+ free_page((unsigned long)kvm->arch.model.fac);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
kfree(kvm->arch.crypto.crycb);
@@ -546,25 +1019,30 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
/* Section: vcpu related */
+static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
+ if (!vcpu->arch.gmap)
+ return -ENOMEM;
+ vcpu->arch.gmap->private = vcpu->kvm;
+
+ return 0;
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
- if (kvm_is_ucontrol(vcpu->kvm)) {
- vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
- if (!vcpu->arch.gmap)
- return -ENOMEM;
- vcpu->arch.gmap->private = vcpu->kvm;
- return 0;
- }
-
- vcpu->arch.gmap = vcpu->kvm->arch.gmap;
vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
KVM_SYNC_GPRS |
KVM_SYNC_ACRS |
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+
+ if (kvm_is_ucontrol(vcpu->kvm))
+ return __kvm_ucontrol_vcpu_init(vcpu);
+
return 0;
}
@@ -615,16 +1093,27 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
kvm_s390_clear_local_irqs(vcpu);
}
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
- return 0;
+ mutex_lock(&vcpu->kvm->lock);
+ vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+ mutex_unlock(&vcpu->kvm->lock);
+ if (!kvm_is_ucontrol(vcpu->kvm))
+ vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
- if (!test_vfacility(76))
+ if (!test_kvm_facility(vcpu->kvm, 76))
return;
+ vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+
+ if (vcpu->kvm->arch.crypto.aes_kw)
+ vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+ if (vcpu->kvm->arch.crypto.dea_kw)
+ vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
+
vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
@@ -654,14 +1143,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_STOPPED |
CPUSTAT_GED);
vcpu->arch.sie_block->ecb = 6;
- if (test_vfacility(50) && test_vfacility(73))
+ if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
vcpu->arch.sie_block->ecb2 = 8;
- vcpu->arch.sie_block->eca = 0xD1002000U;
+ vcpu->arch.sie_block->eca = 0xC1002000U;
if (sclp_has_siif())
vcpu->arch.sie_block->eca |= 1;
- vcpu->arch.sie_block->fac = (int) (long) vfacilities;
+ if (sclp_has_sigpif())
+ vcpu->arch.sie_block->eca |= 0x10000000U;
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
ICTL_TPROT;
@@ -670,10 +1160,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (rc)
return rc;
}
- hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
- get_cpu_id(&vcpu->arch.cpu_id);
- vcpu->arch.cpu_id.version = 0xff;
+
+ mutex_lock(&vcpu->kvm->lock);
+ vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
+ memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
+ mutex_unlock(&vcpu->kvm->lock);
kvm_s390_vcpu_crypto_setup(vcpu);
@@ -717,6 +1212,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
+ vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -741,7 +1237,7 @@ out:
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return kvm_cpu_has_interrupt(vcpu);
+ return kvm_s390_vcpu_has_irq(vcpu, 0);
}
void s390_vcpu_block(struct kvm_vcpu *vcpu)
@@ -869,6 +1365,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_S390_PFTOKEN:
r = get_user(vcpu->arch.pfault_token,
(u64 __user *)reg->addr);
+ if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+ kvm_clear_async_pf_completion_queue(vcpu);
break;
case KVM_REG_S390_PFCOMPARE:
r = get_user(vcpu->arch.pfault_compare,
@@ -1176,7 +1674,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
return 0;
if (psw_extint_disabled(vcpu))
return 0;
- if (kvm_cpu_has_interrupt(vcpu))
+ if (kvm_s390_vcpu_has_irq(vcpu, 0))
return 0;
if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
return 0;
@@ -1341,6 +1839,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.pfault_token = kvm_run->s.regs.pft;
vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+ if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+ kvm_clear_async_pf_completion_queue(vcpu);
}
kvm_run->kvm_dirty_regs = 0;
}
@@ -1559,15 +2059,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
- /* Need to lock access to action_bits to avoid a SIGP race condition */
- spin_lock(&vcpu->arch.local_int.lock);
- atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
-
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
- vcpu->arch.local_int.action_bits &=
- ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
- spin_unlock(&vcpu->arch.local_int.lock);
+ kvm_s390_clear_stop_irq(vcpu);
+ atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
__disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) {
@@ -1783,30 +2278,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
static int __init kvm_s390_init(void)
{
- int ret;
- ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
- if (ret)
- return ret;
-
- /*
- * guests can ask for up to 255+1 double words, we need a full page
- * to hold the maximum amount of facilities. On the other hand, we
- * only set facilities that are known to work in KVM.
- */
- vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!vfacilities) {
- kvm_exit();
- return -ENOMEM;
- }
- memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
- vfacilities[0] &= 0xff82fffbf47c2000UL;
- vfacilities[1] &= 0x005c000000000000UL;
- return 0;
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
static void __exit kvm_s390_exit(void)
{
- free_page((unsigned long) vfacilities);
kvm_exit();
}
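
The new VM attribute groups above (KVM_S390_VM_TOD, KVM_S390_VM_CPU_MODEL, KVM_S390_VM_CRYPTO) are reached from userspace through the generic KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR ioctls on the VM file descriptor. The sketch below is illustrative only and not part of the patch: it assumes an already created VM descriptor (vm_fd) and the attribute constants from the uapi header added by this series, and shows how the low word of the guest TOD clock might be set.

/* Illustrative only: drive the new KVM_S390_VM_TOD group from userspace. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_tod_low(int vm_fd, uint64_t gtod)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_LOW,
		/* kvm_s390_set_tod_low() reads this buffer via copy_from_user() */
		.addr  = (uint64_t)(unsigned long)&gtod,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Reading the current guest TOD back works the same way with KVM_GET_DEVICE_ATTR, which lands in kvm_s390_get_tod_low() above.
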
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a8f3d9b71c11..985c2114d7ef 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -18,12 +18,10 @@
#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/facility.h>
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
-/* declare vfacilities extern */
-extern unsigned long *vfacilities;
-
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1 1
@@ -127,6 +125,12 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
+/* test availability of a facility in a kvm instance */
+static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
+{
+ return __test_facility(nr, kvm->arch.model.fac->kvm);
+}
+
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -183,7 +187,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
/* is cmma enabled */
bool kvm_s390_cmma_enabled(struct kvm *kvm);
-int test_vfacility(unsigned long nr);
+unsigned long kvm_s390_fac_list_mask_size(void);
+extern unsigned long kvm_s390_fac_list_mask[];
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
@@ -228,11 +233,13 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
struct kvm_s390_irq *s390irq);
/* implemented in interrupt.c */
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1be578d64dfc..bdd9b5b17e03 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -337,19 +337,24 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
static int handle_stfl(struct kvm_vcpu *vcpu)
{
int rc;
+ unsigned int fac;
vcpu->stat.instruction_stfl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+ /*
+ * We need to shift the lower 32 facility bits (bits 0-31) from a u64
+ * into a u32 memory representation. They will remain bits 0-31.
+ */
+ fac = *vcpu->kvm->arch.model.fac->sie >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
- vfacilities, 4);
+ &fac, sizeof(fac));
if (rc)
return rc;
- VCPU_EVENT(vcpu, 5, "store facility list value %x",
- *(unsigned int *) vfacilities);
- trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
+ VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+ trace_kvm_s390_handle_stfl(vcpu, fac);
return 0;
}
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 6651f9f73973..23b1e86b2122 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
struct kvm_s390_local_interrupt *li;
int cpuflags;
int rc;
+ int ext_call_pending;
li = &dst_vcpu->arch.local_int;
cpuflags = atomic_read(li->cpuflags);
- if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+ ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
+ if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
else {
*reg &= 0xffffffff00000000UL;
- if (cpuflags & CPUSTAT_ECALL_PEND)
+ if (ext_call_pending)
*reg |= SIGP_STATUS_EXT_CALL_PENDING;
if (cpuflags & CPUSTAT_STOPPED)
*reg |= SIGP_STATUS_STOPPED;
@@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
}
static int __sigp_external_call(struct kvm_vcpu *vcpu,
- struct kvm_vcpu *dst_vcpu)
+ struct kvm_vcpu *dst_vcpu, u64 *reg)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EXTERNAL_CALL,
@@ -105,45 +107,31 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu,
int rc;
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
- if (!rc)
+ if (rc == -EBUSY) {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_EXT_CALL_PENDING;
+ return SIGP_CC_STATUS_STORED;
+ } else if (rc == 0) {
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
dst_vcpu->vcpu_id);
-
- return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
-{
- struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
- int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
- spin_lock(&li->lock);
- if (li->action_bits & ACTION_STOP_ON_STOP) {
- /* another SIGP STOP is pending */
- rc = SIGP_CC_BUSY;
- goto out;
}
- if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
- if ((action & ACTION_STORE_ON_STOP) != 0)
- rc = -ESHUTDOWN;
- goto out;
- }
- set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
- li->action_bits |= action;
- atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
- kvm_s390_vcpu_wakeup(dst_vcpu);
-out:
- spin_unlock(&li->lock);
- return rc;
+ return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ };
int rc;
- rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
- VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+ if (rc == -EBUSY)
+ rc = SIGP_CC_BUSY;
+ else if (rc == 0)
+ VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
+ dst_vcpu->vcpu_id);
return rc;
}
@@ -151,20 +139,18 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u64 *reg)
{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
+ };
int rc;
- rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
- ACTION_STORE_ON_STOP);
- VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
- dst_vcpu->vcpu_id);
-
- if (rc == -ESHUTDOWN) {
- /* If the CPU has already been stopped, we still have
- * to save the status when doing stop-and-store. This
- * has to be done after unlocking all spinlocks. */
- rc = kvm_s390_store_status_unloaded(dst_vcpu,
- KVM_S390_STORE_STATUS_NOADDR);
- }
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+ if (rc == -EBUSY)
+ rc = SIGP_CC_BUSY;
+ else if (rc == 0)
+ VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+ dst_vcpu->vcpu_id);
return rc;
}
@@ -197,41 +183,33 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u32 address, u64 *reg)
{
- struct kvm_s390_local_interrupt *li;
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_SET_PREFIX,
+ .u.prefix.address = address & 0x7fffe000u,
+ };
int rc;
- li = &dst_vcpu->arch.local_int;
-
/*
* Make sure the new value is valid memory. We only need to check the
* first page, since address is 8k aligned and memory pieces are always
* at least 1MB aligned and have at least a size of 1MB.
*/
- address &= 0x7fffe000u;
- if (kvm_is_error_gpa(vcpu->kvm, address)) {
+ if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_PARAMETER;
return SIGP_CC_STATUS_STORED;
}
- spin_lock(&li->lock);
- /* cpu must be in stopped state */
- if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+ if (rc == -EBUSY) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
- rc = SIGP_CC_STATUS_STORED;
- goto out_li;
+ return SIGP_CC_STATUS_STORED;
+ } else if (rc == 0) {
+ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
+ dst_vcpu->vcpu_id, irq.u.prefix.address);
}
- li->irq.prefix.address = address;
- set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
- kvm_s390_vcpu_wakeup(dst_vcpu);
- rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
- VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
- address);
-out_li:
- spin_unlock(&li->lock);
return rc;
}
@@ -242,9 +220,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
int flags;
int rc;
- spin_lock(&dst_vcpu->arch.local_int.lock);
flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
- spin_unlock(&dst_vcpu->arch.local_int.lock);
if (!(flags & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -291,8 +267,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
/* handle (RE)START in user space */
int rc = -EOPNOTSUPP;
+ /* make sure we don't race with STOP irq injection */
spin_lock(&li->lock);
- if (li->action_bits & ACTION_STOP_ON_STOP)
+ if (kvm_s390_is_stop_irq_pending(dst_vcpu))
rc = SIGP_CC_BUSY;
spin_unlock(&li->lock);
@@ -333,7 +310,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
break;
case SIGP_EXTERNAL_CALL:
vcpu->stat.instruction_sigp_external_call++;
- rc = __sigp_external_call(vcpu, dst_vcpu);
+ rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
break;
case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
@@ -394,6 +371,53 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
return rc;
}
+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+{
+ if (!vcpu->kvm->arch.user_sigp)
+ return 0;
+
+ switch (order_code) {
+ case SIGP_SENSE:
+ case SIGP_EXTERNAL_CALL:
+ case SIGP_EMERGENCY_SIGNAL:
+ case SIGP_COND_EMERGENCY_SIGNAL:
+ case SIGP_SENSE_RUNNING:
+ return 0;
+ /* update counters as we're directly dropping to user space */
+ case SIGP_STOP:
+ vcpu->stat.instruction_sigp_stop++;
+ break;
+ case SIGP_STOP_AND_STORE_STATUS:
+ vcpu->stat.instruction_sigp_stop_store_status++;
+ break;
+ case SIGP_STORE_STATUS_AT_ADDRESS:
+ vcpu->stat.instruction_sigp_store_status++;
+ break;
+ case SIGP_SET_PREFIX:
+ vcpu->stat.instruction_sigp_prefix++;
+ break;
+ case SIGP_START:
+ vcpu->stat.instruction_sigp_start++;
+ break;
+ case SIGP_RESTART:
+ vcpu->stat.instruction_sigp_restart++;
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ vcpu->stat.instruction_sigp_init_cpu_reset++;
+ break;
+ case SIGP_CPU_RESET:
+ vcpu->stat.instruction_sigp_cpu_reset++;
+ break;
+ default:
+ vcpu->stat.instruction_sigp_unknown++;
+ }
+
+ VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
+ order_code);
+
+ return 1;
+}
+
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -408,6 +432,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
order_code = kvm_s390_get_base_disp_rs(vcpu);
+ if (handle_sigp_order_in_user_space(vcpu, order_code))
+ return -EOPNOTSUPP;
if (r1 % 2)
parameter = vcpu->run->s.regs.gprs[r1];
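
handle_sigp_order_in_user_space() only fires when kvm->arch.user_sigp has been set, so the listed orders keep being handled in the kernel unless userspace opts in. A hedged sketch of that opt-in follows, assuming the KVM_CAP_S390_USER_SIGP capability that accompanies this flag is enabled per VM with KVM_ENABLE_CAP and that vm_fd is an already created VM descriptor; it is not part of the patch itself.

/* Illustrative only: ask KVM to forward the SIGP orders above to user space. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_USER_SIGP;
	/* once enabled, the intercepted SIGP orders are returned to user space */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
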
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 647e9d6a4818..653a7ec09ef5 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets,
* Trace point for a vcpu's stop requests.
*/
TRACE_EVENT(kvm_s390_stop_request,
- TP_PROTO(unsigned int action_bits),
- TP_ARGS(action_bits),
+ TP_PROTO(unsigned char stop_irq, unsigned char flags),
+ TP_ARGS(stop_irq, flags),
TP_STRUCT__entry(
- __field(unsigned int, action_bits)
+ __field(unsigned char, stop_irq)
+ __field(unsigned char, flags)
),
TP_fast_assign(
- __entry->action_bits = action_bits;
+ __entry->stop_irq = stop_irq;
+ __entry->flags = flags;
),
- TP_printk("stop request, action_bits = %08x",
- __entry->action_bits)
+ TP_printk("stop request, stop irq = %u, flags = %08x",
+ __entry->stop_irq, __entry->flags)
);
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 034a35a3e9c1..d6c9991f7797 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -12,7 +12,15 @@
#include <linux/smp.h>
#include <asm/io.h>
-int spin_retry = 1000;
+int spin_retry = -1;
+
+static int __init spin_retry_init(void)
+{
+ if (spin_retry < 0)
+ spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
+ return 0;
+}
+early_initcall(spin_retry_init);
/**
* spin_retry= parameter
@@ -24,6 +32,11 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+{
+ asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
+}
+
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -46,6 +59,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
/* Loop for a while on the lock value. */
count = spin_retry;
do {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
@@ -84,6 +99,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
/* Loop for a while on the lock value. */
count = spin_retry;
do {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
@@ -100,11 +117,19 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
+ unsigned int cpu = SPINLOCK_LOCKVAL;
+ unsigned int owner;
int count;
- for (count = spin_retry; count > 0; count--)
- if (arch_spin_trylock_once(lp))
- return 1;
+ for (count = spin_retry; count > 0; count--) {
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return 1;
+ } else if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
+ }
return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
@@ -126,8 +151,11 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
}
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
- if ((int) old < 0)
+ if ((int) old < 0) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
}
@@ -141,8 +169,11 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if ((int) old < 0)
+ if ((int) old < 0) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return 1;
}
@@ -173,6 +204,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
}
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -201,6 +234,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
smp_rmb();
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -214,8 +249,11 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if (old)
+ if (old) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return 1;
}
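
With this change the spin_retry default is chosen at early boot: roughly 10 iterations when the compare-and-delay facility is available (MACHINE_HAS_CAD) and the previous 1000 otherwise. The existing spin_retry= command-line parameter (handled by spin_retry_setup() above) still overrides the auto-selected value, for example:

	spin_retry=2000
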
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 811937bb90be..3ff86533f7db 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -171,7 +171,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
table = table + ((address >> 20) & 0x7ff);
if (bad_address(table))
goto bad;
- pr_cont(KERN_CONT "S:%016lx ", *table);
+ pr_cont("S:%016lx ", *table);
if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
goto out;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
@@ -261,7 +261,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
return;
if (!printk_ratelimit())
return;
- printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
+ printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk(KERN_CONT "\n");
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
do_no_context(regs);
else
pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 639fce464008..5c586c78ca8d 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -235,10 +235,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- nr_pages - nr, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ nr_pages - nr, write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0)
ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 3c80d2e38f03..210ffede0153 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
if (!MACHINE_HAS_HPAGE)
@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
{
return 0;
}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmdp, int write)
-{
- struct page *page;
-
- if (!MACHINE_HAS_HPAGE)
- return NULL;
-
- page = pmd_page(*pmdp);
- if (page)
- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
- return page;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7235e01fd67..d35b15113b17 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
break;
case 0x2827: /* zEC12 */
case 0x2828: /* zEC12 */
- default:
order = 5;
break;
+ case 0x2964: /* z13 */
+ default:
+ order = 7;
+ break;
}
/* Limit number of empty zero pages for small memory sizes */
- if (order > 2 && totalram_pages <= 16384)
- order = 2;
+ while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+ order--;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 9b436c21195e..d008f638b2cd 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,8 +28,12 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
+#include <linux/security.h>
#include <asm/pgalloc.h>
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
static unsigned long stack_maxrandom_size(void)
{
if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
{
if (!(current->flags & PF_RANDOMIZE))
return 0;
- /* 8MB randomization for mmap_base */
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+ if (is_32bit_task())
+ return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+ else
+ return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}
static unsigned long mmap_base_legacy(void)
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
+ int do_color_align;
+
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = !is_32bit_task();
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ struct vm_unmapped_area_info info;
+ int do_color_align;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = !is_32bit_task();
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ addr = vm_unmapped_area(&info);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ if (addr & ~PAGE_MASK) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+
+ return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+ unsigned long base;
+
+ base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+ return base + mmap_rnd();
+}
+
#ifndef CONFIG_64BIT
/*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
}
+static int __init setup_mmap_rnd(void)
+{
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x9672:
+ case 0x2064:
+ case 0x2066:
+ case 0x2084:
+ case 0x2086:
+ case 0x2094:
+ case 0x2096:
+ case 0x2097:
+ case 0x2098:
+ case 0x2817:
+ case 0x2818:
+ case 0x2827:
+ case 0x2828:
+ mmap_rnd_mask = 0x7ffUL;
+ mmap_align_mask = 0UL;
+ break;
+ case 0x2964: /* z13 */
+ default:
+ mmap_rnd_mask = 0x3ff80UL;
+ mmap_align_mask = 0x7fUL;
+ break;
+ }
+ return 0;
+}
+early_initcall(setup_mmap_rnd);
+
#endif
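
For a sense of scale (assuming the usual 4 KiB pages, PAGE_SHIFT == 12): on the older machine types, mmap_rnd_mask = 0x7ff yields up to 0x7ff << 12 bytes, i.e. about 8 MiB of mmap randomization, as before. On z13 and later, mmap_rnd_mask = 0x3ff80 yields roughly 1 GiB of randomization whose low seven page bits are always zero, so every randomized base stays a multiple of 0x80 pages (512 KiB), matching the mmap_align_mask = 0x7f colouring applied in arch_get_unmapped_area() above.
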
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be99357d238c..b2c1542f2ba2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
- unsigned long offset;
+ unsigned long offset, mask;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_to_page((pmd_t *) entry);
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ page = virt_to_page((void *)((unsigned long) entry & mask));
return page->index + offset;
}
@@ -526,7 +527,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 53) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
- gaddr & 0xffe0000000000000))
+ gaddr & 0xffe0000000000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
@@ -534,7 +535,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 42) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
- gaddr & 0xfffffc0000000000))
+ gaddr & 0xfffffc0000000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
@@ -542,7 +543,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 31) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
- gaddr & 0xffffffff80000000))
+ gaddr & 0xffffffff80000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index 7e45d13816c1..ba44c9f55346 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -22,8 +22,8 @@
* skb_copy_bits takes 4 parameters:
* %r2 = skb pointer
* %r3 = offset into skb data
- * %r4 = length to copy
- * %r5 = pointer to temp buffer
+ * %r4 = pointer to temp buffer
+ * %r5 = length to copy
*/
#define SKBDATA %r8
@@ -44,8 +44,9 @@ ENTRY(sk_load_word)
sk_load_word_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,4 # 4 bytes
- la %r5,160(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,160(%r15) # pointer to temp buffer
+ lghi %r5,4 # 4 bytes
brasl %r14,skb_copy_bits # get data from skb
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
@@ -69,8 +70,9 @@ ENTRY(sk_load_half)
sk_load_half_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,2 # 2 bytes
- la %r5,162(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,162(%r15) # pointer to temp buffer
+ lghi %r5,2 # 2 bytes
brasl %r14,skb_copy_bits # get data from skb
xc 160(2,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
@@ -95,8 +97,9 @@ ENTRY(sk_load_byte)
sk_load_byte_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,1 # 1 bytes
- la %r5,163(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,163(%r15) # pointer to temp buffer
+ lghi %r5,1 # 1 byte
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
@@ -104,11 +107,11 @@ sk_load_byte_slow:
lgr %r2,%r9 # restore %r2
br %r8
- /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
+ /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
ENTRY(sk_load_byte_msh)
llgfr %r1,%r3 # extend offset
clr %r11,%r3 # hlen < offset ?
- jle sk_load_byte_slow
+ jle sk_load_byte_msh_slow
lhi %r12,0
ic %r12,0(%r1,%r10) # get byte from skb
nill %r12,0x0f
@@ -118,8 +121,9 @@ ENTRY(sk_load_byte_msh)
sk_load_byte_msh_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,2 # 2 bytes
- la %r5,162(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,163(%r15) # pointer to temp buffer
+ lghi %r5,1 # 1 byte
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r12,160(%r15) # load result from temp buffer
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c52ac77408ca..bbd1981cc150 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x88500000, K);
break;
case BPF_ALU | BPF_NEG: /* A = -A */
- /* lnr %r5,%r5 */
- EMIT2(0x1155);
+ /* lcr %r5,%r5 */
+ EMIT2(0x1355);
break;
case BPF_JMP | BPF_JA: /* ip += K */
offset = addrs[i + K] + jit->start - jit->prg;
@@ -448,15 +448,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
mask = 0x800000; /* je */
kbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
- if (K <= 16383)
- /* chi %r5,<K> */
- EMIT4_IMM(0xa75e0000, K);
- else if (test_facility(21))
+ if (test_facility(21))
/* clfi %r5,<K> */
EMIT6_IMM(0xc25f0000, K);
else
- /* c %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5950d000, EMIT_CONST(K));
+ /* cl %r5,<d(K)>(%r13) */
+ EMIT4_DISP(0x5550d000, EMIT_CONST(K));
}
branch: if (filter->jt == filter->jf) {
if (filter->jt == 0)
@@ -502,8 +499,8 @@ branch: if (filter->jt == filter->jf) {
xbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
jit->seen |= SEEN_XREG;
- /* cr %r5,%r12 */
- EMIT2(0x195c);
+ /* clr %r5,%r12 */
+ EMIT2(0x155c);
}
goto branch;
case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 62c5ea6d8682..8aa271b3d1ad 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -55,7 +55,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
if (ret)
goto out;
- io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
ret = -EFAULT;
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -96,7 +96,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
ret = get_pfn(mmio_addr, VM_READ, &pfn);
if (ret)
goto out;
- io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
ret = -EFAULT;
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h
index 7d65a96a82e5..0e5c6f466520 100644
--- a/arch/score/include/asm/pgtable-bits.h
+++ b/arch/score/include/asm/pgtable-bits.h
@@ -6,7 +6,6 @@
#define _PAGE_WRITE (1<<7) /* implemented in software */
#define _PAGE_PRESENT (1<<9) /* implemented in software */
#define _PAGE_MODIFIED (1<<10) /* implemented in software */
-#define _PAGE_FILE (1<<10)
#define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1)
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index db96ad9afc03..0553e5cd5985 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -27,7 +27,7 @@ extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START (0xc0000000UL)
@@ -90,15 +90,6 @@ static inline void pmd_clear(pmd_t *pmdp)
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-/*
- * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
- * split up 30 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS 30
-#define pte_to_pgoff(_pte) \
- (((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9))
-#define pgoff_to_pte(off) \
- ((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE})
#define __pte_to_swp_entry(pte) \
((swp_entry_t) { pte_val(pte)})
#define __swp_entry_to_pte(x) ((pte_t) {(x).val})
@@ -169,8 +160,8 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
}
#define __swp_type(x) ((x).val & 0x1f)
-#define __swp_offset(x) ((x).val >> 11)
-#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 11)})
+#define __swp_offset(x) ((x).val >> 10)
+#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 10)})
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
@@ -198,11 +189,6 @@ static inline int pte_young(pte_t pte)
return pte_val(pte) & _PAGE_ACCESSED;
}
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & _PAGE_FILE;
-}
-
#define pte_special(pte) (0)
static inline pte_t pte_wrprotect(pte_t pte)
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index 656b7ada9326..33864fa2a8d4 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -42,7 +42,6 @@ struct thread_info {
* 0-0xFFFFFFFF for kernel-thread
*/
mm_segment_t addr_limit;
- struct restart_block restart_block;
struct pt_regs *regs;
};
@@ -58,9 +57,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/score/kernel/asm-offsets.c b/arch/score/kernel/asm-offsets.c
index 57788f44c6fb..b4d5214a7a7e 100644
--- a/arch/score/kernel/asm-offsets.c
+++ b/arch/score/kernel/asm-offsets.c
@@ -106,7 +106,6 @@ void output_thread_info_defines(void)
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
- OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
DEFINE(KERNEL_STACK_MASK, THREAD_MASK);
diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c
index 1651807774ad..e381c8c4ff65 100644
--- a/arch/score/kernel/signal.c
+++ b/arch/score/kernel/signal.c
@@ -141,7 +141,7 @@ score_rt_sigreturn(struct pt_regs *regs)
int sig;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
frame = (struct rt_sigframe __user *) regs->regs[0];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0f09f5285d5e..eb4ef274ae9b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,7 +1,7 @@
config SUPERH
def_bool y
select ARCH_MIGHT_HAVE_PC_PARPORT
- select EXPERT
+ select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 7646bf0486c2..1087dba9b015 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -14,9 +14,6 @@
#define DRV_NAME "SE7343-FPGA"
#define pr_fmt(fmt) DRV_NAME ": " fmt
-#define irq_reg_readl ioread16
-#define irq_reg_writel iowrite16
-
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index f5e2af1bf040..00e699232621 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -11,9 +11,6 @@
#define DRV_NAME "SE7722-FPGA"
#define pr_fmt(fmt) DRV_NAME ": " fmt
-#define irq_reg_readl ioread16
-#define irq_reg_writel iowrite16
-
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index cf434c64408d..89c513a982fc 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -62,7 +62,7 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 0bce3d81569e..c646e563abce 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -26,8 +26,6 @@
* and timing control which (together with bit 0) are moved into the
* old-style PTEA on the parts that support it.
*
- * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
- *
* SH-X2 MMUs and extended PTEs
*
* SH-X2 supports an extended mode TLB with split data arrays due to the
@@ -51,7 +49,6 @@
#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
#define _PAGE_PROTNONE 0x200 /* software: if not present */
#define _PAGE_ACCESSED 0x400 /* software: page referenced */
-#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
#define _PAGE_SPECIAL 0x800 /* software: special page */
#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
@@ -105,14 +102,13 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
/* Mask which drops unused bits from the PTEL value */
#if defined(CONFIG_CPU_SH3)
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
- _PAGE_FILE | _PAGE_SZ1 | \
- _PAGE_HW_SHARED)
+ _PAGE_SZ1 | _PAGE_HW_SHARED)
#elif defined(CONFIG_X2TLB)
/* Get rid of the legacy PR/SZ bits when using extended mode */
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
- _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
+ _PAGE_PR_MASK | _PAGE_SZ_MASK)
#else
-#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
+#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED)
#endif
#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))
@@ -343,7 +339,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
-#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
#ifdef CONFIG_X2TLB
@@ -445,7 +440,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* Encode and de-code a swap entry
*
* Constraints:
- * _PAGE_FILE at bit 0
* _PAGE_PRESENT at bit 8
* _PAGE_PROTNONE at bit 9
*
@@ -453,9 +447,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* swap offset into bits 10:30. For the 64-bit PTE case, we keep the
* preserved bits in the low 32-bits and use the upper 32 as the swap
* offset (along with a 5-bit type), following the same approach as x86
- * PAE. This keeps the logic quite simple, and allows for a full 32
- * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
- * in the pte_low case.
+ * PAE. This keeps the logic quite simple.
*
* As is evident by the Alpha code, if we ever get a 64-bit unsigned
* long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
@@ -471,13 +463,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
-/*
- * Encode and decode a nonlinear file mapping entry
- */
-#define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-
-#define PTE_FILE_MAX_BITS 32
#else
#define __swp_type(x) ((x).val & 0xff)
#define __swp_offset(x) ((x).val >> 10)
@@ -485,13 +470,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
-
-/*
- * Encode and decode a nonlinear file mapping entry
- */
-#define PTE_FILE_MAX_BITS 29
-#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index dda8c82601b9..07424968df62 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -107,7 +107,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
#define _PAGE_PRESENT 0x004 /* software: page referenced */
-#define _PAGE_FILE 0x004 /* software: only when !present */
#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
@@ -129,7 +128,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL _PAGE_EXT(0x002)
-#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
+#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_SHARED | \
_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
/* Mask which drops software flags */
@@ -260,7 +259,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
*/
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
@@ -304,11 +302,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/* Encode and decode a nonlinear file mapping entry */
-#define PTE_FILE_MAX_BITS 29
-#define pte_to_pgoff(pte) (pte_val(pte))
-#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
-
#endif /* !__ASSEMBLY__ */
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ad27ffa65e2e..657c03919627 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -33,7 +33,6 @@ struct thread_info {
__u32 cpu;
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* thread address space */
- struct restart_block restart_block;
unsigned long previous_sp; /* sp of previous stack in case
of nested IRQ stacks */
__u8 supervisor_stack[0];
@@ -63,9 +62,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 08a2be775b6c..542225fedb11 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -25,7 +25,6 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
DEFINE(TI_SIZE, sizeof(struct thread_info));
#ifdef CONFIG_HIBERNATION
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 2f002b24fb92..0b34f2a704fe 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -156,7 +156,7 @@ asmlinkage int sys_sigreturn(void)
int r0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -186,7 +186,7 @@ asmlinkage int sys_rt_sigreturn(void)
int r0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 897abe7b871e..71993c6a7d94 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -260,7 +260,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
long long ret;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -294,7 +294,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
long long ret;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
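These sigreturn hunks, together with the thread_info and asm-offsets changes above, are part of the tree-wide move of restart_block from struct thread_info into struct task_struct: the per-thread restart state is now reached through current rather than current_thread_info(). A minimal sketch of the resulting idiom, assuming only the generic definitions (struct restart_block and do_no_restart_syscall() from the core headers):

/* sketch: cancelling a pending syscall restart from a sigreturn path */
#include <linux/sched.h>        /* struct task_struct, current */
#include <linux/thread_info.h>  /* do_no_restart_syscall() */

static void cancel_pending_restart(void)
{
	/* restart_block lives in the task now, not in thread_info */
	current->restart_block.fn = do_no_restart_syscall;
}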
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area(regs, error_code, address);
else
BUG();
}
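This fault.c hunk belongs to the series that adds VM_FAULT_SIGSEGV handling to every architecture: a SIGSEGV-class error returned by handle_mm_fault() is now routed to the bad-area path instead of falling through to BUG(). The same dispatch shape recurs in the sparc, tile and um hunks further down; a minimal sketch of the pattern (helper names here are placeholders, not the sh code):

/* sketch: classifying a handle_mm_fault() error result */
if (unlikely(fault & VM_FAULT_ERROR)) {
	if (fault & VM_FAULT_OOM)
		handle_oom();          /* out-of-memory path */
	else if (fault & VM_FAULT_SIGSEGV)
		signal_segv();         /* new: treat like a bad area */
	else if (fault & VM_FAULT_SIGBUS)
		signal_bus();          /* SIGBUS path */
	else
		BUG();                 /* anything else is a kernel bug */
}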
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 37458f38b220..e7af6a65baab 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -17,7 +17,7 @@
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
- return ACCESS_ONCE(*ptep);
+ return READ_ONCE(*ptep);
#else
/*
* With get_user_pages_fast, we walk down the pagetables without
@@ -257,10 +257,8 @@ slow_irqon:
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0) {
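Two independent cleanups meet in this file. The lockless PTE read switches from ACCESS_ONCE() to READ_ONCE(), since ACCESS_ONCE()'s volatile cast is only reliable for scalar types, and the slow path drops its explicit mmap_sem handling in favour of get_user_pages_unlocked(), which takes and releases the semaphore itself (and may drop it across faults). A minimal before/after sketch using the call signature visible in the hunk, with nr_pages standing in for (end - start) >> PAGE_SHIFT:

/* before: caller manages mmap_sem around the pinned-page walk */
down_read(&mm->mmap_sem);
ret = get_user_pages(current, mm, start, nr_pages, write, 0, pages, NULL);
up_read(&mm->mmap_sem);

/* after: one call, mmap_sem handled internally */
ret = get_user_pages_unlocked(current, mm, start, nr_pages, write, 0, pages);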
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index d7762349ea48..534bc978af8a 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-struct page *follow_huge_addr(struct mm_struct *mm,
- unsigned long address, int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return 0;
@@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
{
return 0;
}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- return NULL;
-}
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 705408766ab0..2e48eb8813ff 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -497,7 +497,7 @@ module_init(aes_sparc64_mod_init);
module_exit(aes_sparc64_mod_fini);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated");
MODULE_ALIAS_CRYPTO("aes");
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 641f55cb61c3..6bf2479a12fb 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
-MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("camellia");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index d11500972994..dd6a34fa6e19 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -533,5 +533,6 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 64c7ff5f72a9..b688731d7ede 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -183,7 +183,7 @@ module_init(md5_sparc64_mod_init);
module_exit(md5_sparc64_mod_fini);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated");
MODULE_ALIAS_CRYPTO("md5");
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 38965379e350..68513c41e10d 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_CACHEFLUSH_H */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index b9b91ae19fe1..f06b36a00a3b 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -44,7 +44,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4)
#define PAGE_NONE SRMMU_PAGE_NONE
@@ -102,7 +102,8 @@ extern unsigned long empty_zero_page;
*/
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
- __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
+ __asm__ __volatile__("swap [%2], %0" :
+ "=&r" (value) : "0" (value), "r" (addr) : "memory");
return value;
}
@@ -221,14 +222,6 @@ static inline int pte_young(pte_t pte)
return pte_val(pte) & SRMMU_REF;
}
-/*
- * The following only work if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & SRMMU_FILE;
-}
-
static inline int pte_special(pte_t pte)
{
return 0;
@@ -375,22 +368,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/* file-offset-in-pte helpers */
-static inline unsigned long pte_to_pgoff(pte_t pte)
-{
- return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
-}
-
-static inline pte_t pgoff_to_pte(unsigned long pgoff)
-{
- return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
-}
-
-/*
- * This is made a constant because mm/fremap.c required a constant.
- */
-#define PTE_FILE_MAX_BITS 24
-
static inline unsigned long
__get_phys (unsigned long addr)
{
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1ff9e7864168..dc165ebdf05a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -93,7 +93,7 @@ bool kern_addr_valid(unsigned long addr);
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
/* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
@@ -137,7 +137,6 @@ bool kern_addr_valid(unsigned long addr);
#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
-#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
@@ -167,7 +166,6 @@ bool kern_addr_valid(unsigned long addr);
#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
-#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
@@ -332,22 +330,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
}
#endif
-static inline pte_t pgoff_to_pte(unsigned long off)
-{
- off <<= PAGE_SHIFT;
-
- __asm__ __volatile__(
- "\n661: or %0, %2, %0\n"
- " .section .sun4v_1insn_patch, \"ax\"\n"
- " .word 661b\n"
- " or %0, %3, %0\n"
- " .previous\n"
- : "=r" (off)
- : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
-
- return __pte(off);
-}
-
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
unsigned long val = pgprot_val(prot);
@@ -609,22 +591,6 @@ static inline unsigned long pte_exec(pte_t pte)
return (pte_val(pte) & mask);
}
-static inline unsigned long pte_file(pte_t pte)
-{
- unsigned long val = pte_val(pte);
-
- __asm__ __volatile__(
- "\n661: and %0, %2, %0\n"
- " .section .sun4v_1insn_patch, \"ax\"\n"
- " .word 661b\n"
- " and %0, %3, %0\n"
- " .previous\n"
- : "=r" (val)
- : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
-
- return val;
-}
-
static inline unsigned long pte_present(pte_t pte)
{
unsigned long val = pte_val(pte);
@@ -971,12 +937,6 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/* File offset in PTE support. */
-unsigned long pte_file(pte_t);
-#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
-pte_t pgoff_to_pte(unsigned long);
-#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
-
int page_in_phys_avail(unsigned long paddr);
/*
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 79da17866fa8..ae51a111a8c7 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -80,10 +80,6 @@
#define SRMMU_PRIV 0x1c
#define SRMMU_PRIV_RDONLY 0x18
-#define SRMMU_FILE 0x40 /* Implemented in software */
-
-#define SRMMU_PTE_FILE_SHIFT 8 /* == 32-PTE_FILE_MAX_BITS */
-
#define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
/* SRMMU swap entry encoding
@@ -94,13 +90,13 @@
* oooooooooooooooooootttttRRRRRRRR
* fedcba9876543210fedcba9876543210
*
- * The bottom 8 bits are reserved for protection and status bits, especially
- * FILE and PRESENT.
+ * The bottom 7 bits are reserved for protection and status bits, especially
+ * PRESENT.
*/
#define SRMMU_SWP_TYPE_MASK 0x1f
-#define SRMMU_SWP_TYPE_SHIFT SRMMU_PTE_FILE_SHIFT
-#define SRMMU_SWP_OFF_MASK 0x7ffff
-#define SRMMU_SWP_OFF_SHIFT (SRMMU_PTE_FILE_SHIFT + 5)
+#define SRMMU_SWP_TYPE_SHIFT 7
+#define SRMMU_SWP_OFF_MASK 0xfffff
+#define SRMMU_SWP_OFF_SHIFT (SRMMU_SWP_TYPE_SHIFT + 5)
/* Some day I will implement true fine grained access bits for
* user pages because the SRMMU gives us the capabilities to
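With SRMMU_FILE gone the software swap-entry layout packs down: five type bits now start at bit 7 and the twenty offset bits at bit 12, leaving only the bottom seven bits for status/protection, as the updated comment says. A worked sketch of the encoding implied by the new constants (the helper name is illustrative only):

/* sketch: composing an SRMMU software swap entry from the new constants */
static inline unsigned long srmmu_mk_swap(unsigned long type, unsigned long off)
{
	return ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |	/* bits 11..7  */
	       ((off  & SRMMU_SWP_OFF_MASK)  << SRMMU_SWP_OFF_SHIFT);	/* bits 31..12 */
}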
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 025c98446b1e..fd7bd0a440ca 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -47,8 +47,6 @@ struct thread_info {
struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
unsigned long rwbuf_stkptrs[NSWINS];
unsigned long w_saved;
-
- struct restart_block restart_block;
};
/*
@@ -62,9 +60,6 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -103,7 +98,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TI_REG_WINDOW 0x30
#define TI_RWIN_SPTRS 0x230
#define TI_W_SAVED 0x250
-/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
/*
* thread information flag bit numbers
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 798f0279a4b5..ff455164732a 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -58,8 +58,6 @@ struct thread_info {
unsigned long gsr[7];
unsigned long xfsr[7];
- struct restart_block restart_block;
-
struct pt_regs *kern_una_regs;
unsigned int kern_una_insn;
@@ -92,10 +90,9 @@ struct thread_info {
#define TI_RWIN_SPTRS 0x000003c8
#define TI_GSR 0x00000400
#define TI_XFSR 0x00000438
-#define TI_RESTART_BLOCK 0x00000470
-#define TI_KUNA_REGS 0x000004a0
-#define TI_KUNA_INSN 0x000004a8
-#define TI_FPREGS 0x000004c0
+#define TI_KUNA_REGS 0x00000470
+#define TI_KUNA_INSN 0x00000478
+#define TI_FPREGS 0x00000480
/* We embed this in the uppermost byte of thread_info->flags */
#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
@@ -124,9 +121,6 @@ struct thread_info {
.current_ds = ASI_P, \
.exec_domain = &default_exec_domain, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 97655e0fd243..192a617a32f3 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -29,7 +29,7 @@ static void *module_map(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#else
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index b36365f49478..9ce5afe167ff 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,7 +639,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 62deba7be1a9..4eed773a7735 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -150,7 +150,7 @@ void do_sigreturn32(struct pt_regs *regs)
int err, i;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
@@ -235,7 +235,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
int err, i;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 9ee72fc8e0e4..52aa5e4ce5e7 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -70,7 +70,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 1a6999868031..d88beff47bab 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -254,7 +254,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack ();
sf = (struct rt_signal_frame __user *)
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 981a769b9558..a27651e866e7 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2730,8 +2730,6 @@ void __init trap_init(void)
TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
TI_CURRENT_DS != offsetof(struct thread_info,
current_ds) ||
- TI_RESTART_BLOCK != offsetof(struct thread_info,
- restart_block) ||
TI_KUNA_REGS != offsetof(struct thread_info,
kern_una_regs) ||
TI_KUNA_INSN != offsetof(struct thread_info,
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index ae6ce383d4df..2e5c4fc2daa9 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -249,10 +249,8 @@ slow:
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d329537739c6..4242eab12e10 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -215,12 +215,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-struct page *follow_huge_addr(struct mm_struct *mm,
- unsigned long address, int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return 0;
@@ -230,9 +224,3 @@ int pud_huge(pud_t pud)
{
return 0;
}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- return NULL;
-}
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index f33e7c7a3bf7..7931eeeb649a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -776,7 +776,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
- module_free(NULL, image);
+ module_memfree(image);
return;
}
memcpy(image + proglen, temp, ilen);
@@ -822,7 +822,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 5d1950788c69..95a4f19d16c5 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -67,7 +67,7 @@ extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define _PAGE_PRESENT HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE HV_PTE_PAGE
@@ -285,17 +285,6 @@ extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
/*
- * Support non-linear file mappings (see sys_remap_file_pages).
- * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
- * file offset in the 32 high bits.
- */
-#define _PAGE_FILE HV_PTE_CLIENT1
-#define PTE_FILE_MAX_BITS 32
-#define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
-#define pte_to_pgoff(pte) ((pte).val >> 32)
-#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })
-
-/*
* Encode and de-code a swap entry (see <linux/swapops.h>).
* We put the swap file type+offset in the 32 high bits;
* I believe we can just leave the low bits clear.
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 48e4fd0f38e4..96c14c1430d8 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -36,7 +36,6 @@ struct thread_info {
mm_segment_t addr_limit; /* thread address space
(KERNEL_DS or USER_DS) */
- struct restart_block restart_block;
struct single_step_state *step_state; /* single step state
(if non-zero) */
int align_ctl; /* controls unaligned access */
@@ -57,9 +56,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
.step_state = NULL, \
.align_ctl = 0, \
}
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index b6cde3209b96..f41cb53cf645 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -114,14 +114,14 @@ struct exception_table_entry {
extern int fixup_exception(struct pt_regs *regs);
/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+/*
* Support macros for __get_user().
- *
- * Implementation note: The "case 8" logic of casting to the type of
- * the result of subtracting the value from itself is basically a way
- * of keeping all integer types the same, but casting any pointers to
- * ptrdiff_t, i.e. also an integer type. This way there are no
- * questionable casts seen by the compiler on an ILP32 platform.
- *
* Note that __get_user() and __put_user() assume proper alignment.
*/
@@ -178,7 +178,7 @@ extern int fixup_exception(struct pt_regs *regs);
"9:" \
: "=r" (ret), "=r" (__a), "=&r" (__b) \
: "r" (ptr), "i" (-EFAULT)); \
- (x) = (__typeof(x))(__typeof((x)-(x))) \
+ (x) = (__force __typeof(x))(__inttype(x)) \
(((u64)__hi32(__a, __b) << 32) | \
__lo32(__a, __b)); \
})
@@ -210,14 +210,16 @@ extern int __get_user_bad(void)
#define __get_user(x, ptr) \
({ \
int __ret; \
+ typeof(x) _x; \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
- case 1: __get_user_1(x, ptr, __ret); break; \
- case 2: __get_user_2(x, ptr, __ret); break; \
- case 4: __get_user_4(x, ptr, __ret); break; \
- case 8: __get_user_8(x, ptr, __ret); break; \
+ case 1: __get_user_1(_x, ptr, __ret); break; \
+ case 2: __get_user_2(_x, ptr, __ret); break; \
+ case 4: __get_user_4(_x, ptr, __ret); break; \
+ case 8: __get_user_8(_x, ptr, __ret); break; \
default: __ret = __get_user_bad(); break; \
} \
+ (x) = (typeof(*(ptr))) _x; \
__ret; \
})
@@ -246,7 +248,7 @@ extern int __get_user_bad(void)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret) \
({ \
- u64 __x = (__typeof((x)-(x)))(x); \
+ u64 __x = (__force __inttype(x))(x); \
int __lo = (int) __x, __hi = (int) (__x >> 32); \
asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \
"2: { sw %0, %3; movei %0, 0 }\n" \
@@ -289,12 +291,13 @@ extern int __put_user_bad(void)
#define __put_user(x, ptr) \
({ \
int __ret; \
+ typeof(*(ptr)) _x = (x); \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
- case 1: __put_user_1(x, ptr, __ret); break; \
- case 2: __put_user_2(x, ptr, __ret); break; \
- case 4: __put_user_4(x, ptr, __ret); break; \
- case 8: __put_user_8(x, ptr, __ret); break; \
+ case 1: __put_user_1(_x, ptr, __ret); break; \
+ case 2: __put_user_2(_x, ptr, __ret); break; \
+ case 4: __put_user_4(_x, ptr, __ret); break; \
+ case 8: __put_user_8(_x, ptr, __ret); break; \
default: __ret = __put_user_bad(); break; \
} \
__ret; \
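The tile uaccess rework replaces the old "(x)-(x)" typing trick with an __inttype() helper: __builtin_choose_expr() selects unsigned long when the access fits in one machine word and unsigned long long otherwise, while the new _x temporaries (plus the __force casts) decouple the caller's expression from the per-size assembly helpers. A small stand-alone illustration of the type-selection trick, assuming a GCC/Clang user-space build rather than kernel context:

#include <stdio.h>

/* mirrors the __inttype() helper added in the hunk above */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	char c = 0;
	long long ll = 0;

	/* on an ILP32 target the second line reports 8 bytes via unsigned
	 * long long; on LP64 both picks are unsigned long, since it fits */
	printf("__inttype(char):      %zu bytes\n", sizeof(__inttype(c)));
	printf("__inttype(long long): %zu bytes\n", sizeof(__inttype(ll)));
	return 0;
}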
diff --git a/arch/tile/include/uapi/asm/byteorder.h b/arch/tile/include/uapi/asm/byteorder.h
index fb72ecf49218..6b8fa2e1cf6e 100644
--- a/arch/tile/include/uapi/asm/byteorder.h
+++ b/arch/tile/include/uapi/asm/byteorder.h
@@ -14,8 +14,6 @@
#if defined (__BIG_ENDIAN__)
#include <linux/byteorder/big_endian.h>
-#elif defined (__LITTLE_ENDIAN__)
-#include <linux/byteorder/little_endian.h>
#else
-#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
+#include <linux/byteorder/little_endian.h>
#endif
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index c4646bb99342..2fd1694ac1d0 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -909,11 +909,8 @@ static void hardwall_destroy(struct hardwall_info *info)
static int hardwall_proc_show(struct seq_file *sf, void *v)
{
struct hardwall_info *info = sf->private;
- char buf[256];
- int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
- buf[rc++] = '\n';
- seq_write(sf, buf, rc);
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask));
return 0;
}
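This hunk, like the proc.c, setup.c, homecache.c and init.c changes below, drops an on-stack cpulist_scnprintf() buffer in favour of the vsnprintf() bitmap extension: "%*pb" prints a mask in hex form and "%*pbl" as a range list, with cpumask_pr_args() (or nodemask_pr_args()) supplying the width/pointer pair. A minimal usage sketch, kernel context assumed:

/* sketch: printing a cpumask with no temporary buffer */
pr_info("online cpus: %*pbl\n", cpumask_pr_args(cpu_online_mask));
/* cpumask_pr_args(m) expands to: nr_cpu_ids, cpumask_bits(m) */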
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 96447c9160a0..2305084c9b93 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -74,7 +74,7 @@ error:
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
vfree(module_region);
@@ -83,7 +83,7 @@ void module_free(struct module *mod, void *module_region)
0, 0, 0, NULL, NULL, 0);
/*
- * FIXME: If module_region == mod->module_init, trim exception
+ * FIXME: Add module_arch_freeing_init to trim exception
* table entries.
*/
}
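Here, as in the sparc BPF JIT hunk above, module_free(mod, region) gives way to module_memfree(region); cleanup that actually needed the module pointer is meant to move into the separate module_arch_freeing_init() hook, which is what the reworded FIXME refers to. A minimal sketch of the two hooks, assuming the generic declarations in linux/moduleloader.h (the bodies are placeholders):

/* sketch: arch overrides after the module_free() -> module_memfree() split */
void module_memfree(void *module_region)
{
	vfree(module_region);			/* plain unmapping of the region */
}

void module_arch_freeing_init(struct module *mod)
{
	/* arch-specific teardown tied to mod->module_init, if any */
}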
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index f70c7892fa25..325df47f114d 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -245,7 +245,7 @@ static void fixup_read_and_payload_sizes(void)
{
struct pci_dev *dev = NULL;
int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
- int max_read_size = 0x2; /* Limit to 512 byte reads. */
+ int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
u16 new_values;
/* Scan for the smallest maximum payload size. */
@@ -258,7 +258,7 @@ static void fixup_read_and_payload_sizes(void)
}
/* Now, set the max_payload_size for all devices to that value. */
- new_values = (max_read_size << 12) | (smallest_max_payload << 5);
+ new_values = max_read_size | (smallest_max_payload << 5);
for_each_pci_dev(dev)
pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
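The max read request size occupies bits 14:12 of the PCIe Device Control register, so the old open-coded "0x2 << 12" and PCI_EXP_DEVCTL_READRQ_512B are the same value (0x2000); the payload size sits in bits 7:5, which is why smallest_max_payload is still shifted by 5 when new_values is built. A small sketch of the arithmetic, with constant values as found in pci_regs.h:

/* sketch: composing the Device Control write used above */
#define PCI_EXP_DEVCTL_PAYLOAD      0x00e0	/* max payload size, bits 7:5 */
#define PCI_EXP_DEVCTL_READRQ       0x7000	/* max read request, bits 14:12 */
#define PCI_EXP_DEVCTL_READRQ_512B  0x2000	/* i.e. 0x2 << 12 */

static unsigned short devctl_value(int payload_code)
{
	/* equivalent to the old (0x2 << 12) | (payload_code << 5) */
	return PCI_EXP_DEVCTL_READRQ_512B | (payload_code << 5);
}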
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 6829a9508649..7983e9868df6 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -45,10 +45,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
int n = ptr_to_cpu(v);
if (n == 0) {
- char buf[NR_CPUS*5];
- cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
seq_printf(m, "cpu count\t: %d\n", num_online_cpus());
- seq_printf(m, "cpu list\t: %s\n", buf);
+ seq_printf(m, "cpu list\t: %*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
seq_printf(m, "model name\t: %s\n", chip_model);
seq_printf(m, "flags\t\t:\n"); /* nothing for now */
seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n",
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 864eea69556d..f1f579914952 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -215,12 +215,11 @@ early_param("mem", setup_mem); /* compatibility with x86 */
static int __init setup_isolnodes(char *str)
{
- char buf[MAX_NUMNODES * 5];
if (str == NULL || nodelist_parse(str, isolnodes) != 0)
return -EINVAL;
- nodelist_scnprintf(buf, sizeof(buf), isolnodes);
- pr_info("Set isolnodes value to '%s'\n", buf);
+ pr_info("Set isolnodes value to '%*pbl'\n",
+ nodemask_pr_args(&isolnodes));
return 0;
}
early_param("isolnodes", setup_isolnodes);
@@ -1315,11 +1314,9 @@ early_param("disabled_cpus", disabled_cpus);
void __init print_disabled_cpus(void)
{
- if (!cpumask_empty(&disabled_map)) {
- char buf[100];
- cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
- pr_info("CPUs not available for Linux: %s\n", buf);
- }
+ if (!cpumask_empty(&disabled_map))
+ pr_info("CPUs not available for Linux: %*pbl\n",
+ cpumask_pr_args(&disabled_map));
}
static void __init setup_cpu_maps(void)
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index bb0a9ce7ae23..8a524e332c1a 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -48,7 +48,7 @@ int restore_sigcontext(struct pt_regs *regs,
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Enforce that sigcontext is like pt_regs, and doesn't mess
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index 2298cb1daff7..1e968f7550dc 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
depends on HAVE_KVM && MODULES
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select SRCU
---help---
Support hosting paravirtualized guest machines.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index cd3387370ebb..40ca30a9fee3 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -115,7 +115,6 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
struct cpumask *cache_cpumask, *tlb_cpumask;
HV_PhysAddr cache_pa;
- char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];
mb(); /* provided just to simplify "magic hypervisor" mode */
@@ -149,13 +148,12 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
asids, asidcount);
if (rc == 0)
return;
- cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
- cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
- pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
- cache_pa, cache_control, cache_cpumask, cache_buf,
- (unsigned long)tlb_va, tlb_length, tlb_pgsize,
- tlb_cpumask, tlb_buf, asids, asidcount, rc);
+ pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
+ cache_pa, cache_control, cache_cpumask,
+ cpumask_pr_args(&cache_cpumask_copy),
+ (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
+ cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
panic("Unsafe to continue.");
}
@@ -263,10 +261,6 @@ static int pte_to_home(pte_t pte)
/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
- /* Check for non-linear file mapping "PTEs" and pass them through. */
- if (pte_file(pte))
- return pte;
-
#if CHIP_HAS_MMIO()
/* Check for MMIO mappings and pass them through. */
if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 3270e0019266..8416240c322c 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return NULL;
}
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
@@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pud);
- if (page)
- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index be240cc4978d..ace32d7d3864 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -353,15 +353,13 @@ static int __init setup_ktext(char *str)
/* Neighborhood ktext pages on specified mask */
else if (cpulist_parse(str, &ktext_mask) == 0) {
- char buf[NR_CPUS * 5];
- cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
if (cpumask_weight(&ktext_mask) > 1) {
ktext_small = 1;
- pr_info("ktext: using caching neighborhood %s with small pages\n",
- buf);
+ pr_info("ktext: using caching neighborhood %*pbl with small pages\n",
+ cpumask_pr_args(&ktext_mask));
} else {
- pr_info("ktext: caching on cpu %s with one huge page\n",
- buf);
+ pr_info("ktext: caching on cpu %*pbl with one huge page\n",
+ cpumask_pr_args(&ktext_mask));
}
}
@@ -492,11 +490,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
struct cpumask bad;
cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
- if (!cpumask_empty(&bad)) {
- char buf[NR_CPUS * 5];
- cpulist_scnprintf(buf, sizeof(buf), &bad);
- pr_info("ktext: not using unavailable cpus %s\n", buf);
- }
+ if (!cpumask_empty(&bad))
+ pr_info("ktext: not using unavailable cpus %*pbl\n",
+ cpumask_pr_args(&bad));
if (cpumask_empty(&ktext_mask)) {
pr_warn("ktext: no valid cpus; caching on %d\n",
smp_processor_id());
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index f534b73e753e..cfbe59752469 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -23,7 +23,7 @@
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
@@ -41,13 +41,4 @@ static inline void pgd_mkuptodate(pgd_t pgd) { }
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
-/*
- * Bits 0 through 4 are taken
- */
-#define PTE_FILE_MAX_BITS 27
-
-#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
-
#endif
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 0032f9212e74..2b4274e7c095 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -41,7 +41,7 @@
#endif
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
@@ -112,25 +112,5 @@ static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
}
-/*
- * Bits 0 through 3 are taken in the low part of the pte,
- * put the 32 bits of offset into the high part.
- */
-#define PTE_FILE_MAX_BITS 32
-
-#ifdef CONFIG_64BIT
-
-#define pte_to_pgoff(p) ((p).pte >> 32)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
-
-#else
-
-#define pte_to_pgoff(pte) ((pte).pte_high)
-
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-
-#endif
-
#endif
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index bf974f712af7..2324b624f195 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -18,7 +18,6 @@
#define _PAGE_ACCESSED 0x080
#define _PAGE_DIRTY 0x100
/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
pte_present gives true */
@@ -151,14 +150,6 @@ static inline int pte_write(pte_t pte)
!(pte_get_bits(pte, _PAGE_PROTNONE)));
}
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_FILE);
-}
-
static inline int pte_dirty(pte_t pte)
{
return pte_get_bits(pte, _PAGE_DIRTY);
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 1c5b2a83046a..e04114c4fcd9 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -22,7 +22,6 @@ struct thread_info {
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user
0-0xFFFFFFFF for kernel */
- struct restart_block restart_block;
struct thread_info *real_thread; /* Points to non-IRQ stack */
};
@@ -34,9 +33,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
.real_thread = NULL, \
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
diff --git a/arch/unicore32/include/asm/pgtable-hwdef.h b/arch/unicore32/include/asm/pgtable-hwdef.h
index 7314e859cca0..e37fa471c2be 100644
--- a/arch/unicore32/include/asm/pgtable-hwdef.h
+++ b/arch/unicore32/include/asm/pgtable-hwdef.h
@@ -44,7 +44,6 @@
#define PTE_TYPE_INVALID (3 << 0)
#define PTE_PRESENT (1 << 2)
-#define PTE_FILE (1 << 3) /* only when !PRESENT */
#define PTE_YOUNG (1 << 3)
#define PTE_DIRTY (1 << 4)
#define PTE_CACHEABLE (1 << 5)
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index ed6f7d000fba..818d0f5598e3 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -283,20 +283,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define MAX_SWAPFILES_CHECK() \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/*
- * Encode and decode a file entry. File entries are stored in the Linux
- * page tables as follows:
- *
- * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <----------------------- offset ----------------------> 1 0 0 0
- */
-#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 4)
-#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 28
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index af36d8eabdf1..63e2839dfeb8 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -79,7 +79,6 @@ struct thread_info {
#ifdef CONFIG_UNICORE_FPU_F64
struct fp_state fpstate __attribute__((aligned(8)));
#endif
- struct restart_block restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -89,9 +88,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index dc41f6dfedb6..e191b3448bd3 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -25,7 +25,7 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
index 7c8fb7018dc6..d329f85766cc 100644
--- a/arch/unicore32/kernel/signal.c
+++ b/arch/unicore32/kernel/signal.c
@@ -105,7 +105,7 @@ asmlinkage int __sys_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c
index 08b8d4295e70..2ade20d8eab3 100644
--- a/arch/unicore32/mm/pgd.c
+++ b/arch/unicore32/mm/pgd.c
@@ -69,6 +69,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
no_pte:
pmd_free(mm, new_pmd);
+ mm_dec_nr_pmds(mm);
no_pmd:
free_pages((unsigned long)new_pgd, 0);
no_pgd:
@@ -96,7 +97,9 @@ void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
+ atomic_long_dec(&mm->nr_ptes);
pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
free:
free_pages((unsigned long) pgd, 0);
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba397bde7948..eb1cf898ed3c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -17,6 +17,7 @@ config X86_64
depends on 64BIT
select X86_DEV_DMA_OPS
select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_LIVEPATCH
### Arch settings
config X86
@@ -84,6 +85,7 @@ config X86
select HAVE_CMPXCHG_LOCAL
select HAVE_CMPXCHG_DOUBLE
select HAVE_ARCH_KMEMCHECK
+ select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
select HAVE_USER_RETURN_NOTIFIER
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_ARCH_JUMP_LABEL
@@ -138,6 +140,7 @@ config X86
select HAVE_ACPI_APEI_NMI if ACPI
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select X86_FEATURE_NAMES if PROC_FS
+ select SRCU
config INSTRUCTION_DECODER
def_bool y
@@ -496,6 +499,17 @@ config X86_INTEL_LPSS
things like clock tree (common clock framework) and pincontrol
which are needed by the LPSS peripheral drivers.
+config X86_AMD_PLATFORM_DEVICE
+ bool "AMD ACPI2Platform devices support"
+ depends on ACPI
+ select COMMON_CLK
+ select PINCTRL
+ ---help---
+ Select to interpret AMD-specific ACPI devices, such as the I2C,
+ UART and GPIO controllers found on AMD Carrizo and later chipsets,
+ as platform devices. I2C and UART depend on COMMON_CLK to set their
+ clocks; the GPIO driver is implemented under the PINCTRL subsystem.
+
config IOSF_MBI
tristate "Intel SoC IOSF Sideband support for SoC platforms"
depends on PCI
@@ -855,9 +869,13 @@ config SCHED_MC
source "kernel/Kconfig.preempt"
+config UP_LATE_INIT
+ def_bool y
+ depends on !SMP && X86_LOCAL_APIC
+
config X86_UP_APIC
bool "Local APIC support on uniprocessors"
- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
+ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
---help---
A local APIC (Advanced Programmable Interrupt Controller) is an
integrated interrupt controller in the CPU. If you have a single-CPU
@@ -868,6 +886,10 @@ config X86_UP_APIC
performance counters), and the NMI watchdog which detects hard
lockups.
+config X86_UP_APIC_MSI
+ def_bool y
+ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
+
config X86_UP_IOAPIC
bool "IO-APIC support on uniprocessors"
depends on X86_UP_APIC
@@ -2008,6 +2030,8 @@ config CMDLINE_OVERRIDE
This is used to work around broken boot loaders. This should
be set to 'N' under normal conditions.
+source "kernel/livepatch/Kconfig"
+
endmenu
config ARCH_ENABLE_MEMORY_HOTPLUG
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 920e6160c535..5ba2d9ce82dc 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -148,6 +148,7 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI
# does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
+asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 3db07f30636f..57bbf2fb21f6 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -14,6 +14,8 @@
# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
# The number is the same as you would ordinarily press at bootup.
+KASAN_SANITIZE := n
+
SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
targets := vmlinux.bin setup.bin setup.elf bzImage
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index d999398928bc..843feb3eb20b 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -16,6 +16,8 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
+KASAN_SANITIZE := n
+
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
@@ -90,7 +92,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4
RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
- perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 92b9a5f2aed6..ef17683484e9 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -13,8 +13,7 @@
#include <asm/setup.h>
#include <asm/desc.h>
-#undef memcpy /* Use memcpy from misc.c */
-
+#include "../string.h"
#include "eboot.h"
static efi_system_table_t *sys_table;
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index dcc1c536cc21..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -373,6 +373,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
unsigned long output_len,
unsigned long run_size)
{
+ unsigned char *output_orig = output;
+
real_mode = rmode;
sanitize_boot_params(real_mode);
@@ -421,7 +423,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
debug_putstr("\nDecompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
parse_elf(output);
- handle_relocations(output, output_len);
+ /*
+ * 32-bit always performs relocations. 64-bit relocations are only
+ * needed if kASLR has chosen a different load address.
+ */
+ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
+ handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 24e3e569a13c..04477d68403f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -7,6 +7,7 @@
* we just keep it from happening
*/
#undef CONFIG_PARAVIRT
+#undef CONFIG_KASAN
#ifdef CONFIG_X86_32
#define _ASM_X86_DESC_H 1
#endif
diff --git a/arch/x86/boot/ctype.h b/arch/x86/boot/ctype.h
index 25e13403193c..020f137df7a2 100644
--- a/arch/x86/boot/ctype.h
+++ b/arch/x86/boot/ctype.h
@@ -1,6 +1,5 @@
-#ifndef BOOT_ISDIGIT_H
-
-#define BOOT_ISDIGIT_H
+#ifndef BOOT_CTYPE_H
+#define BOOT_CTYPE_H
static inline int isdigit(int ch)
{
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 5df2869c874b..45a07684bbab 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -2,8 +2,6 @@
#define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */
-#define XMTRDY 0x20
-
#define DLAB 0x80
#define TXR 0 /* Transmit register (WRITE) */
@@ -74,8 +72,8 @@ static void parse_earlyprintk(void)
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
- if (!strncmp(arg + pos, "ttyS", 4))
- pos += 4;
+ /* += strlen("ttyS"); */
+ pos += 4;
if (arg[pos++] == '1')
idx = 1;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 477e9d75149b..6bd2c6c95373 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,12 +32,23 @@
#include <linux/linkage.h>
#include <asm/inst.h>
+/*
+ * The following macros are used to move an (un)aligned 16 byte value to/from
+ * an XMM register. This can be done for either FP or integer values, for FP use
+ * movaps (move aligned packed single) or integer use movdqa (move double quad
+ * aligned). It doesn't make a performance difference which instruction is used
+ * since Nehalem (original Core i7) was released. However, the movaps is a byte
+ * shorter, so that is the one we'll use for now. (same for unaligned).
+ */
+#define MOVADQ movaps
+#define MOVUDQ movups
+
#ifdef __x86_64__
+
.data
.align 16
.Lgf128mul_x_ble_mask:
.octa 0x00000000000000010000000000000087
-
POLY: .octa 0xC2000000000000000000000000000001
TWOONE: .octa 0x00000001000000000000000000000001
@@ -89,6 +100,7 @@ enc: .octa 0x2
#define arg8 STACK_OFFSET+16(%r14)
#define arg9 STACK_OFFSET+24(%r14)
#define arg10 STACK_OFFSET+32(%r14)
+#define keysize 2*15*16(%arg1)
#endif
@@ -213,10 +225,12 @@ enc: .octa 0x2
.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
mov arg8, %r12 # %r12 = aadLen
mov %r12, %r11
pxor %xmm\i, %xmm\i
+
_get_AAD_loop\num_initial_blocks\operation:
movd (%r10), \TMP1
pslldq $12, \TMP1
@@ -225,16 +239,18 @@ _get_AAD_loop\num_initial_blocks\operation:
add $4, %r10
sub $4, %r12
jne _get_AAD_loop\num_initial_blocks\operation
+
cmp $16, %r11
je _get_AAD_loop2_done\num_initial_blocks\operation
+
mov $16, %r12
_get_AAD_loop2\num_initial_blocks\operation:
psrldq $4, %xmm\i
sub $4, %r12
cmp %r11, %r12
jne _get_AAD_loop2\num_initial_blocks\operation
+
_get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
xor %r11, %r11 # initialise the data pointer offset as zero
@@ -243,59 +259,34 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
mov %arg5, %rax # %rax = *Y0
movdqu (%rax), \XMM0 # XMM0 = Y0
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM0
.if (\i == 5) || (\i == 6) || (\i == 7)
+ MOVADQ ONE(%RIP),\TMP1
+ MOVADQ (%arg1),\TMP2
.irpc index, \i_seq
- paddd ONE(%rip), \XMM0 # INCR Y0
+ paddd \TMP1, \XMM0 # INCR Y0
movdqa \XMM0, %xmm\index
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
-
-.endr
-.irpc index, \i_seq
- pxor 16*0(%arg1), %xmm\index
-.endr
-.irpc index, \i_seq
- movaps 0x10(%rdi), \TMP1
- AESENC \TMP1, %xmm\index # Round 1
-.endr
-.irpc index, \i_seq
- movaps 0x20(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x30(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x40(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x50(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x60(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
+ pxor \TMP2, %xmm\index
.endr
-.irpc index, \i_seq
- movaps 0x70(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x80(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x90(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
+ lea 0x10(%arg1),%r10
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ add $5,%eax # 128->9, 192->11, 256->13
+
+aes_loop_initial_dec\num_initial_blocks:
+ MOVADQ (%r10),\TMP1
+.irpc index, \i_seq
+ AESENC \TMP1, %xmm\index
.endr
+ add $16,%r10
+ sub $1,%eax
+ jnz aes_loop_initial_dec\num_initial_blocks
+
+ MOVADQ (%r10), \TMP1
.irpc index, \i_seq
- movaps 0xa0(%arg1), \TMP1
- AESENCLAST \TMP1, %xmm\index # Round 10
+ AESENCLAST \TMP1, %xmm\index # Last Round
.endr
.irpc index, \i_seq
movdqu (%arg3 , %r11, 1), \TMP1
@@ -305,10 +296,8 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
add $16, %r11
movdqa \TMP1, %xmm\index
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, %xmm\index
-
- # prepare plaintext/ciphertext for GHASH computation
+ # prepare plaintext/ciphertext for GHASH computation
.endr
.endif
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -338,30 +327,28 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
* Precomputations for HashKey parallel with encryption of first 4 blocks.
* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
*/
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM1
- movdqa SHUF_MASK(%rip), %xmm14
+ MOVADQ ONE(%rip), \TMP1
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM1
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM2
- movdqa SHUF_MASK(%rip), %xmm14
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM2
PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM3
- movdqa SHUF_MASK(%rip), %xmm14
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM3
PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM4
- movdqa SHUF_MASK(%rip), %xmm14
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM4
PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
- pxor 16*0(%arg1), \XMM1
- pxor 16*0(%arg1), \XMM2
- pxor 16*0(%arg1), \XMM3
- pxor 16*0(%arg1), \XMM4
+ MOVADQ 0(%arg1),\TMP1
+ pxor \TMP1, \XMM1
+ pxor \TMP1, \XMM2
+ pxor \TMP1, \XMM3
+ pxor \TMP1, \XMM4
movdqa \TMP3, \TMP5
pshufd $78, \TMP3, \TMP1
pxor \TMP3, \TMP1
@@ -399,7 +386,23 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
movdqa \TMP1, HashKey_4_k(%rsp)
- movaps 0xa0(%arg1), \TMP2
+ lea 0xa0(%arg1),%r10
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ sub $4,%eax # 128->0, 192->2, 256->4
+ jz aes_loop_pre_dec_done\num_initial_blocks
+
+aes_loop_pre_dec\num_initial_blocks:
+ MOVADQ (%r10),\TMP2
+.irpc index, 1234
+ AESENC \TMP2, %xmm\index
+.endr
+ add $16,%r10
+ sub $1,%eax
+ jnz aes_loop_pre_dec\num_initial_blocks
+
+aes_loop_pre_dec_done\num_initial_blocks:
+ MOVADQ (%r10), \TMP2
AESENCLAST \TMP2, \XMM1
AESENCLAST \TMP2, \XMM2
AESENCLAST \TMP2, \XMM3
@@ -421,15 +424,11 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
movdqa \TMP1, \XMM4
add $64, %r11
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
pxor \XMMDst, \XMM1
# combine GHASHed value with the corresponding ciphertext
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
_initial_blocks_done\num_initial_blocks\operation:
@@ -451,6 +450,7 @@ _initial_blocks_done\num_initial_blocks\operation:
.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
mov arg8, %r12 # %r12 = aadLen
mov %r12, %r11
@@ -472,7 +472,6 @@ _get_AAD_loop2\num_initial_blocks\operation:
cmp %r11, %r12
jne _get_AAD_loop2\num_initial_blocks\operation
_get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
xor %r11, %r11 # initialise the data pointer offset as zero
@@ -481,59 +480,35 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
mov %arg5, %rax # %rax = *Y0
movdqu (%rax), \XMM0 # XMM0 = Y0
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM0
.if (\i == 5) || (\i == 6) || (\i == 7)
-.irpc index, \i_seq
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, %xmm\index
- movdqa SHUF_MASK(%rip), %xmm14
- PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
-.endr
-.irpc index, \i_seq
- pxor 16*0(%arg1), %xmm\index
-.endr
-.irpc index, \i_seq
- movaps 0x10(%rdi), \TMP1
- AESENC \TMP1, %xmm\index # Round 1
-.endr
-.irpc index, \i_seq
- movaps 0x20(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
+ MOVADQ ONE(%RIP),\TMP1
+ MOVADQ 0(%arg1),\TMP2
.irpc index, \i_seq
- movaps 0x30(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, %xmm\index
+ PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
+ pxor \TMP2, %xmm\index
.endr
-.irpc index, \i_seq
- movaps 0x40(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x50(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x60(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x70(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x80(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
-.endr
-.irpc index, \i_seq
- movaps 0x90(%arg1), \TMP1
- AESENC \TMP1, %xmm\index # Round 2
+ lea 0x10(%arg1),%r10
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ add $5,%eax # 128->9, 192->11, 256->13
+
+aes_loop_initial_enc\num_initial_blocks:
+ MOVADQ (%r10),\TMP1
+.irpc index, \i_seq
+ AESENC \TMP1, %xmm\index
.endr
+ add $16,%r10
+ sub $1,%eax
+ jnz aes_loop_initial_enc\num_initial_blocks
+
+ MOVADQ (%r10), \TMP1
.irpc index, \i_seq
- movaps 0xa0(%arg1), \TMP1
- AESENCLAST \TMP1, %xmm\index # Round 10
+ AESENCLAST \TMP1, %xmm\index # Last Round
.endr
.irpc index, \i_seq
movdqu (%arg3 , %r11, 1), \TMP1
@@ -541,8 +516,6 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
movdqu %xmm\index, (%arg2 , %r11, 1)
# write back plaintext/ciphertext for num_initial_blocks
add $16, %r11
-
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, %xmm\index
# prepare plaintext/ciphertext for GHASH computation
@@ -575,30 +548,28 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
* Precomputations for HashKey parallel with encryption of first 4 blocks.
 * HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
*/
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM1
- movdqa SHUF_MASK(%rip), %xmm14
+ MOVADQ ONE(%rip),\TMP1
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM1
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM2
- movdqa SHUF_MASK(%rip), %xmm14
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM2
PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM3
- movdqa SHUF_MASK(%rip), %xmm14
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM3
PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- paddd ONE(%rip), \XMM0 # INCR Y0
- movdqa \XMM0, \XMM4
- movdqa SHUF_MASK(%rip), %xmm14
+ paddd \TMP1, \XMM0 # INCR Y0
+ MOVADQ \XMM0, \XMM4
PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
- pxor 16*0(%arg1), \XMM1
- pxor 16*0(%arg1), \XMM2
- pxor 16*0(%arg1), \XMM3
- pxor 16*0(%arg1), \XMM4
+ MOVADQ 0(%arg1),\TMP1
+ pxor \TMP1, \XMM1
+ pxor \TMP1, \XMM2
+ pxor \TMP1, \XMM3
+ pxor \TMP1, \XMM4
movdqa \TMP3, \TMP5
pshufd $78, \TMP3, \TMP1
pxor \TMP3, \TMP1
@@ -636,7 +607,23 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
movdqa \TMP1, HashKey_4_k(%rsp)
- movaps 0xa0(%arg1), \TMP2
+ lea 0xa0(%arg1),%r10
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ sub $4,%eax # 128->0, 192->2, 256->4
+ jz aes_loop_pre_enc_done\num_initial_blocks
+
+aes_loop_pre_enc\num_initial_blocks:
+ MOVADQ (%r10),\TMP2
+.irpc index, 1234
+ AESENC \TMP2, %xmm\index
+.endr
+ add $16,%r10
+ sub $1,%eax
+ jnz aes_loop_pre_enc\num_initial_blocks
+
+aes_loop_pre_enc_done\num_initial_blocks:
+ MOVADQ (%r10), \TMP2
AESENCLAST \TMP2, \XMM1
AESENCLAST \TMP2, \XMM2
AESENCLAST \TMP2, \XMM3
@@ -655,15 +642,11 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
add $64, %r11
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
pxor \XMMDst, \XMM1
# combine GHASHed value with the corresponding ciphertext
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- movdqa SHUF_MASK(%rip), %xmm14
PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
_initial_blocks_done\num_initial_blocks\operation:
@@ -794,7 +777,23 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0
- movaps 0xa0(%arg1), \TMP3
+ lea 0xa0(%arg1),%r10
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ sub $4,%eax # 128->0, 192->2, 256->4
+ jz aes_loop_par_enc_done
+
+aes_loop_par_enc:
+ MOVADQ (%r10),\TMP3
+.irpc index, 1234
+ AESENC \TMP3, %xmm\index
+.endr
+ add $16,%r10
+ sub $1,%eax
+ jnz aes_loop_par_enc
+
+aes_loop_par_enc_done:
+ MOVADQ (%r10), \TMP3
AESENCLAST \TMP3, \XMM1 # Round 10
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
@@ -986,8 +985,24 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0
- movaps 0xa0(%arg1), \TMP3
- AESENCLAST \TMP3, \XMM1 # Round 10
+ lea 0xa0(%arg1),%r10
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ sub $4,%eax # 128->0, 192->2, 256->4
+ jz aes_loop_par_dec_done
+
+aes_loop_par_dec:
+ MOVADQ (%r10),\TMP3
+.irpc index, 1234
+ AESENC \TMP3, %xmm\index
+.endr
+ add $16,%r10
+ sub $1,%eax
+ jnz aes_loop_par_dec
+
+aes_loop_par_dec_done:
+ MOVADQ (%r10), \TMP3
+ AESENCLAST \TMP3, \XMM1 # last round
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
@@ -1155,33 +1170,29 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pxor \TMP6, \XMMDst # reduced result is in XMMDst
.endm
-/* Encryption of a single block done*/
-.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
- pxor (%arg1), \XMM0
- movaps 16(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 32(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 48(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 64(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 80(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 96(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 112(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 128(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 144(%arg1), \TMP1
- AESENC \TMP1, \XMM0
- movaps 160(%arg1), \TMP1
- AESENCLAST \TMP1, \XMM0
-.endm
+/* Encryption of a single block
+ * uses eax & r10
+ */
+.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
+ pxor (%arg1), \XMM0
+ mov keysize,%eax
+ shr $2,%eax # 128->4, 192->6, 256->8
+ add $5,%eax # 128->9, 192->11, 256->13
+ lea 16(%arg1), %r10 # get first expanded key address
+
+_esb_loop_\@:
+ MOVADQ (%r10),\TMP1
+ AESENC \TMP1,\XMM0
+ add $16,%r10
+ sub $1,%eax
+ jnz _esb_loop_\@
+
+ MOVADQ (%r10),\TMP1
+ AESENCLAST \TMP1,\XMM0
+.endm
/*****************************************************************************
* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
* u8 *out, // Plaintext output. Encrypt in-place is allowed.
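The round-count arithmetic that the new aes_loop_* blocks above repeat ("shr $2", then "sub $4" or "add $5") is easy to misread, so here is a small stand-alone C sketch of the same computation. It is illustrative only and not part of the patch; keysize stands for the AES key length in bytes, as in the assembly.

#include <stdio.h>

int main(void)
{
	int keysize;	/* AES key length in bytes: 16, 24 or 32 */

	for (keysize = 16; keysize <= 32; keysize += 8) {
		int nk = keysize >> 2;		/* "shr $2": 4, 6, 8 */
		int aesenc_rounds = nk + 5;	/* "add $5": 9, 11, 13 AESENC before AESENCLAST */
		int extra_rounds = nk - 4;	/* "sub $4": 0, 2, 4 rounds beyond the unrolled nine */

		printf("AES-%d: %d AESENC rounds, %d extra in the key-size loop\n",
		       keysize * 8, aesenc_rounds, extra_rounds);
	}
	return 0;
}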
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index ae855f4f64b7..947c6bf52c33 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -43,6 +43,7 @@
#include <asm/crypto/glue_helper.h>
#endif
+
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
@@ -182,7 +183,8 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
- if (plaintext_len < AVX_GEN2_OPTSIZE) {
+ struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+ if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else {
@@ -197,7 +199,8 @@ static void aesni_gcm_dec_avx(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
- if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+ struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+ if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else {
@@ -231,7 +234,8 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
- if (plaintext_len < AVX_GEN2_OPTSIZE) {
+ struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+ if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
@@ -250,7 +254,8 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
- if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+ struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+ if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
aad, aad_len, auth_tag, auth_tag_len);
} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
@@ -511,7 +516,7 @@ static int ctr_crypt(struct blkcipher_desc *desc,
kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
+ nbytes & AES_BLOCK_MASK, walk.iv);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -902,7 +907,8 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
}
/*Account for 4 byte nonce at the end.*/
key_len -= 4;
- if (key_len != AES_KEYSIZE_128) {
+ if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256) {
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1013,6 +1019,7 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv_tab[16+AESNI_ALIGN];
@@ -1027,6 +1034,13 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
/* to 8 or 12 bytes */
if (unlikely(req->assoclen != 8 && req->assoclen != 12))
return -EINVAL;
+ if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
+ return -EINVAL;
+ if (unlikely(key_len != AES_KEYSIZE_128 &&
+ key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256))
+ return -EINVAL;
+
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
@@ -1091,6 +1105,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
int retval = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv_and_authTag[32+AESNI_ALIGN];
@@ -1104,6 +1119,13 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
if (unlikely((req->cryptlen < auth_tag_len) ||
(req->assoclen != 8 && req->assoclen != 12)))
return -EINVAL;
+ if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
+ return -EINVAL;
+ if (unlikely(key_len != AES_KEYSIZE_128 &&
+ key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256))
+ return -EINVAL;
+
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length */
/* equal to 8 or 12 bytes */
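As a usage-level illustration of the new request checks, the sketch below (hypothetical helpers, not part of the patch) restates the accepted RFC 4106 parameter ranges in stand-alone C: 8/12/16-byte authentication tags and AES-128/192/256 keys, with the 4-byte nonce already stripped from the key length as rfc4106_set_key() does.

#include <stdbool.h>

#define AES_KEYSIZE_128	16
#define AES_KEYSIZE_192	24
#define AES_KEYSIZE_256	32

static bool rfc4106_tag_len_ok(unsigned long auth_tag_len)
{
	return auth_tag_len == 8 || auth_tag_len == 12 || auth_tag_len == 16;
}

static bool rfc4106_key_len_ok(unsigned int key_len)
{
	/* key_len excludes the 4-byte nonce, as in rfc4106_set_key() */
	return key_len == AES_KEYSIZE_128 ||
	       key_len == AES_KEYSIZE_192 ||
	       key_len == AES_KEYSIZE_256;
}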
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 38a14f818ef1..d6fc59aaaadf 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -504,6 +504,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_ALIAS_CRYPTO("des3_ede-asm");
-MODULE_ALIAS_CRYPTO("des");
-MODULE_ALIAS_CRYPTO("des-asm");
MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a225a5ca1037..fd9f6b035b16 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -931,4 +931,4 @@ module_exit(sha1_mb_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index f9e181aaba97..d0165c9a2932 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -169,7 +169,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
u32 tmp;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
get_user_try {
/*
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 82e8a1d44658..156ebcab4ada 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -179,8 +179,8 @@ sysenter_dispatch:
sysexit_from_sys_call:
andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
/* clear IF, that popfq doesn't enable interrupts early */
- andl $~0x200,EFLAGS-R11(%rsp)
- movl RIP-R11(%rsp),%edx /* User %eip */
+ andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
+ movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
CFI_REGISTER rip,rdx
RESTORE_ARGS 0,24,0,0,0,0
xorq %r8,%r8
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0ab4f9fd2687..3a45668f6dc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
static inline void disable_acpi(void)
{
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 465b309af254..92003f3c8a42 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -106,7 +106,14 @@ extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);
-extern int x2apic_mode;
+static inline bool apic_is_x2apic_enabled(void)
+{
+ u64 msr;
+
+ if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+ return false;
+ return msr & X2APIC_ENABLE;
+}
#ifdef CONFIG_X86_X2APIC
/*
@@ -169,48 +176,23 @@ static inline u64 native_x2apic_icr_read(void)
return val;
}
+extern int x2apic_mode;
extern int x2apic_phys;
-extern int x2apic_preenabled;
-extern void check_x2apic(void);
-extern void enable_x2apic(void);
+extern void __init check_x2apic(void);
+extern void x2apic_setup(void);
static inline int x2apic_enabled(void)
{
- u64 msr;
-
- if (!cpu_has_x2apic)
- return 0;
-
- rdmsrl(MSR_IA32_APICBASE, msr);
- if (msr & X2APIC_ENABLE)
- return 1;
- return 0;
+ return cpu_has_x2apic && apic_is_x2apic_enabled();
}
#define x2apic_supported() (cpu_has_x2apic)
-static inline void x2apic_force_phys(void)
-{
- x2apic_phys = 1;
-}
#else
-static inline void disable_x2apic(void)
-{
-}
-static inline void check_x2apic(void)
-{
-}
-static inline void enable_x2apic(void)
-{
-}
-static inline int x2apic_enabled(void)
-{
- return 0;
-}
-static inline void x2apic_force_phys(void)
-{
-}
+static inline void check_x2apic(void) { }
+static inline void x2apic_setup(void) { }
+static inline int x2apic_enabled(void) { return 0; }
-#define x2apic_preenabled 0
-#define x2apic_supported() 0
+#define x2apic_mode (0)
+#define x2apic_supported() (0)
#endif
extern void enable_IR_x2apic(void);
@@ -219,7 +201,6 @@ extern int get_physical_broadcast(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
-extern void connect_bsp_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
@@ -227,8 +208,6 @@ extern int verify_local_APIC(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
-extern void end_local_APIC_setup(void);
-extern void bsp_end_local_APIC_setup(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
@@ -236,6 +215,9 @@ extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern int apic_force_enable(unsigned long addr);
+extern int apic_bsp_setup(bool upmode);
+extern void apic_ap_setup(void);
+
/*
* On 32bit this is mach-xxx local
*/
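The condensed logic of the new apic_is_x2apic_enabled() helper, restated as a stand-alone C sketch (illustrative only): a faulting MSR read is treated as "x2APIC disabled", otherwise the X2APIC_ENABLE bit of IA32_APIC_BASE decides.

/* msr_read_failed mirrors the non-zero return of rdmsrl_safe() */
static int x2apic_enabled_sketch(int msr_read_failed,
				 unsigned long long apic_base_msr,
				 unsigned long long x2apic_enable_bit)
{
	if (msr_read_failed)
		return 0;
	return (apic_base_msr & x2apic_enable_bit) != 0;
}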
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 76659b67fd11..1f1297b46f83 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -83,7 +83,6 @@ For 32-bit we have the following conventions - kernel is built with
#define SS 160
#define ARGOFFSET R11
-#define SWFRAME ORIG_RAX
.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
subq $9*8+\addskip, %rsp
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index aede2c347bde..90a54851aedc 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -174,6 +174,7 @@
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT ( 6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
/*
@@ -388,6 +389,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
+#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 61fd18b83b6c..12cb66f6d3a5 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -114,5 +114,10 @@ static inline void debug_stack_usage_inc(void) { }
static inline void debug_stack_usage_dec(void) { }
#endif /* X86_64 */
+#ifdef CONFIG_CPU_SUP_AMD
+extern void set_dr_addr_mask(unsigned long mask, int dr);
+#else
+static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+#endif
#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 50d033a8947d..a94b82e8f156 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
-#define _LDT_empty(info) \
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info) \
((info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
(info)->seg_not_present == 1 && \
(info)->useable == 0)
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+ return (info->base_addr == 0 &&
+ info->limit == 0 &&
+ info->contents == 0 &&
+ info->read_exec_only == 0 &&
+ info->seg_32bit == 0 &&
+ info->limit_in_pages == 0 &&
+ info->seg_not_present == 0 &&
+ info->useable == 0);
+}
static inline void clear_LDT(void)
{
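The distinction the desc.h hunk draws between "zero" and "empty" descriptors is easier to see with concrete values. Below is a user-space illustration (not part of the patch) using the uapi struct user_desc: an all-zero descriptor leaves seg_not_present clear and, per the comment above, is widely expected to mean "no segment at all", while the descriptor matched by LDT_empty() has read_exec_only and seg_not_present set.

#include <asm/ldt.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct user_desc zero, empty;

	memset(&zero, 0, sizeof(zero));		/* matches LDT_zero()  */

	empty = zero;				/* matches LDT_empty() */
	empty.read_exec_only = 1;
	empty.seg_not_present = 1;

	printf("zero: not_present=%d  empty: not_present=%d\n",
	       zero.seg_not_present, empty.seg_not_present);
	return 0;
}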
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e97622f57722..0dbc08282291 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
if (config_enabled(CONFIG_X86_32))
asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
- asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
* operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)
static inline int restore_fpu_checking(struct task_struct *tsk)
{
- /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
- is pending. Clear the x87 state here by setting it to fixed
- values. "m" is a random variable that should be in L1 */
+ /*
+ * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+ * pending. Clear the x87 state here by setting it to fixed values.
+ * "m" is a random variable that should be in L1.
+ */
if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index ef1c4d2d41ec..6c98be864a75 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -12,6 +12,7 @@
*/
struct arch_hw_breakpoint {
unsigned long address;
+ unsigned long mask;
u8 len;
u8 type;
};
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d69094..6eb6fcb83f63 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
static inline void kernel_fpu_begin(void)
{
- WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
+ WARN_ON_ONCE(!irq_fpu_usable());
__kernel_fpu_begin();
}
@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
+/* Must be called with preempt disabled */
+extern void kernel_fpu_disable(void);
+extern void kernel_fpu_enable(void);
+
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index e34e097b6f9d..705d35708a50 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -136,9 +136,6 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
#define SFI_MTMR_MAX_NUM 8
#define SFI_MRTC_MAX 8
-extern struct console early_mrst_console;
-extern void mrst_early_console_init(void);
-
extern struct console early_hsu_console;
extern void hsu_early_console_init(const char *);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index bf006cce9418..2f91685fe1cd 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -279,6 +279,11 @@ static inline void disable_ioapic_support(void) { }
#define native_ioapic_set_affinity NULL
#define native_setup_ioapic_entry NULL
#define native_eoi_ioapic_pin NULL
+
+static inline void setup_IO_APIC(void) { }
+static inline void enable_IO_APIC(void) { }
+static inline void setup_ioapic_dest(void) { }
+
#endif
#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index b7747c4c2cf2..6224d316c405 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -33,8 +33,6 @@ struct irq_cfg;
#ifdef CONFIG_IRQ_REMAP
-extern void setup_irq_remapping_ops(void);
-extern int irq_remapping_supported(void);
extern void set_irq_remapping_broken(void);
extern int irq_remapping_prepare(void);
extern int irq_remapping_enable(void);
@@ -60,8 +58,6 @@ void irq_remap_modify_chip_defaults(struct irq_chip *chip);
#else /* CONFIG_IRQ_REMAP */
-static inline void setup_irq_remapping_ops(void) { }
-static inline int irq_remapping_supported(void) { return 0; }
static inline void set_irq_remapping_broken(void) { }
static inline int irq_remapping_prepare(void) { return -ENODEV; }
static inline int irq_remapping_enable(void) { return -ENODEV; }
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
new file mode 100644
index 000000000000..8b22422fbad8
--- /dev/null
+++ b/arch/x86/include/asm/kasan.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_X86_KASAN_H
+#define _ASM_X86_KASAN_H
+
+/*
+ * The compiler uses a shadow offset that assumes addresses start
+ * from 0. Kernel addresses don't start from 0, so the shadow for
+ * the kernel really starts from the compiler's shadow offset plus
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT).
+ */
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
+ (0xffff800000000000ULL >> 3))
+/* 47 bits for kernel address -> (47 - 3) bits for shadow */
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1ULL << (47 - 3)))
+
+#ifndef __ASSEMBLY__
+
+extern pte_t kasan_zero_pte[];
+extern pte_t kasan_zero_pmd[];
+extern pte_t kasan_zero_pud[];
+
+#ifdef CONFIG_KASAN
+void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_init(void);
+#else
+static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_init(void) { }
+#endif
+
+#endif
+
+#endif
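For the shadow layout that the new kasan.h encodes, a worked-numbers sketch (not from the patch) may help: with a scale shift of 3, every 8 bytes of the 47-bit kernel half of the address space map to one shadow byte, which is exactly the span between KASAN_SHADOW_START and KASAN_SHADOW_END.

#include <stdio.h>

int main(void)
{
	unsigned long long kernel_span = 1ULL << 47;		/* 128 TB */
	unsigned long long shadow_span = kernel_span >> 3;	/* 16 TB  */

	printf("kernel span: %llu TB, shadow span: %llu TB\n",
	       kernel_span >> 40, shadow_span >> 40);
	return 0;
}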
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index eb181178fe0b..57a9d94fe160 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -208,6 +208,7 @@ struct x86_emulate_ops {
void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+ void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d89c6b828c96..a236e39cc385 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -38,8 +38,6 @@
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
-#define KVM_MMIO_SIZE 16
-
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
@@ -51,7 +49,7 @@
| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
-#define CR3_PCID_INVD (1UL << 63)
+#define CR3_PCID_INVD BIT_64(63)
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
@@ -160,6 +158,18 @@ enum {
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff2bff
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
+
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
@@ -615,6 +625,8 @@ struct kvm_arch {
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
+
+ bool boot_vcpu_runs_old_kvmclock;
};
struct kvm_vm_stat {
@@ -643,6 +655,7 @@ struct kvm_vcpu_stat {
u32 irq_window_exits;
u32 nmi_window_exits;
u32 halt_exits;
+ u32 halt_successful_poll;
u32 halt_wakeup;
u32 request_irq_exits;
u32 irq_exits;
@@ -787,6 +800,31 @@ struct kvm_x86_ops {
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
+
+ /*
+ * Arch-specific dirty logging hooks. These hooks are only supposed to
+ * be valid if the specific arch has hardware-accelerated dirty logging
+ * mechanism. Currently only for PML on VMX.
+ *
+ * - slot_enable_log_dirty:
+ * called when enabling log dirty mode for the slot.
+ * - slot_disable_log_dirty:
+ * called when disabling log dirty mode for the slot.
+ * also called when slot is created with log dirty disabled.
+ * - flush_log_dirty:
+ * called before reporting dirty_bitmap to userspace.
+ * - enable_log_dirty_pt_masked:
+ * called when reenabling log dirty for the GFNs in the mask after
+ * corresponding bits are cleared in slot->dirty_bitmap.
+ */
+ void (*slot_enable_log_dirty)(struct kvm *kvm,
+ struct kvm_memory_slot *slot);
+ void (*slot_disable_log_dirty)(struct kvm *kvm,
+ struct kvm_memory_slot *slot);
+ void (*flush_log_dirty)(struct kvm *kvm);
+ void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t offset, unsigned long mask);
};
struct kvm_arch_async_pf {
@@ -819,10 +857,17 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
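A hypothetical caller-side sketch (not taken from KVM) of how optional hooks like the new dirty-logging ones are typically consumed: each hook is invoked only if the vendor module provides it, e.g. flush_log_dirty before the dirty bitmap is reported to userspace, as the comment above describes.

struct dirty_log_ops {
	void (*flush_log_dirty)(void *kvm);	/* optional, e.g. PML on VMX */
};

static void report_dirty_log_sketch(const struct dirty_log_ops *ops, void *kvm)
{
	if (ops->flush_log_dirty)
		ops->flush_log_dirty(kvm);
	/* ...then copy slot->dirty_bitmap out to userspace... */
}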
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
new file mode 100644
index 000000000000..a455a53d789a
--- /dev/null
+++ b/arch/x86/include/asm/livepatch.h
@@ -0,0 +1,46 @@
+/*
+ * livepatch.h - x86-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_X86_LIVEPATCH_H
+#define _ASM_X86_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+#ifndef CC_USING_FENTRY
+ return 1;
+#endif
+ return 0;
+}
+extern int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value);
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 51b26e895933..9b3de99dc004 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -190,7 +190,6 @@ enum mcp_flags {
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
int mce_notify_irq(void);
-void mce_notify_process(void);
DECLARE_PER_CPU(struct mce, injectm);
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 876e74e8eec7..09b9620a73b4 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -19,6 +19,8 @@ typedef struct {
struct mutex lock;
void __user *vdso;
+
+ atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 40269a2bf6f9..883f6b933fa4 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -18,6 +18,21 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
}
#endif /* !CONFIG_PARAVIRT */
+#ifdef CONFIG_PERF_EVENTS
+extern struct static_key rdpmc_always_available;
+
+static inline void load_mm_cr4(struct mm_struct *mm)
+{
+ if (static_key_true(&rdpmc_always_available) ||
+ atomic_read(&mm->context.perf_rdpmc_allowed))
+ cr4_set_bits(X86_CR4_PCE);
+ else
+ cr4_clear_bits(X86_CR4_PCE);
+}
+#else
+static inline void load_mm_cr4(struct mm_struct *mm) {}
+#endif
+
/*
* Used for LDT copy/destruction.
*/
@@ -52,15 +67,20 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ /* Load per-mm CR4 state */
+ load_mm_cr4(next);
+
/*
* Load the LDT, if the LDT is different.
*
- * It's possible leave_mm(prev) has been called. If so,
- * then prev->context.ldt could be out of sync with the
- * LDT descriptor or the LDT register. This can only happen
- * if prev->context.ldt is non-null, since we never free
- * an LDT. But LDTs can't be shared across mms, so
- * prev->context.ldt won't be equal to next->context.ldt.
+ * It's possible that prev->context.ldt doesn't match
+ * the LDT register. This can happen if leave_mm(prev)
+ * was called and then modify_ldt changed
+ * prev->context.ldt but suppressed an IPI to this CPU.
+ * In this case, prev->context.ldt != NULL, because we
+ * never free an LDT while the mm still exists. That
+ * means that next->context.ldt != prev->context.ldt,
+ * because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
@@ -85,6 +105,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ load_mm_cr4(next);
load_LDT_nolock(&next->context);
}
}
@@ -130,7 +151,25 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- mpx_notify_unmap(mm, vma, start, end);
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+ * cacheline in the mm_struct. That can be expensive
+ * enough to be seen in profiles.
+ *
+ * The mpx_notify_unmap() call and its contents have been
+ * observed to affect munmap() performance on hardware
+ * where MPX is not present.
+ *
+ * The unlikely() optimizes for the fast case: no MPX
+ * in the CPU, or no MPX use in the process. Even if
+ * we get this wrong (in the unlikely event that MPX
+ * is widely enabled on some system) the overhead of
+ * MPX itself (reading bounds tables) is expected to
+ * overwhelm the overhead of getting this unlikely()
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+ mpx_notify_unmap(mm, vma, start, end);
}
#endif /* _ASM_X86_MMU_CONTEXT_H */
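The CR4.PCE decision that load_mm_cr4() makes on each mm switch can be condensed into a two-input predicate. The sketch below is illustrative only; who increments perf_rdpmc_allowed is outside this hunk (presumably perf, when user-space RDPMC is enabled for that mm).

/* Stand-alone restatement of the load_mm_cr4() policy above. */
static int want_cr4_pce(int rdpmc_always_available, int perf_rdpmc_allowed)
{
	return rdpmc_always_available || perf_rdpmc_allowed > 0;
}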
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 75450b2c7be4..4edd53b79a81 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,17 +1,23 @@
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
-#define THREAD_SIZE_ORDER 2
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
+#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
-#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define DOUBLEFAULT_STACK 1
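The stack-order bumps above double the affected allocations when KASAN is enabled; with 4 KiB pages the worked numbers (illustrative, not from the patch) come out as follows.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int kasan_stack_order = 1;	/* CONFIG_KASAN=y */
	unsigned int thread_size = page_size << (2 + kasan_stack_order);
	unsigned int irq_stack_size = page_size << (2 + kasan_stack_order);

	printf("THREAD_SIZE = %u KiB, IRQ_STACK_SIZE = %u KiB\n",
	       thread_size / 1024, irq_stack_size / 1024);
	return 0;
}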
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 32444ae939ca..965c47d254aa 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -80,16 +80,16 @@ static inline void write_cr3(unsigned long x)
PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}
-static inline unsigned long read_cr4(void)
+static inline unsigned long __read_cr4(void)
{
return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
-static inline unsigned long read_cr4_safe(void)
+static inline unsigned long __read_cr4_safe(void)
{
return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}
-static inline void write_cr4(unsigned long x)
+static inline void __write_cr4(unsigned long x)
{
PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 164e3f8d3c3d..fa1195dae425 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
-extern bool mp_should_keep_irq(struct device *dev);
-
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 206a87fdd22d..fd74a11959de 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -62,44 +62,8 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
return ((value >> rightshift) & mask) << leftshift;
}
-/*
- * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range.
- */
-#define PTE_FILE_MAX_BITS 29
-#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
-#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
-#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
-#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
-#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
-
-#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
-#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
-
-#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
-#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
-
-static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
-{
- return (pgoff_t)
- (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
- pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
- pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
-}
-
-static __always_inline pte_t pgoff_to_pte(pgoff_t off)
-{
- return (pte_t){
- .pte_low =
- pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
- pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
- pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
- _PAGE_FILE,
- };
-}
-
/* Encode and de-code a swap entry */
-#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
+#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 81bb91b49a88..cdaa58c9b39e 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -176,18 +176,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
-/*
- * Bits 0, 6 and 7 are taken in the low part of the pte,
- * put the 32 bits of offset into the high part.
- *
- * For soft-dirty tracking 11 bit is taken from
- * the low part of pte as well.
- */
-#define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) \
- ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
-#define PTE_FILE_MAX_BITS 32
-
/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e8a5454acc99..67fc3d2b0aab 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -115,11 +115,6 @@ static inline int pte_write(pte_t pte)
return pte_flags(pte) & _PAGE_RW;
}
-static inline int pte_file(pte_t pte)
-{
- return pte_flags(pte) & _PAGE_FILE;
-}
-
static inline int pte_huge(pte_t pte)
{
return pte_flags(pte) & _PAGE_PSE;
@@ -137,13 +132,7 @@ static inline int pte_exec(pte_t pte)
static inline int pte_special(pte_t pte)
{
- /*
- * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
- * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
- * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
- */
- return (pte_flags(pte) & _PAGE_SPECIAL) &&
- (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
+ return pte_flags(pte) & _PAGE_SPECIAL;
}
static inline unsigned long pte_pfn(pte_t pte)
@@ -305,7 +294,7 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
- return pmd_clear_flags(pmd, _PAGE_PRESENT);
+ return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
@@ -329,21 +318,6 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
-static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
-{
- return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
-}
-
-static inline pte_t pte_file_mksoft_dirty(pte_t pte)
-{
- return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
-}
-
-static inline int pte_file_soft_dirty(pte_t pte)
-{
- return pte_flags(pte) & _PAGE_SOFT_DIRTY;
-}
-
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
@@ -463,13 +437,6 @@ static inline int pte_same(pte_t a, pte_t b)
static inline int pte_present(pte_t a)
{
- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
- _PAGE_NUMA);
-}
-
-#define pte_present_nonuma pte_present_nonuma
-static inline int pte_present_nonuma(pte_t a)
-{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
@@ -479,7 +446,7 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
if (pte_flags(a) & _PAGE_PRESENT)
return true;
- if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+ if ((pte_flags(a) & _PAGE_PROTNONE) &&
mm_tlb_flush_pending(mm))
return true;
@@ -499,10 +466,25 @@ static inline int pmd_present(pmd_t pmd)
* the _PAGE_PSE flag will remain set at all times while the
* _PAGE_PRESENT bit is clear).
*/
- return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
- _PAGE_NUMA);
+ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_PROTNONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_PROTNONE;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
@@ -559,11 +541,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
static inline int pmd_bad(pmd_t pmd)
{
-#ifdef CONFIG_NUMA_BALANCING
- /* pmd_numa check */
- if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
- return 0;
-#endif
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
@@ -882,19 +859,16 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
- VM_BUG_ON(pte_present_nonuma(pte));
return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
static inline int pte_swp_soft_dirty(pte_t pte)
{
- VM_BUG_ON(pte_present_nonuma(pte));
return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
- VM_BUG_ON(pte_present_nonuma(pte));
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 4572b2f30237..2ee781114d34 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -133,10 +133,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
/* PUD - Level3 access */
/* PMD - Level 2 access */
-#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
- _PAGE_FILE })
-#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
/* PTE - Level 1 access. */
@@ -145,13 +141,8 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_unmap(pte) ((void)(pte))/* NOP */
/* Encode and de-code a swap entry */
-#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
-#ifdef CONFIG_NUMA_BALANCING
-/* Automatic NUMA balancing needs to be distinguishable from swap entries */
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 2)
-#else
+#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
-#endif
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 25bcd4a89517..8c7c10802e9c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -4,7 +4,7 @@
#include <linux/const.h>
#include <asm/page_types.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
@@ -27,19 +27,9 @@
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-/*
- * Swap offsets on configurations that allow automatic NUMA balancing use the
- * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
- * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
- * maximum possible swap space from 16TB to 8TB.
- */
-#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1)
-
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
@@ -78,21 +68,6 @@
#endif
/*
- * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
- * that is not present. The hinting fault gathers numa placement statistics
- * (see pte_numa()). The bit is always zero when the PTE is not present.
- *
- * The bit picked must be always zero when the pmd is present and not
- * present, so that we don't lose information when we set it while
- * atomically clearing the present bit.
- */
-#ifdef CONFIG_NUMA_BALANCING
-#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
-#else
-#define _PAGE_NUMA (_AT(pteval_t, 0))
-#endif
-
-/*
* Tracking soft dirty bit when a page goes to a swap is tricky.
* We need a bit which can be stored in pte _and_ not conflict
* with swap entry format. On x86 bits 6 and 7 are *not* involved
@@ -114,7 +89,6 @@
#define _PAGE_NX (_AT(pteval_t, 0))
#endif
-#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
@@ -125,8 +99,8 @@
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY | _PAGE_NUMA)
-#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)
+ _PAGE_SOFT_DIRTY)
+#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
* The cache modes defined here are used to translate between pure SW usage
@@ -327,20 +301,6 @@ static inline pteval_t pte_flags(pte_t pte)
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
-#ifdef CONFIG_NUMA_BALANCING
-/* Set of bits that distinguishes present, prot_none and numa ptes */
-#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)
-static inline pteval_t ptenuma_flags(pte_t pte)
-{
- return pte_flags(pte) & _PAGE_NUMA_MASK;
-}
-
-static inline pmdval_t pmdnuma_flags(pmd_t pmd)
-{
- return pmd_flags(pmd) & _PAGE_NUMA_MASK;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
diff --git a/arch/x86/include/asm/pmc_atom.h b/arch/x86/include/asm/pmc_atom.h
index fc7a17c05d35..bc0fc0866553 100644
--- a/arch/x86/include/asm/pmc_atom.h
+++ b/arch/x86/include/asm/pmc_atom.h
@@ -53,6 +53,28 @@
/* Sleep state counter is in units of 32us */
#define PMC_TMR_SHIFT 5
+/* Power status of power islands */
+#define PMC_PSS 0x98
+
+#define PMC_PSS_BIT_GBE BIT(0)
+#define PMC_PSS_BIT_SATA BIT(1)
+#define PMC_PSS_BIT_HDA BIT(2)
+#define PMC_PSS_BIT_SEC BIT(3)
+#define PMC_PSS_BIT_PCIE BIT(4)
+#define PMC_PSS_BIT_LPSS BIT(5)
+#define PMC_PSS_BIT_LPE BIT(6)
+#define PMC_PSS_BIT_DFX BIT(7)
+#define PMC_PSS_BIT_USH_CTRL BIT(8)
+#define PMC_PSS_BIT_USH_SUS BIT(9)
+#define PMC_PSS_BIT_USH_VCCS BIT(10)
+#define PMC_PSS_BIT_USH_VCCA BIT(11)
+#define PMC_PSS_BIT_OTG_CTRL BIT(12)
+#define PMC_PSS_BIT_OTG_VCCS BIT(13)
+#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14)
+#define PMC_PSS_BIT_OTG_VCCA BIT(15)
+#define PMC_PSS_BIT_USB BIT(16)
+#define PMC_PSS_BIT_USB_SUS BIT(17)
+
/* These registers reflect D3 status of functions */
#define PMC_D3_STS_0 0xA0
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a092a0cce0b7..ec1c93588cef 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -579,39 +579,6 @@ static inline void load_sp0(struct tss_struct *tss,
#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
-/*
- * Save the cr4 feature set we're using (ie
- * Pentium 4MB enable and PPro Global page
- * enable), so that any CPU's that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-extern u32 *trampoline_cr4_features;
-
-static inline void set_in_cr4(unsigned long mask)
-{
- unsigned long cr4;
-
- mmu_cr4_features |= mask;
- if (trampoline_cr4_features)
- *trampoline_cr4_features = mmu_cr4_features;
- cr4 = read_cr4();
- cr4 |= mask;
- write_cr4(cr4);
-}
-
-static inline void clear_in_cr4(unsigned long mask)
-{
- unsigned long cr4;
-
- mmu_cr4_features &= ~mask;
- if (trampoline_cr4_features)
- *trampoline_cr4_features = mmu_cr4_features;
- cr4 = read_cr4();
- cr4 &= ~mask;
- write_cr4(cr4);
-}
-
typedef struct {
unsigned long seg;
} mm_segment_t;
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
deleted file mode 100644
index 0da7409f0bec..000000000000
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
- * which needs to alter them. */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- io_apic_irqs = 0;
-#endif
-}
-
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- CMOS_WRITE(0xa, 0xf);
- spin_unlock_irqrestore(&rtc_lock, flags);
- local_flush_tlb();
- pr_debug("1.\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
- start_eip >> 4;
- pr_debug("2.\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
- start_eip & 0xf;
- pr_debug("3.\n");
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
- unsigned long flags;
-
- /*
- * Install writable page 0 entry to set BIOS data area.
- */
- local_flush_tlb();
-
- /*
- * Paranoid: Set warm reset code and vector here back
- * to default values.
- */
- spin_lock_irqsave(&rtc_lock, flags);
- CMOS_WRITE(0, 0xf);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
-}
-
-static inline void __init smpboot_setup_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- /*
- * Here we can be sure that there is an IO-APIC in the system. Let's
- * go and set it up:
- */
- if (!skip_ioapic_setup && nr_ioapics)
- setup_IO_APIC();
- else {
- nr_ioapics = 0;
- }
-#endif
-}
-
-static inline void smpboot_clear_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- nr_ioapics = 0;
-#endif
-}
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index e820c080a4e9..6a4b00fafb00 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -137,17 +137,17 @@ static inline void write_cr3(unsigned long x)
native_write_cr3(x);
}
-static inline unsigned long read_cr4(void)
+static inline unsigned long __read_cr4(void)
{
return native_read_cr4();
}
-static inline unsigned long read_cr4_safe(void)
+static inline unsigned long __read_cr4_safe(void)
{
return native_read_cr4_safe();
}
-static inline void write_cr4(unsigned long x)
+static inline void __write_cr4(unsigned long x)
{
native_write_cr4(x);
}
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f8a2fc..7050d864f520 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -183,10 +183,10 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- __ticket_t head = ACCESS_ONCE(lock->tickets.head);
+ __ticket_t head = READ_ONCE(lock->tickets.head);
for (;;) {
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
/*
* We need to check "unlocked" in a loop, tmp.head == head
* can be false positive because of overflow.
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 19e2c468fc2c..e4661196994e 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,11 +27,12 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
function. */
#define __HAVE_ARCH_MEMCPY 1
+extern void *__memcpy(void *to, const void *from, size_t len);
+
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
-extern void *__memcpy(void *to, const void *from, size_t len);
#define memcpy(dst, src, len) \
({ \
size_t __len = (len); \
@@ -53,9 +54,11 @@ extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
+void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
+void *__memmove(void *dest, const void *src, size_t count);
int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
@@ -63,6 +66,19 @@ char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#undef memcpy
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_64_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 547e344a6dc6..1d4e4f279a32 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
__u32 cpu; /* current CPU */
int saved_preempt_count;
mm_segment_t addr_limit;
- struct restart_block restart_block;
void __user *sysenter_return;
unsigned int sig_on_uaccess_error:1;
unsigned int uaccess_err:1; /* uaccess failed */
@@ -45,9 +44,6 @@ struct thread_info {
.cpu = 0, \
.saved_preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -75,7 +71,6 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
-#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
@@ -100,7 +95,6 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
@@ -140,7 +134,7 @@ struct thread_info {
/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK \
- (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
_TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
/* flags to check in __switch_to() */
@@ -170,6 +164,17 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
+static inline unsigned long current_stack_pointer(void)
+{
+ unsigned long sp;
+#ifdef CONFIG_X86_64
+ asm("mov %%rsp,%0" : "=g" (sp));
+#else
+ asm("mov %%esp,%0" : "=g" (sp));
+#endif
+ return sp;
+}
+
#else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 04905bfc508b..cd791948b286 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -15,6 +15,75 @@
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
+struct tlb_state {
+#ifdef CONFIG_SMP
+ struct mm_struct *active_mm;
+ int state;
+#endif
+
+ /*
+ * Access to this CR4 shadow and to H/W CR4 is protected by
+ * disabling interrupts when modifying either one.
+ */
+ unsigned long cr4;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
+/* Initialize cr4 shadow for this CPU. */
+static inline void cr4_init_shadow(void)
+{
+ this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+}
+
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits(unsigned long mask)
+{
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ if ((cr4 | mask) != cr4) {
+ cr4 |= mask;
+ this_cpu_write(cpu_tlbstate.cr4, cr4);
+ __write_cr4(cr4);
+ }
+}
+
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits(unsigned long mask)
+{
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ if ((cr4 & ~mask) != cr4) {
+ cr4 &= ~mask;
+ this_cpu_write(cpu_tlbstate.cr4, cr4);
+ __write_cr4(cr4);
+ }
+}
+
+/* Read the CR4 shadow. */
+static inline unsigned long cr4_read_shadow(void)
+{
+ return this_cpu_read(cpu_tlbstate.cr4);
+}
+
+/*
+ * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
+ * enable and PPro Global page enable), so that any CPUs that boot
+ * up after us can get the correct flags. This should only be used
+ * during boot on the boot cpu.
+ */
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
+
+static inline void cr4_set_bits_and_update_boot(unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
+ cr4_set_bits(mask);
+}
+
static inline void __native_flush_tlb(void)
{
native_write_cr3(native_read_cr3());
@@ -24,7 +93,7 @@ static inline void __native_flush_tlb_global_irq_disabled(void)
{
unsigned long cr4;
- cr4 = native_read_cr4();
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
/* clear PGE */
native_write_cr4(cr4 & ~X86_CR4_PGE);
/* write old PGE again and flush TLBs */
@@ -184,12 +253,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
-struct tlb_state {
- struct mm_struct *active_mm;
- int state;
-};
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-
static inline void reset_lazy_tlbstate(void)
{
this_cpu_write(cpu_tlbstate.state, 0);
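The new tlb_state.cr4 field is a classic shadow-register pattern: reads come from a cached per-CPU copy, and the hardware register is written only when a bit actually changes. A minimal user-space model of that pattern, assuming hw_read()/hw_write() stand in for the privileged __read_cr4()/__write_cr4() accessors:

#include <stdio.h>

static unsigned long hw_cr4;		/* pretend hardware register */
static unsigned long cr4_shadow;	/* cached copy, cheap to read */

static unsigned long hw_read(void)	{ return hw_cr4; }
static void hw_write(unsigned long v)	{ hw_cr4 = v; }

static void cr4_init_shadow(void)	{ cr4_shadow = hw_read(); }
static unsigned long cr4_read_shadow(void) { return cr4_shadow; }

static void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = cr4_shadow;

	if ((cr4 | mask) != cr4) {	/* touch hardware only on change */
		cr4 |= mask;
		cr4_shadow = cr4;
		hw_write(cr4);
	}
}

int main(void)
{
	cr4_init_shadow();
	cr4_set_bits(1UL << 7);		/* e.g. X86_CR4_PGE */
	printf("shadow=%#lx hw=%#lx\n", cr4_read_shadow(), hw_cr4);
	return 0;
}

In the kernel the shadow additionally has to be initialized early (see the cr4_init_shadow() call added to cpu_init() later in this diff) and updated with interrupts disabled, which this sketch does not model.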
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 707adc6549d8..4e49d7dff78e 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_TRAPS_H
#define _ASM_X86_TRAPS_H
+#include <linux/context_tracking_state.h>
#include <linux/kprobes.h>
#include <asm/debugreg.h>
@@ -110,6 +111,11 @@ asmlinkage void smp_thermal_interrupt(void);
asmlinkage void mce_threshold_interrupt(void);
#endif
+extern enum ctx_state ist_enter(struct pt_regs *regs);
+extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state);
+extern void ist_begin_non_atomic(struct pt_regs *regs);
+extern void ist_end_non_atomic(void);
+
/* Interrupts/Exceptions */
enum {
X86_TRAP_DE = 0, /* 0, Divide-by-zero */
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 5da71c27cc59..cce9ee68e335 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -19,6 +19,7 @@
#include <asm/vmx.h>
#include <asm/svm.h>
+#include <asm/tlbflush.h>
/*
* VMX functions:
@@ -40,12 +41,12 @@ static inline int cpu_has_vmx(void)
static inline void cpu_vmxoff(void)
{
asm volatile (ASM_VMX_VMXOFF : : : "cc");
- write_cr4(read_cr4() & ~X86_CR4_VMXE);
+ cr4_clear_bits(X86_CR4_VMXE);
}
static inline int cpu_vmx_enabled(void)
{
- return read_cr4() & X86_CR4_VMXE;
+ return __read_cr4() & X86_CR4_VMXE;
}
/** Disable VMX if it is enabled on the current CPU
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 45afaee9555c..da772edd19ab 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -69,6 +69,7 @@
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EXEC_XSAVES 0x00100000
@@ -121,6 +122,7 @@ enum vmcs_field {
GUEST_LDTR_SELECTOR = 0x0000080c,
GUEST_TR_SELECTOR = 0x0000080e,
GUEST_INTR_STATUS = 0x00000810,
+ GUEST_PML_INDEX = 0x00000812,
HOST_ES_SELECTOR = 0x00000c00,
HOST_CS_SELECTOR = 0x00000c02,
HOST_SS_SELECTOR = 0x00000c04,
@@ -140,6 +142,8 @@ enum vmcs_field {
VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009,
VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b,
+ PML_ADDRESS = 0x0000200e,
+ PML_ADDRESS_HIGH = 0x0000200f,
TSC_OFFSET = 0x00002010,
TSC_OFFSET_HIGH = 0x00002011,
VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 5eea09915a15..358dcd338915 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -55,9 +55,8 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
-extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
/*
* Helper functions to write or read unsigned long values to/from
@@ -154,21 +153,12 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
return mfn;
pfn = mfn_to_pfn_no_overrides(mfn);
- if (__pfn_to_mfn(pfn) != mfn) {
- /*
- * If this appears to be a foreign mfn (because the pfn
- * doesn't map back to the mfn), then check the local override
- * table to see if there's a better pfn to use.
- *
- * m2p_find_override_pfn returns ~0 if it doesn't find anything.
- */
- pfn = m2p_find_override_pfn(mfn, ~0);
- }
+ if (__pfn_to_mfn(pfn) != mfn)
+ pfn = ~0;
/*
- * pfn is ~0 if there are no entries in the m2p for mfn or if the
- * entry doesn't map back to the mfn and m2p_override doesn't have a
- * valid entry for it.
+ * pfn is ~0 if there are no entries in the m2p for mfn or the
+ * entry doesn't map back to the mfn.
*/
if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
pfn = mfn;
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 462efe746d77..90c458e66e13 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -187,6 +187,17 @@
#define HV_X64_MSR_SINT14 0x4000009E
#define HV_X64_MSR_SINT15 0x4000009F
+/*
+ * Synthetic Timer MSRs. Four timers per vcpu.
+ */
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c8aa65d56027..3ce079136c11 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -152,6 +152,10 @@
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
+#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690
+#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
+#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
+
/* Hardware P state interface */
#define MSR_PPERF 0x0000064e
#define MSR_PERF_LIMIT_REASONS 0x0000064f
@@ -251,6 +255,10 @@
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
+#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
+#define MSR_F16H_DR2_ADDR_MASK 0xc001101a
+#define MSR_F16H_DR3_ADDR_MASK 0xc001101b
+#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL 0xc0010200
@@ -356,8 +364,12 @@
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
+#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b
+#define MSR_IA32_SMBASE 0x0000009e
+
#define MSR_IA32_PERF_STATUS 0x00000198
#define MSR_IA32_PERF_CTL 0x00000199
+#define INTEL_PERF_CTL_MASK 0xffff
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
#define MSR_AMD_PERF_STATUS 0xc0010063
#define MSR_AMD_PERF_CTL 0xc0010062
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index b813bf9da1e2..c5f1a1deb91a 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -56,6 +56,7 @@
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_INVALID_STATE 33
+#define EXIT_REASON_MSR_LOAD_FAIL 34
#define EXIT_REASON_MWAIT_INSTRUCTION 36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
@@ -72,6 +73,7 @@
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
+#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
@@ -116,10 +118,14 @@
{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+ { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
{ EXIT_REASON_INVD, "INVD" }, \
{ EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_INVPCID, "INVPCID" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
{ EXIT_REASON_XRSTORS, "XRSTORS" }
+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
+
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5d4502c8b983..cdb1b70ddad0 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,6 +16,10 @@ CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
+KASAN_SANITIZE_head$(BITS).o := n
+KASAN_SANITIZE_dumpstack.o := n
+KASAN_SANITIZE_dumpstack_$(BITS).o := n
+
CFLAGS_irq.o := -I$(src)/../include/asm/trace
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
@@ -63,6 +67,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d1626364a28a..ae97ed0873c6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -611,20 +611,20 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
- int irq;
-
- if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
- *irqp = gsi;
- } else {
- mutex_lock(&acpi_ioapic_lock);
- irq = mp_map_gsi_to_irq(gsi,
- IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
- mutex_unlock(&acpi_ioapic_lock);
- if (irq < 0)
- return -1;
- *irqp = irq;
+ int rc, irq, trigger, polarity;
+
+ rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+ if (rc == 0) {
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+ irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+ if (irq >= 0) {
+ *irqp = irq;
+ return 0;
+ }
}
- return 0;
+
+ return -1;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
@@ -653,6 +653,7 @@ static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
return gsi;
}
+#ifdef CONFIG_X86_LOCAL_APIC
static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
int trigger, int polarity)
{
@@ -675,6 +676,7 @@ static void acpi_unregister_gsi_ioapic(u32 gsi)
mutex_unlock(&acpi_ioapic_lock);
#endif
}
+#endif
int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity) = acpi_register_gsi_pic;
@@ -843,13 +845,7 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
- struct acpi_table_boot *sb;
-
- sb = (struct acpi_table_boot *)table;
- if (!sb) {
- printk(KERN_WARNING PREFIX "Unable to map SBF\n");
- return -ENODEV;
- }
+ struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
sbf_port = sb->cmos_index; /* Save CMOS port */
@@ -863,13 +859,7 @@ static struct resource *hpet_res __initdata;
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
- struct acpi_table_hpet *hpet_tbl;
-
- hpet_tbl = (struct acpi_table_hpet *)table;
- if (!hpet_tbl) {
- printk(KERN_WARNING PREFIX "Unable to map HPET\n");
- return -ENODEV;
- }
+ struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
printk(KERN_WARNING PREFIX "HPET timers must be located in "
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 31368207837c..d1daead5fcdd 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -78,7 +78,7 @@ int x86_acpi_suspend_lowlevel(void)
header->pmode_cr0 = read_cr0();
if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
- header->pmode_cr4 = read_cr4();
+ header->pmode_cr4 = __read_cr4();
header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
}
if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index b708738d016e..6a7c23ff21d3 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -135,14 +135,6 @@ static inline void apbt_clear_mapping(void)
apbt_virt_address = NULL;
}
-/*
- * APBT timer interrupt enable / disable
- */
-static inline int is_apbt_capable(void)
-{
- return apbt_virt_address ? 1 : 0;
-}
-
static int __init apbt_clockevent_register(void)
{
struct sfi_timer_table_entry *mtmr;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 29b5b18afa27..ad3639ae1b9b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -134,9 +134,6 @@ static inline void imcr_apic_to_pic(void)
*/
static int force_enable_local_apic __initdata;
-/* Control whether x2APIC mode is enabled or not */
-static bool nox2apic __initdata;
-
/*
* APIC command line parameters
*/
@@ -161,33 +158,6 @@ static __init int setup_apicpmtimer(char *s)
__setup("apicpmtimer", setup_apicpmtimer);
#endif
-int x2apic_mode;
-#ifdef CONFIG_X86_X2APIC
-/* x2apic enabled before OS handover */
-int x2apic_preenabled;
-static int x2apic_disabled;
-static int __init setup_nox2apic(char *str)
-{
- if (x2apic_enabled()) {
- int apicid = native_apic_msr_read(APIC_ID);
-
- if (apicid >= 255) {
- pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
- apicid);
- return 0;
- }
-
- pr_warning("x2apic already enabled. will disable it\n");
- } else
- setup_clear_cpu_cap(X86_FEATURE_X2APIC);
-
- nox2apic = true;
-
- return 0;
-}
-early_param("nox2apic", setup_nox2apic);
-#endif
-
unsigned long mp_lapic_addr;
int disable_apic;
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
@@ -1475,7 +1445,7 @@ void setup_local_APIC(void)
#endif
}
-void end_local_APIC_setup(void)
+static void end_local_APIC_setup(void)
{
lapic_setup_esr();
@@ -1492,116 +1462,183 @@ void end_local_APIC_setup(void)
apic_pm_activate();
}
-void __init bsp_end_local_APIC_setup(void)
+/*
+ * APIC setup function for application processors. Called from smpboot.c
+ */
+void apic_ap_setup(void)
{
+ setup_local_APIC();
end_local_APIC_setup();
-
- /*
- * Now that local APIC setup is completed for BP, configure the fault
- * handling for interrupt remapping.
- */
- irq_remap_enable_fault_handling();
-
}
#ifdef CONFIG_X86_X2APIC
-/*
- * Need to disable xapic and x2apic at the same time and then enable xapic mode
- */
-static inline void __disable_x2apic(u64 msr)
-{
- wrmsrl(MSR_IA32_APICBASE,
- msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
- wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
-}
+int x2apic_mode;
-static __init void disable_x2apic(void)
+enum {
+ X2APIC_OFF,
+ X2APIC_ON,
+ X2APIC_DISABLED,
+};
+static int x2apic_state;
+
+static inline void __x2apic_disable(void)
{
u64 msr;
- if (!cpu_has_x2apic)
+ if (!cpu_has_apic)
return;
rdmsrl(MSR_IA32_APICBASE, msr);
- if (msr & X2APIC_ENABLE) {
- u32 x2apic_id = read_apic_id();
-
- if (x2apic_id >= 255)
- panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
+ if (!(msr & X2APIC_ENABLE))
+ return;
+ /* Disable xapic and x2apic first and then reenable xapic mode */
+ wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
+ wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
+ printk_once(KERN_INFO "x2apic disabled\n");
+}
- pr_info("Disabling x2apic\n");
- __disable_x2apic(msr);
+static inline void __x2apic_enable(void)
+{
+ u64 msr;
- if (nox2apic) {
- clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
- setup_clear_cpu_cap(X86_FEATURE_X2APIC);
- }
+ rdmsrl(MSR_IA32_APICBASE, msr);
+ if (msr & X2APIC_ENABLE)
+ return;
+ wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
+ printk_once(KERN_INFO "x2apic enabled\n");
+}
- x2apic_disabled = 1;
- x2apic_mode = 0;
+static int __init setup_nox2apic(char *str)
+{
+ if (x2apic_enabled()) {
+ int apicid = native_apic_msr_read(APIC_ID);
- register_lapic_address(mp_lapic_addr);
+ if (apicid >= 255) {
+ pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
+ apicid);
+ return 0;
+ }
+ pr_warning("x2apic already enabled.\n");
+ __x2apic_disable();
}
+ setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+ x2apic_state = X2APIC_DISABLED;
+ x2apic_mode = 0;
+ return 0;
}
+early_param("nox2apic", setup_nox2apic);
-void check_x2apic(void)
+/* Called from cpu_init() to enable x2apic on (secondary) cpus */
+void x2apic_setup(void)
{
- if (x2apic_enabled()) {
- pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
- x2apic_preenabled = x2apic_mode = 1;
+ /*
+ * If x2apic is not in ON state, disable it if already enabled
+ * from BIOS.
+ */
+ if (x2apic_state != X2APIC_ON) {
+ __x2apic_disable();
+ return;
}
+ __x2apic_enable();
}
-void enable_x2apic(void)
+static __init void x2apic_disable(void)
{
- u64 msr;
+ u32 x2apic_id;
- rdmsrl(MSR_IA32_APICBASE, msr);
- if (x2apic_disabled) {
- __disable_x2apic(msr);
+ if (x2apic_state != X2APIC_ON)
+ goto out;
+
+ x2apic_id = read_apic_id();
+ if (x2apic_id >= 255)
+ panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
+
+ __x2apic_disable();
+ register_lapic_address(mp_lapic_addr);
+out:
+ x2apic_state = X2APIC_DISABLED;
+ x2apic_mode = 0;
+}
+
+static __init void x2apic_enable(void)
+{
+ if (x2apic_state != X2APIC_OFF)
return;
- }
- if (!x2apic_mode)
+ x2apic_mode = 1;
+ x2apic_state = X2APIC_ON;
+ __x2apic_enable();
+}
+
+static __init void try_to_enable_x2apic(int remap_mode)
+{
+ if (x2apic_state == X2APIC_DISABLED)
return;
- if (!(msr & X2APIC_ENABLE)) {
- printk_once(KERN_INFO "Enabling x2apic\n");
- wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
+ if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
+ /* IR is required if there is an APIC ID > 255 even when running
+ * under KVM
+ */
+ if (max_physical_apicid > 255 ||
+ !hypervisor_x2apic_available()) {
+ pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
+ x2apic_disable();
+ return;
+ }
+
+ /*
+ * without IR all CPUs can be addressed by IOAPIC/MSI
+ * only in physical mode
+ */
+ x2apic_phys = 1;
}
+ x2apic_enable();
}
-#endif /* CONFIG_X86_X2APIC */
-int __init enable_IR(void)
+void __init check_x2apic(void)
{
-#ifdef CONFIG_IRQ_REMAP
- if (!irq_remapping_supported()) {
- pr_debug("intr-remapping not supported\n");
- return -1;
+ if (x2apic_enabled()) {
+ pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
+ x2apic_mode = 1;
+ x2apic_state = X2APIC_ON;
+ } else if (!cpu_has_x2apic) {
+ x2apic_state = X2APIC_DISABLED;
}
+}
+#else /* CONFIG_X86_X2APIC */
+static int __init validate_x2apic(void)
+{
+ if (!apic_is_x2apic_enabled())
+ return 0;
+ /*
+ * Checkme: Can we simply turn off x2apic here instead of panic?
+ */
+ panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
+}
+early_initcall(validate_x2apic);
- if (!x2apic_preenabled && skip_ioapic_setup) {
- pr_info("Skipped enabling intr-remap because of skipping "
- "io-apic setup\n");
+static inline void try_to_enable_x2apic(int remap_mode) { }
+static inline void __x2apic_enable(void) { }
+#endif /* !CONFIG_X86_X2APIC */
+
+static int __init try_to_enable_IR(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ if (!x2apic_enabled() && skip_ioapic_setup) {
+ pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
return -1;
}
-
- return irq_remapping_enable();
#endif
- return -1;
+ return irq_remapping_enable();
}
void __init enable_IR_x2apic(void)
{
unsigned long flags;
- int ret, x2apic_enabled = 0;
- int hardware_init_ret;
-
- /* Make sure irq_remap_ops are initialized */
- setup_irq_remapping_ops();
+ int ret, ir_stat;
- hardware_init_ret = irq_remapping_prepare();
- if (hardware_init_ret && !x2apic_supported())
+ ir_stat = irq_remapping_prepare();
+ if (ir_stat < 0 && !x2apic_supported())
return;
ret = save_ioapic_entries();
@@ -1614,49 +1651,13 @@ void __init enable_IR_x2apic(void)
legacy_pic->mask_all();
mask_ioapic_entries();
- if (x2apic_preenabled && nox2apic)
- disable_x2apic();
-
- if (hardware_init_ret)
- ret = -1;
- else
- ret = enable_IR();
-
- if (!x2apic_supported())
- goto skip_x2apic;
-
- if (ret < 0) {
- /* IR is required if there is APIC ID > 255 even when running
- * under KVM
- */
- if (max_physical_apicid > 255 ||
- !hypervisor_x2apic_available()) {
- if (x2apic_preenabled)
- disable_x2apic();
- goto skip_x2apic;
- }
- /*
- * without IR all CPUs can be addressed by IOAPIC/MSI
- * only in physical mode
- */
- x2apic_force_phys();
- }
+ /* If irq_remapping_prepare() succeeded, try to enable it */
+ if (ir_stat >= 0)
+ ir_stat = try_to_enable_IR();
+ /* ir_stat contains the remap mode or an error code */
+ try_to_enable_x2apic(ir_stat);
- if (ret == IRQ_REMAP_XAPIC_MODE) {
- pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
- goto skip_x2apic;
- }
-
- x2apic_enabled = 1;
-
- if (x2apic_supported() && !x2apic_mode) {
- x2apic_mode = 1;
- enable_x2apic();
- pr_info("Enabled x2apic\n");
- }
-
-skip_x2apic:
- if (ret < 0) /* IR enabling failed */
+ if (ir_stat < 0)
restore_ioapic_entries();
legacy_pic->restore_mask();
local_irq_restore(flags);
@@ -1847,82 +1848,8 @@ void __init register_lapic_address(unsigned long address)
}
}
-/*
- * This initializes the IO-APIC and APIC hardware if this is
- * a UP kernel.
- */
int apic_version[MAX_LOCAL_APIC];
-int __init APIC_init_uniprocessor(void)
-{
- if (disable_apic) {
- pr_info("Apic disabled\n");
- return -1;
- }
-#ifdef CONFIG_X86_64
- if (!cpu_has_apic) {
- disable_apic = 1;
- pr_info("Apic disabled by BIOS\n");
- return -1;
- }
-#else
- if (!smp_found_config && !cpu_has_apic)
- return -1;
-
- /*
- * Complain if the BIOS pretends there is one.
- */
- if (!cpu_has_apic &&
- APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
- pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
- boot_cpu_physical_apicid);
- return -1;
- }
-#endif
-
- default_setup_apic_routing();
-
- verify_local_APIC();
- connect_bsp_APIC();
-
-#ifdef CONFIG_X86_64
- apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
-#else
- /*
- * Hack: In case of kdump, after a crash, kernel might be booting
- * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
- * might be zero if read from MP tables. Get it from LAPIC.
- */
-# ifdef CONFIG_CRASH_DUMP
- boot_cpu_physical_apicid = read_apic_id();
-# endif
-#endif
- physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
- setup_local_APIC();
-
-#ifdef CONFIG_X86_IO_APIC
- /*
- * Now enable IO-APICs, actually call clear_IO_APIC
- * We need clear_IO_APIC before enabling error vector
- */
- if (!skip_ioapic_setup && nr_ioapics)
- enable_IO_APIC();
-#endif
-
- bsp_end_local_APIC_setup();
-
-#ifdef CONFIG_X86_IO_APIC
- if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
- setup_IO_APIC();
- else {
- nr_ioapics = 0;
- }
-#endif
-
- x86_init.timers.setup_percpu_clockev();
- return 0;
-}
-
/*
* Local APIC interrupts
*/
@@ -2027,7 +1954,7 @@ __visible void smp_trace_error_interrupt(struct pt_regs *regs)
/**
* connect_bsp_APIC - attach the APIC to the interrupt system
*/
-void __init connect_bsp_APIC(void)
+static void __init connect_bsp_APIC(void)
{
#ifdef CONFIG_X86_32
if (pic_mode) {
@@ -2274,6 +2201,100 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
}
}
+static void __init apic_bsp_up_setup(void)
+{
+#ifdef CONFIG_X86_64
+ apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
+#else
+ /*
+ * Hack: In case of kdump, after a crash, kernel might be booting
+ * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
+ * might be zero if read from MP tables. Get it from LAPIC.
+ */
+# ifdef CONFIG_CRASH_DUMP
+ boot_cpu_physical_apicid = read_apic_id();
+# endif
+#endif
+ physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
+}
+
+/**
+ * apic_bsp_setup - Setup function for local apic and io-apic
+ * @upmode: Force UP mode (for APIC_init_uniprocessor)
+ *
+ * Returns:
+ * apic_id of BSP APIC
+ */
+int __init apic_bsp_setup(bool upmode)
+{
+ int id;
+
+ connect_bsp_APIC();
+ if (upmode)
+ apic_bsp_up_setup();
+ setup_local_APIC();
+
+ if (x2apic_mode)
+ id = apic_read(APIC_LDR);
+ else
+ id = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+
+ enable_IO_APIC();
+ end_local_APIC_setup();
+ irq_remap_enable_fault_handling();
+ setup_IO_APIC();
+ /* Setup local timer */
+ x86_init.timers.setup_percpu_clockev();
+ return id;
+}
+
+/*
+ * This initializes the IO-APIC and APIC hardware if this is
+ * a UP kernel.
+ */
+int __init APIC_init_uniprocessor(void)
+{
+ if (disable_apic) {
+ pr_info("Apic disabled\n");
+ return -1;
+ }
+#ifdef CONFIG_X86_64
+ if (!cpu_has_apic) {
+ disable_apic = 1;
+ pr_info("Apic disabled by BIOS\n");
+ return -1;
+ }
+#else
+ if (!smp_found_config && !cpu_has_apic)
+ return -1;
+
+ /*
+ * Complain if the BIOS pretends there is one.
+ */
+ if (!cpu_has_apic &&
+ APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+ pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
+ boot_cpu_physical_apicid);
+ return -1;
+ }
+#endif
+
+ if (!smp_found_config)
+ disable_ioapic_support();
+
+ default_setup_apic_routing();
+ verify_local_APIC();
+ apic_bsp_setup(true);
+ return 0;
+}
+
+#ifdef CONFIG_UP_LATE_INIT
+void __init up_late_init(void)
+{
+ APIC_init_uniprocessor();
+}
+#endif
+
/*
* Power management
*/
@@ -2359,9 +2380,9 @@ static void lapic_resume(void)
mask_ioapic_entries();
legacy_pic->mask_all();
- if (x2apic_mode)
- enable_x2apic();
- else {
+ if (x2apic_mode) {
+ __x2apic_enable();
+ } else {
/*
* Make sure the APICBASE points to the right address
*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3f5f60406ab1..f4dc2462a1ac 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1507,7 +1507,10 @@ void __init enable_IO_APIC(void)
int i8259_apic, i8259_pin;
int apic, pin;
- if (!nr_legacy_irqs())
+ if (skip_ioapic_setup)
+ nr_ioapics = 0;
+
+ if (!nr_legacy_irqs() || !nr_ioapics)
return;
for_each_ioapic_pin(apic, pin) {
@@ -2295,7 +2298,7 @@ static inline void __init check_timer(void)
}
local_irq_disable();
apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
- if (x2apic_preenabled)
+ if (apic_is_x2apic_enabled())
apic_printk(APIC_QUIET, KERN_INFO
"Perhaps problem with the pre-enabled x2apic mode\n"
"Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
@@ -2373,9 +2376,9 @@ void __init setup_IO_APIC(void)
{
int ioapic;
- /*
- * calling enable_IO_APIC() is moved to setup_local_APIC for BP
- */
+ if (skip_ioapic_setup || !nr_ioapics)
+ return;
+
io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15c5df92f74e..a220239cea65 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -869,3 +869,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
return false;
}
+
+void set_dr_addr_mask(unsigned long mask, int dr)
+{
+ if (!cpu_has_bpext)
+ return;
+
+ switch (dr) {
+ case 0:
+ wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
+ break;
+ case 1:
+ case 2:
+ case 3:
+ wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
+ break;
+ default:
+ break;
+ }
+}
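set_dr_addr_mask() relies on the DR1-DR3 address-mask MSRs being contiguous while DR0 has its own register, so the index arithmetic maps dr 1..3 onto MSR_F16H_DR1_ADDR_MASK - 1 + dr. A quick user-space check of that mapping, reusing the constants added to msr-index.h earlier in this diff:

#include <stdio.h>

#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
#define MSR_F16H_DR0_ADDR_MASK 0xc0011027

int main(void)
{
	int dr;

	for (dr = 0; dr < 4; dr++) {
		unsigned int msr = dr ? MSR_F16H_DR1_ADDR_MASK - 1 + dr
				      : MSR_F16H_DR0_ADDR_MASK;

		/* dr0 -> 0xc0011027, dr1..3 -> 0xc0011019..0xc001101b */
		printf("dr%d -> MSR %#x\n", dr, msr);
	}
	return 0;
}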
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c6049650c093..b5c8ff5e9dfc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -19,6 +19,7 @@
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
+#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
@@ -278,7 +279,7 @@ __setup("nosmep", setup_disable_smep);
static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_SMEP))
- set_in_cr4(X86_CR4_SMEP);
+ cr4_set_bits(X86_CR4_SMEP);
}
static __init int setup_disable_smap(char *arg)
@@ -298,9 +299,9 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
- set_in_cr4(X86_CR4_SMAP);
+ cr4_set_bits(X86_CR4_SMAP);
#else
- clear_in_cr4(X86_CR4_SMAP);
+ cr4_clear_bits(X86_CR4_SMAP);
#endif
}
}
@@ -491,17 +492,18 @@ u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];
-void cpu_detect_tlb(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
if (this_cpu->c_detect_tlb)
this_cpu->c_detect_tlb(c);
- printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
- "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
+ pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
- tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
- tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
- tlb_lld_1g[ENTRIES]);
+ tlb_lli_4m[ENTRIES]);
+
+ pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
+ tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
+ tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
void detect_ht(struct cpuinfo_x86 *c)
@@ -1294,6 +1296,12 @@ void cpu_init(void)
wait_for_master_cpu(cpu);
/*
+ * Initialize the CR4 shadow before doing anything that could
+ * try to read it.
+ */
+ cr4_init_shadow();
+
+ /*
* Load microcode on this cpu if a valid microcode is available.
* This is early microcode loading procedure.
*/
@@ -1312,7 +1320,7 @@ void cpu_init(void)
pr_debug("Initializing CPU#%d\n", cpu);
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
/*
* Initialize the per-CPU GDT with the boot GDT,
@@ -1332,7 +1340,7 @@ void cpu_init(void)
barrier();
x86_configure_nx();
- enable_x2apic();
+ x2apic_setup();
/*
* set up and load the per-CPU TSS
@@ -1393,7 +1401,7 @@ void cpu_init(void)
printk(KERN_INFO "Initializing CPU#%d\n", cpu);
if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
load_current_idt();
switch_to_new_gdt(cpu);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9cc6b6f25f42..94d7dcb12145 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -487,10 +487,8 @@ static void init_intel(struct cpuinfo_x86 *c)
rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
- printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
- " Set to 'normal', was 'performance'\n"
- "ENERGY_PERF_BIAS: View and update with"
- " x86_energy_perf_policy(8)\n");
+ pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+ pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c7035073dfc1..659643376dbf 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -952,20 +952,18 @@ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
int type, char *buf)
{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
-
- if (len > 1) {
- const struct cpumask *mask;
-
- mask = to_cpumask(this_leaf->shared_cpu_map);
- n = type ?
- cpulist_scnprintf(buf, len-2, mask) :
- cpumask_scnprintf(buf, len-2, mask);
- buf[n++] = '\n';
- buf[n] = '\0';
- }
- return n;
+ const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+ int ret;
+
+ if (type)
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ cpumask_pr_args(mask));
+ else
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb",
+ cpumask_pr_args(mask));
+ buf[ret++] = '\n';
+ buf[ret] = '\0';
+ return ret;
}
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d2c611699cd9..3be9fa69f875 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -43,6 +43,8 @@
#include <linux/export.h>
#include <asm/processor.h>
+#include <asm/traps.h>
+#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
@@ -115,7 +117,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
*/
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
@@ -311,7 +313,7 @@ static void wait_for_panic(void)
panic("Panicing machine check CPU died");
}
-static void mce_panic(char *msg, struct mce *final, char *exp)
+static void mce_panic(const char *msg, struct mce *final, char *exp)
{
int i, apei_err = 0;
@@ -529,7 +531,7 @@ static void mce_schedule_work(void)
schedule_work(this_cpu_ptr(&mce_work));
}
-DEFINE_PER_CPU(struct irq_work, mce_irq_work);
+static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
static void mce_irq_work_cb(struct irq_work *entry)
{
@@ -735,7 +737,7 @@ static atomic_t mce_callin;
/*
* Check if a timeout waiting for other CPUs happened.
*/
-static int mce_timed_out(u64 *t)
+static int mce_timed_out(u64 *t, const char *msg)
{
/*
* The others already did panic for some reason.
@@ -750,8 +752,7 @@ static int mce_timed_out(u64 *t)
goto out;
if ((s64)*t < SPINUNIT) {
if (mca_cfg.tolerant <= 1)
- mce_panic("Timeout synchronizing machine check over CPUs",
- NULL, NULL);
+ mce_panic(msg, NULL, NULL);
cpu_missing = 1;
return 1;
}
@@ -867,7 +868,8 @@ static int mce_start(int *no_way_out)
* Wait for everyone.
*/
while (atomic_read(&mce_callin) != cpus) {
- if (mce_timed_out(&timeout)) {
+ if (mce_timed_out(&timeout,
+ "Timeout: Not all CPUs entered broadcast exception handler")) {
atomic_set(&global_nwo, 0);
return -1;
}
@@ -892,7 +894,8 @@ static int mce_start(int *no_way_out)
* only seen by one CPU before cleared, avoiding duplicates.
*/
while (atomic_read(&mce_executing) < order) {
- if (mce_timed_out(&timeout)) {
+ if (mce_timed_out(&timeout,
+ "Timeout: Subject CPUs unable to finish machine check processing")) {
atomic_set(&global_nwo, 0);
return -1;
}
@@ -936,7 +939,8 @@ static int mce_end(int order)
* loops.
*/
while (atomic_read(&mce_executing) <= cpus) {
- if (mce_timed_out(&timeout))
+ if (mce_timed_out(&timeout,
+ "Timeout: Monarch CPU unable to finish machine check processing"))
goto reset;
ndelay(SPINUNIT);
}
@@ -949,7 +953,8 @@ static int mce_end(int order)
* Subject: Wait for Monarch to finish.
*/
while (atomic_read(&mce_executing) != 0) {
- if (mce_timed_out(&timeout))
+ if (mce_timed_out(&timeout,
+ "Timeout: Monarch CPU did not finish machine check processing"))
goto reset;
ndelay(SPINUNIT);
}
@@ -1003,51 +1008,6 @@ static void mce_clear_state(unsigned long *toclear)
}
/*
- * Need to save faulting physical address associated with a process
- * in the machine check handler some place where we can grab it back
- * later in mce_notify_process()
- */
-#define MCE_INFO_MAX 16
-
-struct mce_info {
- atomic_t inuse;
- struct task_struct *t;
- __u64 paddr;
- int restartable;
-} mce_info[MCE_INFO_MAX];
-
-static void mce_save_info(__u64 addr, int c)
-{
- struct mce_info *mi;
-
- for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
- if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
- mi->t = current;
- mi->paddr = addr;
- mi->restartable = c;
- return;
- }
- }
-
- mce_panic("Too many concurrent recoverable errors", NULL, NULL);
-}
-
-static struct mce_info *mce_find_info(void)
-{
- struct mce_info *mi;
-
- for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
- if (atomic_read(&mi->inuse) && mi->t == current)
- return mi;
- return NULL;
-}
-
-static void mce_clear_info(struct mce_info *mi)
-{
- atomic_set(&mi->inuse, 0);
-}
-
-/*
* The actual machine check handler. This only handles real
* exceptions when something got corrupted coming in through int 18.
*
@@ -1063,6 +1023,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
{
struct mca_config *cfg = &mca_cfg;
struct mce m, *final;
+ enum ctx_state prev_state;
int i;
int worst = 0;
int severity;
@@ -1084,6 +1045,10 @@ void do_machine_check(struct pt_regs *regs, long error_code)
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
char *msg = "Unknown";
+ u64 recover_paddr = ~0ull;
+ int flags = MF_ACTION_REQUIRED;
+
+ prev_state = ist_enter(regs);
this_cpu_inc(mce_exception_count);
@@ -1203,9 +1168,9 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (no_way_out)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst == MCE_AR_SEVERITY) {
- /* schedule action before return to userland */
- mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
- set_thread_flag(TIF_MCE_NOTIFY);
+ recover_paddr = m.addr;
+ if (!(m.mcgstatus & MCG_STATUS_RIPV))
+ flags |= MF_MUST_KILL;
} else if (kill_it) {
force_sig(SIGBUS, current);
}
@@ -1216,6 +1181,27 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
sync_core();
+
+ if (recover_paddr == ~0ull)
+ goto done;
+
+ pr_err("Uncorrected hardware memory error in user-access at %llx",
+ recover_paddr);
+ /*
+ * We must call memory_failure() here even if the current process is
+ * doomed. We still need to mark the page as poisoned and alert any
+ * other users of the page.
+ */
+ ist_begin_non_atomic(regs);
+ local_irq_enable();
+ if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
+ pr_err("Memory error not recovered");
+ force_sig(SIGBUS, current);
+ }
+ local_irq_disable();
+ ist_end_non_atomic();
+done:
+ ist_exit(regs, prev_state);
}
EXPORT_SYMBOL_GPL(do_machine_check);
@@ -1233,42 +1219,6 @@ int memory_failure(unsigned long pfn, int vector, int flags)
#endif
/*
- * Called in process context that interrupted by MCE and marked with
- * TIF_MCE_NOTIFY, just before returning to erroneous userland.
- * This code is allowed to sleep.
- * Attempt possible recovery such as calling the high level VM handler to
- * process any corrupted pages, and kill/signal current process if required.
- * Action required errors are handled here.
- */
-void mce_notify_process(void)
-{
- unsigned long pfn;
- struct mce_info *mi = mce_find_info();
- int flags = MF_ACTION_REQUIRED;
-
- if (!mi)
- mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
- pfn = mi->paddr >> PAGE_SHIFT;
-
- clear_thread_flag(TIF_MCE_NOTIFY);
-
- pr_err("Uncorrected hardware memory error in user-access at %llx",
- mi->paddr);
- /*
- * We must call memory_failure() here even if the current process is
- * doomed. We still need to mark the page as poisoned and alert any
- * other users of the page.
- */
- if (!mi->restartable)
- flags |= MF_MUST_KILL;
- if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
- pr_err("Memory error not recovered");
- force_sig(SIGBUS, current);
- }
- mce_clear_info(mi);
-}
-
-/*
* Action optional processing happens here (picking up
* from the list of faulting pages that do_machine_check()
* placed into the "ring").
@@ -1503,7 +1453,7 @@ static void __mcheck_cpu_init_generic(void)
bitmap_fill(all_banks, MAX_NR_BANKS);
machine_check_poll(MCP_UC | m_fl, &all_banks);
- set_in_cr4(X86_CR4_MCE);
+ cr4_set_bits(X86_CR4_MCE);
rdmsrl(MSR_IA32_MCG_CAP, cap);
if (cap & MCG_CTL_P)
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index a3042989398c..737b0ad4e61a 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -8,6 +8,8 @@
#include <linux/smp.h>
#include <asm/processor.h>
+#include <asm/traps.h>
+#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
@@ -17,8 +19,11 @@ int mce_p5_enabled __read_mostly;
/* Machine check handler for Pentium class Intel CPUs: */
static void pentium_machine_check(struct pt_regs *regs, long error_code)
{
+ enum ctx_state prev_state;
u32 loaddr, hi, lotype;
+ prev_state = ist_enter(regs);
+
rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
@@ -33,6 +38,8 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
}
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+ ist_exit(regs, prev_state);
}
/* Set up machine check reporting for processors with Intel style MCE: */
@@ -59,7 +66,7 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
"Intel old style machine check architecture supported.\n");
/* Enable MCE: */
- set_in_cr4(X86_CR4_MCE);
+ cr4_set_bits(X86_CR4_MCE);
printk(KERN_INFO
"Intel old style machine check reporting enabled on CPU#%d.\n",
smp_processor_id());
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 7dc5564d0cdf..44f138296fbe 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -7,14 +7,20 @@
#include <linux/types.h>
#include <asm/processor.h>
+#include <asm/traps.h>
+#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
/* Machine check handler for WinChip C6: */
static void winchip_machine_check(struct pt_regs *regs, long error_code)
{
+ enum ctx_state prev_state = ist_enter(regs);
+
printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+ ist_exit(regs, prev_state);
}
/* Set up machine check reporting on the Winchip C6 series */
@@ -31,7 +37,7 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
lo &= ~(1<<4); /* Enable MCE */
wrmsr(MSR_IDT_FCR1, lo, hi);
- set_in_cr4(X86_CR4_MCE);
+ cr4_set_bits(X86_CR4_MCE);
printk(KERN_INFO
"Winchip machine check reporting enabled on CPU#0.\n");
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 15c29096136b..36a83617eb21 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -552,7 +552,7 @@ static int __init microcode_init(void)
int error;
if (paravirt_enabled() || dis_ucode_ldr)
- return 0;
+ return -EINVAL;
if (c->x86_vendor == X86_VENDOR_INTEL)
microcode_ops = init_intel_microcode();
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a450373e8e91..939155ffdece 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
.rating = 400, /* use this when running on Hyperv*/
.read = read_hv_clock,
.mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init ms_hyperv_init_platform(void)
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 9e451b0876b5..f8c81ba0b465 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -138,8 +138,8 @@ static void prepare_set(void)
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_has_pge) {
- cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE);
+ cr4 = __read_cr4();
+ __write_cr4(cr4 & ~X86_CR4_PGE);
}
/*
@@ -171,7 +171,7 @@ static void post_set(void)
/* Restore value of CR4 */
if (cpu_has_pge)
- write_cr4(cr4);
+ __write_cr4(cr4);
}
static void cyrix_set_arr(unsigned int reg, unsigned long base,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0e25a1bc5ab5..7d74f7b3c6ba 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -678,8 +678,8 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_has_pge) {
- cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE);
+ cr4 = __read_cr4();
+ __write_cr4(cr4 & ~X86_CR4_PGE);
}
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
@@ -708,7 +708,7 @@ static void post_set(void) __releases(set_atomicity_lock)
/* Restore value of CR4 */
if (cpu_has_pge)
- write_cr4(cr4);
+ __write_cr4(cr4);
raw_spin_unlock(&set_atomicity_lock);
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 143e5f5dc855..b71a7f86d68a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -31,6 +31,8 @@
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
@@ -43,6 +45,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
+struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;
+
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1327,8 +1331,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
break;
case CPU_STARTING:
- if (x86_pmu.attr_rdpmc)
- set_in_cr4(X86_CR4_PCE);
if (x86_pmu.cpu_starting)
x86_pmu.cpu_starting(cpu);
break;
@@ -1804,14 +1806,44 @@ static int x86_pmu_event_init(struct perf_event *event)
event->destroy(event);
}
+ if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+ event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
+
return err;
}
+static void refresh_pce(void *ignored)
+{
+ if (current->mm)
+ load_mm_cr4(current->mm);
+}
+
+static void x86_pmu_event_mapped(struct perf_event *event)
+{
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ return;
+
+ if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
+ on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+}
+
+static void x86_pmu_event_unmapped(struct perf_event *event)
+{
+ if (!current->mm)
+ return;
+
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ return;
+
+ if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
+ on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+}
+
static int x86_pmu_event_idx(struct perf_event *event)
{
int idx = event->hw.idx;
- if (!x86_pmu.attr_rdpmc)
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return 0;
if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
@@ -1829,16 +1861,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev,
return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}
-static void change_rdpmc(void *info)
-{
- bool enable = !!(unsigned long)info;
-
- if (enable)
- set_in_cr4(X86_CR4_PCE);
- else
- clear_in_cr4(X86_CR4_PCE);
-}
-
static ssize_t set_attr_rdpmc(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1850,14 +1872,27 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (ret)
return ret;
+ if (val > 2)
+ return -EINVAL;
+
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
- if (!!val != !!x86_pmu.attr_rdpmc) {
- x86_pmu.attr_rdpmc = !!val;
- on_each_cpu(change_rdpmc, (void *)val, 1);
+ if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
+ /*
+ * Changing into or out of always available, aka
+ * perf-event-bypassing mode. This path is extremely slow,
+ * but only root can trigger it, so it's okay.
+ */
+ if (val == 2)
+ static_key_slow_inc(&rdpmc_always_available);
+ else
+ static_key_slow_dec(&rdpmc_always_available);
+ on_each_cpu(refresh_pce, NULL, 1);
}
+ x86_pmu.attr_rdpmc = val;
+
return count;
}
@@ -1900,6 +1935,9 @@ static struct pmu pmu = {
.event_init = x86_pmu_event_init,
+ .event_mapped = x86_pmu_event_mapped,
+ .event_unmapped = x86_pmu_event_unmapped,
+
.add = x86_pmu_add,
.del = x86_pmu_del,
.start = x86_pmu_start,
@@ -1914,13 +1952,15 @@ static struct pmu pmu = {
.flush_branch_stack = x86_pmu_flush_branch_stack,
};
-void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
+void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg, u64 now)
{
struct cyc2ns_data *data;
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
- userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
+ userpg->cap_user_rdpmc =
+ !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
userpg->pmc_width = x86_pmu.cntval_bits;
if (!sched_clock_stable())
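x86_pmu_event_mapped()/x86_pmu_event_unmapped() above use the usual refcount-toggle idiom: the first mapping of an RDPMC-allowed event in an mm enables CR4.PCE on the CPUs running that mm, and the last unmap disables it again. A self-contained user-space model of the idiom, where feature_enabled merely stands in for load_mm_cr4() flipping X86_CR4_PCE:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int rdpmc_users;		/* models mm->context.perf_rdpmc_allowed */
static int feature_enabled;

static void event_mapped(void)
{
	if (atomic_fetch_add(&rdpmc_users, 1) + 1 == 1)
		feature_enabled = 1;	/* first user enables the feature */
}

static void event_unmapped(void)
{
	if (atomic_fetch_sub(&rdpmc_users, 1) - 1 == 0)
		feature_enabled = 0;	/* last user disables it again */
}

int main(void)
{
	event_mapped();
	event_mapped();
	event_unmapped();
	printf("enabled=%d users=%d\n", feature_enabled,
	       atomic_load(&rdpmc_users));	/* enabled=1 users=1 */
	return 0;
}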
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 4e6cdb0ddc70..df525d2be1e8 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -71,6 +71,8 @@ struct event_constraint {
#define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_RDPMC_ALLOWED 0x40 /* grant rdpmc permission */
+
struct amd_nb {
int nb_id; /* NorthBridge id */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 944bf019b74f..498b6d967138 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
break;
case 55: /* 22nm Atom "Silvermont" */
+ case 76: /* 14nm Atom "Airmont" */
case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3c895d480cd7..073983398364 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
};
struct event_constraint intel_slm_pebs_event_constraints[] = {
- /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 673f930c700f..c4bb8b8e5017 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var = \
#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+#define RAPL_EVENT_ATTR_STR(_name, v, str) \
+static struct perf_pmu_events_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, rapl_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = str, \
+};
+
struct rapl_pmu {
spinlock_t lock;
int hw_unit; /* 1/2^hw_unit Joule */
@@ -135,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+ return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
}
static u64 rapl_event_update(struct perf_event *event)
@@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = {
.attrs = rapl_pmu_attrs,
};
-EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
-EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
-EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
-EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
+static ssize_t rapl_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr = \
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ if (pmu_attr->event_str)
+ return sprintf(page, "%s", pmu_attr->event_str);
+
+ return 0;
+}
+
+RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
+RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
+RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
-EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
-EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
-EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
-EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
/*
* we compute in 0.23 nJ increments regardless of MSR
*/
-EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
static struct attribute *rapl_events_srv_attr[] = {
EVENT_PTR(rapl_cores),
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 10b8d3eaaf15..c635b8b49e93 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
box->phys_id = phys_id;
box->pci_dev = pdev;
box->pmu = pmu;
- uncore_box_init(box);
pci_set_drvdata(pdev, box);
raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu);
/* called by uncore_cpu_init? */
- if (box && box->phys_id >= 0) {
- uncore_box_init(box);
+ if (box && box->phys_id >= 0)
continue;
- }
for_each_online_cpu(k) {
exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
}
}
- if (box) {
+ if (box)
box->phys_id = phys_id;
- uncore_box_init(box);
- }
}
}
return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 863d9b02563e..6c8c1e7e69d8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
return box->pmu->type->num_counters;
}
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+ if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->init_box)
+ box->pmu->type->ops->init_box(box);
+ }
+}
+
static inline void uncore_disable_box(struct intel_uncore_box *box)
{
if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
static inline void uncore_enable_box(struct intel_uncore_box *box)
{
+ uncore_box_init(box);
+
if (box->pmu->type->ops->enable_box)
box->pmu->type->ops->enable_box(box);
}
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
return box->pmu->type->ops->read_counter(box, event);
}
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
- if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
- if (box->pmu->type->ops->init_box)
- box->pmu->type->ops->init_box(box);
- }
-}
-
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
return (box->phys_id < 0);
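Moving uncore_box_init() into uncore_enable_box() turns box initialization into a lazy, run-once step performed on first use instead of at probe or CPU-starting time. The run-once part is just a test-and-set flag; a user-space sketch of the same pattern, with a C11 atomic_flag standing in for test_and_set_bit() on box->flags:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag box_initiated = ATOMIC_FLAG_INIT;

static void box_init(void)
{
	/* Only the first caller sees the flag clear and runs the init. */
	if (!atomic_flag_test_and_set(&box_initiated))
		puts("init_box runs exactly once");
}

static void box_enable(void)
{
	box_init();		/* defer init until the box is really used */
	puts("enable_box");
}

int main(void)
{
	box_enable();
	box_enable();		/* init_box is not repeated */
	return 0;
}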
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b74ebc7c4402..cf3df1d8d039 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,7 +265,10 @@ int __die(const char *str, struct pt_regs *regs, long err)
printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC");
+ printk("DEBUG_PAGEALLOC ");
+#endif
+#ifdef CONFIG_KASAN
+ printk("KASAN");
#endif
printk("\n");
if (notify_die(DIE_OOPS, str, regs, err,
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index dd2f07ae9d0c..46201deee923 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -184,9 +184,9 @@ void __init e820_print_map(char *who)
* overwritten in the same location, starting at biosmap.
*
* The integer pointed to by pnr_map must be valid on entry (the
- * current number of valid entries located at biosmap) and will
- * be updated on return, with the new number of valid entries
- * (something no more than max_nr_map.)
+ * current number of valid entries located at biosmap). If the
+ * sanitizing succeeds, *pnr_map will be updated with the new
+ * number of valid entries (something no more than max_nr_map).
*
* The return value from sanitize_e820_map() is zero if it
* successfully 'sanitized' the map entries passed in, and is -1
@@ -561,23 +561,15 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
void __init update_e820(void)
{
- u32 nr_map;
-
- nr_map = e820.nr_map;
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
+ if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map))
return;
- e820.nr_map = nr_map;
printk(KERN_INFO "e820: modified physical RAM map:\n");
e820_print_map("modified");
}
static void __init update_e820_saved(void)
{
- u32 nr_map;
-
- nr_map = e820_saved.nr_map;
- if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
- return;
- e820_saved.nr_map = nr_map;
+ sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map),
+ &e820_saved.nr_map);
}
#define MAX_GAP_END 0x100000000ull
/*
@@ -898,11 +890,9 @@ early_param("memmap", parse_memmap_opt);
void __init finish_e820_parsing(void)
{
if (userdef) {
- u32 nr = e820.nr_map;
-
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
+ if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map),
+ &e820.nr_map) < 0)
early_panic("Invalid user supplied memory map");
- e820.nr_map = nr;
printk(KERN_INFO "e820: user-defined physical RAM map:\n");
e820_print_map("user");
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 01d1c187c9f9..a62536a1be88 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -19,6 +19,7 @@
#include <linux/usb/ehci_def.h>
#include <linux/efi.h>
#include <asm/efi.h>
+#include <asm/pci_x86.h>
/* Simple VGA output */
#define VGABASE (__ISA_IO_base + 0xb8000)
@@ -76,7 +77,7 @@ static struct console early_vga_console = {
/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
-static int early_serial_base = 0x3f8; /* ttyS0 */
+static unsigned long early_serial_base = 0x3f8; /* ttyS0 */
#define XMTRDY 0x20
@@ -94,13 +95,40 @@ static int early_serial_base = 0x3f8; /* ttyS0 */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
+static void mem32_serial_out(unsigned long addr, int offset, int value)
+{
+ uint32_t *vaddr = (uint32_t *)addr;
+ /* shift implied by pointer type */
+ writel(value, vaddr + offset);
+}
+
+static unsigned int mem32_serial_in(unsigned long addr, int offset)
+{
+ uint32_t *vaddr = (uint32_t *)addr;
+ /* shift implied by pointer type */
+ return readl(vaddr + offset);
+}
+
+static unsigned int io_serial_in(unsigned long addr, int offset)
+{
+ return inb(addr + offset);
+}
+
+static void io_serial_out(unsigned long addr, int offset, int value)
+{
+ outb(value, addr + offset);
+}
+
+static unsigned int (*serial_in)(unsigned long addr, int offset) = io_serial_in;
+static void (*serial_out)(unsigned long addr, int offset, int value) = io_serial_out;
+
static int early_serial_putc(unsigned char ch)
{
unsigned timeout = 0xffff;
- while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
+ while ((serial_in(early_serial_base, LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
- outb(ch, early_serial_base + TXR);
+ serial_out(early_serial_base, TXR, ch);
return timeout ? 0 : -1;
}
@@ -114,13 +142,28 @@ static void early_serial_write(struct console *con, const char *s, unsigned n)
}
}
+static __init void early_serial_hw_init(unsigned divisor)
+{
+ unsigned char c;
+
+ serial_out(early_serial_base, LCR, 0x3); /* 8n1 */
+ serial_out(early_serial_base, IER, 0); /* no interrupt */
+ serial_out(early_serial_base, FCR, 0); /* no fifo */
+ serial_out(early_serial_base, MCR, 0x3); /* DTR + RTS */
+
+ c = serial_in(early_serial_base, LCR);
+ serial_out(early_serial_base, LCR, c | DLAB);
+ serial_out(early_serial_base, DLL, divisor & 0xff);
+ serial_out(early_serial_base, DLH, (divisor >> 8) & 0xff);
+ serial_out(early_serial_base, LCR, c & ~DLAB);
+}
+
#define DEFAULT_BAUD 9600
static __init void early_serial_init(char *s)
{
- unsigned char c;
unsigned divisor;
- unsigned baud = DEFAULT_BAUD;
+ unsigned long baud = DEFAULT_BAUD;
char *e;
if (*s == ',')
@@ -145,24 +188,124 @@ static __init void early_serial_init(char *s)
s++;
}
- outb(0x3, early_serial_base + LCR); /* 8n1 */
- outb(0, early_serial_base + IER); /* no interrupt */
- outb(0, early_serial_base + FCR); /* no fifo */
- outb(0x3, early_serial_base + MCR); /* DTR + RTS */
+ if (*s) {
+ if (kstrtoul(s, 0, &baud) < 0 || baud == 0)
+ baud = DEFAULT_BAUD;
+ }
+
+ /* Convert from baud to divisor value */
+ divisor = 115200 / baud;
+
+ /* These will always be IO based ports */
+ serial_in = io_serial_in;
+ serial_out = io_serial_out;
+
+ /* Set up the HW */
+ early_serial_hw_init(divisor);
+}
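The divisor programmed by early_serial_hw_init() is simply the 115200 base rate divided by the requested baud rate. A minimal sketch of that mapping (illustrative only, not part of this patch):

/* Illustrative only: divisor values for the 115200-based UART clock. */
static unsigned int divisor_for(unsigned long baud)
{
	return 115200 / baud;	/* 115200 -> 1, 38400 -> 3, 9600 -> 12 */
}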
+
+#ifdef CONFIG_PCI
+/*
+ * early_pci_serial_init()
+ *
+ * This function is invoked when the early_printk param starts with "pciserial".
+ * The rest of the param should be ",B:D.F,baud" where B, D & F describe the
+ * location of a PCI device that must be a UART device.
+ */
+static __init void early_pci_serial_init(char *s)
+{
+ unsigned divisor;
+ unsigned long baud = DEFAULT_BAUD;
+ u8 bus, slot, func;
+ uint32_t classcode, bar0;
+ uint16_t cmdreg;
+ char *e;
+
+
+ /*
+ * First, parse the param to get the BDF values
+ */
+ if (*s == ',')
+ ++s;
+
+ if (*s == 0)
+ return;
+
+ bus = (u8)simple_strtoul(s, &e, 16);
+ s = e;
+ if (*s != ':')
+ return;
+ ++s;
+ slot = (u8)simple_strtoul(s, &e, 16);
+ s = e;
+ if (*s != '.')
+ return;
+ ++s;
+ func = (u8)simple_strtoul(s, &e, 16);
+ s = e;
+
+ /* A baud might be following */
+ if (*s == ',')
+ s++;
+
+ /*
+ * Second, find the device from the BDF
+ */
+ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND);
+ classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
+ bar0 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+
+ /*
+ * Verify it is a UART type device
+ */
+ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) &&
+ (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) ||
+ (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */
+ return;
+
+ /*
+ * Determine if it is IO or memory mapped
+ */
+ if (bar0 & 0x01) {
+ /* it is IO mapped */
+ serial_in = io_serial_in;
+ serial_out = io_serial_out;
+ early_serial_base = bar0&0xfffffffc;
+ write_pci_config(bus, slot, func, PCI_COMMAND,
+ cmdreg|PCI_COMMAND_IO);
+ } else {
+ /* It is memory mapped - assume 32-bit alignment */
+ serial_in = mem32_serial_in;
+ serial_out = mem32_serial_out;
+ /* WARNING! assuming the address is always in the first 4G */
+ early_serial_base =
+ (unsigned long)early_ioremap(bar0 & 0xfffffff0, 0x10);
+ write_pci_config(bus, slot, func, PCI_COMMAND,
+ cmdreg|PCI_COMMAND_MEMORY);
+ }
+ /*
+ * Lastly, initialize the hardware
+ */
if (*s) {
- baud = simple_strtoul(s, &e, 0);
- if (baud == 0 || s == e)
+ if (strcmp(s, "nocfg") == 0)
+ /* Sometimes, we want to leave the UART alone
+ * and assume the BIOS has set it up correctly.
+ * "nocfg" tells us this is the case, and we
+ * should do no more setup.
+ */
+ return;
+ if (kstrtoul(s, 0, &baud) < 0 || baud == 0)
baud = DEFAULT_BAUD;
}
+ /* Convert from baud to divisor value */
divisor = 115200 / baud;
- c = inb(early_serial_base + LCR);
- outb(c | DLAB, early_serial_base + LCR);
- outb(divisor & 0xff, early_serial_base + DLL);
- outb((divisor >> 8) & 0xff, early_serial_base + DLH);
- outb(c & ~DLAB, early_serial_base + LCR);
+
+ /* Set up the HW */
+ early_serial_hw_init(divisor);
}
+#endif
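For reference, a boot command line exercising this path could look like one of the lines below; the bus:device.function triple is hypothetical and must name a PCI UART exposing a 16550-compatible interface at BAR0:

earlyprintk=pciserial,00:18.1,115200
earlyprintk=pciserial,00:18.1,nocfg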
static struct console early_serial_console = {
.name = "earlyser",
@@ -210,6 +353,13 @@ static int __init setup_early_printk(char *buf)
early_serial_init(buf + 4);
early_console_register(&early_serial_console, keep);
}
+#ifdef CONFIG_PCI
+ if (!strncmp(buf, "pciserial", 9)) {
+ early_pci_serial_init(buf + 9);
+ early_console_register(&early_serial_console, keep);
+ buf += 9; /* Keep the above "serial" check from matching */
+ }
+#endif
if (!strncmp(buf, "vga", 3) &&
boot_params.screen_info.orig_video_isVGA == 1) {
max_xpos = boot_params.screen_info.orig_video_cols;
@@ -226,11 +376,6 @@ static int __init setup_early_printk(char *buf)
early_console_register(&xenboot_console, keep);
#endif
#ifdef CONFIG_EARLY_PRINTK_INTEL_MID
- if (!strncmp(buf, "mrst", 4)) {
- mrst_early_console_init();
- early_console_register(&early_mrst_console, keep);
- }
-
if (!strncmp(buf, "hsu", 3)) {
hsu_early_console_init(buf + 3);
early_console_register(&early_hsu_console, keep);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 9ebaf63ba182..db13655c3a2a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -143,7 +143,8 @@ ENDPROC(native_usergs_sysret64)
movq \tmp,RSP+\offset(%rsp)
movq $__USER_DS,SS+\offset(%rsp)
movq $__USER_CS,CS+\offset(%rsp)
- movq $-1,RCX+\offset(%rsp)
+ movq RIP+\offset(%rsp),\tmp /* get rip */
+ movq \tmp,RCX+\offset(%rsp) /* copy it to rcx as sysret would do */
movq R11+\offset(%rsp),\tmp /* get eflags */
movq \tmp,EFLAGS+\offset(%rsp)
.endm
@@ -155,27 +156,6 @@ ENDPROC(native_usergs_sysret64)
movq \tmp,R11+\offset(%rsp)
.endm
- .macro FAKE_STACK_FRAME child_rip
- /* push in order ss, rsp, eflags, cs, rip */
- xorl %eax, %eax
- pushq_cfi $__KERNEL_DS /* ss */
- /*CFI_REL_OFFSET ss,0*/
- pushq_cfi %rax /* rsp */
- CFI_REL_OFFSET rsp,0
- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
- /*CFI_REL_OFFSET rflags,0*/
- pushq_cfi $__KERNEL_CS /* cs */
- /*CFI_REL_OFFSET cs,0*/
- pushq_cfi \child_rip /* rip */
- CFI_REL_OFFSET rip,0
- pushq_cfi %rax /* orig rax */
- .endm
-
- .macro UNFAKE_STACK_FRAME
- addq $8*6, %rsp
- CFI_ADJUST_CFA_OFFSET -(6*8)
- .endm
-
/*
* initial frame state for interrupts (and exceptions without error code)
*/
@@ -238,51 +218,6 @@ ENDPROC(native_usergs_sysret64)
CFI_REL_OFFSET r15, R15+\offset
.endm
-/* save partial stack frame */
- .macro SAVE_ARGS_IRQ
- cld
- /* start from rbp in pt_regs and jump over */
- movq_cfi rdi, (RDI-RBP)
- movq_cfi rsi, (RSI-RBP)
- movq_cfi rdx, (RDX-RBP)
- movq_cfi rcx, (RCX-RBP)
- movq_cfi rax, (RAX-RBP)
- movq_cfi r8, (R8-RBP)
- movq_cfi r9, (R9-RBP)
- movq_cfi r10, (R10-RBP)
- movq_cfi r11, (R11-RBP)
-
- /* Save rbp so that we can unwind from get_irq_regs() */
- movq_cfi rbp, 0
-
- /* Save previous stack value */
- movq %rsp, %rsi
-
- leaq -RBP(%rsp),%rdi /* arg1 for handler */
- testl $3, CS-RBP(%rsi)
- je 1f
- SWAPGS
- /*
- * irq_count is used to check if a CPU is already on an interrupt stack
- * or not. While this is essentially redundant with preempt_count it is
- * a little cheaper to use a separate counter in the PDA (short of
- * moving irq_enter into assembly, which would be too much work)
- */
-1: incl PER_CPU_VAR(irq_count)
- cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
- CFI_DEF_CFA_REGISTER rsi
-
- /* Store previous stack value */
- pushq %rsi
- CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
- 0x77 /* DW_OP_breg7 */, 0, \
- 0x06 /* DW_OP_deref */, \
- 0x08 /* DW_OP_const1u */, SS+8-RBP, \
- 0x22 /* DW_OP_plus */
- /* We entered an interrupt context - irqs are off: */
- TRACE_IRQS_OFF
- .endm
-
ENTRY(save_paranoid)
XCPT_FRAME 1 RDI+8
cld
@@ -426,15 +361,12 @@ system_call_fastpath:
* Has incomplete stack frame and undefined top of stack.
*/
ret_from_sys_call:
- movl $_TIF_ALLWORK_MASK,%edi
- /* edi: flagmask */
-sysret_check:
+ testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ jnz int_ret_from_sys_call_fixup /* Go to the slow path */
+
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
- andl %edi,%edx
- jnz sysret_careful
CFI_REMEMBER_STATE
/*
* sysretq will re-enable interrupts:
@@ -448,49 +380,10 @@ sysret_check:
USERGS_SYSRET64
CFI_RESTORE_STATE
- /* Handle reschedules */
- /* edx: work, edi: workmask */
-sysret_careful:
- bt $TIF_NEED_RESCHED,%edx
- jnc sysret_signal
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- pushq_cfi %rdi
- SCHEDULE_USER
- popq_cfi %rdi
- jmp sysret_check
- /* Handle a signal */
-sysret_signal:
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
-#ifdef CONFIG_AUDITSYSCALL
- bt $TIF_SYSCALL_AUDIT,%edx
- jc sysret_audit
-#endif
- /*
- * We have a signal, or exit tracing or single-step.
- * These all wind up with the iret return path anyway,
- * so just join that path right now.
- */
+int_ret_from_sys_call_fixup:
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
- jmp int_check_syscall_exit_work
-
-#ifdef CONFIG_AUDITSYSCALL
- /*
- * Return fast path for syscall audit. Call __audit_syscall_exit()
- * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
- * masked off.
- */
-sysret_audit:
- movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */
- cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */
- setbe %al /* 1 if so, 0 if not */
- movzbl %al,%edi /* zero-extend that into %edi */
- call __audit_syscall_exit
- movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
- jmp sysret_check
-#endif /* CONFIG_AUDITSYSCALL */
+ jmp int_ret_from_sys_call
/* Do syscall tracing */
tracesys:
@@ -626,19 +519,6 @@ END(\label)
FORK_LIKE vfork
FIXED_FRAME stub_iopl, sys_iopl
-ENTRY(ptregscall_common)
- DEFAULT_FRAME 1 8 /* offset 8: return address */
- RESTORE_TOP_OF_STACK %r11, 8
- movq_cfi_restore R15+8, r15
- movq_cfi_restore R14+8, r14
- movq_cfi_restore R13+8, r13
- movq_cfi_restore R12+8, r12
- movq_cfi_restore RBP+8, rbp
- movq_cfi_restore RBX+8, rbx
- ret $REST_SKIP /* pop extended registers */
- CFI_ENDPROC
-END(ptregscall_common)
-
ENTRY(stub_execve)
CFI_STARTPROC
addq $8, %rsp
@@ -779,7 +659,48 @@ END(interrupt)
/* reserve pt_regs for scratch regs and rbp */
subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
- SAVE_ARGS_IRQ
+ cld
+ /* start from rbp in pt_regs and jump over */
+ movq_cfi rdi, (RDI-RBP)
+ movq_cfi rsi, (RSI-RBP)
+ movq_cfi rdx, (RDX-RBP)
+ movq_cfi rcx, (RCX-RBP)
+ movq_cfi rax, (RAX-RBP)
+ movq_cfi r8, (R8-RBP)
+ movq_cfi r9, (R9-RBP)
+ movq_cfi r10, (R10-RBP)
+ movq_cfi r11, (R11-RBP)
+
+ /* Save rbp so that we can unwind from get_irq_regs() */
+ movq_cfi rbp, 0
+
+ /* Save previous stack value */
+ movq %rsp, %rsi
+
+ leaq -RBP(%rsp),%rdi /* arg1 for handler */
+ testl $3, CS-RBP(%rsi)
+ je 1f
+ SWAPGS
+ /*
+ * irq_count is used to check if a CPU is already on an interrupt stack
+ * or not. While this is essentially redundant with preempt_count it is
+ * a little cheaper to use a separate counter in the PDA (short of
+ * moving irq_enter into assembly, which would be too much work)
+ */
+1: incl PER_CPU_VAR(irq_count)
+ cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
+ CFI_DEF_CFA_REGISTER rsi
+
+ /* Store previous stack value */
+ pushq %rsi
+ CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
+ 0x77 /* DW_OP_breg7 */, 0, \
+ 0x06 /* DW_OP_deref */, \
+ 0x08 /* DW_OP_const1u */, SS+8-RBP, \
+ 0x22 /* DW_OP_plus */
+ /* We entered an interrupt context - irqs are off: */
+ TRACE_IRQS_OFF
+
call \func
.endm
@@ -831,6 +752,60 @@ retint_swapgs: /* return to user-space */
*/
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_IRETQ
+
+ /*
+ * Try to use SYSRET instead of IRET if we're returning to
+ * a completely clean 64-bit userspace context.
+ */
+ movq (RCX-R11)(%rsp), %rcx
+ cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
+ jne opportunistic_sysret_failed
+
+ /*
+ * On Intel CPUs, sysret with non-canonical RCX/RIP will #GP
+ * in kernel space. This essentially lets the user take over
+ * the kernel, since userspace controls RSP. It's not worth
+ * testing for canonicalness exactly -- this check detects any
+ * of the 17 high bits set, which is true for non-canonical
+ * or kernel addresses. (This will pessimize vsyscall=native.
+ * Big deal.)
+ *
+ * If virtual addresses ever become wider, this will need
+ * to be updated to remain correct on both old and new CPUs.
+ */
+ .ifne __VIRTUAL_MASK_SHIFT - 47
+ .error "virtual address width changed -- sysret checks need update"
+ .endif
+ shr $__VIRTUAL_MASK_SHIFT, %rcx
+ jnz opportunistic_sysret_failed
+
+ cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
+ jne opportunistic_sysret_failed
+
+ movq (R11-ARGOFFSET)(%rsp), %r11
+ cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
+ jne opportunistic_sysret_failed
+
+ testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
+ jnz opportunistic_sysret_failed
+
+ /* nothing to check for RSP */
+
+ cmpq $__USER_DS,(SS-ARGOFFSET)(%rsp) /* SS must match SYSRET */
+ jne opportunistic_sysret_failed
+
+ /*
+ * We win! This label is here just for ease of understanding
+ * perf profiles. Nothing jumps here.
+ */
+irq_return_via_sysret:
+ CFI_REMEMBER_STATE
+ RESTORE_ARGS 1,8,1
+ movq (RSP-RIP)(%rsp),%rsp
+ USERGS_SYSRET64
+ CFI_RESTORE_STATE
+
+opportunistic_sysret_failed:
SWAPGS
jmp restore_args
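The shift test above is the C-level check below in disguise: with 48-bit virtual addresses (__VIRTUAL_MASK_SHIFT == 47), the saved RIP must have all 17 upper bits clear before SYSRET may be used. A minimal sketch (illustrative only, not part of this patch):

/* Illustrative only: the return-RIP test performed above, in C form.
 * Assumes 48-bit virtual addresses (__VIRTUAL_MASK_SHIFT == 47). */
static inline int rip_ok_for_sysret(unsigned long rip)
{
	return (rip >> 47) == 0;	/* all 17 high bits must be clear */
}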
@@ -1048,6 +1023,11 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
.if \paranoid
+ .if \paranoid == 1
+ CFI_REMEMBER_STATE
+ testl $3, CS(%rsp) /* If coming from userspace, switch */
+ jnz 1f /* stacks. */
+ .endif
call save_paranoid
.else
call error_entry
@@ -1088,6 +1068,36 @@ ENTRY(\sym)
jmp error_exit /* %ebx: no swapgs flag */
.endif
+ .if \paranoid == 1
+ CFI_RESTORE_STATE
+ /*
+ * Paranoid entry from userspace. Switch stacks and treat it
+ * as a normal entry. This means that paranoid handlers
+ * run in real process context if user_mode(regs).
+ */
+1:
+ call error_entry
+
+ DEFAULT_FRAME 0
+
+ movq %rsp,%rdi /* pt_regs pointer */
+ call sync_regs
+ movq %rax,%rsp /* switch stack */
+
+ movq %rsp,%rdi /* pt_regs pointer */
+
+ .if \has_error_code
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ .else
+ xorl %esi,%esi /* no error code */
+ .endif
+
+ call \do_sym
+
+ jmp error_exit /* %ebx: no swapgs flag */
+ .endif
+
CFI_ENDPROC
END(\sym)
.endm
@@ -1108,7 +1118,7 @@ idtentry overflow do_overflow has_error_code=0
idtentry bounds do_bounds has_error_code=0
idtentry invalid_op do_invalid_op has_error_code=0
idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault do_double_fault has_error_code=1 paranoid=1
+idtentry double_fault do_double_fault has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
idtentry invalid_TSS do_invalid_TSS has_error_code=1
idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,16 +1299,14 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
#endif
/*
- * "Paranoid" exit path from exception stack.
- * Paranoid because this is used by NMIs and cannot take
- * any kernel state for granted.
- * We don't do kernel preemption checks here, because only
- * NMI should be common and it does not enable IRQs and
- * cannot get reschedule ticks.
+ * "Paranoid" exit path from exception stack. This is invoked
+ * only on return from non-NMI IST interrupts that came
+ * from kernel space.
*
- * "trace" is 0 for the NMI handler only, because irq-tracing
- * is fundamentally NMI-unsafe. (we cannot change the soft and
- * hard flags at once, atomically)
+ * We may be returning to very strange contexts (e.g. very early
+ * in syscall entry), so checking for preemption here would
+ * be complicated. Fortunately, there's no good reason
+ * to try to handle preemption here.
*/
/* ebx: no swapgs flag */
@@ -1308,43 +1316,14 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
- testl $3,CS(%rsp)
- jnz paranoid_userspace
-paranoid_swapgs:
TRACE_IRQS_IRETQ 0
SWAPGS_UNSAFE_STACK
RESTORE_ALL 8
- jmp irq_return
+ INTERRUPT_RETURN
paranoid_restore:
TRACE_IRQS_IRETQ_DEBUG 0
RESTORE_ALL 8
- jmp irq_return
-paranoid_userspace:
- GET_THREAD_INFO(%rcx)
- movl TI_flags(%rcx),%ebx
- andl $_TIF_WORK_MASK,%ebx
- jz paranoid_swapgs
- movq %rsp,%rdi /* &pt_regs */
- call sync_regs
- movq %rax,%rsp /* switch stack for scheduling */
- testl $_TIF_NEED_RESCHED,%ebx
- jnz paranoid_schedule
- movl %ebx,%edx /* arg3: thread flags */
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- xorl %esi,%esi /* arg2: oldset */
- movq %rsp,%rdi /* arg1: &pt_regs */
- call do_notify_resume
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
- jmp paranoid_userspace
-paranoid_schedule:
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_ANY)
- SCHEDULE_USER
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
- jmp paranoid_userspace
+ INTERRUPT_RETURN
CFI_ENDPROC
END(paranoid_exit)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2142376dc8c6..8b7b0a51e742 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -674,7 +674,7 @@ static inline void *alloc_tramp(unsigned long size)
}
static inline void tramp_free(void *tramp)
{
- module_free(NULL, tramp);
+ module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index d6c1b9836995..2911ef3a9f1c 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -31,6 +31,7 @@ static void __init i386_default_early_setup(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
+ cr4_init_shadow();
sanitize_boot_params(&boot_params);
/* Call the subarch specific early setup function */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index eda1a865641e..c4f8d4659070 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -27,6 +27,7 @@
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
+#include <asm/kasan.h>
/*
* Manage page tables very early on.
@@ -46,7 +47,7 @@ static void __init reset_early_page_tables(void)
next_early_pgt = 0;
- write_cr3(__pa(early_level4_pgt));
+ write_cr3(__pa_nodebug(early_level4_pgt));
}
/* Create a new PMD entry */
@@ -59,7 +60,7 @@ int __init early_make_pgtable(unsigned long address)
pmdval_t pmd, *pmd_p;
/* Invalid address or early pgt is done ? */
- if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt))
+ if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
return -1;
again:
@@ -155,9 +156,13 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
(__START_KERNEL & PGDIR_MASK)));
BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
+ cr4_init_shadow();
+
/* Kill off the identity-map trampoline */
reset_early_page_tables();
+ kasan_map_early_shadow(early_level4_pgt);
+
/* clear bss before set_intr_gate with early_idt_handler */
clear_bss();
@@ -179,6 +184,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
/* set init_level4_pgt kernel high mapping*/
init_level4_pgt[511] = early_level4_pgt[511];
+ kasan_map_early_shadow(init_level4_pgt);
+
x86_64_start_reservations(real_mode_data);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a468c0a65c42..6fd514d9f69a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -514,8 +514,38 @@ ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
+#ifdef CONFIG_KASAN
+#define FILL(VAL, COUNT) \
+ .rept (COUNT) ; \
+ .quad (VAL) ; \
+ .endr
+
+NEXT_PAGE(kasan_zero_pte)
+ FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
+NEXT_PAGE(kasan_zero_pmd)
+ FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
+NEXT_PAGE(kasan_zero_pud)
+ FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
+
+#undef FILL
+#endif
+
+
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
+
+#ifdef CONFIG_KASAN
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages because stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory that
+ * are allowed to be accessed but are not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+NEXT_PAGE(kasan_zero_page)
+ .skip PAGE_SIZE
+#endif
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 319bcb9372fe..3acbff4716b0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -168,7 +168,7 @@ static void _hpet_print_config(const char *function, int line)
#define hpet_print_config() \
do { \
if (hpet_verbose) \
- _hpet_print_config(__FUNCTION__, __LINE__); \
+ _hpet_print_config(__func__, __LINE__); \
} while (0)
/*
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 3d5fb509bdeb..7114ba220fd4 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
*dr7 |= encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
+ if (info->mask)
+ set_dr_addr_mask(info->mask, i);
return 0;
}
@@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
*dr7 &= ~__encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
-}
-
-static int get_hbp_len(u8 hbp_len)
-{
- unsigned int len_in_bytes = 0;
-
- switch (hbp_len) {
- case X86_BREAKPOINT_LEN_1:
- len_in_bytes = 1;
- break;
- case X86_BREAKPOINT_LEN_2:
- len_in_bytes = 2;
- break;
- case X86_BREAKPOINT_LEN_4:
- len_in_bytes = 4;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- len_in_bytes = 8;
- break;
-#endif
- }
- return len_in_bytes;
+ if (info->mask)
+ set_dr_addr_mask(0, i);
}
/*
@@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
va = info->address;
- len = get_hbp_len(info->len);
+ len = bp->attr.bp_len;
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp)
}
/* Len */
+ info->mask = 0;
+
switch (bp->attr.bp_len) {
case HW_BREAKPOINT_LEN_1:
info->len = X86_BREAKPOINT_LEN_1;
@@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp)
break;
#endif
default:
- return -EINVAL;
+ if (!is_power_of_2(bp->attr.bp_len))
+ return -EINVAL;
+ if (!cpu_has_bpext)
+ return -EOPNOTSUPP;
+ info->mask = bp->attr.bp_len - 1;
+ info->len = X86_BREAKPOINT_LEN_1;
}
return 0;
}
+
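For non-standard lengths the code above relies on the AMD address-mask (bpext) extension: a power-of-two bp_len becomes a mask of its low bits, so the breakpoint fires for an access anywhere inside that aligned window. A minimal sketch with a hypothetical length:

/* Illustrative only: mapping a power-of-two bp_len to an address mask. */
static unsigned long bp_len_to_mask(unsigned long bp_len)
{
	/* e.g. bp_len == 32 -> mask 0x1f: any hit within the aligned
	 * 32-byte window around the watched address triggers. */
	return bp_len - 1;
}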
/*
* Validate the arch-specific HW Breakpoint register settings
*/
@@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (ret)
return ret;
- ret = -EINVAL;
-
switch (info->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
+ if (info->mask)
+ align = info->mask;
break;
case X86_BREAKPOINT_LEN_2:
align = 1;
@@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
break;
#endif
default:
- return ret;
+ WARN_ON_ONCE(1);
}
/*
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a9a4229f6161..d5651fce0b71 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -13,12 +13,26 @@
#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
+#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/user.h>
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+void kernel_fpu_disable(void)
+{
+ WARN_ON(this_cpu_read(in_kernel_fpu));
+ this_cpu_write(in_kernel_fpu, true);
+}
+
+void kernel_fpu_enable(void)
+{
+ this_cpu_write(in_kernel_fpu, false);
+}
+
/*
* Were we in an interrupt that interrupted kernel mode?
*
@@ -33,6 +47,9 @@
*/
static inline bool interrupted_kernel_fpu_idle(void)
{
+ if (this_cpu_read(in_kernel_fpu))
+ return false;
+
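Moving uncore_box_init() into the header and calling it from uncore_enable_box() below defers the hardware initialization until an event is first enabled; the test_and_set_bit() flag keeps it a run-once operation even with concurrent callers. A minimal sketch of the idiom with generic names (illustrative only):

/* Illustrative only: the run-once pattern used by uncore_box_init(). */
#include <linux/bitops.h>

static void maybe_init_once(unsigned long *flags, void (*init)(void))
{
	/* test_and_set_bit() is atomic, so exactly one caller sees the
	 * bit clear and performs the one-time initialization. */
	if (!test_and_set_bit(0, flags) && init)
		init();
}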
if (use_eager_fpu())
return __thread_has_fpu(current);
@@ -73,10 +90,10 @@ void __kernel_fpu_begin(void)
{
struct task_struct *me = current;
+ this_cpu_write(in_kernel_fpu, true);
+
if (__thread_has_fpu(me)) {
- __thread_clear_has_fpu(me);
__save_init_fpu(me);
- /* We do 'stts()' in __kernel_fpu_end() */
} else if (!use_eager_fpu()) {
this_cpu_write(fpu_owner_task, NULL);
clts();
@@ -86,19 +103,16 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
- if (use_eager_fpu()) {
- /*
- * For eager fpu, most the time, tsk_used_math() is true.
- * Restore the user math as we are done with the kernel usage.
- * At few instances during thread exit, signal handling etc,
- * tsk_used_math() is false. Those few places will take proper
- * actions, so we don't need to restore the math here.
- */
- if (likely(tsk_used_math(current)))
- math_state_restore();
- } else {
+ struct task_struct *me = current;
+
+ if (__thread_has_fpu(me)) {
+ if (WARN_ON(restore_fpu_checking(me)))
+ drop_init_fpu(me);
+ } else if (!use_eager_fpu()) {
stts();
}
+
+ this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);
@@ -180,7 +194,7 @@ void fpu_init(void)
if (cpu_has_xmm)
cr4_mask |= X86_CR4_OSXMMEXCPT;
if (cr4_mask)
- set_in_cr4(cr4_mask);
+ cr4_set_bits(cr4_mask);
cr0 = read_cr0();
cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 6307a0f0cf17..705ef8d48e2d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_puts(p, " Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
- seq_printf(p, "%*s: ", prec, "THR");
+ seq_printf(p, "%*s: ", prec, "HYP");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
seq_puts(p, " Hypervisor callback interrupts\n");
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 63ce838e5a54..28d28f5eb8f4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -69,16 +69,9 @@ static void call_on_stack(void *func, void *stack)
: "memory", "cc", "edx", "ecx", "eax");
}
-/* how to get the current stack pointer from C */
-#define current_stack_pointer ({ \
- unsigned long sp; \
- asm("mov %%esp,%0" : "=g" (sp)); \
- sp; \
-})
-
static inline void *current_stack(void)
{
- return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+ return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
}
static inline int
@@ -103,7 +96,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
/* Save the next esp at the bottom of the stack */
prev_esp = (u32 *)irqstk;
- *prev_esp = current_stack_pointer;
+ *prev_esp = current_stack_pointer();
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
@@ -156,7 +149,7 @@ void do_softirq_own_stack(void)
/* Push the previous esp onto the stack */
prev_esp = (u32 *)irqstk;
- *prev_esp = current_stack_pointer;
+ *prev_esp = current_stack_pointer();
call_on_stack(__do_softirq, isp);
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f7e3cd50ece0..98f654d466e5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
+
+ /*
+ * jprobes use jprobe_return(), which skips the normal return
+ * path of the function and messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->ip - 1);
struct jprobe *jp = container_of(p, struct jprobe, kp);
+ void *saved_sp = kcb->jprobe_saved_sp;
if ((addr > (u8 *) jprobe_return) &&
(addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+ if (stack_addr(regs) != saved_sp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
printk(KERN_ERR
"current sp %p does not match saved sp %p\n",
- stack_addr(regs), kcb->jprobe_saved_sp);
+ stack_addr(regs), saved_sp);
printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
show_regs(saved_regs);
printk(KERN_ERR "Current registers\n");
show_regs(regs);
BUG();
}
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
- kcb->jprobes_stack,
- MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7c523bbf3dc8..0dd8d089c315 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -322,7 +322,8 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
* Target instructions MUST be relocatable (checked inside)
* This is called when new aggr(opt)probe is allocated or reused.
*/
-int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+ struct kprobe *__unused)
{
u8 *buf;
int ret;
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
new file mode 100644
index 000000000000..ff3c3101d003
--- /dev/null
+++ b/arch/x86/kernel/livepatch.c
@@ -0,0 +1,90 @@
+/*
+ * livepatch.c - x86-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/page_types.h>
+#include <asm/elf.h>
+#include <asm/livepatch.h>
+
+/**
+ * klp_write_module_reloc() - write a relocation in a module
+ * @mod: module in which the section to be modified is found
+ * @type: ELF relocation type (see asm/elf.h)
+ * @loc: address that the relocation should be written to
+ * @value: relocation value (sym address + addend)
+ *
+ * This function writes a relocation to the specified location for
+ * a particular module.
+ */
+int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value)
+{
+ int ret, numpages, size = 4;
+ bool readonly;
+ unsigned long val;
+ unsigned long core = (unsigned long)mod->module_core;
+ unsigned long core_ro_size = mod->core_ro_size;
+ unsigned long core_size = mod->core_size;
+
+ switch (type) {
+ case R_X86_64_NONE:
+ return 0;
+ case R_X86_64_64:
+ val = value;
+ size = 8;
+ break;
+ case R_X86_64_32:
+ val = (u32)value;
+ break;
+ case R_X86_64_32S:
+ val = (s32)value;
+ break;
+ case R_X86_64_PC32:
+ val = (u32)(value - loc);
+ break;
+ default:
+ /* unsupported relocation type */
+ return -EINVAL;
+ }
+
+ if (loc < core || loc >= core + core_size)
+ /* loc does not point to any symbol inside the module */
+ return -EINVAL;
+
+ if (loc < core + core_ro_size)
+ readonly = true;
+ else
+ readonly = false;
+
+ /* determine if the relocation spans a page boundary */
+ numpages = ((loc & PAGE_MASK) == ((loc + size) & PAGE_MASK)) ? 1 : 2;
+
+ if (readonly)
+ set_memory_rw(loc & PAGE_MASK, numpages);
+
+ ret = probe_kernel_write((void *)loc, &val, size);
+
+ if (readonly)
+ set_memory_ro(loc & PAGE_MASK, numpages);
+
+ return ret;
+}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e69f9882bf95..d1ac80b72c72 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -24,6 +24,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
@@ -83,13 +84,22 @@ static unsigned long int get_module_load_offset(void)
void *module_alloc(unsigned long size)
{
+ void *p;
+
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- return __vmalloc_node_range(size, 1,
+
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+ return NULL;
+ }
+
+ return p;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c
index 0ee5025e0fa4..d66a4fe6caee 100644
--- a/arch/x86/kernel/pmc_atom.c
+++ b/arch/x86/kernel/pmc_atom.c
@@ -25,8 +25,6 @@
#include <asm/pmc_atom.h>
-#define DRIVER_NAME KBUILD_MODNAME
-
struct pmc_dev {
u32 base_addr;
void __iomem *regmap;
@@ -38,12 +36,12 @@ struct pmc_dev {
static struct pmc_dev pmc_device;
static u32 acpi_base_addr;
-struct pmc_dev_map {
+struct pmc_bit_map {
const char *name;
u32 bit_mask;
};
-static const struct pmc_dev_map dev_map[] = {
+static const struct pmc_bit_map dev_map[] = {
{"0 - LPSS1_F0_DMA", BIT_LPSS1_F0_DMA},
{"1 - LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1},
{"2 - LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2},
@@ -82,6 +80,27 @@ static const struct pmc_dev_map dev_map[] = {
{"35 - DFX", BIT_DFX},
};
+static const struct pmc_bit_map pss_map[] = {
+ {"0 - GBE", PMC_PSS_BIT_GBE},
+ {"1 - SATA", PMC_PSS_BIT_SATA},
+ {"2 - HDA", PMC_PSS_BIT_HDA},
+ {"3 - SEC", PMC_PSS_BIT_SEC},
+ {"4 - PCIE", PMC_PSS_BIT_PCIE},
+ {"5 - LPSS", PMC_PSS_BIT_LPSS},
+ {"6 - LPE", PMC_PSS_BIT_LPE},
+ {"7 - DFX", PMC_PSS_BIT_DFX},
+ {"8 - USH_CTRL", PMC_PSS_BIT_USH_CTRL},
+ {"9 - USH_SUS", PMC_PSS_BIT_USH_SUS},
+ {"10 - USH_VCCS", PMC_PSS_BIT_USH_VCCS},
+ {"11 - USH_VCCA", PMC_PSS_BIT_USH_VCCA},
+ {"12 - OTG_CTRL", PMC_PSS_BIT_OTG_CTRL},
+ {"13 - OTG_VCCS", PMC_PSS_BIT_OTG_VCCS},
+ {"14 - OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK},
+ {"15 - OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
+ {"16 - USB", PMC_PSS_BIT_USB},
+ {"17 - USB_SUS", PMC_PSS_BIT_USB_SUS},
+};
+
static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
{
return readl(pmc->regmap + reg_offset);
@@ -169,6 +188,32 @@ static const struct file_operations pmc_dev_state_ops = {
.release = single_release,
};
+static int pmc_pss_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ u32 pss = pmc_reg_read(pmc, PMC_PSS);
+ int pss_index;
+
+ for (pss_index = 0; pss_index < ARRAY_SIZE(pss_map); pss_index++) {
+ seq_printf(s, "Island: %-32s\tState: %s\n",
+ pss_map[pss_index].name,
+ pss_map[pss_index].bit_mask & pss ? "Off" : "On");
+ }
+ return 0;
+}
+
+static int pmc_pss_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_pss_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_pss_state_ops = {
+ .open = pmc_pss_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmc = s->private;
@@ -202,11 +247,7 @@ static const struct file_operations pmc_sleep_tmr_ops = {
static void pmc_dbgfs_unregister(struct pmc_dev *pmc)
{
- if (!pmc->dbgfs_dir)
- return;
-
debugfs_remove_recursive(pmc->dbgfs_dir);
- pmc->dbgfs_dir = NULL;
}
static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
@@ -217,19 +258,29 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
if (!dir)
return -ENOMEM;
+ pmc->dbgfs_dir = dir;
+
f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
dir, pmc, &pmc_dev_state_ops);
if (!f) {
- dev_err(&pdev->dev, "dev_states register failed\n");
+ dev_err(&pdev->dev, "dev_state register failed\n");
goto err;
}
+
+ f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_pss_state_ops);
+ if (!f) {
+ dev_err(&pdev->dev, "pss_state register failed\n");
+ goto err;
+ }
+
f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO,
dir, pmc, &pmc_sleep_tmr_ops);
if (!f) {
dev_err(&pdev->dev, "sleep_state register failed\n");
goto err;
}
- pmc->dbgfs_dir = dir;
+
return 0;
err:
pmc_dbgfs_unregister(pmc);
@@ -292,7 +343,6 @@ MODULE_DEVICE_TABLE(pci, pmc_pci_ids);
static int __init pmc_atom_init(void)
{
- int err = -ENODEV;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
@@ -306,14 +356,11 @@ static int __init pmc_atom_init(void)
*/
for_each_pci_dev(pdev) {
ent = pci_match_id(pmc_pci_ids, pdev);
- if (ent) {
- err = pmc_setup_dev(pdev);
- goto out;
- }
+ if (ent)
+ return pmc_setup_dev(pdev);
}
/* Device not found. */
-out:
- return err;
+ return -ENODEV;
}
module_init(pmc_atom_init);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e127ddaa2d5a..046e2d620bbe 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -28,6 +28,7 @@
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
+#include <asm/tlbflush.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -141,7 +142,7 @@ void flush_thread(void)
static void hard_disable_TSC(void)
{
- write_cr4(read_cr4() | X86_CR4_TSD);
+ cr4_set_bits(X86_CR4_TSD);
}
void disable_TSC(void)
@@ -158,7 +159,7 @@ void disable_TSC(void)
static void hard_enable_TSC(void)
{
- write_cr4(read_cr4() & ~X86_CR4_TSD);
+ cr4_clear_bits(X86_CR4_TSD);
}
static void enable_TSC(void)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8f3ebfe710d0..603c4f99cb5a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -101,7 +101,7 @@ void __show_regs(struct pt_regs *regs, int all)
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
- cr4 = read_cr4_safe();
+ cr4 = __read_cr4_safe();
printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
cr0, cr2, cr3, cr4);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 5a2c02913af3..67fcc43577d2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,7 +93,7 @@ void __show_regs(struct pt_regs *regs, int all)
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
- cr4 = read_cr4();
+ cr4 = __read_cr4();
printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
fs, fsindex, gs, gsindex, shadowgs);
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index ca9622a25e95..cd9685235df9 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -49,11 +49,11 @@ int mach_set_rtc_mmss(const struct timespec *now)
retval = set_rtc_time(&tm);
if (retval)
printk(KERN_ERR "%s: RTC write failed with error %d\n",
- __FUNCTION__, retval);
+ __func__, retval);
} else {
printk(KERN_ERR
"%s: Invalid RTC value: write of %lx to RTC failed\n",
- __FUNCTION__, nowtime);
+ __func__, nowtime);
retval = -EINVAL;
}
return retval;
@@ -170,7 +170,7 @@ static struct platform_device rtc_device = {
static __init int add_rtc_cmos(void)
{
#ifdef CONFIG_PNP
- static const char * const const ids[] __initconst =
+ static const char * const ids[] __initconst =
{ "PNP0b00", "PNP0b01", "PNP0b02", };
struct pnp_dev *dev;
struct pnp_id *id;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ab4734e5411d..0a2421cca01f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -89,6 +89,7 @@
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>
+#include <asm/kasan.h>
#include <asm/vsyscall.h>
#include <asm/cpu.h>
@@ -431,15 +432,13 @@ static void __init parse_setup_data(void)
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- u32 data_len, map_len, data_type;
+ u32 data_len, data_type;
- map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
- (u64)sizeof(struct setup_data));
- data = early_memremap(pa_data, map_len);
+ data = early_memremap(pa_data, sizeof(*data));
data_len = data->len + sizeof(struct setup_data);
data_type = data->type;
pa_next = data->next;
- early_iounmap(data, map_len);
+ early_iounmap(data, sizeof(*data));
switch (data_type) {
case SETUP_E820_EXT:
@@ -1176,9 +1175,11 @@ void __init setup_arch(char **cmdline_p)
x86_init.paging.pagetable_init();
+ kasan_init();
+
if (boot_cpu_data.cpuid_level >= 0) {
/* A CPU has %cr4 if and only if it has CPUID */
- mmu_cr4_features = read_cr4();
+ mmu_cr4_features = __read_cr4();
if (trampoline_cr4_features)
*trampoline_cr4_features = mmu_cr4_features;
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index ed37a768d0fc..e5042463c1bc 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -69,7 +69,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
get_user_try {
@@ -740,12 +740,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
user_exit();
-#ifdef CONFIG_X86_MCE
- /* notify userspace of pending MCEs */
- if (thread_info_flags & _TIF_MCE_NOTIFY)
- mce_notify_process();
-#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
-
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6d7022c683e3..febc6aabc72e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
-#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
@@ -104,6 +103,43 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
atomic_t init_deasserted;
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ CMOS_WRITE(0xa, 0xf);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ local_flush_tlb();
+ pr_debug("1.\n");
+ *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+ start_eip >> 4;
+ pr_debug("2.\n");
+ *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+ start_eip & 0xf;
+ pr_debug("3.\n");
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+ unsigned long flags;
+
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ spin_lock_irqsave(&rtc_lock, flags);
+ CMOS_WRITE(0, 0xf);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
+}
+
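For reference, the two trampoline words written above encode the AP's real-mode entry point as segment:offset. A minimal sketch with a made-up start address:

/* Illustrative only: how start_eip is split for the warm-reset vector
 * (the start address is a hypothetical example). */
unsigned long start_eip = 0x9a000;
unsigned short seg = start_eip >> 4;	/* 0x9a00, the TRAMPOLINE_PHYS_HIGH word */
unsigned short off = start_eip & 0xf;	/* 0x0,    the TRAMPOLINE_PHYS_LOW word  */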
/*
* Report back to the Boot Processor during boot time or to the caller processor
* during CPU online.
@@ -136,8 +172,7 @@ static void smp_callin(void)
* CPU, first the APIC. (this is probably redundant on most
* boards)
*/
- setup_local_APIC();
- end_local_APIC_setup();
+ apic_ap_setup();
/*
* Need to setup vector mappings before we enable interrupts.
@@ -955,9 +990,12 @@ void arch_disable_smp_support(void)
*/
static __init void disable_smp(void)
{
+ pr_info("SMP disabled\n");
+
+ disable_ioapic_support();
+
init_cpu_present(cpumask_of(0));
init_cpu_possible(cpumask_of(0));
- smpboot_clear_io_apic_irqs();
if (smp_found_config)
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
@@ -967,6 +1005,13 @@ static __init void disable_smp(void)
cpumask_set_cpu(0, cpu_core_mask(0));
}
+enum {
+ SMP_OK,
+ SMP_NO_CONFIG,
+ SMP_NO_APIC,
+ SMP_FORCE_UP,
+};
+
/*
* Various sanity checks.
*/
@@ -1014,10 +1059,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
if (!smp_found_config && !acpi_lapic) {
preempt_enable();
pr_notice("SMP motherboard not detected\n");
- disable_smp();
- if (APIC_init_uniprocessor())
- pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
- return -1;
+ return SMP_NO_CONFIG;
}
/*
@@ -1041,9 +1083,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
boot_cpu_physical_apicid);
pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
}
- smpboot_clear_io_apic();
- disable_ioapic_support();
- return -1;
+ return SMP_NO_APIC;
}
verify_local_APIC();
@@ -1053,15 +1093,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
*/
if (!max_cpus) {
pr_info("SMP mode deactivated\n");
- smpboot_clear_io_apic();
-
- connect_bsp_APIC();
- setup_local_APIC();
- bsp_end_local_APIC_setup();
- return -1;
+ return SMP_FORCE_UP;
}
- return 0;
+ return SMP_OK;
}
static void __init smp_cpu_index_default(void)
@@ -1101,10 +1136,21 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
}
set_cpu_sibling_map(0);
- if (smp_sanity_check(max_cpus) < 0) {
- pr_info("SMP disabled\n");
+ switch (smp_sanity_check(max_cpus)) {
+ case SMP_NO_CONFIG:
disable_smp();
+ if (APIC_init_uniprocessor())
+ pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
return;
+ case SMP_NO_APIC:
+ disable_smp();
+ return;
+ case SMP_FORCE_UP:
+ disable_smp();
+ apic_bsp_setup(false);
+ return;
+ case SMP_OK:
+ break;
}
default_setup_apic_routing();
@@ -1115,33 +1161,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
/* Or can we switch back to PIC here? */
}
- connect_bsp_APIC();
-
- /*
- * Switch from PIC to APIC mode.
- */
- setup_local_APIC();
-
- if (x2apic_mode)
- cpu0_logical_apicid = apic_read(APIC_LDR);
- else
- cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
-
- /*
- * Enable IO APIC before setting up error vector
- */
- if (!skip_ioapic_setup && nr_ioapics)
- enable_IO_APIC();
-
- bsp_end_local_APIC_setup();
- smpboot_setup_io_apic();
- /*
- * Set up local APIC timer on boot CPU.
- */
+ cpu0_logical_apicid = apic_bsp_setup(false);
pr_info("CPU%d: ", 0);
print_cpu_info(&cpu_data(0));
- x86_init.timers.setup_percpu_clockev();
if (is_uv_system())
uv_system_init();
@@ -1177,9 +1200,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
nmi_selftest();
impress_friends();
-#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
-#endif
mtrr_aps_init();
}
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 4e942f31b1a7..7fc5e843f247 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -29,7 +29,28 @@ static int get_free_idx(void)
static bool tls_desc_okay(const struct user_desc *info)
{
- if (LDT_empty(info))
+ /*
+ * For historical reasons (i.e. no one ever documented how any
+ * of the segmentation APIs work), user programs can and do
+ * assume that a struct user_desc that's all zeros except for
+ * entry_number means "no segment at all". This never actually
+ * worked. In fact, up to Linux 3.19, a struct user_desc like
+ * this would create a 16-bit read-write segment with base and
+ * limit both equal to zero.
+ *
+ * That was close enough to "no segment at all" until we
+ * hardened this function to disallow 16-bit TLS segments. Fix
+ * it up by interpreting these zeroed segments the way that they
+ * were almost certainly intended to be interpreted.
+ *
+ * The correct way to ask for "no segment at all" is to specify
+ * a user_desc that satisfies LDT_empty. To keep everything
+ * working, we accept both.
+ *
+ * Note that there's a similar kludge in modify_ldt -- look at
+ * the distinction between modes 1 and 0x11.
+ */
+ if (LDT_empty(info) || LDT_zero(info))
return true;
/*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
cpu = get_cpu();
while (n-- > 0) {
- if (LDT_empty(info))
+ if (LDT_empty(info) || LDT_zero(info))
desc->a = desc->b = 0;
else
fill_ldt(desc, info);
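The "all zeros except entry_number" request described in the comment above looks like the snippet below; with this change it is treated like an LDT_empty() descriptor and simply clears the slot:

/* Illustrative only: a "clear this TLS slot" request as user space has
 * historically issued it (slot number is hypothetical). */
#include <asm/ldt.h>

struct user_desc clear_entry = {
	.entry_number = 12,
	/* every other field left zero: matches LDT_zero() */
};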
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 88900e288021..9d2073e2ecc9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -108,6 +108,88 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
preempt_count_dec();
}
+enum ctx_state ist_enter(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ if (user_mode_vm(regs)) {
+ /* Other than that, we're just an exception. */
+ prev_state = exception_enter();
+ } else {
+ /*
+ * We might have interrupted pretty much anything. In
+ * fact, if we're a machine check, we can even interrupt
+ * NMI processing. We don't want in_nmi() to return true,
+ * but we need to notify RCU.
+ */
+ rcu_nmi_enter();
+ prev_state = IN_KERNEL; /* the value is irrelevant. */
+ }
+
+ /*
+ * We are atomic because we're on the IST stack (or we're on x86_32,
+ * in which case we still shouldn't schedule).
+ *
+ * This must be after exception_enter(), because exception_enter()
+ * won't do anything if in_interrupt() returns true.
+ */
+ preempt_count_add(HARDIRQ_OFFSET);
+
+ /* This code is a bit fragile. Test it. */
+ rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
+
+ return prev_state;
+}
+
+void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
+{
+ /* Must be before exception_exit. */
+ preempt_count_sub(HARDIRQ_OFFSET);
+
+ if (user_mode_vm(regs))
+ return exception_exit(prev_state);
+ else
+ rcu_nmi_exit();
+}
+
+/**
+ * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
+ * @regs: regs passed to the IST exception handler
+ *
+ * IST exception handlers normally cannot schedule. As a special
+ * exception, if the exception interrupted userspace code (i.e.
+ * user_mode_vm(regs) would return true) and the exception was not
+ * a double fault, it can be safe to schedule. ist_begin_non_atomic()
+ * begins a non-atomic section within an ist_enter()/ist_exit() region.
+ * Callers are responsible for enabling interrupts themselves inside
+ * the non-atomic section, and callers must call ist_end_non_atomic()
+ * before ist_exit().
+ */
+void ist_begin_non_atomic(struct pt_regs *regs)
+{
+ BUG_ON(!user_mode_vm(regs));
+
+ /*
+ * Sanity check: we need to be on the normal thread stack. This
+ * will catch asm bugs and any attempt to use ist_preempt_enable
+ * from double_fault.
+ */
+ BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
+ & ~(THREAD_SIZE - 1)) != 0);
+
+ preempt_count_sub(HARDIRQ_OFFSET);
+}
+
+/**
+ * ist_end_non_atomic() - end a non-atomic section in an IST exception
+ *
+ * Ends a non-atomic section started with ist_begin_non_atomic().
+ */
+void ist_end_non_atomic(void)
+{
+ preempt_count_add(HARDIRQ_OFFSET);
+}
+
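For reference, the intended calling pattern for these helpers is roughly the sketch below; the handler name and the work inside it are hypothetical, and only the user-mode case may open a non-atomic section:

/* Illustrative only: expected usage of the IST helpers above
 * (handler name and body are hypothetical). */
dotraplinkage void do_example_ist_trap(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state = ist_enter(regs);

	/* ... atomic part of the handling ... */

	if (user_mode_vm(regs)) {
		ist_begin_non_atomic(regs);	/* scheduling is allowed here */
		local_irq_enable();
		/* ... e.g. deliver a signal ... */
		local_irq_disable();
		ist_end_non_atomic();
	}

	ist_exit(regs, prev_state);
}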
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
struct pt_regs *regs, long error_code)
@@ -251,6 +333,8 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
* end up promoting it to a doublefault. In that case, modify
* the stack to make it look like we just entered the #GP
* handler from user space, similar to bad_iret.
+ *
+ * No need for ist_enter here because we don't use RCU.
*/
if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
regs->cs == __KERNEL_CS &&
@@ -263,12 +347,12 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
regs->ip = (unsigned long)general_protection;
regs->sp = (unsigned long)&normal_regs->orig_ax;
+
return;
}
#endif
- exception_enter();
- /* Return not checked because double check cannot be ignored */
+ ist_enter(regs); /* Discard prev_state because we won't return. */
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
tsk->thread.error_code = error_code;
@@ -434,7 +518,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
if (poke_int3_handler(regs))
return;
- prev_state = exception_enter();
+ prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
@@ -460,33 +544,20 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
preempt_conditional_cli(regs);
debug_stack_usage_dec();
exit:
- exception_exit(prev_state);
+ ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);
#ifdef CONFIG_X86_64
/*
- * Help handler running on IST stack to switch back to user stack
- * for scheduling or signal handling. The actual stack switch is done in
- * entry.S
+ * Help handler running on IST stack to switch off the IST stack if the
+ * interrupted code was in user mode. The actual stack switch is done in
+ * entry_64.S
*/
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
- struct pt_regs *regs = eregs;
- /* Did already sync */
- if (eregs == (struct pt_regs *)eregs->sp)
- ;
- /* Exception from user space */
- else if (user_mode(eregs))
- regs = task_pt_regs(current);
- /*
- * Exception from kernel and interrupts are enabled. Move to
- * kernel process stack.
- */
- else if (eregs->flags & X86_EFLAGS_IF)
- regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
- if (eregs != regs)
- *regs = *eregs;
+ struct pt_regs *regs = task_pt_regs(current);
+ *regs = *eregs;
return regs;
}
NOKPROBE_SYMBOL(sync_regs);
@@ -554,7 +625,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
unsigned long dr6;
int si_code;
- prev_state = exception_enter();
+ prev_state = ist_enter(regs);
get_debugreg(dr6, 6);
@@ -629,7 +700,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
debug_stack_usage_dec();
exit:
- exception_exit(prev_state);
+ ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);
@@ -788,18 +859,16 @@ void math_state_restore(void)
local_irq_disable();
}
+ /* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
+ kernel_fpu_disable();
__thread_fpu_begin(tsk);
-
- /*
- * Paranoid restore. send a SIGSEGV if we fail to restore the state.
- */
if (unlikely(restore_fpu_checking(tsk))) {
drop_init_fpu(tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
- return;
+ } else {
+ tsk->thread.fpu_counter++;
}
-
- tsk->thread.fpu_counter++;
+ kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);
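
The traps.c hunk above replaces exception_enter()/exception_exit() with ist_enter()/ist_exit() and introduces ist_begin_non_atomic()/ist_end_non_atomic(). Below is a minimal sketch of how a handler is expected to pair these calls; the handler name and body are hypothetical and only illustrate the contract spelled out in the kernel-doc (non-atomic sections only when the exception interrupted user mode, interrupts enabled by the caller, ist_end_non_atomic() before ist_exit()).

dotraplinkage void do_example_ist(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state = ist_enter(regs);

	/* Atomic part: no scheduling, no sleeping locks. */

	if (user_mode_vm(regs)) {
		ist_begin_non_atomic(regs);
		local_irq_enable();
		/* May schedule here, e.g. to deliver a signal. */
		local_irq_disable();
		ist_end_non_atomic();
	}

	ist_exit(regs, prev_state);
}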
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b7e50bba3bbb..505449700e0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
goto success;
}
}
- pr_err("Fast TSC calibration failed\n");
+ pr_info("Fast TSC calibration failed\n");
return 0;
success:
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 040681928e9d..37d8fa4438f0 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -50,13 +50,19 @@ EXPORT_SYMBOL(csum_partial);
#undef memset
#undef memmove
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
-extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(memmove);
#ifndef CONFIG_DEBUG_VIRTUAL
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 0de1fae2bdf0..34f66e58a896 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -12,6 +12,7 @@
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/sigframe.h>
+#include <asm/tlbflush.h>
#include <asm/xcr.h>
/*
@@ -453,7 +454,7 @@ static void prepare_fx_sw_frame(void)
*/
static inline void xstate_enable(void)
{
- set_in_cr4(X86_CR4_OSXSAVE);
+ cr4_set_bits(X86_CR4_OSXSAVE);
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f9d16ff56c6b..413a7bf9efbb 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -39,7 +39,9 @@ config KVM
select PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
+ select SRCU
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 169b09d76ddd..e0b794a84c35 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -86,6 +86,7 @@
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
+#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
@@ -124,6 +125,7 @@
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
+#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
@@ -165,10 +167,10 @@
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
-#define NoBigReal ((u64)1 << 50) /* No big real mode */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
+#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@@ -213,6 +215,7 @@ struct opcode {
const struct gprefix *gprefix;
const struct escape *esc;
const struct instr_dual *idual;
+ const struct mode_dual *mdual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
@@ -240,6 +243,11 @@ struct instr_dual {
struct opcode mod3;
};
+struct mode_dual {
+ struct opcode mode32;
+ struct opcode mode64;
+};
+
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
@@ -262,6 +270,13 @@ struct instr_dual {
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
+enum x86_transfer_type {
+ X86_TRANSFER_NONE,
+ X86_TRANSFER_CALL_JMP,
+ X86_TRANSFER_RET,
+ X86_TRANSFER_TASK_SWITCH,
+};
+
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
if (!(ctxt->regs_valid & (1 << nr))) {
@@ -669,9 +684,13 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
if (addr.ea > lim)
goto bad;
- *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
- if (size > *max_size)
- goto bad;
+ if (lim == 0xffffffff)
+ *max_size = ~0u;
+ else {
+ *max_size = (u64)lim + 1 - addr.ea;
+ if (size > *max_size)
+ goto bad;
+ }
la &= (u32)-1;
break;
}
@@ -722,19 +741,26 @@ static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
{
enum x86emul_mode mode = ctxt->mode;
+ int rc;
#ifdef CONFIG_X86_64
- if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
- u64 efer = 0;
+ if (ctxt->mode >= X86EMUL_MODE_PROT16) {
+ if (cs_desc->l) {
+ u64 efer = 0;
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
- if (efer & EFER_LMA)
- mode = X86EMUL_MODE_PROT64;
+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ if (efer & EFER_LMA)
+ mode = X86EMUL_MODE_PROT64;
+ } else
+ mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
- return assign_eip(ctxt, dst, mode);
+ rc = assign_eip(ctxt, dst, mode);
+ if (rc == X86EMUL_CONTINUE)
+ ctxt->mode = mode;
+ return rc;
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1057,8 +1083,6 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->ops->put_fpu(ctxt);
- /* force 2 byte destination */
- ctxt->dst.bytes = 2;
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;
@@ -1075,8 +1099,6 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->ops->put_fpu(ctxt);
- /* force 2 byte destination */
- ctxt->dst.bytes = 2;
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;
@@ -1223,6 +1245,10 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
+ /* Increment ESP on POP [ESP] */
+ if ((ctxt->d & IncSP) &&
+ base_reg == VCPU_REGS_RSP)
+ modrm_ea += ctxt->op_bytes;
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
@@ -1435,10 +1461,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
ops->get_gdt(ctxt, dt);
}
-/* allowed just for 8 bytes segments */
-static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- u16 selector, struct desc_struct *desc,
- ulong *desc_addr_p)
+static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
+ u16 selector, ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
@@ -1449,8 +1473,34 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
- *desc_addr_p = addr = dt.address + index * 8;
- return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+ addr = dt.address + index * 8;
+
+#ifdef CONFIG_X86_64
+ if (addr >> 32 != 0) {
+ u64 efer = 0;
+
+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ if (!(efer & EFER_LMA))
+ addr &= (u32)-1;
+ }
+#endif
+
+ *desc_addr_p = addr;
+ return X86EMUL_CONTINUE;
+}
+
+/* allowed just for 8 bytes segments */
+static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ u16 selector, struct desc_struct *desc,
+ ulong *desc_addr_p)
+{
+ int rc;
+
+ rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
&ctxt->exception);
}
@@ -1458,16 +1508,13 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
{
- struct desc_ptr dt;
- u16 index = selector >> 3;
+ int rc;
ulong addr;
- get_descriptor_table_ptr(ctxt, selector, &dt);
-
- if (dt.size < index * 8 + 7)
- return emulate_gp(ctxt, selector & 0xfffc);
+ rc = get_descriptor_ptr(ctxt, selector, &addr);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
- addr = dt.address + index * 8;
return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
@@ -1475,7 +1522,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
- bool in_task_switch,
+ enum x86_transfer_type transfer,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
@@ -1529,11 +1576,15 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
return ret;
err_code = selector & 0xfffc;
- err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
+ err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
+ GP_VECTOR;
/* can't load system descriptor into segment selector */
- if (seg <= VCPU_SREG_GS && !seg_desc.s)
+ if (seg <= VCPU_SREG_GS && !seg_desc.s) {
+ if (transfer == X86_TRANSFER_CALL_JMP)
+ return X86EMUL_UNHANDLEABLE;
goto exception;
+ }
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
@@ -1605,10 +1656,13 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (seg_desc.s) {
/* mark segment as accessed */
- seg_desc.type |= 1;
- ret = write_segment_descriptor(ctxt, selector, &seg_desc);
- if (ret != X86EMUL_CONTINUE)
- return ret;
+ if (!(seg_desc.type & 1)) {
+ seg_desc.type |= 1;
+ ret = write_segment_descriptor(ctxt, selector,
+ &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
@@ -1631,7 +1685,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
- return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
+ return __load_segment_descriptor(ctxt, selector, seg, cpl,
+ X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
@@ -1828,12 +1883,14 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
unsigned long selector;
int rc;
- rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
+ rc = emulate_pop(ctxt, &selector, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
+ if (ctxt->op_bytes > 2)
+ rsp_increment(ctxt, ctxt->op_bytes - 2);
rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
@@ -2007,6 +2064,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
}
@@ -2041,7 +2099,8 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
- rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
+ X86_TRANSFER_CALL_JMP,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -2130,7 +2189,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
- rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
+ X86_TRANSFER_RET,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -2163,12 +2223,15 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
fastop(ctxt, em_cmp);
if (ctxt->eflags & EFLG_ZF) {
- /* Success: write back to memory. */
+ /* Success: write back to memory; no update of EAX */
+ ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
- ctxt->dst.type = OP_REG;
- ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
+ ctxt->src.type = OP_REG;
+ ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
+ ctxt->src.val = ctxt->dst.orig_val;
+ /* Create write-cycle to dest by writing the same value */
ctxt->dst.val = ctxt->dst.orig_val;
}
return X86EMUL_CONTINUE;
@@ -2348,7 +2411,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
+ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
@@ -2359,25 +2422,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
- switch (ctxt->mode) {
- case X86EMUL_MODE_PROT32:
- if ((msr_data & 0xfffc) == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- case X86EMUL_MODE_PROT64:
- if (msr_data == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- default:
- break;
- }
+ if ((msr_data & 0xfffc) == 0x0)
+ return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
- cs_sel = (u16)msr_data;
- cs_sel &= ~SELECTOR_RPL_MASK;
+ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
- ss_sel &= ~SELECTOR_RPL_MASK;
- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
+ if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
@@ -2386,10 +2437,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
- ctxt->_eip = msr_data;
+ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
return X86EMUL_CONTINUE;
}
@@ -2567,23 +2619,23 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
* it is handled in a context of new task
*/
ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2705,31 +2757,31 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
* it is handled in a context of new task
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
- cpl, true, NULL);
+ cpl, X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
- true, NULL);
+ X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2750,7 +2802,6 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
save_state_to_tss32(ctxt, &tss_seg);
@@ -2759,13 +2810,11 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
ldt_sel_offset - eip_offset, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
if (old_tss_sel != 0xffff) {
@@ -2776,7 +2825,6 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
}
@@ -3010,15 +3058,16 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
+ enum x86emul_mode prev_mode = ctxt->mode;
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
- rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
- &new_desc);
+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
+ X86_TRANSFER_CALL_JMP, &new_desc);
if (rc != X86EMUL_CONTINUE)
- return X86EMUL_CONTINUE;
+ return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
@@ -3033,11 +3082,14 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
rc = em_push(ctxt);
/* If we failed, we tainted the memory, but at the very least we should
restore cs */
- if (rc != X86EMUL_CONTINUE)
+ if (rc != X86EMUL_CONTINUE) {
+ pr_warn_once("faulting far call emulation tainted memory\n");
goto fail;
+ }
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+ ctxt->mode = prev_mode;
return rc;
}
@@ -3488,6 +3540,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_movsxd(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->dst.val = (s32) ctxt->src.val;
+ return X86EMUL_CONTINUE;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -3687,6 +3745,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
+#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
@@ -3749,7 +3808,7 @@ static const struct opcode group1[] = {
};
static const struct opcode group1A[] = {
- I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
+ I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};
static const struct opcode group2[] = {
@@ -3791,8 +3850,8 @@ static const struct opcode group5[] = {
};
static const struct opcode group6[] = {
- DI(Prot, sldt),
- DI(Prot, str),
+ DI(Prot | DstMem, sldt),
+ DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
@@ -3865,7 +3924,7 @@ static const struct gprefix pfx_0f_e7 = {
};
static const struct escape escape_d9 = { {
- N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
+ N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
@@ -3907,7 +3966,7 @@ static const struct escape escape_db = { {
} };
static const struct escape escape_dd = { {
- N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
+ N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
@@ -3931,6 +3990,10 @@ static const struct instr_dual instr_dual_0f_c3 = {
I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};
+static const struct mode_dual mode_dual_63 = {
+ N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
+};
+
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
@@ -3965,7 +4028,7 @@ static const struct opcode opcode_table[256] = {
/* 0x60 - 0x67 */
I(ImplicitOps | Stack | No64, em_pusha),
I(ImplicitOps | Stack | No64, em_popa),
- N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
+ N, MD(ModRM, &mode_dual_63),
N, N, N, N,
/* 0x68 - 0x6F */
I(SrcImm | Mov | Stack, em_push),
@@ -4021,8 +4084,8 @@ static const struct opcode opcode_table[256] = {
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
- I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
- I(ImplicitOps | Stack, em_ret_far),
+ I(ImplicitOps | SrcImmU16, em_ret_far_imm),
+ I(ImplicitOps, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
@@ -4119,7 +4182,7 @@ static const struct opcode twobyte_table[256] = {
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
- I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
+ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
@@ -4185,6 +4248,8 @@ static const struct opcode opcode_map_0f_38[256] = {
#undef I
#undef GP
#undef EXT
+#undef MD
+#undef ID
#undef D2bv
#undef D2bvIP
@@ -4574,6 +4639,12 @@ done_prefixes:
else
opcode = opcode.u.idual->mod012;
break;
+ case ModeDual:
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ opcode = opcode.u.mdual->mode64;
+ else
+ opcode = opcode.u.mdual->mode32;
+ break;
default:
return EMULATION_FAILED;
}
@@ -4871,8 +4942,13 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
- if (rc != X86EMUL_CONTINUE)
+ if (rc != X86EMUL_CONTINUE) {
+ if (!(ctxt->d & NoWrite) &&
+ rc == X86EMUL_PROPAGATE_FAULT &&
+ ctxt->exception.vector == PF_VECTOR)
+ ctxt->exception.error_code |= PFERR_WRITE_MASK;
goto done;
+ }
}
ctxt->dst.orig_val = ctxt->dst.val;
@@ -4910,11 +4986,6 @@ special_insn:
goto threebyte_insn;
switch (ctxt->b) {
- case 0x63: /* movsxd */
- if (ctxt->mode != X86EMUL_MODE_PROT64)
- goto cannot_emulate;
- ctxt->dst.val = (s32) ctxt->src.val;
- break;
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
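
The emulate.c changes above fold the old ad-hoc movsxd special case into the decode tables via the new ModeDual flag. A condensed sketch of the dispatch, using only the structures introduced in this hunk (the helper name is made up for illustration):

static struct opcode pick_mode_dual(const struct mode_dual *md,
				    enum x86emul_mode mode)
{
	/* Opcode 0x63: invalid (N) outside 64-bit mode, movsxd inside it. */
	return mode == X86EMUL_MODE_PROT64 ? md->mode64 : md->mode32;
}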
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 3c9195535ffc..c2e36d934af4 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -98,7 +98,7 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 17b73eeac8a4..7dbced309ddb 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -138,7 +138,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
gfn += page_size >> PAGE_SHIFT;
-
+ cond_resched();
}
return 0;
@@ -306,6 +306,8 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
kvm_unpin_pages(kvm, pfn, unmap_pages);
gfn += unmap_pages;
+
+ cond_resched();
}
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4f0c0b954686..e55b5fc344eb 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -33,6 +33,7 @@
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
+#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
@@ -192,6 +193,9 @@ static void recalculate_apic_map(struct kvm *kvm)
u16 cid, lid;
u32 ldr, aid;
+ if (!kvm_apic_present(vcpu))
+ continue;
+
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
@@ -324,17 +328,24 @@ static u8 count_vectors(void *bitmap)
return count;
}
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+void __kvm_apic_update_irr(u32 *pir, void *regs)
{
u32 i, pir_val;
- struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i <= 7; i++) {
pir_val = xchg(&pir[i], 0);
if (pir_val)
- *((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
+ *((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
}
}
+EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
+
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ __kvm_apic_update_irr(pir, apic->regs);
+}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
@@ -402,7 +413,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* because the processor can modify ISR under the hood. Instead
* just set SVI.
*/
- if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ if (unlikely(kvm_x86_ops->hwapic_isr_update))
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
else {
++apic->isr_count;
@@ -450,7 +461,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* on the other hand isr_count and highest_isr_cache are unused
* and must be left alone.
*/
- if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ if (unlikely(kvm_x86_ops->hwapic_isr_update))
kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
apic_find_highest_isr(apic));
else {
@@ -577,55 +588,48 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}
-static int kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
{
return dest == (apic_x2apic_mode(apic) ?
X2APIC_BROADCAST : APIC_BROADCAST);
}
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
{
return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
}
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
+static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
- int result = 0;
u32 logical_id;
if (kvm_apic_broadcast(apic, mda))
- return 1;
+ return true;
- if (apic_x2apic_mode(apic)) {
- logical_id = kvm_apic_get_reg(apic, APIC_LDR);
- return logical_id & mda;
- }
+ logical_id = kvm_apic_get_reg(apic, APIC_LDR);
+
+ if (apic_x2apic_mode(apic))
+ return ((logical_id >> 16) == (mda >> 16))
+ && (logical_id & mda & 0xffff) != 0;
- logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));
+ logical_id = GET_APIC_LOGICAL_ID(logical_id);
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
- if (logical_id & mda)
- result = 1;
- break;
+ return (logical_id & mda) != 0;
case APIC_DFR_CLUSTER:
- if (((logical_id >> 4) == (mda >> 0x4))
- && (logical_id & mda & 0xf))
- result = 1;
- break;
+ return ((logical_id >> 4) == (mda >> 4))
+ && (logical_id & mda & 0xf) != 0;
default:
apic_debug("Bad DFR vcpu %d: %08x\n",
apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
- break;
+ return false;
}
-
- return result;
}
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode)
{
- int result = 0;
struct kvm_lapic *target = vcpu->arch.apic;
apic_debug("target %p, source %p, dest 0x%x, "
@@ -635,29 +639,21 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
ASSERT(target);
switch (short_hand) {
case APIC_DEST_NOSHORT:
- if (dest_mode == 0)
- /* Physical mode. */
- result = kvm_apic_match_physical_addr(target, dest);
+ if (dest_mode == APIC_DEST_PHYSICAL)
+ return kvm_apic_match_physical_addr(target, dest);
else
- /* Logical mode. */
- result = kvm_apic_match_logical_addr(target, dest);
- break;
+ return kvm_apic_match_logical_addr(target, dest);
case APIC_DEST_SELF:
- result = (target == source);
- break;
+ return target == source;
case APIC_DEST_ALLINC:
- result = 1;
- break;
+ return true;
case APIC_DEST_ALLBUT:
- result = (target != source);
- break;
+ return target != source;
default:
apic_debug("kvm: apic: Bad dest shorthand value %x\n",
short_hand);
- break;
+ return false;
}
-
- return result;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
@@ -690,7 +686,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
ret = true;
- if (irq->dest_mode == 0) { /* physical mode */
+ if (irq->dest_mode == APIC_DEST_PHYSICAL) {
if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
goto out;
@@ -1073,25 +1069,72 @@ static void apic_timer_expired(struct kvm_lapic *apic)
{
struct kvm_vcpu *vcpu = apic->vcpu;
wait_queue_head_t *q = &vcpu->wq;
+ struct kvm_timer *ktimer = &apic->lapic_timer;
- /*
- * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
- * vcpu_enter_guest.
- */
if (atomic_read(&apic->lapic_timer.pending))
return;
atomic_inc(&apic->lapic_timer.pending);
- /* FIXME: this code should not know anything about vcpus */
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_set_pending_timer(vcpu);
if (waitqueue_active(q))
wake_up_interruptible(q);
+
+ if (apic_lvtt_tscdeadline(apic))
+ ktimer->expired_tscdeadline = ktimer->tscdeadline;
+}
+
+/*
+ * On APICv, this test will cause a busy wait
+ * during a higher-priority task.
+ */
+
+static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 reg = kvm_apic_get_reg(apic, APIC_LVTT);
+
+ if (kvm_apic_hw_enabled(apic)) {
+ int vec = reg & APIC_VECTOR_MASK;
+ void *bitmap = apic->regs + APIC_ISR;
+
+ if (kvm_x86_ops->deliver_posted_interrupt)
+ bitmap = apic->regs + APIC_IRR;
+
+ if (apic_test_vector(vec, bitmap))
+ return true;
+ }
+ return false;
+}
+
+void wait_lapic_expire(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u64 guest_tsc, tsc_deadline;
+
+ if (!kvm_vcpu_has_lapic(vcpu))
+ return;
+
+ if (apic->lapic_timer.expired_tscdeadline == 0)
+ return;
+
+ if (!lapic_timer_int_injected(vcpu))
+ return;
+
+ tsc_deadline = apic->lapic_timer.expired_tscdeadline;
+ apic->lapic_timer.expired_tscdeadline = 0;
+ guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+ trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
+
+ /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
+ if (guest_tsc < tsc_deadline)
+ __delay(tsc_deadline - guest_tsc);
}
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
+
atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
@@ -1137,6 +1180,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
/* lapic timer in tsc deadline mode */
u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
u64 ns = 0;
+ ktime_t expire;
struct kvm_vcpu *vcpu = apic->vcpu;
unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;
@@ -1151,8 +1195,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
+ expire = ktime_add_ns(now, ns);
+ expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+ expire, HRTIMER_MODE_ABS);
} else
apic_timer_expired(apic);
@@ -1742,7 +1788,9 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
if (kvm_x86_ops->hwapic_irr_update)
kvm_x86_ops->hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
- kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
+ if (unlikely(kvm_x86_ops->hwapic_isr_update))
+ kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
+ apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_rtc_eoi_tracking_restore_one(vcpu);
}
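
The lapic.c hunk reworks logical-destination matching into boolean helpers. As a worked example of the cluster-mode branch (values chosen purely for illustration, helper name made up):

static bool cluster_match_example(u32 logical_id, u32 mda)
{
	/* Mirrors the APIC_DFR_CLUSTER case above. */
	return ((logical_id >> 4) == (mda >> 4)) &&
	       (logical_id & mda & 0xf) != 0;
}

/*
 * cluster_match_example(0x23, 0x21) is true: both IDs sit in cluster 2
 * and share bit 0x1, so an APIC with LDR logical ID 0x23 accepts an
 * MDA of 0x21.
 */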
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c674fce53cf9..0bc6c656625b 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -14,6 +14,7 @@ struct kvm_timer {
u32 timer_mode;
u32 timer_mode_mask;
u64 tscdeadline;
+ u64 expired_tscdeadline;
atomic_t pending; /* accumulated triggered timers */
};
@@ -56,9 +57,8 @@ u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
+void __kvm_apic_update_irr(u32 *pir, void *regs);
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest);
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
unsigned long *dest_map);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
@@ -170,4 +170,6 @@ static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+void wait_lapic_expire(struct kvm_vcpu *vcpu);
+
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f83fc6c5e0ba..cee759299a35 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -63,30 +63,16 @@ enum {
#undef MMU_DEBUG
#ifdef MMU_DEBUG
+static bool dbg = 0;
+module_param(dbg, bool, 0644);
#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
-
+#define MMU_WARN_ON(x) WARN_ON(x)
#else
-
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
-
-#endif
-
-#ifdef MMU_DEBUG
-static bool dbg = 0;
-module_param(dbg, bool, 0644);
-#endif
-
-#ifndef MMU_DEBUG
-#define ASSERT(x) do { } while (0)
-#else
-#define ASSERT(x) \
- if (!(x)) { \
- printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
- __FILE__, __LINE__, #x); \
- }
+#define MMU_WARN_ON(x) do { } while (0)
#endif
#define PTE_PREFETCH_NUM 8
@@ -546,6 +532,11 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
return (old_spte & bit_mask) && !(new_spte & bit_mask);
}
+static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+ return (old_spte & bit_mask) != (new_spte & bit_mask);
+}
+
/* Rules for using mmu_spte_set:
* Set the sptep from nonpresent to present.
* Note: the sptep being assigned *must* be either not present
@@ -596,6 +587,14 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
if (!shadow_accessed_mask)
return ret;
+ /*
+ * Flush TLB when accessed/dirty bits are changed in the page tables,
+ * to guarantee consistency between TLB and page tables.
+ */
+ if (spte_is_bit_changed(old_spte, new_spte,
+ shadow_accessed_mask | shadow_dirty_mask))
+ ret = true;
+
if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
@@ -1216,6 +1215,60 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
return flush;
}
+static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
+{
+ u64 spte = *sptep;
+
+ rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
+
+ spte &= ~shadow_dirty_mask;
+
+ return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *sptep;
+ struct rmap_iterator iter;
+ bool flush = false;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ flush |= spte_clear_dirty(kvm, sptep);
+ sptep = rmap_get_next(&iter);
+ }
+
+ return flush;
+}
+
+static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
+{
+ u64 spte = *sptep;
+
+ rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
+
+ spte |= shadow_dirty_mask;
+
+ return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *sptep;
+ struct rmap_iterator iter;
+ bool flush = false;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ flush |= spte_set_dirty(kvm, sptep);
+ sptep = rmap_get_next(&iter);
+ }
+
+ return flush;
+}
+
/**
* kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
* @kvm: kvm instance
@@ -1226,7 +1279,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
* Used when we do not need to care about huge page mappings: e.g. during dirty
* logging we do not have any such mappings.
*/
-void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
@@ -1242,6 +1295,53 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
}
}
+/**
+ * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to clear D-bit
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should clear D-bit
+ *
+ * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
+ */
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ unsigned long *rmapp;
+
+ while (mask) {
+ rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+ PT_PAGE_TABLE_LEVEL, slot);
+ __rmap_clear_dirty(kvm, rmapp);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
+
+/**
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * PT level pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
+ * enable dirty logging for them.
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ if (kvm_x86_ops->enable_log_dirty_pt_masked)
+ kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
+ mask);
+ else
+ kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
+
static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
{
struct kvm_memory_slot *slot;
@@ -1536,7 +1636,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
- ASSERT(is_empty_shadow_page(sp->spt));
+ MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
list_del(&sp->link);
free_page((unsigned long)sp->spt);
@@ -2501,8 +2601,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
}
}
- if (pte_access & ACC_WRITE_MASK)
+ if (pte_access & ACC_WRITE_MASK) {
mark_page_dirty(vcpu->kvm, gfn);
+ spte |= shadow_dirty_mask;
+ }
set_pte:
if (mmu_spte_update(sptep, spte))
@@ -2818,6 +2920,18 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
*/
gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+ /*
+ * Theoretically we could also set dirty bit (and flush TLB) here in
+ * order to eliminate unnecessary PML logging. See comments in
+ * set_spte. But fast_page_fault is very unlikely to happen with PML
+ * enabled, so we do not do this. This might result in the same GPA
+ * being logged in the PML buffer again when the write really happens, and
+ * in mark_page_dirty eventually being called twice for it. But that is
+ * also harmless. This also avoids the TLB flush needed after setting the
+ * dirty bit, so non-PML cases won't be impacted.
+ *
+ * Compare with set_spte where instead shadow_dirty_mask is set.
+ */
if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
mark_page_dirty(vcpu->kvm, gfn);
@@ -3041,7 +3155,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
- ASSERT(!VALID_PAGE(root));
+ MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
make_mmu_pages_available(vcpu);
sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
@@ -3079,7 +3193,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
- ASSERT(!VALID_PAGE(root));
+ MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
make_mmu_pages_available(vcpu);
@@ -3104,7 +3218,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
- ASSERT(!VALID_PAGE(root));
+ MMU_WARN_ON(VALID_PAGE(root));
if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
if (!is_present_gpte(pdptr)) {
@@ -3329,8 +3443,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
if (r)
return r;
- ASSERT(vcpu);
- ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
gfn = gva >> PAGE_SHIFT;
@@ -3396,8 +3509,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
- ASSERT(vcpu);
- ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
@@ -3718,7 +3830,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
- ASSERT(is_pae(vcpu));
+ MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
@@ -3763,7 +3875,7 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = vcpu->arch.walk_mmu;
+ struct kvm_mmu *context = &vcpu->arch.mmu;
context->base_role.word = 0;
context->page_fault = tdp_page_fault;
@@ -3803,11 +3915,12 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
update_last_pte_bitmap(vcpu, context);
}
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- ASSERT(vcpu);
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
+ MMU_WARN_ON(VALID_PAGE(context->root_hpa));
if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -3818,19 +3931,19 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
else
paging32_init_context(vcpu, context);
- vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
- vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
- vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
- vcpu->arch.mmu.base_role.smep_andnot_wp
+ context->base_role.nxe = is_nx(vcpu);
+ context->base_role.cr4_pae = !!is_pae(vcpu);
+ context->base_role.cr0_wp = is_write_protection(vcpu);
+ context->base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- bool execonly)
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
- ASSERT(vcpu);
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
+ MMU_WARN_ON(VALID_PAGE(context->root_hpa));
context->shadow_root_level = kvm_x86_ops->get_tdp_level();
@@ -3851,11 +3964,13 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
- vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
- vcpu->arch.walk_mmu->get_cr3 = get_cr3;
- vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
+ kvm_init_shadow_mmu(vcpu);
+ context->set_cr3 = kvm_x86_ops->set_cr3;
+ context->get_cr3 = get_cr3;
+ context->get_pdptr = kvm_pdptr_read;
+ context->inject_page_fault = kvm_inject_page_fault;
}
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
@@ -3900,17 +4015,15 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
static void init_kvm_mmu(struct kvm_vcpu *vcpu)
{
if (mmu_is_nested(vcpu))
- return init_kvm_nested_mmu(vcpu);
+ init_kvm_nested_mmu(vcpu);
else if (tdp_enabled)
- return init_kvm_tdp_mmu(vcpu);
+ init_kvm_tdp_mmu(vcpu);
else
- return init_kvm_softmmu(vcpu);
+ init_kvm_softmmu(vcpu);
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
-
kvm_mmu_unload(vcpu);
init_kvm_mmu(vcpu);
}
@@ -4266,8 +4379,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
struct page *page;
int i;
- ASSERT(vcpu);
-
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
* Therefore we need to allocate shadow page tables in the first
@@ -4286,8 +4397,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
-
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
vcpu->arch.mmu.translate_gpa = translate_gpa;
@@ -4298,19 +4407,18 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
init_kvm_mmu(vcpu);
}
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
{
- struct kvm_memory_slot *memslot;
gfn_t last_gfn;
int i;
+ bool flush = false;
- memslot = id_to_memslot(kvm->memslots, slot);
last_gfn = memslot->base_gfn + memslot->npages - 1;
spin_lock(&kvm->mmu_lock);
@@ -4325,7 +4433,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
for (index = 0; index <= last_index; ++index, ++rmapp) {
if (*rmapp)
- __rmap_write_protect(kvm, rmapp, false);
+ flush |= __rmap_write_protect(kvm, rmapp,
+ false);
if (need_resched() || spin_needbreak(&kvm->mmu_lock))
cond_resched_lock(&kvm->mmu_lock);
@@ -4352,8 +4461,124 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
* instead of PT_WRITABLE_MASK, that means it does not depend
* on PT_WRITABLE_MASK anymore.
*/
- kvm_flush_remote_tlbs(kvm);
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ gfn_t last_gfn;
+ unsigned long *rmapp;
+ unsigned long last_index, index;
+ bool flush = false;
+
+ last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+ spin_lock(&kvm->mmu_lock);
+
+ rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
+ last_index = gfn_to_index(last_gfn, memslot->base_gfn,
+ PT_PAGE_TABLE_LEVEL);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ flush |= __rmap_clear_dirty(kvm, rmapp);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+
+ lockdep_assert_held(&kvm->slots_lock);
+
+ /*
+ * It's also safe to flush TLBs out of mmu lock here as currently this
+ * function is only used for dirty logging, in which case flushing TLB
+ * out of mmu lock also guarantees no dirty pages will be lost in
+ * dirty_bitmap.
+ */
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
+
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ gfn_t last_gfn;
+ int i;
+ bool flush = false;
+
+ last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+ spin_lock(&kvm->mmu_lock);
+
+ for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
+ i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+ unsigned long *rmapp;
+ unsigned long last_index, index;
+
+ rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+ last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ flush |= __rmap_write_protect(kvm, rmapp,
+ false);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+
+ /* see kvm_mmu_slot_remove_write_access */
+ lockdep_assert_held(&kvm->slots_lock);
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ gfn_t last_gfn;
+ int i;
+ bool flush = false;
+
+ last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+ spin_lock(&kvm->mmu_lock);
+
+ for (i = PT_PAGE_TABLE_LEVEL;
+ i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+ unsigned long *rmapp;
+ unsigned long last_index, index;
+
+ rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+ last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ flush |= __rmap_set_dirty(kvm, rmapp);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+
+ lockdep_assert_held(&kvm->slots_lock);
+
+ /* see kvm_mmu_slot_leaf_clear_dirty */
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
#define BATCH_ZAP_PAGES 10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
@@ -4606,8 +4831,6 @@ EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
-
kvm_mmu_unload(vcpu);
free_mmu_pages(vcpu);
mmu_free_memory_caches(vcpu);
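
Several of the new mmu.c helpers (e.g. kvm_mmu_clear_dirty_pt_masked()) walk one word of the dirty bitmap by repeatedly taking the lowest set bit. A self-contained sketch of that idiom, with a hypothetical callback standing in for the rmap work:

static void for_each_set_gfn(unsigned long mask, gfn_t base_gfn,
			     void (*fn)(gfn_t gfn))
{
	while (mask) {
		fn(base_gfn + __ffs(mask));	/* lowest set bit = gfn offset */
		mask &= mask - 1;		/* clear that bit */
	}
}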
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index bde8ee725754..c7d65637c851 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,18 +44,6 @@
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
-#define PFERR_PRESENT_BIT 0
-#define PFERR_WRITE_BIT 1
-#define PFERR_USER_BIT 2
-#define PFERR_RSVD_BIT 3
-#define PFERR_FETCH_BIT 4
-
-#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
-#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
-#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
-#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
-#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
-
static inline u64 rsvd_bits(int s, int e)
{
return ((1ULL << (e - s + 1)) - 1) << s;
@@ -81,9 +69,8 @@ enum {
};
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- bool execonly);
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
bool ept);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 41dd0387cccb..d319e0c24758 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1583,7 +1583,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+ unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
if (cr4 & X86_CR4_VMXE)
@@ -2003,8 +2003,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
-
+ WARN_ON(mmu_is_nested(vcpu));
+ kvm_init_shadow_mmu(vcpu);
vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index c2a34bb5ad93..7c7bc8bef21f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -848,6 +848,24 @@ TRACE_EVENT(kvm_track_tsc,
#endif /* CONFIG_X86_64 */
+/*
+ * Tracepoint for PML full VMEXIT.
+ */
+TRACE_EVENT(kvm_pml_full,
+ TP_PROTO(unsigned int vcpu_id),
+ TP_ARGS(vcpu_id),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ ),
+
+ TP_printk("vcpu %d: PML full", __entry->vcpu_id)
+);
+
TRACE_EVENT(kvm_ple_window,
TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
TP_ARGS(grow, vcpu_id, new, old),
@@ -914,6 +932,26 @@ TRACE_EVENT(kvm_pvclock_update,
__entry->flags)
);
+TRACE_EVENT(kvm_wait_lapic_expire,
+ TP_PROTO(unsigned int vcpu_id, s64 delta),
+ TP_ARGS(vcpu_id, delta),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( s64, delta )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->delta = delta;
+ ),
+
+ TP_printk("vcpu %u: delta %lld (%s)",
+ __entry->vcpu_id,
+ __entry->delta,
+ __entry->delta < 0 ? "early" : "late")
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4c58d884838..14c1a18d206a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -45,6 +45,7 @@
#include <asm/perf_event.h>
#include <asm/debugreg.h>
#include <asm/kexec.h>
+#include <asm/apic.h>
#include "trace.h"
@@ -101,6 +102,9 @@ module_param(nested, bool, S_IRUGO);
static u64 __read_mostly host_xss;
+static bool __read_mostly enable_pml = 1;
+module_param_named(pml, enable_pml, bool, S_IRUGO);
+
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
@@ -215,7 +219,12 @@ struct __packed vmcs12 {
u64 tsc_offset;
u64 virtual_apic_page_addr;
u64 apic_access_addr;
+ u64 posted_intr_desc_addr;
u64 ept_pointer;
+ u64 eoi_exit_bitmap0;
+ u64 eoi_exit_bitmap1;
+ u64 eoi_exit_bitmap2;
+ u64 eoi_exit_bitmap3;
u64 xss_exit_bitmap;
u64 guest_physical_address;
u64 vmcs_link_pointer;
@@ -330,6 +339,7 @@ struct __packed vmcs12 {
u32 vmx_preemption_timer_value;
u32 padding32[7]; /* room for future expansion */
u16 virtual_processor_id;
+ u16 posted_intr_nv;
u16 guest_es_selector;
u16 guest_cs_selector;
u16 guest_ss_selector;
@@ -338,6 +348,7 @@ struct __packed vmcs12 {
u16 guest_gs_selector;
u16 guest_ldtr_selector;
u16 guest_tr_selector;
+ u16 guest_intr_status;
u16 host_es_selector;
u16 host_cs_selector;
u16 host_ss_selector;
@@ -401,6 +412,10 @@ struct nested_vmx {
*/
struct page *apic_access_page;
struct page *virtual_apic_page;
+ struct page *pi_desc_page;
+ struct pi_desc *pi_desc;
+ bool pi_pending;
+ u16 posted_intr_nv;
u64 msr_ia32_feature_control;
struct hrtimer preemption_timer;
@@ -408,6 +423,23 @@ struct nested_vmx {
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
+
+ u32 nested_vmx_procbased_ctls_low;
+ u32 nested_vmx_procbased_ctls_high;
+ u32 nested_vmx_true_procbased_ctls_low;
+ u32 nested_vmx_secondary_ctls_low;
+ u32 nested_vmx_secondary_ctls_high;
+ u32 nested_vmx_pinbased_ctls_low;
+ u32 nested_vmx_pinbased_ctls_high;
+ u32 nested_vmx_exit_ctls_low;
+ u32 nested_vmx_exit_ctls_high;
+ u32 nested_vmx_true_exit_ctls_low;
+ u32 nested_vmx_entry_ctls_low;
+ u32 nested_vmx_entry_ctls_high;
+ u32 nested_vmx_true_entry_ctls_low;
+ u32 nested_vmx_misc_low;
+ u32 nested_vmx_misc_high;
+ u32 nested_vmx_ept_caps;
};
#define POSTED_INTR_ON 0
@@ -511,6 +543,10 @@ struct vcpu_vmx {
/* Dynamic PLE window. */
int ple_window;
bool ple_window_dirty;
+
+ /* Support for PML */
+#define PML_ENTITY_NUM 512
+ struct page *pml_pg;
};
enum segment_cache_field {
@@ -594,6 +630,7 @@ static int max_shadow_read_write_fields =
static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
+ FIELD(POSTED_INTR_NV, posted_intr_nv),
FIELD(GUEST_ES_SELECTOR, guest_es_selector),
FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
@@ -602,6 +639,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
+ FIELD(GUEST_INTR_STATUS, guest_intr_status),
FIELD(HOST_ES_SELECTOR, host_es_selector),
FIELD(HOST_CS_SELECTOR, host_cs_selector),
FIELD(HOST_SS_SELECTOR, host_ss_selector),
@@ -618,7 +656,12 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(TSC_OFFSET, tsc_offset),
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
+ FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
FIELD64(EPT_POINTER, ept_pointer),
+ FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
+ FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
+ FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
+ FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
@@ -766,6 +809,7 @@ static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
+static int vmx_vm_has_apicv(struct kvm *kvm);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -793,6 +837,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
+static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
@@ -959,16 +1004,6 @@ static inline bool cpu_has_vmx_ept_execute_only(void)
return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}
-static inline bool cpu_has_vmx_eptp_uncacheable(void)
-{
- return vmx_capability.ept & VMX_EPTP_UC_BIT;
-}
-
-static inline bool cpu_has_vmx_eptp_writeback(void)
-{
- return vmx_capability.ept & VMX_EPTP_WB_BIT;
-}
-
static inline bool cpu_has_vmx_ept_2m_page(void)
{
return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
@@ -1073,6 +1108,11 @@ static inline bool cpu_has_vmx_shadow_vmcs(void)
SECONDARY_EXEC_SHADOW_VMCS;
}
+static inline bool cpu_has_vmx_pml(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
+}
+
static inline bool report_flexpriority(void)
{
return flexpriority_enabled;
@@ -1112,6 +1152,26 @@ static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
vmx_xsaves_supported();
}
+static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+}
+
+static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
+}
+
+static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+}
+
+static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
+{
+ return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
+}
+
static inline bool is_exception(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2284,20 +2344,8 @@ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
* if the corresponding bit in the (32-bit) control field *must* be on, and a
* bit in the high half is on if the corresponding bit in the control field
* may be on. See also vmx_control_verify().
- * TODO: allow these variables to be modified (downgraded) by module options
- * or other means.
*/
-static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
-static u32 nested_vmx_true_procbased_ctls_low;
-static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
-static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
-static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
-static u32 nested_vmx_true_exit_ctls_low;
-static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
-static u32 nested_vmx_true_entry_ctls_low;
-static u32 nested_vmx_misc_low, nested_vmx_misc_high;
-static u32 nested_vmx_ept_caps;
-static __init void nested_vmx_setup_ctls_msrs(void)
+static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
{
/*
* Note that as a general rule, the high half of the MSRs (bits in
@@ -2316,57 +2364,74 @@ static __init void nested_vmx_setup_ctls_msrs(void)
/* pin-based controls */
rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
- nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
- nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
- nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
- PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS;
- nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ vmx->nested.nested_vmx_pinbased_ctls_low,
+ vmx->nested.nested_vmx_pinbased_ctls_high);
+ vmx->nested.nested_vmx_pinbased_ctls_low |=
+ PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ vmx->nested.nested_vmx_pinbased_ctls_high &=
+ PIN_BASED_EXT_INTR_MASK |
+ PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS;
+ vmx->nested.nested_vmx_pinbased_ctls_high |=
+ PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
PIN_BASED_VMX_PREEMPTION_TIMER;
+ if (vmx_vm_has_apicv(vmx->vcpu.kvm))
+ vmx->nested.nested_vmx_pinbased_ctls_high |=
+ PIN_BASED_POSTED_INTR;
/* exit controls */
rdmsr(MSR_IA32_VMX_EXIT_CTLS,
- nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
- nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
+ vmx->nested.nested_vmx_exit_ctls_low,
+ vmx->nested.nested_vmx_exit_ctls_high);
+ vmx->nested.nested_vmx_exit_ctls_low =
+ VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
- nested_vmx_exit_ctls_high &=
+ vmx->nested.nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
- nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
+ vmx->nested.nested_vmx_exit_ctls_high |=
+ VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
if (vmx_mpx_supported())
- nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+ vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
- nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low &
+ vmx->nested.nested_vmx_true_exit_ctls_low =
+ vmx->nested.nested_vmx_exit_ctls_low &
~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
- nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
- nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
- nested_vmx_entry_ctls_high &=
+ vmx->nested.nested_vmx_entry_ctls_low,
+ vmx->nested.nested_vmx_entry_ctls_high);
+ vmx->nested.nested_vmx_entry_ctls_low =
+ VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
+ vmx->nested.nested_vmx_entry_ctls_high &=
#ifdef CONFIG_X86_64
VM_ENTRY_IA32E_MODE |
#endif
VM_ENTRY_LOAD_IA32_PAT;
- nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
- VM_ENTRY_LOAD_IA32_EFER);
+ vmx->nested.nested_vmx_entry_ctls_high |=
+ (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
if (vmx_mpx_supported())
- nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
- nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low &
+ vmx->nested.nested_vmx_true_entry_ctls_low =
+ vmx->nested.nested_vmx_entry_ctls_low &
~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
- nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
- nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
- nested_vmx_procbased_ctls_high &=
+ vmx->nested.nested_vmx_procbased_ctls_low,
+ vmx->nested.nested_vmx_procbased_ctls_high);
+ vmx->nested.nested_vmx_procbased_ctls_low =
+ CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ vmx->nested.nested_vmx_procbased_ctls_high &=
CPU_BASED_VIRTUAL_INTR_PENDING |
CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
@@ -2386,45 +2451,55 @@ static __init void nested_vmx_setup_ctls_msrs(void)
* can use it to avoid exits to L1 - even when L0 runs L2
* without MSR bitmaps.
*/
- nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ vmx->nested.nested_vmx_procbased_ctls_high |=
+ CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
CPU_BASED_USE_MSR_BITMAPS;
/* We support free control of CR3 access interception. */
- nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low &
+ vmx->nested.nested_vmx_true_procbased_ctls_low =
+ vmx->nested.nested_vmx_procbased_ctls_low &
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
/* secondary cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
- nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
- nested_vmx_secondary_ctls_low = 0;
- nested_vmx_secondary_ctls_high &=
+ vmx->nested.nested_vmx_secondary_ctls_low,
+ vmx->nested.nested_vmx_secondary_ctls_high);
+ vmx->nested.nested_vmx_secondary_ctls_low = 0;
+ vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
SECONDARY_EXEC_XSAVES;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST;
- nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
+ vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
- nested_vmx_ept_caps &= vmx_capability.ept;
+ vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
/*
* For nested guests, we don't do anything specific
* for single context invalidation. Hence, only advertise
* support for global context invalidation.
*/
- nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
+ vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
} else
- nested_vmx_ept_caps = 0;
+ vmx->nested.nested_vmx_ept_caps = 0;
/* miscellaneous data */
- rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
- nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
- nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
+ rdmsr(MSR_IA32_VMX_MISC,
+ vmx->nested.nested_vmx_misc_low,
+ vmx->nested.nested_vmx_misc_high);
+ vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
+ vmx->nested.nested_vmx_misc_low |=
+ VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT;
- nested_vmx_misc_high = 0;
+ vmx->nested.nested_vmx_misc_high = 0;
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
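For readers unfamiliar with the low/high convention described in the comment above (low half: bits that *must* be on; high half: bits that *may* be on), a minimal standalone sketch of how such a capability MSR is packed and how a requested control word is validated follows. The helper names are illustrative; they mirror what vmx_control_msr()/vmx_control_verify() in this file are expected to do, but are not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /* Pack the allowed-0 (low) and allowed-1 (high) halves into the
     * 64-bit capability MSR format. */
    static uint64_t control_msr(uint32_t low, uint32_t high)
    {
            return ((uint64_t)high << 32) | low;
    }

    /* A requested control word is valid iff every bit required by 'low'
     * is set and no bit outside the set permitted by 'high' is set. */
    static bool control_verify(uint32_t control, uint32_t low, uint32_t high)
    {
            return ((control & low) == low) && ((control & ~high) == 0);
    }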
@@ -2443,6 +2518,8 @@ static inline u64 vmx_control_msr(u32 low, u32 high)
/* Returns 0 on success, non-0 otherwise. */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
switch (msr_index) {
case MSR_IA32_VMX_BASIC:
/*
@@ -2457,36 +2534,44 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
- *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
- nested_vmx_pinbased_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_pinbased_ctls_low,
+ vmx->nested.nested_vmx_pinbased_ctls_high);
break;
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low,
- nested_vmx_procbased_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_true_procbased_ctls_low,
+ vmx->nested.nested_vmx_procbased_ctls_high);
break;
case MSR_IA32_VMX_PROCBASED_CTLS:
- *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
- nested_vmx_procbased_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_procbased_ctls_low,
+ vmx->nested.nested_vmx_procbased_ctls_high);
break;
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low,
- nested_vmx_exit_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_true_exit_ctls_low,
+ vmx->nested.nested_vmx_exit_ctls_high);
break;
case MSR_IA32_VMX_EXIT_CTLS:
- *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
- nested_vmx_exit_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_exit_ctls_low,
+ vmx->nested.nested_vmx_exit_ctls_high);
break;
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low,
- nested_vmx_entry_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_true_entry_ctls_low,
+ vmx->nested.nested_vmx_entry_ctls_high);
break;
case MSR_IA32_VMX_ENTRY_CTLS:
- *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
- nested_vmx_entry_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_entry_ctls_low,
+ vmx->nested.nested_vmx_entry_ctls_high);
break;
case MSR_IA32_VMX_MISC:
- *pdata = vmx_control_msr(nested_vmx_misc_low,
- nested_vmx_misc_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_misc_low,
+ vmx->nested.nested_vmx_misc_high);
break;
/*
* These MSRs specify bits which the guest must keep fixed (on or off)
@@ -2511,12 +2596,13 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
- *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
- nested_vmx_secondary_ctls_high);
+ *pdata = vmx_control_msr(
+ vmx->nested.nested_vmx_secondary_ctls_low,
+ vmx->nested.nested_vmx_secondary_ctls_high);
break;
case MSR_IA32_VMX_EPT_VPID_CAP:
/* Currently, no nested vpid support */
- *pdata = nested_vmx_ept_caps;
+ *pdata = vmx->nested.nested_vmx_ept_caps;
break;
default:
return 1;
@@ -2785,7 +2871,7 @@ static int hardware_enable(void)
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
u64 old, test_bits;
- if (read_cr4() & X86_CR4_VMXE)
+ if (cr4_read_shadow() & X86_CR4_VMXE)
return -EBUSY;
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
@@ -2812,7 +2898,7 @@ static int hardware_enable(void)
/* enable and lock */
wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
}
- write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
+ cr4_set_bits(X86_CR4_VMXE);
if (vmm_exclusive) {
kvm_cpu_vmxon(phys_addr);
@@ -2849,7 +2935,7 @@ static void hardware_disable(void)
vmclear_local_loaded_vmcss();
kvm_cpu_vmxoff();
}
- write_cr4(read_cr4() & ~X86_CR4_VMXE);
+ cr4_clear_bits(X86_CR4_VMXE);
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
@@ -2929,7 +3015,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_SHADOW_VMCS |
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_ENABLE_PML;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -4159,6 +4246,52 @@ static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
}
}
+/*
+ * If an MSR is allowed by L0, we should check whether it is allowed by L1.
+ * The corresponding bit will be cleared unless both L0 and L1 allow it.
+ */
+static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
+ unsigned long *msr_bitmap_nested,
+ u32 msr, int type)
+{
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap()) {
+ WARN_ON(1);
+ return;
+ }
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ if (msr <= 0x1fff) {
+ if (type & MSR_TYPE_R &&
+ !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
+ /* read-low */
+ __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
+
+ if (type & MSR_TYPE_W &&
+ !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
+ /* write-low */
+ __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
+
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ if (type & MSR_TYPE_R &&
+ !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
+ /* read-high */
+ __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
+
+ if (type & MSR_TYPE_W &&
+ !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
+ /* write-high */
+ __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
+
+ }
+}
+
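The comment in the function above describes the four regions of the VMX MSR bitmap page (read-low at 0x000, read-high at 0x400, write-low at 0x800, write-high at 0xc00). A small standalone sketch of that layout, with an illustrative helper name that is not part of the patch:

    #include <stdint.h>

    /* Byte offset of the region holding the intercept bit for 'msr' in a
     * 4 KiB VMX MSR bitmap page; the bit index inside the region is
     * (msr & 0x1fff). Returns -1 for MSRs the bitmap cannot control. */
    static int msr_bitmap_region(uint32_t msr, int write)
    {
            if (msr <= 0x1fff)
                    return write ? 0x800 : 0x000;    /* low MSR range  */
            if (msr >= 0xc0000000u && msr <= 0xc0001fffu)
                    return write ? 0xc00 : 0x400;    /* high MSR range */
            return -1;                               /* always intercepted */
    }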
static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
{
if (!longmode_only)
@@ -4197,6 +4330,64 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
return enable_apicv && irqchip_in_kernel(kvm);
}
+static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int max_irr;
+ void *vapic_page;
+ u16 status;
+
+ if (vmx->nested.pi_desc &&
+ vmx->nested.pi_pending) {
+ vmx->nested.pi_pending = false;
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return 0;
+
+ max_irr = find_last_bit(
+ (unsigned long *)vmx->nested.pi_desc->pir, 256);
+
+ if (max_irr == 256)
+ return 0;
+
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ if (!vapic_page) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+ __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
+ kunmap(vmx->nested.virtual_apic_page);
+
+ status = vmcs_read16(GUEST_INTR_STATUS);
+ if ((u8)max_irr > ((u8)status & 0xff)) {
+ status &= ~0xff;
+ status |= (u8)max_irr;
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+ }
+ return 0;
+}
+
+static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+ int vector)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (is_guest_mode(vcpu) &&
+ vector == vmx->nested.posted_intr_nv) {
+ /* the PIR and ON have been set by L1. */
+ if (vcpu->mode == IN_GUEST_MODE)
+ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+ POSTED_INTR_VECTOR);
+ /*
+ * If a posted intr is not recognized by hardware,
+ * we will accomplish it in the next vmentry.
+ */
+ vmx->nested.pi_pending = true;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ return 0;
+ }
+ return -1;
+}
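For context, the two functions above operate on a posted-interrupt descriptor: the sender marks the vector in a 256-bit request bitmap (PIR), sets the outstanding-notification (ON) bit, and sends the notification IPI so a CPU in non-root mode processes it. The struct layout and helper below are a sketch under that assumption (only the ON-bit-at-bit-0 convention, POSTED_INTR_ON, is confirmed by the patch), not the kernel's own definition.

    #include <stdint.h>

    struct pi_desc_sketch {
            uint32_t pir[8];    /* 256-bit posted-interrupt request bitmap */
            uint32_t control;   /* bit 0 == POSTED_INTR_ON                 */
            uint32_t rsvd[7];
    } __attribute__((aligned(64)));

    /* Publish 'vector' in the descriptor; the caller then sends the
     * notification IPI so a CPU in non-root mode picks it up. */
    static void post_vector(struct pi_desc_sketch *pi, int vector)
    {
            __atomic_fetch_or(&pi->pir[vector / 32], 1u << (vector % 32),
                              __ATOMIC_SEQ_CST);
            __atomic_fetch_or(&pi->control, 1u, __ATOMIC_SEQ_CST);
    }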
/*
* Send interrupt to vcpu via posted interrupt way.
* 1. If target vcpu is running(non-root mode), send posted interrupt
@@ -4209,6 +4400,10 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
struct vcpu_vmx *vmx = to_vmx(vcpu);
int r;
+ r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ if (!r)
+ return;
+
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
return;
@@ -4255,7 +4450,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
/* Save the most likely value for this task's CR4 in the VMCS. */
- cr4 = read_cr4();
+ cr4 = cr4_read_shadow();
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
vmx->host_state.vmcs_host_cr4 = cr4;
@@ -4360,6 +4555,9 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
a current VMCS12
*/
exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
+ /* PML is enabled/disabled when creating/destroying the vcpu */
+ exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+
return exec_control;
}
@@ -4986,11 +5184,12 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
-static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val)
+static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
unsigned long always_on = VMXON_CR0_ALWAYSON;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- if (nested_vmx_secondary_ctls_high &
+ if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
SECONDARY_EXEC_UNRESTRICTED_GUEST &&
nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
always_on &= ~(X86_CR0_PE | X86_CR0_PG);
@@ -5015,7 +5214,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
- if (!nested_cr0_valid(vmcs12, val))
+ if (!nested_cr0_valid(vcpu, val))
return 1;
if (kvm_set_cr0(vcpu, val))
@@ -5817,13 +6016,21 @@ static __init int hardware_setup(void)
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode_x2apic)
goto out4;
+
+ if (nested) {
+ vmx_msr_bitmap_nested =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_nested)
+ goto out5;
+ }
+
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
- goto out5;
+ goto out6;
vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmwrite_bitmap)
- goto out6;
+ goto out7;
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -5839,10 +6046,12 @@ static __init int hardware_setup(void)
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
+ if (nested)
+ memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
- goto out7;
+ goto out8;
}
if (boot_cpu_has(X86_FEATURE_NX))
@@ -5868,16 +6077,16 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_unrestricted_guest())
enable_unrestricted_guest = 0;
- if (!cpu_has_vmx_flexpriority()) {
+ if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
- /*
- * set_apic_access_page_addr() is used to reload apic access
- * page upon invalidation. No need to do anything if the
- * processor does not have the APIC_ACCESS_ADDR VMCS field.
- */
+ /*
+ * set_apic_access_page_addr() is used to reload apic access
+ * page upon invalidation. No need to do anything if not
+ * using the APIC_ACCESS_ADDR VMCS field.
+ */
+ if (!flexpriority_enabled)
kvm_x86_ops->set_apic_access_page_addr = NULL;
- }
if (!cpu_has_vmx_tpr_shadow())
kvm_x86_ops->update_cr8_intercept = NULL;
@@ -5895,13 +6104,11 @@ static __init int hardware_setup(void)
kvm_x86_ops->update_cr8_intercept = NULL;
else {
kvm_x86_ops->hwapic_irr_update = NULL;
+ kvm_x86_ops->hwapic_isr_update = NULL;
kvm_x86_ops->deliver_posted_interrupt = NULL;
kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
}
- if (nested)
- nested_vmx_setup_ctls_msrs();
-
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
@@ -5945,12 +6152,29 @@ static __init int hardware_setup(void)
update_ple_window_actual_max();
+ /*
+ * Only enable PML when the hardware supports it and both the EPT and
+ * EPT A/D bit features are enabled -- PML depends on them to work.
+ */
+ if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
+ enable_pml = 0;
+
+ if (!enable_pml) {
+ kvm_x86_ops->slot_enable_log_dirty = NULL;
+ kvm_x86_ops->slot_disable_log_dirty = NULL;
+ kvm_x86_ops->flush_log_dirty = NULL;
+ kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
+ }
+
return alloc_kvm_area();
-out7:
+out8:
free_page((unsigned long)vmx_vmwrite_bitmap);
-out6:
+out7:
free_page((unsigned long)vmx_vmread_bitmap);
+out6:
+ if (nested)
+ free_page((unsigned long)vmx_msr_bitmap_nested);
out5:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
@@ -5977,6 +6201,8 @@ static __exit void hardware_unsetup(void)
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
+ if (nested)
+ free_page((unsigned long)vmx_msr_bitmap_nested);
free_kvm_area();
}
@@ -6143,6 +6369,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
*/
}
+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+ /* TODO: do something smarter here than simply resetting the guest. */
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
struct vcpu_vmx *vmx =
@@ -6432,6 +6665,7 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
+ vmx->nested.posted_intr_nv = -1;
kunmap(vmx->nested.current_vmcs12_page);
nested_release_page(vmx->nested.current_vmcs12_page);
vmx->nested.current_vmptr = -1ull;
@@ -6460,6 +6694,12 @@ static void free_nested(struct vcpu_vmx *vmx)
nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = NULL;
}
+ if (vmx->nested.pi_desc_page) {
+ kunmap(vmx->nested.pi_desc_page);
+ nested_release_page(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc_page = NULL;
+ vmx->nested.pi_desc = NULL;
+ }
nested_free_all_saved_vmcss(vmx);
}
@@ -6893,6 +7133,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vmx_instruction_info, types;
unsigned long type;
gva_t gva;
@@ -6901,8 +7142,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
u64 eptp, gpa;
} operand;
- if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
- !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
+ if (!(vmx->nested.nested_vmx_secondary_ctls_high &
+ SECONDARY_EXEC_ENABLE_EPT) ||
+ !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -6918,7 +7160,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
- types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
+ types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
@@ -6960,6 +7202,31 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_pml_full(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification;
+
+ trace_kvm_pml_full(vcpu->vcpu_id);
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ /*
+ * PML buffer FULL happened while executing iret from NMI,
+ * "blocked by NMI" bit has to be set before next VM entry.
+ */
+ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ cpu_has_virtual_nmis() &&
+ (exit_qualification & INTR_INFO_UNBLOCK_NMI))
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+
+ /*
+ * The PML buffer has already been flushed at the beginning of the
+ * VMEXIT. Nothing to do here, and no userspace involvement is needed
+ * for PML.
+ */
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -7008,6 +7275,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_INVVPID] = handle_invvpid,
[EXIT_REASON_XSAVES] = handle_xsaves,
[EXIT_REASON_XRSTORS] = handle_xrstors,
+ [EXIT_REASON_PML_FULL] = handle_pml_full,
};
static const int kvm_vmx_max_exit_handlers =
@@ -7275,6 +7543,10 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_APIC_ACCESS:
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ case EXIT_REASON_APIC_WRITE:
+ case EXIT_REASON_EOI_INDUCED:
+ /* apic_write and eoi_induced should exit unconditionally. */
+ return 1;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
@@ -7314,6 +7586,89 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}
+static int vmx_enable_pml(struct vcpu_vmx *vmx)
+{
+ struct page *pml_pg;
+ u32 exec_control;
+
+ pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pml_pg)
+ return -ENOMEM;
+
+ vmx->pml_pg = pml_pg;
+
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ exec_control |= SECONDARY_EXEC_ENABLE_PML;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+
+ return 0;
+}
+
+static void vmx_disable_pml(struct vcpu_vmx *vmx)
+{
+ u32 exec_control;
+
+ ASSERT(vmx->pml_pg);
+ __free_page(vmx->pml_pg);
+ vmx->pml_pg = NULL;
+
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+}
+
+static void vmx_flush_pml_buffer(struct vcpu_vmx *vmx)
+{
+ struct kvm *kvm = vmx->vcpu.kvm;
+ u64 *pml_buf;
+ u16 pml_idx;
+
+ pml_idx = vmcs_read16(GUEST_PML_INDEX);
+
+ /* Do nothing if PML buffer is empty */
+ if (pml_idx == (PML_ENTITY_NUM - 1))
+ return;
+
+ /* PML index always points to next available PML buffer entity */
+ if (pml_idx >= PML_ENTITY_NUM)
+ pml_idx = 0;
+ else
+ pml_idx++;
+
+ pml_buf = page_address(vmx->pml_pg);
+ for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
+ u64 gpa;
+
+ gpa = pml_buf[pml_idx];
+ WARN_ON(gpa & (PAGE_SIZE - 1));
+ mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
+ }
+
+ /* reset PML index */
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+}
+
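vmx_flush_pml_buffer() above relies on the hardware filling the 512-entry (PML_ENTITY_NUM) page from the top down, with GUEST_PML_INDEX holding the index of the next entry to be written. A standalone sketch of the index arithmetic, mirroring the adjustment done in the function above (illustrative only):

    #include <stdint.h>

    #define PML_ENTRIES 512   /* PML_ENTITY_NUM in the patch */

    /* How many logged GPAs does a raw GUEST_PML_INDEX value imply? */
    static int pml_valid_entries(uint16_t pml_idx)
    {
            if (pml_idx == PML_ENTRIES - 1)
                    return 0;                    /* index still at reset value */
            if (pml_idx >= PML_ENTRIES)
                    return PML_ENTRIES;          /* wrapped below 0: page full */
            return PML_ENTRIES - (pml_idx + 1);  /* entries idx+1 .. 511       */
    }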
+/*
+ * Flush all vcpus' PML buffers and record the logged GPAs in dirty_bitmap.
+ * Called before reporting dirty_bitmap to userspace.
+ */
+static void kvm_flush_pml_buffers(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+ /*
+ * We only need to kick each vcpu out of guest mode here: the PML
+ * buffer is flushed at the beginning of every VMEXIT, so only vcpus
+ * currently running in guest mode can have unflushed GPAs in their
+ * PML buffers.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
+}
+
/*
* The guest has exited. See if we can fix it or if we need userspace
* assistance.
@@ -7324,6 +7679,16 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
+ /*
+ * Flush the GPAs logged in the PML buffer so that dirty_bitmap is as
+ * up to date as possible. A further benefit: before querying
+ * dirty_bitmap in kvm_vm_ioctl_get_dirty_log, we only need to kick
+ * all vcpus out of guest mode, because a vcpu already in root mode
+ * must have had its PML buffer flushed.
+ */
+ if (enable_pml)
+ vmx_flush_pml_buffer(vmx);
+
/* If guest state is invalid, start emulating */
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
@@ -7471,9 +7836,6 @@ static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
u16 status;
u8 old;
- if (!vmx_vm_has_apicv(kvm))
- return;
-
if (isr == -1)
isr = 0;
@@ -7784,7 +8146,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
- cr4 = read_cr4();
+ cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
vmcs_writel(HOST_CR4, cr4);
vmx->host_state.vmcs_host_cr4 = cr4;
@@ -7973,6 +8335,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (enable_pml)
+ vmx_disable_pml(vmx);
free_vpid(vmx);
leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
@@ -8040,9 +8404,25 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
+ if (nested)
+ nested_vmx_setup_ctls_msrs(vmx);
+
+ vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;
+ /*
+ * If PML is turned on, a failure to enable PML simply fails creation
+ * of the vcpu. This keeps the PML logic simple (no need to handle
+ * cases such as PML being enabled on only some of the guest's vcpus).
+ */
+ if (enable_pml) {
+ err = vmx_enable_pml(vmx);
+ if (err)
+ goto free_vmcs;
+ }
+
return &vmx->vcpu;
free_vmcs:
@@ -8184,9 +8564,10 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
- nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
-
+ WARN_ON(mmu_is_nested(vcpu));
+ kvm_init_shadow_ept_mmu(vcpu,
+ to_vmx(vcpu)->nested.nested_vmx_ept_caps &
+ VMX_EPT_EXECUTE_ONLY_BIT);
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
@@ -8199,6 +8580,18 @@ static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}
+static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
+ u16 error_code)
+{
+ bool inequality, bit;
+
+ bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
+ inequality =
+ (error_code & vmcs12->page_fault_error_code_mask) !=
+ vmcs12->page_fault_error_code_match;
+ return inequality ^ bit;
+}
+
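The XOR in the function above encodes the architectural rule that a guest #PF reflects to L1 when either EB.PF is set and the error code matches PFEC_MATCH under PFEC_MASK, or EB.PF is clear and it does not match. A self-contained illustration with example values (not taken from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool pf_causes_vmexit(uint32_t error_code, bool eb_pf,
                                 uint32_t pfec_mask, uint32_t pfec_match)
    {
            bool inequality = (error_code & pfec_mask) != pfec_match;
            return inequality ^ eb_pf;
    }

    /* With eb_pf == true and pfec_mask == pfec_match == 0x6, only faults
     * whose error code has both the write (bit 1) and user (bit 2) bits
     * set reflect back to L1; every other #PF stays with L2. */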
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
@@ -8206,8 +8599,7 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
WARN_ON(!is_guest_mode(vcpu));
- /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
- if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
+ if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code))
nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
@@ -8261,6 +8653,31 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
return false;
}
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64))
+ return false;
+
+ if (vmx->nested.pi_desc_page) { /* shouldn't happen */
+ kunmap(vmx->nested.pi_desc_page);
+ nested_release_page(vmx->nested.pi_desc_page);
+ }
+ vmx->nested.pi_desc_page =
+ nested_get_page(vcpu, vmcs12->posted_intr_desc_addr);
+ if (!vmx->nested.pi_desc_page)
+ return false;
+
+ vmx->nested.pi_desc =
+ (struct pi_desc *)kmap(vmx->nested.pi_desc_page);
+ if (!vmx->nested.pi_desc) {
+ nested_release_page_clean(vmx->nested.pi_desc_page);
+ return false;
+ }
+ vmx->nested.pi_desc =
+ (struct pi_desc *)((void *)vmx->nested.pi_desc +
+ (unsigned long)(vmcs12->posted_intr_desc_addr &
+ (PAGE_SIZE - 1)));
+ }
+
return true;
}
@@ -8286,6 +8703,310 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
+static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ int maxphyaddr;
+ u64 addr;
+
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+ return 0;
+
+ if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+ if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
+ ((addr + PAGE_SIZE) >> maxphyaddr))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Merge L0's and L1's MSR bitmaps; return false to indicate that
+ * the hardware MSR bitmap should not be used.
+ */
+static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ int msr;
+ struct page *page;
+ unsigned long *msr_bitmap;
+
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
+ return false;
+
+ page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+ if (!page) {
+ WARN_ON(1);
+ return false;
+ }
+ msr_bitmap = (unsigned long *)kmap(page);
+ if (!msr_bitmap) {
+ nested_release_page_clean(page);
+ WARN_ON(1);
+ return false;
+ }
+
+ if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+ if (nested_cpu_has_apic_reg_virt(vmcs12))
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap,
+ vmx_msr_bitmap_nested,
+ msr, MSR_TYPE_R);
+ /* TPR is allowed */
+ nested_vmx_disable_intercept_for_msr(msr_bitmap,
+ vmx_msr_bitmap_nested,
+ APIC_BASE_MSR + (APIC_TASKPRI >> 4),
+ MSR_TYPE_R | MSR_TYPE_W);
+ if (nested_cpu_has_vid(vmcs12)) {
+ /* EOI and self-IPI are allowed */
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap,
+ vmx_msr_bitmap_nested,
+ APIC_BASE_MSR + (APIC_EOI >> 4),
+ MSR_TYPE_W);
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap,
+ vmx_msr_bitmap_nested,
+ APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
+ MSR_TYPE_W);
+ }
+ } else {
+ /*
+ * Enable read intercepts for all x2APIC MSRs. We
+ * should not rely on vmcs12 for any optimization
+ * here, since it may have been modified by L1.
+ */
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ __vmx_enable_intercept_for_msr(
+ vmx_msr_bitmap_nested,
+ msr,
+ MSR_TYPE_R);
+
+ __vmx_enable_intercept_for_msr(
+ vmx_msr_bitmap_nested,
+ APIC_BASE_MSR + (APIC_TASKPRI >> 4),
+ MSR_TYPE_W);
+ __vmx_enable_intercept_for_msr(
+ vmx_msr_bitmap_nested,
+ APIC_BASE_MSR + (APIC_EOI >> 4),
+ MSR_TYPE_W);
+ __vmx_enable_intercept_for_msr(
+ vmx_msr_bitmap_nested,
+ APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
+ MSR_TYPE_W);
+ }
+ kunmap(page);
+ nested_release_page_clean(page);
+
+ return true;
+}
+
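The APIC_BASE_MSR + (reg >> 4) expressions in the function above rely on the x2APIC convention that MMIO register offset 0xN0 maps to MSR 0x800 + 0xN, which is also why the loops walk MSRs 0x800-0x8ff. A quick standalone check of the three registers handled above (macro name here is illustrative):

    #include <stdint.h>

    #define X2APIC_MSR_BASE 0x800u   /* APIC_BASE_MSR in the kernel */

    static inline uint32_t x2apic_msr(uint32_t mmio_offset)
    {
            return X2APIC_MSR_BASE + (mmio_offset >> 4);
    }

    /* x2apic_msr(0x80)  == 0x808  (APIC_TASKPRI / TPR)
     * x2apic_msr(0xB0)  == 0x80B  (APIC_EOI)
     * x2apic_msr(0x3F0) == 0x83F  (APIC_SELF_IPI) */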
+static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !nested_cpu_has_apic_reg_virt(vmcs12) &&
+ !nested_cpu_has_vid(vmcs12) &&
+ !nested_cpu_has_posted_intr(vmcs12))
+ return 0;
+
+ /*
+ * If virtualize x2apic mode is enabled,
+ * virtualize apic access must be disabled.
+ */
+ if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ return -EINVAL;
+
+ /*
+ * If virtual interrupt delivery is enabled,
+ * we must exit on external interrupts.
+ */
+ if (nested_cpu_has_vid(vmcs12) &&
+ !nested_exit_on_intr(vcpu))
+ return -EINVAL;
+
+ /*
+ * Bits 15:8 of posted_intr_nv must be zero; the descriptor
+ * address has already been checked in nested_get_vmcs12_pages.
+ */
+ if (nested_cpu_has_posted_intr(vmcs12) &&
+ (!nested_cpu_has_vid(vmcs12) ||
+ !nested_exit_intr_ack_set(vcpu) ||
+ vmcs12->posted_intr_nv & 0xff00))
+ return -EINVAL;
+
+ /* tpr shadow is needed by all apicv features. */
+ if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
+ unsigned long count_field,
+ unsigned long addr_field,
+ int maxphyaddr)
+{
+ u64 count, addr;
+
+ if (vmcs12_read_any(vcpu, count_field, &count) ||
+ vmcs12_read_any(vcpu, addr_field, &addr)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ if (count == 0)
+ return 0;
+ if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
+ (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
+ pr_warn_ratelimited(
+ "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
+ addr_field, maxphyaddr, count, addr);
+ return -EINVAL;
+ }
+ return 0;
+}
+
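The sizeof(struct vmx_msr_entry) arithmetic above assumes the 16-byte MSR load/store area entry defined by the SDM; the field names (index, reserved, value) match the ones used by nested_vmx_load_msr()/nested_vmx_store_msr() later in this patch. A sketch of that layout for reference (the struct name here is illustrative):

    #include <stdint.h>

    struct vmx_msr_entry_sketch {
            uint32_t index;      /* MSR number                      */
            uint32_t reserved;   /* must be zero                    */
            uint64_t value;      /* value loaded/stored by the CPU  */
    };

    /* Each load/store area must be 16-byte aligned and must fit below the
     * guest's physical-address width -- exactly what the checks enforce. */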
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ int maxphyaddr;
+
+ if (vmcs12->vm_exit_msr_load_count == 0 &&
+ vmcs12->vm_exit_msr_store_count == 0 &&
+ vmcs12->vm_entry_msr_load_count == 0)
+ return 0; /* Fast path */
+ maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
+ VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+ nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
+ VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+ nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
+ VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
+ struct vmx_msr_entry *e)
+{
+ /* x2APIC MSR accesses are not allowed */
+ if (apic_x2apic_mode(vcpu->arch.apic) && e->index >> 8 == 0x8)
+ return -EINVAL;
+ if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
+ e->index == MSR_IA32_UCODE_REV)
+ return -EINVAL;
+ if (e->reserved != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
+ struct vmx_msr_entry *e)
+{
+ if (e->index == MSR_FS_BASE ||
+ e->index == MSR_GS_BASE ||
+ e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
+ nested_vmx_msr_check_common(vcpu, e))
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
+ struct vmx_msr_entry *e)
+{
+ if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
+ nested_vmx_msr_check_common(vcpu, e))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Load the guest's/host's MSRs at nested entry/exit.
+ * Returns 0 on success, or the 1-based index of the failing entry.
+ */
+static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i;
+ struct vmx_msr_entry e;
+ struct msr_data msr;
+
+ msr.host_initiated = false;
+ for (i = 0; i < count; i++) {
+ if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+ &e, sizeof(e))) {
+ pr_warn_ratelimited(
+ "%s cannot read MSR entry (%u, 0x%08llx)\n",
+ __func__, i, gpa + i * sizeof(e));
+ goto fail;
+ }
+ if (nested_vmx_load_msr_check(vcpu, &e)) {
+ pr_warn_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, i, e.index, e.reserved);
+ goto fail;
+ }
+ msr.index = e.index;
+ msr.data = e.value;
+ if (kvm_set_msr(vcpu, &msr)) {
+ pr_warn_ratelimited(
+ "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+ __func__, i, e.index, e.value);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return i + 1;
+}
+
+static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i;
+ struct vmx_msr_entry e;
+
+ for (i = 0; i < count; i++) {
+ if (kvm_read_guest(vcpu->kvm,
+ gpa + i * sizeof(e),
+ &e, 2 * sizeof(u32))) {
+ pr_warn_ratelimited(
+ "%s cannot read MSR entry (%u, 0x%08llx)\n",
+ __func__, i, gpa + i * sizeof(e));
+ return -EINVAL;
+ }
+ if (nested_vmx_store_msr_check(vcpu, &e)) {
+ pr_warn_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, i, e.index, e.reserved);
+ return -EINVAL;
+ }
+ if (kvm_get_msr(vcpu, e.index, &e.value)) {
+ pr_warn_ratelimited(
+ "%s cannot read MSR (%u, 0x%x)\n",
+ __func__, i, e.index);
+ return -EINVAL;
+ }
+ if (kvm_write_guest(vcpu->kvm,
+ gpa + i * sizeof(e) +
+ offsetof(struct vmx_msr_entry, value),
+ &e.value, sizeof(e.value))) {
+ pr_warn_ratelimited(
+ "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+ __func__, i, e.index, e.value);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
/*
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -8365,8 +9086,23 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control = vmcs12->pin_based_vm_exec_control;
exec_control |= vmcs_config.pin_based_exec_ctrl;
- exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
- PIN_BASED_POSTED_INTR);
+ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ /*
+ * Note that we use L0's vector here and in
+ * vmx_deliver_nested_posted_interrupt.
+ */
+ vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
+ vmx->nested.pi_pending = false;
+ vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
+ vmcs_write64(POSTED_INTR_DESC_ADDR,
+ page_to_phys(vmx->nested.pi_desc_page) +
+ (unsigned long)(vmcs12->posted_intr_desc_addr &
+ (PAGE_SIZE - 1)));
+ } else
+ exec_control &= ~PIN_BASED_POSTED_INTR;
+
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
vmx->nested.preemption_timer_expired = false;
@@ -8423,12 +9159,26 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs_write64(APIC_ACCESS_ADDR,
page_to_phys(vmx->nested.apic_access_page));
- } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
+ } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
+ (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
kvm_vcpu_reload_apic_access_page(vcpu);
}
+ if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
+ vmcs_write64(EOI_EXIT_BITMAP0,
+ vmcs12->eoi_exit_bitmap0);
+ vmcs_write64(EOI_EXIT_BITMAP1,
+ vmcs12->eoi_exit_bitmap1);
+ vmcs_write64(EOI_EXIT_BITMAP2,
+ vmcs12->eoi_exit_bitmap2);
+ vmcs_write64(EOI_EXIT_BITMAP3,
+ vmcs12->eoi_exit_bitmap3);
+ vmcs_write16(GUEST_INTR_STATUS,
+ vmcs12->guest_intr_status);
+ }
+
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -8462,11 +9212,17 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
}
+ if (cpu_has_vmx_msr_bitmap() &&
+ exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
+ vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+ } else
+ exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
+
/*
- * Merging of IO and MSR bitmaps not currently supported.
+ * Merging of the IO bitmaps is not currently supported.
* Rather, exit every time.
*/
- exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
exec_control |= CPU_BASED_UNCOND_IO_EXITING;
@@ -8582,6 +9338,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
int cpu;
struct loaded_vmcs *vmcs02;
bool ia32e;
+ u32 msr_entry_idx;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
@@ -8616,41 +9373,42 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return 1;
}
- if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
- !PAGE_ALIGNED(vmcs12->msr_bitmap)) {
+ if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
/*TODO: Also verify bits beyond physical address width are 0*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
- if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
- /*TODO: Also verify bits beyond physical address width are 0*/
+ if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
- if (vmcs12->vm_entry_msr_load_count > 0 ||
- vmcs12->vm_exit_msr_load_count > 0 ||
- vmcs12->vm_exit_msr_store_count > 0) {
- pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
- __func__);
+ if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
- nested_vmx_true_procbased_ctls_low,
- nested_vmx_procbased_ctls_high) ||
+ vmx->nested.nested_vmx_true_procbased_ctls_low,
+ vmx->nested.nested_vmx_procbased_ctls_high) ||
!vmx_control_verify(vmcs12->secondary_vm_exec_control,
- nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
+ vmx->nested.nested_vmx_secondary_ctls_low,
+ vmx->nested.nested_vmx_secondary_ctls_high) ||
!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
- nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
+ vmx->nested.nested_vmx_pinbased_ctls_low,
+ vmx->nested.nested_vmx_pinbased_ctls_high) ||
!vmx_control_verify(vmcs12->vm_exit_controls,
- nested_vmx_true_exit_ctls_low,
- nested_vmx_exit_ctls_high) ||
+ vmx->nested.nested_vmx_true_exit_ctls_low,
+ vmx->nested.nested_vmx_exit_ctls_high) ||
!vmx_control_verify(vmcs12->vm_entry_controls,
- nested_vmx_true_entry_ctls_low,
- nested_vmx_entry_ctls_high))
+ vmx->nested.nested_vmx_true_entry_ctls_low,
+ vmx->nested.nested_vmx_entry_ctls_high))
{
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
@@ -8663,7 +9421,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return 1;
}
- if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) ||
+ if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
@@ -8739,10 +9497,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx_segment_cache_clear(vmx);
- vmcs12->launch_state = 1;
-
prepare_vmcs02(vcpu, vmcs12);
+ msr_entry_idx = nested_vmx_load_msr(vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (msr_entry_idx) {
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
+ return 1;
+ }
+
+ vmcs12->launch_state = 1;
+
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
return kvm_emulate_halt(vcpu);
@@ -8869,9 +9638,10 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
if (vmx->nested.nested_run_pending)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+ return 0;
}
- return 0;
+ return vmx_complete_nested_posted_interrupt(vcpu);
}
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
@@ -8981,6 +9751,9 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
}
+ if (nested_cpu_has_vid(vmcs12))
+ vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
+
vmcs12->vm_entry_controls =
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
@@ -9172,6 +9945,13 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
kvm_set_dr(vcpu, 7, 0x400);
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_set_msr_bitmap(vcpu);
+
+ if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
+ vmcs12->vm_exit_msr_load_count))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
/*
@@ -9193,6 +9973,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
exit_qualification);
+ if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
+ vmcs12->vm_exit_msr_store_count))
+ nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+
vmx_load_vmcs01(vcpu);
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
@@ -9235,6 +10019,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = NULL;
}
+ if (vmx->nested.pi_desc_page) {
+ kunmap(vmx->nested.pi_desc_page);
+ nested_release_page(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc_page = NULL;
+ vmx->nested.pi_desc = NULL;
+ }
/*
* We are now running in L2, mmu_notifier will force to reload the
@@ -9301,6 +10091,31 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
shrink_ple_window(vcpu);
}
+static void vmx_slot_enable_log_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
+ kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
+}
+
+static void vmx_slot_disable_log_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ kvm_mmu_slot_set_dirty(kvm, slot);
+}
+
+static void vmx_flush_log_dirty(struct kvm *kvm)
+{
+ kvm_flush_pml_buffers(kvm);
+}
+
+static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ gfn_t offset, unsigned long mask)
+{
+ kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -9409,6 +10224,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
.check_nested_events = vmx_check_nested_events,
.sched_in = vmx_sched_in,
+
+ .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
+ .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
+ .flush_log_dirty = vmx_flush_log_dirty,
+ .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c259814200bd..bd7a70be41b3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,6 +108,10 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
+/* lapic timer advance (tscdeadline mode only) in nanoseconds */
+unsigned int lapic_timer_advance_ns = 0;
+module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
+
static bool backwards_tsc_observed = false;
#define KVM_NR_SHARED_MSRS 16
@@ -141,6 +145,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "irq_window", VCPU_STAT(irq_window_exits) },
{ "nmi_window", VCPU_STAT(nmi_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
+ { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) },
@@ -492,7 +497,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
-int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
void *data, int offset, int len, u32 access)
{
return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
@@ -643,7 +648,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
}
}
-int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0 = xcr;
u64 old_xcr0 = vcpu->arch.xcr0;
@@ -1083,6 +1088,15 @@ static void update_pvclock_gtod(struct timekeeper *tk)
}
#endif
+void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
+ * vcpu_enter_guest. This function is only called from
+ * the physical CPU that is running vcpu.
+ */
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+}
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
@@ -1180,7 +1194,7 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
-unsigned long max_tsc_khz;
+static unsigned long max_tsc_khz;
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
@@ -1234,7 +1248,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
return tsc;
}
-void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
bool vcpus_matched;
@@ -1529,7 +1543,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
&ka->master_cycle_now);
ka->use_master_clock = host_tsc_clocksource && vcpus_matched
- && !backwards_tsc_observed;
+ && !backwards_tsc_observed
+ && !ka->boot_vcpu_runs_old_kvmclock;
if (ka->use_master_clock)
atomic_set(&kvm_guest_has_master_clock, 1);
@@ -2161,8 +2176,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
u64 gpa_offset;
+ struct kvm_arch *ka = &vcpu->kvm->arch;
+
kvmclock_reset(vcpu);
+ if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
+ bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
+
+ if (ka->boot_vcpu_runs_old_kvmclock != tmp)
+ set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
+ &vcpu->requests);
+
+ ka->boot_vcpu_runs_old_kvmclock = tmp;
+ }
+
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
@@ -2324,6 +2351,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
+EXPORT_SYMBOL_GPL(kvm_get_msr);
static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
@@ -2738,6 +2766,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_READONLY_MEM:
case KVM_CAP_HYPERV_TIME:
case KVM_CAP_IOAPIC_POLARITY_IGNORED:
+ case KVM_CAP_TSC_DEADLINE_TIMER:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
@@ -2776,9 +2805,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
- case KVM_CAP_TSC_DEADLINE_TIMER:
- r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
- break;
default:
r = 0;
break;
@@ -3734,83 +3760,43 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
* @kvm: kvm instance
* @log: slot id and address to which we copy the log
*
- * We need to keep it in mind that VCPU threads can write to the bitmap
- * concurrently. So, to avoid losing data, we keep the following order for
- * each bit:
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
+ * always flush the TLB (step 4) even if the previous step failed and the
+ * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
+ * logging API does not preclude a subsequent dirty log read from user space.
+ * Flushing the TLB ensures that writes will be marked dirty for the next
+ * log read.
*
* 1. Take a snapshot of the bit and clear it if needed.
* 2. Write protect the corresponding page.
- * 3. Flush TLB's if needed.
- * 4. Copy the snapshot to the userspace.
- *
- * Between 2 and 3, the guest may write to the page using the remaining TLB
- * entry. This is not a problem because the page will be reported dirty at
- * step 4 using the snapshot taken before and step 3 ensures that successive
- * writes will be logged for the next call.
+ * 3. Copy the snapshot to the userspace.
+ * 4. Flush TLB's if needed.
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- int r;
- struct kvm_memory_slot *memslot;
- unsigned long n, i;
- unsigned long *dirty_bitmap;
- unsigned long *dirty_bitmap_buffer;
bool is_dirty = false;
+ int r;
mutex_lock(&kvm->slots_lock);
- r = -EINVAL;
- if (log->slot >= KVM_USER_MEM_SLOTS)
- goto out;
-
- memslot = id_to_memslot(kvm->memslots, log->slot);
-
- dirty_bitmap = memslot->dirty_bitmap;
- r = -ENOENT;
- if (!dirty_bitmap)
- goto out;
-
- n = kvm_dirty_bitmap_bytes(memslot);
-
- dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
- memset(dirty_bitmap_buffer, 0, n);
-
- spin_lock(&kvm->mmu_lock);
-
- for (i = 0; i < n / sizeof(long); i++) {
- unsigned long mask;
- gfn_t offset;
-
- if (!dirty_bitmap[i])
- continue;
-
- is_dirty = true;
-
- mask = xchg(&dirty_bitmap[i], 0);
- dirty_bitmap_buffer[i] = mask;
-
- offset = i * BITS_PER_LONG;
- kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
- }
-
- spin_unlock(&kvm->mmu_lock);
+ /*
+ * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+ */
+ if (kvm_x86_ops->flush_log_dirty)
+ kvm_x86_ops->flush_log_dirty(kvm);
- /* See the comments in kvm_mmu_slot_remove_write_access(). */
- lockdep_assert_held(&kvm->slots_lock);
+ r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
/*
* All the TLBs can be flushed out of mmu lock, see the comments in
* kvm_mmu_slot_remove_write_access().
*/
+ lockdep_assert_held(&kvm->slots_lock);
if (is_dirty)
kvm_flush_remote_tlbs(kvm);
- r = -EFAULT;
- if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
- goto out;
-
- r = 0;
-out:
mutex_unlock(&kvm->slots_lock);
return r;
}
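As a minimal sketch of how the ordering documented above looks from user space (assuming a VM file descriptor vm_fd, a slot registered with KVM_MEM_LOG_DIRTY_PAGES, and a caller-sized bitmap, none of which come from this patch): steps 1-3 and the TLB flush all complete inside the ioctl, and the caller simply receives the snapshot.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: fetch the dirty bitmap for one memory slot.
 * Steps 1-3 of the comment above, plus the TLB flush, happen inside
 * the ioctl; the caller only sees the snapshot copied out in step 3. */
static int fetch_dirty_log(int vm_fd, int slot, unsigned long *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}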
@@ -4516,6 +4502,8 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
if (rc != X86EMUL_CONTINUE)
return rc;
addr += now;
+ if (ctxt->mode != X86EMUL_MODE_PROT64)
+ addr = (u32)addr;
val += now;
bytes -= now;
}
@@ -4984,6 +4972,11 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon
kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}
+static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
+{
+ kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
@@ -5019,6 +5012,7 @@ static const struct x86_emulate_ops emulate_ops = {
.put_fpu = emulator_put_fpu,
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
+ .set_nmi_mask = emulator_set_nmi_mask,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6311,6 +6305,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
trace_kvm_entry(vcpu->vcpu_id);
+ wait_lapic_expire(vcpu);
kvm_x86_ops->run(vcpu);
/*
@@ -7041,15 +7036,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return r;
}
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
- int r;
struct msr_data msr;
struct kvm *kvm = vcpu->kvm;
- r = vcpu_load(vcpu);
- if (r)
- return r;
+ if (vcpu_load(vcpu))
+ return;
msr.data = 0x0;
msr.index = MSR_IA32_TSC;
msr.host_initiated = true;
@@ -7058,8 +7051,6 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
KVMCLOCK_SYNC_PERIOD);
-
- return r;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -7549,12 +7540,62 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
return 0;
}
+static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
+ struct kvm_memory_slot *new)
+{
+ /* Still write protect RO slot */
+ if (new->flags & KVM_MEM_READONLY) {
+ kvm_mmu_slot_remove_write_access(kvm, new);
+ return;
+ }
+
+ /*
+ * Call kvm_x86_ops dirty logging hooks when they are valid.
+ *
+ * kvm_x86_ops->slot_disable_log_dirty is called when:
+ *
+ * - KVM_MR_CREATE with dirty logging is disabled
+ * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
+ *
+ * The reason is, in the case of PML, we need to set the D-bit for any
+ * slots with dirty logging disabled in order to eliminate unnecessary GPA
+ * logging in the PML buffer (and potential PML-buffer-full VMEXITs). This
+ * guarantees that leaving PML enabled during the guest's lifetime won't
+ * add any additional overhead from PML when the guest is running with
+ * dirty logging disabled for its memory slots.
+ *
+ * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
+ * to dirty logging mode.
+ *
+ * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
+ *
+ * In case of write protect:
+ *
+ * Write protect all pages for dirty logging.
+ *
+ * All the sptes including the large sptes which point to this
+ * slot are set to readonly. We can not create any new large
+ * spte on this slot until the end of the logging.
+ *
+ * See the comments in fast_page_fault().
+ */
+ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ if (kvm_x86_ops->slot_enable_log_dirty)
+ kvm_x86_ops->slot_enable_log_dirty(kvm, new);
+ else
+ kvm_mmu_slot_remove_write_access(kvm, new);
+ } else {
+ if (kvm_x86_ops->slot_disable_log_dirty)
+ kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+ }
+}
+
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{
-
+ struct kvm_memory_slot *new;
int nr_mmu_pages = 0;
if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
@@ -7573,17 +7614,20 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
if (nr_mmu_pages)
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+
+ /* It's OK to get 'new' slot here as it has already been installed */
+ new = id_to_memslot(kvm->memslots, mem->slot);
+
/*
- * Write protect all pages for dirty logging.
+ * Set up write protection and/or dirty logging for the new slot.
*
- * All the sptes including the large sptes which point to this
- * slot are set to readonly. We can not create any new large
- * spte on this slot until the end of the logging.
- *
- * See the comments in fast_page_fault().
+ * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old slot
+ * have been zapped, so no dirty logging tracking is needed for the old
+ * slot. For KVM_MR_FLAGS_ONLY, the old slot is essentially the same one
+ * as the new, and it is also covered when dealing with the new slot.
*/
- if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
- kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+ if (change != KVM_MR_DELETE)
+ kvm_mmu_slot_apply_flags(kvm, new);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -7837,3 +7881,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index cc1d61af6140..f5fef1868096 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,7 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
@@ -170,5 +171,7 @@ extern u64 kvm_supported_xcr0(void);
extern unsigned int min_timer_period_us;
+extern unsigned int lapic_timer_advance_ns;
+
extern struct static_key kvm_no_apic_vcpu;
#endif
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 56313a326188..89b53c9968e7 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -53,6 +53,8 @@
.Lmemcpy_e_e:
.previous
+.weak memcpy
+
ENTRY(__memcpy)
ENTRY(memcpy)
CFI_STARTPROC
@@ -199,8 +201,8 @@ ENDPROC(__memcpy)
* only outcome...
*/
.section .altinstructions, "a"
- altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
+ altinstruction_entry __memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
.Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
- altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
+ altinstruction_entry __memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
.Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
.previous
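The new .weak marking lets a stronger definition of memcpy elsewhere in the kernel (for this series, presumably an instrumented KASAN wrapper) take precedence at link time, while __memcpy keeps naming the plain assembly version. A small C sketch of that weak/strong linkage, with made-up names purely for illustration:

#include <stdio.h>

/* A weak default: the linker uses it only if no other object file
 * provides a strong get_answer(). */
__attribute__((weak)) int get_answer(void)
{
	return 0;
}

/* In another object file, a plain (strong) definition of get_answer()
 * would silently replace the weak one at link time, which is the same
 * mechanism ".weak memcpy" relies on, with __memcpy left as the
 * always-plain entry point. */
int main(void)
{
	printf("%d\n", get_answer());	/* prints 0 unless overridden */
	return 0;
}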
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 65268a6104f4..9c4b530575da 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -24,7 +24,10 @@
* Output:
* rax: dest
*/
+.weak memmove
+
ENTRY(memmove)
+ENTRY(__memmove)
CFI_STARTPROC
/* Handle more 32 bytes in loop */
@@ -220,4 +223,5 @@ ENTRY(memmove)
.Lmemmove_end_forward-.Lmemmove_begin_forward, \
.Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
.previous
+ENDPROC(__memmove)
ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2dcb3808cbda..6f44935c6a60 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -56,6 +56,8 @@
.Lmemset_e_e:
.previous
+.weak memset
+
ENTRY(memset)
ENTRY(__memset)
CFI_STARTPROC
@@ -147,8 +149,8 @@ ENDPROC(__memset)
* feature to implement the right patch order.
*/
.section .altinstructions,"a"
- altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
- .Lfinal-memset,.Lmemset_e-.Lmemset_c
- altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
- .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e
+ altinstruction_entry __memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
+ .Lfinal-__memset,.Lmemset_e-.Lmemset_c
+ altinstruction_entry __memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
+ .Lfinal-__memset,.Lmemset_e_e-.Lmemset_c_e
.previous
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index ecfdc46a024a..c4cc74006c61 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -20,6 +20,9 @@ obj-$(CONFIG_HIGHMEM) += highmem_32.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck/
+KASAN_SANITIZE_kasan_init_$(BITS).o := n
+obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o
+
obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 38dcec403b46..ede025fb46f1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -600,7 +600,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
if (pte && pte_present(*pte) && pte_exec(*pte) &&
(pgd_flags(*pgd) & _PAGE_USER) &&
- (read_cr4() & X86_CR4_SMEP))
+ (__read_cr4() & X86_CR4_SMEP))
printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
}
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index d7547824e763..81bf3d2af3eb 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -84,7 +84,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
struct page *page;
/* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_numa(pte)) {
+ if (pte_protnone(pte)) {
pte_unmap(ptep);
return 0;
}
@@ -172,13 +172,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (unlikely(pmd_large(pmd))) {
+ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
- if (pmd_numa(pmd))
+ if (pmd_protnone(pmd))
return 0;
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
@@ -388,10 +388,9 @@ slow_irqon:
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ (end - start) >> PAGE_SHIFT,
+ write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8b977ebf9388..42982b26e32b 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -52,23 +52,17 @@ int pud_huge(pud_t pud)
return 0;
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- return NULL;
-}
#else
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
+/*
+ * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
+ * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
+ * entry. Otherwise, it returns 0.
+ */
int pmd_huge(pmd_t pmd)
{
- return !!(pmd_val(pmd) & _PAGE_PSE);
+ return !pmd_none(pmd) &&
+ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
int pud_huge(pud_t pud)
@@ -178,4 +172,15 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_CMA
+static __init int gigantic_pages_init(void)
+{
+ /* With CMA we can allocate gigantic pages at runtime */
+ if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ return 0;
+}
+arch_initcall(gigantic_pages_init);
+#endif
#endif
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 08a7d313538a..553c094b9cd7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -43,7 +43,7 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
[_PAGE_CACHE_MODE_WT] = _PAGE_PCD,
[_PAGE_CACHE_MODE_WP] = _PAGE_PCD,
};
-EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
+EXPORT_SYMBOL(__cachemode2pte_tbl);
uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
[__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
@@ -54,7 +54,7 @@ uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
-EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
+EXPORT_SYMBOL(__pte2cachemode_tbl);
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
@@ -173,11 +173,11 @@ static void __init probe_page_size_mask(void)
/* Enable PSE if available */
if (cpu_has_pse)
- set_in_cr4(X86_CR4_PSE);
+ cr4_set_bits_and_update_boot(X86_CR4_PSE);
/* Enable PGE if available */
if (cpu_has_pge) {
- set_in_cr4(X86_CR4_PGE);
+ cr4_set_bits_and_update_boot(X86_CR4_PGE);
__supported_pte_mask |= _PAGE_GLOBAL;
}
}
@@ -608,7 +608,7 @@ void __init init_mem_mapping(void)
*
*
* On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
+ * contains BIOS code and data regions used by X and dosemu and similar apps.
* Access has to be given to non-kernel-ram areas as well, these contain the PCI
* mmio resources as well as potential bios/acpi data regions.
*/
@@ -713,6 +713,15 @@ void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns);
}
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+#ifdef CONFIG_SMP
+ .active_mm = &init_mm,
+ .state = 0,
+#endif
+ .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
+};
+EXPORT_SYMBOL_GPL(cpu_tlbstate);
+
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
/* entry 0 MUST be WB (hardwired to speed up translations) */
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
new file mode 100644
index 000000000000..4860906c6b9f
--- /dev/null
+++ b/arch/x86/mm/kasan_init_64.c
@@ -0,0 +1,206 @@
+#include <linux/bootmem.h>
+#include <linux/kasan.h>
+#include <linux/kdebug.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+
+extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern struct range pfn_mapped[E820_X_MAX];
+
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+
+static int __init map_range(struct range *range)
+{
+ unsigned long start;
+ unsigned long end;
+
+ start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
+ end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
+
+ /*
+	 * end + 1 here is intentional. We check several shadow bytes in advance
+	 * to slightly speed up the fastpath. In some rare cases we could cross
+	 * the boundary of the mapped shadow, so we just map some more here.
+ */
+ return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+}
+
+static void __init clear_pgds(unsigned long start,
+ unsigned long end)
+{
+ for (; start < end; start += PGDIR_SIZE)
+ pgd_clear(pgd_offset_k(start));
+}
+
+void __init kasan_map_early_shadow(pgd_t *pgd)
+{
+ int i;
+ unsigned long start = KASAN_SHADOW_START;
+ unsigned long end = KASAN_SHADOW_END;
+
+ for (i = pgd_index(start); start < end; i++) {
+ pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
+ | _KERNPG_TABLE);
+ start += PGDIR_SIZE;
+ }
+}
+
+static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+
+ while (addr + PAGE_SIZE <= end) {
+ WARN_ON(!pte_none(*pte));
+ set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
+ | __PAGE_KERNEL_RO));
+ addr += PAGE_SIZE;
+ pte = pte_offset_kernel(pmd, addr);
+ }
+ return 0;
+}
+
+static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ int ret = 0;
+ pmd_t *pmd = pmd_offset(pud, addr);
+
+ while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
+ WARN_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
+ | __PAGE_KERNEL_RO));
+ addr += PMD_SIZE;
+ pmd = pmd_offset(pud, addr);
+ }
+ if (addr < end) {
+ if (pmd_none(*pmd)) {
+ void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+ if (!p)
+ return -ENOMEM;
+ set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
+ }
+ ret = zero_pte_populate(pmd, addr, end);
+ }
+ return ret;
+}
+
+
+static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ int ret = 0;
+ pud_t *pud = pud_offset(pgd, addr);
+
+ while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
+ WARN_ON(!pud_none(*pud));
+ set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
+ | __PAGE_KERNEL_RO));
+ addr += PUD_SIZE;
+ pud = pud_offset(pgd, addr);
+ }
+
+ if (addr < end) {
+ if (pud_none(*pud)) {
+ void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+ if (!p)
+ return -ENOMEM;
+ set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
+ }
+ ret = zero_pmd_populate(pud, addr, end);
+ }
+ return ret;
+}
+
+static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
+{
+ int ret = 0;
+ pgd_t *pgd = pgd_offset_k(addr);
+
+ while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
+ WARN_ON(!pgd_none(*pgd));
+ set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
+ | __PAGE_KERNEL_RO));
+ addr += PGDIR_SIZE;
+ pgd = pgd_offset_k(addr);
+ }
+
+ if (addr < end) {
+ if (pgd_none(*pgd)) {
+ void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+ if (!p)
+ return -ENOMEM;
+ set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
+ }
+ ret = zero_pud_populate(pgd, addr, end);
+ }
+ return ret;
+}
+
+
+static void __init populate_zero_shadow(const void *start, const void *end)
+{
+ if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
+ panic("kasan: unable to map zero shadow!");
+}
+
+
+#ifdef CONFIG_KASAN_INLINE
+static int kasan_die_handler(struct notifier_block *self,
+ unsigned long val,
+ void *data)
+{
+ if (val == DIE_GPF) {
+ pr_emerg("CONFIG_KASAN_INLINE enabled");
+ pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block kasan_die_notifier = {
+ .notifier_call = kasan_die_handler,
+};
+#endif
+
+void __init kasan_init(void)
+{
+ int i;
+
+#ifdef CONFIG_KASAN_INLINE
+ register_die_notifier(&kasan_die_notifier);
+#endif
+
+ memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
+ load_cr3(early_level4_pgt);
+
+ clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ populate_zero_shadow((void *)KASAN_SHADOW_START,
+ kasan_mem_to_shadow((void *)PAGE_OFFSET));
+
+ for (i = 0; i < E820_X_MAX; i++) {
+ if (pfn_mapped[i].end == 0)
+ break;
+
+ if (map_range(&pfn_mapped[i]))
+ panic("kasan: unable to allocate shadow!");
+ }
+ populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+ kasan_mem_to_shadow((void *)__START_KERNEL_map));
+
+ vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
+ (unsigned long)kasan_mem_to_shadow(_end),
+ NUMA_NO_NODE);
+
+ populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+ (void *)KASAN_SHADOW_END);
+
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+
+ load_cr3(init_level4_pgt);
+ init_task.kasan_depth = 0;
+}
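The population above relies on KASAN's fixed 8-to-1 shadow mapping: each shadow byte describes 8 bytes of kernel address space, so the shadow region is one eighth of the address space at a fixed offset. A hedged sketch of the translation that kasan_mem_to_shadow() is assumed to perform follows; the offset constant is a placeholder, not necessarily the value this configuration uses.

#define EXAMPLE_SHADOW_SCALE_SHIFT	3
#define EXAMPLE_SHADOW_OFFSET		0xdffffc0000000000UL	/* placeholder */

/* One shadow byte covers (1 << EXAMPLE_SHADOW_SCALE_SHIFT) == 8 bytes of
 * real memory; shifting the address and adding a fixed offset gives the
 * corresponding shadow address. */
static inline void *example_mem_to_shadow(const void *addr)
{
	return (void *)(((unsigned long)addr >> EXAMPLE_SHADOW_SCALE_SHIFT)
			+ EXAMPLE_SHADOW_OFFSET);
}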
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 67ebf5751222..c439ec478216 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -349,6 +349,12 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
return MPX_INVALID_BOUNDS_DIR;
/*
+ * 32-bit binaries on 64-bit kernels are currently
+ * unsupported.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
+ return MPX_INVALID_BOUNDS_DIR;
+ /*
* The bounds directory pointer is stored in a register
* only accessible if we first do an xsave.
*/
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 1a883705a12a..cd4785bbacb9 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -794,7 +794,6 @@ int early_cpu_to_node(int cpu)
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
struct cpumask *mask;
- char buf[64];
if (node == NUMA_NO_NODE) {
/* early_cpu_to_node() already emits a warning and trace */
@@ -812,10 +811,9 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
else
cpumask_clear_cpu(cpu, mask);
- cpulist_scnprintf(buf, sizeof(buf), mask);
- printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+ printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
- cpu, node, buf);
+ cpu, node, cpumask_pr_args(mask));
return;
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edf299c8ff6c..7ac68698406c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -234,8 +234,13 @@ void pat_init(void)
PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
/* Boot CPU check */
- if (!boot_pat_state)
+ if (!boot_pat_state) {
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+ if (!boot_pat_state) {
+ pat_disable("PAT read returns always zero, disabled.");
+ return;
+ }
+ }
wrmsrl(MSR_IA32_CR_PAT, pat);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 6fb6927f9e76..7b22adaad4f1 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -190,7 +190,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
#endif /* CONFIG_X86_PAE */
-static void free_pmds(pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
int i;
@@ -198,10 +198,11 @@ static void free_pmds(pmd_t *pmds[])
if (pmds[i]) {
pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
free_page((unsigned long)pmds[i]);
+ mm_dec_nr_pmds(mm);
}
}
-static int preallocate_pmds(pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
int i;
bool failed = false;
@@ -215,11 +216,13 @@ static int preallocate_pmds(pmd_t *pmds[])
pmd = NULL;
failed = true;
}
+ if (pmd)
+ mm_inc_nr_pmds(mm);
pmds[i] = pmd;
}
if (failed) {
- free_pmds(pmds);
+ free_pmds(mm, pmds);
return -ENOMEM;
}
@@ -246,6 +249,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
}
}
}
@@ -283,7 +287,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
- if (preallocate_pmds(pmds) != 0)
+ if (preallocate_pmds(mm, pmds) != 0)
goto out_free_pgd;
if (paravirt_pgd_alloc(mm) != 0)
@@ -304,7 +308,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
return pgd;
out_free_pmds:
- free_pmds(pmds);
+ free_pmds(mm, pmds);
out_free_pgd:
free_page((unsigned long)pgd);
out:
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ee61c36d64f8..3250f2371aea 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,9 +14,6 @@
#include <asm/uv/uv.h>
#include <linux/debugfs.h>
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
- = { &init_mm, 0, };
-
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index cfd1b132b8e3..6ac273832f28 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -10,9 +10,6 @@
struct pci_root_info {
struct acpi_device *bridge;
char name[16];
- unsigned int res_num;
- struct resource *res;
- resource_size_t *res_offset;
struct pci_sysdata sd;
#ifdef CONFIG_PCI_MMCONFIG
bool mcfg_added;
@@ -218,130 +215,41 @@ static void teardown_mcfg_map(struct pci_root_info *info)
}
#endif
-static acpi_status resource_to_addr(struct acpi_resource *resource,
- struct acpi_resource_address64 *addr)
-{
- acpi_status status;
- struct acpi_resource_memory24 *memory24;
- struct acpi_resource_memory32 *memory32;
- struct acpi_resource_fixed_memory32 *fixed_memory32;
-
- memset(addr, 0, sizeof(*addr));
- switch (resource->type) {
- case ACPI_RESOURCE_TYPE_MEMORY24:
- memory24 = &resource->data.memory24;
- addr->resource_type = ACPI_MEMORY_RANGE;
- addr->minimum = memory24->minimum;
- addr->address_length = memory24->address_length;
- addr->maximum = addr->minimum + addr->address_length - 1;
- return AE_OK;
- case ACPI_RESOURCE_TYPE_MEMORY32:
- memory32 = &resource->data.memory32;
- addr->resource_type = ACPI_MEMORY_RANGE;
- addr->minimum = memory32->minimum;
- addr->address_length = memory32->address_length;
- addr->maximum = addr->minimum + addr->address_length - 1;
- return AE_OK;
- case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
- fixed_memory32 = &resource->data.fixed_memory32;
- addr->resource_type = ACPI_MEMORY_RANGE;
- addr->minimum = fixed_memory32->address;
- addr->address_length = fixed_memory32->address_length;
- addr->maximum = addr->minimum + addr->address_length - 1;
- return AE_OK;
- case ACPI_RESOURCE_TYPE_ADDRESS16:
- case ACPI_RESOURCE_TYPE_ADDRESS32:
- case ACPI_RESOURCE_TYPE_ADDRESS64:
- status = acpi_resource_to_address64(resource, addr);
- if (ACPI_SUCCESS(status) &&
- (addr->resource_type == ACPI_MEMORY_RANGE ||
- addr->resource_type == ACPI_IO_RANGE) &&
- addr->address_length > 0) {
- return AE_OK;
- }
- break;
- }
- return AE_ERROR;
-}
-
-static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
+static void validate_resources(struct device *dev, struct list_head *crs_res,
+ unsigned long type)
{
- struct pci_root_info *info = data;
- struct acpi_resource_address64 addr;
- acpi_status status;
-
- status = resource_to_addr(acpi_res, &addr);
- if (ACPI_SUCCESS(status))
- info->res_num++;
- return AE_OK;
-}
-
-static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
-{
- struct pci_root_info *info = data;
- struct resource *res;
- struct acpi_resource_address64 addr;
- acpi_status status;
- unsigned long flags;
- u64 start, orig_end, end;
-
- status = resource_to_addr(acpi_res, &addr);
- if (!ACPI_SUCCESS(status))
- return AE_OK;
-
- if (addr.resource_type == ACPI_MEMORY_RANGE) {
- flags = IORESOURCE_MEM;
- if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
- flags |= IORESOURCE_PREFETCH;
- } else if (addr.resource_type == ACPI_IO_RANGE) {
- flags = IORESOURCE_IO;
- } else
- return AE_OK;
-
- start = addr.minimum + addr.translation_offset;
- orig_end = end = addr.maximum + addr.translation_offset;
-
- /* Exclude non-addressable range or non-addressable portion of range */
- end = min(end, (u64)iomem_resource.end);
- if (end <= start) {
- dev_info(&info->bridge->dev,
- "host bridge window [%#llx-%#llx] "
- "(ignored, not CPU addressable)\n", start, orig_end);
- return AE_OK;
- } else if (orig_end != end) {
- dev_info(&info->bridge->dev,
- "host bridge window [%#llx-%#llx] "
- "([%#llx-%#llx] ignored, not CPU addressable)\n",
- start, orig_end, end + 1, orig_end);
- }
+ LIST_HEAD(list);
+ struct resource *res1, *res2, *root = NULL;
+ struct resource_entry *tmp, *entry, *entry2;
- res = &info->res[info->res_num];
- res->name = info->name;
- res->flags = flags;
- res->start = start;
- res->end = end;
- info->res_offset[info->res_num] = addr.translation_offset;
- info->res_num++;
+ BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
+ root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;
- if (!pci_use_crs)
- dev_printk(KERN_DEBUG, &info->bridge->dev,
- "host bridge window %pR (ignored)\n", res);
+ list_splice_init(crs_res, &list);
+ resource_list_for_each_entry_safe(entry, tmp, &list) {
+ bool free = false;
+ resource_size_t end;
- return AE_OK;
-}
-
-static void coalesce_windows(struct pci_root_info *info, unsigned long type)
-{
- int i, j;
- struct resource *res1, *res2;
-
- for (i = 0; i < info->res_num; i++) {
- res1 = &info->res[i];
+ res1 = entry->res;
if (!(res1->flags & type))
- continue;
+ goto next;
+
+ /* Exclude non-addressable range or non-addressable portion */
+ end = min(res1->end, root->end);
+ if (end <= res1->start) {
+ dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
+ res1);
+ free = true;
+ goto next;
+ } else if (res1->end != end) {
+ dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
+ res1, (unsigned long long)end + 1,
+ (unsigned long long)res1->end);
+ res1->end = end;
+ }
- for (j = i + 1; j < info->res_num; j++) {
- res2 = &info->res[j];
+ resource_list_for_each_entry(entry2, crs_res) {
+ res2 = entry2->res;
if (!(res2->flags & type))
continue;
@@ -353,118 +261,92 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type)
if (resource_overlaps(res1, res2)) {
res2->start = min(res1->start, res2->start);
res2->end = max(res1->end, res2->end);
- dev_info(&info->bridge->dev,
- "host bridge window expanded to %pR; %pR ignored\n",
+ dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
res2, res1);
- res1->flags = 0;
+ free = true;
+ goto next;
}
}
+
+next:
+ resource_list_del(entry);
+ if (free)
+ resource_list_free_entry(entry);
+ else
+ resource_list_add_tail(entry, crs_res);
}
}
static void add_resources(struct pci_root_info *info,
- struct list_head *resources)
+ struct list_head *resources,
+ struct list_head *crs_res)
{
- int i;
- struct resource *res, *root, *conflict;
-
- coalesce_windows(info, IORESOURCE_MEM);
- coalesce_windows(info, IORESOURCE_IO);
+ struct resource_entry *entry, *tmp;
+ struct resource *res, *conflict, *root = NULL;
- for (i = 0; i < info->res_num; i++) {
- res = &info->res[i];
+ validate_resources(&info->bridge->dev, crs_res, IORESOURCE_MEM);
+ validate_resources(&info->bridge->dev, crs_res, IORESOURCE_IO);
+ resource_list_for_each_entry_safe(entry, tmp, crs_res) {
+ res = entry->res;
if (res->flags & IORESOURCE_MEM)
root = &iomem_resource;
else if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
- continue;
+ BUG_ON(res);
conflict = insert_resource_conflict(root, res);
- if (conflict)
+ if (conflict) {
dev_info(&info->bridge->dev,
"ignoring host bridge window %pR (conflicts with %s %pR)\n",
res, conflict->name, conflict);
- else
- pci_add_resource_offset(resources, res,
- info->res_offset[i]);
+ resource_list_destroy_entry(entry);
+ }
}
-}
-static void free_pci_root_info_res(struct pci_root_info *info)
-{
- kfree(info->res);
- info->res = NULL;
- kfree(info->res_offset);
- info->res_offset = NULL;
- info->res_num = 0;
+ list_splice_tail(crs_res, resources);
}
-static void __release_pci_root_info(struct pci_root_info *info)
+static void release_pci_root_info(struct pci_host_bridge *bridge)
{
- int i;
struct resource *res;
+ struct resource_entry *entry;
+ struct pci_root_info *info = bridge->release_data;
- for (i = 0; i < info->res_num; i++) {
- res = &info->res[i];
-
- if (!res->parent)
- continue;
-
- if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
- continue;
-
- release_resource(res);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ res = entry->res;
+ if (res->parent &&
+ (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+ release_resource(res);
}
- free_pci_root_info_res(info);
-
teardown_mcfg_map(info);
-
kfree(info);
}
-static void release_pci_root_info(struct pci_host_bridge *bridge)
-{
- struct pci_root_info *info = bridge->release_data;
-
- __release_pci_root_info(info);
-}
-
static void probe_pci_root_info(struct pci_root_info *info,
struct acpi_device *device,
- int busnum, int domain)
+ int busnum, int domain,
+ struct list_head *list)
{
- size_t size;
+ int ret;
+ struct resource_entry *entry;
sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
info->bridge = device;
-
- info->res_num = 0;
- acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
- info);
- if (!info->res_num)
- return;
-
- size = sizeof(*info->res) * info->res_num;
- info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node);
- if (!info->res) {
- info->res_num = 0;
- return;
- }
-
- size = sizeof(*info->res_offset) * info->res_num;
- info->res_num = 0;
- info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
- if (!info->res_offset) {
- kfree(info->res);
- info->res = NULL;
- return;
- }
-
- acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
- info);
+ ret = acpi_dev_get_resources(device, list,
+ acpi_dev_filter_resource_type_cb,
+ (void *)(IORESOURCE_IO | IORESOURCE_MEM));
+ if (ret < 0)
+ dev_warn(&device->dev,
+ "failed to parse _CRS method, error code %d\n", ret);
+ else if (ret == 0)
+ dev_dbg(&device->dev,
+ "no IO and memory resources present in _CRS\n");
+ else
+ resource_list_for_each_entry(entry, list)
+ entry->res->name = info->name;
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
@@ -473,6 +355,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
struct pci_root_info *info;
int domain = root->segment;
int busnum = root->secondary.start;
+ struct resource_entry *res_entry;
+ LIST_HEAD(crs_res);
LIST_HEAD(resources);
struct pci_bus *bus;
struct pci_sysdata *sd;
@@ -520,18 +404,22 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
memcpy(bus->sysdata, sd, sizeof(*sd));
kfree(info);
} else {
- probe_pci_root_info(info, device, busnum, domain);
-
/* insert busn res at first */
pci_add_resource(&resources, &root->secondary);
+
/*
* _CRS with no apertures is normal, so only fall back to
* defaults or native bridge info if we're ignoring _CRS.
*/
- if (pci_use_crs)
- add_resources(info, &resources);
- else {
- free_pci_root_info_res(info);
+ probe_pci_root_info(info, device, busnum, domain, &crs_res);
+ if (pci_use_crs) {
+ add_resources(info, &resources, &crs_res);
+ } else {
+ resource_list_for_each_entry(res_entry, &crs_res)
+ dev_printk(KERN_DEBUG, &device->dev,
+ "host bridge window %pR (ignored)\n",
+ res_entry->res);
+ resource_list_free(&crs_res);
x86_pci_root_bus_resources(busnum, &resources);
}
@@ -546,8 +434,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
to_pci_host_bridge(bus->bridge),
release_pci_root_info, info);
} else {
- pci_free_resource_list(&resources);
- __release_pci_root_info(info);
+ resource_list_free(&resources);
+ teardown_mcfg_map(info);
+ kfree(info);
}
}
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index f3a2cfc14125..7bcf06a7cd12 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
{
struct pci_root_info *info = x86_find_pci_root_info(bus);
struct pci_root_res *root_res;
- struct pci_host_bridge_window *window;
+ struct resource_entry *window;
bool found = false;
if (!info)
@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
bus);
/* already added by acpi ? */
- list_for_each_entry(window, resources, list)
+ resource_list_for_each_entry(window, resources)
if (window->res->flags & IORESOURCE_BUS) {
found = true;
break;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7b20bccf3648..3d2612b68694 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
},
},
+ {
+ .callback = set_scan_all,
+ .ident = "Stratus/NEC ftServer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
+ },
+ },
+ {
+ .callback = set_scan_all,
+ .ident = "Stratus/NEC ftServer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
+ },
+ },
{}
};
@@ -497,6 +513,31 @@ void __init pcibios_set_cache_line_size(void)
}
}
+/*
+ * Some device drivers assume dev->irq won't change after calling
+ * pci_disable_device(). So delay releasing the IRQ resource until driver
+ * unbinding time. Otherwise it will break the PM subsystem and drivers
+ * such as xen-pciback.
+ */
+static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct pci_dev *dev = to_pci_dev(data);
+
+ if (action != BUS_NOTIFY_UNBOUND_DRIVER)
+ return NOTIFY_DONE;
+
+ if (pcibios_disable_irq)
+ pcibios_disable_irq(dev);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block pci_irq_nb = {
+ .notifier_call = pci_irq_notifier,
+ .priority = INT_MIN,
+};
+
int __init pcibios_init(void)
{
if (!raw_pci_ops) {
@@ -509,6 +550,9 @@ int __init pcibios_init(void)
if (pci_bf_sort >= pci_force_bf)
pci_sort_breadthfirst();
+
+ bus_register_notifier(&pci_bus_type, &pci_irq_nb);
+
return 0;
}
@@ -667,12 +711,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return 0;
}
-void pcibios_disable_device (struct pci_dev *dev)
-{
- if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
- pcibios_disable_irq(dev);
-}
-
int pci_ext_cfg_avail(void)
{
if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 9b18ef315a55..349c0d32cc0b 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
continue;
if (r->parent) /* Already allocated */
continue;
- if (!r->start || pci_claim_resource(dev, idx) < 0) {
+ if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 44b9271580b5..efb849323c74 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
- dev->irq > 0) {
+ if (dev->irq_managed && dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
+ dev->irq = 0;
}
}
@@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
/*
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 5dc6ca5e1741..e71b3dbd87b8 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,22 +1256,9 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
-bool mp_should_keep_irq(struct device *dev)
-{
- if (dev->power.is_prepared)
- return true;
-#ifdef CONFIG_PM
- if (dev->power.runtime_status == RPM_SUSPENDING)
- return true;
-#endif
-
- return false;
-}
-
static void pirq_disable_irq(struct pci_dev *dev)
{
- if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
- dev->irq_managed && dev->irq) {
+ if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
dev->irq = 0;
dev->irq_managed = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 326198a4434e..dd30b7e08bc2 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -397,12 +397,12 @@ static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
status = acpi_resource_to_address64(res, &address);
if (ACPI_FAILURE(status) ||
- (address.address_length <= 0) ||
+ (address.address.address_length <= 0) ||
(address.resource_type != ACPI_MEMORY_RANGE))
return AE_OK;
- if ((mcfg_res->start >= address.minimum) &&
- (mcfg_res->end < (address.minimum + address.address_length))) {
+ if ((mcfg_res->start >= address.address.minimum) &&
+ (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
mcfg_res->flags = 1;
return AE_CTRL_TERMINATE;
}
@@ -610,6 +610,32 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
return 0;
}
+#ifdef CONFIG_ACPI_APEI
+extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+ void *data), void *data);
+
+static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
+ void *data), void *data)
+{
+ struct pci_mmcfg_region *cfg;
+ int rc;
+
+ if (list_empty(&pci_mmcfg_list))
+ return 0;
+
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ rc = func(cfg->res.start, resource_size(&cfg->res), data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
+#else
+#define set_apei_filter()
+#endif
+
static void __init __pci_mmcfg_init(int early)
{
pci_mmcfg_reject_broken(early);
@@ -644,6 +670,8 @@ void __init pci_mmcfg_early_init(void)
else
acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
__pci_mmcfg_init(1);
+
+ set_apei_filter();
}
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c489ef2c1a39..d22f4b5bbc04 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -298,12 +298,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
map_irq.entry_nr = nvec;
} else if (type == PCI_CAP_ID_MSIX) {
int pos;
+ unsigned long flags;
u32 table_offset, bir;
pos = dev->msix_cap;
pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+ flags = pci_resource_flags(dev, bir);
+ if (!flags || (flags & IORESOURCE_UNSET))
+ return -EINVAL;
map_irq.table_base = pci_resource_start(dev, bir);
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
@@ -458,6 +462,7 @@ int __init pci_xen_hvm_init(void)
* just how GSIs get registered.
*/
__acpi_register_gsi = acpi_register_gsi_xen_hvm;
+ __acpi_unregister_gsi = NULL;
#endif
#ifdef CONFIG_PCI_MSI
@@ -471,52 +476,6 @@ int __init pci_xen_hvm_init(void)
}
#ifdef CONFIG_XEN_DOM0
-static __init void xen_setup_acpi_sci(void)
-{
- int rc;
- int trigger, polarity;
- int gsi = acpi_sci_override_gsi;
- int irq = -1;
- int gsi_override = -1;
-
- if (!gsi)
- return;
-
- rc = acpi_get_override_irq(gsi, &trigger, &polarity);
- if (rc) {
- printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
- " sci, rc=%d\n", rc);
- return;
- }
- trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
- polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
- printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
- "polarity=%d\n", gsi, trigger, polarity);
-
- /* Before we bind the GSI to a Linux IRQ, check whether
- * we need to override it with bus_irq (IRQ) value. Usually for
- * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
- * but there are oddballs where the IRQ != GSI:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
- * which ends up being: gsi_to_irq[9] == 20
- * (which is what acpi_gsi_to_irq ends up calling when starting the
- * the ACPI interpreter and keels over since IRQ 9 has not been
- * setup as we had setup IRQ 20 for it).
- */
- if (acpi_gsi_to_irq(gsi, &irq) == 0) {
- /* Use the provided value if it's valid. */
- if (irq >= 0)
- gsi_override = irq;
- }
-
- gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
- printk(KERN_INFO "xen: acpi sci %d\n", gsi);
-
- return;
-}
-
int __init pci_xen_initial_domain(void)
{
int irq;
@@ -527,8 +486,8 @@ int __init pci_xen_initial_domain(void)
x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
pci_msi_ignore_mask = 1;
#endif
- xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
+ __acpi_unregister_gsi = NULL;
/* Pre-allocate legacy irqs */
for (irq = 0; irq < nr_legacy_irqs(); irq++) {
int trigger, polarity;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index af9307f2cc28..91ec9f8704bf 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -16,8 +16,6 @@ obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
-# SPI Devices
-obj-$(subst m,y,$(CONFIG_SERIAL_MRST_MAX3110)) += platform_max3111.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
deleted file mode 100644
index afd1df94e0e5..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * platform_max3111.c: max3111 platform data initilization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <asm/intel-mid.h>
-
-static void __init *max3111_platform_data(void *info)
-{
- struct spi_board_info *spi_info = info;
- int intr = get_gpio_by_name("max3111_int");
-
- spi_info->mode = SPI_MODE_0;
- if (intr == -1)
- return NULL;
- spi_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- return NULL;
-}
-
-static const struct devs_id max3111_dev_id __initconst = {
- .name = "spi_max3111",
- .type = SFI_DEV_TYPE_SPI,
- .get_platform_data = &max3111_platform_data,
-};
-
-sfi_device(max3111_dev_id);
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
index e0bd082a80e0..4e720829ab90 100644
--- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c
+++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
@@ -10,15 +10,13 @@
*/
/*
- * This file implements two early consoles named mrst and hsu.
- * mrst is based on Maxim3110 spi-uart device, it exists in both
- * Moorestown and Medfield platforms, while hsu is based on a High
- * Speed UART device which only exists in the Medfield platform
+ * This file implements the early console named hsu, which is based on a
+ * High Speed UART device that only exists on the Medfield platform.
*/
#include <linux/serial_reg.h>
#include <linux/serial_mfd.h>
-#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -28,216 +26,6 @@
#include <asm/pgtable.h>
#include <asm/intel-mid.h>
-#define MRST_SPI_TIMEOUT 0x200000
-#define MRST_REGBASE_SPI0 0xff128000
-#define MRST_REGBASE_SPI1 0xff128400
-#define MRST_CLK_SPI0_REG 0xff11d86c
-
-/* Bit fields in CTRLR0 */
-#define SPI_DFS_OFFSET 0
-
-#define SPI_FRF_OFFSET 4
-#define SPI_FRF_SPI 0x0
-#define SPI_FRF_SSP 0x1
-#define SPI_FRF_MICROWIRE 0x2
-#define SPI_FRF_RESV 0x3
-
-#define SPI_MODE_OFFSET 6
-#define SPI_SCPH_OFFSET 6
-#define SPI_SCOL_OFFSET 7
-#define SPI_TMOD_OFFSET 8
-#define SPI_TMOD_TR 0x0 /* xmit & recv */
-#define SPI_TMOD_TO 0x1 /* xmit only */
-#define SPI_TMOD_RO 0x2 /* recv only */
-#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
-
-#define SPI_SLVOE_OFFSET 10
-#define SPI_SRL_OFFSET 11
-#define SPI_CFS_OFFSET 12
-
-/* Bit fields in SR, 7 bits */
-#define SR_MASK 0x7f /* cover 7 bits */
-#define SR_BUSY (1 << 0)
-#define SR_TF_NOT_FULL (1 << 1)
-#define SR_TF_EMPT (1 << 2)
-#define SR_RF_NOT_EMPT (1 << 3)
-#define SR_RF_FULL (1 << 4)
-#define SR_TX_ERR (1 << 5)
-#define SR_DCOL (1 << 6)
-
-struct dw_spi_reg {
- u32 ctrl0;
- u32 ctrl1;
- u32 ssienr;
- u32 mwcr;
- u32 ser;
- u32 baudr;
- u32 txfltr;
- u32 rxfltr;
- u32 txflr;
- u32 rxflr;
- u32 sr;
- u32 imr;
- u32 isr;
- u32 risr;
- u32 txoicr;
- u32 rxoicr;
- u32 rxuicr;
- u32 msticr;
- u32 icr;
- u32 dmacr;
- u32 dmatdlr;
- u32 dmardlr;
- u32 idr;
- u32 version;
-
- /* Currently operates as 32 bits, though only the low 16 bits matter */
- u32 dr;
-} __packed;
-
-#define dw_readl(dw, name) __raw_readl(&(dw)->name)
-#define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name)
-
-/* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */
-static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
-
-static u32 *pclk_spi0;
-/* Always contains an accessible address, start with 0 */
-static struct dw_spi_reg *pspi;
-
-static struct kmsg_dumper dw_dumper;
-static int dumper_registered;
-
-static void dw_kmsg_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
-{
- static char line[1024];
- size_t len;
-
- /* When run to this, we'd better re-init the HW */
- mrst_early_console_init();
-
- while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
- early_mrst_console.write(&early_mrst_console, line, len);
-}
-
-/* Set the ratio rate to 115200, 8n1, IRQ disabled */
-static void max3110_write_config(void)
-{
- u16 config;
-
- config = 0xc001;
- dw_writel(pspi, dr, config);
-}
-
-/* Translate char to a eligible word and send to max3110 */
-static void max3110_write_data(char c)
-{
- u16 data;
-
- data = 0x8000 | c;
- dw_writel(pspi, dr, data);
-}
-
-void mrst_early_console_init(void)
-{
- u32 ctrlr0 = 0;
- u32 spi0_cdiv;
- u32 freq; /* Freqency info only need be searched once */
-
- /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
- pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
- MRST_CLK_SPI0_REG);
- spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
- freq = 100000000 / (spi0_cdiv + 1);
-
- if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL)
- mrst_spi_paddr = MRST_REGBASE_SPI1;
-
- pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
- mrst_spi_paddr);
-
- /* Disable SPI controller */
- dw_writel(pspi, ssienr, 0);
-
- /* Set control param, 8 bits, transmit only mode */
- ctrlr0 = dw_readl(pspi, ctrl0);
-
- ctrlr0 &= 0xfcc0;
- ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
- | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
- dw_writel(pspi, ctrl0, ctrlr0);
-
- /*
- * Change the spi0 clk to comply with 115200 bps, use 100000 to
- * calculate the clk dividor to make the clock a little slower
- * than real baud rate.
- */
- dw_writel(pspi, baudr, freq/100000);
-
- /* Disable all INT for early phase */
- dw_writel(pspi, imr, 0x0);
-
- /* Set the cs to spi-uart */
- dw_writel(pspi, ser, 0x2);
-
- /* Enable the HW, the last step for HW init */
- dw_writel(pspi, ssienr, 0x1);
-
- /* Set the default configuration */
- max3110_write_config();
-
- /* Register the kmsg dumper */
- if (!dumper_registered) {
- dw_dumper.dump = dw_kmsg_dump;
- kmsg_dump_register(&dw_dumper);
- dumper_registered = 1;
- }
-}
-
-/* Slave select should be called in the read/write function */
-static void early_mrst_spi_putc(char c)
-{
- unsigned int timeout;
- u32 sr;
-
- timeout = MRST_SPI_TIMEOUT;
- /* Early putc needs to make sure the TX FIFO is not full */
- while (--timeout) {
- sr = dw_readl(pspi, sr);
- if (!(sr & SR_TF_NOT_FULL))
- cpu_relax();
- else
- break;
- }
-
- if (!timeout)
- pr_warn("MRST earlycon: timed out\n");
- else
- max3110_write_data(c);
-}
-
-/* Early SPI only uses polling mode */
-static void early_mrst_spi_write(struct console *con, const char *str,
- unsigned n)
-{
- int i;
-
- for (i = 0; i < n && *str; i++) {
- if (*str == '\n')
- early_mrst_spi_putc('\r');
- early_mrst_spi_putc(*str);
- str++;
- }
-}
-
-struct console early_mrst_console = {
- .name = "earlymrst",
- .write = early_mrst_spi_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
/*
* Following is the early console based on Medfield HSU (High
* Speed UART) device.
@@ -259,7 +47,7 @@ void hsu_early_console_init(const char *s)
port = clamp_val(port, 0, 2);
paddr = HSU_PORT_BASE + port * 0x80;
- phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
+ phsu = (void __iomem *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
/* Disable FIFO */
writeb(0x0, phsu + UART_FCR);
diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
index 4762cff7facd..32947ba0f62d 100644
--- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c
+++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
@@ -110,7 +110,7 @@ int vrtc_set_mmss(const struct timespec *now)
spin_unlock_irqrestore(&rtc_lock, flags);
} else {
pr_err("%s: Invalid vRTC value: write of %lx to vRTC failed\n",
- __FUNCTION__, now->tv_sec);
+ __func__, now->tv_sec);
retval = -EINVAL;
}
return retval;
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c6b146e67116..7488cafab955 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -273,20 +273,6 @@ static inline void uv_clear_nmi(int cpu)
}
}
-/* Print non-responding cpus */
-static void uv_nmi_nr_cpus_pr(char *fmt)
-{
- static char cpu_list[1024];
- int len = sizeof(cpu_list);
- int c = cpumask_weight(uv_nmi_cpu_mask);
- int n = cpulist_scnprintf(cpu_list, len, uv_nmi_cpu_mask);
-
- if (n >= len-1)
- strcpy(&cpu_list[len - 6], "...\n");
-
- printk(fmt, c, cpu_list);
-}
-
/* Ping non-responding cpus attemping to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
@@ -371,16 +357,19 @@ static void uv_nmi_wait(int master)
break;
/* if not all made it in, send IPI NMI to them */
- uv_nmi_nr_cpus_pr(KERN_ALERT
- "UV: Sending NMI IPI to %d non-responding CPUs: %s\n");
+ pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n",
+ cpumask_weight(uv_nmi_cpu_mask),
+ cpumask_pr_args(uv_nmi_cpu_mask));
+
uv_nmi_nr_cpus_ping();
/* if all cpus are in, then done */
if (!uv_nmi_wait_cpus(0))
break;
- uv_nmi_nr_cpus_pr(KERN_ALERT
- "UV: %d CPUs not in NMI loop: %s\n");
+ pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
+ cpumask_weight(uv_nmi_cpu_mask),
+ cpumask_pr_args(uv_nmi_cpu_mask));
} while (0);
pr_alert("UV: %d of %d CPUs in NMI\n",
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 6ec7910f59bf..3e32ed5648a0 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -105,11 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
ctxt->cr3 = read_cr3();
-#ifdef CONFIG_X86_32
- ctxt->cr4 = read_cr4_safe();
-#else
-/* CONFIG_X86_64 */
- ctxt->cr4 = read_cr4();
+ ctxt->cr4 = __read_cr4_safe();
+#ifdef CONFIG_X86_64
ctxt->cr8 = read_cr8();
#endif
ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
@@ -175,12 +172,12 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
if (ctxt->cr4)
- write_cr4(ctxt->cr4);
+ __write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
wrmsrl(MSR_EFER, ctxt->efer);
write_cr8(ctxt->cr8);
- write_cr4(ctxt->cr4);
+ __write_cr4(ctxt->cr4);
#endif
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
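__read_cr4_safe() returns 0 on CPUs that predate CR4, which is why the 32-bit/64-bit #ifdef in __save_processor_state() can go away, while the restore side now writes through __write_cr4(). A minimal sketch of the save/restore pairing, assuming the accessors introduced by this series:

	unsigned long cr4 = __read_cr4_safe();	/* 0 if the CPU has no CR4 */

	/* ... suspend ... */

	if (cr4)
		__write_cr4(cr4);		/* skip the write on pre-CR4 CPUs */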
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 94f7fbe97b08..e02c2c6c56a5 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,7 @@
# for more details.
#
#
-
+KASAN_SANITIZE := n
subdir- := rm
obj-y += init.o
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index bad628a620c4..0b7a63d98440 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -81,7 +81,7 @@ void __init setup_real_mode(void)
trampoline_header->start = (u64) secondary_startup_64;
trampoline_cr4_features = &trampoline_header->cr4;
- *trampoline_cr4_features = read_cr4();
+ *trampoline_cr4_features = __read_cr4();
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 7c0d7be176a5..2730d775ef9a 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,6 +6,7 @@
# for more details.
#
#
+KASAN_SANITIZE := n
always := realmode.bin realmode.relocs
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644
index 23210baade2d..000000000000
--- a/arch/x86/tools/calc_run_size.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
- if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
- my $size = hex($1);
- my $offset = hex($2);
- $mem_size += $size;
- if ($file_offset == 0) {
- $file_offset = $offset;
- } elsif ($file_offset != $offset) {
- # BFD linker shows the same file offset in ELF.
- # Gold linker shows them as consecutive.
- next if ($file_offset + $mem_size == $offset + $size);
-
- printf STDERR "file_offset: 0x%lx\n", $file_offset;
- printf STDERR "mem_size: 0x%lx\n", $mem_size;
- printf STDERR "offset: 0x%lx\n", $offset;
- printf STDERR "size: 0x%lx\n", $size;
-
- die ".bss and .brk are non-contiguous\n";
- }
- }
-}
-
-if ($file_offset == 0) {
- die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644
index 000000000000..1a4c17bb3910
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+ echo "Never found .bss or .brk file offset" >&2
+ exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+ # Gold linker shows them as consecutive.
+ endB=$(( $offsetB + $sizeB ))
+ if [ "$endB" != "$run_size" ] ; then
+ printf "sizeA: 0x%x\n" $sizeA >&2
+ printf "offsetA: 0x%x\n" $offsetA >&2
+ printf "sizeB: 0x%x\n" $sizeB >&2
+ printf "offsetB: 0x%x\n" $offsetB >&2
+ echo ".bss and .brk are non-contiguous" >&2
+ exit 1
+ fi
+fi
+
+printf "%d\n" $run_size
+exit 0
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 79d824551c1a..0c8c32bfd792 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -157,7 +157,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
int err, pid;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err = copy_from_user(&sc, from, sizeof(sc));
if (err)
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 5a4affe025e8..7b9be9822724 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -3,6 +3,7 @@
#
KBUILD_CFLAGS += $(DISABLE_LTO)
+KASAN_SANITIZE := n
VDSO64-$(CONFIG_X86_64) := y
VDSOX32-$(CONFIG_X86_X32_ABI) := y
@@ -205,4 +206,4 @@ $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
PHONY += vdso_install $(vdso_img_insttargets)
vdso_install: $(vdso_img_insttargets) FORCE
-clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80*
+clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64*
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13e3e0f..bd8b8459c3d0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -40,6 +40,7 @@
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
@@ -66,6 +67,7 @@
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static unsigned char xen_get_nmi_reason(void)
+{
+ unsigned char reason = 0;
+
+ /* Construct a value which looks like it came from port 0x61. */
+ if (test_bit(_XEN_NMIREASON_io_error,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_IOCHK;
+ if (test_bit(_XEN_NMIREASON_pci_serr,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_SERR;
+
+ return reason;
+}
+
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
@@ -1477,10 +1494,10 @@ static void xen_pvh_set_cr_flags(int cpu)
* set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
*/
if (cpu_has_pse)
- set_in_cr4(X86_CR4_PSE);
+ cr4_set_bits_and_update_boot(X86_CR4_PSE);
if (cpu_has_pge)
- set_in_cr4(X86_CR4_PGE);
+ cr4_set_bits_and_update_boot(X86_CR4_PGE);
}
/*
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
pv_info = xen_info;
pv_init_ops = xen_init_ops;
pv_apic_ops = xen_apic_ops;
- if (!xen_pvh_domain())
+ if (!xen_pvh_domain()) {
pv_cpu_ops = xen_cpu_ops;
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
+ }
+
if (xen_feature(XENFEAT_auto_translated_physmap))
x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
else
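xen_get_nmi_reason() gives the generic x86 NMI code a port-0x61-style reason byte for PV guests, where the real I/O port is not available. A caller-side sketch, assuming the hook installed above; handle_serr() and handle_iochk() are hypothetical placeholders:

	unsigned char reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_SERR)
		handle_serr();		/* hypothetical PCI SERR# handler */
	if (reason & NMI_REASON_IOCHK)
		handle_iochk();		/* hypothetical I/O check handler */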
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5c1f9ace7ae7..adca9e2b6553 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1489,7 +1489,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
native_set_pte(ptep, pte);
}
-static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = cmd;
@@ -1657,7 +1657,7 @@ void __init xen_reserve_top(void)
* Like __va(), but returns address in the kernel mapping (which is
* all we have until the physical memory mapping has been set up.
*/
-static void *__ka(phys_addr_t paddr)
+static void * __init __ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
return (void *)(paddr + __START_KERNEL_map);
@@ -1667,7 +1667,7 @@ static void *__ka(phys_addr_t paddr)
}
/* Convert a machine address to physical address */
-static unsigned long m2p(phys_addr_t maddr)
+static unsigned long __init m2p(phys_addr_t maddr)
{
phys_addr_t paddr;
@@ -1678,13 +1678,14 @@ static unsigned long m2p(phys_addr_t maddr)
}
/* Convert a machine address to kernel virtual */
-static void *m2v(phys_addr_t maddr)
+static void * __init m2v(phys_addr_t maddr)
{
return __ka(m2p(maddr));
}
/* Set the page permissions on an identity-mapped pages */
-static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
+static void __init set_page_prot_flags(void *addr, pgprot_t prot,
+ unsigned long flags)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
@@ -1696,7 +1697,7 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG();
}
-static void set_page_prot(void *addr, pgprot_t prot)
+static void __init set_page_prot(void *addr, pgprot_t prot)
{
return set_page_prot_flags(addr, prot, UVMF_NONE);
}
@@ -1733,10 +1734,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte;
-#ifdef CONFIG_X86_32
if (pfn > max_pfn_mapped)
max_pfn_mapped = pfn;
-#endif
if (!pte_none(pte_page[pteidx]))
continue;
@@ -1769,7 +1768,7 @@ void __init xen_setup_machphys_mapping(void)
}
#ifdef CONFIG_X86_64
-static void convert_pfn_mfn(void *v)
+static void __init convert_pfn_mfn(void *v)
{
pte_t *pte = v;
int i;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index edbc7a63fd73..740ae3026a14 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -84,8 +84,6 @@
#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
-static void __init m2p_override_init(void);
-
unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
@@ -167,10 +165,13 @@ static void * __ref alloc_p2m_page(void)
return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
{
- BUG_ON(!slab_is_available());
+ if (unlikely(!slab_is_available())) {
+ free_bootmem((unsigned long)p, PAGE_SIZE);
+ return;
+ }
+
free_page((unsigned long)p);
}
@@ -375,7 +376,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
p2m_missing_pte : p2m_identity_pte;
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
pmdp = populate_extra_pmd(
- (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+ (unsigned long)(p2m + pfn) + i * PMD_SIZE);
set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
}
}
@@ -399,8 +400,6 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_size = xen_max_p2m_pfn;
xen_inv_extra_mem();
-
- m2p_override_init();
}
unsigned long get_phys_to_machine(unsigned long pfn)
@@ -436,10 +435,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
* pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
*/
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
pte_t *ptechk;
- pte_t *pteret = ptep;
pte_t *pte_newpg[PMDS_PER_MID_PAGE];
pmd_t *pmdp;
unsigned int level;
@@ -473,8 +471,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
if (ptechk == pte_pg) {
set_pmd(pmdp,
__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
- if (vaddr == (addr & ~(PMD_SIZE - 1)))
- pteret = pte_offset_kernel(pmdp, addr);
pte_newpg[i] = NULL;
}
@@ -488,7 +484,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
vaddr += PMD_SIZE;
}
- return pteret;
+ return lookup_address(addr, &level);
}
/*
@@ -517,7 +513,7 @@ static bool alloc_p2m(unsigned long pfn)
if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
/* PMD level is missing, allocate a new one */
- ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+ ptep = alloc_p2m_pmd(addr, pte_pg);
if (!ptep)
return false;
}
@@ -554,7 +550,7 @@ static bool alloc_p2m(unsigned long pfn)
mid_mfn = NULL;
}
- p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+ p2m_pfn = pte_pfn(READ_ONCE(*ptep));
if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
/* p2m leaf page is missing */
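READ_ONCE() replaces ACCESS_ONCE() above because ACCESS_ONCE() is unreliable for non-scalar types such as pte_t on newer compilers, whereas READ_ONCE() performs a single volatile load of the whole object. A minimal sketch, assuming a valid pte pointer 'ptep':

	pte_t pte = READ_ONCE(*ptep);		/* single load; no compiler re-reads */
	unsigned long pfn = pte_pfn(pte);	/* operate on the snapshot */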
@@ -652,100 +648,21 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return true;
}
-#define M2P_OVERRIDE_HASH_SHIFT 10
-#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)
-
-static struct list_head *m2p_overrides;
-static DEFINE_SPINLOCK(m2p_override_lock);
-
-static void __init m2p_override_init(void)
-{
- unsigned i;
-
- m2p_overrides = alloc_bootmem_align(
- sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
- sizeof(unsigned long));
-
- for (i = 0; i < M2P_OVERRIDE_HASH; i++)
- INIT_LIST_HEAD(&m2p_overrides[i]);
-}
-
-static unsigned long mfn_hash(unsigned long mfn)
-{
- return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
-}
-
-/* Add an MFN override for a particular page */
-static int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op)
-{
- unsigned long flags;
- unsigned long pfn;
- unsigned long uninitialized_var(address);
- unsigned level;
- pte_t *ptep = NULL;
-
- pfn = page_to_pfn(page);
- if (!PageHighMem(page)) {
- address = (unsigned long)__va(pfn << PAGE_SHIFT);
- ptep = lookup_address(address, &level);
- if (WARN(ptep == NULL || level != PG_LEVEL_4K,
- "m2p_add_override: pfn %lx not mapped", pfn))
- return -EINVAL;
- }
-
- if (kmap_op != NULL) {
- if (!PageHighMem(page)) {
- struct multicall_space mcs =
- xen_mc_entry(sizeof(*kmap_op));
-
- MULTI_grant_table_op(mcs.mc,
- GNTTABOP_map_grant_ref, kmap_op, 1);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- }
- }
- spin_lock_irqsave(&m2p_override_lock, flags);
- list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
- spin_unlock_irqrestore(&m2p_override_lock, flags);
-
- /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
- * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
- * pfn so that the following mfn_to_pfn(mfn) calls will return the
- * pfn from the m2p_override (the backend pfn) instead.
- * We need to do this because the pages shared by the frontend
- * (xen-blkfront) can be already locked (lock_page, called by
- * do_read_cache_page); when the userspace backend tries to use them
- * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
- * do_blockdev_direct_IO is going to try to lock the same pages
- * again resulting in a deadlock.
- * As a side effect get_user_pages_fast might not be safe on the
- * frontend pages while they are being shared with the backend,
- * because mfn_to_pfn (that ends up being called by GUPF) will
- * return the backend pfn rather than the frontend pfn. */
- pfn = mfn_to_pfn_no_overrides(mfn);
- if (__pfn_to_mfn(pfn) == mfn)
- set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
-
- return 0;
-}
-
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
- bool lazy = false;
pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
- if (kmap_ops &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
+ if (kmap_ops) {
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+ kmap_ops, count);
+ if (ret)
+ goto out;
}
for (i = 0; i < count; i++) {
@@ -764,170 +681,28 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
}
pfn = page_to_pfn(pages[i]);
- WARN_ON(PagePrivate(pages[i]));
- SetPagePrivate(pages[i]);
- set_page_private(pages[i], mfn);
- pages[i]->index = pfn_to_mfn(pfn);
+ WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
ret = -ENOMEM;
goto out;
}
-
- if (kmap_ops) {
- ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
- if (ret)
- goto out;
- }
}
out:
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
-static struct page *m2p_find_override(unsigned long mfn)
-{
- unsigned long flags;
- struct list_head *bucket;
- struct page *p, *ret;
-
- if (unlikely(!m2p_overrides))
- return NULL;
-
- ret = NULL;
- bucket = &m2p_overrides[mfn_hash(mfn)];
-
- spin_lock_irqsave(&m2p_override_lock, flags);
-
- list_for_each_entry(p, bucket, lru) {
- if (page_private(p) == mfn) {
- ret = p;
- break;
- }
- }
-
- spin_unlock_irqrestore(&m2p_override_lock, flags);
-
- return ret;
-}
-
-static int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn)
-{
- unsigned long flags;
- unsigned long pfn;
- unsigned long uninitialized_var(address);
- unsigned level;
- pte_t *ptep = NULL;
-
- pfn = page_to_pfn(page);
-
- if (!PageHighMem(page)) {
- address = (unsigned long)__va(pfn << PAGE_SHIFT);
- ptep = lookup_address(address, &level);
-
- if (WARN(ptep == NULL || level != PG_LEVEL_4K,
- "m2p_remove_override: pfn %lx not mapped", pfn))
- return -EINVAL;
- }
-
- spin_lock_irqsave(&m2p_override_lock, flags);
- list_del(&page->lru);
- spin_unlock_irqrestore(&m2p_override_lock, flags);
-
- if (kmap_op != NULL) {
- if (!PageHighMem(page)) {
- struct multicall_space mcs;
- struct gnttab_unmap_and_replace *unmap_op;
- struct page *scratch_page = get_balloon_scratch_page();
- unsigned long scratch_page_address = (unsigned long)
- __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
-
- /*
- * It might be that we queued all the m2p grant table
- * hypercalls in a multicall, then m2p_remove_override
- * get called before the multicall has actually been
- * issued. In this case handle is going to -1 because
- * it hasn't been modified yet.
- */
- if (kmap_op->handle == -1)
- xen_mc_flush();
- /*
- * Now if kmap_op->handle is negative it means that the
- * hypercall actually returned an error.
- */
- if (kmap_op->handle == GNTST_general_error) {
- pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
- pfn, mfn);
- put_balloon_scratch_page();
- return -1;
- }
-
- xen_mc_batch();
-
- mcs = __xen_mc_entry(
- sizeof(struct gnttab_unmap_and_replace));
- unmap_op = mcs.args;
- unmap_op->host_addr = kmap_op->host_addr;
- unmap_op->new_addr = scratch_page_address;
- unmap_op->handle = kmap_op->handle;
-
- MULTI_grant_table_op(mcs.mc,
- GNTTABOP_unmap_and_replace, unmap_op, 1);
-
- mcs = __xen_mc_entry(0);
- MULTI_update_va_mapping(mcs.mc, scratch_page_address,
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- kmap_op->host_addr = 0;
- put_balloon_scratch_page();
- }
- }
-
- /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
- * somewhere in this domain, even before being added to the
- * m2p_override (see comment above in m2p_add_override).
- * If there are no other entries in the m2p_override corresponding
- * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
- * the original pfn (the one shared by the frontend): the backend
- * cannot do any IO on this page anymore because it has been
- * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
- * the original pfn causes mfn_to_pfn(mfn) to return the frontend
- * pfn again. */
- mfn &= ~FOREIGN_FRAME_BIT;
- pfn = mfn_to_pfn_no_overrides(mfn);
- if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
- m2p_find_override(mfn) == NULL)
- set_phys_to_machine(pfn, mfn);
-
- return 0;
-}
-
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
- bool lazy = false;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
- if (kmap_ops &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
for (i = 0; i < count; i++) {
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
@@ -937,36 +712,16 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
goto out;
}
- set_page_private(pages[i], INVALID_P2M_ENTRY);
- WARN_ON(!PagePrivate(pages[i]));
- ClearPagePrivate(pages[i]);
- set_phys_to_machine(pfn, pages[i]->index);
-
- if (kmap_ops)
- ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
- if (ret)
- goto out;
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-
+ if (kunmap_ops)
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+ kunmap_ops, count);
out:
- if (lazy)
- arch_leave_lazy_mmu_mode();
return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
-unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
-{
- struct page *p = m2p_find_override(mfn);
- unsigned long ret = pfn;
-
- if (p)
- ret = page_to_pfn(p);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
-
#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
#include "debugfs.h"
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dfd77dec8e2b..55f388ef481a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -32,16 +32,6 @@
#include "p2m.h"
#include "mmu.h"
-/* These are code, but not functions. Defined in entry.S */
-extern const char xen_hypervisor_callback[];
-extern const char xen_failsafe_callback[];
-#ifdef CONFIG_X86_64
-extern asmlinkage void nmi(void);
-#endif
-extern void xen_sysenter_target(void);
-extern void xen_syscall_target(void);
-extern void xen_syscall32_target(void);
-
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
@@ -74,7 +64,7 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
*/
#define EXTRA_MEM_RATIO (10)
-static void __init xen_add_extra_mem(u64 start, u64 size)
+static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
int i;
@@ -97,10 +87,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
memblock_reserve(start, size);
}
-static void __init xen_del_extra_mem(u64 start, u64 size)
+static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
int i;
- u64 start_r, size_r;
+ phys_addr_t start_r, size_r;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
start_r = xen_extra_mem[i].start;
@@ -140,7 +130,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
int i;
- unsigned long addr = PFN_PHYS(pfn);
+ phys_addr_t addr = PFN_PHYS(pfn);
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +150,8 @@ void __init xen_inv_extra_mem(void)
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ if (!xen_extra_mem[i].size)
+ continue;
pfn_s = PFN_DOWN(xen_extra_mem[i].start);
pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +221,14 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
- unsigned long *released)
+ unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
- unsigned long len = 0;
unsigned long pfn, end;
int ret;
WARN_ON(start_pfn > end_pfn);
+ /* Release pages first. */
end = min(end_pfn, nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +241,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
+ (*released)++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break;
- len++;
} else
break;
}
- /* Need to release pages first */
- *released += len;
- *identity += set_phys_range_identity(start_pfn, end_pfn);
+ set_phys_range_identity(start_pfn, end_pfn);
}
/*
@@ -268,7 +257,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
struct mmu_update update = {
- .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
+ .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
.val = pfn
};
@@ -287,7 +276,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
}
/* Update kernel mapping, but not for highmem. */
- if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+ if (pfn >= PFN_UP(__pa(high_memory - 1)))
return;
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +307,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
unsigned long ident_pfn_iter, remap_pfn_iter;
unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
- unsigned long ident_cnt = 0;
unsigned int i, chunk;
WARN_ON(size == 0);
@@ -347,8 +335,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
xen_remap_mfn = mfn;
/* Set identity map */
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- ident_pfn_iter + chunk);
+ set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
left -= chunk;
}
@@ -371,7 +358,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
- unsigned long *identity, unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
unsigned long pfn;
unsigned long i = 0;
@@ -386,8 +373,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) {
/* Identity map remaining pages */
- *identity += set_phys_range_identity(cur_pfn,
- cur_pfn + size);
+ set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
if (cur_pfn + size > nr_pages)
@@ -398,7 +384,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (!remap_range_size) {
pr_warning("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages, identity, released);
+ cur_pfn + left, nr_pages, released);
break;
}
/* Adjust size to fit in current e820 RAM region */
@@ -410,7 +396,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
- *identity += size;
+ *remapped += size;
}
/*
@@ -427,13 +413,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
- unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
phys_addr_t start = 0;
- unsigned long identity = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
+ unsigned long num_remapped = 0;
int i;
/*
@@ -460,14 +446,14 @@ static void __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
- &identity, &num_released);
+ &num_released, &num_remapped);
start = end;
}
}
*released = num_released;
+ *remapped = num_remapped;
- pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
pr_info("Released %ld page(s)\n", num_released);
}
@@ -549,20 +535,21 @@ static unsigned long __init xen_get_max_pages(void)
return min(max_pages, MAX_DOMAIN_PAGES);
}
-static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+static void __init xen_align_and_add_e820_region(phys_addr_t start,
+ phys_addr_t size, int type)
{
- u64 end = start + size;
+ phys_addr_t end = start + size;
/* Align RAM regions to page boundaries. */
if (type == E820_RAM) {
start = PAGE_ALIGN(start);
- end &= ~((u64)PAGE_SIZE - 1);
+ end &= ~((phys_addr_t)PAGE_SIZE - 1);
}
e820_add_region(start, end - start, type);
}
-void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
struct e820entry *entry;
unsigned int i;
@@ -581,11 +568,12 @@ char * __init xen_memory_setup(void)
static struct e820entry map[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
- unsigned long long mem_end;
+ phys_addr_t mem_end;
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
+ unsigned long remapped_pages;
int i;
int op;
@@ -635,9 +623,10 @@ char * __init xen_memory_setup(void)
* underlying RAM.
*/
xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
- &xen_released_pages);
+ &xen_released_pages, &remapped_pages);
extra_pages += xen_released_pages;
+ extra_pages += remapped_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -654,16 +643,16 @@ char * __init xen_memory_setup(void)
extra_pages);
i = 0;
while (i < memmap.nr_entries) {
- u64 addr = map[i].addr;
- u64 size = map[i].size;
+ phys_addr_t addr = map[i].addr;
+ phys_addr_t size = map[i].size;
u32 type = map[i].type;
if (type == E820_RAM) {
if (addr < mem_end) {
size = min(size, mem_end - addr);
} else if (extra_pages) {
- size = min(size, (u64)extra_pages * PAGE_SIZE);
- extra_pages -= size / PAGE_SIZE;
+ size = min(size, PFN_PHYS(extra_pages));
+ extra_pages -= PFN_DOWN(size);
xen_add_extra_mem(addr, size);
xen_max_p2m_pfn = PFN_DOWN(addr + size);
} else
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4c071aeb8417..08e8489c47f1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -507,7 +507,7 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu)
{
while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
- current->state = TASK_UNINTERRUPTIBLE;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/10);
}
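Open-coding assignments to current->state is discouraged in favour of the state helpers; __set_current_state() is the non-barrier variant used here. A minimal sketch of the usual polling-wait pattern, assuming a stop condition done() that stands in for the hypervisor check:

	while (!done()) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 10);	/* returns in TASK_RUNNING */
	}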
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f473d268d387..55da33b1d51c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
struct xen_clock_event_device {
struct clock_event_device evt;
- char *name;
+ char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
if (evt->irq >= 0) {
unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
- kfree(per_cpu(xen_clock_events, cpu).name);
- per_cpu(xen_clock_events, cpu).name = NULL;
}
}
void xen_setup_timer(int cpu)
{
- char *name;
- struct clock_event_device *evt;
+ struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+ struct clock_event_device *evt = &xevt->evt;
int irq;
- evt = &per_cpu(xen_clock_events, cpu).evt;
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
if (evt->irq >= 0)
xen_teardown_timer(cpu);
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
- name = kasprintf(GFP_KERNEL, "timer%d", cpu);
- if (!name)
- name = "<timer kasprintf failed>";
+ snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
- name, NULL);
+ xevt->name, NULL);
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
- per_cpu(xen_clock_events, cpu).name = name;
}
void xen_setup_cpu_clockevents(void)
{
- BUG_ON(preemptible());
-
clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}
@@ -487,6 +479,10 @@ static void __init xen_time_init(void)
int cpu = smp_processor_id();
struct timespec tp;
+ /* As Dom0 is never moved, no penalty on using TSC there */
+ if (xen_initial_domain())
+ xen_clocksource.rating = 275;
+
clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 5686bd9d58cc..9e195c683549 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -10,6 +10,12 @@
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
+void xen_sysenter_target(void);
+#ifdef CONFIG_X86_64
+void xen_syscall_target(void);
+void xen_syscall32_target(void);
+#endif
+
extern void *xen_initial_gdt;
struct trap_info;
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 872bf0194e6d..a5e929a10c20 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -57,7 +57,7 @@
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
/*
@@ -89,8 +89,6 @@
* (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
* +-----------------------------------------+
* swap | index | type | 01 | 11 | 00 |
- * +- - - - - - - - - - - - - - - - - - - - -+
- * file | file offset | 01 | 11 | 10 |
* +-----------------------------------------+
*
* For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
@@ -111,7 +109,6 @@
* index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
* (note that the index is always non-zero)
* type swap type (5 bits -> 32 types)
- * file offset 26-bit offset into the file, in increments of PAGE_SIZE
*
* Notes:
* - (PROT_NONE) is a special case of 'present' but causes an exception for
@@ -144,7 +141,6 @@
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif
-#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */
#define _PAGE_USER (1<<4) /* user access (ring=1) */
@@ -260,7 +256,6 @@ static inline void pgtable_cache_init(void) { }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte)
@@ -390,11 +385,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define PTE_FILE_MAX_BITS 26
-#define pte_to_pgoff(pte) (pte_val(pte) >> 6)
-#define pgoff_to_pte(off) \
- ((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
-
#endif /* !defined (__ASSEMBLY__) */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 470153e8547c..a9b5d3ba196c 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -51,7 +51,6 @@ struct thread_info {
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
mm_segment_t addr_limit; /* thread address space */
- struct restart_block restart_block;
unsigned long cpenable;
@@ -72,7 +71,6 @@ struct thread_info {
#define TI_CPU 0x00000010
#define TI_PRE_COUNT 0x00000014
#define TI_ADDR_LIMIT 0x00000018
-#define TI_RESTART_BLOCK 0x000001C
#endif
@@ -90,9 +88,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 06370ccea9e9..28fc57ef5b86 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -574,12 +574,9 @@ void machine_power_off(void)
static int
c_show(struct seq_file *f, void *slot)
{
- char buf[NR_CPUS * 5];
-
- cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
/* high-level stuff */
seq_printf(f, "CPU count\t: %u\n"
- "CPU list\t: %s\n"
+ "CPU list\t: %*pbl\n"
"vendor_id\t: Tensilica\n"
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"core ID\t\t: " XCHAL_CORE_ID "\n"
@@ -588,7 +585,7 @@ c_show(struct seq_file *f, void *slot)
"cpu MHz\t\t: %lu.%02lu\n"
"bogomips\t: %lu.%02lu\n",
num_online_cpus(),
- buf,
+ cpumask_pr_args(cpu_online_mask),
XCHAL_BUILD_UNIQUE_ID,
XCHAL_HAVE_BE ? "big" : "little",
ccount_freq/1000000,
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 4612321c73cc..3d733ba16f28 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -245,7 +245,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
int ret;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (regs->depc > 64)
panic("rt_sigreturn in double exception!\n");
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/block/bio.c b/block/bio.c
index 471d7382c7d1..f66a4eae16ee 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -28,7 +28,6 @@
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
-#include <scsi/sg.h> /* for struct sg_iovec */
#include <trace/events/block.h>
@@ -1022,21 +1021,11 @@ void bio_copy_data(struct bio *dst, struct bio *src)
EXPORT_SYMBOL(bio_copy_data);
struct bio_map_data {
- int nr_sgvecs;
int is_our_pages;
- struct sg_iovec sgvecs[];
+ struct iov_iter iter;
+ struct iovec iov[];
};
-static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
- const struct sg_iovec *iov, int iov_count,
- int is_our_pages)
-{
- memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
- bmd->nr_sgvecs = iov_count;
- bmd->is_our_pages = is_our_pages;
- bio->bi_private = bmd;
-}
-
static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
gfp_t gfp_mask)
{
@@ -1044,85 +1033,101 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
return NULL;
return kmalloc(sizeof(struct bio_map_data) +
- sizeof(struct sg_iovec) * iov_count, gfp_mask);
+ sizeof(struct iovec) * iov_count, gfp_mask);
}
-static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
- int to_user, int from_user, int do_free_page)
+/**
+ * bio_copy_from_iter - copy all pages from iov_iter to bio
+ * @bio: The &struct bio which describes the I/O as destination
+ * @iter: iov_iter as source
+ *
+ * Copy all pages from iov_iter to bio.
+ * Returns 0 on success, or error on failure.
+ */
+static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
- int ret = 0, i;
+ int i;
struct bio_vec *bvec;
- int iov_idx = 0;
- unsigned int iov_off = 0;
bio_for_each_segment_all(bvec, bio, i) {
- char *bv_addr = page_address(bvec->bv_page);
- unsigned int bv_len = bvec->bv_len;
+ ssize_t ret;
- while (bv_len && iov_idx < iov_count) {
- unsigned int bytes;
- char __user *iov_addr;
+ ret = copy_page_from_iter(bvec->bv_page,
+ bvec->bv_offset,
+ bvec->bv_len,
+ &iter);
- bytes = min_t(unsigned int,
- iov[iov_idx].iov_len - iov_off, bv_len);
- iov_addr = iov[iov_idx].iov_base + iov_off;
+ if (!iov_iter_count(&iter))
+ break;
- if (!ret) {
- if (to_user)
- ret = copy_to_user(iov_addr, bv_addr,
- bytes);
+ if (ret < bvec->bv_len)
+ return -EFAULT;
+ }
- if (from_user)
- ret = copy_from_user(bv_addr, iov_addr,
- bytes);
+ return 0;
+}
- if (ret)
- ret = -EFAULT;
- }
+/**
+ * bio_copy_to_iter - copy all pages from bio to iov_iter
+ * @bio: The &struct bio which describes the I/O as source
+ * @iter: iov_iter as destination
+ *
+ * Copy all pages from bio to iov_iter.
+ * Returns 0 on success, or error on failure.
+ */
+static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
+{
+ int i;
+ struct bio_vec *bvec;
- bv_len -= bytes;
- bv_addr += bytes;
- iov_addr += bytes;
- iov_off += bytes;
+ bio_for_each_segment_all(bvec, bio, i) {
+ ssize_t ret;
- if (iov[iov_idx].iov_len == iov_off) {
- iov_idx++;
- iov_off = 0;
- }
- }
+ ret = copy_page_to_iter(bvec->bv_page,
+ bvec->bv_offset,
+ bvec->bv_len,
+ &iter);
+
+ if (!iov_iter_count(&iter))
+ break;
- if (do_free_page)
- __free_page(bvec->bv_page);
+ if (ret < bvec->bv_len)
+ return -EFAULT;
}
- return ret;
+ return 0;
+}
+
+static void bio_free_pages(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
}
/**
* bio_uncopy_user - finish previously mapped bio
* @bio: bio being terminated
*
- * Free pages allocated from bio_copy_user() and write back data
+ * Free pages allocated from bio_copy_user_iov() and write back data
* to user space in case of a read.
*/
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- struct bio_vec *bvec;
- int ret = 0, i;
+ int ret = 0;
if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
/*
* if we're in a workqueue, the request is orphaned, so
* don't copy into a random user address space, just free.
*/
- if (current->mm)
- ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
- bio_data_dir(bio) == READ,
- 0, bmd->is_our_pages);
- else if (bmd->is_our_pages)
- bio_for_each_segment_all(bvec, bio, i)
- __free_page(bvec->bv_page);
+ if (current->mm && bio_data_dir(bio) == READ)
+ ret = bio_copy_to_iter(bio, bmd->iter);
+ if (bmd->is_our_pages)
+ bio_free_pages(bio);
}
kfree(bmd);
bio_put(bio);
@@ -1132,12 +1137,10 @@ EXPORT_SYMBOL(bio_uncopy_user);
/**
* bio_copy_user_iov - copy user data to bio
- * @q: destination block queue
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @iov: the iovec.
- * @iov_count: number of elements in the iovec
- * @write_to_vm: bool indicating writing to pages or not
- * @gfp_mask: memory allocation flags
+ * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @iter: iovec iterator
+ * @gfp_mask: memory allocation flags
*
* Prepares and returns a bio for indirect user io, bouncing data
* to/from kernel pages as necessary. Must be paired with
@@ -1145,25 +1148,25 @@ EXPORT_SYMBOL(bio_uncopy_user);
*/
struct bio *bio_copy_user_iov(struct request_queue *q,
struct rq_map_data *map_data,
- const struct sg_iovec *iov, int iov_count,
- int write_to_vm, gfp_t gfp_mask)
+ const struct iov_iter *iter,
+ gfp_t gfp_mask)
{
struct bio_map_data *bmd;
- struct bio_vec *bvec;
struct page *page;
struct bio *bio;
int i, ret;
int nr_pages = 0;
- unsigned int len = 0;
+ unsigned int len = iter->count;
unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
- for (i = 0; i < iov_count; i++) {
+ for (i = 0; i < iter->nr_segs; i++) {
unsigned long uaddr;
unsigned long end;
unsigned long start;
- uaddr = (unsigned long)iov[i].iov_base;
- end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ uaddr = (unsigned long) iter->iov[i].iov_base;
+ end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
start = uaddr >> PAGE_SHIFT;
/*
@@ -1173,22 +1176,31 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
return ERR_PTR(-EINVAL);
nr_pages += end - start;
- len += iov[i].iov_len;
}
if (offset)
nr_pages++;
- bmd = bio_alloc_map_data(iov_count, gfp_mask);
+ bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);
+ /*
+ * We need to do a deep copy of the iov_iter including the iovecs.
+ * The caller provided iov might point to an on-stack or otherwise
+ * shortlived one.
+ */
+ bmd->is_our_pages = map_data ? 0 : 1;
+ memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
+ iov_iter_init(&bmd->iter, iter->type, bmd->iov,
+ iter->nr_segs, iter->count);
+
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
goto out_bmd;
- if (!write_to_vm)
+ if (iter->type & WRITE)
bio->bi_rw |= REQ_WRITE;
ret = 0;
@@ -1236,20 +1248,18 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* success
*/
- if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
+ if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
- ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
+ ret = bio_copy_from_iter(bio, *iter);
if (ret)
goto cleanup;
}
- bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
+ bio->bi_private = bmd;
return bio;
cleanup:
if (!map_data)
- bio_for_each_segment_all(bvec, bio, i)
- __free_page(bvec->bv_page);
-
+ bio_free_pages(bio);
bio_put(bio);
out_bmd:
kfree(bmd);
@@ -1257,46 +1267,30 @@ out_bmd:
}
/**
- * bio_copy_user - copy user data to bio
- * @q: destination block queue
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- * @gfp_mask: memory allocation flags
+ * bio_map_user_iov - map user iovec into bio
+ * @q: the struct request_queue for the bio
+ * @iter: iovec iterator
+ * @gfp_mask: memory allocation flags
*
- * Prepares and returns a bio for indirect user io, bouncing data
- * to/from kernel pages as necessary. Must be paired with
- * call bio_uncopy_user() on io completion.
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
*/
-struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
- unsigned long uaddr, unsigned int len,
- int write_to_vm, gfp_t gfp_mask)
+struct bio *bio_map_user_iov(struct request_queue *q,
+ const struct iov_iter *iter,
+ gfp_t gfp_mask)
{
- struct sg_iovec iov;
-
- iov.iov_base = (void __user *)uaddr;
- iov.iov_len = len;
-
- return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
-}
-EXPORT_SYMBOL(bio_copy_user);
-
-static struct bio *__bio_map_user_iov(struct request_queue *q,
- struct block_device *bdev,
- const struct sg_iovec *iov, int iov_count,
- int write_to_vm, gfp_t gfp_mask)
-{
- int i, j;
+ int j;
int nr_pages = 0;
struct page **pages;
struct bio *bio;
int cur_page = 0;
int ret, offset;
+ struct iov_iter i;
+ struct iovec iov;
- for (i = 0; i < iov_count; i++) {
- unsigned long uaddr = (unsigned long)iov[i].iov_base;
- unsigned long len = iov[i].iov_len;
+ iov_for_each(iov, i, *iter) {
+ unsigned long uaddr = (unsigned long) iov.iov_base;
+ unsigned long len = iov.iov_len;
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
@@ -1326,16 +1320,17 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
if (!pages)
goto out;
- for (i = 0; i < iov_count; i++) {
- unsigned long uaddr = (unsigned long)iov[i].iov_base;
- unsigned long len = iov[i].iov_len;
+ iov_for_each(iov, i, *iter) {
+ unsigned long uaddr = (unsigned long) iov.iov_base;
+ unsigned long len = iov.iov_len;
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
const int local_nr_pages = end - start;
const int page_limit = cur_page + local_nr_pages;
ret = get_user_pages_fast(uaddr, local_nr_pages,
- write_to_vm, &pages[cur_page]);
+ (iter->type & WRITE) != WRITE,
+ &pages[cur_page]);
if (ret < local_nr_pages) {
ret = -EFAULT;
goto out_unmap;
@@ -1375,72 +1370,10 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
/*
* set data direction, and check if mapped pages need bouncing
*/
- if (!write_to_vm)
+ if (iter->type & WRITE)
bio->bi_rw |= REQ_WRITE;
- bio->bi_bdev = bdev;
bio->bi_flags |= (1 << BIO_USER_MAPPED);
- return bio;
-
- out_unmap:
- for (i = 0; i < nr_pages; i++) {
- if(!pages[i])
- break;
- page_cache_release(pages[i]);
- }
- out:
- kfree(pages);
- bio_put(bio);
- return ERR_PTR(ret);
-}
-
-/**
- * bio_map_user - map user address into bio
- * @q: the struct request_queue for the bio
- * @bdev: destination block device
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- * @gfp_mask: memory allocation flags
- *
- * Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
- unsigned long uaddr, unsigned int len, int write_to_vm,
- gfp_t gfp_mask)
-{
- struct sg_iovec iov;
-
- iov.iov_base = (void __user *)uaddr;
- iov.iov_len = len;
-
- return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
-}
-EXPORT_SYMBOL(bio_map_user);
-
-/**
- * bio_map_user_iov - map user sg_iovec table into bio
- * @q: the struct request_queue for the bio
- * @bdev: destination block device
- * @iov: the iovec.
- * @iov_count: number of elements in the iovec
- * @write_to_vm: bool indicating writing to pages or not
- * @gfp_mask: memory allocation flags
- *
- * Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
- const struct sg_iovec *iov, int iov_count,
- int write_to_vm, gfp_t gfp_mask)
-{
- struct bio *bio;
-
- bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
- gfp_mask);
- if (IS_ERR(bio))
- return bio;
/*
* subtle -- if __bio_map_user() ended up bouncing a bio,
@@ -1449,8 +1382,18 @@ struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
* reference to it
*/
bio_get(bio);
-
return bio;
+
+ out_unmap:
+ for (j = 0; j < nr_pages; j++) {
+ if (!pages[j])
+ break;
+ page_cache_release(pages[j]);
+ }
+ out:
+ kfree(pages);
+ bio_put(bio);
+ return ERR_PTR(ret);
}
static void __bio_unmap_user(struct bio *bio)
@@ -1492,8 +1435,18 @@ static void bio_map_kern_endio(struct bio *bio, int err)
bio_put(bio);
}
-static struct bio *__bio_map_kern(struct request_queue *q, void *data,
- unsigned int len, gfp_t gfp_mask)
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the struct request_queue for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
+ gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1517,8 +1470,11 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
bytes = len;
if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
- offset) < bytes)
- break;
+ offset) < bytes) {
+ /* we don't support partial mappings */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+ }
data += bytes;
len -= bytes;
@@ -1528,57 +1484,26 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
bio->bi_end_io = bio_map_kern_endio;
return bio;
}
+EXPORT_SYMBOL(bio_map_kern);
-/**
- * bio_map_kern - map kernel address into bio
- * @q: the struct request_queue for the bio
- * @data: pointer to buffer to map
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
- gfp_t gfp_mask)
+static void bio_copy_kern_endio(struct bio *bio, int err)
{
- struct bio *bio;
-
- bio = __bio_map_kern(q, data, len, gfp_mask);
- if (IS_ERR(bio))
- return bio;
-
- if (bio->bi_iter.bi_size == len)
- return bio;
-
- /*
- * Don't support partial mappings.
- */
+ bio_free_pages(bio);
bio_put(bio);
- return ERR_PTR(-EINVAL);
}
-EXPORT_SYMBOL(bio_map_kern);
-static void bio_copy_kern_endio(struct bio *bio, int err)
+static void bio_copy_kern_endio_read(struct bio *bio, int err)
{
+ char *p = bio->bi_private;
struct bio_vec *bvec;
- const int read = bio_data_dir(bio) == READ;
- struct bio_map_data *bmd = bio->bi_private;
int i;
- char *p = bmd->sgvecs[0].iov_base;
bio_for_each_segment_all(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
-
- if (read)
- memcpy(p, addr, bvec->bv_len);
-
- __free_page(bvec->bv_page);
+ memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
p += bvec->bv_len;
}
- kfree(bmd);
- bio_put(bio);
+ bio_copy_kern_endio(bio, err);
}
/**
@@ -1595,28 +1520,59 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask, int reading)
{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
struct bio *bio;
- struct bio_vec *bvec;
- int i;
+ void *p = data;
+ int nr_pages = 0;
+
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
- bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
- if (IS_ERR(bio))
- return bio;
+ nr_pages = end - start;
+ bio = bio_kmalloc(gfp_mask, nr_pages);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
- if (!reading) {
- void *p = data;
+ while (len) {
+ struct page *page;
+ unsigned int bytes = PAGE_SIZE;
- bio_for_each_segment_all(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
+ if (bytes > len)
+ bytes = len;
- memcpy(addr, p, bvec->bv_len);
- p += bvec->bv_len;
- }
+ page = alloc_page(q->bounce_gfp | gfp_mask);
+ if (!page)
+ goto cleanup;
+
+ if (!reading)
+ memcpy(page_address(page), p, bytes);
+
+ if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
+ break;
+
+ len -= bytes;
+ p += bytes;
}
- bio->bi_end_io = bio_copy_kern_endio;
+ if (reading) {
+ bio->bi_end_io = bio_copy_kern_endio_read;
+ bio->bi_private = data;
+ } else {
+ bio->bi_end_io = bio_copy_kern_endio;
+ bio->bi_rw |= REQ_WRITE;
+ }
return bio;
+
+cleanup:
+ bio_free_pages(bio);
+ bio_put(bio);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_copy_kern);
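bio_map_kern() is now defined directly (the __bio_map_kern() wrapper is gone) and rejects partial mappings inside the page-add loop instead of checking afterwards. A minimal caller sketch, assuming a request queue 'q' and a kernel buffer 'buf' of 'len' bytes:

	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);
	/* attach to a request, e.g. via blk_rq_append_bio(), then submit */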
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153a40c2..794c3e7f01cf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+void blk_set_queue_dying(struct request_queue *q)
+{
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+ if (q->mq_ops)
+ blk_mq_wake_waiters(q);
+ else {
+ struct request_list *rl;
+
+ blk_queue_for_each_rl(rl, q) {
+ if (rl->rq_pool) {
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ blk_set_queue_dying(q);
spin_lock_irq(lock);
/*
@@ -588,7 +607,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
- q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ q->backing_dev_info.capabilities = 0;
q->backing_dev_info.name = "block";
q->node = node_id;
@@ -2029,6 +2048,13 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
return -EIO;
+ if (q->mq_ops) {
+ if (blk_queue_io_stat(q))
+ blk_account_io_start(rq, true);
+ blk_mq_insert_request(rq, false, true, true);
+ return 0;
+ }
+
spin_lock_irqsave(q->queue_lock, flags);
if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2888,7 +2914,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+ dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -2926,8 +2952,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
if (!bs)
bs = fs_bio_set;
- blk_rq_init(NULL, rq);
-
__rq_for_each_bio(bio_src, rq_src) {
bio = bio_clone_fast(bio_src, gfp_mask, bs);
if (!bio)
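The newly exported blk_set_queue_dying() above lets drivers mark a queue as dying and wake any request allocators sleeping on it (for both the legacy and blk-mq paths) before the full blk_cleanup_queue() teardown runs. A hypothetical caller sketch:

	blk_set_queue_dying(q);		/* new I/O fails fast, sleepers are woken */
	/* ... quiesce hardware, complete or fail outstanding requests ... */
	blk_cleanup_queue(q);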
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 8411be3c19d3..7688ee3f5d72 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -283,24 +283,34 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
+ * @discard: whether to discard the block range
*
* Description:
- * Generate and issue number of bios with zerofiled pages.
+ * Zero-fill a block range. If the discard flag is set and the block
+ * device guarantees that subsequent READ operations to the block range
+ * in question will return zeroes, the blocks will be discarded. Should
+ * the discard request fail, if the discard flag is not set, or if
+ * discard_zeroes_data is not supported, this function will resort to
+ * zeroing the blocks manually, thus provisioning (allocating,
+ * anchoring) them. If the block device supports the WRITE SAME command
+ * blkdev_issue_zeroout() will use it to optimize the process of
+ * clearing the block range. Otherwise the zeroing will be performed
+ * using regular WRITE calls.
*/
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask)
+ sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
- if (bdev_write_same(bdev)) {
- unsigned char bdn[BDEVNAME_SIZE];
+ struct request_queue *q = bdev_get_queue(bdev);
- if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
- ZERO_PAGE(0)))
- return 0;
+ if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
+ blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
+ return 0;
- bdevname(bdev, bdn);
- pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
- }
+ if (bdev_write_same(bdev) &&
+ blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+ ZERO_PAGE(0)) == 0)
+ return 0;
return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
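
The new discard argument lets blkdev_issue_zeroout() punt to a discard when the device guarantees that discarded blocks read back as zeroes, before falling back to WRITE SAME and finally to regular writes. The BLKZEROOUT ioctl patched in block/ioctl.c further down passes discard=false, so it always provisions the range. A hedged user-space example that zeroes the first 1 MiB of a block device; the device path is an assumption, run it only against a disposable device, and both offset and length must be multiples of the 512-byte sector size:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* BLKZEROOUT */

int main(int argc, char **argv)
{
        const char *dev = argc > 1 ? argv[1] : "/dev/loop0"; /* assumed test device */
        uint64_t range[2] = { 0, 1024 * 1024 };  /* byte offset, byte length */
        int fd = open(dev, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, BLKZEROOUT, &range)) {
                perror("BLKZEROOUT");
                close(fd);
                return 1;
        }
        printf("zeroed %llu bytes at offset %llu on %s\n",
               (unsigned long long)range[1], (unsigned long long)range[0], dev);
        close(fd);
        return 0;
}
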
diff --git a/block/blk-map.c b/block/blk-map.c
index f890d4345b0c..b8d2725324a6 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <scsi/sg.h> /* for struct sg_iovec */
+#include <linux/uio.h>
#include "blk.h"
@@ -39,138 +39,12 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
-static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
- struct rq_map_data *map_data, void __user *ubuf,
- unsigned int len, gfp_t gfp_mask)
-{
- unsigned long uaddr;
- struct bio *bio, *orig_bio;
- int reading, ret;
-
- reading = rq_data_dir(rq) == READ;
-
- /*
- * if alignment requirement is satisfied, map in user pages for
- * direct dma. else, set up kernel bounce buffers
- */
- uaddr = (unsigned long) ubuf;
- if (blk_rq_aligned(q, uaddr, len) && !map_data)
- bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
- else
- bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
-
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- if (map_data && map_data->null_mapped)
- bio->bi_flags |= (1 << BIO_NULL_MAPPED);
-
- orig_bio = bio;
- blk_queue_bounce(q, &bio);
-
- /*
- * We link the bounce buffer in and could have to traverse it
- * later so we have to get a ref to prevent it from being freed
- */
- bio_get(bio);
-
- ret = blk_rq_append_bio(q, rq, bio);
- if (!ret)
- return bio->bi_iter.bi_size;
-
- /* if it was boucned we must call the end io function */
- bio_endio(bio, 0);
- __blk_rq_unmap_user(orig_bio);
- bio_put(bio);
- return ret;
-}
-
-/**
- * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
- * @rq: request structure to fill
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @ubuf: the user buffer
- * @len: length of user data
- * @gfp_mask: memory allocation flags
- *
- * Description:
- * Data will be mapped directly for zero copy I/O, if possible. Otherwise
- * a kernel bounce buffer is used.
- *
- * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
- * still in process context.
- *
- * Note: The mapped bio may need to be bounced through blk_queue_bounce()
- * before being submitted to the device, as pages mapped may be out of
- * reach. It's the callers responsibility to make sure this happens. The
- * original bio must be passed back in to blk_rq_unmap_user() for proper
- * unmapping.
- */
-int blk_rq_map_user(struct request_queue *q, struct request *rq,
- struct rq_map_data *map_data, void __user *ubuf,
- unsigned long len, gfp_t gfp_mask)
-{
- unsigned long bytes_read = 0;
- struct bio *bio = NULL;
- int ret;
-
- if (len > (queue_max_hw_sectors(q) << 9))
- return -EINVAL;
- if (!len)
- return -EINVAL;
-
- if (!ubuf && (!map_data || !map_data->null_mapped))
- return -EINVAL;
-
- while (bytes_read != len) {
- unsigned long map_len, end, start;
-
- map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
- end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- start = (unsigned long)ubuf >> PAGE_SHIFT;
-
- /*
- * A bad offset could cause us to require BIO_MAX_PAGES + 1
- * pages. If this happens we just lower the requested
- * mapping len by a page so that we can fit
- */
- if (end - start > BIO_MAX_PAGES)
- map_len -= PAGE_SIZE;
-
- ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
- gfp_mask);
- if (ret < 0)
- goto unmap_rq;
- if (!bio)
- bio = rq->bio;
- bytes_read += ret;
- ubuf += ret;
-
- if (map_data)
- map_data->offset += ret;
- }
-
- if (!bio_flagged(bio, BIO_USER_MAPPED))
- rq->cmd_flags |= REQ_COPY_USER;
-
- return 0;
-unmap_rq:
- blk_rq_unmap_user(bio);
- rq->bio = NULL;
- return ret;
-}
-EXPORT_SYMBOL(blk_rq_map_user);
-
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to map data to
* @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @iov: pointer to the iovec
- * @iov_count: number of elements in the iovec
- * @len: I/O byte count
+ * @iter: iovec iterator
* @gfp_mask: memory allocation flags
*
* Description:
@@ -187,20 +61,21 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
- struct rq_map_data *map_data, const struct sg_iovec *iov,
- int iov_count, unsigned int len, gfp_t gfp_mask)
+ struct rq_map_data *map_data,
+ const struct iov_iter *iter, gfp_t gfp_mask)
{
struct bio *bio;
- int i, read = rq_data_dir(rq) == READ;
int unaligned = 0;
+ struct iov_iter i;
+ struct iovec iov;
- if (!iov || iov_count <= 0)
+ if (!iter || !iter->count)
return -EINVAL;
- for (i = 0; i < iov_count; i++) {
- unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ iov_for_each(iov, i, *iter) {
+ unsigned long uaddr = (unsigned long) iov.iov_base;
- if (!iov[i].iov_len)
+ if (!iov.iov_len)
return -EINVAL;
/*
@@ -210,16 +85,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
unaligned = 1;
}
- if (unaligned || (q->dma_pad_mask & len) || map_data)
- bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
- gfp_mask);
+ if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
+ bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
else
- bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
+ bio = bio_map_user_iov(q, iter, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (bio->bi_iter.bi_size != len) {
+ if (map_data && map_data->null_mapped)
+ bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
+ if (bio->bi_iter.bi_size != iter->count) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
@@ -241,6 +118,21 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+ struct rq_map_data *map_data, void __user *ubuf,
+ unsigned long len, gfp_t gfp_mask)
+{
+ struct iovec iov;
+ struct iov_iter i;
+
+ iov.iov_base = ubuf;
+ iov.iov_len = len;
+ iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
+
+ return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
+}
+EXPORT_SYMBOL(blk_rq_map_user);
+
/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 89b97b5e0881..fc1ff3b1ea1f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -283,35 +283,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_rq_map_sg);
-/**
- * blk_bio_map_sg - map a bio to a scatterlist
- * @q: request_queue in question
- * @bio: bio being mapped
- * @sglist: scatterlist being mapped
- *
- * Note:
- * Caller must make sure sg can hold bio->bi_phys_segments entries
- *
- * Will return the number of sg entries setup
- */
-int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist)
-{
- struct scatterlist *sg = NULL;
- int nsegs;
- struct bio *next = bio->bi_next;
- bio->bi_next = NULL;
-
- nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
- bio->bi_next = next;
- if (sg)
- sg_mark_end(sg);
-
- BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
- return nsegs;
-}
-EXPORT_SYMBOL(blk_bio_map_sg);
-
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
@@ -385,6 +356,14 @@ static bool req_no_special_merge(struct request *req)
return !q->mq_ops && req->special;
}
+static int req_gap_to_prev(struct request *req, struct request *next)
+{
+ struct bio *prev = req->biotail;
+
+ return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
+ next->bio->bi_io_vec[0].bv_offset);
+}
+
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -399,6 +378,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
if (req_no_special_merge(req) || req_no_special_merge(next))
return 0;
+ if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
+ req_gap_to_prev(req, next))
+ return 0;
+
/*
* Will it become too large?
*/
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32e8dbb9ad1c..d53a764b05ea 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
}
/*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wake up all waiters potentially sleeping on tags
*/
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
wake_index = bt_index_inc(wake_index);
}
+
+ if (include_reserve) {
+ bt = &tags->breserved_tags;
+ if (waitqueue_active(&bt->bs[0].wait))
+ wake_up(&bt->bs[0].wait);
+ }
}
/*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
atomic_dec(&tags->active_queues);
- blk_mq_tag_wakeup_all(tags);
+ blk_mq_tag_wakeup_all(tags, false);
}
/*
@@ -134,35 +140,39 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
return atomic_read(&hctx->nr_active) < depth;
}
-static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
+static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
+ bool nowrap)
{
- int tag, org_last_tag, end;
- bool wrap = last_tag != 0;
+ int tag, org_last_tag = last_tag;
- org_last_tag = last_tag;
- end = bm->depth;
- do {
-restart:
- tag = find_next_zero_bit(&bm->word, end, last_tag);
- if (unlikely(tag >= end)) {
+ while (1) {
+ tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
+ if (unlikely(tag >= bm->depth)) {
/*
- * We started with an offset, start from 0 to
+ * We started with an offset, and we didn't reset the
+ * offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
- if (wrap) {
- wrap = false;
- end = org_last_tag;
- last_tag = 0;
- goto restart;
+ if (org_last_tag && last_tag && !nowrap) {
+ last_tag = org_last_tag = 0;
+ continue;
}
return -1;
}
+
+ if (!test_and_set_bit(tag, &bm->word))
+ break;
+
last_tag = tag + 1;
- } while (test_and_set_bit(tag, &bm->word));
+ if (last_tag >= bm->depth - 1)
+ last_tag = 0;
+ }
return tag;
}
+#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
+
/*
* Straight forward bitmap tag implementation, where each bit is a tag
* (cleared == free, and set == busy). The small twist is using per-cpu
@@ -175,7 +185,7 @@ restart:
* until the map is exhausted.
*/
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
- unsigned int *tag_cache)
+ unsigned int *tag_cache, struct blk_mq_tags *tags)
{
unsigned int last_tag, org_last_tag;
int index, i, tag;
@@ -187,15 +197,24 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
index = TAG_TO_INDEX(bt, last_tag);
for (i = 0; i < bt->map_nr; i++) {
- tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
+ tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
+ BT_ALLOC_RR(tags));
if (tag != -1) {
tag += (index << bt->bits_per_word);
goto done;
}
- last_tag = 0;
- if (++index >= bt->map_nr)
+ /*
+ * Jump to next index, and reset the last tag to be the
+ * first tag of that index
+ */
+ index++;
+ last_tag = (index << bt->bits_per_word);
+
+ if (index >= bt->map_nr) {
index = 0;
+ last_tag = 0;
+ }
}
*tag_cache = 0;
@@ -206,7 +225,7 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
* up using the specific cached tag.
*/
done:
- if (tag == org_last_tag) {
+ if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
last_tag = tag + 1;
if (last_tag >= bt->depth - 1)
last_tag = 0;
@@ -235,13 +254,13 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
static int bt_get(struct blk_mq_alloc_data *data,
struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx,
- unsigned int *last_tag)
+ unsigned int *last_tag, struct blk_mq_tags *tags)
{
struct bt_wait_state *bs;
DEFINE_WAIT(wait);
int tag;
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
return tag;
@@ -252,7 +271,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
do {
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
@@ -267,7 +286,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
@@ -298,7 +317,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
- &data->ctx->last_tag);
+ &data->ctx->last_tag, data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
@@ -314,7 +333,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
return BLK_MQ_TAG_FAIL;
}
- tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
+ tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
+ data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
@@ -386,7 +406,8 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
BUG_ON(real_tag >= tags->nr_tags);
bt_clear_tag(&tags->bitmap_tags, real_tag);
- *last_tag = real_tag;
+ if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
+ *last_tag = real_tag;
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
bt_clear_tag(&tags->breserved_tags, tag);
@@ -503,6 +524,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
if (!bt->bs) {
kfree(bt->map);
+ bt->map = NULL;
return -ENOMEM;
}
@@ -523,10 +545,12 @@ static void bt_free(struct blk_mq_bitmap_tags *bt)
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
- int node)
+ int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+ tags->alloc_policy = alloc_policy;
+
if (bt_alloc(&tags->bitmap_tags, depth, node, false))
goto enomem;
if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
@@ -540,7 +564,8 @@ enomem:
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
- unsigned int reserved_tags, int node)
+ unsigned int reserved_tags,
+ int node, int alloc_policy)
{
struct blk_mq_tags *tags;
@@ -556,7 +581,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
- return blk_mq_init_bitmap_tags(tags, node);
+ return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
@@ -584,7 +609,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
* static and should never need resizing.
*/
bt_update_count(&tags->bitmap_tags, tdepth);
- blk_mq_tag_wakeup_all(tags);
+ blk_mq_tag_wakeup_all(tags, false);
return 0;
}
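
The alloc_policy plumbing above makes the tag bitmap behave differently per policy: FIFO still writes the freed tag back into the per-cpu last_tag cache so it is handed out again quickly, while round-robin (BT_ALLOC_RR) passes nowrap=true to __bt_get_word() so a failed word search never rewinds to bit 0 and the outer loop simply advances to the next word. A standalone C model of that single-word search; find_next_zero_bit() and test_and_set_bit() are approximated with plain loops, so this is an illustration of the policy, not kernel code:

#include <stdint.h>
#include <stdio.h>

/*
 * Find a clear bit in *word starting at last_tag, set it, and return it.
 * FIFO (nowrap == 0) retries once from bit 0 when the search started at an
 * offset; round-robin (nowrap != 0) gives up so the caller moves on.
 */
static int get_tag(uint64_t *word, unsigned int depth, unsigned int last_tag,
                   int nowrap)
{
        unsigned int org = last_tag, tag = last_tag;

        for (;;) {
                while (tag < depth && (*word & (1ULL << tag)))
                        tag++;                   /* ~ find_next_zero_bit() */
                if (tag >= depth) {
                        if (org && !nowrap) {    /* wrap to bit 0, once */
                                org = tag = 0;
                                continue;
                        }
                        return -1;
                }
                *word |= 1ULL << tag;            /* ~ test_and_set_bit() */
                return tag;
        }
}

int main(void)
{
        uint64_t word = 0;
        unsigned int last = 0;

        /* Round-robin: keep advancing the start point after every allocation. */
        for (int i = 0; i < 6; i++) {
                int tag = get_tag(&word, 8, last, 1);

                printf("got tag %d\n", tag);
                last = (tag + 1) % 8;
                if (i == 2)
                        word &= ~1ULL;           /* free tag 0 early; RR skips it */
        }
        return 0;
}
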
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 6206ed17ef76..90767b370308 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -42,10 +42,12 @@ struct blk_mq_tags {
struct request **rqs;
struct list_head page_list;
+
+ int alloc_policy;
};
-extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
+extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
@@ -54,6 +56,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da1ab5641227..4f4bea21052e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,6 +33,7 @@ static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+static void blk_mq_run_queues(struct request_queue *q);
/*
* Check if any of the ctx's have pending work in this hardware queue
@@ -107,7 +108,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
{
bool freeze;
@@ -117,9 +118,10 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
if (freeze) {
percpu_ref_kill(&q->mq_usage_counter);
- blk_mq_run_queues(q, false);
+ blk_mq_run_queues(q);
}
}
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
@@ -135,8 +137,9 @@ void blk_mq_freeze_queue(struct request_queue *q)
blk_mq_freeze_queue_start(q);
blk_mq_freeze_queue_wait(q);
}
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
{
bool wake;
@@ -149,6 +152,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
}
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_wakeup_all(hctx->tags, true);
+
+ /*
+ * If we are called because the queue has now been marked as
+ * dying, we need to ensure that processes currently waiting on
+ * the queue are notified as well.
+ */
+ wake_up_all(&q->mq_freeze_wq);
+}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
@@ -258,8 +279,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
ctx = alloc_data.ctx;
}
blk_mq_put_ctx(ctx);
- if (!rq)
+ if (!rq) {
+ blk_mq_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
+ }
return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +406,12 @@ void blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
+int blk_mq_request_started(struct request *rq)
+{
+ return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -500,12 +529,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+ cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
void blk_mq_kick_requeue_list(struct request_queue *q)
{
kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+ unsigned long flags;
+ LIST_HEAD(rq_list);
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ list_splice_init(&q->requeue_list, &rq_list);
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+ while (!list_empty(&rq_list)) {
+ struct request *rq;
+
+ rq = list_first_entry(&rq_list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
static inline bool is_flush_request(struct request *rq,
struct blk_flush_queue *fq, unsigned int tag)
{
@@ -566,13 +621,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
break;
}
}
-
+
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
struct blk_mq_timeout_data *data = priv;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ /*
+ * If a request wasn't started before the queue was
+ * marked dying, kill it here or it'll go unnoticed.
+ */
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_complete_request(rq);
+ }
+ return;
+ }
+ if (rq->cmd_flags & REQ_NO_TIMEOUT)
return;
if (time_after_eq(jiffies, rq->deadline)) {
@@ -838,7 +904,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
&hctx->run_work, 0);
}
-void blk_mq_run_queues(struct request_queue *q, bool async)
+static void blk_mq_run_queues(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -849,10 +915,9 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
- blk_mq_run_hw_queue(hctx, async);
+ blk_mq_run_hw_queue(hctx, false);
}
}
-EXPORT_SYMBOL(blk_mq_run_queues);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
@@ -890,7 +955,6 @@ void blk_mq_start_hw_queues(struct request_queue *q)
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);
-
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
@@ -1359,7 +1423,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
size_t rq_size, left;
tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
- set->numa_node);
+ set->numa_node,
+ BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
if (!tags)
return NULL;
@@ -1577,10 +1642,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- queue_for_each_hw_ctx(q, hctx, i) {
+ queue_for_each_hw_ctx(q, hctx, i)
free_cpumask_var(hctx->cpumask);
- kfree(hctx);
- }
}
static int blk_mq_init_hctx(struct request_queue *q,
@@ -1601,7 +1664,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue = q;
hctx->queue_num = hctx_idx;
hctx->flags = set->flags;
- hctx->cmd_size = set->cmd_size;
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
@@ -1806,6 +1868,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
mutex_unlock(&set->tag_list_lock);
}
+/*
+ * This is the actual release handler for blk-mq, but we call it from the
+ * request queue's release handler to avoid use-after-free headaches:
+ * q->mq_kobj should not have been introduced, but we cannot group the
+ * ctx/kctx kobjects without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ /* hctx kobj stays in hctx */
+ queue_for_each_hw_ctx(q, hctx, i)
+ kfree(hctx);
+
+ kfree(q->queue_hw_ctx);
+
+ /* ctx kobj stays in queue_ctx */
+ free_percpu(q->queue_ctx);
+}
+
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
struct blk_mq_hw_ctx **hctxs;
@@ -1939,12 +2022,8 @@ void blk_mq_free_queue(struct request_queue *q)
percpu_ref_exit(&q->mq_usage_counter);
- free_percpu(q->queue_ctx);
- kfree(q->queue_hw_ctx);
kfree(q->mq_map);
- q->queue_ctx = NULL;
- q->queue_hw_ctx = NULL;
q->mq_map = NULL;
mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 206230e64f79..6a48c4c0d8a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
/*
* CPU hotplug helpers
@@ -61,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
+void blk_mq_release(struct request_queue *q);
+
/*
* Basic implementation of sparser bitmap, allowing the user to spread
* the bits over more cachelines.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 935ea2aa0730..faaf36ade7eb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
if (!q->mq_ops)
blk_free_flush_queue(q->fq);
+ else
+ blk_mq_release(q);
blk_trace_shutdown(q);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a185b86741e5..f0344e6939d5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -119,7 +119,7 @@ fail:
}
static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
- int depth)
+ int depth, int alloc_policy)
{
struct blk_queue_tag *tags;
@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
goto fail;
atomic_set(&tags->refcnt, 1);
+ tags->alloc_policy = alloc_policy;
+ tags->next_tag = 0;
return tags;
fail:
kfree(tags);
@@ -140,10 +142,11 @@ fail:
/**
* blk_init_tags - initialize the tag info for an external tag map
* @depth: the maximum queue depth supported
+ * @alloc_policy: tag allocation policy
**/
-struct blk_queue_tag *blk_init_tags(int depth)
+struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
- return __blk_queue_init_tags(NULL, depth);
+ return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
* @q: the request queue for the device
* @depth: the maximum queue depth supported
* @tags: the tag to use
+ * @alloc_policy: tag allocation policy
*
* Queue lock must be held here if the function is called to resize an
* existing map.
**/
int blk_queue_init_tags(struct request_queue *q, int depth,
- struct blk_queue_tag *tags)
+ struct blk_queue_tag *tags, int alloc_policy)
{
int rc;
BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
if (!tags && !q->queue_tags) {
- tags = __blk_queue_init_tags(q, depth);
+ tags = __blk_queue_init_tags(q, depth, alloc_policy);
if (!tags)
return -ENOMEM;
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
}
do {
- tag = find_first_zero_bit(bqt->tag_map, max_depth);
- if (tag >= max_depth)
- return 1;
+ if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
+ tag = find_first_zero_bit(bqt->tag_map, max_depth);
+ if (tag >= max_depth)
+ return 1;
+ } else {
+ int start = bqt->next_tag;
+ int size = min_t(int, bqt->max_depth, max_depth + start);
+ tag = find_next_zero_bit(bqt->tag_map, size, start);
+ if (tag >= size && start + size > bqt->max_depth) {
+ size = start + size - bqt->max_depth;
+ tag = find_first_zero_bit(bqt->tag_map, size);
+ }
+ if (tag >= size)
+ return 1;
+ }
} while (test_and_set_bit_lock(tag, bqt->tag_map));
/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
* See blk_queue_end_tag for details.
*/
+ bqt->next_tag = (tag + 1) % bqt->max_depth;
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 56c025894cdf..246dfb16c3d9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
struct request_queue *q = req->q;
unsigned long expiry;
+ if (req->cmd_flags & REQ_NO_TIMEOUT)
+ return;
+
/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
if (!q->mq_ops && !q->rq_timed_out_fn)
return;
diff --git a/block/bsg.c b/block/bsg.c
index 276e869e686c..d214e929ce18 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -136,42 +136,6 @@ static inline struct hlist_head *bsg_dev_idx_hash(int index)
return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
-static int bsg_io_schedule(struct bsg_device *bd)
-{
- DEFINE_WAIT(wait);
- int ret = 0;
-
- spin_lock_irq(&bd->lock);
-
- BUG_ON(bd->done_cmds > bd->queued_cmds);
-
- /*
- * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
- * work to do", even though we return -ENOSPC after this same test
- * during bsg_write() -- there, it means our buffer can't have more
- * bsg_commands added to it, thus has no space left.
- */
- if (bd->done_cmds == bd->queued_cmds) {
- ret = -ENODATA;
- goto unlock;
- }
-
- if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
- ret = -EAGAIN;
- goto unlock;
- }
-
- prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&bd->lock);
- io_schedule();
- finish_wait(&bd->wq_done, &wait);
-
- return ret;
-unlock:
- spin_unlock_irq(&bd->lock);
- return ret;
-}
-
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd,
fmode_t has_write_perm)
@@ -482,6 +446,30 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
return ret;
}
+static bool bsg_complete(struct bsg_device *bd)
+{
+ bool ret = false;
+ bool spin;
+
+ do {
+ spin_lock_irq(&bd->lock);
+
+ BUG_ON(bd->done_cmds > bd->queued_cmds);
+
+ /*
+ * All commands consumed.
+ */
+ if (bd->done_cmds == bd->queued_cmds)
+ ret = true;
+
+ spin = !test_bit(BSG_F_BLOCK, &bd->flags);
+
+ spin_unlock_irq(&bd->lock);
+ } while (!ret && spin);
+
+ return ret;
+}
+
static int bsg_complete_all_commands(struct bsg_device *bd)
{
struct bsg_command *bc;
@@ -492,17 +480,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
/*
* wait for all commands to complete
*/
- ret = 0;
- do {
- ret = bsg_io_schedule(bd);
- /*
- * look for -ENODATA specifically -- we'll sometimes get
- * -ERESTARTSYS when we've taken a signal, but we can't
- * return until we're done freeing the queue, so ignore
- * it. The signal will get handled when we're done freeing
- * the bsg_device.
- */
- } while (ret != -ENODATA);
+ io_wait_event(bd->wq_done, bsg_complete(bd));
/*
* discard done commands
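
With bsg_complete() in place, bsg_complete_all_commands() can just sleep in io_wait_event() until every queued command has been consumed, instead of the old hand-rolled prepare_to_wait()/io_schedule() loop (the non-blocking BSG_F_BLOCK case spins on the same predicate). A small pthread sketch of that wait-for-all-completions condition; it models only the blocking case and none of the names are kernel APIs:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq_done = PTHREAD_COND_INITIALIZER;
static int queued_cmds, done_cmds;

static void queue_cmd(void)
{
        pthread_mutex_lock(&lock);
        queued_cmds++;
        pthread_mutex_unlock(&lock);
}

static void *complete_cmd(void *arg)
{
        usleep(10000);                          /* pretend the command took a while */
        pthread_mutex_lock(&lock);
        done_cmds++;
        pthread_cond_signal(&wq_done);          /* wake complete_all_commands() */
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* io_wait_event(bd->wq_done, bsg_complete(bd)) boils down to this. */
static void complete_all_commands(void)
{
        pthread_mutex_lock(&lock);
        while (done_cmds != queued_cmds)
                pthread_cond_wait(&wq_done, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t[3];

        for (int i = 0; i < 3; i++) {
                queue_cmd();
                pthread_create(&t[i], NULL, complete_cmd, NULL);
        }
        complete_all_commands();
        printf("all %d commands done\n", done_cmds);
        for (int i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        return 0;
}
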
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6f2751d305de..5da8e6e9ab4b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3590,6 +3590,11 @@ retry:
blkcg = bio_blkcg(bio);
cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
+ if (!cfqg) {
+ cfqq = &cfqd->oom_cfqq;
+ goto out;
+ }
+
cfqq = cic_to_cfqq(cic, is_sync);
/*
@@ -3626,7 +3631,7 @@ retry:
} else
cfqq = &cfqd->oom_cfqq;
}
-
+out:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
@@ -3656,12 +3661,17 @@ static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
if (!is_sync) {
+ if (!ioprio_valid(cic->ioprio)) {
+ struct task_struct *tsk = current;
+ ioprio = task_nice_ioprio(tsk);
+ ioprio_class = task_nice_ioclass(tsk);
+ }
async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
cfqq = *async_cfqq;
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 6c7bf903742f..7d8befde2aca 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -198,7 +198,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
if (start + len > (i_size_read(bdev->bd_inode) >> 9))
return -EINVAL;
- return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
+ return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
}
static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/block/partitions/check.c b/block/partitions/check.c
index 9ac1df74f699..16118d11dbfc 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -184,12 +184,12 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
if (err)
/* The partition is unrecognized. So report I/O errors if there were any */
res = err;
- if (!res)
- strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
- else if (warn_no_part)
- strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
-
- printk(KERN_INFO "%s", state->pp_buf);
+ if (res) {
+ if (warn_no_part)
+ strlcat(state->pp_buf,
+ " unable to read partition table\n", PAGE_SIZE);
+ printk(KERN_INFO "%s", state->pp_buf);
+ }
free_page((unsigned long)state->pp_buf);
free_partitions(state);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 56d08fd75b1a..26cb624ace05 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -715,7 +715,7 @@ int efi_partition(struct parsed_partitions *state)
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
info = &state->parts[i + 1].info;
- efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
+ efi_guid_to_str(&ptes[i].unique_partition_guid, info->uuid);
/* Naively convert UTF16-LE to 7 bits. */
label_max = min(ARRAY_SIZE(info->volname) - 1,
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 28163fad3c5d..e1f71c396193 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -332,7 +332,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
ret = 0;
if (hdr->iovec_count) {
- size_t iov_data_len;
+ struct iov_iter i;
struct iovec *iov = NULL;
ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
@@ -342,20 +342,11 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
goto out_free_cdb;
}
- iov_data_len = ret;
- ret = 0;
-
/* SG_IO howto says that the shorter of the two wins */
- if (hdr->dxfer_len < iov_data_len) {
- hdr->iovec_count = iov_shorten(iov,
- hdr->iovec_count,
- hdr->dxfer_len);
- iov_data_len = hdr->dxfer_len;
- }
+ iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
+ min_t(unsigned, ret, hdr->dxfer_len));
- ret = blk_rq_map_user_iov(q, rq, NULL, (struct sg_iovec *) iov,
- hdr->iovec_count,
- iov_data_len, GFP_KERNEL);
+ ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
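
The sg_io() conversion feeds the user iovec straight into an iov_iter and lets the min_t() cap enforce the SG_IO rule that the shorter of dxfer_len and the iovec total wins. From user space the interface is unchanged: set hdr.iovec_count and point dxferp at an iovec array. A hedged example issuing a 6-byte INQUIRY whose 96-byte response is scattered across two buffers; the device path is an assumption and SG_IO on a whole-disk node generally needs root or CAP_SYS_RAWIO:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(int argc, char **argv)
{
        const char *dev = argc > 1 ? argv[1] : "/dev/sda";      /* assumed device */
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };        /* INQUIRY, 96 bytes */
        unsigned char buf0[64], buf1[32], sense[32];
        struct iovec iov[2] = {
                { .iov_base = buf0, .iov_len = sizeof(buf0) },
                { .iov_base = buf1, .iov_len = sizeof(buf1) },
        };
        struct sg_io_hdr hdr;
        int fd = open(dev, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.iovec_count = 2;            /* dxferp points at the iovec array */
        hdr.dxferp = iov;
        hdr.dxfer_len = sizeof(buf0) + sizeof(buf1);
        hdr.mx_sb_len = sizeof(sense);
        hdr.sbp = sense;
        hdr.timeout = 5000;             /* milliseconds */

        if (ioctl(fd, SG_IO, &hdr)) {
                perror("SG_IO");
                close(fd);
                return 1;
        }
        printf("vendor: %.8s\n", (char *)buf0 + 8);     /* vendor id at byte 8 */
        close(fd);
        return 0;
}
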
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 87bbc9c1e681..50f4da44a304 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -427,6 +427,15 @@ config CRYPTO_MD5
help
MD5 message digest algorithm (RFC1321).
+config CRYPTO_MD5_OCTEON
+ tristate "MD5 digest algorithm (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_MD5
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321) implemented
+ using OCTEON crypto instructions, when available.
+
config CRYPTO_MD5_SPARC64
tristate "MD5 digest algorithm (SPARC64)"
depends on SPARC64
@@ -1505,6 +1514,15 @@ config CRYPTO_USER_API_SKCIPHER
This option enables the user-spaces interface for symmetric
key cipher algorithms.
+config CRYPTO_USER_API_RNG
+ tristate "User-space interface for random number generator algorithms"
+ depends on NET
+ select CRYPTO_RNG
+ select CRYPTO_USER_API
+ help
+	  This option enables the user-space interface for random
+ number generator algorithms.
+
config CRYPTO_HASH_INFO
bool
diff --git a/crypto/Makefile b/crypto/Makefile
index 1445b9100c05..ba19465f9ad3 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 40886c489903..db201bca1581 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -69,6 +69,7 @@ static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
return max(start, end_page);
}
@@ -86,7 +87,7 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
if (n == len_this_page)
break;
n -= len_this_page;
- scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+ scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
return bsize;
@@ -284,6 +285,7 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+
if (err)
return err;
}
@@ -589,7 +591,8 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
if (IS_ERR(inst))
goto put_tmpl;
- if ((err = crypto_register_instance(tmpl, inst))) {
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
tmpl->free(inst);
goto put_tmpl;
}
diff --git a/crypto/aead.c b/crypto/aead.c
index 547491e35c63..222271070b49 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -448,7 +448,8 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
if (IS_ERR(inst))
goto put_tmpl;
- if ((err = crypto_register_instance(tmpl, inst))) {
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
tmpl->free(inst);
goto put_tmpl;
}
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 9b3c54c1cbe8..3dd101144a58 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1475,3 +1475,4 @@ module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-generic");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 4665b79c729a..7f8b7edcadca 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -188,7 +188,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey,
err = type->setkey(ask->private, key, keylen);
out:
- sock_kfree_s(sk, key, keylen);
+ sock_kzfree_s(sk, key, keylen);
return err;
}
@@ -215,6 +215,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
goto unlock;
err = alg_setkey(sk, optval, optlen);
+ break;
+ case ALG_SET_AEAD_AUTHSIZE:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+ if (!type->setauthsize)
+ goto unlock;
+ err = type->setauthsize(ask->private, optlen);
}
unlock:
@@ -338,49 +345,31 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
- int write)
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
- unsigned long from = (unsigned long)addr;
- unsigned long npages;
- unsigned off;
- int err;
- int i;
+ size_t off;
+ ssize_t n;
+ int npages, i;
- err = -EFAULT;
- if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
- goto out;
+ n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+ if (n < 0)
+ return n;
- off = from & ~PAGE_MASK;
- npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (npages > ALG_MAX_PAGES)
- npages = ALG_MAX_PAGES;
-
- err = get_user_pages_fast(from, npages, write, sgl->pages);
- if (err < 0)
- goto out;
-
- npages = err;
- err = -EINVAL;
+ npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (WARN_ON(npages == 0))
- goto out;
-
- err = 0;
+ return -EINVAL;
sg_init_table(sgl->sg, npages);
- for (i = 0; i < npages; i++) {
+ for (i = 0, len = n; i < npages; i++) {
int plen = min_t(int, len, PAGE_SIZE - off);
sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
off = 0;
len -= plen;
- err += plen;
}
-
-out:
- return err;
+ return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
@@ -405,7 +394,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
if (cmsg->cmsg_level != SOL_ALG)
continue;
- switch(cmsg->cmsg_type) {
+ switch (cmsg->cmsg_type) {
case ALG_SET_IV:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
return -EINVAL;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f6a36a52d738..8acb886032ae 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -55,6 +55,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
if (offset & alignmask) {
unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+
if (nbytes > unaligned)
nbytes = unaligned;
}
@@ -120,7 +121,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (!walk->total)
return 0;
- walk->sg = scatterwalk_sg_next(walk->sg);
+ walk->sg = sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 71a8143e23b1..83b04e0884b1 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -473,6 +473,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
list = &tmpl->instances;
hlist_for_each_entry(inst, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
+
BUG_ON(err);
}
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 01f56eb7816e..01da360bdb55 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- unsigned long iovlen;
- const struct iovec *iov;
long copied = 0;
int err;
@@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->more = 0;
- for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
- iovlen--, iov++) {
- unsigned long seglen = iov->iov_len;
- char __user *from = iov->iov_base;
+ while (iov_iter_count(&msg->msg_iter)) {
+ int len = iov_iter_count(&msg->msg_iter);
- while (seglen) {
- int len = min_t(unsigned long, seglen, limit);
- int newlen;
+ if (len > limit)
+ len = limit;
- newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
- if (newlen < 0) {
- err = copied ? 0 : newlen;
- goto unlock;
- }
-
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
- newlen);
-
- err = af_alg_wait_for_completion(
- crypto_ahash_update(&ctx->req),
- &ctx->completion);
+ len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+ if (len < 0) {
+ err = copied ? 0 : len;
+ goto unlock;
+ }
- af_alg_free_sg(&ctx->sgl);
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
- if (err)
- goto unlock;
+ err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
+ &ctx->completion);
+ af_alg_free_sg(&ctx->sgl);
+ if (err)
+ goto unlock;
- seglen -= newlen;
- from += newlen;
- copied += newlen;
- }
+ copied += len;
+ iov_iter_advance(&msg->msg_iter, len);
}
err = 0;
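
The hash_sendmsg() rewrite walks msg->msg_iter directly, mapping each chunk with af_alg_make_sg() and feeding it to crypto_ahash_update(), so arbitrary iovec layouts from user space no longer need the per-segment inner loop. A user-space example of the algif_hash interface this serves, assuming CONFIG_CRYPTO_USER_API_HASH and an available sha256 implementation:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type = "hash",
                .salg_name = "sha256",
        };
        unsigned char digest[32];
        int tfm, op;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa))) {
                perror("AF_ALG hash");
                return 1;
        }
        op = accept(tfm, NULL, 0);      /* one operation instance */

        /* hash_sendmsg() consumes this buffer through msg->msg_iter */
        if (send(op, "abc", 3, 0) != 3 ||
            read(op, digest, sizeof(digest)) != (ssize_t)sizeof(digest)) {
                perror("hash");
                return 1;
        }
        for (int i = 0; i < 32; i++)
                printf("%02x", digest[i]);
        printf("\n");                   /* SHA-256("abc") */
        close(op);
        close(tfm);
        return 0;
}
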
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
new file mode 100644
index 000000000000..67f612cfed97
--- /dev/null
+++ b/crypto/algif_rng.c
@@ -0,0 +1,192 @@
+/*
+ * algif_rng: User-space interface for random number generators
+ *
+ * This file provides the user-space API for random number generators.
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL2
+ * are required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <crypto/rng.h>
+#include <linux/random.h>
+#include <crypto/if_alg.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("User-space interface for random number generators");
+
+struct rng_ctx {
+#define MAXSIZE 128
+ unsigned int len;
+ struct crypto_rng *drng;
+};
+
+static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct rng_ctx *ctx = ask->private;
+ int err = -EFAULT;
+ int genlen = 0;
+ u8 result[MAXSIZE];
+
+ if (len == 0)
+ return 0;
+ if (len > MAXSIZE)
+ len = MAXSIZE;
+
+ /*
+ * although not strictly needed, this is a precaution against coding
+ * errors
+ */
+ memset(result, 0, len);
+
+ /*
+ * The enforcement of a proper seeding of an RNG is done within an
+ * RNG implementation. Some RNGs (DRBG, krng) do not need specific
+ * seeding as they automatically seed. The X9.31 DRNG will return
+ * an error if it was not seeded properly.
+ */
+ genlen = crypto_rng_get_bytes(ctx->drng, result, len);
+ if (genlen < 0)
+ return genlen;
+
+ err = memcpy_to_msg(msg, result, len);
+ memzero_explicit(result, genlen);
+
+ return err ? err : len;
+}
+
+static struct proto_ops algif_rng_ops = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+ .poll = sock_no_poll,
+ .sendmsg = sock_no_sendmsg,
+ .sendpage = sock_no_sendpage,
+
+ .release = af_alg_release,
+ .recvmsg = rng_recvmsg,
+};
+
+static void *rng_bind(const char *name, u32 type, u32 mask)
+{
+ return crypto_alloc_rng(name, type, mask);
+}
+
+static void rng_release(void *private)
+{
+ crypto_free_rng(private);
+}
+
+static void rng_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct rng_ctx *ctx = ask->private;
+
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+}
+
+static int rng_accept_parent(void *private, struct sock *sk)
+{
+ struct rng_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int len = sizeof(*ctx);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->len = len;
+
+ /*
+	 * No seeding is done at this point -- if multiple accepts are
+ * done on one RNG instance, each resulting FD points to the same
+ * state of the RNG.
+ */
+
+ ctx->drng = private;
+ ask->private = ctx;
+ sk->sk_destruct = rng_sock_destruct;
+
+ return 0;
+}
+
+static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
+{
+ /*
+	 * The check that seedlen is of sufficient size is done in the RNG
+	 * implementations.
+ */
+ return crypto_rng_reset(private, (u8 *)seed, seedlen);
+}
+
+static const struct af_alg_type algif_type_rng = {
+ .bind = rng_bind,
+ .release = rng_release,
+ .accept = rng_accept_parent,
+ .setkey = rng_setkey,
+ .ops = &algif_rng_ops,
+ .name = "rng",
+ .owner = THIS_MODULE
+};
+
+static int __init rng_init(void)
+{
+ return af_alg_register_type(&algif_type_rng);
+}
+
+static void __exit rng_exit(void)
+{
+ int err = af_alg_unregister_type(&algif_type_rng);
+ BUG_ON(err);
+}
+
+module_init(rng_init);
+module_exit(rng_exit);
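
The new algif_rng.c exposes kernel RNGs through the existing AF_ALG socket interface: bind a type "rng" socket, optionally seed it through ALG_SET_KEY (rng_setkey() maps this to crypto_rng_reset()), then accept an operation fd and read random bytes, at most MAXSIZE (128) per call as enforced by rng_recvmsg(). A user-space sketch; "stdrng" and the 16-byte seed are assumptions, and whether seeding is required depends on the underlying RNG:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type = "rng",
                .salg_name = "stdrng",          /* assumed default RNG name */
        };
        unsigned char seed[16] = "0123456789abcdef";
        unsigned char buf[128];                 /* rng_recvmsg() caps each read at MAXSIZE */
        int tfm, op, n;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa))) {
                perror("AF_ALG rng");
                return 1;
        }
        /* rng_setkey() -> crypto_rng_reset(): seed before pulling data */
        if (setsockopt(tfm, SOL_ALG, ALG_SET_KEY, seed, sizeof(seed))) {
                perror("ALG_SET_KEY");
                return 1;
        }
        op = accept(tfm, NULL, 0);
        n = read(op, buf, sizeof(buf));         /* rng_recvmsg() */
        printf("got %d random bytes\n", n);
        close(op);
        close(tfm);
        return 0;
}
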
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index c12207c8dde9..0c8a1e5ccadf 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -330,6 +330,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
sg = sgl->sg;
+ sg_unmark_end(sg + sgl->cur);
do {
i = sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
@@ -355,6 +356,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
sgl->cur++;
} while (len && sgl->cur < MAX_SGL_ENTS);
+ if (!size)
+ sg_mark_end(sg + sgl->cur - 1);
+
ctx->merge = plen & (PAGE_SIZE - 1);
}
@@ -401,6 +405,10 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
ctx->merge = 0;
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ if (sgl->cur)
+ sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+ sg_mark_end(sgl->sg + sgl->cur);
get_page(page);
sg_set_page(sgl->sg + sgl->cur, page, size, offset);
sgl->cur++;
@@ -426,67 +434,58 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
- unsigned long iovlen;
- const struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
- for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
- iovlen--, iov++) {
- unsigned long seglen = iov->iov_len;
- char __user *from = iov->iov_base;
-
- while (seglen) {
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto unlock;
- }
+ while (iov_iter_count(&msg->msg_iter)) {
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
- used = min_t(unsigned long, ctx->used, seglen);
+ while (!sg->length)
+ sg++;
- used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
- err = used;
- if (err < 0)
+ if (!ctx->used) {
+ err = skcipher_wait_for_data(sk, flags);
+ if (err)
goto unlock;
+ }
+
+ used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
- if (ctx->more || used < ctx->used)
- used -= used % bs;
+ used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
+ err = used;
+ if (err < 0)
+ goto unlock;
+
+ if (ctx->more || used < ctx->used)
+ used -= used % bs;
- err = -EINVAL;
- if (!used)
- goto free;
+ err = -EINVAL;
+ if (!used)
+ goto free;
- ablkcipher_request_set_crypt(&ctx->req, sg,
- ctx->rsgl.sg, used,
- ctx->iv);
+ ablkcipher_request_set_crypt(&ctx->req, sg,
+ ctx->rsgl.sg, used,
+ ctx->iv);
- err = af_alg_wait_for_completion(
+ err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
- af_alg_free_sg(&ctx->rsgl);
+ af_alg_free_sg(&ctx->rsgl);
- if (err)
- goto unlock;
+ if (err)
+ goto unlock;
- copied += used;
- from += used;
- seglen -= used;
- skcipher_pull_sgl(sk, used);
- }
+ copied += used;
+ skcipher_pull_sgl(sk, used);
+ iov_iter_advance(&msg->msg_iter, used);
}
err = 0;
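
skcipher_recvmsg() now consumes msg->msg_iter the same way, mapping the destination buffers chunk by chunk with af_alg_make_sg(), and the sendmsg()/sendpage() hunks keep the transmit scatterlist properly end-marked as pages are appended. A user-space sketch of the algif_skcipher interface doing a single AES-CBC encryption; it assumes CONFIG_CRYPTO_USER_API_SKCIPHER, a cbc(aes) provider, an all-zero key and IV, and error handling is trimmed for brevity:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type = "skcipher",
                .salg_name = "cbc(aes)",
        };
        unsigned char key[16] = { 0 }, pt[16] = "sixteen byte msg", ct[16];
        union {
                char buf[CMSG_SPACE(sizeof(int)) +
                         CMSG_SPACE(sizeof(struct af_alg_iv) + 16)];
                struct cmsghdr align;
        } cbuf = { { 0 } };
        struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
        struct msghdr msg = {
                .msg_control = cbuf.buf, .msg_controllen = sizeof(cbuf.buf),
                .msg_iov = &iov, .msg_iovlen = 1,
        };
        struct cmsghdr *cmsg;
        struct af_alg_iv *ivp;
        int tfm, op;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        op = accept(tfm, NULL, 0);

        cmsg = CMSG_FIRSTHDR(&msg);             /* ALG_SET_OP: encrypt */
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_OP;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        *(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

        cmsg = CMSG_NXTHDR(&msg, cmsg);         /* ALG_SET_IV: 16-byte zero IV */
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_IV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
        ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
        ivp->ivlen = 16;
        memset(ivp->iv, 0, 16);

        sendmsg(op, &msg, 0);           /* skcipher_sendmsg() queues the plaintext */
        read(op, ct, sizeof(ct));       /* skcipher_recvmsg() returns the ciphertext */
        for (int i = 0; i < 16; i++)
                printf("%02x", ct[i]);
        printf("\n");
        close(op);
        close(tfm);
        return 0;
}
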
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index b4485a108389..6f5bebc9bf01 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -477,3 +477,4 @@ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 7bd71f02d0dd..87b392a77a93 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -139,3 +139,4 @@ module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-generic");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 1b74c5a3e891..a02286bf319e 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1099,3 +1099,4 @@ module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-generic");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 84c86db67ec7..df5c72629383 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -550,3 +550,4 @@ module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
MODULE_ALIAS_CRYPTO("cast5");
+MODULE_ALIAS_CRYPTO("cast5-generic");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index f408f0bd8de2..058c8d755d03 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -292,3 +292,4 @@ module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
MODULE_ALIAS_CRYPTO("cast6");
+MODULE_ALIAS_CRYPTO("cast6-generic");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 2a062025749d..06f1b60f02b2 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -171,4 +171,5 @@ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-generic");
MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 08bb4f504520..c1229614c7e3 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -125,3 +125,4 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
MODULE_DESCRIPTION("T10 DIF CRC calculation.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/cts.c b/crypto/cts.c
index bd9405820e8a..e467ec0acf9f 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -290,6 +290,9 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
if (!is_power_of_2(alg->cra_blocksize))
goto out_put_alg;
+ if (strncmp(alg->cra_name, "cbc(", 4))
+ goto out_put_alg;
+
inst = crypto_alloc_instance("cts", alg);
if (IS_ERR(inst))
goto out_put_alg;
@@ -307,8 +310,6 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
- inst->alg.cra_blkcipher.geniv = "seqiv";
-
inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx);
inst->alg.cra_init = crypto_cts_init_tfm;
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 42912948776b..a71720544d11 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -983,8 +983,6 @@ static struct crypto_alg des_algs[2] = { {
.cia_decrypt = des3_ede_decrypt } }
} };
-MODULE_ALIAS_CRYPTO("des3_ede");
-
static int __init des_generic_mod_init(void)
{
return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
@@ -1001,4 +999,7 @@ module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des-generic");
+MODULE_ALIAS_CRYPTO("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede-generic");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index d748a1d0ca24..d8ff16e5c322 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,7 +98,6 @@
*/
#include <crypto/drbg.h>
-#include <linux/string.h>
/***************************************************************
* Backend cipher definitions available to DRBG
@@ -223,15 +222,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
* function. Thus, the function implicitly knows the size of the
* buffer.
*
- * The FIPS test can be called in an endless loop until it returns
- * true. Although the code looks like a potential for a deadlock, it
- * is not the case, because returning a false cannot mathematically
- * occur (except once when a reseed took place and the updated state
- * would is now set up such that the generation of new value returns
- * an identical one -- this is most unlikely and would happen only once).
- * Thus, if this function repeatedly returns false and thus would cause
- * a deadlock, the integrity of the entire kernel is lost.
- *
* @drbg DRBG handle
* @buf output buffer of random data to be checked
*
@@ -258,6 +248,8 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg,
return false;
}
ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+ if (!ret)
+ panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, buf, drbg_blocklen(drbg));
/* the test shall pass when the two compared values are not equal */
return ret != 0;
@@ -498,9 +490,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
ret = 0;
out:
- memzero_explicit(iv, drbg_blocklen(drbg));
- memzero_explicit(temp, drbg_statelen(drbg));
- memzero_explicit(pad, drbg_blocklen(drbg));
+ memset(iv, 0, drbg_blocklen(drbg));
+ memset(temp, 0, drbg_statelen(drbg));
+ memset(pad, 0, drbg_blocklen(drbg));
return ret;
}
@@ -574,9 +566,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
ret = 0;
out:
- memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
+ memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (2 != reseed)
- memzero_explicit(df_data, drbg_statelen(drbg));
+ memset(df_data, 0, drbg_statelen(drbg));
return ret;
}
@@ -634,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
len = ret;
out:
- memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+ memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
@@ -872,7 +864,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
}
out:
- memzero_explicit(tmp, drbg_blocklen(drbg));
+ memset(tmp, 0, drbg_blocklen(drbg));
return ret;
}
@@ -916,7 +908,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
out:
- memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
+ memset(drbg->scratchpad, 0, drbg_statelen(drbg));
return ret;
}
@@ -951,7 +943,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
drbg->scratchpad, drbg_blocklen(drbg));
out:
- memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+ memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return ret;
}
@@ -998,7 +990,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
}
out:
- memzero_explicit(drbg->scratchpad,
+ memset(drbg->scratchpad, 0,
(drbg_statelen(drbg) + drbg_blocklen(drbg)));
return len;
}
@@ -1047,7 +1039,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
out:
- memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+ memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
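The drbg hunks revert memzero_explicit() back to memset() for buffers that belong to the long-lived DRBG state (scratchpad-backed, not about to go out of scope), where a plain memset() is not at risk of dead-store elimination, and they make a failed FIPS continuous test fatal instead of silently retrying. A short sketch of the case memzero_explicit() is actually meant for; the function is illustrative, not from the patch:

#include <linux/string.h>
#include <linux/types.h>

static void demo_scrub_stack_key(void)
{
        u8 tmp[32];

        /* ... derive key material into tmp ... */

        /*
         * tmp is about to go out of scope, so a plain memset() could be
         * elided as a dead store; memzero_explicit() cannot be.
         */
        memzero_explicit(tmp, sizeof(tmp));
}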
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 4e97fae9666f..bac70995e064 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -173,3 +173,4 @@ module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
MODULE_ALIAS_CRYPTO("ghash");
+MODULE_ALIAS_CRYPTO("ghash-generic");
diff --git a/crypto/krng.c b/crypto/krng.c
index 67c88b331210..0224841b6579 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -63,3 +63,4 @@ module_exit(krng_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kernel Random Number Generator");
MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("krng");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 3d0f9df30ac9..f550b5d94630 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -249,3 +249,4 @@ module_exit(salsa20_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-generic");
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 79ca2278c2a3..3bd749c7bb70 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
walk->offset += PAGE_SIZE - 1;
walk->offset &= PAGE_MASK;
if (walk->offset >= walk->sg->offset + walk->sg->length)
- scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
+ scatterwalk_start(walk, sg_next(walk->sg));
}
}
@@ -116,7 +116,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
break;
offset += sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
scatterwalk_advance(&walk, start - offset);
@@ -136,7 +136,7 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
do {
offset += sg->length;
n++;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
/* num_bytes is too large */
if (unlikely(!sg && (num_bytes < offset)))
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 9daa854cc485..b7bb9a2f4a31 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -267,6 +267,12 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
if (IS_ERR(inst))
goto out;
+ if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
+ skcipher_geniv_free(inst);
+ inst = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
inst->alg.cra_init = seqiv_init;
@@ -287,6 +293,12 @@ static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
if (IS_ERR(inst))
goto out;
+ if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
+ aead_geniv_free(inst);
+ inst = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
inst->alg.cra_init = seqiv_aead_init;
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index a53b5e2af335..94970a794975 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -667,3 +667,4 @@ MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Ci
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS_CRYPTO("tnepres");
MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-generic");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 039e58cfa155..a3e50c37eb6f 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -154,3 +154,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-generic");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 5eb21b120033..b001ff5c2efc 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -385,4 +385,6 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 8d0b19ed4f4b..1c3c3767e079 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -289,4 +289,6 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-generic");
MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1d864e988ea9..4b9e23fa4204 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -250,19 +250,19 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
int k, rem;
- np = (np > XBUFSIZE) ? XBUFSIZE : np;
- rem = buflen % PAGE_SIZE;
if (np > XBUFSIZE) {
rem = PAGE_SIZE;
np = XBUFSIZE;
+ } else {
+ rem = buflen % PAGE_SIZE;
}
+
sg_init_table(sg, np);
- for (k = 0; k < np; ++k) {
- if (k == (np-1))
- sg_set_buf(&sg[k], xbuf[k], rem);
- else
- sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
- }
+ np--;
+ for (k = 0; k < np; k++)
+ sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
+
+ sg_set_buf(&sg[k], xbuf[k], rem);
}
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
@@ -280,16 +280,20 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct scatterlist *sgout;
const char *e;
void *assoc;
- char iv[MAX_IVLEN];
+ char *iv;
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
unsigned int *b_size;
unsigned int iv_len;
+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+ if (!iv)
+ return;
+
if (aad_size >= PAGE_SIZE) {
pr_err("associate data length (%u) too big\n", aad_size);
- return;
+ goto out_noxbuf;
}
if (enc == ENCRYPT)
@@ -355,7 +359,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
iv_len = crypto_aead_ivsize(tfm);
if (iv_len)
- memset(&iv, 0xff, iv_len);
+ memset(iv, 0xff, iv_len);
crypto_aead_clear_flags(tfm, ~0);
printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
@@ -408,6 +412,7 @@ out_nooutbuf:
out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
+ kfree(iv);
return;
}
@@ -764,10 +769,9 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
+ wait_for_completion(&tr->completion);
reinit_completion(&tr->completion);
+ ret = tr->err;
}
return ret;
}
@@ -993,10 +997,9 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
+ wait_for_completion(&tr->completion);
reinit_completion(&tr->completion);
+ ret = tr->err;
}
return ret;
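Both tcrypt helpers above switch from wait_for_completion_interruptible() to wait_for_completion(): if a signal interrupted the wait, the test path would return while the asynchronous request was still in flight and its buffers still owned by the driver. The resulting wait pattern, restated with illustrative names (demo_result and demo_wait_async_op are not from the patch):

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_result {
        struct completion completion;
        int err;
};

static int demo_wait_async_op(struct demo_result *res, int ret)
{
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                /* not interruptible: the request must really be finished */
                wait_for_completion(&res->completion);
                /* make the completion reusable for the next request */
                reinit_completion(&res->completion);
                /* the real status was stored by the async callback */
                ret = res->err;
        }
        return ret;
}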
diff --git a/crypto/tea.c b/crypto/tea.c
index 495be2d0077d..b70b441c7d1e 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -270,6 +270,7 @@ static void __exit tea_mod_fini(void)
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
+MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 037368d34586..f4ed6d4205e7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -181,10 +181,9 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
static int wait_async_op(struct tcrypt_result *tr, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
+ wait_for_completion(&tr->completion);
reinit_completion(&tr->completion);
+ ret = tr->err;
}
return ret;
}
@@ -353,12 +352,11 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &tresult.completion);
- if (!ret && !(ret = tresult.err)) {
- reinit_completion(&tresult.completion);
+ wait_for_completion(&tresult.completion);
+ reinit_completion(&tresult.completion);
+ ret = tresult.err;
+ if (!ret)
break;
- }
/* fall through */
default:
printk(KERN_ERR "alg: hash: digest failed "
@@ -431,7 +429,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
struct scatterlist *sgout;
const char *e, *d;
struct tcrypt_result result;
- unsigned int authsize;
+ unsigned int authsize, iv_len;
void *input;
void *output;
void *assoc;
@@ -502,10 +500,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(input, template[i].input, template[i].ilen);
memcpy(assoc, template[i].assoc, template[i].alen);
+ iv_len = crypto_aead_ivsize(tfm);
if (template[i].iv)
- memcpy(iv, template[i].iv, MAX_IVLEN);
+ memcpy(iv, template[i].iv, iv_len);
else
- memset(iv, 0, MAX_IVLEN);
+ memset(iv, 0, iv_len);
crypto_aead_clear_flags(tfm, ~0);
if (template[i].wk)
@@ -569,12 +568,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -720,12 +718,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -1002,12 +999,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
/* fall through */
default:
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
@@ -1097,12 +1093,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
/* fall through */
default:
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
@@ -3299,6 +3294,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "rfc4106(gcm(aes))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 6e5651c66cf8..321bc6ff2a9d 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -676,6 +676,7 @@ static void __exit tgr192_mod_fini(void)
crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
}
+MODULE_ALIAS_CRYPTO("tgr192");
MODULE_ALIAS_CRYPTO("tgr160");
MODULE_ALIAS_CRYPTO("tgr128");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 523ad8c4e359..ebf7a3efb572 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-generic");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 0de42eb3d040..7ee5a043a988 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1167,6 +1167,7 @@ static void __exit wp512_mod_fini(void)
crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
+MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c0cc96bab9e7 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -36,8 +36,6 @@ source "drivers/message/fusion/Kconfig"
source "drivers/firewire/Kconfig"
-source "drivers/message/i2o/Kconfig"
-
source "drivers/macintosh/Kconfig"
source "drivers/net/Kconfig"
@@ -134,8 +132,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
-source "drivers/soc/Kconfig"
-
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8951cefb0a96..e6c3ddd92665 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -315,6 +315,12 @@ config ACPI_HOTPLUG_MEMORY
To compile this driver as a module, choose M here:
the module will be called acpi_memhotplug.
+config ACPI_HOTPLUG_IOAPIC
+ bool
+ depends on PCI
+ depends on X86_IO_APIC
+ default y
+
config ACPI_SBS
tristate "Smart Battery System"
depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index f74317cc1ca9..b18cd2151ddb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -40,7 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
-acpi-y += acpi_lpss.o
+acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-y += int340x_thermal.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-y += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-y += acpi_memhotplug.o
+obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
new file mode 100644
index 000000000000..3984ea96e5f7
--- /dev/null
+++ b/drivers/acpi/acpi_apd.c
@@ -0,0 +1,150 @@
+/*
+ * AMD ACPI support for ACPI-to-platform device conversion.
+ *
+ * Copyright (c) 2014,2015 AMD Corporation.
+ * Authors: Ken Xue <Ken.Xue@amd.com>
+ * Wu, Jeff <Jeff.Wu@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/clkdev.h>
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/pm.h>
+
+#include "internal.h"
+
+ACPI_MODULE_NAME("acpi_apd");
+struct apd_private_data;
+
+/**
+ * ACPI_APD_SYSFS : add device attributes in sysfs
+ * ACPI_APD_PM : attach power domain to device
+ */
+#define ACPI_APD_SYSFS BIT(0)
+#define ACPI_APD_PM BIT(1)
+
+/**
+ * struct apd_device_desc - descriptor for an APD device
+ * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM
+ * @fixed_clk_rate: fixed rate input clock source for acpi device;
+ * 0 means no fixed rate input clock source
+ * @setup: hook called while creating the platform device to set up device resources
+ *
+ * Device description defined as acpi_device_id.driver_data
+ */
+struct apd_device_desc {
+ unsigned int flags;
+ unsigned int fixed_clk_rate;
+ int (*setup)(struct apd_private_data *pdata);
+};
+
+struct apd_private_data {
+ struct clk *clk;
+ struct acpi_device *adev;
+ const struct apd_device_desc *dev_desc;
+};
+
+#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
+#define APD_ADDR(desc) ((unsigned long)&desc)
+
+static int acpi_apd_setup(struct apd_private_data *pdata)
+{
+ const struct apd_device_desc *dev_desc = pdata->dev_desc;
+ struct clk *clk = ERR_PTR(-ENODEV);
+
+ if (dev_desc->fixed_clk_rate) {
+ clk = clk_register_fixed_rate(&pdata->adev->dev,
+ dev_name(&pdata->adev->dev),
+ NULL, CLK_IS_ROOT,
+ dev_desc->fixed_clk_rate);
+ clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
+ pdata->clk = clk;
+ }
+
+ return 0;
+}
+
+static struct apd_device_desc cz_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 133000000,
+};
+
+static struct apd_device_desc cz_uart_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 48000000,
+};
+
+#else
+
+#define APD_ADDR(desc) (0UL)
+
+#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
+
+/**
+ * Create a platform device while handling the ACPI scan attach callback.
+ * A return value > 0 means the device was created successfully.
+ */
+static int acpi_apd_create_device(struct acpi_device *adev,
+ const struct acpi_device_id *id)
+{
+ const struct apd_device_desc *dev_desc = (void *)id->driver_data;
+ struct apd_private_data *pdata;
+ struct platform_device *pdev;
+ int ret;
+
+ if (!dev_desc) {
+ pdev = acpi_create_platform_device(adev);
+ return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->adev = adev;
+ pdata->dev_desc = dev_desc;
+
+ if (dev_desc->setup) {
+ ret = dev_desc->setup(pdata);
+ if (ret)
+ goto err_out;
+ }
+
+ adev->driver_data = pdata;
+ pdev = acpi_create_platform_device(adev);
+ if (!IS_ERR_OR_NULL(pdev))
+ return 1;
+
+ ret = PTR_ERR(pdev);
+ adev->driver_data = NULL;
+
+ err_out:
+ kfree(pdata);
+ return ret;
+}
+
+static const struct acpi_device_id acpi_apd_device_ids[] = {
+ /* Generic apd devices */
+ { "AMD0010", APD_ADDR(cz_i2c_desc) },
+ { "AMD0020", APD_ADDR(cz_uart_desc) },
+ { "AMD0030", },
+ { }
+};
+
+static struct acpi_scan_handler apd_handler = {
+ .ids = acpi_apd_device_ids,
+ .attach = acpi_apd_create_device,
+};
+
+void __init acpi_apd_init(void)
+{
+ acpi_scan_add_handler(&apd_handler);
+}
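For the new acpi_apd scan handler, the setup hook registers a fixed-rate clock under the ACPI device's name, and acpi_create_platform_device() gives the resulting platform device that same name, so a clk_get() with a NULL con_id in the consumer driver should find it. A hypothetical consumer probe, sketched under that assumption (demo_apd_clk_probe is an invented name):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_apd_clk_probe(struct platform_device *pdev)
{
        struct clk *clk;

        /* NULL con_id matches the clk_register_clkdev() lookup above */
        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        dev_info(&pdev->dev, "APD input clock: %lu Hz\n", clk_get_rate(clk));
        return clk_prepare_enable(clk);
}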
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 4f3febf8a589..02e835f3cf8a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1,7 +1,7 @@
/*
* ACPI support for Intel Lynxpoint LPSS.
*
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
-#define LPSS_DEV_PROXY BIT(5)
-#define LPSS_PROXY_REQ BIT(6)
struct lpss_private_data;
@@ -72,10 +70,8 @@ struct lpss_device_desc {
void (*setup)(struct lpss_private_data *pdata);
};
-static struct device *proxy_device;
-
static struct lpss_device_desc lpss_dma_desc = {
- .flags = LPSS_CLK | LPSS_PROXY_REQ,
+ .flags = LPSS_CLK,
};
struct lpss_private_data {
@@ -129,7 +125,7 @@ static struct lpss_device_desc lpt_dev_desc = {
};
static struct lpss_device_desc lpt_i2c_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
+ .flags = LPSS_CLK | LPSS_LTR,
.prv_offset = 0x800,
};
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
};
static struct lpss_device_desc byt_uart_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
- LPSS_DEV_PROXY,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = lpss_uart_setup,
};
static struct lpss_device_desc byt_spi_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
- LPSS_DEV_PROXY,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
};
static struct lpss_device_desc byt_sdio_dev_desc = {
- .flags = LPSS_CLK | LPSS_DEV_PROXY,
+ .flags = LPSS_CLK,
};
static struct lpss_device_desc byt_i2c_dev_desc = {
- .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+ .flags = LPSS_CLK | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
};
@@ -313,7 +307,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
{
struct lpss_device_desc *dev_desc;
struct lpss_private_data *pdata;
- struct resource_list_entry *rentry;
+ struct resource_entry *rentry;
struct list_head resource_list;
struct platform_device *pdev;
int ret;
@@ -333,13 +327,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
list_for_each_entry(rentry, &resource_list, node)
- if (resource_type(&rentry->res) == IORESOURCE_MEM) {
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
if (dev_desc->prv_size_override)
pdata->mmio_size = dev_desc->prv_size_override;
else
- pdata->mmio_size = resource_size(&rentry->res);
- pdata->mmio_base = ioremap(rentry->res.start,
+ pdata->mmio_size = resource_size(rentry->res);
+ pdata->mmio_base = ioremap(rentry->res->start,
pdata->mmio_size);
+ if (!pdata->mmio_base)
+ goto err_out;
break;
}
@@ -374,8 +370,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev);
if (!IS_ERR_OR_NULL(pdev)) {
- if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
- proxy_device = &pdev->dev;
return 1;
}
@@ -600,14 +594,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
- ret = acpi_dev_runtime_suspend(dev);
- if (ret)
- return ret;
-
- if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
- return pm_runtime_put_sync_suspend(proxy_device);
-
- return 0;
+ return acpi_dev_runtime_suspend(dev);
}
static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +602,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
- ret = pm_runtime_get_sync(proxy_device);
- if (ret)
- return ret;
- }
-
ret = acpi_dev_runtime_resume(dev);
if (ret)
return ret;
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 23e2319ead41..ee28f4d15625 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -101,8 +101,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
/* Can we combine the resource range information? */
if ((info->caching == address64.info.mem.caching) &&
(info->write_protect == address64.info.mem.write_protect) &&
- (info->start_addr + info->length == address64.minimum)) {
- info->length += address64.address_length;
+ (info->start_addr + info->length == address64.address.minimum)) {
+ info->length += address64.address.address_length;
return AE_OK;
}
}
@@ -114,8 +114,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
INIT_LIST_HEAD(&new->list);
new->caching = address64.info.mem.caching;
new->write_protect = address64.info.mem.write_protect;
- new->start_addr = address64.minimum;
- new->length = address64.address_length;
+ new->start_addr = address64.address.minimum;
+ new->length = address64.address.address_length;
list_add_tail(&new->list, &mem_device->res_list);
return AE_OK;
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 6ba8beb6b9d2..1284138e42ab 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
struct platform_device *pdev = NULL;
struct acpi_device *acpi_parent;
struct platform_device_info pdevinfo;
- struct resource_list_entry *rentry;
+ struct resource_entry *rentry;
struct list_head resource_list;
struct resource *resources = NULL;
int count;
@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
}
count = 0;
list_for_each_entry(rentry, &resource_list, node)
- resources[count++] = rentry->res;
+ resources[count++] = *rentry->res;
acpi_dev_free_resource_list(&resource_list);
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 3d2c88289da9..d863016565b5 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2014 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2015 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH "-64"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 6f1c616910ac..853aa2dbdb61 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 1d026ff1683f..4169bb87a996 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index d3e2cc395d7f..408f04bcaab4 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 7a7811a9fc26..228704b78657 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,8 +143,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 7f60582d0c8c..a165d25343e8 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index c318d3e27893..196a55244559 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index b01f71ce0523..1886bde54b5d 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 680d23bbae7c..7add32e5d8c5 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 4bceb11c7380..cf607fe69dbd 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index ee1c040f321c..952fbe0b7231 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8abb393dafab..3e9720e1f34f 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index dda0e6affcf1..a5f17de45ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6168b85463ed..74a390c6db16 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index bd3908d26c4f..a972d11c97c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 4b008e8884a1..efc4c7124ccc 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index cf7346110bd8..d14b547b7cd5 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 1afe46e44dac..1c127a43017b 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 486d342e74b6..c2f03e8774ad 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 5908ccec6aea..3a95068fc119 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 3a0beeb86ba5..ee0cdd60b93d 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 720b1cdda711..3e6989738e85 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 8daf9de82b73..39da9da62bbf 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c57666196672..43b40de90484 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index aee5e45f6d35..bbe74bcebbae 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 3c7f7378b94d..d72565a3c646 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index b67522df01ac..2e4c42b377ec 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index a1e7e6b6fcf7..8a7b07b6adc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 6c0759c0db47..77244182ff02 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 9f74795e2268..e5ff89bcb3f5 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index f7f5107e754d..df54d46225cd 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 15623da26200..843942fb4be5 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 2ac28d297305..fcaa30c611fb 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9d6e2c1de1f8..43b3ea40c0b6 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 24f7d5ea678a..89ac2022465e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c7bffff9ed32..bf6873f95e72 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 3393a73ca0d6..b78dc7c6d5d7 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index aa70154cf4fa..5ed064e8673c 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -114,17 +114,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /*
- * We will only allow a GPE to be enabled if it has either an associated
- * method (_Lxx/_Exx) or a handler, or is using the implicit notify
- * feature. Otherwise, the GPE will be immediately disabled by
- * acpi_ev_gpe_dispatch the first time it fires.
- */
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_NONE) {
- return_ACPI_STATUS(AE_NO_HANDLER);
- }
-
/* Clear the GPE (of stale events) */
status = acpi_hw_clear_gpe(gpe_event_info);
@@ -339,7 +328,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *gpe_device;
struct acpi_gpe_register_info *gpe_register_info;
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 gpe_number;
+ struct acpi_gpe_handler_info *gpe_handler_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
u32 status_reg;
@@ -367,6 +360,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) {
+ gpe_device = gpe_block->node;
+
/*
* Read all of the 8-bit GPE status and enable registers in this GPE
* block, saving all of them. Find all currently active GP events.
@@ -442,16 +437,68 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
/* Examine one GPE bit */
+ gpe_event_info =
+ &gpe_block->
+ event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH) + j];
+ gpe_number =
+ j + gpe_register_info->base_gpe_number;
+
if (enabled_status_byte & (1 << j)) {
- /*
- * Found an active GPE. Dispatch the event to a handler
- * or method.
- */
- int_status |=
- acpi_ev_gpe_dispatch(gpe_block->
- node,
- &gpe_block->
- event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
+
+ /* Invoke global event handler if present */
+
+ acpi_gpe_count++;
+ if (acpi_gbl_global_event_handler) {
+ acpi_gbl_global_event_handler
+ (ACPI_EVENT_TYPE_GPE,
+ gpe_device, gpe_number,
+ acpi_gbl_global_event_handler_context);
+ }
+
+ /* Found an active GPE */
+
+ if (ACPI_GPE_DISPATCH_TYPE
+ (gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_RAW_HANDLER) {
+
+ /* Dispatch the event to a raw handler */
+
+ gpe_handler_info =
+ gpe_event_info->dispatch.
+ handler;
+
+ /*
+ * There is no protection around the namespace node
+ * and the GPE handler to ensure a safe destruction
+ * because:
+ * 1. The namespace node is expected to always
+ * exist after loading a table.
+ * 2. The GPE handler is expected to be flushed by
+ * acpi_os_wait_events_complete() before the
+ * destruction.
+ */
+ acpi_os_release_lock
+ (acpi_gbl_gpe_lock, flags);
+ int_status |=
+ gpe_handler_info->
+ address(gpe_device,
+ gpe_number,
+ gpe_handler_info->
+ context);
+ flags =
+ acpi_os_acquire_lock
+ (acpi_gbl_gpe_lock);
+ } else {
+ /*
+ * Dispatch the event to a standard handler or
+ * method.
+ */
+ int_status |=
+ acpi_ev_gpe_dispatch
+ (gpe_device, gpe_event_info,
+ gpe_number);
+ }
}
}
}
@@ -484,52 +531,15 @@ unlock_and_exit:
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = context;
- acpi_status status;
- struct acpi_gpe_event_info *local_gpe_event_info;
+ acpi_status status = AE_OK;
struct acpi_evaluate_info *info;
struct acpi_gpe_notify_info *notify;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
- /* Allocate a local GPE block */
-
- local_gpe_event_info =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
- if (!local_gpe_event_info) {
- ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
- return_VOID;
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(local_gpe_event_info);
- return_VOID;
- }
-
- /* Must revalidate the gpe_number/gpe_block */
-
- if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
- status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- ACPI_FREE(local_gpe_event_info);
- return_VOID;
- }
-
- /*
- * Take a snapshot of the GPE info for this level - we copy the info to
- * prevent a race condition with remove_handler/remove_block.
- */
- ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
- sizeof(struct acpi_gpe_event_info));
-
- status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(local_gpe_event_info);
- return_VOID;
- }
-
/* Do the correct dispatch - normal method or implicit notify */
- switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+ switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
case ACPI_GPE_DISPATCH_NOTIFY:
/*
* Implicit notify.
@@ -542,7 +552,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* June 2012: Expand implicit notify mechanism to support
* notifies on multiple device objects.
*/
- notify = local_gpe_event_info->dispatch.notify_list;
+ notify = gpe_event_info->dispatch.notify_list;
while (ACPI_SUCCESS(status) && notify) {
status =
acpi_ev_queue_notify_request(notify->device_node,
@@ -566,7 +576,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* _Lxx/_Exx control method that corresponds to this GPE
*/
info->prefix_node =
- local_gpe_event_info->dispatch.method_node;
+ gpe_event_info->dispatch.method_node;
info->flags = ACPI_IGNORE_RETURN_VALUE;
status = acpi_ns_evaluate(info);
@@ -576,25 +586,27 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"while evaluating GPE method [%4.4s]",
- acpi_ut_get_node_name
- (local_gpe_event_info->dispatch.
- method_node)));
+ acpi_ut_get_node_name(gpe_event_info->
+ dispatch.
+ method_node)));
}
break;
default:
- return_VOID; /* Should never happen */
+ goto error_exit; /* Should never happen */
}
/* Defer enabling of GPE until all notify handlers are done */
status = acpi_os_execute(OSL_NOTIFY_HANDLER,
- acpi_ev_asynch_enable_gpe,
- local_gpe_event_info);
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(local_gpe_event_info);
+ acpi_ev_asynch_enable_gpe, gpe_event_info);
+ if (ACPI_SUCCESS(status)) {
+ return_VOID;
}
+
+error_exit:
+ acpi_ev_asynch_enable_gpe(gpe_event_info);
return_VOID;
}
@@ -622,7 +634,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
(void)acpi_ev_finish_gpe(gpe_event_info);
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- ACPI_FREE(gpe_event_info);
return;
}
@@ -692,15 +703,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
- /* Invoke global event handler if present */
-
- acpi_gpe_count++;
- if (acpi_gbl_global_event_handler) {
- acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
- gpe_number,
- acpi_gbl_global_event_handler_context);
- }
-
/*
* Always disable the GPE so that it does not keep firing before
* any asynchronous activity completes (either from the execution
@@ -741,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
* If there is neither a handler nor a method, leave the GPE
* disabled.
*/
- switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+ switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
case ACPI_GPE_DISPATCH_HANDLER:
/* Invoke the installed handler (at interrupt level) */
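The evgpe.c changes teach the GPE detection loop to dispatch ACPI_GPE_DISPATCH_RAW_HANDLER entries directly, dropping the GPE lock around the callback and leaving all GPE housekeeping to the handler itself. Assuming the public acpi_install_gpe_raw_handler() interface that accompanies this ACPICA update (it is not shown in these hunks), installation would look roughly like this, with illustrative names:

#include <linux/acpi.h>

static u32 demo_raw_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
                                void *context)
{
        /* runs at interrupt level; clearing/re-enabling the GPE is up to us */
        return ACPI_INTERRUPT_HANDLED;
}

static acpi_status demo_install_raw_handler(u32 gpe_number)
{
        return acpi_install_gpe_raw_handler(NULL, gpe_number,
                                            ACPI_GPE_EDGE_TRIGGERED,
                                            demo_raw_gpe_handler, NULL);
}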
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index d86699eea33c..e0f24c504513 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -474,10 +474,12 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
* Ignore GPEs that have no corresponding _Lxx/_Exx method
* and GPEs that are used to wake the system
*/
- if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
ACPI_GPE_DISPATCH_NONE)
- || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
- == ACPI_GPE_DISPATCH_HANDLER)
+ || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_HANDLER)
+ || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_RAW_HANDLER)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7be928379879..8840296d5b20 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -401,15 +401,17 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
return_ACPI_STATUS(AE_OK);
}
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_HANDLER) ||
+ (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_RAW_HANDLER)) {
/* If there is already a handler, ignore this GPE method */
return_ACPI_STATUS(AE_OK);
}
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
ACPI_GPE_DISPATCH_METHOD) {
/*
* If there is already a method, ignore this method. But check
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 17e4bbfdb096..3a958f3612fe 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,53 +108,6 @@ unlock_and_exit:
/*******************************************************************************
*
- * FUNCTION: acpi_ev_valid_gpe_event
- *
- * PARAMETERS: gpe_event_info - Info for this GPE
- *
- * RETURN: TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- * Should be called only when the GPE lists are semaphore locked
- * and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
- struct acpi_gpe_xrupt_info *gpe_xrupt_block;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_ENTRY();
-
- /* No need for spin lock since we are not changing any list elements */
-
- /* Walk the GPE interrupt levels */
-
- gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_block) {
- gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
- /* Walk the GPE blocks on this interrupt level */
-
- while (gpe_block) {
- if ((&gpe_block->event_info[0] <= gpe_event_info) &&
- (&gpe_block->event_info[gpe_block->gpe_count] >
- gpe_event_info)) {
- return (TRUE);
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_block = gpe_xrupt_block->next;
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_get_gpe_device
*
* PARAMETERS: GPE_WALK_CALLBACK
@@ -371,8 +324,10 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
ACPI_GPE_REGISTER_WIDTH)
+ j];
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_HANDLER) ||
+ (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_RAW_HANDLER)) {
/* Delete an installed handler block */
@@ -380,10 +335,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
- } else
- if ((gpe_event_info->
- flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_NOTIFY) {
+ } else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
+ == ACPI_GPE_DISPATCH_NOTIFY) {
/* Delete the implicit notification device list */
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 78ac29351c9e..74e8595f5a2b 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 24ea3424981b..f7c9dfe7b990 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 8eb8575e8c16..9abace3401f9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 1b148a440d67..da323390bb70 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 29630e303829..0366703d2970 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 55a58f3ec8df..81f2d9e87fad 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,16 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxface")
+#if (!ACPI_REDUCED_HARDWARE)
+/* Local prototypes */
+static acpi_status
+acpi_ev_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type,
+ u8 is_raw_handler,
+ acpi_gpe_handler address, void *context);
+
+#endif
/*******************************************************************************
@@ -76,6 +86,7 @@ ACPI_MODULE_NAME("evxface")
* handlers.
*
******************************************************************************/
+
acpi_status
acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
@@ -717,32 +728,37 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
- * FUNCTION: acpi_install_gpe_handler
+ * FUNCTION: acpi_ev_install_gpe_handler
*
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
* defined GPEs)
* gpe_number - The GPE number within the GPE block
* type - Whether this GPE should be treated as an
* edge- or level-triggered interrupt.
+ * is_raw_handler - Whether this GPE should be handled using
+ * the special GPE handler mode.
* address - Address of the handler
* context - Value passed to the handler on each GPE
*
* RETURN: Status
*
- * DESCRIPTION: Install a handler for a General Purpose Event.
+ * DESCRIPTION: Internal function to install a handler for a General Purpose
+ * Event.
*
******************************************************************************/
-acpi_status
-acpi_install_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number,
- u32 type, acpi_gpe_handler address, void *context)
+static acpi_status
+acpi_ev_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type,
+ u8 is_raw_handler,
+ acpi_gpe_handler address, void *context)
{
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_gpe_handler_info *handler;
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+ ACPI_FUNCTION_TRACE(ev_install_gpe_handler);
/* Parameter validation */
@@ -775,8 +791,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Make sure that there isn't a handler there already */
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_HANDLER) ||
+ (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+ ACPI_GPE_DISPATCH_RAW_HANDLER)) {
status = AE_ALREADY_EXISTS;
goto free_and_exit;
}
@@ -793,9 +811,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
* automatically during initialization, in which case it has to be
* disabled now to avoid spurious execution of the handler.
*/
- if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
- (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
- gpe_event_info->runtime_count) {
+ if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+ ACPI_GPE_DISPATCH_METHOD) ||
+ (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+ ACPI_GPE_DISPATCH_NOTIFY)) && gpe_event_info->runtime_count) {
handler->originally_enabled = TRUE;
(void)acpi_ev_remove_gpe_reference(gpe_event_info);
@@ -816,7 +835,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
gpe_event_info->flags &=
~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
- gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER);
+ gpe_event_info->flags |=
+ (u8)(type |
+ (is_raw_handler ? ACPI_GPE_DISPATCH_RAW_HANDLER :
+ ACPI_GPE_DISPATCH_HANDLER));
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
@@ -830,10 +852,78 @@ free_and_exit:
goto unlock_and_exit;
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * type - Whether this GPE should be treated as an
+ * edge- or level-triggered interrupt.
+ * address - Address of the handler
+ * context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_gpe_handler address, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+
+ status =
+ acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE,
+ address, context);
+
+ return_ACPI_STATUS(status);
+}
+
ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
/*******************************************************************************
*
+ * FUNCTION: acpi_install_gpe_raw_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * type - Whether this GPE should be treated as an
+ * edge- or level-triggered interrupt.
+ * address - Address of the handler
+ * context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a raw handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_raw_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_gpe_handler address, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler);
+
+ status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE,
+ address, context);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_raw_handler)
+
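A usage sketch for the new raw-handler entry point, assuming a hypothetical driver callback and GPE number (my_ec_gpe_handler, MY_EC_GPE and my_context are illustrative names, not part of this patch):

    /* Hypothetical raw GPE handler: in raw mode ACPICA performs no
     * clearing or reenabling on the handler's behalf. */
    static u32 my_ec_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
                                 void *context)
    {
            /* ... service the hardware, manage GPE status/enable here ... */
            return ACPI_INTERRUPT_HANDLED;
    }

    status = acpi_install_gpe_raw_handler(NULL, MY_EC_GPE,
                                          ACPI_GPE_EDGE_TRIGGERED,
                                          my_ec_gpe_handler, my_context);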
+/*******************************************************************************
+ *
* FUNCTION: acpi_remove_gpe_handler
*
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
@@ -880,8 +970,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
/* Make sure that a handler is indeed installed */
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
- ACPI_GPE_DISPATCH_HANDLER) {
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_HANDLER) &&
+ (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_RAW_HANDLER)) {
status = AE_NOT_EXIST;
goto unlock_and_exit;
}
@@ -896,6 +988,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
/* Remove the handler */
handler = gpe_event_info->dispatch.handler;
+ gpe_event_info->dispatch.handler = NULL;
/* Restore Method node (if any), set dispatch flags */
@@ -909,9 +1002,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
* enabled, it should be enabled at this point to restore the
* post-initialization configuration.
*/
- if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
- (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
- handler->originally_enabled) {
+ if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+ ACPI_GPE_DISPATCH_METHOD) ||
+ (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+ ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
(void)acpi_ev_add_gpe_reference(gpe_event_info);
}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index bb8cbf5961bf..df06a23c4197 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index e889a5304abd..70eb47e3d724 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,7 +132,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
*/
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (gpe_event_info) {
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+ if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
ACPI_GPE_DISPATCH_NONE) {
status = acpi_ev_add_gpe_reference(gpe_event_info);
} else {
@@ -183,6 +183,77 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ * the reference count mechanism used in the acpi_enable_gpe(),
+ * acpi_disable_gpe() interfaces.
+ * This API is typically used by the GPE raw handler mode driver
+ * to switch between the polling mode and the interrupt mode after
+ * the driver has enabled the GPE.
+ * The APIs should be invoked in this order:
+ * acpi_enable_gpe() <- Ensure the reference count > 0
+ * acpi_set_gpe(ACPI_GPE_DISABLE) <- Enter polling mode
+ * acpi_set_gpe(ACPI_GPE_ENABLE) <- Leave polling mode
+ * acpi_disable_gpe() <- Decrease the reference count
+ *
+ * Note: If a GPE is shared by two silicon components, then both drivers
+ *       should support GPE polling mode, since one driver disabling the GPE
+ *       for a long period may break the other. Use this interface with care,
+ *       because all firmware _Lxx/_Exx handlers currently rely on the GPE
+ *       interrupt mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_set_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Perform the action */
+
+ switch (action) {
+ case ACPI_GPE_ENABLE:
+
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ break;
+
+ case ACPI_GPE_DISABLE:
+
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+ break;
+
+ default:
+
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe)
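Restating the ordering documented in the function header as a sketch (MY_GPE is a placeholder for a GPE number the calling driver owns):

    acpi_enable_gpe(NULL, MY_GPE);                /* reference count > 0, interrupt mode */
    acpi_set_gpe(NULL, MY_GPE, ACPI_GPE_DISABLE); /* enter polling mode */
    /* ... poll and service the GPE from the driver ... */
    acpi_set_gpe(NULL, MY_GPE, ACPI_GPE_ENABLE);  /* leave polling mode */
    acpi_disable_gpe(NULL, MY_GPE);               /* drop the reference */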
/*******************************************************************************
*
@@ -313,7 +384,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
* known as an "implicit notify". Note: The GPE is assumed to be
* level-triggered (for windows compatibility).
*/
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
ACPI_GPE_DISPATCH_NONE) {
/*
* This is the first device for implicit notify on this GPE.
@@ -327,7 +398,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
* If we already have an implicit notify on this GPE, add
* this device to the notify list.
*/
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
ACPI_GPE_DISPATCH_NOTIFY) {
/* Ensure that the device is not already in the list */
@@ -530,6 +601,49 @@ unlock_and_exit:
ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_finish_gpe
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE Block
+ * (NULL for FADT defined GPEs)
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE
+ * processing. Intended for use by asynchronous host-installed
+ * GPE handlers. The GPE is only reenabled if the enable_for_run bit
+ * is set in the GPE info.
+ *
+ ******************************************************************************/
+acpi_status acpi_finish_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_finish_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ev_finish_gpe(gpe_event_info);
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_finish_gpe)
+
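A sketch of the intended completion path for an asynchronous host handler (my_gpe_work_fn and MY_GPE are illustrative names):

    /* Deferred work scheduled by a GPE handler; once the event has been
     * serviced, acpi_finish_gpe() clears the GPE and reenables it only if
     * its enable_for_run bit is set. */
    static void my_gpe_work_fn(void *context)
    {
            /* ... handle the event outside interrupt context ... */
            acpi_finish_gpe(NULL, MY_GPE);
    }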
/******************************************************************************
*
* FUNCTION: acpi_disable_all_gpes
@@ -604,7 +718,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
* all GPE blocks.
*
******************************************************************************/
-
acpi_status acpi_enable_all_wakeup_gpes(void)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 2d6f187939c7..f21afbab03f7 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7d2949420db7..6e0df2b9d5a4 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index c545386fee96..89a976b4ccf2 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 95d23dabcfbb..aaeea4840aaa 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 6fbfad47518c..e67d0aca3fe6 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 0f23c3f2678e..7c213b6b6472 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index b994845ed359..c161dd974f74 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 1d1b27a96c5b..49479927e7f7 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 2207e624f538..b56fc9d6f48e 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index b49ea2a95f4f..472030f2b5bb 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index dbb03b544e8c..453b00c30177 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 1b8e94104407..77930683ab7d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2ede656ee26a..fcc618aa2061 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 363767cf01e5..b813fed95e56 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 29e9e99f7fe3..c930edda3f65 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 118e942005e5..4c2836dc825b 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index cd5288a257a9..0fe188e238ef 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index ab060261b43e..c7e3b929aa85 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 3cde553bcbe1..b6b7f3af29e4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3af8de3fcea4..d2964af9ad4d 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index daf49f7ea311..a7eee2400ce0 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 04bd16c08f9e..3101607b4efe 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index fd11018b0168..6fa3c8d8fc5f 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f7da64123ed5..05450656fe3d 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index d9d72dff2a76..3f4225e95d93 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 1e66d960fc11..e5c5949f9081 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 858fdd6be598..e5599f610808 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 494027f5c067..84bc550f4f1d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -54,6 +54,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context);
+static acpi_status
+acpi_hw_gpe_enable_write(u8 enable_mask,
+ struct acpi_gpe_register_info *gpe_register_info);
+
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_register_bit
@@ -146,7 +150,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
- gpe_register_info->enable_mask = enable_mask;
+ gpe_register_info->enable_mask = (u8)enable_mask;
}
return (status);
}
@@ -221,7 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* GPE currently handled? */
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+ if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
ACPI_GPE_DISPATCH_NONE) {
local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
}
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 6aade8e1d2a1..c5214dec4988 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index a4c34d2c556b..3cf77afd142c 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d590693eb54e..7d21cae6d602 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 76ab5c1a814e..675c709a300b 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 6b919127cd9d..2bd33fe56cb3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 96d007df65ec..5f97468df8ff 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 6921c7f3d208..3b3767698827 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f1249e3463be..24fa19a76d70 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 607eb9e5150d..e107f929d9cf 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 80fcfc8c9c1b..5d347a71bd0b 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index b55642c4ee58..1a8b39c8d969 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 3d88ef4a3e0d..80f097eb7381 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 42d37109aa5d..7dc367e6fe09 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index e634a05974db..7bcc68f57afa 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index a3fb7e4c0809..4a85c4517988 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 7c9d0181f341..bd6cd4a81316 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 7eee0a6f02f6..d293d9748036 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index a42ee9d6970d..677bc9330e64 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index e83cff31754b..c95a119767b5 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 392910ffbed9..0eb54315b4be 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 1b13b921dda9..8b79958b7aca 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 7e417aa5c91e..151fcd95ba84 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index b09e6bef72b8..c30672d23878 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index af1cc42a8aa1..4a9d4a66016e 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 4a5e3f5c0ff7..6ad02008c0c2 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 4758a1f2ce22..c68609a2bc1b 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 4bd558bf10d2..b6030a2deee1 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 8c6c11ce9760..d66c326485d8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index dae9401be7a2..793383501f81 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,50 +53,6 @@ ACPI_MODULE_NAME("nsxfobj")
/*******************************************************************************
*
- * FUNCTION: acpi_get_id
- *
- * PARAMETERS: Handle - Handle of object whose id is desired
- * ret_id - Where the id will be placed
- *
- * RETURN: Status
- *
- * DESCRIPTION: This routine returns the owner id associated with a handle
- *
- ******************************************************************************/
-acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
-{
- struct acpi_namespace_node *node;
- acpi_status status;
-
- /* Parameter Validation */
-
- if (!ret_id) {
- return (AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Convert and validate the handle */
-
- node = acpi_ns_validate_handle(handle);
- if (!node) {
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return (AE_BAD_PARAMETER);
- }
-
- *ret_id = node->owner_id;
-
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_id)
-
-/*******************************************************************************
- *
* FUNCTION: acpi_get_type
*
* PARAMETERS: handle - Handle of object whose type is desired
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 314d314340ae..6d038770577b 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b058e2390fdd..90437227d790 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index a6885077d59e..2f5ddd806c58 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1755d2ac5656..1af4a405e351 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 0d8d37ffd04d..e18e7c47f482 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 6d27b597394e..a555f7f7b9a2 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 32d250feea21..9d669cc6cb62 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 0b64181e7720..89984f30addc 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 3cd48802eede..960505ab409a 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 9cb07e1e76d9..ba5f69171288 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index e135acaa5e1c..841a5ea06094 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 916fd095ff34..66d406e8fe36 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,7 +74,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
* Address Translation Offset
* Address Length
*/
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity),
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.address.granularity),
AML_OFFSET(address16.granularity),
5},
@@ -112,7 +112,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
* Address Translation Offset
* Address Length
*/
- {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity),
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.address.granularity),
AML_OFFSET(address32.granularity),
5},
@@ -150,7 +150,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
* Address Translation Offset
* Address Length
*/
- {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity),
+ {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.address.granularity),
AML_OFFSET(address64.granularity),
5},
@@ -194,7 +194,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
* Address Length
* Type-Specific Attribute
*/
- {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity),
+ {ACPI_RSC_MOVE64,
+ ACPI_RS_OFFSET(data.ext_address64.address.granularity),
AML_OFFSET(ext_address64.granularity),
6}
};
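These hunks follow an ACPICA resource-descriptor refactoring in which the five common address fields move into a nested sub-structure reached through ".address". A sketch of the assumed 16-bit layout (the authoritative definitions are in the ACPICA resource headers):

    /* Assumed shape after the refactoring; field names follow the diff. */
    struct acpi_address16_attribute {
            u16 granularity;
            u16 minimum;
            u16 maximum;
            u16 translation_offset;
            u16 address_length;
    };

    struct acpi_resource_address16 {
            ACPI_RESOURCE_ADDRESS_COMMON
            struct acpi_address16_attribute address;
            struct acpi_resource_source resource_source;
    };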
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 689556744b03..cb739a694931 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 049d9c22a0f9..15434e4c9b34 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index c3c56b5a9788..1539394c8c52 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 2f9332d5c973..b29d9ec63d1b 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,15 @@ struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
"16-Bit WORD Address Space", NULL},
{ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.granularity),
+ "Granularity", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.minimum),
+ "Address Minimum", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.maximum),
+ "Address Maximum", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.translation_offset),
"Translation Offset", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.address_length),
"Address Length", NULL},
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
};
@@ -200,15 +200,15 @@ struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
"32-Bit DWORD Address Space", NULL},
{ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.granularity),
+ "Granularity", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.minimum),
+ "Address Minimum", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.maximum),
+ "Address Maximum", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.translation_offset),
"Translation Offset", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.address_length),
"Address Length", NULL},
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
};
@@ -217,15 +217,15 @@ struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
"64-Bit QWORD Address Space", NULL},
{ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.granularity),
+ "Granularity", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.minimum),
+ "Address Minimum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.maximum),
+ "Address Maximum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.translation_offset),
"Translation Offset", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.address_length),
"Address Length", NULL},
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
};
@@ -234,15 +234,16 @@ struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
"64-Bit Extended Address Space", NULL},
{ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.granularity),
"Granularity", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.minimum),
"Address Minimum", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.maximum),
"Address Maximum", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
+ {ACPI_RSD_UINT64,
+ ACPI_RSD_OFFSET(ext_address64.address.translation_offset),
"Translation Offset", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.address_length),
"Address Length", NULL},
{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
"Type-Specific Attribute", NULL}
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 9d3f8a9a24bd..edecfc675979 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 19d64873290a..5adba018bab0 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 3461f7db26df..07cfa70a475b 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 77291293af64..50d5be2ee062 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index eab4483ff5f8..c6b80862030e 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 41eea4bc089c..1fe49d223663 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 9e8407223d95..4c8c6fe6ea74 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 897a5ceb0420..ece3cd60cc6a 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 877ab9202133..8e6276df0226 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("rsxface")
ACPI_COPY_FIELD(out, in, min_address_fixed); \
ACPI_COPY_FIELD(out, in, max_address_fixed); \
ACPI_COPY_FIELD(out, in, info); \
- ACPI_COPY_FIELD(out, in, granularity); \
- ACPI_COPY_FIELD(out, in, minimum); \
- ACPI_COPY_FIELD(out, in, maximum); \
- ACPI_COPY_FIELD(out, in, translation_offset); \
- ACPI_COPY_FIELD(out, in, address_length); \
+ ACPI_COPY_FIELD(out, in, address.granularity); \
+ ACPI_COPY_FIELD(out, in, address.minimum); \
+ ACPI_COPY_FIELD(out, in, address.maximum); \
+ ACPI_COPY_FIELD(out, in, address.translation_offset); \
+ ACPI_COPY_FIELD(out, in, address.address_length); \
ACPI_COPY_FIELD(out, in, resource_source);
/* Local prototypes */
static acpi_status
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index f499c10ceb4a..6a144957aadd 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 41519a958083..7d2486005e3f 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index cb947700206c..0b879fcfef67 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 755b90c40ddf..9bad45e63a45 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index df3bb20ea325..ef16c06e5091 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 6b1ca9991b90..6559a58439c5 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 6482b0ded652..60e94f87f27a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -265,45 +265,6 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
/*******************************************************************************
*
- * FUNCTION: acpi_unload_table_id
- *
- * PARAMETERS: id - Owner ID of the table to be removed.
- *
- * RETURN: Status
- *
- * DESCRIPTION: This routine is used to force the unload of a table (by id)
- *
- ******************************************************************************/
-acpi_status acpi_unload_table_id(acpi_owner_id id)
-{
- int i;
- acpi_status status = AE_NOT_EXIST;
-
- ACPI_FUNCTION_TRACE(acpi_unload_table_id);
-
- /* Find table in the global table list */
- for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
- continue;
- }
- /*
- * Delete all namespace objects owned by this table. Note that these
- * objects can appear anywhere in the namespace by virtue of the AML
- * "Scope" operator. Thus, we need to track ownership by an ID, not
- * simply a position within the hierarchy
- */
- acpi_tb_delete_namespace_by_owner(i);
- status = acpi_tb_release_owner_id(i);
- acpi_tb_set_table_loaded_flag(i, FALSE);
- break;
- }
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
-
-/*******************************************************************************
- *
* FUNCTION: acpi_get_table_with_size
*
* PARAMETERS: signature - ACPI signature of needed table
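acpi_unload_table_id() goes away without a replacement in this hunk, presumably because nothing in the tree still unloads tables by owner ID. The exported unload interface that remains, as far as I can tell, is acpi_unload_parent_table(), which works from a namespace handle instead; a minimal sketch, with a purely illustrative path name:

static void demo_unload_by_handle(void)
{
	acpi_handle handle;

	/* Unload whichever table defines the named object (path is made up). */
	if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB.DEMO", &handle)))
		acpi_unload_parent_table(handle);
}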
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index ab5308b81aa8..aadb3002a2dd 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 43a54af2b548..eac52cf14f1a 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index a1acec9d2ef3..1279f50da757 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index efac83c606dc..61d8f6d186d1 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 038ea887f562..242bd071f007 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 78fde0aac487..eacc5eee362e 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index ff601c0f7c7a..c37ec5035f4c 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index e516254c63b2..4f3f888d33bb 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,8 +111,8 @@ void acpi_ut_track_stack_ptr(void)
* RETURN: Updated pointer to the function name
*
* DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
- * This allows compiler macros such as __FUNCTION__ to be used
- * with no change to the debug output.
+ * This allows compiler macros such as __func__ to be used with no
+ * change to the debug output.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 40e923e675fc..988e23b7795c 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index a3516de213fa..71fce389fd48 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 8e544d4688cd..9ef80f2828e3 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 8fed1482d228..6c738fa0cd42 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 0403dcaabaf2..743a0ae9fb17 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 4e263a8cc6f0..7e1168be39fa 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 77ceac715f28..5e8df9177da4 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 9afa9441b183..aa448278ba28 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 4b12880e5b11..27431cfc1c44 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 77120ec9ea86..e402e07b4846 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index dc6e96547f18..089f78bbd59b 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index d44dee6ee10a..f9ff100f0159 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 2e2bb14e1099..56bbacd576f2 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 82717fff9ffc..37b8b58fcd56 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index dfa9009bfc87..7d83efe1ea29 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 685766fc6ca8..574cd3118313 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 36bec57ebd23..2959217067cb 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index db30caff130a..29e449935a82 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 0ce3f5a0dd67..82ca9142e10d 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index bc1ff820c7dd..b3505dbc715e 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 1cc97a752c15..8274cc16edc3 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 6dc54b3c28b0..83b6c52490dc 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 7d0ee969d781..130dd9f96f0f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 4dc33130f134..c6149a212149 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 49c873c68756..0929187bdce0 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 88ef77f3cf88..306e785f9418 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index b1fd6886e439..083a76891889 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 2a0f9e04d3a4..f2606af3364c 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 2cd7bdd6c8b3..a85ac07f3da3 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -449,7 +449,7 @@ int apei_resources_sub(struct apei_resources *resources1,
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
-static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
struct apei_resources *resources = data;
return apei_res_add(&resources->iomem, start, size);
@@ -457,7 +457,15 @@ static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
static int apei_get_nvs_resources(struct apei_resources *resources)
{
- return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+ return acpi_nvs_for_each_region(apei_get_res_callback, resources);
+}
+
+int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+ void *data), void *data);
+static int apei_get_arch_resources(struct apei_resources *resources)
+
+{
+ return arch_apei_filter_addr(apei_get_res_callback, resources);
}
/*
@@ -470,7 +478,7 @@ int apei_resources_request(struct apei_resources *resources,
{
struct apei_res *res, *res_bak = NULL;
struct resource *r;
- struct apei_resources nvs_resources;
+ struct apei_resources nvs_resources, arch_res;
int rc;
rc = apei_resources_sub(resources, &apei_resources_all);
@@ -485,10 +493,20 @@ int apei_resources_request(struct apei_resources *resources,
apei_resources_init(&nvs_resources);
rc = apei_get_nvs_resources(&nvs_resources);
if (rc)
- goto res_fini;
+ goto nvs_res_fini;
rc = apei_resources_sub(resources, &nvs_resources);
if (rc)
- goto res_fini;
+ goto nvs_res_fini;
+
+ if (arch_apei_filter_addr) {
+ apei_resources_init(&arch_res);
+ rc = apei_get_arch_resources(&arch_res);
+ if (rc)
+ goto arch_res_fini;
+ rc = apei_resources_sub(resources, &arch_res);
+ if (rc)
+ goto arch_res_fini;
+ }
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
@@ -536,7 +554,9 @@ err_unmap_iomem:
break;
release_mem_region(res->start, res->end - res->start);
}
-res_fini:
+arch_res_fini:
+ apei_resources_fini(&arch_res);
+nvs_res_fini:
apei_resources_fini(&nvs_resources);
return rc;
}
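The net effect of this hunk: when an architecture installs the new arch_apei_filter_addr hook, apei_resources_request() subtracts the ranges that hook reports, exactly as it already does for NVS regions, so APEI never claims I/O memory the platform wants kept out of reach. A hypothetical arch-side hookup is sketched below; the helper names and the address range are invented, and it assumes the pointer is exposed to arch code through a header that is not part of this hunk.

/* Hypothetical arch code; only arch_apei_filter_addr itself comes from the patch. */
static int demo_filter_fw_addrs(int (*func)(__u64 start, __u64 size, void *data),
				void *data)
{
	/* Report each firmware-reserved window APEI must avoid, one func() call per range. */
	return func(0xfed40000ULL, 0x1000ULL, data);	/* single made-up window */
}

static int __init demo_apei_filter_init(void)
{
	arch_apei_filter_addr = demo_filter_fw_addrs;
	return 0;
}
subsys_initcall(demo_apei_filter_init);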
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c0d44d394ca3..735db11a9b00 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1027,7 +1027,6 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
-#ifdef CONFIG_PM
.runtime_suspend = acpi_subsys_runtime_suspend,
.runtime_resume = acpi_subsys_runtime_resume,
#ifdef CONFIG_PM_SLEEP
@@ -1041,7 +1040,6 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.poweroff_late = acpi_subsys_suspend_late,
.restore_early = acpi_subsys_resume_early,
#endif
-#endif
},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 1b5853f384e2..982b67faaaf3 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,8 +1,8 @@
/*
- * ec.c - ACPI Embedded Controller Driver (v2.2)
+ * ec.c - ACPI Embedded Controller Driver (v3)
*
- * Copyright (C) 2001-2014 Intel Corporation
- * Author: 2014 Lv Zheng <lv.zheng@intel.com>
+ * Copyright (C) 2001-2015 Intel Corporation
+ * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
* 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
* 2006 Denis Sadykov <denis.m.sadykov@intel.com>
* 2004 Luming Yu <luming.yu@intel.com>
@@ -71,15 +71,18 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_UDELAY_POLL 1000 /* Wait 1ms for EC transaction polling */
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
* when trying to clear the EC */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
- EC_FLAGS_GPE_STORM, /* GPE storm detected */
EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
* OpReg are installed */
- EC_FLAGS_BLOCKED, /* Transactions are blocked */
+ EC_FLAGS_STARTED, /* Driver is started */
+ EC_FLAGS_STOPPED, /* Driver is stopped */
+	EC_FLAGS_COMMAND_STORM, /* GPE storm detected during the
+ * current command processing */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -105,6 +108,7 @@ struct acpi_ec_query_handler {
acpi_handle handle;
void *data;
u8 query_bit;
+ struct kref kref;
};
struct transaction {
@@ -117,8 +121,12 @@ struct transaction {
u8 wlen;
u8 rlen;
u8 flags;
+ unsigned long timestamp;
};
+static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
+static void advance_transaction(struct acpi_ec *ec);
+
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
@@ -129,7 +137,22 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
/* --------------------------------------------------------------------------
- * Transaction Management
+ * Device Flags
+ * -------------------------------------------------------------------------- */
+
+static bool acpi_ec_started(struct acpi_ec *ec)
+{
+ return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ !test_bit(EC_FLAGS_STOPPED, &ec->flags);
+}
+
+static bool acpi_ec_flushed(struct acpi_ec *ec)
+{
+ return ec->reference_count == 1;
+}
+
+/* --------------------------------------------------------------------------
+ * EC Registers
* -------------------------------------------------------------------------- */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
@@ -151,6 +174,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
+ ec->curr->timestamp = jiffies;
pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
return x;
}
@@ -159,12 +183,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
pr_debug("EC_SC(W) = 0x%2.2x\n", command);
outb(command, ec->command_addr);
+ ec->curr->timestamp = jiffies;
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
outb(data, ec->data_addr);
+ ec->curr->timestamp = jiffies;
}
#ifdef DEBUG
@@ -188,6 +214,140 @@ static const char *acpi_ec_cmd_string(u8 cmd)
#define acpi_ec_cmd_string(cmd) "UNDEF"
#endif
+/* --------------------------------------------------------------------------
+ * GPE Registers
+ * -------------------------------------------------------------------------- */
+
+static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
+{
+ acpi_event_status gpe_status = 0;
+
+ (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
+ return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
+}
+
+static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
+{
+ if (open)
+ acpi_enable_gpe(NULL, ec->gpe);
+ else {
+ BUG_ON(ec->reference_count < 1);
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+ }
+ if (acpi_ec_is_gpe_raised(ec)) {
+ /*
+ * On some platforms, EN=1 writes cannot trigger GPE. So
+	 * software needs to manually trigger a pseudo GPE event on
+ * EN=1 writes.
+ */
+ pr_debug("***** Polling quirk *****\n");
+ advance_transaction(ec);
+ }
+}
+
+static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
+{
+ if (close)
+ acpi_disable_gpe(NULL, ec->gpe);
+ else {
+ BUG_ON(ec->reference_count < 1);
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+ }
+}
+
+static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
+{
+ /*
+ * GPE STS is a W1C register, which means:
+ * 1. Software can clear it without worrying about clearing other
+ * GPEs' STS bits when the hardware sets them in parallel.
+ * 2. As long as software can ensure only clearing it when it is
+ * set, hardware won't set it in parallel.
+	 * So software can clear the GPE in any context.
+ * Warning: do not move the check into advance_transaction() as the
+ * EC commands will be sent without GPE raised.
+ */
+ if (!acpi_ec_is_gpe_raised(ec))
+ return;
+ acpi_clear_gpe(NULL, ec->gpe);
+}
+
+/* --------------------------------------------------------------------------
+ * Transaction Management
+ * -------------------------------------------------------------------------- */
+
+static void acpi_ec_submit_request(struct acpi_ec *ec)
+{
+ ec->reference_count++;
+ if (ec->reference_count == 1)
+ acpi_ec_enable_gpe(ec, true);
+}
+
+static void acpi_ec_complete_request(struct acpi_ec *ec)
+{
+ bool flushed = false;
+
+ ec->reference_count--;
+ if (ec->reference_count == 0)
+ acpi_ec_disable_gpe(ec, true);
+ flushed = acpi_ec_flushed(ec);
+ if (flushed)
+ wake_up(&ec->wait);
+}
+
+static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+{
+ if (!test_bit(flag, &ec->flags)) {
+ acpi_ec_disable_gpe(ec, false);
+ pr_debug("+++++ Polling enabled +++++\n");
+ set_bit(flag, &ec->flags);
+ }
+}
+
+static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+{
+ if (test_bit(flag, &ec->flags)) {
+ clear_bit(flag, &ec->flags);
+ acpi_ec_enable_gpe(ec, false);
+ pr_debug("+++++ Polling disabled +++++\n");
+ }
+}
+
+/*
+ * acpi_ec_submit_flushable_request() - Increase the reference count unless
+ *                                      the flush operation is in
+ * progress
+ * @ec: the EC device
+ *
+ * This function must be used before taking a new action that should hold
+ * the reference count. If this function returns false, then the action
+ * must be discarded or it will prevent the flush operation from being
+ * completed.
+ */
+static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
+{
+ if (!acpi_ec_started(ec))
+ return false;
+ acpi_ec_submit_request(ec);
+ return true;
+}
+
+static void acpi_ec_submit_query(struct acpi_ec *ec)
+{
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+ pr_debug("***** Event started *****\n");
+ schedule_work(&ec->work);
+ }
+}
+
+static void acpi_ec_complete_query(struct acpi_ec *ec)
+{
+ if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ pr_debug("***** Event stopped *****\n");
+ }
+}
+
static int ec_transaction_completed(struct acpi_ec *ec)
{
unsigned long flags;
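The submit/complete helpers above give each EC a reference count of GPE users: the first reference enables the GPE, dropping the last one disables it, and a stop/flush waits until the count drains back to its base value; the flushable variant refuses new work once a flush has begun. A minimal sketch of the calling convention — the wrapper function here is hypothetical, but the pattern mirrors acpi_ec_transaction_unlocked() further down in this patch:

static int demo_ec_guarded_action(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	/* Refuse new work once acpi_ec_stop() has started flushing */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	/* ... do the GPE-protected work while the reference is held ... */
	acpi_ec_complete_request(ec);	/* may wake the waiter in acpi_ec_stop() */
unlock:
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}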
@@ -200,7 +360,7 @@ static int ec_transaction_completed(struct acpi_ec *ec)
return ret;
}
-static bool advance_transaction(struct acpi_ec *ec)
+static void advance_transaction(struct acpi_ec *ec)
{
struct transaction *t;
u8 status;
@@ -208,6 +368,12 @@ static bool advance_transaction(struct acpi_ec *ec)
pr_debug("===== %s (%d) =====\n",
in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
+ /*
+ * By always clearing STS before handling all indications, we can
+ * ensure a hardware STS 0->1 change after this clearing can always
+ * trigger a GPE interrupt.
+ */
+ acpi_ec_clear_gpe(ec);
status = acpi_ec_read_status(ec);
t = ec->curr;
if (!t)
@@ -235,12 +401,13 @@ static bool advance_transaction(struct acpi_ec *ec)
t->flags |= ACPI_EC_COMMAND_COMPLETE;
wakeup = true;
}
- return wakeup;
+ goto out;
} else {
if (EC_FLAGS_QUERY_HANDSHAKE &&
!(status & ACPI_EC_FLAG_SCI) &&
(t->command == ACPI_EC_COMMAND_QUERY)) {
t->flags |= ACPI_EC_COMMAND_POLL;
+ acpi_ec_complete_query(ec);
t->rdata[t->ri++] = 0x00;
t->flags |= ACPI_EC_COMMAND_COMPLETE;
pr_debug("***** Command(%s) software completion *****\n",
@@ -249,9 +416,10 @@ static bool advance_transaction(struct acpi_ec *ec)
} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
acpi_ec_write_cmd(ec, t->command);
t->flags |= ACPI_EC_COMMAND_POLL;
+ acpi_ec_complete_query(ec);
} else
goto err;
- return wakeup;
+ goto out;
}
err:
/*
@@ -259,28 +427,27 @@ err:
* otherwise will take a not handled IRQ as a false one.
*/
if (!(status & ACPI_EC_FLAG_SCI)) {
- if (in_interrupt() && t)
- ++t->irq_count;
+ if (in_interrupt() && t) {
+ if (t->irq_count < ec_storm_threshold)
+ ++t->irq_count;
+ /* Allow triggering on 0 threshold */
+ if (t->irq_count == ec_storm_threshold)
+ acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ }
}
- return wakeup;
+out:
+ if (status & ACPI_EC_FLAG_SCI)
+ acpi_ec_submit_query(ec);
+ if (wakeup && in_interrupt())
+ wake_up(&ec->wait);
}
static void start_transaction(struct acpi_ec *ec)
{
ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
ec->curr->flags = 0;
- (void)advance_transaction(ec);
-}
-
-static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
-
-static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
-{
- if (state & ACPI_EC_FLAG_SCI) {
- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec, NULL);
- }
- return 0;
+ ec->curr->timestamp = jiffies;
+ advance_transaction(ec);
}
static int ec_poll(struct acpi_ec *ec)
@@ -291,20 +458,25 @@ static int ec_poll(struct acpi_ec *ec)
while (repeat--) {
unsigned long delay = jiffies +
msecs_to_jiffies(ec_delay);
+ unsigned long usecs = ACPI_EC_UDELAY_POLL;
do {
/* don't sleep with disabled interrupts */
if (EC_FLAGS_MSI || irqs_disabled()) {
- udelay(ACPI_EC_MSI_UDELAY);
+ usecs = ACPI_EC_MSI_UDELAY;
+ udelay(usecs);
if (ec_transaction_completed(ec))
return 0;
} else {
if (wait_event_timeout(ec->wait,
ec_transaction_completed(ec),
- msecs_to_jiffies(1)))
+ usecs_to_jiffies(usecs)))
return 0;
}
spin_lock_irqsave(&ec->lock, flags);
- (void)advance_transaction(ec);
+ if (time_after(jiffies,
+ ec->curr->timestamp +
+ usecs_to_jiffies(usecs)))
+ advance_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
@@ -325,21 +497,27 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
+ /* Enable GPE for command processing (IBF=0/OBF=1) */
+ if (!acpi_ec_submit_flushable_request(ec)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
/* following two actions should be kept atomic */
ec->curr = t;
pr_debug("***** Command(%s) started *****\n",
acpi_ec_cmd_string(t->command));
start_transaction(ec);
- if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
- clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- pr_debug("***** Event stopped *****\n");
- }
spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
spin_lock_irqsave(&ec->lock, tmp);
+ if (t->irq_count == ec_storm_threshold)
+ acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
pr_debug("***** Command(%s) stopped *****\n",
acpi_ec_cmd_string(t->command));
ec->curr = NULL;
+ /* Disable GPE for command processing (IBF=0/OBF=1) */
+ acpi_ec_complete_request(ec);
+unlock:
spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -354,10 +532,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
if (t->rdata)
memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->mutex);
- if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
- status = -EINVAL;
- goto unlock;
- }
if (ec->global_lock) {
status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
if (ACPI_FAILURE(status)) {
@@ -365,26 +539,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
goto unlock;
}
}
- /* disable GPE during transaction if storm is detected */
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- /* It has to be disabled, so that it doesn't trigger. */
- acpi_disable_gpe(NULL, ec->gpe);
- }
status = acpi_ec_transaction_unlocked(ec, t);
- /* check if we received SCI during transaction */
- ec_check_sci_sync(ec, acpi_ec_read_status(ec));
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
msleep(1);
- /* It is safe to enable the GPE outside of the transaction. */
- acpi_enable_gpe(NULL, ec->gpe);
- } else if (t->irq_count > ec_storm_threshold) {
- pr_info("GPE storm detected(%d GPEs), "
- "transactions will use polling mode\n",
- t->irq_count);
- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
- }
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
@@ -500,7 +659,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
u8 value = 0;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_sync_query(ec, &value);
+ status = acpi_ec_query(ec, &value);
if (status || !value)
break;
}
@@ -511,6 +670,53 @@ static void acpi_ec_clear(struct acpi_ec *ec)
pr_info("%d stale EC events cleared\n", i);
}
+static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
+ pr_debug("+++++ Starting EC +++++\n");
+ /* Enable GPE for event processing (SCI_EVT=1) */
+ if (!resuming)
+ acpi_ec_submit_request(ec);
+ pr_info("+++++ EC started +++++\n");
+ }
+ spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static bool acpi_ec_stopped(struct acpi_ec *ec)
+{
+ unsigned long flags;
+ bool flushed;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ flushed = acpi_ec_flushed(ec);
+ spin_unlock_irqrestore(&ec->lock, flags);
+ return flushed;
+}
+
+static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ if (acpi_ec_started(ec)) {
+ pr_debug("+++++ Stopping EC +++++\n");
+ set_bit(EC_FLAGS_STOPPED, &ec->flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
+ wait_event(ec->wait, acpi_ec_stopped(ec));
+ spin_lock_irqsave(&ec->lock, flags);
+ /* Disable GPE for event processing (SCI_EVT=1) */
+ if (!suspending)
+ acpi_ec_complete_request(ec);
+ clear_bit(EC_FLAGS_STARTED, &ec->flags);
+ clear_bit(EC_FLAGS_STOPPED, &ec->flags);
+ pr_info("+++++ EC stopped +++++\n");
+ }
+ spin_unlock_irqrestore(&ec->lock, flags);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -520,7 +726,7 @@ void acpi_ec_block_transactions(void)
mutex_lock(&ec->mutex);
/* Prevent transactions from being carried out */
- set_bit(EC_FLAGS_BLOCKED, &ec->flags);
+ acpi_ec_stop(ec, true);
mutex_unlock(&ec->mutex);
}
@@ -531,14 +737,11 @@ void acpi_ec_unblock_transactions(void)
if (!ec)
return;
- mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
- clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+ acpi_ec_start(ec, true);
if (EC_FLAGS_CLEAR_ON_RESUME)
acpi_ec_clear(ec);
-
- mutex_unlock(&ec->mutex);
}
void acpi_ec_unblock_transactions_early(void)
@@ -548,36 +751,33 @@ void acpi_ec_unblock_transactions_early(void)
* atomic context during wakeup, so we don't need to acquire the mutex).
*/
if (first_ec)
- clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
+ acpi_ec_start(first_ec, true);
}
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data)
+/* --------------------------------------------------------------------------
+ Event Management
+ -------------------------------------------------------------------------- */
+static struct acpi_ec_query_handler *
+acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
- int result;
- u8 d;
- struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
- .wdata = NULL, .rdata = &d,
- .wlen = 0, .rlen = 1};
+ if (handler)
+ kref_get(&handler->kref);
+ return handler;
+}
- if (!ec || !data)
- return -EINVAL;
- /*
- * Query the EC to find out which _Qxx method we need to evaluate.
- * Note that successful completion of the query causes the ACPI_EC_SCI
- * bit to be cleared (and thus clearing the interrupt source).
- */
- result = acpi_ec_transaction_unlocked(ec, &t);
- if (result)
- return result;
- if (!d)
- return -ENODATA;
- *data = d;
- return 0;
+static void acpi_ec_query_handler_release(struct kref *kref)
+{
+ struct acpi_ec_query_handler *handler =
+ container_of(kref, struct acpi_ec_query_handler, kref);
+
+ kfree(handler);
+}
+
+static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
+{
+ kref_put(&handler->kref, acpi_ec_query_handler_release);
}
-/* --------------------------------------------------------------------------
- Event Management
- -------------------------------------------------------------------------- */
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data)
@@ -593,6 +793,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
handler->func = func;
handler->data = data;
mutex_lock(&ec->mutex);
+ kref_init(&handler->kref);
list_add(&handler->node, &ec->list);
mutex_unlock(&ec->mutex);
return 0;
@@ -602,15 +803,18 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
struct acpi_ec_query_handler *handler, *tmp;
+ LIST_HEAD(free_list);
mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
if (query_bit == handler->query_bit) {
- list_del(&handler->node);
- kfree(handler);
+ list_del_init(&handler->node);
+ list_add(&handler->node, &free_list);
}
}
mutex_unlock(&ec->mutex);
+ list_for_each_entry(handler, &free_list, node)
+ acpi_ec_put_query_handler(handler);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
@@ -626,59 +830,56 @@ static void acpi_ec_run(void *cxt)
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
- kfree(handler);
+ acpi_ec_put_query_handler(handler);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
+static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
- int status;
- struct acpi_ec_query_handler *handler, *copy;
+ int result;
+ acpi_status status;
+ struct acpi_ec_query_handler *handler;
+ struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
+ .wdata = NULL, .rdata = &value,
+ .wlen = 0, .rlen = 1};
- status = acpi_ec_query_unlocked(ec, &value);
+ /*
+ * Query the EC to find out which _Qxx method we need to evaluate.
+ * Note that successful completion of the query causes the ACPI_EC_SCI
+ * bit to be cleared (and thus clearing the interrupt source).
+ */
+ result = acpi_ec_transaction(ec, &t);
+ if (result)
+ return result;
if (data)
*data = value;
- if (status)
- return status;
+ if (!value)
+ return -ENODATA;
+ mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
- copy = kmalloc(sizeof(*handler), GFP_KERNEL);
- if (!copy)
- return -ENOMEM;
- memcpy(copy, handler, sizeof(*copy));
+ handler = acpi_ec_get_query_handler(handler);
pr_debug("##### Query(0x%02x) scheduled #####\n",
handler->query_bit);
- return acpi_os_execute((copy->func) ?
+ status = acpi_os_execute((handler->func) ?
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
- acpi_ec_run, copy);
+ acpi_ec_run, handler);
+ if (ACPI_FAILURE(status))
+ result = -EBUSY;
+ break;
}
}
- return 0;
-}
-
-static void acpi_ec_gpe_query(void *ec_cxt)
-{
- struct acpi_ec *ec = ec_cxt;
-
- if (!ec)
- return;
- mutex_lock(&ec->mutex);
- acpi_ec_sync_query(ec, NULL);
mutex_unlock(&ec->mutex);
+ return result;
}
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
+static void acpi_ec_gpe_poller(struct work_struct *work)
{
- if (state & ACPI_EC_FLAG_SCI) {
- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
- pr_debug("***** Event started *****\n");
- return acpi_os_execute(OSL_NOTIFY_HANDLER,
- acpi_ec_gpe_query, ec);
- }
- }
- return 0;
+ struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
+
+ acpi_ec_query(ec, NULL);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -688,11 +889,9 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
struct acpi_ec *ec = data;
spin_lock_irqsave(&ec->lock, flags);
- if (advance_transaction(ec))
- wake_up(&ec->wait);
+ advance_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
- ec_check_sci(ec, acpi_ec_read_status(ec));
- return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
+ return ACPI_INTERRUPT_HANDLED;
}
/* --------------------------------------------------------------------------
@@ -755,6 +954,7 @@ static struct acpi_ec *make_acpi_ec(void)
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
spin_lock_init(&ec->lock);
+ INIT_WORK(&ec->work, acpi_ec_gpe_poller);
return ec;
}
@@ -810,13 +1010,13 @@ static int ec_install_handlers(struct acpi_ec *ec)
if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
return 0;
- status = acpi_install_gpe_handler(NULL, ec->gpe,
+ status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
ACPI_GPE_EDGE_TRIGGERED,
&acpi_ec_gpe_handler, ec);
if (ACPI_FAILURE(status))
return -ENODEV;
- acpi_enable_gpe(NULL, ec->gpe);
+ acpi_ec_start(ec, false);
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
@@ -831,7 +1031,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
pr_err("Fail in evaluating the _REG object"
" of EC device. Broken bios is suspected.\n");
} else {
- acpi_disable_gpe(NULL, ec->gpe);
+ acpi_ec_stop(ec, false);
acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler);
return -ENODEV;
@@ -846,7 +1046,7 @@ static void ec_remove_handlers(struct acpi_ec *ec)
{
if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
return;
- acpi_disable_gpe(NULL, ec->gpe);
+ acpi_ec_stop(ec, false);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err("failed to remove space handler\n");
@@ -903,11 +1103,8 @@ static int acpi_ec_add(struct acpi_device *device)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
/* Clear stale _Q events if hardware might require that */
- if (EC_FLAGS_CLEAR_ON_RESUME) {
- mutex_lock(&ec->mutex);
+ if (EC_FLAGS_CLEAR_ON_RESUME)
acpi_ec_clear(ec);
- mutex_unlock(&ec->mutex);
- }
return ret;
}
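One behavioural point worth drawing out of the ec.c changes above: query handlers are now kref-counted, so acpi_ec_remove_query_handler() only unlinks a handler and the memory is freed on the last kref_put(), which lets the workqueue keep running a handler that is being removed concurrently. A hedged sketch of the consumer side; the query bit, function and caller are invented, while the registration helpers are the exported ones touched above:

static int demo_q09_handler(void *data)
{
	/* Runs from the EC event work; the handler object stays pinned by its
	 * kref until acpi_ec_run() drops the last reference. */
	return 0;
}

static void demo_register_and_remove(void)
{
	if (!first_ec)
		return;
	acpi_ec_add_query_handler(first_ec, 0x09, NULL, demo_q09_handler, NULL);
	/* ... */
	acpi_ec_remove_query_handler(first_ec, 0x09);	/* kfree() deferred to the last put */
}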
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index ef2d730734dc..e24ea4e796e4 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -100,7 +100,6 @@ int acpi_bus_generate_netlink_event(const char *device_class,
struct acpi_genl_event *event;
void *msg_header;
int size;
- int result;
/* allocate memory */
size = nla_total_size(sizeof(struct acpi_genl_event)) +
@@ -137,11 +136,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
event->data = data;
/* send multicast genetlink message */
- result = genlmsg_end(skb, msg_header);
- if (result < 0) {
- nlmsg_free(skb);
- return result;
- }
+ genlmsg_end(skb, msg_header);
genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
return 0;
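This hunk tracks the netlink change that made genlmsg_end() unable to fail, so the error branch and the now-unused result variable simply go away. For reference, a minimal send path under the new rule looks like the sketch below; the family and command are placeholders rather than anything defined in event.c:

static int demo_genl_send(struct genl_family *family, u8 cmd)
{
	struct sk_buff *skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	void *hdr;

	if (!skb)
		return -ENOMEM;
	hdr = genlmsg_put(skb, 0, 0, family, 0, cmd);
	if (!hdr) {
		nlmsg_free(skb);
		return -ENOMEM;
	}
	/* ... nla_put() attributes here ... */
	genlmsg_end(skb, hdr);		/* finalizes the message; nothing to check any more */
	return genlmsg_multicast(family, skb, 0, 0, GFP_ATOMIC);
}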
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index a27d31d1ba24..9dcf83682e36 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -14,10 +14,10 @@
#include "internal.h"
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0x01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400", DO_ENUMERATION },
- {"INT3401"},
+ {"INT3400"},
+ {"INT3401", INT3401_DEVICE},
{"INT3402"},
{"INT3403"},
{"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
- if (id->driver_data == DO_ENUMERATION)
+ acpi_create_platform_device(adev);
+#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
+ /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+ if (id->driver_data == INT3401_DEVICE)
acpi_create_platform_device(adev);
#endif
return 1;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 163e82f536fa..56b321aa2b1c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -35,6 +35,13 @@ void acpi_int340x_thermal_init(void);
int acpi_sysfs_init(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(struct acpi_pci_root *root);
+int acpi_ioapic_remove(struct acpi_pci_root *root);
+#else
+static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; }
+static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
+#endif
#ifdef CONFIG_ACPI_DOCK
void register_dock_dependent_device(struct acpi_device *adev,
acpi_handle dshandle);
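The acpi_ioapic_add()/acpi_ioapic_remove() declarations above follow the usual stub-when-disabled pattern, so call sites need no #ifdef of their own. A hypothetical caller is shown below; the real call sites live in the PCI root bridge code rather than in this hunk:

static void demo_scan_host_bridge(struct acpi_pci_root *root)
{
	/* Compiles identically with or without CONFIG_ACPI_HOTPLUG_IOAPIC;
	 * the stub above simply returns 0. */
	if (acpi_ioapic_add(root))
		pr_warn("failed to enumerate I/O APICs under this host bridge\n");
}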
@@ -68,6 +75,8 @@ static inline void acpi_debugfs_init(void) { return; }
#endif
void acpi_lpss_init(void);
+void acpi_apd_init(void);
+
acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
bool acpi_queue_hotplug_work(struct work_struct *work);
void acpi_device_hotplug(struct acpi_device *adev, u32 src);
@@ -122,11 +131,13 @@ struct acpi_ec {
unsigned long data_addr;
unsigned long global_lock;
unsigned long flags;
+ unsigned long reference_count;
struct mutex mutex;
wait_queue_head_t wait;
struct list_head list;
struct transaction *curr;
spinlock_t lock;
+ struct work_struct work;
};
extern struct acpi_ec *first_ec;
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
new file mode 100644
index 000000000000..ccdc8db16bb8
--- /dev/null
+++ b/drivers/acpi/ioapic.c
@@ -0,0 +1,229 @@
+/*
+ * IOAPIC/IOxAPIC/IOSAPIC driver
+ *
+ * Copyright (C) 2009 Fujitsu Limited.
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on original drivers/pci/ioapic.c
+ * Yinghai Lu <yinghai@kernel.org>
+ * Jiang Liu <jiang.liu@intel.com>
+ */
+
+/*
+ * This driver manages I/O APICs added by hotplug after boot.
+ * We try to claim all I/O APIC devices, but those present at boot were
+ * registered when we parsed the ACPI MADT.
+ */
+
+#define pr_fmt(fmt) "ACPI : IOAPIC: " fmt
+
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <acpi/acpi.h>
+
+struct acpi_pci_ioapic {
+ acpi_handle root_handle;
+ acpi_handle handle;
+ u32 gsi_base;
+ struct resource res;
+ struct pci_dev *pdev;
+ struct list_head list;
+};
+
+static LIST_HEAD(ioapic_list);
+static DEFINE_MUTEX(ioapic_list_lock);
+
+static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
+{
+ struct resource *res = data;
+ struct resource_win win;
+
+ res->flags = 0;
+ if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
+ return AE_OK;
+
+ if (!acpi_dev_resource_memory(acpi_res, res)) {
+ if (acpi_dev_resource_address_space(acpi_res, &win) ||
+ acpi_dev_resource_ext_address_space(acpi_res, &win))
+ *res = win.res;
+ }
+ if ((res->flags & IORESOURCE_PREFETCH) ||
+ (res->flags & IORESOURCE_DISABLED))
+ res->flags = 0;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static bool acpi_is_ioapic(acpi_handle handle, char **type)
+{
+ acpi_status status;
+ struct acpi_device_info *info;
+ char *hid = NULL;
+ bool match = false;
+
+ if (!acpi_has_method(handle, "_GSB"))
+ return false;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_SUCCESS(status)) {
+ if (info->valid & ACPI_VALID_HID)
+ hid = info->hardware_id.string;
+ if (hid) {
+ if (strcmp(hid, "ACPI0009") == 0) {
+ *type = "IOxAPIC";
+ match = true;
+ } else if (strcmp(hid, "ACPI000A") == 0) {
+ *type = "IOAPIC";
+ match = true;
+ }
+ }
+ kfree(info);
+ }
+
+ return match;
+}
+
+static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
+{
+ acpi_status status;
+ unsigned long long gsi_base;
+ struct acpi_pci_ioapic *ioapic;
+ struct pci_dev *dev = NULL;
+ struct resource *res = NULL;
+ char *type = NULL;
+
+ if (!acpi_is_ioapic(handle, &type))
+ return AE_OK;
+
+ mutex_lock(&ioapic_list_lock);
+ list_for_each_entry(ioapic, &ioapic_list, list)
+ if (ioapic->handle == handle) {
+ mutex_unlock(&ioapic_list_lock);
+ return AE_OK;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle, "failed to evaluate _GSB method\n");
+ goto exit;
+ }
+
+ ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
+ if (!ioapic) {
+ pr_err("cannot allocate memory for new IOAPIC\n");
+ goto exit;
+ } else {
+ ioapic->root_handle = (acpi_handle)context;
+ ioapic->handle = handle;
+ ioapic->gsi_base = (u32)gsi_base;
+ INIT_LIST_HEAD(&ioapic->list);
+ }
+
+ if (acpi_ioapic_registered(handle, (u32)gsi_base))
+ goto done;
+
+ dev = acpi_get_pci_dev(handle);
+ if (dev && pci_resource_len(dev, 0)) {
+ if (pci_enable_device(dev) < 0)
+ goto exit_put;
+ pci_set_master(dev);
+ if (pci_request_region(dev, 0, type))
+ goto exit_disable;
+ res = &dev->resource[0];
+ ioapic->pdev = dev;
+ } else {
+ pci_dev_put(dev);
+ dev = NULL;
+
+ res = &ioapic->res;
+ acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res);
+ if (res->flags == 0) {
+ acpi_handle_warn(handle, "failed to get resource\n");
+ goto exit_free;
+ } else if (request_resource(&iomem_resource, res)) {
+ acpi_handle_warn(handle, "failed to insert resource\n");
+ goto exit_free;
+ }
+ }
+
+ if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
+ acpi_handle_warn(handle, "failed to register IOAPIC\n");
+ goto exit_release;
+ }
+done:
+ list_add(&ioapic->list, &ioapic_list);
+ mutex_unlock(&ioapic_list_lock);
+
+ if (dev)
+ dev_info(&dev->dev, "%s at %pR, GSI %u\n",
+ type, res, (u32)gsi_base);
+ else
+ acpi_handle_info(handle, "%s at %pR, GSI %u\n",
+ type, res, (u32)gsi_base);
+
+ return AE_OK;
+
+exit_release:
+ if (dev)
+ pci_release_region(dev, 0);
+ else
+ release_resource(res);
+exit_disable:
+ if (dev)
+ pci_disable_device(dev);
+exit_put:
+ pci_dev_put(dev);
+exit_free:
+ kfree(ioapic);
+exit:
+ mutex_unlock(&ioapic_list_lock);
+ *(acpi_status *)rv = AE_ERROR;
+ return AE_OK;
+}
+
+int acpi_ioapic_add(struct acpi_pci_root *root)
+{
+ acpi_status status, retval = AE_OK;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
+ UINT_MAX, handle_ioapic_add, NULL,
+ root->device->handle, (void **)&retval);
+
+ return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+ int retval = 0;
+ struct acpi_pci_ioapic *ioapic, *tmp;
+
+ mutex_lock(&ioapic_list_lock);
+ list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+ if (root->device->handle != ioapic->root_handle)
+ continue;
+
+ if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+ retval = -EBUSY;
+
+ if (ioapic->pdev) {
+ pci_release_region(ioapic->pdev, 0);
+ pci_disable_device(ioapic->pdev);
+ pci_dev_put(ioapic->pdev);
+ } else if (ioapic->res.flags && ioapic->res.parent) {
+ release_resource(&ioapic->res);
+ }
+ list_del(&ioapic->list);
+ kfree(ioapic);
+ }
+ mutex_unlock(&ioapic_list_lock);
+
+ return retval;
+}
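
A minimal sketch of the namespace-walk convention used by handle_ioapic_add() above, assuming only the acpi_walk_namespace()/acpi_has_method() interfaces already referenced in this file; the callback name and the counting logic are illustrative and not part of this patch:

#include <linux/kernel.h>
#include <linux/acpi.h>

/* Illustrative callback: always return AE_OK so the walk visits every
 * device; handle_ioapic_add() additionally reports per-object failures
 * through the return_value pointer instead of aborting the walk. */
static acpi_status example_count_gsb(acpi_handle handle, u32 level,
				     void *context, void **rv)
{
	unsigned int *count = context;

	if (acpi_has_method(handle, "_GSB"))
		(*count)++;

	return AE_OK;
}

static unsigned int example_count_ioapics(struct acpi_pci_root *root)
{
	unsigned int count = 0;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
			    UINT_MAX, example_count_gsb, NULL,
			    &count, NULL);
	return count;
}
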
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 24b5476449a1..1333cbdc3ea2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -177,12 +177,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
- struct acpi_table_slit *slit;
-
- if (!table)
- return -EINVAL;
-
- slit = (struct acpi_table_slit *)table;
+ struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
if (!slit_valid(slit)) {
printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
@@ -260,11 +255,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
- struct acpi_table_srat *srat;
- if (!table)
- return -EINVAL;
+ struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
- srat = (struct acpi_table_srat *)table;
acpi_srat_revision = srat->header.revision;
/* Real work done in acpi_table_parse_srat below. */
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 5277a0ee5704..e7f718d6918a 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,14 +485,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
- /* Keep IOAPIC pin configuration when suspending */
- if (dev->dev.power.is_prepared)
- return;
-#ifdef CONFIG_PM
- if (dev->dev.power.runtime_status == RPM_SUSPENDING)
- return;
-#endif
-
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -512,7 +504,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- dev->irq = 0;
dev->irq_managed = 0;
+ dev->irq = 0;
}
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c6bcb8c719d8..68a5f712cd19 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -112,10 +112,10 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
if (ACPI_FAILURE(status))
return AE_OK;
- if ((address.address_length > 0) &&
+ if ((address.address.address_length > 0) &&
(address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
- res->start = address.minimum;
- res->end = address.minimum + address.address_length - 1;
+ res->start = address.address.minimum;
+ res->end = address.address.minimum + address.address.address_length - 1;
}
return AE_OK;
@@ -621,6 +621,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
+ acpi_ioapic_add(root);
}
pci_lock_rescan_remove();
@@ -644,6 +645,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_stop_root_bus(root->bus);
+ WARN_ON(acpi_ioapic_remove(root));
+
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 02e48394276c..7962651cdbd4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -4,6 +4,10 @@
*
* Alex Chiang <achiang@hp.com>
* - Unified x86/ia64 implementations
+ *
+ * I/O APIC hotplug support
+ * Yinghai Lu <yinghai@kernel.org>
+ * Jiang Liu <jiang.liu@intel.com>
*/
#include <linux/export.h>
#include <linux/acpi.h>
@@ -12,6 +16,21 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
+static struct acpi_table_madt *get_madt_table(void)
+{
+ static struct acpi_table_madt *madt;
+ static int read_madt;
+
+ if (!read_madt) {
+ if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt)))
+ madt = NULL;
+ read_madt++;
+ }
+
+ return madt;
+}
+
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -67,17 +86,10 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
static int map_madt_entry(int type, u32 acpi_id)
{
unsigned long madt_end, entry;
- static struct acpi_table_madt *madt;
- static int read_madt;
int phys_id = -1; /* CPU hardware ID */
+ struct acpi_table_madt *madt;
- if (!read_madt) {
- if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
- (struct acpi_table_header **)&madt)))
- madt = NULL;
- read_madt++;
- }
-
+ madt = get_madt_table();
if (!madt)
return phys_id;
@@ -203,3 +215,96 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
+ u64 *phys_addr, int *ioapic_id)
+{
+ struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;
+
+ if (ioapic->global_irq_base != gsi_base)
+ return 0;
+
+ *phys_addr = ioapic->address;
+ *ioapic_id = ioapic->id;
+ return 1;
+}
+
+static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
+{
+ struct acpi_subtable_header *hdr;
+ unsigned long madt_end, entry;
+ struct acpi_table_madt *madt;
+ int apic_id = -1;
+
+ madt = get_madt_table();
+ if (!madt)
+ return apic_id;
+
+ entry = (unsigned long)madt;
+ madt_end = entry + madt->header.length;
+
+ /* Parse all entries looking for a match. */
+ entry += sizeof(struct acpi_table_madt);
+ while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
+ hdr = (struct acpi_subtable_header *)entry;
+ if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
+ get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
+ break;
+ else
+ entry += hdr->length;
+ }
+
+ return apic_id;
+}
+
+static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
+ u64 *phys_addr)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_subtable_header *header;
+ union acpi_object *obj;
+ int apic_id = -1;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
+ goto exit;
+
+ if (!buffer.length || !buffer.pointer)
+ goto exit;
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER ||
+ obj->buffer.length < sizeof(struct acpi_subtable_header))
+ goto exit;
+
+ header = (struct acpi_subtable_header *)obj->buffer.pointer;
+ if (header->type == ACPI_MADT_TYPE_IO_APIC)
+ get_ioapic_id(header, gsi_base, phys_addr, &apic_id);
+
+exit:
+ kfree(buffer.pointer);
+ return apic_id;
+}
+
+/**
+ * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
+ * @handle: ACPI object for IOAPIC device
+ * @gsi_base: GSI base to match with
+ * @phys_addr: Pointer to store physical address of matching IOAPIC record
+ *
+ * Walk the resources returned by the ACPI _MAT method, then the ACPI MADT
+ * table, to search for an ACPI IOAPIC record matching @gsi_base.
+ * Return the IOAPIC id and store the physical address in @phys_addr if a
+ * match is found, otherwise return <0.
+ */
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
+{
+ int apic_id;
+
+ apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
+ if (apic_id == -1)
+ apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);
+
+ return apic_id;
+}
+#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */
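
A hypothetical caller sketch for acpi_get_ioapic_id() as added above, assuming its declaration is visible to the caller (the header change is outside this hunk); the mapping size and the example_ function name are made up for illustration:

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/errno.h>

/* Resolve the MMIO base and APIC id of a hot-added I/O APIC before
 * registering it; acpi_get_ioapic_id() tries _MAT first and then falls
 * back to the boot-time MADT, as implemented above. */
static int example_map_hotplugged_ioapic(acpi_handle handle, u32 gsi_base)
{
	void __iomem *base;
	u64 phys_addr;
	int id;

	id = acpi_get_ioapic_id(handle, gsi_base, &phys_addr);
	if (id < 0)
		return -ENODEV;

	base = ioremap(phys_addr, 0x100);	/* size is illustrative */
	if (!base)
		return -ENOMEM;

	/* ... program the hot-added I/O APIC using 'id' and 'base' ... */
	iounmap(base);
	return 0;
}
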
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 87b704e41877..c256bd7fbd78 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
}
/**
- * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * acpi_idle_do_entry - enter idle state using the appropriate method
* @cx: cstate data
*
* Caller disables interrupt before call and enables interrupt after return.
*/
-static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
- /* Don't trace irqs off for idle */
- stop_critical_timings();
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
@@ -703,38 +701,9 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
gets asserted in time to freeze execution properly. */
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
- start_critical_timings();
}
/**
- * acpi_idle_enter_c1 - enters an ACPI C1 state-type
- * @dev: the target CPU
- * @drv: cpuidle driver containing cpuidle state info
- * @index: index of target state
- *
- * This is equivalent to the HALT instruction.
- */
-static int acpi_idle_enter_c1(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- struct acpi_processor *pr;
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
- pr = __this_cpu_read(processors);
-
- if (unlikely(!pr))
- return -EINVAL;
-
- lapic_timer_state_broadcast(pr, cx, 1);
- acpi_idle_do_entry(cx);
-
- lapic_timer_state_broadcast(pr, cx, 0);
-
- return index;
-}
-
-
-/**
* acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
* @dev: the target CPU
* @index: the index of suggested state
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
return 0;
}
-/**
- * acpi_idle_enter_simple - enters an ACPI state without BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver with cpuidle state information
- * @index: the index of suggested state
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
- struct acpi_processor *pr;
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
- pr = __this_cpu_read(processors);
-
- if (unlikely(!pr))
- return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst &&
- !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
- /*
- * Must be done before busmaster disable as we might need to
- * access HPET !
- */
- lapic_timer_state_broadcast(pr, cx, 1);
-
- if (cx->type == ACPI_STATE_C3)
- ACPI_FLUSH_CPU_CACHE();
-
- /* Tell the scheduler that we are going deep-idle: */
- sched_clock_idle_sleep_event();
- acpi_idle_do_entry(cx);
-
- sched_clock_idle_wakeup_event(0);
-
- lapic_timer_state_broadcast(pr, cx, 0);
- return index;
+ return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
+ !pr->flags.has_cst;
}
static int c3_cpu_count;
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver containing state data
- * @index: the index of suggested state
- *
- * If BM is detected, the deepest non-C3 idle state is entered instead.
+ * @pr: Target processor
+ * @cx: Target state context
*/
-static int acpi_idle_enter_bm(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static void acpi_idle_enter_bm(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
{
- struct acpi_processor *pr;
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
- pr = __this_cpu_read(processors);
-
- if (unlikely(!pr))
- return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst &&
- !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
- if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
- if (drv->safe_state_index >= 0) {
- return drv->states[drv->safe_state_index].enter(dev,
- drv, drv->safe_state_index);
- } else {
- acpi_safe_halt();
- return -EBUSY;
- }
- }
-
acpi_unlazy_tlb(smp_processor_id());
- /* Tell the scheduler that we are going deep-idle: */
- sched_clock_idle_sleep_event();
/*
* Must be done before busmaster disable as we might need to
* access HPET !
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
/*
* disable bus master
* bm_check implies we need ARB_DIS
- * !bm_check implies we need cache flush
* bm_control implies whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is
* not set. In that case we cannot do much, we enter C3
* without doing anything.
*/
- if (pr->flags.bm_check && pr->flags.bm_control) {
+ if (pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
raw_spin_unlock(&c3_lock);
- } else if (!pr->flags.bm_check) {
- ACPI_FLUSH_CPU_CACHE();
}
acpi_idle_do_entry(cx);
/* Re-enable bus master arbitration */
- if (pr->flags.bm_check && pr->flags.bm_control) {
+ if (pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
- sched_clock_idle_wakeup_event(0);
+ lapic_timer_state_broadcast(pr, cx, 0);
+}
+
+static int acpi_idle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+ struct acpi_processor *pr;
+
+ pr = __this_cpu_read(processors);
+ if (unlikely(!pr))
+ return -EINVAL;
+
+ if (cx->type != ACPI_STATE_C1) {
+ if (acpi_idle_fallback_to_c1(pr)) {
+ index = CPUIDLE_DRIVER_STATE_START;
+ cx = per_cpu(acpi_cstate[index], dev->cpu);
+ } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
+ if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
+ acpi_idle_enter_bm(pr, cx);
+ return index;
+ } else if (drv->safe_state_index >= 0) {
+ index = drv->safe_state_index;
+ cx = per_cpu(acpi_cstate[index], dev->cpu);
+ } else {
+ acpi_safe_halt();
+ return -EBUSY;
+ }
+ }
+ }
+
+ lapic_timer_state_broadcast(pr, cx, 1);
+
+ if (cx->type == ACPI_STATE_C3)
+ ACPI_FLUSH_CPU_CACHE();
+
+ acpi_idle_do_entry(cx);
lapic_timer_state_broadcast(pr, cx, 0);
+
return index;
}
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
+ state->enter = acpi_idle_enter;
state->flags = 0;
- switch (cx->type) {
- case ACPI_STATE_C1:
-
- state->enter = acpi_idle_enter_c1;
- state->enter_dead = acpi_idle_play_dead;
- drv->safe_state_index = count;
- break;
-
- case ACPI_STATE_C2:
- state->enter = acpi_idle_enter_simple;
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
- break;
-
- case ACPI_STATE_C3:
- state->enter = pr->flags.bm_check ?
- acpi_idle_enter_bm :
- acpi_idle_enter_simple;
- break;
}
count++;
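
The consolidation above is easier to follow outside diff form; the sketch below restates the state-selection order of the new acpi_idle_enter() with the ACPI details reduced to plain booleans (all names here are illustrative, not kernel symbols):

enum idle_choice { ENTER_REQUESTED, ENTER_C1, ENTER_BM, ENTER_SAFE, HALT_ONLY };

/* bm_activity stands for "!cx->bm_sts_skip && acpi_idle_bm_check()";
 * must_fallback_c1 stands for acpi_idle_fallback_to_c1(). */
static enum idle_choice pick_state(int requested_c1, int must_fallback_c1,
				   int requested_c3, int bm_check,
				   int bm_activity, int have_safe_state)
{
	if (requested_c1)
		return ENTER_REQUESTED;
	if (must_fallback_c1)
		return ENTER_C1;
	if (requested_c3 && bm_check) {
		if (!bm_activity)
			return ENTER_BM;	/* acpi_idle_enter_bm() */
		if (have_safe_state)
			return ENTER_SAFE;	/* drv->safe_state_index */
		return HALT_ONLY;		/* acpi_safe_halt() */
	}
	return ENTER_REQUESTED;
}
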
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 782a0d15c25f..4752b9939987 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -34,21 +34,34 @@
#define valid_IRQ(i) (true)
#endif
-static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect,
- bool window)
+static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
{
- unsigned long flags = IORESOURCE_MEM;
+ u64 reslen = end - start + 1;
- if (len == 0)
- flags |= IORESOURCE_DISABLED;
+ /*
+ * CHECKME: len might also need to be checked against a minimum
+ * length. A length of 1 is fine for I/O, but for memory it does
+ * not make any sense at all.
+ */
+ if (len && reslen && reslen == len && start <= end)
+ return true;
- if (write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
+ pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+ io ? "io" : "mem", start, end, len);
+
+ return false;
+}
+
+static void acpi_dev_memresource_flags(struct resource *res, u64 len,
+ u8 write_protect)
+{
+ res->flags = IORESOURCE_MEM;
- if (window)
- flags |= IORESOURCE_WINDOW;
+ if (!acpi_dev_resource_len_valid(res->start, res->end, len, false))
+ res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- return flags;
+ if (write_protect == ACPI_READ_WRITE_MEMORY)
+ res->flags |= IORESOURCE_MEM_WRITEABLE;
}
static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
@@ -56,7 +69,7 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
{
res->start = start;
res->end = start + len - 1;
- res->flags = acpi_dev_memresource_flags(len, write_protect, false);
+ acpi_dev_memresource_flags(res, len, write_protect);
}
/**
@@ -67,6 +80,11 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
* Check if the given ACPI resource object represents a memory resource and
* if that's the case, use the information in it to populate the generic
* resource object pointed to by @res.
+ *
+ * Return:
+ * 1) false with res->flags set to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
+ * 3) true: valid assigned resource
*/
bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
{
@@ -77,60 +95,52 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
- if (!memory24->minimum && !memory24->address_length)
- return false;
- acpi_dev_get_memresource(res, memory24->minimum,
- memory24->address_length,
+ acpi_dev_get_memresource(res, memory24->minimum << 8,
+ memory24->address_length << 8,
memory24->write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
- if (!memory32->minimum && !memory32->address_length)
- return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
memory32->write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
- if (!fixed_memory32->address && !fixed_memory32->address_length)
- return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
fixed_memory32->write_protect);
break;
default:
+ res->flags = 0;
return false;
}
- return true;
+
+ return !(res->flags & IORESOURCE_DISABLED);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
-static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode,
- bool window)
+static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
+ u8 io_decode)
{
- int flags = IORESOURCE_IO;
+ res->flags = IORESOURCE_IO;
- if (io_decode == ACPI_DECODE_16)
- flags |= IORESOURCE_IO_16BIT_ADDR;
+ if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
+ res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (start > end || end >= 0x10003)
- flags |= IORESOURCE_DISABLED;
+ if (res->end >= 0x10003)
+ res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (window)
- flags |= IORESOURCE_WINDOW;
-
- return flags;
+ if (io_decode == ACPI_DECODE_16)
+ res->flags |= IORESOURCE_IO_16BIT_ADDR;
}
static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
u8 io_decode)
{
- u64 end = start + len - 1;
-
res->start = start;
- res->end = end;
- res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false);
+ res->end = start + len - 1;
+ acpi_dev_ioresource_flags(res, len, io_decode);
}
/**
@@ -141,6 +151,11 @@ static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
* Check if the given ACPI resource object represents an I/O resource and
* if that's the case, use the information in it to populate the generic
* resource object pointed to by @res.
+ *
+ * Return:
+ * 1) false with res->flags set to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
+ * 3) true: valid assigned resource
*/
bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
{
@@ -150,135 +165,143 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
- if (!io->minimum && !io->address_length)
- return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
io->io_decode);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
- if (!fixed_io->address && !fixed_io->address_length)
- return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
ACPI_DECODE_10);
break;
default:
+ res->flags = 0;
return false;
}
- return true;
+
+ return !(res->flags & IORESOURCE_DISABLED);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
-/**
- * acpi_dev_resource_address_space - Extract ACPI address space information.
- * @ares: Input ACPI resource object.
- * @res: Output generic resource object.
- *
- * Check if the given ACPI resource object represents an address space resource
- * and if that's the case, use the information in it to populate the generic
- * resource object pointed to by @res.
- */
-bool acpi_dev_resource_address_space(struct acpi_resource *ares,
- struct resource *res)
+static bool acpi_decode_space(struct resource_win *win,
+ struct acpi_resource_address *addr,
+ struct acpi_address64_attribute *attr)
{
- acpi_status status;
- struct acpi_resource_address64 addr;
- bool window;
- u64 len;
- u8 io_decode;
+ u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
+ bool wp = addr->info.mem.write_protect;
+ u64 len = attr->address_length;
+ struct resource *res = &win->res;
- switch (ares->type) {
- case ACPI_RESOURCE_TYPE_ADDRESS16:
- case ACPI_RESOURCE_TYPE_ADDRESS32:
- case ACPI_RESOURCE_TYPE_ADDRESS64:
- break;
- default:
- return false;
- }
+ /*
+ * Filter out invalid descriptors according to ACPI Spec 5.0, section
+ * 6.4.3.5 Address Space Resource Descriptors.
+ */
+ if ((addr->min_address_fixed != addr->max_address_fixed && len) ||
+ (addr->min_address_fixed && addr->max_address_fixed && !len))
+ pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
+ addr->min_address_fixed, addr->max_address_fixed, len);
- status = acpi_resource_to_address64(ares, &addr);
- if (ACPI_FAILURE(status))
- return false;
+ res->start = attr->minimum;
+ res->end = attr->maximum;
- res->start = addr.minimum;
- res->end = addr.maximum;
- window = addr.producer_consumer == ACPI_PRODUCER;
+ /*
+ * For bridges that translate addresses across the bridge,
+ * translation_offset is the offset that must be added to the
+ * address on the secondary side to obtain the address on the
+ * primary side. Non-bridge devices must list 0 for all Address
+ * Translation offset bits.
+ */
+ if (addr->producer_consumer == ACPI_PRODUCER) {
+ res->start += attr->translation_offset;
+ res->end += attr->translation_offset;
+ } else if (attr->translation_offset) {
+ pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
+ attr->translation_offset);
+ }
- switch(addr.resource_type) {
+ switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
- len = addr.maximum - addr.minimum + 1;
- res->flags = acpi_dev_memresource_flags(len,
- addr.info.mem.write_protect,
- window);
+ acpi_dev_memresource_flags(res, len, wp);
break;
case ACPI_IO_RANGE:
- io_decode = addr.granularity == 0xfff ?
- ACPI_DECODE_10 : ACPI_DECODE_16;
- res->flags = acpi_dev_ioresource_flags(addr.minimum,
- addr.maximum,
- io_decode, window);
+ acpi_dev_ioresource_flags(res, len, iodec);
break;
case ACPI_BUS_NUMBER_RANGE:
res->flags = IORESOURCE_BUS;
break;
default:
- res->flags = 0;
+ return false;
}
- return true;
+ win->offset = attr->translation_offset;
+
+ if (addr->producer_consumer == ACPI_PRODUCER)
+ res->flags |= IORESOURCE_WINDOW;
+
+ if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+ res->flags |= IORESOURCE_PREFETCH;
+
+ return !(res->flags & IORESOURCE_DISABLED);
+}
+
+/**
+ * acpi_dev_resource_address_space - Extract ACPI address space information.
+ * @ares: Input ACPI resource object.
+ * @win: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an address space resource
+ * and if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @win.
+ *
+ * Return:
+ * 1) false with win->res.flags set to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
+ * resource
+ * 3) true: valid assigned resource
+ */
+bool acpi_dev_resource_address_space(struct acpi_resource *ares,
+ struct resource_win *win)
+{
+ struct acpi_resource_address64 addr;
+
+ win->res.flags = 0;
+ if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr)))
+ return false;
+
+ return acpi_decode_space(win, (struct acpi_resource_address *)&addr,
+ &addr.address);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
/**
* acpi_dev_resource_ext_address_space - Extract ACPI address space information.
* @ares: Input ACPI resource object.
- * @res: Output generic resource object.
+ * @win: Output generic resource object.
*
* Check if the given ACPI resource object represents an extended address space
* resource and if that's the case, use the information in it to populate the
- * generic resource object pointed to by @res.
+ * generic resource object pointed to by @win.
+ *
+ * Return:
+ * 1) false with win->res.flags set to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
+ * resource
+ * 3) true: valid assigned resource
*/
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
- struct resource *res)
+ struct resource_win *win)
{
struct acpi_resource_extended_address64 *ext_addr;
- bool window;
- u64 len;
- u8 io_decode;
+ win->res.flags = 0;
if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
return false;
ext_addr = &ares->data.ext_address64;
- res->start = ext_addr->minimum;
- res->end = ext_addr->maximum;
- window = ext_addr->producer_consumer == ACPI_PRODUCER;
-
- switch(ext_addr->resource_type) {
- case ACPI_MEMORY_RANGE:
- len = ext_addr->maximum - ext_addr->minimum + 1;
- res->flags = acpi_dev_memresource_flags(len,
- ext_addr->info.mem.write_protect,
- window);
- break;
- case ACPI_IO_RANGE:
- io_decode = ext_addr->granularity == 0xfff ?
- ACPI_DECODE_10 : ACPI_DECODE_16;
- res->flags = acpi_dev_ioresource_flags(ext_addr->minimum,
- ext_addr->maximum,
- io_decode, window);
- break;
- case ACPI_BUS_NUMBER_RANGE:
- res->flags = IORESOURCE_BUS;
- break;
- default:
- res->flags = 0;
- }
-
- return true;
+ return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr,
+ &ext_addr->address);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
@@ -310,7 +333,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
{
res->start = gsi;
res->end = gsi;
- res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED;
+ res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
}
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
@@ -369,6 +392,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
* represented by the resource and populate the generic resource object pointed
* to by @res accordingly. If the registration of the GSI is not successful,
* IORESOURCE_DISABLED will be set in that object's flags.
+ *
+ * Return:
+ * 1) false with res->flags set to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
+ * 3) true: valid assigned resource
*/
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res)
@@ -402,6 +430,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
ext_irq->sharable, false);
break;
default:
+ res->flags = 0;
return false;
}
@@ -415,12 +444,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
*/
void acpi_dev_free_resource_list(struct list_head *list)
{
- struct resource_list_entry *rentry, *re;
-
- list_for_each_entry_safe(rentry, re, list, node) {
- list_del(&rentry->node);
- kfree(rentry);
- }
+ resource_list_free(list);
}
EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
@@ -432,18 +456,19 @@ struct res_proc_context {
int error;
};
-static acpi_status acpi_dev_new_resource_entry(struct resource *r,
+static acpi_status acpi_dev_new_resource_entry(struct resource_win *win,
struct res_proc_context *c)
{
- struct resource_list_entry *rentry;
+ struct resource_entry *rentry;
- rentry = kmalloc(sizeof(*rentry), GFP_KERNEL);
+ rentry = resource_list_create_entry(NULL, 0);
if (!rentry) {
c->error = -ENOMEM;
return AE_NO_MEMORY;
}
- rentry->res = *r;
- list_add_tail(&rentry->node, c->list);
+ *rentry->res = win->res;
+ rentry->offset = win->offset;
+ resource_list_add_tail(rentry, c->list);
c->count++;
return AE_OK;
}
@@ -452,7 +477,8 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
void *context)
{
struct res_proc_context *c = context;
- struct resource r;
+ struct resource_win win;
+ struct resource *res = &win.res;
int i;
if (c->preproc) {
@@ -467,18 +493,18 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
}
}
- memset(&r, 0, sizeof(r));
+ memset(&win, 0, sizeof(win));
- if (acpi_dev_resource_memory(ares, &r)
- || acpi_dev_resource_io(ares, &r)
- || acpi_dev_resource_address_space(ares, &r)
- || acpi_dev_resource_ext_address_space(ares, &r))
- return acpi_dev_new_resource_entry(&r, c);
+ if (acpi_dev_resource_memory(ares, res)
+ || acpi_dev_resource_io(ares, res)
+ || acpi_dev_resource_address_space(ares, &win)
+ || acpi_dev_resource_ext_address_space(ares, &win))
+ return acpi_dev_new_resource_entry(&win, c);
- for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) {
+ for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) {
acpi_status status;
- status = acpi_dev_new_resource_entry(&r, c);
+ status = acpi_dev_new_resource_entry(&win, c);
if (ACPI_FAILURE(status))
return status;
}
@@ -503,7 +529,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
* returned as the final error code.
*
* The resultant struct resource objects are put on the list pointed to by
- * @list, that must be empty initially, as members of struct resource_list_entry
+ * @list, that must be empty initially, as members of struct resource_entry
* objects. Callers of this routine should use %acpi_dev_free_resource_list() to
* free that list.
*
@@ -538,3 +564,58 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
return c.count;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+
+/**
+ * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
+ * types
+ * @ares: Input ACPI resource object.
+ * @types: Valid resource types of IORESOURCE_XXX
+ *
+ * This is a helper function to support acpi_dev_get_resources(), which filters
+ * ACPI resource objects according to resource types.
+ */
+int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+ unsigned long types)
+{
+ unsigned long type = 0;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ type = IORESOURCE_MEM;
+ break;
+ case ACPI_RESOURCE_TYPE_IO:
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ type = IORESOURCE_IO;
+ break;
+ case ACPI_RESOURCE_TYPE_IRQ:
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ type = IORESOURCE_IRQ;
+ break;
+ case ACPI_RESOURCE_TYPE_DMA:
+ case ACPI_RESOURCE_TYPE_FIXED_DMA:
+ type = IORESOURCE_DMA;
+ break;
+ case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
+ type = IORESOURCE_REG;
+ break;
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+ if (ares->data.address.resource_type == ACPI_MEMORY_RANGE)
+ type = IORESOURCE_MEM;
+ else if (ares->data.address.resource_type == ACPI_IO_RANGE)
+ type = IORESOURCE_IO;
+ else if (ares->data.address.resource_type ==
+ ACPI_BUS_NUMBER_RANGE)
+ type = IORESOURCE_BUS;
+ break;
+ default:
+ break;
+ }
+
+ return (type & types) ? 0 : 1;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
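
A hypothetical usage sketch for the new filter helper exported above, assuming the existing acpi_dev_get_resources() preproc convention (a positive preproc return skips the descriptor) and the resource_entry list helpers referenced earlier in this file; the example_ names are made up for illustration:

#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/resource_ext.h>

/* Skip anything that is not a memory descriptor: the filter returns 0 on
 * a type match and 1 otherwise, which matches the preproc convention. */
static int example_mem_only(struct acpi_resource *ares, void *unused)
{
	return acpi_dev_filter_resource_type(ares, IORESOURCE_MEM);
}

static int example_count_mem(struct acpi_device *adev)
{
	struct resource_entry *entry;
	struct list_head list;
	int ret, count = 0;

	INIT_LIST_HEAD(&list);
	ret = acpi_dev_get_resources(adev, &list, example_mem_only, NULL);
	if (ret < 0)
		return ret;

	resource_list_for_each_entry(entry, &list)
		count++;

	acpi_dev_free_resource_list(&list);
	return count;
}
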
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dc4d8960684a..bbca7830e18a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2544,6 +2544,7 @@ int __init acpi_scan_init(void)
acpi_pci_link_init();
acpi_processor_init();
acpi_lpss_init();
+ acpi_apd_init();
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8aa9254a387f..7f251dd1a687 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -321,7 +321,7 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
{},
};
-static void acpi_sleep_dmi_check(void)
+static void __init acpi_sleep_dmi_check(void)
{
int year;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 032db459370f..88a4f99dd2a7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -522,6 +522,24 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
},
},
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"),
+ },
+ },
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 730U3E/740U3E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
+ },
+ },
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 52ddd9fbb55e..f0099360039e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -18,6 +18,7 @@
#include <linux/pm_domain.h>
#include <linux/amba/bus.h>
#include <linux/sizes.h>
+#include <linux/limits.h>
#include <asm/irq.h>
@@ -43,6 +44,10 @@ static int amba_match(struct device *dev, struct device_driver *drv)
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
+ /* When driver_override is set, only bind to the matching driver */
+ if (pcdev->driver_override)
+ return !strcmp(pcdev->driver_override, drv->name);
+
return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
@@ -59,6 +64,47 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
+static ssize_t driver_override_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amba_device *dev = to_amba_device(_dev);
+
+ if (!dev->driver_override)
+ return 0;
+
+ return sprintf(buf, "%s\n", dev->driver_override);
+}
+
+static ssize_t driver_override_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amba_device *dev = to_amba_device(_dev);
+ char *driver_override, *old = dev->driver_override, *cp;
+
+ if (count > PATH_MAX)
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(driver_override)) {
+ dev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ dev->driver_override = NULL;
+ }
+
+ kfree(old);
+
+ return count;
+}
+
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
@@ -81,6 +127,7 @@ amba_attr_func(resource, "\t%016llx\t%016llx\t%016lx\n",
static struct device_attribute amba_dev_attrs[] = {
__ATTR_RO(id),
__ATTR_RO(resource),
+ __ATTR_RW(driver_override),
__ATTR_NULL,
};
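
An illustrative restatement of the matching precedence added to amba_match() above, with the driver-model types stripped away (the names below are not kernel symbols): an explicit override, once set, is the only thing consulted, and the ID table only decides when no override is present. In practice the override is set by writing a driver name to the new driver_override attribute before binding.

#include <string.h>

static int example_amba_match(const char *driver_override,
			      const char *drv_name, int id_table_hit)
{
	if (driver_override)
		return strcmp(driver_override, drv_name) == 0;
	return id_table_hit;
}
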
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8c43521d3f11..33b09b6568a4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
+#include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
@@ -1400,6 +1401,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
+ if (security_binder_transaction(proc->tsk,
+ target_proc->tsk) < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
@@ -1551,6 +1557,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ if (security_binder_transfer_binder(proc->tsk,
+ target_proc->tsk)) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
@@ -1581,6 +1592,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
+ if (security_binder_transfer_binder(proc->tsk,
+ target_proc->tsk)) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
@@ -1638,6 +1654,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
+ if (security_binder_transfer_file(proc->tsk,
+ target_proc->tsk,
+ file) < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
@@ -2675,6 +2698,9 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
ret = -EBUSY;
goto out;
}
+ ret = security_binder_set_context_mgr(proc->tsk);
+ if (ret < 0)
+ goto out;
if (uid_valid(binder_context_mgr_uid)) {
if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a3a13605a9c4..5f601553b9b0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -835,6 +835,7 @@ config PATA_AT32
config PATA_AT91
tristate "PATA support for AT91SAM9260"
depends on ARM && SOC_AT91SAM9
+ depends on !ARCH_MULTIPLATFORM
help
This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 49f1e6890587..33bb06e006c9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 40f0e34f17af..71262e08648e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -333,7 +333,7 @@ struct ahci_host_priv {
u32 em_msg_type; /* EM message type */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
- struct regulator *target_pwr; /* Optional */
+ struct regulator **target_pwrs; /* Optional */
/*
* If platform uses PHYs. There is a 1:1 relation between the port number and
* the PHY position in this array.
@@ -354,6 +354,10 @@ extern int ahci_ignore_sss;
extern struct device_attribute *ahci_shost_attrs[];
extern struct device_attribute *ahci_sdev_attrs[];
+/*
+ * This must be instantiated by the edge drivers. Read the comments
+ * for ATA_BASE_SHT
+ */
#define AHCI_SHT(drv_name) \
ATA_NCQ_SHT(drv_name), \
.can_queue = AHCI_MAX_CMDS - 1, \
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index ce8a7a6d6c7f..267a3d3e79f4 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -16,6 +16,8 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+#define DRV_NAME "ahci_da850"
+
/* SATA PHY Control Register offset from AHCI base */
#define SATA_P0PHYCR_REG 0x178
@@ -59,6 +61,10 @@ static const struct ata_port_info ahci_da850_port_info = {
.port_ops = &ahci_platform_ops,
};
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int ahci_da850_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -85,7 +91,8 @@ static int ahci_da850_probe(struct platform_device *pdev)
da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
+ &ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -102,7 +109,7 @@ static struct platform_driver ahci_da850_driver = {
.probe = ahci_da850_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "ahci_da850",
+ .name = DRV_NAME,
.pm = &ahci_da850_pm_ops,
},
};
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 35d51c59a370..3f3a7db208ae 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -28,6 +28,8 @@
#include <linux/libata.h>
#include "ahci.h"
+#define DRV_NAME "ahci-imx"
+
enum {
/* Timer 1-ms Register */
IMX_TIMER1MS = 0x00e0,
@@ -221,11 +223,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
if (imxpriv->no_device)
return 0;
- if (hpriv->target_pwr) {
- ret = regulator_enable(hpriv->target_pwr);
- if (ret)
- return ret;
- }
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(imxpriv->sata_ref_clk);
if (ret < 0)
@@ -270,8 +270,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
disable_clk:
clk_disable_unprepare(imxpriv->sata_ref_clk);
disable_regulator:
- if (hpriv->target_pwr)
- regulator_disable(hpriv->target_pwr);
+ ahci_platform_disable_regulators(hpriv);
return ret;
}
@@ -291,8 +290,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
clk_disable_unprepare(imxpriv->sata_ref_clk);
- if (hpriv->target_pwr)
- regulator_disable(hpriv->target_pwr);
+ ahci_platform_disable_regulators(hpriv);
}
static void ahci_imx_error_handler(struct ata_port *ap)
@@ -524,6 +522,10 @@ static u32 imx_ahci_parse_props(struct device *dev,
return reg_value;
}
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int imx_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -620,7 +622,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
- ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info);
+ ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
+ &ahci_platform_sht);
if (ret)
goto disable_sata;
@@ -678,7 +681,7 @@ static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "ahci-imx",
+ .name = DRV_NAME,
.of_match_table = imx_ahci_of_match,
.pm = &ahci_imx_pm_ops,
},
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 64bb08432b69..23716dd8a7ec 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -19,6 +19,8 @@
#include <linux/platform_device.h>
#include "ahci.h"
+#define DRV_NAME "ahci-mvebu"
+
#define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0
#define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4
@@ -67,6 +69,10 @@ static const struct ata_port_info ahci_mvebu_port_info = {
.port_ops = &ahci_platform_ops,
};
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int ahci_mvebu_probe(struct platform_device *pdev)
{
struct ahci_host_priv *hpriv;
@@ -88,7 +94,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
ahci_mvebu_mbus_config(hpriv, dram);
ahci_mvebu_regret_option(hpriv);
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
+ &ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -114,7 +121,7 @@ static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "ahci-mvebu",
+ .name = DRV_NAME,
.of_match_table = ahci_mvebu_of_match,
},
};
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 18d539837045..78d6ae0b90c4 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -22,6 +22,8 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+#define DRV_NAME "ahci"
+
static const struct ata_port_info ahci_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
@@ -29,6 +31,10 @@ static const struct ata_port_info ahci_port_info = {
.port_ops = &ahci_platform_ops,
};
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -46,7 +52,8 @@ static int ahci_probe(struct platform_device *pdev)
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ &ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -75,7 +82,7 @@ static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "ahci",
+ .name = DRV_NAME,
.of_match_table = ahci_of_match,
.pm = &ahci_pm_ops,
},
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 2f9e8317cc16..bc971af262e7 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -23,6 +23,8 @@
#include "ahci.h"
+#define DRV_NAME "st_ahci"
+
#define ST_AHCI_OOBR 0xbc
#define ST_AHCI_OOBR_WE BIT(31)
#define ST_AHCI_OOBR_CWMIN_SHIFT 24
@@ -140,6 +142,10 @@ static const struct ata_port_info st_ahci_port_info = {
.port_ops = &st_ahci_port_ops,
};
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int st_ahci_probe(struct platform_device *pdev)
{
struct st_ahci_drv_data *drv_data;
@@ -166,7 +172,8 @@ static int st_ahci_probe(struct platform_device *pdev)
if (err)
return err;
- err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info);
+ err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
+ &ahci_platform_sht);
if (err) {
ahci_platform_disable_resources(hpriv);
return err;
@@ -229,7 +236,7 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
- .name = "st_ahci",
+ .name = DRV_NAME,
.pm = &st_ahci_pm_ops,
.of_match_table = of_match_ptr(st_ahci_match),
},
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index e2e0da539a2f..b26437430163 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -27,6 +27,8 @@
#include <linux/regulator/consumer.h>
#include "ahci.h"
+#define DRV_NAME "ahci-sunxi"
+
/* Insmod parameters */
static bool enable_pmp;
module_param(enable_pmp, bool, 0);
@@ -169,6 +171,10 @@ static const struct ata_port_info ahci_sunxi_port_info = {
.port_ops = &ahci_platform_ops,
};
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int ahci_sunxi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -200,7 +206,8 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
if (!enable_pmp)
hpriv->flags |= AHCI_HFLAG_NO_PMP;
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
+ &ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -251,7 +258,7 @@ static struct platform_driver ahci_sunxi_driver = {
.probe = ahci_sunxi_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "ahci-sunxi",
+ .name = DRV_NAME,
.of_match_table = ahci_sunxi_of_match,
.pm = &ahci_sunxi_pm_ops,
},
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 032904402c95..3a62eb246d80 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -31,6 +31,8 @@
#include "ahci.h"
+#define DRV_NAME "tegra-ahci"
+
#define SATA_CONFIGURATION_0 0x180
#define SATA_CONFIGURATION_EN_FPCI BIT(0)
@@ -289,6 +291,10 @@ static const struct of_device_id tegra_ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int tegra_ahci_probe(struct platform_device *pdev)
{
struct ahci_host_priv *hpriv;
@@ -354,7 +360,8 @@ static int tegra_ahci_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info);
+ ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info,
+ &ahci_platform_sht);
if (ret)
goto deinit_controller;
@@ -370,7 +377,7 @@ static struct platform_driver tegra_ahci_driver = {
.probe = tegra_ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "tegra-ahci",
+ .name = DRV_NAME,
.of_match_table = tegra_ahci_of_match,
},
/* LP0 suspend support not implemented */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index feeb8f1e2fe8..2e8bb603e447 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -30,6 +30,8 @@
#include <linux/phy/phy.h>
#include "ahci.h"
+#define DRV_NAME "xgene-ahci"
+
/* Max # of disk per a controller */
#define MAX_AHCI_CHN_PERCTR 2
@@ -85,6 +87,7 @@ struct xgene_ahci_context {
struct ahci_host_priv *hpriv;
struct device *dev;
u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/
+ u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
void __iomem *csr_core; /* Core CSR address of IP */
void __iomem *csr_diag; /* Diag CSR address of IP */
void __iomem *csr_axi; /* AXI CSR address of IP */
@@ -105,17 +108,69 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
}
/**
+ * xgene_ahci_poll_reg_val - Poll a register until it reads a specific value.
+ * @ap : ATA port of interest.
+ * @reg : Register of interest.
+ * @val : Value to be attained.
+ * @interval : waiting interval for polling.
+ * @timeout : timeout for achieving the value.
+ */
+static int xgene_ahci_poll_reg_val(struct ata_port *ap,
+ void __iomem *reg, unsigned
+ int val, unsigned long interval,
+ unsigned long timeout)
+{
+ unsigned long deadline;
+ unsigned int tmp;
+
+ tmp = ioread32(reg);
+ deadline = ata_deadline(jiffies, timeout);
+
+ while (tmp != val && time_before(jiffies, deadline)) {
+ ata_msleep(ap, interval);
+ tmp = ioread32(reg);
+ }
+
+ return tmp;
+}
+
+/**
* xgene_ahci_restart_engine - Restart the dma engine.
* @ap : ATA port of interest
*
- * Restarts the dma engine inside the controller.
+ * Waits for completion of multiple commands and restarts
+ * the DMA engine inside the controller.
*/
static int xgene_ahci_restart_engine(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+
+ /*
+ * When a port multiplier is attached, multiple IDENTIFY DEVICE
+ * commands can be outstanding in PxCI, so we need to poll PxCI
+ * for the completion of those outstanding commands before
+ * restarting the DMA engine.
+ */
+ if (xgene_ahci_poll_reg_val(ap, port_mmio +
+ PORT_CMD_ISSUE, 0x0, 1, 100))
+ return -EBUSY;
ahci_stop_engine(ap);
ahci_start_fis_rx(ap);
+
+ /*
+ * Enable the PxFBS.FBS_EN bit as it
+ * gets cleared due to stopping the engine.
+ */
+ if (pp->fbs_supported) {
+ fbs = readl(port_mmio + PORT_FBS);
+ writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ }
+
hpriv->start_engine(ap);
return 0;
@@ -129,6 +184,13 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
clear the BSY bit after receiving the PIO setup FIS. This causes the dma
state machine to go into the CMFatalErrorUpdate state and lock up. Restarting
the dma engine brings the controller out of the locked-up state.
+ *
+ * Due to H/W errata, the controller is unable to save the PMP
+ * field fetched from the command header before sending the H2D FIS.
+ * When the device returns the PMP port field in the D2H FIS, there is
+ * a mismatch, which results in command completion failure. The
+ * workaround is to write the pmp value to the PxFBS.DEV field before
+ * issuing any command to the PMP.
*/
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
@@ -136,8 +198,23 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
+ u32 port_fbs;
+ void *port_mmio = ahci_port_base(ap);
- if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+ /*
+ * Write the pmp value to PxFBS.DEV
+ * for the Port Multiplier case.
+ */
+ if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
+ port_fbs = readl(port_mmio + PORT_FBS);
+ port_fbs &= ~PORT_FBS_DEV_MASK;
+ port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+ writel(port_fbs, port_mmio + PORT_FBS);
+ }
+
+ if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
+ (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
+ (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
xgene_ahci_restart_engine(ap);
rc = ahci_qc_issue(qc);
@@ -188,7 +265,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
*
* Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
*/
- id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
@@ -363,16 +440,119 @@ static void xgene_ahci_host_stop(struct ata_host *host)
ahci_platform_disable_resources(hpriv);
}
+/**
+ * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
+ * to the Port Multiplier.
+ * @link: link to reset
+ * @class: Return value to indicate class of device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Due to H/W errata, the controller is unable to save the PMP
+ * field fetched from the command header before sending the H2D FIS.
+ * When the device returns the PMP port field in the D2H FIS, there is
+ * a mismatch, which results in command completion failure. The workaround
+ * is to write the pmp value to the PxFBS.DEV field before issuing any
+ * command to the PMP.
+ */
+static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+ struct ata_port *ap = link->ap;
+ u32 rc;
+ void *port_mmio = ahci_port_base(ap);
+ u32 port_fbs;
+
+ /*
+ * Set PxFBS.DEV field with pmp
+ * value.
+ */
+ port_fbs = readl(port_mmio + PORT_FBS);
+ port_fbs &= ~PORT_FBS_DEV_MASK;
+ port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
+ writel(port_fbs, port_mmio + PORT_FBS);
+
+ rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+
+ return rc;
+}
+
+/**
+ * xgene_ahci_softreset - Issue the softreset to the drive.
+ * @link: link to reset
+ * @class: Return value to indicate class of device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Due to H/W errata, the controller is unable to save the PMP
+ * field fetched from the command header before sending the H2D FIS.
+ * When the device returns the PMP port field in the D2H FIS, there is
+ * a mismatch, which results in command completion failure. The workaround
+ * is to write the pmp value to the PxFBS.DEV field before issuing any
+ * command to the PMP. The algorithm used to detect a PMP is:
+ *
+ * 1. Save the PxFBS value
+ * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
+ *    framework initially sends 0xF for both PMP and non-PMP devices
+ * 3. Issue the softreset
+ * 4. If the signature class is PMP, go to step 6
+ * 5. Restore the original PxFBS value and go to step 3
+ * 6. Return
+ */
+static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ void *port_mmio = ahci_port_base(ap);
+ u32 port_fbs;
+ u32 port_fbs_save;
+ u32 retry = 1;
+ u32 rc;
+
+ port_fbs_save = readl(port_mmio + PORT_FBS);
+
+ /*
+ * Set PxFBS.DEV field with pmp
+ * value.
+ */
+ port_fbs = readl(port_mmio + PORT_FBS);
+ port_fbs &= ~PORT_FBS_DEV_MASK;
+ port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
+ writel(port_fbs, port_mmio + PORT_FBS);
+
+softreset_retry:
+ rc = ahci_do_softreset(link, class, pmp,
+ deadline, ahci_check_ready);
+
+ ctx->class[ap->port_no] = *class;
+ if (*class != ATA_DEV_PMP) {
+ /*
+ * Retry for normal drives without
+ * setting PxFBS.DEV field with pmp value.
+ */
+ if (retry--) {
+ writel(port_fbs_save, port_mmio + PORT_FBS);
+ goto softreset_retry;
+ }
+ }
+
+ return rc;
+}
+
static struct ata_port_operations xgene_ahci_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
.qc_issue = xgene_ahci_qc_issue,
+ .softreset = xgene_ahci_softreset,
+ .pmp_softreset = xgene_ahci_pmp_softreset
};
static const struct ata_port_info xgene_ahci_port_info = {
- .flags = AHCI_FLAG_COMMON,
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &xgene_ahci_ops,
@@ -444,6 +624,10 @@ static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
}
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
static int xgene_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -521,7 +705,8 @@ static int xgene_ahci_probe(struct platform_device *pdev)
skip_clk_phy:
hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
- rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
+ rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info,
+ &ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -543,7 +728,7 @@ static struct platform_driver xgene_ahci_driver = {
.probe = xgene_ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
- .name = "xgene-ahci",
+ .name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
},
};
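xgene_ahci_restart_engine() above refuses to restart the DMA engine while commands are still outstanding, using xgene_ahci_poll_reg_val() to poll PORT_CMD_ISSUE until it drains or a timeout expires. The sketch below shows the same poll-with-timeout pattern as a standalone userspace program; the MMIO read and ata_msleep() are replaced by a plain pointer dereference and usleep(), so the names and millisecond units are assumptions for illustration only.

#include <stdint.h>
#include <unistd.h>

/* Poll *reg until it equals val or timeout_ms expires; return the last value read. */
static uint32_t poll_reg_val(volatile uint32_t *reg, uint32_t val,
			     unsigned interval_ms, unsigned timeout_ms)
{
	unsigned waited = 0;
	uint32_t tmp = *reg;

	while (tmp != val && waited < timeout_ms) {
		usleep(interval_ms * 1000);	/* stands in for ata_msleep() */
		waited += interval_ms;
		tmp = *reg;
	}
	return tmp;
}

int main(void)
{
	volatile uint32_t fake_reg = 0;	/* would be an MMIO mapping in a driver */

	/* The register already reads 0, so this returns immediately with 0. */
	return poll_reg_val(&fake_reg, 0, 1, 100) == 0 ? 0 : 1;
}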
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 97683e45ab04..61a9c07e0dff 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
devslp = readl(port_mmio + PORT_DEVSLP);
if (!(devslp & PORT_DEVSLP_DSP)) {
- dev_err(ap->host->dev, "port does not support device sleep\n");
+ dev_info(ap->host->dev, "port does not support device sleep\n");
return;
}
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 0b03f9056692..d89305d289f6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -24,6 +24,7 @@
#include <linux/ahci_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/of_platform.h>
#include "ahci.h"
static void ahci_host_stop(struct ata_host *host);
@@ -34,10 +35,6 @@ struct ata_port_operations ahci_platform_ops = {
};
EXPORT_SYMBOL_GPL(ahci_platform_ops);
-static struct scsi_host_template ahci_platform_sht = {
- AHCI_SHT("ahci_platform"),
-};
-
/**
* ahci_platform_enable_phys - Enable PHYs
* @hpriv: host private area to store config values
@@ -54,9 +51,6 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
- if (!hpriv->phys[i])
- continue;
-
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -89,9 +83,6 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
int i;
for (i = 0; i < hpriv->nports; i++) {
- if (!hpriv->phys[i])
- continue;
-
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -144,6 +135,59 @@ void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
/**
+ * ahci_platform_enable_regulators - Enable regulators
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the regulators found in
+ * hpriv->target_pwrs, if any. If a regulator fails to be enabled, it
+ * disables all the regulators already enabled in reverse order and
+ * returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
+{
+ int rc, i;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ if (!hpriv->target_pwrs[i])
+ continue;
+
+ rc = regulator_enable(hpriv->target_pwrs[i]);
+ if (rc)
+ goto disable_target_pwrs;
+ }
+
+ return 0;
+
+disable_target_pwrs:
+ while (--i >= 0)
+ if (hpriv->target_pwrs[i])
+ regulator_disable(hpriv->target_pwrs[i]);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
+
+/**
+ * ahci_platform_disable_regulators - Disable regulators
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all regulators found in hpriv->target_pwrs.
+ */
+void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
+{
+ int i;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ if (!hpriv->target_pwrs[i])
+ continue;
+ regulator_disable(hpriv->target_pwrs[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
+/**
* ahci_platform_enable_resources - Enable platform resources
* @hpriv: host private area to store config values
*
@@ -163,11 +207,9 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
{
int rc;
- if (hpriv->target_pwr) {
- rc = regulator_enable(hpriv->target_pwr);
- if (rc)
- return rc;
- }
+ rc = ahci_platform_enable_regulators(hpriv);
+ if (rc)
+ return rc;
rc = ahci_platform_enable_clks(hpriv);
if (rc)
@@ -183,8 +225,8 @@ disable_clks:
ahci_platform_disable_clks(hpriv);
disable_regulator:
- if (hpriv->target_pwr)
- regulator_disable(hpriv->target_pwr);
+ ahci_platform_disable_regulators(hpriv);
+
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
@@ -205,8 +247,7 @@ void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
ahci_platform_disable_clks(hpriv);
- if (hpriv->target_pwr)
- regulator_disable(hpriv->target_pwr);
+ ahci_platform_disable_regulators(hpriv);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
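ahci_platform_enable_regulators() above walks a per-port array of optional regulators and, when one fails to enable, disables the ones already enabled in reverse order before returning the error. A generic standalone sketch of that enable-then-unwind pattern follows; enable_one() and disable_one() are placeholders standing in for regulator_enable() and regulator_disable().

#include <stdio.h>

#define NPORTS 4

/* Placeholders for regulator_enable()/regulator_disable(); non-zero means failure. */
static int enable_one(int i)   { printf("enable %d\n", i); return i == 2 ? -1 : 0; }
static void disable_one(int i) { printf("disable %d\n", i); }

static int enable_all(const int present[NPORTS])
{
	int rc = 0, i;

	for (i = 0; i < NPORTS; i++) {
		if (!present[i])
			continue;
		rc = enable_one(i);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	/* Roll back only what was enabled, in reverse order. */
	while (--i >= 0)
		if (present[i])
			disable_one(i);
	return rc;
}

int main(void)
{
	int present[NPORTS] = { 1, 0, 1, 1 };
	int rc = enable_all(present);	/* fails at index 2 and unwinds index 0 */

	return rc ? 1 : 0;
}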
@@ -222,6 +263,69 @@ static void ahci_platform_put_resources(struct device *dev, void *res)
for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
clk_put(hpriv->clks[c]);
+ /*
+ * The regulators are tied to the child node devices, not to the
+ * SATA device itself, so devm cannot release them automatically.
+ * Release them manually here.
+ */
+ for (c = 0; c < hpriv->nports; c++)
+ if (hpriv->target_pwrs && hpriv->target_pwrs[c])
+ regulator_put(hpriv->target_pwrs[c]);
+
+ kfree(hpriv->target_pwrs);
+}
+
+static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
+ struct device *dev, struct device_node *node)
+{
+ int rc;
+
+ hpriv->phys[port] = devm_of_phy_get(dev, node, NULL);
+
+ if (!IS_ERR(hpriv->phys[port]))
+ return 0;
+
+ rc = PTR_ERR(hpriv->phys[port]);
+ switch (rc) {
+ case -ENOSYS:
+ /* No PHY support. Check if PHY is required. */
+ if (of_find_property(node, "phys", NULL)) {
+ dev_err(dev,
+ "couldn't get PHY in node %s: ENOSYS\n",
+ node->name);
+ break;
+ }
+ case -ENODEV:
+ /* continue normally */
+ hpriv->phys[port] = NULL;
+ rc = 0;
+ break;
+
+ default:
+ dev_err(dev,
+ "couldn't get PHY in node %s: %d\n",
+ node->name, rc);
+
+ break;
+ }
+
+ return rc;
+}
+
+static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
+ struct device *dev)
+{
+ struct regulator *target_pwr;
+ int rc = 0;
+
+ target_pwr = regulator_get_optional(dev, "target");
+
+ if (!IS_ERR(target_pwr))
+ hpriv->target_pwrs[port] = target_pwr;
+ else
+ rc = PTR_ERR(target_pwr);
+
+ return rc;
}
/**
@@ -246,7 +350,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
struct clk *clk;
struct device_node *child;
- int i, enabled_ports = 0, rc = -ENOMEM;
+ int i, sz, enabled_ports = 0, rc = -ENOMEM, child_nodes;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
@@ -267,14 +371,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
goto err_out;
}
- hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
- if (IS_ERR(hpriv->target_pwr)) {
- rc = PTR_ERR(hpriv->target_pwr);
- if (rc == -EPROBE_DEFER)
- goto err_out;
- hpriv->target_pwr = NULL;
- }
-
for (i = 0; i < AHCI_MAX_CLKS; i++) {
/*
* For now we must use clk_get(dev, NULL) for the first clock,
@@ -296,19 +392,33 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
hpriv->clks[i] = clk;
}
- hpriv->nports = of_get_child_count(dev->of_node);
+ hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
- if (hpriv->nports) {
- hpriv->phys = devm_kzalloc(dev,
- hpriv->nports * sizeof(*hpriv->phys),
- GFP_KERNEL);
- if (!hpriv->phys) {
- rc = -ENOMEM;
- goto err_out;
- }
+ /*
+ * If no sub-node was found, we still need to set nports to
+ * one in order to be able to use the
+ * ahci_platform_[en|dis]able_[phys|regulators] functions.
+ */
+ if (!child_nodes)
+ hpriv->nports = 1;
+ sz = hpriv->nports * sizeof(*hpriv->phys);
+ hpriv->phys = devm_kzalloc(dev, sz, GFP_KERNEL);
+ if (!hpriv->phys) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ sz = hpriv->nports * sizeof(*hpriv->target_pwrs);
+ hpriv->target_pwrs = kzalloc(sz, GFP_KERNEL);
+ if (!hpriv->target_pwrs) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ if (child_nodes) {
for_each_child_of_node(dev->of_node, child) {
u32 port;
+ struct platform_device *port_dev __maybe_unused;
if (!of_device_is_available(child))
continue;
@@ -322,17 +432,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
dev_warn(dev, "invalid port number %d\n", port);
continue;
}
-
mask_port_map |= BIT(port);
- hpriv->phys[port] = devm_of_phy_get(dev, child, NULL);
- if (IS_ERR(hpriv->phys[port])) {
- rc = PTR_ERR(hpriv->phys[port]);
- dev_err(dev,
- "couldn't get PHY in node %s: %d\n",
- child->name, rc);
- goto err_out;
+#ifdef CONFIG_OF_ADDRESS
+ of_platform_device_create(child, NULL, NULL);
+
+ port_dev = of_find_device_by_node(child);
+
+ if (port_dev) {
+ rc = ahci_platform_get_regulator(hpriv, port,
+ &port_dev->dev);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
}
+#endif
+
+ rc = ahci_platform_get_phy(hpriv, port, dev, child);
+ if (rc)
+ goto err_out;
enabled_ports++;
}
@@ -349,38 +466,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
* If no sub-node was found, keep this for device tree
* compatibility
*/
- struct phy *phy = devm_phy_get(dev, "sata-phy");
- if (!IS_ERR(phy)) {
- hpriv->phys = devm_kzalloc(dev, sizeof(*hpriv->phys),
- GFP_KERNEL);
- if (!hpriv->phys) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- hpriv->phys[0] = phy;
- hpriv->nports = 1;
- } else {
- rc = PTR_ERR(phy);
- switch (rc) {
- case -ENOSYS:
- /* No PHY support. Check if PHY is required. */
- if (of_find_property(dev->of_node, "phys", NULL)) {
- dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
- goto err_out;
- }
- case -ENODEV:
- /* continue normally */
- hpriv->phys = NULL;
- break;
-
- default:
- goto err_out;
+ rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
+ if (rc)
+ goto err_out;
- }
- }
+ rc = ahci_platform_get_regulator(hpriv, 0, dev);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
}
-
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
hpriv->got_runtime_pm = true;
@@ -399,6 +492,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
* @pdev: platform device pointer for the host
* @hpriv: ahci-host private data for the host
* @pi_template: template for the ata_port_info to use
+ * @sht: scsi_host_template to use when registering
*
* This function does all the usual steps needed to bring up an
* ahci-platform host, note any necessary resources (ie clks, phys, etc.)
@@ -409,7 +503,8 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
*/
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
- const struct ata_port_info *pi_template)
+ const struct ata_port_info *pi_template,
+ struct scsi_host_template *sht)
{
struct device *dev = &pdev->dev;
struct ata_port_info pi = *pi_template;
@@ -493,7 +588,7 @@ int ahci_platform_init_host(struct platform_device *pdev,
ahci_init_controller(host);
ahci_print_info(host, "platform");
- return ahci_host_activate(host, irq, &ahci_platform_sht);
+ return ahci_host_activate(host, irq, sht);
}
EXPORT_SYMBOL_GPL(ahci_platform_init_host);
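The ahci_platform_get_resources() rework above looks up a PHY and a "target" regulator per port and treats "resource not described" results (-ENODEV, and -ENOSYS when no "phys" property exists) as an absent optional resource, while real failures such as -EPROBE_DEFER still abort the probe. The standalone sketch below illustrates that optional-lookup convention; lookup_phy() and get_optional_phy() are invented placeholders, not kernel APIs.

#include <stdio.h>
#include <errno.h>

struct phy;	/* opaque handle in this sketch */

/* Placeholder lookup: returns a handle, or NULL with *err set (mimics ERR_PTR()). */
static struct phy *lookup_phy(int port, int *err)
{
	(void)port;
	*err = -ENODEV;		/* pretend no PHY is described for this port */
	return NULL;
}

/* Treat "not there" as success with a NULL handle; propagate anything else. */
static int get_optional_phy(struct phy **slot, int port)
{
	int err;

	*slot = lookup_phy(port, &err);
	if (*slot)
		return 0;

	switch (err) {
	case -ENODEV:
	case -ENOSYS:
		*slot = NULL;	/* optional resource absent: continue normally */
		return 0;
	default:
		fprintf(stderr, "port %d: PHY lookup failed: %d\n", port, err);
		return err;
	}
}

int main(void)
{
	struct phy *phy;

	return get_optional_phy(&phy, 0) ? 1 : 0;
}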
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5c84fb5c3372..4c35f0822d06 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1585,8 +1585,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
else
tag = 0;
- if (test_and_set_bit(tag, &ap->qc_allocated))
- BUG();
qc = __ata_qc_from_tag(ap, tag);
qc->tag = tag;
@@ -1752,33 +1750,6 @@ unsigned ata_exec_internal(struct ata_device *dev,
}
/**
- * ata_do_simple_cmd - execute simple internal command
- * @dev: Device to which the command is sent
- * @cmd: Opcode to execute
- *
- * Execute a 'simple' command, that only consists of the opcode
- * 'cmd' itself, without filling any other registers
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * Zero on success, AC_ERR_* mask on failure
- */
-unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
-{
- struct ata_taskfile tf;
-
- ata_tf_init(dev, &tf);
-
- tf.command = cmd;
- tf.flags |= ATA_TFLAG_DEVICE;
- tf.protocol = ATA_PROT_NODATA;
-
- return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-}
-
-/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
@@ -4233,10 +4204,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+ * (Return Zero After Trim) flags in the ATA Command Set are
+ * unreliable in the sense that they only define what happens if
+ * the device successfully executed the DSM TRIM command. TRIM
+ * is only advisory, however, and the device is free to silently
+ * ignore all or parts of the request.
+ *
+ * Whitelist drives that are known to reliably return zeroes
+ * after TRIM.
+ */
+
+ /*
+ * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
+ * that model before whitelisting all other intel SSDs.
+ */
+ { "INTEL*SSDSC2MH*", NULL, 0, },
+
+ { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4726,66 +4720,36 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
}
/**
- * ata_qc_new - Request an available ATA command, for queueing
- * @ap: target port
- *
- * Some ATA host controllers may implement a queue depth which is less
- * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
- * the hardware limitation.
+ * ata_qc_new_init - Request an available ATA command, and initialize it
+ * @dev: Device from whom we request an available command structure
*
* LOCKING:
* None.
*/
-static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
{
- struct ata_queued_cmd *qc = NULL;
- unsigned int max_queue = ap->host->n_tags;
- unsigned int i, tag;
+ struct ata_port *ap = dev->link->ap;
+ struct ata_queued_cmd *qc;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
- tag = tag < max_queue ? tag : 0;
-
- /* the last tag is reserved for internal command. */
- if (tag == ATA_TAG_INTERNAL)
- continue;
-
- if (!test_and_set_bit(tag, &ap->qc_allocated)) {
- qc = __ata_qc_from_tag(ap, tag);
- qc->tag = tag;
- ap->last_tag = tag;
- break;
- }
+ /* libsas case */
+ if (!ap->scsi_host) {
+ tag = ata_sas_allocate_tag(ap);
+ if (tag < 0)
+ return NULL;
}
- return qc;
-}
-
-/**
- * ata_qc_new_init - Request an available ATA command, and initialize it
- * @dev: Device from whom we request an available command structure
- *
- * LOCKING:
- * None.
- */
-
-struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
-{
- struct ata_port *ap = dev->link->ap;
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_new(ap);
- if (qc) {
- qc->scsicmd = NULL;
- qc->ap = ap;
- qc->dev = dev;
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = tag;
+ qc->scsicmd = NULL;
+ qc->ap = ap;
+ qc->dev = dev;
- ata_qc_reinit(qc);
- }
+ ata_qc_reinit(qc);
return qc;
}
@@ -4812,7 +4776,8 @@ void ata_qc_free(struct ata_queued_cmd *qc)
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
qc->tag = ATA_TAG_POISON;
- clear_bit(tag, &ap->qc_allocated);
+ if (!ap->scsi_host)
+ ata_sas_free_tag(tag, ap);
}
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3dbec8954c86..d2029a462e2c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1635,7 +1635,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
DPRINTK("ATAPI request sense\n");
- /* FIXME: is this needed? */
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* initialize sense_buf with the error register,
@@ -2389,6 +2388,7 @@ const char *ata_get_cmd_descript(u8 command)
return NULL;
}
+EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
/**
* ata_eh_link_report - report error handling to user
@@ -2481,7 +2481,6 @@ static void ata_eh_link_report(struct ata_link *link)
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
- const u8 *cdb = qc->cdb;
char data_buf[20] = "";
char cdb_buf[70] = "";
@@ -2509,16 +2508,15 @@ static void ata_eh_link_report(struct ata_link *link)
}
if (ata_is_atapi(qc->tf.protocol)) {
- if (qc->scsicmd)
- scsi_print_command(qc->scsicmd);
- else
- snprintf(cdb_buf, sizeof(cdb_buf),
- "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
- cdb[0], cdb[1], cdb[2], cdb[3],
- cdb[4], cdb[5], cdb[6], cdb[7],
- cdb[8], cdb[9], cdb[10], cdb[11],
- cdb[12], cdb[13], cdb[14], cdb[15]);
+ const u8 *cdb = qc->cdb;
+ size_t cdb_len = qc->dev->cdb_len;
+
+ if (qc->scsicmd) {
+ cdb = qc->scsicmd->cmnd;
+ cdb_len = qc->scsicmd->cmd_len;
+ }
+ __scsi_format_command(cdb_buf, sizeof(cdb_buf),
+ cdb, cdb_len);
} else {
const char *descr = ata_get_cmd_descript(cmd->command);
if (descr)
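The libata-eh.c hunk above drops the fixed 16-byte CDB printout in favour of a formatter sized to the command's actual CDB length. For illustration only, the standalone sketch below formats a variable-length CDB into a bounded string the way the old open-coded snprintf did; it is not the kernel helper used by the patch.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Format cdb_len bytes as hex into buf, truncating safely if buf is too small. */
static void format_cdb(char *buf, size_t buflen, const uint8_t *cdb, size_t cdb_len)
{
	size_t off = 0, i;

	if (!buflen)
		return;
	buf[0] = '\0';
	for (i = 0; i < cdb_len && off + 1 < buflen; i++) {
		int n = snprintf(buf + off, buflen - off, "%02x ", cdb[i]);

		if (n < 0 || (size_t)n >= buflen - off)
			break;
		off += (size_t)n;
	}
}

int main(void)
{
	uint8_t cdb[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 }; /* INQUIRY */
	char buf[64];

	format_cdb(buf, sizeof(buf), cdb, sizeof(cdb));
	printf("cdb %s\n", buf);
	return 0;
}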
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e364e86e84d7..b061ba2c31d8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -756,7 +756,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
{
struct ata_queued_cmd *qc;
- qc = ata_qc_new_init(dev);
+ qc = ata_qc_new_init(dev, cmd->request->tag);
if (qc) {
qc->scsicmd = cmd;
qc->scsidone = cmd->scsi_done;
@@ -1995,8 +1995,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
VPRINTK("ENTER\n");
- /* set scsi removeable (RMB) bit per ata bit */
- if (ata_id_removeable(args->id))
+ /* set scsi removable (RMB) bit per ata bit */
+ if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
@@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id)) {
- rbuf[14] |= 0x80; /* TPE */
+ rbuf[14] |= 0x80; /* LBPME */
- if (ata_id_has_zero_after_trim(args->id))
- rbuf[14] |= 0x40; /* TPRZ */
+ if (ata_id_has_zero_after_trim(args->id) &&
+ dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
+ }
}
}
-
return 0;
}
@@ -3666,6 +3668,9 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
+ if (scsi_init_shared_tag_map(shost, host->n_tags))
+ goto err_add;
+
rc = scsi_add_host_with_dma(ap->scsi_host,
&ap->tdev, ap->host->dev);
if (rc)
@@ -4228,3 +4233,31 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
+
+int ata_sas_allocate_tag(struct ata_port *ap)
+{
+ unsigned int max_queue = ap->host->n_tags;
+ unsigned int i, tag;
+
+ for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
+ if (ap->flags & ATA_FLAG_LOWTAG)
+ tag = 1;
+ else
+ tag = tag < max_queue ? tag : 0;
+
+ /* the last tag is reserved for internal command. */
+ if (tag == ATA_TAG_INTERNAL)
+ continue;
+
+ if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
+ ap->sas_last_tag = tag;
+ return tag;
+ }
+ }
+ return -1;
+}
+
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
+{
+ clear_bit(tag, &ap->sas_tag_allocated);
+}
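ata_sas_allocate_tag() and ata_sas_free_tag() above keep a small round-robin bitmap allocator for the libsas path, skipping the tag reserved for internal commands, now that the normal SCSI path takes its tags from the block layer. A single-threaded userspace sketch of the same idea follows; the kernel version uses test_and_set_bit()/clear_bit() so allocation is atomic, which the plain bitmask below is not.

#include <stdio.h>

#define MAX_TAGS	32
#define RESERVED_TAG	(MAX_TAGS - 1)	/* analogous to ATA_TAG_INTERNAL */

static unsigned long tag_bitmap;
static unsigned int last_tag;

/* Return a free tag, or -1 if all are busy; scan round-robin from last_tag + 1. */
static int allocate_tag(void)
{
	unsigned int i, tag;

	for (i = 0, tag = last_tag + 1; i < MAX_TAGS; i++, tag++) {
		tag = tag < MAX_TAGS ? tag : 0;
		if (tag == RESERVED_TAG)
			continue;
		if (!(tag_bitmap & (1UL << tag))) {
			tag_bitmap |= 1UL << tag;
			last_tag = tag;
			return (int)tag;
		}
	}
	return -1;
}

static void free_tag(unsigned int tag)
{
	tag_bitmap &= ~(1UL << tag);
}

int main(void)
{
	int a = allocate_tag(), b = allocate_tag();

	printf("got tags %d and %d\n", a, b);
	free_tag((unsigned int)a);
	free_tag((unsigned int)b);
	return 0;
}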
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index db90aa35cb71..2e86e3b85266 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
DPRINTK("ENTER\n");
cancel_delayed_work_sync(&ap->sff_pio_task);
+
+ /*
+ * We wanna reset the HSM state to IDLE. If we do so without
+ * grabbing the port lock, critical sections protected by it which
+ * expect the HSM state to stay stable may get surprised. For
+ * example, we may set IDLE in between the time
+ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+ */
+ spin_lock_irq(ap->lock);
ap->hsm_task_state = HSM_ST_IDLE;
+ spin_unlock_irq(ap->lock);
+
ap->sff_pio_task_link = NULL;
if (ata_msg_ctl(ap))
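The ata_sff_flush_pio_task() change above resets the HSM state only while holding the port lock, so code that checks the state and then acts on it under the same lock can never observe the reset in between. The portable sketch below (compile with -pthread) demonstrates that check-and-act-under-one-lock discipline, with a mutex standing in for the port spinlock.

#include <pthread.h>
#include <stdio.h>

enum hsm_state { HSM_ST_IDLE, HSM_ST_BUSY };

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static enum hsm_state hsm_state = HSM_ST_BUSY;

/* Reset shared state only while holding the lock that its readers take. */
static void flush_pio_task(void)
{
	pthread_mutex_lock(&port_lock);
	hsm_state = HSM_ST_IDLE;
	pthread_mutex_unlock(&port_lock);
}

/* The reader checks and acts on the state under the same lock, so it can
 * never see the state flip between the check and the action. */
static void port_intr(void)
{
	pthread_mutex_lock(&port_lock);
	if (hsm_state != HSM_ST_IDLE)
		printf("handling interrupt in state %d\n", (int)hsm_state);
	pthread_mutex_unlock(&port_lock);
}

int main(void)
{
	port_intr();
	flush_pio_task();
	port_intr();	/* now idle, nothing to handle */
	return 0;
}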
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5f4e0cca56ec..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -63,7 +63,7 @@ extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
-extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
+extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
@@ -76,7 +76,6 @@ extern unsigned ata_exec_internal_sg(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, struct scatterlist *sg,
unsigned int n_elem, unsigned long timeout);
-extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
@@ -145,6 +144,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_bus_probe(struct ata_port *ap);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
+int ata_sas_allocate_tag(struct ata_port *ap);
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
/* libata-eh.c */
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 48ae4b434474..f9ca72e937ee 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -276,10 +276,8 @@ static int cs5530_init_chip(void)
pci_dev_put(cs5530_0);
return 0;
fail_put:
- if (master_0)
- pci_dev_put(master_0);
- if (cs5530_0)
- pci_dev_put(cs5530_0);
+ pci_dev_put(master_0);
+ pci_dev_put(cs5530_0);
return -ENODEV;
}
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index dcc408abe171..b6b7af894d9d 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -16,6 +16,12 @@
#include <linux/ata_platform.h>
#include <linux/libata.h>
+#define DRV_NAME "pata_of_platform"
+
+static struct scsi_host_template pata_platform_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
static int pata_of_platform_probe(struct platform_device *ofdev)
{
int ret;
@@ -63,7 +69,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
pio_mask |= (1 << pio_mode) - 1;
return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
- reg_shift, pio_mask);
+ reg_shift, pio_mask, &pata_platform_sht);
}
static struct of_device_id pata_of_platform_match[] = {
@@ -74,7 +80,7 @@ MODULE_DEVICE_TABLE(of, pata_of_platform_match);
static struct platform_driver pata_of_platform_driver = {
.driver = {
- .name = "pata_of_platform",
+ .name = DRV_NAME,
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 4d06a5cda987..dca8251b1aea 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -28,6 +28,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ktime.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -605,7 +606,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
u32 scr;
long start_count, end_count;
- struct timeval start_time, end_time;
+ ktime_t start_time, end_time;
long pll_clock, usec_elapsed;
/* Start the test mode */
@@ -616,14 +617,14 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
/* Read current counter value */
start_count = pdc_read_counter(host);
- do_gettimeofday(&start_time);
+ start_time = ktime_get();
/* Let the counter run for 100 ms. */
mdelay(100);
/* Read the counter values again */
end_count = pdc_read_counter(host);
- do_gettimeofday(&end_time);
+ end_time = ktime_get();
/* Stop the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
@@ -632,8 +633,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
/* calculate the input clock in Hz */
- usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
- (end_time.tv_usec - start_time.tv_usec);
+ usec_elapsed = (long) ktime_us_delta(end_time, start_time);
pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
(100000000 / usec_elapsed);
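The pata_pdc2027x.c change above measures the 100 ms calibration window with ktime_get()/ktime_us_delta() instead of do_gettimeofday(), i.e. with a monotonic clock that cannot jump backwards. A userspace equivalent of that elapsed-time calculation, using clock_gettime(CLOCK_MONOTONIC), is sketched below for illustration.

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Microseconds elapsed between two monotonic timestamps. */
static long usec_delta(const struct timespec *start, const struct timespec *end)
{
	return (end->tv_sec - start->tv_sec) * 1000000L +
	       (end->tv_nsec - start->tv_nsec) / 1000L;
}

int main(void)
{
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	usleep(100 * 1000);		/* stands in for the 100 ms counter window */
	clock_gettime(CLOCK_MONOTONIC, &end);

	printf("elapsed: %ld us\n", usec_delta(&start, &end));
	return 0;
}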
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 1eedfe46d7c8..c503ded87bb8 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -78,6 +78,7 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
* @irq_res: Resource representing IRQ and its flags
* @ioport_shift: I/O port shift
* @__pio_mask: PIO mask
+ * @sht: scsi_host_template to use when registering
*
* Register a platform bus IDE interface. Such interfaces are PIO and we
* assume do not support IRQ sharing.
@@ -99,7 +100,8 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
*/
int __pata_platform_probe(struct device *dev, struct resource *io_res,
struct resource *ctl_res, struct resource *irq_res,
- unsigned int ioport_shift, int __pio_mask)
+ unsigned int ioport_shift, int __pio_mask,
+ struct scsi_host_template *sht)
{
struct ata_host *host;
struct ata_port *ap;
@@ -170,7 +172,7 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res,
/* activate */
return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
- irq_flags, &pata_platform_sht);
+ irq_flags, sht);
}
EXPORT_SYMBOL_GPL(__pata_platform_probe);
@@ -216,7 +218,7 @@ static int pata_platform_probe(struct platform_device *pdev)
return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
pp_info ? pp_info->ioport_shift : 0,
- pio_mask);
+ pio_mask, &pata_platform_sht);
}
static struct platform_driver pata_platform_driver = {
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index c7ddef89e7b0..fdb0f2879ea7 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -48,6 +48,18 @@
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"
+#ifndef out_le32
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
+#endif
+
+#ifndef in_le32
+#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a)))
+#endif
+
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
/* SATA DMA driver Globals */
#define DMA_NUM_CHANS 1
#define DMA_NUM_CHAN_REGS 8
@@ -273,7 +285,7 @@ struct sata_dwc_device {
struct device *dev; /* generic device struct */
struct ata_probe_ent *pe; /* ptr to probe-ent */
struct ata_host *host;
- u8 *reg_base;
+ u8 __iomem *reg_base;
struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
int irq_dma;
};
@@ -323,7 +335,9 @@ struct sata_dwc_host_priv {
struct device *dwc_dev;
int dma_channel;
};
-struct sata_dwc_host_priv host_pvt;
+
+static struct sata_dwc_host_priv host_pvt;
+
/*
* Prototypes
*/
@@ -580,9 +594,9 @@ static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
sms_val = 0;
dms_val = 1 + host_pvt.dma_channel;
- dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
- " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
- (u32)dmadr_addr);
+ dev_dbg(host_pvt.dwc_dev,
+ "%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n",
+ __func__, sg, num_elems, lli, &dma_lli, dmadr_addr);
bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
@@ -773,7 +787,7 @@ static void dma_dwc_exit(struct sata_dwc_device *hsdev)
{
dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
if (host_pvt.sata_dma_regs) {
- iounmap(host_pvt.sata_dma_regs);
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
host_pvt.sata_dma_regs = NULL;
}
@@ -797,7 +811,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
if (err) {
dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
" %d\n", __func__, err);
- goto error_out;
+ return err;
}
/* Enable DMA */
@@ -808,11 +822,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
sata_dma_regs);
return 0;
-
-error_out:
- dma_dwc_exit(hsdev);
-
- return err;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -823,7 +832,7 @@ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
return -EINVAL;
}
- *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
+ *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
__func__, link->ap->print_id, scr, *val);
@@ -839,21 +848,19 @@ static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
__func__, scr);
return -EINVAL;
}
- out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
+ out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
return 0;
}
static u32 core_scr_read(unsigned int scr)
{
- return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\
- (scr * 4));
+ return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
}
static void core_scr_write(unsigned int scr, u32 val)
{
- out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4),
- val);
+ out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
}
static void clear_serror(void)
@@ -861,7 +868,6 @@ static void clear_serror(void)
u32 val;
val = core_scr_read(SCR_ERROR);
core_scr_write(SCR_ERROR, val);
-
}
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
@@ -1261,24 +1267,24 @@ static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
{
- port->cmd_addr = (void *)base + 0x00;
- port->data_addr = (void *)base + 0x00;
+ port->cmd_addr = (void __iomem *)base + 0x00;
+ port->data_addr = (void __iomem *)base + 0x00;
- port->error_addr = (void *)base + 0x04;
- port->feature_addr = (void *)base + 0x04;
+ port->error_addr = (void __iomem *)base + 0x04;
+ port->feature_addr = (void __iomem *)base + 0x04;
- port->nsect_addr = (void *)base + 0x08;
+ port->nsect_addr = (void __iomem *)base + 0x08;
- port->lbal_addr = (void *)base + 0x0c;
- port->lbam_addr = (void *)base + 0x10;
- port->lbah_addr = (void *)base + 0x14;
+ port->lbal_addr = (void __iomem *)base + 0x0c;
+ port->lbam_addr = (void __iomem *)base + 0x10;
+ port->lbah_addr = (void __iomem *)base + 0x14;
- port->device_addr = (void *)base + 0x18;
- port->command_addr = (void *)base + 0x1c;
- port->status_addr = (void *)base + 0x1c;
+ port->device_addr = (void __iomem *)base + 0x18;
+ port->command_addr = (void __iomem *)base + 0x1c;
+ port->status_addr = (void __iomem *)base + 0x1c;
- port->altstatus_addr = (void *)base + 0x20;
- port->ctl_addr = (void *)base + 0x20;
+ port->altstatus_addr = (void __iomem *)base + 0x20;
+ port->ctl_addr = (void __iomem *)base + 0x20;
}
/*
@@ -1319,7 +1325,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
- ap->bmdma_prd = 0; /* set these so libata doesn't use them */
+ ap->bmdma_prd = NULL; /* set these so libata doesn't use them */
ap->bmdma_prd_dma = 0;
/*
@@ -1516,8 +1522,8 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
hsdevp->llit_dma[tag],
- (void *__iomem)(&hsdev->sata_dwc_regs->\
- dmadr), qc->dma_dir);
+ (void __iomem *)&hsdev->sata_dwc_regs->dmadr,
+ qc->dma_dir);
if (dma_chan < 0) {
dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
__func__, dma_chan);
@@ -1590,8 +1596,8 @@ static void sata_dwc_error_handler(struct ata_port *ap)
ata_sff_error_handler(ap);
}
-int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
int ret;
@@ -1623,7 +1629,7 @@ static struct scsi_host_template sata_dwc_sht = {
* max of 1. This will get fixed in a future release.
*/
.sg_tablesize = LIBATA_MAX_PRD,
- .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */
+ /* .can_queue = ATA_MAX_QUEUE, */
.dma_boundary = ATA_DMA_BOUNDARY,
};
@@ -1660,9 +1666,9 @@ static int sata_dwc_probe(struct platform_device *ofdev)
struct sata_dwc_device *hsdev;
u32 idr, versionr;
char *ver = (char *)&versionr;
- u8 *base = NULL;
+ u8 __iomem *base;
int err = 0;
- int irq, rc;
+ int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1670,12 +1676,12 @@ static int sata_dwc_probe(struct platform_device *ofdev)
u32 dma_chan;
/* Allocate DWC SATA device */
- hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
- if (hsdev == NULL) {
- dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
- err = -ENOMEM;
- goto error;
- }
+ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+ hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
+ if (!host || !hsdev)
+ return -ENOMEM;
+
+ host->private_data = hsdev;
if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
dev_warn(&ofdev->dev, "no dma-channel property set."
@@ -1685,12 +1691,11 @@ static int sata_dwc_probe(struct platform_device *ofdev)
host_pvt.dma_channel = dma_chan;
/* Ioremap SATA registers */
- base = of_iomap(ofdev->dev.of_node, 0);
+ base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "ioremap failed for SATA register"
" address\n");
- err = -ENODEV;
- goto error_kmalloc;
+ return -ENODEV;
}
hsdev->reg_base = base;
dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
@@ -1698,16 +1703,6 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Synopsys DWC SATA specific Registers */
hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
- /* Allocate and fill host */
- host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
- if (!host) {
- dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
- err = -ENOMEM;
- goto error_iomap;
- }
-
- host->private_data = hsdev;
-
/* Setup port */
host->ports[0]->ioaddr.cmd_addr = base;
host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
@@ -1721,33 +1716,35 @@ static int sata_dwc_probe(struct platform_device *ofdev)
idr, ver[0], ver[1], ver[2]);
/* Get SATA DMA interrupt number */
- irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
+ irq = irq_of_parse_and_map(np, 1);
if (irq == NO_IRQ) {
dev_err(&ofdev->dev, "no SATA DMA irq\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Get physical SATA DMA register base address */
- host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
+ host_pvt.sata_dma_regs = (void *)of_iomap(np, 1);
if (!(host_pvt.sata_dma_regs)) {
dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Save dev for later use in dev_xxx() routines */
host_pvt.dwc_dev = &ofdev->dev;
/* Initialize AHB DMAC */
- dma_dwc_init(hsdev, irq);
+ err = dma_dwc_init(hsdev, irq);
+ if (err)
+ goto error_dma_iomap;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
/* Get SATA interrupt number */
- irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
dev_err(&ofdev->dev, "no SATA DMA irq\n");
err = -ENODEV;
@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
* device discovery process, invoking our port_start() handler &
* error_handler() to execute a dummy Softreset EH session
*/
- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
- if (rc != 0)
+ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+ if (err)
dev_err(&ofdev->dev, "failed to activate host");
dev_set_drvdata(&ofdev->dev, host);
@@ -1770,12 +1766,10 @@ static int sata_dwc_probe(struct platform_device *ofdev)
error_out:
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
-
+error_dma_iomap:
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
error_iomap:
iounmap(base);
-error_kmalloc:
- kfree(hsdev);
-error:
return err;
}
@@ -1786,14 +1780,12 @@ static int sata_dwc_remove(struct platform_device *ofdev)
struct sata_dwc_device *hsdev = host->private_data;
ata_host_detach(host);
- dev_set_drvdata(dev, NULL);
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
iounmap(hsdev->reg_base);
- kfree(hsdev);
- kfree(host);
dev_dbg(&ofdev->dev, "done\n");
return 0;
}
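The sata_dwc_460ex.c probe rework above lets ata_host_alloc_pinfo() and devm_kzalloc() manage the early allocations and keeps goto labels only for the mappings that still need manual teardown, unwinding them in reverse order of acquisition. A compact standalone sketch of that probe-style goto unwind follows; acquire_a()/acquire_b()/acquire_c() and their release counterparts are placeholders, not driver functions.

#include <stdio.h>

static int acquire_a(void)  { puts("map A");   return 0; }
static void release_a(void) { puts("unmap A"); }
static int acquire_b(void)  { puts("map B");   return 0; }
static void release_b(void) { puts("unmap B"); }
static int acquire_c(void)  { puts("init C");  return -1; /* simulate failure */ }

/* Acquire resources in order; on failure, release only what was acquired,
 * in reverse order, via fall-through labels. */
static int probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_release_a;

	err = acquire_c();
	if (err)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}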
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index f9a0e34eb111..f8c33e3772b8 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4185,8 +4185,7 @@ err:
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
- if (hpriv->port_phys[port])
- phy_power_off(hpriv->port_phys[port]);
+ phy_power_off(hpriv->port_phys[port]);
}
return rc;
@@ -4216,8 +4215,7 @@ static int mv_platform_remove(struct platform_device *pdev)
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
- if (hpriv->port_phys[port])
- phy_power_off(hpriv->port_phys[port]);
+ phy_power_off(hpriv->port_phys[port]);
}
return 0;
}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index cb0d2e644af5..d49a5193b7de 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -2,8 +2,8 @@
* Renesas R-Car SATA driver
*
* Author: Vladimir Barinov <source@cogentembedded.com>
- * Copyright (C) 2013 Cogent Embedded, Inc.
- * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013-2015 Cogent Embedded, Inc.
+ * Copyright (C) 2013-2015 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -992,9 +992,30 @@ static int sata_rcar_resume(struct device *dev)
return 0;
}
+static int sata_rcar_restore(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ clk_prepare_enable(priv->clk);
+
+ sata_rcar_setup_port(host);
+
+ /* initialize host controller */
+ sata_rcar_init_controller(host);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
static const struct dev_pm_ops sata_rcar_pm_ops = {
.suspend = sata_rcar_suspend,
.resume = sata_rcar_resume,
+ .freeze = sata_rcar_suspend,
+ .thaw = sata_rcar_resume,
+ .poweroff = sata_rcar_suspend,
+ .restore = sata_rcar_restore,
};
#endif
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index d81b20ddb527..ba2667fa0528 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -246,7 +246,7 @@ enum {
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
- ATA_FLAG_AN | ATA_FLAG_PMP,
+ ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
@@ -388,6 +388,7 @@ static struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
+ .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
};
static struct ata_port_operations sil24_ops = {
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c7fab3ee14ee..6339efd32697 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -354,9 +354,9 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
eni_vcc = ENI_VCC(vcc);
paddr = 0; /* GCC, shut up */
if (skb) {
- paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(eni_dev->pci_dev, paddr))
+ paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
goto dma_map_error;
ENI_PRV_PADDR(skb) = paddr;
if (paddr & 3)
@@ -481,8 +481,8 @@ rx_enqueued++;
trouble:
if (paddr)
- pci_unmap_single(eni_dev->pci_dev,paddr,skb->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&eni_dev->pci_dev->dev,paddr,skb->len,
+ DMA_FROM_DEVICE);
dma_map_error:
if (skb) dev_kfree_skb_irq(skb);
return -1;
@@ -758,8 +758,8 @@ rx_dequeued++;
}
eni_vcc->rxing--;
eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1);
- pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
+ DMA_TO_DEVICE);
if (!skb->len) dev_kfree_skb_irq(skb);
else {
EVENT("pushing (len=%ld)\n",skb->len,0);
@@ -1112,8 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
vcc->dev->number);
return enq_jam;
}
- paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
- PCI_DMA_TODEVICE);
+ paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
+ DMA_TO_DEVICE);
ENI_PRV_PADDR(skb) = paddr;
/* prepare DMA queue entries */
j = 0;
@@ -1226,8 +1226,8 @@ static void dequeue_tx(struct atm_dev *dev)
break;
}
ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
- pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
+ DMA_TO_DEVICE);
if (vcc->pop) vcc->pop(vcc,skb);
else dev_kfree_skb_irq(skb);
atomic_inc(&vcc->stats->tx);
@@ -2240,13 +2240,18 @@ static int eni_init_one(struct pci_dev *pci_dev,
if (rc < 0)
goto out;
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (rc < 0)
+ goto out;
+
rc = -ENOMEM;
eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
if (!eni_dev)
goto err_disable;
zero = &eni_dev->zero;
- zero->addr = pci_alloc_consistent(pci_dev, ENI_ZEROES_SIZE, &zero->dma);
+ zero->addr = dma_alloc_coherent(&pci_dev->dev,
+ ENI_ZEROES_SIZE, &zero->dma, GFP_KERNEL);
if (!zero->addr)
goto err_kfree;
@@ -2277,7 +2282,7 @@ err_eni_release:
err_unregister:
atm_dev_deregister(dev);
err_free_consistent:
- pci_free_consistent(pci_dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
+ dma_free_coherent(&pci_dev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
err_kfree:
kfree(eni_dev);
err_disable:
@@ -2302,7 +2307,7 @@ static void eni_remove_one(struct pci_dev *pdev)
eni_do_release(dev);
atm_dev_deregister(dev);
- pci_free_consistent(pdev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
+ dma_free_coherent(&pdev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
kfree(ed);
pci_disable_device(pdev);
}
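The ATM driver hunks in this area move from the legacy pci_map_single()/pci_alloc_consistent() wrappers to the generic DMA API called on &pci_dev->dev. The fragment below is a hedged, kernel-style sketch of the streaming-mapping pattern those calls follow (map, check for a mapping error, use, unmap); it assumes a kernel build context, is not taken from these drivers, and simplifies away the actual hardware handoff.

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a buffer for device reads, then unmap it. */
static int example_dma_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t paddr;

	paddr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, paddr))
		return -ENOMEM;

	/* ... hand paddr to the hardware and wait for completion ... */

	dma_unmap_single(&pdev->dev, paddr, len, DMA_TO_DEVICE);
	return 0;
}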
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index d5d9eafbbfcf..75dde903b238 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -425,7 +425,7 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
static u32
fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
- u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
+ u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);
DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
virt_addr, size, direction, dma_addr);
@@ -440,7 +440,7 @@ fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int di
DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
dma_addr, size, direction);
- pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
+ dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}
@@ -449,7 +449,7 @@ fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size,
{
DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
- pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
+ dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}
static void
@@ -457,7 +457,7 @@ fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int si
{
DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
- pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
+ dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}
@@ -470,9 +470,10 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
{
/* returned chunks are page-aligned */
chunk->alloc_size = size * nbr;
- chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
- chunk->alloc_size,
- &chunk->dma_addr);
+ chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
+ chunk->alloc_size,
+ &chunk->dma_addr,
+ GFP_KERNEL);
if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
return -ENOMEM;
@@ -488,7 +489,7 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
- pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
+ dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
chunk->alloc_size,
chunk->alloc_addr,
chunk->dma_addr);
@@ -2707,6 +2708,11 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev,
err = -EINVAL;
goto out;
}
+
+ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
+ err = -EINVAL;
+ goto out;
+ }
fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
if (fore200e == NULL) {
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c39702bc279d..93dca2e73bf5 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -359,7 +359,7 @@ static int he_init_one(struct pci_dev *pci_dev,
if (pci_enable_device(pci_dev))
return -EIO;
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING "he: no suitable dma available\n");
err = -EIO;
goto init_one_failure;
@@ -533,9 +533,9 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
static int he_init_tpdrq(struct he_dev *he_dev)
{
- he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
- CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
- &he_dev->tpdrq_phys);
+ he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+ &he_dev->tpdrq_phys, GFP_KERNEL);
if (he_dev->tpdrq_base == NULL) {
hprintk("failed to alloc tpdrq\n");
return -ENOMEM;
@@ -796,16 +796,16 @@ static int he_init_group(struct he_dev *he_dev, int group)
}
/* large buffer pool */
- he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
+ he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
CONFIG_RBPL_BUFSIZE, 64, 0);
if (he_dev->rbpl_pool == NULL) {
hprintk("unable to create rbpl pool\n");
goto out_free_rbpl_virt;
}
- he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
- CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
- &he_dev->rbpl_phys);
+ he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+ &he_dev->rbpl_phys, GFP_KERNEL);
if (he_dev->rbpl_base == NULL) {
hprintk("failed to alloc rbpl_base\n");
goto out_destroy_rbpl_pool;
@@ -815,7 +815,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
- heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+ heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
if (!heb)
goto out_free_rbpl;
heb->mapping = mapping;
@@ -842,9 +842,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* rx buffer ready queue */
- he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
- CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
- &he_dev->rbrq_phys);
+ he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+ &he_dev->rbrq_phys, GFP_KERNEL);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
goto out_free_rbpl;
@@ -866,9 +866,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* tx buffer ready queue */
- he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
- CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
- &he_dev->tbrq_phys);
+ he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ &he_dev->tbrq_phys, GFP_KERNEL);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
goto out_free_rbpq_base;
@@ -884,18 +884,18 @@ static int he_init_group(struct he_dev *he_dev, int group)
return 0;
out_free_rbpq_base:
- pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
- sizeof(struct he_rbrq), he_dev->rbrq_base,
- he_dev->rbrq_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
+ sizeof(struct he_rbrq), he_dev->rbrq_base,
+ he_dev->rbrq_phys);
out_free_rbpl:
list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
- pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+ dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
- pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
- sizeof(struct he_rbp), he_dev->rbpl_base,
- he_dev->rbpl_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
+ sizeof(struct he_rbp), he_dev->rbpl_base,
+ he_dev->rbpl_phys);
out_destroy_rbpl_pool:
- pci_pool_destroy(he_dev->rbpl_pool);
+ dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
@@ -911,8 +911,11 @@ static int he_init_irq(struct he_dev *he_dev)
/* 2.9.3.5 tail offset for each interrupt queue is located after the
end of the interrupt queue */
- he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
- (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
+ he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+ (CONFIG_IRQ_SIZE + 1)
+ * sizeof(struct he_irq),
+ &he_dev->irq_phys,
+ GFP_KERNEL);
if (he_dev->irq_base == NULL) {
hprintk("failed to allocate irq\n");
return -ENOMEM;
@@ -1419,10 +1422,10 @@ static int he_start(struct atm_dev *dev)
he_init_tpdrq(he_dev);
- he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
- sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
+ he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
+ sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
if (he_dev->tpd_pool == NULL) {
- hprintk("unable to create tpd pci_pool\n");
+ hprintk("unable to create tpd dma_pool\n");
return -ENOMEM;
}
@@ -1459,9 +1462,9 @@ static int he_start(struct atm_dev *dev)
/* host status page */
- he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
- sizeof(struct he_hsp),
- &he_dev->hsp_phys);
+ he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+ sizeof(struct he_hsp),
+ &he_dev->hsp_phys, GFP_KERNEL);
if (he_dev->hsp == NULL) {
hprintk("failed to allocate host status page\n");
return -ENOMEM;
@@ -1558,41 +1561,41 @@ he_stop(struct he_dev *he_dev)
free_irq(he_dev->irq, he_dev);
if (he_dev->irq_base)
- pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
- * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
+ * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
if (he_dev->hsp)
- pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
- he_dev->hsp, he_dev->hsp_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
+ he_dev->hsp, he_dev->hsp_phys);
if (he_dev->rbpl_base) {
list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
- pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+ dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
- pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
- * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
+ * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
}
kfree(he_dev->rbpl_virt);
kfree(he_dev->rbpl_table);
if (he_dev->rbpl_pool)
- pci_pool_destroy(he_dev->rbpl_pool);
+ dma_pool_destroy(he_dev->rbpl_pool);
if (he_dev->rbrq_base)
- pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
- he_dev->rbrq_base, he_dev->rbrq_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+ he_dev->rbrq_base, he_dev->rbrq_phys);
if (he_dev->tbrq_base)
- pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
- he_dev->tbrq_base, he_dev->tbrq_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ he_dev->tbrq_base, he_dev->tbrq_phys);
if (he_dev->tpdrq_base)
- pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
- he_dev->tpdrq_base, he_dev->tpdrq_phys);
+ dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ he_dev->tpdrq_base, he_dev->tpdrq_phys);
if (he_dev->tpd_pool)
- pci_pool_destroy(he_dev->tpd_pool);
+ dma_pool_destroy(he_dev->tpd_pool);
if (he_dev->pci_dev) {
pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
@@ -1610,7 +1613,7 @@ __alloc_tpd(struct he_dev *he_dev)
struct he_tpd *tpd;
dma_addr_t mapping;
- tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+ tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
if (tpd == NULL)
return NULL;
@@ -1681,7 +1684,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
clear_bit(i, he_dev->rbpl_table);
list_del(&heb->entry);
- pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+ dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
}
goto next_rbrq_entry;
@@ -1774,7 +1777,7 @@ return_host_buffers:
++pdus_assembled;
list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
- pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+ dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
INIT_LIST_HEAD(&he_vcc->buffers);
he_vcc->pdu_len = 0;
@@ -1843,10 +1846,10 @@ he_service_tbrq(struct he_dev *he_dev, int group)
for (slot = 0; slot < TPD_MAXIOV; ++slot) {
if (tpd->iovec[slot].addr)
- pci_unmap_single(he_dev->pci_dev,
+ dma_unmap_single(&he_dev->pci_dev->dev,
tpd->iovec[slot].addr,
tpd->iovec[slot].len & TPD_LEN_MASK,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tpd->iovec[slot].len & TPD_LST)
break;
@@ -1861,7 +1864,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)
next_tbrq_entry:
if (tpd)
- pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
he_dev->tbrq_head = (struct he_tbrq *)
((unsigned long) he_dev->tbrq_base |
TBRQ_MASK(he_dev->tbrq_head + 1));
@@ -1905,7 +1908,7 @@ he_service_rbpl(struct he_dev *he_dev, int group)
}
he_dev->rbpl_hint = i + 1;
- heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+ heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
if (!heb)
break;
heb->mapping = mapping;
@@ -2084,10 +2087,10 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
*/
for (slot = 0; slot < TPD_MAXIOV; ++slot) {
if (tpd->iovec[slot].addr)
- pci_unmap_single(he_dev->pci_dev,
+ dma_unmap_single(&he_dev->pci_dev->dev,
tpd->iovec[slot].addr,
tpd->iovec[slot].len & TPD_LEN_MASK,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
if (tpd->skb) {
if (tpd->vcc->pop)
@@ -2096,7 +2099,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
dev_kfree_skb_any(tpd->skb);
atomic_inc(&tpd->vcc->stats->tx_err);
}
- pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
return;
}
}
@@ -2550,8 +2553,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
#ifdef USE_SCATTERGATHER
- tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tpd->iovec[slot].len = skb_headlen(skb);
++slot;
@@ -2579,9 +2582,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
slot = 0;
}
- tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
+ tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
(void *) page_address(frag->page) + frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ frag->size, DMA_TO_DEVICE);
tpd->iovec[slot].len = frag->size;
++slot;
@@ -2589,7 +2592,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
tpd->iovec[slot - 1].len |= TPD_LST;
#else
- tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
tpd->length0 = skb->len | TPD_LST;
#endif
tpd->status |= TPD_INT;
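
The he.c hunks above follow one mechanical pattern: each pci_pool_* call becomes the matching dma_pool_* call taking a struct device * instead of a struct pci_dev *, and GFP_DMA is dropped because addressability is now governed by the device's DMA mask. A minimal, hedged sketch of that life cycle, using hypothetical my_dev/my_pool_init names rather than the driver's own structures:

#include <linux/dmapool.h>
#include <linux/pci.h>

/* Hypothetical per-device state; not the he.c structures. */
struct my_dev {
	struct pci_dev *pci_dev;
	struct dma_pool *pool;
};

static int my_pool_init(struct my_dev *d)
{
	void *buf;
	dma_addr_t handle;

	/* name, device, block size, alignment, boundary */
	d->pool = dma_pool_create("mybuf", &d->pci_dev->dev, 2048, 64, 0);
	if (!d->pool)
		return -ENOMEM;

	/* GFP_DMA is no longer passed; the mask set at probe time decides addressing. */
	buf = dma_pool_alloc(d->pool, GFP_KERNEL, &handle);
	if (!buf) {
		dma_pool_destroy(d->pool);
		return -ENOMEM;
	}

	/* ... hand 'handle' to the hardware, use 'buf' from the CPU ... */

	dma_pool_free(d->pool, buf, handle);
	dma_pool_destroy(d->pool);
	return 0;
}
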
diff --git a/drivers/atm/he.h b/drivers/atm/he.h
index 110a27d2ecfc..f3f53674ef3f 100644
--- a/drivers/atm/he.h
+++ b/drivers/atm/he.h
@@ -281,7 +281,7 @@ struct he_dev {
int irq_peak;
struct tasklet_struct tasklet;
- struct pci_pool *tpd_pool;
+ struct dma_pool *tpd_pool;
struct list_head outstanding_tpds;
dma_addr_t tpdrq_phys;
@@ -296,7 +296,7 @@ struct he_dev {
struct he_buff **rbpl_virt;
unsigned long *rbpl_table;
unsigned long rbpl_hint;
- struct pci_pool *rbpl_pool;
+ struct dma_pool *rbpl_pool;
dma_addr_t rbpl_phys;
struct he_rbp *rbpl_base, *rbpl_tail;
struct list_head rbpl_outstanding;
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 1dc0519333f2..527bbd595e37 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -458,12 +458,6 @@ static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode,
return;
}
-static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
- wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
- chan * TX_CHANNEL_CONFIG_MULT | mode);
- return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
-}
-
/********** dump functions **********/
static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
@@ -513,16 +507,6 @@ static inline void dump_framer (hrz_dev * dev) {
/* RX channels are 10 bit integers, these fns are quite paranoid */
-static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
- unsigned short vci_bits = 10 - vpi_bits;
- if ((channel & RX_CHANNEL_MASK) == channel) {
- *vci = channel & ((~0)<<vci_bits);
- *vpi = channel >> vci_bits;
- return channel ? 0 : -EINVAL;
- }
- return -EINVAL;
-}
-
static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
unsigned short vci_bits = 10 - vpi_bits;
if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
@@ -1260,14 +1244,6 @@ static u32 rx_queue_entry_next (hrz_dev * dev) {
return rx_queue_entry;
}
-/********** handle RX disabled by device **********/
-
-static inline void rx_disabled_handler (hrz_dev * dev) {
- wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
- // count me please
- PRINTK (KERN_WARNING, "RX was disabled!");
-}
-
/********** handle RX data received by device **********/
// called from IRQ handler
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 2b24ed056728..074616b39f4d 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,7 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
if (!scq)
return NULL;
- scq->base = pci_zalloc_consistent(card->pcidev, SCQ_SIZE, &scq->paddr);
+ scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+ &scq->paddr, GFP_KERNEL);
if (scq->base == NULL) {
kfree(scq);
return NULL;
@@ -669,12 +670,12 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
struct sk_buff *skb;
struct atm_vcc *vcc;
- pci_free_consistent(card->pcidev, SCQ_SIZE,
- scq->base, scq->paddr);
+ dma_free_coherent(&card->pcidev->dev, SCQ_SIZE,
+ scq->base, scq->paddr);
while ((skb = skb_dequeue(&scq->transmit))) {
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb->len, DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
if (vcc->pop)
@@ -684,8 +685,8 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
}
while ((skb = skb_dequeue(&scq->pending))) {
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb->len, DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
if (vcc->pop)
@@ -800,8 +801,8 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
if (skb) {
TXPRINTK("%s: freeing skb at %p.\n", card->name, skb);
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb->len, DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
@@ -846,8 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
tbd = &IDT77252_PRV_TBD(skb);
vcc = ATM_SKB(skb)->vcc;
- IDT77252_PRV_PADDR(skb) = pci_map_single(card->pcidev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
error = -EINVAL;
@@ -924,8 +925,8 @@ done:
return 0;
errout:
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb->len, DMA_TO_DEVICE);
return error;
}
@@ -970,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
{
struct rsq_entry *rsqe;
- card->rsq.base = pci_zalloc_consistent(card->pcidev, RSQSIZE,
- &card->rsq.paddr);
+ card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
+ &card->rsq.paddr, GFP_KERNEL);
if (card->rsq.base == NULL) {
printk("%s: can't allocate RSQ.\n", card->name);
return -1;
@@ -1001,8 +1002,8 @@ init_rsq(struct idt77252_dev *card)
static void
deinit_rsq(struct idt77252_dev *card)
{
- pci_free_consistent(card->pcidev, RSQSIZE,
- card->rsq.base, card->rsq.paddr);
+ dma_free_coherent(&card->pcidev->dev, RSQSIZE,
+ card->rsq.base, card->rsq.paddr);
}
static void
@@ -1057,9 +1058,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
vcc = vc->rx_vcc;
- pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb_end_pointer(skb) - skb->data,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb_end_pointer(skb) - skb->data,
+ DMA_FROM_DEVICE);
if ((vcc->qos.aal == ATM_AAL0) ||
(vcc->qos.aal == ATM_AAL34)) {
@@ -1180,9 +1181,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
return;
}
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
sb_pool_remove(card, skb);
skb_trim(skb, len);
@@ -1254,9 +1255,9 @@ idt77252_rx_raw(struct idt77252_dev *card)
head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
tail = readl(SAR_REG_RAWCT);
- pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
- skb_end_offset(queue) - 16,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(queue),
+ skb_end_offset(queue) - 16,
+ DMA_FROM_DEVICE);
while (head != tail) {
unsigned int vpi, vci;
@@ -1348,11 +1349,11 @@ drop:
if (next) {
card->raw_cell_head = next;
queue = card->raw_cell_head;
- pci_dma_sync_single_for_cpu(card->pcidev,
- IDT77252_PRV_PADDR(queue),
- (skb_end_pointer(queue) -
- queue->data),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->pcidev->dev,
+ IDT77252_PRV_PADDR(queue),
+ (skb_end_pointer(queue) -
+ queue->data),
+ DMA_FROM_DEVICE);
} else {
card->raw_cell_head = NULL;
printk("%s: raw cell queue overrun\n",
@@ -1375,8 +1376,8 @@ init_tsq(struct idt77252_dev *card)
{
struct tsq_entry *tsqe;
- card->tsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
- &card->tsq.paddr);
+ card->tsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+ &card->tsq.paddr, GFP_KERNEL);
if (card->tsq.base == NULL) {
printk("%s: can't allocate TSQ.\n", card->name);
return -1;
@@ -1398,8 +1399,8 @@ init_tsq(struct idt77252_dev *card)
static void
deinit_tsq(struct idt77252_dev *card)
{
- pci_free_consistent(card->pcidev, TSQSIZE,
- card->tsq.base, card->tsq.paddr);
+ dma_free_coherent(&card->pcidev->dev, TSQSIZE,
+ card->tsq.base, card->tsq.paddr);
}
static void
@@ -1861,9 +1862,9 @@ add_rx_skb(struct idt77252_dev *card, int queue,
goto outfree;
}
- paddr = pci_map_single(card->pcidev, skb->data,
+ paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1875,8 +1876,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
return;
outunmap:
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1892,15 +1893,15 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
u32 handle = IDT77252_PRV_POOL(skb);
int err;
- pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
- skb_end_pointer(skb) - skb->data,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ skb_end_pointer(skb) - skb->data,
+ DMA_FROM_DEVICE);
err = push_rx_skb(card, skb, POOL_QUEUE(handle));
if (err) {
- pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
+ dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
sb_pool_remove(card, skb);
dev_kfree_skb(skb);
}
@@ -3058,11 +3059,11 @@ deinit_card(struct idt77252_dev *card)
for (j = 0; j < FBQ_SIZE; j++) {
skb = card->sbpool[i].skb[j];
if (skb) {
- pci_unmap_single(card->pcidev,
+ dma_unmap_single(&card->pcidev->dev,
IDT77252_PRV_PADDR(skb),
(skb_end_pointer(skb) -
skb->data),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
card->sbpool[i].skb[j] = NULL;
dev_kfree_skb(skb);
}
@@ -3076,8 +3077,8 @@ deinit_card(struct idt77252_dev *card)
vfree(card->vcs);
if (card->raw_cell_hnd) {
- pci_free_consistent(card->pcidev, 2 * sizeof(u32),
- card->raw_cell_hnd, card->raw_cell_paddr);
+ dma_free_coherent(&card->pcidev->dev, 2 * sizeof(u32),
+ card->raw_cell_hnd, card->raw_cell_paddr);
}
if (card->rsq.base) {
@@ -3397,9 +3398,10 @@ static int init_card(struct atm_dev *dev)
writel(0, SAR_REG_GP);
/* Initialize RAW Cell Handle Register */
- card->raw_cell_hnd = pci_zalloc_consistent(card->pcidev,
- 2 * sizeof(u32),
- &card->raw_cell_paddr);
+ card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
+ 2 * sizeof(u32),
+ &card->raw_cell_paddr,
+ GFP_KERNEL);
if (!card->raw_cell_hnd) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
@@ -3611,6 +3613,11 @@ static int idt77252_init_one(struct pci_dev *pcidev,
return err;
}
+ if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
+ printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
+ return err;
+ }
+
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
if (!card) {
printk("idt77252-%d: can't allocate private data\n", index);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 4217f29a85e0..924f8e26789d 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1185,8 +1185,8 @@ static int rx_pkt(struct atm_dev *dev)
/* Build the DLE structure */
wr_ptr = iadev->rx_dle_q.write;
- wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
- len, PCI_DMA_FROMDEVICE);
+ wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
+ len, DMA_FROM_DEVICE);
wr_ptr->local_pkt_addr = buf_addr;
wr_ptr->bytes = len; /* We don't know this do we ?? */
wr_ptr->mode = DMA_INT_ENABLE;
@@ -1306,8 +1306,8 @@ static void rx_dle_intr(struct atm_dev *dev)
u_short length;
struct ia_vcc *ia_vcc;
- pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
+ len, DMA_FROM_DEVICE);
/* no VCC related housekeeping done as yet. lets see */
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
@@ -1430,8 +1430,8 @@ static int rx_init(struct atm_dev *dev)
// spin_lock_init(&iadev->rx_lock);
/* Allocate 4k bytes - more aligned than needed (4k boundary) */
- dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
- &iadev->rx_dle_dma);
+ dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
+ &iadev->rx_dle_dma, GFP_KERNEL);
if (!dle_addr) {
printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
goto err_out;
@@ -1631,8 +1631,8 @@ static int rx_init(struct atm_dev *dev)
return 0;
err_free_dle:
- pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
- iadev->rx_dle_dma);
+ dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
+ iadev->rx_dle_dma);
err_out:
return -ENOMEM;
}
@@ -1702,8 +1702,8 @@ static void tx_dle_intr(struct atm_dev *dev)
/* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
- pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
+ DMA_TO_DEVICE);
}
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
@@ -1917,8 +1917,8 @@ static int tx_init(struct atm_dev *dev)
readw(iadev->seg_reg+SEG_MASK_REG));)
/* Allocate 4k (boundary aligned) bytes */
- dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
- &iadev->tx_dle_dma);
+ dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
+ &iadev->tx_dle_dma, GFP_KERNEL);
if (!dle_addr) {
printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
goto err_out;
@@ -1989,8 +1989,10 @@ static int tx_init(struct atm_dev *dev)
goto err_free_tx_bufs;
}
iadev->tx_buf[i].cpcs = cpcs;
- iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
- cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
+ iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
+ cpcs,
+ sizeof(*cpcs),
+ DMA_TO_DEVICE);
}
iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
sizeof(struct desc_tbl_t), GFP_KERNEL);
@@ -2198,14 +2200,14 @@ err_free_tx_bufs:
while (--i >= 0) {
struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
- pci_unmap_single(iadev->pci, desc->dma_addr,
- sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
+ dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
+ sizeof(*desc->cpcs), DMA_TO_DEVICE);
kfree(desc->cpcs);
}
kfree(iadev->tx_buf);
err_free_dle:
- pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
- iadev->tx_dle_dma);
+ dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
+ iadev->tx_dle_dma);
err_out:
return -ENOMEM;
}
@@ -2476,20 +2478,20 @@ static void ia_free_tx(IADEV *iadev)
for (i = 0; i < iadev->num_tx_desc; i++) {
struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
- pci_unmap_single(iadev->pci, desc->dma_addr,
- sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
+ dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
+ sizeof(*desc->cpcs), DMA_TO_DEVICE);
kfree(desc->cpcs);
}
kfree(iadev->tx_buf);
- pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
- iadev->tx_dle_dma);
+ dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
+ iadev->tx_dle_dma);
}
static void ia_free_rx(IADEV *iadev)
{
kfree(iadev->rx_open);
- pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
- iadev->rx_dle_dma);
+ dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
+ iadev->rx_dle_dma);
}
static int ia_start(struct atm_dev *dev)
@@ -3009,8 +3011,8 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
/* Build the DLE structure */
wr_ptr = iadev->tx_dle_q.write;
memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
- wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
buf_desc_ptr->buf_start_lo;
/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
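
The iphase.c transmit and receive paths above swap pci_map_single()/pci_unmap_single() with PCI_DMA_{TO,FROM}DEVICE for dma_map_single()/dma_unmap_single() with DMA_{TO,FROM}_DEVICE. As a side note, the generic API also offers dma_mapping_error() for validating the returned handle; the sketch below shows the usual shape of a TX mapping (skb handling simplified, names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int my_tx_map(struct device *dev, struct sk_buff *skb,
		     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* ... point the descriptor at *handle and kick the hardware ... */
	return 0;
}

static void my_tx_unmap(struct device *dev, struct sk_buff *skb,
			dma_addr_t handle)
{
	/* Size and direction must match the original mapping. */
	dma_unmap_single(dev, handle, skb->len, DMA_TO_DEVICE);
}
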
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 93eaf8d94492..ce43ae3e87b3 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -346,7 +346,8 @@ static void lanai_buf_allocate(struct lanai_buffer *buf,
* everything, but the way the lanai uses DMA memory would
* make that a terrific pain. This is much simpler.
*/
- buf->start = pci_alloc_consistent(pci, size, &buf->dmaaddr);
+ buf->start = dma_alloc_coherent(&pci->dev,
+ size, &buf->dmaaddr, GFP_KERNEL);
if (buf->start != NULL) { /* Success */
/* Lanai requires 256-byte alignment of DMA bufs */
APRINTK((buf->dmaaddr & ~0xFFFFFF00) == 0,
@@ -372,8 +373,8 @@ static void lanai_buf_deallocate(struct lanai_buffer *buf,
struct pci_dev *pci)
{
if (buf->start != NULL) {
- pci_free_consistent(pci, lanai_buf_size(buf),
- buf->start, buf->dmaaddr);
+ dma_free_coherent(&pci->dev, lanai_buf_size(buf),
+ buf->start, buf->dmaaddr);
buf->start = buf->end = buf->ptr = NULL;
}
}
@@ -681,15 +682,6 @@ static inline int aal5_size(int size)
return cells * 48;
}
-/* How many bytes can we send if we have "space" space, assuming we have
- * to send full cells
- */
-static inline int aal5_spacefor(int space)
-{
- int cells = space / 48;
- return cells * 48;
-}
-
/* -------------------- FREE AN ATM SKB: */
static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
@@ -1953,12 +1945,7 @@ static int lanai_pci_start(struct lanai_dev *lanai)
return -ENXIO;
}
pci_set_master(pci);
- if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
- printk(KERN_WARNING DEV_LABEL
- "(itf %d): No suitable DMA available.\n", lanai->number);
- return -EBUSY;
- }
- if (pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING DEV_LABEL
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
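
lanai_buf_allocate() above is the plain coherent-memory case: pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP_KERNEL, and the matching free becomes dma_free_coherent(). A minimal sketch of that pairing for a descriptor ring (sizes and names are illustrative, not the lanai structures):

#include <linux/dma-mapping.h>

struct my_ring {
	void *virt;
	dma_addr_t phys;
	size_t size;
};

static int my_ring_alloc(struct device *dev, struct my_ring *r, size_t size)
{
	r->size = size;
	/* Coherent memory: CPU and device see the same contents without explicit syncs. */
	r->virt = dma_alloc_coherent(dev, size, &r->phys, GFP_KERNEL);
	return r->virt ? 0 : -ENOMEM;
}

static void my_ring_free(struct device *dev, struct my_ring *r)
{
	if (r->virt)
		dma_free_coherent(dev, r->size, r->virt, r->phys);
	r->virt = NULL;
}
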
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 9988ac98b6d8..b7e1cc0a97c8 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -252,10 +252,10 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
}
idr_destroy(&card->idr);
- pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
- card->rsq.org, card->rsq.dma);
- pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
- card->tsq.org, card->tsq.dma);
+ dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+ card->rsq.org, card->rsq.dma);
+ dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+ card->tsq.org, card->tsq.dma);
free_irq(card->pcidev->irq, card);
iounmap(card->membase);
kfree(card);
@@ -370,8 +370,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
ns_init_card_error(card, error);
return error;
}
- if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
+ if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING
"nicstar%d: No suitable DMA available.\n", i);
error = 2;
@@ -535,9 +534,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
writel(0x00000000, card->membase + VPM);
/* Initialize TSQ */
- card->tsq.org = pci_alloc_consistent(card->pcidev,
- NS_TSQSIZE + NS_TSQ_ALIGNMENT,
- &card->tsq.dma);
+ card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
+ NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+ &card->tsq.dma, GFP_KERNEL);
if (card->tsq.org == NULL) {
printk("nicstar%d: can't allocate TSQ.\n", i);
error = 10;
@@ -554,9 +553,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
/* Initialize RSQ */
- card->rsq.org = pci_alloc_consistent(card->pcidev,
- NS_RSQSIZE + NS_RSQ_ALIGNMENT,
- &card->rsq.dma);
+ card->rsq.org = dma_alloc_coherent(&card->pcidev->dev,
+ NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+ &card->rsq.dma, GFP_KERNEL);
if (card->rsq.org == NULL) {
printk("nicstar%d: can't allocate RSQ.\n", i);
error = 11;
@@ -874,7 +873,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
if (!scq)
return NULL;
- scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
+ scq->org = dma_alloc_coherent(&card->pcidev->dev,
+ 2 * size, &scq->dma, GFP_KERNEL);
if (!scq->org) {
kfree(scq);
return NULL;
@@ -936,10 +936,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
}
}
kfree(scq->skb);
- pci_free_consistent(card->pcidev,
- 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
- VBR_SCQSIZE : CBR_SCQSIZE),
- scq->org, scq->dma);
+ dma_free_coherent(&card->pcidev->dev,
+ 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
+ VBR_SCQSIZE : CBR_SCQSIZE),
+ scq->org, scq->dma);
kfree(scq);
}
@@ -957,11 +957,11 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
handle2 = NULL;
addr2 = 0;
handle1 = skb;
- addr1 = pci_map_single(card->pcidev,
+ addr1 = dma_map_single(&card->pcidev->dev,
skb->data,
(NS_PRV_BUFTYPE(skb) == BUF_SM
? NS_SMSKBSIZE : NS_LGSKBSIZE),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
#ifdef GENERAL_DEBUG
@@ -1670,8 +1670,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
ATM_SKB(skb)->vcc = vcc;
- NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
if (vcc->qos.aal == ATM_AAL5) {
buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
@@ -1930,10 +1930,10 @@ static void drain_scq(ns_dev * card, scq_info * scq, int pos)
XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
card->index, skb, i);
if (skb != NULL) {
- pci_unmap_single(card->pcidev,
+ dma_unmap_single(&card->pcidev->dev,
NS_PRV_DMA(skb),
skb->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
if (vcc && vcc->pop != NULL) {
vcc->pop(vcc, skb);
@@ -1992,16 +1992,16 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
return;
}
idr_remove(&card->idr, id);
- pci_dma_sync_single_for_cpu(card->pcidev,
- NS_PRV_DMA(skb),
- (NS_PRV_BUFTYPE(skb) == BUF_SM
- ? NS_SMSKBSIZE : NS_LGSKBSIZE),
- PCI_DMA_FROMDEVICE);
- pci_unmap_single(card->pcidev,
+ dma_sync_single_for_cpu(&card->pcidev->dev,
+ NS_PRV_DMA(skb),
+ (NS_PRV_BUFTYPE(skb) == BUF_SM
+ ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+ DMA_FROM_DEVICE);
+ dma_unmap_single(&card->pcidev->dev,
NS_PRV_DMA(skb),
(NS_PRV_BUFTYPE(skb) == BUF_SM
? NS_SMSKBSIZE : NS_LGSKBSIZE),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
vpi = ns_rsqe_vpi(rsqe);
vci = ns_rsqe_vci(rsqe);
if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
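
The nicstar.c and idt77252 receive paths keep a buffer mapped across several hardware passes, so the ownership handoffs become dma_sync_single_for_cpu() before the CPU reads the data and dma_sync_single_for_device() before the buffer is re-posted. A rough sketch of that recycle pattern, with hypothetical names:

#include <linux/dma-mapping.h>

/* CPU is about to inspect a buffer the device just filled. */
static void my_rx_claim(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... parse the received frame ... */
}

/* Same mapping is reused: hand ownership back to the device. */
static void my_rx_recycle(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* ... re-post the buffer address to the hardware ... */
}
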
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 21b0bc6a9c96..74e18b0a6d89 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -785,8 +785,8 @@ static void solos_bh(unsigned long card_arg)
skb = card->rx_skb[port];
card->rx_skb[port] = NULL;
- pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
- RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
+ RX_DMA_SIZE, DMA_FROM_DEVICE);
header = (void *)skb->data;
size = le16_to_cpu(header->size);
@@ -872,8 +872,8 @@ static void solos_bh(unsigned long card_arg)
struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
if (skb) {
SKB_CB(skb)->dma_addr =
- pci_map_single(card->dev, skb->data,
- RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
+ dma_map_single(&card->dev->dev, skb->data,
+ RX_DMA_SIZE, DMA_FROM_DEVICE);
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + RX_DMA_ADDR(port));
card->rx_skb[port] = skb;
@@ -1069,8 +1069,8 @@ static uint32_t fpga_tx(struct solos_card *card)
if (tx_pending & 1) {
struct sk_buff *oldskb = card->tx_skb[port];
if (oldskb) {
- pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
- oldskb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&card->dev->dev, SKB_CB(oldskb)->dma_addr,
+ oldskb->len, DMA_TO_DEVICE);
card->tx_skb[port] = NULL;
}
spin_lock(&card->tx_queue_lock);
@@ -1089,8 +1089,8 @@ static uint32_t fpga_tx(struct solos_card *card)
data = card->dma_bounce + (BUF_SIZE * port);
memcpy(data, skb->data, skb->len);
}
- SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
- skb->len, PCI_DMA_TODEVICE);
+ SKB_CB(skb)->dma_addr = dma_map_single(&card->dev->dev, data,
+ skb->len, DMA_TO_DEVICE);
card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
@@ -1210,7 +1210,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto out;
}
- err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n");
goto out;
@@ -1411,14 +1411,14 @@ static void atm_remove(struct solos_card *card)
skb = card->rx_skb[i];
if (skb) {
- pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
- RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
+ RX_DMA_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
skb = card->tx_skb[i];
if (skb) {
- pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
while ((skb = skb_dequeue(&card->tx_queue[i])))
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 969c3c29000c..cecfb943762f 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1306,19 +1306,20 @@ static int zatm_start(struct atm_dev *dev)
if (!mbx_entries[i])
continue;
- mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma);
+ mbx = dma_alloc_coherent(&pdev->dev,
+ 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
if (!mbx) {
error = -ENOMEM;
goto out;
}
/*
- * Alignment provided by pci_alloc_consistent() isn't enough
+ * Alignment provided by dma_alloc_coherent() isn't enough
* for this device.
*/
if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
printk(KERN_ERR DEV_LABEL "(itf %d): system "
"bus incompatible with driver\n", dev->number);
- pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma);
+ dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
error = -ENODEV;
goto out;
}
@@ -1354,9 +1355,9 @@ out_tx:
kfree(zatm_dev->tx_map);
out:
while (i-- > 0) {
- pci_free_consistent(pdev, 2*MBX_SIZE(i),
- (void *)zatm_dev->mbx_start[i],
- zatm_dev->mbx_dma[i]);
+ dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
+ (void *)zatm_dev->mbx_start[i],
+ zatm_dev->mbx_dma[i]);
}
free_irq(zatm_dev->irq, dev);
goto done;
@@ -1608,6 +1609,10 @@ static int zatm_init_one(struct pci_dev *pci_dev,
if (ret < 0)
goto out_disable;
+ ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ goto out_disable;
+
zatm_dev->pci_dev = pci_dev;
dev->dev_data = zatm_dev;
zatm_dev->copper = (int)ent->driver_data;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 97e2baf6e5d8..07304a3b9ee2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2080,54 +2080,47 @@ int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
}
EXPORT_SYMBOL(dev_printk_emit);
-static int __dev_printk(const char *level, const struct device *dev,
+static void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{
- if (!dev)
- return printk("%s(NULL device *): %pV", level, vaf);
-
- return dev_printk_emit(level[1] - '0', dev,
- "%s %s: %pV",
- dev_driver_string(dev), dev_name(dev), vaf);
+ if (dev)
+ dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
+ dev_driver_string(dev), dev_name(dev), vaf);
+ else
+ printk("%s(NULL device *): %pV", level, vaf);
}
-int dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int r;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- r = __dev_printk(level, dev, &vaf);
+ __dev_printk(level, dev, &vaf);
va_end(args);
-
- return r;
}
EXPORT_SYMBOL(dev_printk);
#define define_dev_printk_level(func, kern_level) \
-int func(const struct device *dev, const char *fmt, ...) \
+void func(const struct device *dev, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
- int r; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
- r = __dev_printk(kern_level, dev, &vaf); \
+ __dev_printk(kern_level, dev, &vaf); \
\
va_end(args); \
- \
- return r; \
} \
EXPORT_SYMBOL(func);
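
The drivers/base/core.c hunk changes dev_printk() and its level wrappers to return void, since nothing meaningful ever came back from the underlying printk path. The %pV/struct va_format plumbing they rely on is generic printk machinery; a standalone sketch of a void-returning wrapper built the same way (the my_log name is made up):

#include <linux/kernel.h>
#include <linux/printk.h>

static void my_log(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the caller's format/args inside our own format string. */
	printk(KERN_INFO "%s: %pV", prefix, &vaf);
	va_end(args);
}
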
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f829a4c71749..f160ea44a86d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -245,7 +245,7 @@ static ssize_t print_cpus_offline(struct device *dev,
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
- n = cpulist_scnprintf(buf, len, offline);
+ n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
free_cpumask_var(offline);
/* display offline cpus >= nr_cpu_ids */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 58470c395301..6c5c9edf5ff6 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -94,7 +94,7 @@ static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
{
- return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+ return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
/* firmware behavior options */
@@ -446,7 +446,6 @@ static int fw_add_devm_name(struct device *dev, const char *name)
*/
#ifdef CONFIG_FW_LOADER_USER_HELPER
struct firmware_priv {
- struct delayed_work timeout_work;
bool nowait;
struct device dev;
struct firmware_buf *buf;
@@ -836,16 +835,6 @@ static struct bin_attribute firmware_attr_data = {
.write = firmware_data_write,
};
-static void firmware_class_timeout_work(struct work_struct *work)
-{
- struct firmware_priv *fw_priv = container_of(work,
- struct firmware_priv, timeout_work.work);
-
- mutex_lock(&fw_lock);
- fw_load_abort(fw_priv);
- mutex_unlock(&fw_lock);
-}
-
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
struct device *device, unsigned int opt_flags)
@@ -855,16 +844,12 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
if (!fw_priv) {
- dev_err(device, "%s: kmalloc failed\n", __func__);
fw_priv = ERR_PTR(-ENOMEM);
goto exit;
}
fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
fw_priv->fw = firmware;
- INIT_DELAYED_WORK(&fw_priv->timeout_work,
- firmware_class_timeout_work);
-
f_dev = &fw_priv->dev;
device_initialize(f_dev);
@@ -917,16 +902,19 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
buf->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
- if (timeout != MAX_SCHEDULE_TIMEOUT)
- queue_delayed_work(system_power_efficient_wq,
- &fw_priv->timeout_work, timeout);
-
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
+ } else {
+ timeout = MAX_JIFFY_OFFSET;
}
- retval = wait_for_completion_interruptible(&buf->completion);
+ retval = wait_for_completion_interruptible_timeout(&buf->completion,
+ timeout);
+ if (retval == -ERESTARTSYS || !retval) {
+ mutex_lock(&fw_lock);
+ fw_load_abort(fw_priv);
+ mutex_unlock(&fw_lock);
+ }
- cancel_delayed_work_sync(&fw_priv->timeout_work);
if (is_fw_load_aborted(buf))
retval = -EAGAIN;
else if (!buf->data)
@@ -1194,7 +1182,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
EXPORT_SYMBOL(request_firmware);
/**
- * request_firmware: - load firmware directly without usermode helper
+ * request_firmware_direct: - load firmware directly without usermode helper
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
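
The firmware_class.c rework above drops the delayed abort work item and folds the timeout into the wait itself. The key is the return convention of wait_for_completion_interruptible_timeout(): negative (-ERESTARTSYS) on a signal, 0 on timeout, and a positive count of remaining jiffies on completion, which is exactly what the new retval checks distinguish. A hedged sketch of that pattern outside the firmware loader:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int my_wait_for_event(struct completion *done, unsigned long timeout)
{
	long ret;

	ret = wait_for_completion_interruptible_timeout(done, timeout);
	if (ret == -ERESTARTSYS)	/* interrupted by a signal */
		return -ERESTARTSYS;
	if (ret == 0)			/* timed out before completion */
		return -ETIMEDOUT;
	return 0;			/* completed; 'ret' jiffies were left */
}
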
diff --git a/drivers/base/node.c b/drivers/base/node.c
index a3b82e9c7f20..36fabe43cd44 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -605,7 +605,8 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
{
int n;
- n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
+ n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ nodemask_pr_args(&node_states[state]));
buf[n++] = '\n';
buf[n] = '\0';
return n;
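
The cpu.c and node.c hunks replace the old cpulist_scnprintf()/nodelist_scnprintf() helpers with the printk bitmap format: %*pbl prints a bitmap as a range list (for example "0-3,5"), and cpumask_pr_args()/nodemask_pr_args() supply the width/pointer pair it expects. A small sketch, assuming an arbitrary cpumask:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static ssize_t my_show_offline(char *buf, size_t len,
			       const struct cpumask *offline)
{
	/* "%*pbl" -> list form ("0-3,8"); "%*pb" would print the raw hex mask. */
	return scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(offline));
}
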
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index d626576a4f75..7fdd0172605a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
- if (!ce) {
- dev_err(dev, "Not enough memory for clock entry.\n");
+ if (!ce)
return -ENOMEM;
- }
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b0f138806bbc..f32b802b98f4 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -19,8 +19,8 @@
* @dev: Device to handle.
*
* If power.subsys_data is NULL, point it to a new object, otherwise increment
- * its reference counter. Return 1 if a new object has been created, otherwise
- * return 0 or error code.
+ * its reference counter. Return 0 if new object has been created or refcount
+ * increased, otherwise negative error code.
*/
int dev_pm_get_subsys_data(struct device *dev)
{
@@ -56,13 +56,11 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
* @dev: Device to handle.
*
* If the reference counter of power.subsys_data is zero after dropping the
- * reference, power.subsys_data is removed. Return 1 if that happens or 0
- * otherwise.
+ * reference, power.subsys_data is removed.
*/
-int dev_pm_put_subsys_data(struct device *dev)
+void dev_pm_put_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
- int ret = 1;
spin_lock_irq(&dev->power.lock);
@@ -70,18 +68,14 @@ int dev_pm_put_subsys_data(struct device *dev)
if (!psd)
goto out;
- if (--psd->refcount == 0) {
+ if (--psd->refcount == 0)
dev->power.subsys_data = NULL;
- } else {
+ else
psd = NULL;
- ret = 0;
- }
out:
spin_unlock_irq(&dev->power.lock);
kfree(psd);
-
- return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0d8780c04a5e..ba4abbe4693c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -344,14 +344,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
struct device *dev;
gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
-
- mutex_lock(&gpd_data->lock);
dev = gpd_data->base.dev;
- if (!dev) {
- mutex_unlock(&gpd_data->lock);
- return NOTIFY_DONE;
- }
- mutex_unlock(&gpd_data->lock);
for (;;) {
struct generic_pm_domain *genpd;
@@ -1384,25 +1377,66 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#endif /* CONFIG_PM_SLEEP */
-static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
+ struct generic_pm_domain *genpd,
+ struct gpd_timing_data *td)
{
struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ return ERR_PTR(ret);
gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data)
- return NULL;
+ if (!gpd_data) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ if (td)
+ gpd_data->td = *td;
- mutex_init(&gpd_data->lock);
+ gpd_data->base.dev = dev;
+ gpd_data->need_restore = -1;
+ gpd_data->td.constraint_changed = true;
+ gpd_data->td.effective_constraint_ns = -1;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+ dev->pm_domain = &genpd->domain;
+
+ spin_unlock_irq(&dev->power.lock);
+
return gpd_data;
+
+ err_free:
+ spin_unlock_irq(&dev->power.lock);
+ kfree(gpd_data);
+ err_put:
+ dev_pm_put_subsys_data(dev);
+ return ERR_PTR(ret);
}
-static void __pm_genpd_free_dev_data(struct device *dev,
- struct generic_pm_domain_data *gpd_data)
+static void genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
{
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ spin_lock_irq(&dev->power.lock);
+
+ dev->pm_domain = NULL;
+ dev->power.subsys_data->domain_data = NULL;
+
+ spin_unlock_irq(&dev->power.lock);
+
kfree(gpd_data);
+ dev_pm_put_subsys_data(dev);
}
/**
@@ -1414,8 +1448,7 @@ static void __pm_genpd_free_dev_data(struct device *dev,
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
struct gpd_timing_data *td)
{
- struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1423,9 +1456,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data_new = __pm_genpd_alloc_dev_data(dev);
- if (!gpd_data_new)
- return -ENOMEM;
+ gpd_data = genpd_alloc_dev_data(dev, genpd, td);
+ if (IS_ERR(gpd_data))
+ return PTR_ERR(gpd_data);
genpd_acquire_lock(genpd);
@@ -1434,50 +1467,22 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
goto out;
}
- list_for_each_entry(pdd, &genpd->dev_list, list_node)
- if (pdd->dev == dev) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = dev_pm_get_subsys_data(dev);
+ ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
genpd->device_count++;
genpd->max_off_time_changed = true;
- spin_lock_irq(&dev->power.lock);
-
- dev->pm_domain = &genpd->domain;
- if (dev->power.subsys_data->domain_data) {
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- } else {
- gpd_data = gpd_data_new;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- }
- gpd_data->refcount++;
- if (td)
- gpd_data->td = *td;
-
- spin_unlock_irq(&dev->power.lock);
-
- if (genpd->attach_dev)
- genpd->attach_dev(genpd, dev);
-
- mutex_lock(&gpd_data->lock);
- gpd_data->base.dev = dev;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = -1;
- gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = -1;
- mutex_unlock(&gpd_data->lock);
out:
genpd_release_lock(genpd);
- if (gpd_data != gpd_data_new)
- __pm_genpd_free_dev_data(dev, gpd_data_new);
+ if (ret)
+ genpd_free_dev_data(dev, gpd_data);
+ else
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
}
@@ -1504,7 +1509,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
- bool remove = false;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1514,6 +1518,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
|| pd_to_genpd(dev->pm_domain) != genpd)
return -EINVAL;
+ /* The above validation also means we have existing domain_data. */
+ pdd = dev->power.subsys_data->domain_data;
+ gpd_data = to_gpd_data(pdd);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+
genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
@@ -1527,58 +1536,22 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
- spin_lock_irq(&dev->power.lock);
-
- dev->pm_domain = NULL;
- pdd = dev->power.subsys_data->domain_data;
list_del_init(&pdd->list_node);
- gpd_data = to_gpd_data(pdd);
- if (--gpd_data->refcount == 0) {
- dev->power.subsys_data->domain_data = NULL;
- remove = true;
- }
-
- spin_unlock_irq(&dev->power.lock);
-
- mutex_lock(&gpd_data->lock);
- pdd->dev = NULL;
- mutex_unlock(&gpd_data->lock);
genpd_release_lock(genpd);
- dev_pm_put_subsys_data(dev);
- if (remove)
- __pm_genpd_free_dev_data(dev, gpd_data);
+ genpd_free_dev_data(dev, gpd_data);
return 0;
out:
genpd_release_lock(genpd);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
}
/**
- * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "need restore" flag.
- */
-void pm_genpd_dev_need_restore(struct device *dev, bool val)
-{
- struct pm_subsys_data *psd;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- psd = dev_to_psd(dev);
- if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
-
-/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 106c69359306..677fb2843553 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -117,20 +117,20 @@ do { \
} while (0)
/**
- * find_device_opp() - find device_opp struct using device pointer
+ * _find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
*
* Search list of device OPPs for one containing matching device. Does a RCU
* reader operation to grab the pointer needed.
*
- * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
* Locking: This function must be called under rcu_read_lock(). device_opp
* is a RCU protected pointer. This means that device_opp is valid as long
* as we are under RCU lock.
*/
-static struct device_opp *find_device_opp(struct device *dev)
+static struct device_opp *_find_device_opp(struct device *dev)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
@@ -153,7 +153,7 @@ static struct device_opp *find_device_opp(struct device *dev)
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
* @opp: opp for which voltage has to be returned for
*
- * Return voltage in micro volt corresponding to the opp, else
+ * Return: voltage in micro volt corresponding to the opp, else
* return 0
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -169,6 +169,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
+ opp_rcu_lockdep_assert();
+
tmp_opp = rcu_dereference(opp);
if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
@@ -183,7 +185,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
* dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
* @opp: opp for which frequency has to be returned for
*
- * Return frequency in hertz corresponding to the opp, else
+ * Return: frequency in hertz corresponding to the opp, else
* return 0
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -199,6 +201,8 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
+ opp_rcu_lockdep_assert();
+
tmp_opp = rcu_dereference(opp);
if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
@@ -213,7 +217,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
- * This function returns the number of available opps if there are any,
+ * Return: This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
* Locking: This function takes rcu_read_lock().
@@ -226,7 +230,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
rcu_read_lock();
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
count = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n",
@@ -251,9 +255,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* @freq: frequency to search for
* @available: true/false - match for available opp
*
- * Searches for exact match in the opp list and returns pointer to the matching
- * opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR. Error return values can be:
+ * Return: Searches for exact match in the opp list and returns pointer to the
+ * matching opp if found, else returns ERR_PTR in case of error and should
+ * be handled using IS_ERR. Error return values can be:
* EINVAL: for bad pointer
* ERANGE: no match found for search
* ENODEV: if device not found in list of registered devices
@@ -280,7 +284,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
opp_rcu_lockdep_assert();
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
@@ -307,7 +311,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
* Search for the matching ceil *available* OPP from a starting freq
* for a device.
*
- * Returns matching *opp and refreshes *freq accordingly, else returns
+ * Return: matching *opp and refreshes *freq accordingly, else returns
* ERR_PTR in case of error and should be handled using IS_ERR. Error return
* values can be:
* EINVAL: for bad pointer
@@ -333,7 +337,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp))
return ERR_CAST(dev_opp);
@@ -357,7 +361,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
* Search for the matching floor *available* OPP from a starting freq
* for a device.
*
- * Returns matching *opp and refreshes *freq accordingly, else returns
+ * Return: matching *opp and refreshes *freq accordingly, else returns
* ERR_PTR in case of error and should be handled using IS_ERR. Error return
* values can be:
* EINVAL: for bad pointer
@@ -383,7 +387,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp))
return ERR_CAST(dev_opp);
@@ -403,7 +407,16 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-static struct device_opp *add_device_opp(struct device *dev)
+/**
+ * _add_device_opp() - Allocate a new device OPP table
+ * @dev: device for which we do this operation
+ *
+ * New device node which uses OPPs - used when multiple devices with OPP tables
+ * are maintained.
+ *
+ * Return: valid device_opp pointer if success, else NULL.
+ */
+static struct device_opp *_add_device_opp(struct device *dev)
{
struct device_opp *dev_opp;
@@ -424,8 +437,35 @@ static struct device_opp *add_device_opp(struct device *dev)
return dev_opp;
}
-static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
- unsigned long u_volt, bool dynamic)
+/**
+ * _opp_add_dynamic() - Allocate a dynamic OPP.
+ * @dev: device for which we do this operation
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
+ * @dynamic: Dynamically added OPPs.
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ *
+ * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
+ * freed by of_free_opp_table.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ */
+static int _opp_add_dynamic(struct device *dev, unsigned long freq,
+ long u_volt, bool dynamic)
{
struct device_opp *dev_opp = NULL;
struct dev_pm_opp *opp, *new_opp;
@@ -434,10 +474,8 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
/* allocate new OPP node */
new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp) {
- dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
+ if (!new_opp)
return -ENOMEM;
- }
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
@@ -449,9 +487,9 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
new_opp->dynamic = dynamic;
/* Check for existing list for 'dev' */
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- dev_opp = add_device_opp(dev);
+ dev_opp = _add_device_opp(dev);
if (!dev_opp) {
ret = -ENOMEM;
goto free_opp;
@@ -519,34 +557,53 @@ free_opp:
* mutex cannot be locked.
*
* Return:
- * 0: On success OR
+ * 0 On success OR
* Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST: Freq are same and volt are different OR
+ * -EEXIST Freq are same and volt are different OR
* Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM: Memory allocation failure
+ * -ENOMEM Memory allocation failure
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
- return dev_pm_opp_add_dynamic(dev, freq, u_volt, true);
+ return _opp_add_dynamic(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
-static void kfree_opp_rcu(struct rcu_head *head)
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head: RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
{
struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
kfree_rcu(opp, rcu_head);
}
-static void kfree_device_rcu(struct rcu_head *head)
+/**
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head: RCU head
+ */
+static void _kfree_device_rcu(struct rcu_head *head)
{
struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
kfree_rcu(device_opp, rcu_head);
}
-static void __dev_pm_opp_remove(struct device_opp *dev_opp,
- struct dev_pm_opp *opp)
+/**
+ * _opp_remove() - Remove an OPP from a table definition
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp: pointer to the OPP to remove
+ *
+ * This function removes an opp definition from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * It is assumed that the caller holds required mutex for an RCU updater
+ * strategy.
+ */
+static void _opp_remove(struct device_opp *dev_opp,
+ struct dev_pm_opp *opp)
{
/*
* Notify the changes in the availability of the operable
@@ -554,12 +611,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
*/
srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
if (list_empty(&dev_opp->opp_list)) {
list_del_rcu(&dev_opp->node);
call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
- kfree_device_rcu);
+ _kfree_device_rcu);
}
}
@@ -569,6 +626,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
* @freq: OPP to remove with matching 'freq'
*
* This function removes an opp from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
*/
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
@@ -579,7 +642,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp))
goto unlock;
@@ -596,14 +659,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
goto unlock;
}
- __dev_pm_opp_remove(dev_opp, opp);
+ _opp_remove(dev_opp, opp);
unlock:
mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
/**
- * opp_set_availability() - helper to set the availability of an opp
+ * _opp_set_availability() - helper to set the availability of an opp
* @dev: device for which we do this operation
* @freq: OPP frequency to modify availability
* @availability_req: availability status requested for this opp
@@ -611,7 +674,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
* Set the availability of an OPP with an RCU operation, opp_{enable,disable}
* share a common logic which is isolated here.
*
- * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
* successful.
*
@@ -621,8 +684,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-static int opp_set_availability(struct device *dev, unsigned long freq,
- bool availability_req)
+static int _opp_set_availability(struct device *dev, unsigned long freq,
+ bool availability_req)
{
struct device_opp *dev_opp;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
@@ -630,15 +693,13 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
/* keep the node allocated */
new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp) {
- dev_warn(dev, "%s: Unable to create OPP\n", __func__);
+ if (!new_opp)
return -ENOMEM;
- }
mutex_lock(&dev_opp_list_lock);
/* Find the device_opp */
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
r = PTR_ERR(dev_opp);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -668,7 +729,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
list_replace_rcu(&opp->node, &new_opp->node);
mutex_unlock(&dev_opp_list_lock);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Notify the change of the OPP availability */
if (availability_req)
@@ -700,10 +761,14 @@ unlock:
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
*/
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
- return opp_set_availability(dev, freq, true);
+ return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
@@ -722,26 +787,41 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
*/
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
- return opp_set_availability(dev, freq, false);
+ return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
+ *
+ * Return: pointer to the notifier head if found, otherwise -ENODEV or
+ * -EINVAL cast as a pointer, depending on the type of error. The value must be
+ * checked with IS_ERR to determine whether it is a valid pointer or an error.
+ *
+ * Locking: This function must be called under rcu_read_lock(). dev_opp is an RCU
+ * protected pointer, so the notifier head returned here remains valid only while
+ * the RCU read-side lock is held. The returned pointer must be used prior to
+ * unlocking with rcu_read_unlock() to maintain its integrity.
*/
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
- struct device_opp *dev_opp = find_device_opp(dev);
+ struct device_opp *dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp))
return ERR_CAST(dev_opp); /* matching type */
return &dev_opp->srcu_head;
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
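[Hedged usage sketch for the newly exported symbol (helper name hypothetical): grab the head under rcu_read_lock(), drop the lock, then register the notifier block, since srcu_notifier_chain_register() may sleep.]

static int foo_register_opp_notifier(struct device *dev,
				     struct notifier_block *nb)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();

	if (!ret)
		ret = srcu_notifier_chain_register(nh, nb);

	return ret;
}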
#ifdef CONFIG_OF
/**
@@ -749,6 +829,22 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
* @dev: device pointer used to lookup device OPPs.
*
* Register the initial OPP table with the OPP library for given device.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when the 'operating-points' property is not found or contains
+ * invalid data in the device node.
+ * -ENODATA when an empty 'operating-points' property is found
*/
int of_init_opp_table(struct device *dev)
{
@@ -777,7 +873,7 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (dev_pm_opp_add_dynamic(dev, freq, volt, false))
+ if (_opp_add_dynamic(dev, freq, volt, false))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
nr -= 2;
@@ -792,6 +888,12 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
* @dev: device pointer used to lookup device OPPs.
*
* Free OPPs created using static entries present in DT.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
*/
void of_free_opp_table(struct device *dev)
{
@@ -799,7 +901,7 @@ void of_free_opp_table(struct device *dev)
struct dev_pm_opp *opp, *tmp;
/* Check for existing list for 'dev' */
- dev_opp = find_device_opp(dev);
+ dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int error = PTR_ERR(dev_opp);
if (error != -ENODEV)
@@ -816,7 +918,7 @@ void of_free_opp_table(struct device *dev)
/* Free static OPPs */
list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
if (!opp->dynamic)
- __dev_pm_opp_remove(dev_opp, opp);
+ _opp_remove(dev_opp, opp);
}
mutex_unlock(&dev_opp_list_lock);
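[To tie the OF helpers and the RCU rules above together, a minimal hedged sketch of a consumer, assuming a device node with an "operating-points" property; the function name is hypothetical.]

static int foo_setup_opps(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0, volt;
	int ret;

	ret = of_init_opp_table(dev);	/* parses "operating-points" from DT */
	if (ret)
		return ret;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);	/* lowest available OPP */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		of_free_opp_table(dev);
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();	/* the opp pointer must not be used after this */

	dev_info(dev, "lowest OPP: %lu Hz at %lu uV\n", freq, volt);
	return 0;
}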
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index a8fe4c1a8d07..e56d538d039e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -64,6 +64,8 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf;
s32 val;
+ lockdep_assert_held(&dev->power.lock);
+
if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;
@@ -104,6 +106,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
+ lockdep_assert_held(&dev->power.lock);
+
return IS_ERR_OR_NULL(dev->power.qos) ?
0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
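[For context, a hedged sketch of the calling convention these lockdep_assert_held() annotations enforce (wrapper name hypothetical): the double-underscore helpers must only run with dev->power.lock held.]

static s32 foo_read_resume_latency(struct device *dev)
{
	unsigned long flags;
	s32 val;

	spin_lock_irqsave(&dev->power.lock, flags);
	val = __dev_pm_qos_read_value(dev);	/* lockdep assertion is satisfied */
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return val;
}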
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 0da5865df5b1..beb8b27d4621 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -51,9 +51,11 @@ struct regmap_async {
struct regmap {
union {
struct mutex mutex;
- spinlock_t spinlock;
+ struct {
+ spinlock_t spinlock;
+ unsigned long spinlock_flags;
+ };
};
- unsigned long spinlock_flags;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
void regmap_async_complete_cb(struct regmap_async *async, int ret);
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config);
+
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index e4c45d2299c1..8d304e2a943d 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg,
}
static const struct regmap_bus ac97_regmap_bus = {
- .reg_write = regmap_ac97_reg_write,
- .reg_read = regmap_ac97_reg_read,
+ .reg_write = regmap_ac97_reg_write,
+ .reg_read = regmap_ac97_reg_read,
};
/**
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 053150a7f9f2..4b76e33110a2 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -14,6 +14,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include "internal.h"
static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
unsigned int *val)
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = {
.reg_read = regmap_smbus_word_reg_read,
};
+static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_word_swapped(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (val > 0xffff || reg > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_swapped(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word_swapped = {
+ .reg_write = regmap_smbus_word_write_swapped,
+ .reg_read = regmap_smbus_word_read_swapped,
+};
+
static int regmap_i2c_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
- return &regmap_smbus_word;
+ switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
+ case REGMAP_ENDIAN_LITTLE:
+ return &regmap_smbus_word;
+ case REGMAP_ENDIAN_BIG:
+ return &regmap_smbus_word_swapped;
+ default: /* everything else is not supported */
+ break;
+ }
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
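[Illustrative only: with the hunk above, a 16-bit I2C device that declares big-endian values is now served by the swapped SMBus-word bus; the regmap_config below uses made-up register parameters.]

static const struct regmap_config foo_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 16,
	.max_register		= 0x7f,
	/* REGMAP_ENDIAN_BIG selects regmap_smbus_word_swapped */
	.val_format_endian	= REGMAP_ENDIAN_BIG,
};

/* in the I2C driver's probe():
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 */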
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d2f8a818d200..f99b098ddabf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
return REGMAP_ENDIAN_BIG;
}
-static enum regmap_endian regmap_get_val_endian(struct device *dev,
- const struct regmap_bus *bus,
- const struct regmap_config *config)
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config)
{
struct device_node *np;
enum regmap_endian endian;
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev,
/* Use this if no other value was found */
return REGMAP_ENDIAN_BIG;
}
+EXPORT_SYMBOL_GPL(regmap_get_val_endian);
/**
* regmap_init(): Initialise register map
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 314ae4032f3e..ac6c5fca906d 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -25,22 +25,18 @@ struct bcma_bus;
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout);
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
+void bcma_init_bus(struct bcma_bus *bus);
int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
-int __init bcma_bus_early_register(struct bcma_bus *bus,
- struct bcma_device *core_cc,
- struct bcma_device *core_mips);
+int __init bcma_bus_early_register(struct bcma_bus *bus);
#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
/* scan.c */
+void bcma_detect_chip(struct bcma_bus *bus);
int bcma_bus_scan(struct bcma_bus *bus);
-int __init bcma_bus_scan_early(struct bcma_bus *bus,
- struct bcma_device_id *match,
- struct bcma_device *core);
-void bcma_init_bus(struct bcma_bus *bus);
/* sprom.c */
int bcma_sprom_get(struct bcma_bus *bus);
@@ -111,6 +107,14 @@ extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+#else
+static inline bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+ return false;
+}
+static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+{
+}
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
#ifdef CONFIG_BCMA_DRIVER_GPIO
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 19f679667ca4..b7c8a8d4e6d1 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -79,7 +79,9 @@ static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU) {
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
- /* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP clock */
+ /* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP
+ * clock
+ */
return bcma_chipco_get_alp_clock(cc) / 4000;
else
/* based on 32KHz ILP clock */
@@ -97,7 +99,8 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
wdt.driver_data = cc;
wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
- wdt.max_timer_ms = bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
+ wdt.max_timer_ms =
+ bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
cc->core->bus->num, &wdt,
@@ -175,7 +178,6 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
{
u32 maxt;
- enum bcma_clkmode clkmode;
maxt = bcma_chipco_watchdog_get_max_timer(cc);
if (cc->capabilities & BCMA_CC_CAP_PMU) {
@@ -185,8 +187,13 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
ticks = maxt;
bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
} else {
- clkmode = ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC;
- bcma_core_set_clockmode(cc->core, clkmode);
+ struct bcma_bus *bus = cc->core->bus;
+
+ if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+ bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
+ bcma_core_set_clockmode(cc->core,
+ ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
+
if (ticks > maxt)
ticks = maxt;
/* instant NMI */
@@ -335,7 +342,8 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
| BCMA_CC_CORECTL_UARTCLKEN);
}
} else {
- bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
+ bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n",
+ ccrev);
return;
}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 50329d1057ed..786666488a2d 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -145,6 +145,47 @@ static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
}
/**************************************************
+ * Early init.
+ **************************************************/
+
+static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
+{
+ struct bcma_device *core = pc->core;
+ u16 val16, core_index;
+ uint regoff;
+
+ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
+ core_index = (u16)core->core_index;
+
+ val16 = pcicore_read16(pc, regoff);
+ if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
+ != core_index) {
+ val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
+ (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
+ pcicore_write16(pc, regoff, val16);
+ }
+}
+
+/*
+ * Apply some early fixes required before accessing SPROM.
+ * See also si_pci_fixcfg.
+ */
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+ if (pc->early_setup_done)
+ return;
+
+ pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
+ if (pc->hostmode)
+ goto out;
+
+ bcma_core_pci_fixcfg(pc);
+
+out:
+ pc->early_setup_done = true;
+}
+
+/**************************************************
* Workarounds.
**************************************************/
@@ -175,24 +216,6 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}
-static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
-{
- struct bcma_device *core = pc->core;
- u16 val16, core_index;
- uint regoff;
-
- regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
- core_index = (u16)core->core_index;
-
- val16 = pcicore_read16(pc, regoff);
- if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
- != core_index) {
- val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
- (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
- pcicore_write16(pc, regoff, val16);
- }
-}
-
/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
@@ -216,7 +239,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
- bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc);
bcma_core_pci_config_fixup(pc);
}
@@ -226,13 +248,11 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
if (pc->setup_done)
return;
-#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
- pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
+ bcma_core_pci_early_init(pc);
+
if (pc->hostmode)
bcma_core_pci_hostmode_init(pc);
-#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
-
- if (!pc->hostmode)
+ else
bcma_core_pci_clientmode_init(pc);
}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index cd9161a8b3a1..53c6a8a58859 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -13,10 +13,12 @@
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
+ int win2 = core->bus->host_is_pcie2 ?
+ BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;
+
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
core->addr);
- pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
- core->wrap);
+ pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
core->bus->mapped_core = core;
bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 335cbcfd945b..2dce34789329 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -193,7 +193,7 @@ int __init bcma_host_soc_init(struct bcma_soc *soc)
int err;
/* Scan bus and initialize it */
- err = bcma_bus_early_register(bus, &soc->core_cc, &soc->core_mips);
+ err = bcma_bus_early_register(bus);
if (err)
iounmap(bus->mmio);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 534e1337766d..38bde6eab8a4 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -268,6 +268,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
+void bcma_init_bus(struct bcma_bus *bus)
+{
+ mutex_lock(&bcma_buses_mutex);
+ bus->num = bcma_bus_next_num++;
+ mutex_unlock(&bcma_buses_mutex);
+
+ INIT_LIST_HEAD(&bus->cores);
+ bus->nr_cores = 0;
+
+ bcma_detect_chip(bus);
+}
+
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
int err;
@@ -356,12 +368,19 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
struct bcma_device *core, *tmp;
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+ if (!core->dev_registered)
+ continue;
list_del(&core->list);
- if (core->dev_registered)
- device_unregister(&core->dev);
+ device_unregister(&core->dev);
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
platform_device_unregister(bus->drv_cc.watchdog);
+
+ /* Now no one uses the internally-handled cores, so we can free them */
+ list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+ list_del(&core->list);
+ kfree(core);
+ }
}
int bcma_bus_register(struct bcma_bus *bus)
@@ -369,10 +388,6 @@ int bcma_bus_register(struct bcma_bus *bus)
int err;
struct bcma_device *core;
- mutex_lock(&bcma_buses_mutex);
- bus->num = bcma_bus_next_num++;
- mutex_unlock(&bcma_buses_mutex);
-
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
@@ -387,6 +402,13 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_chipcommon_early_init(&bus->drv_cc);
}
+ /* Early init PCIE core */
+ core = bcma_find_core(bus, BCMA_CORE_PCIE);
+ if (core) {
+ bus->drv_pci[0].core = core;
+ bcma_core_pci_early_init(&bus->drv_pci[0]);
+ }
+
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {
if (bcma_is_core_needed_early(core->id.id))
@@ -459,7 +481,6 @@ int bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus)
{
- struct bcma_device *cores[3];
int err;
err = bcma_gpio_unregister(&bus->drv_cc);
@@ -470,46 +491,23 @@ void bcma_bus_unregister(struct bcma_bus *bus)
bcma_core_chipcommon_b_free(&bus->drv_cc_b);
- cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
- cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
- cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
-
bcma_unregister_cores(bus);
-
- kfree(cores[2]);
- kfree(cores[1]);
- kfree(cores[0]);
}
-int __init bcma_bus_early_register(struct bcma_bus *bus,
- struct bcma_device *core_cc,
- struct bcma_device *core_mips)
+/*
+ * This is a special version of the bus registration function designed for SoCs.
+ * It scans the bus and performs basic initialization of the main cores only.
+ * Please note that it requires memory allocation; however, it won't try to sleep.
+ */
+int __init bcma_bus_early_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
- struct bcma_device_id match;
- match.manuf = BCMA_MANUF_BCM;
- match.id = bcma_cc_core_id(bus);
- match.class = BCMA_CL_SIM;
- match.rev = BCMA_ANY_REV;
-
- /* Scan for chip common core */
- err = bcma_bus_scan_early(bus, &match, core_cc);
- if (err) {
- bcma_err(bus, "Failed to scan for common core: %d\n", err);
- return -1;
- }
-
- match.manuf = BCMA_MANUF_MIPS;
- match.id = BCMA_CORE_MIPS_74K;
- match.class = BCMA_CL_SIM;
- match.rev = BCMA_ANY_REV;
-
- /* Scan for mips core */
- err = bcma_bus_scan_early(bus, &match, core_mips);
+ /* Scan for devices (cores) */
+ err = bcma_bus_scan(bus);
if (err) {
- bcma_err(bus, "Failed to scan for mips core: %d\n", err);
+ bcma_err(bus, "Failed to scan bus: %d\n", err);
return -1;
}
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 917520776879..df806b9c5490 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -435,15 +435,12 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
return 0;
}
-void bcma_init_bus(struct bcma_bus *bus)
+void bcma_detect_chip(struct bcma_bus *bus)
{
s32 tmp;
struct bcma_chipinfo *chipinfo = &(bus->chipinfo);
char chip_id[8];
- INIT_LIST_HEAD(&bus->cores);
- bus->nr_cores = 0;
-
bcma_scan_switch_core(bus, BCMA_ADDR_BASE);
tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
@@ -464,6 +461,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
int err, core_num = 0;
+ /* Skip if bus was already scanned (e.g. during early register) */
+ if (bus->nr_cores)
+ return 0;
+
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
@@ -522,61 +523,3 @@ out:
return err;
}
-
-int __init bcma_bus_scan_early(struct bcma_bus *bus,
- struct bcma_device_id *match,
- struct bcma_device *core)
-{
- u32 erombase;
- u32 __iomem *eromptr, *eromend;
-
- int err = -ENODEV;
- int core_num = 0;
-
- erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
- if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
- if (!eromptr)
- return -ENOMEM;
- } else {
- eromptr = bus->mmio;
- }
-
- eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
-
- bcma_scan_switch_core(bus, erombase);
-
- while (eromptr < eromend) {
- memset(core, 0, sizeof(*core));
- INIT_LIST_HEAD(&core->list);
- core->bus = bus;
-
- err = bcma_get_next_core(bus, &eromptr, match, core_num, core);
- if (err == -ENODEV) {
- core_num++;
- continue;
- } else if (err == -ENXIO)
- continue;
- else if (err == -ESPIPE)
- break;
- else if (err < 0)
- goto out;
-
- core->core_index = core_num++;
- bus->nr_cores++;
- bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- core->core_index, bcma_device_name(&core->id),
- core->id.manuf, core->id.id, core->id.rev,
- core->id.class);
-
- list_add_tail(&core->list, &bus->cores);
- err = 0;
- break;
- }
-
-out:
- if (bus->hosttype == BCMA_HOSTTYPE_SOC)
- iounmap(eromptr);
-
- return err;
-}
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index efb037f9c98a..206edd3ba668 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -579,7 +579,8 @@ int bcma_sprom_get(struct bcma_bus *bus)
u16 offset = BCMA_CC_SPROM;
u16 *sprom;
size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4,
- SSB_SPROMSIZE_WORDS_R10, };
+ SSB_SPROMSIZE_WORDS_R10,
+ SSB_SPROMSIZE_WORDS_R11, };
int i, err = 0;
if (!bus->drv_cc.core)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 014a1cfc41c5..1b8094d4d7af 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -393,14 +393,15 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config BLK_DEV_XIP
- bool "Support XIP filesystems on RAM block device"
- depends on BLK_DEV_RAM
+config BLK_DEV_RAM_DAX
+ bool "Support Direct Access (DAX) to RAM block devices"
+ depends on BLK_DEV_RAM && FS_DAX
default n
help
- Support XIP filesystems (such as ext2 with XIP support on) on
- top of block ram device. This will slightly enlarge the kernel, and
- will prevent RAM block device backing store memory from being
+ Support filesystems using DAX to access RAM block devices. This
+ avoids double-buffering data in the page cache before copying it
+ to the block device. Answering Y will slightly enlarge the kernel,
+ and will prevent RAM block device backing store memory from being
allocated from highmem (only a problem for highmem systems).
config CDROM_PKTCDVD
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3598110d2cef..64ab4951e9d6 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -97,13 +97,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
* Must use NOIO because we don't want to recurse back into the
* block or filesystem layers from page reclaim.
*
- * Cannot support XIP and highmem, because our ->direct_access
- * routine for XIP must return memory that is always addressable.
- * If XIP was reworked to use pfns and kmap throughout, this
+ * Cannot support DAX and highmem, because our ->direct_access
+ * routine for DAX must return memory that is always addressable.
+ * If DAX was reworked to use pfns and kmap throughout, this
* restriction might be able to be lifted.
*/
gfp_flags = GFP_NOIO | __GFP_ZERO;
-#ifndef CONFIG_BLK_DEV_XIP
+#ifndef CONFIG_BLK_DEV_RAM_DAX
gfp_flags |= __GFP_HIGHMEM;
#endif
page = alloc_page(gfp_flags);
@@ -369,27 +369,29 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
return err;
}
-#ifdef CONFIG_BLK_DEV_XIP
-static int brd_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, unsigned long *pfn)
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+static long brd_direct_access(struct block_device *bdev, sector_t sector,
+ void **kaddr, unsigned long *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
if (!brd)
return -ENODEV;
- if (sector & (PAGE_SECTORS-1))
- return -EINVAL;
- if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
- return -ERANGE;
page = brd_insert_page(brd, sector);
if (!page)
return -ENOSPC;
*kaddr = page_address(page);
*pfn = page_to_pfn(page);
- return 0;
+ /*
+ * TODO: If size > PAGE_SIZE, we could look to see if the next page in
+ * the file happens to be mapped to the next page of physical RAM.
+ */
+ return PAGE_SIZE;
}
+#else
+#define brd_direct_access NULL
#endif
static int brd_ioctl(struct block_device *bdev, fmode_t mode,
@@ -430,27 +432,24 @@ static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
.ioctl = brd_ioctl,
-#ifdef CONFIG_BLK_DEV_XIP
.direct_access = brd_direct_access,
-#endif
};
/*
* And now the modules code and kernel interface.
*/
-static int rd_nr;
-int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
-static int max_part;
-static int part_shift;
-static int part_show = 0;
+static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
+
+int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
+
+static int max_part = 1;
module_param(max_part, int, S_IRUGO);
-MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
-module_param(part_show, int, S_IRUGO);
-MODULE_PARM_DESC(part_show, "Control RAM disk visibility in /proc/partitions");
+MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
+
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
@@ -487,25 +486,33 @@ static struct brd_device *brd_alloc(int i)
brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
if (!brd->brd_queue)
goto out_free_dev;
+
blk_queue_make_request(brd->brd_queue, brd_make_request);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
+ /* This is so fdisk will align partitions on 4k, because the
+ * direct_access API needs 4k alignment and returns a PFN.
+ * (This is only a problem on very small devices <= 4M;
+ * otherwise fdisk will align on 1M. Regardless, this call
+ * is harmless.)
+ */
+ blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
+
brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
brd->brd_queue->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
- disk = brd->brd_disk = alloc_disk(1 << part_shift);
+ disk = brd->brd_disk = alloc_disk(max_part);
if (!disk)
goto out_free_queue;
disk->major = RAMDISK_MAJOR;
- disk->first_minor = i << part_shift;
+ disk->first_minor = i * max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
disk->queue = brd->brd_queue;
- if (!part_show)
- disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
+ disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
@@ -527,10 +534,11 @@ static void brd_free(struct brd_device *brd)
kfree(brd);
}
-static struct brd_device *brd_init_one(int i)
+static struct brd_device *brd_init_one(int i, bool *new)
{
struct brd_device *brd;
+ *new = false;
list_for_each_entry(brd, &brd_devices, brd_list) {
if (brd->brd_number == i)
goto out;
@@ -541,6 +549,7 @@ static struct brd_device *brd_init_one(int i)
add_disk(brd->brd_disk);
list_add_tail(&brd->brd_list, &brd_devices);
}
+ *new = true;
out:
return brd;
}
@@ -556,70 +565,46 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
struct brd_device *brd;
struct kobject *kobj;
+ bool new;
mutex_lock(&brd_devices_mutex);
- brd = brd_init_one(MINOR(dev) >> part_shift);
+ brd = brd_init_one(MINOR(dev) / max_part, &new);
kobj = brd ? get_disk(brd->brd_disk) : NULL;
mutex_unlock(&brd_devices_mutex);
- *part = 0;
+ if (new)
+ *part = 0;
+
return kobj;
}
static int __init brd_init(void)
{
- int i, nr;
- unsigned long range;
struct brd_device *brd, *next;
+ int i;
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
- * However, this will not work well with user space tool that doesn't
- * know about such "feature". In order to not break any existing
- * tool, we do the following:
*
- * (1) if rd_nr is specified, create that many upfront, and this
- * also becomes a hard limit.
- * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
- * (default 16) rd device on module load, user can further
- * extend brd device by create dev node themselves and have
- * kernel automatically instantiate actual device on-demand.
+ * (1) if rd_nr is specified, create that many upfront; otherwise it
+ * defaults to CONFIG_BLK_DEV_RAM_COUNT.
+ * (2) Users can further extend brd devices by creating dev nodes themselves
+ * and having the kernel automatically instantiate the actual device
+ * on demand. Example:
+ * mknod /path/devnod_name b 1 X # 1 is the rd major
+ * fdisk -l /path/devnod_name
+ * If (X / max_part) was not already created, it will be created
+ * dynamically.
*/
- part_shift = 0;
- if (max_part > 0) {
- part_shift = fls(max_part);
-
- /*
- * Adjust max_part according to part_shift as it is exported
- * to user space so that user can decide correct minor number
- * if [s]he want to create more devices.
- *
- * Note that -1 is required because partition 0 is reserved
- * for the whole disk.
- */
- max_part = (1UL << part_shift) - 1;
- }
-
- if ((1UL << part_shift) > DISK_MAX_PARTS)
- return -EINVAL;
-
- if (rd_nr > 1UL << (MINORBITS - part_shift))
- return -EINVAL;
-
- if (rd_nr) {
- nr = rd_nr;
- range = rd_nr << part_shift;
- } else {
- nr = CONFIG_BLK_DEV_RAM_COUNT;
- range = 1UL << MINORBITS;
- }
-
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
return -EIO;
- for (i = 0; i < nr; i++) {
+ if (unlikely(!max_part))
+ max_part = 1;
+
+ for (i = 0; i < rd_nr; i++) {
brd = brd_alloc(i);
if (!brd)
goto out_free;
@@ -631,10 +616,10 @@ static int __init brd_init(void)
list_for_each_entry(brd, &brd_devices, brd_list)
add_disk(brd->brd_disk);
- blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
+ blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
THIS_MODULE, brd_probe, NULL, NULL);
- printk(KERN_INFO "brd: module loaded\n");
+ pr_info("brd: module loaded\n");
return 0;
out_free:
@@ -644,21 +629,21 @@ out_free:
}
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
+ pr_info("brd: module NOT loaded !!!\n");
return -ENOMEM;
}
static void __exit brd_exit(void)
{
- unsigned long range;
struct brd_device *brd, *next;
- range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
-
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
- blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
+ blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
+
+ pr_info("brd: module unloaded\n");
}
module_init(brd_init);
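[The ->direct_access() change above follows the DAX-era prototype, which reports how many bytes are directly addressable instead of plain success/failure; a hedged sketch of a caller, with hypothetical names and trimmed error handling.]

static long foo_map_sector(struct block_device *bdev, sector_t sector,
			   void **kaddr, unsigned long *pfn)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	long avail;

	if (!ops->direct_access)
		return -EOPNOTSUPP;

	avail = ops->direct_access(bdev, sector, kaddr, pfn, PAGE_SIZE);
	if (avail < 0)
		return avail;	/* e.g. -ENODEV or -ENOSPC from brd */

	return avail;		/* bytes usable at *kaddr; brd returns PAGE_SIZE */
}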
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index d169b4a79267..cee20354ac37 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1388,7 +1388,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
list_add_tail(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
if (blkdev_issue_zeroout(device->ldev->backing_bdev,
- sector, data_size >> 9, GFP_NOIO))
+ sector, data_size >> 9, GFP_NOIO, false))
peer_req->flags |= EE_WAS_ERROR;
drbd_endio_write_sec_final(peer_req);
return 0;
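[Hedged note on the extra argument: the hunk reflects blkdev_issue_zeroout() gaining a final bool that, when true, allows discard-based zeroing to be attempted first; drbd passes false to force explicit zero writes. A minimal sketch with a hypothetical helper name:]

static int foo_zero_range(struct block_device *bdev, sector_t sector,
			  sector_t nr_sects)
{
	/* 'false': do not attempt discard-based zeroing, write zeroes instead */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOIO, false);
}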
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 56d46ffb08e1..a08cda955285 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4112,6 +4112,13 @@ static ssize_t floppy_cmos_show(struct device *dev,
static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
+static struct attribute *floppy_dev_attrs[] = {
+ &dev_attr_cmos.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(floppy_dev);
+
static void floppy_device_release(struct device *dev)
{
}
@@ -4324,16 +4331,12 @@ static int __init do_floppy_init(void)
floppy_device[drive].name = floppy_device_name;
floppy_device[drive].id = drive;
floppy_device[drive].dev.release = floppy_device_release;
+ floppy_device[drive].dev.groups = floppy_dev_groups;
err = platform_device_register(&floppy_device[drive]);
if (err)
goto out_remove_drives;
- err = device_create_file(&floppy_device[drive].dev,
- &dev_attr_cmos);
- if (err)
- goto out_unreg_platform_dev;
-
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
@@ -4343,13 +4346,10 @@ static int __init do_floppy_init(void)
return 0;
-out_unreg_platform_dev:
- platform_device_unregister(&floppy_device[drive]);
out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive]);
- device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
platform_device_unregister(&floppy_device[drive]);
}
}
@@ -4594,7 +4594,6 @@ static void __exit floppy_module_exit(void)
if (floppy_available(drive)) {
del_gendisk(disks[drive]);
- device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6cb1beb47c25..d1f168b73634 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -85,6 +85,8 @@ static DEFINE_MUTEX(loop_index_mutex);
static int max_part;
static int part_shift;
+static struct workqueue_struct *loop_wq;
+
/*
* Transfer functions
*/
@@ -284,12 +286,12 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
return ret;
}
-static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
+static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
{
int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
struct bio_vec bvec;
- struct bvec_iter iter;
+ struct req_iterator iter;
struct page *page = NULL;
int ret = 0;
@@ -303,7 +305,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
do_lo_send = do_lo_send_direct_write;
}
- bio_for_each_segment(bvec, bio, iter) {
+ rq_for_each_segment(bvec, rq, iter) {
ret = do_lo_send(lo, &bvec, pos, page);
if (ret < 0)
break;
@@ -391,19 +393,22 @@ do_lo_receive(struct loop_device *lo,
}
static int
-lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
+lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos)
{
struct bio_vec bvec;
- struct bvec_iter iter;
+ struct req_iterator iter;
ssize_t s;
- bio_for_each_segment(bvec, bio, iter) {
+ rq_for_each_segment(bvec, rq, iter) {
s = do_lo_receive(lo, &bvec, bsize, pos);
if (s < 0)
return s;
if (s != bvec.bv_len) {
- zero_fill_bio(bio);
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
break;
}
pos += bvec.bv_len;
@@ -411,106 +416,58 @@ lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
return 0;
}
-static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
+static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
- loff_t pos;
+ /*
+ * We use punch hole to reclaim the free space used by the
+ * image a.k.a. discard. However we do not support discard if
+ * encryption is enabled, because it may give an attacker
+ * useful information.
+ */
+ struct file *file = lo->lo_backing_file;
+ int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
int ret;
- pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
-
- if (bio_rw(bio) == WRITE) {
- struct file *file = lo->lo_backing_file;
-
- if (bio->bi_rw & REQ_FLUSH) {
- ret = vfs_fsync(file, 0);
- if (unlikely(ret && ret != -EINVAL)) {
- ret = -EIO;
- goto out;
- }
- }
-
- /*
- * We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
- */
- if (bio->bi_rw & REQ_DISCARD) {
- struct file *file = lo->lo_backing_file;
- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
-
- if ((!file->f_op->fallocate) ||
- lo->lo_encrypt_key_size) {
- ret = -EOPNOTSUPP;
- goto out;
- }
- ret = file->f_op->fallocate(file, mode, pos,
- bio->bi_iter.bi_size);
- if (unlikely(ret && ret != -EINVAL &&
- ret != -EOPNOTSUPP))
- ret = -EIO;
- goto out;
- }
-
- ret = lo_send(lo, bio, pos);
-
- if ((bio->bi_rw & REQ_FUA) && !ret) {
- ret = vfs_fsync(file, 0);
- if (unlikely(ret && ret != -EINVAL))
- ret = -EIO;
- }
- } else
- ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+ if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
-out:
+ ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
+ if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
+ ret = -EIO;
+ out:
return ret;
}
-/*
- * Add bio to back of pending list
- */
-static void loop_add_bio(struct loop_device *lo, struct bio *bio)
+static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
- lo->lo_bio_count++;
- bio_list_add(&lo->lo_bio_list, bio);
-}
+ struct file *file = lo->lo_backing_file;
+ int ret = vfs_fsync(file, 0);
+ if (unlikely(ret && ret != -EINVAL))
+ ret = -EIO;
-/*
- * Grab first pending buffer
- */
-static struct bio *loop_get_bio(struct loop_device *lo)
-{
- lo->lo_bio_count--;
- return bio_list_pop(&lo->lo_bio_list);
+ return ret;
}
-static void loop_make_request(struct request_queue *q, struct bio *old_bio)
+static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
- struct loop_device *lo = q->queuedata;
- int rw = bio_rw(old_bio);
-
- if (rw == READA)
- rw = READ;
+ loff_t pos;
+ int ret;
- BUG_ON(!lo || (rw != READ && rw != WRITE));
+ pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- spin_lock_irq(&lo->lo_lock);
- if (lo->lo_state != Lo_bound)
- goto out;
- if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
- goto out;
- if (lo->lo_bio_count >= q->nr_congestion_on)
- wait_event_lock_irq(lo->lo_req_wait,
- lo->lo_bio_count < q->nr_congestion_off,
- lo->lo_lock);
- loop_add_bio(lo, old_bio);
- wake_up(&lo->lo_event);
- spin_unlock_irq(&lo->lo_lock);
- return;
+ if (rq->cmd_flags & REQ_WRITE) {
+ if (rq->cmd_flags & REQ_FLUSH)
+ ret = lo_req_flush(lo, rq);
+ else if (rq->cmd_flags & REQ_DISCARD)
+ ret = lo_discard(lo, rq, pos);
+ else
+ ret = lo_send(lo, rq, pos);
+ } else
+ ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
-out:
- spin_unlock_irq(&lo->lo_lock);
- bio_io_error(old_bio);
+ return ret;
}
struct switch_request {
@@ -518,57 +475,26 @@ struct switch_request {
struct completion wait;
};
-static void do_loop_switch(struct loop_device *, struct switch_request *);
-
-static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
-{
- if (unlikely(!bio->bi_bdev)) {
- do_loop_switch(lo, bio->bi_private);
- bio_put(bio);
- } else {
- int ret = do_bio_filebacked(lo, bio);
- bio_endio(bio, ret);
- }
-}
-
/*
- * worker thread that handles reads/writes to file backed loop devices,
- * to avoid blocking in our make_request_fn. it also does loop decrypting
- * on reads for block backed loop, as that is too heavy to do from
- * b_end_io context where irqs may be disabled.
- *
- * Loop explanation: loop_clr_fd() sets lo_state to Lo_rundown before
- * calling kthread_stop(). Therefore once kthread_should_stop() is
- * true, make_request will not place any more requests. Therefore
- * once kthread_should_stop() is true and lo_bio is NULL, we are
- * done with the loop.
+ * Do the actual switch; called from the BIO completion routine
*/
-static int loop_thread(void *data)
+static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
- struct loop_device *lo = data;
- struct bio *bio;
-
- set_user_nice(current, MIN_NICE);
-
- while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
-
- wait_event_interruptible(lo->lo_event,
- !bio_list_empty(&lo->lo_bio_list) ||
- kthread_should_stop());
-
- if (bio_list_empty(&lo->lo_bio_list))
- continue;
- spin_lock_irq(&lo->lo_lock);
- bio = loop_get_bio(lo);
- if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
- wake_up(&lo->lo_req_wait);
- spin_unlock_irq(&lo->lo_lock);
+ struct file *file = p->file;
+ struct file *old_file = lo->lo_backing_file;
+ struct address_space *mapping;
- BUG_ON(!bio);
- loop_handle_bio(lo, bio);
- }
+ /* if no new file, only flush of queued bios requested */
+ if (!file)
+ return;
- return 0;
+ mapping = file->f_mapping;
+ mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+ lo->lo_backing_file = file;
+ lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
+ mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
+ lo->old_gfp_mask = mapping_gfp_mask(mapping);
+ mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
}
/*
@@ -579,15 +505,18 @@ static int loop_thread(void *data)
static int loop_switch(struct loop_device *lo, struct file *file)
{
struct switch_request w;
- struct bio *bio = bio_alloc(GFP_KERNEL, 0);
- if (!bio)
- return -ENOMEM;
- init_completion(&w.wait);
+
w.file = file;
- bio->bi_private = &w;
- bio->bi_bdev = NULL;
- loop_make_request(lo->lo_queue, bio);
- wait_for_completion(&w.wait);
+
+ /* freeze queue and wait for completion of scheduled requests */
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ /* do the switch action */
+ do_loop_switch(lo, &w);
+
+ /* unfreeze */
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
return 0;
}
@@ -596,39 +525,10 @@ static int loop_switch(struct loop_device *lo, struct file *file)
*/
static int loop_flush(struct loop_device *lo)
{
- /* loop not yet configured, no running thread, nothing to flush */
- if (!lo->lo_thread)
- return 0;
-
return loop_switch(lo, NULL);
}
/*
- * Do the actual switch; called from the BIO completion routine
- */
-static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
-{
- struct file *file = p->file;
- struct file *old_file = lo->lo_backing_file;
- struct address_space *mapping;
-
- /* if no new file, only flush of queued bios requested */
- if (!file)
- goto out;
-
- mapping = file->f_mapping;
- mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
- mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
-out:
- complete(&p->wait);
-}
-
-
-/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
* the original file and in High Availability environments to switch to
@@ -889,12 +789,9 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
- lo->lo_bio_count = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- bio_list_init(&lo->lo_bio_list);
-
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -906,14 +803,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_blocksize(bdev, lo_blocksize);
- lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
- lo->lo_number);
- if (IS_ERR(lo->lo_thread)) {
- error = PTR_ERR(lo->lo_thread);
- goto out_clr;
- }
lo->lo_state = Lo_bound;
- wake_up_process(lo->lo_thread);
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
if (lo->lo_flags & LO_FLAGS_PARTSCAN)
@@ -925,18 +815,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
bdgrab(bdev);
return 0;
-out_clr:
- loop_sysfs_exit(lo);
- lo->lo_thread = NULL;
- lo->lo_device = NULL;
- lo->lo_backing_file = NULL;
- lo->lo_flags = 0;
- set_capacity(lo->lo_disk, 0);
- invalidate_bdev(bdev);
- bd_set_size(bdev, 0);
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
- lo->lo_state = Lo_unbound;
out_putf:
fput(file);
out:
@@ -1012,11 +890,6 @@ static int loop_clr_fd(struct loop_device *lo)
spin_lock_irq(&lo->lo_lock);
lo->lo_state = Lo_rundown;
- spin_unlock_irq(&lo->lo_lock);
-
- kthread_stop(lo->lo_thread);
-
- spin_lock_irq(&lo->lo_lock);
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
@@ -1028,7 +901,6 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
lo->lo_encrypt_key_size = 0;
- lo->lo_thread = NULL;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -1601,6 +1473,105 @@ int loop_unregister_transfer(int number)
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
+static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+
+ blk_mq_start_request(bd->rq);
+
+ if (cmd->rq->cmd_flags & REQ_WRITE) {
+ struct loop_device *lo = cmd->rq->q->queuedata;
+ bool need_sched = true;
+
+ spin_lock_irq(&lo->lo_lock);
+ if (lo->write_started)
+ need_sched = false;
+ else
+ lo->write_started = true;
+ list_add_tail(&cmd->list, &lo->write_cmd_head);
+ spin_unlock_irq(&lo->lo_lock);
+
+ if (need_sched)
+ queue_work(loop_wq, &lo->write_work);
+ } else {
+ queue_work(loop_wq, &cmd->read_work);
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static void loop_handle_cmd(struct loop_cmd *cmd)
+{
+ const bool write = cmd->rq->cmd_flags & REQ_WRITE;
+ struct loop_device *lo = cmd->rq->q->queuedata;
+ int ret = -EIO;
+
+ if (lo->lo_state != Lo_bound)
+ goto failed;
+
+ if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+ goto failed;
+
+ ret = do_req_filebacked(lo, cmd->rq);
+
+ failed:
+ if (ret)
+ cmd->rq->errors = -EIO;
+ blk_mq_complete_request(cmd->rq);
+}
+
+static void loop_queue_write_work(struct work_struct *work)
+{
+ struct loop_device *lo =
+ container_of(work, struct loop_device, write_work);
+ LIST_HEAD(cmd_list);
+
+ spin_lock_irq(&lo->lo_lock);
+ repeat:
+ list_splice_init(&lo->write_cmd_head, &cmd_list);
+ spin_unlock_irq(&lo->lo_lock);
+
+ while (!list_empty(&cmd_list)) {
+ struct loop_cmd *cmd = list_first_entry(&cmd_list,
+ struct loop_cmd, list);
+ list_del_init(&cmd->list);
+ loop_handle_cmd(cmd);
+ }
+
+ spin_lock_irq(&lo->lo_lock);
+ if (!list_empty(&lo->write_cmd_head))
+ goto repeat;
+ lo->write_started = false;
+ spin_unlock_irq(&lo->lo_lock);
+}
+
+static void loop_queue_read_work(struct work_struct *work)
+{
+ struct loop_cmd *cmd =
+ container_of(work, struct loop_cmd, read_work);
+
+ loop_handle_cmd(cmd);
+}
+
+static int loop_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->rq = rq;
+ INIT_WORK(&cmd->read_work, loop_queue_read_work);
+
+ return 0;
+}
+
+static struct blk_mq_ops loop_mq_ops = {
+ .queue_rq = loop_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = loop_init_request,
+};
+
static int loop_add(struct loop_device **l, int i)
{
struct loop_device *lo;
@@ -1627,16 +1598,28 @@ static int loop_add(struct loop_device **l, int i)
i = err;
err = -ENOMEM;
- lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
- if (!lo->lo_queue)
+ lo->tag_set.ops = &loop_mq_ops;
+ lo->tag_set.nr_hw_queues = 1;
+ lo->tag_set.queue_depth = 128;
+ lo->tag_set.numa_node = NUMA_NO_NODE;
+ lo->tag_set.cmd_size = sizeof(struct loop_cmd);
+ lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ lo->tag_set.driver_data = lo;
+
+ err = blk_mq_alloc_tag_set(&lo->tag_set);
+ if (err)
goto out_free_idr;
- /*
- * set queue make_request_fn
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
+ lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
+ if (IS_ERR_OR_NULL(lo->lo_queue)) {
+ err = PTR_ERR(lo->lo_queue);
+ goto out_cleanup_tags;
+ }
lo->lo_queue->queuedata = lo;
+ INIT_LIST_HEAD(&lo->write_cmd_head);
+ INIT_WORK(&lo->write_work, loop_queue_write_work);
+
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_queue;
@@ -1664,9 +1647,6 @@ static int loop_add(struct loop_device **l, int i)
disk->flags |= GENHD_FL_EXT_DEVT;
mutex_init(&lo->lo_ctl_mutex);
lo->lo_number = i;
- lo->lo_thread = NULL;
- init_waitqueue_head(&lo->lo_event);
- init_waitqueue_head(&lo->lo_req_wait);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
@@ -1680,6 +1660,8 @@ static int loop_add(struct loop_device **l, int i)
out_free_queue:
blk_cleanup_queue(lo->lo_queue);
+out_cleanup_tags:
+ blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
idr_remove(&loop_index_idr, i);
out_free_dev:
@@ -1692,6 +1674,7 @@ static void loop_remove(struct loop_device *lo)
{
del_gendisk(lo->lo_disk);
blk_cleanup_queue(lo->lo_queue);
+ blk_mq_free_tag_set(&lo->tag_set);
put_disk(lo->lo_disk);
kfree(lo);
}
@@ -1875,6 +1858,13 @@ static int __init loop_init(void)
goto misc_out;
}
+ loop_wq = alloc_workqueue("kloopd",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
+ if (!loop_wq) {
+ err = -ENOMEM;
+ goto misc_out;
+ }
+
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
THIS_MODULE, loop_probe, NULL, NULL);
@@ -1912,6 +1902,8 @@ static void __exit loop_exit(void)
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
unregister_blkdev(LOOP_MAJOR, "loop");
+ destroy_workqueue(loop_wq);
+
misc_deregister(&loop_misc);
}
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 90df5d6485b6..301c27f8323f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -11,8 +11,10 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/workqueue.h>
#include <uapi/linux/loop.h>
/* Possible states of device */
@@ -52,19 +54,23 @@ struct loop_device {
gfp_t old_gfp_mask;
spinlock_t lo_lock;
- struct bio_list lo_bio_list;
- unsigned int lo_bio_count;
+ struct list_head write_cmd_head;
+ struct work_struct write_work;
+ bool write_started;
int lo_state;
struct mutex lo_ctl_mutex;
- struct task_struct *lo_thread;
- wait_queue_head_t lo_event;
- /* wait queue for incoming requests */
- wait_queue_head_t lo_req_wait;
struct request_queue *lo_queue;
+ struct blk_mq_tag_set tag_set;
struct gendisk *lo_disk;
};
+struct loop_cmd {
+ struct work_struct read_work;
+ struct request *rq;
+ struct list_head list;
+};
+
/* Support for loadable transfer modules */
struct loop_func_table {
int number; /* filter type */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ae9f615382f6..65cd61a4145e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -530,7 +530,7 @@ static int null_add_dev(void)
goto out_cleanup_queues;
nullb->q = blk_mq_init_queue(&nullb->tag_set);
- if (!nullb->q) {
+ if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}
@@ -579,7 +579,7 @@ static int null_add_dev(void)
sector_div(size, bs);
set_capacity(disk, size);
- disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
disk->fops = &null_fops;
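
The null_blk fix reflects that blk_mq_init_queue() reports failure through ERR_PTR() rather than NULL after this series, so a bare NULL test would let an error pointer slip through. The hunk itself keeps a fixed -ENOMEM; extracting PTR_ERR() as below is only an alternative shown for illustration, and the helper name is hypothetical:

#include <linux/blkdev.h>
#include <linux/err.h>

/* map a blk_mq_init_queue() result to 0 or a negative errno */
static int sketch_queue_errno(struct request_queue *q)
{
	return IS_ERR(q) ? PTR_ERR(q) : 0;
}
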
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..cbdfbbf98392 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -106,7 +106,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- u16 cq_vector;
+ s16 cq_vector;
u16 sq_head;
u16 sq_tail;
u16 cq_head;
@@ -144,8 +144,37 @@ struct nvme_cmd_info {
void *ctx;
int aborted;
struct nvme_queue *nvmeq;
+ struct nvme_iod iod[0];
};
+/*
+ * Max size of an iod that can be embedded in the request payload
+ */
+#define NVME_INT_PAGES 2
+#define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size)
+
+/*
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size, struct nvme_dev *dev)
+{
+ unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
+ return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
+
+static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+{
+ unsigned int ret = sizeof(struct nvme_cmd_info);
+
+ ret += sizeof(struct nvme_iod);
+ ret += sizeof(__le64 *) * nvme_npages(NVME_INT_BYTES(dev), dev);
+ ret += sizeof(struct scatterlist) * NVME_INT_PAGES;
+
+ return ret;
+}
+
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -215,6 +244,20 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
cmd->fn = handler;
cmd->ctx = ctx;
cmd->aborted = 0;
+ blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
+}
+
+static void *iod_get_private(struct nvme_iod *iod)
+{
+ return (void *) (iod->private & ~0x1UL);
+}
+
+/*
+ * If bit 0 is set, the iod is embedded in the request payload.
+ */
+static bool iod_should_kfree(struct nvme_iod *iod)
+{
+ return (iod->private & 0x01) == 0;
}
/* Special values must be less than 0x1000 */
@@ -360,35 +403,53 @@ static __le64 **iod_list(struct nvme_iod *iod)
return ((void *)iod) + iod->offset;
}
-/*
- * Will slightly overestimate the number of pages needed. This is OK
- * as it only leads to a small amount of wasted memory for the lifetime of
- * the I/O.
- */
-static int nvme_npages(unsigned size, struct nvme_dev *dev)
+static inline void iod_init(struct nvme_iod *iod, unsigned nbytes,
+ unsigned nseg, unsigned long private)
{
- unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
- return DIV_ROUND_UP(8 * nprps, dev->page_size - 8);
+ iod->private = private;
+ iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+ iod->npages = -1;
+ iod->length = nbytes;
+ iod->nents = 0;
}
static struct nvme_iod *
-nvme_alloc_iod(unsigned nseg, unsigned nbytes, struct nvme_dev *dev, gfp_t gfp)
+__nvme_alloc_iod(unsigned nseg, unsigned bytes, struct nvme_dev *dev,
+ unsigned long priv, gfp_t gfp)
{
struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
- sizeof(__le64 *) * nvme_npages(nbytes, dev) +
+ sizeof(__le64 *) * nvme_npages(bytes, dev) +
sizeof(struct scatterlist) * nseg, gfp);
- if (iod) {
- iod->offset = offsetof(struct nvme_iod, sg[nseg]);
- iod->npages = -1;
- iod->length = nbytes;
- iod->nents = 0;
- iod->first_dma = 0ULL;
- }
+ if (iod)
+ iod_init(iod, bytes, nseg, priv);
return iod;
}
+static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
+ gfp_t gfp)
+{
+ unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
+ sizeof(struct nvme_dsm_range);
+ unsigned long mask = 0;
+ struct nvme_iod *iod;
+
+ if (rq->nr_phys_segments <= NVME_INT_PAGES &&
+ size <= NVME_INT_BYTES(dev)) {
+ struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
+
+ iod = cmd->iod;
+ mask = 0x01;
+ iod_init(iod, size, rq->nr_phys_segments,
+ (unsigned long) rq | 0x01);
+ return iod;
+ }
+
+ return __nvme_alloc_iod(rq->nr_phys_segments, size, dev,
+ (unsigned long) rq, gfp);
+}
+
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
const int last_prp = dev->page_size / 8 - 1;
@@ -404,7 +465,9 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
prp_dma = next_prp_dma;
}
- kfree(iod);
+
+ if (iod_should_kfree(iod))
+ kfree(iod);
}
static int nvme_error_status(u16 status)
@@ -423,7 +486,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct nvme_iod *iod = ctx;
- struct request *req = iod->private;
+ struct request *req = iod_get_private(iod);
struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
u16 status = le16_to_cpup(&cqe->status) >> 1;
@@ -431,8 +494,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
if (unlikely(status)) {
if (!(status & NVME_SC_DNR || blk_noretry_request(req))
&& (jiffies - req->start_time) < req->timeout) {
+ unsigned long flags;
+
blk_mq_requeue_request(req);
- blk_mq_kick_requeue_list(req->q);
+ spin_lock_irqsave(req->q->queue_lock, flags);
+ if (!blk_queue_stopped(req->q))
+ blk_mq_kick_requeue_list(req->q);
+ spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
req->errors = nvme_error_status(status);
@@ -579,7 +647,7 @@ static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct nvme_ns *ns)
{
- struct request *req = iod->private;
+ struct request *req = iod_get_private(iod);
struct nvme_command *cmnd;
u16 control = 0;
u32 dsmgmt = 0;
@@ -620,17 +688,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_iod *iod;
- int psegs = req->nr_phys_segments;
enum dma_data_direction dma_dir;
- unsigned size = !(req->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(req) :
- sizeof(struct nvme_dsm_range);
- iod = nvme_alloc_iod(psegs, size, ns->dev, GFP_ATOMIC);
+ iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
if (!iod)
return BLK_MQ_RQ_QUEUE_BUSY;
- iod->private = req;
-
if (req->cmd_flags & REQ_DISCARD) {
void *range;
/*
@@ -645,10 +708,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
goto retry_cmd;
iod_list(iod)[0] = (__le64 *)range;
iod->npages = 0;
- } else if (psegs) {
+ } else if (req->nr_phys_segments) {
dma_dir = rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
- sg_init_table(iod->sg, psegs);
+ sg_init_table(iod->sg, req->nr_phys_segments);
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
goto error_cmd;
@@ -664,8 +727,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- blk_mq_start_request(req);
-
nvme_set_info(cmd, iod, req_completion);
spin_lock_irq(&nvmeq->q_lock);
if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +896,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
if (IS_ERR(req))
return PTR_ERR(req);
+ req->cmd_flags |= REQ_NO_TIMEOUT;
cmd_info = blk_mq_rq_to_pdu(req);
nvme_set_info(cmd_info, req, async_req_completion);
@@ -1016,14 +1078,19 @@ static void nvme_abort_req(struct request *req)
struct nvme_command cmd;
if (!nvmeq->qid || cmd_rq->aborted) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_list_lock, flags);
if (work_busy(&dev->reset_work))
- return;
+ goto out;
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
+ out:
+ spin_unlock_irqrestore(&dev_list_lock, flags);
return;
}
@@ -1064,15 +1131,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
void *ctx;
nvme_completion_fn fn;
struct nvme_cmd_info *cmd;
- static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
- };
+ struct nvme_completion cqe;
+
+ if (!blk_mq_request_started(req))
+ return;
cmd = blk_mq_rq_to_pdu(req);
if (cmd->ctx == CMD_CTX_CANCELLED)
return;
+ if (blk_queue_dying(req->q))
+ cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+ else
+ cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
req->tag, nvmeq->qid);
ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1158,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd->nvmeq;
- dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
- nvmeq->qid);
- if (nvmeq->dev->initialized)
- nvme_abort_req(req);
-
/*
* The aborted req will be completed on receiving the abort req.
* We enable the timer again. If hit twice, it'll cause a device reset,
* as the device then is in a faulty state.
*/
- return BLK_EH_RESET_TIMER;
+ int ret = BLK_EH_RESET_TIMER;
+
+ dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+ nvmeq->qid);
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (!nvmeq->dev->initialized) {
+ /*
+ * A force-cancelled command frees the request, which requires us
+ * to return BLK_EH_NOT_HANDLED.
+ */
+ nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+ ret = BLK_EH_NOT_HANDLED;
+ } else
+ nvme_abort_req(req);
+ spin_unlock_irq(&nvmeq->q_lock);
+
+ return ret;
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1108,21 +1194,14 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
- LLIST_HEAD(q_list);
- struct nvme_queue *nvmeq, *next;
- struct llist_node *entry;
int i;
for (i = dev->queue_count - 1; i >= lowest; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
- llist_add(&nvmeq->node, &q_list);
dev->queue_count--;
dev->queues[i] = NULL;
- }
- synchronize_rcu();
- entry = llist_del_all(&q_list);
- llist_for_each_entry_safe(nvmeq, next, entry, node)
nvme_free_queue(nvmeq);
+ }
}
/**
@@ -1131,10 +1210,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+ int vector;
spin_lock_irq(&nvmeq->q_lock);
+ if (nvmeq->cq_vector == -1) {
+ spin_unlock_irq(&nvmeq->q_lock);
+ return 1;
+ }
+ vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
nvmeq->dev->online_queues--;
+ nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1254,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
adapter_delete_sq(dev, qid);
adapter_delete_cq(dev, qid);
}
+ if (!qid && dev->admin_q)
+ blk_mq_freeze_queue_start(dev->admin_q);
nvme_clear_queue(nvmeq);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int vector)
+ int depth)
{
struct device *dmadev = &dev->pci_dev->dev;
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1286,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
- nvmeq->cq_vector = vector;
nvmeq->qid = qid;
dev->queue_count++;
dev->queues[qid] = nvmeq;
@@ -1244,6 +1330,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
struct nvme_dev *dev = nvmeq->dev;
int result;
+ nvmeq->cq_vector = qid - 1;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
return result;
@@ -1355,6 +1442,14 @@ static struct blk_mq_ops nvme_mq_ops = {
.timeout = nvme_timeout,
};
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+ if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+ blk_cleanup_queue(dev->admin_q);
+ blk_mq_free_tag_set(&dev->admin_tagset);
+ }
+}
+
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
if (!dev->admin_q) {
@@ -1363,28 +1458,27 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
- dev->admin_tagset.cmd_size = sizeof(struct nvme_cmd_info);
+ dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
dev->admin_tagset.driver_data = dev;
if (blk_mq_alloc_tag_set(&dev->admin_tagset))
return -ENOMEM;
dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (!dev->admin_q) {
+ if (IS_ERR(dev->admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
return -ENOMEM;
}
- }
+ if (!blk_get_queue(dev->admin_q)) {
+ nvme_dev_remove_admin(dev);
+ return -ENODEV;
+ }
+ } else
+ blk_mq_unfreeze_queue(dev->admin_q);
return 0;
}
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
- if (dev->admin_q)
- blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@@ -1416,7 +1510,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq = dev->queues[0];
if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+ nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (!nvmeq)
return -ENOMEM;
}
@@ -1439,18 +1533,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
goto free_nvmeq;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto free_nvmeq;
-
+ nvmeq->cq_vector = 0;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
- goto free_tags;
+ goto free_nvmeq;
return result;
- free_tags:
- nvme_free_admin_tags(dev);
free_nvmeq:
nvme_free_queues(dev, 0);
return result;
@@ -1483,7 +1572,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
}
err = -ENOMEM;
- iod = nvme_alloc_iod(count, length, dev, GFP_KERNEL);
+ iod = __nvme_alloc_iod(count, length, dev, 0, GFP_KERNEL);
if (!iod)
goto put_pages;
@@ -1944,7 +2033,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
unsigned i;
for (i = dev->queue_count; i <= dev->max_qid; i++)
- if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+ if (!nvme_alloc_queue(dev, i, dev->q_depth))
break;
for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2109,7 +2198,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = sizeof(struct nvme_cmd_info);
+ dev->tagset.cmd_size = nvme_cmd_size(dev);
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
@@ -2235,13 +2324,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
break;
if (!schedule_timeout(ADMIN_TIMEOUT) ||
fatal_signal_pending(current)) {
+ /*
+ * Disable the controller first since we can't trust it
+ * at this point, but leave the admin queue enabled
+ * until all queue deletion requests are flushed.
+ * FIXME: This may take a while if there are more h/w
+ * queues than admin tags.
+ */
set_current_state(TASK_RUNNING);
-
nvme_disable_ctrl(dev, readq(&dev->bar->cap));
- nvme_disable_queue(dev, 0);
-
- send_sig(SIGKILL, dq->worker->task, 1);
+ nvme_clear_queue(dev->queues[0]);
flush_kthread_worker(dq->worker);
+ nvme_disable_queue(dev, 0);
return;
}
}
@@ -2318,7 +2412,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
{
struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
cmdinfo.work);
- allow_signal(SIGKILL);
if (nvme_delete_sq(nvmeq))
nvme_del_queue_end(nvmeq);
}
@@ -2376,6 +2469,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
kthread_stop(tmp);
}
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ blk_mq_freeze_queue_start(ns->queue);
+
+ spin_lock(ns->queue->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+ spin_unlock(ns->queue->queue_lock);
+
+ blk_mq_cancel_requeue_work(ns->queue);
+ blk_mq_stop_hw_queues(ns->queue);
+ }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+ blk_mq_unfreeze_queue(ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+ blk_mq_kick_requeue_list(ns->queue);
+ }
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
@@ -2384,8 +2505,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
dev->initialized = 0;
nvme_dev_list_remove(dev);
- if (dev->bar)
+ if (dev->bar) {
+ nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
+ }
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2523,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_unmap(dev);
}
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
- if (dev->admin_q && !blk_queue_dying(dev->admin_q))
- blk_cleanup_queue(dev->admin_q);
-}
-
static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns;
@@ -2413,8 +2530,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
list_for_each_entry(ns, &dev->namespaces, list) {
if (ns->disk->flags & GENHD_FL_UP)
del_gendisk(ns->disk);
- if (!blk_queue_dying(ns->queue))
+ if (!blk_queue_dying(ns->queue)) {
+ blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
+ }
}
}
@@ -2495,6 +2614,7 @@ static void nvme_free_dev(struct kref *kref)
nvme_free_namespaces(dev);
nvme_release_instance(dev);
blk_mq_free_tag_set(&dev->tagset);
+ blk_put_queue(dev->admin_q);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2591,15 +2711,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
}
nvme_init_queue(dev->queues[0], 0);
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+ goto disable;
result = nvme_setup_io_queues(dev);
if (result)
- goto disable;
+ goto free_tags;
nvme_set_irq_hints(dev);
return result;
+ free_tags:
+ nvme_dev_remove_admin(dev);
disable:
nvme_disable_queue(dev, 0);
nvme_dev_list_remove(dev);
@@ -2639,6 +2764,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
dev->reset_workfn = nvme_remove_disks;
queue_work(nvme_workq, &dev->reset_work);
spin_unlock(&dev_list_lock);
+ } else {
+ nvme_unfreeze_queues(dev);
+ nvme_set_irq_hints(dev);
}
dev->initialized = 1;
return 0;
@@ -2776,11 +2904,10 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
- nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
+ nvme_dev_remove(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_free_admin_tags(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
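
Several of the nvme hunks above revolve around embedding a small iod inside the per-request pdu and recording that case by setting bit 0 of iod->private, which is free because a struct request pointer is always at least word aligned. A standalone sketch of that low-bit tagging, with hypothetical sketch_* names rather than the driver's:

#include <linux/slab.h>
#include <linux/types.h>

struct sketch_iod {
	unsigned long private;	/* request pointer, bit 0 = "embedded" flag */
};

static void sketch_iod_init(struct sketch_iod *iod, void *rq, bool embedded)
{
	/* the pointer is word aligned, so bit 0 can carry the flag */
	iod->private = (unsigned long)rq | (embedded ? 0x01UL : 0);
}

static void *sketch_iod_private(struct sketch_iod *iod)
{
	return (void *)(iod->private & ~0x1UL);
}

static void sketch_iod_free(struct sketch_iod *iod)
{
	/* embedded iods live inside the request pdu and are never kfree'd */
	if (!(iod->private & 0x01))
		kfree(iod);
}
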
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 79aa179305b5..e22942596207 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -423,7 +423,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
}
/* switch queue to TCQ mode; allocate tag map */
- rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
+ rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL, BLK_TAG_ALLOC_FIFO);
if (rc) {
blk_cleanup_queue(q);
put_disk(disk);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
- int counter;
+ int counter = 0;
if (!rbd_dev->parent_spec)
return false;
- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- if (counter > 0 && rbd_dev->parent_overlap)
- return true;
-
- /* Image was flattened, but parent is not yet torn down */
+ down_read(&rbd_dev->header_rwsem);
+ if (rbd_dev->parent_overlap)
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
- return false;
+ return counter > 0;
}
/*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
- smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
- smp_mb();
if (!overlap) {
/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
- /* Drop parent reference unless it's already been done (or none) */
-
- if (rbd_dev->parent_overlap)
- rbd_dev_parent_put(rbd_dev);
+ rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
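
The rbd_dev_parent_get() change above takes header_rwsem so that parent_overlap cannot change between the check and the reference grab. A sketch of that check-then-get shape; it uses plain atomic_inc_return() where rbd has its own overflow-checking helper, assumes the fields were initialised at setup, and the names are illustrative:

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/types.h>

struct sketch_img {
	struct rw_semaphore header_rwsem;	/* protects parent_overlap */
	u64 parent_overlap;
	atomic_t parent_ref;
};

/* take a parent reference only while the field it depends on is stable */
static bool sketch_parent_get(struct sketch_img *img)
{
	bool got = false;

	down_read(&img->header_rwsem);
	if (img->parent_overlap)
		got = atomic_inc_return(&img->parent_ref) > 0;
	up_read(&img->header_rwsem);

	return got;
}
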
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ef7c098708f..cdfbd21e3597 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
- if (!q) {
+ if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 63fc7f06a014..2a04d341e598 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -47,6 +47,7 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
+#include <xen/grant_table.h>
#include "common.h"
/*
@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644);
#define BLKBACK_INVALID_HANDLE (~0)
-/* Number of free pages to remove on each call to free_xenballooned_pages */
+/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
if (list_empty(&blkif->free_pages)) {
BUG_ON(blkif->free_pages_num != 0);
spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
- return alloc_xenballooned_pages(1, page, false);
+ return gnttab_alloc_pages(1, page);
}
BUG_ON(blkif->free_pages_num == 0);
page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
blkif->free_pages_num--;
if (++num_pages == NUM_BATCH_FREE_PAGES) {
spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
- free_xenballooned_pages(num_pages, page);
+ gnttab_free_pages(num_pages, page);
spin_lock_irqsave(&blkif->free_pages_lock, flags);
num_pages = 0;
}
}
spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
if (num_pages != 0)
- free_xenballooned_pages(num_pages, page);
+ gnttab_free_pages(num_pages, page);
}
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
atomic_dec(&blkif->persistent_gnt_in_use);
}
+static void free_persistent_gnts_unmap_callback(int result,
+ struct gntab_unmap_queue_data *data)
+{
+ struct completion *c = data->data;
+
+ /* BUG_ON used to reproduce existing behaviour,
+ but is this the best way to deal with this? */
+ BUG_ON(result);
+ complete(c);
+}
+
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
unsigned int num)
{
@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
struct rb_node *n;
- int ret = 0;
int segs_to_unmap = 0;
+ struct gntab_unmap_queue_data unmap_data;
+ struct completion unmap_completion;
+
+ init_completion(&unmap_completion);
+
+ unmap_data.data = &unmap_completion;
+ unmap_data.done = &free_persistent_gnts_unmap_callback;
+ unmap_data.pages = pages;
+ unmap_data.unmap_ops = unmap;
+ unmap_data.kunmap_ops = NULL;
foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle ==
@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
- ret = gnttab_unmap_refs(unmap, NULL, pages,
- segs_to_unmap);
- BUG_ON(ret);
+
+ unmap_data.count = segs_to_unmap;
+ gnttab_unmap_refs_async(&unmap_data);
+ wait_for_completion(&unmap_completion);
+
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
}
@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif)
shrink_free_pagepool(blkif, 0 /* All */);
}
-/*
- * Unmap the grant references, and also remove the M2P over-rides
- * used in the 'pending_req'.
- */
-static void xen_blkbk_unmap(struct xen_blkif *blkif,
- struct grant_page *pages[],
- int num)
+static unsigned int xen_blkbk_unmap_prepare(
+ struct xen_blkif *blkif,
+ struct grant_page **pages,
+ unsigned int num,
+ struct gnttab_unmap_grant_ref *unmap_ops,
+ struct page **unmap_pages)
{
- struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0;
- int ret;
for (i = 0; i < num; i++) {
if (pages[i]->persistent_gnt != NULL) {
@@ -674,21 +693,95 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
continue;
unmap_pages[invcount] = pages[i]->page;
- gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
+ gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
- if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
- invcount);
+ invcount++;
+ }
+
+ return invcount;
+}
+
+static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
+{
+ struct pending_req* pending_req = (struct pending_req*) (data->data);
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ /* BUG_ON used to reproduce existing behaviour,
+ but is this the best way to deal with this? */
+ BUG_ON(result);
+
+ put_free_pages(blkif, data->pages, data->count);
+ make_response(blkif, pending_req->id,
+ pending_req->operation, pending_req->status);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: We might try to wake up pending_free_wq before
+ * drain_complete (in case a drain is in progress). That is not
+ * a problem with the current implementation, because we can be
+ * sure no thread is waiting on pending_free_wq while a drain is
+ * going on, but it has to be taken into account if the current
+ * model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
+ }
+ xen_blkif_put(blkif);
+}
+
+static void xen_blkbk_unmap_and_respond(struct pending_req *req)
+{
+ struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
+ struct xen_blkif *blkif = req->blkif;
+ struct grant_page **pages = req->segments;
+ unsigned int invcount;
+
+ invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
+ req->unmap, req->unmap_pages);
+
+ work->data = req;
+ work->done = xen_blkbk_unmap_and_respond_callback;
+ work->unmap_ops = req->unmap;
+ work->kunmap_ops = NULL;
+ work->pages = req->unmap_pages;
+ work->count = invcount;
+
+ gnttab_unmap_refs_async(&req->gnttab_unmap_data);
+}
+
+
+/*
+ * Unmap the grant references.
+ *
+ * This could accumulate ops up to the batch size to reduce the number
+ * of hypercalls, but since this is only used in error paths there's
+ * no real need.
+ */
+static void xen_blkbk_unmap(struct xen_blkif *blkif,
+ struct grant_page *pages[],
+ int num)
+{
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned int invcount = 0;
+ int ret;
+
+ while (num) {
+ unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
+ unmap, unmap_pages);
+ if (invcount) {
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
- invcount = 0;
}
- }
- if (invcount) {
- ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
- BUG_ON(ret);
- put_free_pages(blkif, unmap_pages, invcount);
+ pages += batch;
+ num -= batch;
}
}
@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the grant references associated with 'request' and provide
* the proper response on the ring.
*/
- if (atomic_dec_and_test(&pending_req->pendcnt)) {
- struct xen_blkif *blkif = pending_req->blkif;
-
- xen_blkbk_unmap(blkif,
- pending_req->segments,
- pending_req->nr_pages);
- make_response(blkif, pending_req->id,
- pending_req->operation, pending_req->status);
- free_req(blkif, pending_req);
- /*
- * Make sure the request is freed before releasing blkif,
- * or there could be a race between free_req and the
- * cleanup done in xen_blkif_free during shutdown.
- *
- * NB: The fact that we might try to wake up pending_free_wq
- * before drain_complete (in case there's a drain going on)
- * it's not a problem with our current implementation
- * because we can assure there's no thread waiting on
- * pending_free_wq if there's a drain going on, but it has
- * to be taken into account if the current model is changed.
- */
- if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
- complete(&blkif->drain_complete);
- }
- xen_blkif_put(blkif);
- }
+ if (atomic_dec_and_test(&pending_req->pendcnt))
+ xen_blkbk_unmap_and_respond(pending_req);
}
/*
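
The xen-blkback hunks above hand grant unmapping to gnttab_unmap_refs_async() and learn about completion through a callback; the synchronous caller in free_persistent_gnts() bridges that with a struct completion. A minimal sketch of that callback/completion handshake, with hypothetical names and a hypothetical callback signature (the real callback receives a struct gntab_unmap_queue_data):

#include <linux/completion.h>

struct sketch_async {
	struct completion done;
	int result;
};

/* runs in the asynchronous context once the batch has been processed */
static void sketch_async_done(int result, void *data)
{
	struct sketch_async *a = data;

	a->result = result;
	complete(&a->done);
}

/* synchronous caller: start the operation, then block until the callback fires */
static int sketch_async_wait(struct sketch_async *a)
{
	init_completion(&a->done);
	/* ... submit the asynchronous work here, passing 'a' as its context ... */
	wait_for_completion(&a->done);

	return a->result;
}
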
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index f65b807e3236..375d28851860 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -214,6 +214,15 @@ enum blkif_protocol {
BLKIF_PROTOCOL_X86_64 = 3,
};
+/*
+ * Default protocol if the frontend doesn't specify one.
+ */
+#ifdef CONFIG_X86
+# define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
+#else
+# define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
+#endif
+
struct xen_vbd {
/* What the domain refers to this vbd as. */
blkif_vdev_t handle;
@@ -350,6 +359,9 @@ struct pending_req {
struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
struct bio *biolist[MAX_INDIRECT_SEGMENTS];
+ struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
+ struct page *unmap_pages[MAX_INDIRECT_SEGMENTS];
+ struct gntab_unmap_queue_data gnttab_unmap_data;
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 630a489e757d..e3afe97280b1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -868,11 +868,11 @@ static int connect_ring(struct backend_info *be)
return err;
}
- be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
"%63s", protocol, NULL);
if (err)
- strcpy(protocol, "unspecified, assuming native");
+ strcpy(protocol, "unspecified, assuming default");
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2236c6f31608..37779e4c4585 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1391,7 +1391,7 @@ static int blkfront_probe(struct xenbus_device *dev,
if (major != XENVBD_MAJOR) {
printk(KERN_INFO
"%s: HVM does not support vbd %d as xen block device\n",
- __FUNCTION__, vdevice);
+ __func__, vdevice);
return -ENODEV;
}
}
@@ -1511,7 +1511,7 @@ static int blkif_recover(struct blkfront_info *info)
merge_bio.tail = copy[i].request->biotail;
bio_list_merge(&bio_list, &merge_bio);
copy[i].request->bio = NULL;
- blk_put_request(copy[i].request);
+ blk_end_request_all(copy[i].request, 0);
}
kfree(copy);
@@ -1534,7 +1534,7 @@ static int blkif_recover(struct blkfront_info *info)
req->bio = NULL;
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
pr_alert("diskcache flush request found!\n");
- __blk_put_request(info->rq, req);
+ __blk_end_request_all(req, 0);
}
spin_unlock_irq(&info->io_lock);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index bd8bda386e02..8e233edd7a09 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -53,9 +53,9 @@ static ssize_t name##_show(struct device *d, \
} \
static DEVICE_ATTR_RO(name);
-static inline int init_done(struct zram *zram)
+static inline bool init_done(struct zram *zram)
{
- return zram->meta != NULL;
+ return zram->disksize;
}
static inline struct zram *dev_to_zram(struct device *dev)
@@ -307,42 +307,67 @@ static inline int valid_io_request(struct zram *zram,
return 1;
}
-static void zram_meta_free(struct zram_meta *meta)
+static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
+ size_t num_pages = disksize >> PAGE_SHIFT;
+ size_t index;
+
+ /* Free all pages that are still in this zram device */
+ for (index = 0; index < num_pages; index++) {
+ unsigned long handle = meta->table[index].handle;
+
+ if (!handle)
+ continue;
+
+ zs_free(meta->mem_pool, handle);
+ }
+
zs_destroy_pool(meta->mem_pool);
vfree(meta->table);
kfree(meta);
}
-static struct zram_meta *zram_meta_alloc(u64 disksize)
+static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
size_t num_pages;
+ char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+
if (!meta)
- goto out;
+ return NULL;
num_pages = disksize >> PAGE_SHIFT;
meta->table = vzalloc(num_pages * sizeof(*meta->table));
if (!meta->table) {
pr_err("Error allocating zram address table\n");
- goto free_meta;
+ goto out_error;
}
- meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
+ meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
- goto free_table;
+ goto out_error;
}
return meta;
-free_table:
+out_error:
vfree(meta->table);
-free_meta:
kfree(meta);
- meta = NULL;
-out:
- return meta;
+ return NULL;
+}
+
+static inline bool zram_meta_get(struct zram *zram)
+{
+ if (atomic_inc_not_zero(&zram->refcount))
+ return true;
+ return false;
+}
+
+static inline void zram_meta_put(struct zram *zram)
+{
+ atomic_dec(&zram->refcount);
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
@@ -704,10 +729,11 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
}
-static void zram_reset_device(struct zram *zram, bool reset_capacity)
+static void zram_reset_device(struct zram *zram)
{
- size_t index;
struct zram_meta *meta;
+ struct zcomp *comp;
+ u64 disksize;
down_write(&zram->init_lock);
@@ -719,36 +745,30 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
}
meta = zram->meta;
- /* Free all pages that are still in this zram device */
- for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- unsigned long handle = meta->table[index].handle;
- if (!handle)
- continue;
-
- zs_free(meta->mem_pool, handle);
- }
-
- zcomp_destroy(zram->comp);
- zram->max_comp_streams = 1;
+ comp = zram->comp;
+ disksize = zram->disksize;
+ /*
+ * The refcount will eventually drop to 0, and the r/w handlers
+ * cannot take on further I/O because the zram_meta_get() check
+ * makes them bail out.
+ */
+ zram_meta_put(zram);
+ /*
+ * We want to free zram_meta in process context to avoid
+ * a deadlock between the reclaim path and any other locks.
+ */
+ wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
- zram_meta_free(zram->meta);
- zram->meta = NULL;
/* Reset stats */
memset(&zram->stats, 0, sizeof(zram->stats));
-
zram->disksize = 0;
- if (reset_capacity)
- set_capacity(zram->disk, 0);
+ zram->max_comp_streams = 1;
+ set_capacity(zram->disk, 0);
up_write(&zram->init_lock);
-
- /*
- * Revalidate disk out of the init_lock to avoid lockdep splat.
- * It's okay because disk's capacity is protected by init_lock
- * so that revalidate_disk always sees up-to-date capacity.
- */
- if (reset_capacity)
- revalidate_disk(zram->disk);
+ /* all in-flight I/O on every CPU is done, so free the resources */
+ zram_meta_free(meta, disksize);
+ zcomp_destroy(comp);
}
static ssize_t disksize_store(struct device *dev,
@@ -765,7 +785,7 @@ static ssize_t disksize_store(struct device *dev,
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(disksize);
+ meta = zram_meta_alloc(zram->disk->first_minor, disksize);
if (!meta)
return -ENOMEM;
@@ -784,6 +804,8 @@ static ssize_t disksize_store(struct device *dev,
goto out_destroy_comp;
}
+ init_waitqueue_head(&zram->io_done);
+ atomic_set(&zram->refcount, 1);
zram->meta = meta;
zram->comp = comp;
zram->disksize = disksize;
@@ -803,7 +825,7 @@ out_destroy_comp:
up_write(&zram->init_lock);
zcomp_destroy(comp);
out_free_meta:
- zram_meta_free(meta);
+ zram_meta_free(meta, disksize);
return err;
}
@@ -821,8 +843,9 @@ static ssize_t reset_store(struct device *dev,
if (!bdev)
return -ENOMEM;
+ mutex_lock(&bdev->bd_mutex);
/* Do not reset an active device! */
- if (bdev->bd_holders) {
+ if (bdev->bd_openers) {
ret = -EBUSY;
goto out;
}
@@ -838,12 +861,16 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all pending I/O is finished */
fsync_bdev(bdev);
+ zram_reset_device(zram);
+
+ mutex_unlock(&bdev->bd_mutex);
+ revalidate_disk(zram->disk);
bdput(bdev);
- zram_reset_device(zram, true);
return len;
out:
+ mutex_unlock(&bdev->bd_mutex);
bdput(bdev);
return ret;
}
@@ -909,23 +936,21 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- down_read(&zram->init_lock);
- if (unlikely(!init_done(zram)))
+ if (unlikely(!zram_meta_get(zram)))
goto error;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
- goto error;
+ goto put_zram;
}
__zram_make_request(zram, bio);
- up_read(&zram->init_lock);
-
+ zram_meta_put(zram);
return;
-
+put_zram:
+ zram_meta_put(zram);
error:
- up_read(&zram->init_lock);
bio_io_error(bio);
}
@@ -947,21 +972,19 @@ static void zram_slot_free_notify(struct block_device *bdev,
static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
- int offset, err;
+ int offset, err = -EIO;
u32 index;
struct zram *zram;
struct bio_vec bv;
zram = bdev->bd_disk->private_data;
+ if (unlikely(!zram_meta_get(zram)))
+ goto out;
+
if (!valid_io_request(zram, sector, PAGE_SIZE)) {
atomic64_inc(&zram->stats.invalid_io);
- return -EINVAL;
- }
-
- down_read(&zram->init_lock);
- if (unlikely(!init_done(zram))) {
- err = -EIO;
- goto out_unlock;
+ err = -EINVAL;
+ goto put_zram;
}
index = sector >> SECTORS_PER_PAGE_SHIFT;
@@ -972,8 +995,9 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_offset = 0;
err = zram_bvec_rw(zram, &bv, index, offset, rw);
-out_unlock:
- up_read(&zram->init_lock);
+put_zram:
+ zram_meta_put(zram);
+out:
/*
* If I/O fails, just return error (i.e., non-zero) without
* calling page_endio.
@@ -1039,19 +1063,19 @@ static struct attribute_group zram_disk_attr_group = {
static int create_device(struct zram *zram, int device_id)
{
+ struct request_queue *queue;
int ret = -ENOMEM;
init_rwsem(&zram->init_lock);
- zram->queue = blk_alloc_queue(GFP_KERNEL);
- if (!zram->queue) {
+ queue = blk_alloc_queue(GFP_KERNEL);
+ if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
goto out;
}
- blk_queue_make_request(zram->queue, zram_make_request);
- zram->queue->queuedata = zram;
+ blk_queue_make_request(queue, zram_make_request);
/* gendisk structure */
zram->disk = alloc_disk(1);
@@ -1064,7 +1088,8 @@ static int create_device(struct zram *zram, int device_id)
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->fops = &zram_devops;
- zram->disk->queue = zram->queue;
+ zram->disk->queue = queue;
+ zram->disk->queue->queuedata = zram;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
@@ -1115,20 +1140,35 @@ out_free_disk:
del_gendisk(zram->disk);
put_disk(zram->disk);
out_free_queue:
- blk_cleanup_queue(zram->queue);
+ blk_cleanup_queue(queue);
out:
return ret;
}
-static void destroy_device(struct zram *zram)
+static void destroy_devices(unsigned int nr)
{
- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
+ struct zram *zram;
+ unsigned int i;
- del_gendisk(zram->disk);
- put_disk(zram->disk);
+ for (i = 0; i < nr; i++) {
+ zram = &zram_devices[i];
+ /*
+ * Remove sysfs first, so no one will perform a disksize
+ * store while we destroy the devices
+ */
+ sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
+ &zram_disk_attr_group);
+
+ zram_reset_device(zram);
- blk_cleanup_queue(zram->queue);
+ blk_cleanup_queue(zram->disk->queue);
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+ }
+
+ kfree(zram_devices);
+ unregister_blkdev(zram_major, "zram");
+ pr_info("Destroyed %u device(s)\n", nr);
}
static int __init zram_init(void)
@@ -1138,64 +1178,39 @@ static int __init zram_init(void)
if (num_devices > max_num_devices) {
pr_warn("Invalid value for num_devices: %u\n",
num_devices);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_warn("Unable to get major number\n");
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
/* Allocate the device array and initialize each one */
zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
- ret = -ENOMEM;
- goto unregister;
+ unregister_blkdev(zram_major, "zram");
+ return -ENOMEM;
}
for (dev_id = 0; dev_id < num_devices; dev_id++) {
ret = create_device(&zram_devices[dev_id], dev_id);
if (ret)
- goto free_devices;
+ goto out_error;
}
- pr_info("Created %u device(s) ...\n", num_devices);
-
+ pr_info("Created %u device(s)\n", num_devices);
return 0;
-free_devices:
- while (dev_id)
- destroy_device(&zram_devices[--dev_id]);
- kfree(zram_devices);
-unregister:
- unregister_blkdev(zram_major, "zram");
-out:
+out_error:
+ destroy_devices(dev_id);
return ret;
}
static void __exit zram_exit(void)
{
- int i;
- struct zram *zram;
-
- for (i = 0; i < num_devices; i++) {
- zram = &zram_devices[i];
-
- destroy_device(zram);
- /*
- * Shouldn't access zram->disk after destroy_device
- * because destroy_device already released zram->disk.
- */
- zram_reset_device(zram, false);
- }
-
- unregister_blkdev(zram_major, "zram");
-
- kfree(zram_devices);
- pr_debug("Cleanup done!\n");
+ destroy_devices(num_devices);
}
module_init(zram_init);
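
The zram rework above replaces init_lock protection of the I/O path with a refcount that starts at 1: I/O paths take it with atomic_inc_not_zero(), and the reset path drops the initial reference and waits on io_done until in-flight I/O drains. A self-contained sketch of that drain pattern with hypothetical names; the wake_up() placement in the put path is an assumption of the sketch, not a claim about the driver:

#include <linux/atomic.h>
#include <linux/wait.h>

struct sketch_zdev {
	atomic_t refcount;		/* 1 while the device is live */
	wait_queue_head_t io_done;
};

static void sketch_zdev_init(struct sketch_zdev *d)
{
	atomic_set(&d->refcount, 1);
	init_waitqueue_head(&d->io_done);
}

/* I/O paths: fail fast once a reset has dropped the initial reference */
static bool sketch_zdev_get(struct sketch_zdev *d)
{
	return atomic_inc_not_zero(&d->refcount);
}

static void sketch_zdev_put(struct sketch_zdev *d)
{
	if (atomic_dec_and_test(&d->refcount))
		wake_up(&d->io_done);
}

/* reset path: drop the initial reference and wait for in-flight I/O to drain */
static void sketch_zdev_drain(struct sketch_zdev *d)
{
	sketch_zdev_put(d);
	wait_event(d->io_done, atomic_read(&d->refcount) == 0);
}
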
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index b05a816b09ac..17056e589146 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -100,24 +100,25 @@ struct zram_meta {
struct zram {
struct zram_meta *meta;
- struct request_queue *queue;
- struct gendisk *disk;
struct zcomp *comp;
-
- /* Prevent concurrent execution of device init, reset and R/W request */
+ struct gendisk *disk;
+ /* Prevent concurrent execution of device init */
struct rw_semaphore init_lock;
/*
- * This is the limit on amount of *uncompressed* worth of data
- * we can store in a disk.
+ * the number of pages zram can consume for storing compressed data
*/
- u64 disksize; /* bytes */
+ unsigned long limit_pages;
int max_comp_streams;
+
struct zram_stats stats;
+ atomic_t refcount; /* refcount for zram_meta */
+ /* wait until all in-flight I/O on every CPU is done */
+ wait_queue_head_t io_done;
/*
- * the number of pages zram can consume for storing compressed data
+ * This is the limit on amount of *uncompressed* worth of data
+ * we can store in a disk.
*/
- unsigned long limit_pages;
-
+ u64 disksize; /* bytes */
char compressor[10];
};
#endif
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 1ee27ac18de0..de4c8499cbac 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -108,6 +108,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
+ { USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
@@ -174,6 +176,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
#define USB_REQ_DFU_DNLOAD 1
#define BULK_SIZE 4096
#define FW_HDR_SIZE 20
+#define TIMEGAP_USEC_MIN 50
+#define TIMEGAP_USEC_MAX 100
static int ath3k_load_firmware(struct usb_device *udev,
const struct firmware *firmware)
@@ -205,6 +209,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
pipe = usb_sndbulkpipe(udev, 0x02);
while (count) {
+ /* work around a compatibility issue with the xHCI controller */
+ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
size = min_t(uint, count, BULK_SIZE);
memcpy(send_buf, firmware->data + sent, size);
@@ -302,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
pipe = usb_sndbulkpipe(udev, 0x02);
while (count) {
+ /* work around a compatibility issue with the xHCI controller */
+ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
size = min_t(uint, count, BULK_SIZE);
memcpy(send_buf, firmware->data + sent, size);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index b2e7e94a6771..fcfb72e9e0ee 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -696,6 +696,8 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
+ set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 330f8f84928d..e75f8ee2512c 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -28,9 +28,9 @@
#define BTM_UPLD_SIZE 2312
/* Time to wait until Host Sleep state change in millisecond */
-#define WAIT_UNTIL_HS_STATE_CHANGED 5000
+#define WAIT_UNTIL_HS_STATE_CHANGED msecs_to_jiffies(5000)
/* Time to wait for command response in millisecond */
-#define WAIT_UNTIL_CMD_RESP 5000
+#define WAIT_UNTIL_CMD_RESP msecs_to_jiffies(5000)
enum rdwr_status {
RDWR_STATUS_SUCCESS = 0,
@@ -104,6 +104,7 @@ struct btmrvl_private {
#ifdef CONFIG_DEBUG_FS
void *debugfs_data;
#endif
+ bool surprise_removed;
};
#define MRVL_VENDOR_PKT 0xFE
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 30939c993d94..413597789c61 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -178,6 +178,11 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
struct sk_buff *skb;
struct hci_command_hdr *hdr;
+ if (priv->surprise_removed) {
+ BT_ERR("Card is removed");
+ return -EFAULT;
+ }
+
skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
@@ -202,10 +207,14 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
- priv->adapter->cmd_complete,
- msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+ priv->adapter->cmd_complete ||
+ priv->surprise_removed,
+ WAIT_UNTIL_CMD_RESP))
return -ETIMEDOUT;
+ if (priv->surprise_removed)
+ return -EFAULT;
+
return 0;
}
@@ -287,9 +296,10 @@ int btmrvl_enable_hs(struct btmrvl_private *priv)
}
ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
- adapter->hs_state,
- msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
- if (ret < 0) {
+ adapter->hs_state ||
+ priv->surprise_removed,
+ WAIT_UNTIL_HS_STATE_CHANGED);
+ if (ret < 0 || priv->surprise_removed) {
BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
ret, adapter->hs_state, adapter->ps_state,
adapter->wakeup_tries);
@@ -538,8 +548,11 @@ static int btmrvl_check_device_tree(struct btmrvl_private *priv)
static int btmrvl_setup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
+ int ret;
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ ret = btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ if (ret)
+ return ret;
priv->btmrvl_dev.gpio_gap = 0xffff;
@@ -597,7 +610,7 @@ static int btmrvl_service_main_thread(void *data)
add_wait_queue(&thread->wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
+ if (kthread_should_stop() || priv->surprise_removed) {
BT_DBG("main_thread: break from main thread");
break;
}
@@ -616,6 +629,11 @@ static int btmrvl_service_main_thread(void *data)
BT_DBG("main_thread woke up");
+ if (kthread_should_stop() || priv->surprise_removed) {
+ BT_DBG("main_thread: break from main thread");
+ break;
+ }
+
spin_lock_irqsave(&priv->driver_lock, flags);
if (adapter->int_count) {
adapter->int_count = 0;
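
The btmrvl changes above add a surprise_removed flag to every wait so that a pending command wait wakes up when the card is yanked instead of sitting out the full timeout. A sketch of that wait, distinguishing timeout, signal, and removal; the adapter layout and the sketch_* names are hypothetical, and the -EFAULT return simply mirrors the error code the hunk uses:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

#define SKETCH_CMD_TIMEOUT	msecs_to_jiffies(5000)

struct sketch_adapter {
	wait_queue_head_t cmd_wait_q;	/* assumed initialised at probe time */
	bool cmd_complete;
	bool surprise_removed;
};

/* wait for the response, but wake early if the card disappears */
static int sketch_wait_cmd_resp(struct sketch_adapter *a)
{
	long ret;

	ret = wait_event_interruptible_timeout(a->cmd_wait_q,
					       a->cmd_complete ||
					       a->surprise_removed,
					       SKETCH_CMD_TIMEOUT);
	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (a->surprise_removed)
		return -EFAULT;

	return 0;
}
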
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0057c0b7a776..01d6da577eeb 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -573,7 +573,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
offset += txlen;
} while (true);
- BT_DBG("FW download over, size %d bytes", offset);
+ BT_INFO("FW download over, size %d bytes", offset);
ret = 0;
@@ -798,6 +798,9 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
priv = card->priv;
+ if (priv->surprise_removed)
+ return;
+
if (card->reg->int_read_to_clear)
ret = btmrvl_sdio_read_to_clear(card, &ireg);
else
@@ -1466,6 +1469,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
btmrvl_sdio_disable_host_int(card);
}
BT_DBG("unregister dev");
+ card->priv->surprise_removed = true;
btmrvl_sdio_unregister_dev(card);
btmrvl_remove_card(card->priv);
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 19cf2cf22e87..b87688881143 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -28,7 +28,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define VERSION "0.6"
+#define VERSION "0.7"
static bool disable_scofix;
static bool force_scofix;
@@ -49,11 +49,17 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BOOT 0x200
#define BTUSB_BCM_PATCHRAM 0x400
#define BTUSB_MARVELL 0x800
+#define BTUSB_SWAVE 0x1000
+#define BTUSB_INTEL_NEW 0x2000
+#define BTUSB_AMP 0x4000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ /* Generic Bluetooth AMP device */
+ { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
+
/* Apple-specific (Broadcom) devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
@@ -85,7 +91,7 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x05ac, 0x8281) },
/* AVM BlueFRITZ! USB v2.0 */
- { USB_DEVICE(0x057c, 0x3800) },
+ { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_SWAVE },
/* Bluetooth Ultraport Module from IBM */
{ USB_DEVICE(0x04bf, 0x030a) },
@@ -109,16 +115,24 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x13d3, 0x3404),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Broadcom BCM20702B0 (Dynex/Insignia) */
+ { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
+
/* Foxconn - Hon Hai */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Lite-On Technology - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
/* Broadcom devices with vendor specific id */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* ASUSTek Computer - Broadcom based */
- { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Belkin F8065bf - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
@@ -188,6 +202,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
@@ -237,6 +252,9 @@ static const struct usb_device_id blacklist_table[] = {
/* CONWISE Technology based adapters with buggy SCO support */
{ USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
+ /* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
+ { USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
+
/* Digianswer devices */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
{ USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
@@ -249,13 +267,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x16d3, 0x0002),
.driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC },
- /* Intel Bluetooth device */
+ /* Marvell Bluetooth devices */
+ { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
+ { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+
+ /* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
- /* Marvell device */
- { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
- { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+ /* Other Intel Bluetooth devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
+ .driver_info = BTUSB_IGNORE },
{ } /* Terminating entry */
};
@@ -267,6 +290,11 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_ISOC_RUNNING 2
#define BTUSB_SUSPENDING 3
#define BTUSB_DID_ISO_RESUME 4
+#define BTUSB_BOOTLOADER 5
+#define BTUSB_DOWNLOADING 6
+#define BTUSB_FIRMWARE_LOADED 7
+#define BTUSB_FIRMWARE_FAILED 8
+#define BTUSB_BOOTING 9
struct btusb_data {
struct hci_dev *hdev;
@@ -300,14 +328,26 @@ struct btusb_data {
struct usb_endpoint_descriptor *isoc_rx_ep;
__u8 cmdreq_type;
+ __u8 cmdreq;
unsigned int sco_num;
int isoc_altsetting;
int suspend_count;
+ int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
};
+static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout,
+ unsigned mode)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout,
+ mode, timeout);
+}
+
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@@ -370,7 +410,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
if (bt_cb(skb)->expect == 0) {
/* Complete frame */
- hci_recv_frame(data->hdev, skb);
+ data->recv_event(data->hdev, skb);
skb = NULL;
}
}
@@ -952,7 +992,7 @@ static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb)
}
dr->bRequestType = data->cmdreq_type;
- dr->bRequest = 0;
+ dr->bRequest = data->cmdreq;
dr->wIndex = 0;
dr->wValue = 0;
dr->wLength = __cpu_to_le16(skb->len);
@@ -1290,6 +1330,26 @@ struct intel_version {
u8 fw_patch_num;
} __packed;
+struct intel_boot_params {
+ __u8 status;
+ __u8 otp_format;
+ __u8 otp_content;
+ __u8 otp_patch;
+ __le16 dev_revid;
+ __u8 secure_boot;
+ __u8 key_from_hdr;
+ __u8 key_type;
+ __u8 otp_lock;
+ __u8 api_lock;
+ __u8 debug_lock;
+ bdaddr_t otp_bdaddr;
+ __u8 min_fw_build_nn;
+ __u8 min_fw_build_cw;
+ __u8 min_fw_build_yy;
+ __u8 limited_cce;
+ __u8 unlocked_state;
+} __packed;
+
static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1698,6 +1758,562 @@ exit_mfg_deactivate:
return 0;
}
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+ struct sk_buff *skb;
+ struct hci_event_hdr *hdr;
+ struct hci_ev_cmd_complete *evt;
+
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr->evt = HCI_EV_CMD_COMPLETE;
+ hdr->plen = sizeof(*evt) + 1;
+
+ evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+ evt->ncmd = 0x01;
+ evt->opcode = cpu_to_le16(opcode);
+
+ *skb_put(skb, 1) = 0x00;
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
+ int count)
+{
+ /* When the device is in bootloader mode, then it can send
+ * events via the bulk endpoint. These events are treated the
+ * same way as the ones received from the interrupt endpoint.
+ */
+ if (test_bit(BTUSB_BOOTLOADER, &data->flags))
+ return btusb_recv_intr(data, buffer, count);
+
+ return btusb_recv_bulk(data, buffer, count);
+}
+
+static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+
+ if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ struct hci_event_hdr *hdr = (void *)skb->data;
+
+ /* When the firmware loading completes the device sends
+ * out a vendor specific event indicating the result of
+ * the firmware loading.
+ */
+ if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+ skb->data[2] == 0x06) {
+ if (skb->data[3] != 0x00)
+ set_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
+
+ if (test_and_clear_bit(BTUSB_DOWNLOADING,
+ &data->flags) &&
+ test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
+ }
+ }
+
+ /* When switching to the operational firmware the device
+ * sends a vendor specific event indicating that the bootup
+ * completed.
+ */
+ if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+ skb->data[2] == 0x02) {
+ if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags, BTUSB_BOOTING);
+ }
+ }
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct urb *urb;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -EBUSY;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ struct hci_command_hdr *cmd = (void *)skb->data;
+ __u16 opcode = le16_to_cpu(cmd->opcode);
+
+ /* When in bootloader mode and the command 0xfc09
+ * is received, it needs to be sent down the
+ * bulk endpoint. So allocate a bulk URB instead.
+ */
+ if (opcode == 0xfc09)
+ urb = alloc_bulk_urb(hdev, skb);
+ else
+ urb = alloc_ctrl_urb(hdev, skb);
+
+ /* When the 0xfc01 command is issued to boot into
+ * the operational firmware, it will actually not
+ * send a command complete event. To keep the flow
+ * control working, inject that event here.
+ */
+ if (opcode == 0xfc01)
+ inject_cmd_complete(hdev, opcode);
+ } else {
+ urb = alloc_ctrl_urb(hdev, skb);
+ }
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
+
+ hdev->stat.cmd_tx++;
+ return submit_or_queue_tx_urb(hdev, urb);
+
+ case HCI_ACLDATA_PKT:
+ urb = alloc_bulk_urb(hdev, skb);
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
+
+ hdev->stat.acl_tx++;
+ return submit_or_queue_tx_urb(hdev, urb);
+
+ case HCI_SCODATA_PKT:
+ if (hci_conn_num(hdev, SCO_LINK) < 1)
+ return -ENODEV;
+
+ urb = alloc_isoc_urb(hdev, skb);
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
+
+ hdev->stat.sco_tx++;
+ return submit_tx_urb(hdev, urb);
+ }
+
+ return -EILSEQ;
+}
+
+static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+ u32 plen, const void *param)
+{
+ while (plen > 0) {
+ struct sk_buff *skb;
+ u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+ cmd_param[0] = fragment_type;
+ memcpy(cmd_param + 1, param, fragment_len);
+
+ skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+ cmd_param, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ plen -= fragment_len;
+ param += fragment_len;
+ }
+
+ return 0;
+}
+
+static void btusb_intel_version_info(struct hci_dev *hdev,
+ struct intel_version *ver)
+{
+ const char *variant;
+
+ switch (ver->fw_variant) {
+ case 0x06:
+ variant = "Bootloader";
+ break;
+ case 0x23:
+ variant = "Firmware";
+ break;
+ default:
+ return;
+ }
+
+ BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+ variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+ ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+
+static int btusb_setup_intel_new(struct hci_dev *hdev)
+{
+ static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x08, 0x04, 0x00 };
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct sk_buff *skb;
+ struct intel_version *ver;
+ struct intel_boot_params *params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ char fwname[64];
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ calltime = ktime_get();
+
+ /* Read the Intel version information to determine if the device
+ * is in bootloader mode or if it already has operational firmware
+ * loaded.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel version information failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: Intel version event size mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ ver = (struct intel_version *)skb->data;
+ if (ver->status) {
+ BT_ERR("%s: Intel version command failure (%02x)",
+ hdev->name, ver->status);
+ err = -bt_to_errno(ver->status);
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* The hardware platform number has a fixed value of 0x37 and
+ * for now only this single value is accepted.
+ */
+ if (ver->hw_platform != 0x37) {
+ BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+ hdev->name, ver->hw_platform);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+ * supported by this firmware loading method. This check has been
+ * put in place to ensure correct forward compatibility options
+ * when newer hardware variants come along.
+ */
+ if (ver->hw_variant != 0x0b) {
+ BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+ hdev->name, ver->hw_variant);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ btusb_intel_version_info(hdev, ver);
+
+ /* The firmware variant determines if the device is in bootloader
+ * mode or is running operational firmware. The value 0x06 identifies
+ * the bootloader and the value 0x23 identifies the operational
+ * firmware.
+ *
+ * When the operational firmware is already present, then only
+ * the check for valid Bluetooth device address is needed. This
+ * determines if the device will be added as configured or
+ * unconfigured controller.
+ *
+ * It is not possible to use the Secure Boot Parameters in this
+ * case since that command is only available in bootloader mode.
+ */
+ if (ver->fw_variant == 0x23) {
+ kfree_skb(skb);
+ clear_bit(BTUSB_BOOTLOADER, &data->flags);
+ btusb_check_bdaddr_intel(hdev);
+ return 0;
+ }
+
+ /* If the device is not in bootloader mode, then the only possible
+ * choice is to return an error and abort the device initialization.
+ */
+ if (ver->fw_variant != 0x06) {
+ BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+ hdev->name, ver->fw_variant);
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ kfree_skb(skb);
+
+ /* Read the secure boot parameters to identify the operating
+ * details of the bootloader.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*params)) {
+ BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ params = (struct intel_boot_params *)skb->data;
+ if (params->status) {
+ BT_ERR("%s: Intel boot parameters command failure (%02x)",
+ hdev->name, params->status);
+ err = -bt_to_errno(params->status);
+ kfree_skb(skb);
+ return err;
+ }
+
+ BT_INFO("%s: Device revision is %u", hdev->name,
+ le16_to_cpu(params->dev_revid));
+
+ BT_INFO("%s: Secure boot is %s", hdev->name,
+ params->secure_boot ? "enabled" : "disabled");
+
+ BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+ params->min_fw_build_nn, params->min_fw_build_cw,
+ 2000 + params->min_fw_build_yy);
+
+ /* It is required that every single firmware fragment is acknowledged
+ * with a command complete event. If the boot parameters indicate
+ * that this bootloader does not send them, then abort the setup.
+ */
+ if (params->limited_cce != 0x00) {
+ BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+ hdev->name, params->limited_cce);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* If the OTP has no valid Bluetooth device address, then there will
+ * also be no valid address for the operational firmware.
+ */
+ if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ BT_INFO("%s: No device address configured", hdev->name);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
+
+ /* With this Intel bootloader only the hardware variant and device
+ * revision information are used to select the right firmware.
+ *
+ * Currently this bootloader support is limited to hardware variant
+ * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+ */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+ le16_to_cpu(params->dev_revid));
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ BT_ERR("%s: Failed to load Intel firmware file (%d)",
+ hdev->name, err);
+ kfree_skb(skb);
+ return err;
+ }
+
+ BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+ kfree_skb(skb);
+
+ if (fw->size < 644) {
+ BT_ERR("%s: Invalid size of firmware file (%zu)",
+ hdev->name, fw->size);
+ err = -EBADF;
+ goto done;
+ }
+
+ set_bit(BTUSB_DOWNLOADING, &data->flags);
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of CSS header.
+ */
+ err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware header (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware public key (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of signature information from the firmware
+ * as the Sign fragment.
+ */
+ err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware signature (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ fw_ptr = fw->data + 644;
+
+ while (fw_ptr - fw->data < fw->size) {
+ struct hci_command_hdr *cmd = (void *)fw_ptr;
+ u8 cmd_len;
+
+ cmd_len = sizeof(*cmd) + cmd->plen;
+
+ /* Send each command from the firmware data buffer as
+ * a single Data fragment.
+ */
+ err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware data (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ fw_ptr += cmd_len;
+ }
+
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+
+ BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+ /* Before switching the device into operational mode and with that
+ * booting the loaded firmware, wait for the bootloader notification
+ * that all fragments have been successfully received.
+ *
+ * When the event processing receives the notification, the
+ * BTUSB_DOWNLOADING flag will be cleared.
+ *
+ * The firmware loading should not take longer than 5 seconds,
+ * so just time out if that happens and fail the setup of
+ * this device.
+ */
+ err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+ msecs_to_jiffies(5000),
+ TASK_INTERRUPTIBLE);
+ if (err == 1) {
+ BT_ERR("%s: Firmware loading interrupted", hdev->name);
+ err = -EINTR;
+ goto done;
+ }
+
+ if (err) {
+ BT_ERR("%s: Firmware loading timeout", hdev->name);
+ err = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
+ BT_ERR("%s: Firmware loading failed", hdev->name);
+ err = -ENOEXEC;
+ goto done;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+ release_firmware(fw);
+
+ if (err < 0)
+ return err;
+
+ calltime = ktime_get();
+
+ set_bit(BTUSB_BOOTING, &data->flags);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ /* The bootloader will not indicate when the device is ready. This
+ * is done by the operational firmware sending a bootup notification.
+ *
+ * Booting into the operational firmware should not take longer than
+ * 1 second. If it does take longer, just fail the setup since
+ * something went wrong.
+ */
+ BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+ err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
+ msecs_to_jiffies(1000),
+ TASK_INTERRUPTIBLE);
+
+ if (err == 1) {
+ BT_ERR("%s: Device boot interrupted", hdev->name);
+ return -EINTR;
+ }
+
+ if (err) {
+ BT_ERR("%s: Device boot timeout", hdev->name);
+ return -ETIMEDOUT;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+ clear_bit(BTUSB_BOOTLOADER, &data->flags);
+
+ return 0;
+}
+
+static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
+{
+ struct sk_buff *skb;
+ u8 type = 0x00;
+
+ BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reset after hardware error failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return;
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len != 13) {
+ BT_ERR("%s: Exception info size mismatch", hdev->name);
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb->data[0] != 0x00) {
+ BT_ERR("%s: Exception info command failure (%02x)",
+ hdev->name, skb->data[0]);
+ kfree_skb(skb);
+ return;
+ }
+
+ BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+ kfree_skb(skb);
+}
+
static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
@@ -1943,6 +2559,31 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
+static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 buf[10];
+ long ret;
+
+ buf[0] = 0x01;
+ buf[1] = 0x01;
+ buf[2] = 0x00;
+ buf[3] = sizeof(bdaddr_t);
+ memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
+
+ skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: Change address command failed (%ld)",
+ hdev->name, ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2003,7 +2644,13 @@ static int btusb_probe(struct usb_interface *intf,
if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
return -ENODEV;
- data->cmdreq_type = USB_TYPE_CLASS;
+ if (id->driver_info & BTUSB_AMP) {
+ data->cmdreq_type = USB_TYPE_CLASS | 0x01;
+ data->cmdreq = 0x2b;
+ } else {
+ data->cmdreq_type = USB_TYPE_CLASS;
+ data->cmdreq = 0x00;
+ }
data->udev = interface_to_usbdev(intf);
data->intf = intf;
@@ -2019,7 +2666,14 @@ static int btusb_probe(struct usb_interface *intf,
init_usb_anchor(&data->isoc_anchor);
spin_lock_init(&data->rxlock);
- data->recv_bulk = btusb_recv_bulk;
+ if (id->driver_info & BTUSB_INTEL_NEW) {
+ data->recv_event = btusb_recv_event_intel;
+ data->recv_bulk = btusb_recv_bulk_intel;
+ set_bit(BTUSB_BOOTLOADER, &data->flags);
+ } else {
+ data->recv_event = hci_recv_frame;
+ data->recv_bulk = btusb_recv_bulk;
+ }
hdev = hci_alloc_dev();
if (!hdev)
@@ -2028,6 +2682,11 @@ static int btusb_probe(struct usb_interface *intf,
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data);
+ if (id->driver_info & BTUSB_AMP)
+ hdev->dev_type = HCI_AMP;
+ else
+ hdev->dev_type = HCI_BREDR;
+
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -2050,16 +2709,40 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_INTEL) {
hdev->setup = btusb_setup_intel;
hdev->set_bdaddr = btusb_set_bdaddr_intel;
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ }
+
+ if (id->driver_info & BTUSB_INTEL_NEW) {
+ hdev->send = btusb_send_frame_intel;
+ hdev->setup = btusb_setup_intel_new;
+ hdev->hw_error = btusb_hw_error_intel;
+ hdev->set_bdaddr = btusb_set_bdaddr_intel;
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
+ if (id->driver_info & BTUSB_SWAVE) {
+ set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ }
+
if (id->driver_info & BTUSB_INTEL_BOOT)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
- /* Interface numbers are hardcoded in the specification */
- data->isoc = usb_ifnum_to_if(data->udev, 1);
+ if (id->driver_info & BTUSB_ATH3012) {
+ hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ }
+
+ if (id->driver_info & BTUSB_AMP) {
+ /* AMP controllers do not support SCO packets */
+ data->isoc = NULL;
+ } else {
+ /* Interface numbers are hardcoded in the specification */
+ data->isoc = usb_ifnum_to_if(data->udev, 1);
+ }
if (!reset)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
@@ -2153,7 +2836,6 @@ static void btusb_disconnect(struct usb_interface *intf)
else if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
- btusb_free_frags(data);
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 2ff6dfd2d3f0..9c4dcf4c62ea 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -51,33 +51,22 @@ struct ath_struct {
static int ath_wakeup_ar3k(struct tty_struct *tty)
{
- struct ktermios ktermios;
int status = tty->driver->ops->tiocmget(tty);
if (status & TIOCM_CTS)
return status;
- /* Disable Automatic RTSCTS */
- ktermios = tty->termios;
- ktermios.c_cflag &= ~CRTSCTS;
- tty_set_termios(tty, &ktermios);
-
/* Clear RTS first */
- status = tty->driver->ops->tiocmget(tty);
+ tty->driver->ops->tiocmget(tty);
tty->driver->ops->tiocmset(tty, 0x00, TIOCM_RTS);
mdelay(20);
/* Set RTS, wake up board */
- status = tty->driver->ops->tiocmget(tty);
+ tty->driver->ops->tiocmget(tty);
tty->driver->ops->tiocmset(tty, TIOCM_RTS, 0x00);
mdelay(20);
status = tty->driver->ops->tiocmget(tty);
-
- /* Enable Automatic RTSCTS */
- ktermios.c_cflag |= CRTSCTS;
- status = tty_set_termios(tty, &ktermios);
-
return status;
}
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 860da40b78ef..84fd66057dad 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -806,8 +806,8 @@ static int cci_pmu_event_init(struct perf_event *event)
static ssize_t pmu_attr_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &pmu->cpus);
-
+ int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ cpumask_pr_args(&pmu->cpus));
buf[n++] = '\n';
buf[n] = '\0';
return n;
@@ -1312,6 +1312,9 @@ static int cci_probe(void)
if (!np)
return -ENODEV;
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
cci_config = of_match_node(arm_cci_matches, np)->data;
if (!cci_config)
return -ENODEV;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index efefd12a0f7b..a4af8221751e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,15 @@ menu "Character devices"
source "drivers/tty/Kconfig"
+config DEVMEM
+ bool "/dev/mem virtual device support"
+ default y
+ help
+ Say Y here if you want to support the /dev/mem device.
+ The /dev/mem device is used to access areas of physical
+ memory.
+ When in doubt, say "Y".
+
config DEVKMEM
bool "/dev/kmem virtual device support"
default y
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index b709749c8639..4eb1c772ded7 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -219,7 +219,10 @@ struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
/* generic functions for user-populated AGP memory types */
struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
void agp_alloc_page_array(size_t size, struct agp_memory *mem);
-void agp_free_page_array(struct agp_memory *mem);
+static inline void agp_free_page_array(struct agp_memory *mem)
+{
+ kvfree(mem->pages);
+}
/* generic routines for agp>=3 */
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 0fbccce1cee9..f002fa5d1887 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -98,17 +98,6 @@ void agp_alloc_page_array(size_t size, struct agp_memory *mem)
}
EXPORT_SYMBOL(agp_alloc_page_array);
-void agp_free_page_array(struct agp_memory *mem)
-{
- if (is_vmalloc_addr(mem->pages)) {
- vfree(mem->pages);
- } else {
- kfree(mem->pages);
- }
-}
-EXPORT_SYMBOL(agp_free_page_array);
-
-
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
struct agp_memory *new;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 92aa43fa8d70..0b4188b9af7c 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -225,7 +225,7 @@ static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
intel_private.driver->write_entry(addr,
i, type);
}
- readl(intel_private.gtt+i-1);
+ wmb();
return 0;
}
@@ -329,7 +329,7 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
break;
}
- writel(addr | pte_flags, intel_private.gtt + entry);
+ writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
@@ -735,7 +735,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
if (flags == AGP_USER_CACHED_MEMORY)
pte_flags |= I830_PTE_SYSTEM_CACHED;
- writel(addr | pte_flags, intel_private.gtt + entry);
+ writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
bool intel_enable_gtt(void)
@@ -858,7 +858,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
- readl(intel_private.gtt+j-1);
+ wmb();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
@@ -875,7 +875,7 @@ static void intel_gtt_insert_pages(unsigned int first_entry,
intel_private.driver->write_entry(addr,
j, flags);
}
- readl(intel_private.gtt+j-1);
+ wmb();
}
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
@@ -938,7 +938,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
- readl(intel_private.gtt+i-1);
+ wmb();
}
EXPORT_SYMBOL(intel_gtt_clear_range);
@@ -1106,7 +1106,7 @@ static void i965_write_entry(dma_addr_t addr,
/* Shift high bits down */
addr |= (addr >> 28) & 0xf0;
- writel(addr | pte_flags, intel_private.gtt + entry);
+ writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
static int i9xx_setup(void)
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d5d4cd82b9f7..5c0baa9ffc64 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -976,8 +976,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
status = acpi_resource_to_address64(res, &addr);
if (ACPI_SUCCESS(status)) {
- hdp->hd_phys_address = addr.minimum;
- hdp->hd_address = ioremap(addr.minimum, addr.address_length);
+ hdp->hd_phys_address = addr.address.minimum;
+ hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1500cfd799a7..32a8a867f7f8 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
+#include <linux/err.h>
#include <asm/uaccess.h>
@@ -53,7 +54,10 @@
static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
+/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
+/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
+static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
@@ -66,6 +70,8 @@ module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
"default entropy content of hwrng per mill");
+static void drop_current_rng(void);
+static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
@@ -81,13 +87,83 @@ static void add_early_randomness(struct hwrng *rng)
unsigned char bytes[16];
int bytes_read;
+ mutex_lock(&reading_mutex);
bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(bytes, bytes_read);
}
-static inline int hwrng_init(struct hwrng *rng)
+static inline void cleanup_rng(struct kref *kref)
{
+ struct hwrng *rng = container_of(kref, struct hwrng, ref);
+
+ if (rng->cleanup)
+ rng->cleanup(rng);
+
+ complete(&rng->cleanup_done);
+}
+
+static int set_current_rng(struct hwrng *rng)
+{
+ int err;
+
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+
+ err = hwrng_init(rng);
+ if (err)
+ return err;
+
+ drop_current_rng();
+ current_rng = rng;
+
+ return 0;
+}
+
+static void drop_current_rng(void)
+{
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+ if (!current_rng)
+ return;
+
+ /* decrease last reference for triggering the cleanup */
+ kref_put(&current_rng->ref, cleanup_rng);
+ current_rng = NULL;
+}
+
+/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng(void)
+{
+ struct hwrng *rng;
+
+ if (mutex_lock_interruptible(&rng_mutex))
+ return ERR_PTR(-ERESTARTSYS);
+
+ rng = current_rng;
+ if (rng)
+ kref_get(&rng->ref);
+
+ mutex_unlock(&rng_mutex);
+ return rng;
+}
+
+static void put_rng(struct hwrng *rng)
+{
+ /*
+ * Hold rng_mutex here so we serialize in case they set_current_rng
+ * on rng again immediately.
+ */
+ mutex_lock(&rng_mutex);
+ if (rng)
+ kref_put(&rng->ref, cleanup_rng);
+ mutex_unlock(&rng_mutex);
+}
+
+static int hwrng_init(struct hwrng *rng)
+{
+ if (kref_get_unless_zero(&rng->ref))
+ goto skip_init;
+
if (rng->init) {
int ret;
@@ -95,6 +171,11 @@ static inline int hwrng_init(struct hwrng *rng)
if (ret)
return ret;
}
+
+ kref_init(&rng->ref);
+ reinit_completion(&rng->cleanup_done);
+
+skip_init:
add_early_randomness(rng);
current_quality = rng->quality ? : default_quality;
@@ -108,12 +189,6 @@ static inline int hwrng_init(struct hwrng *rng)
return 0;
}
-static inline void hwrng_cleanup(struct hwrng *rng)
-{
- if (rng && rng->cleanup)
- rng->cleanup(rng);
-}
-
static int rng_dev_open(struct inode *inode, struct file *filp)
{
/* enforce read-only access to this chrdev */
@@ -128,6 +203,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait) {
int present;
+ BUG_ON(!mutex_is_locked(&reading_mutex));
if (rng->read)
return rng->read(rng, (void *)buffer, size, wait);
@@ -148,25 +224,27 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
ssize_t ret = 0;
int err = 0;
int bytes_read, len;
+ struct hwrng *rng;
while (size) {
- if (mutex_lock_interruptible(&rng_mutex)) {
- err = -ERESTARTSYS;
+ rng = get_current_rng();
+ if (IS_ERR(rng)) {
+ err = PTR_ERR(rng);
goto out;
}
-
- if (!current_rng) {
+ if (!rng) {
err = -ENODEV;
- goto out_unlock;
+ goto out;
}
+ mutex_lock(&reading_mutex);
if (!data_avail) {
- bytes_read = rng_get_data(current_rng, rng_buffer,
+ bytes_read = rng_get_data(rng, rng_buffer,
rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
- goto out_unlock;
+ goto out_unlock_reading;
}
data_avail = bytes_read;
}
@@ -174,7 +252,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (!data_avail) {
if (filp->f_flags & O_NONBLOCK) {
err = -EAGAIN;
- goto out_unlock;
+ goto out_unlock_reading;
}
} else {
len = data_avail;
@@ -186,14 +264,15 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (copy_to_user(buf + ret, rng_buffer + data_avail,
len)) {
err = -EFAULT;
- goto out_unlock;
+ goto out_unlock_reading;
}
size -= len;
ret += len;
}
- mutex_unlock(&rng_mutex);
+ mutex_unlock(&reading_mutex);
+ put_rng(rng);
if (need_resched())
schedule_timeout_interruptible(1);
@@ -205,8 +284,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
out:
return ret ? : err;
-out_unlock:
- mutex_unlock(&rng_mutex);
+
+out_unlock_reading:
+ mutex_unlock(&reading_mutex);
+ put_rng(rng);
goto out;
}
@@ -239,16 +320,9 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
err = -ENODEV;
list_for_each_entry(rng, &rng_list, list) {
if (strcmp(rng->name, buf) == 0) {
- if (rng == current_rng) {
- err = 0;
- break;
- }
- err = hwrng_init(rng);
- if (err)
- break;
- hwrng_cleanup(current_rng);
- current_rng = rng;
err = 0;
+ if (rng != current_rng)
+ err = set_current_rng(rng);
break;
}
}
@@ -261,17 +335,15 @@ static ssize_t hwrng_attr_current_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int err;
ssize_t ret;
- const char *name = "none";
+ struct hwrng *rng;
- err = mutex_lock_interruptible(&rng_mutex);
- if (err)
- return -ERESTARTSYS;
- if (current_rng)
- name = current_rng->name;
- ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
- mutex_unlock(&rng_mutex);
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+ put_rng(rng);
return ret;
}
@@ -305,14 +377,14 @@ static DEVICE_ATTR(rng_available, S_IRUGO,
NULL);
-static void unregister_miscdev(void)
+static void __exit unregister_miscdev(void)
{
device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
misc_deregister(&rng_miscdev);
}
-static int register_miscdev(void)
+static int __init register_miscdev(void)
{
int err;
@@ -342,15 +414,22 @@ static int hwrng_fillfn(void *unused)
long rc;
while (!kthread_should_stop()) {
- if (!current_rng)
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng) || !rng)
break;
- rc = rng_get_data(current_rng, rng_fillbuf,
+ mutex_lock(&reading_mutex);
+ rc = rng_get_data(rng, rng_fillbuf,
rng_buffer_size(), 1);
+ mutex_unlock(&reading_mutex);
+ put_rng(rng);
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
continue;
}
+ /* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
}
@@ -400,23 +479,16 @@ int hwrng_register(struct hwrng *rng)
goto out_unlock;
}
+ init_completion(&rng->cleanup_done);
+ complete(&rng->cleanup_done);
+
old_rng = current_rng;
- if (!old_rng) {
- err = hwrng_init(rng);
- if (err)
- goto out_unlock;
- current_rng = rng;
- }
err = 0;
if (!old_rng) {
- err = register_miscdev();
- if (err) {
- hwrng_cleanup(rng);
- current_rng = NULL;
+ err = set_current_rng(rng);
+ if (err)
goto out_unlock;
- }
}
- INIT_LIST_HEAD(&rng->list);
list_add_tail(&rng->list, &rng_list);
if (old_rng && !rng->init) {
@@ -439,42 +511,49 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
- int err;
-
mutex_lock(&rng_mutex);
list_del(&rng->list);
if (current_rng == rng) {
- hwrng_cleanup(rng);
- if (list_empty(&rng_list)) {
- current_rng = NULL;
- } else {
- current_rng = list_entry(rng_list.prev, struct hwrng, list);
- err = hwrng_init(current_rng);
- if (err)
- current_rng = NULL;
+ drop_current_rng();
+ if (!list_empty(&rng_list)) {
+ struct hwrng *tail;
+
+ tail = list_entry(rng_list.prev, struct hwrng, list);
+
+ set_current_rng(tail);
}
}
+
if (list_empty(&rng_list)) {
- unregister_miscdev();
+ mutex_unlock(&rng_mutex);
if (hwrng_fill)
kthread_stop(hwrng_fill);
- }
+ } else
+ mutex_unlock(&rng_mutex);
- mutex_unlock(&rng_mutex);
+ wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
-static void __exit hwrng_exit(void)
+static int __init hwrng_modinit(void)
+{
+ return register_miscdev();
+}
+
+static void __exit hwrng_modexit(void)
{
mutex_lock(&rng_mutex);
BUG_ON(current_rng);
kfree(rng_buffer);
kfree(rng_fillbuf);
mutex_unlock(&rng_mutex);
+
+ unregister_miscdev();
}
-module_exit(hwrng_exit);
+module_init(hwrng_modinit);
+module_exit(hwrng_modexit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 72295ea2fd1c..3fa2f8a009b3 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -39,7 +39,6 @@ struct virtrng_info {
bool hwrng_removed;
};
-
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index e34a019eb930..24cc4ed9a780 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -5,7 +5,8 @@
*
* Hwmon integration:
* Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
- * Copyright (C) 2013 Guenter Roeck <linux@roeck-us.net>
+ * Copyright (C) 2013, 2014 Guenter Roeck <linux@roeck-us.net>
+ * Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -41,11 +43,15 @@
#define I8K_SMM_SET_FAN 0x01a3
#define I8K_SMM_GET_FAN 0x00a3
#define I8K_SMM_GET_SPEED 0x02a3
+#define I8K_SMM_GET_FAN_TYPE 0x03a3
+#define I8K_SMM_GET_NOM_SPEED 0x04a3
#define I8K_SMM_GET_TEMP 0x10a3
+#define I8K_SMM_GET_TEMP_TYPE 0x11a3
#define I8K_SMM_GET_DELL_SIG1 0xfea3
#define I8K_SMM_GET_DELL_SIG2 0xffa3
#define I8K_FAN_MULT 30
+#define I8K_FAN_MAX_RPM 30000
#define I8K_MAX_TEMP 127
#define I8K_FN_NONE 0x00
@@ -58,15 +64,13 @@
#define I8K_POWER_AC 0x05
#define I8K_POWER_BATTERY 0x01
-#define I8K_TEMPERATURE_BUG 1
-
static DEFINE_MUTEX(i8k_mutex);
static char bios_version[4];
static struct device *i8k_hwmon_dev;
static u32 i8k_hwmon_flags;
-static int i8k_fan_mult;
-static int i8k_pwm_mult;
-static int i8k_fan_max = I8K_FAN_HIGH;
+static uint i8k_fan_mult = I8K_FAN_MULT;
+static uint i8k_pwm_mult;
+static uint i8k_fan_max = I8K_FAN_HIGH;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -95,13 +99,13 @@ static bool power_status;
module_param(power_status, bool, 0600);
MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
-static int fan_mult = I8K_FAN_MULT;
-module_param(fan_mult, int, 0);
-MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
+static uint fan_mult;
+module_param(fan_mult, uint, 0);
+MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with (default: autodetect)");
-static int fan_max = I8K_FAN_HIGH;
-module_param(fan_max, int, 0);
-MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed");
+static uint fan_max;
+module_param(fan_max, uint, 0);
+MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)");
static int i8k_open_fs(struct inode *inode, struct file *file);
static long i8k_ioctl(struct file *, unsigned int, unsigned long);
@@ -276,6 +280,28 @@ static int i8k_get_fan_speed(int fan)
}
/*
+ * Read the fan type.
+ */
+static int i8k_get_fan_type(int fan)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+
+ regs.ebx = fan & 0xff;
+ return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
+/*
+ * Read the fan nominal rpm for a specific fan speed.
+ */
+static int i8k_get_fan_nominal_speed(int fan, int speed)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+
+ regs.ebx = (fan & 0xff) | (speed << 8);
+ return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
+}
+
+/*
* Set the fan speed (off, low, high). Returns the new fan status.
*/
static int i8k_set_fan(int fan, int speed)
@@ -288,42 +314,52 @@ static int i8k_set_fan(int fan, int speed)
return i8k_smm(&regs) ? : i8k_get_fan_status(fan);
}
+static int i8k_get_temp_type(int sensor)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_GET_TEMP_TYPE, };
+
+ regs.ebx = sensor & 0xff;
+ return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
/*
* Read the cpu temperature.
*/
-static int i8k_get_temp(int sensor)
+static int _i8k_get_temp(int sensor)
{
- struct smm_regs regs = { .eax = I8K_SMM_GET_TEMP, };
- int rc;
- int temp;
+ struct smm_regs regs = {
+ .eax = I8K_SMM_GET_TEMP,
+ .ebx = sensor & 0xff,
+ };
-#ifdef I8K_TEMPERATURE_BUG
- static int prev[4] = { I8K_MAX_TEMP+1, I8K_MAX_TEMP+1, I8K_MAX_TEMP+1, I8K_MAX_TEMP+1 };
-#endif
- regs.ebx = sensor & 0xff;
- rc = i8k_smm(&regs);
- if (rc < 0)
- return rc;
+ return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
- temp = regs.eax & 0xff;
+static int i8k_get_temp(int sensor)
+{
+ int temp = _i8k_get_temp(sensor);
-#ifdef I8K_TEMPERATURE_BUG
/*
* Sometimes the temperature sensor returns 0x99, which is out of range.
- * In this case we return (once) the previous cached value. For example:
+ * In this case we retry (once) before returning an error.
# 1003655137 00000058 00005a4b
# 1003655138 00000099 00003a80 <--- 0x99 = 153 degrees
# 1003655139 00000054 00005c52
*/
- if (temp > I8K_MAX_TEMP) {
- temp = prev[sensor];
- prev[sensor] = I8K_MAX_TEMP+1;
- } else {
- prev[sensor] = temp;
+ if (temp == 0x99) {
+ msleep(100);
+ temp = _i8k_get_temp(sensor);
}
+ /*
+ * Return -ENODATA for all invalid temperatures.
+ *
+ * Known instances are the 0x99 value as seen above as well as
+ * 0xc1 (193), which may be returned when trying to read the GPU
+ * temperature if the system supports a GPU and it is currently
+ * turned off.
+ */
if (temp > I8K_MAX_TEMP)
- return -ERANGE;
-#endif
+ return -ENODATA;
return temp;
}
@@ -493,6 +529,29 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
* Hwmon interface
*/
+static ssize_t i8k_hwmon_show_temp_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ static const char * const labels[] = {
+ "CPU",
+ "GPU",
+ "SODIMM",
+ "Other",
+ "Ambient",
+ "Other",
+ };
+ int index = to_sensor_dev_attr(devattr)->index;
+ int type;
+
+ type = i8k_get_temp_type(index);
+ if (type < 0)
+ return type;
+ if (type >= ARRAY_SIZE(labels))
+ type = ARRAY_SIZE(labels) - 1;
+ return sprintf(buf, "%s\n", labels[type]);
+}
+
static ssize_t i8k_hwmon_show_temp(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -501,13 +560,42 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev,
int temp;
temp = i8k_get_temp(index);
- if (temp == -ERANGE)
- return -EINVAL;
if (temp < 0)
return temp;
return sprintf(buf, "%d\n", temp * 1000);
}
+static ssize_t i8k_hwmon_show_fan_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ static const char * const labels[] = {
+ "Processor Fan",
+ "Motherboard Fan",
+ "Video Fan",
+ "Power Supply Fan",
+ "Chipset Fan",
+ "Other Fan",
+ };
+ int index = to_sensor_dev_attr(devattr)->index;
+ bool dock = false;
+ int type;
+
+ type = i8k_get_fan_type(index);
+ if (type < 0)
+ return type;
+
+ if (type & 0x10) {
+ dock = true;
+ type &= 0x0F;
+ }
+
+ if (type >= ARRAY_SIZE(labels))
+ type = (ARRAY_SIZE(labels) - 1);
+
+ return sprintf(buf, "%s%s\n", (dock ? "Docking " : ""), labels[type]);
+}
+
static ssize_t i8k_hwmon_show_fan(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -555,45 +643,66 @@ static ssize_t i8k_hwmon_set_pwm(struct device *dev,
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL,
+ 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL,
+ 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL,
+ 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
- I8K_FAN_LEFT);
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, i8k_hwmon_show_temp_label, NULL,
+ 3);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
+ 0);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
- i8k_hwmon_set_pwm, I8K_FAN_LEFT);
+ i8k_hwmon_set_pwm, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
- I8K_FAN_RIGHT);
+ 1);
+static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
+ 1);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
- i8k_hwmon_set_pwm, I8K_FAN_RIGHT);
+ i8k_hwmon_set_pwm, 1);
static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */
- &sensor_dev_attr_temp2_input.dev_attr.attr, /* 1 */
- &sensor_dev_attr_temp3_input.dev_attr.attr, /* 2 */
- &sensor_dev_attr_temp4_input.dev_attr.attr, /* 3 */
- &sensor_dev_attr_fan1_input.dev_attr.attr, /* 4 */
- &sensor_dev_attr_pwm1.dev_attr.attr, /* 5 */
- &sensor_dev_attr_fan2_input.dev_attr.attr, /* 6 */
- &sensor_dev_attr_pwm2.dev_attr.attr, /* 7 */
+ &sensor_dev_attr_temp1_label.dev_attr.attr, /* 1 */
+ &sensor_dev_attr_temp2_input.dev_attr.attr, /* 2 */
+ &sensor_dev_attr_temp2_label.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_temp3_input.dev_attr.attr, /* 4 */
+ &sensor_dev_attr_temp3_label.dev_attr.attr, /* 5 */
+ &sensor_dev_attr_temp4_input.dev_attr.attr, /* 6 */
+ &sensor_dev_attr_temp4_label.dev_attr.attr, /* 7 */
+ &sensor_dev_attr_fan1_input.dev_attr.attr, /* 8 */
+ &sensor_dev_attr_fan1_label.dev_attr.attr, /* 9 */
+ &sensor_dev_attr_pwm1.dev_attr.attr, /* 10 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 11 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 12 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 13 */
NULL
};
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
- if (index == 0 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+ if (index >= 0 && index <= 1 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
return 0;
- if (index == 1 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
+ if (index >= 2 && index <= 3 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
return 0;
- if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
+ if (index >= 4 && index <= 5 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
return 0;
- if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
+ if (index >= 6 && index <= 7 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
return 0;
- if (index >= 4 && index <= 5 &&
+ if (index >= 8 && index <= 10 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
return 0;
- if (index >= 6 && index <= 7 &&
+ if (index >= 11 && index <= 13 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
@@ -612,28 +721,28 @@ static int __init i8k_init_hwmon(void)
i8k_hwmon_flags = 0;
- /* CPU temperature attributes, if temperature reading is OK */
- err = i8k_get_temp(0);
- if (err >= 0 || err == -ERANGE)
+ /* CPU temperature attributes, if temperature type is OK */
+ err = i8k_get_temp_type(0);
+ if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1;
/* check for additional temperature sensors */
- err = i8k_get_temp(1);
- if (err >= 0 || err == -ERANGE)
+ err = i8k_get_temp_type(1);
+ if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2;
- err = i8k_get_temp(2);
- if (err >= 0 || err == -ERANGE)
+ err = i8k_get_temp_type(2);
+ if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3;
- err = i8k_get_temp(3);
- if (err >= 0 || err == -ERANGE)
+ err = i8k_get_temp_type(3);
+ if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
- /* Left fan attributes, if left fan is present */
- err = i8k_get_fan_status(I8K_FAN_LEFT);
+ /* First fan attributes, if fan type is OK */
+ err = i8k_get_fan_type(0);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
- /* Right fan attributes, if right fan is present */
- err = i8k_get_fan_status(I8K_FAN_RIGHT);
+ /* Second fan attributes, if fan type is OK */
+ err = i8k_get_fan_type(1);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
@@ -649,16 +758,15 @@ static int __init i8k_init_hwmon(void)
}
struct i8k_config_data {
- int fan_mult;
- int fan_max;
+ uint fan_mult;
+ uint fan_max;
};
enum i8k_configs {
DELL_LATITUDE_D520,
- DELL_LATITUDE_E6540,
DELL_PRECISION_490,
DELL_STUDIO,
- DELL_XPS_M140,
+ DELL_XPS,
};
static const struct i8k_config_data i8k_config_data[] = {
@@ -666,10 +774,6 @@ static const struct i8k_config_data i8k_config_data[] = {
.fan_mult = 1,
.fan_max = I8K_FAN_TURBO,
},
- [DELL_LATITUDE_E6540] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_HIGH,
- },
[DELL_PRECISION_490] = {
.fan_mult = 1,
.fan_max = I8K_FAN_TURBO,
@@ -678,7 +782,7 @@ static const struct i8k_config_data i8k_config_data[] = {
.fan_mult = 1,
.fan_max = I8K_FAN_HIGH,
},
- [DELL_XPS_M140] = {
+ [DELL_XPS] = {
.fan_mult = 1,
.fan_max = I8K_FAN_HIGH,
},
@@ -715,22 +819,6 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
.driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
},
{
- .ident = "Dell Latitude E6440",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_E6540],
- },
- {
- .ident = "Dell Latitude E6540",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6540"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_E6540],
- },
- {
.ident = "Dell Latitude 2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -790,12 +878,20 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
.driver_data = (void *)&i8k_config_data[DELL_STUDIO],
},
{
+ .ident = "Dell XPS 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS13"),
+ },
+ .driver_data = (void *)&i8k_config_data[DELL_XPS],
+ },
+ {
.ident = "Dell XPS M140",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
},
- .driver_data = (void *)&i8k_config_data[DELL_XPS_M140],
+ .driver_data = (void *)&i8k_config_data[DELL_XPS],
},
{ }
};
@@ -808,6 +904,7 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
static int __init i8k_probe(void)
{
const struct dmi_system_id *id;
+ int fan, ret;
/*
* Get DMI information
@@ -836,19 +933,40 @@ static int __init i8k_probe(void)
return -ENODEV;
}
- i8k_fan_mult = fan_mult;
- i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
+ /*
+ * Set the fan multiplier and maximum fan speed from the DMI config.
+ * Values specified in module parameters override values from DMI.
+ */
id = dmi_first_match(i8k_dmi_table);
if (id && id->driver_data) {
const struct i8k_config_data *conf = id->driver_data;
-
- if (fan_mult == I8K_FAN_MULT && conf->fan_mult)
- i8k_fan_mult = conf->fan_mult;
- if (fan_max == I8K_FAN_HIGH && conf->fan_max)
- i8k_fan_max = conf->fan_max;
+ if (!fan_mult && conf->fan_mult)
+ fan_mult = conf->fan_mult;
+ if (!fan_max && conf->fan_max)
+ fan_max = conf->fan_max;
}
+
+ i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
+ if (!fan_mult) {
+ /*
+ * Autodetect the fan multiplier based on the nominal rpm.
+ * If the fan reports an rpm value that is too high, set the multiplier to 1.
+ */
+ for (fan = 0; fan < 2; ++fan) {
+ ret = i8k_get_fan_nominal_speed(fan, i8k_fan_max);
+ if (ret < 0)
+ continue;
+ if (ret > I8K_FAN_MAX_RPM)
+ i8k_fan_mult = 1;
+ break;
+ }
+ } else {
+ /* Fan multiplier was specified in module param or in dmi */
+ i8k_fan_mult = fan_mult;
+ }
+
return 0;
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 4c58333b4257..297110c12635 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -28,7 +28,7 @@
#include <linux/io.h>
#include <linux/aio.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
@@ -287,13 +287,24 @@ static unsigned long get_unmapped_area_mem(struct file *file,
return pgoff << PAGE_SHIFT;
}
+/* permit direct mmap, for read, write or exec */
+static unsigned memory_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_DIRECT |
+ NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
+static unsigned zero_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_COPY;
+}
+
/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_MAYSHARE;
}
#else
-#define get_unmapped_area_mem NULL
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
@@ -341,7 +352,6 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
return 0;
}
-#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
unsigned long pfn;
@@ -362,9 +372,7 @@ static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
vma->vm_pgoff = pfn;
return mmap_mem(file, vma);
}
-#endif
-#ifdef CONFIG_DEVKMEM
/*
* This function reads the *virtual* memory as seen by the kernel.
*/
@@ -544,9 +552,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
*ppos = p;
return virtr + wrote ? : err;
}
-#endif
-#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -575,6 +581,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
return -EFAULT;
while (count-- > 0 && i < 65536) {
char c;
+
if (__get_user(c, tmp)) {
if (tmp > buf)
break;
@@ -587,7 +594,6 @@ static ssize_t write_port(struct file *file, const char __user *buf,
*ppos = i;
return tmp-buf;
}
-#endif
static ssize_t read_null(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -631,6 +637,7 @@ static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
while (iov_iter_count(iter)) {
size_t chunk = iov_iter_count(iter), n;
+
if (chunk > PAGE_SIZE)
chunk = PAGE_SIZE; /* Just for latency reasons */
n = iov_iter_zero(chunk, iter);
@@ -715,25 +722,29 @@ static int open_port(struct inode *inode, struct file *filp)
#define open_mem open_port
#define open_kmem open_mem
-static const struct file_operations mem_fops = {
+static const struct file_operations __maybe_unused mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
.write = write_mem,
.mmap = mmap_mem,
.open = open_mem,
+#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem,
+ .mmap_capabilities = memory_mmap_capabilities,
+#endif
};
-#ifdef CONFIG_DEVKMEM
-static const struct file_operations kmem_fops = {
+static const struct file_operations __maybe_unused kmem_fops = {
.llseek = memory_lseek,
.read = read_kmem,
.write = write_kmem,
.mmap = mmap_kmem,
.open = open_kmem,
+#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem,
-};
+ .mmap_capabilities = memory_mmap_capabilities,
#endif
+};
static const struct file_operations null_fops = {
.llseek = null_lseek,
@@ -744,14 +755,12 @@ static const struct file_operations null_fops = {
.splice_write = splice_write_null,
};
-#ifdef CONFIG_DEVPORT
-static const struct file_operations port_fops = {
+static const struct file_operations __maybe_unused port_fops = {
.llseek = memory_lseek,
.read = read_port,
.write = write_port,
.open = open_port,
};
-#endif
static const struct file_operations zero_fops = {
.llseek = zero_lseek,
@@ -760,16 +769,9 @@ static const struct file_operations zero_fops = {
.read_iter = read_iter_zero,
.aio_write = aio_write_zero,
.mmap = mmap_zero,
-};
-
-/*
- * capabilities for /dev/zero
- * - permits private mappings, "copies" are taken of the source of zeros
- * - no writeback happens
- */
-static struct backing_dev_info zero_bdi = {
- .name = "char/mem",
- .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
+#ifndef CONFIG_MMU
+ .mmap_capabilities = zero_mmap_capabilities,
+#endif
};
static const struct file_operations full_fops = {
@@ -783,22 +785,24 @@ static const struct memdev {
const char *name;
umode_t mode;
const struct file_operations *fops;
- struct backing_dev_info *dev_info;
+ fmode_t fmode;
} devlist[] = {
- [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#ifdef CONFIG_DEVMEM
+ [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
+#endif
#ifdef CONFIG_DEVKMEM
- [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
+ [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
- [3] = { "null", 0666, &null_fops, NULL },
+ [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
- [4] = { "port", 0, &port_fops, NULL },
+ [4] = { "port", 0, &port_fops, 0 },
#endif
- [5] = { "zero", 0666, &zero_fops, &zero_bdi },
- [7] = { "full", 0666, &full_fops, NULL },
- [8] = { "random", 0666, &random_fops, NULL },
- [9] = { "urandom", 0666, &urandom_fops, NULL },
+ [5] = { "zero", 0666, &zero_fops, 0 },
+ [7] = { "full", 0666, &full_fops, 0 },
+ [8] = { "random", 0666, &random_fops, 0 },
+ [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
- [11] = { "kmsg", 0644, &kmsg_fops, NULL },
+ [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
@@ -816,12 +820,7 @@ static int memory_open(struct inode *inode, struct file *filp)
return -ENXIO;
filp->f_op = dev->fops;
- if (dev->dev_info)
- filp->f_mapping->backing_dev_info = dev->dev_info;
-
- /* Is /dev/mem or /dev/kmem ? */
- if (dev->dev_info == &directly_mappable_cdev_bdi)
- filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ filp->f_mode |= dev->fmode;
if (dev->fops->open)
return dev->fops->open(inode, filp);
@@ -846,11 +845,6 @@ static struct class *mem_class;
static int __init chr_dev_init(void)
{
int minor;
- int err;
-
- err = bdi_init(&zero_bdi);
- if (err)
- return err;
if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 04645c09fe5e..9cd6968e2f92 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
__u32 c = f->pool[2], d = f->pool[3];
a += b; c += d;
- b = rol32(a, 6); d = rol32(c, 27);
+ b = rol32(b, 6); d = rol32(d, 27);
d ^= a; b ^= c;
a += b; c += d;
- b = rol32(a, 16); d = rol32(c, 14);
+ b = rol32(b, 16); d = rol32(d, 14);
d ^= a; b ^= c;
a += b; c += d;
- b = rol32(a, 6); d = rol32(c, 27);
+ b = rol32(b, 6); d = rol32(d, 27);
d ^= a; b ^= c;
a += b; c += d;
- b = rol32(a, 16); d = rol32(c, 14);
+ b = rol32(b, 16); d = rol32(d, 14);
d ^= a; b ^= c;
f->pool[0] = a; f->pool[1] = b;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index a24891b97547..6e29bf2db536 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -104,11 +104,9 @@ static int raw_release(struct inode *inode, struct file *filp)
mutex_lock(&raw_mutex);
bdev = raw_devices[minor].binding;
- if (--raw_devices[minor].inuse == 0) {
+ if (--raw_devices[minor].inuse == 0)
/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
inode->i_mapping = &inode->i_data;
- inode->i_mapping->backing_dev_info = &default_backing_dev_info;
- }
mutex_unlock(&raw_mutex);
blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index c54cac3f8bc8..9d4e37549eb2 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -100,15 +100,15 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
-config TCG_ST33_I2C
- tristate "STMicroelectronics ST33 I2C TPM"
+config TCG_TIS_I2C_ST33
+ tristate "TPM Interface Specification 1.2 Interface (I2C - STMicroelectronics)"
depends on I2C
depends on GPIOLIB
---help---
If you have a TPM security chip from STMicroelectronics working with
an I2C bus say Yes and it will be accessible from within Linux.
To compile this driver as a module, choose M here; the module will be
- called tpm_stm_st33_i2c.
+ called tpm_i2c_stm_st33.
config TCG_XEN
tristate "XEN TPM Interface"
@@ -122,4 +122,13 @@ config TCG_XEN
To compile this driver as a module, choose M here; the module
will be called xen-tpmfront.
+config TCG_CRB
+ tristate "TPM 2.0 CRB Interface"
+ depends on X86 && ACPI
+ ---help---
+ If you have a TPM security chip that is compliant with the
+ TCG CRB 2.0 TPM specification say Yes and it will be accessible
+ from within Linux. To compile this driver as a module, choose
+ M here; the module will be called tpm_crb.
+
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 4d85dd681b81..990cf183931d 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o
ifdef CONFIG_ACPI
@@ -20,5 +20,6 @@ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
-obj-$(CONFIG_TCG_ST33_I2C) += tpm_i2c_stm_st33.o
+obj-$(CONFIG_TCG_TIS_I2C_ST33) += tpm_i2c_stm_st33.o
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
+obj-$(CONFIG_TCG_CRB) += tpm_crb.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
new file mode 100644
index 000000000000..1d278ccd751f
--- /dev/null
+++ b/drivers/char/tpm/tpm-chip.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * TPM chip management routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/major.h>
+#include "tpm.h"
+#include "tpm_eventlog.h"
+
+static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
+static LIST_HEAD(tpm_chip_list);
+static DEFINE_SPINLOCK(driver_lock);
+
+struct class *tpm_class;
+dev_t tpm_devt;
+
+/*
+ * tpm_chip_find_get - return tpm_chip for a given chip number
+ * @chip_num: the device number for the chip
+ */
+struct tpm_chip *tpm_chip_find_get(int chip_num)
+{
+ struct tpm_chip *pos, *chip = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
+ if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
+ continue;
+
+ if (try_module_get(pos->pdev->driver->owner)) {
+ chip = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return chip;
+}
+
+/**
+ * tpm_dev_release() - free chip memory and the device number
+ * @dev: the character device for the TPM chip
+ *
+ * This is used as the release function for the character device.
+ */
+static void tpm_dev_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ spin_lock(&driver_lock);
+ clear_bit(chip->dev_num, dev_mask);
+ spin_unlock(&driver_lock);
+ kfree(chip);
+}
+
+/**
+ * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @dev: device to which the chip is associated
+ * @ops: struct tpm_class_ops instance
+ *
+ * Allocates a new struct tpm_chip instance and assigns a free
+ * device number for it. Caller does not have to worry about
+ * freeing the allocated resources. When the device is removed
+ * devres calls tpmm_chip_remove() to do the job.
+ */
+struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops)
+{
+ struct tpm_chip *chip;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&chip->tpm_mutex);
+ INIT_LIST_HEAD(&chip->list);
+
+ chip->ops = ops;
+
+ spin_lock(&driver_lock);
+ chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
+ spin_unlock(&driver_lock);
+
+ if (chip->dev_num >= TPM_NUM_DEVICES) {
+ dev_err(dev, "No available tpm device numbers\n");
+ kfree(chip);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ set_bit(chip->dev_num, dev_mask);
+
+ scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
+
+ chip->pdev = dev;
+
+ dev_set_drvdata(dev, chip);
+
+ chip->dev.class = tpm_class;
+ chip->dev.release = tpm_dev_release;
+ chip->dev.parent = chip->pdev;
+
+ if (chip->dev_num == 0)
+ chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
+ else
+ chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
+
+ dev_set_name(&chip->dev, "%s", chip->devname);
+
+ device_initialize(&chip->dev);
+
+ chip->cdev.owner = chip->pdev->driver->owner;
+ cdev_init(&chip->cdev, &tpm_fops);
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
+
+static int tpm_dev_add_device(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = device_add(&chip->dev);
+ if (rc) {
+ dev_err(&chip->dev,
+ "unable to device_register() %s, major %d, minor %d, err=%d\n",
+ chip->devname, MAJOR(chip->dev.devt),
+ MINOR(chip->dev.devt), rc);
+
+ return rc;
+ }
+
+ rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
+ if (rc) {
+ dev_err(&chip->dev,
+ "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+ chip->devname, MAJOR(chip->dev.devt),
+ MINOR(chip->dev.devt), rc);
+
+ device_unregister(&chip->dev);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void tpm_dev_del_device(struct tpm_chip *chip)
+{
+ cdev_del(&chip->cdev);
+ device_unregister(&chip->dev);
+}
+
+/*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+ *
+ * Creates a character device for the TPM chip and adds sysfs interfaces for
+ * the device, PPI and TCPA. As the last step this function adds the
+ * chip to the list of TPM chips available for use.
+ *
+ * NOTE: This function should only be called after the chip initialization
+ * is complete.
+ *
+ * Called from tpm_<specific>.c probe function only for devices
+ * the driver has determined it should claim. Prior to calling
+ * this function the specific probe function has called pci_enable_device.
+ * Upon errant exit from this function the specific probe function should
+ * call pci_disable_device.
+ */
+int tpm_chip_register(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm_dev_add_device(chip);
+ if (rc)
+ return rc;
+
+ /* Populate sysfs for TPM1 devices. */
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ rc = tpm_sysfs_add_device(chip);
+ if (rc)
+ goto del_misc;
+
+ rc = tpm_add_ppi(chip);
+ if (rc)
+ goto del_sysfs;
+
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
+ }
+
+ /* Make the chip available. */
+ spin_lock(&driver_lock);
+ list_add_rcu(&chip->list, &tpm_chip_list);
+ spin_unlock(&driver_lock);
+
+ chip->flags |= TPM_CHIP_FLAG_REGISTERED;
+
+ return 0;
+del_sysfs:
+ tpm_sysfs_del_device(chip);
+del_misc:
+ tpm_dev_del_device(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_register);
+
+/*
+ * tpm_chip_unregister() - release the TPM driver
+ * @chip: TPM chip to use.
+ *
+ * Takes the chip first away from the list of available TPM chips and then
+ * cleans up all the resources reserved by tpm_chip_register().
+ *
+ * NOTE: This function should only be called before deinitializing chip
+ * resources.
+ */
+void tpm_chip_unregister(struct tpm_chip *chip)
+{
+ if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
+ return;
+
+ spin_lock(&driver_lock);
+ list_del_rcu(&chip->list);
+ spin_unlock(&driver_lock);
+ synchronize_rcu();
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ if (chip->bios_dir)
+ tpm_bios_log_teardown(chip->bios_dir);
+ tpm_remove_ppi(chip);
+ tpm_sysfs_del_device(chip);
+ }
+
+ tpm_dev_del_device(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_unregister);
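Taken together, tpmm_chip_alloc(), tpm_chip_register() and tpm_chip_unregister() replace the old tpm_register_hardware()/tpm_remove_hardware() pair. A rough sketch, assuming a kernel build context, of how a hypothetical platform driver (all my_tpm_* names are illustrative only) might use the new API:

/* my_tpm_ops would be filled in with the driver's .send/.recv/.status
 * callbacks; it is left empty here because this is only a sketch. */
static const struct tpm_class_ops my_tpm_ops;

static int my_tpm_probe(struct platform_device *pdev)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(&pdev->dev, &my_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* device-specific setup (I/O mapping, timeouts, IRQs) goes here */

	return tpm_chip_register(chip);
}

static int my_tpm_remove(struct platform_device *pdev)
{
	/* tpmm_chip_alloc() stored the chip with dev_set_drvdata() */
	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);

	tpm_chip_unregister(chip);
	return 0;
}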
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index d9b774e02a1f..de0337ebd658 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -17,7 +17,6 @@
* License.
*
*/
-#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "tpm.h"
@@ -54,16 +53,15 @@ static void timeout_work(struct work_struct *work)
static int tpm_open(struct inode *inode, struct file *file)
{
- struct miscdevice *misc = file->private_data;
- struct tpm_chip *chip = container_of(misc, struct tpm_chip,
- vendor.miscdev);
+ struct tpm_chip *chip =
+ container_of(inode->i_cdev, struct tpm_chip, cdev);
struct file_priv *priv;
/* It's assured that the chip will be opened just once,
* by the check of is_open variable, which is protected
* by driver_lock. */
if (test_and_set_bit(0, &chip->is_open)) {
- dev_dbg(chip->dev, "Another process owns this TPM\n");
+ dev_dbg(chip->pdev, "Another process owns this TPM\n");
return -EBUSY;
}
@@ -81,7 +79,7 @@ static int tpm_open(struct inode *inode, struct file *file)
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
- get_device(chip->dev);
+ get_device(chip->pdev);
return 0;
}
@@ -168,12 +166,12 @@ static int tpm_release(struct inode *inode, struct file *file)
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
clear_bit(0, &priv->chip->is_open);
- put_device(priv->chip->dev);
+ put_device(priv->chip->pdev);
kfree(priv);
return 0;
}
-static const struct file_operations tpm_fops = {
+const struct file_operations tpm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = tpm_open,
@@ -182,32 +180,4 @@ static const struct file_operations tpm_fops = {
.release = tpm_release,
};
-int tpm_dev_add_device(struct tpm_chip *chip)
-{
- int rc;
- chip->vendor.miscdev.fops = &tpm_fops;
- if (chip->dev_num == 0)
- chip->vendor.miscdev.minor = TPM_MINOR;
- else
- chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
-
- chip->vendor.miscdev.name = chip->devname;
- chip->vendor.miscdev.parent = chip->dev;
-
- rc = misc_register(&chip->vendor.miscdev);
- if (rc) {
- chip->vendor.miscdev.name = NULL;
- dev_err(chip->dev,
- "unable to misc_register %s, minor %d err=%d\n",
- chip->vendor.miscdev.name,
- chip->vendor.miscdev.minor, rc);
- }
- return rc;
-}
-
-void tpm_dev_del_device(struct tpm_chip *chip)
-{
- if (chip->vendor.miscdev.name)
- misc_deregister(&chip->vendor.miscdev);
-}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 6af17002a115..bf53a3771da5 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
@@ -47,10 +48,6 @@ module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
MODULE_PARM_DESC(suspend_pcr,
"PCR to use for dummy writes to faciltate flush on suspend.");
-static LIST_HEAD(tpm_chip_list);
-static DEFINE_SPINLOCK(driver_lock);
-static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
-
/*
* Array with one entry per ordinal defining the maximum amount
* of time the chip could take to return the result. The ordinal
@@ -346,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
@@ -355,7 +352,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
@@ -363,7 +360,10 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
if (chip->vendor.irq)
goto out_recv;
- stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ stop = jiffies + tpm2_calc_ordinal_duration(chip, ordinal);
+ else
+ stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
u8 status = chip->ops->status(chip);
if ((status & chip->ops->req_complete_mask) ==
@@ -371,7 +371,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
goto out_recv;
if (chip->ops->req_canceled(chip, status)) {
- dev_err(chip->dev, "Operation Canceled\n");
+ dev_err(chip->pdev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
@@ -381,14 +381,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
} while (time_before(jiffies, stop));
chip->ops->cancel(chip);
- dev_err(chip->dev, "Operation Timed out\n");
+ dev_err(chip->pdev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
mutex_unlock(&chip->tpm_mutex);
@@ -398,9 +398,10 @@ out:
#define TPM_DIGEST_SIZE 20
#define TPM_RET_CODE_IDX 6
-static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
- int len, const char *desc)
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd,
+ int len, const char *desc)
{
+ struct tpm_output_header *header;
int err;
len = tpm_transmit(chip, (u8 *) cmd, len);
@@ -409,9 +410,12 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
else if (len < TPM_HEADER_SIZE)
return -EFAULT;
- err = be32_to_cpu(cmd->header.out.return_code);
+ header = cmd;
+
+ err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
- dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
+ dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err,
+ desc);
return err;
}
@@ -448,7 +452,7 @@ ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
@@ -464,8 +468,8 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the timeouts");
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
@@ -483,9 +487,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
{
struct tpm_cmd_t start_cmd;
start_cmd.header.in = tpm_startup_header;
+
start_cmd.params.startup_in.startup_type = startup_type;
- return transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE,
- "attempting to start the TPM");
+ return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to start the TPM");
}
int tpm_get_timeouts(struct tpm_chip *chip)
@@ -500,12 +505,12 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(chip->dev, "Issuing TPM_STARTUP");
+ dev_info(chip->pdev, "Issuing TPM_STARTUP");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
@@ -513,11 +518,11 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
NULL);
}
if (rc) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"A TPM error (%zd) occurred attempting to determine the timeouts\n",
rc);
goto duration;
@@ -556,7 +561,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
/* Report adjusted timeouts */
if (chip->vendor.timeout_adjusted) {
- dev_info(chip->dev,
+ dev_info(chip->pdev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
old_timeout[0], new_timeout[0],
old_timeout[1], new_timeout[1],
@@ -575,8 +580,8 @@ duration:
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the durations");
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the durations");
if (rc)
return rc;
@@ -603,7 +608,7 @@ duration:
chip->vendor.duration[TPM_MEDIUM] *= 1000;
chip->vendor.duration[TPM_LONG] *= 1000;
chip->vendor.duration_adjusted = true;
- dev_info(chip->dev, "Adjusting TPM timeout parameters.");
+ dev_info(chip->pdev, "Adjusting TPM timeout parameters.");
}
return 0;
}
@@ -631,32 +636,11 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
struct tpm_cmd_t cmd;
cmd.header.in = continue_selftest_header;
- rc = transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
- "continue selftest");
+ rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
+ "continue selftest");
return rc;
}
-/*
- * tpm_chip_find_get - return tpm_chip for given chip number
- */
-static struct tpm_chip *tpm_chip_find_get(int chip_num)
-{
- struct tpm_chip *pos, *chip = NULL;
-
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
- continue;
-
- if (try_module_get(pos->dev->driver->owner)) {
- chip = pos;
- break;
- }
- }
- rcu_read_unlock();
- return chip;
-}
-
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
static struct tpm_input_header pcrread_header = {
@@ -672,8 +656,8 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
- rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
- "attempting to read a pcr value");
+ rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
+ "attempting to read a pcr value");
if (rc == 0)
memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
@@ -700,7 +684,10 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
- rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
+ else
+ rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
return rc;
}
@@ -734,11 +721,17 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
if (chip == NULL)
return -ENODEV;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_pcr_extend(chip, pcr_idx, hash);
+ tpm_chip_put(chip);
+ return rc;
+ }
+
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
- rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
- "attempting extend a PCR value");
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "attempting extend a PCR value");
tpm_chip_put(chip);
return rc;
@@ -781,7 +774,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
* around 300ms while the self test is ongoing, keep trying
* until the self test duration expires. */
if (rc == -ETIME) {
- dev_info(chip->dev, HW_ERR "TPM command timed out during continue self test");
+ dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test");
msleep(delay_msec);
continue;
}
@@ -791,7 +784,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
- dev_info(chip->dev,
+ dev_info(chip->pdev,
"TPM is disabled/deactivated (0x%X)\n", rc);
/* TPM is disabled and/or deactivated; driver can
* proceed and TPM does handle commands for
@@ -817,7 +810,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
if (chip == NULL)
return -ENODEV;
- rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
+ rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
tpm_chip_put(chip);
return rc;
@@ -884,30 +877,6 @@ again:
}
EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
-void tpm_remove_hardware(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- if (chip == NULL) {
- dev_err(dev, "No device data found\n");
- return;
- }
-
- spin_lock(&driver_lock);
- list_del_rcu(&chip->list);
- spin_unlock(&driver_lock);
- synchronize_rcu();
-
- tpm_dev_del_device(chip);
- tpm_sysfs_del_device(chip);
- tpm_remove_ppi(&dev->kobj);
- tpm_bios_log_teardown(chip->bios_dir);
-
- /* write it this way to be explicit (chip->dev == dev) */
- put_device(chip->dev);
-}
-EXPORT_SYMBOL_GPL(tpm_remove_hardware);
-
#define TPM_ORD_SAVESTATE cpu_to_be32(152)
#define SAVESTATE_RESULT_SIZE 10
@@ -932,20 +901,23 @@ int tpm_pm_suspend(struct device *dev)
if (chip == NULL)
return -ENODEV;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return tpm2_shutdown(chip, TPM2_SU_CLEAR);
+
/* for buggy tpm, flush pcrs with extend to selected dummy */
if (tpm_suspend_pcr) {
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
- rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
- "extending dummy pcr before suspend");
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "extending dummy pcr before suspend");
}
/* now do the actual savestate */
for (try = 0; try < TPM_RETRY; try++) {
cmd.header.in = savestate_header;
- rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);
/*
* If the TPM indicates that it is too busy to respond to
@@ -963,10 +935,10 @@ int tpm_pm_suspend(struct device *dev)
}
if (rc)
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"Error (%d) sending savestate before suspend\n", rc);
else if (try > 0)
- dev_warn(chip->dev, "TPM savestate took %dms\n",
+ dev_warn(chip->pdev, "TPM savestate took %dms\n",
try * TPM_TIMEOUT_RETRY);
return rc;
@@ -1018,11 +990,17 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (chip == NULL)
return -ENODEV;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ err = tpm2_get_random(chip, out, max);
+ tpm_chip_put(chip);
+ return err;
+ }
+
do {
tpm_cmd.header.in = tpm_getrandom_header;
tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
- err = transmit_cmd(chip, &tpm_cmd,
+ err = tpm_transmit_cmd(chip, &tpm_cmd,
TPM_GETRANDOM_RESULT_SIZE + num_bytes,
"attempting get random");
if (err)
@@ -1041,103 +1019,34 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
}
EXPORT_SYMBOL_GPL(tpm_get_random);
-/* In case vendor provided release function, call it too.*/
-
-void tpm_dev_vendor_release(struct tpm_chip *chip)
+static int __init tpm_init(void)
{
- if (!chip)
- return;
-
- clear_bit(chip->dev_num, dev_mask);
-}
-EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
-
-
-/*
- * Once all references to platform device are down to 0,
- * release all allocated structures.
- */
-static void tpm_dev_release(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ int rc;
- if (!chip)
- return;
+ tpm_class = class_create(THIS_MODULE, "tpm");
+ if (IS_ERR(tpm_class)) {
+ pr_err("couldn't create tpm class\n");
+ return PTR_ERR(tpm_class);
+ }
- tpm_dev_vendor_release(chip);
+ rc = alloc_chrdev_region(&tpm_devt, 0, TPM_NUM_DEVICES, "tpm");
+ if (rc < 0) {
+ pr_err("tpm: failed to allocate char dev region\n");
+ class_destroy(tpm_class);
+ return rc;
+ }
- chip->release(dev);
- kfree(chip);
+ return 0;
}
-/*
- * Called from tpm_<specific>.c probe function only for devices
- * the driver has determined it should claim. Prior to calling
- * this function the specific probe function has called pci_enable_device
- * upon errant exit from this function specific probe function should call
- * pci_disable_device
- */
-struct tpm_chip *tpm_register_hardware(struct device *dev,
- const struct tpm_class_ops *ops)
+static void __exit tpm_exit(void)
{
- struct tpm_chip *chip;
-
- /* Driver specific per-device data */
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
-
- if (chip == NULL)
- return NULL;
-
- mutex_init(&chip->tpm_mutex);
- INIT_LIST_HEAD(&chip->list);
-
- chip->ops = ops;
- chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
-
- if (chip->dev_num >= TPM_NUM_DEVICES) {
- dev_err(dev, "No available tpm device numbers\n");
- goto out_free;
- }
-
- set_bit(chip->dev_num, dev_mask);
-
- scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm",
- chip->dev_num);
-
- chip->dev = get_device(dev);
- chip->release = dev->release;
- dev->release = tpm_dev_release;
- dev_set_drvdata(dev, chip);
-
- if (tpm_dev_add_device(chip))
- goto put_device;
-
- if (tpm_sysfs_add_device(chip))
- goto del_misc;
-
- if (tpm_add_ppi(&dev->kobj))
- goto del_sysfs;
-
- chip->bios_dir = tpm_bios_log_setup(chip->devname);
-
- /* Make chip available */
- spin_lock(&driver_lock);
- list_add_rcu(&chip->list, &tpm_chip_list);
- spin_unlock(&driver_lock);
-
- return chip;
-
-del_sysfs:
- tpm_sysfs_del_device(chip);
-del_misc:
- tpm_dev_del_device(chip);
-put_device:
- put_device(chip->dev);
-out_free:
- kfree(chip);
- return NULL;
+ class_destroy(tpm_class);
+ unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES);
}
-EXPORT_SYMBOL_GPL(tpm_register_hardware);
+
+subsys_initcall(tpm_init);
+module_exit(tpm_exit);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 01730a27ae07..ee66fd4673f3 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -20,25 +20,6 @@
#include <linux/device.h>
#include "tpm.h"
-/* XXX for now this helper is duplicated in tpm-interface.c */
-static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
- int len, const char *desc)
-{
- int err;
-
- len = tpm_transmit(chip, (u8 *) cmd, len);
- if (len < 0)
- return len;
- else if (len < TPM_HEADER_SIZE)
- return -EFAULT;
-
- err = be32_to_cpu(cmd->header.out.return_code);
- if (err != 0 && desc)
- dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
-
- return err;
-}
-
#define READ_PUBEK_RESULT_SIZE 314
#define TPM_ORD_READPUBEK cpu_to_be32(124)
static struct tpm_input_header tpm_readpubek_header = {
@@ -58,8 +39,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_readpubek_header;
- err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
- "attempting to read the PUBEK");
+ err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ "attempting to read the PUBEK");
if (err)
goto out;
@@ -303,16 +284,16 @@ static const struct attribute_group tpm_dev_group = {
int tpm_sysfs_add_device(struct tpm_chip *chip)
{
int err;
- err = sysfs_create_group(&chip->dev->kobj,
+ err = sysfs_create_group(&chip->pdev->kobj,
&tpm_dev_group);
if (err)
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"failed to create sysfs attributes, %d\n", err);
return err;
}
void tpm_sysfs_del_device(struct tpm_chip *chip)
{
- sysfs_remove_group(&chip->dev->kobj, &tpm_dev_group);
+ sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group);
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index e4d0888d2eab..7b0727c5e803 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -10,23 +10,24 @@
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
+ * Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
- *
+ *
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/sched.h>
-#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/tpm.h>
+#include <linux/acpi.h>
+#include <linux/cdev.h>
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
@@ -61,6 +62,59 @@ enum tpm_duration {
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_HEADER_SIZE 10
+
+enum tpm2_const {
+ TPM2_PLATFORM_PCR = 24,
+ TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
+ TPM2_TIMEOUT_A = 750,
+ TPM2_TIMEOUT_B = 2000,
+ TPM2_TIMEOUT_C = 200,
+ TPM2_TIMEOUT_D = 30,
+ TPM2_DURATION_SHORT = 20,
+ TPM2_DURATION_MEDIUM = 750,
+ TPM2_DURATION_LONG = 2000,
+};
+
+enum tpm2_structures {
+ TPM2_ST_NO_SESSIONS = 0x8001,
+ TPM2_ST_SESSIONS = 0x8002,
+};
+
+enum tpm2_return_codes {
+ TPM2_RC_INITIALIZE = 0x0100,
+ TPM2_RC_TESTING = 0x090A,
+ TPM2_RC_DISABLED = 0x0120,
+};
+
+enum tpm2_algorithms {
+ TPM2_ALG_SHA1 = 0x0004,
+};
+
+enum tpm2_command_codes {
+ TPM2_CC_FIRST = 0x011F,
+ TPM2_CC_SELF_TEST = 0x0143,
+ TPM2_CC_STARTUP = 0x0144,
+ TPM2_CC_SHUTDOWN = 0x0145,
+ TPM2_CC_GET_CAPABILITY = 0x017A,
+ TPM2_CC_GET_RANDOM = 0x017B,
+ TPM2_CC_PCR_READ = 0x017E,
+ TPM2_CC_PCR_EXTEND = 0x0182,
+ TPM2_CC_LAST = 0x018F,
+};
+
+enum tpm2_permanent_handles {
+ TPM2_RS_PW = 0x40000009,
+};
+
+enum tpm2_capabilities {
+ TPM2_CAP_TPM_PROPERTIES = 6,
+};
+
+enum tpm2_startup_types {
+ TPM2_SU_CLEAR = 0x0000,
+ TPM2_SU_STATE = 0x0001,
+};
+
struct tpm_chip;
struct tpm_vendor_specific {
@@ -73,7 +127,6 @@ struct tpm_vendor_specific {
int region_size;
int have_region;
- struct miscdevice miscdev;
struct list_head list;
int locality;
unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
@@ -88,15 +141,27 @@ struct tpm_vendor_specific {
u16 manufacturer_id;
};
-#define TPM_VPRIV(c) (c)->vendor.priv
+#define TPM_VPRIV(c) ((c)->vendor.priv)
#define TPM_VID_INTEL 0x8086
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
+#define TPM_PPI_VERSION_LEN 3
+
+enum tpm_chip_flags {
+ TPM_CHIP_FLAG_REGISTERED = BIT(0),
+ TPM_CHIP_FLAG_PPI = BIT(1),
+ TPM_CHIP_FLAG_TPM2 = BIT(2),
+};
+
struct tpm_chip {
- struct device *dev; /* Device stuff */
+ struct device *pdev; /* Device stuff */
+ struct device dev;
+ struct cdev cdev;
+
const struct tpm_class_ops *ops;
+ unsigned int flags;
int dev_num; /* /dev/tpm# */
char devname[7];
@@ -109,15 +174,19 @@ struct tpm_chip {
struct dentry **bios_dir;
+#ifdef CONFIG_ACPI
+ acpi_handle acpi_dev_handle;
+ char ppi_version[TPM_PPI_VERSION_LEN + 1];
+#endif /* CONFIG_ACPI */
+
struct list_head list;
- void (*release) (struct device *);
};
#define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
static inline void tpm_chip_put(struct tpm_chip *chip)
{
- module_put(chip->dev->driver->owner);
+ module_put(chip->pdev->driver->owner);
}
static inline int tpm_read_index(int base, int index)
@@ -313,40 +382,57 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
-ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
+extern struct class *tpm_class;
+extern dev_t tpm_devt;
+extern const struct file_operations tpm_fops;
+ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
size_t bufsiz);
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len,
+ const char *desc);
extern int tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);
extern int tpm_do_selftest(struct tpm_chip *);
extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
-extern struct tpm_chip* tpm_register_hardware(struct device *,
- const struct tpm_class_ops *ops);
-extern void tpm_dev_vendor_release(struct tpm_chip *);
-extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *);
extern int tpm_pm_resume(struct device *);
extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *, bool);
-int tpm_dev_add_device(struct tpm_chip *chip);
-void tpm_dev_del_device(struct tpm_chip *chip);
+struct tpm_chip *tpm_chip_find_get(int chip_num);
+extern struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops);
+extern int tpm_chip_register(struct tpm_chip *chip);
+extern void tpm_chip_unregister(struct tpm_chip *chip);
+
int tpm_sysfs_add_device(struct tpm_chip *chip);
void tpm_sysfs_del_device(struct tpm_chip *chip);
int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
#ifdef CONFIG_ACPI
-extern int tpm_add_ppi(struct kobject *);
-extern void tpm_remove_ppi(struct kobject *);
+extern int tpm_add_ppi(struct tpm_chip *chip);
+extern void tpm_remove_ppi(struct tpm_chip *chip);
#else
-static inline int tpm_add_ppi(struct kobject *parent)
+static inline int tpm_add_ppi(struct tpm_chip *chip)
{
return 0;
}
-static inline void tpm_remove_ppi(struct kobject *parent)
+static inline void tpm_remove_ppi(struct tpm_chip *chip)
{
}
#endif
+
+int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
+ u32 *value, const char *desc);
+
+extern int tpm2_startup(struct tpm_chip *chip, u16 startup_type);
+extern int tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32);
+extern int tpm2_do_selftest(struct tpm_chip *chip);
+extern int tpm2_gen_interrupt(struct tpm_chip *chip, bool quiet);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
new file mode 100644
index 000000000000..1abe6502219f
--- /dev/null
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include "tpm.h"
+
+struct tpm2_startup_in {
+ __be16 startup_type;
+} __packed;
+
+struct tpm2_self_test_in {
+ u8 full_test;
+} __packed;
+
+struct tpm2_pcr_read_in {
+ __be32 pcr_selects_cnt;
+ __be16 hash_alg;
+ u8 pcr_select_size;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN];
+} __packed;
+
+struct tpm2_pcr_read_out {
+ __be32 update_cnt;
+ __be32 pcr_selects_cnt;
+ __be16 hash_alg;
+ u8 pcr_select_size;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN];
+ __be32 digests_cnt;
+ __be16 digest_size;
+ u8 digest[TPM_DIGEST_SIZE];
+} __packed;
+
+struct tpm2_null_auth_area {
+ __be32 handle;
+ __be16 nonce_size;
+ u8 attributes;
+ __be16 auth_size;
+} __packed;
+
+struct tpm2_pcr_extend_in {
+ __be32 pcr_idx;
+ __be32 auth_area_size;
+ struct tpm2_null_auth_area auth_area;
+ __be32 digest_cnt;
+ __be16 hash_alg;
+ u8 digest[TPM_DIGEST_SIZE];
+} __packed;
+
+struct tpm2_get_tpm_pt_in {
+ __be32 cap_id;
+ __be32 property_id;
+ __be32 property_cnt;
+} __packed;
+
+struct tpm2_get_tpm_pt_out {
+ u8 more_data;
+ __be32 subcap_id;
+ __be32 property_cnt;
+ __be32 property_id;
+ __be32 value;
+} __packed;
+
+struct tpm2_get_random_in {
+ __be16 size;
+} __packed;
+
+struct tpm2_get_random_out {
+ __be16 size;
+ u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
+
+union tpm2_cmd_params {
+ struct tpm2_startup_in startup_in;
+ struct tpm2_self_test_in selftest_in;
+ struct tpm2_pcr_read_in pcrread_in;
+ struct tpm2_pcr_read_out pcrread_out;
+ struct tpm2_pcr_extend_in pcrextend_in;
+ struct tpm2_get_tpm_pt_in get_tpm_pt_in;
+ struct tpm2_get_tpm_pt_out get_tpm_pt_out;
+ struct tpm2_get_random_in getrandom_in;
+ struct tpm2_get_random_out getrandom_out;
+};
+
+struct tpm2_cmd {
+ tpm_cmd_header header;
+ union tpm2_cmd_params params;
+} __packed;
+
+/*
+ * Array with one entry per ordinal defining the maximum amount
+ * of time the chip could take to return the result. The values
+ * of the SHORT, MEDIUM, and LONG durations are taken from the
+ * PC Client Profile (PTP) specification.
+ */
+static const u8 tpm2_ordinal_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = {
+ TPM_UNDEFINED, /* 11F */
+ TPM_UNDEFINED, /* 120 */
+ TPM_LONG, /* 121 */
+ TPM_UNDEFINED, /* 122 */
+ TPM_UNDEFINED, /* 123 */
+ TPM_UNDEFINED, /* 124 */
+ TPM_UNDEFINED, /* 125 */
+ TPM_UNDEFINED, /* 126 */
+ TPM_UNDEFINED, /* 127 */
+ TPM_UNDEFINED, /* 128 */
+ TPM_LONG, /* 129 */
+ TPM_UNDEFINED, /* 12a */
+ TPM_UNDEFINED, /* 12b */
+ TPM_UNDEFINED, /* 12c */
+ TPM_UNDEFINED, /* 12d */
+ TPM_UNDEFINED, /* 12e */
+ TPM_UNDEFINED, /* 12f */
+ TPM_UNDEFINED, /* 130 */
+ TPM_UNDEFINED, /* 131 */
+ TPM_UNDEFINED, /* 132 */
+ TPM_UNDEFINED, /* 133 */
+ TPM_UNDEFINED, /* 134 */
+ TPM_UNDEFINED, /* 135 */
+ TPM_UNDEFINED, /* 136 */
+ TPM_UNDEFINED, /* 137 */
+ TPM_UNDEFINED, /* 138 */
+ TPM_UNDEFINED, /* 139 */
+ TPM_UNDEFINED, /* 13a */
+ TPM_UNDEFINED, /* 13b */
+ TPM_UNDEFINED, /* 13c */
+ TPM_UNDEFINED, /* 13d */
+ TPM_MEDIUM, /* 13e */
+ TPM_UNDEFINED, /* 13f */
+ TPM_UNDEFINED, /* 140 */
+ TPM_UNDEFINED, /* 141 */
+ TPM_UNDEFINED, /* 142 */
+ TPM_LONG, /* 143 */
+ TPM_MEDIUM, /* 144 */
+ TPM_UNDEFINED, /* 145 */
+ TPM_UNDEFINED, /* 146 */
+ TPM_UNDEFINED, /* 147 */
+ TPM_UNDEFINED, /* 148 */
+ TPM_UNDEFINED, /* 149 */
+ TPM_UNDEFINED, /* 14a */
+ TPM_UNDEFINED, /* 14b */
+ TPM_UNDEFINED, /* 14c */
+ TPM_UNDEFINED, /* 14d */
+ TPM_LONG, /* 14e */
+ TPM_UNDEFINED, /* 14f */
+ TPM_UNDEFINED, /* 150 */
+ TPM_UNDEFINED, /* 151 */
+ TPM_UNDEFINED, /* 152 */
+ TPM_UNDEFINED, /* 153 */
+ TPM_UNDEFINED, /* 154 */
+ TPM_UNDEFINED, /* 155 */
+ TPM_UNDEFINED, /* 156 */
+ TPM_UNDEFINED, /* 157 */
+ TPM_UNDEFINED, /* 158 */
+ TPM_UNDEFINED, /* 159 */
+ TPM_UNDEFINED, /* 15a */
+ TPM_UNDEFINED, /* 15b */
+ TPM_MEDIUM, /* 15c */
+ TPM_UNDEFINED, /* 15d */
+ TPM_UNDEFINED, /* 15e */
+ TPM_UNDEFINED, /* 15f */
+ TPM_UNDEFINED, /* 160 */
+ TPM_UNDEFINED, /* 161 */
+ TPM_UNDEFINED, /* 162 */
+ TPM_UNDEFINED, /* 163 */
+ TPM_UNDEFINED, /* 164 */
+ TPM_UNDEFINED, /* 165 */
+ TPM_UNDEFINED, /* 166 */
+ TPM_UNDEFINED, /* 167 */
+ TPM_UNDEFINED, /* 168 */
+ TPM_UNDEFINED, /* 169 */
+ TPM_UNDEFINED, /* 16a */
+ TPM_UNDEFINED, /* 16b */
+ TPM_UNDEFINED, /* 16c */
+ TPM_UNDEFINED, /* 16d */
+ TPM_UNDEFINED, /* 16e */
+ TPM_UNDEFINED, /* 16f */
+ TPM_UNDEFINED, /* 170 */
+ TPM_UNDEFINED, /* 171 */
+ TPM_UNDEFINED, /* 172 */
+ TPM_UNDEFINED, /* 173 */
+ TPM_UNDEFINED, /* 174 */
+ TPM_UNDEFINED, /* 175 */
+ TPM_UNDEFINED, /* 176 */
+ TPM_LONG, /* 177 */
+ TPM_UNDEFINED, /* 178 */
+ TPM_UNDEFINED, /* 179 */
+ TPM_MEDIUM, /* 17a */
+ TPM_LONG, /* 17b */
+ TPM_UNDEFINED, /* 17c */
+ TPM_UNDEFINED, /* 17d */
+ TPM_UNDEFINED, /* 17e */
+ TPM_UNDEFINED, /* 17f */
+ TPM_UNDEFINED, /* 180 */
+ TPM_UNDEFINED, /* 181 */
+ TPM_MEDIUM, /* 182 */
+ TPM_UNDEFINED, /* 183 */
+ TPM_UNDEFINED, /* 184 */
+ TPM_MEDIUM, /* 185 */
+ TPM_MEDIUM, /* 186 */
+ TPM_UNDEFINED, /* 187 */
+ TPM_UNDEFINED, /* 188 */
+ TPM_UNDEFINED, /* 189 */
+ TPM_UNDEFINED, /* 18a */
+ TPM_UNDEFINED, /* 18b */
+ TPM_UNDEFINED, /* 18c */
+ TPM_UNDEFINED, /* 18d */
+ TPM_UNDEFINED, /* 18e */
+ TPM_UNDEFINED /* 18f */
+};
+
+#define TPM2_PCR_READ_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_pcr_read_in))
+
+static const struct tpm_input_header tpm2_pcrread_header = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .length = cpu_to_be32(TPM2_PCR_READ_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_PCR_READ)
+};
+
+/**
+ * tpm2_pcr_read() - read a PCR value
+ * @chip: TPM chip to use.
+ * @pcr_idx: index of the PCR to read.
+ * @res_buf: buffer to store the resulting hash.
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
+ int rc;
+ struct tpm2_cmd cmd;
+ u8 *buf;
+
+ if (pcr_idx >= TPM2_PLATFORM_PCR)
+ return -EINVAL;
+
+ cmd.header.in = tpm2_pcrread_header;
+ cmd.params.pcrread_in.pcr_selects_cnt = cpu_to_be32(1);
+ cmd.params.pcrread_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
+ cmd.params.pcrread_in.pcr_select_size = TPM2_PCR_SELECT_MIN;
+
+ memset(cmd.params.pcrread_in.pcr_select, 0,
+ sizeof(cmd.params.pcrread_in.pcr_select));
+ cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
+
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ "attempting to read a pcr value");
+ if (rc == 0) {
+ buf = cmd.params.pcrread_out.digest;
+ memcpy(res_buf, buf, TPM_DIGEST_SIZE);
+ }
+
+ return rc;
+}
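The PCR selection here is a TPM2_PCR_SELECT_MIN-byte bitmap with one bit per PCR, so the byte is picked with pcr_idx >> 3 and the bit with pcr_idx & 0x7. A quick standalone check of that mapping:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TPM2_PLATFORM_PCR	24
#define TPM2_PCR_SELECT_MIN	((TPM2_PLATFORM_PCR + 7) / 8)

int main(void)
{
	uint8_t pcr_select[TPM2_PCR_SELECT_MIN];
	int pcr_idx = 17;	/* example PCR index */

	memset(pcr_select, 0, sizeof(pcr_select));
	pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);

	/* PCR 17 lands in byte 2 as bit 1, i.e. 0x02 */
	printf("byte %d = 0x%02x\n", pcr_idx >> 3, pcr_select[pcr_idx >> 3]);
	return 0;
}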
+
+#define TPM2_GET_PCREXTEND_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_pcr_extend_in))
+
+static const struct tpm_input_header tpm2_pcrextend_header = {
+ .tag = cpu_to_be16(TPM2_ST_SESSIONS),
+ .length = cpu_to_be32(TPM2_GET_PCREXTEND_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_PCR_EXTEND)
+};
+
+/**
+ * tpm2_pcr_extend() - extend a PCR value
+ * @chip: TPM chip to use.
+ * @pcr_idx: index of the PCR.
+ * @hash: hash value to use for the extend operation.
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
+{
+ struct tpm2_cmd cmd;
+ int rc;
+
+ cmd.header.in = tpm2_pcrextend_header;
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
+ cmd.params.pcrextend_in.auth_area_size =
+ cpu_to_be32(sizeof(struct tpm2_null_auth_area));
+ cmd.params.pcrextend_in.auth_area.handle =
+ cpu_to_be32(TPM2_RS_PW);
+ cmd.params.pcrextend_in.auth_area.nonce_size = 0;
+ cmd.params.pcrextend_in.auth_area.attributes = 0;
+ cmd.params.pcrextend_in.auth_area.auth_size = 0;
+ cmd.params.pcrextend_in.digest_cnt = cpu_to_be32(1);
+ cmd.params.pcrextend_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
+ memcpy(cmd.params.pcrextend_in.digest, hash, TPM_DIGEST_SIZE);
+
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ "attempting extend a PCR value");
+
+ return rc;
+}
+
+#define TPM2_GETRANDOM_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_get_random_in))
+
+static const struct tpm_input_header tpm2_getrandom_header = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .length = cpu_to_be32(TPM2_GETRANDOM_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_GET_RANDOM)
+};
+
+/**
+ * tpm2_get_random() - get random bytes from the TPM RNG
+ * @chip: TPM chip to use
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+{
+ struct tpm2_cmd cmd;
+ u32 recd;
+ u32 num_bytes;
+ int err;
+ int total = 0;
+ int retries = 5;
+ u8 *dest = out;
+
+ num_bytes = min_t(u32, max, sizeof(cmd.params.getrandom_out.buffer));
+
+ if (!out || !num_bytes ||
+ max > sizeof(cmd.params.getrandom_out.buffer))
+ return -EINVAL;
+
+ do {
+ cmd.header.in = tpm2_getrandom_header;
+ cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
+
+ err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ "attempting get random");
+ if (err)
+ break;
+
+ recd = min_t(u32, be16_to_cpu(cmd.params.getrandom_out.size),
+ num_bytes);
+ memcpy(dest, cmd.params.getrandom_out.buffer, recd);
+
+ dest += recd;
+ total += recd;
+ num_bytes -= recd;
+ } while (retries-- && total < max);
+
+ return total ? total : -EIO;
+}
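The loop above asks for at most sizeof(getrandom_out.buffer) bytes per command and retries up to five times, accumulating until the caller's request is filled or the retries run out. A standalone sketch of that accumulate-and-retry shape, with a fake TPM standing in for the real transmit path:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define CHUNK 48	/* stands in for sizeof(getrandom_out.buffer) */

/* fake TPM: hands back at most n bytes and deliberately runs short */
static size_t fake_tpm_get_random(uint8_t *dst, size_t n)
{
	size_t give = n > 5 ? n - 5 : n;

	memset(dst, 0xab, give);
	return give;
}

/* mirrors the retry/accumulate structure of tpm2_get_random() */
static int get_random(uint8_t *out, size_t max)
{
	size_t num_bytes = max < CHUNK ? max : CHUNK;
	size_t total = 0;
	int retries = 5;

	if (!out || !num_bytes || max > CHUNK)
		return -1;

	do {
		size_t recd = fake_tpm_get_random(out + total, num_bytes);

		total += recd;
		num_bytes -= recd;
	} while (retries-- && total < max);

	return total ? (int)total : -1;
}

int main(void)
{
	uint8_t buf[32];

	printf("collected %d bytes\n", get_random(buf, sizeof(buf)));
	return 0;
}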
+
+#define TPM2_GET_TPM_PT_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_get_tpm_pt_in))
+
+static const struct tpm_input_header tpm2_get_tpm_pt_header = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .length = cpu_to_be32(TPM2_GET_TPM_PT_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_GET_CAPABILITY)
+};
+
+/**
+ * tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
+ * @chip: TPM chip to use.
+ * @property_id: property ID.
+ * @value: output variable.
+ * @desc: passed to tpm_transmit_cmd()
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
+ const char *desc)
+{
+ struct tpm2_cmd cmd;
+ int rc;
+
+ cmd.header.in = tpm2_get_tpm_pt_header;
+ cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
+ cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
+ cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
+
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc);
+ if (!rc)
+ *value = cmd.params.get_tpm_pt_out.value;
+
+ return rc;
+}
+
+#define TPM2_STARTUP_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_startup_in))
+
+static const struct tpm_input_header tpm2_startup_header = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .length = cpu_to_be32(TPM2_STARTUP_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_STARTUP)
+};
+
+/**
+ * tpm2_startup() - send startup command to the TPM chip
+ * @chip: TPM chip to use.
+ * @startup_type: startup type. The value is either
+ * TPM2_SU_CLEAR or TPM2_SU_STATE.
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
+{
+ struct tpm2_cmd cmd;
+
+ cmd.header.in = tpm2_startup_header;
+
+ cmd.params.startup_in.startup_type = cpu_to_be16(startup_type);
+ return tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ "attempting to start the TPM");
+}
+EXPORT_SYMBOL_GPL(tpm2_startup);
+
+#define TPM2_SHUTDOWN_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_startup_in))
+
+static const struct tpm_input_header tpm2_shutdown_header = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .length = cpu_to_be32(TPM2_SHUTDOWN_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_SHUTDOWN)
+};
+
+/**
+ * tpm2_shutdown() - send shutdown command to the TPM chip
+ * @chip: TPM chip to use.
+ * @shutdown_type: shutdown type. The value is either
+ * TPM2_SU_CLEAR or TPM2_SU_STATE.
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
+{
+ struct tpm2_cmd cmd;
+
+ cmd.header.in = tpm2_shutdown_header;
+
+ cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
+ return tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ "stopping the TPM");
+}
+EXPORT_SYMBOL_GPL(tpm2_shutdown);
+
+/*
+ * tpm2_calc_ordinal_duration() - maximum duration for a command
+ * @chip: TPM chip to use.
+ * @ordinal: command code number.
+ *
+ * The maximum duration for the command, in jiffies, is returned. Commands
+ * with an undefined or zero duration fall back to a two minute default.
+ */
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ int index = TPM_UNDEFINED;
+ int duration = 0;
+
+ if (ordinal >= TPM2_CC_FIRST && ordinal <= TPM2_CC_LAST)
+ index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST];
+
+ if (index != TPM_UNDEFINED)
+ duration = chip->vendor.duration[index];
+
+ if (duration <= 0)
+ duration = 2 * 60 * HZ;
+
+ return duration;
+}
+EXPORT_SYMBOL_GPL(tpm2_calc_ordinal_duration);
+
+#define TPM2_SELF_TEST_IN_SIZE \
+ (sizeof(struct tpm_input_header) + \
+ sizeof(struct tpm2_self_test_in))
+
+static const struct tpm_input_header tpm2_selftest_header = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .length = cpu_to_be32(TPM2_SELF_TEST_IN_SIZE),
+ .ordinal = cpu_to_be32(TPM2_CC_SELF_TEST)
+};
+
+/**
+ * tpm2_start_selftest() - start a self test
+ * @chip: TPM chip to use
+ * @full: test all commands instead of testing only those that were not
+ * previously tested.
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
+{
+ int rc;
+ struct tpm2_cmd cmd;
+
+ cmd.header.in = tpm2_selftest_header;
+ cmd.params.selftest_in.full_test = full;
+
+ rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ "continue selftest");
+
+ /* At least some prototype chips seem to give RC_TESTING error
+ * immediately. This is a workaround for that.
+ */
+ if (rc == TPM2_RC_TESTING) {
+ dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n");
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_do_selftest() - run a full self test
+ * @chip: TPM chip to use
+ *
+ * During the self test TPM2 commands return with the error code RC_TESTING.
+ * Waiting is done by issuing PCR read until it executes successfully.
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_do_selftest(struct tpm_chip *chip)
+{
+ int rc;
+ unsigned int loops;
+ unsigned int delay_msec = 100;
+ unsigned long duration;
+ struct tpm2_cmd cmd;
+ int i;
+
+ duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST);
+
+ loops = jiffies_to_msecs(duration) / delay_msec;
+
+ rc = tpm2_start_selftest(chip, true);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < loops; i++) {
+ /* Attempt to read a PCR value */
+ cmd.header.in = tpm2_pcrread_header;
+ cmd.params.pcrread_in.pcr_selects_cnt = cpu_to_be32(1);
+ cmd.params.pcrread_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
+ cmd.params.pcrread_in.pcr_select_size = TPM2_PCR_SELECT_MIN;
+ cmd.params.pcrread_in.pcr_select[0] = 0x01;
+ cmd.params.pcrread_in.pcr_select[1] = 0x00;
+ cmd.params.pcrread_in.pcr_select[2] = 0x00;
+
+ rc = tpm_transmit_cmd(chip, (u8 *) &cmd, sizeof(cmd), NULL);
+ if (rc < 0)
+ break;
+
+ rc = be32_to_cpu(cmd.header.out.return_code);
+ if (rc != TPM2_RC_TESTING)
+ break;
+
+ msleep(delay_msec);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm2_do_selftest);
+
+/**
+ * tpm2_gen_interrupt() - generate an interrupt
+ * @chip: TPM chip to use
+ * @quiet: suppress the error message
+ *
+ * 0 is returned when the operation is successful. If a negative number is
+ * returned it denotes a POSIX error code. If a positive number is returned
+ * it denotes a TPM error.
+ */
+int tpm2_gen_interrupt(struct tpm_chip *chip, bool quiet)
+{
+ const char *desc = NULL;
+ u32 dummy;
+
+ if (!quiet)
+ desc = "attempting to generate an interrupt";
+
+ return tpm2_get_tpm_pt(chip, TPM2_CAP_TPM_PROPERTIES, &dummy, desc);
+}
+EXPORT_SYMBOL_GPL(tpm2_gen_interrupt);
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 435c8b9dd2f8..dfadad0916a1 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (i = 0; i < 6; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->dev, "error reading header\n");
+ dev_err(chip->pdev, "error reading header\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size = be32_to_cpu(*native_size);
if (count < size) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"Recv size(%d) less than available space\n", size);
for (; i < size; i++) { /* clear the waiting data anyway */
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->dev, "error reading data\n");
+ dev_err(chip->pdev, "error reading data\n");
return -EIO;
}
}
@@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (; i < size; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->dev, "error reading data\n");
+ dev_err(chip->pdev, "error reading data\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
status = ioread8(chip->vendor.iobase + 1);
if (status & ATML_STATUS_DATA_AVAIL) {
- dev_err(chip->dev, "data available is stuck\n");
+ dev_err(chip->pdev, "data available is stuck\n");
return -EIO;
}
@@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
int i;
- dev_dbg(chip->dev, "tpm_atml_send:\n");
+ dev_dbg(chip->pdev, "tpm_atml_send:\n");
for (i = 0; i < count; i++) {
- dev_dbg(chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
iowrite8(buf[i], chip->vendor.iobase);
}
@@ -138,11 +138,11 @@ static void atml_plat_remove(void)
struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
if (chip) {
+ tpm_chip_unregister(chip);
if (chip->vendor.have_region)
atmel_release_region(chip->vendor.base,
chip->vendor.region_size);
atmel_put_base_addr(chip->vendor.iobase);
- tpm_remove_hardware(chip->dev);
platform_device_unregister(pdev);
}
}
@@ -183,8 +183,9 @@ static int __init init_atmel(void)
goto err_rel_reg;
}
- if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_atmel))) {
- rc = -ENODEV;
+ chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
goto err_unreg_dev;
}
@@ -193,6 +194,10 @@ static int __init init_atmel(void)
chip->vendor.have_region = have_region;
chip->vendor.region_size = region_size;
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_unreg_dev;
+
return 0;
err_unreg_dev:
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
new file mode 100644
index 000000000000..3dd23cfae4fe
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG CRB 2.0 TPM specification.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/acpi.h>
+#include <linux/highmem.h>
+#include <linux/rculist.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "tpm.h"
+
+#define ACPI_SIG_TPM2 "TPM2"
+
+static const u8 CRB_ACPI_START_UUID[] = {
+ /* 0000 */ 0xAB, 0x6C, 0xBF, 0x6B, 0x63, 0x54, 0x14, 0x47,
+ /* 0008 */ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4
+};
+
+enum crb_defaults {
+ CRB_ACPI_START_REVISION_ID = 1,
+ CRB_ACPI_START_INDEX = 1,
+};
+
+enum crb_start_method {
+ CRB_SM_ACPI_START = 2,
+ CRB_SM_CRB = 7,
+ CRB_SM_CRB_WITH_ACPI_START = 8,
+};
+
+struct acpi_tpm2 {
+ struct acpi_table_header hdr;
+ u16 platform_class;
+ u16 reserved;
+ u64 control_area_pa;
+ u32 start_method;
+} __packed;
+
+enum crb_ca_request {
+ CRB_CA_REQ_GO_IDLE = BIT(0),
+ CRB_CA_REQ_CMD_READY = BIT(1),
+};
+
+enum crb_ca_status {
+ CRB_CA_STS_ERROR = BIT(0),
+ CRB_CA_STS_TPM_IDLE = BIT(1),
+};
+
+enum crb_start {
+ CRB_START_INVOKE = BIT(0),
+};
+
+enum crb_cancel {
+ CRB_CANCEL_INVOKE = BIT(0),
+};
+
+struct crb_control_area {
+ u32 req;
+ u32 sts;
+ u32 cancel;
+ u32 start;
+ u32 int_enable;
+ u32 int_sts;
+ u32 cmd_size;
+ u64 cmd_pa;
+ u32 rsp_size;
+ u64 rsp_pa;
+} __packed;
+
+enum crb_status {
+ CRB_STS_COMPLETE = BIT(0),
+};
+
+enum crb_flags {
+ CRB_FL_ACPI_START = BIT(0),
+ CRB_FL_CRB_START = BIT(1),
+};
+
+struct crb_priv {
+ unsigned int flags;
+ struct crb_control_area __iomem *cca;
+ u8 __iomem *cmd;
+ u8 __iomem *rsp;
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int crb_resume(struct device *dev)
+{
+ int rc;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ rc = tpm2_shutdown(chip, TPM2_SU_STATE);
+ if (!rc)
+ rc = tpm2_do_selftest(chip);
+
+ return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, crb_resume);
+
+static u8 crb_status(struct tpm_chip *chip)
+{
+ struct crb_priv *priv = chip->vendor.priv;
+ u8 sts = 0;
+
+ if ((le32_to_cpu(ioread32(&priv->cca->start)) & CRB_START_INVOKE) !=
+ CRB_START_INVOKE)
+ sts |= CRB_STS_COMPLETE;
+
+ return sts;
+}
+
+static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct crb_priv *priv = chip->vendor.priv;
+ unsigned int expected;
+
+ /* sanity check */
+ if (count < 6)
+ return -EIO;
+
+ if (le32_to_cpu(ioread32(&priv->cca->sts)) & CRB_CA_STS_ERROR)
+ return -EIO;
+
+ memcpy_fromio(buf, priv->rsp, 6);
+ expected = be32_to_cpup((__be32 *) &buf[2]);
+
+ if (expected > count)
+ return -EIO;
+
+ memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
+
+ return expected;
+}
+
+static int crb_do_acpi_start(struct tpm_chip *chip)
+{
+ union acpi_object *obj;
+ int rc;
+
+ obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
+ CRB_ACPI_START_UUID,
+ CRB_ACPI_START_REVISION_ID,
+ CRB_ACPI_START_INDEX,
+ NULL);
+ if (!obj)
+ return -ENXIO;
+ rc = obj->integer.value == 0 ? 0 : -ENXIO;
+ ACPI_FREE(obj);
+ return rc;
+}
+
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct crb_priv *priv = chip->vendor.priv;
+ int rc = 0;
+
+ if (len > le32_to_cpu(ioread32(&priv->cca->cmd_size))) {
+ dev_err(&chip->dev,
+ "invalid command count value %x %zx\n",
+ (unsigned int) len,
+ (size_t) le32_to_cpu(ioread32(&priv->cca->cmd_size)));
+ return -E2BIG;
+ }
+
+ memcpy_toio(priv->cmd, buf, len);
+
+ /* Make sure that cmd is populated before issuing start. */
+ wmb();
+
+ if (priv->flags & CRB_FL_CRB_START)
+ iowrite32(cpu_to_le32(CRB_START_INVOKE), &priv->cca->start);
+
+ if (priv->flags & CRB_FL_ACPI_START)
+ rc = crb_do_acpi_start(chip);
+
+ return rc;
+}
+
+static void crb_cancel(struct tpm_chip *chip)
+{
+ struct crb_priv *priv = chip->vendor.priv;
+
+ iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel);
+
+ /* Make sure that cancel is written before issuing an ACPI start. */
+ wmb();
+
+ if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
+ dev_err(&chip->dev, "ACPI Start failed\n");
+
+ iowrite32(0, &priv->cca->cancel);
+}
+
+static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct crb_priv *priv = chip->vendor.priv;
+ u32 cancel = le32_to_cpu(ioread32(&priv->cca->cancel));
+
+ return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+}
+
+static const struct tpm_class_ops tpm_crb = {
+ .status = crb_status,
+ .recv = crb_recv,
+ .send = crb_send,
+ .cancel = crb_cancel,
+ .req_canceled = crb_req_canceled,
+ .req_complete_mask = CRB_STS_COMPLETE,
+ .req_complete_val = CRB_STS_COMPLETE,
+};
+
+static int crb_acpi_add(struct acpi_device *device)
+{
+ struct tpm_chip *chip;
+ struct acpi_tpm2 *buf;
+ struct crb_priv *priv;
+ struct device *dev = &device->dev;
+ acpi_status status;
+ u32 sm;
+ u64 pa;
+ int rc;
+
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ status = acpi_get_table(ACPI_SIG_TPM2, 1,
+ (struct acpi_table_header **) &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to get TPM2 ACPI table\n");
+ return -ENODEV;
+ }
+
+ if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
+ dev_err(dev, "TPM2 ACPI table has wrong size");
+ return -EINVAL;
+ }
+
+ priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "failed to devm_kzalloc for private data\n");
+ return -ENOMEM;
+ }
+
+ sm = le32_to_cpu(buf->start_method);
+
+ /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+ * reports only ACPI start but in practice seems to require both
+ * ACPI start and CRB start.
+ */
+ if (sm == CRB_SM_CRB || sm == CRB_SM_CRB_WITH_ACPI_START ||
+ !strcmp(acpi_device_hid(device), "MSFT0101"))
+ priv->flags |= CRB_FL_CRB_START;
+
+ if (sm == CRB_SM_ACPI_START || sm == CRB_SM_CRB_WITH_ACPI_START)
+ priv->flags |= CRB_FL_ACPI_START;
+
+ priv->cca = (struct crb_control_area __iomem *)
+ devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
+ if (!priv->cca) {
+ dev_err(dev, "ioremap of the control area failed\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
+ pa = le64_to_cpu(pa);
+ priv->cmd = devm_ioremap_nocache(dev, le64_to_cpu(pa),
+ ioread32(&priv->cca->cmd_size));
+ if (!priv->cmd) {
+ dev_err(dev, "ioremap of the command buffer failed\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
+ pa = le64_to_cpu(pa);
+ priv->rsp = devm_ioremap_nocache(dev, le64_to_cpu(pa),
+ ioread32(&priv->cca->rsp_size));
+ if (!priv->rsp) {
+ dev_err(dev, "ioremap of the response buffer failed\n");
+ return -ENOMEM;
+ }
+
+ chip->vendor.priv = priv;
+
+ /* Default timeouts and durations */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
+ chip->vendor.duration[TPM_SHORT] =
+ msecs_to_jiffies(TPM2_DURATION_SHORT);
+ chip->vendor.duration[TPM_MEDIUM] =
+ msecs_to_jiffies(TPM2_DURATION_MEDIUM);
+ chip->vendor.duration[TPM_LONG] =
+ msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ chip->acpi_dev_handle = device->handle;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc)
+ return rc;
+
+ return tpm_chip_register(chip);
+}
+
+static int crb_acpi_remove(struct acpi_device *device)
+{
+ struct device *dev = &device->dev;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+ return 0;
+}
+
+static struct acpi_device_id crb_device_ids[] = {
+ {"MSFT0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, crb_device_ids);
+
+static struct acpi_driver crb_acpi_driver = {
+ .name = "tpm_crb",
+ .ids = crb_device_ids,
+ .ops = {
+ .add = crb_acpi_add,
+ .remove = crb_acpi_remove,
+ },
+ .drv = {
+ .pm = &crb_pm,
+ },
+};
+
+module_acpi_driver(crb_acpi_driver);
+MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
+MODULE_DESCRIPTION("TPM2 Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
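
The new CRB driver above only fills in the tpm_class_ops callbacks; the actual transaction is driven by the common TPM core, which sends the command, polls status() until the bits named by req_complete_mask match req_complete_val, and only then calls recv(). A minimal sketch of that driving loop is shown below — the helper name crb_transact(), the use of timeout_a and the 1 ms poll interval are assumptions made for the illustration, not part of this patch.

static int crb_transact(struct tpm_chip *chip, u8 *buf, size_t len,
                        size_t buf_size)
{
        unsigned long stop = jiffies + chip->vendor.timeout_a;
        int rc;

        rc = chip->ops->send(chip, buf, len);
        if (rc)
                return rc;

        /* Poll until the driver-defined completion bits are set. */
        while ((chip->ops->status(chip) & chip->ops->req_complete_mask) !=
               chip->ops->req_complete_val) {
                if (chip->ops->req_canceled(chip, chip->ops->status(chip)))
                        return -ECANCELED;
                if (time_after(jiffies, stop)) {
                        chip->ops->cancel(chip);
                        return -ETIME;
                }
                usleep_range(1000, 2000);
        }

        return chip->ops->recv(chip, buf, buf_size);
}
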
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 77272925dee6..7a0ca78ad3c6 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -52,7 +52,7 @@ struct priv_data {
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->pdev);
s32 status;
priv->len = 0;
@@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
status = i2c_master_send(client, buf, len);
- dev_dbg(chip->dev,
+ dev_dbg(chip->pdev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
return status;
@@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->pdev);
struct tpm_output_header *hdr =
(struct tpm_output_header *)priv->buffer;
u32 expected_len;
@@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -ENOMEM;
if (priv->len >= expected_len) {
- dev_dbg(chip->dev,
+ dev_dbg(chip->pdev,
"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
rc = i2c_master_recv(client, buf, expected_len);
- dev_dbg(chip->dev,
+ dev_dbg(chip->pdev,
"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static void i2c_atmel_cancel(struct tpm_chip *chip)
{
- dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported");
+ dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported");
}
static u8 i2c_atmel_read_status(struct tpm_chip *chip)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->pdev);
int rc;
/* The TPM fails the I2C read until it is ready, so we do the entire
@@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip)
/* Once the TPM has completed the command the command remains readable
* until another command is issued. */
rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
- dev_dbg(chip->dev,
+ dev_dbg(chip->pdev,
"%s: sts=%d", __func__, rc);
if (rc <= 0)
return 0;
@@ -153,21 +153,20 @@ static const struct tpm_class_ops i2c_atmel = {
static int i2c_atmel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int rc;
struct tpm_chip *chip;
struct device *dev = &client->dev;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- chip = tpm_register_hardware(dev, &i2c_atmel);
- if (!chip) {
- dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
- return -ENODEV;
- }
+ chip = tpmm_chip_alloc(dev, &i2c_atmel);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
GFP_KERNEL);
+ if (!chip->vendor.priv)
+ return -ENOMEM;
/* Default timeouts */
chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
@@ -179,33 +178,20 @@ static int i2c_atmel_probe(struct i2c_client *client,
/* There is no known way to probe for this device, and all version
* information seems to be read via TPM commands. Thus we rely on the
* TPM startup process in the common code to detect the device. */
- if (tpm_get_timeouts(chip)) {
- rc = -ENODEV;
- goto out_err;
- }
-
- if (tpm_do_selftest(chip)) {
- rc = -ENODEV;
- goto out_err;
- }
+ if (tpm_get_timeouts(chip))
+ return -ENODEV;
- return 0;
+ if (tpm_do_selftest(chip))
+ return -ENODEV;
-out_err:
- tpm_dev_vendor_release(chip);
- tpm_remove_hardware(chip->dev);
- return rc;
+ return tpm_chip_register(chip);
}
static int i2c_atmel_remove(struct i2c_client *client)
{
struct device *dev = &(client->dev);
struct tpm_chip *chip = dev_get_drvdata(dev);
-
- if (chip)
- tpm_dev_vendor_release(chip);
- tpm_remove_hardware(dev);
- kfree(chip);
+ tpm_chip_unregister(chip);
return 0;
}
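
The tpm_i2c_atmel conversion above is the pattern repeated by every driver in this series: tpm_register_hardware()/tpm_remove_hardware() pairs give way to a devres-managed tpmm_chip_alloc() plus explicit tpm_chip_register()/tpm_chip_unregister() calls, which removes most of the hand-rolled error unwinding. A condensed sketch of the resulting skeleton follows; example_ops, example_probe() and example_remove() are invented names for the illustration, and the ops table is assumed to be defined elsewhere.

static int example_probe(struct device *dev)
{
        struct tpm_chip *chip;

        /* devres-managed: the chip is freed automatically if probe fails */
        chip = tpmm_chip_alloc(dev, &example_ops);
        if (IS_ERR(chip))
                return PTR_ERR(chip);

        /* driver-specific setup of chip->vendor.* and timeouts goes here */

        /* creates the character device and sysfs entries */
        return tpm_chip_register(chip);
}

static int example_remove(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);

        /* unregister first, then release bus/IO resources held by the driver */
        tpm_chip_unregister(chip);
        return 0;
}
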
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 472af4bb1b61..33c5f360ab01 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
- dev_err(chip->dev, "Unable to read header\n");
+ dev_err(chip->pdev, "Unable to read header\n");
goto out;
}
@@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (size < expected) {
- dev_err(chip->dev, "Unable to read remainder of result\n");
+ dev_err(chip->pdev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->dev, "Error left over data\n");
+ dev_err(chip->pdev, "Error left over data\n");
size = -EIO;
goto out;
}
@@ -581,12 +581,9 @@ static int tpm_tis_i2c_init(struct device *dev)
int rc = 0;
struct tpm_chip *chip;
- chip = tpm_register_hardware(dev, &tpm_tis_i2c);
- if (!chip) {
- dev_err(dev, "could not register hardware\n");
- rc = -ENODEV;
- goto out_err;
- }
+ chip = tpmm_chip_alloc(dev, &tpm_tis_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
/* Disable interrupts */
chip->vendor.irq = 0;
@@ -600,7 +597,7 @@ static int tpm_tis_i2c_init(struct device *dev)
if (request_locality(chip, 0) != 0) {
dev_err(dev, "could not request locality\n");
rc = -ENODEV;
- goto out_vendor;
+ goto out_err;
}
/* read four bytes from DID_VID register */
@@ -628,21 +625,9 @@ static int tpm_tis_i2c_init(struct device *dev)
tpm_get_timeouts(chip);
tpm_do_selftest(chip);
- return 0;
-
+ return tpm_chip_register(chip);
out_release:
release_locality(chip, chip->vendor.locality, 1);
-
-out_vendor:
- /* close file handles */
- tpm_dev_vendor_release(chip);
-
- /* remove hardware */
- tpm_remove_hardware(chip->dev);
-
- /* reset these pointers, otherwise we oops */
- chip->dev->release = NULL;
- chip->release = NULL;
tpm_dev.client = NULL;
out_err:
return rc;
@@ -712,17 +697,9 @@ static int tpm_tis_i2c_probe(struct i2c_client *client,
static int tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = tpm_dev.chip;
- release_locality(chip, chip->vendor.locality, 1);
- /* close file handles */
- tpm_dev_vendor_release(chip);
-
- /* remove hardware */
- tpm_remove_hardware(chip->dev);
-
- /* reset these pointers, otherwise we oops */
- chip->dev->release = NULL;
- chip->release = NULL;
+ tpm_chip_unregister(chip);
+ release_locality(chip, chip->vendor.locality, 1);
tpm_dev.client = NULL;
return 0;
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 7b158efd49f7..9d42b7d78e50 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
/* read TPM_STS register */
static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->pdev);
s32 status;
u8 data;
status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
if (status <= 0) {
- dev_err(chip->dev, "%s() error return %d\n", __func__,
+ dev_err(chip->pdev, "%s() error return %d\n", __func__,
status);
data = TPM_STS_ERR_VAL;
}
@@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
/* write commandReady to TPM_STS register */
static void i2c_nuvoton_ready(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->pdev);
s32 status;
/* this causes the current command to be aborted */
status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
if (status < 0)
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"%s() fail to write TPM_STS.commandReady\n", __func__);
}
@@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
return 0;
} while (time_before(jiffies, stop));
}
- dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
value);
return -ETIMEDOUT;
}
@@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
&chip->vendor.read_queue) == 0) {
burst_count = i2c_nuvoton_get_burstcount(client, chip);
if (burst_count < 0) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"%s() fail to read burstCount=%d\n", __func__,
burst_count);
return -EIO;
@@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
bytes2read, &buf[size]);
if (rc < 0) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"%s() fail on i2c_nuvoton_read_buf()=%d\n",
__func__, rc);
return -EIO;
}
- dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read);
+ dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read);
size += bytes2read;
}
@@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
/* Read TPM command results */
static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct device *dev = chip->dev;
+ struct device *dev = chip->pdev;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
@@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
break;
}
i2c_nuvoton_ready(chip);
- dev_dbg(chip->dev, "%s() -> %d\n", __func__, size);
+ dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size);
return size;
}
@@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
*/
static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
- struct device *dev = chip->dev;
+ struct device *dev = chip->pdev;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
size_t count = 0;
@@ -530,14 +530,15 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
(u8) (vid >> 16), (u8) (vid >> 24));
- chip = tpm_register_hardware(dev, &tpm_i2c);
- if (!chip) {
- dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
- return -ENODEV;
- }
+ chip = tpmm_chip_alloc(dev, &tpm_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
GFP_KERNEL);
+ if (!chip->vendor.priv)
+ return -ENOMEM;
+
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
@@ -559,7 +560,7 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
rc = devm_request_irq(dev, chip->vendor.irq,
i2c_nuvoton_int_handler,
IRQF_TRIGGER_LOW,
- chip->vendor.miscdev.name,
+ chip->devname,
chip);
if (rc) {
dev_err(dev, "%s() Unable to request irq: %d for use\n",
@@ -584,7 +585,7 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
TPM_DATA_FIFO_W,
1, (u8 *) (&rc));
if (rc < 0)
- goto out_err;
+ return rc;
/* TPM_STS <- 0x40 (commandReady) */
i2c_nuvoton_ready(chip);
} else {
@@ -594,45 +595,29 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
* only TPM_STS_VALID should be set
*/
if (i2c_nuvoton_read_status(chip) !=
- TPM_STS_VALID) {
- rc = -EIO;
- goto out_err;
- }
+ TPM_STS_VALID)
+ return -EIO;
}
}
}
- if (tpm_get_timeouts(chip)) {
- rc = -ENODEV;
- goto out_err;
- }
-
- if (tpm_do_selftest(chip)) {
- rc = -ENODEV;
- goto out_err;
- }
+ if (tpm_get_timeouts(chip))
+ return -ENODEV;
- return 0;
+ if (tpm_do_selftest(chip))
+ return -ENODEV;
-out_err:
- tpm_dev_vendor_release(chip);
- tpm_remove_hardware(chip->dev);
- return rc;
+ return tpm_chip_register(chip);
}
static int i2c_nuvoton_remove(struct i2c_client *client)
{
struct device *dev = &(client->dev);
struct tpm_chip *chip = dev_get_drvdata(dev);
-
- if (chip)
- tpm_dev_vendor_release(chip);
- tpm_remove_hardware(dev);
- kfree(chip);
+ tpm_chip_unregister(chip);
return 0;
}
-
static const struct i2c_device_id i2c_nuvoton_id[] = {
{I2C_DRIVER_NAME, 0},
{}
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 4669e3713428..612845b36c29 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -1,6 +1,6 @@
/*
* STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
- * Copyright (C) 2009, 2010 STMicroelectronics
+ * Copyright (C) 2009, 2010, 2014 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,11 +12,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
- * STMicroelectronics version 1.2.0, Copyright (C) 2010
+ * STMicroelectronics version 1.2.1, Copyright (C) 2014
* STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
* This is free software, and you are welcome to redistribute it
* under certain conditions.
@@ -27,7 +26,7 @@
*
* @Synopsis:
* 09/15/2010: First shot driver tpm_tis driver for
- lpc is used as model.
+ * lpc is used as model.
*/
#include <linux/pci.h>
@@ -39,18 +38,38 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/wait.h>
+#include <linux/freezer.h>
#include <linux/string.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_data/tpm_stm_st33.h>
#include "tpm.h"
-#include "tpm_i2c_stm_st33.h"
+
+#define TPM_ACCESS 0x0
+#define TPM_STS 0x18
+#define TPM_HASH_END 0x20
+#define TPM_DATA_FIFO 0x24
+#define TPM_HASH_DATA 0x24
+#define TPM_HASH_START 0x28
+#define TPM_INTF_CAPABILITY 0x14
+#define TPM_INT_STATUS 0x10
+#define TPM_INT_ENABLE 0x08
+
+#define TPM_DUMMY_BYTE 0xAA
+#define TPM_WRITE_DIRECTION 0x80
+#define TPM_HEADER_SIZE 10
+#define TPM_BUFSIZE 2048
+
+#define LOCALITY0 0
+
enum stm33zp24_access {
TPM_ACCESS_VALID = 0x80,
@@ -82,6 +101,14 @@ enum tis_defaults {
TIS_LONG_TIMEOUT = 2000,
};
+struct tpm_stm_dev {
+ struct i2c_client *client;
+ struct tpm_chip *chip;
+ u8 buf[TPM_BUFSIZE + 1];
+ u32 intrs;
+ int io_lpcpd;
+};
+
/*
* write8_reg
* Send byte to the TIS register according to the ST33ZP24 I2C protocol.
@@ -90,17 +117,12 @@ enum tis_defaults {
* @param: tpm_size, The length of the data
* @return: Returns negative errno, or else the number of bytes written.
*/
-static int write8_reg(struct i2c_client *client, u8 tpm_register,
+static int write8_reg(struct tpm_stm_dev *tpm_dev, u8 tpm_register,
u8 *tpm_data, u16 tpm_size)
{
- struct st33zp24_platform_data *pin_infos;
-
- pin_infos = client->dev.platform_data;
-
- pin_infos->tpm_i2c_buffer[0][0] = tpm_register;
- memcpy(&pin_infos->tpm_i2c_buffer[0][1], tpm_data, tpm_size);
- return i2c_master_send(client, pin_infos->tpm_i2c_buffer[0],
- tpm_size + 1);
+ tpm_dev->buf[0] = tpm_register;
+ memcpy(tpm_dev->buf + 1, tpm_data, tpm_size);
+ return i2c_master_send(tpm_dev->client, tpm_dev->buf, tpm_size + 1);
} /* write8_reg() */
/*
@@ -111,101 +133,58 @@ static int write8_reg(struct i2c_client *client, u8 tpm_register,
* @param: tpm_size, tpm TPM response size to read.
* @return: number of byte read successfully: should be one if success.
*/
-static int read8_reg(struct i2c_client *client, u8 tpm_register,
+static int read8_reg(struct tpm_stm_dev *tpm_dev, u8 tpm_register,
u8 *tpm_data, int tpm_size)
{
u8 status = 0;
u8 data;
data = TPM_DUMMY_BYTE;
- status = write8_reg(client, tpm_register, &data, 1);
+ status = write8_reg(tpm_dev, tpm_register, &data, 1);
if (status == 2)
- status = i2c_master_recv(client, tpm_data, tpm_size);
+ status = i2c_master_recv(tpm_dev->client, tpm_data, tpm_size);
return status;
} /* read8_reg() */
/*
* I2C_WRITE_DATA
* Send byte to the TIS register according to the ST33ZP24 I2C protocol.
- * @param: client, the chip description
+ * @param: tpm_dev, the chip description
* @param: tpm_register, the tpm tis register where the data should be written
* @param: tpm_data, the tpm_data to write inside the tpm_register
* @param: tpm_size, The length of the data
* @return: number of byte written successfully: should be one if success.
*/
-#define I2C_WRITE_DATA(client, tpm_register, tpm_data, tpm_size) \
- (write8_reg(client, tpm_register | \
+#define I2C_WRITE_DATA(tpm_dev, tpm_register, tpm_data, tpm_size) \
+ (write8_reg(tpm_dev, tpm_register | \
TPM_WRITE_DIRECTION, tpm_data, tpm_size))
/*
* I2C_READ_DATA
* Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
- * @param: tpm, the chip description
+ * @param: tpm_dev, the chip description
* @param: tpm_register, the tpm tis register where the data should be read
* @param: tpm_data, the TPM response
* @param: tpm_size, tpm TPM response size to read.
* @return: number of byte read successfully: should be one if success.
*/
-#define I2C_READ_DATA(client, tpm_register, tpm_data, tpm_size) \
- (read8_reg(client, tpm_register, tpm_data, tpm_size))
+#define I2C_READ_DATA(tpm_dev, tpm_register, tpm_data, tpm_size) \
+ (read8_reg(tpm_dev, tpm_register, tpm_data, tpm_size))
/*
* clear_interruption
* clear the TPM interrupt register.
* @param: tpm, the chip description
+ * @return: the TPM_INT_STATUS value
*/
-static void clear_interruption(struct i2c_client *client)
+static u8 clear_interruption(struct tpm_stm_dev *tpm_dev)
{
u8 interrupt;
- I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1);
- I2C_WRITE_DATA(client, TPM_INT_STATUS, &interrupt, 1);
- I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1);
-} /* clear_interruption() */
-
-/*
- * _wait_for_interrupt_serirq_timeout
- * @param: tpm, the chip description
- * @param: timeout, the timeout of the interrupt
- * @return: the status of the interruption.
- */
-static long _wait_for_interrupt_serirq_timeout(struct tpm_chip *chip,
- unsigned long timeout)
-{
- long status;
- struct i2c_client *client;
- struct st33zp24_platform_data *pin_infos;
-
- client = (struct i2c_client *)TPM_VPRIV(chip);
- pin_infos = client->dev.platform_data;
-
- status = wait_for_completion_interruptible_timeout(
- &pin_infos->irq_detection,
- timeout);
- if (status > 0)
- enable_irq(gpio_to_irq(pin_infos->io_serirq));
- gpio_direction_input(pin_infos->io_serirq);
-
- return status;
-} /* wait_for_interrupt_serirq_timeout() */
-
-static int wait_for_serirq_timeout(struct tpm_chip *chip, bool condition,
- unsigned long timeout)
-{
- int status = 2;
- struct i2c_client *client;
-
- client = (struct i2c_client *)TPM_VPRIV(chip);
- status = _wait_for_interrupt_serirq_timeout(chip, timeout);
- if (!status) {
- status = -EBUSY;
- } else {
- clear_interruption(client);
- if (condition)
- status = 1;
- }
- return status;
-}
+ I2C_READ_DATA(tpm_dev, TPM_INT_STATUS, &interrupt, 1);
+ I2C_WRITE_DATA(tpm_dev, TPM_INT_STATUS, &interrupt, 1);
+ return interrupt;
+} /* clear_interruption() */
/*
* tpm_stm_i2c_cancel, cancel is not implemented.
@@ -213,16 +192,14 @@ static int wait_for_serirq_timeout(struct tpm_chip *chip, bool condition,
*/
static void tpm_stm_i2c_cancel(struct tpm_chip *chip)
{
- struct i2c_client *client;
+ struct tpm_stm_dev *tpm_dev;
u8 data;
- client = (struct i2c_client *)TPM_VPRIV(chip);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
data = TPM_STS_COMMAND_READY;
- I2C_WRITE_DATA(client, TPM_STS, &data, 1);
- if (chip->vendor.irq)
- wait_for_serirq_timeout(chip, 1, chip->vendor.timeout_a);
-} /* tpm_stm_i2c_cancel() */
+ I2C_WRITE_DATA(tpm_dev, TPM_STS, &data, 1);
+} /* tpm_stm_i2c_cancel() */
/*
* tpm_stm_spi_status return the TPM_STS register
@@ -231,13 +208,14 @@ static void tpm_stm_i2c_cancel(struct tpm_chip *chip)
*/
static u8 tpm_stm_i2c_status(struct tpm_chip *chip)
{
- struct i2c_client *client;
+ struct tpm_stm_dev *tpm_dev;
u8 data;
- client = (struct i2c_client *)TPM_VPRIV(chip);
- I2C_READ_DATA(client, TPM_STS, &data, 1);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
+
+ I2C_READ_DATA(tpm_dev, TPM_STS, &data, 1);
return data;
-} /* tpm_stm_i2c_status() */
+} /* tpm_stm_i2c_status() */
/*
@@ -247,20 +225,19 @@ static u8 tpm_stm_i2c_status(struct tpm_chip *chip)
*/
static int check_locality(struct tpm_chip *chip)
{
- struct i2c_client *client;
+ struct tpm_stm_dev *tpm_dev;
u8 data;
u8 status;
- client = (struct i2c_client *)TPM_VPRIV(chip);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
- status = I2C_READ_DATA(client, TPM_ACCESS, &data, 1);
+ status = I2C_READ_DATA(tpm_dev, TPM_ACCESS, &data, 1);
if (status && (data &
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
return chip->vendor.locality;
return -EACCES;
-
} /* check_locality() */
/*
@@ -271,37 +248,31 @@ static int check_locality(struct tpm_chip *chip)
static int request_locality(struct tpm_chip *chip)
{
unsigned long stop;
- long rc;
- struct i2c_client *client;
+ long ret;
+ struct tpm_stm_dev *tpm_dev;
u8 data;
- client = (struct i2c_client *)TPM_VPRIV(chip);
-
if (check_locality(chip) == chip->vendor.locality)
return chip->vendor.locality;
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
+
data = TPM_ACCESS_REQUEST_USE;
- rc = I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
- if (rc < 0)
+ ret = I2C_WRITE_DATA(tpm_dev, TPM_ACCESS, &data, 1);
+ if (ret < 0)
goto end;
- if (chip->vendor.irq) {
- rc = wait_for_serirq_timeout(chip, (check_locality
- (chip) >= 0),
- chip->vendor.timeout_a);
- if (rc > 0)
+ stop = jiffies + chip->vendor.timeout_a;
+
+ /* The locality request usually takes effect right after it is issued */
+ do {
+ if (check_locality(chip) >= 0)
return chip->vendor.locality;
- } else {
- stop = jiffies + chip->vendor.timeout_a;
- do {
- if (check_locality(chip) >= 0)
- return chip->vendor.locality;
- msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- }
- rc = -EACCES;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ ret = -EACCES;
end:
- return rc;
+ return ret;
} /* request_locality() */
/*
@@ -310,13 +281,13 @@ end:
*/
static void release_locality(struct tpm_chip *chip)
{
- struct i2c_client *client;
+ struct tpm_stm_dev *tpm_dev;
u8 data;
- client = (struct i2c_client *)TPM_VPRIV(chip);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
data = TPM_ACCESS_ACTIVE_LOCALITY;
- I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
+ I2C_WRITE_DATA(tpm_dev, TPM_ACCESS, &data, 1);
}
/*
@@ -329,19 +300,20 @@ static int get_burstcount(struct tpm_chip *chip)
unsigned long stop;
int burstcnt, status;
u8 tpm_reg, temp;
+ struct tpm_stm_dev *tpm_dev;
- struct i2c_client *client = (struct i2c_client *)TPM_VPRIV(chip);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
stop = jiffies + chip->vendor.timeout_d;
do {
tpm_reg = TPM_STS + 1;
- status = I2C_READ_DATA(client, tpm_reg, &temp, 1);
+ status = I2C_READ_DATA(tpm_dev, tpm_reg, &temp, 1);
if (status < 0)
goto end;
tpm_reg = tpm_reg + 1;
burstcnt = temp;
- status = I2C_READ_DATA(client, tpm_reg, &temp, 1);
+ status = I2C_READ_DATA(tpm_dev, tpm_reg, &temp, 1);
if (status < 0)
goto end;
@@ -355,36 +327,107 @@ end:
return -EBUSY;
} /* get_burstcount() */
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * interrupt_to_status
+ * @param: irq_mask, the irq mask value to wait for
+ * @return: the corresponding tpm_sts value
+ */
+static u8 interrupt_to_status(u8 irq_mask)
+{
+ u8 status = 0;
+
+ if ((irq_mask & TPM_INTF_STS_VALID_INT) == TPM_INTF_STS_VALID_INT)
+ status |= TPM_STS_VALID;
+ if ((irq_mask & TPM_INTF_DATA_AVAIL_INT) == TPM_INTF_DATA_AVAIL_INT)
+ status |= TPM_STS_DATA_AVAIL;
+ if ((irq_mask & TPM_INTF_CMD_READY_INT) == TPM_INTF_CMD_READY_INT)
+ status |= TPM_STS_COMMAND_READY;
+
+ return status;
+} /* interrupt_to_status() */
+
/*
* wait_for_stat wait for a TPM_STS value
* @param: chip, the tpm chip description
* @param: mask, the value mask to wait
* @param: timeout, the timeout
* @param: queue, the wait queue.
+ * @param: check_cancel, whether the command can be cancelled
* @return: the tpm status, 0 if success, -ETIME if timeout is reached.
*/
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue)
+ wait_queue_head_t *queue, bool check_cancel)
{
unsigned long stop;
- long rc;
- u8 status;
+ int ret;
+ bool canceled = false;
+ bool condition;
+ u32 cur_intrs;
+ u8 interrupt, status;
+ struct tpm_stm_dev *tpm_dev;
- if (chip->vendor.irq) {
- rc = wait_for_serirq_timeout(chip, ((tpm_stm_i2c_status
- (chip) & mask) ==
- mask), timeout);
- if (rc > 0)
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
+
+ /* check current status */
+ status = tpm_stm_i2c_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->vendor.irq) {
+ cur_intrs = tpm_dev->intrs;
+ interrupt = clear_interruption(tpm_dev);
+ enable_irq(chip->vendor.irq);
+
+again:
+ timeout = stop - jiffies;
+ if ((long) timeout <= 0)
+ return -1;
+
+ ret = wait_event_interruptible_timeout(*queue,
+ cur_intrs != tpm_dev->intrs, timeout);
+
+ interrupt |= clear_interruption(tpm_dev);
+ status = interrupt_to_status(interrupt);
+ condition = wait_for_tpm_stat_cond(chip, mask,
+ check_cancel, &canceled);
+
+ if (ret >= 0 && condition) {
+ if (canceled)
+ return -ECANCELED;
return 0;
+ }
+ if (ret == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ disable_irq_nosync(chip->vendor.irq);
+
} else {
- stop = jiffies + timeout;
do {
msleep(TPM_TIMEOUT);
- status = tpm_stm_i2c_status(chip);
+ status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
} while (time_before(jiffies, stop));
}
+
return -ETIME;
} /* wait_for_stat() */
@@ -397,22 +440,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
*/
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
- int size = 0, burstcnt, len;
- struct i2c_client *client;
+ int size = 0, burstcnt, len, ret;
+ struct tpm_stm_dev *tpm_dev;
- client = (struct i2c_client *)TPM_VPRIV(chip);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
while (size < count &&
wait_for_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->vendor.timeout_c,
- &chip->vendor.read_queue)
- == 0) {
+ &chip->vendor.read_queue, true) == 0) {
burstcnt = get_burstcount(chip);
if (burstcnt < 0)
return burstcnt;
len = min_t(int, burstcnt, count - size);
- I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
+ ret = I2C_READ_DATA(tpm_dev, TPM_DATA_FIFO, buf + size, len);
+ if (ret < 0)
+ return ret;
+
size += len;
}
return size;
@@ -427,15 +472,14 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
{
struct tpm_chip *chip = dev_id;
- struct i2c_client *client;
- struct st33zp24_platform_data *pin_infos;
+ struct tpm_stm_dev *tpm_dev;
- disable_irq_nosync(irq);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
- client = (struct i2c_client *)TPM_VPRIV(chip);
- pin_infos = client->dev.platform_data;
+ tpm_dev->intrs++;
+ wake_up_interruptible(&chip->vendor.read_queue);
+ disable_irq_nosync(chip->vendor.irq);
- complete(&pin_infos->irq_detection);
return IRQ_HANDLED;
} /* tpm_ioserirq_handler() */
@@ -457,13 +501,15 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
int ret;
u8 data;
struct i2c_client *client;
+ struct tpm_stm_dev *tpm_dev;
- if (chip == NULL)
+ if (!chip)
return -EBUSY;
if (len < TPM_HEADER_SIZE)
return -EBUSY;
- client = (struct i2c_client *)TPM_VPRIV(chip);
+ tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
+ client = tpm_dev->client;
client->flags = 0;
@@ -476,7 +522,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
tpm_stm_i2c_cancel(chip);
if (wait_for_stat
(chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
- &chip->vendor.int_queue) < 0) {
+ &chip->vendor.read_queue, false) < 0) {
ret = -ETIME;
goto out_err;
}
@@ -487,7 +533,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
if (burstcnt < 0)
return burstcnt;
size = min_t(int, len - i - 1, burstcnt);
- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
+ ret = I2C_WRITE_DATA(tpm_dev, TPM_DATA_FIFO, buf + i, size);
if (ret < 0)
goto out_err;
@@ -500,7 +546,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
goto out_err;
}
- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + len - 1, 1);
+ ret = I2C_WRITE_DATA(tpm_dev, TPM_DATA_FIFO, buf + len - 1, 1);
if (ret < 0)
goto out_err;
@@ -511,7 +557,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
}
data = TPM_STS_GO;
- I2C_WRITE_DATA(client, TPM_STS, &data, 1);
+ I2C_WRITE_DATA(tpm_dev, TPM_STS, &data, 1);
return len;
out_err:
@@ -526,7 +572,7 @@ out_err:
* @param: buf, the buffer to store datas.
* @param: count, the number of bytes to send.
* @return: In case of success the number of bytes received.
- * In other case, a < 0 value describing the issue.
+ * In other case, a < 0 value describing the issue.
*/
static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
@@ -534,7 +580,7 @@ static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
int size = 0;
int expected;
- if (chip == NULL)
+ if (!chip)
return -EBUSY;
if (count < TPM_HEADER_SIZE) {
@@ -544,7 +590,7 @@ static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
- dev_err(chip->dev, "Unable to read header\n");
+ dev_err(chip->pdev, "Unable to read header\n");
goto out;
}
@@ -555,9 +601,9 @@ static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
}
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
- expected - TPM_HEADER_SIZE);
+ expected - TPM_HEADER_SIZE);
if (size < expected) {
- dev_err(chip->dev, "Unable to read remainder of result\n");
+ dev_err(chip->pdev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
@@ -568,7 +614,7 @@ out:
return size;
}
-static bool tpm_st33_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+static bool tpm_stm_i2c_req_canceled(struct tpm_chip *chip, u8 status)
{
return (status == TPM_STS_COMMAND_READY);
}
@@ -580,75 +626,134 @@ static const struct tpm_class_ops st_i2c_tpm = {
.status = tpm_stm_i2c_status,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = tpm_st33_i2c_req_canceled,
+ .req_canceled = tpm_stm_i2c_req_canceled,
};
-static int interrupts;
-module_param(interrupts, int, 0444);
-MODULE_PARM_DESC(interrupts, "Enable interrupts");
+#ifdef CONFIG_OF
+static int tpm_stm_i2c_of_request_resources(struct tpm_chip *chip)
+{
+ struct device_node *pp;
+ struct tpm_stm_dev *tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
+ struct i2c_client *client = tpm_dev->client;
+ int gpio;
+ int ret;
+
+ pp = client->dev.of_node;
+ if (!pp) {
+ dev_err(chip->pdev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ /* Get GPIO from device tree */
+ gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
+ if (gpio < 0) {
+ dev_err(chip->pdev, "Failed to retrieve lpcpd-gpios from dts.\n");
+ tpm_dev->io_lpcpd = -1;
+ /*
+ * lpcpd pin is not specified. This is not an issue as
+ * power management can also be handled by TPM-specific
+ * commands, so leave with a success status code.
+ */
+ return 0;
+ }
+ /* GPIO request and configuration */
+ ret = devm_gpio_request_one(&client->dev, gpio,
+ GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
+ if (ret) {
+ dev_err(chip->pdev, "Failed to request lpcpd pin\n");
+ return -ENODEV;
+ }
+ tpm_dev->io_lpcpd = gpio;
+
+ return 0;
+}
+#else
+static int tpm_stm_i2c_of_request_resources(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
-static int power_mgt = 1;
-module_param(power_mgt, int, 0444);
-MODULE_PARM_DESC(power_mgt, "Power Management");
+static int tpm_stm_i2c_request_resources(struct i2c_client *client,
+ struct tpm_chip *chip)
+{
+ struct st33zp24_platform_data *pdata;
+ struct tpm_stm_dev *tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
+ int ret;
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(chip->pdev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ /* store for late use */
+ tpm_dev->io_lpcpd = pdata->io_lpcpd;
+
+ if (gpio_is_valid(pdata->io_lpcpd)) {
+ ret = devm_gpio_request_one(&client->dev,
+ pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
+ "TPM IO_LPCPD");
+ if (ret) {
+ dev_err(chip->pdev, "%s : reset gpio_request failed\n",
+ __FILE__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
/*
- * tpm_st33_i2c_probe initialize the TPM device
+ * tpm_stm_i2c_probe initialize the TPM device
* @param: client, the i2c_client drescription (TPM I2C description).
* @param: id, the i2c_device_id struct.
* @return: 0 in case of success.
* -1 in other case.
*/
static int
-tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+tpm_stm_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- int err;
- u8 intmask;
+ int ret;
+ u8 intmask = 0;
struct tpm_chip *chip;
struct st33zp24_platform_data *platform_data;
+ struct tpm_stm_dev *tpm_dev;
- if (client == NULL) {
+ if (!client) {
pr_info("%s: i2c client is NULL. Device not accessible.\n",
__func__);
- err = -ENODEV;
- goto end;
+ return -ENODEV;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_info(&client->dev, "client not i2c capable\n");
- err = -ENODEV;
- goto end;
+ return -ENODEV;
}
- chip = tpm_register_hardware(&client->dev, &st_i2c_tpm);
- if (!chip) {
- dev_info(&client->dev, "fail chip\n");
- err = -ENODEV;
- goto end;
- }
+ tpm_dev = devm_kzalloc(&client->dev, sizeof(struct tpm_stm_dev),
+ GFP_KERNEL);
+ if (!tpm_dev)
+ return -ENOMEM;
- platform_data = client->dev.platform_data;
+ chip = tpmm_chip_alloc(&client->dev, &st_i2c_tpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- if (!platform_data) {
- dev_info(&client->dev, "chip not available\n");
- err = -ENODEV;
- goto _tpm_clean_answer;
- }
+ TPM_VPRIV(chip) = tpm_dev;
+ tpm_dev->client = client;
- platform_data->tpm_i2c_buffer[0] =
- kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
- if (platform_data->tpm_i2c_buffer[0] == NULL) {
- err = -ENOMEM;
- goto _tpm_clean_answer;
- }
- platform_data->tpm_i2c_buffer[1] =
- kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
- if (platform_data->tpm_i2c_buffer[1] == NULL) {
- err = -ENOMEM;
- goto _tpm_clean_response1;
+ platform_data = client->dev.platform_data;
+ if (!platform_data && client->dev.of_node) {
+ ret = tpm_stm_i2c_of_request_resources(chip);
+ if (ret)
+ goto _tpm_clean_answer;
+ } else if (platform_data) {
+ ret = tpm_stm_i2c_request_resources(client, chip);
+ if (ret)
+ goto _tpm_clean_answer;
}
- TPM_VPRIV(chip) = client;
-
chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
@@ -656,59 +761,44 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
chip->vendor.locality = LOCALITY0;
- if (power_mgt) {
- err = gpio_request(platform_data->io_lpcpd, "TPM IO_LPCPD");
- if (err)
- goto _gpio_init1;
- gpio_set_value(platform_data->io_lpcpd, 1);
- }
+ if (client->irq) {
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&chip->vendor.read_queue);
+ tpm_dev->intrs = 0;
- if (interrupts) {
- init_completion(&platform_data->irq_detection);
if (request_locality(chip) != LOCALITY0) {
- err = -ENODEV;
- goto _tpm_clean_response2;
+ ret = -ENODEV;
+ goto _tpm_clean_answer;
}
- err = gpio_request(platform_data->io_serirq, "TPM IO_SERIRQ");
- if (err)
- goto _gpio_init2;
- clear_interruption(client);
- err = request_irq(gpio_to_irq(platform_data->io_serirq),
- &tpm_ioserirq_handler,
+ clear_interruption(tpm_dev);
+ ret = devm_request_irq(&client->dev, client->irq,
+ tpm_ioserirq_handler,
IRQF_TRIGGER_HIGH,
"TPM SERIRQ management", chip);
- if (err < 0) {
- dev_err(chip->dev , "TPM SERIRQ signals %d not available\n",
- gpio_to_irq(platform_data->io_serirq));
- goto _irq_set;
+ if (ret < 0) {
+ dev_err(chip->pdev, "TPM SERIRQ signals %d not available\n",
+ client->irq);
+ goto _tpm_clean_answer;
}
- err = I2C_READ_DATA(client, TPM_INT_ENABLE, &intmask, 1);
- if (err < 0)
- goto _irq_set;
-
intmask |= TPM_INTF_CMD_READY_INT
- | TPM_INTF_FIFO_AVALAIBLE_INT
- | TPM_INTF_WAKE_UP_READY_INT
- | TPM_INTF_LOCALITY_CHANGE_INT
| TPM_INTF_STS_VALID_INT
| TPM_INTF_DATA_AVAIL_INT;
- err = I2C_WRITE_DATA(client, TPM_INT_ENABLE, &intmask, 1);
- if (err < 0)
- goto _irq_set;
+ ret = I2C_WRITE_DATA(tpm_dev, TPM_INT_ENABLE, &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
intmask = TPM_GLOBAL_INT_ENABLE;
- err = I2C_WRITE_DATA(client, (TPM_INT_ENABLE + 3), &intmask, 1);
- if (err < 0)
- goto _irq_set;
+ ret = I2C_WRITE_DATA(tpm_dev, (TPM_INT_ENABLE + 3),
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
- err = I2C_READ_DATA(client, TPM_INT_STATUS, &intmask, 1);
- if (err < 0)
- goto _irq_set;
+ chip->vendor.irq = client->irq;
- chip->vendor.irq = interrupts;
+ disable_irq_nosync(chip->vendor.irq);
tpm_gen_interrupt(chip);
}
@@ -716,130 +806,106 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
tpm_get_timeouts(chip);
tpm_do_selftest(chip);
- dev_info(chip->dev, "TPM I2C Initialized\n");
- return 0;
-_irq_set:
- free_irq(gpio_to_irq(platform_data->io_serirq), (void *)chip);
-_gpio_init2:
- if (interrupts)
- gpio_free(platform_data->io_serirq);
-_gpio_init1:
- if (power_mgt)
- gpio_free(platform_data->io_lpcpd);
-_tpm_clean_response2:
- kzfree(platform_data->tpm_i2c_buffer[1]);
- platform_data->tpm_i2c_buffer[1] = NULL;
-_tpm_clean_response1:
- kzfree(platform_data->tpm_i2c_buffer[0]);
- platform_data->tpm_i2c_buffer[0] = NULL;
+ return tpm_chip_register(chip);
_tpm_clean_answer:
- tpm_remove_hardware(chip->dev);
-end:
- pr_info("TPM I2C initialisation fail\n");
- return err;
+ dev_info(chip->pdev, "TPM I2C initialisation fail\n");
+ return ret;
}
/*
- * tpm_st33_i2c_remove remove the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
- clear_bit(0, &chip->is_open);
+ * tpm_stm_i2c_remove remove the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
* @return: 0 in case of success.
*/
-static int tpm_st33_i2c_remove(struct i2c_client *client)
+static int tpm_stm_i2c_remove(struct i2c_client *client)
{
- struct tpm_chip *chip = (struct tpm_chip *)i2c_get_clientdata(client);
- struct st33zp24_platform_data *pin_infos =
- ((struct i2c_client *)TPM_VPRIV(chip))->dev.platform_data;
-
- if (pin_infos != NULL) {
- free_irq(pin_infos->io_serirq, chip);
-
- gpio_free(pin_infos->io_serirq);
- gpio_free(pin_infos->io_lpcpd);
-
- tpm_remove_hardware(chip->dev);
+ struct tpm_chip *chip =
+ (struct tpm_chip *) i2c_get_clientdata(client);
- if (pin_infos->tpm_i2c_buffer[1] != NULL) {
- kzfree(pin_infos->tpm_i2c_buffer[1]);
- pin_infos->tpm_i2c_buffer[1] = NULL;
- }
- if (pin_infos->tpm_i2c_buffer[0] != NULL) {
- kzfree(pin_infos->tpm_i2c_buffer[0]);
- pin_infos->tpm_i2c_buffer[0] = NULL;
- }
- }
+ if (chip)
+ tpm_chip_unregister(chip);
return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
- * tpm_st33_i2c_pm_suspend suspend the TPM device
+ * tpm_stm_i2c_pm_suspend suspend the TPM device
* @param: client, the i2c_client drescription (TPM I2C description).
* @param: mesg, the power management message.
* @return: 0 in case of success.
*/
-static int tpm_st33_i2c_pm_suspend(struct device *dev)
+static int tpm_stm_i2c_pm_suspend(struct device *dev)
{
struct st33zp24_platform_data *pin_infos = dev->platform_data;
int ret = 0;
- if (power_mgt) {
+ if (gpio_is_valid(pin_infos->io_lpcpd))
gpio_set_value(pin_infos->io_lpcpd, 0);
- } else {
+ else
ret = tpm_pm_suspend(dev);
- }
+
return ret;
-} /* tpm_st33_i2c_suspend() */
+} /* tpm_stm_i2c_suspend() */
/*
- * tpm_st33_i2c_pm_resume resume the TPM device
+ * tpm_stm_i2c_pm_resume resume the TPM device
* @param: client, the i2c_client drescription (TPM I2C description).
* @return: 0 in case of success.
*/
-static int tpm_st33_i2c_pm_resume(struct device *dev)
+static int tpm_stm_i2c_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct st33zp24_platform_data *pin_infos = dev->platform_data;
int ret = 0;
- if (power_mgt) {
+ if (gpio_is_valid(pin_infos->io_lpcpd)) {
gpio_set_value(pin_infos->io_lpcpd, 1);
- ret = wait_for_serirq_timeout(chip,
- (chip->ops->status(chip) &
- TPM_STS_VALID) == TPM_STS_VALID,
- chip->vendor.timeout_b);
+ ret = wait_for_stat(chip,
+ TPM_STS_VALID, chip->vendor.timeout_b,
+ &chip->vendor.read_queue, false);
} else {
ret = tpm_pm_resume(dev);
if (!ret)
tpm_do_selftest(chip);
}
return ret;
-} /* tpm_st33_i2c_pm_resume() */
+} /* tpm_stm_i2c_pm_resume() */
#endif
-static const struct i2c_device_id tpm_st33_i2c_id[] = {
+static const struct i2c_device_id tpm_stm_i2c_id[] = {
{TPM_ST33_I2C, 0},
{}
};
-MODULE_DEVICE_TABLE(i2c, tpm_st33_i2c_id);
-static SIMPLE_DEV_PM_OPS(tpm_st33_i2c_ops, tpm_st33_i2c_pm_suspend,
- tpm_st33_i2c_pm_resume);
-static struct i2c_driver tpm_st33_i2c_driver = {
+MODULE_DEVICE_TABLE(i2c, tpm_stm_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st33zp24_i2c_match[] = {
+ { .compatible = "st,st33zp24-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_stm_i2c_ops, tpm_stm_i2c_pm_suspend,
+ tpm_stm_i2c_pm_resume);
+
+static struct i2c_driver tpm_stm_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
- .name = TPM_ST33_I2C,
- .pm = &tpm_st33_i2c_ops,
- },
- .probe = tpm_st33_i2c_probe,
- .remove = tpm_st33_i2c_remove,
- .id_table = tpm_st33_i2c_id
+ .owner = THIS_MODULE,
+ .name = TPM_ST33_I2C,
+ .pm = &tpm_stm_i2c_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_i2c_match),
+ },
+ .probe = tpm_stm_i2c_probe,
+ .remove = tpm_stm_i2c_remove,
+ .id_table = tpm_stm_i2c_id
};
-module_i2c_driver(tpm_st33_i2c_driver);
+module_i2c_driver(tpm_stm_i2c_driver);
MODULE_AUTHOR("Christophe Ricard (tpmsupport@st.com)");
MODULE_DESCRIPTION("STM TPM I2C ST33 Driver");
-MODULE_VERSION("1.2.0");
+MODULE_VERSION("1.2.1");
MODULE_LICENSE("GPL");
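
Besides the rename, the reworked wait_for_stat() in the ST33 driver above introduces an interrupt-driven wait that tolerates the freezer: the sleep is woken either by the IRQ handler bumping tpm_dev->intrs or by a timeout, and if it is interrupted only because the task is being frozen, the fake signal is cleared and the wait restarts with the remaining time. That pattern, reduced to its core, is sketched below; wait_for_event() and event_seen() are placeholder names standing in for the wait-queue condition used in the driver.

static int wait_for_event(wait_queue_head_t *queue, unsigned long timeout)
{
        unsigned long stop = jiffies + timeout;
        long ret;

again:
        timeout = stop - jiffies;
        if ((long)timeout <= 0)
                return -ETIME;

        ret = wait_event_interruptible_timeout(*queue, event_seen(), timeout);

        /* Woken only by the freezer: drop the fake signal and keep waiting. */
        if (ret == -ERESTARTSYS && freezing(current)) {
                clear_thread_flag(TIF_SIGPENDING);
                goto again;
        }

        return ret > 0 ? 0 : -ETIME;
}
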
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index af74c57e5090..0840347e251c 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2012 IBM Corporation
*
- * Author: Ashley Lai <adlai@us.ibm.com>
+ * Author: Ashley Lai <ashleydlai@gmail.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
@@ -270,8 +270,11 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+ struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev);
int rc = 0;
+ tpm_chip_unregister(chip);
+
free_irq(vdev->irq, ibmvtpm);
do {
@@ -290,8 +293,6 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
kfree(ibmvtpm->rtce_buf);
}
- tpm_remove_hardware(ibmvtpm->dev);
-
kfree(ibmvtpm);
return 0;
@@ -307,6 +308,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+
+ /* ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K is required
+ * for the TCE-mapped buffer in addition to the CRQ.
+ */
+ if (!ibmvtpm)
+ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+
return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}
@@ -555,11 +564,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
struct tpm_chip *chip;
int rc = -ENOMEM, rc1;
- chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
- if (!chip) {
- dev_err(dev, "tpm_register_hardware failed\n");
- return -ENODEV;
- }
+ chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
if (!ibmvtpm) {
@@ -629,7 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (rc)
goto init_irq_cleanup;
- return rc;
+ return tpm_chip_register(chip);
init_irq_cleanup:
do {
rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
@@ -644,8 +651,6 @@ cleanup:
kfree(ibmvtpm);
}
- tpm_remove_hardware(dev);
-
return rc;
}
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index bd82a791f995..f595f14426bf 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2012 IBM Corporation
*
- * Author: Ashley Lai <adlai@us.ibm.com>
+ * Author: Ashley Lai <ashleydlai@gmail.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index dc0a2554034e..6d492132ad2b 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
- dev_err(chip->dev, "Timeout in wait(STAT_XFE)\n");
+ dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n");
if (wait_for_bit == STAT_RDA)
- dev_err(chip->dev, "Timeout in wait(STAT_RDA)\n");
+ dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n");
return -EIO;
}
return 0;
@@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
static void tpm_wtx(struct tpm_chip *chip)
{
number_of_wtx++;
- dev_info(chip->dev, "Granting WTX (%02d / %02d)\n",
+ dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n",
number_of_wtx, TPM_MAX_WTX_PACKAGES);
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX);
@@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip)
static void tpm_wtx_abort(struct tpm_chip *chip)
{
- dev_info(chip->dev, "Aborting WTX\n");
+ dev_info(chip->pdev, "Aborting WTX\n");
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX_ABORT);
wait_and_send(chip, 0x00);
@@ -257,7 +257,7 @@ recv_begin:
}
if (buf[0] != TPM_VL_VER) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"Wrong transport protocol implementation!\n");
return -EIO;
}
@@ -272,7 +272,7 @@ recv_begin:
}
if ((size == 0x6D00) && (buf[1] == 0x80)) {
- dev_err(chip->dev, "Error handling on vendor layer!\n");
+ dev_err(chip->pdev, "Error handling on vendor layer!\n");
return -EIO;
}
@@ -284,7 +284,7 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX) {
- dev_info(chip->dev, "WTX-package received\n");
+ dev_info(chip->pdev, "WTX-package received\n");
if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
tpm_wtx(chip);
goto recv_begin;
@@ -295,14 +295,14 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
- dev_info(chip->dev, "WTX-abort acknowledged\n");
+ dev_info(chip->pdev, "WTX-abort acknowledged\n");
return size;
}
if (buf[1] == TPM_CTRL_ERROR) {
- dev_err(chip->dev, "ERROR-package received:\n");
+ dev_err(chip->pdev, "ERROR-package received:\n");
if (buf[4] == TPM_INF_NAK)
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"-> Negative acknowledgement"
" - retransmit command!\n");
return -EIO;
@@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
ret = empty_fifo(chip, 1);
if (ret) {
- dev_err(chip->dev, "Timeout while clearing FIFO\n");
+ dev_err(chip->pdev, "Timeout while clearing FIFO\n");
return -EIO;
}
@@ -546,7 +546,14 @@ static int tpm_inf_pnp_probe(struct pnp_dev *dev,
vendorid[0], vendorid[1],
productid[0], productid[1], chipname);
- if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf)))
+ chip = tpmm_chip_alloc(&dev->dev, &tpm_inf);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto err_release_region;
+ }
+
+ rc = tpm_chip_register(chip);
+ if (rc)
goto err_release_region;
return 0;
@@ -572,17 +579,15 @@ static void tpm_inf_pnp_remove(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
- if (chip) {
- if (tpm_dev.iotype == TPM_INF_IO_PORT) {
- release_region(tpm_dev.data_regs, tpm_dev.data_size);
- release_region(tpm_dev.config_port,
- tpm_dev.config_size);
- } else {
- iounmap(tpm_dev.mem_base);
- release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
- }
- tpm_dev_vendor_release(chip);
- tpm_remove_hardware(chip->dev);
+ tpm_chip_unregister(chip);
+
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port,
+ tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
}
}
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 4d0a17ea8cde..289389ecef84 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip)
}
while (time_before(jiffies, stop));
- dev_info(chip->dev, "wait for ready failed\n");
+ dev_info(chip->pdev, "wait for ready failed\n");
return -EBUSY;
}
@@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
- dev_err(chip->dev, "F0 timeout\n");
+ dev_err(chip->pdev, "F0 timeout\n");
return -EIO;
}
if ((data =
inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
- dev_err(chip->dev, "not in normal mode (0x%x)\n",
+ dev_err(chip->pdev, "not in normal mode (0x%x)\n",
data);
return -EIO;
}
@@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
for (p = buffer; p < &buffer[count]; p++) {
if (wait_for_stat
(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"OBF timeout (while reading data)\n");
return -EIO;
}
@@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
if ((data & NSC_STATUS_F0) == 0 &&
(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
- dev_err(chip->dev, "F0 not set\n");
+ dev_err(chip->pdev, "F0 not set\n");
return -EIO;
}
if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"expected end of command(0x%x)\n", data);
return -EIO;
}
@@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->dev, "IBF timeout\n");
+ dev_err(chip->pdev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
- dev_err(chip->dev, "IBR timeout\n");
+ dev_err(chip->pdev, "IBR timeout\n");
return -EIO;
}
for (i = 0; i < count; i++) {
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->dev,
+ dev_err(chip->pdev,
"IBF timeout (while writing data)\n");
return -EIO;
}
@@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
}
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->dev, "IBF timeout\n");
+ dev_err(chip->pdev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
@@ -247,10 +247,9 @@ static struct platform_device *pdev = NULL;
static void tpm_nsc_remove(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- if ( chip ) {
- release_region(chip->vendor.base, 2);
- tpm_remove_hardware(chip->dev);
- }
+
+ tpm_chip_unregister(chip);
+ release_region(chip->vendor.base, 2);
}
static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
@@ -307,11 +306,16 @@ static int __init init_nsc(void)
goto err_del_dev;
}
- if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
+ chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc);
+ if (IS_ERR(chip)) {
rc = -ENODEV;
goto err_rel_reg;
}
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_rel_reg;
+
dev_dbg(&pdev->dev, "NSC TPM detected\n");
dev_dbg(&pdev->dev,
"NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 98ba2bd1a355..c002d1bd9caf 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -1,7 +1,7 @@
/*
* Copyright 2012 IBM Corporation
*
- * Author: Ashley Lai <adlai@us.ibm.com>
+ * Author: Ashley Lai <ashleydlai@gmail.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 61dcc8011ec7..6ca9b5d78144 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -1,3 +1,22 @@
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ *
+ * Authors:
+ * Xiaoyan Zhang <xiaoyan.zhang@intel.com>
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains implementation of the sysfs interface for PPI.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
#include <linux/acpi.h>
#include "tpm.h"
@@ -12,7 +31,6 @@
#define PPI_TPM_REQ_MAX 22
#define PPI_VS_REQ_START 128
#define PPI_VS_REQ_END 255
-#define PPI_VERSION_LEN 3
static const u8 tpm_ppi_uuid[] = {
0xA6, 0xFA, 0xDD, 0x3D,
@@ -22,45 +40,22 @@ static const u8 tpm_ppi_uuid[] = {
0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
};
-static char tpm_ppi_version[PPI_VERSION_LEN + 1];
-static acpi_handle tpm_ppi_handle;
-
-static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
- void **return_value)
-{
- union acpi_object *obj;
-
- if (!acpi_check_dsm(handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
- 1 << TPM_PPI_FN_VERSION))
- return AE_OK;
-
- /* Cache version string */
- obj = acpi_evaluate_dsm_typed(handle, tpm_ppi_uuid,
- TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
- NULL, ACPI_TYPE_STRING);
- if (obj) {
- strlcpy(tpm_ppi_version, obj->string.pointer,
- PPI_VERSION_LEN + 1);
- ACPI_FREE(obj);
- }
-
- *return_value = handle;
-
- return AE_CTRL_TERMINATE;
-}
-
static inline union acpi_object *
-tpm_eval_dsm(int func, acpi_object_type type, union acpi_object *argv4)
+tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
+ union acpi_object *argv4)
{
- BUG_ON(!tpm_ppi_handle);
- return acpi_evaluate_dsm_typed(tpm_ppi_handle, tpm_ppi_uuid,
- TPM_PPI_REVISION_ID, func, argv4, type);
+ BUG_ON(!ppi_handle);
+ return acpi_evaluate_dsm_typed(ppi_handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID,
+ func, argv4, type);
}
static ssize_t tpm_show_ppi_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%s\n", tpm_ppi_version);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
@@ -68,8 +63,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
{
ssize_t size = -EINVAL;
union acpi_object *obj;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
- obj = tpm_eval_dsm(TPM_PPI_FN_GETREQ, ACPI_TYPE_PACKAGE, NULL);
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
+ ACPI_TYPE_PACKAGE, NULL);
if (!obj)
return -ENXIO;
@@ -103,14 +100,15 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
int func = TPM_PPI_FN_SUBREQ;
union acpi_object *obj, tmp;
union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
/*
* the function to submit TPM operation request to pre-os environment
* is updated with function index from SUBREQ to SUBREQ2 since PPI
* version 1.1
*/
- if (acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
- 1 << TPM_PPI_FN_SUBREQ2))
+ if (acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
func = TPM_PPI_FN_SUBREQ2;
/*
@@ -119,7 +117,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
* string/package type. For PPI version 1.0 and 1.1, use buffer type
* for compatibility, and use package type since 1.2 according to spec.
*/
- if (strcmp(tpm_ppi_version, "1.2") < 0) {
+ if (strcmp(chip->ppi_version, "1.2") < 0) {
if (sscanf(buf, "%d", &req) != 1)
return -EINVAL;
argv4.type = ACPI_TYPE_BUFFER;
@@ -131,7 +129,8 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
return -EINVAL;
}
- obj = tpm_eval_dsm(func, ACPI_TYPE_INTEGER, &argv4);
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER,
+ &argv4);
if (!obj) {
return -ENXIO;
} else {
@@ -157,6 +156,7 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
.buffer.length = 0,
.buffer.pointer = NULL
};
+ struct tpm_chip *chip = dev_get_drvdata(dev);
static char *info[] = {
"None",
@@ -171,9 +171,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
* (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
* compatibility, define params[3].type as buffer, if PPI version < 1.2
*/
- if (strcmp(tpm_ppi_version, "1.2") < 0)
+ if (strcmp(chip->ppi_version, "1.2") < 0)
obj = &tmp;
- obj = tpm_eval_dsm(TPM_PPI_FN_GETACT, ACPI_TYPE_INTEGER, obj);
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT,
+ ACPI_TYPE_INTEGER, obj);
if (!obj) {
return -ENXIO;
} else {
@@ -196,8 +197,10 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
acpi_status status = -EINVAL;
union acpi_object *obj, *ret_obj;
u64 req, res;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
- obj = tpm_eval_dsm(TPM_PPI_FN_GETRSP, ACPI_TYPE_PACKAGE, NULL);
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP,
+ ACPI_TYPE_PACKAGE, NULL);
if (!obj)
return -ENXIO;
@@ -248,7 +251,8 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
+static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
+ u32 end)
{
int i;
u32 ret;
@@ -264,14 +268,15 @@ static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
"User not required",
};
- if (!acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ if (!acpi_check_dsm(dev_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
for (i = start; i <= end; i++) {
tmp.integer.value = i;
- obj = tpm_eval_dsm(TPM_PPI_FN_GETOPR, ACPI_TYPE_INTEGER, &argv);
+ obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
+ ACPI_TYPE_INTEGER, &argv);
if (!obj) {
return -ENOMEM;
} else {
@@ -291,14 +296,20 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return show_ppi_operations(buf, 0, PPI_TPM_REQ_MAX);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
+ PPI_TPM_REQ_MAX);
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return show_ppi_operations(buf, PPI_VS_REQ_START, PPI_VS_REQ_END);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
+ PPI_VS_REQ_END);
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
@@ -323,16 +334,38 @@ static struct attribute_group ppi_attr_grp = {
.attrs = ppi_attrs
};
-int tpm_add_ppi(struct kobject *parent)
+int tpm_add_ppi(struct tpm_chip *chip)
{
- /* Cache TPM ACPI handle and version string */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
- ppi_callback, NULL, NULL, &tpm_ppi_handle);
- return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
+ union acpi_object *obj;
+ int rc;
+
+ if (!chip->acpi_dev_handle)
+ return 0;
+
+ if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
+ return 0;
+
+ /* Cache PPI version string. */
+ obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
+ NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ strlcpy(chip->ppi_version, obj->string.pointer,
+ sizeof(chip->ppi_version));
+ ACPI_FREE(obj);
+ }
+
+ rc = sysfs_create_group(&chip->pdev->kobj, &ppi_attr_grp);
+
+ if (!rc)
+ chip->flags |= TPM_CHIP_FLAG_PPI;
+
+ return rc;
}
-void tpm_remove_ppi(struct kobject *parent)
+void tpm_remove_ppi(struct tpm_chip *chip)
{
- if (tpm_ppi_handle)
- sysfs_remove_group(parent, &ppi_attr_grp);
+ if (chip->flags & TPM_CHIP_FLAG_PPI)
+ sysfs_remove_group(&chip->pdev->kobj, &ppi_attr_grp);
}
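
The tpm_ppi.c changes above make every sysfs handler recover its state through dev_get_drvdata() and gate behaviour on chip->ppi_version with a plain strcmp() against "1.2". The plain-C sketch below only illustrates that gate; struct demo_chip and its values are hypothetical stand-ins, not the kernel's struct tpm_chip, and the comparison works because PPI version strings keep the single-digit "major.minor" form, where lexicographic order matches numeric order.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the per-chip state used by the hunks above. */
struct demo_chip {
	char ppi_version[4];		/* e.g. "1.0", "1.1", "1.2", "1.3" */
};

/* Mirrors the gate used above: strcmp(chip->ppi_version, "1.2") < 0 selects
 * the pre-1.2 compatibility path (buffer-typed arguments, SUBREQ). */
static int needs_pre_1_2_compat(const struct demo_chip *chip)
{
	return strcmp(chip->ppi_version, "1.2") < 0;
}

int main(void)
{
	struct demo_chip old_fw = { "1.1" };
	struct demo_chip new_fw = { "1.3" };

	printf("1.1 -> compat path: %d\n", needs_pre_1_2_compat(&old_fw)); /* 1 */
	printf("1.3 -> compat path: %d\n", needs_pre_1_2_compat(&new_fw)); /* 0 */
	return 0;
}
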
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 6f1985496112..6725bef7cb96 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
@@ -64,19 +65,30 @@ enum tis_defaults {
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
};
+
+/* Some timeout values are needed before it is known whether the chip is
+ * TPM 1.0 or TPM 2.0.
+ */
+#define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
+#define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
+#define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
+#define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
+
#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
#define TPM_STS(l) (0x0018 | ((l) << 12))
+#define TPM_STS3(l) (0x001b | ((l) << 12))
#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))
-static LIST_HEAD(tis_chips);
-static DEFINE_MUTEX(tis_lock);
+struct priv_data {
+ bool irq_tested;
+};
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
@@ -241,7 +253,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
if ((size =
recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
- dev_err(chip->dev, "Unable to read header\n");
+ dev_err(chip->pdev, "Unable to read header\n");
goto out;
}
@@ -254,7 +266,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if ((size +=
recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE)) < expected) {
- dev_err(chip->dev, "Unable to read remainder of result\n");
+ dev_err(chip->pdev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
@@ -263,7 +275,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
&chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->dev, "Error left over data\n");
+ dev_err(chip->pdev, "Error left over data\n");
size = -EIO;
goto out;
}
@@ -338,15 +350,31 @@ out_err:
return rc;
}
+static void disable_interrupts(struct tpm_chip *chip)
+{
+ u32 intmask;
+
+ intmask =
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ iowrite32(intmask,
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ free_irq(chip->vendor.irq, chip);
+ chip->vendor.irq = 0;
+}
+
/*
* If interrupts are used (signaled by an irq set in the vendor structure)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc;
u32 ordinal;
+ unsigned long dur;
rc = tpm_tis_send_data(chip, buf, len);
if (rc < 0)
@@ -358,9 +386,14 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (chip->vendor.irq) {
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ dur = tpm2_calc_ordinal_duration(chip, ordinal);
+ else
+ dur = tpm_calc_ordinal_duration(chip, ordinal);
+
if (wait_for_tpm_stat
- (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- tpm_calc_ordinal_duration(chip, ordinal),
+ (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
&chip->vendor.read_queue, false) < 0) {
rc = -ETIME;
goto out_err;
@@ -373,6 +406,30 @@ out_err:
return rc;
}
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, irq;
+ struct priv_data *priv = chip->vendor.priv;
+
+ if (!chip->vendor.irq || priv->irq_tested)
+ return tpm_tis_send_main(chip, buf, len);
+
+ /* Verify receipt of the expected IRQ */
+ irq = chip->vendor.irq;
+ chip->vendor.irq = 0;
+ rc = tpm_tis_send_main(chip, buf, len);
+ chip->vendor.irq = irq;
+ if (!priv->irq_tested)
+ msleep(1);
+ if (!priv->irq_tested) {
+ disable_interrupts(chip);
+ dev_err(chip->pdev,
+ FW_BUG "TPM interrupt not working, polling instead\n");
+ }
+ priv->irq_tested = true;
+ return rc;
+}
+
struct tis_vendor_timeout_override {
u32 did_vid;
unsigned long timeout_us[4];
@@ -436,7 +493,7 @@ static int probe_itpm(struct tpm_chip *chip)
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
- dev_info(chip->dev, "Detected an iTPM.\n");
+ dev_info(chip->pdev, "Detected an iTPM.\n");
rc = 1;
} else
rc = -EFAULT;
@@ -505,6 +562,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
if (interrupt == 0)
return IRQ_NONE;
+ ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
if (interrupt & TPM_INTF_DATA_AVAIL_INT)
wake_up_interruptible(&chip->vendor.read_queue);
if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
@@ -528,27 +586,48 @@ static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
-static int tpm_tis_init(struct device *dev, resource_size_t start,
- resource_size_t len, unsigned int irq)
+static void tpm_tis_remove(struct tpm_chip *chip)
+{
+ iowrite32(~TPM_GLOBAL_INT_ENABLE &
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.
+ locality)),
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ release_locality(chip, chip->vendor.locality, 1);
+}
+
+static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
+ resource_size_t start, resource_size_t len,
+ unsigned int irq)
{
u32 vendor, intfcaps, intmask;
int rc, i, irq_s, irq_e, probe;
struct tpm_chip *chip;
+ struct priv_data *priv;
- if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
- return -ENODEV;
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
- chip->vendor.iobase = ioremap(start, len);
- if (!chip->vendor.iobase) {
- rc = -EIO;
- goto out_err;
- }
+ chip = tpmm_chip_alloc(dev, &tpm_tis);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ chip->vendor.priv = priv;
+#ifdef CONFIG_ACPI
+ chip->acpi_dev_handle = acpi_dev_handle;
+#endif
+
+ chip->vendor.iobase = devm_ioremap(dev, start, len);
+ if (!chip->vendor.iobase)
+ return -EIO;
- /* Default timeouts */
- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ /* Maximum timeouts */
+ chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
+ chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX;
+ chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX;
+ chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX;
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
@@ -560,11 +639,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
goto out_err;
}
+ /* Every TPM 2.x command has a higher ordinal than TPM 1.x commands.
+ * Therefore, we can use an idempotent TPM 2.x command to probe TPM 2.x.
+ */
+ rc = tpm2_gen_interrupt(chip, true);
+ if (rc == 0 || rc == TPM2_RC_INITIALIZE)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+
vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
chip->vendor.manufacturer_id = vendor;
- dev_info(dev,
- "1.2 TPM (device-id 0x%X, rev-id %d)\n",
+ dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
if (!itpm) {
@@ -605,19 +691,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
- /* get the timeouts before testing for irqs */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
- if (tpm_do_selftest(chip)) {
- dev_err(dev, "TPM self test failed\n");
- rc = -ENODEV;
- goto out_err;
- }
-
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
@@ -649,10 +722,10 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
iowrite8(i, chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
- if (request_irq
- (i, tis_int_probe, IRQF_SHARED,
- chip->vendor.miscdev.name, chip) != 0) {
- dev_info(chip->dev,
+ if (devm_request_irq
+ (dev, i, tis_int_probe, IRQF_SHARED,
+ chip->devname, chip) != 0) {
+ dev_info(chip->pdev,
"Unable to request irq: %d for probe\n",
i);
continue;
@@ -673,7 +746,10 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
chip->vendor.probed_irq = 0;
/* Generate Interrupts */
- tpm_gen_interrupt(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_gen_interrupt(chip, false);
+ else
+ tpm_gen_interrupt(chip);
chip->vendor.irq = chip->vendor.probed_irq;
@@ -690,17 +766,16 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
- free_irq(i, chip);
}
}
if (chip->vendor.irq) {
iowrite8(chip->vendor.irq,
chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
- if (request_irq
- (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
- chip->vendor.miscdev.name, chip) != 0) {
- dev_info(chip->dev,
+ if (devm_request_irq
+ (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
+ chip->devname, chip) != 0) {
+ dev_info(chip->pdev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;
@@ -719,17 +794,49 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
}
}
- INIT_LIST_HEAD(&chip->vendor.list);
- mutex_lock(&tis_lock);
- list_add(&chip->vendor.list, &tis_chips);
- mutex_unlock(&tis_lock);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
+ chip->vendor.duration[TPM_SHORT] =
+ msecs_to_jiffies(TPM2_DURATION_SHORT);
+ chip->vendor.duration[TPM_MEDIUM] =
+ msecs_to_jiffies(TPM2_DURATION_MEDIUM);
+ chip->vendor.duration[TPM_LONG] =
+ msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ rc = tpm2_do_selftest(chip);
+ if (rc == TPM2_RC_INITIALIZE) {
+ dev_warn(dev, "Firmware has not started TPM\n");
+ rc = tpm2_startup(chip, TPM2_SU_CLEAR);
+ if (!rc)
+ rc = tpm2_do_selftest(chip);
+ }
+ if (rc) {
+ dev_err(dev, "TPM self test failed\n");
+ if (rc > 0)
+ rc = -ENODEV;
+ goto out_err;
+ }
+ } else {
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
- return 0;
+ if (tpm_do_selftest(chip)) {
+ dev_err(dev, "TPM self test failed\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+ }
+
+ return tpm_chip_register(chip);
out_err:
- if (chip->vendor.iobase)
- iounmap(chip->vendor.iobase);
- tpm_remove_hardware(chip->dev);
+ tpm_tis_remove(chip);
return rc;
}
@@ -758,14 +865,23 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
static int tpm_tis_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
if (chip->vendor.irq)
tpm_tis_reenable_interrupts(chip);
- ret = tpm_pm_resume(dev);
- if (!ret)
- tpm_do_selftest(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ /* NOP if firmware properly does this. */
+ tpm2_startup(chip, TPM2_SU_STATE);
+
+ ret = tpm2_shutdown(chip, TPM2_SU_STATE);
+ if (!ret)
+ ret = tpm2_do_selftest(chip);
+ } else {
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm_do_selftest(chip);
+ }
return ret;
}
@@ -779,6 +895,7 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
{
resource_size_t start, len;
unsigned int irq = 0;
+ acpi_handle acpi_dev_handle = NULL;
start = pnp_mem_start(pnp_dev, 0);
len = pnp_mem_len(pnp_dev, 0);
@@ -791,7 +908,12 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
if (is_itpm(pnp_dev))
itpm = true;
- return tpm_tis_init(&pnp_dev->dev, start, len, irq);
+#ifdef CONFIG_ACPI
+ if (pnp_acpi_device(pnp_dev))
+ acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
+#endif
+
+ return tpm_tis_init(&pnp_dev->dev, acpi_dev_handle, start, len, irq);
}
static struct pnp_device_id tpm_pnp_tbl[] = {
@@ -811,13 +933,10 @@ MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
-
- tpm_dev_vendor_release(chip);
-
- kfree(chip);
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
}
-
static struct pnp_driver tis_pnp_driver = {
.name = "tpm_tis",
.id_table = tpm_pnp_tbl,
@@ -836,7 +955,7 @@ MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
static struct platform_driver tis_drv = {
.driver = {
- .name = "tpm_tis",
+ .name = "tpm_tis",
.pm = &tpm_tis_pm,
},
};
@@ -862,7 +981,7 @@ static int __init init_tis(void)
rc = PTR_ERR(pdev);
goto err_dev;
}
- rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
+ rc = tpm_tis_init(&pdev->dev, NULL, TIS_MEM_BASE, TIS_MEM_LEN, 0);
if (rc)
goto err_init;
return 0;
@@ -875,31 +994,16 @@ err_dev:
static void __exit cleanup_tis(void)
{
- struct tpm_vendor_specific *i, *j;
struct tpm_chip *chip;
- mutex_lock(&tis_lock);
- list_for_each_entry_safe(i, j, &tis_chips, list) {
- chip = to_tpm_chip(i);
- tpm_remove_hardware(chip->dev);
- iowrite32(~TPM_GLOBAL_INT_ENABLE &
- ioread32(chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.
- locality)),
- chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.locality));
- release_locality(chip, chip->vendor.locality, 1);
- if (chip->vendor.irq)
- free_irq(chip->vendor.irq, chip);
- iounmap(i->iobase);
- list_del(&i->list);
- }
- mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
if (!force) {
pnp_unregister_driver(&tis_pnp_driver);
return;
}
#endif
+ chip = dev_get_drvdata(&pdev->dev);
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
platform_device_unregister(pdev);
platform_driver_unregister(&tis_drv);
}
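
The tpm_tis.c changes above add a tpm_tis_send() wrapper that trusts the configured IRQ only after the handler has been observed once (priv->irq_tested): the first command is run on the polling path, and if no interrupt ever arrives the driver disables interrupts and keeps polling. The sketch below is a user-space model of that control flow only; every name in it is hypothetical and none of the TPM register programming is represented.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical model of the state the wrapper above relies on. */
struct demo_dev {
	int irq;		/* 0 means "poll"; non-zero means "use the IRQ" */
	bool irq_tested;	/* set only by the interrupt handler */
};

/* For this pattern the handler's only job is to prove it ran. */
static void demo_irq_handler(struct demo_dev *d)
{
	d->irq_tested = true;
}

static int demo_transmit_polled(struct demo_dev *d)
{
	(void)d;
	return 0;		/* pretend the command completed */
}

/* First command sent with an IRQ configured: run it on the polling path,
 * then keep the IRQ only if the handler was actually observed. */
static int demo_send(struct demo_dev *d, bool hardware_fires_irq)
{
	int rc, saved_irq;

	if (!d->irq || d->irq_tested)
		return demo_transmit_polled(d);

	saved_irq = d->irq;
	d->irq = 0;				/* mask the IRQ path once */
	rc = demo_transmit_polled(d);
	d->irq = saved_irq;

	if (hardware_fires_irq)			/* stands in for the real interrupt */
		demo_irq_handler(d);

	if (!d->irq_tested) {
		d->irq = 0;			/* broken line: poll from now on */
		printf("IRQ never seen, falling back to polling\n");
	}
	d->irq_tested = true;
	return rc;
}

int main(void)
{
	struct demo_dev good = { .irq = 5, .irq_tested = false };
	struct demo_dev bad  = { .irq = 7, .irq_tested = false };

	demo_send(&good, true);
	demo_send(&bad, false);
	printf("good.irq=%d bad.irq=%d\n", good.irq, bad.irq);	/* 5 and 0 */
	return 0;
}
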
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 441b44e54226..c3b4f5a5ac10 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -175,9 +175,9 @@ static int setup_chip(struct device *dev, struct tpm_private *priv)
{
struct tpm_chip *chip;
- chip = tpm_register_hardware(dev, &tpm_vtpm);
- if (!chip)
- return -ENODEV;
+ chip = tpmm_chip_alloc(dev, &tpm_vtpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
init_waitqueue_head(&chip->vendor.read_queue);
@@ -286,6 +286,7 @@ static int tpmfront_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
struct tpm_private *priv;
+ struct tpm_chip *chip;
int rv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -302,21 +303,22 @@ static int tpmfront_probe(struct xenbus_device *dev,
rv = setup_ring(dev, priv);
if (rv) {
- tpm_remove_hardware(&dev->dev);
+ chip = dev_get_drvdata(&dev->dev);
+ tpm_chip_unregister(chip);
ring_free(priv);
return rv;
}
tpm_get_timeouts(priv->chip);
- return rv;
+ return tpm_chip_register(priv->chip);
}
static int tpmfront_remove(struct xenbus_device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
struct tpm_private *priv = TPM_VPRIV(chip);
- tpm_remove_hardware(&dev->dev);
+ tpm_chip_unregister(chip);
ring_free(priv);
TPM_VPRIV(chip) = NULL;
return 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index de03df9dd7c9..26afb56a8073 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1986,6 +1986,12 @@ static int virtcons_probe(struct virtio_device *vdev)
bool multiport;
bool early = early_put_chars != NULL;
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
/* Ensure to read early_put_chars now */
barrier();
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3f44f292d066..91f86131bb7a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -13,6 +13,7 @@ config COMMON_CLK
bool
select HAVE_CLK_PREPARE
select CLKDEV_LOOKUP
+ select SRCU
---help---
The common clock framework is a single definition of struct
clk, useful across many platforms, as well as an
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 32f7c1b36204..2f13bd5246b5 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
+static struct clk *slow_clk;
static int clk_slow_osc_prepare(struct clk_hw *hw)
{
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;
return clk;
}
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;
return clk;
}
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+/*
+ * FIXME: Not all slow clk users properly claim it (get + prepare + enable)
+ * before using it.
+ * If all of the users that do claim it properly decide that they no longer
+ * need it (or are removed), the clock is disabled while the faulty users
+ * still require it, and the system hangs.
+ * Prevent this clock from being disabled until all users claim it properly.
+ * Once that is done, this function and the slow_clk variable should be
+ * removed.
+ */
+static int __init of_at91_clk_slow_retain(void)
+{
+ if (!slow_clk)
+ return 0;
+
+ __clk_get(slow_clk);
+ clk_prepare_enable(slow_clk);
+
+ return 0;
+}
+arch_initcall(of_at91_clk_slow_retain);
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 21784e4eb3f0..440ef81ab15c 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
{ "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
{ "sdio", "perif", 16, CLK_IGNORE_UNUSED },
{ "nfc", "perif", 18 },
- { "smemc", "perif", 19 },
{ "pcie", "perif", 22 },
};
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index b6e6c85507a5..0a47d6f49cd6 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
{}
};
-static struct platform_driver ppc_corenet_clk_driver __initdata = {
+static struct platform_driver ppc_corenet_clk_driver = {
.driver = {
.name = "ppc_corenet_clock",
.of_match_table = ppc_clk_ids,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f4963b7d4e17..642cf37124d3 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
new_rate = clk->ops->determine_rate(clk->hw, rate,
&best_parent_rate,
&parent_hw);
- parent = parent_hw->clk;
+ parent = parent_hw ? parent_hw->clk : NULL;
} else if (clk->ops->round_rate) {
new_rate = clk->ops->round_rate(clk->hw, rate,
&best_parent_rate);
@@ -2048,7 +2048,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
goto fail_out;
}
- clk->name = kstrdup(hw->init->name, GFP_KERNEL);
+ clk->name = kstrdup_const(hw->init->name, GFP_KERNEL);
if (!clk->name) {
pr_err("%s: could not allocate clk->name\n", __func__);
ret = -ENOMEM;
@@ -2075,7 +2075,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
/* copy each string name in case parent_names is __initdata */
for (i = 0; i < clk->num_parents; i++) {
- clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
+ clk->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
GFP_KERNEL);
if (!clk->parent_names[i]) {
pr_err("%s: could not copy parent_names\n", __func__);
@@ -2090,10 +2090,10 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
fail_parent_names_copy:
while (--i >= 0)
- kfree(clk->parent_names[i]);
+ kfree_const(clk->parent_names[i]);
kfree(clk->parent_names);
fail_parent_names:
- kfree(clk->name);
+ kfree_const(clk->name);
fail_name:
kfree(clk);
fail_out:
@@ -2112,10 +2112,10 @@ static void __clk_release(struct kref *ref)
kfree(clk->parents);
while (--i >= 0)
- kfree(clk->parent_names[i]);
+ kfree_const(clk->parent_names[i]);
kfree(clk->parent_names);
- kfree(clk->name);
+ kfree_const(clk->name);
kfree(clk);
}
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 75c8c45ef728..8539c4fd34cc 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
unsigned long alt_prate, alt_div;
+ unsigned long flags;
alt_prate = clk_get_rate(cpuclk->alt_parent);
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
/*
* If the old parent clock speed is less than the clock speed
@@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
cpuclk->reg_base + reg_data->core_reg);
}
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
@@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
if (!rate) {
@@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
return -EINVAL;
}
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
if (ndata->old_rate < ndata->new_rate)
rockchip_cpuclk_set_dividers(cpuclk, rate);
@@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
if (ndata->old_rate > ndata->new_rate)
rockchip_cpuclk_set_dividers(cpuclk, rate);
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index c54078960847..7eb684c50d42 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
PNAME(mux_mac_p) = { "gpll", "dpll" };
PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
+static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+ RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates),
+ [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+ RK2928_MODE_CON, 4, 4, 0, NULL),
+ [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+ RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+ [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+ RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+};
+
static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
@@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* hclk_peri gates */
GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
- GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS),
GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
- GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
- GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
@@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
- GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(5), 14, GFLAGS),
GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
@@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
- GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
@@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
static void __init rk3066a_clk_init(struct device_node *np)
{
rk3188_common_clk_init(np);
- rockchip_clk_register_plls(rk3188_pll_clks,
- ARRAY_SIZE(rk3188_pll_clks),
+ rockchip_clk_register_plls(rk3066_pll_clks,
+ ARRAY_SIZE(rk3066_pll_clks),
RK3066_GRF_SOC_STATUS);
rockchip_clk_register_branches(rk3066a_clk_branches,
ARRAY_SIZE(rk3066a_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index ac6be7c0132d..cbcddcc02475 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
}
static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
- RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
+ RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
};
static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
@@ -190,7 +190,7 @@ PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
PNAME(mux_uart3_p) = { "uart3_src", "uart3_frac", "xin24m" };
PNAME(mux_uart4_p) = { "uart4_src", "uart4_frac", "xin24m" };
PNAME(mux_cif_out_p) = { "cif_src", "xin24m" };
-PNAME(mux_macref_p) = { "mac_src", "ext_gmac" };
+PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" };
PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
@@ -575,18 +575,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
- COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0,
+ COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(2), 5, GFLAGS),
- MUX(0, "macref", mux_macref_p, 0,
+ MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
- GATE(0, "sclk_macref_out", "macref", 0,
+ GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 3, GFLAGS),
- GATE(SCLK_MACREF, "sclk_macref", "macref", 0,
+ GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 2, GFLAGS),
- GATE(SCLK_MAC_RX, "sclk_mac_rx", "macref", 0,
+ GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 0, GFLAGS),
- GATE(SCLK_MAC_TX, "sclk_mac_tx", "macref", 0,
+ GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 1, GFLAGS),
COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c062b6105d49..1c2506f68122 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -18,6 +18,9 @@ config CLKBLD_I8253
config CLKSRC_MMIO
bool
+config DIGICOLOR_TIMER
+ bool
+
config DW_APB_TIMER
bool
@@ -26,6 +29,10 @@ config DW_APB_TIMER_OF
select DW_APB_TIMER
select CLKSRC_OF
+config ROCKCHIP_TIMER
+ bool
+ select CLKSRC_OF
+
config ARMADA_370_XP_TIMER
bool
select CLKSRC_OF
@@ -232,4 +239,21 @@ config CLKSRC_MIPS_GIC
depends on MIPS_GIC
select CLKSRC_OF
+config CLKSRC_PXA
+ def_bool y if ARCH_PXA || ARCH_SA1100
+ select CLKSRC_OF if USE_OF
+ help
+ This enables OST0 support available on PXA and SA-11x0
+ platforms.
+
+config ASM9260_TIMER
+ bool "Alphascale ASM9260 timer driver"
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+ default y if MACH_ASM9260
+ help
+ This enables the build of a clocksource and clockevent driver for
+ the 32-bit System Timer hardware available on an Alphascale ASM9260.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index ba9ebd868ec5..752d5c70b0ef 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -10,18 +10,20 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
+obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
+obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
-obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
+obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o
obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
-obj-$(CONFIG_ARCH_PXA) += pxa_timer.o
+obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
obj-$(CONFIG_ARCH_U300) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
@@ -48,3 +50,4 @@ obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
+obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 095c1774592c..a3025e7ae35f 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -15,6 +15,7 @@
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
new file mode 100644
index 000000000000..2c9c993727c8
--- /dev/null
+++ b/drivers/clocksource/asm9260_timer.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+
+#define DRIVER_NAME "asm9260-timer"
+
+/*
+ * this device provides 4 offsets for each register:
+ * 0x0 - plain read/write mode
+ * 0x4 - set mode, OR logic.
+ * 0x8 - clr mode, XOR logic.
+ * 0xc - toggle mode.
+ */
+#define SET_REG 4
+#define CLR_REG 8
+
+#define HW_IR 0x0000 /* RW. Interrupt */
+#define BM_IR_CR0 BIT(4)
+#define BM_IR_MR3 BIT(3)
+#define BM_IR_MR2 BIT(2)
+#define BM_IR_MR1 BIT(1)
+#define BM_IR_MR0 BIT(0)
+
+#define HW_TCR 0x0010 /* RW. Timer controller */
+/* BM_C*_RST
+ * Timer Counter and the Prescale Counter are synchronously reset on the
+ * next positive edge of PCLK. The counters remain reset until TCR[1] is
+ * returned to zero. */
+#define BM_C3_RST BIT(7)
+#define BM_C2_RST BIT(6)
+#define BM_C1_RST BIT(5)
+#define BM_C0_RST BIT(4)
+/* BM_C*_EN
+ * 1 - Timer Counter and Prescale Counter are enabled for counting
+ * 0 - counters are disabled */
+#define BM_C3_EN BIT(3)
+#define BM_C2_EN BIT(2)
+#define BM_C1_EN BIT(1)
+#define BM_C0_EN BIT(0)
+
+#define HW_DIR 0x0020 /* RW. Direction? */
+/* 00 - count up
+ * 01 - count down
+ * 10 - ?? 2^n/2 */
+#define BM_DIR_COUNT_UP 0
+#define BM_DIR_COUNT_DOWN 1
+#define BM_DIR0_SHIFT 0
+#define BM_DIR1_SHIFT 4
+#define BM_DIR2_SHIFT 8
+#define BM_DIR3_SHIFT 12
+#define BM_DIR_DEFAULT (BM_DIR_COUNT_UP << BM_DIR0_SHIFT | \
+ BM_DIR_COUNT_UP << BM_DIR1_SHIFT | \
+ BM_DIR_COUNT_UP << BM_DIR2_SHIFT | \
+ BM_DIR_COUNT_UP << BM_DIR3_SHIFT)
+
+#define HW_TC0 0x0030 /* RO. Timer counter 0 */
+/* HW_TC*. A timer counter overflow (0xffff.ffff to 0x0000.0000) does not
+ * generate an interrupt. These registers can be used to detect overflow. */
+#define HW_TC1 0x0040
+#define HW_TC2 0x0050
+#define HW_TC3 0x0060
+
+#define HW_PR 0x0070 /* RW. prescaler */
+#define BM_PR_DISABLE 0
+#define HW_PC 0x0080 /* RO. Prescaler counter */
+#define HW_MCR 0x0090 /* RW. Match control */
+/* enable interrupt on match */
+#define BM_MCR_INT_EN(n) (1 << (n * 3 + 0))
+/* enable TC reset on match */
+#define BM_MCR_RES_EN(n) (1 << (n * 3 + 1))
+/* enable stop TC on match */
+#define BM_MCR_STOP_EN(n) (1 << (n * 3 + 2))
+
+#define HW_MR0 0x00a0 /* RW. Match reg */
+#define HW_MR1 0x00b0
+#define HW_MR2 0x00C0
+#define HW_MR3 0x00D0
+
+#define HW_CTCR 0x0180 /* Counter control */
+#define BM_CTCR0_SHIFT 0
+#define BM_CTCR1_SHIFT 2
+#define BM_CTCR2_SHIFT 4
+#define BM_CTCR3_SHIFT 6
+#define BM_CTCR_TM 0 /* Timer mode. Every rising PCLK edge. */
+#define BM_CTCR_DEFAULT (BM_CTCR_TM << BM_CTCR0_SHIFT | \
+ BM_CTCR_TM << BM_CTCR1_SHIFT | \
+ BM_CTCR_TM << BM_CTCR2_SHIFT | \
+ BM_CTCR_TM << BM_CTCR3_SHIFT)
+
+static struct asm9260_timer_priv {
+ void __iomem *base;
+ unsigned long ticks_per_jiffy;
+} priv;
+
+static int asm9260_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ /* configure match count for TC0 */
+ writel_relaxed(delta, priv.base + HW_MR0);
+ /* enable TC0 */
+ writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
+ return 0;
+}
+
+static void asm9260_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* stop timer0 */
+ writel_relaxed(BM_C0_EN, priv.base + HW_TCR + CLR_REG);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* disable reset and stop on match */
+ writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
+ priv.base + HW_MCR + CLR_REG);
+ /* configure match count for TC0 */
+ writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0);
+ /* enable TC0 */
+ writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* enable reset and stop on match */
+ writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
+ priv.base + HW_MCR + SET_REG);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct clock_event_device event_dev = {
+ .name = DRIVER_NAME,
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = asm9260_timer_set_next_event,
+ .set_mode = asm9260_timer_set_mode,
+};
+
+static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+
+ writel_relaxed(BM_IR_MR0, priv.base + HW_IR);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * Timer initialization
+ * ---------------------------------------------------------------------------
+ */
+static void __init asm9260_timer_init(struct device_node *np)
+{
+ int irq;
+ struct clk *clk;
+ int ret;
+ unsigned long rate;
+
+ priv.base = of_io_request_and_map(np, 0, np->name);
+ if (!priv.base)
+ panic("%s: unable to map resource", np->name);
+
+ clk = of_clk_get(np, 0);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ panic("Failed to enable clk!\n");
+
+ irq = irq_of_parse_and_map(np, 0);
+ ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
+ DRIVER_NAME, &event_dev);
+ if (ret)
+ panic("Failed to setup irq!\n");
+
+ /* set all timers for count-up */
+ writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
+ /* disable divider */
+ writel_relaxed(BM_PR_DISABLE, priv.base + HW_PR);
+ /* make sure all timers use every rising PCLK edge. */
+ writel_relaxed(BM_CTCR_DEFAULT, priv.base + HW_CTCR);
+ /* enable interrupt for TC0 and clean setting for all other lines */
+ writel_relaxed(BM_MCR_INT_EN(0) , priv.base + HW_MCR);
+
+ rate = clk_get_rate(clk);
+ clocksource_mmio_init(priv.base + HW_TC1, DRIVER_NAME, rate,
+ 200, 32, clocksource_mmio_readl_up);
+
+	/* It seems the counter cannot be used without a match register, even
+	 * if all actions for MR are disabled, so set MR to the maximum value. */
+ writel_relaxed(0xffffffff, priv.base + HW_MR1);
+ /* enable TC1 */
+ writel_relaxed(BM_C1_EN, priv.base + HW_TCR + SET_REG);
+
+ priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+ event_dev.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
+}
+CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
+ asm9260_timer_init);
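
The asm9260 driver above never read-modify-writes HW_TCR; single bits are written to the +4 (set) and +8 (clr) register aliases instead. The sketch below models that idiom in plain C. The alias semantics shown (set = OR in the written bits, clr = mask them out) are an assumption inferred from how the driver starts and stops TC0/TC1, not a statement about the real Alphascale register file.

#include <stdio.h>
#include <stdint.h>

#define SET_REG 4
#define CLR_REG 8

#define BM_C0_EN (1u << 0)
#define BM_C1_EN (1u << 1)

/* Toy model of one register with aliases: offset 0 is the plain register,
 * +4 sets the written bits, +8 clears them (assumed semantics). */
struct demo_regs {
	uint32_t tcr;
};

static void demo_writel(uint32_t val, struct demo_regs *r, unsigned int off)
{
	switch (off) {
	case 0:
		r->tcr = val;		/* plain read/write alias */
		break;
	case SET_REG:
		r->tcr |= val;		/* set only the written bits */
		break;
	case CLR_REG:
		r->tcr &= ~val;		/* clear only the written bits */
		break;
	}
}

int main(void)
{
	struct demo_regs regs = { .tcr = 0 };

	demo_writel(BM_C0_EN, &regs, SET_REG);	/* enable TC0 (set_next_event) */
	demo_writel(BM_C1_EN, &regs, SET_REG);	/* enable TC1 (init) */
	demo_writel(BM_C0_EN, &regs, CLR_REG);	/* stop TC0 (set_mode) */
	printf("tcr = 0x%x\n", regs.tcr);	/* 0x2: only TC1 still enabled */
	return 0;
}
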
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0595dc6c453e..f1e33d08dd83 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
}
static void
-kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
+kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- void __iomem *base = IOMEM(timer_base);
int loop_limit = 4;
/*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
*/
while (--loop_limit) {
- *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
- *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
- if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
+ *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
+ *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
+ if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
}
if (!loop_limit) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9403061a2acc..83564c9cfdbe 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
writel_relaxed(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
- switch (offset & EXYNOS4_MCT_L_MASK) {
+ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & ~EXYNOS4_MCT_L_MASK) {
case MCT_L_TCON_OFFSET:
mask = 1 << 3; /* L_TCON write status */
break;
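
The exynos_mct hunk above swaps which half of the offset each expression keeps: the write-status address is built from the per-timer block base (offset & mask), while the switch sees the register offset inside that block (offset & ~mask). The snippet below only demonstrates that arithmetic; all three constants are assumptions chosen for illustration, not the driver's real EXYNOS4_MCT_L_MASK or register map.

#include <stdio.h>
#include <stdint.h>

#define DEMO_L_MASK		0xffffff00u	/* keeps the per-timer block base */
#define DEMO_L_TCON_OFFSET	0x08u
#define DEMO_WSTAT_OFFSET	0x40u

int main(void)
{
	uint32_t offset = 0x308;	/* TCON register of a timer block at 0x300 */

	/* Fixed form from the hunk: base + WSTAT, low bits for the switch. */
	uint32_t stat_addr = (offset & DEMO_L_MASK) + DEMO_WSTAT_OFFSET;
	uint32_t reg       = offset & ~DEMO_L_MASK;

	/* Pre-fix form with the two masks swapped, for comparison. */
	uint32_t bad_stat  = (offset & ~DEMO_L_MASK) + DEMO_WSTAT_OFFSET;
	uint32_t bad_reg   = offset & DEMO_L_MASK;

	printf("fixed:  stat=0x%03x reg=0x%02x matches TCON: %d\n",
	       stat_addr, reg, reg == DEMO_L_TCON_OFFSET);
	printf("broken: stat=0x%03x reg=0x%03x matches TCON: %d\n",
	       bad_stat, bad_reg, bad_reg == DEMO_L_TCON_OFFSET);
	return 0;
}
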
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
new file mode 100644
index 000000000000..a35993bafb20
--- /dev/null
+++ b/drivers/clocksource/rockchip_timer.c
@@ -0,0 +1,180 @@
+/*
+ * Rockchip timer support
+ *
+ * Copyright (C) Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_NAME "rk_timer"
+
+#define TIMER_LOAD_COUNT0 0x00
+#define TIMER_LOAD_COUNT1 0x04
+#define TIMER_CONTROL_REG 0x10
+#define TIMER_INT_STATUS 0x18
+
+#define TIMER_DISABLE 0x0
+#define TIMER_ENABLE 0x1
+#define TIMER_MODE_FREE_RUNNING (0 << 1)
+#define TIMER_MODE_USER_DEFINED_COUNT (1 << 1)
+#define TIMER_INT_UNMASK (1 << 2)
+
+struct bc_timer {
+ struct clock_event_device ce;
+ void __iomem *base;
+ u32 freq;
+};
+
+static struct bc_timer bc_timer;
+
+static inline struct bc_timer *rk_timer(struct clock_event_device *ce)
+{
+ return container_of(ce, struct bc_timer, ce);
+}
+
+static inline void __iomem *rk_base(struct clock_event_device *ce)
+{
+ return rk_timer(ce)->base;
+}
+
+static inline void rk_timer_disable(struct clock_event_device *ce)
+{
+ writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
+ dsb();
+}
+
+static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
+{
+ writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
+ rk_base(ce) + TIMER_CONTROL_REG);
+ dsb();
+}
+
+static void rk_timer_update_counter(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ writel_relaxed(cycles, rk_base(ce) + TIMER_LOAD_COUNT0);
+ writel_relaxed(0, rk_base(ce) + TIMER_LOAD_COUNT1);
+ dsb();
+}
+
+static void rk_timer_interrupt_clear(struct clock_event_device *ce)
+{
+ writel_relaxed(1, rk_base(ce) + TIMER_INT_STATUS);
+ dsb();
+}
+
+static inline int rk_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ rk_timer_disable(ce);
+ rk_timer_update_counter(cycles, ce);
+ rk_timer_enable(ce, TIMER_MODE_USER_DEFINED_COUNT);
+ return 0;
+}
+
+static inline void rk_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *ce)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ rk_timer_disable(ce);
+ rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce);
+ rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ rk_timer_disable(ce);
+ break;
+ }
+}
+
+static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = dev_id;
+
+ rk_timer_interrupt_clear(ce);
+
+ if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+ rk_timer_disable(ce);
+
+ ce->event_handler(ce);
+
+ return IRQ_HANDLED;
+}
+
+static void __init rk_timer_init(struct device_node *np)
+{
+ struct clock_event_device *ce = &bc_timer.ce;
+ struct clk *timer_clk;
+ struct clk *pclk;
+ int ret, irq;
+
+ bc_timer.base = of_iomap(np, 0);
+ if (!bc_timer.base) {
+ pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
+ return;
+ }
+
+ pclk = of_clk_get_by_name(np, "pclk");
+ if (IS_ERR(pclk)) {
+ pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
+ return;
+ }
+
+ if (clk_prepare_enable(pclk)) {
+ pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
+ return;
+ }
+
+ timer_clk = of_clk_get_by_name(np, "timer");
+ if (IS_ERR(timer_clk)) {
+ pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
+ return;
+ }
+
+ if (clk_prepare_enable(timer_clk)) {
+ pr_err("Failed to enable timer clock\n");
+ return;
+ }
+
+ bc_timer.freq = clk_get_rate(timer_clk);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
+ return;
+ }
+
+ ce->name = TIMER_NAME;
+ ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ce->set_next_event = rk_timer_set_next_event;
+ ce->set_mode = rk_timer_set_mode;
+ ce->irq = irq;
+ ce->cpumask = cpumask_of(0);
+ ce->rating = 250;
+
+ rk_timer_interrupt_clear(ce);
+ rk_timer_disable(ce);
+
+ ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce);
+ if (ret) {
+ pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret);
+ return;
+ }
+
+ clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
+}
+CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
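A quick sanity check of the periodic reload value programmed in rk_timer_set_mode() above: for CLOCK_EVT_MODE_PERIODIC the driver loads freq/HZ - 1 cycles before enabling free-running mode. The sketch below is standalone user-space C; the 24 MHz rate and HZ=100 are assumptions for illustration, not values taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int freq = 24000000;	/* assumed timer input clock */
	unsigned int hz = 100;		/* assumed CONFIG_HZ */

	/* a 10 ms tick is 240000 input clocks, programmed as N - 1 */
	printf("periodic reload: %u cycles\n", freq / hz - 1);
	return 0;
}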
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0f665b8f2461..f150ca82bfaf 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
- ced->cpumask = cpumask_of(0);
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
ced->suspend = sh_tmu_clock_event_suspend;
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-atlas7.c
index 361a789d4bee..60f9de3438b0 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -38,7 +38,7 @@
#define SIRFSOC_TIMER_REG_CNT 6
-static unsigned long marco_timer_rate;
+static unsigned long atlas7_timer_rate;
static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
SIRFSOC_TIMER_WATCHDOG_EN,
@@ -195,7 +195,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
ce->rating = 200;
ce->set_mode = sirfsoc_timer_set_mode;
ce->set_next_event = sirfsoc_timer_set_next_event;
- clockevents_calc_mult_shift(ce, marco_timer_rate, 60);
+ clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60);
ce->max_delta_ns = clockevent_delta2ns(-2, ce);
ce->min_delta_ns = clockevent_delta2ns(2, ce);
ce->cpumask = cpumask_of(cpu);
@@ -255,9 +255,8 @@ static void __init sirfsoc_clockevent_init(void)
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_marco_timer_init(struct device_node *np)
+static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
{
- u32 timer_div;
struct clk *clk;
clk = of_clk_get(np, 0);
@@ -265,7 +264,7 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
BUG_ON(clk_prepare_enable(clk));
- marco_timer_rate = clk_get_rate(clk);
+ atlas7_timer_rate = clk_get_rate(clk);
/* timer dividers: 0, not divided */
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
@@ -283,7 +282,7 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
/* Clear all interrupts */
writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, marco_timer_rate));
+ BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
sirfsoc_clockevent_init();
}
@@ -302,6 +301,6 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)
if (!sirfsoc_timer1_irq.irq)
panic("No irq passed for timer1 via DT\n");
- sirfsoc_marco_timer_init(np);
+ sirfsoc_atlas7_timer_init(np);
}
-CLOCKSOURCE_OF_DECLARE(sirfsoc_marco_timer, "sirf,marco-tick", sirfsoc_of_timer_init );
+CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
new file mode 100644
index 000000000000..7f8388cfa810
--- /dev/null
+++ b/drivers/clocksource/timer-digicolor.c
@@ -0,0 +1,199 @@
+/*
+ * Conexant Digicolor timer driver
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2014 Paradox Innovation Ltd.
+ *
+ * Based on:
+ * Allwinner SoCs hstimer driver
+ *
+ * Copyright (C) 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * Conexant Digicolor SoCs have 8 configurable timers, named from "Timer A" to
+ * "Timer H". Timer A is the only one with watchdog support, so it is dedicated
+ * to the watchdog driver. This driver uses Timer B for sched_clock(), and
+ * Timer C for clockevents.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+enum {
+ TIMER_A,
+ TIMER_B,
+ TIMER_C,
+ TIMER_D,
+ TIMER_E,
+ TIMER_F,
+ TIMER_G,
+ TIMER_H,
+};
+
+#define CONTROL(t) ((t)*8)
+#define COUNT(t) ((t)*8 + 4)
+
+#define CONTROL_DISABLE 0
+#define CONTROL_ENABLE BIT(0)
+#define CONTROL_MODE(m) ((m) << 4)
+#define CONTROL_MODE_ONESHOT CONTROL_MODE(1)
+#define CONTROL_MODE_PERIODIC CONTROL_MODE(2)
+
+struct digicolor_timer {
+ struct clock_event_device ce;
+ void __iomem *base;
+ u32 ticks_per_jiffy;
+ int timer_id; /* one of TIMER_* */
+};
+
+struct digicolor_timer *dc_timer(struct clock_event_device *ce)
+{
+ return container_of(ce, struct digicolor_timer, ce);
+}
+
+static inline void dc_timer_disable(struct clock_event_device *ce)
+{
+ struct digicolor_timer *dt = dc_timer(ce);
+ writeb(CONTROL_DISABLE, dt->base + CONTROL(dt->timer_id));
+}
+
+static inline void dc_timer_enable(struct clock_event_device *ce, u32 mode)
+{
+ struct digicolor_timer *dt = dc_timer(ce);
+ writeb(CONTROL_ENABLE | mode, dt->base + CONTROL(dt->timer_id));
+}
+
+static inline void dc_timer_set_count(struct clock_event_device *ce,
+ unsigned long count)
+{
+ struct digicolor_timer *dt = dc_timer(ce);
+ writel(count, dt->base + COUNT(dt->timer_id));
+}
+
+static void digicolor_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *ce)
+{
+ struct digicolor_timer *dt = dc_timer(ce);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ dc_timer_disable(ce);
+ dc_timer_set_count(ce, dt->ticks_per_jiffy);
+ dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ dc_timer_disable(ce);
+ dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ dc_timer_disable(ce);
+ break;
+ }
+}
+
+static int digicolor_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *ce)
+{
+ dc_timer_disable(ce);
+ dc_timer_set_count(ce, evt);
+ dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
+
+ return 0;
+}
+
+static struct digicolor_timer dc_timer_dev = {
+ .ce = {
+ .name = "digicolor_tick",
+ .rating = 340,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = digicolor_clkevt_mode,
+ .set_next_event = digicolor_clkevt_next_event,
+ },
+ .timer_id = TIMER_C,
+};
+
+static irqreturn_t digicolor_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static u64 digicolor_timer_sched_read(void)
+{
+ return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
+}
+
+static void __init digicolor_timer_init(struct device_node *node)
+{
+ unsigned long rate;
+ struct clk *clk;
+ int ret, irq;
+
+ /*
+ * timer registers are shared with the watchdog timer;
+ * don't map exclusively
+ */
+ dc_timer_dev.base = of_iomap(node, 0);
+ if (!dc_timer_dev.base) {
+ pr_err("Can't map registers");
+ return;
+ }
+
+ irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return;
+ }
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Can't get timer clock");
+ return;
+ }
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+ dc_timer_dev.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+ writeb(CONTROL_DISABLE, dc_timer_dev.base + CONTROL(TIMER_B));
+ writel(UINT_MAX, dc_timer_dev.base + COUNT(TIMER_B));
+ writeb(CONTROL_ENABLE, dc_timer_dev.base + CONTROL(TIMER_B));
+
+ sched_clock_register(digicolor_timer_sched_read, 32, rate);
+ clocksource_mmio_init(dc_timer_dev.base + COUNT(TIMER_B), node->name,
+ rate, 340, 32, clocksource_mmio_readl_down);
+
+ ret = request_irq(irq, digicolor_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
+ &dc_timer_dev.ce);
+ if (ret)
+ pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
+
+ dc_timer_dev.ce.cpumask = cpu_possible_mask;
+ dc_timer_dev.ce.irq = irq;
+
+ clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
+}
+CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
+ digicolor_timer_init);
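One detail worth spelling out: Timer B counts down from UINT_MAX, so digicolor_timer_sched_read() returns the bitwise NOT of COUNT to give sched_clock() a monotonically increasing value (the clocksource side uses clocksource_mmio_readl_down for the same reason). A minimal user-space sketch with made-up register samples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t earlier = 0xfffffff0;	/* assumed COUNT sample */
	uint32_t later   = 0xffffffd0;	/* a later, smaller sample */

	/* inverting the down-counter yields an increasing sequence */
	printf("%u -> %u\n", ~earlier, ~later);	/* 15 -> 47 */
	return 0;
}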
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
index 2798e7492234..0a26d3dde6c0 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/versatile.c
@@ -36,5 +36,7 @@ static void __init versatile_sched_clock_init(struct device_node *node)
sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
}
-CLOCKSOURCE_OF_DECLARE(versatile, "arm,vexpress-sysreg",
+CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
+ versatile_sched_clock_init);
+CLOCKSOURCE_OF_DECLARE(versatile, "arm,versatile-sysreg",
versatile_sched_clock_init);
diff --git a/drivers/coresight/coresight-etb10.c b/drivers/coresight/coresight-etb10.c
index c922d4aded8a..c9acd406f0d0 100644
--- a/drivers/coresight/coresight-etb10.c
+++ b/drivers/coresight/coresight-etb10.c
@@ -454,7 +454,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
return ret;
- drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
+ drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
clk_disable_unprepare(drvdata->clk);
if (drvdata->buffer_depth < 0)
@@ -521,17 +521,7 @@ static struct amba_driver etb_driver = {
.id_table = etb_ids,
};
-static int __init etb_init(void)
-{
- return amba_driver_register(&etb_driver);
-}
-module_init(etb_init);
-
-static void __exit etb_exit(void)
-{
- amba_driver_unregister(&etb_driver);
-}
-module_exit(etb_exit);
+module_amba_driver(etb_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/coresight/coresight-etm3x.c
index d9e3ed6aa857..c965f5724abd 100644
--- a/drivers/coresight/coresight-etm3x.c
+++ b/drivers/coresight/coresight-etm3x.c
@@ -34,14 +34,8 @@
#include "coresight-etm.h"
-#ifdef CONFIG_CORESIGHT_SOURCE_ETM_DEFAULT_ENABLE
-static int boot_enable = 1;
-#else
static int boot_enable;
-#endif
-module_param_named(
- boot_enable, boot_enable, int, S_IRUGO
-);
+module_param_named(boot_enable, boot_enable, int, S_IRUGO);
/* The number of ETM/PTM currently registered */
static int etm_count;
@@ -573,7 +567,8 @@ static ssize_t mode_store(struct device *dev,
if (drvdata->mode & ETM_MODE_STALL) {
if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
dev_warn(drvdata->dev, "stall mode not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
drvdata->ctrl |= ETMCR_STALL_MODE;
} else
@@ -582,7 +577,8 @@ static ssize_t mode_store(struct device *dev,
if (drvdata->mode & ETM_MODE_TIMESTAMP) {
if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
dev_warn(drvdata->dev, "timestamp not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
} else
@@ -595,6 +591,10 @@ static ssize_t mode_store(struct device *dev,
spin_unlock(&drvdata->spinlock);
return size;
+
+err_unlock:
+ spin_unlock(&drvdata->spinlock);
+ return ret;
}
static DEVICE_ATTR_RW(mode);
@@ -1743,7 +1743,11 @@ static void etm_init_arch_data(void *info)
static void etm_init_default_data(struct etm_drvdata *drvdata)
{
- static int etm3x_traceid;
+ /*
+ * A trace ID of value 0 is invalid, so let's start at some
+ * random value that fits in 7 bits and will be just as good.
+ */
+ static int etm3x_traceid = 0x10;
u32 flags = (1 << 0 | /* instruction execute*/
3 << 3 | /* ARM instruction */
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/coresight/coresight-funnel.c
index 2108edffe1f4..3db36f70b666 100644
--- a/drivers/coresight/coresight-funnel.c
+++ b/drivers/coresight/coresight-funnel.c
@@ -252,17 +252,7 @@ static struct amba_driver funnel_driver = {
.id_table = funnel_ids,
};
-static int __init funnel_init(void)
-{
- return amba_driver_register(&funnel_driver);
-}
-module_init(funnel_init);
-
-static void __exit funnel_exit(void)
-{
- amba_driver_unregister(&funnel_driver);
-}
-module_exit(funnel_exit);
+module_amba_driver(funnel_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
index 7b3372fca4f6..62fcd98cc7cf 100644
--- a/drivers/coresight/coresight-priv.h
+++ b/drivers/coresight/coresight-priv.h
@@ -57,7 +57,7 @@ extern int etm_readl_cp14(u32 off, unsigned int *val);
extern int etm_writel_cp14(u32 off, u32 val);
#else
static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
-static inline int etm_writel_cp14(u32 val, u32 off) { return 0; }
+static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#endif
#endif
diff --git a/drivers/coresight/coresight-replicator.c b/drivers/coresight/coresight-replicator.c
index a2dfcf903551..cdf05537d574 100644
--- a/drivers/coresight/coresight-replicator.c
+++ b/drivers/coresight/coresight-replicator.c
@@ -87,7 +87,7 @@ static int replicator_probe(struct platform_device *pdev)
return -ENOMEM;
desc->type = CORESIGHT_DEV_TYPE_LINK;
- desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+ desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc->ops = &replicator_cs_ops;
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index ce2c293f1707..3ff232f9ddf7 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -760,17 +760,7 @@ static struct amba_driver tmc_driver = {
.id_table = tmc_ids,
};
-static int __init tmc_init(void)
-{
- return amba_driver_register(&tmc_driver);
-}
-module_init(tmc_init);
-
-static void __exit tmc_exit(void)
-{
- amba_driver_unregister(&tmc_driver);
-}
-module_exit(tmc_exit);
+module_amba_driver(tmc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
index ae101082791a..3b33af2416bb 100644
--- a/drivers/coresight/coresight-tpiu.c
+++ b/drivers/coresight/coresight-tpiu.c
@@ -201,17 +201,7 @@ static struct amba_driver tpiu_driver = {
.id_table = tpiu_ids,
};
-static int __init tpiu_init(void)
-{
- return amba_driver_register(&tpiu_driver);
-}
-module_init(tpiu_init);
-
-static void __exit tpiu_exit(void)
-{
- amba_driver_unregister(&tpiu_driver);
-}
-module_exit(tpiu_exit);
+module_amba_driver(tpiu_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
index 6e0181f84425..c5def9382357 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/coresight/coresight.c
@@ -498,17 +498,18 @@ static int coresight_orphan_match(struct device *dev, void *data)
* Circle through all the connections of that component. If we find
* an orphan connection whose name matches @csdev, link it.
*/
- for (i = 0; i < i_csdev->nr_outport; i++) {
+ for (i = 0; i < i_csdev->nr_outport; i++) {
conn = &i_csdev->conns[i];
/* We have found at least one orphan connection */
if (conn->child_dev == NULL) {
/* Does it match this newly added device? */
- if (!strcmp(dev_name(&csdev->dev), conn->child_name))
+ if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
conn->child_dev = csdev;
- } else {
- /* Too bad, this component still has an orphan */
- still_orphan = true;
+ } else {
+ /* This component still has an orphan */
+ still_orphan = true;
+ }
}
}
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
index 5030c0734508..c3efa418a86d 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/coresight/of_coresight.c
@@ -93,7 +93,7 @@ static int of_coresight_alloc_memory(struct device *dev,
if (!pdata->outports)
return -ENOMEM;
- /* Children connected to this component via @outport */
+ /* Children connected to this component via @outports */
pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
sizeof(*pdata->child_names),
GFP_KERNEL);
@@ -117,7 +117,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
struct coresight_platform_data *pdata;
struct of_endpoint endpoint, rendpoint;
struct device *rdev;
- struct device_node *cpu;
+ struct device_node *dn;
struct device_node *ep = NULL;
struct device_node *rparent = NULL;
struct device_node *rport = NULL;
@@ -126,7 +126,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
if (!pdata)
return ERR_PTR(-ENOMEM);
- /* Use device name as debugfs handle */
+ /* Use device name as sysfs handle */
pdata->name = dev_name(dev);
/* Get the number of input and output port for this component */
@@ -174,7 +174,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
continue;
rdev = of_coresight_get_endpoint_device(rparent);
- if (!dev)
+ if (!rdev)
continue;
pdata->child_names[i] = dev_name(rdev);
@@ -186,14 +186,16 @@ struct coresight_platform_data *of_get_coresight_platform_data(
/* Affinity defaults to CPU0 */
pdata->cpu = 0;
- cpu = of_parse_phandle(node, "cpu", 0);
- if (cpu) {
- const u32 *mpidr;
+ dn = of_parse_phandle(node, "cpu", 0);
+ if (dn) {
+ const u32 *cell;
int len, index;
+ u64 hwid;
- mpidr = of_get_property(cpu, "reg", &len);
- if (mpidr && len == 4) {
- index = get_logical_index(be32_to_cpup(mpidr));
+ cell = of_get_property(dn, "reg", &len);
+ if (cell) {
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+ index = get_logical_index(hwid);
if (index != -EINVAL)
pdata->cpu = index;
}
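For reference, the of_read_number()/of_n_addr_cells() combination used in the affinity hunk above folds the CPU node's "reg" cells into one 64-bit hwid, which is what lets the code cope with both 1-cell and 2-cell #address-cells layouts instead of assuming a 4-byte MPIDR. A rough user-space model of that cell folding (the 2-cell value is an assumption; the kernel helper also byte-swaps each big-endian cell):

#include <stdint.h>
#include <stdio.h>

static uint64_t fold_cells(const uint32_t *cell, int ncells)
{
	uint64_t r = 0;

	while (ncells--)
		r = (r << 32) | *cell++;	/* kernel: be32_to_cpu(*cell++) */
	return r;
}

int main(void)
{
	uint32_t reg[2] = { 0x0, 0x100 };	/* assumed arm64 cpu "reg" cells */

	printf("hwid = 0x%llx\n", (unsigned long long)fold_cells(reg, 2));
	return 0;
}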
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 29b2ef5a68b9..a171fef2c2b6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
config CPU_FREQ
bool "CPU Frequency scaling"
+ select SRCU
help
CPU Frequency scaling allows you to change the clock speed of
CPUs on the fly. This is a nice method to save power, because
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 89ae88f91895..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -57,6 +57,16 @@ config X86_ACPI_CPUFREQ_CPB
By enabling this option the acpi_cpufreq driver provides the old
entry in addition to the new boost ones, for compatibility reasons.
+config X86_SFI_CPUFREQ
+ tristate "SFI Performance-States driver"
+ depends on X86_INTEL_MID && SFI
+ help
+ This adds a CPUFreq driver for some Silvermont based Intel Atom
+ architectures like Z34xx and Z35xx which enumerate processor
+ performance states through SFI.
+
+ If in doubt, say N.
+
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
depends on MELAN
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index b3ca7b0b2c33..8b4220ac888b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
+obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index fde97d6e31d6..bab67db54b7e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -320,8 +320,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- if (priv->cdev)
- cpufreq_cooling_unregister(priv->cdev);
+ cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
of_free_opp_table(priv->cpu_dev);
clk_put(policy->clk);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 46bed4f81cde..28e59a48b35f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -27,9 +27,21 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
+/* Macros to iterate over lists */
+/* Iterate over online CPUs policies */
+static LIST_HEAD(cpufreq_policy_list);
+#define for_each_policy(__policy) \
+ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
+
+/* Iterate over governors */
+static LIST_HEAD(cpufreq_governor_list);
+#define for_each_governor(__governor) \
+ list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
+
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
@@ -40,7 +52,6 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
-static LIST_HEAD(cpufreq_policy_list);
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
@@ -62,7 +73,7 @@ static DECLARE_RWSEM(cpufreq_rwsem);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
unsigned int event);
-static unsigned int __cpufreq_get(unsigned int cpu);
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/**
@@ -93,7 +104,6 @@ void disable_cpufreq(void)
{
off = 1;
}
-static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
@@ -202,7 +212,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
struct cpufreq_policy *policy = NULL;
unsigned long flags;
- if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
+ if (cpu >= nr_cpu_ids)
return NULL;
if (!down_read_trylock(&cpufreq_rwsem))
@@ -229,9 +239,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
- if (cpufreq_disabled())
- return;
-
kobject_put(&policy->kobj);
up_read(&cpufreq_rwsem);
}
@@ -249,12 +256,12 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
* systems as each CPU might be scaled differently. So, use the arch
* per-CPU loops_per_jiffy value wherever possible.
*/
-#ifndef CONFIG_SMP
-static unsigned long l_p_j_ref;
-static unsigned int l_p_j_ref_freq;
-
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
+#ifndef CONFIG_SMP
+ static unsigned long l_p_j_ref;
+ static unsigned int l_p_j_ref_freq;
+
if (ci->flags & CPUFREQ_CONST_LOOPS)
return;
@@ -270,13 +277,8 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
loops_per_jiffy, ci->new);
}
-}
-#else
-static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
-{
- return;
-}
#endif
+}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
@@ -432,11 +434,11 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
}
define_one_global_rw(boost);
-static struct cpufreq_governor *__find_governor(const char *str_governor)
+static struct cpufreq_governor *find_governor(const char *str_governor)
{
struct cpufreq_governor *t;
- list_for_each_entry(t, &cpufreq_governor_list, governor_list)
+ for_each_governor(t)
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
@@ -463,12 +465,12 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
}
- } else if (has_target()) {
+ } else {
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
- t = __find_governor(str_governor);
+ t = find_governor(str_governor);
if (t == NULL) {
int ret;
@@ -478,7 +480,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_lock(&cpufreq_governor_mutex);
if (ret == 0)
- t = __find_governor(str_governor);
+ t = find_governor(str_governor);
}
if (t != NULL) {
@@ -513,8 +515,7 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-static ssize_t show_scaling_cur_freq(
- struct cpufreq_policy *policy, char *buf)
+static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
@@ -563,7 +564,7 @@ store_one(scaling_max_freq, max);
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
- unsigned int cur_freq = __cpufreq_get(policy->cpu);
+ unsigned int cur_freq = __cpufreq_get(policy);
if (!cur_freq)
return sprintf(buf, "<unknown>");
return sprintf(buf, "%u\n", cur_freq);
@@ -639,7 +640,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
goto out;
}
- list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
+ for_each_governor(t) {
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
goto out;
@@ -902,7 +903,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
- while ((drv_attr) && (*drv_attr)) {
+ while (drv_attr && *drv_attr) {
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
if (ret)
return ret;
@@ -936,7 +937,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
memcpy(&new_policy, policy, sizeof(*policy));
/* Update governor of new_policy to the governor used before hotplug */
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
if (gov)
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
@@ -958,7 +959,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
}
}
-#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev)
{
@@ -996,7 +996,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
-#endif
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
@@ -1033,6 +1032,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
+ init_completion(&policy->kobj_unregister);
+ INIT_WORK(&policy->update, handle_update);
return policy;
@@ -1091,15 +1092,9 @@ static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
}
down_write(&policy->rwsem);
-
- policy->last_cpu = policy->cpu;
policy->cpu = cpu;
-
up_write(&policy->rwsem);
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_UPDATE_POLICY_CPU, policy);
-
return 0;
}
@@ -1110,41 +1105,32 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
struct cpufreq_policy *policy;
unsigned long flags;
bool recover_policy = cpufreq_suspended;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_policy *tpolicy;
-#endif
if (cpu_is_offline(cpu))
return 0;
pr_debug("adding CPU %u\n", cpu);
-#ifdef CONFIG_SMP
/* check whether a different CPU already registered this
* CPU because it is in the same boat. */
- policy = cpufreq_cpu_get(cpu);
- if (unlikely(policy)) {
- cpufreq_cpu_put(policy);
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(policy))
return 0;
- }
-#endif
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
-#ifdef CONFIG_HOTPLUG_CPU
/* Check if this cpu was hot-unplugged earlier and has siblings */
read_lock_irqsave(&cpufreq_driver_lock, flags);
- list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
- if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
+ for_each_policy(policy) {
+ if (cpumask_test_cpu(cpu, policy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
+ ret = cpufreq_add_policy_cpu(policy, cpu, dev);
up_read(&cpufreq_rwsem);
return ret;
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
/*
* Restore the saved policy when doing light-weight init and fall back
@@ -1171,9 +1157,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpumask_copy(policy->cpus, cpumask_of(cpu));
- init_completion(&policy->kobj_unregister);
- INIT_WORK(&policy->update, handle_update);
-
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
@@ -1371,11 +1354,10 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
pr_err("%s: Failed to stop governor\n", __func__);
return ret;
}
- }
- if (!cpufreq_driver->setpolicy)
strncpy(per_cpu(cpufreq_cpu_governor, cpu),
policy->governor->name, CPUFREQ_NAME_LEN);
+ }
down_read(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
@@ -1416,9 +1398,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
unsigned long flags;
struct cpufreq_policy *policy;
- read_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
@@ -1473,7 +1456,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
return 0;
}
@@ -1510,30 +1492,23 @@ static void handle_update(struct work_struct *work)
/**
* cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
* in deep trouble.
- * @cpu: cpu number
- * @old_freq: CPU frequency the kernel thinks the CPU runs at
+ * @policy: policy managing CPUs
* @new_freq: CPU frequency the CPU actually runs at
*
* We adjust to current frequency first, and need to clean up later.
* So either call to cpufreq_update_policy() or schedule handle_update()).
*/
-static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
+static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
unsigned int new_freq)
{
- struct cpufreq_policy *policy;
struct cpufreq_freqs freqs;
- unsigned long flags;
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
- old_freq, new_freq);
+ policy->cur, new_freq);
- freqs.old = old_freq;
+ freqs.old = policy->cur;
freqs.new = new_freq;
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = per_cpu(cpufreq_cpu_data, cpu);
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
cpufreq_freq_transition_begin(policy, &freqs);
cpufreq_freq_transition_end(policy, &freqs, 0);
}
@@ -1583,22 +1558,21 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
-static unsigned int __cpufreq_get(unsigned int cpu)
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
unsigned int ret_freq = 0;
if (!cpufreq_driver->get)
return ret_freq;
- ret_freq = cpufreq_driver->get(cpu);
+ ret_freq = cpufreq_driver->get(policy->cpu);
if (ret_freq && policy->cur &&
!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
/* verify no discrepancy between actual and
saved value exists */
if (unlikely(ret_freq != policy->cur)) {
- cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
+ cpufreq_out_of_sync(policy, ret_freq);
schedule_work(&policy->update);
}
}
@@ -1619,7 +1593,7 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(cpu);
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -1682,7 +1656,7 @@ void cpufreq_suspend(void)
pr_debug("%s: Suspending Governors\n", __func__);
- list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ for_each_policy(policy) {
if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
pr_err("%s: Failed to stop governor for policy: %p\n",
__func__, policy);
@@ -1716,7 +1690,7 @@ void cpufreq_resume(void)
pr_debug("%s: Resuming Governors\n", __func__);
- list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ for_each_policy(policy) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
pr_err("%s: Failed to resume driver: %p\n", __func__,
policy);
@@ -2006,10 +1980,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-/*
- * when "event" is CPUFREQ_GOV_LIMITS
- */
-
static int __cpufreq_governor(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -2107,7 +2077,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
governor->initialized = 0;
err = -EBUSY;
- if (__find_governor(governor->name) == NULL) {
+ if (!find_governor(governor->name)) {
err = 0;
list_add(&governor->governor_list, &cpufreq_governor_list);
}
@@ -2307,8 +2277,7 @@ int cpufreq_update_policy(unsigned int cpu)
policy->cur = new_policy.cur;
} else {
if (policy->cur != new_policy.cur && has_target())
- cpufreq_out_of_sync(cpu, policy->cur,
- new_policy.cur);
+ cpufreq_out_of_sync(policy, new_policy.cur);
}
}
@@ -2364,7 +2333,7 @@ static int cpufreq_boost_set_sw(int state)
struct cpufreq_policy *policy;
int ret = -EINVAL;
- list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ for_each_policy(policy) {
freq_table = cpufreq_frequency_get_table(policy->cpu);
if (freq_table) {
ret = cpufreq_frequency_table_cpuinfo(policy,
@@ -2454,9 +2423,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("trying to register driver %s\n", driver_data->name);
- if (driver_data->setpolicy)
- driver_data->flags |= CPUFREQ_CONST_LOOPS;
-
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2465,6 +2431,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ if (driver_data->setpolicy)
+ driver_data->flags |= CPUFREQ_CONST_LOOPS;
+
if (cpufreq_boost_supported()) {
/*
* Check if driver provides function to enable boost -
@@ -2485,23 +2454,12 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret)
goto err_boost_unreg;
- if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
- int i;
- ret = -ENODEV;
-
- /* check for at least one working CPU */
- for (i = 0; i < nr_cpu_ids; i++)
- if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
- ret = 0;
- break;
- }
-
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
+ list_empty(&cpufreq_policy_list)) {
/* if all ->init() calls failed, unregister */
- if (ret) {
- pr_debug("no CPU initialized for driver %s\n",
- driver_data->name);
- goto err_if_unreg;
- }
+ pr_debug("%s: No CPU initialized for driver %s\n", __func__,
+ driver_data->name);
+ goto err_if_unreg;
}
register_hotcpu_notifier(&cpufreq_cpu_notifier);
@@ -2556,6 +2514,14 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+/*
+ * Stop cpufreq at shutdown to make sure it isn't holding any locks
+ * or mutexes when secondary CPUs are halted.
+ */
+static struct syscore_ops cpufreq_syscore_ops = {
+ .shutdown = cpufreq_suspend,
+};
+
static int __init cpufreq_core_init(void)
{
if (cpufreq_disabled())
@@ -2564,6 +2530,8 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
+ register_syscore_ops(&cpufreq_syscore_ops);
+
return 0;
}
core_initcall(cpufreq_core_init);
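Illustrative usage of the for_each_policy()/for_each_governor() helpers introduced at the top of this file; the function below is a sketch, not part of the patch, and assumes the caller holds whatever lock the real call sites take (cpufreq_driver_lock for the policy list).

static unsigned int count_managed_cpus(void)
{
	struct cpufreq_policy *policy;
	unsigned int n = 0;

	for_each_policy(policy)		/* walks cpufreq_policy_list */
		n += cpumask_weight(policy->cpus);

	return n;
}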
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 0cd9b4dcef99..5e370a30a964 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -18,7 +18,6 @@
static spinlock_t cpufreq_stats_lock;
struct cpufreq_stats {
- unsigned int cpu;
unsigned int total_trans;
unsigned long long last_time;
unsigned int max_state;
@@ -31,50 +30,33 @@ struct cpufreq_stats {
#endif
};
-static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
-
-struct cpufreq_stats_attribute {
- struct attribute attr;
- ssize_t(*show) (struct cpufreq_stats *, char *);
-};
-
-static int cpufreq_stats_update(unsigned int cpu)
+static int cpufreq_stats_update(struct cpufreq_stats *stats)
{
- struct cpufreq_stats *stat;
- unsigned long long cur_time;
+ unsigned long long cur_time = get_jiffies_64();
- cur_time = get_jiffies_64();
spin_lock(&cpufreq_stats_lock);
- stat = per_cpu(cpufreq_stats_table, cpu);
- if (stat->time_in_state)
- stat->time_in_state[stat->last_index] +=
- cur_time - stat->last_time;
- stat->last_time = cur_time;
+ stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
+ stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (!stat)
- return 0;
- return sprintf(buf, "%d\n",
- per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
+ return sprintf(buf, "%d\n", policy->stats->total_trans);
}
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
+ struct cpufreq_stats *stats = policy->stats;
ssize_t len = 0;
int i;
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (!stat)
- return 0;
- cpufreq_stats_update(stat->cpu);
- for (i = 0; i < stat->state_num; i++) {
- len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
+
+ cpufreq_stats_update(stats);
+ for (i = 0; i < stats->state_num; i++) {
+ len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
(unsigned long long)
- jiffies_64_to_clock_t(stat->time_in_state[i]));
+ jiffies_64_to_clock_t(stats->time_in_state[i]));
}
return len;
}
@@ -82,38 +64,35 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
+ struct cpufreq_stats *stats = policy->stats;
ssize_t len = 0;
int i, j;
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (!stat)
- return 0;
- cpufreq_stats_update(stat->cpu);
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
- for (i = 0; i < stat->state_num; i++) {
+ for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stat->freq_table[i]);
+ stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
- for (i = 0; i < stat->state_num; i++) {
+ for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
- stat->freq_table[i]);
+ stats->freq_table[i]);
- for (j = 0; j < stat->state_num; j++) {
+ for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stat->trans_table[i*stat->max_state+j]);
+ stats->trans_table[i*stats->max_state+j]);
}
if (len >= PAGE_SIZE)
break;
@@ -142,28 +121,29 @@ static struct attribute_group stats_attr_group = {
.name = "stats"
};
-static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
+static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
{
int index;
- for (index = 0; index < stat->max_state; index++)
- if (stat->freq_table[index] == freq)
+ for (index = 0; index < stats->max_state; index++)
+ if (stats->freq_table[index] == freq)
return index;
return -1;
}
static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
+ struct cpufreq_stats *stats = policy->stats;
- if (!stat)
+ /* Already freed */
+ if (!stats)
return;
- pr_debug("%s: Free stat table\n", __func__);
+ pr_debug("%s: Free stats table\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
- kfree(stat->time_in_state);
- kfree(stat);
- per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
+ kfree(stats->time_in_state);
+ kfree(stats);
+ policy->stats = NULL;
}
static void cpufreq_stats_free_table(unsigned int cpu)
@@ -174,37 +154,33 @@ static void cpufreq_stats_free_table(unsigned int cpu)
if (!policy)
return;
- if (cpufreq_frequency_get_table(policy->cpu))
- __cpufreq_stats_free_table(policy);
+ __cpufreq_stats_free_table(policy);
cpufreq_cpu_put(policy);
}
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
- unsigned int i, count = 0, ret = 0;
- struct cpufreq_stats *stat;
+ unsigned int i = 0, count = 0, ret = -ENOMEM;
+ struct cpufreq_stats *stats;
unsigned int alloc_size;
unsigned int cpu = policy->cpu;
struct cpufreq_frequency_table *pos, *table;
+ /* We need cpufreq table for creating stats table */
table = cpufreq_frequency_get_table(cpu);
if (unlikely(!table))
return 0;
- if (per_cpu(cpufreq_stats_table, cpu))
- return -EBUSY;
- stat = kzalloc(sizeof(*stat), GFP_KERNEL);
- if ((stat) == NULL)
- return -ENOMEM;
-
- ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
- if (ret)
- goto error_out;
+ /* stats already initialized */
+ if (policy->stats)
+ return -EEXIST;
- stat->cpu = cpu;
- per_cpu(cpufreq_stats_table, cpu) = stat;
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+ /* Find total allocation size */
cpufreq_for_each_valid_entry(pos, table)
count++;
@@ -213,32 +189,40 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
#endif
- stat->max_state = count;
- stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
- if (!stat->time_in_state) {
- ret = -ENOMEM;
- goto error_alloc;
- }
- stat->freq_table = (unsigned int *)(stat->time_in_state + count);
+
+ /* Allocate memory for time_in_state/freq_table/trans_table in one go */
+ stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+ if (!stats->time_in_state)
+ goto free_stat;
+
+ stats->freq_table = (unsigned int *)(stats->time_in_state + count);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- stat->trans_table = stat->freq_table + count;
+ stats->trans_table = stats->freq_table + count;
#endif
- i = 0;
+
+ stats->max_state = count;
+
+ /* Find valid-unique entries */
cpufreq_for_each_valid_entry(pos, table)
- if (freq_table_get_index(stat, pos->frequency) == -1)
- stat->freq_table[i++] = pos->frequency;
- stat->state_num = i;
- spin_lock(&cpufreq_stats_lock);
- stat->last_time = get_jiffies_64();
- stat->last_index = freq_table_get_index(stat, policy->cur);
- spin_unlock(&cpufreq_stats_lock);
- return 0;
-error_alloc:
- sysfs_remove_group(&policy->kobj, &stats_attr_group);
-error_out:
- kfree(stat);
- per_cpu(cpufreq_stats_table, cpu) = NULL;
+ if (freq_table_get_index(stats, pos->frequency) == -1)
+ stats->freq_table[i++] = pos->frequency;
+
+ stats->state_num = i;
+ stats->last_time = get_jiffies_64();
+ stats->last_index = freq_table_get_index(stats, policy->cur);
+
+ policy->stats = stats;
+ ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
+ if (!ret)
+ return 0;
+
+ /* We failed, release resources */
+ policy->stats = NULL;
+ kfree(stats->time_in_state);
+free_stat:
+ kfree(stats);
+
return ret;
}
@@ -259,30 +243,12 @@ static void cpufreq_stats_create_table(unsigned int cpu)
cpufreq_cpu_put(policy);
}
-static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
-{
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
- policy->last_cpu);
-
- pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
- policy->cpu, policy->last_cpu);
- per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
- policy->last_cpu);
- per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
- stat->cpu = policy->cpu;
-}
-
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
int ret = 0;
struct cpufreq_policy *policy = data;
- if (val == CPUFREQ_UPDATE_POLICY_CPU) {
- cpufreq_stats_update_policy_cpu(policy);
- return 0;
- }
-
if (val == CPUFREQ_CREATE_POLICY)
ret = __cpufreq_stats_create_table(policy);
else if (val == CPUFREQ_REMOVE_POLICY)
@@ -295,35 +261,45 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
- struct cpufreq_stats *stat;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
+ struct cpufreq_stats *stats;
int old_index, new_index;
- if (val != CPUFREQ_POSTCHANGE)
+ if (!policy) {
+ pr_err("%s: No policy found\n", __func__);
return 0;
+ }
- stat = per_cpu(cpufreq_stats_table, freq->cpu);
- if (!stat)
- return 0;
+ if (val != CPUFREQ_POSTCHANGE)
+ goto put_policy;
- old_index = stat->last_index;
- new_index = freq_table_get_index(stat, freq->new);
+ if (!policy->stats) {
+ pr_debug("%s: No stats found\n", __func__);
+ goto put_policy;
+ }
- /* We can't do stat->time_in_state[-1]= .. */
- if (old_index == -1 || new_index == -1)
- return 0;
+ stats = policy->stats;
+
+ old_index = stats->last_index;
+ new_index = freq_table_get_index(stats, freq->new);
- cpufreq_stats_update(freq->cpu);
+ /* We can't do stats->time_in_state[-1]= .. */
+ if (old_index == -1 || new_index == -1)
+ goto put_policy;
if (old_index == new_index)
- return 0;
+ goto put_policy;
- spin_lock(&cpufreq_stats_lock);
- stat->last_index = new_index;
+ cpufreq_stats_update(stats);
+
+ stats->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- stat->trans_table[old_index * stat->max_state + new_index]++;
+ stats->trans_table[old_index * stats->max_state + new_index]++;
#endif
- stat->total_trans++;
- spin_unlock(&cpufreq_stats_lock);
+ stats->total_trans++;
+
+put_policy:
+ cpufreq_cpu_put(policy);
return 0;
}
@@ -374,8 +350,7 @@ static void __exit cpufreq_stats_exit(void)
}
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
- "through sysfs filesystem");
+MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
MODULE_LICENSE("GPL");
module_init(cpufreq_stats_init);
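The stats rework above keeps the single-allocation trick: one kzalloc() holds time_in_state, then freq_table, then (with CONFIG_CPU_FREQ_STAT_DETAILS) trans_table, carved out by pointer arithmetic. A standalone sketch of that carving, with an assumed five-entry frequency table:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int count = 5;				/* assumed P-state count */
	size_t sz = count * sizeof(uint64_t)		/* time_in_state */
		  + count * sizeof(uint32_t)		/* freq_table */
		  + count * count * sizeof(uint32_t);	/* trans_table */
	uint64_t *time_in_state = calloc(1, sz);
	uint32_t *freq_table = (uint32_t *)(time_in_state + count);
	uint32_t *trans_table = freq_table + count;

	printf("%zu bytes, trans_table at offset %zu\n",
	       sz, (size_t)((char *)trans_table - (char *)time_in_state));
	free(time_in_state);
	return 0;
}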
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 742eefba12c2..872c5772c5d3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -148,6 +148,8 @@ struct perf_limits {
int32_t min_perf;
int max_policy_pct;
int max_sysfs_pct;
+ int min_policy_pct;
+ int min_sysfs_pct;
};
static struct perf_limits limits = {
@@ -159,6 +161,8 @@ static struct perf_limits limits = {
.min_perf = 0,
.max_policy_pct = 100,
.max_sysfs_pct = 100,
+ .min_policy_pct = 0,
+ .min_sysfs_pct = 0,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -338,6 +342,33 @@ static void __init intel_pstate_debug_expose_params(void)
return sprintf(buf, "%u\n", limits.object); \
}
+static ssize_t show_turbo_pct(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cpudata *cpu;
+ int total, no_turbo, turbo_pct;
+ uint32_t turbo_fp;
+
+ cpu = all_cpu_data[0];
+
+ total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+ no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
+ turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+ turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
+ return sprintf(buf, "%u\n", turbo_pct);
+}
+
+static ssize_t show_num_pstates(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cpudata *cpu;
+ int total;
+
+ cpu = all_cpu_data[0];
+ total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+ return sprintf(buf, "%u\n", total);
+}
+
static ssize_t show_no_turbo(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -404,7 +435,9 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- limits.min_perf_pct = clamp_t(int, input, 0 , 100);
+
+ limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+ limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
if (hwp_active)
@@ -418,11 +451,15 @@ show_one(min_perf_pct, min_perf_pct);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
+define_one_global_ro(turbo_pct);
+define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
&no_turbo.attr,
&max_perf_pct.attr,
&min_perf_pct.attr,
+ &turbo_pct.attr,
+ &num_pstates.attr,
NULL
};
@@ -825,6 +862,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x46, core_params),
ICPU(0x47, core_params),
ICPU(0x4c, byt_params),
+ ICPU(0x4e, core_params),
ICPU(0x4f, core_params),
ICPU(0x56, core_params),
{}
@@ -887,7 +925,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
+ policy->max >= policy->cpuinfo.max_freq) {
+ limits.min_policy_pct = 100;
limits.min_perf_pct = 100;
limits.min_perf = int_tofp(1);
limits.max_policy_pct = 100;
@@ -897,8 +937,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
return 0;
}
- limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
- limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
+ limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+ limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
+ limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
@@ -978,6 +1019,7 @@ static struct cpufreq_driver intel_pstate_driver = {
static int __initdata no_load;
static int __initdata no_hwp;
+static int __initdata hwp_only;
static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
@@ -1175,6 +1217,9 @@ static int __init intel_pstate_init(void)
if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
intel_pstate_hwp_enable();
+ if (!hwp_active && hwp_only)
+ goto out;
+
rc = cpufreq_register_driver(&intel_pstate_driver);
if (rc)
goto out;
@@ -1209,6 +1254,8 @@ static int __init intel_pstate_setup(char *str)
no_hwp = 1;
if (!strcmp(str, "force"))
force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+ hwp_only = 1;
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
index 25fbd6a1374f..f0913eee2f50 100644
--- a/drivers/cpufreq/ls1x-cpufreq.c
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -210,7 +210,6 @@ out:
static struct platform_driver ls1x_cpufreq_platdrv = {
.driver = {
.name = "ls1x-cpufreq",
- .owner = THIS_MODULE,
},
.probe = ls1x_cpufreq_probe,
.remove = ls1x_cpufreq_remove,
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644
index 000000000000..ffa3389e535b
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -0,0 +1,136 @@
+/*
+ * SFI Performance States Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sfi.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <asm/msr.h>
+
+struct cpufreq_frequency_table *freq_table;
+static struct sfi_freq_table_entry *sfi_cpufreq_array;
+static int num_freq_table_entries;
+
+static int sfi_parse_freq(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_freq_table_entry *pentry;
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+ num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
+ struct sfi_freq_table_entry);
+ if (num_freq_table_entries <= 1) {
+ pr_err("No p-states discovered\n");
+ return -ENODEV;
+ }
+
+ pentry = (struct sfi_freq_table_entry *)sb->pentry;
+ totallen = num_freq_table_entries * sizeof(*pentry);
+
+ sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+ if (!sfi_cpufreq_array)
+ return -ENOMEM;
+
+ memcpy(sfi_cpufreq_array, pentry, totallen);
+
+ return 0;
+}
+
+static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int next_perf_state = 0; /* Index into perf table */
+ u32 lo, hi;
+
+ next_perf_state = policy->freq_table[index].driver_data;
+
+ rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
+ lo = (lo & ~INTEL_PERF_CTL_MASK) |
+ ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
+ INTEL_PERF_CTL_MASK);
+ wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
+
+ return 0;
+}
+
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ policy->cpuinfo.transition_latency = 100000; /* 100us */
+
+ return cpufreq_table_validate_and_show(policy, freq_table);
+}
+
+static struct cpufreq_driver sfi_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sfi_cpufreq_target,
+ .init = sfi_cpufreq_cpu_init,
+ .name = "sfi-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init sfi_cpufreq_init(void)
+{
+ int ret, i;
+
+ /* parse the freq table from SFI */
+ ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
+ if (ret)
+ return ret;
+
+ freq_table = kzalloc(sizeof(*freq_table) *
+ (num_freq_table_entries + 1), GFP_KERNEL);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto err_free_array;
+ }
+
+ for (i = 0; i < num_freq_table_entries; i++) {
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
+ }
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ ret = cpufreq_register_driver(&sfi_cpufreq_driver);
+ if (ret)
+ goto err_free_tbl;
+
+ return ret;
+
+err_free_tbl:
+ kfree(freq_table);
+err_free_array:
+ kfree(sfi_cpufreq_array);
+ return ret;
+}
+late_initcall(sfi_cpufreq_init);
+
+static void __exit sfi_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&sfi_cpufreq_driver);
+ kfree(freq_table);
+ kfree(sfi_cpufreq_array);
+}
+module_exit(sfi_cpufreq_exit);
+
+MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
+MODULE_DESCRIPTION("SFI Performance-States Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 7047821a7f8a..4ab7a2156672 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
pr_debug("previous speed is %u\n", prev_speed);
+ preempt_disable();
local_irq_save(flags);
/* switch to low state */
@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
out:
local_irq_restore(flags);
+ preempt_enable();
+
return ret;
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs);
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 5fc96d5d656b..819229e824fb 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
return;
/* Disable IRQs */
+ preempt_disable();
local_irq_save(flags);
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
do {
if (retry) {
+ /*
+ * We need to enable interrupts, otherwise the blockage
+ * won't resolve.
+ *
+ * We disable preemption so that other processes don't
+ * run. If other processes were running, they could
+ * submit more DMA requests, making the blockage worse.
+ */
pr_debug("retry %u, previous result %u, waiting...\n",
retry, result);
+ local_irq_enable();
mdelay(retry * 50);
+ local_irq_disable();
}
retry++;
__asm__ __volatile__(
@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
/* enable IRQs */
local_irq_restore(flags);
+ preempt_enable();
if (new_state == state)
pr_debug("change to %u MHz succeeded after %u tries "
diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64
index d0a08ed1b2ee..6effb3656735 100644
--- a/drivers/cpuidle/Kconfig.arm64
+++ b/drivers/cpuidle/Kconfig.arm64
@@ -4,7 +4,6 @@
config ARM64_CPUIDLE
bool "Generic ARM64 CPU idle Driver"
- select ARM64_CPU_SUSPEND
select DT_IDLE_STATES
help
Select this to enable generic cpuidle driver for ARM64.
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c
index 80704b931ba4..39a2c62716c3 100644
--- a/drivers/cpuidle/cpuidle-arm64.c
+++ b/drivers/cpuidle/cpuidle-arm64.c
@@ -19,7 +19,6 @@
#include <linux/of.h>
#include <asm/cpuidle.h>
-#include <asm/suspend.h>
#include "dt_idle_states.h"
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index e3e225fe6b45..40c34faffe59 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -182,6 +182,10 @@ static int __init bl_idle_init(void)
*/
if (!of_match_node(compatible_machine_match, root))
return -ENODEV;
+
+ if (!mcpm_is_available())
+ return -EUNATCH;
+
/*
* For now the differentiation between little and big cores
* is based on the part number. A7 cores are considered little
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
index de8a7a48775a..69182e2cc3ea 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -34,29 +34,6 @@
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
-u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx)
-{
- u32 offset;
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
- offset = cts.bf.key_size
- + cts.bf.inner_size
- + cts.bf.outer_size
- + cts.bf.spi
- + cts.bf.seq_num0
- + cts.bf.seq_num1
- + cts.bf.seq_num_mask0
- + cts.bf.seq_num_mask1
- + cts.bf.seq_num_mask2
- + cts.bf.seq_num_mask3;
-
- return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
{
u32 offset;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 53d1c330f8a8..6597aac9905d 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -673,9 +673,9 @@ err_map_out:
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
DMA_TO_DEVICE);
err_map_in:
+err_alloc:
free_page((unsigned long)dd->buf_out);
free_page((unsigned long)dd->buf_in);
-err_alloc:
if (err)
pr_err("error: %d\n", err);
return err;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index d94f07c78e19..34db04addc18 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -102,10 +102,6 @@ struct atmel_sha_ctx {
struct atmel_sha_dev *dd;
unsigned long flags;
-
- /* fallback stuff */
- struct crypto_shash *fallback;
-
};
#define ATMEL_SHA_QUEUE_LENGTH 50
@@ -974,19 +970,8 @@ static int atmel_sha_digest(struct ahash_request *req)
return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
-static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
- const char *alg_name = crypto_tfm_alg_name(tfm);
-
- /* Allocate a fallback and abort if it failed. */
- tctx->fallback = crypto_alloc_shash(alg_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(tctx->fallback)) {
- pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
- alg_name);
- return PTR_ERR(tctx->fallback);
- }
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct atmel_sha_reqctx) +
SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
@@ -994,19 +979,6 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return 0;
}
-static int atmel_sha_cra_init(struct crypto_tfm *tfm)
-{
- return atmel_sha_cra_init_alg(tfm, NULL);
-}
-
-static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
-{
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(tctx->fallback);
- tctx->fallback = NULL;
-}
-
static struct ahash_alg sha_1_256_algs[] = {
{
.init = atmel_sha_init,
@@ -1020,14 +992,12 @@ static struct ahash_alg sha_1_256_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "atmel-sha1",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
@@ -1043,14 +1013,12 @@ static struct ahash_alg sha_1_256_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "atmel-sha256",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
@@ -1068,14 +1036,12 @@ static struct ahash_alg sha_224_alg = {
.cra_name = "sha224",
.cra_driver_name = "atmel-sha224",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
};
@@ -1093,14 +1059,12 @@ static struct ahash_alg sha_384_512_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "atmel-sha384",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0x3,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
@@ -1116,14 +1080,12 @@ static struct ahash_alg sha_384_512_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "atmel-sha512",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0x3,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 5e7c896cde30..258772d9b22f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -376,9 +376,9 @@ err_map_out:
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
DMA_TO_DEVICE);
err_map_in:
+err_alloc:
free_page((unsigned long)dd->buf_out);
free_page((unsigned long)dd->buf_in);
-err_alloc:
if (err)
pr_err("error: %d\n", err);
return err;
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 9ae149bddb6e..d9af9403ab6c 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -110,7 +110,7 @@ static int sg_count(struct scatterlist *sg_list)
while (!sg_is_last(sg)) {
sg_nents++;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return sg_nents;
@@ -744,7 +744,7 @@ static int __init bfin_crypto_crc_mod_init(void)
ret = platform_driver_register(&bfin_crypto_crc_driver);
if (ret) {
- pr_info(KERN_ERR "unable to register driver\n");
+ pr_err("unable to register driver\n");
return ret;
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 3187400daf31..29071a156cbe 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2532,7 +2532,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ sec4_sg_index += edesc->src_nents + 1;
in_options = LDST_SGF;
}
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -2714,10 +2714,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (!all_contig) {
if (!is_gcm) {
sg_to_sec4_sg(req->assoc,
- (assoc_nents ? : 1),
+ assoc_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
- sec4_sg_index += assoc_nents ? : 1;
+ sec4_sg_index += assoc_nents;
}
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
@@ -2726,17 +2726,17 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (is_gcm) {
sg_to_sec4_sg(req->assoc,
- (assoc_nents ? : 1),
+ assoc_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
- sec4_sg_index += assoc_nents ? : 1;
+ sec4_sg_index += assoc_nents;
}
sg_to_sec4_sg_last(req->src,
- (src_nents ? : 1),
+ src_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
- sec4_sg_index += src_nents ? : 1;
+ sec4_sg_index += src_nents;
}
if (dst_nents) {
sg_to_sec4_sg_last(req->dst, dst_nents,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 70f1e6f37336..efba4ccd4fac 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -175,13 +175,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl;
- struct rng4tst __iomem *r4tst;
u32 *desc, status, rdsta_val;
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- r4tst = &ctrl->r4tst[0];
-
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -209,8 +206,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
- rdsta_val =
- rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if (status || !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 66d73bf54166..33e41ea83fcc 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -151,10 +151,15 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
else
snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
- dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
- status, error, idx_str, idx,
- cha_str, cha_err_code,
- err_str, err_err_code);
+ /*
+ * CCB ICV check failures are a normal part of operation;
+ * we leave the upper layers to do what they want with them.
+ */
+ if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+ status, error, idx_str, idx,
+ cha_str, cha_err_code,
+ err_str, err_err_code);
}
static void report_jump_status(struct device *jrdev, const u32 status,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9b3ef1bc9bd7..b8b5d47acd7a 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -384,30 +384,28 @@ static int caam_jr_init(struct device *dev)
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
- irq_dispose_mapping(jrp->irq);
- jrp->irq = 0;
- return -EINVAL;
+ goto out_kill_deq;
}
error = caam_reset_hw_jr(dev);
if (error)
- return error;
+ goto out_free_irq;
+ error = -ENOMEM;
jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
&inpbusaddr, GFP_KERNEL);
+ if (!jrp->inpring)
+ goto out_free_irq;
jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+ if (!jrp->outring)
+ goto out_free_inpring;
jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
GFP_KERNEL);
-
- if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
- (jrp->entinfo == NULL)) {
- dev_err(dev, "can't allocate job rings for %d\n",
- jrp->ridx);
- return -ENOMEM;
- }
+ if (!jrp->entinfo)
+ goto out_free_outring;
for (i = 0; i < JOBR_DEPTH; i++)
jrp->entinfo[i].desc_addr_dma = !0;
@@ -434,6 +432,19 @@ static int caam_jr_init(struct device *dev)
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
return 0;
+
+out_free_outring:
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+out_free_inpring:
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+out_free_irq:
+ free_irq(jrp->irq, dev);
+out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
+ return error;
}
@@ -484,8 +495,10 @@ static int caam_jr_probe(struct platform_device *pdev)
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
- if (error)
+ if (error) {
+ irq_dispose_mapping(jrpriv->irq);
return error;
+ }
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index ce28a563effc..3b918218aa4c 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -37,7 +37,7 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
sg_dma_len(sg), offset);
sec4_sg_ptr++;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
sg_count--;
}
return sec4_sg_ptr - 1;
@@ -67,7 +67,7 @@ static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
*chained = true;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return sg_nents;
@@ -93,7 +93,7 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
int i;
for (i = 0; i < nents; i++) {
dma_map_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
} else {
dma_map_sg(dev, sg, nents, dir);
@@ -109,7 +109,7 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
int i;
for (i = 0; i < nents; i++) {
dma_unmap_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
} else {
dma_unmap_sg(dev, sg, nents, dir);
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c6e6171eb6d3..ca29c120b85f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -583,6 +583,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
+ { },
};
#endif
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f757a0f428bd..48f453555f1f 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -784,7 +784,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
struct buffer_desc *buf, gfp_t flags,
enum dma_data_direction dir)
{
- for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+ for (; nbytes > 0; sg = sg_next(sg)) {
unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
u32 next_buf_phys;
@@ -982,7 +982,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
break;
offset += sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return (start + nbytes > offset + sg->length);
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index a392465d3e3f..1da6dc59d0dd 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -177,7 +177,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
break;
offset += sg_src->length;
- sg_src = scatterwalk_sg_next(sg_src);
+ sg_src = sg_next(sg_src);
}
/* start - offset is the number of bytes to advance in the scatterlist
@@ -187,9 +187,9 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
while (len && (nx_sg - nx_dst) < sglen) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
- /* In cases where we have scatterlist chain scatterwalk_sg_next
+ /* In cases where we have scatterlist chain sg_next
* handles it properly */
- scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
+ scatterwalk_start(&walk, sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
dst = scatterwalk_map(&walk);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index f79dd410dede..42f95a4326b0 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -994,7 +994,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
scatterwalk_advance(&dd->in_walk, 4);
if (dd->in_sg->length == _calc_walked(in)) {
- dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+ dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
scatterwalk_start(&dd->in_walk,
dd->in_sg);
@@ -1026,7 +1026,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
scatterwalk_advance(&dd->out_walk, 4);
if (dd->out_sg->length == _calc_walked(out)) {
- dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+ dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
scatterwalk_start(&dd->out_walk,
dd->out_sg);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index e350f5be4d2e..46307098f8ba 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -921,7 +921,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
scatterwalk_advance(&dd->in_walk, 4);
if (dd->in_sg->length == _calc_walked(in)) {
- dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+ dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
scatterwalk_start(&dd->in_walk,
dd->in_sg);
@@ -953,7 +953,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
scatterwalk_advance(&dd->out_walk, 4);
if (dd->out_sg->length == _calc_walked(out)) {
- dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+ dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
scatterwalk_start(&dd->out_walk,
dd->out_sg);
@@ -965,9 +965,9 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
}
}
- dd->total -= DES_BLOCK_SIZE;
+ BUG_ON(dd->total < DES_BLOCK_SIZE);
- BUG_ON(dd->total < 0);
+ dd->total -= DES_BLOCK_SIZE;
/* Clear IRQ status */
status &= ~DES_REG_IRQ_DATA_OUT;
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 2ed425664a16..19c0efa29ab3 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -47,7 +47,6 @@
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/module.h>
-#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
@@ -148,6 +147,11 @@ struct adf_hw_device_data {
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+ int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+ void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+ int (*init_arb)(struct adf_accel_dev *accel_dev);
+ void (*exit_arb)(struct adf_accel_dev *accel_dev);
+ void (*enable_ints)(struct adf_accel_dev *accel_dev);
const char *fw_name;
uint32_t pci_dev_id;
uint32_t fuses;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 10ce4a2854ab..fa1fef824de2 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -82,28 +82,15 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
-#define PPDSTAT_OFFSET 0x7E
static void adf_dev_restore(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
- uint16_t ppdstat = 0, bridge_ctl = 0;
- int pending = 0;
+ uint16_t bridge_ctl = 0;
pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
- pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
- pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
- if (pending) {
- int ctr = 0;
-
- do {
- msleep(100);
- pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
- pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
- } while (pending && ctr++ < 10);
- }
- if (pending)
+ if (!pci_wait_for_pending_transaction(pdev))
pr_info("QAT: Transaction still in progress. Proceeding\n");
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
@@ -125,8 +112,9 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
adf_dev_restore(accel_dev);
- if (adf_dev_start(accel_dev)) {
+ if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hung and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
kfree(reset_data);
@@ -148,8 +136,8 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
{
struct adf_reset_dev_data *reset_data;
- if (adf_dev_started(accel_dev) &&
- !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ if (!adf_dev_started(accel_dev) ||
+ test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index aba7f1d043fb..de16da9070a5 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -50,6 +50,7 @@
#include <linux/seq_file.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
+#include "adf_common_drv.h"
static DEFINE_MUTEX(qat_cfg_read_lock);
@@ -159,6 +160,7 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
+ clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
}
/**
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5e8f9d431e5d..a62e485c8786 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -93,7 +93,7 @@ int adf_service_unregister(struct service_hndl *service);
int adf_dev_init(struct adf_accel_dev *accel_dev);
int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
-int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 7ee93f881db6..74207a6f0516 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -282,6 +282,8 @@ static int adf_ctl_stop_devices(uint32_t id)
if (adf_dev_stop(accel_dev)) {
pr_err("QAT: Failed to stop qat_dev%d\n", id);
ret = -EFAULT;
+ } else {
+ adf_dev_shutdown(accel_dev);
}
}
}
@@ -343,7 +345,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (!adf_dev_started(accel_dev)) {
pr_info("QAT: Starting acceleration device qat_dev%d.\n",
ctl_data->device_id);
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_init(accel_dev);
+ if (!ret)
+ ret = adf_dev_start(accel_dev);
} else {
pr_info("QAT: Acceleration device qat_dev%d already started.\n",
ctl_data->device_id);
@@ -351,6 +355,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (ret) {
pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
}
out:
kfree(ctl_data);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 5c0e47a00a87..8f0ca498ab87 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -108,26 +108,47 @@ int adf_service_unregister(struct service_hndl *service)
EXPORT_SYMBOL_GPL(adf_service_unregister);
/**
- * adf_dev_start() - Start acceleration service for the given accel device
- * @accel_dev: Pointer to acceleration device.
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
*
- * Function notifies all the registered services that the acceleration device
- * is ready to be used.
- * To be used by QAT device specific drivers.
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
*
* Return: 0 on success, error code otherwise.
*/
-int adf_dev_start(struct adf_accel_dev *accel_dev)
+int adf_dev_init(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ if (!hw_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to init device - hw_data not set\n");
+ return -EFAULT;
+ }
+
if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
pr_info("QAT: Device not configured\n");
return -EFAULT;
}
- set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+ if (adf_init_etr_data(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+ return -EFAULT;
+ }
+
+ hw_data->enable_ints(accel_dev);
if (adf_ae_init(accel_dev)) {
pr_err("QAT: Failed to initialise Acceleration Engine\n");
@@ -178,6 +199,27 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
hw_data->enable_error_correction(accel_dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_init);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
if (adf_ae_start(accel_dev)) {
pr_err("QAT: AE Start Failed\n");
return -EFAULT;
@@ -232,16 +274,15 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
*/
int adf_dev_stop(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
- int ret, wait = 0;
+ bool wait = false;
+ int ret;
if (!adf_dev_started(accel_dev) &&
!test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
return 0;
}
- clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -258,7 +299,7 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!ret) {
clear_bit(accel_dev->accel_id, &service->start_status);
} else if (ret == -EAGAIN) {
- wait = 1;
+ wait = true;
clear_bit(accel_dev->accel_id, &service->start_status);
}
}
@@ -278,13 +319,36 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (wait)
msleep(100);
- if (adf_dev_started(accel_dev)) {
+ if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
if (adf_ae_stop(accel_dev))
pr_err("QAT: failed to stop AE\n");
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+/**
+ * adf_dev_shutdown() - shut down acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Clean up the ring data structures and the admin comms and arbitration
+ * services.
+ */
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ if (!hw_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to shutdown device - hw_data not set\n");
+ return;
+ }
+
if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
if (adf_ae_fw_release(accel_dev))
pr_err("QAT: Failed to release the ucode\n");
@@ -335,9 +399,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
adf_cfg_del_all(accel_dev);
- return 0;
+ if (hw_data->exit_arb)
+ hw_data->exit_arb(accel_dev);
+
+ if (hw_data->exit_admin_comms)
+ hw_data->exit_admin_comms(accel_dev);
+
+ adf_cleanup_etr_data(accel_dev);
}
-EXPORT_SYMBOL_GPL(adf_dev_stop);
+EXPORT_SYMBOL_GPL(adf_dev_shutdown);
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
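The adf_init.c hunks above split the old adf_dev_start() into an init/start pair and add adf_dev_shutdown() as the counterpart of adf_dev_stop(), which the adf_ctl_drv.c changes exercise. A minimal sketch of the new pairing as a device-specific driver might use it; bring_up() and tear_down() are hypothetical wrappers, not part of the patch:

#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int bring_up(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_init(accel_dev);	/* rings, admin comms, arbiter, ints */
	if (ret)
		return ret;

	ret = adf_dev_start(accel_dev);	/* notify registered services */
	if (ret) {
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}
	return ret;
}

static void tear_down(struct adf_accel_dev *accel_dev)
{
	adf_dev_stop(accel_dev);	/* stop services first */
	adf_dev_shutdown(accel_dev);	/* then release rings, ucode and arbiter */
}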
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index c40546079981..a4869627fd57 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -48,7 +48,6 @@
#define ADF_TRANSPORT_INTRN_H
#include <linux/interrupt.h>
-#include <linux/atomic.h>
#include <linux/spinlock_types.h>
#include "adf_transport.h"
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index 5031f8c10d75..68f191b653b0 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -301,5 +301,5 @@ struct icp_qat_hw_cipher_aes256_f8 {
struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_cipher_aes256_f8 aes;
-};
+} __aligned(64);
#endif
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 19eea1c832ac..1dc5b0a17cf7 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -63,15 +63,15 @@
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
-#define QAT_AES_HW_CONFIG_ENC(alg) \
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_NO_CONVERT, \
- ICP_QAT_HW_CIPHER_ENCRYPT)
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
-#define QAT_AES_HW_CONFIG_DEC(alg) \
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- ICP_QAT_HW_CIPHER_DECRYPT)
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
static atomic_t active_dev;
@@ -102,25 +102,31 @@ struct qat_alg_cd {
};
} __aligned(64);
-#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
-
-struct qat_auth_state {
- uint8_t data[MAX_AUTH_STATE_SIZE + 64];
-} __aligned(64);
-
-struct qat_alg_session_ctx {
+struct qat_alg_aead_ctx {
struct qat_alg_cd *enc_cd;
- dma_addr_t enc_cd_paddr;
struct qat_alg_cd *dec_cd;
+ dma_addr_t enc_cd_paddr;
dma_addr_t dec_cd_paddr;
- struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
- struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
- struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
+ struct icp_qat_fw_la_bulk_req enc_fw_req;
+ struct icp_qat_fw_la_bulk_req dec_fw_req;
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ struct qat_crypto_instance *inst;
+ struct crypto_tfm *tfm;
uint8_t salt[AES_BLOCK_SIZE];
- spinlock_t lock; /* protects qat_alg_session_ctx struct */
+ spinlock_t lock; /* protects qat_alg_aead_ctx struct */
+};
+
+struct qat_alg_ablkcipher_ctx {
+ struct icp_qat_hw_cipher_algo_blk *enc_cd;
+ struct icp_qat_hw_cipher_algo_blk *dec_cd;
+ dma_addr_t enc_cd_paddr;
+ dma_addr_t dec_cd_paddr;
+ struct icp_qat_fw_la_bulk_req enc_fw_req;
+ struct icp_qat_fw_la_bulk_req dec_fw_req;
+ struct qat_crypto_instance *inst;
+ struct crypto_tfm *tfm;
+ spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
static int get_current_node(void)
@@ -144,43 +150,37 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
}
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- struct qat_alg_session_ctx *ctx,
+ struct qat_alg_aead_ctx *ctx,
const uint8_t *auth_key,
unsigned int auth_keylen)
{
- struct qat_auth_state auth_state;
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
struct sha1_state sha1;
struct sha256_state sha256;
struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- uint8_t *ipad = auth_state.data;
- uint8_t *opad = ipad + block_size;
+ char ipad[block_size];
+ char opad[block_size];
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
- memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
if (auth_keylen > block_size) {
- char buff[SHA512_BLOCK_SIZE];
int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, buff);
+ auth_keylen, ipad);
if (ret)
return ret;
- memcpy(ipad, buff, digest_size);
- memcpy(opad, buff, digest_size);
- memzero_explicit(ipad + digest_size, block_size - digest_size);
- memzero_explicit(opad + digest_size, block_size - digest_size);
+ memcpy(opad, ipad, digest_size);
} else {
memcpy(ipad, auth_key, auth_keylen);
memcpy(opad, auth_key, auth_keylen);
- memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
- memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
}
for (i = 0; i < block_size; i++) {
@@ -267,8 +267,6 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_SGL);
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_PARTIAL_NONE);
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
@@ -279,8 +277,9 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
-static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
- int alg, struct crypto_authenc_keys *keys)
+static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+ int alg,
+ struct crypto_authenc_keys *keys)
{
struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
@@ -289,7 +288,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
struct icp_qat_hw_auth_algo_blk *hash =
(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
- struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
@@ -297,7 +296,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* CD setup */
- cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -311,6 +310,8 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
/* Request setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
@@ -356,8 +357,9 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
return 0;
}
-static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
- int alg, struct crypto_authenc_keys *keys)
+static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+ int alg,
+ struct crypto_authenc_keys *keys)
{
struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
@@ -367,7 +369,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
- struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
@@ -379,7 +381,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup */
- cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -394,6 +396,8 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
/* Request setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
@@ -444,36 +448,91 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
return 0;
}
-static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
- const uint8_t *key, unsigned int keylen)
+static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
{
- struct crypto_authenc_keys keys;
- int alg;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
- if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
- return -EFAULT;
+ memcpy(cd->aes.key, key, keylen);
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cd_pars->u.s.content_desc_params_sz =
+ sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+ /* Cipher CD config setup */
+ cd_ctrl->cipher_key_sz = keylen >> 3;
+ cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
- if (crypto_authenc_extractkeys(&keys, key, keylen))
- goto bad_key;
+static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+ struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+ enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+}
- switch (keys.enckeylen) {
+static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+ struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+ dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+}
+
+static int qat_alg_validate_key(int key_len, int *alg)
+{
+ switch (key_len) {
case AES_KEYSIZE_128:
- alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
break;
case AES_KEYSIZE_192:
- alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
break;
case AES_KEYSIZE_256:
- alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
break;
default:
- goto bad_key;
+ return -EINVAL;
}
+ return 0;
+}
- if (qat_alg_init_enc_session(ctx, alg, &keys))
+static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int alg;
+
+ if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
+ return -EFAULT;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ if (qat_alg_validate_key(keys.enckeylen, &alg))
+ goto bad_key;
+
+ if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
goto error;
- if (qat_alg_init_dec_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
goto error;
return 0;
@@ -484,22 +543,37 @@ error:
return -EFAULT;
}
-static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
- unsigned int keylen)
+static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
+ const uint8_t *key,
+ unsigned int keylen)
+{
+ int alg;
+
+ if (qat_alg_validate_key(keylen, &alg))
+ goto bad_key;
+
+ qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
+ qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
+ return 0;
+bad_key:
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
{
- struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev;
spin_lock(&ctx->lock);
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
- memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
- memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
- memzero_explicit(&ctx->enc_fw_req_tmpl,
- sizeof(struct icp_qat_fw_la_bulk_req));
- memzero_explicit(&ctx->dec_fw_req_tmpl,
- sizeof(struct icp_qat_fw_la_bulk_req));
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
} else {
/* new key */
int node = get_current_node();
@@ -512,16 +586,14 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev,
- sizeof(struct qat_alg_cd),
+ ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
spin_unlock(&ctx->lock);
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev,
- sizeof(struct qat_alg_cd),
+ ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
@@ -530,18 +602,18 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
}
}
spin_unlock(&ctx->lock);
- if (qat_alg_init_sessions(ctx, key, keylen))
+ if (qat_alg_aead_init_sessions(ctx, key, keylen))
goto out_free_all;
return 0;
out_free_all:
- memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
- memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
@@ -557,7 +629,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp = qat_req->buf.blp;
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
- int i, bufs = bl->num_bufs;
+ size_t sz_out = qat_req->buf.sz_out;
+ int i;
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
@@ -567,14 +640,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
kfree(bl);
if (blp != blpout) {
/* If out of place operation dma unmap only data */
- int bufless = bufs - blout->num_mapped_bufs;
+ int bufless = blout->num_bufs - blout->num_mapped_bufs;
- for (i = bufless; i < bufs; i++) {
+ for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
DMA_BIDIRECTIONAL);
}
- dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
kfree(blout);
}
}
@@ -587,19 +660,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_crypto_request *qat_req)
{
struct device *dev = &GET_DEV(inst->accel_dev);
- int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+ int i, bufs = 0, sg_nctr = 0;
+ int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
- size_t sz = sizeof(struct qat_alg_buf_list) +
+ size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
if (unlikely(!n))
return -EINVAL;
- bufl = kmalloc_node(sz, GFP_ATOMIC,
+ bufl = kzalloc_node(sz, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!bufl))
return -ENOMEM;
@@ -620,15 +694,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
bufs++;
}
- bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = ivlen;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
+ if (ivlen) {
+ bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+ DMA_BIDIRECTIONAL);
+ bufl->bufers[bufs].len = ivlen;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+ goto err;
+ bufs++;
+ }
for_each_sg(sgl, sg, n, i) {
- int y = i + bufs;
+ int y = sg_nctr + bufs;
+
+ if (!sg->length)
+ continue;
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
@@ -636,8 +715,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err;
+ sg_nctr++;
}
- bufl->num_bufs = n + bufs;
+ bufl->num_bufs = sg_nctr + bufs;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -645,11 +725,15 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (sgl != sglout) {
struct qat_alg_buf *bufers;
- buflout = kmalloc_node(sz, GFP_ATOMIC,
+ n = sg_nents(sglout);
+ sz_out = sizeof(struct qat_alg_buf_list) +
+ ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ sg_nctr = 0;
+ buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err;
- bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err;
bufers = buflout->bufers;
@@ -660,60 +744,62 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[i].addr = bufl->bufers[i].addr;
}
for_each_sg(sglout, sg, n, i) {
- int y = i + bufs;
+ int y = sg_nctr + bufs;
+
+ if (!sg->length)
+ continue;
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
DMA_BIDIRECTIONAL);
- buflout->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err;
+ bufers[y].len = sg->length;
+ sg_nctr++;
}
- buflout->num_bufs = n + bufs;
- buflout->num_mapped_bufs = n;
+ buflout->num_bufs = sg_nctr + bufs;
+ buflout->num_mapped_bufs = sg_nctr;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
+ qat_req->buf.sz_out = sz_out;
} else {
/* Otherwise set the src and dst to the same address */
qat_req->buf.bloutp = qat_req->buf.blp;
+ qat_req->buf.sz_out = 0;
}
return 0;
err:
dev_err(dev, "Failed to map buf for dma\n");
- for_each_sg(sgl, sg, n + bufs, i) {
- if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+ sg_nctr = 0;
+ for (i = 0; i < n + bufs; i++)
+ if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- }
- }
+
if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
if (sgl != sglout && buflout) {
- for_each_sg(sglout, sg, n, i) {
- int y = i + bufs;
-
- if (!dma_mapping_error(dev, buflout->bufers[y].addr))
- dma_unmap_single(dev, buflout->bufers[y].addr,
- buflout->bufers[y].len,
+ n = sg_nents(sglout);
+ for (i = bufs; i < n + bufs; i++)
+ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- }
if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
kfree(buflout);
}
return -ENOMEM;
}
-void qat_alg_callback(void *resp)
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
{
- struct icp_qat_fw_la_resp *qat_resp = resp;
- struct qat_crypto_request *qat_req =
- (void *)(__force long)qat_resp->opaque_data;
- struct qat_alg_session_ctx *ctx = qat_req->ctx;
+ struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
struct qat_crypto_instance *inst = ctx->inst;
- struct aead_request *areq = qat_req->areq;
+ struct aead_request *areq = qat_req->aead_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -723,11 +809,35 @@ void qat_alg_callback(void *resp)
areq->base.complete(&areq->base, res);
}
-static int qat_alg_dec(struct aead_request *areq)
+static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+ uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+
+ qat_alg_free_bufl(inst, qat_req);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EINVAL;
+ areq->base.complete(&areq->base, res);
+}
+
+void qat_alg_callback(void *resp)
+{
+ struct icp_qat_fw_la_resp *qat_resp = resp;
+ struct qat_crypto_request *qat_req =
+ (void *)(__force long)qat_resp->opaque_data;
+
+ qat_req->cb(qat_resp, qat_req);
+}
+
+static int qat_alg_aead_dec(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -741,9 +851,10 @@ static int qat_alg_dec(struct aead_request *areq)
return ret;
msg = &qat_req->req;
- *msg = ctx->dec_fw_req_tmpl;
- qat_req->ctx = ctx;
- qat_req->areq = areq;
+ *msg = ctx->dec_fw_req;
+ qat_req->aead_ctx = ctx;
+ qat_req->aead_req = areq;
+ qat_req->cb = qat_aead_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
@@ -766,12 +877,12 @@ static int qat_alg_dec(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
- int enc_iv)
+static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
+ int enc_iv)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -784,9 +895,10 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
return ret;
msg = &qat_req->req;
- *msg = ctx->enc_fw_req_tmpl;
- qat_req->ctx = ctx;
- qat_req->areq = areq;
+ *msg = ctx->enc_fw_req;
+ qat_req->aead_ctx = ctx;
+ qat_req->aead_req = areq;
+ qat_req->cb = qat_aead_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
@@ -815,31 +927,168 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
return -EINPROGRESS;
}
-static int qat_alg_enc(struct aead_request *areq)
+static int qat_alg_aead_enc(struct aead_request *areq)
{
- return qat_alg_enc_internal(areq, areq->iv, 0);
+ return qat_alg_aead_enc_internal(areq, areq->iv, 0);
}
-static int qat_alg_genivenc(struct aead_givcrypt_request *req)
+static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
__be64 seq;
memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
seq = cpu_to_be64(req->seq);
memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
&seq, sizeof(uint64_t));
- return qat_alg_enc_internal(&req->areq, req->giv, 1);
+ return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
}
-static int qat_alg_init(struct crypto_tfm *tfm,
- enum icp_qat_hw_auth_algo hash, const char *hash_name)
+static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const uint8_t *key,
+ unsigned int keylen)
{
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct device *dev;
+
+ spin_lock(&ctx->lock);
+ if (ctx->enc_cd) {
+ /* rekeying */
+ dev = &GET_DEV(ctx->inst->accel_dev);
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+ } else {
+ /* new key */
+ int node = get_current_node();
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(node);
+ if (!inst) {
+ spin_unlock(&ctx->lock);
+ return -EINVAL;
+ }
+
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ spin_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ spin_unlock(&ctx->lock);
+ goto out_free_enc;
+ }
+ }
+ spin_unlock(&ctx->lock);
+ if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
+ goto out_free_all;
+
+ return 0;
+
+out_free_all:
+ memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
+ dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ ctx->dec_cd = NULL;
+out_free_enc:
+ memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
+ dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ ctx->enc_cd = NULL;
+ return -ENOMEM;
+}
+
+static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
+ NULL, 0, qat_req);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->enc_fw_req;
+ qat_req->ablkcipher_ctx = ctx;
+ qat_req->ablkcipher_req = req;
+ qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_offset = 0;
+ memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
+
+static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
+ NULL, 0, qat_req);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->dec_fw_req;
+ qat_req->ablkcipher_ctx = ctx;
+ qat_req->ablkcipher_req = req;
+ qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_offset = 0;
+ memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
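
The two handlers above only enqueue the request on the firmware ring and then
complete asynchronously (-EINPROGRESS), or drop it with -EBUSY if the ring is
still full after ten retries. For orientation, here is a minimal caller-side
sketch (editorial, not part of the patch) that drives the new "cbc(aes)"
implementation through the ablkcipher API of this kernel generation; the
qat_demo_* names are invented and error handling is abbreviated.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

struct qat_demo_result {
	struct completion done;
	int err;
};

static void qat_demo_complete(struct crypto_async_request *req, int err)
{
	struct qat_demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->done);
}

static int qat_demo_encrypt(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct qat_demo_result res;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.done);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					qat_demo_complete, &res);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS) {
		/* completed later via the driver's ablkcipher callback */
		wait_for_completion(&res.done);
		ret = res.err;
	}
	/* -EBUSY from this driver means the request was never queued */

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
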
+
+static int qat_alg_aead_init(struct crypto_tfm *tfm,
+ enum icp_qat_hw_auth_algo hash,
+ const char *hash_name)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- memzero_explicit(ctx, sizeof(*ctx));
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
return -EFAULT;
@@ -851,24 +1100,24 @@ static int qat_alg_init(struct crypto_tfm *tfm,
return 0;
}
-static int qat_alg_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
{
- return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
-static int qat_alg_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
{
- return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
-static int qat_alg_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
{
- return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
-static void qat_alg_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_tfm *tfm)
{
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
@@ -880,36 +1129,74 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
dev = &GET_DEV(inst->accel_dev);
if (ctx->enc_cd) {
- memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
}
if (ctx->dec_cd) {
- memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
}
qat_crypto_put_instance(inst);
}
+static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ spin_lock_init(&ctx->lock);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
+ sizeof(struct qat_crypto_request);
+ ctx->tfm = tfm;
+ return 0;
+}
+
+static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev;
+
+ if (!inst)
+ return;
+
+ dev = &GET_DEV(inst->accel_dev);
+ if (ctx->enc_cd) {
+ memset(ctx->enc_cd, 0,
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ dma_free_coherent(dev,
+ sizeof(struct icp_qat_hw_cipher_algo_blk),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ }
+ if (ctx->dec_cd) {
+ memset(ctx->dec_cd, 0,
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ dma_free_coherent(dev,
+ sizeof(struct icp_qat_hw_cipher_algo_blk),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ }
+ qat_crypto_put_instance(inst);
+}
+
static struct crypto_alg qat_algs[] = { {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_sha1_init,
- .cra_exit = qat_alg_exit,
+ .cra_init = qat_alg_aead_sha1_init,
+ .cra_exit = qat_alg_aead_exit,
.cra_u = {
.aead = {
- .setkey = qat_alg_setkey,
- .decrypt = qat_alg_dec,
- .encrypt = qat_alg_enc,
- .givencrypt = qat_alg_genivenc,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .givencrypt = qat_alg_aead_genivenc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -920,18 +1207,18 @@ static struct crypto_alg qat_algs[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_sha256_init,
- .cra_exit = qat_alg_exit,
+ .cra_init = qat_alg_aead_sha256_init,
+ .cra_exit = qat_alg_aead_exit,
.cra_u = {
.aead = {
- .setkey = qat_alg_setkey,
- .decrypt = qat_alg_dec,
- .encrypt = qat_alg_enc,
- .givencrypt = qat_alg_genivenc,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .givencrypt = qat_alg_aead_genivenc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -942,22 +1229,44 @@ static struct crypto_alg qat_algs[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_sha512_init,
- .cra_exit = qat_alg_exit,
+ .cra_init = qat_alg_aead_sha512_init,
+ .cra_exit = qat_alg_aead_exit,
.cra_u = {
.aead = {
- .setkey = qat_alg_setkey,
- .decrypt = qat_alg_dec,
- .encrypt = qat_alg_enc,
- .givencrypt = qat_alg_genivenc,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .givencrypt = qat_alg_aead_genivenc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
},
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "qat_aes_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = qat_alg_ablkcipher_init,
+ .cra_exit = qat_alg_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = qat_alg_ablkcipher_setkey,
+ .decrypt = qat_alg_ablkcipher_decrypt,
+ .encrypt = qat_alg_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
} };
int qat_algs_register(void)
@@ -966,8 +1275,11 @@ int qat_algs_register(void)
int i;
for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC;
+ qat_algs[i].cra_flags =
+ (qat_algs[i].cra_type == &crypto_aead_type) ?
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
+ CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+
return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
}
return 0;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index ab8468d11ddb..d503007b49e6 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -72,12 +72,24 @@ struct qat_crypto_request_buffs {
struct qat_alg_buf_list *blout;
dma_addr_t bloutp;
size_t sz;
+ size_t sz_out;
};
+struct qat_crypto_request;
+
struct qat_crypto_request {
struct icp_qat_fw_la_bulk_req req;
- struct qat_alg_session_ctx *ctx;
- struct aead_request *areq;
+ union {
+ struct qat_alg_aead_ctx *aead_ctx;
+ struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+ };
+ union {
+ struct aead_request *aead_req;
+ struct ablkcipher_request *ablkcipher_req;
+ };
struct qat_crypto_request_buffs buf;
+ void (*cb)(struct icp_qat_fw_la_resp *resp,
+ struct qat_crypto_request *req);
};
+
#endif
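
The unions and the new cb member let the instance's single response handler
stay oblivious to whether a completed descriptor belongs to an AEAD or an
ablkcipher request. Roughly (an editorial sketch, not the literal code of the
patch, assuming the firmware response echoes back the opaque_data programmed
by the submission paths above):

static void qat_alg_callback_sketch(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
			(void *)(__force long)qat_resp->opaque_data;

	/* dispatches to the aead or ablkcipher completion handler */
	qat_req->cb(qat_resp, qat_req);
}
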
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index ef05825cc651..6a735d5c0e37 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -46,6 +46,7 @@
*/
#include <adf_accel_devices.h>
#include "adf_dh895xcc_hw_data.h"
+#include "adf_common_drv.h"
#include "adf_drv.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -182,6 +183,19 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
}
}
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+
+ addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+
+ /* Enable bundle and misc interrupts */
+ ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+ ADF_DH895XCC_SMIA0_MASK);
+ ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+ ADF_DH895XCC_SMIA1_MASK);
+}
+
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcc_class;
@@ -206,6 +220,11 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_DH895XCC_FW;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->enable_ints = adf_enable_ints;
}
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 948f66be262b..8ffdb95c9804 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -90,9 +90,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
- adf_exit_admin_comms(accel_dev);
- adf_exit_arb(accel_dev);
- adf_cleanup_etr_data(accel_dev);
+ adf_dev_shutdown(accel_dev);
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
@@ -119,7 +117,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev);
}
-static int qat_dev_start(struct adf_accel_dev *accel_dev)
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
int banks = GET_MAX_BANKS(accel_dev);
@@ -206,7 +204,7 @@ static int qat_dev_start(struct adf_accel_dev *accel_dev)
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
- return adf_dev_start(accel_dev);
+ return 0;
err:
dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
return -EINVAL;
@@ -217,7 +215,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- void __iomem *pmisc_bar_addr = NULL;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret;
@@ -347,8 +344,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = -EFAULT;
goto out_err;
}
- if (i == ADF_DH895XCC_PMISC_BAR)
- pmisc_bar_addr = bar->virt_addr;
}
pci_set_master(pdev);
@@ -358,36 +353,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- if (adf_init_etr_data(accel_dev)) {
- dev_err(&pdev->dev, "Failed initialize etr\n");
- ret = -EFAULT;
- goto out_err;
- }
-
- if (adf_init_admin_comms(accel_dev)) {
- dev_err(&pdev->dev, "Failed initialize admin comms\n");
- ret = -EFAULT;
- goto out_err;
- }
-
- if (adf_init_arb(accel_dev)) {
- dev_err(&pdev->dev, "Failed initialize hw arbiter\n");
- ret = -EFAULT;
- goto out_err;
- }
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
goto out_err;
}
- /* Enable bundle and misc interrupts */
- ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- ADF_DH895XCC_SMIA0_MASK);
- ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
- ADF_DH895XCC_SMIA1_MASK);
+ ret = adf_dev_configure(accel_dev);
+ if (ret)
+ goto out_err;
+
+ ret = adf_dev_init(accel_dev);
+ if (ret)
+ goto out_err;
- ret = qat_dev_start(accel_dev);
+ ret = adf_dev_start(accel_dev);
if (ret) {
adf_dev_stop(accel_dev);
goto out_err;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 0fb21e13f247..378cb768647f 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -64,7 +64,7 @@ int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
err = dma_map_sg(dev, sg, 1, dir);
if (!err)
return -EFAULT;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
} else {
err = dma_map_sg(dev, sg, nents, dir);
@@ -81,7 +81,7 @@ void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
if (chained)
while (sg) {
dma_unmap_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
else
dma_unmap_sg(dev, sg, nents, dir);
@@ -100,7 +100,7 @@ int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
*chained = true;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return nents;
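
All of the scatterwalk_sg_next() to sg_next() conversions in this series rely
on the same walking pattern; a self-contained sketch (editorial, not part of
the patch) of the entry-counting loop the drivers above use:

#include <linux/scatterlist.h>

static int demo_count_sg_entries(struct scatterlist *sg, int nbytes)
{
	int nents = 0;

	while (sg && nbytes > 0) {
		nents++;
		nbytes -= sg->length;
		sg = sg_next(sg);	/* also follows chained scatterlists */
	}

	return nents;
}
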
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index f3385934eed2..5c5df1d17f90 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -285,7 +285,7 @@ static int qce_ahash_update(struct ahash_request *req)
break;
len += sg_dma_len(sg);
sg_last = sg;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
if (!sg_last)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 220b92f7eabc..290a7f0a681f 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -940,7 +940,7 @@ static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
break;
}
nbytes -= sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return nbytes;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 067ec2193d71..ebbae8d3ce0d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -743,7 +743,7 @@ static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
if (unlikely(chained))
while (sg) {
dma_map_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
else
dma_map_sg(dev, sg, nents, dir);
@@ -755,7 +755,7 @@ static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
{
while (sg) {
dma_unmap_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
}
@@ -915,7 +915,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
cryptlen -= sg_dma_len(sg);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -1102,7 +1102,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
*chained = true;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return sg_nents;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index f831bb952b2f..d594ae962ed2 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -479,13 +479,13 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.dst_maxburst = 4,
- };
+ };
struct dma_slave_config cryp2mem = {
.direction = DMA_DEV_TO_MEM,
.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.src_maxburst = 4,
- };
+ };
dma_cap_zero(device_data->dma.mask);
dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -814,7 +814,7 @@ static int get_nents(struct scatterlist *sg, int nbytes)
while (nbytes > 0) {
nbytes -= sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
nents++;
}
@@ -1774,8 +1774,8 @@ static int ux500_cryp_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
static const struct of_device_id ux500_cryp_match[] = {
- { .compatible = "stericsson,ux500-cryp" },
- { },
+ { .compatible = "stericsson,ux500-cryp" },
+ { },
};
static struct platform_driver cryp_driver = {
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index faf4e70c42e0..64281bb2f650 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,5 +1,6 @@
menuconfig PM_DEVFREQ
bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+ select SRCU
help
A device may have a list of frequencies and voltages available.
devfreq, a generic DVFS framework can be registered for a device
@@ -87,4 +88,16 @@ config ARM_EXYNOS5_BUS_DEVFREQ
It reads PPMU counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
+config ARM_TEGRA_DEVFREQ
+ tristate "Tegra DEVFREQ Driver"
+ depends on ARCH_TEGRA_124_SOC
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_OPP
+ help
+ This adds the DEVFREQ driver for the Tegra family of SoCs.
+ It reads ACTMON counters of memory controllers and adjusts the
+ operating frequencies and voltages with OPP support.
+
+source "drivers/devfreq/event/Kconfig"
+
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 16138c9e0d58..5134f9ee983d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
+obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o
obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
@@ -7,3 +8,7 @@ obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
+obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
+
+# DEVFREQ Event Drivers
+obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
new file mode 100644
index 000000000000..f304a0289eda
--- /dev/null
+++ b/drivers/devfreq/devfreq-event.c
@@ -0,0 +1,494 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/devfreq.c.
+ */
+
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+static struct class *devfreq_event_class;
+
+/* The list of all devfreq-event devices */
+static LIST_HEAD(devfreq_event_list);
+static DEFINE_MUTEX(devfreq_event_list_lock);
+
+#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
+
+/**
+ * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
+ * the enable_count of devfreq-event dev.
+ * @edev : the devfreq-event device
+ *
+ * Note that this function increases the enable_count and enables the
+ * devfreq-event device. The devfreq-event device should be enabled before
+ * a devfreq device uses it.
+ */
+int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+ int ret = 0;
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ mutex_lock(&edev->lock);
+ if (edev->desc->ops && edev->desc->ops->enable
+ && edev->enable_count == 0) {
+ ret = edev->desc->ops->enable(edev);
+ if (ret < 0)
+ goto err;
+ }
+ edev->enable_count++;
+err:
+ mutex_unlock(&edev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
+
+/**
+ * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
+ * the enable_count of the devfreq-event dev.
+ * @edev : the devfreq-event device
+ *
+ * Note that this function decreases the enable_count and disables the
+ * devfreq-event device. After the devfreq-event device is disabled, a
+ * devfreq device can't use it for get/set/reset operations.
+ */
+int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+ int ret = 0;
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ mutex_lock(&edev->lock);
+ if (edev->enable_count <= 0) {
+ dev_warn(&edev->dev, "unbalanced enable_count\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ if (edev->desc->ops && edev->desc->ops->disable
+ && edev->enable_count == 1) {
+ ret = edev->desc->ops->disable(edev);
+ if (ret < 0)
+ goto err;
+ }
+ edev->enable_count--;
+err:
+ mutex_unlock(&edev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
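
The enable/disable pair is reference counted: only the first enable and the
last disable touch the hardware through the ops callbacks. A hypothetical
consumer (editorial sketch, not part of the patch) keeps the calls balanced
like this:

static int demo_start_monitoring(struct devfreq_event_dev *edev)
{
	/* first user enables the hardware, later users only bump the count */
	return devfreq_event_enable_edev(edev);
}

static void demo_stop_monitoring(struct devfreq_event_dev *edev)
{
	/* hardware is disabled only when the last user drops its reference */
	devfreq_event_disable_edev(edev);
}
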
+
+/**
+ * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
+ * not.
+ * @edev : the devfreq-event device
+ *
+ * Note that this function checks whether the devfreq-event dev is enabled.
+ * It returns true if the devfreq-event dev is enabled and false if it is
+ * disabled.
+ */
+bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+ bool enabled = false;
+
+ if (!edev || !edev->desc)
+ return enabled;
+
+ mutex_lock(&edev->lock);
+
+ if (edev->enable_count > 0)
+ enabled = true;
+
+ mutex_unlock(&edev->lock);
+
+ return enabled;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
+
+/**
+ * devfreq_event_set_event() - Set event to devfreq-event dev to start.
+ * @edev : the devfreq-event device
+ *
+ * Note that this function sets the event on the devfreq-event device to start
+ * collecting the event data, which can be of various event types.
+ */
+int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+ int ret;
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ if (!edev->desc->ops || !edev->desc->ops->set_event)
+ return -EINVAL;
+
+ if (!devfreq_event_is_enabled(edev))
+ return -EPERM;
+
+ mutex_lock(&edev->lock);
+ ret = edev->desc->ops->set_event(edev);
+ mutex_unlock(&edev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_set_event);
+
+/**
+ * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
+ * @edev : the devfreq-event device
+ * @edata : the calculated data of devfreq-event device
+ *
+ * Note that this function gets the calculated event data from the
+ * devfreq-event dev after stopping the whole counting sequence of the
+ * devfreq-event dev.
+ */
+int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ int ret;
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ if (!edev->desc->ops || !edev->desc->ops->get_event)
+ return -EINVAL;
+
+ if (!devfreq_event_is_enabled(edev))
+ return -EINVAL;
+
+ edata->total_count = edata->load_count = 0;
+
+ mutex_lock(&edev->lock);
+ ret = edev->desc->ops->get_event(edev, edata);
+ if (ret < 0)
+ edata->total_count = edata->load_count = 0;
+ mutex_unlock(&edev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_event);
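
A typical sampling cycle arms the counters with devfreq_event_set_event(),
lets the workload run, and then reads the result back. A hypothetical
consumer (editorial sketch, not part of the patch; the demo_* name is invented
and the 100 ms window is arbitrary) could derive a utilization percentage like
this:

#include <linux/delay.h>

static int demo_sample_utilization(struct devfreq_event_dev *edev,
				   unsigned long *percent)
{
	struct devfreq_event_data edata;
	int ret;

	ret = devfreq_event_set_event(edev);		/* start counting */
	if (ret < 0)
		return ret;

	msleep(100);					/* sampling window */

	ret = devfreq_event_get_event(edev, &edata);	/* stop and read back */
	if (ret < 0)
		return ret;

	*percent = edata.total_count ?
			edata.load_count * 100 / edata.total_count : 0;

	return 0;
}
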
+
+/**
+ * devfreq_event_reset_event() - Reset all operations of the devfreq-event dev.
+ * @edev : the devfreq-event device
+ *
+ * Note that this function stops all operations of the devfreq-event dev and
+ * resets the current event data to bring the devfreq-event device back to its
+ * initial state.
+ */
+int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+ int ret = 0;
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ if (!devfreq_event_is_enabled(edev))
+ return -EPERM;
+
+ mutex_lock(&edev->lock);
+ if (edev->desc->ops && edev->desc->ops->reset)
+ ret = edev->desc->ops->reset(edev);
+ mutex_unlock(&edev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
+
+/**
+ * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
+ * devicetree.
+ * @dev : the pointer to the given device
+ * @index : the index into list of devfreq-event device
+ *
+ * Note that this function returns a pointer to the devfreq-event device.
+ */
+struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
+ int index)
+{
+ struct device_node *node;
+ struct devfreq_event_dev *edev;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device does not have a device node entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ node = of_parse_phandle(dev->of_node, "devfreq-events", index);
+ if (!node) {
+ dev_err(dev, "failed to get phandle in %s node\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&devfreq_event_list_lock);
+ list_for_each_entry(edev, &devfreq_event_list, node) {
+ if (!strcmp(edev->desc->name, node->name))
+ goto out;
+ }
+ edev = NULL;
+out:
+ mutex_unlock(&devfreq_event_list_lock);
+
+ if (!edev) {
+ dev_err(dev, "unable to get devfreq-event device : %s\n",
+ node->name);
+ of_node_put(node);
+ return ERR_PTR(-ENODEV);
+ }
+
+ of_node_put(node);
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
+
+/**
+ * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
+ * @dev : the pointer to the given device
+ *
+ * Note that this function returns the count of devfreq-event devices.
+ */
+int devfreq_event_get_edev_count(struct device *dev)
+{
+ int count;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device does not have a device node entry\n");
+ return -EINVAL;
+ }
+
+ count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
+ sizeof(u32));
+	if (count < 0) {
+ dev_err(dev,
+ "failed to get the count of devfreq-event in %s node\n",
+ dev->of_node->full_name);
+ return count;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
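
Taken together, the two lookup helpers let a consumer enumerate every
"devfreq-events" phandle listed in its own device node. A hypothetical probe
path (editorial sketch, not part of the patch; the demo_* name is invented)
might collect and enable them as follows:

static int demo_get_events(struct device *dev,
			   struct devfreq_event_dev **edevs, int max)
{
	int i, count, ret;

	count = devfreq_event_get_edev_count(dev);
	if (count < 0)
		return count;
	if (count > max)
		count = max;

	for (i = 0; i < count; i++) {
		edevs[i] = devfreq_event_get_edev_by_phandle(dev, i);
		if (IS_ERR(edevs[i]))
			return PTR_ERR(edevs[i]);

		ret = devfreq_event_enable_edev(edevs[i]);
		if (ret < 0)
			return ret;
	}

	return count;
}
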
+
+static void devfreq_event_release_edev(struct device *dev)
+{
+ struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+ kfree(edev);
+}
+
+/**
+ * devfreq_event_add_edev() - Add new devfreq-event device.
+ * @dev : the device owning the devfreq-event device being created
+ * @desc : the devfreq-event device's descriptor, which includes essential
+ * data for the devfreq-event device.
+ *
+ * Note that this function adds the new devfreq-event device to the
+ * devfreq-event class list and registers the device of the devfreq-event
+ * device.
+ */
+struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ struct devfreq_event_dev *edev;
+ static atomic_t event_no = ATOMIC_INIT(0);
+ int ret;
+
+ if (!dev || !desc)
+ return ERR_PTR(-EINVAL);
+
+ if (!desc->name || !desc->ops)
+ return ERR_PTR(-EINVAL);
+
+ if (!desc->ops->set_event || !desc->ops->get_event)
+ return ERR_PTR(-EINVAL);
+
+ edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
+ if (!edev)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&edev->lock);
+ edev->desc = desc;
+ edev->enable_count = 0;
+ edev->dev.parent = dev;
+ edev->dev.class = devfreq_event_class;
+ edev->dev.release = devfreq_event_release_edev;
+
+ dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
+ ret = device_register(&edev->dev);
+ if (ret < 0) {
+ put_device(&edev->dev);
+ return ERR_PTR(ret);
+ }
+ dev_set_drvdata(&edev->dev, edev);
+
+ INIT_LIST_HEAD(&edev->node);
+
+ mutex_lock(&devfreq_event_list_lock);
+ list_add(&edev->node, &devfreq_event_list);
+ mutex_unlock(&devfreq_event_list_lock);
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
+
+/**
+ * devfreq_event_remove_edev() - Remove the registered devfreq-event device.
+ * @edev : the devfreq-event device
+ *
+ * Note that this function removes the registered devfreq-event device.
+ */
+int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+ if (!edev)
+ return -EINVAL;
+
+ WARN_ON(edev->enable_count);
+
+ mutex_lock(&devfreq_event_list_lock);
+ list_del(&edev->node);
+ mutex_unlock(&devfreq_event_list_lock);
+
+ device_unregister(&edev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
+
+static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
+{
+ struct devfreq_event_dev **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+static void devm_devfreq_event_release(struct device *dev, void *res)
+{
+ devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
+}
+
+/**
+ * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
+ * @dev : the device owning the devfreq-event device being created
+ * @desc : the devfreq-event device's descriptor, which includes essential
+ * data for the devfreq-event device.
+ *
+ * Note that this function automatically manages the memory of the
+ * devfreq-event device using device resource management, which simplifies
+ * freeing that memory.
+ */
+struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ struct devfreq_event_dev **ptr, *edev;
+
+ ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ edev = devfreq_event_add_edev(dev, desc);
+ if (IS_ERR(edev)) {
+ devres_free(ptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
+
+/**
+ * devm_devfreq_event_remove_edev() - Resource-managed devfreq_event_remove_edev()
+ * @dev : the device owning the devfreq-event device being removed
+ * @edev : the devfreq-event device
+ *
+ * Note that this function automatically manages the memory of the
+ * devfreq-event device using device resource management.
+ */
+void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_devfreq_event_release,
+ devm_devfreq_event_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
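
From a provider's point of view, the minimum needed before calling the
resource-managed registration helper is a descriptor carrying a name plus
set_event/get_event ops (devfreq_event_add_edev() rejects anything less). An
editorial sketch, not part of the patch; the demo_* names are invented:

static int demo_set_event(struct devfreq_event_dev *edev)
{
	return 0;		/* program the counter hardware here */
}

static int demo_get_event(struct devfreq_event_dev *edev,
			  struct devfreq_event_data *edata)
{
	edata->load_count = 0;	/* read the counter hardware here */
	edata->total_count = 0;
	return 0;
}

static struct devfreq_event_ops demo_ops = {
	.set_event = demo_set_event,
	.get_event = demo_get_event,
};

static struct devfreq_event_desc demo_desc = {
	.name = "demo-event",
	.ops = &demo_ops,
};

/* in probe(): edev = devm_devfreq_event_add_edev(&pdev->dev, &demo_desc); */
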
+
+/*
+ * Device attributes for devfreq-event class.
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", edev->desc->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t enable_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+ if (!edev || !edev->desc)
+ return -EINVAL;
+
+ return sprintf(buf, "%d\n", edev->enable_count);
+}
+static DEVICE_ATTR_RO(enable_count);
+
+static struct attribute *devfreq_event_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_enable_count.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(devfreq_event);
+
+static int __init devfreq_event_init(void)
+{
+ devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
+ if (IS_ERR(devfreq_event_class)) {
+ pr_err("%s: couldn't create class\n", __FILE__);
+ return PTR_ERR(devfreq_event_class);
+ }
+
+ devfreq_event_class->dev_groups = devfreq_event_groups;
+
+ return 0;
+}
+subsys_initcall(devfreq_event_init);
+
+static void __exit devfreq_event_exit(void)
+{
+ class_destroy(devfreq_event_class);
+}
+module_exit(devfreq_event_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ-Event class support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
new file mode 100644
index 000000000000..a11720affc31
--- /dev/null
+++ b/drivers/devfreq/event/Kconfig
@@ -0,0 +1,25 @@
+menuconfig PM_DEVFREQ_EVENT
+ bool "DEVFREQ-Event device Support"
+ help
+	  A devfreq-event device provides the raw data and events which
+	  indicate the current state of the devfreq-event device. The
+	  provided data is used to monitor the state of the device and to
+	  determine how much of a resource is actually needed, reducing
+	  wasted resources.
+
+	  A devfreq-event device can support various types of events
+	  (e.g. raw data, utilization, latency, bandwidth). The events
+	  may be used by a devfreq governor and by other subsystems.
+
+if PM_DEVFREQ_EVENT
+
+config DEVFREQ_EVENT_EXYNOS_PPMU
+ bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ depends on ARCH_EXYNOS
+ select PM_OPP
+ help
+	  This adds the devfreq-event driver for Exynos SoCs. It provides PPMU
+ (Platform Performance Monitoring Unit) counters to estimate the
+ utilization of each module.
+
+endif # PM_DEVFREQ_EVENT
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
new file mode 100644
index 000000000000..be146ead79cf
--- /dev/null
+++ b/drivers/devfreq/event/Makefile
@@ -0,0 +1,2 @@
+# Exynos DEVFREQ Event Drivers
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
new file mode 100644
index 000000000000..ad8347385f53
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -0,0 +1,374 @@
+/*
+ * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include <linux/devfreq-event.h>
+
+#include "exynos-ppmu.h"
+
+struct exynos_ppmu_data {
+ void __iomem *base;
+ struct clk *clk;
+};
+
+struct exynos_ppmu {
+ struct devfreq_event_dev **edev;
+ struct devfreq_event_desc *desc;
+ unsigned int num_events;
+
+ struct device *dev;
+ struct mutex lock;
+
+ struct exynos_ppmu_data ppmu;
+};
+
+#define PPMU_EVENT(name) \
+ { "ppmu-event0-"#name, PPMU_PMNCNT0 }, \
+ { "ppmu-event1-"#name, PPMU_PMNCNT1 }, \
+ { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
+ { "ppmu-event3-"#name, PPMU_PMNCNT3 }
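
Each PPMU_EVENT(name) entry in the table below expands to four rows, one per
hardware performance counter (expansion shown for reference, not part of the
patch); PPMU_EVENT(dmc0), for example, becomes:

	{ "ppmu-event0-dmc0", PPMU_PMNCNT0 },
	{ "ppmu-event1-dmc0", PPMU_PMNCNT1 },
	{ "ppmu-event2-dmc0", PPMU_PMNCNT2 },
	{ "ppmu-event3-dmc0", PPMU_PMNCNT3 }
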
+
+struct __exynos_ppmu_events {
+ char *name;
+ int id;
+} ppmu_events[] = {
+ /* For Exynos3250, Exynos4 and Exynos5260 */
+ PPMU_EVENT(g3d),
+ PPMU_EVENT(fsys),
+
+ /* For Exynos4 SoCs and Exynos3250 */
+ PPMU_EVENT(dmc0),
+ PPMU_EVENT(dmc1),
+ PPMU_EVENT(cpu),
+ PPMU_EVENT(rightbus),
+ PPMU_EVENT(leftbus),
+ PPMU_EVENT(lcd0),
+ PPMU_EVENT(camif),
+
+ /* Only for Exynos3250 and Exynos5260 */
+ PPMU_EVENT(mfc),
+
+ /* Only for Exynos4 SoCs */
+ PPMU_EVENT(mfc-left),
+ PPMU_EVENT(mfc-right),
+
+ /* Only for Exynos5260 SoCs */
+ PPMU_EVENT(drex0-s0),
+ PPMU_EVENT(drex0-s1),
+ PPMU_EVENT(drex1-s0),
+ PPMU_EVENT(drex1-s1),
+ PPMU_EVENT(eagle),
+ PPMU_EVENT(kfc),
+ PPMU_EVENT(isp),
+ PPMU_EVENT(fimc),
+ PPMU_EVENT(gscl),
+ PPMU_EVENT(mscl),
+ PPMU_EVENT(fimd0x),
+ PPMU_EVENT(fimd1x),
+ { /* sentinel */ },
+};
+
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
+ if (!strcmp(edev->desc->name, ppmu_events[i].name))
+ return ppmu_events[i].id;
+
+ return -EINVAL;
+}
+
+static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
+{
+ struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ u32 pmnc;
+
+ /* Disable all counters */
+ __raw_writel(PPMU_CCNT_MASK |
+ PPMU_PMCNT0_MASK |
+ PPMU_PMCNT1_MASK |
+ PPMU_PMCNT2_MASK |
+ PPMU_PMCNT3_MASK,
+ info->ppmu.base + PPMU_CNTENC);
+
+ /* Disable PPMU */
+ pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+ pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+ __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+
+ return 0;
+}
+
+static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
+{
+ struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ int id = exynos_ppmu_find_ppmu_id(edev);
+ u32 pmnc, cntens;
+
+ if (id < 0)
+ return id;
+
+ /* Enable specific counter */
+ cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
+ cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+ __raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
+
+ /* Set the event of Read/Write data count */
+ __raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
+ info->ppmu.base + PPMU_BEVTxSEL(id));
+
+ /* Reset cycle counter/performance counter and enable PPMU */
+ pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+ pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+ | PPMU_PMNC_COUNTER_RESET_MASK
+ | PPMU_PMNC_CC_RESET_MASK);
+ pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+ pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+ pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+ __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+
+ return 0;
+}
+
+static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ int id = exynos_ppmu_find_ppmu_id(edev);
+ u32 pmnc, cntenc;
+
+ if (id < 0)
+ return -EINVAL;
+
+ /* Disable PPMU */
+ pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+ pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+ __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+
+ /* Read cycle count */
+ edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
+
+ /* Read performance count */
+ switch (id) {
+ case PPMU_PMNCNT0:
+ case PPMU_PMNCNT1:
+ case PPMU_PMNCNT2:
+ edata->load_count
+ = __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
+ break;
+ case PPMU_PMNCNT3:
+ edata->load_count =
+ ((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
+ | __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Disable specific counter */
+ cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
+ cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+ __raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
+
+ dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+ edata->load_count, edata->total_count);
+
+ return 0;
+}
+
+static struct devfreq_event_ops exynos_ppmu_ops = {
+ .disable = exynos_ppmu_disable,
+ .set_event = exynos_ppmu_set_event,
+ .get_event = exynos_ppmu_get_event,
+};
+
+static int of_get_devfreq_events(struct device_node *np,
+ struct exynos_ppmu *info)
+{
+ struct devfreq_event_desc *desc;
+ struct device *dev = info->dev;
+ struct device_node *events_np, *node;
+ int i, j, count;
+
+ events_np = of_get_child_by_name(np, "events");
+ if (!events_np) {
+ dev_err(dev,
+ "failed to get child node of devfreq-event devices\n");
+ return -EINVAL;
+ }
+
+ count = of_get_child_count(events_np);
+ desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ info->num_events = count;
+
+ j = 0;
+ for_each_child_of_node(events_np, node) {
+ for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
+ if (!ppmu_events[i].name)
+ continue;
+
+ if (!of_node_cmp(node->name, ppmu_events[i].name))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ppmu_events)) {
+ dev_warn(dev,
+ "don't know how to configure events : %s\n",
+ node->name);
+ continue;
+ }
+
+ desc[j].ops = &exynos_ppmu_ops;
+ desc[j].driver_data = info;
+
+ of_property_read_string(node, "event-name", &desc[j].name);
+
+ j++;
+
+ of_node_put(node);
+ }
+ info->desc = desc;
+
+ of_node_put(events_np);
+
+ return 0;
+}
+
+static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
+{
+ struct device *dev = info->dev;
+ struct device_node *np = dev->of_node;
+ int ret = 0;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
+ }
+
+	/* Map the memory-mapped IO used to control the PPMU registers */
+ info->ppmu.base = of_iomap(np, 0);
+ if (IS_ERR_OR_NULL(info->ppmu.base)) {
+ dev_err(dev, "failed to map memory region\n");
+ return -ENOMEM;
+ }
+
+ info->ppmu.clk = devm_clk_get(dev, "ppmu");
+ if (IS_ERR(info->ppmu.clk)) {
+ info->ppmu.clk = NULL;
+ dev_warn(dev, "cannot get PPMU clock\n");
+ }
+
+ ret = of_get_devfreq_events(np, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse exynos ppmu dt node\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ iounmap(info->ppmu.base);
+
+ return ret;
+}
+
+static int exynos_ppmu_probe(struct platform_device *pdev)
+{
+ struct exynos_ppmu *info;
+ struct devfreq_event_dev **edev;
+ struct devfreq_event_desc *desc;
+ int i, ret = 0, size;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+
+ /* Parse dt data to get resource */
+ ret = exynos_ppmu_parse_dt(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to parse devicetree for resource\n");
+ return ret;
+ }
+ desc = info->desc;
+
+ size = sizeof(struct devfreq_event_dev *) * info->num_events;
+ info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!info->edev) {
+ dev_err(&pdev->dev,
+			"failed to allocate memory for devfreq-event devices\n");
+ return -ENOMEM;
+ }
+ edev = info->edev;
+ platform_set_drvdata(pdev, info);
+
+ for (i = 0; i < info->num_events; i++) {
+ edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
+ if (IS_ERR(edev[i])) {
+ ret = PTR_ERR(edev[i]);
+ dev_err(&pdev->dev,
+ "failed to add devfreq-event device\n");
+ goto err;
+ }
+ }
+
+ clk_prepare_enable(info->ppmu.clk);
+
+ return 0;
+err:
+ iounmap(info->ppmu.base);
+
+ return ret;
+}
+
+static int exynos_ppmu_remove(struct platform_device *pdev)
+{
+ struct exynos_ppmu *info = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(info->ppmu.clk);
+ iounmap(info->ppmu.base);
+
+ return 0;
+}
+
+static struct of_device_id exynos_ppmu_id_match[] = {
+ { .compatible = "samsung,exynos-ppmu", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver exynos_ppmu_driver = {
+ .probe = exynos_ppmu_probe,
+ .remove = exynos_ppmu_remove,
+ .driver = {
+ .name = "exynos-ppmu",
+ .of_match_table = exynos_ppmu_id_match,
+ },
+};
+module_platform_driver(exynos_ppmu_driver);
+
+MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
new file mode 100644
index 000000000000..4e831d48c138
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -0,0 +1,93 @@
+/*
+ * exynos_ppmu.h - EXYNOS PPMU header file
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PPMU_H__
+#define __EXYNOS_PPMU_H__
+
+enum ppmu_state {
+ PPMU_DISABLE = 0,
+ PPMU_ENABLE,
+};
+
+enum ppmu_counter {
+ PPMU_PMNCNT0 = 0,
+ PPMU_PMNCNT1,
+ PPMU_PMNCNT2,
+ PPMU_PMNCNT3,
+
+ PPMU_PMNCNT_MAX,
+};
+
+enum ppmu_event_type {
+ PPMU_RO_BUSY_CYCLE_CNT = 0x0,
+ PPMU_WO_BUSY_CYCLE_CNT = 0x1,
+ PPMU_RW_BUSY_CYCLE_CNT = 0x2,
+ PPMU_RO_REQUEST_CNT = 0x3,
+ PPMU_WO_REQUEST_CNT = 0x4,
+ PPMU_RO_DATA_CNT = 0x5,
+ PPMU_WO_DATA_CNT = 0x6,
+ PPMU_RO_LATENCY = 0x12,
+ PPMU_WO_LATENCY = 0x16,
+};
+
+enum ppmu_reg {
+ /* PPC control register */
+ PPMU_PMNC = 0x00,
+ PPMU_CNTENS = 0x10,
+ PPMU_CNTENC = 0x20,
+ PPMU_INTENS = 0x30,
+ PPMU_INTENC = 0x40,
+ PPMU_FLAG = 0x50,
+
+ /* Cycle Counter and Performance Event Counter Register */
+ PPMU_CCNT = 0x100,
+ PPMU_PMCNT0 = 0x110,
+ PPMU_PMCNT1 = 0x120,
+ PPMU_PMCNT2 = 0x130,
+ PPMU_PMCNT3_HIGH = 0x140,
+ PPMU_PMCNT3_LOW = 0x150,
+
+ /* Bus Event Generator */
+ PPMU_BEVT0SEL = 0x1000,
+ PPMU_BEVT1SEL = 0x1100,
+ PPMU_BEVT2SEL = 0x1200,
+ PPMU_BEVT3SEL = 0x1300,
+ PPMU_COUNTER_RESET = 0x1810,
+ PPMU_READ_OVERFLOW_CNT = 0x1810,
+ PPMU_READ_UNDERFLOW_CNT = 0x1814,
+ PPMU_WRITE_OVERFLOW_CNT = 0x1850,
+ PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
+ PPMU_READ_PENDING_CNT = 0x1880,
+ PPMU_WRITE_PENDING_CNT = 0x1884
+};
+
+/* PMNC register */
+#define PPMU_PMNC_CC_RESET_SHIFT 2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT 1
+#define PPMU_PMNC_ENABLE_SHIFT 0
+#define PPMU_PMNC_START_MODE_MASK BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1)
+#define PPMU_PMNC_ENABLE_MASK BIT(0)
+
+/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
+#define PPMU_CCNT_MASK BIT(31)
+#define PPMU_PMCNT3_MASK BIT(3)
+#define PPMU_PMCNT2_MASK BIT(2)
+#define PPMU_PMCNT1_MASK BIT(1)
+#define PPMU_PMCNT0_MASK BIT(0)
+
+/* PPMU_PMNCTx/PPMU_BETxSEL registers */
+#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x))
+#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x))
+
+#endif /* __EXYNOS_PPMU_H__ */
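
As a quick check on the accessor macros (shown for reference, not part of the
patch), plugging in counter indices reproduces the fixed offsets from the
register map above:

	PPMU_PMNCT(PPMU_PMNCNT1) = 0x110 + 0x10 * 1   = 0x120	/* PPMU_PMCNT1 */
	PPMU_PMNCT(PPMU_PMNCNT2) = 0x110 + 0x10 * 2   = 0x130	/* PPMU_PMCNT2 */
	PPMU_BEVTxSEL(3)         = 0x1000 + 0x100 * 3 = 0x1300	/* PPMU_BEVT3SEL */
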
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
new file mode 100644
index 000000000000..34790961af5a
--- /dev/null
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -0,0 +1,718 @@
+/*
+ * A devfreq driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/reset.h>
+
+#include "governor.h"
+
+#define ACTMON_GLB_STATUS 0x0
+#define ACTMON_GLB_PERIOD_CTRL 0x4
+
+#define ACTMON_DEV_CTRL 0x0
+#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
+#define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
+#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
+#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
+#define ACTMON_DEV_CTRL_ENB BIT(31)
+
+#define ACTMON_DEV_UPPER_WMARK 0x4
+#define ACTMON_DEV_LOWER_WMARK 0x8
+#define ACTMON_DEV_INIT_AVG 0xc
+#define ACTMON_DEV_AVG_UPPER_WMARK 0x10
+#define ACTMON_DEV_AVG_LOWER_WMARK 0x14
+#define ACTMON_DEV_COUNT_WEIGHT 0x18
+#define ACTMON_DEV_AVG_COUNT 0x20
+#define ACTMON_DEV_INTR_STATUS 0x24
+
+#define ACTMON_INTR_STATUS_CLEAR 0xffffffff
+
+#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
+#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)
+
+#define ACTMON_ABOVE_WMARK_WINDOW 1
+#define ACTMON_BELOW_WMARK_WINDOW 3
+#define ACTMON_BOOST_FREQ_STEP 16000
+
+/*
+ * The activity counter is incremented every 256 memory transactions, and
+ * each transaction takes 4 EMC clocks on Tegra124, so the COUNT_WEIGHT is
+ * 4 * 256 = 1024.
+ */
+#define ACTMON_COUNT_WEIGHT 0x400
+
+/*
+ * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
+ * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
+ */
+#define ACTMON_AVERAGE_WINDOW_LOG2 6
+#define ACTMON_SAMPLING_PERIOD 12 /* ms */
+#define ACTMON_DEFAULT_AVG_BAND			6	/* in units of 0.1% */
+
+#define KHZ 1000
+
+/* Assume that the bus is saturated if the utilization is 25% */
+#define BUS_SATURATION_RATIO 25
+
+/**
+ * struct tegra_devfreq_device_config - configuration specific to an ACTMON
+ * device
+ *
+ * Coefficients and thresholds are in %
+ */
+struct tegra_devfreq_device_config {
+ u32 offset;
+ u32 irq_mask;
+
+ unsigned int boost_up_coeff;
+ unsigned int boost_down_coeff;
+ unsigned int boost_up_threshold;
+ unsigned int boost_down_threshold;
+ u32 avg_dependency_threshold;
+};
+
+enum tegra_actmon_device {
+ MCALL = 0,
+ MCCPU,
+};
+
+static struct tegra_devfreq_device_config actmon_device_configs[] = {
+ {
+ /* MCALL */
+ .offset = 0x1c0,
+ .irq_mask = 1 << 26,
+ .boost_up_coeff = 200,
+ .boost_down_coeff = 50,
+ .boost_up_threshold = 60,
+ .boost_down_threshold = 40,
+ },
+ {
+ /* MCCPU */
+ .offset = 0x200,
+ .irq_mask = 1 << 25,
+ .boost_up_coeff = 800,
+ .boost_down_coeff = 90,
+ .boost_up_threshold = 27,
+ .boost_down_threshold = 10,
+ .avg_dependency_threshold = 50000,
+ },
+};
+
+/**
+ * struct tegra_devfreq_device - state specific to an ACTMON device
+ *
+ * Frequencies are in kHz.
+ */
+struct tegra_devfreq_device {
+ const struct tegra_devfreq_device_config *config;
+
+ void __iomem *regs;
+ u32 avg_band_freq;
+ u32 avg_count;
+
+ unsigned long target_freq;
+ unsigned long boost_freq;
+};
+
+struct tegra_devfreq {
+ struct devfreq *devfreq;
+
+ struct platform_device *pdev;
+ struct reset_control *reset;
+ struct clk *clock;
+ void __iomem *regs;
+
+ spinlock_t lock;
+
+ struct clk *emc_clock;
+ unsigned long max_freq;
+ unsigned long cur_freq;
+ struct notifier_block rate_change_nb;
+
+ struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
+};
+
+struct tegra_actmon_emc_ratio {
+ unsigned long cpu_freq;
+ unsigned long emc_freq;
+};
+
+static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+ { 1400000, ULONG_MAX },
+ { 1200000, 750000 },
+ { 1100000, 600000 },
+ { 1000000, 500000 },
+ { 800000, 375000 },
+ { 500000, 200000 },
+ { 250000, 100000 },
+};
+
+static unsigned long do_percent(unsigned long val, unsigned int pct)
+{
+ return val * pct / 100;
+}
+
+static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
+{
+ u32 avg = dev->avg_count;
+ u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
+
+ writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
+ avg = max(avg, band);
+ writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
+}
+
+static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
+{
+ u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+
+ writel(do_percent(val, dev->config->boost_up_threshold),
+ dev->regs + ACTMON_DEV_UPPER_WMARK);
+
+ writel(do_percent(val, dev->config->boost_down_threshold),
+ dev->regs + ACTMON_DEV_LOWER_WMARK);
+}
+
+static void actmon_write_barrier(struct tegra_devfreq *tegra)
+{
+ /* ensure the update has reached the ACTMON */
+ wmb();
+ readl(tegra->regs + ACTMON_GLB_STATUS);
+}
+
+static irqreturn_t actmon_isr(int irq, void *data)
+{
+ struct tegra_devfreq *tegra = data;
+ struct tegra_devfreq_device *dev = NULL;
+ unsigned long flags;
+ u32 val;
+ unsigned int i;
+
+ val = readl(tegra->regs + ACTMON_GLB_STATUS);
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ if (val & tegra->devices[i].config->irq_mask) {
+ dev = tegra->devices + i;
+ break;
+ }
+ }
+
+ if (!dev)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
+ tegra_devfreq_update_avg_wmark(dev);
+
+ val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
+ if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
+ val = readl(dev->regs + ACTMON_DEV_CTRL) |
+ ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
+ ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+
+ /*
+ * new_boost = min(old_boost * up_coef + step, max_freq)
+ */
+ dev->boost_freq = do_percent(dev->boost_freq,
+ dev->config->boost_up_coeff);
+ dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
+ if (dev->boost_freq >= tegra->max_freq) {
+ dev->boost_freq = tegra->max_freq;
+ val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+ }
+ writel(val, dev->regs + ACTMON_DEV_CTRL);
+ } else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
+ val = readl(dev->regs + ACTMON_DEV_CTRL) |
+ ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
+ ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+
+ /*
+ * new_boost = old_boost * down_coef
+ * or 0 if (old_boost * down_coef < step / 2)
+ */
+ dev->boost_freq = do_percent(dev->boost_freq,
+ dev->config->boost_down_coeff);
+ if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
+ dev->boost_freq = 0;
+ val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ }
+ writel(val, dev->regs + ACTMON_DEV_CTRL);
+ }
+
+ if (dev->config->avg_dependency_threshold) {
+ val = readl(dev->regs + ACTMON_DEV_CTRL);
+ if (dev->avg_count >= dev->config->avg_dependency_threshold)
+ val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ else if (dev->boost_freq == 0)
+ val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ writel(val, dev->regs + ACTMON_DEV_CTRL);
+ }
+
+ writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
+
+ actmon_write_barrier(tegra);
+
+ spin_unlock_irqrestore(&tegra->lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
+ unsigned long cpu_freq)
+{
+ unsigned int i;
+ struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+
+ for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
+ if (cpu_freq >= ratio->cpu_freq) {
+ if (ratio->emc_freq >= tegra->max_freq)
+ return tegra->max_freq;
+ else
+ return ratio->emc_freq;
+ }
+ }
+
+ return 0;
+}
+
+static void actmon_update_target(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
+{
+ unsigned long cpu_freq = 0;
+ unsigned long static_cpu_emc_freq = 0;
+ unsigned int avg_sustain_coef;
+ unsigned long flags;
+
+ if (dev->config->avg_dependency_threshold) {
+ cpu_freq = cpufreq_get(0);
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+ }
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
+ avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+ dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
+ dev->target_freq += dev->boost_freq;
+
+ if (dev->avg_count >= dev->config->avg_dependency_threshold)
+ dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+
+ spin_unlock_irqrestore(&tegra->lock, flags);
+}
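
To make the arithmetic concrete (an editorial example, not part of the patch),
take the MCALL configuration above (boost_up_threshold = 60) and an assumed
avg_count of 1,200,000 over the 12 ms sampling period:

	avg_sustain_coef = 100 * 100 / 60     = 166		(integer division)
	target_freq      = 1200000 / 12       = 100000 kHz
	target_freq      = 100000 * 166 / 100 = 166000 kHz
	target_freq     += boost_freq				(any active boost)

In other words, the measured average activity is scaled up so that running at
the resulting frequency keeps utilization near the boost-up threshold
(100000 / 166000 is roughly 60%).
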
+
+static irqreturn_t actmon_thread_isr(int irq, void *data)
+{
+ struct tegra_devfreq *tegra = data;
+
+ mutex_lock(&tegra->devfreq->lock);
+ update_devfreq(tegra->devfreq);
+ mutex_unlock(&tegra->devfreq->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct clk_notifier_data *data = ptr;
+ struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
+ rate_change_nb);
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ switch (action) {
+ case POST_RATE_CHANGE:
+ tegra->cur_freq = data->new_rate / KHZ;
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
+ tegra_devfreq_update_wmark(tegra, tegra->devices + i);
+
+ actmon_write_barrier(tegra);
+ break;
+ case PRE_RATE_CHANGE:
+ /* fall through */
+ case ABORT_RATE_CHANGE:
+ break;
+	}
+
+ spin_unlock_irqrestore(&tegra->lock, flags);
+
+ return NOTIFY_OK;
+}
+
+static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
+{
+ u32 val;
+
+ dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
+ dev->target_freq = tegra->cur_freq;
+
+ dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
+
+ tegra_devfreq_update_avg_wmark(dev);
+ tegra_devfreq_update_wmark(tegra, dev);
+
+ writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
+ writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
+
+ val = 0;
+ val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
+ ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
+ ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+ val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
+ << ACTMON_DEV_CTRL_K_VAL_SHIFT;
+ val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
+ << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
+ val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
+ << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
+ val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
+ ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+ writel(val, dev->regs + ACTMON_DEV_CTRL);
+
+ actmon_write_barrier(tegra);
+
+ val = readl(dev->regs + ACTMON_DEV_CTRL);
+ val |= ACTMON_DEV_CTRL_ENB;
+ writel(val, dev->regs + ACTMON_DEV_CTRL);
+
+ actmon_write_barrier(tegra);
+}
+
+static int tegra_devfreq_suspend(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_devfreq *tegra;
+ struct tegra_devfreq_device *actmon_dev;
+ unsigned int i;
+ u32 val;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ tegra = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ actmon_dev = &tegra->devices[i];
+
+ val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
+ val &= ~ACTMON_DEV_CTRL_ENB;
+ writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
+
+ writel(ACTMON_INTR_STATUS_CLEAR,
+ actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
+
+ actmon_write_barrier(tegra);
+ }
+
+ return 0;
+}
+
+static int tegra_devfreq_resume(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_devfreq *tegra;
+ struct tegra_devfreq_device *actmon_dev;
+ unsigned int i;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ tegra = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ actmon_dev = &tegra->devices[i];
+
+ tegra_actmon_configure_device(tegra, actmon_dev);
+ }
+
+ return 0;
+}
+
+static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct platform_device *pdev;
+ struct tegra_devfreq *tegra;
+ struct dev_pm_opp *opp;
+ unsigned long rate = *freq * KHZ;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ tegra = platform_get_drvdata(pdev);
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, &rate, flags);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+ return PTR_ERR(opp);
+ }
+ rate = dev_pm_opp_get_freq(opp);
+ rcu_read_unlock();
+
+ /* TODO: Once we have per-user clk constraints, set a floor */
+ clk_set_rate(tegra->emc_clock, rate);
+
+ /* TODO: Set voltage as well */
+
+ return 0;
+}
+
+static int tegra_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev;
+ struct tegra_devfreq *tegra;
+ struct tegra_devfreq_device *actmon_dev;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ tegra = platform_get_drvdata(pdev);
+
+ stat->current_frequency = tegra->cur_freq;
+
+ /* To be used by the tegra governor */
+ stat->private_data = tegra;
+
+ /* The below are to be used by the other governors */
+
+ actmon_dev = &tegra->devices[MCALL];
+
+ /* Number of cycles spent on memory access */
+ stat->busy_time = actmon_dev->avg_count;
+
+ /* The bus can be considered to be saturated way before 100% */
+ stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+
+ /* Number of cycles in a sampling period */
+ stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
+
+ return 0;
+}
+
+static int tegra_devfreq_get_target(struct devfreq *devfreq,
+ unsigned long *freq)
+{
+ struct devfreq_dev_status stat;
+ struct tegra_devfreq *tegra;
+ struct tegra_devfreq_device *dev;
+ unsigned long target_freq = 0;
+ unsigned int i;
+ int err;
+
+ err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
+ if (err)
+ return err;
+
+ tegra = stat.private_data;
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ dev = &tegra->devices[i];
+
+ actmon_update_target(tegra, dev);
+
+ target_freq = max(target_freq, dev->target_freq);
+ }
+
+ *freq = target_freq;
+
+ return 0;
+}
+
+static int tegra_devfreq_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ return 0;
+}
+
+static struct devfreq_governor tegra_devfreq_governor = {
+ .name = "tegra",
+ .get_target_freq = tegra_devfreq_get_target,
+ .event_handler = tegra_devfreq_event_handler,
+};
+
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+ .polling_ms = 0,
+ .target = tegra_devfreq_target,
+ .get_dev_status = tegra_devfreq_get_dev_status,
+};
+
+static int tegra_devfreq_probe(struct platform_device *pdev)
+{
+ struct tegra_devfreq *tegra;
+ struct tegra_devfreq_device *dev;
+ struct resource *res;
+ unsigned long max_freq;
+ unsigned int i;
+ int irq;
+ int err;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ spin_lock_init(&tegra->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get regs resource\n");
+ return -ENODEV;
+ }
+
+ tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->regs)) {
+ dev_err(&pdev->dev, "Failed to get IO memory\n");
+ return PTR_ERR(tegra->regs);
+ }
+
+ tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
+ if (IS_ERR(tegra->reset)) {
+ dev_err(&pdev->dev, "Failed to get reset\n");
+ return PTR_ERR(tegra->reset);
+ }
+
+ tegra->clock = devm_clk_get(&pdev->dev, "actmon");
+ if (IS_ERR(tegra->clock)) {
+ dev_err(&pdev->dev, "Failed to get actmon clock\n");
+ return PTR_ERR(tegra->clock);
+ }
+
+ tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(tegra->emc_clock)) {
+ dev_err(&pdev->dev, "Failed to get emc clock\n");
+ return PTR_ERR(tegra->emc_clock);
+ }
+
+ err = of_init_opp_table(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init operating point table\n");
+ return err;
+ }
+
+ tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
+ err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to register rate change notifier\n");
+ return err;
+ }
+
+ reset_control_assert(tegra->reset);
+
+ err = clk_prepare_enable(tegra->clock);
+ if (err) {
+ reset_control_deassert(tegra->reset);
+ return err;
+ }
+
+ reset_control_deassert(tegra->reset);
+
+ max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+ tegra->max_freq = max_freq / KHZ;
+
+ clk_set_rate(tegra->emc_clock, max_freq);
+
+ tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
+ writel(ACTMON_SAMPLING_PERIOD - 1,
+ tegra->regs + ACTMON_GLB_PERIOD_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+ dev = tegra->devices + i;
+ dev->config = actmon_device_configs + i;
+ dev->regs = tegra->regs + dev->config->offset;
+
+ tegra_actmon_configure_device(tegra, tegra->devices + i);
+ }
+
+ err = devfreq_add_governor(&tegra_devfreq_governor);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add governor\n");
+ return err;
+ }
+
+ tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
+ tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
+ &tegra_devfreq_profile,
+ "tegra",
+ NULL);
+
+ irq = platform_get_irq(pdev, 0);
+ err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
+ actmon_thread_isr, IRQF_SHARED,
+ "tegra-devfreq", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Interrupt request failed\n");
+ return err;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
+ return 0;
+}
+
+static int tegra_devfreq_remove(struct platform_device *pdev)
+{
+ struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+
+ clk_disable_unprepare(tegra->clock);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
+ tegra_devfreq_suspend,
+ tegra_devfreq_resume);
+
+static struct of_device_id tegra_devfreq_of_match[] = {
+ { .compatible = "nvidia,tegra124-actmon" },
+ { },
+};
+
+static struct platform_driver tegra_devfreq_driver = {
+ .probe = tegra_devfreq_probe,
+ .remove = tegra_devfreq_remove,
+ .driver = {
+ .name = "tegra-devfreq",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_devfreq_of_match,
+ .pm = &tegra_devfreq_pm_ops,
+ },
+};
+module_platform_driver(tegra_devfreq_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Tegra devfreq driver");
+MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
+MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
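For reference, the target-frequency update in actmon_update_target() above reduces to a few integer operations: the averaged activity count is divided by the sampling period, scaled by 100 * 100 / boost_up_threshold via do_percent(), and the boost term is added on top. A minimal userspace sketch of that arithmetic follows; it assumes do_percent(v, p) means v * p / 100 as the driver uses it, and the sampling period, threshold, counts and boost values are illustrative stand-ins for the real constants defined elsewhere in the driver.

#include <stdio.h>

/* same semantics as the driver's do_percent(): scale a value by a percentage */
static unsigned long do_percent(unsigned long val, unsigned int pct)
{
	return val * pct / 100;
}

int main(void)
{
	unsigned long avg_count = 1200000;     /* hypothetical averaged activity count */
	unsigned long boost_freq = 20000;      /* hypothetical boost term, in kHz */
	unsigned int sampling_period = 12;     /* assumed ACTMON_SAMPLING_PERIOD, in ms */
	unsigned int boost_up_threshold = 75;  /* assumed per-device boost_up_threshold, in % */

	unsigned long target = avg_count / sampling_period;
	unsigned int avg_sustain_coef = 100 * 100 / boost_up_threshold;

	target = do_percent(target, avg_sustain_coef);
	target += boost_freq;

	printf("target frequency: %lu kHz\n", target);
	return 0;
}

With these example numbers the result is 153000 kHz: 100000 kHz of sustained activity, stretched by the 75% boost-up threshold, plus the 20000 kHz boost.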
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f2b2c4e87aef..faf30a4e642b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -184,7 +184,7 @@ config TEGRA20_APB_DMA
config S3C24XX_DMAC
tristate "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX && !S3C24XX_DMA
+ depends on ARCH_S3C24XX
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index de361a156b34..5a635646e05c 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
{
const struct acpi_csrt_shared_info *si;
struct list_head resource_list;
- struct resource_list_entry *rentry;
+ struct resource_entry *rentry;
resource_size_t mem = 0, irq = 0;
int ret;
@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
return 0;
list_for_each_entry(rentry, &resource_list, node) {
- if (resource_type(&rentry->res) == IORESOURCE_MEM)
- mem = rentry->res.start;
- else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
- irq = rentry->res.start;
+ if (resource_type(rentry->res) == IORESOURCE_MEM)
+ mem = rentry->res->start;
+ else if (resource_type(rentry->res) == IORESOURCE_IRQ)
+ irq = rentry->res->start;
}
acpi_dev_free_resource_list(&resource_list);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 380478562b7d..5c062548957c 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->regs = chip->regs;
chip->dw = dw;
- pm_runtime_enable(chip->dev);
pm_runtime_get_sync(chip->dev);
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
@@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip)
}
pm_runtime_put_sync_suspend(chip->dev);
- pm_runtime_disable(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index a630161473a4..32ea1aca7a0e 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev)
if (err)
return err;
+ pm_runtime_enable(&pdev->dev);
+
err = dw_dma_probe(chip, pdata);
if (err)
goto err_dw_dma_probe;
@@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev)
return 0;
err_dw_dma_probe:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
return err;
}
@@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dw_dma_remove(chip);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
return 0;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 49c265255a07..cb59619df23f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -385,4 +385,11 @@ config EDAC_ALTERA_MC
preloader must initialize the SDRAM before loading
the kernel.
+config EDAC_SYNOPSYS
+ tristate "Synopsys DDR Memory Controller"
+ depends on EDAC_MM_EDAC && ARCH_ZYNQ
+ help
+ Support for error detection and correction on the Synopsys DDR
+ memory controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index d40c69a04df7..b255f362b1db 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -67,3 +67,4 @@ obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o
+obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 670d2829c547..c84eecb191ef 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -157,7 +157,7 @@ struct dev_ch_attribute {
};
#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
- struct dev_ch_attribute dev_attr_legacy_##_name = \
+ static struct dev_ch_attribute dev_attr_legacy_##_name = \
{ __ATTR(_name, _mode, _show, _store), (_var) }
#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
@@ -850,20 +850,20 @@ static const struct file_operations debug_fake_inject_fops = {
#endif
/* default Control file */
-DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
+static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
/* default Attribute files */
-DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
-DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
-DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
-DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
-DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
-DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
-DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
-DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
+static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
+static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
+static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
+static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
+static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
+static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
+static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
/* memory scrubber attribute file */
-DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
+static DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
static struct attribute *mci_attrs[] = {
&dev_attr_reset_counters.attr,
@@ -989,7 +989,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
err = bus_register(mci->bus);
if (err < 0)
- return err;
+ goto fail_free_name;
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
@@ -1005,9 +1005,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
- bus_unregister(mci->bus);
- kfree(mci->bus->name);
- return err;
+ goto fail_unregister_bus;
}
if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
@@ -1015,15 +1013,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
}
+
if (mci->set_sdram_scrub_rate) {
dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
}
- err = device_create_file(&mci->dev,
- &dev_attr_sdram_scrub_rate);
+
+ err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
if (err) {
edac_dbg(1, "failure: create sdram_scrub_rate\n");
- goto fail2;
+ goto fail_unregister_dev;
}
}
/*
@@ -1032,8 +1031,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
for (i = 0; i < mci->tot_dimms; i++) {
struct dimm_info *dimm = mci->dimms[i];
/* Only expose populated DIMMs */
- if (dimm->nr_pages == 0)
+ if (!dimm->nr_pages)
continue;
+
#ifdef CONFIG_EDAC_DEBUG
edac_dbg(1, "creating dimm%d, located at ", i);
if (edac_debug_level >= 1) {
@@ -1048,14 +1048,14 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
err = edac_create_dimm_object(mci, dimm, i);
if (err) {
edac_dbg(1, "failure: create dimm %d obj\n", i);
- goto fail;
+ goto fail_unregister_dimm;
}
}
#ifdef CONFIG_EDAC_LEGACY_SYSFS
err = edac_create_csrow_objects(mci);
if (err < 0)
- goto fail;
+ goto fail_unregister_dimm;
#endif
#ifdef CONFIG_EDAC_DEBUG
@@ -1063,16 +1063,19 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
#endif
return 0;
-fail:
+fail_unregister_dimm:
for (i--; i >= 0; i--) {
struct dimm_info *dimm = mci->dimms[i];
- if (dimm->nr_pages == 0)
+ if (!dimm->nr_pages)
continue;
+
device_unregister(&dimm->dev);
}
-fail2:
+fail_unregister_dev:
device_unregister(&mci->dev);
+fail_unregister_bus:
bus_unregister(mci->bus);
+fail_free_name:
kfree(mci->bus->name);
return err;
}
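The edac_create_sysfs_mci_device() hunk above renames the numeric error labels (fail, fail2) after the teardown step they perform and adds a fail_free_name target so the bus name is released on every error path. A small self-contained sketch of the same unwind idiom, with hypothetical setup/teardown helpers standing in for the EDAC calls:

#include <stdio.h>

/* hypothetical helpers standing in for bus_register(), device_add() etc. */
static int register_bus(void)    { puts("register bus");   return 0; }
static int add_device(void)      { puts("add device");     return 0; }
static int create_files(void)    { puts("create files");   return -1; } /* simulate a failure */
static void remove_device(void)  { puts("remove device"); }
static void unregister_bus(void) { puts("unregister bus"); }
static void free_name(void)      { puts("free name"); }

static int create_object(void)
{
	int err;

	err = register_bus();
	if (err)
		goto fail_free_name;

	err = add_device();
	if (err)
		goto fail_unregister_bus;

	err = create_files();
	if (err)
		goto fail_unregister_dev;

	return 0;

	/* labels are named after the first teardown step they perform and
	 * fall through in reverse order of construction */
fail_unregister_dev:
	remove_device();
fail_unregister_bus:
	unregister_bus();
fail_free_name:
	free_name();
	return err;
}

int main(void)
{
	return create_object() ? 1 : 0;
}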
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 6247d186177e..e9f8a393915a 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -279,11 +279,6 @@ static inline u32 i5100_recmema_rank(u32 a)
return i5100_nrecmema_rank(a);
}
-static inline u32 i5100_recmema_dm_buf_id(u32 a)
-{
- return i5100_nrecmema_dm_buf_id(a);
-}
-
static inline u32 i5100_recmemb_cas(u32 a)
{
return i5100_nrecmemb_cas(a);
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 0bd91a802c67..f7681b553fd5 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -197,7 +197,7 @@ static int inj_bank_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
-struct dfs_node {
+static struct dfs_node {
char *name;
struct dentry *d;
const struct file_operations *fops;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index ffb1a9a15ccd..1fa76a588af3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1,5 +1,5 @@
/*
- * Freescale MPC85xx Memory Controller kenel module
+ * Freescale MPC85xx Memory Controller kernel module
*
* Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
*
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 8c6256436227..4498baf9ce05 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -1,5 +1,5 @@
/*
- * Freescale MPC85xx Memory Controller kenel module
+ * Freescale MPC85xx Memory Controller kernel module
* Author: Dave Jiang <djiang@mvista.com>
*
* 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 6366e880f978..0574e1bbe45c 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -789,7 +789,8 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
ctl = (ctl & 0xff00ffff) | 0x10000;
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
- if (edac_mc_add_mc(mci)) {
+ res = edac_mc_add_mc(mci);
+ if (res) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
new file mode 100644
index 000000000000..1c9691535e13
--- /dev/null
+++ b/drivers/edac/synopsys_edac.c
@@ -0,0 +1,535 @@
+/*
+ * Synopsys DDR ECC Driver
+ * This driver is based on the ppc4xx_edac.c driver
+ *
+ * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details
+ */
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "edac_core.h"
+
+/* Number of cs_rows needed per memory controller */
+#define SYNPS_EDAC_NR_CSROWS 1
+
+/* Number of channels per memory controller */
+#define SYNPS_EDAC_NR_CHANS 1
+
+/* Granularity of reported error in bytes */
+#define SYNPS_EDAC_ERR_GRAIN 1
+
+#define SYNPS_EDAC_MSG_SIZE 256
+
+#define SYNPS_EDAC_MOD_STRING "synps_edac"
+#define SYNPS_EDAC_MOD_VER "1"
+
+/* Synopsys DDR memory controller registers that are relevant to ECC */
+#define CTRL_OFST 0x0
+#define T_ZQ_OFST 0xA4
+
+/* ECC control register */
+#define ECC_CTRL_OFST 0xC4
+/* ECC log register */
+#define CE_LOG_OFST 0xC8
+/* ECC address register */
+#define CE_ADDR_OFST 0xCC
+/* ECC data[31:0] register */
+#define CE_DATA_31_0_OFST 0xD0
+
+/* Uncorrectable error info registers */
+#define UE_LOG_OFST 0xDC
+#define UE_ADDR_OFST 0xE0
+#define UE_DATA_31_0_OFST 0xE4
+
+#define STAT_OFST 0xF0
+#define SCRUB_OFST 0xF4
+
+/* Control register bit field definitions */
+#define CTRL_BW_MASK 0xC
+#define CTRL_BW_SHIFT 2
+
+#define DDRCTL_WDTH_16 1
+#define DDRCTL_WDTH_32 0
+
+/* ZQ register bit field definitions */
+#define T_ZQ_DDRMODE_MASK 0x2
+
+/* ECC control register bit field definitions */
+#define ECC_CTRL_CLR_CE_ERR 0x2
+#define ECC_CTRL_CLR_UE_ERR 0x1
+
+/* ECC correctable/uncorrectable error log register definitions */
+#define LOG_VALID 0x1
+#define CE_LOG_BITPOS_MASK 0xFE
+#define CE_LOG_BITPOS_SHIFT 1
+
+/* ECC correctable/uncorrectable error address register definitions */
+#define ADDR_COL_MASK 0xFFF
+#define ADDR_ROW_MASK 0xFFFF000
+#define ADDR_ROW_SHIFT 12
+#define ADDR_BANK_MASK 0x70000000
+#define ADDR_BANK_SHIFT 28
+
+/* ECC statistic register definitions */
+#define STAT_UECNT_MASK 0xFF
+#define STAT_CECNT_MASK 0xFF00
+#define STAT_CECNT_SHIFT 8
+
+/* ECC scrub register definitions */
+#define SCRUB_MODE_MASK 0x7
+#define SCRUB_MODE_SECDED 0x4
+
+/**
+ * struct ecc_error_info - ECC error log information
+ * @row: Row number
+ * @col: Column number
+ * @bank: Bank number
+ * @bitpos: Bit position
+ * @data: Data causing the error
+ */
+struct ecc_error_info {
+ u32 row;
+ u32 col;
+ u32 bank;
+ u32 bitpos;
+ u32 data;
+};
+
+/**
+ * struct synps_ecc_status - ECC status information to report
+ * @ce_cnt: Correctable error count
+ * @ue_cnt: Uncorrectable error count
+ * @ceinfo: Correctable error log information
+ * @ueinfo: Uncorrectable error log information
+ */
+struct synps_ecc_status {
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_error_info ceinfo;
+ struct ecc_error_info ueinfo;
+};
+
+/**
+ * struct synps_edac_priv - DDR memory controller private instance data
+ * @baseaddr: Base address of the DDR controller
+ * @message: Buffer for framing the event specific info
+ * @stat: ECC status information
+ * @ce_cnt: Correctable Error count
+ * @ue_cnt: Uncorrectable Error count
+ */
+struct synps_edac_priv {
+ void __iomem *baseaddr;
+ char message[SYNPS_EDAC_MSG_SIZE];
+ struct synps_ecc_status stat;
+ u32 ce_cnt;
+ u32 ue_cnt;
+};
+
+/**
+ * synps_edac_geterror_info - Get the current ecc error info
+ * @base: Pointer to the base address of the ddr memory controller
+ * @p: Pointer to the synopsys ecc status structure
+ *
+ * Determines whether there is any ECC error or not.
+ *
+ * Return: 1 if there is no error, otherwise 0.
+ */
+static int synps_edac_geterror_info(void __iomem *base,
+ struct synps_ecc_status *p)
+{
+ u32 regval, clearval = 0;
+
+ regval = readl(base + STAT_OFST);
+ if (!regval)
+ return 1;
+
+ p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
+ p->ue_cnt = regval & STAT_UECNT_MASK;
+
+ regval = readl(base + CE_LOG_OFST);
+ if (!(p->ce_cnt && (regval & LOG_VALID)))
+ goto ue_err;
+
+ p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
+ regval = readl(base + CE_ADDR_OFST);
+ p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
+ p->ceinfo.col = regval & ADDR_COL_MASK;
+ p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
+ p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
+ edac_dbg(3, "ce bit position: %d data: %d\n", p->ceinfo.bitpos,
+ p->ceinfo.data);
+ clearval = ECC_CTRL_CLR_CE_ERR;
+
+ue_err:
+ regval = readl(base + UE_LOG_OFST);
+ if (!(p->ue_cnt && (regval & LOG_VALID)))
+ goto out;
+
+ regval = readl(base + UE_ADDR_OFST);
+ p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
+ p->ueinfo.col = regval & ADDR_COL_MASK;
+ p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
+ p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
+ clearval |= ECC_CTRL_CLR_UE_ERR;
+
+out:
+ writel(clearval, base + ECC_CTRL_OFST);
+ writel(0x0, base + ECC_CTRL_OFST);
+
+ return 0;
+}
+
+/**
+ * synps_edac_handle_error - Handle controller error types CE and UE
+ * @mci: Pointer to the edac memory controller instance
+ * @p: Pointer to the synopsys ecc status structure
+ *
+ * Handles the controller's ECC correctable and uncorrectable errors.
+ */
+static void synps_edac_handle_error(struct mem_ctl_info *mci,
+ struct synps_ecc_status *p)
+{
+ struct synps_edac_priv *priv = mci->pvt_info;
+ struct ecc_error_info *pinf;
+
+ if (p->ce_cnt) {
+ pinf = &p->ceinfo;
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+ "DDR ECC error type :%s Row %d Bank %d Col %d ",
+ "CE", pinf->row, pinf->bank, pinf->col);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ p->ce_cnt, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (p->ue_cnt) {
+ pinf = &p->ueinfo;
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+ "DDR ECC error type :%s Row %d Bank %d Col %d ",
+ "UE", pinf->row, pinf->bank, pinf->col);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ p->ue_cnt, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * synps_edac_check - Check controller for ECC errors
+ * @mci: Pointer to the edac memory controller instance
+ *
+ * Used to check and post ECC errors. Called by the polling thread.
+ */
+static void synps_edac_check(struct mem_ctl_info *mci)
+{
+ struct synps_edac_priv *priv = mci->pvt_info;
+ int status;
+
+ status = synps_edac_geterror_info(priv->baseaddr, &priv->stat);
+ if (status)
+ return;
+
+ priv->ce_cnt += priv->stat.ce_cnt;
+ priv->ue_cnt += priv->stat.ue_cnt;
+ synps_edac_handle_error(mci, &priv->stat);
+
+ edac_dbg(3, "Total error count ce %d ue %d\n",
+ priv->ce_cnt, priv->ue_cnt);
+}
+
+/**
+ * synps_edac_get_dtype - Return the controller memory width
+ * @base: Pointer to the ddr memory controller base address
+ *
+ * Get the EDAC device type width appropriate for the current controller
+ * configuration.
+ *
+ * Return: a device type width enumeration.
+ */
+static enum dev_type synps_edac_get_dtype(const void __iomem *base)
+{
+ enum dev_type dt;
+ u32 width;
+
+ width = readl(base + CTRL_OFST);
+ width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
+
+ switch (width) {
+ case DDRCTL_WDTH_16:
+ dt = DEV_X2;
+ break;
+ case DDRCTL_WDTH_32:
+ dt = DEV_X4;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ return dt;
+}
+
+/**
+ * synps_edac_get_eccstate - Return the controller ecc enable/disable status
+ * @base: Pointer to the ddr memory controller base address
+ *
+ * Get the ECC enable/disable status for the controller.
+ *
+ * Return: an ECC status boolean, i.e. true/false for enabled/disabled.
+ */
+static bool synps_edac_get_eccstate(void __iomem *base)
+{
+ enum dev_type dt;
+ u32 ecctype;
+ bool state = false;
+
+ dt = synps_edac_get_dtype(base);
+ if (dt == DEV_UNKNOWN)
+ return state;
+
+ ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
+ if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
+ state = true;
+
+ return state;
+}
+
+/**
+ * synps_edac_get_memsize - reads the size of the attached memory device
+ *
+ * Return: the memory size in bytes
+ */
+static u32 synps_edac_get_memsize(void)
+{
+ struct sysinfo inf;
+
+ si_meminfo(&inf);
+
+ return inf.totalram * inf.mem_unit;
+}
+
+/**
+ * synps_edac_get_mtype - Returns controller memory type
+ * @base: Pointer to the ddr memory controller base address
+ *
+ * Get the EDAC memory type appropriate for the current controller
+ * configuration.
+ *
+ * Return: a memory type enumeration.
+ */
+static enum mem_type synps_edac_get_mtype(const void __iomem *base)
+{
+ enum mem_type mt;
+ u32 memtype;
+
+ memtype = readl(base + T_ZQ_OFST);
+
+ if (memtype & T_ZQ_DDRMODE_MASK)
+ mt = MEM_DDR3;
+ else
+ mt = MEM_DDR2;
+
+ return mt;
+}
+
+/**
+ * synps_edac_init_csrows - Initialize the cs row data
+ * @mci: Pointer to the edac memory controller instance
+ *
+ * Initializes the chip select rows associated with the EDAC memory
+ * controller instance
+ *
+ * Return: Unconditionally 0.
+ */
+static int synps_edac_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ struct synps_edac_priv *priv = mci->pvt_info;
+ u32 size;
+ int row, j;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ size = synps_edac_get_memsize();
+
+ for (j = 0; j < csi->nr_channels; j++) {
+ dimm = csi->channels[j]->dimm;
+ dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->mtype = synps_edac_get_mtype(priv->baseaddr);
+ dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
+ dimm->grain = SYNPS_EDAC_ERR_GRAIN;
+ dimm->dtype = synps_edac_get_dtype(priv->baseaddr);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * synps_edac_mc_init - Initialize driver instance
+ * @mci: Pointer to the edac memory controller instance
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Performs initialization of the EDAC memory controller instance and
+ * related driver-private data associated with the memory controller the
+ * instance is bound to.
+ *
+ * Return: Always zero.
+ */
+static int synps_edac_mc_init(struct mem_ctl_info *mci,
+ struct platform_device *pdev)
+{
+ int status;
+ struct synps_edac_priv *priv;
+
+ mci->pdev = &pdev->dev;
+ priv = mci->pvt_info;
+ platform_set_drvdata(pdev, mci);
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "synps_ddr_controller";
+ mci->dev_name = SYNPS_EDAC_MOD_STRING;
+ mci->mod_name = SYNPS_EDAC_MOD_VER;
+ mci->mod_ver = "1";
+
+ edac_op_state = EDAC_OPSTATE_POLL;
+ mci->edac_check = synps_edac_check;
+ mci->ctl_page_to_phys = NULL;
+
+ status = synps_edac_init_csrows(mci);
+
+ return status;
+}
+
+/**
+ * synps_edac_mc_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Probes a specific controller instance for binding with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int synps_edac_mc_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
+ struct synps_edac_priv *priv;
+ int rc;
+ struct resource *res;
+ void __iomem *baseaddr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ if (!synps_edac_get_eccstate(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
+ return -ENXIO;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = SYNPS_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = SYNPS_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct synps_edac_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed memory allocation for mc instance\n");
+ return -ENOMEM;
+ }
+
+ priv = mci->pvt_info;
+ priv->baseaddr = baseaddr;
+ rc = synps_edac_mc_init(mci, pdev);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to initialize instance\n");
+ goto free_edac_mc;
+ }
+
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to register with EDAC core\n");
+ goto free_edac_mc;
+ }
+
+ /*
+ * Start capturing the correctable and uncorrectable errors. A write of
+ * 0 starts the counters.
+ */
+ writel(0x0, baseaddr + ECC_CTRL_OFST);
+ return rc;
+
+free_edac_mc:
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/**
+ * synps_edac_mc_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Return: Unconditionally 0
+ */
+static int synps_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct of_device_id synps_edac_match[] = {
+ { .compatible = "xlnx,zynq-ddrc-a05", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, synps_edac_match);
+
+static struct platform_driver synps_edac_mc_driver = {
+ .driver = {
+ .name = "synopsys-edac",
+ .of_match_table = synps_edac_match,
+ },
+ .probe = synps_edac_mc_probe,
+ .remove = synps_edac_mc_remove,
+};
+
+module_platform_driver(synps_edac_mc_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Synopsys DDR ECC driver");
+MODULE_LICENSE("GPL v2");
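The STAT and CE/UE address registers read in synps_edac_geterror_info() above pack several fields into one 32-bit word, extracted with the mask/shift definitions near the top of the file. A standalone sketch of that decoding, reusing the same mask and shift values with made-up register contents purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* mask/shift values as defined in synopsys_edac.c */
#define STAT_UECNT_MASK   0xFF
#define STAT_CECNT_MASK   0xFF00
#define STAT_CECNT_SHIFT  8

#define ADDR_COL_MASK     0xFFF
#define ADDR_ROW_MASK     0xFFFF000
#define ADDR_ROW_SHIFT    12
#define ADDR_BANK_MASK    0x70000000
#define ADDR_BANK_SHIFT   28

int main(void)
{
	uint32_t stat = 0x0302;        /* made-up value: 3 correctable, 2 uncorrectable */
	uint32_t addr = 0x2012A456;    /* made-up CE/UE address register value */

	printf("ce_cnt = %u, ue_cnt = %u\n",
	       (unsigned int)((stat & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT),
	       (unsigned int)(stat & STAT_UECNT_MASK));

	printf("bank = %u, row = %u, col = %u\n",
	       (unsigned int)((addr & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT),
	       (unsigned int)((addr & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT),
	       (unsigned int)(addr & ADDR_COL_MASK));

	return 0;
}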
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 5d7ab577fba9..2bb82e55065a 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -173,6 +173,7 @@ static int adc_jack_remove(struct platform_device *pdev)
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
+ iio_channel_release(data->chan);
return 0;
}
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 043dcd9946c9..8319f25b7145 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -32,7 +32,6 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/of.h>
/*
* extcon_cable_name suggests the standard cable names for commonly used
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 740a14d35072..af165fd0c6f5 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1033,7 +1033,7 @@ static irqreturn_t max77693_muic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct regmap_config max77693_muic_regmap_config = {
+static const struct regmap_config max77693_muic_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index f712d47f30d8..8de4da5c9ab6 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -12,11 +12,11 @@ config EFI_VARS
Note that using this driver in concert with efibootmgr requires
at least test release version 0.5.0-test3 or later, which is
- available from Matt Domsch's website located at:
+ available from:
<http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz>
Subsequent efibootmgr releases may be found at:
- <http://linux.dell.com/efibootmgr>
+ <http://github.com/vathpela/efibootmgr>
config EFI_VARS_PSTORE
tristate "Register efivars backend for pstore"
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9035c1b74d58..3061bb8629dc 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -115,15 +115,24 @@ EFI_ATTR_SHOW(fw_vendor);
EFI_ATTR_SHOW(runtime);
EFI_ATTR_SHOW(config_table);
+static ssize_t fw_platform_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
+}
+
static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
+static struct kobj_attribute efi_attr_fw_platform_size =
+ __ATTR_RO(fw_platform_size);
static struct attribute *efi_subsys_attrs[] = {
&efi_attr_systab.attr,
&efi_attr_fw_vendor.attr,
&efi_attr_runtime.attr,
&efi_attr_config_table.attr,
+ &efi_attr_fw_platform_size.attr,
NULL,
};
@@ -272,15 +281,10 @@ static __init int match_config_table(efi_guid_t *guid,
unsigned long table,
efi_config_table_type_t *table_types)
{
- u8 str[EFI_VARIABLE_GUID_LEN + 1];
int i;
if (table_types) {
- efi_guid_unparse(guid, str);
-
for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
- efi_guid_unparse(&table_types[i].guid, str);
-
if (!efi_guidcmp(*guid, table_types[i].guid)) {
*(table_types[i].ptr) = table;
pr_cont(" %s=0x%lx ",
@@ -293,29 +297,15 @@ static __init int match_config_table(efi_guid_t *guid,
return 0;
}
-int __init efi_config_init(efi_config_table_type_t *arch_tables)
+int __init efi_config_parse_tables(void *config_tables, int count, int sz,
+ efi_config_table_type_t *arch_tables)
{
- void *config_tables, *tablep;
- int i, sz;
-
- if (efi_enabled(EFI_64BIT))
- sz = sizeof(efi_config_table_64_t);
- else
- sz = sizeof(efi_config_table_32_t);
-
- /*
- * Let's see what config tables the firmware passed to us.
- */
- config_tables = early_memremap(efi.systab->tables,
- efi.systab->nr_tables * sz);
- if (config_tables == NULL) {
- pr_err("Could not map Configuration table!\n");
- return -ENOMEM;
- }
+ void *tablep;
+ int i;
tablep = config_tables;
pr_info("");
- for (i = 0; i < efi.systab->nr_tables; i++) {
+ for (i = 0; i < count; i++) {
efi_guid_t guid;
unsigned long table;
@@ -328,8 +318,6 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
if (table64 >> 32) {
pr_cont("\n");
pr_err("Table located above 4GB, disabling EFI.\n");
- early_memunmap(config_tables,
- efi.systab->nr_tables * sz);
return -EINVAL;
}
#endif
@@ -344,13 +332,37 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
tablep += sz;
}
pr_cont("\n");
- early_memunmap(config_tables, efi.systab->nr_tables * sz);
-
set_bit(EFI_CONFIG_TABLES, &efi.flags);
-
return 0;
}
+int __init efi_config_init(efi_config_table_type_t *arch_tables)
+{
+ void *config_tables;
+ int sz, ret;
+
+ if (efi_enabled(EFI_64BIT))
+ sz = sizeof(efi_config_table_64_t);
+ else
+ sz = sizeof(efi_config_table_32_t);
+
+ /*
+ * Let's see what config tables the firmware passed to us.
+ */
+ config_tables = early_memremap(efi.systab->tables,
+ efi.systab->nr_tables * sz);
+ if (config_tables == NULL) {
+ pr_err("Could not map Configuration table!\n");
+ return -ENOMEM;
+ }
+
+ ret = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sz,
+ arch_tables);
+
+ early_memunmap(config_tables, efi.systab->nr_tables * sz);
+ return ret;
+}
+
#ifdef CONFIG_EFI_VARS_MODULE
static int __init efi_load_efivars(void)
{
@@ -403,8 +415,7 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
u64 val;
int i, len;
- if (depth != 1 ||
- (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
+ if (depth != 1 || strcmp(uname, "chosen") != 0)
return 0;
for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index f256ecd8a176..7b2e0496e0c0 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -39,7 +39,7 @@
* fix locking per Peter Chubb's findings
*
* 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse()
+ * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str()
*
* 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
* use list_for_each_safe when deleting vars.
@@ -128,7 +128,7 @@ efivar_guid_read(struct efivar_entry *entry, char *buf)
if (!entry || !buf)
return 0;
- efi_guid_unparse(&var->VendorGuid, str);
+ efi_guid_to_str(&var->VendorGuid, str);
str += strlen(str);
str += sprintf(str, "\n");
@@ -569,7 +569,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
private variables from another's. */
*(short_name + strlen(short_name)) = '-';
- efi_guid_unparse(&new_var->var.VendorGuid,
+ efi_guid_to_str(&new_var->var.VendorGuid,
short_name + strlen(short_name));
new_var->kobj.kset = efivars_kset;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b14bc2b9fb4d..280bc0a63365 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -19,8 +19,23 @@ KBUILD_CFLAGS := $(cflags-y) \
$(call cc-option,-fno-stack-protector)
GCOV_PROFILE := n
+KASAN_SANITIZE := n
lib-y := efi-stub-helper.o
lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o
CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/
+
+#
+# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
+# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
+# So let's apply the __init annotations at the section level, by prefixing
+# the section names directly. This will ensure that even all the inline string
+# literals are covered.
+#
+extra-$(CONFIG_ARM64) := $(lib-y)
+lib-$(CONFIG_ARM64) := $(patsubst %.o,%.init.o,$(lib-y))
+
+OBJCOPYFLAGS := --prefix-alloc-sections=.init
+$(obj)/%.init.o: $(obj)/%.o FORCE
+ $(call if_changed,objcopy)
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index eb48a1a1a576..dcae482a9a17 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -17,10 +17,10 @@
#include "efistub.h"
-static int __init efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
+static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
{
- static efi_guid_t const var_guid __initconst = EFI_GLOBAL_VARIABLE_GUID;
- static efi_char16_t const var_name[] __initconst = {
+ static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
+ static efi_char16_t const var_name[] = {
'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
@@ -164,7 +164,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* for both architectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table,
+unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
unsigned long *image_addr)
{
efi_loaded_image_t *image;
@@ -295,3 +295,62 @@ fail_free_image:
fail:
return EFI_ERROR;
}
+
+/*
+ * This is the base address at which to start allocating virtual memory ranges
+ * for UEFI Runtime Services. This is in the low TTBR0 range so that we can use
+ * any allocation we choose, and eliminate the risk of a conflict after kexec.
+ * The value chosen is the largest non-zero power of 2 suitable for this purpose
+ * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
+ * be mapped efficiently.
+ */
+#define EFI_RT_VIRTUAL_BASE 0x40000000
+
+/*
+ * efi_get_virtmap() - create a virtual mapping for the EFI memory map
+ *
+ * This function populates the virt_addr fields of all memory region descriptors
+ * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
+ * are also copied to @runtime_map, and their total count is returned in @count.
+ */
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ unsigned long desc_size, efi_memory_desc_t *runtime_map,
+ int *count)
+{
+ u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
+ efi_memory_desc_t *out = runtime_map;
+ int l;
+
+ for (l = 0; l < map_size; l += desc_size) {
+ efi_memory_desc_t *in = (void *)memory_map + l;
+ u64 paddr, size;
+
+ if (!(in->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+
+ /*
+ * Make the mapping compatible with 64k pages: this allows
+ * a 4k page size kernel to kexec a 64k page size kernel and
+ * vice versa.
+ */
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size = round_up(in->num_pages * EFI_PAGE_SIZE +
+ in->phys_addr - paddr, SZ_64K);
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual base that
+ * is compatible with section mappings if this region has the
+ * appropriate size and physical alignment. (Sections are 2 MB
+ * on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+
+ in->virt_addr = efi_virt_base + in->phys_addr - paddr;
+ efi_virt_base += size;
+
+ memcpy(out, in, desc_size);
+ out = (void *)out + desc_size;
+ ++*count;
+ }
+}
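The rounding in efi_get_virtmap() above is the heart of the scheme: each runtime region is expanded to a 64 KB-aligned window so that 4 KB and 64 KB page-size kernels can kexec one another, and the virtual address preserves the physical offset within that window. A standalone sketch of just the address arithmetic, with round_up/round_down spelled out and an example region chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

#define SZ_64K          0x10000ULL
#define EFI_PAGE_SIZE   0x1000ULL
#define RT_VIRTUAL_BASE 0x40000000ULL   /* same starting base as the stub */

static uint64_t round_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t round_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	/* example runtime region: not 64 KB aligned, 20 EFI pages long */
	uint64_t phys_addr = 0xABCD3000ULL;
	uint64_t num_pages = 20;
	uint64_t virt_base = RT_VIRTUAL_BASE;

	uint64_t paddr = round_down(phys_addr, SZ_64K);
	uint64_t size  = round_up(num_pages * EFI_PAGE_SIZE + phys_addr - paddr,
				  SZ_64K);
	uint64_t virt_addr = virt_base + phys_addr - paddr;

	printf("window: phys 0x%llx..0x%llx -> virt 0x%llx\n",
	       (unsigned long long)paddr,
	       (unsigned long long)(paddr + size),
	       (unsigned long long)virt_addr);

	/* the next runtime region would be mapped starting here */
	virt_base += size;
	printf("next virtual base: 0x%llx\n", (unsigned long long)virt_base);

	return 0;
}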
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index a920fec8fe88..af5d63c7cc53 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,6 +32,15 @@
static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+#endif
+
struct file_info {
efi_file_handle_t *handle;
u64 size;
@@ -66,25 +75,29 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
unsigned long key;
u32 desc_version;
- *map_size = sizeof(*m) * 32;
-again:
+ *map_size = 0;
+ *desc_size = 0;
+ key = 0;
+ status = efi_call_early(get_memory_map, map_size, NULL,
+ &key, desc_size, &desc_version);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
/*
* Add an additional efi_memory_desc_t because we're doing an
* allocation which may be in a new descriptor region.
*/
- *map_size += sizeof(*m);
+ *map_size += *desc_size;
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
*map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- *desc_size = 0;
- key = 0;
status = efi_call_early(get_memory_map, map_size, m,
&key, desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
efi_call_early(free_pool, m);
- goto again;
+ return EFI_LOAD_ERROR;
}
if (status != EFI_SUCCESS)
@@ -101,7 +114,7 @@ fail:
}
-unsigned long __init get_dram_base(efi_system_table_t *sys_table_arg)
+unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
{
efi_status_t status;
unsigned long map_size;
@@ -150,10 +163,10 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
* a specific address. We are doing page-based allocations,
* so we must be aligned to a page.
*/
- if (align < EFI_PAGE_SIZE)
- align = EFI_PAGE_SIZE;
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
again:
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
@@ -235,10 +248,10 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
* a specific address. We are doing page-based allocations,
* so we must be aligned to a page.
*/
- if (align < EFI_PAGE_SIZE)
- align = EFI_PAGE_SIZE;
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
unsigned long m = (unsigned long)map;
@@ -292,7 +305,7 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
if (!size)
return;
- nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
efi_call_early(free_pages, addr, nr_pages);
}
@@ -561,7 +574,7 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* to the preferred address. If that fails, allocate as low
* as possible while respecting the required alignment.
*/
- nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &efi_addr);
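The EFI_ALLOC_ALIGN hunks above change only the rounding granularity: sizes are rounded up to the platform's allocation alignment (64 KB on arm64) but still converted to a page count in EFI_PAGE_SIZE units, because UEFI always counts 4 KB pages. A quick check of that arithmetic, assuming a 64 KB EFI_ALLOC_ALIGN purely as an example:

#include <stdio.h>

#define EFI_PAGE_SIZE   0x1000UL
#define EFI_ALLOC_ALIGN 0x10000UL   /* assumed 64 KB platform override */

static unsigned long round_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long size = 5 * EFI_PAGE_SIZE;   /* a 20 KB allocation request */

	/* old behaviour: round to a page, allocate 5 pages */
	unsigned long old_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;

	/* new behaviour: round to the allocation granule, still count 4 KB pages */
	unsigned long new_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;

	printf("old: %lu pages, new: %lu pages\n", old_pages, new_pages);
	return 0;
}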
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 304ab295ca1a..47437b16b186 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,6 +5,10 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
+#undef memcpy
+#undef memset
+#undef memmove
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -39,4 +43,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
void *get_fdt(efi_system_table_t *sys_table);
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ unsigned long desc_size, efi_memory_desc_t *runtime_map,
+ int *count);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index c846a9608cbd..91da56c4fd54 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -14,6 +14,8 @@
#include <linux/libfdt.h>
#include <asm/efi.h>
+#include "efistub.h"
+
efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
@@ -193,9 +195,26 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long map_size, desc_size;
u32 desc_ver;
unsigned long mmap_key;
- efi_memory_desc_t *memory_map;
+ efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size;
efi_status_t status;
+ int runtime_entry_count = 0;
+
+ /*
+ * Get a copy of the current memory map that we will use to prepare
+ * the input for SetVirtualAddressMap(). We don't have to worry about
+ * subsequent allocations adding entries, since they could not affect
+ * the number of EFI_MEMORY_RUNTIME regions.
+ */
+ status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
+ &desc_size, &desc_ver, &mmap_key);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+
+ pr_efi(sys_table,
+ "Exiting boot services and installing virtual address map...\n");
/*
* Estimate size of new FDT, and allocate memory for it. We
@@ -248,12 +267,48 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight into SetVirtualAddressMap()
+ */
+ efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
+ &runtime_entry_count);
+
/* Now we are ready to exit_boot_services.*/
status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
- if (status == EFI_SUCCESS)
- return status;
+ /* Install the new virtual address map */
+ svam = sys_table->runtime->set_virtual_address_map;
+ status = svam(runtime_entry_count * desc_size, desc_size,
+ desc_ver, runtime_map);
+
+ /*
+ * We are beyond the point of no return here, so if the call to
+ * SetVirtualAddressMap() failed, we need to signal that to the
+ * incoming kernel but proceed normally otherwise.
+ */
+ if (status != EFI_SUCCESS) {
+ int l;
+
+ /*
+ * Set the virtual address field of all
+ * EFI_MEMORY_RUNTIME entries to 0. This will signal
+ * the incoming kernel that no virtual translation has
+ * been installed.
+ */
+ for (l = 0; l < map_size; l += desc_size) {
+ efi_memory_desc_t *p = (void *)memory_map + l;
+
+ if (p->attribute & EFI_MEMORY_RUNTIME)
+ p->virt_addr = 0;
+ }
+ }
+ return EFI_SUCCESS;
+ }
pr_efi_err(sys_table, "Exit boot services failed.\n");
@@ -264,6 +319,7 @@ fail_free_new_fdt:
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
fail:
+ sys_table->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 018c29a26615..87b8e3b900d2 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -191,7 +191,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
return 0;
out_add_entry:
- for (j = i - 1; j > 0; j--) {
+ for (j = i - 1; j >= 0; j--) {
entry = *(map_entries + j);
kobject_put(&entry->kobj);
}
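The one-character change above fixes an off-by-one in the error unwind: with j > 0 the loop never released the entry at index 0, so the first map entry's kobject leaked on failure. A tiny sketch of the corrected pattern, with a hypothetical release() standing in for kobject_put():

#include <stdio.h>

static void release(int idx)   /* stands in for kobject_put() on map_entries[idx] */
{
	printf("releasing entry %d\n", idx);
}

int main(void)
{
	int i = 3;   /* suppose adding entry 3 failed */
	int j;

	/* unwind everything that was successfully added, including index 0 */
	for (j = i - 1; j >= 0; j--)
		release(j);

	return 0;
}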
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 633ec216e185..c1e2ca3d9a51 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -197,9 +197,16 @@ config GPIO_F7188X
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
+config GPIO_MB86S7X
+ bool "GPIO support for Fujitsu MB86S7x Platforms"
+ depends on ARCH_MB86S7X
+ help
+ Say yes here to support the GPIO controller in Fujitsu MB86S70 SoCs.
+
config GPIO_MOXART
bool "MOXART GPIO support"
depends on ARCH_MOXART
+ select GPIO_GENERIC
help
Select this option to enable GPIO driver for
MOXA ART SoC devices.
@@ -285,6 +292,7 @@ config GPIO_PXA
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST)
+ select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO on Renesas R-Car SoCs.
@@ -365,9 +373,17 @@ config GPIO_XGENE
the generic flash controller's address and data pins. Say yes
here to enable the GFC GPIO functionality.
+config GPIO_XGENE_SB
+ tristate "APM X-Gene GPIO standby controller support"
+ depends on ARCH_XGENE && OF_GPIO
+ select GPIO_GENERIC
+ help
+ This driver supports the GPIO block within the APM X-Gene
+ Standby Domain. Say yes here to enable the GPIO functionality.
+
config GPIO_XILINX
- bool "Xilinx GPIO support"
- depends on PPC_OF || MICROBLAZE || ARCH_ZYNQ
+ tristate "Xilinx GPIO support"
+ depends on OF_GPIO && (PPC || MICROBLAZE || ARCH_ZYNQ || X86)
help
Say yes here to support the Xilinx FPGA GPIO device
@@ -394,25 +410,32 @@ config GPIO_VR41XX
Say yes here to support the NEC VR4100 series General-purpose I/O Unit
config GPIO_SCH
- tristate "Intel SCH/TunnelCreek/Centerton GPIO"
+ tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
depends on PCI && X86
select MFD_CORE
select LPC_SCH
help
Say yes here to support GPIO interface on Intel Poulsbo SCH,
- Intel Tunnel Creek processor or Intel Centerton processor.
+ Intel Tunnel Creek processor, Intel Centerton processor or
+ Intel Quark X1000 SoC.
+
The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
powered by the core power rail and are turned off during sleep
modes (S3 and higher). The remaining four GPIOs are powered by
the Intel SCH suspend power supply. These GPIOs remain
active during S3. The suspend powered GPIOs can be used to wake the
system from the Suspend-to-RAM state.
+
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
+
The Intel Centerton processor has a total of 30 GPIO pins.
Twenty-one are powered by the core power rail and 9 from the
suspend power supply.
+ The Intel Quark X1000 SoC has 2 GPIOs powered by the core
+ power well and 6 from the suspend power well.
+
config GPIO_ICH
tristate "Intel ICH GPIO"
depends on PCI && X86
@@ -450,6 +473,7 @@ config GPIO_VX855
config GPIO_GE_FPGA
bool "GE FPGA based GPIO"
depends on GE_FPGA
+ select GPIO_GENERIC
help
Support for common GPIO functionality provided on some GE Single Board
Computers.
@@ -519,6 +543,7 @@ config GPIO_MAX7300
config GPIO_MAX732X
tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
depends on I2C
+ select IRQ_DOMAIN
help
Say yes here to support the MAX7319, MAX7320-7327 series of I2C
Port Expanders. Each IO port on these chips has a fixed role of
@@ -613,6 +638,7 @@ config GPIO_RC5T583
config GPIO_SX150X
bool "Semtech SX150x I2C GPIO expander"
depends on I2C=y
+ select GPIOLIB_IRQCHIP
default n
help
Say yes here to provide support for Semtech SX150-series I2C
@@ -624,6 +650,7 @@ config GPIO_SX150X
config GPIO_STMPE
bool "STMPE GPIOs"
depends on MFD_STMPE
+ depends on OF_GPIO
select GPIOLIB_IRQCHIP
help
This enables support for the GPIOs found on the STMPE I/O
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 81755f1305e6..bdda6a94d2cd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
@@ -105,6 +106,7 @@ obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o
+obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index d3d2d1099f64..d00d81928fe8 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -213,6 +213,12 @@ found:
goto out;
}
gp.pm = ioport_map(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ if (!gp.pm) {
+ dev_err(&pdev->dev, "Couldn't map io port into io memory\n");
+ release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ err = -ENOMEM;
+ goto out;
+ }
gp.pdev = pdev;
gp.chip.dev = &pdev->dev;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 55d4803d71b0..3d9e08f7e823 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
virq = irq_find_mapping(cg->chip.irqdomain, gpio);
- generic_handle_irq(virq);
+ handle_nested_irq(virq);
}
}
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 978b51eae2ec..dbdb4de82c6d 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -47,13 +47,6 @@
#define DLN2_GPIO_MAX_PINS 32
-struct dln2_irq_work {
- struct work_struct work;
- struct dln2_gpio *dln2;
- int pin;
- int type;
-};
-
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
*/
DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
- struct dln2_irq_work *irq_work;
+ /* active IRQs - not synced to hardware */
+ DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+ /* active IRQS - synced to hardware */
+ DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+ int irq_type[DLN2_GPIO_MAX_PINS];
+ struct mutex irq_lock;
};
struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
return !!ret;
}
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
- unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+ unsigned int pin, int value)
{
struct dln2_gpio_pin_val req = {
.pin = cpu_to_le16(pin),
.value = value,
};
- dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
- sizeof(req));
+ return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+ sizeof(req));
}
#define DLN2_GPIO_DIRECTION_IN 0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
+ struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+ int ret;
+
+ ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+ if (ret < 0)
+ return ret;
+
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
}
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
&req, sizeof(req));
}
-static void dln2_irq_work(struct work_struct *w)
-{
- struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
- struct dln2_gpio *dln2 = iw->dln2;
- u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
- if (test_bit(iw->pin, dln2->irqs_enabled))
- dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
- else
- dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- int pin = irqd_to_hwirq(irqd);
-
- set_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- clear_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
+ set_bit(pin, dln2->unmasked_irqs);
}
static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- struct device *dev = dln2->gpio.dev;
- int pin = irqd_to_hwirq(irqd);
-
- if (test_and_clear_bit(pin, dln2->irqs_pending)) {
- int irq;
-
- irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
- if (!irq) {
- dev_err(dev, "pin %d not mapped to IRQ\n", pin);
- return;
- }
-
- generic_handle_irq(irq);
- }
+ clear_bit(pin, dln2->unmasked_irqs);
}
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
break;
case IRQ_TYPE_LEVEL_LOW:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
break;
case IRQ_TYPE_EDGE_BOTH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
break;
case IRQ_TYPE_EDGE_RISING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
break;
default:
return -EINVAL;
@@ -387,19 +346,57 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
return 0;
}
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+ mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+ int pin = irqd_to_hwirq(irqd);
+ int enabled, unmasked;
+ unsigned type;
+ int ret;
+
+ enabled = test_bit(pin, dln2->enabled_irqs);
+ unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+ if (enabled != unmasked) {
+ if (unmasked) {
+ type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+ set_bit(pin, dln2->enabled_irqs);
+ } else {
+ type = DLN2_GPIO_EVENT_NONE;
+ clear_bit(pin, dln2->enabled_irqs);
+ }
+
+ ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+ if (ret)
+ dev_err(dln2->gpio.dev, "failed to set event\n");
+ }
+
+ mutex_unlock(&dln2->irq_lock);
+}
+
static struct irq_chip dln2_gpio_irqchip = {
.name = "dln2-irq",
- .irq_enable = dln2_irq_enable,
- .irq_disable = dln2_irq_disable,
.irq_mask = dln2_irq_mask,
.irq_unmask = dln2_irq_unmask,
.irq_set_type = dln2_irq_set_type,
+ .irq_bus_lock = dln2_irq_bus_lock,
+ .irq_bus_sync_unlock = dln2_irq_bus_unlock,
};
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
const void *data, int len)
{
int pin, irq;
+
const struct {
__le16 count;
__u8 type;
@@ -425,14 +422,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
return;
}
- if (!test_bit(pin, dln2->irqs_enabled))
- return;
- if (test_bit(pin, dln2->irqs_masked)) {
- set_bit(pin, dln2->irqs_pending);
- return;
- }
-
- switch (dln2->irq_work[pin].type) {
+ switch (dln2->irq_type[pin]) {
case DLN2_GPIO_EVENT_CHANGE_RISING:
if (event->value)
generic_handle_irq(irq);
@@ -451,7 +441,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
struct dln2_gpio *dln2;
struct device *dev = &pdev->dev;
int pins;
- int i, ret;
+ int ret;
pins = dln2_gpio_get_pin_count(pdev);
if (pins < 0) {
@@ -467,15 +457,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
if (!dln2)
return -ENOMEM;
- dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
- sizeof(struct dln2_irq_work), GFP_KERNEL);
- if (!dln2->irq_work)
- return -ENOMEM;
- for (i = 0; i < pins; i++) {
- INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
- dln2->irq_work[i].pin = i;
- dln2->irq_work[i].dln2 = dln2;
- }
+ mutex_init(&dln2->irq_lock);
dln2->pdev = pdev;
@@ -529,11 +511,8 @@ out:
static int dln2_gpio_remove(struct platform_device *pdev)
{
struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
- int i;
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
- for (i = 0; i < dln2->gpio.ngpio; i++)
- flush_work(&dln2->irq_work[i].work);
gpiochip_remove(&dln2->gpio);
return 0;
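
The dln2 conversion above replaces the per-pin workqueue with the irqchip bus-lock pattern, the usual way to drive an irqchip that sits behind a slow bus (here USB): mask/unmask only update in-memory state, and the accumulated changes are flushed to the device from irq_bus_sync_unlock(), where sleeping is allowed. A minimal generic sketch of that pattern follows; the my_* names and the my_bus_write() helper are illustrative, not taken from the patch.

struct my_chip {
	struct mutex buslock;
	unsigned long wanted;	/* state requested by the irq core */
	unsigned long applied;	/* state last written to the device */
};

static void my_irq_mask(struct irq_data *d)
{
	struct my_chip *c = irq_data_get_irq_chip_data(d);

	clear_bit(irqd_to_hwirq(d), &c->wanted);	/* memory only */
}

static void my_irq_unmask(struct irq_data *d)
{
	struct my_chip *c = irq_data_get_irq_chip_data(d);

	set_bit(irqd_to_hwirq(d), &c->wanted);		/* memory only */
}

static void my_irq_bus_lock(struct irq_data *d)
{
	struct my_chip *c = irq_data_get_irq_chip_data(d);

	mutex_lock(&c->buslock);
}

static void my_irq_bus_sync_unlock(struct irq_data *d)
{
	struct my_chip *c = irq_data_get_irq_chip_data(d);

	if (c->wanted != c->applied) {
		my_bus_write(c, c->wanted);	/* may sleep: that is why it lives here */
		c->applied = c->wanted;
	}
	mutex_unlock(&c->buslock);
}
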
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index b4eb6a657d34..58faf04fce5d 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -469,15 +469,13 @@ dwapb_gpio_get_pdata_of(struct device *dev)
if (nports == 0)
return ERR_PTR(-ENODEV);
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->properties = kcalloc(nports, sizeof(*pp), GFP_KERNEL);
- if (!pdata->properties) {
- kfree(pdata);
+ pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL);
+ if (!pdata->properties)
return ERR_PTR(-ENOMEM);
- }
pdata->nports = nports;
@@ -490,8 +488,6 @@ dwapb_gpio_get_pdata_of(struct device *dev)
pp->idx >= DWAPB_MAX_PORTS) {
dev_err(dev, "missing/invalid port index for %s\n",
port_np->full_name);
- kfree(pdata->properties);
- kfree(pdata);
return ERR_PTR(-EINVAL);
}
@@ -523,15 +519,6 @@ dwapb_gpio_get_pdata_of(struct device *dev)
return pdata;
}
-static inline void dwapb_free_pdata_of(struct dwapb_platform_data *pdata)
-{
- if (!IS_ENABLED(CONFIG_OF_GPIO) || !pdata)
- return;
-
- kfree(pdata->properties);
- kfree(pdata);
-}
-
static int dwapb_gpio_probe(struct platform_device *pdev)
{
unsigned int i;
@@ -540,40 +527,32 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
int err;
struct device *dev = &pdev->dev;
struct dwapb_platform_data *pdata = dev_get_platdata(dev);
- bool is_pdata_alloc = !pdata;
- if (is_pdata_alloc) {
+ if (!pdata) {
pdata = dwapb_gpio_get_pdata_of(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
- if (!pdata->nports) {
- err = -ENODEV;
- goto out_err;
- }
+ if (!pdata->nports)
+ return -ENODEV;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio) {
- err = -ENOMEM;
- goto out_err;
- }
+ if (!gpio)
+ return -ENOMEM;
+
gpio->dev = &pdev->dev;
gpio->nr_ports = pdata->nports;
gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports,
sizeof(*gpio->ports), GFP_KERNEL);
- if (!gpio->ports) {
- err = -ENOMEM;
- goto out_err;
- }
+ if (!gpio->ports)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gpio->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gpio->regs)) {
- err = PTR_ERR(gpio->regs);
- goto out_err;
- }
+ if (IS_ERR(gpio->regs))
+ return PTR_ERR(gpio->regs);
for (i = 0; i < gpio->nr_ports; i++) {
err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i);
@@ -582,16 +561,12 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpio);
- goto out_err;
+ return 0;
out_unregister:
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
-out_err:
- if (is_pdata_alloc)
- dwapb_free_pdata_of(pdata);
-
return err;
}
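
The dwapb change above is a straight conversion from kzalloc/kcalloc plus hand-rolled error unwinding to device-managed allocations. A minimal sketch of the devm_* pattern it relies on, using made-up example_* names rather than the driver's own:

struct example_priv {
	u32 *buf;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *p;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;			/* nothing to unwind */

	p->buf = devm_kcalloc(&pdev->dev, 16, sizeof(*p->buf), GFP_KERNEL);
	if (!p->buf)
		return -ENOMEM;			/* p is released by devres */

	platform_set_drvdata(pdev, p);
	return 0;				/* both freed automatically on detach or failure */
}
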
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index aea5c2a53cc0..f9ac3f351753 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -19,9 +19,12 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/of_address.h>
#include <linux/module.h>
+#include <linux/basic_mmio_gpio.h>
#define GEF_GPIO_DIRECT 0x00
#define GEF_GPIO_IN 0x04
@@ -33,53 +36,6 @@
#define GEF_GPIO_OVERRUN 0x1C
#define GEF_GPIO_MODE 0x20
-static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
- unsigned int data;
-
- data = ioread32be(mmchip->regs + GEF_GPIO_OUT);
- if (value)
- data = data | BIT(offset);
- else
- data = data & ~BIT(offset);
- iowrite32be(data, mmchip->regs + GEF_GPIO_OUT);
-}
-
-static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
-{
- unsigned int data;
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
- data = data | BIT(offset);
- iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
-
- return 0;
-}
-
-static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
-{
- unsigned int data;
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- /* Set value before switching to output */
- gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
-
- data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
- data = data & ~BIT(offset);
- iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
-
- return 0;
-}
-
-static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- return !!(ioread32be(mmchip->regs + GEF_GPIO_IN) & BIT(offset));
-}
-
static const struct of_device_id gef_gpio_ids[] = {
{
.compatible = "gef,sbc610-gpio",
@@ -99,22 +55,50 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(gef_gpio_ids, &pdev->dev);
- struct of_mm_gpio_chip *mmchip;
+ struct bgpio_chip *bgc;
+ void __iomem *regs;
+ int ret;
- mmchip = devm_kzalloc(&pdev->dev, sizeof(*mmchip), GFP_KERNEL);
- if (!mmchip)
+ bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
+ if (!bgc)
return -ENOMEM;
+ regs = of_iomap(pdev->dev.of_node, 0);
+ if (!regs)
+ return -ENOMEM;
+
+ ret = bgpio_init(bgc, &pdev->dev, 4, regs + GEF_GPIO_IN,
+ regs + GEF_GPIO_OUT, NULL, NULL,
+ regs + GEF_GPIO_DIRECT, BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (ret) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ goto err0;
+ }
+
/* Setup pointers to chip functions */
- mmchip->gc.ngpio = (u16)(uintptr_t)of_id->data;
- mmchip->gc.of_gpio_n_cells = 2;
- mmchip->gc.direction_input = gef_gpio_dir_in;
- mmchip->gc.direction_output = gef_gpio_dir_out;
- mmchip->gc.get = gef_gpio_get;
- mmchip->gc.set = gef_gpio_set;
+ bgc->gc.label = devm_kstrdup(&pdev->dev, pdev->dev.of_node->full_name,
+ GFP_KERNEL);
+ if (!bgc->gc.label) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ bgc->gc.base = -1;
+ bgc->gc.ngpio = (u16)(uintptr_t)of_id->data;
+ bgc->gc.of_gpio_n_cells = 2;
+ bgc->gc.of_node = pdev->dev.of_node;
/* This function adds a memory mapped GPIO chip */
- return of_mm_gpiochip_add(pdev->dev.of_node, mmchip);
+ ret = gpiochip_add(&bgc->gc);
+ if (ret)
+ goto err0;
+
+ return 0;
+err0:
+ iounmap(regs);
+ pr_err("%s: GPIO chip registration failed\n",
+ pdev->dev.of_node->full_name);
+ return ret;
};
static struct platform_driver gef_gpio_driver = {
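
Two drivers in this series (gpio-ge here and gpio-moxart further down) are converted to the generic MMIO GPIO library, and the bgpio_init() calls are easiest to follow with the argument order spelled out. The summary below reflects my reading of <linux/basic_mmio_gpio.h> and is an annotation, not part of either patch:

/*
 *   bgpio_init(bgc, dev, sz,
 *              dat,     register the current line values are read from
 *              set,     register written to drive output values
 *              clr,     optional separate clear register, or NULL
 *              dirout,  direction register where 1 = output, or NULL
 *              dirin,   direction register where 1 = input, or NULL
 *              flags);
 *
 * gpio-ge passes GEF_GPIO_DIRECT as dirin (1 selects input on that board),
 * while gpio-moxart passes GPIO_PIN_DIRECTION as dirout.
 */
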
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 16f6115e5bdb..b92a690f5765 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -190,6 +190,79 @@ static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&bgc->lock, flags);
}
+static void bgpio_multiple_get_masks(struct bgpio_chip *bgc,
+ unsigned long *mask, unsigned long *bits,
+ unsigned long *set_mask,
+ unsigned long *clear_mask)
+{
+ int i;
+
+ *set_mask = 0;
+ *clear_mask = 0;
+
+ for (i = 0; i < bgc->bits; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ *set_mask |= bgc->pin2mask(bgc, i);
+ else
+ *clear_mask |= bgc->pin2mask(bgc, i);
+ }
+ }
+}
+
+static void bgpio_set_multiple_single_reg(struct bgpio_chip *bgc,
+ unsigned long *mask,
+ unsigned long *bits,
+ void __iomem *reg)
+{
+ unsigned long flags;
+ unsigned long set_mask, clear_mask;
+
+ spin_lock_irqsave(&bgc->lock, flags);
+
+ bgpio_multiple_get_masks(bgc, mask, bits, &set_mask, &clear_mask);
+
+ bgc->data |= set_mask;
+ bgc->data &= ~clear_mask;
+
+ bgc->write_reg(reg, bgc->data);
+
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ bgpio_set_multiple_single_reg(bgc, mask, bits, bgc->reg_dat);
+}
+
+static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ bgpio_set_multiple_single_reg(bgc, mask, bits, bgc->reg_set);
+}
+
+static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ unsigned long set_mask, clear_mask;
+
+ bgpio_multiple_get_masks(bgc, mask, bits, &set_mask, &clear_mask);
+
+ if (set_mask)
+ bgc->write_reg(bgc->reg_set, set_mask);
+ if (clear_mask)
+ bgc->write_reg(bgc->reg_clr, clear_mask);
+}
+
static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
return 0;
@@ -354,11 +427,14 @@ static int bgpio_setup_io(struct bgpio_chip *bgc,
bgc->reg_set = set;
bgc->reg_clr = clr;
bgc->gc.set = bgpio_set_with_clear;
+ bgc->gc.set_multiple = bgpio_set_multiple_with_clear;
} else if (set && !clr) {
bgc->reg_set = set;
bgc->gc.set = bgpio_set_set;
+ bgc->gc.set_multiple = bgpio_set_multiple_set;
} else {
bgc->gc.set = bgpio_set;
+ bgc->gc.set_multiple = bgpio_set_multiple;
}
bgc->gc.get = bgpio_get;
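
The new set_multiple() callbacks above follow gpiolib's mask/bits convention: mask selects which lines the call may touch and bits carries their new values, so a whole word of outputs can change with a single register write. A small illustration (assumed example code, not from the patch; note that bgpio_multiple_get_masks() consumes *mask as it walks it):

static void example_drive_two_lines(struct gpio_chip *gc)
{
	DECLARE_BITMAP(mask, 32);
	DECLARE_BITMAP(bits, 32);

	bitmap_zero(mask, 32);
	bitmap_zero(bits, 32);
	__set_bit(1, mask);		/* touch line 1 ... */
	__set_bit(2, mask);		/* ... and line 2 */
	__set_bit(2, bits);		/* line 1 -> low, line 2 -> high */

	gc->set_multiple(gc, mask, bits);	/* lines outside mask are untouched */
}
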
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 09daaf2aeb56..35a02770c8b0 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -121,7 +121,7 @@ static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct grgpio_priv *priv = grgpio_gc_to_priv(gc);
- if (offset > gc->ngpio)
+ if (offset >= gc->ngpio)
return -ENXIO;
if (priv->lirqs[offset].index < 0)
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add(gc);
if (err) {
dev_err(&ofdev->dev, "Could not add gpiochip\n");
- irq_domain_remove(priv->domain);
+ if (priv->domain)
+ irq_domain_remove(priv->domain);
return err;
}
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 6c676225b886..a095b2393fe9 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -19,8 +19,10 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>
+#include <linux/of.h>
/*
@@ -116,6 +118,22 @@ static const struct i2c_device_id max732x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max732x_id);
+#ifdef CONFIG_OF
+static const struct of_device_id max732x_of_table[] = {
+ { .compatible = "maxim,max7319" },
+ { .compatible = "maxim,max7320" },
+ { .compatible = "maxim,max7321" },
+ { .compatible = "maxim,max7322" },
+ { .compatible = "maxim,max7323" },
+ { .compatible = "maxim,max7324" },
+ { .compatible = "maxim,max7325" },
+ { .compatible = "maxim,max7326" },
+ { .compatible = "maxim,max7327" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max732x_of_table);
+#endif
+
struct max732x_chip {
struct gpio_chip gpio_chip;
@@ -132,16 +150,22 @@ struct max732x_chip {
uint8_t reg_out[2];
#ifdef CONFIG_GPIO_MAX732X_IRQ
- struct mutex irq_lock;
- int irq_base;
- uint8_t irq_mask;
- uint8_t irq_mask_cur;
- uint8_t irq_trig_raise;
- uint8_t irq_trig_fall;
- uint8_t irq_features;
+ struct irq_domain *irq_domain;
+ struct mutex irq_lock;
+ int irq_base;
+ uint8_t irq_mask;
+ uint8_t irq_mask_cur;
+ uint8_t irq_trig_raise;
+ uint8_t irq_trig_fall;
+ uint8_t irq_features;
#endif
};
+static inline struct max732x_chip *to_max732x(struct gpio_chip *gc)
+{
+ return container_of(gc, struct max732x_chip, gpio_chip);
+}
+
static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
{
struct i2c_client *client;
@@ -180,12 +204,10 @@ static inline int is_group_a(struct max732x_chip *chip, unsigned off)
static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
- struct max732x_chip *chip;
+ struct max732x_chip *chip = to_max732x(gc);
uint8_t reg_val;
int ret;
- chip = container_of(gc, struct max732x_chip, gpio_chip);
-
ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
if (ret < 0)
return 0;
@@ -193,18 +215,17 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
return reg_val & (1u << (off & 0x7));
}
-static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static void max732x_gpio_set_mask(struct gpio_chip *gc, unsigned off, int mask,
+ int val)
{
- struct max732x_chip *chip;
- uint8_t reg_out, mask = 1u << (off & 0x7);
+ struct max732x_chip *chip = to_max732x(gc);
+ uint8_t reg_out;
int ret;
- chip = container_of(gc, struct max732x_chip, gpio_chip);
-
mutex_lock(&chip->lock);
reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
- reg_out = (val) ? reg_out | mask : reg_out & ~mask;
+ reg_out = (reg_out & ~mask) | (val & mask);
ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
if (ret < 0)
@@ -219,13 +240,31 @@ out:
mutex_unlock(&chip->lock);
}
+static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+{
+ unsigned base = off & ~0x7;
+ uint8_t mask = 1u << (off & 0x7);
+
+ max732x_gpio_set_mask(gc, base, mask, val << (off & 0x7));
+}
+
+static void max732x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned mask_lo = mask[0] & 0xff;
+ unsigned mask_hi = (mask[0] >> 8) & 0xff;
+
+ if (mask_lo)
+ max732x_gpio_set_mask(gc, 0, mask_lo, bits[0] & 0xff);
+ if (mask_hi)
+ max732x_gpio_set_mask(gc, 8, mask_hi, (bits[0] >> 8) & 0xff);
+}
+
static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
- struct max732x_chip *chip;
+ struct max732x_chip *chip = to_max732x(gc);
unsigned int mask = 1u << off;
- chip = container_of(gc, struct max732x_chip, gpio_chip);
-
if ((mask & chip->dir_input) == 0) {
dev_dbg(&chip->client->dev, "%s port %d is output only\n",
chip->client->name, off);
@@ -245,11 +284,9 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
static int max732x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
- struct max732x_chip *chip;
+ struct max732x_chip *chip = to_max732x(gc);
unsigned int mask = 1u << off;
- chip = container_of(gc, struct max732x_chip, gpio_chip);
-
if ((mask & chip->dir_output) == 0) {
dev_dbg(&chip->client->dev, "%s port %d is input only\n",
chip->client->name, off);
@@ -321,24 +358,28 @@ static void max732x_irq_update_mask(struct max732x_chip *chip)
static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
{
- struct max732x_chip *chip;
+ struct max732x_chip *chip = to_max732x(gc);
- chip = container_of(gc, struct max732x_chip, gpio_chip);
- return chip->irq_base + off;
+ if (chip->irq_domain) {
+ return irq_create_mapping(chip->irq_domain,
+ chip->irq_base + off);
+ } else {
+ return -ENXIO;
+ }
}
static void max732x_irq_mask(struct irq_data *d)
{
struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask_cur &= ~(1 << (d->irq - chip->irq_base));
+ chip->irq_mask_cur &= ~(1 << d->hwirq);
}
static void max732x_irq_unmask(struct irq_data *d)
{
struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask_cur |= 1 << (d->irq - chip->irq_base);
+ chip->irq_mask_cur |= 1 << d->hwirq;
}
static void max732x_irq_bus_lock(struct irq_data *d)
@@ -352,15 +393,25 @@ static void max732x_irq_bus_lock(struct irq_data *d)
static void max732x_irq_bus_sync_unlock(struct irq_data *d)
{
struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ uint16_t new_irqs;
+ uint16_t level;
max732x_irq_update_mask(chip);
+
+ new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
+ while (new_irqs) {
+ level = __ffs(new_irqs);
+ max732x_gpio_direction_input(&chip->gpio_chip, level);
+ new_irqs &= ~(1 << level);
+ }
+
mutex_unlock(&chip->irq_lock);
}
static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
- uint16_t off = d->irq - chip->irq_base;
+ uint16_t off = d->hwirq;
uint16_t mask = 1 << off;
if (!(mask & chip->dir_input)) {
@@ -385,7 +436,7 @@ static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
else
chip->irq_trig_raise &= ~mask;
- return max732x_gpio_direction_input(&chip->gpio_chip, off);
+ return 0;
}
static struct irq_chip max732x_irq_chip = {
@@ -441,7 +492,7 @@ static irqreturn_t max732x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(level + chip->irq_base);
+ handle_nested_irq(irq_find_mapping(chip->irq_domain, level));
pending &= ~(1 << level);
} while (pending);
@@ -449,6 +500,44 @@ static irqreturn_t max732x_irq_handler(int irq, void *devid)
return IRQ_HANDLED;
}
+static int max732x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct max732x_chip *chip = h->host_data;
+
+ if (!(chip->dir_input & (1 << hw))) {
+ dev_err(&chip->client->dev,
+ "Attempt to map output line as IRQ line: %lu\n",
+ hw);
+ return -EPERM;
+ }
+
+ irq_set_chip_data(virq, chip);
+ irq_set_chip_and_handler(virq, &max732x_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+#ifdef CONFIG_ARM
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops max732x_irq_domain_ops = {
+ .map = max732x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+ if (chip->client->irq && chip->irq_domain)
+ irq_domain_remove(chip->irq_domain);
+}
+
static int max732x_irq_setup(struct max732x_chip *chip,
const struct i2c_device_id *id)
{
@@ -457,28 +546,19 @@ static int max732x_irq_setup(struct max732x_chip *chip,
int has_irq = max732x_features[id->driver_data] >> 32;
int ret;
- if (pdata->irq_base && has_irq != INT_NONE) {
- int lvl;
-
- chip->irq_base = pdata->irq_base;
+ if (((pdata && pdata->irq_base) || client->irq)
+ && has_irq != INT_NONE) {
+ if (pdata)
+ chip->irq_base = pdata->irq_base;
chip->irq_features = has_irq;
mutex_init(&chip->irq_lock);
- for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
- int irq = lvl + chip->irq_base;
-
- if (!(chip->dir_input & (1 << lvl)))
- continue;
-
- irq_set_chip_data(irq, chip);
- irq_set_chip_and_handler(irq, &max732x_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
+ chip->irq_domain = irq_domain_add_simple(client->dev.of_node,
+ chip->gpio_chip.ngpio, chip->irq_base,
+ &max732x_irq_domain_ops, chip);
+ if (!chip->irq_domain) {
+ dev_err(&client->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
}
ret = request_threaded_irq(client->irq,
@@ -498,15 +578,10 @@ static int max732x_irq_setup(struct max732x_chip *chip,
return 0;
out_failed:
- chip->irq_base = 0;
+ max732x_irq_teardown(chip);
return ret;
}
-static void max732x_irq_teardown(struct max732x_chip *chip)
-{
- if (chip->irq_base)
- free_irq(chip->client->irq, chip);
-}
#else /* CONFIG_GPIO_MAX732X_IRQ */
static int max732x_irq_setup(struct max732x_chip *chip,
const struct i2c_device_id *id)
@@ -515,7 +590,7 @@ static int max732x_irq_setup(struct max732x_chip *chip,
struct max732x_platform_data *pdata = dev_get_platdata(&client->dev);
int has_irq = max732x_features[id->driver_data] >> 32;
- if (pdata->irq_base && has_irq != INT_NONE)
+ if (((pdata && pdata->irq_base) || client->irq) && has_irq != INT_NONE)
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -562,6 +637,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
gc->set = max732x_gpio_set_value;
+ gc->set_multiple = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
@@ -574,28 +650,47 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
return port;
}
+static struct max732x_platform_data *of_gpio_max732x(struct device *dev)
+{
+ struct max732x_platform_data *pdata;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->gpio_base = -1;
+
+ return pdata;
+}
+
static int max732x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max732x_platform_data *pdata;
+ struct device_node *node;
struct max732x_chip *chip;
struct i2c_client *c;
uint16_t addr_a, addr_b;
int ret, nr_port;
pdata = dev_get_platdata(&client->dev);
- if (pdata == NULL) {
+ node = client->dev.of_node;
+
+ if (!pdata && node)
+ pdata = of_gpio_max732x(&client->dev);
+
+ if (!pdata) {
dev_dbg(&client->dev, "no platform data\n");
return -EINVAL;
}
- chip = devm_kzalloc(&client->dev, sizeof(struct max732x_chip),
- GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
chip->client = client;
nr_port = max732x_setup_gpio(chip, id, pdata->gpio_base);
+ chip->gpio_chip.dev = &client->dev;
addr_a = (client->addr & 0x0f) | 0x60;
addr_b = (client->addr & 0x0f) | 0x50;
@@ -643,7 +738,7 @@ static int max732x_probe(struct i2c_client *client,
if (ret)
goto out_failed;
- if (pdata->setup) {
+ if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
@@ -664,9 +759,10 @@ static int max732x_remove(struct i2c_client *client)
{
struct max732x_platform_data *pdata = dev_get_platdata(&client->dev);
struct max732x_chip *chip = i2c_get_clientdata(client);
- int ret;
- if (pdata->teardown) {
+ if (pdata && pdata->teardown) {
+ int ret;
+
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0) {
@@ -689,8 +785,9 @@ static int max732x_remove(struct i2c_client *client)
static struct i2c_driver max732x_driver = {
.driver = {
- .name = "max732x",
- .owner = THIS_MODULE,
+ .name = "max732x",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max732x_of_table),
},
.probe = max732x_probe,
.remove = max732x_remove,
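
The max732x rework above swaps a static irq_base for an irq_domain: the domain is registered once at probe, gpio_to_irq() creates Linux interrupt numbers lazily with irq_create_mapping(), and the threaded handler translates hardware lines back to virqs before dispatching them as nested interrupts. A stripped-down sketch of that flow with placeholder example_* names (example_read_status() and the ops structure are assumed, not the driver's code):

struct example_chip {
	struct irq_domain *domain;
	unsigned int ngpio;
};

static int example_irq_init(struct example_chip *chip, struct device_node *np)
{
	chip->domain = irq_domain_add_simple(np, chip->ngpio, 0,
					     &example_irq_domain_ops, chip);
	if (!chip->domain)
		return -ENOMEM;
	return 0;
}

static int example_to_irq(struct example_chip *chip, unsigned offset)
{
	return irq_create_mapping(chip->domain, offset);	/* lazy virq allocation */
}

static irqreturn_t example_irq_thread(int irq, void *data)
{
	struct example_chip *chip = data;
	unsigned long pending = example_read_status(chip);	/* assumed helper */
	int hwirq;

	for_each_set_bit(hwirq, &pending, chip->ngpio)
		handle_nested_irq(irq_find_mapping(chip->domain, hwirq));

	return IRQ_HANDLED;
}
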
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
new file mode 100644
index 000000000000..21b1ce5abdfe
--- /dev/null
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -0,0 +1,232 @@
+/*
+ * linux/drivers/gpio/gpio-mb86s7x.c
+ *
+ * Copyright (C) 2015 Fujitsu Semiconductor Limited
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/of_device.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/*
+ * Only first 8bits of a register correspond to each pin,
+ * so there are 4 registers for 32 pins.
+ */
+#define PDR(x) (0x0 + x / 8 * 4)
+#define DDR(x) (0x10 + x / 8 * 4)
+#define PFR(x) (0x20 + x / 8 * 4)
+
+#define OFFSET(x) BIT((x) % 8)
+
+struct mb86s70_gpio_chip {
+ struct gpio_chip gc;
+ void __iomem *base;
+ struct clk *clk;
+ spinlock_t lock;
+};
+
+static inline struct mb86s70_gpio_chip *chip_to_mb86s70(struct gpio_chip *gc)
+{
+ return container_of(gc, struct mb86s70_gpio_chip, gc);
+}
+
+static int mb86s70_gpio_request(struct gpio_chip *gc, unsigned gpio)
+{
+ struct mb86s70_gpio_chip *gchip = chip_to_mb86s70(gc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&gchip->lock, flags);
+
+ val = readl(gchip->base + PFR(gpio));
+ val &= ~OFFSET(gpio);
+ writel(val, gchip->base + PFR(gpio));
+
+ spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
+}
+
+static void mb86s70_gpio_free(struct gpio_chip *gc, unsigned gpio)
+{
+ struct mb86s70_gpio_chip *gchip = chip_to_mb86s70(gc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&gchip->lock, flags);
+
+ val = readl(gchip->base + PFR(gpio));
+ val |= OFFSET(gpio);
+ writel(val, gchip->base + PFR(gpio));
+
+ spin_unlock_irqrestore(&gchip->lock, flags);
+}
+
+static int mb86s70_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
+{
+ struct mb86s70_gpio_chip *gchip = chip_to_mb86s70(gc);
+ unsigned long flags;
+ unsigned char val;
+
+ spin_lock_irqsave(&gchip->lock, flags);
+
+ val = readl(gchip->base + DDR(gpio));
+ val &= ~OFFSET(gpio);
+ writel(val, gchip->base + DDR(gpio));
+
+ spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
+}
+
+static int mb86s70_gpio_direction_output(struct gpio_chip *gc,
+ unsigned gpio, int value)
+{
+ struct mb86s70_gpio_chip *gchip = chip_to_mb86s70(gc);
+ unsigned long flags;
+ unsigned char val;
+
+ spin_lock_irqsave(&gchip->lock, flags);
+
+ val = readl(gchip->base + PDR(gpio));
+ if (value)
+ val |= OFFSET(gpio);
+ else
+ val &= ~OFFSET(gpio);
+ writel(val, gchip->base + PDR(gpio));
+
+ val = readl(gchip->base + DDR(gpio));
+ val |= OFFSET(gpio);
+ writel(val, gchip->base + DDR(gpio));
+
+ spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
+}
+
+static int mb86s70_gpio_get(struct gpio_chip *gc, unsigned gpio)
+{
+ struct mb86s70_gpio_chip *gchip = chip_to_mb86s70(gc);
+
+ return !!(readl(gchip->base + PDR(gpio)) & OFFSET(gpio));
+}
+
+static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
+{
+ struct mb86s70_gpio_chip *gchip = chip_to_mb86s70(gc);
+ unsigned long flags;
+ unsigned char val;
+
+ spin_lock_irqsave(&gchip->lock, flags);
+
+ val = readl(gchip->base + PDR(gpio));
+ if (value)
+ val |= OFFSET(gpio);
+ else
+ val &= ~OFFSET(gpio);
+ writel(val, gchip->base + PDR(gpio));
+
+ spin_unlock_irqrestore(&gchip->lock, flags);
+}
+
+static int mb86s70_gpio_probe(struct platform_device *pdev)
+{
+ struct mb86s70_gpio_chip *gchip;
+ struct resource *res;
+ int ret;
+
+ gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL);
+ if (gchip == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gchip);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gchip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gchip->base))
+ return PTR_ERR(gchip->base);
+
+ gchip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gchip->clk))
+ return PTR_ERR(gchip->clk);
+
+ clk_prepare_enable(gchip->clk);
+
+ spin_lock_init(&gchip->lock);
+
+ gchip->gc.direction_output = mb86s70_gpio_direction_output;
+ gchip->gc.direction_input = mb86s70_gpio_direction_input;
+ gchip->gc.request = mb86s70_gpio_request;
+ gchip->gc.free = mb86s70_gpio_free;
+ gchip->gc.get = mb86s70_gpio_get;
+ gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.label = dev_name(&pdev->dev);
+ gchip->gc.ngpio = 32;
+ gchip->gc.owner = THIS_MODULE;
+ gchip->gc.dev = &pdev->dev;
+ gchip->gc.base = -1;
+
+ platform_set_drvdata(pdev, gchip);
+
+ ret = gpiochip_add(&gchip->gc);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register gpio driver\n");
+ clk_disable_unprepare(gchip->clk);
+ }
+
+ return ret;
+}
+
+static int mb86s70_gpio_remove(struct platform_device *pdev)
+{
+ struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&gchip->gc);
+ clk_disable_unprepare(gchip->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mb86s70_gpio_dt_ids[] = {
+ { .compatible = "fujitsu,mb86s70-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
+
+static struct platform_driver mb86s70_gpio_driver = {
+ .driver = {
+ .name = "mb86s70-gpio",
+ .of_match_table = mb86s70_gpio_dt_ids,
+ },
+ .probe = mb86s70_gpio_probe,
+ .remove = mb86s70_gpio_remove,
+};
+
+static int __init mb86s70_gpio_init(void)
+{
+ return platform_driver_register(&mb86s70_gpio_driver);
+}
+module_init(mb86s70_gpio_init);
+
+MODULE_DESCRIPTION("MB86S7x GPIO Driver");
+MODULE_ALIAS("platform:mb86s70-gpio");
+MODULE_LICENSE("GPL");
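
A worked example of the PDR()/DDR()/PFR() register math used by the new mb86s7x driver, added purely as an illustration: with only the low 8 bits of each 32-bit register wired to pins, line 13 lands in the second register of each bank.

/* illustration only, not part of the driver */
static inline void example_decode(unsigned int gpio,
				  unsigned int *reg_off, unsigned int *bit)
{
	*reg_off = gpio / 8 * 4;	/* PDR(13) = 0x0 + 0x4, DDR(13) = 0x10 + 0x4 */
	*bit = gpio % 8;		/* OFFSET(13) = BIT(5) */
}
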
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index da9c316059bc..eea5d7e578c9 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
} else {
pdata = dev_get_platdata(&client->dev);
- if (!pdata || !gpio_is_valid(pdata->base)) {
- dev_dbg(&client->dev, "invalid platform data\n");
- return -EINVAL;
+ if (!pdata) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct mcp23s08_platform_data),
+ GFP_KERNEL);
+ pdata->base = -1;
}
}
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
} else {
type = spi_get_device_id(spi)->driver_data;
pdata = dev_get_platdata(&spi->dev);
- if (!pdata || !gpio_is_valid(pdata->base)) {
- dev_dbg(&spi->dev,
- "invalid or missing platform data\n");
- return -EINVAL;
+ if (!pdata) {
+ pdata = devm_kzalloc(&spi->dev,
+ sizeof(struct mcp23s08_platform_data),
+ GFP_KERNEL);
+ pdata->base = -1;
}
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index f228b1ce0ce0..f67ef2283d64 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -104,35 +104,34 @@ static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
static int ltq_mm_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct ltq_mm *chip;
- const __be32 *shadow;
- int ret = 0;
+ u32 shadow;
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENOENT;
- }
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ platform_set_drvdata(pdev, chip);
+
chip->mmchip.gc.ngpio = 16;
- chip->mmchip.gc.label = "gpio-mm-ltq";
chip->mmchip.gc.direction_output = ltq_mm_dir_out;
chip->mmchip.gc.set = ltq_mm_set;
chip->mmchip.save_regs = ltq_mm_save_regs;
/* store the shadow value if one was passed by the devicetree */
- shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
- if (shadow)
- chip->shadow = be32_to_cpu(*shadow);
-
- ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
- if (ret)
- kfree(chip);
- return ret;
+ if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow))
+ chip->shadow = shadow;
+
+ return of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+}
+
+static int ltq_mm_remove(struct platform_device *pdev)
+{
+ struct ltq_mm *chip = platform_get_drvdata(pdev);
+
+ of_mm_gpiochip_remove(&chip->mmchip);
+
+ return 0;
}
static const struct of_device_id ltq_mm_match[] = {
@@ -143,6 +142,7 @@ MODULE_DEVICE_TABLE(of, ltq_mm_match);
static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
+ .remove = ltq_mm_remove,
.driver = {
.name = "gpio-mm-ltq",
.of_match_table = ltq_mm_match,
@@ -155,3 +155,9 @@ static int __init ltq_mm_init(void)
}
subsys_initcall(ltq_mm_init);
+
+static void __exit ltq_mm_exit(void)
+{
+ platform_driver_unregister(&ltq_mm_driver);
+}
+module_exit(ltq_mm_exit);
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index 31e2551ed903..c3ab46e595da 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -23,21 +23,12 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/bitops.h>
+#include <linux/basic_mmio_gpio.h>
#define GPIO_DATA_OUT 0x00
#define GPIO_DATA_IN 0x04
#define GPIO_PIN_DIRECTION 0x08
-struct moxart_gpio_chip {
- struct gpio_chip gpio;
- void __iomem *base;
-};
-
-static inline struct moxart_gpio_chip *to_moxart_gpio(struct gpio_chip *chip)
-{
- return container_of(chip, struct moxart_gpio_chip, gpio);
-}
-
static int moxart_gpio_request(struct gpio_chip *chip, unsigned offset)
{
return pinctrl_request_gpio(offset);
@@ -48,90 +39,60 @@ static void moxart_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_free_gpio(offset);
}
-static void moxart_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
- void __iomem *ioaddr = gc->base + GPIO_DATA_OUT;
- u32 reg = readl(ioaddr);
-
- if (value)
- reg = reg | BIT(offset);
- else
- reg = reg & ~BIT(offset);
-
- writel(reg, ioaddr);
-}
-
static int moxart_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
- u32 ret = readl(gc->base + GPIO_PIN_DIRECTION);
+ struct bgpio_chip *bgc = to_bgpio_chip(chip);
+ u32 ret = bgc->read_reg(bgc->reg_dir);
if (ret & BIT(offset))
- return !!(readl(gc->base + GPIO_DATA_OUT) & BIT(offset));
+ return !!(bgc->read_reg(bgc->reg_set) & BIT(offset));
else
- return !!(readl(gc->base + GPIO_DATA_IN) & BIT(offset));
-}
-
-static int moxart_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
- void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
-
- writel(readl(ioaddr) & ~BIT(offset), ioaddr);
- return 0;
-}
-
-static int moxart_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
- void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
-
- moxart_gpio_set(chip, offset, value);
- writel(readl(ioaddr) | BIT(offset), ioaddr);
- return 0;
+ return !!(bgc->read_reg(bgc->reg_dat) & BIT(offset));
}
-static struct gpio_chip moxart_template_chip = {
- .label = "moxart-gpio",
- .request = moxart_gpio_request,
- .free = moxart_gpio_free,
- .direction_input = moxart_gpio_direction_input,
- .direction_output = moxart_gpio_direction_output,
- .set = moxart_gpio_set,
- .get = moxart_gpio_get,
- .ngpio = 32,
- .owner = THIS_MODULE,
-};
-
static int moxart_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
- struct moxart_gpio_chip *mgc;
+ struct bgpio_chip *bgc;
+ void __iomem *base;
int ret;
- mgc = devm_kzalloc(dev, sizeof(*mgc), GFP_KERNEL);
- if (!mgc)
+ bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL);
+ if (!bgc)
return -ENOMEM;
- mgc->gpio = moxart_template_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mgc->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(mgc->base))
- return PTR_ERR(mgc->base);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- mgc->gpio.dev = dev;
+ ret = bgpio_init(bgc, dev, 4, base + GPIO_DATA_IN,
+ base + GPIO_DATA_OUT, NULL,
+ base + GPIO_PIN_DIRECTION, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ return ret;
+ }
- ret = gpiochip_add(&mgc->gpio);
+ bgc->gc.label = "moxart-gpio";
+ bgc->gc.request = moxart_gpio_request;
+ bgc->gc.free = moxart_gpio_free;
+ bgc->gc.get = moxart_gpio_get;
+ bgc->data = bgc->read_reg(bgc->reg_set);
+ bgc->gc.base = 0;
+ bgc->gc.ngpio = 32;
+ bgc->gc.dev = dev;
+ bgc->gc.owner = THIS_MODULE;
+
+ ret = gpiochip_add(&bgc->gc);
if (ret) {
dev_err(dev, "%s: gpiochip_add failed\n",
dev->of_node->full_name);
return ret;
}
- return 0;
+ return ret;
}
static const struct of_device_id moxart_gpio_match[] = {
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 8ce6c9510035..4c542153e923 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -155,10 +155,12 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
struct gpio_chip *gc;
int ret;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ platform_set_drvdata(ofdev, chip);
+
gc = &chip->mmchip.gc;
gc->ngpio = 8;
@@ -181,7 +183,11 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
{
- return -EBUSY;
+ struct mpc52xx_gpiochip *chip = platform_get_drvdata(ofdev);
+
+ of_mm_gpiochip_remove(&chip->mmchip);
+
+ return 0;
}
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
@@ -314,10 +320,12 @@ static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
struct mpc52xx_gpio __iomem *regs;
int ret;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ platform_set_drvdata(ofdev, chip);
+
gc = &chip->mmchip.gc;
gc->ngpio = 32;
@@ -363,11 +371,16 @@ static int __init mpc52xx_gpio_init(void)
return 0;
}
-
/* Make sure we get initialised before anyone else tries to use us */
subsys_initcall(mpc52xx_gpio_init);
-/* No exit call at the moment as we cannot unregister of gpio chips */
+static void __exit mpc52xx_gpio_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_wkup_gpiochip_driver);
+
+ platform_driver_unregister(&mpc52xx_simple_gpiochip_driver);
+}
+module_exit(mpc52xx_gpio_exit);
MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index d1ff879e6ff2..a6952ba343a8 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/irq.h>
@@ -39,6 +40,7 @@ struct mpc8xxx_gpio_chip {
*/
u32 data;
struct irq_domain *irq;
+ unsigned int irqn;
const void *of_dev_id_data;
};
@@ -342,20 +344,20 @@ static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
{}
};
-static void __init mpc8xxx_add_controller(struct device_node *np)
+static int mpc8xxx_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
const struct of_device_id *id;
- unsigned hwirq;
int ret;
- mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
- if (!mpc8xxx_gc) {
- ret = -ENOMEM;
- goto err;
- }
+ mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
+ if (!mpc8xxx_gc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mpc8xxx_gc);
spin_lock_init(&mpc8xxx_gc->lock);
@@ -375,16 +377,16 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
ret = of_mm_gpiochip_add(np, mm_gc);
if (ret)
- goto err;
+ return ret;
- hwirq = irq_of_parse_and_map(np, 0);
- if (hwirq == NO_IRQ)
- goto skip_irq;
+ mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
+ if (mpc8xxx_gc->irqn == NO_IRQ)
+ return 0;
mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
&mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
if (!mpc8xxx_gc->irq)
- goto skip_irq;
+ return 0;
id = of_match_node(mpc8xxx_gpio_ids, np);
if (id)
@@ -394,27 +396,39 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
out_be32(mm_gc->regs + GPIO_IMR, 0);
- irq_set_handler_data(hwirq, mpc8xxx_gc);
- irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
-
-skip_irq:
- return;
-
-err:
- pr_err("%s: registration failed with status %d\n",
- np->full_name, ret);
- kfree(mpc8xxx_gc);
+ irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
+ irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);
- return;
+ return 0;
}
-static int __init mpc8xxx_add_gpiochips(void)
+static int mpc8xxx_remove(struct platform_device *pdev)
{
- struct device_node *np;
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev);
+
+ if (mpc8xxx_gc->irq) {
+ irq_set_handler_data(mpc8xxx_gc->irqn, NULL);
+ irq_set_chained_handler(mpc8xxx_gc->irqn, NULL);
+ irq_domain_remove(mpc8xxx_gc->irq);
+ }
- for_each_matching_node(np, mpc8xxx_gpio_ids)
- mpc8xxx_add_controller(np);
+ of_mm_gpiochip_remove(&mpc8xxx_gc->mm_gc);
return 0;
}
-arch_initcall(mpc8xxx_add_gpiochips);
+
+static struct platform_driver mpc8xxx_plat_driver = {
+ .probe = mpc8xxx_probe,
+ .remove = mpc8xxx_remove,
+ .driver = {
+ .name = "gpio-mpc8xxx",
+ .of_match_table = mpc8xxx_gpio_ids,
+ },
+};
+
+static int __init mpc8xxx_init(void)
+{
+ return platform_driver_register(&mpc8xxx_plat_driver);
+}
+
+arch_initcall(mpc8xxx_init);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 7bc3e9b288f3..d0bc123c7975 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -59,7 +59,7 @@
#define GPIO_LEVEL_MASK_OFF 0x001c
/* The MV78200 has per-CPU registers for edge mask and level mask */
-#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
+#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
#define GPIO_LEVEL_MASK_MV78200_OFF(cpu) ((cpu) ? 0x34 : 0x1C)
/* The Armada XP has per-CPU registers for interrupt cause, interrupt
@@ -69,11 +69,11 @@
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4)
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
-#define MVEBU_GPIO_SOC_VARIANT_ORION 0x1
-#define MVEBU_GPIO_SOC_VARIANT_MV78200 0x2
+#define MVEBU_GPIO_SOC_VARIANT_ORION 0x1
+#define MVEBU_GPIO_SOC_VARIANT_MV78200 0x2
#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3
-#define MVEBU_MAX_GPIO_PER_BANK 32
+#define MVEBU_MAX_GPIO_PER_BANK 32
struct mvebu_gpio_chip {
struct gpio_chip chip;
@@ -82,9 +82,9 @@ struct mvebu_gpio_chip {
void __iomem *percpu_membase;
int irqbase;
struct irq_domain *domain;
- int soc_variant;
+ int soc_variant;
- /* Used to preserve GPIO registers accross suspend/resume */
+ /* Used to preserve GPIO registers across suspend/resume */
u32 out_reg;
u32 io_conf_reg;
u32 blink_en_reg;
@@ -107,7 +107,8 @@ static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
return mvchip->membase + GPIO_BLINK_EN_OFF;
}
-static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
+static inline void __iomem *
+mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_IO_CONF_OFF;
}
@@ -117,12 +118,14 @@ static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
return mvchip->membase + GPIO_IN_POL_OFF;
}
-static inline void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
+static inline void __iomem *
+mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_DATA_IN_OFF;
}
-static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
+static inline void __iomem *
+mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
{
int cpu;
@@ -132,13 +135,15 @@ static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvc
return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
cpu = smp_processor_id();
- return mvchip->percpu_membase + GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
+ return mvchip->percpu_membase +
+ GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
default:
BUG();
}
}
-static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
+static inline void __iomem *
+mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
{
int cpu;
@@ -150,7 +155,8 @@ static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvch
return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu);
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
cpu = smp_processor_id();
- return mvchip->percpu_membase + GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
+ return mvchip->percpu_membase +
+ GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
default:
BUG();
}
@@ -168,7 +174,8 @@ static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu);
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
cpu = smp_processor_id();
- return mvchip->percpu_membase + GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
+ return mvchip->percpu_membase +
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
default:
BUG();
}
@@ -364,22 +371,22 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
* value of the line or the opposite value.
*
* Level IRQ handlers: DATA_IN is used directly as cause register.
- * Interrupt are masked by LEVEL_MASK registers.
+ * Interrupt are masked by LEVEL_MASK registers.
* Edge IRQ handlers: Change in DATA_IN are latched in EDGE_CAUSE.
- * Interrupt are masked by EDGE_MASK registers.
+ * Interrupt are masked by EDGE_MASK registers.
* Both-edge handlers: Similar to regular Edge handlers, but also swaps
- * the polarity to catch the next line transaction.
- * This is a race condition that might not perfectly
- * work on some use cases.
+ * the polarity to catch the next line transaction.
+ * This is a race condition that might not perfectly
+ * work on some use cases.
*
* Every eight GPIO lines are grouped (OR'ed) before going up to main
* cause register.
*
- * EDGE cause mask
- * data-in /--------| |-----| |----\
- * -----| |----- ---- to main cause reg
- * X \----------------| |----/
- * polarity LEVEL mask
+ * EDGE cause mask
+ * data-in /--------| |-----| |----\
+ * -----| |----- ---- to main cause reg
+ * X \----------------| |----/
+ * polarity LEVEL mask
*
****************************************************************************/
@@ -394,9 +401,8 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin = d->hwirq;
u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin);
- if (!u) {
+ if (!u)
return -EINVAL;
- }
type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
@@ -529,13 +535,13 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
(data_in ^ in_pol) & msk ? "hi" : "lo",
in_pol & msk ? "lo" : "hi");
if (!((edg_msk | lvl_msk) & msk)) {
- seq_printf(s, " disabled\n");
+ seq_puts(s, " disabled\n");
continue;
}
if (edg_msk & msk)
- seq_printf(s, " edge ");
+ seq_puts(s, " edge ");
if (lvl_msk & msk)
- seq_printf(s, " level");
+ seq_puts(s, " level");
seq_printf(s, " (%s)\n", cause & msk ? "pending" : "clear ");
}
}
@@ -546,15 +552,15 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static const struct of_device_id mvebu_gpio_of_match[] = {
{
.compatible = "marvell,orion-gpio",
- .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
},
{
.compatible = "marvell,mv78200-gpio",
- .data = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200,
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200,
},
{
.compatible = "marvell,armadaxp-gpio",
- .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
},
{
/* sentinel */
@@ -661,6 +667,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
unsigned int ngpios;
int soc_variant;
int i, cpu, id;
+ int err;
match = of_match_device(mvebu_gpio_of_match, &pdev->dev);
if (match)
@@ -668,7 +675,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
else
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
- mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL);
+ mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
+ GFP_KERNEL);
if (!mvchip)
return -ENOMEM;
@@ -767,8 +775,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
* interrupt handlers, with each handler dealing with 8 GPIO
* pins. */
for (i = 0; i < 4; i++) {
- int irq;
- irq = platform_get_irq(pdev, i);
+ int irq = platform_get_irq(pdev, i);
+
if (irq < 0)
continue;
irq_set_handler_data(irq, mvchip);
@@ -778,14 +786,16 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- return mvchip->irqbase;
+ err = mvchip->irqbase;
+ goto err_gpiochip_add;
}
gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
mvchip->membase, handle_level_irq);
if (!gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_gpiochip_add;
}
gc->private = mvchip;
@@ -816,18 +826,26 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
- irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
- IRQ_LEVEL | IRQ_NOPROBE);
- kfree(gc);
- return -ENODEV;
+ err = -ENODEV;
+ goto err_generic_chip;
}
return 0;
+
+err_generic_chip:
+ irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
+ IRQ_LEVEL | IRQ_NOPROBE);
+ kfree(gc);
+
+err_gpiochip_add:
+ gpiochip_remove(&mvchip->chip);
+
+ return err;
}
static struct platform_driver mvebu_gpio_driver = {
.driver = {
- .name = "mvebu-gpio",
+ .name = "mvebu-gpio",
.of_match_table = mvebu_gpio_of_match,
},
.probe = mvebu_gpio_probe,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 30646cfe0efa..f476ae2eb0b3 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -88,6 +88,8 @@ struct gpio_bank {
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
return readl_relaxed(reg) & mask;
}
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+ unsigned offset)
+{
+ if (!LINE_USED(bank->mod_usage, offset)) {
+ omap_enable_gpio_module(bank, offset);
+ omap_set_gpio_direction(bank, offset, 1);
+ }
+ bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
spin_lock_irqsave(&bank->lock, flags);
offset = GPIO_INDEX(bank, gpio);
retval = omap_set_gpio_triggering(bank, offset, type);
- if (!LINE_USED(bank->mod_usage, offset)) {
- omap_enable_gpio_module(bank, offset);
- omap_set_gpio_direction(bank, offset, 1);
- } else if (!omap_gpio_is_input(bank, BIT(offset))) {
+ omap_gpio_init_irq(bank, gpio, offset);
+ if (!omap_gpio_is_input(bank, BIT(offset))) {
spin_unlock_irqrestore(&bank->lock, flags);
return -EINVAL;
}
-
- bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
pm_runtime_put(bank->dev);
}
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+ unsigned long flags;
+ unsigned offset = GPIO_INDEX(bank, gpio);
+
+ if (!BANK_USED(bank))
+ pm_runtime_get_sync(bank->dev);
+
+ spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_init_irq(bank, gpio, offset);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ omap_gpio_unmask_irq(d);
+
+ return 0;
+}
+
static void omap_gpio_irq_shutdown(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
if (!irqc)
return -ENOMEM;
+ irqc->irq_startup = omap_gpio_irq_startup,
irqc->irq_shutdown = omap_gpio_irq_shutdown,
irqc->irq_ack = omap_gpio_ack_irq,
irqc->irq_mask = omap_gpio_mask_irq,
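The OMAP hunks factor the "power the module, set the line as input" work into omap_gpio_init_irq() and run it both from .irq_set_type and from a new .irq_startup callback, so a line used purely as an interrupt is configured before it is unmasked. A hedged sketch of that callback shape; the bank structure and the two helpers are hypothetical placeholders for the driver's real bank handling:

#include <linux/irq.h>
#include <linux/spinlock.h>

struct example_bank {
	spinlock_t lock;
};

/* hypothetical helpers standing in for the driver's module/direction setup */
static void example_prepare_line(struct example_bank *bank, unsigned int offset) { }
static void example_unmask_irq(struct irq_data *d) { }

static unsigned int example_irq_startup(struct irq_data *d)
{
	struct example_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	/* same shared init as .irq_set_type: enable the module, set as input */
	example_prepare_line(bank, irqd_to_hwirq(d));
	spin_unlock_irqrestore(&bank->lock, flags);

	example_unmask_irq(d);	/* startup is expected to leave the line unmasked */
	return 0;
}

Sharing the initialisation between the two entry points is the point of the refactor: whichever path runs first leaves the line powered and configured.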
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index ad3feec0075e..2fdb04b6f101 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -17,6 +17,7 @@
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -27,8 +28,6 @@
#include <linux/syscore_ops.h>
#include <linux/slab.h>
-#include <mach/irqs.h>
-
/*
* We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
* one set of registers. The register offsets are organized below:
@@ -42,9 +41,12 @@
* BANK 4 - 0x0104 0x0110 0x011C 0x0128 0x0134 0x0140 0x014C
* BANK 5 - 0x0108 0x0114 0x0120 0x012C 0x0138 0x0144 0x0150
*
+ * BANK 6 - 0x0200 0x020C 0x0218 0x0224 0x0230 0x023C 0x0248
+ *
* NOTE:
* BANK 3 is only available on PXA27x and later processors.
- * BANK 4 and 5 are only available on PXA935
+ * BANK 4 and 5 are only available on PXA935, PXA1928
+ * BANK 6 is only available on PXA1928
*/
#define GPLR_OFFSET 0x00
@@ -57,7 +59,8 @@
#define GAFR_OFFSET 0x54
#define ED_MASK_OFFSET 0x9C /* GPIO edge detection for AP side */
-#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
+#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : ((n) > 5 ? 0x200 : 0x100) \
+ + (((n) % 3) << 2))
int pxa_last_gpio;
static int irq_base;
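The reworked BANK_OFF() macro has to reproduce the register map in the comment above: banks 0-2 step by 4 from 0x00, banks 3-5 step by 4 from 0x100, and the new PXA1928-only bank 6 starts at 0x200. A small self-contained check of that arithmetic (plain C, outside the kernel):

#include <assert.h>
#include <stdio.h>

#define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : ((n) > 5 ? 0x200 : 0x100) \
			+ (((n) % 3) << 2))

int main(void)
{
	/* GPLR offsets from the bank table in the driver comment */
	assert(BANK_OFF(0) == 0x0000);
	assert(BANK_OFF(2) == 0x0008);
	assert(BANK_OFF(3) == 0x0100);
	assert(BANK_OFF(4) == 0x0104);
	assert(BANK_OFF(5) == 0x0108);
	assert(BANK_OFF(6) == 0x0200);	/* the PXA1928-only bank */
	printf("BANK_OFF mapping matches the documented layout\n");
	return 0;
}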
@@ -93,6 +96,7 @@ enum pxa_gpio_type {
PXA93X_GPIO,
MMP_GPIO = 0x10,
MMP2_GPIO,
+ PXA1928_GPIO,
};
struct pxa_gpio_id {
@@ -140,6 +144,11 @@ static struct pxa_gpio_id mmp2_id = {
.gpio_nums = 192,
};
+static struct pxa_gpio_id pxa1928_id = {
+ .type = PXA1928_GPIO,
+ .gpio_nums = 224,
+};
+
#define for_each_gpio_chip(i, c) \
for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
@@ -487,6 +496,7 @@ static int pxa_gpio_nums(struct platform_device *pdev)
case PXA93X_GPIO:
case MMP_GPIO:
case MMP2_GPIO:
+ case PXA1928_GPIO:
gpio_type = pxa_id->type;
count = pxa_id->gpio_nums - 1;
break;
@@ -506,6 +516,7 @@ static const struct of_device_id pxa_gpio_dt_ids[] = {
{ .compatible = "marvell,pxa93x-gpio", .data = &pxa93x_id, },
{ .compatible = "marvell,mmp-gpio", .data = &mmp_id, },
{ .compatible = "marvell,mmp2-gpio", .data = &mmp2_id, },
+ { .compatible = "marvell,pxa1928-gpio", .data = &pxa1928_id, },
{}
};
@@ -629,19 +640,18 @@ static int pxa_gpio_probe(struct platform_device *pdev)
}
if (!use_of) {
-#ifdef CONFIG_ARCH_PXA
- irq = gpio_to_irq(0);
- irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
- handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);
-
- irq = gpio_to_irq(1);
- irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
- handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
-#endif
+ if (irq0 > 0) {
+ irq = gpio_to_irq(0);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ if (irq1 > 0) {
+ irq = gpio_to_irq(1);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
for (irq = gpio_to_irq(gpio_offset);
irq <= gpio_to_irq(pxa_last_gpio); irq++) {
@@ -649,13 +659,13 @@ static int pxa_gpio_probe(struct platform_device *pdev)
handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- } else {
- if (irq0 > 0)
- irq_set_chained_handler(irq0, pxa_gpio_demux_handler);
- if (irq1 > 0)
- irq_set_chained_handler(irq1, pxa_gpio_demux_handler);
}
+ if (irq0 > 0)
+ irq_set_chained_handler(irq0, pxa_gpio_demux_handler);
+ if (irq1 > 0)
+ irq_set_chained_handler(irq1, pxa_gpio_demux_handler);
+
irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
return 0;
}
@@ -668,6 +678,7 @@ static const struct platform_device_id gpio_id_table[] = {
{ "pxa93x-gpio", (unsigned long)&pxa93x_id },
{ "mmp-gpio", (unsigned long)&mmp_id },
{ "mmp2-gpio", (unsigned long)&mmp2_id },
+ { "pxa1928-gpio", (unsigned long)&pxa1928_id },
{ },
};
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 584484e3f1e3..c49522efa7b3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
@@ -38,7 +37,6 @@ struct gpio_rcar_priv {
struct platform_device *pdev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- struct irq_domain *irq_domain;
};
#define IOINTSEL 0x00
@@ -82,14 +80,18 @@ static void gpio_rcar_modify_bit(struct gpio_rcar_priv *p, int offs,
static void gpio_rcar_irq_disable(struct irq_data *d)
{
- struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
gpio_rcar_write(p, INTMSK, ~BIT(irqd_to_hwirq(d)));
}
static void gpio_rcar_irq_enable(struct irq_data *d)
{
- struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
gpio_rcar_write(p, MSKCLR, BIT(irqd_to_hwirq(d)));
}
@@ -131,7 +133,9 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
unsigned int hwirq = irqd_to_hwirq(d);
dev_dbg(&p->pdev->dev, "sense irq = %d, type = %d\n", hwirq, type);
@@ -175,7 +179,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
- generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
+ generic_handle_irq(irq_find_mapping(p->gpio_chip.irqdomain,
+ offset));
irqs_handled++;
}
@@ -265,29 +270,6 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int gpio_rcar_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
-}
-
-static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct gpio_rcar_priv *p = h->host_data;
-
- dev_dbg(&p->pdev->dev, "map hw irq = %d, irq = %d\n", (int)hwirq, irq);
-
- irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID); /* kill me now */
- return 0;
-}
-
-static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
- .map = gpio_rcar_irq_domain_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
struct gpio_rcar_info {
bool has_both_edge_trigger;
};
@@ -372,10 +354,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
int ret;
p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!p)
+ return -ENOMEM;
p->pdev = pdev;
spin_lock_init(&p->lock);
@@ -413,7 +393,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get = gpio_rcar_get;
gpio_chip->direction_output = gpio_rcar_direction_output;
gpio_chip->set = gpio_rcar_set;
- gpio_chip->to_irq = gpio_rcar_to_irq;
gpio_chip->label = name;
gpio_chip->dev = dev;
gpio_chip->owner = THIS_MODULE;
@@ -428,16 +407,19 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED
| IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
- p->config.number_of_pins,
- p->config.irq_base,
- &gpio_rcar_irq_domain_ops, p);
- if (!p->irq_domain) {
- ret = -ENXIO;
- dev_err(dev, "cannot initialize irq domain\n");
+ ret = gpiochip_add(gpio_chip);
+ if (ret) {
+ dev_err(dev, "failed to add GPIO controller\n");
goto err0;
}
+ ret = gpiochip_irqchip_add(&p->gpio_chip, irq_chip, p->config.irq_base,
+ handle_level_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "cannot add irqchip\n");
+ goto err1;
+ }
+
if (devm_request_irq(dev, irq->start, gpio_rcar_irq_handler,
IRQF_SHARED, name, p)) {
dev_err(dev, "failed to request IRQ\n");
@@ -445,17 +427,11 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err1;
}
- ret = gpiochip_add(gpio_chip);
- if (ret) {
- dev_err(dev, "failed to add GPIO controller\n");
- goto err1;
- }
-
dev_info(dev, "driving %d GPIOs\n", p->config.number_of_pins);
/* warn in case of mismatch if irq base is specified */
if (p->config.irq_base) {
- ret = irq_find_mapping(p->irq_domain, 0);
+ ret = irq_find_mapping(p->gpio_chip.irqdomain, 0);
if (p->config.irq_base != ret)
dev_warn(dev, "irq base mismatch (%u/%u)\n",
p->config.irq_base, ret);
@@ -471,7 +447,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return 0;
err1:
- irq_domain_remove(p->irq_domain);
+ gpiochip_remove(&p->gpio_chip);
err0:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -484,7 +460,6 @@ static int gpio_rcar_remove(struct platform_device *pdev)
gpiochip_remove(&p->gpio_chip);
- irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
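The R-Car conversion drops the private irq_domain and leans on the gpiolib irqchip helpers: gpiochip_add() registers the chip, then gpiochip_irqchip_add() creates the domain, wires up .to_irq and stores the gpio_chip itself as the per-IRQ chip data, which is why the handlers now recover the private structure with container_of(). A hedged sketch of that registration order, assuming a private structure that embeds the gpio_chip:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct example_priv {
	struct gpio_chip gpio_chip;
	struct irq_chip irq_chip;
};

static int example_register(struct example_priv *p)
{
	int ret;

	ret = gpiochip_add(&p->gpio_chip);	/* register the GPIOs first */
	if (ret)
		return ret;

	/* creates the irqdomain and hooks .to_irq for us */
	ret = gpiochip_irqchip_add(&p->gpio_chip, &p->irq_chip, 0,
				   handle_level_irq, IRQ_TYPE_NONE);
	if (ret)
		gpiochip_remove(&p->gpio_chip);

	return ret;
}

In the irq callbacks the private data is then recovered with container_of(irq_data_get_irq_chip_data(d), struct example_priv, gpio_chip), exactly as the hunks above do for gpio_rcar_priv.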
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index a90be34e4d5c..bec397a60204 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/syscore_ops.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
@@ -50,7 +51,7 @@ static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int
static int sa1100_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return offset < 11 ? (IRQ_GPIO0 + offset) : (IRQ_GPIO11 - 11 + offset);
+ return IRQ_GPIO0 + offset;
}
static struct gpio_chip sa1100_gpio_chip = {
@@ -64,7 +65,203 @@ static struct gpio_chip sa1100_gpio_chip = {
.ngpio = GPIO_MAX + 1,
};
+/*
+ * SA1100 GPIO edge detection for IRQs:
+ * IRQs are generated on Falling-Edge, Rising-Edge, or both.
+ * Use this instead of directly setting GRER/GFER.
+ */
+static int GPIO_IRQ_rising_edge;
+static int GPIO_IRQ_falling_edge;
+static int GPIO_IRQ_mask;
+
+static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int mask;
+
+ mask = BIT(d->hwirq);
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
+ return 0;
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ GPIO_IRQ_rising_edge |= mask;
+ else
+ GPIO_IRQ_rising_edge &= ~mask;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ GPIO_IRQ_falling_edge |= mask;
+ else
+ GPIO_IRQ_falling_edge &= ~mask;
+
+ GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
+ GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
+
+ return 0;
+}
+
+/*
+ * GPIO IRQs must be acknowledged.
+ */
+static void sa1100_gpio_ack(struct irq_data *d)
+{
+ GEDR = BIT(d->hwirq);
+}
+
+static void sa1100_gpio_mask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ GPIO_IRQ_mask &= ~mask;
+
+ GRER &= ~mask;
+ GFER &= ~mask;
+}
+
+static void sa1100_gpio_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ GPIO_IRQ_mask |= mask;
+
+ GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
+ GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
+}
+
+static int sa1100_gpio_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ PWER |= BIT(d->hwirq);
+ else
+ PWER &= ~BIT(d->hwirq);
+ return 0;
+}
+
+/*
+ * This is for GPIO IRQs
+ */
+static struct irq_chip sa1100_gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_ack = sa1100_gpio_ack,
+ .irq_mask = sa1100_gpio_mask,
+ .irq_unmask = sa1100_gpio_unmask,
+ .irq_set_type = sa1100_gpio_type,
+ .irq_set_wake = sa1100_gpio_wake,
+};
+
+static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops sa1100_gpio_irqdomain_ops = {
+ .map = sa1100_gpio_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static struct irq_domain *sa1100_gpio_irqdomain;
+
+/*
+ * IRQ 0-11 (GPIO) handler. We enter here with the
+ * irq_controller_lock held, and IRQs disabled. Decode the IRQ
+ * and call the handler.
+ */
+static void
+sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int mask;
+
+ mask = GEDR;
+ do {
+ /*
+ * clear down all currently active IRQ sources.
+ * We will be processing them all.
+ */
+ GEDR = mask;
+
+ irq = IRQ_GPIO0;
+ do {
+ if (mask & 1)
+ generic_handle_irq(irq);
+ mask >>= 1;
+ irq++;
+ } while (mask);
+
+ mask = GEDR;
+ } while (mask);
+}
+
+static int sa1100_gpio_suspend(void)
+{
+ /*
+ * Set the appropriate edges for wakeup.
+ */
+ GRER = PWER & GPIO_IRQ_rising_edge;
+ GFER = PWER & GPIO_IRQ_falling_edge;
+
+ /*
+ * Clear any pending GPIO interrupts.
+ */
+ GEDR = GEDR;
+
+ return 0;
+}
+
+static void sa1100_gpio_resume(void)
+{
+ GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
+ GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
+}
+
+static struct syscore_ops sa1100_gpio_syscore_ops = {
+ .suspend = sa1100_gpio_suspend,
+ .resume = sa1100_gpio_resume,
+};
+
+static int __init sa1100_gpio_init_devicefs(void)
+{
+ register_syscore_ops(&sa1100_gpio_syscore_ops);
+ return 0;
+}
+
+device_initcall(sa1100_gpio_init_devicefs);
+
void __init sa1100_init_gpio(void)
{
+ /* clear all GPIO edge detects */
+ GFER = 0;
+ GRER = 0;
+ GEDR = -1;
+
gpiochip_add(&sa1100_gpio_chip);
+
+ sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
+ 28, IRQ_GPIO0,
+ &sa1100_gpio_irqdomain_ops, NULL);
+
+ /*
+ * Install handlers for GPIO 0-10 edge detect interrupts
+ */
+ irq_set_chained_handler(IRQ_GPIO0_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO1_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO2_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO3_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO4_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO5_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO6_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO7_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO8_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO9_SC, sa1100_gpio_handler);
+ irq_set_chained_handler(IRQ_GPIO10_SC, sa1100_gpio_handler);
+ /*
+ * Install handler for GPIO 11-27 edge detect interrupts
+ */
+ irq_set_chained_handler(IRQ_GPIO11_27, sa1100_gpio_handler);
+
}
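The new sa1100_gpio_handler() is a classic chained demultiplexer: read the latched edge-detect status (GEDR), acknowledge the whole batch, dispatch one Linux IRQ per set bit, then re-read the status until it is clean. A small self-contained illustration of that "ack, then walk the bits" loop (plain C, with a fake status word standing in for GEDR):

#include <stdio.h>

int main(void)
{
	unsigned int gedr = 0x00000805;		/* pretend GPIOs 0, 2 and 11 fired */
	unsigned int pending;

	while ((pending = gedr) != 0) {
		gedr = 0;			/* "writing back" clears the latched edges */
		while (pending) {
			int bit = __builtin_ctz(pending);
			printf("dispatch edge IRQ for GPIO %d\n", bit);
			pending &= pending - 1;	/* clear the lowest set bit */
		}
		/* a real handler re-reads GEDR here to catch edges that
		 * arrived while the earlier bits were being handled */
	}
	return 0;
}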
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 0a0cf1307d2f..b72906f5b999 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -41,7 +41,7 @@ struct sch_gpio {
unsigned short resume_base;
};
-#define to_sch_gpio(c) container_of(c, struct sch_gpio, chip)
+#define to_sch_gpio(gc) container_of(gc, struct sch_gpio, chip)
static unsigned sch_gpio_offset(struct sch_gpio *sch, unsigned gpio,
unsigned reg)
@@ -63,75 +63,59 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
return gpio % 8;
}
-static void sch_gpio_enable(struct sch_gpio *sch, unsigned gpio)
+static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
{
+ struct sch_gpio *sch = to_sch_gpio(gc);
unsigned short offset, bit;
- u8 enable;
-
- spin_lock(&sch->lock);
+ u8 reg_val;
- offset = sch_gpio_offset(sch, gpio, GEN);
+ offset = sch_gpio_offset(sch, gpio, reg);
bit = sch_gpio_bit(sch, gpio);
- enable = inb(sch->iobase + offset);
- if (!(enable & (1 << bit)))
- outb(enable | (1 << bit), sch->iobase + offset);
+ reg_val = !!(inb(sch->iobase + offset) & BIT(bit));
- spin_unlock(&sch->lock);
+ return reg_val;
}
-static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
+static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+ int val)
{
struct sch_gpio *sch = to_sch_gpio(gc);
- u8 curr_dirs;
unsigned short offset, bit;
+ u8 reg_val;
- spin_lock(&sch->lock);
+ offset = sch_gpio_offset(sch, gpio, reg);
+ bit = sch_gpio_bit(sch, gpio);
- offset = sch_gpio_offset(sch, gpio_num, GIO);
- bit = sch_gpio_bit(sch, gpio_num);
+ reg_val = inb(sch->iobase + offset);
- curr_dirs = inb(sch->iobase + offset);
+ if (val)
+ outb(reg_val | BIT(bit), sch->iobase + offset);
+ else
+ outb((reg_val & ~BIT(bit)), sch->iobase + offset);
+}
- if (!(curr_dirs & (1 << bit)))
- outb(curr_dirs | (1 << bit), sch->iobase + offset);
+static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
+{
+ struct sch_gpio *sch = to_sch_gpio(gc);
+ spin_lock(&sch->lock);
+ sch_gpio_reg_set(gc, gpio_num, GIO, 1);
spin_unlock(&sch->lock);
return 0;
}
static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
{
- struct sch_gpio *sch = to_sch_gpio(gc);
- int res;
- unsigned short offset, bit;
-
- offset = sch_gpio_offset(sch, gpio_num, GLV);
- bit = sch_gpio_bit(sch, gpio_num);
-
- res = !!(inb(sch->iobase + offset) & (1 << bit));
-
- return res;
+ return sch_gpio_reg_get(gc, gpio_num, GLV);
}
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
{
struct sch_gpio *sch = to_sch_gpio(gc);
- u8 curr_vals;
- unsigned short offset, bit;
spin_lock(&sch->lock);
-
- offset = sch_gpio_offset(sch, gpio_num, GLV);
- bit = sch_gpio_bit(sch, gpio_num);
-
- curr_vals = inb(sch->iobase + offset);
-
- if (val)
- outb(curr_vals | (1 << bit), sch->iobase + offset);
- else
- outb((curr_vals & ~(1 << bit)), sch->iobase + offset);
-
+ sch_gpio_reg_set(gc, gpio_num, GLV, val);
spin_unlock(&sch->lock);
}
@@ -139,18 +123,9 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
int val)
{
struct sch_gpio *sch = to_sch_gpio(gc);
- u8 curr_dirs;
- unsigned short offset, bit;
spin_lock(&sch->lock);
-
- offset = sch_gpio_offset(sch, gpio_num, GIO);
- bit = sch_gpio_bit(sch, gpio_num);
-
- curr_dirs = inb(sch->iobase + offset);
- if (curr_dirs & (1 << bit))
- outb(curr_dirs & ~(1 << bit), sch->iobase + offset);
-
+ sch_gpio_reg_set(gc, gpio_num, GIO, 0);
spin_unlock(&sch->lock);
/*
@@ -209,13 +184,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
* GPIO7 is configured by the CMC as SLPIOVR
* Enable GPIO[9:8] core powered gpios explicitly
*/
- sch_gpio_enable(sch, 8);
- sch_gpio_enable(sch, 9);
+ sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
+ sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
/*
* SUS_GPIO[2:0] enabled by default
* Enable SUS_GPIO3 resume powered gpio explicitly
*/
- sch_gpio_enable(sch, 13);
+ sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
@@ -230,6 +205,12 @@ static int sch_gpio_probe(struct platform_device *pdev)
sch->chip.ngpio = 30;
break;
+ case PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB:
+ sch->core_base = 0;
+ sch->resume_base = 2;
+ sch->chip.ngpio = 8;
+ break;
+
default:
return -ENODEV;
}
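The gpio-sch rework collapses four near-identical read-modify-write sequences into sch_gpio_reg_get()/sch_gpio_reg_set(), which locate a pin's byte register (gpio / 8) and bit (gpio % 8) and then touch a single bit. A simplified, self-contained model of that accessor pair (plain C; the array stands in for the inb()/outb() I/O ports, and the real driver additionally folds in the GEN/GIO/GLV block offsets and the core/resume split):

#include <assert.h>
#include <stdint.h>

static uint8_t regs[16];			/* fake I/O space */

static unsigned int reg_offset(unsigned int gpio)	{ return gpio / 8; }
static unsigned int reg_bit(unsigned int gpio)		{ return gpio % 8; }

static int reg_get(unsigned int gpio)
{
	return !!(regs[reg_offset(gpio)] & (1u << reg_bit(gpio)));
}

static void reg_set(unsigned int gpio, int val)
{
	if (val)
		regs[reg_offset(gpio)] |= 1u << reg_bit(gpio);
	else
		regs[reg_offset(gpio)] &= ~(1u << reg_bit(gpio));
}

int main(void)
{
	reg_set(13, 1);				/* e.g. enable a pin in GEN */
	assert(reg_get(13) == 1);
	assert(regs[1] == 0x20);		/* gpio 13 lands in byte 1, bit 5 */
	reg_set(13, 0);
	assert(reg_get(13) == 0);
	return 0;
}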
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 85c5b1974294..dabfb99dddef 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -30,7 +30,7 @@ struct stmpe_gpio {
struct stmpe *stmpe;
struct device *dev;
struct mutex irq_lock;
- unsigned norequest_mask;
+ u32 norequest_mask;
/* Caches of interrupt control registers for bus_lock */
u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
@@ -340,13 +340,10 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
- struct stmpe_gpio_platform_data *pdata;
struct stmpe_gpio *stmpe_gpio;
int ret;
int irq = 0;
- pdata = stmpe->pdata->gpio;
-
irq = platform_get_irq(pdev, 0);
stmpe_gpio = kzalloc(sizeof(struct stmpe_gpio), GFP_KERNEL);
@@ -360,19 +357,14 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
stmpe_gpio->chip.dev = &pdev->dev;
-#ifdef CONFIG_OF
stmpe_gpio->chip.of_node = np;
-#endif
stmpe_gpio->chip.base = -1;
if (IS_ENABLED(CONFIG_DEBUG_FS))
stmpe_gpio->chip.dbg_show = stmpe_dbg_show;
- if (pdata)
- stmpe_gpio->norequest_mask = pdata->norequest_mask;
- else if (np)
- of_property_read_u32(np, "st,norequest-mask",
- &stmpe_gpio->norequest_mask);
+ of_property_read_u32(np, "st,norequest-mask",
+ &stmpe_gpio->norequest_mask);
if (irq < 0)
dev_info(&pdev->dev,
@@ -414,9 +406,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
NULL);
}
- if (pdata && pdata->setup)
- pdata->setup(stmpe, stmpe_gpio->chip.base);
-
platform_set_drvdata(pdev, stmpe_gpio);
return 0;
@@ -433,15 +422,9 @@ static int stmpe_gpio_remove(struct platform_device *pdev)
{
struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev);
struct stmpe *stmpe = stmpe_gpio->stmpe;
- struct stmpe_gpio_platform_data *pdata = stmpe->pdata->gpio;
-
- if (pdata && pdata->remove)
- pdata->remove(stmpe, stmpe_gpio->chip.base);
gpiochip_remove(&stmpe_gpio->chip);
-
stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
-
kfree(stmpe_gpio);
return 0;
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index bce6c6108f20..458d9d7952b8 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -23,23 +23,51 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/i2c/sx150x.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#define NO_UPDATE_PENDING -1
+/* The chip models of sx150x */
+#define SX150X_456 0
+#define SX150X_789 1
+
+struct sx150x_456_pri {
+ u8 reg_pld_mode;
+ u8 reg_pld_table0;
+ u8 reg_pld_table1;
+ u8 reg_pld_table2;
+ u8 reg_pld_table3;
+ u8 reg_pld_table4;
+ u8 reg_advance;
+};
+
+struct sx150x_789_pri {
+ u8 reg_drain;
+ u8 reg_polarity;
+ u8 reg_clock;
+ u8 reg_misc;
+ u8 reg_reset;
+ u8 ngpios;
+};
+
struct sx150x_device_data {
+ u8 model;
u8 reg_pullup;
u8 reg_pulldn;
- u8 reg_drain;
- u8 reg_polarity;
u8 reg_dir;
u8 reg_data;
u8 reg_irq_mask;
u8 reg_irq_src;
u8 reg_sense;
- u8 reg_clock;
- u8 reg_misc;
- u8 reg_reset;
u8 ngpios;
+ union {
+ struct sx150x_456_pri x456;
+ struct sx150x_789_pri x789;
+ } pri;
};
struct sx150x_chip {
@@ -59,44 +87,79 @@ struct sx150x_chip {
static const struct sx150x_device_data sx150x_devices[] = {
[0] = { /* sx1508q */
- .reg_pullup = 0x03,
- .reg_pulldn = 0x04,
- .reg_drain = 0x05,
- .reg_polarity = 0x06,
- .reg_dir = 0x07,
- .reg_data = 0x08,
- .reg_irq_mask = 0x09,
- .reg_irq_src = 0x0c,
- .reg_sense = 0x0b,
- .reg_clock = 0x0f,
- .reg_misc = 0x10,
- .reg_reset = 0x7d,
- .ngpios = 8
+ .model = SX150X_789,
+ .reg_pullup = 0x03,
+ .reg_pulldn = 0x04,
+ .reg_dir = 0x07,
+ .reg_data = 0x08,
+ .reg_irq_mask = 0x09,
+ .reg_irq_src = 0x0c,
+ .reg_sense = 0x0b,
+ .pri.x789 = {
+ .reg_drain = 0x05,
+ .reg_polarity = 0x06,
+ .reg_clock = 0x0f,
+ .reg_misc = 0x10,
+ .reg_reset = 0x7d,
+ },
+ .ngpios = 8,
},
[1] = { /* sx1509q */
- .reg_pullup = 0x07,
- .reg_pulldn = 0x09,
- .reg_drain = 0x0b,
- .reg_polarity = 0x0d,
- .reg_dir = 0x0f,
- .reg_data = 0x11,
- .reg_irq_mask = 0x13,
- .reg_irq_src = 0x19,
- .reg_sense = 0x17,
- .reg_clock = 0x1e,
- .reg_misc = 0x1f,
- .reg_reset = 0x7d,
- .ngpios = 16
+ .model = SX150X_789,
+ .reg_pullup = 0x07,
+ .reg_pulldn = 0x09,
+ .reg_dir = 0x0f,
+ .reg_data = 0x11,
+ .reg_irq_mask = 0x13,
+ .reg_irq_src = 0x19,
+ .reg_sense = 0x17,
+ .pri.x789 = {
+ .reg_drain = 0x0b,
+ .reg_polarity = 0x0d,
+ .reg_clock = 0x1e,
+ .reg_misc = 0x1f,
+ .reg_reset = 0x7d,
+ },
+ .ngpios = 16
+ },
+ [2] = { /* sx1506q */
+ .model = SX150X_456,
+ .reg_pullup = 0x05,
+ .reg_pulldn = 0x07,
+ .reg_dir = 0x03,
+ .reg_data = 0x01,
+ .reg_irq_mask = 0x09,
+ .reg_irq_src = 0x0f,
+ .reg_sense = 0x0d,
+ .pri.x456 = {
+ .reg_pld_mode = 0x21,
+ .reg_pld_table0 = 0x23,
+ .reg_pld_table1 = 0x25,
+ .reg_pld_table2 = 0x27,
+ .reg_pld_table3 = 0x29,
+ .reg_pld_table4 = 0x2b,
+ .reg_advance = 0xad,
+ },
+ .ngpios = 16
},
};
static const struct i2c_device_id sx150x_id[] = {
{"sx1508q", 0},
{"sx1509q", 1},
+ {"sx1506q", 2},
{}
};
MODULE_DEVICE_TABLE(i2c, sx150x_id);
+static const struct of_device_id sx150x_of_match[] = {
+ { .compatible = "semtech,sx1508q" },
+ { .compatible = "semtech,sx1509q" },
+ { .compatible = "semtech,sx1506q" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sx150x_of_match);
+
static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
{
s32 err = i2c_smbus_write_byte_data(client, reg, val);
@@ -191,7 +254,7 @@ static int sx150x_get_io(struct sx150x_chip *chip, unsigned offset)
static void sx150x_set_oscio(struct sx150x_chip *chip, int val)
{
sx150x_i2c_write(chip->client,
- chip->dev_cfg->reg_clock,
+ chip->dev_cfg->pri.x789.reg_clock,
(val ? 0x1f : 0x10));
}
@@ -293,27 +356,11 @@ static int sx150x_gpio_direction_output(struct gpio_chip *gc,
return status;
}
-static int sx150x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
-{
- struct sx150x_chip *chip;
-
- chip = container_of(gc, struct sx150x_chip, gpio_chip);
-
- if (offset >= chip->dev_cfg->ngpios)
- return -EINVAL;
-
- if (chip->irq_base < 0)
- return -EINVAL;
-
- return chip->irq_base + offset;
-}
-
static void sx150x_irq_mask(struct irq_data *d)
{
struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
- unsigned n;
+ unsigned n = d->hwirq;
- n = d->irq - chip->irq_base;
chip->irq_masked |= (1 << n);
chip->irq_update = n;
}
@@ -321,9 +368,8 @@ static void sx150x_irq_mask(struct irq_data *d)
static void sx150x_irq_unmask(struct irq_data *d)
{
struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
- unsigned n;
+ unsigned n = d->hwirq;
- n = d->irq - chip->irq_base;
chip->irq_masked &= ~(1 << n);
chip->irq_update = n;
}
@@ -336,7 +382,7 @@ static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
return -EINVAL;
- n = d->irq - chip->irq_base;
+ n = d->hwirq;
if (flow_type & IRQ_TYPE_EDGE_RISING)
val |= 0x1;
@@ -371,7 +417,9 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
val);
for (n = 0; n < 8; ++n) {
if (val & (1 << n)) {
- sub_irq = chip->irq_base + (i * 8) + n;
+ sub_irq = irq_find_mapping(
+ chip->gpio_chip.irqdomain,
+ (i * 8) + n);
handle_nested_irq(sub_irq);
++nhandled;
}
@@ -401,7 +449,7 @@ static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
/* Avoid updates if nothing changed */
if (chip->dev_sense == chip->irq_sense &&
- chip->dev_sense == chip->irq_masked)
+ chip->dev_masked == chip->irq_masked)
goto out;
chip->dev_sense = chip->irq_sense;
@@ -428,15 +476,19 @@ static void sx150x_init_chip(struct sx150x_chip *chip,
chip->client = client;
chip->dev_cfg = &sx150x_devices[driver_data];
+ chip->gpio_chip.dev = &client->dev;
chip->gpio_chip.label = client->name;
chip->gpio_chip.direction_input = sx150x_gpio_direction_input;
chip->gpio_chip.direction_output = sx150x_gpio_direction_output;
chip->gpio_chip.get = sx150x_gpio_get;
chip->gpio_chip.set = sx150x_gpio_set;
- chip->gpio_chip.to_irq = sx150x_gpio_to_irq;
chip->gpio_chip.base = pdata->gpio_base;
chip->gpio_chip.can_sleep = true;
chip->gpio_chip.ngpio = chip->dev_cfg->ngpios;
+#ifdef CONFIG_OF_GPIO
+ chip->gpio_chip.of_node = client->dev.of_node;
+ chip->gpio_chip.of_gpio_n_cells = 2;
+#endif
if (pdata->oscio_is_gpo)
++chip->gpio_chip.ngpio;
@@ -470,13 +522,13 @@ static int sx150x_reset(struct sx150x_chip *chip)
int err;
err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->reg_reset,
+ chip->dev_cfg->pri.x789.reg_reset,
0x12);
if (err < 0)
return err;
err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->reg_reset,
+ chip->dev_cfg->pri.x789.reg_reset,
0x34);
return err;
}
@@ -492,9 +544,14 @@ static int sx150x_init_hw(struct sx150x_chip *chip,
return err;
}
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->reg_misc,
- 0x01);
+ if (chip->dev_cfg->model == SX150X_789)
+ err = sx150x_i2c_write(chip->client,
+ chip->dev_cfg->pri.x789.reg_misc,
+ 0x01);
+ else
+ err = sx150x_i2c_write(chip->client,
+ chip->dev_cfg->pri.x456.reg_advance,
+ 0x04);
if (err < 0)
return err;
@@ -508,15 +565,27 @@ static int sx150x_init_hw(struct sx150x_chip *chip,
if (err < 0)
return err;
- err = sx150x_init_io(chip, chip->dev_cfg->reg_drain,
- pdata->io_open_drain_ena);
- if (err < 0)
- return err;
+ if (chip->dev_cfg->model == SX150X_789) {
+ err = sx150x_init_io(chip,
+ chip->dev_cfg->pri.x789.reg_drain,
+ pdata->io_open_drain_ena);
+ if (err < 0)
+ return err;
+
+ err = sx150x_init_io(chip,
+ chip->dev_cfg->pri.x789.reg_polarity,
+ pdata->io_polarity);
+ if (err < 0)
+ return err;
+ } else {
+ /* Set all pins to work in normal mode */
+ err = sx150x_init_io(chip,
+ chip->dev_cfg->pri.x456.reg_pld_mode,
+ 0);
+ if (err < 0)
+ return err;
+ }
- err = sx150x_init_io(chip, chip->dev_cfg->reg_polarity,
- pdata->io_polarity);
- if (err < 0)
- return err;
if (pdata->oscio_is_gpo)
sx150x_set_oscio(chip, 0);
@@ -529,31 +598,24 @@ static int sx150x_install_irq_chip(struct sx150x_chip *chip,
int irq_base)
{
int err;
- unsigned n;
- unsigned irq;
chip->irq_summary = irq_summary;
chip->irq_base = irq_base;
- for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
- irq = irq_base + n;
- irq_set_chip_data(irq, chip);
- irq_set_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq);
- irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
+ /* Add gpio chip to irq subsystem */
+ err = gpiochip_irqchip_add(&chip->gpio_chip,
+ &chip->irq_chip, chip->irq_base,
+ handle_edge_irq, IRQ_TYPE_EDGE_BOTH);
+ if (err) {
+ dev_err(&chip->client->dev,
+ "could not connect irqchip to gpiochip\n");
+ return err;
}
err = devm_request_threaded_irq(&chip->client->dev,
- irq_summary,
- NULL,
- sx150x_irq_thread_fn,
- IRQF_SHARED | IRQF_TRIGGER_FALLING,
- chip->irq_chip.name,
- chip);
+ irq_summary, NULL, sx150x_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_FALLING,
+ chip->irq_chip.name, chip);
if (err < 0) {
chip->irq_summary = -1;
chip->irq_base = -1;
@@ -562,17 +624,6 @@ static int sx150x_install_irq_chip(struct sx150x_chip *chip,
return err;
}
-static void sx150x_remove_irq_chip(struct sx150x_chip *chip)
-{
- unsigned n;
- unsigned irq;
-
- for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
- irq = chip->irq_base + n;
- irq_set_chip_and_handler(irq, NULL, NULL);
- }
-}
-
static int sx150x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -626,16 +677,14 @@ static int sx150x_remove(struct i2c_client *client)
chip = i2c_get_clientdata(client);
gpiochip_remove(&chip->gpio_chip);
- if (chip->irq_summary >= 0)
- sx150x_remove_irq_chip(chip);
-
return 0;
}
static struct i2c_driver sx150x_driver = {
.driver = {
.name = "sx150x",
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sx150x_of_match),
},
.probe = sx150x_probe,
.remove = sx150x_remove,
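With the switch to gpiochip_irqchip_add(), the sx150x threaded handler no longer computes irq_base + n by hand: it asks the gpiochip's irq domain for the Linux IRQ behind each hardware bit and hands it to handle_nested_irq(). A hedged sketch of that dispatch loop, assuming a private structure that embeds the gpio_chip and a hypothetical register-read helper:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct example_chip {
	struct gpio_chip gpio_chip;
};

/* hypothetical: reads the expander's pending-interrupt-source register */
static u8 example_read_irq_src(struct example_chip *chip)
{
	return 0;
}

static irqreturn_t example_irq_thread_fn(int irq, void *dev_id)
{
	struct example_chip *chip = dev_id;
	u8 pending = example_read_irq_src(chip);
	unsigned int n, handled = 0;

	for (n = 0; n < 8; n++) {
		if (!(pending & BIT(n)))
			continue;
		handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain, n));
		handled++;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

Mask, unmask and set_type make the matching change from d->irq - chip->irq_base to d->hwirq, which is the same hardware bit number the domain mapping uses.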
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index abdcf58935f5..11aed2671065 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -232,16 +232,13 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
static int tc3589x_gpio_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
- struct tc3589x_gpio_platform_data *pdata;
struct device_node *np = pdev->dev.of_node;
struct tc3589x_gpio *tc3589x_gpio;
int ret;
int irq;
- pdata = tc3589x->pdata->gpio;
-
- if (!(pdata || np)) {
- dev_err(&pdev->dev, "No platform data or Device Tree found\n");
+ if (!np) {
+ dev_err(&pdev->dev, "No Device Tree node found\n");
return -EINVAL;
}
@@ -305,9 +302,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
irq,
NULL);
- if (pdata && pdata->setup)
- pdata->setup(tc3589x, tc3589x_gpio->chip.base);
-
platform_set_drvdata(pdev, tc3589x_gpio);
return 0;
@@ -316,11 +310,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
static int tc3589x_gpio_remove(struct platform_device *pdev)
{
struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
- struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
- struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio;
-
- if (pdata && pdata->remove)
- pdata->remove(tc3589x, tc3589x_gpio->chip.base);
gpiochip_remove(&tc3589x_gpio->chip);
diff --git a/drivers/gpio/gpio-tz1090-pdc.c b/drivers/gpio/gpio-tz1090-pdc.c
index d7536226b847..ede7e403ffde 100644
--- a/drivers/gpio/gpio-tz1090-pdc.c
+++ b/drivers/gpio/gpio-tz1090-pdc.c
@@ -190,7 +190,7 @@ static int tz1090_pdc_gpio_probe(struct platform_device *pdev)
/* Ioremap the registers */
priv->reg = devm_ioremap(&pdev->dev, res_regs->start,
- res_regs->end - res_regs->start);
+ resource_size(res_regs));
if (!priv->reg) {
dev_err(&pdev->dev, "unable to ioremap registers\n");
return -ENOMEM;
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index e3024bbba447..445660adc898 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -573,7 +573,7 @@ static int tz1090_gpio_probe(struct platform_device *pdev)
/* Ioremap the registers */
priv.reg = devm_ioremap(&pdev->dev, res_regs->start,
- res_regs->end - res_regs->start);
+ resource_size(res_regs));
if (!priv.reg) {
dev_err(&pdev->dev, "unable to ioremap registers\n");
return -ENOMEM;
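The two tz1090 fixes matter because struct resource ranges are inclusive: end - start is one byte short, and resource_size() is the helper that adds the missing +1. A minimal demonstration of the off-by-one (plain C, with a simplified resource structure):

#include <assert.h>
#include <stdio.h>

struct resource {			/* simplified: kernel ranges are inclusive */
	unsigned long start, end;
};

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* what the kernel helper does */
}

int main(void)
{
	struct resource regs = { .start = 0x02006000, .end = 0x020063ff };

	assert(regs.end - regs.start == 0x3ff);	/* the old code: one byte short */
	assert(resource_size(&regs) == 0x400);	/* the full register window */
	printf("mapping %#lx bytes\n", resource_size(&regs));
	return 0;
}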
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 4ee4cee832ec..971c73964ef1 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -278,7 +278,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
static struct platform_driver vf610_gpio_driver = {
.driver = {
.name = "gpio-vf610",
- .owner = THIS_MODULE,
.of_match_table = vf610_gpio_dt_ids,
},
.probe = vf610_gpio_probe,
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 9d21d2fcc327..57b470d5b39e 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -52,8 +52,6 @@ struct vx855_gpio {
spinlock_t lock;
u32 io_gpi;
u32 io_gpo;
- bool gpi_reserved;
- bool gpo_reserved;
};
/* resolve a GPIx into the corresponding bit position */
@@ -224,14 +222,13 @@ static int vx855gpio_probe(struct platform_device *pdev)
struct resource *res_gpi;
struct resource *res_gpo;
struct vx855_gpio *vg;
- int ret;
res_gpi = platform_get_resource(pdev, IORESOURCE_IO, 0);
res_gpo = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (!res_gpi || !res_gpo)
return -EBUSY;
- vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
if (!vg)
return -ENOMEM;
@@ -250,56 +247,27 @@ static int vx855gpio_probe(struct platform_device *pdev)
* succeed. Ignore and continue.
*/
- if (!request_region(res_gpi->start, resource_size(res_gpi),
- MODULE_NAME "_gpi"))
+ if (!devm_request_region(&pdev->dev, res_gpi->start,
+ resource_size(res_gpi), MODULE_NAME "_gpi"))
dev_warn(&pdev->dev,
"GPI I/O resource busy, probably claimed by ACPI\n");
- else
- vg->gpi_reserved = true;
- if (!request_region(res_gpo->start, resource_size(res_gpo),
- MODULE_NAME "_gpo"))
+ if (!devm_request_region(&pdev->dev, res_gpo->start,
+ resource_size(res_gpo), MODULE_NAME "_gpo"))
dev_warn(&pdev->dev,
"GPO I/O resource busy, probably claimed by ACPI\n");
- else
- vg->gpo_reserved = true;
vx855gpio_gpio_setup(vg);
- ret = gpiochip_add(&vg->gpio);
- if (ret) {
- dev_err(&pdev->dev, "failed to register GPIOs\n");
- goto out_release;
- }
-
- return 0;
-
-out_release:
- if (vg->gpi_reserved)
- release_region(res_gpi->start, resource_size(res_gpi));
- if (vg->gpo_reserved)
- release_region(res_gpi->start, resource_size(res_gpo));
- kfree(vg);
- return ret;
+ return gpiochip_add(&vg->gpio);
}
static int vx855gpio_remove(struct platform_device *pdev)
{
struct vx855_gpio *vg = platform_get_drvdata(pdev);
- struct resource *res;
gpiochip_remove(&vg->gpio);
- if (vg->gpi_reserved) {
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, resource_size(res));
- }
- if (vg->gpo_reserved) {
- res = platform_get_resource(pdev, IORESOURCE_IO, 1);
- release_region(res->start, resource_size(res));
- }
-
- kfree(vg);
return 0;
}
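The vx855 patch converts manual kzalloc/request_region/release_region bookkeeping to devm_* managed resources, which is why the error labels and most of remove() disappear. A hedged sketch of the resulting probe shape; the resource index, allocation size and region name are placeholders, only the managed-resource idea matters:

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	void *priv;

	if (!res)
		return -EBUSY;

	priv = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);	/* freed on unbind */
	if (!priv)
		return -ENOMEM;

	/* released automatically when the device unbinds; a failure is only a
	 * warning here, mirroring the driver's "probably claimed by ACPI" case */
	if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
				 "example_io"))
		dev_warn(&pdev->dev, "I/O resource busy\n");

	return 0;
}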
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
new file mode 100644
index 000000000000..b6a15c39293e
--- /dev/null
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -0,0 +1,160 @@
+/*
+ * AppliedMicro X-Gene SoC GPIO-Standby Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Tin Huynh <tnhuynh@apm.com>.
+ * Y Vo <yvo@apm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/basic_mmio_gpio.h>
+
+#define XGENE_MAX_GPIO_DS 22
+#define XGENE_MAX_GPIO_DS_IRQ 6
+
+#define GPIO_MASK(x) (1U << ((x) % 32))
+
+#define MPA_GPIO_INT_LVL 0x0290
+#define MPA_GPIO_OE_ADDR 0x029c
+#define MPA_GPIO_OUT_ADDR 0x02a0
+#define MPA_GPIO_IN_ADDR 0x02a4
+#define MPA_GPIO_SEL_LO 0x0294
+
+/**
+ * struct xgene_gpio_sb - GPIO-Standby private data structure.
+ * @bgc: memory-mapped GPIO chip
+ * @irq: mapping of GPIO pins to Linux interrupt numbers
+ * @nirq: number of GPIO pins that support interrupts
+ */
+struct xgene_gpio_sb {
+ struct bgpio_chip bgc;
+ u32 *irq;
+ u32 nirq;
+};
+
+static inline struct xgene_gpio_sb *to_xgene_gpio_sb(struct gpio_chip *gc)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return container_of(bgc, struct xgene_gpio_sb, bgc);
+}
+
+static void xgene_gpio_set_bit(struct bgpio_chip *bgc, void __iomem *reg, u32 gpio, int val)
+{
+ u32 data;
+
+ data = bgc->read_reg(reg);
+ if (val)
+ data |= GPIO_MASK(gpio);
+ else
+ data &= ~GPIO_MASK(gpio);
+ bgc->write_reg(reg, data);
+}
+
+static int apm_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
+{
+ struct xgene_gpio_sb *priv = to_xgene_gpio_sb(gc);
+
+ if (priv->irq[gpio])
+ return priv->irq[gpio];
+
+ return -ENXIO;
+}
+
+static int xgene_gpio_sb_probe(struct platform_device *pdev)
+{
+ struct xgene_gpio_sb *priv;
+ u32 ret, i;
+ u32 default_lines[] = {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D};
+ struct resource *res;
+ void __iomem *regs;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ret = bgpio_init(&priv->bgc, &pdev->dev, 4,
+ regs + MPA_GPIO_IN_ADDR,
+ regs + MPA_GPIO_OUT_ADDR, NULL,
+ regs + MPA_GPIO_OE_ADDR, NULL, 0);
+ if (ret)
+ return ret;
+
+ priv->bgc.gc.to_irq = apm_gpio_sb_to_irq;
+ priv->bgc.gc.ngpio = XGENE_MAX_GPIO_DS;
+
+ priv->nirq = XGENE_MAX_GPIO_DS_IRQ;
+
+ priv->irq = devm_kzalloc(&pdev->dev, sizeof(u32) * XGENE_MAX_GPIO_DS,
+ GFP_KERNEL);
+ if (!priv->irq)
+ return -ENOMEM;
+ memset(priv->irq, 0, sizeof(u32) * XGENE_MAX_GPIO_DS);
+
+ for (i = 0; i < priv->nirq; i++) {
+ priv->irq[default_lines[i]] = platform_get_irq(pdev, i);
+ xgene_gpio_set_bit(&priv->bgc, regs + MPA_GPIO_SEL_LO,
+ default_lines[i] * 2, 1);
+ xgene_gpio_set_bit(&priv->bgc, regs + MPA_GPIO_INT_LVL, i, 1);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = gpiochip_add(&priv->bgc.gc);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register X-Gene GPIO Standby driver\n");
+ else
+ dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
+
+ return ret;
+}
+
+static int xgene_gpio_sb_remove(struct platform_device *pdev)
+{
+ struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
+
+ return bgpio_remove(&priv->bgc);
+}
+
+static const struct of_device_id xgene_gpio_sb_of_match[] = {
+ {.compatible = "apm,xgene-gpio-sb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_gpio_sb_of_match);
+
+static struct platform_driver xgene_gpio_sb_driver = {
+ .driver = {
+ .name = "xgene-gpio-sb",
+ .of_match_table = xgene_gpio_sb_of_match,
+ },
+ .probe = xgene_gpio_sb_probe,
+ .remove = xgene_gpio_sb_remove,
+};
+module_platform_driver(xgene_gpio_sb_driver);
+
+MODULE_AUTHOR("AppliedMicro");
+MODULE_DESCRIPTION("APM X-Gene GPIO Standby driver");
+MODULE_LICENSE("GPL");
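The new X-Gene standby driver builds on the generic memory-mapped GPIO library: bgpio_init() derives the gpio_chip accessors from the data-in/data-out/output-enable register addresses, and the driver only supplies its own .to_irq hook on top. A hedged, minimal sketch of that bgpio_init() usage; the register offsets here are placeholders, not the X-Gene map:

#include <linux/basic_mmio_gpio.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_gpio {
	struct bgpio_chip bgc;
};

static int example_init(struct platform_device *pdev, void __iomem *regs)
{
	struct example_gpio *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* 4-byte registers: data-in, data-out, no clear reg, dir-out, no dir-in */
	ret = bgpio_init(&priv->bgc, &pdev->dev, 4,
			 regs + 0x04,	/* placeholder data-in offset */
			 regs + 0x00,	/* placeholder data-out offset */
			 NULL,
			 regs + 0x08,	/* placeholder output-enable offset */
			 NULL, 0);
	if (ret)
		return ret;

	return gpiochip_add(&priv->bgc.gc);
}

bgpio_remove() then undoes the setup, which is all the driver's remove() has to call.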
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index ba18b06c9a21..61243d177740 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -30,7 +30,7 @@
#define XGPIO_CHANNEL_OFFSET 0x8
/* Read/Write access to the GPIO registers */
-#ifdef CONFIG_ARCH_ZYNQ
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
# define xgpio_readreg(offset) readl(offset)
# define xgpio_writereg(offset, val) writel(val, offset)
#else
@@ -40,37 +40,66 @@
/**
* struct xgpio_instance - Stores information about GPIO device
- * struct of_mm_gpio_chip mmchip: OF GPIO chip for memory mapped banks
- * gpio_state: GPIO state shadow register
- * gpio_dir: GPIO direction shadow register
- * offset: GPIO channel offset
- * gpio_lock: Lock used for synchronization
+ * @mmchip: OF GPIO chip for memory mapped banks
+ * @gpio_state: GPIO state shadow register
+ * @gpio_dir: GPIO direction shadow register
+ * @gpio_lock: Lock used for synchronization
+ * @gpio_width: GPIO width for every channel
*/
struct xgpio_instance {
struct of_mm_gpio_chip mmchip;
- u32 gpio_state;
- u32 gpio_dir;
- u32 offset;
- spinlock_t gpio_lock;
+ unsigned int gpio_width[2];
+ u32 gpio_state[2];
+ u32 gpio_dir[2];
+ spinlock_t gpio_lock[2];
};
+static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
+{
+ if (gpio >= chip->gpio_width[0])
+ return 1;
+
+ return 0;
+}
+
+static inline int xgpio_regoffset(struct xgpio_instance *chip, int gpio)
+{
+ if (xgpio_index(chip, gpio))
+ return XGPIO_CHANNEL_OFFSET;
+
+ return 0;
+}
+
+static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
+{
+ if (xgpio_index(chip, gpio))
+ return gpio - chip->gpio_width[0];
+
+ return gpio;
+}
+
/**
* xgpio_get - Read the specified signal of the GPIO device.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
*
- * This function reads the specified signal of the GPIO device. It returns 0 if
- * the signal clear, 1 if signal is set or negative value on error.
+ * This function reads the specified signal of the GPIO device.
+ *
+ * Return:
+ * 0 if the specified GPIO signal is clear, 1 if it is set.
*/
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip =
container_of(mm_gc, struct xgpio_instance, mmchip);
+ u32 val;
- void __iomem *regs = mm_gc->regs + chip->offset;
+ val = xgpio_readreg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_regoffset(chip, gpio));
- return !!(xgpio_readreg(regs + XGPIO_DATA_OFFSET) & BIT(gpio));
+ return !!(val & BIT(xgpio_offset(chip, gpio)));
}
/**
@@ -88,20 +117,21 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip =
container_of(mm_gc, struct xgpio_instance, mmchip);
- void __iomem *regs = mm_gc->regs;
+ int index = xgpio_index(chip, gpio);
+ int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ spin_lock_irqsave(&chip->gpio_lock[index], flags);
/* Write to GPIO signal and set its direction to output */
if (val)
- chip->gpio_state |= BIT(gpio);
+ chip->gpio_state[index] |= BIT(offset);
else
- chip->gpio_state &= ~BIT(gpio);
+ chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
- chip->gpio_state);
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
}
/**
@@ -109,9 +139,9 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
*
- * This function sets the direction of specified GPIO signal as input.
- * It returns 0 if direction of GPIO signals is set as input otherwise it
- * returns negative error value.
+ * Return:
+ * 0 if the direction of the GPIO signal was set as input,
+ * otherwise a negative error value.
*/
static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
@@ -119,15 +149,17 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip =
container_of(mm_gc, struct xgpio_instance, mmchip);
- void __iomem *regs = mm_gc->regs;
+ int index = xgpio_index(chip, gpio);
+ int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ spin_lock_irqsave(&chip->gpio_lock[index], flags);
/* Set the GPIO bit in shadow register and set direction as input */
- chip->gpio_dir |= BIT(gpio);
- xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
+ chip->gpio_dir[index] |= BIT(offset);
+ xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET +
+ xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
return 0;
}
@@ -138,8 +170,10 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
* @gpio: GPIO signal number.
* @val: Value to be written to specified signal.
*
- * This function sets the direction of specified GPIO signal as output. If all
- * GPIO signals of GPIO chip is configured as input then it returns
+ * This function sets the direction of specified GPIO signal as output.
+ *
+ * Return:
+ * If all GPIO signals of the GPIO chip are configured as input then it returns
* error otherwise it returns 0.
*/
static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -148,80 +182,128 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip =
container_of(mm_gc, struct xgpio_instance, mmchip);
- void __iomem *regs = mm_gc->regs;
+ int index = xgpio_index(chip, gpio);
+ int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ spin_lock_irqsave(&chip->gpio_lock[index], flags);
/* Write state of GPIO signal */
if (val)
- chip->gpio_state |= BIT(gpio);
+ chip->gpio_state[index] |= BIT(offset);
else
- chip->gpio_state &= ~BIT(gpio);
- xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
- chip->gpio_state);
+ chip->gpio_state[index] &= ~BIT(offset);
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
/* Clear the GPIO bit in shadow register and set direction as output */
- chip->gpio_dir &= ~BIT(gpio);
- xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
+ chip->gpio_dir[index] &= ~BIT(offset);
+ xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET +
+ xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
return 0;
}
/**
* xgpio_save_regs - Set initial values of GPIO pins
- * @mm_gc: pointer to memory mapped GPIO chip structure
+ * @mm_gc: Pointer to memory mapped GPIO chip structure
*/
static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct xgpio_instance *chip =
container_of(mm_gc, struct xgpio_instance, mmchip);
- xgpio_writereg(mm_gc->regs + chip->offset + XGPIO_DATA_OFFSET,
- chip->gpio_state);
- xgpio_writereg(mm_gc->regs + chip->offset + XGPIO_TRI_OFFSET,
- chip->gpio_dir);
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
+ xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
+
+ if (!chip->gpio_width[1])
+ return;
+
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET,
+ chip->gpio_state[1]);
+ xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET,
+ chip->gpio_dir[1]);
+}
+
+/**
+ * xgpio_remove - Remove method for the GPIO device.
+ * @pdev: pointer to the platform device
+ *
+ * This function removes the gpiochip and frees all the allocated resources.
+ */
+static int xgpio_remove(struct platform_device *pdev)
+{
+ struct xgpio_instance *chip = platform_get_drvdata(pdev);
+
+ of_mm_gpiochip_remove(&chip->mmchip);
+
+ return 0;
}
/**
* xgpio_of_probe - Probe method for the GPIO device.
- * @np: pointer to device tree node
+ * @pdev: pointer to the platform device
*
- * This function probes the GPIO device in the device tree. It initializes the
- * driver data structure. It returns 0, if the driver is bound to the GPIO
- * device, or a negative value if there is an error.
+ * Return:
+ * It returns 0 if the driver is bound to the GPIO device, or
+ * a negative value if there is an error.
*/
-static int xgpio_of_probe(struct device_node *np)
+static int xgpio_probe(struct platform_device *pdev)
{
struct xgpio_instance *chip;
int status = 0;
- const u32 *tree_info;
- u32 ngpio;
+ struct device_node *np = pdev->dev.of_node;
+ u32 is_dual;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- /* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state);
+ platform_set_drvdata(pdev, chip);
- /* By default, all pins are inputs */
- chip->gpio_dir = 0xFFFFFFFF;
+ /* Update GPIO state shadow register with default value */
+ of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]);
/* Update GPIO direction shadow register with default value */
- of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir);
+ if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
+ chip->gpio_dir[0] = 0xFFFFFFFF;
/*
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &ngpio))
- ngpio = 32;
- chip->mmchip.gc.ngpio = (u16)ngpio;
+ if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
+ chip->gpio_width[0] = 32;
+
+ spin_lock_init(&chip->gpio_lock[0]);
+
+ if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
+ is_dual = 0;
+
+ if (is_dual) {
+ /* Update GPIO state shadow register with default value */
+ of_property_read_u32(np, "xlnx,dout-default-2",
+ &chip->gpio_state[1]);
- spin_lock_init(&chip->gpio_lock);
+ /* Update GPIO direction shadow register with default value */
+ if (of_property_read_u32(np, "xlnx,tri-default-2",
+ &chip->gpio_dir[1]))
+ chip->gpio_dir[1] = 0xFFFFFFFF;
+ /*
+ * Check device node and parent device node for device width
+ * and assume default width of 32
+ */
+ if (of_property_read_u32(np, "xlnx,gpio2-width",
+ &chip->gpio_width[1]))
+ chip->gpio_width[1] = 32;
+
+ spin_lock_init(&chip->gpio_lock[1]);
+ }
+
+ chip->mmchip.gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
+ chip->mmchip.gc.dev = &pdev->dev;
chip->mmchip.gc.direction_input = xgpio_dir_in;
chip->mmchip.gc.direction_output = xgpio_dir_out;
chip->mmchip.gc.get = xgpio_get;
@@ -232,63 +314,11 @@ static int xgpio_of_probe(struct device_node *np)
/* Call the OF gpio helper to setup and register the GPIO device */
status = of_mm_gpiochip_add(np, &chip->mmchip);
if (status) {
- kfree(chip);
pr_err("%s: error in probe function with status %d\n",
np->full_name, status);
return status;
}
- pr_info("XGpio: %s: registered, base is %d\n", np->full_name,
- chip->mmchip.gc.base);
-
- tree_info = of_get_property(np, "xlnx,is-dual", NULL);
- if (tree_info && be32_to_cpup(tree_info)) {
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- /* Add dual channel offset */
- chip->offset = XGPIO_CHANNEL_OFFSET;
-
- /* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state);
-
- /* By default, all pins are inputs */
- chip->gpio_dir = 0xFFFFFFFF;
-
- /* Update GPIO direction shadow register with default value */
- of_property_read_u32(np, "xlnx,tri-default-2", &chip->gpio_dir);
-
- /*
- * Check device node and parent device node for device width
- * and assume default width of 32
- */
- if (of_property_read_u32(np, "xlnx,gpio2-width", &ngpio))
- ngpio = 32;
- chip->mmchip.gc.ngpio = (u16)ngpio;
-
- spin_lock_init(&chip->gpio_lock);
-
- chip->mmchip.gc.direction_input = xgpio_dir_in;
- chip->mmchip.gc.direction_output = xgpio_dir_out;
- chip->mmchip.gc.get = xgpio_get;
- chip->mmchip.gc.set = xgpio_set;
-
- chip->mmchip.save_regs = xgpio_save_regs;
-
- /* Call the OF gpio helper to setup and register the GPIO dev */
- status = of_mm_gpiochip_add(np, &chip->mmchip);
- if (status) {
- kfree(chip);
- pr_err("%s: error in probe function with status %d\n",
- np->full_name, status);
- return status;
- }
- pr_info("XGpio: %s: dual channel registered, base is %d\n",
- np->full_name, chip->mmchip.gc.base);
- }
-
return 0;
}
@@ -297,19 +327,29 @@ static const struct of_device_id xgpio_of_match[] = {
{ /* end of list */ },
};
-static int __init xgpio_init(void)
-{
- struct device_node *np;
+MODULE_DEVICE_TABLE(of, xgpio_of_match);
- for_each_matching_node(np, xgpio_of_match)
- xgpio_of_probe(np);
+static struct platform_driver xgpio_plat_driver = {
+ .probe = xgpio_probe,
+ .remove = xgpio_remove,
+ .driver = {
+ .name = "gpio-xilinx",
+ .of_match_table = xgpio_of_match,
+ },
+};
- return 0;
+static int __init xgpio_init(void)
+{
+ return platform_driver_register(&xgpio_plat_driver);
}
-/* Make sure we get initialized before anyone else tries to use us */
subsys_initcall(xgpio_init);
-/* No exit call at the moment as we cannot unregister of GPIO chips */
+
+static void __exit xgpio_exit(void)
+{
+ platform_driver_unregister(&xgpio_plat_driver);
+}
+module_exit(xgpio_exit);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx GPIO driver");
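Instead of registering a second gpio_chip for the dual channel, the reworked Xilinx driver folds both channels into one chip and splits a global GPIO number into a channel index, a bit offset within the channel and a 0x8 register offset for channel 2. A small self-contained check of that arithmetic, using a hypothetical 8+24 width split:

#include <assert.h>

#define XGPIO_CHANNEL_OFFSET	0x8

static unsigned int width[2] = { 8, 24 };	/* hypothetical xlnx,gpio-width values */

static int index_of(unsigned int gpio)		{ return gpio >= width[0]; }
static int regoffset_of(unsigned int gpio)	{ return index_of(gpio) ? XGPIO_CHANNEL_OFFSET : 0; }
static int offset_of(unsigned int gpio)		{ return index_of(gpio) ? gpio - width[0] : gpio; }

int main(void)
{
	/* GPIO 3 lives in channel 0, registers at their base offsets */
	assert(index_of(3) == 0 && regoffset_of(3) == 0x0 && offset_of(3) == 3);
	/* GPIO 10 is bit 2 of channel 1, registers shifted by 0x8 */
	assert(index_of(10) == 1 && regoffset_of(10) == 0x8 && offset_of(10) == 2);
	return 0;
}

The per-channel gpio_state/gpio_dir/gpio_lock arrays in the hunks above are then indexed by the same channel index.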
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index f769cd53f4e4..6f02d7c4cc57 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -181,6 +181,8 @@ static int zevio_gpio_probe(struct platform_device *pdev)
if (!controller)
return -ENOMEM;
+ platform_set_drvdata(pdev, controller);
+
/* Copy our reference */
controller->chip.gc = zevio_gpio_chip;
controller->chip.gc.dev = &pdev->dev;
@@ -202,6 +204,15 @@ static int zevio_gpio_probe(struct platform_device *pdev)
return 0;
}
+static int zevio_gpio_remove(struct platform_device *pdev)
+{
+ struct zevio_gpio *controller = platform_get_drvdata(pdev);
+
+ of_mm_gpiochip_remove(&controller->chip);
+
+ return 0;
+}
+
static const struct of_device_id zevio_gpio_of_match[] = {
{ .compatible = "lsi,zevio-gpio", },
{ },
@@ -215,6 +226,7 @@ static struct platform_driver zevio_gpio_driver = {
.of_match_table = zevio_gpio_of_match,
},
.probe = zevio_gpio_probe,
+ .remove = zevio_gpio_remove,
};
module_platform_driver(zevio_gpio_driver);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 604dbe60bdee..8cad8e400b44 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
return false;
ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
- if (ret < 0)
- return false;
+ if (ret < 0) {
+ /* We've found the gpio chip, but the translation failed.
+ * Return true to stop looking and return the translation
+ * error via out_gpio
+ */
+ gg_data->out_gpio = ERR_PTR(ret);
+ return true;
+ }
gg_data->out_gpio = gpiochip_get_desc(gc, ret);
return true;
@@ -204,6 +210,23 @@ err0:
}
EXPORT_SYMBOL(of_mm_gpiochip_add);
+/**
+ * of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank)
+ * @mm_gc: pointer to the of_mm_gpio_chip allocated structure
+ */
+void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
+{
+ struct gpio_chip *gc = &mm_gc->gc;
+
+ if (!mm_gc)
+ return;
+
+ gpiochip_remove(gc);
+ iounmap(mm_gc->regs);
+ kfree(gc->label);
+}
+EXPORT_SYMBOL(of_mm_gpiochip_remove);
+
#ifdef CONFIG_PINCTRL
static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 2ac1800b58bb..7722ed53bd65 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
gpio_active_low_show, gpio_active_low_store);
-static const struct attribute *gpio_attrs[] = {
+static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+ bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);
+
+ if (attr == &dev_attr_direction.attr) {
+ if (!show_direction)
+ mode = 0;
+ } else if (attr == &dev_attr_edge.attr) {
+ if (gpiod_to_irq(desc) < 0)
+ mode = 0;
+ if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute *gpio_attrs[] = {
+ &dev_attr_direction.attr,
+ &dev_attr_edge.attr,
&dev_attr_value.attr,
&dev_attr_active_low.attr,
NULL,
};
-static const struct attribute_group gpio_attr_group = {
- .attrs = (struct attribute **) gpio_attrs,
+static const struct attribute_group gpio_group = {
+ .attrs = gpio_attrs,
+ .is_visible = gpio_is_visible,
+};
+
+static const struct attribute_group *gpio_groups[] = {
+ &gpio_group,
+ NULL
};
/*
@@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
}
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
&dev_attr_ngpio.attr,
NULL,
};
-
-static const struct attribute_group gpiochip_attr_group = {
- .attrs = (struct attribute **) gpiochip_attrs,
-};
+ATTRIBUTE_GROUPS(gpiochip);
/*
* /sys/class/gpio/export ... write-only
@@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
goto fail_unlock;
}
- if (!desc->chip->direction_input || !desc->chip->direction_output)
- direction_may_change = false;
+ if (desc->chip->direction_input && desc->chip->direction_output &&
+ direction_may_change) {
+ set_bit(FLAG_SYSFS_DIR, &desc->flags);
+ }
+
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
if (desc->chip->names && desc->chip->names[offset])
ioname = desc->chip->names[offset];
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
+ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
+ MKDEV(0, 0), desc, gpio_groups,
+ ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
}
- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
- if (status)
- goto fail_unregister_device;
-
- if (direction_may_change) {
- status = device_create_file(dev, &dev_attr_direction);
- if (status)
- goto fail_unregister_device;
- }
-
- if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
- !test_bit(FLAG_IS_OUT, &desc->flags))) {
- status = device_create_file(dev, &dev_attr_edge);
- if (status)
- goto fail_unregister_device;
- }
-
set_bit(FLAG_EXPORT, &desc->flags);
mutex_unlock(&sysfs_lock);
return 0;
-fail_unregister_device:
- device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -637,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
if (tdev != NULL) {
status = sysfs_create_link(&dev->kobj, &tdev->kobj,
name);
+ put_device(tdev);
} else {
status = -ENODEV;
}
@@ -684,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
}
status = sysfs_set_active_low(desc, dev, value);
-
+ put_device(dev);
unlock:
mutex_unlock(&sysfs_lock);
@@ -718,6 +730,7 @@ void gpiod_unexport(struct gpio_desc *desc)
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev) {
gpio_setup_irq(desc, dev, 0);
+ clear_bit(FLAG_SYSFS_DIR, &desc->flags);
clear_bit(FLAG_EXPORT, &desc->flags);
} else
status = -ENODEV;
@@ -750,13 +763,13 @@ int gpiochip_export(struct gpio_chip *chip)
/* use chip->base for the ID; it's already known to be unique */
mutex_lock(&sysfs_lock);
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
- "gpiochip%d", chip->base);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpiochip_attr_group);
- } else
+ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
+ chip, gpiochip_groups,
+ "gpiochip%d", chip->base);
+ if (IS_ERR(dev))
status = PTR_ERR(dev);
+ else
+ status = 0;
chip->exported = (status == 0);
mutex_unlock(&sysfs_lock);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 487afe6f22fc..1ca9295b2c10 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip)
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
status = base;
- goto unlock;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
}
chip->base = base;
}
status = gpiochip_add_to_list(chip);
+ if (status) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
+ }
- if (status == 0) {
- for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &descs[id];
- desc->chip = chip;
-
- /* REVISIT: most hardware initializes GPIOs as
- * inputs (often with pullups enabled) so power
- * usage is minimized. Linux code should set the
- * gpio direction first thing; but until it does,
- * and in case chip->get_direction is not set,
- * we may expose the wrong direction in sysfs.
- */
- desc->flags = !chip->direction_input
- ? (1 << FLAG_IS_OUT)
- : 0;
- }
+ for (id = 0; id < chip->ngpio; id++) {
+ struct gpio_desc *desc = &descs[id];
+
+ desc->chip = chip;
+
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
chip->desc = descs;
@@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip)
of_gpiochip_add(chip);
acpi_gpiochip_add(chip);
- if (status)
- goto fail;
-
status = gpiochip_export(chip);
if (status)
- goto fail;
+ goto err_remove_chip;
pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
@@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip)
return 0;
-unlock:
+err_remove_chip:
+ acpi_gpiochip_remove(chip);
+ of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
-fail:
- kfree(descs);
chip->desc = NULL;
+err_free_descs:
+ kfree(descs);
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
@@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip)
unsigned long flags;
unsigned id;
- acpi_gpiochip_remove(chip);
-
- spin_lock_irqsave(&gpio_lock, flags);
+ gpiochip_unexport(chip);
gpiochip_irqchip_remove(chip);
+
+ acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip)
list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
- gpiochip_unexport(chip);
kfree(chip->desc);
chip->desc = NULL;
@@ -1657,7 +1659,7 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
{
- static const char *suffixes[] = { "gpios", "gpio" };
+ static const char * const suffixes[] = { "gpios", "gpio" };
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
struct gpio_desc *desc;
@@ -1665,9 +1667,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
if (con_id)
- snprintf(prop_name, 32, "%s-%s", con_id, suffixes[i]);
+ snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
+ suffixes[i]);
else
- snprintf(prop_name, 32, "%s", suffixes[i]);
+ snprintf(prop_name, sizeof(prop_name), "%s",
+ suffixes[i]);
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e3a52113a541..550a5eafbd38 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -77,6 +77,7 @@ struct gpio_desc {
#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */
#define ID_SHIFT 16 /* add new flags before this one */
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 70da9eb52a42..e9ed439a5b65 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1,3 +1,6 @@
-obj-y += drm/ vga/
+# drm/tegra depends on host1x, so if both drivers are built-in care must be
+# taken to initialize them in the correct order. Link order is the only way
+# to ensure this currently.
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
+obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c3413b6adb17..151a050129e7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -62,12 +62,13 @@ config DRM_TTM
config DRM_GEM_CMA_HELPER
bool
- depends on DRM
+ depends on DRM && HAVE_DMA_ATTRS
help
Choose this if you need the GEM CMA helper functions
config DRM_KMS_CMA_HELPER
bool
+ depends on DRM && HAVE_DMA_ATTRS
select DRM_GEM_CMA_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
@@ -110,7 +111,6 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
- select MMU_NOTIFIER
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -183,6 +183,8 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/armada/Kconfig"
+source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
+
source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e620807418ea..2c239b99de64 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
- drm_modeset_lock.o drm_atomic.o
+ drm_modeset_lock.o drm_atomic.o drm_bridge.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -55,6 +55,7 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_ARMADA) += armada/
+obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index be6246de5091..0f4960148126 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -7,8 +7,10 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
kfd_process.o kfd_queue.o kfd_mqd_manager.o \
- kfd_kernel_queue.o kfd_packet_manager.o \
+ kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \
+ kfd_kernel_queue.o kfd_kernel_queue_cik.o \
+ kfd_kernel_queue_vi.o kfd_packet_manager.o \
kfd_process_queue_manager.o kfd_device_queue_manager.o \
- kfd_interrupt.o
+ kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 607fc5ceadbe..01ff332fabd4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -168,6 +168,8 @@
#define IB_ATC_EN (1U << 23)
#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
+#define AQL_ENABLE 1
+
#define CP_HQD_DEQUEUE_REQUEST 0xC974
#define DEQUEUE_REQUEST_DRAIN 1
#define DEQUEUE_REQUEST_RESET 2
@@ -188,6 +190,17 @@
#define MQD_VMID_MASK (0xf << 0)
#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
+#define SDMA_RB_VMID(x) (x << 24)
+#define SDMA_RB_ENABLE (1 << 0)
+#define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */
+#define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+#define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define SDMA_OFFSET(x) (x << 0)
+#define SDMA_DB_ENABLE (1 << 28)
+#define SDMA_ATC (1 << 0)
+#define SDMA_VA_PTR32 (1 << 4)
+#define SDMA_VA_SHARED_BASE(x) (x << 8)
+
#define GRBM_GFX_INDEX 0x30800
#define INSTANCE_INDEX(x) ((x) << 0)
#define SH_INDEX(x) ((x) << 8)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index fcfdf23e1913..5c50aa8a8908 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -178,6 +178,22 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EFAULT;
}
+ if (args->eop_buffer_address &&
+ !access_ok(VERIFY_WRITE,
+ (const void __user *) args->eop_buffer_address,
+ sizeof(uint32_t))) {
+ pr_debug("kfd: can't access eop buffer");
+ return -EFAULT;
+ }
+
+ if (args->ctx_save_restore_address &&
+ !access_ok(VERIFY_WRITE,
+ (const void __user *) args->ctx_save_restore_address,
+ sizeof(uint32_t))) {
+ pr_debug("kfd: can't access ctx save restore buffer");
+ return -EFAULT;
+ }
+
q_properties->is_interop = false;
q_properties->queue_percent = args->queue_percentage;
q_properties->priority = args->queue_priority;
@@ -185,9 +201,16 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->queue_size = args->ring_size;
q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
+ q_properties->eop_ring_buffer_address = args->eop_buffer_address;
+ q_properties->eop_ring_buffer_size = args->eop_buffer_size;
+ q_properties->ctx_save_restore_area_address =
+ args->ctx_save_restore_address;
+ q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+ else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
+ q_properties->type = KFD_QUEUE_TYPE_SDMA;
else
return -ENOTSUPP;
@@ -214,6 +237,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
pr_debug("Queue Format (%d)\n", q_properties->format);
+ pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
+
+	pr_debug("Queue CTX save area (0x%llX)\n",
+ q_properties->ctx_save_restore_area_address);
+
return 0;
}
@@ -235,9 +263,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err)
return err;
+ pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (dev == NULL) {
+ pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
return -EINVAL;
+ }
mutex_lock(&p->mutex);
@@ -251,8 +282,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
- KFD_QUEUE_TYPE_COMPUTE, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
+ 0, q_properties.type, &queue_id);
if (err != 0)
goto err_create_queue;
@@ -385,7 +416,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
- if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+ if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 43884ebd4303..5bc32c26b989 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,15 +26,25 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
static const struct kfd_device_info kaveri_device_info = {
+ .asic_family = CHIP_KAVERI,
.max_pasid_bits = 16,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.mqd_size_aligned = MQD_SIZE_ALIGNED
};
+static const struct kfd_device_info carrizo_device_info = {
+ .asic_family = CHIP_CARRIZO,
+ .max_pasid_bits = 16,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED
+};
+
struct kfd_deviceid {
unsigned short did;
const struct kfd_device_info *device_info;
@@ -63,9 +73,13 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x1318, &kaveri_device_info }, /* Kaveri */
{ 0x131B, &kaveri_device_info }, /* Kaveri */
{ 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
+ { 0x131D, &kaveri_device_info } /* Kaveri */
};
+static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ unsigned int chunk_size);
+static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
size_t i;
@@ -169,20 +183,42 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */
- size = max_num_of_processes *
- max_num_of_queues_per_process *
- kfd->device_info->mqd_size_aligned;
+ size = max_num_of_queues_per_device *
+ kfd->device_info->mqd_size_aligned;
+
+ /*
+ * calculate max size of runlist packet.
+ * There can be only 2 packets at once
+ */
+ size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
+ max_num_of_queues_per_device *
+ sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+
+ /* Add size of HIQ & DIQ */
+ size += KFD_KERNEL_QUEUE_SIZE * 2;
- /* add another 512KB for all other allocations on gart */
+ /* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
- if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
+ if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
dev_err(kfd_device,
- "Error initializing sa manager for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ "Could not allocate %d bytes for device (%x:%x)\n",
+ size, kfd->pdev->vendor, kfd->pdev->device);
goto out;
}
+ dev_info(kfd_device,
+ "Allocated %d bytes on gart for device(%x:%x)\n",
+ size, kfd->pdev->vendor, kfd->pdev->device);
+
+ /* Initialize GTT sa with 512 byte chunk size */
+ if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
+ dev_err(kfd_device,
+ "Error initializing gtt sub-allocator\n");
+ goto kfd_gtt_sa_init_error;
+ }
+
kfd_doorbell_init(kfd);
if (kfd_topology_add_device(kfd) != 0) {
@@ -192,13 +228,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_topology_add_device_error;
}
- if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto kfd_interrupt_error;
- }
-
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
"Error initializing iommuv2 for device (%x:%x)\n",
@@ -216,7 +245,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_queue_manager_error;
}
- if (kfd->dqm->start(kfd->dqm) != 0) {
+ if (kfd->dqm->ops.start(kfd->dqm) != 0) {
dev_err(kfd_device,
"Error starting queuen manager for device (%x:%x)\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -237,11 +266,11 @@ dqm_start_error:
device_queue_manager_error:
amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
- kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
- kfd2kgd->fini_sa_manager(kfd->kgd);
+ kfd_gtt_sa_fini(kfd);
+kfd_gtt_sa_init_error:
+ kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device (%x:%x) NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -254,8 +283,9 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
if (kfd->init_complete) {
device_queue_manager_uninit(kfd->dqm);
amd_iommu_free_device(kfd->pdev);
- kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
+ kfd_gtt_sa_fini(kfd);
+ kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
@@ -266,7 +296,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
BUG_ON(kfd == NULL);
if (kfd->init_complete) {
- kfd->dqm->stop(kfd->dqm);
+ kfd->dqm->ops.stop(kfd->dqm);
amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
amd_iommu_free_device(kfd->pdev);
}
@@ -287,7 +317,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
return -ENXIO;
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
- kfd->dqm->start(kfd->dqm);
+ kfd->dqm->ops.start(kfd->dqm);
}
return 0;
@@ -296,13 +326,190 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- if (kfd->init_complete) {
- spin_lock(&kfd->interrupt_lock);
+ /* Process interrupts / schedule work as necessary */
+}
+
+static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ unsigned int chunk_size)
+{
+ unsigned int num_of_bits;
+
+ BUG_ON(!kfd);
+ BUG_ON(!kfd->gtt_mem);
+ BUG_ON(buf_size < chunk_size);
+ BUG_ON(buf_size == 0);
+ BUG_ON(chunk_size == 0);
+
+ kfd->gtt_sa_chunk_size = chunk_size;
+ kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
+
+ num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
+ BUG_ON(num_of_bits == 0);
+
+ kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+
+ if (!kfd->gtt_sa_bitmap)
+ return -ENOMEM;
+
+ pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+ kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
+
+ mutex_init(&kfd->gtt_sa_lock);
- if (kfd->interrupts_active
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
- schedule_work(&kfd->interrupt_work);
+ return 0;
+
+}
+
+static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
+{
+ mutex_destroy(&kfd->gtt_sa_lock);
+ kfree(kfd->gtt_sa_bitmap);
+}
+
+static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
+ unsigned int bit_num,
+ unsigned int chunk_size)
+{
+ return start_addr + bit_num * chunk_size;
+}
- spin_unlock(&kfd->interrupt_lock);
+static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
+ unsigned int bit_num,
+ unsigned int chunk_size)
+{
+ return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
+}
+
+int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ struct kfd_mem_obj **mem_obj)
+{
+ unsigned int found, start_search, cur_size;
+
+ BUG_ON(!kfd);
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
+ return -ENOMEM;
+
+ *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if ((*mem_obj) == NULL)
+ return -ENOMEM;
+
+ pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+
+ start_search = 0;
+
+ mutex_lock(&kfd->gtt_sa_lock);
+
+kfd_gtt_restart_search:
+ /* Find the first chunk that is free */
+ found = find_next_zero_bit(kfd->gtt_sa_bitmap,
+ kfd->gtt_sa_num_of_chunks,
+ start_search);
+
+ pr_debug("kfd: found = %d\n", found);
+
+ /* If there wasn't any free chunk, bail out */
+ if (found == kfd->gtt_sa_num_of_chunks)
+ goto kfd_gtt_no_free_chunk;
+
+ /* Update fields of mem_obj */
+ (*mem_obj)->range_start = found;
+ (*mem_obj)->range_end = found;
+ (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
+ kfd->gtt_start_gpu_addr,
+ found,
+ kfd->gtt_sa_chunk_size);
+ (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
+ kfd->gtt_start_cpu_ptr,
+ found,
+ kfd->gtt_sa_chunk_size);
+
+ pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+ (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
+
+ /* If we need only one chunk, mark it as allocated and get out */
+ if (size <= kfd->gtt_sa_chunk_size) {
+ pr_debug("kfd: single bit\n");
+ set_bit(found, kfd->gtt_sa_bitmap);
+ goto kfd_gtt_out;
}
+
+ /* Otherwise, try to see if we have enough contiguous chunks */
+ cur_size = size - kfd->gtt_sa_chunk_size;
+ do {
+ (*mem_obj)->range_end =
+ find_next_zero_bit(kfd->gtt_sa_bitmap,
+ kfd->gtt_sa_num_of_chunks, ++found);
+ /*
+	 * If the next free chunk is not contiguous, then we need to
+	 * restart our search from the last free chunk we found (which
+	 * wasn't contiguous to the previous ones)
+ */
+ if ((*mem_obj)->range_end != found) {
+ start_search = found;
+ goto kfd_gtt_restart_search;
+ }
+
+ /*
+ * If we reached end of buffer, bail out with error
+ */
+ if (found == kfd->gtt_sa_num_of_chunks)
+ goto kfd_gtt_no_free_chunk;
+
+ /* Check if we don't need another chunk */
+ if (cur_size <= kfd->gtt_sa_chunk_size)
+ cur_size = 0;
+ else
+ cur_size -= kfd->gtt_sa_chunk_size;
+
+ } while (cur_size > 0);
+
+ pr_debug("kfd: range_start = %d, range_end = %d\n",
+ (*mem_obj)->range_start, (*mem_obj)->range_end);
+
+ /* Mark the chunks as allocated */
+ for (found = (*mem_obj)->range_start;
+ found <= (*mem_obj)->range_end;
+ found++)
+ set_bit(found, kfd->gtt_sa_bitmap);
+
+kfd_gtt_out:
+ mutex_unlock(&kfd->gtt_sa_lock);
+ return 0;
+
+kfd_gtt_no_free_chunk:
+ pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+ mutex_unlock(&kfd->gtt_sa_lock);
+	kfree(*mem_obj);
+ return -ENOMEM;
+}
+
+int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+{
+ unsigned int bit;
+
+ BUG_ON(!kfd);
+
+ /* Act like kfree when trying to free a NULL object */
+ if (!mem_obj)
+ return 0;
+
+ pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+ mem_obj, mem_obj->range_start, mem_obj->range_end);
+
+ mutex_lock(&kfd->gtt_sa_lock);
+
+ /* Mark the chunks as free */
+ for (bit = mem_obj->range_start;
+ bit <= mem_obj->range_end;
+ bit++)
+ clear_bit(bit, kfd->gtt_sa_bitmap);
+
+ mutex_unlock(&kfd->gtt_sa_lock);
+
+ kfree(mem_obj);
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 9c8961d22360..b3589d0e39b9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -26,34 +26,40 @@
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
+#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
-#include "../../radeon/cik_reg.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
-static bool is_mem_initialized;
-
-static int init_memory(struct device_queue_manager *dqm);
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
unsigned int pasid, unsigned int vmid);
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
+
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+
+static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id);
-static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
+static inline
+enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
- BUG_ON(!dqm || !dqm->dev);
- return dqm->dev->shared_resources.compute_pipe_count;
+ if (type == KFD_QUEUE_TYPE_SDMA)
+ return KFD_MQD_TYPE_SDMA;
+ return KFD_MQD_TYPE_CP;
}
static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
@@ -67,61 +73,7 @@ static inline unsigned int get_pipes_num_cpsch(void)
return PIPE_PER_ME_CP_SCHEDULING;
}
-static inline unsigned int
-get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
-{
- uint32_t nybble;
-
- nybble = (pdd->lds_base >> 60) & 0x0E;
-
- return nybble;
-
-}
-
-static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
-{
- unsigned int shared_base;
-
- shared_base = (pdd->lds_base >> 16) & 0xFF;
-
- return shared_base;
-}
-
-static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
-static void init_process_memory(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- BUG_ON(!dqm || !qpd);
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
- DEFAULT_MTYPE(MTYPE_NONCACHED) |
- APE1_MTYPE(MTYPE_NONCACHED);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- if (qpd->pqm->process->is_32bit_user_mode) {
- temp = get_sh_mem_bases_32(pdd);
- qpd->sh_mem_bases = SHARED_BASE(temp);
- qpd->sh_mem_config |= PTR32;
- } else {
- temp = get_sh_mem_bases_nybble_64(pdd);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
- }
-
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
- qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
-}
-
-static void program_sh_mem_settings(struct device_queue_manager *dqm,
+void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
@@ -183,6 +135,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) {
@@ -193,7 +152,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
*allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
- retval = create_compute_queue_nocpsch(dqm, q, qpd);
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
+ retval = create_compute_queue_nocpsch(dqm, q, qpd);
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ retval = create_sdma_queue_nocpsch(dqm, q, qpd);
if (retval != 0) {
if (list_empty(&qpd->queues_list)) {
@@ -205,7 +167,19 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
}
list_add(&q->list, &qpd->queues_list);
- dqm->queue_count++;
+ if (q->properties.is_active)
+ dqm->queue_count++;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
+
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return 0;
@@ -214,12 +188,12 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
bool set;
- int pipe, bit;
+ int pipe, bit, i;
set = false;
- for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
- pipe = (pipe + 1) % get_pipes_num(dqm)) {
+ for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
+ pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
if (dqm->allocated_queues[pipe] != 0) {
bit = find_first_bit(
(unsigned long *)&dqm->allocated_queues[pipe],
@@ -260,7 +234,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
BUG_ON(!dqm || !q || !qpd);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL)
return -ENOMEM;
@@ -280,7 +254,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
q->queue);
retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
- q->queue, q->properties.write_ptr);
+ q->queue, (uint32_t __user *) q->properties.write_ptr);
if (retval != 0) {
deallocate_hqd(dqm, q);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -304,28 +278,53 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In Func %s\n", __func__);
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
- if (mqd == NULL) {
- retval = -ENOMEM;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (mqd == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ deallocate_hqd(dqm, q);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (mqd == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ dqm->sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ } else {
+ pr_debug("q->properties.type is invalid (%d)\n",
+ q->properties.type);
+ retval = -EINVAL;
goto out;
}
retval = mqd->destroy_mqd(mqd, q->mqd,
- KFD_PREEMPT_TYPE_WAVEFRONT,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
q->pipe, q->queue);
if (retval != 0)
goto out;
- deallocate_hqd(dqm, q);
-
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
- dqm->queue_count--;
+ if (q->properties.is_active)
+ dqm->queue_count--;
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -340,7 +339,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
BUG_ON(!dqm || !q || !q->mqd);
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@@ -391,6 +391,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct device_process_node *n;
+ int retval;
BUG_ON(!dqm || !qpd);
@@ -405,12 +406,13 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
list_add(&n->list, &dqm->queues);
- init_process_memory(dqm, qpd);
+ retval = dqm->ops_asic_specific.register_process(dqm, qpd);
+
dqm->processes_count++;
mutex_unlock(&dqm->lock);
- return 0;
+ return retval;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
@@ -455,48 +457,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
vmid);
}
-static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
-{
- /* In 64-bit mode, we can only control the top 3 bits of the LDS,
- * scratch and GPUVM apertures.
- * The hardware fills in the remaining 59 bits according to the
- * following pattern:
- * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
- * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
- * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
- *
- * (where X/Y is the configurable nybble with the low-bit 0)
- *
- * LDS and scratch will have the same top nybble programmed in the
- * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
- * GPUVM can have a different top nybble programmed in the
- * top 3 bits of SH_MEM_BASES.SHARED_BASE.
- * We don't bother to support different top nybbles
- * for LDS/Scratch and GPUVM.
- */
-
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
- top_address_nybble == 0);
-
- return PRIVATE_BASE(top_address_nybble << 12) |
- SHARED_BASE(top_address_nybble << 12);
-}
-
-static int init_memory(struct device_queue_manager *dqm)
-{
- int i, retval;
-
- for (i = 8; i < 16; i++)
- set_pasid_vmid_mapping(dqm, 0, i);
-
- retval = kfd2kgd->init_memory(dqm->dev->kgd);
- if (retval == 0)
- is_mem_initialized = true;
- return retval;
-}
-
-
-static int init_pipelines(struct device_queue_manager *dqm,
+int init_pipelines(struct device_queue_manager *dqm,
unsigned int pipes_num, unsigned int first_pipe)
{
void *hpdptr;
@@ -515,11 +476,8 @@ static int init_pipelines(struct device_queue_manager *dqm,
* because it contains no data when there are no active queues.
*/
- err = kfd2kgd->allocate_mem(dqm->dev->kgd,
- CIK_HPD_EOP_BYTES * pipes_num,
- PAGE_SIZE,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &dqm->pipeline_mem);
+ err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
+ &dqm->pipeline_mem);
if (err) {
pr_err("kfd: error allocate vidmem num pipes: %d\n",
@@ -532,26 +490,28 @@ static int init_pipelines(struct device_queue_manager *dqm,
memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL) {
- kfd2kgd->free_mem(dqm->dev->kgd,
- (struct kgd_mem *) dqm->pipeline_mem);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
return -ENOMEM;
}
for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe;
+ /*
+ * HPD buffer on GTT is allocated by amdkfd, no need to waste
+ * space in GTT for pipelines we don't initialize
+ */
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
return 0;
}
-
static int init_scheduler(struct device_queue_manager *dqm)
{
int retval;
@@ -560,12 +520,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
- retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
- if (retval != 0)
- return retval;
-
- retval = init_memory(dqm);
-
+ retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
return retval;
}
@@ -581,6 +536,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->sdma_queue_count = 0;
dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
sizeof(unsigned int), GFP_KERNEL);
if (!dqm->allocated_queues) {
@@ -592,6 +548,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+ dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
init_scheduler(dqm);
return 0;
@@ -609,8 +566,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqds[i]);
mutex_destroy(&dqm->lock);
- kfd2kgd->free_mem(dqm->dev->kgd,
- (struct kgd_mem *) dqm->pipeline_mem);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
@@ -623,6 +579,77 @@ static int stop_nocpsch(struct device_queue_manager *dqm)
return 0;
}
+static int allocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int *sdma_queue_id)
+{
+ int bit;
+
+ if (dqm->sdma_bitmap == 0)
+ return -ENOMEM;
+
+ bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
+ CIK_SDMA_QUEUES);
+
+ clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
+ *sdma_queue_id = bit;
+
+ return 0;
+}
+
+static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id)
+{
+ if (sdma_queue_id >= CIK_SDMA_QUEUES)
+ return;
+ set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
+}
+
+static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ uint32_t value = SDMA_ATC;
+
+ if (q->process->is_32bit_user_mode)
+ value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ else
+ value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
+ qpd_to_pdd(qpd)));
+ q->properties.sdma_vm_addr = value;
+}
+
+static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ struct mqd_manager *mqd;
+ int retval;
+
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (!mqd)
+ return -ENOMEM;
+
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+ if (retval != 0)
+ return retval;
+
+ q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
+
+ pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
+ pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
+ pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0) {
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ return retval;
+ }
+
+ init_sdma_vm(dqm, q, qpd);
+ return 0;
+}
+
/*
* Device Queue Manager implementation for cp scheduler
*/
@@ -664,8 +691,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
+ dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
+ retval = dqm->ops_asic_specific.initialize(dqm);
if (retval != 0)
goto fail_init_pipelines;
@@ -696,18 +724,14 @@ static int start_cpsch(struct device_queue_manager *dqm)
pr_debug("kfd: allocating fence memory\n");
/* allocate fence memory on the gart */
- retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
- sizeof(*dqm->fence_addr),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &dqm->fence_mem);
+ retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
+ &dqm->fence_mem);
if (retval != 0)
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
-
list_for_each_entry(node, &dqm->queues, list)
if (node->qpd->pqm->process && dqm->dev)
kfd_bind_process_to_device(dqm->dev,
@@ -736,8 +760,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
pdd = qpd_to_pdd(node->qpd);
pdd->bound = false;
}
- kfd2kgd->free_mem(dqm->dev->kgd,
- (struct kgd_mem *) dqm->fence_mem);
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
return 0;
@@ -752,6 +775,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
@@ -775,9 +813,24 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
execute_queues_cpsch(dqm, false);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type.
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
mutex_unlock(&dqm->lock);
}
+static void select_sdma_engine_id(struct queue *q)
+{
+ static int sdma_id;
+
+ q->sdma_id = sdma_id;
+ sdma_id = (sdma_id + 1) % 2;
+}
+
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd, int *allocate_vmid)
{
@@ -793,7 +846,19 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ retval = -EPERM;
+ goto out;
+ }
+
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ select_sdma_engine_id(q);
+
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@@ -810,6 +875,17 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -827,12 +903,20 @@ static int fence_wait_timeout(unsigned int *fence_addr,
pr_err("kfd: qcm fence wait loop timeout expired\n");
return -ETIME;
}
- cpu_relax();
+ schedule();
}
return 0;
}
+static int destroy_sdma_queues(struct device_queue_manager *dqm,
+ unsigned int sdma_engine)
+{
+ return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
+ sdma_engine);
+}
+
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
int retval;
@@ -845,6 +929,15 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
mutex_lock(&dqm->lock);
if (dqm->active_runlist == false)
goto out;
+
+ pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+ dqm->sdma_queue_count);
+
+ if (dqm->sdma_queue_count > 0) {
+ destroy_sdma_queues(dqm, 0);
+ destroy_sdma_queues(dqm, 1);
+ }
+
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
if (retval != 0)
@@ -916,20 +1009,32 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
/* remove queue from list to prevent rescheduling after preemption */
mutex_lock(&dqm->lock);
-
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
retval = -ENOMEM;
goto failed;
}
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count--;
+
list_del(&q->list);
- dqm->queue_count--;
+ if (q->properties.is_active)
+ dqm->queue_count--;
execute_queues_cpsch(dqm, false);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
@@ -954,8 +1059,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
- uint32_t default_mtype;
- uint32_t ape1_mtype;
+ bool retval;
pr_debug("kfd: In func %s\n", __func__);
@@ -992,18 +1096,13 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit = limit >> 16;
}
- default_mtype = (default_policy == cache_policy_coherent) ?
- MTYPE_NONCACHED :
- MTYPE_CACHED;
-
- ape1_mtype = (alternate_policy == cache_policy_coherent) ?
- MTYPE_NONCACHED :
- MTYPE_CACHED;
-
- qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
- | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
- | DEFAULT_MTYPE(default_mtype)
- | APE1_MTYPE(ape1_mtype);
+ retval = dqm->ops_asic_specific.set_cache_memory_policy(
+ dqm,
+ qpd,
+ default_policy,
+ alternate_policy,
+ alternate_aperture_base,
+ alternate_aperture_size);
if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
@@ -1013,7 +1112,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit);
mutex_unlock(&dqm->lock);
- return true;
+ return retval;
out:
mutex_unlock(&dqm->lock);
@@ -1026,6 +1125,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
BUG_ON(!dev);
+ pr_debug("kfd: loading device queue manager\n");
+
dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
if (!dqm)
return NULL;
@@ -1035,40 +1136,50 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case KFD_SCHED_POLICY_HWS:
case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
/* initialize dqm for cp scheduling */
- dqm->create_queue = create_queue_cpsch;
- dqm->initialize = initialize_cpsch;
- dqm->start = start_cpsch;
- dqm->stop = stop_cpsch;
- dqm->destroy_queue = destroy_queue_cpsch;
- dqm->update_queue = update_queue;
- dqm->get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->register_process = register_process_nocpsch;
- dqm->unregister_process = unregister_process_nocpsch;
- dqm->uninitialize = uninitialize_nocpsch;
- dqm->create_kernel_queue = create_kernel_queue_cpsch;
- dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
- dqm->set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.create_queue = create_queue_cpsch;
+ dqm->ops.initialize = initialize_cpsch;
+ dqm->ops.start = start_cpsch;
+ dqm->ops.stop = stop_cpsch;
+ dqm->ops.destroy_queue = destroy_queue_cpsch;
+ dqm->ops.update_queue = update_queue;
+ dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->ops.register_process = register_process_nocpsch;
+ dqm->ops.unregister_process = unregister_process_nocpsch;
+ dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+ dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+ dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
- dqm->start = start_nocpsch;
- dqm->stop = stop_nocpsch;
- dqm->create_queue = create_queue_nocpsch;
- dqm->destroy_queue = destroy_queue_nocpsch;
- dqm->update_queue = update_queue;
- dqm->get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->register_process = register_process_nocpsch;
- dqm->unregister_process = unregister_process_nocpsch;
- dqm->initialize = initialize_nocpsch;
- dqm->uninitialize = uninitialize_nocpsch;
- dqm->set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.start = start_nocpsch;
+ dqm->ops.stop = stop_nocpsch;
+ dqm->ops.create_queue = create_queue_nocpsch;
+ dqm->ops.destroy_queue = destroy_queue_nocpsch;
+ dqm->ops.update_queue = update_queue;
+ dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->ops.register_process = register_process_nocpsch;
+ dqm->ops.unregister_process = unregister_process_nocpsch;
+ dqm->ops.initialize = initialize_nocpsch;
+ dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
default:
BUG();
break;
}
- if (dqm->initialize(dqm) != 0) {
+ switch (dev->device_info->asic_family) {
+ case CHIP_CARRIZO:
+ device_queue_manager_init_vi(&dqm->ops_asic_specific);
+ break;
+
+ case CHIP_KAVERI:
+ device_queue_manager_init_cik(&dqm->ops_asic_specific);
+ break;
+ }
+
+ if (dqm->ops.initialize(dqm) != 0) {
kfree(dqm);
return NULL;
}
@@ -1080,7 +1191,6 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
BUG_ON(!dqm);
- dqm->uninitialize(dqm);
+ dqm->ops.uninitialize(dqm);
kfree(dqm);
}
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..d64f86cda34f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -36,6 +36,9 @@
#define KFD_VMID_START_OFFSET (8)
#define VMID_PER_DEVICE CIK_VMID_NUM
#define KFD_DQM_FIRST_PIPE (0)
+#define CIK_SDMA_QUEUES (4)
+#define CIK_SDMA_QUEUES_PER_ENGINE (2)
+#define CIK_SDMA_ENGINE_NUM (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -43,7 +46,7 @@ struct device_process_node {
};
/**
- * struct device_queue_manager
+ * struct device_queue_manager_ops
*
* @create_queue: Queue creation routine.
*
@@ -78,15 +81,9 @@ struct device_process_node {
* @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
* memory apertures.
*
- * This struct is a base class for the kfd queues scheduler in the
- * device level. The device base class should expose the basic operations
- * for queue creation and queue destruction. This base class hides the
- * scheduling mode of the driver and the specific implementation of the
- * concrete device. This class is the only class in the queues scheduler
- * that configures the H/W.
*/
-struct device_queue_manager {
+struct device_queue_manager_ops {
int (*create_queue)(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd,
@@ -121,7 +118,23 @@ struct device_queue_manager {
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+};
+/**
+ * struct device_queue_manager
+ *
+ * This struct is a base class for the kfd queues scheduler in the
+ * device level. The device base class should expose the basic operations
+ * for queue creation and queue destruction. This base class hides the
+ * scheduling mode of the driver and the specific implementation of the
+ * concrete device. This class is the only class in the queues scheduler
+ * that configures the H/W.
+ *
+ */
+
+struct device_queue_manager {
+ struct device_queue_manager_ops ops;
+ struct device_queue_manager_ops ops_asic_specific;
struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
@@ -130,8 +143,11 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
+ unsigned int sdma_queue_count;
+ unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
+ unsigned int sdma_bitmap;
unsigned int vmid_bitmap;
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
@@ -141,6 +157,28 @@ struct device_queue_manager {
bool active_runlist;
};
+void device_queue_manager_init_cik(struct device_queue_manager_ops *ops);
+void device_queue_manager_init_vi(struct device_queue_manager_ops *ops);
+void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+int init_pipelines(struct device_queue_manager *dqm,
+ unsigned int pipes_num, unsigned int first_pipe);
+
+extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+{
+ return (pdd->lds_base >> 16) & 0xFF;
+}
+
+extern inline unsigned int
+get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+{
+ return (pdd->lds_base >> 60) & 0x0E;
+}
+extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm || !dqm->dev);
+ return dqm->dev->shared_resources.compute_pipe_count;
+}
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
new file mode 100644
index 000000000000..6b072466e2a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_device_queue_manager.h"
+#include "cik_regs.h"
+
+static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+static int register_process_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+static int initialize_cpsch_cik(struct device_queue_manager *dqm);
+
+void device_queue_manager_init_cik(struct device_queue_manager_ops *ops)
+{
+ ops->set_cache_memory_policy = set_cache_memory_policy_cik;
+ ops->register_process = register_process_cik;
+ ops->initialize = initialize_cpsch_cik;
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+ * scratch and GPUVM apertures.
+ * The hardware fills in the remaining 59 bits according to the
+ * following pattern:
+ * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
+ * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
+ * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
+ *
+ * (where X/Y is the configurable nybble with the low-bit 0)
+ *
+ * LDS and scratch will have the same top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+ * GPUVM can have a different top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+ * We don't bother to support different top nybbles
+ * for LDS/Scratch and GPUVM.
+ */
+
+ BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
+ return PRIVATE_BASE(top_address_nybble << 12) |
+ SHARED_BASE(top_address_nybble << 12);
+}
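A short worked example of the nybble plumbing above (illustrative only; the PRIVATE_BASE/SHARED_BASE macro definitions live in cik_regs.h and are not shown in this patch):

/*
 * Example: a 64-bit process whose LDS aperture was placed at
 * 0x8000000000000000 by kfd_init_apertures() (i.e. X = 8).
 *
 *   get_sh_mem_bases_nybble_64():  (0x8000000000000000 >> 60) & 0x0E = 0x8
 *   compute_sh_mem_bases_64bit(8): PRIVATE_BASE(0x8 << 12) |
 *                                  SHARED_BASE(0x8 << 12)
 *
 * i.e. LDS/scratch (PRIVATE_BASE) and GPUVM (SHARED_BASE) are programmed
 * with the same even top nybble, as the block comment above describes.
 */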
+
+static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
+{
+ uint32_t default_mtype;
+ uint32_t ape1_mtype;
+
+ default_mtype = (default_policy == cache_policy_coherent) ?
+ MTYPE_NONCACHED :
+ MTYPE_CACHED;
+
+ ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+ MTYPE_NONCACHED :
+ MTYPE_CACHED;
+
+ qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
+ | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
+ | DEFAULT_MTYPE(default_mtype)
+ | APE1_MTYPE(ape1_mtype);
+
+ return true;
+}
+
+static int register_process_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ BUG_ON(!dqm || !qpd);
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+ DEFAULT_MTYPE(MTYPE_NONCACHED) |
+ APE1_MTYPE(MTYPE_NONCACHED);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ if (qpd->pqm->process->is_32bit_user_mode) {
+ temp = get_sh_mem_bases_32(pdd);
+ qpd->sh_mem_bases = SHARED_BASE(temp);
+ qpd->sh_mem_config |= PTR32;
+ } else {
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ }
+
+ pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+ return 0;
+}
+
+static int initialize_cpsch_cik(struct device_queue_manager *dqm)
+{
+ return init_pipelines(dqm, get_pipes_num(dqm), 0);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
new file mode 100644
index 000000000000..20553dcd257d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_device_queue_manager.h"
+
+static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+static int register_process_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+static int initialize_cpsch_vi(struct device_queue_manager *dqm);
+
+void device_queue_manager_init_vi(struct device_queue_manager_ops *ops)
+{
+ pr_warn("amdkfd: VI DQM is not currently supported\n");
+
+ ops->set_cache_memory_policy = set_cache_memory_policy_vi;
+ ops->register_process = register_process_vi;
+ ops->initialize = initialize_cpsch_vi;
+}
+
+static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
+{
+ return false;
+}
+
+static int register_process_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ return -1;
+}
+
+static int initialize_cpsch_vi(struct device_queue_manager *dqm)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index b5791a5c7c06..1a9b355dd114 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -137,10 +137,6 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
if (dev == NULL)
return -EINVAL;
- /* Find if pdd exists for combination of process and gpu id */
- if (!kfd_get_process_device_data(dev, process, 0))
- return -EINVAL;
-
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(dev, process);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index e64aa99e5e41..35b987574633 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -303,10 +303,11 @@ int kfd_init_apertures(struct kfd_process *process)
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
- pdd = kfd_get_process_device_data(dev, process, 1);
- if (!pdd)
+ pdd = kfd_create_process_device_data(dev, process);
+ if (pdd == NULL) {
+ pr_err("Failed to create process device data\n");
return -1;
-
+ }
/*
* For 64 bit process aperture will be statically reserved in
* the x86_64 non canonical process address space
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
deleted file mode 100644
index 5b999095a1f7..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * KFD Interrupts.
- *
- * AMD GPUs deliver interrupts by pushing an interrupt description onto the
- * interrupt ring and then sending an interrupt. KGD receives the interrupt
- * in ISR and sends us a pointer to each new entry on the interrupt ring.
- *
- * We generally can't process interrupt-signaled events from ISR, so we call
- * out to each interrupt client module (currently only the scheduler) to ask if
- * each interrupt is interesting. If they return true, then it requires further
- * processing so we copy it to an internal interrupt ring and call each
- * interrupt client again from a work-queue.
- *
- * There's no acknowledgment for the interrupts we use. The hardware simply
- * queues a new interrupt each time without waiting.
- *
- * The fixed-size internal queue means that it's possible for us to lose
- * interrupts because we have no back-pressure to the hardware.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include "kfd_priv.h"
-
-#define KFD_INTERRUPT_RING_SIZE 256
-
-static void interrupt_wq(struct work_struct *);
-
-int kfd_interrupt_init(struct kfd_dev *kfd)
-{
- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
- kfd->device_info->ih_ring_entry_size,
- GFP_KERNEL);
- if (!interrupt_ring)
- return -ENOMEM;
-
- kfd->interrupt_ring = interrupt_ring;
- kfd->interrupt_ring_size =
- KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
- atomic_set(&kfd->interrupt_ring_wptr, 0);
- atomic_set(&kfd->interrupt_ring_rptr, 0);
-
- spin_lock_init(&kfd->interrupt_lock);
-
- INIT_WORK(&kfd->interrupt_work, interrupt_wq);
-
- kfd->interrupts_active = true;
-
- /*
- * After this function returns, the interrupt will be enabled. This
- * barrier ensures that the interrupt running on a different processor
- * sees all the above writes.
- */
- smp_wmb();
-
- return 0;
-}
-
-void kfd_interrupt_exit(struct kfd_dev *kfd)
-{
- /*
- * Stop the interrupt handler from writing to the ring and scheduling
- * workqueue items. The spinlock ensures that any interrupt running
- * after we have unlocked sees interrupts_active = false.
- */
- unsigned long flags;
-
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
- kfd->interrupts_active = false;
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
-
- /*
- * Flush_scheduled_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_scheduled_work();
-
- kfree(kfd->interrupt_ring);
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
- */
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
-
- if ((rptr - wptr) % kfd->interrupt_ring_size ==
- kfd->device_info->ih_ring_entry_size) {
- /* This is very bad, the system is likely to hang. */
- dev_err_ratelimited(kfd_chardev(),
- "Interrupt ring overflow, dropping interrupt.\n");
- return false;
- }
-
- memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
-
- wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
- smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
- atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
- return true;
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
- */
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
-{
- /*
- * Assume that wait queues have an implicit barrier, i.e. anything that
- * happened in the ISR before it queued work is visible.
- */
-
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
-
- if (rptr == wptr)
- return false;
-
- memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
- kfd->device_info->ih_ring_entry_size);
-
- rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
-
- /*
- * Ensure the rptr write update is not visible until
- * memcpy has finished reading.
- */
- smp_mb();
- atomic_set(&kfd->interrupt_ring_rptr, rptr);
-
- return true;
-}
-
-static void interrupt_wq(struct work_struct *work)
-{
- struct kfd_dev *dev = container_of(work, struct kfd_dev,
- interrupt_work);
-
- uint32_t ih_ring_entry[DIV_ROUND_UP(
- dev->device_info->ih_ring_entry_size,
- sizeof(uint32_t))];
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry))
- ;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 935071410724..e415a2a9207e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -56,8 +56,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
case KFD_QUEUE_TYPE_HIQ:
- kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
- KFD_MQD_TYPE_CIK_HIQ);
+ kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_HIQ);
break;
default:
BUG();
@@ -72,23 +72,19 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
if (prop.doorbell_ptr == NULL)
goto err_get_kernel_doorbell;
- retval = kfd2kgd->allocate_mem(dev->kgd,
- queue_size,
- PAGE_SIZE,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->pq);
-
+ retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0)
goto err_pq_allocate_vidmem;
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
- retval = kfd2kgd->allocate_mem(dev->kgd,
- sizeof(*kq->rptr_kernel),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->rptr_mem);
+ retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
+ if (retval == false)
+ goto err_eop_allocate_vidmem;
+
+ retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
+ &kq->rptr_mem);
if (retval != 0)
goto err_rptr_allocate_vidmem;
@@ -96,11 +92,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
- retval = kfd2kgd->allocate_mem(dev->kgd,
- sizeof(*kq->wptr_kernel),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->wptr_mem);
+ retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
+ &kq->wptr_mem);
if (retval != 0)
goto err_wptr_allocate_vidmem;
@@ -121,6 +114,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.queue_address = kq->pq_gpu_addr;
prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
+ prop.eop_ring_buffer_address = kq->eop_gpu_addr;
+ prop.eop_ring_buffer_size = PAGE_SIZE;
if (init_queue(&kq->queue, prop) != 0)
goto err_init_queue;
@@ -145,11 +140,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
} else {
/* allocate fence for DIQ */
- retval = kfd2kgd->allocate_mem(dev->kgd,
- sizeof(uint32_t),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->fence_mem_obj);
+ retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
+ &kq->fence_mem_obj);
if (retval != 0)
goto err_alloc_fence;
@@ -165,11 +157,13 @@ err_alloc_fence:
err_init_mqd:
uninit_queue(kq->queue);
err_init_queue:
- kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+ kfd_gtt_sa_free(dev, kq->wptr_mem);
err_wptr_allocate_vidmem:
- kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+ kfd_gtt_sa_free(dev, kq->rptr_mem);
err_rptr_allocate_vidmem:
- kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+ kfd_gtt_sa_free(dev, kq->eop_mem);
+err_eop_allocate_vidmem:
+ kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
pr_err("kfd: error init pq\n");
kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
@@ -190,10 +184,13 @@ static void uninitialize(struct kernel_queue *kq)
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe,
kq->queue->queue);
+ else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+ kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
- kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
- kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
- kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+ kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+ kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+ kq->ops_asic_specific.uninitialize(kq);
+ kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
@@ -265,28 +262,6 @@ static void submit_packet(struct kernel_queue *kq)
kq->pending_wptr);
}
-static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
-{
- unsigned long org_timeout_ms;
-
- BUG_ON(!kq);
-
- org_timeout_ms = timeout_ms;
- timeout_ms += jiffies * 1000 / HZ;
- while (*kq->wptr_kernel != *kq->rptr_kernel) {
- if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
- pr_err("kfd: kernel_queue %s timeout expired %lu\n",
- __func__, org_timeout_ms);
- pr_err("kfd: wptr: %d rptr: %d\n",
- *kq->wptr_kernel, *kq->rptr_kernel);
- return -ETIME;
- }
- schedule();
- }
-
- return 0;
-}
-
static void rollback_packet(struct kernel_queue *kq)
{
BUG_ON(!kq);
@@ -304,14 +279,23 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
if (!kq)
return NULL;
- kq->initialize = initialize;
- kq->uninitialize = uninitialize;
- kq->acquire_packet_buffer = acquire_packet_buffer;
- kq->submit_packet = submit_packet;
- kq->sync_with_hw = sync_with_hw;
- kq->rollback_packet = rollback_packet;
+ kq->ops.initialize = initialize;
+ kq->ops.uninitialize = uninitialize;
+ kq->ops.acquire_packet_buffer = acquire_packet_buffer;
+ kq->ops.submit_packet = submit_packet;
+ kq->ops.rollback_packet = rollback_packet;
+
+ switch (dev->device_info->asic_family) {
+ case CHIP_CARRIZO:
+ kernel_queue_init_vi(&kq->ops_asic_specific);
+ break;
+
+ case CHIP_KAVERI:
+ kernel_queue_init_cik(&kq->ops_asic_specific);
+ break;
+ }
- if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+ if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
pr_err("kfd: failed to init kernel queue\n");
kfree(kq);
return NULL;
@@ -323,7 +307,7 @@ void kernel_queue_uninit(struct kernel_queue *kq)
{
BUG_ON(!kq);
- kq->uninitialize(kq);
+ kq->ops.uninitialize(kq);
kfree(kq);
}
@@ -335,19 +319,18 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
BUG_ON(!dev);
- pr_debug("kfd: starting kernel queue test\n");
+ pr_err("kfd: starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
BUG_ON(!kq);
- retval = kq->acquire_packet_buffer(kq, 5, &buffer);
+ retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
BUG_ON(retval != 0);
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
- kq->submit_packet(kq);
- kq->sync_with_hw(kq, 1000);
+ kq->ops.submit_packet(kq);
- pr_debug("kfd: ending kernel queue test\n");
+ pr_err("kfd: ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index dcd2bdb68d44..594053136ee4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -28,8 +28,31 @@
#include <linux/types.h>
#include "kfd_priv.h"
-struct kernel_queue {
- /* interface */
+/**
+ * struct kernel_queue_ops
+ *
+ * @initialize: Initialize a kernel queue, including allocations of GART memory
+ * needed for the queue.
+ *
+ * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
+ *
+ * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * queue ring buffer where the calling function can write its packet. It is
+ * guaranteed that there is enough space for that packet. It also updates the
+ * pending write pointer to that location so subsequent calls to
+ * acquire_packet_buffer will get a correct write pointer.
+ *
+ * @submit_packet: Update the write pointer and doorbell of a kernel queue.
+ *
+ * @rollback_packet: This routine is called if we failed to build an acquired
+ * packet for some reason. It just overwrites the pending wptr with the current
+ * one.
+ *
+ */
+struct kernel_queue_ops {
bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size);
void (*uninitialize)(struct kernel_queue *kq);
@@ -38,9 +61,12 @@ struct kernel_queue {
unsigned int **buffer_ptr);
void (*submit_packet)(struct kernel_queue *kq);
- int (*sync_with_hw)(struct kernel_queue *kq,
- unsigned long timeout_ms);
void (*rollback_packet)(struct kernel_queue *kq);
+};
+
+struct kernel_queue {
+ struct kernel_queue_ops ops;
+ struct kernel_queue_ops ops_asic_specific;
/* data */
struct kfd_dev *dev;
@@ -58,6 +84,9 @@ struct kernel_queue {
struct kfd_mem_obj *pq;
uint64_t pq_gpu_addr;
uint32_t *pq_kernel_addr;
+ struct kfd_mem_obj *eop_mem;
+ uint64_t eop_gpu_addr;
+ uint32_t *eop_kernel_addr;
struct kfd_mem_obj *fence_mem_obj;
uint64_t fence_gpu_addr;
@@ -66,4 +95,7 @@ struct kernel_queue {
struct list_head list;
};
+void kernel_queue_init_cik(struct kernel_queue_ops *ops);
+void kernel_queue_init_vi(struct kernel_queue_ops *ops);
+
#endif /* KFD_KERNEL_QUEUE_H_ */
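A minimal usage sketch of kernel_queue_ops (not part of the patch): this is the acquire/fill/submit sequence that test_kq() and the packet manager follow after this change, with rollback_packet reserved for the failure path; the wrapper function below is hypothetical.

static int submit_nops_example(struct kernel_queue *kq, unsigned int ndw)
{
	unsigned int *buf;
	unsigned int i;
	int retval;

	/* Reserve ndw dwords in the ring; this also advances the pending wptr. */
	retval = kq->ops.acquire_packet_buffer(kq, ndw, &buf);
	if (retval != 0)
		return retval;

	for (i = 0; i < ndw; i++)
		buf[i] = kq->nop_packet;

	/* On a packet-build failure we would call kq->ops.rollback_packet(kq). */
	kq->ops.submit_packet(kq);
	return 0;
}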
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
new file mode 100644
index 000000000000..a90eb440b1fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_kernel_queue.h"
+
+static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+static void uninitialize_cik(struct kernel_queue *kq);
+
+void kernel_queue_init_cik(struct kernel_queue_ops *ops)
+{
+ ops->initialize = initialize_cik;
+ ops->uninitialize = uninitialize_cik;
+}
+
+static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size)
+{
+ return true;
+}
+
+static void uninitialize_cik(struct kernel_queue *kq)
+{
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
new file mode 100644
index 000000000000..f1d48281e322
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_kernel_queue.h"
+
+static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+static void uninitialize_vi(struct kernel_queue *kq);
+
+void kernel_queue_init_vi(struct kernel_queue_ops *ops)
+{
+ ops->initialize = initialize_vi;
+ ops->uninitialize = uninitialize_vi;
+}
+
+static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size)
+{
+ int retval;
+
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+ if (retval != 0)
+ return false;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+ kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
+
+ memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
+
+ return true;
+}
+
+static void uninitialize_vi(struct kernel_queue *kq)
+{
+ kfd_gtt_sa_free(kq->dev, kq->eop_mem);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..3f34ae16f075 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -29,10 +29,10 @@
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
-#define KFD_DRIVER_DATE "20141113"
+#define KFD_DRIVER_DATE "20150122"
#define KFD_DRIVER_MAJOR 0
#define KFD_DRIVER_MINOR 7
-#define KFD_DRIVER_PATCHLEVEL 0
+#define KFD_DRIVER_PATCHLEVEL 1
const struct kfd2kgd_calls *kfd2kgd;
static const struct kgd2kfd_calls kgd2kfd = {
@@ -48,17 +48,12 @@ static const struct kgd2kfd_calls kgd2kfd = {
int sched_policy = KFD_SCHED_POLICY_HWS;
module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
- "Kernel cmdline parameter that defines the amdkfd scheduling policy");
+ "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
}
/* Verify module parameters */
- if ((max_num_of_processes < 0) ||
- (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
- pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
- return -1;
- }
-
- if ((max_num_of_queues_per_process < 0) ||
- (max_num_of_queues_per_process >
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
- pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ if ((max_num_of_queues_per_device < 1) ||
+ (max_num_of_queues_per_device >
+ KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+ pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
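For reference, since max_num_of_queues_per_device is a 0444 module parameter, it would typically be set at load time (for example, modprobe amdkfd max_num_of_queues_per_device=1024, assuming the driver is built as the amdkfd module) and can be read back from /sys/module/amdkfd/parameters/max_num_of_queues_per_device.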
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 4c3828cf45bf..b1ef1368c3bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -21,326 +21,17 @@
*
*/
-#include <linux/printk.h>
-#include <linux/slab.h>
#include "kfd_priv.h"
-#include "kfd_mqd_manager.h"
-#include "cik_regs.h"
-#include "../../radeon/cik_reg.h"
-
-inline void busy_wait(unsigned long ms)
-{
- while (time_before(jiffies, ms))
- cpu_relax();
-}
-
-static inline struct cik_mqd *get_mqd(void *mqd)
-{
- return (struct cik_mqd *)mqd;
-}
-
-static int init_mqd(struct mqd_manager *mm, void **mqd,
- struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
- struct queue_properties *q)
-{
- uint64_t addr;
- struct cik_mqd *m;
- int retval;
-
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- retval = kfd2kgd->allocate_mem(mm->dev->kgd,
- sizeof(struct cik_mqd),
- 256,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) mqd_mem_obj);
-
- if (retval != 0)
- return -ENOMEM;
-
- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
- addr = (*mqd_mem_obj)->gpu_addr;
-
- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
-
- m->header = 0xC0310800;
- m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
-
- /*
- * Make sure to use the last queue state saved on mqd when the cp
- * reassigns the queue, so when queue is switched on/off (e.g over
- * subscription or quantum timeout) the context will be consistent
- */
- m->cp_hqd_persistent_state =
- DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
-
- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
-
- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
- /* Although WinKFD writes this, I suspect it should not be necessary */
- m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
-
- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
- QUANTUM_DURATION(10);
-
- /*
- * Pipe Priority
- * Identifies the pipe relative priority when this queue is connected
- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
- * 0 = CS_LOW (typically below GFX)
- * 1 = CS_MEDIUM (typically between HP3D and GFX
- * 2 = CS_HIGH (typically above HP3D)
- */
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
- *mqd = m;
- if (gart_addr != NULL)
- *gart_addr = addr;
- retval = mm->update_mqd(mm, m, q);
-
- return retval;
-}
-
-static void uninit_mqd(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- BUG_ON(!mm || !mqd);
- kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
-}
-
-static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
-{
- return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
-
-}
-
-static int update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct cik_mqd *m;
-
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- m = get_mqd(mqd);
- m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
- DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
-
- /*
- * Calculating queue size which is log base 2 of actual queue size -1
- * dwords and another -1 for ffs
- */
- m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- - 1 - 1;
- m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
- m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
- m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
-
- m->cp_hqd_vmid = q->vmid;
-
- if (q->format == KFD_QUEUE_FORMAT_AQL) {
- m->cp_hqd_iq_rptr = AQL_ENABLE;
- m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- }
-
- m->cp_hqd_active = 0;
- q->is_active = false;
- if (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0) {
- m->cp_hqd_active = 1;
- q->is_active = true;
- }
-
- return 0;
-}
-
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
- pipe_id, queue_id);
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
-
- return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
- pipe_id, queue_id);
-
-}
-
-/*
- * HIQ MQD Implementation, concrete implementation for HIQ MQD implementation.
- * The HIQ queue in Kaveri is using the same MQD structure as all the user mode
- * queues but with different initial values.
- */
-
-static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
- struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
- struct queue_properties *q)
-{
- uint64_t addr;
- struct cik_mqd *m;
- int retval;
-
- BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- retval = kfd2kgd->allocate_mem(mm->dev->kgd,
- sizeof(struct cik_mqd),
- 256,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) mqd_mem_obj);
-
- if (retval != 0)
- return -ENOMEM;
-
- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
- addr = (*mqd_mem_obj)->gpu_addr;
-
- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
-
- m->header = 0xC0310800;
- m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
-
- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
- PRELOAD_REQ;
- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
- QUANTUM_DURATION(10);
-
- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
-
- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
-
- /*
- * Pipe Priority
- * Identifies the pipe relative priority when this queue is connected
- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
- * 0 = CS_LOW (typically below GFX)
- * 1 = CS_MEDIUM (typically between HP3D and GFX
- * 2 = CS_HIGH (typically above HP3D)
- */
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
- *mqd = m;
- if (gart_addr)
- *gart_addr = addr;
- retval = mm->update_mqd(mm, m, q);
-
- return retval;
-}
-
-static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct cik_mqd *m;
-
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- m = get_mqd(mqd);
- m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
- DEFAULT_MIN_AVAIL_SIZE |
- PRIV_STATE |
- KMD_QUEUE;
-
- /*
- * Calculating queue size which is log base 2 of actual queue
- * size -1 dwords
- */
- m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- - 1 - 1;
- m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
- m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
- m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
-
- m->cp_hqd_vmid = q->vmid;
-
- m->cp_hqd_active = 0;
- q->is_active = false;
- if (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0) {
- m->cp_hqd_active = 1;
- q->is_active = true;
- }
-
- return 0;
-}
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
- struct mqd_manager *mqd;
-
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
- if (!mqd)
- return NULL;
-
- mqd->dev = dev;
-
- switch (type) {
- case KFD_MQD_TYPE_CIK_CP:
- case KFD_MQD_TYPE_CIK_COMPUTE:
- mqd->init_mqd = init_mqd;
- mqd->uninit_mqd = uninit_mqd;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
- break;
- case KFD_MQD_TYPE_CIK_HIQ:
- mqd->init_mqd = init_mqd_hiq;
- mqd->uninit_mqd = uninit_mqd;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
- break;
- default:
- kfree(mqd);
- return NULL;
+ switch (dev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ return mqd_manager_init_cik(type, dev);
+ case CHIP_CARRIZO:
+ return mqd_manager_init_vi(type, dev);
}
- return mqd;
+ return NULL;
}
-
-/* SDMA queues should be implemented here when the cp will supports them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
new file mode 100644
index 000000000000..a09e18a339f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "cik_structs.h"
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ uint64_t addr;
+ struct cik_mqd *m;
+ int retval;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+ mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ /*
+ * Make sure to use the last queue state saved on mqd when the cp
+ * reassigns the queue, so when queue is switched on/off (e.g over
+ * subscription or quantum timeout) the context will be consistent
+ */
+ m->cp_hqd_persistent_state =
+ DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
+
+ m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
+ /* Although WinKFD writes this, I suspect it should not be necessary */
+ m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
+
+ m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+ QUANTUM_DURATION(10);
+
+ /*
+ * Pipe Priority
+ * Identifies the pipe relative priority when this queue is connected
+ * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+ * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+ * 0 = CS_LOW (typically below GFX)
+ * 1 = CS_MEDIUM (typically between HP3D and GFX)
+ * 2 = CS_HIGH (typically above HP3D)
+ */
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
+ m->cp_hqd_iq_rptr = AQL_ENABLE;
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ struct cik_sdma_rlc_registers *m;
+
+ BUG_ON(!mm || !mqd || !mqd_mem_obj);
+
+ retval = kfd_gtt_sa_allocate(mm->dev,
+ sizeof(struct cik_sdma_rlc_registers),
+ mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr;
+
+ memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr)
+{
+ return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+}
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
+
+ /*
+ * Calculate the queue size field: log2 of the queue size in dwords
+ * minus 1, with an extra -1 because ffs() is one-based.
+ */
+ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+ - 1 - 1;
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+ DOORBELL_OFFSET(q->doorbell_off);
+
+ m->cp_hqd_vmid = q->vmid;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
+ m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
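To make the cp_hqd_pq_control size encoding concrete, a worked example (illustrative; 2048 bytes matches KFD_KERNEL_QUEUE_SIZE from kfd_priv.h, and user queues may be larger):

/*
 * Example for a 2048-byte queue:
 *
 *   2048 bytes = 512 dwords
 *   ffs(512)   = 10        (ffs() is one-based)
 *   10 - 1 - 1 = 8         = log2(512) - 1
 *
 * so the field ends up holding log2(queue size in dwords) - 1.
 */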
+
+static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_sdma_rlc_registers *m;
+
+ BUG_ON(!mm || !mqd || !q);
+
+ m = get_sdma_mqd(mqd);
+ m->sdma_rlc_rb_cntl =
+ SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
+ SDMA_RB_VMID(q->vmid) |
+ SDMA_RPTR_WRITEBACK_ENABLE |
+ SDMA_RPTR_WRITEBACK_TIMER(6);
+
+ m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
+ m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
+ m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+ m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
+
+ m->sdma_engine_id = q->sdma_engine_id;
+ m->sdma_queue_id = q->sdma_queue_id;
+
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ pipe_id, queue_id);
+}
+
+/*
+ * preempt type here is ignored because there is only one way
+ * to preempt sdma queue
+ */
+static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+
+ return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+ pipe_id, queue_id);
+
+}
+
+static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+}
+
+/*
+ * HIQ MQD implementation: concrete MQD manager implementation for the HIQ.
+ * The HIQ queue in Kaveri uses the same MQD structure as all the user-mode
+ * queues, but with different initial values.
+ */
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ uint64_t addr;
+ struct cik_mqd *m;
+ int retval;
+
+ BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+ mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+ PRELOAD_REQ;
+ m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+ QUANTUM_DURATION(10);
+
+ m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+
+ /*
+ * Pipe Priority
+ * Identifies the pipe relative priority when this queue is connected
+ * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+ * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+ * 0 = CS_LOW (typically below GFX)
+ * 1 = CS_MEDIUM (typically between HP3D and GFX)
+ * 2 = CS_HIGH (typically above HP3D)
+ */
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ *mqd = m;
+ if (gart_addr)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE |
+ PRIV_STATE |
+ KMD_QUEUE;
+
+ /*
+ * Calculate the queue size field: log2 of the queue size in dwords
+ * minus 1, with an extra -1 because ffs() is one-based.
+ */
+ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+ - 1 - 1;
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+ DOORBELL_OFFSET(q->doorbell_off);
+
+ m->cp_hqd_vmid = q->vmid;
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ struct cik_sdma_rlc_registers *m;
+
+ BUG_ON(!mqd);
+
+ m = (struct cik_sdma_rlc_registers *)mqd;
+
+ return m;
+}
+
+struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev)
+{
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dev);
+ BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+ mqd->dev = dev;
+
+ switch (type) {
+ case KFD_MQD_TYPE_CP:
+ case KFD_MQD_TYPE_COMPUTE:
+ mqd->init_mqd = init_mqd;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_HIQ:
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_SDMA:
+ mqd->init_mqd = init_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->load_mqd = load_mqd_sdma;
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+ break;
+ default:
+ kfree(mqd);
+ return NULL;
+ }
+
+ return mqd;
+}
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
new file mode 100644
index 000000000000..b3a7e3ba1e38
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+
+struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev)
+{
+ pr_warn("amdkfd: VI MQD is not currently supported\n");
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 5ce9233d2004..e2533d875f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -97,11 +97,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
- retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
- *rl_buffer_size,
- PAGE_SIZE,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &pm->ib_buffer_obj);
+ retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
+ &pm->ib_buffer_obj);
if (retval != 0) {
pr_err("kfd: failed to allocate runlist IB\n");
@@ -351,7 +348,7 @@ int pm_send_set_resources(struct packet_manager *pm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&pm->lock);
- pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
sizeof(*packet) / sizeof(uint32_t),
(unsigned int **)&packet);
if (packet == NULL) {
@@ -378,8 +375,7 @@ int pm_send_set_resources(struct packet_manager *pm,
packet->queue_mask_lo = lower_32_bits(res->queue_mask);
packet->queue_mask_hi = upper_32_bits(res->queue_mask);
- pm->priv_queue->submit_packet(pm->priv_queue);
- pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
@@ -405,7 +401,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
mutex_lock(&pm->lock);
- retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+ retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
if (retval != 0)
goto fail_acquire_packet_buffer;
@@ -415,15 +411,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval != 0)
goto fail_create_runlist;
- pm->priv_queue->submit_packet(pm->priv_queue);
- pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return retval;
fail_create_runlist:
- pm->priv_queue->rollback_packet(pm->priv_queue);
+ pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
@@ -441,7 +436,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
BUG_ON(!pm || !fence_address);
mutex_lock(&pm->lock);
- retval = pm->priv_queue->acquire_packet_buffer(
+ retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
sizeof(struct pm4_query_status) / sizeof(uint32_t),
(unsigned int **)&packet);
@@ -462,8 +457,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
packet->data_hi = upper_32_bits((uint64_t)fence_value);
packet->data_lo = lower_32_bits((uint64_t)fence_value);
- pm->priv_queue->submit_packet(pm->priv_queue);
- pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return 0;
@@ -485,7 +479,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
BUG_ON(!pm);
mutex_lock(&pm->lock);
- retval = pm->priv_queue->acquire_packet_buffer(
+ retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
&buffer);
@@ -540,8 +534,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
break;
};
- pm->priv_queue->submit_packet(pm->priv_queue);
- pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return 0;
@@ -557,8 +550,7 @@ void pm_release_ib(struct packet_manager *pm)
mutex_lock(&pm->lock);
if (pm->allocated) {
- kfd2kgd->free_mem(pm->dqm->dev->kgd,
- (struct kgd_mem *) pm->ib_buffer_obj);
+ kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
pm->allocated = false;
}
mutex_unlock(&pm->lock);
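The hunks above route every kernel-queue call through an ops table (pm->priv_queue->ops.*), drop the separate sync_with_hw() step, and take the runlist IB from the in-driver GTT sub-allocator instead of the kgd allocator. A minimal sketch of that ops-table indirection, with an assumed struct layout rather than the driver's kfd_kernel_queue.h definitions:

/*
 * Illustrative only: group the queue entry points behind one ops struct so
 * callers always dispatch through kq->ops.*, as pm_send_runlist() et al.
 * now do.
 */
#include <stddef.h>

struct kq;                              /* stand-in for struct kernel_queue */

struct kernel_queue_ops {
	int  (*acquire_packet_buffer)(struct kq *kq, size_t size_in_dwords,
				      unsigned int **buffer_ptr);
	void (*submit_packet)(struct kq *kq);
	void (*rollback_packet)(struct kq *kq);
};

struct kq {
	struct kernel_queue_ops ops;    /* callers go through kq->ops.* */
	/* ...ring buffer, rptr/wptr state, owning device... */
};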
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void)
{
- pasid_limit = max_num_of_processes;
+ pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
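kfd_pasid_init() now sizes the pasid bitmap from the fixed KFD_MAX_NUM_OF_PROCESSES (512) instead of a module parameter. A standalone check of the resulting allocation on a 64-bit build (BITS_TO_LONGS is re-derived here for the example, not taken from kernel headers):

#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	/* 512 pasids -> 8 longs -> 64 bytes with 64-bit longs */
	printf("%zu longs, %zu bytes\n",
	       BITS_TO_LONGS(512), BITS_TO_LONGS(512) * sizeof(long));
	return 0;
}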
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index a5edb29507e3..5a44f2fecf38 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
*/
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048
@@ -104,12 +103,26 @@ enum cache_policy {
cache_policy_noncoherent
};
+enum asic_family_type {
+ CHIP_KAVERI = 0,
+ CHIP_CARRIZO
+};
+
struct kfd_device_info {
+ unsigned int asic_family;
unsigned int max_pasid_bits;
size_t ih_ring_entry_size;
+ uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
};
+struct kfd_mem_obj {
+ uint32_t range_start;
+ uint32_t range_end;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+};
+
struct kfd_dev {
struct kgd_dev *kgd;
@@ -135,22 +148,18 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
- void *interrupt_ring;
- size_t interrupt_ring_size;
- atomic_t interrupt_ring_rptr;
- atomic_t interrupt_ring_wptr;
- struct work_struct interrupt_work;
- spinlock_t interrupt_lock;
+ void *gtt_mem;
+ uint64_t gtt_start_gpu_addr;
+ void *gtt_start_cpu_ptr;
+ void *gtt_sa_bitmap;
+ struct mutex gtt_sa_lock;
+ unsigned int gtt_sa_chunk_size;
+ unsigned int gtt_sa_num_of_chunks;
/* QCM Device instance */
struct device_queue_manager *dqm;
bool init_complete;
- /*
- * Interrupts of interest to KFD are copied
- * from the HW ring into a SW ring.
- */
- bool interrupts_active;
};
/* KGD2KFD callbacks */
@@ -162,12 +171,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd);
extern const struct kfd2kgd_calls *kfd2kgd;
-struct kfd_mem_obj {
- void *bo;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
-};
-
enum kfd_mempool {
KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -285,6 +288,15 @@ struct queue_properties {
bool is_active;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
+ /* Relevant only for sdma queues */
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
+ uint32_t sdma_vm_addr;
+ /* Relevant only for VI */
+ uint64_t eop_ring_buffer_address;
+ uint32_t eop_ring_buffer_size;
+ uint64_t ctx_save_restore_area_address;
+ uint32_t ctx_save_restore_area_size;
};
/**
@@ -327,6 +339,8 @@ struct queue {
uint32_t pipe;
uint32_t queue;
+ unsigned int sdma_id;
+
struct kfd_process *process;
struct kfd_dev *device;
};
@@ -335,10 +349,10 @@ struct queue {
* Please read the kfd_mqd_manager.h description.
*/
enum KFD_MQD_TYPE {
- KFD_MQD_TYPE_CIK_COMPUTE = 0, /* for no cp scheduling */
- KFD_MQD_TYPE_CIK_HIQ, /* for hiq */
- KFD_MQD_TYPE_CIK_CP, /* for cp queues and diq */
- KFD_MQD_TYPE_CIK_SDMA, /* for sdma queues */
+ KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */
+ KFD_MQD_TYPE_HIQ, /* for hiq */
+ KFD_MQD_TYPE_CP, /* for cp queues and diq */
+ KFD_MQD_TYPE_SDMA, /* for sdma queues */
KFD_MQD_TYPE_MAX
};
@@ -490,8 +504,9 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p);
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
- struct kfd_process *p,
- int create_pdd);
+ struct kfd_process *p);
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
@@ -519,6 +534,13 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int queue_id);
+/* GTT Sub-Allocator */
+
+int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ struct kfd_mem_obj **mem_obj);
+
+int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
+
extern struct device *kfd_device;
/* Topology */
@@ -531,10 +553,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
/* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
@@ -546,6 +565,8 @@ int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
inline uint32_t lower_32(uint64_t x);
inline uint32_t upper_32(uint64_t x);
+struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
+inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
@@ -554,6 +575,10 @@ void print_queue(struct queue *q);
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
+struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
+struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
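kfd_priv.h now exposes a small GTT sub-allocator (kfd_gtt_sa_allocate()/kfd_gtt_sa_free()) backed by the gtt_sa_bitmap/gtt_sa_chunk_size/gtt_sa_num_of_chunks fields added to struct kfd_dev, with kfd_mem_obj recording a chunk range instead of a kgd buffer object. The allocator itself is implemented elsewhere in the driver; the sketch below only illustrates the general bitmap-of-chunks idea, under assumed sizes and outside the kernel:

#include <stdint.h>
#include <string.h>

#define CHUNKS     64           /* stands in for gtt_sa_num_of_chunks */
#define CHUNK_SIZE 512          /* stands in for gtt_sa_chunk_size (bytes) */

static uint8_t sa_bitmap[CHUNKS];       /* 1 = chunk in use */

/* Mark a run of free chunks large enough for 'size' bytes; 0 on success. */
static int sa_allocate(unsigned int size, unsigned int *range_start,
		       unsigned int *range_end)
{
	unsigned int need = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
	unsigned int i, run = 0;

	if (need == 0)
		need = 1;

	for (i = 0; i < CHUNKS; i++) {
		run = sa_bitmap[i] ? 0 : run + 1;
		if (run == need) {
			*range_start = i - need + 1;
			*range_end = i;
			memset(&sa_bitmap[*range_start], 1, need);
			return 0;
		}
	}
	return -1;                      /* no contiguous run available */
}

static void sa_free(unsigned int range_start, unsigned int range_end)
{
	memset(&sa_bitmap[range_start], 0, range_end - range_start + 1);
}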
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 3c76ef05cbcf..a369c149d172 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -311,24 +311,29 @@ err_alloc_process:
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
- struct kfd_process *p,
- int create_pdd)
+ struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
if (pdd->dev == dev)
- return pdd;
-
- if (create_pdd) {
- pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
- if (pdd != NULL) {
- pdd->dev = dev;
- INIT_LIST_HEAD(&pdd->qpd.queues_list);
- INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
- pdd->qpd.dqm = dev->dqm;
- list_add(&pdd->per_device_list, &p->per_device_data);
- }
+ break;
+
+ return pdd;
+}
+
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p)
+{
+ struct kfd_process_device *pdd = NULL;
+
+ pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
+ if (pdd != NULL) {
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+ pdd->qpd.dqm = dev->dqm;
+ list_add(&pdd->per_device_list, &p->per_device_data);
}
return pdd;
@@ -344,11 +349,14 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p)
{
- struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
+ struct kfd_process_device *pdd;
int err;
- if (pdd == NULL)
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
return ERR_PTR(-ENOMEM);
+ }
if (pdd->bound)
return pdd;
@@ -384,7 +392,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pqm_uninit(&p->pqm);
- pdd = kfd_get_process_device_data(dev, p, 0);
+ pdd = kfd_get_process_device_data(dev, p);
/*
* Just mark pdd as unbound, because we still need it to call
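With the create_pdd flag gone, kfd_get_process_device_data() is purely a lookup and kfd_create_process_device_data() does the allocation, so callers that used to pass create_pdd = 1 must now invoke the creation helper explicitly. A toy, self-contained version of the same lookup-vs-create split (not the driver's types):

#include <stdlib.h>

struct pdd {
	int dev_id;
	struct pdd *next;
};

static struct pdd *pdd_list;            /* per-process device-data list */

/* Lookup only, mirroring the reduced kfd_get_process_device_data(). */
static struct pdd *pdd_lookup(int dev_id)
{
	struct pdd *p;

	for (p = pdd_list; p; p = p->next)
		if (p->dev_id == dev_id)
			return p;
	return NULL;
}

/* Creation only, mirroring kfd_create_process_device_data(). */
static struct pdd *pdd_create(int dev_id)
{
	struct pdd *p = calloc(1, sizeof(*p));

	if (p) {
		p->dev_id = dev_id;
		p->next = pdd_list;
		pdd_list = p;
	}
	return p;
}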
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..530b82c4e78b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap,
- max_num_of_queues_per_process);
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found);
- if (found >= max_num_of_queues_per_process) {
+ if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM;
@@ -128,7 +128,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
/* let DQM handle it*/
q_properties->vmid = 0;
q_properties->queue_id = qid;
- q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
retval = init_queue(q, *q_properties);
if (retval != 0)
@@ -167,8 +166,11 @@ int pqm_create_queue(struct process_queue_manager *pqm,
q = NULL;
kq = NULL;
- pdd = kfd_get_process_device_data(dev, pqm->process, 1);
- BUG_ON(!pdd);
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ return -1;
+ }
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
@@ -176,7 +178,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (list_empty(&pqm->queues)) {
pdd->qpd.pqm = pqm;
- dev->dqm->register_process(dev->dqm, &pdd->qpd);
+ dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
@@ -186,6 +188,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
switch (type) {
+ case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
@@ -201,8 +204,9 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -214,7 +218,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq->queue->properties.queue_id = *qid;
pqn->kq = kq;
pqn->q = NULL;
- retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+ retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
+ kq, &pdd->qpd);
break;
default:
BUG();
@@ -222,7 +227,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("kfd: error dqm create queue\n");
+ pr_debug("Error dqm create queue\n");
goto err_create_queue;
}
@@ -241,7 +246,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
kfree(pqn);
err_allocate_pqn:
+ /* if the queues list is now empty, unregister the process from the device */
clear_bit(*qid, pqm->queue_slot_bitmap);
+ if (list_empty(&pqm->queues))
+ dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
@@ -273,19 +281,22 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->q->device;
BUG_ON(!dev);
- pdd = kfd_get_process_device_data(dev, pqm->process, 1);
- BUG_ON(!pdd);
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ return -1;
+ }
if (pqn->kq) {
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
- dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+ dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
kernel_queue_uninit(pqn->kq);
}
if (pqn->q) {
dqm = pqn->q->device->dqm;
- retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+ retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval != 0)
return retval;
@@ -297,7 +308,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
clear_bit(qid, pqm->queue_slot_bitmap);
if (list_empty(&pqm->queues))
- dqm->unregister_process(dqm, &pdd->qpd);
+ dqm->ops.unregister_process(dqm, &pdd->qpd);
return retval;
}
@@ -311,14 +322,19 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
BUG_ON(!pqm);
pqn = get_queue_by_qid(pqm, qid);
- BUG_ON(!pqn);
+ if (!pqn) {
+ pr_debug("amdkfd: No queue %d exists for update operation\n",
+ qid);
+ return -EFAULT;
+ }
pqn->q->properties.queue_address = p->queue_address;
pqn->q->properties.queue_size = p->queue_size;
pqn->q->properties.queue_percent = p->queue_percent;
pqn->q->properties.priority = p->priority;
- retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+ retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+ pqn->q);
if (retval != 0)
return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index cca1708fd811..498399323a8c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/hash.h>
#include <linux/cpufreq.h>
+#include <linux/log2.h>
#include "kfd_priv.h"
#include "kfd_crat.h"
@@ -630,10 +631,10 @@ static struct kobj_type cache_type = {
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- ssize_t ret;
struct kfd_topology_device *dev;
char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
uint32_t i;
+ uint32_t log_max_watch_addr;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
@@ -641,8 +642,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
- ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
- } else if (strcmp(attr->name, "name") == 0) {
+ return sysfs_show_32bit_val(buffer, dev->gpu_id);
+ }
+
+ if (strcmp(attr->name, "name") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_name);
for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
@@ -652,80 +655,90 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
break;
}
public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
- ret = sysfs_show_str_val(buffer, public_name);
- } else {
- dev = container_of(attr, struct kfd_topology_device,
- attr_props);
- sysfs_show_32bit_prop(buffer, "cpu_cores_count",
- dev->node_props.cpu_cores_count);
- sysfs_show_32bit_prop(buffer, "simd_count",
- dev->node_props.simd_count);
-
- if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
- dev->node_props.mem_banks_count,
- dev->mem_bank_count);
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->mem_bank_count);
- } else {
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->node_props.mem_banks_count);
- }
+ return sysfs_show_str_val(buffer, public_name);
+ }
- sysfs_show_32bit_prop(buffer, "caches_count",
- dev->node_props.caches_count);
- sysfs_show_32bit_prop(buffer, "io_links_count",
- dev->node_props.io_links_count);
- sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
- dev->node_props.cpu_core_id_base);
- sysfs_show_32bit_prop(buffer, "simd_id_base",
- dev->node_props.simd_id_base);
- sysfs_show_32bit_prop(buffer, "capability",
- dev->node_props.capability);
- sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
- dev->node_props.max_waves_per_simd);
- sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
- dev->node_props.lds_size_in_kb);
- sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
- dev->node_props.gds_size_in_kb);
- sysfs_show_32bit_prop(buffer, "wave_front_size",
- dev->node_props.wave_front_size);
- sysfs_show_32bit_prop(buffer, "array_count",
- dev->node_props.array_count);
- sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
- dev->node_props.simd_arrays_per_engine);
- sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
- dev->node_props.cu_per_simd_array);
- sysfs_show_32bit_prop(buffer, "simd_per_cu",
- dev->node_props.simd_per_cu);
- sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
- dev->node_props.max_slots_scratch_cu);
- sysfs_show_32bit_prop(buffer, "vendor_id",
- dev->node_props.vendor_id);
- sysfs_show_32bit_prop(buffer, "device_id",
- dev->node_props.device_id);
- sysfs_show_32bit_prop(buffer, "location_id",
- dev->node_props.location_id);
-
- if (dev->gpu) {
- sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
- kfd2kgd->get_max_engine_clock_in_mhz(
- dev->gpu->kgd));
- sysfs_show_64bit_prop(buffer, "local_mem_size",
- kfd2kgd->get_vmem_size(dev->gpu->kgd));
-
- sysfs_show_32bit_prop(buffer, "fw_version",
- kfd2kgd->get_fw_version(
- dev->gpu->kgd,
- KGD_ENGINE_MEC1));
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_props);
+ sysfs_show_32bit_prop(buffer, "cpu_cores_count",
+ dev->node_props.cpu_cores_count);
+ sysfs_show_32bit_prop(buffer, "simd_count",
+ dev->node_props.simd_count);
+
+ if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
+ pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ dev->node_props.mem_banks_count,
+ dev->mem_bank_count);
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->mem_bank_count);
+ } else {
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->node_props.mem_banks_count);
+ }
+ sysfs_show_32bit_prop(buffer, "caches_count",
+ dev->node_props.caches_count);
+ sysfs_show_32bit_prop(buffer, "io_links_count",
+ dev->node_props.io_links_count);
+ sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
+ dev->node_props.cpu_core_id_base);
+ sysfs_show_32bit_prop(buffer, "simd_id_base",
+ dev->node_props.simd_id_base);
+ sysfs_show_32bit_prop(buffer, "capability",
+ dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+ dev->node_props.max_waves_per_simd);
+ sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+ dev->node_props.lds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
+ dev->node_props.gds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "wave_front_size",
+ dev->node_props.wave_front_size);
+ sysfs_show_32bit_prop(buffer, "array_count",
+ dev->node_props.array_count);
+ sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
+ dev->node_props.simd_arrays_per_engine);
+ sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
+ dev->node_props.cu_per_simd_array);
+ sysfs_show_32bit_prop(buffer, "simd_per_cu",
+ dev->node_props.simd_per_cu);
+ sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
+ dev->node_props.max_slots_scratch_cu);
+ sysfs_show_32bit_prop(buffer, "vendor_id",
+ dev->node_props.vendor_id);
+ sysfs_show_32bit_prop(buffer, "device_id",
+ dev->node_props.device_id);
+ sysfs_show_32bit_prop(buffer, "location_id",
+ dev->node_props.location_id);
+
+ if (dev->gpu) {
+ log_max_watch_addr =
+ __ilog2_u32(dev->gpu->device_info->num_of_watch_points);
+
+ if (log_max_watch_addr) {
+ dev->node_props.capability |=
+ HSA_CAP_WATCH_POINTS_SUPPORTED;
+
+ dev->node_props.capability |=
+ ((log_max_watch_addr <<
+ HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
+ HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
- ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
- cpufreq_quick_get_max(0)/1000);
+ sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
+ kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kgd));
+ sysfs_show_64bit_prop(buffer, "local_mem_size",
+ kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+ sysfs_show_32bit_prop(buffer, "fw_version",
+ kfd2kgd->get_fw_version(
+ dev->gpu->kgd,
+ KGD_ENGINE_MEC1));
}
- return ret;
+ return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+ cpufreq_quick_get_max(0)/1000);
}
static const struct sysfs_ops node_ops = {
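node_show() above folds the number of address-watch points into the node's capability word as a log2-encoded bitfield. The standalone program below reproduces that packing with placeholder shift/mask values; the real HSA_CAP_* definitions live in kfd_topology.h and are not shown in this diff:

#include <stdio.h>

#define CAP_WATCH_POINTS_SUPPORTED       (1u << 0)      /* placeholder bit */
#define CAP_WATCH_POINTS_TOTALBITS_SHIFT 8              /* placeholder shift */
#define CAP_WATCH_POINTS_TOTALBITS_MASK  (0xfu << 8)    /* placeholder mask */

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int capability = 0;
	unsigned int num_of_watch_points = 4;           /* example value */
	unsigned int log_max_watch_addr = ilog2_u32(num_of_watch_points);

	if (log_max_watch_addr) {
		capability |= CAP_WATCH_POINTS_SUPPORTED;
		capability |= (log_max_watch_addr <<
			       CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
			      CAP_WATCH_POINTS_TOTALBITS_MASK;
	}
	printf("capability = 0x%08x\n", capability);    /* prints 0x00000201 */
	return 0;
}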
diff --git a/drivers/gpu/drm/amd/include/cik_structs.h b/drivers/gpu/drm/amd/include/cik_structs.h
new file mode 100644
index 000000000000..749eab94e335
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cik_structs.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CIK_STRUCTS_H_
+#define CIK_STRUCTS_H_
+
+struct cik_mqd {
+ uint32_t header;
+ uint32_t compute_dispatch_initiator;
+ uint32_t compute_dim_x;
+ uint32_t compute_dim_y;
+ uint32_t compute_dim_z;
+ uint32_t compute_start_x;
+ uint32_t compute_start_y;
+ uint32_t compute_start_z;
+ uint32_t compute_num_thread_x;
+ uint32_t compute_num_thread_y;
+ uint32_t compute_num_thread_z;
+ uint32_t compute_pipelinestat_enable;
+ uint32_t compute_perfcount_enable;
+ uint32_t compute_pgm_lo;
+ uint32_t compute_pgm_hi;
+ uint32_t compute_tba_lo;
+ uint32_t compute_tba_hi;
+ uint32_t compute_tma_lo;
+ uint32_t compute_tma_hi;
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint32_t compute_vmid;
+ uint32_t compute_resource_limits;
+ uint32_t compute_static_thread_mgmt_se0;
+ uint32_t compute_static_thread_mgmt_se1;
+ uint32_t compute_tmpring_size;
+ uint32_t compute_static_thread_mgmt_se2;
+ uint32_t compute_static_thread_mgmt_se3;
+ uint32_t compute_restart_x;
+ uint32_t compute_restart_y;
+ uint32_t compute_restart_z;
+ uint32_t compute_thread_trace_enable;
+ uint32_t compute_misc_reserved;
+ uint32_t compute_user_data_0;
+ uint32_t compute_user_data_1;
+ uint32_t compute_user_data_2;
+ uint32_t compute_user_data_3;
+ uint32_t compute_user_data_4;
+ uint32_t compute_user_data_5;
+ uint32_t compute_user_data_6;
+ uint32_t compute_user_data_7;
+ uint32_t compute_user_data_8;
+ uint32_t compute_user_data_9;
+ uint32_t compute_user_data_10;
+ uint32_t compute_user_data_11;
+ uint32_t compute_user_data_12;
+ uint32_t compute_user_data_13;
+ uint32_t compute_user_data_14;
+ uint32_t compute_user_data_15;
+ uint32_t cp_compute_csinvoc_count_lo;
+ uint32_t cp_compute_csinvoc_count_hi;
+ uint32_t cp_mqd_base_addr_lo;
+ uint32_t cp_mqd_base_addr_hi;
+ uint32_t cp_hqd_active;
+ uint32_t cp_hqd_vmid;
+ uint32_t cp_hqd_persistent_state;
+ uint32_t cp_hqd_pipe_priority;
+ uint32_t cp_hqd_queue_priority;
+ uint32_t cp_hqd_quantum;
+ uint32_t cp_hqd_pq_base_lo;
+ uint32_t cp_hqd_pq_base_hi;
+ uint32_t cp_hqd_pq_rptr;
+ uint32_t cp_hqd_pq_rptr_report_addr_lo;
+ uint32_t cp_hqd_pq_rptr_report_addr_hi;
+ uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+ uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+ uint32_t cp_hqd_pq_doorbell_control;
+ uint32_t cp_hqd_pq_wptr;
+ uint32_t cp_hqd_pq_control;
+ uint32_t cp_hqd_ib_base_addr_lo;
+ uint32_t cp_hqd_ib_base_addr_hi;
+ uint32_t cp_hqd_ib_rptr;
+ uint32_t cp_hqd_ib_control;
+ uint32_t cp_hqd_iq_timer;
+ uint32_t cp_hqd_iq_rptr;
+ uint32_t cp_hqd_dequeue_request;
+ uint32_t cp_hqd_dma_offload;
+ uint32_t cp_hqd_sema_cmd;
+ uint32_t cp_hqd_msg_type;
+ uint32_t cp_hqd_atomic0_preop_lo;
+ uint32_t cp_hqd_atomic0_preop_hi;
+ uint32_t cp_hqd_atomic1_preop_lo;
+ uint32_t cp_hqd_atomic1_preop_hi;
+ uint32_t cp_hqd_hq_status0;
+ uint32_t cp_hqd_hq_control0;
+ uint32_t cp_mqd_control;
+ uint32_t cp_mqd_query_time_lo;
+ uint32_t cp_mqd_query_time_hi;
+ uint32_t cp_mqd_connect_start_time_lo;
+ uint32_t cp_mqd_connect_start_time_hi;
+ uint32_t cp_mqd_connect_end_time_lo;
+ uint32_t cp_mqd_connect_end_time_hi;
+ uint32_t cp_mqd_connect_end_wf_count;
+ uint32_t cp_mqd_connect_end_pq_rptr;
+ uint32_t cp_mqd_connect_end_pq_wptr;
+ uint32_t cp_mqd_connect_end_ib_rptr;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t iqtimer_pkt_header;
+ uint32_t iqtimer_pkt_dw0;
+ uint32_t iqtimer_pkt_dw1;
+ uint32_t iqtimer_pkt_dw2;
+ uint32_t iqtimer_pkt_dw3;
+ uint32_t iqtimer_pkt_dw4;
+ uint32_t iqtimer_pkt_dw5;
+ uint32_t iqtimer_pkt_dw6;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t queue_doorbell_id0;
+ uint32_t queue_doorbell_id1;
+ uint32_t queue_doorbell_id2;
+ uint32_t queue_doorbell_id3;
+ uint32_t queue_doorbell_id4;
+ uint32_t queue_doorbell_id5;
+ uint32_t queue_doorbell_id6;
+ uint32_t queue_doorbell_id7;
+ uint32_t queue_doorbell_id8;
+ uint32_t queue_doorbell_id9;
+ uint32_t queue_doorbell_id10;
+ uint32_t queue_doorbell_id11;
+ uint32_t queue_doorbell_id12;
+ uint32_t queue_doorbell_id13;
+ uint32_t queue_doorbell_id14;
+ uint32_t queue_doorbell_id15;
+};
+
+struct cik_sdma_rlc_registers {
+ uint32_t sdma_rlc_rb_cntl;
+ uint32_t sdma_rlc_rb_base;
+ uint32_t sdma_rlc_rb_base_hi;
+ uint32_t sdma_rlc_rb_rptr;
+ uint32_t sdma_rlc_rb_wptr;
+ uint32_t sdma_rlc_rb_wptr_poll_cntl;
+ uint32_t sdma_rlc_rb_wptr_poll_addr_hi;
+ uint32_t sdma_rlc_rb_wptr_poll_addr_lo;
+ uint32_t sdma_rlc_rb_rptr_addr_hi;
+ uint32_t sdma_rlc_rb_rptr_addr_lo;
+ uint32_t sdma_rlc_ib_cntl;
+ uint32_t sdma_rlc_ib_rptr;
+ uint32_t sdma_rlc_ib_offset;
+ uint32_t sdma_rlc_ib_base_lo;
+ uint32_t sdma_rlc_ib_base_hi;
+ uint32_t sdma_rlc_ib_size;
+ uint32_t sdma_rlc_skip_cntl;
+ uint32_t sdma_rlc_context_status;
+ uint32_t sdma_rlc_doorbell;
+ uint32_t sdma_rlc_virtual_addr;
+ uint32_t sdma_rlc_ape1_cntl;
+ uint32_t sdma_rlc_doorbell_log;
+ uint32_t reserved_22;
+ uint32_t reserved_23;
+ uint32_t reserved_24;
+ uint32_t reserved_25;
+ uint32_t reserved_26;
+ uint32_t reserved_27;
+ uint32_t reserved_28;
+ uint32_t reserved_29;
+ uint32_t reserved_30;
+ uint32_t reserved_31;
+ uint32_t reserved_32;
+ uint32_t reserved_33;
+ uint32_t reserved_34;
+ uint32_t reserved_35;
+ uint32_t reserved_36;
+ uint32_t reserved_37;
+ uint32_t reserved_38;
+ uint32_t reserved_39;
+ uint32_t reserved_40;
+ uint32_t reserved_41;
+ uint32_t reserved_42;
+ uint32_t reserved_43;
+ uint32_t reserved_44;
+ uint32_t reserved_45;
+ uint32_t reserved_46;
+ uint32_t reserved_47;
+ uint32_t reserved_48;
+ uint32_t reserved_49;
+ uint32_t reserved_50;
+ uint32_t reserved_51;
+ uint32_t reserved_52;
+ uint32_t reserved_53;
+ uint32_t reserved_54;
+ uint32_t reserved_55;
+ uint32_t reserved_56;
+ uint32_t reserved_57;
+ uint32_t reserved_58;
+ uint32_t reserved_59;
+ uint32_t reserved_60;
+ uint32_t reserved_61;
+ uint32_t reserved_62;
+ uint32_t reserved_63;
+ uint32_t reserved_64;
+ uint32_t reserved_65;
+ uint32_t reserved_66;
+ uint32_t reserved_67;
+ uint32_t reserved_68;
+ uint32_t reserved_69;
+ uint32_t reserved_70;
+ uint32_t reserved_71;
+ uint32_t reserved_72;
+ uint32_t reserved_73;
+ uint32_t reserved_74;
+ uint32_t reserved_75;
+ uint32_t reserved_76;
+ uint32_t reserved_77;
+ uint32_t reserved_78;
+ uint32_t reserved_79;
+ uint32_t reserved_80;
+ uint32_t reserved_81;
+ uint32_t reserved_82;
+ uint32_t reserved_83;
+ uint32_t reserved_84;
+ uint32_t reserved_85;
+ uint32_t reserved_86;
+ uint32_t reserved_87;
+ uint32_t reserved_88;
+ uint32_t reserved_89;
+ uint32_t reserved_90;
+ uint32_t reserved_91;
+ uint32_t reserved_92;
+ uint32_t reserved_93;
+ uint32_t reserved_94;
+ uint32_t reserved_95;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t reserved_100;
+ uint32_t reserved_101;
+ uint32_t reserved_102;
+ uint32_t reserved_103;
+ uint32_t reserved_104;
+ uint32_t reserved_105;
+ uint32_t reserved_106;
+ uint32_t reserved_107;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t reserved_112;
+ uint32_t reserved_113;
+ uint32_t reserved_114;
+ uint32_t reserved_115;
+ uint32_t reserved_116;
+ uint32_t reserved_117;
+ uint32_t reserved_118;
+ uint32_t reserved_119;
+ uint32_t reserved_120;
+ uint32_t reserved_121;
+ uint32_t reserved_122;
+ uint32_t reserved_123;
+ uint32_t reserved_124;
+ uint32_t reserved_125;
+ uint32_t reserved_126;
+ uint32_t reserved_127;
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
+};
+
+#endif /* CIK_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 96a512208fad..239bc16a1ddd 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -110,17 +110,10 @@ struct kgd2kfd_calls {
/**
* struct kfd2kgd_calls
*
- * @init_sa_manager: Initialize an instance of the sa manager, used by
- * amdkfd for all system memory allocations that are mapped to the GART
- * address space
+ * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
+ * The buffer can be used for mqds, hpds, kernel queues, fences and runlists
*
- * @fini_sa_manager: Releases all memory allocations for amdkfd that are
- * handled by kgd sa manager
- *
- * @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
- * be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
+ * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
*
* @get_vmem_size: Retrieves (physical) size of VRAM
*
@@ -136,18 +129,23 @@ struct kgd2kfd_calls {
* @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
* scheduling mode. Only used for no cp scheduling mode.
*
- * @init_memory: Initializes memory apertures to fixed base/limit address
- * and non cached memory types.
- *
* @init_pipeline: Initialized the compute pipelines.
*
* @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
* scheduling mode.
*
+ * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
+ * Used only for no HWS mode.
+ *
* @hqd_is_occupied: Checks if a hqd slot is occupied.
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
+ * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
+ *
+ * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
+ * SDMA hqd slot.
+ *
* @get_fw_version: Returns FW versions from the header
*
* This structure contains function pointers to services that the kgd driver
@@ -155,13 +153,11 @@ struct kgd2kfd_calls {
*
*/
struct kfd2kgd_calls {
- /* Memory management. */
- int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
- void (*fini_sa_manager)(struct kgd_dev *kgd);
- int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
- enum kgd_memory_pool pool, struct kgd_mem **mem);
+ int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr);
- void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+ void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
@@ -176,25 +172,32 @@ struct kfd2kgd_calls {
int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
unsigned int vmid);
- int (*init_memory)(struct kgd_dev *kgd);
int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
+ int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
+
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+
+ bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);
+
+ int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
+
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,
- const struct kfd2kgd_calls *f2g,
- const struct kgd2kfd_calls **g2f);
+ const struct kfd2kgd_calls *f2g,
+ const struct kgd2kfd_calls **g2f);
-#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
+#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
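The reworked kfd2kgd_calls above replace the sa-manager trio with a single init_gtt_mem_allocation()/free_gtt_mem() pair and add SDMA counterparts for the hqd operations. A rough user-space sketch of the shape a kgd-side implementation of the two memory callbacks could take; the buffer-object type and GPU address here are placeholders, not radeon's code:

#include <stdint.h>
#include <stdlib.h>

struct kgd_dev;                         /* opaque, as in the interface */

struct gtt_bo {                         /* stand-in for the kgd buffer object */
	uint64_t gpu_addr;
	void *cpu_ptr;
};

static int my_init_gtt_mem_allocation(struct kgd_dev *kgd, size_t size,
				       void **mem_obj, uint64_t *gpu_addr,
				       void **cpu_ptr)
{
	struct gtt_bo *bo = calloc(1, sizeof(*bo));

	(void)kgd;
	if (!bo)
		return -1;
	bo->cpu_ptr = calloc(1, size);  /* real driver: pin a BO in the GART */
	if (!bo->cpu_ptr) {
		free(bo);
		return -1;
	}
	bo->gpu_addr = (uint64_t)(uintptr_t)bo->cpu_ptr;  /* placeholder addr */
	*mem_obj = bo;
	*gpu_addr = bo->gpu_addr;
	*cpu_ptr = bo->cpu_ptr;
	return 0;
}

static void my_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct gtt_bo *bo = mem_obj;

	(void)kgd;
	free(bo->cpu_ptr);
	free(bo);
}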
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e3a7a5078e5c..42d2ffa08716 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -653,10 +653,6 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
@@ -678,7 +674,6 @@ static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
.mode_fixup = armada_drm_crtc_mode_fixup,
.mode_set = armada_drm_crtc_mode_set,
.mode_set_base = armada_drm_crtc_mode_set_base,
- .load_lut = armada_drm_crtc_load_lut,
.disable = armada_drm_crtc_disable,
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5c60ae524c45..ff68eefae273 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -335,18 +335,27 @@ int ast_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
- if (ret) {
- kfree(afbdev);
- return ret;
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&afbdev->helper, 32);
+ ret = drm_fb_helper_initial_config(&afbdev->helper, 32);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&afbdev->helper);
+free:
+ kfree(afbdev);
+ return ret;
}
void ast_fbdev_fini(struct drm_device *dev)
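The ast_fbdev_init() hunk above converts the early returns into an ordered goto unwind, so each new failure point (adding connectors, the initial config) releases exactly what was set up before it. The generic shape of that pattern, with stub steps standing in for the DRM helpers:

#include <stdlib.h>

struct ctx { int initialized; };

static int  step_init(struct ctx *c)   { c->initialized = 1; return 0; }
static int  step_attach(struct ctx *c) { (void)c; return 0; }
static void step_fini(struct ctx *c)   { c->initialized = 0; }

static int setup(struct ctx **out)
{
	struct ctx *c = calloc(1, sizeof(*c));
	int ret;

	if (!c)
		return -1;

	ret = step_init(c);             /* e.g. drm_fb_helper_init() */
	if (ret)
		goto free;

	ret = step_attach(c);           /* e.g. connectors, initial config */
	if (ret)
		goto fini;

	*out = c;
	return 0;

fini:
	step_fini(c);                   /* undo step_init() only */
free:
	free(c);
	return ret;
}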
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
new file mode 100644
index 000000000000..99b4f0698a30
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -0,0 +1,11 @@
+config DRM_ATMEL_HLCDC
+ tristate "DRM Support for ATMEL HLCDC Display Controller"
+ depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_PANEL
+ help
+ Choose this option if you have an ATMEL SoC with an HLCDC display
+ controller (i.e. at91sam9n12, at91sam9x5 family or sama5d3 family).
diff --git a/drivers/gpu/drm/atmel-hlcdc/Makefile b/drivers/gpu/drm/atmel-hlcdc/Makefile
new file mode 100644
index 000000000000..10ae426e60bd
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/Makefile
@@ -0,0 +1,7 @@
+atmel-hlcdc-dc-y := atmel_hlcdc_crtc.o \
+ atmel_hlcdc_dc.o \
+ atmel_hlcdc_layer.o \
+ atmel_hlcdc_output.o \
+ atmel_hlcdc_plane.o
+
+obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc-dc.o
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
new file mode 100644
index 000000000000..0409b907de5d
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2014 Traphandler
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drmP.h>
+
+#include <video/videomode.h>
+
+#include "atmel_hlcdc_dc.h"
+
+/**
+ * Atmel HLCDC CRTC structure
+ *
+ * @base: base DRM CRTC structure
+ * @dc: pointer to the atmel_hlcdc_dc structure owning this CRTC
+ * @event: pointer to the current page flip event
+ * @id: CRTC id (returned by drm_crtc_index)
+ * @dpms: DPMS mode
+ */
+struct atmel_hlcdc_crtc {
+ struct drm_crtc base;
+ struct atmel_hlcdc_dc *dc;
+ struct drm_pending_vblank_event *event;
+ int id;
+ int dpms;
+};
+
+static inline struct atmel_hlcdc_crtc *
+drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct atmel_hlcdc_crtc, base);
+}
+
+static void atmel_hlcdc_crtc_dpms(struct drm_crtc *c, int mode)
+{
+ struct drm_device *dev = c->dev;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct regmap *regmap = crtc->dc->hlcdc->regmap;
+ unsigned int status;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (crtc->dpms == mode)
+ return;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_DISP))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_SYNC))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_PIXEL_CLK))
+ cpu_relax();
+
+ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
+
+ pm_runtime_allow(dev->dev);
+ } else {
+ pm_runtime_forbid(dev->dev);
+
+ clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_PIXEL_CLK))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_SYNC))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_DISP))
+ cpu_relax();
+ }
+
+ pm_runtime_put_sync(dev->dev);
+
+ crtc->dpms = mode;
+}
+
+static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct regmap *regmap = crtc->dc->hlcdc->regmap;
+ struct drm_plane *plane = c->primary;
+ struct drm_framebuffer *fb;
+ unsigned long mode_rate;
+ struct videomode vm;
+ unsigned long prate;
+ unsigned int cfg;
+ int div;
+
+ if (atmel_hlcdc_dc_mode_valid(crtc->dc, adj) != MODE_OK)
+ return -EINVAL;
+
+ vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
+ vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
+ vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start;
+ vm.hfront_porch = adj->crtc_hsync_start - adj->crtc_hdisplay;
+ vm.hback_porch = adj->crtc_htotal - adj->crtc_hsync_end;
+ vm.hsync_len = adj->crtc_hsync_end - adj->crtc_hsync_start;
+
+ regmap_write(regmap, ATMEL_HLCDC_CFG(1),
+ (vm.hsync_len - 1) | ((vm.vsync_len - 1) << 16));
+
+ regmap_write(regmap, ATMEL_HLCDC_CFG(2),
+ (vm.vfront_porch - 1) | (vm.vback_porch << 16));
+
+ regmap_write(regmap, ATMEL_HLCDC_CFG(3),
+ (vm.hfront_porch - 1) | ((vm.hback_porch - 1) << 16));
+
+ regmap_write(regmap, ATMEL_HLCDC_CFG(4),
+ (adj->crtc_hdisplay - 1) |
+ ((adj->crtc_vdisplay - 1) << 16));
+
+ cfg = ATMEL_HLCDC_CLKPOL;
+
+ prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
+ mode_rate = mode->crtc_clock * 1000;
+ if ((prate / 2) < mode_rate) {
+ prate *= 2;
+ cfg |= ATMEL_HLCDC_CLKSEL;
+ }
+
+ div = DIV_ROUND_UP(prate, mode_rate);
+ if (div < 2)
+ div = 2;
+
+ cfg |= ATMEL_HLCDC_CLKDIV(div);
+
+ regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0),
+ ATMEL_HLCDC_CLKSEL | ATMEL_HLCDC_CLKDIV_MASK |
+ ATMEL_HLCDC_CLKPOL, cfg);
+
+ cfg = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ cfg |= ATMEL_HLCDC_VSPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ cfg |= ATMEL_HLCDC_HSPOL;
+
+ regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
+ ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
+ ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
+ ATMEL_HLCDC_DISPPOL | ATMEL_HLCDC_DISPDLY |
+ ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
+ ATMEL_HLCDC_GUARDTIME_MASK,
+ cfg);
+
+ fb = plane->fb;
+ plane->fb = old_fb;
+
+ return atmel_hlcdc_plane_update_with_mode(plane, c, fb, 0, 0,
+ adj->hdisplay, adj->vdisplay,
+ x << 16, y << 16,
+ adj->hdisplay << 16,
+ adj->vdisplay << 16,
+ adj);
+}
+
+int atmel_hlcdc_crtc_mode_set_base(struct drm_crtc *c, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_plane *plane = c->primary;
+ struct drm_framebuffer *fb = plane->fb;
+ struct drm_display_mode *mode = &c->hwmode;
+
+ plane->fb = old_fb;
+
+ return plane->funcs->update_plane(plane, c, fb,
+ 0, 0,
+ mode->hdisplay,
+ mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16,
+ mode->vdisplay << 16);
+}
+
+static void atmel_hlcdc_crtc_prepare(struct drm_crtc *crtc)
+{
+ atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void atmel_hlcdc_crtc_commit(struct drm_crtc *crtc)
+{
+ atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void atmel_hlcdc_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+
+ atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ crtc->primary->funcs->disable_plane(crtc->primary);
+
+ drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
+ if (plane->crtc != crtc)
+ continue;
+
+ plane->funcs->disable_plane(crtc->primary);
+ plane->crtc = NULL;
+ }
+}
+
+static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
+ .mode_fixup = atmel_hlcdc_crtc_mode_fixup,
+ .dpms = atmel_hlcdc_crtc_dpms,
+ .mode_set = atmel_hlcdc_crtc_mode_set,
+ .mode_set_base = atmel_hlcdc_crtc_mode_set_base,
+ .prepare = atmel_hlcdc_crtc_prepare,
+ .commit = atmel_hlcdc_crtc_commit,
+ .disable = atmel_hlcdc_crtc_disable,
+};
+
+static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
+{
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ drm_crtc_cleanup(c);
+ kfree(crtc);
+}
+
+void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *c,
+ struct drm_file *file)
+{
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = c->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = crtc->event;
+ if (event && event->base.file_priv == file) {
+ event->base.destroy(&event->base);
+ drm_vblank_put(dev, crtc->id);
+ crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (crtc->event) {
+ drm_send_vblank_event(dev, crtc->id, crtc->event);
+ drm_vblank_put(dev, crtc->id);
+ crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
+{
+ drm_handle_vblank(c->dev, 0);
+ atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
+}
+
+static int atmel_hlcdc_crtc_page_flip(struct drm_crtc *c,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct atmel_hlcdc_plane_update_req req;
+ struct drm_plane *plane = c->primary;
+ struct drm_device *dev = c->dev;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (crtc->event)
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (ret)
+ return ret;
+
+ memset(&req, 0, sizeof(req));
+ req.crtc_x = 0;
+ req.crtc_y = 0;
+ req.crtc_h = c->mode.crtc_vdisplay;
+ req.crtc_w = c->mode.crtc_hdisplay;
+ req.src_x = c->x << 16;
+ req.src_y = c->y << 16;
+ req.src_w = req.crtc_w << 16;
+ req.src_h = req.crtc_h << 16;
+ req.fb = fb;
+
+ ret = atmel_hlcdc_plane_prepare_update_req(plane, &req, &c->hwmode);
+ if (ret)
+ return ret;
+
+ if (event) {
+ drm_vblank_get(c->dev, crtc->id);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ crtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ ret = atmel_hlcdc_plane_apply_update_req(plane, &req);
+ if (ret)
+ crtc->event = NULL;
+ else
+ plane->fb = fb;
+
+ return ret;
+}
+
+static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
+ .page_flip = atmel_hlcdc_crtc_page_flip,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = atmel_hlcdc_crtc_destroy,
+};
+
+int atmel_hlcdc_crtc_create(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct atmel_hlcdc_planes *planes = dc->planes;
+ struct atmel_hlcdc_crtc *crtc;
+ int ret;
+ int i;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ crtc->dpms = DRM_MODE_DPMS_OFF;
+ crtc->dc = dc;
+
+ ret = drm_crtc_init_with_planes(dev, &crtc->base,
+ &planes->primary->base,
+ planes->cursor ? &planes->cursor->base : NULL,
+ &atmel_hlcdc_crtc_funcs);
+ if (ret < 0)
+ goto fail;
+
+ crtc->id = drm_crtc_index(&crtc->base);
+
+ if (planes->cursor)
+ planes->cursor->base.possible_crtcs = 1 << crtc->id;
+
+ for (i = 0; i < planes->noverlays; i++)
+ planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
+
+ drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+
+ dc->crtc = &crtc->base;
+
+ return 0;
+
+fail:
+ atmel_hlcdc_crtc_destroy(&crtc->base);
+ return ret;
+}
+
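atmel_hlcdc_crtc_mode_set() above derives the pixel clock by optionally setting ATMEL_HLCDC_CLKSEL to double the source rate and then rounding the divider up, so the programmed clock never exceeds the requested one. A worked example with illustrative rates (66 MHz system clock, 40 MHz mode clock):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long prate = 66000000;         /* sys_clk rate, example */
	unsigned long mode_rate = 40000000;     /* crtc_clock * 1000, example */
	int clksel = 0;
	int div;

	if ((prate / 2) < mode_rate) {          /* 33 MHz < 40 MHz */
		prate *= 2;                     /* use the doubled clock */
		clksel = 1;
	}

	div = DIV_ROUND_UP(prate, mode_rate);   /* 132 MHz / 40 MHz -> 4 */
	if (div < 2)
		div = 2;

	printf("CLKSEL=%d CLKDIV=%d pixel clock=%lu Hz\n",
	       clksel, div, prate / div);       /* 33000000 Hz */
	return 0;
}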
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
new file mode 100644
index 000000000000..7320a6c6613f
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2014 Traphandler
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include "atmel_hlcdc_dc.h"
+
+#define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8
+
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
+ {
+ .name = "base",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x40,
+ .id = 0,
+ .type = ATMEL_HLCDC_BASE_LAYER,
+ .nconfigs = 7,
+ .layout = {
+ .xstride = { 2 },
+ .default_color = 3,
+ .general_config = 4,
+ .disc_pos = 5,
+ .disc_size = 6,
+ },
+ },
+ {
+ .name = "overlay1",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x140,
+ .id = 1,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 10,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+ {
+ .name = "overlay2",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x240,
+ .id = 2,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 10,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+ {
+ .name = "high-end-overlay",
+ .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+ .regs_offset = 0x340,
+ .id = 3,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 42,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .memsize = 4,
+ .xstride = { 5, 7 },
+ .pstride = { 6, 8 },
+ .default_color = 9,
+ .chroma_key = 10,
+ .chroma_key_mask = 11,
+ .general_config = 12,
+ .csc = 14,
+ },
+ },
+ {
+ .name = "cursor",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x440,
+ .id = 4,
+ .type = ATMEL_HLCDC_CURSOR_LAYER,
+ .nconfigs = 10,
+ .max_width = 128,
+ .max_height = 128,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
+ .min_width = 0,
+ .min_height = 0,
+ .max_width = 2048,
+ .max_height = 2048,
+ .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers),
+ .layers = atmel_hlcdc_sama5d3_layers,
+};
+
+static const struct of_device_id atmel_hlcdc_of_match[] = {
+ {
+ .compatible = "atmel,sama5d3-hlcdc",
+ .data = &atmel_hlcdc_dc_sama5d3,
+ },
+ { /* sentinel */ },
+};
+
+int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
+ struct drm_display_mode *mode)
+{
+ int vfront_porch = mode->vsync_start - mode->vdisplay;
+ int vback_porch = mode->vtotal - mode->vsync_end;
+ int vsync_len = mode->vsync_end - mode->vsync_start;
+ int hfront_porch = mode->hsync_start - mode->hdisplay;
+ int hback_porch = mode->htotal - mode->hsync_end;
+ int hsync_len = mode->hsync_end - mode->hsync_start;
+
+ if (hsync_len > 0x40 || hsync_len < 1)
+ return MODE_HSYNC;
+
+ if (vsync_len > 0x40 || vsync_len < 1)
+ return MODE_VSYNC;
+
+ if (hfront_porch > 0x200 || hfront_porch < 1 ||
+ hback_porch > 0x200 || hback_porch < 1 ||
+ mode->hdisplay < 1)
+ return MODE_H_ILLEGAL;
+
+ if (vfront_porch > 0x40 || vfront_porch < 1 ||
+ vback_porch > 0x40 || vback_porch < 0 ||
+ mode->vdisplay < 1)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
+{
+ struct drm_device *dev = data;
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ unsigned long status;
+ unsigned int imr, isr;
+ int i;
+
+ regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_IMR, &imr);
+ regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
+ status = imr & isr;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & ATMEL_HLCDC_SOF)
+ atmel_hlcdc_crtc_irq(dc->crtc);
+
+ for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
+ struct atmel_hlcdc_layer *layer = dc->layers[i];
+
+ if (!(ATMEL_HLCDC_LAYER_STATUS(i) & status) || !layer)
+ continue;
+
+ atmel_hlcdc_layer_irq(layer);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+
+ if (dc->fbdev) {
+ drm_fbdev_cma_hotplug_event(dc->fbdev);
+ } else {
+ dc->fbdev = drm_fbdev_cma_init(dev, 24,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(dc->fbdev))
+ dc->fbdev = NULL;
+ }
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = atmel_hlcdc_fb_create,
+ .output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
+};
+
+static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct atmel_hlcdc_planes *planes;
+ int ret;
+ int i;
+
+ drm_mode_config_init(dev);
+
+ ret = atmel_hlcdc_create_outputs(dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to create panel: %d\n", ret);
+ return ret;
+ }
+
+ planes = atmel_hlcdc_create_planes(dev);
+ if (IS_ERR(planes)) {
+ dev_err(dev->dev, "failed to create planes\n");
+ return PTR_ERR(planes);
+ }
+
+ dc->planes = planes;
+
+ dc->layers[planes->primary->layer.desc->id] =
+ &planes->primary->layer;
+
+ if (planes->cursor)
+ dc->layers[planes->cursor->layer.desc->id] =
+ &planes->cursor->layer;
+
+ for (i = 0; i < planes->noverlays; i++)
+ dc->layers[planes->overlays[i]->layer.desc->id] =
+ &planes->overlays[i]->layer;
+
+ ret = atmel_hlcdc_crtc_create(dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to create crtc\n");
+ return ret;
+ }
+
+ dev->mode_config.min_width = dc->desc->min_width;
+ dev->mode_config.min_height = dc->desc->min_height;
+ dev->mode_config.max_width = dc->desc->max_width;
+ dev->mode_config.max_height = dc->desc->max_height;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ return 0;
+}
+
+static int atmel_hlcdc_dc_load(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ const struct of_device_id *match;
+ struct atmel_hlcdc_dc *dc;
+ int ret;
+
+ match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "invalid compatible string\n");
+ return -ENODEV;
+ }
+
+ if (!match->data) {
+ dev_err(&pdev->dev, "invalid hlcdc description\n");
+ return -EINVAL;
+ }
+
+ dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ return -ENOMEM;
+
+ dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
+ if (!dc->wq)
+ return -ENOMEM;
+
+ dc->desc = match->data;
+ dc->hlcdc = dev_get_drvdata(dev->dev->parent);
+ dev->dev_private = dc;
+
+ ret = clk_prepare_enable(dc->hlcdc->periph_clk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable periph_clk\n");
+ goto err_destroy_wq;
+ }
+
+ pm_runtime_enable(dev->dev);
+
+ pm_runtime_put_sync(dev->dev);
+
+ ret = atmel_hlcdc_dc_modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+ goto err_periph_clk_disable;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto err_periph_clk_disable;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev, dc->hlcdc->irq);
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto err_periph_clk_disable;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ drm_kms_helper_poll_init(dev);
+
+ /* force connectors detection */
+ drm_helper_hpd_irq_event(dev);
+
+ return 0;
+
+err_periph_clk_disable:
+ pm_runtime_disable(dev->dev);
+ clk_disable_unprepare(dc->hlcdc->periph_clk);
+
+err_destroy_wq:
+ destroy_workqueue(dc->wq);
+
+ return ret;
+}
+
+static void atmel_hlcdc_dc_unload(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+
+ if (dc->fbdev)
+ drm_fbdev_cma_fini(dc->fbdev);
+ flush_workqueue(dc->wq);
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+ dev->dev_private = NULL;
+
+ pm_runtime_disable(dev->dev);
+ clk_disable_unprepare(dc->hlcdc->periph_clk);
+ destroy_workqueue(dc->wq);
+}
+
+static int atmel_hlcdc_dc_connector_plug_all(struct drm_device *dev)
+{
+ struct drm_connector *connector, *failed;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ failed = connector;
+ goto err;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+
+err:
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (failed == connector)
+ break;
+
+ drm_connector_unregister(connector);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
+{
+ mutex_lock(&dev->mode_config.mutex);
+ drm_connector_unplug_all(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void atmel_hlcdc_dc_preclose(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ atmel_hlcdc_crtc_cancel_page_flip(crtc, file);
+}
+
+static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(dc->fbdev);
+}
+
+static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ unsigned int cfg = 0;
+ int i;
+
+ /* Enable interrupts on activated layers */
+ for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
+ if (dc->layers[i])
+ cfg |= ATMEL_HLCDC_LAYER_STATUS(i);
+ }
+
+ regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, cfg);
+
+ return 0;
+}
+
+static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ unsigned int isr;
+
+ regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, 0xffffffff);
+ regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
+}
+
+static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+
+ /* Enable SOF (Start Of Frame) interrupt for vblank counting */
+ regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, ATMEL_HLCDC_SOF);
+
+ return 0;
+}
+
+static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+
+ regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, ATMEL_HLCDC_SOF);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver atmel_hlcdc_dc_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .preclose = atmel_hlcdc_dc_preclose,
+ .lastclose = atmel_hlcdc_dc_lastclose,
+ .irq_handler = atmel_hlcdc_dc_irq_handler,
+ .irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
+ .irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
+ .irq_uninstall = atmel_hlcdc_dc_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = atmel_hlcdc_dc_enable_vblank,
+ .disable_vblank = atmel_hlcdc_dc_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .fops = &fops,
+ .name = "atmel-hlcdc",
+ .desc = "Atmel HLCD Controller DRM",
+ .date = "20141504",
+ .major = 1,
+ .minor = 0,
+};
+
+static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
+{
+ struct drm_device *ddev;
+ int ret;
+
+ ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
+ if (!ddev)
+ return -ENOMEM;
+
+ ret = drm_dev_set_unique(ddev, dev_name(ddev->dev));
+ if (ret)
+ goto err_unref;
+
+ ret = atmel_hlcdc_dc_load(ddev);
+ if (ret)
+ goto err_unref;
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto err_unload;
+
+ ret = atmel_hlcdc_dc_connector_plug_all(ddev);
+ if (ret)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ drm_dev_unregister(ddev);
+
+err_unload:
+ atmel_hlcdc_dc_unload(ddev);
+
+err_unref:
+ drm_dev_unref(ddev);
+
+ return ret;
+}
+
+static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+
+ atmel_hlcdc_dc_connector_unplug_all(ddev);
+ drm_dev_unregister(ddev);
+ atmel_hlcdc_dc_unload(ddev);
+ drm_dev_unref(ddev);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
+ { .compatible = "atmel,hlcdc-display-controller" },
+ { },
+};
+
+static struct platform_driver atmel_hlcdc_dc_platform_driver = {
+ .probe = atmel_hlcdc_dc_drm_probe,
+ .remove = atmel_hlcdc_dc_drm_remove,
+ .driver = {
+ .name = "atmel-hlcdc-display-controller",
+ .of_match_table = atmel_hlcdc_dc_of_match,
+ },
+};
+module_platform_driver(atmel_hlcdc_dc_platform_driver);
+
+MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Atmel HLCDC Display Controller DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel-hlcdc-dc");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
new file mode 100644
index 000000000000..7bc96af3397a
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2014 Traphandler
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DRM_ATMEL_HLCDC_H
+#define DRM_ATMEL_HLCDC_H
+
+#include <linux/clk.h>
+#include <linux/irqdomain.h>
+#include <linux/pwm.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+
+#include "atmel_hlcdc_layer.h"
+
+#define ATMEL_HLCDC_MAX_LAYERS 5
+
+/**
+ * Atmel HLCDC Display Controller description structure.
+ *
+ * This structure describes the HLCDC IP capabilities and depends on the
+ * HLCDC IP version (or Atmel SoC family).
+ *
+ * @min_width: minimum width supported by the Display Controller
+ * @min_height: minimum height supported by the Display Controller
+ * @max_width: maximum width supported by the Display Controller
+ * @max_height: maximum height supported by the Display Controller
+ * @layers: a layer description table describing available layers
+ * @nlayers: layer description table size
+ */
+struct atmel_hlcdc_dc_desc {
+ int min_width;
+ int min_height;
+ int max_width;
+ int max_height;
+ const struct atmel_hlcdc_layer_desc *layers;
+ int nlayers;
+};
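+
+/*
+ * Illustrative sketch (the values and the example_layers table are made up
+ * and do not describe a real SoC): a controller limited to 2048x2048 with
+ * its layer table would be described as
+ *
+ * static const struct atmel_hlcdc_dc_desc example_dc_desc = {
+ * .min_width = 0,
+ * .min_height = 0,
+ * .max_width = 2048,
+ * .max_height = 2048,
+ * .nlayers = ARRAY_SIZE(example_layers),
+ * .layers = example_layers,
+ * };
+ */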
+
+/**
+ * Atmel HLCDC Plane properties.
+ *
+ * This structure stores plane property definitions.
+ *
+ * @alpha: alpha blending (or transparency) property
+ * @rotation: rotation property
+ */
+struct atmel_hlcdc_plane_properties {
+ struct drm_property *alpha;
+ struct drm_property *rotation;
+};
+
+/**
+ * Atmel HLCDC Plane.
+ *
+ * @base: base DRM plane structure
+ * @layer: HLCDC layer structure
+ * @properties: pointer to the property definitions structure
+ * @rotation: current rotation status
+ */
+struct atmel_hlcdc_plane {
+ struct drm_plane base;
+ struct atmel_hlcdc_layer layer;
+ struct atmel_hlcdc_plane_properties *properties;
+ unsigned int rotation;
+};
+
+static inline struct atmel_hlcdc_plane *
+drm_plane_to_atmel_hlcdc_plane(struct drm_plane *p)
+{
+ return container_of(p, struct atmel_hlcdc_plane, base);
+}
+
+static inline struct atmel_hlcdc_plane *
+atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *l)
+{
+ return container_of(l, struct atmel_hlcdc_plane, layer);
+}
+
+/**
+ * Atmel HLCDC Plane update request structure.
+ *
+ * @crtc_x: x position of the plane relative to the CRTC
+ * @crtc_y: y position of the plane relative to the CRTC
+ * @crtc_w: visible width of the plane
+ * @crtc_h: visible height of the plane
+ * @src_x: x buffer position
+ * @src_y: y buffer position
+ * @src_w: buffer width
+ * @src_h: buffer height
+ * @fb: framebuffer object
+ * @bpp: bytes per pixel deduced from pixel_format
+ * @offsets: offsets to apply to the GEM buffers
+ * @xstride: value to add to the pixel pointer between each line
+ * @pstride: value to add to the pixel pointer between each pixel
+ * @nplanes: number of planes (deduced from pixel_format)
+ */
+struct atmel_hlcdc_plane_update_req {
+ int crtc_x;
+ int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ uint32_t src_x;
+ uint32_t src_y;
+ uint32_t src_w;
+ uint32_t src_h;
+ struct drm_framebuffer *fb;
+
+ /* These fields are private and should not be touched */
+ int bpp[ATMEL_HLCDC_MAX_PLANES];
+ unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
+ int xstride[ATMEL_HLCDC_MAX_PLANES];
+ int pstride[ATMEL_HLCDC_MAX_PLANES];
+ int nplanes;
+};
+
+/**
+ * Atmel HLCDC Planes.
+ *
+ * This structure stores the instantiated HLCDC Planes and can be accessed by
+ * the HLCDC Display Controller or the HLCDC CRTC.
+ *
+ * @primary: primary plane
+ * @cursor: hardware cursor plane
+ * @overlays: overlay plane table
+ * @noverlays: number of overlay planes
+ */
+struct atmel_hlcdc_planes {
+ struct atmel_hlcdc_plane *primary;
+ struct atmel_hlcdc_plane *cursor;
+ struct atmel_hlcdc_plane **overlays;
+ int noverlays;
+};
+
+/**
+ * Atmel HLCDC Display Controller.
+ *
+ * @desc: HLCDC Display Controller description
+ * @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
+ * @fbdev: framebuffer device attached to the Display Controller
+ * @crtc: CRTC provided by the display controller
+ * @planes: instantiated planes
+ * @layers: active HLCDC layers
+ * @wq: display controller workqueue
+ */
+struct atmel_hlcdc_dc {
+ const struct atmel_hlcdc_dc_desc *desc;
+ struct atmel_hlcdc *hlcdc;
+ struct drm_fbdev_cma *fbdev;
+ struct drm_crtc *crtc;
+ struct atmel_hlcdc_planes *planes;
+ struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
+ struct workqueue_struct *wq;
+};
+
+extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;
+extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats;
+
+int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
+ struct drm_display_mode *mode);
+
+struct atmel_hlcdc_planes *
+atmel_hlcdc_create_planes(struct drm_device *dev);
+
+int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
+ struct atmel_hlcdc_plane_update_req *req,
+ const struct drm_display_mode *mode);
+
+int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
+ struct atmel_hlcdc_plane_update_req *req);
+
+int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w,
+ unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ const struct drm_display_mode *mode);
+
+void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
+
+void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
+ struct drm_file *file);
+
+int atmel_hlcdc_crtc_create(struct drm_device *dev);
+
+int atmel_hlcdc_create_outputs(struct drm_device *dev);
+
+#endif /* DRM_ATMEL_HLCDC_H */
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
new file mode 100644
index 000000000000..063d2a7b941f
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+
+#include "atmel_hlcdc_dc.h"
+
+static void
+atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
+{
+ struct atmel_hlcdc_layer_fb_flip *flip = val;
+
+ if (flip->fb)
+ drm_framebuffer_unreference(flip->fb);
+ kfree(flip);
+}
+
+static void
+atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
+{
+ if (flip->fb)
+ drm_framebuffer_unreference(flip->fb);
+ kfree(flip->task);
+ kfree(flip);
+}
+
+static void
+atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
+ struct atmel_hlcdc_layer_fb_flip *flip)
+{
+ int i;
+
+ if (!flip)
+ return;
+
+ for (i = 0; i < layer->max_planes; i++) {
+ if (!flip->dscrs[i])
+ break;
+
+ flip->dscrs[i]->status = 0;
+ flip->dscrs[i] = NULL;
+ }
+
+ drm_flip_work_queue_task(&layer->gc, flip->task);
+ drm_flip_work_commit(&layer->gc, layer->wq);
+}
+
+static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
+ int id)
+{
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct atmel_hlcdc_layer_update_slot *slot;
+
+ if (id < 0 || id > 1)
+ return;
+
+ slot = &upd->slots[id];
+ bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
+ memset(slot->configs, 0,
+ sizeof(*slot->configs) * layer->desc->nconfigs);
+
+ if (slot->fb_flip) {
+ atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
+ slot->fb_flip = NULL;
+ }
+}
+
+static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ const struct atmel_hlcdc_layer_desc *desc = layer->desc;
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct regmap *regmap = layer->hlcdc->regmap;
+ struct atmel_hlcdc_layer_update_slot *slot;
+ struct atmel_hlcdc_layer_fb_flip *fb_flip;
+ struct atmel_hlcdc_dma_channel_dscr *dscr;
+ unsigned int cfg;
+ u32 action = 0;
+ int i = 0;
+
+ if (upd->pending < 0 || upd->pending > 1)
+ return;
+
+ slot = &upd->slots[upd->pending];
+
+ for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_CFG(layer, cfg),
+ slot->configs[cfg]);
+ action |= ATMEL_HLCDC_LAYER_UPDATE;
+ }
+
+ fb_flip = slot->fb_flip;
+
+ if (!fb_flip->fb)
+ goto apply;
+
+ if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
+ for (i = 0; i < fb_flip->ngems; i++) {
+ dscr = fb_flip->dscrs[i];
+ dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
+ ATMEL_HLCDC_LAYER_DMA_IRQ |
+ ATMEL_HLCDC_LAYER_ADD_IRQ |
+ ATMEL_HLCDC_LAYER_DONE_IRQ;
+
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
+ dscr->addr);
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
+ dscr->ctrl);
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
+ dscr->next);
+ }
+
+ action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
+ dma->status = ATMEL_HLCDC_LAYER_ENABLED;
+ } else {
+ for (i = 0; i < fb_flip->ngems; i++) {
+ dscr = fb_flip->dscrs[i];
+ dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
+ ATMEL_HLCDC_LAYER_DMA_IRQ |
+ ATMEL_HLCDC_LAYER_DSCR_IRQ |
+ ATMEL_HLCDC_LAYER_DONE_IRQ;
+
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
+ dscr->next);
+ }
+
+ action |= ATMEL_HLCDC_LAYER_A2Q;
+ }
+
+ /* Release unneeded descriptors */
+ for (i = fb_flip->ngems; i < layer->max_planes; i++) {
+ fb_flip->dscrs[i]->status = 0;
+ fb_flip->dscrs[i] = NULL;
+ }
+
+ dma->queue = fb_flip;
+ slot->fb_flip = NULL;
+
+apply:
+ if (action)
+ regmap_write(regmap,
+ desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
+ action);
+
+ atmel_hlcdc_layer_update_reset(layer, upd->pending);
+
+ upd->pending = -1;
+}
+
+void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ const struct atmel_hlcdc_layer_desc *desc = layer->desc;
+ struct regmap *regmap = layer->hlcdc->regmap;
+ struct atmel_hlcdc_layer_fb_flip *flip;
+ unsigned long flags;
+ unsigned int isr, imr;
+ unsigned int status;
+ unsigned int plane_status;
+ u32 flip_status;
+
+ int i;
+
+ regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
+ regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
+ status = imr & isr;
+ if (!status)
+ return;
+
+ spin_lock_irqsave(&layer->lock, flags);
+
+ flip = dma->queue ? dma->queue : dma->cur;
+
+ if (!flip) {
+ spin_unlock_irqrestore(&layer->lock, flags);
+ return;
+ }
+
+ /*
+ * Set LOADED and DONE flags: they'll be cleared if at least one
+ * memory plane is not LOADED or DONE.
+ */
+ flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
+ for (i = 0; i < flip->ngems; i++) {
+ plane_status = (status >> (8 * i));
+
+ if (plane_status &
+ (ATMEL_HLCDC_LAYER_ADD_IRQ |
+ ATMEL_HLCDC_LAYER_DSCR_IRQ) &
+ ~flip->dscrs[i]->ctrl) {
+ flip->dscrs[i]->status |=
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
+ flip->dscrs[i]->ctrl |=
+ ATMEL_HLCDC_LAYER_ADD_IRQ |
+ ATMEL_HLCDC_LAYER_DSCR_IRQ;
+ }
+
+ if (plane_status &
+ ATMEL_HLCDC_LAYER_DONE_IRQ &
+ ~flip->dscrs[i]->ctrl) {
+ flip->dscrs[i]->status |=
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
+ flip->dscrs[i]->ctrl |=
+ ATMEL_HLCDC_LAYER_DONE_IRQ;
+ }
+
+ if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
+ flip->dscrs[i]->status |=
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
+
+ /*
+ * Clear LOADED and DONE flags if the memory plane is either
+ * not LOADED or not DONE.
+ */
+ if (!(flip->dscrs[i]->status &
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
+ flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
+
+ if (!(flip->dscrs[i]->status &
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
+ flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
+
+ /*
+ * An overrun on one memory plane impacts the whole framebuffer
+ * transfer, hence we set the OVERRUN flag as soon as there's
+ * one memory plane reporting such an overrun.
+ */
+ flip_status |= flip->dscrs[i]->status &
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
+ }
+
+ /* Get changed bits */
+ flip_status ^= flip->status;
+ flip->status |= flip_status;
+
+ if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
+ atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
+ dma->cur = dma->queue;
+ dma->queue = NULL;
+ }
+
+ if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
+ atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
+ dma->cur = NULL;
+ }
+
+ if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
+ regmap_write(regmap,
+ desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
+ ATMEL_HLCDC_LAYER_RST);
+ if (dma->queue)
+ atmel_hlcdc_layer_fb_flip_release_queue(layer,
+ dma->queue);
+
+ if (dma->cur)
+ atmel_hlcdc_layer_fb_flip_release_queue(layer,
+ dma->cur);
+
+ dma->cur = NULL;
+ dma->queue = NULL;
+ }
+
+ if (!dma->queue) {
+ atmel_hlcdc_layer_update_apply(layer);
+
+ if (!dma->cur)
+ dma->status = ATMEL_HLCDC_LAYER_DISABLED;
+ }
+
+ spin_unlock_irqrestore(&layer->lock, flags);
+}
+
+int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct regmap *regmap = layer->hlcdc->regmap;
+ const struct atmel_hlcdc_layer_desc *desc = layer->desc;
+ unsigned long flags;
+ unsigned int isr;
+
+ spin_lock_irqsave(&layer->lock, flags);
+
+ /* Disable the layer */
+ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
+ ATMEL_HLCDC_LAYER_RST);
+
+ /* Clear all pending interrupts */
+ regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
+
+ /* Discard current and queued framebuffer transfers. */
+ if (dma->cur) {
+ atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
+ dma->cur = NULL;
+ }
+
+ if (dma->queue) {
+ atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
+ dma->queue = NULL;
+ }
+
+ /*
+ * Then discard the pending update request (if any) to prevent
+ * the DMA irq handler from restarting the DMA channel after it has
+ * been disabled.
+ */
+ if (upd->pending >= 0) {
+ atmel_hlcdc_layer_update_reset(layer, upd->pending);
+ upd->pending = -1;
+ }
+
+ dma->status = ATMEL_HLCDC_LAYER_DISABLED;
+
+ spin_unlock_irqrestore(&layer->lock, flags);
+
+ return 0;
+}
+
+int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct regmap *regmap = layer->hlcdc->regmap;
+ struct atmel_hlcdc_layer_fb_flip *fb_flip;
+ struct atmel_hlcdc_layer_update_slot *slot;
+ unsigned long flags;
+ int i, j = 0;
+
+ fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
+ if (!fb_flip)
+ return -ENOMEM;
+
+ fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
+ if (!fb_flip->task) {
+ kfree(fb_flip);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&layer->lock, flags);
+
+ upd->next = upd->pending ? 0 : 1;
+
+ slot = &upd->slots[upd->next];
+
+ for (i = 0; i < layer->max_planes * 4; i++) {
+ if (!dma->dscrs[i].status) {
+ fb_flip->dscrs[j++] = &dma->dscrs[i];
+ dma->dscrs[i].status =
+ ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
+ if (j == layer->max_planes)
+ break;
+ }
+ }
+
+ if (j < layer->max_planes) {
+ /* Not enough free descriptors: release the reserved ones */
+ for (i = 0; i < j; i++)
+ fb_flip->dscrs[i]->status = 0;
+
+ spin_unlock_irqrestore(&layer->lock, flags);
+ atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
+ return -EBUSY;
+ }
+
+ slot->fb_flip = fb_flip;
+
+ if (upd->pending >= 0) {
+ memcpy(slot->configs,
+ upd->slots[upd->pending].configs,
+ layer->desc->nconfigs * sizeof(u32));
+ memcpy(slot->updated_configs,
+ upd->slots[upd->pending].updated_configs,
+ DIV_ROUND_UP(layer->desc->nconfigs,
+ BITS_PER_BYTE * sizeof(unsigned long)) *
+ sizeof(unsigned long));
+ slot->fb_flip->fb = upd->slots[upd->pending].fb_flip->fb;
+ if (upd->slots[upd->pending].fb_flip->fb) {
+ slot->fb_flip->fb =
+ upd->slots[upd->pending].fb_flip->fb;
+ slot->fb_flip->ngems =
+ upd->slots[upd->pending].fb_flip->ngems;
+ drm_framebuffer_reference(slot->fb_flip->fb);
+ }
+ } else {
+ regmap_bulk_read(regmap,
+ layer->desc->regs_offset +
+ ATMEL_HLCDC_LAYER_CFG(layer, 0),
+ upd->slots[upd->next].configs,
+ layer->desc->nconfigs);
+ }
+
+ spin_unlock_irqrestore(&layer->lock, flags);
+
+ return 0;
+}
+
+void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+
+ atmel_hlcdc_layer_update_reset(layer, upd->next);
+ upd->next = -1;
+}
+
+void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
+ struct drm_framebuffer *fb,
+ unsigned int *offsets)
+{
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct atmel_hlcdc_layer_fb_flip *fb_flip;
+ struct atmel_hlcdc_layer_update_slot *slot;
+ struct atmel_hlcdc_dma_channel_dscr *dscr;
+ struct drm_framebuffer *old_fb;
+ int nplanes = 0;
+ int i;
+
+ if (upd->next < 0 || upd->next > 1)
+ return;
+
+ if (fb)
+ nplanes = drm_format_num_planes(fb->pixel_format);
+
+ if (nplanes > layer->max_planes)
+ return;
+
+ slot = &upd->slots[upd->next];
+
+ fb_flip = slot->fb_flip;
+ old_fb = slot->fb_flip->fb;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_gem_cma_object *gem;
+
+ dscr = slot->fb_flip->dscrs[i];
+ gem = drm_fb_cma_get_gem_obj(fb, i);
+ dscr->addr = gem->paddr + offsets[i];
+ }
+
+ fb_flip->ngems = nplanes;
+ fb_flip->fb = fb;
+
+ if (fb)
+ drm_framebuffer_reference(fb);
+
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+}
+
+void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
+ u32 mask, u32 val)
+{
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct atmel_hlcdc_layer_update_slot *slot;
+
+ if (upd->next < 0 || upd->next > 1)
+ return;
+
+ if (cfg >= layer->desc->nconfigs)
+ return;
+
+ slot = &upd->slots[upd->next];
+ slot->configs[cfg] &= ~mask;
+ slot->configs[cfg] |= (val & mask);
+ set_bit(cfg, slot->updated_configs);
+}
+
+void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ struct atmel_hlcdc_layer_update_slot *slot;
+ unsigned long flags;
+
+ if (upd->next < 0 || upd->next > 1)
+ return;
+
+ slot = &upd->slots[upd->next];
+
+ spin_lock_irqsave(&layer->lock, flags);
+
+ /*
+ * Release pending update request and replace it by the new one.
+ */
+ if (upd->pending >= 0)
+ atmel_hlcdc_layer_update_reset(layer, upd->pending);
+
+ upd->pending = upd->next;
+ upd->next = -1;
+
+ if (!dma->queue)
+ atmel_hlcdc_layer_update_apply(layer);
+
+ spin_unlock_irqrestore(&layer->lock, flags);
+}
+
+static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ dma_addr_t dma_addr;
+ int i;
+
+ dma->dscrs = dma_alloc_coherent(dev->dev,
+ layer->max_planes * 4 *
+ sizeof(*dma->dscrs),
+ &dma_addr, GFP_KERNEL);
+ if (!dma->dscrs)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->max_planes * 4; i++) {
+ struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];
+
+ dscr->next = dma_addr + (i * sizeof(*dscr));
+ }
+
+ return 0;
+}
+
+static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer)
+{
+ struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
+ int i;
+
+ for (i = 0; i < layer->max_planes * 4; i++) {
+ struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];
+
+ dscr->status = 0;
+ }
+
+ dma_free_coherent(dev->dev, layer->max_planes * 4 *
+ sizeof(*dma->dscrs), dma->dscrs,
+ dma->dscrs[0].next);
+}
+
+static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer,
+ const struct atmel_hlcdc_layer_desc *desc)
+{
+ struct atmel_hlcdc_layer_update *upd = &layer->update;
+ int updated_size;
+ void *buffer;
+ int i;
+
+ updated_size = DIV_ROUND_UP(desc->nconfigs,
+ BITS_PER_BYTE *
+ sizeof(unsigned long));
+
+ buffer = devm_kzalloc(dev->dev,
+ ((desc->nconfigs * sizeof(u32)) +
+ (updated_size * sizeof(unsigned long))) * 2,
+ GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
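+ /*
+ * The single allocation is shared by the two update slots and is laid
+ * out as: [updated_configs0][configs0][updated_configs1][configs1].
+ */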
+ for (i = 0; i < 2; i++) {
+ upd->slots[i].updated_configs = buffer;
+ buffer += updated_size * sizeof(unsigned long);
+ upd->slots[i].configs = buffer;
+ buffer += desc->nconfigs * sizeof(u32);
+ }
+
+ upd->pending = -1;
+ upd->next = -1;
+
+ return 0;
+}
+
+int atmel_hlcdc_layer_init(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer,
+ const struct atmel_hlcdc_layer_desc *desc)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct regmap *regmap = dc->hlcdc->regmap;
+ unsigned int tmp;
+ int ret;
+ int i;
+
+ layer->hlcdc = dc->hlcdc;
+ layer->wq = dc->wq;
+ layer->desc = desc;
+
+ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
+ ATMEL_HLCDC_LAYER_RST);
+ for (i = 0; i < desc->formats->nformats; i++) {
+ int nplanes = drm_format_num_planes(desc->formats->formats[i]);
+
+ if (nplanes > layer->max_planes)
+ layer->max_planes = nplanes;
+ }
+
+ spin_lock_init(&layer->lock);
+ drm_flip_work_init(&layer->gc, desc->name,
+ atmel_hlcdc_layer_fb_flip_release);
+ ret = atmel_hlcdc_layer_dma_init(dev, layer);
+ if (ret)
+ return ret;
+
+ ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
+ if (ret)
+ return ret;
+
+ /* Flush Status Register */
+ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
+ 0xffffffff);
+ regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
+ &tmp);
+
+ tmp = 0;
+ for (i = 0; i < layer->max_planes; i++)
+ tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
+ ATMEL_HLCDC_LAYER_DSCR_IRQ |
+ ATMEL_HLCDC_LAYER_ADD_IRQ |
+ ATMEL_HLCDC_LAYER_DONE_IRQ |
+ ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);
+
+ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);
+
+ return 0;
+}
+
+void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer)
+{
+ const struct atmel_hlcdc_layer_desc *desc = layer->desc;
+ struct regmap *regmap = layer->hlcdc->regmap;
+
+ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
+ 0xffffffff);
+ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
+ ATMEL_HLCDC_LAYER_RST);
+
+ atmel_hlcdc_layer_dma_cleanup(dev, layer);
+ drm_flip_work_cleanup(&layer->gc);
+}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
new file mode 100644
index 000000000000..27e56c0862ec
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DRM_ATMEL_HLCDC_LAYER_H
+#define DRM_ATMEL_HLCDC_LAYER_H
+
+#include <linux/mfd/atmel-hlcdc.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drmP.h>
+
+#define ATMEL_HLCDC_LAYER_CHER 0x0
+#define ATMEL_HLCDC_LAYER_CHDR 0x4
+#define ATMEL_HLCDC_LAYER_CHSR 0x8
+#define ATMEL_HLCDC_LAYER_DMA_CHAN BIT(0)
+#define ATMEL_HLCDC_LAYER_UPDATE BIT(1)
+#define ATMEL_HLCDC_LAYER_A2Q BIT(2)
+#define ATMEL_HLCDC_LAYER_RST BIT(8)
+
+#define ATMEL_HLCDC_LAYER_IER 0xc
+#define ATMEL_HLCDC_LAYER_IDR 0x10
+#define ATMEL_HLCDC_LAYER_IMR 0x14
+#define ATMEL_HLCDC_LAYER_ISR 0x18
+#define ATMEL_HLCDC_LAYER_DFETCH BIT(0)
+#define ATMEL_HLCDC_LAYER_LFETCH BIT(1)
+#define ATMEL_HLCDC_LAYER_DMA_IRQ BIT(2)
+#define ATMEL_HLCDC_LAYER_DSCR_IRQ BIT(3)
+#define ATMEL_HLCDC_LAYER_ADD_IRQ BIT(4)
+#define ATMEL_HLCDC_LAYER_DONE_IRQ BIT(5)
+#define ATMEL_HLCDC_LAYER_OVR_IRQ BIT(6)
+
+#define ATMEL_HLCDC_LAYER_PLANE_HEAD(n) (((n) * 0x10) + 0x1c)
+#define ATMEL_HLCDC_LAYER_PLANE_ADDR(n) (((n) * 0x10) + 0x20)
+#define ATMEL_HLCDC_LAYER_PLANE_CTRL(n) (((n) * 0x10) + 0x24)
+#define ATMEL_HLCDC_LAYER_PLANE_NEXT(n) (((n) * 0x10) + 0x28)
+#define ATMEL_HLCDC_LAYER_CFG(p, c) (((c) * 4) + ((p)->max_planes * 0x10) + 0x1c)
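+
+/*
+ * Informal example of the layout above: for a layer with max_planes == 2,
+ * config register 3 is accessed at offset (3 * 4) + (2 * 0x10) + 0x1c = 0x48
+ * from the layer register base (regs_offset).
+ */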
+
+#define ATMEL_HLCDC_LAYER_DMA_CFG_ID 0
+#define ATMEL_HLCDC_LAYER_DMA_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, ATMEL_HLCDC_LAYER_DMA_CFG_ID)
+#define ATMEL_HLCDC_LAYER_DMA_SIF BIT(0)
+#define ATMEL_HLCDC_LAYER_DMA_BLEN_MASK GENMASK(5, 4)
+#define ATMEL_HLCDC_LAYER_DMA_BLEN_SINGLE (0 << 4)
+#define ATMEL_HLCDC_LAYER_DMA_BLEN_INCR4 (1 << 4)
+#define ATMEL_HLCDC_LAYER_DMA_BLEN_INCR8 (2 << 4)
+#define ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 (3 << 4)
+#define ATMEL_HLCDC_LAYER_DMA_DLBO BIT(8)
+#define ATMEL_HLCDC_LAYER_DMA_ROTDIS BIT(12)
+#define ATMEL_HLCDC_LAYER_DMA_LOCKDIS BIT(13)
+
+#define ATMEL_HLCDC_LAYER_FORMAT_CFG_ID 1
+#define ATMEL_HLCDC_LAYER_FORMAT_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, ATMEL_HLCDC_LAYER_FORMAT_CFG_ID)
+#define ATMEL_HLCDC_LAYER_RGB (0 << 0)
+#define ATMEL_HLCDC_LAYER_CLUT (1 << 0)
+#define ATMEL_HLCDC_LAYER_YUV (2 << 0)
+#define ATMEL_HLCDC_RGB_MODE(m) (((m) & 0xf) << 4)
+#define ATMEL_HLCDC_CLUT_MODE(m) (((m) & 0x3) << 8)
+#define ATMEL_HLCDC_YUV_MODE(m) (((m) & 0xf) << 12)
+#define ATMEL_HLCDC_YUV422ROT BIT(16)
+#define ATMEL_HLCDC_YUV422SWP BIT(17)
+#define ATMEL_HLCDC_DSCALEOPT BIT(20)
+
+#define ATMEL_HLCDC_XRGB4444_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(0))
+#define ATMEL_HLCDC_ARGB4444_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(1))
+#define ATMEL_HLCDC_RGBA4444_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(2))
+#define ATMEL_HLCDC_RGB565_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(3))
+#define ATMEL_HLCDC_ARGB1555_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(4))
+#define ATMEL_HLCDC_XRGB8888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(9))
+#define ATMEL_HLCDC_RGB888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(10))
+#define ATMEL_HLCDC_ARGB8888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(12))
+#define ATMEL_HLCDC_RGBA8888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(13))
+
+#define ATMEL_HLCDC_AYUV_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(0))
+#define ATMEL_HLCDC_YUYV_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(1))
+#define ATMEL_HLCDC_UYVY_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(2))
+#define ATMEL_HLCDC_YVYU_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(3))
+#define ATMEL_HLCDC_VYUY_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(4))
+#define ATMEL_HLCDC_NV61_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(5))
+#define ATMEL_HLCDC_YUV422_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(6))
+#define ATMEL_HLCDC_NV21_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(7))
+#define ATMEL_HLCDC_YUV420_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(8))
+
+#define ATMEL_HLCDC_LAYER_POS_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.pos)
+#define ATMEL_HLCDC_LAYER_SIZE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.size)
+#define ATMEL_HLCDC_LAYER_MEMSIZE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.memsize)
+#define ATMEL_HLCDC_LAYER_XSTRIDE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.xstride)
+#define ATMEL_HLCDC_LAYER_PSTRIDE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.pstride)
+#define ATMEL_HLCDC_LAYER_DFLTCOLOR_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.default_color)
+#define ATMEL_HLCDC_LAYER_CRKEY_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.chroma_key)
+#define ATMEL_HLCDC_LAYER_CRKEY_MASK_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.chroma_key_mask)
+
+#define ATMEL_HLCDC_LAYER_GENERAL_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.general_config)
+#define ATMEL_HLCDC_LAYER_CRKEY BIT(0)
+#define ATMEL_HLCDC_LAYER_INV BIT(1)
+#define ATMEL_HLCDC_LAYER_ITER2BL BIT(2)
+#define ATMEL_HLCDC_LAYER_ITER BIT(3)
+#define ATMEL_HLCDC_LAYER_REVALPHA BIT(4)
+#define ATMEL_HLCDC_LAYER_GAEN BIT(5)
+#define ATMEL_HLCDC_LAYER_LAEN BIT(6)
+#define ATMEL_HLCDC_LAYER_OVR BIT(7)
+#define ATMEL_HLCDC_LAYER_DMA BIT(8)
+#define ATMEL_HLCDC_LAYER_REP BIT(9)
+#define ATMEL_HLCDC_LAYER_DSTKEY BIT(10)
+#define ATMEL_HLCDC_LAYER_DISCEN BIT(11)
+#define ATMEL_HLCDC_LAYER_GA_SHIFT 16
+#define ATMEL_HLCDC_LAYER_GA_MASK GENMASK(23, ATMEL_HLCDC_LAYER_GA_SHIFT)
+
+#define ATMEL_HLCDC_LAYER_CSC_CFG(p, o) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.csc + o)
+
+#define ATMEL_HLCDC_LAYER_DISC_POS_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.disc_pos)
+
+#define ATMEL_HLCDC_LAYER_DISC_SIZE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.disc_size)
+
+#define ATMEL_HLCDC_MAX_PLANES 3
+
+#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED BIT(0)
+#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED BIT(1)
+#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE BIT(2)
+#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN BIT(3)
+
+/**
+ * Atmel HLCDC Layer registers layout structure
+ *
+ * Each HLCDC layer has its own register organization and a given register
+ * can be placed at different offsets on two different layers, depending on
+ * the layer's capabilities.
+ * This structure stores the register layout of a given layer and is
+ * used by HLCDC layer code to choose the appropriate register to write to
+ * or to read from.
+ *
+ * For all fields, a value of zero means "unsupported".
+ *
+ * See Atmel's datasheet for a detailed description of these registers.
+ *
+ * @xstride: xstride registers
+ * @pstride: pstride registers
+ * @pos: position register
+ * @size: displayed size register
+ * @memsize: memory size register
+ * @default_color: default color register
+ * @chroma_key: chroma key register
+ * @chroma_key_mask: chroma key mask register
+ * @general_config: general layer config register
+ * @disc_pos: discard area position register
+ * @disc_size: discard area size register
+ * @csc: color space conversion register
+ */
+struct atmel_hlcdc_layer_cfg_layout {
+ int xstride[ATMEL_HLCDC_MAX_PLANES];
+ int pstride[ATMEL_HLCDC_MAX_PLANES];
+ int pos;
+ int size;
+ int memsize;
+ int default_color;
+ int chroma_key;
+ int chroma_key_mask;
+ int general_config;
+ int disc_pos;
+ int disc_size;
+ int csc;
+};
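+
+/*
+ * A minimal usage sketch (not taken verbatim from the driver): since a zero
+ * value means "unsupported", callers are expected to test a layout field
+ * before using it, e.g.
+ *
+ * if (layout->memsize)
+ * atmel_hlcdc_layer_update_cfg(layer, layout->memsize, mask, val);
+ */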
+
+/**
+ * Atmel HLCDC framebuffer flip structure
+ *
+ * This structure is allocated when someone asks for a layer update (most
+ * likely a DRM plane update, either primary, overlay or cursor plane) and
+ * released when the layer no longer needs to reference the framebuffer
+ * object (i.e. the layer was disabled or updated).
+ *
+ * @dscrs: DMA descriptors
+ * @task: flip work task used to release this flip request asynchronously
+ * @fb: the referenced framebuffer object
+ * @ngems: number of GEM objects referenced by the fb element
+ * @status: fb flip operation status
+ */
+struct atmel_hlcdc_layer_fb_flip {
+ struct atmel_hlcdc_dma_channel_dscr *dscrs[ATMEL_HLCDC_MAX_PLANES];
+ struct drm_flip_task *task;
+ struct drm_framebuffer *fb;
+ int ngems;
+ u32 status;
+};
+
+/**
+ * Atmel HLCDC DMA descriptor structure
+ *
+ * This structure is used by the HLCDC DMA engine to schedule a DMA transfer.
+ *
+ * The structure fields must remain in this specific order, because they're
+ * used by the HLCDC DMA engine, which expects them in this order.
+ * HLCDC DMA descriptors must be aligned on 64 bits.
+ *
+ * @addr: buffer DMA address
+ * @ctrl: DMA transfer options
+ * @next: next DMA descriptor to fetch
+ * @status: descriptor status (combination of ATMEL_HLCDC_DMA_CHANNEL_DSCR_* flags)
+ */
+struct atmel_hlcdc_dma_channel_dscr {
+ dma_addr_t addr;
+ u32 ctrl;
+ dma_addr_t next;
+ u32 status;
+} __aligned(sizeof(u64));
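+
+/*
+ * Note (derived from atmel_hlcdc_layer_dma_init() and
+ * atmel_hlcdc_layer_update_apply()): each descriptor's next field is
+ * initialized to its own DMA address, so once a descriptor is queued the
+ * channel keeps re-fetching the same buffer on every frame until a new
+ * descriptor is attached to the queue (A2Q) or the channel is reset.
+ */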
+
+/**
+ * Atmel HLCDC layer types
+ */
+enum atmel_hlcdc_layer_type {
+ ATMEL_HLCDC_BASE_LAYER,
+ ATMEL_HLCDC_OVERLAY_LAYER,
+ ATMEL_HLCDC_CURSOR_LAYER,
+ ATMEL_HLCDC_PP_LAYER,
+};
+
+/**
+ * Atmel HLCDC Supported formats structure
+ *
+ * This structure lists all the formats supported by a given layer.
+ *
+ * @nformats: number of supported formats
+ * @formats: supported formats
+ */
+struct atmel_hlcdc_formats {
+ int nformats;
+ uint32_t *formats;
+};
+
+/**
+ * Atmel HLCDC Layer description structure
+ *
+ * This structure describes the capabilities provided by a given layer.
+ *
+ * @name: layer name
+ * @type: layer type
+ * @id: layer id
+ * @regs_offset: offset of the layer registers from the HLCDC registers base
+ * @nconfigs: number of config registers provided by this layer
+ * @formats: supported formats
+ * @layout: config registers layout
+ * @max_width: maximum width supported by this layer (0 means unlimited)
+ * @max_height: maximum height supported by this layer (0 means unlimited)
+ */
+struct atmel_hlcdc_layer_desc {
+ const char *name;
+ enum atmel_hlcdc_layer_type type;
+ int id;
+ int regs_offset;
+ int nconfigs;
+ struct atmel_hlcdc_formats *formats;
+ struct atmel_hlcdc_layer_cfg_layout layout;
+ int max_width;
+ int max_height;
+};
+
+/**
+ * Atmel HLCDC Layer Update Slot structure
+ *
+ * This structure stores layer update requests to be applied on the next frame.
+ * This is the base structure behind the atomic layer update infrastructure.
+ *
+ * Atomic layer updates provide a way to update all of a layer's parameters
+ * simultaneously. This is needed to avoid incompatible sequential updates
+ * like this one:
+ * 1) update layer format from RGB888 (1 plane/buffer) to YUV422
+ * (2 planes/buffers)
+ * 2) the format update is applied but the DMA channel for the second
+ * plane/buffer is not enabled
+ * 3) enable the DMA channel for the second plane
+ *
+ * @fb_flip: fb_flip object
+ * @updated_configs: bitmask used to record modified configs
+ * @configs: new config values
+ */
+struct atmel_hlcdc_layer_update_slot {
+ struct atmel_hlcdc_layer_fb_flip *fb_flip;
+ unsigned long *updated_configs;
+ u32 *configs;
+};
+
+/**
+ * Atmel HLCDC Layer Update structure
+ *
+ * This structure provides a way to queue layer update requests.
+ *
+ * At a given time there is at most:
+ * - one pending update request, which means the update request has been
+ * committed (or validated) and is waiting for the DMA channel(s) to be
+ * available
+ * - one request being prepared, which means someone started a layer update
+ * but has not committed it yet. There cannot be more than one started
+ * request, because the update lock is taken when starting a layer update
+ * and released when committing or rolling back the request.
+ *
+ * @slots: update slots. One is used for the pending request and the other
+ * one for the update request being prepared
+ * @pending: the pending slot index or -1 if no request is pending
+ * @next: the started update slot index, or -1 if no update has been started
+ */
+struct atmel_hlcdc_layer_update {
+ struct atmel_hlcdc_layer_update_slot slots[2];
+ int pending;
+ int next;
+};
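+
+/*
+ * Typical usage sketch (error handling elided; the function prototypes are
+ * the ones declared at the end of this header, and cfg_id, mask, val, fb and
+ * offsets stand for caller-provided values):
+ *
+ * ret = atmel_hlcdc_layer_update_start(layer);
+ * if (ret)
+ * return ret;
+ *
+ * atmel_hlcdc_layer_update_cfg(layer, cfg_id, mask, val);
+ * atmel_hlcdc_layer_update_set_fb(layer, fb, offsets);
+ *
+ * atmel_hlcdc_layer_update_commit(layer);
+ *
+ * atmel_hlcdc_layer_update_rollback() can be called instead of _commit() to
+ * drop a started but not yet committed update.
+ */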
+
+enum atmel_hlcdc_layer_dma_channel_status {
+ ATMEL_HLCDC_LAYER_DISABLED,
+ ATMEL_HLCDC_LAYER_ENABLED,
+ ATMEL_HLCDC_LAYER_DISABLING,
+};
+
+/**
+ * Atmel HLCDC Layer DMA channel structure
+ *
+ * This structure stores information on the DMA channel associated with a
+ * given layer.
+ *
+ * @status: DMA channel status
+ * @cur: current framebuffer
+ * @queue: next framebuffer
+ * @dscrs: allocated DMA descriptors
+ */
+struct atmel_hlcdc_layer_dma_channel {
+ enum atmel_hlcdc_layer_dma_channel_status status;
+ struct atmel_hlcdc_layer_fb_flip *cur;
+ struct atmel_hlcdc_layer_fb_flip *queue;
+ struct atmel_hlcdc_dma_channel_dscr *dscrs;
+};
+
+/**
+ * Atmel HLCDC Layer structure
+ *
+ * This structure stores information on the layer instance.
+ *
+ * @desc: layer description
+ * @max_planes: maximum planes/buffers that can be associated with this layer.
+ * This depends on the supported formats.
+ * @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
+ * @wq: workqueue used to run the fb flip release work (see @gc)
+ * @dma: dma channel
+ * @gc: fb flip garbage collector
+ * @update: update handler
+ * @lock: layer lock
+ */
+struct atmel_hlcdc_layer {
+ const struct atmel_hlcdc_layer_desc *desc;
+ int max_planes;
+ struct atmel_hlcdc *hlcdc;
+ struct workqueue_struct *wq;
+ struct drm_flip_work gc;
+ struct atmel_hlcdc_layer_dma_channel dma;
+ struct atmel_hlcdc_layer_update update;
+ spinlock_t lock;
+};
+
+void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer);
+
+int atmel_hlcdc_layer_init(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer,
+ const struct atmel_hlcdc_layer_desc *desc);
+
+void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
+ struct atmel_hlcdc_layer *layer);
+
+int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
+
+int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer);
+
+void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
+ u32 mask, u32 val);
+
+void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
+ struct drm_framebuffer *fb,
+ unsigned int *offsets);
+
+void atmel_hlcdc_layer_update_set_finished(struct atmel_hlcdc_layer *layer,
+ void (*finished)(void *data),
+ void *finished_data);
+
+void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer);
+
+void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer);
+
+#endif /* DRM_ATMEL_HLCDC_LAYER_H */
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
new file mode 100644
index 000000000000..c402192362c5
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2014 Traphandler
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include "atmel_hlcdc_dc.h"
+
+/**
+ * Atmel HLCDC RGB output mode
+ */
+enum atmel_hlcdc_connector_rgb_mode {
+ ATMEL_HLCDC_CONNECTOR_RGB444,
+ ATMEL_HLCDC_CONNECTOR_RGB565,
+ ATMEL_HLCDC_CONNECTOR_RGB666,
+ ATMEL_HLCDC_CONNECTOR_RGB888,
+};
+
+/**
+ * Atmel HLCDC RGB connector structure
+ *
+ * This structure stores RGB slave device information.
+ *
+ * @connector: DRM connector
+ * @encoder: DRM encoder
+ * @dc: pointer to the atmel_hlcdc_dc structure
+ * @dpms: current DPMS mode
+ */
+struct atmel_hlcdc_rgb_output {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct atmel_hlcdc_dc *dc;
+ int dpms;
+};
+
+static inline struct atmel_hlcdc_rgb_output *
+drm_connector_to_atmel_hlcdc_rgb_output(struct drm_connector *connector)
+{
+ return container_of(connector, struct atmel_hlcdc_rgb_output,
+ connector);
+}
+
+static inline struct atmel_hlcdc_rgb_output *
+drm_encoder_to_atmel_hlcdc_rgb_output(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
+}
+
+/**
+ * Atmel HLCDC Panel device structure
+ *
+ * This structure is a specialization of the slave device structure used to
+ * interface with DRM panels.
+ *
+ * @base: base slave device fields
+ * @panel: drm panel attached to this slave device
+ */
+struct atmel_hlcdc_panel {
+ struct atmel_hlcdc_rgb_output base;
+ struct drm_panel *panel;
+};
+
+static inline struct atmel_hlcdc_panel *
+atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
+{
+ return container_of(output, struct atmel_hlcdc_panel, base);
+}
+
+static void atmel_hlcdc_panel_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
+ struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == rgb->dpms)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ drm_panel_disable(panel->panel);
+ else
+ drm_panel_enable(panel->panel);
+
+ rgb->dpms = mode;
+}
+
+static bool
+atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+static void atmel_hlcdc_panel_encoder_prepare(struct drm_encoder *encoder)
+{
+ atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void atmel_hlcdc_panel_encoder_commit(struct drm_encoder *encoder)
+{
+ atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void
+atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
+ struct drm_display_info *info = &rgb->connector.display_info;
+ unsigned int cfg;
+
+ cfg = 0;
+
+ if (info->num_bus_formats) {
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
+ break;
+ default:
+ break;
+ }
+ }
+
+ regmap_update_bits(rgb->dc->hlcdc->regmap, ATMEL_HLCDC_CFG(5),
+ ATMEL_HLCDC_MODE_MASK,
+ cfg);
+}
+
+static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
+ .dpms = atmel_hlcdc_panel_encoder_dpms,
+ .mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
+ .prepare = atmel_hlcdc_panel_encoder_prepare,
+ .commit = atmel_hlcdc_panel_encoder_commit,
+ .mode_set = atmel_hlcdc_rgb_encoder_mode_set,
+};
+
+static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ memset(encoder, 0, sizeof(*encoder));
+}
+
+static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
+ .destroy = atmel_hlcdc_rgb_encoder_destroy,
+};
+
+static int atmel_hlcdc_panel_get_modes(struct drm_connector *connector)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_connector_to_atmel_hlcdc_rgb_output(connector);
+ struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
+
+ return panel->panel->funcs->get_modes(panel->panel);
+}
+
+static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_connector_to_atmel_hlcdc_rgb_output(connector);
+
+ return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
+}
+
+
+static struct drm_encoder *
+atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_connector_to_atmel_hlcdc_rgb_output(connector);
+
+ return &rgb->encoder;
+}
+
+static struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
+ .get_modes = atmel_hlcdc_panel_get_modes,
+ .mode_valid = atmel_hlcdc_rgb_mode_valid,
+ .best_encoder = atmel_hlcdc_rgb_best_encoder,
+};
+
+static enum drm_connector_status
+atmel_hlcdc_panel_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void
+atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_connector_to_atmel_hlcdc_rgb_output(connector);
+ struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
+
+ drm_panel_detach(panel->panel);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = atmel_hlcdc_panel_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = atmel_hlcdc_panel_connector_destroy,
+};
+
+static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
+ struct of_endpoint *ep)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct device_node *np;
+ struct drm_panel *p = NULL;
+ struct atmel_hlcdc_panel *panel;
+ int ret;
+
+ np = of_graph_get_remote_port_parent(ep->local_node);
+ if (!np)
+ return -EINVAL;
+
+ p = of_drm_find_panel(np);
+ of_node_put(np);
+
+ if (!p)
+ return -EPROBE_DEFER;
+
+ panel = devm_kzalloc(dev->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->base.dpms = DRM_MODE_DPMS_OFF;
+
+ panel->base.dc = dc;
+
+ drm_encoder_helper_add(&panel->base.encoder,
+ &atmel_hlcdc_panel_encoder_helper_funcs);
+ ret = drm_encoder_init(dev, &panel->base.encoder,
+ &atmel_hlcdc_panel_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ if (ret)
+ return ret;
+
+ panel->base.connector.dpms = DRM_MODE_DPMS_OFF;
+ panel->base.connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+ drm_connector_helper_add(&panel->base.connector,
+ &atmel_hlcdc_panel_connector_helper_funcs);
+ ret = drm_connector_init(dev, &panel->base.connector,
+ &atmel_hlcdc_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret)
+ goto err_encoder_cleanup;
+
+ drm_mode_connector_attach_encoder(&panel->base.connector,
+ &panel->base.encoder);
+ panel->base.encoder.possible_crtcs = 0x1;
+
+ drm_panel_attach(p, &panel->base.connector);
+ panel->panel = p;
+
+ return 0;
+
+err_encoder_cleanup:
+ drm_encoder_cleanup(&panel->base.encoder);
+
+ return ret;
+}
+
+int atmel_hlcdc_create_outputs(struct drm_device *dev)
+{
+ struct device_node *port_np, *np;
+ struct of_endpoint ep;
+ int ret;
+
+ port_np = of_get_child_by_name(dev->dev->of_node, "port");
+ if (!port_np)
+ return -EINVAL;
+
+ np = of_get_child_by_name(port_np, "endpoint");
+ of_node_put(port_np);
+
+ if (!np)
+ return -EINVAL;
+
+ ret = of_graph_parse_endpoint(np, &ep);
+ of_node_put(np);
+
+ if (ret)
+ return ret;
+
+ /* We currently only support panel output */
+ return atmel_hlcdc_create_panel_output(dev, &ep);
+}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
new file mode 100644
index 000000000000..c5892dcfd745
--- /dev/null
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "atmel_hlcdc_dc.h"
+
+#define SUBPIXEL_MASK 0xffff
+
+static uint32_t rgb_formats[] = {
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+};
+
+struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats = {
+ .formats = rgb_formats,
+ .nformats = ARRAY_SIZE(rgb_formats),
+};
+
+static uint32_t rgb_and_yuv_formats[] = {
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV420,
+};
+
+struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats = {
+ .formats = rgb_and_yuv_formats,
+ .nformats = ARRAY_SIZE(rgb_and_yuv_formats),
+};
+
+static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB4444:
+ *mode = ATMEL_HLCDC_XRGB4444_MODE;
+ break;
+ case DRM_FORMAT_ARGB4444:
+ *mode = ATMEL_HLCDC_ARGB4444_MODE;
+ break;
+ case DRM_FORMAT_RGBA4444:
+ *mode = ATMEL_HLCDC_RGBA4444_MODE;
+ break;
+ case DRM_FORMAT_RGB565:
+ *mode = ATMEL_HLCDC_RGB565_MODE;
+ break;
+ case DRM_FORMAT_RGB888:
+ *mode = ATMEL_HLCDC_RGB888_MODE;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ *mode = ATMEL_HLCDC_ARGB1555_MODE;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ *mode = ATMEL_HLCDC_XRGB8888_MODE;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ *mode = ATMEL_HLCDC_ARGB8888_MODE;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ *mode = ATMEL_HLCDC_RGBA8888_MODE;
+ break;
+ case DRM_FORMAT_AYUV:
+ *mode = ATMEL_HLCDC_AYUV_MODE;
+ break;
+ case DRM_FORMAT_YUYV:
+ *mode = ATMEL_HLCDC_YUYV_MODE;
+ break;
+ case DRM_FORMAT_UYVY:
+ *mode = ATMEL_HLCDC_UYVY_MODE;
+ break;
+ case DRM_FORMAT_YVYU:
+ *mode = ATMEL_HLCDC_YVYU_MODE;
+ break;
+ case DRM_FORMAT_VYUY:
+ *mode = ATMEL_HLCDC_VYUY_MODE;
+ break;
+ case DRM_FORMAT_NV21:
+ *mode = ATMEL_HLCDC_NV21_MODE;
+ break;
+ case DRM_FORMAT_NV61:
+ *mode = ATMEL_HLCDC_NV61_MODE;
+ break;
+ case DRM_FORMAT_YUV420:
+ *mode = ATMEL_HLCDC_YUV420_MODE;
+ break;
+ case DRM_FORMAT_YUV422:
+ *mode = ATMEL_HLCDC_YUV422_MODE;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
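+/*
+ * DRM pixel formats are fourcc codes built from ASCII characters, e.g.
+ * DRM_FORMAT_ARGB8888 is fourcc_code('A', 'R', '2', '4'), so scanning the
+ * four bytes for an 'A' tells whether the format embeds an alpha component.
+ */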
+static bool atmel_hlcdc_format_embedds_alpha(u32 format)
+{
+ int i;
+
+ for (i = 0; i < sizeof(format); i++) {
+ char tmp = (format >> (8 * i)) & 0xff;
+
+ if (tmp == 'A')
+ return true;
+ }
+
+ return false;
+}
+
+static u32 heo_downscaling_xcoef[] = {
+ 0x11343311,
+ 0x000000f7,
+ 0x1635300c,
+ 0x000000f9,
+ 0x1b362c08,
+ 0x000000fb,
+ 0x1f372804,
+ 0x000000fe,
+ 0x24382400,
+ 0x00000000,
+ 0x28371ffe,
+ 0x00000004,
+ 0x2c361bfb,
+ 0x00000008,
+ 0x303516f9,
+ 0x0000000c,
+};
+
+static u32 heo_downscaling_ycoef[] = {
+ 0x00123737,
+ 0x00173732,
+ 0x001b382d,
+ 0x001f3928,
+ 0x00243824,
+ 0x0028391f,
+ 0x002d381b,
+ 0x00323717,
+};
+
+static u32 heo_upscaling_xcoef[] = {
+ 0xf74949f7,
+ 0x00000000,
+ 0xf55f33fb,
+ 0x000000fe,
+ 0xf5701efe,
+ 0x000000ff,
+ 0xf87c0dff,
+ 0x00000000,
+ 0x00800000,
+ 0x00000000,
+ 0x0d7cf800,
+ 0x000000ff,
+ 0x1e70f5ff,
+ 0x000000fe,
+ 0x335ff5fe,
+ 0x000000fb,
+};
+
+static u32 heo_upscaling_ycoef[] = {
+ 0x00004040,
+ 0x00075920,
+ 0x00056f0c,
+ 0x00027b03,
+ 0x00008000,
+ 0x00037b02,
+ 0x000c6f05,
+ 0x00205907,
+};
+
+static void
+atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_update_req *req)
+{
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+
+ if (layout->size)
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->size,
+ 0xffffffff,
+ (req->crtc_w - 1) |
+ ((req->crtc_h - 1) << 16));
+
+ if (layout->memsize)
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->memsize,
+ 0xffffffff,
+ (req->src_w - 1) |
+ ((req->src_h - 1) << 16));
+
+ if (layout->pos)
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->pos,
+ 0xffffffff,
+ req->crtc_x |
+ (req->crtc_y << 16));
+
+ /* TODO: rework the rescaling part */
+ if (req->crtc_w != req->src_w || req->crtc_h != req->src_h) {
+ u32 factor_reg = 0;
+
+ if (req->crtc_w != req->src_w) {
+ int i;
+ u32 factor;
+ u32 *coeff_tab = heo_upscaling_xcoef;
+ u32 max_memsize;
+
+ if (req->crtc_w < req->src_w)
+ coeff_tab = heo_downscaling_xcoef;
+ for (i = 0; i < ARRAY_SIZE(heo_upscaling_xcoef); i++)
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ 17 + i,
+ 0xffffffff,
+ coeff_tab[i]);
+ factor = ((8 * 256 * req->src_w) - (256 * 4)) /
+ req->crtc_w;
+ factor++;
+ max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+ 2048;
+ if (max_memsize > req->src_w)
+ factor--;
+ factor_reg |= factor | 0x80000000;
+ }
+
+ if (req->crtc_h != req->src_h) {
+ int i;
+ u32 factor;
+ u32 *coeff_tab = heo_upscaling_ycoef;
+ u32 max_memsize;
+
+ if (req->crtc_h < req->src_h)
+ coeff_tab = heo_downscaling_ycoef;
+ for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ 33 + i,
+ 0xffffffff,
+ coeff_tab[i]);
+ factor = ((8 * 256 * req->src_h) - (256 * 4)) /
+ req->crtc_h;
+ factor++;
+ max_memsize = ((factor * req->crtc_h) + (256 * 4)) /
+ 2048;
+ if (max_memsize > req->src_h)
+ factor--;
+ factor_reg |= (factor << 16) | 0x80000000;
+ }
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
+ factor_reg);
+ }
+}
+
+static void
+atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_update_req *req)
+{
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+ unsigned int cfg = ATMEL_HLCDC_LAYER_DMA;
+
+ if (plane->base.type != DRM_PLANE_TYPE_PRIMARY) {
+ cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
+ ATMEL_HLCDC_LAYER_ITER;
+
+ if (atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format))
+ cfg |= ATMEL_HLCDC_LAYER_LAEN;
+ else
+ cfg |= ATMEL_HLCDC_LAYER_GAEN;
+ }
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ ATMEL_HLCDC_LAYER_DMA_CFG_ID,
+ ATMEL_HLCDC_LAYER_DMA_BLEN_MASK,
+ ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16);
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
+ ATMEL_HLCDC_LAYER_ITER2BL |
+ ATMEL_HLCDC_LAYER_ITER |
+ ATMEL_HLCDC_LAYER_GAEN |
+ ATMEL_HLCDC_LAYER_LAEN |
+ ATMEL_HLCDC_LAYER_OVR |
+ ATMEL_HLCDC_LAYER_DMA, cfg);
+}
+
+static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_update_req *req)
+{
+ u32 cfg;
+ int ret;
+
+ ret = atmel_hlcdc_format_to_plane_mode(req->fb->pixel_format, &cfg);
+ if (ret)
+ return;
+
+ if ((req->fb->pixel_format == DRM_FORMAT_YUV422 ||
+ req->fb->pixel_format == DRM_FORMAT_NV61) &&
+ (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+ cfg |= ATMEL_HLCDC_YUV422ROT;
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ ATMEL_HLCDC_LAYER_FORMAT_CFG_ID,
+ 0xffffffff,
+ cfg);
+
+ /*
+ * Rotation optimization does not work on RGB888 (rotation still
+ * works, but without the optimization).
+ */
+ if (req->fb->pixel_format == DRM_FORMAT_RGB888)
+ cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS;
+ else
+ cfg = 0;
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ ATMEL_HLCDC_LAYER_DMA_CFG_ID,
+ ATMEL_HLCDC_LAYER_DMA_ROTDIS, cfg);
+}
+
+static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_update_req *req)
+{
+ struct atmel_hlcdc_layer *layer = &plane->layer;
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &layer->desc->layout;
+ int i;
+
+ atmel_hlcdc_layer_update_set_fb(&plane->layer, req->fb, req->offsets);
+
+ for (i = 0; i < req->nplanes; i++) {
+ if (layout->xstride[i]) {
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->xstride[i],
+ 0xffffffff,
+ req->xstride[i]);
+ }
+
+ if (layout->pstride[i]) {
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->pstride[i],
+ 0xffffffff,
+ req->pstride[i]);
+ }
+ }
+}
+
+static int atmel_hlcdc_plane_check_update_req(struct drm_plane *p,
+ struct atmel_hlcdc_plane_update_req *req,
+ const struct drm_display_mode *mode)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+
+ if (!layout->size &&
+ (mode->hdisplay != req->crtc_w ||
+ mode->vdisplay != req->crtc_h))
+ return -EINVAL;
+
+ if (plane->layer.desc->max_height &&
+ req->crtc_h > plane->layer.desc->max_height)
+ return -EINVAL;
+
+ if (plane->layer.desc->max_width &&
+ req->crtc_w > plane->layer.desc->max_width)
+ return -EINVAL;
+
+ if ((req->crtc_h != req->src_h || req->crtc_w != req->src_w) &&
+ (!layout->memsize ||
+ atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format)))
+ return -EINVAL;
+
+ if (req->crtc_x < 0 || req->crtc_y < 0)
+ return -EINVAL;
+
+ if (req->crtc_w + req->crtc_x > mode->hdisplay ||
+ req->crtc_h + req->crtc_y > mode->vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
+ struct atmel_hlcdc_plane_update_req *req,
+ const struct drm_display_mode *mode)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ unsigned int patched_crtc_w;
+ unsigned int patched_crtc_h;
+ unsigned int patched_src_w;
+ unsigned int patched_src_h;
+ unsigned int tmp;
+ int x_offset = 0;
+ int y_offset = 0;
+ int hsub = 1;
+ int vsub = 1;
+ int i;
+
+ if ((req->src_x | req->src_y | req->src_w | req->src_h) &
+ SUBPIXEL_MASK)
+ return -EINVAL;
+
+ req->src_x >>= 16;
+ req->src_y >>= 16;
+ req->src_w >>= 16;
+ req->src_h >>= 16;
+
+ req->nplanes = drm_format_num_planes(req->fb->pixel_format);
+ if (req->nplanes > ATMEL_HLCDC_MAX_PLANES)
+ return -EINVAL;
+
+ /*
+ * Swap width and height in case of 90 or 270 degrees rotation
+ */
+ if (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+ tmp = req->crtc_w;
+ req->crtc_w = req->crtc_h;
+ req->crtc_h = tmp;
+ tmp = req->src_w;
+ req->src_w = req->src_h;
+ req->src_h = tmp;
+ }
+
+ if (req->crtc_x + req->crtc_w > mode->hdisplay)
+ patched_crtc_w = mode->hdisplay - req->crtc_x;
+ else
+ patched_crtc_w = req->crtc_w;
+
+ if (req->crtc_x < 0) {
+ patched_crtc_w += req->crtc_x;
+ x_offset = -req->crtc_x;
+ req->crtc_x = 0;
+ }
+
+ if (req->crtc_y + req->crtc_h > mode->vdisplay)
+ patched_crtc_h = mode->vdisplay - req->crtc_y;
+ else
+ patched_crtc_h = req->crtc_h;
+
+ if (req->crtc_y < 0) {
+ patched_crtc_h += req->crtc_y;
+ y_offset = -req->crtc_y;
+ req->crtc_y = 0;
+ }
+
+ patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * req->src_w,
+ req->crtc_w);
+ patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * req->src_h,
+ req->crtc_h);
+
+ hsub = drm_format_horz_chroma_subsampling(req->fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(req->fb->pixel_format);
+
+ for (i = 0; i < req->nplanes; i++) {
+ unsigned int offset = 0;
+ int xdiv = i ? hsub : 1;
+ int ydiv = i ? vsub : 1;
+
+ req->bpp[i] = drm_format_plane_cpp(req->fb->pixel_format, i);
+ if (!req->bpp[i])
+ return -EINVAL;
+
+ switch (plane->rotation & 0xf) {
+ case BIT(DRM_ROTATE_90):
+ offset = ((y_offset + req->src_y + patched_src_w - 1) /
+ ydiv) * req->fb->pitches[i];
+ offset += ((x_offset + req->src_x) / xdiv) *
+ req->bpp[i];
+ req->xstride[i] = ((patched_src_w - 1) / ydiv) *
+ req->fb->pitches[i];
+ req->pstride[i] = -req->fb->pitches[i] - req->bpp[i];
+ break;
+ case BIT(DRM_ROTATE_180):
+ offset = ((y_offset + req->src_y + patched_src_h - 1) /
+ ydiv) * req->fb->pitches[i];
+ offset += ((x_offset + req->src_x + patched_src_w - 1) /
+ xdiv) * req->bpp[i];
+ req->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
+ req->bpp[i]) - req->fb->pitches[i];
+ req->pstride[i] = -2 * req->bpp[i];
+ break;
+ case BIT(DRM_ROTATE_270):
+ offset = ((y_offset + req->src_y) / ydiv) *
+ req->fb->pitches[i];
+ offset += ((x_offset + req->src_x + patched_src_h - 1) /
+ xdiv) * req->bpp[i];
+ req->xstride[i] = -(((patched_src_w - 1) / ydiv) *
+ req->fb->pitches[i]) -
+ (2 * req->bpp[i]);
+ req->pstride[i] = req->fb->pitches[i] - req->bpp[i];
+ break;
+ case BIT(DRM_ROTATE_0):
+ default:
+ offset = ((y_offset + req->src_y) / ydiv) *
+ req->fb->pitches[i];
+ offset += ((x_offset + req->src_x) / xdiv) *
+ req->bpp[i];
+ req->xstride[i] = req->fb->pitches[i] -
+ ((patched_src_w / xdiv) *
+ req->bpp[i]);
+ req->pstride[i] = 0;
+ break;
+ }
+
+ req->offsets[i] = offset + req->fb->offsets[i];
+ }
+
+ req->src_w = patched_src_w;
+ req->src_h = patched_src_h;
+ req->crtc_w = patched_crtc_w;
+ req->crtc_h = patched_crtc_h;
+
+ return atmel_hlcdc_plane_check_update_req(p, req, mode);
+}
+
+int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
+ struct atmel_hlcdc_plane_update_req *req)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ int ret;
+
+ ret = atmel_hlcdc_layer_update_start(&plane->layer);
+ if (ret)
+ return ret;
+
+ atmel_hlcdc_plane_update_pos_and_size(plane, req);
+ atmel_hlcdc_plane_update_general_settings(plane, req);
+ atmel_hlcdc_plane_update_format(plane, req);
+ atmel_hlcdc_plane_update_buffers(plane, req);
+
+ atmel_hlcdc_layer_update_commit(&plane->layer);
+
+ return 0;
+}
+
+int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w,
+ unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ const struct drm_display_mode *mode)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_update_req req;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ req.crtc_x = crtc_x;
+ req.crtc_y = crtc_y;
+ req.crtc_w = crtc_w;
+ req.crtc_h = crtc_h;
+ req.src_x = src_x;
+ req.src_y = src_y;
+ req.src_w = src_w;
+ req.src_h = src_h;
+ req.fb = fb;
+
+ ret = atmel_hlcdc_plane_prepare_update_req(&plane->base, &req, mode);
+ if (ret)
+ return ret;
+
+ if (!req.crtc_h || !req.crtc_w)
+ return atmel_hlcdc_layer_disable(&plane->layer);
+
+ return atmel_hlcdc_plane_apply_update_req(&plane->base, &req);
+}
+
+static int atmel_hlcdc_plane_update(struct drm_plane *p,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ return atmel_hlcdc_plane_update_with_mode(p, crtc, fb, crtc_x, crtc_y,
+ crtc_w, crtc_h, src_x, src_y,
+ src_w, src_h, &crtc->hwmode);
+}
+
+static int atmel_hlcdc_plane_disable(struct drm_plane *p)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+
+ return atmel_hlcdc_layer_disable(&plane->layer);
+}
+
+static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+
+ if (plane->base.fb)
+ drm_framebuffer_unreference(plane->base.fb);
+
+ atmel_hlcdc_layer_cleanup(p->dev, &plane->layer);
+
+ drm_plane_cleanup(p);
+ devm_kfree(p->dev->dev, plane);
+}
+
+static int atmel_hlcdc_plane_set_alpha(struct atmel_hlcdc_plane *plane,
+ u8 alpha)
+{
+ atmel_hlcdc_layer_update_start(&plane->layer);
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ plane->layer.desc->layout.general_config,
+ ATMEL_HLCDC_LAYER_GA_MASK,
+ alpha << ATMEL_HLCDC_LAYER_GA_SHIFT);
+ atmel_hlcdc_layer_update_commit(&plane->layer);
+
+ return 0;
+}
+
+static int atmel_hlcdc_plane_set_rotation(struct atmel_hlcdc_plane *plane,
+ unsigned int rotation)
+{
+ plane->rotation = rotation;
+
+ return 0;
+}
+
+static int atmel_hlcdc_plane_set_property(struct drm_plane *p,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_properties *props = plane->properties;
+
+ if (property == props->alpha)
+ atmel_hlcdc_plane_set_alpha(plane, value);
+ else if (property == props->rotation)
+ atmel_hlcdc_plane_set_rotation(plane, value);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+ const struct atmel_hlcdc_layer_desc *desc,
+ struct atmel_hlcdc_plane_properties *props)
+{
+ struct regmap *regmap = plane->layer.hlcdc->regmap;
+
+ if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
+ desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
+ drm_object_attach_property(&plane->base.base,
+ props->alpha, 255);
+
+ /* Set default alpha value */
+ regmap_update_bits(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_GENERAL_CFG(&plane->layer),
+ ATMEL_HLCDC_LAYER_GA_MASK,
+ ATMEL_HLCDC_LAYER_GA_MASK);
+ }
+
+ if (desc->layout.xstride && desc->layout.pstride)
+ drm_object_attach_property(&plane->base.base,
+ props->rotation,
+ BIT(DRM_ROTATE_0));
+
+ if (desc->layout.csc) {
+ /*
+ * TODO: declare a "yuv-to-rgb-conv-factors" property to let
+ * userspace modify these factors (using a BLOB property?).
+ */
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 0),
+ 0x4c900091);
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 1),
+ 0x7a5f5090);
+ regmap_write(regmap,
+ desc->regs_offset +
+ ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
+ 0x40040890);
+ }
+}
+
+static struct drm_plane_funcs layer_plane_funcs = {
+ .update_plane = atmel_hlcdc_plane_update,
+ .disable_plane = atmel_hlcdc_plane_disable,
+ .set_property = atmel_hlcdc_plane_set_property,
+ .destroy = atmel_hlcdc_plane_destroy,
+};
+
+static struct atmel_hlcdc_plane *
+atmel_hlcdc_plane_create(struct drm_device *dev,
+ const struct atmel_hlcdc_layer_desc *desc,
+ struct atmel_hlcdc_plane_properties *props)
+{
+ struct atmel_hlcdc_plane *plane;
+ enum drm_plane_type type;
+ int ret;
+
+ plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ ret = atmel_hlcdc_layer_init(dev, &plane->layer, desc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (desc->type == ATMEL_HLCDC_BASE_LAYER)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else if (desc->type == ATMEL_HLCDC_CURSOR_LAYER)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ ret = drm_universal_plane_init(dev, &plane->base, 0,
+ &layer_plane_funcs,
+ desc->formats->formats,
+ desc->formats->nformats, type);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Set default property values */
+ atmel_hlcdc_plane_init_properties(plane, desc, props);
+
+ return plane;
+}
+
+static struct atmel_hlcdc_plane_properties *
+atmel_hlcdc_plane_create_properties(struct drm_device *dev)
+{
+ struct atmel_hlcdc_plane_properties *props;
+
+ props = devm_kzalloc(dev->dev, sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return ERR_PTR(-ENOMEM);
+
+ props->alpha = drm_property_create_range(dev, 0, "alpha", 0, 255);
+ if (!props->alpha)
+ return ERR_PTR(-ENOMEM);
+
+ props->rotation = drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270));
+ if (!props->rotation)
+ return ERR_PTR(-ENOMEM);
+
+ return props;
+}
+
+struct atmel_hlcdc_planes *
+atmel_hlcdc_create_planes(struct drm_device *dev)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct atmel_hlcdc_plane_properties *props;
+ struct atmel_hlcdc_planes *planes;
+ const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers;
+ int nlayers = dc->desc->nlayers;
+ int i;
+
+ planes = devm_kzalloc(dev->dev, sizeof(*planes), GFP_KERNEL);
+ if (!planes)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nlayers; i++) {
+ if (descs[i].type == ATMEL_HLCDC_OVERLAY_LAYER)
+ planes->noverlays++;
+ }
+
+ if (planes->noverlays) {
+ planes->overlays = devm_kzalloc(dev->dev,
+ planes->noverlays *
+ sizeof(*planes->overlays),
+ GFP_KERNEL);
+ if (!planes->overlays)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ props = atmel_hlcdc_plane_create_properties(dev);
+ if (IS_ERR(props))
+ return ERR_CAST(props);
+
+ planes->noverlays = 0;
+ for (i = 0; i < nlayers; i++) {
+ struct atmel_hlcdc_plane *plane;
+
+ if (descs[i].type == ATMEL_HLCDC_PP_LAYER)
+ continue;
+
+ plane = atmel_hlcdc_plane_create(dev, &descs[i], props);
+ if (IS_ERR(plane))
+ return ERR_CAST(plane);
+
+ plane->properties = props;
+
+ switch (descs[i].type) {
+ case ATMEL_HLCDC_BASE_LAYER:
+ if (planes->primary)
+ return ERR_PTR(-EINVAL);
+ planes->primary = plane;
+ break;
+
+ case ATMEL_HLCDC_OVERLAY_LAYER:
+ planes->overlays[planes->noverlays++] = plane;
+ break;
+
+ case ATMEL_HLCDC_CURSOR_LAYER:
+ if (planes->cursor)
+ return ERR_PTR(-EINVAL);
+ planes->cursor = plane;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return planes;
+}
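Editor's note on the rescaling block in atmel_hlcdc_plane_update_pos_and_size() above: the HEO scaler takes a fixed-point factor in 1/2048 steps, computed from the source/destination ratio and then clamped so the scaler never fetches more source pixels than the framebuffer provides. A minimal standalone sketch of that computation, for illustration only (not part of the patch; heo_scaling_factor is a hypothetical name):

static unsigned int heo_scaling_factor(unsigned int src, unsigned int crtc)
{
        unsigned int factor, max_memsize;

        /* factor ~= (2048 * src - 1024) / crtc, then rounded up. */
        factor = ((8 * 256 * src) - (256 * 4)) / crtc;
        factor++;

        /* Clamp so the scaler never reads past the source dimension. */
        max_memsize = ((factor * crtc) + (256 * 4)) / 2048;
        if (max_memsize > src)
                factor--;

        return factor;
}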
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 61dbf09dff5d..976d9798dc99 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -207,12 +207,22 @@ int bochs_fbdev_init(struct bochs_device *bochs)
if (ret)
return ret;
- drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ if (ret)
+ goto fini;
+
drm_helper_disable_unused_functions(bochs->dev);
- drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+ ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+ if (ret)
+ goto fini;
bochs->fb.initialized = true;
return 0;
+
+fini:
+ drm_fb_helper_fini(&bochs->fb.helper);
+ return ret;
}
void bochs_fbdev_fini(struct bochs_device *bochs)
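The hunk above makes bochs_fbdev_init() propagate failures from drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config() and unwind with drm_fb_helper_fini(). A hedged sketch of that init/teardown shape for a generic driver follows; the foo_* names are hypothetical, and it assumes drm_fb_helper_prepare() has already attached the driver's fb_probe callback:

#include <drm/drm_fb_helper.h>

struct foo_fbdev {                              /* hypothetical driver state */
        struct drm_fb_helper helper;
};

static int foo_fbdev_init(struct drm_device *dev, struct foo_fbdev *fbdev)
{
        int ret;

        ret = drm_fb_helper_init(dev, &fbdev->helper, 1, 1);
        if (ret)
                return ret;

        ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
        if (ret)
                goto err_fini;

        ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
        if (ret)
                goto err_fini;

        return 0;

err_fini:
        /* Undo drm_fb_helper_init() when a later step fails. */
        drm_fb_helper_fini(&fbdev->helper);
        return ret;
}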
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 85f0f8cf1fb8..26bcd03a8cb6 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -18,10 +18,6 @@ MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
-static void bochs_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
{
switch (mode) {
@@ -144,7 +140,6 @@ static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
.mode_set_base = bochs_crtc_mode_set_base,
.prepare = bochs_crtc_prepare,
.commit = bochs_crtc_commit,
- .load_lut = bochs_crtc_load_lut,
};
static void bochs_crtc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 884923f982d9..f38bbcdf929b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -1,5 +1,13 @@
+config DRM_DW_HDMI
+ tristate
+ depends on DRM
+ select DRM_KMS_HELPER
+
config DRM_PTN3460
tristate "PTN3460 DP/LVDS bridge"
depends on DRM
+ depends on OF
select DRM_KMS_HELPER
+ select DRM_PANEL
---help---
+ ptn3460 eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index b4733e1fbd2e..d8a8cfd12fbb 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,3 +1,4 @@
ccflags-y := -Iinclude/drm
obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
+obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
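The rename that follows turns the i.MX-specific HDMI core into a shared Synopsys DesignWare HDMI bridge driver. One detail worth calling out: register access becomes indirect through per-instance read/write callbacks selected from the DT "reg-io-width" property, because on a 32-bit register bus the 8-bit registers sit four bytes apart. A simplified sketch of that dispatch (the *_sketch names are hypothetical; they mirror the dw_hdmi_writel()/dw_hdmi_writeb() helpers added below):

#include <linux/io.h>
#include <linux/types.h>

struct hdmi_regs_sketch {
        void __iomem *regs;
        void (*write)(struct hdmi_regs_sketch *h, u8 val, int offset);
        u8 (*read)(struct hdmi_regs_sketch *h, int offset);
};

/* reg-io-width = 4: registers are 32-bit spaced, index scaled by four. */
static void sketch_writel(struct hdmi_regs_sketch *h, u8 val, int offset)
{
        writel(val, h->regs + (offset << 2));
}

/* reg-io-width = 1: registers are byte addressed. */
static void sketch_writeb(struct hdmi_regs_sketch *h, u8 val, int offset)
{
        writeb(val, h->regs + offset);
}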
diff --git a/drivers/gpu/drm/imx/imx-hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index ddc53e039530..cd6a70647e32 100644
--- a/drivers/gpu/drm/imx/imx-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -6,31 +6,26 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
- * for SLISHDMI13T and SLIPHDMIT IP cores
+ * Designware High-Definition Multimedia Interface (HDMI) driver
*
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
-
-#include <linux/component.h>
+#include <linux/module.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hdmi.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of_device.h>
+#include <drm/drm_of.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
-#include <video/imx-ipu-v3.h>
+#include <drm/bridge/dw_hdmi.h>
-#include "imx-hdmi.h"
-#include "imx-drm.h"
+#include "dw_hdmi.h"
#define HDMI_EDID_LEN 512
@@ -54,11 +49,6 @@ enum hdmi_datamap {
YCbCr422_12B = 0x12,
};
-enum imx_hdmi_devtype {
- IMX6Q_HDMI,
- IMX6DL_HDMI,
-};
-
static const u16 csc_coeff_default[3][4] = {
{ 0x2000, 0x0000, 0x0000, 0x0000 },
{ 0x0000, 0x2000, 0x0000, 0x0000 },
@@ -111,16 +101,19 @@ struct hdmi_data_info {
struct hdmi_vmode video_mode;
};
-struct imx_hdmi {
+struct dw_hdmi {
struct drm_connector connector;
- struct drm_encoder encoder;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
- enum imx_hdmi_devtype dev_type;
+ enum dw_hdmi_devtype dev_type;
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
struct hdmi_data_info hdmi_data;
+ const struct dw_hdmi_plat_data *plat_data;
+
int vic;
u8 edid[HDMI_EDID_LEN];
@@ -135,26 +128,42 @@ struct imx_hdmi {
unsigned int sample_rate;
int ratio;
+
+ void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
+ u8 (*read)(struct dw_hdmi *hdmi, int offset);
};
-static void imx_hdmi_set_ipu_di_mux(struct imx_hdmi *hdmi, int ipu_di)
+static void dw_hdmi_writel(struct dw_hdmi *hdmi, u8 val, int offset)
{
- regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
- IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
- ipu_di << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
+ writel(val, hdmi->regs + (offset << 2));
}
-static inline void hdmi_writeb(struct imx_hdmi *hdmi, u8 val, int offset)
+static u8 dw_hdmi_readl(struct dw_hdmi *hdmi, int offset)
+{
+ return readl(hdmi->regs + (offset << 2));
+}
+
+static void dw_hdmi_writeb(struct dw_hdmi *hdmi, u8 val, int offset)
{
writeb(val, hdmi->regs + offset);
}
-static inline u8 hdmi_readb(struct imx_hdmi *hdmi, int offset)
+static u8 dw_hdmi_readb(struct dw_hdmi *hdmi, int offset)
{
return readb(hdmi->regs + offset);
}
-static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
+static inline void hdmi_writeb(struct dw_hdmi *hdmi, u8 val, int offset)
+{
+ hdmi->write(hdmi, val, offset);
+}
+
+static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset)
+{
+ return hdmi->read(hdmi, offset);
+}
+
+static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
u8 val = hdmi_readb(hdmi, reg) & ~mask;
@@ -162,13 +171,13 @@ static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
hdmi_writeb(hdmi, val, reg);
}
-static void hdmi_mask_writeb(struct imx_hdmi *hdmi, u8 data, unsigned int reg,
- u8 shift, u8 mask)
+static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
+ u8 shift, u8 mask)
{
hdmi_modb(hdmi, data << shift, mask, reg);
}
-static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi,
+static void hdmi_set_clock_regenerator_n(struct dw_hdmi *hdmi,
unsigned int value)
{
hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
@@ -179,7 +188,7 @@ static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi,
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
}
-static void hdmi_regenerate_cts(struct imx_hdmi *hdmi, unsigned int cts)
+static void hdmi_regenerate_cts(struct dw_hdmi *hdmi, unsigned int cts)
{
/* Must be set/cleared first */
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
@@ -326,8 +335,8 @@ static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
return (cts * ratio) / 100;
}
-static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi,
- unsigned long pixel_clk)
+static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
+ unsigned long pixel_clk)
{
unsigned int clk_n, clk_cts;
@@ -338,7 +347,7 @@ static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi,
if (!clk_cts) {
dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
- __func__, pixel_clk);
+ __func__, pixel_clk);
return;
}
@@ -350,12 +359,12 @@ static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi,
hdmi_regenerate_cts(hdmi, clk_cts);
}
-static void hdmi_init_clk_regenerator(struct imx_hdmi *hdmi)
+static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
{
hdmi_set_clk_regenerator(hdmi, 74250000);
}
-static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi)
+static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
{
hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
}
@@ -367,7 +376,7 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi)
* pin{31~24} <==> G[7:0]
* pin{15~8} <==> B[7:0]
*/
-static void hdmi_video_sample(struct imx_hdmi *hdmi)
+static void hdmi_video_sample(struct dw_hdmi *hdmi)
{
int color_format = 0;
u8 val;
@@ -423,12 +432,12 @@ static void hdmi_video_sample(struct imx_hdmi *hdmi)
hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA1);
}
-static int is_color_space_conversion(struct imx_hdmi *hdmi)
+static int is_color_space_conversion(struct dw_hdmi *hdmi)
{
return hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format;
}
-static int is_color_space_decimation(struct imx_hdmi *hdmi)
+static int is_color_space_decimation(struct dw_hdmi *hdmi)
{
if (hdmi->hdmi_data.enc_out_format != YCBCR422_8BITS)
return 0;
@@ -438,7 +447,7 @@ static int is_color_space_decimation(struct imx_hdmi *hdmi)
return 0;
}
-static int is_color_space_interpolation(struct imx_hdmi *hdmi)
+static int is_color_space_interpolation(struct dw_hdmi *hdmi)
{
if (hdmi->hdmi_data.enc_in_format != YCBCR422_8BITS)
return 0;
@@ -448,7 +457,7 @@ static int is_color_space_interpolation(struct imx_hdmi *hdmi)
return 0;
}
-static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
+static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
{
const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
unsigned i;
@@ -477,13 +486,11 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
u16 coeff_b = (*csc_coeff)[1][i];
u16 coeff_c = (*csc_coeff)[2][i];
- hdmi_writeb(hdmi, coeff_a & 0xff,
- HDMI_CSC_COEF_A1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_a & 0xff, HDMI_CSC_COEF_A1_LSB + i * 2);
hdmi_writeb(hdmi, coeff_a >> 8, HDMI_CSC_COEF_A1_MSB + i * 2);
hdmi_writeb(hdmi, coeff_b & 0xff, HDMI_CSC_COEF_B1_LSB + i * 2);
hdmi_writeb(hdmi, coeff_b >> 8, HDMI_CSC_COEF_B1_MSB + i * 2);
- hdmi_writeb(hdmi, coeff_c & 0xff,
- HDMI_CSC_COEF_C1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_c & 0xff, HDMI_CSC_COEF_C1_LSB + i * 2);
hdmi_writeb(hdmi, coeff_c >> 8, HDMI_CSC_COEF_C1_MSB + i * 2);
}
@@ -491,7 +498,7 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
HDMI_CSC_SCALE);
}
-static void hdmi_video_csc(struct imx_hdmi *hdmi)
+static void hdmi_video_csc(struct dw_hdmi *hdmi)
{
int color_depth = 0;
int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
@@ -519,7 +526,7 @@ static void hdmi_video_csc(struct imx_hdmi *hdmi)
hdmi_modb(hdmi, color_depth, HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK,
HDMI_CSC_SCALE);
- imx_hdmi_update_csc_coeffs(hdmi);
+ dw_hdmi_update_csc_coeffs(hdmi);
}
/*
@@ -527,7 +534,7 @@ static void hdmi_video_csc(struct imx_hdmi *hdmi)
* for example, if input is YCC422 mode or repeater is used,
* data should be repacked this module can be bypassed.
*/
-static void hdmi_video_packetize(struct imx_hdmi *hdmi)
+static void hdmi_video_packetize(struct dw_hdmi *hdmi)
{
unsigned int color_depth = 0;
unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
@@ -535,21 +542,22 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
u8 val, vp_conf;
- if (hdmi_data->enc_out_format == RGB
- || hdmi_data->enc_out_format == YCBCR444) {
- if (!hdmi_data->enc_color_depth)
+ if (hdmi_data->enc_out_format == RGB ||
+ hdmi_data->enc_out_format == YCBCR444) {
+ if (!hdmi_data->enc_color_depth) {
output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
- else if (hdmi_data->enc_color_depth == 8) {
+ } else if (hdmi_data->enc_color_depth == 8) {
color_depth = 4;
output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
- } else if (hdmi_data->enc_color_depth == 10)
+ } else if (hdmi_data->enc_color_depth == 10) {
color_depth = 5;
- else if (hdmi_data->enc_color_depth == 12)
+ } else if (hdmi_data->enc_color_depth == 12) {
color_depth = 6;
- else if (hdmi_data->enc_color_depth == 16)
+ } else if (hdmi_data->enc_color_depth == 16) {
color_depth = 7;
- else
+ } else {
return;
+ }
} else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
if (!hdmi_data->enc_color_depth ||
hdmi_data->enc_color_depth == 8)
@@ -561,8 +569,9 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
else
return;
output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
- } else
+ } else {
return;
+ }
/* set the packetizer registers */
val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
@@ -622,182 +631,132 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
HDMI_VP_CONF);
}
-static inline void hdmi_phy_test_clear(struct imx_hdmi *hdmi,
- unsigned char bit)
+static inline void hdmi_phy_test_clear(struct dw_hdmi *hdmi,
+ unsigned char bit)
{
hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLR_OFFSET,
HDMI_PHY_TST0_TSTCLR_MASK, HDMI_PHY_TST0);
}
-static inline void hdmi_phy_test_enable(struct imx_hdmi *hdmi,
- unsigned char bit)
+static inline void hdmi_phy_test_enable(struct dw_hdmi *hdmi,
+ unsigned char bit)
{
hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTEN_OFFSET,
HDMI_PHY_TST0_TSTEN_MASK, HDMI_PHY_TST0);
}
-static inline void hdmi_phy_test_clock(struct imx_hdmi *hdmi,
- unsigned char bit)
+static inline void hdmi_phy_test_clock(struct dw_hdmi *hdmi,
+ unsigned char bit)
{
hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLK_OFFSET,
HDMI_PHY_TST0_TSTCLK_MASK, HDMI_PHY_TST0);
}
-static inline void hdmi_phy_test_din(struct imx_hdmi *hdmi,
- unsigned char bit)
+static inline void hdmi_phy_test_din(struct dw_hdmi *hdmi,
+ unsigned char bit)
{
hdmi_writeb(hdmi, bit, HDMI_PHY_TST1);
}
-static inline void hdmi_phy_test_dout(struct imx_hdmi *hdmi,
- unsigned char bit)
+static inline void hdmi_phy_test_dout(struct dw_hdmi *hdmi,
+ unsigned char bit)
{
hdmi_writeb(hdmi, bit, HDMI_PHY_TST2);
}
-static bool hdmi_phy_wait_i2c_done(struct imx_hdmi *hdmi, int msec)
+static bool hdmi_phy_wait_i2c_done(struct dw_hdmi *hdmi, int msec)
{
- while ((hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3) == 0) {
+ u32 val;
+
+ while ((val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3) == 0) {
if (msec-- == 0)
return false;
udelay(1000);
}
+ hdmi_writeb(hdmi, val, HDMI_IH_I2CMPHY_STAT0);
+
return true;
}
-static void __hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data,
- unsigned char addr)
+static void __hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
{
hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0);
hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
hdmi_writeb(hdmi, (unsigned char)(data >> 8),
- HDMI_PHY_I2CM_DATAO_1_ADDR);
+ HDMI_PHY_I2CM_DATAO_1_ADDR);
hdmi_writeb(hdmi, (unsigned char)(data >> 0),
- HDMI_PHY_I2CM_DATAO_0_ADDR);
+ HDMI_PHY_I2CM_DATAO_0_ADDR);
hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
- HDMI_PHY_I2CM_OPERATION_ADDR);
+ HDMI_PHY_I2CM_OPERATION_ADDR);
hdmi_phy_wait_i2c_done(hdmi, 1000);
}
-static int hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data,
- unsigned char addr)
+static int hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
{
__hdmi_phy_i2c_write(hdmi, data, addr);
return 0;
}
-static void imx_hdmi_phy_enable_power(struct imx_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_power(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_PDZ_OFFSET,
HDMI_PHY_CONF0_PDZ_MASK);
}
-static void imx_hdmi_phy_enable_tmds(struct imx_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_tmds(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_ENTMDS_OFFSET,
HDMI_PHY_CONF0_ENTMDS_MASK);
}
-static void imx_hdmi_phy_gen2_pddq(struct imx_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_spare(struct dw_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SPARECTRL_OFFSET,
+ HDMI_PHY_CONF0_SPARECTRL_MASK);
+}
+
+static void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
}
-static void imx_hdmi_phy_gen2_txpwron(struct imx_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
}
-static void imx_hdmi_phy_sel_data_en_pol(struct imx_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_sel_data_en_pol(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
HDMI_PHY_CONF0_SELDATAENPOL_MASK);
}
-static void imx_hdmi_phy_sel_interface_control(struct imx_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_SELDIPIF_OFFSET,
HDMI_PHY_CONF0_SELDIPIF_MASK);
}
-enum {
- RES_8,
- RES_10,
- RES_12,
- RES_MAX,
-};
-
-struct mpll_config {
- unsigned long mpixelclock;
- struct {
- u16 cpce;
- u16 gmp;
- } res[RES_MAX];
-};
-
-static const struct mpll_config mpll_config[] = {
- {
- 45250000, {
- { 0x01e0, 0x0000 },
- { 0x21e1, 0x0000 },
- { 0x41e2, 0x0000 }
- },
- }, {
- 92500000, {
- { 0x0140, 0x0005 },
- { 0x2141, 0x0005 },
- { 0x4142, 0x0005 },
- },
- }, {
- 148500000, {
- { 0x00a0, 0x000a },
- { 0x20a1, 0x000a },
- { 0x40a2, 0x000a },
- },
- }, {
- ~0UL, {
- { 0x00a0, 0x000a },
- { 0x2001, 0x000f },
- { 0x4002, 0x000f },
- },
- }
-};
-
-struct curr_ctrl {
- unsigned long mpixelclock;
- u16 curr[RES_MAX];
-};
-
-static const struct curr_ctrl curr_ctrl[] = {
- /* pixelclk bpp8 bpp10 bpp12 */
- {
- 54000000, { 0x091c, 0x091c, 0x06dc },
- }, {
- 58400000, { 0x091c, 0x06dc, 0x06dc },
- }, {
- 72000000, { 0x06dc, 0x06dc, 0x091c },
- }, {
- 74250000, { 0x06dc, 0x0b5c, 0x091c },
- }, {
- 118800000, { 0x091c, 0x091c, 0x06dc },
- }, {
- 216000000, { 0x06dc, 0x0b5c, 0x091c },
- }
-};
-
-static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
+static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
unsigned char res, int cscon)
{
unsigned res_idx, i;
u8 val, msec;
+ const struct dw_hdmi_mpll_config *mpll_config =
+ hdmi->plat_data->mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *curr_ctrl = hdmi->plat_data->cur_ctr;
+ const struct dw_hdmi_sym_term *sym_term = hdmi->plat_data->sym_term;
if (prep)
return -EINVAL;
@@ -805,13 +764,13 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
switch (res) {
case 0: /* color resolution 0 is 8 bit colour depth */
case 8:
- res_idx = RES_8;
+ res_idx = DW_HDMI_RES_8;
break;
case 10:
- res_idx = RES_10;
+ res_idx = DW_HDMI_RES_10;
break;
case 12:
- res_idx = RES_12;
+ res_idx = DW_HDMI_RES_12;
break;
default:
return -EINVAL;
@@ -826,10 +785,10 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
hdmi_writeb(hdmi, val, HDMI_MC_FLOWCTRL);
/* gen2 tx power off */
- imx_hdmi_phy_gen2_txpwron(hdmi, 0);
+ dw_hdmi_phy_gen2_txpwron(hdmi, 0);
/* gen2 pddq */
- imx_hdmi_phy_gen2_pddq(hdmi, 1);
+ dw_hdmi_phy_gen2_pddq(hdmi, 1);
/* PHY reset */
hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
@@ -839,11 +798,11 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
hdmi_phy_test_clear(hdmi, 1);
hdmi_writeb(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
- HDMI_PHY_I2CM_SLAVE_ADDR);
+ HDMI_PHY_I2CM_SLAVE_ADDR);
hdmi_phy_test_clear(hdmi, 0);
/* PLL/MPLL Cfg - always match on final entry */
- for (i = 0; i < ARRAY_SIZE(mpll_config) - 1; i++)
+ for (i = 0; mpll_config[i].mpixelclock != (~0UL); i++)
if (hdmi->hdmi_data.video_mode.mpixelclock <=
mpll_config[i].mpixelclock)
break;
@@ -851,15 +810,14 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
- for (i = 0; i < ARRAY_SIZE(curr_ctrl); i++)
+ for (i = 0; curr_ctrl[i].mpixelclock != (~0UL); i++)
if (hdmi->hdmi_data.video_mode.mpixelclock <=
curr_ctrl[i].mpixelclock)
break;
- if (i >= ARRAY_SIZE(curr_ctrl)) {
- dev_err(hdmi->dev,
- "Pixel clock %d - unsupported by HDMI\n",
- hdmi->hdmi_data.video_mode.mpixelclock);
+ if (curr_ctrl[i].mpixelclock == (~0UL)) {
+ dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
+ hdmi->hdmi_data.video_mode.mpixelclock);
return -EINVAL;
}
@@ -868,24 +826,34 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
+
+ for (i = 0; sym_term[i].mpixelclock != (~0UL); i++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ sym_term[i].mpixelclock)
+ break;
+
/* RESISTANCE TERM 133Ohm Cfg */
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
+ hdmi_phy_i2c_write(hdmi, sym_term[i].term, 0x19); /* TXTERM */
/* PREEMP Cgf 0.00 */
- hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
+ hdmi_phy_i2c_write(hdmi, sym_term[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
+
/* TX/CK LVL 10 */
hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
/* REMOVE CLK TERM */
hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
- imx_hdmi_phy_enable_power(hdmi, 1);
+ dw_hdmi_phy_enable_power(hdmi, 1);
/* toggle TMDS enable */
- imx_hdmi_phy_enable_tmds(hdmi, 0);
- imx_hdmi_phy_enable_tmds(hdmi, 1);
+ dw_hdmi_phy_enable_tmds(hdmi, 0);
+ dw_hdmi_phy_enable_tmds(hdmi, 1);
/* gen2 tx power on */
- imx_hdmi_phy_gen2_txpwron(hdmi, 1);
- imx_hdmi_phy_gen2_pddq(hdmi, 0);
+ dw_hdmi_phy_gen2_txpwron(hdmi, 1);
+ dw_hdmi_phy_gen2_pddq(hdmi, 0);
+
+ if (hdmi->dev_type == RK3288_HDMI)
+ dw_hdmi_phy_enable_spare(hdmi, 1);
/*Wait for PHY PLL lock */
msec = 5;
@@ -906,7 +874,7 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
return 0;
}
-static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
+static int dw_hdmi_phy_init(struct dw_hdmi *hdmi)
{
int i, ret;
bool cscon = false;
@@ -917,10 +885,10 @@ static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
/* HDMI Phy spec says to do the phy initialization sequence twice */
for (i = 0; i < 2; i++) {
- imx_hdmi_phy_sel_data_en_pol(hdmi, 1);
- imx_hdmi_phy_sel_interface_control(hdmi, 0);
- imx_hdmi_phy_enable_tmds(hdmi, 0);
- imx_hdmi_phy_enable_power(hdmi, 0);
+ dw_hdmi_phy_sel_data_en_pol(hdmi, 1);
+ dw_hdmi_phy_sel_interface_control(hdmi, 0);
+ dw_hdmi_phy_enable_tmds(hdmi, 0);
+ dw_hdmi_phy_enable_power(hdmi, 0);
/* Enable CSC */
ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
@@ -932,7 +900,7 @@ static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
return 0;
}
-static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
+static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi)
{
u8 de;
@@ -951,7 +919,7 @@ static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
}
-static void hdmi_config_AVI(struct imx_hdmi *hdmi)
+static void hdmi_config_AVI(struct dw_hdmi *hdmi)
{
u8 val, pix_fmt, under_scan;
u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
@@ -1045,7 +1013,7 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
}
-static void hdmi_av_composer(struct imx_hdmi *hdmi,
+static void hdmi_av_composer(struct dw_hdmi *hdmi,
const struct drm_display_mode *mode)
{
u8 inv_val;
@@ -1129,19 +1097,19 @@ static void hdmi_av_composer(struct imx_hdmi *hdmi,
hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH);
}
-static void imx_hdmi_phy_disable(struct imx_hdmi *hdmi)
+static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi)
{
if (!hdmi->phy_enabled)
return;
- imx_hdmi_phy_enable_tmds(hdmi, 0);
- imx_hdmi_phy_enable_power(hdmi, 0);
+ dw_hdmi_phy_enable_tmds(hdmi, 0);
+ dw_hdmi_phy_enable_power(hdmi, 0);
hdmi->phy_enabled = false;
}
/* HDMI Initialization Step B.4 */
-static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
+static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
{
u8 clkdis;
@@ -1170,13 +1138,13 @@ static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
}
}
-static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
+static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi)
{
hdmi_modb(hdmi, 0, HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
}
/* Workaround to clear the overflow condition */
-static void imx_hdmi_clear_overflow(struct imx_hdmi *hdmi)
+static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
{
int count;
u8 val;
@@ -1194,19 +1162,19 @@ static void imx_hdmi_clear_overflow(struct imx_hdmi *hdmi)
hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
}
-static void hdmi_enable_overflow_interrupts(struct imx_hdmi *hdmi)
+static void hdmi_enable_overflow_interrupts(struct dw_hdmi *hdmi)
{
hdmi_writeb(hdmi, 0, HDMI_FC_MASK2);
hdmi_writeb(hdmi, 0, HDMI_IH_MUTE_FC_STAT2);
}
-static void hdmi_disable_overflow_interrupts(struct imx_hdmi *hdmi)
+static void hdmi_disable_overflow_interrupts(struct dw_hdmi *hdmi)
{
hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
HDMI_IH_MUTE_FC_STAT2);
}
-static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
+static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
{
int ret;
@@ -1223,21 +1191,21 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
}
if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
- (hdmi->vic == 21) || (hdmi->vic == 22) ||
- (hdmi->vic == 2) || (hdmi->vic == 3) ||
- (hdmi->vic == 17) || (hdmi->vic == 18))
+ (hdmi->vic == 21) || (hdmi->vic == 22) ||
+ (hdmi->vic == 2) || (hdmi->vic == 3) ||
+ (hdmi->vic == 17) || (hdmi->vic == 18))
hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
else
hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
- (hdmi->vic == 12) || (hdmi->vic == 13) ||
- (hdmi->vic == 14) || (hdmi->vic == 15) ||
- (hdmi->vic == 25) || (hdmi->vic == 26) ||
- (hdmi->vic == 27) || (hdmi->vic == 28) ||
- (hdmi->vic == 29) || (hdmi->vic == 30) ||
- (hdmi->vic == 35) || (hdmi->vic == 36) ||
- (hdmi->vic == 37) || (hdmi->vic == 38))
+ (hdmi->vic == 12) || (hdmi->vic == 13) ||
+ (hdmi->vic == 14) || (hdmi->vic == 15) ||
+ (hdmi->vic == 25) || (hdmi->vic == 26) ||
+ (hdmi->vic == 27) || (hdmi->vic == 28) ||
+ (hdmi->vic == 29) || (hdmi->vic == 30) ||
+ (hdmi->vic == 35) || (hdmi->vic == 36) ||
+ (hdmi->vic == 37) || (hdmi->vic == 38))
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
else
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
@@ -1258,17 +1226,17 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
hdmi_av_composer(hdmi, mode);
/* HDMI Initializateion Step B.2 */
- ret = imx_hdmi_phy_init(hdmi);
+ ret = dw_hdmi_phy_init(hdmi);
if (ret)
return ret;
/* HDMI Initialization Step B.3 */
- imx_hdmi_enable_video_path(hdmi);
+ dw_hdmi_enable_video_path(hdmi);
/* not for DVI mode */
- if (hdmi->hdmi_data.video_mode.mdvi)
+ if (hdmi->hdmi_data.video_mode.mdvi) {
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
- else {
+ } else {
dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
/* HDMI Initialization Step E - Configure audio */
@@ -1284,7 +1252,7 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
hdmi_video_sample(hdmi);
hdmi_tx_hdcp_config(hdmi);
- imx_hdmi_clear_overflow(hdmi);
+ dw_hdmi_clear_overflow(hdmi);
if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
hdmi_enable_overflow_interrupts(hdmi);
@@ -1292,7 +1260,7 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
}
/* Wait until we are registered to enable interrupts */
-static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi)
+static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
{
hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
HDMI_PHY_I2CM_INT_ADDR);
@@ -1310,7 +1278,7 @@ static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi)
return 0;
}
-static void initialize_hdmi_ih_mutes(struct imx_hdmi *hdmi)
+static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
{
u8 ih_mute;
@@ -1362,29 +1330,67 @@ static void initialize_hdmi_ih_mutes(struct imx_hdmi *hdmi)
hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE);
}
-static void imx_hdmi_poweron(struct imx_hdmi *hdmi)
+static void dw_hdmi_poweron(struct dw_hdmi *hdmi)
+{
+ dw_hdmi_setup(hdmi, &hdmi->previous_mode);
+}
+
+static void dw_hdmi_poweroff(struct dw_hdmi *hdmi)
{
- imx_hdmi_setup(hdmi, &hdmi->previous_mode);
+ dw_hdmi_phy_disable(hdmi);
}
-static void imx_hdmi_poweroff(struct imx_hdmi *hdmi)
+static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
{
- imx_hdmi_phy_disable(hdmi);
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ dw_hdmi_setup(hdmi, mode);
+
+ /* Store the display mode for plugin/DPMS poweron events */
+ memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
}
-static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
- *connector, bool force)
+static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ return true;
+}
+
+static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ dw_hdmi_poweroff(hdmi);
+}
+
+static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ dw_hdmi_poweron(hdmi);
+}
+
+static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static enum drm_connector_status
+dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
connector_status_connected : connector_status_disconnected;
}
-static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
+static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
struct edid *edid;
int ret;
@@ -1407,94 +1413,60 @@ static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
- *connector)
-{
- struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
- connector);
-
- return &hdmi->encoder;
-}
-
-static void imx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
-
- imx_hdmi_setup(hdmi, mode);
-
- /* Store the display mode for plugin/DKMS poweron events */
- memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
-}
-
-static bool imx_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static enum drm_mode_status
+dw_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- return true;
-}
+ struct dw_hdmi *hdmi = container_of(connector,
+ struct dw_hdmi, connector);
+ enum drm_mode_status mode_status = MODE_OK;
-static void imx_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
-static void imx_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ if (hdmi->plat_data->mode_valid)
+ mode_status = hdmi->plat_data->mode_valid(connector, mode);
- if (mode)
- imx_hdmi_poweroff(hdmi);
- else
- imx_hdmi_poweron(hdmi);
+ return mode_status;
}
-static void imx_hdmi_encoder_prepare(struct drm_encoder *encoder)
+static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
+ *connector)
{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+ connector);
- imx_hdmi_poweroff(hdmi);
- imx_drm_panel_format(encoder, V4L2_PIX_FMT_RGB24);
+ return hdmi->encoder;
}
-static void imx_hdmi_encoder_commit(struct drm_encoder *encoder)
+static void dw_hdmi_connector_destroy(struct drm_connector *connector)
{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
- int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
-
- imx_hdmi_set_ipu_di_mux(hdmi, mux);
-
- imx_hdmi_poweron(hdmi);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
}
-static struct drm_encoder_funcs imx_hdmi_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
-static struct drm_encoder_helper_funcs imx_hdmi_encoder_helper_funcs = {
- .dpms = imx_hdmi_encoder_dpms,
- .prepare = imx_hdmi_encoder_prepare,
- .commit = imx_hdmi_encoder_commit,
- .mode_set = imx_hdmi_encoder_mode_set,
- .mode_fixup = imx_hdmi_encoder_mode_fixup,
- .disable = imx_hdmi_encoder_disable,
-};
-
-static struct drm_connector_funcs imx_hdmi_connector_funcs = {
+static struct drm_connector_funcs dw_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_hdmi_connector_detect,
- .destroy = imx_drm_connector_destroy,
+ .detect = dw_hdmi_connector_detect,
+ .destroy = dw_hdmi_connector_destroy,
+};
+
+static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
+ .get_modes = dw_hdmi_connector_get_modes,
+ .mode_valid = dw_hdmi_connector_mode_valid,
+ .best_encoder = dw_hdmi_connector_best_encoder,
};
-static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
- .get_modes = imx_hdmi_connector_get_modes,
- .best_encoder = imx_hdmi_connector_best_encoder,
+struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+ .enable = dw_hdmi_bridge_enable,
+ .disable = dw_hdmi_bridge_disable,
+ .pre_enable = dw_hdmi_bridge_nop,
+ .post_disable = dw_hdmi_bridge_nop,
+ .mode_set = dw_hdmi_bridge_mode_set,
+ .mode_fixup = dw_hdmi_bridge_mode_fixup,
};
-static irqreturn_t imx_hdmi_hardirq(int irq, void *dev_id)
+static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
{
- struct imx_hdmi *hdmi = dev_id;
+ struct dw_hdmi *hdmi = dev_id;
u8 intr_stat;
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
@@ -1504,9 +1476,9 @@ static irqreturn_t imx_hdmi_hardirq(int irq, void *dev_id)
return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
}
-static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
+static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
{
- struct imx_hdmi *hdmi = dev_id;
+ struct dw_hdmi *hdmi = dev_id;
u8 intr_stat;
u8 phy_int_pol;
@@ -1520,14 +1492,14 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
- imx_hdmi_poweron(hdmi);
+ dw_hdmi_poweron(hdmi);
} else {
dev_dbg(hdmi->dev, "EVENT=plugout\n");
hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
- HDMI_PHY_POL0);
+ HDMI_PHY_POL0);
- imx_hdmi_poweroff(hdmi);
+ dw_hdmi_poweroff(hdmi);
}
drm_helper_hpd_irq_event(hdmi->connector.dev);
}
@@ -1538,147 +1510,140 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int imx_hdmi_register(struct drm_device *drm, struct imx_hdmi *hdmi)
+static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
{
+ struct drm_encoder *encoder = hdmi->encoder;
+ struct drm_bridge *bridge;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &hdmi->encoder,
- hdmi->dev->of_node);
- if (ret)
- return ret;
+ bridge = devm_kzalloc(drm->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ DRM_ERROR("Failed to allocate drm bridge\n");
+ return -ENOMEM;
+ }
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ hdmi->bridge = bridge;
+ bridge->driver_private = hdmi;
+ bridge->funcs = &dw_hdmi_bridge_funcs;
+ ret = drm_bridge_attach(drm, bridge);
+ if (ret) {
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ return -EINVAL;
+ }
- drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, &hdmi->encoder, &imx_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ encoder->bridge = bridge;
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_helper_add(&hdmi->connector,
- &imx_hdmi_connector_helper_funcs);
- drm_connector_init(drm, &hdmi->connector, &imx_hdmi_connector_funcs,
+ &dw_hdmi_connector_helper_funcs);
+ drm_connector_init(drm, &hdmi->connector, &dw_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- hdmi->connector.encoder = &hdmi->encoder;
+ hdmi->connector.encoder = encoder;
- drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+ drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
-static struct platform_device_id imx_hdmi_devtype[] = {
- {
- .name = "imx6q-hdmi",
- .driver_data = IMX6Q_HDMI,
- }, {
- .name = "imx6dl-hdmi",
- .driver_data = IMX6DL_HDMI,
- }, { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
-
-static const struct of_device_id imx_hdmi_dt_ids[] = {
-{ .compatible = "fsl,imx6q-hdmi", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
-{ .compatible = "fsl,imx6dl-hdmi", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
-{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
-
-static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
+int dw_hdmi_bind(struct device *dev, struct device *master,
+ void *data, struct drm_encoder *encoder,
+ struct resource *iores, int irq,
+ const struct dw_hdmi_plat_data *plat_data)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct of_device_id *of_id =
- of_match_device(imx_hdmi_dt_ids, dev);
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
struct device_node *ddc_node;
- struct imx_hdmi *hdmi;
- struct resource *iores;
- int ret, irq;
+ struct dw_hdmi *hdmi;
+ int ret;
+ u32 val = 1;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
+ hdmi->plat_data = plat_data;
hdmi->dev = dev;
+ hdmi->dev_type = plat_data->dev_type;
hdmi->sample_rate = 48000;
hdmi->ratio = 100;
+ hdmi->encoder = encoder;
- if (of_id) {
- const struct platform_device_id *device_id = of_id->data;
+ of_property_read_u32(np, "reg-io-width", &val);
- hdmi->dev_type = device_id->driver_data;
+ switch (val) {
+ case 4:
+ hdmi->write = dw_hdmi_writel;
+ hdmi->read = dw_hdmi_readl;
+ break;
+ case 1:
+ hdmi->write = dw_hdmi_writeb;
+ hdmi->read = dw_hdmi_readb;
+ break;
+ default:
+ dev_err(dev, "reg-io-width must be 1 or 4\n");
+ return -EINVAL;
}
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
- if (!hdmi->ddc)
+ of_node_put(ddc_node);
+ if (!hdmi->ddc) {
dev_dbg(hdmi->dev, "failed to read ddc node\n");
+ return -EPROBE_DEFER;
+ }
- of_node_put(ddc_node);
} else {
dev_dbg(hdmi->dev, "no ddc property found\n");
}
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_threaded_irq(dev, irq, imx_hdmi_hardirq,
- imx_hdmi_irq, IRQF_SHARED,
- dev_name(dev), hdmi);
- if (ret)
- return ret;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->regs = devm_ioremap_resource(dev, iores);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
- hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
- if (IS_ERR(hdmi->regmap))
- return PTR_ERR(hdmi->regmap);
-
hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
if (IS_ERR(hdmi->isfr_clk)) {
ret = PTR_ERR(hdmi->isfr_clk);
- dev_err(hdmi->dev,
- "Unable to get HDMI isfr clk: %d\n", ret);
+ dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(hdmi->isfr_clk);
if (ret) {
- dev_err(hdmi->dev,
- "Cannot enable HDMI isfr clock: %d\n", ret);
+ dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
return ret;
}
hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
if (IS_ERR(hdmi->iahb_clk)) {
ret = PTR_ERR(hdmi->iahb_clk);
- dev_err(hdmi->dev,
- "Unable to get HDMI iahb clk: %d\n", ret);
+ dev_err(hdmi->dev, "Unable to get HDMI iahb clk: %d\n", ret);
goto err_isfr;
}
ret = clk_prepare_enable(hdmi->iahb_clk);
if (ret) {
- dev_err(hdmi->dev,
- "Cannot enable HDMI iahb clock: %d\n", ret);
+ dev_err(hdmi->dev, "Cannot enable HDMI iahb clock: %d\n", ret);
goto err_isfr;
}
/* Product and revision IDs */
dev_info(dev,
- "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
- hdmi_readb(hdmi, HDMI_DESIGN_ID),
- hdmi_readb(hdmi, HDMI_REVISION_ID),
- hdmi_readb(hdmi, HDMI_PRODUCT_ID0),
- hdmi_readb(hdmi, HDMI_PRODUCT_ID1));
+ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
+ hdmi_readb(hdmi, HDMI_DESIGN_ID),
+ hdmi_readb(hdmi, HDMI_REVISION_ID),
+ hdmi_readb(hdmi, HDMI_PRODUCT_ID0),
+ hdmi_readb(hdmi, HDMI_PRODUCT_ID1));
initialize_hdmi_ih_mutes(hdmi);
+ ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq,
+ dw_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret)
+ goto err_iahb;
+
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
* N and cts values before enabling phy
@@ -1694,11 +1659,11 @@ static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
/* Clear Hotplug interrupts */
hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
- ret = imx_hdmi_fb_registered(hdmi);
+ ret = dw_hdmi_fb_registered(hdmi);
if (ret)
goto err_iahb;
- ret = imx_hdmi_register(drm, hdmi);
+ ret = dw_hdmi_register(drm, hdmi);
if (ret)
goto err_iahb;
@@ -1716,51 +1681,27 @@ err_isfr:
return ret;
}
+EXPORT_SYMBOL_GPL(dw_hdmi_bind);
-static void imx_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
+void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
{
- struct imx_hdmi *hdmi = dev_get_drvdata(dev);
+ struct dw_hdmi *hdmi = dev_get_drvdata(dev);
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
hdmi->connector.funcs->destroy(&hdmi->connector);
- hdmi->encoder.funcs->destroy(&hdmi->encoder);
+ hdmi->encoder->funcs->destroy(hdmi->encoder);
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
i2c_put_adapter(hdmi->ddc);
}
-
-static const struct component_ops hdmi_ops = {
- .bind = imx_hdmi_bind,
- .unbind = imx_hdmi_unbind,
-};
-
-static int imx_hdmi_platform_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &hdmi_ops);
-}
-
-static int imx_hdmi_platform_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &hdmi_ops);
- return 0;
-}
-
-static struct platform_driver imx_hdmi_driver = {
- .probe = imx_hdmi_platform_probe,
- .remove = imx_hdmi_platform_remove,
- .driver = {
- .name = "imx-hdmi",
- .of_match_table = imx_hdmi_dt_ids,
- },
-};
-
-module_platform_driver(imx_hdmi_driver);
+EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_DESCRIPTION("i.MX6 HDMI transmitter driver");
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("DW HDMI transmitter driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-hdmi");
+MODULE_ALIAS("platform:dw-hdmi");
diff --git a/drivers/gpu/drm/imx/imx-hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h
index 39b677689db6..175dbc89a824 100644
--- a/drivers/gpu/drm/imx/imx-hdmi.h
+++ b/drivers/gpu/drm/bridge/dw_hdmi.h
@@ -837,7 +837,8 @@ enum {
HDMI_PHY_CONF0_PDZ_OFFSET = 7,
HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
- HDMI_PHY_CONF0_SPARECTRL = 0x20,
+ HDMI_PHY_CONF0_SPARECTRL_MASK = 0x20,
+ HDMI_PHY_CONF0_SPARECTRL_OFFSET = 5,
HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
@@ -1029,4 +1030,5 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
};
+
#endif /* __IMX_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index d466696ed5e8..826833e396f0 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -13,20 +13,23 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
+#include <linux/of_graph.h>
-#include "drmP.h"
-#include "drm_edid.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include <drm/drm_panel.h>
#include "bridge/ptn3460.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "drmP.h"
+
#define PTN3460_EDID_ADDR 0x0
#define PTN3460_EDID_EMULATION_ADDR 0x84
#define PTN3460_EDID_ENABLE_EMULATION 0
@@ -36,15 +39,27 @@
struct ptn3460_bridge {
struct drm_connector connector;
struct i2c_client *client;
- struct drm_encoder *encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
struct edid *edid;
- int gpio_pd_n;
- int gpio_rst_n;
+ struct drm_panel *panel;
+ struct gpio_desc *gpio_pd_n;
+ struct gpio_desc *gpio_rst_n;
u32 edid_emulation;
bool enabled;
};
+static inline struct ptn3460_bridge *
+ bridge_to_ptn3460(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ptn3460_bridge, bridge);
+}
+
+static inline struct ptn3460_bridge *
+ connector_to_ptn3460(struct drm_connector *connector)
+{
+ return container_of(connector, struct ptn3460_bridge, connector);
+}
+
static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
u8 *buf, int len)
{
@@ -92,7 +107,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR,
ptn_bridge->edid_emulation);
if (ret) {
- DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret);
+ DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret);
return ret;
}
@@ -102,7 +117,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val);
if (ret) {
- DRM_ERROR("Failed to write edid value, ret=%d\n", ret);
+ DRM_ERROR("Failed to write EDID value, ret=%d\n", ret);
return ret;
}
@@ -111,19 +126,21 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
static void ptn3460_pre_enable(struct drm_bridge *bridge)
{
- struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+ struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
if (ptn_bridge->enabled)
return;
- if (gpio_is_valid(ptn_bridge->gpio_pd_n))
- gpio_set_value(ptn_bridge->gpio_pd_n, 1);
+ gpiod_set_value(ptn_bridge->gpio_pd_n, 1);
+
+ gpiod_set_value(ptn_bridge->gpio_rst_n, 0);
+ usleep_range(10, 20);
+ gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
- if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
- gpio_set_value(ptn_bridge->gpio_rst_n, 0);
- udelay(10);
- gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+ if (drm_panel_prepare(ptn_bridge->panel)) {
+ DRM_ERROR("failed to prepare panel\n");
+ return;
}
/*
@@ -135,73 +152,67 @@ static void ptn3460_pre_enable(struct drm_bridge *bridge)
ret = ptn3460_select_edid(ptn_bridge);
if (ret)
- DRM_ERROR("Select edid failed ret=%d\n", ret);
+ DRM_ERROR("Select EDID failed ret=%d\n", ret);
ptn_bridge->enabled = true;
}
static void ptn3460_enable(struct drm_bridge *bridge)
{
+ struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
+
+ if (drm_panel_enable(ptn_bridge->panel)) {
+ DRM_ERROR("failed to enable panel\n");
+ return;
+ }
}
static void ptn3460_disable(struct drm_bridge *bridge)
{
- struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+ struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
if (!ptn_bridge->enabled)
return;
ptn_bridge->enabled = false;
- if (gpio_is_valid(ptn_bridge->gpio_rst_n))
- gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+ if (drm_panel_disable(ptn_bridge->panel)) {
+ DRM_ERROR("failed to disable panel\n");
+ return;
+ }
- if (gpio_is_valid(ptn_bridge->gpio_pd_n))
- gpio_set_value(ptn_bridge->gpio_pd_n, 0);
+ gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
+ gpiod_set_value(ptn_bridge->gpio_pd_n, 0);
}
static void ptn3460_post_disable(struct drm_bridge *bridge)
{
-}
+ struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
-void ptn3460_bridge_destroy(struct drm_bridge *bridge)
-{
- struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
-
- drm_bridge_cleanup(bridge);
- if (gpio_is_valid(ptn_bridge->gpio_pd_n))
- gpio_free(ptn_bridge->gpio_pd_n);
- if (gpio_is_valid(ptn_bridge->gpio_rst_n))
- gpio_free(ptn_bridge->gpio_rst_n);
- /* Nothing else to free, we've got devm allocated memory */
+ if (drm_panel_unprepare(ptn_bridge->panel)) {
+ DRM_ERROR("failed to unprepare panel\n");
+ return;
+ }
}
-struct drm_bridge_funcs ptn3460_bridge_funcs = {
- .pre_enable = ptn3460_pre_enable,
- .enable = ptn3460_enable,
- .disable = ptn3460_disable,
- .post_disable = ptn3460_post_disable,
- .destroy = ptn3460_bridge_destroy,
-};
-
-int ptn3460_get_modes(struct drm_connector *connector)
+static int ptn3460_get_modes(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge;
u8 *edid;
- int ret, num_modes;
+ int ret, num_modes = 0;
bool power_off;
- ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+ ptn_bridge = connector_to_ptn3460(connector);
if (ptn_bridge->edid)
return drm_add_edid_modes(connector, ptn_bridge->edid);
power_off = !ptn_bridge->enabled;
- ptn3460_pre_enable(ptn_bridge->bridge);
+ ptn3460_pre_enable(&ptn_bridge->bridge);
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid) {
- DRM_ERROR("Failed to allocate edid\n");
+ DRM_ERROR("Failed to allocate EDID\n");
return 0;
}
@@ -209,7 +220,6 @@ int ptn3460_get_modes(struct drm_connector *connector)
EDID_LENGTH);
if (ret) {
kfree(edid);
- num_modes = 0;
goto out;
}
@@ -220,124 +230,188 @@ int ptn3460_get_modes(struct drm_connector *connector)
out:
if (power_off)
- ptn3460_disable(ptn_bridge->bridge);
+ ptn3460_disable(&ptn_bridge->bridge);
return num_modes;
}
-struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
{
- struct ptn3460_bridge *ptn_bridge;
-
- ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+ struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
- return ptn_bridge->encoder;
+ return ptn_bridge->bridge.encoder;
}
-struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
+static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
.best_encoder = ptn3460_best_encoder,
};
-enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
+static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
bool force)
{
return connector_status_connected;
}
-void ptn3460_connector_destroy(struct drm_connector *connector)
+static void ptn3460_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
}
-struct drm_connector_funcs ptn3460_connector_funcs = {
+static struct drm_connector_funcs ptn3460_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ptn3460_detect,
.destroy = ptn3460_connector_destroy,
};
-int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
- struct i2c_client *client, struct device_node *node)
+int ptn3460_bridge_attach(struct drm_bridge *bridge)
{
+ struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
- struct drm_bridge *bridge;
- struct ptn3460_bridge *ptn_bridge;
- bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("Failed to allocate drm bridge\n");
- return -ENOMEM;
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(bridge->dev, &ptn_bridge->connector,
+ &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
}
+ drm_connector_helper_add(&ptn_bridge->connector,
+ &ptn3460_connector_helper_funcs);
+ drm_connector_register(&ptn_bridge->connector);
+ drm_mode_connector_attach_encoder(&ptn_bridge->connector,
+ bridge->encoder);
- ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL);
+ if (ptn_bridge->panel)
+ drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector);
+
+ drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs ptn3460_bridge_funcs = {
+ .pre_enable = ptn3460_pre_enable,
+ .enable = ptn3460_enable,
+ .disable = ptn3460_disable,
+ .post_disable = ptn3460_post_disable,
+ .attach = ptn3460_bridge_attach,
+};
+
+static int ptn3460_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ptn3460_bridge *ptn_bridge;
+ struct device_node *endpoint, *panel_node;
+ int ret;
+
+ ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
if (!ptn_bridge) {
- DRM_ERROR("Failed to allocate ptn bridge\n");
return -ENOMEM;
}
- ptn_bridge->client = client;
- ptn_bridge->encoder = encoder;
- ptn_bridge->bridge = bridge;
- ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0);
- if (gpio_is_valid(ptn_bridge->gpio_pd_n)) {
- ret = gpio_request_one(ptn_bridge->gpio_pd_n,
- GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N");
- if (ret) {
- DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret);
- return ret;
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (panel_node) {
+ ptn_bridge->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!ptn_bridge->panel)
+ return -EPROBE_DEFER;
}
}
- ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0);
- if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
- /*
- * Request the reset pin low to avoid the bridge being
- * initialized prematurely
- */
- ret = gpio_request_one(ptn_bridge->gpio_rst_n,
- GPIOF_OUT_INIT_LOW, "PTN3460_RST_N");
- if (ret) {
- DRM_ERROR("Request reset-gpio failed (%d)\n", ret);
- gpio_free(ptn_bridge->gpio_pd_n);
- return ret;
- }
+ ptn_bridge->client = client;
+
+ ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown");
+ if (IS_ERR(ptn_bridge->gpio_pd_n)) {
+ ret = PTR_ERR(ptn_bridge->gpio_pd_n);
+ dev_err(dev, "cannot get gpio_pd_n %d\n", ret);
+ return ret;
}
- ret = of_property_read_u32(node, "edid-emulation",
- &ptn_bridge->edid_emulation);
+ ret = gpiod_direction_output(ptn_bridge->gpio_pd_n, 1);
if (ret) {
- DRM_ERROR("Can't read edid emulation value\n");
- goto err;
+ DRM_ERROR("cannot configure gpio_pd_n\n");
+ return ret;
}
- ret = drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs);
+ ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset");
+ if (IS_ERR(ptn_bridge->gpio_rst_n)) {
+ ret = PTR_ERR(ptn_bridge->gpio_rst_n);
+ DRM_ERROR("cannot get gpio_rst_n %d\n", ret);
+ return ret;
+ }
+ /*
+ * Request the reset pin low to avoid the bridge being
+ * initialized prematurely
+ */
+ ret = gpiod_direction_output(ptn_bridge->gpio_rst_n, 0);
if (ret) {
- DRM_ERROR("Failed to initialize bridge with drm\n");
- goto err;
+ DRM_ERROR("cannot configure gpio_rst_n\n");
+ return ret;
}
- bridge->driver_private = ptn_bridge;
- encoder->bridge = bridge;
+ ret = of_property_read_u32(dev->of_node, "edid-emulation",
+ &ptn_bridge->edid_emulation);
+ if (ret) {
+ dev_err(dev, "Can't read EDID emulation value\n");
+ return ret;
+ }
- ret = drm_connector_init(dev, &ptn_bridge->connector,
- &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
+ ptn_bridge->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&ptn_bridge->bridge);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- goto err;
+ DRM_ERROR("Failed to add bridge\n");
+ return ret;
}
- drm_connector_helper_add(&ptn_bridge->connector,
- &ptn3460_connector_helper_funcs);
- drm_connector_register(&ptn_bridge->connector);
- drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
+
+ i2c_set_clientdata(client, ptn_bridge);
return 0;
+}
-err:
- if (gpio_is_valid(ptn_bridge->gpio_pd_n))
- gpio_free(ptn_bridge->gpio_pd_n);
- if (gpio_is_valid(ptn_bridge->gpio_rst_n))
- gpio_free(ptn_bridge->gpio_rst_n);
- return ret;
+static int ptn3460_remove(struct i2c_client *client)
+{
+ struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&ptn_bridge->bridge);
+
+ return 0;
}
-EXPORT_SYMBOL(ptn3460_init);
+
+static const struct i2c_device_id ptn3460_i2c_table[] = {
+ {"nxp,ptn3460", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);
+
+static const struct of_device_id ptn3460_match[] = {
+ { .compatible = "nxp,ptn3460" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ptn3460_match);
+
+static struct i2c_driver ptn3460_driver = {
+ .id_table = ptn3460_i2c_table,
+ .probe = ptn3460_probe,
+ .remove = ptn3460_remove,
+ .driver = {
+ .name = "nxp,ptn3460",
+ .owner = THIS_MODULE,
+ .of_match_table = ptn3460_match,
+ },
+};
+module_i2c_driver(ptn3460_driver);
+
+MODULE_AUTHOR("Sean Paul <seanpaul@chromium.org>");
+MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index c2a1cba1e984..b9140032962d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,9 +16,12 @@
#include "cirrus_drv.h"
int cirrus_modeset = -1;
+int cirrus_bpp = 24;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
/*
* This is the generic driver code. This binds the driver to the drm core,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 693a4565c4ff..705061537a27 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
int cirrus_bo_push_sysram(struct cirrus_bo *bo);
int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 502a89eb54b5..13ddf1c4bb8e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -317,17 +317,17 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
- if (ret) {
- kfree(gfbdev);
+ if (ret)
+ return ret;
+
+ ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+ if (ret)
return ret;
- }
- drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(cdev->dev);
- drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
- return 0;
+ return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
}
void cirrus_fbdev_fini(struct cirrus_device *cdev)
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4c2d68e9102d..e4b976658087 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
const int max_size = cdev->mc.vram_size;
+ if (bpp > cirrus_bpp)
+ return false;
if (bpp > 32)
return false;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 99d4a74ffeaf..61385f2298bf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
int count;
/* Just add a static list of modes */
- count = drm_add_modes_noedid(connector, 1280, 1024);
- drm_set_preferred_mode(connector, 1024, 768);
+ if (cirrus_bpp <= 24) {
+ count = drm_add_modes_noedid(connector, 1280, 1024);
+ drm_set_preferred_mode(connector, 1024, 768);
+ } else {
+ count = drm_add_modes_noedid(connector, 800, 600);
+ drm_set_preferred_mode(connector, 800, 600);
+ }
return count;
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index ff5f034cc405..c2e9c5283136 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -56,6 +56,11 @@ drm_atomic_state_alloc(struct drm_device *dev)
if (!state)
return NULL;
+ /* TODO legacy paths should maybe do a better job about
+ * setting this appropriately?
+ */
+ state->allow_modeset = true;
+
state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
state->crtcs = kcalloc(dev->mode_config.num_crtc,
@@ -129,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
connector->funcs->atomic_destroy_state(connector,
state->connector_states[i]);
+ state->connector_states[i] = NULL;
}
for (i = 0; i < config->num_crtc; i++) {
@@ -139,6 +145,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtc_states[i]);
+ state->crtc_states[i] = NULL;
}
for (i = 0; i < config->num_total_plane; i++) {
@@ -149,6 +156,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
plane->funcs->atomic_destroy_state(plane,
state->plane_states[i]);
+ state->plane_states[i] = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_clear);
@@ -217,6 +225,83 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
/**
+ * drm_atomic_crtc_set_property - set property on CRTC
+ * @crtc: the drm CRTC to set a property on
+ * @state: the state object to update with the new property value
+ * @property: the property to set
+ * @val: the new property value
+ *
+ * Use this instead of calling crtc->atomic_set_property directly.
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_set_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ /* FIXME: Mode prop is missing, which also controls ->enable. */
+ if (property == config->prop_active)
+ state->active = val;
+ else if (crtc->funcs->atomic_set_property)
+ return crtc->funcs->atomic_set_property(crtc, state, property, val);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_crtc_set_property);
+
+/*
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_get_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ */
+int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ if (crtc->funcs->atomic_get_property)
+ return crtc->funcs->atomic_get_property(crtc, state, property, val);
+ return -EINVAL;
+}
+
+/**
+ * drm_atomic_crtc_check - check crtc state
+ * @crtc: crtc to check
+ * @state: crtc state to check
+ *
+ * Provides core sanity checks for crtc state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ /* NOTE: we explicitly don't enforce constraints such as primary
+ * layer covering entire screen, since that is something we want
+ * to allow (on hw that supports it). For hw that does not, it
+ * should be checked in driver's crtc->atomic_check() vfunc.
+ *
+ * TODO: Add generic modeset state checks once we support those.
+ */
+
+ if (state->active && !state->enable) {
+ DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
@@ -272,6 +357,185 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_plane_state);
/**
+ * drm_atomic_plane_set_property - set property on plane
+ * @plane: the drm plane to set a property on
+ * @state: the state object to update with the new property value
+ * @property: the property to set
+ * @val: the new property value
+ *
+ * Use this instead of calling plane->atomic_set_property directly.
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_set_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_fb_id) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
+ drm_atomic_set_fb_for_plane(state, fb);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ } else if (property == config->prop_crtc_id) {
+ struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ return drm_atomic_set_crtc_for_plane(state, crtc);
+ } else if (property == config->prop_crtc_x) {
+ state->crtc_x = U642I64(val);
+ } else if (property == config->prop_crtc_y) {
+ state->crtc_y = U642I64(val);
+ } else if (property == config->prop_crtc_w) {
+ state->crtc_w = val;
+ } else if (property == config->prop_crtc_h) {
+ state->crtc_h = val;
+ } else if (property == config->prop_src_x) {
+ state->src_x = val;
+ } else if (property == config->prop_src_y) {
+ state->src_y = val;
+ } else if (property == config->prop_src_w) {
+ state->src_w = val;
+ } else if (property == config->prop_src_h) {
+ state->src_h = val;
+ } else if (property == config->rotation_property) {
+ state->rotation = val;
+ } else if (plane->funcs->atomic_set_property) {
+ return plane->funcs->atomic_set_property(plane, state,
+ property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_plane_set_property);
+
+/*
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_get_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ */
+static int
+drm_atomic_plane_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_fb_id) {
+ *val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_crtc_id) {
+ *val = (state->crtc) ? state->crtc->base.id : 0;
+ } else if (property == config->prop_crtc_x) {
+ *val = I642U64(state->crtc_x);
+ } else if (property == config->prop_crtc_y) {
+ *val = I642U64(state->crtc_y);
+ } else if (property == config->prop_crtc_w) {
+ *val = state->crtc_w;
+ } else if (property == config->prop_crtc_h) {
+ *val = state->crtc_h;
+ } else if (property == config->prop_src_x) {
+ *val = state->src_x;
+ } else if (property == config->prop_src_y) {
+ *val = state->src_y;
+ } else if (property == config->prop_src_w) {
+ *val = state->src_w;
+ } else if (property == config->prop_src_h) {
+ *val = state->src_h;
+ } else if (plane->funcs->atomic_get_property) {
+ return plane->funcs->atomic_get_property(plane, state, property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_atomic_plane_check - check plane state
+ * @plane: plane to check
+ * @state: plane state to check
+ *
+ * Provides core sanity checks for plane state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_plane_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ unsigned int fb_width, fb_height;
+ unsigned int i;
+
+ /* either *both* CRTC and FB must be set, or neither */
+ if (WARN_ON(state->crtc && !state->fb)) {
+ DRM_DEBUG_KMS("CRTC set but no FB\n");
+ return -EINVAL;
+ } else if (WARN_ON(state->fb && !state->crtc)) {
+ DRM_DEBUG_KMS("FB set but no CRTC\n");
+ return -EINVAL;
+ }
+
+ /* if disabled, we don't care about the rest of the state: */
+ if (!state->crtc)
+ return 0;
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ return -EINVAL;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (state->fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format));
+ return -EINVAL;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (state->crtc_w > INT_MAX ||
+ state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
+ state->crtc_h > INT_MAX ||
+ state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ state->crtc_w, state->crtc_h,
+ state->crtc_x, state->crtc_y);
+ return -ERANGE;
+ }
+
+ fb_width = state->fb->width << 16;
+ fb_height = state->fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (state->src_w > fb_width ||
+ state->src_x > fb_width - state->src_w ||
+ state->src_h > fb_height ||
+ state->src_y > fb_height - state->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
+ state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
+ state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
+ state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -343,9 +607,113 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_connector_state);
/**
+ * drm_atomic_connector_set_property - set property on connector.
+ * @connector: the drm connector to set a property on
+ * @state: the state object to update with the new property value
+ * @property: the property to set
+ * @val: the new property value
+ *
+ * Use this instead of calling connector->atomic_set_property directly.
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_set_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_crtc_id) {
+ struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ return drm_atomic_set_crtc_for_connector(state, crtc);
+ } else if (property == config->dpms_property) {
+ /* setting DPMS property requires special handling, which
+ * is done in legacy setprop path for us. Disallow (for
+ * now?) atomic writes to DPMS property:
+ */
+ return -EINVAL;
+ } else if (connector->funcs->atomic_set_property) {
+ return connector->funcs->atomic_set_property(connector,
+ state, property, val);
+ } else {
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_connector_set_property);
+
+/*
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_get_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ */
+static int
+drm_atomic_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_crtc_id) {
+ *val = (state->crtc) ? state->crtc->base.id : 0;
+ } else if (property == config->dpms_property) {
+ *val = connector->dpms;
+ } else if (connector->funcs->atomic_get_property) {
+ return connector->funcs->atomic_get_property(connector,
+ state, property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int drm_atomic_get_property(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = property->dev;
+ int ret;
+
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR: {
+ struct drm_connector *connector = obj_to_connector(obj);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ ret = drm_atomic_connector_get_property(connector,
+ connector->state, property, val);
+ break;
+ }
+ case DRM_MODE_OBJECT_CRTC: {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ ret = drm_atomic_crtc_get_property(crtc,
+ crtc->state, property, val);
+ break;
+ }
+ case DRM_MODE_OBJECT_PLANE: {
+ struct drm_plane *plane = obj_to_plane(obj);
+ WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+ ret = drm_atomic_plane_get_property(plane,
+ plane->state, property, val);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
* drm_atomic_set_crtc_for_plane - set crtc for plane
- * @state: the incoming atomic state
- * @plane: the plane whose incoming state to update
+ * @plane_state: the plane whose incoming state to update
* @crtc: crtc to use for the plane
*
* Changing the assigned crtc for a plane requires us to grab the lock and state
@@ -358,16 +726,12 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state);
* sequence must be restarted. All other errors are fatal.
*/
int
-drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
- struct drm_plane *plane, struct drm_crtc *crtc)
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc)
{
- struct drm_plane_state *plane_state =
- drm_atomic_get_plane_state(state, plane);
+ struct drm_plane *plane = plane_state->plane;
struct drm_crtc_state *crtc_state;
- if (WARN_ON(IS_ERR(plane_state)))
- return PTR_ERR(plane_state);
-
if (plane_state->crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
plane_state->crtc);
@@ -583,14 +947,63 @@ EXPORT_SYMBOL(drm_atomic_legacy_backoff);
*/
int drm_atomic_check_only(struct drm_atomic_state *state)
{
- struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int nplanes = config->num_total_plane;
+ int ncrtcs = config->num_crtc;
+ int i, ret = 0;
DRM_DEBUG_KMS("checking %p\n", state);
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ ret = drm_atomic_plane_check(plane, state->plane_states[i]);
+ if (ret) {
+ DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n",
+ plane->base.id);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
+ if (ret) {
+ DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n",
+ crtc->base.id);
+ return ret;
+ }
+ }
+
if (config->funcs->atomic_check)
- return config->funcs->atomic_check(state->dev, state);
- else
- return 0;
+ ret = config->funcs->atomic_check(state->dev, state);
+
+ if (!state->allow_modeset) {
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ if (crtc_state->mode_changed ||
+ crtc_state->active_changed) {
+ DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -655,3 +1068,315 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_async_commit);
+
+/*
+ * The big monster ioctl
+ */
+
+static struct drm_pending_vblank_event *create_vblank_event(
+ struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+{
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+out:
+ return e;
+}
+
+static void destroy_vblank_event(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_pending_vblank_event *e)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+}
+
+static int atomic_set_prop(struct drm_atomic_state *state,
+ struct drm_mode_object *obj, struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_mode_object *ref;
+ int ret;
+
+ if (!drm_property_change_valid_get(prop, prop_value, &ref))
+ return -EINVAL;
+
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR: {
+ struct drm_connector *connector = obj_to_connector(obj);
+ struct drm_connector_state *connector_state;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ break;
+ }
+
+ ret = drm_atomic_connector_set_property(connector,
+ connector_state, prop, prop_value);
+ break;
+ }
+ case DRM_MODE_OBJECT_CRTC: {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+
+ ret = drm_atomic_crtc_set_property(crtc,
+ crtc_state, prop, prop_value);
+ break;
+ }
+ case DRM_MODE_OBJECT_PLANE: {
+ struct drm_plane *plane = obj_to_plane(obj);
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ break;
+ }
+
+ ret = drm_atomic_plane_set_property(plane,
+ plane_state, prop, prop_value);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ drm_property_change_valid_put(prop, ref);
+ return ret;
+}
+
+int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_atomic *arg = data;
+ uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
+ uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
+ uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
+ unsigned int copied_objs, copied_props;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_plane *plane;
+ unsigned plane_mask = 0;
+ int ret = 0;
+ unsigned int i, j;
+
+ /* disallow for drivers not supporting atomic: */
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return -EINVAL;
+
+ /* disallow for userspace that has not enabled atomic cap (even
+ * though this may be a bit overkill, since legacy userspace
+ * wouldn't know how to call this ioctl)
+ */
+ if (!file_priv->atomic)
+ return -EINVAL;
+
+ if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
+ return -EINVAL;
+
+ if (arg->reserved)
+ return -EINVAL;
+
+ if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
+ !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
+ /* can't test and expect an event at the same time. */
+ if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
+ (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = &ctx;
+ state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
+
+retry:
+ copied_objs = 0;
+ copied_props = 0;
+
+ for (i = 0; i < arg->count_objs; i++) {
+ uint32_t obj_id, count_props;
+ struct drm_mode_object *obj;
+
+ if (get_user(obj_id, objs_ptr + copied_objs)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
+ if (!obj || !obj->properties) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ if (obj->type == DRM_MODE_OBJECT_PLANE) {
+ plane = obj_to_plane(obj);
+ plane_mask |= (1 << drm_plane_index(plane));
+ plane->old_fb = plane->fb;
+ }
+
+ if (get_user(count_props, count_props_ptr + copied_objs)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ copied_objs++;
+
+ for (j = 0; j < count_props; j++) {
+ uint32_t prop_id;
+ uint64_t prop_value;
+ struct drm_property *prop;
+
+ if (get_user(prop_id, props_ptr + copied_props)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ prop = drm_property_find(dev, prop_id);
+ if (!prop) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ if (copy_from_user(&prop_value,
+ prop_values_ptr + copied_props,
+ sizeof(prop_value))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ret = atomic_set_prop(state, obj, prop, prop_value);
+ if (ret)
+ goto fail;
+
+ copied_props++;
+ }
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ int ncrtcs = dev->mode_config.num_crtc;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+ struct drm_pending_vblank_event *e;
+
+ if (!crtc_state)
+ continue;
+
+ e = create_vblank_event(dev, file_priv, arg->user_data);
+ if (!e) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc_state->event = e;
+ }
+ }
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
+ ret = drm_atomic_check_only(state);
+ /* _check_only() does not free state, unlike _commit() */
+ drm_atomic_state_free(state);
+ } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+ ret = drm_atomic_async_commit(state);
+ } else {
+ ret = drm_atomic_commit(state);
+ }
+
+ /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+ * locks (ie. while it is still safe to deref plane->state). We
+ * need to do this here because the driver entry points cannot
+ * distinguish between legacy and atomic ioctls.
+ */
+ drm_for_each_plane_mask(plane, dev, plane_mask) {
+ if (ret == 0) {
+ struct drm_framebuffer *new_fb = plane->state->fb;
+ if (new_fb)
+ drm_framebuffer_reference(new_fb);
+ plane->fb = new_fb;
+ plane->crtc = plane->state->crtc;
+ } else {
+ plane->old_fb = NULL;
+ }
+ if (plane->old_fb) {
+ drm_framebuffer_unreference(plane->old_fb);
+ plane->old_fb = NULL;
+ }
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ int ncrtcs = dev->mode_config.num_crtc;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+ if (!crtc_state)
+ continue;
+
+ destroy_vblank_event(dev, file_priv, crtc_state->event);
+ crtc_state->event = NULL;
+ }
+ }
+
+ drm_atomic_state_free(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+backoff:
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+
+ goto retry;
+}
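Not part of the patch: a userspace-side sketch of the array layout drm_mode_atomic_ioctl() walks above (one object, two properties). The object and property IDs are placeholders that would normally be discovered through the GETPLANERESOURCES/GETPROPERTY ioctls, and DRM_CLIENT_CAP_ATOMIC is assumed to have been enabled with drmSetClientCap(); libdrm's drmModeAtomic*() helpers are the usual way to build this.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

int example_flip_one_plane(int fd, uint32_t plane_id,
			   uint32_t prop_fb_id, uint32_t fb_id,
			   uint32_t prop_crtc_id, uint32_t crtc_id)
{
	uint32_t objs[1]        = { plane_id };			/* objs_ptr */
	uint32_t count_props[1] = { 2 };			/* count_props_ptr */
	uint32_t props[2]       = { prop_fb_id, prop_crtc_id };	/* props_ptr */
	uint64_t values[2]      = { fb_id, crtc_id };		/* prop_values_ptr */
	struct drm_mode_atomic arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags           = DRM_MODE_ATOMIC_NONBLOCK;
	arg.count_objs      = 1;
	arg.objs_ptr        = (uintptr_t)objs;
	arg.count_props_ptr = (uintptr_t)count_props;
	arg.props_ptr       = (uintptr_t)props;
	arg.prop_values_ptr = (uintptr_t)values;

	return drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &arg);
}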
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index bbdbe4721573..7e3a52b97c7d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -297,13 +297,22 @@ mode_fixup(struct drm_atomic_state *state)
}
}
-
- ret = funcs->mode_fixup(encoder, &crtc_state->mode,
- &crtc_state->adjusted_mode);
- if (!ret) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
- encoder->base.id, encoder->name);
- return -EINVAL;
+ if (funcs->atomic_check) {
+ ret = funcs->atomic_check(encoder, crtc_state,
+ conn_state);
+ if (ret) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n",
+ encoder->base.id, encoder->name);
+ return ret;
+ }
+ } else {
+ ret = funcs->mode_fixup(encoder, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
+ encoder->base.id, encoder->name);
+ return -EINVAL;
+ }
}
}
@@ -330,7 +339,35 @@ mode_fixup(struct drm_atomic_state *state)
return 0;
}
-static int
+static bool
+needs_modeset(struct drm_crtc_state *state)
+{
+ return state->mode_changed || state->active_changed;
+}
+
+/**
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * This does all the crtc and connector related computations for an atomic
+ * update. It computes and updates crtc_state->mode_changed, adds any additional
+ * connectors needed for full modesets and calls down into ->mode_fixup
+ * functions of the driver backend.
+ *
+ * IMPORTANT:
+ *
+ * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * plane update can't be done without a full modeset) _must_ call this function
+ * again after that change. It is permitted to call this function multiple
+ * times for the same update, e.g. when the ->atomic_check functions depend upon
+ * the adjusted dotclock for fifo space allocation and watermark computation.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -382,12 +419,27 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
crtc = state->crtcs[i];
crtc_state = state->crtc_states[i];
- if (!crtc || !crtc_state->mode_changed)
+ if (!crtc)
continue;
- DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
+ /*
+ * We must set ->active_changed after walking connectors; otherwise an
+ * update that only changes ->active would result in a full modeset,
+ * because update_connector_routing() forces one.
+ */
+ if (crtc->state->active != crtc_state->active) {
+ DRM_DEBUG_KMS("[CRTC:%d] active changed\n",
+ crtc->base.id);
+ crtc_state->active_changed = true;
+ }
+
+ if (!needs_modeset(crtc_state))
+ continue;
+
+ DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
crtc->base.id,
- crtc_state->enable ? 'y' : 'n');
+ crtc_state->enable ? 'y' : 'n',
+ crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret != 0)
@@ -406,23 +458,23 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return mode_fixup(state);
}
+EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check - validate state object
+ * drm_atomic_helper_check_planes - validate state object for plane changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
- * Only crtcs and planes have check callbacks, so for any additional (global)
- * checking that a driver needs it can simply wrap that around this function.
- * Drivers without such needs can directly use this as their ->atomic_check()
- * callback.
+ * This does all the plane update related checks by calling into the
+ * ->atomic_check hooks provided by the driver.
*
* RETURNS
* Zero for success or -errno
*/
-int drm_atomic_helper_check(struct drm_device *dev,
- struct drm_atomic_state *state)
+int
+drm_atomic_helper_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
@@ -445,7 +497,7 @@ int drm_atomic_helper_check(struct drm_device *dev,
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
+ DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n",
plane->base.id);
return ret;
}
@@ -465,16 +517,49 @@ int drm_atomic_helper_check(struct drm_device *dev,
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
- DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
+ DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n",
crtc->base.id);
return ret;
}
}
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_planes);
+
+/**
+ * drm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * Only crtcs and planes have check callbacks, so for any additional (global)
+ * checking that a driver needs it can simply wrap that around this function.
+ * Drivers without such needs can directly use this as their ->atomic_check()
+ * callback.
+ *
+ * This just wraps the two parts of the state checking for planes and modeset
+ * state in the default order: First it calls drm_atomic_helper_check_modeset()
+ * and then drm_atomic_helper_check_planes(). The assumption is that the
+ * ->atomic_check functions depend upon an updated adjusted_mode.clock to
+ * e.g. properly compute watermarks.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
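Not part of the patch: the "wrap it for additional (global) checks" pattern that the kernel-doc above describes, sketched for a hypothetical driver; foo_check_bandwidth() is made up and the function would be wired up as the driver's drm_mode_config_funcs.atomic_check.

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

static int foo_check_bandwidth(struct drm_device *dev,
			       struct drm_atomic_state *state);

static int foo_atomic_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	/* Standard modeset and plane checks first ... */
	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* ... then a hypothetical device-wide constraint, e.g. shared memory bandwidth. */
	return foo_check_bandwidth(dev, state);
}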
@@ -490,6 +575,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_connector *connector;
struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_crtc_state *old_crtc_state;
old_conn_state = old_state->connector_states[i];
connector = old_state->connectors[i];
@@ -499,6 +585,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state || !old_conn_state->crtc)
continue;
+ old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
+
+ if (!old_crtc_state->active)
+ continue;
+
encoder = old_conn_state->best_encoder;
/* We shouldn't get this far if we didn't previously have
@@ -509,6 +600,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = encoder->helper_private;
+ DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
@@ -517,7 +611,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
encoder->bridge->funcs->disable(encoder->bridge);
/* Right function depends upon target state. */
- if (connector->state->crtc)
+ if (connector->state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
funcs->disable(encoder);
@@ -531,17 +625,26 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc_helper_funcs *funcs;
struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
crtc = old_state->crtcs[i];
+ old_crtc_state = old_state->crtc_states[i];
/* Shut down everything that needs a full modeset. */
- if (!crtc || !crtc->state->mode_changed)
+ if (!crtc || !needs_modeset(crtc->state))
+ continue;
+
+ if (!old_crtc_state->active)
continue;
funcs = crtc->helper_private;
+ DRM_DEBUG_KMS("disabling [CRTC:%d]\n",
+ crtc->base.id);
+
+
/* Right function depends upon target state. */
- if (crtc->state->enable)
+ if (crtc->state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->disable)
funcs->disable(crtc);
@@ -620,8 +723,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
- if (crtc->state->enable)
+ if (crtc->state->enable) {
+ DRM_DEBUG_KMS("modeset on [CRTC:%d]\n",
+ crtc->base.id);
+
funcs->mode_set_nofb(crtc);
+ }
}
for (i = 0; i < old_state->num_connector; i++) {
@@ -642,6 +749,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
+ if (!new_crtc_state->mode_changed)
+ continue;
+
+ DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
@@ -694,13 +807,23 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
crtc = old_state->crtcs[i];
/* Need to filter out CRTCs where only planes change. */
- if (!crtc || !crtc->state->mode_changed)
+ if (!crtc || !needs_modeset(crtc->state))
+ continue;
+
+ if (!crtc->state->active)
continue;
funcs = crtc->helper_private;
- if (crtc->state->enable)
- funcs->commit(crtc);
+ if (crtc->state->enable) {
+ DRM_DEBUG_KMS("enabling [CRTC:%d]\n",
+ crtc->base.id);
+
+ if (funcs->enable)
+ funcs->enable(crtc);
+ else
+ funcs->commit(crtc);
+ }
}
for (i = 0; i < old_state->num_connector; i++) {
@@ -713,9 +836,15 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
if (!connector || !connector->state->best_encoder)
continue;
+ if (!connector->state->crtc->state->active)
+ continue;
+
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
+ DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
@@ -723,7 +852,10 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
if (encoder->bridge)
encoder->bridge->funcs->pre_enable(encoder->bridge);
- funcs->commit(encoder);
+ if (funcs->enable)
+ funcs->enable(encoder);
+ else
+ funcs->commit(encoder);
if (encoder->bridge)
encoder->bridge->funcs->enable(encoder->bridge);
@@ -813,6 +945,11 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
if (!crtc->state->enable)
continue;
+ /* Legacy cursor ioctls are completely unsynced, and userspace
+ * relies on that (by doing tons of cursor updates). */
+ if (old_state->legacy_cursor_update)
+ continue;
+
if (!framebuffer_changed(dev, old_state, crtc))
continue;
@@ -1053,12 +1190,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs = plane->helper_private;
- if (!funcs || !funcs->atomic_update)
+ if (!funcs)
continue;
old_plane_state = old_state->plane_states[i];
- funcs->atomic_update(plane, old_plane_state);
+ /*
+ * Special-case disabling the plane if drivers support it.
+ */
+ if (drm_atomic_plane_disabling(plane, old_plane_state) &&
+ funcs->atomic_disable)
+ funcs->atomic_disable(plane, old_plane_state);
+ else
+ funcs->atomic_update(plane, old_plane_state);
}
for (i = 0; i < ncrtcs; i++) {
@@ -1222,7 +1366,7 @@ retry:
goto fail;
}
- ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
@@ -1239,6 +1383,9 @@ retry:
if (ret != 0)
goto fail;
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
@@ -1301,7 +1448,7 @@ retry:
goto fail;
}
- ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, NULL);
@@ -1314,6 +1461,9 @@ retry:
plane_state->src_h = 0;
plane_state->src_w = 0;
+ if (plane == plane->crtc->cursor)
+ state->legacy_cursor_update = true;
+
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
@@ -1463,8 +1613,9 @@ retry:
WARN_ON(set->num_connectors);
crtc_state->enable = false;
+ crtc_state->active = false;
- ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
+ ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
if (ret != 0)
goto fail;
@@ -1477,9 +1628,10 @@ retry:
WARN_ON(!set->num_connectors);
crtc_state->enable = true;
+ crtc_state->active = true;
drm_mode_copy(&crtc_state->mode, set->mode);
- ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
+ ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(primary_state, set->fb);
@@ -1558,8 +1710,8 @@ retry:
goto fail;
}
- ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
- property, val);
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ property, val);
if (ret)
goto fail;
@@ -1617,8 +1769,8 @@ retry:
goto fail;
}
- ret = plane->funcs->atomic_set_property(plane, plane_state,
- property, val);
+ ret = drm_atomic_plane_set_property(plane, plane_state,
+ property, val);
if (ret)
goto fail;
@@ -1676,8 +1828,8 @@ retry:
goto fail;
}
- ret = connector->funcs->atomic_set_property(connector, connector_state,
- property, val);
+ ret = drm_atomic_connector_set_property(connector, connector_state,
+ property, val);
if (ret)
goto fail;
@@ -1751,7 +1903,7 @@ retry:
goto fail;
}
- ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
@@ -1789,6 +1941,83 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
/**
+ * drm_atomic_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * This is the main helper function provided by the atomic helper framework for
+ * implementing the legacy DPMS connector interface. It computes the new desired
+ * ->active state for the corresponding CRTC (if the connector is enabled) and
+ * updates it.
+ */
+void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+ struct drm_mode_config *config = &connector->dev->mode_config;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_connector *tmp_connector;
+ int ret;
+ bool active = false;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ connector->dpms = mode;
+ crtc = connector->state->crtc;
+
+ if (!crtc)
+ return;
+
+ /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
+ state = drm_atomic_state_alloc(connector->dev);
+ if (!state)
+ return;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ list_for_each_entry(tmp_connector, &config->connector_list, head) {
+ if (tmp_connector->state->crtc != crtc)
+ continue;
+
+ if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
+ active = true;
+ break;
+ }
+ }
+ crtc_state->active = active;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful async commit. */
+ return;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
+
+ return;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
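A minimal usage sketch (illustrative only, not part of this patch): a driver opts into the helper by pointing the legacy DPMS hook of its connector funcs at drm_atomic_helper_connector_dpms(). The foo_* names are placeholders for whatever the driver already provides.

static const struct drm_connector_funcs foo_connector_funcs = {
	/* legacy DPMS now funnelled through the atomic ->active state */
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = foo_connector_detect,	/* driver-specific, assumed */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
};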
+
+/**
* DOC: atomic state reset and initialization
*
* Both the drm core and the atomic helpers assume that there is always the full
@@ -1814,6 +2043,9 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->state->crtc = crtc;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
@@ -1836,6 +2068,7 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
if (state) {
state->mode_changed = false;
+ state->active_changed = false;
state->planes_changed = false;
state->event = NULL;
}
@@ -1873,6 +2106,9 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+
+ if (plane->state)
+ plane->state->plane = plane;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
@@ -1930,6 +2166,9 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
kfree(connector->state);
connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+
+ if (connector->state)
+ connector->state->connector = connector;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
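Drivers that subclass the state structs cannot use these default reset helpers as-is; a rough equivalent of a driver-side CRTC reset under the new backpointer rule might look like the sketch below (struct foo_crtc_state and to_foo_crtc_state() are hypothetical).

struct foo_crtc_state {
	struct drm_crtc_state base;
	/* driver-private members ... */
};

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	if (crtc->state)
		kfree(to_foo_crtc_state(crtc->state));

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	crtc->state = state ? &state->base : NULL;

	/* mirror what drm_atomic_helper_crtc_reset() now does */
	if (crtc->state)
		crtc->state->crtc = crtc;
}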
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
new file mode 100644
index 000000000000..d1187e571c6d
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <drm/drm_crtc.h>
+
+#include "drm/drmP.h"
+
+static DEFINE_MUTEX(bridge_lock);
+static LIST_HEAD(bridge_list);
+
+int drm_bridge_add(struct drm_bridge *bridge)
+{
+ mutex_lock(&bridge_lock);
+ list_add_tail(&bridge->list, &bridge_list);
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_bridge_add);
+
+void drm_bridge_remove(struct drm_bridge *bridge)
+{
+ mutex_lock(&bridge_lock);
+ list_del_init(&bridge->list);
+ mutex_unlock(&bridge_lock);
+}
+EXPORT_SYMBOL(drm_bridge_remove);
+
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
+{
+ if (!dev || !bridge)
+ return -EINVAL;
+
+ if (bridge->dev)
+ return -EBUSY;
+
+ bridge->dev = dev;
+
+ if (bridge->funcs->attach)
+ return bridge->funcs->attach(bridge);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_bridge_attach);
+
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ struct drm_bridge *bridge;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list) {
+ if (bridge->of_node == np) {
+ mutex_unlock(&bridge_lock);
+ return bridge;
+ }
+ }
+
+ mutex_unlock(&bridge_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_bridge);
+#endif
+
+MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
+MODULE_DESCRIPTION("DRM bridge infrastructure");
+MODULE_LICENSE("GPL and additional rights");
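To show how the two halves of the new registration API are meant to meet, here is a hypothetical sketch: a bridge driver publishes itself with drm_bridge_add(), and the KMS driver later finds it by device-tree node and attaches it. All foo_* symbols are stand-ins, not part of this file.

/* provider side: bridge driver probe */
static int foo_bridge_probe(struct platform_device *pdev)
{
	struct drm_bridge *bridge;

	bridge = devm_kzalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	bridge->funcs = &foo_bridge_funcs;	/* assumed drm_bridge_funcs */
	bridge->of_node = pdev->dev.of_node;

	return drm_bridge_add(bridge);
}

/* consumer side: KMS driver hooks the bridge up to an encoder */
static int foo_attach_bridge(struct drm_device *drm,
			     struct drm_encoder *encoder,
			     struct device_node *np)
{
	struct drm_bridge *bridge = of_drm_find_bridge(np);

	if (!bridge)
		return -EPROBE_DEFER;

	encoder->bridge = bridge;
	return drm_bridge_attach(drm, bridge);
}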
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a6b690626a6b..9a62d7a53553 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#if defined(CONFIG_X86)
+#include <asm/smp.h>
/*
* clflushopt is an unordered instruction which needs fencing with mfence or
@@ -64,12 +65,6 @@ static void drm_cache_flush_clflush(struct page *pages[],
drm_clflush_page(*pages++);
mb();
}
-
-static void
-drm_clflush_ipi_handler(void *null)
-{
- wbinvd();
-}
#endif
void
@@ -82,7 +77,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
return;
}
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#elif defined(__powerpc__)
@@ -121,7 +116,7 @@ drm_clflush_sg(struct sg_table *st)
return;
}
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
@@ -144,7 +139,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
return;
}
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5213da499d39..6b00173d1be4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
+#include <drm/drm_atomic.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -61,8 +62,8 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
/*
* Global properties
*/
-static const struct drm_prop_enum_list drm_dpms_enum_list[] =
-{ { DRM_MODE_DPMS_ON, "On" },
+static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
+ { DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
{ DRM_MODE_DPMS_OFF, "Off" }
@@ -70,8 +71,7 @@ static const struct drm_prop_enum_list drm_dpms_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
@@ -80,8 +80,7 @@ static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
/*
* Optional properties
*/
-static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
{ DRM_MODE_SCALE_CENTER, "Center" },
@@ -97,8 +96,7 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
/*
* Non-global properties, but "required" for certain connectors.
*/
-static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
@@ -106,8 +104,7 @@ static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
@@ -116,8 +113,7 @@ static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
-static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
@@ -127,8 +123,7 @@ static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
@@ -154,8 +149,8 @@ struct drm_conn_prop_enum_list {
/*
* Connector and encoder types.
*/
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
@@ -174,8 +169,8 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};
-static const struct drm_prop_enum_list drm_encoder_enum_list[] =
-{ { DRM_MODE_ENCODER_NONE, "None" },
+static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
+ { DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
@@ -185,8 +180,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
};
-static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
{ SubPixelUnknown, "Unknown" },
{ SubPixelHorizontalRGB, "Horizontal RGB" },
{ SubPixelHorizontalBGR, "Horizontal BGR" },
@@ -697,6 +691,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ drm_object_attach_property(&crtc->base, config->prop_active, 0);
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
@@ -768,6 +766,40 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_display_info_set_bus_formats - set the supported bus formats
+ * @info: display info to store bus formats in
+ * @formats: array containing the supported bus formats
+ * @num_formats: the number of entries in the formats array
+ *
+ * Store the supported bus formats in display info structure.
+ * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
+ * a full list of available formats.
+ */
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+ const u32 *formats,
+ unsigned int num_formats)
+{
+ u32 *fmts = NULL;
+
+ if (!formats && num_formats)
+ return -EINVAL;
+
+ if (formats && num_formats) {
+ fmts = kmemdup(formats, sizeof(*formats) * num_formats,
+ GFP_KERNEL);
+ if (!fmts)
+ return -ENOMEM;
+ }
+
+ kfree(info->bus_formats);
+ info->bus_formats = fmts;
+ info->num_bus_formats = num_formats;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_display_info_set_bus_formats);
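A short, hypothetical example of the new helper from a connector or panel driver that knows its display takes 24-bit parallel RGB (MEDIA_BUS_FMT_RGB888_1X24 comes from include/uapi/linux/media-bus-format.h; the foo_* names are made up):

static const u32 foo_bus_formats[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
};

static int foo_connector_get_modes(struct drm_connector *connector)
{
	int ret;

	ret = drm_display_info_set_bus_formats(&connector->display_info,
					       foo_bus_formats,
					       ARRAY_SIZE(foo_bus_formats));
	if (ret)
		return ret;

	/* ... then add modes as usual ... */
	return 0;
}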
+
+/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to query
*
@@ -837,6 +869,7 @@ int drm_connector_init(struct drm_device *dev,
const struct drm_connector_funcs *funcs,
int connector_type)
{
+ struct drm_mode_config *config = &dev->mode_config;
int ret;
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
@@ -875,16 +908,20 @@ int drm_connector_init(struct drm_device *dev,
/* We should add connectors at the end to avoid upsetting the connector
* index too much. */
- list_add_tail(&connector->head, &dev->mode_config.connector_list);
- dev->mode_config.num_connector++;
+ list_add_tail(&connector->head, &config->connector_list);
+ config->num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_object_attach_property(&connector->base,
- dev->mode_config.edid_property,
+ config->edid_property,
0);
drm_object_attach_property(&connector->base,
- dev->mode_config.dpms_property, 0);
+ config->dpms_property, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
+ }
connector->debugfs_entry = NULL;
@@ -924,6 +961,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
+ kfree(connector->display_info.bus_formats);
drm_mode_object_put(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
@@ -1028,61 +1066,6 @@ void drm_connector_unplug_all(struct drm_device *dev)
EXPORT_SYMBOL(drm_connector_unplug_all);
/**
- * drm_bridge_init - initialize a drm transcoder/bridge
- * @dev: drm device
- * @bridge: transcoder/bridge to set up
- * @funcs: bridge function table
- *
- * Initialises a preallocated bridge. Bridges should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
- const struct drm_bridge_funcs *funcs)
-{
- int ret;
-
- drm_modeset_lock_all(dev);
-
- ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
- if (ret)
- goto out;
-
- bridge->dev = dev;
- bridge->funcs = funcs;
-
- list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
- dev->mode_config.num_bridge++;
-
- out:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_bridge_init);
-
-/**
- * drm_bridge_cleanup - cleans up an initialised bridge
- * @bridge: bridge to cleanup
- *
- * Cleans up the bridge but doesn't free the object.
- */
-void drm_bridge_cleanup(struct drm_bridge *bridge)
-{
- struct drm_device *dev = bridge->dev;
-
- drm_modeset_lock_all(dev);
- drm_mode_object_put(dev, &bridge->base);
- list_del(&bridge->head);
- dev->mode_config.num_bridge--;
- drm_modeset_unlock_all(dev);
-
- memset(bridge, 0, sizeof(*bridge));
-}
-EXPORT_SYMBOL(drm_bridge_cleanup);
-
-/**
* drm_encoder_init - Init a preallocated encoder
* @dev: drm device
* @encoder: the encoder to init
@@ -1142,6 +1125,7 @@ EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
@@ -1174,6 +1158,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
const uint32_t *formats, uint32_t format_count,
enum drm_plane_type type)
{
+ struct drm_mode_config *config = &dev->mode_config;
int ret;
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
@@ -1185,8 +1170,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
- plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
- GFP_KERNEL);
+ plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
+ GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
@@ -1198,15 +1183,28 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->possible_crtcs = possible_crtcs;
plane->type = type;
- list_add_tail(&plane->head, &dev->mode_config.plane_list);
- dev->mode_config.num_total_plane++;
+ list_add_tail(&plane->head, &config->plane_list);
+ config->num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- dev->mode_config.num_overlay_plane++;
+ config->num_overlay_plane++;
drm_object_attach_property(&plane->base,
- dev->mode_config.plane_type_property,
+ config->plane_type_property,
plane->type);
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_x, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_y, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_w, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_h, 0);
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
@@ -1328,50 +1326,115 @@ void drm_plane_force_disable(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_force_disable);
-static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+static int drm_mode_create_standard_properties(struct drm_device *dev)
{
- struct drm_property *edid;
- struct drm_property *dpms;
- struct drm_property *dev_path;
+ struct drm_property *prop;
/*
* Standard properties (apply to all connectors)
*/
- edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"EDID", 0);
- dev->mode_config.edid_property = edid;
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.edid_property = prop;
- dpms = drm_property_create_enum(dev, 0,
+ prop = drm_property_create_enum(dev, 0,
"DPMS", drm_dpms_enum_list,
ARRAY_SIZE(drm_dpms_enum_list));
- dev->mode_config.dpms_property = dpms;
-
- dev_path = drm_property_create(dev,
- DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "PATH", 0);
- dev->mode_config.path_property = dev_path;
-
- dev->mode_config.tile_property = drm_property_create(dev,
- DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "TILE", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.dpms_property = prop;
- return 0;
-}
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "PATH", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.path_property = prop;
-static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
-{
- struct drm_property *type;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "TILE", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.tile_property = prop;
- /*
- * Standard properties (apply to all planes)
- */
- type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
ARRAY_SIZE(drm_plane_type_enum_list));
- dev->mode_config.plane_type_property = type;
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.plane_type_property = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_X", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_x = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_Y", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_W", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_H", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_h = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_X", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_x = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_Y", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_W", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_H", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_h = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "FB_ID", DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_id = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_id = prop;
+
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "ACTIVE");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_active = prop;
return 0;
}
@@ -1597,16 +1660,14 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
- total_objects += dev->mode_config.num_bridge;
- group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+ group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
- group->num_bridges = 0;
return 0;
}
@@ -1626,10 +1687,10 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_bridge *bridge;
int ret;
- if ((ret = drm_mode_group_init(dev, group)))
+ ret = drm_mode_group_init(dev, group);
+ if (ret)
return ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -1643,11 +1704,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
- list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
- group->id_list[group->num_crtcs + group->num_encoders +
- group->num_connectors + group->num_bridges++] =
- bridge->base.id;
-
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@@ -1996,6 +2052,44 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *conne
return connector->encoder;
}
+/* helper for getconnector and getproperties ioctls */
+static int get_properties(struct drm_mode_object *obj, bool atomic,
+ uint32_t __user *prop_ptr, uint64_t __user *prop_values,
+ uint32_t *arg_count_props)
+{
+ int props_count;
+ int i, ret, copied;
+
+ props_count = obj->properties->count;
+ if (!atomic)
+ props_count -= obj->properties->atomic_count;
+
+ if ((*arg_count_props >= props_count) && props_count) {
+ for (i = 0, copied = 0; copied < props_count; i++) {
+ struct drm_property *prop = obj->properties->properties[i];
+ uint64_t val;
+
+ if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+ continue;
+
+ ret = drm_object_property_get_value(obj, prop, &val);
+ if (ret)
+ return ret;
+
+ if (put_user(prop->base.id, prop_ptr + copied))
+ return -EFAULT;
+
+ if (put_user(val, prop_values + copied))
+ return -EFAULT;
+
+ copied++;
+ }
+ }
+ *arg_count_props = props_count;
+
+ return 0;
+}
+
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@@ -2017,15 +2111,12 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
- int props_count = 0;
int encoders_count = 0;
int ret = 0;
int copied = 0;
int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
- uint32_t __user *prop_ptr;
- uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -2036,6 +2127,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
@@ -2043,13 +2135,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
goto out;
}
- props_count = connector->properties.count;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ if (connector->encoder_ids[i] != 0)
encoders_count++;
- }
- }
if (out_resp->count_modes == 0) {
connector->funcs->fill_modes(connector,
@@ -2069,14 +2157,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
* This ioctl is called twice, once to determine how much space is
@@ -2100,26 +2185,12 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
out_resp->count_modes = mode_count;
- if ((out_resp->count_props >= props_count) && props_count) {
- copied = 0;
- prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < connector->properties.count; i++) {
- if (put_user(connector->properties.ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
-
- if (put_user(connector->properties.values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- out_resp->count_props = props_count;
+ ret = get_properties(&connector->base, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+ (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+ &out_resp->count_props);
+ if (ret)
+ goto out;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
@@ -2138,6 +2209,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->count_encoders = encoders_count;
out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -2529,7 +2601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
*
* This is a little helper to wrap internal calls to the ->set_config driver
* interface. The only thing it adds is the correct refcounting dance.
- *
+ *
* Returns:
* Zero on success, negative errno on failure.
*/
@@ -2569,6 +2641,27 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
EXPORT_SYMBOL(drm_mode_set_config_internal);
/**
+ * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
+ *
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
+ */
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay)
+{
+ struct drm_display_mode adjusted;
+
+ drm_mode_copy(&adjusted, mode);
+ drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+ *hdisplay = adjusted.crtc_hdisplay;
+ *vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_crtc_get_hv_timing);
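As a quick illustration of why this was factored out (drm_crtc_check_viewport() below now reuses it), a driver-side check could call it the same way; foo_fb_covers_mode() is hypothetical:

static bool foo_fb_covers_mode(const struct drm_framebuffer *fb,
			       const struct drm_display_mode *mode)
{
	int hdisplay, vdisplay;

	/* stereo modes get their vdisplay doubled where the layout needs it */
	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);

	return fb->width >= hdisplay && fb->height >= vdisplay;
}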
+
+/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
* @crtc: CRTC that framebuffer will be displayed on
@@ -2585,16 +2678,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
{
int hdisplay, vdisplay;
- hdisplay = mode->hdisplay;
- vdisplay = mode->vdisplay;
-
- if (drm_mode_is_stereo(mode)) {
- struct drm_display_mode adjusted = *mode;
-
- drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
- hdisplay = adjusted.crtc_hdisplay;
- vdisplay = adjusted.crtc_vdisplay;
- }
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
@@ -2690,6 +2774,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
+ mode->status = drm_mode_validate_basic(mode);
+ if (mode->status != MODE_OK) {
+ ret = -EINVAL;
+ goto out;
+ }
+
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
@@ -2721,9 +2811,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
- connector_set = kmalloc(crtc_req->count_connectors *
- sizeof(struct drm_connector *),
- GFP_KERNEL);
+ connector_set = kmalloc_array(crtc_req->count_connectors,
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
if (!connector_set) {
ret = -ENOMEM;
goto out;
@@ -2968,6 +3058,7 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor2 *req = data;
+
return drm_mode_cursor_common(dev, req, file_priv);
}
@@ -3415,7 +3506,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
ret = -EINVAL;
goto out_err1;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
goto out_err1;
@@ -3516,7 +3607,8 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property->dev = dev;
if (num_values) {
- property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ property->values = kcalloc(num_values, sizeof(uint64_t),
+ GFP_KERNEL);
if (!property->values)
goto fail;
}
@@ -3665,7 +3757,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
}
/**
- * drm_property_create_range - create a new ranged property type
+ * drm_property_create_range - create a new unsigned ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3676,8 +3768,8 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
- * Userspace is allowed to set any integer value in the (min, max) range
- * inclusive.
+ * Userspace is allowed to set any unsigned integer value in the (min, max)
+ * range inclusive.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
@@ -3691,6 +3783,24 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags
}
EXPORT_SYMBOL(drm_property_create_range);
+/**
+ * drm_property_create_signed_range - create a new signed ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is allowed to set any signed integer value in the (min, max)
+ * range inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int flags, const char *name,
int64_t min, int64_t max)
@@ -3700,6 +3810,23 @@ struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_property_create_signed_range);
+/**
+ * drm_property_create_object - create a new object property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @type: object type from DRM_MODE_OBJECT_* defines
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is only allowed to set this to mode object IDs of the given
+ * @type. Only useful for atomic properties, which is enforced.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type)
{
@@ -3707,6 +3834,9 @@ struct drm_property *drm_property_create_object(struct drm_device *dev,
flags |= DRM_MODE_PROP_OBJECT;
+ if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
+ return NULL;
+
property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
@@ -3718,6 +3848,28 @@ struct drm_property *drm_property_create_object(struct drm_device *dev,
EXPORT_SYMBOL(drm_property_create_object);
/**
+ * drm_property_create_bool - create a new boolean property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * This is implemented as a ranged property with only {0, 1} as valid values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name)
+{
+ return drm_property_create_range(dev, flags, name, 0, 1);
+}
+EXPORT_SYMBOL(drm_property_create_bool);
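For illustration, a hypothetical driver-private on/off knob attached to a connector (the property name "foo dither" is made up for the sketch):

static int foo_attach_dither_property(struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create_bool(connector->dev, 0, "foo dither");
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&connector->base, prop, 0);
	return 0;
}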
+
+/**
* drm_property_add_enum - add a possible value to an enumeration property
* @property: enumeration property to change
* @index: index of the new enumeration
@@ -3822,9 +3974,11 @@ void drm_object_attach_property(struct drm_mode_object *obj,
return;
}
- obj->properties->ids[count] = property->base.id;
+ obj->properties->properties[count] = property;
obj->properties->values[count] = init_val;
obj->properties->count++;
+ if (property->flags & DRM_MODE_PROP_ATOMIC)
+ obj->properties->atomic_count++;
}
EXPORT_SYMBOL(drm_object_attach_property);
@@ -3847,7 +4001,7 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
int i;
for (i = 0; i < obj->properties->count; i++) {
- if (obj->properties->ids[i] == property->base.id) {
+ if (obj->properties->properties[i] == property) {
obj->properties->values[i] = val;
return 0;
}
@@ -3876,8 +4030,16 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
{
int i;
+ /* Read-only properties bypass the atomic mechanism and still store
+ * their value in obj->properties->values[], mostly to avoid
+ * having to deal with EDID and similar props in atomic paths.
+ */
+ if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+ !(property->flags & DRM_MODE_PROP_IMMUTABLE))
+ return drm_atomic_get_property(obj, property, val);
+
for (i = 0; i < obj->properties->count; i++) {
- if (obj->properties->ids[i] == property->base.id) {
+ if (obj->properties->properties[i] == property) {
*val = obj->properties->values[i];
return 0;
}
@@ -4057,7 +4219,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)){
+ if (copy_to_user(blob_ptr, blob->data, blob->length)) {
ret = -EFAULT;
goto done;
}
@@ -4193,25 +4355,38 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-static bool drm_property_change_is_valid(struct drm_property *property,
- uint64_t value)
+/* Some properties could refer to dynamic refcnt'd objects, or things that
+ * need special locking to handle lifetime issues (i.e. to ensure the prop
+ * value doesn't become invalid part way through the property update due to
+ * a race). The value returned by reference via 'ref' should be passed back
+ * to drm_property_change_valid_put() after the property is set (and the
+ * object to which the property is attached has a chance to take its own
+ * reference).
+ */
+bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref)
{
+ int i;
+
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
+ *ref = NULL;
+
if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
int64_t svalue = U642I64(value);
+
if (svalue < U642I64(property->values[0]) ||
svalue > U642I64(property->values[1]))
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- int i;
uint64_t valid_mask = 0;
+
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
@@ -4219,25 +4394,40 @@ static bool drm_property_change_is_valid(struct drm_property *property,
/* Only the driver knows */
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
- struct drm_mode_object *obj;
/* a zero value for an object property translates to null: */
if (value == 0)
return true;
- /*
- * NOTE: use _object_find() directly to bypass restriction on
- * looking up refcnt'd objects (ie. fb's). For a refcnt'd
- * object this could race against object finalization, so it
- * simply tells us that the object *was* valid. Which is good
- * enough.
- */
- obj = _object_find(property->dev, value, property->values[0]);
- return obj != NULL;
- } else {
- int i;
- for (i = 0; i < property->num_values; i++)
- if (property->values[i] == value)
+
+ /* handle refcnt'd objects specially: */
+ if (property->values[0] == DRM_MODE_OBJECT_FB) {
+ struct drm_framebuffer *fb;
+ fb = drm_framebuffer_lookup(property->dev, value);
+ if (fb) {
+ *ref = &fb->base;
return true;
- return false;
+ } else {
+ return false;
+ }
+ } else {
+ return _object_find(property->dev, value, property->values[0]) != NULL;
+ }
+ }
+
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+}
+
+void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref)
+{
+ if (!ref)
+ return;
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ if (property->values[0] == DRM_MODE_OBJECT_FB)
+ drm_framebuffer_unreference(obj_to_fb(ref));
}
}
@@ -4356,11 +4546,6 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
int ret = 0;
- int i;
- int copied = 0;
- int props_count = 0;
- uint32_t __user *props_ptr;
- uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -4377,30 +4562,11 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
goto out;
}
- props_count = obj->properties->count;
+ ret = get_properties(obj, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(arg->props_ptr),
+ (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+ &arg->count_props);
- /* This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it. */
- if ((arg->count_props >= props_count) && props_count) {
- copied = 0;
- props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
- prop_values_ptr = (uint64_t __user *)(unsigned long)
- (arg->prop_values_ptr);
- for (i = 0; i < props_count; i++) {
- if (put_user(obj->properties->ids[i],
- props_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- if (put_user(obj->properties->values[i],
- prop_values_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- arg->count_props = props_count;
out:
drm_modeset_unlock_all(dev);
return ret;
@@ -4429,8 +4595,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_mode_object *arg_obj;
struct drm_mode_object *prop_obj;
struct drm_property *property;
- int ret = -EINVAL;
- int i;
+ int i, ret = -EINVAL;
+ struct drm_mode_object *ref;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -4446,7 +4612,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
goto out;
for (i = 0; i < arg_obj->properties->count; i++)
- if (arg_obj->properties->ids[i] == arg->prop_id)
+ if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
break;
if (i == arg_obj->properties->count)
@@ -4460,7 +4626,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
}
property = obj_to_property(prop_obj);
- if (!drm_property_change_is_valid(property, arg->value))
+ if (!drm_property_change_valid_get(property, arg->value, &ref))
goto out;
switch (arg_obj->type) {
@@ -4477,6 +4643,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
break;
}
+ drm_property_change_valid_put(property, ref);
+
out:
drm_modeset_unlock_all(dev);
return ret;
@@ -4526,7 +4694,8 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
{
crtc->gamma_size = gamma_size;
- crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+ crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
+ GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return -ENOMEM;
@@ -4741,23 +4910,23 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof e->event) {
+ if (file_priv->event_space < sizeof(e->event)) {
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
- file_priv->event_space -= sizeof e->event;
+ file_priv->event_space -= sizeof(e->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
- e = kzalloc(sizeof *e, GFP_KERNEL);
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
+ file_priv->event_space += sizeof(e->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof e->event;
+ e->event.base.length = sizeof(e->event);
e->event.user_data = page_flip->user_data;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
@@ -4770,7 +4939,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
+ file_priv->event_space += sizeof(e->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
@@ -5211,7 +5380,6 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.bridge_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
@@ -5220,8 +5388,7 @@ void drm_mode_config_init(struct drm_device *dev)
idr_init(&dev->mode_config.tile_idr);
drm_modeset_lock_all(dev);
- drm_mode_create_standard_connector_properties(dev);
- drm_mode_create_standard_plane_properties(dev);
+ drm_mode_create_standard_properties(dev);
drm_modeset_unlock_all(dev);
/* Just to be sure */
@@ -5252,7 +5419,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_connector *connector, *ot;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
- struct drm_bridge *bridge, *brt;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
@@ -5263,11 +5429,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
encoder->funcs->destroy(encoder);
}
- list_for_each_entry_safe(bridge, brt,
- &dev->mode_config.bridge_list, head) {
- bridge->funcs->destroy(bridge);
- }
-
list_for_each_entry_safe(connector, ot,
&dev->mode_config.connector_list, head) {
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d552708409de..b1979e7bdc88 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -946,6 +946,7 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return -ENOMEM;
+ crtc_state->crtc = crtc;
crtc_state->enable = true;
crtc_state->planes_changed = true;
@@ -1005,6 +1006,7 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
+ plane_state->plane = plane;
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a2945ee6d675..247dc8b62564 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -36,3 +36,9 @@ int drm_mode_object_get(struct drm_device *dev,
void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object);
+/* drm_atomic.c */
+int drm_atomic_get_property(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val);
+int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 79968e39c8d0..f1283878ff6d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -354,6 +354,37 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
EXPORT_SYMBOL(drm_dp_link_power_up);
/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
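A sketch of where the new call would typically sit, in an eDP/DP encoder's disable path; struct foo_dp, to_foo_dp() and the stream shutdown are assumptions about the driver, not part of this helper:

static void foo_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct foo_dp *dp = to_foo_dp(encoder);
	int err;

	/* stop the video stream first (driver specific) ... */

	err = drm_dp_link_power_down(&dp->aux, &dp->link);
	if (err < 0)
		DRM_ERROR("failed to power down DP link: %d\n", err);
}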
+
+/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 4f41377b0b80..d51213464672 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -40,15 +40,19 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+bool drm_atomic = 0;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(atomic, "Enable experimental atomic KMS API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named_unsafe(atomic, drm_atomic, bool, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 52ce26d6b4fb..1e6a0c760c5d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+static void remove_from_modeset(struct drm_mode_set *set,
+ struct drm_connector *connector)
+{
+ int i, j;
+
+ for (i = 0; i < set->num_connectors; i++) {
+ if (set->connectors[i] == connector)
+ break;
+ }
+
+ if (i == set->num_connectors)
+ return;
+
+ for (j = i + 1; j < set->num_connectors; j++) {
+ set->connectors[j - 1] = set->connectors[j];
+ }
+ set->num_connectors--;
+
+ /* because i915 is pissy about this..
+ * TODO maybe need to make sure we set it back to !=NULL somewhere?
+ */
+ if (set->num_connectors == 0)
+ set->fb = NULL;
+}
+
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
+
+ /* also cleanup dangling references to the connector: */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -741,7 +771,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
int i, j, rc = 0;
int start;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -915,7 +947,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -1688,7 +1722,7 @@ out:
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
-bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 0b9514b6cd64..076dd606b580 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -478,64 +478,59 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
-static bool
-drm_dequeue_event(struct drm_file *file_priv,
- size_t total, size_t max, struct drm_pending_event **out)
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
{
+ struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e;
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ ssize_t ret = 0;
- *out = NULL;
- if (list_empty(&file_priv->event_list))
- goto out;
- e = list_first_entry(&file_priv->event_list,
- struct drm_pending_event, link);
- if (e->event->length + total > max)
- goto out;
+ if (!access_ok(VERIFY_WRITE, buffer, count))
+ return -EFAULT;
- file_priv->event_space += e->event->length;
- list_del(&e->link);
- *out = e;
- ret = true;
+ spin_lock_irq(&dev->event_lock);
+ for (;;) {
+ if (list_empty(&file_priv->event_list)) {
+ if (ret)
+ break;
-out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return ret;
-}
-
-ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_pending_event *e;
- size_t total;
- ssize_t ret;
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
- if ((filp->f_flags & O_NONBLOCK) == 0) {
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
- }
+ spin_unlock_irq(&dev->event_lock);
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ spin_lock_irq(&dev->event_lock);
+ if (ret < 0)
+ break;
+
+ ret = 0;
+ } else {
+ struct drm_pending_event *e;
+
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + ret > count)
+ break;
+
+ if (__copy_to_user_inatomic(buffer + ret,
+ e->event, e->event->length)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
- total = 0;
- while (drm_dequeue_event(file_priv, total, count, &e)) {
- if (copy_to_user(buffer + total,
- e->event, e->event->length)) {
- total = -EFAULT;
+ file_priv->event_space += e->event->length;
+ ret += e->event->length;
+ list_del(&e->link);
e->destroy(e);
- break;
}
-
- total += e->event->length;
- e->destroy(e);
}
+ spin_unlock_irq(&dev->event_lock);
- return total ?: -EAGAIN;
+ return ret;
}
EXPORT_SYMBOL(drm_read);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 51efebd434f3..f1b32f91d941 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -153,30 +153,6 @@ int drm_bufs_info(struct seq_file *m, void *data)
}
/**
- * Called when "/proc/dri/.../vblank" is read.
- */
-int drm_vblank_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc;
-
- mutex_lock(&dev->struct_mutex);
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank[crtc].refcount));
- seq_printf(m, "CRTC %d counter: %d\n",
- crtc, drm_vblank_count(dev, crtc));
- seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->vblank[crtc].last_wait);
- seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank[crtc].inmodeset);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
* Called when "/proc/dri/.../clients" is read.
*
*/
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 7cc0a3516871..12a61d706827 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -55,7 +55,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
int drm_name_info(struct seq_file *m, void *data);
int drm_vm_info(struct seq_file *m, void *data);
int drm_bufs_info(struct seq_file *m, void *data);
-int drm_vblank_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 00587a1e3c83..3785d66721f2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -32,6 +32,7 @@
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#include <linux/pci.h>
#include <linux/export.h>
@@ -345,6 +346,17 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->universal_planes = req->value;
break;
+ case DRM_CLIENT_CAP_ATOMIC:
+ /* for now, hide behind experimental drm.atomic moduleparam */
+ if (!drm_atomic)
+ return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->atomic = req->value;
+ file_priv->universal_planes = req->value;
+ break;
default:
return -EINVAL;
}
@@ -620,6 +632,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
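The new DRM_CLIENT_CAP_ATOMIC case above is opt-in per file and doubly gated: the drm.atomic module parameter must be set and the driver must advertise DRIVER_ATOMIC. A hedged userspace sketch of enabling it through libdrm (illustrative only; assumes the DRM_CLIENT_CAP_ATOMIC uapi define added elsewhere in this series):

#include <xf86drm.h>

static int enable_atomic(int fd)
{
	/* Implies universal planes too, mirroring drm_setclientcap() above. */
	return drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
}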
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 4d79dad9d44f..10574a0c3a55 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -185,8 +185,15 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
return;
}
- dev->driver->disable_vblank(dev, crtc);
- vblank->enabled = false;
+ /*
+ * Only disable vblank interrupts if they're enabled. This avoids
+ * calling the ->disable_vblank() operation in atomic context with the
+ * hardware potentially runtime suspended.
+ */
+ if (vblank->enabled) {
+ dev->driver->disable_vblank(dev, crtc);
+ vblank->enabled = false;
+ }
/* No further vblank irq's will be processed after
* this point. Get current hardware vblank count and
@@ -778,7 +785,7 @@ static struct timeval get_drm_timestamp(void)
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval
+ * vblank interval
* @dev: DRM device
* @crtc: which CRTC's vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
@@ -933,6 +940,7 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
{
struct timeval now;
unsigned int seq;
+
if (crtc >= 0) {
seq = drm_vblank_count_and_time(dev, crtc, &now);
} else {
@@ -1422,7 +1430,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
unsigned int seq;
int ret;
- e = kzalloc(sizeof *e, GFP_KERNEL);
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
ret = -ENOMEM;
goto err_put;
@@ -1431,7 +1439,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->pipe = pipe;
e->base.pid = current->pid;
e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof e->event;
+ e->event.base.length = sizeof(e->event);
e->event.user_data = vblwait->request.signal;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
@@ -1451,12 +1459,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
goto err_unlock;
}
- if (file_priv->event_space < sizeof e->event) {
+ if (file_priv->event_space < sizeof(e->event)) {
ret = -EBUSY;
goto err_unlock;
}
- file_priv->event_space -= sizeof e->event;
+ file_priv->event_space -= sizeof(e->event);
seq = drm_vblank_count_and_time(dev, pipe, &now);
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index c0644bb865f2..2d5ca8eec13a 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -323,8 +323,6 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
const struct mipi_dsi_msg *msg)
{
- const u8 *tx = msg->tx_buf;
-
if (!packet || !msg)
return -EINVAL;
@@ -353,8 +351,10 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
packet->header[2] = (msg->tx_len >> 8) & 0xff;
packet->payload_length = msg->tx_len;
- packet->payload = tx;
+ packet->payload = msg->tx_buf;
} else {
+ const u8 *tx = msg->tx_buf;
+
packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d8b941c8200..487d0e35c134 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -615,6 +615,46 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
+/**
+ * drm_display_mode_to_videomode - fill in @vm using @dmode
+ * @dmode: drm_display_mode structure to use as source
+ * @vm: videomode structure to use as destination
+ *
+ * Fills out @vm using the display mode specified in @dmode.
+ */
+void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
+ struct videomode *vm)
+{
+ vm->hactive = dmode->hdisplay;
+ vm->hfront_porch = dmode->hsync_start - dmode->hdisplay;
+ vm->hsync_len = dmode->hsync_end - dmode->hsync_start;
+ vm->hback_porch = dmode->htotal - dmode->hsync_end;
+
+ vm->vactive = dmode->vdisplay;
+ vm->vfront_porch = dmode->vsync_start - dmode->vdisplay;
+ vm->vsync_len = dmode->vsync_end - dmode->vsync_start;
+ vm->vback_porch = dmode->vtotal - dmode->vsync_end;
+
+ vm->pixelclock = dmode->clock * 1000;
+
+ vm->flags = 0;
+ if (dmode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ else if (dmode->flags & DRM_MODE_FLAG_NHSYNC)
+ vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (dmode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ else if (dmode->flags & DRM_MODE_FLAG_NVSYNC)
+ vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
+ if (dmode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm->flags |= DISPLAY_FLAGS_INTERLACED;
+ if (dmode->flags & DRM_MODE_FLAG_DBLSCAN)
+ vm->flags |= DISPLAY_FLAGS_DOUBLESCAN;
+ if (dmode->flags & DRM_MODE_FLAG_DBLCLK)
+ vm->flags |= DISPLAY_FLAGS_DOUBLECLK;
+}
+EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
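As a worked example of the field arithmetic above (standard CEA 1080p60 timings, not values taken from this patch):

static const struct drm_display_mode example_1080p60 = {
	.clock = 148500,
	.hdisplay = 1920, .hsync_start = 2008, .hsync_end = 2052, .htotal = 2200,
	.vdisplay = 1080, .vsync_start = 1084, .vsync_end = 1089, .vtotal = 1125,
};
/*
 * drm_display_mode_to_videomode() then yields:
 *   hfront_porch = 2008 - 1920 = 88,  hsync_len = 2052 - 2008 = 44,
 *   hback_porch  = 2200 - 2052 = 148, pixelclock = 148500 * 1000 Hz,
 *   vfront_porch = 1084 - 1080 = 4,   vsync_len  = 1089 - 1084 = 5,
 *   vback_porch  = 1125 - 1089 = 36.
 */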
+
#ifdef CONFIG_OF
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
@@ -739,6 +779,8 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
+ * - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
+ * be performed for doublescan and vscan > 1 modes respectively.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
@@ -765,18 +807,22 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
}
}
- if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
- p->crtc_vdisplay *= 2;
- p->crtc_vsync_start *= 2;
- p->crtc_vsync_end *= 2;
- p->crtc_vtotal *= 2;
+ if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
}
- if (p->vscan > 1) {
- p->crtc_vdisplay *= p->vscan;
- p->crtc_vsync_start *= p->vscan;
- p->crtc_vsync_end *= p->vscan;
- p->crtc_vtotal *= p->vscan;
+ if (!(adjust_flags & CRTC_NO_VSCAN)) {
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
}
if (adjust_flags & CRTC_STEREO_DOUBLE) {
@@ -906,9 +952,40 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
/**
+ * drm_mode_validate_basic - make sure the mode is somewhat sane
+ * @mode: mode to check
+ *
+ * Check that the mode timings are at least somewhat reasonable.
+ * Any hardware-specific limits are left up to each driver to check.
+ *
+ * Returns:
+ * The mode status
+ */
+enum drm_mode_status
+drm_mode_validate_basic(const struct drm_display_mode *mode)
+{
+ if (mode->clock == 0)
+ return MODE_CLOCK_LOW;
+
+ if (mode->hdisplay == 0 ||
+ mode->hsync_start < mode->hdisplay ||
+ mode->hsync_end < mode->hsync_start ||
+ mode->htotal < mode->hsync_end)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay == 0 ||
+ mode->vsync_start < mode->vdisplay ||
+ mode->vsync_end < mode->vsync_start ||
+ mode->vtotal < mode->vsync_end)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+EXPORT_SYMBOL(drm_mode_validate_basic);
+
+/**
* drm_mode_validate_size - make sure modes adhere to size constraints
- * @dev: DRM device
- * @mode_list: list of modes to check
+ * @mode: mode to check
* @maxX: maximum width
* @maxY: maximum height
*
@@ -916,23 +993,80 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
* limitations of the DRM device/connector. If a mode is too big its status
* member is updated with the appropriate validation failure code. The list
* itself is not changed.
+ *
+ * Returns:
+ * The mode status
*/
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY)
+enum drm_mode_status
+drm_mode_validate_size(const struct drm_display_mode *mode,
+ int maxX, int maxY)
{
- struct drm_display_mode *mode;
+ if (maxX > 0 && mode->hdisplay > maxX)
+ return MODE_VIRTUAL_X;
- list_for_each_entry(mode, mode_list, head) {
- if (maxX > 0 && mode->hdisplay > maxX)
- mode->status = MODE_VIRTUAL_X;
+ if (maxY > 0 && mode->vdisplay > maxY)
+ return MODE_VIRTUAL_Y;
- if (maxY > 0 && mode->vdisplay > maxY)
- mode->status = MODE_VIRTUAL_Y;
- }
+ return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_size);
+#define MODE_STATUS(status) [MODE_ ## status + 3] = #status
+
+static const char * const drm_mode_status_names[] = {
+ MODE_STATUS(OK),
+ MODE_STATUS(HSYNC),
+ MODE_STATUS(VSYNC),
+ MODE_STATUS(H_ILLEGAL),
+ MODE_STATUS(V_ILLEGAL),
+ MODE_STATUS(BAD_WIDTH),
+ MODE_STATUS(NOMODE),
+ MODE_STATUS(NO_INTERLACE),
+ MODE_STATUS(NO_DBLESCAN),
+ MODE_STATUS(NO_VSCAN),
+ MODE_STATUS(MEM),
+ MODE_STATUS(VIRTUAL_X),
+ MODE_STATUS(VIRTUAL_Y),
+ MODE_STATUS(MEM_VIRT),
+ MODE_STATUS(NOCLOCK),
+ MODE_STATUS(CLOCK_HIGH),
+ MODE_STATUS(CLOCK_LOW),
+ MODE_STATUS(CLOCK_RANGE),
+ MODE_STATUS(BAD_HVALUE),
+ MODE_STATUS(BAD_VVALUE),
+ MODE_STATUS(BAD_VSCAN),
+ MODE_STATUS(HSYNC_NARROW),
+ MODE_STATUS(HSYNC_WIDE),
+ MODE_STATUS(HBLANK_NARROW),
+ MODE_STATUS(HBLANK_WIDE),
+ MODE_STATUS(VSYNC_NARROW),
+ MODE_STATUS(VSYNC_WIDE),
+ MODE_STATUS(VBLANK_NARROW),
+ MODE_STATUS(VBLANK_WIDE),
+ MODE_STATUS(PANEL),
+ MODE_STATUS(INTERLACE_WIDTH),
+ MODE_STATUS(ONE_WIDTH),
+ MODE_STATUS(ONE_HEIGHT),
+ MODE_STATUS(ONE_SIZE),
+ MODE_STATUS(NO_REDUCED),
+ MODE_STATUS(NO_STEREO),
+ MODE_STATUS(UNVERIFIED),
+ MODE_STATUS(BAD),
+ MODE_STATUS(ERROR),
+};
+
+#undef MODE_STATUS
+
+static const char *drm_get_mode_status_name(enum drm_mode_status status)
+{
+ int index = status + 3;
+
+ if (WARN_ON(index < 0 || index >= ARRAY_SIZE(drm_mode_status_names)))
+ return "";
+
+ return drm_mode_status_names[index];
+}
+
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
@@ -954,8 +1088,9 @@ void drm_mode_prune_invalid(struct drm_device *dev,
list_del(&mode->head);
if (verbose) {
drm_mode_debug_printmodeline(mode);
- DRM_DEBUG_KMS("Not using %s mode %d\n",
- mode->name, mode->status);
+ DRM_DEBUG_KMS("Not using %s mode: %s\n",
+ mode->name,
+ drm_get_mode_status_name(mode->status));
}
drm_mode_destroy(dev, mode);
}
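Taken together, drm_mode_validate_basic() and drm_mode_validate_size() now work per mode rather than per list, so callers chain them and store the result themselves. A minimal sketch of that chain (the helper name validate_one_mode is hypothetical; the probe-helper hunk later in this diff does the same thing for real):

static enum drm_mode_status
validate_one_mode(const struct drm_display_mode *mode, int max_w, int max_h)
{
	enum drm_mode_status status;

	status = drm_mode_validate_basic(mode);		/* sane timings? */
	if (status == MODE_OK)
		status = drm_mode_validate_size(mode, max_w, max_h);

	return status;	/* the caller stores this in mode->status */
}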
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 18a1ac6ac22f..5ba5792bfdba 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -142,6 +142,17 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
{
int hscale, vscale;
+ if (!fb) {
+ *visible = false;
+ return 0;
+ }
+
+ /* crtc should only be NULL when disabling (i.e., !fb) */
+ if (WARN_ON(!crtc)) {
+ *visible = false;
+ return 0;
+ }
+
if (!crtc->enabled && !can_update_disabled) {
DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
@@ -155,11 +166,6 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return -ERANGE;
}
- if (!fb) {
- *visible = false;
- return 0;
- }
-
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
@@ -429,7 +435,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
goto out;
}
- if (plane_funcs->prepare_fb && plane_state->fb) {
+ if (plane_funcs->prepare_fb && plane_state->fb &&
+ plane_state->fb != old_fb) {
ret = plane_funcs->prepare_fb(plane, plane_state->fb);
if (ret)
goto out;
@@ -443,13 +450,28 @@ int drm_plane_helper_commit(struct drm_plane *plane,
crtc_funcs[i]->atomic_begin(crtc[i]);
}
- plane_funcs->atomic_update(plane, plane_state);
+ /*
+ * Drivers may optionally implement the ->atomic_disable callback, so
+ * special-case that here.
+ */
+ if (drm_atomic_plane_disabling(plane, plane_state) &&
+ plane_funcs->atomic_disable)
+ plane_funcs->atomic_disable(plane, plane_state);
+ else
+ plane_funcs->atomic_update(plane, plane_state);
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
crtc_funcs[i]->atomic_flush(crtc[i]);
}
+ /*
+ * If we only moved the plane and didn't change the FB, there's no need to
+ * wait for vblank.
+ */
+ if (plane->state->fb == old_fb)
+ goto out;
+
for (i = 0; i < 2; i++) {
if (!crtc[i])
continue;
@@ -478,7 +500,7 @@ out:
}
/**
- * drm_plane_helper_update() - Helper for primary plane update
+ * drm_plane_helper_update() - Transitional helper for plane update
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
@@ -517,6 +539,7 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
+ plane_state->plane = plane;
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, fb);
@@ -534,7 +557,7 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_plane_helper_update);
/**
- * drm_plane_helper_disable() - Helper for primary plane disable
+ * drm_plane_helper_disable() - Transitional helper for plane disable
* @plane: plane to disable
*
* Provides a default plane disable handler using the atomic plane update
@@ -563,6 +586,7 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
+ plane_state->plane = plane;
plane_state->crtc = NULL;
drm_atomic_set_fb_for_plane(plane_state, NULL);
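These transitional helpers let a legacy driver route its plane ioctls through the atomic plane hooks; a driver wires them up roughly as below (illustrative sketch: my_plane_funcs is a placeholder, and a driver with dynamically allocated planes would use its own destroy wrapper). The helpers build a throwaway plane state internally, which is why they now also fill in plane_state->plane.

static const struct drm_plane_funcs my_plane_funcs = {
	.update_plane	= drm_plane_helper_update,
	.disable_plane	= drm_plane_helper_disable,
	.destroy	= drm_plane_cleanup,
};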
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7483a47de8e4..6591d48c1b9d 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -58,28 +58,23 @@
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
-static void drm_mode_validate_flag(struct drm_connector *connector,
- int flags)
+static enum drm_mode_status
+drm_mode_validate_flag(const struct drm_display_mode *mode,
+ int flags)
{
- struct drm_display_mode *mode;
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ return MODE_NO_INTERLACE;
- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
- DRM_MODE_FLAG_3D_MASK))
- return;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ return MODE_NO_DBLESCAN;
- list_for_each_entry(mode, &connector->modes, head) {
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
- !(flags & DRM_MODE_FLAG_INTERLACE))
- mode->status = MODE_NO_INTERLACE;
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
- !(flags & DRM_MODE_FLAG_DBLSCAN))
- mode->status = MODE_NO_DBLESCAN;
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
- !(flags & DRM_MODE_FLAG_3D_MASK))
- mode->status = MODE_NO_STEREO;
- }
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+ !(flags & DRM_MODE_FLAG_3D_MASK))
+ return MODE_NO_STEREO;
- return;
+ return MODE_OK;
}
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
@@ -108,6 +103,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
int count = 0;
int mode_flags = 0;
bool verbose_prune = true;
+ enum drm_connector_status old_status;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -126,7 +122,33 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
+ old_status = connector->status;
+
connector->status = connector->funcs->detect(connector, true);
+
+ /*
+ * Normally either the driver's hpd code or the poll loop should
+ * pick up any changes and fire the hotplug event. But if
+ * userspace sneaks in a probe, we might miss a change. Hence
+ * check here, and if anything changed start the hotplug code.
+ */
+ if (old_status != connector->status) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ connector->name,
+ old_status, connector->status);
+
+ /*
+ * The hotplug event code might call into the fb
+ * helpers, and so expects that we do not hold any
+ * locks. Fire up the poll work instead; it will
+ * disable itself again.
+ */
+ dev->mode_config.delayed_event = true;
+ if (dev->mode_config.poll_enabled)
+ schedule_delayed_work(&dev->mode_config.output_poll_work,
+ 0);
+ }
}
/* Re-enable polling in case the global poll config changed. */
@@ -164,18 +186,22 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
drm_mode_connector_list_update(connector, merge_type_bits);
- if (maxX && maxY)
- drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
-
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
- drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
+ mode->status = drm_mode_validate_basic(mode);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_size(mode, maxX, maxY);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_flag(mode, mode_flags);
+
if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
@@ -275,10 +301,14 @@ static void output_poll_execute(struct work_struct *work)
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status;
- bool repoll = false, changed = false;
+ bool repoll = false, changed;
+
+ /* Pick up any changes detected by the probe functions. */
+ changed = dev->mode_config.delayed_event;
+ dev->mode_config.delayed_event = false;
if (!drm_kms_helper_poll)
- return;
+ goto out;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -305,6 +335,24 @@ static void output_poll_execute(struct work_struct *work)
if (old_status != connector->status) {
const char *old, *new;
+ /*
+ * The poll work sets force=false when calling detect so
+ * that drivers can avoid doing disruptive tests (e.g.
+ * when load detect cycles could cause flickering on
+ * other, running displays). This bears the risk that we
+ * flip-flop between unknown here in the poll work and
+ * the real state when userspace forces a full detect
+ * call after receiving a hotplug event due to this
+ * change.
+ *
+ * Hence clamp an unknown detect status to the old
+ * value.
+ */
+ if (connector->status == connector_status_unknown) {
+ connector->status = old_status;
+ continue;
+ }
+
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
@@ -320,6 +368,7 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
+out:
if (changed)
drm_kms_helper_hotplug_event(dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index cc3d6d6d67e0..5c99d3773212 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -339,19 +339,51 @@ static ssize_t select_subconnector_show(struct device *device,
drm_get_dvi_i_select_name((int)subconnector));
}
-static struct device_attribute connector_attrs[] = {
- __ATTR_RO(status),
- __ATTR_RO(enabled),
- __ATTR_RO(dpms),
- __ATTR_RO(modes),
+static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RO(enabled);
+static DEVICE_ATTR_RO(dpms);
+static DEVICE_ATTR_RO(modes);
+
+static struct attribute *connector_dev_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_dpms.attr,
+ &dev_attr_modes.attr,
+ NULL
};
/* These attributes are for both DVI-I connectors and all types of tv-out. */
-static struct device_attribute connector_attrs_opt1[] = {
- __ATTR_RO(subconnector),
- __ATTR_RO(select_subconnector),
+static DEVICE_ATTR_RO(subconnector);
+static DEVICE_ATTR_RO(select_subconnector);
+
+static struct attribute *connector_opt_dev_attrs[] = {
+ &dev_attr_subconnector.attr,
+ &dev_attr_select_subconnector.attr,
+ NULL
};
+static umode_t connector_opt_dev_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_connector *connector = to_drm_connector(dev);
+
+ /*
+ * In the long run it may be a good idea to make one set of
+ * optionals per connector type.
+ */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_TV:
+ return attr->mode;
+ }
+
+ return 0;
+}
+
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
@@ -359,6 +391,27 @@ static struct bin_attribute edid_attr = {
.read = edid_show,
};
+static struct bin_attribute *connector_bin_attrs[] = {
+ &edid_attr,
+ NULL
+};
+
+static const struct attribute_group connector_dev_group = {
+ .attrs = connector_dev_attrs,
+ .bin_attrs = connector_bin_attrs,
+};
+
+static const struct attribute_group connector_opt_dev_group = {
+ .attrs = connector_opt_dev_attrs,
+ .is_visible = connector_opt_dev_is_visible,
+};
+
+static const struct attribute_group *connector_dev_groups[] = {
+ &connector_dev_group,
+ &connector_opt_dev_group,
+ NULL
+};
+
/**
* drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
@@ -371,73 +424,27 @@ static struct bin_attribute edid_attr = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- int attr_cnt = 0;
- int opt_cnt = 0;
- int i;
- int ret;
if (connector->kdev)
return 0;
- connector->kdev = device_create(drm_class, dev->primary->kdev,
- 0, connector, "card%d-%s",
- dev->primary->index, connector->name);
+ connector->kdev =
+ device_create_with_groups(drm_class, dev->primary->kdev, 0,
+ connector, connector_dev_groups,
+ "card%d-%s", dev->primary->index,
+ connector->name);
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
- ret = PTR_ERR(connector->kdev);
- goto out;
- }
-
- /* Standard attributes */
-
- for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
- ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
- if (ret)
- goto err_out_files;
+ return PTR_ERR(connector->kdev);
}
- /* Optional attributes */
- /*
- * In the long run it maybe a good idea to make one set of
- * optionals per connector type.
- */
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
- ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
- if (ret)
- goto err_out_files;
- }
- break;
- default:
- break;
- }
-
- ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
- if (ret)
- goto err_out_files;
-
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(dev);
return 0;
-
-err_out_files:
- for (i = 0; i < opt_cnt; i++)
- device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
- for (i = 0; i < attr_cnt; i++)
- device_remove_file(connector->kdev, &connector_attrs[i]);
- device_unregister(connector->kdev);
-
-out:
- return ret;
}
/**
@@ -455,16 +462,11 @@ out:
*/
void drm_sysfs_connector_remove(struct drm_connector *connector)
{
- int i;
-
if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
connector->name);
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
device_unregister(connector->kdev);
connector->kdev = NULL;
}
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 63b471205072..68c1f32fb086 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -50,8 +50,7 @@
*
* You must not use multiple offset managers on a single address_space.
* Otherwise, mm-core will be unable to tear down memory mappings as the VM will
- * no longer be linear. Please use VM_NONLINEAR in that case and implement your
- * own offset managers.
+ * no longer be linear.
*
* This offset manager works on page-based addresses. That is, every argument
* and return code (with the exception of drm_vma_node_offset_addr()) is given
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 7f9f6f9e9b7e..a5e74612100e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -6,23 +6,15 @@ config DRM_EXYNOS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
config DRM_EXYNOS_IOMMU
- bool "EXYNOS DRM IOMMU Support"
+ bool
depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
- help
- Choose this option if you want to use IOMMU feature for DRM.
-
-config DRM_EXYNOS_DMABUF
- bool "EXYNOS DRM DMABUF"
- depends on DRM_EXYNOS
- help
- Choose this option if you want to use DMABUF feature for DRM.
+ default y
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
@@ -32,9 +24,16 @@ config DRM_EXYNOS_FIMD
help
Choose this option if you want to use Exynos FIMD for DRM.
+config DRM_EXYNOS7_DECON
+ bool "Exynos DRM DECON"
+ depends on DRM_EXYNOS
+ select FB_MODE_HELPERS
+ help
+ Choose this option if you want to use Exynos DECON for DRM.
+
config DRM_EXYNOS_DPI
bool "EXYNOS DRM parallel output support"
- depends on DRM_EXYNOS_FIMD
+ depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
select DRM_PANEL
default n
help
@@ -42,7 +41,7 @@ config DRM_EXYNOS_DPI
config DRM_EXYNOS_DSI
bool "EXYNOS DRM MIPI-DSI driver support"
- depends on DRM_EXYNOS_FIMD
+ depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
select DRM_MIPI_DSI
select DRM_PANEL
default n
@@ -51,7 +50,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
- depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+ depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 33ae3652b8da..cc90679cfc06 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -6,11 +6,11 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o
+ exynos_drm_plane.o exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
new file mode 100644
index 000000000000..63f02e2380ae
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -0,0 +1,990 @@
+/* drivers/gpu/drm/exynos/exynos7_drm_decon.c
+ *
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ * Authors:
+ * Akshu Agarwal <akshua@gmail.com>
+ * Ajay Kumar <ajaykumar.rs@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/exynos7_decon.h>
+
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * DECON stands for Display and Enhancement controller.
+ */
+
+#define DECON_DEFAULT_FRAMERATE 60
+#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
+
+#define WINDOWS_NR 2
+
+struct decon_win_data {
+ unsigned int ovl_x;
+ unsigned int ovl_y;
+ unsigned int offset_x;
+ unsigned int offset_y;
+ unsigned int ovl_width;
+ unsigned int ovl_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int bpp;
+ unsigned int pixel_format;
+ dma_addr_t dma_addr;
+ bool enabled;
+ bool resume;
+};
+
+struct decon_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct exynos_drm_crtc *crtc;
+ struct clk *pclk;
+ struct clk *aclk;
+ struct clk *eclk;
+ struct clk *vclk;
+ void __iomem *regs;
+ struct decon_win_data win_data[WINDOWS_NR];
+ unsigned int default_win;
+ unsigned long irq_flags;
+ bool i80_if;
+ bool suspended;
+ int pipe;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
+
+ struct exynos_drm_panel_info panel;
+ struct exynos_drm_display *display;
+};
+
+static const struct of_device_id decon_driver_dt_match[] = {
+ {.compatible = "samsung,exynos7-decon"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
+
+static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for DECON to signal the VSYNC interrupt, or return after a
+ * timeout of 50ms (i.e. a refresh rate of 20 Hz).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+static void decon_clear_channel(struct decon_context *ctx)
+{
+ int win, ch_enabled = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* Check if any channel is enabled. */
+ for (win = 0; win < WINDOWS_NR; win++) {
+ u32 val = readl(ctx->regs + WINCON(win));
+
+ if (val & WINCONx_ENWIN) {
+ val &= ~WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+ ch_enabled = 1;
+ }
+ }
+
+ /* Wait for a vsync, as disabling a channel only takes effect at the next vsync */
+ if (ch_enabled) {
+ unsigned int state = ctx->suspended;
+
+ ctx->suspended = 0;
+ decon_wait_for_vblank(ctx->crtc);
+ ctx->suspended = state;
+ }
+}
+
+static int decon_ctx_initialize(struct decon_context *ctx,
+ struct drm_device *drm_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ ctx->drm_dev = drm_dev;
+ ctx->pipe = priv->pipe++;
+
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(ctx->drm_dev)) {
+ int ret;
+
+ /*
+ * If any channel is already active, the IOMMU will throw
+ * a page fault when it is enabled, so disable any active
+ * channels first.
+ */
+ decon_clear_channel(ctx);
+ ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
+ if (ret) {
+ DRM_ERROR("drm_iommu_attach failed.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void decon_ctx_remove(struct decon_context *ctx)
+{
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(ctx->drm_dev))
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+}
+
+static u32 decon_calc_clkdiv(struct decon_context *ctx,
+ const struct drm_display_mode *mode)
+{
+ unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ u32 clkdiv;
+
+ /* Find the clock divider value that gets us closest to ideal_clk */
+ clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk);
+
+ return (clkdiv < 0x100) ? clkdiv : 0xff;
+}
+
+static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->vrefresh == 0)
+ adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
+
+ return true;
+}
+
+static void decon_commit(struct exynos_drm_crtc *crtc)
+{
+ struct decon_context *ctx = crtc->ctx;
+ struct drm_display_mode *mode = &crtc->base.mode;
+ u32 val, clkdiv;
+
+ if (ctx->suspended)
+ return;
+
+ /* nothing to do if we haven't set the mode yet */
+ if (mode->htotal == 0 || mode->vtotal == 0)
+ return;
+
+ if (!ctx->i80_if) {
+ int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ /* setup vertical timing values. */
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+ vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+ val = VIDTCON0_VBPD(vbpd - 1) | VIDTCON0_VFPD(vfpd - 1);
+ writel(val, ctx->regs + VIDTCON0);
+
+ val = VIDTCON1_VSPW(vsync_len - 1);
+ writel(val, ctx->regs + VIDTCON1);
+
+ /* setup horizontal timing values. */
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+ hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+ /* setup horizontal timing values. */
+ val = VIDTCON2_HBPD(hbpd - 1) | VIDTCON2_HFPD(hfpd - 1);
+ writel(val, ctx->regs + VIDTCON2);
+
+ val = VIDTCON3_HSPW(hsync_len - 1);
+ writel(val, ctx->regs + VIDTCON3);
+ }
+
+ /* setup horizontal and vertical display size. */
+ val = VIDTCON4_LINEVAL(mode->vdisplay - 1) |
+ VIDTCON4_HOZVAL(mode->hdisplay - 1);
+ writel(val, ctx->regs + VIDTCON4);
+
+ writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD);
+
+ /*
+ * register fields with the '_F' suffix are updated
+ * at vsync (same as DMA start)
+ */
+ val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+ writel(val, ctx->regs + VIDCON0);
+
+ clkdiv = decon_calc_clkdiv(ctx, mode);
+ if (clkdiv > 1) {
+ val = VCLKCON1_CLKVAL_NUM_VCLK(clkdiv - 1);
+ writel(val, ctx->regs + VCLKCON1);
+ writel(val, ctx->regs + VCLKCON2);
+ }
+
+ val = readl(ctx->regs + DECON_UPDATE);
+ val |= DECON_UPDATE_STANDALONE_F;
+ writel(val, ctx->regs + DECON_UPDATE);
+}
+
+static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
+{
+ struct decon_context *ctx = crtc->ctx;
+ u32 val;
+
+ if (ctx->suspended)
+ return -EPERM;
+
+ if (!test_and_set_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+ val |= VIDINTCON0_INT_ENABLE;
+
+ if (!ctx->i80_if) {
+ val |= VIDINTCON0_INT_FRAME;
+ val &= ~VIDINTCON0_FRAMESEL0_MASK;
+ val |= VIDINTCON0_FRAMESEL0_VSYNC;
+ }
+
+ writel(val, ctx->regs + VIDINTCON0);
+ }
+
+ return 0;
+}
+
+static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
+{
+ struct decon_context *ctx = crtc->ctx;
+ u32 val;
+
+ if (ctx->suspended)
+ return;
+
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+ val &= ~VIDINTCON0_INT_ENABLE;
+ if (!ctx->i80_if)
+ val &= ~VIDINTCON0_INT_FRAME;
+
+ writel(val, ctx->regs + VIDINTCON0);
+ }
+}
+
+static void decon_win_mode_set(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct decon_context *ctx = crtc->ctx;
+ struct decon_win_data *win_data;
+ int win, padding;
+
+ if (!plane) {
+ DRM_ERROR("plane is NULL\n");
+ return;
+ }
+
+ win = plane->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+
+ win_data = &ctx->win_data[win];
+
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+ win_data->offset_x = plane->fb_x;
+ win_data->offset_y = plane->fb_y;
+ win_data->fb_width = plane->fb_width + padding;
+ win_data->fb_height = plane->fb_height;
+ win_data->ovl_x = plane->crtc_x;
+ win_data->ovl_y = plane->crtc_y;
+ win_data->ovl_width = plane->crtc_width;
+ win_data->ovl_height = plane->crtc_height;
+ win_data->dma_addr = plane->dma_addr[0];
+ win_data->bpp = plane->bpp;
+ win_data->pixel_format = plane->pixel_format;
+
+ DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+ win_data->offset_x, win_data->offset_y);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
+ DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+ plane->fb_width, plane->crtc_width);
+}
+
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+{
+ struct decon_win_data *win_data = &ctx->win_data[win];
+ unsigned long val;
+
+ val = readl(ctx->regs + WINCON(win));
+ val &= ~WINCONx_BPPMODE_MASK;
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ val |= WINCONx_BPPMODE_16BPP_565;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= WINCONx_BPPMODE_24BPP_xRGB;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ val |= WINCONx_BPPMODE_24BPP_xBGR;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ val |= WINCONx_BPPMODE_24BPP_RGBx;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ val |= WINCONx_BPPMODE_24BPP_BGRx;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX |
+ WINCONx_ALPHA_SEL;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX |
+ WINCONx_ALPHA_SEL;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX |
+ WINCONx_ALPHA_SEL;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
+ WINCONx_ALPHA_SEL;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ default:
+ DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
+
+ val |= WINCONx_BPPMODE_24BPP_xRGB;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ }
+
+ DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+
+ /*
+ * On Exynos, setting the DMA burst length to 16 words causes permanent
+ * tearing for very small buffers, e.g. the cursor buffer. Switching the
+ * burst mode based on plane size is not recommended, as the plane size
+ * varies a lot towards the end of the screen and rapid movement causes
+ * unstable DMA, which results in an IOMMU crash/tear.
+ */
+
+ if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ val &= ~WINCONx_BURSTLEN_MASK;
+ val |= WINCONx_BURSTLEN_8WORD;
+ }
+
+ writel(val, ctx->regs + WINCON(win));
+}
+
+static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
+{
+ unsigned int keycon0 = 0, keycon1 = 0;
+
+ keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
+ WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+ keycon1 = WxKEYCON1_COLVAL(0xffffffff);
+
+ writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
+ writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+}
+
+/**
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
+ *
+ * @ctx: decon context
+ * @win: window to protect registers for
+ * @protect: true to protect (disable updates)
+ */
+static void decon_shadow_protect_win(struct decon_context *ctx,
+ int win, bool protect)
+{
+ u32 bits, val;
+
+ bits = SHADOWCON_WINx_PROTECT(win);
+
+ val = readl(ctx->regs + SHADOWCON);
+ if (protect)
+ val |= bits;
+ else
+ val &= ~bits;
+ writel(val, ctx->regs + SHADOWCON);
+}
+
+static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+{
+ struct decon_context *ctx = crtc->ctx;
+ struct drm_display_mode *mode = &crtc->base.mode;
+ struct decon_win_data *win_data;
+ int win = zpos;
+ unsigned long val, alpha;
+ unsigned int last_x;
+ unsigned int last_y;
+
+ if (ctx->suspended)
+ return;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ /* If suspended, enable this on resume */
+ if (ctx->suspended) {
+ win_data->resume = true;
+ return;
+ }
+
+ /*
+ * The SHADOWCON/PRTCON register controls update timing.
+ *
+ * For example, if DMA is started while only the width value of a
+ * register has been set, the DECON hardware could malfunction. With
+ * window protection enabled, register fields with the '_F' suffix are
+ * not applied at vsync; they are applied only once the window is
+ * unprotected again.
+ */
+
+ /* protect windows */
+ decon_shadow_protect_win(ctx, win, true);
+
+ /* buffer start address */
+ val = (unsigned long)win_data->dma_addr;
+ writel(val, ctx->regs + VIDW_BUF_START(win));
+
+ /* buffer size */
+ writel(win_data->fb_width, ctx->regs + VIDW_WHOLE_X(win));
+ writel(win_data->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+
+ /* offset from the start of the buffer to read */
+ writel(win_data->offset_x, ctx->regs + VIDW_OFFSET_X(win));
+ writel(win_data->offset_y, ctx->regs + VIDW_OFFSET_Y(win));
+
+ DRM_DEBUG_KMS("start addr = 0x%lx\n",
+ (unsigned long)win_data->dma_addr);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+
+ /*
+ * OSD position.
+ * If the window layout goes outside the LCD layout, DECON fails.
+ */
+ if ((win_data->ovl_x + win_data->ovl_width) > mode->hdisplay)
+ win_data->ovl_x = mode->hdisplay - win_data->ovl_width;
+ if ((win_data->ovl_y + win_data->ovl_height) > mode->vdisplay)
+ win_data->ovl_y = mode->vdisplay - win_data->ovl_height;
+
+ val = VIDOSDxA_TOPLEFT_X(win_data->ovl_x) |
+ VIDOSDxA_TOPLEFT_Y(win_data->ovl_y);
+ writel(val, ctx->regs + VIDOSD_A(win));
+
+ last_x = win_data->ovl_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->ovl_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y);
+
+ writel(val, ctx->regs + VIDOSD_B(win));
+
+ DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ win_data->ovl_x, win_data->ovl_y, last_x, last_y);
+
+ /* OSD alpha */
+ alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
+ VIDOSDxC_ALPHA0_G_F(0x0) |
+ VIDOSDxC_ALPHA0_B_F(0x0);
+
+ writel(alpha, ctx->regs + VIDOSD_C(win));
+
+ alpha = VIDOSDxD_ALPHA1_R_F(0xff) |
+ VIDOSDxD_ALPHA1_G_F(0xff) |
+ VIDOSDxD_ALPHA1_B_F(0xff);
+
+ writel(alpha, ctx->regs + VIDOSD_D(win));
+
+ decon_win_set_pixfmt(ctx, win);
+
+ /* hardware window 0 doesn't support color key. */
+ if (win != 0)
+ decon_win_set_colkey(ctx, win);
+
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val |= WINCONx_TRIPLE_BUF_MODE;
+ val |= WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
+ /* Enable DMA channel and unprotect windows */
+ decon_shadow_protect_win(ctx, win, false);
+
+ val = readl(ctx->regs + DECON_UPDATE);
+ val |= DECON_UPDATE_STANDALONE_F;
+ writel(val, ctx->regs + DECON_UPDATE);
+
+ win_data->enabled = true;
+}
+
+static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+{
+ struct decon_context *ctx = crtc->ctx;
+ struct decon_win_data *win_data;
+ int win = zpos;
+ u32 val;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
+ /* protect windows */
+ decon_shadow_protect_win(ctx, win, true);
+
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val &= ~WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
+ /* unprotect windows */
+ decon_shadow_protect_win(ctx, win, false);
+
+ val = readl(ctx->regs + DECON_UPDATE);
+ val |= DECON_UPDATE_STANDALONE_F;
+ writel(val, ctx->regs + DECON_UPDATE);
+
+ win_data->enabled = false;
+}
+
+static void decon_window_suspend(struct decon_context *ctx)
+{
+ struct decon_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ if (win_data->enabled)
+ decon_win_disable(ctx->crtc, i);
+ }
+}
+
+static void decon_window_resume(struct decon_context *ctx)
+{
+ struct decon_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void decon_apply(struct decon_context *ctx)
+{
+ struct decon_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled)
+ decon_win_commit(ctx->crtc, i);
+ else
+ decon_win_disable(ctx->crtc, i);
+ }
+
+ decon_commit(ctx->crtc);
+}
+
+static void decon_init(struct decon_context *ctx)
+{
+ u32 val;
+
+ writel(VIDCON0_SWRESET, ctx->regs + VIDCON0);
+
+ val = VIDOUTCON0_DISP_IF_0_ON;
+ if (!ctx->i80_if)
+ val |= VIDOUTCON0_RGBIF;
+ writel(val, ctx->regs + VIDOUTCON0);
+
+ writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0);
+
+ if (!ctx->i80_if)
+ writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
+}
+
+static int decon_poweron(struct decon_context *ctx)
+{
+ int ret;
+
+ if (!ctx->suspended)
+ return 0;
+
+ ctx->suspended = false;
+
+ pm_runtime_get_sync(ctx->dev);
+
+ ret = clk_prepare_enable(ctx->pclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
+ goto pclk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->aclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
+ goto aclk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->eclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
+ goto eclk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->vclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
+ goto vclk_err;
+ }
+
+ decon_init(ctx);
+
+ /* if vblank was enabled, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ ret = decon_enable_vblank(ctx->crtc);
+ if (ret) {
+ DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
+ goto err;
+ }
+ }
+
+ decon_window_resume(ctx);
+
+ decon_apply(ctx);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(ctx->vclk);
+vclk_err:
+ clk_disable_unprepare(ctx->eclk);
+eclk_err:
+ clk_disable_unprepare(ctx->aclk);
+aclk_err:
+ clk_disable_unprepare(ctx->pclk);
+pclk_err:
+ ctx->suspended = true;
+ return ret;
+}
+
+static int decon_poweroff(struct decon_context *ctx)
+{
+ if (ctx->suspended)
+ return 0;
+
+ /*
+ * We need to make sure that all windows are disabled before we
+ * power off the controller. Otherwise we might try to scan out
+ * from a destroyed buffer later.
+ */
+ decon_window_suspend(ctx);
+
+ clk_disable_unprepare(ctx->vclk);
+ clk_disable_unprepare(ctx->eclk);
+ clk_disable_unprepare(ctx->aclk);
+ clk_disable_unprepare(ctx->pclk);
+
+ pm_runtime_put_sync(ctx->dev);
+
+ ctx->suspended = true;
+ return 0;
+}
+
+static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
+{
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ decon_poweron(crtc->ctx);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ decon_poweroff(crtc->ctx);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_crtc_ops decon_crtc_ops = {
+ .dpms = decon_dpms,
+ .mode_fixup = decon_mode_fixup,
+ .commit = decon_commit,
+ .enable_vblank = decon_enable_vblank,
+ .disable_vblank = decon_disable_vblank,
+ .wait_for_vblank = decon_wait_for_vblank,
+ .win_mode_set = decon_win_mode_set,
+ .win_commit = decon_win_commit,
+ .win_disable = decon_win_disable,
+};
+
+
+static irqreturn_t decon_irq_handler(int irq, void *dev_id)
+{
+ struct decon_context *ctx = (struct decon_context *)dev_id;
+ u32 val, clear_bit;
+
+ val = readl(ctx->regs + VIDINTCON1);
+
+ clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
+ if (val & clear_bit)
+ writel(clear_bit, ctx->regs + VIDINTCON1);
+
+ /* check the crtc is detached already from encoder */
+ if (ctx->pipe < 0 || !ctx->drm_dev)
+ goto out;
+
+ if (!ctx->i80_if) {
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static int decon_bind(struct device *dev, struct device *master, void *data)
+{
+ struct decon_context *ctx = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = decon_ctx_initialize(ctx, drm_dev);
+ if (ret) {
+ DRM_ERROR("decon_ctx_initialize failed.\n");
+ return ret;
+ }
+
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
+ EXYNOS_DISPLAY_TYPE_LCD,
+ &decon_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc)) {
+ decon_ctx_remove(ctx);
+ return PTR_ERR(ctx->crtc);
+ }
+
+ if (ctx->display)
+ exynos_drm_create_enc_conn(drm_dev, ctx->display);
+
+ return 0;
+
+}
+
+static void decon_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct decon_context *ctx = dev_get_drvdata(dev);
+
+ decon_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
+
+ if (ctx->display)
+ exynos_dpi_remove(ctx->display);
+
+ decon_ctx_remove(ctx);
+}
+
+static const struct component_ops decon_component_ops = {
+ .bind = decon_bind,
+ .unbind = decon_unbind,
+};
+
+static int decon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct decon_context *ctx;
+ struct device_node *i80_if_timings;
+ struct resource *res;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret)
+ return ret;
+
+ ctx->dev = dev;
+ ctx->suspended = true;
+
+ i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+ if (i80_if_timings)
+ ctx->i80_if = true;
+ of_node_put(i80_if_timings);
+
+ ctx->regs = of_iomap(dev->of_node, 0);
+ if (IS_ERR(ctx->regs)) {
+ ret = PTR_ERR(ctx->regs);
+ goto err_del_component;
+ }
+
+ ctx->pclk = devm_clk_get(dev, "pclk_decon0");
+ if (IS_ERR(ctx->pclk)) {
+ dev_err(dev, "failed to get bus clock pclk\n");
+ ret = PTR_ERR(ctx->pclk);
+ goto err_iounmap;
+ }
+
+ ctx->aclk = devm_clk_get(dev, "aclk_decon0");
+ if (IS_ERR(ctx->aclk)) {
+ dev_err(dev, "failed to get bus clock aclk\n");
+ ret = PTR_ERR(ctx->aclk);
+ goto err_iounmap;
+ }
+
+ ctx->eclk = devm_clk_get(dev, "decon0_eclk");
+ if (IS_ERR(ctx->eclk)) {
+ dev_err(dev, "failed to get eclock\n");
+ ret = PTR_ERR(ctx->eclk);
+ goto err_iounmap;
+ }
+
+ ctx->vclk = devm_clk_get(dev, "decon0_vclk");
+ if (IS_ERR(ctx->vclk)) {
+ dev_err(dev, "failed to get vclock\n");
+ ret = PTR_ERR(ctx->vclk);
+ goto err_iounmap;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ ctx->i80_if ? "lcd_sys" : "vsync");
+ if (!res) {
+ dev_err(dev, "irq request failed.\n");
+ ret = -ENXIO;
+ goto err_iounmap;
+ }
+
+ ret = devm_request_irq(dev, res->start, decon_irq_handler,
+ 0, "drm_decon", ctx);
+ if (ret) {
+ dev_err(dev, "irq request failed.\n");
+ goto err_iounmap;
+ }
+
+ init_waitqueue_head(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
+
+ platform_set_drvdata(pdev, ctx);
+
+ ctx->display = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->display)) {
+ ret = PTR_ERR(ctx->display);
+ goto err_iounmap;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = component_add(dev, &decon_component_ops);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ return ret;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(dev);
+
+err_iounmap:
+ iounmap(ctx->regs);
+
+err_del_component:
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
+ return ret;
+}
+
+static int decon_remove(struct platform_device *pdev)
+{
+ struct decon_context *ctx = dev_get_drvdata(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ iounmap(ctx->regs);
+
+ component_del(&pdev->dev, &decon_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return 0;
+}
+
+struct platform_driver decon_driver = {
+ .probe = decon_probe,
+ .remove = decon_remove,
+ .driver = {
+ .name = "exynos-decon",
+ .of_match_table = decon_driver_dt_match,
+ },
+};
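For reference, the pixel-clock divider logic in decon_calc_clkdiv()/decon_commit() works out as follows (illustrative numbers, not taken from this patch):

/*
 * For a 1920x1080@60 mode with htotal = 2200 and vtotal = 1125:
 *
 *   ideal_clk = 2200 * 1125 * 60 = 148,500,000 Hz
 *
 * With a 297 MHz vclk parent, DIV_ROUND_UP(297000000, 148500000) = 2, and
 * since clkdiv > 1 the hardware is programmed with clkdiv - 1 = 1 via
 * VCLKCON1/VCLKCON2 in decon_commit().
 */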
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 34d46aa75416..bf17a60b40ed 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/gpio.h>
#include <linux/component.h>
#include <linux/phy/phy.h>
@@ -993,32 +994,20 @@ static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
.best_encoder = exynos_dp_best_encoder,
};
-static bool find_bridge(const char *compat, struct bridge_init *bridge)
-{
- bridge->client = NULL;
- bridge->node = of_find_compatible_node(NULL, NULL, compat);
- if (!bridge->node)
- return false;
-
- bridge->client = of_find_i2c_device_by_node(bridge->node);
- if (!bridge->client)
- return false;
-
- return true;
-}
-
/* returns the number of bridges attached */
-static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
+static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
struct drm_encoder *encoder)
{
- struct bridge_init bridge;
int ret;
- if (find_bridge("nxp,ptn3460", &bridge)) {
- ret = ptn3460_init(dev, encoder, bridge.client, bridge.node);
- if (!ret)
- return 1;
+ encoder->bridge = dp->bridge;
+ dp->bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder->dev, dp->bridge);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ return ret;
}
+
return 0;
}
@@ -1032,9 +1021,11 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
dp->encoder = encoder;
/* Pre-empt DP connector creation if there's a bridge */
- ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder);
- if (ret)
- return 0;
+ if (dp->bridge) {
+ ret = exynos_drm_attach_lcd_bridge(dp, encoder);
+ if (!ret)
+ return 0;
+ }
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -1067,10 +1058,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
phy_power_off(dp->phy);
}
-static void exynos_dp_poweron(struct exynos_drm_display *display)
+static void exynos_dp_poweron(struct exynos_dp_device *dp)
{
- struct exynos_dp_device *dp = display_to_dp(display);
-
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
@@ -1085,13 +1074,11 @@ static void exynos_dp_poweron(struct exynos_drm_display *display)
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
- exynos_dp_commit(display);
+ exynos_dp_commit(&dp->display);
}
-static void exynos_dp_poweroff(struct exynos_drm_display *display)
+static void exynos_dp_poweroff(struct exynos_dp_device *dp)
{
- struct exynos_dp_device *dp = display_to_dp(display);
-
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1119,12 +1106,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(display);
+ exynos_dp_poweron(dp);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(display);
+ exynos_dp_poweroff(dp);
break;
default:
break;
@@ -1241,7 +1228,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
}
}
- if (!dp->panel) {
+ if (!dp->panel && !dp->bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
return ret;
@@ -1325,7 +1312,7 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *panel_node;
+ struct device_node *panel_node, *bridge_node, *endpoint;
struct exynos_dp_device *dp;
int ret;
@@ -1351,6 +1338,18 @@ static int exynos_dp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ bridge_node = of_graph_get_remote_port_parent(endpoint);
+ if (bridge_node) {
+ dp->bridge = of_drm_find_bridge(bridge_node);
+ of_node_put(bridge_node);
+ if (!dp->bridge)
+ return -EPROBE_DEFER;
+ } else
+ return -EPROBE_DEFER;
+ }
+
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
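
The probe change above walks the OF graph to find a remote bridge and defers until that bridge driver has registered. A condensed sketch of the lookup, with a hypothetical foo_find_bridge() helper, explicit of_node_put() calls, and the same headers as the probe code assumed:

static struct drm_bridge *foo_find_bridge(struct device *dev)
{
	struct device_node *ep, *remote;
	struct drm_bridge *bridge;

	ep = of_graph_get_next_endpoint(dev->of_node, NULL);
	if (!ep)
		return NULL;		/* no remote device described in DT */

	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);
	if (!remote)
		return NULL;

	bridge = of_drm_find_bridge(remote);	/* NULL until the bridge driver probes */
	of_node_put(remote);

	return bridge;
}
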
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 164f171168e7..a4e799679669 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -153,6 +153,7 @@ struct exynos_dp_device {
struct drm_connector connector;
struct drm_encoder *encoder;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 9c8088462c26..24994ba10e28 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -63,11 +63,11 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
return -ENOMEM;
}
- buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+ buf->cookie = dma_alloc_attrs(dev->dev,
buf->size,
&buf->dma_addr, GFP_KERNEL,
&buf->dma_attrs);
- if (!buf->kvaddr) {
+ if (!buf->cookie) {
DRM_ERROR("failed to allocate buffer.\n");
ret = -ENOMEM;
goto err_free;
@@ -132,7 +132,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
buf->sgt = NULL;
if (!is_drm_iommu_supported(dev)) {
- dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+ dma_free_attrs(dev->dev, buf->size, buf->cookie,
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
drm_free_large(buf->pages);
} else
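
The rename from kvaddr to cookie above reflects that the value returned by dma_alloc_attrs() is only an opaque token to be handed back to dma_free_attrs(); with DMA_ATTR_NO_KERNEL_MAPPING it need not be a usable kernel virtual address. A minimal sketch of the pairing, assuming the struct dma_attrs API of this kernel generation and a hypothetical foo_dma_roundtrip():

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static int foo_dma_roundtrip(struct device *dev, size_t size)
{
	dma_addr_t dma_addr;
	void *cookie;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	cookie = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;

	/* the device uses dma_addr; the cookie is kept only for the free below */
	dma_free_attrs(dev, size, cookie, dma_addr, &attrs);
	return 0;
}
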
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 45026e693225..48ccab7fdf63 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -20,43 +20,9 @@
#include "exynos_drm_encoder.h"
#include "exynos_drm_plane.h"
-#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
- drm_crtc)
-
-enum exynos_crtc_mode {
- CRTC_MODE_NORMAL, /* normal mode */
- CRTC_MODE_BLANK, /* The private plane of crtc is blank */
-};
-
-/*
- * Exynos specific crtc structure.
- *
- * @drm_crtc: crtc object.
- * @manager: the manager associated with this crtc
- * @pipe: a crtc index created at load() with a new crtc object creation
- * and the crtc object would be set to private->crtc array
- * to get a crtc object corresponding to this pipe from private->crtc
- * array when irq interrupt occurred. the reason of using this pipe is that
- * drm framework doesn't support multiple irq yet.
- * we can refer to the crtc to current hardware interrupt occurred through
- * this pipe value.
- * @dpms: store the crtc dpms value
- * @mode: store the crtc mode value
- */
-struct exynos_drm_crtc {
- struct drm_crtc drm_crtc;
- struct exynos_drm_manager *manager;
- unsigned int pipe;
- unsigned int dpms;
- enum exynos_crtc_mode mode;
- wait_queue_head_t pending_flip_queue;
- atomic_t pending_flip;
-};
-
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_manager *manager = exynos_crtc->manager;
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
@@ -74,8 +40,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
drm_crtc_vblank_off(crtc);
}
- if (manager->ops->dpms)
- manager->ops->dpms(manager, mode);
+ if (exynos_crtc->ops->dpms)
+ exynos_crtc->ops->dpms(exynos_crtc, mode);
exynos_crtc->dpms = mode;
@@ -91,16 +57,15 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_manager *manager = exynos_crtc->manager;
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(crtc->primary);
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- exynos_plane_commit(crtc->primary);
-
- if (manager->ops->commit)
- manager->ops->commit(manager);
+ if (exynos_crtc->ops->win_commit)
+ exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
- exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
+ if (exynos_crtc->ops->commit)
+ exynos_crtc->ops->commit(exynos_crtc);
}
static bool
@@ -109,10 +74,10 @@ exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_manager *manager = exynos_crtc->manager;
- if (manager->ops->mode_fixup)
- return manager->ops->mode_fixup(manager, mode, adjusted_mode);
+ if (exynos_crtc->ops->mode_fixup)
+ return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
+ adjusted_mode);
return true;
}
@@ -122,11 +87,10 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb)
{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_manager *manager = exynos_crtc->manager;
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
+ int ret;
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
@@ -134,24 +98,25 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+ ret = exynos_check_plane(crtc->primary, fb);
+ if (ret < 0)
+ return ret;
+
crtc_w = fb->width - x;
crtc_h = fb->height - y;
+ exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
+ crtc_w, crtc_h, x, y, crtc_w, crtc_h);
- if (manager->ops->mode_set)
- manager->ops->mode_set(manager, &crtc->mode);
-
- return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
- crtc_w, crtc_h, x, y, crtc_w, crtc_h);
+ return 0;
}
-static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
+static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
- int ret;
/* when framebuffer changing is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
@@ -162,20 +127,8 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
crtc_w = fb->width - x;
crtc_h = fb->height - y;
- ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
- crtc_w, crtc_h, x, y, crtc_w, crtc_h);
- if (ret)
- return ret;
-
- exynos_drm_crtc_commit(crtc);
-
- return 0;
-}
-
-static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return exynos_drm_crtc_mode_set_commit(crtc, x, y, old_fb);
+ return exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
+ crtc_w, crtc_h, x, y, crtc_w, crtc_h);
}
static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
@@ -214,6 +167,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *old_fb = crtc->primary->fb;
+ unsigned int crtc_w, crtc_h;
int ret = -EINVAL;
/* when the page flip is requested, crtc's dpms should be on */
@@ -245,8 +199,11 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irq(&dev->event_lock);
crtc->primary->fb = fb;
- ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y,
- NULL);
+ crtc_w = fb->width - crtc->x;
+ crtc_h = fb->height - crtc->y;
+ ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
+ crtc_w, crtc_h, crtc->x, crtc->y,
+ crtc_w, crtc_h);
if (ret) {
crtc->primary->fb = old_fb;
@@ -275,116 +232,61 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(exynos_crtc);
}
-static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = crtc->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (property == dev_priv->crtc_mode_property) {
- enum exynos_crtc_mode mode = val;
-
- if (mode == exynos_crtc->mode)
- return 0;
-
- exynos_crtc->mode = mode;
-
- switch (mode) {
- case CRTC_MODE_NORMAL:
- exynos_drm_crtc_commit(crtc);
- break;
- case CRTC_MODE_BLANK:
- exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
- break;
- default:
- break;
- }
-
- return 0;
- }
-
- return -EINVAL;
-}
-
static struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = exynos_drm_crtc_page_flip,
.destroy = exynos_drm_crtc_destroy,
- .set_property = exynos_drm_crtc_set_property,
-};
-
-static const struct drm_prop_enum_list mode_names[] = {
- { CRTC_MODE_NORMAL, "normal" },
- { CRTC_MODE_BLANK, "blank" },
};
-static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_property *prop;
-
- prop = dev_priv->crtc_mode_property;
- if (!prop) {
- prop = drm_property_create_enum(dev, 0, "mode", mode_names,
- ARRAY_SIZE(mode_names));
- if (!prop)
- return;
-
- dev_priv->crtc_mode_property = prop;
- }
-
- drm_object_attach_property(&crtc->base, prop, 0);
-}
-
-int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
+struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ int pipe,
+ enum exynos_drm_output_type type,
+ struct exynos_drm_crtc_ops *ops,
+ void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
struct drm_plane *plane;
- struct exynos_drm_private *private = manager->drm_dev->dev_private;
+ struct exynos_drm_private *private = drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
if (!exynos_crtc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
init_waitqueue_head(&exynos_crtc->pending_flip_queue);
atomic_set(&exynos_crtc->pending_flip, 0);
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
- exynos_crtc->manager = manager;
- exynos_crtc->pipe = manager->pipe;
- plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
+ exynos_crtc->pipe = pipe;
+ exynos_crtc->type = type;
+ exynos_crtc->ops = ops;
+ exynos_crtc->ctx = ctx;
+ plane = exynos_plane_init(drm_dev, 1 << pipe,
DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto err_plane;
}
- manager->crtc = &exynos_crtc->drm_crtc;
- crtc = &exynos_crtc->drm_crtc;
+ crtc = &exynos_crtc->base;
- private->crtc[manager->pipe] = crtc;
+ private->crtc[pipe] = crtc;
- ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
+ ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
&exynos_crtc_funcs);
if (ret < 0)
goto err_crtc;
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
- exynos_drm_crtc_attach_mode_property(crtc);
-
- return 0;
+ return exynos_crtc;
err_crtc:
plane->funcs->destroy(plane);
err_plane:
kfree(exynos_crtc);
- return ret;
+ return ERR_PTR(ret);
}
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
@@ -392,13 +294,12 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc =
to_exynos_crtc(private->crtc[pipe]);
- struct exynos_drm_manager *manager = exynos_crtc->manager;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
return -EPERM;
- if (manager->ops->enable_vblank)
- manager->ops->enable_vblank(manager);
+ if (exynos_crtc->ops->enable_vblank)
+ exynos_crtc->ops->enable_vblank(exynos_crtc);
return 0;
}
@@ -408,13 +309,12 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc =
to_exynos_crtc(private->crtc[pipe]);
- struct exynos_drm_manager *manager = exynos_crtc->manager;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
return;
- if (manager->ops->disable_vblank)
- manager->ops->disable_vblank(manager);
+ if (exynos_crtc->ops->disable_vblank)
+ exynos_crtc->ops->disable_vblank(exynos_crtc);
}
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
@@ -443,42 +343,9 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
- struct exynos_drm_overlay *overlay)
-{
- struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
-
- if (manager->ops->win_mode_set)
- manager->ops->win_mode_set(manager, overlay);
-}
-
-void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos)
-{
- struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
-
- if (manager->ops->win_commit)
- manager->ops->win_commit(manager, zpos);
-}
-
-void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos)
-{
- struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
-
- if (manager->ops->win_enable)
- manager->ops->win_enable(manager, zpos);
-}
-
-void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos)
-{
- struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
-
- if (manager->ops->win_disable)
- manager->ops->win_disable(manager, zpos);
-}
-
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
{
- struct exynos_drm_manager *manager;
+ struct exynos_drm_crtc *exynos_crtc;
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
@@ -487,15 +354,15 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
* for all encoders.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- manager = to_exynos_crtc(crtc)->manager;
+ exynos_crtc = to_exynos_crtc(crtc);
/*
* wait for vblank interrupt
* - this makes sure that overlay data are updated to
* real hardware.
*/
- if (manager->ops->wait_for_vblank)
- manager->ops->wait_for_vblank(manager);
+ if (exynos_crtc->ops->wait_for_vblank)
+ exynos_crtc->ops->wait_for_vblank(exynos_crtc);
}
}
@@ -508,8 +375,8 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
struct exynos_drm_crtc *exynos_crtc;
exynos_crtc = to_exynos_crtc(crtc);
- if (exynos_crtc->manager->type == out_type)
- return exynos_crtc->manager->pipe;
+ if (exynos_crtc->type == out_type)
+ return exynos_crtc->pipe;
}
return -EPERM;
@@ -517,8 +384,8 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
{
- struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (manager->ops->te_handler)
- manager->ops->te_handler(manager);
+ if (exynos_crtc->ops->te_handler)
+ exynos_crtc->ops->te_handler(exynos_crtc);
}
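
With the manager abstraction removed, a controller driver now fills an exynos_drm_crtc_ops table and passes its private context straight to exynos_drm_crtc_create(); the fimd_bind() hunk further down shows the real call, and the sketch below condenses the pattern with hypothetical foo_* names and a hypothetical foo_context.

struct foo_context {
	struct exynos_drm_crtc *crtc;
	int pipe;
};

static void foo_commit(struct exynos_drm_crtc *crtc)
{
	struct foo_context *ctx = crtc->ctx;	/* driver state comes back via ctx */

	/* program the hardware from ctx here */
}

static struct exynos_drm_crtc_ops foo_crtc_ops = {
	.commit = foo_commit,
};

static int foo_bind_crtc(struct foo_context *ctx, struct drm_device *drm_dev)
{
	ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
					   EXYNOS_DISPLAY_TYPE_LCD,
					   &foo_crtc_ops, ctx);
	return PTR_ERR_OR_ZERO(ctx->crtc);
}
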
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index e353d353836f..6258b800aab8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -17,14 +17,18 @@
#include "exynos_drm_drv.h"
-int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
+struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ int pipe,
+ enum exynos_drm_output_type type,
+ struct exynos_drm_crtc_ops *ops,
+ void *context);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
- struct exynos_drm_overlay *overlay);
+ struct exynos_drm_plane *plane);
void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 60192ed544f0..3833bf8ca025 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -279,7 +279,3 @@ err_buf_detach:
return ERR_PTR(ret);
}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
index 49acfafb4fdb..886de9ff484d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -12,14 +12,9 @@
#ifndef _EXYNOS_DRM_DMABUF_H_
#define _EXYNOS_DRM_DMABUF_H_
-#ifdef CONFIG_DRM_EXYNOS_DMABUF
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct dma_buf *dma_buf);
-#else
-#define exynos_dmabuf_prime_export NULL
-#define exynos_dmabuf_prime_import NULL
-#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 121470a83d1a..90168d7cf66a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -556,6 +556,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_FIMD
&fimd_driver,
#endif
+#ifdef CONFIG_DRM_EXYNOS7_DECON
+ &decon_driver,
+#endif
#ifdef CONFIG_DRM_EXYNOS_DP
&dp_driver,
#endif
@@ -612,6 +615,7 @@ static const char * const strings[] = {
"samsung,exynos3",
"samsung,exynos4",
"samsung,exynos5",
+ "samsung,exynos7",
};
static struct platform_driver exynos_drm_platform_driver = {
@@ -645,18 +649,6 @@ static int exynos_drm_init(void)
if (!is_exynos)
return -ENODEV;
- /*
- * Register device object only in case of Exynos SoC.
- *
- * Below codes resolves temporarily infinite loop issue incurred
- * by Exynos drm driver when using multi-platform kernel.
- * So these codes will be replaced with more generic way later.
- */
- if (!of_machine_is_compatible("samsung,exynos3") &&
- !of_machine_is_compatible("samsung,exynos4") &&
- !of_machine_is_compatible("samsung,exynos5"))
- return -ENODEV;
-
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 2e5063488c50..9afd390d4674 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -23,6 +23,9 @@
#define MAX_FB_BUFFER 4
#define DEFAULT_ZPOS -1
+#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base)
+#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base)
+
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
@@ -44,6 +47,7 @@ enum exynos_drm_output_type {
/*
* Exynos drm common overlay structure.
*
+ * @base: plane object
* @fb_x: offset x on a framebuffer to be displayed.
* - the unit is screen coordinates.
* @fb_y: offset y on a framebuffer to be displayed.
@@ -73,11 +77,14 @@ enum exynos_drm_output_type {
* @local_path: in case of lcd type, local path mode on or off.
* @transparency: transparency on or off.
* @activated: activated or not.
+ * @enabled: enabled or not.
*
* this structure is common to exynos SoC and its contents would be copied
* to hardware specific overlay info.
*/
-struct exynos_drm_overlay {
+
+struct exynos_drm_plane {
+ struct drm_plane base;
unsigned int fb_x;
unsigned int fb_y;
unsigned int fb_width;
@@ -104,6 +111,7 @@ struct exynos_drm_overlay {
bool local_path:1;
bool transparency:1;
bool activated:1;
+ bool enabled:1;
};
/*
@@ -155,11 +163,10 @@ struct exynos_drm_display {
};
/*
- * Exynos drm manager ops
+ * Exynos drm crtc ops
*
* @dpms: control device power.
* @mode_fixup: fix mode data before applying it
- * @mode_set: set the given mode to the manager
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -172,44 +179,49 @@ struct exynos_drm_display {
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
*/
-struct exynos_drm_manager;
-struct exynos_drm_manager_ops {
- void (*dpms)(struct exynos_drm_manager *mgr, int mode);
- bool (*mode_fixup)(struct exynos_drm_manager *mgr,
+struct exynos_drm_crtc;
+struct exynos_drm_crtc_ops {
+ void (*dpms)(struct exynos_drm_crtc *crtc, int mode);
+ bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
- void (*mode_set)(struct exynos_drm_manager *mgr,
- const struct drm_display_mode *mode);
- void (*commit)(struct exynos_drm_manager *mgr);
- int (*enable_vblank)(struct exynos_drm_manager *mgr);
- void (*disable_vblank)(struct exynos_drm_manager *mgr);
- void (*wait_for_vblank)(struct exynos_drm_manager *mgr);
- void (*win_mode_set)(struct exynos_drm_manager *mgr,
- struct exynos_drm_overlay *overlay);
- void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
- void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
- void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
- void (*te_handler)(struct exynos_drm_manager *mgr);
+ void (*commit)(struct exynos_drm_crtc *crtc);
+ int (*enable_vblank)(struct exynos_drm_crtc *crtc);
+ void (*disable_vblank)(struct exynos_drm_crtc *crtc);
+ void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
+ void (*win_mode_set)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
+ void (*win_commit)(struct exynos_drm_crtc *crtc, int zpos);
+ void (*win_enable)(struct exynos_drm_crtc *crtc, int zpos);
+ void (*win_disable)(struct exynos_drm_crtc *crtc, int zpos);
+ void (*te_handler)(struct exynos_drm_crtc *crtc);
};
/*
- * Exynos drm common manager structure, maps 1:1 with a crtc
+ * Exynos specific crtc structure.
*
- * @list: the list entry for this manager
+ * @base: crtc object.
* @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @drm_dev: pointer to the drm device
- * @crtc: crtc object.
- * @pipe: the pipe number for this crtc/manager
+ * @pipe: a crtc index assigned at load() when the crtc object is created;
+ * the crtc object is stored in the private->crtc array so that, when a
+ * hardware interrupt occurs, the crtc for this pipe can be looked up from
+ * that array. The pipe value is needed because the drm framework does not
+ * yet support multiple irqs, so it is the only way to map an interrupt
+ * back to its crtc.
+ * @dpms: store the crtc dpms value
* @ops: pointer to callbacks for exynos drm specific functionality
- * @ctx: A pointer to the manager's implementation specific context
+ * @ctx: A pointer to the crtc's implementation specific context
*/
-struct exynos_drm_manager {
- struct list_head list;
- enum exynos_drm_output_type type;
- struct drm_device *drm_dev;
- struct drm_crtc *crtc;
- int pipe;
- struct exynos_drm_manager_ops *ops;
+struct exynos_drm_crtc {
+ struct drm_crtc base;
+ enum exynos_drm_output_type type;
+ unsigned int pipe;
+ unsigned int dpms;
+ wait_queue_head_t pending_flip_queue;
+ atomic_t pending_flip;
+ struct exynos_drm_crtc_ops *ops;
+ void *ctx;
};
struct exynos_drm_g2d_private {
@@ -246,7 +258,6 @@ struct exynos_drm_private {
*/
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
- struct drm_property *crtc_mode_property;
unsigned long da_start;
unsigned long da_space_size;
@@ -333,6 +344,7 @@ void exynos_drm_component_del(struct device *dev,
enum exynos_drm_device_type dev_type);
extern struct platform_driver fimd_driver;
+extern struct platform_driver decon_driver;
extern struct platform_driver dp_driver;
extern struct platform_driver dsi_driver;
extern struct platform_driver mixer_driver;
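
The to_exynos_crtc()/to_exynos_plane() helpers added above rely on the drm base objects being embedded in the exynos wrapper structures. For reference, the sketch below shows what such a helper expands to; foo_to_exynos_crtc() is illustrative only, equivalent to the macro, not a new definition.

static inline struct exynos_drm_crtc *foo_to_exynos_crtc(struct drm_crtc *crtc)
{
	/* recover the wrapper from the embedded member the DRM core hands back */
	return container_of(crtc, struct exynos_drm_crtc, base);
}
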
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7e282e3d6038..57de0bdc5a3b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -102,7 +102,7 @@ static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
/* all planes connected to this encoder should be also disabled. */
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
- if (plane->crtc == encoder->crtc)
+ if (plane->crtc && (plane->crtc == encoder->crtc))
plane->funcs->disable_plane(plane);
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e12ea90c6237..84f8dfe1c5ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -79,9 +79,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
- struct drm_device *dev = helper->dev;
struct exynos_drm_gem_buf *buffer;
unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
+ unsigned int nr_pages;
unsigned long offset;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
@@ -94,25 +94,14 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
- /* map pages with kernel virtual space. */
+ nr_pages = buffer->size >> PAGE_SHIFT;
+
+ buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+ nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
if (!buffer->kvaddr) {
- if (is_drm_iommu_supported(dev)) {
- unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
-
- buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
- nr_pages, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
- } else {
- phys_addr_t dma_addr = buffer->dma_addr;
- if (dma_addr)
- buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
- else
- buffer->kvaddr = (void __iomem *)NULL;
- }
- if (!buffer->kvaddr) {
- DRM_ERROR("failed to map pages to kernel space.\n");
- return -EIO;
- }
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
}
/* buffer count to framebuffer always is 1 at booting time. */
@@ -313,7 +302,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
- if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
+ if (exynos_gem_obj->buffer->kvaddr)
vunmap(exynos_gem_obj->buffer->kvaddr);
/* release drm framebuffer and real buffer */
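
The fbdev change above now maps the GEM page array unconditionally with vmap() instead of falling back to phys_to_virt(). A minimal sketch of the map/unmap pairing, using hypothetical foo_* wrappers:

#include <linux/vmalloc.h>

static void *foo_map_fb_pages(struct page **pages, unsigned int nr_pages)
{
	/* contiguous, write-combined kernel mapping over the buffer's pages */
	return vmap(pages, nr_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static void foo_unmap_fb_pages(void *kvaddr)
{
	vunmap(kvaddr);
}
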
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 835b6af00970..842d6b8dc3c4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -461,7 +461,6 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
break;
case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV12MT:
case DRM_FORMAT_NV16:
cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
EXYNOS_MSCTRL_C_INT_IN_2PLANE);
@@ -511,7 +510,6 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV12MT:
cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
break;
default:
@@ -524,10 +522,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
- if (fmt == DRM_FORMAT_NV12MT)
- cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
- else
- cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
@@ -812,7 +807,6 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
break;
case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV12MT:
case DRM_FORMAT_NV16:
cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
@@ -867,7 +861,6 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV12MT:
case DRM_FORMAT_NV21:
cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
break;
@@ -883,10 +876,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
- if (fmt == DRM_FORMAT_NV12MT)
- cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
- else
- cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e5810d13bf9c..925fc69af1a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -157,14 +157,13 @@ struct fimd_win_data {
};
struct fimd_context {
- struct exynos_drm_manager manager;
struct device *dev;
struct drm_device *drm_dev;
+ struct exynos_drm_crtc *crtc;
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
struct regmap *sysreg;
- struct drm_display_mode mode;
struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
@@ -185,11 +184,6 @@ struct fimd_context {
struct exynos_drm_display *display;
};
-static inline struct fimd_context *mgr_to_fimd(struct exynos_drm_manager *mgr)
-{
- return container_of(mgr, struct fimd_context, manager);
-}
-
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
@@ -214,9 +208,9 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
return (struct fimd_driver_data *)of_id->data;
}
-static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
+static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
if (ctx->suspended)
return;
@@ -259,9 +253,8 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
writel(val, ctx->regs + SHADOWCON);
}
-static void fimd_clear_channel(struct exynos_drm_manager *mgr)
+static void fimd_clear_channel(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
int win, ch_enabled = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -286,38 +279,42 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
unsigned int state = ctx->suspended;
ctx->suspended = 0;
- fimd_wait_for_vblank(mgr);
+ fimd_wait_for_vblank(ctx->crtc);
ctx->suspended = state;
}
}
-static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
+static int fimd_ctx_initialize(struct fimd_context *ctx,
struct drm_device *drm_dev)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
- mgr->drm_dev = ctx->drm_dev = drm_dev;
- mgr->pipe = ctx->pipe = priv->pipe++;
+ ctx->drm_dev = drm_dev;
+ ctx->pipe = priv->pipe++;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) {
+ int ret;
+
/*
* If any channel is already active, iommu will throw
* a PAGE FAULT when enabled. So clear any channel if enabled.
*/
- fimd_clear_channel(mgr);
- drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
+ fimd_clear_channel(ctx);
+ ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
+ if (ret) {
+ DRM_ERROR("drm_iommu_attach failed.\n");
+ return ret;
+ }
+
}
return 0;
}
-static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
+static void fimd_ctx_remove(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
-
/* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev))
drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
@@ -343,7 +340,7 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
+static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -353,18 +350,10 @@ static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
return true;
}
-static void fimd_mode_set(struct exynos_drm_manager *mgr,
- const struct drm_display_mode *in_mode)
-{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
-
- drm_mode_copy(&ctx->mode, in_mode);
-}
-
-static void fimd_commit(struct exynos_drm_manager *mgr)
+static void fimd_commit(struct exynos_drm_crtc *crtc)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
- struct drm_display_mode *mode = &ctx->mode;
+ struct fimd_context *ctx = crtc->ctx;
+ struct drm_display_mode *mode = &crtc->base.mode;
struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
u32 val, clkdiv;
@@ -461,9 +450,9 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
writel(val, ctx->regs + VIDCON0);
}
-static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
+static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
@@ -493,9 +482,9 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
return 0;
}
-static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
+static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
@@ -517,45 +506,45 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
}
}
-static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
- struct exynos_drm_overlay *overlay)
+static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
struct fimd_win_data *win_data;
int win;
unsigned long offset;
- if (!overlay) {
- DRM_ERROR("overlay is NULL\n");
+ if (!plane) {
+ DRM_ERROR("plane is NULL\n");
return;
}
- win = overlay->zpos;
+ win = plane->zpos;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
if (win < 0 || win >= WINDOWS_NR)
return;
- offset = overlay->fb_x * (overlay->bpp >> 3);
- offset += overlay->fb_y * overlay->pitch;
+ offset = plane->fb_x * (plane->bpp >> 3);
+ offset += plane->fb_y * plane->pitch;
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+ DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
win_data = &ctx->win_data[win];
- win_data->offset_x = overlay->crtc_x;
- win_data->offset_y = overlay->crtc_y;
- win_data->ovl_width = overlay->crtc_width;
- win_data->ovl_height = overlay->crtc_height;
- win_data->fb_width = overlay->fb_width;
- win_data->fb_height = overlay->fb_height;
- win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->bpp = overlay->bpp;
- win_data->pixel_format = overlay->pixel_format;
- win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
- (overlay->bpp >> 3);
- win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+ win_data->offset_x = plane->crtc_x;
+ win_data->offset_y = plane->crtc_y;
+ win_data->ovl_width = plane->crtc_width;
+ win_data->ovl_height = plane->crtc_height;
+ win_data->fb_width = plane->fb_width;
+ win_data->fb_height = plane->fb_height;
+ win_data->dma_addr = plane->dma_addr[0] + offset;
+ win_data->bpp = plane->bpp;
+ win_data->pixel_format = plane->pixel_format;
+ win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
+ (plane->bpp >> 3);
+ win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
win_data->offset_x, win_data->offset_y);
@@ -563,7 +552,7 @@ static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- overlay->fb_width, overlay->crtc_width);
+ plane->fb_width, plane->crtc_width);
}
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
@@ -623,8 +612,8 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
* tearing for very small buffers, e.g. cursor buffer. Burst Mode
- * switching which is based on overlay size is not recommended as
- * overlay size varies alot towards the end of the screen and rapid
+ * switching which is based on plane size is not recommended as
+ * plane size varies a lot towards the end of the screen and rapid
* movement causes unstable DMA which results into iommu crash/tear.
*/
@@ -676,9 +665,9 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
+static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
@@ -799,9 +788,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
atomic_set(&ctx->win_updated, 1);
}
-static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
+static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
struct fimd_win_data *win_data;
int win = zpos;
@@ -833,9 +822,8 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
win_data->enabled = false;
}
-static void fimd_window_suspend(struct exynos_drm_manager *mgr)
+static void fimd_window_suspend(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -843,13 +831,12 @@ static void fimd_window_suspend(struct exynos_drm_manager *mgr)
win_data = &ctx->win_data[i];
win_data->resume = win_data->enabled;
if (win_data->enabled)
- fimd_win_disable(mgr, i);
+ fimd_win_disable(ctx->crtc, i);
}
}
-static void fimd_window_resume(struct exynos_drm_manager *mgr)
+static void fimd_window_resume(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -860,26 +847,24 @@ static void fimd_window_resume(struct exynos_drm_manager *mgr)
}
}
-static void fimd_apply(struct exynos_drm_manager *mgr)
+static void fimd_apply(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
if (win_data->enabled)
- fimd_win_commit(mgr, i);
+ fimd_win_commit(ctx->crtc, i);
else
- fimd_win_disable(mgr, i);
+ fimd_win_disable(ctx->crtc, i);
}
- fimd_commit(mgr);
+ fimd_commit(ctx->crtc);
}
-static int fimd_poweron(struct exynos_drm_manager *mgr)
+static int fimd_poweron(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
int ret;
if (!ctx->suspended)
@@ -903,16 +888,16 @@ static int fimd_poweron(struct exynos_drm_manager *mgr)
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags)) {
- ret = fimd_enable_vblank(mgr);
+ ret = fimd_enable_vblank(ctx->crtc);
if (ret) {
DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
goto enable_vblank_err;
}
}
- fimd_window_resume(mgr);
+ fimd_window_resume(ctx);
- fimd_apply(mgr);
+ fimd_apply(ctx);
return 0;
@@ -925,10 +910,8 @@ bus_clk_err:
return ret;
}
-static int fimd_poweroff(struct exynos_drm_manager *mgr)
+static int fimd_poweroff(struct fimd_context *ctx)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
-
if (ctx->suspended)
return 0;
@@ -937,7 +920,7 @@ static int fimd_poweroff(struct exynos_drm_manager *mgr)
* suspend that connector. Otherwise we might try to scan from
* a destroyed buffer later.
*/
- fimd_window_suspend(mgr);
+ fimd_window_suspend(ctx);
clk_disable_unprepare(ctx->lcd_clk);
clk_disable_unprepare(ctx->bus_clk);
@@ -948,18 +931,18 @@ static int fimd_poweroff(struct exynos_drm_manager *mgr)
return 0;
}
-static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
+static void fimd_dpms(struct exynos_drm_crtc *crtc, int mode)
{
DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- fimd_poweron(mgr);
+ fimd_poweron(crtc->ctx);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- fimd_poweroff(mgr);
+ fimd_poweroff(crtc->ctx);
break;
default:
DRM_DEBUG_KMS("unspecified mode %d\n", mode);
@@ -996,9 +979,9 @@ static void fimd_trigger(struct device *dev)
atomic_set(&ctx->triggering, 0);
}
-static void fimd_te_handler(struct exynos_drm_manager *mgr)
+static void fimd_te_handler(struct exynos_drm_crtc *crtc)
{
- struct fimd_context *ctx = mgr_to_fimd(mgr);
+ struct fimd_context *ctx = crtc->ctx;
/* Checks the crtc is detached already from encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
@@ -1021,10 +1004,9 @@ static void fimd_te_handler(struct exynos_drm_manager *mgr)
drm_handle_vblank(ctx->drm_dev, ctx->pipe);
}
-static struct exynos_drm_manager_ops fimd_manager_ops = {
+static struct exynos_drm_crtc_ops fimd_crtc_ops = {
.dpms = fimd_dpms,
.mode_fixup = fimd_mode_fixup,
- .mode_set = fimd_mode_set,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
@@ -1074,9 +1056,22 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = fimd_ctx_initialize(ctx, drm_dev);
+ if (ret) {
+ DRM_ERROR("fimd_ctx_initialize failed.\n");
+ return ret;
+ }
+
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
+ EXYNOS_DISPLAY_TYPE_LCD,
+ &fimd_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc)) {
+ fimd_ctx_remove(ctx);
+ return PTR_ERR(ctx->crtc);
+ }
- fimd_mgr_initialize(&ctx->manager, drm_dev);
- exynos_drm_crtc_create(&ctx->manager);
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
@@ -1089,12 +1084,12 @@ static void fimd_unbind(struct device *dev, struct device *master,
{
struct fimd_context *ctx = dev_get_drvdata(dev);
- fimd_dpms(&ctx->manager, DRM_MODE_DPMS_OFF);
+ fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
if (ctx->display)
exynos_dpi_remove(ctx->display);
- fimd_mgr_remove(&ctx->manager);
+ fimd_ctx_remove(ctx);
}
static const struct component_ops fimd_component_ops = {
@@ -1108,7 +1103,7 @@ static int fimd_probe(struct platform_device *pdev)
struct fimd_context *ctx;
struct device_node *i80_if_timings;
struct resource *res;
- int ret = -EINVAL;
+ int ret;
if (!dev->of_node)
return -ENODEV;
@@ -1117,11 +1112,8 @@ static int fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->manager.type = EXYNOS_DISPLAY_TYPE_LCD;
- ctx->manager.ops = &fimd_manager_ops;
-
ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
- ctx->manager.type);
+ EXYNOS_DISPLAY_TYPE_LCD);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index ec58fe9c40df..308173cb4f0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -22,6 +22,7 @@
/*
* exynos drm gem buffer structure.
*
+ * @cookie: cookie returned by dma_alloc_attrs
* @kvaddr: kernel virtual address to allocated memory region.
* *userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
@@ -35,6 +36,7 @@
* VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
+ void *cookie;
void __iomem *kvaddr;
unsigned long userptr;
dma_addr_t dma_addr;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 0261468c8019..8040ed2a831f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -542,9 +542,6 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
GSC_IN_YUV420_2P);
break;
- case DRM_FORMAT_NV12MT:
- cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
- break;
default:
dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
return -EINVAL;
@@ -809,9 +806,6 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
GSC_OUT_YUV420_2P);
break;
- case DRM_FORMAT_NV12MT:
- cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
- break;
default:
dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
return -EINVAL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c7045a663763..a5616872eee7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -12,25 +12,17 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
+#include <drm/drm_plane_helper.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
-#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
-
-struct exynos_plane {
- struct drm_plane base;
- struct exynos_drm_overlay overlay;
- bool enabled;
-};
-
static const uint32_t formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_NV12,
- DRM_FORMAT_NV12MT,
};
/*
@@ -69,16 +61,9 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
return size;
}
-int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
{
- struct exynos_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
- unsigned int actual_w;
- unsigned int actual_h;
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
int nr;
int i;
@@ -91,12 +76,26 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
return -EFAULT;
}
- overlay->dma_addr[i] = buffer->dma_addr;
+ exynos_plane->dma_addr[i] = buffer->dma_addr;
DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->dma_addr[i]);
+ i, (unsigned long)exynos_plane->dma_addr[i]);
}
+ return 0;
+}
+
+void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ unsigned int actual_w;
+ unsigned int actual_h;
+
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay);
@@ -113,98 +112,79 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* set drm framebuffer data. */
- overlay->fb_x = src_x;
- overlay->fb_y = src_y;
- overlay->fb_width = fb->width;
- overlay->fb_height = fb->height;
- overlay->src_width = src_w;
- overlay->src_height = src_h;
- overlay->bpp = fb->bits_per_pixel;
- overlay->pitch = fb->pitches[0];
- overlay->pixel_format = fb->pixel_format;
-
- /* set overlay range to be displayed. */
- overlay->crtc_x = crtc_x;
- overlay->crtc_y = crtc_y;
- overlay->crtc_width = actual_w;
- overlay->crtc_height = actual_h;
+ exynos_plane->fb_x = src_x;
+ exynos_plane->fb_y = src_y;
+ exynos_plane->fb_width = fb->width;
+ exynos_plane->fb_height = fb->height;
+ exynos_plane->src_width = src_w;
+ exynos_plane->src_height = src_h;
+ exynos_plane->bpp = fb->bits_per_pixel;
+ exynos_plane->pitch = fb->pitches[0];
+ exynos_plane->pixel_format = fb->pixel_format;
+
+ /* set plane range to be displayed. */
+ exynos_plane->crtc_x = crtc_x;
+ exynos_plane->crtc_y = crtc_y;
+ exynos_plane->crtc_width = actual_w;
+ exynos_plane->crtc_height = actual_h;
/* set drm mode data. */
- overlay->mode_width = crtc->mode.hdisplay;
- overlay->mode_height = crtc->mode.vdisplay;
- overlay->refresh = crtc->mode.vrefresh;
- overlay->scan_flag = crtc->mode.flags;
+ exynos_plane->mode_width = crtc->mode.hdisplay;
+ exynos_plane->mode_height = crtc->mode.vdisplay;
+ exynos_plane->refresh = crtc->mode.vrefresh;
+ exynos_plane->scan_flag = crtc->mode.flags;
- DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
- overlay->crtc_x, overlay->crtc_y,
- overlay->crtc_width, overlay->crtc_height);
+ DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
+ exynos_plane->crtc_x, exynos_plane->crtc_y,
+ exynos_plane->crtc_width, exynos_plane->crtc_height);
plane->crtc = crtc;
- exynos_drm_crtc_plane_mode_set(crtc, overlay);
-
- return 0;
-}
-
-void exynos_plane_commit(struct drm_plane *plane)
-{
- struct exynos_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
-
- exynos_drm_crtc_plane_commit(plane->crtc, overlay->zpos);
+ if (exynos_crtc->ops->win_mode_set)
+ exynos_crtc->ops->win_mode_set(exynos_crtc, exynos_plane);
}
-void exynos_plane_dpms(struct drm_plane *plane, int mode)
-{
- struct exynos_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
-
- if (mode == DRM_MODE_DPMS_ON) {
- if (exynos_plane->enabled)
- return;
-
- exynos_drm_crtc_plane_enable(plane->crtc, overlay->zpos);
- exynos_plane->enabled = true;
- } else {
- if (!exynos_plane->enabled)
- return;
-
- exynos_drm_crtc_plane_disable(plane->crtc, overlay->zpos);
- exynos_plane->enabled = false;
- }
-}
-
-static int
+int
exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
+
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
int ret;
- ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
- crtc_w, crtc_h, src_x >> 16, src_y >> 16,
- src_w >> 16, src_h >> 16);
+ ret = exynos_check_plane(plane, fb);
if (ret < 0)
return ret;
- exynos_plane_commit(plane);
- exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
+ exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
+ crtc_w, crtc_h, src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+
+ if (exynos_crtc->ops->win_commit)
+ exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
return 0;
}
static int exynos_disable_plane(struct drm_plane *plane)
{
- exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
+
+ if (exynos_crtc->ops->win_disable)
+ exynos_crtc->ops->win_disable(exynos_crtc,
+ exynos_plane->zpos);
return 0;
}
static void exynos_plane_destroy(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
exynos_disable_plane(plane);
drm_plane_cleanup(plane);
@@ -216,11 +196,11 @@ static int exynos_plane_set_property(struct drm_plane *plane,
uint64_t val)
{
struct drm_device *dev = plane->dev;
- struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_private *dev_priv = dev->dev_private;
if (property == dev_priv->plane_zpos_property) {
- exynos_plane->overlay.zpos = val;
+ exynos_plane->zpos = val;
return 0;
}
@@ -257,10 +237,10 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
unsigned long possible_crtcs,
enum drm_plane_type type)
{
- struct exynos_plane *exynos_plane;
+ struct exynos_drm_plane *exynos_plane;
int err;
- exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ exynos_plane = kzalloc(sizeof(struct exynos_drm_plane), GFP_KERNEL);
if (!exynos_plane)
return ERR_PTR(-ENOMEM);
@@ -274,7 +254,7 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
}
if (type == DRM_PLANE_TYPE_PRIMARY)
- exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ exynos_plane->zpos = DEFAULT_ZPOS;
else
exynos_plane_attach_zpos_property(&exynos_plane->base);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 0d1986b115f8..9d3c374e7b3e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,13 +9,17 @@
*
*/
-int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-void exynos_plane_commit(struct drm_plane *plane);
-void exynos_plane_dpms(struct drm_plane *plane, int mode);
+int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb);
+void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
struct drm_plane *exynos_plane_init(struct drm_device *dev,
unsigned long possible_crtcs,
enum drm_plane_type type);
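
Taken together, the helpers declared above split the old single-step plane update into a check step, a state-copy step, and a commit performed through the crtc ops. A sketch of the expected call order, with a hypothetical foo_show_plane() and full-screen coordinates for brevity:

static int foo_show_plane(struct drm_plane *plane, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb)
{
	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
	struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
	int ret;

	ret = exynos_check_plane(plane, fb);		/* validates the GEM buffers */
	if (ret < 0)
		return ret;

	exynos_plane_mode_set(plane, crtc, fb, 0, 0, fb->width, fb->height,
			      0, 0, fb->width, fb->height);

	if (exynos_crtc->ops->win_commit)		/* hand off to the hardware driver */
		exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);

	return 0;
}
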
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 45899fb63272..b886972b5888 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -47,11 +47,10 @@ struct vidi_win_data {
};
struct vidi_context {
- struct exynos_drm_manager manager;
struct exynos_drm_display display;
struct platform_device *pdev;
struct drm_device *drm_dev;
- struct drm_crtc *crtc;
+ struct exynos_drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
struct vidi_win_data win_data[WINDOWS_NR];
@@ -68,11 +67,6 @@ struct vidi_context {
int pipe;
};
-static inline struct vidi_context *manager_to_vidi(struct exynos_drm_manager *m)
-{
- return container_of(m, struct vidi_context, manager);
-}
-
static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
{
return container_of(d, struct vidi_context, display);
@@ -103,34 +97,22 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
-static void vidi_apply(struct exynos_drm_manager *mgr)
+static void vidi_apply(struct vidi_context *ctx)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
- struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+ struct exynos_drm_crtc_ops *crtc_ops = ctx->crtc->ops;
struct vidi_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
- if (win_data->enabled && (mgr_ops && mgr_ops->win_commit))
- mgr_ops->win_commit(mgr, i);
+ if (win_data->enabled && (crtc_ops && crtc_ops->win_commit))
+ crtc_ops->win_commit(ctx->crtc, i);
}
-
- if (mgr_ops && mgr_ops->commit)
- mgr_ops->commit(mgr);
}
-static void vidi_commit(struct exynos_drm_manager *mgr)
+static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
-
- if (ctx->suspended)
- return;
-}
-
-static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
-{
- struct vidi_context *ctx = manager_to_vidi(mgr);
+ struct vidi_context *ctx = crtc->ctx;
if (ctx->suspended)
return -EPERM;
@@ -143,16 +125,16 @@ static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
/*
* in case of page flip request, vidi_finish_pageflip function
* will not be called because direct_vblank is true and then
- * that function will be called by manager_ops->win_commit callback
+ * that function will be called by crtc_ops->win_commit callback
*/
schedule_work(&ctx->work);
return 0;
}
-static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
+static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
+ struct vidi_context *ctx = crtc->ctx;
if (ctx->suspended)
return;
@@ -161,44 +143,44 @@ static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
ctx->vblank_on = false;
}
-static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
- struct exynos_drm_overlay *overlay)
+static void vidi_win_mode_set(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
+ struct vidi_context *ctx = crtc->ctx;
struct vidi_win_data *win_data;
int win;
unsigned long offset;
- if (!overlay) {
- DRM_ERROR("overlay is NULL\n");
+ if (!plane) {
+ DRM_ERROR("plane is NULL\n");
return;
}
- win = overlay->zpos;
+ win = plane->zpos;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
if (win < 0 || win >= WINDOWS_NR)
return;
- offset = overlay->fb_x * (overlay->bpp >> 3);
- offset += overlay->fb_y * overlay->pitch;
+ offset = plane->fb_x * (plane->bpp >> 3);
+ offset += plane->fb_y * plane->pitch;
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+ DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
win_data = &ctx->win_data[win];
- win_data->offset_x = overlay->crtc_x;
- win_data->offset_y = overlay->crtc_y;
- win_data->ovl_width = overlay->crtc_width;
- win_data->ovl_height = overlay->crtc_height;
- win_data->fb_width = overlay->fb_width;
- win_data->fb_height = overlay->fb_height;
- win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->bpp = overlay->bpp;
- win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
- (overlay->bpp >> 3);
- win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+ win_data->offset_x = plane->crtc_x;
+ win_data->offset_y = plane->crtc_y;
+ win_data->ovl_width = plane->crtc_width;
+ win_data->ovl_height = plane->crtc_height;
+ win_data->fb_width = plane->fb_width;
+ win_data->fb_height = plane->fb_height;
+ win_data->dma_addr = plane->dma_addr[0] + offset;
+ win_data->bpp = plane->bpp;
+ win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
+ (plane->bpp >> 3);
+ win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
/*
* some parts of win_data should be transferred to user side
@@ -211,12 +193,12 @@ static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- overlay->fb_width, overlay->crtc_width);
+ plane->fb_width, plane->crtc_width);
}
-static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
+static void vidi_win_commit(struct exynos_drm_crtc *crtc, int zpos)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
+ struct vidi_context *ctx = crtc->ctx;
struct vidi_win_data *win_data;
int win = zpos;
@@ -239,9 +221,9 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
schedule_work(&ctx->work);
}
-static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
+static void vidi_win_disable(struct exynos_drm_crtc *crtc, int zpos)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
+ struct vidi_context *ctx = crtc->ctx;
struct vidi_win_data *win_data;
int win = zpos;
@@ -257,10 +239,8 @@ static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
/* TODO. */
}
-static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
+static int vidi_power_on(struct vidi_context *ctx, bool enable)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
-
DRM_DEBUG_KMS("%s\n", __FILE__);
if (enable != false && enable != true)
@@ -271,9 +251,9 @@ static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
- vidi_enable_vblank(mgr);
+ vidi_enable_vblank(ctx->crtc);
- vidi_apply(mgr);
+ vidi_apply(ctx);
} else {
ctx->suspended = true;
}
@@ -281,9 +261,9 @@ static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
return 0;
}
-static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
+static void vidi_dpms(struct exynos_drm_crtc *crtc, int mode)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
+ struct vidi_context *ctx = crtc->ctx;
DRM_DEBUG_KMS("%d\n", mode);
@@ -291,12 +271,12 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- vidi_power_on(mgr, true);
+ vidi_power_on(ctx, true);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- vidi_power_on(mgr, false);
+ vidi_power_on(ctx, false);
break;
default:
DRM_DEBUG_KMS("unspecified mode %d\n", mode);
@@ -306,21 +286,19 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
mutex_unlock(&ctx->lock);
}
-static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
+static int vidi_ctx_initialize(struct vidi_context *ctx,
struct drm_device *drm_dev)
{
- struct vidi_context *ctx = manager_to_vidi(mgr);
struct exynos_drm_private *priv = drm_dev->dev_private;
- mgr->drm_dev = ctx->drm_dev = drm_dev;
- mgr->pipe = ctx->pipe = priv->pipe++;
+ ctx->drm_dev = drm_dev;
+ ctx->pipe = priv->pipe++;
return 0;
}
-static struct exynos_drm_manager_ops vidi_manager_ops = {
+static struct exynos_drm_crtc_ops vidi_crtc_ops = {
.dpms = vidi_dpms,
- .commit = vidi_commit,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
.win_mode_set = vidi_win_mode_set,
@@ -565,21 +543,21 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct drm_crtc *crtc = ctx->crtc;
int ret;
- vidi_mgr_initialize(&ctx->manager, drm_dev);
+ vidi_ctx_initialize(ctx, drm_dev);
- ret = exynos_drm_crtc_create(&ctx->manager);
- if (ret) {
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
+ EXYNOS_DISPLAY_TYPE_VIDI,
+ &vidi_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc)) {
DRM_ERROR("failed to create crtc.\n");
- return ret;
+ return PTR_ERR(ctx->crtc);
}
ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
if (ret) {
- crtc->funcs->destroy(crtc);
- DRM_ERROR("failed to create encoder and connector.\n");
+ ctx->crtc->base.funcs->destroy(&ctx->crtc->base);
return ret;
}
@@ -605,15 +583,13 @@ static int vidi_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->manager.type = EXYNOS_DISPLAY_TYPE_VIDI;
- ctx->manager.ops = &vidi_manager_ops;
ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
ctx->display.ops = &vidi_display_ops;
ctx->default_win = 0;
ctx->pdev = pdev;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
- ctx->manager.type);
+ EXYNOS_DISPLAY_TYPE_VIDI);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5765a161abdd..229b3613c60b 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
- u8 buffer[2];
u32 reg;
clk_disable_unprepare(hdata->res.sclk_hdmi);
@@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
clk_prepare_enable(hdata->res.sclk_hdmi);
/* operation mode */
- buffer[0] = 0x1f;
- buffer[1] = 0x00;
-
- if (hdata->hdmiphy_port)
- i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
if (hdata->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
@@ -2036,9 +2032,8 @@ static void hdmi_commit(struct exynos_drm_display *display)
hdmi_conf_apply(hdata);
}
-static void hdmi_poweron(struct exynos_drm_display *display)
+static void hdmi_poweron(struct hdmi_context *hdata)
{
- struct hdmi_context *hdata = display_to_hdmi(display);
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -2064,12 +2059,11 @@ static void hdmi_poweron(struct exynos_drm_display *display)
clk_prepare_enable(res->sclk_hdmi);
hdmiphy_poweron(hdata);
- hdmi_commit(display);
+ hdmi_commit(&hdata->display);
}
-static void hdmi_poweroff(struct exynos_drm_display *display)
+static void hdmi_poweroff(struct hdmi_context *hdata)
{
- struct hdmi_context *hdata = display_to_hdmi(display);
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -2113,7 +2107,7 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- hdmi_poweron(display);
+ hdmi_poweron(hdata);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
@@ -2132,7 +2126,7 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
if (funcs && funcs->dpms)
(*funcs->dpms)(crtc, mode);
- hdmi_poweroff(display);
+ hdmi_poweroff(hdata);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 820b76234ef4..3518bc4654c5 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -72,6 +72,7 @@ struct mixer_resources {
spinlock_t reg_slock;
struct clk *mixer;
struct clk *vp;
+ struct clk *hdmi;
struct clk *sclk_mixer;
struct clk *sclk_hdmi;
struct clk *mout_mixer;
@@ -84,10 +85,10 @@ enum mixer_version_id {
};
struct mixer_context {
- struct exynos_drm_manager manager;
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
+ struct exynos_drm_crtc *crtc;
int pipe;
bool interlace;
bool powered;
@@ -103,11 +104,6 @@ struct mixer_context {
atomic_t wait_vsync_event;
};
-static inline struct mixer_context *mgr_to_mixer(struct exynos_drm_manager *mgr)
-{
- return container_of(mgr, struct mixer_context, manager);
-}
-
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
@@ -417,8 +413,6 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
win_data = &ctx->win_data[win];
switch (win_data->pixel_format) {
- case DRM_FORMAT_NV12MT:
- tiled_mode = true;
case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
@@ -587,8 +581,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
win == MIXER_DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(win_data->fb_height);
- val |= MXR_MXR_RES_WIDTH(win_data->fb_width);
+ val = MXR_MXR_RES_HEIGHT(win_data->mode_height);
+ val |= MXR_MXR_RES_WIDTH(win_data->mode_width);
mixer_reg_write(res, MXR_RESOLUTION, val);
}
@@ -774,6 +768,12 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
return -ENODEV;
}
+ mixer_res->hdmi = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(mixer_res->hdmi)) {
+ dev_err(dev, "failed to get clock 'hdmi'\n");
+ return PTR_ERR(mixer_res->hdmi);
+ }
+
mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
@@ -854,16 +854,15 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
return 0;
}
-static int mixer_initialize(struct exynos_drm_manager *mgr,
+static int mixer_initialize(struct mixer_context *mixer_ctx,
struct drm_device *drm_dev)
{
int ret;
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
- mgr->drm_dev = mixer_ctx->drm_dev = drm_dev;
- mgr->pipe = mixer_ctx->pipe = priv->pipe++;
+ mixer_ctx->drm_dev = drm_dev;
+ mixer_ctx->pipe = priv->pipe++;
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
@@ -887,17 +886,15 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}
-static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
+static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
-
if (is_drm_iommu_supported(mixer_ctx->drm_dev))
drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}
-static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
+static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
if (!mixer_ctx->powered) {
@@ -912,34 +909,34 @@ static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
return 0;
}
-static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
+static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
/* disable vsync interrupt */
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
- struct exynos_drm_overlay *overlay)
+static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ struct mixer_context *mixer_ctx = crtc->ctx;
struct hdmi_win_data *win_data;
int win;
- if (!overlay) {
- DRM_ERROR("overlay is NULL\n");
+ if (!plane) {
+ DRM_ERROR("plane is NULL\n");
return;
}
DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
- overlay->fb_width, overlay->fb_height,
- overlay->fb_x, overlay->fb_y,
- overlay->crtc_width, overlay->crtc_height,
- overlay->crtc_x, overlay->crtc_y);
+ plane->fb_width, plane->fb_height,
+ plane->fb_x, plane->fb_y,
+ plane->crtc_width, plane->crtc_height,
+ plane->crtc_x, plane->crtc_y);
- win = overlay->zpos;
+ win = plane->zpos;
if (win == DEFAULT_ZPOS)
win = MIXER_DEFAULT_WIN;
@@ -950,32 +947,32 @@ static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
win_data = &mixer_ctx->win_data[win];
- win_data->dma_addr = overlay->dma_addr[0];
- win_data->chroma_dma_addr = overlay->dma_addr[1];
- win_data->pixel_format = overlay->pixel_format;
- win_data->bpp = overlay->bpp;
+ win_data->dma_addr = plane->dma_addr[0];
+ win_data->chroma_dma_addr = plane->dma_addr[1];
+ win_data->pixel_format = plane->pixel_format;
+ win_data->bpp = plane->bpp;
- win_data->crtc_x = overlay->crtc_x;
- win_data->crtc_y = overlay->crtc_y;
- win_data->crtc_width = overlay->crtc_width;
- win_data->crtc_height = overlay->crtc_height;
+ win_data->crtc_x = plane->crtc_x;
+ win_data->crtc_y = plane->crtc_y;
+ win_data->crtc_width = plane->crtc_width;
+ win_data->crtc_height = plane->crtc_height;
- win_data->fb_x = overlay->fb_x;
- win_data->fb_y = overlay->fb_y;
- win_data->fb_width = overlay->fb_width;
- win_data->fb_height = overlay->fb_height;
- win_data->src_width = overlay->src_width;
- win_data->src_height = overlay->src_height;
+ win_data->fb_x = plane->fb_x;
+ win_data->fb_y = plane->fb_y;
+ win_data->fb_width = plane->fb_width;
+ win_data->fb_height = plane->fb_height;
+ win_data->src_width = plane->src_width;
+ win_data->src_height = plane->src_height;
- win_data->mode_width = overlay->mode_width;
- win_data->mode_height = overlay->mode_height;
+ win_data->mode_width = plane->mode_width;
+ win_data->mode_height = plane->mode_height;
- win_data->scan_flags = overlay->scan_flag;
+ win_data->scan_flags = plane->scan_flag;
}
-static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
+static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ struct mixer_context *mixer_ctx = crtc->ctx;
int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -995,9 +992,9 @@ static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
mixer_ctx->win_data[win].enabled = true;
}
-static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
+static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
@@ -1023,9 +1020,10 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
mixer_ctx->win_data[win].enabled = false;
}
-static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
+static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
- struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ struct mixer_context *mixer_ctx = crtc->ctx;
+ int err;
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -1034,7 +1032,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
- drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
+ if (err < 0) {
+ DRM_DEBUG_KMS("failed to acquire vblank counter\n");
+ return;
+ }
atomic_set(&mixer_ctx->wait_vsync_event, 1);
@@ -1047,26 +1049,24 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
- drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
+ drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe);
}
-static void mixer_window_suspend(struct exynos_drm_manager *mgr)
+static void mixer_window_suspend(struct mixer_context *ctx)
{
- struct mixer_context *ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
win_data = &ctx->win_data[i];
win_data->resume = win_data->enabled;
- mixer_win_disable(mgr, i);
+ mixer_win_disable(ctx->crtc, i);
}
- mixer_wait_for_vblank(mgr);
+ mixer_wait_for_vblank(ctx->crtc);
}
-static void mixer_window_resume(struct exynos_drm_manager *mgr)
+static void mixer_window_resume(struct mixer_context *ctx)
{
- struct mixer_context *ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int i;
@@ -1075,13 +1075,12 @@ static void mixer_window_resume(struct exynos_drm_manager *mgr)
win_data->enabled = win_data->resume;
win_data->resume = false;
if (win_data->enabled)
- mixer_win_commit(mgr, i);
+ mixer_win_commit(ctx->crtc, i);
}
}
-static void mixer_poweron(struct exynos_drm_manager *mgr)
+static void mixer_poweron(struct mixer_context *ctx)
{
- struct mixer_context *ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -1095,6 +1094,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
pm_runtime_get_sync(ctx->dev);
clk_prepare_enable(res->mixer);
+ clk_prepare_enable(res->hdmi);
if (ctx->vp_enabled) {
clk_prepare_enable(res->vp);
if (ctx->has_sclk)
@@ -1110,12 +1110,11 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
- mixer_window_resume(mgr);
+ mixer_window_resume(ctx);
}
-static void mixer_poweroff(struct exynos_drm_manager *mgr)
+static void mixer_poweroff(struct mixer_context *ctx)
{
- struct mixer_context *ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -1126,7 +1125,7 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
mutex_unlock(&ctx->mixer_mutex);
mixer_stop(ctx);
- mixer_window_suspend(mgr);
+ mixer_window_suspend(ctx);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
@@ -1134,6 +1133,7 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
ctx->powered = false;
mutex_unlock(&ctx->mixer_mutex);
+ clk_disable_unprepare(res->hdmi);
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
@@ -1144,16 +1144,16 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
pm_runtime_put_sync(ctx->dev);
}
-static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
+static void mixer_dpms(struct exynos_drm_crtc *crtc, int mode)
{
switch (mode) {
case DRM_MODE_DPMS_ON:
- mixer_poweron(mgr);
+ mixer_poweron(crtc->ctx);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- mixer_poweroff(mgr);
+ mixer_poweroff(crtc->ctx);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -1181,7 +1181,7 @@ int mixer_check_mode(struct drm_display_mode *mode)
return -EINVAL;
}
-static struct exynos_drm_manager_ops mixer_manager_ops = {
+static struct exynos_drm_crtc_ops mixer_crtc_ops = {
.dpms = mixer_dpms,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
@@ -1252,28 +1252,31 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
struct drm_device *drm_dev = data;
int ret;
- ret = mixer_initialize(&ctx->manager, drm_dev);
+ ret = mixer_initialize(ctx, drm_dev);
if (ret)
return ret;
- ret = exynos_drm_crtc_create(&ctx->manager);
- if (ret) {
- mixer_mgr_remove(&ctx->manager);
- return ret;
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
+ EXYNOS_DISPLAY_TYPE_HDMI,
+ &mixer_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc)) {
+ mixer_ctx_remove(ctx);
+ ret = PTR_ERR(ctx->crtc);
+ goto free_ctx;
}
- pm_runtime_enable(dev);
-
return 0;
+
+free_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
}
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
- mixer_mgr_remove(&ctx->manager);
-
- pm_runtime_disable(dev);
+ mixer_ctx_remove(ctx);
}
static const struct component_ops mixer_component_ops = {
@@ -1296,9 +1299,6 @@ static int mixer_probe(struct platform_device *pdev)
mutex_init(&ctx->mixer_mutex);
- ctx->manager.type = EXYNOS_DISPLAY_TYPE_HDMI;
- ctx->manager.ops = &mixer_manager_ops;
-
if (dev->of_node) {
const struct of_device_id *match;
@@ -1320,7 +1320,7 @@ static int mixer_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
- ctx->manager.type);
+ EXYNOS_DISPLAY_TYPE_HDMI);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index ddd90ddbc200..2d42ce6d3757 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -593,6 +593,7 @@ int psb_fbdev_init(struct drm_device *dev)
{
struct psb_fbdev *fbdev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
if (!fbdev) {
@@ -604,16 +605,29 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
- drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
- INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
+ dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&fbdev->psb_fb_helper);
+free:
+ kfree(fbdev);
+ return ret;
}
static void psb_fbdev_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index faf1c0c5ab2e..fa140e04d5fa 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -644,9 +644,6 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- return MODE_NO_INTERLACE;
-
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
+ struct mutex mutex;
+ struct delayed_work dwork;
uint16_t rev;
uint8_t current_page;
int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg);
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return ret;
+ goto out;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0)
goto fail;
- return ret;
+ goto out;
fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tda998x_priv *priv =
+ container_of(dwork, struct tda998x_priv, dwork);
+
+ if (priv->encoder && priv->encoder->dev)
+ drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */
- if (priv->encoder && priv->encoder->dev)
- drm_helper_hpd_irq_event(priv->encoder->dev);
+ schedule_delayed_work(&priv->dwork, HZ/10);
}
return IRQ_HANDLED;
}
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (priv->hdmi->irq)
+ if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv);
+ cancel_delayed_work_sync(&priv->dwork);
+ }
i2c_unregister_device(priv->cec);
}
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
+ unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff;
priv->hdmi = client;
- priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ cec_addr = 0x34 + (client->addr & 0x03);
+ priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec)
return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
+ mutex_init(&priv->mutex); /* protect the page access */
+
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) {
int irqf_trigger;
- /* init read EDID waitqueue */
+ /* init read EDID waitqueue and HPD work */
init_waitqueue_head(&priv->wq_edid);
+ INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 4e39ab34eb1c..74acca9bcd9d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,8 @@ config DRM_I915
select SHMEM
select TMPFS
select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_MIPI_DSI
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e4083e41a600..f01922591679 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,6 +19,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
+ i915_gem_batch_pool.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
@@ -47,6 +48,7 @@ i915-y += intel_renderstate_gen6.o \
i915-y += intel_audio.o \
intel_bios.o \
intel_display.o \
+ intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
intel_modes.o \
@@ -64,11 +66,12 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ intel_atomic.o \
+ intel_atomic_plane.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
intel_dp_mst.o \
- intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 22c992a78ac6..806e812340d0 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -152,6 +152,7 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -210,6 +211,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
@@ -229,6 +231,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -272,6 +275,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -401,6 +405,7 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
#define REG64(addr) (addr), (addr + sizeof(u32))
static const u32 gen7_render_regs[] = {
+ REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
@@ -481,13 +486,17 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+ u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
- if (subclient == INSTR_MEDIA_SUBCLIENT)
- return 0xFFF;
- else
+ if (subclient == INSTR_MEDIA_SUBCLIENT) {
+ if (op == 6)
+ return 0xFFFF;
+ else
+ return 0xFFF;
+ } else
return 0xFF;
}
@@ -716,13 +725,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
- if (hash_empty(ring->cmd_hash)) {
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
- if (ret) {
- DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
- return ret;
- }
+ WARN_ON(!hash_empty(ring->cmd_hash));
+
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
}
ring->needs_cmd_parser = true;
@@ -840,6 +849,69 @@ finish:
return (u32*)addr;
}
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+ struct drm_i915_gem_object *src_obj,
+ u32 batch_start_offset,
+ u32 batch_len)
+{
+ int ret = 0;
+ int needs_clflush = 0;
+ u32 *src_base, *dest_base = NULL;
+ u32 *src_addr, *dest_addr;
+ u32 offset = batch_start_offset / sizeof(*dest_addr);
+ u32 end = batch_start_offset + batch_len;
+
+ if (end > dest_obj->base.size || end > src_obj->base.size)
+ return ERR_PTR(-E2BIG);
+
+ ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ return ERR_PTR(ret);
+ }
+
+ src_base = vmap_batch(src_obj);
+ if (!src_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+ ret = -ENOMEM;
+ goto unpin_src;
+ }
+
+ src_addr = src_base + offset;
+
+ if (needs_clflush)
+ drm_clflush_virt_range((char *)src_addr, batch_len);
+
+ ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+ goto unmap_src;
+ }
+
+ dest_base = vmap_batch(dest_obj);
+ if (!dest_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+ ret = -ENOMEM;
+ goto unmap_src;
+ }
+
+ dest_addr = dest_base + offset;
+
+ if (batch_start_offset != 0)
+ memset((u8 *)dest_base, 0, batch_start_offset);
+
+ memcpy(dest_addr, src_addr, batch_len);
+ memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+
+unmap_src:
+ vunmap(src_base);
+unpin_src:
+ i915_gem_object_unpin_pages(src_obj);
+
+ return ret ? ERR_PTR(ret) : dest_base;
+}
+
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
@@ -956,7 +1028,9 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
@@ -967,33 +1041,38 @@ static bool check_cmd(const struct intel_engine_cs *ring,
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
- int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
- ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+ ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
- return ret;
+ DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+ return -1;
}
- batch_base = vmap_batch(batch_obj);
- if (!batch_base) {
- DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
- i915_gem_object_unpin_pages(batch_obj);
- return -ENOMEM;
+ batch_base = copy_batch(shadow_batch_obj, batch_obj,
+ batch_start_offset, batch_len);
+ if (IS_ERR(batch_base)) {
+ DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+ i915_gem_object_ggtt_unpin(shadow_batch_obj);
+ return PTR_ERR(batch_base);
}
- if (needs_clflush)
- drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
- batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+ /*
+ * We use the batch length as size because the shadow object is as
+ * large or larger and copy_batch() will write MI_NOPs to the extra
+ * space. Parsing should be faster in some cases this way.
+ */
+ batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
@@ -1053,8 +1132,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
-
- i915_gem_object_unpin_pages(batch_obj);
+ i915_gem_object_ggtt_unpin(shadow_batch_obj);
return ret;
}
@@ -1076,6 +1154,7 @@ int i915_cmd_parser_get_version(void)
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
+ * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
*/
- return 2;
+ return 3;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 779a275eb1fd..96e811fe24ca 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,9 +96,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj->user_pin_count > 0)
- return "P";
- else if (i915_gem_obj_is_pinned(obj))
+ if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
@@ -125,7 +123,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int pin_count = 0;
- seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
+ seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -133,9 +131,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_read_seqno,
- obj->last_write_seqno,
- obj->last_fenced_seqno,
+ i915_gem_request_get_seqno(obj->last_read_req),
+ i915_gem_request_get_seqno(obj->last_write_req),
+ i915_gem_request_get_seqno(obj->last_fenced_req),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -154,8 +152,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08lx, size: %08lx)",
- vma->node.start, vma->node.size);
+ seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+ vma->node.start, vma->node.size,
+ vma->ggtt_view.type);
}
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
@@ -168,8 +167,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->ring != NULL)
- seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->last_read_req != NULL)
+ seq_printf(m, " (%s)",
+ i915_gem_request_get_ring(obj->last_read_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -336,7 +336,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (ppgtt->file_priv != stats->file_priv)
continue;
- if (obj->ring) /* XXX per-vma statistic */
+ if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -346,7 +346,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
- if (obj->ring)
+ if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -360,6 +360,33 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
+#define print_file_stats(m, name, stats) \
+ seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ name, \
+ stats.count, \
+ stats.total, \
+ stats.active, \
+ stats.inactive, \
+ stats.global, \
+ stats.shared, \
+ stats.unbound)
+
+static void print_batch_pool_stats(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+ struct file_stats stats;
+
+ memset(&stats, 0, sizeof(stats));
+
+ list_for_each_entry(obj,
+ &dev_priv->mm.batch_pool.cache_list,
+ batch_pool_list)
+ per_file_stats(0, obj, &stats);
+
+ print_file_stats(m, "batch pool", stats);
+}
+
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -442,6 +469,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
+ print_batch_pool_stats(m, dev_priv);
+
+ seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -459,15 +489,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
*/
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
- task ? task->comm : "<unknown>",
- stats.count,
- stats.total,
- stats.active,
- stats.inactive,
- stats.global,
- stats.shared,
- stats.unbound);
+ print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
@@ -543,14 +565,16 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
- if (work->flip_queued_ring) {
- seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
- work->flip_queued_ring->name,
- work->flip_queued_seqno,
+ if (work->flip_queued_req) {
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(work->flip_queued_req);
+
+ seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+ ring->name,
+ i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- work->flip_queued_seqno));
+ ring->get_seqno(ring, true),
+ i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -582,6 +606,36 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int count = 0;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_puts(m, "cache:\n");
+ list_for_each_entry(obj,
+ &dev_priv->mm.batch_pool.cache_list,
+ batch_pool_list) {
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ count++;
+ }
+
+ seq_printf(m, "total: %d\n", count);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -604,7 +658,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
list_for_each_entry(gem_request,
&ring->request_list,
list) {
- seq_printf(m, " %d @ %d\n",
+ seq_printf(m, " %x @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
@@ -622,7 +676,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *ring)
{
if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %u\n",
+ seq_printf(m, "Current sequence (%s): %x\n",
ring->name, ring->get_seqno(ring, false));
}
}
@@ -1051,7 +1105,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
if (ret)
goto out;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
@@ -1059,7 +1113,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf >>= 24;
else
reqf >>= 25;
- reqf *= GT_FREQUENCY_MULTIPLIER;
+ reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -1076,9 +1130,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- cagf *= GT_FREQUENCY_MULTIPLIER;
+ cagf = intel_gpu_freq(dev_priv, cagf);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (IS_GEN6(dev) || IS_GEN7(dev)) {
@@ -1124,18 +1178,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1145,16 +1199,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
- seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
@@ -1165,6 +1220,53 @@ out:
return ret;
}
+static int i915_hangcheck_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u64 acthd[I915_NUM_RINGS];
+ u32 seqno[I915_NUM_RINGS];
+ int i;
+
+ if (!i915.enable_hangcheck) {
+ seq_printf(m, "Hangcheck disabled\n");
+ return 0;
+ }
+
+ intel_runtime_pm_get(dev_priv);
+
+ for_each_ring(ring, dev_priv, i) {
+ seqno[i] = ring->get_seqno(ring, false);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
+ seq_printf(m, "Hangcheck active, fires in %dms\n",
+ jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+ jiffies));
+ } else
+ seq_printf(m, "Hangcheck inactive\n");
+
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "%s:\n", ring->name);
+ seq_printf(m, "\tseqno = %x [current %x]\n",
+ ring->hangcheck.seqno, seqno[i]);
+ seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+ (long long)ring->hangcheck.acthd,
+ (long long)acthd[i]);
+ seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
+ (long long)ring->hangcheck.max_acthd);
+ seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+ seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+ }
+
+ return 0;
+}
+
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1234,14 +1336,31 @@ static int ironlake_drpc_info(struct seq_file *m)
return 0;
}
-static int vlv_drpc_info(struct seq_file *m)
+static int i915_forcewake_domains(struct seq_file *m, void *data)
{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ int i;
+ spin_lock_irq(&dev_priv->uncore.lock);
+ for_each_fw_domain(fw_domain, dev_priv, i) {
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(i),
+ fw_domain->wake_count);
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return 0;
+}
+
+static int vlv_drpc_info(struct seq_file *m)
+{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1, pw_status;
- unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
@@ -1273,22 +1392,11 @@ static int vlv_drpc_info(struct seq_file *m)
seq_printf(m, "Media RC6 residency since boot: %u\n",
I915_READ(VLV_GT_MEDIA_RC6));
- spin_lock_irq(&dev_priv->uncore.lock);
- fw_rendercount = dev_priv->uncore.fw_rendercount;
- fw_mediacount = dev_priv->uncore.fw_mediacount;
- spin_unlock_irq(&dev_priv->uncore.lock);
-
- seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
- seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
-
-
- return 0;
+ return i915_forcewake_domains(m, NULL);
}
-
static int gen6_drpc_info(struct seq_file *m)
{
-
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1302,7 +1410,7 @@ static int gen6_drpc_info(struct seq_file *m)
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
- forcewake_count = dev_priv->uncore.forcewake_count;
+ forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
spin_unlock_irq(&dev_priv->uncore.lock);
if (forcewake_count) {
@@ -1617,7 +1725,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- gpu_freq * GT_FREQUENCY_MULTIPLIER,
+ intel_gpu_freq(dev_priv, gpu_freq),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1874,7 +1982,7 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, ring_id) {
- struct intel_ctx_submit_request *head_req = NULL;
+ struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
@@ -1907,7 +2015,7 @@ static int i915_execlists(struct seq_file *m, void *data)
list_for_each(cursor, &ring->execlist_queue)
count++;
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request, execlist_link);
+ struct drm_i915_gem_request, execlist_link);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
@@ -1930,30 +2038,6 @@ static int i915_execlists(struct seq_file *m, void *data)
return 0;
}
-static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
-
- spin_lock_irq(&dev_priv->uncore.lock);
- if (IS_VALLEYVIEW(dev)) {
- fw_rendercount = dev_priv->uncore.fw_rendercount;
- fw_mediacount = dev_priv->uncore.fw_mediacount;
- } else
- forcewake_count = dev_priv->uncore.forcewake_count;
- spin_unlock_irq(&dev_priv->uncore.lock);
-
- if (IS_VALLEYVIEW(dev)) {
- seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
- seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
- } else
- seq_printf(m, "forcewake count = %u\n", forcewake_count);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2155,6 +2239,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 psrperf = 0;
+ u32 stat[3];
+ enum pipe pipe;
bool enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -2169,14 +2255,39 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- enabled = HAS_PSR(dev) &&
- I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
+ if (HAS_PSR(dev)) {
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
+ }
+ }
+ }
+ seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
+
+ if (!HAS_DDI(dev))
+ for_each_pipe(dev_priv, pipe) {
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ seq_printf(m, " pipe %c", pipe_name(pipe));
+ }
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Link standby: %s\n",
+ yesno((bool)dev_priv->psr.link_standby));
- if (HAS_PSR(dev))
+ /* CHV PSR has no performance counter of any kind */
+ if (HAS_PSR(dev) && HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance_Counter: %u\n", psrperf);
+
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ }
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -2319,10 +2430,18 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "AUDIO";
case POWER_DOMAIN_PLLS:
return "PLLS";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
case POWER_DOMAIN_INIT:
return "INIT";
default:
- WARN_ON(1);
+ MISSING_CASE(domain);
return "?";
}
}
@@ -2547,7 +2666,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
+ yesno(crtc->active), crtc->config->pipe_src_w,
+ crtc->config->pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
@@ -2718,6 +2838,9 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
enum pipe pipe;
int plane;
+ if (INTEL_INFO(dev)->gen < 9)
+ return 0;
+
drm_modeset_lock_all(dev);
ddb = &dev_priv->wm.skl_hw.ddb;
@@ -2830,7 +2953,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
- int head, tail, n_entries, n;
+ int n_entries;
ssize_t bytes_read;
/*
@@ -2862,36 +2985,39 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
}
/* We now have one or more entries to read */
- head = pipe_crc->head;
- tail = pipe_crc->tail;
- n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
- count / PIPE_CRC_LINE_LEN);
- spin_unlock_irq(&pipe_crc->lock);
+ n_entries = count / PIPE_CRC_LINE_LEN;
bytes_read = 0;
- n = 0;
- do {
- struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+ while (n_entries > 0) {
+ struct intel_pipe_crc_entry *entry =
+ &pipe_crc->entries[pipe_crc->tail];
int ret;
+ if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+ break;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
"%8u %8x %8x %8x %8x %8x\n",
entry->frame, entry->crc[0],
entry->crc[1], entry->crc[2],
entry->crc[3], entry->crc[4]);
- ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
- buf, PIPE_CRC_LINE_LEN);
+ spin_unlock_irq(&pipe_crc->lock);
+
+ ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
if (ret == PIPE_CRC_LINE_LEN)
return -EFAULT;
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- n++;
- } while (--n_entries);
+ user_buf += PIPE_CRC_LINE_LEN;
+ n_entries--;
+
+ spin_lock_irq(&pipe_crc->lock);
+ }
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->tail = tail;
spin_unlock_irq(&pipe_crc->lock);
return bytes_read;
@@ -3072,6 +3198,12 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
need_stable_symbols = true;
break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_CHERRYVIEW(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+ need_stable_symbols = true;
+ break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
@@ -3092,11 +3224,19 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
- if (pipe == PIPE_A)
+ switch (pipe) {
+ case PIPE_A:
tmp |= PIPE_A_SCRAMBLE_RESET;
- else
+ break;
+ case PIPE_B:
tmp |= PIPE_B_SCRAMBLE_RESET;
-
+ break;
+ case PIPE_C:
+ tmp |= PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return -EINVAL;
+ }
I915_WRITE(PORT_DFT2_G4X, tmp);
}
@@ -3185,10 +3325,19 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
- if (pipe == PIPE_A)
+ switch (pipe) {
+ case PIPE_A:
tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
+ break;
+ case PIPE_B:
tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp &= ~PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return;
+ }
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
@@ -3252,9 +3401,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
* relevant on hsw with pipe A when using the always-on power well
* routing.
*/
- if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
- !crtc->config.pch_pfit.enabled) {
- crtc->config.pch_pfit.force_thru = true;
+ if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config->pch_pfit.enabled) {
+ crtc->config->pch_pfit.force_thru = true;
intel_display_power_get(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
@@ -3278,8 +3427,8 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
* relevant on hsw with pipe A when using the always-on power well
* routing.
*/
- if (crtc->config.pch_pfit.force_thru) {
- crtc->config.pch_pfit.force_thru = false;
+ if (crtc->config->pch_pfit.force_thru) {
+ crtc->config->pch_pfit.force_thru = false;
dev_priv->display.crtc_disable(&crtc->base);
dev_priv->display.crtc_enable(&crtc->base);
@@ -3359,13 +3508,15 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* none -> real source transition */
if (source) {
+ struct intel_pipe_crc_entry *entries;
+
DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
pipe_name(pipe), pipe_crc_source_name(source));
- pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
- INTEL_PIPE_CRC_ENTRIES_NR,
- GFP_KERNEL);
- if (!pipe_crc->entries)
+ entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+ sizeof(pipe_crc->entries[0]),
+ GFP_KERNEL);
+ if (!entries)
return -ENOMEM;
/*
@@ -3377,6 +3528,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
+ kfree(pipe_crc->entries);
+ pipe_crc->entries = entries;
pipe_crc->head = 0;
pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
@@ -3404,6 +3557,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
pipe_crc->entries = NULL;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
kfree(entries);
@@ -3826,6 +3981,17 @@ i915_wedged_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * There is no safeguard against this debugfs entry colliding
+ * with the hangcheck calling the same i915_handle_error() in
+ * parallel, causing an explosion. For now we assume that the
+ * test harness is responsible enough not to inject gpu hangs
+ * while it is writing to 'i915_wedged'.
+ */
+
+ if (i915_reset_in_progress(&dev_priv->gpu_error))
+ return -EAGAIN;
+
intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val,
@@ -4012,10 +4178,7 @@ i915_max_freq_get(void *data, u64 *val)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- else
- *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -4044,12 +4207,12 @@ i915_max_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go above the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
} else {
- do_div(val, GT_FREQUENCY_MULTIPLIER);
+ val = intel_freq_opcode(dev_priv, val);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.max_freq;
@@ -4093,10 +4256,7 @@ i915_min_freq_get(void *data, u64 *val)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- else
- *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -4125,12 +4285,12 @@ i915_min_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go below the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
} else {
- do_div(val, GT_FREQUENCY_MULTIPLIER);
+ val = intel_freq_opcode(dev_priv, val);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.max_freq;
@@ -4222,7 +4382,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
return 0;
}
@@ -4235,7 +4396,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -4296,7 +4458,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+ {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
+ {"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
@@ -4308,7 +4472,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_execlists", i915_execlists, 0},
- {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ecee3bcc8772..1a46787129e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -92,6 +92,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_VEBOX:
value = intel_ring_initialized(&dev_priv->ring[VECS]);
break;
+ case I915_PARAM_HAS_BSD2:
+ value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+ break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
break;
@@ -143,6 +146,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1;
break;
+ case I915_PARAM_MMAP_VERSION:
+ value = 1;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -598,6 +604,17 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->num_pipes = 0;
}
}
+
+ if (IS_CHERRYVIEW(dev)) {
+ u32 fuse, mask_eu;
+
+ fuse = I915_READ(CHV_FUSE_GT);
+ mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK |
+ CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total = 16 - hweight32(mask_eu);
+ }
}
/**
@@ -773,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}
+ dev_priv->gpu_error.hangcheck_wq =
+ alloc_ordered_workqueue("i915-hangcheck", 0);
+ if (dev_priv->gpu_error.hangcheck_wq == NULL) {
+ DRM_ERROR("Failed to create our hangcheck workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freedpwq;
+ }
+
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
@@ -830,6 +855,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_runtime_pm_enable(dev_priv);
+ i915_audio_component_init(dev_priv);
+
return 0;
out_power_well:
@@ -845,6 +872,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+out_freedpwq:
destroy_workqueue(dev_priv->dp_wq);
out_freewq:
destroy_workqueue(dev_priv->wq);
@@ -870,6 +899,8 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ i915_audio_component_cleanup(dev_priv);
+
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -913,8 +944,7 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
- cancel_work_sync(&dev_priv->gpu_error.work);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
@@ -928,6 +958,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
@@ -938,6 +969,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
i915_global_gtt_cleanup(dev);
@@ -1004,6 +1036,13 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1025,8 +1064,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -1055,6 +1094,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..8039cec71fc2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_HSW_ULT(dev));
- } else if (IS_BROADWELL(dev)) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->pch_id =
- INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("This is Broadwell, assuming "
- "LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
@@ -841,6 +835,8 @@ int i915_reset(struct drm_device *dev)
return ret;
}
+ intel_overlay_reset(dev_priv);
+
/* Ok, now get things going again... */
/*
@@ -940,8 +936,7 @@ static int i915_pm_suspend(struct device *dev)
static int i915_pm_suspend_late(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -960,8 +955,7 @@ static int i915_pm_suspend_late(struct device *dev)
static int i915_pm_resume_early(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -971,8 +965,7 @@ static int i915_pm_resume_early(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1299,7 +1292,9 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
- vlv_save_gunit_s0ix_state(dev_priv);
+
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
@@ -1330,7 +1325,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- vlv_restore_gunit_s0ix_state(dev_priv);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
@@ -1363,8 +1359,6 @@ static int intel_runtime_suspend(struct device *device)
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
- assert_force_wake_inactive(dev_priv);
-
DRM_DEBUG_KMS("Suspending device\n");
/*
@@ -1402,7 +1396,8 @@ static int intel_runtime_suspend(struct device *device)
return ret;
}
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ intel_uncore_forcewake_reset(dev, false);
dev_priv->pm.suspended = true;
/*
@@ -1430,6 +1425,8 @@ static int intel_runtime_suspend(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D3hot);
}
+ assert_forcewakes_inactive(dev_priv);
+
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
@@ -1640,6 +1637,14 @@ static int __init i915_init(void)
#endif
}
+ /*
+ * FIXME: Note that we're lying to the DRM core here so that we can get access
+ * to the atomic ioctl and the atomic properties. Only plane operations on
+ * a single CRTC will actually work.
+ */
+ if (i915.nuclear_pageflip)
+ driver.driver_features |= DRIVER_ATOMIC;
+
return drm_pci_init(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..f2a825e39646 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,10 +55,51 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20141121"
+#define DRIVER_DATE "20150130"
#undef WARN_ON
-#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
+/* Many gcc versions seem unable to see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+ bool __i915_warn_cond = (x); \
+ if (__builtin_constant_p(__i915_warn_cond)) \
+ BUILD_BUG_ON(__i915_warn_cond); \
+ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#endif
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+ (long) (x), __func__)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (i915.verbose_state_checks) \
+ WARN(1, format); \
+ else \
+ DRM_ERROR(format); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (i915.verbose_state_checks) \
+ WARN(1, "WARN_ON(" #condition ")\n"); \
+ else \
+ DRM_ERROR("WARN_ON(" #condition ")\n"); \
+ } \
+ unlikely(__ret_warn_on); \
+})
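/*
 * Minimal illustrative sketch of how the state-check macros above are meant
 * to be used; example_assert_state() and its arguments are hypothetical,
 * only the I915_STATE_WARN*() macros come from this header.
 */
static inline void example_assert_state(bool cur, bool expected)
{
	/* Emits WARN() or DRM_ERROR() depending on i915.verbose_state_checks */
	I915_STATE_WARN(cur != expected,
			"state mismatch (expected %i, found %i)\n",
			expected, cur);

	/* Shorthand form when no custom message is needed */
	I915_STATE_WARN_ON(cur && !expected);
}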
enum pipe {
INVALID_PIPE = -1,
@@ -143,6 +184,10 @@ enum intel_display_power_domain {
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -458,8 +503,8 @@ struct drm_i915_error_state {
struct intel_connector;
struct intel_encoder;
-struct intel_crtc_config;
-struct intel_plane_config;
+struct intel_crtc_state;
+struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
@@ -497,10 +542,11 @@ struct drm_i915_display_funcs {
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_config *);
- void (*get_plane_config)(struct intel_crtc *,
- struct intel_plane_config *);
- int (*crtc_compute_clock)(struct intel_crtc *crtc);
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ int (*crtc_compute_clock)(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
@@ -533,11 +579,28 @@ struct drm_i915_display_funcs {
void (*enable_backlight)(struct intel_connector *connector);
};
+enum forcewake_domain_id {
+ FW_DOMAIN_ID_RENDER = 0,
+ FW_DOMAIN_ID_BLITTER,
+ FW_DOMAIN_ID_MEDIA,
+
+ FW_DOMAIN_ID_COUNT
+};
+
+enum forcewake_domains {
+ FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_ALL = (FORCEWAKE_RENDER |
+ FORCEWAKE_BLITTER |
+ FORCEWAKE_MEDIA)
+};
+
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
- int fw_engine);
+ enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
- int fw_engine);
+ enum forcewake_domains domains);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -560,14 +623,31 @@ struct intel_uncore {
struct intel_uncore_funcs funcs;
unsigned fifo_count;
- unsigned forcewake_count;
-
- unsigned fw_rendercount;
- unsigned fw_mediacount;
- unsigned fw_blittercount;
-
- struct timer_list force_wake_timer;
-};
+ enum forcewake_domains fw_domains;
+
+ struct intel_uncore_forcewake_domain {
+ struct drm_i915_private *i915;
+ enum forcewake_domain_id id;
+ unsigned wake_count;
+ struct timer_list timer;
+ u32 reg_set;
+ u32 val_set;
+ u32 val_clear;
+ u32 reg_ack;
+ u32 reg_post;
+ u32 val_reset;
+ } fw_domain[FW_DOMAIN_ID_COUNT];
+};
+
+/* Iterate over initialised fw domains */
+#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
+ for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+ (i__) < FW_DOMAIN_ID_COUNT; \
+ (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
+ if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+
+#define for_each_fw_domain(domain__, dev_priv__, i__) \
+ for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
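/*
 * Minimal illustrative sketch of the iterator above: walk every initialised
 * forcewake domain of a device. example_count_fw_domains() is hypothetical;
 * only for_each_fw_domain() and the fw_domain fields come from this patch.
 */
static inline int example_count_fw_domains(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	int count = 0, tmp;

	/* Skips domain ids that are not set in dev_priv->uncore.fw_domains */
	for_each_fw_domain(domain, dev_priv, tmp)
		count++;

	return count;
}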
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
@@ -612,6 +692,7 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
+ unsigned int eu_total;
};
#undef DEFINE_FLAG
@@ -637,6 +718,11 @@ struct i915_ctx_hang_stats {
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
+ /* If the context causes a second GPU hang within this time,
+ * it is permanently banned from submitting any more work.
+ */
+ unsigned long ban_period_seconds;
+
/* This context is banned to submit more work */
bool banned;
};
@@ -679,7 +765,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
- int unpin_count;
+ int pin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -730,11 +816,33 @@ struct i915_fbc {
} no_fbc_reason;
};
-struct i915_drrs {
- struct intel_connector *connector;
+/**
+ * HIGH_RR is the highest eDP panel refresh rate read from EDID
+ * LOW_RR is the lowest eDP panel refresh rate found from EDID
+ * parsing for the same resolution.
+ */
+enum drrs_refresh_rate_type {
+ DRRS_HIGH_RR,
+ DRRS_LOW_RR,
+ DRRS_MAX_RR, /* RR count */
+};
+
+enum drrs_support_type {
+ DRRS_NOT_SUPPORTED = 0,
+ STATIC_DRRS_SUPPORT = 1,
+ SEAMLESS_DRRS_SUPPORT = 2
};
struct intel_dp;
+struct i915_drrs {
+ struct mutex mutex;
+ struct delayed_work work;
+ struct intel_dp *dp;
+ unsigned busy_frontbuffer_bits;
+ enum drrs_refresh_rate_type refresh_rate_type;
+ enum drrs_support_type type;
+};
+
struct i915_psr {
struct mutex lock;
bool sink_support;
@@ -743,6 +851,7 @@ struct i915_psr {
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
+ bool link_standby;
};
enum intel_pch {
@@ -1130,6 +1239,11 @@ struct intel_l3_parity {
int which_slice;
};
+struct i915_gem_batch_pool {
+ struct drm_device *dev;
+ struct list_head cache_list;
+};
+
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
@@ -1143,6 +1257,13 @@ struct i915_gem_mm {
*/
struct list_head unbound_list;
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1224,14 +1345,13 @@ struct i915_gpu_error {
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
- struct timer_list hangcheck_timer;
+ struct workqueue_struct *hangcheck_wq;
+ struct delayed_work hangcheck_work;
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
- struct work_struct work;
-
unsigned long missed_irq_rings;
@@ -1301,10 +1421,11 @@ struct ddi_vbt_port_info {
uint8_t supports_dp:1;
};
-enum drrs_support_type {
- DRRS_NOT_SUPPORTED = 0,
- STATIC_DRRS_SUPPORT = 1,
- SEAMLESS_DRRS_SUPPORT = 2
+enum psr_lines_to_wait {
+ PSR_0_LINES_TO_WAIT = 0,
+ PSR_1_LINE_TO_WAIT,
+ PSR_4_LINES_TO_WAIT,
+ PSR_8_LINES_TO_WAIT
};
struct intel_vbt_data {
@@ -1336,6 +1457,15 @@ struct intel_vbt_data {
struct edp_power_seq edp_pps;
struct {
+ bool full_link;
+ bool require_aux_wakeup;
+ int idle_frames;
+ enum psr_lines_to_wait lines_to_wait;
+ int tp1_wakeup_time;
+ int tp2_tp3_wakeup_time;
+ } psr;
+
+ struct {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
@@ -1698,6 +1828,9 @@ struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
+ /* hda/i915 audio component */
+ bool audio_component_registered;
+
uint32_t hw_context_size;
struct list_head context_list;
@@ -1770,6 +1903,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ uint32_t request_uniq;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1781,6 +1916,11 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return dev->dev_private;
}
+static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+{
+ return to_i915(dev_get_drvdata(dev));
+}
+
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1853,6 +1993,8 @@ struct drm_i915_gem_object {
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
+ struct list_head batch_pool_list;
+
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
@@ -1912,6 +2054,7 @@ struct drm_i915_gem_object {
*/
unsigned long gt_ro:1;
unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
unsigned int has_dma_mapping:1;
@@ -1924,13 +2067,11 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_engine_cs *ring;
-
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_read_seqno;
- uint32_t last_write_seqno;
+ struct drm_i915_gem_request *last_read_req;
+ struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
- uint32_t last_fenced_seqno;
+ struct drm_i915_gem_request *last_fenced_req;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -1941,10 +2082,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
- /** User space pin count and filp owning the pin */
- unsigned long user_pin_count;
- struct drm_file *pin_filp;
-
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
@@ -1973,11 +2110,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
*/
struct drm_i915_gem_request {
+ struct kref ref;
+
/** On Which ring this request was generated */
struct intel_engine_cs *ring;
@@ -1987,7 +2127,14 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the start of the request */
u32 head;
- /** Position in the ringbuffer of the end of the request */
+ /**
+ * Position in the ringbuffer of the start of the postfix.
+ * This is required to calculate the maximum available ringbuffer
+ * space without overwriting the postfix.
+ */
+ u32 postfix;
+
+ /** Position in the ringbuffer of the end of the whole request */
u32 tail;
/** Context related to this request */
@@ -2005,8 +2152,75 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
+
+ uint32_t uniq;
+
+ /**
+ * The ELSP only accepts two elements at a time, so we queue
+ * context/tail pairs on a given queue (ring->execlist_queue) until the
+ * hardware is available. The queue serves a double purpose: we also use
+ * it to keep track of the up to 2 contexts currently in the hardware
+ * (usually one in execution and the other queued up by the GPU): We
+ * only remove elements from the head of the queue when the hardware
+ * informs us that an element has been completed.
+ *
+ * All accesses to the queue are mediated by a spinlock
+ * (ring->execlist_lock).
+ */
+
+ /** Execlist link in the submission queue.*/
+ struct list_head execlist_link;
+
+ /** Execlists: number of times this request has been sent to the ELSP */
+ int elsp_submitted;
+
};
+void i915_gem_request_free(struct kref *req_ref);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+ return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+ return req ? req->ring : NULL;
+}
+
+static inline void
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+ kref_get(&req->ref);
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+ WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+ kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+ struct drm_i915_gem_request *src)
+{
+ if (src)
+ i915_gem_request_reference(src);
+
+ if (*pdst)
+ i915_gem_request_unreference(*pdst);
+
+ *pdst = src;
+}
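/*
 * Minimal illustrative sketch of the assign helper above: track the request
 * that last wrote an object and drop that tracking again. The example_*
 * wrappers are hypothetical; obj->last_write_req is added elsewhere in this
 * series.
 */
static inline void example_track_write(struct drm_i915_gem_object *obj,
				       struct drm_i915_gem_request *req)
{
	/* References req and releases whatever request was tracked before */
	i915_gem_request_assign(&obj->last_write_req, req);
}

static inline void example_untrack_write(struct drm_i915_gem_object *obj)
{
	/* Passing NULL simply drops the old reference */
	i915_gem_request_assign(&obj->last_write_req, NULL);
}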
+
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
@@ -2159,8 +2373,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
@@ -2240,7 +2453,9 @@ struct drm_i915_cmd_table {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
+ IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2310,6 +2525,8 @@ struct i915_params {
bool disable_vtd_wa;
int use_mmio_flip;
bool mmio_debug;
+ bool verbose_state_checks;
+ bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;
@@ -2354,6 +2571,12 @@ extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2410,10 +2633,6 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -2458,10 +2677,23 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
+int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags,
+ const struct i915_ggtt_view *view);
+static inline
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- uint64_t flags);
+ uint64_t flags)
+{
+ return i915_gem_object_pin_view(obj, vm, alignment, flags,
+ &i915_ggtt_view_normal);
+}
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2510,6 +2742,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ u32 seqno;
+
+ BUG_ON(req == NULL);
+
+ seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+ return i915_seqno_passed(seqno, req->seqno);
+}
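/*
 * Minimal illustrative sketch: checking a tracked request for completion
 * with the helper above. example_request_done() is hypothetical.
 */
static inline bool example_request_done(struct drm_i915_gem_request *req)
{
	/* lazy_coherency=false forces a coherent seqno read before the check */
	return req && i915_gem_request_completed(req, false);
}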
+
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
@@ -2525,7 +2769,7 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
-int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
@@ -2568,17 +2812,15 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
- struct drm_i915_gem_object *batch_obj,
- u32 *seqno);
-#define i915_add_request(ring, seqno) \
- __i915_add_request(ring, NULL, NULL, seqno)
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ struct drm_i915_gem_object *batch_obj);
+#define i915_add_request(ring) \
+ __i915_add_request(ring, NULL, NULL)
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
-int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
- uint32_t seqno);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -2612,18 +2854,51 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view);
+static inline
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view);
+static inline
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
+}
+
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+static inline
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
+ &i915_ggtt_view_normal);
+}
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
@@ -2720,6 +2995,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -2805,6 +3084,13 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+/* i915_gem_batch_pool.c */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+struct drm_i915_gem_object*
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
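/*
 * Minimal illustrative sketch of the batch pool API above: fetch a shadow
 * object big enough for a client batch. example_get_shadow_batch() is
 * hypothetical; dev_priv->mm.batch_pool is added elsewhere in this series.
 */
static inline struct drm_i915_gem_object *
example_get_shadow_batch(struct drm_i915_private *dev_priv, size_t batch_len)
{
	/* Reuses a cached object when one of sufficient size is available */
	return i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, batch_len);
}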
+
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@ -2812,7 +3098,9 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master);
/* i915_suspend.c */
@@ -2892,9 +3180,6 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
-extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
@@ -2923,20 +3208,12 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
-/* On SNB platform, before reading ring registers forcewake bit
- * must be set to prevent GT core from power down and stale values being
- * returned.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
-
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
-void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
@@ -2957,15 +3234,8 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
-
-#define FORCEWAKE_RENDER (1 << 0)
-#define FORCEWAKE_MEDIA (1 << 1)
-#define FORCEWAKE_BLITTER (1 << 2)
-#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
- FORCEWAKE_BLITTER)
-
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
@@ -3070,4 +3340,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req)
+{
+ if (ring->trace_irq_req == NULL && ring->irq_get(ring))
+ i915_gem_request_assign(&ring->trace_irq_req, req);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c11603b4cf1d..c26d36cc4b31 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,8 +39,7 @@
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
- bool force);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
@@ -153,12 +152,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
return 0;
}
-static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_bound_any(obj) && !obj->active;
-}
-
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -1157,19 +1150,18 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
}
/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
+ * Compare arbitrary request against outstanding lazy request. Emit on match.
*/
int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct drm_i915_gem_request *req)
{
int ret;
- BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
ret = 0;
- if (seqno == ring->outstanding_lazy_seqno)
- ret = i915_add_request(ring, NULL);
+ if (req == req->ring->outstanding_lazy_request)
+ ret = i915_add_request(req->ring);
return ret;
}
@@ -1194,10 +1186,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
}
/**
- * __i915_wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: duh!
+ * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1208,15 +1199,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv)
{
+ struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
@@ -1228,7 +1220,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
- if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ?
@@ -1246,7 +1238,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
- trace_i915_gem_request_wait_begin(ring, seqno);
+ trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
for (;;) {
struct timer_list timer;
@@ -1265,7 +1257,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ if (i915_gem_request_completed(req, false)) {
ret = 0;
break;
}
@@ -1297,7 +1289,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}
}
now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(ring, seqno);
+ trace_i915_gem_request_wait_end(req);
if (!irq_test_in_progress)
ring->irq_put(ring);
@@ -1324,32 +1316,40 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}
/**
- * Waits for a sequence number to be signaled, and cleans up the
+ * Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_request(struct drm_i915_gem_request *req)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool interruptible = dev_priv->mm.interruptible;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ bool interruptible;
unsigned reset_counter;
int ret;
+ BUG_ON(req == NULL);
+
+ dev = req->ring->dev;
+ dev_priv = dev->dev_private;
+ interruptible = dev_priv->mm.interruptible;
+
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(seqno == 0);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
- ret = i915_gem_check_olr(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
- NULL, NULL);
+ i915_gem_request_reference(req);
+ ret = __i915_wait_request(req, reset_counter,
+ interruptible, NULL, NULL);
+ i915_gem_request_unreference(req);
+ return ret;
}
static int
@@ -1361,11 +1361,11 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*
- * Note that the last_write_seqno is always the earlier of
- * the two (read/write) seqno, so if we haved successfully waited,
+ * Note that the last_write_req is always the earlier of
+ * the two (read/write) requests, so if we have successfully waited,
* we know we have passed the last write.
*/
- obj->last_write_seqno = 0;
+ i915_gem_request_assign(&obj->last_write_req, NULL);
return 0;
}
@@ -1378,15 +1378,14 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_engine_cs *ring = obj->ring;
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
- seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
- if (seqno == 0)
+ req = readonly ? obj->last_write_req : obj->last_read_req;
+ if (!req)
return 0;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(req);
if (ret)
return ret;
@@ -1401,33 +1400,33 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_i915_file_private *file_priv,
bool readonly)
{
+ struct drm_i915_gem_request *req;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
- u32 seqno;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
- seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
- if (seqno == 0)
+ req = readonly ? obj->last_write_req : obj->last_read_req;
+ if (!req)
return 0;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
- ret = i915_gem_check_olr(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
- file_priv);
+ ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(req);
if (ret)
return ret;
@@ -1481,18 +1480,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;
- if (read_domains & I915_GEM_DOMAIN_GTT) {
+ if (read_domains & I915_GEM_DOMAIN_GTT)
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
-
- /* Silently promote "you're not bound, there was nothing to do"
- * to success, since the client was just asking us to
- * make sure everything was done.
- */
- if (ret == -EINVAL)
- ret = 0;
- } else {
+ else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
- }
unref:
drm_gem_object_unreference(&obj->base);
@@ -1524,7 +1515,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
/* Pinned buffers may be scanout, so flush the cache */
if (obj->pin_display)
- i915_gem_object_flush_cpu_write_domain(obj, true);
+ i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -1557,6 +1548,12 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
+ if (args->flags & ~(I915_MMAP_WC))
+ return -EINVAL;
+
+ if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+ return -ENODEV;
+
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
@@ -1572,6 +1569,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
+ if (args->flags & I915_MMAP_WC) {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+
+ down_write(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+ if (vma)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ addr = -ENOMEM;
+ up_write(&mm->mmap_sem);
+ }
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -2256,14 +2266,18 @@ static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- u32 seqno = intel_ring_get_seqno(ring);
+ struct drm_i915_gem_request *req;
+ struct intel_engine_cs *old_ring;
BUG_ON(ring == NULL);
- if (obj->ring != ring && obj->last_write_seqno) {
- /* Keep the seqno relative to the current ring */
- obj->last_write_seqno = seqno;
+
+ req = intel_ring_get_request(ring);
+ old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ if (old_ring != ring && obj->last_write_req) {
+ /* Keep the request relative to the current ring */
+ i915_gem_request_assign(&obj->last_write_req, req);
}
- obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
@@ -2273,7 +2287,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_read_seqno = seqno;
+ i915_gem_request_assign(&obj->last_read_req, req);
}
void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2286,29 +2300,25 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_address_space *vm;
struct i915_vma *vma;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- vma = i915_gem_obj_to_vma(obj, vm);
- if (vma && !list_empty(&vma->mm_list))
- list_move_tail(&vma->mm_list, &vm->inactive_list);
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!list_empty(&vma->mm_list))
+ list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
intel_fb_obj_flush(obj, true);
list_del_init(&obj->ring_list);
- obj->ring = NULL;
- obj->last_read_seqno = 0;
- obj->last_write_seqno = 0;
+ i915_gem_request_assign(&obj->last_read_req, NULL);
+ i915_gem_request_assign(&obj->last_write_req, NULL);
obj->base.write_domain = 0;
- obj->last_fenced_seqno = 0;
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
obj->active = 0;
drm_gem_object_unreference(&obj->base);
@@ -2319,13 +2329,10 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring = obj->ring;
-
- if (ring == NULL)
+ if (obj->last_read_req == NULL)
return;
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- obj->last_read_seqno))
+ if (i915_gem_request_completed(obj->last_read_req, true))
i915_gem_object_move_to_inactive(obj);
}
@@ -2401,22 +2408,20 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
- struct drm_i915_gem_object *obj,
- u32 *out_seqno)
+ struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
- u32 request_ring_position, request_start;
+ u32 request_start;
int ret;
- request = ring->preallocated_lazy_request;
+ request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
+ ringbuf = request->ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
@@ -2429,7 +2434,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* what.
*/
if (i915.enable_execlists) {
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
if (ret)
return ret;
} else {
@@ -2443,10 +2448,10 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ringbuf);
+ request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists) {
- ret = ring->emit_request(ringbuf);
+ ret = ring->emit_request(ringbuf, request);
if (ret)
return ret;
} else {
@@ -2455,10 +2460,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
return ret;
}
- request->seqno = intel_ring_get_seqno(ring);
- request->ring = ring;
request->head = request_start;
- request->tail = request_ring_position;
+ request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
@@ -2491,9 +2494,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
spin_unlock(&file_priv->mm.lock);
}
- trace_i915_gem_request_add(ring, request->seqno);
- ring->outstanding_lazy_seqno = 0;
- ring->preallocated_lazy_request = NULL;
+ trace_i915_gem_request_add(request);
+ ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev);
@@ -2503,8 +2505,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
- if (out_seqno)
- *out_seqno = request->seqno;
return 0;
}
@@ -2532,7 +2532,8 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
if (ctx->hang_stats.banned)
return true;
- if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+ if (ctx->hang_stats.ban_period_seconds &&
+ elapsed <= ctx->hang_stats.ban_period_seconds) {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
@@ -2568,33 +2569,39 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
- struct intel_context *ctx = request->ctx;
-
list_del(&request->list);
i915_gem_request_remove_from_client(request);
+ i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+ struct drm_i915_gem_request *req = container_of(req_ref,
+ typeof(*req), ref);
+ struct intel_context *ctx = req->ctx;
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *ring = req->ring;
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
}
+
i915_gem_context_unreference(ctx);
}
- kfree(request);
+
+ kfree(req);
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
- u32 completed_seqno;
-
- completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
- if (i915_seqno_passed(completed_seqno, request->seqno))
+ if (i915_gem_request_completed(request, false))
continue;
return request;
@@ -2641,13 +2648,17 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
+ struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
+
+ if (submit_req->ctx != ring->default_context)
+ intel_lr_context_unpin(ring, submit_req->ctx);
+
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
}
@@ -2669,10 +2680,8 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
- /* These may not have been flush before the reset, do so now */
- kfree(ring->preallocated_lazy_request);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ /* This may not have been flushed before the reset, so clean it now */
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2724,15 +2733,11 @@ void i915_gem_reset(struct drm_device *dev)
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- uint32_t seqno;
-
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring, true);
-
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
@@ -2744,7 +2749,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
@@ -2759,10 +2764,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_request,
list);
- if (!i915_seqno_passed(seqno, request->seqno))
+ if (!i915_gem_request_completed(request, true))
break;
- trace_i915_gem_request_retire(ring, request->seqno);
+ trace_i915_gem_request_retire(request);
/* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists:
@@ -2780,15 +2785,15 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->tail;
+ ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
- if (unlikely(ring->trace_irq_seqno &&
- i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+ if (unlikely(ring->trace_irq_req &&
+ i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
- ring->trace_irq_seqno = 0;
+ i915_gem_request_assign(&ring->trace_irq_req, NULL);
}
WARN_ON(i915_verify_lists(ring->dev));
@@ -2860,14 +2865,17 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
+ struct intel_engine_cs *ring;
int ret;
if (obj->active) {
- ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(obj->ring);
+ i915_gem_retire_requests_ring(ring);
}
return 0;
@@ -2901,9 +2909,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring = NULL;
+ struct drm_i915_gem_request *req;
unsigned reset_counter;
- u32 seqno = 0;
int ret = 0;
if (args->flags != 0)
@@ -2924,13 +2931,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret)
goto out;
- if (obj->active) {
- seqno = obj->last_read_seqno;
- ring = obj->ring;
- }
+ if (!obj->active || !obj->last_read_req)
+ goto out;
- if (seqno == 0)
- goto out;
+ req = obj->last_read_req;
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2942,10 +2946,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- return __i915_wait_seqno(ring, seqno, reset_counter, true,
- &args->timeout_ns, file->driver_priv);
+ ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+ file->driver_priv);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(req);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
out:
drm_gem_object_unreference(&obj->base);
@@ -2969,10 +2978,12 @@ int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from = obj->ring;
+ struct intel_engine_cs *from;
u32 seqno;
int ret, idx;
+ from = i915_gem_request_get_ring(obj->last_read_req);
+
if (from == NULL || to == from)
return 0;
@@ -2981,24 +2992,25 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_read_seqno;
+ seqno = i915_gem_request_get_seqno(obj->last_read_req);
/* Optimization: Avoid semaphore sync when we are sure we already
* waited for an object with higher seqno */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
- ret = i915_gem_check_olr(obj->ring, seqno);
+ ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- trace_i915_gem_ring_sync_to(from, to, seqno);
+ trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
- /* We use last_read_seqno because sync_to()
+ /* We use last_read_req because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
- from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+ from->semaphore.sync_seqno[idx] =
+ i915_gem_request_get_seqno(obj->last_read_req);
return ret;
}
@@ -3054,10 +3066,8 @@ int i915_vma_unbind(struct i915_vma *vma)
* cause memory corruption through use-after-free.
*/
- /* Throw away the active reference before moving to the unbound list */
- i915_gem_object_retire(obj);
-
- if (i915_is_ggtt(vma->vm)) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
@@ -3071,8 +3081,15 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
list_del_init(&vma->mm_list);
- if (i915_is_ggtt(vma->vm))
- obj->map_and_fenceable = false;
+ if (i915_is_ggtt(vma->vm)) {
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ obj->map_and_fenceable = false;
+ } else if (vma->ggtt_view.pages) {
+ sg_free_table(vma->ggtt_view.pages);
+ kfree(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ }
+ }
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -3080,6 +3097,10 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
+ /* Throw away the active reference before
+ * moving to the unbound list. */
+ i915_gem_object_retire(obj);
+
i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
@@ -3148,6 +3169,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -3263,17 +3291,12 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
- case 5:
- case 4: i965_write_fence_reg(dev, reg, obj); break;
- case 3: i915_write_fence_reg(dev, reg, obj); break;
- case 2: i830_write_fence_reg(dev, reg, obj); break;
- default: BUG();
- }
+ if (IS_GEN2(dev))
+ i830_write_fence_reg(dev, reg, obj);
+ else if (IS_GEN3(dev))
+ i915_write_fence_reg(dev, reg, obj);
+ else if (INTEL_INFO(dev)->gen >= 4)
+ i965_write_fence_reg(dev, reg, obj);
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
@@ -3312,12 +3335,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
- if (obj->last_fenced_seqno) {
- int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ if (obj->last_fenced_req) {
+ int ret = i915_wait_request(obj->last_fenced_req);
if (ret)
return ret;
- obj->last_fenced_seqno = 0;
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
}
return 0;
@@ -3490,7 +3513,8 @@ static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
- uint64_t flags)
+ uint64_t flags,
+ const struct i915_ggtt_view *view)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3540,7 +3564,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
if (IS_ERR(vma))
goto err_unpin;
@@ -3570,15 +3594,19 @@ search_free:
if (ret)
goto err_remove_node;
+ trace_i915_vma_bind(vma, flags);
+ ret = i915_vma_bind(vma, obj->cache_level,
+ flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+ if (ret)
+ goto err_finish_gtt;
+
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
- trace_i915_vma_bind(vma, flags);
- vma->bind_vma(vma, obj->cache_level,
- flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
-
return vma;
+err_finish_gtt:
+ i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
@@ -3615,11 +3643,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
- if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
+ obj->cache_dirty = true;
return false;
+ }
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->pages);
+ obj->cache_dirty = false;
return true;
}
@@ -3655,15 +3686,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
- bool force)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, force))
+ if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
@@ -3685,15 +3715,10 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
uint32_t old_write_domain, old_read_domains;
+ struct i915_vma *vma;
int ret;
- /* Not valid to be called on unbound objects. */
- if (vma == NULL)
- return -EINVAL;
-
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
@@ -3702,7 +3727,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return ret;
i915_gem_object_retire(obj);
- i915_gem_object_flush_cpu_write_domain(obj, false);
+
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -3733,9 +3771,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
- if (i915_gem_object_is_inactive(obj))
+ vma = i915_gem_obj_to_ggtt(obj);
+ if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
- &dev_priv->gtt.base.inactive_list);
+ &to_i915(obj->base.dev)->gtt.base.inactive_list);
return 0;
}
@@ -3781,36 +3820,23 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- vma->bind_vma(vma, cache_level,
- vma->bound & GLOBAL_BIND);
+ if (drm_mm_node_allocated(&vma->node)) {
+ ret = i915_vma_bind(vma, cache_level,
+ vma->bound & GLOBAL_BIND);
+ if (ret)
+ return ret;
+ }
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
- if (cpu_write_needs_clflush(obj)) {
- u32 old_read_domains, old_write_domain;
-
- /* If we're coming from LLC cached, then we haven't
- * actually been tracking whether the data is in the
- * CPU cache or not, since we only allow one bit set
- * in obj->write_domain and have been skipping the clflushes.
- * Just set it to the CPU cache for now.
- */
- i915_gem_object_retire(obj);
- WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
-
- old_read_domains = obj->base.read_domains;
- old_write_domain = obj->base.write_domain;
-
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
+ if (obj->cache_dirty &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ cpu_write_needs_clflush(obj)) {
+ if (i915_gem_clflush_object(obj, true))
+ i915_gem_chipset_flush(obj->base.dev);
}
return 0;
@@ -3902,18 +3928,14 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
if (!vma)
return false;
- /* There are 3 sources that pin objects:
+ /* There are 2 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
- * 3. The user.
*
* We can ignore reservations as we hold the struct_mutex and
- * are only called outside of the reservation path. The user
- * can only increment pin_count once, and so if after
- * subtracting the potential reference by the user, any pin_count
- * remains, it must be due to another use by the display engine.
+ * are only called outside of the reservation path.
*/
- return vma->pin_count - !!obj->user_pin_count;
+ return vma->pin_count;
}
/*
@@ -3930,7 +3952,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
bool was_pin_display;
int ret;
- if (pipelined != obj->ring) {
+ if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
return ret;
@@ -3964,7 +3986,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (ret)
goto err_unpin_display;
- i915_gem_object_flush_cpu_write_domain(obj, true);
+ i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -4082,10 +4104,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
- struct drm_i915_gem_request *request;
- struct intel_engine_cs *ring = NULL;
+ struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
- u32 seqno = 0;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4101,19 +4121,24 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ring = request->ring;
- seqno = request->seqno;
+ target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ if (target)
+ i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
- if (seqno == 0)
+ if (target == NULL)
return 0;
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(target);
+ mutex_unlock(&dev->struct_mutex);
+
return ret;
}
@@ -4137,10 +4162,11 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
}
int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
+i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags,
+ const struct i915_ggtt_view *view)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4156,7 +4182,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma(obj, vm);
+ vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
@@ -4166,7 +4192,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset(obj, vm), alignment,
+ i915_gem_obj_offset_view(obj, vm, view->type),
+ alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
@@ -4179,13 +4206,17 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+ vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
+ flags, view);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
+ ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ if (ret)
+ return ret;
+ }
if ((bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
@@ -4257,102 +4288,6 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
int
-i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_DEBUG("Attempting to pin a purgeable buffer\n");
- ret = -EFAULT;
- goto out;
- }
-
- if (obj->pin_filp != NULL && obj->pin_filp != file) {
- DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- ret = -EINVAL;
- goto out;
- }
-
- if (obj->user_pin_count == ULONG_MAX) {
- ret = -EBUSY;
- goto out;
- }
-
- if (obj->user_pin_count == 0) {
- ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
- if (ret)
- goto out;
- }
-
- obj->user_pin_count++;
- obj->pin_filp = file;
-
- args->offset = i915_gem_obj_ggtt_offset(obj);
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
-i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->pin_filp != file) {
- DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- ret = -EINVAL;
- goto out;
- }
- obj->user_pin_count--;
- if (obj->user_pin_count == 0) {
- obj->pin_filp = NULL;
- i915_gem_object_ggtt_unpin(obj);
- }
-
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -4378,9 +4313,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
- if (obj->ring) {
+ if (obj->last_read_req) {
+ struct intel_engine_cs *ring;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy |= intel_ring_flag(obj->ring) << 16;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+ args->busy |= intel_ring_flag(ring) << 16;
}
drm_gem_object_unreference(&obj->base);
@@ -4460,6 +4397,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
+ INIT_LIST_HEAD(&obj->batch_pool_list);
obj->ops = ops;
@@ -4615,12 +4553,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm)
+ if (vma->vm == vm && vma->ggtt_view.type == view->type)
return vma;
return NULL;
@@ -4676,10 +4615,15 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work);
+ /* Assert that we successfully flushed all the work and
+ * reset the GPU back to its idle, low power state.
+ */
+ WARN_ON(dev_priv->mm.busy);
+
return 0;
err:
@@ -4791,14 +4735,6 @@ int i915_gem_init_rings(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Makes sure all unused rings
- * are totally idle.
- */
- init_unused_rings(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -4851,6 +4787,7 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
int ret, i;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4877,32 +4814,35 @@ i915_gem_init_hw(struct drm_device *dev)
i915_gem_init_swizzling(dev);
- ret = dev_priv->gt.init_rings(dev);
- if (ret)
- return ret;
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (ie. head != tail) after resume which
+ * will prevent c3 entry. Make sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(dev);
+
+ for_each_ring(ring, dev_priv, i) {
+ ret = ring->init_hw(ring);
+ if (ret)
+ return ret;
+ }
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}
return ret;
@@ -4939,18 +4879,18 @@ int i915_gem_init(struct drm_device *dev)
}
ret = i915_gem_init_userptr(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
+
+ ret = dev_priv->gt.init_rings(dev);
+ if (ret)
+ goto out_unlock;
ret = i915_gem_init_hw(dev);
if (ret == -EIO) {
@@ -4962,6 +4902,8 @@ int i915_gem_init(struct drm_device *dev)
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
+
+out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -5062,6 +5004,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
+
mutex_init(&dev_priv->fb_tracking.lock);
}
@@ -5222,8 +5166,9 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
}
/* All the new VM stuff */
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
@@ -5231,7 +5176,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm)
+ if (vma->vm == vm && vma->ggtt_view.type == view)
return vma->node.start;
}
@@ -5240,13 +5185,16 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
return -1;
}
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view)
{
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ if (vma->vm == vm &&
+ vma->ggtt_view.type == view &&
+ drm_mm_node_allocated(&vma->node))
return true;
return false;
@@ -5378,11 +5326,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
- if (vma->vm != i915_obj_to_ggtt(obj))
- return NULL;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+ return vma;
- return vma;
+ return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
new file mode 100644
index 000000000000..c690170a1c4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: batch pool
+ *
+ * In order to submit batch buffers as 'secure', the software command parser
+ * must ensure that a batch buffer cannot be modified after parsing. It does
+ * this by copying the user provided batch buffer contents to a kernel owned
+ * buffer from which the hardware will actually execute, and by carefully
+ * managing the address space bindings for such buffers.
+ *
+ * The batch pool framework provides a mechanism for the driver to manage a
+ * set of scratch buffers to use for this purpose. The framework can be
+ * extended to support other use cases should they arise.
+ */
+
+/**
+ * i915_gem_batch_pool_init() - initialize a batch buffer pool
+ * @dev: the drm device
+ * @pool: the batch buffer pool
+ */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool)
+{
+ pool->dev = dev;
+ INIT_LIST_HEAD(&pool->cache_list);
+}
+
+/**
+ * i915_gem_batch_pool_fini() - clean up a batch buffer pool
+ * @pool: the pool to clean up
+ *
+ * Note: Callers must hold the struct_mutex.
+ */
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
+{
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ while (!list_empty(&pool->cache_list)) {
+ struct drm_i915_gem_object *obj =
+ list_first_entry(&pool->cache_list,
+ struct drm_i915_gem_object,
+ batch_pool_list);
+
+ WARN_ON(obj->active);
+
+ list_del_init(&obj->batch_pool_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+}
+
+/**
+ * i915_gem_batch_pool_get() - select a buffer from the pool
+ * @pool: the batch buffer pool
+ * @size: the minimum desired size of the returned buffer
+ *
+ * Finds or allocates a batch buffer in the pool with at least the requested
+ * size. The caller is responsible for any domain, active/inactive, or
+ * purgeability management for the returned buffer.
+ *
+ * Note: Callers must hold the struct_mutex
+ *
+ * Return: the selected batch buffer object
+ */
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
+ size_t size)
+{
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_i915_gem_object *tmp, *next;
+
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ list_for_each_entry_safe(tmp, next,
+ &pool->cache_list, batch_pool_list) {
+
+ if (tmp->active)
+ continue;
+
+ /* While we're looping, do some clean up */
+ if (tmp->madv == __I915_MADV_PURGED) {
+ list_del(&tmp->batch_pool_list);
+ drm_gem_object_unreference(&tmp->base);
+ continue;
+ }
+
+ /*
+ * Select a buffer that is at least as big as needed
+ * but not 'too much' bigger. A better way to do this
+ * might be to bucket the pool objects based on size.
+ */
+ if (tmp->base.size >= size &&
+ tmp->base.size <= (2 * size)) {
+ obj = tmp;
+ break;
+ }
+ }
+
+ if (!obj) {
+ obj = i915_gem_alloc_object(pool->dev, size);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ list_add_tail(&obj->batch_pool_list, &pool->cache_list);
+ }
+ else
+ /* Keep list in LRU order */
+ list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+
+ obj->madv = I915_MADV_WILLNEED;
+
+ return obj;
+}
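
As a rough usage sketch of the pool API added above (the function name, the 4096-byte size and the flat locking are illustrative, not lifted from the driver): in this patch the real pool lives in dev_priv->mm.batch_pool, is initialized from i915_gem_load() and is consumed by the execbuffer command parser path.

#include "i915_drv.h"

/* Sketch: stand-alone lifecycle of a batch pool; struct_mutex must be held
 * around both i915_gem_batch_pool_get() and i915_gem_batch_pool_fini().
 */
static void example_batch_pool_lifecycle(struct drm_device *dev)
{
	struct i915_gem_batch_pool pool;
	struct drm_i915_gem_object *obj;

	i915_gem_batch_pool_init(dev, &pool);

	mutex_lock(&dev->struct_mutex);

	/* Reuses an idle, suitably sized pool object or allocates a new one. */
	obj = i915_gem_batch_pool_get(&pool, 4096);
	if (!IS_ERR(obj)) {
		/* ... copy the user batch into obj and submit it here ... */
	}

	i915_gem_batch_pool_fini(&pool);
	mutex_unlock(&dev->struct_mutex);
}

The returned object stays on the pool's cache_list and is marked I915_MADV_WILLNEED; as the kerneldoc above notes, domain, active/inactive and purgeability management after that point are the caller's responsibility.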
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d011ec82ef1e..8603bf48d3ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -222,6 +222,8 @@ __create_hw_context(struct drm_device *dev,
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+ ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+
return ctx;
err_out:
@@ -408,14 +410,25 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
BUG_ON(!dev_priv->ring[RCS].default_context);
- if (i915.enable_execlists)
- return 0;
+ if (i915.enable_execlists) {
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->init_context) {
+ ret = ring->init_context(ring,
+ ring->default_context);
+ if (ret) {
+ DRM_ERROR("ring init context: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
- for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, ring->default_context);
- if (ret)
- return ret;
- }
+ } else
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, ring->default_context);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -611,9 +624,14 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
- if (!(vma->bound & GLOBAL_BIND))
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
- GLOBAL_BIND);
+ if (!(vma->bound & GLOBAL_BIND)) {
+ ret = i915_vma_bind(vma,
+ to->legacy_hw_ctx.rcs_state->cache_level,
+ GLOBAL_BIND);
+ /* This shouldn't ever fail. */
+ if (WARN_ONCE(ret, "GGTT context bind failed!"))
+ goto unpin_out;
+ }
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -651,7 +669,8 @@ static int do_switch(struct intel_engine_cs *ring,
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
- BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+ BUG_ON(i915_gem_request_get_ring(
+ from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -671,10 +690,6 @@ done:
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
-
- ret = i915_gem_render_state_init(ring);
- if (ret)
- DRM_ERROR("init render state: %d\n", ret);
}
return 0;
@@ -779,3 +794,72 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
return 0;
}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct intel_context *ctx;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ args->size = 0;
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ args->value = ctx->hang_stats.ban_period_seconds;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct intel_context *ctx;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value < ctx->hang_stats.ban_period_seconds &&
+ !capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ctx->hang_stats.ban_period_seconds = args->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
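
A userspace-side sketch of how the two new ioctls fit together; it assumes the matching DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM/SETPARAM numbers and the struct drm_i915_gem_context_param layout from the i915 uapi header, which are not part of the hunks shown here.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: read a context's ban period and try to double it. */
static int example_double_ban_period(int fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_BAN_PERIOD,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -1;

	/* Raising the period is always allowed; lowering it below the
	 * current value needs CAP_SYS_ADMIN, and size must stay 0.
	 */
	p.value *= 2;
	p.size = 0;
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}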
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 886ff2ee7a28..e3a49d94da3a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -50,11 +50,12 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @dev: drm_device
* @vm: address space to evict from
- * @size: size of the desired free space
+ * @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @cache_level: cache_level for the desired space
- * @mappable: whether the free space must be mappable
- * @nonblocking: whether evicting active objects is allowed or not
+ * @start: start (inclusive) of the range from which to evict objects
+ * @end: end (exclusive) of the range from which to evict objects
+ * @flags: additional flags to control the eviction algorithm
*
* This function will try to evict vmas until a free space satisfying the
* requirements is found. Callers must check first whether any such hole exists
@@ -196,7 +197,6 @@ found:
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
- *
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
@@ -214,6 +214,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
+ WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
trace_i915_gem_evict_vm(vm);
if (do_idle) {
@@ -222,6 +223,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
return ret;
i915_gem_retire_requests(vm->dev);
+
+ WARN_ON(!list_empty(&vm->active_list));
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 11738316394a..b773368fc62c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define __EXEC_OBJECT_PURGEABLE (1<<27)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -223,7 +224,12 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
vma->pin_count--;
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+ if (entry->flags & __EXEC_OBJECT_PURGEABLE)
+ obj->madv = I915_MADV_DONTNEED;
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
+ __EXEC_OBJECT_HAS_PIN |
+ __EXEC_OBJECT_PURGEABLE);
}
static void eb_destroy(struct eb_vmas *eb)
@@ -357,9 +363,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !(target_vma->bound & GLOBAL_BIND)))
- target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
- GLOBAL_BIND);
+ !(target_vma->bound & GLOBAL_BIND))) {
+ ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
+ if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+ return ret;
+ }
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -943,7 +952,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
- u32 seqno = intel_ring_get_seqno(ring);
+ struct drm_i915_gem_request *req = intel_ring_get_request(ring);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -960,7 +969,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ i915_gem_request_assign(&obj->last_write_req, req);
intel_fb_obj_invalidate(obj, ring);
@@ -968,7 +977,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- obj->last_fenced_seqno = seqno;
+ i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
@@ -990,7 +999,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj, NULL);
+ (void)__i915_add_request(ring, file, obj);
}
static int
@@ -1060,6 +1069,67 @@ i915_emit_box(struct intel_engine_cs *ring,
return 0;
}
+static struct drm_i915_gem_object*
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+ struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ u32 batch_len,
+ bool is_master,
+ u32 *flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+ struct drm_i915_gem_object *shadow_batch_obj;
+ bool need_reloc = false;
+ int ret;
+
+ shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+ batch_obj->base.size);
+ if (IS_ERR(shadow_batch_obj))
+ return shadow_batch_obj;
+
+ ret = i915_parse_cmds(ring,
+ batch_obj,
+ shadow_batch_obj,
+ batch_start_offset,
+ batch_len,
+ is_master);
+ if (ret) {
+ if (ret == -EACCES)
+ return batch_obj;
+ } else {
+ struct i915_vma *vma;
+
+ memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+ vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+ vma->exec_entry = shadow_exec_entry;
+ vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+ drm_gem_object_reference(&shadow_batch_obj->base);
+ i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
+ list_add_tail(&vma->exec_list, &eb->vmas);
+
+ shadow_batch_obj->base.pending_read_domains =
+ batch_obj->base.pending_read_domains;
+
+ /*
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically
+ * don't want that set when the command parser is
+ * enabled.
+ *
+ * FIXME: with aliasing ppgtt, buffers that should only
+ * be in ggtt still end up in the aliasing ppgtt. remove
+ * this check when that is fixed.
+ */
+ if (USES_FULL_PPGTT(dev))
+ *flags |= I915_DISPATCH_SECURE;
+ }
+
+ return ret ? ERR_PTR(ret) : shadow_batch_obj;
+}
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
@@ -1208,7 +1278,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
return ret;
}
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1277,6 +1347,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
+ struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
@@ -1309,13 +1380,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
+ ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+ DRM_DEBUG("execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
+ return -EINVAL;
+ }
+
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
- ring_id = gen8_dispatch_bsd_ring(dev, file);
- ring = &dev_priv->ring[ring_id];
+
+ switch (args->flags & I915_EXEC_BSD_MASK) {
+ case I915_EXEC_BSD_DEFAULT:
+ ring_id = gen8_dispatch_bsd_ring(dev, file);
+ ring = &dev_priv->ring[ring_id];
+ break;
+ case I915_EXEC_BSD_RING1:
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BSD_RING2:
+ ring = &dev_priv->ring[VCS2];
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
+ (int)(args->flags & I915_EXEC_BSD_MASK));
+ return -EINVAL;
+ }
} else
ring = &dev_priv->ring[VCS];
} else
@@ -1393,28 +1486,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = -EINVAL;
goto err;
}
- batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
if (i915_needs_cmd_parser(ring)) {
- ret = i915_parse_cmds(ring,
- batch_obj,
- args->batch_start_offset,
- file->is_master);
- if (ret) {
- if (ret != -EACCES)
- goto err;
- } else {
- /*
- * XXX: Actually do this when enabling batch copy...
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
- * from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically don't
- * want that set when the command parser is enabled.
- */
+ batch_obj = i915_gem_execbuffer_parse(ring,
+ &shadow_exec_entry,
+ eb,
+ batch_obj,
+ args->batch_start_offset,
+ args->batch_len,
+ file->is_master,
+ &flags);
+ if (IS_ERR(batch_obj)) {
+ ret = PTR_ERR(batch_obj);
+ goto err;
}
}
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 171f6eafdeee..746f77fb57a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,68 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ * 12
+ * 34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ * 1212
+ * 3434
+ *
+ * In this example both the size and layout of pages in the alternative view are
+ * different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _view suffix. They take the struct i915_ggtt_view parameter
+ * encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view,
+ * a globally const i915_ggtt_view_normal singleton instance exists. All old
+ * core GEM API functions, the ones not taking the view parameter, operate on,
+ * or with, the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * Core API is designed to have copy semantics which means that passed in
+ * struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
+
+const struct i915_ggtt_view i915_ggtt_view_normal;
+
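
A minimal sketch of a caller of the new _view flavour of the API, using only names introduced by this patch; the helper name and the 4096 alignment are illustrative, and callers that only want the normal view can equally pass &i915_ggtt_view_normal.

#include "i915_drv.h"

/* Sketch: pin an object into the GGTT with an explicitly spelled-out view. */
static int example_pin_with_view(struct drm_i915_gem_object *obj)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };

	/* Copy semantics: 'view' does not need to outlive this call. */
	return i915_gem_object_pin_view(obj, ggtt, 4096,
					PIN_MAPPABLE | PIN_GLOBAL, &view);
}

Adding a genuinely different view then follows the three steps listed in the comment: a new enum i915_ggtt_view_type value, any extra metadata in struct i915_ggtt_view, and a branch in i915_get_vma_pages() that builds the remapped scatter-gather table stored in vma->ggtt_view.pages.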
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -40,8 +102,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- if (IS_GEN8(dev))
- has_full_ppgtt = false; /* XXX why? */
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
@@ -72,7 +132,10 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return 0;
}
- return has_aliasing_ppgtt ? 1 : 0;
+ if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ return 2;
+ else
+ return has_aliasing_ppgtt ? 1 : 0;
}
@@ -132,7 +195,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(level);
}
return pte;
@@ -156,7 +219,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(level);
}
return pte;
@@ -1102,10 +1165,8 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
- return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
- BUG();
+ return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
@@ -1146,7 +1207,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
else
- WARN_ON(1);
+ MISSING_CASE(INTEL_INFO(dev)->gen);
if (ppgtt) {
for_each_ring(ring, dev_priv, i) {
@@ -1341,9 +1402,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* The bind_vma code tries to be smart about tracking mappings.
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
+ *
+ * Bind is not expected to fail since this is only called on
+ * resume and the assumption is that all requirements already exist.
*/
vma->bound &= ~GLOBAL_BIND;
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
}
@@ -1538,7 +1602,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
BUG_ON(!i915_is_ggtt(vma->vm));
- intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+ intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
vma->bound = GLOBAL_BIND;
}
@@ -1588,7 +1652,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, obj->pages,
+ vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1600,7 +1664,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
- vma->obj->pages,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -2165,7 +2229,8 @@ int i915_gem_gtt_init(struct drm_device *dev)
}
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
@@ -2176,12 +2241,9 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
+ vma->ggtt_view = *view;
- switch (INTEL_INFO(vm->dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
+ if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
@@ -2189,39 +2251,73 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->unbind_vma = ppgtt_unbind_vma;
vma->bind_vma = ppgtt_bind_vma;
}
- break;
- case 5:
- case 4:
- case 3:
- case 2:
+ } else {
BUG_ON(!i915_is_ggtt(vm));
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
- break;
- default:
- BUG();
}
- /* Keep GGTT vmas first to make debug easier */
- if (i915_is_ggtt(vm))
- list_add(&vma->vma_link, &obj->vma_list);
- else {
- list_add_tail(&vma->vma_link, &obj->vma_list);
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+ if (!i915_is_ggtt(vm))
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- }
return vma;
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma(obj, vm);
+ vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, vm);
+ vma = __i915_gem_vma_create(obj, vm, view);
return vma;
}
+
+static inline
+int i915_get_vma_pages(struct i915_vma *vma)
+{
+ if (vma->ggtt_view.pages)
+ return 0;
+
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+ vma->ggtt_view.pages = vma->obj->pages;
+ else
+ WARN_ONCE(1, "GGTT view %u not implemented!\n",
+ vma->ggtt_view.type);
+
+ if (!vma->ggtt_view.pages) {
+ DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ vma->ggtt_view.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ int ret = i915_get_vma_pages(vma);
+
+ if (ret)
+ return ret;
+
+ vma->bind_vma(vma, cache_level, flags);
+
+ return 0;
+}
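
For reference, a minimal sketch of how a caller might drive the two-step binding path introduced above: look up the VMA for a view, then bind it. The helper names come from this patch; the surrounding error handling is illustrative only and not part of the change.

/* Hedged sketch: bind an object into the GGTT using the default view. */
static int example_bind_default_view(struct drm_i915_gem_object *obj,
				     struct i915_address_space *ggtt_vm)
{
	struct i915_vma *vma;

	/* Look up (or create) the VMA for the normal GGTT view. */
	vma = i915_gem_obj_lookup_or_create_vma_view(obj, ggtt_vm,
						     &i915_ggtt_view_normal);
	if (IS_ERR_OR_NULL(vma))
		return vma ? PTR_ERR(vma) : -ENOMEM;

	/* i915_vma_bind() resolves the view's sg_table via
	 * i915_get_vma_pages() and only then writes the PTEs. */
	return i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
}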
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index beaf4bcfdac8..e377c7d27bd4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -109,7 +109,20 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+};
+
+struct i915_ggtt_view {
+ enum i915_ggtt_view_type type;
+
+ struct sg_table *pages;
+};
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+
enum i915_cache_level;
+
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
@@ -129,6 +142,15 @@ struct i915_vma {
#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in
+ * GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@@ -146,11 +168,10 @@ struct i915_vma {
/**
* How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, pin_ioctl
- * (via user_pin_count), execbuffer (objects are not allowed multiple
- * times for the same batchbuffer), and the framebuffer code. When
- * switching/pageflipping, the framebuffer code has at most two buffers
- * pinned per crtc.
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
@@ -182,7 +203,7 @@ struct i915_address_space {
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -193,7 +214,7 @@ struct i915_address_space {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
- * last_rendering_seqno is 0 while an object is in this list.
+ * last_read_req is NULL while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
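
Since the view is now part of every VMA, lookups have to match on both the address space and the view type. A hedged sketch of what such a lookup could look like (the function name is illustrative; the fields are the ones declared above):

static struct i915_vma *
example_find_vma_view(struct drm_i915_gem_object *obj,
		      struct i915_address_space *vm,
		      enum i915_ggtt_view_type type)
{
	struct i915_vma *vma;

	/* One object may now carry several VMAs per VM, one per view. */
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm && vma->ggtt_view.type == type)
			return vma;

	return NULL;
}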
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 98dcd94acba8..521548a08578 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so.obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4727a4e2c87c..7a24bd1a51f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -399,7 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
obj->fence_dirty =
- obj->last_fenced_seqno ||
+ obj->last_fenced_req ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d182058383a9..1719078c763a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,7 +113,10 @@ restart:
continue;
obj = mo->obj;
- drm_gem_object_reference(&obj->base);
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
spin_unlock(&mn->lock);
cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
obj = container_of(it, struct i915_mmu_object, it)->obj;
- drm_gem_object_reference(&obj->base);
+
+ /* The mmu_object is released late when destroying the
+ * GEM object so it is entirely possible to gain a
+ * reference on an object in the process of being freed
+ * since our serialisation is via the spinlock and not
+ * the struct_mutex - and consequently use it after it
+ * is freed and then double free it.
+ */
+ if (!kref_get_unless_zero(&obj->base.refcount)) {
+ spin_unlock(&mn->lock);
+ serial = 0;
+ continue;
+ }
+
serial = mn->serial;
}
spin_unlock(&mn->lock);
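
The comment above describes a general lookup-under-spinlock pattern: the final reference may be dropped concurrently, so the lookup must only take a reference while the count is still non-zero. A self-contained sketch of that pattern with generic names (example_obj and example_lookup are illustrative, not driver code):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_obj {
	struct kref ref;
	struct list_head link;
};

static struct example_obj *example_lookup(spinlock_t *lock,
					  struct list_head *head)
{
	struct example_obj *obj, *found = NULL;

	spin_lock(lock);
	list_for_each_entry(obj, head, link) {
		/* kref_get_unless_zero() fails once the final put has run,
		 * so an object already being freed is skipped instead of
		 * being resurrected and later double freed. */
		if (kref_get_unless_zero(&obj->ref)) {
			found = obj;
			break;
		}
	}
	spin_unlock(lock);

	return found;
}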
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index cdaee6ce05f8..48ddbf44c862 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -670,8 +670,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- err->rseqno = obj->last_read_seqno;
- err->wseqno = obj->last_write_seqno;
+ err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+ err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -679,13 +679,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->pinned = 0;
if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
- err->ring = obj->ring ? obj->ring->id : -1;
+ err->ring = obj->last_read_req ?
+ i915_gem_request_get_ring(obj->last_read_req)->id : -1;
err->cache_level = obj->cache_level;
}
@@ -719,10 +718,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
break;
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->pin_count > 0) {
+ if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
- break;
- }
}
return err - first;
@@ -767,32 +764,21 @@ static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
+ if (IS_GEN3(dev) || IS_GEN2(dev)) {
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- default:
- BUG();
- }
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
+ (i * 4));
+ } else if (IS_GEN5(dev) || IS_GEN4(dev))
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 +
+ (i * 8));
+ else if (INTEL_INFO(dev)->gen >= 6)
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
+ (i * 8));
}
@@ -926,9 +912,13 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
+ if (IS_GEN6(dev))
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE_READ(ring));
+ else if (IS_GEN7(dev))
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE(ring));
+ else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(ring, i));
@@ -936,16 +926,6 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(ring, i));
}
- break;
- case 7:
- ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(ring));
- break;
- case 6:
- ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(ring));
- break;
- }
}
}
@@ -1072,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
+ erq->tail = request->postfix;
}
}
}
@@ -1097,10 +1077,8 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->pin_count > 0) {
+ if (vma->vm == vm && vma->pin_count > 0)
i++;
- break;
- }
}
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@@ -1378,26 +1356,15 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- case 3:
+ if (IS_GEN2(dev) || IS_GEN3(dev))
instdone[0] = I915_READ(INSTDONE);
- break;
- case 4:
- case 5:
- case 6:
+ else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
- break;
- default:
- WARN_ONCE(1, "Unsupported platform\n");
- case 7:
- case 8:
- case 9:
+ } else if (INTEL_INFO(dev)->gen >= 7) {
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- break;
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d0d3dfbe6d2a..4145d95902f5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -45,7 +45,7 @@
* and related files, but that will be described in separate chapters.
*/
-static const u32 hpd_ibx[] = {
+static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
@@ -53,7 +53,7 @@ static const u32 hpd_ibx[] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
-static const u32 hpd_cpt[] = {
+static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
@@ -61,7 +61,7 @@ static const u32 hpd_cpt[] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
-static const u32 hpd_mask_i915[] = {
+static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
@@ -70,7 +70,7 @@ static const u32 hpd_mask_i915[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
-static const u32 hpd_status_g4x[] = {
+static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -79,7 +79,7 @@ static const u32 hpd_status_g4x[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
-static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
@@ -183,6 +183,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -229,6 +231,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
assert_spin_locked(&dev_priv->irq_lock);
new_val = dev_priv->pm_irq_mask;
@@ -292,6 +296,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+ /*
+ * SNB and IVB can, and VLV and CHV may, hard hang on a looping
+ * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+ return mask;
+}
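
Every write to GEN6_PMINTRMSK is expected to go through this sanitizer so the workaround bits cannot be masked out by accident; the disable path below shows one use, and an enable path would look roughly like the following sketch (illustrative only, mirroring fields used elsewhere in this file):

static void example_unmask_rps(struct drm_i915_private *dev_priv)
{
	/* Keep only the RPS events unmasked, with the workaround bits
	 * re-applied by the sanitizer. */
	u32 mask = gen6_sanitize_rps_pm_mask(dev_priv,
					     ~dev_priv->pm_rps_events);

	I915_WRITE(GEN6_PMINTRMSK, mask);
}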
+
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -304,8 +325,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
- ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -332,6 +352,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
assert_spin_locked(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -571,7 +593,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
const struct drm_display_mode *mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
@@ -642,7 +664,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -669,7 +691,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -827,7 +849,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc,
- &to_intel_crtc(crtc)->config.adjusted_mode);
+ &to_intel_crtc(crtc)->config->base.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -857,7 +879,7 @@ static void i915_digport_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private, dig_port_work);
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
- int i, ret;
+ int i;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
@@ -881,9 +903,11 @@ static void i915_digport_work_func(struct work_struct *work)
valid = true;
if (valid) {
+ enum irqreturn ret;
+
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == true) {
- /* if we get true fallback to old school hpd */
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
old_bits |= (1 << intel_dig_port->base.hpd_pin);
}
}
@@ -1017,7 +1041,7 @@ static void notify_ring(struct drm_device *dev,
if (!intel_ring_initialized(ring))
return;
- trace_i915_gem_request_complete(ring);
+ trace_i915_gem_request_notify(ring);
wake_up_all(&ring->irq_queue);
}
@@ -1383,14 +1407,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1406,14 +1430,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1440,7 +1464,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -1500,7 +1524,7 @@ static inline enum port get_port_from_pin(enum hpd_pin pin)
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
u32 dig_hotplug_reg,
- const u32 *hpd)
+ const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -2397,19 +2421,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
}
/**
- * i915_error_work_func - do process context error handling work
- * @work: work struct
+ * i915_reset_and_wakeup - do process context error handling work
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_error_work_func(struct work_struct *work)
+static void i915_reset_and_wakeup(struct drm_device *dev)
{
- struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
- work);
- struct drm_i915_private *dev_priv =
- container_of(error, struct drm_i915_private, gpu_error);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2576,10 +2596,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
/**
- * i915_handle_error - handle an error interrupt
+ * i915_handle_error - handle a gpu error
* @dev: drm device
*
- * Do some basic checking of regsiter state at error interrupt time and
+ * Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
@@ -2604,9 +2624,9 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
&dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so that the reset work function
- * i915_error_work_func doesn't deadlock trying to grab various
- * locks. By bumping the reset counter first, the woken
+ * Wakeup waiting processes so that the reset function
+ * i915_reset_and_wakeup doesn't deadlock trying to grab
+ * various locks. By bumping the reset counter first, the woken
* processes will see a reset in progress and back off,
* releasing their locks and then wait for the reset completion.
* We must do this for _all_ gpu waiters that might hold locks
@@ -2619,13 +2639,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_error_wake_up(dev_priv, false);
}
- /*
- * Our reset work can grab modeset locks (since it needs to reset the
- * state of outstanding pagelips). Hence it must not be run on our own
- * dev-priv->wq work queue for otherwise the flush_work in the pageflip
- * code will deadlock.
- */
- schedule_work(&dev_priv->gpu_error.work);
+ i915_reset_and_wakeup(dev);
}
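
The comment above captures the ordering that matters here: the reset counter is bumped before the waiters are woken, so each woken waiter observes a reset in progress and releases its locks. A reduced sketch of that ordering with generic names (the i915 code uses its own counter helpers; this is illustrative):

#include <linux/atomic.h>
#include <linux/wait.h>

static void example_signal_reset(atomic_t *reset_counter,
				 wait_queue_head_t *waiters)
{
	/* Publish "reset in progress" first... */
	atomic_inc(reset_counter);

	/* ...then wake everyone; on wakeup each waiter rechecks the
	 * counter, sees the reset and backs off, dropping its locks. */
	wake_up_all(waiters);
}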
/* Called from drm generic code, passed 'crtc' which
@@ -2753,18 +2767,18 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static u32
-ring_last_seqno(struct intel_engine_cs *ring)
+static struct drm_i915_gem_request *
+ring_last_request(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list)->seqno;
+ struct drm_i915_gem_request, list);
}
static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring)
{
return (list_empty(&ring->request_list) ||
- i915_seqno_passed(seqno, ring_last_seqno(ring)));
+ i915_gem_request_completed(ring_last_request(ring), false));
}
static bool
@@ -2950,7 +2964,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG;
}
-/**
+/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and
* if there are no progress, hangcheck score for that ring is increased.
@@ -2958,10 +2972,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-static void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(struct work_struct *work)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
@@ -2984,7 +3000,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring, seqno)) {
+ if (ring_idle(ring)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
@@ -3075,17 +3091,18 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+ struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
if (!i915.enable_hangcheck)
return;
- /* Don't continually defer the hangcheck, but make sure it is active */
- if (timer_pending(timer))
- return;
- mod_timer(timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ /* Don't continually defer the hangcheck so that it is always run at
+ * least once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
+ round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
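
The switch from a timer to delayed work relies on queue_delayed_work() being a no-op while the work is still pending, which is what replaces the explicit timer_pending() check. A minimal sketch of that pattern with generic names (example_checker is illustrative):

#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_checker {
	struct workqueue_struct *wq;
	struct delayed_work work;
};

static void example_queue_check(struct example_checker *c,
				unsigned long period)
{
	/* Re-queueing while already pending does nothing, so this can be
	 * called on every submission without deferring the check forever. */
	queue_delayed_work(c->wq, &c->work,
			   round_jiffies_up_relative(period));
}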
static void ibx_irq_reset(struct drm_device *dev)
@@ -4123,26 +4140,24 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
- if (I915_HAS_HOTPLUG(dev)) {
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- hotplug_en &= ~HOTPLUG_INT_EN_MASK;
- /* Note HDMI and DP share hotplug bits */
- /* enable bits are the same for all generations */
- for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
- /* Programming the CRT detection parameters tends
- to generate a spurious hotplug event about three
- seconds later. So just do it once.
- */
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
- hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-
- /* Ignore TV since it's buggy */
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- }
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+ /* Note HDMI and DP share hotplug bits */
+ /* enable bits are the same for all generations */
+ for_each_intel_encoder(dev, intel_encoder)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
+ /* Programming the CRT detection parameters tends
+ to generate a spurious hotplug event about three
+ seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ /* Ignore TV since it's buggy */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -4320,7 +4335,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
@@ -4331,9 +4345,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
- setup_timer(&dev_priv->gpu_error.hangcheck_timer,
- i915_hangcheck_elapsed,
- (unsigned long) dev);
+ INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
intel_hpd_irq_reenable_work);
@@ -4406,14 +4419,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index c91cb2033cc5..44f2262a5553 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@ struct i915_params i915 __read_mostly = {
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
- .enable_execlists = 0,
+ .enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = 0,
@@ -51,6 +51,8 @@ struct i915_params i915 __read_mostly = {
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
+ .verbose_state_checks = 1,
+ .nuclear_pageflip = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -122,7 +124,7 @@ MODULE_PARM_DESC(enable_ppgtt,
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
- "(-1=auto, 0=disabled [default], 1=enabled)");
+ "(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
@@ -173,3 +175,11 @@ module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");
+
+module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
+MODULE_PARM_DESC(verbose_state_checks,
+ "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
+
+module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
+MODULE_PARM_DESC(nuclear_pageflip,
+ "Force atomic modeset functionality; only planes work for now (default: false).");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 172de3b3433b..33b3d0a24071 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -31,6 +31,8 @@
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
+#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
+ (port) == PORT_B ? (b) : (c))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
@@ -217,6 +219,8 @@
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK 0x7000000
+#define INSTR_26_TO_24_SHIFT 24
/*
* Memory interface instructions used by the kernel
@@ -246,6 +250,7 @@
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_SET_APPID MI_INSTR(0x0e, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -303,8 +308,9 @@
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
-#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
-#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
+#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
+#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
@@ -470,17 +476,18 @@
*/
#define BCS_SWCTRL 0x22200
-#define HS_INVOCATION_COUNT 0x2300
-#define DS_INVOCATION_COUNT 0x2308
-#define IA_VERTICES_COUNT 0x2310
-#define IA_PRIMITIVES_COUNT 0x2318
-#define VS_INVOCATION_COUNT 0x2320
-#define GS_INVOCATION_COUNT 0x2328
-#define GS_PRIMITIVES_COUNT 0x2330
-#define CL_INVOCATION_COUNT 0x2338
-#define CL_PRIMITIVES_COUNT 0x2340
-#define PS_INVOCATION_COUNT 0x2348
-#define PS_DEPTH_COUNT 0x2350
+#define GPGPU_THREADS_DISPATCHED 0x2290
+#define HS_INVOCATION_COUNT 0x2300
+#define DS_INVOCATION_COUNT 0x2308
+#define IA_VERTICES_COUNT 0x2310
+#define IA_PRIMITIVES_COUNT 0x2318
+#define VS_INVOCATION_COUNT 0x2320
+#define GS_INVOCATION_COUNT 0x2328
+#define GS_PRIMITIVES_COUNT 0x2330
+#define CL_INVOCATION_COUNT 0x2338
+#define CL_PRIMITIVES_COUNT 0x2340
+#define PS_INVOCATION_COUNT 0x2348
+#define PS_DEPTH_COUNT 0x2350
/* There are the 4 64-bit counter registers, one for each stream output */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
@@ -598,6 +605,15 @@ enum punit_power_well {
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136
+#define FB_GFX_FREQ_FUSE_MASK 0xff
+#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24
+#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16
+#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8
+
+#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
+#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -1464,6 +1480,17 @@ enum punit_power_well {
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+/* Fuse readout registers for GT */
+#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
+#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
+#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
+#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
+#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1509,7 +1536,7 @@ enum punit_power_well {
#define I915_ISP_INTERRUPT (1<<22)
#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIB_INTERRUPT (1<<19)
+#define I915_MIPIC_INTERRUPT (1<<19)
#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
@@ -2539,6 +2566,42 @@ enum punit_power_well {
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
+/* VLV eDP PSR registers */
+#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
+#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
+#define VLV_EDP_PSR_ENABLE (1<<0)
+#define VLV_EDP_PSR_RESET (1<<1)
+#define VLV_EDP_PSR_MODE_MASK (7<<2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
+#define VLV_EDP_PSR_DBL_FRAME (1<<10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
+#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
+
+#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
+#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
+
+#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
+#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_CURR_STATE_MASK 7
+#define VLV_EDP_PSR_DISABLED (0<<0)
+#define VLV_EDP_PSR_INACTIVE (1<<0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
+#define VLV_EDP_PSR_EXIT (5<<0)
+#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
+
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
@@ -2762,7 +2825,8 @@ enum punit_power_well {
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
-#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
@@ -3704,6 +3768,11 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
+#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
+#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
/*
@@ -5158,6 +5227,9 @@ enum punit_power_well {
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+#define HIZ_CHICKEN 0x7018
+# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -6005,6 +6077,13 @@ enum punit_power_well {
#define GEN6_PMINTRMSK 0xA168
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
+#define GEN9_PG_ENABLE 0xA210
+
+#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
@@ -6119,6 +6198,7 @@ enum punit_power_well {
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN3 0xe184
+#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
@@ -6631,29 +6711,31 @@ enum punit_power_well {
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
-/* VLV MIPI registers */
+/* MIPI DSI registers */
+
+#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
- _MIPIB_PORT_CTRL)
-#define DPI_ENABLE (1 << 31) /* A + B */
+#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_SHIFT 26
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
-#define DITHERING_ENABLE (1 << 25) /* A + B */
+#define DITHERING_ENABLE (1 << 25) /* A + C */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
-#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
-#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
-#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
-#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
@@ -6662,10 +6744,10 @@ enum punit_power_well {
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
-#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
-#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
-#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
-#define TEARING_EFFECT_SHIFT 2 /* A + B */
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
+#define TEARING_EFFECT_SHIFT 2 /* A + C */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
@@ -6677,9 +6759,9 @@ enum punit_power_well {
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
- _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \
+ _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
@@ -6689,9 +6771,9 @@ enum punit_power_well {
/* MIPI DSI Controller and D-PHY registers */
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
-#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
- _MIPIB_DEVICE_READY)
+#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
+ _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6700,13 +6782,13 @@ enum punit_power_well {
#define DEVICE_READY (1 << 0)
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
-#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
- _MIPIB_INTR_STAT)
+#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \
+ _MIPIC_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
-#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
- _MIPIB_INTR_EN)
+#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \
+ _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6741,9 +6823,9 @@ enum punit_power_well {
#define RXSOT_ERROR (1 << 0)
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
- _MIPIB_DSI_FUNC_PRG)
+#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6765,93 +6847,93 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_MASK (7 << 0)
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
- _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
- _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
- _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
- _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
-#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
- _MIPIB_DPI_RESOLUTION)
+#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
+ _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
- _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
-#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
- _MIPIB_HBP_COUNT)
+#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
+ _MIPIC_HBP_COUNT)
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
-#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
- _MIPIB_HFP_COUNT)
+#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
+ _MIPIC_HFP_COUNT)
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
-#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
- _MIPIB_VBP_COUNT)
+#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
+ _MIPIC_VBP_COUNT)
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
-#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
- _MIPIB_VFP_COUNT)
+#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
+ _MIPIC_VFP_COUNT)
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
-#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
- _MIPIB_DPI_CONTROL)
+#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
+ _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6861,30 +6943,30 @@ enum punit_power_well {
#define SHUTDOWN (1 << 0)
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
-#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
- _MIPIB_DPI_DATA)
+#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \
+ _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
-#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
- _MIPIB_INIT_COUNT)
+#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
+ _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
- _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6893,9 +6975,9 @@ enum punit_power_well {
#define VIDEO_MODE_BURST (3 << 0)
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
-#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
- _MIPIB_EOT_DISABLE)
+#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
+ _MIPIC_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6906,32 +6988,32 @@ enum punit_power_well {
#define EOT_DISABLE (1 << 0)
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
-#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
- _MIPIB_LP_BYTECLK)
+#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
+ _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
-#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
- _MIPIB_LP_GEN_DATA)
+#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
+ _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
-#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
- _MIPIB_HS_GEN_DATA)
+#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
+ _MIPIC_HS_GEN_DATA)
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
-#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
- _MIPIB_LP_GEN_CTRL)
+#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
+ _MIPIC_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
-#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
- _MIPIB_HS_GEN_CTRL)
+#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
+ _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6943,9 +7025,9 @@ enum punit_power_well {
/* data type values, see include/video/mipi_display.h */
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
-#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
- _MIPIB_GEN_FIFO_STAT)
+#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6962,17 +7044,17 @@ enum punit_power_well {
#define HS_DATA_FIFO_FULL (1 << 0)
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
- _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
-#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
- _MIPIB_DPHY_PARAM)
+#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
+ _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6984,36 +7066,36 @@ enum punit_power_well {
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
-#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
- _MIPIB_DBI_BW_CTRL)
+#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
+ _MIPIC_DBI_BW_CTRL)
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
- _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
-#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
- _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \
+ _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
-#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
- _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
-#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
- _MIPIB_INTR_EN_REG_1)
+#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
+ _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
@@ -7032,9 +7114,9 @@ enum punit_power_well {
/* MIPI adapter registers */
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
-#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
- _MIPIB_CTRL)
+#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \
+ _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -7047,24 +7129,24 @@ enum punit_power_well {
#define RGB_FLIP_TO_BGR (1 << 2)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
-#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
- _MIPIB_DATA_ADDRESS)
+#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
+ _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
-#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
- _MIPIB_DATA_LENGTH)
+#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
+ _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
-#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
- _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
@@ -7072,22 +7154,22 @@ enum punit_power_well {
#define COMMAND_VALID (1 << 0)
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
-#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
- _MIPIB_COMMAND_LENGTH)
+#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
+ _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
-#define MIPI_READ_DATA_RETURN(tc, n) \
- (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(port, n) \
+ (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
-#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
- _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \
+ _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
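Note: the hunks above rename the second MIPI register block from _MIPIB* (which suggested a second transcoder) to _MIPIC* and switch the lookup macro from the transcoder-indexed _TRANSCODER(tc, a, b) to a port-indexed _MIPI_PORT(port, a, c). The definition of _MIPI_PORT itself is not part of this excerpt; as a hedged sketch, a two-way selector of this kind reduces to a conditional on the port enum (the exact form in i915_reg.h may differ):

	/* Hypothetical sketch only -- the real _MIPI_PORT lives earlier in
	 * i915_reg.h, outside these hunks.  Port A uses the 0xb0xx register
	 * block, port C the 0xb8xx block. */
	#define _MIPI_PORT(port, a, c)	((port) == PORT_A ? (a) : (c))

With that, MIPI_CTRL(PORT_C), for example, resolves to _MIPIC_CTRL (mipi_mmio_base + 0xb904).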
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 26368822a33f..9f19ed38cdc3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -264,7 +264,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on platforms that support FBC */
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 4a5af695307e..49f5ade0edb7 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,14 +49,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 reg, czcount_30ns;
+ u32 clk_reg, czcount_30ns;
if (IS_CHERRYVIEW(dev))
- reg = CHV_CLK_CTL1;
+ clk_reg = CHV_CLK_CTL1;
else
- reg = VLV_CLK_CTL2;
+ clk_reg = VLV_CLK_CTL2;
- czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+ czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!czcount_30ns) {
WARN(!czcount_30ns, "bogus CZ count value");
@@ -116,8 +116,6 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
- if (IS_VALLEYVIEW(dminor->dev))
- rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
@@ -126,8 +124,6 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
- if (IS_VALLEYVIEW(dminor->dev))
- rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
@@ -285,7 +281,7 @@ static struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
@@ -301,9 +297,14 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+ ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
- ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
+ u32 rpstat = I915_READ(GEN6_RPSTAT1);
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ ret = intel_gpu_freq(dev_priv, ret);
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -312,6 +313,27 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
@@ -319,8 +341,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ return snprintf(buf, PAGE_SIZE,
+ "%d\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -333,10 +356,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- else
- ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -360,10 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- val = vlv_freq_opcode(dev_priv, val);
- else
- val /= GT_FREQUENCY_MULTIPLIER;
+ val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
@@ -374,21 +391,21 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- val * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, val));
dev_priv->rps.max_freq_softlimit = val;
- if (dev_priv->rps.cur_freq > val) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
- } else if (!IS_VALLEYVIEW(dev)) {
- /* We still need gen6_set_rps to process the new max_delay and
- * update the interrupt limits even though frequency request is
- * unchanged. */
- gen6_set_rps(dev, dev_priv->rps.cur_freq);
- }
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new max_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged. */
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -405,10 +422,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- else
- ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -432,10 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
- val = vlv_freq_opcode(dev_priv, val);
- else
- val /= GT_FREQUENCY_MULTIPLIER;
+ val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
@@ -446,17 +457,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_freq_softlimit = val;
- if (dev_priv->rps.cur_freq < val) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
- } else if (!IS_VALLEYVIEW(dev)) {
- /* We still need gen6_set_rps to process the new min_delay and
- * update the interrupt limits even though frequency request is
- * unchanged. */
- gen6_set_rps(dev, dev_priv->rps.cur_freq);
- }
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new min_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged. */
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,6 +475,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
}
+static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
@@ -494,19 +506,22 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
if (attr == &dev_attr_gt_RP0_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
else
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0x0000ff) >> 0));
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
else
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0x00ff00) >> 8));
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
else
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0xff0000) >> 16));
} else {
BUG();
}
@@ -514,6 +529,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
}
static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
@@ -524,6 +540,7 @@ static const struct attribute *gen6_attrs[] = {
};
static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
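Note: the i915_sysfs.c changes above split "actual" and "requested" GPU frequency: the new gt_act_freq_mhz attribute reports what the hardware is currently running at (CAGF from GEN6_RPSTAT1, or the punit status register on VLV/CHV), while gt_cur_freq_mhz now reports only the last software-requested frequency. A minimal userspace sketch for reading the two attributes follows; the /sys/class/drm/card0 path is an assumption (the card index depends on the machine):

	/* Sketch: read the frequency attributes registered above. */
	#include <stdio.h>

	static long read_mhz(const char *path)
	{
		FILE *f = fopen(path, "r");
		long mhz = -1;

		if (f) {
			if (fscanf(f, "%ld", &mhz) != 1)
				mhz = -1;
			fclose(f);
		}
		return mhz;
	}

	int main(void)
	{
		printf("actual:    %ld MHz\n",
		       read_mhz("/sys/class/drm/card0/gt_act_freq_mhz"));
		printf("requested: %ld MHz\n",
		       read_mhz("/sys/class/drm/card0/gt_cur_freq_mhz"));
		return 0;
	}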
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 751d4ad14d62..6058a01b4443 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
- u32 seqno),
- TP_ARGS(from, to, seqno),
+ struct drm_i915_gem_request *req),
+ TP_ARGS(from, to, req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
- TP_ARGS(ring, seqno, flags),
+ TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+ TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
- i915_trace_irq_get(ring, seqno);
+ i915_trace_irq_get(ring, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -398,31 +400,36 @@ TRACE_EVENT(i915_gem_ring_flush,
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, uniq)
__field(u32, seqno)
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->uniq = req ? req->uniq : 0;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->uniq,
+ __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
-TRACE_EVENT(i915_gem_request_complete,
+TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
@@ -443,17 +450,23 @@ TRACE_EVENT(i915_gem_request_complete,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, uniq)
__field(u32, seqno)
__field(bool, blocking)
),
@@ -465,20 +478,24 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
- __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+ __entry->uniq = req ? req->uniq : 0;
+ __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->blocking =
+ mutex_is_locked(&ring->dev->struct_mutex);
),
- TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->blocking ? "yes (NB)" : "no")
+ TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring, __entry->uniq,
+ __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
DECLARE_EVENT_CLASS(i915_ring,
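Note: the tracepoint changes above convert the i915_gem_request event class (and the sync_to/dispatch/wait events) from taking a (ring, seqno) pair to taking the request itself; ring, seqno and the new uniq field are then recovered through accessors. Those accessors live in i915_drv.h, outside this diff; a hedged sketch of the shape they are assumed to have:

	/* Sketch only -- the real helpers are defined in i915_drv.h. */
	static inline struct intel_engine_cs *
	i915_gem_request_get_ring(struct drm_i915_gem_request *req)
	{
		return req ? req->ring : NULL;
	}

	static inline u32
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		return req ? req->seqno : 0;
	}

Passing the request lets each event read the seqno at assign time rather than requiring every caller to extract it first.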
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
new file mode 100644
index 000000000000..19a9dd5408f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic modeset support
+ *
+ * The functions here implement the state management and hardware programming
+ * dispatch required by the atomic modeset infrastructure.
+ * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+
+
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int nconnectors = dev->mode_config.num_connector;
+ enum pipe nuclear_pipe = INVALID_PIPE;
+ int ret;
+ int i;
+ bool not_nuclear = false;
+
+ /*
+ * FIXME: At the moment, we only support "nuclear pageflip" on a
+ * single CRTC. Cross-crtc updates will be added later.
+ */
+ for (i = 0; i < nplanes; i++) {
+ struct intel_plane *plane = to_intel_plane(state->planes[i]);
+ if (!plane)
+ continue;
+
+ if (nuclear_pipe == INVALID_PIPE) {
+ nuclear_pipe = plane->pipe;
+ } else if (nuclear_pipe != plane->pipe) {
+ DRM_DEBUG_KMS("i915 only supports atomic plane operations on a single CRTC at the moment\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * FIXME: We only handle planes for now; make sure there are no CRTCs
+ * or connectors involved.
+ */
+ state->allow_modeset = false;
+ for (i = 0; i < ncrtcs; i++) {
+ struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
+ if (crtc && crtc->pipe != nuclear_pipe)
+ not_nuclear = true;
+ }
+ for (i = 0; i < nconnectors; i++)
+ if (state->connectors[i] != NULL)
+ not_nuclear = true;
+
+ if (not_nuclear) {
+ DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ int ret;
+ int i;
+
+ if (async) {
+ DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /* Point of no return */
+
+ /*
+ * FIXME: The proper sequence here will eventually be:
+ *
+ * drm_atomic_helper_swap_state(dev, state)
+ * drm_atomic_helper_commit_pre_planes(dev, state);
+ * drm_atomic_helper_commit_planes(dev, state);
+ * drm_atomic_helper_commit_post_planes(dev, state);
+ * drm_atomic_helper_wait_for_vblanks(dev, state);
+ * drm_atomic_helper_cleanup_planes(dev, state);
+ * drm_atomic_state_free(state);
+ *
+ * once we have full atomic modeset. For now, just manually update
+ * plane states to avoid clobbering good states with dummy states
+ * while nuclear pageflipping.
+ */
+ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->state->state = state;
+ swap(state->plane_states[i], plane->state);
+ plane->state->state = NULL;
+ }
+ drm_atomic_helper_commit_planes(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_state_free(state);
+
+ return 0;
+}
+
+/**
+ * intel_connector_atomic_get_property - fetch connector property value
+ * @connector: connector to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific connector property.
+ */
+int
+intel_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ int i;
+
+ /*
+ * TODO: We only have atomic modeset for planes at the moment, so the
+ * crtc/connector code isn't quite ready yet. Until it's ready,
+ * continue to look up all property values in the DRM's shadow copy
+ * in obj->properties->values[].
+ *
+ * When the crtc/connector state work matures, this function should
+ * be updated to read the values out of the state structure instead.
+ */
+ for (i = 0; i < connector->base.properties->count; i++) {
+ if (connector->base.properties->properties[i] == property) {
+ *val = connector->base.properties->values[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * intel_crtc_duplicate_state - duplicate crtc state
+ * @crtc: drm crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * Intel-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+intel_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (WARN_ON(!intel_crtc->config))
+ return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+
+ return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
+ GFP_KERNEL);
+}
+
+/**
+ * intel_crtc_destroy_state - destroy crtc state
+ * @crtc: drm crtc
+ *
+ * Destroys the crtc state (both common and Intel-specific) for the
+ * specified crtc.
+ */
+void
+intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
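Note: intel_atomic.c above adds the driver-level atomic entry points, but they only take effect once they are installed in the device's mode_config function table (done in intel_display.c, outside this excerpt). A hedged sketch of that wiring; the fb_create/output_poll_changed entries stand in for whatever legacy hooks the driver already uses:

	/* Sketch: hooking the new entry points into the DRM core.  Only
	 * .atomic_check/.atomic_commit come from intel_atomic.c; the other
	 * two members are assumptions standing in for the existing hooks. */
	static const struct drm_mode_config_funcs intel_mode_funcs = {
		.fb_create = intel_user_framebuffer_create,
		.output_poll_changed = intel_fbdev_output_poll_changed,
		.atomic_check = intel_atomic_check,
		.atomic_commit = intel_atomic_commit,
	};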
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
new file mode 100644
index 000000000000..9e6f727dfd19
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic plane helpers
+ *
+ * The functions here are used by the atomic plane helper functions to
+ * implement legacy plane updates (i.e., drm_plane->update_plane() and
+ * drm_plane->disable_plane()). This allows plane updates to use the
+ * atomic state infrastructure and perform plane updates as separate
+ * prepare/check/commit/cleanup steps.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+
+/**
+ * intel_create_plane_state - create plane state object
+ * @plane: drm plane
+ *
+ * Allocates a fresh plane state for the given plane and sets some of
+ * the state values to sensible initial values.
+ *
+ * Returns: A newly allocated plane state, or NULL on failure
+ */
+struct intel_plane_state *
+intel_create_plane_state(struct drm_plane *plane)
+{
+ struct intel_plane_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->base.plane = plane;
+ state->base.rotation = BIT(DRM_ROTATE_0);
+
+ return state;
+}
+
+/**
+ * intel_plane_duplicate_state - duplicate plane state
+ * @plane: drm plane
+ *
+ * Allocates and returns a copy of the plane state (both common and
+ * Intel-specific) for the specified plane.
+ *
+ * Returns: The newly allocated plane state, or NULL on failure.
+ */
+struct drm_plane_state *
+intel_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *state;
+ struct intel_plane_state *intel_state;
+
+ if (WARN_ON(!plane->state))
+ intel_state = intel_create_plane_state(plane);
+ else
+ intel_state = kmemdup(plane->state, sizeof(*intel_state),
+ GFP_KERNEL);
+
+ if (!intel_state)
+ return NULL;
+
+ state = &intel_state->base;
+ if (state->fb)
+ drm_framebuffer_reference(state->fb);
+
+ return state;
+}
+
+/**
+ * intel_plane_destroy_state - destroy plane state
+ * @plane: drm plane
+ * @state: state object to destroy
+ *
+ * Destroys the plane state (both common and Intel-specific) for the
+ * specified plane.
+ */
+void
+intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static int intel_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane_state *intel_state = to_intel_plane_state(state);
+
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
+ /*
+ * Both crtc and plane->crtc could be NULL if we're updating a
+ * property while the plane is disabled. We don't actually have
+ * anything driver-specific we need to test in that case, so
+ * just return success.
+ */
+ if (!crtc)
+ return 0;
+
+ /*
+ * The original src/dest coordinates are stored in state->base, but
+ * we want to keep another copy internal to our driver that we can
+ * clip/modify ourselves.
+ */
+ intel_state->src.x1 = state->src_x;
+ intel_state->src.y1 = state->src_y;
+ intel_state->src.x2 = state->src_x + state->src_w;
+ intel_state->src.y2 = state->src_y + state->src_h;
+ intel_state->dst.x1 = state->crtc_x;
+ intel_state->dst.y1 = state->crtc_y;
+ intel_state->dst.x2 = state->crtc_x + state->crtc_w;
+ intel_state->dst.y2 = state->crtc_y + state->crtc_h;
+
+ /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
+ intel_state->clip.x1 = 0;
+ intel_state->clip.y1 = 0;
+ intel_state->clip.x2 =
+ intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
+ intel_state->clip.y2 =
+ intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
+
+ /*
+ * Disabling a plane is always okay; we just need to update
+ * fb tracking in a special way since cleanup_fb() won't
+ * get called by the plane helpers.
+ */
+ if (state->fb == NULL && plane->state->fb != NULL) {
+ /*
+ * 'prepare' is never called when plane is being disabled, so
+ * we need to handle frontbuffer tracking as a special case
+ */
+ intel_crtc->atomic.disabled_planes |=
+ (1 << drm_plane_index(plane));
+ }
+
+ return intel_plane->check_plane(plane, intel_state);
+}
+
+static void intel_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane_state *intel_state =
+ to_intel_plane_state(plane->state);
+
+ /* Don't disable an already disabled plane */
+ if (!plane->state->fb && !old_state->fb)
+ return;
+
+ intel_plane->commit_plane(plane, intel_state);
+}
+
+const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+ .prepare_fb = intel_prepare_plane_fb,
+ .cleanup_fb = intel_cleanup_plane_fb,
+ .atomic_check = intel_plane_atomic_check,
+ .atomic_update = intel_plane_atomic_update,
+};
+
+/**
+ * intel_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ */
+int
+intel_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_mode_config *config = &plane->dev->mode_config;
+
+ if (property == config->rotation_property) {
+ *val = state->rotation;
+ } else {
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+int
+intel_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_mode_config *config = &plane->dev->mode_config;
+
+ if (property == config->rotation_property) {
+ state->rotation = val;
+ } else {
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
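Note: intel_atomic_plane.c supplies the per-plane check/update hooks plus the state create/duplicate/destroy helpers. They become active once each plane registers intel_plane_helper_funcs with drm_plane_helper_add() and routes its drm_plane_funcs state callbacks to the helpers above. A hedged sketch (the update/disable/destroy entries are illustrative; the real table lives in intel_display.c / intel_sprite.c):

	/* Sketch: wiring a plane to the helpers defined above.  The legacy
	 * update/disable/destroy entries are assumptions; the atomic state
	 * and property hooks are the functions added in this file. */
	static const struct drm_plane_funcs intel_plane_funcs = {
		.update_plane = drm_plane_helper_update,
		.disable_plane = drm_plane_helper_disable,
		.destroy = intel_plane_destroy,
		.atomic_get_property = intel_plane_atomic_get_property,
		.atomic_set_property = intel_plane_atomic_set_property,
		.atomic_duplicate_state = intel_plane_duplicate_state,
		.atomic_destroy_state = intel_plane_destroy_state,
	};

	/* ...and at plane init time, after allocating the initial state
	 * with intel_create_plane_state(): */
	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);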
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 2c7ed5cb29c0..2396cc702d18 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -22,6 +22,9 @@
*/
#include <linux/kernel.h>
+#include <linux/component.h>
+#include <drm/i915_component.h>
+#include "intel_drv.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
@@ -397,7 +400,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -461,3 +464,110 @@ void intel_init_audio(struct drm_device *dev)
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
}
+
+static void i915_audio_component_get_power(struct device *dev)
+{
+ intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+}
+
+static void i915_audio_component_put_power(struct device *dev)
+{
+ intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+}
+
+/* Get CDCLK in kHz */
+static int i915_audio_component_get_cdclk_freq(struct device *dev)
+{
+ struct drm_i915_private *dev_priv = dev_to_i915(dev);
+ int ret;
+
+ if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+ return -ENODEV;
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ ret = intel_ddi_get_cdclk_freq(dev_priv);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
+ return ret;
+}
+
+static const struct i915_audio_component_ops i915_audio_component_ops = {
+ .owner = THIS_MODULE,
+ .get_power = i915_audio_component_get_power,
+ .put_power = i915_audio_component_put_power,
+ .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
+};
+
+static int i915_audio_component_bind(struct device *i915_dev,
+ struct device *hda_dev, void *data)
+{
+ struct i915_audio_component *acomp = data;
+
+ if (WARN_ON(acomp->ops || acomp->dev))
+ return -EEXIST;
+
+ acomp->ops = &i915_audio_component_ops;
+ acomp->dev = i915_dev;
+
+ return 0;
+}
+
+static void i915_audio_component_unbind(struct device *i915_dev,
+ struct device *hda_dev, void *data)
+{
+ struct i915_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+}
+
+static const struct component_ops i915_audio_component_bind_ops = {
+ .bind = i915_audio_component_bind,
+ .unbind = i915_audio_component_unbind,
+};
+
+/**
+ * i915_audio_component_init - initialize and register the audio component
+ * @dev_priv: i915 device instance
+ *
+ * This will register with the component framework a child component which
+ * will bind dynamically to the snd_hda_intel driver's corresponding master
+ * component when the latter is registered. During binding the child
+ * initializes an instance of struct i915_audio_component which it receives
+ * from the master. The master can then start to use the interface defined by
+ * this struct. Each side can break the binding at any point by deregistering
+ * its own component after which each side's component unbind callback is
+ * called.
+ *
+ * We ignore any error during registration and continue with reduced
+ * functionality (i.e. without HDMI audio).
+ */
+void i915_audio_component_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ if (ret < 0) {
+ DRM_ERROR("failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
+
+ dev_priv->audio_component_registered = true;
+}
+
+/**
+ * i915_audio_component_cleanup - deregister the audio component
+ * @dev_priv: i915 device instance
+ *
+ * Deregisters the audio component, breaking any existing binding to the
+ * corresponding snd_hda_intel driver's master component.
+ */
+void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->audio_component_registered)
+ return;
+
+ component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ dev_priv->audio_component_registered = false;
+}
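Note: the audio component added above exposes i915 services to the HDA driver solely through the ops table published in i915_audio_component_bind(). Once the snd_hda_intel master side has bound, a consumer is expected to call through acomp->ops with acomp->dev; a hedged sketch (the function name is illustrative):

	/* Sketch: audio-side use of the published ops.  The get_cdclk_freq()
	 * implementation above already takes and releases POWER_DOMAIN_AUDIO
	 * internally, so no explicit get_power()/put_power() bracket is
	 * needed around this particular call. */
	static int example_query_cdclk_khz(struct i915_audio_component *acomp)
	{
		if (!acomp || !acomp->ops)
			return -ENODEV;		/* i915 side not bound (yet) */

		return acomp->ops->get_cdclk_freq(acomp->dev);
	}

get_power()/put_power() remain available for paths that need the display audio power well held across a longer sequence of register accesses.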
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a4bd90f36a03..3f178258d9f9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -664,6 +664,50 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
}
+static void
+parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_psr *psr;
+ struct psr_table *psr_table;
+
+ psr = find_section(bdb, BDB_PSR);
+ if (!psr) {
+ DRM_DEBUG_KMS("No PSR BDB found.\n");
+ return;
+ }
+
+ psr_table = &psr->psr_table[panel_type];
+
+ dev_priv->vbt.psr.full_link = psr_table->full_link;
+ dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+ /* Allowed VBT values go from 0 to 15 */
+ dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+ switch (psr_table->lines_to_wait) {
+ case 0:
+ dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+ break;
+ case 1:
+ dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+ break;
+ case 2:
+ dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+ break;
+ case 3:
+ dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
+ psr_table->lines_to_wait);
+ break;
+ }
+
+ dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+}
+
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
@@ -1241,6 +1285,7 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
+ parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 7603765c91fc..a6a8710f665f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -80,7 +80,7 @@ struct vbios_data {
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
-#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_PSR 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
@@ -556,6 +556,26 @@ struct bdb_edp {
u16 edp_t3_optimization;
} __packed;
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
+
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
+
+ /* TP wake-up time in multiples of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+ struct psr_table psr_table[16];
+} __packed;
+
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
@@ -798,7 +818,8 @@ struct mipi_config {
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
- u16 rsvd3:12;
+ u16 pixel_overlap:3;
+ u16 rsvd3:9;
u16 rsvd4;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a9af9a4866db..e66e17af0a56 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -110,31 +111,31 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
}
static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
int dotclock;
- pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
intel_ddi_get_config(encoder, pipe_config);
- pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
- pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
@@ -157,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 adpa;
if (INTEL_INFO(dev)->gen >= 5)
@@ -303,7 +304,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
@@ -792,6 +793,8 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_get_property = intel_connector_atomic_get_property,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6b45cd150d3..f14e8a2a022d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -128,15 +128,15 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
};
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00000018, 0x000000a0 },
- { 0x00004014, 0x00000098 },
+ { 0x00000018, 0x000000a2 },
+ { 0x00004014, 0x0000009B },
{ 0x00006012, 0x00000088 },
- { 0x00008010, 0x00000080 },
- { 0x00000018, 0x00000098 },
+ { 0x00008010, 0x00000087 },
+ { 0x00000018, 0x0000009B },
{ 0x00004014, 0x00000088 },
- { 0x00006012, 0x00000080 },
+ { 0x00006012, 0x00000087 },
{ 0x00000018, 0x00000088 },
- { 0x00004014, 0x00000080 },
+ { 0x00004014, 0x00000087 },
};
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
@@ -328,7 +328,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
- FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -338,8 +338,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
- WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
+ WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -357,7 +357,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
- ((intel_crtc->config.fdi_lanes - 1) << 1) |
+ ((intel_crtc->config->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
@@ -732,7 +732,7 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -768,15 +768,15 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock;
if (pipe_config->has_dp_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -820,21 +820,26 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock * 2;
if (pipe_config->has_pch_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (pipe_config->has_dp_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- hsw_ddi_clock_get(encoder, pipe_config);
+ struct drm_device *dev = encoder->base.dev;
+
+ if (INTEL_INFO(dev)->gen <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
+ else
+ skl_ddi_clock_get(encoder, pipe_config);
}
static void
@@ -904,6 +909,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
{
@@ -918,16 +924,16 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- intel_crtc->new_config->dpll_hw_state.wrpll = val;
+ crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
- intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+ crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
@@ -1090,6 +1096,7 @@ found:
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
{
@@ -1139,11 +1146,11 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
} else /* eDP */
return true;
- intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
- intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
- intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -1151,7 +1158,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
}
/* shared DPLL id 0 is DPLL 1 */
- intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+ crtc_state->ddi_pll_sel = pll->id + 1;
return true;
}
@@ -1163,17 +1170,20 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
* For private DPLLs, compute_config() should do the selection for us. This
* function should be folded into compute_config() eventually.
*/
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(intel_crtc);
- int clock = intel_crtc->new_config->port_clock;
+ int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
- return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ return skl_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
else
- return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ return hsw_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1181,13 +1191,13 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@@ -1212,7 +1222,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1230,7 +1240,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
@@ -1239,7 +1249,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -1256,9 +1266,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1269,8 +1279,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) &&
- (intel_crtc->config.pch_pfit.enabled ||
- intel_crtc->config.pch_pfit.force_thru))
+ (intel_crtc->config->pch_pfit.enabled ||
+ intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1288,14 +1298,14 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
- if (intel_crtc->config.has_hdmi_sink)
+ if (intel_crtc->config->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
- temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
+ temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
@@ -1445,7 +1455,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1455,7 +1465,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1477,7 +1487,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
}
if (IS_SKYLAKE(dev)) {
- uint32_t dpll = crtc->config.ddi_pll_sel;
+ uint32_t dpll = crtc->config->ddi_pll_sel;
uint32_t val;
/*
@@ -1492,7 +1502,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
- val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+ val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
@@ -1509,8 +1519,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
I915_WRITE(DPLL_CTRL2, val);
} else {
- WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
}
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -1527,8 +1537,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_hdmi->set_infoframes(encoder,
- crtc->config.has_hdmi_sink,
- &crtc->config.adjusted_mode);
+ crtc->config->has_hdmi_sink,
+ &crtc->config->base.adjusted_mode);
}
}
@@ -1600,9 +1610,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
+ intel_edp_drrs_enable(intel_dp);
}
- if (intel_crtc->config.has_audio) {
+ if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
intel_audio_codec_enable(intel_encoder);
}
@@ -1617,7 +1628,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_crtc->config.has_audio) {
+ if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
@@ -1625,6 +1636,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_drrs_disable(intel_dp);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
@@ -2022,14 +2034,13 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
}
void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
- struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -2041,7 +2052,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
@@ -2106,10 +2117,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- if (INTEL_INFO(dev)->gen <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
- else
- skl_ddi_clock_get(encoder, pipe_config);
+ intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -2119,7 +2127,7 @@ static void intel_ddi_destroy(struct drm_encoder *encoder)
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
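[Editor's note, not part of the patch: the intel_ddi.c hunks above, like the intel_display.c hunks below, replace the CRTC configuration embedded in struct intel_crtc with a pointer to an intel_crtc_state that wraps the DRM core CRTC state, which is why every "crtc->config.foo" becomes "crtc->config->foo" and mode data such as adjusted_mode moves under "config->base". The following standalone sketch uses hypothetical stand-in types, not the driver's real definitions, purely to make that access pattern concrete.]

/* Standalone sketch (hypothetical names, not the i915 definitions)
 * illustrating the embedded-struct -> state-pointer conversion. */
#include <stdbool.h>

struct core_crtc_state {                     /* plays the role of drm_crtc_state */
	struct { int crtc_clock; } adjusted_mode;
};

struct example_crtc_state {                  /* plays the role of intel_crtc_state */
	struct core_crtc_state base;         /* core state embedded first */
	bool has_hdmi_sink;
	int fdi_lanes;
};

struct example_crtc {
	struct example_crtc_state *config;   /* pointer now, no longer embedded */
};

static int example_clock(const struct example_crtc *crtc)
{
	/* old: crtc->config.adjusted_mode.crtc_clock
	 * new: crtc->config->base.adjusted_mode.crtc_clock */
	return crtc->config->base.adjusted_mode.crtc_clock;
}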
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e2af1383b179..3d220a67f865 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
@@ -76,9 +77,9 @@ static const uint32_t intel_cursor_formats[] = {
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
@@ -95,9 +96,11 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config);
+ const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config);
+ const struct intel_crtc_state *pipe_config);
+static void intel_begin_crtc_commit(struct drm_crtc *crtc);
+static void intel_finish_crtc_commit(struct drm_crtc *crtc);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -895,7 +898,7 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* properly reconstruct framebuffers.
*/
return intel_crtc->active && crtc->primary->fb &&
- intel_crtc->config.adjusted_mode.crtc_clock;
+ intel_crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -904,7 +907,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- return intel_crtc->config.cpu_transcoder;
+ return intel_crtc->config->cpu_transcoder;
}
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
@@ -946,7 +949,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1024,7 +1027,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
reg = DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1040,7 +1043,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
mutex_unlock(&dev_priv->dpio_lock);
cur_state = val & DSI_PLL_VCO_EN;
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1052,10 +1055,10 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (crtc->config.shared_dpll < 0)
+ if (crtc->config->shared_dpll < 0)
return NULL;
- return &dev_priv->shared_dplls[crtc->config.shared_dpll];
+ return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}
/* For ILK+ */
@@ -1071,7 +1074,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
return;
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
@@ -1095,7 +1098,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
cur_state = !!(val & FDI_TX_ENABLE);
}
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1112,7 +1115,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1135,7 +1138,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
- WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+ I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
@@ -1148,7 +1151,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_PLL_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1190,7 +1193,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
- WARN(panel_pipe == pipe && locked,
+ I915_STATE_WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
@@ -1206,7 +1209,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
@@ -1236,7 +1239,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
cur_state = !!(val & PIPECONF_ENABLE);
}
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
@@ -1251,7 +1254,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
reg = DSPCNTR(plane);
val = I915_READ(reg);
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
plane_name(plane), state_string(state), state_string(cur_state));
}
@@ -1271,7 +1274,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
- WARN(val & DISPLAY_PLANE_ENABLE,
+ I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
@@ -1283,7 +1286,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
- WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
"plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
@@ -1299,7 +1302,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
if (INTEL_INFO(dev)->gen >= 9) {
for_each_sprite(pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
- WARN(val & PLANE_CTL_ENABLE,
+ I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
@@ -1307,20 +1310,20 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
- WARN(val & SP_ENABLE,
+ I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
- WARN(val & SPRITE_ENABLE,
+ I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
- WARN(val & DVS_ENABLE,
+ I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
@@ -1328,7 +1331,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
- if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
drm_crtc_vblank_put(crtc);
}
@@ -1337,12 +1340,12 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
- WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+ I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
- WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+ I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
@@ -1355,7 +1358,7 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
- WARN(enabled,
+ I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
@@ -1435,11 +1438,11 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1448,11 +1451,11 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1469,13 +1472,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
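[Editor's note, not part of the patch: the assert_* hunks above swap WARN/WARN_ON for I915_STATE_WARN/I915_STATE_WARN_ON. The real macro is defined elsewhere in this series; the sketch below is a hypothetical, self-contained stand-in that only shows the general shape of such a wrapper, i.e. a state-check warning whose loudness can be toggled at runtime while still returning the condition to the caller.]

/* Hypothetical stand-in, not the actual I915_STATE_WARN definition. */
#include <stdbool.h>
#include <stdio.h>

static bool verbose_state_checks = true;

#define STATE_WARN(cond, fmt, ...) ({					\
	bool warn__ = (cond);						\
	if (warn__) {							\
		if (verbose_state_checks)				\
			fprintf(stderr, "WARN: " fmt, ##__VA_ARGS__);	\
		else							\
			fprintf(stderr, "debug: " fmt, ##__VA_ARGS__);	\
	}								\
	warn__;								\
})

#define STATE_WARN_ON(cond) STATE_WARN((cond), "%s\n", #cond)

int main(void)
{
	int cur_state = 0, state = 1;

	/* mirrors the pattern used by the assert_* helpers in the hunks above */
	STATE_WARN(cur_state != state,
		   "PLL state assertion failure (expected %d, current %d)\n",
		   state, cur_state);
	return 0;
}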
@@ -1505,7 +1508,7 @@ static void intel_init_dpio(struct drm_device *dev)
}
static void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1544,7 +1547,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
}
static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1599,7 +1602,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config.dpll_hw_state.dpll;
+ u32 dpll = crtc->config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1629,7 +1632,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
- crtc->config.dpll_hw_state.dpll_md);
+ crtc->config->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
@@ -2034,7 +2037,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
else
assert_pll_enabled(dev_priv, pipe);
else {
- if (crtc->config.has_pch_encoder) {
+ if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
@@ -2068,7 +2071,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
static void intel_disable_pipe(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
u32 val;
@@ -2090,7 +2093,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
- if (crtc->config.double_wide)
+ if (crtc->config->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
@@ -2165,7 +2168,8 @@ static void intel_disable_primary_hw_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- assert_pipe_enabled(dev_priv, intel_crtc->pipe);
+ if (WARN_ON(!intel_crtc->active))
+ return;
if (!intel_crtc->primary_enabled)
return;
@@ -2185,11 +2189,12 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+int
+intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
{
int tile_height;
- tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+ tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
return ALIGN(height, tile_height);
}
@@ -2312,7 +2317,7 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
}
}
-int intel_format_to_fourcc(int format)
+static int i9xx_format_to_fourcc(int format)
{
switch (format) {
case DISPPLANE_8BPP:
@@ -2333,8 +2338,35 @@ int intel_format_to_fourcc(int format)
}
}
-static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+ switch (format) {
+ case PLANE_CTL_FORMAT_RGB_565:
+ return DRM_FORMAT_RGB565;
+ default:
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR8888;
+ else
+ return DRM_FORMAT_XBGR8888;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB8888;
+ else
+ return DRM_FORMAT_XRGB8888;
+ }
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ if (rgb_order)
+ return DRM_FORMAT_XBGR2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
+}
+
+static bool
+intel_alloc_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
@@ -2349,10 +2381,9 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
if (!obj)
return false;
- if (plane_config->tiled) {
- obj->tiling_mode = I915_TILING_X;
+ obj->tiling_mode = plane_config->tiling;
+ if (obj->tiling_mode == I915_TILING_X)
obj->stride = crtc->base.primary->fb->pitches[0];
- }
mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
mode_cmd.width = crtc->base.primary->fb->width;
@@ -2379,8 +2410,9 @@ out_unref_obj:
return false;
}
-static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_plane_config *plane_config)
+static void
+intel_find_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2468,13 +2500,13 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
+ ((intel_crtc->config->pipe_src_h - 1) << 16) |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
+ ((intel_crtc->config->pipe_src_h - 1) << 16) |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
@@ -2529,17 +2561,17 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
intel_crtc->dspaddr_offset = linear_offset;
}
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
- x += (intel_crtc->config.pipe_src_w - 1);
- y += (intel_crtc->config.pipe_src_h - 1);
+ x += (intel_crtc->config->pipe_src_w - 1);
+ y += (intel_crtc->config->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config.pipe_src_w - 1) * pixel_size;
+ (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+ (intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
I915_WRITE(reg, dspcntr);
@@ -2631,18 +2663,18 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (intel_crtc->config.pipe_src_w - 1);
- y += (intel_crtc->config.pipe_src_h - 1);
+ x += (intel_crtc->config->pipe_src_w - 1);
+ y += (intel_crtc->config->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config.pipe_src_w - 1) * pixel_size;
+ (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+ (intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
}
@@ -2728,7 +2760,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
}
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
@@ -2741,8 +2773,8 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
- (intel_crtc->config.pipe_src_h - 1) << 16 |
- (intel_crtc->config.pipe_src_w - 1));
+ (intel_crtc->config->pipe_src_h - 1) << 16 |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
@@ -2938,85 +2970,20 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
* then update the pipesrc and pfit state, even on the flip path.
*/
- adjusted_mode = &crtc->config.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
I915_WRITE(PIPESRC(crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
- if (!crtc->config.pch_pfit.enabled &&
+ if (!crtc->config->pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(crtc->pipe), 0);
I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
}
- crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
- crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-}
-
-static int
-intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
- int ret;
-
- if (intel_crtc_has_pending_flip(crtc)) {
- DRM_ERROR("pipe is still busy with an old pageflip\n");
- return -EBUSY;
- }
-
- /* no fb bound */
- if (!fb) {
- DRM_ERROR("No FB bound\n");
- return 0;
- }
-
- if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
- DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
- plane_name(intel_crtc->plane),
- INTEL_INFO(dev)->num_pipes);
- return -EINVAL;
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, intel_fb_obj(fb),
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret != 0) {
- DRM_ERROR("pin & fence failed\n");
- return ret;
- }
-
- dev_priv->display.update_primary_plane(crtc, fb, x, y);
-
- if (intel_crtc->active)
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
-
- crtc->primary->fb = fb;
- crtc->x = x;
- crtc->y = y;
-
- if (old_fb) {
- if (intel_crtc->active && old_fb != fb)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
- }
-
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
+ crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
@@ -3063,7 +3030,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
return crtc->base.enabled && crtc->active &&
- crtc->config.has_pch_encoder;
+ crtc->config->has_pch_encoder;
}
static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -3118,7 +3085,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
@@ -3216,7 +3183,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -3367,7 +3334,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
@@ -3455,7 +3422,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
@@ -3639,7 +3606,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -3728,7 +3695,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
@@ -3774,7 +3741,7 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
case PIPE_A:
break;
case PIPE_B:
- if (intel_crtc->config.fdi_lanes > 2)
+ if (intel_crtc->config->fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
@@ -3826,7 +3793,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
+ if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
temp |= sel;
else
temp &= ~sel;
@@ -3849,7 +3816,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
+ if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -3890,7 +3857,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
@@ -3920,10 +3887,11 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
WARN_ON(pll->active);
}
- crtc->config.shared_dpll = DPLL_ID_PRIVATE;
+ crtc->config->shared_dpll = DPLL_ID_PRIVATE;
}
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll;
@@ -3949,7 +3917,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
if (pll->new_config->crtc_mask == 0)
continue;
- if (memcmp(&crtc->new_config->dpll_hw_state,
+ if (memcmp(&crtc_state->dpll_hw_state,
&pll->new_config->hw_state,
sizeof(pll->new_config->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
@@ -3974,9 +3942,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
found:
if (pll->new_config->crtc_mask == 0)
- pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
+ pll->new_config->hw_state = crtc_state->dpll_hw_state;
- crtc->new_config->shared_dpll = i;
+ crtc_state->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
@@ -4073,10 +4041,10 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), PS_ENABLE);
- I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
- I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+ I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
@@ -4086,7 +4054,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -4096,12 +4064,12 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
-static void intel_enable_planes(struct drm_crtc *crtc)
+static void intel_enable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -4115,7 +4083,7 @@ static void intel_enable_planes(struct drm_crtc *crtc)
}
}
-static void intel_disable_planes(struct drm_crtc *crtc)
+static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -4125,7 +4093,7 @@ static void intel_disable_planes(struct drm_crtc *crtc)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
- intel_plane_disable(&intel_plane->base);
+ plane->funcs->disable_plane(plane);
}
}
@@ -4134,7 +4102,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.ips_enabled)
+ if (!crtc->config->ips_enabled)
return;
/* We can only enable IPS after we enable a plane and wait for a vblank */
@@ -4167,7 +4135,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.ips_enabled)
+ if (!crtc->config->ips_enabled)
return;
assert_plane_enabled(dev_priv, crtc->plane);
@@ -4216,7 +4184,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
+ if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
@@ -4259,14 +4227,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
intel_enable_primary_hw_plane(crtc->primary, crtc);
- intel_enable_planes(crtc);
+ intel_enable_sprite_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
/*
@@ -4288,13 +4256,13 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
hsw_disable_ips(intel_crtc);
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
+ intel_disable_sprite_planes(crtc);
intel_disable_primary_hw_plane(crtc->primary, crtc);
/*
@@ -4318,17 +4286,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n, NULL);
+ &intel_crtc->config->fdi_m_n, NULL);
}
ironlake_set_pipeconf(crtc);
@@ -4342,7 +4310,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
@@ -4363,18 +4331,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
ironlake_pch_enable(crtc);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
- assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
-
intel_crtc_enable_planes(crtc);
}
@@ -4429,19 +4397,19 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
- I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
- intel_crtc->config.pixel_multiplier - 1);
+ if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
+ I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+ intel_crtc->config->pixel_multiplier - 1);
}
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n, NULL);
+ &intel_crtc->config->fdi_m_n, NULL);
}
haswell_set_pipeconf(crtc);
@@ -4455,7 +4423,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
dev_priv->display.fdi_link_train(crtc);
@@ -4480,20 +4448,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config.dp_encoder_is_mst)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
}
- assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
-
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
@@ -4508,7 +4476,7 @@ static void skylake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), 0);
I915_WRITE(PS_WIN_POS(pipe), 0);
I915_WRITE(PS_WIN_SZ(pipe), 0);
@@ -4523,7 +4491,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4544,13 +4512,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
- if (intel_crtc->config.has_pch_encoder)
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
@@ -4561,7 +4529,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
ironlake_fdi_disable(crtc);
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -4591,7 +4559,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4601,27 +4569,27 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (!intel_crtc->active)
return;
intel_crtc_disable_planes(crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
- if (intel_crtc->config.has_pch_encoder)
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config.dp_encoder_is_mst)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
@@ -4633,7 +4601,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_ddi_fdi_disable(crtc);
}
@@ -4646,7 +4614,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -4664,9 +4632,9 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_config *pipe_config = &crtc->config;
+ struct intel_crtc_state *pipe_config = crtc->config;
- if (!crtc->config.gmch_pfit.control)
+ if (!pipe_config->gmch_pfit.control)
return;
/*
@@ -4745,8 +4713,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (intel_crtc->config.pch_pfit.enabled ||
- intel_crtc->config.pch_pfit.force_thru)
+ if (intel_crtc->config->pch_pfit.enabled ||
+ intel_crtc->config->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4909,7 +4877,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
cmd = 0;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(cdclk);
return;
}
@@ -4970,7 +4938,7 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
- intel_crtc->new_config->adjusted_mode.crtc_clock);
+ intel_crtc->new_config->base.adjusted_mode.crtc_clock);
}
return max_pixclk;
@@ -5038,12 +5006,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc, &intel_crtc->config);
+ chv_prepare_pll(intel_crtc, intel_crtc->config);
else
- vlv_prepare_pll(intel_crtc, &intel_crtc->config);
+ vlv_prepare_pll(intel_crtc, intel_crtc->config);
}
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
@@ -5067,9 +5035,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_enable_pll(intel_crtc, &intel_crtc->config);
+ chv_enable_pll(intel_crtc, intel_crtc->config);
else
- vlv_enable_pll(intel_crtc, &intel_crtc->config);
+ vlv_enable_pll(intel_crtc, intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5083,12 +5051,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
intel_crtc_enable_planes(crtc);
/* Underruns don't raise interrupts, so check manually. */
@@ -5100,8 +5068,8 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
+ I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -5119,7 +5087,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
@@ -5144,12 +5112,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
intel_crtc_enable_planes(crtc);
/*
@@ -5171,7 +5139,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.gmch_pfit.control)
+ if (!crtc->config->gmch_pfit.control)
return;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -5221,12 +5189,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
intel_disable_pipe(intel_crtc);
i9xx_pfit_disable(intel_crtc);
@@ -5251,7 +5219,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -5309,8 +5277,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -5318,14 +5284,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
- if (crtc->primary->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- i915_gem_track_fb(old_obj, NULL,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- crtc->primary->fb = NULL;
- }
+ crtc->primary->funcs->disable_plane(crtc->primary);
/* Update computed state. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -5382,25 +5341,25 @@ static void intel_connector_check_state(struct intel_connector *connector)
if (connector->mst_port)
return;
- WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
- WARN(connector->base.encoder != &encoder->base,
+ I915_STATE_WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
if (encoder) {
- WARN(!encoder->connectors_active,
+ I915_STATE_WARN(!encoder->connectors_active,
"encoder->connectors_active not set\n");
encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
+ I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
+ if (I915_STATE_WARN_ON(!encoder->base.crtc))
return;
crtc = encoder->base.crtc;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
+ I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+ I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
}
}
@@ -5438,7 +5397,7 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
}
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
@@ -5479,7 +5438,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return true;
case PIPE_C:
if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config.fdi_lanes <= 2) {
+ pipe_B_crtc->config->fdi_lanes <= 2) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
@@ -5497,10 +5456,10 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int lane, link_bw, fdi_dotclock;
bool setup_ok, needs_recompute = false;
@@ -5543,7 +5502,7 @@ retry:
}
static void hsw_compute_ips_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
@@ -5551,11 +5510,11 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
@@ -5800,30 +5759,31 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
}
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
- fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- crtc->new_config->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- crtc->new_config->dpll_hw_state.fp1 = fp2;
+ crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- crtc->new_config->dpll_hw_state.fp1 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp;
}
}
@@ -5876,7 +5836,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- enum transcoder transcoder = crtc->config.cpu_transcoder;
+ enum transcoder transcoder = crtc->config->cpu_transcoder;
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -5888,7 +5848,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* registers are not unnecessarily accessed).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
- crtc->config.has_drrs) {
+ crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -5905,15 +5865,15 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc)
{
- if (crtc->config.has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ if (crtc->config->has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
- &crtc->config.dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
+ &crtc->config->dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 dpll, dpll_md;
@@ -5936,7 +5896,7 @@ static void vlv_update_pll(struct intel_crtc *crtc,
}
static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5997,7 +5957,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (crtc->config.has_dp_encoder) {
+ if (pipe_config->has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -6027,7 +5987,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
}
static void chv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
@@ -6040,7 +6000,7 @@ static void chv_update_pll(struct intel_crtc *crtc,
}
static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6125,7 +6085,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- struct intel_crtc_config pipe_config = {
+ struct intel_crtc_state pipe_config = {
.pixel_multiplier = 1,
.dpll = *dpll,
};
@@ -6158,6 +6118,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
}
static void i9xx_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
@@ -6165,9 +6126,9 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
- struct dpll *clock = &crtc->new_config->dpll;
+ struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, reduced_clock);
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
@@ -6180,14 +6141,14 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= DPLLB_MODE_DAC_SERIAL;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- dpll |= (crtc->new_config->pixel_multiplier - 1)
+ dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc->new_config->has_dp_encoder)
+ if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -6215,7 +6176,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (crtc->new_config->sdvo_tv_clock)
+ if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
@@ -6224,25 +6185,26 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
- u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
}
}
static void i8xx_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
- struct dpll *clock = &crtc->new_config->dpll;
+ struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, reduced_clock);
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -6267,7 +6229,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6275,9 +6237,9 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -6335,12 +6297,12 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((intel_crtc->config.pipe_src_w - 1) << 16) |
- (intel_crtc->config.pipe_src_h - 1));
+ ((intel_crtc->config->pipe_src_w - 1) << 16) |
+ (intel_crtc->config->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6348,56 +6310,56 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
uint32_t tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HSYNC(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VTOTAL(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VSYNC(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->adjusted_mode.crtc_vtotal += 1;
- pipe_config->adjusted_mode.crtc_vblank_end += 1;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->base.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
}
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
- pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
- pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
+ pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
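/*
 * The "+ 1" terms above undo the hardware convention of storing each
 * timing value minus one, mirroring the "- 1" applied when the registers
 * are written (see the PIPESRC write in intel_set_pipe_timings() above).
 */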
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
- mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
- mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
- mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
- mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
- mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
- mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
- mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
- mode->flags = pipe_config->adjusted_mode.flags;
+ mode->flags = pipe_config->base.adjusted_mode.flags;
- mode->clock = pipe_config->adjusted_mode.crtc_clock;
- mode->flags |= pipe_config->adjusted_mode.flags;
+ mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+ mode->flags |= pipe_config->base.adjusted_mode.flags;
}
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -6412,17 +6374,17 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
- if (intel_crtc->config.double_wide)
+ if (intel_crtc->config->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
- if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+ if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
@@ -6447,7 +6409,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -6456,14 +6418,15 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
+ if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6495,7 +6458,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
if (is_dsi)
return 0;
- if (!crtc->new_config->clock_set) {
+ if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
/*
@@ -6506,7 +6469,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
- crtc->new_config->port_clock,
+ crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6527,23 +6490,23 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
- crtc->new_config->dpll.n = clock.n;
- crtc->new_config->dpll.m1 = clock.m1;
- crtc->new_config->dpll.m2 = clock.m2;
- crtc->new_config->dpll.p1 = clock.p1;
- crtc->new_config->dpll.p2 = clock.p2;
+ crtc_state->dpll.n = clock.n;
+ crtc_state->dpll.m1 = clock.m1;
+ crtc_state->dpll.m2 = clock.m2;
+ crtc_state->dpll.p1 = clock.p1;
+ crtc_state->dpll.p2 = clock.p2;
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(crtc,
+ i8xx_update_pll(crtc, crtc_state,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, crtc->new_config);
+ chv_update_pll(crtc, crtc_state);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(crtc, crtc->new_config);
+ vlv_update_pll(crtc, crtc_state);
} else {
- i9xx_update_pll(crtc,
+ i9xx_update_pll(crtc, crtc_state,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
@@ -6552,7 +6515,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6582,7 +6545,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6611,8 +6574,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = clock.dot / 5;
}
-static void i9xx_get_plane_config(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6620,27 +6584,30 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
- crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
- if (!crtc->base.primary->fb) {
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
+ fb = &intel_fb->base;
+
val = I915_READ(DSPCNTR(plane));
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
- plane_config->tiled = true;
+ plane_config->tiling = I915_TILING_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = intel_format_to_fourcc(pixel_format);
- crtc->base.primary->fb->pixel_format = fourcc;
- crtc->base.primary->fb->bits_per_pixel =
- drm_format_plane_cpp(fourcc, 0) * 8;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
if (INTEL_INFO(dev)->gen >= 4) {
- if (plane_config->tiled)
+ if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
offset = I915_READ(DSPLINOFF(plane));
@@ -6651,29 +6618,27 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
plane_config->base = base;
val = I915_READ(PIPESRC(pipe));
- crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
- crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
+ fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
- plane_config->tiled);
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
- plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height);
+ plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
- DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe, plane, crtc->base.primary->fb->width,
- crtc->base.primary->fb->height,
- crtc->base.primary->fb->bits_per_pixel, base,
- crtc->base.primary->fb->pitches[0],
+ DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), plane, fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
+ crtc->base.primary->fb = fb;
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6703,7 +6668,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7183,7 +7148,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
val = 0;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -7201,15 +7166,15 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.dither)
+ if (intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
@@ -7238,7 +7203,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
* consideration.
*/
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
/*
@@ -7262,7 +7227,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -7273,7 +7238,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -7286,15 +7251,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t val;
val = 0;
- if (IS_HASWELL(dev) && intel_crtc->config.dither)
+ if (IS_HASWELL(dev) && intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -7308,7 +7273,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
@@ -7326,7 +7291,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.dither)
+ if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
I915_WRITE(PIPEMISC(pipe), val);
@@ -7334,6 +7299,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
}
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
@@ -7356,7 +7322,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
*/
limit = intel_limit(intel_crtc, refclk);
ret = dev_priv->display.find_dpll(limit, intel_crtc,
- intel_crtc->new_config->port_clock,
+ crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
@@ -7395,6 +7361,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
}
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
u32 *fp,
intel_clock_t *reduced_clock, u32 *fp2)
{
@@ -7432,10 +7399,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
- } else if (intel_crtc->new_config->sdvo_tv_clock)
+ } else if (crtc_state->sdvo_tv_clock)
factor = 20;
- if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
+ if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
*fp |= FP_CB_TUNE;
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7448,20 +7415,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
+ dpll |= (crtc_state->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_crtc->new_config->has_dp_encoder)
+ if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (intel_crtc->new_config->dpll.p2) {
+ switch (crtc_state->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -7484,7 +7451,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
return dpll | DPLL_VCO_ENABLE;
}
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
@@ -7498,39 +7466,39 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
- ok = ironlake_compute_clocks(&crtc->base, &clock,
+ ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
&has_reduced_clock, &reduced_clock);
- if (!ok && !crtc->new_config->clock_set) {
+ if (!ok && !crtc_state->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
- if (!crtc->new_config->clock_set) {
- crtc->new_config->dpll.n = clock.n;
- crtc->new_config->dpll.m1 = clock.m1;
- crtc->new_config->dpll.m2 = clock.m2;
- crtc->new_config->dpll.p1 = clock.p1;
- crtc->new_config->dpll.p2 = clock.p2;
+ if (!crtc_state->clock_set) {
+ crtc_state->dpll.n = clock.n;
+ crtc_state->dpll.m1 = clock.m1;
+ crtc_state->dpll.m2 = clock.m2;
+ crtc_state->dpll.p1 = clock.p1;
+ crtc_state->dpll.p2 = clock.p2;
}
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (crtc->new_config->has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
+ if (crtc_state->has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- dpll = ironlake_compute_dpll(crtc,
+ dpll = ironlake_compute_dpll(crtc, crtc_state,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
- crtc->new_config->dpll_hw_state.dpll = dpll;
- crtc->new_config->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
- crtc->new_config->dpll_hw_state.fp1 = fp2;
+ crtc_state->dpll_hw_state.fp1 = fp2;
else
- crtc->new_config->dpll_hw_state.fp1 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(crtc);
+ pll = intel_get_shared_dpll(crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
@@ -7584,7 +7552,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* registers are not unnecessarily read).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
- crtc->config.has_drrs) {
+ crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -7605,9 +7573,9 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
}
void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- if (crtc->config.has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
@@ -7616,14 +7584,14 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
}
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
}
static void skylake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7638,8 +7606,80 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
}
}
+static void
+skylake_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset, stride_mult;
+ int pipe = crtc->pipe;
+ int fourcc, pixel_format;
+ int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ val = I915_READ(PLANE_CTL(pipe, 0));
+ if (val & PLANE_CTL_TILED_MASK)
+ plane_config->tiling = I915_TILING_X;
+
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+
+ base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
+ plane_config->base = base;
+
+ offset = I915_READ(PLANE_OFFSET(pipe, 0));
+
+ val = I915_READ(PLANE_SIZE(pipe, 0));
+ fb->height = ((val >> 16) & 0xfff) + 1;
+ fb->width = ((val >> 0) & 0x1fff) + 1;
+
+ val = I915_READ(PLANE_STRIDE(pipe, 0));
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ stride_mult = 64;
+ break;
+ case I915_TILING_X:
+ stride_mult = 512;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto error;
+ }
+ fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
+
+ plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
+ plane_config->size);
+
+ crtc->base.primary->fb = fb;
+ return;
+
+error:
+ kfree(fb);
+}
+
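/*
 * A worked example, assuming a hypothetical 3840x2160 XRGB8888
 * framebuffer: the byte pitch is 3840 * 4 = 15360, so the PLANE_STRIDE
 * field read back above holds 15360 / 64 = 240 for a linear surface or
 * 15360 / 512 = 30 for an X-tiled one, and multiplying by stride_mult
 * recovers fb->pitches[0] = 15360 either way.
 */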
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7662,68 +7702,71 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static void ironlake_get_plane_config(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static void
+ironlake_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset;
- int pipe = crtc->pipe, plane = crtc->plane;
+ int pipe = crtc->pipe;
int fourcc, pixel_format;
int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
- crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
- if (!crtc->base.primary->fb) {
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
- val = I915_READ(DSPCNTR(plane));
+ fb = &intel_fb->base;
+
+ val = I915_READ(DSPCNTR(pipe));
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
- plane_config->tiled = true;
+ plane_config->tiling = I915_TILING_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = intel_format_to_fourcc(pixel_format);
- crtc->base.primary->fb->pixel_format = fourcc;
- crtc->base.primary->fb->bits_per_pixel =
- drm_format_plane_cpp(fourcc, 0) * 8;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- offset = I915_READ(DSPOFFSET(plane));
+ offset = I915_READ(DSPOFFSET(pipe));
} else {
- if (plane_config->tiled)
- offset = I915_READ(DSPTILEOFF(plane));
+ if (plane_config->tiling)
+ offset = I915_READ(DSPTILEOFF(pipe));
else
- offset = I915_READ(DSPLINOFF(plane));
+ offset = I915_READ(DSPLINOFF(pipe));
}
plane_config->base = base;
val = I915_READ(PIPESRC(pipe));
- crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
- crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
+ fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
- plane_config->tiled);
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
- plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height);
+ plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
- DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe, plane, crtc->base.primary->fb->width,
- crtc->base.primary->fb->height,
- crtc->base.primary->fb->bits_per_pixel, base,
- crtc->base.primary->fb->pitches[0],
+ DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
+
+ crtc->base.primary->fb = fb;
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7810,24 +7853,24 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
- WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
- WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
- WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
- WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
- WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev))
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
- WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
- WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
@@ -7835,7 +7878,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
@@ -7933,19 +7976,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/*
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
- *
- * The other problem is that hsw_restore_lcpll() is called as part of
- * the runtime PM resume sequence, so we can't just call
- * gen6_gt_force_wake_get() because that function calls
- * intel_runtime_pm_get(), and we can't change the runtime PM refcount
- * while we are on the resume sequence. So to solve this problem we have
- * to call special forcewake code that doesn't touch runtime PM and
- * doesn't enable the forcewake delayed work.
*/
- spin_lock_irq(&dev_priv->uncore.lock);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7975,11 +8007,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- /* See the big comment above. */
- spin_lock_irq(&dev_priv->uncore.lock);
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
/*
@@ -8041,9 +8069,10 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
- if (!intel_ddi_pll_select(crtc))
+ if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
crtc->lowfreq_avail = false;
@@ -8053,14 +8082,23 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- u32 temp;
+ u32 temp, dpll_ctl1;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
switch (pipe_config->ddi_pll_sel) {
+ case SKL_DPLL0:
+ /*
+ * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
+ * of the shared DPLL framework and thus needs to be read out
+ * separately
+ */
+ dpll_ctl1 = I915_READ(DPLL_CTRL1);
+ pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+ break;
case SKL_DPLL1:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
@@ -8075,7 +8113,7 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -8090,7 +8128,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8132,7 +8170,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8286,7 +8324,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(intel_crtc->cursor_width);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8295,7 +8333,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_PIPE_CSC_ENABLE;
}
- if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+ if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
cntl |= CURSOR_ROTATE_180;
if (intel_crtc->cursor_cntl != cntl) {
@@ -8326,10 +8364,10 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (on)
base = intel_crtc->cursor_addr;
- if (x >= intel_crtc->config.pipe_src_w)
+ if (x >= intel_crtc->config->pipe_src_w)
base = 0;
- if (y >= intel_crtc->config.pipe_src_h)
+ if (y >= intel_crtc->config->pipe_src_h)
base = 0;
if (x < 0) {
@@ -8357,7 +8395,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
- to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+ crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
base += (intel_crtc->cursor_height *
intel_crtc->cursor_width - 1) * 4;
}
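/*
 * For example, with a hypothetical 64x64 ARGB cursor the adjustment is
 * (64 * 64 - 1) * 4 = 16380 bytes, i.e. base ends up pointing at the
 * last 4-byte pixel of the cursor image for the 180 degree rotated
 * scanout.
 */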
@@ -8405,109 +8443,6 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
- struct drm_i915_gem_object *obj,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- unsigned old_width;
- uint32_t addr;
- int ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!obj) {
- DRM_DEBUG_KMS("cursor off\n");
- addr = 0;
- mutex_lock(&dev->struct_mutex);
- goto finish;
- }
-
- /* we only need to pin inside GTT if cursor is non-phy */
- mutex_lock(&dev->struct_mutex);
- if (!INTEL_INFO(dev)->cursor_needs_physical) {
- unsigned alignment;
-
- /*
- * Global gtt pte registers are special registers which actually
- * forward writes to a chunk of system memory. Which means that
- * there is no risk that the register values disappear as soon
- * as we call intel_runtime_pm_put(), so it is correct to wrap
- * only the pin/unpin/fence and not more.
- */
- intel_runtime_pm_get(dev_priv);
-
- /* Note that the w/a also requires 2 PTE of padding following
- * the bo. We currently fill all unused PTE with the shadow
- * page and so we should always have valid PTE following the
- * cursor preventing the VT-d warning.
- */
- alignment = 0;
- if (need_vtd_wa(dev))
- alignment = 64*1024;
-
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
- if (ret) {
- DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
- intel_runtime_pm_put(dev_priv);
- goto fail_locked;
- }
-
- ret = i915_gem_object_put_fence(obj);
- if (ret) {
- DRM_DEBUG_KMS("failed to release fence for cursor");
- intel_runtime_pm_put(dev_priv);
- goto fail_unpin;
- }
-
- addr = i915_gem_obj_ggtt_offset(obj);
-
- intel_runtime_pm_put(dev_priv);
- } else {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_object_attach_phys(obj, align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- goto fail_locked;
- }
- addr = obj->phys_handle->busaddr;
- }
-
- finish:
- if (intel_crtc->cursor_bo) {
- if (!INTEL_INFO(dev)->cursor_needs_physical)
- i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- }
-
- i915_gem_track_fb(intel_crtc->cursor_bo, obj,
- INTEL_FRONTBUFFER_CURSOR(pipe));
- mutex_unlock(&dev->struct_mutex);
-
- old_width = intel_crtc->cursor_width;
-
- intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = obj;
- intel_crtc->cursor_width = width;
- intel_crtc->cursor_height = height;
-
- if (intel_crtc->active) {
- if (old_width != width)
- intel_update_watermarks(crtc);
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
- }
-
- return 0;
-fail_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
-fail_locked:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -8730,7 +8665,7 @@ retry:
intel_crtc = to_intel_crtc(crtc);
intel_crtc->new_enabled = true;
- intel_crtc->new_config = &intel_crtc->config;
+ intel_crtc->new_config = intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -8771,7 +8706,7 @@ retry:
fail:
intel_crtc->new_enabled = crtc->enabled;
if (intel_crtc->new_enabled)
- intel_crtc->new_config = &intel_crtc->config;
+ intel_crtc->new_config = intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
@@ -8817,7 +8752,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
}
static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll = pipe_config->dpll_hw_state.dpll;
@@ -8834,7 +8769,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8941,7 +8876,7 @@ int intel_dotclock_calculate(int link_freq,
}
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
@@ -8954,7 +8889,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
* agree once we know their relationship in the encoder's
* get_config() function.
*/
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
&pipe_config->fdi_m_n);
}
@@ -8965,9 +8900,9 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
- struct intel_crtc_config pipe_config;
+ struct intel_crtc_state pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -9082,6 +9017,14 @@ out:
intel_runtime_pm_put(dev_priv);
}
+static void intel_crtc_set_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ kfree(crtc->config);
+ crtc->config = crtc_state;
+ crtc->base.state = &crtc_state->base;
+}
+
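/*
 * Usage sketch (assumed caller, for illustration): once a fresh state has
 * been computed, ownership is handed over with
 *
 *	intel_crtc_set_state(intel_crtc, pipe_config);
 *
 * which frees the previous intel_crtc_state and repoints both
 * crtc->config and crtc->base.state. Passing NULL, as intel_crtc_destroy()
 * does below, simply drops the current state.
 */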
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -9098,6 +9041,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
+ intel_crtc_set_state(intel_crtc, NULL);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -9115,7 +9059,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
+
+ if (work->flip_queued_req)
+ i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
@@ -9511,25 +9458,53 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
else if (i915.enable_execlists)
return true;
else
- return ring != obj->ring;
+ return ring != i915_gem_request_get_ring(obj->last_read_req);
}
-static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ const enum pipe pipe = intel_crtc->pipe;
+ u32 ctl, stride;
+
+ ctl = I915_READ(PLANE_CTL(pipe, 0));
+ ctl &= ~PLANE_CTL_TILED_MASK;
+ if (obj->tiling_mode == I915_TILING_X)
+ ctl |= PLANE_CTL_TILED_X;
+
+ /*
+ * The stride is expressed either in units of 64-byte chunks for
+ * linear buffers or in number of tiles for tiled buffers.
+ */
+ stride = fb->pitches[0] >> 6;
+ if (obj->tiling_mode == I915_TILING_X)
+ stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
+
+ /*
+ * Neither PLANE_CTL nor PLANE_STRIDE is updated on vblank; both take
+ * effect on PLANE_SURF updates, so the update is guaranteed to be atomic.
+ */
+ I915_WRITE(PLANE_CTL(pipe, 0), ctl);
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+ I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
+static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- bool atomic_update;
- u32 start_vbl_count;
u32 dspcntr;
u32 reg;
- intel_mark_page_flip_active(intel_crtc);
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
@@ -9544,26 +9519,50 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+/*
+ * XXX: This is the temporary way to update the plane registers until we get
+ * around to using the usual plane update functions for MMIO flips
+ */
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ bool atomic_update;
+ u32 start_vbl_count;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ skl_do_mmio_flip(intel_crtc);
+ else
+ /* use_mmio_flip() restricts MMIO flips to ilk+ */
+ ilk_do_mmio_flip(intel_crtc);
+
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
{
- struct intel_crtc *intel_crtc =
+ struct intel_crtc *crtc =
container_of(work, struct intel_crtc, mmio_flip.work);
- struct intel_engine_cs *ring;
- uint32_t seqno;
+ struct intel_mmio_flip *mmio_flip;
- seqno = intel_crtc->mmio_flip.seqno;
- ring = intel_crtc->mmio_flip.ring;
+ mmio_flip = &crtc->mmio_flip;
+ if (mmio_flip->req)
+ WARN_ON(__i915_wait_request(mmio_flip->req,
+ crtc->reset_counter,
+ false, NULL, NULL) != 0);
- if (seqno)
- WARN_ON(__i915_wait_seqno(ring, seqno,
- intel_crtc->reset_counter,
- false, NULL, NULL) != 0);
-
- intel_do_mmio_flip(intel_crtc);
+ intel_do_mmio_flip(crtc);
+ if (mmio_flip->req) {
+ mutex_lock(&crtc->base.dev->struct_mutex);
+ i915_gem_request_assign(&mmio_flip->req, NULL);
+ mutex_unlock(&crtc->base.dev->struct_mutex);
+ }
}
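/*
 * i915_gem_request_assign() is used here under the assumption that it
 * acts as a reference-counted pointer update: it takes a reference on the
 * new request (if any), drops the reference on the old one and stores the
 * new pointer, which is why the NULL assignment above is done under
 * struct_mutex.
 */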
static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -9575,8 +9574,8 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
- intel_crtc->mmio_flip.ring = obj->ring;
+ i915_gem_request_assign(&intel_crtc->mmio_flip.req,
+ obj->last_write_req);
schedule_work(&intel_crtc->mmio_flip.work);
@@ -9671,9 +9670,8 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
return false;
if (work->flip_ready_vblank == 0) {
- if (work->flip_queued_ring &&
- !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- work->flip_queued_seqno))
+ if (work->flip_queued_req &&
+ !i915_gem_request_completed(work->flip_queued_req, true))
return false;
work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@ -9726,6 +9724,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
@@ -9815,10 +9814,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = obj->ring;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
@@ -9838,16 +9837,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_unpin;
- work->flip_queued_seqno = obj->last_write_seqno;
- work->flip_queued_ring = obj->ring;
+ i915_gem_request_assign(&work->flip_queued_req,
+ obj->last_write_req);
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- work->flip_queued_seqno = intel_ring_get_seqno(ring);
- work->flip_queued_ring = ring;
+ i915_gem_request_assign(&work->flip_queued_req,
+ intel_ring_get_request(ring));
}
work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@ -9856,7 +9855,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_track_fb(work->old_fb_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
@@ -9884,8 +9883,7 @@ free_work:
if (ret == -EIO) {
out_hang:
- intel_crtc_wait_for_pending_flips(crtc);
- ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+ ret = intel_plane_restore(primary);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
@@ -9898,6 +9896,8 @@ out_hang:
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
+ .atomic_begin = intel_begin_crtc_commit,
+ .atomic_flush = intel_finish_crtc_commit,
};
/**
@@ -9927,7 +9927,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
crtc->new_enabled = crtc->base.enabled;
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -9960,7 +9960,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
static void
connected_sink_compute_bpp(struct intel_connector *connector,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
@@ -9987,7 +9987,7 @@ connected_sink_compute_bpp(struct intel_connector *connector,
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct intel_connector *connector;
@@ -10056,7 +10056,7 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
}
static void intel_dump_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
const char *context)
{
DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
@@ -10090,10 +10090,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->requested_mode);
+ drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+ drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
DRM_DEBUG_KMS("pipe src size: %dx%d\n",
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
@@ -10192,14 +10192,14 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
-static struct intel_crtc_config *
+static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
bool retry = true;
@@ -10217,8 +10217,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (!pipe_config)
return ERR_PTR(-ENOMEM);
- drm_mode_copy(&pipe_config->adjusted_mode, mode);
- drm_mode_copy(&pipe_config->requested_mode, mode);
+ drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
+ drm_mode_copy(&pipe_config->base.mode, mode);
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -10229,13 +10229,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->adjusted_mode.flags &
+ if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->adjusted_mode.flags &
+ if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
@@ -10254,9 +10254,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
- pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
- pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+ drm_crtc_get_hv_timing(&pipe_config->base.mode,
+ &pipe_config->pipe_src_w,
+ &pipe_config->pipe_src_h);
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
@@ -10264,7 +10264,8 @@ encoder_retry:
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
+ drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+ CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
@@ -10284,7 +10285,7 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+ pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -10441,7 +10442,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
- intel_crtc->new_config != &intel_crtc->config);
+ intel_crtc->new_config != intel_crtc->config);
WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
}
@@ -10493,8 +10494,8 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
static bool
intel_pipe_config_compare(struct drm_device *dev,
- struct intel_crtc_config *current_config,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *current_config,
+ struct intel_crtc_state *pipe_config)
{
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
@@ -10585,19 +10586,19 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
}
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
@@ -10608,17 +10609,17 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(has_audio);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
@@ -10668,7 +10669,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
#undef PIPE_CONF_CHECK_X
@@ -10742,7 +10743,7 @@ check_connector_state(struct drm_device *dev)
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
- WARN(&connector->new_encoder->base != connector->base.encoder,
+ I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
"connector's staged encoder doesn't match current encoder\n");
}
}
@@ -10762,9 +10763,9 @@ check_encoder_state(struct drm_device *dev)
encoder->base.base.id,
encoder->base.name);
- WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
- WARN(encoder->connectors_active && !encoder->base.crtc,
+ I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10783,19 +10784,19 @@ check_encoder_state(struct drm_device *dev)
if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
continue;
- WARN(!!encoder->base.crtc != enabled,
+ I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
- WARN(active && !encoder->base.crtc,
+ I915_STATE_WARN(active && !encoder->base.crtc,
"active encoder with no crtc\n");
- WARN(encoder->connectors_active != active,
+ I915_STATE_WARN(encoder->connectors_active != active,
"encoder's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, encoder->connectors_active);
active = encoder->get_hw_state(encoder, &pipe);
- WARN(active != encoder->connectors_active,
+ I915_STATE_WARN(active != encoder->connectors_active,
"encoder's hw state doesn't match sw tracking "
"(expected %i, found %i)\n",
encoder->connectors_active, active);
@@ -10804,7 +10805,7 @@ check_encoder_state(struct drm_device *dev)
continue;
tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- WARN(active && pipe != tracked_pipe,
+ I915_STATE_WARN(active && pipe != tracked_pipe,
"active encoder's pipe doesn't match"
"(expected %i, found %i)\n",
tracked_pipe, pipe);
@@ -10818,7 +10819,7 @@ check_crtc_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_crtc_config pipe_config;
+ struct intel_crtc_state pipe_config;
for_each_intel_crtc(dev, crtc) {
bool enabled = false;
@@ -10829,7 +10830,7 @@ check_crtc_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
- WARN(crtc->active && !crtc->base.enabled,
+ I915_STATE_WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
for_each_intel_encoder(dev, encoder) {
@@ -10840,10 +10841,10 @@ check_crtc_state(struct drm_device *dev)
active = true;
}
- WARN(active != crtc->active,
+ I915_STATE_WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
- WARN(enabled != crtc->base.enabled,
+ I915_STATE_WARN(enabled != crtc->base.enabled,
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
@@ -10863,16 +10864,16 @@ check_crtc_state(struct drm_device *dev)
encoder->get_config(encoder, &pipe_config);
}
- WARN(crtc->active != active,
+ I915_STATE_WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
if (active &&
- !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
- WARN(1, "pipe state doesn't match!\n");
+ !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(crtc, &pipe_config,
"[hw state]");
- intel_dump_pipe_config(crtc, &crtc->config,
+ intel_dump_pipe_config(crtc, crtc->config,
"[sw state]");
}
}
@@ -10897,14 +10898,14 @@ check_shared_dpll_state(struct drm_device *dev)
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
- WARN(pll->active > hweight32(pll->config.crtc_mask),
+ I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
pll->active, hweight32(pll->config.crtc_mask));
- WARN(pll->active && !pll->on,
+ I915_STATE_WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
- WARN(pll->on && !pll->active,
+ I915_STATE_WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
- WARN(pll->on != active,
+ I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
@@ -10914,14 +10915,14 @@ check_shared_dpll_state(struct drm_device *dev)
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
}
- WARN(pll->active != active_crtcs,
+ I915_STATE_WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
- WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
+ I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
hweight32(pll->config.crtc_mask), enabled_crtcs);
- WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
+ I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
@@ -10937,16 +10938,16 @@ intel_modeset_check_state(struct drm_device *dev)
check_shared_dpll_state(dev);
}
-void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock)
{
/*
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+ WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- pipe_config->adjusted_mode.crtc_clock, dotclock);
+ pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
static void update_scanline_offset(struct intel_crtc *crtc)
@@ -10972,7 +10973,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* one to the value.
*/
if (IS_GEN2(dev)) {
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
int vtotal;
vtotal = mode->crtc_vtotal;
@@ -10987,7 +10988,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = 1;
}
-static struct intel_crtc_config *
+static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
@@ -10995,7 +10996,7 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
- struct intel_crtc_config *pipe_config = NULL;
+ struct intel_crtc_state *pipe_config = NULL;
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
@@ -11020,10 +11021,40 @@ out:
return pipe_config;
}
+static int __intel_set_mode_setup_plls(struct drm_device *dev,
+ unsigned modeset_pipes,
+ unsigned disable_pipes)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned clear_pipes = modeset_pipes | disable_pipes;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
+ if (!dev_priv->display.crtc_compute_clock)
+ return 0;
+
+ ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ if (ret)
+ goto done;
+
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ struct intel_crtc_state *state = intel_crtc->new_config;
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+ state);
+ if (ret) {
+ intel_shared_dpll_abort_config(dev_priv);
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
@@ -11057,21 +11088,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
prepare_pipes &= ~disable_pipes;
}
- if (dev_priv->display.crtc_compute_clock) {
- unsigned clear_pipes = modeset_pipes | disable_pipes;
-
- ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
- if (ret)
- goto done;
-
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc);
- if (ret) {
- intel_shared_dpll_abort_config(dev_priv);
- goto done;
- }
- }
- }
+ ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
+ if (ret)
+ goto done;
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -11092,8 +11111,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
crtc->mode = *mode;
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
- to_intel_crtc(crtc)->config = *pipe_config;
- to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
+ intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);
/*
* Calculate and store various constants which
@@ -11101,7 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc,
- &pipe_config->adjusted_mode);
+ &pipe_config->base.adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
@@ -11114,26 +11132,15 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_plane *primary = intel_crtc->base.primary;
+ int vdisplay, hdisplay;
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
- if (ret != 0) {
- DRM_ERROR("pin & fence failed\n");
- mutex_unlock(&dev->struct_mutex);
- goto done;
- }
- if (old_fb)
- intel_unpin_fb_obj(old_obj);
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
- mutex_unlock(&dev->struct_mutex);
-
- crtc->primary->fb = fb;
- crtc->x = x;
- crtc->y = y;
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ ret = primary->funcs->update_plane(primary, &intel_crtc->base,
+ fb, 0, 0,
+ hdisplay, vdisplay,
+ x << 16, y << 16,
+ hdisplay << 16, vdisplay << 16);
}
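/*
 * Note on the ->update_plane() call above: the destination rectangle is in
 * whole pixels while the source rectangle is in Q16.16 fixed point, hence
 * the "<< 16" shifts. A trivial helper makes the convention explicit
 * (sketch, not part of this patch):
 */
static inline uint32_t example_pixels_to_16_16(int pixels)
{
	return (uint32_t)pixels << 16;	/* 5 pixels -> 0x00050000 */
}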
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11148,7 +11155,6 @@ done:
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
- kfree(pipe_config);
kfree(saved_mode);
return ret;
}
@@ -11156,7 +11162,7 @@ done:
static int intel_set_mode_pipes(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
@@ -11176,7 +11182,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
pipe_config = intel_modeset_compute_config(crtc, mode, fb,
@@ -11271,7 +11277,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
crtc->new_enabled = config->save_crtc_enabled[count++];
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -11483,7 +11489,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -11520,7 +11526,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
@@ -11577,7 +11583,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
goto fail;
} else if (pipe_config) {
if (pipe_config->has_audio !=
- to_intel_crtc(set->crtc)->config.has_audio)
+ to_intel_crtc(set->crtc)->config->has_audio)
config->mode_changed = true;
/*
@@ -11601,11 +11607,14 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+ struct drm_plane *primary = set->crtc->primary;
+ int vdisplay, hdisplay;
- intel_crtc_wait_for_pending_flips(set->crtc);
-
- ret = intel_pipe_set_base(set->crtc,
- set->x, set->y, set->fb);
+ drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+ ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
+ 0, 0, hdisplay, vdisplay,
+ set->x << 16, set->y << 16,
+ hdisplay << 16, vdisplay << 16);
/*
* We need to make sure the primary plane is re-enabled if it
@@ -11660,6 +11669,8 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
+ .atomic_duplicate_state = intel_crtc_duplicate_state,
+ .atomic_destroy_state = intel_crtc_destroy_state,
};
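/*
 * The two atomic state hooks added above are supplied elsewhere in this
 * series; roughly (a sketch under that assumption) they duplicate and free
 * the Intel-specific CRTC state that wraps drm_crtc_state:
 */
static struct drm_crtc_state *
example_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct intel_crtc_state *state;

	if (WARN_ON(!crtc->state))
		return NULL;

	/* base is embedded in intel_crtc_state, so one kmemdup covers both */
	state = kmemdup(crtc->state, sizeof(*state), GFP_KERNEL);
	return state ? &state->base : NULL;
}

static void example_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	kfree(container_of(state, struct intel_crtc_state, base));
}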
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -11762,93 +11773,149 @@ static void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
-static int
-intel_primary_plane_disable(struct drm_plane *plane)
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @plane: drm plane to prepare for
+ * @fb: framebuffer to prepare for presentation
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int
+intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ unsigned frontbuffer_bits = 0;
+ int ret = 0;
- if (!plane->fb)
+ if (!obj)
return 0;
- BUG_ON(!plane->crtc);
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
+ break;
+ }
- intel_crtc = to_intel_crtc(plane->crtc);
+ mutex_lock(&dev->struct_mutex);
- /*
- * Even though we checked plane->fb above, it's still possible that
- * the primary plane has been implicitly disabled because the crtc
- * coordinates given weren't visible, or because we detected
- * that it was 100% covered by a sprite plane. Or, the CRTC may be
- * off and we've set a fb, but haven't actually turned on the CRTC yet.
- * In either case, we need to unpin the FB and let the fb pointer get
- * updated, but otherwise we don't need to touch the hardware.
- */
- if (!intel_crtc->primary_enabled)
- goto disable_unpin;
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ INTEL_INFO(dev)->cursor_needs_physical) {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_object_attach_phys(obj, align);
+ if (ret)
+ DRM_DEBUG_KMS("failed to attach phys object\n");
+ } else {
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ }
- intel_crtc_wait_for_pending_flips(plane->crtc);
- intel_disable_primary_hw_plane(plane, plane->crtc);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
-disable_unpin:
- mutex_lock(&dev->struct_mutex);
- i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
- intel_unpin_fb_obj(intel_fb_obj(plane->fb));
mutex_unlock(&dev->struct_mutex);
- plane->fb = NULL;
- return 0;
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @fb: old framebuffer that was on plane
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+ if (WARN_ON(!obj))
+ return;
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR ||
+ !INTEL_INFO(dev)->cursor_needs_physical) {
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
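/*
 * prepare_fb/cleanup_fb are consumed through the plane helper vtable that
 * drm_plane_helper_add() installs further down; a rough sketch of that
 * table (the atomic_check/atomic_update entries are assumed here, they
 * live in another file of this series):
 */
static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
	.atomic_check = intel_plane_atomic_check,
	.atomic_update = intel_plane_atomic_update,
};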
static int
intel_check_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
+ int ret;
- return drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &state->visible);
-}
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
-static int
-intel_prepare_primary_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- int ret;
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ src, dest, clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &state->visible);
+ if (ret)
+ return ret;
- intel_crtc_wait_for_pending_flips(crtc);
+ if (intel_crtc->active) {
+ intel_crtc->atomic.wait_for_flips = true;
- if (intel_crtc_has_pending_flip(crtc)) {
- DRM_ERROR("pipe is still busy with an old pageflip\n");
- return -EBUSY;
- }
+ /*
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
+ */
+ if (intel_crtc->primary_enabled &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.plane == intel_crtc->plane &&
+ state->base.rotation != BIT(DRM_ROTATE_0)) {
+ intel_crtc->atomic.disable_fbc = true;
+ }
- if (old_obj != obj) {
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret != 0) {
- DRM_DEBUG_KMS("pin & fence failed\n");
- return ret;
+ if (state->visible) {
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
+ intel_crtc->atomic.wait_vblank = true;
}
+
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+
+ intel_crtc->atomic.update_fbc = true;
}
return 0;
@@ -11858,53 +11925,26 @@ static void
intel_commit_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_framebuffer *fb = state->base.fb;
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *old_fb = plane->fb;
+ struct intel_crtc *intel_crtc;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
- crtc->primary->fb = fb;
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
+ plane->fb = fb;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- intel_plane->crtc_x = state->orig_dst.x1;
- intel_plane->crtc_y = state->orig_dst.y1;
- intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
- intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
- intel_plane->src_x = state->orig_src.x1;
- intel_plane->src_y = state->orig_src.y1;
- intel_plane->src_w = drm_rect_width(&state->orig_src);
- intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
if (intel_crtc->active) {
- /*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
- */
- if (intel_crtc->primary_enabled &&
- INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
- intel_plane->rotation != BIT(DRM_ROTATE_0)) {
- intel_disable_fbc(dev);
- }
-
if (state->visible) {
- bool was_enabled = intel_crtc->primary_enabled;
-
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);
@@ -11912,14 +11952,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev) && !was_enabled)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
} else {
/*
* If clipping results in a non-visible primary plane,
@@ -11930,90 +11962,129 @@ intel_commit_primary_plane(struct drm_plane *plane,
*/
intel_disable_primary_hw_plane(plane, crtc);
}
+ }
+}
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane;
+ struct drm_plane *p;
+ unsigned fb_bits = 0;
+
+ /* Track fb's for any planes being disabled */
+ list_for_each_entry(p, &dev->mode_config.plane_list, head) {
+ intel_plane = to_intel_plane(p);
+
+ if (intel_crtc->atomic.disabled_planes &
+ (1 << drm_plane_index(p))) {
+ switch (p->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
+ break;
+ }
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
- if (old_fb && old_fb != fb) {
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ if (intel_crtc->atomic.wait_for_flips)
+ intel_crtc_wait_for_pending_flips(crtc);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (intel_crtc->atomic.disable_fbc)
+ intel_fbc_disable(dev);
+
+ if (intel_crtc->atomic.pre_disable_primary)
+ intel_pre_disable_primary(crtc);
+
+ if (intel_crtc->atomic.update_wm)
+ intel_update_watermarks(crtc);
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Perform vblank evasion around commit operation */
+ if (intel_crtc->active)
+ intel_crtc->atomic.evade =
+ intel_pipe_update_start(intel_crtc,
+ &intel_crtc->atomic.start_vbl_count);
}
-static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc)
{
- struct intel_plane_state state;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int ret;
+ struct drm_plane *p;
- state.crtc = crtc;
- state.fb = fb;
+ if (intel_crtc->atomic.evade)
+ intel_pipe_update_end(intel_crtc,
+ intel_crtc->atomic.start_vbl_count);
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
-
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
+ intel_runtime_pm_put(dev_priv);
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ if (intel_crtc->atomic.wait_vblank)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
- state.orig_src = state.src;
- state.orig_dst = state.dst;
+ intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
- ret = intel_check_primary_plane(plane, &state);
- if (ret)
- return ret;
+ if (intel_crtc->atomic.update_fbc) {
+ mutex_lock(&dev->struct_mutex);
+ intel_fbc_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
- ret = intel_prepare_primary_plane(plane, &state);
- if (ret)
- return ret;
+ if (intel_crtc->atomic.post_enable_primary)
+ intel_post_enable_primary(crtc);
- intel_commit_primary_plane(plane, &state);
+ drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
+ if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
+ intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
+ false, false);
- return 0;
+ memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
}
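/*
 * Sketch of how a single plane update would sequence the hooks above
 * (the ordering is an editor assumption; in the series proper this is
 * driven by the plane/atomic helpers rather than called by hand):
 */
static int example_plane_update_flow(struct drm_plane *plane,
				     struct intel_plane_state *state)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_crtc *crtc = state->base.crtc ?: plane->crtc;
	struct drm_framebuffer *old_fb = plane->fb;
	int ret;

	ret = intel_plane->check_plane(plane, state);	/* e.g. intel_check_primary_plane */
	if (ret)
		return ret;

	ret = intel_prepare_plane_fb(plane, state->base.fb);
	if (ret)
		return ret;

	intel_begin_crtc_commit(crtc);			/* wait for flips, start vblank evasion */
	intel_plane->commit_plane(plane, state);	/* program the hardware plane */
	intel_finish_crtc_commit(crtc);			/* end evasion, flip frontbuffer bits */

	if (old_fb)
		intel_cleanup_plane_fb(plane, old_fb);	/* unpin the previous fb */

	return 0;
}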
-/* Common destruction function for both primary and cursor planes */
-static void intel_plane_destroy(struct drm_plane *plane)
+/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
drm_plane_cleanup(plane);
kfree(intel_plane);
}
-static const struct drm_plane_funcs intel_primary_plane_funcs = {
- .update_plane = intel_primary_plane_setplane,
- .disable_plane = intel_primary_plane_disable,
+const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = drm_plane_helper_update,
+ .disable_plane = drm_plane_helper_disable,
.destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+
};
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *primary;
+ struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
int num_formats;
@@ -12021,11 +12092,19 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (primary == NULL)
return NULL;
+ state = intel_create_plane_state(&primary->base);
+ if (!state) {
+ kfree(primary);
+ return NULL;
+ }
+ primary->base.state = &state->base;
+
primary->can_scale = false;
primary->max_downscale = 1;
primary->pipe = pipe;
primary->plane = pipe;
- primary->rotation = BIT(DRM_ROTATE_0);
+ primary->check_plane = intel_check_primary_plane;
+ primary->commit_plane = intel_commit_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
@@ -12038,7 +12117,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
}
drm_universal_plane_init(dev, &primary->base, 0,
- &intel_primary_plane_funcs,
+ &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
@@ -12051,38 +12130,32 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (dev->mode_config.rotation_property)
drm_object_attach_property(&primary->base.base,
dev->mode_config.rotation_property,
- primary->rotation);
+ state->base.rotation);
}
- return &primary->base;
-}
-
-static int
-intel_cursor_plane_disable(struct drm_plane *plane)
-{
- if (!plane->fb)
- return 0;
-
- BUG_ON(!plane->crtc);
+ drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
- return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+ return &primary->base;
}
static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_device *dev = plane->dev;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- int crtc_w, crtc_h;
+ struct intel_crtc *intel_crtc;
unsigned stride;
int ret;
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
ret = drm_plane_helper_check_update(plane, crtc, fb,
src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
@@ -12094,18 +12167,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
/* if we want to turn off the cursor ignore width and height */
if (!obj)
- return 0;
+ goto finish;
/* Check for which cursor types we support */
- crtc_w = drm_rect_width(&state->orig_dst);
- crtc_h = drm_rect_height(&state->orig_dst);
- if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
- DRM_DEBUG("Cursor dimension not supported\n");
+ if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+ DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+ state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
}
- stride = roundup_pow_of_two(crtc_w) * 4;
- if (obj->base.size < stride * crtc_h) {
+ stride = roundup_pow_of_two(state->base.crtc_w) * 4;
+ if (obj->base.size < stride * state->base.crtc_h) {
DRM_DEBUG_KMS("buffer is too small\n");
return -ENOMEM;
}
@@ -12121,113 +12193,84 @@ intel_check_cursor_plane(struct drm_plane *plane,
}
mutex_unlock(&dev->struct_mutex);
+finish:
+ if (intel_crtc->active) {
+ if (intel_crtc->cursor_width != state->base.crtc_w)
+ intel_crtc->atomic.update_wm = true;
+
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
+ }
+
return ret;
}
-static int
+static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_device *dev = plane->dev;
+ struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- int crtc_w, crtc_h;
-
- crtc->cursor_x = state->orig_dst.x1;
- crtc->cursor_y = state->orig_dst.y1;
-
- intel_plane->crtc_x = state->orig_dst.x1;
- intel_plane->crtc_y = state->orig_dst.y1;
- intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
- intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
- intel_plane->src_x = state->orig_src.x1;
- intel_plane->src_y = state->orig_src.y1;
- intel_plane->src_w = drm_rect_width(&state->orig_src);
- intel_plane->src_h = drm_rect_height(&state->orig_src);
- intel_plane->obj = obj;
-
- if (fb != crtc->cursor->fb) {
- crtc_w = drm_rect_width(&state->orig_dst);
- crtc_h = drm_rect_height(&state->orig_dst);
- return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
- } else {
- intel_crtc_update_cursor(crtc, state->visible);
-
- intel_frontbuffer_flip(crtc->dev,
- INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
-
- return 0;
- }
-}
-
-static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane_state state;
- int ret;
+ struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
+ uint32_t addr;
- state.crtc = crtc;
- state.fb = fb;
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
+ plane->fb = state->base.fb;
+ crtc->cursor_x = state->base.crtc_x;
+ crtc->cursor_y = state->base.crtc_y;
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
+ intel_plane->obj = obj;
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ if (intel_crtc->cursor_bo == obj)
+ goto update;
- state.orig_src = state.src;
- state.orig_dst = state.dst;
+ if (!obj)
+ addr = 0;
+ else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ addr = i915_gem_obj_ggtt_offset(obj);
+ else
+ addr = obj->phys_handle->busaddr;
- ret = intel_check_cursor_plane(plane, &state);
- if (ret)
- return ret;
+ intel_crtc->cursor_addr = addr;
+ intel_crtc->cursor_bo = obj;
+update:
+ intel_crtc->cursor_width = state->base.crtc_w;
+ intel_crtc->cursor_height = state->base.crtc_h;
- return intel_commit_cursor_plane(plane, &state);
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, state->visible);
}
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_cursor_plane_update,
- .disable_plane = intel_cursor_plane_disable,
- .destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property,
-};
-
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *cursor;
+ struct intel_plane_state *state;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
if (cursor == NULL)
return NULL;
+ state = intel_create_plane_state(&cursor->base);
+ if (!state) {
+ kfree(cursor);
+ return NULL;
+ }
+ cursor->base.state = &state->base;
+
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
- cursor->rotation = BIT(DRM_ROTATE_0);
+ cursor->check_plane = intel_check_cursor_plane;
+ cursor->commit_plane = intel_commit_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_cursor_plane_funcs,
+ &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
@@ -12241,9 +12284,11 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
- cursor->rotation);
+ state->base.rotation);
}
+ drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
return &cursor->base;
}
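/*
 * intel_create_plane_state() is referenced above but defined elsewhere in
 * this series; it likely amounts to the following (sketch under that
 * assumption):
 */
static struct intel_plane_state *
example_create_plane_state(struct drm_plane *plane)
{
	struct intel_plane_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->base.plane = plane;
	state->base.rotation = BIT(DRM_ROTATE_0);

	return state;
}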
@@ -12251,6 +12296,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
@@ -12259,6 +12305,11 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (intel_crtc == NULL)
return;
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state)
+ goto fail;
+ intel_crtc_set_state(intel_crtc, crtc_state);
+
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
goto fail;
@@ -12311,6 +12362,7 @@ fail:
drm_plane_cleanup(primary);
if (cursor)
drm_plane_cleanup(cursor);
+ kfree(crtc_state);
kfree(intel_crtc);
}
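/*
 * intel_crtc_set_state() used above is not shown in this hunk; a plausible
 * sketch (assumption) is that it swaps in the new state and keeps
 * crtc->config and crtc->base.state pointing at the same object:
 */
static void example_crtc_set_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	kfree(crtc->config);
	crtc->config = crtc_state;
	crtc->base.state = crtc_state ? &crtc_state->base : NULL;
}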
@@ -12383,28 +12435,6 @@ static bool has_edp_a(struct drm_device *dev)
return true;
}
-const char *intel_output_name(int output)
-{
- static const char *names[] = {
- [INTEL_OUTPUT_UNUSED] = "Unused",
- [INTEL_OUTPUT_ANALOG] = "Analog",
- [INTEL_OUTPUT_DVO] = "DVO",
- [INTEL_OUTPUT_SDVO] = "SDVO",
- [INTEL_OUTPUT_LVDS] = "LVDS",
- [INTEL_OUTPUT_TVOUT] = "TV",
- [INTEL_OUTPUT_HDMI] = "HDMI",
- [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
- [INTEL_OUTPUT_EDP] = "eDP",
- [INTEL_OUTPUT_DSI] = "DSI",
- [INTEL_OUTPUT_UNKNOWN] = "Unknown",
- };
-
- if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
- return "Invalid";
-
- return names[output];
-}
-
static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12428,6 +12458,7 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
bool dpd_is_edp = false;
intel_lvds_init(dev);
@@ -12491,14 +12522,16 @@ static void intel_setup_outputs(struct drm_device *dev)
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*/
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
+ !intel_dp_is_edp(dev, PORT_B))
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
+ !intel_dp_is_edp(dev, PORT_C))
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
@@ -12556,6 +12589,37 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ /*
+ * FIXME: We don't have full atomic support yet, but we want to be
+ * able to enable/test plane updates via the atomic interface in the
+ * meantime. However as soon as we flip DRIVER_ATOMIC on, the DRM core
+ * will take some atomic codepaths to look up properties during
+ * drmModeGetConnector() that unconditionally dereference
+ * connector->state.
+ *
+ * We create a dummy connector state here for each connector to ensure
+ * the DRM core doesn't try to dereference a NULL connector->state.
+ * The actual connector properties will never be updated or contain
+ * useful information, but since we're doing this specifically for
+ * testing/debug of the plane operations (and only when a specific
+ * kernel module option is given), that shouldn't really matter.
+ *
+ * Once atomic support for crtcs + connectors lands, this loop should
+ * be removed since we'll be setting up real connector state, which
+ * will contain Intel-specific properties.
+ */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ if (!WARN_ON(connector->state)) {
+ connector->state =
+ kzalloc(sizeof(*connector->state),
+ GFP_KERNEL);
+ }
+ }
+ }
+
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
@@ -12696,8 +12760,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- aligned_height = intel_align_height(dev, mode_cmd->height,
- obj->tiling_mode);
+ aligned_height = intel_fb_align_height(dev, mode_cmd->height,
+ obj->tiling_mode);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -12739,6 +12803,8 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
+ .atomic_check = intel_atomic_check,
+ .atomic_commit = intel_atomic_commit,
};
/* Set up chip specific display functions */
@@ -12757,23 +12823,32 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.find_dpll = i9xx_find_best_dpll;
- if (HAS_DDI(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
- dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ skylake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- if (INTEL_INFO(dev)->gen >= 9)
- dev_priv->display.update_primary_plane =
- skylake_update_primary_plane;
- else
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
+ dev_priv->display.update_primary_plane =
+ skylake_update_primary_plane;
+ } else if (HAS_DDI(dev)) {
+ dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ ironlake_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock =
+ haswell_crtc_compute_clock;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = ironlake_crtc_off;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
- dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ ironlake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
@@ -12783,7 +12858,8 @@ static void intel_init_display(struct drm_device *dev)
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -12792,7 +12868,8 @@ static void intel_init_display(struct drm_device *dev)
i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -13147,7 +13224,7 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
/* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
@@ -13164,8 +13241,8 @@ void intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- if (dev_priv->display.get_plane_config) {
- dev_priv->display.get_plane_config(crtc,
+ if (dev_priv->display.get_initial_plane_config) {
+ dev_priv->display.get_initial_plane_config(crtc,
&crtc->plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -13229,7 +13306,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->config.cpu_transcoder);
+ reg = PIPECONF(crtc->config->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
@@ -13433,12 +13510,12 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
- memset(&crtc->config, 0, sizeof(crtc->config));
+ memset(crtc->config, 0, sizeof(*crtc->config));
- crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+ crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
crtc->active = dev_priv->display.get_pipe_config(crtc,
- &crtc->config);
+ crtc->config);
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
@@ -13475,7 +13552,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, crtc->config);
} else {
encoder->base.crtc = NULL;
}
@@ -13525,7 +13602,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
*/
for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
- intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
+ intel_mode_from_pipe_config(&crtc->base.mode,
+ crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
@@ -13540,7 +13618,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
- intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
+ intel_dump_pipe_config(crtc, crtc->config,
+ "[setup_hw_state]");
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -13664,7 +13743,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5cecc20efa71..a74aaf9242b9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -31,6 +31,7 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -1074,7 +1075,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
{
u32 ctrl1;
@@ -1101,7 +1102,7 @@ skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
}
static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
@@ -1118,7 +1119,7 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
static void
intel_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config, int link_bw)
+ struct intel_crtc_state *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
@@ -1151,11 +1152,11 @@ intel_dp_set_clock(struct intel_encoder *encoder,
bool
intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *intel_crtc = encoder->new_crtc;
@@ -1269,7 +1270,7 @@ found:
&pipe_config->dp_m_n);
if (intel_connector->panel.downclock_mode != NULL &&
- intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+ dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
@@ -1295,11 +1296,12 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
+ crtc->config->port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
- if (crtc->config.port_clock == 162000) {
+ if (crtc->config->port_clock == 162000) {
/* For a long time we've carried around a ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
@@ -1324,7 +1326,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
/*
* There are four kinds of DP registers:
@@ -1352,7 +1354,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
/* Split out the IBX/CPU vs CPT settings */
@@ -1558,7 +1560,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
- WARN(!vdd, "eDP port %c VDD already requested on\n",
+ I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->port));
}
@@ -1642,7 +1644,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@@ -2013,7 +2015,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dp_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
@@ -2050,7 +2052,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
tmp & DP_COLOR_RANGE_16_235)
@@ -2073,7 +2075,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
@@ -2102,9 +2104,12 @@ static void intel_disable_dp(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
+ if (HAS_PSR(dev) && !HAS_DDI(dev))
+ intel_psr_disable(intel_dp);
+
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
@@ -2309,7 +2314,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
- if (crtc->config.has_audio) {
+ if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder);
@@ -2329,6 +2334,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
+ intel_psr_enable(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -3515,8 +3521,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev)))
@@ -3541,8 +3545,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
* corresponding HDMI output on transcoder A.
@@ -3553,18 +3555,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
*/
DP &= ~DP_PIPEB_SELECT;
I915_WRITE(intel_dp->output_reg, DP);
-
- /* Changes to enable or select take place the vblank
- * after being written.
- */
- if (WARN_ON(crtc == NULL)) {
- /* We should never try to disable a port without a crtc
- * attached. For paranoia keep the code around for a
- * bit. */
- POSTING_READ(intel_dp->output_reg);
- msleep(50);
- } else
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ POSTING_READ(intel_dp->output_reg);
}
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -3769,7 +3760,7 @@ go_again:
intel_dp_stop_link_train(intel_dp);
}
- DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ DRM_DEBUG_KMS("got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (handled) {
@@ -3785,7 +3776,7 @@ go_again:
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
- DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
goto go_again;
}
} else
@@ -4306,7 +4297,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
- drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
@@ -4322,6 +4312,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp->edp_notifier.notifier_call = NULL;
}
}
+ drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
@@ -4396,7 +4387,9 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4416,7 +4409,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
return;
}
-bool
+enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -4424,7 +4417,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
- bool ret = true;
+ enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
@@ -4438,7 +4431,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
port_name(intel_dig_port->port));
- return false;
+ return IRQ_HANDLED;
}
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
@@ -4483,7 +4476,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
- ret = false;
+
+ ret = IRQ_HANDLED;
+
goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
@@ -4741,39 +4736,34 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
-void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
- struct intel_crtc_config *config = NULL;
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_dp *intel_dp = dev_priv->drrs.dp;
+ struct intel_crtc_state *config = NULL;
struct intel_crtc *intel_crtc = NULL;
- struct intel_connector *intel_connector = dev_priv->drrs.connector;
u32 reg, val;
- enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
+ enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
return;
}
- if (intel_connector == NULL) {
- DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
+ if (intel_dp == NULL) {
+ DRM_DEBUG_KMS("DRRS not supported.\n");
return;
}
/*
- * FIXME: This needs proper synchronization with psr state. But really
- * hard to tell without seeing the user of this function of this code.
- * Check locking and ordering once that lands.
+ * FIXME: This needs proper synchronization with psr state for some
+ * platforms that cannot have PSR and DRRS enabled at the same time.
*/
- if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
- DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
- return;
- }
- encoder = intel_attached_encoder(&intel_connector->base);
- intel_dp = enc_to_intel_dp(&encoder->base);
+ dig_port = dp_to_dig_port(intel_dp);
+ encoder = &dig_port->base;
intel_crtc = encoder->new_crtc;
if (!intel_crtc) {
@@ -4781,17 +4771,18 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- config = &intel_crtc->config;
+ config = intel_crtc->config;
- if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
+ if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
}
- if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
+ if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+ refresh_rate)
index = DRRS_LOW_RR;
- if (index == intel_dp->drrs_state.refresh_rate_type) {
+ if (index == dev_priv->drrs.refresh_rate_type) {
DRM_DEBUG_KMS(
"DRRS requested for previously set RR...ignoring\n");
return;
@@ -4803,7 +4794,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
}
if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
- reg = PIPECONF(intel_crtc->config.cpu_transcoder);
+ reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
@@ -4814,30 +4805,154 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
I915_WRITE(reg, val);
}
+ dev_priv->drrs.refresh_rate_type = index;
+
+ DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
+
+void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->config->has_drrs) {
+ DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (WARN_ON(dev_priv->drrs.dp)) {
+ DRM_ERROR("DRRS already enabled\n");
+ goto unlock;
+ }
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+
+ dev_priv->drrs.dp = intel_dp;
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->config->has_drrs)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ intel_dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+
+ dev_priv->drrs.dp = NULL;
+ mutex_unlock(&dev_priv->drrs.mutex);
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
+static void intel_edp_drrs_downclock_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), drrs.work.work);
+ struct intel_dp *intel_dp;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+
+ if (!intel_dp)
+ goto unlock;
+
/*
- * mutex taken to ensure that there is no race between differnt
- * drrs calls trying to update refresh rate. This scenario may occur
- * in future when idleness detection based DRRS in kernel and
- * possible calls from user space to set differnt RR are made.
+ * The delayed work can race with an invalidate hence we need to
+ * recheck.
*/
- mutex_lock(&intel_dp->drrs_state.mutex);
+ if (dev_priv->drrs.busy_frontbuffer_bits)
+ goto unlock;
- intel_dp->drrs_state.refresh_rate_type = index;
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ intel_dp->attached_connector->panel.
+ downclock_mode->vrefresh);
- mutex_unlock(&intel_dp->drrs_state.mutex);
+unlock:
- DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (!dev_priv->drrs.dp)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+ intel_dp_set_drrs_state(dev_priv->dev,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+ }
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+void intel_edp_drrs_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (!dev_priv->drrs.dp)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
+ !dev_priv->drrs.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->drrs.work,
+ msecs_to_jiffies(1000));
+ mutex_unlock(&dev_priv->drrs.mutex);
}
static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector,
- struct drm_display_mode *fixed_mode)
+intel_dp_drrs_init(struct intel_connector *intel_connector,
+ struct drm_display_mode *fixed_mode)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
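The intel_edp_drrs_enable/disable/invalidate/flush helpers added above hook eDP DRRS into frontbuffer tracking: an invalidate forces the panel back to its high refresh rate and marks the affected frontbuffer bits busy, while a flush clears those bits and arms a delayed work that downclocks again after roughly a second of idleness. A condensed sketch of that state machine, with simplified names (busy_bits, low_rr, drrs_work, switch_to_*_rr) standing in for the dev_priv->drrs bookkeeping rather than code from this commit:

	/* invalidate: CPU is about to touch the frontbuffer */
	busy_bits |= frontbuffer_bits;
	if (low_rr)
		switch_to_high_rr();		/* hypothetical helper */

	/* flush: the screen has been updated */
	busy_bits &= ~frontbuffer_bits;
	if (!low_rr && !busy_bits)
		schedule_delayed_work(&drrs_work, msecs_to_jiffies(1000));

	/* drrs_work, one second later: still idle, so downclock */
	if (!busy_bits)
		switch_to_low_rr();		/* hypothetical helper */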
@@ -4859,13 +4974,13 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return NULL;
}
- dev_priv->drrs.connector = intel_connector;
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
- mutex_init(&intel_dp->drrs_state.mutex);
+ mutex_init(&dev_priv->drrs.mutex);
- intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
+ dev_priv->drrs.type = dev_priv->vbt.drrs_type;
- intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
+ dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
@@ -4885,7 +5000,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
+ dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
if (!is_edp(intel_dp))
return true;
@@ -4934,7 +5049,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
- intel_dig_port,
intel_connector, fixed_mode);
break;
}
@@ -5086,7 +5200,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
/* init MST on ports that can support it */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7f8c6a66680a..9f67a379a9a5 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -26,11 +26,12 @@
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -38,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
int bpp;
int lane_count, slots;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_connector *found = NULL, *intel_connector;
int mst_pbn;
@@ -157,7 +158,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
- I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(port),
+ intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
@@ -170,7 +172,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- intel_mst->port, intel_crtc->config.pbn, &slots);
+ intel_mst->port,
+ intel_crtc->config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
return;
@@ -216,14 +219,14 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
pipe_config->has_dp_encoder = true;
@@ -254,7 +257,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
default:
break;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
@@ -311,7 +314,9 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25fdbb16d4e0..eef79ccd0b7c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -143,7 +143,7 @@ struct intel_encoder {
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
- struct intel_crtc_config *);
+ struct intel_crtc_state *);
void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
@@ -159,7 +159,7 @@ struct intel_encoder {
* pre-filled the pipe config. Note that intel_encoder->base.crtc must
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -244,23 +244,28 @@ typedef struct dpll {
} intel_clock_t;
struct intel_plane_state {
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_plane_state base;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
- struct drm_rect orig_src;
- struct drm_rect orig_dst;
bool visible;
+
+ /*
+ * used only for sprite planes to determine when to implicitly
+ * enable/disable the primary plane
+ */
+ bool hides_primary;
};
-struct intel_plane_config {
- bool tiled;
+struct intel_initial_plane_config {
+ unsigned int tiling;
int size;
u32 base;
};
-struct intel_crtc_config {
+struct intel_crtc_state {
+ struct drm_crtc_state base;
+
/**
* quirks - bitfield with hw state readout quirks
*
@@ -273,16 +278,6 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
- /* User requested mode, only valid as a starting point to
- * compute adjusted_mode, except in the case of (S)DVO where
- * it's also for the output timings of the (S)DVO chip.
- * adjusted_mode will then correspond to the S(DVO) chip's
- * preferred input timings. */
- struct drm_display_mode requested_mode;
- /* Actual pipe timings ie. what we program into the pipe timing
- * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
- struct drm_display_mode adjusted_mode;
-
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
@@ -406,8 +401,7 @@ struct intel_pipe_wm {
};
struct intel_mmio_flip {
- u32 seqno;
- struct intel_engine_cs *ring;
+ struct drm_i915_gem_request *req;
struct work_struct work;
};
@@ -417,6 +411,32 @@ struct skl_pipe_wm {
uint32_t linetime;
};
+/*
+ * Tracking of operations that need to be performed at the beginning/end of an
+ * atomic commit, outside the atomic section where interrupts are disabled.
+ * These are generally operations that grab mutexes or might otherwise sleep
+ * and thus can't be run with interrupts disabled.
+ */
+struct intel_crtc_atomic_commit {
+ /* vblank evasion */
+ bool evade;
+ unsigned start_vbl_count;
+
+ /* Sleepable operations to perform before commit */
+ bool wait_for_flips;
+ bool disable_fbc;
+ bool pre_disable_primary;
+ bool update_wm;
+ unsigned disabled_planes;
+
+ /* Sleepable operations to perform after commit */
+ unsigned fb_bits;
+ bool wait_vblank;
+ bool update_fbc;
+ bool post_enable_primary;
+ unsigned update_sprite_watermarks;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
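The intel_crtc_atomic_commit structure introduced above collects work that must not run inside the vblank-evasion critical section (where interrupts are disabled), such as waiting for flips or updating FBC. A simplified sketch of the intended flow; the helper names below are placeholders, not functions from this commit:

	struct intel_crtc_atomic_commit *a = &intel_crtc->atomic;

	/* before the critical section: sleepable preparation */
	if (a->wait_for_flips)
		wait_for_pending_flips();	/* may take mutexes / sleep */

	/* critical section: vblank evasion, interrupts off, register writes only */
	write_plane_registers();

	/* after the critical section: sleepable follow-up */
	if (a->update_fbc)
		update_fbc();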
@@ -448,9 +468,9 @@ struct intel_crtc {
uint32_t cursor_size;
uint32_t cursor_base;
- struct intel_plane_config plane_config;
- struct intel_crtc_config config;
- struct intel_crtc_config *new_config;
+ struct intel_initial_plane_config plane_config;
+ struct intel_crtc_state *config;
+ struct intel_crtc_state *new_config;
bool new_enabled;
/* reset counter value when the last flip was submitted */
@@ -470,6 +490,8 @@ struct intel_crtc {
int scanline_offset;
struct intel_mmio_flip mmio_flip;
+
+ struct intel_crtc_atomic_commit atomic;
};
struct intel_plane_wm_parameters {
@@ -487,11 +509,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y;
- uint32_t src_w, src_h;
- unsigned int rotation;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
@@ -500,6 +517,12 @@ struct intel_plane {
*/
struct intel_plane_wm_parameters wm;
+ /*
+ * NOTE: Do not place new plane state fields here (e.g., when adding
+ * new plane properties). New runtime state should now be placed in
+ * the intel_plane_state structure and accessed via drm_plane->state.
+ */
+
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -510,6 +533,10 @@ struct intel_plane {
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
+ int (*check_plane)(struct drm_plane *plane,
+ struct intel_plane_state *state);
+ void (*commit_plane)(struct drm_plane *plane,
+ struct intel_plane_state *state);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@@ -540,6 +567,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
@@ -564,17 +592,6 @@ struct intel_hdmi {
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
-/**
- * HIGH_RR is the highest eDP panel refresh rate read from EDID
- * LOW_RR is the lowest eDP panel refresh rate found from EDID
- * parsing for same resolution.
- */
-enum edp_drrs_refresh_rate_type {
- DRRS_HIGH_RR,
- DRRS_LOW_RR,
- DRRS_MAX_RR, /* RR count */
-};
-
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -630,12 +647,6 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
- struct {
- enum drrs_support_type type;
- enum edp_drrs_refresh_rate_type refresh_rate_type;
- struct mutex mutex;
- } drrs_state;
-
};
struct intel_digital_port {
@@ -644,7 +655,7 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
- bool (*hpd_pulse)(struct intel_digital_port *, bool);
+ enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
};
struct intel_dp_mst_encoder {
@@ -708,8 +719,7 @@ struct intel_unpin_work {
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
- struct intel_engine_cs *flip_queued_ring;
- u32 flip_queued_seqno;
+ struct drm_i915_gem_request *flip_queued_req;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check;
@@ -794,6 +804,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@@ -825,17 +836,18 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-bool intel_ddi_pll_select(struct intel_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
@@ -865,6 +877,8 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
+int intel_fb_align_height(struct drm_device *dev, int height,
+ unsigned int tiling);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
@@ -872,9 +886,11 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_init_audio(struct drm_device *dev);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
+void i915_audio_component_init(struct drm_i915_private *dev_priv);
+void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
-const char *intel_output_name(int output);
+extern const struct drm_plane_funcs intel_plane_funcs;
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
@@ -925,6 +941,18 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int intel_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+int intel_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -933,7 +961,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *state);
void intel_put_shared_dpll(struct intel_crtc *crtc);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
@@ -963,11 +992,11 @@ void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
-ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
@@ -975,8 +1004,7 @@ void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_config *pipe_config);
-int intel_format_to_fourcc(int format);
+ struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
@@ -992,16 +1020,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
-bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
- bool long_hpd);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
@@ -1010,6 +1037,18 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int intel_disable_plane(struct drm_plane *plane);
+void intel_plane_destroy(struct drm_plane *plane);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1053,13 +1092,20 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
}
#endif
+/* intel_fbc.c */
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_fbc_update(struct drm_device *dev);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_disable(struct drm_device *dev);
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
/* intel_lvds.c */
@@ -1083,6 +1129,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
@@ -1093,10 +1140,10 @@ void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
@@ -1115,7 +1162,6 @@ void intel_backlight_unregister(struct drm_device *dev);
/* intel_psr.c */
-bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
@@ -1159,8 +1205,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_device *dev);
@@ -1191,7 +1235,6 @@ int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
-void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -1199,8 +1242,31 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
+void intel_post_enable_primary(struct drm_crtc *crtc);
+void intel_pre_disable_primary(struct drm_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
+/* intel_atomic.c */
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+int intel_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/* intel_atomic_plane.c */
+struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 0b184079de14..10ab68457ca8 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -24,24 +24,219 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
-/* the sub-encoders aka panel drivers */
-static const struct intel_dsi_device intel_dsi_devices[] = {
+static const struct {
+ u16 panel_id;
+ struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
+} intel_dsi_drivers[] = {
{
.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
- .name = "vbt-generic-dsi-vid-mode-display",
- .dev_ops = &vbt_generic_dsi_display_ops,
+ .init = vbt_panel_init,
},
};
+static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
+
+ mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+ LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+ DRM_ERROR("DPI FIFOs are not empty\n");
+}
+
+static void write_data(struct drm_i915_private *dev_priv, u32 reg,
+ const u8 *data, u32 len)
+{
+ u32 i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = 0;
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ val |= *data++ << 8 * j;
+
+ I915_WRITE(reg, val);
+ }
+}
+
+static void read_data(struct drm_i915_private *dev_priv, u32 reg,
+ u8 *data, u32 len)
+{
+ u32 i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = I915_READ(reg);
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ *data++ = val >> 8 * j;
+ }
+}
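write_data() and read_data() above pack the generic DSI payload into the 32-bit FIFO registers four bytes at a time, least-significant byte first. A small self-contained illustration of the same packing in plain C (userspace, for clarity only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t data[3] = { 0xaa, 0xbb, 0xcc };
		uint32_t val = 0;

		/* same loop shape as write_data(): byte j lands in bits 8*j..8*j+7 */
		for (unsigned j = 0; j < sizeof(data); j++)
			val |= (uint32_t)data[j] << 8 * j;

		printf("0x%08x\n", val);	/* prints 0x00ccbbaa */
		return 0;
	}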
+
+static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dsi_host->port;
+ struct mipi_dsi_packet packet;
+ ssize_t ret;
+ const u8 *header, *data;
+ u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ header = packet.header;
+ data = packet.payload;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
+ data_reg = MIPI_LP_GEN_DATA(port);
+ data_mask = LP_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_mask = LP_CTRL_FIFO_FULL;
+ } else {
+ data_reg = MIPI_HS_GEN_DATA(port);
+ data_mask = HS_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_mask = HS_CTRL_FIFO_FULL;
+ }
+
+ /* note: this is never true for reads */
+ if (packet.payload_length) {
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+ write_data(dev_priv, data_reg, packet.payload,
+ packet.payload_length);
+ }
+
+ if (msg->rx_len) {
+ I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
+ DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ }
+
+ I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+
+ /* ->rx_len is set only for reads */
+ if (msg->rx_len) {
+ data_mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ }
+
+ /* XXX: fix for reads and writes */
+ return 4 + packet.payload_length;
+}
+
+static int intel_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int intel_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
+ .attach = intel_dsi_host_attach,
+ .detach = intel_dsi_host_detach,
+ .transfer = intel_dsi_host_transfer,
+};
+
+static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = &intel_dsi_host_ops;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
+ enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+ I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
+
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
@@ -56,12 +251,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dpio_lock);
}
-static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
-{
- return container_of(intel_attached_encoder(connector),
- struct intel_dsi, base);
-}
-
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
@@ -78,14 +267,13 @@ static void intel_dsi_hot_plug(struct intel_encoder *encoder)
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *config)
+ struct intel_crtc_state *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
- struct drm_display_mode *mode = &config->requested_mode;
+ struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
DRM_DEBUG_KMS("\n");
@@ -95,18 +283,65 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
- if (intel_dsi->dev.dev_ops->mode_fixup)
- return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
- mode, adjusted_mode);
-
return true;
}
+static void intel_dsi_port_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 temp;
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ temp = I915_READ(VLV_CHICKEN_3);
+ temp &= ~PIXEL_OVERLAP_CNT_MASK |
+ intel_dsi->pixel_overlap <<
+ PIXEL_OVERLAP_CNT_SHIFT;
+ I915_WRITE(VLV_CHICKEN_3, temp);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ temp = I915_READ(MIPI_PORT_CTRL(port));
+ temp &= ~LANE_CONFIGURATION_MASK;
+ temp &= ~DUAL_LINK_MODE_MASK;
+
+ if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+ temp |= (intel_dsi->dual_link - 1)
+ << DUAL_LINK_MODE_SHIFT;
+ temp |= intel_crtc->pipe ?
+ LANE_CONFIGURATION_DUAL_LINK_B :
+ LANE_CONFIGURATION_DUAL_LINK_A;
+ }
+ /* assert ip_tg_enable signal */
+ I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(port));
+ }
+}
+
+static void intel_dsi_port_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 temp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* de-assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(port));
+ I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(port));
+ }
+}
+
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
@@ -120,48 +355,51 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
/* bandgap reset is needed every time we do power gate */
band_gap_reset(dev_priv);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
- usleep_range(2500, 3000);
+ for_each_dsi_port(port, intel_dsi->ports) {
+
+ I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
+ /* Enable MIPI PHY transparent latch
+ * Common bit for both MIPI Port A & MIPI Port C
+ * No similar bit in MIPI Port C reg
+ */
+ val = I915_READ(MIPI_PORT_CTRL(PORT_A));
+ I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
- usleep_range(2500, 3000);
+ I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2500, 3000);
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+ usleep_range(2500, 3000);
+ }
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- int pipe = intel_crtc->pipe;
- u32 temp;
+ enum port port;
DRM_DEBUG_KMS("\n");
- if (is_cmd_mode(intel_dsi))
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
- else {
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ } else {
msleep(20); /* XXX */
- dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, TURN_ON, false, port);
msleep(100);
- if (intel_dsi->dev.dev_ops->enable)
- intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ drm_panel_enable(intel_dsi->panel);
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
- /* assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
- temp = temp | intel_dsi->port_bits;
- I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
- POSTING_READ(MIPI_PORT_CTRL(pipe));
+ intel_dsi_port_enable(encoder);
}
}
@@ -172,6 +410,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -183,7 +422,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
- intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
+ intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
tmp = I915_READ(DSPCLK_GATE_D);
@@ -195,13 +434,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
- if (intel_dsi->dev.dev_ops->panel_reset)
- intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+ drm_panel_prepare(intel_dsi->panel);
- if (intel_dsi->dev.dev_ops->send_otp_cmds)
- intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
-
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
/* Enable port in pre-enable phase itself because as per hw team
* recommendation, port should be enabled before plane & pipe */
@@ -221,12 +457,14 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
static void intel_dsi_pre_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
- dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, SHUTDOWN, false, port);
msleep(10);
}
}
@@ -235,77 +473,85 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- int pipe = intel_crtc->pipe;
+ enum port port;
u32 temp;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
- wait_for_dsi_fifo_empty(intel_dsi);
-
- /* de-assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
- POSTING_READ(MIPI_PORT_CTRL(pipe));
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
+ intel_dsi_port_disable(encoder);
msleep(2);
}
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
-
- temp = I915_READ(MIPI_CTRL(pipe));
- temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(pipe), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
+ temp = I915_READ(MIPI_CTRL(port));
+ temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(port), temp |
+ intel_dsi->escape_clk_div <<
+ ESCAPE_CLOCK_DIVIDER_SHIFT);
- temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
- temp &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
+ temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ temp &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ }
/* if disable packets are sent before sending shutdown packet then in
* some next enable sequence send turn on packet error is observed */
- if (intel_dsi->dev.dev_ops->disable)
- intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+ drm_panel_disable(intel_dsi->panel);
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
-
- if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
- == 0x00000), 30))
- DRM_ERROR("DSI LP not going Low\n");
-
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
+ for_each_dsi_port(port, intel_dsi->ports) {
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ /* Wait till Clock lanes are in LP-00 state for MIPI Port A
+ * only. MIPI Port C has no similar bit for checking
+ */
+ if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT)
+ == 0x00000), 30))
+ DRM_ERROR("DSI LP not going Low\n");
+
+ /* Disable MIPI PHY transparent latch
+ * Common bit for both MIPI Port A & MIPI Port C
+ */
+ val = I915_READ(MIPI_PORT_CTRL(PORT_A));
+ I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ usleep_range(2000, 2500);
+ }
vlv_disable_dsi_pll(encoder);
}
@@ -326,8 +572,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
- if (intel_dsi->dev.dev_ops->disable_panel_power)
- intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+ drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
@@ -337,9 +582,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
- u32 port, func;
- enum pipe p;
+ u32 dpi_enabled, func;
+ enum port port;
DRM_DEBUG_KMS("\n");
@@ -348,13 +595,23 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
return false;
/* XXX: this only works for one DSI output */
- for (p = PIPE_A; p <= PIPE_B; p++) {
- port = I915_READ(MIPI_PORT_CTRL(p));
- func = I915_READ(MIPI_DSI_FUNC_PRG(p));
-
- if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
- if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
- *pipe = p;
+ for_each_dsi_port(port, intel_dsi->ports) {
+ func = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) &
+ DPI_ENABLE;
+
+ /* Due to some hardware limitations on BYT, MIPI Port C DPI
+ * Enable bit does not get set. To check whether DSI Port C
+ * was enabled in BIOS, check the Pipe B enable bit
+ */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ (port == PORT_C))
+ dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
+ PIPECONF_ENABLE;
+
+ if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+ if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
+ *pipe = port == PORT_A ? PIPE_A : PIPE_B;
return true;
}
}
@@ -364,7 +621,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 pclk;
DRM_DEBUG_KMS("\n");
@@ -379,7 +636,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (!pclk)
return;
- pipe_config->adjusted_mode.crtc_clock = pclk;
+ pipe_config->base.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
@@ -389,7 +646,6 @@ intel_dsi_mode_valid(struct drm_connector *connector,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
DRM_DEBUG_KMS("\n");
@@ -405,7 +661,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+ return MODE_OK;
}
/* return txclkesc cycles in terms of divider and duration in us */
@@ -437,8 +693,8 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int pipe = intel_crtc->pipe;
- unsigned int bpp = intel_crtc->config.pipe_bpp;
+ enum port port;
+ unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -448,6 +704,15 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ hfp /= 2;
+ hsync /= 2;
+ hbp /= 2;
+ }
+
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
@@ -460,18 +725,20 @@ static void set_dsi_timings(struct drm_encoder *encoder,
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
- I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
- I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
+ I915_WRITE(MIPI_HFP_COUNT(port), hfp);
- /* meaningful for video mode non-burst sync pulse mode only, can be zero
- * for non-burst sync events and burst modes */
- I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
- I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+ /* meaningful for video mode non-burst sync pulse mode only,
+ * can be zero for non-burst sync events and burst modes */
+ I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
+ I915_WRITE(MIPI_HBP_COUNT(port), hbp);
- /* vertical values are in terms of lines */
- I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
- I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
- I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+ /* vertical values are in terms of lines */
+ I915_WRITE(MIPI_VFP_COUNT(port), vfp);
+ I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
+ I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+ }
}
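Worked example for the dual-link adjustment above: a 1920-pixel-wide panel driven in DSI_DUAL_LINK_FRONT_BACK mode with pixel_overlap = 4 ends up with each MIPI port programmed for hactive = 1920 / 2 + 4 = 964, while hfp, hsync and hbp are simply halved (4 is an illustrative overlap value, not one taken from this commit).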
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
@@ -482,33 +749,44 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
- int pipe = intel_crtc->pipe;
- unsigned int bpp = intel_crtc->config.pipe_bpp;
+ &intel_crtc->config->base.adjusted_mode;
+ enum port port;
+ unsigned int bpp = intel_crtc->config->pipe_bpp;
u32 val, tmp;
+ u16 mode_hdisplay;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
- /* escape clock divider, 20MHz, shared for A and C. device ready must be
- * off when doing this! txclkesc? */
- tmp = I915_READ(MIPI_CTRL(0));
- tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+ mode_hdisplay = adjusted_mode->hdisplay;
- /* read request priority is per pipe */
- tmp = I915_READ(MIPI_CTRL(pipe));
- tmp &= ~READ_REQUEST_PRIORITY_MASK;
- I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+ if (intel_dsi->dual_link) {
+ mode_hdisplay /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ mode_hdisplay += intel_dsi->pixel_overlap;
+ }
- /* XXX: why here, why like this? handling in irq handler?! */
- I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
- I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* escape clock divider, 20MHz, shared for A and C.
+ * device ready must be off when doing this! txclkesc? */
+ tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1);
- I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
+ /* read request priority is per pipe */
+ tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH);
- I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
- adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
- adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ /* XXX: why here, why like this? handling in irq handler?! */
+ I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
+ I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+
+ I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+
+ I915_WRITE(MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+ mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ }
set_dsi_timings(encoder, adjusted_mode);
@@ -522,95 +800,102 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
- I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
-
- /* timeouts for recovery. one frame IIUC. if counter expires, EOT and
- * stop state. */
-
- /*
- * In burst mode, value greater than one DPI line Time in byte clock
- * (txbyteclkhs) To timeout this timer 1+ of the above said value is
- * recommended.
- *
- * In non-burst mode, Value greater than one DPI frame time in byte
- * clock(txbyteclkhs) To timeout this timer 1+ of the above said value
- * is recommended.
- *
- * In DBI only mode, value greater than one DBI frame time in byte
- * clock(txbyteclkhs) To timeout this timer 1+ of the above said value
- * is recommended.
- */
-
- if (is_vid_mode(intel_dsi) &&
- intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
- txbyteclkhs(adjusted_mode->htotal, bpp,
- intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
- } else {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
- txbyteclkhs(adjusted_mode->vtotal *
- adjusted_mode->htotal,
- bpp, intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
- }
- I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
-
- /* dphy stuff */
- /* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
-
- val = 0;
+ tmp = 0;
if (intel_dsi->eotp_pkt == 0)
- val |= EOT_DISABLE;
-
+ tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop)
- val |= CLOCKSTOP;
-
- /* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
-
- /* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
-
- /* in terms of txbyteclkhs. actual high to low switch +
- * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
- *
- * XXX: write MIPI_STOP_STATE_STALL?
- */
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
- intel_dsi->hs_to_lp_count);
-
- /* XXX: low power clock equivalence in terms of byte clock. the number
- * of byte clocks occupied in one low power clock. based on txbyteclkhs
- * and txclkesc. txclkesc time / txbyteclk time * (105 +
- * MIPI_STOP_STATE_STALL) / 105.???
- */
- I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
-
- /* the bw essential for transmitting 16 long packets containing 252
- * bytes meant for dcs write memory command is programmed in this
- * register in terms of byte clocks. based on dsi transfer rate and the
- * number of lanes configured the time taken to transmit 16 long packets
- * in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
-
- I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
- intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
- intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
-
- if (is_vid_mode(intel_dsi))
- /* Some panels might have resolution which is not a multiple of
- * 64 like 1366 x 768. Enable RANDOM resolution support for such
- * panels by default */
- I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+ tmp |= CLOCKSTOP;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+ /* timeouts for recovery. one frame IIUC. if counter expires,
+ * EOT and stop state. */
+
+ /*
+ * In burst mode, the value should be greater than one DPI line time
+ * in byte clocks (txbyteclkhs); programming 1 + that value is
+ * recommended so the timer can expire.
+ *
+ * In non-burst mode, the value should be greater than one DPI frame
+ * time in byte clocks (txbyteclkhs); again, program 1 + that value.
+ *
+ * In DBI only mode, the value should be greater than one DBI frame
+ * time in byte clocks (txbyteclkhs); again, program 1 + that value.
+ */
+
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->htotal, bpp,
+ intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
+ } else {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->vtotal *
+ adjusted_mode->htotal,
+ bpp, intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
+ }
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
+
+ /* dphy stuff */
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
+
+
+ /* recovery disables */
+ I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
+
+ /* XXX: low power clock equivalence in terms of byte clock.
+ * the number of byte clocks occupied in one low power clock.
+ * based on txbyteclkhs and txclkesc.
+ * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
+ * ) / 105.???
+ */
+ I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+
+ /* the bw essential for transmitting 16 long packets containing
+ * 252 bytes meant for dcs write memory command is programmed in
+ * this register in terms of byte clocks. based on dsi transfer
+ * rate and the number of lanes configured the time taken to
+ * transmit 16 long packets in a dsi stream varies. */
+ I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+
+ I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+ intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi))
+ /* Some panels might have resolution which is not a
+ * multiple of 64 like 1366 x 768. Enable RANDOM
+ * resolution support for such panels by default */
+ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format |
+ IP_TG_CONFIG |
+ RANDOM_DPI_DISPLAY_RESOLUTION);
+ }
}
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
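The timeout programming above converts one DPI line (burst mode) or one whole frame (everything else) from pixels into HS byte clocks via txbyteclkhs(), whose definition sits elsewhere in intel_dsi.c. A hedged standalone approximation of that conversion; the exact rounding and the burst-ratio scaling below are assumptions, not copied from the driver:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /*
     * Rough pixel-count -> HS byte-clock conversion: bpp bits per pixel spread
     * over lane_count lanes, 8 bits per byte clock, scaled by the burst mode
     * ratio (in percent). Illustrative only.
     */
    static unsigned int pixels_to_txbyteclkhs(unsigned int pixels, int bpp,
                                              int lane_count,
                                              unsigned int burst_ratio_pct)
    {
        return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_ratio_pct,
                                         8 * 100), lane_count);
    }

    int main(void)
    {
        /* one 2200-pixel line, 24 bpp, 4 lanes, no burst scaling; +1 as above */
        printf("%u\n", pixels_to_txbyteclkhs(2200, 24, 4, 100) + 1);
        return 0;
    }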
@@ -625,20 +910,7 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
- struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
- struct intel_encoder *intel_encoder = &intel_dsi->base;
- enum intel_display_power_domain power_domain;
- enum drm_connector_status connector_status;
- struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
-
- DRM_DEBUG_KMS("\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
-
- intel_display_power_get(dev_priv, power_domain);
- connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
- intel_display_power_put(dev_priv, power_domain);
-
- return connector_status;
+ return connector_status_connected;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -664,7 +936,7 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
-static void intel_dsi_destroy(struct drm_connector *connector)
+static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -674,8 +946,20 @@ static void intel_dsi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ if (intel_dsi->panel) {
+ drm_panel_detach(intel_dsi->panel);
+ /* XXX: Logically this call belongs in the panel driver. */
+ drm_panel_remove(intel_dsi->panel);
+ }
+ intel_encoder_destroy(encoder);
+}
+
static const struct drm_encoder_funcs intel_dsi_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dsi_encoder_destroy,
};
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
@@ -687,8 +971,10 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dsi_detect,
- .destroy = intel_dsi_destroy,
+ .destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_connector_atomic_get_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
void intel_dsi_init(struct drm_device *dev)
@@ -698,9 +984,9 @@ void intel_dsi_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_dsi_device *dsi;
+ enum port port;
unsigned int i;
DRM_DEBUG_KMS("\n");
@@ -748,22 +1034,43 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
- for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
- dsi = &intel_dsi_devices[i];
- intel_dsi->dev = *dsi;
+ /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
+ if (dev_priv->vbt.dsi.config->dual_link) {
+ /* XXX: does dual link work on either pipe? */
+ intel_encoder->crtc_mask = (1 << PIPE_A);
+ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+ } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ intel_encoder->crtc_mask = (1 << PIPE_A);
+ intel_dsi->ports = (1 << PORT_A);
+ } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
+ intel_encoder->crtc_mask = (1 << PIPE_B);
+ intel_dsi->ports = (1 << PORT_C);
+ }
- if (dsi->dev_ops->init(&intel_dsi->dev))
+ /* Create a DSI host (and a device) for each port. */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
+ intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
+ intel_dsi_drivers[i].panel_id);
+ if (intel_dsi->panel)
break;
}
- if (i == ARRAY_SIZE(intel_dsi_devices)) {
+ if (!intel_dsi->panel) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->crtc_mask = (1 << 0); /* XXX */
-
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -778,13 +1085,23 @@ void intel_dsi_init(struct drm_device *dev)
drm_connector_register(connector);
- fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+ drm_panel_attach(intel_dsi->panel, connector);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_panel_get_modes(intel_dsi->panel);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
return;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 657eb5c1b9d8..2784ac442368 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -26,58 +26,27 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
-struct intel_dsi_device {
- unsigned int panel_id;
- const char *name;
- const struct intel_dsi_dev_ops *dev_ops;
- void *dev_priv;
-};
-
-struct intel_dsi_dev_ops {
- bool (*init)(struct intel_dsi_device *dsi);
-
- void (*panel_reset)(struct intel_dsi_device *dsi);
-
- void (*disable_panel_power)(struct intel_dsi_device *dsi);
-
- /* one time programmable commands if needed */
- void (*send_otp_cmds)(struct intel_dsi_device *dsi);
-
- /* This callback must be able to assume DSI commands can be sent */
- void (*enable)(struct intel_dsi_device *dsi);
-
- /* This callback must be able to assume DSI commands can be sent */
- void (*disable)(struct intel_dsi_device *dsi);
-
- int (*mode_valid)(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode);
-
- bool (*mode_fixup)(struct intel_dsi_device *dsi,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- void (*mode_set)(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+/* Dual Link support */
+#define DSI_DUAL_LINK_NONE 0
+#define DSI_DUAL_LINK_FRONT_BACK 1
+#define DSI_DUAL_LINK_PIXEL_ALT 2
- bool (*get_hw_state)(struct intel_dsi_device *dev);
-
- struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
-
- void (*destroy) (struct intel_dsi_device *dsi);
-};
+struct intel_dsi_host;
struct intel_dsi {
struct intel_encoder base;
- struct intel_dsi_device dev;
+ struct drm_panel *panel;
+ struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
struct intel_connector *attached_connector;
+ /* bit mask of ports being driven */
+ u16 ports;
+
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -101,6 +70,8 @@ struct intel_dsi {
u8 clock_stop;
u8 escape_clk_div;
+ u8 dual_link;
+ u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
@@ -127,6 +98,24 @@ struct intel_dsi {
u16 panel_pwr_cycle_delay;
};
+struct intel_dsi_host {
+ struct mipi_dsi_host base;
+ struct intel_dsi *intel_dsi;
+ enum port port;
+
+ /* our little hack */
+ struct mipi_dsi_device *device;
+};
+
+static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct intel_dsi_host, base);
+}
+
+#define for_each_dsi_port(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ if ((__ports_mask) & (1 << (__port)))
+
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
@@ -136,6 +125,6 @@ extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
#endif /* _INTEL_DSI_H */
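The new for_each_dsi_port() iterator above walks whichever ports are set in the u16 ports bitmask, which is how the dual-link loops earlier in this patch visit PORT_A and PORT_C in one pass. A self-contained sketch of the same pattern (the enum here is a stand-in, not the driver's real port enum):

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, I915_MAX_PORTS };

    #define for_each_dsi_port(__port, __ports_mask) \
        for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
            if ((__ports_mask) & (1 << (__port)))

    int main(void)
    {
        unsigned short ports = (1 << PORT_A) | (1 << PORT_C); /* dual link */
        enum port port;

        for_each_dsi_port(port, ports)
            printf("programming DSI port %c\n", 'A' + port);

        return 0;
    }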
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
deleted file mode 100644
index f4767fd2ebeb..000000000000
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Jani Nikula <jani.nikula@intel.com>
- */
-
-#include <linux/export.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
-
-/*
- * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
- * MIPI_COMMAND_ADDRESS registers.
- *
- * Apparently these registers provide a MIPI adapter level way to send (lots of)
- * commands and data to the receiver, without having to write the commands and
- * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
- *
- * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
- * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
- * framebuffer in command mode displays) these are just an optimization that can
- * come later.
- *
- * For memory writes, these should probably be used for performance.
- */
-
-static void print_stat(struct intel_dsi *intel_dsi)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
-
- val = I915_READ(MIPI_INTR_STAT(pipe));
-
-#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
- DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
- "\n", pipe, val,
- STAT_BIT(val, TEARING_EFFECT),
- STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
- STAT_BIT(val, GEN_READ_DATA_AVAIL),
- STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
- STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
- STAT_BIT(val, RX_PROT_VIOLATION),
- STAT_BIT(val, RX_INVALID_TX_LENGTH),
- STAT_BIT(val, ACK_WITH_NO_ERROR),
- STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
- STAT_BIT(val, LP_RX_TIMEOUT),
- STAT_BIT(val, HS_TX_TIMEOUT),
- STAT_BIT(val, DPI_FIFO_UNDERRUN),
- STAT_BIT(val, LOW_CONTENTION),
- STAT_BIT(val, HIGH_CONTENTION),
- STAT_BIT(val, TXDSI_VC_ID_INVALID),
- STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
- STAT_BIT(val, TXCHECKSUM_ERROR),
- STAT_BIT(val, TXECC_MULTIBIT_ERROR),
- STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
- STAT_BIT(val, TXFALSE_CONTROL_ERROR),
- STAT_BIT(val, RXDSI_VC_ID_INVALID),
- STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
- STAT_BIT(val, RXCHECKSUM_ERROR),
- STAT_BIT(val, RXECC_MULTIBIT_ERROR),
- STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
- STAT_BIT(val, RXFALSE_CONTROL_ERROR),
- STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
- STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
- STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
- STAT_BIT(val, RXEOT_SYNC_ERROR),
- STAT_BIT(val, RXSOT_SYNC_ERROR),
- STAT_BIT(val, RXSOT_ERROR));
-#undef STAT_BIT
-}
-
-enum dsi_type {
- DSI_DCS,
- DSI_GENERIC,
-};
-
-/* enable or disable command mode hs transmissions */
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 temp;
- u32 mask = DBI_FIFO_EMPTY;
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
-
- temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
- temp &= DBI_HS_LP_MODE_MASK;
- I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
-
- intel_dsi->hs = enable;
-}
-
-static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, u16 data)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 ctrl_reg;
- u32 ctrl;
- u32 mask;
-
- DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
- channel, data_type, data);
-
- if (intel_dsi->hs) {
- ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
- mask = HS_CTRL_FIFO_FULL;
- } else {
- ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
- mask = LP_CTRL_FIFO_FULL;
- }
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
- DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
- print_stat(intel_dsi);
- }
-
- /*
- * Note: This function is also used for long packets, with length passed
- * as data, since SHORT_PACKET_PARAM_SHIFT ==
- * LONG_PACKET_WORD_COUNT_SHIFT.
- */
- ctrl = data << SHORT_PACKET_PARAM_SHIFT |
- channel << VIRTUAL_CHANNEL_SHIFT |
- data_type << DATA_TYPE_SHIFT;
-
- I915_WRITE(ctrl_reg, ctrl);
-
- return 0;
-}
-
-static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, const u8 *data, int len)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 data_reg;
- int i, j, n;
- u32 mask;
-
- DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
- channel, data_type, len);
-
- if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(pipe);
- mask = HS_DATA_FIFO_FULL;
- } else {
- data_reg = MIPI_LP_GEN_DATA(pipe);
- mask = LP_DATA_FIFO_FULL;
- }
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
- DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
-
- for (i = 0; i < len; i += n) {
- u32 val = 0;
- n = min_t(int, len - i, 4);
-
- for (j = 0; j < n; j++)
- val |= *data++ << 8 * j;
-
- I915_WRITE(data_reg, val);
- /* XXX: check for data fifo full, once that is set, write 4
- * dwords, then wait for not set, then continue. */
- }
-
- return dsi_vc_send_short(intel_dsi, channel, data_type, len);
-}
-
-static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
- int channel, const u8 *data, int len,
- enum dsi_type type)
-{
- int ret;
-
- if (len == 0) {
- BUG_ON(type == DSI_GENERIC);
- ret = dsi_vc_send_short(intel_dsi, channel,
- MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
- 0);
- } else if (len == 1) {
- ret = dsi_vc_send_short(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0]);
- } else if (len == 2) {
- ret = dsi_vc_send_short(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- (data[1] << 8) | data[0]);
- } else {
- ret = dsi_vc_send_long(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len);
- }
-
- return ret;
-}
-
-int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len)
-{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
-}
-
-int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len)
-{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
-}
-
-static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd)
-{
- return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
- dcs_cmd);
-}
-
-static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 *reqdata,
- int reqlen)
-{
- u16 data;
- u8 data_type;
-
- switch (reqlen) {
- case 0:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
- data = 0;
- break;
- case 1:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
- data = reqdata[0];
- break;
- case 2:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
- data = (reqdata[1] << 8) | reqdata[0];
- break;
- default:
- BUG();
- }
-
- return dsi_vc_send_short(intel_dsi, channel, data_type, data);
-}
-
-static int dsi_read_data_return(struct intel_dsi *intel_dsi,
- u8 *buf, int buflen)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- int i, len = 0;
- u32 data_reg, val;
-
- if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(pipe);
- } else {
- data_reg = MIPI_LP_GEN_DATA(pipe);
- }
-
- while (len < buflen) {
- val = I915_READ(data_reg);
- for (i = 0; i < 4 && len < buflen; i++, len++)
- buf[len] = val >> 8 * i;
- }
-
- return len;
-}
-
-int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
- int ret;
-
- /*
- * XXX: should issue multiple read requests and reads if request is
- * longer than MIPI_MAX_RETURN_PKT_SIZE
- */
-
- I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
-
- ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
- if (ret)
- return ret;
-
- mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
-
- ret = dsi_read_data_return(intel_dsi, buf, buflen);
- if (ret < 0)
- return ret;
-
- if (ret != buflen)
- return -EIO;
-
- return 0;
-}
-
-int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
- int ret;
-
- /*
- * XXX: should issue multiple read requests and reads if request is
- * longer than MIPI_MAX_RETURN_PKT_SIZE
- */
-
- I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
-
- ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
- reqlen);
- if (ret)
- return ret;
-
- mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
-
- ret = dsi_read_data_return(intel_dsi, buf, buflen);
- if (ret < 0)
- return ret;
-
- if (ret != buflen)
- return -EIO;
-
- return 0;
-}
-
-/*
- * send a video mode command
- *
- * XXX: commands with data in MIPI_DPI_DATA?
- */
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
-
- /* XXX: pipe, hs */
- if (hs)
- cmd &= ~DPI_LP_MODE;
- else
- cmd |= DPI_LP_MODE;
-
- /* clear bit */
- I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
-
- /* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
- DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
-
- I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
-
- mask = SPL_PKT_SENT_INTERRUPT;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
-
- return 0;
-}
-
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
-
- mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
- LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
- DRM_ERROR("DPI FIFOs are not empty\n");
-}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 46aa1acc00eb..886779030f1a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -33,81 +33,7 @@
#include "intel_drv.h"
#include "intel_dsi.h"
-#define DPI_LP_MODE_EN false
-#define DPI_HS_MODE_EN true
-
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
-
-int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len);
-
-int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len);
-
-int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen);
-
-int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen);
-
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
-
-/* XXX: questionable write helpers */
-static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd)
-{
- return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
-}
-
-static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd, u8 param)
-{
- u8 buf[2] = { dcs_cmd, param };
- return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
-}
-
-static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
- int channel)
-{
- return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
-}
-
-static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 param)
-{
- return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
-}
-
-static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2)
-{
- u8 buf[2] = { param1, param2 };
- return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
-}
-
-/* XXX: questionable read helpers */
-static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
- int channel, u8 *buf, int buflen)
-{
- return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
-}
-
-static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
- int channel, u8 param, u8 *buf,
- int buflen)
-{
- return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
-}
-
-static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2,
- u8 *buf, int buflen)
-{
- u8 req[2] = { param1, param2 };
-
- return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
-}
-
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
+ enum port port);
#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index f6bdd44069ce..d2cd8d5b27a1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -28,6 +28,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
@@ -35,7 +36,16 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
+
+struct vbt_panel {
+ struct drm_panel panel;
+ struct intel_dsi *intel_dsi;
+};
+
+static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct vbt_panel, panel);
+}
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
@@ -94,34 +104,59 @@ static struct gpio_table gtable[] = {
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
-static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
+static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
- u8 type, byte, mode, vc, port;
- u16 len;
-
- byte = *data++;
- mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
- vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
- port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+ return port ? PORT_C : PORT_A;
+}
- /* LP or HS mode */
- intel_dsi->hs = mode;
+static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
+ const u8 *data)
+{
+ struct mipi_dsi_device *dsi_device;
+ u8 type, flags, seq_port;
+ u16 len;
+ enum port port;
- /* get packet type and increment the pointer */
+ flags = *data++;
type = *data++;
len = *((u16 *) data);
data += 2;
+ seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
+
+ /* For DSI single link on Port A & C, the seq_port value parsed from
+ * Sequence Block #53 of the VBT has been set to 0. Reads and writes
+ * of packets for DSI single link on Port A and Port C will instead
+ * be based on the DVO port from VBT block 2.
+ */
+ if (intel_dsi->ports == (1 << PORT_C))
+ port = PORT_C;
+ else
+ port = intel_dsi_seq_port_to_port(seq_port);
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ if (!dsi_device) {
+ DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+ goto out;
+ }
+
+ if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ else
+ dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- dsi_vc_generic_write_0(intel_dsi, vc);
+ mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- dsi_vc_generic_write_1(intel_dsi, vc, *data);
+ mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
+ mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
@@ -129,30 +164,31 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- dsi_vc_generic_write(intel_dsi, vc, data, len);
+ mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- dsi_vc_dcs_write_0(intel_dsi, vc, *data);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
- dsi_vc_dcs_write(intel_dsi, vc, data, len);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
+out:
data += len;
return data;
}
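The port selection above decodes the VBT sequence byte but overrides it for single-link configurations on port C, where the VBT leaves seq_port at 0. A standalone sketch of that decision; the shift value and mask width are assumptions inferred from the surrounding defines, and pick_port() is a made-up helper:

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C };

    #define MIPI_PORT_SHIFT 3   /* assumed bit position in the sequence byte */

    static enum port pick_port(unsigned char flags, unsigned short ports_mask)
    {
        unsigned char seq_port = (flags >> MIPI_PORT_SHIFT) & 3;

        /* Single link on port C: trust the DVO port from VBT block 2. */
        if (ports_mask == (1 << PORT_C))
            return PORT_C;

        return seq_port ? PORT_C : PORT_A;
    }

    int main(void)
    {
        printf("%d\n", pick_port(0x00, 1 << PORT_C)); /* 2 -> PORT_C */
        printf("%d\n", pick_port(0x00, 1 << PORT_A)); /* 0 -> PORT_A */
        return 0;
    }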
-static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- u32 delay = *((u32 *) data);
+ u32 delay = *((const u32 *) data);
usleep_range(delay, delay + 10);
data += 4;
@@ -160,7 +196,7 @@ static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
return data;
}
-static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
u8 gpio, action;
u16 function, pad;
@@ -193,7 +229,8 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
return data;
}
-typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
+typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
+ const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
@@ -217,13 +254,12 @@ static const char * const seq_name[] = {
"MIPI_SEQ_DEASSERT_RESET"
};
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
+static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
{
- u8 *data = sequence;
fn_mipi_elem_exec mipi_elem_exec;
int index;
- if (!sequence)
+ if (!data)
return;
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
@@ -256,14 +292,103 @@ static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
}
}
-static bool generic_init(struct intel_dsi_device *dsi)
+static int vbt_panel_prepare(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_unprepare(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_enable(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_disable(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_get_modes(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *mode;
+
+ if (!panel->connector)
+ return 0;
+
+ mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (!mode)
+ return 0;
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs vbt_panel_funcs = {
+ .disable = vbt_panel_disable,
+ .unprepare = vbt_panel_unprepare,
+ .prepare = vbt_panel_prepare,
+ .enable = vbt_panel_enable,
+ .get_modes = vbt_panel_get_modes,
+};
+
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ struct vbt_panel *vbt_panel;
u32 bits_per_pixel = 24;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
@@ -273,6 +398,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;
+ enum port port;
DRM_DEBUG_KMS("\n");
@@ -280,6 +406,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
@@ -299,6 +427,20 @@ static bool generic_init(struct intel_dsi_device *dsi)
pclk = mode->clock;
+ /* In dual link mode each port needs half of the pixel clock */
+ if (intel_dsi->dual_link) {
+ pclk = pclk / 2;
+
+ /* We can enable pixel_overlap if needed by the panel. In this
+ * case we need to increase the pixel clock for the extra pixels.
+ */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ pclk += DIV_ROUND_UP(mode->vtotal *
+ intel_dsi->pixel_overlap *
+ 60, 1000);
+ }
+ }
+
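In dual-link mode each port carries half of the pixel clock, and front/back overlap adds a little back: overlap pixels per line, times vtotal lines, times the 60 Hz refresh assumed in the hunk above, converted to kHz. A standalone version of that adjustment (dual_link_pclk() is illustrative, not a driver function):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* pclk in kHz, as in struct drm_display_mode; 60 Hz refresh assumed. */
    static unsigned int dual_link_pclk(unsigned int pclk, int front_back,
                                       unsigned int vtotal,
                                       unsigned int pixel_overlap)
    {
        pclk /= 2;                      /* each link pushes half the pixels */
        if (front_back)
            pclk += DIV_ROUND_UP(vtotal * pixel_overlap * 60, 1000);

        return pclk;
    }

    int main(void)
    {
        /* 148500 kHz mode, 1125 total lines, 4 overlap pixels */
        printf("%u kHz per port\n", dual_link_pclk(148500, 1, 1125, 4));
        return 0;
    }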
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
@@ -311,7 +453,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
- return false;
+ return NULL;
}
burst_mode_ratio = DIV_ROUND_UP(
@@ -321,7 +463,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
- return false;
+ return NULL;
}
} else
burst_mode_ratio = 100;
@@ -493,6 +635,12 @@ static bool generic_init(struct intel_dsi_device *dsi)
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ else
+ DRM_DEBUG_KMS("Dual link: NONE\n");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
@@ -516,110 +664,18 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
- return true;
-}
-
-static int generic_mode_valid(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static bool generic_mode_fixup(struct intel_dsi_device *dsi,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode) {
- return true;
-}
-
-static void generic_panel_reset(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_disable_panel_power(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_enable(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_disable(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /* This is cheating a bit with the cleanup. */
+ vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+ vbt_panel->intel_dsi = intel_dsi;
+ drm_panel_init(&vbt_panel->panel);
+ vbt_panel->panel.funcs = &vbt_panel_funcs;
+ drm_panel_add(&vbt_panel->panel);
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
-{
- return connector_status_connected;
-}
-
-static bool generic_get_hw_state(struct intel_dsi_device *dev)
-{
- return true;
-}
-
-static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /* a regular driver would get the device in probe */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
+ }
- dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
- return dev_priv->vbt.lfp_lvds_vbt_mode;
+ return &vbt_panel->panel;
}
-
-static void generic_destroy(struct intel_dsi_device *dsi) { }
-
-/* Callbacks. We might not need them all. */
-struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
- .init = generic_init,
- .mode_valid = generic_mode_valid,
- .mode_fixup = generic_mode_fixup,
- .panel_reset = generic_panel_reset,
- .disable_panel_power = generic_disable_panel_power,
- .send_otp_cmds = generic_send_otp_cmds,
- .enable = generic_enable,
- .disable = generic_disable,
- .detect = generic_detect,
- .get_hw_state = generic_get_hw_state,
- .get_modes = generic_get_modes,
- .destroy = generic_destroy,
-};
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index fa7a6ca34cd6..3622d0bafdf8 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -241,7 +241,11 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
return;
}
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+ if (intel_dsi->ports & (1 << PORT_A))
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ if (intel_dsi->ports & (1 << PORT_C))
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
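With dual link both DSI PLL clock gates have to be opened, one per port, and the hunk above derives that from the same ports bitmask used elsewhere in the patch. A tiny sketch of building the control bits; the gate bit values below are placeholders, not the real register layout from i915_reg.h:

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C };

    /* placeholder bit values -- the real ones live in i915_reg.h */
    #define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 0)
    #define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 1)

    static unsigned int pll_ctrl_gates(unsigned short ports)
    {
        unsigned int ctrl = 0;

        if (ports & (1 << PORT_A))
            ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
        if (ports & (1 << PORT_C))
            ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;

        return ctrl;
    }

    int main(void)
    {
        printf("%#x\n", pll_ctrl_gates((1 << PORT_A) | (1 << PORT_C))); /* 0x3 */
        return 0;
    }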
@@ -269,12 +273,14 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
- mutex_unlock(&dev_priv->dpio_lock);
+ if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ DSI_PLL_LOCK, 20)) {
- if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+ mutex_unlock(&dev_priv->dpio_lock);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
+ mutex_unlock(&dev_priv->dpio_lock);
DRM_DEBUG_KMS("DSI PLL locked\n");
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index e40e3df33517..d8579510beb0 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -144,7 +145,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -160,9 +161,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -186,8 +187,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &crtc->config.requested_mode,
- &crtc->config.adjusted_mode);
+ &crtc->config->base.mode,
+ &crtc->config->base.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -200,7 +201,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
- struct intel_crtc_config *config;
+ struct intel_crtc_state *config;
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
@@ -221,7 +222,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
- config = &to_intel_crtc(crtc)->config;
+ config = to_intel_crtc(crtc)->config;
intel_dvo->base.connectors_active = true;
@@ -261,10 +262,10 @@ intel_dvo_mode_valid(struct drm_connector *connector,
}
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -295,7 +296,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
@@ -390,6 +391,8 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_connector_atomic_get_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
new file mode 100644
index 000000000000..624d1d92d284
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. The gain comes from keeping the memory footprint
+ * small and having fewer memory pages opened and accessed for refreshing the
+ * display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and configuring
+ * its offset in the proper registers. The hardware takes care of all the
+ * compression/decompression. However, there are many known cases where we
+ * have to forcibly disable it to allow proper screen updates.
+ */
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void i8xx_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int i;
+ u32 fbc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN2(dev))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ if (IS_GEN4(dev)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ }
+
+ /* enable it... */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
+ cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+}
+
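FBC_CTL takes the compressed framebuffer pitch in 32-byte units on gen2 and 64-byte units otherwise, minus one, clamped to the real framebuffer pitch -- the conversion at the top of i8xx_fbc_enable() above. A minimal sketch of just that arithmetic (fbc_ctl_stride() and the numbers in main() are made up for illustration):

    #include <stdio.h>

    static int fbc_ctl_stride(int cfb_pitch, int fb_pitch, int is_gen2)
    {
        if (fb_pitch < cfb_pitch)
            cfb_pitch = fb_pitch;       /* never exceed the real pitch */

        /* FBC_CTL wants 32B or 64B units, minus one */
        return is_gen2 ? (cfb_pitch / 32) - 1 : (cfb_pitch / 64) - 1;
    }

    int main(void)
    {
        printf("%d\n", fbc_ctl_stride(2048, 1920 * 4, 0)); /* 31 */
        return 0;
    }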
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void g4x_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void snb_fbc_blit_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+
+ /* The blitter is part of the Media power well on VLV. No impact of
+ * this param on other platforms for now */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+}
+
+static void ilk_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ break;
+ case 1:
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev))
+ dpfc_ctl |= obj->fence_reg;
+
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ snb_fbc_blit_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void ilk_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ilk_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ break;
+ case 1:
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+
+ if (dev_priv->fbc.false_color)
+ dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ } else {
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ HSW_FBCQ_DIS);
+ }
+
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+
+ snb_fbc_blit_update(dev);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+/**
+ * intel_fbc_enabled - Is FBC enabled?
+ * @dev: the drm_device
+ *
+ * This function is used to verify the current state of FBC.
+ * FIXME: This should be tracked in the plane config eventually
+ * instead of queried at runtime for most callers.
+ */
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return dev_priv->fbc.enabled;
+}
+
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_GEN8(dev))
+ return;
+
+ if (!intel_fbc_enabled(dev))
+ return;
+
+ I915_WRITE(MSG_FBC_REND_STATE, value);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc.fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->primary->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc);
+
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
+ }
+
+ dev_priv->fbc.fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc.fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
+ /* work was cancelled before it ran, clean up */
+ kfree(dev_priv->fbc.fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc.fbc_work = NULL;
+}
+
+static void intel_fbc_enable(struct drm_crtc *crtc)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_fbc_cancel_work(dev_priv);
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
+ dev_priv->display.enable_fbc(crtc);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->primary->fb;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc.fbc_work = work;
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+/**
+ * intel_fbc_disable - disable FBC
+ * @dev: the drm_device
+ *
+ * This function disables FBC.
+ */
+void intel_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_fbc_cancel_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+ enum no_fbc_reason reason)
+{
+ if (dev_priv->fbc.no_fbc_reason == reason)
+ return false;
+
+ dev_priv->fbc.no_fbc_reason = reason;
+ return true;
+}
+
+/**
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= max_hdisplay in width, max_vdisplay in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ const struct drm_display_mode *adjusted_mode;
+ unsigned int max_width, max_height;
+
+ if (!HAS_FBC(dev)) {
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ return;
+ }
+
+ if (!i915.powersave) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ return;
+ }
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ for_each_crtc(dev, tmp_crtc) {
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->primary->fb;
+ obj = intel_fb_obj(fb);
+ adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
+ }
+ if (!i915.enable_fbc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ goto out_disable;
+ }
+ if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ goto out_disable;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ max_width = 4096;
+ max_height = 2048;
+ } else {
+ max_width = 2048;
+ max_height = 1536;
+ }
+ if (intel_crtc->config->pipe_src_w > max_width ||
+ intel_crtc->config->pipe_src_h > max_height) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+ intel_crtc->plane != PLANE_A) {
+ if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+ DRM_DEBUG_KMS("plane not A, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ goto out_disable;
+ }
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
+ if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_fbc_disable(dev);
+ }
+
+ intel_fbc_enable(crtc);
+ dev_priv->fbc.no_fbc_reason = FBC_OK;
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_fbc_disable(dev);
+ }
+ i915_gem_stolen_cleanup_compression(dev);
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @dev_priv: the i915 device
+ *
+ * This function might be called during the PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_FBC(dev_priv)) {
+ dev_priv->fbc.enabled = false;
+ return;
+ }
+
+ if (INTEL_INFO(dev_priv)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_fbc_enable;
+ dev_priv->display.disable_fbc = ilk_fbc_disable;
+ } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->display.enable_fbc = ilk_fbc_enable;
+ dev_priv->display.disable_fbc = ilk_fbc_disable;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_fbc_enable;
+ dev_priv->display.disable_fbc = g4x_fbc_disable;
+ } else {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_fbc_enable;
+ dev_priv->display.disable_fbc = i8xx_fbc_disable;
+
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ }
+
+ dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+}
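For reference, the deferred-enable idiom used by intel_fbc_enable() above (allocate a work item, remember it so a later disable can cancel it, and only touch the hardware once the ~50 ms delay has passed and the item is still the current one) can be reduced to a small standalone sketch. The fbc_demo_* names are hypothetical, not part of the patch, and locking is omitted for brevity:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct fbc_demo_work {
	struct delayed_work work;
};

/* would be protected by a mutex in real code */
static struct fbc_demo_work *fbc_demo_pending;

static void fbc_demo_work_fn(struct work_struct *__work)
{
	struct fbc_demo_work *w =
		container_of(to_delayed_work(__work), struct fbc_demo_work, work);

	/* only act if nobody scheduled a newer request or cancelled us */
	if (w == fbc_demo_pending) {
		/* ...program the hardware here... */
		fbc_demo_pending = NULL;
	}
	kfree(w);
}

static void fbc_demo_schedule_enable(void)
{
	struct fbc_demo_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return;	/* the real code falls back to enabling synchronously */

	INIT_DELAYED_WORK(&w->work, fbc_demo_work_fn);
	fbc_demo_pending = w;
	/* let page flips cease and a vblank pass before touching registers */
	schedule_delayed_work(&w->work, msecs_to_jiffies(50));
}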
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 850cf7d6578c..3001a8674611 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -443,7 +443,7 @@ retry:
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
- &to_intel_crtc(encoder->crtc)->config);
+ to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
@@ -531,7 +531,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- struct intel_plane_config *plane_config = NULL;
+ struct intel_initial_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
@@ -581,7 +581,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
+ cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -592,13 +592,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
- cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
+ cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+ cur_size = intel_fb_align_height(dev, cur_size,
+ plane_config->tiling);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
- intel_crtc->config.adjusted_mode.crtc_hdisplay,
- intel_crtc->config.adjusted_mode.crtc_vdisplay,
+ intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
+ intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel,
cur_size);
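To make the size arithmetic in the hunk above concrete, here is a worked example with illustrative numbers (not taken from the patch): a 1920x1080, 32 bpp, X-tiled BIOS framebuffer with a 7680-byte pitch, assuming an 8-row tile height for the alignment step:

static unsigned int demo_bios_fb_size(void)
{
	unsigned int hdisplay = 1920, vdisplay = 1080, bpp = 32, pitch = 7680;
	unsigned int row_bytes = hdisplay * bpp / 8;	/* 7680: fits the 7680-byte pitch */
	unsigned int rows = (vdisplay + 7) & ~7u;	/* align to 8-row tiles: stays 1080 */

	(void)row_bytes;
	return rows * pitch;				/* 8294400 bytes, just under 8 MiB */
}

If the per-row bytes exceeded the pitch the code in the hunk gives up on that plane; the surrounding code (not all of it visible here) then decides whether the BIOS framebuffer can be reused at all.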
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 77af512d2d35..04e248dd2259 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -341,7 +341,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
}
/**
- * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for
*
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 79f6d72179c5..73cb6e036445 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -157,6 +157,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
intel_psr_invalidate(dev, obj->frontbuffer_bits);
+ intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
}
/**
@@ -182,6 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+ intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
/*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3abc2000fce9..995c5b261f4f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -337,13 +338,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
- intel_crtc->config.cpu_transcoder,
+ intel_crtc->config->cpu_transcoder,
dev_priv);
if (data_reg == 0)
return;
@@ -371,7 +372,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
@@ -436,7 +437,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
}
if (intel_hdmi->rgb_quant_range_selectable) {
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -672,7 +673,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -700,7 +701,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
@@ -711,12 +712,12 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (crtc->config.pipe_bpp > 24)
+ if (crtc->config->pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- if (crtc->config.has_hdmi_sink)
+ if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (HAS_PCH_CPT(dev))
@@ -759,7 +760,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -792,7 +793,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
@@ -802,7 +803,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -814,7 +815,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_crtc->config.has_audio)
+ if (intel_crtc->config->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -845,8 +846,8 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (intel_crtc->config.has_audio) {
- WARN_ON(!intel_crtc->config.has_hdmi_sink);
+ if (intel_crtc->config->has_audio) {
+ WARN_ON(!intel_crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_audio_codec_enable(encoder);
@@ -866,7 +867,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -975,12 +976,12 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
@@ -1252,12 +1253,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
}
@@ -1270,7 +1271,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
@@ -1302,7 +1303,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1467,7 +1468,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
@@ -1593,7 +1594,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1614,7 +1615,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e588376227ea..0f358c5999ec 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -212,8 +212,7 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better),
- * and only when enabled via module parameter.
+ * support for Logical Ring Contexts and Aliasing PPGTT or better).
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
@@ -284,7 +283,6 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
- unsigned long flags;
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
@@ -298,63 +296,17 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
- /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
- * are in progress.
- *
- * The other problem is that we can't just call gen6_gt_force_wake_get()
- * because that function calls intel_runtime_pm_get(), which might sleep.
- * Instead, we do the runtime_pm_get/put when creating/destroying requests.
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->uncore.fw_blittercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_BLITTER);
- }
- } else {
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_ALL);
- }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(RING_ELSP(ring), desc[1]);
I915_WRITE(RING_ELSP(ring), desc[0]);
I915_WRITE(RING_ELSP(ring), desc[3]);
+
/* The context is automatically loaded after the following */
I915_WRITE(RING_ELSP(ring), desc[2]);
/* ELSP is a wo register, so use another nearby reg for posting instead */
POSTING_READ(RING_EXECLIST_STATUS(ring));
-
- /* Release Force Wakeup (see the big comment above). */
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- if (INTEL_INFO(dev)->gen >= 9) {
- if (--dev_priv->uncore.fw_blittercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_BLITTER);
- }
- } else {
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_ALL);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
@@ -405,8 +357,8 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
- struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+ struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
assert_spin_locked(&ring->execlist_lock);
@@ -446,12 +398,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct intel_ctx_submit_request *head_req;
+ struct drm_i915_gem_request *head_req;
assert_spin_locked(&ring->execlist_lock);
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (head_req != NULL) {
@@ -474,13 +426,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
}
/**
- * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
+ * intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+void intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
@@ -535,24 +487,34 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
- u32 tail)
+ u32 tail,
+ struct drm_i915_gem_request *request)
{
- struct intel_ctx_submit_request *req = NULL, *cursor;
+ struct drm_i915_gem_request *cursor;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
int num_elements = 0;
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (req == NULL)
- return -ENOMEM;
- req->ctx = to;
- i915_gem_context_reference(req->ctx);
-
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
- req->ring = ring;
- req->tail = tail;
+ if (!request) {
+ /*
+ * If there isn't a request associated with this submission,
+ * create one as a temporary holder.
+ */
+ WARN(1, "execlist context submission without request");
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+ request->ring = ring;
+ request->ctx = to;
+ } else {
+ WARN_ON(to != request->ctx);
+ }
+ request->tail = tail;
+ i915_gem_request_reference(request);
+ i915_gem_context_reference(request->ctx);
intel_runtime_pm_get(dev_priv);
@@ -563,10 +525,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
break;
if (num_elements > 2) {
- struct intel_ctx_submit_request *tail_req;
+ struct drm_i915_gem_request *tail_req;
tail_req = list_last_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (to == tail_req->ctx) {
@@ -578,7 +540,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
}
}
- list_add_tail(&req->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &ring->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
@@ -587,7 +549,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return 0;
}
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
struct intel_engine_cs *ring = ringbuf->ring;
uint32_t flush_domains;
@@ -597,7 +560,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->emit_flush(ringbuf, ctx,
+ I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
@@ -606,6 +570,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
}
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
struct list_head *vmas)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -633,7 +598,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf);
+ return logical_ring_invalidate_all_caches(ringbuf, ctx);
}
/**
@@ -713,13 +678,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, vmas);
+ ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
if (ret)
return ret;
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -732,7 +697,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+ ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
if (ret)
return ret;
@@ -744,7 +709,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_gem_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct list_head retired_list;
@@ -766,9 +731,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(req->ctx);
+ i915_gem_context_unreference(ctx);
list_del(&req->execlist_link);
- kfree(req);
+ i915_gem_request_unreference(req);
}
}
@@ -794,7 +759,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
struct intel_engine_cs *ring = ringbuf->ring;
int ret;
@@ -802,7 +768,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -819,17 +785,18 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
+void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
- struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
intel_logical_ring_advance(ringbuf);
if (intel_ring_stopped(ring))
return;
- execlists_context_queue(ring, ctx, ringbuf->tail);
+ execlists_context_queue(ring, ctx, ringbuf->tail, request);
}
static int intel_lr_context_pin(struct intel_engine_cs *ring,
@@ -840,11 +807,11 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (ctx->engine[ring->id].unpin_count++ == 0) {
+ if (ctx->engine[ring->id].pin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
- goto reset_unpin_count;
+ goto reset_pin_count;
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
@@ -855,8 +822,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
-reset_unpin_count:
- ctx->engine[ring->id].unpin_count = 0;
+reset_pin_count:
+ ctx->engine[ring->id].pin_count = 0;
return ret;
}
@@ -869,47 +836,55 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--ctx->engine[ring->id].unpin_count == 0) {
+ if (--ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
}
-static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int logical_ring_alloc_request(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
{
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
int ret;
- if (ring->outstanding_lazy_seqno)
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
-
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- if (ctx != ring->default_context) {
- ret = intel_lr_context_pin(ring, ctx);
- if (ret) {
- kfree(request);
- return ret;
- }
+ if (ctx != ring->default_context) {
+ ret = intel_lr_context_pin(ring, ctx);
+ if (ret) {
+ kfree(request);
+ return ret;
}
+ }
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
- request->ctx = ctx;
- i915_gem_context_reference(request->ctx);
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
- ring->preallocated_lazy_request = request;
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ intel_lr_context_unpin(ring, ctx);
+ kfree(request);
+ return ret;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ /* Hold a reference to the context this request belongs to
+ * (we will need it when the time comes to emit/retire the
+ * request).
+ */
+ request->ctx = ctx;
+ i915_gem_context_reference(request->ctx);
+
+ ring->outstanding_lazy_request = request;
+ return 0;
}
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
@@ -917,42 +892,42 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= bytes)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= bytes)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
+ /*
+ * The request queue is per-engine, so it can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
+ */
+ struct intel_context *ctx = request->ctx;
+ if (ctx->engine[ring->id].ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
- return 0;
+ return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
}
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
int bytes)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -966,7 +941,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret;
/* Force the context submission in case we have been skipping it */
- intel_logical_ring_advance_and_submit(ringbuf);
+ intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
@@ -975,13 +950,10 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
do {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= bytes) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= bytes)
break;
- }
msleep(1);
@@ -1004,13 +976,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret;
}
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
if (ringbuf->space < rem) {
- int ret = logical_ring_wait_for_space(ringbuf, rem);
+ int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
if (ret)
return ret;
@@ -1022,23 +995,24 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int bytes)
{
int ret;
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = logical_ring_wrap_buffer(ringbuf);
+ ret = logical_ring_wrap_buffer(ringbuf, ctx);
if (unlikely(ret))
return ret;
}
if (unlikely(ringbuf->space < bytes)) {
- ret = logical_ring_wait_for_space(ringbuf, bytes);
+ ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
if (unlikely(ret))
return ret;
}
@@ -1059,7 +1033,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int num_dwords)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
@@ -1071,12 +1046,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
if (ret)
return ret;
- ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+ ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
/* Preallocate the olr before touching the ring */
- ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+ ret = logical_ring_alloc_request(ring, ctx);
if (ret)
return ret;
@@ -1093,15 +1068,15 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret)
return ret;
- ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+ ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
if (ret)
return ret;
@@ -1115,7 +1090,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret)
return ret;
@@ -1134,6 +1109,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
+ ring->next_context_status_buffer = 0;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1159,22 +1135,19 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
*/
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
-
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return init_workarounds_ring(ring);
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u64 offset, unsigned flags)
{
bool ppgtt = !(flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -1222,6 +1195,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
}
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 unused)
{
@@ -1231,21 +1205,23 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
cmd = MI_FLUSH_DW + 1;
- if (ring == &dev_priv->ring[VCS]) {
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
- MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
- } else {
- if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (ring == &dev_priv->ring[VCS])
+ cmd |= MI_INVALIDATE_BSD;
}
intel_logical_ring_emit(ringbuf, cmd);
@@ -1260,6 +1236,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
}
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1286,7 +1263,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_logical_ring_begin(ringbuf, 6);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 6);
if (ret)
return ret;
@@ -1311,17 +1288,18 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
+static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, 6);
+ ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
if (ret)
return ret;
- cmd = MI_STORE_DWORD_IMM_GEN8;
+ cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
intel_logical_ring_emit(ringbuf, cmd);
@@ -1329,14 +1307,27 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+ intel_logical_ring_emit(ringbuf,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(ringbuf);
+ intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
return 0;
}
+static int gen8_init_rcs_context(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_logical_ring_workarounds_emit(ring, ctx);
+ if (ret)
+ return ret;
+
+ return intel_lr_context_render_state_init(ring, ctx);
+}
+
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
@@ -1354,8 +1345,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1383,18 +1373,11 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
- ring->next_context_status_buffer = 0;
ret = i915_cmd_parser_init_ring(ring);
if (ret)
return ret;
- if (ring->init) {
- ret = ring->init(ring);
- if (ret)
- return ret;
- }
-
ret = intel_lr_context_deferred_create(ring->default_context, ring);
return ret;
@@ -1404,6 +1387,7 @@ static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1415,8 +1399,8 @@ static int logical_render_ring_init(struct drm_device *dev)
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- ring->init = gen8_init_render_ring;
- ring->init_context = intel_logical_ring_workarounds_emit;
+ ring->init_hw = gen8_init_render_ring;
+ ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@@ -1426,7 +1410,12 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
- return logical_ring_init(dev, ring);
+ ring->dev = dev;
+ ret = logical_ring_init(dev, ring);
+ if (ret)
+ return ret;
+
+ return intel_init_pipe_control(ring);
}
static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1442,7 +1431,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1467,7 +1456,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1492,7 +1481,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1517,7 +1506,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1609,6 +1598,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
return 0;
ret = ring->emit_bb_start(ringbuf,
+ ctx,
so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
@@ -1616,7 +1606,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, file, so.obj, NULL);
+ ret = __i915_add_request(ring, file, so.obj);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
@@ -1763,6 +1753,7 @@ void intel_lr_context_free(struct intel_context *ctx)
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
+ WARN_ON(ctx->engine[ring->id].pin_count);
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
@@ -1835,8 +1826,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- if (ctx->engine[ring->id].state)
- return 0;
+ WARN_ON(ctx->engine[ring->id].state);
context_size = round_up(get_lr_context_size(ring), 4096);
@@ -1866,14 +1856,13 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
}
ringbuf->ring = ring;
- ringbuf->FIXME_lrc_ctx = ctx;
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
- ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
@@ -1907,21 +1896,17 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
-
- if (ring->id == RCS && !ctx->rcs_initialized) {
+ else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
- if (ret)
+ if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
+ ctx->engine[ring->id].ringbuf = NULL;
+ ctx->engine[ring->id].state = NULL;
+ goto error;
+ }
}
- ret = intel_lr_context_render_state_init(ring, ctx);
- if (ret) {
- DRM_ERROR("Init render state failed: %d\n", ret);
- ctx->engine[ring->id].ringbuf = NULL;
- ctx->engine[ring->id].state = NULL;
- goto error;
- }
ctx->rcs_initialized = true;
}
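The free-space checks that replace the old head/last_retired_head bookkeeping above all come down to circular-buffer arithmetic: how many bytes lie between TAIL and HEAD, modulo the ring size, minus a small reserve so the two pointers never meet exactly. A minimal sketch of that calculation (the 64-byte reserve is an illustrative assumption, not necessarily the driver's constant):

static int demo_ring_space(int head, int tail, int size)
{
	int space = head - tail - 64;	/* keep a small gap between TAIL and HEAD */

	if (space < 0)
		space += size;
	return space;
}

Retiring a queued request moves HEAD up to that request's recorded tail, which is why logical_ring_wait_request() can ask, per request, whether waiting for its completion would free enough space.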
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 14b216b9be7f..6f2d7da594f6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -38,8 +38,12 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx);
+void intel_logical_ring_advance_and_submit(
+ struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -61,7 +65,9 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ int num_dwords);
/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
@@ -83,36 +89,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
-/**
- * struct intel_ctx_submit_request - queued context submission request
- * @ctx: Context to submit to the ELSP.
- * @ring: Engine to submit it to.
- * @tail: how far in the context's ringbuffer this request goes to.
- * @execlist_link: link in the submission queue.
- * @work: workqueue for processing this request in a bottom half.
- * @elsp_submitted: no. of times this request has been sent to the ELSP.
- *
- * The ELSP only accepts two elements at a time, so we queue context/tail
- * pairs on a given queue (ring->execlist_queue) until the hardware is
- * available. The queue serves a double purpose: we also use it to keep track
- * of the up to 2 contexts currently in the hardware (usually one in execution
- * and the other queued up by the GPU): We only remove elements from the head
- * of the queue when the hardware informs us that an element has been
- * completed.
- *
- * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
- */
-struct intel_ctx_submit_request {
- struct intel_context *ctx;
- struct intel_engine_cs *ring;
- u32 tail;
-
- struct list_head execlist_link;
-
- int elsp_submitted;
-};
-
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 14654d628ca4..071b96d6e146 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -32,6 +32,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -93,7 +94,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -115,7 +116,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
@@ -129,7 +130,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@@ -139,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode =
- &crtc->config.adjusted_mode;
+ &crtc->config->base.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -167,7 +168,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= crtc->config.gmch_pfit.lvds_border_bits;
+ temp |= crtc->config->gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -190,7 +191,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (crtc->config.dither && crtc->config.pipe_bpp == 18)
+ if (crtc->config->dither && crtc->config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -277,14 +278,14 @@ intel_lvds_mode_valid(struct drm_connector *connector,
}
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
unsigned int lvds_bpp;
@@ -531,7 +532,9 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index dc2f4f26c961..f93dfc174495 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -182,7 +182,7 @@ struct intel_overlay {
u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
/* flip handling */
- uint32_t last_flip_req;
+ struct drm_i915_gem_request *last_flip_req;
void (*flip_tail)(struct intel_overlay *);
};
@@ -217,17 +217,19 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(ring, &overlay->last_flip_req);
+ i915_gem_request_assign(&overlay->last_flip_req,
+ ring->outstanding_lazy_request);
+ ret = i915_add_request(ring);
if (ret)
return ret;
overlay->flip_tail = tail;
- ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
- overlay->last_flip_req = 0;
+ i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
@@ -286,7 +288,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
- return i915_add_request(ring, &overlay->last_flip_req);
+ WARN_ON(overlay->last_flip_req);
+ i915_gem_request_assign(&overlay->last_flip_req,
+ ring->outstanding_lazy_request);
+ return i915_add_request(ring);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -361,23 +366,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- if (overlay->last_flip_req == 0)
+ if (overlay->last_flip_req == NULL)
return 0;
- ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
- overlay->last_flip_req = 0;
+ i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
@@ -392,6 +394,8 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
@@ -422,6 +426,22 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return 0;
}
+void intel_overlay_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay = dev_priv->overlay;
+
+ if (!overlay)
+ return;
+
+ intel_overlay_release_old_vid(overlay);
+
+ overlay->last_flip_req = NULL;
+ overlay->old_xscale = 0;
+ overlay->old_yscale = 0;
+ overlay->crtc = NULL;
+ overlay->active = false;
+}
+
struct put_image_params {
int format;
short dst_x;
@@ -836,7 +856,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
return -EINVAL;
/* can't use the overlay with double wide pipe */
- if (crtc->config.double_wide)
+ if (crtc->config->double_wide)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..d8686ce89160 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -98,13 +98,13 @@ intel_find_panel_downclock(struct drm_device *dev,
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_display_mode *adjusted_mode;
int x, y, width, height;
- adjusted_mode = &pipe_config->adjusted_mode;
+ adjusted_mode = &pipe_config->base.adjusted_mode;
x = y = width = height = 0;
@@ -223,10 +223,10 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
-static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -243,11 +243,11 @@ static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
-static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -301,14 +301,14 @@ static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
}
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode;
- adjusted_mode = &pipe_config->adjusted_mode;
+ adjusted_mode = &pipe_config->base.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 964b28e3c630..24d77ddcc5f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,17 +52,6 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-/* FBC, or Frame Buffer Compression, is a technique employed to compress the
- * framebuffer contents in-memory, aiming at reducing the required bandwidth
- * during in-memory transfers and, therefore, reduce the power packet.
- *
- * The benefits of FBC are mostly visible with solid backgrounds and
- * variation-less patterns.
- *
- * FBC-related functionality can be enabled by the means of the
- * i915.i915_enable_fbc parameter
- */
-
static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -87,614 +76,6 @@ static void gen9_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int i;
- u32 fbc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev))
- cfb_pitch = (cfb_pitch / 32) - 1;
- else
- cfb_pitch = (cfb_pitch / 64) - 1;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- if (IS_GEN4(dev)) {
- u32 fbc_ctl2;
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
- }
-
- /* enable it... */
- fbc_ctl = I915_READ(FBC_CONTROL);
- fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
- cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * this param on other platforms for now */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
-
- switch (dev_priv->fbc.threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
- dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev))
- dpfc_ctl |= obj->fence_reg;
-
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void gen7_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
-
- switch (dev_priv->fbc.threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
-
- dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-
- if (dev_priv->fbc.false_color)
- dpfc_ctl |= FBC_CTL_FALSE_COLOR;
-
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_IVYBRIDGE(dev)) {
- /* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- } else {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
- I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
- HSW_FBCQ_DIS);
- }
-
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
-
- sandybridge_blit_fbc_update(dev);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->fbc.enabled;
-}
-
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc.fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->primary->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc);
-
- dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
- dev_priv->fbc.y = work->crtc->y;
- }
-
- dev_priv->fbc.fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc.fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc.fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc.fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake up (because the work was already running and waiting
- * for our mutex), it will discover that it is no longer
- * necessary to run.
- */
- dev_priv->fbc.fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL) {
- DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->primary->fb;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc.fbc_work = work;
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.plane = -1;
-}
-
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
- enum no_fbc_reason reason)
-{
- if (dev_priv->fbc.no_fbc_reason == reason)
- return false;
-
- dev_priv->fbc.no_fbc_reason = reason;
- return true;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel multiply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= max_hdisplay in width, max_vdisplay in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- const struct drm_display_mode *adjusted_mode;
- unsigned int max_width, max_height;
-
- if (!HAS_FBC(dev)) {
- set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
- return;
- }
-
- if (!i915.powersave) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- return;
- }
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- for_each_crtc(dev, tmp_crtc) {
- if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
- if (crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->primary->fb;
- obj = intel_fb_obj(fb);
- adjusted_mode = &intel_crtc->config.adjusted_mode;
-
- if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
- goto out_disable;
- }
- if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- goto out_disable;
- }
- if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- goto out_disable;
- }
-
- if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
- max_width = 4096;
- max_height = 4096;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- max_width = 4096;
- max_height = 2048;
- } else {
- max_width = 2048;
- max_height = 1536;
- }
- if (intel_crtc->config.pipe_src_w > max_width ||
- intel_crtc->config.pipe_src_h > max_height) {
- if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- goto out_disable;
- }
- if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
- intel_crtc->plane != PLANE_A) {
- if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not A, disabling compression\n");
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- goto out_disable;
- }
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- if (i915_gem_stolen_setup_compression(dev, obj->base.size,
- drm_format_plane_cpp(fb->pixel_format, 0))) {
- if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- goto out_disable;
- }
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->fbc.plane == intel_crtc->plane &&
- dev_priv->fbc.fb_id == fb->base.id &&
- dev_priv->fbc.y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc);
- dev_priv->fbc.no_fbc_reason = FBC_OK;
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
- i915_gem_stolen_cleanup_compression(dev);
-}
-
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1157,7 +538,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int clock;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -1226,10 +607,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
@@ -1313,10 +694,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1347,7 +728,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
int entries;
- int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
if (WARN(clock == 0, "Pixel clock is zero!\n"))
return false;
@@ -1677,10 +1058,10 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(crtc)->config.adjusted_mode;
+ &to_intel_crtc(crtc)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1762,7 +1143,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev))
cpp = 4;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1784,7 +1165,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev))
cpp = 4;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1823,10 +1204,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(enabled)->config.adjusted_mode;
+ &to_intel_crtc(enabled)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1879,7 +1260,7 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
@@ -1898,17 +1279,17 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
+ pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- if (intel_crtc->config.pch_pfit.enabled) {
+ if (intel_crtc->config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
+ uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
- pipe_w = intel_crtc->config.pipe_src_w;
- pipe_h = intel_crtc->config.pipe_src_h;
+ pipe_w = intel_crtc->config->pipe_src_w;
+ pipe_h = intel_crtc->config->pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -2261,7 +1642,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
if (!intel_crtc_active(crtc))
@@ -2521,11 +1902,11 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
return;
p->active = true;
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
- p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
p->cur.horiz_pixels = intel_crtc->cursor_width;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
@@ -3174,10 +2555,10 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
}
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
/* TODO: Take into account the scalers once we support them */
- return config->adjusted_mode.crtc_clock;
+ return config->base.adjusted_mode.crtc_clock;
}
/*
@@ -3265,8 +2646,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->active = intel_crtc_active(crtc);
if (p->active) {
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
- p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
+ p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
+ p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
/*
* For now, assume primary and cursor planes are always enabled.
@@ -3274,8 +2655,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel =
crtc->primary->fb->bits_per_pixel / 8;
- p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
- p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
+ p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->cursor.enabled = true;
p->cursor.bytes_per_pixel = 4;
@@ -3286,7 +2667,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
+ if (intel_plane->pipe == pipe &&
+ plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
@@ -3621,9 +3003,8 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
skl_wm_flush_pipe(dev_priv, pipe, 2);
intel_wait_for_vblank(dev, pipe);
+ reallocated[pipe] = true;
}
-
- reallocated[pipe] = true;
}
/*
@@ -4363,16 +3744,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
- /* IVB and SNB hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- */
- if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
- mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (IS_GEN8(dev_priv->dev))
- mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return ~mask;
+ return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
/* gen6_set_rps is called to update the frequency request, but should also be
@@ -4427,8 +3799,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- /* Latest VLV doesn't need to force the gfx clock */
- if (dev->pdev->revision >= 0xd) {
+ /* CHV and latest VLV don't need to force the gfx clock */
+ if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
return;
}
@@ -4441,7 +3813,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
return;
/* Mask turbo interrupts so that they will not come in between */
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
vlv_force_gfx_clock(dev_priv, true);
@@ -4466,9 +3839,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_CHERRYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -4510,7 +3881,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
dev_priv->rps.cur_freq = val;
- trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
+ trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void gen9_disable_rps(struct drm_device *dev)
@@ -4518,6 +3889,7 @@ static void gen9_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN9_PG_ENABLE, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -4541,11 +3913,11 @@ static void valleyview_disable_rps(struct drm_device *dev)
/* we're doing forcewake before disabling RC6,
* This is what the BIOS expects when going into suspend */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -4633,7 +4005,10 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
&ddcc_status);
if (0 == ret)
dev_priv->rps.efficient_freq =
- (ddcc_status >> 8) & 0xff;
+ clamp_t(u8,
+ ((ddcc_status >> 8) & 0xff),
+ dev_priv->rps.min_freq,
+ dev_priv->rps.max_freq);
}
/* Preserve min/max settings in case of re-init */
@@ -4651,9 +4026,39 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
}
}
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ gen6_init_rps_frequencies(dev);
+
+ I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
+ I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
+ I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
+ I915_WRITE(GEN6_PMINTRMSK, 0x6);
+ I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ gen6_enable_rps_interrupts(dev);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen9_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0;
int unused;
@@ -4663,7 +4068,7 @@ static void gen9_enable_rps(struct drm_device *dev)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4677,6 +4082,10 @@ static void gen9_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+ /* 2c: Program Coarse Power Gating Policies. */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
+
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
@@ -4686,7 +4095,10 @@ static void gen9_enable_rps(struct drm_device *dev)
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ /* 3b: Enable Coarse Power Gating only when RC6 is enabled */
+ I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 3 : 0);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4702,7 +4114,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4769,7 +4181,7 @@ static void gen8_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -4797,7 +4209,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
@@ -4877,7 +4289,7 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_device *dev)
@@ -4964,11 +4376,35 @@ void gen6_update_ring_freq(struct drm_device *dev)
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+ switch (INTEL_INFO(dev)->eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
+ break;
+ case 12:
+ /* (2 * 6) config */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+ /* Setting (2 * 8) Min RP0 for any other combination */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
+ break;
+ }
+ rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
+ } else {
+ /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+ }
return rp0;
}
@@ -4984,20 +4420,36 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rp1;
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
-
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+ rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
+ } else {
+ /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_MAX_FREQ_MASK);
+ }
return rp1;
}
static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rpn;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
+ rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
+ FB_GFX_FREQ_FUSE_MASK);
+ } else { /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
+ }
+
return rpn;
}
@@ -5168,22 +4620,22 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
/* Preserve min/max settings in case of re-init */
@@ -5237,22 +4689,22 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
WARN_ONCE((dev_priv->rps.max_freq |
@@ -5296,7 +4748,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2a: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -5307,7 +4762,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ /* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -5321,11 +4777,12 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
/* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
@@ -5333,14 +4790,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* WaDisablePwrmtrEvent:chv (pre-production hw) */
- I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
- I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
-
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
@@ -5355,16 +4808,16 @@ static void cherryview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
@@ -5385,15 +4838,18 @@ static void valleyview_enable_rps(struct drm_device *dev)
}
/* If VLV, Forcewake all wells, else re-direct to regular path */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ /* Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -5436,16 +4892,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5681,146 +5137,27 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
struct drm_device *dev = dev_priv->dev;
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
+ const int vd = _pxvid_to_vd(pxvid);
+ const int vm = vd - 1125;
+
if (INTEL_INFO(dev)->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
+ return vm > 0 ? vm : 0;
+
+ return vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -6272,7 +5609,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_enable_rc6(dev);
gen9_enable_rps(dev);
+ __gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -6718,6 +6057,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+ /* WaSampleCChickenBitEnable:hsw */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
+
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -6888,6 +6231,17 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
+ /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -7051,43 +6405,12 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-static void intel_init_fbc(struct drm_i915_private *dev_priv)
-{
- if (!HAS_FBC(dev_priv)) {
- dev_priv->fbc.enabled = false;
- return;
- }
-
- if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev_priv)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
- /* This value was pulled out of someone's hat */
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
-
- dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
-}
-
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_init_fbc(dev_priv);
+ intel_fbc_init(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))
@@ -7293,28 +6616,24 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int ret = -1;
-
if (IS_CHERRYVIEW(dev_priv->dev))
- ret = chv_gpu_freq(dev_priv, val);
+ return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
- ret = byt_gpu_freq(dev_priv, val);
-
- return ret;
+ return byt_gpu_freq(dev_priv, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int ret = -1;
-
if (IS_CHERRYVIEW(dev_priv->dev))
- ret = chv_freq_opcode(dev_priv, val);
+ return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
- ret = byt_freq_opcode(dev_priv, val);
-
- return ret;
+ return byt_freq_opcode(dev_priv, val);
+ else
+ return val / GT_FREQUENCY_MULTIPLIER;
}
void intel_pm_setup(struct drm_device *dev)
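
With the rename from vlv_gpu_freq()/vlv_freq_opcode() to intel_gpu_freq()/intel_freq_opcode(), the non-VLV/CHV fallback is just a fixed multiplier, GT_FREQUENCY_MULTIPLIER (assumed to be 50 MHz per opcode step in the sketch below); the CHV and VLV branches depend on the platform clock and are not modelled here:

/* Sketch of the non-VLV/CHV path: frequency <-> RPS opcode via a fixed multiplier. */
#include <assert.h>
#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* assumed value, MHz per opcode step */

static int gpu_freq(int opcode)
{
	return opcode * GT_FREQUENCY_MULTIPLIER;
}

static int freq_opcode(int mhz)
{
	return mhz / GT_FREQUENCY_MULTIPLIER;
}

int main(void)
{
	/* the conversion round-trips for whole opcode steps */
	for (int opcode = 1; opcode <= 24; opcode++)
		assert(freq_opcode(gpu_freq(opcode)) == opcode);

	printf("opcode 12 -> %d MHz\n", gpu_freq(12));
	return 0;
}
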
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 716b8a961eea..b9f40c2e0af7 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -61,14 +61,15 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
-bool intel_psr_is_enabled(struct drm_device *dev)
+static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
- if (!HAS_PSR(dev))
- return false;
-
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ val = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
@@ -78,8 +79,8 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
- u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
@@ -100,7 +101,23 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t val;
+
+	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
+ val = I915_READ(VLV_VSCSDP(pipe));
+ val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
+ val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
+ I915_WRITE(VLV_VSCSDP(pipe), val);
+}
+
+static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
@@ -113,14 +130,20 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+}
+
+static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
+ uint32_t aux_data_reg, aux_ctl_reg;
int precharge = 0x3;
- bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -134,49 +157,96 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
/* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+ if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+
+ aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
+ DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
+ aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
+ DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+ I915_WRITE(aux_data_reg + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
- I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ if (INTEL_INFO(dev)->gen >= 9) {
+ uint32_t val;
+
+ val = I915_READ(aux_ctl_reg);
+ val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
+ val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
+ val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
+ val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ /* Use hardcoded data values for PSR */
+ val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
+ I915_WRITE(aux_ctl_reg, val);
+ } else {
+ I915_WRITE(aux_ctl_reg,
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+ }
}
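
hsw_psr_enable_sink() above stages a fixed DP AUX message - a native write whose address bytes point at DP_SET_POWER (DPCD 0x600) - into the AUX data registers four bytes at a time. The sketch below illustrates the MSB-first packing that intel_dp_pack_aux() is assumed to perform; the trailing message bytes are illustrative placeholders, since the diff truncates the array:

/* Illustrative sketch of MSB-first packing of an AUX message into 32-bit words. */
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for intel_dp_pack_aux(): up to 4 bytes, MSB first */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (int i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

int main(void)
{
	/* first two bytes as in the diff (native write, DPCD 0x600 high byte);
	 * the remaining bytes are made-up payload, not the driver's exact message */
	const uint8_t aux_msg[] = { 0x80, 0x06, 0x00, 0x00, 0x01 };
	uint32_t regs[2] = { 0 };

	for (size_t i = 0; i < sizeof(aux_msg); i += 4)
		regs[i / 4] = pack_aux(&aux_msg[i], sizeof(aux_msg) - i);

	printf("DATA1 = 0x%08x DATA2 = 0x%08x\n", regs[0], regs[1]);
	return 0;
}
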
-static void intel_psr_enable_source(struct intel_dp *intel_dp)
+static void vlv_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
+ I915_WRITE(VLV_PSRCTL(pipe),
+ VLV_EDP_PSR_MODE_SW_TIMER |
+ VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
+ VLV_EDP_PSR_ENABLE);
+}
+
+static void vlv_psr_activate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+	/* Perform the transition from PSR_state 1 to PSR_state 2, that is,
+	 * PSR active with static frame transmission. The hardware is then
+	 * responsible for the transition to PSR_state 3, that is, PSR
+	 * active with no Remote Frame Buffer (RFB) update.
+	 */
+ I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
+ VLV_EDP_PSR_ACTIVE_ENTRY);
+}
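
The VLV/CHV comments in this file refer to PSR by numbered states. Gathered from those comments, a hedged summary of that state machine (the names below are descriptive only and do not exist in the driver):

#include <stdio.h>

/* PSR states as described by the comments in this diff (descriptive names only) */
enum vlv_psr_state {
	PSR_STATE_0_DISABLED,		/* PSR off */
	PSR_STATE_1_INACTIVE,		/* enabled, waiting for activation */
	PSR_STATE_2_ACTIVE_STATIC,	/* active, static frame transmission */
	PSR_STATE_3_ACTIVE_NO_RFB,	/* active, no Remote Frame Buffer update */
	PSR_STATE_4_ACTIVE_SF_UPDATE,	/* active with single frame update (skippable) */
	PSR_STATE_5_EXIT,		/* exiting; HW returns to state 1 */
};

int main(void)
{
	/* transitions driven by the code in this diff: SW 0->1 (enable_source),
	 * SW 1->2 (activate), HW 2->3, SW 3->5 on exit, HW 5->1 */
	printf("enable: %d -> %d -> %d, then hw: -> %d\n",
	       PSR_STATE_0_DISABLED, PSR_STATE_1_INACTIVE,
	       PSR_STATE_2_ACTIVE_STATIC, PSR_STATE_3_ACTIVE_NO_RFB);
	return 0;
}
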
+
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
- uint32_t idle_frames = 1;
+	/* It was recently found that, depending on the panel, the idle frame
+	 * count calculated by the hardware can be off by one. So use the VBT
+	 * value + 1, and at least 2, to be on the safe side.
+ */
+ uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
+ dev_priv->vbt.psr.idle_frames + 1 : 2;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- bool only_standby = false;
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+ if (dev_priv->psr.link_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
- val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -211,27 +281,24 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- /* Below limitations aren't valid for Broadwell */
- if (IS_BROADWELL(dev))
- goto out;
-
- if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
- S3D_ENABLE) {
+ if (IS_HASWELL(dev) &&
+ I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+ S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (IS_HASWELL(dev) &&
+ intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
- out:
dev_priv->psr.source_ok = true;
return true;
}
-static void intel_psr_do_enable(struct intel_dp *intel_dp)
+static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -242,7 +309,14 @@ static void intel_psr_do_enable(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- intel_psr_enable_source(intel_dp);
+ if (HAS_DDI(dev))
+		/* On HSW+, once PSR is enabled on the source it activates by
+		 * itself as soon as the configured idle_frame count is reached,
+		 * so the source is only enabled here, at activation time.
+		 */
+ hsw_psr_enable_source(intel_dp);
+ else
+ vlv_psr_activate(intel_dp);
dev_priv->psr.active = true;
}
@@ -278,39 +352,79 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (!intel_psr_match_conditions(intel_dp))
goto unlock;
+	/* First check the VBT, but respect known sink and source
+	 * restrictions */
+ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+ if ((intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) ||
+ (IS_BROADWELL(dev) && intel_dig_port->port != PORT_A))
+ dev_priv->psr.link_standby = true;
+
dev_priv->psr.busy_frontbuffer_bits = 0;
- intel_psr_setup_vsc(intel_dp);
+ if (HAS_DDI(dev)) {
+ hsw_psr_setup_vsc(intel_dp);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ /* Enable PSR on the panel */
+ hsw_psr_enable_sink(intel_dp);
- /* Enable PSR on the panel */
- intel_psr_enable_sink(intel_dp);
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_psr_activate(intel_dp);
+ } else {
+ vlv_psr_setup_vsc(intel_dp);
+
+ /* Enable PSR on the panel */
+ vlv_psr_enable_sink(intel_dp);
+
+		/* On HSW+, enabling the source also means entering the PSR
+		 * entry/active state as soon as the idle_frame count is
+		 * reached, which would be too soon here. On VLV, however,
+		 * enable_source only enables PSR and leaves it inactive, so
+		 * it can be done before the active transition, i.e. here.
+		 */
+ vlv_psr_enable_source(intel_dp);
+ }
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-/**
- * intel_psr_disable - Disable PSR
- * @intel_dp: Intel DP
- *
- * This function needs to be called before disabling pipe.
- */
-void intel_psr_disable(struct intel_dp *intel_dp)
+static void vlv_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t val;
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
+ if (dev_priv->psr.active) {
+ /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
+ if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 1))
+ WARN(1, "PSR transition took longer than expected\n");
+
+ val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ val &= ~VLV_EDP_PSR_ENABLE;
+ val &= ~VLV_EDP_PSR_MODE_MASK;
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
}
+}
+
+static void hsw_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
@@ -325,6 +439,30 @@ void intel_psr_disable(struct intel_dp *intel_dp)
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ if (HAS_DDI(dev))
+ hsw_psr_disable(intel_dp);
+ else
+ vlv_psr_disable(intel_dp);
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
@@ -337,18 +475,27 @@ static void intel_psr_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
	/* We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled and be
	 * ready for re-enable.
*/
- if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
+ if (HAS_DDI(dev_priv->dev)) {
+ if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
+ } else {
+ if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
}
-
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
@@ -363,7 +510,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_do_enable(intel_dp);
+ intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
@@ -371,17 +518,47 @@ unlock:
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u32 val;
- if (dev_priv->psr.active) {
- u32 val = I915_READ(EDP_PSR_CTL(dev));
+ if (!dev_priv->psr.active)
+ return;
+
+ if (HAS_DDI(dev)) {
+ val = I915_READ(EDP_PSR_CTL(dev));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
dev_priv->psr.active = false;
+ } else {
+ val = I915_READ(VLV_PSRCTL(pipe));
+
+		/* Transition directly from PSR_state 3 to PSR_state 5, since
+		 * PSR_state 4 (active with single frame update) can be
+		 * skipped. From PSR_state 5 (PSR exit) the hardware is
+		 * responsible for transitioning back to PSR_state 1, PSR
+		 * inactive - the same state as after
+		 * vlv_edp_psr_enable_source.
+		 */
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ I915_WRITE(VLV_PSRCTL(pipe), val);
+
+		/* Send an AUX wake up - the spec says that after transitioning
+		 * to PSR active we must send an AUX wake up by writing 01h to
+		 * DPCD 600h of the sink device.
+		 * XXX: This might slow down the transition, but without it the
+		 * HW never completes the transition to PSR_state 1 and the
+		 * screen is never updated.
+ */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
}
+ dev_priv->psr.active = false;
}
/**
@@ -459,6 +636,15 @@ void intel_psr_flush(struct drm_device *dev,
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);
+ /*
+	 * On Valleyview and Cherryview hardware tracking is not used, so
+	 * plane updates and cursor moves do not result in a PSR
+	 * invalidation. This means it has to be faked in software for all
+	 * flushes, not just when a preceding invalidation through
+	 * frontbuffer rendering has been seen. */
+ if (!HAS_DDI(dev))
+ intel_psr_exit(dev);
+
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 56c1429d8a60..11c8e7b3dd7c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen6_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 419e35a7b0ff..655180646152 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen7_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 78011d73fa9f..95288a34c15d 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
index 875075373807..16a7ec273bd9 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen9_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c7bc93d28d84..e5b3c6dbd467 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -52,16 +52,27 @@ intel_ring_initialized(struct intel_engine_cs *ring)
int __intel_ring_space(int head, int tail, int size)
{
- int space = head - (tail + I915_RING_FREE_SPACE);
- if (space < 0)
+ int space = head - tail;
+ if (space <= 0)
space += size;
- return space;
+ return space - I915_RING_FREE_SPACE;
+}
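
The reworked __intel_ring_space() treats head == tail as an empty (rather than full) ring and subtracts the reserved slack only at the end. A standalone model of the calculation, assuming a 64-byte reservation (the actual I915_RING_FREE_SPACE value is not visible in this hunk):

/* Standalone model of the reworked ring-space calculation. */
#include <assert.h>
#include <stdio.h>

#define RING_FREE_SPACE 64	/* assumed slack kept between tail and head */

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;		/* tail has wrapped past head */
	return space - RING_FREE_SPACE;
}

int main(void)
{
	/* tail near the end, head near the start: space wraps around */
	printf("%d\n", ring_space(64, 960, 1024));	/* (64 - 960 + 1024) - 64 = 64 */

	/* head == tail now reports an almost-empty ring, not a full one */
	assert(ring_space(256, 256, 1024) == 1024 - RING_FREE_SPACE);
	return 0;
}
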
+
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+{
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+ }
+
+ ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
+ ringbuf->tail, ringbuf->size);
}
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
- return __intel_ring_space(ringbuf->head & HEAD_ADDR,
- ringbuf->tail, ringbuf->size);
+ intel_ring_update_space(ringbuf);
+ return ringbuf->space;
}
bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -528,7 +539,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (!stop_ring(ring)) {
/* G45 ring initialization often fails to reset head to zero */
@@ -592,15 +603,15 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
+ ringbuf->last_retired_head = -1;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
@@ -627,8 +638,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
- if (ring->scratch.obj)
- return 0;
+ WARN_ON(ring->scratch.obj);
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
@@ -672,7 +682,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@@ -703,6 +713,22 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
}
+static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_ring_workarounds_emit(ring, ctx);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+
+ return ret;
+}
+
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
@@ -762,11 +788,24 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
	 * workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
+ /* WaForceEnableNonCoherent:bdw */
+ /* WaHdcDisableFetchWhenMasked:bdw */
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+ * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+ * polygons in the same 8x4 pixel/sample area to be processed without
+ * stalling waiting for the earlier ones to write to Hierarchical Z
+ * buffer."
+ *
+ * This optimization is off by default for Broadwell; turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
/* Wa4x4STCOptimizationDisable:bdw */
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
@@ -807,6 +846,30 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
+ /* According to the CACHE_MODE_0 default value documentation, some
+ * CHV platforms disable this optimization by default. Turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:chv */
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /* Improve HiZ throughput on CHV. */
+ WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
return 0;
}
@@ -861,12 +924,6 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
- }
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -918,17 +975,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
@@ -956,16 +1016,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
@@ -994,9 +1057,11 @@ static int gen6_signal(struct intel_engine_cs *signaller,
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
+ u32 seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
}
}
@@ -1031,7 +1096,8 @@ gen6_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1149,7 +1215,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1168,7 +1235,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@@ -1408,7 +1476,8 @@ i9xx_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1789,15 +1858,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf;
int ret;
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
+ WARN_ON(ring->buffer);
+
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
@@ -1820,21 +1889,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- if (ringbuf->obj == NULL) {
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
- ring->name, ret);
- goto error;
- }
+ WARN_ON(ringbuf->obj);
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
- intel_destroy_ringbuffer_obj(ringbuf);
- goto error;
- }
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1849,10 +1918,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
- ret = ring->init(ring);
- if (ret)
- goto error;
-
return 0;
error:
@@ -1877,8 +1942,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1895,38 +1959,27 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= n)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
- if (__intel_ring_space(request->tail, ringbuf->tail,
+ if (__intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size) >= n) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
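
The rewritten intel_ring_wait_request() drops the separate found-seqno flag and instead relies on the standard kernel idiom that, when list_for_each_entry() runs to completion, the cursor's embedded list node points back at the list head - hence the '&request->list == &ring->request_list' check meaning "no request frees enough space". A minimal userspace model of that idiom, using simplified macros rather than the kernel's own:

/* Minimal model of the "loop ran off the end" check used above. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct request { int space_freed; struct list_head list; };

int main(void)
{
	struct request reqs[3] = {
		{ .space_freed = 16 }, { .space_freed = 32 }, { .space_freed = 48 },
	};
	struct list_head head = { &reqs[0].list, &reqs[2].list };
	struct request *req;
	int needed = 64;

	/* hand-wire a three-entry circular list around 'head' */
	reqs[0].list = (struct list_head){ &reqs[1].list, &head };
	reqs[1].list = (struct list_head){ &reqs[2].list, &reqs[0].list };
	reqs[2].list = (struct list_head){ &head, &reqs[1].list };

	list_for_each_entry(req, &head, list)
		if (req->space_freed >= needed)
			break;

	if (&req->list == &head)
		printf("no request frees enough space (the -ENOSPC case)\n");
	else
		printf("would wait on the request freeing %d bytes\n",
		       req->space_freed);
	return 0;
}
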
@@ -1952,14 +2005,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
trace_i915_ring_wait_begin(ring);
do {
+ if (intel_ring_space(ringbuf) >= n)
+ break;
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= n)
break;
- }
msleep(1);
@@ -2000,19 +2053,19 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_seqno) {
- ret = i915_add_request(ring, NULL);
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring);
if (ret)
return ret;
}
@@ -2021,30 +2074,39 @@ int intel_ring_idle(struct intel_engine_cs *ring)
if (list_empty(&ring->request_list))
return 0;
- seqno = list_entry(ring->request_list.prev,
+ req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
- list)->seqno;
+ list);
- return i915_wait_seqno(ring, seqno);
+ return i915_wait_request(req);
}
static int
-intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+intel_ring_alloc_request(struct intel_engine_cs *ring)
{
- if (ring->outstanding_lazy_seqno)
+ int ret;
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
+
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
- ring->preallocated_lazy_request = request;
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ kfree(request);
+ return ret;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ ring->outstanding_lazy_request = request;
+ return 0;
}
static int __intel_ring_prepare(struct intel_engine_cs *ring,
@@ -2084,7 +2146,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
return ret;
/* Preallocate the olr before touching the ring */
- ret = intel_ring_alloc_seqno(ring);
+ ret = intel_ring_alloc_request(ring);
if (ret)
return ret;
@@ -2119,7 +2181,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- BUG_ON(ring->outstanding_lazy_seqno);
+ BUG_ON(ring->outstanding_lazy_request);
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -2178,6 +2240,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
+
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
@@ -2185,8 +2255,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
- MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
@@ -2282,6 +2352,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
+
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
@@ -2289,8 +2367,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
+ cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
@@ -2341,7 +2418,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
}
- ring->init_context = intel_ring_workarounds_emit;
+ ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2426,7 +2503,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
+ ring->init_hw = init_render_ring;
ring->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
@@ -2448,7 +2525,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- return intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, ring);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = intel_init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
@@ -2519,7 +2606,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2558,7 +2645,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2615,7 +2702,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2666,7 +2753,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fe426cff598b..714f3fdd57d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -99,13 +99,6 @@ struct intel_ringbuffer {
struct intel_engine_cs *ring;
- /*
- * FIXME: This backpointer is an artifact of the history of how the
- * execlist patches came into being. It will get removed once the basic
- * code has landed.
- */
- struct intel_context *FIXME_lrc_ctx;
-
u32 head;
u32 tail;
int space;
@@ -123,6 +116,8 @@ struct intel_ringbuffer {
u32 last_retired_head;
};
+struct intel_context;
+
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
@@ -142,11 +137,11 @@ struct intel_engine_cs {
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- u32 trace_irq_seqno;
+ struct drm_i915_gem_request *trace_irq_req;
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
- int (*init)(struct intel_engine_cs *ring);
+ int (*init_hw)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
@@ -239,11 +234,14 @@ struct intel_engine_cs {
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
- int (*emit_request)(struct intel_ringbuffer *ringbuf);
+ int (*emit_request)(struct intel_ringbuffer *ringbuf,
+ struct drm_i915_gem_request *request);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u64 offset, unsigned flags);
/**
@@ -251,7 +249,7 @@ struct intel_engine_cs {
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -267,8 +265,7 @@ struct intel_engine_cs {
/**
* Do we have some not yet emitted requests outstanding?
*/
- struct drm_i915_gem_request *preallocated_lazy_request;
- u32 outstanding_lazy_seqno;
+ struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
@@ -408,6 +405,7 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
@@ -436,16 +434,11 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}
-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
-{
- BUG_ON(ring->outstanding_lazy_seqno == 0);
- return ring->outstanding_lazy_seqno;
-}
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
+static inline struct drm_i915_gem_request *
+intel_ring_get_request(struct intel_engine_cs *ring)
{
- if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
- ring->trace_irq_seqno = seqno;
+ BUG_ON(ring->outstanding_lazy_request == NULL);
+ return ring->outstanding_lazy_request;
}
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ac6da7102fbb..49695d7d51e3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -31,7 +31,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
-#include <drm/i915_powerwell.h>
/**
* DOC: runtime pm
@@ -50,8 +49,6 @@
* present for a given platform.
*/
-static struct i915_power_domains *hsw_pwr;
-
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
@@ -118,7 +115,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
/**
- * intel_display_power_is_enabled - unlocked check for a power domain
+ * intel_display_power_is_enabled - check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
@@ -706,6 +703,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -727,24 +728,30 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_PIPE_A_POWER_DOMAINS ( \
@@ -764,20 +771,25 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
@@ -1071,10 +1083,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
- hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
- hsw_pwr = power_domains;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1118,8 +1128,6 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev_priv, true);
-
- hsw_pwr = NULL;
}
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
@@ -1328,52 +1336,3 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
pm_runtime_put_autosuspend(device);
}
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
-
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_release_power_well);
-
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
-
- return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6d7a277458b5..64ad2b40179f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -1007,7 +1008,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
}
if (intel_sdvo->rgb_quant_range_selectable) {
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -1085,7 +1086,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return true;
}
-static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
@@ -1112,11 +1113,11 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
}
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- struct drm_display_mode *mode = &pipe_config->requested_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->base.mode;
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
@@ -1181,8 +1182,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &crtc->config.requested_mode;
+ &crtc->config->base.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1224,7 +1225,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (crtc->config.has_hdmi_sink) {
+ if (crtc->config->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
@@ -1244,7 +1245,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (crtc->config.pixel_multiplier) {
+ switch (crtc->config->pixel_multiplier) {
default:
		WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1259,7 +1260,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+ if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1289,7 +1290,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config->pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
@@ -1338,7 +1339,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1370,7 +1371,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1392,7 +1393,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1617,6 +1618,9 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct drm_device *dev = intel_sdvo->base.base.dev;
uint16_t hotplug;
+ if (!I915_HAS_HOTPLUG(dev))
+ return 0;
+
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
@@ -2187,7 +2191,9 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 01d841ea3140..693ce8281970 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -75,26 +75,26 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
return 0;
}
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
}
-void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRRDDA_NP, reg, &val);
return val;
@@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRWRDA_NP, reg, &val);
}
@@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
@@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRWRDA_NP, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7d9c340f7693..0a52c44ad03d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
+#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -79,7 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
@@ -255,7 +256,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
default:
BUG();
}
- if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+ if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
plane_ctl |= PLANE_CTL_ENABLE;
@@ -412,8 +413,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- u32 start_vbl_count;
- bool atomic_update;
sprctl = I915_READ(SPCNTR(pipe, plane));
@@ -494,7 +493,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
@@ -502,8 +501,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
intel_update_primary_plane(intel_crtc);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
@@ -525,9 +522,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprsurf_offset);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -539,10 +533,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
- u32 start_vbl_count;
- bool atomic_update;
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -553,9 +543,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
-
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
@@ -626,8 +613,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- u32 start_vbl_count;
- bool atomic_update;
sprctl = I915_READ(SPRCTL(pipe));
@@ -699,7 +684,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW do this automagically in hardware */
@@ -711,8 +696,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
intel_update_primary_plane(intel_crtc);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -735,9 +718,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -748,10 +728,6 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- u32 start_vbl_count;
- bool atomic_update;
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -764,16 +740,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
-
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
- intel_wait_for_vblank(dev, pipe);
-
- intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
static int
@@ -846,8 +818,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- u32 start_vbl_count;
- bool atomic_update;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -914,7 +884,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
@@ -922,8 +892,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
intel_update_primary_plane(intel_crtc);
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -941,9 +909,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -954,10 +919,6 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- u32 start_vbl_count;
- bool atomic_update;
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -969,19 +930,25 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
-
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
- intel_wait_for_vblank(dev, pipe);
-
- intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
-static void
+/**
+ * intel_post_enable_primary - Perform operations after enabling primary plane
+ * @crtc: the CRTC whose primary plane was just enabled
+ *
+ * Performs potentially sleeping operations that must be done after the primary
+ * plane is enabled, such as updating FBC and IPS. Note that this may be
+ * called due to an explicit primary plane update, or due to an implicit
+ * re-enable that is caused when a sprite plane is updated to no longer
+ * completely hide the primary plane.
+ */
+void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1004,11 +971,21 @@ intel_post_enable_primary(struct drm_crtc *crtc)
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
-static void
+/**
+ * intel_pre_disable_primary - Perform operations before disabling primary plane
+ * @crtc: the CRTC whose primary plane is to be disabled
+ *
+ * Performs potentially sleeping operations that must be done before the
+ * primary plane is disabled, such as updating FBC and IPS. Note that this may
+ * be called due to an explicit primary plane update, or due to an implicit
+ * disable that is caused when a sprite plane completely hides the primary
+ * plane.
+ */
+void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1017,7 +994,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
/*
@@ -1096,20 +1073,26 @@ static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *src = &state->src;
struct drm_rect *dst = &state->dst;
- struct drm_rect *orig_src = &state->orig_src;
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ int pixel_size;
+
+ intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
+
+ if (!fb) {
+ state->visible = false;
+ goto finish;
+ }
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -1142,7 +1125,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
- intel_plane->rotation);
+ state->base.rotation);
hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
@@ -1183,13 +1166,13 @@ intel_check_sprite_plane(struct drm_plane *plane,
drm_rect_height(dst) * vscale - drm_rect_height(src));
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
- intel_plane->rotation);
+ state->base.rotation);
/* sanity check to make sure the src viewport wasn't enlarged */
- WARN_ON(src->x1 < (int) orig_src->x1 ||
- src->y1 < (int) orig_src->y1 ||
- src->x2 > (int) orig_src->x2 ||
- src->y2 > (int) orig_src->y2);
+ WARN_ON(src->x1 < (int) state->base.src_x ||
+ src->y1 < (int) state->base.src_y ||
+ src->x2 > (int) state->base.src_x + state->base.src_w ||
+ src->y2 > (int) state->base.src_y + state->base.src_h);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -1232,6 +1215,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (src_w < 3 || src_h < 3)
state->visible = false;
+ pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
@@ -1254,39 +1238,27 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
- return 0;
-}
+finish:
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ state->hides_primary = fb != NULL && drm_rect_equals(dst, clip) &&
+ !colorkey_enabled(intel_plane);
+ WARN_ON(state->hides_primary && !state->visible && intel_crtc->active);
-static int
-intel_prepare_sprite_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *fb = state->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
- int ret;
+ if (intel_crtc->active) {
+ if (intel_crtc->primary_enabled == state->hides_primary)
+ intel_crtc->atomic.wait_for_flips = true;
- if (old_obj != obj) {
- mutex_lock(&dev->struct_mutex);
+ if (intel_crtc->primary_enabled && state->hides_primary)
+ intel_crtc->atomic.pre_disable_primary = true;
- /* Note that this will apply the VT-d workaround for scanouts,
- * which is more restrictive than required for sprites. (The
- * primary plane requires 256KiB alignment with 64 PTE padding,
- * the sprite planes only require 128KiB alignment and 32 PTE
- * padding.
- */
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
+
+ if (!intel_crtc->primary_enabled && !state->hides_primary)
+ intel_crtc->atomic.post_enable_primary = true;
}
return 0;
@@ -1296,48 +1268,23 @@ static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_crtc *crtc = state->base.crtc;
+ struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
- struct drm_rect *dst = &state->dst;
- const struct drm_rect *clip = &state->clip;
- bool primary_enabled;
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
- WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
-
- intel_plane->crtc_x = state->orig_dst.x1;
- intel_plane->crtc_y = state->orig_dst.y1;
- intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
- intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
- intel_plane->src_x = state->orig_src.x1;
- intel_plane->src_y = state->orig_src.y1;
- intel_plane->src_w = drm_rect_width(&state->orig_src);
- intel_plane->src_h = drm_rect_height(&state->orig_src);
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
+ plane->fb = state->base.fb;
intel_plane->obj = obj;
if (intel_crtc->active) {
- bool primary_was_enabled = intel_crtc->primary_enabled;
-
- intel_crtc->primary_enabled = primary_enabled;
-
- if (primary_was_enabled != primary_enabled)
- intel_crtc_wait_for_pending_flips(crtc);
-
- if (primary_was_enabled && !primary_enabled)
- intel_pre_disable_primary(crtc);
+ intel_crtc->primary_enabled = !state->hides_primary;
if (state->visible) {
crtc_x = state->dst.x1;
@@ -1354,129 +1301,9 @@ intel_commit_sprite_plane(struct drm_plane *plane,
} else {
intel_plane->disable_plane(plane, crtc);
}
-
-
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
-
- if (!primary_was_enabled && primary_enabled)
- intel_post_enable_primary(crtc);
- }
-
- /* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj && old_obj != obj) {
-
- /*
- * It's fairly common to simply update the position of
- * an existing object. In that case, we don't need to
- * wait for vblank to avoid ugliness, we only need to
- * do the pin & ref bookkeeping.
- */
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
}
}
-static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct intel_plane_state state;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int ret;
-
- state.crtc = crtc;
- state.fb = fb;
-
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
-
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
-
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
- state.orig_src = state.src;
- state.orig_dst = state.dst;
-
- ret = intel_check_sprite_plane(plane, &state);
- if (ret)
- return ret;
-
- ret = intel_prepare_sprite_plane(plane, &state);
- if (ret)
- return ret;
-
- intel_commit_sprite_plane(plane, &state);
- return 0;
-}
-
-static int
-intel_disable_plane(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc;
- enum pipe pipe;
-
- if (!plane->fb)
- return 0;
-
- if (WARN_ON(!plane->crtc))
- return -EINVAL;
-
- intel_crtc = to_intel_crtc(plane->crtc);
- pipe = intel_crtc->pipe;
-
- if (intel_crtc->active) {
- bool primary_was_enabled = intel_crtc->primary_enabled;
-
- intel_crtc->primary_enabled = true;
-
- intel_plane->disable_plane(plane, plane->crtc);
-
- if (!primary_was_enabled && intel_crtc->primary_enabled)
- intel_post_enable_primary(plane->crtc);
- }
-
- if (intel_plane->obj) {
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_plane->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_plane->obj);
- i915_gem_track_fb(intel_plane->obj, NULL,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
-
- intel_plane->obj = NULL;
- }
-
- return 0;
-}
-
-static void intel_destroy_plane(struct drm_plane *plane)
-{
- struct intel_plane *intel_plane = to_intel_plane(plane);
- intel_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(intel_plane);
-}
-
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1535,62 +1362,18 @@ out_unlock:
return ret;
}
-int intel_plane_set_property(struct drm_plane *plane,
- struct drm_property *prop,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- uint64_t old_val;
- int ret = -ENOENT;
-
- if (prop == dev->mode_config.rotation_property) {
- /* exactly one rotation angle please */
- if (hweight32(val & 0xf) != 1)
- return -EINVAL;
-
- if (intel_plane->rotation == val)
- return 0;
-
- old_val = intel_plane->rotation;
- intel_plane->rotation = val;
- ret = intel_plane_restore(plane);
- if (ret)
- intel_plane->rotation = old_val;
- }
-
- return ret;
-}
-
int intel_plane_restore(struct drm_plane *plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane);
-
if (!plane->crtc || !plane->fb)
return 0;
return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
- intel_plane->crtc_x, intel_plane->crtc_y,
- intel_plane->crtc_w, intel_plane->crtc_h,
- intel_plane->src_x, intel_plane->src_y,
- intel_plane->src_w, intel_plane->src_h);
-}
-
-void intel_plane_disable(struct drm_plane *plane)
-{
- if (!plane->crtc || !plane->fb)
- return;
-
- intel_disable_plane(plane);
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h,
+ plane->state->src_x, plane->state->src_y,
+ plane->state->src_w, plane->state->src_h);
}
-static const struct drm_plane_funcs intel_plane_funcs = {
- .update_plane = intel_update_plane,
- .disable_plane = intel_disable_plane,
- .destroy = intel_destroy_plane,
- .set_property = intel_plane_set_property,
-};
-
static uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1638,6 +1421,7 @@ int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
struct intel_plane *intel_plane;
+ struct intel_plane_state *state;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
@@ -1650,6 +1434,13 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (!intel_plane)
return -ENOMEM;
+ state = intel_create_plane_state(&intel_plane->base);
+ if (!state) {
+ kfree(intel_plane);
+ return -ENOMEM;
+ }
+ intel_plane->base.state = &state->base;
+
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
@@ -1719,7 +1510,8 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
- intel_plane->rotation = BIT(DRM_ROTATE_0);
+ intel_plane->check_plane = intel_check_sprite_plane;
+ intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
@@ -1739,7 +1531,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (dev->mode_config.rotation_property)
drm_object_attach_property(&intel_plane->base.base,
dev->mode_config.rotation_property,
- intel_plane->rotation);
+ state->base.rotation);
+
+ drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
out:
return ret;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6f5f59b880f5..892d23c8479d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -31,6 +31,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -908,14 +909,14 @@ intel_tv_mode_valid(struct drm_connector *connector,
static void
intel_tv_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -923,12 +924,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
return false;
- pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
+ pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
/* TV has its own notion of sync and other mode flags, so clear them. */
- pipe_config->adjusted_mode.flags = 0;
+ pipe_config->base.adjusted_mode.flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want
@@ -1512,7 +1513,9 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 46de8d75b4bf..c47a3baa53d5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,8 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include <linux/pm_runtime.h>
+
#define FORCEWAKE_ACK_TIMEOUT_MS 2
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
@@ -40,6 +42,26 @@
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+static const char * const forcewake_domain_names[] = {
+ "render",
+ "blitter",
+ "media",
+};
+
+const char *
+intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
+{
+ BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
+ FW_DOMAIN_ID_COUNT);
+
+ if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
+ return forcewake_domain_names[id];
+
+ WARN_ON(id);
+
+ return "unknown";
+}
+
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
@@ -47,73 +69,128 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
"Device suspended\n");
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
- /* w/a for a sporadic read returning 0 by waiting for the GT
- * thread to wake up.
- */
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
- GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
+ WARN_ON(d->reg_set == 0);
+ __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+ mod_timer_pinned(&d->timer, jiffies + 1);
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static inline void
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+ DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
+ intel_uncore_forcewake_domain_to_str(d->id));
+}
- __raw_i915_write32(dev_priv, FORCEWAKE, 1);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+static inline void
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+{
+ __raw_i915_write32(d->i915, d->reg_set, d->val_set);
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+static inline void
+fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+{
+ if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+ DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
+ intel_uncore_forcewake_domain_to_str(d->id));
+}
- /* WaRsForcewakeWaitTC0:snb */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+static inline void
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+{
+ __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
-static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
+ /* something from same cacheline, but not from the set register */
+ if (d->reg_post)
+ __raw_posting_read(d->i915, d->reg_post);
}
-static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void
+fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
- u32 forcewake_ack;
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_MT_ACK;
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ fw_domain_wait_ack_clear(d);
+ fw_domain_get(d);
+ fw_domain_wait_ack(d);
+ }
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+static void
+fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- __raw_i915_write32(dev_priv, FORCEWAKE_MT,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ fw_domain_put(d);
+ fw_domain_posting_read(d);
+ }
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+static void
+fw_domains_posting_read(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- /* WaRsForcewakeWaitTC0:ivb,hsw */
+ /* No need to do this for every domain, the first one found is enough */
+ for_each_fw_domain(d, dev_priv, id) {
+ fw_domain_posting_read(d);
+ break;
+ }
+}
+
+static void
+fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
+
+ if (dev_priv->uncore.fw_domains == 0)
+ return;
+
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+ fw_domain_reset(d);
+
+ fw_domains_posting_read(dev_priv);
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ fw_domains_get(dev_priv, fw_domains);
+
+ /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -126,27 +203,13 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+ fw_domains_put(dev_priv, fw_domains);
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_MT,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
-
- if (IS_GEN7(dev_priv->dev))
- gen6_gt_check_fifodbg(dev_priv);
-}
-
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
@@ -174,332 +237,78 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(0xffff));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
-}
-
-static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_VLV) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Render to ack.\n");
- }
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for media to ack.\n");
- }
-}
-
-static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
-
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* something from same cacheline, but !FORCEWAKE_VLV */
- __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
- if (!IS_CHERRYVIEW(dev_priv->dev))
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+static void intel_uncore_fw_release_timer(unsigned long arg)
{
+ struct intel_uncore_forcewake_domain *domain = (void *)arg;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (fw_engine & FORCEWAKE_RENDER &&
- dev_priv->uncore.fw_rendercount++ != 0)
- fw_engine &= ~FORCEWAKE_RENDER;
- if (fw_engine & FORCEWAKE_MEDIA &&
- dev_priv->uncore.fw_mediacount++ != 0)
- fw_engine &= ~FORCEWAKE_MEDIA;
-
- if (fw_engine)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (fw_engine & FORCEWAKE_RENDER) {
- WARN_ON(!dev_priv->uncore.fw_rendercount);
- if (--dev_priv->uncore.fw_rendercount != 0)
- fw_engine &= ~FORCEWAKE_RENDER;
- }
-
- if (fw_engine & FORCEWAKE_MEDIA) {
- WARN_ON(!dev_priv->uncore.fw_mediacount);
- if (--dev_priv->uncore.fw_mediacount != 0)
- fw_engine &= ~FORCEWAKE_MEDIA;
- }
-
- if (fw_engine)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-}
-
-static void
-__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_RENDER_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_RENDER_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Render to ack.\n");
- }
+ assert_device_not_suspended(domain->i915);
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Media to ack.\n");
- }
+ spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ if (WARN_ON(domain->wake_count == 0))
+ domain->wake_count++;
- /* Check for Blitter Engine */
- if (FORCEWAKE_BLITTER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_BLITTER_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_BLITTER_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
- }
-}
+ if (--domain->wake_count == 0)
+ domain->i915->uncore.funcs.force_wake_put(domain->i915,
+ 1 << domain->id);
-static void
-__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* Check for Blitter Engine */
- if (FORCEWAKE_BLITTER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}
-static void
-gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ struct intel_uncore_forcewake_domain *domain;
+ int retry_count = 100;
+ enum forcewake_domain_id id;
+ enum forcewake_domains fw = 0, active_domains;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (FORCEWAKE_RENDER & fw_engine) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- }
-
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly. Wait until all pending
+ * timers are run before holding.
+ */
+ while (1) {
+ active_domains = 0;
- if (FORCEWAKE_BLITTER & fw_engine) {
- if (dev_priv->uncore.fw_blittercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_BLITTER);
- }
+ for_each_fw_domain(domain, dev_priv, id) {
+ if (del_timer_sync(&domain->timer) == 0)
+ continue;
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
+ intel_uncore_fw_release_timer((unsigned long)domain);
+ }
-static void
-gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ for_each_fw_domain(domain, dev_priv, id) {
+ if (timer_pending(&domain->timer))
+ active_domains |= (1 << id);
+ }
- if (FORCEWAKE_RENDER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_rendercount == 0);
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- }
+ if (active_domains == 0)
+ break;
- if (FORCEWAKE_MEDIA & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_mediacount == 0);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ if (--retry_count == 0) {
+ DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+ break;
+ }
- if (FORCEWAKE_BLITTER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_blittercount == 0);
- if (--dev_priv->uncore.fw_blittercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_BLITTER);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ cond_resched();
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void gen6_force_wake_timer(unsigned long arg)
-{
- struct drm_i915_private *dev_priv = (void *)arg;
- unsigned long irqflags;
-
- assert_device_not_suspended(dev_priv);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- WARN_ON(!dev_priv->uncore.forcewake_count);
-
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
- if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
- gen6_force_wake_timer((unsigned long)dev_priv);
-
- /* Hold uncore.lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(active_domains);
- if (IS_VALLEYVIEW(dev))
- vlv_force_wake_reset(dev_priv);
- else if (IS_GEN6(dev) || IS_GEN7(dev))
- __gen6_gt_force_wake_reset(dev_priv);
+ for_each_fw_domain(domain, dev_priv, id)
+ if (domain->wake_count)
+ fw |= 1 << id;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- __gen7_gt_force_wake_mt_reset(dev_priv);
+ if (fw)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
- if (IS_GEN9(dev))
- __gen9_gt_force_wake_mt_reset(dev_priv);
+ fw_domains_reset(dev_priv, FORCEWAKE_ALL);
if (restore) { /* If reset with a user forcewake, try to restore */
- unsigned fw = 0;
-
- if (IS_VALLEYVIEW(dev)) {
- if (dev_priv->uncore.fw_rendercount)
- fw |= FORCEWAKE_RENDER;
-
- if (dev_priv->uncore.fw_mediacount)
- fw |= FORCEWAKE_MEDIA;
- } else if (IS_GEN9(dev)) {
- if (dev_priv->uncore.fw_rendercount)
- fw |= FORCEWAKE_RENDER;
-
- if (dev_priv->uncore.fw_mediacount)
- fw |= FORCEWAKE_MEDIA;
-
- if (dev_priv->uncore.fw_blittercount)
- fw |= FORCEWAKE_BLITTER;
- } else {
- if (dev_priv->uncore.forcewake_count)
- fw = FORCEWAKE_ALL;
- }
-
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
@@ -509,17 +318,16 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
GT_FIFO_FREE_ENTRIES_MASK;
}
+ if (!restore)
+ assert_forcewakes_inactive(dev_priv);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
- bool restore_forcewake)
+static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
@@ -530,6 +338,15 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
+}
+
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -551,81 +368,92 @@ void intel_uncore_sanitize(struct drm_device *dev)
intel_disable_gt_powersave(dev);
}
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
+/**
+ * intel_uncore_forcewake_get - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * This function can be used to get GT's forcewake domain references.
+ * Normal register access will handle the forcewake domains automatically.
+ * However, if some sequence requires the GT to not power down a particular
+ * forcewake domain, this function should be called at the beginning of the
+ * sequence, and the reference should subsequently be dropped by a symmetric
+ * call to intel_uncore_forcewake_put(). Usually the caller wants all the
+ * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
unsigned long irqflags;
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- intel_runtime_pm_get(dev_priv);
+ WARN_ON(dev_priv->pm.suspended);
- /* Redirect to Gen9 specific routine */
- if (IS_GEN9(dev_priv->dev))
- return gen9_force_wake_get(dev_priv, fw_engine);
-
- /* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev))
- return vlv_force_wake_get(dev_priv, fw_engine);
+ fw_domains &= dev_priv->uncore.fw_domains;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count++)
+ fw_domains &= ~(1 << id);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-/*
- * see gen6_gt_force_wake_get()
+/**
+ * intel_uncore_forcewake_put - release a forcewake domain reference
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references
+ *
+ * This function drops the device-level forcewakes for specified
+ * domains obtained by intel_uncore_forcewake_get().
*/
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- bool delayed = false;
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
- /* Redirect to Gen9 specific routine */
- if (IS_GEN9(dev_priv->dev)) {
- gen9_force_wake_put(dev_priv, fw_engine);
- goto out;
- }
+ fw_domains &= dev_priv->uncore.fw_domains;
- /* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev)) {
- vlv_force_wake_put(dev_priv, fw_engine);
- goto out;
- }
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (WARN_ON(domain->wake_count == 0))
+ continue;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- WARN_ON(!dev_priv->uncore.forcewake_count);
+ if (--domain->wake_count)
+ continue;
- if (--dev_priv->uncore.forcewake_count == 0) {
- dev_priv->uncore.forcewake_count++;
- delayed = true;
- mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
- jiffies + 1);
+ domain->wake_count++;
+ fw_domain_arm_timer(domain);
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-out:
- if (!delayed)
- intel_runtime_pm_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- WARN_ON(dev_priv->uncore.forcewake_count > 0);
+ for_each_fw_domain(domain, dev_priv, id)
+ WARN_ON(domain->wake_count);
}
/* We give fast paths for the really cool registers */
@@ -647,9 +475,9 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE800))
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
@@ -658,17 +486,14 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1C000) || \
REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+ REG_RANGE((reg), 0x30000, 0x38000))
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x4000, 0x5000) || \
REG_RANGE((reg), 0x8000, 0x8300) || \
REG_RANGE((reg), 0x8500, 0x8600) || \
REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xC000, 0xC800) || \
- REG_RANGE((reg), 0xF000, 0x10000) || \
- REG_RANGE((reg), 0x14000, 0x14400) || \
- REG_RANGE((reg), 0x22000, 0x24000))
+ REG_RANGE((reg), 0xF000, 0x10000))
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
@@ -740,96 +565,118 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
}
}
-#define REG_READ_HEADER(x) \
- unsigned long irqflags; \
+#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- assert_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ assert_device_not_suspended(dev_priv);
-#define REG_READ_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-#define __gen4_read(x) \
+#define __gen2_read(x) \
static u##x \
-gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ GEN2_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
- REG_READ_FOOTER; \
+ GEN2_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+ GEN2_READ_HEADER(x); \
ilk_dummy_write(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
- REG_READ_FOOTER; \
+ GEN2_READ_FOOTER; \
+}
+
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen2_read(8)
+__gen2_read(16)
+__gen2_read(32)
+__gen2_read(64)
+
+#undef __gen5_read
+#undef __gen2_read
+
+#undef GEN2_READ_FOOTER
+#undef GEN2_READ_HEADER
+
+#define GEN6_READ_HEADER(x) \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_READ_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val
+
+static inline void __force_wake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
+ if (WARN_ON(!fw_domains))
+ return;
+
+ /* Ideally GCC would constant-fold and eliminate this loop */
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count) {
+ fw_domains &= ~(1 << id);
+ continue;
+ }
+
+ domain->wake_count++;
+ fw_domain_arm_timer(domain);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+ GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
- if (dev_priv->uncore.forcewake_count == 0 && \
- NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- val = __raw_i915_read##x(dev_priv, reg); \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- unsigned fwengine = 0; \
- REG_READ_HEADER(x); \
- if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ GEN6_READ_HEADER(x); \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- unsigned fwengine = 0; \
- REG_READ_HEADER(x); \
- if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ GEN6_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, \
+ FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
@@ -838,33 +685,22 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
- if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } else { \
- unsigned fwengine = 0; \
- if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } else { \
- if (dev_priv->uncore.fw_blittercount == 0) \
- fwengine = FORCEWAKE_BLITTER; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- } \
- REG_READ_FOOTER; \
+ enum forcewake_domains fw_engine; \
+ GEN6_READ_HEADER(x); \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
+ fw_engine = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ fw_engine = FORCEWAKE_BLITTER; \
+ if (fw_engine) \
+ __force_wake_get(dev_priv, fw_engine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ GEN6_READ_FOOTER; \
}
__gen9_read(8)
@@ -883,55 +719,66 @@ __gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-__gen5_read(8)
-__gen5_read(16)
-__gen5_read(32)
-__gen5_read(64)
-__gen4_read(8)
-__gen4_read(16)
-__gen4_read(32)
-__gen4_read(64)
#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
-#undef __gen5_read
-#undef __gen4_read
-#undef REG_READ_FOOTER
-#undef REG_READ_HEADER
+#undef GEN6_READ_FOOTER
+#undef GEN6_READ_HEADER
-#define REG_WRITE_HEADER \
- unsigned long irqflags; \
+#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-#define REG_WRITE_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+#define GEN2_WRITE_FOOTER
-#define __gen4_write(x) \
+#define __gen2_write(x) \
static void \
-gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ GEN2_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
- REG_WRITE_FOOTER; \
+ GEN2_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+ GEN2_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
- REG_WRITE_FOOTER; \
+ GEN2_WRITE_FOOTER; \
}
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen2_write(8)
+__gen2_write(16)
+__gen2_write(32)
+__gen2_write(64)
+
+#undef __gen5_write
+#undef __gen2_write
+
+#undef GEN2_WRITE_FOOTER
+#undef GEN2_WRITE_HEADER
+
+#define GEN6_WRITE_HEADER \
+ unsigned long irqflags; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_WRITE_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -939,14 +786,14 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -957,7 +804,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
@@ -984,50 +831,31 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
- if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- __raw_i915_write##x(dev_priv, reg, val); \
- } \
+ if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ __raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- unsigned fwengine = 0; \
bool shadowed = is_gen8_shadowed(dev_priv, reg); \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (!shadowed) { \
- if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
} \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
static const u32 gen9_shadowed_regs[] = {
@@ -1057,36 +885,23 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
bool trace) { \
- REG_WRITE_HEADER; \
- if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
- is_gen9_shadowed(dev_priv, reg)) { \
- __raw_i915_write##x(dev_priv, reg, val); \
- } else { \
- unsigned fwengine = 0; \
- if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } else { \
- if (dev_priv->uncore.fw_blittercount == 0) \
- fwengine = FORCEWAKE_BLITTER; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- fwengine); \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- fwengine); \
- } \
- REG_WRITE_FOOTER; \
+ enum forcewake_domains fw_engine; \
+ GEN6_WRITE_HEADER; \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+ is_gen9_shadowed(dev_priv, reg)) \
+ fw_engine = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ fw_engine = FORCEWAKE_BLITTER; \
+ if (fw_engine) \
+ __force_wake_get(dev_priv, fw_engine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
}
__gen9_write(8)
@@ -1109,24 +924,14 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
-__gen5_write(8)
-__gen5_write(16)
-__gen5_write(32)
-__gen5_write(64)
-__gen4_write(8)
-__gen4_write(16)
-__gen4_write(32)
-__gen4_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
-#undef __gen5_write
-#undef __gen4_write
-#undef REG_WRITE_FOOTER
-#undef REG_WRITE_HEADER
+#undef GEN6_WRITE_FOOTER
+#undef GEN6_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
@@ -1144,24 +949,86 @@ do { \
dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
-void intel_uncore_init(struct drm_device *dev)
+
+static void fw_domain_init(struct drm_i915_private *dev_priv,
+ enum forcewake_domain_id domain_id,
+ u32 reg_set, u32 reg_ack)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore_forcewake_domain *d;
- setup_timer(&dev_priv->uncore.force_wake_timer,
- gen6_force_wake_timer, (unsigned long)dev_priv);
+ if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+ return;
- __intel_uncore_early_sanitize(dev, false);
+ d = &dev_priv->uncore.fw_domain[domain_id];
+
+ WARN_ON(d->wake_count);
+
+ d->wake_count = 0;
+ d->reg_set = reg_set;
+ d->reg_ack = reg_ack;
+
+ if (IS_GEN6(dev_priv)) {
+ d->val_reset = 0;
+ d->val_set = FORCEWAKE_KERNEL;
+ d->val_clear = 0;
+ } else {
+ d->val_reset = _MASKED_BIT_DISABLE(0xffff);
+ d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+ d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+ }
+
+ if (IS_VALLEYVIEW(dev_priv))
+ d->reg_post = FORCEWAKE_ACK_VLV;
+ else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
+ d->reg_post = ECOBUS;
+ else
+ d->reg_post = 0;
+
+ d->i915 = dev_priv;
+ d->id = domain_id;
+
+ setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+
+ dev_priv->uncore.fw_domains |= (1 << domain_id);
+
+ fw_domain_reset(d);
+}
+
+static void intel_uncore_fw_domains_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+ return;
if (IS_GEN9(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_ACK_RENDER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_ACK_BLITTER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ if (!IS_CHERRYVIEW(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
@@ -1174,35 +1041,54 @@ void intel_uncore_init(struct drm_device *dev)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+
+ /* We need to init first for ECOBUS access and then
+ * determine later if we want to reinit, in case MT access is
+ * not working
+ */
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+
mutex_lock(&dev->struct_mutex);
- __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+ fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+ fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen7_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen7_gt_force_wake_mt_put;
- } else {
+ if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE, FORCEWAKE_ACK);
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
+ fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ fw_domains_put_with_fifo;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE, FORCEWAKE_ACK);
}
+ /* All future platforms are expected to require complex power gating */
+ WARN_ON(dev_priv->uncore.fw_domains == 0);
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_ellc_detect(dev);
+ intel_uncore_fw_domains_init(dev);
+ __intel_uncore_early_sanitize(dev, false);
+
switch (INTEL_INFO(dev)->gen) {
default:
- WARN_ON(1);
+ MISSING_CASE(INTEL_INFO(dev)->gen);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
@@ -1239,8 +1125,8 @@ void intel_uncore_init(struct drm_device *dev)
case 4:
case 3:
case 2:
- ASSIGN_WRITE_MMIO_VFUNCS(gen4);
- ASSIGN_READ_MMIO_VFUNCS(gen4);
+ ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+ ASSIGN_READ_MMIO_VFUNCS(gen2);
break;
}
@@ -1300,7 +1186,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
reg->val = I915_READ8(reg->offset);
break;
default:
- WARN_ON(1);
+ MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}
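
The intel_uncore.c hunks above replace the driver's single global forcewake_count with per-domain wake_count reference counting plus a timer-deferred release: a put that drops the last reference re-takes it and arms a timer instead of releasing the hardware immediately, and the read/write paths only touch the hardware for domains whose count was zero. The stand-alone C sketch below is a hypothetical model of that pattern only; the names and the release_pending flag standing in for the timer are illustrative, not the i915 API.

#include <stdio.h>

enum fw_domain_id { FW_RENDER, FW_MEDIA, FW_BLITTER, FW_COUNT };

struct fw_domain {
	unsigned int wake_count;
	int release_pending;		/* models the armed release timer */
};

static struct fw_domain domains[FW_COUNT];

/* take a reference on every requested domain; wake hardware on 0 -> 1 */
static void fw_get(unsigned int mask)
{
	for (int id = 0; id < FW_COUNT; id++) {
		if (!(mask & (1u << id)))
			continue;
		if (domains[id].wake_count++ == 0)
			printf("domain %d: wake hardware\n", id);
	}
}

/* drop a reference; the last one is handed to the "timer" instead */
static void fw_put(unsigned int mask)
{
	for (int id = 0; id < FW_COUNT; id++) {
		if (!(mask & (1u << id)))
			continue;
		if (--domains[id].wake_count == 0) {
			domains[id].wake_count++;	/* keep a ref for the timer */
			domains[id].release_pending = 1;
		}
	}
}

/* what the deferred timer callback would do */
static void fw_timer_expired(int id)
{
	if (domains[id].release_pending && --domains[id].wake_count == 0) {
		domains[id].release_pending = 0;
		printf("domain %d: release hardware\n", id);
	}
}

int main(void)
{
	fw_get(1u << FW_RENDER);
	fw_put(1u << FW_RENDER);	/* release is deferred, not immediate */
	fw_timer_expired(FW_RENDER);	/* now the domain really goes to sleep */
	return 0;
}

Running the model prints one wake and one release for the render domain, which shows why back-to-back get/put pairs inside the timer window would not toggle the hardware.
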
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index ab31848e92cf..33cdddf26684 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -5,7 +5,7 @@ config DRM_IMX
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
@@ -49,6 +49,7 @@ config DRM_IMX_IPUV3
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
+ select DRM_DW_HDMI
depends on DRM_IMX
help
Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 582c438d8cbd..f3ecd8903d97 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -9,4 +9,4 @@ obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
-obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o
+obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
new file mode 100644
index 000000000000..121d30ca2d44
--- /dev/null
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -0,0 +1,258 @@
+/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * derived from imx-hdmi.c (renamed to bridge/dw_hdmi.c now)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <video/imx-ipu-v3.h>
+#include <linux/regmap.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "imx-drm.h"
+
+struct imx_hdmi {
+ struct device *dev;
+ struct drm_encoder encoder;
+ struct regmap *regmap;
+};
+
+static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
+ {
+ 45250000, {
+ { 0x01e0, 0x0000 },
+ { 0x21e1, 0x0000 },
+ { 0x41e2, 0x0000 }
+ },
+ }, {
+ 92500000, {
+ { 0x0140, 0x0005 },
+ { 0x2141, 0x0005 },
+ { 0x4142, 0x0005 },
+ },
+ }, {
+ 148500000, {
+ { 0x00a0, 0x000a },
+ { 0x20a1, 0x000a },
+ { 0x40a2, 0x000a },
+ },
+ }, {
+ ~0UL, {
+ { 0x00a0, 0x000a },
+ { 0x2001, 0x000f },
+ { 0x4002, 0x000f },
+ },
+ }
+};
+
+static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ {
+ 54000000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 58400000, { 0x091c, 0x06dc, 0x06dc },
+ }, {
+ 72000000, { 0x06dc, 0x06dc, 0x091c },
+ }, {
+ 74250000, { 0x06dc, 0x0b5c, 0x091c },
+ }, {
+ 118800000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 216000000, { 0x06dc, 0x0b5c, 0x091c },
+ }
+};
+
+static const struct dw_hdmi_sym_term imx_sym_term[] = {
+ /* pixelclk symbol term */
+ { 148500000, 0x800d, 0x0005 },
+ { ~0UL, 0x0000, 0x0000 }
+};
+
+static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
+{
+ struct device_node *np = hdmi->dev->of_node;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(hdmi->dev, "Unable to get gpr\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ return 0;
+}
+
+static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static bool dw_hdmi_imx_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+}
+
+static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
+
+ regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
+ IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
+ mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
+}
+
+static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
+{
+ imx_drm_panel_format(encoder, V4L2_PIX_FMT_RGB24);
+}
+
+static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
+ .mode_fixup = dw_hdmi_imx_encoder_mode_fixup,
+ .mode_set = dw_hdmi_imx_encoder_mode_set,
+ .prepare = dw_hdmi_imx_encoder_prepare,
+ .commit = dw_hdmi_imx_encoder_commit,
+ .disable = dw_hdmi_imx_encoder_disable,
+};
+
+static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
+ .mpll_cfg = imx_mpll_cfg,
+ .cur_ctr = imx_cur_ctr,
+ .sym_term = imx_sym_term,
+ .dev_type = IMX6Q_HDMI,
+};
+
+static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
+ .mpll_cfg = imx_mpll_cfg,
+ .cur_ctr = imx_cur_ctr,
+ .sym_term = imx_sym_term,
+ .dev_type = IMX6DL_HDMI,
+};
+
+static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6q-hdmi",
+ .data = &imx6q_hdmi_drv_data
+ }, {
+ .compatible = "fsl,imx6dl-hdmi",
+ .data = &imx6dl_hdmi_drv_data
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
+
+static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct dw_hdmi_plat_data *plat_data;
+ const struct of_device_id *match;
+ struct drm_device *drm = data;
+ struct drm_encoder *encoder;
+ struct imx_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
+ plat_data = match->data;
+ hdmi->dev = &pdev->dev;
+ encoder = &hdmi->encoder;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -ENXIO;
+
+ platform_set_drvdata(pdev, hdmi);
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ ret = dw_hdmi_imx_parse_dt(hdmi);
+ if (ret < 0)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+}
+
+static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ return dw_hdmi_unbind(dev, master, data);
+}
+
+static const struct component_ops dw_hdmi_imx_ops = {
+ .bind = dw_hdmi_imx_bind,
+ .unbind = dw_hdmi_imx_unbind,
+};
+
+static int dw_hdmi_imx_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dw_hdmi_imx_ops);
+}
+
+static int dw_hdmi_imx_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dw_hdmi_imx_ops);
+
+ return 0;
+}
+
+static struct platform_driver dw_hdmi_imx_platform_driver = {
+ .probe = dw_hdmi_imx_probe,
+ .remove = dw_hdmi_imx_remove,
+ .driver = {
+ .name = "dwhdmi-imx",
+ .of_match_table = dw_hdmi_imx_dt_ids,
+ },
+};
+
+module_platform_driver(dw_hdmi_imx_platform_driver);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("IMX6 Specific DW-HDMI Driver Extension");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dwhdmi-imx");
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index b250130debc8..a002f53aab0e 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_of.h>
#include "imx-drm.h"
@@ -46,7 +47,6 @@ struct imx_drm_crtc {
struct drm_crtc *crtc;
int pipe;
struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs;
- struct device_node *port;
};
static int legacyfb_depth = 16;
@@ -116,8 +116,7 @@ int imx_drm_panel_format_pins(struct drm_encoder *encoder,
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
return helper->set_interface_pix_fmt(encoder->crtc,
- encoder->encoder_type, interface_pix_fmt,
- hsync_pin, vsync_pin);
+ interface_pix_fmt, hsync_pin, vsync_pin);
return 0;
}
EXPORT_SYMBOL_GPL(imx_drm_panel_format_pins);
@@ -365,9 +364,10 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
imx_drm_crtc->pipe = imxdrm->pipes++;
- imx_drm_crtc->port = port;
imx_drm_crtc->crtc = crtc;
+ crtc->port = port;
+
imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc;
*new_crtc = imx_drm_crtc;
@@ -408,33 +408,28 @@ int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
}
EXPORT_SYMBOL_GPL(imx_drm_remove_crtc);
-/*
- * Find the DRM CRTC possible mask for the connected endpoint.
- *
- * The encoder possible masks are defined by their position in the
- * mode_config crtc_list. This means that CRTCs must not be added
- * or removed once the DRM device has been fully initialised.
- */
-static uint32_t imx_drm_find_crtc_mask(struct imx_drm_device *imxdrm,
- struct device_node *endpoint)
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np)
{
- struct device_node *port;
- unsigned i;
+ uint32_t crtc_mask = drm_of_find_possible_crtcs(drm, np);
- port = of_graph_get_remote_port(endpoint);
- if (!port)
- return 0;
- of_node_put(port);
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (crtc_mask == 0)
+ return -EPROBE_DEFER;
- for (i = 0; i < MAX_CRTC; i++) {
- struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[i];
+ encoder->possible_crtcs = crtc_mask;
- if (imx_drm_crtc && imx_drm_crtc->port == port)
- return drm_crtc_mask(imx_drm_crtc->crtc);
- }
+ /* FIXME: this is the mask of outputs which can clone this output. */
+ encoder->possible_clones = ~0;
return 0;
}
+EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
static struct device_node *imx_drm_of_get_next_endpoint(
const struct device_node *parent, struct device_node *prev)
@@ -445,48 +440,6 @@ static struct device_node *imx_drm_of_get_next_endpoint(
return node;
}
-int imx_drm_encoder_parse_of(struct drm_device *drm,
- struct drm_encoder *encoder, struct device_node *np)
-{
- struct imx_drm_device *imxdrm = drm->dev_private;
- struct device_node *ep = NULL;
- uint32_t crtc_mask = 0;
- int i;
-
- for (i = 0; ; i++) {
- u32 mask;
-
- ep = imx_drm_of_get_next_endpoint(np, ep);
- if (!ep)
- break;
-
- mask = imx_drm_find_crtc_mask(imxdrm, ep);
-
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (mask == 0)
- return -EPROBE_DEFER;
-
- crtc_mask |= mask;
- }
-
- of_node_put(ep);
- if (i == 0)
- return -ENOENT;
-
- encoder->possible_crtcs = crtc_mask;
-
- /* FIXME: this is the mask of outputs which can clone this output. */
- encoder->possible_clones = ~0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
-
/*
* @node: device tree node containing encoder input ports
* @encoder: drm_encoder
@@ -510,7 +463,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
port = of_graph_get_remote_port(ep);
of_node_put(port);
- if (port == imx_crtc->port) {
+ if (port == imx_crtc->crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
return ret ? ret : endpoint.port;
}
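
The imx-drm-core.c hunks above drop the driver-local endpoint walk and let drm_of_find_possible_crtcs() build the encoder's possible_crtcs bitmask, deferring the probe when the mask comes back empty because the CRTC has not been registered yet. The short user-space sketch below is a hypothetical model of just that mask/defer decision; the lookup is faked, and only the bit-per-CRTC-index convention and the EPROBE_DEFER error value follow the kernel.

#include <stdint.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel's "try again later" error code */

/* pretend these are the CRTCs found behind the encoder's OF endpoints */
static const int found_crtc_indexes[] = { 0, 2 };
static const unsigned int num_found = 2;

static uint32_t find_possible_crtcs(void)
{
	uint32_t mask = 0;

	for (unsigned int i = 0; i < num_found; i++)
		mask |= 1u << found_crtc_indexes[i];	/* one bit per CRTC index */

	return mask;
}

static int encoder_parse_of(uint32_t *possible_crtcs)
{
	uint32_t mask = find_possible_crtcs();

	if (mask == 0)
		return -EPROBE_DEFER;	/* CRTC not registered yet, retry later */

	*possible_crtcs = mask;
	return 0;
}

int main(void)
{
	uint32_t mask;

	if (encoder_parse_of(&mask) == 0)
		printf("encoder may drive CRTC mask 0x%x\n", mask);

	return 0;
}
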
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 7453ae00c412..3c559ccd6af0 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -17,7 +17,7 @@ int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
- int (*set_interface_pix_fmt)(struct drm_crtc *crtc, u32 encoder_type,
+ int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
u32 pix_fmt, int hsync_pin, int vsync_pin);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index c60460043e24..1b86aac0b341 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -163,7 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- struct drm_display_mode *mode = &encoder->crtc->mode;
+ struct drm_display_mode *mode = &encoder->crtc->hwmode;
u32 pixel_fmt;
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
@@ -241,8 +241,8 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
}
static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
@@ -574,6 +574,8 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
channel->connector.funcs->destroy(&channel->connector);
channel->encoder.funcs->destroy(&channel->encoder);
+
+ kfree(channel->edid);
}
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index a729f4f7074c..4216e479a9be 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -191,10 +191,18 @@ static int tve_setup_vga(struct imx_tve *tve)
/* set gain to (1 + 10/128) to provide 0.7V peak-to-peak amplitude */
ret = regmap_update_bits(tve->regmap, TVE_TVDAC0_CONT_REG,
TVE_TVDAC_GAIN_MASK, 0x0a);
+ if (ret)
+ return ret;
+
ret = regmap_update_bits(tve->regmap, TVE_TVDAC1_CONT_REG,
TVE_TVDAC_GAIN_MASK, 0x0a);
+ if (ret)
+ return ret;
+
ret = regmap_update_bits(tve->regmap, TVE_TVDAC2_CONT_REG,
TVE_TVDAC_GAIN_MASK, 0x0a);
+ if (ret)
+ return ret;
/* set configuration register */
mask = TVE_DATA_SOURCE_MASK | TVE_INP_VIDEO_FORM;
@@ -204,16 +212,12 @@ static int tve_setup_vga(struct imx_tve *tve)
mask |= TVE_TV_OUT_MODE_MASK | TVE_SYNC_CH_0_EN;
val |= TVE_TV_OUT_RGB | TVE_SYNC_CH_0_EN;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, mask, val);
- if (ret < 0) {
- dev_err(tve->dev, "failed to set configuration: %d\n", ret);
+ if (ret)
return ret;
- }
/* set test mode (as documented) */
- ret = regmap_update_bits(tve->regmap, TVE_TST_MODE_REG,
+ return regmap_update_bits(tve->regmap, TVE_TST_MODE_REG,
TVE_TVDAC_TEST_MODE_MASK, 1);
-
- return 0;
}
static enum drm_connector_status imx_tve_connector_detect(
@@ -307,8 +311,8 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
}
static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
{
struct imx_tve *tve = enc_to_tve(encoder);
unsigned long rounded_rate;
@@ -335,9 +339,11 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
}
if (tve->mode == TVE_MODE_VGA)
- tve_setup_vga(tve);
+ ret = tve_setup_vga(tve);
else
- tve_setup_tvout(tve);
+ ret = tve_setup_tvout(tve);
+ if (ret)
+ dev_err(tve->dev, "failed to set configuration: %d\n", ret);
}
static void imx_tve_encoder_commit(struct drm_encoder *encoder)
@@ -671,6 +677,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
/* disable cable detection for VGA mode */
ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0);
+ if (ret)
+ return ret;
ret = imx_tve_register(drm, tve);
if (ret)
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index ebee59cb96d8..98551e356e12 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -46,7 +46,6 @@ struct ipu_crtc {
struct drm_framebuffer *newfb;
int irq;
u32 interface_pix_fmt;
- unsigned long di_clkflags;
int di_hsync_pin;
int di_vsync_pin;
};
@@ -141,47 +140,51 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- int ret;
struct ipu_di_signal_cfg sig_cfg = {};
+ unsigned long encoder_types = 0;
u32 out_pixel_fmt;
+ int ret;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
mode->hdisplay);
dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
mode->vdisplay);
- out_pixel_fmt = ipu_crtc->interface_pix_fmt;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ encoder_types |= BIT(encoder->encoder_type);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- sig_cfg.interlaced = 1;
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- sig_cfg.Hsync_pol = 1;
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- sig_cfg.Vsync_pol = 1;
+ dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
+ __func__, encoder_types);
+
+ /*
+ * If we have DAC, TVDAC or LDB, then we need the IPU DI clock
+ * to be the same as the LDB DI clock.
+ */
+ if (encoder_types & (BIT(DRM_MODE_ENCODER_DAC) |
+ BIT(DRM_MODE_ENCODER_TVDAC) |
+ BIT(DRM_MODE_ENCODER_LVDS)))
+ sig_cfg.clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT;
+ else
+ sig_cfg.clkflags = 0;
+
+ out_pixel_fmt = ipu_crtc->interface_pix_fmt;
sig_cfg.enable_pol = 1;
sig_cfg.clk_pol = 0;
- sig_cfg.width = mode->hdisplay;
- sig_cfg.height = mode->vdisplay;
sig_cfg.pixel_fmt = out_pixel_fmt;
- sig_cfg.h_start_width = mode->htotal - mode->hsync_end;
- sig_cfg.h_sync_width = mode->hsync_end - mode->hsync_start;
- sig_cfg.h_end_width = mode->hsync_start - mode->hdisplay;
-
- sig_cfg.v_start_width = mode->vtotal - mode->vsync_end;
- sig_cfg.v_sync_width = mode->vsync_end - mode->vsync_start;
- sig_cfg.v_end_width = mode->vsync_start - mode->vdisplay;
- sig_cfg.pixelclock = mode->clock * 1000;
- sig_cfg.clkflags = ipu_crtc->di_clkflags;
-
sig_cfg.v_to_h_sync = 0;
-
sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
- ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, sig_cfg.interlaced,
- out_pixel_fmt, mode->hdisplay);
+ drm_display_mode_to_videomode(mode, &sig_cfg.mode);
+
+ ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+ mode->flags & DRM_MODE_FLAG_INTERLACE,
+ out_pixel_fmt, mode->hdisplay);
if (ret) {
dev_err(ipu_crtc->dev,
"initializing display controller failed with %d\n",
@@ -237,6 +240,18 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct videomode vm;
+ int ret;
+
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+ if (ret)
+ return false;
+
+ drm_display_mode_from_videomode(&vm, adjusted_mode);
+
return true;
}
@@ -275,7 +290,7 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
ipu_crtc->newfb = NULL;
}
-static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type,
+static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
u32 pixfmt, int hsync_pin, int vsync_pin)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
@@ -284,19 +299,6 @@ static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type,
ipu_crtc->di_hsync_pin = hsync_pin;
ipu_crtc->di_vsync_pin = vsync_pin;
- switch (encoder_type) {
- case DRM_MODE_ENCODER_DAC:
- case DRM_MODE_ENCODER_TVDAC:
- case DRM_MODE_ENCODER_LVDS:
- ipu_crtc->di_clkflags = IPU_DI_CLKMODE_SYNC |
- IPU_DI_CLKMODE_EXT;
- break;
- case DRM_MODE_ENCODER_TMDS:
- case DRM_MODE_ENCODER_NONE:
- ipu_crtc->di_clkflags = 0;
- break;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 796c3c1c170a..5e83e007080f 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -130,8 +130,8 @@ static void imx_pd_encoder_commit(struct drm_encoder *encoder)
}
static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
{
}
@@ -257,6 +257,8 @@ static void imx_pd_unbind(struct device *dev, struct device *master,
imxpd->encoder.funcs->destroy(&imxpd->encoder);
imxpd->connector.funcs->destroy(&imxpd->connector);
+
+ kfree(imxpd->edid);
}
static const struct component_ops imx_pd_ops = {
@@ -272,6 +274,7 @@ static int imx_pd_probe(struct platform_device *pdev)
static int imx_pd_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &imx_pd_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 4415af3666ab..c36b8304042b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -303,14 +303,22 @@ int mgag200_fbdev_init(struct mga_device *mdev)
if (ret)
return ret;
- drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
- drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
return 0;
+
+fini:
+ drm_fb_helper_fini(&mfbdev->helper);
+ return ret;
}
void mgag200_fbdev_fini(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 5b2a1ff95d3d..bacbbb70f679 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,6 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on OF && COMMON_CLK
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 143d988f8add..674a132fd76e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,7 +1,4 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
- ccflags-y += -Werror
-endif
msm-y := \
adreno/adreno_device.o \
@@ -16,6 +13,12 @@ msm-y := \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
+ edp/edp.o \
+ edp/edp_aux.o \
+ edp/edp_bridge.o \
+ edp/edp_connector.o \
+ edp/edp_ctrl.o \
+ edp/edp_phy.o \
mdp/mdp_format.o \
mdp/mdp_kms.o \
mdp/mdp4/mdp4_crtc.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 22882cc0a573..edc845fffdf4 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 109e9a263daf..e91a739452d7 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -58,111 +58,130 @@ enum a3xx_cache_opcode {
};
enum a3xx_vtx_fmt {
- VFMT_FLOAT_32 = 0,
- VFMT_FLOAT_32_32 = 1,
- VFMT_FLOAT_32_32_32 = 2,
- VFMT_FLOAT_32_32_32_32 = 3,
- VFMT_FLOAT_16 = 4,
- VFMT_FLOAT_16_16 = 5,
- VFMT_FLOAT_16_16_16 = 6,
- VFMT_FLOAT_16_16_16_16 = 7,
- VFMT_FIXED_32 = 8,
- VFMT_FIXED_32_32 = 9,
- VFMT_FIXED_32_32_32 = 10,
- VFMT_FIXED_32_32_32_32 = 11,
- VFMT_SHORT_16 = 16,
- VFMT_SHORT_16_16 = 17,
- VFMT_SHORT_16_16_16 = 18,
- VFMT_SHORT_16_16_16_16 = 19,
- VFMT_USHORT_16 = 20,
- VFMT_USHORT_16_16 = 21,
- VFMT_USHORT_16_16_16 = 22,
- VFMT_USHORT_16_16_16_16 = 23,
- VFMT_NORM_SHORT_16 = 24,
- VFMT_NORM_SHORT_16_16 = 25,
- VFMT_NORM_SHORT_16_16_16 = 26,
- VFMT_NORM_SHORT_16_16_16_16 = 27,
- VFMT_NORM_USHORT_16 = 28,
- VFMT_NORM_USHORT_16_16 = 29,
- VFMT_NORM_USHORT_16_16_16 = 30,
- VFMT_NORM_USHORT_16_16_16_16 = 31,
- VFMT_UINT_32 = 32,
- VFMT_UINT_32_32 = 33,
- VFMT_UINT_32_32_32 = 34,
- VFMT_UINT_32_32_32_32 = 35,
- VFMT_INT_32 = 36,
- VFMT_INT_32_32 = 37,
- VFMT_INT_32_32_32 = 38,
- VFMT_INT_32_32_32_32 = 39,
- VFMT_UBYTE_8 = 40,
- VFMT_UBYTE_8_8 = 41,
- VFMT_UBYTE_8_8_8 = 42,
- VFMT_UBYTE_8_8_8_8 = 43,
- VFMT_NORM_UBYTE_8 = 44,
- VFMT_NORM_UBYTE_8_8 = 45,
- VFMT_NORM_UBYTE_8_8_8 = 46,
- VFMT_NORM_UBYTE_8_8_8_8 = 47,
- VFMT_BYTE_8 = 48,
- VFMT_BYTE_8_8 = 49,
- VFMT_BYTE_8_8_8 = 50,
- VFMT_BYTE_8_8_8_8 = 51,
- VFMT_NORM_BYTE_8 = 52,
- VFMT_NORM_BYTE_8_8 = 53,
- VFMT_NORM_BYTE_8_8_8 = 54,
- VFMT_NORM_BYTE_8_8_8_8 = 55,
- VFMT_UINT_10_10_10_2 = 60,
- VFMT_NORM_UINT_10_10_10_2 = 61,
- VFMT_INT_10_10_10_2 = 62,
- VFMT_NORM_INT_10_10_10_2 = 63,
+ VFMT_32_FLOAT = 0,
+ VFMT_32_32_FLOAT = 1,
+ VFMT_32_32_32_FLOAT = 2,
+ VFMT_32_32_32_32_FLOAT = 3,
+ VFMT_16_FLOAT = 4,
+ VFMT_16_16_FLOAT = 5,
+ VFMT_16_16_16_FLOAT = 6,
+ VFMT_16_16_16_16_FLOAT = 7,
+ VFMT_32_FIXED = 8,
+ VFMT_32_32_FIXED = 9,
+ VFMT_32_32_32_FIXED = 10,
+ VFMT_32_32_32_32_FIXED = 11,
+ VFMT_16_SINT = 16,
+ VFMT_16_16_SINT = 17,
+ VFMT_16_16_16_SINT = 18,
+ VFMT_16_16_16_16_SINT = 19,
+ VFMT_16_UINT = 20,
+ VFMT_16_16_UINT = 21,
+ VFMT_16_16_16_UINT = 22,
+ VFMT_16_16_16_16_UINT = 23,
+ VFMT_16_SNORM = 24,
+ VFMT_16_16_SNORM = 25,
+ VFMT_16_16_16_SNORM = 26,
+ VFMT_16_16_16_16_SNORM = 27,
+ VFMT_16_UNORM = 28,
+ VFMT_16_16_UNORM = 29,
+ VFMT_16_16_16_UNORM = 30,
+ VFMT_16_16_16_16_UNORM = 31,
+ VFMT_32_UINT = 32,
+ VFMT_32_32_UINT = 33,
+ VFMT_32_32_32_UINT = 34,
+ VFMT_32_32_32_32_UINT = 35,
+ VFMT_32_SINT = 36,
+ VFMT_32_32_SINT = 37,
+ VFMT_32_32_32_SINT = 38,
+ VFMT_32_32_32_32_SINT = 39,
+ VFMT_8_UINT = 40,
+ VFMT_8_8_UINT = 41,
+ VFMT_8_8_8_UINT = 42,
+ VFMT_8_8_8_8_UINT = 43,
+ VFMT_8_UNORM = 44,
+ VFMT_8_8_UNORM = 45,
+ VFMT_8_8_8_UNORM = 46,
+ VFMT_8_8_8_8_UNORM = 47,
+ VFMT_8_SINT = 48,
+ VFMT_8_8_SINT = 49,
+ VFMT_8_8_8_SINT = 50,
+ VFMT_8_8_8_8_SINT = 51,
+ VFMT_8_SNORM = 52,
+ VFMT_8_8_SNORM = 53,
+ VFMT_8_8_8_SNORM = 54,
+ VFMT_8_8_8_8_SNORM = 55,
+ VFMT_10_10_10_2_UINT = 60,
+ VFMT_10_10_10_2_UNORM = 61,
+ VFMT_10_10_10_2_SINT = 62,
+ VFMT_10_10_10_2_SNORM = 63,
};
enum a3xx_tex_fmt {
- TFMT_NORM_USHORT_565 = 4,
- TFMT_NORM_USHORT_5551 = 6,
- TFMT_NORM_USHORT_4444 = 7,
- TFMT_NORM_USHORT_Z16 = 9,
- TFMT_NORM_UINT_X8Z24 = 10,
- TFMT_FLOAT_Z32 = 11,
- TFMT_NORM_UINT_NV12_UV_TILED = 17,
- TFMT_NORM_UINT_NV12_Y_TILED = 19,
- TFMT_NORM_UINT_NV12_UV = 21,
- TFMT_NORM_UINT_NV12_Y = 23,
- TFMT_NORM_UINT_I420_Y = 24,
- TFMT_NORM_UINT_I420_U = 26,
- TFMT_NORM_UINT_I420_V = 27,
- TFMT_NORM_UINT_2_10_10_10 = 41,
- TFMT_FLOAT_9_9_9_E5 = 42,
- TFMT_FLOAT_10_11_11 = 43,
- TFMT_NORM_UINT_A8 = 44,
- TFMT_NORM_UINT_L8_A8 = 47,
- TFMT_NORM_UINT_8 = 48,
- TFMT_NORM_UINT_8_8 = 49,
- TFMT_NORM_UINT_8_8_8 = 50,
- TFMT_NORM_UINT_8_8_8_8 = 51,
- TFMT_NORM_SINT_8_8 = 53,
- TFMT_NORM_SINT_8_8_8_8 = 55,
- TFMT_UINT_8_8 = 57,
- TFMT_UINT_8_8_8_8 = 59,
- TFMT_SINT_8_8 = 61,
- TFMT_SINT_8_8_8_8 = 63,
- TFMT_FLOAT_16 = 64,
- TFMT_FLOAT_16_16 = 65,
- TFMT_FLOAT_16_16_16_16 = 67,
- TFMT_UINT_16 = 68,
- TFMT_UINT_16_16 = 69,
- TFMT_UINT_16_16_16_16 = 71,
- TFMT_SINT_16 = 72,
- TFMT_SINT_16_16 = 73,
- TFMT_SINT_16_16_16_16 = 75,
- TFMT_FLOAT_32 = 84,
- TFMT_FLOAT_32_32 = 85,
- TFMT_FLOAT_32_32_32_32 = 87,
- TFMT_UINT_32 = 88,
- TFMT_UINT_32_32 = 89,
- TFMT_UINT_32_32_32_32 = 91,
- TFMT_SINT_32 = 92,
- TFMT_SINT_32_32 = 93,
- TFMT_SINT_32_32_32_32 = 95,
+ TFMT_5_6_5_UNORM = 4,
+ TFMT_5_5_5_1_UNORM = 5,
+ TFMT_4_4_4_4_UNORM = 7,
+ TFMT_Z16_UNORM = 9,
+ TFMT_X8Z24_UNORM = 10,
+ TFMT_Z32_FLOAT = 11,
+ TFMT_NV12_UV_TILED = 17,
+ TFMT_NV12_Y_TILED = 19,
+ TFMT_NV12_UV = 21,
+ TFMT_NV12_Y = 23,
+ TFMT_I420_Y = 24,
+ TFMT_I420_U = 26,
+ TFMT_I420_V = 27,
+ TFMT_DXT1 = 36,
+ TFMT_DXT3 = 37,
+ TFMT_DXT5 = 38,
+ TFMT_10_10_10_2_UNORM = 41,
+ TFMT_9_9_9_E5_FLOAT = 42,
+ TFMT_11_11_10_FLOAT = 43,
+ TFMT_A8_UNORM = 44,
+ TFMT_L8_A8_UNORM = 47,
+ TFMT_8_UNORM = 48,
+ TFMT_8_8_UNORM = 49,
+ TFMT_8_8_8_UNORM = 50,
+ TFMT_8_8_8_8_UNORM = 51,
+ TFMT_8_SNORM = 52,
+ TFMT_8_8_SNORM = 53,
+ TFMT_8_8_8_SNORM = 54,
+ TFMT_8_8_8_8_SNORM = 55,
+ TFMT_8_UINT = 56,
+ TFMT_8_8_UINT = 57,
+ TFMT_8_8_8_UINT = 58,
+ TFMT_8_8_8_8_UINT = 59,
+ TFMT_8_SINT = 60,
+ TFMT_8_8_SINT = 61,
+ TFMT_8_8_8_SINT = 62,
+ TFMT_8_8_8_8_SINT = 63,
+ TFMT_16_FLOAT = 64,
+ TFMT_16_16_FLOAT = 65,
+ TFMT_16_16_16_16_FLOAT = 67,
+ TFMT_16_UINT = 68,
+ TFMT_16_16_UINT = 69,
+ TFMT_16_16_16_16_UINT = 71,
+ TFMT_16_SINT = 72,
+ TFMT_16_16_SINT = 73,
+ TFMT_16_16_16_16_SINT = 75,
+ TFMT_16_UNORM = 76,
+ TFMT_16_16_UNORM = 77,
+ TFMT_16_16_16_16_UNORM = 79,
+ TFMT_16_SNORM = 80,
+ TFMT_16_16_SNORM = 81,
+ TFMT_16_16_16_16_SNORM = 83,
+ TFMT_32_FLOAT = 84,
+ TFMT_32_32_FLOAT = 85,
+ TFMT_32_32_32_32_FLOAT = 87,
+ TFMT_32_UINT = 88,
+ TFMT_32_32_UINT = 89,
+ TFMT_32_32_32_32_UINT = 91,
+ TFMT_32_SINT = 92,
+ TFMT_32_32_SINT = 93,
+ TFMT_32_32_32_32_SINT = 95,
+ TFMT_RGTC2_SNORM = 112,
+ TFMT_RGTC2_UNORM = 113,
+ TFMT_RGTC1_SNORM = 114,
+ TFMT_RGTC1_UNORM = 115,
};
enum a3xx_tex_fetchsize {
@@ -180,9 +199,11 @@ enum a3xx_color_fmt {
RB_R4G4B4A4_UNORM = 3,
RB_R8G8B8_UNORM = 4,
RB_R8G8B8A8_UNORM = 8,
+ RB_R8G8B8A8_SNORM = 9,
RB_R8G8B8A8_UINT = 10,
RB_R8G8B8A8_SINT = 11,
RB_R8G8_UNORM = 12,
+ RB_R8G8_SNORM = 13,
RB_R8_UINT = 14,
RB_R8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
@@ -258,6 +279,14 @@ enum a3xx_tex_clamp {
A3XX_TEX_MIRROR_CLAMP = 4,
};
+enum a3xx_tex_aniso {
+ A3XX_TEX_ANISO_1 = 0,
+ A3XX_TEX_ANISO_2 = 1,
+ A3XX_TEX_ANISO_4 = 2,
+ A3XX_TEX_ANISO_8 = 3,
+ A3XX_TEX_ANISO_16 = 4,
+};
+
enum a3xx_tex_swiz {
A3XX_TEX_X = 0,
A3XX_TEX_Y = 1,
@@ -1563,12 +1592,13 @@ static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
{
return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
}
-#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80
#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
{
return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
}
+#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000
#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
@@ -2509,6 +2539,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
{
return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
}
+#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000
+#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15
+static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK;
+}
#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 5a24c416d2dd..755723fd8ba5 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -63,72 +63,82 @@ enum a4xx_rb_blend_opcode {
};
enum a4xx_vtx_fmt {
- VFMT4_FLOAT_32 = 1,
- VFMT4_FLOAT_32_32 = 2,
- VFMT4_FLOAT_32_32_32 = 3,
- VFMT4_FLOAT_32_32_32_32 = 4,
- VFMT4_FLOAT_16 = 5,
- VFMT4_FLOAT_16_16 = 6,
- VFMT4_FLOAT_16_16_16 = 7,
- VFMT4_FLOAT_16_16_16_16 = 8,
- VFMT4_FIXED_32 = 9,
- VFMT4_FIXED_32_32 = 10,
- VFMT4_FIXED_32_32_32 = 11,
- VFMT4_FIXED_32_32_32_32 = 12,
- VFMT4_SHORT_16 = 16,
- VFMT4_SHORT_16_16 = 17,
- VFMT4_SHORT_16_16_16 = 18,
- VFMT4_SHORT_16_16_16_16 = 19,
- VFMT4_USHORT_16 = 20,
- VFMT4_USHORT_16_16 = 21,
- VFMT4_USHORT_16_16_16 = 22,
- VFMT4_USHORT_16_16_16_16 = 23,
- VFMT4_NORM_SHORT_16 = 24,
- VFMT4_NORM_SHORT_16_16 = 25,
- VFMT4_NORM_SHORT_16_16_16 = 26,
- VFMT4_NORM_SHORT_16_16_16_16 = 27,
- VFMT4_NORM_USHORT_16 = 28,
- VFMT4_NORM_USHORT_16_16 = 29,
- VFMT4_NORM_USHORT_16_16_16 = 30,
- VFMT4_NORM_USHORT_16_16_16_16 = 31,
- VFMT4_UBYTE_8 = 40,
- VFMT4_UBYTE_8_8 = 41,
- VFMT4_UBYTE_8_8_8 = 42,
- VFMT4_UBYTE_8_8_8_8 = 43,
- VFMT4_NORM_UBYTE_8 = 44,
- VFMT4_NORM_UBYTE_8_8 = 45,
- VFMT4_NORM_UBYTE_8_8_8 = 46,
- VFMT4_NORM_UBYTE_8_8_8_8 = 47,
- VFMT4_BYTE_8 = 48,
- VFMT4_BYTE_8_8 = 49,
- VFMT4_BYTE_8_8_8 = 50,
- VFMT4_BYTE_8_8_8_8 = 51,
- VFMT4_NORM_BYTE_8 = 52,
- VFMT4_NORM_BYTE_8_8 = 53,
- VFMT4_NORM_BYTE_8_8_8 = 54,
- VFMT4_NORM_BYTE_8_8_8_8 = 55,
- VFMT4_UINT_10_10_10_2 = 60,
- VFMT4_NORM_UINT_10_10_10_2 = 61,
- VFMT4_INT_10_10_10_2 = 62,
- VFMT4_NORM_INT_10_10_10_2 = 63,
+ VFMT4_32_FLOAT = 1,
+ VFMT4_32_32_FLOAT = 2,
+ VFMT4_32_32_32_FLOAT = 3,
+ VFMT4_32_32_32_32_FLOAT = 4,
+ VFMT4_16_FLOAT = 5,
+ VFMT4_16_16_FLOAT = 6,
+ VFMT4_16_16_16_FLOAT = 7,
+ VFMT4_16_16_16_16_FLOAT = 8,
+ VFMT4_32_FIXED = 9,
+ VFMT4_32_32_FIXED = 10,
+ VFMT4_32_32_32_FIXED = 11,
+ VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_16_SINT = 16,
+ VFMT4_16_16_SINT = 17,
+ VFMT4_16_16_16_SINT = 18,
+ VFMT4_16_16_16_16_SINT = 19,
+ VFMT4_16_UINT = 20,
+ VFMT4_16_16_UINT = 21,
+ VFMT4_16_16_16_UINT = 22,
+ VFMT4_16_16_16_16_UINT = 23,
+ VFMT4_16_SNORM = 24,
+ VFMT4_16_16_SNORM = 25,
+ VFMT4_16_16_16_SNORM = 26,
+ VFMT4_16_16_16_16_SNORM = 27,
+ VFMT4_16_UNORM = 28,
+ VFMT4_16_16_UNORM = 29,
+ VFMT4_16_16_16_UNORM = 30,
+ VFMT4_16_16_16_16_UNORM = 31,
+ VFMT4_32_32_SINT = 37,
+ VFMT4_8_UINT = 40,
+ VFMT4_8_8_UINT = 41,
+ VFMT4_8_8_8_UINT = 42,
+ VFMT4_8_8_8_8_UINT = 43,
+ VFMT4_8_UNORM = 44,
+ VFMT4_8_8_UNORM = 45,
+ VFMT4_8_8_8_UNORM = 46,
+ VFMT4_8_8_8_8_UNORM = 47,
+ VFMT4_8_SINT = 48,
+ VFMT4_8_8_SINT = 49,
+ VFMT4_8_8_8_SINT = 50,
+ VFMT4_8_8_8_8_SINT = 51,
+ VFMT4_8_SNORM = 52,
+ VFMT4_8_8_SNORM = 53,
+ VFMT4_8_8_8_SNORM = 54,
+ VFMT4_8_8_8_8_SNORM = 55,
+ VFMT4_10_10_10_2_UINT = 60,
+ VFMT4_10_10_10_2_UNORM = 61,
+ VFMT4_10_10_10_2_SINT = 62,
+ VFMT4_10_10_10_2_SNORM = 63,
};
enum a4xx_tex_fmt {
- TFMT4_NORM_USHORT_565 = 11,
- TFMT4_NORM_USHORT_5551 = 10,
- TFMT4_NORM_USHORT_4444 = 8,
- TFMT4_NORM_UINT_X8Z24 = 71,
- TFMT4_NORM_UINT_2_10_10_10 = 33,
- TFMT4_NORM_UINT_A8 = 3,
- TFMT4_NORM_UINT_L8_A8 = 13,
- TFMT4_NORM_UINT_8 = 4,
- TFMT4_NORM_UINT_8_8_8_8 = 28,
- TFMT4_FLOAT_16 = 20,
- TFMT4_FLOAT_16_16 = 40,
- TFMT4_FLOAT_16_16_16_16 = 53,
- TFMT4_FLOAT_32 = 43,
- TFMT4_FLOAT_32_32 = 56,
- TFMT4_FLOAT_32_32_32_32 = 63,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_5_5_5_1_UNORM = 10,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_A8_UNORM = 3,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_UNORM = 4,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_16_FLOAT = 20,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_16_16_FLOAT = 53,
+ TFMT4_32_FLOAT = 43,
+ TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_32_32_FLOAT = 63,
+};
+
+enum a4xx_tex_fetchsize {
+ TFETCH4_1_BYTE = 0,
+ TFETCH4_2_BYTE = 1,
+ TFETCH4_4_BYTE = 2,
+ TFETCH4_8_BYTE = 3,
+ TFETCH4_16_BYTE = 4,
};
enum a4xx_depth_format {
@@ -264,14 +274,19 @@ static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
}
-#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3
-#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
-#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7
-static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val)
+#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3
+#define A4XX_RB_RENDER_CONTROL2_XCOORD 0x00000001
+#define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002
+#define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004
+#define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008
+#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020
+#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
{
- return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK;
+ return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
}
-#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000
+#define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000
static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
@@ -362,7 +377,69 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
}
+#define REG_A4XX_RB_BLEND_RED 0x000020f3
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f4
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f5
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
@@ -372,7 +449,7 @@ static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare
}
#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
-#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001
+#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND 0x00000001
#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
@@ -416,11 +493,11 @@ static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
}
#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
-#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
-#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0
+#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5
static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
{
- return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+ return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
}
#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
@@ -508,7 +585,7 @@ static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
#define A4XX_RB_DEPTH_PITCH__SHIFT 0
static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
{
- return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+ return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
}
#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
@@ -516,7 +593,7 @@ static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
{
- return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+ return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
}
#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
@@ -630,7 +707,11 @@ static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
}
-#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; }
#define REG_A4XX_RBBM_HW_VERSION 0x00000000
@@ -1121,7 +1202,9 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
}
+#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000
#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
+#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000
#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
@@ -1384,6 +1467,12 @@ static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A4XX_VFD_CONTROL_2 0x00002202
#define REG_A4XX_VFD_CONTROL_3 0x00002203
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
+}
#define REG_A4XX_VFD_CONTROL_4 0x00002204
@@ -1405,12 +1494,7 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
}
#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
-#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
-#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
-static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
-{
- return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
-}
+#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000
static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
@@ -1423,6 +1507,12 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
}
static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK;
+}
static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
@@ -1446,6 +1536,7 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
}
+#define A4XX_VFD_DECODE_INSTR_INT 0x00100000
#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@@ -1585,7 +1676,47 @@ static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
}
-#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
@@ -1647,46 +1778,34 @@ static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
}
-#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
-#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
-#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
-static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val)
{
- return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK;
}
-
-#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
-#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
-#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
-#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
-#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
-#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
-static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val)
{
- return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK;
}
-#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
-#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
-#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
-#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
-#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
-static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
-{
- return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
-}
-#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
-#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
-static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val)
{
- return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK;
}
-#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
-#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
-#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
-static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
{
- return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK;
}
#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
@@ -1742,6 +1861,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize
}
#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK;
+}
#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
@@ -1751,6 +1876,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
{
return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
}
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
@@ -1965,15 +2096,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_UNKNOWN_20F2 0x000020f2
-#define REG_A4XX_UNKNOWN_20F3 0x000020f3
-
-#define REG_A4XX_UNKNOWN_20F4 0x000020f4
-
-#define REG_A4XX_UNKNOWN_20F5 0x000020f5
-
-#define REG_A4XX_UNKNOWN_20F6 0x000020f6
-
#define REG_A4XX_UNKNOWN_20F7 0x000020f7
+#define A4XX_UNKNOWN_20F7__MASK 0xffffffff
+#define A4XX_UNKNOWN_20F7__SHIFT 0
+static inline uint32_t A4XX_UNKNOWN_20F7(float val)
+{
+ return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
+}
#define REG_A4XX_UNKNOWN_2152 0x00002152
@@ -2000,6 +2129,7 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_UNKNOWN_23A0 0x000023a0
#define REG_A4XX_TEX_SAMP_0 0x00000000
+#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
@@ -2038,17 +2168,19 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
}
+#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
{
- return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
+ return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
}
#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
{
- return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
+ return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
}
#define REG_A4XX_TEX_CONST_0 0x00000000
@@ -2077,6 +2209,12 @@ static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
{
return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
}
+#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK;
+}
#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
#define A4XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
@@ -2105,6 +2243,12 @@ static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
}
#define REG_A4XX_TEX_CONST_2 0x00000002
+#define A4XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A4XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_2_FETCHSIZE(enum a4xx_tex_fetchsize val)
+{
+ return ((val) << A4XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A4XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
@@ -2119,19 +2263,31 @@ static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
}
#define REG_A4XX_TEX_CONST_3 0x00000003
-#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f
+#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
{
return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
}
+#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000
+#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18
+static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK;
+}
#define REG_A4XX_TEX_CONST_4 0x00000004
-#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff
-#define A4XX_TEX_CONST_4_BASE__SHIFT 0
+#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f
+#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK;
+}
+#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A4XX_TEX_CONST_4_BASE__SHIFT 5
static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
{
- return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+ return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
}
#define REG_A4XX_TEX_CONST_5 0x00000005
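
The generated a4xx.xml.h helpers above all follow the same rules-ng-ng pattern: every multi-bit field gets a __MASK/__SHIFT pair plus an inline packer of the form ((val) << SHIFT) & MASK, and a full register value is built by OR-ing packed fields with single-bit defines. A minimal self-contained sketch of that pattern, restating two of the RB_RENDER_CONTROL2 definitions purely for illustration (the real definitions live in the generated header):

#include <stdint.h>
#include <stdio.h>

/* Restated for illustration only -- the real definitions are in a4xx.xml.h. */
#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK  0x00000380
#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7
static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
{
	return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) &
		A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
}
#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020

int main(void)
{
	/* Build a register value the same way the driver does:
	 * OR together packed fields and single-bit flags. */
	uint32_t reg = A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(2) |
		       A4XX_RB_RENDER_CONTROL2_FACENESS;
	printf("RB_RENDER_CONTROL2 = 0x%08x\n", reg); /* 0x00000120 */
	return 0;
}
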
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index a4b33af9338d..8531beb982e7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6a75cee94d81..6ffc4f6c8af1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -172,7 +172,9 @@ enum adreno_pm4_type3_packets {
CP_DRAW_INDIRECT = 40,
CP_DRAW_INDX_INDIRECT = 41,
CP_DRAW_AUTO = 36,
+ CP_UNKNOWN_19 = 25,
CP_UNKNOWN_1A = 26,
+ CP_UNKNOWN_4E = 78,
CP_WIDE_REG_WRITE = 116,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
@@ -203,6 +205,12 @@ enum adreno_state_src {
SS_INDIRECT = 4,
};
+enum a4xx_index_size {
+ INDEX4_SIZE_8_BIT = 0,
+ INDEX4_SIZE_16_BIT = 1,
+ INDEX4_SIZE_32_BIT = 2,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -374,29 +382,20 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700
-#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
-}
-#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800
-#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val)
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
{
return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
-#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
-#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
-}
#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK;
+}
#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
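
The CP_DRAW_INDX_OFFSET change above moves NUM_INSTANCES out of dword 0 into its own dword 1 and widens INDEX_SIZE to a two-bit field taking the new a4xx_index_size enum. A self-contained sketch of how the first two dwords would now be packed, with the macros restated here purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Restated from adreno_pm4.xml.h for illustration only. */
enum a4xx_index_size { INDEX4_SIZE_8_BIT = 0, INDEX4_SIZE_16_BIT = 1, INDEX4_SIZE_32_BIT = 2 };
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK  0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
{
	return ((uint32_t)val << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) &
		CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
}
#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK  0xffffffff
#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0
static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val)
{
	return (val << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) &
		CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK;
}

int main(void)
{
	/* dword 0 carries the index size, dword 1 the instance count. */
	uint32_t dw0 = CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(INDEX4_SIZE_16_BIT);
	uint32_t dw1 = CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(4);
	printf("dw0=0x%08x dw1=0x%08x\n", dw0, dw1); /* 0x00000400 0x00000004 */
	return 0;
}
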
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 448438b759b4..abf1bba520bf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index c102a7f074ac..695f99d4bec2 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index a900134bdf33..50ff9851d73d 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
new file mode 100644
index 000000000000..0940e84b2821
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_irq.h>
+#include "edp.h"
+
+static irqreturn_t edp_irq(int irq, void *dev_id)
+{
+ struct msm_edp *edp = dev_id;
+
+ /* Process eDP irq */
+ return msm_edp_ctrl_irq(edp->ctrl);
+}
+
+static void edp_destroy(struct platform_device *pdev)
+{
+ struct msm_edp *edp = platform_get_drvdata(pdev);
+
+ if (!edp)
+ return;
+
+ if (edp->ctrl) {
+ msm_edp_ctrl_destroy(edp->ctrl);
+ edp->ctrl = NULL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+/* construct eDP at bind/probe time, grab all the resources. */
+static struct msm_edp *edp_init(struct platform_device *pdev)
+{
+ struct msm_edp *edp = NULL;
+ int ret;
+
+ if (!pdev) {
+ pr_err("no eDP device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL);
+ if (!edp) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ DBG("eDP probed=%p", edp);
+
+ edp->pdev = pdev;
+ platform_set_drvdata(pdev, edp);
+
+ ret = msm_edp_ctrl_init(edp);
+ if (ret)
+ goto fail;
+
+ return edp;
+
+fail:
+ if (edp)
+ edp_destroy(pdev);
+
+ return ERR_PTR(ret);
+}
+
+static int edp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_edp *edp;
+
+ DBG("");
+ edp = edp_init(to_platform_device(dev));
+ if (IS_ERR(edp))
+ return PTR_ERR(edp);
+ priv->edp = edp;
+
+ return 0;
+}
+
+static void edp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+
+ DBG("");
+ if (priv->edp) {
+ edp_destroy(to_platform_device(dev));
+ priv->edp = NULL;
+ }
+}
+
+static const struct component_ops edp_ops = {
+ .bind = edp_bind,
+ .unbind = edp_unbind,
+};
+
+static int edp_dev_probe(struct platform_device *pdev)
+{
+ DBG("");
+ return component_add(&pdev->dev, &edp_ops);
+}
+
+static int edp_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &edp_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss-edp" },
+ {}
+};
+
+static struct platform_driver edp_driver = {
+ .probe = edp_dev_probe,
+ .remove = edp_dev_remove,
+ .driver = {
+ .name = "msm_edp",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init msm_edp_register(void)
+{
+ DBG("");
+ platform_driver_register(&edp_driver);
+}
+
+void __exit msm_edp_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&edp_driver);
+}
+
+/* Second part of initialization, the drm/kms level modeset_init */
+int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct platform_device *pdev = edp->pdev;
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret;
+
+ edp->encoder = encoder;
+ edp->dev = dev;
+
+ edp->bridge = msm_edp_bridge_init(edp);
+ if (IS_ERR(edp->bridge)) {
+ ret = PTR_ERR(edp->bridge);
+ dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+ edp->bridge = NULL;
+ goto fail;
+ }
+
+ edp->connector = msm_edp_connector_init(edp);
+ if (IS_ERR(edp->connector)) {
+ ret = PTR_ERR(edp->connector);
+ dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+ edp->connector = NULL;
+ goto fail;
+ }
+
+ edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (edp->irq < 0) {
+ ret = edp->irq;
+ dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(&pdev->dev, edp->irq,
+ edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "edp_isr", edp);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ edp->irq, ret);
+ goto fail;
+ }
+
+ encoder->bridge = edp->bridge;
+
+ priv->bridges[priv->num_bridges++] = edp->bridge;
+ priv->connectors[priv->num_connectors++] = edp->connector;
+
+ return 0;
+
+fail:
+ /* bridge/connector are normally destroyed by drm */
+ if (edp->bridge) {
+ edp_bridge_destroy(edp->bridge);
+ edp->bridge = NULL;
+ }
+ if (edp->connector) {
+ edp->connector->funcs->destroy(edp->connector);
+ edp->connector = NULL;
+ }
+
+ return ret;
+}
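
edp_bind() creates the msm_edp at component-bind time; msm_edp_modeset_init() is the second stage that wires it to an encoder and is expected to be called from the kms modeset code, which is not part of this file. A hedged sketch of such a call site (the function name is hypothetical; only the msm_edp_modeset_init() signature and priv->edp are taken from the code above):

/* Hypothetical call-site sketch -- the real caller lives in the mdp kms code. */
static int hypothetical_kms_init_edp(struct drm_device *dev,
				     struct drm_encoder *encoder)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	/* Only wire up eDP if the component bind populated priv->edp. */
	if (priv->edp)
		ret = msm_edp_modeset_init(priv->edp, dev, encoder);
	return ret;
}
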
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
new file mode 100644
index 000000000000..ba5bedde5241
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __EDP_CONNECTOR_H__
+#define __EDP_CONNECTOR_H__
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "drm_crtc.h"
+#include "drm_dp_helper.h"
+#include "msm_drv.h"
+
+#define edp_read(offset) msm_readl((offset))
+#define edp_write(offset, data) msm_writel((data), (offset))
+
+struct edp_ctrl;
+struct edp_aux;
+struct edp_phy;
+
+struct msm_edp {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ /* the encoder we are hooked to (outside of eDP block) */
+ struct drm_encoder *encoder;
+
+ struct edp_ctrl *ctrl;
+
+ int irq;
+};
+
+/* eDP bridge */
+struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
+void edp_bridge_destroy(struct drm_bridge *bridge);
+
+/* eDP connector */
+struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
+
+/* AUX */
+void *msm_edp_aux_init(struct device *dev, void __iomem *regbase,
+ struct drm_dp_aux **drm_aux);
+void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
+irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
+void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
+
+/* Phy */
+bool msm_edp_phy_ready(struct edp_phy *phy);
+void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
+void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
+void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
+void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
+void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);
+
+/* Ctrl */
+irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
+void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
+int msm_edp_ctrl_init(struct msm_edp *edp);
+void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
+bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
+int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
+ struct drm_connector *connector, struct edid **edid);
+int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+/* @pixel_rate is in kHz */
+bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
+ u32 pixel_rate, u32 *pm, u32 *pn);
+
+#endif /* __EDP_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
new file mode 100644
index 000000000000..a29f1df15143
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -0,0 +1,292 @@
+#ifndef EDP_XML
+#define EDP_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+
+Copyright (C) 2013-2014 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum edp_color_depth {
+ EDP_6BIT = 0,
+ EDP_8BIT = 1,
+ EDP_10BIT = 2,
+ EDP_12BIT = 3,
+ EDP_16BIT = 4,
+};
+
+enum edp_component_format {
+ EDP_RGB = 0,
+ EDP_YUV422 = 1,
+ EDP_YUV444 = 2,
+};
+
+#define REG_EDP_MAINLINK_CTRL 0x00000004
+#define EDP_MAINLINK_CTRL_ENABLE 0x00000001
+#define EDP_MAINLINK_CTRL_RESET 0x00000002
+
+#define REG_EDP_STATE_CTRL 0x00000008
+#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001
+#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002
+#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004
+#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008
+#define EDP_STATE_CTRL_PRBS7 0x00000010
+#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020
+#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040
+#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080
+
+#define REG_EDP_CONFIGURATION_CTRL 0x0000000c
+#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001
+#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002
+#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004
+#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030
+#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4
+static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val)
+{
+ return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK;
+}
+#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040
+#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100
+#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8
+static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val)
+{
+ return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK;
+}
+
+#define REG_EDP_SOFTWARE_MVID 0x00000014
+
+#define REG_EDP_SOFTWARE_NVID 0x00000018
+
+#define REG_EDP_TOTAL_HOR_VER 0x0000001c
+#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff
+#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0
+static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val)
+{
+ return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK;
+}
+#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000
+#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16
+static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val)
+{
+ return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK;
+}
+
+#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020
+#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff
+#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0
+static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val)
+{
+ return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK;
+}
+#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000
+#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16
+static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val)
+{
+ return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK;
+}
+
+#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024
+#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff
+#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0
+static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val)
+{
+ return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK;
+}
+#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000
+#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000
+#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16
+static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val)
+{
+ return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK;
+}
+#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000
+
+#define REG_EDP_ACTIVE_HOR_VER 0x00000028
+#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff
+#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0
+static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val)
+{
+ return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK;
+}
+#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000
+#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16
+static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val)
+{
+ return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK;
+}
+
+#define REG_EDP_MISC1_MISC0 0x0000002c
+#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff
+#define EDP_MISC1_MISC0_MISC0__SHIFT 0
+static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val)
+{
+ return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK;
+}
+#define EDP_MISC1_MISC0_SYNC 0x00000001
+#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006
+#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1
+static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val)
+{
+ return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK;
+}
+#define EDP_MISC1_MISC0_CEA 0x00000008
+#define EDP_MISC1_MISC0_BT709_5 0x00000010
+#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0
+#define EDP_MISC1_MISC0_COLOR__SHIFT 5
+static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val)
+{
+ return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK;
+}
+#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00
+#define EDP_MISC1_MISC0_MISC1__SHIFT 8
+static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val)
+{
+ return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK;
+}
+#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100
+#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600
+#define EDP_MISC1_MISC0_STEREO__SHIFT 9
+static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val)
+{
+ return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK;
+}
+
+#define REG_EDP_PHY_CTRL 0x00000074
+#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001
+#define EDP_PHY_CTRL_SW_RESET 0x00000004
+
+#define REG_EDP_MAINLINK_READY 0x00000084
+#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008
+#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010
+#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020
+
+#define REG_EDP_AUX_CTRL 0x00000300
+#define EDP_AUX_CTRL_ENABLE 0x00000001
+#define EDP_AUX_CTRL_RESET 0x00000002
+
+#define REG_EDP_INTERRUPT_REG_1 0x00000308
+#define EDP_INTERRUPT_REG_1_HPD 0x00000001
+#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002
+#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004
+#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008
+#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010
+#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020
+#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040
+#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080
+#define EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100
+#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200
+#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400
+#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800
+#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000
+#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000
+#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000
+#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000
+#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000
+#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000
+#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000
+#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000
+#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000
+#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000
+#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000
+#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000
+#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000
+#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000
+#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000
+#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000
+#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000
+#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000
+
+#define REG_EDP_INTERRUPT_REG_2 0x0000030c
+#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001
+#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002
+#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004
+#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008
+#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010
+#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020
+#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000200
+#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080
+#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100
+#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200
+#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400
+#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800
+
+#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310
+
+#define REG_EDP_AUX_DATA 0x00000314
+#define EDP_AUX_DATA_READ 0x00000001
+#define EDP_AUX_DATA_DATA__MASK 0x0000ff00
+#define EDP_AUX_DATA_DATA__SHIFT 8
+static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val)
+{
+ return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK;
+}
+#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000
+#define EDP_AUX_DATA_INDEX__SHIFT 16
+static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val)
+{
+ return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK;
+}
+#define EDP_AUX_DATA_INDEX_WRITE 0x80000000
+
+#define REG_EDP_AUX_TRANS_CTRL 0x00000318
+#define EDP_AUX_TRANS_CTRL_I2C 0x00000100
+#define EDP_AUX_TRANS_CTRL_GO 0x00000200
+
+#define REG_EDP_AUX_STATUS 0x00000324
+
+static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; }
+
+static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; }
+
+#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510
+
+#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514
+
+#define REG_EDP_PHY_GLB_MISC9 0x00000518
+
+#define REG_EDP_PHY_GLB_CFG 0x00000528
+
+#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c
+
+#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598
+
+
+#endif /* EDP_XML */
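
The edp.xml.h field helpers compose register values the same way as the adreno headers. A self-contained sketch packing REG_EDP_CONFIGURATION_CTRL, with the definitions restated purely for illustration; the hardware meaning of the raw LANES value is not interpreted here:

#include <stdint.h>
#include <stdio.h>

/* Restated from edp.xml.h for illustration only. */
enum edp_color_depth { EDP_6BIT = 0, EDP_8BIT = 1, EDP_10BIT = 2, EDP_12BIT = 3, EDP_16BIT = 4 };
#define EDP_CONFIGURATION_CTRL_PROGRESSIVE  0x00000004
#define EDP_CONFIGURATION_CTRL_LANES__MASK  0x00000030
#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4
static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val)
{
	return (val << EDP_CONFIGURATION_CTRL_LANES__SHIFT) &
		EDP_CONFIGURATION_CTRL_LANES__MASK;
}
#define EDP_CONFIGURATION_CTRL_COLOR__MASK  0x00000100
#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8
static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val)
{
	return ((uint32_t)val << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) &
		EDP_CONFIGURATION_CTRL_COLOR__MASK;
}

int main(void)
{
	/* Progressive scan, raw LANES field value 1, 8-bit color depth. */
	uint32_t cfg = EDP_CONFIGURATION_CTRL_PROGRESSIVE |
		       EDP_CONFIGURATION_CTRL_LANES(1) |
		       EDP_CONFIGURATION_CTRL_COLOR(EDP_8BIT);
	printf("EDP_CONFIGURATION_CTRL = 0x%08x\n", cfg); /* 0x00000114 */
	return 0;
}
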
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
new file mode 100644
index 000000000000..5f5a84f6074c
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_aux.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edp.h"
+#include "edp.xml.h"
+
+#define AUX_CMD_FIFO_LEN 144
+#define AUX_CMD_NATIVE_MAX 16
+#define AUX_CMD_I2C_MAX 128
+
+#define EDP_INTR_AUX_I2C_ERR \
+ (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
+ EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
+ EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER)
+#define EDP_INTR_TRANS_STATUS \
+ (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR)
+
+struct edp_aux {
+ void __iomem *base;
+ bool msg_err;
+
+ struct completion msg_comp;
+
+	/* Prevents reentry of the message transaction routine. */
+ struct mutex msg_mutex;
+
+ struct drm_dp_aux drm_aux;
+};
+#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux)
+
+static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ u32 data[4];
+ u32 reg, len;
+ bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+ bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+ u8 *msgdata = msg->buffer;
+ int i;
+
+ if (read)
+ len = 4;
+ else
+ len = msg->size + 4;
+
+ /*
+ * cmd fifo only has depth of 144 bytes
+ */
+ if (len > AUX_CMD_FIFO_LEN)
+ return -EINVAL;
+
+ /* Pack cmd and write to HW */
+ data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+ if (read)
+ data[0] |= BIT(4); /* R/W */
+
+ data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
+ data[2] = msg->address & 0xff; /* addr[7:0] */
+ data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
+
+ for (i = 0; i < len; i++) {
+ reg = (i < 4) ? data[i] : msgdata[i - 4];
+ reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */
+ if (i == 0)
+ reg |= EDP_AUX_DATA_INDEX_WRITE;
+ edp_write(aux->base + REG_EDP_AUX_DATA, reg);
+ }
+
+ reg = 0; /* Transaction number is always 1 */
+ if (!native) /* i2c */
+ reg |= EDP_AUX_TRANS_CTRL_I2C;
+
+ reg |= EDP_AUX_TRANS_CTRL_GO;
+ edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg);
+
+ return 0;
+}
+
+static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ u32 data;
+ u8 *dp;
+ int i;
+ u32 len = msg->size;
+
+ edp_write(aux->base + REG_EDP_AUX_DATA,
+ EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */
+
+ dp = msg->buffer;
+
+ /* discard first byte */
+ data = edp_read(aux->base + REG_EDP_AUX_DATA);
+ for (i = 0; i < len; i++) {
+ data = edp_read(aux->base + REG_EDP_AUX_DATA);
+ dp[i] = (u8)((data >> 8) & 0xff);
+ }
+
+ return 0;
+}
+
+/*
+ * This function does the real work of processing an AUX transaction.
+ * If the wait for completion times out, it calls msm_edp_aux_ctrl()
+ * to reset the AUX channel.
+ * The caller that triggers the transaction must make sure
+ * msm_edp_aux_ctrl() is not running concurrently in another thread,
+ * i.e. start a transaction only when the AUX channel is fully enabled.
+ */
+ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
+{
+ struct edp_aux *aux = to_edp_aux(drm_aux);
+ ssize_t ret;
+ bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+ bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+ /* Ignore address only message */
+ if ((msg->size == 0) || (msg->buffer == NULL)) {
+ msg->reply = native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ return msg->size;
+ }
+
+ /* msg sanity check */
+ if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
+ (msg->size > AUX_CMD_I2C_MAX)) {
+ pr_err("%s: invalid msg: size(%d), request(%x)\n",
+ __func__, msg->size, msg->request);
+ return -EINVAL;
+ }
+
+ mutex_lock(&aux->msg_mutex);
+
+ aux->msg_err = false;
+ reinit_completion(&aux->msg_comp);
+
+ ret = edp_msg_fifo_tx(aux, msg);
+ if (ret < 0)
+ goto unlock_exit;
+
+ DBG("wait_for_completion");
+ ret = wait_for_completion_timeout(&aux->msg_comp, 300);
+ if (ret <= 0) {
+ /*
+ * Clear GO and reset AUX channel
+ * to cancel the current transaction.
+ */
+ edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
+ msm_edp_aux_ctrl(aux, 1);
+ pr_err("%s: aux timeout, %d\n", __func__, ret);
+ goto unlock_exit;
+ }
+ DBG("completion");
+
+ if (!aux->msg_err) {
+ if (read) {
+ ret = edp_msg_fifo_rx(aux, msg);
+ if (ret < 0)
+ goto unlock_exit;
+ }
+
+ msg->reply = native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ } else {
+ /* Reply defer to retry */
+ msg->reply = native ?
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ /*
+ * The retry delay in the caller is not long enough to guarantee that
+ * the hardware has completed the transaction, so add extra defer time here.
+ */
+ msleep(100);
+ }
+
+ /* Return requested size for success or retry */
+ ret = msg->size;
+
+unlock_exit:
+ mutex_unlock(&aux->msg_mutex);
+ return ret;
+}
+
+void *msm_edp_aux_init(struct device *dev, void __iomem *regbase,
+ struct drm_dp_aux **drm_aux)
+{
+ struct edp_aux *aux = NULL;
+ int ret;
+
+ DBG("");
+ aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return NULL;
+
+ aux->base = regbase;
+ mutex_init(&aux->msg_mutex);
+ init_completion(&aux->msg_comp);
+
+ aux->drm_aux.name = "msm_edp_aux";
+ aux->drm_aux.dev = dev;
+ aux->drm_aux.transfer = edp_aux_transfer;
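+ /*
+ * Once registered, DPCD accesses and EDID-over-I2C reads from the
+ * rest of the driver are funneled through edp_aux_transfer().
+ */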
+ ret = drm_dp_aux_register(&aux->drm_aux);
+ if (ret) {
+ pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
+ mutex_destroy(&aux->msg_mutex);
+ }
+
+ if (drm_aux && aux)
+ *drm_aux = &aux->drm_aux;
+
+ return aux;
+}
+
+void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux)
+{
+ if (aux) {
+ drm_dp_aux_unregister(&aux->drm_aux);
+ mutex_destroy(&aux->msg_mutex);
+ }
+}
+
+irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr)
+{
+ if (isr & EDP_INTR_TRANS_STATUS) {
+ DBG("isr=%x", isr);
+ edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
+
+ if (isr & EDP_INTR_AUX_I2C_ERR)
+ aux->msg_err = true;
+ else
+ aux->msg_err = false;
+
+ complete(&aux->msg_comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void msm_edp_aux_ctrl(struct edp_aux *aux, int enable)
+{
+ u32 data;
+
+ DBG("enable=%d", enable);
+ data = edp_read(aux->base + REG_EDP_AUX_CTRL);
+
+ if (enable) {
+ data |= EDP_AUX_CTRL_RESET;
+ edp_write(aux->base + REG_EDP_AUX_CTRL, data);
+ /* Make sure full reset */
+ wmb();
+ usleep_range(500, 1000);
+
+ data &= ~EDP_AUX_CTRL_RESET;
+ data |= EDP_AUX_CTRL_ENABLE;
+ edp_write(aux->base + REG_EDP_AUX_CTRL, data);
+ } else {
+ data &= ~EDP_AUX_CTRL_ENABLE;
+ edp_write(aux->base + REG_EDP_AUX_CTRL, data);
+ }
+}
+
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
new file mode 100644
index 000000000000..2bc73f82f3f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edp.h"
+
+struct edp_bridge {
+ struct drm_bridge base;
+ struct msm_edp *edp;
+};
+#define to_edp_bridge(x) container_of(x, struct edp_bridge, base)
+
+void edp_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+static void edp_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
+ struct msm_edp *edp = edp_bridge->edp;
+
+ DBG("");
+ msm_edp_ctrl_power(edp->ctrl, true);
+}
+
+static void edp_bridge_enable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void edp_bridge_disable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void edp_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
+ struct msm_edp *edp = edp_bridge->edp;
+
+ DBG("");
+ msm_edp_ctrl_power(edp->ctrl, false);
+}
+
+static void edp_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = bridge->dev;
+ struct drm_connector *connector;
+ struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
+ struct msm_edp *edp = edp_bridge->edp;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
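+ /*
+ * Find the connector driven through this bridge's encoder so its
+ * display_info can be passed to the timing configuration.
+ */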
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if ((connector->encoder != NULL) &&
+ (connector->encoder->bridge == bridge)) {
+ msm_edp_ctrl_timing_cfg(edp->ctrl,
+ adjusted_mode, &connector->display_info);
+ break;
+ }
+ }
+}
+
+static const struct drm_bridge_funcs edp_bridge_funcs = {
+ .pre_enable = edp_bridge_pre_enable,
+ .enable = edp_bridge_enable,
+ .disable = edp_bridge_disable,
+ .post_disable = edp_bridge_post_disable,
+ .mode_set = edp_bridge_mode_set,
+};
+
+/* initialize bridge */
+struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
+{
+ struct drm_bridge *bridge = NULL;
+ struct edp_bridge *edp_bridge;
+ int ret;
+
+ edp_bridge = devm_kzalloc(edp->dev->dev,
+ sizeof(*edp_bridge), GFP_KERNEL);
+ if (!edp_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ edp_bridge->edp = edp;
+
+ bridge = &edp_bridge->base;
+ bridge->funcs = &edp_bridge_funcs;
+
+ ret = drm_bridge_attach(edp->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ edp_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
new file mode 100644
index 000000000000..d8812e84da54
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "drm/drm_edid.h"
+#include "msm_kms.h"
+#include "edp.h"
+
+struct edp_connector {
+ struct drm_connector base;
+ struct msm_edp *edp;
+};
+#define to_edp_connector(x) container_of(x, struct edp_connector, base)
+
+static enum drm_connector_status edp_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct edp_connector *edp_connector = to_edp_connector(connector);
+ struct msm_edp *edp = edp_connector->edp;
+
+ DBG("");
+ return msm_edp_ctrl_panel_connected(edp->ctrl) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void edp_connector_destroy(struct drm_connector *connector)
+{
+ struct edp_connector *edp_connector = to_edp_connector(connector);
+
+ DBG("");
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(edp_connector);
+}
+
+static int edp_connector_get_modes(struct drm_connector *connector)
+{
+ struct edp_connector *edp_connector = to_edp_connector(connector);
+ struct msm_edp *edp = edp_connector->edp;
+
+ struct edid *drm_edid = NULL;
+ int ret = 0;
+
+ DBG("");
+ ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid);
+ if (ret)
+ return ret;
+
+ drm_mode_connector_update_edid_property(connector, drm_edid);
+ if (drm_edid)
+ ret = drm_add_edid_modes(connector, drm_edid);
+
+ return ret;
+}
+
+static int edp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct edp_connector *edp_connector = to_edp_connector(connector);
+ struct msm_edp *edp = edp_connector->edp;
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
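+ /*
+ * Reject the mode unless the MDP can generate exactly the requested
+ * pixel clock and the eDP M/N divider table has an entry for it.
+ */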
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, edp_connector->edp->encoder);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ if (!msm_edp_ctrl_pixel_clock_valid(
+ edp->ctrl, mode->clock, NULL, NULL))
+ return MODE_CLOCK_RANGE;
+
+ /* Invalidate all modes if color format is not supported */
+ if (connector->display_info.bpc > 8)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+edp_connector_best_encoder(struct drm_connector *connector)
+{
+ struct edp_connector *edp_connector = to_edp_connector(connector);
+
+ DBG("");
+ return edp_connector->edp->encoder;
+}
+
+static const struct drm_connector_funcs edp_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = edp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = edp_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
+ .get_modes = edp_connector_get_modes,
+ .mode_valid = edp_connector_mode_valid,
+ .best_encoder = edp_connector_best_encoder,
+};
+
+/* initialize connector */
+struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
+{
+ struct drm_connector *connector = NULL;
+ struct edp_connector *edp_connector;
+ int ret;
+
+ edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
+ if (!edp_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ edp_connector->edp = edp;
+
+ connector = &edp_connector->base;
+
+ ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret)
+ goto fail;
+
+ drm_connector_helper_add(connector, &edp_connector_helper_funcs);
+
+ /* We don't support HPD, so only poll status until connected. */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ /* Display driver doesn't support interlace now. */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ ret = drm_connector_register(connector);
+ if (ret)
+ goto fail;
+
+ return connector;
+
+fail:
+ if (connector)
+ edp_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
new file mode 100644
index 000000000000..3e246210c46f
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -0,0 +1,1373 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include "drm_crtc.h"
+#include "drm_dp_helper.h"
+#include "drm_edid.h"
+#include "edp.h"
+#include "edp.xml.h"
+
+#define VDDA_MIN_UV 1800000 /* uV units */
+#define VDDA_MAX_UV 1800000 /* uV units */
+#define VDDA_UA_ON_LOAD 100000 /* uA units */
+#define VDDA_UA_OFF_LOAD 100 /* uA units */
+
+#define DPCD_LINK_VOLTAGE_MAX 4
+#define DPCD_LINK_PRE_EMPHASIS_MAX 4
+
+#define EDP_LINK_BW_MAX DP_LINK_BW_2_7
+
+/* Link training return value */
+#define EDP_TRAIN_FAIL -1
+#define EDP_TRAIN_SUCCESS 0
+#define EDP_TRAIN_RECONFIG 1
+
+#define EDP_CLK_MASK_AHB BIT(0)
+#define EDP_CLK_MASK_AUX BIT(1)
+#define EDP_CLK_MASK_LINK BIT(2)
+#define EDP_CLK_MASK_PIXEL BIT(3)
+#define EDP_CLK_MASK_MDP_CORE BIT(4)
+#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL)
+#define EDP_CLK_MASK_AUX_CHAN \
+ (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE)
+#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN)
+
+#define EDP_BACKLIGHT_MAX 255
+
+#define EDP_INTR_STATUS1 \
+ (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \
+ EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
+ EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
+ EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \
+ EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR)
+#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
+#define EDP_INTR_STATUS2 \
+ (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \
+ EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \
+ EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED)
+#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
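+/*
+ * Each interrupt source occupies three bits in the interrupt registers:
+ * status (bit n), ack/clear (bit n + 1) and mask/enable (bit n + 2),
+ * hence the "status << 2" masks here and the "status << 1" acks in
+ * msm_edp_ctrl_irq().
+ */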
+
+struct edp_ctrl {
+ struct platform_device *pdev;
+
+ void __iomem *base;
+
+ /* regulators */
+ struct regulator *vdda_vreg;
+ struct regulator *lvl_vreg;
+
+ /* clocks */
+ struct clk *aux_clk;
+ struct clk *pixel_clk;
+ struct clk *ahb_clk;
+ struct clk *link_clk;
+ struct clk *mdp_core_clk;
+
+ /* gpios */
+ struct gpio_desc *panel_en_gpio;
+ struct gpio_desc *panel_hpd_gpio;
+
+ /* completion and mutex */
+ struct completion idle_comp;
+ struct mutex dev_mutex; /* To protect device power status */
+
+ /* work queue */
+ struct work_struct on_work;
+ struct work_struct off_work;
+ struct workqueue_struct *workqueue;
+
+ /* Interrupt register lock */
+ spinlock_t irq_lock;
+
+ bool edp_connected;
+ bool power_on;
+
+ /* edid raw data */
+ struct edid *edid;
+
+ struct drm_dp_link dp_link;
+ struct drm_dp_aux *drm_aux;
+
+ /* dpcd raw data */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ /* Link status */
+ u8 link_rate;
+ u8 lane_cnt;
+ u8 v_level;
+ u8 p_level;
+
+ /* Timing status */
+ u8 interlaced;
+ u32 pixel_rate; /* in kHz */
+ u32 color_depth;
+
+ struct edp_aux *aux;
+ struct edp_phy *phy;
+};
+
+struct edp_pixel_clk_div {
+ u32 rate; /* in kHz */
+ u32 m;
+ u32 n;
+};
+
+#define EDP_PIXEL_CLK_NUM 8
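+/*
+ * Pre-computed pixel clock M/N divider pairs: clk_divs[0] is used with
+ * the 1.62Gbps link rate, clk_divs[1] with 2.7Gbps, as selected in
+ * msm_edp_ctrl_pixel_clock_valid().
+ */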
+static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
+ { /* Link clock = 162MHz, source clock = 810MHz */
+ {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */
+ {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */
+ {148500, 11, 60}, /* FHD 1920x1080@60Hz */
+ {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */
+ {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */
+ {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */
+ {138530, 33, 193}, /* AUO B116HAN03.0 Panel */
+ {141400, 48, 275}, /* AUO B133HTN01.2 Panel */
+ },
+ { /* Link clock = 270MHz, source clock = 675MHz */
+ {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */
+ {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */
+ {148500, 11, 50}, /* FHD 1920x1080@60Hz */
+ {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */
+ {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */
+ {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */
+ {138530, 63, 307}, /* AUO B116HAN03.0 Panel */
+ {141400, 53, 253}, /* AUO B133HTN01.2 Panel */
+ },
+};
+
+static int edp_clk_init(struct edp_ctrl *ctrl)
+{
+ struct device *dev = &ctrl->pdev->dev;
+ int ret;
+
+ ctrl->aux_clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(ctrl->aux_clk)) {
+ ret = PTR_ERR(ctrl->aux_clk);
+ pr_err("%s: Can't find aux_clk, %d\n", __func__, ret);
+ ctrl->aux_clk = NULL;
+ return ret;
+ }
+
+ ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ if (IS_ERR(ctrl->pixel_clk)) {
+ ret = PTR_ERR(ctrl->pixel_clk);
+ pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret);
+ ctrl->pixel_clk = NULL;
+ return ret;
+ }
+
+ ctrl->ahb_clk = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(ctrl->ahb_clk)) {
+ ret = PTR_ERR(ctrl->ahb_clk);
+ pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret);
+ ctrl->ahb_clk = NULL;
+ return ret;
+ }
+
+ ctrl->link_clk = devm_clk_get(dev, "link_clk");
+ if (IS_ERR(ctrl->link_clk)) {
+ ret = PTR_ERR(ctrl->link_clk);
+ pr_err("%s: Can't find link_clk, %d\n", __func__, ret);
+ ctrl->link_clk = NULL;
+ return ret;
+ }
+
+ /* need mdp core clock to receive irq */
+ ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+ if (IS_ERR(ctrl->mdp_core_clk)) {
+ ret = PTR_ERR(ctrl->mdp_core_clk);
+ pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret);
+ ctrl->mdp_core_clk = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask)
+{
+ int ret;
+
+ DBG("mask=%x", clk_mask);
+ /* ahb_clk should be enabled first */
+ if (clk_mask & EDP_CLK_MASK_AHB) {
+ ret = clk_prepare_enable(ctrl->ahb_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable ahb clk\n", __func__);
+ goto f0;
+ }
+ }
+ if (clk_mask & EDP_CLK_MASK_AUX) {
+ ret = clk_set_rate(ctrl->aux_clk, 19200000);
+ if (ret) {
+ pr_err("%s: Failed to set rate aux clk\n", __func__);
+ goto f1;
+ }
+ ret = clk_prepare_enable(ctrl->aux_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable aux clk\n", __func__);
+ goto f1;
+ }
+ }
+ /* Need to set rate and enable link_clk prior to pixel_clk */
+ if (clk_mask & EDP_CLK_MASK_LINK) {
+ DBG("edp->link_clk, set_rate %ld",
+ (unsigned long)ctrl->link_rate * 27000000);
+ ret = clk_set_rate(ctrl->link_clk,
+ (unsigned long)ctrl->link_rate * 27000000);
+ if (ret) {
+ pr_err("%s: Failed to set rate to link clk\n",
+ __func__);
+ goto f2;
+ }
+
+ ret = clk_prepare_enable(ctrl->link_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable link clk\n", __func__);
+ goto f2;
+ }
+ }
+ if (clk_mask & EDP_CLK_MASK_PIXEL) {
+ DBG("edp->pixel_clk, set_rate %ld",
+ (unsigned long)ctrl->pixel_rate * 1000);
+ ret = clk_set_rate(ctrl->pixel_clk,
+ (unsigned long)ctrl->pixel_rate * 1000);
+ if (ret) {
+ pr_err("%s: Failed to set rate to pixel clk\n",
+ __func__);
+ goto f3;
+ }
+
+ ret = clk_prepare_enable(ctrl->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable pixel clk\n", __func__);
+ goto f3;
+ }
+ }
+ if (clk_mask & EDP_CLK_MASK_MDP_CORE) {
+ ret = clk_prepare_enable(ctrl->mdp_core_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable mdp core clk\n", __func__);
+ goto f4;
+ }
+ }
+
+ return 0;
+
+f4:
+ if (clk_mask & EDP_CLK_MASK_PIXEL)
+ clk_disable_unprepare(ctrl->pixel_clk);
+f3:
+ if (clk_mask & EDP_CLK_MASK_LINK)
+ clk_disable_unprepare(ctrl->link_clk);
+f2:
+ if (clk_mask & EDP_CLK_MASK_AUX)
+ clk_disable_unprepare(ctrl->aux_clk);
+f1:
+ if (clk_mask & EDP_CLK_MASK_AHB)
+ clk_disable_unprepare(ctrl->ahb_clk);
+f0:
+ return ret;
+}
+
+static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
+{
+ if (clk_mask & EDP_CLK_MASK_MDP_CORE)
+ clk_disable_unprepare(ctrl->mdp_core_clk);
+ if (clk_mask & EDP_CLK_MASK_PIXEL)
+ clk_disable_unprepare(ctrl->pixel_clk);
+ if (clk_mask & EDP_CLK_MASK_LINK)
+ clk_disable_unprepare(ctrl->link_clk);
+ if (clk_mask & EDP_CLK_MASK_AUX)
+ clk_disable_unprepare(ctrl->aux_clk);
+ if (clk_mask & EDP_CLK_MASK_AHB)
+ clk_disable_unprepare(ctrl->ahb_clk);
+}
+
+static int edp_regulator_init(struct edp_ctrl *ctrl)
+{
+ struct device *dev = &ctrl->pdev->dev;
+
+ DBG("");
+ ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(ctrl->vdda_vreg)) {
+ pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__,
+ PTR_ERR(ctrl->vdda_vreg));
+ ctrl->vdda_vreg = NULL;
+ return PTR_ERR(ctrl->vdda_vreg);
+ }
+ ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
+ if (IS_ERR(ctrl->lvl_vreg)) {
+ pr_err("Could not get lvl-vdd reg, %ld",
+ PTR_ERR(ctrl->lvl_vreg));
+ ctrl->lvl_vreg = NULL;
+ return PTR_ERR(ctrl->lvl_vreg);
+ }
+
+ return 0;
+}
+
+static int edp_regulator_enable(struct edp_ctrl *ctrl)
+{
+ int ret;
+
+ ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV);
+ if (ret) {
+ pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret);
+ goto vdda_set_fail;
+ }
+
+ ret = regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
+ if (ret < 0) {
+ pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
+ goto vdda_set_fail;
+ }
+
+ ret = regulator_enable(ctrl->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
+ goto vdda_enable_fail;
+ }
+
+ ret = regulator_enable(ctrl->lvl_vreg);
+ if (ret) {
+ pr_err("Failed to enable lvl-vdd reg regulator, %d", ret);
+ goto lvl_enable_fail;
+ }
+
+ DBG("exit");
+ return 0;
+
+lvl_enable_fail:
+ regulator_disable(ctrl->vdda_vreg);
+vdda_enable_fail:
+ regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
+vdda_set_fail:
+ return ret;
+}
+
+static void edp_regulator_disable(struct edp_ctrl *ctrl)
+{
+ regulator_disable(ctrl->lvl_vreg);
+ regulator_disable(ctrl->vdda_vreg);
+ regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
+}
+
+static int edp_gpio_config(struct edp_ctrl *ctrl)
+{
+ struct device *dev = &ctrl->pdev->dev;
+ int ret;
+
+ ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd");
+ if (IS_ERR(ctrl->panel_hpd_gpio)) {
+ ret = PTR_ERR(ctrl->panel_hpd_gpio);
+ ctrl->panel_hpd_gpio = NULL;
+ pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = gpiod_direction_input(ctrl->panel_hpd_gpio);
+ if (ret) {
+ pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en");
+ if (IS_ERR(ctrl->panel_en_gpio)) {
+ ret = PTR_ERR(ctrl->panel_en_gpio);
+ ctrl->panel_en_gpio = NULL;
+ pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = gpiod_direction_output(ctrl->panel_en_gpio, 0);
+ if (ret) {
+ pr_err("%s: Set direction for panel_en failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ DBG("gpio on");
+
+ return 0;
+}
+
+static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable)
+{
+ unsigned long flags;
+
+ DBG("%d", enable);
+ spin_lock_irqsave(&ctrl->irq_lock, flags);
+ if (enable) {
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1);
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2);
+ } else {
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0);
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0);
+ }
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+ DBG("exit");
+}
+
+static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
+{
+ u32 prate;
+ u32 lrate;
+ u32 bpp;
+ u8 max_lane = ctrl->dp_link.num_lanes;
+ u8 lane;
+
+ prate = ctrl->pixel_rate;
+ bpp = ctrl->color_depth * 3;
+
+ /*
+ * By default, use the maximum link rate and minimum lane count,
+ * so that we can do rate down shift during link training.
+ */
+ ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+
+ prate *= bpp;
+ prate /= 8; /* in kByte */
+
+ lrate = 270000; /* in kHz */
+ lrate *= ctrl->link_rate;
+ lrate /= 10; /* in kByte, 10 bits --> 8 bits */
+
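+ /*
+ * Start with a single lane and double the lane count until the
+ * aggregate link bandwidth covers the pixel data rate.
+ */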
+ for (lane = 1; lane <= max_lane; lane <<= 1) {
+ if (lrate >= prate)
+ break;
+ lrate <<= 1;
+ }
+
+ ctrl->lane_cnt = lane;
+ DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
+}
+
+static void edp_config_ctrl(struct edp_ctrl *ctrl)
+{
+ u32 data;
+ enum edp_color_depth depth;
+
+ data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
+
+ if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
+
+ depth = EDP_6BIT;
+ if (ctrl->color_depth == 8)
+ depth = EDP_8BIT;
+
+ data |= EDP_CONFIGURATION_CTRL_COLOR(depth);
+
+ if (!ctrl->interlaced) /* progressive */
+ data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;
+
+ data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
+ EDP_CONFIGURATION_CTRL_STATIC_MVID);
+
+ edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
+}
+
+static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
+{
+ edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
+ /* Make sure H/W status is set */
+ wmb();
+}
+
+static int edp_lane_set_write(struct edp_ctrl *ctrl,
+ u8 voltage_level, u8 pre_emphasis_level)
+{
+ int i;
+ u8 buf[4];
+
+ if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
+ voltage_level |= 0x04;
+
+ if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
+ pre_emphasis_level |= 0x04;
+
+ pre_emphasis_level <<= 3;
+
+ for (i = 0; i < 4; i++)
+ buf[i] = voltage_level | pre_emphasis_level;
+
+ DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
+ if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
+ pr_err("%s: Set sw/pe to panel failed\n", __func__);
+ return -ENOLINK;
+ }
+
+ return 0;
+}
+
+static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
+{
+ u8 p = pattern;
+
+ DBG("pattern=%x", p);
+ if (drm_dp_dpcd_write(ctrl->drm_aux,
+ DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
+ pr_err("%s: Set training pattern to panel failed\n", __func__);
+ return -ENOLINK;
+ }
+
+ return 0;
+}
+
+static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
+ const u8 *link_status)
+{
+ int i;
+ u8 max = 0;
+ u8 data;
+
+ /* use the max level across lanes */
+ for (i = 0; i < ctrl->lane_cnt; i++) {
+ data = drm_dp_get_adjust_request_voltage(link_status, i);
+ DBG("lane=%d req_voltage_swing=0x%x", i, data);
+ if (max < data)
+ max = data;
+ }
+
+ ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ /* use the max level across lanes */
+ max = 0;
+ for (i = 0; i < ctrl->lane_cnt; i++) {
+ data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+ DBG("lane=%d req_pre_emphasis=0x%x", i, data);
+ if (max < data)
+ max = data;
+ }
+
+ ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
+}
+
+static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
+{
+ int cnt = 10;
+ u32 data;
+ u32 shift = train - 1;
+
+ DBG("train=%d", train);
+
+ edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
+ while (--cnt) {
+ data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
+ if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
+ break;
+ }
+
+ if (cnt == 0)
+ pr_err("%s: set link_train=%d failed\n", __func__, train);
+}
+
+static const u8 vm_pre_emphasis[4][4] = {
+ {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
+ {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
+ {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
+ {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+static const u8 vm_voltage_swing[4][4] = {
+ {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
+ {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
+ {0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
+ {0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */
+};
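+/*
+ * A 0xFF entry marks a swing/pre-emphasis combination the PHY does not
+ * support; edp_voltage_pre_emphasise_set() refuses to program it.
+ */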
+
+static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
+{
+ u32 value0;
+ u32 value1;
+
+ DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);
+
+ value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
+ value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
+
+ /* Configure host and panel only if both values are allowed */
+ if (value0 != 0xFF && value1 != 0xFF) {
+ msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
+ return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
+ }
+
+ return -EINVAL;
+}
+
+static int edp_start_link_train_1(struct edp_ctrl *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 old_v_level;
+ int tries;
+ int ret;
+ int rlen;
+
+ DBG("");
+
+ edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+ ret = edp_train_pattern_set_write(ctrl,
+ DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
+ if (ret)
+ return ret;
+
+ tries = 0;
+ old_v_level = ctrl->v_level;
+ while (1) {
+ drm_dp_link_train_clock_recovery_delay(ctrl->dpcd);
+
+ rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
+ if (rlen < DP_LINK_STATUS_SIZE) {
+ pr_err("%s: read link status failed\n", __func__);
+ return -ENOLINK;
+ }
+ if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
+ ret = 0;
+ break;
+ }
+
+ if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
+ ret = -1;
+ break;
+ }
+
+ if (old_v_level == ctrl->v_level) {
+ tries++;
+ if (tries >= 5) {
+ ret = -1;
+ break;
+ }
+ } else {
+ tries = 0;
+ old_v_level = ctrl->v_level;
+ }
+
+ edp_sink_train_set_adjust(ctrl, link_status);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int edp_start_link_train_2(struct edp_ctrl *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int tries = 0;
+ int ret;
+ int rlen;
+
+ DBG("");
+
+ edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+
+ ret = edp_train_pattern_set_write(ctrl,
+ DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
+ if (ret)
+ return ret;
+
+ while (1) {
+ drm_dp_link_train_channel_eq_delay(ctrl->dpcd);
+
+ rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
+ if (rlen < DP_LINK_STATUS_SIZE) {
+ pr_err("%s: read link status failed\n", __func__);
+ return -ENOLINK;
+ }
+ if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
+ ret = 0;
+ break;
+ }
+
+ tries++;
+ if (tries > 10) {
+ ret = -1;
+ break;
+ }
+
+ edp_sink_train_set_adjust(ctrl, link_status);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
+{
+ u32 prate, lrate, bpp;
+ u8 rate, lane, max_lane;
+ int changed = 0;
+
+ rate = ctrl->link_rate;
+ lane = ctrl->lane_cnt;
+ max_lane = ctrl->dp_link.num_lanes;
+
+ bpp = ctrl->color_depth * 3;
+ prate = ctrl->pixel_rate;
+ prate *= bpp;
+ prate /= 8; /* in kByte */
+
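+ /*
+ * Step down one bandwidth code (DP_LINK_BW_2_7 and DP_LINK_BW_1_62
+ * differ by 4) and try to make up the bandwidth by doubling the
+ * lane count.
+ */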
+ if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
+ rate -= 4; /* reduce rate */
+ changed++;
+ }
+
+ if (changed) {
+ if (lane >= 1 && lane < max_lane)
+ lane <<= 1; /* increase lane */
+
+ lrate = 270000; /* in kHz */
+ lrate *= rate;
+ lrate /= 10; /* kByte, 10 bits --> 8 bits */
+ lrate *= lane;
+
+ DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
+ lrate, prate, rate, lane,
+ ctrl->pixel_rate,
+ bpp);
+
+ if (lrate > prate) {
+ ctrl->link_rate = rate;
+ ctrl->lane_cnt = lane;
+ DBG("new rate=%d %d", rate, lane);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
+{
+ int ret;
+
+ ret = edp_train_pattern_set_write(ctrl, 0);
+
+ drm_dp_link_train_channel_eq_delay(ctrl->dpcd);
+
+ return ret;
+}
+
+static int edp_do_link_train(struct edp_ctrl *ctrl)
+{
+ int ret;
+ struct drm_dp_link dp_link;
+
+ DBG("");
+ /*
+ * Set the current link rate and lane cnt to panel. They may have been
+ * adjusted and the values are different from them in DPCD CAP
+ */
+ dp_link.num_lanes = ctrl->lane_cnt;
+ dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
+ dp_link.capabilities = ctrl->dp_link.capabilities;
+ if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
+ return EDP_TRAIN_FAIL;
+
+ ctrl->v_level = 0; /* start from default level */
+ ctrl->p_level = 0;
+
+ edp_state_ctrl(ctrl, 0);
+ if (edp_clear_training_pattern(ctrl))
+ return EDP_TRAIN_FAIL;
+
+ ret = edp_start_link_train_1(ctrl);
+ if (ret < 0) {
+ if (edp_link_rate_down_shift(ctrl) == 0) {
+ DBG("link reconfig");
+ ret = EDP_TRAIN_RECONFIG;
+ goto clear;
+ } else {
+ pr_err("%s: Training 1 failed", __func__);
+ ret = EDP_TRAIN_FAIL;
+ goto clear;
+ }
+ }
+ DBG("Training 1 completed successfully");
+
+ edp_state_ctrl(ctrl, 0);
+ if (edp_clear_training_pattern(ctrl))
+ return EDP_TRAIN_FAIL;
+
+ ret = edp_start_link_train_2(ctrl);
+ if (ret < 0) {
+ if (edp_link_rate_down_shift(ctrl) == 0) {
+ DBG("link reconfig");
+ ret = EDP_TRAIN_RECONFIG;
+ goto clear;
+ } else {
+ pr_err("%s: Training 2 failed", __func__);
+ ret = EDP_TRAIN_FAIL;
+ goto clear;
+ }
+ }
+ DBG("Training 2 completed successfully");
+
+ edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
+clear:
+ edp_clear_training_pattern(ctrl);
+
+ return ret;
+}
+
+static void edp_clock_synchronous(struct edp_ctrl *ctrl, int sync)
+{
+ u32 data;
+ enum edp_color_depth depth;
+
+ data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);
+
+ if (sync)
+ data |= EDP_MISC1_MISC0_SYNC;
+ else
+ data &= ~EDP_MISC1_MISC0_SYNC;
+
+ /* only legacy rgb mode supported */
+ depth = EDP_6BIT; /* Default */
+ if (ctrl->color_depth == 8)
+ depth = EDP_8BIT;
+ else if (ctrl->color_depth == 10)
+ depth = EDP_10BIT;
+ else if (ctrl->color_depth == 12)
+ depth = EDP_12BIT;
+ else if (ctrl->color_depth == 16)
+ depth = EDP_16BIT;
+
+ data |= EDP_MISC1_MISC0_COLOR(depth);
+
+ edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
+}
+
+static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
+{
+ u32 n_multi, m_multi = 5;
+
+ if (ctrl->link_rate == DP_LINK_BW_1_62) {
+ n_multi = 1;
+ } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
+ n_multi = 2;
+ } else {
+ pr_err("%s: Invalid link rate, %d\n", __func__,
+ ctrl->link_rate);
+ return -EINVAL;
+ }
+
+ edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
+ edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);
+
+ return 0;
+}
+
+static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
+{
+ u32 data = 0;
+
+ edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
+ /* Make sure fully reset */
+ wmb();
+ usleep_range(500, 1000);
+
+ if (enable)
+ data |= EDP_MAINLINK_CTRL_ENABLE;
+
+ edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
+}
+
+static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
+{
+ if (enable) {
+ edp_regulator_enable(ctrl);
+ edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
+ msm_edp_phy_ctrl(ctrl->phy, 1);
+ msm_edp_aux_ctrl(ctrl->aux, 1);
+ gpiod_set_value(ctrl->panel_en_gpio, 1);
+ } else {
+ gpiod_set_value(ctrl->panel_en_gpio, 0);
+ msm_edp_aux_ctrl(ctrl->aux, 0);
+ msm_edp_phy_ctrl(ctrl->phy, 0);
+ edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
+ edp_regulator_disable(ctrl);
+ }
+}
+
+static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
+{
+ u32 m, n;
+
+ if (enable) {
+ /* Enable link channel clocks */
+ edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);
+
+ msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);
+
+ msm_edp_phy_vm_pe_init(ctrl->phy);
+
+ /* Make sure the PHY is programmed */
+ wmb();
+ msm_edp_phy_ready(ctrl->phy);
+
+ edp_config_ctrl(ctrl);
+ msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
+ edp_sw_mvid_nvid(ctrl, m, n);
+ edp_mainlink_ctrl(ctrl, 1);
+ } else {
+ edp_mainlink_ctrl(ctrl, 0);
+
+ msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
+ edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
+ }
+}
+
+static int edp_ctrl_training(struct edp_ctrl *ctrl)
+{
+ int ret;
+
+ /* Do link training only when power is on */
+ if (!ctrl->power_on)
+ return -EINVAL;
+
+train_start:
+ ret = edp_do_link_train(ctrl);
+ if (ret == EDP_TRAIN_RECONFIG) {
+ /* Re-configure main link */
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_link_enable(ctrl, 0);
+ msm_edp_phy_ctrl(ctrl->phy, 0);
+
+ /* Make sure link is fully disabled */
+ wmb();
+ usleep_range(500, 1000);
+
+ msm_edp_phy_ctrl(ctrl->phy, 1);
+ edp_ctrl_link_enable(ctrl, 1);
+ edp_ctrl_irq_enable(ctrl, 1);
+ goto train_start;
+ }
+
+ return ret;
+}
+
+static void edp_ctrl_on_worker(struct work_struct *work)
+{
+ struct edp_ctrl *ctrl = container_of(
+ work, struct edp_ctrl, on_work);
+ int ret;
+
+ mutex_lock(&ctrl->dev_mutex);
+
+ if (ctrl->power_on) {
+ DBG("already on");
+ goto unlock_ret;
+ }
+
+ edp_ctrl_phy_aux_enable(ctrl, 1);
+ edp_ctrl_link_enable(ctrl, 1);
+
+ edp_ctrl_irq_enable(ctrl, 1);
+ ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link);
+ if (ret)
+ goto fail;
+
+ ctrl->power_on = true;
+
+ /* Start link training */
+ ret = edp_ctrl_training(ctrl);
+ if (ret != EDP_TRAIN_SUCCESS)
+ goto fail;
+
+ DBG("DONE");
+ goto unlock_ret;
+
+fail:
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_link_enable(ctrl, 0);
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+ ctrl->power_on = false;
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+}
+
+static void edp_ctrl_off_worker(struct work_struct *work)
+{
+ struct edp_ctrl *ctrl = container_of(
+ work, struct edp_ctrl, off_work);
+ int ret;
+
+ mutex_lock(&ctrl->dev_mutex);
+
+ if (!ctrl->power_on) {
+ DBG("already off");
+ goto unlock_ret;
+ }
+
+ reinit_completion(&ctrl->idle_comp);
+ edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
+
+ ret = wait_for_completion_timeout(&ctrl->idle_comp,
+ msecs_to_jiffies(500));
+ if (ret <= 0)
+ DBG("%s: idle pattern timedout, %d\n",
+ __func__, ret);
+
+ edp_state_ctrl(ctrl, 0);
+
+ drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link);
+
+ edp_ctrl_irq_enable(ctrl, 0);
+
+ edp_ctrl_link_enable(ctrl, 0);
+
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+
+ ctrl->power_on = false;
+
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+}
+
+irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
+{
+ u32 isr1, isr2, mask1, mask2;
+ u32 ack;
+
+ DBG("");
+ spin_lock(&ctrl->irq_lock);
+ isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
+ isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);
+
+ mask1 = isr1 & EDP_INTR_MASK1;
+ mask2 = isr2 & EDP_INTR_MASK2;
+
+ isr1 &= ~mask1; /* remove masks bit */
+ isr2 &= ~mask2;
+
+ DBG("isr=%x mask=%x isr2=%x mask2=%x",
+ isr1, mask1, isr2, mask2);
+
+ ack = isr1 & EDP_INTR_STATUS1;
+ ack <<= 1; /* ack bits */
+ ack |= mask1;
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack);
+
+ ack = isr2 & EDP_INTR_STATUS2;
+ ack <<= 1; /* ack bits */
+ ack |= mask2;
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack);
+ spin_unlock(&ctrl->irq_lock);
+
+ if (isr1 & EDP_INTERRUPT_REG_1_HPD)
+ DBG("edp_hpd");
+
+ if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO)
+ DBG("edp_video_ready");
+
+ if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) {
+ DBG("idle_patterns_sent");
+ complete(&ctrl->idle_comp);
+ }
+
+ msm_edp_aux_irq(ctrl->aux, isr1);
+
+ return IRQ_HANDLED;
+}
+
+void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
+{
+ if (on)
+ queue_work(ctrl->workqueue, &ctrl->on_work);
+ else
+ queue_work(ctrl->workqueue, &ctrl->off_work);
+}
+
+int msm_edp_ctrl_init(struct msm_edp *edp)
+{
+ struct edp_ctrl *ctrl = NULL;
+ struct device *dev = &edp->pdev->dev;
+ int ret;
+
+ if (!edp) {
+ pr_err("%s: edp is NULL!\n", __func__);
+ return -EINVAL;
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ edp->ctrl = ctrl;
+ ctrl->pdev = edp->pdev;
+
+ ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ /* Get regulator, clock, gpio, pwm */
+ ret = edp_regulator_init(ctrl);
+ if (ret) {
+ pr_err("%s:regulator init fail\n", __func__);
+ return ret;
+ }
+ ret = edp_clk_init(ctrl);
+ if (ret) {
+ pr_err("%s:clk init fail\n", __func__);
+ return ret;
+ }
+ ret = edp_gpio_config(ctrl);
+ if (ret) {
+ pr_err("%s:failed to configure GPIOs: %d", __func__, ret);
+ return ret;
+ }
+
+ /* Init aux and phy */
+ ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
+ if (!ctrl->aux || !ctrl->drm_aux) {
+ pr_err("%s:failed to init aux\n", __func__);
+ return -ENOMEM;
+ }
+
+ ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
+ if (!ctrl->phy) {
+ pr_err("%s:failed to init phy\n", __func__);
+ goto err_destroy_aux;
+ }
+
+ spin_lock_init(&ctrl->irq_lock);
+ mutex_init(&ctrl->dev_mutex);
+ init_completion(&ctrl->idle_comp);
+
+ /* setup workqueue */
+ ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
+ INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
+ INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);
+
+ return 0;
+
+err_destroy_aux:
+ msm_edp_aux_destroy(dev, ctrl->aux);
+ ctrl->aux = NULL;
+ return -ENOMEM;
+}
+
+void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
+{
+ if (!ctrl)
+ return;
+
+ if (ctrl->workqueue) {
+ flush_workqueue(ctrl->workqueue);
+ destroy_workqueue(ctrl->workqueue);
+ ctrl->workqueue = NULL;
+ }
+
+ if (ctrl->aux) {
+ msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux);
+ ctrl->aux = NULL;
+ }
+
+ kfree(ctrl->edid);
+ ctrl->edid = NULL;
+
+ mutex_destroy(&ctrl->dev_mutex);
+}
+
+bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
+{
+ mutex_lock(&ctrl->dev_mutex);
+ DBG("connect status = %d", ctrl->edp_connected);
+ if (ctrl->edp_connected) {
+ mutex_unlock(&ctrl->dev_mutex);
+ return true;
+ }
+
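+ /*
+ * If the panel is not powered yet, bring up the AUX channel just
+ * long enough to probe the DPCD.
+ */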
+ if (!ctrl->power_on) {
+ edp_ctrl_phy_aux_enable(ctrl, 1);
+ edp_ctrl_irq_enable(ctrl, 1);
+ }
+
+ if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd,
+ DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) {
+ pr_err("%s: AUX channel is NOT ready\n", __func__);
+ memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE);
+ } else {
+ ctrl->edp_connected = true;
+ }
+
+ if (!ctrl->power_on) {
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+ }
+
+ DBG("exit: connect status=%d", ctrl->edp_connected);
+
+ mutex_unlock(&ctrl->dev_mutex);
+
+ return ctrl->edp_connected;
+}
+
+int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
+ struct drm_connector *connector, struct edid **edid)
+{
+ int ret = 0;
+
+ mutex_lock(&ctrl->dev_mutex);
+
+ if (ctrl->edid) {
+ if (edid) {
+ DBG("Just return edid buffer");
+ *edid = ctrl->edid;
+ }
+ goto unlock_ret;
+ }
+
+ if (!ctrl->power_on) {
+ edp_ctrl_phy_aux_enable(ctrl, 1);
+ edp_ctrl_irq_enable(ctrl, 1);
+ }
+
+ ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link);
+ if (ret) {
+ pr_err("%s: read dpcd cap failed, %d\n", __func__, ret);
+ goto disable_ret;
+ }
+
+ /* Initialize link rate as panel max link rate */
+ ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+
+ ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
+ if (!ctrl->edid) {
+ pr_err("%s: edid read fail\n", __func__);
+ goto disable_ret;
+ }
+
+ if (edid)
+ *edid = ctrl->edid;
+
+disable_ret:
+ if (!ctrl->power_on) {
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+ }
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+ return ret;
+}
+
+int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ u32 hstart_from_sync, vstart_from_sync;
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&ctrl->dev_mutex);
+ /*
+ * Need to keep color depth, pixel rate and
+ * interlaced information in ctrl context
+ */
+ ctrl->color_depth = info->bpc;
+ ctrl->pixel_rate = mode->clock;
+ ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ /* Fill initial link config based on passed in timing */
+ edp_fill_link_cfg(ctrl);
+
+ if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) {
+ pr_err("%s, fail to prepare enable ahb clk\n", __func__);
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+ edp_clock_synchronous(ctrl, 1);
+
+ /* Configure eDP timing to HW */
+ edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER,
+ EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) |
+ EDP_TOTAL_HOR_VER_VERT(mode->vtotal));
+
+ vstart_from_sync = mode->vtotal - mode->vsync_start;
+ hstart_from_sync = mode->htotal - mode->hsync_start;
+ edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC,
+ EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) |
+ EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync));
+
+ data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(
+ mode->vsync_end - mode->vsync_start);
+ data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(
+ mode->hsync_end - mode->hsync_start);
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC;
+ edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data);
+
+ edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER,
+ EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) |
+ EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay));
+
+ edp_clk_disable(ctrl, EDP_CLK_MASK_AHB);
+
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+ return ret;
+}
+
+bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
+ u32 pixel_rate, u32 *pm, u32 *pn)
+{
+ const struct edp_pixel_clk_div *divs;
+ u32 err = 1; /* 1% error tolerance */
+ u32 clk_err;
+ int i;
+
+ if (ctrl->link_rate == DP_LINK_BW_1_62) {
+ divs = clk_divs[0];
+ } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
+ divs = clk_divs[1];
+ } else {
+ pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate);
+ return false;
+ }
+
+ for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) {
+ clk_err = abs(divs[i].rate - pixel_rate);
+ if ((divs[i].rate * err / 100) >= clk_err) {
+ if (pm)
+ *pm = divs[i].m;
+ if (pn)
+ *pn = divs[i].n;
+ return true;
+ }
+ }
+
+ DBG("pixel clock %d(kHz) not supported", pixel_rate);
+
+ return false;
+}
+
diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c
new file mode 100644
index 000000000000..36bb8933e9ee
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_phy.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edp.h"
+#include "edp.xml.h"
+
+#define EDP_MAX_LANE 4
+
+struct edp_phy {
+ void __iomem *base;
+};
+
+bool msm_edp_phy_ready(struct edp_phy *phy)
+{
+ u32 status;
+ int cnt = 100;
+
+ while (--cnt) {
+ status = edp_read(phy->base +
+ REG_EDP_PHY_GLB_PHY_STATUS);
+ if (status & 0x01)
+ break;
+ usleep_range(500, 1000);
+ }
+
+ if (cnt == 0) {
+ pr_err("%s: PHY NOT ready\n", __func__);
+ return false;
+ } else {
+ return true;
+ }
+}
+
+void msm_edp_phy_ctrl(struct edp_phy *phy, int enable)
+{
+ DBG("enable=%d", enable);
+ if (enable) {
+ /* Reset */
+ edp_write(phy->base + REG_EDP_PHY_CTRL,
+ EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL);
+ /* Make sure fully reset */
+ wmb();
+ usleep_range(500, 1000);
+ edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000);
+ edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f);
+ edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1);
+ } else {
+ edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0);
+ }
+}
+
+/* voltage mode and pre emphasis cfg */
+void msm_edp_phy_vm_pe_init(struct edp_phy *phy)
+{
+ edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3);
+ edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64);
+ edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c);
+}
+
+void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1)
+{
+ edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0);
+ edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1);
+}
+
+void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane)
+{
+ u32 i;
+ u32 data;
+
+ if (up)
+ data = 0; /* power up */
+ else
+ data = 0x7; /* power down */
+
+ for (i = 0; i < max_lane; i++)
+ edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i), data);
+
+ /* power down unused lane */
+ data = 0x7; /* power down */
+ for (i = max_lane; i < EDP_MAX_LANE; i++)
+ edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i), data);
+}
+
+void *msm_edp_phy_init(struct device *dev, void __iomem *regbase)
+{
+ struct edp_phy *phy = NULL;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->base = regbase;
+ return phy;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 062c68725376..814536202efe 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -106,7 +107,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
- BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
+ hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
+ config->hpd_reg_cnt, GFP_KERNEL);
+ if (!hdmi->hpd_regs) {
+ ret = -ENOMEM;
+ goto fail;
+ }
for (i = 0; i < config->hpd_reg_cnt; i++) {
struct regulator *reg;
@@ -122,7 +128,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->hpd_regs[i] = reg;
}
- BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
+ hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) *
+ config->pwr_reg_cnt, GFP_KERNEL);
+ if (!hdmi->pwr_regs) {
+ ret = -ENOMEM;
+ goto fail;
+ }
for (i = 0; i < config->pwr_reg_cnt; i++) {
struct regulator *reg;
@@ -138,7 +149,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->pwr_regs[i] = reg;
}
- BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
+ hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) *
+ config->hpd_clk_cnt, GFP_KERNEL);
+ if (!hdmi->hpd_clks) {
+ ret = -ENOMEM;
+ goto fail;
+ }
for (i = 0; i < config->hpd_clk_cnt; i++) {
struct clk *clk;
@@ -153,7 +169,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->hpd_clks[i] = clk;
}
- BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
+ hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) *
+ config->pwr_clk_cnt, GFP_KERNEL);
+ if (!hdmi->pwr_clks) {
+ ret = -ENOMEM;
+ goto fail;
+ }
for (i = 0; i < config->pwr_clk_cnt; i++) {
struct clk *clk;
@@ -247,9 +268,9 @@ int hdmi_modeset_init(struct hdmi *hdmi,
return 0;
fail:
- /* bridge/connector are normally destroyed by drm: */
+ /* bridge is normally destroyed by drm: */
if (hdmi->bridge) {
- hdmi->bridge->funcs->destroy(hdmi->bridge);
+ hdmi_bridge_destroy(hdmi->bridge);
hdmi->bridge = NULL;
}
if (hdmi->connector) {
@@ -266,6 +287,57 @@ fail:
#include <linux/of_gpio.h>
+#define HDMI_CFG(item, entry) \
+ .item ## _names = item ##_names_ ## entry, \
+ .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
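+/*
+ * e.g. HDMI_CFG(hpd_reg, 8960) expands to:
+ * .hpd_reg_names = hpd_reg_names_8960,
+ * .hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names_8960)
+ */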
+
+static struct hdmi_platform_config hdmi_tx_8660_config = {
+ .phy_init = hdmi_phy_8x60_init,
+};
+
+static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
+static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+
+static struct hdmi_platform_config hdmi_tx_8960_config = {
+ .phy_init = hdmi_phy_8960_init,
+ HDMI_CFG(hpd_reg, 8960),
+ HDMI_CFG(hpd_clk, 8960),
+};
+
+static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
+static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"};
+static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
+static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
+
+static struct hdmi_platform_config hdmi_tx_8074_config = {
+ .phy_init = hdmi_phy_8x74_init,
+ HDMI_CFG(pwr_reg, 8x74),
+ HDMI_CFG(hpd_reg, 8x74),
+ HDMI_CFG(pwr_clk, 8x74),
+ HDMI_CFG(hpd_clk, 8x74),
+ .hpd_freq = hpd_clk_freq_8x74,
+};
+
+static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"};
+
+static struct hdmi_platform_config hdmi_tx_8084_config = {
+ .phy_init = hdmi_phy_8x74_init,
+ HDMI_CFG(pwr_reg, 8x74),
+ HDMI_CFG(hpd_reg, 8084),
+ HDMI_CFG(pwr_clk, 8x74),
+ HDMI_CFG(hpd_clk, 8x74),
+ .hpd_freq = hpd_clk_freq_8x74,
+};
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
+ { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+ { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
+ { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
+ {}
+};
+
#ifdef CONFIG_OF
static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
{
@@ -288,50 +360,31 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- static struct hdmi_platform_config config = {};
+ static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
+ const struct of_device_id *match;
- if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
- static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
- static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
- static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
- static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
- static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
- config.phy_init = hdmi_phy_8x74_init;
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.pwr_reg_names = pwr_reg_names;
- config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_freq = hpd_clk_freq;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.pwr_clk_names = pwr_clk_names;
- config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
- } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
- static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
- static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
- config.phy_init = hdmi_phy_8960_init;
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) {
- config.phy_init = hdmi_phy_8x60_init;
+ match = of_match_node(dt_match, of_node);
+ if (match && match->data) {
+ hdmi_cfg = (struct hdmi_platform_config *)match->data;
+ DBG("hdmi phy: %s", match->compatible);
} else {
dev_err(dev, "unknown phy: %s\n", of_node->name);
+ return -ENXIO;
}
- config.mmio_name = "core_physical";
- config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
- config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
- config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
- config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
- config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
- config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
+ hdmi_cfg->mmio_name = "core_physical";
+ hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+ hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+ hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+ hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+ hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+ hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
+ static struct hdmi_platform_config config = {};
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
@@ -377,12 +430,15 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
}
+ hdmi_cfg = &config;
#endif
- dev->platform_data = &config;
+ dev->platform_data = hdmi_cfg;
+
hdmi = hdmi_init(to_platform_device(dev));
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
priv->hdmi = hdmi;
+
return 0;
}
@@ -413,13 +469,6 @@ static int hdmi_dev_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,hdmi-tx-8074" },
- { .compatible = "qcom,hdmi-tx-8960" },
- { .compatible = "qcom,hdmi-tx-8660" },
- {}
-};
-
static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe,
.remove = hdmi_dev_remove,
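The hdmi_bind() hunk above replaces the chain of of_device_is_compatible() checks with an of_device_id table whose .data member carries a per-SoC hdmi_platform_config, resolved through of_match_node(). A minimal sketch of that lookup pattern is shown below; the struct, field, and compatible names are hypothetical stand-ins for illustration, not the driver's own definitions.

/*
 * Sketch of the of_device_id .data lookup pattern used above.
 * "my_cfg" and "example,soc-a" are hypothetical names.
 */
#include <linux/kernel.h>
#include <linux/of.h>

struct my_cfg {
	const char * const *clk_names;	/* per-SoC data hung off .data */
	int clk_cnt;
};

static const char * const soc_a_clks[] = { "iface_clk", "core_clk" };

static const struct my_cfg soc_a_cfg = {
	.clk_names = soc_a_clks,
	.clk_cnt = ARRAY_SIZE(soc_a_clks),
};

static const struct of_device_id my_dt_match[] = {
	{ .compatible = "example,soc-a", .data = &soc_a_cfg },
	{ /* sentinel */ }
};

static const struct my_cfg *my_get_cfg(struct device_node *np)
{
	const struct of_device_id *match = of_match_node(my_dt_match, np);

	/* NULL means the node's compatible is not known to this driver */
	return match ? match->data : NULL;
}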
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 43e654f751b7..68fdfb3622a5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -52,10 +52,10 @@ struct hdmi {
void __iomem *mmio;
- struct regulator *hpd_regs[2];
- struct regulator *pwr_regs[2];
- struct clk *hpd_clks[3];
- struct clk *pwr_clks[2];
+ struct regulator **hpd_regs;
+ struct regulator **pwr_regs;
+ struct clk **hpd_clks;
+ struct clk **pwr_clks;
struct hdmi_phy *phy;
struct i2c_adapter *i2c;
@@ -146,6 +146,7 @@ void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
*/
struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
+void hdmi_bridge_destroy(struct drm_bridge *bridge);
/*
* hdmi connector:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 5b0844befbab..350988740e9f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -45,12 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum hdmi_hdcp_key_state {
- NO_KEYS = 0,
- NOT_CHECKED = 1,
- CHECKING = 2,
- KEYS_VALID = 3,
- AKSV_INVALID = 4,
- CHECKSUM_MISMATCH = 5,
+ HDCP_KEYS_STATE_NO_KEYS = 0,
+ HDCP_KEYS_STATE_NOT_CHECKED = 1,
+ HDCP_KEYS_STATE_CHECKING = 2,
+ HDCP_KEYS_STATE_VALID = 3,
+ HDCP_KEYS_STATE_AKSV_NOT_VALID = 4,
+ HDCP_KEYS_STATE_CHKSUM_MISMATCH = 5,
+ HDCP_KEYS_STATE_PROD_AKSV = 6,
+ HDCP_KEYS_STATE_RESERVED = 7,
};
enum hdmi_ddc_read_write {
@@ -199,11 +202,29 @@ static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
#define HDMI_HDCP_CTRL_ENABLE 0x00000001
#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
+#define REG_HDMI_HDCP_DEBUG_CTRL 0x00000114
+#define HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER 0x00000004
+
#define REG_HDMI_HDCP_INT_CTRL 0x00000118
+#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT 0x00000001
+#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK 0x00000002
+#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK 0x00000004
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT 0x00000010
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK 0x00000020
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK 0x00000040
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK 0x00000080
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT 0x00000100
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_ACK 0x00000200
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_MASK 0x00000400
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT 0x00001000
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_ACK 0x00002000
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_MASK 0x00004000
#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
+#define HDMI_HDCP_LINK0_STATUS_RI_MATCHES 0x00001000
+#define HDMI_HDCP_LINK0_STATUS_V_MATCHES 0x00100000
#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
@@ -211,9 +232,56 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state
return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
}
+#define REG_HDMI_HDCP_DDC_CTRL_0 0x00000120
+#define HDMI_HDCP_DDC_CTRL_0_DISABLE 0x00000001
+
+#define REG_HDMI_HDCP_DDC_CTRL_1 0x00000124
+#define HDMI_HDCP_DDC_CTRL_1_FAILED_ACK 0x00000001
+
+#define REG_HDMI_HDCP_DDC_STATUS 0x00000128
+#define HDMI_HDCP_DDC_STATUS_XFER_REQ 0x00000010
+#define HDMI_HDCP_DDC_STATUS_XFER_DONE 0x00000400
+#define HDMI_HDCP_DDC_STATUS_ABORTED 0x00001000
+#define HDMI_HDCP_DDC_STATUS_TIMEOUT 0x00002000
+#define HDMI_HDCP_DDC_STATUS_NACK0 0x00004000
+#define HDMI_HDCP_DDC_STATUS_NACK1 0x00008000
+#define HDMI_HDCP_DDC_STATUS_FAILED 0x00010000
+
+#define REG_HDMI_HDCP_ENTROPY_CTRL0 0x0000012c
+
+#define REG_HDMI_HDCP_ENTROPY_CTRL1 0x0000025c
+
#define REG_HDMI_HDCP_RESET 0x00000130
#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+#define REG_HDMI_HDCP_RCVPORT_DATA0 0x00000134
+
+#define REG_HDMI_HDCP_RCVPORT_DATA1 0x00000138
+
+#define REG_HDMI_HDCP_RCVPORT_DATA2_0 0x0000013c
+
+#define REG_HDMI_HDCP_RCVPORT_DATA2_1 0x00000140
+
+#define REG_HDMI_HDCP_RCVPORT_DATA3 0x00000144
+
+#define REG_HDMI_HDCP_RCVPORT_DATA4 0x00000148
+
+#define REG_HDMI_HDCP_RCVPORT_DATA5 0x0000014c
+
+#define REG_HDMI_HDCP_RCVPORT_DATA6 0x00000150
+
+#define REG_HDMI_HDCP_RCVPORT_DATA7 0x00000154
+
+#define REG_HDMI_HDCP_RCVPORT_DATA8 0x00000158
+
+#define REG_HDMI_HDCP_RCVPORT_DATA9 0x0000015c
+
+#define REG_HDMI_HDCP_RCVPORT_DATA10 0x00000160
+
+#define REG_HDMI_HDCP_RCVPORT_DATA11 0x00000164
+
+#define REG_HDMI_HDCP_RCVPORT_DATA12 0x00000168
+
#define REG_HDMI_VENSPEC_INFO0 0x0000016c
#define REG_HDMI_VENSPEC_INFO1 0x00000170
@@ -266,6 +334,7 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
#define REG_HDMI_DDC_HW_STATUS 0x0000021c
+#define HDMI_DDC_HW_STATUS_DONE 0x00000008
#define REG_HDMI_DDC_SPEED 0x00000220
#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
@@ -329,6 +398,15 @@ static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
}
#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
+#define REG_HDMI_HDCP_SHA_CTRL 0x0000023c
+
+#define REG_HDMI_HDCP_SHA_STATUS 0x00000240
+#define HDMI_HDCP_SHA_STATUS_BLOCK_DONE 0x00000001
+#define HDMI_HDCP_SHA_STATUS_COMP_DONE 0x00000010
+
+#define REG_HDMI_HDCP_SHA_DATA 0x00000244
+#define HDMI_HDCP_SHA_DATA_DONE 0x00000001
+
#define REG_HDMI_HPD_INT_STATUS 0x00000250
#define HDMI_HPD_INT_STATUS_INT 0x00000001
#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
@@ -359,6 +437,10 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
}
+#define REG_HDMI_HDCP_SW_UPPER_AKSV 0x00000284
+
+#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288
+
#define REG_HDMI_CEC_STATUS 0x00000298
#define REG_HDMI_CEC_INT 0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 6902ad6da710..a7a1d8267cf0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -23,11 +23,8 @@ struct hdmi_bridge {
};
#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
-static void hdmi_bridge_destroy(struct drm_bridge *bridge)
+void hdmi_bridge_destroy(struct drm_bridge *bridge)
{
- struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- drm_bridge_cleanup(bridge);
- kfree(hdmi_bridge);
}
static void power_on(struct drm_bridge *bridge)
@@ -200,7 +197,6 @@ static const struct drm_bridge_funcs hdmi_bridge_funcs = {
.disable = hdmi_bridge_disable,
.post_disable = hdmi_bridge_post_disable,
.mode_set = hdmi_bridge_mode_set,
- .destroy = hdmi_bridge_destroy,
};
@@ -211,7 +207,8 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
struct hdmi_bridge *hdmi_bridge;
int ret;
- hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL);
+ hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+ sizeof(*hdmi_bridge), GFP_KERNEL);
if (!hdmi_bridge) {
ret = -ENOMEM;
goto fail;
@@ -220,8 +217,11 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
hdmi_bridge->hdmi = hdmi;
bridge = &hdmi_bridge->base;
+ bridge->funcs = &hdmi_bridge_funcs;
- drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs);
+ ret = drm_bridge_attach(hdmi->dev, bridge);
+ if (ret)
+ goto fail;
return bridge;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b4e70e0e3cfa..b62cdb968614 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -386,7 +386,7 @@ hdmi_connector_best_encoder(struct drm_connector *connector)
}
static const struct drm_connector_funcs hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hdmi_connector_destroy,
@@ -426,7 +426,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
- connector->interlace_allowed = 1;
+ connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
drm_connector_register(connector);
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 29bd796797de..43bb54a9afbf 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index a4a7f8c7122a..1d39174d91fb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -72,6 +73,18 @@ enum mdp4_cursor_format {
CURSOR_XRGB = 2,
};
+enum mdp4_frame_format {
+ FRAME_LINEAR = 0,
+ FRAME_TILE_ARGB_4X4 = 1,
+ FRAME_TILE_YCBCR_420 = 2,
+};
+
+enum mdp4_scale_unit {
+ SCALE_FIR = 0,
+ SCALE_MN_PHASE = 1,
+ SCALE_PIXEL_RPT = 2,
+};
+
enum mdp4_dma {
DMA_P = 0,
DMA_S = 1,
@@ -637,6 +650,8 @@ static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00
static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; }
+
static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
@@ -720,7 +735,25 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
}
#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000
+#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK;
+}
#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
+#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000
+#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000
+#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK;
+}
static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
@@ -751,6 +784,18 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
+#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c
+#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2
+static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val)
+{
+ return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK;
+}
+#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030
+#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4
+static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val)
+{
+ return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK;
+}
#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 3449213f1e76..73afa21822b4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -140,26 +140,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp4_crtc);
}
-static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- bool enabled = (mode == DRM_MODE_DPMS_ON);
-
- DBG("%s: mode=%d", mdp4_crtc->name, mode);
-
- if (enabled != mdp4_crtc->enabled) {
- if (enabled) {
- mdp4_enable(mdp4_kms);
- mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
- } else {
- mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
- mdp4_disable(mdp4_kms);
- }
- mdp4_crtc->enabled = enabled;
- }
-}
-
static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -304,27 +284,38 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
}
-static void mdp4_crtc_prepare(struct drm_crtc *crtc)
+static void mdp4_crtc_disable(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
DBG("%s", mdp4_crtc->name);
- /* make sure we hold a ref to mdp clks while setting up mode: */
- drm_crtc_vblank_get(crtc);
- mdp4_enable(get_kms(crtc));
- mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (WARN_ON(!mdp4_crtc->enabled))
+ return;
+
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+
+ mdp4_crtc->enabled = false;
}
-static void mdp4_crtc_commit(struct drm_crtc *crtc)
+static void mdp4_crtc_enable(struct drm_crtc *crtc)
{
- mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ DBG("%s", mdp4_crtc->name);
+
+ if (WARN_ON(mdp4_crtc->enabled))
+ return;
+
+ mdp4_enable(mdp4_kms);
+ mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
+
crtc_flush(crtc);
- /* drop the ref to mdp clk's that we got in prepare: */
- mdp4_disable(get_kms(crtc));
- drm_crtc_vblank_put(crtc);
-}
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
-{
+ mdp4_crtc->enabled = true;
}
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
@@ -508,14 +499,10 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
- .dpms = mdp4_crtc_dpms,
.mode_fixup = mdp4_crtc_mode_fixup,
.mode_set_nofb = mdp4_crtc_mode_set_nofb,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
- .prepare = mdp4_crtc_prepare,
- .commit = mdp4_crtc_commit,
- .load_lut = mdp4_crtc_load_lut,
+ .disable = mdp4_crtc_disable,
+ .enable = mdp4_crtc_enable,
.atomic_check = mdp4_crtc_atomic_check,
.atomic_begin = mdp4_crtc_atomic_begin,
.atomic_flush = mdp4_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index c3878420180b..7896323b2631 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -94,61 +94,6 @@ static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
.destroy = mdp4_dtv_encoder_destroy,
};
-static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- struct mdp4_kms *mdp4_kms = get_kms(encoder);
- bool enabled = (mode == DRM_MODE_DPMS_ON);
-
- DBG("mode=%d", mode);
-
- if (enabled == mdp4_dtv_encoder->enabled)
- return;
-
- if (enabled) {
- unsigned long pc = mdp4_dtv_encoder->pixclock;
- int ret;
-
- bs_set(mdp4_dtv_encoder, 1);
-
- DBG("setting src_clk=%lu", pc);
-
- ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
- if (ret)
- dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
- clk_prepare_enable(mdp4_dtv_encoder->src_clk);
- ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
- if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
- ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
- if (ret)
- dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
-
- mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
- } else {
- mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
-
- /*
- * Wait for a vsync so we know the ENABLE=0 latched before
- * the (connector) source of the vsync's gets disabled,
- * otherwise we end up in a funny state if we re-enable
- * before the disable latches, which results that some of
- * the settings changes for the new modeset (like new
- * scanout buffer) don't latch properly..
- */
- mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
-
- clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
- clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
- clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
-
- bs_set(mdp4_dtv_encoder, 0);
- }
-
- mdp4_dtv_encoder->enabled = enabled;
-}
-
static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -221,28 +166,78 @@ static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
}
-static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
+static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
{
- mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (WARN_ON(!mdp4_dtv_encoder->enabled))
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+ /*
+ * Wait for a vsync so we know that ENABLE=0 has latched before
+ * the (connector) source of the vsyncs gets disabled. Otherwise
+ * we end up in a funny state if we re-enable before the disable
+ * latches, with the result that some of the settings changes for
+ * the new modeset (like the new scanout buffer) don't latch
+ * properly.
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
+
+ clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+ bs_set(mdp4_dtv_encoder, 0);
+
+ mdp4_dtv_encoder->enabled = false;
}
-static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
+static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ unsigned long pc = mdp4_dtv_encoder->pixclock;
+ int ret;
+
+ if (WARN_ON(mdp4_dtv_encoder->enabled))
+ return;
+
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_R_BPC(BPC8) |
MDP4_DMA_CONFIG_G_BPC(BPC8) |
MDP4_DMA_CONFIG_B_BPC(BPC8) |
MDP4_DMA_CONFIG_PACK(0x21));
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
- mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ bs_set(mdp4_dtv_encoder, 1);
+
+ DBG("setting src_clk=%lu", pc);
+
+ ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ if (ret)
+ dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
+ clk_prepare_enable(mdp4_dtv_encoder->src_clk);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+
+ mdp4_dtv_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
- .dpms = mdp4_dtv_encoder_dpms,
.mode_fixup = mdp4_dtv_encoder_mode_fixup,
.mode_set = mdp4_dtv_encoder_mode_set,
- .prepare = mdp4_dtv_encoder_prepare,
- .commit = mdp4_dtv_encoder_commit,
+ .enable = mdp4_dtv_encoder_enable,
+ .disable = mdp4_dtv_encoder_disable,
};
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
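The CRTC and encoder hunks above all follow the same conversion: the legacy .dpms/.prepare/.commit callbacks are folded into .enable/.disable helpers guarded by an enabled flag, which is the shape the atomic helpers expect. A stripped-down sketch of that pattern, using hypothetical names rather than the driver's own, looks like this:

/*
 * Sketch of the dpms -> enable/disable split applied in the hunks above.
 * "my_encoder" and the hw on/off stubs are hypothetical placeholders.
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

struct my_encoder {
	struct drm_encoder base;
	bool enabled;
};

static void my_hw_on(struct my_encoder *enc)  { /* clocks, regulators, ENABLE=1 */ }
static void my_hw_off(struct my_encoder *enc) { /* ENABLE=0, wait for latch, clocks off */ }

static void my_encoder_enable(struct drm_encoder *encoder)
{
	struct my_encoder *enc = container_of(encoder, struct my_encoder, base);

	if (WARN_ON(enc->enabled))
		return;

	my_hw_on(enc);
	enc->enabled = true;
}

static void my_encoder_disable(struct drm_encoder *encoder)
{
	struct my_encoder *enc = container_of(encoder, struct my_encoder, base);

	if (WARN_ON(!enc->enabled))
		return;

	my_hw_off(enc);
	enc->enabled = false;
}

static const struct drm_encoder_helper_funcs my_encoder_helper_funcs = {
	.enable = my_encoder_enable,
	.disable = my_encoder_disable,
	/* no .dpms: the atomic helpers drive enable/disable directly */
};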
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index a62109e4ae0d..d847b9436194 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -125,6 +125,38 @@ out:
return ret;
}
+static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ int i, ncrtcs = state->dev->mode_config.num_crtc;
+
+ mdp4_enable(mdp4_kms);
+
+ /* see 119ecb7fd */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc)
+ continue;
+ drm_crtc_vblank_get(crtc);
+ }
+}
+
+static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ int i, ncrtcs = state->dev->mode_config.num_crtc;
+
+ /* see 119ecb7fd */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc)
+ continue;
+ drm_crtc_vblank_put(crtc);
+ }
+
+ mdp4_disable(mdp4_kms);
+}
+
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
@@ -161,6 +193,8 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank,
+ .prepare_commit = mdp4_prepare_commit,
+ .complete_commit = mdp4_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.preclose = mdp4_preclose,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index cbd77bc626d5..0a5c58bde7a9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -175,14 +175,25 @@ irqreturn_t mdp4_irq(struct msm_kms *kms);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1:
+ case VG2:
+ case VG3:
+ case VG4:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline
uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
uint32_t max_formats)
{
- /* TODO when we have YUV, we need to filter supported formats
- * based on pipe_id..
- */
- return mdp_get_formats(pixel_formats, max_formats);
+ return mdp_get_formats(pixel_formats, max_formats,
+ !pipe_supports_yuv(pipe_id));
}
void mdp4_plane_install_properties(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index 41f6436754fc..60ec8222c9f6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -259,77 +259,6 @@ static void setup_phy(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
}
-static void mdp4_lcdc_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel = mdp4_lcdc_encoder->panel;
- bool enabled = (mode == DRM_MODE_DPMS_ON);
- int i, ret;
-
- DBG("mode=%d", mode);
-
- if (enabled == mdp4_lcdc_encoder->enabled)
- return;
-
- if (enabled) {
- unsigned long pc = mdp4_lcdc_encoder->pixclock;
- int ret;
-
- bs_set(mdp4_lcdc_encoder, 1);
-
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
- }
-
- DBG("setting lcdc_clk=%lu", pc);
- ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
- if (ret)
- dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
- ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
- if (ret)
- dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
-
- if (panel)
- drm_panel_enable(panel);
-
- setup_phy(encoder);
-
- mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
- } else {
- mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
-
- if (panel)
- drm_panel_disable(panel);
-
- /*
- * Wait for a vsync so we know the ENABLE=0 latched before
- * the (connector) source of the vsync's gets disabled,
- * otherwise we end up in a funny state if we re-enable
- * before the disable latches, which results that some of
- * the settings changes for the new modeset (like new
- * scanout buffer) don't latch properly..
- */
- mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
-
- clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
-
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
- }
-
- bs_set(mdp4_lcdc_encoder, 0);
- }
-
- mdp4_lcdc_encoder->enabled = enabled;
-}
-
static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -403,13 +332,59 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0);
}
-static void mdp4_lcdc_encoder_prepare(struct drm_encoder *encoder)
+static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
{
- mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ struct drm_panel *panel = mdp4_lcdc_encoder->panel;
+ int i, ret;
+
+ if (WARN_ON(!mdp4_lcdc_encoder->enabled))
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+
+ if (panel)
+ drm_panel_disable(panel);
+
+ /*
+ * Wait for a vsync so we know that ENABLE=0 has latched before
+ * the (connector) source of the vsyncs gets disabled. Otherwise
+ * we end up in a funny state if we re-enable before the disable
+ * latches, with the result that some of the settings changes for
+ * the new modeset (like the new scanout buffer) don't latch
+ * properly.
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
+
+ clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
+ ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
+ if (ret)
+ dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+ }
+
+ bs_set(mdp4_lcdc_encoder, 0);
+
+ mdp4_lcdc_encoder->enabled = false;
}
-static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder)
+static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ unsigned long pc = mdp4_lcdc_encoder->pixclock;
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ struct drm_panel *panel = mdp4_lcdc_encoder->panel;
+ int i, ret;
+
+ if (WARN_ON(mdp4_lcdc_encoder->enabled))
+ return;
+
/* TODO: hard-coded for 18bpp: */
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_R_BPC(BPC6) |
@@ -420,15 +395,38 @@ static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder)
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
- mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ bs_set(mdp4_lcdc_encoder, 1);
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
+ ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
+ if (ret)
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ }
+
+ DBG("setting lcdc_clk=%lu", pc);
+ ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
+ if (ret)
+ dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+
+ if (panel)
+ drm_panel_enable(panel);
+
+ setup_phy(encoder);
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
+
+ mdp4_lcdc_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
- .dpms = mdp4_lcdc_encoder_dpms,
.mode_fixup = mdp4_lcdc_encoder_mode_fixup,
.mode_set = mdp4_lcdc_encoder_mode_set,
- .prepare = mdp4_lcdc_encoder_prepare,
- .commit = mdp4_lcdc_encoder_commit,
+ .disable = mdp4_lcdc_encoder_disable,
+ .enable = mdp4_lcdc_encoder_enable,
};
long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 4ddc28e1275b..921185133d38 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -94,7 +94,7 @@ mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
}
static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = mdp4_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mdp4_lvds_connector_destroy,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 1e5ebe83647d..cde25009203a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -17,6 +17,8 @@
#include "mdp4_kms.h"
+#define DOWN_SCALE_MAX 8
+#define UP_SCALE_MAX 8
struct mdp4_plane {
struct drm_plane base;
@@ -136,10 +138,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
- uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
-
- DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
- iova, fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -149,11 +147,45 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
- mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
+ msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
+ msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
+ msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
+ msm_framebuffer_iova(fb, mdp4_kms->id, 3));
plane->fb = fb;
}
+static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
+ enum mdp4_pipe pipe, struct csc_cfg *csc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i),
+ csc->matrix[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(csc->post_bias); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i),
+ csc->pre_bias[i]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i),
+ csc->post_bias[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(csc->post_clamp); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i),
+ csc->pre_clamp[i]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i),
+ csc->post_clamp[i]);
+ }
+}
+
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
static int mdp4_plane_mode_set(struct drm_plane *plane,
@@ -163,6 +195,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
+ struct drm_device *dev = plane->dev;
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
@@ -186,14 +219,59 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
+ dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
+ dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (crtc_w > (src_w * UP_SCALE_MAX)) {
+ dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (crtc_h > (src_h * UP_SCALE_MAX)) {
+ dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
if (src_w != crtc_w) {
+ uint32_t sel_unit = SCALE_FIR;
op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
- /* TODO calc phasex_step */
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+ if (crtc_w > src_w)
+ sel_unit = SCALE_PIXEL_RPT;
+ else if (crtc_w <= (src_w / 4))
+ sel_unit = SCALE_MN_PHASE;
+
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit);
+ phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
+ src_w, crtc_w);
+ }
}
if (src_h != crtc_h) {
+ uint32_t sel_unit = SCALE_FIR;
op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
- /* TODO calc phasey_step */
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+
+ if (crtc_h > src_h)
+ sel_unit = SCALE_PIXEL_RPT;
+ else if (crtc_h <= (src_h / 4))
+ sel_unit = SCALE_MN_PHASE;
+
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit);
+ phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
+ src_h, crtc_h);
+ }
}
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
@@ -214,8 +292,6 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_plane_set_scanout(plane, fb);
- format = to_mdp_format(msm_framebuffer_format(fb));
-
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
@@ -224,6 +300,8 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) |
+ MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) |
COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
@@ -232,6 +310,14 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+ if (MDP_FORMAT_IS_YUV(format)) {
+ struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB);
+
+ op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR;
+ op_mode |= MDP4_PIPE_OP_MODE_CSC_EN;
+ mdp4_write_csc_config(mdp4_kms, pipe, csc);
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
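The mdp4_plane hunk above rejects scale ratios beyond 8x in either direction and, for YUV sources, derives the phase step with mult_frac() so it tracks the src/dst ratio. A small standalone illustration of that arithmetic follows; the limits and default phase step are copied from the patch, while the helper name is hypothetical.

/*
 * Worked example of the scale-limit check and mult_frac() phase-step math
 * from the plane hunk above; "example_phase_step" is a hypothetical helper.
 */
#include <linux/errno.h>
#include <linux/kernel.h>	/* mult_frac() */
#include <linux/types.h>

#define EXAMPLE_SCALE_MAX	8		/* DOWN_SCALE_MAX / UP_SCALE_MAX */
#define EXAMPLE_PHASE_DEFAULT	0x20000000	/* MDP4_VG_PHASE_STEP_DEFAULT */

static int example_phase_step(u32 src, u32 dst, u32 *step)
{
	/* reject more than 8x down- or up-scaling, as the hunk above does */
	if (src > dst * EXAMPLE_SCALE_MAX || dst > src * EXAMPLE_SCALE_MAX)
		return -ERANGE;

	/* e.g. src=1920, dst=960 (2:1 downscale) -> step = 0x40000000 */
	*step = mult_frac(EXAMPLE_PHASE_DEFAULT, src, dst);
	return 0;
}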
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index e87ef5512cb0..09b4a25eb553 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -88,13 +89,6 @@ enum mdp5_pack_3d {
PACK_3D_COL_INT = 3,
};
-enum mdp5_chroma_samp_type {
- CHROMA_RGB = 0,
- CHROMA_H2V1 = 1,
- CHROMA_H1V2 = 2,
- CHROMA_420 = 3,
-};
-
enum mdp5_scale_filter {
SCALE_FILTER_NEAREST = 0,
SCALE_FILTER_BIL = 1,
@@ -135,6 +129,17 @@ enum mdp5_client_id {
CID_MAX = 23,
};
+enum mdp5_cursor_format {
+ CURSOR_FMT_ARGB8888 = 0,
+ CURSOR_FMT_ARGB1555 = 2,
+ CURSOR_FMT_ARGB4444 = 4,
+};
+
+enum mdp5_cursor_alpha {
+ CURSOR_ALPHA_CONST = 0,
+ CURSOR_ALPHA_PER_PIXEL = 2,
+};
+
enum mdp5_igc_type {
IGC_VIG = 0,
IGC_RGB = 1,
@@ -142,6 +147,11 @@ enum mdp5_igc_type {
IGC_DSPP = 3,
};
+enum mdp5_data_format {
+ DATA_FORMAT_RGB = 0,
+ DATA_FORMAT_YUV = 1,
+};
+
#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
@@ -463,12 +473,143 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
}
static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val)
+{
+ return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val)
+{
+ return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000
+
static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK;
+}
+
static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
@@ -618,15 +759,15 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
}
#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000
#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_sspp_fetch_type val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
}
@@ -753,6 +894,10 @@ static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { ret
static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); }
+
static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
@@ -839,20 +984,88 @@ static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i
static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK;
+}
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK;
+}
static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK;
+}
+#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK;
+}
static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK;
+}
+#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000
+#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK;
+}
static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK;
+}
static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val)
+{
+ return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK;
+}
static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK;
+}
+#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000
+#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK;
+}
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1
+static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val)
+{
+ return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK;
+}
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
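For illustration, a minimal standalone C sketch (sample values only, not taken from the patch) of how the generated SRC_W/SRC_H helpers above pack a 64x64 cursor size into a single register word:

#include <stdint.h>
#include <stdio.h>

#define SRC_W__MASK	0x0000ffff
#define SRC_W__SHIFT	0
#define SRC_H__MASK	0xffff0000
#define SRC_H__SHIFT	16

/* Mirrors the shape of MDP5_LM_CURSOR_IMG_SIZE_SRC_W()/SRC_H(). */
static uint32_t src_w(uint32_t val) { return (val << SRC_W__SHIFT) & SRC_W__MASK; }
static uint32_t src_h(uint32_t val) { return (val << SRC_H__SHIFT) & SRC_H__MASK; }

int main(void)
{
	/* 64x64 cursor image: prints 0x00400040 */
	printf("IMG_SIZE = 0x%08x\n", src_h(64) | src_w(64));
	return 0;
}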
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index f021f960a8a2..46fac545dc2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -24,6 +24,9 @@
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
struct mdp5_crtc {
@@ -47,8 +50,21 @@ struct mdp5_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
struct mdp_irq vblank;
struct mdp_irq err;
+
+ struct {
+ /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
+ spinlock_t lock;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ uint32_t width;
+ uint32_t height;
+ } cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -129,37 +145,26 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
}
}
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(work, struct mdp5_crtc, unref_cursor_work);
+ struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
+
+ msm_gem_put_iova(val, mdp5_kms->id);
+ drm_gem_object_unreference_unlocked(val);
+}
+
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
kfree(mdp5_crtc);
}
-static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct mdp5_kms *mdp5_kms = get_kms(crtc);
- bool enabled = (mode == DRM_MODE_DPMS_ON);
-
- DBG("%s: mode=%d", mdp5_crtc->name, mode);
-
- if (enabled != mdp5_crtc->enabled) {
- if (enabled) {
- mdp5_enable(mdp5_kms);
- mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
- } else {
- /* set STAGE_UNUSED for all layers */
- mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
- mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
- mdp5_disable(mdp5_kms);
- }
- mdp5_crtc->enabled = enabled;
- }
-}
-
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -256,27 +261,41 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
-static void mdp5_crtc_prepare(struct drm_crtc *crtc)
+static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+
DBG("%s", mdp5_crtc->name);
- /* make sure we hold a ref to mdp clks while setting up mode: */
- mdp5_enable(get_kms(crtc));
- mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (WARN_ON(!mdp5_crtc->enabled))
+ return;
+
+ /* set STAGE_UNUSED for all layers */
+ mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
+
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+ mdp5_disable(mdp5_kms);
+
+ mdp5_crtc->enabled = false;
}
-static void mdp5_crtc_commit(struct drm_crtc *crtc)
+static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+
DBG("%s", mdp5_crtc->name);
- mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+ if (WARN_ON(mdp5_crtc->enabled))
+ return;
+
+ mdp5_enable(mdp5_kms);
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+
crtc_flush_all(crtc);
- /* drop the ref to mdp clk's that we got in prepare: */
- mdp5_disable(get_kms(crtc));
-}
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
-{
+ mdp5_crtc->enabled = true;
}
struct plane_state {
@@ -384,6 +403,132 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
return -EINVAL;
}
+static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_gem_object *cursor_bo, *old_bo;
+ uint32_t blendcfg, cursor_addr, stride;
+ int ret, bpp, lm;
+ unsigned int depth;
+ enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ unsigned long flags;
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if (NULL == mdp5_crtc->ctl)
+ return -EINVAL;
+
+ if (!handle) {
+ DBG("Cursor off");
+ return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
+ }
+
+ cursor_bo = drm_gem_object_lookup(dev, file, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ if (ret)
+ return -EINVAL;
+
+ lm = mdp5_crtc->lm;
+ drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
+ stride = width * (bpp >> 3);
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ old_bo = mdp5_crtc->cursor.scanout_bo;
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+ MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
+ MDP5_LM_CURSOR_SIZE_ROI_H(height) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(width));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
+
+ blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
+ blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
+ blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+
+ mdp5_crtc->cursor.scanout_bo = cursor_bo;
+ mdp5_crtc->cursor.width = width;
+ mdp5_crtc->cursor.height = height;
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
+ if (ret)
+ goto end;
+
+ flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+ crtc_flush(crtc, flush_mask);
+
+end:
+ if (old_bo) {
+ drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
+ /* enable vblank to complete cursor work: */
+ request_pending(crtc, PENDING_CURSOR);
+ }
+ return ret;
+}
+
+static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ uint32_t xres = crtc->mode.hdisplay;
+ uint32_t yres = crtc->mode.vdisplay;
+ uint32_t roi_w;
+ uint32_t roi_h;
+ unsigned long flags;
+
+ x = (x > 0) ? x : 0;
+ y = (y > 0) ? y : 0;
+
+ /*
+ * The cursor Region Of Interest (ROI) is the plane read from the
+ * cursor buffer to render. The ROI region is determined by the
+ * visibility of the cursor point. In the default cursor image the
+ * cursor point will be at the top left of the cursor image, unless
+ * it is specified otherwise using the hotspot feature.
+ *
+ * If the cursor point reaches the right (xres - x < cursor.width) or
+ * bottom (yres - y < cursor.height) boundary of the screen, then the
+ * ROI width and ROI height need to be evaluated to crop the cursor
+ * image accordingly:
+ * (xres - x) will be the new cursor width when x > (xres - cursor.width)
+ * (yres - y) will be the new cursor height when y > (yres - cursor.height)
+ */
+ roi_w = min(mdp5_crtc->cursor.width, xres - x);
+ roi_h = min(mdp5_crtc->cursor.height, yres - y);
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+ MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+ MDP5_LM_CURSOR_START_XY_Y_START(y) |
+ MDP5_LM_CURSOR_START_XY_X_START(x));
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc, flush_mask);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
@@ -392,17 +537,15 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .cursor_set = mdp5_crtc_cursor_set,
+ .cursor_move = mdp5_crtc_cursor_move,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
- .dpms = mdp5_crtc_dpms,
.mode_fixup = mdp5_crtc_mode_fixup,
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
- .prepare = mdp5_crtc_prepare,
- .commit = mdp5_crtc_commit,
- .load_lut = mdp5_crtc_load_lut,
+ .prepare = mdp5_crtc_disable,
+ .commit = mdp5_crtc_enable,
.atomic_check = mdp5_crtc_atomic_check,
.atomic_begin = mdp5_crtc_atomic_begin,
.atomic_flush = mdp5_crtc_atomic_flush,
@@ -412,6 +555,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -421,6 +565,9 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
}
+
+ if (pending & PENDING_CURSOR)
+ drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -520,6 +667,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->lm = GET_LM_ID(id);
spin_lock_init(&mdp5_crtc->lm_lock);
+ spin_lock_init(&mdp5_crtc->cursor.lock);
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -528,6 +676,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
pipe2name(mdp5_plane_pipe(plane)), id);
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
+
+ drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
+ "unref cursor", unref_cursor_worker);
+
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
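For illustration, a small standalone sketch of the ROI clamping performed in mdp5_crtc_cursor_move() above, assuming a 64x64 cursor on a 1920x1080 mode (sample values, not from the patch):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Clamp the cursor ROI against the right/bottom screen edges, as done
 * before programming LM_CURSOR_SIZE and LM_CURSOR_START_XY. */
static void cursor_roi(uint32_t xres, uint32_t yres,
		       uint32_t cur_w, uint32_t cur_h, int x, int y)
{
	uint32_t roi_w, roi_h;

	x = (x > 0) ? x : 0;
	y = (y > 0) ? y : 0;

	roi_w = MIN(cur_w, xres - (uint32_t)x);
	roi_h = MIN(cur_h, yres - (uint32_t)y);

	printf("x=%d y=%d -> ROI %ux%u\n", x, y, roi_w, roi_h);
}

int main(void)
{
	cursor_roi(1920, 1080, 64, 64, 100, 100);   /* fully visible: 64x64 */
	cursor_roi(1920, 1080, 64, 64, 1900, 1060); /* clipped: 20x20 */
	return 0;
}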
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index dea4505ac963..151129032d16 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -95,7 +95,7 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf)
{
unsigned long flags;
static const enum mdp5_intfnum intfnum[] = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 1018519b6af2..ad48788efeea 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -34,7 +34,7 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 0254bfdeb92f..d6a14bb99988 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -110,45 +111,6 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = {
.destroy = mdp5_encoder_destroy,
};
-static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
- bool enabled = (mode == DRM_MODE_DPMS_ON);
- unsigned long flags;
-
- DBG("mode=%d", mode);
-
- if (enabled == mdp5_encoder->enabled)
- return;
-
- if (enabled) {
- bs_set(mdp5_encoder, 1);
- spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
- spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- } else {
- spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
- spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
-
- /*
- * Wait for a vsync so we know the ENABLE=0 latched before
- * the (connector) source of the vsync's gets disabled,
- * otherwise we end up in a funny state if we re-enable
- * before the disable latches, which results that some of
- * the settings changes for the new modeset (like new
- * scanout buffer) don't latch properly..
- */
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
-
- bs_set(mdp5_encoder, 0);
- }
-
- mdp5_encoder->enabled = enabled;
-}
-
static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -162,11 +124,13 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
int intf = mdp5_encoder->intf;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
- uint32_t format;
+ uint32_t format = 0x2100;
unsigned long flags;
mode = adjusted_mode;
@@ -188,7 +152,28 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
/* probably need to get DATA_EN polarity from panel.. */
dtv_hsync_skew = 0; /* get this from panel? */
- format = 0x213f; /* get this from panel? */
+
+ /* Get color format from panel, default is 8bpc */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ switch (connector->display_info.bpc) {
+ case 4:
+ format |= 0;
+ break;
+ case 5:
+ format |= 0x15;
+ break;
+ case 6:
+ format |= 0x2A;
+ break;
+ case 8:
+ default:
+ format |= 0x3F;
+ break;
+ }
+ break;
+ }
+ }
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
@@ -198,6 +183,16 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+ /*
+ * For eDP only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ if (mdp5_encoder->intf_id == INTF_eDP) {
+ display_v_start += mode->htotal - mode->hsync_start;
+ display_v_end -= mode->hsync_start - mode->hdisplay;
+ }
+
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
@@ -225,25 +220,61 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
}
-static void mdp5_encoder_prepare(struct drm_encoder *encoder)
+static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
- mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ unsigned long flags;
+
+ if (WARN_ON(!mdp5_encoder->enabled))
+ return;
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ /*
+ * Wait for a vsync so we know that ENABLE=0 has latched before
+ * the (connector) source of the vsyncs gets disabled; otherwise we
+ * end up in a funny state if we re-enable before the disable
+ * latches, which results in some of the settings for the new
+ * modeset (like the new scanout buffer) not latching properly.
+ */
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+
+ bs_set(mdp5_encoder, 0);
+
+ mdp5_encoder->enabled = false;
}
-static void mdp5_encoder_commit(struct drm_encoder *encoder)
+static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ unsigned long flags;
+
+ if (WARN_ON(mdp5_encoder->enabled))
+ return;
+
mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
mdp5_encoder->intf_id);
- mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ bs_set(mdp5_encoder, 1);
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
- .dpms = mdp5_encoder_dpms,
.mode_fixup = mdp5_encoder_mode_fixup,
.mode_set = mdp5_encoder_mode_set,
- .prepare = mdp5_encoder_prepare,
- .commit = mdp5_encoder_commit,
+ .prepare = mdp5_encoder_disable,
+ .commit = mdp5_encoder_enable,
};
/* initialize encoder */
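For illustration, a standalone sketch that replays the DISPLAY_V_START/END arithmetic from mdp5_encoder_mode_set() above with a made-up eDP-like mode; the timing numbers are invented purely to show the adjustment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up mode timings (illustration only). */
	uint32_t hdisplay = 1366, hsync_start = 1414, htotal = 1478;
	uint32_t vdisplay = 768, vsync_start = 772, vtotal = 792;
	uint32_t hsync_skew = 0;

	uint32_t vsync_period = vtotal * htotal;
	uint32_t v_start = (vtotal - vsync_start) * htotal + hsync_skew;
	uint32_t v_end = vsync_period -
			 ((vsync_start - vdisplay) * htotal) + hsync_skew - 1;

	/* eDP-only correction, mirroring the hunk above. */
	v_start += htotal - hsync_start;
	v_end -= hsync_start - hdisplay;	/* hsync_start - hdisplay == HFP */

	printf("DISPLAY_V_START = %u\n", v_start);
	printf("DISPLAY_V_END   = %u\n", v_end);
	return 0;
}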
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 9f01a4f21af2..92b61db5754c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -68,6 +68,18 @@ static int mdp5_hw_init(struct msm_kms *kms)
return 0;
}
+static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_enable(mdp5_kms);
+}
+
+static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_disable(mdp5_kms);
+}
+
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
@@ -115,6 +127,8 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
+ .prepare_commit = mdp5_prepare_commit,
+ .complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.preclose = mdp5_preclose,
@@ -208,19 +222,18 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
}
}
- /* Construct encoder for HDMI: */
- encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
+ if (priv->hdmi) {
+ /* Construct encoder for HDMI: */
+ encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
- priv->encoders[priv->num_encoders++] = encoder;
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ priv->encoders[priv->num_encoders++] = encoder;
- /* Construct bridge/connector for HDMI: */
- if (priv->hdmi) {
ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
@@ -228,6 +241,27 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
}
}
+ if (priv->edp) {
+ /* Construct encoder for eDP: */
+ encoder = mdp5_encoder_init(dev, 0, INTF_eDP);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct eDP encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ /* Construct bridge/connector for eDP: */
+ ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize eDP: %d\n",
+ ret);
+ goto fail;
+ }
+ }
+
return 0;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index dd69c77c0d64..49d011e8835b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -165,14 +165,25 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
+static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0:
+ case SSPP_VIG1:
+ case SSPP_VIG2:
+ case SSPP_VIG3:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
uint32_t max_formats)
{
- /* TODO when we have YUV, we need to filter supported formats
- * based on pipe id..
- */
- return mdp_get_formats(pixel_formats, max_formats);
+ return mdp_get_formats(pixel_formats, max_formats,
+ !pipe_supports_yuv(pipe));
}
void mdp5_plane_install_properties(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 26e5fdea6594..05cf9ab2a876 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -18,8 +18,6 @@
#include "mdp5_kms.h"
-#define MAX_PLANE 4
-
struct mdp5_plane {
struct drm_plane base;
const char *name;
@@ -113,6 +111,7 @@ static void mdp5_plane_reset(struct drm_plane *plane)
} else {
mdp5_state->zpos = 1 + drm_plane_index(plane);
}
+ mdp5_state->base.plane = plane;
plane->state = &mdp5_state->base;
}
@@ -277,6 +276,155 @@ static void set_scanout_locked(struct drm_plane *plane,
plane->fb = fb;
}
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
+{
+ uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
+ ~MDP5_PIPE_OP_MODE_CSC_1_EN;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ struct csc_cfg *csc)
+{
+ uint32_t i, mode = 0; /* RGB, no CSC */
+ uint32_t *matrix;
+
+ if (unlikely(!csc))
+ return;
+
+ if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
+ mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
+ if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
+ mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
+ mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);
+
+ matrix = csc->matrix;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));
+
+ for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
+ uint32_t *pre_clamp = csc->pre_clamp;
+ uint32_t *post_clamp = csc->post_clamp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
+ }
+}
+
+#define PHASE_STEP_SHIFT 21
+#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */
+
+static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
+{
+ uint32_t unit;
+
+ if (src == 0 || dst == 0)
+ return -EINVAL;
+
+ /*
+ * PHASE_STEP_X/Y is coded on 26 bits (25:0),
+ * where 2^21 represents the unity "1" in fixed-point hardware design.
+ * This leaves 5 bits for the integer part (downscale case):
+ * -> maximum downscale ratio = 0b1_1111 = 31
+ */
+ if (src > (dst * DOWN_SCALE_RATIO_MAX))
+ return -EOVERFLOW;
+
+ unit = 1 << PHASE_STEP_SHIFT;
+ *out_phase = mult_frac(unit, src, dst);
+
+ return 0;
+}
+
+static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasex_steps[2])
+{
+ uint32_t phasex_step;
+ unsigned int hsub;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasex_step);
+ if (ret)
+ return ret;
+
+ hsub = drm_format_horz_chroma_subsampling(pixel_format);
+
+ phasex_steps[0] = phasex_step;
+ phasex_steps[1] = phasex_step / hsub;
+
+ return 0;
+}
+
+static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasey_steps[2])
+{
+ uint32_t phasey_step;
+ unsigned int vsub;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasey_step);
+ if (ret)
+ return ret;
+
+ vsub = drm_format_vert_chroma_subsampling(pixel_format);
+
+ phasey_steps[0] = phasey_step;
+ phasey_steps[1] = phasey_step / vsub;
+
+ return 0;
+}
+
+static uint32_t get_scalex_config(uint32_t src, uint32_t dest)
+{
+ uint32_t filter;
+
+ filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+
+ return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
+}
+
+static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
+{
+ uint32_t filter;
+
+ filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+
+ return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
+}
+
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
@@ -286,11 +434,14 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
enum mdp5_pipe pipe = mdp5_plane->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- uint32_t phasex_step = 0, phasey_step = 0;
+ /* below array -> index 0: comp 0/3 ; index 1: comp 1/2 */
+ uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
uint32_t hdecm = 0, vdecm = 0;
+ uint32_t pix_format;
unsigned long flags;
int ret;
@@ -300,6 +451,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;
+ format = to_mdp_format(msm_framebuffer_format(fb));
+ pix_format = format->base.pixel_format;
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@@ -324,14 +478,28 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
*/
mdp5_smp_configure(mdp5_kms->smp, pipe);
- if (src_w != crtc_w) {
- config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
- /* TODO calc phasex_step, hdecm */
+ /* SCALE is used to both scale and up-sample chroma components */
+
+ if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) {
+ /* TODO calc hdecm */
+ ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step);
+ if (ret) {
+ dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
+ src_w, crtc_w, ret);
+ return ret;
+ }
+ config |= get_scalex_config(src_w, crtc_w);
}
- if (src_h != crtc_h) {
- config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
- /* TODO calc phasey_step, vdecm */
+ if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) {
+ /* TODO calc vdecm */
+ ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step);
+ if (ret) {
+ dev_err(dev, "Y scaling (%d -> %d) failed: %d\n",
+ src_h, crtc_h, ret);
+ return ret;
+ }
+ config |= get_scaley_config(src_h, crtc_h);
}
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
@@ -356,8 +524,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_OUT_XY_X(crtc_x) |
MDP5_PIPE_OUT_XY_Y(crtc_y));
- format = to_mdp_format(msm_framebuffer_format(fb));
-
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
@@ -367,8 +533,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
- MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
- MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
+ MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
@@ -382,18 +548,24 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ phasex_step[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ phasey_step[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ phasex_step[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ phasey_step[1]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
MDP5_PIPE_DECIMATION_VERT(vdecm) |
MDP5_PIPE_DECIMATION_HORZ(hdecm));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
- MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
set_scanout_locked(plane, fb);
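For illustration, a standalone sketch of the Q5.21 phase-step math behind calc_phase_step() and the chroma division in calc_scalex_steps()/calc_scaley_steps() above; the scale factors and the NV12 subsampling value are example inputs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PHASE_STEP_SHIFT	21
#define DOWN_SCALE_RATIO_MAX	32	/* 2^(26-21) */

/* Same idea as calc_phase_step(): src/dst ratio in 5.21 fixed point. */
static int phase_step(uint32_t src, uint32_t dst, uint32_t *out)
{
	if (!src || !dst)
		return -EINVAL;
	if (src > dst * DOWN_SCALE_RATIO_MAX)
		return -EOVERFLOW;

	*out = (uint32_t)(((uint64_t)src << PHASE_STEP_SHIFT) / dst);
	return 0;
}

int main(void)
{
	uint32_t step;

	/* 1920 -> 960 downscale: step = 2.0 in fixed point = 0x00400000 */
	phase_step(1920, 960, &step);
	printf("luma step   = 0x%08x\n", step);

	/* NV12 has 2x horizontal chroma subsampling (hsub = 2), so the
	 * CR phase step register gets half the luma step: 0x00200000. */
	printf("chroma step = 0x%08x\n", step / 2);
	return 0;
}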
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index bf551885e019..1f795af89680 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -119,9 +119,10 @@ static int smp_request_block(struct mdp5_smp *smp,
spin_lock_irqsave(&smp->state_lock, flags);
- nblks -= reserved;
- if (reserved)
+ if (reserved) {
+ nblks = max(0, nblks - reserved);
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+ }
avail = cnt - bitmap_weight(smp->state, cnt);
if (nblks > avail) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 64c1afd6030a..a1d35f162c7f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -44,6 +45,19 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+enum mdp_chroma_samp_type {
+ CHROMA_RGB = 0,
+ CHROMA_H2V1 = 1,
+ CHROMA_H1V2 = 2,
+ CHROMA_420 = 3,
+};
+
+enum mdp_sspp_fetch_type {
+ MDP_PLANE_INTERLEAVED = 0,
+ MDP_PLANE_PLANAR = 1,
+ MDP_PLANE_PSEUDO_PLANAR = 2,
+};
+
enum mdp_mixer_stage_id {
STAGE_UNUSED = 0,
STAGE_BASE = 1,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index e0a6ffbe6ab4..f683433b6727 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -19,7 +20,58 @@
#include "msm_drv.h"
#include "mdp_kms.h"
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
+static struct csc_cfg csc_convert[CSC_MAX] = {
+ [CSC_RGB2RGB] = {
+ .type = CSC_RGB2RGB,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x0, 0x0, 0x0 },
+ .post_bias = { 0x0, 0x0, 0x0 },
+ .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ },
+ [CSC_YUV2RGB] = {
+ .type = CSC_YUV2RGB,
+ .matrix = {
+ 0x0254, 0x0000, 0x0331,
+ 0x0254, 0xff37, 0xfe60,
+ 0x0254, 0x0409, 0x0000
+ },
+ .pre_bias = { 0xfff0, 0xff80, 0xff80 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+ [CSC_RGB2YUV] = {
+ .type = CSC_RGB2YUV,
+ .matrix = {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x10, 0x80, 0x80 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 },
+ },
+ [CSC_YUV2YUV] = {
+ .type = CSC_YUV2YUV,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+};
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \
.base = { .pixel_format = DRM_FORMAT_ ## name }, \
.bpc_a = BPC ## a ## A, \
.bpc_r = BPC ## r, \
@@ -30,21 +82,46 @@
.unpack_tight = tight, \
.cpp = c, \
.unpack_count = cnt, \
- }
+ .fetch_type = fp, \
+ .chroma_sample = cs \
+}
#define BPC0A 0
+/*
+ * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking
+ * mdp_get_rgb_formats()'s implementation.
+ */
static const struct mdp_format formats[] = {
- /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
- FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
- FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
- FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
- FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
- FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
- FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
+ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+
+ /* --- RGB formats above / YUV formats below this line --- */
+
+ FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+ FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
};
-uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
+/*
+ * Note:
+ * @rgb_only must be set to true when requesting the
+ * supported formats for RGB pipes.
+ */
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
+ bool rgb_only)
{
uint32_t i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
@@ -53,6 +130,9 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
if (i == max_formats)
break;
+ if (rgb_only && MDP_FORMAT_IS_YUV(f))
+ break;
+
pixel_formats[i] = f->base.pixel_format;
}
@@ -69,3 +149,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
}
return NULL;
}
+
+struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type)
+{
+ if (unlikely(WARN_ON(type >= CSC_MAX)))
+ return NULL;
+
+ return &csc_convert[type];
+}
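For illustration, a standalone sketch (hypothetical table and names) of why the RGB-first ordering of the format table matters to the rgb_only early-out in mdp_get_formats():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fmt { const char *name; bool is_yuv; };

static const struct fmt formats[] = {
	{ "ARGB8888", false }, { "RGB565", false },	/* RGB block first */
	{ "NV12", true }, { "NV21", true },		/* YUV block last */
};

/* Stops at the first YUV entry when rgb_only is set, so the ordering
 * of the table is load-bearing. */
static uint32_t get_formats(const char **out, uint32_t max, bool rgb_only)
{
	uint32_t i;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
		if (i == max)
			break;
		if (rgb_only && formats[i].is_yuv)
			break;
		out[i] = formats[i].name;
	}
	return i;
}

int main(void)
{
	const char *names[8];
	uint32_t n = get_formats(names, 8, true);

	printf("%u RGB-only formats\n", n);	/* prints 2 */
	return 0;
}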
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 2a731722d840..1988c243f437 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -34,7 +34,7 @@ static void update_irq(struct mdp_kms *mdp_kms)
struct mdp_irq *irq;
uint32_t irqmask = mdp_kms->vblank_mask;
- BUG_ON(!spin_is_locked(&list_lock));
+ assert_spin_locked(&list_lock);
list_for_each_entry(irq, &mdp_kms->irq_list, node)
irqmask |= irq->irqmask;
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index b268ce95d394..5ae4039d68e4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -88,10 +88,32 @@ struct mdp_format {
uint8_t unpack[4];
bool alpha_enable, unpack_tight;
uint8_t cpp, unpack_count;
+ enum mdp_sspp_fetch_type fetch_type;
+ enum mdp_chroma_samp_type chroma_sample;
};
#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB)
-uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+enum csc_type {
+ CSC_RGB2RGB = 0,
+ CSC_YUV2RGB,
+ CSC_RGB2YUV,
+ CSC_YUV2YUV,
+ CSC_MAX
+};
+
+struct csc_cfg {
+ enum csc_type type;
+ uint32_t matrix[9];
+ uint32_t pre_bias[3];
+ uint32_t post_bias[3];
+ uint32_t pre_clamp[6];
+ uint32_t post_clamp[6];
+};
+
+struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type);
+
#endif /* __MDP_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 191968256c58..871aa2108dc6 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -20,6 +20,7 @@
#include "msm_gem.h"
struct msm_commit {
+ struct drm_device *dev;
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
@@ -58,14 +59,16 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
spin_unlock(&priv->pending_crtcs_event.lock);
}
-static struct msm_commit *new_commit(struct drm_atomic_state *state)
+static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return NULL;
+ c->dev = state->dev;
c->state = state;
+
/* TODO we might need a way to indicate to run the cb on a
* different wq so wait_for_vblanks() doesn't block retiring
* bo's..
@@ -75,6 +78,12 @@ static struct msm_commit *new_commit(struct drm_atomic_state *state)
return c;
}
+static void commit_destroy(struct msm_commit *c)
+{
+ end_atomic(c->dev->dev_private, c->crtc_mask);
+ kfree(c);
+}
+
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
@@ -82,6 +91,10 @@ static void complete_commit(struct msm_commit *c)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_pre_planes(dev, state);
@@ -106,11 +119,11 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ kms->funcs->complete_commit(kms, state);
- end_atomic(dev->dev_private, c->crtc_mask);
+ drm_atomic_state_free(state);
- kfree(c);
+ commit_destroy(c);
}
static void fence_cb(struct msm_fence_cb *cb)
@@ -127,6 +140,26 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
}
+int msm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ /*
+ * msm ->atomic_check can update ->mode_changed for pixel format
+ * changes, hence it must be run before we check the modeset changes.
+ */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
@@ -145,6 +178,7 @@ int msm_atomic_commit(struct drm_device *dev,
{
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
+ struct timespec timeout;
struct msm_commit *c;
int i, ret;
@@ -152,7 +186,7 @@ int msm_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
- c = new_commit(state);
+ c = commit_init(state);
if (!c)
return -ENOMEM;
@@ -217,10 +251,12 @@ int msm_atomic_commit(struct drm_device *dev,
return 0;
}
- ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
+ jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout);
+
+ ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
if (ret) {
WARN_ON(ret); // TODO unswap state back? or??
- kfree(c);
+ commit_destroy(c);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a61546a0b05..a4269119f9ea 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,7 +29,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = msm_atomic_check,
.atomic_commit = msm_atomic_commit,
};
@@ -54,6 +54,12 @@ module_param(reglog, bool, 0600);
#define reglog 0
#endif
+#ifdef CONFIG_DRM_MSM_FBDEV
+static bool fbdev = true;
+MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
+module_param(fbdev, bool, 0600);
+#endif
+
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
module_param(vram, charp, 0);
@@ -300,7 +306,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_reset(dev);
#ifdef CONFIG_DRM_MSM_FBDEV
- priv->fbdev = msm_fbdev_init(dev);
+ if (fbdev)
+ priv->fbdev = msm_fbdev_init(dev);
#endif
ret = msm_debugfs_late_init(dev);
@@ -1023,6 +1030,7 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_edp_register();
hdmi_register();
adreno_register();
return platform_driver_register(&msm_platform_driver);
@@ -1034,6 +1042,7 @@ static void __exit msm_drm_unregister(void)
platform_driver_unregister(&msm_platform_driver);
hdmi_unregister();
adreno_unregister();
+ msm_edp_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b69ef2d5a26c..9e8d441b61c3 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -76,6 +76,12 @@ struct msm_drm_private {
*/
struct hdmi *hdmi;
+ /* eDP is for mdp5 only, but kms has not been created
+ * when edp_bind() and edp_init() are called. Here is the only
+ * place to keep the edp instance.
+ */
+ struct msm_edp *edp;
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@@ -148,6 +154,8 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
+int msm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
@@ -222,6 +230,12 @@ int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
+struct msm_edp;
+void __init msm_edp_register(void);
+void __exit msm_edp_unregister(void);
+int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
+ struct drm_encoder *encoder);
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 84dec161d836..6b573e612f27 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -24,7 +24,7 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[3];
+ struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
@@ -122,7 +122,7 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id);
+ return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 1f3af13ccede..df60f65728ff 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -241,17 +241,20 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
goto fail;
}
- drm_fb_helper_single_add_all_connectors(helper);
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret)
+ goto fini;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_initial_config(helper, 32);
+ ret = drm_fb_helper_initial_config(helper, 32);
+ if (ret)
+ goto fini;
priv->fbdev = helper;
return helper;
+fini:
+ drm_fb_helper_fini(helper);
fail:
kfree(fbdev);
return NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 06437745bc2c..3a78cb48662b 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -23,6 +23,8 @@
#include "msm_drv.h"
+#define MAX_PLANE 4
+
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
@@ -38,6 +40,9 @@ struct msm_kms_funcs {
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* modeset, bracketing atomic_commit(): */
+ void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
/* misc: */
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
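For illustration, a rough standalone sketch (generic C, hypothetical names) of the bracketing that the new prepare_commit/complete_commit hooks provide around an atomic commit, mirroring how mdp5 grabs and drops its clocks in mdp5_prepare_commit()/mdp5_complete_commit():

#include <stdio.h>

struct kms;		/* stand-ins for the real msm structures */
struct atomic_state;

struct kms_funcs {
	void (*prepare_commit)(struct kms *kms, struct atomic_state *s);
	void (*complete_commit)(struct kms *kms, struct atomic_state *s);
};

static void mdp5ish_prepare(struct kms *kms, struct atomic_state *s)
{
	(void)kms; (void)s;
	printf("enable mdp clocks\n");	/* cf. mdp5_enable() */
}

static void mdp5ish_complete(struct kms *kms, struct atomic_state *s)
{
	(void)kms; (void)s;
	printf("disable mdp clocks\n");	/* cf. mdp5_disable() */
}

static const struct kms_funcs funcs = {
	.prepare_commit = mdp5ish_prepare,
	.complete_commit = mdp5ish_complete,
};

static void commit(struct kms *kms, struct atomic_state *s)
{
	funcs.prepare_commit(kms, s);
	printf("program planes/crtcs, wait for flush\n");
	funcs.complete_commit(kms, s);
}

int main(void)
{
	commit(NULL, NULL);
	return 0;
}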
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
new file mode 100644
index 000000000000..2b765663c1a3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -0,0 +1,66 @@
+ccflags-y := -Iinclude/drm
+ccflags-y += -I$(src)/include
+ccflags-y += -I$(src)/include/nvkm
+ccflags-y += -I$(src)/nvkm
+ccflags-y += -I$(src)
+
+# NVIF - NVKM interface library (NVKM user interface also defined here)
+#- code also used by various userspace tools/tests
+include $(src)/nvif/Kbuild
+nouveau-y := $(nvif-y)
+
+# NVKM - HW resource manager
+#- code also used by various userspace tools/tests
+include $(src)/nvkm/Kbuild
+nouveau-y += $(nvkm-y)
+
+# DRM - general
+ifdef CONFIG_X86
+nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
+nouveau-y += nouveau_agp.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
+nouveau-y += nouveau_drm.o
+nouveau-y += nouveau_hwmon.o
+nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-y += nouveau_nvif.o
+nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
+nouveau-y += nouveau_sysfs.o
+nouveau-y += nouveau_usif.o # userspace <-> nvif
+nouveau-y += nouveau_vga.o
+
+# DRM - memory management
+nouveau-y += nouveau_bo.o
+nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_prime.o
+nouveau-y += nouveau_sgdma.o
+nouveau-y += nouveau_ttm.o
+
+# DRM - modesetting
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-y += nouveau_connector.o
+nouveau-y += nouveau_display.o
+nouveau-y += nv50_display.o
+nouveau-y += nouveau_dp.o
+nouveau-y += nouveau_fbcon.o
+nouveau-y += nv04_fbcon.o
+nouveau-y += nv50_fbcon.o
+nouveau-y += nvc0_fbcon.o
+
+# DRM - command submission
+nouveau-y += nouveau_abi16.o
+nouveau-y += nouveau_chan.o
+nouveau-y += nouveau_dma.o
+nouveau-y += nouveau_fence.o
+nouveau-y += nv04_fence.o
+nouveau-y += nv10_fence.o
+nouveau-y += nv17_fence.o
+nouveau-y += nv50_fence.o
+nouveau-y += nv84_fence.o
+nouveau-y += nvc0_fence.o
+
+# DRM - prehistoric modesetting (NV04-G7x)
+nouveau-y += nouveau_bios.o
+include $(src)/dispnv04/Kbuild
+
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 40afc69a3778..5ab13e7939db 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -26,7 +26,7 @@ config DRM_NOUVEAU
Choose this option for open-source NVIDIA support.
config NOUVEAU_PLATFORM_DRIVER
- tristate "Nouveau (NVIDIA) SoC GPUs"
+ bool "Nouveau (NVIDIA) SoC GPUs"
depends on DRM_NOUVEAU && ARCH_TEGRA
default y
help
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
deleted file mode 100644
index 6461e3565afe..000000000000
--- a/drivers/gpu/drm/nouveau/Makefile
+++ /dev/null
@@ -1,400 +0,0 @@
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-ccflags-y := -Iinclude/drm
-ccflags-y += -I$(src)/core/include
-ccflags-y += -I$(src)/core
-ccflags-y += -I$(src)
-
-nouveau-y := core/core/client.o
-nouveau-y += core/core/engctx.o
-nouveau-y += core/core/engine.o
-nouveau-y += core/core/enum.o
-nouveau-y += core/core/event.o
-nouveau-y += core/core/gpuobj.o
-nouveau-y += core/core/handle.o
-nouveau-y += core/core/ioctl.o
-nouveau-y += core/core/mm.o
-nouveau-y += core/core/namedb.o
-nouveau-y += core/core/notify.o
-nouveau-y += core/core/object.o
-nouveau-y += core/core/option.o
-nouveau-y += core/core/parent.o
-nouveau-y += core/core/printk.o
-nouveau-y += core/core/ramht.o
-nouveau-y += core/core/subdev.o
-
-nouveau-y += core/subdev/bar/base.o
-nouveau-y += core/subdev/bar/nv50.o
-nouveau-y += core/subdev/bar/nvc0.o
-nouveau-y += core/subdev/bar/gk20a.o
-nouveau-y += core/subdev/bios/base.o
-nouveau-y += core/subdev/bios/bit.o
-nouveau-y += core/subdev/bios/boost.o
-nouveau-y += core/subdev/bios/conn.o
-nouveau-y += core/subdev/bios/cstep.o
-nouveau-y += core/subdev/bios/dcb.o
-nouveau-y += core/subdev/bios/disp.o
-nouveau-y += core/subdev/bios/dp.o
-nouveau-y += core/subdev/bios/extdev.o
-nouveau-y += core/subdev/bios/fan.o
-nouveau-y += core/subdev/bios/gpio.o
-nouveau-y += core/subdev/bios/i2c.o
-nouveau-y += core/subdev/bios/image.o
-nouveau-y += core/subdev/bios/init.o
-nouveau-y += core/subdev/bios/mxm.o
-nouveau-y += core/subdev/bios/npde.o
-nouveau-y += core/subdev/bios/pcir.o
-nouveau-y += core/subdev/bios/perf.o
-nouveau-y += core/subdev/bios/pll.o
-nouveau-y += core/subdev/bios/pmu.o
-nouveau-y += core/subdev/bios/ramcfg.o
-nouveau-y += core/subdev/bios/rammap.o
-nouveau-y += core/subdev/bios/shadow.o
-nouveau-y += core/subdev/bios/shadowacpi.o
-nouveau-y += core/subdev/bios/shadowof.o
-nouveau-y += core/subdev/bios/shadowpci.o
-nouveau-y += core/subdev/bios/shadowramin.o
-nouveau-y += core/subdev/bios/shadowrom.o
-nouveau-y += core/subdev/bios/timing.o
-nouveau-y += core/subdev/bios/therm.o
-nouveau-y += core/subdev/bios/vmap.o
-nouveau-y += core/subdev/bios/volt.o
-nouveau-y += core/subdev/bios/xpio.o
-nouveau-y += core/subdev/bios/M0203.o
-nouveau-y += core/subdev/bios/M0205.o
-nouveau-y += core/subdev/bios/M0209.o
-nouveau-y += core/subdev/bios/P0260.o
-nouveau-y += core/subdev/bus/hwsq.o
-nouveau-y += core/subdev/bus/nv04.o
-nouveau-y += core/subdev/bus/nv31.o
-nouveau-y += core/subdev/bus/nv50.o
-nouveau-y += core/subdev/bus/nv94.o
-nouveau-y += core/subdev/bus/nvc0.o
-nouveau-y += core/subdev/clock/base.o
-nouveau-y += core/subdev/clock/nv04.o
-nouveau-y += core/subdev/clock/nv40.o
-nouveau-y += core/subdev/clock/nv50.o
-nouveau-y += core/subdev/clock/nv84.o
-nouveau-y += core/subdev/clock/nva3.o
-nouveau-y += core/subdev/clock/nvaa.o
-nouveau-y += core/subdev/clock/nvc0.o
-nouveau-y += core/subdev/clock/nve0.o
-nouveau-y += core/subdev/clock/gk20a.o
-nouveau-y += core/subdev/clock/pllnv04.o
-nouveau-y += core/subdev/clock/pllnva3.o
-nouveau-y += core/subdev/devinit/base.o
-nouveau-y += core/subdev/devinit/nv04.o
-nouveau-y += core/subdev/devinit/nv05.o
-nouveau-y += core/subdev/devinit/nv10.o
-nouveau-y += core/subdev/devinit/nv1a.o
-nouveau-y += core/subdev/devinit/nv20.o
-nouveau-y += core/subdev/devinit/nv50.o
-nouveau-y += core/subdev/devinit/nv84.o
-nouveau-y += core/subdev/devinit/nv98.o
-nouveau-y += core/subdev/devinit/nva3.o
-nouveau-y += core/subdev/devinit/nvaf.o
-nouveau-y += core/subdev/devinit/nvc0.o
-nouveau-y += core/subdev/devinit/gm107.o
-nouveau-y += core/subdev/devinit/gm204.o
-nouveau-y += core/subdev/fb/base.o
-nouveau-y += core/subdev/fb/nv04.o
-nouveau-y += core/subdev/fb/nv10.o
-nouveau-y += core/subdev/fb/nv1a.o
-nouveau-y += core/subdev/fb/nv20.o
-nouveau-y += core/subdev/fb/nv25.o
-nouveau-y += core/subdev/fb/nv30.o
-nouveau-y += core/subdev/fb/nv35.o
-nouveau-y += core/subdev/fb/nv36.o
-nouveau-y += core/subdev/fb/nv40.o
-nouveau-y += core/subdev/fb/nv41.o
-nouveau-y += core/subdev/fb/nv44.o
-nouveau-y += core/subdev/fb/nv46.o
-nouveau-y += core/subdev/fb/nv47.o
-nouveau-y += core/subdev/fb/nv49.o
-nouveau-y += core/subdev/fb/nv4e.o
-nouveau-y += core/subdev/fb/nv50.o
-nouveau-y += core/subdev/fb/nv84.o
-nouveau-y += core/subdev/fb/nva3.o
-nouveau-y += core/subdev/fb/nvaa.o
-nouveau-y += core/subdev/fb/nvaf.o
-nouveau-y += core/subdev/fb/nvc0.o
-nouveau-y += core/subdev/fb/nve0.o
-nouveau-y += core/subdev/fb/gk20a.o
-nouveau-y += core/subdev/fb/gm107.o
-nouveau-y += core/subdev/fb/ramnv04.o
-nouveau-y += core/subdev/fb/ramnv10.o
-nouveau-y += core/subdev/fb/ramnv1a.o
-nouveau-y += core/subdev/fb/ramnv20.o
-nouveau-y += core/subdev/fb/ramnv40.o
-nouveau-y += core/subdev/fb/ramnv41.o
-nouveau-y += core/subdev/fb/ramnv44.o
-nouveau-y += core/subdev/fb/ramnv49.o
-nouveau-y += core/subdev/fb/ramnv4e.o
-nouveau-y += core/subdev/fb/ramnv50.o
-nouveau-y += core/subdev/fb/ramnva3.o
-nouveau-y += core/subdev/fb/ramnvaa.o
-nouveau-y += core/subdev/fb/ramnvc0.o
-nouveau-y += core/subdev/fb/ramnve0.o
-nouveau-y += core/subdev/fb/ramgk20a.o
-nouveau-y += core/subdev/fb/ramgm107.o
-nouveau-y += core/subdev/fb/sddr2.o
-nouveau-y += core/subdev/fb/sddr3.o
-nouveau-y += core/subdev/fb/gddr3.o
-nouveau-y += core/subdev/fb/gddr5.o
-nouveau-y += core/subdev/fuse/base.o
-nouveau-y += core/subdev/fuse/g80.o
-nouveau-y += core/subdev/fuse/gf100.o
-nouveau-y += core/subdev/fuse/gm107.o
-nouveau-y += core/subdev/gpio/base.o
-nouveau-y += core/subdev/gpio/nv10.o
-nouveau-y += core/subdev/gpio/nv50.o
-nouveau-y += core/subdev/gpio/nv94.o
-nouveau-y += core/subdev/gpio/nvd0.o
-nouveau-y += core/subdev/gpio/nve0.o
-nouveau-y += core/subdev/i2c/base.o
-nouveau-y += core/subdev/i2c/anx9805.o
-nouveau-y += core/subdev/i2c/aux.o
-nouveau-y += core/subdev/i2c/bit.o
-nouveau-y += core/subdev/i2c/pad.o
-nouveau-y += core/subdev/i2c/padnv04.o
-nouveau-y += core/subdev/i2c/padnv94.o
-nouveau-y += core/subdev/i2c/padgm204.o
-nouveau-y += core/subdev/i2c/nv04.o
-nouveau-y += core/subdev/i2c/nv4e.o
-nouveau-y += core/subdev/i2c/nv50.o
-nouveau-y += core/subdev/i2c/nv94.o
-nouveau-y += core/subdev/i2c/nvd0.o
-nouveau-y += core/subdev/i2c/gf117.o
-nouveau-y += core/subdev/i2c/nve0.o
-nouveau-y += core/subdev/i2c/gm204.o
-nouveau-y += core/subdev/ibus/nvc0.o
-nouveau-y += core/subdev/ibus/nve0.o
-nouveau-y += core/subdev/ibus/gk20a.o
-nouveau-y += core/subdev/instmem/base.o
-nouveau-y += core/subdev/instmem/nv04.o
-nouveau-y += core/subdev/instmem/nv40.o
-nouveau-y += core/subdev/instmem/nv50.o
-nouveau-y += core/subdev/ltc/base.o
-nouveau-y += core/subdev/ltc/gf100.o
-nouveau-y += core/subdev/ltc/gk104.o
-nouveau-y += core/subdev/ltc/gm107.o
-nouveau-y += core/subdev/mc/base.o
-nouveau-y += core/subdev/mc/nv04.o
-nouveau-y += core/subdev/mc/nv40.o
-nouveau-y += core/subdev/mc/nv44.o
-nouveau-y += core/subdev/mc/nv4c.o
-nouveau-y += core/subdev/mc/nv50.o
-nouveau-y += core/subdev/mc/nv94.o
-nouveau-y += core/subdev/mc/nv98.o
-nouveau-y += core/subdev/mc/nvc0.o
-nouveau-y += core/subdev/mc/nvc3.o
-nouveau-y += core/subdev/mc/gk20a.o
-nouveau-y += core/subdev/mxm/base.o
-nouveau-y += core/subdev/mxm/mxms.o
-nouveau-y += core/subdev/mxm/nv50.o
-nouveau-y += core/subdev/pwr/base.o
-nouveau-y += core/subdev/pwr/memx.o
-nouveau-y += core/subdev/pwr/nva3.o
-nouveau-y += core/subdev/pwr/nvc0.o
-nouveau-y += core/subdev/pwr/nvd0.o
-nouveau-y += core/subdev/pwr/gk104.o
-nouveau-y += core/subdev/pwr/nv108.o
-nouveau-y += core/subdev/therm/base.o
-nouveau-y += core/subdev/therm/fan.o
-nouveau-y += core/subdev/therm/fannil.o
-nouveau-y += core/subdev/therm/fanpwm.o
-nouveau-y += core/subdev/therm/fantog.o
-nouveau-y += core/subdev/therm/ic.o
-nouveau-y += core/subdev/therm/temp.o
-nouveau-y += core/subdev/therm/nv40.o
-nouveau-y += core/subdev/therm/nv50.o
-nouveau-y += core/subdev/therm/nv84.o
-nouveau-y += core/subdev/therm/nva3.o
-nouveau-y += core/subdev/therm/nvd0.o
-nouveau-y += core/subdev/therm/gm107.o
-nouveau-y += core/subdev/timer/base.o
-nouveau-y += core/subdev/timer/nv04.o
-nouveau-y += core/subdev/timer/gk20a.o
-nouveau-y += core/subdev/vm/base.o
-nouveau-y += core/subdev/vm/nv04.o
-nouveau-y += core/subdev/vm/nv41.o
-nouveau-y += core/subdev/vm/nv44.o
-nouveau-y += core/subdev/vm/nv50.o
-nouveau-y += core/subdev/vm/nvc0.o
-nouveau-y += core/subdev/volt/base.o
-nouveau-y += core/subdev/volt/gpio.o
-nouveau-y += core/subdev/volt/nv40.o
-nouveau-y += core/subdev/volt/gk20a.o
-
-nouveau-y += core/engine/falcon.o
-nouveau-y += core/engine/xtensa.o
-nouveau-y += core/engine/dmaobj/base.o
-nouveau-y += core/engine/dmaobj/nv04.o
-nouveau-y += core/engine/dmaobj/nv50.o
-nouveau-y += core/engine/dmaobj/nvc0.o
-nouveau-y += core/engine/dmaobj/nvd0.o
-nouveau-y += core/engine/bsp/nv84.o
-nouveau-y += core/engine/bsp/nv98.o
-nouveau-y += core/engine/bsp/nvc0.o
-nouveau-y += core/engine/bsp/nve0.o
-nouveau-y += core/engine/copy/nva3.o
-nouveau-y += core/engine/copy/nvc0.o
-nouveau-y += core/engine/copy/nve0.o
-nouveau-y += core/engine/crypt/nv84.o
-nouveau-y += core/engine/crypt/nv98.o
-nouveau-y += core/engine/device/acpi.o
-nouveau-y += core/engine/device/base.o
-nouveau-y += core/engine/device/ctrl.o
-nouveau-y += core/engine/device/nv04.o
-nouveau-y += core/engine/device/nv10.o
-nouveau-y += core/engine/device/nv20.o
-nouveau-y += core/engine/device/nv30.o
-nouveau-y += core/engine/device/nv40.o
-nouveau-y += core/engine/device/nv50.o
-nouveau-y += core/engine/device/nvc0.o
-nouveau-y += core/engine/device/nve0.o
-nouveau-y += core/engine/device/gm100.o
-nouveau-y += core/engine/disp/base.o
-nouveau-y += core/engine/disp/conn.o
-nouveau-y += core/engine/disp/outp.o
-nouveau-y += core/engine/disp/outpdp.o
-nouveau-y += core/engine/disp/nv04.o
-nouveau-y += core/engine/disp/nv50.o
-nouveau-y += core/engine/disp/nv84.o
-nouveau-y += core/engine/disp/nv94.o
-nouveau-y += core/engine/disp/nva0.o
-nouveau-y += core/engine/disp/nva3.o
-nouveau-y += core/engine/disp/nvd0.o
-nouveau-y += core/engine/disp/nve0.o
-nouveau-y += core/engine/disp/nvf0.o
-nouveau-y += core/engine/disp/gm107.o
-nouveau-y += core/engine/disp/gm204.o
-nouveau-y += core/engine/disp/dacnv50.o
-nouveau-y += core/engine/disp/dport.o
-nouveau-y += core/engine/disp/hdanva3.o
-nouveau-y += core/engine/disp/hdanvd0.o
-nouveau-y += core/engine/disp/hdminv84.o
-nouveau-y += core/engine/disp/hdminva3.o
-nouveau-y += core/engine/disp/hdminvd0.o
-nouveau-y += core/engine/disp/hdminve0.o
-nouveau-y += core/engine/disp/piornv50.o
-nouveau-y += core/engine/disp/sornv50.o
-nouveau-y += core/engine/disp/sornv94.o
-nouveau-y += core/engine/disp/sornvd0.o
-nouveau-y += core/engine/disp/sorgm204.o
-nouveau-y += core/engine/disp/vga.o
-nouveau-y += core/engine/fifo/base.o
-nouveau-y += core/engine/fifo/nv04.o
-nouveau-y += core/engine/fifo/nv10.o
-nouveau-y += core/engine/fifo/nv17.o
-nouveau-y += core/engine/fifo/nv40.o
-nouveau-y += core/engine/fifo/nv50.o
-nouveau-y += core/engine/fifo/nv84.o
-nouveau-y += core/engine/fifo/nvc0.o
-nouveau-y += core/engine/fifo/nve0.o
-nouveau-y += core/engine/fifo/gk20a.o
-nouveau-y += core/engine/fifo/nv108.o
-nouveau-y += core/engine/graph/ctxnv40.o
-nouveau-y += core/engine/graph/ctxnv50.o
-nouveau-y += core/engine/graph/ctxnvc0.o
-nouveau-y += core/engine/graph/ctxnvc1.o
-nouveau-y += core/engine/graph/ctxnvc4.o
-nouveau-y += core/engine/graph/ctxnvc8.o
-nouveau-y += core/engine/graph/ctxnvd7.o
-nouveau-y += core/engine/graph/ctxnvd9.o
-nouveau-y += core/engine/graph/ctxnve4.o
-nouveau-y += core/engine/graph/ctxgk20a.o
-nouveau-y += core/engine/graph/ctxnvf0.o
-nouveau-y += core/engine/graph/ctxgk110b.o
-nouveau-y += core/engine/graph/ctxnv108.o
-nouveau-y += core/engine/graph/ctxgm107.o
-nouveau-y += core/engine/graph/nv04.o
-nouveau-y += core/engine/graph/nv10.o
-nouveau-y += core/engine/graph/nv20.o
-nouveau-y += core/engine/graph/nv25.o
-nouveau-y += core/engine/graph/nv2a.o
-nouveau-y += core/engine/graph/nv30.o
-nouveau-y += core/engine/graph/nv34.o
-nouveau-y += core/engine/graph/nv35.o
-nouveau-y += core/engine/graph/nv40.o
-nouveau-y += core/engine/graph/nv50.o
-nouveau-y += core/engine/graph/nvc0.o
-nouveau-y += core/engine/graph/nvc1.o
-nouveau-y += core/engine/graph/nvc4.o
-nouveau-y += core/engine/graph/nvc8.o
-nouveau-y += core/engine/graph/nvd7.o
-nouveau-y += core/engine/graph/nvd9.o
-nouveau-y += core/engine/graph/nve4.o
-nouveau-y += core/engine/graph/gk20a.o
-nouveau-y += core/engine/graph/nvf0.o
-nouveau-y += core/engine/graph/gk110b.o
-nouveau-y += core/engine/graph/nv108.o
-nouveau-y += core/engine/graph/gm107.o
-nouveau-y += core/engine/mpeg/nv31.o
-nouveau-y += core/engine/mpeg/nv40.o
-nouveau-y += core/engine/mpeg/nv44.o
-nouveau-y += core/engine/mpeg/nv50.o
-nouveau-y += core/engine/mpeg/nv84.o
-nouveau-y += core/engine/perfmon/base.o
-nouveau-y += core/engine/perfmon/daemon.o
-nouveau-y += core/engine/perfmon/nv40.o
-nouveau-y += core/engine/perfmon/nv50.o
-nouveau-y += core/engine/perfmon/nv84.o
-nouveau-y += core/engine/perfmon/nva3.o
-nouveau-y += core/engine/perfmon/nvc0.o
-nouveau-y += core/engine/perfmon/nve0.o
-nouveau-y += core/engine/perfmon/nvf0.o
-nouveau-y += core/engine/ppp/nv98.o
-nouveau-y += core/engine/ppp/nvc0.o
-nouveau-y += core/engine/software/nv04.o
-nouveau-y += core/engine/software/nv10.o
-nouveau-y += core/engine/software/nv50.o
-nouveau-y += core/engine/software/nvc0.o
-nouveau-y += core/engine/vp/nv84.o
-nouveau-y += core/engine/vp/nv98.o
-nouveau-y += core/engine/vp/nvc0.o
-nouveau-y += core/engine/vp/nve0.o
-
-# nvif
-nouveau-y += nvif/object.o
-nouveau-y += nvif/client.o
-nouveau-y += nvif/device.o
-nouveau-y += nvif/notify.o
-
-# drm/core
-nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
-nouveau-y += nouveau_vga.o nouveau_agp.o
-nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
-nouveau-y += nouveau_prime.o nouveau_abi16.o
-nouveau-y += nouveau_nvif.o nouveau_usif.o
-nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
-nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
-
-# drm/kms
-nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_dp.o
-nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
-
-# drm/kms/nv04:nv50
-include $(src)/dispnv04/Makefile
-
-# drm/kms/nv50-
-nouveau-y += nv50_display.o
-
-# drm/pm
-nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
-
-# other random bits
-nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
-ifdef CONFIG_X86
-nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
-endif
-nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
-nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
-
-obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
-
-# platform driver
-obj-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
deleted file mode 100644
index daee87702502..000000000000
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <core/object.h>
-#include <core/gpuobj.h>
-
-#include <subdev/instmem.h>
-#include <subdev/bar.h>
-#include <subdev/vm.h>
-
-void
-nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
-{
- int i;
-
- if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
- for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, 0x00000000);
- }
-
- if (gpuobj->node) {
- nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
- &gpuobj->node);
- }
-
- if (gpuobj->heap.block_size)
- nouveau_mm_fini(&gpuobj->heap);
-
- nouveau_object_destroy(&gpuobj->base);
-}
-
-int
-nouveau_gpuobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pclass,
- struct nouveau_object *pargpu,
- u32 size, u32 align, u32 flags,
- int length, void **pobject)
-{
- struct nouveau_instmem *imem = nouveau_instmem(parent);
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nouveau_gpuobj *gpuobj;
- struct nouveau_mm *heap = NULL;
- int ret, i;
- u64 addr;
-
- *pobject = NULL;
-
- if (pargpu) {
- while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
- if (nv_gpuobj(pargpu)->heap.block_size)
- break;
- pargpu = pargpu->parent;
- }
-
- if (unlikely(pargpu == NULL)) {
- nv_error(parent, "no gpuobj heap\n");
- return -EINVAL;
- }
-
- addr = nv_gpuobj(pargpu)->addr;
- heap = &nv_gpuobj(pargpu)->heap;
- atomic_inc(&parent->refcount);
- } else {
- ret = imem->alloc(imem, parent, size, align, &parent);
- pargpu = parent;
- if (ret)
- return ret;
-
- addr = nv_memobj(pargpu)->addr;
- size = nv_memobj(pargpu)->size;
-
- if (bar && bar->alloc) {
- struct nouveau_instobj *iobj = (void *)parent;
- struct nouveau_mem **mem = (void *)(iobj + 1);
- struct nouveau_mem *node = *mem;
- if (!bar->alloc(bar, parent, node, &pargpu)) {
- nouveau_object_ref(NULL, &parent);
- parent = pargpu;
- }
- }
- }
-
- ret = nouveau_object_create_(parent, engine, oclass, pclass |
- NV_GPUOBJ_CLASS, length, pobject);
- nouveau_object_ref(NULL, &parent);
- gpuobj = *pobject;
- if (ret)
- return ret;
-
- gpuobj->parent = pargpu;
- gpuobj->flags = flags;
- gpuobj->addr = addr;
- gpuobj->size = size;
-
- if (heap) {
- ret = nouveau_mm_head(heap, 0, 1, size, size,
- max(align, (u32)1), &gpuobj->node);
- if (ret)
- return ret;
-
- gpuobj->addr += gpuobj->node->offset;
- }
-
- if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
- ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
- if (ret)
- return ret;
- }
-
- if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
- for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, 0x00000000);
- }
-
- return ret;
-}
-
-struct nouveau_gpuobj_class {
- struct nouveau_object *pargpu;
- u64 size;
- u32 align;
- u32 flags;
-};
-
-static int
-_nouveau_gpuobj_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_gpuobj_class *args = data;
- struct nouveau_gpuobj *object;
- int ret;
-
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
- args->size, args->align, args->flags,
- &object);
- *pobject = nv_object(object);
- if (ret)
- return ret;
-
- return 0;
-}
-
-void
-_nouveau_gpuobj_dtor(struct nouveau_object *object)
-{
- nouveau_gpuobj_destroy(nv_gpuobj(object));
-}
-
-int
-_nouveau_gpuobj_init(struct nouveau_object *object)
-{
- return nouveau_gpuobj_init(nv_gpuobj(object));
-}
-
-int
-_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
-{
- return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
-}
-
-u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
- struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
- if (gpuobj->node)
- addr += gpuobj->node->offset;
- return pfuncs->rd32(gpuobj->parent, addr);
-}
-
-void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
- struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
- if (gpuobj->node)
- addr += gpuobj->node->offset;
- pfuncs->wr32(gpuobj->parent, addr, data);
-}
-
-static struct nouveau_oclass
-_nouveau_gpuobj_oclass = {
- .handle = 0x00000000,
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_gpuobj_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
- },
-};
-
-int
-nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
- u32 size, u32 align, u32 flags,
- struct nouveau_gpuobj **pgpuobj)
-{
- struct nouveau_object *engine = parent;
- struct nouveau_gpuobj_class args = {
- .pargpu = pargpu,
- .size = size,
- .align = align,
- .flags = flags,
- };
-
- if (!nv_iclass(engine, NV_SUBDEV_CLASS))
- engine = engine->engine;
- BUG_ON(engine == NULL);
-
- return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
- &args, sizeof(args),
- (struct nouveau_object **)pgpuobj);
-}
-
-int
-nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
- struct nouveau_vma *vma)
-{
- struct nouveau_bar *bar = nouveau_bar(gpuobj);
- int ret = -EINVAL;
-
- if (bar && bar->umap) {
- struct nouveau_instobj *iobj = (void *)
- nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
- struct nouveau_mem **mem = (void *)(iobj + 1);
- ret = bar->umap(bar, *mem, access, vma);
- }
-
- return ret;
-}
-
-int
-nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
- u32 access, struct nouveau_vma *vma)
-{
- struct nouveau_instobj *iobj = (void *)
- nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
- struct nouveau_mem **mem = (void *)(iobj + 1);
- int ret;
-
- ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
- if (ret)
- return ret;
-
- nouveau_vm_map(vma, *mem);
- return 0;
-}
-
-void
-nouveau_gpuobj_unmap(struct nouveau_vma *vma)
-{
- if (vma->node) {
- nouveau_vm_unmap(vma);
- nouveau_vm_put(vma);
- }
-}
-
-/* the below is basically only here to support sharing the paged dma object
- * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
- * anywhere else.
- */
-
-static void
-nouveau_gpudup_dtor(struct nouveau_object *object)
-{
- struct nouveau_gpuobj *gpuobj = (void *)object;
- nouveau_object_ref(NULL, &gpuobj->parent);
- nouveau_object_destroy(&gpuobj->base);
-}
-
-static struct nouveau_oclass
-nouveau_gpudup_oclass = {
- .handle = NV_GPUOBJ_CLASS,
- .ofuncs = &(struct nouveau_ofuncs) {
- .dtor = nouveau_gpudup_dtor,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
- },
-};
-
-int
-nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
- struct nouveau_gpuobj **pgpuobj)
-{
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_object_create(parent, parent->engine,
- &nouveau_gpudup_oclass, 0, &gpuobj);
- *pgpuobj = gpuobj;
- if (ret)
- return ret;
-
- nouveau_object_ref(nv_object(base), &gpuobj->parent);
- gpuobj->addr = base->addr;
- gpuobj->size = base->size;
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
deleted file mode 100644
index 9261694d0d35..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <engine/falcon.h>
-#include <engine/fifo.h>
-#include <engine/copy.h>
-
-#include <core/enum.h>
-#include <core/enum.h>
-
-#include "fuc/nvc0.fuc.h"
-
-struct nvc0_copy_priv {
- struct nouveau_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nouveau_oclass
-nvc0_copy0_sclass[] = {
- { 0x90b5, &nouveau_object_ofuncs },
- {},
-};
-
-static struct nouveau_oclass
-nvc0_copy1_sclass[] = {
- { 0x90b8, &nouveau_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PCOPY context
- ******************************************************************************/
-
-static struct nouveau_ofuncs
-nvc0_copy_context_ofuncs = {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
-};
-
-static struct nouveau_oclass
-nvc0_copy0_cclass = {
- .handle = NV_ENGCTX(COPY0, 0xc0),
- .ofuncs = &nvc0_copy_context_ofuncs,
-};
-
-static struct nouveau_oclass
-nvc0_copy1_cclass = {
- .handle = NV_ENGCTX(COPY1, 0xc0),
- .ofuncs = &nvc0_copy_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCOPY engine/subdev functions
- ******************************************************************************/
-
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
- struct nvc0_copy_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
- return 0;
-}
-
-static int
-nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_copy_priv *priv;
- int ret;
-
- ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
- "PCE0", "copy0", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nva3_copy_intr;
- nv_engine(priv)->cclass = &nvc0_copy0_cclass;
- nv_engine(priv)->sclass = nvc0_copy0_sclass;
- nv_falcon(priv)->code.data = nvc0_pcopy_code;
- nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
- nv_falcon(priv)->data.data = nvc0_pcopy_data;
- nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
- return 0;
-}
-
-static int
-nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_copy_priv *priv;
- int ret;
-
- ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
- "PCE1", "copy1", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nva3_copy_intr;
- nv_engine(priv)->cclass = &nvc0_copy1_cclass;
- nv_engine(priv)->sclass = nvc0_copy1_sclass;
- nv_falcon(priv)->code.data = nvc0_pcopy_code;
- nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
- nv_falcon(priv)->data.data = nvc0_pcopy_data;
- nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_copy0_oclass = {
- .handle = NV_ENGINE(COPY0, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_copy0_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nvc0_copy_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
- },
-};
-
-struct nouveau_oclass
-nvc0_copy1_oclass = {
- .handle = NV_ENGINE(COPY1, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_copy1_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nvc0_copy_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
deleted file mode 100644
index c7194b354605..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <core/os.h>
-#include <core/enum.h>
-#include <core/engctx.h>
-
-#include <engine/copy.h>
-
-struct nve0_copy_priv {
- struct nouveau_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nouveau_oclass
-nve0_copy_sclass[] = {
- { 0xa0b5, &nouveau_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PCOPY context
- ******************************************************************************/
-
-static struct nouveau_ofuncs
-nve0_copy_context_ofuncs = {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
-};
-
-static struct nouveau_oclass
-nve0_copy_cclass = {
- .handle = NV_ENGCTX(COPY0, 0xc0),
- .ofuncs = &nve0_copy_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCOPY engine/subdev functions
- ******************************************************************************/
-
-static void
-nve0_copy_intr(struct nouveau_subdev *subdev)
-{
- const int ce = nv_subidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
- struct nve0_copy_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
- if (stat) {
- nv_warn(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
- }
-}
-
-static int
-nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_priv *priv;
- int ret;
-
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCE0", "copy0", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nve0_copy_intr;
- nv_engine(priv)->cclass = &nve0_copy_cclass;
- nv_engine(priv)->sclass = nve0_copy_sclass;
- return 0;
-}
-
-static int
-nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_priv *priv;
- int ret;
-
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCE1", "copy1", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nve0_copy_intr;
- nv_engine(priv)->cclass = &nve0_copy_cclass;
- nv_engine(priv)->sclass = nve0_copy_sclass;
- return 0;
-}
-
-static int
-nve0_copy2_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_priv *priv;
- int ret;
-
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCE2", "copy2", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00200000;
- nv_subdev(priv)->intr = nve0_copy_intr;
- nv_engine(priv)->cclass = &nve0_copy_cclass;
- nv_engine(priv)->sclass = nve0_copy_sclass;
- return 0;
-}
-
-struct nouveau_oclass
-nve0_copy0_oclass = {
- .handle = NV_ENGINE(COPY0, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_copy0_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
- },
-};
-
-struct nouveau_oclass
-nve0_copy1_oclass = {
- .handle = NV_ENGINE(COPY1, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_copy1_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
- },
-};
-
-struct nouveau_oclass
-nve0_copy2_oclass = {
- .handle = NV_ENGINE(COPY2, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_copy2_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
deleted file mode 100644
index cc49f4f568cd..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __NVKM_DEVICE_ACPI_H__
-#define __NVKM_DEVICE_ACPI_H__
-
-#include <engine/device.h>
-
-int nvkm_acpi_init(struct nouveau_device *);
-int nvkm_acpi_fini(struct nouveau_device *, bool);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
deleted file mode 100644
index 96f568d1321b..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clock.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-#include <subdev/pwr.h>
-#include <subdev/volt.h>
-
-#include <engine/device.h>
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
-#include <engine/mpeg.h>
-#include <engine/vp.h>
-#include <engine/crypt.h>
-#include <engine/bsp.h>
-#include <engine/ppp.h>
-#include <engine/copy.h>
-#include <engine/disp.h>
-#include <engine/perfmon.h>
-
-int
-nv50_identify(struct nouveau_device *device)
-{
- switch (device->chipset) {
- case 0x50:
- device->cname = "G80";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass;
- break;
- case 0x84:
- device->cname = "G84";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0x86:
- device->cname = "G86";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0x92:
- device->cname = "G92";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0x94:
- device->cname = "G94";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0x96:
- device->cname = "G96";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0x98:
- device->cname = "G98";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0xa0:
- device->cname = "G200";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva0_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0xaa:
- device->cname = "MCP77/MCP78";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0xac:
- device->cname = "MCP79/MCP7A";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
- break;
- case 0xa3:
- device->cname = "GT215";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
- break;
- case 0xa5:
- device->cname = "GT216";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
- break;
- case 0xa8:
- device->cname = "GT218";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
- break;
- case 0xaf:
- device->cname = "MCP89";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvaf_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
- break;
- default:
- nv_fatal(device, "unknown Tesla chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
deleted file mode 100644
index 72a40f95d048..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clock.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-#include <subdev/pwr.h>
-#include <subdev/volt.h>
-
-#include <engine/device.h>
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
-#include <engine/vp.h>
-#include <engine/bsp.h>
-#include <engine/ppp.h>
-#include <engine/copy.h>
-#include <engine/disp.h>
-#include <engine/perfmon.h>
-
-int
-nvc0_identify(struct nouveau_device *device)
-{
- switch (device->chipset) {
- case 0xc0:
- device->cname = "GF100";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xc4:
- device->cname = "GF104";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xc3:
- device->cname = "GF106";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xce:
- device->cname = "GF114";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xcf:
- device->cname = "GF116";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xc1:
- device->cname = "GF108";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xc8:
- device->cname = "GF110";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xd9:
- device->cname = "GF119";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- case 0xd7:
- device->cname = "GF117";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
- break;
- default:
- nv_fatal(device, "unknown Fermi chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
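
Taken together, the Tesla switch that ends further above and the Fermi (nvc0.c) and Kepler (nve0.c) files removed here each exported a single *_identify() entry point that fills device->oclass[] for one chipset generation. As orientation only, the sketch below shows how such entry points are typically selected by card type; the caller name and the NV_xx labels are assumptions for illustration, not part of this diff.

    /*
     * Illustrative sketch, not taken from this diff: dispatch to the
     * per-family identify() entry points the deleted files provided.
     * The function name and NV_xx card-type labels are assumptions.
     */
    static int
    example_device_identify(struct nouveau_device *device)
    {
            switch (device->card_type) {
            case NV_50: return nv50_identify(device);  /* Tesla  */
            case NV_C0: return nvc0_identify(device);  /* Fermi  */
            case NV_E0: return nve0_identify(device);  /* Kepler */
            default:
                    nv_fatal(device, "unsupported card type\n");
                    return -EINVAL;
            }
    }
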
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
deleted file mode 100644
index 732922690653..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clock.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-#include <subdev/pwr.h>
-#include <subdev/volt.h>
-
-#include <engine/device.h>
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
-#include <engine/disp.h>
-#include <engine/copy.h>
-#include <engine/bsp.h>
-#include <engine/vp.h>
-#include <engine/ppp.h>
-#include <engine/perfmon.h>
-
-int
-nve0_identify(struct nouveau_device *device)
-{
- switch (device->chipset) {
- case 0xe4:
- device->cname = "GK104";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
- break;
- case 0xe7:
- device->cname = "GK107";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
- break;
- case 0xe6:
- device->cname = "GK106";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
- break;
- case 0xea:
- device->cname = "GK20A";
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
- break;
- case 0xf0:
- device->cname = "GK110";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
- break;
- case 0xf1:
- device->cname = "GK110B";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk110b_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
- break;
- case 0x106:
- device->cname = "GK208B";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- break;
- case 0x108:
- device->cname = "GK208";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
- break;
- default:
- nv_fatal(device, "unknown Kepler chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
deleted file mode 100644
index 035fd5b9cfc3..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/device/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __NVKM_DEVICE_PRIV_H__
-#define __NVKM_DEVICE_PRIV_H__
-
-#include <engine/device.h>
-
-extern struct nouveau_oclass nouveau_control_oclass[];
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
deleted file mode 100644
index 7f08078ee925..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ /dev/null
@@ -1,252 +0,0 @@
-#ifndef __NV50_DISP_H__
-#define __NV50_DISP_H__
-
-#include <core/parent.h>
-#include <core/namedb.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <core/event.h>
-
-#include <engine/dmaobj.h>
-
-#include "dport.h"
-#include "priv.h"
-#include "outp.h"
-#include "outpdp.h"
-
-#define NV50_DISP_MTHD_ struct nouveau_object *object, \
- struct nv50_disp_priv *priv, void *data, u32 size
-#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
-#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
-
-struct nv50_disp_priv {
- struct nouveau_disp base;
- struct nouveau_oclass *sclass;
-
- struct work_struct supervisor;
- u32 super;
-
- struct nvkm_event uevent;
-
- struct {
- int nr;
- } head;
- struct {
- int nr;
- int (*power)(NV50_DISP_MTHD_V1);
- int (*sense)(NV50_DISP_MTHD_V1);
- } dac;
- struct {
- int nr;
- int (*power)(NV50_DISP_MTHD_V1);
- int (*hda_eld)(NV50_DISP_MTHD_V1);
- int (*hdmi)(NV50_DISP_MTHD_V1);
- u32 lvdsconf;
- void (*magic)(struct nvkm_output *);
- } sor;
- struct {
- int nr;
- int (*power)(NV50_DISP_MTHD_V1);
- u8 type[3];
- } pior;
-};
-
-struct nv50_disp_impl {
- struct nouveau_disp_impl base;
- struct {
- const struct nv50_disp_mthd_chan *core;
- const struct nv50_disp_mthd_chan *base;
- const struct nv50_disp_mthd_chan *ovly;
- int prev;
- } mthd;
- struct {
- int (*scanoutpos)(NV50_DISP_MTHD_V0);
- } head;
-};
-
-int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_main_mthd(struct nouveau_object *, u32, void *, u32);
-
-int nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
-
-int nv50_dac_power(NV50_DISP_MTHD_V1);
-int nv50_dac_sense(NV50_DISP_MTHD_V1);
-
-int nva3_hda_eld(NV50_DISP_MTHD_V1);
-int nvd0_hda_eld(NV50_DISP_MTHD_V1);
-
-int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int nve0_hdmi_ctrl(NV50_DISP_MTHD_V1);
-
-int nv50_sor_power(NV50_DISP_MTHD_V1);
-
-int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
- u32, struct dcb_output *);
-int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
- u32, struct dcb_output *);
-int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
- struct dcb_output *);
-int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
- struct dcb_output *);
-int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
- struct dcb_output *);
-
-int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
- struct dcb_output *);
-int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
- struct dcb_output *);
-int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
- struct dcb_output *);
-
-int nv50_pior_power(NV50_DISP_MTHD_V1);
-
-struct nv50_disp_base {
- struct nouveau_parent base;
- struct nouveau_ramht *ramht;
- u32 chan;
-};
-
-struct nv50_disp_chan_impl {
- struct nouveau_ofuncs base;
- int chid;
- int (*attach)(struct nouveau_object *, struct nouveau_object *, u32);
- void (*detach)(struct nouveau_object *, int);
-};
-
-struct nv50_disp_chan {
- struct nouveau_namedb base;
- int chid;
-};
-
-int nv50_disp_chan_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
-int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *);
-u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
-void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
-extern const struct nvkm_event_func nv50_disp_chan_uevent;
-int nv50_disp_chan_uevent_ctor(struct nouveau_object *, void *, u32,
- struct nvkm_notify *);
-void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
-
-extern const struct nvkm_event_func nvd0_disp_chan_uevent;
-
-#define nv50_disp_chan_init(a) \
- nouveau_namedb_init(&(a)->base)
-#define nv50_disp_chan_fini(a,b) \
- nouveau_namedb_fini(&(a)->base, (b))
-
-struct nv50_disp_dmac {
- struct nv50_disp_chan base;
- struct nouveau_dmaobj *pushdma;
- u32 push;
-};
-
-void nv50_disp_dmac_dtor(struct nouveau_object *);
-
-struct nv50_disp_pioc {
- struct nv50_disp_chan base;
-};
-
-void nv50_disp_pioc_dtor(struct nouveau_object *);
-
-struct nv50_disp_mthd_list {
- u32 mthd;
- u32 addr;
- struct {
- u32 mthd;
- u32 addr;
- const char *name;
- } data[];
-};
-
-struct nv50_disp_mthd_chan {
- const char *name;
- u32 addr;
- struct {
- const char *name;
- int nr;
- const struct nv50_disp_mthd_list *mthd;
- } data[];
-};
-
-extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
-int nv50_disp_core_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
-extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
-int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
-int nv50_disp_oimm_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
-int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-extern struct nouveau_ofuncs nv50_disp_main_ofuncs;
-int nv50_disp_main_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv50_disp_main_dtor(struct nouveau_object *);
-extern struct nouveau_omthds nv50_disp_main_omthds[];
-extern struct nouveau_oclass nv50_disp_cclass;
-void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
- const struct nv50_disp_mthd_chan *);
-void nv50_disp_intr_supervisor(struct work_struct *);
-void nv50_disp_intr(struct nouveau_subdev *);
-extern const struct nvkm_event_func nv50_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan nv84_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_head;
-extern const struct nv50_disp_mthd_chan nv84_disp_base_mthd_chan;
-extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
-
-extern const struct nv50_disp_mthd_chan nv94_disp_core_mthd_chan;
-
-extern struct nv50_disp_chan_impl nvd0_disp_core_ofuncs;
-extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl nvd0_disp_base_ofuncs;
-extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan nvd0_disp_base_mthd_chan;
-extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
-extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_main_ofuncs;
-extern struct nouveau_oclass nvd0_disp_cclass;
-void nvd0_disp_intr_supervisor(struct work_struct *);
-void nvd0_disp_intr(struct nouveau_subdev *);
-extern const struct nvkm_event_func nvd0_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan nve0_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
-
-extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
-extern struct nouveau_oclass *nv50_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl nv94_sor_dp_impl;
-int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
-extern struct nouveau_oclass *nv94_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
-int nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
-extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
-
-void gm204_sor_magic(struct nvkm_output *outp);
-extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
-
-#endif
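
The nv50_disp_mthd_list and nv50_disp_mthd_chan structures declared in the header deleted above describe display channel methods as data: per-head strides plus a table of (method, register, name) entries, consumed by the nv50_disp_mthd_chan() helper declared in the same header. Below is a hypothetical table in that shape; every offset and name in it is illustrative, not taken from the driver.

    /*
     * Hypothetical instance of the data-driven method list declared in
     * the deleted nv50.h.  All offsets and names are illustrative.
     */
    static const struct nv50_disp_mthd_list
    example_disp_core_mthd_head = {
            .mthd = 0x0400,         /* stride between per-head method blocks   */
            .addr = 0x000540,       /* stride between per-head register blocks */
            .data = {
                    { 0x0804, 0x610b58, "example-head-clock" },
                    { 0x0810, 0x610b70, "example-head-sync"  },
                    {}
            }
    };
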
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
deleted file mode 100644
index 6a0511d54ce6..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef __NVKM_DISP_PRIV_H__
-#define __NVKM_DISP_PRIV_H__
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/conn.h>
-
-#include <engine/disp.h>
-
-struct nouveau_disp_impl {
- struct nouveau_oclass base;
- struct nouveau_oclass **outp;
- struct nouveau_oclass **conn;
- const struct nvkm_event_func *vblank;
-};
-
-#define nouveau_disp_create(p,e,c,h,i,x,d) \
- nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
- sizeof(**d), (void **)d)
-#define nouveau_disp_destroy(d) ({ \
- struct nouveau_disp *disp = (d); \
- _nouveau_disp_dtor(nv_object(disp)); \
-})
-#define nouveau_disp_init(d) ({ \
- struct nouveau_disp *disp = (d); \
- _nouveau_disp_init(nv_object(disp)); \
-})
-#define nouveau_disp_fini(d,s) ({ \
- struct nouveau_disp *disp = (d); \
- _nouveau_disp_fini(nv_object(disp), (s)); \
-})
-
-int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int heads,
- const char *, const char *, int, void **);
-void _nouveau_disp_dtor(struct nouveau_object *);
-int _nouveau_disp_init(struct nouveau_object *);
-int _nouveau_disp_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass *nvkm_output_oclass;
-extern struct nouveau_oclass *nvkm_connector_oclass;
-
-int nouveau_disp_vblank_ctor(struct nouveau_object *, void *data, u32 size,
- struct nvkm_notify *);
-void nouveau_disp_vblank(struct nouveau_disp *, int head);
-int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
deleted file mode 100644
index 36f743866937..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NVKM_DMAOBJ_PRIV_H__
-#define __NVKM_DMAOBJ_PRIV_H__
-
-#include <engine/dmaobj.h>
-
-#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
- nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
-
-int nvkm_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void **, u32 *,
- int, void **);
-#define _nvkm_dmaobj_dtor nouveau_object_destroy
-#define _nvkm_dmaobj_init nouveau_object_init
-#define _nvkm_dmaobj_fini nouveau_object_fini
-
-int _nvkm_dmaeng_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-#define _nvkm_dmaeng_dtor _nouveau_engine_dtor
-#define _nvkm_dmaeng_init _nouveau_engine_init
-#define _nvkm_dmaeng_fini _nouveau_engine_fini
-
-struct nvkm_dmaeng_impl {
- struct nouveau_oclass base;
- struct nouveau_oclass *sclass;
- int (*bind)(struct nouveau_dmaobj *, struct nouveau_object *,
- struct nouveau_gpuobj **);
-};
-
-#endif
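
The nvkm_dmaobj_create() wrapper deleted above encodes the allocation convention used throughout this object model: the caller passes the address of a pointer to its derived type, so sizeof(**d) evaluates to the size of the derived structure and the base constructor can allocate it in one place. A hypothetical constructor using the macro is sketched below; the derived structure, its field and the nv_object() idiom are assumptions added for illustration.

    /* Hypothetical derived DMA object; only the embedded base is required. */
    struct example_dmaobj {
            struct nouveau_dmaobj base;     /* must come first */
            u32 conf0;                      /* illustrative per-class state */
    };

    static int
    example_dmaobj_ctor(struct nouveau_object *parent,
                        struct nouveau_object *engine,
                        struct nouveau_oclass *oclass, void *data, u32 size,
                        struct nouveau_object **pobject)
    {
            struct example_dmaobj *dmaobj;
            int ret;

            /* sizeof(**&dmaobj) == sizeof(struct example_dmaobj), so the
             * base constructor allocates the full derived object. */
            ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size,
                                     &dmaobj);
            *pobject = nv_object(dmaobj);
            if (ret)
                    return ret;

            dmaobj->conf0 = 0;
            return 0;
    }
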
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
deleted file mode 100644
index 3a9ceb315c20..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __NV50_FIFO_H__
-#define __NV50_FIFO_H__
-
-struct nv50_fifo_priv {
- struct nouveau_fifo base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-};
-
-struct nv50_fifo_base {
- struct nouveau_fifo_base base;
- struct nouveau_gpuobj *ramfc;
- struct nouveau_gpuobj *cache;
- struct nouveau_gpuobj *eng;
- struct nouveau_gpuobj *pgd;
- struct nouveau_vm *vm;
-};
-
-struct nv50_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 subc[8];
- struct nouveau_ramht *ramht;
-};
-
-void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
-
-void nv50_fifo_object_detach(struct nouveau_object *, int);
-void nv50_fifo_chan_dtor(struct nouveau_object *);
-int nv50_fifo_chan_fini(struct nouveau_object *, bool);
-
-void nv50_fifo_context_dtor(struct nouveau_object *);
-
-void nv50_fifo_dtor(struct nouveau_object *);
-int nv50_fifo_init(struct nouveau_object *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
deleted file mode 100644
index e96b32bb1bbc..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef __NVKM_FIFO_NVE0_H__
-#define __NVKM_FIFO_NVE0_H__
-
-#include <engine/fifo.h>
-
-int nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nve0_fifo_dtor(struct nouveau_object *);
-int nve0_fifo_init(struct nouveau_object *);
-int nve0_fifo_fini(struct nouveau_object *, bool);
-
-struct nve0_fifo_impl {
- struct nouveau_oclass base;
- u32 channels;
-};
-
-#endif
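
struct nve0_fifo_impl deleted above follows the usual "implementation oclass" shape: a base nouveau_oclass plus family-specific parameters (here the channel count) that the shared ctor/init code reads back. A hypothetical per-chipset instance built from the ctor/dtor/init/fini hooks declared in this header is sketched below; the variable name, handle macro and value, and the channel count are illustrative assumptions.

    /*
     * Hypothetical per-chipset descriptor of the shape declared in the
     * deleted nve0.h.  Name, handle and channel count are examples only.
     */
    struct nouveau_oclass *
    example_fifo_oclass = &(struct nve0_fifo_impl) {
            .base.handle = NV_ENGINE(FIFO, 0xe0),
            .base.ofuncs = &(struct nouveau_ofuncs) {
                    .ctor = nve0_fifo_ctor,
                    .dtor = nve0_fifo_dtor,
                    .init = nve0_fifo_init,
                    .fini = nve0_fifo_fini,
            },
            .channels = 4096,
    }.base;
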
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
deleted file mode 100644
index c776cd715e33..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef __NVKM_GRCTX_NVC0_H__
-#define __NVKM_GRCTX_NVC0_H__
-
-#include "nvc0.h"
-
-struct nvc0_grctx {
- struct nvc0_graph_priv *priv;
- struct nvc0_graph_data *data;
- struct nvc0_graph_mmio *mmio;
- int buffer_nr;
- u64 buffer[4];
- u64 addr;
-};
-
-int nvc0_grctx_mmio_data(struct nvc0_grctx *, u32 size, u32 align, u32 access);
-void nvc0_grctx_mmio_item(struct nvc0_grctx *, u32 addr, u32 data, int s, int);
-
-#define mmio_vram(a,b,c,d) nvc0_grctx_mmio_data((a), (b), (c), (d))
-#define mmio_refn(a,b,c,d,e) nvc0_grctx_mmio_item((a), (b), (c), (d), (e))
-#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
-#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
-
-struct nvc0_grctx_oclass {
- struct nouveau_oclass base;
- /* main context generation function */
- void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
- /* context-specific modify-on-first-load list generation function */
- void (*unkn)(struct nvc0_graph_priv *);
- /* mmio context data */
- const struct nvc0_graph_pack *hub;
- const struct nvc0_graph_pack *gpc;
- const struct nvc0_graph_pack *zcull;
- const struct nvc0_graph_pack *tpc;
- const struct nvc0_graph_pack *ppc;
- /* indirect context data, generated with icmds/mthds */
- const struct nvc0_graph_pack *icmd;
- const struct nvc0_graph_pack *mthd;
- /* bundle circular buffer */
- void (*bundle)(struct nvc0_grctx *);
- u32 bundle_size;
- u32 bundle_min_gpm_fifo_depth;
- u32 bundle_token_limit;
- /* pagepool */
- void (*pagepool)(struct nvc0_grctx *);
- u32 pagepool_size;
- /* attribute(/alpha) circular buffer */
- void (*attrib)(struct nvc0_grctx *);
- u32 attrib_nr_max;
- u32 attrib_nr;
- u32 alpha_nr_max;
- u32 alpha_nr;
-};
-
-static inline const struct nvc0_grctx_oclass *
-nvc0_grctx_impl(struct nvc0_graph_priv *priv)
-{
- return (void *)nv_engine(priv)->cclass;
-}
-
-extern struct nouveau_oclass *nvc0_grctx_oclass;
-int nvc0_grctx_generate(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nvc0_grctx_generate_bundle(struct nvc0_grctx *);
-void nvc0_grctx_generate_pagepool(struct nvc0_grctx *);
-void nvc0_grctx_generate_attrib(struct nvc0_grctx *);
-void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
-void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
-
-extern struct nouveau_oclass *nvc1_grctx_oclass;
-void nvc1_grctx_generate_attrib(struct nvc0_grctx *);
-void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
-
-extern struct nouveau_oclass *nvc4_grctx_oclass;
-extern struct nouveau_oclass *nvc8_grctx_oclass;
-
-extern struct nouveau_oclass *nvd7_grctx_oclass;
-void nvd7_grctx_generate_attrib(struct nvc0_grctx *);
-
-extern struct nouveau_oclass *nvd9_grctx_oclass;
-
-extern struct nouveau_oclass *nve4_grctx_oclass;
-extern struct nouveau_oclass *gk20a_grctx_oclass;
-void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nve4_grctx_generate_bundle(struct nvc0_grctx *);
-void nve4_grctx_generate_pagepool(struct nvc0_grctx *);
-void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
-void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
-
-extern struct nouveau_oclass *nvf0_grctx_oclass;
-extern struct nouveau_oclass *gk110b_grctx_oclass;
-extern struct nouveau_oclass *nv108_grctx_oclass;
-extern struct nouveau_oclass *gm107_grctx_oclass;
-
-/* context init value lists */
-
-extern const struct nvc0_graph_pack nvc0_grctx_pack_icmd[];
-
-extern const struct nvc0_graph_pack nvc0_grctx_pack_mthd[];
-extern const struct nvc0_graph_init nvc0_grctx_init_902d_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_9039_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_90c0_0[];
-
-extern const struct nvc0_graph_pack nvc0_grctx_pack_hub[];
-extern const struct nvc0_graph_init nvc0_grctx_init_main_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_fe_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_pri_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_memfmt_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_rstr2d_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_scc_0[];
-
-extern const struct nvc0_graph_pack nvc0_grctx_pack_gpc[];
-extern const struct nvc0_graph_init nvc0_grctx_init_gpc_unk_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_prop_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_gpc_unk_1[];
-extern const struct nvc0_graph_init nvc0_grctx_init_zcull_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_crstr_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_gpm_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_gcc_0[];
-
-extern const struct nvc0_graph_pack nvc0_grctx_pack_zcull[];
-
-extern const struct nvc0_graph_pack nvc0_grctx_pack_tpc[];
-extern const struct nvc0_graph_init nvc0_grctx_init_pe_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_wwdx_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_mpc_0[];
-extern const struct nvc0_graph_init nvc0_grctx_init_tpccs_0[];
-
-extern const struct nvc0_graph_init nvc4_grctx_init_tex_0[];
-extern const struct nvc0_graph_init nvc4_grctx_init_l1c_0[];
-extern const struct nvc0_graph_init nvc4_grctx_init_sm_0[];
-
-extern const struct nvc0_graph_init nvc1_grctx_init_9097_0[];
-
-extern const struct nvc0_graph_init nvc1_grctx_init_gpm_0[];
-
-extern const struct nvc0_graph_init nvc1_grctx_init_pe_0[];
-extern const struct nvc0_graph_init nvc1_grctx_init_wwdx_0[];
-extern const struct nvc0_graph_init nvc1_grctx_init_tpccs_0[];
-
-extern const struct nvc0_graph_init nvc8_grctx_init_9197_0[];
-extern const struct nvc0_graph_init nvc8_grctx_init_9297_0[];
-
-extern const struct nvc0_graph_pack nvd9_grctx_pack_icmd[];
-
-extern const struct nvc0_graph_pack nvd9_grctx_pack_mthd[];
-
-extern const struct nvc0_graph_init nvd9_grctx_init_fe_0[];
-extern const struct nvc0_graph_init nvd9_grctx_init_be_0[];
-
-extern const struct nvc0_graph_init nvd9_grctx_init_prop_0[];
-extern const struct nvc0_graph_init nvd9_grctx_init_gpc_unk_1[];
-extern const struct nvc0_graph_init nvd9_grctx_init_crstr_0[];
-
-extern const struct nvc0_graph_init nvd9_grctx_init_sm_0[];
-
-extern const struct nvc0_graph_init nvd7_grctx_init_pe_0[];
-
-extern const struct nvc0_graph_init nvd7_grctx_init_wwdx_0[];
-
-extern const struct nvc0_graph_init nve4_grctx_init_memfmt_0[];
-extern const struct nvc0_graph_init nve4_grctx_init_ds_0[];
-extern const struct nvc0_graph_init nve4_grctx_init_scc_0[];
-
-extern const struct nvc0_graph_init nve4_grctx_init_gpm_0[];
-
-extern const struct nvc0_graph_init nve4_grctx_init_pes_0[];
-
-extern const struct nvc0_graph_pack nve4_grctx_pack_hub[];
-extern const struct nvc0_graph_pack nve4_grctx_pack_gpc[];
-extern const struct nvc0_graph_pack nve4_grctx_pack_tpc[];
-extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[];
-extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[];
-extern const struct nvc0_graph_init nve4_grctx_init_a097_0[];
-
-extern const struct nvc0_graph_pack nvf0_grctx_pack_icmd[];
-
-extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
-
-extern const struct nvc0_graph_pack nvf0_grctx_pack_hub[];
-extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
-extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[];
-
-extern const struct nvc0_graph_pack nvf0_grctx_pack_gpc[];
-extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[];
-
-extern const struct nvc0_graph_init nvf0_grctx_init_tex_0[];
-extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[];
-extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[];
-
-extern const struct nvc0_graph_pack nvf0_grctx_pack_ppc[];
-
-extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[];
-
-extern const struct nvc0_graph_init nv108_grctx_init_prop_0[];
-extern const struct nvc0_graph_init nv108_grctx_init_crstr_0[];
-
-
-#endif
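
The mmio_vram()/mmio_refn()/mmio_wr32() helpers above let the context generators declared in this header describe the golden context as data: mmio_vram() reserves a buffer and returns its index, mmio_refn() records a register to be patched with that buffer's address shifted by s, and mmio_wr32() records a plain register write. A hypothetical generation step using them is sketched below; the register offsets, sizes and the NV_MEM_ACCESS_RW flag name are assumptions.

    /*
     * Hypothetical context-generation step using the helpers declared in
     * the deleted ctxnvc0.h.  Offsets, sizes and the access-flag name are
     * illustrative assumptions.
     */
    static void
    example_grctx_generate_pool(struct nvc0_grctx *info)
    {
            const u32 access = NV_MEM_ACCESS_RW;    /* assumed flag from subdev/vm */
            const int s = 8;                        /* address shift */
            const int b = mmio_vram(info, 0x8000, (1 << s), access);

            mmio_refn(info, 0x40800c, 0x00000000, s, b);    /* buffer address >> s */
            mmio_wr32(info, 0x408010, 0x80000000);          /* illustrative enable bit */
    }
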
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
deleted file mode 100644
index 2bea7313e03f..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __NV20_GRAPH_H__
-#define __NV20_GRAPH_H__
-
-#include <core/enum.h>
-
-#include <engine/graph.h>
-#include <engine/fifo.h>
-
-struct nv20_graph_priv {
- struct nouveau_graph base;
- struct nouveau_gpuobj *ctxtab;
-};
-
-struct nv20_graph_chan {
- struct nouveau_graph_chan base;
- int chid;
-};
-
-extern struct nouveau_oclass nv25_graph_sclass[];
-int nv20_graph_context_init(struct nouveau_object *);
-int nv20_graph_context_fini(struct nouveau_object *, bool);
-
-void nv20_graph_tile_prog(struct nouveau_engine *, int);
-void nv20_graph_intr(struct nouveau_subdev *);
-
-void nv20_graph_dtor(struct nouveau_object *);
-int nv20_graph_init(struct nouveau_object *);
-
-int nv30_graph_init(struct nouveau_object *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
deleted file mode 100644
index 0505fb419bde..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __NV50_GRAPH_H__
-#define __NV50_GRAPH_H__
-
-int nv50_grctx_init(struct nouveau_device *, u32 *size);
-void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
deleted file mode 100644
index 7ed9e89c3435..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NVC0_GRAPH_H__
-#define __NVC0_GRAPH_H__
-
-#include <core/client.h>
-#include <core/handle.h>
-#include <core/gpuobj.h>
-#include <core/option.h>
-
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-#include <subdev/timer.h>
-#include <subdev/mc.h>
-#include <subdev/ltc.h>
-
-#include <engine/fifo.h>
-#include <engine/graph.h>
-
-#include "fuc/os.h"
-
-#define GPC_MAX 32
-#define TPC_MAX (GPC_MAX * 8)
-
-#define ROP_BCAST(r) (0x408800 + (r))
-#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
-#define GPC_BCAST(r) (0x418000 + (r))
-#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
-#define PPC_UNIT(t, m, r) (0x503000 + (t) * 0x8000 + (m) * 0x200 + (r))
-#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
-
-struct nvc0_graph_data {
- u32 size;
- u32 align;
- u32 access;
-};
-
-struct nvc0_graph_mmio {
- u32 addr;
- u32 data;
- u32 shift;
- int buffer;
-};
-
-struct nvc0_graph_fuc {
- u32 *data;
- u32 size;
-};
-
-struct nvc0_graph_zbc_color {
- u32 format;
- u32 ds[4];
- u32 l2[4];
-};
-
-struct nvc0_graph_zbc_depth {
- u32 format;
- u32 ds;
- u32 l2;
-};
-
-struct nvc0_graph_priv {
- struct nouveau_graph base;
-
- struct nvc0_graph_fuc fuc409c;
- struct nvc0_graph_fuc fuc409d;
- struct nvc0_graph_fuc fuc41ac;
- struct nvc0_graph_fuc fuc41ad;
- bool firmware;
-
- struct nvc0_graph_zbc_color zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT];
- struct nvc0_graph_zbc_depth zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
-
- u8 rop_nr;
- u8 gpc_nr;
- u8 tpc_nr[GPC_MAX];
- u8 tpc_total;
- u8 ppc_nr[GPC_MAX];
- u8 ppc_tpc_nr[GPC_MAX][4];
-
- struct nouveau_gpuobj *unk4188b4;
- struct nouveau_gpuobj *unk4188b8;
-
- struct nvc0_graph_data mmio_data[4];
- struct nvc0_graph_mmio mmio_list[4096/8];
- u32 size;
- u32 *data;
-
- u8 magic_not_rop_nr;
-};
-
-struct nvc0_graph_chan {
- struct nouveau_graph_chan base;
-
- struct nouveau_gpuobj *mmio;
- struct nouveau_vma mmio_vma;
- int mmio_nr;
- struct {
- struct nouveau_gpuobj *mem;
- struct nouveau_vma vma;
- } data[4];
-};
-
-int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nvc0_graph_context_dtor(struct nouveau_object *);
-
-void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
-
-u64 nvc0_graph_units(struct nouveau_graph *);
-int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- struct nouveau_object **);
-void nvc0_graph_dtor(struct nouveau_object *);
-int nvc0_graph_init(struct nouveau_object *);
-void nvc0_graph_zbc_init(struct nvc0_graph_priv *);
-
-int nve4_graph_fini(struct nouveau_object *, bool);
-int nve4_graph_init(struct nouveau_object *);
-
-int nvf0_graph_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_ofuncs nvc0_fermi_ofuncs;
-
-extern struct nouveau_oclass nvc0_graph_sclass[];
-extern struct nouveau_omthds nvc0_graph_9097_omthds[];
-extern struct nouveau_omthds nvc0_graph_90c0_omthds[];
-extern struct nouveau_oclass nvc8_graph_sclass[];
-extern struct nouveau_oclass nvf0_graph_sclass[];
-
-struct nvc0_graph_init {
- u32 addr;
- u8 count;
- u8 pitch;
- u32 data;
-};
-
-struct nvc0_graph_pack {
- const struct nvc0_graph_init *init;
- u32 type;
-};
-
-#define pack_for_each_init(init, pack, head) \
- for (pack = head; pack && pack->init; pack++) \
- for (init = pack->init; init && init->count; init++)
-
-struct nvc0_graph_ucode {
- struct nvc0_graph_fuc code;
- struct nvc0_graph_fuc data;
-};
-
-extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode;
-extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode;
-
-extern struct nvc0_graph_ucode nvf0_graph_fecs_ucode;
-extern struct nvc0_graph_ucode nvf0_graph_gpccs_ucode;
-
-struct nvc0_graph_oclass {
- struct nouveau_oclass base;
- struct nouveau_oclass **cclass;
- struct nouveau_oclass *sclass;
- const struct nvc0_graph_pack *mmio;
- struct {
- struct nvc0_graph_ucode *ucode;
- } fecs;
- struct {
- struct nvc0_graph_ucode *ucode;
- } gpccs;
- int ppc_nr;
-};
-
-void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
-void nvc0_graph_icmd(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
-void nvc0_graph_mthd(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
-int nvc0_graph_init_ctxctl(struct nvc0_graph_priv *);
-
-/* register init value lists */
-
-extern const struct nvc0_graph_init nvc0_graph_init_main_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_fe_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_pri_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_rstr2d_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_pd_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_ds_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_scc_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_prop_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_gpc_unk_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_setup_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_crstr_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_setup_1[];
-extern const struct nvc0_graph_init nvc0_graph_init_zcull_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_gpm_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_gpc_unk_1[];
-extern const struct nvc0_graph_init nvc0_graph_init_gcc_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_tpccs_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_tex_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_pe_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_l1c_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_wwdx_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_tpccs_1[];
-extern const struct nvc0_graph_init nvc0_graph_init_mpc_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_be_0[];
-extern const struct nvc0_graph_init nvc0_graph_init_fe_1[];
-extern const struct nvc0_graph_init nvc0_graph_init_pe_1[];
-
-extern const struct nvc0_graph_init nvc4_graph_init_ds_0[];
-extern const struct nvc0_graph_init nvc4_graph_init_tex_0[];
-extern const struct nvc0_graph_init nvc4_graph_init_sm_0[];
-
-extern const struct nvc0_graph_init nvc1_graph_init_gpc_unk_0[];
-extern const struct nvc0_graph_init nvc1_graph_init_setup_1[];
-
-extern const struct nvc0_graph_init nvd9_graph_init_pd_0[];
-extern const struct nvc0_graph_init nvd9_graph_init_ds_0[];
-extern const struct nvc0_graph_init nvd9_graph_init_prop_0[];
-extern const struct nvc0_graph_init nvd9_graph_init_gpm_0[];
-extern const struct nvc0_graph_init nvd9_graph_init_gpc_unk_1[];
-extern const struct nvc0_graph_init nvd9_graph_init_tex_0[];
-extern const struct nvc0_graph_init nvd9_graph_init_sm_0[];
-extern const struct nvc0_graph_init nvd9_graph_init_fe_1[];
-
-extern const struct nvc0_graph_init nvd7_graph_init_pes_0[];
-extern const struct nvc0_graph_init nvd7_graph_init_wwdx_0[];
-extern const struct nvc0_graph_init nvd7_graph_init_cbm_0[];
-
-extern const struct nvc0_graph_init nve4_graph_init_main_0[];
-extern const struct nvc0_graph_init nve4_graph_init_tpccs_0[];
-extern const struct nvc0_graph_init nve4_graph_init_pe_0[];
-extern const struct nvc0_graph_init nve4_graph_init_be_0[];
-extern const struct nvc0_graph_pack nve4_graph_pack_mmio[];
-
-extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
-extern const struct nvc0_graph_init nvf0_graph_init_ds_0[];
-extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
-extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[];
-extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[];
-extern const struct nvc0_graph_init nvf0_graph_init_tex_0[];
-extern const struct nvc0_graph_init nvf0_graph_init_sm_0[];
-
-extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[];
-
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
deleted file mode 100644
index 1b5792d1df14..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __NVKM_PM_NV40_H__
-#define __NVKM_PM_NV40_H__
-
-#include "priv.h"
-
-struct nv40_perfmon_oclass {
- struct nouveau_oclass base;
- const struct nouveau_specdom *doms;
-};
-
-struct nv40_perfmon_priv {
- struct nouveau_perfmon base;
- u32 sequence;
-};
-
-int nv40_perfmon_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- struct nouveau_object **pobject);
-
-struct nv40_perfmon_cntr {
- struct nouveau_perfctr base;
-};
-
-extern const struct nouveau_funcdom nv40_perfctr_func;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
deleted file mode 100644
index 94217691fe67..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nv40.h"
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static const struct nouveau_specdom
-nv50_perfmon[] = {
- { 0x040, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x100, (const struct nouveau_specsig[]) {
- { 0xc8, "gr_idle" },
- {}
- }, &nv40_perfctr_func },
- { 0x100, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x020, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x040, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-struct nouveau_oclass *
-nv50_perfmon_oclass = &(struct nv40_perfmon_oclass) {
- .base.handle = NV_ENGINE(PERFMON, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = _nouveau_perfmon_fini,
- },
- .doms = nv50_perfmon,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
deleted file mode 100644
index 6197ebdeb648..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nv40.h"
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static const struct nouveau_specdom
-nva3_perfmon[] = {
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-static int
-nva3_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **object)
-{
- int ret = nv40_perfmon_ctor(parent, engine, oclass, data, size, object);
- if (ret == 0) {
- struct nv40_perfmon_priv *priv = (void *)*object;
- ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
- nva3_perfmon_pwr);
- if (ret)
- return ret;
-
- priv->base.last = 3;
- }
- return ret;
-}
-
-struct nouveau_oclass *
-nva3_perfmon_oclass = &(struct nv40_perfmon_oclass) {
- .base.handle = NV_ENGINE(PERFMON, 0xa3),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = _nouveau_perfmon_fini,
- },
- .doms = nva3_perfmon,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
deleted file mode 100644
index f66bca484263..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __NVKM_PM_NVC0_H__
-#define __NVKM_PM_NVC0_H__
-
-#include "priv.h"
-
-struct nvc0_perfmon_priv {
- struct nouveau_perfmon base;
-};
-
-struct nvc0_perfmon_cntr {
- struct nouveau_perfctr base;
-};
-
-extern const struct nouveau_funcdom nvc0_perfctr_func;
-int nvc0_perfmon_fini(struct nouveau_object *, bool);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
deleted file mode 100644
index 71d718c12075..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nvc0.h"
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static const struct nouveau_specdom
-nve0_perfmon_hub[] = {
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "hub00_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x40, (const struct nouveau_specsig[]) {
- { 0x27, "hub01_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "hub02_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "hub03_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x40, (const struct nouveau_specsig[]) {
- { 0x03, "host_mmio_rd" },
- { 0x27, "hub04_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "hub05_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0xc0, (const struct nouveau_specsig[]) {
- { 0x74, "host_fb_rd3x" },
- { 0x75, "host_fb_rd3x_2" },
- { 0xa7, "hub06_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "hub07_user_0" },
- {}
- }, &nvc0_perfctr_func },
- {}
-};
-
-static const struct nouveau_specdom
-nve0_perfmon_gpc[] = {
- { 0xe0, (const struct nouveau_specsig[]) {
- { 0xc7, "gpc00_user_0" },
- {}
- }, &nvc0_perfctr_func },
- {}
-};
-
-static const struct nouveau_specdom
-nve0_perfmon_part[] = {
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "part00_user_0" },
- {}
- }, &nvc0_perfctr_func },
- { 0x60, (const struct nouveau_specsig[]) {
- { 0x47, "part01_user_0" },
- {}
- }, &nvc0_perfctr_func },
- {}
-};
-
-static int
-nve0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_perfmon_priv *priv;
- u32 mask;
- int ret;
-
- ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- /* PDAEMON */
- ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
- nve0_perfmon_pwr);
- if (ret)
- return ret;
-
- /* HUB */
- ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
- nve0_perfmon_hub);
- if (ret)
- return ret;
-
- /* GPC */
- mask = (1 << nv_rd32(priv, 0x022430)) - 1;
- mask &= ~nv_rd32(priv, 0x022504);
- mask &= ~nv_rd32(priv, 0x022584);
-
- ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
- 0x1000, 0x200, nve0_perfmon_gpc);
- if (ret)
- return ret;
-
- /* PART */
- mask = (1 << nv_rd32(priv, 0x022438)) - 1;
- mask &= ~nv_rd32(priv, 0x022548);
- mask &= ~nv_rd32(priv, 0x0225c8);
-
- ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
- 0x1000, 0x200, nve0_perfmon_part);
- if (ret)
- return ret;
-
- nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
- nv_engine(priv)->sclass = nouveau_perfmon_sclass;
- priv->base.last = 7;
- return 0;
-}
-
-struct nouveau_oclass
-nve0_perfmon_oclass = {
- .handle = NV_ENGINE(PERFMON, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = nvc0_perfmon_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
deleted file mode 100644
index 47256f78a895..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nvc0.h"
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static int
-nvf0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_perfmon_priv *priv;
- int ret;
-
- ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
- nve0_perfmon_pwr);
- if (ret)
- return ret;
-
- nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
- nv_engine(priv)->sclass = nouveau_perfmon_sclass;
- return 0;
-}
-
-struct nouveau_oclass
-nvf0_perfmon_oclass = {
- .handle = NV_ENGINE(PERFMON, 0xf0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvf0_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = nvc0_perfmon_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
deleted file mode 100644
index 0ac8714fe0ba..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
+++ /dev/null
@@ -1,91 +0,0 @@
-#ifndef __NVKM_PERFMON_PRIV_H__
-#define __NVKM_PERFMON_PRIV_H__
-
-#include <engine/perfmon.h>
-
-struct nouveau_perfctr {
- struct nouveau_object base;
- struct list_head head;
- struct nouveau_perfsig *signal[4];
- int slot;
- u32 logic_op;
- u32 clk;
- u32 ctr;
-};
-
-extern struct nouveau_oclass nouveau_perfmon_sclass[];
-
-struct nouveau_perfctx {
- struct nouveau_engctx base;
-};
-
-extern struct nouveau_oclass nouveau_perfmon_cclass;
-
-struct nouveau_specsig {
- u8 signal;
- const char *name;
-};
-
-struct nouveau_perfsig {
- const char *name;
-};
-
-struct nouveau_perfdom;
-struct nouveau_perfctr *
-nouveau_perfsig_wrap(struct nouveau_perfmon *, const char *,
- struct nouveau_perfdom **);
-
-struct nouveau_specdom {
- u16 signal_nr;
- const struct nouveau_specsig *signal;
- const struct nouveau_funcdom *func;
-};
-
-extern const struct nouveau_specdom nva3_perfmon_pwr[];
-extern const struct nouveau_specdom nvc0_perfmon_pwr[];
-extern const struct nouveau_specdom nve0_perfmon_pwr[];
-
-struct nouveau_perfdom {
- struct list_head head;
- struct list_head list;
- const struct nouveau_funcdom *func;
- char name[32];
- u32 addr;
- u8 quad;
- u32 signal_nr;
- struct nouveau_perfsig signal[];
-};
-
-struct nouveau_funcdom {
- void (*init)(struct nouveau_perfmon *, struct nouveau_perfdom *,
- struct nouveau_perfctr *);
- void (*read)(struct nouveau_perfmon *, struct nouveau_perfdom *,
- struct nouveau_perfctr *);
- void (*next)(struct nouveau_perfmon *, struct nouveau_perfdom *);
-};
-
-int nouveau_perfdom_new(struct nouveau_perfmon *, const char *, u32,
- u32, u32, u32, const struct nouveau_specdom *);
-
-#define nouveau_perfmon_create(p,e,o,d) \
- nouveau_perfmon_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_perfmon_dtor(p) ({ \
- struct nouveau_perfmon *c = (p); \
- _nouveau_perfmon_dtor(nv_object(c)); \
-})
-#define nouveau_perfmon_init(p) ({ \
- struct nouveau_perfmon *c = (p); \
- _nouveau_perfmon_init(nv_object(c)); \
-})
-#define nouveau_perfmon_fini(p,s) ({ \
- struct nouveau_perfmon *c = (p); \
- _nouveau_perfmon_fini(nv_object(c), (s)); \
-})
-
-int nouveau_perfmon_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_perfmon_dtor(struct nouveau_object *);
-int _nouveau_perfmon_init(struct nouveau_object *);
-int _nouveau_perfmon_fini(struct nouveau_object *, bool);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
deleted file mode 100644
index 41542e725b4b..000000000000
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef __NVKM_SW_NV50_H__
-#define __NVKM_SW_NV50_H__
-
-#include <engine/software.h>
-
-struct nv50_software_oclass {
- struct nouveau_oclass base;
- struct nouveau_oclass *cclass;
- struct nouveau_oclass *sclass;
-};
-
-struct nv50_software_priv {
- struct nouveau_software base;
-};
-
-int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-
-struct nv50_software_cclass {
- struct nouveau_oclass base;
- int (*vblank)(struct nvkm_notify *);
-};
-
-struct nv50_software_chan {
- struct nouveau_software_chan base;
- struct {
- struct nvkm_notify notify[4];
- u32 channel;
- u32 ctxdma;
- u64 offset;
- u32 value;
- } vblank;
-};
-
-int nv50_software_context_ctor(struct nouveau_object *,
- struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv50_software_context_dtor(struct nouveau_object *);
-
-int nv50_software_mthd_vblsem_value(struct nouveau_object *, u32, void *, u32);
-int nv50_software_mthd_vblsem_release(struct nouveau_object *, u32, void *, u32);
-int nv50_software_mthd_flip(struct nouveau_object *, u32, void *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
deleted file mode 100644
index b0ce9f6680b5..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __NOUVEAU_CLIENT_H__
-#define __NOUVEAU_CLIENT_H__
-
-#include <core/namedb.h>
-
-struct nouveau_client {
- struct nouveau_namedb base;
- struct nouveau_handle *root;
- struct nouveau_object *device;
- char name[32];
- u32 debug;
- struct nouveau_vm *vm;
- bool super;
- void *data;
-
- int (*ntfy)(const void *, u32, const void *, u32);
- struct nvkm_client_notify *notify[16];
-};
-
-static inline struct nouveau_client *
-nv_client(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
- nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-static inline struct nouveau_client *
-nouveau_client(void *obj)
-{
- struct nouveau_object *client = nv_object(obj);
- while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
- client = client->parent;
- return (void *)client;
-}
-
-#define nouveau_client_create(n,c,oc,od,d) \
- nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
-
-int nouveau_client_create_(const char *name, u64 device, const char *cfg,
- const char *dbg, int, void **);
-#define nouveau_client_destroy(p) \
- nouveau_namedb_destroy(&(p)->base)
-
-int nouveau_client_init(struct nouveau_client *);
-int nouveau_client_fini(struct nouveau_client *, bool suspend);
-const char *nouveau_client_name(void *obj);
-
-int nvkm_client_notify_new(struct nouveau_object *, struct nvkm_event *,
- void *data, u32 size);
-int nvkm_client_notify_del(struct nouveau_client *, int index);
-int nvkm_client_notify_get(struct nouveau_client *, int index);
-int nvkm_client_notify_put(struct nouveau_client *, int index);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
deleted file mode 100644
index 2ec2e50d3676..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ /dev/null
@@ -1,184 +0,0 @@
-#ifndef __NOUVEAU_DEVICE_H__
-#define __NOUVEAU_DEVICE_H__
-
-#include <core/object.h>
-#include <core/subdev.h>
-#include <core/engine.h>
-#include <core/event.h>
-
-enum nv_subdev_type {
- NVDEV_ENGINE_DEVICE,
- NVDEV_SUBDEV_VBIOS,
-
- /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
- * *any* of them are initialised. This subdev category is used
- * for any subdevs that the VBIOS init table parsing may call out
- * to during POST.
- */
- NVDEV_SUBDEV_DEVINIT,
- NVDEV_SUBDEV_IBUS,
- NVDEV_SUBDEV_GPIO,
- NVDEV_SUBDEV_I2C,
- NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
-
- /* This grouping of subdevs are initialised right after they've
- * been created, and are allowed to assume any subdevs in the
- * list above them exist and have been initialised.
- */
- NVDEV_SUBDEV_FUSE,
- NVDEV_SUBDEV_MXM,
- NVDEV_SUBDEV_MC,
- NVDEV_SUBDEV_BUS,
- NVDEV_SUBDEV_TIMER,
- NVDEV_SUBDEV_FB,
- NVDEV_SUBDEV_LTC,
- NVDEV_SUBDEV_INSTMEM,
- NVDEV_SUBDEV_VM,
- NVDEV_SUBDEV_BAR,
- NVDEV_SUBDEV_PWR,
- NVDEV_SUBDEV_VOLT,
- NVDEV_SUBDEV_THERM,
- NVDEV_SUBDEV_CLOCK,
-
- NVDEV_ENGINE_FIRST,
- NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
- NVDEV_ENGINE_IFB,
- NVDEV_ENGINE_FIFO,
- NVDEV_ENGINE_SW,
- NVDEV_ENGINE_GR,
- NVDEV_ENGINE_MPEG,
- NVDEV_ENGINE_ME,
- NVDEV_ENGINE_VP,
- NVDEV_ENGINE_CRYPT,
- NVDEV_ENGINE_BSP,
- NVDEV_ENGINE_PPP,
- NVDEV_ENGINE_COPY0,
- NVDEV_ENGINE_COPY1,
- NVDEV_ENGINE_COPY2,
- NVDEV_ENGINE_VIC,
- NVDEV_ENGINE_VENC,
- NVDEV_ENGINE_DISP,
- NVDEV_ENGINE_PERFMON,
-
- NVDEV_SUBDEV_NR,
-};
-
-struct nouveau_device {
- struct nouveau_engine base;
- struct list_head head;
-
- struct pci_dev *pdev;
- struct platform_device *platformdev;
- u64 handle;
-
- struct nvkm_event event;
-
- const char *cfgopt;
- const char *dbgopt;
- const char *name;
- const char *cname;
- u64 disable_mask;
-
- enum {
- NV_04 = 0x04,
- NV_10 = 0x10,
- NV_11 = 0x11,
- NV_20 = 0x20,
- NV_30 = 0x30,
- NV_40 = 0x40,
- NV_50 = 0x50,
- NV_C0 = 0xc0,
- NV_E0 = 0xe0,
- GM100 = 0x110,
- } card_type;
- u32 chipset;
- u8 chiprev;
- u32 crystal;
-
- struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
- struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
-
- struct {
- struct notifier_block nb;
- } acpi;
-};
-
-int nouveau_device_list(u64 *name, int size);
-
-static inline struct nouveau_device *
-nv_device(void *obj)
-{
- struct nouveau_object *object = nv_object(obj);
- struct nouveau_object *device = object;
-
- if (device->engine)
- device = device->engine;
- if (device->parent)
- device = device->parent;
-
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
- (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) {
- nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
- nv_hclass(object), nv_hclass(device));
- }
-#endif
-
- return (void *)device;
-}
-
-static inline struct nouveau_subdev *
-nouveau_subdev(void *obj, int sub)
-{
- if (nv_device(obj)->subdev[sub])
- return nv_subdev(nv_device(obj)->subdev[sub]);
- return NULL;
-}
-
-static inline struct nouveau_engine *
-nouveau_engine(void *obj, int sub)
-{
- struct nouveau_subdev *subdev = nouveau_subdev(obj, sub);
- if (subdev && nv_iclass(subdev, NV_ENGINE_CLASS))
- return nv_engine(subdev);
- return NULL;
-}
-
-static inline bool
-nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
-{
- struct nouveau_device *device = nv_device(object);
- return device->pdev->device == dev &&
- device->pdev->subsystem_vendor == ven &&
- device->pdev->subsystem_device == sub;
-}
-
-static inline bool
-nv_device_is_pci(struct nouveau_device *device)
-{
- return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nouveau_device *device)
-{
- return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nouveau_device *device)
-{
- return nv_device_is_pci(device) ? &device->pdev->dev :
- &device->platformdev->dev;
-}
-
-resource_size_t
-nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
-
-resource_size_t
-nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
-
-int
-nv_device_get_irq(struct nouveau_device *device, bool stall);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
deleted file mode 100644
index 2fd48b564c7d..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef __NOUVEAU_ENGCTX_H__
-#define __NOUVEAU_ENGCTX_H__
-
-#include <core/object.h>
-#include <core/gpuobj.h>
-
-#include <subdev/vm.h>
-
-#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
-#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
-
-struct nouveau_engctx {
- struct nouveau_gpuobj base;
- struct nouveau_vma vma;
- struct list_head head;
- unsigned long save;
- u64 addr;
-};
-
-static inline struct nouveau_engctx *
-nv_engctx(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
- nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nouveau_engctx_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
- sizeof(**d), (void **)d)
-
-int nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, struct nouveau_object *,
- u32 size, u32 align, u32 flags,
- int length, void **data);
-void nouveau_engctx_destroy(struct nouveau_engctx *);
-int nouveau_engctx_init(struct nouveau_engctx *);
-int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
-
-int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void _nouveau_engctx_dtor(struct nouveau_object *);
-int _nouveau_engctx_init(struct nouveau_object *);
-int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
-#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32
-#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32
-
-struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr);
-void nouveau_engctx_put(struct nouveau_object *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engine.h b/drivers/gpu/drm/nouveau/core/include/core/engine.h
deleted file mode 100644
index 666d06de77ec..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/engine.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __NOUVEAU_ENGINE_H__
-#define __NOUVEAU_ENGINE_H__
-
-#include <core/object.h>
-#include <core/subdev.h>
-
-#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
-#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
-
-struct nouveau_engine {
- struct nouveau_subdev base;
- struct nouveau_oclass *cclass;
- struct nouveau_oclass *sclass;
-
- struct list_head contexts;
- spinlock_t lock;
-
- void (*tile_prog)(struct nouveau_engine *, int region);
- int (*tlb_flush)(struct nouveau_engine *);
-};
-
-static inline struct nouveau_engine *
-nv_engine(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
- nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-static inline int
-nv_engidx(struct nouveau_object *object)
-{
- return nv_subidx(object);
-}
-
-#define nouveau_engine_create(p,e,c,d,i,f,r) \
- nouveau_engine_create_((p), (e), (c), (d), (i), (f), \
- sizeof(**r),(void **)r)
-
-#define nouveau_engine_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_engine_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_engine_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, bool, const char *,
- const char *, int, void **);
-
-#define _nouveau_engine_dtor _nouveau_subdev_dtor
-#define _nouveau_engine_init _nouveau_subdev_init
-#define _nouveau_engine_fini _nouveau_subdev_fini
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
deleted file mode 100644
index 4fc62bb8c1f0..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/enum.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __NOUVEAU_ENUM_H__
-#define __NOUVEAU_ENUM_H__
-
-struct nouveau_enum {
- u32 value;
- const char *name;
- const void *data;
- u32 data2;
-};
-
-const struct nouveau_enum *
-nouveau_enum_find(const struct nouveau_enum *, u32 value);
-
-const struct nouveau_enum *
-nouveau_enum_print(const struct nouveau_enum *en, u32 value);
-
-struct nouveau_bitfield {
- u32 mask;
- const char *name;
-};
-
-void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
deleted file mode 100644
index b3b9ce4e9d38..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __NOUVEAU_GPUOBJ_H__
-#define __NOUVEAU_GPUOBJ_H__
-
-#include <core/object.h>
-#include <core/device.h>
-#include <core/parent.h>
-#include <core/mm.h>
-
-struct nouveau_vma;
-struct nouveau_vm;
-
-#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE 0x00000002
-#define NVOBJ_FLAG_HEAP 0x00000004
-
-struct nouveau_gpuobj {
- struct nouveau_object base;
- struct nouveau_object *parent;
- struct nouveau_mm_node *node;
- struct nouveau_mm heap;
-
- u32 flags;
- u64 addr;
- u32 size;
-};
-
-static inline struct nouveau_gpuobj *
-nv_gpuobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
- nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d) \
- nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
- sizeof(**d), (void **)d)
-#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->base)
-#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->base, (s))
-int nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32 pclass,
- struct nouveau_object *, u32 size, u32 align,
- u32 flags, int length, void **);
-void nouveau_gpuobj_destroy(struct nouveau_gpuobj *);
-
-int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *,
- u32 size, u32 align, u32 flags,
- struct nouveau_gpuobj **);
-int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *,
- struct nouveau_gpuobj **);
-
-int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *);
-int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *,
- u32 access, struct nouveau_vma *);
-void nouveau_gpuobj_unmap(struct nouveau_vma *);
-
-static inline void
-nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
-{
- nouveau_object_ref(&obj->base, (struct nouveau_object **)ref);
-}
-
-void _nouveau_gpuobj_dtor(struct nouveau_object *);
-int _nouveau_gpuobj_init(struct nouveau_object *);
-int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
deleted file mode 100644
index d22a59138a9b..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __NOUVEAU_HANDLE_H__
-#define __NOUVEAU_HANDLE_H__
-
-struct nouveau_handle {
- struct nouveau_namedb *namedb;
- struct list_head node;
-
- struct list_head head;
- struct list_head tree;
- u32 name;
- u32 priv;
-
- u8 route;
- u64 token;
-
- struct nouveau_handle *parent;
- struct nouveau_object *object;
-};
-
-int nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle,
- struct nouveau_object *, struct nouveau_handle **);
-void nouveau_handle_destroy(struct nouveau_handle *);
-int nouveau_handle_init(struct nouveau_handle *);
-int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
-
-struct nouveau_object *
-nouveau_handle_ref(struct nouveau_object *, u32 name);
-
-struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16);
-struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64);
-struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32);
-void nouveau_handle_put(struct nouveau_handle *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
deleted file mode 100644
index ac7935c2474e..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __NVKM_IOCTL_H__
-#define __NVKM_IOCTL_H__
-
-int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
deleted file mode 100644
index bfe6931544fe..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __NOUVEAU_MM_H__
-#define __NOUVEAU_MM_H__
-
-struct nouveau_mm_node {
- struct list_head nl_entry;
- struct list_head fl_entry;
- struct list_head rl_entry;
-
-#define NVKM_MM_HEAP_ANY 0x00
- u8 heap;
-#define NVKM_MM_TYPE_NONE 0x00
-#define NVKM_MM_TYPE_HOLE 0xff
- u8 type;
- u32 offset;
- u32 length;
-};
-
-struct nouveau_mm {
- struct list_head nodes;
- struct list_head free;
-
- u32 block_size;
- int heap_nodes;
-};
-
-static inline bool
-nouveau_mm_initialised(struct nouveau_mm *mm)
-{
- return mm->block_size != 0;
-}
-
-int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm *);
-int nouveau_mm_head(struct nouveau_mm *, u8 heap, u8 type, u32 size_max,
- u32 size_min, u32 align, struct nouveau_mm_node **);
-int nouveau_mm_tail(struct nouveau_mm *, u8 heap, u8 type, u32 size_max,
- u32 size_min, u32 align, struct nouveau_mm_node **);
-void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
deleted file mode 100644
index f5b5fd8e1fc9..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/namedb.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __NOUVEAU_NAMEDB_H__
-#define __NOUVEAU_NAMEDB_H__
-
-#include <core/parent.h>
-
-struct nouveau_handle;
-
-struct nouveau_namedb {
- struct nouveau_parent base;
- rwlock_t lock;
- struct list_head list;
-};
-
-static inline struct nouveau_namedb *
-nv_namedb(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
- nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nouveau_namedb_create(p,e,c,v,s,m,d) \
- nouveau_namedb_create_((p), (e), (c), (v), (s), (m), \
- sizeof(**d), (void **)d)
-#define nouveau_namedb_init(p) \
- nouveau_parent_init(&(p)->base)
-#define nouveau_namedb_fini(p,s) \
- nouveau_parent_fini(&(p)->base, (s))
-#define nouveau_namedb_destroy(p) \
- nouveau_parent_destroy(&(p)->base)
-
-int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32 pclass,
- struct nouveau_oclass *, u64 engcls,
- int size, void **);
-
-int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-#define _nouveau_namedb_dtor _nouveau_parent_dtor
-#define _nouveau_namedb_init _nouveau_parent_init
-#define _nouveau_namedb_fini _nouveau_parent_fini
-
-int nouveau_namedb_insert(struct nouveau_namedb *, u32 name,
- struct nouveau_object *, struct nouveau_handle *);
-void nouveau_namedb_remove(struct nouveau_handle *);
-
-struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32);
-struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16);
-struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64);
-struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32);
-void nouveau_namedb_put(struct nouveau_handle *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
deleted file mode 100644
index ed055847887e..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/option.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __NOUVEAU_OPTION_H__
-#define __NOUVEAU_OPTION_H__
-
-#include <core/os.h>
-
-const char *nouveau_stropt(const char *optstr, const char *opt, int *len);
-bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
-
-int nouveau_dbgopt(const char *optstr, const char *sub);
-
-/* compares unterminated string 'str' with zero-terminated string 'cmp' */
-static inline int
-strncasecmpz(const char *str, const char *cmp, size_t len)
-{
- if (strlen(cmp) != len)
- return len;
- return strncasecmp(str, cmp, len);
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
deleted file mode 100644
index 12da418ec70a..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __NOUVEAU_PARENT_H__
-#define __NOUVEAU_PARENT_H__
-
-#include <core/device.h>
-#include <core/object.h>
-
-struct nouveau_sclass {
- struct nouveau_sclass *sclass;
- struct nouveau_engine *engine;
- struct nouveau_oclass *oclass;
-};
-
-struct nouveau_parent {
- struct nouveau_object base;
-
- struct nouveau_sclass *sclass;
- u64 engine;
-
- int (*context_attach)(struct nouveau_object *,
- struct nouveau_object *);
- int (*context_detach)(struct nouveau_object *, bool suspend,
- struct nouveau_object *);
-
- int (*object_attach)(struct nouveau_object *parent,
- struct nouveau_object *object, u32 name);
- void (*object_detach)(struct nouveau_object *parent, int cookie);
-};
-
-static inline struct nouveau_parent *
-nv_parent(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
- nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nouveau_parent_create(p,e,c,v,s,m,d) \
- nouveau_parent_create_((p), (e), (c), (v), (s), (m), \
- sizeof(**d), (void **)d)
-#define nouveau_parent_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_parent_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32 pclass,
- struct nouveau_oclass *, u64 engcls,
- int size, void **);
-void nouveau_parent_destroy(struct nouveau_parent *);
-
-void _nouveau_parent_dtor(struct nouveau_object *);
-#define _nouveau_parent_init nouveau_object_init
-#define _nouveau_parent_fini nouveau_object_fini
-
-int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
- struct nouveau_object **pengine,
- struct nouveau_oclass **poclass);
-int nouveau_parent_lclass(struct nouveau_object *, u32 *, int);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
deleted file mode 100644
index 47e4cacbca37..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/ramht.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __NOUVEAU_RAMHT_H__
-#define __NOUVEAU_RAMHT_H__
-
-#include <core/gpuobj.h>
-
-struct nouveau_ramht {
- struct nouveau_gpuobj base;
- int bits;
-};
-
-int nouveau_ramht_insert(struct nouveau_ramht *, int chid,
- u32 handle, u32 context);
-void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
-int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
- u32 size, u32 align, struct nouveau_ramht **);
-
-static inline void
-nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
-{
- nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
deleted file mode 100644
index 67662e2c4547..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __NOUVEAU_BSP_H__
-#define __NOUVEAU_BSP_H__
-
-extern struct nouveau_oclass nv84_bsp_oclass;
-extern struct nouveau_oclass nv98_bsp_oclass;
-extern struct nouveau_oclass nvc0_bsp_oclass;
-extern struct nouveau_oclass nve0_bsp_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
deleted file mode 100644
index 316a28ae5f5c..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __NOUVEAU_COPY_H__
-#define __NOUVEAU_COPY_H__
-
-void nva3_copy_intr(struct nouveau_subdev *);
-
-extern struct nouveau_oclass nva3_copy_oclass;
-extern struct nouveau_oclass nvc0_copy0_oclass;
-extern struct nouveau_oclass nvc0_copy1_oclass;
-extern struct nouveau_oclass nve0_copy0_oclass;
-extern struct nouveau_oclass nve0_copy1_oclass;
-extern struct nouveau_oclass nve0_copy2_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
deleted file mode 100644
index db975618e937..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __NOUVEAU_CRYPT_H__
-#define __NOUVEAU_CRYPT_H__
-
-extern struct nouveau_oclass nv84_crypt_oclass;
-extern struct nouveau_oclass nv98_crypt_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
deleted file mode 100644
index fc307f1317ff..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __NOUVEAU_DISP_H__
-#define __NOUVEAU_DISP_H__
-
-#include <core/object.h>
-#include <core/engine.h>
-#include <core/device.h>
-#include <core/event.h>
-
-struct nouveau_disp {
- struct nouveau_engine base;
-
- struct list_head outp;
-
- struct nvkm_event hpd;
- struct nvkm_event vblank;
-};
-
-static inline struct nouveau_disp *
-nouveau_disp(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
-}
-
-extern struct nouveau_oclass *nv04_disp_oclass;
-extern struct nouveau_oclass *nv50_disp_oclass;
-extern struct nouveau_oclass *nv84_disp_oclass;
-extern struct nouveau_oclass *nva0_disp_oclass;
-extern struct nouveau_oclass *nv94_disp_oclass;
-extern struct nouveau_oclass *nva3_disp_oclass;
-extern struct nouveau_oclass *nvd0_disp_oclass;
-extern struct nouveau_oclass *nve0_disp_oclass;
-extern struct nouveau_oclass *nvf0_disp_oclass;
-extern struct nouveau_oclass *gm107_disp_oclass;
-extern struct nouveau_oclass *gm204_disp_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
deleted file mode 100644
index 1b283a7b78e6..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __NOUVEAU_DMAOBJ_H__
-#define __NOUVEAU_DMAOBJ_H__
-
-#include <core/object.h>
-#include <core/engine.h>
-
-struct nouveau_gpuobj;
-
-struct nouveau_dmaobj {
- struct nouveau_object base;
- u32 target;
- u32 access;
- u64 start;
- u64 limit;
-};
-
-struct nouveau_dmaeng {
- struct nouveau_engine base;
-
- /* creates a "physical" dma object from a struct nouveau_dmaobj */
- int (*bind)(struct nouveau_dmaobj *dmaobj,
- struct nouveau_object *parent,
- struct nouveau_gpuobj **);
-};
-
-extern struct nouveau_oclass *nv04_dmaeng_oclass;
-extern struct nouveau_oclass *nv50_dmaeng_oclass;
-extern struct nouveau_oclass *nvc0_dmaeng_oclass;
-extern struct nouveau_oclass *nvd0_dmaeng_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
deleted file mode 100644
index 181aa7da524d..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef __NOUVEAU_FALCON_H__
-#define __NOUVEAU_FALCON_H__
-
-#include <core/engine.h>
-#include <core/engctx.h>
-#include <core/gpuobj.h>
-
-struct nouveau_falcon_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_falcon_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_falcon_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_falcon_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
-#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
-#define _nouveau_falcon_context_init _nouveau_engctx_init
-#define _nouveau_falcon_context_fini _nouveau_engctx_fini
-#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_falcon_data {
- bool external;
-};
-
-struct nouveau_falcon {
- struct nouveau_engine base;
-
- u32 addr;
- u8 version;
- u8 secret;
-
- struct nouveau_gpuobj *core;
- bool external;
-
- struct {
- u32 limit;
- u32 *data;
- u32 size;
- } code;
-
- struct {
- u32 limit;
- u32 *data;
- u32 size;
- } data;
-};
-
-#define nv_falcon(priv) (&(priv)->base)
-
-#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
- nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
- sizeof(**r),(void **)r)
-#define nouveau_falcon_destroy(p) \
- nouveau_engine_destroy(&(p)->base)
-#define nouveau_falcon_init(p) ({ \
- struct nouveau_falcon *falcon = (p); \
- _nouveau_falcon_init(nv_object(falcon)); \
-})
-#define nouveau_falcon_fini(p,s) ({ \
- struct nouveau_falcon *falcon = (p); \
- _nouveau_falcon_fini(nv_object(falcon), (s)); \
-})
-
-int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32, bool, const char *,
- const char *, int, void **);
-
-void nouveau_falcon_intr(struct nouveau_subdev *subdev);
-
-#define _nouveau_falcon_dtor _nouveau_engine_dtor
-int _nouveau_falcon_init(struct nouveau_object *);
-int _nouveau_falcon_fini(struct nouveau_object *, bool);
-u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
-void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
deleted file mode 100644
index 2007453f6fce..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef __NOUVEAU_FIFO_H__
-#define __NOUVEAU_FIFO_H__
-
-#include <core/namedb.h>
-#include <core/gpuobj.h>
-#include <core/engine.h>
-#include <core/event.h>
-
-struct nouveau_fifo_chan {
- struct nouveau_namedb base;
- struct nouveau_dmaobj *pushdma;
- struct nouveau_gpuobj *pushgpu;
- void __iomem *user;
- u64 addr;
- u32 size;
- u16 chid;
- atomic_t refcnt; /* NV04_NVSW_SET_REF */
-};
-
-static inline struct nouveau_fifo_chan *
-nouveau_fifo_chan(void *obj)
-{
- return (void *)nv_namedb(obj);
-}
-
-#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
- nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
- (m), sizeof(**d), (void **)d)
-#define nouveau_fifo_channel_init(p) \
- nouveau_namedb_init(&(p)->base)
-#define nouveau_fifo_channel_fini(p,s) \
- nouveau_namedb_fini(&(p)->base, (s))
-
-int nouveau_fifo_channel_create_(struct nouveau_object *,
- struct nouveau_object *,
- struct nouveau_oclass *,
- int bar, u32 addr, u32 size, u32 push,
- u64 engmask, int len, void **);
-void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
-
-#define _nouveau_fifo_channel_init _nouveau_namedb_init
-#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
-
-void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-int _nouveau_fifo_channel_map(struct nouveau_object *, u64 *, u32 *);
-u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
-int _nouveau_fifo_channel_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
-
-struct nouveau_fifo_base {
- struct nouveau_gpuobj base;
-};
-
-#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \
- nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
-#define nouveau_fifo_context_destroy(p) \
- nouveau_gpuobj_destroy(&(p)->base)
-#define nouveau_fifo_context_init(p) \
- nouveau_gpuobj_init(&(p)->base)
-#define nouveau_fifo_context_fini(p,s) \
- nouveau_gpuobj_fini(&(p)->base, (s))
-
-#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
-#define _nouveau_fifo_context_init _nouveau_gpuobj_init
-#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
-#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
-#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
-
-struct nouveau_fifo {
- struct nouveau_engine base;
-
- struct nvkm_event cevent; /* channel creation event */
- struct nvkm_event uevent; /* async user trigger */
-
- struct nouveau_object **channel;
- spinlock_t lock;
- u16 min;
- u16 max;
-
- int (*chid)(struct nouveau_fifo *, struct nouveau_object *);
- void (*pause)(struct nouveau_fifo *, unsigned long *);
- void (*start)(struct nouveau_fifo *, unsigned long *);
-};
-
-static inline struct nouveau_fifo *
-nouveau_fifo(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
-}
-
-#define nouveau_fifo_create(o,e,c,fc,lc,d) \
- nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
-#define nouveau_fifo_init(p) \
- nouveau_engine_init(&(p)->base)
-#define nouveau_fifo_fini(p,s) \
- nouveau_engine_fini(&(p)->base, (s))
-
-int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int min, int max,
- int size, void **);
-void nouveau_fifo_destroy(struct nouveau_fifo *);
-const char *
-nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
-
-#define _nouveau_fifo_init _nouveau_engine_init
-#define _nouveau_fifo_fini _nouveau_engine_fini
-
-extern struct nouveau_oclass *nv04_fifo_oclass;
-extern struct nouveau_oclass *nv10_fifo_oclass;
-extern struct nouveau_oclass *nv17_fifo_oclass;
-extern struct nouveau_oclass *nv40_fifo_oclass;
-extern struct nouveau_oclass *nv50_fifo_oclass;
-extern struct nouveau_oclass *nv84_fifo_oclass;
-extern struct nouveau_oclass *nvc0_fifo_oclass;
-extern struct nouveau_oclass *nve0_fifo_oclass;
-extern struct nouveau_oclass *gk20a_fifo_oclass;
-extern struct nouveau_oclass *nv108_fifo_oclass;
-
-int nouveau_fifo_uevent_ctor(struct nouveau_object *, void *, u32,
- struct nvkm_notify *);
-void nouveau_fifo_uevent(struct nouveau_fifo *);
-
-void nv04_fifo_intr(struct nouveau_subdev *);
-int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
deleted file mode 100644
index d5055570d01b..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef __NOUVEAU_GRAPH_H__
-#define __NOUVEAU_GRAPH_H__
-
-#include <core/engine.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-struct nouveau_graph_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_graph_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_graph_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_graph_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
-#define _nouveau_graph_context_init _nouveau_engctx_init
-#define _nouveau_graph_context_fini _nouveau_engctx_fini
-#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_graph {
- struct nouveau_engine base;
-
- /* Returns chipset-specific counts of units packed into an u64.
- */
- u64 (*units)(struct nouveau_graph *);
-};
-
-static inline struct nouveau_graph *
-nouveau_graph(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
-}
-
-#define nouveau_graph_create(p,e,c,y,d) \
- nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
-#define nouveau_graph_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_graph_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_graph_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_graph_dtor _nouveau_engine_dtor
-#define _nouveau_graph_init _nouveau_engine_init
-#define _nouveau_graph_fini _nouveau_engine_fini
-
-extern struct nouveau_oclass nv04_graph_oclass;
-extern struct nouveau_oclass nv10_graph_oclass;
-extern struct nouveau_oclass nv20_graph_oclass;
-extern struct nouveau_oclass nv25_graph_oclass;
-extern struct nouveau_oclass nv2a_graph_oclass;
-extern struct nouveau_oclass nv30_graph_oclass;
-extern struct nouveau_oclass nv34_graph_oclass;
-extern struct nouveau_oclass nv35_graph_oclass;
-extern struct nouveau_oclass nv40_graph_oclass;
-extern struct nouveau_oclass nv50_graph_oclass;
-extern struct nouveau_oclass *nvc0_graph_oclass;
-extern struct nouveau_oclass *nvc1_graph_oclass;
-extern struct nouveau_oclass *nvc4_graph_oclass;
-extern struct nouveau_oclass *nvc8_graph_oclass;
-extern struct nouveau_oclass *nvd7_graph_oclass;
-extern struct nouveau_oclass *nvd9_graph_oclass;
-extern struct nouveau_oclass *nve4_graph_oclass;
-extern struct nouveau_oclass *gk20a_graph_oclass;
-extern struct nouveau_oclass *nvf0_graph_oclass;
-extern struct nouveau_oclass *gk110b_graph_oclass;
-extern struct nouveau_oclass *nv108_graph_oclass;
-extern struct nouveau_oclass *gm107_graph_oclass;
-
-extern const struct nouveau_bitfield nv04_graph_nsource[];
-extern struct nouveau_ofuncs nv04_graph_ofuncs;
-bool nv04_graph_idle(void *obj);
-
-extern const struct nouveau_bitfield nv10_graph_intr_name[];
-extern const struct nouveau_bitfield nv10_graph_nstatus[];
-
-extern const struct nouveau_enum nv50_data_error_names[];
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
deleted file mode 100644
index 9b0d938199f6..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef __NOUVEAU_MPEG_H__
-#define __NOUVEAU_MPEG_H__
-
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_mpeg_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_mpeg_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_mpeg_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_mpeg_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
-#define _nouveau_mpeg_context_init _nouveau_engctx_init
-#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
-#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_mpeg {
- struct nouveau_engine base;
-};
-
-#define nouveau_mpeg_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
-#define nouveau_mpeg_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_mpeg_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_mpeg_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_mpeg_dtor _nouveau_engine_dtor
-#define _nouveau_mpeg_init _nouveau_engine_init
-#define _nouveau_mpeg_fini _nouveau_engine_fini
-
-extern struct nouveau_oclass nv31_mpeg_oclass;
-extern struct nouveau_oclass nv40_mpeg_oclass;
-extern struct nouveau_oclass nv44_mpeg_oclass;
-extern struct nouveau_oclass nv50_mpeg_oclass;
-extern struct nouveau_oclass nv84_mpeg_oclass;
-extern struct nouveau_ofuncs nv31_mpeg_ofuncs;
-extern struct nouveau_oclass nv31_mpeg_cclass;
-extern struct nouveau_oclass nv31_mpeg_sclass[];
-extern struct nouveau_oclass nv40_mpeg_sclass[];
-void nv31_mpeg_intr(struct nouveau_subdev *);
-void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
-int nv31_mpeg_init(struct nouveau_object *);
-
-extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
-int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv50_mpeg_intr(struct nouveau_subdev *);
-int nv50_mpeg_init(struct nouveau_object *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
deleted file mode 100644
index 88cc812baaa3..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __NVKM_PERFMON_H__
-#define __NVKM_PERFMON_H__
-
-#include <core/device.h>
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_perfdom;
-struct nouveau_perfctr;
-struct nouveau_perfmon {
- struct nouveau_engine base;
-
- struct nouveau_perfctx *context;
- void *profile_data;
-
- struct list_head domains;
- u32 sequence;
-
- /*XXX: temp for daemon backend */
- u32 pwr[8];
- u32 last;
-};
-
-static inline struct nouveau_perfmon *
-nouveau_perfmon(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_PERFMON];
-}
-
-extern struct nouveau_oclass *nv40_perfmon_oclass;
-extern struct nouveau_oclass *nv50_perfmon_oclass;
-extern struct nouveau_oclass *nv84_perfmon_oclass;
-extern struct nouveau_oclass *nva3_perfmon_oclass;
-extern struct nouveau_oclass nvc0_perfmon_oclass;
-extern struct nouveau_oclass nve0_perfmon_oclass;
-extern struct nouveau_oclass nvf0_perfmon_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
deleted file mode 100644
index 0a66781e8cf1..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __NOUVEAU_PPP_H__
-#define __NOUVEAU_PPP_H__
-
-extern struct nouveau_oclass nv98_ppp_oclass;
-extern struct nouveau_oclass nvc0_ppp_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
deleted file mode 100644
index 23a462b50d03..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __NOUVEAU_SOFTWARE_H__
-#define __NOUVEAU_SOFTWARE_H__
-
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_software_chan {
- struct nouveau_engctx base;
-
- int (*flip)(void *);
- void *flip_data;
-};
-
-#define nouveau_software_context_create(p,e,c,d) \
- nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
-#define nouveau_software_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_software_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_software_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_software_context_dtor _nouveau_engctx_dtor
-#define _nouveau_software_context_init _nouveau_engctx_init
-#define _nouveau_software_context_fini _nouveau_engctx_fini
-
-struct nouveau_software {
- struct nouveau_engine base;
-};
-
-#define nouveau_software_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
-#define nouveau_software_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_software_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_software_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_software_dtor _nouveau_engine_dtor
-#define _nouveau_software_init _nouveau_engine_init
-#define _nouveau_software_fini _nouveau_engine_fini
-
-extern struct nouveau_oclass *nv04_software_oclass;
-extern struct nouveau_oclass *nv10_software_oclass;
-extern struct nouveau_oclass *nv50_software_oclass;
-extern struct nouveau_oclass *nvc0_software_oclass;
-
-void nv04_software_intr(struct nouveau_subdev *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
deleted file mode 100644
index 39baebec7fbb..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __NOUVEAU_VP_H__
-#define __NOUVEAU_VP_H__
-
-extern struct nouveau_oclass nv84_vp_oclass;
-extern struct nouveau_oclass nv98_vp_oclass;
-extern struct nouveau_oclass nvc0_vp_oclass;
-extern struct nouveau_oclass nve0_vp_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/xtensa.h b/drivers/gpu/drm/nouveau/core/include/engine/xtensa.h
deleted file mode 100644
index 306100f31f02..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/engine/xtensa.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __NOUVEAU_XTENSA_H__
-#define __NOUVEAU_XTENSA_H__
-
-#include <core/engine.h>
-#include <core/engctx.h>
-#include <core/gpuobj.h>
-
-struct nouveau_xtensa {
- struct nouveau_engine base;
-
- u32 addr;
- struct nouveau_gpuobj *gpu_fw;
- u32 fifo_val;
- u32 unkd28;
-};
-
-#define nouveau_xtensa_create(p,e,c,b,d,i,f,r) \
- nouveau_xtensa_create_((p), (e), (c), (b), (d), (i), (f), \
- sizeof(**r),(void **)r)
-
-int _nouveau_xtensa_engctx_ctor(struct nouveau_object *,
- struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-
-void _nouveau_xtensa_intr(struct nouveau_subdev *);
-int nouveau_xtensa_create_(struct nouveau_object *,
- struct nouveau_object *,
- struct nouveau_oclass *, u32, bool,
- const char *, const char *,
- int, void **);
-#define _nouveau_xtensa_dtor _nouveau_engine_dtor
-int _nouveau_xtensa_init(struct nouveau_object *);
-int _nouveau_xtensa_fini(struct nouveau_object *, bool);
-u32 _nouveau_xtensa_rd32(struct nouveau_object *, u64);
-void _nouveau_xtensa_wr32(struct nouveau_object *, u64, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/class.h b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
deleted file mode 120000
index f1ac4859edd4..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/nvif/class.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../nvif/class.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/event.h b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
deleted file mode 120000
index 1b798538a725..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/nvif/event.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../nvif/event.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
deleted file mode 120000
index 8569c86907c5..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../nvif/ioctl.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
deleted file mode 120000
index 69d99292bca4..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../nvif/unpack.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
deleted file mode 100644
index 257ddf6d36d4..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __NOUVEAU_BAR_H__
-#define __NOUVEAU_BAR_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_mem;
-struct nouveau_vma;
-
-struct nouveau_bar {
- struct nouveau_subdev base;
-
- int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
- struct nouveau_mem *, struct nouveau_object **);
-
- int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
- u32 flags, struct nouveau_vma *);
- int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
- u32 flags, struct nouveau_vma *);
- void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
- void (*flush)(struct nouveau_bar *);
-
- /* whether the BAR supports to be ioremapped WC or should be uncached */
- bool iomap_uncached;
-};
-
-static inline struct nouveau_bar *
-nouveau_bar(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
-}
-
-extern struct nouveau_oclass nv50_bar_oclass;
-extern struct nouveau_oclass nvc0_bar_oclass;
-extern struct nouveau_oclass gk20a_bar_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
deleted file mode 100644
index 5bd1ca8cd20d..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __NOUVEAU_BIOS_H__
-#define __NOUVEAU_BIOS_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_bios {
- struct nouveau_subdev base;
- u32 size;
- u8 *data;
-
- u32 bmp_offset;
- u32 bit_offset;
-
- struct {
- u8 major;
- u8 chip;
- u8 minor;
- u8 micro;
- u8 patch;
- } version;
-};
-
-static inline struct nouveau_bios *
-nouveau_bios(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VBIOS];
-}
-
-u8 nvbios_checksum(const u8 *data, int size);
-u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
-
-extern struct nouveau_oclass nouveau_bios_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h
deleted file mode 100644
index e171120cec81..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NVBIOS_M0205_H__
-#define __NVBIOS_M0205_H__
-
-struct nvbios_M0205T {
- u16 freq;
-};
-
-u32 nvbios_M0205Te(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u32 nvbios_M0205Tp(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz,
- struct nvbios_M0205T *);
-
-struct nvbios_M0205E {
- u8 type;
-};
-
-u32 nvbios_M0205Ee(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_M0205Ep(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_M0205E *);
-
-struct nvbios_M0205S {
- u8 data;
-};
-
-u32 nvbios_M0205Se(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr);
-u32 nvbios_M0205Sp(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr,
- struct nvbios_M0205S *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h
deleted file mode 100644
index 67dc50d837bc..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NVBIOS_M0209_H__
-#define __NVBIOS_M0209_H__
-
-u32 nvbios_M0209Te(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-
-struct nvbios_M0209E {
- u8 v00_40;
- u8 bits;
- u8 modulo;
- u8 v02_40;
- u8 v02_07;
- u8 v03;
-};
-
-u32 nvbios_M0209Ee(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_M0209Ep(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_M0209E *);
-
-struct nvbios_M0209S {
- u32 data[0x200];
-};
-
-u32 nvbios_M0209Se(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr);
-u32 nvbios_M0209Sp(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr,
- struct nvbios_M0209S *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h
deleted file mode 100644
index bba01ab1e049..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __NVBIOS_P0260_H__
-#define __NVBIOS_P0260_H__
-
-u32 nvbios_P0260Te(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
-
-struct nvbios_P0260E {
- u32 data;
-};
-
-u32 nvbios_P0260Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
-u32 nvbios_P0260Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
- struct nvbios_P0260E *);
-
-struct nvbios_P0260X {
- u32 data;
-};
-
-u32 nvbios_P0260Xe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
-u32 nvbios_P0260Xp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
- struct nvbios_P0260X *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
deleted file mode 100644
index 662b20726851..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __NVBIOS_BOOST_H__
-#define __NVBIOS_BOOST_H__
-
-u16 nvbios_boostTe(struct nouveau_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
-
-struct nvbios_boostE {
- u8 pstate;
- u32 min;
- u32 max;
-};
-
-u16 nvbios_boostEe(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
-u16 nvbios_boostEp(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
- struct nvbios_boostE *);
-u16 nvbios_boostEm(struct nouveau_bios *, u8, u8 *, u8 *, u8 *, u8 *,
- struct nvbios_boostE *);
-
-struct nvbios_boostS {
- u8 domain;
- u8 percent;
- u32 min;
- u32 max;
-};
-
-u16 nvbios_boostSe(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8);
-u16 nvbios_boostSp(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8,
- struct nvbios_boostS *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
deleted file mode 100644
index a80a43809883..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __NVBIOS_CSTEP_H__
-#define __NVBIOS_CSTEP_H__
-
-u16 nvbios_cstepTe(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
-
-struct nvbios_cstepE {
- u8 pstate;
- u8 index;
-};
-
-u16 nvbios_cstepEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
- struct nvbios_cstepE *);
-u16 nvbios_cstepEm(struct nouveau_bios *, u8 pstate, u8 *ver, u8 *hdr,
- struct nvbios_cstepE *);
-
-struct nvbios_cstepX {
- u32 freq;
- u8 unkn[2];
- u8 voltage;
-};
-
-u16 nvbios_cstepXe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepXp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
- struct nvbios_cstepX *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
deleted file mode 100644
index c35937e2f6a4..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef __NVBIOS_DISP_H__
-#define __NVBIOS_DISP_H__
-
-u16 nvbios_disp_table(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
-
-struct nvbios_disp {
- u16 data;
-};
-
-u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
- u8 *ver, u8 *hdr__, u8 *sub);
-u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
- u8 *ver, u8 *hdr__, u8 *sub,
- struct nvbios_disp *);
-
-struct nvbios_outp {
- u16 type;
- u16 mask;
- u16 script[3];
-};
-
-u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_outp *);
-u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_outp *);
-
-
-struct nvbios_ocfg {
- u16 match;
- u16 clkcmp[2];
-};
-
-u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ocfg *);
-u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
deleted file mode 100644
index 728206e21777..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __NVBIOS_DP_H__
-#define __NVBIOS_DP_H__
-
-struct nvbios_dpout {
- u16 type;
- u16 mask;
- u8 flags;
- u32 script[5];
- u32 lnkcmp;
-};
-
-u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_dpout *);
-u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_dpout *);
-
-struct nvbios_dpcfg {
- u8 pc;
- u8 dc;
- u8 pe;
- u8 tx_pu;
-};
-
-u16
-nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_dpcfg *);
-u16
-nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 pc, u8 vs, u8 pe,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_dpcfg *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
deleted file mode 100644
index 5572e60414e8..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __NVBIOS_MXM_H__
-#define __NVBIOS_MXM_H__
-
-u16 mxm_table(struct nouveau_bios *, u8 *ver, u8 *hdr);
-
-u8 mxm_sor_map(struct nouveau_bios *, u8 conn);
-u8 mxm_ddc_map(struct nouveau_bios *, u8 port);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
deleted file mode 100644
index b18413d951e5..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __NVBIOS_NPDE_H__
-#define __NVBIOS_NPDE_H__
-
-struct nvbios_npdeT {
- u32 image_size;
- bool last;
-};
-
-u32 nvbios_npdeTe(struct nouveau_bios *, u32);
-u32 nvbios_npdeTp(struct nouveau_bios *, u32, struct nvbios_npdeT *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
deleted file mode 100644
index 47e021d3e20d..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __NVBIOS_RAMMAP_H__
-#define __NVBIOS_RAMMAP_H__
-
-struct nvbios_ramcfg;
-
-u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr,
- u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-
-u32 nvbios_rammapEe(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_rammapEp(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ramcfg *);
-u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ramcfg *);
-
-u32 nvbios_rammapSe(struct nouveau_bios *, u32 data,
- u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
- u8 *ver, u8 *hdr);
-u32 nvbios_rammapSp(struct nouveau_bios *, u32 data,
- u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
- u8 *ver, u8 *hdr,
- struct nvbios_ramcfg *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
deleted file mode 100644
index 76d914b67ab5..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __NVBIOS_TIMING_H__
-#define __NVBIOS_TIMING_H__
-
-struct nvbios_ramcfg;
-
-u16 nvbios_timingTe(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_timingEe(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timingEp(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ramcfg *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
deleted file mode 100644
index ad5a8f20e113..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __NVBIOS_VMAP_H__
-#define __NVBIOS_VMAP_H__
-
-struct nouveau_bios;
-
-struct nvbios_vmap {
-};
-
-u16 nvbios_vmap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_vmap_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_vmap *);
-
-struct nvbios_vmap_entry {
- u8 unk0;
- u8 link;
- u32 min;
- u32 max;
- s32 arg[6];
-};
-
-u16 nvbios_vmap_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_vmap_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
- struct nvbios_vmap_entry *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
deleted file mode 100644
index 6a11dcd59770..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __NVBIOS_VOLT_H__
-#define __NVBIOS_VOLT_H__
-
-struct nouveau_bios;
-
-struct nvbios_volt {
- u8 vidmask;
- u32 min;
- u32 max;
- u32 base;
- s16 step;
-};
-
-u16 nvbios_volt_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_volt_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_volt *);
-
-struct nvbios_volt_entry {
- u32 voltage;
- u8 vid;
-};
-
-u16 nvbios_volt_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_volt_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
- struct nvbios_volt_entry *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
deleted file mode 100644
index 697f7ce70aab..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NOUVEAU_BUS_H__
-#define __NOUVEAU_BUS_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_bus_intr {
- u32 stat;
- u32 unit;
-};
-
-struct nouveau_bus {
- struct nouveau_subdev base;
- int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
- u32 hwsq_size;
-};
-
-static inline struct nouveau_bus *
-nouveau_bus(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS];
-}
-
-#define nouveau_bus_create(p, e, o, d) \
- nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
- sizeof(**d), (void **)d)
-#define nouveau_bus_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_bus_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_bus_fini(p, s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_bus_dtor _nouveau_subdev_dtor
-#define _nouveau_bus_init _nouveau_subdev_init
-#define _nouveau_bus_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass *nv04_bus_oclass;
-extern struct nouveau_oclass *nv31_bus_oclass;
-extern struct nouveau_oclass *nv50_bus_oclass;
-extern struct nouveau_oclass *nv94_bus_oclass;
-extern struct nouveau_oclass *nvc0_bus_oclass;
-
-/* interface to sequencer */
-struct nouveau_hwsq;
-int nouveau_hwsq_init(struct nouveau_bus *, struct nouveau_hwsq **);
-int nouveau_hwsq_fini(struct nouveau_hwsq **, bool exec);
-void nouveau_hwsq_wr32(struct nouveau_hwsq *, u32 addr, u32 data);
-void nouveau_hwsq_setf(struct nouveau_hwsq *, u8 flag, int data);
-void nouveau_hwsq_wait(struct nouveau_hwsq *, u8 flag, u8 data);
-void nouveau_hwsq_nsec(struct nouveau_hwsq *, u32 nsec);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
deleted file mode 100644
index 36ed035d4d42..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __NOUVEAU_CLOCK_H__
-#define __NOUVEAU_CLOCK_H__
-
-#include <core/device.h>
-#include <core/subdev.h>
-
-struct nouveau_pll_vals;
-struct nvbios_pll;
-
-enum nv_clk_src {
- nv_clk_src_crystal,
- nv_clk_src_href,
-
- nv_clk_src_hclk,
- nv_clk_src_hclkm3,
- nv_clk_src_hclkm3d2,
- nv_clk_src_hclkm2d3, /* NVAA */
- nv_clk_src_hclkm4, /* NVAA */
- nv_clk_src_cclk, /* NVAA */
-
- nv_clk_src_host,
-
- nv_clk_src_sppll0,
- nv_clk_src_sppll1,
-
- nv_clk_src_mpllsrcref,
- nv_clk_src_mpllsrc,
- nv_clk_src_mpll,
- nv_clk_src_mdiv,
-
- nv_clk_src_core,
- nv_clk_src_core_intm,
- nv_clk_src_shader,
-
- nv_clk_src_mem,
-
- nv_clk_src_gpc,
- nv_clk_src_rop,
- nv_clk_src_hubk01,
- nv_clk_src_hubk06,
- nv_clk_src_hubk07,
- nv_clk_src_copy,
- nv_clk_src_daemon,
- nv_clk_src_disp,
- nv_clk_src_vdec,
-
- nv_clk_src_dom6,
-
- nv_clk_src_max,
-};
-
-struct nouveau_cstate {
- struct list_head head;
- u8 voltage;
- u32 domain[nv_clk_src_max];
-};
-
-struct nouveau_pstate {
- struct list_head head;
- struct list_head list; /* c-states */
- struct nouveau_cstate base;
- u8 pstate;
- u8 fanspeed;
-};
-
-struct nouveau_clock {
- struct nouveau_subdev base;
-
- struct nouveau_clocks *domains;
- struct nouveau_pstate bstate;
-
- struct list_head states;
- int state_nr;
-
- struct work_struct work;
- wait_queue_head_t wait;
- atomic_t waiting;
-
- struct nvkm_notify pwrsrc_ntfy;
- int pwrsrc;
- int pstate; /* current */
- int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
- int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
- int astate; /* perfmon adjustment (base) */
- int tstate; /* thermal adjustment (max-) */
- int dstate; /* display adjustment (min+) */
-
- bool allow_reclock;
-
- int (*read)(struct nouveau_clock *, enum nv_clk_src);
- int (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
- int (*prog)(struct nouveau_clock *);
- void (*tidy)(struct nouveau_clock *);
-
- /*XXX: die, these are here *only* to support the completely
- * bat-shit insane what-was-nouveau_hw.c code
- */
- int (*pll_calc)(struct nouveau_clock *, struct nvbios_pll *,
- int clk, struct nouveau_pll_vals *pv);
- int (*pll_prog)(struct nouveau_clock *, u32 reg1,
- struct nouveau_pll_vals *pv);
-};
-
-static inline struct nouveau_clock *
-nouveau_clock(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
-}
-
-struct nouveau_clocks {
- enum nv_clk_src name;
- u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
- u8 flags;
- const char *mname;
- int mdiv;
-};
-
-#define nouveau_clock_create(p,e,o,i,r,s,n,d) \
- nouveau_clock_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
- (void **)d)
-#define nouveau_clock_destroy(p) ({ \
- struct nouveau_clock *clk = (p); \
- _nouveau_clock_dtor(nv_object(clk)); \
-})
-#define nouveau_clock_init(p) ({ \
- struct nouveau_clock *clk = (p); \
- _nouveau_clock_init(nv_object(clk)); \
-})
-#define nouveau_clock_fini(p,s) ({ \
- struct nouveau_clock *clk = (p); \
- _nouveau_clock_fini(nv_object(clk), (s)); \
-})
-
-int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *,
- struct nouveau_clocks *, struct nouveau_pstate *,
- int, bool, int, void **);
-void _nouveau_clock_dtor(struct nouveau_object *);
-int _nouveau_clock_init(struct nouveau_object *);
-int _nouveau_clock_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_clock_oclass;
-extern struct nouveau_oclass nv40_clock_oclass;
-extern struct nouveau_oclass *nv50_clock_oclass;
-extern struct nouveau_oclass *nv84_clock_oclass;
-extern struct nouveau_oclass *nvaa_clock_oclass;
-extern struct nouveau_oclass nva3_clock_oclass;
-extern struct nouveau_oclass nvc0_clock_oclass;
-extern struct nouveau_oclass nve0_clock_oclass;
-extern struct nouveau_oclass gk20a_clock_oclass;
-
-int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
-int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
- int clk, struct nouveau_pll_vals *);
-int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
- struct nouveau_pll_vals *);
-int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
- int clk, struct nouveau_pll_vals *);
-
-int nouveau_clock_ustate(struct nouveau_clock *, int req, int pwr);
-int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
-int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
-int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
deleted file mode 100644
index e007a9d44683..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __NOUVEAU_DEVINIT_H__
-#define __NOUVEAU_DEVINIT_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_devinit {
- struct nouveau_subdev base;
- bool post;
- void (*meminit)(struct nouveau_devinit *);
- int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
- u32 (*mmio)(struct nouveau_devinit *, u32 addr);
-};
-
-static inline struct nouveau_devinit *
-nouveau_devinit(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
-}
-
-extern struct nouveau_oclass *nv04_devinit_oclass;
-extern struct nouveau_oclass *nv05_devinit_oclass;
-extern struct nouveau_oclass *nv10_devinit_oclass;
-extern struct nouveau_oclass *nv1a_devinit_oclass;
-extern struct nouveau_oclass *nv20_devinit_oclass;
-extern struct nouveau_oclass *nv50_devinit_oclass;
-extern struct nouveau_oclass *nv84_devinit_oclass;
-extern struct nouveau_oclass *nv98_devinit_oclass;
-extern struct nouveau_oclass *nva3_devinit_oclass;
-extern struct nouveau_oclass *nvaf_devinit_oclass;
-extern struct nouveau_oclass *nvc0_devinit_oclass;
-extern struct nouveau_oclass *gm107_devinit_oclass;
-extern struct nouveau_oclass *gm204_devinit_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
deleted file mode 100644
index 8d0032f15205..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef __NOUVEAU_FB_H__
-#define __NOUVEAU_FB_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-#include <core/mm.h>
-
-#include <subdev/vm.h>
-
-/* memory type/access flags, do not match hardware values */
-#define NV_MEM_ACCESS_RO 1
-#define NV_MEM_ACCESS_WO 2
-#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
-#define NV_MEM_ACCESS_SYS 4
-#define NV_MEM_ACCESS_VM 8
-#define NV_MEM_ACCESS_NOSNOOP 16
-
-#define NV_MEM_TARGET_VRAM 0
-#define NV_MEM_TARGET_PCI 1
-#define NV_MEM_TARGET_PCI_NOSNOOP 2
-#define NV_MEM_TARGET_VM 3
-#define NV_MEM_TARGET_GART 4
-
-#define NV_MEM_TYPE_VM 0x7f
-#define NV_MEM_COMP_VM 0x03
-
-struct nouveau_mem {
- struct drm_device *dev;
-
- struct nouveau_vma bar_vma;
- struct nouveau_vma vma[2];
- u8 page_shift;
-
- struct nouveau_mm_node *tag;
- struct list_head regions;
- dma_addr_t *pages;
- u32 memtype;
- u64 offset;
- u64 size;
- struct sg_table *sg;
-};
-
-struct nouveau_fb_tile {
- struct nouveau_mm_node *tag;
- u32 addr;
- u32 limit;
- u32 pitch;
- u32 zcomp;
-};
-
-struct nouveau_fb {
- struct nouveau_subdev base;
-
- bool (*memtype_valid)(struct nouveau_fb *, u32 memtype);
-
- struct nouveau_ram *ram;
-
- struct nouveau_mm vram;
- struct nouveau_mm tags;
-
- struct {
- struct nouveau_fb_tile region[16];
- int regions;
- void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
- void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *);
- void (*fini)(struct nouveau_fb *, int i,
- struct nouveau_fb_tile *);
- void (*prog)(struct nouveau_fb *, int i,
- struct nouveau_fb_tile *);
- } tile;
-};
-
-static inline struct nouveau_fb *
-nouveau_fb(void *obj)
-{
- /* fbram uses this before device subdev pointer is valid */
- if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
- nv_subidx(obj) == NVDEV_SUBDEV_FB)
- return obj;
-
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
-}
-
-extern struct nouveau_oclass *nv04_fb_oclass;
-extern struct nouveau_oclass *nv10_fb_oclass;
-extern struct nouveau_oclass *nv1a_fb_oclass;
-extern struct nouveau_oclass *nv20_fb_oclass;
-extern struct nouveau_oclass *nv25_fb_oclass;
-extern struct nouveau_oclass *nv30_fb_oclass;
-extern struct nouveau_oclass *nv35_fb_oclass;
-extern struct nouveau_oclass *nv36_fb_oclass;
-extern struct nouveau_oclass *nv40_fb_oclass;
-extern struct nouveau_oclass *nv41_fb_oclass;
-extern struct nouveau_oclass *nv44_fb_oclass;
-extern struct nouveau_oclass *nv46_fb_oclass;
-extern struct nouveau_oclass *nv47_fb_oclass;
-extern struct nouveau_oclass *nv49_fb_oclass;
-extern struct nouveau_oclass *nv4e_fb_oclass;
-extern struct nouveau_oclass *nv50_fb_oclass;
-extern struct nouveau_oclass *nv84_fb_oclass;
-extern struct nouveau_oclass *nva3_fb_oclass;
-extern struct nouveau_oclass *nvaa_fb_oclass;
-extern struct nouveau_oclass *nvaf_fb_oclass;
-extern struct nouveau_oclass *nvc0_fb_oclass;
-extern struct nouveau_oclass *nve0_fb_oclass;
-extern struct nouveau_oclass *gk20a_fb_oclass;
-extern struct nouveau_oclass *gm107_fb_oclass;
-
-#include <subdev/bios/ramcfg.h>
-
-struct nouveau_ram_data {
- struct list_head head;
- struct nvbios_ramcfg bios;
- u32 freq;
-};
-
-struct nouveau_ram {
- struct nouveau_object base;
- enum {
- NV_MEM_TYPE_UNKNOWN = 0,
- NV_MEM_TYPE_STOLEN,
- NV_MEM_TYPE_SGRAM,
- NV_MEM_TYPE_SDRAM,
- NV_MEM_TYPE_DDR1,
- NV_MEM_TYPE_DDR2,
- NV_MEM_TYPE_DDR3,
- NV_MEM_TYPE_GDDR2,
- NV_MEM_TYPE_GDDR3,
- NV_MEM_TYPE_GDDR4,
- NV_MEM_TYPE_GDDR5
- } type;
- u64 stolen;
- u64 size;
- u32 tags;
-
- int ranks;
- int parts;
- int part_mask;
-
- int (*get)(struct nouveau_fb *, u64 size, u32 align,
- u32 size_nc, u32 type, struct nouveau_mem **);
- void (*put)(struct nouveau_fb *, struct nouveau_mem **);
-
- int (*calc)(struct nouveau_fb *, u32 freq);
- int (*prog)(struct nouveau_fb *);
- void (*tidy)(struct nouveau_fb *);
- u32 freq;
- u32 mr[16];
- u32 mr1_nuts;
-
- struct nouveau_ram_data *next;
- struct nouveau_ram_data former;
- struct nouveau_ram_data xition;
- struct nouveau_ram_data target;
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fuse.h b/drivers/gpu/drm/nouveau/core/include/subdev/fuse.h
deleted file mode 100644
index 2b1ddb2a9a7d..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fuse.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NOUVEAU_FUSE_H__
-#define __NOUVEAU_FUSE_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_fuse {
- struct nouveau_subdev base;
-};
-
-static inline struct nouveau_fuse *
-nouveau_fuse(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FUSE];
-}
-
-#define nouveau_fuse_create(p, e, o, d) \
- nouveau_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
-
-int nouveau_fuse_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_fuse_dtor(struct nouveau_object *);
-int _nouveau_fuse_init(struct nouveau_object *);
-#define _nouveau_fuse_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass g80_fuse_oclass;
-extern struct nouveau_oclass gf100_fuse_oclass;
-extern struct nouveau_oclass gm107_fuse_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
deleted file mode 100644
index f855140dbcb7..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __NOUVEAU_GPIO_H__
-#define __NOUVEAU_GPIO_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-#include <core/event.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/gpio.h>
-
-struct nvkm_gpio_ntfy_req {
-#define NVKM_GPIO_HI 0x01
-#define NVKM_GPIO_LO 0x02
-#define NVKM_GPIO_TOGGLED 0x03
- u8 mask;
- u8 line;
-};
-
-struct nvkm_gpio_ntfy_rep {
- u8 mask;
-};
-
-struct nouveau_gpio {
- struct nouveau_subdev base;
-
- struct nvkm_event event;
-
- void (*reset)(struct nouveau_gpio *, u8 func);
- int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- struct dcb_gpio_func *);
- int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
- int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
-};
-
-static inline struct nouveau_gpio *
-nouveau_gpio(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
-}
-
-extern struct nouveau_oclass *nv10_gpio_oclass;
-extern struct nouveau_oclass *nv50_gpio_oclass;
-extern struct nouveau_oclass *nv94_gpio_oclass;
-extern struct nouveau_oclass *nvd0_gpio_oclass;
-extern struct nouveau_oclass *nve0_gpio_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
deleted file mode 100644
index d94ccacb40bf..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#ifndef __NOUVEAU_I2C_H__
-#define __NOUVEAU_I2C_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/i2c.h>
-
-#define NV_I2C_PORT(n) (0x00 + (n))
-#define NV_I2C_AUX(n) (0x10 + (n))
-#define NV_I2C_EXT(n) (0x20 + (n))
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
-
-#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
-#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
-#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-
-struct nvkm_i2c_ntfy_req {
-#define NVKM_I2C_PLUG 0x01
-#define NVKM_I2C_UNPLUG 0x02
-#define NVKM_I2C_IRQ 0x04
-#define NVKM_I2C_DONE 0x08
-#define NVKM_I2C_ANY 0x0f
- u8 mask;
- u8 port;
-};
-
-struct nvkm_i2c_ntfy_rep {
- u8 mask;
-};
-
-struct nouveau_i2c_port {
- struct nouveau_object base;
- struct i2c_adapter adapter;
- struct mutex mutex;
-
- struct list_head head;
- u8 index;
- int aux;
-
- const struct nouveau_i2c_func *func;
-};
-
-struct nouveau_i2c_func {
- void (*drive_scl)(struct nouveau_i2c_port *, int);
- void (*drive_sda)(struct nouveau_i2c_port *, int);
- int (*sense_scl)(struct nouveau_i2c_port *);
- int (*sense_sda)(struct nouveau_i2c_port *);
-
- int (*aux)(struct nouveau_i2c_port *, bool, u8, u32, u8 *, u8);
- int (*pattern)(struct nouveau_i2c_port *, int pattern);
- int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
- int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
-};
-
-struct nouveau_i2c_board_info {
- struct i2c_board_info dev;
- u8 udelay; /* set to 0 to use the standard delay */
-};
-
-struct nouveau_i2c {
- struct nouveau_subdev base;
- struct nvkm_event event;
-
- struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
- struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
- int (*acquire_pad)(struct nouveau_i2c_port *, unsigned long timeout);
- void (*release_pad)(struct nouveau_i2c_port *);
- int (*acquire)(struct nouveau_i2c_port *, unsigned long timeout);
- void (*release)(struct nouveau_i2c_port *);
- int (*identify)(struct nouveau_i2c *, int index,
- const char *what, struct nouveau_i2c_board_info *,
- bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *, void *), void *);
-
- wait_queue_head_t wait;
- struct list_head ports;
-};
-
-static inline struct nouveau_i2c *
-nouveau_i2c(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
-}
-
-extern struct nouveau_oclass *nv04_i2c_oclass;
-extern struct nouveau_oclass *nv4e_i2c_oclass;
-extern struct nouveau_oclass *nv50_i2c_oclass;
-extern struct nouveau_oclass *nv94_i2c_oclass;
-extern struct nouveau_oclass *nvd0_i2c_oclass;
-extern struct nouveau_oclass *gf117_i2c_oclass;
-extern struct nouveau_oclass *nve0_i2c_oclass;
-extern struct nouveau_oclass *gm204_i2c_oclass;
-
-static inline int
-nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
-{
- u8 val;
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
- };
-
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
-
- return val;
-}
-
-static inline int
-nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
-{
- u8 buf[2] = { reg, val };
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 2, .buf = buf },
- };
-
- int ret = i2c_transfer(&port->adapter, msgs, 1);
- if (ret != 1)
- return -EIO;
-
- return 0;
-}
-
-static inline bool
-nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
-{
- return nv_rdi2cr(port, addr, 0) >= 0;
-}
-
-int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
deleted file mode 100644
index 31df634c0fdc..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __NOUVEAU_IBUS_H__
-#define __NOUVEAU_IBUS_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_ibus {
- struct nouveau_subdev base;
-};
-
-static inline struct nouveau_ibus *
-nouveau_ibus(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_IBUS];
-}
-
-#define nouveau_ibus_create(p,e,o,d) \
- nouveau_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \
- sizeof(**d), (void **)d)
-#define nouveau_ibus_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_ibus_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_ibus_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_ibus_dtor _nouveau_subdev_dtor
-#define _nouveau_ibus_init _nouveau_subdev_init
-#define _nouveau_ibus_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass nvc0_ibus_oclass;
-extern struct nouveau_oclass nve0_ibus_oclass;
-extern struct nouveau_oclass gk20a_ibus_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
deleted file mode 100644
index c1df26f3230c..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __NOUVEAU_INSTMEM_H__
-#define __NOUVEAU_INSTMEM_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-#include <core/mm.h>
-
-struct nouveau_instobj {
- struct nouveau_object base;
- struct list_head head;
- u32 *suspend;
- u64 addr;
- u32 size;
-};
-
-static inline struct nouveau_instobj *
-nv_memobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
- nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-struct nouveau_instmem {
- struct nouveau_subdev base;
- struct list_head list;
-
- u32 reserved;
- int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
- u32 size, u32 align, struct nouveau_object **);
-};
-
-static inline struct nouveau_instmem *
-nouveau_instmem(void *obj)
-{
- /* nv04/nv40 impls need to create objects in their constructor,
- * which is before the subdev pointer is valid
- */
- if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
- nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
- return obj;
-
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
-}
-
-extern struct nouveau_oclass *nv04_instmem_oclass;
-extern struct nouveau_oclass *nv40_instmem_oclass;
-extern struct nouveau_oclass *nv50_instmem_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
deleted file mode 100644
index b909a7363f6b..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __NOUVEAU_LTC_H__
-#define __NOUVEAU_LTC_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-#define NOUVEAU_LTC_MAX_ZBC_CNT 16
-
-struct nouveau_mm_node;
-
-struct nouveau_ltc {
- struct nouveau_subdev base;
-
- int (*tags_alloc)(struct nouveau_ltc *, u32 count,
- struct nouveau_mm_node **);
- void (*tags_free)(struct nouveau_ltc *, struct nouveau_mm_node **);
- void (*tags_clear)(struct nouveau_ltc *, u32 first, u32 count);
-
- int zbc_min;
- int zbc_max;
- int (*zbc_color_get)(struct nouveau_ltc *, int index, const u32[4]);
- int (*zbc_depth_get)(struct nouveau_ltc *, int index, const u32);
-};
-
-static inline struct nouveau_ltc *
-nouveau_ltc(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTC];
-}
-
-extern struct nouveau_oclass *gf100_ltc_oclass;
-extern struct nouveau_oclass *gk104_ltc_oclass;
-extern struct nouveau_oclass *gm107_ltc_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
deleted file mode 100644
index 568e4dfc5e9e..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __NOUVEAU_MC_H__
-#define __NOUVEAU_MC_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_mc {
- struct nouveau_subdev base;
- bool use_msi;
- unsigned int irq;
- void (*unk260)(struct nouveau_mc *, u32);
-};
-
-static inline struct nouveau_mc *
-nouveau_mc(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
-}
-
-extern struct nouveau_oclass *nv04_mc_oclass;
-extern struct nouveau_oclass *nv40_mc_oclass;
-extern struct nouveau_oclass *nv44_mc_oclass;
-extern struct nouveau_oclass *nv4c_mc_oclass;
-extern struct nouveau_oclass *nv50_mc_oclass;
-extern struct nouveau_oclass *nv94_mc_oclass;
-extern struct nouveau_oclass *nv98_mc_oclass;
-extern struct nouveau_oclass *nvc0_mc_oclass;
-extern struct nouveau_oclass *nvc3_mc_oclass;
-extern struct nouveau_oclass *gk20a_mc_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
deleted file mode 100644
index b93b152cb566..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __NOUVEAU_MXM_H__
-#define __NOUVEAU_MXM_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-#define MXM_SANITISE_DCB 0x00000001
-
-struct nouveau_mxm {
- struct nouveau_subdev base;
- u32 action;
- u8 *mxms;
-};
-
-static inline struct nouveau_mxm *
-nouveau_mxm(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MXM];
-}
-
-#define nouveau_mxm_create(p,e,o,d) \
- nouveau_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_mxm_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_mxm_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-int nouveau_mxm_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void nouveau_mxm_destroy(struct nouveau_mxm *);
-
-#define _nouveau_mxm_dtor _nouveau_subdev_dtor
-#define _nouveau_mxm_init _nouveau_subdev_init
-#define _nouveau_mxm_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass nv50_mxm_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
deleted file mode 100644
index f2427bf5aeed..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __NOUVEAU_PWR_H__
-#define __NOUVEAU_PWR_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_pwr {
- struct nouveau_subdev base;
-
- struct {
- u32 base;
- u32 size;
- } send;
-
- struct {
- u32 base;
- u32 size;
-
- struct work_struct work;
- wait_queue_head_t wait;
- u32 process;
- u32 message;
- u32 data[2];
- } recv;
-
- int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
- void (*pgob)(struct nouveau_pwr *, bool);
-};
-
-static inline struct nouveau_pwr *
-nouveau_pwr(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
-}
-
-extern struct nouveau_oclass *nva3_pwr_oclass;
-extern struct nouveau_oclass *nvc0_pwr_oclass;
-extern struct nouveau_oclass *nvd0_pwr_oclass;
-extern struct nouveau_oclass *gk104_pwr_oclass;
-extern struct nouveau_oclass *nv108_pwr_oclass;
-
-/* interface to MEMX process running on PPWR */
-struct nouveau_memx;
-int nouveau_memx_init(struct nouveau_pwr *, struct nouveau_memx **);
-int nouveau_memx_fini(struct nouveau_memx **, bool exec);
-void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
-void nouveau_memx_wait(struct nouveau_memx *,
- u32 addr, u32 mask, u32 data, u32 nsec);
-void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
-void nouveau_memx_wait_vblank(struct nouveau_memx *);
-void nouveau_memx_train(struct nouveau_memx *);
-int nouveau_memx_train_result(struct nouveau_pwr *, u32 *, int);
-void nouveau_memx_block(struct nouveau_memx *);
-void nouveau_memx_unblock(struct nouveau_memx *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
deleted file mode 100644
index a437597dcafc..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef __NOUVEAU_THERM_H__
-#define __NOUVEAU_THERM_H__
-
-#include <core/device.h>
-#include <core/subdev.h>
-
-enum nouveau_therm_fan_mode {
- NOUVEAU_THERM_CTRL_NONE = 0,
- NOUVEAU_THERM_CTRL_MANUAL = 1,
- NOUVEAU_THERM_CTRL_AUTO = 2,
-};
-
-enum nouveau_therm_attr_type {
- NOUVEAU_THERM_ATTR_FAN_MIN_DUTY = 0,
- NOUVEAU_THERM_ATTR_FAN_MAX_DUTY = 1,
- NOUVEAU_THERM_ATTR_FAN_MODE = 2,
-
- NOUVEAU_THERM_ATTR_THRS_FAN_BOOST = 10,
- NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST = 11,
- NOUVEAU_THERM_ATTR_THRS_DOWN_CLK = 12,
- NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST = 13,
- NOUVEAU_THERM_ATTR_THRS_CRITICAL = 14,
- NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST = 15,
- NOUVEAU_THERM_ATTR_THRS_SHUTDOWN = 16,
- NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
-};
-
-struct nouveau_therm {
- struct nouveau_subdev base;
-
- int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
- int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
- int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
- int (*pwm_clock)(struct nouveau_therm *, int line);
-
- int (*fan_get)(struct nouveau_therm *);
- int (*fan_set)(struct nouveau_therm *, int);
- int (*fan_sense)(struct nouveau_therm *);
-
- int (*temp_get)(struct nouveau_therm *);
-
- int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type);
- int (*attr_set)(struct nouveau_therm *,
- enum nouveau_therm_attr_type, int);
-};
-
-static inline struct nouveau_therm *
-nouveau_therm(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_THERM];
-}
-
-#define nouveau_therm_create(p,e,o,d) \
- nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_therm_destroy(p) ({ \
- struct nouveau_therm *therm = (p); \
- _nouveau_therm_dtor(nv_object(therm)); \
-})
-#define nouveau_therm_init(p) ({ \
- struct nouveau_therm *therm = (p); \
- _nouveau_therm_init(nv_object(therm)); \
-})
-#define nouveau_therm_fini(p,s) ({ \
- struct nouveau_therm *therm = (p); \
- _nouveau_therm_init(nv_object(therm), (s)); \
-})
-
-int nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_therm_dtor(struct nouveau_object *);
-int _nouveau_therm_init(struct nouveau_object *);
-int _nouveau_therm_fini(struct nouveau_object *, bool);
-
-int nouveau_therm_cstate(struct nouveau_therm *, int, int);
-
-extern struct nouveau_oclass nv40_therm_oclass;
-extern struct nouveau_oclass nv50_therm_oclass;
-extern struct nouveau_oclass nv84_therm_oclass;
-extern struct nouveau_oclass nva3_therm_oclass;
-extern struct nouveau_oclass nvd0_therm_oclass;
-extern struct nouveau_oclass gm107_therm_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
deleted file mode 100644
index db9be803a874..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef __NOUVEAU_TIMER_H__
-#define __NOUVEAU_TIMER_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_alarm {
- struct list_head head;
- u64 timestamp;
- void (*func)(struct nouveau_alarm *);
-};
-
-static inline void
-nouveau_alarm_init(struct nouveau_alarm *alarm,
- void (*func)(struct nouveau_alarm *))
-{
- INIT_LIST_HEAD(&alarm->head);
- alarm->func = func;
-}
-
-bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
-void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
-void nouveau_timer_alarm_cancel(void *, struct nouveau_alarm *);
-
-#define NV_WAIT_DEFAULT 2000000000ULL
-#define nv_wait(o,a,m,v) \
- nouveau_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_ne(o,a,m,v) \
- nouveau_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_cb(o,c,d) \
- nouveau_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
-
-struct nouveau_timer {
- struct nouveau_subdev base;
- u64 (*read)(struct nouveau_timer *);
- void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *);
- void (*alarm_cancel)(struct nouveau_timer *, struct nouveau_alarm *);
-};
-
-static inline struct nouveau_timer *
-nouveau_timer(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_TIMER];
-}
-
-#define nouveau_timer_create(p,e,o,d) \
- nouveau_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \
- sizeof(**d), (void **)d)
-#define nouveau_timer_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_timer_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_timer_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
- struct nouveau_oclass *, int size, void **);
-
-extern struct nouveau_oclass nv04_timer_oclass;
-extern struct nouveau_oclass gk20a_timer_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
deleted file mode 100644
index c9509039f94b..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_VM_H__
-#define __NOUVEAU_VM_H__
-
-#include <core/object.h>
-#include <core/subdev.h>
-#include <core/device.h>
-#include <core/mm.h>
-
-struct nouveau_vm_pgt {
- struct nouveau_gpuobj *obj[2];
- u32 refcount[2];
-};
-
-struct nouveau_vm_pgd {
- struct list_head head;
- struct nouveau_gpuobj *obj;
-};
-
-struct nouveau_gpuobj;
-struct nouveau_mem;
-
-struct nouveau_vma {
- struct list_head head;
- int refcount;
- struct nouveau_vm *vm;
- struct nouveau_mm_node *node;
- u64 offset;
- u32 access;
-};
-
-struct nouveau_vm {
- struct nouveau_vmmgr *vmm;
- struct nouveau_mm mm;
- struct kref refcount;
-
- struct list_head pgd_list;
- atomic_t engref[NVDEV_SUBDEV_NR];
-
- struct nouveau_vm_pgt *pgt;
- u32 fpde;
- u32 lpde;
-};
-
-struct nouveau_vmmgr {
- struct nouveau_subdev base;
-
- u64 limit;
- u8 dma_bits;
- u32 pgt_bits;
- u8 spg_shift;
- u8 lpg_shift;
-
- int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
- u64 mm_offset, struct nouveau_vm **);
-
- void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
- struct nouveau_gpuobj *pgt[2]);
- void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt,
- u64 phys, u64 delta);
- void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
- void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
- void (*flush)(struct nouveau_vm *);
-};
-
-static inline struct nouveau_vmmgr *
-nouveau_vmmgr(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
-}
-
-#define nouveau_vmmgr_create(p,e,o,i,f,d) \
- nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
-#define nouveau_vmmgr_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_vmmgr_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_vmmgr_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
-#define _nouveau_vmmgr_init _nouveau_subdev_init
-#define _nouveau_vmmgr_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass nv04_vmmgr_oclass;
-extern struct nouveau_oclass nv41_vmmgr_oclass;
-extern struct nouveau_oclass nv44_vmmgr_oclass;
-extern struct nouveau_oclass nv50_vmmgr_oclass;
-extern struct nouveau_oclass nvc0_vmmgr_oclass;
-
-int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
- struct nouveau_vm **);
-void nv04_vmmgr_dtor(struct nouveau_object *);
-
-/* nouveau_vm.c */
-int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
- u64 mm_offset, u32 block, struct nouveau_vm **);
-int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
- u64 mm_offset, struct nouveau_vm **);
-int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
- struct nouveau_gpuobj *pgd);
-int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
- u32 access, struct nouveau_vma *);
-void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
-void nouveau_vm_unmap(struct nouveau_vma *);
-void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
deleted file mode 100644
index 67db5e58880d..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef __NOUVEAU_VOLT_H__
-#define __NOUVEAU_VOLT_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_voltage {
- u32 uv;
- u8 id;
-};
-
-struct nouveau_volt {
- struct nouveau_subdev base;
-
- int (*vid_get)(struct nouveau_volt *);
- int (*get)(struct nouveau_volt *);
- int (*vid_set)(struct nouveau_volt *, u8 vid);
- int (*set)(struct nouveau_volt *, u32 uv);
- int (*set_id)(struct nouveau_volt *, u8 id, int condition);
-
- u8 vid_mask;
- u8 vid_nr;
- struct {
- u32 uv;
- u8 vid;
- } vid[256];
-};
-
-static inline struct nouveau_volt *
-nouveau_volt(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VOLT];
-}
-
-#define nouveau_volt_create(p, e, o, d) \
- nouveau_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_volt_destroy(p) ({ \
- struct nouveau_volt *v = (p); \
- _nouveau_volt_dtor(nv_object(v)); \
-})
-#define nouveau_volt_init(p) ({ \
- struct nouveau_volt *v = (p); \
- _nouveau_volt_init(nv_object(v)); \
-})
-#define nouveau_volt_fini(p,s) \
- nouveau_subdev_fini((p), (s))
-
-int nouveau_volt_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_volt_dtor(struct nouveau_object *);
-int _nouveau_volt_init(struct nouveau_object *);
-#define _nouveau_volt_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass nv40_volt_oclass;
-extern struct nouveau_oclass gk20a_volt_oclass;
-
-int nouveau_voltgpio_init(struct nouveau_volt *);
-int nouveau_voltgpio_get(struct nouveau_volt *);
-int nouveau_voltgpio_set(struct nouveau_volt *, u8);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
deleted file mode 100644
index b1adc69efd88..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <core/object.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-
-#include "priv.h"
-
-struct nouveau_barobj {
- struct nouveau_object base;
- struct nouveau_vma vma;
- void __iomem *iomem;
-};
-
-static int
-nouveau_barobj_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_bar *bar = (void *)engine;
- struct nouveau_mem *mem = data;
- struct nouveau_barobj *barobj;
- int ret;
-
- ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
- *pobject = nv_object(barobj);
- if (ret)
- return ret;
-
- ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
- if (ret)
- return ret;
-
- barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
- (u32)barobj->vma.offset, mem->size << 12);
- if (!barobj->iomem) {
- nv_warn(bar, "PRAMIN ioremap failed\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void
-nouveau_barobj_dtor(struct nouveau_object *object)
-{
- struct nouveau_bar *bar = (void *)object->engine;
- struct nouveau_barobj *barobj = (void *)object;
- if (barobj->vma.node) {
- if (barobj->iomem)
- iounmap(barobj->iomem);
- bar->unmap(bar, &barobj->vma);
- }
- nouveau_object_destroy(&barobj->base);
-}
-
-static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nouveau_barobj *barobj = (void *)object;
- return ioread32_native(barobj->iomem + addr);
-}
-
-static void
-nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nouveau_barobj *barobj = (void *)object;
- iowrite32_native(data, barobj->iomem + addr);
-}
-
-static struct nouveau_oclass
-nouveau_barobj_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nouveau_barobj_ctor,
- .dtor = nouveau_barobj_dtor,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
- .rd32 = nouveau_barobj_rd32,
- .wr32 = nouveau_barobj_wr32,
- },
-};
-
-int
-nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
- struct nouveau_mem *mem, struct nouveau_object **pobject)
-{
- struct nouveau_object *engine = nv_object(bar);
- struct nouveau_object *gpuobj;
- int ret = nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
- mem, 0, &gpuobj);
- if (ret == 0)
- *pobject = gpuobj;
- return ret;
-}
-
-int
-nouveau_bar_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
-{
- struct nouveau_bar *bar;
- int ret;
-
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
- "bar", length, pobject);
- bar = *pobject;
- if (ret)
- return ret;
-
- return 0;
-}
-
-void
-nouveau_bar_destroy(struct nouveau_bar *bar)
-{
- nouveau_subdev_destroy(&bar->base);
-}
-
-void
-_nouveau_bar_dtor(struct nouveau_object *object)
-{
- struct nouveau_bar *bar = (void *)object;
- nouveau_bar_destroy(bar);
-}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
deleted file mode 100644
index 05a278bab247..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-
-#include "priv.h"
-
-struct nvc0_bar_priv_vm {
- struct nouveau_gpuobj *mem;
- struct nouveau_gpuobj *pgd;
- struct nouveau_vm *vm;
-};
-
-struct nvc0_bar_priv {
- struct nouveau_bar base;
- spinlock_t lock;
- struct nvc0_bar_priv_vm bar[2];
-};
-
-static int
-nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
- u32 flags, struct nouveau_vma *vma)
-{
- struct nvc0_bar_priv *priv = (void *)bar;
- int ret;
-
- ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
- if (ret)
- return ret;
-
- nouveau_vm_map(vma, mem);
- return 0;
-}
-
-static int
-nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
- u32 flags, struct nouveau_vma *vma)
-{
- struct nvc0_bar_priv *priv = (void *)bar;
- int ret;
-
- ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
- mem->page_shift, flags, vma);
- if (ret)
- return ret;
-
- nouveau_vm_map(vma, mem);
- return 0;
-}
-
-static void
-nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
-{
- nouveau_vm_unmap(vma);
- nouveau_vm_put(vma);
-}
-
-static int
-nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm,
- int bar_nr)
-{
- struct nouveau_device *device = nv_device(&priv->base);
- struct nouveau_vm *vm;
- resource_size_t bar_len;
- int ret;
-
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
- &bar_vm->mem);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
- &bar_vm->pgd);
- if (ret)
- return ret;
-
- bar_len = nv_device_resource_len(device, bar_nr);
-
- ret = nouveau_vm_new(device, 0, bar_len, 0, &vm);
- if (ret)
- return ret;
-
- atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
-
- /*
- * Bootstrap page table lookup.
- */
- if (bar_nr == 3) {
- ret = nouveau_gpuobj_new(nv_object(priv), NULL,
- (bar_len >> 12) * 8, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &vm->pgt[0].obj[0]);
- vm->pgt[0].refcount[0] = 1;
- if (ret)
- return ret;
- }
-
- ret = nouveau_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
- nouveau_vm_ref(NULL, &vm, NULL);
- if (ret)
- return ret;
-
- nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
- nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
- nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
- nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
-
- return 0;
-}
-
-int
-nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_device *device = nv_device(parent);
- struct nvc0_bar_priv *priv;
- bool has_bar3 = nv_device_resource_len(device, 3) != 0;
- int ret;
-
- ret = nouveau_bar_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- /* BAR3 */
- if (has_bar3) {
- ret = nvc0_bar_init_vm(priv, &priv->bar[0], 3);
- if (ret)
- return ret;
- priv->base.alloc = nouveau_bar_alloc;
- priv->base.kmap = nvc0_bar_kmap;
- }
-
- /* BAR1 */
- ret = nvc0_bar_init_vm(priv, &priv->bar[1], 1);
- if (ret)
- return ret;
-
- priv->base.umap = nvc0_bar_umap;
- priv->base.unmap = nvc0_bar_unmap;
- priv->base.flush = nv84_bar_flush;
- spin_lock_init(&priv->lock);
- return 0;
-}
-
-void
-nvc0_bar_dtor(struct nouveau_object *object)
-{
- struct nvc0_bar_priv *priv = (void *)object;
-
- nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
- nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
- nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
-
- if (priv->bar[0].vm) {
- nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
- nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
- }
- nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
- nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
-
- nouveau_bar_destroy(&priv->base);
-}
-
-int
-nvc0_bar_init(struct nouveau_object *object)
-{
- struct nvc0_bar_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_bar_init(&priv->base);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
- nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-
- nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
- if (priv->bar[0].mem)
- nv_wr32(priv, 0x001714,
- 0xc0000000 | priv->bar[0].mem->addr >> 12);
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_bar_oclass = {
- .handle = NV_SUBDEV(BAR, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_bar_ctor,
- .dtor = nvc0_bar_dtor,
- .init = nvc0_bar_init,
- .fini = _nouveau_bar_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
deleted file mode 100644
index 3ee8b1476d00..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NVKM_BAR_PRIV_H__
-#define __NVKM_BAR_PRIV_H__
-
-#include <subdev/bar.h>
-
-#define nouveau_bar_create(p,e,o,d) \
- nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_bar_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_bar_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void nouveau_bar_destroy(struct nouveau_bar *);
-
-void _nouveau_bar_dtor(struct nouveau_object *);
-#define _nouveau_bar_init _nouveau_subdev_init
-#define _nouveau_bar_fini _nouveau_subdev_fini
-
-int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
- struct nouveau_mem *, struct nouveau_object **);
-
-void nv84_bar_flush(struct nouveau_bar *);
-
-int nvc0_bar_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nvc0_bar_dtor(struct nouveau_object *);
-int nvc0_bar_init(struct nouveau_object *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
deleted file mode 100644
index 4d7602450a20..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __NVKM_BUS_NV04_H__
-#define __NVKM_BUS_NV04_H__
-
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
- struct nouveau_bus base;
-};
-
-int nv04_bus_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-int nv50_bus_init(struct nouveau_object *);
-void nv50_bus_intr(struct nouveau_subdev *);
-
-struct nv04_bus_impl {
- struct nouveau_oclass base;
- void (*intr)(struct nouveau_subdev *);
- int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
- u32 hwsq_size;
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
deleted file mode 100644
index f10917d789e8..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __NVKM_CLK_NV50_H__
-#define __NVKM_CLK_NV50_H__
-
-#include <subdev/bus.h>
-#include <subdev/bus/hwsq.h>
-#include <subdev/clock.h>
-
-struct nv50_clock_hwsq {
- struct hwsq base;
- struct hwsq_reg r_fifo;
- struct hwsq_reg r_spll[2];
- struct hwsq_reg r_nvpll[2];
- struct hwsq_reg r_divs;
- struct hwsq_reg r_mast;
-};
-
-struct nv50_clock_priv {
- struct nouveau_clock base;
- struct nv50_clock_hwsq hwsq;
-};
-
-int nv50_clock_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-
-struct nv50_clock_oclass {
- struct nouveau_oclass base;
- struct nouveau_clocks *domains;
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
deleted file mode 100644
index a45a1038b12f..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __NVKM_CLK_NVA3_H__
-#define __NVKM_CLK_NVA3_H__
-
-#include <subdev/clock.h>
-
-struct nva3_clock_info {
- u32 clk;
- u32 pll;
- enum {
- NVA3_HOST_277,
- NVA3_HOST_CLK,
- } host_out;
- u32 fb_delay;
-};
-
-int nva3_pll_info(struct nouveau_clock *, int, u32, u32,
- struct nva3_clock_info *);
-int nva3_clock_pre(struct nouveau_clock *clk, unsigned long *flags);
-void nva3_clock_post(struct nouveau_clock *clk, unsigned long *flags);
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
deleted file mode 100644
index 445b14c33a98..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __NOUVEAU_PLL_H__
-#define __NOUVEAU_PLL_H__
-
-int nv04_pll_calc(struct nouveau_subdev *, struct nvbios_pll *, u32 freq,
- int *N1, int *M1, int *N2, int *M2, int *P);
-int nva3_pll_calc(struct nouveau_subdev *, struct nvbios_pll *, u32 freq,
- int *N, int *fN, int *M, int *P);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
deleted file mode 100644
index 23470a57510c..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __NVKM_DEVINIT_NV04_H__
-#define __NVKM_DEVINIT_NV04_H__
-
-#include "priv.h"
-
-struct nv04_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
-
-int nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv04_devinit_dtor(struct nouveau_object *);
-int nv04_devinit_init(struct nouveau_object *);
-int nv04_devinit_fini(struct nouveau_object *, bool);
-int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
deleted file mode 100644
index f412bb7f780e..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __NVKM_DEVINIT_NV50_H__
-#define __NVKM_DEVINIT_NV50_H__
-
-#include "priv.h"
-
-struct nv50_devinit_priv {
- struct nouveau_devinit base;
- u32 r001540;
-};
-
-int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-int nv50_devinit_init(struct nouveau_object *);
-int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-int nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-u64 gm107_devinit_disable(struct nouveau_devinit *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
deleted file mode 100644
index cbcd51852472..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __NVKM_DEVINIT_PRIV_H__
-#define __NVKM_DEVINIT_PRIV_H__
-
-#include <subdev/bios.h>
-#include <subdev/bios/pll.h>
-#include <subdev/bios/init.h>
-#include <subdev/clock/pll.h>
-#include <subdev/devinit.h>
-
-struct nouveau_devinit_impl {
- struct nouveau_oclass base;
- void (*meminit)(struct nouveau_devinit *);
- int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
- u64 (*disable)(struct nouveau_devinit *);
- u32 (*mmio)(struct nouveau_devinit *, u32);
- int (*post)(struct nouveau_subdev *, bool);
-};
-
-#define nouveau_devinit_create(p,e,o,d) \
- nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_devinit_destroy(p) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_dtor(nv_object(d)); \
-})
-#define nouveau_devinit_init(p) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_init(nv_object(d)); \
-})
-#define nouveau_devinit_fini(p,s) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_fini(nv_object(d), (s)); \
-})
-
-int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_devinit_dtor(struct nouveau_object *);
-int _nouveau_devinit_init(struct nouveau_object *);
-int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
deleted file mode 100644
index 06ce71f87a74..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef __NVKM_FB_NV04_H__
-#define __NVKM_FB_NV04_H__
-
-#include "priv.h"
-
-struct nv04_fb_priv {
- struct nouveau_fb base;
-};
-
-int nv04_fb_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-
-struct nv04_fb_impl {
- struct nouveau_fb_impl base;
- struct {
- int regions;
- void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
- void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *);
- void (*fini)(struct nouveau_fb *, int i,
- struct nouveau_fb_tile *);
- void (*prog)(struct nouveau_fb *, int i,
- struct nouveau_fb_tile *);
- } tile;
-};
-
-void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int nv30_fb_init(struct nouveau_object *);
-void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *);
-
-int nv41_fb_init(struct nouveau_object *);
-void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int nv44_fb_init(struct nouveau_object *);
-void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
deleted file mode 100644
index 581f808527f2..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __NVKM_FB_NV40_H__
-#define __NVKM_FB_NV40_H__
-
-#include "priv.h"
-
-struct nv40_ram {
- struct nouveau_ram base;
- u32 ctrl;
- u32 coef;
-};
-
-
-int nv40_ram_calc(struct nouveau_fb *, u32);
-int nv40_ram_prog(struct nouveau_fb *);
-void nv40_ram_tidy(struct nouveau_fb *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
deleted file mode 100644
index c5e5a888c607..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __NVKM_FB_NV50_H__
-#define __NVKM_FB_NV50_H__
-
-#include "priv.h"
-
-struct nv50_fb_priv {
- struct nouveau_fb base;
- struct page *r100c08_page;
- dma_addr_t r100c08;
-};
-
-int nv50_fb_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv50_fb_dtor(struct nouveau_object *);
-int nv50_fb_init(struct nouveau_object *);
-
-struct nv50_fb_impl {
- struct nouveau_fb_impl base;
- u32 trap;
-};
-
-#define nv50_ram_create(p,e,o,d) \
- nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
-int nv50_ram_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int nv50_ram_get(struct nouveau_fb *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_mem **);
-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
-void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
-extern int nv50_fb_memtype[0x80];
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
deleted file mode 100644
index 705a06d755ad..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __NVKM_RAM_NVC0_H__
-#define __NVKM_RAM_NVC0_H__
-
-#include "priv.h"
-#include "nv50.h"
-
-struct nvc0_fb_priv {
- struct nouveau_fb base;
- struct page *r100c10_page;
- dma_addr_t r100c10;
-};
-
-int nvc0_fb_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nvc0_fb_dtor(struct nouveau_object *);
-int nvc0_fb_init(struct nouveau_object *);
-bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
-
-
-#define nvc0_ram_create(p,e,o,m,d) \
- nvc0_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
-int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32, int, void **);
-int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
- struct nouveau_mem **);
-void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
-
-int nve0_ram_init(struct nouveau_object*);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
deleted file mode 100644
index 283863f7aa9b..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef __NVKM_FB_PRIV_H__
-#define __NVKM_FB_PRIV_H__
-
-#include <subdev/fb.h>
-
-#define nouveau_ram_create(p,e,o,d) \
- nouveau_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
-#define nouveau_ram_destroy(p) \
- nouveau_object_destroy(&(p)->base)
-#define nouveau_ram_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_ram_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-#define nouveau_ram_create_(p,e,o,s,d) \
- nouveau_object_create_((p), (e), (o), 0, (s), (void **)d)
-#define _nouveau_ram_dtor nouveau_object_destroy
-#define _nouveau_ram_init nouveau_object_init
-#define _nouveau_ram_fini nouveau_object_fini
-
-extern struct nouveau_oclass nv04_ram_oclass;
-extern struct nouveau_oclass nv10_ram_oclass;
-extern struct nouveau_oclass nv1a_ram_oclass;
-extern struct nouveau_oclass nv20_ram_oclass;
-extern struct nouveau_oclass nv40_ram_oclass;
-extern struct nouveau_oclass nv41_ram_oclass;
-extern struct nouveau_oclass nv44_ram_oclass;
-extern struct nouveau_oclass nv49_ram_oclass;
-extern struct nouveau_oclass nv4e_ram_oclass;
-extern struct nouveau_oclass nv50_ram_oclass;
-extern struct nouveau_oclass nva3_ram_oclass;
-extern struct nouveau_oclass nvaa_ram_oclass;
-extern struct nouveau_oclass nvc0_ram_oclass;
-extern struct nouveau_oclass nve0_ram_oclass;
-extern struct nouveau_oclass gk20a_ram_oclass;
-extern struct nouveau_oclass gm107_ram_oclass;
-
-int nouveau_sddr2_calc(struct nouveau_ram *ram);
-int nouveau_sddr3_calc(struct nouveau_ram *ram);
-int nouveau_gddr3_calc(struct nouveau_ram *ram);
-int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
-
-#define nouveau_fb_create(p,e,c,d) \
- nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
-#define nouveau_fb_destroy(p) ({ \
- struct nouveau_fb *pfb = (p); \
- _nouveau_fb_dtor(nv_object(pfb)); \
-})
-#define nouveau_fb_init(p) ({ \
- struct nouveau_fb *pfb = (p); \
- _nouveau_fb_init(nv_object(pfb)); \
-})
-#define nouveau_fb_fini(p,s) ({ \
- struct nouveau_fb *pfb = (p); \
- _nouveau_fb_fini(nv_object(pfb), (s)); \
-})
-
-int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_fb_dtor(struct nouveau_object *);
-int _nouveau_fb_init(struct nouveau_object *);
-int _nouveau_fb_fini(struct nouveau_object *, bool);
-
-struct nouveau_fb_impl {
- struct nouveau_oclass base;
- struct nouveau_oclass *ram;
- bool (*memtype)(struct nouveau_fb *, u32);
-};
-
-bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
-bool nv50_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
-
-struct nouveau_bios;
-int nouveau_fb_bios_memtype(struct nouveau_bios *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h
deleted file mode 100644
index d2085411a5cb..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __NVKM_FUSE_PRIV_H__
-#define __NVKM_FUSE_PRIV_H__
-
-#include <subdev/fuse.h>
-
-int _nouveau_fuse_init(struct nouveau_object *object);
-void _nouveau_fuse_dtor(struct nouveau_object *object);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
deleted file mode 100644
index bff98b86e2b5..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __NVKM_GPIO_H__
-#define __NVKM_GPIO_H__
-
-#include <subdev/gpio.h>
-
-#define nouveau_gpio_create(p,e,o,d) \
- nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_gpio_destroy(p) ({ \
- struct nouveau_gpio *gpio = (p); \
- _nouveau_gpio_dtor(nv_object(gpio)); \
-})
-#define nouveau_gpio_init(p) ({ \
- struct nouveau_gpio *gpio = (p); \
- _nouveau_gpio_init(nv_object(gpio)); \
-})
-#define nouveau_gpio_fini(p,s) ({ \
- struct nouveau_gpio *gpio = (p); \
- _nouveau_gpio_fini(nv_object(gpio), (s)); \
-})
-
-int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int _nouveau_gpio_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void _nouveau_gpio_dtor(struct nouveau_object *);
-int _nouveau_gpio_init(struct nouveau_object *);
-int _nouveau_gpio_fini(struct nouveau_object *, bool);
-
-struct nouveau_gpio_impl {
- struct nouveau_oclass base;
- int lines;
-
- /* read and ack pending interrupts, returning only data
- * for lines that have not been masked off, while still
- * performing the ack for anything that was pending.
- */
- void (*intr_stat)(struct nouveau_gpio *, u32 *, u32 *);
-
- /* mask on/off interrupts for hi/lo transitions on a
- * given set of gpio lines
- */
- void (*intr_mask)(struct nouveau_gpio *, u32, u32, u32);
-
- /* configure gpio direction and output value */
- int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
-
- /* sense current state of given gpio line */
- int (*sense)(struct nouveau_gpio *, int line);
-
- /*XXX*/
- void (*reset)(struct nouveau_gpio *, u8);
-};
-
-void nv50_gpio_reset(struct nouveau_gpio *, u8);
-int nv50_gpio_drive(struct nouveau_gpio *, int, int, int);
-int nv50_gpio_sense(struct nouveau_gpio *, int);
-
-void nv94_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
-void nv94_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
-
-void nvd0_gpio_reset(struct nouveau_gpio *, u8);
-int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
-int nvd0_gpio_sense(struct nouveau_gpio *, int);
-
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
deleted file mode 100644
index 9ef965692fb1..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __NV50_I2C_H__
-#define __NV50_I2C_H__
-
-#include "priv.h"
-
-struct nv50_i2c_priv {
- struct nouveau_i2c base;
-};
-
-struct nv50_i2c_port {
- struct nouveau_i2c_port base;
- u32 addr;
- u32 state;
-};
-
-extern const u32 nv50_i2c_addr[];
-extern const int nv50_i2c_addr_nr;
-int nv50_i2c_port_init(struct nouveau_object *);
-int nv50_i2c_sense_scl(struct nouveau_i2c_port *);
-int nv50_i2c_sense_sda(struct nouveau_i2c_port *);
-void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state);
-void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state);
-
-int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv94_i2c_acquire(struct nouveau_i2c_port *);
-void nv94_i2c_release(struct nouveau_i2c_port *);
-
-int nvd0_i2c_port_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
deleted file mode 100644
index 4fe7ae3fde4e..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef __NVKM_I2C_H__
-#define __NVKM_I2C_H__
-
-#include <subdev/i2c.h>
-
-extern struct nouveau_oclass nv04_i2c_pad_oclass;
-extern struct nouveau_oclass nv94_i2c_pad_oclass;
-extern struct nouveau_oclass gm204_i2c_pad_oclass;
-
-#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
- nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
- sizeof(**d), (void **)d)
-#define nouveau_i2c_port_destroy(p) ({ \
- struct nouveau_i2c_port *port = (p); \
- _nouveau_i2c_port_dtor(nv_object(i2c)); \
-})
-#define nouveau_i2c_port_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_i2c_port_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u8,
- const struct i2c_algorithm *,
- const struct nouveau_i2c_func *,
- int, void **);
-void _nouveau_i2c_port_dtor(struct nouveau_object *);
-#define _nouveau_i2c_port_init nouveau_object_init
-int _nouveau_i2c_port_fini(struct nouveau_object *, bool);
-
-#define nouveau_i2c_create(p,e,o,d) \
- nouveau_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_i2c_destroy(p) ({ \
- struct nouveau_i2c *i2c = (p); \
- _nouveau_i2c_dtor(nv_object(i2c)); \
-})
-#define nouveau_i2c_init(p) ({ \
- struct nouveau_i2c *i2c = (p); \
- _nouveau_i2c_init(nv_object(i2c)); \
-})
-#define nouveau_i2c_fini(p,s) ({ \
- struct nouveau_i2c *i2c = (p); \
- _nouveau_i2c_fini(nv_object(i2c), (s)); \
-})
-
-int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int _nouveau_i2c_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void _nouveau_i2c_dtor(struct nouveau_object *);
-int _nouveau_i2c_init(struct nouveau_object *);
-int _nouveau_i2c_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nouveau_anx9805_sclass[];
-extern struct nouveau_oclass nvd0_i2c_sclass[];
-
-extern const struct i2c_algorithm nouveau_i2c_bit_algo;
-extern const struct i2c_algorithm nouveau_i2c_aux_algo;
-
-struct nouveau_i2c_impl {
- struct nouveau_oclass base;
-
- /* supported i2c port classes */
- struct nouveau_oclass *sclass;
- struct nouveau_oclass *pad_x;
- struct nouveau_oclass *pad_s;
-
- /* number of native dp aux channels present */
- int aux;
-
- /* read and ack pending interrupts, returning only data
- * for ports that have not been masked off, while still
- * performing the ack for anything that was pending.
- */
- void (*aux_stat)(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
-
- /* mask on/off interrupt types for a given set of auxch
- */
- void (*aux_mask)(struct nouveau_i2c *, u32, u32, u32);
-};
-
-void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
-void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
-
-void nve0_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
-void nve0_aux_mask(struct nouveau_i2c *, u32, u32, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
deleted file mode 100644
index 095fbc6fc099..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __NV04_INSTMEM_H__
-#define __NV04_INSTMEM_H__
-
-#include <core/gpuobj.h>
-#include <core/ramht.h>
-#include <core/mm.h>
-
-#include "priv.h"
-
-extern struct nouveau_instobj_impl nv04_instobj_oclass;
-
-struct nv04_instmem_priv {
- struct nouveau_instmem base;
-
- void __iomem *iomem;
- struct nouveau_mm heap;
-
- struct nouveau_gpuobj *vbios;
- struct nouveau_ramht *ramht;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
-
-static inline struct nv04_instmem_priv *
-nv04_instmem(void *obj)
-{
- return (void *)nouveau_instmem(obj);
-}
-
-struct nv04_instobj_priv {
- struct nouveau_instobj base;
- struct nouveau_mm_node *mem;
-};
-
-void nv04_instmem_dtor(struct nouveau_object *);
-
-int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
- u32 size, u32 align, struct nouveau_object **pobject);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
deleted file mode 100644
index 8d67dedc5bb2..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __NVKM_INSTMEM_PRIV_H__
-#define __NVKM_INSTMEM_PRIV_H__
-
-#include <subdev/instmem.h>
-
-struct nouveau_instobj_impl {
- struct nouveau_oclass base;
-};
-
-struct nouveau_instobj_args {
- u32 size;
- u32 align;
-};
-
-#define nouveau_instobj_create(p,e,o,d) \
- nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instobj_destroy(p) ({ \
- struct nouveau_instobj *iobj = (p); \
- _nouveau_instobj_dtor(nv_object(iobj)); \
-})
-#define nouveau_instobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_instobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_instobj_dtor(struct nouveau_object *);
-#define _nouveau_instobj_init nouveau_object_init
-#define _nouveau_instobj_fini nouveau_object_fini
-
-struct nouveau_instmem_impl {
- struct nouveau_oclass base;
- struct nouveau_oclass *instobj;
-};
-
-#define nouveau_instmem_create(p,e,o,d) \
- nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instmem_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_instmem_init(p) ({ \
- struct nouveau_instmem *imem = (p); \
- _nouveau_instmem_init(nv_object(imem)); \
-})
-#define nouveau_instmem_fini(p,s) ({ \
- struct nouveau_instmem *imem = (p); \
- _nouveau_instmem_fini(nv_object(imem), (s)); \
-})
-
-int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-#define _nouveau_instmem_dtor _nouveau_subdev_dtor
-int _nouveau_instmem_init(struct nouveau_object *);
-int _nouveau_instmem_fini(struct nouveau_object *, bool);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
deleted file mode 100644
index 4d9ea46c47c2..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __NVKM_MC_NV04_H__
-#define __NVKM_MC_NV04_H__
-
-#include "priv.h"
-
-struct nv04_mc_priv {
- struct nouveau_mc base;
-};
-
-int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-
-extern const struct nouveau_mc_intr nv04_mc_intr[];
-int nv04_mc_init(struct nouveau_object *);
-void nv40_mc_msi_rearm(struct nouveau_mc *);
-int nv44_mc_init(struct nouveau_object *object);
-int nv50_mc_init(struct nouveau_object *);
-extern const struct nouveau_mc_intr nv50_mc_intr[];
-extern const struct nouveau_mc_intr nvc0_mc_intr[];
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
deleted file mode 100644
index 911e66392587..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __NVKM_MC_PRIV_H__
-#define __NVKM_MC_PRIV_H__
-
-#include <subdev/mc.h>
-
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_mc_destroy(p) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
-})
-#define nouveau_mc_init(p) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
-})
-#define nouveau_mc_fini(p,s) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
-})
-
-int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_mc_dtor(struct nouveau_object *);
-int _nouveau_mc_init(struct nouveau_object *);
-int _nouveau_mc_fini(struct nouveau_object *, bool);
-
-struct nouveau_mc_intr {
- u32 stat;
- u32 unit;
-};
-
-struct nouveau_mc_oclass {
- struct nouveau_oclass base;
- const struct nouveau_mc_intr *intr;
- void (*msi_rearm)(struct nouveau_mc *);
- void (*unk260)(struct nouveau_mc *, u32);
-};
-
-void nvc0_mc_unk260(struct nouveau_mc *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
deleted file mode 100644
index 5e0be0c591ca..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __NVMXM_MXMS_H__
-#define __NVMXM_MXMS_H__
-
-struct mxms_odev {
- u8 outp_type;
- u8 conn_type;
- u8 ddc_port;
- u8 dig_conn;
-};
-
-void mxms_output_device(struct nouveau_mxm *, u8 *, struct mxms_odev *);
-
-u16 mxms_version(struct nouveau_mxm *);
-u16 mxms_headerlen(struct nouveau_mxm *);
-u16 mxms_structlen(struct nouveau_mxm *);
-bool mxms_checksum(struct nouveau_mxm *);
-bool mxms_valid(struct nouveau_mxm *);
-
-bool mxms_foreach(struct nouveau_mxm *, u8,
- bool (*)(struct nouveau_mxm *, u8 *, void *), void *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
deleted file mode 100644
index 0ab55f27ec45..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/timer.h>
-
-#include "priv.h"
-
-static void
-nouveau_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
-{
- const struct nvkm_pwr_impl *impl = (void *)nv_oclass(ppwr);
- if (impl->pgob)
- impl->pgob(ppwr, enable);
-}
-
-static int
-nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
- u32 process, u32 message, u32 data0, u32 data1)
-{
- struct nouveau_subdev *subdev = nv_subdev(ppwr);
- u32 addr;
-
- /* wait for a free slot in the fifo */
- addr = nv_rd32(ppwr, 0x10a4a0);
- if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
- return -EBUSY;
-
- /* we currently only support a single process at a time waiting
- * on a synchronous reply, take the PPWR mutex and tell the
- * receive handler what we're waiting for
- */
- if (reply) {
- mutex_lock(&subdev->mutex);
- ppwr->recv.message = message;
- ppwr->recv.process = process;
- }
-
- /* acquire data segment access */
- do {
- nv_wr32(ppwr, 0x10a580, 0x00000001);
- } while (nv_rd32(ppwr, 0x10a580) != 0x00000001);
-
- /* write the packet */
- nv_wr32(ppwr, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
- ppwr->send.base));
- nv_wr32(ppwr, 0x10a1c4, process);
- nv_wr32(ppwr, 0x10a1c4, message);
- nv_wr32(ppwr, 0x10a1c4, data0);
- nv_wr32(ppwr, 0x10a1c4, data1);
- nv_wr32(ppwr, 0x10a4a0, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nv_wr32(ppwr, 0x10a580, 0x00000000);
-
- /* wait for reply, if requested */
- if (reply) {
- wait_event(ppwr->recv.wait, (ppwr->recv.process == 0));
- reply[0] = ppwr->recv.data[0];
- reply[1] = ppwr->recv.data[1];
- mutex_unlock(&subdev->mutex);
- }
-
- return 0;
-}
-
-static void
-nouveau_pwr_recv(struct work_struct *work)
-{
- struct nouveau_pwr *ppwr =
- container_of(work, struct nouveau_pwr, recv.work);
- u32 process, message, data0, data1;
-
- /* nothing to do if GET == PUT */
- u32 addr = nv_rd32(ppwr, 0x10a4cc);
- if (addr == nv_rd32(ppwr, 0x10a4c8))
- return;
-
- /* acquire data segment access */
- do {
- nv_wr32(ppwr, 0x10a580, 0x00000002);
- } while (nv_rd32(ppwr, 0x10a580) != 0x00000002);
-
- /* read the packet */
- nv_wr32(ppwr, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
- ppwr->recv.base));
- process = nv_rd32(ppwr, 0x10a1c4);
- message = nv_rd32(ppwr, 0x10a1c4);
- data0 = nv_rd32(ppwr, 0x10a1c4);
- data1 = nv_rd32(ppwr, 0x10a1c4);
- nv_wr32(ppwr, 0x10a4cc, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nv_wr32(ppwr, 0x10a580, 0x00000000);
-
- /* wake process if it's waiting on a synchronous reply */
- if (ppwr->recv.process) {
- if (process == ppwr->recv.process &&
- message == ppwr->recv.message) {
- ppwr->recv.data[0] = data0;
- ppwr->recv.data[1] = data1;
- ppwr->recv.process = 0;
- wake_up(&ppwr->recv.wait);
- return;
- }
- }
-
-	/* right now there are no other expected responses from the engine,
-	 * so assume that any unexpected message is an error.
-	 */
- nv_warn(ppwr, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
- (char)((process & 0x000000ff) >> 0),
- (char)((process & 0x0000ff00) >> 8),
- (char)((process & 0x00ff0000) >> 16),
- (char)((process & 0xff000000) >> 24),
- process, message, data0, data1);
-}
-
-static void
-nouveau_pwr_intr(struct nouveau_subdev *subdev)
-{
- struct nouveau_pwr *ppwr = (void *)subdev;
- u32 disp = nv_rd32(ppwr, 0x10a01c);
- u32 intr = nv_rd32(ppwr, 0x10a008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000020) {
- u32 stat = nv_rd32(ppwr, 0x10a16c);
- if (stat & 0x80000000) {
- nv_error(ppwr, "UAS fault at 0x%06x addr 0x%08x\n",
- stat & 0x00ffffff, nv_rd32(ppwr, 0x10a168));
- nv_wr32(ppwr, 0x10a16c, 0x00000000);
- intr &= ~0x00000020;
- }
- }
-
- if (intr & 0x00000040) {
- schedule_work(&ppwr->recv.work);
- nv_wr32(ppwr, 0x10a004, 0x00000040);
- intr &= ~0x00000040;
- }
-
- if (intr & 0x00000080) {
- nv_info(ppwr, "wr32 0x%06x 0x%08x\n", nv_rd32(ppwr, 0x10a7a0),
- nv_rd32(ppwr, 0x10a7a4));
- nv_wr32(ppwr, 0x10a004, 0x00000080);
- intr &= ~0x00000080;
- }
-
- if (intr) {
- nv_error(ppwr, "intr 0x%08x\n", intr);
- nv_wr32(ppwr, 0x10a004, intr);
- }
-}
-
-int
-_nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
-{
- struct nouveau_pwr *ppwr = (void *)object;
-
- nv_wr32(ppwr, 0x10a014, 0x00000060);
- flush_work(&ppwr->recv.work);
-
- return nouveau_subdev_fini(&ppwr->base, suspend);
-}
-
-int
-_nouveau_pwr_init(struct nouveau_object *object)
-{
- const struct nvkm_pwr_impl *impl = (void *)object->oclass;
- struct nouveau_pwr *ppwr = (void *)object;
- int ret, i;
-
- ret = nouveau_subdev_init(&ppwr->base);
- if (ret)
- return ret;
-
- nv_subdev(ppwr)->intr = nouveau_pwr_intr;
- ppwr->message = nouveau_pwr_send;
- ppwr->pgob = nouveau_pwr_pgob;
-
- /* prevent previous ucode from running, wait for idle, reset */
- nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
- nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
- nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
- nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);
- nv_rd32(ppwr, 0x000200);
- nv_wait(ppwr, 0x10a10c, 0x00000006, 0x00000000);
-
- /* upload data segment */
- nv_wr32(ppwr, 0x10a1c0, 0x01000000);
- for (i = 0; i < impl->data.size / 4; i++)
- nv_wr32(ppwr, 0x10a1c4, impl->data.data[i]);
-
- /* upload code segment */
- nv_wr32(ppwr, 0x10a180, 0x01000000);
- for (i = 0; i < impl->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(ppwr, 0x10a188, i >> 6);
- nv_wr32(ppwr, 0x10a184, impl->code.data[i]);
- }
-
- /* start it running */
- nv_wr32(ppwr, 0x10a10c, 0x00000000);
- nv_wr32(ppwr, 0x10a104, 0x00000000);
- nv_wr32(ppwr, 0x10a100, 0x00000002);
-
- /* wait for valid host->pwr ring configuration */
- if (!nv_wait_ne(ppwr, 0x10a4d0, 0xffffffff, 0x00000000))
- return -EBUSY;
- ppwr->send.base = nv_rd32(ppwr, 0x10a4d0) & 0x0000ffff;
- ppwr->send.size = nv_rd32(ppwr, 0x10a4d0) >> 16;
-
- /* wait for valid pwr->host ring configuration */
- if (!nv_wait_ne(ppwr, 0x10a4dc, 0xffffffff, 0x00000000))
- return -EBUSY;
- ppwr->recv.base = nv_rd32(ppwr, 0x10a4dc) & 0x0000ffff;
- ppwr->recv.size = nv_rd32(ppwr, 0x10a4dc) >> 16;
-
- nv_wr32(ppwr, 0x10a010, 0x000000e0);
- return 0;
-}
-
-int
-nouveau_pwr_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
-{
- struct nouveau_pwr *ppwr;
- int ret;
-
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PPWR",
- "pwr", length, pobject);
- ppwr = *pobject;
- if (ret)
- return ret;
-
- INIT_WORK(&ppwr->recv.work, nouveau_pwr_recv);
- init_waitqueue_head(&ppwr->recv.wait);
- return 0;
-}
-
-int
-_nouveau_pwr_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_pwr *ppwr;
- int ret = nouveau_pwr_create(parent, engine, oclass, &ppwr);
- *pobject = nv_object(ppwr);
- return ret;
-}
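The host-to-PMU queue manipulated by the nouveau_pwr_send()/nouveau_pwr_recv() pair removed above is a ring with a wrap bit: the low three bits of the PUT/GET values (registers 0x10a4a0/0x10a4b0 in the diff) select one of eight four-word slots, and bit 3 distinguishes "full" from "empty", which is why the free-slot wait compares GET against PUT ^ 8 and the pointers advance as (ptr + 1) & 0x0f. The stand-alone C sketch below models only that index arithmetic; it is not kernel code, and the packet payload is reduced to a placeholder value.

/*
 * Stand-alone model of the 16-value ring indices used by the deleted
 * nouveau_pwr_send()/nouveau_pwr_recv().  Eight 16-byte slots, bit 3 of
 * PUT/GET acts as a wrap bit:
 *   empty : GET == PUT
 *   full  : GET == (PUT ^ 8)
 * The 0xcafe payload and the msgq_send() return codes are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msgq { uint32_t put, get; };	/* mirrors 0x10a4a0 / 0x10a4b0 */

static bool msgq_full(const struct msgq *q)  { return q->get == (q->put ^ 8); }
static bool msgq_empty(const struct msgq *q) { return q->get == q->put; }

static int msgq_send(struct msgq *q, uint32_t slot[8])
{
	if (msgq_full(q))
		return -1;			/* nv_wait_ne() would poll here */
	slot[q->put & 0x07] = 0xcafe;		/* stand-in for the 4-word packet */
	q->put = (q->put + 1) & 0x0f;		/* same update as the kernel code */
	return 0;
}

int main(void)
{
	struct msgq q = { 0, 0 };
	uint32_t slots[8] = { 0 };
	int sent = 0;

	while (msgq_send(&q, slots) == 0)
		sent++;
	printf("queued %d messages before the ring filled (expect 8)\n", sent);
	printf("empty=%d full=%d\n", msgq_empty(&q), msgq_full(&q));
	return 0;
}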
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
deleted file mode 100644
index 7a9299d7159f..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef __NVKM_PWR_MEMX_H__
-#define __NVKM_PWR_MEMX_H__
-
-#include "priv.h"
-
-struct nouveau_memx {
- struct nouveau_pwr *ppwr;
- u32 base;
- u32 size;
- struct {
- u32 mthd;
- u32 size;
- u32 data[64];
- } c;
-};
-
-static void
-memx_out(struct nouveau_memx *memx)
-{
- struct nouveau_pwr *ppwr = memx->ppwr;
- int i;
-
- if (memx->c.mthd) {
- nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
- for (i = 0; i < memx->c.size; i++)
- nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
- memx->c.mthd = 0;
- memx->c.size = 0;
- }
-}
-
-static void
-memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
-{
- if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
- (memx->c.mthd && memx->c.mthd != mthd))
- memx_out(memx);
- memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
- memx->c.size += size;
- memx->c.mthd = mthd;
-}
-
-int
-nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
-{
- struct nouveau_memx *memx;
- u32 reply[2];
- int ret;
-
- ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
- MEMX_INFO_DATA, 0);
- if (ret)
- return ret;
-
- memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
- if (!memx)
- return -ENOMEM;
- memx->ppwr = ppwr;
- memx->base = reply[0];
- memx->size = reply[1];
-
- /* acquire data segment access */
- do {
- nv_wr32(ppwr, 0x10a580, 0x00000003);
- } while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
- nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
-
- return 0;
-}
-
-int
-nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
-{
- struct nouveau_memx *memx = *pmemx;
- struct nouveau_pwr *ppwr = memx->ppwr;
- u32 finish, reply[2];
-
- /* flush the cache... */
- memx_out(memx);
-
- /* release data segment access */
- finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
- nv_wr32(ppwr, 0x10a580, 0x00000000);
-
- /* call MEMX process to execute the script, and wait for reply */
- if (exec) {
- ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_EXEC,
- memx->base, finish);
- }
-
- nv_debug(memx->ppwr, "Exec took %uns, PPWR_IN %08x\n",
- reply[0], reply[1]);
- kfree(memx);
- return 0;
-}
-
-void
-nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
-{
- nv_debug(memx->ppwr, "R[%06x] = 0x%08x\n", addr, data);
- memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
-}
-
-void
-nouveau_memx_wait(struct nouveau_memx *memx,
- u32 addr, u32 mask, u32 data, u32 nsec)
-{
- nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
- addr, mask, data, nsec);
- memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
- memx_out(memx); /* fuc can't handle multiple */
-}
-
-void
-nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
-{
- nv_debug(memx->ppwr, " DELAY = %d ns\n", nsec);
- memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
- memx_out(memx); /* fuc can't handle multiple */
-}
-
-void
-nouveau_memx_wait_vblank(struct nouveau_memx *memx)
-{
- struct nouveau_pwr *ppwr = memx->ppwr;
- u32 heads, x, y, px = 0;
- int i, head_sync;
-
- if (nv_device(ppwr)->chipset < 0xd0) {
- heads = nv_rd32(ppwr, 0x610050);
- for (i = 0; i < 2; i++) {
- /* Heuristic: sync to head with biggest resolution */
- if (heads & (2 << (i << 3))) {
- x = nv_rd32(ppwr, 0x610b40 + (0x540 * i));
- y = (x & 0xffff0000) >> 16;
- x &= 0x0000ffff;
- if ((x * y) > px) {
- px = (x * y);
- head_sync = i;
- }
- }
- }
- }
-
- if (px == 0) {
- nv_debug(memx->ppwr, "WAIT VBLANK !NO ACTIVE HEAD\n");
- return;
- }
-
- nv_debug(memx->ppwr, "WAIT VBLANK HEAD%d\n", head_sync);
- memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
- memx_out(memx); /* fuc can't handle multiple */
-}
-
-void
-nouveau_memx_train(struct nouveau_memx *memx)
-{
- nv_debug(memx->ppwr, " MEM TRAIN\n");
- memx_cmd(memx, MEMX_TRAIN, 0, NULL);
-}
-
-int
-nouveau_memx_train_result(struct nouveau_pwr *ppwr, u32 *res, int rsize)
-{
- u32 reply[2], base, size, i;
- int ret;
-
- ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
- MEMX_INFO_TRAIN, 0);
- if (ret)
- return ret;
-
- base = reply[0];
- size = reply[1] >> 2;
- if (size > rsize)
- return -ENOMEM;
-
- /* read the packet */
- nv_wr32(ppwr, 0x10a1c0, 0x02000000 | base);
-
- for (i = 0; i < size; i++)
- res[i] = nv_rd32(ppwr, 0x10a1c4);
-
- return 0;
-}
-
-void
-nouveau_memx_block(struct nouveau_memx *memx)
-{
- nv_debug(memx->ppwr, " HOST BLOCKED\n");
- memx_cmd(memx, MEMX_ENTER, 0, NULL);
-}
-
-void
-nouveau_memx_unblock(struct nouveau_memx *memx)
-{
- nv_debug(memx->ppwr, " HOST UNBLOCKED\n");
- memx_cmd(memx, MEMX_LEAVE, 0, NULL);
-}
-
-#endif
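memx_cmd()/memx_out() in the file removed above batch script opcodes before handing them to the PMU: words for the same method accumulate in a 64-entry scratch buffer and are emitted as one packet headed by (count << 16) | method, with a flush forced when the method changes or the buffer would overflow. The user-space model below (not kernel code; emit() stands in for the nv_wr32(ppwr, 0x10a1c4, ...) writes and the method numbers in main() are made up) illustrates that batching.

/* Stand-alone model of the deleted memx_cmd()/memx_out() coalescing. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MEMX_SCRATCH 64

struct memx_model {
	uint32_t mthd;
	uint32_t size;
	uint32_t data[MEMX_SCRATCH];
};

static void emit(uint32_t word) { printf("  0x%08x\n", (unsigned)word); }

static void memx_flush(struct memx_model *m)
{
	uint32_t i;
	if (!m->mthd)
		return;
	emit((m->size << 16) | m->mthd);	/* packet header */
	for (i = 0; i < m->size; i++)
		emit(m->data[i]);
	m->mthd = 0;
	m->size = 0;
}

static void memx_queue(struct memx_model *m, uint32_t mthd,
		       uint32_t size, const uint32_t *data)
{
	/* flush on overflow or method change, as in the original memx_cmd() */
	if (m->size + size >= MEMX_SCRATCH || (m->mthd && m->mthd != mthd))
		memx_flush(m);
	memcpy(&m->data[m->size], data, size * sizeof(*data));
	m->size += size;
	m->mthd = mthd;
}

int main(void)
{
	struct memx_model m = { 0 };
	uint32_t wr[2]  = { 0x100200, 0x00002000 };	/* pretend "write register" */
	uint32_t dly[1] = { 1000 };			/* pretend "delay" */

	memx_queue(&m, 0x3, 2, wr);	/* coalesced ...          */
	memx_queue(&m, 0x3, 2, wr);	/* ... with this one      */
	memx_queue(&m, 0x4, 1, dly);	/* method change flushes  */
	memx_flush(&m);			/* final flush            */
	return 0;
}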
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
deleted file mode 100644
index 3814a341db32..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __NVKM_PWR_PRIV_H__
-#define __NVKM_PWR_PRIV_H__
-
-#include <subdev/pwr.h>
-#include <subdev/pwr/fuc/os.h>
-
-#define nouveau_pwr_create(p, e, o, d) \
- nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_pwr_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_pwr_init(p) ({ \
- struct nouveau_pwr *_ppwr = (p); \
- _nouveau_pwr_init(nv_object(_ppwr)); \
-})
-#define nouveau_pwr_fini(p,s) ({ \
- struct nouveau_pwr *_ppwr = (p); \
- _nouveau_pwr_fini(nv_object(_ppwr), (s)); \
-})
-
-int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-
-int _nouveau_pwr_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-#define _nouveau_pwr_dtor _nouveau_subdev_dtor
-int _nouveau_pwr_init(struct nouveau_object *);
-int _nouveau_pwr_fini(struct nouveau_object *, bool);
-
-struct nvkm_pwr_impl {
- struct nouveau_oclass base;
- struct {
- u32 *data;
- u32 size;
- } code;
- struct {
- u32 *data;
- u32 size;
- } data;
-
- void (*pgob)(struct nouveau_pwr *, bool);
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
deleted file mode 100644
index 7dba8c281a0b..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef __NVTHERM_PRIV_H__
-#define __NVTHERM_PRIV_H__
-
-/*
- * Copyright 2012 The Nouveau community
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Martin Peres
- */
-
-#include <subdev/therm.h>
-
-#include <subdev/bios/extdev.h>
-#include <subdev/bios/gpio.h>
-#include <subdev/bios/perf.h>
-#include <subdev/bios/therm.h>
-#include <subdev/timer.h>
-
-struct nouveau_fan {
- struct nouveau_therm *parent;
- const char *type;
-
- struct nvbios_therm_fan bios;
- struct nvbios_perf_fan perf;
-
- struct nouveau_alarm alarm;
- spinlock_t lock;
- int percent;
-
- int (*get)(struct nouveau_therm *therm);
- int (*set)(struct nouveau_therm *therm, int percent);
-
- struct dcb_gpio_func tach;
-};
-
-enum nouveau_therm_thrs_direction {
- NOUVEAU_THERM_THRS_FALLING = 0,
- NOUVEAU_THERM_THRS_RISING = 1
-};
-
-enum nouveau_therm_thrs_state {
- NOUVEAU_THERM_THRS_LOWER = 0,
- NOUVEAU_THERM_THRS_HIGHER = 1
-};
-
-enum nouveau_therm_thrs {
- NOUVEAU_THERM_THRS_FANBOOST = 0,
- NOUVEAU_THERM_THRS_DOWNCLOCK = 1,
- NOUVEAU_THERM_THRS_CRITICAL = 2,
- NOUVEAU_THERM_THRS_SHUTDOWN = 3,
- NOUVEAU_THERM_THRS_NR
-};
-
-struct nouveau_therm_priv {
- struct nouveau_therm base;
-
- /* automatic thermal management */
- struct nouveau_alarm alarm;
- spinlock_t lock;
- struct nouveau_therm_trip_point *last_trip;
- int mode;
- int cstate;
- int suspend;
-
- /* bios */
- struct nvbios_therm_sensor bios_sensor;
-
- /* fan priv */
- struct nouveau_fan *fan;
-
- /* alarms priv */
- struct {
- spinlock_t alarm_program_lock;
- struct nouveau_alarm therm_poll_alarm;
- enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR];
- void (*program_alarms)(struct nouveau_therm *);
- } sensor;
-
- /* what should be done if the card overheats */
- struct {
- void (*downclock)(struct nouveau_therm *, bool active);
- void (*pause)(struct nouveau_therm *, bool active);
- } emergency;
-
- /* ic */
- struct i2c_client *ic;
-};
-
-int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode);
-int nouveau_therm_attr_get(struct nouveau_therm *therm,
- enum nouveau_therm_attr_type type);
-int nouveau_therm_attr_set(struct nouveau_therm *therm,
- enum nouveau_therm_attr_type type, int value);
-
-void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
-
-int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
-
-int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
-int nouveau_therm_fan_init(struct nouveau_therm *therm);
-int nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend);
-int nouveau_therm_fan_get(struct nouveau_therm *therm);
-int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
-int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
-int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
-
-int nouveau_therm_fan_sense(struct nouveau_therm *therm);
-
-int nouveau_therm_preinit(struct nouveau_therm *);
-
-int nouveau_therm_sensor_init(struct nouveau_therm *therm);
-int nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend);
-void nouveau_therm_sensor_preinit(struct nouveau_therm *);
-void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
- enum nouveau_therm_thrs thrs,
- enum nouveau_therm_thrs_state st);
-enum nouveau_therm_thrs_state
-nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
- enum nouveau_therm_thrs thrs);
-void nouveau_therm_sensor_event(struct nouveau_therm *therm,
- enum nouveau_therm_thrs thrs,
- enum nouveau_therm_thrs_direction dir);
-void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
-
-void nv40_therm_intr(struct nouveau_subdev *);
-int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
-int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
-int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
-int nv50_fan_pwm_clock(struct nouveau_therm *, int);
-int nv84_temp_get(struct nouveau_therm *therm);
-void nv84_sensor_setup(struct nouveau_therm *therm);
-int nv84_therm_fini(struct nouveau_object *object, bool suspend);
-
-int nva3_therm_fan_sense(struct nouveau_therm *);
-
-int nvd0_therm_init(struct nouveau_object *object);
-
-int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
-int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
-int nouveau_fannil_create(struct nouveau_therm *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
deleted file mode 100644
index f75a683bd47a..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <core/gpuobj.h>
-#include <core/mm.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-
-void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
-{
- struct nouveau_vm *vm = vma->vm;
- struct nouveau_vmmgr *vmm = vm->vmm;
- struct nouveau_mm_node *r;
- int big = vma->node->type != vmm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vmm->pgt_bits - bits);
- u32 end, len;
-
- delta = 0;
- list_for_each_entry(r, &node->regions, rl_entry) {
- u64 phys = (u64)r->offset << 12;
- u32 num = r->length >> bits;
-
- while (num) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- vmm->map(vma, pgt, node, pte, len, phys, delta);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- phys += len << (bits + 12);
- pde++;
- pte = 0;
- }
-
- delta += (u64)len << vma->node->type;
- }
- }
-
- vmm->flush(vm);
-}
-
-static void
-nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem)
-{
- struct nouveau_vm *vm = vma->vm;
- struct nouveau_vmmgr *vmm = vm->vmm;
- int big = vma->node->type != vmm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vmm->pgt_bits - bits);
- unsigned m, sglen;
- u32 end, len;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
- sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
- end = pte + sglen;
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- for (m = 0; m < len; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
-
- if (num == 0)
- goto finish;
- }
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- if (m < sglen) {
- for (; m < sglen; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
- if (num == 0)
- goto finish;
- }
- }
-
- }
-finish:
- vmm->flush(vm);
-}
-
-static void
-nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem)
-{
- struct nouveau_vm *vm = vma->vm;
- struct nouveau_vmmgr *vmm = vm->vmm;
- dma_addr_t *list = mem->pages;
- int big = vma->node->type != vmm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vmm->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- vmm->map_sg(vma, pgt, mem, pte, len, list);
-
- num -= len;
- pte += len;
- list += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- }
-
- vmm->flush(vm);
-}
-
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
- if (node->sg)
- nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
- else
- if (node->pages)
- nouveau_vm_map_sg(vma, 0, node->size << 12, node);
- else
- nouveau_vm_map_at(vma, 0, node);
-}
-
-void
-nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
-{
- struct nouveau_vm *vm = vma->vm;
- struct nouveau_vmmgr *vmm = vm->vmm;
- int big = vma->node->type != vmm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vmm->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- vmm->unmap(pgt, pte, len);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- }
-
- vmm->flush(vm);
-}
-
-void
-nouveau_vm_unmap(struct nouveau_vma *vma)
-{
- nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
-
-static void
-nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
-{
- struct nouveau_vmmgr *vmm = vm->vmm;
- struct nouveau_vm_pgd *vpgd;
- struct nouveau_vm_pgt *vpgt;
- struct nouveau_gpuobj *pgt;
- u32 pde;
-
- for (pde = fpde; pde <= lpde; pde++) {
- vpgt = &vm->pgt[pde - vm->fpde];
- if (--vpgt->refcount[big])
- continue;
-
- pgt = vpgt->obj[big];
- vpgt->obj[big] = NULL;
-
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
- }
-
- mutex_unlock(&nv_subdev(vmm)->mutex);
- nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&nv_subdev(vmm)->mutex);
- }
-}
-
-static int
-nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
-{
- struct nouveau_vmmgr *vmm = vm->vmm;
- struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- struct nouveau_vm_pgd *vpgd;
- struct nouveau_gpuobj *pgt;
- int big = (type != vmm->spg_shift);
- u32 pgt_size;
- int ret;
-
- pgt_size = (1 << (vmm->pgt_bits + 12)) >> type;
- pgt_size *= 8;
-
- mutex_unlock(&nv_subdev(vmm)->mutex);
- ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &pgt);
- mutex_lock(&nv_subdev(vmm)->mutex);
- if (unlikely(ret))
- return ret;
-
- /* someone beat us to filling the PDE while we didn't have the lock */
- if (unlikely(vpgt->refcount[big]++)) {
- mutex_unlock(&nv_subdev(vmm)->mutex);
- nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&nv_subdev(vmm)->mutex);
- return 0;
- }
-
- vpgt->obj[big] = pgt;
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
- }
-
- return 0;
-}
-
-int
-nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
- u32 access, struct nouveau_vma *vma)
-{
- struct nouveau_vmmgr *vmm = vm->vmm;
- u32 align = (1 << page_shift) >> 12;
- u32 msize = size >> 12;
- u32 fpde, lpde, pde;
- int ret;
-
- mutex_lock(&nv_subdev(vmm)->mutex);
- ret = nouveau_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
- &vma->node);
- if (unlikely(ret != 0)) {
- mutex_unlock(&nv_subdev(vmm)->mutex);
- return ret;
- }
-
- fpde = (vma->node->offset >> vmm->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
-
- for (pde = fpde; pde <= lpde; pde++) {
- struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (vma->node->type != vmm->spg_shift);
-
- if (likely(vpgt->refcount[big])) {
- vpgt->refcount[big]++;
- continue;
- }
-
- ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
- if (ret) {
- if (pde != fpde)
- nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nouveau_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&nv_subdev(vmm)->mutex);
- return ret;
- }
- }
- mutex_unlock(&nv_subdev(vmm)->mutex);
-
- vma->vm = NULL;
- nouveau_vm_ref(vm, &vma->vm, NULL);
- vma->offset = (u64)vma->node->offset << 12;
- vma->access = access;
- return 0;
-}
-
-void
-nouveau_vm_put(struct nouveau_vma *vma)
-{
- struct nouveau_vm *vm = vma->vm;
- struct nouveau_vmmgr *vmm = vm->vmm;
- u32 fpde, lpde;
-
- if (unlikely(vma->node == NULL))
- return;
- fpde = (vma->node->offset >> vmm->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
-
- mutex_lock(&nv_subdev(vmm)->mutex);
- nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
- nouveau_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&nv_subdev(vmm)->mutex);
-
- nouveau_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
- u64 mm_offset, u32 block, struct nouveau_vm **pvm)
-{
- struct nouveau_vm *vm;
- u64 mm_length = (offset + length) - mm_offset;
- int ret;
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&vm->pgd_list);
- vm->vmm = vmm;
- kref_init(&vm->refcount);
- vm->fpde = offset >> (vmm->pgt_bits + 12);
- vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
-
- vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
- if (!vm->pgt) {
- kfree(vm);
- return -ENOMEM;
- }
-
- ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
- block >> 12);
- if (ret) {
- vfree(vm->pgt);
- kfree(vm);
- return ret;
- }
-
- *pvm = vm;
-
- return 0;
-}
-
-int
-nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
- u64 mm_offset, struct nouveau_vm **pvm)
-{
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
- return vmm->create(vmm, offset, length, mm_offset, pvm);
-}
-
-static int
-nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
-{
- struct nouveau_vmmgr *vmm = vm->vmm;
- struct nouveau_vm_pgd *vpgd;
- int i;
-
- if (!pgd)
- return 0;
-
- vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
- if (!vpgd)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(pgd, &vpgd->obj);
-
- mutex_lock(&nv_subdev(vmm)->mutex);
- for (i = vm->fpde; i <= vm->lpde; i++)
- vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
- list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&nv_subdev(vmm)->mutex);
- return 0;
-}
-
-static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
-{
- struct nouveau_vmmgr *vmm = vm->vmm;
- struct nouveau_vm_pgd *vpgd, *tmp;
- struct nouveau_gpuobj *pgd = NULL;
-
- if (!mpgd)
- return;
-
- mutex_lock(&nv_subdev(vmm)->mutex);
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj == mpgd) {
- pgd = vpgd->obj;
- list_del(&vpgd->head);
- kfree(vpgd);
- break;
- }
- }
- mutex_unlock(&nv_subdev(vmm)->mutex);
-
- nouveau_gpuobj_ref(NULL, &pgd);
-}
-
-static void
-nouveau_vm_del(struct kref *kref)
-{
- struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
- struct nouveau_vm_pgd *vpgd, *tmp;
-
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- nouveau_vm_unlink(vm, vpgd->obj);
- }
-
- nouveau_mm_fini(&vm->mm);
- vfree(vm->pgt);
- kfree(vm);
-}
-
-int
-nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
- struct nouveau_gpuobj *pgd)
-{
- if (ref) {
- int ret = nouveau_vm_link(ref, pgd);
- if (ret)
- return ret;
-
- kref_get(&ref->refcount);
- }
-
- if (*ptr) {
- nouveau_vm_unlink(*ptr, pgd);
- kref_put(&(*ptr)->refcount, nouveau_vm_del);
- }
-
- *ptr = ref;
- return 0;
-}
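The mapping and unmapping loops in the deleted vm/base.c all start from the same index split: a virtual offset in 4 KiB units is divided by 1 << pgt_bits to find the page-directory entry (relative to vm->fpde), and the remainder, shifted down by the per-VMA page size, gives the starting PTE and the number of PTEs per table ("max" in the originals). A stand-alone sketch of just that arithmetic follows; it is not kernel code and the parameter values in main() are made up.

/* Index split used by the deleted nouveau_vm_map_at()/_map_sg()/_unmap_at(). */
#include <stdint.h>
#include <stdio.h>

struct vm_index { uint32_t pde, pte, ptes_per_pgt; };

static struct vm_index
vm_split(uint64_t addr, uint32_t pgt_bits, uint32_t page_shift, uint32_t fpde)
{
	uint32_t offset = (uint32_t)(addr >> 12);	/* 4 KiB units, as in the diff */
	uint32_t bits   = page_shift - 12;		/* 0 for small pages */
	struct vm_index ix;

	ix.pde          = (offset >> pgt_bits) - fpde;
	ix.pte          = (offset & ((1u << pgt_bits) - 1)) >> bits;
	ix.ptes_per_pgt = 1u << (pgt_bits - bits);	/* "max" in the original */
	return ix;
}

int main(void)
{
	/* hypothetical layout: 15 pgt_bits, 4 KiB pages, directory starts at 0 */
	struct vm_index ix = vm_split(0x12345000ULL, 15, 12, 0);

	printf("pde=%u pte=%u (of %u per table)\n",
	       (unsigned)ix.pde, (unsigned)ix.pte, (unsigned)ix.ptes_per_pgt);
	return 0;
}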
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
deleted file mode 100644
index ec42d4bc86a6..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __NV04_VMMGR_PRIV__
-#define __NV04_VMMGR_PRIV__
-
-#include <subdev/vm.h>
-
-struct nv04_vmmgr_priv {
- struct nouveau_vmmgr base;
- struct nouveau_vm *vm;
- dma_addr_t null;
- void *nullp;
-};
-
-static inline struct nv04_vmmgr_priv *
-nv04_vmmgr(void *obj)
-{
- return (void *)nouveau_vmmgr(obj);
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
index 424a489d0f03..424a489d0f03 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Makefile
+++ b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 38402ade6835..542bb266a0ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -41,7 +41,7 @@
#include "disp.h"
#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -112,12 +112,12 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nvkm_bios(&drm->device);
- struct nouveau_clock *clk = nvkm_clock(&drm->device);
+ struct nvkm_bios *bios = nvxx_bios(&drm->device);
+ struct nvkm_clk *clk = nvxx_clk(&drm->device);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
- struct nouveau_pll_vals *pv = &regp->pllvals;
+ struct nvkm_pll_vals *pv = &regp->pllvals;
struct nvbios_pll pll_lim;
if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 2d8056cde996..d7b495a5f30c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -66,7 +66,7 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nouveau_timer *ptimer = nvkm_timer(device);
+ struct nvkm_timer *ptimer = nvxx_timer(device);
int i;
for (i = 0; i < 2; i++) {
@@ -80,17 +80,17 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
- if (!nouveau_timer_wait_eq(ptimer, 10000000,
- NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nvkm_timer_wait_eq(ptimer, 10000000,
+ NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
- if (!nouveau_timer_wait_eq(ptimer, 10000000,
- NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000001))
+ if (!nvkm_timer_wait_eq(ptimer, 10000000,
+ NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000001))
return -EBUSY;
- if (!nouveau_timer_wait_eq(ptimer, 10000000,
- NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nvkm_timer_wait_eq(ptimer, 10000000,
+ NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
udelay(100);
@@ -232,7 +232,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nouveau_gpio *gpio = nvkm_gpio(device);
+ struct nvkm_gpio *gpio = nvxx_gpio(device);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 42a5435259f7..f6ca343fd34a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -623,9 +623,9 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
- struct nouveau_i2c_port *port = i2c->find(i2c, 2);
- struct nouveau_i2c_board_info info[] = {
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c_port *port = i2c->find(i2c, 2);
+ struct nvkm_i2c_board_info info[] = {
{
{
.type = "sil164",
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 3d0afa1c6cff..f96237ef2a6b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -32,28 +32,10 @@
#include "nouveau_connector.h"
int
-nv04_display_early_init(struct drm_device *dev)
-{
- /* ensure vblank interrupts are off, they can't be enabled until
- * drm_vblank has been initialised
- */
- NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
- if (nv_two_heads(dev))
- NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
-
- return 0;
-}
-
-void
-nv04_display_late_takedown(struct drm_device *dev)
-{
-}
-
-int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 17b899d9aba3..c910c5d5c662 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -36,7 +36,7 @@ struct nv04_crtc_reg {
/* PRAMDAC regs */
uint32_t nv10_cursync;
- struct nouveau_pll_vals pllvals;
+ struct nvkm_pll_vals pllvals;
uint32_t ramdac_gen_ctrl;
uint32_t ramdac_630;
uint32_t ramdac_634;
@@ -90,8 +90,6 @@ nv04_display(struct drm_device *dev)
}
/* nv04_display.c */
-int nv04_display_early_init(struct drm_device *);
-void nv04_display_late_takedown(struct drm_device *);
int nv04_display_create(struct drm_device *);
void nv04_display_destroy(struct drm_device *);
int nv04_display_init(struct drm_device *);
@@ -172,7 +170,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nvkm_bios(&drm->device);
+ struct nvkm_bios *bios = nvxx_bios(&drm->device);
struct nvbios_init init = {
.subdev = nv_subdev(bios),
.bios = bios,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 3d4c19300768..42e07afc4c2b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -130,7 +130,7 @@ NVBlankScreen(struct drm_device *dev, int head, bool blank)
static void
nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
- uint32_t pll2, struct nouveau_pll_vals *pllvals)
+ uint32_t pll2, struct nvkm_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -162,11 +162,11 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
int
nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
- struct nouveau_pll_vals *pllvals)
+ struct nvkm_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
- struct nouveau_bios *bios = nvkm_bios(device);
+ struct nvkm_bios *bios = nvxx_bios(device);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -202,7 +202,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
}
int
-nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
+nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv)
{
/* Avoid divide by zero if called at an inappropriate time */
if (!pv->M1 || !pv->M2)
@@ -214,7 +214,7 @@ nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
int
nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
- struct nouveau_pll_vals pllvals;
+ struct nvkm_pll_vals pllvals;
int ret;
if (plltype == PLL_MEMORY &&
@@ -253,10 +253,10 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
- struct nouveau_clock *clk = nvkm_clock(device);
- struct nouveau_bios *bios = nvkm_bios(device);
+ struct nvkm_clk *clk = nvxx_clk(device);
+ struct nvkm_bios *bios = nvxx_bios(device);
struct nvbios_pll pll_lim;
- struct nouveau_pll_vals pv;
+ struct nvkm_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
if (nvbios_pll_parse(bios, pll, &pll_lim))
@@ -463,7 +463,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_clock *clk = nvkm_clock(&drm->device);
+ struct nvkm_clk *clk = nvxx_clk(&drm->device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
@@ -661,7 +661,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
- struct nouveau_timer *ptimer = nvkm_timer(device);
+ struct nvkm_timer *ptimer = nvxx_timer(device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
@@ -741,8 +741,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
- nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
- nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+ nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
}
wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 7f53c571f31f..6c796178bf0c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -42,8 +42,8 @@ uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
void NVSetOwner(struct drm_device *, int owner);
void NVBlankScreen(struct drm_device *, int head, bool blank);
int nouveau_hw_get_pllvals(struct drm_device *, enum nvbios_pll_type plltype,
- struct nouveau_pll_vals *pllvals);
-int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
+ struct nvkm_pll_vals *pllvals);
+int nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pllvals);
int nouveau_hw_get_clock(struct drm_device *, enum nvbios_pll_type plltype);
void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
void nouveau_hw_save_state(struct drm_device *, int head,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 8061d8d0ce79..d9664b37def1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,7 +35,7 @@
#include <drm/i2c/ch7006.h>
-static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
+static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = {
{
{
I2C_BOARD_INFO("ch7006", 0x75),
@@ -54,7 +54,7 @@ static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
nv04_tv_encoder_info, NULL, NULL);
@@ -204,8 +204,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
- struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index);
int type, ret;
/* Ensure that we can talk to this encoder */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 72d2ab04db47..731d74efc1e5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -46,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -133,14 +133,14 @@ get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
struct nvif_device *device = &drm->device;
/* Zotac FX5200 */
- if (nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) ||
- nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035)) {
+ if (nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x1035) ||
+ nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x2035)) {
*pin_mask = 0xc;
return false;
}
/* MSI nForce2 IGP */
- if (nv_device_match(nvkm_object(device), 0x01f0, 0x1462, 0x5710)) {
+ if (nv_device_match(nvxx_object(device), 0x01f0, 0x1462, 0x5710)) {
*pin_mask = 0xc;
return false;
}
@@ -370,7 +370,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 4e308eacb27a..5ad17fc36ae3 100644
--- a/drivers/gpu/drm/nouveau/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -122,18 +122,21 @@ struct nv_device_v0 {
#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL
#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL
#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL
-#define NV_DEVICE_V0_DISABLE_GRAPH 0x0000000100000000ULL
+#define NV_DEVICE_V0_DISABLE_GR 0x0000000100000000ULL
#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL
#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL
#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL
-#define NV_DEVICE_V0_DISABLE_CRYPT 0x0000001000000000ULL
+#define NV_DEVICE_V0_DISABLE_CIPHER 0x0000001000000000ULL
#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL
-#define NV_DEVICE_V0_DISABLE_PPP 0x0000004000000000ULL
-#define NV_DEVICE_V0_DISABLE_COPY0 0x0000008000000000ULL
-#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL
+#define NV_DEVICE_V0_DISABLE_MSPPP 0x0000004000000000ULL
+#define NV_DEVICE_V0_DISABLE_CE0 0x0000008000000000ULL
+#define NV_DEVICE_V0_DISABLE_CE1 0x0000010000000000ULL
#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
-#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL
-#define NV_DEVICE_V0_DISABLE_COPY2 0x0000080000000000ULL
+#define NV_DEVICE_V0_DISABLE_MSENC 0x0000040000000000ULL
+#define NV_DEVICE_V0_DISABLE_CE2 0x0000080000000000ULL
+#define NV_DEVICE_V0_DISABLE_MSVLD 0x0000100000000000ULL
+#define NV_DEVICE_V0_DISABLE_SEC 0x0000200000000000ULL
+#define NV_DEVICE_V0_DISABLE_MSPDEC 0x0000400000000000ULL
__u64 disable; /* disable particular subsystems */
__u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
};
@@ -346,9 +349,9 @@ struct nv50_channel_gpfifo_v0 {
struct kepler_channel_gpfifo_a_v0 {
__u8 version;
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_VP 0x02
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_PPP 0x04
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_BSP 0x08
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPDEC 0x02
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPPP 0x04
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSVLD 0x08
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
diff --git a/drivers/gpu/drm/nouveau/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 28352f0882ec..eca648ef0f7a 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -1,7 +1,7 @@
#ifndef __NVIF_CLIENT_H__
#define __NVIF_CLIENT_H__
-#include "object.h"
+#include <nvif/object.h>
struct nvif_client {
struct nvif_object base;
@@ -31,9 +31,9 @@ int nvif_client_resume(struct nvif_client *);
/*XXX*/
#include <core/client.h>
-#define nvkm_client(a) ({ \
+#define nvxx_client(a) ({ \
struct nvif_client *_client = nvif_client(nvif_object(a)); \
- nouveau_client(_client->base.priv); \
+ nvkm_client(_client->base.priv); \
})
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
new file mode 100644
index 000000000000..88553a741ab7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -0,0 +1,61 @@
+#ifndef __NVIF_DEVICE_H__
+#define __NVIF_DEVICE_H__
+
+#include <nvif/object.h>
+#include <nvif/class.h>
+
+struct nvif_device {
+ struct nvif_object base;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct nv_device_info_v0 info;
+};
+
+static inline struct nvif_device *
+nvif_device(struct nvif_object *object)
+{
+ while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
+ object = object->parent;
+ return (void *)object;
+}
+
+int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
+ u32 handle, u32 oclass, void *, u32,
+ struct nvif_device *);
+void nvif_device_fini(struct nvif_device *);
+int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
+ void *, u32, struct nvif_device **);
+void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+
+/*XXX*/
+#include <subdev/bios.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+#include <subdev/bar.h>
+#include <subdev/gpio.h>
+#include <subdev/clk.h>
+#include <subdev/i2c.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+#define nvxx_device(a) nv_device(nvxx_object((a)))
+#define nvxx_bios(a) nvkm_bios(nvxx_device(a))
+#define nvxx_fb(a) nvkm_fb(nvxx_device(a))
+#define nvxx_mmu(a) nvkm_mmu(nvxx_device(a))
+#define nvxx_bar(a) nvkm_bar(nvxx_device(a))
+#define nvxx_gpio(a) nvkm_gpio(nvxx_device(a))
+#define nvxx_clk(a) nvkm_clk(nvxx_device(a))
+#define nvxx_i2c(a) nvkm_i2c(nvxx_device(a))
+#define nvxx_timer(a) nvkm_timer(nvxx_device(a))
+#define nvxx_wait(a,b,c,d) nv_wait(nvxx_timer(a), (b), (c), (d))
+#define nvxx_wait_cb(a,b,c) nv_wait_cb(nvxx_timer(a), (b), (c))
+#define nvxx_therm(a) nvkm_therm(nvxx_device(a))
+
+#include <core/device.h>
+#include <engine/fifo.h>
+#include <engine/gr.h>
+#include <engine/sw.h>
+
+#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
+#define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a))
+#define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR))
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 8bd39e69229c..8bd39e69229c 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
diff --git a/drivers/gpu/drm/nouveau/nvif/event.h b/drivers/gpu/drm/nouveau/include/nvif/event.h
index 21764499b4be..21764499b4be 100644
--- a/drivers/gpu/drm/nouveau/nvif/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/event.h
diff --git a/drivers/gpu/drm/nouveau/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4cd8e323b23d..4cd8e323b23d 100644
--- a/drivers/gpu/drm/nouveau/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
diff --git a/drivers/gpu/drm/nouveau/nvif/list.h b/drivers/gpu/drm/nouveau/include/nvif/list.h
index 8af5d144ecb0..8af5d144ecb0 100644
--- a/drivers/gpu/drm/nouveau/nvif/list.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/list.h
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 9ebfa3b45e76..9ebfa3b45e76 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index fe519179b76c..04c874707b96 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -70,6 +70,6 @@ void nvif_object_unmap(struct nvif_object *);
/*XXX*/
#include <core/object.h>
-#define nvkm_object(a) ((struct nouveau_object *)nvif_object(a)->priv)
+#define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv)
#endif
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index bdd05ee7ec72..bdd05ee7ec72 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
diff --git a/drivers/gpu/drm/nouveau/nvif/unpack.h b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
index 5933188b4a77..5933188b4a77 100644
--- a/drivers/gpu/drm/nouveau/nvif/unpack.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
new file mode 100644
index 000000000000..a35b38244502
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -0,0 +1,55 @@
+#ifndef __NVKM_CLIENT_H__
+#define __NVKM_CLIENT_H__
+#include <core/namedb.h>
+
+struct nvkm_client {
+ struct nvkm_namedb namedb;
+ struct nvkm_handle *root;
+ struct nvkm_object *device;
+ char name[32];
+ u32 debug;
+ struct nvkm_vm *vm;
+ bool super;
+ void *data;
+
+ int (*ntfy)(const void *, u32, const void *, u32);
+ struct nvkm_client_notify *notify[16];
+};
+
+static inline struct nvkm_client *
+nv_client(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
+ nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+static inline struct nvkm_client *
+nvkm_client(void *obj)
+{
+ struct nvkm_object *client = nv_object(obj);
+ while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
+ client = client->parent;
+ return (void *)client;
+}
+
+#define nvkm_client_create(n,c,oc,od,d) \
+ nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
+
+int nvkm_client_create_(const char *name, u64 device, const char *cfg,
+ const char *dbg, int, void **);
+#define nvkm_client_destroy(p) \
+ nvkm_namedb_destroy(&(p)->base)
+
+int nvkm_client_init(struct nvkm_client *);
+int nvkm_client_fini(struct nvkm_client *, bool suspend);
+const char *nvkm_client_name(void *obj);
+
+int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
+ void *data, u32 size);
+int nvkm_client_notify_del(struct nvkm_client *, int index);
+int nvkm_client_notify_get(struct nvkm_client *, int index);
+int nvkm_client_notify_put(struct nvkm_client *, int index);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index 8092e2e90323..d07cb860b56c 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,6 +1,5 @@
-#ifndef __NOUVEAU_DEBUG_H__
-#define __NOUVEAU_DEBUG_H__
-
+#ifndef __NVKM_DEBUG_H__
+#define __NVKM_DEBUG_H__
extern int nv_info_debug_level;
#define NV_DBG_FATAL 0
@@ -16,5 +15,4 @@ extern int nv_info_debug_level;
#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
new file mode 100644
index 000000000000..333db33a162c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -0,0 +1,101 @@
+#ifndef __NVKM_DEVICE_H__
+#define __NVKM_DEVICE_H__
+#include <core/engine.h>
+#include <core/event.h>
+
+struct nvkm_device {
+ struct nvkm_engine engine;
+ struct list_head head;
+
+ struct pci_dev *pdev;
+ struct platform_device *platformdev;
+ u64 handle;
+
+ struct nvkm_event event;
+
+ const char *cfgopt;
+ const char *dbgopt;
+ const char *name;
+ const char *cname;
+ u64 disable_mask;
+
+ enum {
+ NV_04 = 0x04,
+ NV_10 = 0x10,
+ NV_11 = 0x11,
+ NV_20 = 0x20,
+ NV_30 = 0x30,
+ NV_40 = 0x40,
+ NV_50 = 0x50,
+ NV_C0 = 0xc0,
+ NV_E0 = 0xe0,
+ GM100 = 0x110,
+ } card_type;
+ u32 chipset;
+ u8 chiprev;
+ u32 crystal;
+
+ struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
+ struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
+
+ struct {
+ struct notifier_block nb;
+ } acpi;
+};
+
+struct nvkm_device *nvkm_device_find(u64 name);
+int nvkm_device_list(u64 *name, int size);
+
+struct nvkm_device *nv_device(void *obj);
+
+static inline bool
+nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub)
+{
+ struct nvkm_device *device = nv_device(object);
+ return device->pdev->device == dev &&
+ device->pdev->subsystem_vendor == ven &&
+ device->pdev->subsystem_device == sub;
+}
+
+static inline bool
+nv_device_is_pci(struct nvkm_device *device)
+{
+ return device->pdev != NULL;
+}
+
+static inline bool
+nv_device_is_cpu_coherent(struct nvkm_device *device)
+{
+ return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
+}
+
+static inline struct device *
+nv_device_base(struct nvkm_device *device)
+{
+ return nv_device_is_pci(device) ? &device->pdev->dev :
+ &device->platformdev->dev;
+}
+
+resource_size_t
+nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
+
+resource_size_t
+nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
+
+int
+nv_device_get_irq(struct nvkm_device *device, bool stall);
+
+struct platform_device;
+
+enum nv_bus_type {
+ NVKM_BUS_PCI,
+ NVKM_BUS_PLATFORM,
+};
+
+#define nvkm_device_create(p,t,n,s,c,d,u) \
+ nvkm_device_create_((void *)(p), (t), (n), (s), (c), (d), \
+ sizeof(**u), (void **)u)
+int nvkm_device_create_(void *, enum nv_bus_type type, u64 name,
+ const char *sname, const char *cfg, const char *dbg,
+ int, void **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
new file mode 100644
index 000000000000..60c5888b5df3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
@@ -0,0 +1,62 @@
+#ifndef __NVKM_DEVIDX_H__
+#define __NVKM_DEVIDX_H__
+enum nvkm_devidx {
+ NVDEV_ENGINE_DEVICE,
+ NVDEV_SUBDEV_VBIOS,
+
+ /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
+ * *any* of them are initialised. This subdev category is used
+ * for any subdevs that the VBIOS init table parsing may call out
+ * to during POST.
+ */
+ NVDEV_SUBDEV_DEVINIT,
+ NVDEV_SUBDEV_IBUS,
+ NVDEV_SUBDEV_GPIO,
+ NVDEV_SUBDEV_I2C,
+ NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
+
+	/* This grouping of subdevs is initialised right after they've
+ * been created, and are allowed to assume any subdevs in the
+ * list above them exist and have been initialised.
+ */
+ NVDEV_SUBDEV_FUSE,
+ NVDEV_SUBDEV_MXM,
+ NVDEV_SUBDEV_MC,
+ NVDEV_SUBDEV_BUS,
+ NVDEV_SUBDEV_TIMER,
+ NVDEV_SUBDEV_FB,
+ NVDEV_SUBDEV_LTC,
+ NVDEV_SUBDEV_INSTMEM,
+ NVDEV_SUBDEV_MMU,
+ NVDEV_SUBDEV_BAR,
+ NVDEV_SUBDEV_PMU,
+ NVDEV_SUBDEV_VOLT,
+ NVDEV_SUBDEV_THERM,
+ NVDEV_SUBDEV_CLK,
+
+ NVDEV_ENGINE_FIRST,
+ NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
+ NVDEV_ENGINE_IFB,
+ NVDEV_ENGINE_FIFO,
+ NVDEV_ENGINE_SW,
+ NVDEV_ENGINE_GR,
+ NVDEV_ENGINE_MPEG,
+ NVDEV_ENGINE_ME,
+ NVDEV_ENGINE_VP,
+ NVDEV_ENGINE_CIPHER,
+ NVDEV_ENGINE_BSP,
+ NVDEV_ENGINE_MSPPP,
+ NVDEV_ENGINE_CE0,
+ NVDEV_ENGINE_CE1,
+ NVDEV_ENGINE_CE2,
+ NVDEV_ENGINE_VIC,
+ NVDEV_ENGINE_MSENC,
+ NVDEV_ENGINE_DISP,
+ NVDEV_ENGINE_PM,
+ NVDEV_ENGINE_MSVLD,
+ NVDEV_ENGINE_SEC,
+ NVDEV_ENGINE_MSPDEC,
+
+ NVDEV_SUBDEV_NR,
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
new file mode 100644
index 000000000000..1bf2e8eb4268
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
@@ -0,0 +1,51 @@
+#ifndef __NVKM_ENGCTX_H__
+#define __NVKM_ENGCTX_H__
+#include <core/gpuobj.h>
+
+#include <subdev/mmu.h>
+
+#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
+#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
+
+struct nvkm_engctx {
+ struct nvkm_gpuobj gpuobj;
+ struct nvkm_vma vma;
+ struct list_head head;
+ unsigned long save;
+ u64 addr;
+};
+
+static inline struct nvkm_engctx *
+nv_engctx(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
+ nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+#define nvkm_engctx_create(p,e,c,g,s,a,f,d) \
+ nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
+ sizeof(**d), (void **)d)
+
+int nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, struct nvkm_object *,
+ u32 size, u32 align, u32 flags,
+ int length, void **data);
+void nvkm_engctx_destroy(struct nvkm_engctx *);
+int nvkm_engctx_init(struct nvkm_engctx *);
+int nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
+
+int _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void _nvkm_engctx_dtor(struct nvkm_object *);
+int _nvkm_engctx_init(struct nvkm_object *);
+int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
+#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
+#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
+
+struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
+void nvkm_engctx_put(struct nvkm_object *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
new file mode 100644
index 000000000000..faf0fd2f0638
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -0,0 +1,56 @@
+#ifndef __NVKM_ENGINE_H__
+#define __NVKM_ENGINE_H__
+#include <core/subdev.h>
+
+#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
+#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+
+struct nvkm_engine {
+ struct nvkm_subdev subdev;
+ struct nvkm_oclass *cclass;
+ struct nvkm_oclass *sclass;
+
+ struct list_head contexts;
+ spinlock_t lock;
+
+ void (*tile_prog)(struct nvkm_engine *, int region);
+ int (*tlb_flush)(struct nvkm_engine *);
+};
+
+static inline struct nvkm_engine *
+nv_engine(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
+ nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+static inline int
+nv_engidx(struct nvkm_engine *engine)
+{
+ return nv_subidx(&engine->subdev);
+}
+
+struct nvkm_engine *nvkm_engine(void *obj, int idx);
+
+#define nvkm_engine_create(p,e,c,d,i,f,r) \
+ nvkm_engine_create_((p), (e), (c), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+
+#define nvkm_engine_destroy(p) \
+ nvkm_subdev_destroy(&(p)->subdev)
+#define nvkm_engine_init(p) \
+ nvkm_subdev_init(&(p)->subdev)
+#define nvkm_engine_fini(p,s) \
+ nvkm_subdev_fini(&(p)->subdev, (s))
+
+int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, bool, const char *,
+ const char *, int, void **);
+
+#define _nvkm_engine_dtor _nvkm_subdev_dtor
+#define _nvkm_engine_init _nvkm_subdev_init
+#define _nvkm_engine_fini _nvkm_subdev_fini
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
new file mode 100644
index 000000000000..e76f76f115e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_ENUM_H__
+#define __NVKM_ENUM_H__
+#include <core/os.h>
+
+struct nvkm_enum {
+ u32 value;
+ const char *name;
+ const void *data;
+ u32 data2;
+};
+
+const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
+const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
+
+struct nvkm_bitfield {
+ u32 mask;
+ const char *name;
+};
+
+void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value);
+#endif
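
nvkm_enum tables map register values to names and nvkm_bitfield tables name individual status bits; both are terminated by an empty entry. A hedged usage sketch (not from this patch), with made-up table contents and register semantics:

    #include <linux/printk.h>
    #include <core/enum.h>

    static const struct nvkm_enum hypothetical_fault_reason[] = {
            { 0x00, "PT_NOT_PRESENT" },
            { 0x01, "PT_TOO_SHORT" },
            {}
    };

    static const struct nvkm_bitfield hypothetical_intr_bits[] = {
            { 0x00000001, "FAULT" },
            { 0x00000002, "BADCMD" },
            {}
    };

    static void
    hypothetical_decode(u32 stat, u32 reason)
    {
            const struct nvkm_enum *en =
                    nvkm_enum_find(hypothetical_fault_reason, reason);

            pr_info("intr 0x%08x, reason %s:", stat,
                    en ? en->name : "UNKNOWN");
            nvkm_bitfield_print(hypothetical_intr_bits, stat);
            pr_cont("\n");
    }
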
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
index 92876528972f..b98fe2de546a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
@@ -1,15 +1,8 @@
#ifndef __NVKM_EVENT_H__
#define __NVKM_EVENT_H__
-
-#include <core/notify.h>
-
-struct nvkm_event_func {
- int (*ctor)(struct nouveau_object *, void *data, u32 size,
- struct nvkm_notify *);
- void (*send)(void *data, u32 size, struct nvkm_notify *);
- void (*init)(struct nvkm_event *, int type, int index);
- void (*fini)(struct nvkm_event *, int type, int index);
-};
+#include <core/os.h>
+struct nvkm_notify;
+struct nvkm_object;
struct nvkm_event {
const struct nvkm_event_func *func;
@@ -23,13 +16,19 @@ struct nvkm_event {
int *refs;
};
-int nvkm_event_init(const struct nvkm_event_func *func,
- int types_nr, int index_nr,
- struct nvkm_event *);
+struct nvkm_event_func {
+ int (*ctor)(struct nvkm_object *, void *data, u32 size,
+ struct nvkm_notify *);
+ void (*send)(void *data, u32 size, struct nvkm_notify *);
+ void (*init)(struct nvkm_event *, int type, int index);
+ void (*fini)(struct nvkm_event *, int type, int index);
+};
+
+int nvkm_event_init(const struct nvkm_event_func *func, int types_nr,
+ int index_nr, struct nvkm_event *);
void nvkm_event_fini(struct nvkm_event *);
void nvkm_event_get(struct nvkm_event *, u32 types, int index);
void nvkm_event_put(struct nvkm_event *, u32 types, int index);
void nvkm_event_send(struct nvkm_event *, u32 types, int index,
void *data, u32 size);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
new file mode 100644
index 000000000000..e0187e7abb6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -0,0 +1,64 @@
+#ifndef __NVKM_GPUOBJ_H__
+#define __NVKM_GPUOBJ_H__
+#include <core/object.h>
+#include <core/mm.h>
+struct nvkm_vma;
+struct nvkm_vm;
+
+#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
+#define NVOBJ_FLAG_ZERO_FREE 0x00000002
+#define NVOBJ_FLAG_HEAP 0x00000004
+
+struct nvkm_gpuobj {
+ struct nvkm_object object;
+ struct nvkm_object *parent;
+ struct nvkm_mm_node *node;
+ struct nvkm_mm heap;
+
+ u32 flags;
+ u64 addr;
+ u32 size;
+};
+
+static inline struct nvkm_gpuobj *
+nv_gpuobj(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
+ nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d) \
+ nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
+ sizeof(**d), (void **)d)
+#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object)
+#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s))
+int nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32 pclass,
+ struct nvkm_object *, u32 size, u32 align,
+ u32 flags, int length, void **);
+void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
+
+int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
+ u32 align, u32 flags, struct nvkm_gpuobj **);
+int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
+ struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
+int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+ struct nvkm_vma *);
+void nvkm_gpuobj_unmap(struct nvkm_vma *);
+
+static inline void
+nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
+{
+ nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
+}
+
+void _nvkm_gpuobj_dtor(struct nvkm_object *);
+int _nvkm_gpuobj_init(struct nvkm_object *);
+int _nvkm_gpuobj_fini(struct nvkm_object *, bool);
+u32 _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
+void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
+#endif
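
The allocation and release pattern for these objects is reference-based: nvkm_gpuobj_new() creates, and dropping the last reference via nvkm_gpuobj_ref(NULL, &ptr) destroys. A hedged sketch (not from this patch), assuming a suitable parent object obtained elsewhere in the driver:

    static int
    hypothetical_gpuobj_roundtrip(struct nvkm_object *parent)
    {
            struct nvkm_gpuobj *gpuobj = NULL;
            int ret;

            /* 0x1000 bytes, 256-byte aligned, zeroed on allocation */
            ret = nvkm_gpuobj_new(parent, NULL, 0x1000, 256,
                                  NVOBJ_FLAG_ZERO_ALLOC, &gpuobj);
            if (ret)
                    return ret;

            /* ... program the object through its rd32/wr32 ofuncs ... */

            nvkm_gpuobj_ref(NULL, &gpuobj);         /* drop the reference */
            return 0;
    }
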
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
new file mode 100644
index 000000000000..67f384d0916c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_HANDLE_H__
+#define __NVKM_HANDLE_H__
+#include <core/os.h>
+struct nvkm_object;
+
+struct nvkm_handle {
+ struct nvkm_namedb *namedb;
+ struct list_head node;
+
+ struct list_head head;
+ struct list_head tree;
+ u32 name;
+ u32 priv;
+
+ u8 route;
+ u64 token;
+
+ struct nvkm_handle *parent;
+ struct nvkm_object *object;
+};
+
+int nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
+ struct nvkm_object *, struct nvkm_handle **);
+void nvkm_handle_destroy(struct nvkm_handle *);
+int nvkm_handle_init(struct nvkm_handle *);
+int nvkm_handle_fini(struct nvkm_handle *, bool suspend);
+
+struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
+
+struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
+struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
+struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
+void nvkm_handle_put(struct nvkm_handle *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
new file mode 100644
index 000000000000..88971eb37afa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_IOCTL_H__
+#define __NVKM_IOCTL_H__
+#include <core/os.h>
+struct nvkm_client;
+
+int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
new file mode 100644
index 000000000000..096eb1a623ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -0,0 +1,40 @@
+#ifndef __NVKM_MM_H__
+#define __NVKM_MM_H__
+#include <core/os.h>
+
+struct nvkm_mm_node {
+ struct list_head nl_entry;
+ struct list_head fl_entry;
+ struct list_head rl_entry;
+
+#define NVKM_MM_HEAP_ANY 0x00
+ u8 heap;
+#define NVKM_MM_TYPE_NONE 0x00
+#define NVKM_MM_TYPE_HOLE 0xff
+ u8 type;
+ u32 offset;
+ u32 length;
+};
+
+struct nvkm_mm {
+ struct list_head nodes;
+ struct list_head free;
+
+ u32 block_size;
+ int heap_nodes;
+};
+
+static inline bool
+nvkm_mm_initialised(struct nvkm_mm *mm)
+{
+ return mm->block_size != 0;
+}
+
+int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_fini(struct nvkm_mm *);
+int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
+ u32 size_min, u32 align, struct nvkm_mm_node **);
+int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
+ u32 size_min, u32 align, struct nvkm_mm_node **);
+void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
+#endif
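
nvkm_mm is a simple range allocator: a region is described once with nvkm_mm_init(), and nvkm_mm_head()/nvkm_mm_tail() carve nodes from the bottom or top of it. A hedged sketch (not from this patch) of one allocate/free round trip; the units follow whatever granularity the region was initialised with, the type tag (which must be non-zero) is the caller's own, and the numbers are made up:

    static int
    hypothetical_mm_roundtrip(struct nvkm_mm *mm)
    {
            struct nvkm_mm_node *node;
            int ret;

            /* up to 0x1000 units, at least 0x100, aligned to 0x10,
             * from any heap, tagged with caller-chosen type 1
             */
            ret = nvkm_mm_head(mm, NVKM_MM_HEAP_ANY, 0x01,
                               0x1000, 0x0100, 0x10, &node);
            if (ret)
                    return ret;

            /* ... use node->offset and node->length ... */

            nvkm_mm_free(mm, &node);
            return 0;
    }
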
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
new file mode 100644
index 000000000000..4cfe16fcde9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_NAMEDB_H__
+#define __NVKM_NAMEDB_H__
+#include <core/parent.h>
+struct nvkm_handle;
+
+struct nvkm_namedb {
+ struct nvkm_parent parent;
+ rwlock_t lock;
+ struct list_head list;
+};
+
+static inline struct nvkm_namedb *
+nv_namedb(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
+ nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+#define nvkm_namedb_create(p,e,c,v,s,m,d) \
+ nvkm_namedb_create_((p), (e), (c), (v), (s), (m), \
+ sizeof(**d), (void **)d)
+#define nvkm_namedb_init(p) \
+ nvkm_parent_init(&(p)->parent)
+#define nvkm_namedb_fini(p,s) \
+ nvkm_parent_fini(&(p)->parent, (s))
+#define nvkm_namedb_destroy(p) \
+ nvkm_parent_destroy(&(p)->parent)
+
+int nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32 pclass,
+ struct nvkm_oclass *, u64 engcls,
+ int size, void **);
+
+int _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+#define _nvkm_namedb_dtor _nvkm_parent_dtor
+#define _nvkm_namedb_init _nvkm_parent_init
+#define _nvkm_namedb_fini _nvkm_parent_fini
+
+int nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
+ struct nvkm_handle *);
+void nvkm_namedb_remove(struct nvkm_handle *);
+
+struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
+struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
+struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
+struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
+void nvkm_namedb_put(struct nvkm_handle *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
index a7c3c5f578cc..753d08c1767b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
@@ -1,5 +1,7 @@
#ifndef __NVKM_NOTIFY_H__
#define __NVKM_NOTIFY_H__
+#include <core/os.h>
+struct nvkm_object;
struct nvkm_notify {
struct nvkm_event *event;
@@ -25,7 +27,7 @@ struct nvkm_notify {
const void *data;
};
-int nvkm_notify_init(struct nouveau_object *, struct nvkm_event *,
+int nvkm_notify_init(struct nvkm_object *, struct nvkm_event *,
int (*func)(struct nvkm_notify *), bool work,
void *data, u32 size, u32 reply,
struct nvkm_notify *);
@@ -33,5 +35,4 @@ void nvkm_notify_fini(struct nvkm_notify *);
void nvkm_notify_get(struct nvkm_notify *);
void nvkm_notify_put(struct nvkm_notify *);
void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size);
-
#endif
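
Events and notifiers pair up: nvkm_notify_init() attaches a handler to an nvkm_event, nvkm_notify_get() arms it, and the handler's return value decides whether it stays armed. A hedged sketch (not from this patch); the object, the event, and the NVKM_NOTIFY_KEEP constant are assumed to come from the unchanged parts of these headers and from elsewhere in the driver:

    static int
    hypothetical_notify_handler(struct nvkm_notify *notify)
    {
            /* ... consume notify->data / notify->size ... */
            return NVKM_NOTIFY_KEEP;        /* remain armed for the next event */
    }

    static int
    hypothetical_watch(struct nvkm_object *object, struct nvkm_event *event,
                       struct nvkm_notify *notify)
    {
            int ret = nvkm_notify_init(object, event,
                                       hypothetical_notify_handler,
                                       false /* no workqueue */,
                                       NULL, 0, 0, notify);
            if (ret == 0)
                    nvkm_notify_get(notify);
            return ret;
    }
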
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 2e2afa502c99..6e3cd3908400 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,6 +1,5 @@
-#ifndef __NOUVEAU_OBJECT_H__
-#define __NOUVEAU_OBJECT_H__
-
+#ifndef __NVKM_OBJECT_H__
+#define __NVKM_OBJECT_H__
#include <core/os.h>
#include <core/printk.h>
@@ -14,52 +13,52 @@
#define NV_ENGCTX_CLASS 0x01000000
#define NV_OBJECT_CLASS 0x0000ffff
-struct nouveau_object {
- struct nouveau_oclass *oclass;
- struct nouveau_object *parent;
- struct nouveau_object *engine;
+struct nvkm_object {
+ struct nvkm_oclass *oclass;
+ struct nvkm_object *parent;
+ struct nvkm_engine *engine;
atomic_t refcount;
atomic_t usecount;
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad
+#define NVKM_OBJECT_MAGIC 0x75ef0bad
struct list_head list;
u32 _magic;
#endif
};
-static inline struct nouveau_object *
+static inline struct nvkm_object *
nv_object(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
if (likely(obj)) {
- struct nouveau_object *object = obj;
- if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC))
+ struct nvkm_object *object = obj;
+ if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
nv_assert("BAD CAST -> NvObject, invalid magic");
}
#endif
return obj;
}
-#define nouveau_object_create(p,e,c,s,d) \
- nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
-int nouveau_object_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32, int size, void **);
-void nouveau_object_destroy(struct nouveau_object *);
-int nouveau_object_init(struct nouveau_object *);
-int nouveau_object_fini(struct nouveau_object *, bool suspend);
+#define nvkm_object_create(p,e,c,s,d) \
+ nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
+int nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32, int size, void **);
+void nvkm_object_destroy(struct nvkm_object *);
+int nvkm_object_init(struct nvkm_object *);
+int nvkm_object_fini(struct nvkm_object *, bool suspend);
-int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
+int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
-extern struct nouveau_ofuncs nouveau_object_ofuncs;
+extern struct nvkm_ofuncs nvkm_object_ofuncs;
/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
* ".data". */
-struct nouveau_oclass {
+struct nvkm_oclass {
u32 handle;
- struct nouveau_ofuncs * const ofuncs;
- struct nouveau_omthds * const omthds;
+ struct nvkm_ofuncs * const ofuncs;
+ struct nvkm_omthds * const omthds;
struct lock_class_key lock_class_key;
};
@@ -68,58 +67,57 @@ struct nouveau_oclass {
#define nv_iclass(o,i) (nv_hclass(o) & (i))
#define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS)
-static inline struct nouveau_object *
-nv_pclass(struct nouveau_object *parent, u32 oclass)
+static inline struct nvkm_object *
+nv_pclass(struct nvkm_object *parent, u32 oclass)
{
while (parent && !nv_iclass(parent, oclass))
parent = parent->parent;
return parent;
}
-struct nouveau_omthds {
+struct nvkm_omthds {
u32 start;
u32 limit;
- int (*call)(struct nouveau_object *, u32, void *, u32);
+ int (*call)(struct nvkm_object *, u32, void *, u32);
};
struct nvkm_event;
-struct nouveau_ofuncs {
- int (*ctor)(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- struct nouveau_object **);
- void (*dtor)(struct nouveau_object *);
- int (*init)(struct nouveau_object *);
- int (*fini)(struct nouveau_object *, bool suspend);
- int (*mthd)(struct nouveau_object *, u32, void *, u32);
- int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **);
- int (* map)(struct nouveau_object *, u64 *, u32 *);
- u8 (*rd08)(struct nouveau_object *, u64 offset);
- u16 (*rd16)(struct nouveau_object *, u64 offset);
- u32 (*rd32)(struct nouveau_object *, u64 offset);
- void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
- void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
- void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
+struct nvkm_ofuncs {
+ int (*ctor)(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ void (*dtor)(struct nvkm_object *);
+ int (*init)(struct nvkm_object *);
+ int (*fini)(struct nvkm_object *, bool suspend);
+ int (*mthd)(struct nvkm_object *, u32, void *, u32);
+ int (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **);
+ int (* map)(struct nvkm_object *, u64 *, u32 *);
+ u8 (*rd08)(struct nvkm_object *, u64 offset);
+ u16 (*rd16)(struct nvkm_object *, u64 offset);
+ u32 (*rd32)(struct nvkm_object *, u64 offset);
+ void (*wr08)(struct nvkm_object *, u64 offset, u8 data);
+ void (*wr16)(struct nvkm_object *, u64 offset, u16 data);
+ void (*wr32)(struct nvkm_object *, u64 offset, u32 data);
};
-static inline struct nouveau_ofuncs *
+static inline struct nvkm_ofuncs *
nv_ofuncs(void *obj)
{
return nv_oclass(obj)->ofuncs;
}
-int nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
-int nouveau_object_inc(struct nouveau_object *);
-int nouveau_object_dec(struct nouveau_object *, bool suspend);
-
-void nouveau_object_debug(void);
+int nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **);
+int nvkm_object_inc(struct nvkm_object *);
+int nvkm_object_dec(struct nvkm_object *, bool suspend);
+void nvkm_object_debug(void);
static inline int
nv_exec(void *obj, u32 mthd, void *data, u32 size)
{
- struct nouveau_omthds *method = nv_oclass(obj)->omthds;
+ struct nvkm_omthds *method = nv_oclass(obj)->omthds;
while (method && method->call) {
if (mthd >= method->start && mthd <= method->limit)
@@ -202,5 +200,4 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
}
return 0;
}
-
#endif
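
nv_exec() above dispatches a method to the first nvkm_omthds entry whose [start, limit] range covers it, so per-class software methods are just a table plus handlers. A hedged sketch (not from this patch) with a made-up method range and handler; the table would be referenced from an nvkm_oclass:

    static int
    hypothetical_mthd(struct nvkm_object *object, u32 mthd,
                      void *data, u32 size)
    {
            /* accept and ignore methods 0x0100..0x01fc */
            return 0;
    }

    static struct nvkm_omthds
    hypothetical_omthds[] = {
            { 0x0100, 0x01fc, hypothetical_mthd },
            {}
    };
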
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
new file mode 100644
index 000000000000..532bfa8e3f72
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_OPTION_H__
+#define __NVKM_OPTION_H__
+#include <core/os.h>
+
+const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
+bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
+int nvkm_dbgopt(const char *optstr, const char *sub);
+
+/* compares unterminated string 'str' with zero-terminated string 'cmp' */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+ if (strlen(cmp) != len)
+ return len;
+ return strncasecmp(str, cmp, len);
+}
+#endif
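
These helpers parse the comma-separated "option=value" configuration strings the driver accepts; nvkm_stropt() returns a pointer into the (unterminated) value plus its length, which is exactly what strncasecmpz() expects, while nvkm_boolopt() and nvkm_dbgopt() follow the same lookup pattern for booleans and debug levels. A hedged sketch (not from this patch) using an example option name:

    static bool
    hypothetical_use_platform_bios(const char *cfg)
    {
            int len;
            const char *val = nvkm_stropt(cfg, "NvBios", &len);

            /* true for e.g. cfg = "NvBios=platform,NvMSI=0" */
            return val && !strncasecmpz(val, "platform", len);
    }
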
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
new file mode 100644
index 000000000000..cd57e238ddd3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_OS_H__
+#define __NVKM_OS_H__
+#include <nvif/os.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
new file mode 100644
index 000000000000..837e4fe966a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
@@ -0,0 +1,58 @@
+#ifndef __NVKM_PARENT_H__
+#define __NVKM_PARENT_H__
+#include <core/object.h>
+
+struct nvkm_sclass {
+ struct nvkm_sclass *sclass;
+ struct nvkm_engine *engine;
+ struct nvkm_oclass *oclass;
+};
+
+struct nvkm_parent {
+ struct nvkm_object object;
+
+ struct nvkm_sclass *sclass;
+ u64 engine;
+
+ int (*context_attach)(struct nvkm_object *, struct nvkm_object *);
+ int (*context_detach)(struct nvkm_object *, bool suspend,
+ struct nvkm_object *);
+
+ int (*object_attach)(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 name);
+ void (*object_detach)(struct nvkm_object *parent, int cookie);
+};
+
+static inline struct nvkm_parent *
+nv_parent(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
+ nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+#define nvkm_parent_create(p,e,c,v,s,m,d) \
+ nvkm_parent_create_((p), (e), (c), (v), (s), (m), \
+ sizeof(**d), (void **)d)
+#define nvkm_parent_init(p) \
+ nvkm_object_init(&(p)->object)
+#define nvkm_parent_fini(p,s) \
+ nvkm_object_fini(&(p)->object, (s))
+
+int nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32 pclass,
+ struct nvkm_oclass *, u64 engcls,
+ int size, void **);
+void nvkm_parent_destroy(struct nvkm_parent *);
+
+void _nvkm_parent_dtor(struct nvkm_object *);
+#define _nvkm_parent_init nvkm_object_init
+#define _nvkm_parent_fini nvkm_object_fini
+
+int nvkm_parent_sclass(struct nvkm_object *, u16 handle,
+ struct nvkm_object **pengine,
+ struct nvkm_oclass **poclass);
+int nvkm_parent_lclass(struct nvkm_object *, u32 *, int);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
index 451b6ed20b7e..83648177059f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
@@ -1,13 +1,11 @@
-#ifndef __NOUVEAU_PRINTK_H__
-#define __NOUVEAU_PRINTK_H__
-
+#ifndef __NVKM_PRINTK_H__
+#define __NVKM_PRINTK_H__
#include <core/os.h>
#include <core/debug.h>
-
-struct nouveau_object;
+struct nvkm_object;
void __printf(3, 4)
-nv_printk_(struct nouveau_object *, int, const char *, ...);
+nv_printk_(struct nvkm_object *, int, const char *, ...);
#define nv_printk(o,l,f,a...) do { \
if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
@@ -21,12 +19,11 @@ nv_printk_(struct nouveau_object *, int, const char *, ...);
#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
-#define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a)
+#define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a)
#define nv_assert(f,a...) do { \
if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \
BUG_ON(1); \
} while(0)
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
new file mode 100644
index 000000000000..cc132eaa10cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -0,0 +1,20 @@
+#ifndef __NVKM_RAMHT_H__
+#define __NVKM_RAMHT_H__
+#include <core/gpuobj.h>
+
+struct nvkm_ramht {
+ struct nvkm_gpuobj gpuobj;
+ int bits;
+};
+
+int nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context);
+void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
+int nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size,
+ u32 align, struct nvkm_ramht **);
+
+static inline void
+nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref)
+{
+ nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref);
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index e9632e931616..6fdc39116aac 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,23 +1,23 @@
-#ifndef __NOUVEAU_SUBDEV_H__
-#define __NOUVEAU_SUBDEV_H__
-
+#ifndef __NVKM_SUBDEV_H__
+#define __NVKM_SUBDEV_H__
#include <core/object.h>
+#include <core/devidx.h>
#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
#define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
-struct nouveau_subdev {
- struct nouveau_object base;
+struct nvkm_subdev {
+ struct nvkm_object object;
struct mutex mutex;
const char *name;
void __iomem *mmio;
u32 debug;
u32 unit;
- void (*intr)(struct nouveau_subdev *);
+ void (*intr)(struct nvkm_subdev *);
};
-static inline struct nouveau_subdev *
+static inline struct nvkm_subdev *
nv_subdev(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
@@ -28,27 +28,29 @@ nv_subdev(void *obj)
}
static inline int
-nv_subidx(struct nouveau_object *object)
+nv_subidx(struct nvkm_subdev *subdev)
{
- return nv_hclass(nv_subdev(object)) & 0xff;
+ return nv_hclass(subdev) & 0xff;
}
-#define nouveau_subdev_create(p,e,o,v,s,f,d) \
- nouveau_subdev_create_((p), (e), (o), (v), (s), (f), \
+struct nvkm_subdev *nvkm_subdev(void *obj, int idx);
+
+#define nvkm_subdev_create(p,e,o,v,s,f,d) \
+ nvkm_subdev_create_((p), (e), (o), (v), (s), (f), \
sizeof(**d),(void **)d)
-int nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32 pclass,
+int nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32 pclass,
const char *sname, const char *fname,
int size, void **);
-void nouveau_subdev_destroy(struct nouveau_subdev *);
-int nouveau_subdev_init(struct nouveau_subdev *);
-int nouveau_subdev_fini(struct nouveau_subdev *, bool suspend);
-void nouveau_subdev_reset(struct nouveau_object *);
+void nvkm_subdev_destroy(struct nvkm_subdev *);
+int nvkm_subdev_init(struct nvkm_subdev *);
+int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
+void nvkm_subdev_reset(struct nvkm_object *);
-void _nouveau_subdev_dtor(struct nouveau_object *);
-int _nouveau_subdev_init(struct nouveau_object *);
-int _nouveau_subdev_fini(struct nouveau_object *, bool suspend);
+void _nvkm_subdev_dtor(struct nvkm_object *);
+int _nvkm_subdev_init(struct nvkm_object *);
+int _nvkm_subdev_fini(struct nvkm_object *, bool suspend);
#define s_printk(s,l,f,a...) do { \
if ((s)->debug >= OS_DBG_##l) { \
@@ -59,7 +61,7 @@ int _nouveau_subdev_fini(struct nouveau_object *, bool suspend);
static inline u8
nv_rd08(void *obj, u32 addr)
{
- struct nouveau_subdev *subdev = nv_subdev(obj);
+ struct nvkm_subdev *subdev = nv_subdev(obj);
u8 data = ioread8(subdev->mmio + addr);
nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
return data;
@@ -68,7 +70,7 @@ nv_rd08(void *obj, u32 addr)
static inline u16
nv_rd16(void *obj, u32 addr)
{
- struct nouveau_subdev *subdev = nv_subdev(obj);
+ struct nvkm_subdev *subdev = nv_subdev(obj);
u16 data = ioread16_native(subdev->mmio + addr);
nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
return data;
@@ -77,7 +79,7 @@ nv_rd16(void *obj, u32 addr)
static inline u32
nv_rd32(void *obj, u32 addr)
{
- struct nouveau_subdev *subdev = nv_subdev(obj);
+ struct nvkm_subdev *subdev = nv_subdev(obj);
u32 data = ioread32_native(subdev->mmio + addr);
nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
return data;
@@ -86,7 +88,7 @@ nv_rd32(void *obj, u32 addr)
static inline void
nv_wr08(void *obj, u32 addr, u8 data)
{
- struct nouveau_subdev *subdev = nv_subdev(obj);
+ struct nvkm_subdev *subdev = nv_subdev(obj);
nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
iowrite8(data, subdev->mmio + addr);
}
@@ -94,7 +96,7 @@ nv_wr08(void *obj, u32 addr, u8 data)
static inline void
nv_wr16(void *obj, u32 addr, u16 data)
{
- struct nouveau_subdev *subdev = nv_subdev(obj);
+ struct nvkm_subdev *subdev = nv_subdev(obj);
nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
iowrite16_native(data, subdev->mmio + addr);
}
@@ -102,7 +104,7 @@ nv_wr16(void *obj, u32 addr, u16 data)
static inline void
nv_wr32(void *obj, u32 addr, u32 data)
{
- struct nouveau_subdev *subdev = nv_subdev(obj);
+ struct nvkm_subdev *subdev = nv_subdev(obj);
nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
iowrite32_native(data, subdev->mmio + addr);
}
@@ -114,5 +116,4 @@ nv_mask(void *obj, u32 addr, u32 mask, u32 data)
nv_wr32(obj, addr, (temp & ~mask) | data);
return temp;
}
-
#endif
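
The accessors above wrap the subdev's ioremapped register window, and nv_mask() composes them into the usual read-modify-write. A hedged sketch (not from this patch); the object is anything nv_subdev() accepts, and the register offset and bit are made up:

    static void
    hypothetical_toggle_bit(void *obj)
    {
            /* clear bit 12 of a (made-up) control register, then set it */
            nv_mask(obj, 0x000200, 0x00001000, 0x00000000);
            nv_rd32(obj, 0x000200);                 /* flush the write */
            nv_mask(obj, 0x000200, 0x00001000, 0x00001000);
    }
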
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
new file mode 100644
index 000000000000..e489beef2b92
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -0,0 +1,5 @@
+#ifndef __NVKM_BSP_H__
+#define __NVKM_BSP_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g84_bsp_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
new file mode 100644
index 000000000000..7e29c52617ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -0,0 +1,13 @@
+#ifndef __NVKM_CE_H__
+#define __NVKM_CE_H__
+#include <core/engine.h>
+
+void gt215_ce_intr(struct nvkm_subdev *);
+
+extern struct nvkm_oclass gt215_ce_oclass;
+extern struct nvkm_oclass gf100_ce0_oclass;
+extern struct nvkm_oclass gf100_ce1_oclass;
+extern struct nvkm_oclass gk104_ce0_oclass;
+extern struct nvkm_oclass gk104_ce1_oclass;
+extern struct nvkm_oclass gk104_ce2_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
new file mode 100644
index 000000000000..57c29e91bad5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -0,0 +1,5 @@
+#ifndef __NVKM_CIPHER_H__
+#define __NVKM_CIPHER_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g84_cipher_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
index 672d3c8f4145..5d4805e67e76 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
@@ -27,7 +27,4 @@ int nv50_identify(struct nouveau_device *);
int nvc0_identify(struct nouveau_device *);
int nve0_identify(struct nouveau_device *);
int gm100_identify(struct nouveau_device *);
-
-struct nouveau_device *nouveau_device_find(u64 name);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
new file mode 100644
index 000000000000..a5e1ed81312f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_DISP_H__
+#define __NVKM_DISP_H__
+#include <core/engine.h>
+#include <core/event.h>
+
+struct nvkm_disp {
+ struct nvkm_engine base;
+
+ struct list_head outp;
+
+ struct nvkm_event hpd;
+ struct nvkm_event vblank;
+};
+
+static inline struct nvkm_disp *
+nvkm_disp(void *obj)
+{
+ return (void *)nvkm_engine(obj, NVDEV_ENGINE_DISP);
+}
+
+extern struct nvkm_oclass *nv04_disp_oclass;
+extern struct nvkm_oclass *nv50_disp_oclass;
+extern struct nvkm_oclass *g84_disp_oclass;
+extern struct nvkm_oclass *gt200_disp_oclass;
+extern struct nvkm_oclass *g94_disp_oclass;
+extern struct nvkm_oclass *gt215_disp_oclass;
+extern struct nvkm_oclass *gf110_disp_oclass;
+extern struct nvkm_oclass *gk104_disp_oclass;
+extern struct nvkm_oclass *gk110_disp_oclass;
+extern struct nvkm_oclass *gm107_disp_oclass;
+extern struct nvkm_oclass *gm204_disp_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
new file mode 100644
index 000000000000..c4fce8afcf83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_DMAOBJ_H__
+#define __NVKM_DMAOBJ_H__
+#include <core/engine.h>
+struct nvkm_gpuobj;
+
+struct nvkm_dmaobj {
+ struct nvkm_object base;
+ u32 target;
+ u32 access;
+ u64 start;
+ u64 limit;
+};
+
+struct nvkm_dmaeng {
+ struct nvkm_engine base;
+
+ /* creates a "physical" dma object from a struct nvkm_dmaobj */
+ int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+ struct nvkm_gpuobj **);
+};
+
+extern struct nvkm_oclass *nv04_dmaeng_oclass;
+extern struct nvkm_oclass *nv50_dmaeng_oclass;
+extern struct nvkm_oclass *gf100_dmaeng_oclass;
+extern struct nvkm_oclass *gf110_dmaeng_oclass;
+#endif
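
The bind() hook is what turns the software-side nvkm_dmaobj description into an instance-memory object a channel can actually reference. A hedged sketch (not from this patch); recovering the dmaeng through the object's engine pointer is an assumption about how callers reach it:

    static int
    hypothetical_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
                      struct nvkm_gpuobj **pgpuobj)
    {
            struct nvkm_dmaeng *dmaeng = (void *)dmaobj->base.engine;

            return dmaeng->bind(dmaobj, parent, pgpuobj);
    }
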
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
new file mode 100644
index 000000000000..bd38cf9130fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NVKM_FALCON_H__
+#define __NVKM_FALCON_H__
+#include <core/engctx.h>
+
+struct nvkm_falcon_chan {
+ struct nvkm_engctx base;
+};
+
+#define nvkm_falcon_context_create(p,e,c,g,s,a,f,d) \
+ nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nvkm_falcon_context_destroy(d) \
+ nvkm_engctx_destroy(&(d)->base)
+#define nvkm_falcon_context_init(d) \
+ nvkm_engctx_init(&(d)->base)
+#define nvkm_falcon_context_fini(d,s) \
+ nvkm_engctx_fini(&(d)->base, (s))
+
+#define _nvkm_falcon_context_ctor _nvkm_engctx_ctor
+#define _nvkm_falcon_context_dtor _nvkm_engctx_dtor
+#define _nvkm_falcon_context_init _nvkm_engctx_init
+#define _nvkm_falcon_context_fini _nvkm_engctx_fini
+#define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32
+#define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32
+
+struct nvkm_falcon_data {
+ bool external;
+};
+
+#include <core/engine.h>
+
+struct nvkm_falcon {
+ struct nvkm_engine base;
+
+ u32 addr;
+ u8 version;
+ u8 secret;
+
+ struct nvkm_gpuobj *core;
+ bool external;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } code;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nvkm_falcon_create(p,e,c,b,d,i,f,r) \
+ nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+#define nvkm_falcon_destroy(p) \
+ nvkm_engine_destroy(&(p)->base)
+#define nvkm_falcon_init(p) ({ \
+ struct nvkm_falcon *falcon = (p); \
+ _nvkm_falcon_init(nv_object(falcon)); \
+})
+#define nvkm_falcon_fini(p,s) ({ \
+ struct nvkm_falcon *falcon = (p); \
+ _nvkm_falcon_fini(nv_object(falcon), (s)); \
+})
+
+int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32, bool, const char *,
+ const char *, int, void **);
+
+void nvkm_falcon_intr(struct nvkm_subdev *subdev);
+
+#define _nvkm_falcon_dtor _nvkm_engine_dtor
+int _nvkm_falcon_init(struct nvkm_object *);
+int _nvkm_falcon_fini(struct nvkm_object *, bool);
+u32 _nvkm_falcon_rd32(struct nvkm_object *, u64);
+void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
new file mode 100644
index 000000000000..05321ce7ab15
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -0,0 +1,126 @@
+#ifndef __NVKM_FIFO_H__
+#define __NVKM_FIFO_H__
+#include <core/namedb.h>
+
+struct nvkm_fifo_chan {
+ struct nvkm_namedb namedb;
+ struct nvkm_dmaobj *pushdma;
+ struct nvkm_gpuobj *pushgpu;
+ void __iomem *user;
+ u64 addr;
+ u32 size;
+ u16 chid;
+ atomic_t refcnt; /* NV04_NVSW_SET_REF */
+};
+
+static inline struct nvkm_fifo_chan *
+nvkm_fifo_chan(void *obj)
+{
+ return (void *)nv_namedb(obj);
+}
+
+#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
+ nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
+ (m), sizeof(**d), (void **)d)
+#define nvkm_fifo_channel_init(p) \
+ nvkm_namedb_init(&(p)->namedb)
+#define nvkm_fifo_channel_fini(p,s) \
+ nvkm_namedb_fini(&(p)->namedb, (s))
+
+int nvkm_fifo_channel_create_(struct nvkm_object *,
+ struct nvkm_object *,
+ struct nvkm_oclass *,
+ int bar, u32 addr, u32 size, u32 push,
+ u64 engmask, int len, void **);
+void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
+
+#define _nvkm_fifo_channel_init _nvkm_namedb_init
+#define _nvkm_fifo_channel_fini _nvkm_namedb_fini
+
+void _nvkm_fifo_channel_dtor(struct nvkm_object *);
+int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
+u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
+void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
+int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+
+#include <core/gpuobj.h>
+
+struct nvkm_fifo_base {
+ struct nvkm_gpuobj gpuobj;
+};
+
+#define nvkm_fifo_context_create(p,e,c,g,s,a,f,d) \
+ nvkm_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
+#define nvkm_fifo_context_destroy(p) \
+ nvkm_gpuobj_destroy(&(p)->gpuobj)
+#define nvkm_fifo_context_init(p) \
+ nvkm_gpuobj_init(&(p)->gpuobj)
+#define nvkm_fifo_context_fini(p,s) \
+ nvkm_gpuobj_fini(&(p)->gpuobj, (s))
+
+#define _nvkm_fifo_context_dtor _nvkm_gpuobj_dtor
+#define _nvkm_fifo_context_init _nvkm_gpuobj_init
+#define _nvkm_fifo_context_fini _nvkm_gpuobj_fini
+#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
+#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
+
+#include <core/engine.h>
+#include <core/event.h>
+
+struct nvkm_fifo {
+ struct nvkm_engine base;
+
+ struct nvkm_event cevent; /* channel creation event */
+ struct nvkm_event uevent; /* async user trigger */
+
+ struct nvkm_object **channel;
+ spinlock_t lock;
+ u16 min;
+ u16 max;
+
+ int (*chid)(struct nvkm_fifo *, struct nvkm_object *);
+ void (*pause)(struct nvkm_fifo *, unsigned long *);
+ void (*start)(struct nvkm_fifo *, unsigned long *);
+};
+
+static inline struct nvkm_fifo *
+nvkm_fifo(void *obj)
+{
+ return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
+}
+
+#define nvkm_fifo_create(o,e,c,fc,lc,d) \
+ nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
+#define nvkm_fifo_init(p) \
+ nvkm_engine_init(&(p)->base)
+#define nvkm_fifo_fini(p,s) \
+ nvkm_engine_fini(&(p)->base, (s))
+
+int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int min, int max,
+ int size, void **);
+void nvkm_fifo_destroy(struct nvkm_fifo *);
+const char *
+nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
+
+#define _nvkm_fifo_init _nvkm_engine_init
+#define _nvkm_fifo_fini _nvkm_engine_fini
+
+extern struct nvkm_oclass *nv04_fifo_oclass;
+extern struct nvkm_oclass *nv10_fifo_oclass;
+extern struct nvkm_oclass *nv17_fifo_oclass;
+extern struct nvkm_oclass *nv40_fifo_oclass;
+extern struct nvkm_oclass *nv50_fifo_oclass;
+extern struct nvkm_oclass *g84_fifo_oclass;
+extern struct nvkm_oclass *gf100_fifo_oclass;
+extern struct nvkm_oclass *gk104_fifo_oclass;
+extern struct nvkm_oclass *gk20a_fifo_oclass;
+extern struct nvkm_oclass *gk208_fifo_oclass;
+
+int nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
+ struct nvkm_notify *);
+void nvkm_fifo_uevent(struct nvkm_fifo *);
+
+void nv04_fifo_intr(struct nvkm_subdev *);
+int nv04_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
+#endif
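
Implementations provide chid() to resolve a channel object to its index, and pause()/start() to bracket operations that must not race channel scheduling. A hedged sketch (not from this patch) of the bracket; the flags cookie is simply handed back to start():

    static void
    hypothetical_with_fifo_paused(struct nvkm_fifo *fifo)
    {
            unsigned long flags;

            fifo->pause(fifo, &flags);
            /* ... inspect or reprogram channel state safely ... */
            fifo->start(fifo, &flags);
    }
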
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
new file mode 100644
index 000000000000..93ef1f2bfac4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -0,0 +1,86 @@
+#ifndef __NVKM_GR_H__
+#define __NVKM_GR_H__
+#include <core/engctx.h>
+
+struct nvkm_gr_chan {
+ struct nvkm_engctx base;
+};
+
+#define nvkm_gr_context_create(p,e,c,g,s,a,f,d) \
+ nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nvkm_gr_context_destroy(d) \
+ nvkm_engctx_destroy(&(d)->base)
+#define nvkm_gr_context_init(d) \
+ nvkm_engctx_init(&(d)->base)
+#define nvkm_gr_context_fini(d,s) \
+ nvkm_engctx_fini(&(d)->base, (s))
+
+#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
+#define _nvkm_gr_context_init _nvkm_engctx_init
+#define _nvkm_gr_context_fini _nvkm_engctx_fini
+#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
+#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
+
+#include <core/engine.h>
+
+struct nvkm_gr {
+ struct nvkm_engine base;
+
+ /* Returns chipset-specific counts of units packed into a u64.
+ */
+ u64 (*units)(struct nvkm_gr *);
+};
+
+static inline struct nvkm_gr *
+nvkm_gr(void *obj)
+{
+ return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
+}
+
+#define nvkm_gr_create(p,e,c,y,d) \
+ nvkm_engine_create((p), (e), (c), (y), "PGR", "graphics", (d))
+#define nvkm_gr_destroy(d) \
+ nvkm_engine_destroy(&(d)->base)
+#define nvkm_gr_init(d) \
+ nvkm_engine_init(&(d)->base)
+#define nvkm_gr_fini(d,s) \
+ nvkm_engine_fini(&(d)->base, (s))
+
+#define _nvkm_gr_dtor _nvkm_engine_dtor
+#define _nvkm_gr_init _nvkm_engine_init
+#define _nvkm_gr_fini _nvkm_engine_fini
+
+extern struct nvkm_oclass nv04_gr_oclass;
+extern struct nvkm_oclass nv10_gr_oclass;
+extern struct nvkm_oclass nv20_gr_oclass;
+extern struct nvkm_oclass nv25_gr_oclass;
+extern struct nvkm_oclass nv2a_gr_oclass;
+extern struct nvkm_oclass nv30_gr_oclass;
+extern struct nvkm_oclass nv34_gr_oclass;
+extern struct nvkm_oclass nv35_gr_oclass;
+extern struct nvkm_oclass nv40_gr_oclass;
+extern struct nvkm_oclass nv50_gr_oclass;
+extern struct nvkm_oclass *gf100_gr_oclass;
+extern struct nvkm_oclass *gf108_gr_oclass;
+extern struct nvkm_oclass *gf104_gr_oclass;
+extern struct nvkm_oclass *gf110_gr_oclass;
+extern struct nvkm_oclass *gf117_gr_oclass;
+extern struct nvkm_oclass *gf119_gr_oclass;
+extern struct nvkm_oclass *gk104_gr_oclass;
+extern struct nvkm_oclass *gk20a_gr_oclass;
+extern struct nvkm_oclass *gk110_gr_oclass;
+extern struct nvkm_oclass *gk110b_gr_oclass;
+extern struct nvkm_oclass *gk208_gr_oclass;
+extern struct nvkm_oclass *gm107_gr_oclass;
+
+#include <core/enum.h>
+
+extern const struct nvkm_bitfield nv04_gr_nsource[];
+extern struct nvkm_ofuncs nv04_gr_ofuncs;
+bool nv04_gr_idle(void *obj);
+
+extern const struct nvkm_bitfield nv10_gr_intr_name[];
+extern const struct nvkm_bitfield nv10_gr_nstatus[];
+
+extern const struct nvkm_enum nv50_data_error_names[];
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
new file mode 100644
index 000000000000..4e500b398064
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -0,0 +1,62 @@
+#ifndef __NVKM_MPEG_H__
+#define __NVKM_MPEG_H__
+#include <core/engctx.h>
+
+struct nvkm_mpeg_chan {
+ struct nvkm_engctx base;
+};
+
+#define nvkm_mpeg_context_create(p,e,c,g,s,a,f,d) \
+ nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nvkm_mpeg_context_destroy(d) \
+ nvkm_engctx_destroy(&(d)->base)
+#define nvkm_mpeg_context_init(d) \
+ nvkm_engctx_init(&(d)->base)
+#define nvkm_mpeg_context_fini(d,s) \
+ nvkm_engctx_fini(&(d)->base, (s))
+
+#define _nvkm_mpeg_context_dtor _nvkm_engctx_dtor
+#define _nvkm_mpeg_context_init _nvkm_engctx_init
+#define _nvkm_mpeg_context_fini _nvkm_engctx_fini
+#define _nvkm_mpeg_context_rd32 _nvkm_engctx_rd32
+#define _nvkm_mpeg_context_wr32 _nvkm_engctx_wr32
+
+#include <core/engine.h>
+
+struct nvkm_mpeg {
+ struct nvkm_engine base;
+};
+
+#define nvkm_mpeg_create(p,e,c,d) \
+ nvkm_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
+#define nvkm_mpeg_destroy(d) \
+ nvkm_engine_destroy(&(d)->base)
+#define nvkm_mpeg_init(d) \
+ nvkm_engine_init(&(d)->base)
+#define nvkm_mpeg_fini(d,s) \
+ nvkm_engine_fini(&(d)->base, (s))
+
+#define _nvkm_mpeg_dtor _nvkm_engine_dtor
+#define _nvkm_mpeg_init _nvkm_engine_init
+#define _nvkm_mpeg_fini _nvkm_engine_fini
+
+extern struct nvkm_oclass nv31_mpeg_oclass;
+extern struct nvkm_oclass nv40_mpeg_oclass;
+extern struct nvkm_oclass nv44_mpeg_oclass;
+extern struct nvkm_oclass nv50_mpeg_oclass;
+extern struct nvkm_oclass g84_mpeg_oclass;
+extern struct nvkm_ofuncs nv31_mpeg_ofuncs;
+extern struct nvkm_oclass nv31_mpeg_cclass;
+extern struct nvkm_oclass nv31_mpeg_sclass[];
+extern struct nvkm_oclass nv40_mpeg_sclass[];
+void nv31_mpeg_intr(struct nvkm_subdev *);
+void nv31_mpeg_tile_prog(struct nvkm_engine *, int);
+int nv31_mpeg_init(struct nvkm_object *);
+
+extern struct nvkm_ofuncs nv50_mpeg_ofuncs;
+int nv50_mpeg_context_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv50_mpeg_intr(struct nvkm_subdev *);
+int nv50_mpeg_init(struct nvkm_object *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
new file mode 100644
index 000000000000..54b7672eed9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_MSPDEC_H__
+#define __NVKM_MSPDEC_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g98_mspdec_oclass;
+extern struct nvkm_oclass gf100_mspdec_oclass;
+extern struct nvkm_oclass gk104_mspdec_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
new file mode 100644
index 000000000000..c6c69d0a8d01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_MSPPP_H__
+#define __NVKM_MSPPP_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g98_msppp_oclass;
+extern struct nvkm_oclass gf100_msppp_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
new file mode 100644
index 000000000000..1f193b7bd6c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_MSVLD_H__
+#define __NVKM_MSVLD_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g98_msvld_oclass;
+extern struct nvkm_oclass gf100_msvld_oclass;
+extern struct nvkm_oclass gk104_msvld_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
new file mode 100644
index 000000000000..93181bbf0f63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_PM_H__
+#define __NVKM_PM_H__
+#include <core/engine.h>
+
+struct nvkm_perfdom;
+struct nvkm_perfctr;
+struct nvkm_pm {
+ struct nvkm_engine base;
+
+ struct nvkm_perfctx *context;
+ void *profile_data;
+
+ struct list_head domains;
+ u32 sequence;
+
+ /*XXX: temp for daemon backend */
+ u32 pwr[8];
+ u32 last;
+};
+
+static inline struct nvkm_pm *
+nvkm_pm(void *obj)
+{
+ return (void *)nvkm_engine(obj, NVDEV_ENGINE_PM);
+}
+
+extern struct nvkm_oclass *nv40_pm_oclass;
+extern struct nvkm_oclass *nv50_pm_oclass;
+extern struct nvkm_oclass *g84_pm_oclass;
+extern struct nvkm_oclass *gt215_pm_oclass;
+extern struct nvkm_oclass gf100_pm_oclass;
+extern struct nvkm_oclass gk104_pm_oclass;
+extern struct nvkm_oclass gk110_pm_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
new file mode 100644
index 000000000000..44590a2a479d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -0,0 +1,5 @@
+#ifndef __NVKM_SEC_H__
+#define __NVKM_SEC_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g98_sec_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
new file mode 100644
index 000000000000..a529013c92ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -0,0 +1,50 @@
+#ifndef __NVKM_SW_H__
+#define __NVKM_SW_H__
+#include <core/engctx.h>
+
+struct nvkm_sw_chan {
+ struct nvkm_engctx base;
+
+ int (*flip)(void *);
+ void *flip_data;
+};
+
+#define nvkm_sw_context_create(p,e,c,d) \
+ nvkm_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
+#define nvkm_sw_context_destroy(d) \
+ nvkm_engctx_destroy(&(d)->base)
+#define nvkm_sw_context_init(d) \
+ nvkm_engctx_init(&(d)->base)
+#define nvkm_sw_context_fini(d,s) \
+ nvkm_engctx_fini(&(d)->base, (s))
+
+#define _nvkm_sw_context_dtor _nvkm_engctx_dtor
+#define _nvkm_sw_context_init _nvkm_engctx_init
+#define _nvkm_sw_context_fini _nvkm_engctx_fini
+
+#include <core/engine.h>
+
+struct nvkm_sw {
+ struct nvkm_engine base;
+};
+
+#define nvkm_sw_create(p,e,c,d) \
+ nvkm_engine_create((p), (e), (c), true, "SW", "software", (d))
+#define nvkm_sw_destroy(d) \
+ nvkm_engine_destroy(&(d)->base)
+#define nvkm_sw_init(d) \
+ nvkm_engine_init(&(d)->base)
+#define nvkm_sw_fini(d,s) \
+ nvkm_engine_fini(&(d)->base, (s))
+
+#define _nvkm_sw_dtor _nvkm_engine_dtor
+#define _nvkm_sw_init _nvkm_engine_init
+#define _nvkm_sw_fini _nvkm_engine_fini
+
+extern struct nvkm_oclass *nv04_sw_oclass;
+extern struct nvkm_oclass *nv10_sw_oclass;
+extern struct nvkm_oclass *nv50_sw_oclass;
+extern struct nvkm_oclass *gf100_sw_oclass;
+
+void nv04_sw_intr(struct nvkm_subdev *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
new file mode 100644
index 000000000000..7851f18c5add
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -0,0 +1,5 @@
+#ifndef __NVKM_VP_H__
+#define __NVKM_VP_H__
+#include <core/engine.h>
+extern struct nvkm_oclass g84_vp_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
new file mode 100644
index 000000000000..7a216cca2865
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -0,0 +1,35 @@
+#ifndef __NVKM_XTENSA_H__
+#define __NVKM_XTENSA_H__
+#include <core/engine.h>
+struct nvkm_gpuobj;
+
+struct nvkm_xtensa {
+ struct nvkm_engine base;
+
+ u32 addr;
+ struct nvkm_gpuobj *gpu_fw;
+ u32 fifo_val;
+ u32 unkd28;
+};
+
+#define nvkm_xtensa_create(p,e,c,b,d,i,f,r) \
+ nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+
+int _nvkm_xtensa_engctx_ctor(struct nvkm_object *,
+ struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+
+void _nvkm_xtensa_intr(struct nvkm_subdev *);
+int nvkm_xtensa_create_(struct nvkm_object *,
+ struct nvkm_object *,
+ struct nvkm_oclass *, u32, bool,
+ const char *, const char *,
+ int, void **);
+#define _nvkm_xtensa_dtor _nvkm_engine_dtor
+int _nvkm_xtensa_init(struct nvkm_object *);
+int _nvkm_xtensa_fini(struct nvkm_object *, bool);
+u32 _nvkm_xtensa_rd32(struct nvkm_object *, u64);
+void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
new file mode 100644
index 000000000000..c7a007b8bc10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_BAR_H__
+#define __NVKM_BAR_H__
+#include <core/subdev.h>
+struct nvkm_mem;
+struct nvkm_vma;
+
+struct nvkm_bar {
+ struct nvkm_subdev base;
+
+ int (*alloc)(struct nvkm_bar *, struct nvkm_object *,
+ struct nvkm_mem *, struct nvkm_object **);
+
+ int (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
+ struct nvkm_vma *);
+ int (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
+ struct nvkm_vma *);
+ void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
+ void (*flush)(struct nvkm_bar *);
+
+ /* whether the BAR supports being ioremapped write-combined, or must be mapped uncached */
+ bool iomap_uncached;
+};
+
+static inline struct nvkm_bar *
+nvkm_bar(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BAR);
+}
+
+extern struct nvkm_oclass nv50_bar_oclass;
+extern struct nvkm_oclass gf100_bar_oclass;
+extern struct nvkm_oclass gk20a_bar_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
new file mode 100644
index 000000000000..cef287e0bbf2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_BIOS_H__
+#define __NVKM_BIOS_H__
+#include <core/subdev.h>
+
+struct nvkm_bios {
+ struct nvkm_subdev base;
+ u32 size;
+ u8 *data;
+
+ u32 bmp_offset;
+ u32 bit_offset;
+
+ struct {
+ u8 major;
+ u8 chip;
+ u8 minor;
+ u8 micro;
+ u8 patch;
+ } version;
+};
+
+static inline struct nvkm_bios *
+nvkm_bios(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VBIOS);
+}
+
+u8 nvbios_checksum(const u8 *data, int size);
+u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+
+extern struct nvkm_oclass nvkm_bios_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index 1f84d3612dd8..cf202c793a1d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -1,14 +1,13 @@
#ifndef __NVBIOS_M0203_H__
#define __NVBIOS_M0203_H__
-
struct nvbios_M0203T {
#define M0203T_TYPE_RAMCFG 0x00
u8 type;
u16 pointer;
};
-u32 nvbios_M0203Te(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_M0203Tp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_M0203Te(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0203Tp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_M0203T *);
struct nvbios_M0203E {
@@ -22,10 +21,9 @@ struct nvbios_M0203E {
u8 group;
};
-u32 nvbios_M0203Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
-u32 nvbios_M0203Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_M0203Ee(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0203Ep(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_M0203E *);
-u32 nvbios_M0203Em(struct nouveau_bios *, u8 ramcfg, u8 *ver, u8 *hdr,
+u32 nvbios_M0203Em(struct nvkm_bios *, u8 ramcfg, u8 *ver, u8 *hdr,
struct nvbios_M0203E *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
new file mode 100644
index 000000000000..d34608ff241e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
@@ -0,0 +1,29 @@
+#ifndef __NVBIOS_M0205_H__
+#define __NVBIOS_M0205_H__
+struct nvbios_M0205T {
+ u16 freq;
+};
+
+u32 nvbios_M0205Te(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u32 nvbios_M0205Tp(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz,
+ struct nvbios_M0205T *);
+
+struct nvbios_M0205E {
+ u8 type;
+};
+
+u32 nvbios_M0205Ee(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0205Ep(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_M0205E *);
+
+struct nvbios_M0205S {
+ u8 data;
+};
+
+u32 nvbios_M0205Se(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0205Sp(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_M0205S *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
new file mode 100644
index 000000000000..c7ff8d9526e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_M0209_H__
+#define __NVBIOS_M0209_H__
+u32 nvbios_M0209Te(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+struct nvbios_M0209E {
+ u8 v00_40;
+ u8 bits;
+ u8 modulo;
+ u8 v02_40;
+ u8 v02_07;
+ u8 v03;
+};
+
+u32 nvbios_M0209Ee(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0209Ep(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_M0209E *);
+
+struct nvbios_M0209S {
+ u32 data[0x200];
+};
+
+u32 nvbios_M0209Se(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0209Sp(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_M0209S *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
new file mode 100644
index 000000000000..1c1c52eac97d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
@@ -0,0 +1,21 @@
+#ifndef __NVBIOS_P0260_H__
+#define __NVBIOS_P0260_H__
+u32 nvbios_P0260Te(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
+
+struct nvbios_P0260E {
+ u32 data;
+};
+
+u32 nvbios_P0260Ee(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_P0260Ep(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_P0260E *);
+
+struct nvbios_P0260X {
+ u32 data;
+};
+
+u32 nvbios_P0260Xe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_P0260Xp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_P0260X *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
index 73f060b07981..6711732b7cb1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
@@ -1,6 +1,5 @@
#ifndef __NVBIOS_BIT_H__
#define __NVBIOS_BIT_H__
-
struct bit_entry {
u8 id;
u8 version;
@@ -8,6 +7,5 @@ struct bit_entry {
u16 offset;
};
-int bit_entry(struct nouveau_bios *, u8 id, struct bit_entry *);
-
+int bit_entry(struct nvkm_bios *, u8 id, struct bit_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 10e4dbca649a..4107aa546a21 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -1,8 +1,7 @@
#ifndef __NVBIOS_BMP_H__
#define __NVBIOS_BMP_H__
-
static inline u16
-bmp_version(struct nouveau_bios *bios)
+bmp_version(struct nvkm_bios *bios)
{
if (bios->bmp_offset) {
return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
@@ -13,7 +12,7 @@ bmp_version(struct nouveau_bios *bios)
}
static inline u16
-bmp_mem_init_table(struct nouveau_bios *bios)
+bmp_mem_init_table(struct nvkm_bios *bios)
{
if (bmp_version(bios) >= 0x0300)
return nv_ro16(bios, bios->bmp_offset + 24);
@@ -21,7 +20,7 @@ bmp_mem_init_table(struct nouveau_bios *bios)
}
static inline u16
-bmp_sdr_seq_table(struct nouveau_bios *bios)
+bmp_sdr_seq_table(struct nvkm_bios *bios)
{
if (bmp_version(bios) >= 0x0300)
return nv_ro16(bios, bios->bmp_offset + 26);
@@ -29,11 +28,10 @@ bmp_sdr_seq_table(struct nouveau_bios *bios)
}
static inline u16
-bmp_ddr_seq_table(struct nouveau_bios *bios)
+bmp_ddr_seq_table(struct nvkm_bios *bios)
{
if (bmp_version(bios) >= 0x0300)
return nv_ro16(bios, bios->bmp_offset + 28);
return 0x0000;
}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
new file mode 100644
index 000000000000..934b0ae5521d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_BOOST_H__
+#define __NVBIOS_BOOST_H__
+u16 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+
+struct nvbios_boostE {
+ u8 pstate;
+ u32 min;
+ u32 max;
+};
+
+u16 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u16 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+ struct nvbios_boostE *);
+u16 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+ struct nvbios_boostE *);
+
+struct nvbios_boostS {
+ u8 domain;
+ u8 percent;
+ u32 min;
+ u32 max;
+};
+
+u16 nvbios_boostSe(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8);
+u16 nvbios_boostSp(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8,
+ struct nvbios_boostS *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index f3930c27cb7a..e8e77ee24776 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -1,6 +1,5 @@
#ifndef __NVBIOS_CONN_H__
#define __NVBIOS_CONN_H__
-
enum dcb_connector_type {
DCB_CONNECTOR_VGA = 0x00,
DCB_CONNECTOR_TV_0 = 0x10,
@@ -25,8 +24,8 @@ enum dcb_connector_type {
struct nvbios_connT {
};
-u32 nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_connTp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_connT *info);
struct nvbios_connE {
@@ -39,8 +38,7 @@ struct nvbios_connE {
u8 lcdid;
};
-u32 nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *hdr);
-u32 nvbios_connEp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *hdr,
+u32 nvbios_connEe(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *hdr);
+u32 nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *hdr,
struct nvbios_connE *info);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
new file mode 100644
index 000000000000..2f0e0c8e83be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -0,0 +1,26 @@
+#ifndef __NVBIOS_CSTEP_H__
+#define __NVBIOS_CSTEP_H__
+u16 nvbios_cstepTe(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
+
+struct nvbios_cstepE {
+ u8 pstate;
+ u8 index;
+};
+
+u16 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_cstepE *);
+u16 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
+ struct nvbios_cstepE *);
+
+struct nvbios_cstepX {
+ u32 freq;
+ u8 unkn[2];
+ u8 voltage;
+};
+
+u16 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_cstepX *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
index 123270e9813a..4892a65ddd48 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
@@ -1,8 +1,5 @@
#ifndef __NVBIOS_DCB_H__
#define __NVBIOS_DCB_H__
-
-struct nouveau_bios;
-
enum dcb_output_type {
DCB_OUTPUT_ANALOG = 0x0,
DCB_OUTPUT_TV = 0x1,
@@ -57,13 +54,12 @@ struct dcb_output {
bool i2c_upper_default;
};
-u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
-u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+u16 dcb_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
+u16 dcb_outp(struct nvkm_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nvkm_bios *, u8 idx, u8 *, u8 *,
struct dcb_output *);
-u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+u16 dcb_outp_match(struct nvkm_bios *, u16 type, u16 mask, u8 *, u8 *,
struct dcb_output *);
-int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
- (struct nouveau_bios *, void *, int index, u16 entry));
-
+int dcb_outp_foreach(struct nvkm_bios *, void *data, int (*exec)
+ (struct nvkm_bios *, void *, int index, u16 entry));
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
new file mode 100644
index 000000000000..db10c11f0595
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -0,0 +1,39 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+u16 nvbios_disp_table(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+ u16 data;
+};
+
+u16 nvbios_disp_entry(struct nvkm_bios *, u8 idx, u8 *ver, u8 *hdr, u8 *sub);
+u16 nvbios_disp_parse(struct nvkm_bios *, u8 idx, u8 *ver, u8 *hdr, u8 *sub,
+ struct nvbios_disp *);
+
+struct nvbios_outp {
+ u16 type;
+ u16 mask;
+ u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nvkm_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nvkm_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
+u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
+
+struct nvbios_ocfg {
+ u16 match;
+ u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
new file mode 100644
index 000000000000..b4d39df70d4e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
@@ -0,0 +1,31 @@
+#ifndef __NVBIOS_DP_H__
+#define __NVBIOS_DP_H__
+struct nvbios_dpout {
+ u16 type;
+ u16 mask;
+ u8 flags;
+ u32 script[5];
+ u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nvkm_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nvkm_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+ u8 pc;
+ u8 dc;
+ u8 pe;
+ u8 tx_pu;
+};
+
+u16
+nvbios_dpcfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nvkm_bios *, u16 outp, u8 pc, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_dpcfg *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index 949fee3af8fb..6d3bedc633b3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -1,8 +1,5 @@
#ifndef __NVBIOS_EXTDEV_H__
#define __NVBIOS_EXTDEV_H__
-
-struct nouveau_bios;
-
enum nvbios_extdev_type {
NVBIOS_EXTDEV_LM89 = 0x02,
NVBIOS_EXTDEV_VT1103M = 0x40,
@@ -20,11 +17,9 @@ struct nvbios_extdev_func {
};
int
-nvbios_extdev_parse(struct nouveau_bios *, int, struct nvbios_extdev_func *);
+nvbios_extdev_parse(struct nvkm_bios *, int, struct nvbios_extdev_func *);
int
-nvbios_extdev_find(struct nouveau_bios *, enum nvbios_extdev_type,
+nvbios_extdev_find(struct nvkm_bios *, enum nvbios_extdev_type,
struct nvbios_extdev_func *);
-
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index 119d0874e041..693ea7d9ec43 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -1,8 +1,6 @@
#ifndef __NVBIOS_FAN_H__
#define __NVBIOS_FAN_H__
-
#include <subdev/bios/therm.h>
-u16 nvbios_fan_parse(struct nouveau_bios *bios, struct nvbios_therm_fan *fan);
-
+u16 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index c7b2e586be0b..33be260ddd38 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -1,6 +1,5 @@
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
-
enum dcb_gpio_func_name {
DCB_GPIO_PANEL_POWER = 0x01,
DCB_GPIO_TVDAC0 = 0x0c,
@@ -38,11 +37,10 @@ struct dcb_gpio_func {
u8 param;
};
-u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
-u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
+u16 dcb_gpio_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nvkm_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nvkm_bios *, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *);
-u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+u16 dcb_gpio_match(struct nvkm_bios *, int idx, u8 func, u8 line,
u8 *ver, u8 *len, struct dcb_gpio_func *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
index c9bb112895af..85c529ecf9b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
@@ -1,8 +1,5 @@
#ifndef __NVBIOS_I2C_H__
#define __NVBIOS_I2C_H__
-
-struct nouveau_bios;
-
enum dcb_i2c_type {
/* matches bios type field prior to ccb 4.1 */
DCB_I2C_NV04_BIT = 0x00,
@@ -22,8 +19,7 @@ struct dcb_i2c_entry {
u8 auxch;
};
-u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dcb_i2c_entry(struct nouveau_bios *, u8 index, u8 *ver, u8 *len);
-int dcb_i2c_parse(struct nouveau_bios *, u8 index, struct dcb_i2c_entry *);
-
+u16 dcb_i2c_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_i2c_entry(struct nvkm_bios *, u8 index, u8 *ver, u8 *len);
+int dcb_i2c_parse(struct nvkm_bios *, u8 index, struct dcb_i2c_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
index 3348b4580843..e15d63b9a5eb 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
@@ -1,6 +1,5 @@
#ifndef __NVBIOS_IMAGE_H__
#define __NVBIOS_IMAGE_H__
-
struct nvbios_image {
u32 base;
u32 size;
@@ -8,6 +7,5 @@ struct nvbios_image {
bool last;
};
-bool nvbios_image(struct nouveau_bios *, int, struct nvbios_image *);
-
+bool nvbios_image(struct nvkm_bios *, int, struct nvbios_image *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index ca2f6bf37f46..578a667eed3b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,9 +1,8 @@
#ifndef __NVBIOS_INIT_H__
#define __NVBIOS_INIT_H__
-
struct nvbios_init {
- struct nouveau_subdev *subdev;
- struct nouveau_bios *bios;
+ struct nvkm_subdev *subdev;
+ struct nvkm_bios *bios;
u16 offset;
struct dcb_output *outp;
int crtc;
@@ -17,6 +16,5 @@ struct nvbios_init {
};
int nvbios_exec(struct nvbios_init *);
-int nvbios_init(struct nouveau_subdev *, bool execute);
-
+int nvbios_init(struct nvkm_subdev *, bool execute);
#endif
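
struct nvbios_init is the execution context handed to the script interpreter: nvbios_exec() runs a single script, nvbios_init() posts the whole board. The fragment below is a rough sketch of invoking a script at a known offset. Only the members visible in the hunk above are filled in (the structure has further fields that designated initialization leaves zeroed here), and a crtc of -1 is assumed to mean "no specific head".

#include <subdev/bios.h>
#include <subdev/bios/init.h>

/* Sketch: run the VBIOS init script found at "offset".  "subdev" and
 * "bios" are assumed to be valid pointers owned by the caller. */
static int
run_script(struct nvkm_subdev *subdev, struct nvkm_bios *bios, u16 offset)
{
	struct nvbios_init init = {
		.subdev = subdev,
		.bios   = bios,
		.offset = offset,
		.outp   = NULL,
		.crtc   = -1,	/* assumed: no particular head */
	};

	return nvbios_exec(&init);
}
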
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
new file mode 100644
index 000000000000..4e31b64c5edf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
@@ -0,0 +1,6 @@
+#ifndef __NVBIOS_MXM_H__
+#define __NVBIOS_MXM_H__
+u16 mxm_table(struct nvkm_bios *, u8 *ver, u8 *hdr);
+u8 mxm_sor_map(struct nvkm_bios *, u8 conn);
+u8 mxm_ddc_map(struct nvkm_bios *, u8 port);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
new file mode 100644
index 000000000000..64a59549b7ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
@@ -0,0 +1,10 @@
+#ifndef __NVBIOS_NPDE_H__
+#define __NVBIOS_NPDE_H__
+struct nvbios_npdeT {
+ u32 image_size;
+ bool last;
+};
+
+u32 nvbios_npdeTe(struct nvkm_bios *, u32);
+u32 nvbios_npdeTp(struct nvkm_bios *, u32, struct nvbios_npdeT *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
index 3d634a06dca1..e85931541f4f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
@@ -1,6 +1,5 @@
#ifndef __NVBIOS_PCIR_H__
#define __NVBIOS_PCIR_H__
-
struct nvbios_pcirT {
u16 vendor_id;
u16 device_id;
@@ -11,8 +10,7 @@ struct nvbios_pcirT {
bool last;
};
-u32 nvbios_pcirTe(struct nouveau_bios *, u32, u8 *ver, u16 *hdr);
-u32 nvbios_pcirTp(struct nouveau_bios *, u32, u8 *ver, u16 *hdr,
+u32 nvbios_pcirTe(struct nvkm_bios *, u32, u8 *ver, u16 *hdr);
+u32 nvbios_pcirTp(struct nvkm_bios *, u32, u8 *ver, u16 *hdr,
struct nvbios_pcirT *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index 16ff06ec2a88..7cc2becabc69 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,9 +1,6 @@
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
-
-struct nouveau_bios;
-
-u16 nvbios_perf_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+u16 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
struct nvbios_perfE {
@@ -18,9 +15,9 @@ struct nvbios_perfE {
u32 script;
};
-u16 nvbios_perf_entry(struct nouveau_bios *, int idx,
+u16 nvbios_perf_entry(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_perfEp(struct nouveau_bios *, int idx,
+u16 nvbios_perfEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
struct nvbios_perfS {
@@ -31,17 +28,14 @@ struct nvbios_perfS {
};
};
-u32 nvbios_perfSe(struct nouveau_bios *, u32 data, int idx,
+u32 nvbios_perfSe(struct nvkm_bios *, u32 data, int idx,
u8 *ver, u8 *hdr, u8 cnt, u8 len);
-u32 nvbios_perfSp(struct nouveau_bios *, u32 data, int idx,
+u32 nvbios_perfSp(struct nvkm_bios *, u32 data, int idx,
u8 *ver, u8 *hdr, u8 cnt, u8 len, struct nvbios_perfS *);
struct nvbios_perf_fan {
u32 pwm_divisor;
};
-int
-nvbios_perf_fan_parse(struct nouveau_bios *, struct nvbios_perf_fan *);
-
-
+int nvbios_perf_fan_parse(struct nvkm_bios *, struct nvbios_perf_fan *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
index b2f3d4d0aa49..5a69978d1e3b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
@@ -1,8 +1,7 @@
#ifndef __NVBIOS_PLL_H__
#define __NVBIOS_PLL_H__
-
/*XXX: kill me */
-struct nouveau_pll_vals {
+struct nvkm_pll_vals {
union {
struct {
#ifdef __BIG_ENDIAN
@@ -20,10 +19,8 @@ struct nouveau_pll_vals {
int refclk;
};
-struct nouveau_bios;
-
/* these match types in pll limits table version 0x40,
- * nouveau uses them on all chipsets internally where a
+ * nvkm uses them on all chipsets internally where a
* specific pll needs to be referenced, but the exact
* register isn't known.
*/
@@ -74,6 +71,5 @@ struct nvbios_pll {
} vco1, vco2;
};
-int nvbios_pll_parse(struct nouveau_bios *, u32 type, struct nvbios_pll *);
-
+int nvbios_pll_parse(struct nvkm_bios *, u32 type, struct nvbios_pll *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
index 9de593deaea8..d606875c125a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
@@ -1,11 +1,10 @@
#ifndef __NVBIOS_PMU_H__
#define __NVBIOS_PMU_H__
-
struct nvbios_pmuT {
};
-u32 nvbios_pmuTe(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_pmuTp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_pmuTe(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_pmuTp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_pmuT *);
struct nvbios_pmuE {
@@ -13,8 +12,8 @@ struct nvbios_pmuE {
u32 data;
};
-u32 nvbios_pmuEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
-u32 nvbios_pmuEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_pmuEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_pmuEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_pmuE *);
struct nvbios_pmuR {
@@ -32,6 +31,5 @@ struct nvbios_pmuR {
u32 args_addr_pmu;
};
-bool nvbios_pmuRm(struct nouveau_bios *, u8 type, struct nvbios_pmuR *);
-
+bool nvbios_pmuRm(struct nvkm_bios *, u8 type, struct nvbios_pmuR *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 4a0e0ceb41ba..420426793880 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -1,8 +1,5 @@
#ifndef __NVBIOS_RAMCFG_H__
#define __NVBIOS_RAMCFG_H__
-
-struct nouveau_bios;
-
struct nvbios_ramcfg {
unsigned rammap_ver;
unsigned rammap_hdr;
@@ -139,7 +136,6 @@ struct nvbios_ramcfg {
};
};
-u8 nvbios_ramcfg_count(struct nouveau_bios *);
-u8 nvbios_ramcfg_index(struct nouveau_subdev *);
-
+u8 nvbios_ramcfg_count(struct nvkm_bios *);
+u8 nvbios_ramcfg_index(struct nvkm_subdev *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
new file mode 100644
index 000000000000..609a905ec780
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -0,0 +1,21 @@
+#ifndef __NVBIOS_RAMMAP_H__
+#define __NVBIOS_RAMMAP_H__
+#include <subdev/bios/ramcfg.h>
+
+u32 nvbios_rammapTe(struct nvkm_bios *, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+u32 nvbios_rammapEe(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
+u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
+
+u32 nvbios_rammapSe(struct nvkm_bios *, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp(struct nvkm_bios *, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr, struct nvbios_ramcfg *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
index 295d093f3b30..dd3ba960e75d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
@@ -1,8 +1,5 @@
#ifndef __NVBIOS_THERM_H__
#define __NVBIOS_THERM_H__
-
-struct nouveau_bios;
-
struct nvbios_therm_threshold {
u8 temp;
u8 hysteresis;
@@ -30,8 +27,8 @@ enum nvbios_therm_fan_type {
};
/* no vbios have more than 6 */
-#define NOUVEAU_TEMP_FAN_TRIP_MAX 10
-struct nouveau_therm_trip_point {
+#define NVKM_TEMP_FAN_TRIP_MAX 10
+struct nvbios_therm_trip_point {
int fan_duty;
int temp;
int hysteresis;
@@ -55,7 +52,7 @@ struct nvbios_therm_fan {
u16 slow_down_period;
enum nvbios_therm_fan_mode fan_mode;
- struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
+ struct nvbios_therm_trip_point trip[NVKM_TEMP_FAN_TRIP_MAX];
u8 nr_fan_trip;
u8 linear_min_temp;
u8 linear_max_temp;
@@ -67,11 +64,9 @@ enum nvbios_therm_domain {
};
int
-nvbios_therm_sensor_parse(struct nouveau_bios *, enum nvbios_therm_domain,
+nvbios_therm_sensor_parse(struct nvkm_bios *, enum nvbios_therm_domain,
struct nvbios_therm_sensor *);
int
-nvbios_therm_fan_parse(struct nouveau_bios *, struct nvbios_therm_fan *);
-
-
+nvbios_therm_fan_parse(struct nvkm_bios *, struct nvbios_therm_fan *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
new file mode 100644
index 000000000000..339a826aa176
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -0,0 +1,11 @@
+#ifndef __NVBIOS_TIMING_H__
+#define __NVBIOS_TIMING_H__
+#include <subdev/bios/ramcfg.h>
+
+u16 nvbios_timingTe(struct nvkm_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_timingEe(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timingEp(struct nvkm_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
new file mode 100644
index 000000000000..6633c6db9281
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -0,0 +1,21 @@
+#ifndef __NVBIOS_VMAP_H__
+#define __NVBIOS_VMAP_H__
+struct nvbios_vmap {
+};
+
+u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_vmap *);
+
+struct nvbios_vmap_entry {
+ u8 unk0;
+ u8 link;
+ u32 min;
+ u32 max;
+ s32 arg[6];
+};
+
+u16 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+ struct nvbios_vmap_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
new file mode 100644
index 000000000000..eb2de4b85bbd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -0,0 +1,23 @@
+#ifndef __NVBIOS_VOLT_H__
+#define __NVBIOS_VOLT_H__
+struct nvbios_volt {
+ u8 vidmask;
+ u32 min;
+ u32 max;
+ u32 base;
+ s16 step;
+};
+
+u16 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_volt *);
+
+struct nvbios_volt_entry {
+ u32 voltage;
+ u8 vid;
+};
+
+u16 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+ struct nvbios_volt_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
index 360baab52e4c..0c0fe234ff12 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
@@ -11,9 +11,8 @@ struct nvbios_xpio {
u8 flags;
};
-u16 dcb_xpio_table(struct nouveau_bios *, u8 idx,
+u16 dcb_xpio_table(struct nvkm_bios *, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx,
+u16 dcb_xpio_parse(struct nvkm_bios *, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
new file mode 100644
index 000000000000..fba83c04849e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -0,0 +1,50 @@
+#ifndef __NVKM_BUS_H__
+#define __NVKM_BUS_H__
+#include <core/subdev.h>
+
+struct nvkm_bus_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nvkm_bus {
+ struct nvkm_subdev base;
+ int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
+ u32 hwsq_size;
+};
+
+static inline struct nvkm_bus *
+nvkm_bus(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BUS);
+}
+
+#define nvkm_bus_create(p, e, o, d) \
+ nvkm_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
+ sizeof(**d), (void **)d)
+#define nvkm_bus_destroy(p) \
+ nvkm_subdev_destroy(&(p)->base)
+#define nvkm_bus_init(p) \
+ nvkm_subdev_init(&(p)->base)
+#define nvkm_bus_fini(p, s) \
+ nvkm_subdev_fini(&(p)->base, (s))
+
+#define _nvkm_bus_dtor _nvkm_subdev_dtor
+#define _nvkm_bus_init _nvkm_subdev_init
+#define _nvkm_bus_fini _nvkm_subdev_fini
+
+extern struct nvkm_oclass *nv04_bus_oclass;
+extern struct nvkm_oclass *nv31_bus_oclass;
+extern struct nvkm_oclass *nv50_bus_oclass;
+extern struct nvkm_oclass *g94_bus_oclass;
+extern struct nvkm_oclass *gf100_bus_oclass;
+
+/* interface to sequencer */
+struct nvkm_hwsq;
+int nvkm_hwsq_init(struct nvkm_bus *, struct nvkm_hwsq **);
+int nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
+void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
+void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
+void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
+void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
+#endif
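
The sequencer interface at the bottom of this header wraps hwsq_exec(): a script is built up with the nvkm_hwsq_* calls and then either executed or discarded by the fini flag. A compact sketch follows, using a made-up register address and delay purely for illustration.

#include <subdev/bus.h>

/* Sketch: build a two-step hardware sequencer script and execute it. */
static int
run_hwsq_example(struct nvkm_bus *pbus)
{
	struct nvkm_hwsq *hwsq;
	int ret;

	ret = nvkm_hwsq_init(pbus, &hwsq);
	if (ret)
		return ret;

	nvkm_hwsq_wr32(hwsq, 0x001234, 0x00000000);	/* placeholder register */
	nvkm_hwsq_nsec(hwsq, 2000);			/* let it settle */

	return nvkm_hwsq_fini(&hwsq, true);		/* true: execute the script */
}
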
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
new file mode 100644
index 000000000000..f5d303850d8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -0,0 +1,161 @@
+#ifndef __NVKM_CLK_H__
+#define __NVKM_CLK_H__
+#include <core/subdev.h>
+#include <core/notify.h>
+struct nvbios_pll;
+struct nvkm_pll_vals;
+
+enum nv_clk_src {
+ nv_clk_src_crystal,
+ nv_clk_src_href,
+
+ nv_clk_src_hclk,
+ nv_clk_src_hclkm3,
+ nv_clk_src_hclkm3d2,
+ nv_clk_src_hclkm2d3, /* NVAA */
+ nv_clk_src_hclkm4, /* NVAA */
+ nv_clk_src_cclk, /* NVAA */
+
+ nv_clk_src_host,
+
+ nv_clk_src_sppll0,
+ nv_clk_src_sppll1,
+
+ nv_clk_src_mpllsrcref,
+ nv_clk_src_mpllsrc,
+ nv_clk_src_mpll,
+ nv_clk_src_mdiv,
+
+ nv_clk_src_core,
+ nv_clk_src_core_intm,
+ nv_clk_src_shader,
+
+ nv_clk_src_mem,
+
+ nv_clk_src_gpc,
+ nv_clk_src_rop,
+ nv_clk_src_hubk01,
+ nv_clk_src_hubk06,
+ nv_clk_src_hubk07,
+ nv_clk_src_copy,
+ nv_clk_src_daemon,
+ nv_clk_src_disp,
+ nv_clk_src_vdec,
+
+ nv_clk_src_dom6,
+
+ nv_clk_src_max,
+};
+
+struct nvkm_cstate {
+ struct list_head head;
+ u8 voltage;
+ u32 domain[nv_clk_src_max];
+};
+
+struct nvkm_pstate {
+ struct list_head head;
+ struct list_head list; /* c-states */
+ struct nvkm_cstate base;
+ u8 pstate;
+ u8 fanspeed;
+};
+
+struct nvkm_domain {
+ enum nv_clk_src name;
+ u8 bios; /* 0xff for none */
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+ u8 flags;
+ const char *mname;
+ int mdiv;
+};
+
+struct nvkm_clk {
+ struct nvkm_subdev base;
+
+ struct nvkm_domain *domains;
+ struct nvkm_pstate bstate;
+
+ struct list_head states;
+ int state_nr;
+
+ struct work_struct work;
+ wait_queue_head_t wait;
+ atomic_t waiting;
+
+ struct nvkm_notify pwrsrc_ntfy;
+ int pwrsrc;
+ int pstate; /* current */
+ int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
+ int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
+ int astate; /* perfmon adjustment (base) */
+ int tstate; /* thermal adjustment (max-) */
+ int dstate; /* display adjustment (min+) */
+
+ bool allow_reclock;
+
+ int (*read)(struct nvkm_clk *, enum nv_clk_src);
+ int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
+ int (*prog)(struct nvkm_clk *);
+ void (*tidy)(struct nvkm_clk *);
+
+ /*XXX: die, these are here *only* to support the completely
+ * bat-shit insane what-was-nvkm_hw.c code
+ */
+ int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk,
+ struct nvkm_pll_vals *pv);
+ int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv);
+};
+
+static inline struct nvkm_clk *
+nvkm_clk(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_CLK);
+}
+
+#define nvkm_clk_create(p,e,o,i,r,s,n,d) \
+ nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
+ (void **)d)
+#define nvkm_clk_destroy(p) ({ \
+ struct nvkm_clk *clk = (p); \
+ _nvkm_clk_dtor(nv_object(clk)); \
+})
+#define nvkm_clk_init(p) ({ \
+ struct nvkm_clk *clk = (p); \
+ _nvkm_clk_init(nv_object(clk)); \
+})
+#define nvkm_clk_fini(p,s) ({ \
+ struct nvkm_clk *clk = (p); \
+ _nvkm_clk_fini(nv_object(clk), (s)); \
+})
+
+int nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *,
+ struct nvkm_domain *, struct nvkm_pstate *,
+ int, bool, int, void **);
+void _nvkm_clk_dtor(struct nvkm_object *);
+int _nvkm_clk_init(struct nvkm_object *);
+int _nvkm_clk_fini(struct nvkm_object *, bool);
+
+extern struct nvkm_oclass nv04_clk_oclass;
+extern struct nvkm_oclass nv40_clk_oclass;
+extern struct nvkm_oclass *nv50_clk_oclass;
+extern struct nvkm_oclass *g84_clk_oclass;
+extern struct nvkm_oclass *mcp77_clk_oclass;
+extern struct nvkm_oclass gt215_clk_oclass;
+extern struct nvkm_oclass gf100_clk_oclass;
+extern struct nvkm_oclass gk104_clk_oclass;
+extern struct nvkm_oclass gk20a_clk_oclass;
+
+int nv04_clk_pll_set(struct nvkm_clk *, u32 type, u32 freq);
+int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
+ struct nvkm_pll_vals *);
+int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
+int gt215_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *,
+ int clk, struct nvkm_pll_vals *);
+
+int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
+int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
+int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+#endif
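
The clock subdev exposes per-domain reads plus user/adaptive/thermal/display state requests. The sketch below only exercises the read hook; the assumptions that it reports kHz and returns a negative errno on failure are mine, not spelled out by the header.

#include <subdev/clk.h>

/* Sketch: sample the current core clock through the subdev hook. */
static int
log_core_clock(struct nvkm_clk *clk)
{
	/* nv_clk_src_core is one of the domains enumerated above */
	int khz = clk->read(clk, nv_clk_src_core);

	if (khz < 0)
		return khz;	/* assumed: negative errno on failure */

	/* khz now holds the current core clock (units assumed to be kHz) */
	return 0;
}
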
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
new file mode 100644
index 000000000000..d1bbe0d62b35
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_DEVINIT_H__
+#define __NVKM_DEVINIT_H__
+#include <core/subdev.h>
+
+struct nvkm_devinit {
+ struct nvkm_subdev base;
+ bool post;
+ void (*meminit)(struct nvkm_devinit *);
+ int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
+ u32 (*mmio)(struct nvkm_devinit *, u32 addr);
+};
+
+static inline struct nvkm_devinit *
+nvkm_devinit(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_DEVINIT);
+}
+
+extern struct nvkm_oclass *nv04_devinit_oclass;
+extern struct nvkm_oclass *nv05_devinit_oclass;
+extern struct nvkm_oclass *nv10_devinit_oclass;
+extern struct nvkm_oclass *nv1a_devinit_oclass;
+extern struct nvkm_oclass *nv20_devinit_oclass;
+extern struct nvkm_oclass *nv50_devinit_oclass;
+extern struct nvkm_oclass *g84_devinit_oclass;
+extern struct nvkm_oclass *g98_devinit_oclass;
+extern struct nvkm_oclass *gt215_devinit_oclass;
+extern struct nvkm_oclass *mcp89_devinit_oclass;
+extern struct nvkm_oclass *gf100_devinit_oclass;
+extern struct nvkm_oclass *gm107_devinit_oclass;
+extern struct nvkm_oclass *gm204_devinit_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
new file mode 100644
index 000000000000..16da56cf43b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -0,0 +1,154 @@
+#ifndef __NVKM_FB_H__
+#define __NVKM_FB_H__
+#include <core/subdev.h>
+
+#include <subdev/mmu.h>
+
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO 1
+#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM 8
+#define NV_MEM_ACCESS_NOSNOOP 16
+
+#define NV_MEM_TARGET_VRAM 0
+#define NV_MEM_TARGET_PCI 1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM 3
+#define NV_MEM_TARGET_GART 4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+struct nvkm_mem {
+ struct drm_device *dev;
+
+ struct nvkm_vma bar_vma;
+ struct nvkm_vma vma[2];
+ u8 page_shift;
+
+ struct nvkm_mm_node *tag;
+ struct list_head regions;
+ dma_addr_t *pages;
+ u32 memtype;
+ u64 offset;
+ u64 size;
+ struct sg_table *sg;
+};
+
+struct nvkm_fb_tile {
+ struct nvkm_mm_node *tag;
+ u32 addr;
+ u32 limit;
+ u32 pitch;
+ u32 zcomp;
+};
+
+struct nvkm_fb {
+ struct nvkm_subdev base;
+
+ bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+
+ struct nvkm_ram *ram;
+
+ struct nvkm_mm vram;
+ struct nvkm_mm tags;
+
+ struct {
+ struct nvkm_fb_tile region[16];
+ int regions;
+ void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+ void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *);
+ void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+ void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+ } tile;
+};
+
+static inline struct nvkm_fb *
+nvkm_fb(void *obj)
+{
+ /* fbram uses this before device subdev pointer is valid */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_FB)
+ return obj;
+
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FB);
+}
+
+extern struct nvkm_oclass *nv04_fb_oclass;
+extern struct nvkm_oclass *nv10_fb_oclass;
+extern struct nvkm_oclass *nv1a_fb_oclass;
+extern struct nvkm_oclass *nv20_fb_oclass;
+extern struct nvkm_oclass *nv25_fb_oclass;
+extern struct nvkm_oclass *nv30_fb_oclass;
+extern struct nvkm_oclass *nv35_fb_oclass;
+extern struct nvkm_oclass *nv36_fb_oclass;
+extern struct nvkm_oclass *nv40_fb_oclass;
+extern struct nvkm_oclass *nv41_fb_oclass;
+extern struct nvkm_oclass *nv44_fb_oclass;
+extern struct nvkm_oclass *nv46_fb_oclass;
+extern struct nvkm_oclass *nv47_fb_oclass;
+extern struct nvkm_oclass *nv49_fb_oclass;
+extern struct nvkm_oclass *nv4e_fb_oclass;
+extern struct nvkm_oclass *nv50_fb_oclass;
+extern struct nvkm_oclass *g84_fb_oclass;
+extern struct nvkm_oclass *gt215_fb_oclass;
+extern struct nvkm_oclass *mcp77_fb_oclass;
+extern struct nvkm_oclass *mcp89_fb_oclass;
+extern struct nvkm_oclass *gf100_fb_oclass;
+extern struct nvkm_oclass *gk104_fb_oclass;
+extern struct nvkm_oclass *gk20a_fb_oclass;
+extern struct nvkm_oclass *gm107_fb_oclass;
+
+#include <subdev/bios.h>
+#include <subdev/bios/ramcfg.h>
+
+struct nvkm_ram_data {
+ struct list_head head;
+ struct nvbios_ramcfg bios;
+ u32 freq;
+};
+
+struct nvkm_ram {
+ struct nvkm_object base;
+ enum {
+ NV_MEM_TYPE_UNKNOWN = 0,
+ NV_MEM_TYPE_STOLEN,
+ NV_MEM_TYPE_SGRAM,
+ NV_MEM_TYPE_SDRAM,
+ NV_MEM_TYPE_DDR1,
+ NV_MEM_TYPE_DDR2,
+ NV_MEM_TYPE_DDR3,
+ NV_MEM_TYPE_GDDR2,
+ NV_MEM_TYPE_GDDR3,
+ NV_MEM_TYPE_GDDR4,
+ NV_MEM_TYPE_GDDR5
+ } type;
+ u64 stolen;
+ u64 size;
+ u32 tags;
+
+ int ranks;
+ int parts;
+ int part_mask;
+
+ int (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
+ u32 type, struct nvkm_mem **);
+ void (*put)(struct nvkm_fb *, struct nvkm_mem **);
+
+ int (*calc)(struct nvkm_fb *, u32 freq);
+ int (*prog)(struct nvkm_fb *);
+ void (*tidy)(struct nvkm_fb *);
+ u32 freq;
+ u32 mr[16];
+ u32 mr1_nuts;
+
+ struct nvkm_ram_data *next;
+ struct nvkm_ram_data former;
+ struct nvkm_ram_data xition;
+ struct nvkm_ram_data target;
+};
+#endif
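
VRAM itself is handled by the embedded nvkm_ram object: get()/put() allocate and free nvkm_mem chunks, while calc()/prog()/tidy() drive reclocking. A sketch of one allocation round trip follows; the alignment value and the zero size_nc/type arguments are placeholders chosen for illustration only.

#include <subdev/fb.h>

/* Sketch: allocate "size" bytes of VRAM through the ram backend and
 * free it again. */
static int
vram_alloc_example(struct nvkm_fb *fb, u64 size)
{
	struct nvkm_mem *mem = NULL;
	int ret;

	ret = fb->ram->get(fb, size, 0x1000 /* align */, 0 /* size_nc */,
			   0 /* type */, &mem);
	if (ret)
		return ret;

	/* ... use mem->offset / mem->size ... */

	fb->ram->put(fb, &mem);
	return 0;
}
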
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
new file mode 100644
index 000000000000..a1384786adc9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_FUSE_H__
+#define __NVKM_FUSE_H__
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nvkm_fuse {
+ struct nvkm_subdev base;
+};
+
+static inline struct nvkm_fuse *
+nvkm_fuse(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FUSE);
+}
+
+#define nvkm_fuse_create(p, e, o, d) \
+ nvkm_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
+
+int nvkm_fuse_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_fuse_dtor(struct nvkm_object *);
+int _nvkm_fuse_init(struct nvkm_object *);
+#define _nvkm_fuse_fini _nvkm_subdev_fini
+
+extern struct nvkm_oclass nv50_fuse_oclass;
+extern struct nvkm_oclass gf100_fuse_oclass;
+extern struct nvkm_oclass gm107_fuse_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
new file mode 100644
index 000000000000..ca5099a81b5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -0,0 +1,44 @@
+#ifndef __NVKM_GPIO_H__
+#define __NVKM_GPIO_H__
+#include <core/subdev.h>
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
+struct nvkm_gpio_ntfy_req {
+#define NVKM_GPIO_HI 0x01
+#define NVKM_GPIO_LO 0x02
+#define NVKM_GPIO_TOGGLED 0x03
+ u8 mask;
+ u8 line;
+};
+
+struct nvkm_gpio_ntfy_rep {
+ u8 mask;
+};
+
+struct nvkm_gpio {
+ struct nvkm_subdev base;
+
+ struct nvkm_event event;
+
+ void (*reset)(struct nvkm_gpio *, u8 func);
+ int (*find)(struct nvkm_gpio *, int idx, u8 tag, u8 line,
+ struct dcb_gpio_func *);
+ int (*set)(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
+ int (*get)(struct nvkm_gpio *, int idx, u8 tag, u8 line);
+};
+
+static inline struct nvkm_gpio *
+nvkm_gpio(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_GPIO);
+}
+
+extern struct nvkm_oclass *nv10_gpio_oclass;
+extern struct nvkm_oclass *nv50_gpio_oclass;
+extern struct nvkm_oclass *g94_gpio_oclass;
+extern struct nvkm_oclass *gf110_gpio_oclass;
+extern struct nvkm_oclass *gk104_gpio_oclass;
+#endif
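
The GPIO hooks are looked up by DCB function tag rather than raw line number. The sketch below queries panel-power state; DCB_GPIO_PANEL_POWER comes from the bios/gpio.h header included above, while passing 0xff as the line to mean "any line" is an assumption about the convention.

#include <subdev/gpio.h>

/* Sketch: read the logical state of the panel-power GPIO, if the DCB
 * defines one.  A negative return is assumed to mean absent/error. */
static int
panel_power_state(struct nvkm_gpio *gpio)
{
	return gpio->get(gpio, 0 /* idx */, DCB_GPIO_PANEL_POWER,
			 0xff /* assumed wildcard line */);
}
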
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
new file mode 100644
index 000000000000..a2e33730f05e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -0,0 +1,135 @@
+#ifndef __NVKM_I2C_H__
+#define __NVKM_I2C_H__
+#include <core/subdev.h>
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/i2c.h>
+
+#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_AUX(n) (0x10 + (n))
+#define NV_I2C_EXT(n) (0x20 + (n))
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
+
+#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
+#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
+#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
+
+struct nvkm_i2c_ntfy_req {
+#define NVKM_I2C_PLUG 0x01
+#define NVKM_I2C_UNPLUG 0x02
+#define NVKM_I2C_IRQ 0x04
+#define NVKM_I2C_DONE 0x08
+#define NVKM_I2C_ANY 0x0f
+ u8 mask;
+ u8 port;
+};
+
+struct nvkm_i2c_ntfy_rep {
+ u8 mask;
+};
+
+struct nvkm_i2c_port {
+ struct nvkm_object base;
+ struct i2c_adapter adapter;
+ struct mutex mutex;
+
+ struct list_head head;
+ u8 index;
+ int aux;
+
+ const struct nvkm_i2c_func *func;
+};
+
+struct nvkm_i2c_func {
+ void (*drive_scl)(struct nvkm_i2c_port *, int);
+ void (*drive_sda)(struct nvkm_i2c_port *, int);
+ int (*sense_scl)(struct nvkm_i2c_port *);
+ int (*sense_sda)(struct nvkm_i2c_port *);
+
+ int (*aux)(struct nvkm_i2c_port *, bool, u8, u32, u8 *, u8);
+ int (*pattern)(struct nvkm_i2c_port *, int pattern);
+ int (*lnk_ctl)(struct nvkm_i2c_port *, int nr, int bw, bool enh);
+ int (*drv_ctl)(struct nvkm_i2c_port *, int lane, int sw, int pe);
+};
+
+struct nvkm_i2c_board_info {
+ struct i2c_board_info dev;
+ u8 udelay; /* set to 0 to use the standard delay */
+};
+
+struct nvkm_i2c {
+ struct nvkm_subdev base;
+ struct nvkm_event event;
+
+ struct nvkm_i2c_port *(*find)(struct nvkm_i2c *, u8 index);
+ struct nvkm_i2c_port *(*find_type)(struct nvkm_i2c *, u16 type);
+ int (*acquire_pad)(struct nvkm_i2c_port *, unsigned long timeout);
+ void (*release_pad)(struct nvkm_i2c_port *);
+ int (*acquire)(struct nvkm_i2c_port *, unsigned long timeout);
+ void (*release)(struct nvkm_i2c_port *);
+ int (*identify)(struct nvkm_i2c *, int index,
+ const char *what, struct nvkm_i2c_board_info *,
+ bool (*match)(struct nvkm_i2c_port *,
+ struct i2c_board_info *, void *),
+ void *);
+
+ wait_queue_head_t wait;
+ struct list_head ports;
+};
+
+static inline struct nvkm_i2c *
+nvkm_i2c(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_I2C);
+}
+
+extern struct nvkm_oclass *nv04_i2c_oclass;
+extern struct nvkm_oclass *nv4e_i2c_oclass;
+extern struct nvkm_oclass *nv50_i2c_oclass;
+extern struct nvkm_oclass *g94_i2c_oclass;
+extern struct nvkm_oclass *gf110_i2c_oclass;
+extern struct nvkm_oclass *gf117_i2c_oclass;
+extern struct nvkm_oclass *gk104_i2c_oclass;
+extern struct nvkm_oclass *gm204_i2c_oclass;
+
+static inline int
+nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
+{
+ u8 val;
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 2);
+ if (ret != 2)
+ return -EIO;
+
+ return val;
+}
+
+static inline int
+nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 2, .buf = buf },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 1);
+ if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static inline bool
+nv_probe_i2c(struct nvkm_i2c_port *port, u8 addr)
+{
+ return nv_rdi2cr(port, addr, 0) >= 0;
+}
+
+int nv_rdaux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
+int nv_wraux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
+#endif
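
Besides the port lookup hooks, this header carries inline register-style read/write helpers built on i2c_transfer(). The sketch below combines the two: grab the board's default port and probe a hypothetical device address.

#include <subdev/i2c.h>

/* Sketch: check whether anything answers at a hypothetical 7-bit
 * address (0x50) on the default DCB i2c port. */
static bool
probe_example(struct nvkm_i2c *i2c)
{
	struct nvkm_i2c_port *port = i2c->find(i2c, NV_I2C_DEFAULT(0));

	if (!port)
		return false;

	return nv_probe_i2c(port, 0x50);
}
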
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
new file mode 100644
index 000000000000..2150d8af0040
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_IBUS_H__
+#define __NVKM_IBUS_H__
+#include <core/subdev.h>
+
+struct nvkm_ibus {
+ struct nvkm_subdev base;
+};
+
+static inline struct nvkm_ibus *
+nvkm_ibus(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_IBUS);
+}
+
+#define nvkm_ibus_create(p,e,o,d) \
+ nvkm_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \
+ sizeof(**d), (void **)d)
+#define nvkm_ibus_destroy(p) \
+ nvkm_subdev_destroy(&(p)->base)
+#define nvkm_ibus_init(p) \
+ nvkm_subdev_init(&(p)->base)
+#define nvkm_ibus_fini(p,s) \
+ nvkm_subdev_fini(&(p)->base, (s))
+
+#define _nvkm_ibus_dtor _nvkm_subdev_dtor
+#define _nvkm_ibus_init _nvkm_subdev_init
+#define _nvkm_ibus_fini _nvkm_subdev_fini
+
+extern struct nvkm_oclass gf100_ibus_oclass;
+extern struct nvkm_oclass gk104_ibus_oclass;
+extern struct nvkm_oclass gk20a_ibus_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
new file mode 100644
index 000000000000..d104c1aac807
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -0,0 +1,48 @@
+#ifndef __NVKM_INSTMEM_H__
+#define __NVKM_INSTMEM_H__
+#include <core/subdev.h>
+
+struct nvkm_instobj {
+ struct nvkm_object base;
+ struct list_head head;
+ u32 *suspend;
+ u64 addr;
+ u32 size;
+};
+
+static inline struct nvkm_instobj *
+nv_memobj(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
+ nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
+#endif
+ return obj;
+}
+
+struct nvkm_instmem {
+ struct nvkm_subdev base;
+ struct list_head list;
+
+ u32 reserved;
+ int (*alloc)(struct nvkm_instmem *, struct nvkm_object *,
+ u32 size, u32 align, struct nvkm_object **);
+};
+
+static inline struct nvkm_instmem *
+nvkm_instmem(void *obj)
+{
+ /* nv04/nv40 impls need to create objects in their constructor,
+ * which is before the subdev pointer is valid
+ */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
+ return obj;
+
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_INSTMEM);
+}
+
+extern struct nvkm_oclass *nv04_instmem_oclass;
+extern struct nvkm_oclass *nv40_instmem_oclass;
+extern struct nvkm_oclass *nv50_instmem_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
new file mode 100644
index 000000000000..cd5d29fc0565
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_LTC_H__
+#define __NVKM_LTC_H__
+#include <core/subdev.h>
+struct nvkm_mm_node;
+
+#define NVKM_LTC_MAX_ZBC_CNT 16
+
+struct nvkm_ltc {
+ struct nvkm_subdev base;
+
+ int (*tags_alloc)(struct nvkm_ltc *, u32 count,
+ struct nvkm_mm_node **);
+ void (*tags_free)(struct nvkm_ltc *, struct nvkm_mm_node **);
+ void (*tags_clear)(struct nvkm_ltc *, u32 first, u32 count);
+
+ int zbc_min;
+ int zbc_max;
+ int (*zbc_color_get)(struct nvkm_ltc *, int index, const u32[4]);
+ int (*zbc_depth_get)(struct nvkm_ltc *, int index, const u32);
+};
+
+static inline struct nvkm_ltc *
+nvkm_ltc(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_LTC);
+}
+
+extern struct nvkm_oclass *gf100_ltc_oclass;
+extern struct nvkm_oclass *gk104_ltc_oclass;
+extern struct nvkm_oclass *gm107_ltc_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
new file mode 100644
index 000000000000..055bea7702a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_MC_H__
+#define __NVKM_MC_H__
+#include <core/subdev.h>
+
+struct nvkm_mc {
+ struct nvkm_subdev base;
+ bool use_msi;
+ unsigned int irq;
+ void (*unk260)(struct nvkm_mc *, u32);
+};
+
+static inline struct nvkm_mc *
+nvkm_mc(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MC);
+}
+
+extern struct nvkm_oclass *nv04_mc_oclass;
+extern struct nvkm_oclass *nv40_mc_oclass;
+extern struct nvkm_oclass *nv44_mc_oclass;
+extern struct nvkm_oclass *nv4c_mc_oclass;
+extern struct nvkm_oclass *nv50_mc_oclass;
+extern struct nvkm_oclass *g94_mc_oclass;
+extern struct nvkm_oclass *g98_mc_oclass;
+extern struct nvkm_oclass *gf100_mc_oclass;
+extern struct nvkm_oclass *gf106_mc_oclass;
+extern struct nvkm_oclass *gk20a_mc_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
new file mode 100644
index 000000000000..3a5368776c31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -0,0 +1,104 @@
+#ifndef __NVKM_MMU_H__
+#define __NVKM_MMU_H__
+#include <core/subdev.h>
+#include <core/mm.h>
+struct nvkm_device;
+struct nvkm_mem;
+
+struct nvkm_vm_pgt {
+ struct nvkm_gpuobj *obj[2];
+ u32 refcount[2];
+};
+
+struct nvkm_vm_pgd {
+ struct list_head head;
+ struct nvkm_gpuobj *obj;
+};
+
+struct nvkm_vma {
+ struct list_head head;
+ int refcount;
+ struct nvkm_vm *vm;
+ struct nvkm_mm_node *node;
+ u64 offset;
+ u32 access;
+};
+
+struct nvkm_vm {
+ struct nvkm_mmu *mmu;
+ struct nvkm_mm mm;
+ struct kref refcount;
+
+ struct list_head pgd_list;
+ atomic_t engref[NVDEV_SUBDEV_NR];
+
+ struct nvkm_vm_pgt *pgt;
+ u32 fpde;
+ u32 lpde;
+};
+
+struct nvkm_mmu {
+ struct nvkm_subdev base;
+
+ u64 limit;
+ u8 dma_bits;
+ u32 pgt_bits;
+ u8 spg_shift;
+ u8 lpg_shift;
+
+ int (*create)(struct nvkm_mmu *, u64 offset, u64 length,
+ u64 mm_offset, struct nvkm_vm **);
+
+ void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
+ struct nvkm_gpuobj *pgt[2]);
+ void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
+ struct nvkm_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
+ void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
+ struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
+ void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
+ void (*flush)(struct nvkm_vm *);
+};
+
+static inline struct nvkm_mmu *
+nvkm_mmu(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MMU);
+}
+
+#define nvkm_mmu_create(p,e,o,i,f,d) \
+ nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
+#define nvkm_mmu_destroy(p) \
+ nvkm_subdev_destroy(&(p)->base)
+#define nvkm_mmu_init(p) \
+ nvkm_subdev_init(&(p)->base)
+#define nvkm_mmu_fini(p,s) \
+ nvkm_subdev_fini(&(p)->base, (s))
+
+#define _nvkm_mmu_dtor _nvkm_subdev_dtor
+#define _nvkm_mmu_init _nvkm_subdev_init
+#define _nvkm_mmu_fini _nvkm_subdev_fini
+
+extern struct nvkm_oclass nv04_mmu_oclass;
+extern struct nvkm_oclass nv41_mmu_oclass;
+extern struct nvkm_oclass nv44_mmu_oclass;
+extern struct nvkm_oclass nv50_mmu_oclass;
+extern struct nvkm_oclass gf100_mmu_oclass;
+
+int nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
+ struct nvkm_vm **);
+void nv04_mmu_dtor(struct nvkm_object *);
+
+int nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
+ u32 block, struct nvkm_vm **);
+int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
+ struct nvkm_vm **);
+int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
+ struct nvkm_vma *);
+void nvkm_vm_put(struct nvkm_vma *);
+void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
+void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
+void nvkm_vm_unmap(struct nvkm_vma *);
+void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+#endif
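
The vm_* helpers follow a reserve/map/unmap/release pattern. A sketch of one round trip is shown below; the vm and mem objects are assumed to come from the driver's normal allocation paths, and NV_MEM_ACCESS_RW is taken from the fb.h header added earlier in this series.

#include <subdev/fb.h>	/* struct nvkm_mem, NV_MEM_ACCESS_RW */
#include <subdev/mmu.h>

/* Sketch: carve out a GPU virtual range, map memory into it, then
 * tear the mapping down again. */
static int
vm_roundtrip(struct nvkm_vm *vm, struct nvkm_mem *mem,
	     u64 size, u32 page_shift)
{
	struct nvkm_vma vma;
	int ret;

	ret = nvkm_vm_get(vm, size, page_shift, NV_MEM_ACCESS_RW, &vma);
	if (ret)
		return ret;

	nvkm_vm_map(&vma, mem);
	/* ... access through vma.offset ... */
	nvkm_vm_unmap(&vma);
	nvkm_vm_put(&vma);
	return 0;
}
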
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
new file mode 100644
index 000000000000..fba613477b1a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_MXM_H__
+#define __NVKM_MXM_H__
+#include <core/subdev.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nvkm_mxm {
+ struct nvkm_subdev base;
+ u32 action;
+ u8 *mxms;
+};
+
+static inline struct nvkm_mxm *
+nvkm_mxm(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MXM);
+}
+
+#define nvkm_mxm_create(p,e,o,d) \
+ nvkm_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_mxm_init(p) \
+ nvkm_subdev_init(&(p)->base)
+#define nvkm_mxm_fini(p,s) \
+ nvkm_subdev_fini(&(p)->base, (s))
+int nvkm_mxm_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void nvkm_mxm_destroy(struct nvkm_mxm *);
+
+#define _nvkm_mxm_dtor _nvkm_subdev_dtor
+#define _nvkm_mxm_init _nvkm_subdev_init
+#define _nvkm_mxm_fini _nvkm_subdev_fini
+
+extern struct nvkm_oclass nv50_mxm_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
new file mode 100644
index 000000000000..7b86acc634a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_PMU_H__
+#define __NVKM_PMU_H__
+#include <core/subdev.h>
+
+struct nvkm_pmu {
+ struct nvkm_subdev base;
+
+ struct {
+ u32 base;
+ u32 size;
+ } send;
+
+ struct {
+ u32 base;
+ u32 size;
+
+ struct work_struct work;
+ wait_queue_head_t wait;
+ u32 process;
+ u32 message;
+ u32 data[2];
+ } recv;
+
+ int (*message)(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+ void (*pgob)(struct nvkm_pmu *, bool);
+};
+
+static inline struct nvkm_pmu *
+nvkm_pmu(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_PMU);
+}
+
+extern struct nvkm_oclass *gt215_pmu_oclass;
+extern struct nvkm_oclass *gf100_pmu_oclass;
+extern struct nvkm_oclass *gf110_pmu_oclass;
+extern struct nvkm_oclass *gk104_pmu_oclass;
+extern struct nvkm_oclass *gk208_pmu_oclass;
+extern struct nvkm_oclass *gk20a_pmu_oclass;
+
+/* interface to MEMX process running on PMU */
+struct nvkm_memx;
+int nvkm_memx_init(struct nvkm_pmu *, struct nvkm_memx **);
+int nvkm_memx_fini(struct nvkm_memx **, bool exec);
+void nvkm_memx_wr32(struct nvkm_memx *, u32 addr, u32 data);
+void nvkm_memx_wait(struct nvkm_memx *, u32 addr, u32 mask, u32 data, u32 nsec);
+void nvkm_memx_nsec(struct nvkm_memx *, u32 nsec);
+void nvkm_memx_wait_vblank(struct nvkm_memx *);
+void nvkm_memx_train(struct nvkm_memx *);
+int nvkm_memx_train_result(struct nvkm_pmu *, u32 *, int);
+void nvkm_memx_block(struct nvkm_memx *);
+void nvkm_memx_unblock(struct nvkm_memx *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
new file mode 100644
index 000000000000..6662829b6db1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -0,0 +1,79 @@
+#ifndef __NVKM_THERM_H__
+#define __NVKM_THERM_H__
+#include <core/subdev.h>
+
+enum nvkm_therm_fan_mode {
+ NVKM_THERM_CTRL_NONE = 0,
+ NVKM_THERM_CTRL_MANUAL = 1,
+ NVKM_THERM_CTRL_AUTO = 2,
+};
+
+enum nvkm_therm_attr_type {
+ NVKM_THERM_ATTR_FAN_MIN_DUTY = 0,
+ NVKM_THERM_ATTR_FAN_MAX_DUTY = 1,
+ NVKM_THERM_ATTR_FAN_MODE = 2,
+
+ NVKM_THERM_ATTR_THRS_FAN_BOOST = 10,
+ NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST = 11,
+ NVKM_THERM_ATTR_THRS_DOWN_CLK = 12,
+ NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST = 13,
+ NVKM_THERM_ATTR_THRS_CRITICAL = 14,
+ NVKM_THERM_ATTR_THRS_CRITICAL_HYST = 15,
+ NVKM_THERM_ATTR_THRS_SHUTDOWN = 16,
+ NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
+};
+
+struct nvkm_therm {
+ struct nvkm_subdev base;
+
+ int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
+ int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
+ int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
+ int (*pwm_clock)(struct nvkm_therm *, int line);
+
+ int (*fan_get)(struct nvkm_therm *);
+ int (*fan_set)(struct nvkm_therm *, int);
+ int (*fan_sense)(struct nvkm_therm *);
+
+ int (*temp_get)(struct nvkm_therm *);
+
+ int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
+ int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+};
+
+static inline struct nvkm_therm *
+nvkm_therm(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_THERM);
+}
+
+#define nvkm_therm_create(p,e,o,d) \
+ nvkm_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_therm_destroy(p) ({ \
+ struct nvkm_therm *therm = (p); \
+ _nvkm_therm_dtor(nv_object(therm)); \
+})
+#define nvkm_therm_init(p) ({ \
+ struct nvkm_therm *therm = (p); \
+ _nvkm_therm_init(nv_object(therm)); \
+})
+#define nvkm_therm_fini(p,s) ({ \
+ struct nvkm_therm *therm = (p); \
+ _nvkm_therm_init(nv_object(therm), (s)); \
+})
+
+int nvkm_therm_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_therm_dtor(struct nvkm_object *);
+int _nvkm_therm_init(struct nvkm_object *);
+int _nvkm_therm_fini(struct nvkm_object *, bool);
+
+int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+
+extern struct nvkm_oclass nv40_therm_oclass;
+extern struct nvkm_oclass nv50_therm_oclass;
+extern struct nvkm_oclass g84_therm_oclass;
+extern struct nvkm_oclass gt215_therm_oclass;
+extern struct nvkm_oclass gf110_therm_oclass;
+extern struct nvkm_oclass gm107_therm_oclass;
+#endif
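
[Editor's illustration, not part of the patch: a sketch of how a caller drives the thermal subdev through the function pointers above. That the FAN_MODE attribute takes nvkm_therm_fan_mode values is an assumption; error handling is trimmed.]

static void
example_therm_policy(void *obj)
{
        struct nvkm_therm *therm = nvkm_therm(obj);
        int temp = therm->temp_get(therm);   /* degrees C, negative on error */

        /* raise the fan floor and hand control back to automatic mode */
        therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, 30);
        therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MODE, NVKM_THERM_CTRL_AUTO);

        if (temp >= therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST))
                therm->fan_set(therm, 100);  /* force full fan duty */
}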
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
new file mode 100644
index 000000000000..4ad55082ef7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -0,0 +1,61 @@
+#ifndef __NVKM_TIMER_H__
+#define __NVKM_TIMER_H__
+#include <core/subdev.h>
+
+struct nvkm_alarm {
+ struct list_head head;
+ u64 timestamp;
+ void (*func)(struct nvkm_alarm *);
+};
+
+static inline void
+nvkm_alarm_init(struct nvkm_alarm *alarm,
+ void (*func)(struct nvkm_alarm *))
+{
+ INIT_LIST_HEAD(&alarm->head);
+ alarm->func = func;
+}
+
+bool nvkm_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
+bool nvkm_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
+bool nvkm_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
+void nvkm_timer_alarm(void *, u32 nsec, struct nvkm_alarm *);
+void nvkm_timer_alarm_cancel(void *, struct nvkm_alarm *);
+
+#define NV_WAIT_DEFAULT 2000000000ULL
+#define nv_wait(o,a,m,v) \
+ nvkm_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
+#define nv_wait_ne(o,a,m,v) \
+ nvkm_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
+#define nv_wait_cb(o,c,d) \
+ nvkm_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
+
+struct nvkm_timer {
+ struct nvkm_subdev base;
+ u64 (*read)(struct nvkm_timer *);
+ void (*alarm)(struct nvkm_timer *, u64 time, struct nvkm_alarm *);
+ void (*alarm_cancel)(struct nvkm_timer *, struct nvkm_alarm *);
+};
+
+static inline struct nvkm_timer *
+nvkm_timer(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_TIMER);
+}
+
+#define nvkm_timer_create(p,e,o,d) \
+ nvkm_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \
+ sizeof(**d), (void **)d)
+#define nvkm_timer_destroy(p) \
+ nvkm_subdev_destroy(&(p)->base)
+#define nvkm_timer_init(p) \
+ nvkm_subdev_init(&(p)->base)
+#define nvkm_timer_fini(p,s) \
+ nvkm_subdev_fini(&(p)->base, (s))
+
+int nvkm_timer_create_(struct nvkm_object *, struct nvkm_engine *,
+ struct nvkm_oclass *, int size, void **);
+
+extern struct nvkm_oclass nv04_timer_oclass;
+extern struct nvkm_oclass gk20a_timer_oclass;
+#endif
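
[Editor's illustration, not part of the patch: a brief usage sketch for the alarm and wait helpers above. The register offset polled via nv_wait() is a placeholder.]

static void
example_alarm_expired(struct nvkm_alarm *alarm)
{
        /* called by the timer subdev once the deadline passes */
}

static void
example_timer_usage(void *obj)
{
        static struct nvkm_alarm alarm;

        nvkm_alarm_init(&alarm, example_alarm_expired);
        nvkm_timer_alarm(obj, 1000000000, &alarm);  /* fire in ~1 second */

        /* poll (for up to NV_WAIT_DEFAULT) until bit 31 of a register clears */
        if (!nv_wait(obj, 0x002140, 0x80000000, 0x00000000))
                nvkm_timer_alarm_cancel(obj, &alarm);
}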
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index fee09ad818e4..fee09ad818e4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
new file mode 100644
index 000000000000..e3d7243fbb1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -0,0 +1,58 @@
+#ifndef __NVKM_VOLT_H__
+#define __NVKM_VOLT_H__
+#include <core/subdev.h>
+
+struct nvkm_voltage {
+ u32 uv;
+ u8 id;
+};
+
+struct nvkm_volt {
+ struct nvkm_subdev base;
+
+ int (*vid_get)(struct nvkm_volt *);
+ int (*get)(struct nvkm_volt *);
+ int (*vid_set)(struct nvkm_volt *, u8 vid);
+ int (*set)(struct nvkm_volt *, u32 uv);
+ int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+
+ u8 vid_mask;
+ u8 vid_nr;
+ struct {
+ u32 uv;
+ u8 vid;
+ } vid[256];
+};
+
+static inline struct nvkm_volt *
+nvkm_volt(void *obj)
+{
+ return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VOLT);
+}
+
+#define nvkm_volt_create(p, e, o, d) \
+ nvkm_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_volt_destroy(p) ({ \
+ struct nvkm_volt *v = (p); \
+ _nvkm_volt_dtor(nv_object(v)); \
+})
+#define nvkm_volt_init(p) ({ \
+ struct nvkm_volt *v = (p); \
+ _nvkm_volt_init(nv_object(v)); \
+})
+#define nvkm_volt_fini(p,s) \
+ nvkm_subdev_fini((p), (s))
+
+int nvkm_volt_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_volt_dtor(struct nvkm_object *);
+int _nvkm_volt_init(struct nvkm_object *);
+#define _nvkm_volt_fini _nvkm_subdev_fini
+
+extern struct nvkm_oclass nv40_volt_oclass;
+extern struct nvkm_oclass gk20a_volt_oclass;
+
+int nvkm_voltgpio_init(struct nvkm_volt *);
+int nvkm_voltgpio_get(struct nvkm_volt *);
+int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+#endif
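
[Editor's illustration, not part of the patch: a caller can request a voltage directly via ->set()/->set_id(), or walk the VID table itself as sketched below. Treating ->get() as returning microvolts is an assumption based on the uv fields above.]

static int
example_volt_raise(struct nvkm_volt *volt, u32 request_uv)
{
        int cur = volt->get(volt);  /* assumed: current voltage in uV, <0 on error */
        int i, best = -1;

        /* pick the lowest table entry at or above the requested voltage */
        for (i = 0; i < volt->vid_nr; i++) {
                if (volt->vid[i].uv >= request_uv &&
                    (best < 0 || volt->vid[i].uv < volt->vid[best].uv))
                        best = i;
        }
        if (best < 0)
                return cur < 0 ? cur : -EINVAL;

        return volt->vid_set(volt, volt->vid[best].vid);
}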
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d39a15000068..d8b0891a141c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -100,7 +100,7 @@ static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
struct nouveau_abi16_ntfy *ntfy)
{
- nouveau_mm_free(&chan->heap, &ntfy->node);
+ nvkm_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
kfree(ntfy);
}
@@ -128,7 +128,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
}
if (chan->heap.block_size)
- nouveau_mm_fini(&chan->heap);
+ nvkm_mm_fini(&chan->heap);
/* destroy channel object, all children will be killed too */
if (chan->chan) {
@@ -164,8 +164,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
- struct nouveau_timer *ptimer = nvkm_timer(device);
- struct nouveau_graph *graph = nvkm_gr(device);
+ struct nvkm_timer *ptimer = nvxx_timer(device);
+ struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
switch (getparam->param) {
@@ -173,19 +173,19 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nv_device_is_pci(nvkm_device(device)))
+ if (nv_device_is_pci(nvxx_device(device)))
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nv_device_is_pci(nvkm_device(device)))
+ if (nv_device_is_pci(nvxx_device(device)))
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nv_device_is_pci(nvkm_device(device)))
+ if (!nv_device_is_pci(nvxx_device(device)))
getparam->value = 3;
else
if (drm_pci_device_is_agp(dev))
@@ -215,7 +215,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
- getparam->value = graph->units ? graph->units(graph) : 0;
+ getparam->value = gr->units ? gr->units(gr) : 0;
break;
default:
NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
@@ -324,7 +324,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+ ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
done:
if (ret)
nouveau_abi16_chan_fini(abi16, chan);
@@ -448,8 +448,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
list_add(&ntfy->head, &chan->notifiers);
ntfy->handle = info->handle;
- ret = nouveau_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
- &ntfy->node);
+ ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
+ &ntfy->node);
if (ret)
goto done;
@@ -527,7 +527,7 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
/* cleanup extra state if this object was a notifier */
list_for_each_entry(ntfy, &chan->notifiers, head) {
if (ntfy->handle == fini->handle) {
- nouveau_mm_free(&chan->heap, &ntfy->node);
+ nvkm_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
break;
}
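
[Editor's illustration, not part of the patch: the nvkm_mm calls renamed above follow a simple lifecycle. Parameter meanings are inferred from the call sites in this hunk (offset/length/block for init; heap/type/max/min/align for head allocation).]

static int
example_ntfy_heap(void)
{
        struct nvkm_mm heap = {};
        struct nvkm_mm_node *node;
        int ret;

        /* heap spanning one page, smallest allocation unit of 1 */
        ret = nvkm_mm_init(&heap, 0, PAGE_SIZE, 1);
        if (ret)
                return ret;

        /* take a 32-byte notifier from the head of the heap */
        ret = nvkm_mm_head(&heap, 0, 1, 32, 32, 1, &node);
        if (ret == 0)
                nvkm_mm_free(&heap, &node);

        nvkm_mm_fini(&heap);
        return ret;
}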
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 39844e6bfbff..86eb1caf4957 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -14,7 +14,7 @@ int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
struct nouveau_abi16_ntfy {
struct list_head head;
- struct nouveau_mm_node *node;
+ struct nvkm_mm_node *node;
u32 handle;
};
@@ -23,8 +23,8 @@ struct nouveau_abi16_chan {
struct nouveau_channel *chan;
struct list_head notifiers;
struct nouveau_bo *ntfy;
- struct nouveau_vma ntfy_vma;
- struct nouveau_mm heap;
+ struct nvkm_vma ntfy_vma;
+ struct nvkm_mm heap;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 1f6f6ba6847a..0b5970955604 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -45,8 +45,8 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
while (agpmode == -1 && quirk->hostbridge_vendor) {
if (info->id_vendor == quirk->hostbridge_vendor &&
info->id_device == quirk->hostbridge_device &&
- nvkm_device(device)->pdev->vendor == quirk->chip_vendor &&
- nvkm_device(device)->pdev->device == quirk->chip_device) {
+ nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
+ nvxx_device(device)->pdev->device == quirk->chip_device) {
agpmode = quirk->mode;
NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
agpmode);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 7df6acc8bb34..0190b69bbe25 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2009,7 +2009,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nvkm_bios(&drm->device);
+ struct nvkm_bios *bios = nvxx_bios(&drm->device);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index bba2960d3dfb..77326e344dad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,9 +48,9 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
- struct nouveau_fb_tile *tile = &pfb->tile.region[i];
- struct nouveau_engine *engine;
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_fb_tile *tile = &pfb->tile.region[i];
+ struct nvkm_engine *engine;
nouveau_fence_unref(&reg->fence);
@@ -62,9 +62,9 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
pfb->tile.prog(pfb, i, tile);
- if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+ if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
engine->tile_prog(engine, i);
- if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+ if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
engine->tile_prog(engine, i);
}
@@ -105,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -193,7 +193,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
int max_size;
if (drm->client.vm)
- lpg_shift = drm->client.vm->vmm->lpg_shift;
+ lpg_shift = drm->client.vm->mmu->lpg_shift;
max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
@@ -214,13 +214,13 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
- if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
+ if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
if (drm->client.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
+ nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -325,7 +325,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
memtype == TTM_PL_FLAG_VRAM && contig) {
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
if (bo->mem.mem_type == TTM_PL_VRAM) {
- struct nouveau_mem *mem = bo->mem.mm_node;
+ struct nvkm_mem *mem = bo->mem.mm_node;
if (!list_is_singular(&mem->regions))
evict = true;
}
@@ -459,7 +459,7 @@ void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_device *device = nvkm_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -479,7 +479,7 @@ void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_device *device = nvkm_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -533,20 +533,6 @@ _nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
}
#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-u16
-nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
-{
- bool is_iomem;
- u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-
- mem = nouveau_bo_mem_index(nvbo, index, mem);
-
- if (is_iomem)
- return ioread16_native((void __force __iomem *)mem);
- else
- return *mem;
-}
-
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
@@ -634,7 +620,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
/* Some BARs do not support being ioremapped WC */
- if (nvkm_bar(&drm->device)->iomap_uncached) {
+ if (nvxx_bar(&drm->device)->iomap_uncached) {
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
}
@@ -709,7 +695,7 @@ static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
int ret = RING_SPACE(chan, 10);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
@@ -741,7 +727,7 @@ static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
@@ -779,7 +765,7 @@ static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
@@ -818,7 +804,7 @@ static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
@@ -856,7 +842,7 @@ static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
@@ -874,7 +860,7 @@ static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
@@ -908,12 +894,12 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nvkm_mem *node = old_mem->mm_node;
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
int src_tiled = !!node->memtype;
- int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
+ int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
int ret;
while (length) {
@@ -1050,25 +1036,25 @@ static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
- struct nouveau_mem *old_node = bo->mem.mm_node;
- struct nouveau_mem *new_node = mem->mm_node;
+ struct nvkm_mem *old_node = bo->mem.mm_node;
+ struct nvkm_mem *new_node = mem->mm_node;
u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
- NV_MEM_ACCESS_RW, &old_node->vma[0]);
+ ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
- NV_MEM_ACCESS_RW, &old_node->vma[1]);
+ ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[1]);
if (ret) {
- nouveau_vm_put(&old_node->vma[0]);
+ nvkm_vm_put(&old_node->vma[0]);
return ret;
}
- nouveau_vm_map(&old_node->vma[0], old_node);
- nouveau_vm_map(&old_node->vma[1], new_node);
+ nvkm_vm_map(&old_node->vma[0], old_node);
+ nvkm_vm_map(&old_node->vma[1], new_node);
return 0;
}
@@ -1083,7 +1069,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
int ret;
/* create temporary vmas for the transfer and attach them to the
- * old nouveau_mem node, these will get cleaned up after ttm has
+ * old nvkm_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -1245,7 +1231,7 @@ static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vma *vma;
+ struct nvkm_vma *vma;
/* ttm can now (stupidly) pass the driver bos it didn't create... */
if (bo->destroy != nouveau_bo_del_ttm)
@@ -1254,10 +1240,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
list_for_each_entry(vma, &nvbo->vma_list, head) {
if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
(new_mem->mem_type == TTM_PL_VRAM ||
- nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
- nouveau_vm_map(vma, new_mem->mm_node);
+ nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
+ nvkm_vm_map(vma, new_mem->mm_node);
} else {
- nouveau_vm_unmap(vma);
+ nvkm_vm_unmap(vma);
}
}
}
@@ -1368,7 +1354,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_mem *node = mem->mm_node;
int ret;
mem->bus.addr = NULL;
@@ -1396,10 +1382,10 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
+ mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
mem->bus.is_iomem = true;
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- struct nouveau_bar *bar = nvkm_bar(&drm->device);
+ struct nvkm_bar *bar = nvxx_bar(&drm->device);
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
@@ -1419,8 +1405,8 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_bar *bar = nvkm_bar(&drm->device);
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_bar *bar = nvxx_bar(&drm->device);
+ struct nvkm_mem *node = mem->mm_node;
if (!node->bar_vma.node)
return;
@@ -1434,7 +1420,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvif_device *device = &drm->device;
- u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
+ u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1479,7 +1465,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct nouveau_device *device;
+ struct nvkm_device *device;
struct drm_device *dev;
struct device *pdev;
unsigned i;
@@ -1498,7 +1484,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nvkm_device(&drm->device);
+ device = nvxx_device(&drm->device);
dev = drm->dev;
pdev = nv_device_base(device);
@@ -1553,7 +1539,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct nouveau_device *device;
+ struct nvkm_device *device;
struct drm_device *dev;
struct device *pdev;
unsigned i;
@@ -1563,7 +1549,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nvkm_device(&drm->device);
+ device = nvxx_device(&drm->device);
dev = drm->dev;
pdev = nv_device_base(device);
@@ -1627,10 +1613,10 @@ struct ttm_bo_driver nouveau_bo_driver = {
.io_mem_free = &nouveau_ttm_io_mem_free,
};
-struct nouveau_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+struct nvkm_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
- struct nouveau_vma *vma;
+ struct nvkm_vma *vma;
list_for_each_entry(vma, &nvbo->vma_list, head) {
if (vma->vm == vm)
return vma;
@@ -1640,21 +1626,21 @@ nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
}
int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
- struct nouveau_vma *vma)
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
+ struct nvkm_vma *vma)
{
const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+ ret = nvkm_vm_get(vm, size, nvbo->page_shift,
NV_MEM_ACCESS_RW, vma);
if (ret)
return ret;
if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
(nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
- nvbo->page_shift != vma->vm->vmm->lpg_shift))
- nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+ nvbo->page_shift != vma->vm->mmu->lpg_shift))
+ nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
@@ -1662,12 +1648,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
}
void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
if (vma->node) {
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
- nouveau_vm_unmap(vma);
- nouveau_vm_put(vma);
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
list_del(&vma->head);
}
}
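
[Editor's illustration, not part of the patch: how a client maps a buffer into its per-client VM with the helpers renamed above, loosely modelled on the GEM open path. Bumping vma->refcount to reuse an existing mapping is an assumption.]

static int
example_bo_map_into_vm(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
        struct nvkm_vma *vma = nouveau_bo_vma_find(nvbo, vm);
        int ret;

        if (vma) {
                vma->refcount++;   /* assumed: reuse an existing mapping */
                return 0;
        }

        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
                return -ENOMEM;

        ret = nouveau_bo_vma_add(nvbo, vm, vma);
        if (ret)
                kfree(vma);        /* vma_add failed, nothing to tear down */
        return ret;
}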
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 072222efeeb7..e42360983229 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -5,7 +5,7 @@
struct nouveau_channel;
struct nouveau_fence;
-struct nouveau_vma;
+struct nvkm_vma;
struct nouveau_bo {
struct ttm_buffer_object bo;
@@ -78,7 +78,6 @@ int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
-u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
@@ -88,12 +87,12 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
-struct nouveau_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+struct nvkm_vma *
+nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
-int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
- struct nouveau_vma *);
-void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+int nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
+ struct nvkm_vma *);
+void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index aff9099aae6c..e581f63cbf25 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -54,7 +54,7 @@ nouveau_channel_idle(struct nouveau_channel *chan)
if (ret)
NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
- chan->object->handle, nvkm_client(&cli->base)->name);
+ chan->object->handle, nvxx_client(&cli->base)->name);
return ret;
}
@@ -88,7 +88,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
u32 handle, u32 size, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)nvif_client(&device->base);
- struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
+ struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
@@ -136,7 +136,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->vmm->limit - 1;
+ args.limit = cli->vm->mmu->limit - 1;
} else
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -146,7 +146,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nv_device_resource_start(nvkm_device(device), 1);
+ args.start = nv_device_resource_start(nvxx_device(device), 1);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -165,7 +165,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = vmm->limit - 1;
+ args.limit = mmu->limit - 1;
}
}
@@ -281,8 +281,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
struct nvif_device *device = chan->device;
struct nouveau_cli *cli = (void *)nvif_client(&device->base);
- struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
- struct nouveau_software_chan *swch;
+ struct nvkm_mmu *mmu = nvxx_mmu(device);
+ struct nvkm_sw_chan *swch;
struct nv_dma_v0 args = {};
int ret, i;
@@ -294,7 +294,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->vmm->limit - 1;
+ args.limit = cli->vm->mmu->limit - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -312,7 +312,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->vmm->limit - 1;
+ args.limit = cli->vm->mmu->limit - 1;
} else
if (chan->drm->agp.stat == ENABLED) {
args.target = NV_DMA_V0_TARGET_AGP;
@@ -324,7 +324,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = vmm->limit - 1;
+ args.limit = mmu->limit - 1;
}
ret = nvif_object_init(chan->object, NULL, gart,
@@ -372,7 +372,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
if (ret)
return ret;
- swch = (void *)nvkm_object(&chan->nvsw)->parent;
+ swch = (void *)nvxx_object(&chan->nvsw)->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 8309c24ee698..8b3640f69e4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -16,7 +16,7 @@ struct nouveau_channel {
struct {
struct nouveau_bo *buffer;
- struct nouveau_vma vma;
+ struct nvkm_vma vma;
struct nvif_object ctxdma;
} push;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c8ac9482cf2e..db7095ae4ebb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -115,7 +115,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int i, panel = -ENODEV;
@@ -241,7 +241,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_encoder *nv_partner;
- struct nouveau_i2c_port *i2c;
+ struct nvkm_i2c_port *i2c;
int type;
int ret;
enum drm_connector_status conn_status = connector_status_disconnected;
@@ -458,6 +458,28 @@ nouveau_connector_set_property(struct drm_connector *connector,
switch (value) {
case DRM_MODE_SCALE_NONE:
+ /* We allow 'None' for EDID modes, even on a fixed
+ * panel (some exist with support for lower refresh
+ * rates, which people might want to use for power
+ * saving purposes).
+ *
+ * Non-EDID modes will force the use of GPU scaling
+ * to the native mode regardless of this setting.
+ */
+ switch (nv_connector->type) {
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_LVDS_SPWG:
+ case DCB_CONNECTOR_eDP:
+ /* ... except prior to G80, where the code
+ * doesn't support such things.
+ */
+ if (disp->disp.oclass < NV50_DISP)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ break;
case DRM_MODE_SCALE_FULLSCREEN:
case DRM_MODE_SCALE_CENTER:
case DRM_MODE_SCALE_ASPECT:
@@ -466,11 +488,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
- /* LVDS always needs gpu scaling */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
- value == DRM_MODE_SCALE_NONE)
- return -EINVAL;
-
/* Changing between GPU and panel scaling requires a full
* modeset
*/
@@ -655,15 +672,15 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
while (mode->hdisplay) {
if (mode->hdisplay <= native->hdisplay &&
- mode->vdisplay <= native->vdisplay) {
+ mode->vdisplay <= native->vdisplay &&
+ (mode->hdisplay != native->hdisplay ||
+ mode->vdisplay != native->vdisplay)) {
m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
drm_mode_vrefresh(native), false,
false, false);
if (!m)
continue;
- m->type |= DRM_MODE_TYPE_DRIVER;
-
drm_mode_probed_add(connector, m);
modes++;
}
@@ -968,7 +985,7 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
struct nouveau_connector *nv_connector =
container_of(aux, typeof(*nv_connector), aux);
struct nouveau_encoder *nv_encoder;
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c_port *port;
int ret;
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
@@ -979,13 +996,13 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (msg->size == 0)
return msg->size;
- ret = nouveau_i2c(port)->acquire(port, 0);
+ ret = nvkm_i2c(port)->acquire(port, 0);
if (ret)
return ret;
ret = port->func->aux(port, false, msg->request, msg->address,
msg->buffer, msg->size);
- nouveau_i2c(port)->release(port);
+ nvkm_i2c(port)->release(port);
if (ret >= 0) {
msg->reply = ret;
return msg->size;
@@ -1180,36 +1197,61 @@ nouveau_connector_create(struct drm_device *dev, int index)
disp->color_vibrance_property,
150);
+ /* default scaling mode */
switch (nv_connector->type) {
- case DCB_CONNECTOR_VGA:
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- drm_object_attach_property(&connector->base,
- dev->mode_config.scaling_mode_property,
- nv_connector->scaling_mode);
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_LVDS_SPWG:
+ case DCB_CONNECTOR_eDP:
+ /* see note in nouveau_connector_set_property() */
+ if (disp->disp.oclass < NV50_DISP) {
+ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
+ break;
}
- /* fall-through */
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+ break;
+ default:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+ break;
+ }
+
+ /* scaling mode property */
+ switch (nv_connector->type) {
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
case DCB_CONNECTOR_TV_3:
- nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
+ case DCB_CONNECTOR_VGA:
+ if (disp->disp.oclass < NV50_DISP)
+ break; /* can only scale on DFPs */
+ /* fall-through */
default:
- nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ scaling_mode_property,
+ nv_connector->scaling_mode);
+ break;
+ }
- drm_object_attach_property(&connector->base,
- dev->mode_config.scaling_mode_property,
- nv_connector->scaling_mode);
+ /* dithering properties */
+ switch (nv_connector->type) {
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ case DCB_CONNECTOR_VGA:
+ break;
+ default:
if (disp->dithering_mode) {
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
drm_object_attach_property(&connector->base,
- disp->dithering_mode,
- nv_connector->dithering_mode);
+ disp->dithering_mode,
+ nv_connector->
+ dithering_mode);
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
}
if (disp->dithering_depth) {
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
drm_object_attach_property(&connector->base,
- disp->dithering_depth,
- nv_connector->dithering_depth);
+ disp->dithering_depth,
+ nv_connector->
+ dithering_depth);
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
}
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 629a380c7085..7446ee66ea04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -33,7 +33,7 @@
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
-struct nouveau_i2c_port;
+struct nvkm_i2c_port;
enum nouveau_underscan_type {
UNDERSCAN_OFF,
@@ -72,6 +72,7 @@ struct nouveau_connector {
int dithering_mode;
int dithering_depth;
int scaling_mode;
+ bool scaling_full;
enum nouveau_underscan_type underscan;
u32 underscan_hborder;
u32 underscan_vborder;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f8042433752b..860b0e2d4181 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -450,7 +450,7 @@ nouveau_display_create(struct drm_device *dev)
drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1);
+ dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -570,7 +570,8 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (nv_crtc->cursor.nvbo) {
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.set_offset)
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
}
@@ -604,7 +605,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
continue;
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
- if (!ret)
+ if (!ret && nv_crtc->cursor.set_offset)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
NV_ERROR(drm, "Could not pin/map cursor.\n");
@@ -637,7 +638,9 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
if (!nv_crtc->cursor.nvbo)
continue;
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
+
+ if (nv_crtc->cursor.set_offset)
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index be3d5947c6be..a6213e2425c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,14 +1,14 @@
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
#include "nouveau_drm.h"
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
- struct nouveau_vma vma;
+ struct nvkm_vma vma;
u32 r_handle;
u32 r_format;
u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 8508603cc8c3..6d9245aa81a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -84,7 +84,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
{
struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nouveau_bo *pb = chan->push.buffer;
- struct nouveau_vma *vma;
+ struct nvkm_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index c5137cccce7d..c3ef30b3a5ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -31,7 +31,7 @@
#include "nouveau_crtc.h"
static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
+nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
u8 *dpcd)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -55,7 +55,7 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
{
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c_port *auxch;
+ struct nvkm_i2c_port *auxch;
u8 *dpcd = nv_encoder->dp.dpcd;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 65910e3aed0c..8763deb5188b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -52,6 +52,7 @@
#include "nouveau_debugfs.h"
#include "nouveau_usif.h"
#include "nouveau_connector.h"
+#include "nouveau_platform.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -123,7 +124,7 @@ nouveau_cli_create(u64 name, const char *sname,
static void
nouveau_cli_destroy(struct nouveau_cli *cli)
{
- nouveau_vm_ref(NULL, &nvkm_client(&cli->base)->vm, NULL);
+ nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
nvif_client_fini(&cli->base);
usif_client_fini(cli);
}
@@ -133,7 +134,7 @@ nouveau_accel_fini(struct nouveau_drm *drm)
{
nouveau_channel_del(&drm->channel);
nvif_object_fini(&drm->ntfy);
- nouveau_gpuobj_ref(NULL, &drm->notify);
+ nvkm_gpuobj_ref(NULL, &drm->notify);
nvif_object_fini(&drm->nvsw);
nouveau_channel_del(&drm->cechan);
nvif_object_fini(&drm->ttm.copy);
@@ -230,7 +231,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
if (ret == 0) {
- struct nouveau_software_chan *swch;
+ struct nvkm_sw_chan *swch;
ret = RING_SPACE(drm->channel, 2);
if (ret == 0) {
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -242,7 +243,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
OUT_RING (drm->channel, 0x001f0000);
}
}
- swch = (void *)nvkm_object(&drm->nvsw)->parent;
+ swch = (void *)nvxx_object(&drm->nvsw)->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = drm->channel;
}
@@ -254,8 +255,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32,
- 0, 0, &drm->notify);
+ ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32,
+ 0, 0, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_fini(drm);
@@ -284,7 +285,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
- struct nouveau_device *device;
+ struct nvkm_device *device;
struct apertures_struct *aper;
bool boot = false;
int ret;
@@ -317,9 +318,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
- ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI,
- nouveau_pci_name(pdev), pci_name(pdev),
- nouveau_config, nouveau_debug, &device);
+ ret = nvkm_device_create(pdev, NVKM_BUS_PCI,
+ nouveau_pci_name(pdev), pci_name(pdev),
+ nouveau_config, nouveau_debug, &device);
if (ret)
return ret;
@@ -327,7 +328,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
- nouveau_object_ref(NULL, (struct nouveau_object **)&device);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&device);
return ret;
}
@@ -378,8 +379,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
- nvkm_client(&drm->client.base)->debug =
- nouveau_dbgopt(nouveau_debug, "DRM");
+ nvxx_client(&drm->client.base)->debug =
+ nvkm_dbgopt(nouveau_debug, "DRM");
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
@@ -434,12 +435,12 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_agp_init(drm);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
- 0x1000, &drm->client.vm);
+ ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
+ 0x1000, &drm->client.vm);
if (ret)
goto fail_device;
- nvkm_client(&drm->client.base)->vm = drm->client.vm;
+ nvxx_client(&drm->client.base)->vm = drm->client.vm;
}
ret = nouveau_ttm_init(drm);
@@ -522,18 +523,17 @@ void
nouveau_drm_device_remove(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_client *client;
- struct nouveau_object *device;
+ struct nvkm_client *client;
+ struct nvkm_object *device;
dev->irq_enabled = false;
- client = nvkm_client(&drm->client.base);
+ client = nvxx_client(&drm->client.base);
device = client->device;
drm_put_dev(dev);
- nouveau_object_ref(NULL, &device);
- nouveau_object_debug();
+ nvkm_object_ref(NULL, &device);
+ nvkm_object_debug();
}
-EXPORT_SYMBOL(nouveau_drm_device_remove);
static void
nouveau_drm_remove(struct pci_dev *pdev)
@@ -831,14 +831,14 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
cli->base.super = false;
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
- 0x1000, &cli->vm);
+ ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
+ 0x1000, &cli->vm);
if (ret) {
nouveau_cli_destroy(cli);
goto out_suspend;
}
- nvkm_client(&cli->base)->vm = cli->vm;
+ nvxx_client(&cli->base)->vm = cli->vm;
}
fpriv->driver_priv = cli;
@@ -1056,10 +1056,10 @@ nouveau_platform_device_create_(struct platform_device *pdev, int size,
struct drm_device *drm;
int err;
- err = nouveau_device_create_(pdev, NOUVEAU_BUS_PLATFORM,
- nouveau_platform_name(pdev),
- dev_name(&pdev->dev), nouveau_config,
- nouveau_debug, size, pobject);
+ err = nvkm_device_create_(pdev, NVKM_BUS_PLATFORM,
+ nouveau_platform_name(pdev),
+ dev_name(&pdev->dev), nouveau_config,
+ nouveau_debug, size, pobject);
if (err)
return ERR_PTR(err);
@@ -1079,11 +1079,10 @@ nouveau_platform_device_create_(struct platform_device *pdev, int size,
return drm;
err_free:
- nouveau_object_ref(NULL, (struct nouveau_object **)pobject);
+ nvkm_object_ref(NULL, (struct nvkm_object **)pobject);
return ERR_PTR(err);
}
-EXPORT_SYMBOL(nouveau_platform_device_create_);
static int __init
nouveau_drm_init(void)
@@ -1105,6 +1104,10 @@ nouveau_drm_init(void)
if (!nouveau_modeset)
return 0;
+#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
+ platform_driver_register(&nouveau_platform_driver);
+#endif
+
nouveau_register_dsm_handler();
return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
}
@@ -1117,6 +1120,10 @@ nouveau_drm_exit(void)
drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
nouveau_unregister_dsm_handler();
+
+#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
+ platform_driver_unregister(&nouveau_platform_driver);
+#endif
}
module_init(nouveau_drm_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 8ae36f265fb8..fc68f0973f9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -80,7 +80,7 @@ enum nouveau_drm_handle {
struct nouveau_cli {
struct nvif_client base;
- struct nouveau_vm *vm; /*XXX*/
+ struct nvkm_vm *vm; /*XXX*/
struct list_head head;
struct mutex mutex;
void *abi16;
@@ -142,7 +142,7 @@ struct nouveau_drm {
/* context for accelerated drm-internal operations */
struct nouveau_channel *cechan;
struct nouveau_channel *channel;
- struct nouveau_gpuobj *notify;
+ struct nvkm_gpuobj *notify;
struct nouveau_fbdev *fbcon;
struct nvif_object nvsw;
struct nvif_object ntfy;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 5f0e37fc2849..c57a37e8e1eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -34,14 +34,14 @@
#define NV_DPMS_CLEARED 0x80
-struct nouveau_i2c_port;
+struct nvkm_i2c_port;
struct nouveau_encoder {
struct drm_encoder_slave base;
struct dcb_output *dcb;
int or;
- struct nouveau_i2c_port *i2c;
+ struct nvkm_i2c_port *i2c;
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3ed12a8cfc91..79924e4b1b49 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -370,6 +370,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
ret = -ENOMEM;
goto out_unlock;
}
+ info->skip_vt_switch = 1;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
@@ -487,30 +488,17 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
-static void
-nouveau_fbcon_set_suspend_work(struct work_struct *work)
-{
- struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
- console_lock();
- nouveau_fbcon_accel_restore(fbcon->dev);
- nouveau_fbcon_zfill(fbcon->dev, fbcon);
- fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
- console_unlock();
-}
-
void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->fbcon) {
- if (state == FBINFO_STATE_RUNNING) {
- schedule_work(&drm->fbcon->work);
- return;
- }
- flush_work(&drm->fbcon->work);
console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(dev);
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- nouveau_fbcon_accel_save_disable(dev);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(dev);
console_unlock();
}
}
@@ -531,7 +519,6 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
- INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
fbcon->dev = dev;
drm->fbcon = fbcon;
@@ -539,12 +526,12 @@ nouveau_fbcon_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &fbcon->helper,
dev->mode_config.num_crtc, 4);
- if (ret) {
- kfree(fbcon);
- return ret;
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&fbcon->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
+ if (ret)
+ goto fini;
if (drm->device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
@@ -557,8 +544,17 @@ nouveau_fbcon_init(struct drm_device *dev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
+ ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&fbcon->helper);
+free:
+ kfree(fbcon);
+ return ret;
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 6208e70e4a1c..1e2e9e27a03b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -36,7 +36,6 @@ struct nouveau_fbdev {
struct nouveau_framebuffer nouveau_fb;
struct list_head fbdev_list;
struct drm_device *dev;
- struct work_struct work;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index f32a434724e3..c6d56bef5823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -182,7 +182,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
else if (chan == chan->drm->channel)
strcpy(fctx->name, "generic kernel channel");
else
- strcpy(fctx->name, nvkm_client(&cli->base)->name);
+ strcpy(fctx->name, nvxx_client(&cli->base)->name);
kref_init(&fctx->fence_ref);
if (!priv->uevent)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 96e461c6f68f..d9241d8247fb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -89,9 +89,9 @@ int nouveau_flip_complete(void *chan);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
- struct nouveau_vma vma;
- struct nouveau_vma vma_gart;
- struct nouveau_vma dispc_vma[4];
+ struct nvkm_vma vma;
+ struct nvkm_vma vma_gart;
+ struct nvkm_vma dispc_vma[4];
};
struct nv84_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index bf0f9e21d714..7c077fced1d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -64,7 +64,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_vma *vma;
+ struct nvkm_vma *vma;
struct device *dev = drm->dev->dev;
int ret;
@@ -105,14 +105,14 @@ out:
static void
nouveau_gem_object_delete(void *data)
{
- struct nouveau_vma *vma = data;
- nouveau_vm_unmap(vma);
- nouveau_vm_put(vma);
+ struct nvkm_vma *vma = data;
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
kfree(vma);
}
static void
-nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
@@ -135,8 +135,8 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
} else {
if (mapped)
- nouveau_vm_unmap(vma);
- nouveau_vm_put(vma);
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
kfree(vma);
}
}
@@ -148,7 +148,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nouveau_vma *vma;
+ struct nvkm_vma *vma;
int ret;
if (!cli->vm)
@@ -222,7 +222,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nouveau_vma *vma;
+ struct nvkm_vma *vma;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
@@ -251,7 +251,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
@@ -850,19 +850,6 @@ out_next:
return nouveau_abi16_put(abi16, ret);
}
-static inline uint32_t
-domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
-{
- uint32_t flags = 0;
-
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
-
- return flags;
-}
-
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index afb36d66e78d..0dbe0060f86e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -40,7 +40,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
int temp = therm->temp_get(therm);
if (temp < 0)
@@ -66,10 +66,10 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
@@ -78,13 +78,13 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST,
value / 1000);
return count;
@@ -99,10 +99,10 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
@@ -111,13 +111,13 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST,
value / 1000);
return count;
@@ -131,10 +131,10 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000);
}
static ssize_t
nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
@@ -142,13 +142,13 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
return count;
}
@@ -162,10 +162,10 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
@@ -173,13 +173,13 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST,
value / 1000);
return count;
@@ -194,10 +194,10 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000);
}
static ssize_t
nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
@@ -206,13 +206,13 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_CRITICAL, value / 1000);
return count;
}
@@ -227,10 +227,10 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_critical_temp_hyst(struct device *d,
@@ -240,13 +240,13 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST,
value / 1000);
return count;
@@ -260,10 +260,10 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000);
}
static ssize_t
nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
@@ -272,13 +272,13 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
return count;
}
@@ -293,10 +293,10 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
@@ -306,13 +306,13 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
- therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+ therm->attr_set(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST,
value / 1000);
return count;
@@ -346,7 +346,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
}
@@ -359,10 +359,10 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
int ret;
- ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
+ ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE);
if (ret < 0)
return ret;
@@ -375,7 +375,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
int ret;
@@ -383,7 +383,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
if (ret)
return ret;
- ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
+ ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MODE, value);
if (ret)
return ret;
else
@@ -398,7 +398,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
int ret;
ret = therm->fan_get(therm);
@@ -414,7 +414,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
int ret = -ENODEV;
long value;
@@ -438,10 +438,10 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
int ret;
- ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
+ ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
if (ret < 0)
return ret;
@@ -454,14 +454,14 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
int ret;
if (kstrtol(buf, 10, &value) == -EINVAL)
return -EINVAL;
- ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
+ ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value);
if (ret < 0)
return ret;
@@ -478,10 +478,10 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
int ret;
- ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
+ ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
if (ret < 0)
return ret;
@@ -494,14 +494,14 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
long value;
int ret;
if (kstrtol(buf, 10, &value) == -EINVAL)
return -EINVAL;
- ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
+ ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value);
if (ret < 0)
return ret;
@@ -561,7 +561,7 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nvkm_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->device);
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 6544b84f0303..ca0ad9d1563d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -60,22 +60,22 @@ nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
static int
nvkm_client_resume(void *priv)
{
- return nouveau_client_init(priv);
+ return nvkm_client_init(priv);
}
static int
nvkm_client_suspend(void *priv)
{
- return nouveau_client_fini(priv, true);
+ return nvkm_client_fini(priv, true);
}
static void
-nvkm_client_fini(void *priv)
+nvkm_client_driver_fini(void *priv)
{
- struct nouveau_object *client = priv;
- nouveau_client_fini(nv_client(client), false);
+ struct nvkm_object *client = priv;
+ nvkm_client_fini(nv_client(client), false);
atomic_set(&client->refcount, 1);
- nouveau_object_ref(NULL, &client);
+ nvkm_object_ref(NULL, &client);
}
static int
@@ -107,13 +107,13 @@ nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size)
}
static int
-nvkm_client_init(const char *name, u64 device, const char *cfg,
- const char *dbg, void **ppriv)
+nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
+ const char *dbg, void **ppriv)
{
- struct nouveau_client *client;
+ struct nvkm_client *client;
int ret;
- ret = nouveau_client_create(name, device, cfg, dbg, &client);
+ ret = nvkm_client_create(name, device, cfg, dbg, &client);
*ppriv = client;
if (ret)
return ret;
@@ -125,8 +125,8 @@ nvkm_client_init(const char *name, u64 device, const char *cfg,
const struct nvif_driver
nvif_driver_nvkm = {
.name = "nvkm",
- .init = nvkm_client_init,
- .fini = nvkm_client_fini,
+ .init = nvkm_client_driver_init,
+ .fini = nvkm_client_driver_fini,
.suspend = nvkm_client_suspend,
.resume = nvkm_client_resume,
.ioctl = nvkm_client_ioctl,
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index b307bbedd4c4..dc5900bf54ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -152,7 +152,7 @@ static int nouveau_platform_remove(struct platform_device *pdev)
{
struct drm_device *drm_dev = platform_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nouveau_device *device = nvkm_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->device);
struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
nouveau_drm_device_remove(drm_dev);
@@ -177,9 +177,3 @@ struct platform_driver nouveau_platform_driver = {
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
};
-
-module_platform_driver(nouveau_platform_driver);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 58c28b5653d5..268bb7213681 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -28,6 +28,7 @@
struct reset_control;
struct clk;
struct regulator;
+struct platform_driver;
struct nouveau_platform_gpu {
struct reset_control *rst;
@@ -38,7 +39,7 @@ struct nouveau_platform_gpu {
};
struct nouveau_platform_device {
- struct nouveau_device device;
+ struct nvkm_device device;
struct nouveau_platform_gpu *gpu;
@@ -48,4 +49,6 @@ struct nouveau_platform_device {
#define nv_device_to_platform(d) \
container_of(d, struct nouveau_platform_device, device)
+extern struct platform_driver nouveau_platform_driver;
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 43a96b99e180..7226f1f60901 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -72,7 +72,7 @@
# define NV_RAMHT_CONTEXT_VALID (1<<31)
# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
-# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
+# define NV_RAMHT_CONTEXT_ENGINE_SW 0
# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 01707e7deaf5..8c3053a177d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -9,8 +9,7 @@ struct nouveau_sgdma_be {
* nouveau_bo.c works properly, otherwise have to move them here
*/
struct ttm_dma_tt ttm;
- struct drm_device *dev;
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
};
static void
@@ -28,7 +27,7 @@ static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_mem *node = mem->mm_node;
if (ttm->sg) {
node->sg = ttm->sg;
@@ -39,7 +38,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
}
node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
- nouveau_vm_map(&node->vma[0], node);
+ nvkm_vm_map(&node->vma[0], node);
nvbe->node = node;
return 0;
}
@@ -48,7 +47,7 @@ static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- nouveau_vm_unmap(&nvbe->node->vma[0]);
+ nvkm_vm_unmap(&nvbe->node->vma[0]);
return 0;
}
@@ -62,7 +61,7 @@ static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_mem *node = mem->mm_node;
/* noop: bound in move_notify() */
if (ttm->sg) {
@@ -101,13 +100,17 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
if (!nvbe)
return NULL;
- nvbe->dev = drm->dev;
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
nvbe->ttm.ttm.func = &nv04_sgdma_backend;
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
+ /*
+ * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
+ * and thus our nouveau_sgdma_destroy() hook, so we don't need
+ * to free nvbe here.
+ */
return NULL;
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 8fbbf3093d86..1ec8f38ae69a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -165,7 +165,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
struct nvif_device *device = &drm->device;
if (sysfs && sysfs->ctrl.priv) {
- device_remove_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
+ device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
nvif_object_fini(&sysfs->ctrl);
}
@@ -192,7 +192,7 @@ nouveau_sysfs_init(struct drm_device *dev)
NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
&sysfs->ctrl);
if (ret == 0)
- device_create_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
+ device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 3d1cfcb96b6b..273e50110ec3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,7 +33,7 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
man->priv = pfb;
return 0;
}
@@ -46,16 +46,16 @@ nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
}
static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
+nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
if (node->vma[0].node) {
- nouveau_vm_unmap(&node->vma[0]);
- nouveau_vm_put(&node->vma[0]);
+ nvkm_vm_unmap(&node->vma[0]);
+ nvkm_vm_put(&node->vma[0]);
}
if (node->vma[1].node) {
- nouveau_vm_unmap(&node->vma[1]);
- nouveau_vm_put(&node->vma[1]);
+ nvkm_vm_unmap(&node->vma[1]);
+ nvkm_vm_put(&node->vma[1]);
}
}
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
- nouveau_mem_node_cleanup(mem->mm_node);
- pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ nvkm_mem_node_cleanup(mem->mm_node);
+ pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}
static int
@@ -76,9 +76,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
u32 size_nc = 0;
int ret;
@@ -103,9 +103,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
- struct nouveau_fb *pfb = man->priv;
- struct nouveau_mm *mm = &pfb->vram;
- struct nouveau_mm_node *r;
+ struct nvkm_fb *pfb = man->priv;
+ struct nvkm_mm *mm = &pfb->vram;
+ struct nvkm_mm_node *r;
u32 total = 0, free = 0;
mutex_lock(&nv_subdev(pfb)->mutex);
@@ -150,7 +150,7 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- nouveau_mem_node_cleanup(mem->mm_node);
+ nvkm_mem_node_cleanup(mem->mm_node);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
@@ -163,7 +163,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
@@ -203,15 +203,15 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
};
/*XXX*/
-#include <core/subdev/vm/nv04.h>
+#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
- struct nv04_vmmgr_priv *priv = (void *)vmm;
- struct nouveau_vm *vm = NULL;
- nouveau_vm_ref(priv->vm, &vm, NULL);
+ struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
+ struct nv04_mmu_priv *priv = (void *)mmu;
+ struct nvkm_vm *vm = NULL;
+ nvkm_vm_ref(priv->vm, &vm, NULL);
man->priv = vm;
return 0;
}
@@ -219,8 +219,8 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
- struct nouveau_vm *vm = man->priv;
- nouveau_vm_ref(NULL, &vm, NULL);
+ struct nvkm_vm *vm = man->priv;
+ nvkm_vm_ref(NULL, &vm, NULL);
man->priv = NULL;
return 0;
}
@@ -228,9 +228,9 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_mem *node = mem->mm_node;
if (node->vma[0].node)
- nouveau_vm_put(&node->vma[0]);
+ nvkm_vm_put(&node->vma[0]);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
@@ -241,7 +241,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -250,8 +250,8 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
- NV_MEM_ACCESS_RW, &node->vma[0]);
+ ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+ NV_MEM_ACCESS_RW, &node->vma[0]);
if (ret) {
kfree(node);
return ret;
@@ -354,8 +354,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
u32 bits;
int ret;
- bits = nvkm_vmmgr(&drm->device)->dma_bits;
- if (nv_device_is_pci(nvkm_device(&drm->device))) {
+ bits = nvxx_mmu(&drm->device)->dma_bits;
+ if (nv_device_is_pci(nvxx_device(&drm->device))) {
if (drm->agp.stat == ENABLED ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
@@ -396,12 +396,12 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
- nv_device_resource_len(nvkm_device(&drm->device), 1));
+ drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
+ nv_device_resource_len(nvxx_device(&drm->device), 1));
/* GART init */
if (drm->agp.stat != ENABLED) {
- drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
+ drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index f9859deb108a..c2e05e64cd6f 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -57,7 +57,7 @@ nv04_fence_sync(struct nouveau_fence *fence,
static u32
nv04_fence_read(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = nvkm_fifo_chan(chan);;
+ struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);
return atomic_read(&fifo->refcnt);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 490b90866baf..7da7958556a3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -125,7 +125,6 @@ nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
struct nv50_curs {
struct nv50_pioc base;
- struct nouveau_bo *image;
};
static int
@@ -201,7 +200,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
nv50_chan_destroy(&dmac->base);
if (dmac->ptr) {
- struct pci_dev *pdev = nvkm_device(nvif_device(disp))->pdev;
+ struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev;
pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
}
@@ -218,7 +217,7 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
mutex_init(&dmac->lock);
- dmac->ptr = pci_alloc_consistent(nvkm_device(device)->pdev,
+ dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
PAGE_SIZE, &dmac->handle);
if (!dmac->ptr)
return -ENOMEM;
@@ -421,9 +420,9 @@ evo_wait(void *evoc, int nr)
dmac->ptr[put] = 0x20000000;
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
- if (!nvkm_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ if (!nvxx_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
mutex_unlock(&dmac->lock);
- nv_error(nvkm_object(&dmac->base.user), "channel stalled\n");
+ nv_error(nvxx_object(&dmac->base.user), "channel stalled\n");
return NULL;
}
@@ -481,7 +480,7 @@ evo_sync(struct drm_device *dev)
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_kick(push, mast);
- if (nv_wait_cb(nvkm_device(device), evo_sync_wait, disp->sync))
+ if (nv_wait_cb(nvxx_device(device), evo_sync_wait, disp->sync))
return 0;
}
@@ -536,7 +535,7 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
evo_kick(push, flip.chan);
}
- nv_wait_cb(nvkm_device(device), nv50_display_flip_wait, &flip);
+ nv_wait_cb(nvxx_device(device), nv50_display_flip_wait, &flip);
}
int
@@ -550,6 +549,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
u32 *push;
int ret;
+ if (crtc->primary->fb->width != fb->width ||
+ crtc->primary->fb->height != fb->height)
+ return -EINVAL;
+
swap_interval <<= 4;
if (swap_interval == 0)
swap_interval |= 0x100;
@@ -729,8 +732,11 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
* effectively handles NONE/FULL scaling
*/
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode)
+ if (nv_connector && nv_connector->native_mode) {
mode = nv_connector->scaling_mode;
+ if (nv_connector->scaling_full) /* non-EDID LVDS/eDP mode */
+ mode = DRM_MODE_SCALE_FULLSCREEN;
+ }
if (mode != DRM_MODE_SCALE_NONE)
omode = nv_connector->native_mode;
@@ -917,29 +923,29 @@ static void
nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
u32 *push = evo_wait(mast, 16);
if (push) {
if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
- evo_data(push, curs->image->bo.offset >> 8);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
} else
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
- evo_data(push, curs->image->bo.offset >> 8);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
evo_data(push, 0x85000000);
- evo_data(push, curs->image->bo.offset >> 8);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
evo_data(push, mast->base.vram.handle);
}
evo_kick(push, mast);
}
+ nv_crtc->cursor.visible = true;
}
static void
@@ -965,15 +971,15 @@ nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
}
evo_kick(push, mast);
}
+ nv_crtc->cursor.visible = false;
}
static void
nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
- if (show && curs->image)
+ if (show && nv_crtc->cursor.nvbo)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);
@@ -1273,7 +1279,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_curs *curs = nv50_curs(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_object *gem = NULL;
struct nouveau_bo *nvbo = NULL;
@@ -1292,9 +1297,9 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
}
if (ret == 0) {
- if (curs->image)
- nouveau_bo_unpin(curs->image);
- nouveau_bo_ref(nvbo, &curs->image);
+ if (nv_crtc->cursor.nvbo)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(nvbo, &nv_crtc->cursor.nvbo);
}
drm_gem_object_unreference_unlocked(gem);
@@ -1305,10 +1310,14 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
static int
nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_curs *curs = nv50_curs(crtc);
struct nv50_chan *chan = nv50_chan(curs);
nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
nvif_wr32(&chan->user, 0x0080, 0x00000000);
+
+ nv_crtc->cursor_saved_x = x;
+ nv_crtc->cursor_saved_y = y;
return 0;
}
@@ -1330,6 +1339,14 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
}
static void
+nv50_crtc_cursor_restore(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+ nv50_crtc_cursor_move(&nv_crtc->base, x, y);
+
+ nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+}
+
+static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1354,9 +1371,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &head->image);
/*XXX: ditto */
- if (head->curs.image)
- nouveau_bo_unpin(head->curs.image);
- nouveau_bo_ref(NULL, &head->curs.image);
+ if (nv_crtc->cursor.nvbo)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1406,6 +1423,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
head->base.color_vibrance = 50;
head->base.vibrant_hue = 0;
+ head->base.cursor.set_pos = nv50_crtc_cursor_restore;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
@@ -1433,8 +1451,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- nv50_crtc_lut_load(crtc);
-
/* allocate cursor resources */
ret = nv50_curs_create(disp->disp, index, &head->curs);
if (ret)
@@ -1466,6 +1482,41 @@ out:
}
/******************************************************************************
+ * Encoder helpers
+ *****************************************************************************/
+static bool
+nv50_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ nv_connector->scaling_full = false;
+ if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) {
+ switch (nv_connector->type) {
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_LVDS_SPWG:
+ case DCB_CONNECTOR_eDP:
+ /* force use of scaler for non-edid modes */
+ if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
+ return true;
+ nv_connector->scaling_full = true;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ drm_mode_copy(adjusted_mode, nv_connector->native_mode);
+ }
+
+ return true;
+}
+
+/******************************************************************************
* DAC
*****************************************************************************/
static void
@@ -1492,26 +1543,6 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
static void
nv50_dac_commit(struct drm_encoder *encoder)
{
@@ -1629,7 +1660,7 @@ nv50_dac_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
.dpms = nv50_dac_dpms,
- .mode_fixup = nv50_dac_mode_fixup,
+ .mode_fixup = nv50_encoder_mode_fixup,
.prepare = nv50_dac_disconnect,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
@@ -1646,7 +1677,7 @@ static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
@@ -1834,26 +1865,6 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
static void
nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
{
@@ -2035,7 +2046,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
.dpms = nv50_sor_dpms,
- .mode_fixup = nv50_sor_mode_fixup,
+ .mode_fixup = nv50_encoder_mode_fixup,
.prepare = nv50_sor_disconnect,
.commit = nv50_sor_commit,
.mode_set = nv50_sor_mode_set,
@@ -2051,7 +2062,7 @@ static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
@@ -2112,18 +2123,8 @@ nv50_pior_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
+ if (!nv50_encoder_mode_fixup(encoder, mode, adjusted_mode))
+ return false;
adjusted_mode->clock *= 2;
return true;
}
@@ -2232,8 +2233,8 @@ static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
- struct nouveau_i2c_port *ddc = NULL;
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c_port *ddc = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
@@ -2427,6 +2428,8 @@ nv50_display_init(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nv50_sync *sync = nv50_sync(crtc);
+
+ nv50_crtc_lut_load(crtc);
nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index cb5b88938d45..bf429cabbaa8 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -213,7 +213,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
int
nv84_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
+ struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
struct nv84_fence_priv *priv;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
new file mode 100644
index 000000000000..ff8ed3a04d06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -0,0 +1,4 @@
+nvif-y := nvif/object.o
+nvif-y += nvif/client.o
+nvif-y += nvif/device.o
+nvif-y += nvif/notify.o
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3f7ac5bc8e03..80b96844221e 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -22,9 +22,9 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "client.h"
-#include "driver.h"
-#include "ioctl.h"
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/ioctl.h>
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index f477579725e3..6f72244c52cd 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "device.h"
+#include <nvif/device.h>
void
nvif_device_fini(struct nvif_device *device)
diff --git a/drivers/gpu/drm/nouveau/nvif/device.h b/drivers/gpu/drm/nouveau/nvif/device.h
deleted file mode 100644
index 43180f9fe630..000000000000
--- a/drivers/gpu/drm/nouveau/nvif/device.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __NVIF_DEVICE_H__
-#define __NVIF_DEVICE_H__
-
-#include "object.h"
-#include "class.h"
-
-struct nvif_device {
- struct nvif_object base;
- struct nvif_object *object; /*XXX: hack for nvif_object() */
- struct nv_device_info_v0 info;
-};
-
-static inline struct nvif_device *
-nvif_device(struct nvif_object *object)
-{
- while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
- object = object->parent;
- return (void *)object;
-}
-
-int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
- u32 handle, u32 oclass, void *, u32,
- struct nvif_device *);
-void nvif_device_fini(struct nvif_device *);
-int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
- void *, u32, struct nvif_device **);
-void nvif_device_ref(struct nvif_device *, struct nvif_device **);
-
-/*XXX*/
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-#include <subdev/gpio.h>
-#include <subdev/clock.h>
-#include <subdev/i2c.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-
-#define nvkm_device(a) nv_device(nvkm_object((a)))
-#define nvkm_bios(a) nouveau_bios(nvkm_device(a))
-#define nvkm_fb(a) nouveau_fb(nvkm_device(a))
-#define nvkm_vmmgr(a) nouveau_vmmgr(nvkm_device(a))
-#define nvkm_bar(a) nouveau_bar(nvkm_device(a))
-#define nvkm_gpio(a) nouveau_gpio(nvkm_device(a))
-#define nvkm_clock(a) nouveau_clock(nvkm_device(a))
-#define nvkm_i2c(a) nouveau_i2c(nvkm_device(a))
-#define nvkm_timer(a) nouveau_timer(nvkm_device(a))
-#define nvkm_wait(a,b,c,d) nv_wait(nvkm_timer(a), (b), (c), (d))
-#define nvkm_wait_cb(a,b,c) nv_wait_cb(nvkm_timer(a), (b), (c))
-#define nvkm_therm(a) nouveau_therm(nvkm_device(a))
-
-#include <engine/device.h>
-#include <engine/fifo.h>
-#include <engine/graph.h>
-#include <engine/software.h>
-
-#define nvkm_fifo(a) nouveau_fifo(nvkm_device(a))
-#define nvkm_fifo_chan(a) ((struct nouveau_fifo_chan *)nvkm_object(a))
-#define nvkm_gr(a) ((struct nouveau_graph *)nouveau_engine(nvkm_object(a), NVDEV_ENGINE_GR))
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 0898c3155292..8e34748709a0 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -92,7 +92,7 @@ nvif_notify_func(struct nvif_notify *notify, bool keep)
{
int ret = notify->func(notify);
if (ret == NVIF_NOTIFY_KEEP ||
- !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ !test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
if (!keep)
atomic_dec(&notify->putcnt);
else
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index dd85b56f6aa5..3ab4e2f8cc12 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -22,10 +22,10 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "object.h"
-#include "client.h"
-#include "driver.h"
-#include "ioctl.h"
+#include <nvif/object.h>
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/ioctl.h>
int
nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
diff --git a/drivers/gpu/drm/nouveau/nvif/os.h b/drivers/gpu/drm/nouveau/nvif/os.h
deleted file mode 120000
index bd744b2cf5cf..000000000000
--- a/drivers/gpu/drm/nouveau/nvif/os.h
+++ /dev/null
@@ -1 +0,0 @@
-../core/os.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
new file mode 100644
index 000000000000..2832147b676c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -0,0 +1,3 @@
+include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/subdev/Kbuild
+include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
new file mode 100644
index 000000000000..a2bdb2069113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -0,0 +1,17 @@
+nvkm-y := nvkm/core/client.o
+nvkm-y += nvkm/core/engctx.o
+nvkm-y += nvkm/core/engine.o
+nvkm-y += nvkm/core/enum.o
+nvkm-y += nvkm/core/event.o
+nvkm-y += nvkm/core/gpuobj.o
+nvkm-y += nvkm/core/handle.o
+nvkm-y += nvkm/core/ioctl.o
+nvkm-y += nvkm/core/mm.o
+nvkm-y += nvkm/core/namedb.o
+nvkm-y += nvkm/core/notify.o
+nvkm-y += nvkm/core/object.o
+nvkm-y += nvkm/core/option.o
+nvkm-y += nvkm/core/parent.o
+nvkm-y += nvkm/core/printk.o
+nvkm-y += nvkm/core/ramht.o
+nvkm-y += nvkm/core/subdev.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index e962433294c3..878a82f8f295 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -21,21 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
#include <core/client.h>
+#include <core/device.h>
#include <core/handle.h>
+#include <core/notify.h>
#include <core/option.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <nvif/event.h>
-
-#include <engine/device.h>
+#include <nvif/unpack.h>
struct nvkm_client_notify {
- struct nouveau_client *client;
+ struct nvkm_client *client;
struct nvkm_notify n;
u8 version;
u8 size;
@@ -48,12 +45,12 @@ static int
nvkm_client_notify(struct nvkm_notify *n)
{
struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n);
- struct nouveau_client *client = notify->client;
+ struct nvkm_client *client = notify->client;
return client->ntfy(&notify->rep, notify->size, n->data, n->size);
}
int
-nvkm_client_notify_put(struct nouveau_client *client, int index)
+nvkm_client_notify_put(struct nvkm_client *client, int index)
{
if (index < ARRAY_SIZE(client->notify)) {
if (client->notify[index]) {
@@ -65,7 +62,7 @@ nvkm_client_notify_put(struct nouveau_client *client, int index)
}
int
-nvkm_client_notify_get(struct nouveau_client *client, int index)
+nvkm_client_notify_get(struct nvkm_client *client, int index)
{
if (index < ARRAY_SIZE(client->notify)) {
if (client->notify[index]) {
@@ -77,7 +74,7 @@ nvkm_client_notify_get(struct nouveau_client *client, int index)
}
int
-nvkm_client_notify_del(struct nouveau_client *client, int index)
+nvkm_client_notify_del(struct nvkm_client *client, int index)
{
if (index < ARRAY_SIZE(client->notify)) {
if (client->notify[index]) {
@@ -91,10 +88,10 @@ nvkm_client_notify_del(struct nouveau_client *client, int index)
}
int
-nvkm_client_notify_new(struct nouveau_object *object,
+nvkm_client_notify_new(struct nvkm_object *object,
struct nvkm_event *event, void *data, u32 size)
{
- struct nouveau_client *client = nouveau_client(object);
+ struct nvkm_client *client = nvkm_client(object);
struct nvkm_client_notify *notify;
union {
struct nvif_notify_req_v0 v0;
@@ -142,7 +139,7 @@ nvkm_client_notify_new(struct nouveau_object *object,
}
static int
-nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size)
+nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nv_client_devlist_v0 v0;
@@ -154,8 +151,7 @@ nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size)
nv_ioctl(object, "client devlist vers %d count %d\n",
args->v0.version, args->v0.count);
if (size == sizeof(args->v0.device[0]) * args->v0.count) {
- ret = nouveau_device_list(args->v0.device,
- args->v0.count);
+ ret = nvkm_device_list(args->v0.device, args->v0.count);
if (ret >= 0) {
args->v0.count = ret;
ret = 0;
@@ -169,12 +165,11 @@ nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size)
}
static int
-nouveau_client_mthd(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
switch (mthd) {
case NV_CLIENT_DEVLIST:
- return nouveau_client_devlist(object, data, size);
+ return nvkm_client_mthd_devlist(object, data, size);
default:
break;
}
@@ -182,71 +177,71 @@ nouveau_client_mthd(struct nouveau_object *object, u32 mthd,
}
static void
-nouveau_client_dtor(struct nouveau_object *object)
+nvkm_client_dtor(struct nvkm_object *object)
{
- struct nouveau_client *client = (void *)object;
+ struct nvkm_client *client = (void *)object;
int i;
for (i = 0; i < ARRAY_SIZE(client->notify); i++)
nvkm_client_notify_del(client, i);
- nouveau_object_ref(NULL, &client->device);
- nouveau_handle_destroy(client->root);
- nouveau_namedb_destroy(&client->base);
+ nvkm_object_ref(NULL, &client->device);
+ nvkm_handle_destroy(client->root);
+ nvkm_namedb_destroy(&client->namedb);
}
-static struct nouveau_oclass
-nouveau_client_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
- .dtor = nouveau_client_dtor,
- .mthd = nouveau_client_mthd,
+static struct nvkm_oclass
+nvkm_client_oclass = {
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .dtor = nvkm_client_dtor,
+ .mthd = nvkm_client_mthd,
},
};
int
-nouveau_client_create_(const char *name, u64 devname, const char *cfg,
- const char *dbg, int length, void **pobject)
+nvkm_client_create_(const char *name, u64 devname, const char *cfg,
+ const char *dbg, int length, void **pobject)
{
- struct nouveau_object *device;
- struct nouveau_client *client;
+ struct nvkm_object *device;
+ struct nvkm_client *client;
int ret;
- device = (void *)nouveau_device_find(devname);
+ device = (void *)nvkm_device_find(devname);
if (!device)
return -ENODEV;
- ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
- NV_CLIENT_CLASS, NULL,
- (1ULL << NVDEV_ENGINE_DEVICE),
- length, pobject);
+ ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass,
+ NV_CLIENT_CLASS, NULL,
+ (1ULL << NVDEV_ENGINE_DEVICE),
+ length, pobject);
client = *pobject;
if (ret)
return ret;
- ret = nouveau_handle_create(nv_object(client), ~0, ~0,
- nv_object(client), &client->root);
+ ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client),
+ &client->root);
if (ret)
return ret;
/* prevent init/fini being called, os is in charge of this */
atomic_set(&nv_object(client)->usecount, 2);
- nouveau_object_ref(device, &client->device);
+ nvkm_object_ref(device, &client->device);
snprintf(client->name, sizeof(client->name), "%s", name);
- client->debug = nouveau_dbgopt(dbg, "CLIENT");
+ client->debug = nvkm_dbgopt(dbg, "CLIENT");
return 0;
}
int
-nouveau_client_init(struct nouveau_client *client)
+nvkm_client_init(struct nvkm_client *client)
{
int ret;
nv_debug(client, "init running\n");
- ret = nouveau_handle_init(client->root);
+ ret = nvkm_handle_init(client->root);
nv_debug(client, "init completed with %d\n", ret);
return ret;
}
int
-nouveau_client_fini(struct nouveau_client *client, bool suspend)
+nvkm_client_fini(struct nvkm_client *client, bool suspend)
{
const char *name[2] = { "fini", "suspend" };
int ret, i;
@@ -255,16 +250,16 @@ nouveau_client_fini(struct nouveau_client *client, bool suspend)
for (i = 0; i < ARRAY_SIZE(client->notify); i++)
nvkm_client_notify_put(client, i);
nv_debug(client, "%s object\n", name[suspend]);
- ret = nouveau_handle_fini(client->root, suspend);
+ ret = nvkm_handle_fini(client->root, suspend);
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
}
const char *
-nouveau_client_name(void *obj)
+nvkm_client_name(void *obj)
{
const char *client_name = "unknown";
- struct nouveau_client *client = nouveau_client(obj);
+ struct nvkm_client *client = nvkm_client(obj);
if (client)
client_name = client->name;
return client_name;
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
index 84c71fad2b6c..fb2acbca75d9 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
@@ -21,21 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
-#include <core/namedb.h>
-#include <core/handle.h>
-#include <core/client.h>
#include <core/engctx.h>
-
-#include <subdev/vm.h>
+#include <core/engine.h>
+#include <core/client.h>
static inline int
-nouveau_engctx_exists(struct nouveau_object *parent,
- struct nouveau_engine *engine, void **pobject)
+nvkm_engctx_exists(struct nvkm_object *parent,
+ struct nvkm_engine *engine, void **pobject)
{
- struct nouveau_engctx *engctx;
- struct nouveau_object *parctx;
+ struct nvkm_engctx *engctx;
+ struct nvkm_object *parctx;
list_for_each_entry(engctx, &engine->contexts, head) {
parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
@@ -50,16 +45,13 @@ nouveau_engctx_exists(struct nouveau_object *parent,
}
int
-nouveau_engctx_create_(struct nouveau_object *parent,
- struct nouveau_object *engobj,
- struct nouveau_oclass *oclass,
- struct nouveau_object *pargpu,
- u32 size, u32 align, u32 flags,
- int length, void **pobject)
+nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
+ struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
+ u32 size, u32 align, u32 flags, int length, void **pobject)
{
- struct nouveau_client *client = nouveau_client(parent);
- struct nouveau_engine *engine = nv_engine(engobj);
- struct nouveau_object *engctx;
+ struct nvkm_client *client = nvkm_client(parent);
+ struct nvkm_engine *engine = nv_engine(engobj);
+ struct nvkm_object *engctx;
unsigned long save;
int ret;
@@ -67,7 +59,7 @@ nouveau_engctx_create_(struct nouveau_object *parent,
* and reference it instead of creating a new one
*/
spin_lock_irqsave(&engine->lock, save);
- ret = nouveau_engctx_exists(parent, engine, pobject);
+ ret = nvkm_engctx_exists(parent, engine, pobject);
spin_unlock_irqrestore(&engine->lock, save);
if (ret)
return ret;
@@ -76,13 +68,12 @@ nouveau_engctx_create_(struct nouveau_object *parent,
* objects backed by instance memory
*/
if (size) {
- ret = nouveau_gpuobj_create_(parent, engobj, oclass,
- NV_ENGCTX_CLASS,
- pargpu, size, align, flags,
- length, pobject);
+ ret = nvkm_gpuobj_create_(parent, engobj, oclass,
+ NV_ENGCTX_CLASS, pargpu, size,
+ align, flags, length, pobject);
} else {
- ret = nouveau_object_create_(parent, engobj, oclass,
- NV_ENGCTX_CLASS, length, pobject);
+ ret = nvkm_object_create_(parent, engobj, oclass,
+ NV_ENGCTX_CLASS, length, pobject);
}
engctx = *pobject;
@@ -94,15 +85,15 @@ nouveau_engctx_create_(struct nouveau_object *parent,
* it's not possible to allocate the object with it held.
*/
spin_lock_irqsave(&engine->lock, save);
- ret = nouveau_engctx_exists(parent, engine, pobject);
+ ret = nvkm_engctx_exists(parent, engine, pobject);
if (ret) {
spin_unlock_irqrestore(&engine->lock, save);
- nouveau_object_ref(NULL, &engctx);
+ nvkm_object_ref(NULL, &engctx);
return ret;
}
if (client->vm)
- atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
+ atomic_inc(&client->vm->engref[nv_engidx(engine)]);
list_add(&nv_engctx(engctx)->head, &engine->contexts);
nv_engctx(engctx)->addr = ~0ULL;
spin_unlock_irqrestore(&engine->lock, save);
@@ -110,37 +101,36 @@ nouveau_engctx_create_(struct nouveau_object *parent,
}
void
-nouveau_engctx_destroy(struct nouveau_engctx *engctx)
+nvkm_engctx_destroy(struct nvkm_engctx *engctx)
{
- struct nouveau_object *engobj = nv_object(engctx)->engine;
- struct nouveau_engine *engine = nv_engine(engobj);
- struct nouveau_client *client = nouveau_client(engctx);
+ struct nvkm_engine *engine = engctx->gpuobj.object.engine;
+ struct nvkm_client *client = nvkm_client(engctx);
unsigned long save;
- nouveau_gpuobj_unmap(&engctx->vma);
+ nvkm_gpuobj_unmap(&engctx->vma);
spin_lock_irqsave(&engine->lock, save);
list_del(&engctx->head);
spin_unlock_irqrestore(&engine->lock, save);
if (client->vm)
- atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
+ atomic_dec(&client->vm->engref[nv_engidx(engine)]);
- if (engctx->base.size)
- nouveau_gpuobj_destroy(&engctx->base);
+ if (engctx->gpuobj.size)
+ nvkm_gpuobj_destroy(&engctx->gpuobj);
else
- nouveau_object_destroy(&engctx->base.base);
+ nvkm_object_destroy(&engctx->gpuobj.object);
}
int
-nouveau_engctx_init(struct nouveau_engctx *engctx)
+nvkm_engctx_init(struct nvkm_engctx *engctx)
{
- struct nouveau_object *object = nv_object(engctx);
- struct nouveau_subdev *subdev = nv_subdev(object->engine);
- struct nouveau_object *parent;
- struct nouveau_subdev *pardev;
+ struct nvkm_object *object = nv_object(engctx);
+ struct nvkm_subdev *subdev = nv_subdev(object->engine);
+ struct nvkm_object *parent;
+ struct nvkm_subdev *pardev;
int ret;
- ret = nouveau_gpuobj_init(&engctx->base);
+ ret = nvkm_gpuobj_init(&engctx->gpuobj);
if (ret)
return ret;
@@ -163,12 +153,12 @@ nouveau_engctx_init(struct nouveau_engctx *engctx)
}
int
-nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
+nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
{
- struct nouveau_object *object = nv_object(engctx);
- struct nouveau_subdev *subdev = nv_subdev(object->engine);
- struct nouveau_object *parent;
- struct nouveau_subdev *pardev;
+ struct nvkm_object *object = nv_object(engctx);
+ struct nvkm_subdev *subdev = nv_subdev(object->engine);
+ struct nvkm_object *parent;
+ struct nvkm_subdev *pardev;
int ret = 0;
parent = nv_pclass(object->parent, NV_PARENT_CLASS);
@@ -186,47 +176,45 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
}
nv_debug(parent, "detached %s context\n", subdev->name);
- return nouveau_gpuobj_fini(&engctx->base, suspend);
+ return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
}
int
-_nouveau_engctx_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_engctx *engctx;
+ struct nvkm_engctx *engctx;
int ret;
- ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
- NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
*pobject = nv_object(engctx);
return ret;
}
void
-_nouveau_engctx_dtor(struct nouveau_object *object)
+_nvkm_engctx_dtor(struct nvkm_object *object)
{
- nouveau_engctx_destroy(nv_engctx(object));
+ nvkm_engctx_destroy(nv_engctx(object));
}
int
-_nouveau_engctx_init(struct nouveau_object *object)
+_nvkm_engctx_init(struct nvkm_object *object)
{
- return nouveau_engctx_init(nv_engctx(object));
+ return nvkm_engctx_init(nv_engctx(object));
}
-
int
-_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
+_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
{
- return nouveau_engctx_fini(nv_engctx(object), suspend);
+ return nvkm_engctx_fini(nv_engctx(object), suspend);
}
-struct nouveau_object *
-nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
+struct nvkm_object *
+nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
{
- struct nouveau_engctx *engctx;
+ struct nvkm_engctx *engctx;
unsigned long flags;
spin_lock_irqsave(&engine->lock, flags);
@@ -241,11 +229,11 @@ nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
}
void
-nouveau_engctx_put(struct nouveau_object *object)
+nvkm_engctx_put(struct nvkm_object *object)
{
if (object) {
- struct nouveau_engine *engine = nv_engine(object->engine);
- struct nouveau_engctx *engctx = nv_engctx(object);
+ struct nvkm_engine *engine = nv_engine(object->engine);
+ struct nvkm_engctx *engctx = nv_engctx(object);
spin_unlock_irqrestore(&engine->lock, engctx->save);
}
}
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 1f6954ae9dd3..60820173c6aa 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -21,33 +21,40 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/device.h>
#include <core/engine.h>
+#include <core/device.h>
#include <core/option.h>
+struct nvkm_engine *
+nvkm_engine(void *obj, int idx)
+{
+ obj = nvkm_subdev(obj, idx);
+ if (obj && nv_iclass(obj, NV_ENGINE_CLASS))
+ return nv_engine(obj);
+ return NULL;
+}
+
int
-nouveau_engine_create_(struct nouveau_object *parent,
- struct nouveau_object *engobj,
- struct nouveau_oclass *oclass, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
+nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
+ struct nvkm_oclass *oclass, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
{
- struct nouveau_engine *engine;
+ struct nvkm_engine *engine;
int ret;
- ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
- iname, fname, length, pobject);
+ ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
+ iname, fname, length, pobject);
engine = *pobject;
if (ret)
return ret;
if (parent) {
- struct nouveau_device *device = nv_device(parent);
- int engidx = nv_engidx(nv_object(engine));
+ struct nvkm_device *device = nv_device(parent);
+ int engidx = nv_engidx(engine);
if (device->disable_mask & (1ULL << engidx)) {
- if (!nouveau_boolopt(device->cfgopt, iname, false)) {
+ if (!nvkm_boolopt(device->cfgopt, iname, false)) {
nv_debug(engine, "engine disabled by hw/fw\n");
return -ENODEV;
}
@@ -55,7 +62,7 @@ nouveau_engine_create_(struct nouveau_object *parent,
nv_warn(engine, "ignoring hw/fw engine disable\n");
}
- if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+ if (!nvkm_boolopt(device->cfgopt, iname, enable)) {
if (!enable)
nv_warn(engine, "disabled, %s=1 to enable\n", iname);
return -ENODEV;
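A minimal sketch of the nvkm_engine() helper added above (illustrative only): it resolves a subdev index to an engine, or NULL when the subdev is absent or not an engine; the index constants (NVDEV_ENGINE_*) live in core/device.h and are assumed here rather than shown in this hunk.

	struct nvkm_engine *engine = nvkm_engine(object, idx);
	if (!engine)
		return -ENODEV;	/* no engine registered at this index */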
diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index dd434790ccc4..4f92bfc13d6b 100644
--- a/drivers/gpu/drm/nouveau/core/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -24,12 +24,10 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
-#include <core/os.h>
#include <core/enum.h>
-const struct nouveau_enum *
-nouveau_enum_find(const struct nouveau_enum *en, u32 value)
+const struct nvkm_enum *
+nvkm_enum_find(const struct nvkm_enum *en, u32 value)
{
while (en->name) {
if (en->value == value)
@@ -40,10 +38,10 @@ nouveau_enum_find(const struct nouveau_enum *en, u32 value)
return NULL;
}
-const struct nouveau_enum *
-nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+const struct nvkm_enum *
+nvkm_enum_print(const struct nvkm_enum *en, u32 value)
{
- en = nouveau_enum_find(en, value);
+ en = nvkm_enum_find(en, value);
if (en)
pr_cont("%s", en->name);
else
@@ -52,7 +50,7 @@ nouveau_enum_print(const struct nouveau_enum *en, u32 value)
}
void
-nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value)
{
while (bf->name) {
if (value & bf->mask) {
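A usage sketch for the renamed enum helpers (illustrative only; the table below is hypothetical): tables are terminated by an entry with a NULL name, which is the condition nvkm_enum_find() loops on.

	static const struct nvkm_enum hypothetical_intr_names[] = {
		{ .value = 0x00000001, .name = "FAULT" },
		{ .value = 0x00000002, .name = "ILLEGAL_MTHD" },
		{}
	};

	const struct nvkm_enum *en = nvkm_enum_find(hypothetical_intr_names, stat);
	if (en)
		pr_cont(" %s", en->name);	/* nvkm_enum_print() wraps this lookup */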
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c
index 760947e380c9..4e8d3fa042df 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c
@@ -19,9 +19,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#include <core/object.h>
#include <core/event.h>
+#include <core/notify.h>
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
new file mode 100644
index 000000000000..2eba801aae6f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <core/gpuobj.h>
+#include <core/engine.h>
+
+#include <subdev/instmem.h>
+#include <subdev/bar.h>
+#include <subdev/mmu.h>
+
+void
+nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+{
+ int i;
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0x00000000);
+ }
+
+ if (gpuobj->node)
+ nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
+
+ if (gpuobj->heap.block_size)
+ nvkm_mm_fini(&gpuobj->heap);
+
+ nvkm_object_destroy(&gpuobj->object);
+}
+
+int
+nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 pclass,
+ struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
+ int length, void **pobject)
+{
+ struct nvkm_instmem *imem = nvkm_instmem(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct nvkm_gpuobj *gpuobj;
+ struct nvkm_mm *heap = NULL;
+ int ret, i;
+ u64 addr;
+
+ *pobject = NULL;
+
+ if (pargpu) {
+ while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
+ if (nv_gpuobj(pargpu)->heap.block_size)
+ break;
+ pargpu = pargpu->parent;
+ }
+
+ if (unlikely(pargpu == NULL)) {
+ nv_error(parent, "no gpuobj heap\n");
+ return -EINVAL;
+ }
+
+ addr = nv_gpuobj(pargpu)->addr;
+ heap = &nv_gpuobj(pargpu)->heap;
+ atomic_inc(&parent->refcount);
+ } else {
+ ret = imem->alloc(imem, parent, size, align, &parent);
+ pargpu = parent;
+ if (ret)
+ return ret;
+
+ addr = nv_memobj(pargpu)->addr;
+ size = nv_memobj(pargpu)->size;
+
+ if (bar && bar->alloc) {
+ struct nvkm_instobj *iobj = (void *)parent;
+ struct nvkm_mem **mem = (void *)(iobj + 1);
+ struct nvkm_mem *node = *mem;
+ if (!bar->alloc(bar, parent, node, &pargpu)) {
+ nvkm_object_ref(NULL, &parent);
+ parent = pargpu;
+ }
+ }
+ }
+
+ ret = nvkm_object_create_(parent, engine, oclass, pclass |
+ NV_GPUOBJ_CLASS, length, pobject);
+ nvkm_object_ref(NULL, &parent);
+ gpuobj = *pobject;
+ if (ret)
+ return ret;
+
+ gpuobj->parent = pargpu;
+ gpuobj->flags = flags;
+ gpuobj->addr = addr;
+ gpuobj->size = size;
+
+ if (heap) {
+ ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
+ &gpuobj->node);
+ if (ret)
+ return ret;
+
+ gpuobj->addr += gpuobj->node->offset;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
+ ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0x00000000);
+ }
+
+ return ret;
+}
+
+struct nvkm_gpuobj_class {
+ struct nvkm_object *pargpu;
+ u64 size;
+ u32 align;
+ u32 flags;
+};
+
+static int
+_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_gpuobj_class *args = data;
+ struct nvkm_gpuobj *object;
+ int ret;
+
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
+ args->size, args->align, args->flags,
+ &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+_nvkm_gpuobj_dtor(struct nvkm_object *object)
+{
+ nvkm_gpuobj_destroy(nv_gpuobj(object));
+}
+
+int
+_nvkm_gpuobj_init(struct nvkm_object *object)
+{
+ return nvkm_gpuobj_init(nv_gpuobj(object));
+}
+
+int
+_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
+{
+ return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
+}
+
+u32
+_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
+{
+ struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
+ struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+ if (gpuobj->node)
+ addr += gpuobj->node->offset;
+ return pfuncs->rd32(gpuobj->parent, addr);
+}
+
+void
+_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
+ struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+ if (gpuobj->node)
+ addr += gpuobj->node->offset;
+ pfuncs->wr32(gpuobj->parent, addr, data);
+}
+
+static struct nvkm_oclass
+_nvkm_gpuobj_oclass = {
+ .handle = 0x00000000,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_gpuobj_ctor,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
+ },
+};
+
+int
+nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ struct nvkm_gpuobj **pgpuobj)
+{
+ struct nvkm_object *engine = parent;
+ struct nvkm_gpuobj_class args = {
+ .pargpu = pargpu,
+ .size = size,
+ .align = align,
+ .flags = flags,
+ };
+
+ if (!nv_iclass(engine, NV_SUBDEV_CLASS))
+ engine = &engine->engine->subdev.object;
+ BUG_ON(engine == NULL);
+
+ return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
+ &args, sizeof(args),
+ (struct nvkm_object **)pgpuobj);
+}
+
+int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
+{
+ struct nvkm_bar *bar = nvkm_bar(gpuobj);
+ int ret = -EINVAL;
+
+ if (bar && bar->umap) {
+ struct nvkm_instobj *iobj = (void *)
+ nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+ struct nvkm_mem **mem = (void *)(iobj + 1);
+ ret = bar->umap(bar, *mem, access, vma);
+ }
+
+ return ret;
+}
+
+int
+nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+ u32 access, struct nvkm_vma *vma)
+{
+ struct nvkm_instobj *iobj = (void *)
+ nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+ struct nvkm_mem **mem = (void *)(iobj + 1);
+ int ret;
+
+ ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
+ if (ret)
+ return ret;
+
+ nvkm_vm_map(vma, *mem);
+ return 0;
+}
+
+void
+nvkm_gpuobj_unmap(struct nvkm_vma *vma)
+{
+ if (vma->node) {
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
+ }
+}
+
+/* the below is basically only here to support sharing the paged dma object
+ * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
+ * anywhere else.
+ */
+
+static void
+nvkm_gpudup_dtor(struct nvkm_object *object)
+{
+ struct nvkm_gpuobj *gpuobj = (void *)object;
+ nvkm_object_ref(NULL, &gpuobj->parent);
+ nvkm_object_destroy(&gpuobj->object);
+}
+
+static struct nvkm_oclass
+nvkm_gpudup_oclass = {
+ .handle = NV_GPUOBJ_CLASS,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .dtor = nvkm_gpudup_dtor,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
+ },
+};
+
+int
+nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
+ struct nvkm_gpuobj **pgpuobj)
+{
+ struct nvkm_gpuobj *gpuobj;
+ int ret;
+
+ ret = nvkm_object_create(parent, &parent->engine->subdev.object,
+ &nvkm_gpudup_oclass, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret)
+ return ret;
+
+ nvkm_object_ref(nv_object(base), &gpuobj->parent);
+ gpuobj->addr = base->addr;
+ gpuobj->size = base->size;
+ return 0;
+}
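A usage sketch for the new nvkm_gpuobj interface (illustrative only, not part of the patch): "parent" and "pargpu" are assumed to exist in the caller, and the NV_MEM_ACCESS_RW flag is assumed from the mmu headers rather than shown in this file.

	struct nvkm_gpuobj *gpuobj;
	struct nvkm_vma vma = {};
	int ret;

	/* 4KiB of instance memory, 256-byte aligned, zero-filled on allocation */
	ret = nvkm_gpuobj_new(parent, pargpu, 0x1000, 0x100,
			      NVOBJ_FLAG_ZERO_ALLOC, &gpuobj);
	if (ret)
		return ret;

	nv_wo32(gpuobj, 0x00, 0x00000000);	/* accessors route through the parent's rd32/wr32 */

	ret = nvkm_gpuobj_map(gpuobj, NV_MEM_ACCESS_RW, &vma);	/* optional BAR mapping */
	if (ret == 0)
		nvkm_gpuobj_unmap(&vma);
	/* object teardown runs through the normal dtor path and is not shown here */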
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
index 13f816cb08bd..dc7ff10ebe7b 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
@@ -21,31 +21,29 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
#include <core/handle.h>
#include <core/client.h>
#define hprintk(h,l,f,a...) do { \
- struct nouveau_client *c = nouveau_client((h)->object); \
- struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
+ struct nvkm_client *c = nvkm_client((h)->object); \
+ struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
} while(0)
int
-nouveau_handle_init(struct nouveau_handle *handle)
+nvkm_handle_init(struct nvkm_handle *handle)
{
- struct nouveau_handle *item;
+ struct nvkm_handle *item;
int ret;
hprintk(handle, TRACE, "init running\n");
- ret = nouveau_object_inc(handle->object);
+ ret = nvkm_object_inc(handle->object);
if (ret)
return ret;
hprintk(handle, TRACE, "init children\n");
list_for_each_entry(item, &handle->tree, head) {
- ret = nouveau_handle_init(item);
+ ret = nvkm_handle_init(item);
if (ret)
goto fail;
}
@@ -55,30 +53,30 @@ nouveau_handle_init(struct nouveau_handle *handle)
fail:
hprintk(handle, ERROR, "init failed with %d\n", ret);
list_for_each_entry_continue_reverse(item, &handle->tree, head) {
- nouveau_handle_fini(item, false);
+ nvkm_handle_fini(item, false);
}
- nouveau_object_dec(handle->object, false);
+ nvkm_object_dec(handle->object, false);
return ret;
}
int
-nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
+nvkm_handle_fini(struct nvkm_handle *handle, bool suspend)
{
static char *name[2] = { "fini", "suspend" };
- struct nouveau_handle *item;
+ struct nvkm_handle *item;
int ret;
hprintk(handle, TRACE, "%s children\n", name[suspend]);
list_for_each_entry(item, &handle->tree, head) {
- ret = nouveau_handle_fini(item, suspend);
+ ret = nvkm_handle_fini(item, suspend);
if (ret && suspend)
goto fail;
}
hprintk(handle, TRACE, "%s running\n", name[suspend]);
if (handle->object) {
- ret = nouveau_object_dec(handle->object, suspend);
+ ret = nvkm_object_dec(handle->object, suspend);
if (ret && suspend)
goto fail;
}
@@ -88,7 +86,7 @@ nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
fail:
hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
list_for_each_entry_continue_reverse(item, &handle->tree, head) {
- int rret = nouveau_handle_init(item);
+ int rret = nvkm_handle_init(item);
if (rret)
hprintk(handle, FATAL, "failed to restart, %d\n", rret);
}
@@ -97,12 +95,11 @@ fail:
}
int
-nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
- struct nouveau_object *object,
- struct nouveau_handle **phandle)
+nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle,
+ struct nvkm_object *object, struct nvkm_handle **phandle)
{
- struct nouveau_object *namedb;
- struct nouveau_handle *handle;
+ struct nvkm_object *namedb;
+ struct nvkm_handle *handle;
int ret;
namedb = parent;
@@ -118,7 +115,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
handle->name = _handle;
handle->priv = ~0;
- ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
+ ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
if (ret) {
kfree(handle);
return ret;
@@ -127,7 +124,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
if (nv_parent(parent)->object_attach) {
ret = nv_parent(parent)->object_attach(parent, object, _handle);
if (ret < 0) {
- nouveau_handle_destroy(handle);
+ nvkm_handle_destroy(handle);
return ret;
}
@@ -138,10 +135,10 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
while (!nv_iclass(namedb, NV_CLIENT_CLASS))
namedb = namedb->parent;
- handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
+ handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent);
if (handle->parent) {
list_add(&handle->head, &handle->parent->tree);
- nouveau_namedb_put(handle->parent);
+ nvkm_namedb_put(handle->parent);
}
}
@@ -151,74 +148,74 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
void
-nouveau_handle_destroy(struct nouveau_handle *handle)
+nvkm_handle_destroy(struct nvkm_handle *handle)
{
- struct nouveau_handle *item, *temp;
+ struct nvkm_handle *item, *temp;
hprintk(handle, TRACE, "destroy running\n");
list_for_each_entry_safe(item, temp, &handle->tree, head) {
- nouveau_handle_destroy(item);
+ nvkm_handle_destroy(item);
}
list_del(&handle->head);
if (handle->priv != ~0) {
- struct nouveau_object *parent = handle->parent->object;
+ struct nvkm_object *parent = handle->parent->object;
nv_parent(parent)->object_detach(parent, handle->priv);
}
hprintk(handle, TRACE, "destroy completed\n");
- nouveau_namedb_remove(handle);
+ nvkm_namedb_remove(handle);
kfree(handle);
}
-struct nouveau_object *
-nouveau_handle_ref(struct nouveau_object *parent, u32 name)
+struct nvkm_object *
+nvkm_handle_ref(struct nvkm_object *parent, u32 name)
{
- struct nouveau_object *object = NULL;
- struct nouveau_handle *handle;
+ struct nvkm_object *object = NULL;
+ struct nvkm_handle *handle;
while (!nv_iclass(parent, NV_NAMEDB_CLASS))
parent = parent->parent;
- handle = nouveau_namedb_get(nv_namedb(parent), name);
+ handle = nvkm_namedb_get(nv_namedb(parent), name);
if (handle) {
- nouveau_object_ref(handle->object, &object);
- nouveau_namedb_put(handle);
+ nvkm_object_ref(handle->object, &object);
+ nvkm_namedb_put(handle);
}
return object;
}
-struct nouveau_handle *
-nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
+struct nvkm_handle *
+nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
{
- struct nouveau_namedb *namedb;
+ struct nvkm_namedb *namedb;
if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
- return nouveau_namedb_get_class(namedb, oclass);
+ return nvkm_namedb_get_class(namedb, oclass);
return NULL;
}
-struct nouveau_handle *
-nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
+struct nvkm_handle *
+nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
{
- struct nouveau_namedb *namedb;
+ struct nvkm_namedb *namedb;
if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
- return nouveau_namedb_get_vinst(namedb, vinst);
+ return nvkm_namedb_get_vinst(namedb, vinst);
return NULL;
}
-struct nouveau_handle *
-nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
+struct nvkm_handle *
+nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
{
- struct nouveau_namedb *namedb;
+ struct nvkm_namedb *namedb;
if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
- return nouveau_namedb_get_cinst(namedb, cinst);
+ return nvkm_namedb_get_cinst(namedb, cinst);
return NULL;
}
void
-nouveau_handle_put(struct nouveau_handle *handle)
+nvkm_handle_put(struct nvkm_handle *handle)
{
if (handle)
- nouveau_namedb_put(handle);
+ nvkm_namedb_put(handle);
}
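A lifecycle sketch for the renamed handle API (illustrative only): _parent and _handle are client-supplied 32-bit names, and the sequence mirrors the nvkm_ioctl_new()/nvkm_ioctl_del() callers below.

	struct nvkm_handle *handle;
	int ret;

	ret = nvkm_handle_create(parent, _parent, _handle, object, &handle);
	if (ret)
		return ret;

	ret = nvkm_handle_init(handle);		/* recursively inits the handle tree */
	if (ret) {
		nvkm_handle_destroy(handle);
		return ret;
	}

	/* ... later, teardown in the reverse order ... */
	nvkm_handle_fini(handle, false);
	nvkm_handle_destroy(handle);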
diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 692aa92dd850..4459ff5f4cb8 100644
--- a/drivers/gpu/drm/nouveau/core/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -21,23 +21,19 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include <core/object.h>
-#include <core/parent.h>
+#include <core/ioctl.h>
+#include <core/client.h>
+#include <core/engine.h>
#include <core/handle.h>
#include <core/namedb.h>
-#include <core/client.h>
-#include <core/device.h>
-#include <core/ioctl.h>
-#include <core/event.h>
#include <nvif/unpack.h>
#include <nvif/ioctl.h>
static int
-nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_nop none;
} *args = data;
@@ -52,9 +48,9 @@ nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_sclass_v0 v0;
} *args = data;
@@ -70,8 +66,8 @@ nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
nv_ioctl(object, "sclass vers %d count %d\n",
args->v0.version, args->v0.count);
if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
- ret = nouveau_parent_lclass(object, args->v0.oclass,
- args->v0.count);
+ ret = nvkm_parent_lclass(object, args->v0.oclass,
+ args->v0.count);
if (ret >= 0) {
args->v0.count = ret;
ret = 0;
@@ -85,17 +81,17 @@ nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size)
{
union {
struct nvif_ioctl_new_v0 v0;
} *args = data;
- struct nouveau_client *client = nouveau_client(parent->object);
- struct nouveau_object *engctx = NULL;
- struct nouveau_object *object = NULL;
- struct nouveau_object *engine;
- struct nouveau_oclass *oclass;
- struct nouveau_handle *handle;
+ struct nvkm_client *client = nvkm_client(handle->object);
+ struct nvkm_object *engctx = NULL;
+ struct nvkm_object *object = NULL;
+ struct nvkm_parent *parent;
+ struct nvkm_object *engine;
+ struct nvkm_oclass *oclass;
u32 _handle, _oclass;
int ret;
@@ -108,19 +104,21 @@ nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
nv_ioctl(client, "new vers %d handle %08x class %08x "
"route %02x token %llx\n",
- args->v0.version, _handle, _oclass,
- args->v0.route, args->v0.token);
+ args->v0.version, _handle, _oclass,
+ args->v0.route, args->v0.token);
- if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
- nv_debug(parent->object, "cannot have children (ctor)\n");
+ if (!nv_iclass(handle->object, NV_PARENT_CLASS)) {
+ nv_debug(handle->object, "cannot have children (ctor)\n");
ret = -ENODEV;
goto fail_class;
}
+ parent = nv_parent(handle->object);
+
/* check that parent supports the requested subclass */
- ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
+ ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass);
if (ret) {
- nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
+ nv_debug(parent, "illegal class 0x%04x\n", _oclass);
goto fail_class;
}
@@ -129,7 +127,7 @@ nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
* state calculated at init (ie. default context construction)
*/
if (engine) {
- ret = nouveau_object_inc(engine);
+ ret = nvkm_object_inc(engine);
if (ret)
goto fail_class;
}
@@ -138,53 +136,53 @@ nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
* between the parent and its children (eg. PGRAPH context)
*/
if (engine && nv_engine(engine)->cclass) {
- ret = nouveau_object_ctor(parent->object, engine,
- nv_engine(engine)->cclass,
- data, size, &engctx);
+ ret = nvkm_object_ctor(&parent->object, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
if (ret)
goto fail_engctx;
} else {
- nouveau_object_ref(parent->object, &engctx);
+ nvkm_object_ref(&parent->object, &engctx);
}
/* finally, create new object and bind it to its handle */
- ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object);
client->data = object;
if (ret)
goto fail_ctor;
- ret = nouveau_object_inc(object);
+ ret = nvkm_object_inc(object);
if (ret)
goto fail_init;
- ret = nouveau_handle_create(parent->object, parent->name,
- _handle, object, &handle);
+ ret = nvkm_handle_create(&parent->object, handle->name,
+ _handle, object, &handle);
if (ret)
goto fail_handle;
- ret = nouveau_handle_init(handle);
+ ret = nvkm_handle_init(handle);
handle->route = args->v0.route;
handle->token = args->v0.token;
if (ret)
- nouveau_handle_destroy(handle);
+ nvkm_handle_destroy(handle);
fail_handle:
- nouveau_object_dec(object, false);
+ nvkm_object_dec(object, false);
fail_init:
- nouveau_object_ref(NULL, &object);
+ nvkm_object_ref(NULL, &object);
fail_ctor:
- nouveau_object_ref(NULL, &engctx);
+ nvkm_object_ref(NULL, &engctx);
fail_engctx:
if (engine)
- nouveau_object_dec(engine, false);
+ nvkm_object_dec(engine, false);
fail_class:
return ret;
}
static int
-nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_del none;
} *args = data;
@@ -193,18 +191,18 @@ nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size)
nv_ioctl(object, "delete size %d\n", size);
if (nvif_unvers(args->none)) {
nv_ioctl(object, "delete\n");
- nouveau_handle_fini(handle, false);
- nouveau_handle_destroy(handle);
+ nvkm_handle_fini(handle, false);
+ nvkm_handle_destroy(handle);
}
return ret;
}
static int
-nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
- struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ struct nvkm_object *object = handle->object;
+ struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_mthd_v0 v0;
} *args = data;
@@ -223,10 +221,10 @@ nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size)
static int
-nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
- struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ struct nvkm_object *object = handle->object;
+ struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_rd_v0 v0;
} *args = data;
@@ -235,7 +233,7 @@ nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
nv_ioctl(object, "rd size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
- args->v0.version, args->v0.size, args->v0.addr);
+ args->v0.version, args->v0.size, args->v0.addr);
switch (args->v0.size) {
case 1:
if (ret = -ENODEV, ofuncs->rd08) {
@@ -265,10 +263,10 @@ nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
- struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ struct nvkm_object *object = handle->object;
+ struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_wr_v0 v0;
} *args = data;
@@ -308,10 +306,10 @@ nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
- struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ struct nvkm_object *object = handle->object;
+ struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_map_v0 v0;
} *args = data;
@@ -330,9 +328,9 @@ nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_unmap none;
} *args = data;
@@ -347,10 +345,10 @@ nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_object *object = handle->object;
- struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ struct nvkm_object *object = handle->object;
+ struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_ntfy_new_v0 v0;
} *args = data;
@@ -376,10 +374,10 @@ nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_client *client = nouveau_client(handle->object);
- struct nouveau_object *object = handle->object;
+ struct nvkm_client *client = nvkm_client(handle->object);
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_ntfy_del_v0 v0;
} *args = data;
@@ -396,10 +394,10 @@ nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_client *client = nouveau_client(handle->object);
- struct nouveau_object *object = handle->object;
+ struct nvkm_client *client = nvkm_client(handle->object);
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_ntfy_get_v0 v0;
} *args = data;
@@ -416,10 +414,10 @@ nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
{
- struct nouveau_client *client = nouveau_client(handle->object);
- struct nouveau_object *object = handle->object;
+ struct nvkm_client *client = nvkm_client(handle->object);
+ struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_ntfy_put_v0 v0;
} *args = data;
@@ -437,7 +435,7 @@ nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
static struct {
int version;
- int (*func)(struct nouveau_handle *, void *, u32);
+ int (*func)(struct nvkm_handle *, void *, u32);
}
nvkm_ioctl_v0[] = {
{ 0x00, nvkm_ioctl_nop },
@@ -456,13 +454,12 @@ nvkm_ioctl_v0[] = {
};
static int
-nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
- u32 *path, void *data, u32 size,
- u8 owner, u8 *route, u64 *token)
+nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path,
+ void *data, u32 size, u8 owner, u8 *route, u64 *token)
{
- struct nouveau_handle *handle = parent;
- struct nouveau_namedb *namedb;
- struct nouveau_object *object;
+ struct nvkm_handle *handle = parent;
+ struct nvkm_namedb *namedb;
+ struct nvkm_object *object;
int ret;
while ((object = parent->object), nr--) {
@@ -473,16 +470,15 @@ nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
}
if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
- !(handle = nouveau_namedb_get(namedb, path[nr]))) {
+ !(handle = nvkm_namedb_get(namedb, path[nr]))) {
nv_debug(object, "handle 0x%08x not found\n", path[nr]);
return -ENOENT;
}
- nouveau_namedb_put(handle);
+ nvkm_namedb_put(handle);
parent = handle;
}
- if (owner != NVIF_IOCTL_V0_OWNER_ANY &&
- owner != handle->route) {
+ if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) {
nv_ioctl(object, "object route != owner\n");
return -EACCES;
}
@@ -490,16 +486,15 @@ nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
*token = handle->token;
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
- if (nvkm_ioctl_v0[type].version == 0) {
+ if (nvkm_ioctl_v0[type].version == 0)
ret = nvkm_ioctl_v0[type].func(handle, data, size);
- }
}
return ret;
}
int
-nvkm_ioctl(struct nouveau_client *client, bool supervisor,
+nvkm_ioctl(struct nvkm_client *client, bool supervisor,
void *data, u32 size, void **hack)
{
union {
@@ -517,7 +512,7 @@ nvkm_ioctl(struct nouveau_client *client, bool supervisor,
ret = nvkm_ioctl_path(client->root, args->v0.type,
args->v0.path_nr, args->v0.path,
data, size, args->v0.owner,
- &args->v0.route, &args->v0.token);
+ &args->v0.route, &args->v0.token);
}
nv_ioctl(client, "return %d\n", ret);
@@ -525,6 +520,7 @@ nvkm_ioctl(struct nouveau_client *client, bool supervisor,
*hack = client->data;
client->data = NULL;
}
+
client->super = false;
return ret;
}
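The dispatch at the bottom of nvkm_ioctl_path() presets the error code inside a comma expression before the bounds check; an equivalent, more explicit sketch (illustrative only):

	int ret = -EINVAL;

	if (type < ARRAY_SIZE(nvkm_ioctl_v0) && nvkm_ioctl_v0[type].version == 0)
		ret = nvkm_ioctl_v0[type].func(handle, data, size);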
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index b4f5db66d5b5..7f458dfd5608 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -21,39 +21,37 @@
*
* Authors: Ben Skeggs
*/
+#include <core/mm.h>
-#include "core/os.h"
-#include "core/mm.h"
-
-#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
- list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+ list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
static void
-nouveau_mm_dump(struct nouveau_mm *mm, const char *header)
+nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
- struct nouveau_mm_node *node;
+ struct nvkm_mm_node *node;
- printk(KERN_ERR "nouveau: %s\n", header);
- printk(KERN_ERR "nouveau: node list:\n");
+ printk(KERN_ERR "nvkm: %s\n", header);
+ printk(KERN_ERR "nvkm: node list:\n");
list_for_each_entry(node, &mm->nodes, nl_entry) {
- printk(KERN_ERR "nouveau: \t%08x %08x %d\n",
+ printk(KERN_ERR "nvkm: \t%08x %08x %d\n",
node->offset, node->length, node->type);
}
- printk(KERN_ERR "nouveau: free list:\n");
+ printk(KERN_ERR "nvkm: free list:\n");
list_for_each_entry(node, &mm->free, fl_entry) {
- printk(KERN_ERR "nouveau: \t%08x %08x %d\n",
+ printk(KERN_ERR "nvkm: \t%08x %08x %d\n",
node->offset, node->length, node->type);
}
}
void
-nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
+nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
{
- struct nouveau_mm_node *this = *pthis;
+ struct nvkm_mm_node *this = *pthis;
if (this) {
- struct nouveau_mm_node *prev = node(this, prev);
- struct nouveau_mm_node *next = node(this, next);
+ struct nvkm_mm_node *prev = node(this, prev);
+ struct nvkm_mm_node *next = node(this, next);
if (prev && prev->type == NVKM_MM_TYPE_NONE) {
prev->length += this->length;
@@ -84,10 +82,10 @@ nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
*pthis = NULL;
}
-static struct nouveau_mm_node *
-region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+static struct nvkm_mm_node *
+region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
- struct nouveau_mm_node *b;
+ struct nvkm_mm_node *b;
if (a->length == size)
return a;
@@ -105,14 +103,15 @@ region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
list_add_tail(&b->nl_entry, &a->nl_entry);
if (b->type == NVKM_MM_TYPE_NONE)
list_add_tail(&b->fl_entry, &a->fl_entry);
+
return b;
}
int
-nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,
- u32 size_min, u32 align, struct nouveau_mm_node **pnode)
+nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
+ u32 align, struct nvkm_mm_node **pnode)
{
- struct nouveau_mm_node *prev, *this, *next;
+ struct nvkm_mm_node *prev, *this, *next;
u32 mask = align - 1;
u32 splitoff;
u32 s, e;
@@ -157,10 +156,10 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,
return -ENOSPC;
}
-static struct nouveau_mm_node *
-region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+static struct nvkm_mm_node *
+region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
- struct nouveau_mm_node *b;
+ struct nvkm_mm_node *b;
if (a->length == size)
return a;
@@ -178,14 +177,15 @@ region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
list_add(&b->nl_entry, &a->nl_entry);
if (b->type == NVKM_MM_TYPE_NONE)
list_add(&b->fl_entry, &a->fl_entry);
+
return b;
}
int
-nouveau_mm_tail(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,
- u32 size_min, u32 align, struct nouveau_mm_node **pnode)
+nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
+ u32 align, struct nvkm_mm_node **pnode)
{
- struct nouveau_mm_node *prev, *this, *next;
+ struct nvkm_mm_node *prev, *this, *next;
u32 mask = align - 1;
BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
@@ -235,12 +235,12 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,
}
int
-nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
{
- struct nouveau_mm_node *node, *prev;
+ struct nvkm_mm_node *node, *prev;
u32 next;
- if (nouveau_mm_initialised(mm)) {
+ if (nvkm_mm_initialised(mm)) {
prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
next = prev->offset + prev->length;
if (next != offset) {
@@ -277,18 +277,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
}
int
-nouveau_mm_fini(struct nouveau_mm *mm)
+nvkm_mm_fini(struct nvkm_mm *mm)
{
- struct nouveau_mm_node *node, *temp;
+ struct nvkm_mm_node *node, *temp;
int nodes = 0;
- if (!nouveau_mm_initialised(mm))
+ if (!nvkm_mm_initialised(mm))
return 0;
list_for_each_entry(node, &mm->nodes, nl_entry) {
if (node->type != NVKM_MM_TYPE_HOLE) {
if (++nodes > mm->heap_nodes) {
- nouveau_mm_dump(mm, "mm not clean!");
+ nvkm_mm_dump(mm, "mm not clean!");
return -EBUSY;
}
}
@@ -298,6 +298,7 @@ nouveau_mm_fini(struct nouveau_mm *mm)
list_del(&node->nl_entry);
kfree(node);
}
+
mm->heap_nodes = 0;
return 0;
}
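An allocator sketch for the renamed mm interface (illustrative only): the heap/type/align values mirror the nvkm_gpuobj_create_() caller above rather than any new convention.

	struct nvkm_mm mm;
	struct nvkm_mm_node *node = NULL;
	int ret;

	ret = nvkm_mm_init(&mm, 0, 0x10000, 1);	/* offset, length, minimum block size */
	if (ret)
		return ret;

	/* carve 0x1000 units from the head: heap 0, type 1, natural alignment */
	ret = nvkm_mm_head(&mm, 0, 1, 0x1000, 0x1000, 1, &node);
	if (ret == 0)
		nvkm_mm_free(&mm, &node);	/* merges the region back into the free list */

	nvkm_mm_fini(&mm);			/* returns -EBUSY if allocations remain */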
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
index 0594a599f6fb..6400767c5dba 100644
--- a/drivers/gpu/drm/nouveau/core/core/namedb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
@@ -21,16 +21,14 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
#include <core/namedb.h>
-#include <core/handle.h>
#include <core/gpuobj.h>
+#include <core/handle.h>
-static struct nouveau_handle *
-nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
+static struct nvkm_handle *
+nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
list_for_each_entry(handle, &namedb->list, node) {
if (handle->name == name)
@@ -40,10 +38,10 @@ nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
return NULL;
}
-static struct nouveau_handle *
-nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
+static struct nvkm_handle *
+nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
list_for_each_entry(handle, &namedb->list, node) {
if (nv_mclass(handle->object) == oclass)
@@ -53,10 +51,10 @@ nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
return NULL;
}
-static struct nouveau_handle *
-nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
+static struct nvkm_handle *
+nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
list_for_each_entry(handle, &namedb->list, node) {
if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
@@ -68,10 +66,10 @@ nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
return NULL;
}
-static struct nouveau_handle *
-nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
+static struct nvkm_handle *
+nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
list_for_each_entry(handle, &namedb->list, node) {
if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
@@ -85,14 +83,14 @@ nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
}
int
-nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
- struct nouveau_object *object,
- struct nouveau_handle *handle)
+nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
+ struct nvkm_object *object,
+ struct nvkm_handle *handle)
{
int ret = -EEXIST;
write_lock_irq(&namedb->lock);
- if (!nouveau_namedb_lookup(namedb, name)) {
- nouveau_object_ref(object, &handle->object);
+ if (!nvkm_namedb_lookup(namedb, name)) {
+ nvkm_object_ref(object, &handle->object);
handle->namedb = namedb;
list_add(&handle->node, &namedb->list);
ret = 0;
@@ -102,80 +100,79 @@ nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
}
void
-nouveau_namedb_remove(struct nouveau_handle *handle)
+nvkm_namedb_remove(struct nvkm_handle *handle)
{
- struct nouveau_namedb *namedb = handle->namedb;
- struct nouveau_object *object = handle->object;
+ struct nvkm_namedb *namedb = handle->namedb;
+ struct nvkm_object *object = handle->object;
write_lock_irq(&namedb->lock);
list_del(&handle->node);
write_unlock_irq(&namedb->lock);
- nouveau_object_ref(NULL, &object);
+ nvkm_object_ref(NULL, &object);
}
-struct nouveau_handle *
-nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
+struct nvkm_handle *
+nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
read_lock(&namedb->lock);
- handle = nouveau_namedb_lookup(namedb, name);
+ handle = nvkm_namedb_lookup(namedb, name);
if (handle == NULL)
read_unlock(&namedb->lock);
return handle;
}
-struct nouveau_handle *
-nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
+struct nvkm_handle *
+nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
read_lock(&namedb->lock);
- handle = nouveau_namedb_lookup_class(namedb, oclass);
+ handle = nvkm_namedb_lookup_class(namedb, oclass);
if (handle == NULL)
read_unlock(&namedb->lock);
return handle;
}
-struct nouveau_handle *
-nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
+struct nvkm_handle *
+nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
read_lock(&namedb->lock);
- handle = nouveau_namedb_lookup_vinst(namedb, vinst);
+ handle = nvkm_namedb_lookup_vinst(namedb, vinst);
if (handle == NULL)
read_unlock(&namedb->lock);
return handle;
}
-struct nouveau_handle *
-nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
+struct nvkm_handle *
+nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
{
- struct nouveau_handle *handle;
+ struct nvkm_handle *handle;
read_lock(&namedb->lock);
- handle = nouveau_namedb_lookup_cinst(namedb, cinst);
+ handle = nvkm_namedb_lookup_cinst(namedb, cinst);
if (handle == NULL)
read_unlock(&namedb->lock);
return handle;
}
void
-nouveau_namedb_put(struct nouveau_handle *handle)
+nvkm_namedb_put(struct nvkm_handle *handle)
{
if (handle)
read_unlock(&handle->namedb->lock);
}
int
-nouveau_namedb_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pclass,
- struct nouveau_oclass *sclass, u64 engcls,
- int length, void **pobject)
+nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 pclass,
+ struct nvkm_oclass *sclass, u64 engcls,
+ int length, void **pobject)
{
- struct nouveau_namedb *namedb;
+ struct nvkm_namedb *namedb;
int ret;
- ret = nouveau_parent_create_(parent, engine, oclass, pclass |
- NV_NAMEDB_CLASS, sclass, engcls,
- length, pobject);
+ ret = nvkm_parent_create_(parent, engine, oclass, pclass |
+ NV_NAMEDB_CLASS, sclass, engcls,
+ length, pobject);
namedb = *pobject;
if (ret)
return ret;
@@ -186,15 +183,14 @@ nouveau_namedb_create_(struct nouveau_object *parent,
}
int
-_nouveau_namedb_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_namedb *object;
+ struct nvkm_namedb *object;
int ret;
- ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
+ ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
*pobject = nv_object(object);
if (ret)
return ret;
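A lookup sketch for the renamed namedb helpers (illustrative only): note the locking contract visible above -- a successful nvkm_namedb_get*() returns with namedb->lock still read-held, and only nvkm_namedb_put() drops it, so the two must always be paired.

	struct nvkm_handle *handle = nvkm_namedb_get(namedb, name);
	if (handle) {
		/* handle->object may be used while the read lock is held */
		nvkm_namedb_put(handle);
	}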
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/nvkm/core/notify.c
index 839a32577680..023610d01458 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/notify.c
@@ -21,13 +21,8 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include <core/client.h>
-#include <core/event.h>
#include <core/notify.h>
-
-#include <nvif/unpack.h>
-#include <nvif/event.h>
+#include <core/event.h>
static inline void
nvkm_notify_put_locked(struct nvkm_notify *notify)
@@ -134,7 +129,7 @@ nvkm_notify_fini(struct nvkm_notify *notify)
}
int
-nvkm_notify_init(struct nouveau_object *object, struct nvkm_event *event,
+nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event,
int (*func)(struct nvkm_notify *), bool work,
void *data, u32 size, u32 reply,
struct nvkm_notify *notify)
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index b08630577c82..979f3627d395 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -21,36 +21,34 @@
*
* Authors: Ben Skeggs
*/
-
#include <core/object.h>
#include <core/engine.h>
-#ifdef NOUVEAU_OBJECT_MAGIC
+#ifdef NVKM_OBJECT_MAGIC
static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
static DEFINE_SPINLOCK(_objlist_lock);
#endif
int
-nouveau_object_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pclass,
- int size, void **pobject)
+nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 pclass,
+ int size, void **pobject)
{
- struct nouveau_object *object;
+ struct nvkm_object *object;
object = *pobject = kzalloc(size, GFP_KERNEL);
if (!object)
return -ENOMEM;
- nouveau_object_ref(parent, &object->parent);
- nouveau_object_ref(engine, &object->engine);
+ nvkm_object_ref(parent, &object->parent);
+ nvkm_object_ref(engine, (struct nvkm_object **)&object->engine);
object->oclass = oclass;
object->oclass->handle |= pclass;
atomic_set(&object->refcount, 1);
atomic_set(&object->usecount, 0);
-#ifdef NOUVEAU_OBJECT_MAGIC
- object->_magic = NOUVEAU_OBJECT_MAGIC;
+#ifdef NVKM_OBJECT_MAGIC
+ object->_magic = NVKM_OBJECT_MAGIC;
spin_lock(&_objlist_lock);
list_add(&object->list, &_objlist);
spin_unlock(&_objlist_lock);
@@ -59,57 +57,55 @@ nouveau_object_create_(struct nouveau_object *parent,
}
int
-_nouveau_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
if (size != 0)
return -ENOSYS;
- return nouveau_object_create(parent, engine, oclass, 0, pobject);
+ return nvkm_object_create(parent, engine, oclass, 0, pobject);
}
void
-nouveau_object_destroy(struct nouveau_object *object)
+nvkm_object_destroy(struct nvkm_object *object)
{
-#ifdef NOUVEAU_OBJECT_MAGIC
+#ifdef NVKM_OBJECT_MAGIC
spin_lock(&_objlist_lock);
list_del(&object->list);
spin_unlock(&_objlist_lock);
#endif
- nouveau_object_ref(NULL, &object->engine);
- nouveau_object_ref(NULL, &object->parent);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine);
+ nvkm_object_ref(NULL, &object->parent);
kfree(object);
}
int
-nouveau_object_init(struct nouveau_object *object)
+nvkm_object_init(struct nvkm_object *object)
{
return 0;
}
int
-nouveau_object_fini(struct nouveau_object *object, bool suspend)
+nvkm_object_fini(struct nvkm_object *object, bool suspend)
{
return 0;
}
-struct nouveau_ofuncs
-nouveau_object_ofuncs = {
- .ctor = _nouveau_object_ctor,
- .dtor = nouveau_object_destroy,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
+struct nvkm_ofuncs
+nvkm_object_ofuncs = {
+ .ctor = _nvkm_object_ctor,
+ .dtor = nvkm_object_destroy,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
};
int
-nouveau_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
- struct nouveau_object *object = NULL;
+ struct nvkm_ofuncs *ofuncs = oclass->ofuncs;
+ struct nvkm_object *object = NULL;
int ret;
ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
@@ -137,14 +133,14 @@ nouveau_object_ctor(struct nouveau_object *parent,
}
static void
-nouveau_object_dtor(struct nouveau_object *object)
+nvkm_object_dtor(struct nvkm_object *object)
{
nv_trace(object, "destroying\n");
nv_ofuncs(object)->dtor(object);
}
void
-nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
+nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref)
{
if (obj) {
atomic_inc(&obj->refcount);
@@ -155,14 +151,14 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
int dead = atomic_dec_and_test(&(*ref)->refcount);
nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
if (dead)
- nouveau_object_dtor(*ref);
+ nvkm_object_dtor(*ref);
}
*ref = obj;
}
int
-nouveau_object_inc(struct nouveau_object *object)
+nvkm_object_inc(struct nvkm_object *object)
{
int ref = atomic_add_return(1, &object->usecount);
int ret;
@@ -173,7 +169,7 @@ nouveau_object_inc(struct nouveau_object *object)
nv_trace(object, "initialising...\n");
if (object->parent) {
- ret = nouveau_object_inc(object->parent);
+ ret = nvkm_object_inc(object->parent);
if (ret) {
nv_error(object, "parent failed, %d\n", ret);
goto fail_parent;
@@ -182,7 +178,7 @@ nouveau_object_inc(struct nouveau_object *object)
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- ret = nouveau_object_inc(object->engine);
+ ret = nvkm_object_inc(&object->engine->subdev.object);
mutex_unlock(&nv_subdev(object->engine)->mutex);
if (ret) {
nv_error(object, "engine failed, %d\n", ret);
@@ -203,19 +199,19 @@ nouveau_object_inc(struct nouveau_object *object)
fail_self:
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- nouveau_object_dec(object->engine, false);
+ nvkm_object_dec(&object->engine->subdev.object, false);
mutex_unlock(&nv_subdev(object->engine)->mutex);
}
fail_engine:
if (object->parent)
- nouveau_object_dec(object->parent, false);
+ nvkm_object_dec(object->parent, false);
fail_parent:
atomic_dec(&object->usecount);
return ret;
}
static int
-nouveau_object_decf(struct nouveau_object *object)
+nvkm_object_decf(struct nvkm_object *object)
{
int ret;
@@ -228,19 +224,19 @@ nouveau_object_decf(struct nouveau_object *object)
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- nouveau_object_dec(object->engine, false);
+ nvkm_object_dec(&object->engine->subdev.object, false);
mutex_unlock(&nv_subdev(object->engine)->mutex);
}
if (object->parent)
- nouveau_object_dec(object->parent, false);
+ nvkm_object_dec(object->parent, false);
nv_trace(object, "stopped\n");
return 0;
}
static int
-nouveau_object_decs(struct nouveau_object *object)
+nvkm_object_decs(struct nvkm_object *object)
{
int ret, rret;
@@ -255,7 +251,7 @@ nouveau_object_decs(struct nouveau_object *object)
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- ret = nouveau_object_dec(object->engine, true);
+ ret = nvkm_object_dec(&object->engine->subdev.object, true);
mutex_unlock(&nv_subdev(object->engine)->mutex);
if (ret) {
nv_warn(object, "engine failed suspend, %d\n", ret);
@@ -264,7 +260,7 @@ nouveau_object_decs(struct nouveau_object *object)
}
if (object->parent) {
- ret = nouveau_object_dec(object->parent, true);
+ ret = nvkm_object_dec(object->parent, true);
if (ret) {
nv_warn(object, "parent failed suspend, %d\n", ret);
goto fail_parent;
@@ -277,7 +273,7 @@ nouveau_object_decs(struct nouveau_object *object)
fail_parent:
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- rret = nouveau_object_inc(object->engine);
+ rret = nvkm_object_inc(&object->engine->subdev.object);
mutex_unlock(&nv_subdev(object->engine)->mutex);
if (rret)
nv_fatal(object, "engine failed to reinit, %d\n", rret);
@@ -292,7 +288,7 @@ fail_engine:
}
int
-nouveau_object_dec(struct nouveau_object *object, bool suspend)
+nvkm_object_dec(struct nvkm_object *object, bool suspend)
{
int ref = atomic_add_return(-1, &object->usecount);
int ret;
@@ -301,9 +297,9 @@ nouveau_object_dec(struct nouveau_object *object, bool suspend)
if (ref == 0) {
if (suspend)
- ret = nouveau_object_decs(object);
+ ret = nvkm_object_decs(object);
else
- ret = nouveau_object_decf(object);
+ ret = nvkm_object_decf(object);
if (ret) {
atomic_inc(&object->usecount);
@@ -315,10 +311,10 @@ nouveau_object_dec(struct nouveau_object *object, bool suspend)
}
void
-nouveau_object_debug(void)
+nvkm_object_debug(void)
{
-#ifdef NOUVEAU_OBJECT_MAGIC
- struct nouveau_object *object;
+#ifdef NVKM_OBJECT_MAGIC
+ struct nvkm_object *object;
if (!list_empty(&_objlist)) {
nv_fatal(NULL, "*******************************************\n");
nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c
index 9f6fcc5f66c2..19d153f8c8fd 100644
--- a/drivers/gpu/drm/nouveau/core/core/option.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c
@@ -21,12 +21,11 @@
*
* Authors: Ben Skeggs
*/
-
#include <core/option.h>
#include <core/debug.h>
const char *
-nouveau_stropt(const char *optstr, const char *opt, int *arglen)
+nvkm_stropt(const char *optstr, const char *opt, int *arglen)
{
while (optstr && *optstr != '\0') {
int len = strcspn(optstr, ",=");
@@ -52,11 +51,11 @@ nouveau_stropt(const char *optstr, const char *opt, int *arglen)
}
bool
-nouveau_boolopt(const char *optstr, const char *opt, bool value)
+nvkm_boolopt(const char *optstr, const char *opt, bool value)
{
int arglen;
- optstr = nouveau_stropt(optstr, opt, &arglen);
+ optstr = nvkm_stropt(optstr, opt, &arglen);
if (optstr) {
if (!strncasecmpz(optstr, "0", arglen) ||
!strncasecmpz(optstr, "no", arglen) ||
@@ -75,7 +74,7 @@ nouveau_boolopt(const char *optstr, const char *opt, bool value)
}
int
-nouveau_dbgopt(const char *optstr, const char *sub)
+nvkm_dbgopt(const char *optstr, const char *sub)
{
int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
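A parsing sketch for the renamed option helpers (illustrative only; the option string and names are hypothetical): nvkm_boolopt() returns the parsed boolean or the supplied default when the option is absent, and nvkm_stropt() returns a pointer into the original string (not NUL-terminated) with its length in arglen.

	const char *cfg = "NvSomeBool=yes,NvSomeArg=0x10";
	const char *arg;
	int arglen;
	bool on;

	on = nvkm_boolopt(cfg, "NvSomeBool", false);

	arg = nvkm_stropt(cfg, "NvSomeArg", &arglen);
	if (arg) {
		/* arg points at "0x10" inside cfg; arglen == 4 */
	}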
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
index 30a2911878f8..dd56cd1eeb38 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
@@ -21,25 +21,24 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
#include <core/parent.h>
#include <core/client.h>
+#include <core/engine.h>
int
-nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
- struct nouveau_object **pengine,
- struct nouveau_oclass **poclass)
+nvkm_parent_sclass(struct nvkm_object *parent, u16 handle,
+ struct nvkm_object **pengine,
+ struct nvkm_oclass **poclass)
{
- struct nouveau_sclass *sclass;
- struct nouveau_engine *engine;
- struct nouveau_oclass *oclass;
+ struct nvkm_sclass *sclass;
+ struct nvkm_engine *engine;
+ struct nvkm_oclass *oclass;
u64 mask;
sclass = nv_parent(parent)->sclass;
while (sclass) {
if ((sclass->oclass->handle & 0xffff) == handle) {
- *pengine = parent->engine;
+ *pengine = &parent->engine->subdev.object;
*poclass = sclass->oclass;
return 0;
}
@@ -54,7 +53,7 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
if (nv_iclass(parent, NV_CLIENT_CLASS))
engine = nv_engine(nv_client(parent)->device);
else
- engine = nouveau_engine(parent, i);
+ engine = nvkm_engine(parent, i);
if (engine) {
oclass = engine->sclass;
@@ -75,11 +74,11 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
}
int
-nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
+nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size)
{
- struct nouveau_sclass *sclass;
- struct nouveau_engine *engine;
- struct nouveau_oclass *oclass;
+ struct nvkm_sclass *sclass;
+ struct nvkm_engine *engine;
+ struct nvkm_oclass *oclass;
int nr = -1, i;
u64 mask;
@@ -92,7 +91,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
mask = nv_parent(parent)->engine;
while (i = __ffs64(mask), mask) {
- engine = nouveau_engine(parent, i);
+ engine = nvkm_engine(parent, i);
if (engine && (oclass = engine->sclass)) {
while (oclass->ofuncs) {
if (++nr < size)
@@ -108,18 +107,17 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
}
int
-nouveau_parent_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pclass,
- struct nouveau_oclass *sclass, u64 engcls,
- int size, void **pobject)
+nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 pclass,
+ struct nvkm_oclass *sclass, u64 engcls,
+ int size, void **pobject)
{
- struct nouveau_parent *object;
- struct nouveau_sclass *nclass;
+ struct nvkm_parent *object;
+ struct nvkm_sclass *nclass;
int ret;
- ret = nouveau_object_create_(parent, engine, oclass, pclass |
- NV_PARENT_CLASS, size, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, pclass |
+ NV_PARENT_CLASS, size, pobject);
object = *pobject;
if (ret)
return ret;
@@ -141,21 +139,21 @@ nouveau_parent_create_(struct nouveau_object *parent,
}
void
-nouveau_parent_destroy(struct nouveau_parent *parent)
+nvkm_parent_destroy(struct nvkm_parent *parent)
{
- struct nouveau_sclass *sclass;
+ struct nvkm_sclass *sclass;
while ((sclass = parent->sclass)) {
parent->sclass = sclass->sclass;
kfree(sclass);
}
- nouveau_object_destroy(&parent->base);
+ nvkm_object_destroy(&parent->object);
}
void
-_nouveau_parent_dtor(struct nouveau_object *object)
+_nvkm_parent_dtor(struct nvkm_object *object)
{
- nouveau_parent_destroy(nv_parent(object));
+ nvkm_parent_destroy(nv_parent(object));
}
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
index 03e0060b13da..4a220eb91660 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
@@ -21,16 +21,14 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
-#include <core/client.h>
-#include <core/subdev.h>
#include <core/printk.h>
+#include <core/client.h>
+#include <core/device.h>
int nv_info_debug_level = NV_DBG_INFO_NORMAL;
void
-nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
+nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
{
static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
const char *pfx;
@@ -60,20 +58,27 @@ nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
}
if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
- struct nouveau_object *device = object;
- struct nouveau_object *subdev = object;
+ struct nvkm_object *device;
+ struct nvkm_object *subdev;
char obuf[64], *ofmt = "";
- if (object->engine) {
- snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
- nv_hclass(object), object);
- ofmt = obuf;
- subdev = object->engine;
- device = object->engine;
+ if (object->engine == NULL) {
+ subdev = object;
+ while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
+ subdev = subdev->parent;
+ } else {
+ subdev = &object->engine->subdev.object;
}
- if (subdev->parent)
- device = subdev->parent;
+ device = subdev;
+ if (device->parent)
+ device = device->parent;
+
+ if (object != subdev) {
+ snprintf(obuf, sizeof(obuf), "[0x%08x]",
+ nv_hclass(object));
+ ofmt = obuf;
+ }
if (level > nv_subdev(subdev)->debug)
return;
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index f3b9bddc3875..ebd4d15479bd 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -19,14 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#include <core/object.h>
#include <core/ramht.h>
+#include <core/engine.h>
#include <subdev/bar.h>
static u32
-nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
+nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
{
u32 hash = 0;
@@ -41,13 +40,12 @@ nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
}
int
-nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
- u32 handle, u32 context)
+nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
{
- struct nouveau_bar *bar = nouveau_bar(ramht);
+ struct nvkm_bar *bar = nvkm_bar(ramht);
u32 co, ho;
- co = ho = nouveau_ramht_hash(ramht, chid, handle);
+ co = ho = nvkm_ramht_hash(ramht, chid, handle);
do {
if (!nv_ro32(ramht, co + 4)) {
nv_wo32(ramht, co + 0, handle);
@@ -66,39 +64,39 @@ nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
}
void
-nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
+nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
{
- struct nouveau_bar *bar = nouveau_bar(ramht);
+ struct nvkm_bar *bar = nvkm_bar(ramht);
nv_wo32(ramht, cookie + 0, 0x00000000);
nv_wo32(ramht, cookie + 4, 0x00000000);
if (bar)
bar->flush(bar);
}
-static struct nouveau_oclass
-nouveau_ramht_oclass = {
+static struct nvkm_oclass
+nvkm_ramht_oclass = {
.handle = 0x0000abcd,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = NULL,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
},
};
int
-nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
- u32 size, u32 align, struct nouveau_ramht **pramht)
+nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
+ u32 size, u32 align, struct nvkm_ramht **pramht)
{
- struct nouveau_ramht *ramht;
+ struct nvkm_ramht *ramht;
int ret;
- ret = nouveau_gpuobj_create(parent, parent->engine ?
- parent->engine : parent, /* <nv50 ramht */
- &nouveau_ramht_oclass, 0, pargpu, size,
- align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+ ret = nvkm_gpuobj_create(parent, parent->engine ?
+ &parent->engine->subdev.object : parent, /* <nv50 ramht */
+ &nvkm_ramht_oclass, 0, pargpu, size,
+ align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
*pramht = ramht;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 2ea5568b6cf5..c5fb3a793174 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -21,14 +21,23 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
#include <core/subdev.h>
#include <core/device.h>
#include <core/option.h>
+struct nvkm_subdev *
+nvkm_subdev(void *obj, int idx)
+{
+ struct nvkm_object *object = nv_object(obj);
+ while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
+ object = object->parent;
+ if (object == NULL || nv_subidx(nv_subdev(object)) != idx)
+ object = nv_device(obj)->subdev[idx];
+ return object ? nv_subdev(object) : NULL;
+}
+
void
-nouveau_subdev_reset(struct nouveau_object *subdev)
+nvkm_subdev_reset(struct nvkm_object *subdev)
{
nv_trace(subdev, "resetting...\n");
nv_ofuncs(subdev)->fini(subdev, false);
@@ -36,65 +45,64 @@ nouveau_subdev_reset(struct nouveau_object *subdev)
}
int
-nouveau_subdev_init(struct nouveau_subdev *subdev)
+nvkm_subdev_init(struct nvkm_subdev *subdev)
{
- int ret = nouveau_object_init(&subdev->base);
+ int ret = nvkm_object_init(&subdev->object);
if (ret)
return ret;
- nouveau_subdev_reset(&subdev->base);
+ nvkm_subdev_reset(&subdev->object);
return 0;
}
int
-_nouveau_subdev_init(struct nouveau_object *object)
+_nvkm_subdev_init(struct nvkm_object *object)
{
- return nouveau_subdev_init(nv_subdev(object));
+ return nvkm_subdev_init(nv_subdev(object));
}
int
-nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
+nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
{
if (subdev->unit) {
nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
}
- return nouveau_object_fini(&subdev->base, suspend);
+ return nvkm_object_fini(&subdev->object, suspend);
}
int
-_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
+_nvkm_subdev_fini(struct nvkm_object *object, bool suspend)
{
- return nouveau_subdev_fini(nv_subdev(object), suspend);
+ return nvkm_subdev_fini(nv_subdev(object), suspend);
}
void
-nouveau_subdev_destroy(struct nouveau_subdev *subdev)
+nvkm_subdev_destroy(struct nvkm_subdev *subdev)
{
int subidx = nv_hclass(subdev) & 0xff;
nv_device(subdev)->subdev[subidx] = NULL;
- nouveau_object_destroy(&subdev->base);
+ nvkm_object_destroy(&subdev->object);
}
void
-_nouveau_subdev_dtor(struct nouveau_object *object)
+_nvkm_subdev_dtor(struct nvkm_object *object)
{
- nouveau_subdev_destroy(nv_subdev(object));
+ nvkm_subdev_destroy(nv_subdev(object));
}
int
-nouveau_subdev_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pclass,
- const char *subname, const char *sysname,
- int size, void **pobject)
+nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 pclass,
+ const char *subname, const char *sysname,
+ int size, void **pobject)
{
- struct nouveau_subdev *subdev;
+ struct nvkm_subdev *subdev;
int ret;
- ret = nouveau_object_create_(parent, engine, oclass, pclass |
- NV_SUBDEV_CLASS, size, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, pclass |
+ NV_SUBDEV_CLASS, size, pobject);
subdev = *pobject;
if (ret)
return ret;
@@ -103,8 +111,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
subdev->name = subname;
if (parent) {
- struct nouveau_device *device = nv_device(parent);
- subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
+ struct nvkm_device *device = nv_device(parent);
+ subdev->debug = nvkm_dbgopt(device->dbgopt, subname);
subdev->mmio = nv_subdev(device)->mmio;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
new file mode 100644
index 000000000000..6bd3d756f32c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -0,0 +1,19 @@
+nvkm-y += nvkm/engine/falcon.o
+nvkm-y += nvkm/engine/xtensa.o
+
+include $(src)/nvkm/engine/bsp/Kbuild
+include $(src)/nvkm/engine/ce/Kbuild
+include $(src)/nvkm/engine/cipher/Kbuild
+include $(src)/nvkm/engine/device/Kbuild
+include $(src)/nvkm/engine/disp/Kbuild
+include $(src)/nvkm/engine/dmaobj/Kbuild
+include $(src)/nvkm/engine/fifo/Kbuild
+include $(src)/nvkm/engine/gr/Kbuild
+include $(src)/nvkm/engine/mpeg/Kbuild
+include $(src)/nvkm/engine/mspdec/Kbuild
+include $(src)/nvkm/engine/msppp/Kbuild
+include $(src)/nvkm/engine/msvld/Kbuild
+include $(src)/nvkm/engine/pm/Kbuild
+include $(src)/nvkm/engine/sec/Kbuild
+include $(src)/nvkm/engine/sw/Kbuild
+include $(src)/nvkm/engine/vp/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild
new file mode 100644
index 000000000000..5ac9f9e1a283
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/engine/bsp/g84.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 1e8e75c0684a..a0b1fd80fa93 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -21,17 +21,18 @@
*
* Authors: Ben Skeggs, Ilia Mirkin
*/
-
-#include <engine/xtensa.h>
#include <engine/bsp.h>
+#include <engine/xtensa.h>
+
+#include <core/engctx.h>
/*******************************************************************************
* BSP object classes
******************************************************************************/
-static struct nouveau_oclass
-nv84_bsp_sclass[] = {
- { 0x74b0, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+g84_bsp_sclass[] = {
+ { 0x74b0, &nvkm_object_ofuncs },
{},
};
@@ -39,16 +40,16 @@ nv84_bsp_sclass[] = {
* BSP context
******************************************************************************/
-static struct nouveau_oclass
-nv84_bsp_cclass = {
+static struct nvkm_oclass
+g84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_xtensa_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_xtensa_engctx_ctor,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
},
};
@@ -57,36 +58,36 @@ nv84_bsp_cclass = {
******************************************************************************/
static int
-nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_bsp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_xtensa *priv;
+ struct nvkm_xtensa *priv;
int ret;
- ret = nouveau_xtensa_create(parent, engine, oclass, 0x103000, true,
- "PBSP", "bsp", &priv);
+ ret = nvkm_xtensa_create(parent, engine, oclass, 0x103000, true,
+ "PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
- nv_engine(priv)->cclass = &nv84_bsp_cclass;
- nv_engine(priv)->sclass = nv84_bsp_sclass;
+ nv_engine(priv)->cclass = &g84_bsp_cclass;
+ nv_engine(priv)->sclass = g84_bsp_sclass;
priv->fifo_val = 0x1111;
priv->unkd28 = 0x90044;
return 0;
}
-struct nouveau_oclass
-nv84_bsp_oclass = {
+struct nvkm_oclass
+g84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_bsp_ctor,
- .dtor = _nouveau_xtensa_dtor,
- .init = _nouveau_xtensa_init,
- .fini = _nouveau_xtensa_fini,
- .rd32 = _nouveau_xtensa_rd32,
- .wr32 = _nouveau_xtensa_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_bsp_ctor,
+ .dtor = _nvkm_xtensa_dtor,
+ .init = _nvkm_xtensa_init,
+ .fini = _nvkm_xtensa_fini,
+ .rd32 = _nvkm_xtensa_rd32,
+ .wr32 = _nvkm_xtensa_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
new file mode 100644
index 000000000000..858797453e0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/engine/ce/gt215.o
+nvkm-y += nvkm/engine/ce/gf100.o
+nvkm-y += nvkm/engine/ce/gk104.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
index 219850d53286..a558dfa4d76a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
@@ -1,4 +1,4 @@
-/* fuc microcode for copy engine on nva3- chipsets
+/* fuc microcode for copy engine on gt215- chipsets
*
* Copyright 2011 Red Hat Inc.
*
@@ -23,26 +23,19 @@
* Authors: Ben Skeggs
*/
-/* To build for nva3:nvc0
- * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
- *
- * To build for nvc0-
- * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
- */
-
-ifdef(`NVA3',
-.section #nva3_pcopy_data
-,
-.section #nvc0_pcopy_data
-)
+#ifdef GT215
+.section #gt215_pce_data
+#else
+.section #gf100_pce_data
+#endif
ctx_object: .b32 0
-ifdef(`NVA3',
+#ifdef GT215
ctx_dma:
ctx_dma_query: .b32 0
ctx_dma_src: .b32 0
ctx_dma_dst: .b32 0
-,)
+#endif
.equ #ctx_dma_count 3
ctx_query_address_high: .b32 0
ctx_query_address_low: .b32 0
@@ -86,14 +79,14 @@ dispatch_table:
// mthd 0x0140, PM_TRIGGER
.b16 0x050 1
.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
-ifdef(`NVA3', `
+#ifdef GT215
// mthd 0x0180-0x018c, DMA_
.b16 0x060 #ctx_dma_count
dispatch_dma:
.b32 0x00010000 + #cmd_dma ~0xffffffff
.b32 0x00010000 + #cmd_dma ~0xffffffff
.b32 0x00010000 + #cmd_dma ~0xffffffff
-',)
+#endif
// mthd 0x0200-0x0218, SRC_TILE
.b16 0x80 7
.b32 #ctx_src_tile_mode ~0x00000fff
@@ -134,11 +127,11 @@ dispatch_dma:
.b32 #ctx_query_counter ~0xffffffff
.b16 0x800 0
-ifdef(`NVA3',
-.section #nva3_pcopy_code
-,
-.section #nvc0_pcopy_code
-)
+#ifdef GT215
+.section #gt215_pce_code
+#else
+.section #gf100_pce_code
+#endif
main:
clear b32 $r0
@@ -190,10 +183,10 @@ ih:
swctx:
mov $r4 0x7700
mov $xtargets $r4
-ifdef(`NVA3', `
+#ifdef GT215
// target 7 hardcoded to ctx dma object
mov $xdbase $r0
-', ` // NVC0
+#else
// read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
mov $r4 0x2100
iord $r4 I[$r4 + 0]
@@ -231,7 +224,7 @@ ifdef(`NVA3', `
shl b32 $r6 24
or $r4 $r6
mov $xdbase $r4
-')
+#endif
// 256-byte context, at start of data segment
mov b32 $r4 $r0
sethi $r4 0x60000
@@ -271,7 +264,7 @@ chsw:
bra e #chsw_finish_load
bset $flags $p1
call #swctx
-ifdef(`NVA3',
+#ifdef GT215
// load dma objects back into TARGET regs
mov $r5 #ctx_dma
mov $r6 #ctx_dma_count
@@ -282,8 +275,7 @@ ifdef(`NVA3',
iowr I[$r8] $r7
sub b32 $r6 1
bra nc #chsw_load_ctx_dma
-,)
-
+#endif
chsw_finish_load:
mov $r3 2
iowr I[$r2 + 0x200] $r3
@@ -397,7 +389,7 @@ cmd_pm_trigger:
iowr I[$r2] $r3
ret
-ifdef(`NVA3',
+#ifdef GT215
// SET_DMA_* method handler
//
// Inputs:
@@ -419,7 +411,7 @@ cmd_dma:
shl b32 $r4 6
iowr I[$r4] $r3
ret
-,)
+#endif
// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
//
@@ -548,11 +540,11 @@ cmd_exec_set_surface_tiled:
ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
extr $r9 $r7 8:11
extr $r8 $r7 4:7
-ifdef(`NVA3',
+#ifdef GT215
add b32 $r8 2
-,
+#else
add b32 $r8 3
-)
+#endif
extr $r7 $r7 0:3
cmp b32 $r7 0xe
bra ne #xtile64
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3 b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3
new file mode 100644
index 000000000000..36f0a99ac7a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3
@@ -0,0 +1,2 @@
+#define GF100
+#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index 98cc4216a372..d9af6e4e4585 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvc0_pcopy_data[] = {
+uint32_t gf100_pce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t nvc0_pcopy_data[] = {
0x00000800,
};
-uint32_t nvc0_pcopy_code[] = {
+uint32_t gf100_pce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3 b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3
new file mode 100644
index 000000000000..07bda93cfd79
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3
@@ -0,0 +1,2 @@
+#define GT215
+#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index 241b27201206..f42c0d0d6cee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nva3_pcopy_data[] = {
+uint32_t gt215_pce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t nva3_pcopy_data[] = {
0x00000800,
};
-uint32_t nva3_pcopy_code[] = {
+uint32_t gt215_pce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
new file mode 100644
index 000000000000..2d2e549c2e34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <engine/ce.h>
+#include <engine/falcon.h>
+#include "fuc/gf100.fuc3.h"
+
+struct gf100_ce_priv {
+ struct nvkm_falcon base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nvkm_oclass
+gf100_ce0_sclass[] = {
+ { 0x90b5, &nvkm_object_ofuncs },
+ {},
+};
+
+static struct nvkm_oclass
+gf100_ce1_sclass[] = {
+ { 0x90b8, &nvkm_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCE context
+ ******************************************************************************/
+
+static struct nvkm_ofuncs
+gf100_ce_context_ofuncs = {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
+};
+
+static struct nvkm_oclass
+gf100_ce0_cclass = {
+ .handle = NV_ENGCTX(CE0, 0xc0),
+ .ofuncs = &gf100_ce_context_ofuncs,
+};
+
+static struct nvkm_oclass
+gf100_ce1_cclass = {
+ .handle = NV_ENGCTX(CE1, 0xc0),
+ .ofuncs = &gf100_ce_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCE engine/subdev functions
+ ******************************************************************************/
+
+static int
+gf100_ce_init(struct nvkm_object *object)
+{
+ struct gf100_ce_priv *priv = (void *)object;
+ int ret;
+
+ ret = nvkm_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_CE0);
+ return 0;
+}
+
+static int
+gf100_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gf100_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, true,
+ "PCE0", "ce0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = gt215_ce_intr;
+ nv_engine(priv)->cclass = &gf100_ce0_cclass;
+ nv_engine(priv)->sclass = gf100_ce0_sclass;
+ nv_falcon(priv)->code.data = gf100_pce_code;
+ nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
+ nv_falcon(priv)->data.data = gf100_pce_data;
+ nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
+ return 0;
+}
+
+static int
+gf100_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gf100_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x105000, true,
+ "PCE1", "ce1", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = gt215_ce_intr;
+ nv_engine(priv)->cclass = &gf100_ce1_cclass;
+ nv_engine(priv)->sclass = gf100_ce1_sclass;
+ nv_falcon(priv)->code.data = gf100_pce_code;
+ nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
+ nv_falcon(priv)->data.data = gf100_pce_data;
+ nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
+ return 0;
+}
+
+struct nvkm_oclass
+gf100_ce0_oclass = {
+ .handle = NV_ENGINE(CE0, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_ce0_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gf100_ce_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
+ },
+};
+
+struct nvkm_oclass
+gf100_ce1_oclass = {
+ .handle = NV_ENGINE(CE1, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_ce1_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gf100_ce_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
new file mode 100644
index 000000000000..a998932fae45
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <engine/ce.h>
+
+#include <core/engctx.h>
+
+struct gk104_ce_priv {
+ struct nvkm_engine base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nvkm_oclass
+gk104_ce_sclass[] = {
+ { 0xa0b5, &nvkm_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCE context
+ ******************************************************************************/
+
+static struct nvkm_ofuncs
+gk104_ce_context_ofuncs = {
+ .ctor = _nvkm_engctx_ctor,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
+};
+
+static struct nvkm_oclass
+gk104_ce_cclass = {
+ .handle = NV_ENGCTX(CE0, 0xc0),
+ .ofuncs = &gk104_ce_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCE engine/subdev functions
+ ******************************************************************************/
+
+static void
+gk104_ce_intr(struct nvkm_subdev *subdev)
+{
+ const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
+ struct gk104_ce_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
+
+ if (stat) {
+ nv_warn(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+ }
+}
+
+static int
+gk104_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gk104_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE0", "ce0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = gk104_ce_intr;
+ nv_engine(priv)->cclass = &gk104_ce_cclass;
+ nv_engine(priv)->sclass = gk104_ce_sclass;
+ return 0;
+}
+
+static int
+gk104_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gk104_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE1", "ce1", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = gk104_ce_intr;
+ nv_engine(priv)->cclass = &gk104_ce_cclass;
+ nv_engine(priv)->sclass = gk104_ce_sclass;
+ return 0;
+}
+
+static int
+gk104_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gk104_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE2", "ce2", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00200000;
+ nv_subdev(priv)->intr = gk104_ce_intr;
+ nv_engine(priv)->cclass = &gk104_ce_cclass;
+ nv_engine(priv)->sclass = gk104_ce_sclass;
+ return 0;
+}
+
+struct nvkm_oclass
+gk104_ce0_oclass = {
+ .handle = NV_ENGINE(CE0, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_ce0_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
+
+struct nvkm_oclass
+gk104_ce1_oclass = {
+ .handle = NV_ENGINE(CE1, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_ce1_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
+
+struct nvkm_oclass
+gk104_ce2_oclass = {
+ .handle = NV_ENGINE(CE2, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_ce2_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index abb410ef09ea..d8bb4293bc11 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -21,57 +21,53 @@
*
* Authors: Ben Skeggs
*/
-
+#include <engine/ce.h>
#include <engine/falcon.h>
#include <engine/fifo.h>
-#include <engine/copy.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
+#include "fuc/gt215.fuc3.h"
#include <core/client.h>
+#include <core/device.h>
#include <core/enum.h>
-
-#include "fuc/nva3.fuc.h"
-
-struct nva3_copy_priv {
- struct nouveau_falcon base;
+struct gt215_ce_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
* Copy object classes
******************************************************************************/
-static struct nouveau_oclass
-nva3_copy_sclass[] = {
- { 0x85b5, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gt215_ce_sclass[] = {
+ { 0x85b5, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
- * PCOPY context
+ * PCE context
******************************************************************************/
-static struct nouveau_oclass
-nva3_copy_cclass = {
- .handle = NV_ENGCTX(COPY0, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+gt215_ce_cclass = {
+ .handle = NV_ENGCTX(CE0, 0xa3),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PCOPY engine/subdev functions
+ * PCE engine/subdev functions
******************************************************************************/
-static const struct nouveau_enum nva3_copy_isr_error_name[] = {
+static const struct nvkm_enum
+gt215_ce_isr_error_name[] = {
{ 0x0001, "ILLEGAL_MTHD" },
{ 0x0002, "INVALID_ENUM" },
{ 0x0003, "INVALID_BITFIELD" },
@@ -79,12 +75,12 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
};
void
-nva3_copy_intr(struct nouveau_subdev *subdev)
+gt215_ce_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_falcon *falcon = (void *)subdev;
- struct nouveau_object *engctx;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_falcon *falcon = (void *)subdev;
+ struct nvkm_object *engctx;
u32 dispatch = nv_ro32(falcon, 0x01c);
u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
@@ -95,14 +91,14 @@ nva3_copy_intr(struct nouveau_subdev *subdev)
u32 data = nv_ro32(falcon, 0x044);
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
nv_error(falcon, "DISPATCH_ERROR [");
- nouveau_enum_print(nva3_copy_isr_error_name, ssta);
+ nvkm_enum_print(gt215_ce_isr_error_name, ssta);
pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nouveau_client_name(engctx), subc,
+ chid, inst << 12, nvkm_client_name(engctx), subc,
mthd, data);
nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
@@ -113,44 +109,44 @@ nva3_copy_intr(struct nouveau_subdev *subdev)
nv_wo32(falcon, 0x004, stat);
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static int
-nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gt215_ce_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
bool enable = (nv_device(parent)->chipset != 0xaf);
- struct nva3_copy_priv *priv;
+ struct gt215_ce_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
- "PCE0", "copy0", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, enable,
+ "PCE0", "ce0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00802000;
- nv_subdev(priv)->intr = nva3_copy_intr;
- nv_engine(priv)->cclass = &nva3_copy_cclass;
- nv_engine(priv)->sclass = nva3_copy_sclass;
- nv_falcon(priv)->code.data = nva3_pcopy_code;
- nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
- nv_falcon(priv)->data.data = nva3_pcopy_data;
- nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
+ nv_subdev(priv)->intr = gt215_ce_intr;
+ nv_engine(priv)->cclass = &gt215_ce_cclass;
+ nv_engine(priv)->sclass = gt215_ce_sclass;
+ nv_falcon(priv)->code.data = gt215_pce_code;
+ nv_falcon(priv)->code.size = sizeof(gt215_pce_code);
+ nv_falcon(priv)->data.data = gt215_pce_data;
+ nv_falcon(priv)->data.size = sizeof(gt215_pce_data);
return 0;
}
-struct nouveau_oclass
-nva3_copy_oclass = {
- .handle = NV_ENGINE(COPY0, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_copy_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = _nouveau_falcon_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+gt215_ce_oclass = {
+ .handle = NV_ENGINE(CE0, 0xa3),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt215_ce_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = _nvkm_falcon_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild
new file mode 100644
index 000000000000..fa39945327ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/engine/cipher/g84.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index ea5c42f31791..13f30428a305 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -21,20 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/cipher.h>
+#include <engine/fifo.h>
#include <core/client.h>
-#include <core/os.h>
-#include <core/enum.h>
#include <core/engctx.h>
-#include <core/gpuobj.h>
-
-#include <subdev/fb.h>
-
-#include <engine/fifo.h>
-#include <engine/crypt.h>
+#include <core/enum.h>
-struct nv84_crypt_priv {
- struct nouveau_engine base;
+struct g84_cipher_priv {
+ struct nvkm_engine base;
};
/*******************************************************************************
@@ -42,16 +37,16 @@ struct nv84_crypt_priv {
******************************************************************************/
static int
-nv84_crypt_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_cipher_object_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpuobj *obj;
+ struct nvkm_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
@@ -63,44 +58,45 @@ nv84_crypt_object_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
-nv84_crypt_ofuncs = {
- .ctor = nv84_crypt_object_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+static struct nvkm_ofuncs
+g84_cipher_ofuncs = {
+ .ctor = g84_cipher_object_ctor,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
};
-static struct nouveau_oclass
-nv84_crypt_sclass[] = {
- { 0x74c1, &nv84_crypt_ofuncs },
+static struct nvkm_oclass
+g84_cipher_sclass[] = {
+ { 0x74c1, &g84_cipher_ofuncs },
{}
};
/*******************************************************************************
- * PCRYPT context
+ * PCIPHER context
******************************************************************************/
-static struct nouveau_oclass
-nv84_crypt_cclass = {
- .handle = NV_ENGCTX(CRYPT, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+static struct nvkm_oclass
+g84_cipher_cclass = {
+ .handle = NV_ENGCTX(CIPHER, 0x84),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_engctx_ctor,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
},
};
/*******************************************************************************
- * PCRYPT engine/subdev functions
+ * PCIPHER engine/subdev functions
******************************************************************************/
-static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
+static const struct nvkm_bitfield
+g84_cipher_intr_mask[] = {
{ 0x00000001, "INVALID_STATE" },
{ 0x00000002, "ILLEGAL_MTHD" },
{ 0x00000004, "ILLEGAL_CLASS" },
@@ -110,63 +106,63 @@ static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
};
static void
-nv84_crypt_intr(struct nouveau_subdev *subdev)
+g84_cipher_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nv84_crypt_priv *priv = (void *)subdev;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct g84_cipher_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, 0x102130);
u32 mthd = nv_rd32(priv, 0x102190);
u32 data = nv_rd32(priv, 0x102194);
u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat) {
nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+ nvkm_bitfield_print(g84_cipher_intr_mask, stat);
pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, nouveau_client_name(engctx),
+ chid, (u64)inst << 12, nvkm_client_name(engctx),
mthd, data);
}
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static int
-nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv84_crypt_priv *priv;
+ struct g84_cipher_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCRYPT", "crypt", &priv);
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCIPHER", "cipher", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00004000;
- nv_subdev(priv)->intr = nv84_crypt_intr;
- nv_engine(priv)->cclass = &nv84_crypt_cclass;
- nv_engine(priv)->sclass = nv84_crypt_sclass;
+ nv_subdev(priv)->intr = g84_cipher_intr;
+ nv_engine(priv)->cclass = &g84_cipher_cclass;
+ nv_engine(priv)->sclass = g84_cipher_sclass;
return 0;
}
static int
-nv84_crypt_init(struct nouveau_object *object)
+g84_cipher_init(struct nvkm_object *object)
{
- struct nv84_crypt_priv *priv = (void *)object;
+ struct g84_cipher_priv *priv = (void *)object;
int ret;
- ret = nouveau_engine_init(&priv->base);
+ ret = nvkm_engine_init(&priv->base);
if (ret)
return ret;
@@ -176,13 +172,13 @@ nv84_crypt_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv84_crypt_oclass = {
- .handle = NV_ENGINE(CRYPT, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_crypt_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = nv84_crypt_init,
- .fini = _nouveau_engine_fini,
+struct nvkm_oclass
+g84_cipher_oclass = {
+ .handle = NV_ENGINE(CIPHER, 0x84),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_cipher_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = g84_cipher_init,
+ .fini = _nvkm_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
new file mode 100644
index 000000000000..de1bf092b2b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -0,0 +1,12 @@
+nvkm-y += nvkm/engine/device/acpi.o
+nvkm-y += nvkm/engine/device/base.o
+nvkm-y += nvkm/engine/device/ctrl.o
+nvkm-y += nvkm/engine/device/nv04.o
+nvkm-y += nvkm/engine/device/nv10.o
+nvkm-y += nvkm/engine/device/nv20.o
+nvkm-y += nvkm/engine/device/nv30.o
+nvkm-y += nvkm/engine/device/nv40.o
+nvkm-y += nvkm/engine/device/nv50.o
+nvkm-y += nvkm/engine/device/gf100.o
+nvkm-y += nvkm/engine/device/gk104.o
+nvkm-y += nvkm/engine/device/gm100.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index 4dbf0ba89e5c..f42706e1d5db 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -21,14 +21,15 @@
*
* Authors: Ben Skeggs
*/
-
#include "acpi.h"
+#include <core/device.h>
+
#ifdef CONFIG_ACPI
static int
nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
{
- struct nouveau_device *device =
+ struct nvkm_device *device =
container_of(nb, typeof(*device), acpi.nb);
struct acpi_bus_event *info = data;
@@ -40,7 +41,7 @@ nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
#endif
int
-nvkm_acpi_fini(struct nouveau_device *device, bool suspend)
+nvkm_acpi_fini(struct nvkm_device *device, bool suspend)
{
#ifdef CONFIG_ACPI
unregister_acpi_notifier(&device->acpi.nb);
@@ -49,7 +50,7 @@ nvkm_acpi_fini(struct nouveau_device *device, bool suspend)
}
int
-nvkm_acpi_init(struct nouveau_device *device)
+nvkm_acpi_init(struct nvkm_device *device)
{
#ifdef CONFIG_ACPI
device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
new file mode 100644
index 000000000000..82dd359ddfa4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_DEVICE_ACPI_H__
+#define __NVKM_DEVICE_ACPI_H__
+#include <core/os.h>
+struct nvkm_device;
+
+int nvkm_acpi_init(struct nvkm_device *);
+int nvkm_acpi_fini(struct nvkm_device *, bool);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 137e0b0faeae..29bd539af183 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -21,28 +21,27 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include "acpi.h"
-#include <core/object.h>
-#include <core/device.h>
#include <core/client.h>
#include <core/option.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
+#include <core/notify.h>
+#include <core/parent.h>
#include <subdev/bios.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include "priv.h"
-#include "acpi.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
-struct nouveau_device *
-nouveau_device_find(u64 name)
+struct nvkm_device *
+nvkm_device_find(u64 name)
{
- struct nouveau_device *device, *match = NULL;
+ struct nvkm_device *device, *match = NULL;
mutex_lock(&nv_devices_mutex);
list_for_each_entry(device, &nv_devices, head) {
if (device->handle == name) {
@@ -55,9 +54,9 @@ nouveau_device_find(u64 name)
}
int
-nouveau_device_list(u64 *name, int size)
+nvkm_device_list(u64 *name, int size)
{
- struct nouveau_device *device;
+ struct nvkm_device *device;
int nr = 0;
mutex_lock(&nv_devices_mutex);
list_for_each_entry(device, &nv_devices, head) {
@@ -69,20 +68,20 @@ nouveau_device_list(u64 *name, int size)
}
/******************************************************************************
- * nouveau_devobj (0x0080): class implementation
+ * nvkm_devobj (0x0080): class implementation
*****************************************************************************/
-struct nouveau_devobj {
- struct nouveau_parent base;
- struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+struct nvkm_devobj {
+ struct nvkm_parent base;
+ struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
};
static int
-nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
+nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
{
- struct nouveau_device *device = nv_device(object);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nouveau_instmem *imem = nouveau_instmem(device);
+ struct nvkm_device *device = nv_device(object);
+ struct nvkm_fb *pfb = nvkm_fb(device);
+ struct nvkm_instmem *imem = nvkm_instmem(device);
union {
struct nv_device_info_v0 v0;
} *args = data;
@@ -147,12 +146,11 @@ nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
}
static int
-nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
switch (mthd) {
case NV_DEVICE_V0_INFO:
- return nouveau_devobj_info(object, data, size);
+ return nvkm_devobj_info(object, data, size);
default:
break;
}
@@ -160,45 +158,45 @@ nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
}
static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
+nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
{
return nv_rd08(object->engine, addr);
}
static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
+nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
{
return nv_rd16(object->engine, addr);
}
static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
+nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
{
return nv_rd32(object->engine, addr);
}
static void
-nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
+nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
{
nv_wr08(object->engine, addr, data);
}
static void
-nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
+nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
{
nv_wr16(object->engine, addr, data);
}
static void
-nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
nv_wr32(object->engine, addr, data);
}
static int
-nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
+nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
- struct nouveau_device *device = nv_device(object);
+ struct nvkm_device *device = nv_device(object);
*addr = nv_device_resource_start(device, 0);
*size = nv_device_resource_len(device, 0);
return 0;
@@ -209,7 +207,7 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLK ] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
@@ -218,74 +216,75 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_MMU] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_PMU] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_PM ] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
[NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
- [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GRAPH,
+ [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GR,
[NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
[NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
[NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
- [NVDEV_ENGINE_CRYPT] = NV_DEVICE_V0_DISABLE_CRYPT,
+ [NVDEV_ENGINE_CIPHER] = NV_DEVICE_V0_DISABLE_CIPHER,
[NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
- [NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
- [NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
- [NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
- [NVDEV_ENGINE_COPY2] = NV_DEVICE_V0_DISABLE_COPY1,
+ [NVDEV_ENGINE_MSPPP] = NV_DEVICE_V0_DISABLE_MSPPP,
+ [NVDEV_ENGINE_CE0] = NV_DEVICE_V0_DISABLE_CE0,
+ [NVDEV_ENGINE_CE1] = NV_DEVICE_V0_DISABLE_CE1,
+ [NVDEV_ENGINE_CE2] = NV_DEVICE_V0_DISABLE_CE2,
[NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
- [NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
+ [NVDEV_ENGINE_MSENC] = NV_DEVICE_V0_DISABLE_MSENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
+ [NVDEV_ENGINE_MSVLD] = NV_DEVICE_V0_DISABLE_MSVLD,
+ [NVDEV_ENGINE_SEC] = NV_DEVICE_V0_DISABLE_SEC,
[NVDEV_SUBDEV_NR] = 0,
};
static void
-nouveau_devobj_dtor(struct nouveau_object *object)
+nvkm_devobj_dtor(struct nvkm_object *object)
{
- struct nouveau_devobj *devobj = (void *)object;
+ struct nvkm_devobj *devobj = (void *)object;
int i;
for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
- nouveau_object_ref(NULL, &devobj->subdev[i]);
+ nvkm_object_ref(NULL, &devobj->subdev[i]);
- nouveau_parent_destroy(&devobj->base);
+ nvkm_parent_destroy(&devobj->base);
}
-static struct nouveau_oclass
-nouveau_devobj_oclass_super = {
+static struct nvkm_oclass
+nvkm_devobj_oclass_super = {
.handle = NV_DEVICE,
- .ofuncs = &(struct nouveau_ofuncs) {
- .dtor = nouveau_devobj_dtor,
- .init = _nouveau_parent_init,
- .fini = _nouveau_parent_fini,
- .mthd = nouveau_devobj_mthd,
- .map = nouveau_devobj_map,
- .rd08 = nouveau_devobj_rd08,
- .rd16 = nouveau_devobj_rd16,
- .rd32 = nouveau_devobj_rd32,
- .wr08 = nouveau_devobj_wr08,
- .wr16 = nouveau_devobj_wr16,
- .wr32 = nouveau_devobj_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .dtor = nvkm_devobj_dtor,
+ .init = _nvkm_parent_init,
+ .fini = _nvkm_parent_fini,
+ .mthd = nvkm_devobj_mthd,
+ .map = nvkm_devobj_map,
+ .rd08 = nvkm_devobj_rd08,
+ .rd16 = nvkm_devobj_rd16,
+ .rd32 = nvkm_devobj_rd32,
+ .wr08 = nvkm_devobj_wr08,
+ .wr16 = nvkm_devobj_wr16,
+ .wr32 = nvkm_devobj_wr32,
}
};
static int
-nouveau_devobj_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv_device_v0 v0;
} *args = data;
- struct nouveau_client *client = nv_client(parent);
- struct nouveau_device *device;
- struct nouveau_devobj *devobj;
+ struct nvkm_client *client = nv_client(parent);
+ struct nvkm_device *device;
+ struct nvkm_devobj *devobj;
u32 boot0, strap;
u64 disable, mmio_base, mmio_size;
void __iomem *map;
@@ -302,22 +301,22 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* give privileged clients register access */
if (client->super)
- oclass = &nouveau_devobj_oclass_super;
+ oclass = &nvkm_devobj_oclass_super;
/* find the device subdev that matches what the client requested */
device = nv_device(client->device);
if (args->v0.device != ~0) {
- device = nouveau_device_find(args->v0.device);
+ device = nvkm_device_find(args->v0.device);
if (!device)
return -ENODEV;
}
- ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
- nouveau_control_oclass,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_FIFO) |
- (1ULL << NVDEV_ENGINE_DISP) |
- (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
+ ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
+ nvkm_control_oclass,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_FIFO) |
+ (1ULL << NVDEV_ENGINE_DISP) |
+ (1ULL << NVDEV_ENGINE_PM), &devobj);
*pobject = nv_object(devobj);
if (ret)
return ret;
@@ -400,8 +399,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case NV_30: ret = nv30_identify(device); break;
case NV_40: ret = nv40_identify(device); break;
case NV_50: ret = nv50_identify(device); break;
- case NV_C0: ret = nvc0_identify(device); break;
- case NV_E0: ret = nve0_identify(device); break;
+ case NV_C0: ret = gf100_identify(device); break;
+ case NV_E0: ret = gk104_identify(device); break;
case GM100: ret = gm100_identify(device); break;
default:
ret = -EINVAL;
@@ -436,7 +435,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
} else
if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
device->cname = "NULL";
- device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
}
if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
@@ -454,14 +453,12 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
continue;
if (device->subdev[i]) {
- nouveau_object_ref(device->subdev[i],
- &devobj->subdev[i]);
+ nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
continue;
}
- ret = nouveau_object_ctor(nv_object(device), NULL,
- oclass, NULL, i,
- &devobj->subdev[i]);
+ ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
+ NULL, i, &devobj->subdev[i]);
if (ret == -ENODEV)
continue;
if (ret)
@@ -479,15 +476,15 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
* subdev in turn as they're created.
*/
while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
- struct nouveau_object *subdev = devobj->subdev[c++];
+ struct nvkm_object *subdev = devobj->subdev[c++];
if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_inc(subdev);
+ ret = nvkm_object_inc(subdev);
if (ret)
return ret;
atomic_dec(&nv_object(device)->usecount);
} else
if (subdev) {
- nouveau_subdev_reset(subdev);
+ nvkm_subdev_reset(subdev);
}
}
}
@@ -495,28 +492,47 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
-nouveau_devobj_ofuncs = {
- .ctor = nouveau_devobj_ctor,
- .dtor = nouveau_devobj_dtor,
- .init = _nouveau_parent_init,
- .fini = _nouveau_parent_fini,
- .mthd = nouveau_devobj_mthd,
+static struct nvkm_ofuncs
+nvkm_devobj_ofuncs = {
+ .ctor = nvkm_devobj_ctor,
+ .dtor = nvkm_devobj_dtor,
+ .init = _nvkm_parent_init,
+ .fini = _nvkm_parent_fini,
+ .mthd = nvkm_devobj_mthd,
};
/******************************************************************************
- * nouveau_device: engine functions
+ * nvkm_device: engine functions
*****************************************************************************/
-static struct nouveau_oclass
-nouveau_device_sclass[] = {
- { 0x0080, &nouveau_devobj_ofuncs },
+struct nvkm_device *
+nv_device(void *obj)
+{
+ struct nvkm_object *device = nv_object(obj);
+ if (device->engine == NULL) {
+ while (device && device->parent)
+ device = device->parent;
+ } else {
+ device = &nv_object(obj)->engine->subdev.object;
+ if (device && device->parent)
+ device = device->parent;
+ }
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+ if (unlikely(!device))
+ nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
+#endif
+ return (void *)device;
+}
+
+static struct nvkm_oclass
+nvkm_device_sclass[] = {
+ { 0x0080, &nvkm_devobj_ofuncs },
{}
};
static int
-nouveau_device_event_ctor(struct nouveau_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
if (!WARN_ON(size != 0)) {
notify->size = 0;
@@ -528,21 +544,21 @@ nouveau_device_event_ctor(struct nouveau_object *object, void *data, u32 size,
}
static const struct nvkm_event_func
-nouveau_device_event_func = {
- .ctor = nouveau_device_event_ctor,
+nvkm_device_event_func = {
+ .ctor = nvkm_device_event_ctor,
};
static int
-nouveau_device_fini(struct nouveau_object *object, bool suspend)
+nvkm_device_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_device *device = (void *)object;
- struct nouveau_object *subdev;
+ struct nvkm_device *device = (void *)object;
+ struct nvkm_object *subdev;
int ret, i;
for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
if ((subdev = device->subdev[i])) {
if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_dec(subdev, suspend);
+ ret = nvkm_object_dec(subdev, suspend);
if (ret && suspend)
goto fail;
}
@@ -554,7 +570,7 @@ fail:
for (; ret && i < NVDEV_SUBDEV_NR; i++) {
if ((subdev = device->subdev[i])) {
if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_inc(subdev);
+ ret = nvkm_object_inc(subdev);
if (ret) {
/* XXX */
}
@@ -566,10 +582,10 @@ fail:
}
static int
-nouveau_device_init(struct nouveau_object *object)
+nvkm_device_init(struct nvkm_object *object)
{
- struct nouveau_device *device = (void *)object;
- struct nouveau_object *subdev;
+ struct nvkm_device *device = (void *)object;
+ struct nvkm_object *subdev;
int ret, i = 0;
ret = nvkm_acpi_init(device);
@@ -579,11 +595,11 @@ nouveau_device_init(struct nouveau_object *object)
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if ((subdev = device->subdev[i])) {
if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_inc(subdev);
+ ret = nvkm_object_inc(subdev);
if (ret)
goto fail;
} else {
- nouveau_subdev_reset(subdev);
+ nvkm_subdev_reset(subdev);
}
}
}
@@ -593,7 +609,7 @@ fail:
for (--i; ret && i >= 0; i--) {
if ((subdev = device->subdev[i])) {
if (!nv_iclass(subdev, NV_ENGINE_CLASS))
- nouveau_object_dec(subdev, false);
+ nvkm_object_dec(subdev, false);
}
}
@@ -603,9 +619,9 @@ fail:
}
static void
-nouveau_device_dtor(struct nouveau_object *object)
+nvkm_device_dtor(struct nvkm_object *object)
{
- struct nouveau_device *device = (void *)object;
+ struct nvkm_device *device = (void *)object;
nvkm_event_fini(&device->event);
@@ -616,11 +632,11 @@ nouveau_device_dtor(struct nouveau_object *object)
if (nv_subdev(device)->mmio)
iounmap(nv_subdev(device)->mmio);
- nouveau_engine_destroy(&device->base);
+ nvkm_engine_destroy(&device->engine);
}
resource_size_t
-nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
+nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
{
if (nv_device_is_pci(device)) {
return pci_resource_start(device->pdev, bar);
@@ -635,7 +651,7 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
}
resource_size_t
-nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
+nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
{
if (nv_device_is_pci(device)) {
return pci_resource_len(device->pdev, bar);
@@ -650,7 +666,7 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
}
int
-nv_device_get_irq(struct nouveau_device *device, bool stall)
+nv_device_get_irq(struct nvkm_device *device, bool stall)
{
if (nv_device_is_pci(device)) {
return device->pdev->irq;
@@ -660,22 +676,22 @@ nv_device_get_irq(struct nouveau_device *device, bool stall)
}
}
-static struct nouveau_oclass
-nouveau_device_oclass = {
+static struct nvkm_oclass
+nvkm_device_oclass = {
.handle = NV_ENGINE(DEVICE, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .dtor = nouveau_device_dtor,
- .init = nouveau_device_init,
- .fini = nouveau_device_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .dtor = nvkm_device_dtor,
+ .init = nvkm_device_init,
+ .fini = nvkm_device_fini,
},
};
int
-nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
- const char *sname, const char *cfg, const char *dbg,
- int length, void **pobject)
+nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
+ const char *sname, const char *cfg, const char *dbg,
+ int length, void **pobject)
{
- struct nouveau_device *device;
+ struct nvkm_device *device;
int ret = -EEXIST;
mutex_lock(&nv_devices_mutex);
@@ -684,17 +700,17 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
goto done;
}
- ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
- "DEVICE", "device", length, pobject);
+ ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
+ "DEVICE", "device", length, pobject);
device = *pobject;
if (ret)
goto done;
switch (type) {
- case NOUVEAU_BUS_PCI:
+ case NVKM_BUS_PCI:
device->pdev = dev;
break;
- case NOUVEAU_BUS_PLATFORM:
+ case NVKM_BUS_PLATFORM:
device->platformdev = dev;
break;
}
@@ -703,12 +719,11 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
device->dbgopt = dbg;
device->name = sname;
- nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
- nv_engine(device)->sclass = nouveau_device_sclass;
+ nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
+ nv_engine(device)->sclass = nvkm_device_sclass;
list_add(&device->head, &nv_devices);
- ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
- &device->event);
+ ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
done:
mutex_unlock(&nv_devices_mutex);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index e34101a3490e..0b794b13cec3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -21,25 +21,22 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "priv.h"
#include <core/client.h>
-#include <core/object.h>
-#include <nvif/unpack.h>
+#include <subdev/clk.h>
+
#include <nvif/class.h>
#include <nvif/ioctl.h>
-
-#include <subdev/clock.h>
-
-#include "priv.h"
+#include <nvif/unpack.h>
static int
-nouveau_control_mthd_pstate_info(struct nouveau_object *object,
- void *data, u32 size)
+nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_control_pstate_info_v0 v0;
} *args = data;
- struct nouveau_clock *clk = nouveau_clock(object);
+ struct nvkm_clk *clk = nvkm_clk(object);
int ret;
nv_ioctl(object, "control pstate info size %d\n", size);
@@ -67,16 +64,15 @@ nouveau_control_mthd_pstate_info(struct nouveau_object *object,
}
static int
-nouveau_control_mthd_pstate_attr(struct nouveau_object *object,
- void *data, u32 size)
+nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_control_pstate_attr_v0 v0;
} *args = data;
- struct nouveau_clock *clk = nouveau_clock(object);
- struct nouveau_clocks *domain;
- struct nouveau_pstate *pstate;
- struct nouveau_cstate *cstate;
+ struct nvkm_clk *clk = nvkm_clk(object);
+ struct nvkm_domain *domain;
+ struct nvkm_pstate *pstate;
+ struct nvkm_cstate *cstate;
int i = 0, j = -1;
u32 lo, hi;
int ret;
@@ -141,13 +137,12 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object,
}
static int
-nouveau_control_mthd_pstate_user(struct nouveau_object *object,
- void *data, u32 size)
+nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_control_pstate_user_v0 v0;
} *args = data;
- struct nouveau_clock *clk = nouveau_clock(object);
+ struct nvkm_clk *clk = nvkm_clk(object);
int ret;
nv_ioctl(object, "control pstate user size %d\n", size);
@@ -161,45 +156,44 @@ nouveau_control_mthd_pstate_user(struct nouveau_object *object,
return ret;
if (args->v0.pwrsrc >= 0) {
- ret |= nouveau_clock_ustate(clk, args->v0.ustate, args->v0.pwrsrc);
+ ret |= nvkm_clk_ustate(clk, args->v0.ustate, args->v0.pwrsrc);
} else {
- ret |= nouveau_clock_ustate(clk, args->v0.ustate, 0);
- ret |= nouveau_clock_ustate(clk, args->v0.ustate, 1);
+ ret |= nvkm_clk_ustate(clk, args->v0.ustate, 0);
+ ret |= nvkm_clk_ustate(clk, args->v0.ustate, 1);
}
return ret;
}
static int
-nouveau_control_mthd(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
switch (mthd) {
case NVIF_CONTROL_PSTATE_INFO:
- return nouveau_control_mthd_pstate_info(object, data, size);
+ return nvkm_control_mthd_pstate_info(object, data, size);
case NVIF_CONTROL_PSTATE_ATTR:
- return nouveau_control_mthd_pstate_attr(object, data, size);
+ return nvkm_control_mthd_pstate_attr(object, data, size);
case NVIF_CONTROL_PSTATE_USER:
- return nouveau_control_mthd_pstate_user(object, data, size);
+ return nvkm_control_mthd_pstate_user(object, data, size);
default:
break;
}
return -EINVAL;
}
-static struct nouveau_ofuncs
-nouveau_control_ofuncs = {
- .ctor = _nouveau_object_ctor,
- .dtor = nouveau_object_destroy,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
- .mthd = nouveau_control_mthd,
+static struct nvkm_ofuncs
+nvkm_control_ofuncs = {
+ .ctor = _nvkm_object_ctor,
+ .dtor = nvkm_object_destroy,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
+ .mthd = nvkm_control_mthd,
};
-struct nouveau_oclass
-nouveau_control_oclass[] = {
+struct nvkm_oclass
+nvkm_control_oclass[] = {
{ .handle = NVIF_IOCTL_NEW_V0_CONTROL,
- .ofuncs = &nouveau_control_ofuncs
+ .ofuncs = &nvkm_control_ofuncs
},
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
new file mode 100644
index 000000000000..82b38d7e9730
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/fuse.h>
+#include <subdev/clk.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu.h>
+#include <subdev/bar.h>
+#include <subdev/pmu.h>
+#include <subdev/volt.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
+#include <engine/mspdec.h>
+#include <engine/bsp.h>
+#include <engine/msvld.h>
+#include <engine/msppp.h>
+#include <engine/ce.h>
+#include <engine/disp.h>
+#include <engine/pm.h>
+
+int
+gf100_identify(struct nvkm_device *device)
+{
+ switch (device->chipset) {
+ case 0xc0:
+ device->cname = "GF100";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf100_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xc4:
+ device->cname = "GF104";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xc3:
+ device->cname = "GF106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xce:
+ device->cname = "GF114";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xcf:
+ device->cname = "GF116";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xc1:
+ device->cname = "GF108";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf108_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xc8:
+ device->cname = "GF110";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf110_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xd9:
+ device->cname = "GF119";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gf110_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf119_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gf110_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ case 0xd7:
+ device->cname = "GF117";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gf110_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gf117_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gf110_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Fermi chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
new file mode 100644
index 000000000000..bf5893458a47
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/fuse.h>
+#include <subdev/clk.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu.h>
+#include <subdev/bar.h>
+#include <subdev/pmu.h>
+#include <subdev/volt.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
+#include <engine/disp.h>
+#include <engine/ce.h>
+#include <engine/bsp.h>
+#include <engine/msvld.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/pm.h>
+
+int
+gk104_identify(struct nvkm_device *device)
+{
+ switch (device->chipset) {
+ case 0xe4:
+ device->cname = "GK104";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk104_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
+ break;
+ case 0xe7:
+ device->cname = "GK107";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
+ break;
+ case 0xe6:
+ device->cname = "GK106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk104_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
+ break;
+ case 0xea:
+ device->cname = "GK20A";
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk20a_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk20a_gr_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk20a_pmu_oclass;
+ break;
+ case 0xf0:
+ device->cname = "GK110";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk110_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gk110_pm_oclass;
+ break;
+ case 0xf1:
+ device->cname = "GK110B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk110b_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = &gk110_pm_oclass;
+ break;
+ case 0x106:
+ device->cname = "GK208B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk208_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ break;
+ case 0x108:
+ device->cname = "GK208";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk208_gr_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Kepler chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 4e74a3376de8..539561ed3281 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -21,13 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/fuse.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
#include <subdev/therm.h>
#include <subdev/mxm.h>
#include <subdev/devinit.h>
@@ -37,108 +38,108 @@
#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
#include <subdev/bar.h>
-#include <subdev/pwr.h>
+#include <subdev/pmu.h>
#include <subdev/volt.h>
-#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
#include <engine/disp.h>
-#include <engine/copy.h>
+#include <engine/ce.h>
#include <engine/bsp.h>
-#include <engine/vp.h>
-#include <engine/ppp.h>
-#include <engine/perfmon.h>
+#include <engine/msvld.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/pm.h>
int
-gm100_identify(struct nouveau_device *device)
+gm100_identify(struct nvkm_device *device)
{
switch (device->chipset) {
case 0x117:
device->cname = "GM107";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
#if 0
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = gm107_disp_oclass;
- device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
#if 0
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
#endif
- device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
#if 0
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
#endif
break;
case 0x124:
device->cname = "GM204";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
#if 0
/* looks to be some non-trivial changes */
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
/* priv ring says no to 0x10eb14 writes */
device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
#endif
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
#if 0
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
#endif
device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
#if 0
- device->oclass[NVDEV_ENGINE_COPY0 ] = &gm204_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &gm204_copy1_oclass;
- device->oclass[NVDEV_ENGINE_COPY2 ] = &gm204_copy2_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
#endif
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
index 573b55f5c2f9..5a2ae043b478 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
@@ -21,63 +21,63 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/i2c.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
#include <subdev/devinit.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
-#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
#include <engine/disp.h>
int
-nv04_identify(struct nouveau_device *device)
+nv04_identify(struct nvkm_device *device)
{
switch (device->chipset) {
case 0x04:
device->cname = "NV04";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv04_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv04_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x05:
device->cname = "NV05";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv04_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv04_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
index 183a85a6204e..94a1ca45e94a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
@@ -21,178 +21,178 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
#include <subdev/devinit.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
-#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
#include <engine/disp.h>
int
-nv10_identify(struct nouveau_device *device)
+nv10_identify(struct nvkm_device *device)
{
switch (device->chipset) {
case 0x10:
device->cname = "NV10";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x15:
device->cname = "NV15";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x16:
device->cname = "NV16";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x1a:
device->cname = "nForce";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x11:
device->cname = "NV11";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x17:
device->cname = "NV17";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x1f:
device->cname = "nForce2";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x18:
device->cname = "NV18";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
index aa564c68a920..d5ec8937df68 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
@@ -21,105 +21,105 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
#include <subdev/therm.h>
#include <subdev/devinit.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
-#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
#include <engine/disp.h>
int
-nv20_identify(struct nouveau_device *device)
+nv20_identify(struct nvkm_device *device)
{
switch (device->chipset) {
case 0x20:
device->cname = "NV20";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv20_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x25:
device->cname = "NV25";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv25_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x28:
device->cname = "NV28";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv25_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x2a:
device->cname = "NV2A";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv2a_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
index 11bd31da82ab..dda09621e898 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
@@ -21,126 +21,126 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
#include <subdev/devinit.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
-#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
#include <engine/mpeg.h>
#include <engine/disp.h>
int
-nv30_identify(struct nouveau_device *device)
+nv30_identify(struct nvkm_device *device)
{
switch (device->chipset) {
case 0x30:
device->cname = "NV30";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv30_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x35:
device->cname = "NV35";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv35_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x31:
device->cname = "NV31";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv30_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x36:
device->cname = "NV36";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv35_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
case 0x34:
device->cname = "NV34";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv34_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
index e96c223cb797..c6301361d14f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
@@ -21,41 +21,41 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bus.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-#include <subdev/clock.h>
+#include <subdev/clk.h>
#include <subdev/therm.h>
#include <subdev/devinit.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
#include <subdev/volt.h>
-#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#include <engine/software.h>
-#include <engine/graph.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
#include <engine/mpeg.h>
#include <engine/disp.h>
-#include <engine/perfmon.h>
+#include <engine/pm.h>
int
-nv40_identify(struct nouveau_device *device)
+nv40_identify(struct nvkm_device *device)
{
switch (device->chipset) {
case 0x40:
device->cname = "NV40";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -63,22 +63,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x41:
device->cname = "NV41";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -86,22 +86,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x42:
device->cname = "NV42";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -109,22 +109,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x43:
device->cname = "NV43";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -132,22 +132,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x45:
device->cname = "NV45";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -155,22 +155,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x47:
device->cname = "G70";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -178,22 +178,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x49:
device->cname = "G71";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -201,22 +201,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x4b:
device->cname = "G73";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
@@ -224,22 +224,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x44:
device->cname = "NV44";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
@@ -247,22 +247,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x46:
device->cname = "G72";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
@@ -270,22 +270,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x4a:
device->cname = "NV44A";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
@@ -293,22 +293,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x4c:
device->cname = "C61";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
@@ -316,22 +316,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x4e:
device->cname = "C51";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv4e_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
@@ -339,22 +339,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x63:
device->cname = "C73";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
@@ -362,22 +362,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x67:
device->cname = "C67";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
@@ -385,22 +385,22 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
case 0x68:
device->cname = "C68";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
@@ -408,15 +408,15 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
break;
default:
nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
new file mode 100644
index 000000000000..249b84454612
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/fuse.h>
+#include <subdev/clk.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu.h>
+#include <subdev/bar.h>
+#include <subdev/pmu.h>
+#include <subdev/volt.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/sw.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+#include <engine/vp.h>
+#include <engine/cipher.h>
+#include <engine/sec.h>
+#include <engine/bsp.h>
+#include <engine/msvld.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/ce.h>
+#include <engine/disp.h>
+#include <engine/pm.h>
+
+int
+nv50_identify(struct nvkm_device *device)
+{
+ switch (device->chipset) {
+ case 0x50:
+ device->cname = "G80";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = nv50_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = nv50_pm_oclass;
+ break;
+ case 0x84:
+ device->cname = "G84";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0x86:
+ device->cname = "G86";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0x92:
+ device->cname = "G92";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0x94:
+ device->cname = "G94";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g94_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0x96:
+ device->cname = "G96";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g94_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0x98:
+ device->cname = "G98";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0xa0:
+ device->cname = "G200";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt200_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0xaa:
+ device->cname = "MCP77/MCP78";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = mcp77_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = mcp77_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0xac:
+ device->cname = "MCP79/MCP7A";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = mcp77_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = mcp77_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
+ break;
+ case 0xa3:
+ device->cname = "GT215";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
+ break;
+ case 0xa5:
+ device->cname = "GT216";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
+ break;
+ case 0xa8:
+ device->cname = "GT218";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
+ break;
+ case 0xaf:
+ device->cname = "MCP89";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = mcp89_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = mcp89_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Tesla chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
new file mode 100644
index 000000000000..8d3590e7bd87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_DEVICE_PRIV_H__
+#define __NVKM_DEVICE_PRIV_H__
+#include <core/device.h>
+
+extern struct nvkm_oclass nvkm_control_oclass[];
+
+int nv04_identify(struct nvkm_device *);
+int nv10_identify(struct nvkm_device *);
+int nv20_identify(struct nvkm_device *);
+int nv30_identify(struct nvkm_device *);
+int nv40_identify(struct nvkm_device *);
+int nv50_identify(struct nvkm_device *);
+int gf100_identify(struct nvkm_device *);
+int gk104_identify(struct nvkm_device *);
+int gm100_identify(struct nvkm_device *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
new file mode 100644
index 000000000000..16a4e2a37008
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -0,0 +1,29 @@
+nvkm-y += nvkm/engine/disp/base.o
+nvkm-y += nvkm/engine/disp/conn.o
+nvkm-y += nvkm/engine/disp/outp.o
+nvkm-y += nvkm/engine/disp/outpdp.o
+nvkm-y += nvkm/engine/disp/nv04.o
+nvkm-y += nvkm/engine/disp/nv50.o
+nvkm-y += nvkm/engine/disp/g84.o
+nvkm-y += nvkm/engine/disp/g94.o
+nvkm-y += nvkm/engine/disp/gt200.o
+nvkm-y += nvkm/engine/disp/gt215.o
+nvkm-y += nvkm/engine/disp/gf110.o
+nvkm-y += nvkm/engine/disp/gk104.o
+nvkm-y += nvkm/engine/disp/gk110.o
+nvkm-y += nvkm/engine/disp/gm107.o
+nvkm-y += nvkm/engine/disp/gm204.o
+nvkm-y += nvkm/engine/disp/dacnv50.o
+nvkm-y += nvkm/engine/disp/dport.o
+nvkm-y += nvkm/engine/disp/hdagt215.o
+nvkm-y += nvkm/engine/disp/hdagf110.o
+nvkm-y += nvkm/engine/disp/hdmig84.o
+nvkm-y += nvkm/engine/disp/hdmigt215.o
+nvkm-y += nvkm/engine/disp/hdmigf110.o
+nvkm-y += nvkm/engine/disp/hdmigk104.o
+nvkm-y += nvkm/engine/disp/piornv50.o
+nvkm-y += nvkm/engine/disp/sornv50.o
+nvkm-y += nvkm/engine/disp/sorg94.o
+nvkm-y += nvkm/engine/disp/sorgf110.o
+nvkm-y += nvkm/engine/disp/sorgm204.o
+nvkm-y += nvkm/engine/disp/vga.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 64b84667f3a5..23d1b5c0dc16 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -21,21 +21,23 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include "conn.h"
+#include "outp.h"
+
+#include <core/notify.h>
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
-#include <core/os.h>
-#include <nvif/unpack.h>
#include <nvif/class.h>
#include <nvif/event.h>
-
-#include "priv.h"
-#include "outp.h"
-#include "conn.h"
+#include <nvif/unpack.h>
int
-nouveau_disp_vblank_ctor(struct nouveau_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
- struct nouveau_disp *disp =
+ struct nvkm_disp *disp =
container_of(notify->event, typeof(*disp), vblank);
union {
struct nvif_notify_head_req_v0 v0;
@@ -55,17 +57,17 @@ nouveau_disp_vblank_ctor(struct nouveau_object *object, void *data, u32 size,
}
void
-nouveau_disp_vblank(struct nouveau_disp *disp, int head)
+nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
struct nvif_notify_head_rep_v0 rep = {};
nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
}
static int
-nouveau_disp_hpd_ctor(struct nouveau_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
- struct nouveau_disp *disp =
+ struct nvkm_disp *disp =
container_of(notify->event, typeof(*disp), hpd);
union {
struct nvif_notify_conn_req_v0 v0;
@@ -91,15 +93,14 @@ nouveau_disp_hpd_ctor(struct nouveau_object *object, void *data, u32 size,
}
static const struct nvkm_event_func
-nouveau_disp_hpd_func = {
- .ctor = nouveau_disp_hpd_ctor
+nvkm_disp_hpd_func = {
+ .ctor = nvkm_disp_hpd_ctor
};
int
-nouveau_disp_ntfy(struct nouveau_object *object, u32 type,
- struct nvkm_event **event)
+nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
{
- struct nouveau_disp *disp = (void *)object->engine;
+ struct nvkm_disp *disp = (void *)object->engine;
switch (type) {
case NV04_DISP_NTFY_VBLANK:
*event = &disp->vblank;
@@ -114,9 +115,9 @@ nouveau_disp_ntfy(struct nouveau_object *object, u32 type,
}
int
-_nouveau_disp_fini(struct nouveau_object *object, bool suspend)
+_nvkm_disp_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_disp *disp = (void *)object;
+ struct nvkm_disp *disp = (void *)object;
struct nvkm_output *outp;
int ret;
@@ -126,7 +127,7 @@ _nouveau_disp_fini(struct nouveau_object *object, bool suspend)
goto fail_outp;
}
- return nouveau_engine_fini(&disp->base, suspend);
+ return nvkm_engine_fini(&disp->base, suspend);
fail_outp:
list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
@@ -137,13 +138,13 @@ fail_outp:
}
int
-_nouveau_disp_init(struct nouveau_object *object)
+_nvkm_disp_init(struct nvkm_object *object)
{
- struct nouveau_disp *disp = (void *)object;
+ struct nvkm_disp *disp = (void *)object;
struct nvkm_output *outp;
int ret;
- ret = nouveau_engine_init(&disp->base);
+ ret = nvkm_engine_init(&disp->base);
if (ret)
return ret;
@@ -164,9 +165,9 @@ fail_outp:
}
void
-_nouveau_disp_dtor(struct nouveau_object *object)
+_nvkm_disp_dtor(struct nvkm_object *object)
{
- struct nouveau_disp *disp = (void *)object;
+ struct nvkm_disp *disp = (void *)object;
struct nvkm_output *outp, *outt;
nvkm_event_fini(&disp->vblank);
@@ -174,32 +175,30 @@ _nouveau_disp_dtor(struct nouveau_object *object)
if (disp->outp.next) {
list_for_each_entry_safe(outp, outt, &disp->outp, head) {
- nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&outp);
}
}
- nouveau_engine_destroy(&disp->base);
+ nvkm_engine_destroy(&disp->base);
}
int
-nouveau_disp_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int heads,
- const char *intname, const char *extname,
- int length, void **pobject)
+nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int heads, const char *intname,
+ const char *extname, int length, void **pobject)
{
- struct nouveau_disp_impl *impl = (void *)oclass;
- struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_disp *disp;
- struct nouveau_oclass **sclass;
- struct nouveau_object *object;
+ struct nvkm_disp_impl *impl = (void *)oclass;
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_disp *disp;
+ struct nvkm_oclass **sclass;
+ struct nvkm_object *object;
struct dcb_output dcbE;
u8 hpd = 0, ver, hdr;
u32 data;
int ret, i;
- ret = nouveau_engine_create_(parent, engine, oclass, true,
- intname, extname, length, pobject);
+ ret = nvkm_engine_create_(parent, engine, oclass, true, intname,
+ extname, length, pobject);
disp = *pobject;
if (ret)
return ret;
@@ -225,12 +224,11 @@ nouveau_disp_create_(struct nouveau_object *parent,
sclass++;
}
- nouveau_object_ctor(*pobject, *pobject, oclass,
- &dcbE, i, &object);
+ nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object);
hpd = max(hpd, (u8)(dcbE.connector + 1));
}
- ret = nvkm_event_init(&nouveau_disp_hpd_func, 3, hpd, &disp->hpd);
+ ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index 1496b567dd4a..cf03e0240ced 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -21,21 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-#include <nvif/event.h>
+#include "conn.h"
+#include "outp.h"
+#include "priv.h"
#include <subdev/gpio.h>
-#include "conn.h"
-#include "outp.h"
+#include <nvif/event.h>
static int
nvkm_connector_hpd(struct nvkm_notify *notify)
{
struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
- struct nouveau_disp *disp = nouveau_disp(conn);
- struct nouveau_gpio *gpio = nouveau_gpio(conn);
+ struct nvkm_disp *disp = nvkm_disp(conn);
+ struct nvkm_gpio *gpio = nvkm_gpio(conn);
const struct nvkm_gpio_ntfy_rep *line = notify->data;
struct nvif_notify_conn_rep_v0 rep;
int index = conn->index;
@@ -53,41 +52,41 @@ nvkm_connector_hpd(struct nvkm_notify *notify)
}
int
-_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
+_nvkm_connector_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_connector *conn = (void *)object;
nvkm_notify_put(&conn->hpd);
- return nouveau_object_fini(&conn->base, suspend);
+ return nvkm_object_fini(&conn->base, suspend);
}
int
-_nvkm_connector_init(struct nouveau_object *object)
+_nvkm_connector_init(struct nvkm_object *object)
{
struct nvkm_connector *conn = (void *)object;
- int ret = nouveau_object_init(&conn->base);
+ int ret = nvkm_object_init(&conn->base);
if (ret == 0)
nvkm_notify_get(&conn->hpd);
return ret;
}
void
-_nvkm_connector_dtor(struct nouveau_object *object)
+_nvkm_connector_dtor(struct nvkm_object *object)
{
struct nvkm_connector *conn = (void *)object;
nvkm_notify_fini(&conn->hpd);
- nouveau_object_destroy(&conn->base);
+ nvkm_object_destroy(&conn->base);
}
int
-nvkm_connector_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
+nvkm_connector_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass,
struct nvbios_connE *info, int index,
int length, void **pobject)
{
static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
- struct nouveau_gpio *gpio = nouveau_gpio(parent);
- struct nouveau_disp *disp = (void *)engine;
+ struct nvkm_disp *disp = nvkm_disp(parent);
+ struct nvkm_gpio *gpio = nvkm_gpio(parent);
struct nvkm_connector *conn;
struct nvkm_output *outp;
struct dcb_gpio_func func;
@@ -101,7 +100,7 @@ nvkm_connector_create_(struct nouveau_object *parent,
}
}
- ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
conn = *pobject;
if (ret)
return ret;
@@ -145,10 +144,10 @@ nvkm_connector_create_(struct nouveau_object *parent,
}
int
-_nvkm_connector_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *info, u32 index,
- struct nouveau_object **pobject)
+_nvkm_connector_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *info, u32 index,
+ struct nvkm_object **pobject)
{
struct nvkm_connector *conn;
int ret;
@@ -161,11 +160,11 @@ _nvkm_connector_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nvkm_connector_oclass = &(struct nvkm_connector_impl) {
.base = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_connector_ctor,
.dtor = _nvkm_connector_dtor,
.init = _nvkm_connector_init,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index 55e5f5c82c14..c87a061f7f7d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,10 +1,13 @@
#ifndef __NVKM_DISP_CONN_H__
#define __NVKM_DISP_CONN_H__
+#include <core/object.h>
+#include <core/notify.h>
-#include "priv.h"
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
struct nvkm_connector {
- struct nouveau_object base;
+ struct nvkm_object base;
struct list_head head;
struct nvbios_connE info;
@@ -28,29 +31,28 @@ struct nvkm_connector {
_nvkm_connector_fini(nv_object(disp), (s)); \
})
-int nvkm_connector_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, struct nvbios_connE *,
+int nvkm_connector_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, struct nvbios_connE *,
int, int, void **);
-int _nvkm_connector_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void _nvkm_connector_dtor(struct nouveau_object *);
-int _nvkm_connector_init(struct nouveau_object *);
-int _nvkm_connector_fini(struct nouveau_object *, bool);
+int _nvkm_connector_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void _nvkm_connector_dtor(struct nvkm_object *);
+int _nvkm_connector_init(struct nvkm_object *);
+int _nvkm_connector_fini(struct nvkm_object *, bool);
struct nvkm_connector_impl {
- struct nouveau_oclass base;
+ struct nvkm_oclass base;
};
#ifndef MSG
#define MSG(l,f,a...) do { \
struct nvkm_connector *_conn = (void *)conn; \
- nv_##l(nv_object(conn)->engine, "%02x:%02x%02x: "f, _conn->index, \
+ nv_##l(_conn, "%02x:%02x%02x: "f, _conn->index, \
_conn->info.location, _conn->info.type, ##a); \
} while(0)
#define DBG(f,a...) MSG(debug, f, ##a)
#define ERR(f,a...) MSG(error, f, ##a)
#endif
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index b36addff06a9..0f7d1ec4d37e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -21,16 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
nv50_dac_power(NV50_DISP_MTHD_V1)
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 16db08dfba6e..68347661adca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -21,20 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "dport.h"
+#include "outpdp.h"
+#include "nv50.h"
#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/dp.h>
#include <subdev/bios/init.h>
#include <subdev/i2c.h>
-#include "nv50.h"
-
#include <nvif/class.h>
-#include "dport.h"
-#include "outpdp.h"
-
/******************************************************************************
* link training
*****************************************************************************/
@@ -54,8 +50,8 @@ dp_set_link_config(struct dp_state *dp)
{
struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
struct nvkm_output_dp *outp = dp->outp;
- struct nouveau_disp *disp = nouveau_disp(outp);
- struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvkm_disp *disp = nvkm_disp(outp);
+ struct nvkm_bios *bios = nvkm_bios(disp);
struct nvbios_init init = {
.subdev = nv_subdev(disp),
.bios = bios,
@@ -264,8 +260,8 @@ static void
dp_link_train_init(struct dp_state *dp, bool spread)
{
struct nvkm_output_dp *outp = dp->outp;
- struct nouveau_disp *disp = nouveau_disp(outp);
- struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvkm_disp *disp = nvkm_disp(outp);
+ struct nvkm_bios *bios = nvkm_bios(disp);
struct nvbios_init init = {
.subdev = nv_subdev(disp),
.bios = bios,
@@ -290,8 +286,8 @@ static void
dp_link_train_fini(struct dp_state *dp)
{
struct nvkm_output_dp *outp = dp->outp;
- struct nouveau_disp *disp = nouveau_disp(outp);
- struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvkm_disp *disp = nvkm_disp(outp);
+ struct nvkm_bios *bios = nvkm_bios(disp);
struct nvbios_init init = {
.subdev = nv_subdev(disp),
.bios = bios,
@@ -309,7 +305,7 @@ static const struct dp_rates {
u32 rate;
u8 bw;
u8 nr;
-} nouveau_dp_rates[] = {
+} nvkm_dp_rates[] = {
{ 2160000, 0x14, 4 },
{ 1080000, 0x0a, 4 },
{ 1080000, 0x14, 2 },
@@ -323,11 +319,11 @@ static const struct dp_rates {
};
void
-nouveau_dp_train(struct work_struct *w)
+nvkm_dp_train(struct work_struct *w)
{
struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- const struct dp_rates *cfg = nouveau_dp_rates;
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ const struct dp_rates *cfg = nvkm_dp_rates;
struct dp_state _dp = {
.outp = outp,
}, *dp = &_dp;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 5628d2d5ec71..9596290329c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -1,5 +1,6 @@
#ifndef __NVKM_DISP_DPORT_H__
#define __NVKM_DISP_DPORT_H__
+#include <core/os.h>
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
@@ -70,6 +71,5 @@
#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
-void nouveau_dp_train(struct work_struct *);
-
+void nvkm_dp_train(struct work_struct *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 13eff5e4ee51..a0dcf534cb20 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -21,20 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* EVO master channel object
******************************************************************************/
const struct nv50_disp_mthd_list
-nv84_disp_core_mthd_dac = {
+g84_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
@@ -46,7 +42,7 @@ nv84_disp_core_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nv84_disp_core_mthd_head = {
+g84_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
@@ -98,15 +94,15 @@ nv84_disp_core_mthd_head = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_core_mthd_chan = {
+g84_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &nv84_disp_core_mthd_dac },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
{ "SOR", 2, &nv50_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
- { "HEAD", 2, &nv84_disp_core_mthd_head },
+ { "HEAD", 2, &g84_disp_core_mthd_head },
{}
}
};
@@ -116,7 +112,7 @@ nv84_disp_core_mthd_chan = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nv84_disp_base_mthd_base = {
+g84_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -146,11 +142,11 @@ nv84_disp_base_mthd_base = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_base_mthd_chan = {
+g84_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x000540,
.data = {
- { "Global", 1, &nv84_disp_base_mthd_base },
+ { "Global", 1, &g84_disp_base_mthd_base },
{ "Image", 2, &nv50_disp_base_mthd_image },
{}
}
@@ -161,7 +157,7 @@ nv84_disp_base_mthd_chan = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nv84_disp_ovly_mthd_base = {
+g84_disp_ovly_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -189,11 +185,11 @@ nv84_disp_ovly_mthd_base = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_ovly_mthd_chan = {
+g84_disp_ovly_mthd_chan = {
.name = "Overlay",
.addr = 0x000540,
.data = {
- { "Global", 1, &nv84_disp_ovly_mthd_base },
+ { "Global", 1, &g84_disp_ovly_mthd_base },
{}
}
};
@@ -202,8 +198,8 @@ nv84_disp_ovly_mthd_chan = {
* Base display object
******************************************************************************/
-static struct nouveau_oclass
-nv84_disp_sclass[] = {
+static struct nvkm_oclass
+g84_disp_sclass[] = {
{ G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
{ G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
@@ -212,8 +208,8 @@ nv84_disp_sclass[] = {
{}
};
-static struct nouveau_oclass
-nv84_disp_main_oclass[] = {
+static struct nvkm_oclass
+g84_disp_main_oclass[] = {
{ G82_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -223,15 +219,15 @@ nv84_disp_main_oclass[] = {
******************************************************************************/
static int
-nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -240,11 +236,11 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv84_disp_main_oclass;
+ nv_engine(priv)->sclass = g84_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = nv84_disp_sclass;
+ priv->sclass = g84_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
@@ -252,25 +248,25 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.hdmi = g84_hdmi_ctrl;
priv->pior.power = nv50_pior_power;
return 0;
}
-struct nouveau_oclass *
-nv84_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+g84_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x82),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv84_disp_core_mthd_chan,
- .mthd.base = &nv84_disp_base_mthd_chan,
- .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.core = &g84_disp_core_mthd_chan,
+ .mthd.base = &g84_disp_base_mthd_chan,
+ .mthd.ovly = &g84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 2bb7ac5cd0e6..1ab0d0ae3cc8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -21,20 +21,17 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
+#include "outpdp.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* EVO master channel object
******************************************************************************/
const struct nv50_disp_mthd_list
-nv94_disp_core_mthd_sor = {
+g94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -44,15 +41,15 @@ nv94_disp_core_mthd_sor = {
};
const struct nv50_disp_mthd_chan
-nv94_disp_core_mthd_chan = {
+g94_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &nv84_disp_core_mthd_dac },
- { "SOR", 4, &nv94_disp_core_mthd_sor },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 4, &g94_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
- { "HEAD", 2, &nv84_disp_core_mthd_head },
+ { "HEAD", 2, &g84_disp_core_mthd_head },
{}
}
};
@@ -61,8 +58,8 @@ nv94_disp_core_mthd_chan = {
* Base display object
******************************************************************************/
-static struct nouveau_oclass
-nv94_disp_sclass[] = {
+static struct nvkm_oclass
+g94_disp_sclass[] = {
{ GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
@@ -71,8 +68,8 @@ nv94_disp_sclass[] = {
{}
};
-static struct nouveau_oclass
-nv94_disp_main_oclass[] = {
+static struct nvkm_oclass
+g94_disp_main_oclass[] = {
{ GT206_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -82,15 +79,15 @@ nv94_disp_main_oclass[] = {
******************************************************************************/
static int
-nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g94_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -99,11 +96,11 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv94_disp_main_oclass;
+ nv_engine(priv)->sclass = g94_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = nv94_disp_sclass;
+ priv->sclass = g94_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
@@ -111,32 +108,32 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.hdmi = g84_hdmi_ctrl;
priv->pior.power = nv50_pior_power;
return 0;
}
-struct nouveau_oclass *
-nv94_disp_outp_sclass[] = {
+struct nvkm_oclass *
+g94_disp_outp_sclass[] = {
&nv50_pior_dp_impl.base.base,
- &nv94_sor_dp_impl.base.base,
+ &g94_sor_dp_impl.base.base,
NULL
};
-struct nouveau_oclass *
-nv94_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+g94_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x88),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv94_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g94_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
.base.vblank = &nv50_disp_vblank_func,
- .base.outp = nv94_disp_outp_sclass,
- .mthd.core = &nv94_disp_core_mthd_chan,
- .mthd.base = &nv84_disp_base_mthd_chan,
- .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .base.outp = g94_disp_outp_sclass,
+ .mthd.core = &g94_disp_core_mthd_chan,
+ .mthd.base = &g84_disp_base_mthd_chan,
+ .mthd.ovly = &g84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 181a2d57e356..0ebf466e9ef3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -21,33 +21,30 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
+#include "outpdp.h"
-#include <core/object.h>
#include <core/client.h>
-#include <core/parent.h>
-#include <core/handle.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <engine/disp.h>
-
+#include <core/gpuobj.h>
+#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
-#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
/*******************************************************************************
* EVO channel base class
******************************************************************************/
static void
-nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
@@ -55,7 +52,7 @@ nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
}
static void
-nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
nv_wr32(priv, 0x61008c, 0x00000001 << index);
@@ -63,10 +60,10 @@ nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
}
const struct nvkm_event_func
-nvd0_disp_chan_uevent = {
+gf110_disp_chan_uevent = {
.ctor = nv50_disp_chan_uevent_ctor,
- .init = nvd0_disp_chan_uevent_init,
- .fini = nvd0_disp_chan_uevent_fini,
+ .init = gf110_disp_chan_uevent_init,
+ .fini = gf110_disp_chan_uevent_fini,
};
/*******************************************************************************
@@ -74,25 +71,25 @@ nvd0_disp_chan_uevent = {
******************************************************************************/
static int
-nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
- struct nouveau_object *object, u32 name)
+gf110_disp_dmac_object_attach(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 name)
{
struct nv50_disp_base *base = (void *)parent->parent;
struct nv50_disp_chan *chan = (void *)parent;
u32 addr = nv_gpuobj(object)->node->offset;
u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
- return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+ return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
}
static void
-nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
{
struct nv50_disp_base *base = (void *)parent->parent;
- nouveau_ramht_remove(base->ramht, cookie);
+ nvkm_ramht_remove(base->ramht, cookie);
}
static int
-nvd0_disp_dmac_init(struct nouveau_object *object)
+gf110_disp_dmac_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *dmac = (void *)object;
@@ -125,7 +122,7 @@ nvd0_disp_dmac_init(struct nouveau_object *object)
}
static int
-nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *dmac = (void *)object;
@@ -153,7 +150,7 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
******************************************************************************/
const struct nv50_disp_mthd_list
-nvd0_disp_core_mthd_base = {
+gf110_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -166,7 +163,7 @@ nvd0_disp_core_mthd_base = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_core_mthd_dac = {
+gf110_disp_core_mthd_dac = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -179,7 +176,7 @@ nvd0_disp_core_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_core_mthd_sor = {
+gf110_disp_core_mthd_sor = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -192,7 +189,7 @@ nvd0_disp_core_mthd_sor = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_core_mthd_pior = {
+gf110_disp_core_mthd_pior = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -205,7 +202,7 @@ nvd0_disp_core_mthd_pior = {
};
static const struct nv50_disp_mthd_list
-nvd0_disp_core_mthd_head = {
+gf110_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
@@ -279,21 +276,21 @@ nvd0_disp_core_mthd_head = {
};
static const struct nv50_disp_mthd_chan
-nvd0_disp_core_mthd_chan = {
+gf110_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nvd0_disp_core_mthd_base },
- { "DAC", 3, &nvd0_disp_core_mthd_dac },
- { "SOR", 8, &nvd0_disp_core_mthd_sor },
- { "PIOR", 4, &nvd0_disp_core_mthd_pior },
- { "HEAD", 4, &nvd0_disp_core_mthd_head },
+ { "Global", 1, &gf110_disp_core_mthd_base },
+ { "DAC", 3, &gf110_disp_core_mthd_dac },
+ { "SOR", 8, &gf110_disp_core_mthd_sor },
+ { "PIOR", 4, &gf110_disp_core_mthd_pior },
+ { "HEAD", 4, &gf110_disp_core_mthd_head },
{}
}
};
static int
-nvd0_disp_core_init(struct nouveau_object *object)
+gf110_disp_core_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -324,7 +321,7 @@ nvd0_disp_core_init(struct nouveau_object *object)
}
static int
-nvd0_disp_core_fini(struct nouveau_object *object, bool suspend)
+gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -346,18 +343,18 @@ nvd0_disp_core_fini(struct nouveau_object *object, bool suspend)
}
struct nv50_disp_chan_impl
-nvd0_disp_core_ofuncs = {
+gf110_disp_core_ofuncs = {
.base.ctor = nv50_disp_core_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nvd0_disp_core_init,
- .base.fini = nvd0_disp_core_fini,
+ .base.init = gf110_disp_core_init,
+ .base.fini = gf110_disp_core_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
.base.wr32 = nv50_disp_chan_wr32,
.chid = 0,
- .attach = nvd0_disp_dmac_object_attach,
- .detach = nvd0_disp_dmac_object_detach,
+ .attach = gf110_disp_dmac_object_attach,
+ .detach = gf110_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -365,7 +362,7 @@ nvd0_disp_core_ofuncs = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nvd0_disp_base_mthd_base = {
+gf110_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -415,7 +412,7 @@ nvd0_disp_base_mthd_base = {
};
static const struct nv50_disp_mthd_list
-nvd0_disp_base_mthd_image = {
+gf110_disp_base_mthd_image = {
.mthd = 0x0400,
.addr = 0x000400,
.data = {
@@ -429,29 +426,29 @@ nvd0_disp_base_mthd_image = {
};
const struct nv50_disp_mthd_chan
-nvd0_disp_base_mthd_chan = {
+gf110_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x001000,
.data = {
- { "Global", 1, &nvd0_disp_base_mthd_base },
- { "Image", 2, &nvd0_disp_base_mthd_image },
+ { "Global", 1, &gf110_disp_base_mthd_base },
+ { "Image", 2, &gf110_disp_base_mthd_image },
{}
}
};
struct nv50_disp_chan_impl
-nvd0_disp_base_ofuncs = {
+gf110_disp_base_ofuncs = {
.base.ctor = nv50_disp_base_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nvd0_disp_dmac_init,
- .base.fini = nvd0_disp_dmac_fini,
+ .base.init = gf110_disp_dmac_init,
+ .base.fini = gf110_disp_dmac_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
.base.wr32 = nv50_disp_chan_wr32,
.chid = 1,
- .attach = nvd0_disp_dmac_object_attach,
- .detach = nvd0_disp_dmac_object_detach,
+ .attach = gf110_disp_dmac_object_attach,
+ .detach = gf110_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -459,7 +456,7 @@ nvd0_disp_base_ofuncs = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nvd0_disp_ovly_mthd_base = {
+gf110_disp_ovly_mthd_base = {
.mthd = 0x0000,
.data = {
{ 0x0080, 0x665080 },
@@ -511,28 +508,28 @@ nvd0_disp_ovly_mthd_base = {
};
static const struct nv50_disp_mthd_chan
-nvd0_disp_ovly_mthd_chan = {
+gf110_disp_ovly_mthd_chan = {
.name = "Overlay",
.addr = 0x001000,
.data = {
- { "Global", 1, &nvd0_disp_ovly_mthd_base },
+ { "Global", 1, &gf110_disp_ovly_mthd_base },
{}
}
};
struct nv50_disp_chan_impl
-nvd0_disp_ovly_ofuncs = {
+gf110_disp_ovly_ofuncs = {
.base.ctor = nv50_disp_ovly_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nvd0_disp_dmac_init,
- .base.fini = nvd0_disp_dmac_fini,
+ .base.init = gf110_disp_dmac_init,
+ .base.fini = gf110_disp_dmac_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
.base.wr32 = nv50_disp_chan_wr32,
.chid = 5,
- .attach = nvd0_disp_dmac_object_attach,
- .detach = nvd0_disp_dmac_object_detach,
+ .attach = gf110_disp_dmac_object_attach,
+ .detach = gf110_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -540,7 +537,7 @@ nvd0_disp_ovly_ofuncs = {
******************************************************************************/
static int
-nvd0_disp_pioc_init(struct nouveau_object *object)
+gf110_disp_pioc_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_pioc *pioc = (void *)object;
@@ -566,7 +563,7 @@ nvd0_disp_pioc_init(struct nouveau_object *object)
}
static int
-nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_pioc *pioc = (void *)object;
@@ -592,11 +589,11 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
******************************************************************************/
struct nv50_disp_chan_impl
-nvd0_disp_oimm_ofuncs = {
+gf110_disp_oimm_ofuncs = {
.base.ctor = nv50_disp_oimm_ctor,
.base.dtor = nv50_disp_pioc_dtor,
- .base.init = nvd0_disp_pioc_init,
- .base.fini = nvd0_disp_pioc_fini,
+ .base.init = gf110_disp_pioc_init,
+ .base.fini = gf110_disp_pioc_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
@@ -609,11 +606,11 @@ nvd0_disp_oimm_ofuncs = {
******************************************************************************/
struct nv50_disp_chan_impl
-nvd0_disp_curs_ofuncs = {
+gf110_disp_curs_ofuncs = {
.base.ctor = nv50_disp_curs_ctor,
.base.dtor = nv50_disp_pioc_dtor,
- .base.init = nvd0_disp_pioc_init,
- .base.fini = nvd0_disp_pioc_fini,
+ .base.init = gf110_disp_pioc_init,
+ .base.fini = gf110_disp_pioc_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
@@ -626,7 +623,7 @@ nvd0_disp_curs_ofuncs = {
******************************************************************************/
int
-nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
+gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
@@ -658,14 +655,14 @@ nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
}
static int
-nvd0_disp_main_init(struct nouveau_object *object)
+gf110_disp_main_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
int ret, i;
u32 tmp;
- ret = nouveau_parent_init(&base->base);
+ ret = nvkm_parent_init(&base->base);
if (ret)
return ret;
@@ -715,7 +712,7 @@ nvd0_disp_main_init(struct nouveau_object *object)
nv_wr32(priv, 0x6100b0, 0x00000307);
/* disable underflow reporting, preventing an intermittent issue
- * on some nve4 boards where the production vbios left this
+ * on some gk104 boards where the production vbios left this
* setting enabled by default.
*
* ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
@@ -727,7 +724,7 @@ nvd0_disp_main_init(struct nouveau_object *object)
}
static int
-nvd0_disp_main_fini(struct nouveau_object *object, bool suspend)
+gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -735,32 +732,32 @@ nvd0_disp_main_fini(struct nouveau_object *object, bool suspend)
/* disable all interrupts */
nv_wr32(priv, 0x6100b0, 0x00000000);
- return nouveau_parent_fini(&base->base, suspend);
+ return nvkm_parent_fini(&base->base, suspend);
}
-struct nouveau_ofuncs
-nvd0_disp_main_ofuncs = {
+struct nvkm_ofuncs
+gf110_disp_main_ofuncs = {
.ctor = nv50_disp_main_ctor,
.dtor = nv50_disp_main_dtor,
- .init = nvd0_disp_main_init,
- .fini = nvd0_disp_main_fini,
+ .init = gf110_disp_main_init,
+ .fini = gf110_disp_main_fini,
.mthd = nv50_disp_main_mthd,
- .ntfy = nouveau_disp_ntfy,
+ .ntfy = nvkm_disp_ntfy,
};
-static struct nouveau_oclass
-nvd0_disp_main_oclass[] = {
- { GF110_DISP, &nvd0_disp_main_ofuncs },
+static struct nvkm_oclass
+gf110_disp_main_oclass[] = {
+ { GF110_DISP, &gf110_disp_main_ofuncs },
{}
};
-static struct nouveau_oclass
-nvd0_disp_sclass[] = {
- { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
- { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
- { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
- { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
- { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+static struct nvkm_oclass
+gf110_disp_sclass[] = {
+ { GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
+ { GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
+ { GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
+ { GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
+ { GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
{}
};
@@ -769,24 +766,24 @@ nvd0_disp_sclass[] = {
******************************************************************************/
static void
-nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
+gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
- struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
}
static void
-nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
- struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
}
const struct nvkm_event_func
-nvd0_disp_vblank_func = {
- .ctor = nouveau_disp_vblank_ctor,
- .init = nvd0_disp_vblank_init,
- .fini = nvd0_disp_vblank_fini,
+gf110_disp_vblank_func = {
+ .ctor = nvkm_disp_vblank_ctor,
+ .init = gf110_disp_vblank_init,
+ .fini = gf110_disp_vblank_fini,
};
static struct nvkm_output *
@@ -794,7 +791,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_outp *info)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_output *outp;
u16 mask, type;
@@ -838,7 +835,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
static struct nvkm_output *
exec_script(struct nv50_disp_priv *priv, int head, int id)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_output *outp;
struct nvbios_outp info;
u8 ver, hdr, cnt, len;
@@ -874,7 +871,7 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
static struct nvkm_output *
exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_output *outp;
struct nvbios_outp info1;
struct nvbios_ocfg info2;
@@ -934,13 +931,13 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
}
static void
-nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
+gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
exec_script(priv, head, 1);
}
static void
-nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
+gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
{
struct nvkm_output *outp = exec_script(priv, head, 2);
@@ -949,7 +946,7 @@ nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
struct nvkm_output_dp *outpdp = (void *)outp;
struct nvbios_init init = {
.subdev = nv_subdev(priv),
- .bios = nouveau_bios(priv),
+ .bios = nvkm_bios(priv),
.outp = &outp->info,
.crtc = head,
.offset = outpdp->info.script[4],
@@ -962,9 +959,9 @@ nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
}
static void
-nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
+gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
{
- struct nouveau_devinit *devinit = nouveau_devinit(priv);
+ struct nvkm_devinit *devinit = nvkm_devinit(priv);
u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
if (pclk)
devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
@@ -972,8 +969,8 @@ nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
}
static void
-nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
- struct dcb_output *outp)
+gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
+ struct dcb_output *outp)
{
const int or = ffs(outp->or) - 1;
const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
@@ -1033,7 +1030,7 @@ nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
}
static void
-nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
+gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
{
struct nvkm_output *outp;
u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
@@ -1075,7 +1072,7 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
nv_mask(priv, addr, 0x007c0000, 0x00280000);
break;
case DCB_OUTPUT_DP:
- nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+ gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
break;
default:
break;
@@ -1086,7 +1083,7 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
}
static void
-nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
+gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
{
u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
u32 conf;
@@ -1095,7 +1092,7 @@ nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
}
void
-nvd0_disp_intr_supervisor(struct work_struct *work)
+gf110_disp_intr_supervisor(struct work_struct *work)
{
struct nv50_disp_priv *priv =
container_of(work, struct nv50_disp_priv, supervisor);
@@ -1115,7 +1112,7 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
if (!(mask[head] & 0x00001000))
continue;
nv_debug(priv, "supervisor 1.0 - head %d\n", head);
- nvd0_disp_intr_unk1_0(priv, head);
+ gf110_disp_intr_unk1_0(priv, head);
}
} else
if (priv->super & 0x00000002) {
@@ -1123,19 +1120,19 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
if (!(mask[head] & 0x00001000))
continue;
nv_debug(priv, "supervisor 2.0 - head %d\n", head);
- nvd0_disp_intr_unk2_0(priv, head);
+ gf110_disp_intr_unk2_0(priv, head);
}
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00010000))
continue;
nv_debug(priv, "supervisor 2.1 - head %d\n", head);
- nvd0_disp_intr_unk2_1(priv, head);
+ gf110_disp_intr_unk2_1(priv, head);
}
for (head = 0; head < priv->head.nr; head++) {
if (!(mask[head] & 0x00001000))
continue;
nv_debug(priv, "supervisor 2.2 - head %d\n", head);
- nvd0_disp_intr_unk2_2(priv, head);
+ gf110_disp_intr_unk2_2(priv, head);
}
} else
if (priv->super & 0x00000004) {
@@ -1143,7 +1140,7 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
if (!(mask[head] & 0x00001000))
continue;
nv_debug(priv, "supervisor 3.0 - head %d\n", head);
- nvd0_disp_intr_unk4_0(priv, head);
+ gf110_disp_intr_unk4_0(priv, head);
}
}
@@ -1153,7 +1150,7 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
}
static void
-nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
{
const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
@@ -1200,7 +1197,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
}
void
-nvd0_disp_intr(struct nouveau_subdev *subdev)
+gf110_disp_intr(struct nvkm_subdev *subdev)
{
struct nv50_disp_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x610088);
@@ -1220,7 +1217,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
u32 stat = nv_rd32(priv, 0x61009c);
int chid = ffs(stat) - 1;
if (chid >= 0)
- nvd0_disp_intr_error(priv, chid);
+ gf110_disp_intr_error(priv, chid);
intr &= ~0x00000002;
}
@@ -1246,7 +1243,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nouveau_disp_vblank(&priv->base, i);
+ nvkm_disp_vblank(&priv->base, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -1254,60 +1251,60 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
}
static int
-nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_main_oclass;
+ nv_engine(priv)->sclass = gf110_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nvd0_disp_intr;
- INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
- priv->sclass = nvd0_disp_sclass;
+ nv_subdev(priv)->intr = gf110_disp_intr;
+ INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
+ priv->sclass = gf110_disp_sclass;
priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = nvd0_hda_eld;
- priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.hda_eld = gf110_hda_eld;
+ priv->sor.hdmi = gf110_hdmi_ctrl;
return 0;
}
-struct nouveau_oclass *
-nvd0_disp_outp_sclass[] = {
- &nvd0_sor_dp_impl.base.base,
+struct nvkm_oclass *
+gf110_disp_outp_sclass[] = {
+ &gf110_sor_dp_impl.base.base,
NULL
};
-struct nouveau_oclass *
-nvd0_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+gf110_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x90),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf110_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
- .base.vblank = &nvd0_disp_vblank_func,
- .base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nvd0_disp_core_mthd_chan,
- .mthd.base = &nvd0_disp_base_mthd_chan,
- .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
+ .base.vblank = &gf110_disp_vblank_func,
+ .base.outp = gf110_disp_outp_sclass,
+ .mthd.core = &gf110_disp_core_mthd_chan,
+ .mthd.base = &gf110_disp_base_mthd_chan,
+ .mthd.ovly = &gf110_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_main_scanoutpos,
+ .head.scanoutpos = gf110_disp_main_scanoutpos,
}.base.base;
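The hunks above consistently address per-head display registers with fixed strides: the vblank interrupt enable lives at 0x6100c0 + head * 0x800 (bit 0), while the timing readback used by gf110_disp_main_scanoutpos starts at 0x640414 + head * 0x300. A minimal standalone sketch of that addressing pattern, with stub mmio accessors standing in for nv_rd32()/nv_mask() (not the nvkm code):

#include <stdint.h>
#include <stdio.h>

/* Stub mmio accessors; the real driver goes through nv_rd32()/nv_mask(). */
static uint32_t rd32(uint32_t addr)
{
	(void)addr;
	return 0;
}

static void wr32(uint32_t addr, uint32_t val)
{
	printf("wr32 0x%06x <- 0x%08x\n", addr, val);
}

/* Read-modify-write helper mirroring the nv_mask() idiom used above. */
static uint32_t mask32(uint32_t addr, uint32_t mask, uint32_t val)
{
	uint32_t old = rd32(addr);
	wr32(addr, (old & ~mask) | val);
	return old;
}

/* Per-head vblank interrupt enable: bit 0 of 0x6100c0, 0x800 bytes per head. */
static void vblank_set(int head, int enable)
{
	mask32(0x6100c0 + (head * 0x800), 0x00000001, enable ? 0x00000001 : 0);
}

int main(void)
{
	for (int head = 0; head < 4; head++) {
		vblank_set(head, 1);
		/* Timing readback occupies a 0x300-byte window per head. */
		printf("head %d: scanout timing regs from 0x%06x\n",
		       head, 0x640414 + (head * 0x300));
	}
	return 0;
}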
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 55debec7e68f..6f4019ab4e65 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -21,20 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* EVO master channel object
******************************************************************************/
static const struct nv50_disp_mthd_list
-nve0_disp_core_mthd_head = {
+gk104_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
@@ -113,15 +109,15 @@ nve0_disp_core_mthd_head = {
};
const struct nv50_disp_mthd_chan
-nve0_disp_core_mthd_chan = {
+gk104_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nvd0_disp_core_mthd_base },
- { "DAC", 3, &nvd0_disp_core_mthd_dac },
- { "SOR", 8, &nvd0_disp_core_mthd_sor },
- { "PIOR", 4, &nvd0_disp_core_mthd_pior },
- { "HEAD", 4, &nve0_disp_core_mthd_head },
+ { "Global", 1, &gf110_disp_core_mthd_base },
+ { "DAC", 3, &gf110_disp_core_mthd_dac },
+ { "SOR", 8, &gf110_disp_core_mthd_sor },
+ { "PIOR", 4, &gf110_disp_core_mthd_pior },
+ { "HEAD", 4, &gk104_disp_core_mthd_head },
{}
}
};
@@ -131,7 +127,7 @@ nve0_disp_core_mthd_chan = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nve0_disp_ovly_mthd_base = {
+gk104_disp_ovly_mthd_base = {
.mthd = 0x0000,
.data = {
{ 0x0080, 0x665080 },
@@ -185,11 +181,11 @@ nve0_disp_ovly_mthd_base = {
};
const struct nv50_disp_mthd_chan
-nve0_disp_ovly_mthd_chan = {
+gk104_disp_ovly_mthd_chan = {
.name = "Overlay",
.addr = 0x001000,
.data = {
- { "Global", 1, &nve0_disp_ovly_mthd_base },
+ { "Global", 1, &gk104_disp_ovly_mthd_base },
{}
}
};
@@ -198,19 +194,19 @@ nve0_disp_ovly_mthd_chan = {
* Base display object
******************************************************************************/
-static struct nouveau_oclass
-nve0_disp_sclass[] = {
- { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
- { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+static struct nvkm_oclass
+gk104_disp_sclass[] = {
+ { GK104_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
+ { GK104_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_oclass
-nve0_disp_main_oclass[] = {
- { GK104_DISP, &nvd0_disp_main_ofuncs },
+static struct nvkm_oclass
+gk104_disp_main_oclass[] = {
+ { GK104_DISP, &gf110_disp_main_ofuncs },
{}
};
@@ -219,54 +215,54 @@ nve0_disp_main_oclass[] = {
******************************************************************************/
static int
-nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
if (ret)
return ret;
- nv_engine(priv)->sclass = nve0_disp_main_oclass;
+ nv_engine(priv)->sclass = gk104_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nvd0_disp_intr;
- INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
- priv->sclass = nve0_disp_sclass;
+ nv_subdev(priv)->intr = gf110_disp_intr;
+ INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
+ priv->sclass = gk104_disp_sclass;
priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = nvd0_hda_eld;
- priv->sor.hdmi = nve0_hdmi_ctrl;
+ priv->sor.hda_eld = gf110_hda_eld;
+ priv->sor.hdmi = gk104_hdmi_ctrl;
return 0;
}
-struct nouveau_oclass *
-nve0_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+gk104_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x91),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
- .base.vblank = &nvd0_disp_vblank_func,
- .base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_core_mthd_chan,
- .mthd.base = &nvd0_disp_base_mthd_chan,
- .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .base.vblank = &gf110_disp_vblank_func,
+ .base.outp = gf110_disp_outp_sclass,
+ .mthd.core = &gk104_disp_core_mthd_chan,
+ .mthd.base = &gf110_disp_base_mthd_chan,
+ .mthd.ovly = &gk104_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_main_scanoutpos,
+ .head.scanoutpos = gf110_disp_main_scanoutpos,
}.base.base;
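gk104_disp_core_mthd_chan above follows the table shape used throughout these files: a {}-terminated array of named sub-units ("Global", "DAC", "SOR", ...), each carrying an instance count and a pointer to a per-register method list. The standalone sketch below uses simplified stand-in types and hypothetical values to show how such a table can be walked, in the spirit of the nv50_disp_mthd_* debug dumpers:

#include <stdio.h>

struct mthd_list {                      /* stand-in for nv50_disp_mthd_list */
	unsigned mthd;                  /* method stride between instances  */
	unsigned addr;                  /* register offset of the block     */
};

struct mthd_chan {                      /* stand-in for nv50_disp_mthd_chan */
	const char *name;
	struct {
		const char *name;
		int nr;
		const struct mthd_list *mthd;
	} data[8];                      /* terminated by an empty entry      */
};

static const struct mthd_list head_list = { 0x0300, 0x000300 };

/* Hypothetical table in the shape of gk104_disp_core_mthd_chan. */
static const struct mthd_chan core_chan = {
	.name = "Core",
	.data = {
		{ "Global", 1, &head_list },
		{ "HEAD",   4, &head_list },
		{ NULL, 0, NULL }       /* terminator ({} in the kernel tables) */
	}
};

int main(void)
{
	/* Walk until the entry with a NULL method list. */
	for (int i = 0; core_chan.data[i].mthd; i++)
		printf("%s: %-6s x%d  (addr 0x%06x, mthd stride 0x%04x)\n",
		       core_chan.name, core_chan.data[i].name,
		       core_chan.data[i].nr, core_chan.data[i].mthd->addr,
		       core_chan.data[i].mthd->mthd);
	return 0;
}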
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 3e7e2d28744c..daa4b460a6ba 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -21,31 +21,27 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* Base display object
******************************************************************************/
-static struct nouveau_oclass
-nvf0_disp_sclass[] = {
- { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+static struct nvkm_oclass
+gk110_disp_sclass[] = {
+ { GK110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_oclass
-nvf0_disp_main_oclass[] = {
- { GK110_DISP, &nvd0_disp_main_ofuncs },
+static struct nvkm_oclass
+gk110_disp_main_oclass[] = {
+ { GK110_DISP, &gf110_disp_main_ofuncs },
{}
};
@@ -54,54 +50,54 @@ nvf0_disp_main_oclass[] = {
******************************************************************************/
static int
-nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
if (ret)
return ret;
- nv_engine(priv)->sclass = nvf0_disp_main_oclass;
+ nv_engine(priv)->sclass = gk110_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nvd0_disp_intr;
- INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
- priv->sclass = nvf0_disp_sclass;
+ nv_subdev(priv)->intr = gf110_disp_intr;
+ INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
+ priv->sclass = gk110_disp_sclass;
priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = nvd0_hda_eld;
- priv->sor.hdmi = nve0_hdmi_ctrl;
+ priv->sor.hda_eld = gf110_hda_eld;
+ priv->sor.hdmi = gk104_hdmi_ctrl;
return 0;
}
-struct nouveau_oclass *
-nvf0_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+gk110_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x92),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvf0_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk110_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
- .base.vblank = &nvd0_disp_vblank_func,
- .base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_core_mthd_chan,
- .mthd.base = &nvd0_disp_base_mthd_chan,
- .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .base.vblank = &gf110_disp_vblank_func,
+ .base.outp = gf110_disp_outp_sclass,
+ .mthd.core = &gk104_disp_core_mthd_chan,
+ .mthd.base = &gf110_disp_base_mthd_chan,
+ .mthd.ovly = &gk104_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_main_scanoutpos,
+ .head.scanoutpos = gf110_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index e2ad0543fb31..881cc94385a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -21,31 +21,27 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* Base display object
******************************************************************************/
-static struct nouveau_oclass
+static struct nvkm_oclass
gm107_disp_sclass[] = {
- { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+ { GM107_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_oclass
+static struct nvkm_oclass
gm107_disp_main_oclass[] = {
- { GM107_DISP, &nvd0_disp_main_ofuncs },
+ { GM107_DISP, &gf110_disp_main_ofuncs },
{}
};
@@ -54,28 +50,28 @@ gm107_disp_main_oclass[] = {
******************************************************************************/
static int
-gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gm107_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
if (ret)
return ret;
nv_engine(priv)->sclass = gm107_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nvd0_disp_intr;
- INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ nv_subdev(priv)->intr = gf110_disp_intr;
+ INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
priv->sclass = gm107_disp_sclass;
priv->head.nr = heads;
priv->dac.nr = 3;
@@ -83,25 +79,25 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = nvd0_hda_eld;
- priv->sor.hdmi = nve0_hdmi_ctrl;
+ priv->sor.hda_eld = gf110_hda_eld;
+ priv->sor.hdmi = gk104_hdmi_ctrl;
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
gm107_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x07),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm107_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
- .base.vblank = &nvd0_disp_vblank_func,
- .base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_core_mthd_chan,
- .mthd.base = &nvd0_disp_base_mthd_chan,
- .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .base.vblank = &gf110_disp_vblank_func,
+ .base.outp = gf110_disp_outp_sclass,
+ .mthd.core = &gk104_disp_core_mthd_chan,
+ .mthd.base = &gf110_disp_base_mthd_chan,
+ .mthd.ovly = &gk104_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_main_scanoutpos,
+ .head.scanoutpos = gf110_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
index 672ded79b2a9..67004f8302b3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
@@ -21,31 +21,28 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
+#include "outpdp.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* Base display object
******************************************************************************/
-static struct nouveau_oclass
+static struct nvkm_oclass
gm204_disp_sclass[] = {
- { GM204_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+ { GM204_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_oclass
+static struct nvkm_oclass
gm204_disp_main_oclass[] = {
- { GM204_DISP, &nvd0_disp_main_ofuncs },
+ { GM204_DISP, &gf110_disp_main_ofuncs },
{}
};
@@ -54,28 +51,28 @@ gm204_disp_main_oclass[] = {
******************************************************************************/
static int
-gm204_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gm204_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
if (ret)
return ret;
nv_engine(priv)->sclass = gm204_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nvd0_disp_intr;
- INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ nv_subdev(priv)->intr = gf110_disp_intr;
+ INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
priv->sclass = gm204_disp_sclass;
priv->head.nr = heads;
priv->dac.nr = 3;
@@ -83,32 +80,32 @@ gm204_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = nvd0_hda_eld;
- priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.hda_eld = gf110_hda_eld;
+ priv->sor.hdmi = gf110_hdmi_ctrl;
priv->sor.magic = gm204_sor_magic;
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
gm204_disp_outp_sclass[] = {
&gm204_sor_dp_impl.base.base,
NULL
};
-struct nouveau_oclass *
+struct nvkm_oclass *
gm204_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x07),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm204_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
- .base.vblank = &nvd0_disp_vblank_func,
+ .base.vblank = &gf110_disp_vblank_func,
.base.outp = gm204_disp_outp_sclass,
- .mthd.core = &nve0_disp_core_mthd_chan,
- .mthd.base = &nvd0_disp_base_mthd_chan,
- .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.core = &gk104_disp_core_mthd_chan,
+ .mthd.base = &gf110_disp_base_mthd_chan,
+ .mthd.ovly = &gk104_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_main_scanoutpos,
+ .head.scanoutpos = gf110_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index b32456c9494f..a45307213f4b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -21,20 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* EVO overlay channel objects
******************************************************************************/
static const struct nv50_disp_mthd_list
-nva0_disp_ovly_mthd_base = {
+gt200_disp_ovly_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -65,11 +61,11 @@ nva0_disp_ovly_mthd_base = {
};
static const struct nv50_disp_mthd_chan
-nva0_disp_ovly_mthd_chan = {
+gt200_disp_ovly_mthd_chan = {
.name = "Overlay",
.addr = 0x000540,
.data = {
- { "Global", 1, &nva0_disp_ovly_mthd_base },
+ { "Global", 1, &gt200_disp_ovly_mthd_base },
{}
}
};
@@ -78,8 +74,8 @@ nva0_disp_ovly_mthd_chan = {
* Base display object
******************************************************************************/
-static struct nouveau_oclass
-nva0_disp_sclass[] = {
+static struct nvkm_oclass
+gt200_disp_sclass[] = {
{ GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
@@ -88,8 +84,8 @@ nva0_disp_sclass[] = {
{}
};
-static struct nouveau_oclass
-nva0_disp_main_oclass[] = {
+static struct nvkm_oclass
+gt200_disp_main_oclass[] = {
{ GT200_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -99,15 +95,15 @@ nva0_disp_main_oclass[] = {
******************************************************************************/
static int
-nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gt200_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -116,11 +112,11 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nva0_disp_main_oclass;
+ nv_engine(priv)->sclass = gt200_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = nva0_disp_sclass;
+ priv->sclass = gt200_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
@@ -128,25 +124,25 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.hdmi = g84_hdmi_ctrl;
priv->pior.power = nv50_pior_power;
return 0;
}
-struct nouveau_oclass *
-nva0_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+gt200_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x83),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva0_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt200_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv84_disp_core_mthd_chan,
- .mthd.base = &nv84_disp_base_mthd_chan,
- .mthd.ovly = &nva0_disp_ovly_mthd_chan,
+ .mthd.core = &g84_disp_core_mthd_chan,
+ .mthd.base = &g84_disp_base_mthd_chan,
+ .mthd.ovly = &gt200_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 951d79f9b781..55f0d3ac591e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -21,20 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/software.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
-#include "nv50.h"
-
/*******************************************************************************
* Base display object
******************************************************************************/
-static struct nouveau_oclass
-nva3_disp_sclass[] = {
+static struct nvkm_oclass
+gt215_disp_sclass[] = {
{ GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
{ GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
@@ -43,8 +39,8 @@ nva3_disp_sclass[] = {
{}
};
-static struct nouveau_oclass
-nva3_disp_main_oclass[] = {
+static struct nvkm_oclass
+gt215_disp_main_oclass[] = {
{ GT214_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -54,15 +50,15 @@ nva3_disp_main_oclass[] = {
******************************************************************************/
static int
-nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gt215_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -71,11 +67,11 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nva3_disp_main_oclass;
+ nv_engine(priv)->sclass = gt215_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = nva3_disp_sclass;
+ priv->sclass = gt215_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
@@ -83,26 +79,26 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = nva3_hda_eld;
- priv->sor.hdmi = nva3_hdmi_ctrl;
+ priv->sor.hda_eld = gt215_hda_eld;
+ priv->sor.hdmi = gt215_hdmi_ctrl;
priv->pior.power = nv50_pior_power;
return 0;
}
-struct nouveau_oclass *
-nva3_disp_oclass = &(struct nv50_disp_impl) {
+struct nvkm_oclass *
+gt215_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x85),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt215_disp_ctor,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
.base.vblank = &nv50_disp_vblank_func,
- .base.outp = nv94_disp_outp_sclass,
- .mthd.core = &nv94_disp_core_mthd_chan,
- .mthd.base = &nv84_disp_base_mthd_chan,
- .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .base.outp = g94_disp_outp_sclass,
+ .mthd.core = &g94_disp_core_mthd_chan,
+ .mthd.base = &g84_disp_base_mthd_chan,
+ .mthd.ovly = &g84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
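The gt200 and gt215 constructors above differ mainly in the per-chip hooks and OR counts they install into the shared nv50-style priv structure (gt200: 2 SORs, g84-style HDMI, no HDA; gt215: 4 SORs plus its own HDMI and HDA ELD hooks). A standalone sketch of that pattern, with hypothetical stand-in types and hook bodies, not the real nvkm structures:

#include <stdio.h>

struct disp_priv {                       /* stand-in for nv50_disp_priv    */
	int head_nr, dac_nr, sor_nr;
	void (*sor_hdmi)(int head);      /* per-chip HDMI control hook     */
	void (*sor_hda_eld)(int head);   /* per-chip HDA ELD hook, or NULL */
};

/* Hypothetical hook bodies; the real ones program SOR registers. */
static void g84_hdmi(int head)   { printf("g84-style HDMI, head %d\n", head); }
static void gt215_hdmi(int head) { printf("gt215 HDMI, head %d\n", head); }
static void gt215_eld(int head)  { printf("gt215 HDA ELD, head %d\n", head); }

/* gt200: 2 heads, 3 DACs, 2 SORs, g84-style HDMI, no HDA. */
static void gt200_ctor(struct disp_priv *priv)
{
	*priv = (struct disp_priv){ 2, 3, 2, g84_hdmi, NULL };
}

/* gt215: 2 heads, 3 DACs, 4 SORs, its own HDMI and HDA ELD hooks. */
static void gt215_ctor(struct disp_priv *priv)
{
	*priv = (struct disp_priv){ 2, 3, 4, gt215_hdmi, gt215_eld };
}

int main(void)
{
	struct disp_priv priv;

	gt200_ctor(&priv);
	priv.sor_hdmi(0);

	gt215_ctor(&priv);
	priv.sor_hdmi(0);
	if (priv.sor_hda_eld)
		priv.sor_hda_eld(0);
	return 0;
}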
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
index 1d4e8432d857..b9813d246ba5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
@@ -21,17 +21,19 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
-nvd0_hda_eld(NV50_DISP_MTHD_V1)
+gf110_hda_eld(NV50_DISP_MTHD_V1)
{
union {
struct nv50_disp_sor_hda_eld_v0 v0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index fe9ef5894dd4..891d1e7bf7d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -21,17 +21,17 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
-nva3_hda_eld(NV50_DISP_MTHD_V1)
+gt215_hda_eld(NV50_DISP_MTHD_V1)
{
union {
struct nv50_disp_sor_hda_eld_v0 v0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index fa276dede9cd..621cb0b7ff19 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
-nv84_hdmi_ctrl(NV50_DISP_MTHD_V1)
+g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
union {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
index bac4fc4570f0..c28449061bbd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
-nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1)
+gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
union {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index 528d14ec2f7f..ca34ff81ad7f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
-nve0_hdmi_ctrl(NV50_DISP_MTHD_V1)
+gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
const u32 hdmi = (head * 0x400);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index 57eeed1d1942..b641c167dcfa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -21,15 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
-nva3_hdmi_ctrl(NV50_DISP_MTHD_V1)
+gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 soff = outp->or * 0x800;
union {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index 366f315fc9a5..ff09b2659c17 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -21,20 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
#include <core/client.h>
-#include <core/event.h>
-#include <nvif/unpack.h>
+#include <core/device.h>
+
#include <nvif/class.h>
+#include <nvif/unpack.h>
struct nv04_disp_priv {
- struct nouveau_disp base;
+ struct nvkm_disp base;
};
static int
-nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv,
+nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
void *data, u32 size, int head)
{
const u32 hoff = head * 0x2000;
@@ -75,7 +75,7 @@ nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv,
}
static int
-nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
+nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
union {
struct nv04_disp_mthd_v0 v0;
@@ -105,17 +105,17 @@ nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
return -EINVAL;
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv04_disp_ofuncs = {
- .ctor = _nouveau_object_ctor,
- .dtor = nouveau_object_destroy,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
+ .ctor = _nvkm_object_ctor,
+ .dtor = nvkm_object_destroy,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
.mthd = nv04_disp_mthd,
- .ntfy = nouveau_disp_ntfy,
+ .ntfy = nvkm_disp_ntfy,
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv04_disp_sclass[] = {
{ NV04_DISP, &nv04_disp_ofuncs },
{},
@@ -128,26 +128,26 @@ nv04_disp_sclass[] = {
static void
nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
- struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
}
static void
nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
- struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
}
static const struct nvkm_event_func
nv04_disp_vblank_func = {
- .ctor = nouveau_disp_vblank_ctor,
+ .ctor = nvkm_disp_vblank_ctor,
.init = nv04_disp_vblank_init,
.fini = nv04_disp_vblank_fini,
};
static void
-nv04_disp_intr(struct nouveau_subdev *subdev)
+nv04_disp_intr(struct nvkm_subdev *subdev)
{
struct nv04_disp_priv *priv = (void *)subdev;
u32 crtc0 = nv_rd32(priv, 0x600100);
@@ -155,12 +155,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 pvideo;
if (crtc0 & 0x00000001) {
- nouveau_disp_vblank(&priv->base, 0);
+ nvkm_disp_vblank(&priv->base, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nouveau_disp_vblank(&priv->base, 1);
+ nvkm_disp_vblank(&priv->base, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
@@ -174,15 +174,15 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
}
static int
-nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
- "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
+ "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -192,14 +192,14 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-nv04_disp_oclass = &(struct nouveau_disp_impl) {
+struct nvkm_oclass *
+nv04_disp_oclass = &(struct nvkm_disp_impl) {
.base.handle = NV_ENGINE(DISP, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
.vblank = &nv04_disp_vblank_func,
}.base;
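nv04_disp_intr() above polls one status register per CRTC (0x600100 + head * 0x2000), signals the vblank event when bit 0 is set, and acknowledges by writing the bit back. A standalone sketch of that poll-and-acknowledge loop with stubbed mmio; modelling the acknowledge as write-one-to-clear is an assumption here:

#include <stdint.h>
#include <stdio.h>

/* Fake per-CRTC status registers; pretend head 0 has a pending vblank. */
static uint32_t crtc_stat[2] = { 0x00000001, 0x00000000 };

static uint32_t rd32(uint32_t addr)
{
	return crtc_stat[(addr - 0x600100) / 0x2000];
}

static void wr32(uint32_t addr, uint32_t val)
{
	crtc_stat[(addr - 0x600100) / 0x2000] &= ~val;   /* assumed W1C */
}

static void disp_vblank(int head)
{
	printf("vblank event, head %d\n", head);
}

/* Sketch of the per-CRTC poll-and-acknowledge logic in nv04_disp_intr(). */
static void nv04_intr_sketch(void)
{
	for (int head = 0; head < 2; head++) {
		uint32_t stat = rd32(0x600100 + (head * 0x2000));
		if (stat & 0x00000001) {
			disp_vblank(head);
			wr32(0x600100 + (head * 0x2000), 0x00000001);
		}
	}
}

int main(void)
{
	nv04_intr_sketch();
	nv04_intr_sketch();        /* second pass: nothing pending any more */
	return 0;
}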
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 44a8290aaea5..84ade810e27c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -21,35 +21,38 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
+#include "outpdp.h"
-#include <core/object.h>
#include <core/client.h>
-#include <core/parent.h>
-#include <core/handle.h>
+#include <core/device.h>
+#include <core/engctx.h>
#include <core/enum.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-#include <nvif/event.h>
-
+#include <core/handle.h>
+#include <core/ramht.h>
+#include <engine/dmaobj.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
-#include <subdev/timer.h>
#include <subdev/fb.h>
+#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/event.h>
+#include <nvif/unpack.h>
/*******************************************************************************
* EVO channel base class
******************************************************************************/
static int
-nv50_disp_chan_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int head,
+nv50_disp_chan_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int head,
int length, void **pobject)
{
const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
@@ -62,9 +65,9 @@ nv50_disp_chan_create_(struct nouveau_object *parent,
return -EBUSY;
base->chan |= (1 << chid);
- ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
- (1ULL << NVDEV_ENGINE_DMAOBJ),
- length, pobject);
+ ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
+ (1ULL << NVDEV_ENGINE_DMAOBJ),
+ length, pobject);
chan = *pobject;
if (ret)
return ret;
@@ -80,7 +83,7 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
{
struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
base->chan &= ~(1 << chan->chid);
- nouveau_namedb_destroy(&chan->base);
+ nvkm_namedb_destroy(&chan->base);
}
static void
@@ -109,7 +112,7 @@ nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
}
int
-nv50_disp_chan_uevent_ctor(struct nouveau_object *object, void *data, u32 size,
+nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
{
struct nv50_disp_dmac *dmac = (void *)object;
@@ -136,7 +139,7 @@ nv50_disp_chan_uevent = {
};
int
-nv50_disp_chan_ntfy(struct nouveau_object *object, u32 type,
+nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
struct nvkm_event **pevent)
{
struct nv50_disp_priv *priv = (void *)object->engine;
@@ -151,7 +154,7 @@ nv50_disp_chan_ntfy(struct nouveau_object *object, u32 type,
}
int
-nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
+nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
struct nv50_disp_chan *chan = (void *)object;
*addr = nv_device_resource_start(nv_device(object), 0) +
@@ -161,7 +164,7 @@ nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
}
u32
-nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_chan *chan = (void *)object;
@@ -169,7 +172,7 @@ nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
}
void
-nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_chan *chan = (void *)object;
@@ -181,28 +184,28 @@ nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
******************************************************************************/
static int
-nv50_disp_dmac_object_attach(struct nouveau_object *parent,
- struct nouveau_object *object, u32 name)
+nv50_disp_dmac_object_attach(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 name)
{
struct nv50_disp_base *base = (void *)parent->parent;
struct nv50_disp_chan *chan = (void *)parent;
u32 addr = nv_gpuobj(object)->node->offset;
u32 chid = chan->chid;
u32 data = (chid << 28) | (addr << 10) | chid;
- return nouveau_ramht_insert(base->ramht, chid, name, data);
+ return nvkm_ramht_insert(base->ramht, chid, name, data);
}
static void
-nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
{
struct nv50_disp_base *base = (void *)parent->parent;
- nouveau_ramht_remove(base->ramht, cookie);
+ nvkm_ramht_remove(base->ramht, cookie);
}
static int
-nv50_disp_dmac_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pushbuf, int head,
+nv50_disp_dmac_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 pushbuf, int head,
int length, void **pobject)
{
struct nv50_disp_dmac *dmac;
@@ -214,7 +217,7 @@ nv50_disp_dmac_create_(struct nouveau_object *parent,
if (ret)
return ret;
- dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
if (!dmac->pushdma)
return -ENOENT;
@@ -243,15 +246,15 @@ nv50_disp_dmac_create_(struct nouveau_object *parent,
}
void
-nv50_disp_dmac_dtor(struct nouveau_object *object)
+nv50_disp_dmac_dtor(struct nvkm_object *object)
{
struct nv50_disp_dmac *dmac = (void *)object;
- nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma);
nv50_disp_chan_destroy(&dmac->base);
}
static int
-nv50_disp_dmac_init(struct nouveau_object *object)
+nv50_disp_dmac_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *dmac = (void *)object;
@@ -284,7 +287,7 @@ nv50_disp_dmac_init(struct nouveau_object *object)
}
static int
-nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *dmac = (void *)object;
@@ -314,7 +317,7 @@ static void
nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
const struct nv50_disp_mthd_list *list, int inst)
{
- struct nouveau_object *disp = nv_object(priv);
+ struct nvkm_object *disp = nv_object(priv);
int i;
for (i = 0; list->data[i].mthd; i++) {
@@ -341,7 +344,7 @@ void
nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
const struct nv50_disp_mthd_chan *chan)
{
- struct nouveau_object *disp = nv_object(priv);
+ struct nvkm_object *disp = nv_object(priv);
const struct nv50_disp_impl *impl = (void *)disp->oclass;
const struct nv50_disp_mthd_list *list;
int i, j;
@@ -482,10 +485,10 @@ nv50_disp_core_mthd_chan = {
};
int
-nv50_disp_core_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_core_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_disp_core_channel_dma_v0 v0;
@@ -511,7 +514,7 @@ nv50_disp_core_ctor(struct nouveau_object *parent,
}
static int
-nv50_disp_core_init(struct nouveau_object *object)
+nv50_disp_core_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -548,7 +551,7 @@ nv50_disp_core_init(struct nouveau_object *object)
}
static int
-nv50_disp_core_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -638,10 +641,10 @@ nv50_disp_base_mthd_chan = {
};
int
-nv50_disp_base_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_base_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_disp_base_channel_dma_v0 v0;
@@ -728,10 +731,10 @@ nv50_disp_ovly_mthd_chan = {
};
int
-nv50_disp_ovly_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_ovly_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_disp_overlay_channel_dma_v0 v0;
@@ -780,9 +783,9 @@ nv50_disp_ovly_ofuncs = {
******************************************************************************/
static int
-nv50_disp_pioc_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int head,
+nv50_disp_pioc_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int head,
int length, void **pobject)
{
return nv50_disp_chan_create_(parent, engine, oclass, head,
@@ -790,14 +793,14 @@ nv50_disp_pioc_create_(struct nouveau_object *parent,
}
void
-nv50_disp_pioc_dtor(struct nouveau_object *object)
+nv50_disp_pioc_dtor(struct nvkm_object *object)
{
struct nv50_disp_pioc *pioc = (void *)object;
nv50_disp_chan_destroy(&pioc->base);
}
static int
-nv50_disp_pioc_init(struct nouveau_object *object)
+nv50_disp_pioc_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_pioc *pioc = (void *)object;
@@ -826,7 +829,7 @@ nv50_disp_pioc_init(struct nouveau_object *object)
}
static int
-nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_pioc *pioc = (void *)object;
@@ -848,10 +851,10 @@ nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
******************************************************************************/
int
-nv50_disp_oimm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_oimm_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_disp_overlay_v0 v0;
@@ -896,10 +899,10 @@ nv50_disp_oimm_ofuncs = {
******************************************************************************/
int
-nv50_disp_curs_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_curs_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_disp_cursor_v0 v0;
@@ -976,8 +979,7 @@ nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
}
int
-nv50_disp_main_mthd(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
union {
@@ -1100,42 +1102,42 @@ nv50_disp_main_mthd(struct nouveau_object *object, u32 mthd,
}
int
-nv50_disp_main_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_main_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_base *base;
int ret;
- ret = nouveau_parent_create(parent, engine, oclass, 0,
- priv->sclass, 0, &base);
+ ret = nvkm_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
*pobject = nv_object(base);
if (ret)
return ret;
- return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
- &base->ramht);
+ return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+ &base->ramht);
}
void
-nv50_disp_main_dtor(struct nouveau_object *object)
+nv50_disp_main_dtor(struct nvkm_object *object)
{
struct nv50_disp_base *base = (void *)object;
- nouveau_ramht_ref(NULL, &base->ramht);
- nouveau_parent_destroy(&base->base);
+ nvkm_ramht_ref(NULL, &base->ramht);
+ nvkm_parent_destroy(&base->base);
}
static int
-nv50_disp_main_init(struct nouveau_object *object)
+nv50_disp_main_init(struct nvkm_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
int ret, i;
u32 tmp;
- ret = nouveau_parent_init(&base->base);
+ ret = nvkm_parent_init(&base->base);
if (ret)
return ret;
@@ -1196,7 +1198,7 @@ nv50_disp_main_init(struct nouveau_object *object)
}
static int
-nv50_disp_main_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -1205,26 +1207,26 @@ nv50_disp_main_fini(struct nouveau_object *object, bool suspend)
nv_wr32(priv, 0x610024, 0x00000000);
nv_wr32(priv, 0x610020, 0x00000000);
- return nouveau_parent_fini(&base->base, suspend);
+ return nvkm_parent_fini(&base->base, suspend);
}
-struct nouveau_ofuncs
+struct nvkm_ofuncs
nv50_disp_main_ofuncs = {
.ctor = nv50_disp_main_ctor,
.dtor = nv50_disp_main_dtor,
.init = nv50_disp_main_init,
.fini = nv50_disp_main_fini,
.mthd = nv50_disp_main_mthd,
- .ntfy = nouveau_disp_ntfy,
+ .ntfy = nvkm_disp_ntfy,
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_disp_main_oclass[] = {
{ NV50_DISP, &nv50_disp_main_ofuncs },
{}
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_disp_sclass[] = {
{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
@@ -1240,13 +1242,13 @@ nv50_disp_sclass[] = {
******************************************************************************/
static int
-nv50_disp_data_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_data_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv = (void *)engine;
- struct nouveau_engctx *ectx;
+ struct nvkm_engctx *ectx;
int ret = -EBUSY;
/* no context needed for channel objects... */
@@ -1259,25 +1261,24 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
/* allocate display hardware to client */
mutex_lock(&nv_subdev(priv)->mutex);
if (list_empty(&nv_engine(priv)->contexts)) {
- ret = nouveau_engctx_create(parent, engine, oclass, NULL,
- 0x10000, 0x10000,
- NVOBJ_FLAG_HEAP, &ectx);
+ ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
+ 0x10000, NVOBJ_FLAG_HEAP, &ectx);
*pobject = nv_object(ectx);
}
mutex_unlock(&nv_subdev(priv)->mutex);
return ret;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv50_disp_cclass = {
.handle = NV_ENGCTX(DISP, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_disp_data_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
},
};
@@ -1288,25 +1289,25 @@ nv50_disp_cclass = {
static void
nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
- struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
nv_mask(disp, 0x61002c, (4 << head), 0);
}
static void
nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
- struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
nv_mask(disp, 0x61002c, (4 << head), (4 << head));
}
const struct nvkm_event_func
nv50_disp_vblank_func = {
- .ctor = nouveau_disp_vblank_ctor,
+ .ctor = nvkm_disp_vblank_ctor,
.init = nv50_disp_vblank_init,
.fini = nv50_disp_vblank_fini,
};
-static const struct nouveau_enum
+static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
{ 3, "ILLEGAL_MTHD" },
{ 4, "INVALID_VALUE" },
@@ -1315,7 +1316,7 @@ nv50_disp_intr_error_type[] = {
{}
};
-static const struct nouveau_enum
+static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
{ 0x00, "" },
{}
@@ -1330,14 +1331,14 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
u32 code = (addr & 0x00ff0000) >> 16;
u32 type = (addr & 0x00007000) >> 12;
u32 mthd = (addr & 0x00000ffc);
- const struct nouveau_enum *ec, *et;
+ const struct nvkm_enum *ec, *et;
char ecunk[6], etunk[6];
- et = nouveau_enum_find(nv50_disp_intr_error_type, type);
+ et = nvkm_enum_find(nv50_disp_intr_error_type, type);
if (!et)
snprintf(etunk, sizeof(etunk), "UNK%02X", type);
- ec = nouveau_enum_find(nv50_disp_intr_error_code, code);
+ ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
if (!ec)
snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
@@ -1385,7 +1386,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_outp *info)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_output *outp;
u16 mask, type;
@@ -1440,7 +1441,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
static struct nvkm_output *
exec_script(struct nv50_disp_priv *priv, int head, int id)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_output *outp;
struct nvbios_outp info;
u8 ver, hdr, cnt, len;
@@ -1497,7 +1498,7 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
static struct nvkm_output *
exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_output *outp;
struct nvbios_outp info1;
struct nvbios_ocfg info2;
@@ -1610,7 +1611,7 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
struct nvkm_output_dp *outpdp = (void *)outp;
struct nvbios_init init = {
.subdev = nv_subdev(priv),
- .bios = nouveau_bios(priv),
+ .bios = nvkm_bios(priv),
.outp = &outp->info,
.crtc = head,
.offset = outpdp->info.script[4],
@@ -1625,7 +1626,7 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
static void
nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
{
- struct nouveau_devinit *devinit = nouveau_devinit(priv);
+ struct nvkm_devinit *devinit = nvkm_devinit(priv);
u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
if (pclk)
devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
@@ -1841,9 +1842,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
* programmed for DisplayPort.
*/
static void
-nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
+ struct dcb_output *outp)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
const int link = !(outp->sorconf.link & 1);
const int or = ffs(outp->or) - 1;
const u32 loff = (or * 0x800) + (link * 0x80);
@@ -1920,7 +1922,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
}
void
-nv50_disp_intr(struct nouveau_subdev *subdev)
+nv50_disp_intr(struct nvkm_subdev *subdev)
{
struct nv50_disp_priv *priv = (void *)subdev;
u32 intr0 = nv_rd32(priv, 0x610020);
@@ -1939,13 +1941,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nouveau_disp_vblank(&priv->base, 0);
+ nvkm_disp_vblank(&priv->base, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nouveau_disp_vblank(&priv->base, 1);
+ nvkm_disp_vblank(&priv->base, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
@@ -1959,15 +1961,15 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
static int
-nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
+ ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -1992,20 +1994,20 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv50_disp_outp_sclass[] = {
&nv50_pior_dp_impl.base.base,
NULL
};
-struct nouveau_oclass *
+struct nvkm_oclass *
nv50_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x50),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_disp_ctor,
- .dtor = _nouveau_disp_dtor,
- .init = _nouveau_disp_init,
- .fini = _nouveau_disp_fini,
+ .dtor = _nvkm_disp_dtor,
+ .init = _nvkm_disp_init,
+ .fini = _nvkm_disp_fini,
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
new file mode 100644
index 000000000000..b4ed620070fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -0,0 +1,226 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+#include "priv.h"
+struct nvkm_output;
+struct nvkm_output_dp;
+
+#define NV50_DISP_MTHD_ struct nvkm_object *object, \
+ struct nv50_disp_priv *priv, void *data, u32 size
+#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
+#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
+
+struct nv50_disp_priv {
+ struct nvkm_disp base;
+ struct nvkm_oclass *sclass;
+
+ struct work_struct supervisor;
+ u32 super;
+
+ struct nvkm_event uevent;
+
+ struct {
+ int nr;
+ } head;
+ struct {
+ int nr;
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*sense)(NV50_DISP_MTHD_V1);
+ } dac;
+ struct {
+ int nr;
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*hda_eld)(NV50_DISP_MTHD_V1);
+ int (*hdmi)(NV50_DISP_MTHD_V1);
+ u32 lvdsconf;
+ void (*magic)(struct nvkm_output *);
+ } sor;
+ struct {
+ int nr;
+ int (*power)(NV50_DISP_MTHD_V1);
+ u8 type[3];
+ } pior;
+};
+
+struct nv50_disp_impl {
+ struct nvkm_disp_impl base;
+ struct {
+ const struct nv50_disp_mthd_chan *core;
+ const struct nv50_disp_mthd_chan *base;
+ const struct nv50_disp_mthd_chan *ovly;
+ int prev;
+ } mthd;
+ struct {
+ int (*scanoutpos)(NV50_DISP_MTHD_V0);
+ } head;
+};
+
+int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_main_mthd(struct nvkm_object *, u32, void *, u32);
+
+int gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+
+int nv50_dac_power(NV50_DISP_MTHD_V1);
+int nv50_dac_sense(NV50_DISP_MTHD_V1);
+
+int gt215_hda_eld(NV50_DISP_MTHD_V1);
+int gf110_hda_eld(NV50_DISP_MTHD_V1);
+
+int g84_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gf110_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1);
+
+int nv50_sor_power(NV50_DISP_MTHD_V1);
+int nv50_pior_power(NV50_DISP_MTHD_V1);
+
+#include <core/parent.h>
+
+struct nv50_disp_base {
+ struct nvkm_parent base;
+ struct nvkm_ramht *ramht;
+ u32 chan;
+};
+
+struct nv50_disp_chan_impl {
+ struct nvkm_ofuncs base;
+ int chid;
+ int (*attach)(struct nvkm_object *, struct nvkm_object *, u32);
+ void (*detach)(struct nvkm_object *, int);
+};
+
+#include <core/namedb.h>
+
+struct nv50_disp_chan {
+ struct nvkm_namedb base;
+ int chid;
+};
+
+int nv50_disp_chan_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+int nv50_disp_chan_map(struct nvkm_object *, u64 *, u32 *);
+u32 nv50_disp_chan_rd32(struct nvkm_object *, u64);
+void nv50_disp_chan_wr32(struct nvkm_object *, u64, u32);
+extern const struct nvkm_event_func nv50_disp_chan_uevent;
+int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
+ struct nvkm_notify *);
+void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
+
+extern const struct nvkm_event_func gf110_disp_chan_uevent;
+
+#define nv50_disp_chan_init(a) \
+ nvkm_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b) \
+ nvkm_namedb_fini(&(a)->base, (b))
+
+struct nv50_disp_dmac {
+ struct nv50_disp_chan base;
+ struct nvkm_dmaobj *pushdma;
+ u32 push;
+};
+
+void nv50_disp_dmac_dtor(struct nvkm_object *);
+
+struct nv50_disp_pioc {
+ struct nv50_disp_chan base;
+};
+
+void nv50_disp_pioc_dtor(struct nvkm_object *);
+
+struct nv50_disp_mthd_list {
+ u32 mthd;
+ u32 addr;
+ struct {
+ u32 mthd;
+ u32 addr;
+ const char *name;
+ } data[];
+};
+
+struct nv50_disp_mthd_chan {
+ const char *name;
+ u32 addr;
+ struct {
+ const char *name;
+ int nr;
+ const struct nv50_disp_mthd_list *mthd;
+ } data[];
+};
+
+extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
+int nv50_disp_core_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
+extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
+int nv50_disp_ovly_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
+extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
+int nv50_disp_oimm_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
+int nv50_disp_curs_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+extern struct nvkm_ofuncs nv50_disp_main_ofuncs;
+int nv50_disp_main_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv50_disp_main_dtor(struct nvkm_object *);
+extern struct nvkm_omthds nv50_disp_main_omthds[];
+extern struct nvkm_oclass nv50_disp_cclass;
+void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
+ const struct nv50_disp_mthd_chan *);
+void nv50_disp_intr_supervisor(struct work_struct *);
+void nv50_disp_intr(struct nvkm_subdev *);
+extern const struct nvkm_event_func nv50_disp_vblank_func;
+
+extern const struct nv50_disp_mthd_chan g84_disp_core_mthd_chan;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
+extern const struct nv50_disp_mthd_chan g84_disp_base_mthd_chan;
+extern const struct nv50_disp_mthd_chan g84_disp_ovly_mthd_chan;
+
+extern const struct nv50_disp_mthd_chan g94_disp_core_mthd_chan;
+
+extern struct nv50_disp_chan_impl gf110_disp_core_ofuncs;
+extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl gf110_disp_base_ofuncs;
+extern struct nv50_disp_chan_impl gf110_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_chan gf110_disp_base_mthd_chan;
+extern struct nv50_disp_chan_impl gf110_disp_oimm_ofuncs;
+extern struct nv50_disp_chan_impl gf110_disp_curs_ofuncs;
+extern struct nvkm_ofuncs gf110_disp_main_ofuncs;
+extern struct nvkm_oclass gf110_disp_cclass;
+void gf110_disp_intr_supervisor(struct work_struct *);
+void gf110_disp_intr(struct nvkm_subdev *);
+extern const struct nvkm_event_func gf110_disp_vblank_func;
+
+extern const struct nv50_disp_mthd_chan gk104_disp_core_mthd_chan;
+extern const struct nv50_disp_mthd_chan gk104_disp_ovly_mthd_chan;
+
+extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
+extern struct nvkm_oclass *nv50_disp_outp_sclass[];
+
+extern struct nvkm_output_dp_impl g94_sor_dp_impl;
+int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+extern struct nvkm_oclass *g94_disp_outp_sclass[];
+
+extern struct nvkm_output_dp_impl gf110_sor_dp_impl;
+int gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+extern struct nvkm_oclass *gf110_disp_outp_sclass[];
+
+void gm204_sor_magic(struct nvkm_output *outp);
+extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index bbd9b6fdc90f..9224bcbf0159 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -21,57 +21,58 @@
*
* Authors: Ben Skeggs
*/
+#include "outp.h"
+#include "priv.h"
-#include <subdev/i2c.h>
#include <subdev/bios.h>
#include <subdev/bios/conn.h>
-
-#include "outp.h"
+#include <subdev/bios/dcb.h>
+#include <subdev/i2c.h>
int
-_nvkm_output_fini(struct nouveau_object *object, bool suspend)
+_nvkm_output_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_output *outp = (void *)object;
nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
- return nouveau_object_fini(&outp->base, suspend);
+ return nvkm_object_fini(&outp->base, suspend);
}
int
-_nvkm_output_init(struct nouveau_object *object)
+_nvkm_output_init(struct nvkm_object *object)
{
struct nvkm_output *outp = (void *)object;
- int ret = nouveau_object_init(&outp->base);
+ int ret = nvkm_object_init(&outp->base);
if (ret == 0)
nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
return 0;
}
void
-_nvkm_output_dtor(struct nouveau_object *object)
+_nvkm_output_dtor(struct nvkm_object *object)
{
struct nvkm_output *outp = (void *)object;
list_del(&outp->head);
- nouveau_object_ref(NULL, (void *)&outp->conn);
- nouveau_object_destroy(&outp->base);
+ nvkm_object_ref(NULL, (void *)&outp->conn);
+ nvkm_object_destroy(&outp->base);
}
int
-nvkm_output_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
+nvkm_output_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass,
struct dcb_output *dcbE, int index,
int length, void **pobject)
{
- struct nouveau_bios *bios = nouveau_bios(engine);
- struct nouveau_i2c *i2c = nouveau_i2c(parent);
- struct nouveau_disp *disp = (void *)engine;
+ struct nvkm_disp *disp = nvkm_disp(parent);
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_i2c *i2c = nvkm_i2c(parent);
struct nvbios_connE connE;
struct nvkm_output *outp;
u8 ver, hdr;
u32 data;
int ret;
- ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
outp = *pobject;
if (ret)
return ret;
@@ -98,9 +99,9 @@ nvkm_output_create_(struct nouveau_object *parent,
connE.type = DCB_CONNECTOR_NONE;
}
- ret = nouveau_object_ctor(parent, engine, nvkm_connector_oclass,
- &connE, outp->info.connector,
- (struct nouveau_object **)&outp->conn);
+ ret = nvkm_object_ctor(parent, NULL, nvkm_connector_oclass,
+ &connE, outp->info.connector,
+ (struct nvkm_object **)&outp->conn);
if (ret < 0) {
ERR("error %d creating connector, disabling\n", ret);
return ret;
@@ -111,10 +112,10 @@ nvkm_output_create_(struct nouveau_object *parent,
}
int
-_nvkm_output_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *dcbE, u32 index,
- struct nouveau_object **pobject)
+_nvkm_output_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *dcbE, u32 index,
+ struct nvkm_object **pobject)
{
struct nvkm_output *outp;
int ret;
@@ -127,11 +128,11 @@ _nvkm_output_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nvkm_output_oclass = &(struct nvkm_output_impl) {
.base = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_output_ctor,
.dtor = _nvkm_output_dtor,
.init = _nvkm_output_init,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 187f435ad0e2..d9253d26c31b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,18 +1,20 @@
#ifndef __NVKM_DISP_OUTP_H__
#define __NVKM_DISP_OUTP_H__
+#include <core/object.h>
-#include "priv.h"
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
struct nvkm_output {
- struct nouveau_object base;
+ struct nvkm_object base;
struct list_head head;
struct dcb_output info;
int index;
int or;
- struct nouveau_i2c_port *port;
- struct nouveau_i2c_port *edid;
+ struct nvkm_i2c_port *port;
+ struct nvkm_i2c_port *edid;
struct nvkm_connector *conn;
};
@@ -32,29 +34,28 @@ struct nvkm_output {
_nvkm_output_fini(nv_object(_outp), (s)); \
})
-int nvkm_output_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, struct dcb_output *,
+int nvkm_output_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, struct dcb_output *,
int, int, void **);
-int _nvkm_output_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void _nvkm_output_dtor(struct nouveau_object *);
-int _nvkm_output_init(struct nouveau_object *);
-int _nvkm_output_fini(struct nouveau_object *, bool);
+int _nvkm_output_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void _nvkm_output_dtor(struct nvkm_object *);
+int _nvkm_output_init(struct nvkm_object *);
+int _nvkm_output_fini(struct nvkm_object *, bool);
struct nvkm_output_impl {
- struct nouveau_oclass base;
+ struct nvkm_oclass base;
};
#ifndef MSG
#define MSG(l,f,a...) do { \
struct nvkm_output *_outp = (void *)outp; \
- nv_##l(nv_object(outp)->engine, "%02x:%04x:%04x: "f, _outp->index, \
+ nv_##l(_outp, "%02x:%04x:%04x: "f, _outp->index, \
_outp->info.hasht, _outp->info.hashm, ##a); \
} while(0)
#define DBG(f,a...) MSG(debug, f, ##a)
#define ERR(f,a...) MSG(error, f, ##a)
#endif
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 667a9070e006..0bde0fa5b59d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-#include <nvif/event.h>
-
-#include <subdev/i2c.h>
-
#include "outpdp.h"
#include "conn.h"
#include "dport.h"
+#include "priv.h"
+
+#include <subdev/i2c.h>
+
+#include <nvif/event.h>
int
nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
@@ -105,17 +104,17 @@ done:
static void
nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
{
- struct nouveau_i2c_port *port = outp->base.edid;
+ struct nvkm_i2c_port *port = outp->base.edid;
if (present) {
if (!outp->present) {
- nouveau_i2c(port)->acquire_pad(port, 0);
+ nvkm_i2c(port)->acquire_pad(port, 0);
DBG("aux power -> always\n");
outp->present = true;
}
nvkm_output_dp_train(&outp->base, 0, true);
} else {
if (outp->present) {
- nouveau_i2c(port)->release_pad(port);
+ nvkm_i2c(port)->release_pad(port);
DBG("aux power -> demand\n");
outp->present = false;
}
@@ -126,13 +125,13 @@ nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
static void
nvkm_output_dp_detect(struct nvkm_output_dp *outp)
{
- struct nouveau_i2c_port *port = outp->base.edid;
- int ret = nouveau_i2c(port)->acquire_pad(port, 0);
+ struct nvkm_i2c_port *port = outp->base.edid;
+ int ret = nvkm_i2c(port)->acquire_pad(port, 0);
if (ret == 0) {
ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
outp->dpcd, sizeof(outp->dpcd));
nvkm_output_dp_enable(outp, ret == 0);
- nouveau_i2c(port)->release_pad(port);
+ nvkm_i2c(port)->release_pad(port);
}
}
@@ -141,7 +140,7 @@ nvkm_output_dp_hpd(struct nvkm_notify *notify)
{
struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
struct nvkm_output_dp *outp;
- struct nouveau_disp *disp = nouveau_disp(conn);
+ struct nvkm_disp *disp = nvkm_disp(conn);
const struct nvkm_i2c_ntfy_rep *line = notify->data;
struct nvif_notify_conn_rep_v0 rep = {};
@@ -170,7 +169,7 @@ static int
nvkm_output_dp_irq(struct nvkm_notify *notify)
{
struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
- struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nvkm_disp *disp = nvkm_disp(outp);
const struct nvkm_i2c_ntfy_rep *line = notify->data;
struct nvif_notify_conn_rep_v0 rep = {
.mask = NVIF_NOTIFY_CONN_V0_IRQ,
@@ -185,7 +184,7 @@ nvkm_output_dp_irq(struct nvkm_notify *notify)
}
int
-_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
+_nvkm_output_dp_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_output_dp *outp = (void *)object;
nvkm_notify_put(&outp->irq);
@@ -194,7 +193,7 @@ _nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
}
int
-_nvkm_output_dp_init(struct nouveau_object *object)
+_nvkm_output_dp_init(struct nvkm_object *object)
{
struct nvkm_output_dp *outp = (void *)object;
nvkm_output_dp_detect(outp);
@@ -202,7 +201,7 @@ _nvkm_output_dp_init(struct nouveau_object *object)
}
void
-_nvkm_output_dp_dtor(struct nouveau_object *object)
+_nvkm_output_dp_dtor(struct nvkm_object *object)
{
struct nvkm_output_dp *outp = (void *)object;
nvkm_notify_fini(&outp->irq);
@@ -210,14 +209,14 @@ _nvkm_output_dp_dtor(struct nouveau_object *object)
}
int
-nvkm_output_dp_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
+nvkm_output_dp_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass,
struct dcb_output *info, int index,
int length, void **pobject)
{
- struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_i2c *i2c = nvkm_i2c(parent);
struct nvkm_output_dp *outp;
u8 hdr, cnt, len;
u32 data;
@@ -249,7 +248,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
/* link training */
- INIT_WORK(&outp->lt.work, nouveau_dp_train);
+ INIT_WORK(&outp->lt.work, nvkm_dp_train);
init_waitqueue_head(&outp->lt.wait);
atomic_set(&outp->lt.done, 0);
@@ -285,10 +284,10 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
}
int
-_nvkm_output_dp_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *info, u32 index,
- struct nouveau_object **pobject)
+_nvkm_output_dp_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *info, u32 index,
+ struct nvkm_object **pobject)
{
struct nvkm_output_dp *outp;
int ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 1fac367cc867..70c77aec4850 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -1,11 +1,11 @@
#ifndef __NVKM_DISP_OUTP_DP_H__
#define __NVKM_DISP_OUTP_DP_H__
+#include "outp.h"
+#include <core/notify.h>
#include <subdev/bios.h>
#include <subdev/bios/dp.h>
-#include "outp.h"
-
struct nvkm_output_dp {
struct nvkm_output base;
@@ -38,16 +38,16 @@ struct nvkm_output_dp {
_nvkm_output_dp_fini(nv_object(_outp), (s)); \
})
-int nvkm_output_dp_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, struct dcb_output *,
+int nvkm_output_dp_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, struct dcb_output *,
int, int, void **);
-int _nvkm_output_dp_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void _nvkm_output_dp_dtor(struct nouveau_object *);
-int _nvkm_output_dp_init(struct nouveau_object *);
-int _nvkm_output_dp_fini(struct nouveau_object *, bool);
+int _nvkm_output_dp_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void _nvkm_output_dp_dtor(struct nvkm_object *);
+int _nvkm_output_dp_init(struct nvkm_object *);
+int _nvkm_output_dp_fini(struct nvkm_object *, bool);
struct nvkm_output_dp_impl {
struct nvkm_output_impl base;
@@ -58,5 +58,4 @@ struct nvkm_output_dp_impl {
};
int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index d00f89a468a7..2a1d8871bf82 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -21,29 +21,27 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outpdp.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/timer.h>
#include <subdev/i2c.h>
+#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
/******************************************************************************
* TMDS
*****************************************************************************/
static int
-nv50_pior_tmds_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *info, u32 index,
- struct nouveau_object **pobject)
+nv50_pior_tmds_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *info, u32 index,
+ struct nvkm_object **pobject)
{
- struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nvkm_i2c *i2c = nvkm_i2c(parent);
struct nvkm_output *outp;
int ret;
@@ -59,7 +57,7 @@ nv50_pior_tmds_ctor(struct nouveau_object *parent,
struct nvkm_output_impl
nv50_pior_tmds_impl = {
.base.handle = DCB_OUTPUT_TMDS | 0x0100,
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_pior_tmds_ctor,
.dtor = _nvkm_output_dtor,
.init = _nvkm_output_init,
@@ -74,7 +72,7 @@ nv50_pior_tmds_impl = {
static int
nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nouveau_i2c_port *port = outp->base.edid;
+ struct nvkm_i2c_port *port = outp->base.edid;
if (port && port->func->pattern)
return port->func->pattern(port, pattern);
return port ? 0 : -ENODEV;
@@ -89,7 +87,7 @@ nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
static int
nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nouveau_i2c_port *port = outp->base.edid;
+ struct nvkm_i2c_port *port = outp->base.edid;
if (port && port->func->lnk_ctl)
return port->func->lnk_ctl(port, nr, bw, ef);
return port ? 0 : -ENODEV;
@@ -98,19 +96,19 @@ nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
static int
nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
{
- struct nouveau_i2c_port *port = outp->base.edid;
+ struct nvkm_i2c_port *port = outp->base.edid;
if (port && port->func->drv_ctl)
return port->func->drv_ctl(port, ln, vs, pe);
return port ? 0 : -ENODEV;
}
static int
-nv50_pior_dp_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *info, u32 index,
- struct nouveau_object **pobject)
+nv50_pior_dp_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *info, u32 index,
+ struct nvkm_object **pobject)
{
- struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nvkm_i2c *i2c = nvkm_i2c(parent);
struct nvkm_output_dp *outp;
int ret;
@@ -127,7 +125,7 @@ nv50_pior_dp_ctor(struct nouveau_object *parent,
struct nvkm_output_dp_impl
nv50_pior_dp_impl = {
.base.base.handle = DCB_OUTPUT_DP | 0x0010,
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_pior_dp_ctor,
.dtor = _nvkm_output_dp_dtor,
.init = _nvkm_output_dp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
new file mode 100644
index 000000000000..961ce8bb2135
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -0,0 +1,42 @@
+#ifndef __NVKM_DISP_PRIV_H__
+#define __NVKM_DISP_PRIV_H__
+#include <engine/disp.h>
+
+struct nvkm_disp_impl {
+ struct nvkm_oclass base;
+ struct nvkm_oclass **outp;
+ struct nvkm_oclass **conn;
+ const struct nvkm_event_func *vblank;
+};
+
+#define nvkm_disp_create(p,e,c,h,i,x,d) \
+ nvkm_disp_create_((p), (e), (c), (h), (i), (x), \
+ sizeof(**d), (void **)d)
+#define nvkm_disp_destroy(d) ({ \
+ struct nvkm_disp *disp = (d); \
+ _nvkm_disp_dtor(nv_object(disp)); \
+})
+#define nvkm_disp_init(d) ({ \
+ struct nvkm_disp *disp = (d); \
+ _nvkm_disp_init(nv_object(disp)); \
+})
+#define nvkm_disp_fini(d,s) ({ \
+ struct nvkm_disp *disp = (d); \
+ _nvkm_disp_fini(nv_object(disp), (s)); \
+})
+
+int nvkm_disp_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int heads,
+ const char *, const char *, int, void **);
+void _nvkm_disp_dtor(struct nvkm_object *);
+int _nvkm_disp_init(struct nvkm_object *);
+int _nvkm_disp_fini(struct nvkm_object *, bool);
+
+extern struct nvkm_oclass *nvkm_output_oclass;
+extern struct nvkm_oclass *nvkm_connector_oclass;
+
+int nvkm_disp_vblank_ctor(struct nvkm_object *, void *data, u32 size,
+ struct nvkm_notify *);
+void nvkm_disp_vblank(struct nvkm_disp *, int head);
+int nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 39f85d627336..8918da7ffdf2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -21,59 +21,53 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/dp.h>
-#include <subdev/bios/init.h>
-#include <subdev/timer.h>
-
#include "nv50.h"
#include "outpdp.h"
+#include <core/device.h>
+#include <subdev/timer.h>
+
static inline u32
-nv94_sor_soff(struct nvkm_output_dp *outp)
+g94_sor_soff(struct nvkm_output_dp *outp)
{
return (ffs(outp->base.info.or) - 1) * 0x800;
}
static inline u32
-nv94_sor_loff(struct nvkm_output_dp *outp)
+g94_sor_loff(struct nvkm_output_dp *outp)
{
- return nv94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+ return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
static inline u32
-nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv94[] = { 16, 8, 0, 24 };
+ static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 g94[] = { 16, 8, 0, 24 };
if (nv_device(priv)->chipset == 0xaf)
- return nvaf[lane];
- return nv94[lane];
+ return mcp89[lane];
+ return g94[lane];
}
static int
-nv94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- const u32 loff = nv94_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ const u32 loff = g94_sor_loff(outp);
nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
return 0;
}
int
-nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- const u32 soff = nv94_sor_soff(outp);
- const u32 loff = nv94_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ const u32 soff = g94_sor_soff(outp);
+ const u32 loff = g94_sor_loff(outp);
u32 mask = 0, i;
for (i = 0; i < nr; i++)
- mask |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+ mask |= 1 << (g94_sor_dp_lane_map(priv, i) >> 3);
nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
@@ -82,11 +76,11 @@ nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
}
static int
-nv94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- const u32 soff = nv94_sor_soff(outp);
- const u32 loff = nv94_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ const u32 soff = g94_sor_soff(outp);
+ const u32 loff = g94_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
@@ -102,12 +96,12 @@ nv94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
}
static int
-nv94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 shift = nv94_sor_dp_lane_map(priv, ln);
- const u32 loff = nv94_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_bios *bios = nvkm_bios(priv);
+ const u32 shift = g94_sor_dp_lane_map(priv, ln);
+ const u32 loff = g94_sor_loff(outp);
u32 addr, data[3];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
@@ -136,16 +130,16 @@ nv94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
}
struct nvkm_output_dp_impl
-nv94_sor_dp_impl = {
+g94_sor_dp_impl = {
.base.base.handle = DCB_OUTPUT_DP,
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_output_dp_ctor,
.dtor = _nvkm_output_dp_dtor,
.init = _nvkm_output_dp_init,
.fini = _nvkm_output_dp_fini,
},
- .pattern = nv94_sor_dp_pattern,
- .lnk_pwr = nv94_sor_dp_lnk_pwr,
- .lnk_ctl = nv94_sor_dp_lnk_ctl,
- .drv_ctl = nv94_sor_dp_drv_ctl,
+ .pattern = g94_sor_dp_pattern,
+ .lnk_pwr = g94_sor_dp_lnk_pwr,
+ .lnk_ctl = g94_sor_dp_lnk_ctl,
+ .drv_ctl = g94_sor_dp_drv_ctl,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
index fdab2939070c..52fbe4880e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
@@ -21,51 +21,43 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/dp.h>
-#include <subdev/bios/init.h>
-#include <subdev/timer.h>
-
#include "nv50.h"
+#include "outpdp.h"
static inline u32
-nvd0_sor_soff(struct nvkm_output_dp *outp)
+gf110_sor_soff(struct nvkm_output_dp *outp)
{
return (ffs(outp->base.info.or) - 1) * 0x800;
}
static inline u32
-nvd0_sor_loff(struct nvkm_output_dp *outp)
+gf110_sor_loff(struct nvkm_output_dp *outp)
{
- return nvd0_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+ return gf110_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
static inline u32
-nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+gf110_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
- static const u8 nvd0[] = { 16, 8, 0, 24 };
- return nvd0[lane];
+ static const u8 gf110[] = { 16, 8, 0, 24 };
+ return gf110[lane];
}
static int
-nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+gf110_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- const u32 loff = nvd0_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ const u32 loff = gf110_sor_loff(outp);
nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
int
-nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- const u32 soff = nvd0_sor_soff(outp);
- const u32 loff = nvd0_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ const u32 soff = gf110_sor_soff(outp);
+ const u32 loff = gf110_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
@@ -80,12 +72,13 @@ nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
}
static int
-nvd0_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+ int ln, int vs, int pe, int pc)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 shift = nvd0_sor_dp_lane_map(priv, ln);
- const u32 loff = nvd0_sor_loff(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_bios *bios = nvkm_bios(priv);
+ const u32 shift = gf110_sor_dp_lane_map(priv, ln);
+ const u32 loff = gf110_sor_loff(outp);
u32 addr, data[4];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
@@ -93,12 +86,12 @@ nvd0_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
addr = nvbios_dpout_match(bios, outp->base.info.hasht,
outp->base.info.hashm,
- &ver, &hdr, &cnt, &len, &info);
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
- &ver, &hdr, &cnt, &len, &ocfg);
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -116,16 +109,16 @@ nvd0_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
}
struct nvkm_output_dp_impl
-nvd0_sor_dp_impl = {
+gf110_sor_dp_impl = {
.base.base.handle = DCB_OUTPUT_DP,
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_output_dp_ctor,
.dtor = _nvkm_output_dp_dtor,
.init = _nvkm_output_dp_init,
.fini = _nvkm_output_dp_fini,
},
- .pattern = nvd0_sor_dp_pattern,
- .lnk_pwr = nv94_sor_dp_lnk_pwr,
- .lnk_ctl = nvd0_sor_dp_lnk_ctl,
- .drv_ctl = nvd0_sor_dp_drv_ctl,
+ .pattern = gf110_sor_dp_pattern,
+ .lnk_pwr = g94_sor_dp_lnk_pwr,
+ .lnk_ctl = gf110_sor_dp_lnk_ctl,
+ .drv_ctl = gf110_sor_dp_drv_ctl,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
index 0b4fad39e9a6..1e40dfe11319 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
@@ -21,17 +21,11 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outpdp.h"
-#include <core/os.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/dp.h>
-#include <subdev/bios/init.h>
#include <subdev/timer.h>
-#include "nv50.h"
-
static inline u32
gm204_sor_soff(struct nvkm_output_dp *outp)
{
@@ -47,7 +41,7 @@ gm204_sor_loff(struct nvkm_output_dp *outp)
void
gm204_sor_magic(struct nvkm_output *outp)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
const u32 soff = outp->or * 0x100;
const u32 data = outp->or + 1;
if (outp->info.sorconf.link & 1)
@@ -65,7 +59,7 @@ gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
static int
gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
const u32 soff = gm204_sor_soff(outp);
const u32 data = 0x01010101 * pattern;
if (outp->base.info.sorconf.link & 1)
@@ -78,7 +72,7 @@ gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
static int
gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
const u32 soff = gm204_sor_soff(outp);
const u32 loff = gm204_sor_loff(outp);
u32 mask = 0, i;
@@ -93,10 +87,11 @@ gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
}
static int
-gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+ int ln, int vs, int pe, int pc)
{
- struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_bios *bios = nvkm_bios(priv);
const u32 shift = gm204_sor_dp_lane_map(priv, ln);
const u32 loff = gm204_sor_loff(outp);
u32 addr, data[4];
@@ -106,12 +101,12 @@ gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc
addr = nvbios_dpout_match(bios, outp->base.info.hasht,
outp->base.info.hashm,
- &ver, &hdr, &cnt, &len, &info);
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
- &ver, &hdr, &cnt, &len, &ocfg);
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -131,7 +126,7 @@ gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc
struct nvkm_output_dp_impl
gm204_sor_dp_impl = {
.base.base.handle = DCB_OUTPUT_DP,
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_output_dp_ctor,
.dtor = _nvkm_output_dp_dtor,
.init = _nvkm_output_dp_init,
@@ -139,6 +134,6 @@ gm204_sor_dp_impl = {
},
.pattern = gm204_sor_dp_pattern,
.lnk_pwr = gm204_sor_dp_lnk_pwr,
- .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+ .lnk_ctl = gf110_sor_dp_lnk_ctl,
.drv_ctl = gm204_sor_dp_drv_ctl,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index ddf1760c4400..b229a311c78c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -21,16 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "outp.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
#include <subdev/timer.h>
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
int
nv50_sor_power(NV50_DISP_MTHD_V1)
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
index 8836c3cb99c3..c4622c7388d0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/vga.h>
-#include <core/subdev.h>
#include <core/device.h>
-#include <subdev/vga.h>
u8
nv_rdport(void *obj, int head, u16 port)
{
- struct nouveau_device *device = nv_device(obj);
+ struct nvkm_device *device = nv_device(obj);
if (device->card_type >= NV_50)
return nv_rd08(obj, 0x601000 + port);
@@ -54,7 +53,7 @@ nv_rdport(void *obj, int head, u16 port)
void
nv_wrport(void *obj, int head, u16 port, u8 data)
{
- struct nouveau_device *device = nv_device(obj);
+ struct nvkm_device *device = nv_device(obj);
if (device->card_type >= NV_50)
nv_wr08(obj, 0x601000 + port, data);
@@ -138,7 +137,7 @@ nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
bool
nv_lockvgac(void *obj, bool lock)
{
- struct nouveau_device *dev = nv_device(obj);
+ struct nvkm_device *dev = nv_device(obj);
bool locked = !nv_rdvgac(obj, 0, 0x1f);
u8 data = lock ? 0x99 : 0x57;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
new file mode 100644
index 000000000000..7529632dbedb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
@@ -0,0 +1,5 @@
+nvkm-y += nvkm/engine/dmaobj/base.o
+nvkm-y += nvkm/engine/dmaobj/nv04.o
+nvkm-y += nvkm/engine/dmaobj/nv50.o
+nvkm-y += nvkm/engine/dmaobj/gf100.o
+nvkm-y += nvkm/engine/dmaobj/gf110.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
index e1500f77a56a..a2b60d86baba 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
-#include <core/object.h>
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
+#include <core/device.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
-#include "priv.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
static int
-nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
- struct nouveau_gpuobj **pgpuobj)
+nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+ struct nvkm_gpuobj **pgpuobj)
{
const struct nvkm_dmaeng_impl *impl = (void *)
nv_oclass(nv_object(dmaobj)->engine);
@@ -47,7 +46,7 @@ nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
}
ret = impl->bind(dmaobj, parent, pgpuobj);
if (ret == 0)
- nouveau_object_ref(NULL, &parent);
+ nvkm_object_ref(NULL, &parent);
return ret;
}
@@ -55,24 +54,24 @@ nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
}
int
-nvkm_dmaobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void **pdata, u32 *psize,
+nvkm_dmaobj_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void **pdata, u32 *psize,
int length, void **pobject)
{
union {
struct nv_dma_v0 v0;
} *args = *pdata;
- struct nouveau_instmem *instmem = nouveau_instmem(parent);
- struct nouveau_client *client = nouveau_client(parent);
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_dmaobj *dmaobj;
+ struct nvkm_instmem *instmem = nvkm_instmem(parent);
+ struct nvkm_client *client = nvkm_client(parent);
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_dmaobj *dmaobj;
void *data = *pdata;
u32 size = *psize;
int ret;
- ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
dmaobj = *pobject;
if (ret)
return ret;
@@ -145,16 +144,16 @@ nvkm_dmaobj_create_(struct nouveau_object *parent,
}
int
-_nvkm_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_dmaeng_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
const struct nvkm_dmaeng_impl *impl = (void *)oclass;
- struct nouveau_dmaeng *dmaeng;
+ struct nvkm_dmaeng *dmaeng;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true, "DMAOBJ",
- "dmaobj", &dmaeng);
+ ret = nvkm_engine_create(parent, engine, oclass, true, "DMAOBJ",
+ "dmaobj", &dmaeng);
*pobject = nv_object(dmaeng);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
index 88ec33b20048..f880e5167e45 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
@@ -21,29 +21,26 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <core/client.h>
-#include <core/device.h>
#include <core/gpuobj.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
#include <subdev/fb.h>
-#include "priv.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
-struct nvc0_dmaobj_priv {
- struct nouveau_dmaobj base;
+struct gf100_dmaobj_priv {
+ struct nvkm_dmaobj base;
u32 flags0;
u32 flags5;
};
static int
-nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
- struct nouveau_object *parent,
- struct nouveau_gpuobj **pgpuobj)
+gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+ struct nvkm_gpuobj **pgpuobj)
{
- struct nvc0_dmaobj_priv *priv = (void *)dmaobj;
+ struct gf100_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
@@ -58,7 +55,7 @@ nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
} else
return 0;
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
@@ -73,15 +70,15 @@ nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
}
static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nvkm_dmaeng *dmaeng = (void *)engine;
union {
struct gf100_dma_v0 v0;
} *args;
- struct nvc0_dmaobj_priv *priv;
+ struct gf100_dmaobj_priv *priv;
u32 kind, user, unkn;
int ret;
@@ -149,31 +146,31 @@ nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
- .ctor = nvc0_dmaobj_ctor,
+static struct nvkm_ofuncs
+gf100_dmaobj_ofuncs = {
+ .ctor = gf100_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
.init = _nvkm_dmaobj_init,
.fini = _nvkm_dmaobj_fini,
};
-static struct nouveau_oclass
-nvc0_dmaeng_sclass[] = {
- { NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs },
+static struct nvkm_oclass
+gf100_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
{}
};
-struct nouveau_oclass *
-nvc0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+struct nvkm_oclass *
+gf100_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
.base.handle = NV_ENGINE(DMAOBJ, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_dmaeng_ctor,
.dtor = _nvkm_dmaeng_dtor,
.init = _nvkm_dmaeng_init,
.fini = _nvkm_dmaeng_fini,
},
- .sclass = nvc0_dmaeng_sclass,
- .bind = nvc0_dmaobj_bind,
+ .sclass = gf100_dmaeng_sclass,
+ .bind = gf100_dmaobj_bind,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
index 19f5f6522962..bf8f0f20976c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
@@ -21,28 +21,25 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <core/client.h>
-#include <core/device.h>
#include <core/gpuobj.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
#include <subdev/fb.h>
-#include "priv.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
-struct nvd0_dmaobj_priv {
- struct nouveau_dmaobj base;
+struct gf110_dmaobj_priv {
+ struct nvkm_dmaobj base;
u32 flags0;
};
static int
-nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
- struct nouveau_object *parent,
- struct nouveau_gpuobj **pgpuobj)
+gf110_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+ struct nvkm_gpuobj **pgpuobj)
{
- struct nvd0_dmaobj_priv *priv = (void *)dmaobj;
+ struct gf110_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
@@ -64,7 +61,7 @@ nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
} else
return 0;
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
nv_wo32(*pgpuobj, 0x00, priv->flags0);
nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
@@ -78,15 +75,15 @@ nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
}
static int
-nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nvkm_dmaeng *dmaeng = (void *)engine;
union {
struct gf110_dma_v0 v0;
} *args;
- struct nvd0_dmaobj_priv *priv;
+ struct gf110_dmaobj_priv *priv;
u32 kind, page;
int ret;
@@ -138,31 +135,31 @@ nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static struct nouveau_ofuncs
-nvd0_dmaobj_ofuncs = {
- .ctor = nvd0_dmaobj_ctor,
+static struct nvkm_ofuncs
+gf110_dmaobj_ofuncs = {
+ .ctor = gf110_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
.init = _nvkm_dmaobj_init,
.fini = _nvkm_dmaobj_fini,
};
-static struct nouveau_oclass
-nvd0_dmaeng_sclass[] = {
- { NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs },
+static struct nvkm_oclass
+gf110_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
{}
};
-struct nouveau_oclass *
-nvd0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+struct nvkm_oclass *
+gf110_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
.base.handle = NV_ENGINE(DMAOBJ, 0xd0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_dmaeng_ctor,
.dtor = _nvkm_dmaeng_dtor,
.init = _nvkm_dmaeng_init,
.fini = _nvkm_dmaeng_fini,
},
- .sclass = nvd0_dmaeng_sclass,
- .bind = nvd0_dmaobj_bind,
+ .sclass = gf110_dmaeng_sclass,
+ .bind = gf110_dmaobj_bind,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
index 20c9dbfe3b2e..b4379c2a2fb5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
@@ -21,29 +21,27 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <core/gpuobj.h>
-#include <nvif/class.h>
-
#include <subdev/fb.h>
-#include <subdev/vm/nv04.h>
+#include <subdev/mmu/nv04.h>
-#include "priv.h"
+#include <nvif/class.h>
struct nv04_dmaobj_priv {
- struct nouveau_dmaobj base;
+ struct nvkm_dmaobj base;
bool clone;
u32 flags0;
u32 flags2;
};
static int
-nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
- struct nouveau_object *parent,
- struct nouveau_gpuobj **pgpuobj)
+nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+ struct nvkm_gpuobj **pgpuobj)
{
struct nv04_dmaobj_priv *priv = (void *)dmaobj;
- struct nouveau_gpuobj *gpuobj;
+ struct nvkm_gpuobj *gpuobj;
u64 offset = priv->base.start & 0xfffff000;
u64 adjust = priv->base.start & 0x00000fff;
u32 length = priv->base.limit - priv->base.start;
@@ -62,15 +60,15 @@ nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
}
if (priv->clone) {
- struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaobj);
- struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
+ struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
+ struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
if (!dmaobj->start)
- return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
+ return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
offset = nv_ro32(pgt, 8 + (offset >> 10));
offset &= 0xfffff000;
}
- ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
+ ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
*pgpuobj = gpuobj;
if (ret == 0) {
nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
@@ -83,12 +81,12 @@ nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
}
static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv04_vmmgr_priv *vmm = nv04_vmmgr(engine);
+ struct nvkm_dmaeng *dmaeng = (void *)engine;
+ struct nv04_mmu_priv *mmu = nv04_mmu(engine);
struct nv04_dmaobj_priv *priv;
int ret;
@@ -98,7 +96,7 @@ nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
if (priv->base.target == NV_MEM_TARGET_VM) {
- if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass)
+ if (nv_object(mmu)->oclass == &nv04_mmu_oclass)
priv->clone = true;
priv->base.target = NV_MEM_TARGET_PCI;
priv->base.access = NV_MEM_ACCESS_RW;
@@ -135,7 +133,7 @@ nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv04_dmaobj_ofuncs = {
.ctor = nv04_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
@@ -143,7 +141,7 @@ nv04_dmaobj_ofuncs = {
.fini = _nvkm_dmaobj_fini,
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv04_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
@@ -151,10 +149,10 @@ nv04_dmaeng_sclass[] = {
{}
};
-struct nouveau_oclass *
+struct nvkm_oclass *
nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
.base.handle = NV_ENGINE(DMAOBJ, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_dmaeng_ctor,
.dtor = _nvkm_dmaeng_dtor,
.init = _nvkm_dmaeng_init,
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
index a740ddba2ee2..4d3c828fe0e6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
@@ -21,26 +21,24 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <core/client.h>
#include <core/gpuobj.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
#include <subdev/fb.h>
-#include "priv.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
struct nv50_dmaobj_priv {
- struct nouveau_dmaobj base;
+ struct nvkm_dmaobj base;
u32 flags0;
u32 flags5;
};
static int
-nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
- struct nouveau_object *parent,
- struct nouveau_gpuobj **pgpuobj)
+nv50_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+ struct nvkm_gpuobj **pgpuobj)
{
struct nv50_dmaobj_priv *priv = (void *)dmaobj;
int ret;
@@ -69,7 +67,7 @@ nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
}
}
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
@@ -84,11 +82,11 @@ nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
}
static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nvkm_dmaeng *dmaeng = (void *)engine;
union {
struct nv50_dma_v0 v0;
} *args;
@@ -167,7 +165,7 @@ nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv50_dmaobj_ofuncs = {
.ctor = nv50_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
@@ -175,7 +173,7 @@ nv50_dmaobj_ofuncs = {
.fini = _nvkm_dmaobj_fini,
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
@@ -183,10 +181,10 @@ nv50_dmaeng_sclass[] = {
{}
};
-struct nouveau_oclass *
+struct nvkm_oclass *
nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
.base.handle = NV_ENGINE(DMAOBJ, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_dmaeng_ctor,
.dtor = _nvkm_dmaeng_dtor,
.init = _nvkm_dmaeng_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
new file mode 100644
index 000000000000..44ae8a0ca65c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_DMAOBJ_PRIV_H__
+#define __NVKM_DMAOBJ_PRIV_H__
+#include <engine/dmaobj.h>
+
+#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
+ nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
+
+int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void **, u32 *,
+ int, void **);
+#define _nvkm_dmaobj_dtor nvkm_object_destroy
+#define _nvkm_dmaobj_init nvkm_object_init
+#define _nvkm_dmaobj_fini nvkm_object_fini
+
+int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+#define _nvkm_dmaeng_dtor _nvkm_engine_dtor
+#define _nvkm_dmaeng_init _nvkm_engine_init
+#define _nvkm_dmaeng_fini _nvkm_engine_fini
+
+struct nvkm_dmaeng_impl {
+ struct nvkm_oclass base;
+ struct nvkm_oclass *sclass;
+ int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
+ struct nvkm_gpuobj **);
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 2914646c8709..30958c19e61d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -19,14 +19,15 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include <engine/falcon.h>
+
+#include <core/device.h>
#include <subdev/timer.h>
void
-nouveau_falcon_intr(struct nouveau_subdev *subdev)
+nvkm_falcon_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_falcon *falcon = (void *)subdev;
+ struct nvkm_falcon *falcon = (void *)subdev;
u32 dispatch = nv_ro32(falcon, 0x01c);
u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
@@ -43,16 +44,16 @@ nouveau_falcon_intr(struct nouveau_subdev *subdev)
}
u32
-_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+_nvkm_falcon_rd32(struct nvkm_object *object, u64 addr)
{
- struct nouveau_falcon *falcon = (void *)object;
+ struct nvkm_falcon *falcon = (void *)object;
return nv_rd32(falcon, falcon->addr + addr);
}
void
-_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+_nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
- struct nouveau_falcon *falcon = (void *)object;
+ struct nvkm_falcon *falcon = (void *)object;
nv_wr32(falcon, falcon->addr + addr, data);
}
@@ -67,17 +68,17 @@ vmemdup(const void *src, size_t len)
}
int
-_nouveau_falcon_init(struct nouveau_object *object)
+_nvkm_falcon_init(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
- struct nouveau_falcon *falcon = (void *)object;
+ struct nvkm_device *device = nv_device(object);
+ struct nvkm_falcon *falcon = (void *)object;
const struct firmware *fw;
char name[32] = "internal";
int ret, i;
u32 caps;
/* enable engine, and determine its capabilities */
- ret = nouveau_engine_init(&falcon->base);
+ ret = nvkm_engine_init(&falcon->base);
if (ret)
return ret;
@@ -171,9 +172,8 @@ _nouveau_falcon_init(struct nouveau_object *object)
/* ensure any "self-bootstrapping" firmware image is in vram */
if (!falcon->data.data && !falcon->core) {
- ret = nouveau_gpuobj_new(object->parent, NULL,
- falcon->code.size, 256, 0,
- &falcon->core);
+ ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size,
+ 256, 0, &falcon->core);
if (ret) {
nv_error(falcon, "core allocation failed, %d\n", ret);
return ret;
@@ -238,12 +238,12 @@ _nouveau_falcon_init(struct nouveau_object *object)
}
int
-_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+_nvkm_falcon_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_falcon *falcon = (void *)object;
+ struct nvkm_falcon *falcon = (void *)object;
if (!suspend) {
- nouveau_gpuobj_ref(NULL, &falcon->core);
+ nvkm_gpuobj_ref(NULL, &falcon->core);
if (falcon->external) {
vfree(falcon->data.data);
vfree(falcon->code.data);
@@ -254,21 +254,20 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
nv_wo32(falcon, 0x014, 0xffffffff);
- return nouveau_engine_fini(&falcon->base, suspend);
+ return nvkm_engine_fini(&falcon->base, suspend);
}
int
-nouveau_falcon_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 addr, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
+nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 addr, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
{
- struct nouveau_falcon *falcon;
+ struct nvkm_falcon *falcon;
int ret;
- ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
- fname, length, pobject);
+ ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
+ fname, length, pobject);
falcon = *pobject;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
new file mode 100644
index 000000000000..c5a2d8718c5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -0,0 +1,11 @@
+nvkm-y += nvkm/engine/fifo/base.o
+nvkm-y += nvkm/engine/fifo/nv04.o
+nvkm-y += nvkm/engine/fifo/nv10.o
+nvkm-y += nvkm/engine/fifo/nv17.o
+nvkm-y += nvkm/engine/fifo/nv40.o
+nvkm-y += nvkm/engine/fifo/nv50.o
+nvkm-y += nvkm/engine/fifo/g84.o
+nvkm-y += nvkm/engine/fifo/gf100.o
+nvkm-y += nvkm/engine/fifo/gk104.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
+nvkm-y += nvkm/engine/fifo/gk208.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index ac8375cf4eef..fa223f88d25e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -21,21 +21,21 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/fifo.h>
#include <core/client.h>
-#include <core/object.h>
+#include <core/device.h>
#include <core/handle.h>
-#include <core/event.h>
-#include <nvif/unpack.h>
+#include <core/notify.h>
+#include <engine/dmaobj.h>
+
#include <nvif/class.h>
#include <nvif/event.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
+#include <nvif/unpack.h>
static int
-nouveau_fifo_event_ctor(struct nouveau_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
if (size == 0) {
notify->size = 0;
@@ -47,33 +47,33 @@ nouveau_fifo_event_ctor(struct nouveau_object *object, void *data, u32 size,
}
static const struct nvkm_event_func
-nouveau_fifo_event_func = {
- .ctor = nouveau_fifo_event_ctor,
+nvkm_fifo_event_func = {
+ .ctor = nvkm_fifo_event_ctor,
};
int
-nouveau_fifo_channel_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int bar, u32 addr, u32 size, u32 pushbuf,
- u64 engmask, int len, void **ptr)
+nvkm_fifo_channel_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass,
+ int bar, u32 addr, u32 size, u32 pushbuf,
+ u64 engmask, int len, void **ptr)
{
- struct nouveau_device *device = nv_device(engine);
- struct nouveau_fifo *priv = (void *)engine;
- struct nouveau_fifo_chan *chan;
- struct nouveau_dmaeng *dmaeng;
+ struct nvkm_device *device = nv_device(engine);
+ struct nvkm_fifo *priv = (void *)engine;
+ struct nvkm_fifo_chan *chan;
+ struct nvkm_dmaeng *dmaeng;
unsigned long flags;
int ret;
/* create base object class */
- ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
- engmask, len, ptr);
+ ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
+ engmask, len, ptr);
chan = *ptr;
if (ret)
return ret;
/* validate dma object representing push buffer */
- chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
if (!chan->pushdma)
return -ENOENT;
@@ -113,9 +113,9 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
}
void
-nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
+nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
- struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
+ struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
unsigned long flags;
if (chan->user)
@@ -125,31 +125,31 @@ nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
priv->channel[chan->chid] = NULL;
spin_unlock_irqrestore(&priv->lock, flags);
- nouveau_gpuobj_ref(NULL, &chan->pushgpu);
- nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
- nouveau_namedb_destroy(&chan->base);
+ nvkm_gpuobj_ref(NULL, &chan->pushgpu);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
+ nvkm_namedb_destroy(&chan->namedb);
}
void
-_nouveau_fifo_channel_dtor(struct nouveau_object *object)
+_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
- struct nouveau_fifo_chan *chan = (void *)object;
- nouveau_fifo_channel_destroy(chan);
+ struct nvkm_fifo_chan *chan = (void *)object;
+ nvkm_fifo_channel_destroy(chan);
}
int
-_nouveau_fifo_channel_map(struct nouveau_object *object, u64 *addr, u32 *size)
+_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
- struct nouveau_fifo_chan *chan = (void *)object;
+ struct nvkm_fifo_chan *chan = (void *)object;
*addr = chan->addr;
*size = chan->size;
return 0;
}
u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
+_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
{
- struct nouveau_fifo_chan *chan = (void *)object;
+ struct nvkm_fifo_chan *chan = (void *)object;
if (unlikely(!chan->user)) {
chan->user = ioremap(chan->addr, chan->size);
if (WARN_ON_ONCE(chan->user == NULL))
@@ -159,9 +159,9 @@ _nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
}
void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
+_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
- struct nouveau_fifo_chan *chan = (void *)object;
+ struct nvkm_fifo_chan *chan = (void *)object;
if (unlikely(!chan->user)) {
chan->user = ioremap(chan->addr, chan->size);
if (WARN_ON_ONCE(chan->user == NULL))
@@ -171,8 +171,8 @@ _nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
}
int
-nouveau_fifo_uevent_ctor(struct nouveau_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
union {
struct nvif_notify_uevent_req none;
@@ -189,7 +189,7 @@ nouveau_fifo_uevent_ctor(struct nouveau_object *object, void *data, u32 size,
}
void
-nouveau_fifo_uevent(struct nouveau_fifo *fifo)
+nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
struct nvif_notify_uevent_rep rep = {
};
@@ -197,10 +197,10 @@ nouveau_fifo_uevent(struct nouveau_fifo *fifo)
}
int
-_nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type,
- struct nvkm_event **event)
+_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
+ struct nvkm_event **event)
{
- struct nouveau_fifo *fifo = (void *)object->engine;
+ struct nvkm_fifo *fifo = (void *)object->engine;
switch (type) {
case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
if (nv_mclass(object) >= G82_CHANNEL_DMA) {
@@ -215,14 +215,14 @@ _nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type,
}
static int
-nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
+nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
{
int engidx = nv_hclass(priv) & 0xff;
while (object && object->parent) {
if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
(nv_hclass(object->parent) & 0xff) == engidx)
- return nouveau_fifo_chan(object)->chid;
+ return nvkm_fifo_chan(object)->chid;
object = object->parent;
}
@@ -230,9 +230,9 @@ nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
}
const char *
-nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
+nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
{
- struct nouveau_fifo_chan *chan = NULL;
+ struct nvkm_fifo_chan *chan = NULL;
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
@@ -240,29 +240,28 @@ nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
chan = (void *)fifo->channel[chid];
spin_unlock_irqrestore(&fifo->lock, flags);
- return nouveau_client_name(chan);
+ return nvkm_client_name(chan);
}
void
-nouveau_fifo_destroy(struct nouveau_fifo *priv)
+nvkm_fifo_destroy(struct nvkm_fifo *priv)
{
kfree(priv->channel);
nvkm_event_fini(&priv->uevent);
nvkm_event_fini(&priv->cevent);
- nouveau_engine_destroy(&priv->base);
+ nvkm_engine_destroy(&priv->base);
}
int
-nouveau_fifo_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int min, int max, int length, void **pobject)
+nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass,
+ int min, int max, int length, void **pobject)
{
- struct nouveau_fifo *priv;
+ struct nvkm_fifo *priv;
int ret;
- ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
- "fifo", length, pobject);
+ ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
+ "fifo", length, pobject);
priv = *pobject;
if (ret)
return ret;
@@ -273,11 +272,11 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
- ret = nvkm_event_init(&nouveau_fifo_event_func, 1, 1, &priv->cevent);
+ ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
if (ret)
return ret;
- priv->chid = nouveau_fifo_chid;
+ priv->chid = nvkm_fifo_chid;
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index 1f42996b354a..a04920b3cf84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -21,48 +21,45 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "nv04.h"
-#include <core/os.h>
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
-#include <core/event.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <subdev/timer.h>
#include <subdev/bar.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-
-#include "nv04.h"
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static int
-nv84_fifo_context_attach(struct nouveau_object *parent,
- struct nouveau_object *object)
+g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_base *base = (void *)parent->parent;
- struct nouveau_gpuobj *ectx = (void *)object;
+ struct nvkm_gpuobj *ectx = (void *)object;
u64 limit = ectx->addr + ectx->size - 1;
u64 start = ectx->addr;
u32 addr;
switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0020; break;
- case NVDEV_ENGINE_VP : addr = 0x0040; break;
- case NVDEV_ENGINE_PPP :
- case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
- case NVDEV_ENGINE_BSP : addr = 0x0080; break;
- case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
- case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0020; break;
+ case NVDEV_ENGINE_VP :
+ case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
+ case NVDEV_ENGINE_MSPPP :
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ case NVDEV_ENGINE_BSP :
+ case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
+ case NVDEV_ENGINE_CIPHER:
+ case NVDEV_ENGINE_SEC : addr = 0x00a0; break;
+ case NVDEV_ENGINE_CE0 : addr = 0x00c0; break;
default:
return -EINVAL;
}
@@ -80,10 +77,10 @@ nv84_fifo_context_attach(struct nouveau_object *parent,
}
static int
-nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
- struct nouveau_object *object)
+g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
+ struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_priv *priv = (void *)parent->engine;
struct nv50_fifo_base *base = (void *)parent->parent;
struct nv50_fifo_chan *chan = (void *)parent;
@@ -91,14 +88,17 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
bool done;
switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
- case NVDEV_ENGINE_VP : engn = 3; addr = 0x0040; break;
- case NVDEV_ENGINE_PPP :
- case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
- case NVDEV_ENGINE_BSP : engn = 5; addr = 0x0080; break;
- case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
- case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break;
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
+ case NVDEV_ENGINE_VP :
+ case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
+ case NVDEV_ENGINE_MSPPP :
+ case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
+ case NVDEV_ENGINE_BSP :
+ case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
+ case NVDEV_ENGINE_CIPHER:
+ case NVDEV_ENGINE_SEC : engn = 4; addr = 0x00a0; break;
+ case NVDEV_ENGINE_CE0 : engn = 2; addr = 0x00c0; break;
default:
return -EINVAL;
}
@@ -109,7 +109,7 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002520, save);
if (!done) {
nv_error(priv, "channel %d [%s] unload timeout\n",
- chan->base.chid, nouveau_client_name(chan));
+ chan->base.chid, nvkm_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -125,8 +125,8 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
}
static int
-nv84_fifo_object_attach(struct nouveau_object *parent,
- struct nouveau_object *object, u32 handle)
+g84_fifo_object_attach(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 handle)
{
struct nv50_fifo_chan *chan = (void *)parent;
u32 context;
@@ -141,30 +141,32 @@ nv84_fifo_object_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
case NVDEV_ENGINE_MPEG :
- case NVDEV_ENGINE_PPP : context |= 0x00200000; break;
+ case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
case NVDEV_ENGINE_ME :
- case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
- case NVDEV_ENGINE_VP : context |= 0x00400000; break;
- case NVDEV_ENGINE_CRYPT :
+ case NVDEV_ENGINE_CE0 : context |= 0x00300000; break;
+ case NVDEV_ENGINE_VP :
+ case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
+ case NVDEV_ENGINE_CIPHER:
+ case NVDEV_ENGINE_SEC :
case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
- case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
+ case NVDEV_ENGINE_BSP :
+ case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
default:
return -EINVAL;
}
- return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+ return nvkm_ramht_insert(chan->ramht, 0, handle, context);
}
static int
-nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv03_channel_dma_v0 v0;
} *args = data;
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
int ret;
@@ -177,33 +179,36 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG) |
- (1ULL << NVDEV_ENGINE_ME) |
- (1ULL << NVDEV_ENGINE_VP) |
- (1ULL << NVDEV_ENGINE_CRYPT) |
- (1ULL << NVDEV_ENGINE_BSP) |
- (1ULL << NVDEV_ENGINE_PPP) |
- (1ULL << NVDEV_ENGINE_COPY0) |
- (1ULL << NVDEV_ENGINE_VIC), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CIPHER) |
+ (1ULL << NVDEV_ENGINE_SEC) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_MSVLD) |
+ (1ULL << NVDEV_ENGINE_MSPDEC) |
+ (1ULL << NVDEV_ENGINE_MSPPP) |
+ (1ULL << NVDEV_ENGINE_CE0) |
+ (1ULL << NVDEV_ENGINE_VIC), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
args->v0.chid = chan->base.chid;
- ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
+ ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
- nv_parent(chan)->context_attach = nv84_fifo_context_attach;
- nv_parent(chan)->context_detach = nv84_fifo_context_detach;
- nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+ nv_parent(chan)->context_attach = g84_fifo_context_attach;
+ nv_parent(chan)->context_detach = g84_fifo_context_detach;
+ nv_parent(chan)->object_attach = g84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
@@ -219,7 +224,7 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->base.node->offset >> 4));
+ (chan->ramht->gpuobj.node->offset >> 4));
nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
bar->flush(bar);
@@ -227,15 +232,14 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
}
static int
-nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_channel_gpfifo_v0 v0;
} *args = data;
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
u64 ioffset, ilength;
@@ -250,33 +254,36 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG) |
- (1ULL << NVDEV_ENGINE_ME) |
- (1ULL << NVDEV_ENGINE_VP) |
- (1ULL << NVDEV_ENGINE_CRYPT) |
- (1ULL << NVDEV_ENGINE_BSP) |
- (1ULL << NVDEV_ENGINE_PPP) |
- (1ULL << NVDEV_ENGINE_COPY0) |
- (1ULL << NVDEV_ENGINE_VIC), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CIPHER) |
+ (1ULL << NVDEV_ENGINE_SEC) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_MSVLD) |
+ (1ULL << NVDEV_ENGINE_MSPDEC) |
+ (1ULL << NVDEV_ENGINE_MSPPP) |
+ (1ULL << NVDEV_ENGINE_CE0) |
+ (1ULL << NVDEV_ENGINE_VIC), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
args->v0.chid = chan->base.chid;
- ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
+ ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
- nv_parent(chan)->context_attach = nv84_fifo_context_attach;
- nv_parent(chan)->context_detach = nv84_fifo_context_detach;
- nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+ nv_parent(chan)->context_attach = g84_fifo_context_attach;
+ nv_parent(chan)->context_detach = g84_fifo_context_detach;
+ nv_parent(chan)->object_attach = g84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ioffset = args->v0.ioffset;
@@ -292,7 +299,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->base.node->offset >> 4));
+ (chan->ramht->gpuobj.node->offset >> 4));
nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
bar->flush(bar);
@@ -300,16 +307,16 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
}
static int
-nv84_fifo_chan_init(struct nouveau_object *object)
+g84_fifo_chan_init(struct nvkm_object *object)
{
struct nv50_fifo_priv *priv = (void *)object->engine;
struct nv50_fifo_base *base = (void *)object->parent;
struct nv50_fifo_chan *chan = (void *)object;
- struct nouveau_gpuobj *ramfc = base->ramfc;
+ struct nvkm_gpuobj *ramfc = base->ramfc;
u32 chid = chan->base.chid;
int ret;
- ret = nouveau_fifo_channel_init(&chan->base);
+ ret = nvkm_fifo_channel_init(&chan->base);
if (ret)
return ret;
@@ -318,34 +325,34 @@ nv84_fifo_chan_init(struct nouveau_object *object)
return 0;
}
-static struct nouveau_ofuncs
-nv84_fifo_ofuncs_dma = {
- .ctor = nv84_fifo_chan_ctor_dma,
+static struct nvkm_ofuncs
+g84_fifo_ofuncs_dma = {
+ .ctor = g84_fifo_chan_ctor_dma,
.dtor = nv50_fifo_chan_dtor,
- .init = nv84_fifo_chan_init,
+ .init = g84_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_ofuncs
-nv84_fifo_ofuncs_ind = {
- .ctor = nv84_fifo_chan_ctor_ind,
+static struct nvkm_ofuncs
+g84_fifo_ofuncs_ind = {
+ .ctor = g84_fifo_chan_ctor_ind,
.dtor = nv50_fifo_chan_dtor,
- .init = nv84_fifo_chan_init,
+ .init = g84_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
-nv84_fifo_sclass[] = {
- { G82_CHANNEL_DMA, &nv84_fifo_ofuncs_dma },
- { G82_CHANNEL_GPFIFO, &nv84_fifo_ofuncs_ind },
+static struct nvkm_oclass
+g84_fifo_sclass[] = {
+ { G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
+ { G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
{}
};
@@ -354,57 +361,56 @@ nv84_fifo_sclass[] = {
******************************************************************************/
static int
-nv84_fifo_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_fifo_base *base;
int ret;
- ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
- 0x1000, NVOBJ_FLAG_HEAP, &base);
+ ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
- 0, &base->pgd);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
+ 0, &base->pgd);
if (ret)
return ret;
- ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
- 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
+ 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
- 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
+ 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
if (ret)
return ret;
return 0;
}
-static struct nouveau_oclass
-nv84_fifo_cclass = {
+static struct nvkm_oclass
+g84_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_fifo_context_ctor,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_fifo_context_ctor,
.dtor = nv50_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -413,69 +419,69 @@ nv84_fifo_cclass = {
******************************************************************************/
static void
-nv84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
}
static void
-nv84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
}
static const struct nvkm_event_func
-nv84_fifo_uevent_func = {
- .ctor = nouveau_fifo_uevent_ctor,
- .init = nv84_fifo_uevent_init,
- .fini = nv84_fifo_uevent_fini,
+g84_fifo_uevent_func = {
+ .ctor = nvkm_fifo_uevent_ctor,
+ .init = g84_fifo_uevent_init,
+ .fini = g84_fifo_uevent_fini,
};
static int
-nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[1]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[1]);
if (ret)
return ret;
- ret = nvkm_event_init(&nv84_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &nv84_fifo_cclass;
- nv_engine(priv)->sclass = nv84_fifo_sclass;
+ nv_engine(priv)->cclass = &g84_fifo_cclass;
+ nv_engine(priv)->sclass = g84_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
return 0;
}
-struct nouveau_oclass *
-nv84_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+g84_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_fifo_ctor,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_fifo_ctor,
.dtor = nv50_fifo_dtor,
.init = nv50_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = _nvkm_fifo_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 074d434c3077..b745252f2261 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -21,52 +21,47 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/fifo.h>
#include <core/client.h>
-#include <core/handle.h>
-#include <core/namedb.h>
-#include <core/gpuobj.h>
#include <core/engctx.h>
-#include <core/event.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
#include <core/enum.h>
-
-#include <subdev/timer.h>
+#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
-#include <subdev/vm.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
+#include <nvif/class.h>
+#include <nvif/unpack.h>
-struct nvc0_fifo_priv {
- struct nouveau_fifo base;
+struct gf100_fifo_priv {
+ struct nvkm_fifo base;
struct work_struct fault;
u64 mask;
struct {
- struct nouveau_gpuobj *mem[2];
+ struct nvkm_gpuobj *mem[2];
int active;
wait_queue_head_t wait;
} runlist;
struct {
- struct nouveau_gpuobj *mem;
- struct nouveau_vma bar;
+ struct nvkm_gpuobj *mem;
+ struct nvkm_vma bar;
} user;
int spoon_nr;
};
-struct nvc0_fifo_base {
- struct nouveau_fifo_base base;
- struct nouveau_gpuobj *pgd;
- struct nouveau_vm *vm;
+struct gf100_fifo_base {
+ struct nvkm_fifo_base base;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
};
-struct nvc0_fifo_chan {
- struct nouveau_fifo_chan base;
+struct gf100_fifo_chan {
+ struct nvkm_fifo_chan base;
enum {
STOPPED,
RUNNING,
@@ -79,10 +74,10 @@ struct nvc0_fifo_chan {
******************************************************************************/
static void
-nvc0_fifo_runlist_update(struct nvc0_fifo_priv *priv)
+gf100_fifo_runlist_update(struct gf100_fifo_priv *priv)
{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_gpuobj *cur;
+ struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_gpuobj *cur;
int i, p;
mutex_lock(&nv_subdev(priv)->mutex);
@@ -90,7 +85,7 @@ nvc0_fifo_runlist_update(struct nvc0_fifo_priv *priv)
priv->runlist.active = !priv->runlist.active;
for (i = 0, p = 0; i < 128; i++) {
- struct nvc0_fifo_chan *chan = (void *)priv->base.channel[i];
+ struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
if (chan && chan->state == RUNNING) {
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000004);
@@ -110,30 +105,30 @@ nvc0_fifo_runlist_update(struct nvc0_fifo_priv *priv)
}
static int
-nvc0_fifo_context_attach(struct nouveau_object *parent,
- struct nouveau_object *object)
+gf100_fifo_context_attach(struct nvkm_object *parent,
+ struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nvc0_fifo_base *base = (void *)parent->parent;
- struct nouveau_engctx *ectx = (void *)object;
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gf100_fifo_base *base = (void *)parent->parent;
+ struct nvkm_engctx *ectx = (void *)object;
u32 addr;
int ret;
switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
- case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
- case NVDEV_ENGINE_BSP : addr = 0x0270; break;
- case NVDEV_ENGINE_VP : addr = 0x0250; break;
- case NVDEV_ENGINE_PPP : addr = 0x0260; break;
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
+ case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
+ case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
+ case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
+ case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
default:
return -EINVAL;
}
if (!ectx->vma.node) {
- ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
- NV_MEM_ACCESS_RW, &ectx->vma);
+ ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+ NV_MEM_ACCESS_RW, &ectx->vma);
if (ret)
return ret;
@@ -147,23 +142,23 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
}
static int
-nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
- struct nouveau_object *object)
+gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
+ struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nvc0_fifo_priv *priv = (void *)parent->engine;
- struct nvc0_fifo_base *base = (void *)parent->parent;
- struct nvc0_fifo_chan *chan = (void *)parent;
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gf100_fifo_priv *priv = (void *)parent->engine;
+ struct gf100_fifo_base *base = (void *)parent->parent;
+ struct gf100_fifo_chan *chan = (void *)parent;
u32 addr;
switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
- case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
- case NVDEV_ENGINE_BSP : addr = 0x0270; break;
- case NVDEV_ENGINE_VP : addr = 0x0250; break;
- case NVDEV_ENGINE_PPP : addr = 0x0260; break;
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
+ case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
+ case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
+ case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
+ case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -171,7 +166,7 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nouveau_client_name(chan));
+ chan->base.chid, nvkm_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -183,18 +178,17 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
}
static int
-nvc0_fifo_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_channel_gpfifo_v0 v0;
} *args = data;
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nvc0_fifo_priv *priv = (void *)engine;
- struct nvc0_fifo_base *base = (void *)parent;
- struct nvc0_fifo_chan *chan;
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gf100_fifo_priv *priv = (void *)engine;
+ struct gf100_fifo_base *base = (void *)parent;
+ struct gf100_fifo_chan *chan;
u64 usermem, ioffset, ilength;
int ret, i;
@@ -207,24 +201,24 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
- priv->user.bar.offset, 0x1000,
- args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_COPY0) |
- (1ULL << NVDEV_ENGINE_COPY1) |
- (1ULL << NVDEV_ENGINE_BSP) |
- (1ULL << NVDEV_ENGINE_VP) |
- (1ULL << NVDEV_ENGINE_PPP), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
+ priv->user.bar.offset, 0x1000,
+ args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_CE0) |
+ (1ULL << NVDEV_ENGINE_CE1) |
+ (1ULL << NVDEV_ENGINE_MSVLD) |
+ (1ULL << NVDEV_ENGINE_MSPDEC) |
+ (1ULL << NVDEV_ENGINE_MSPPP), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
args->v0.chid = chan->base.chid;
- nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
- nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
+ nv_parent(chan)->context_attach = gf100_fifo_context_attach;
+ nv_parent(chan)->context_detach = gf100_fifo_context_detach;
usermem = chan->base.chid * 0x1000;
ioffset = args->v0.ioffset;
@@ -254,15 +248,15 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
}
static int
-nvc0_fifo_chan_init(struct nouveau_object *object)
+gf100_fifo_chan_init(struct nvkm_object *object)
{
- struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
- struct nvc0_fifo_priv *priv = (void *)object->engine;
- struct nvc0_fifo_chan *chan = (void *)object;
+ struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
+ struct gf100_fifo_priv *priv = (void *)object->engine;
+ struct gf100_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
int ret;
- ret = nouveau_fifo_channel_init(&chan->base);
+ ret = nvkm_fifo_channel_init(&chan->base);
if (ret)
return ret;
@@ -270,47 +264,47 @@ nvc0_fifo_chan_init(struct nouveau_object *object)
if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
- nvc0_fifo_runlist_update(priv);
+ gf100_fifo_runlist_update(priv);
}
return 0;
}
-static void nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv);
+static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);
static int
-nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
- struct nvc0_fifo_priv *priv = (void *)object->engine;
- struct nvc0_fifo_chan *chan = (void *)object;
+ struct gf100_fifo_priv *priv = (void *)object->engine;
+ struct gf100_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
- nvc0_fifo_runlist_update(priv);
+ gf100_fifo_runlist_update(priv);
}
- nvc0_fifo_intr_engine(priv);
+ gf100_fifo_intr_engine(priv);
nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
- return nouveau_fifo_channel_fini(&chan->base, suspend);
+ return nvkm_fifo_channel_fini(&chan->base, suspend);
}
-static struct nouveau_ofuncs
-nvc0_fifo_ofuncs = {
- .ctor = nvc0_fifo_chan_ctor,
- .dtor = _nouveau_fifo_channel_dtor,
- .init = nvc0_fifo_chan_init,
- .fini = nvc0_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+static struct nvkm_ofuncs
+gf100_fifo_ofuncs = {
+ .ctor = gf100_fifo_chan_ctor,
+ .dtor = _nvkm_fifo_channel_dtor,
+ .init = gf100_fifo_chan_init,
+ .fini = gf100_fifo_chan_fini,
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
-nvc0_fifo_sclass[] = {
- { FERMI_CHANNEL_GPFIFO, &nvc0_fifo_ofuncs },
+static struct nvkm_oclass
+gf100_fifo_sclass[] = {
+ { FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
{}
};
@@ -319,23 +313,22 @@ nvc0_fifo_sclass[] = {
******************************************************************************/
static int
-nvc0_fifo_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_fifo_base *base;
+ struct gf100_fifo_base *base;
int ret;
- ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_HEAP, &base);
+ ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
- &base->pgd);
+ ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+ &base->pgd);
if (ret)
return ret;
@@ -344,7 +337,7 @@ nvc0_fifo_context_ctor(struct nouveau_object *parent,
nv_wo32(base, 0x0208, 0xffffffff);
nv_wo32(base, 0x020c, 0x000000ff);
- ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
@@ -352,24 +345,24 @@ nvc0_fifo_context_ctor(struct nouveau_object *parent,
}
static void
-nvc0_fifo_context_dtor(struct nouveau_object *object)
+gf100_fifo_context_dtor(struct nvkm_object *object)
{
- struct nvc0_fifo_base *base = (void *)object;
- nouveau_vm_ref(NULL, &base->vm, base->pgd);
- nouveau_gpuobj_ref(NULL, &base->pgd);
- nouveau_fifo_context_destroy(&base->base);
+ struct gf100_fifo_base *base = (void *)object;
+ nvkm_vm_ref(NULL, &base->vm, base->pgd);
+ nvkm_gpuobj_ref(NULL, &base->pgd);
+ nvkm_fifo_context_destroy(&base->base);
}
-static struct nouveau_oclass
-nvc0_fifo_cclass = {
+static struct nvkm_oclass
+gf100_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_fifo_context_ctor,
- .dtor = nvc0_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_fifo_context_ctor,
+ .dtor = gf100_fifo_context_dtor,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -378,15 +371,15 @@ nvc0_fifo_cclass = {
******************************************************************************/
static inline int
-nvc0_fifo_engidx(struct nvc0_fifo_priv *priv, u32 engn)
+gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
{
switch (engn) {
- case NVDEV_ENGINE_GR : engn = 0; break;
- case NVDEV_ENGINE_BSP : engn = 1; break;
- case NVDEV_ENGINE_PPP : engn = 2; break;
- case NVDEV_ENGINE_VP : engn = 3; break;
- case NVDEV_ENGINE_COPY0: engn = 4; break;
- case NVDEV_ENGINE_COPY1: engn = 5; break;
+ case NVDEV_ENGINE_GR : engn = 0; break;
+ case NVDEV_ENGINE_MSVLD : engn = 1; break;
+ case NVDEV_ENGINE_MSPPP : engn = 2; break;
+ case NVDEV_ENGINE_MSPDEC: engn = 3; break;
+ case NVDEV_ENGINE_CE0 : engn = 4; break;
+ case NVDEV_ENGINE_CE1 : engn = 5; break;
default:
return -1;
}
@@ -394,28 +387,28 @@ nvc0_fifo_engidx(struct nvc0_fifo_priv *priv, u32 engn)
return engn;
}
-static inline struct nouveau_engine *
-nvc0_fifo_engine(struct nvc0_fifo_priv *priv, u32 engn)
+static inline struct nvkm_engine *
+gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn)
{
switch (engn) {
case 0: engn = NVDEV_ENGINE_GR; break;
- case 1: engn = NVDEV_ENGINE_BSP; break;
- case 2: engn = NVDEV_ENGINE_PPP; break;
- case 3: engn = NVDEV_ENGINE_VP; break;
- case 4: engn = NVDEV_ENGINE_COPY0; break;
- case 5: engn = NVDEV_ENGINE_COPY1; break;
+ case 1: engn = NVDEV_ENGINE_MSVLD; break;
+ case 2: engn = NVDEV_ENGINE_MSPPP; break;
+ case 3: engn = NVDEV_ENGINE_MSPDEC; break;
+ case 4: engn = NVDEV_ENGINE_CE0; break;
+ case 5: engn = NVDEV_ENGINE_CE1; break;
default:
return NULL;
}
- return nouveau_engine(priv, engn);
+ return nvkm_engine(priv, engn);
}
static void
-nvc0_fifo_recover_work(struct work_struct *work)
+gf100_fifo_recover_work(struct work_struct *work)
{
- struct nvc0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
- struct nouveau_object *engine;
+ struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault);
+ struct nvkm_object *engine;
unsigned long flags;
u32 engn, engm = 0;
u64 mask, todo;
@@ -426,26 +419,25 @@ nvc0_fifo_recover_work(struct work_struct *work)
spin_unlock_irqrestore(&priv->base.lock, flags);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
- engm |= 1 << nvc0_fifo_engidx(priv, engn);
+ engm |= 1 << gf100_fifo_engidx(priv, engn);
nv_mask(priv, 0x002630, engm, engm);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
- if ((engine = (void *)nouveau_engine(priv, engn))) {
+ if ((engine = (void *)nvkm_engine(priv, engn))) {
nv_ofuncs(engine)->fini(engine, false);
WARN_ON(nv_ofuncs(engine)->init(engine));
}
}
- nvc0_fifo_runlist_update(priv);
+ gf100_fifo_runlist_update(priv);
nv_wr32(priv, 0x00262c, engm);
nv_mask(priv, 0x002630, engm, 0x00000000);
}
static void
-nvc0_fifo_recover(struct nvc0_fifo_priv *priv, struct nouveau_engine *engine,
- struct nvc0_fifo_chan *chan)
+gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine,
+ struct gf100_fifo_chan *chan)
{
- struct nouveau_object *engobj = nv_object(engine);
u32 chid = chan->base.chid;
unsigned long flags;
@@ -456,16 +448,16 @@ nvc0_fifo_recover(struct nvc0_fifo_priv *priv, struct nouveau_engine *engine,
chan->state = KILLED;
spin_lock_irqsave(&priv->base.lock, flags);
- priv->mask |= 1ULL << nv_engidx(engobj);
+ priv->mask |= 1ULL << nv_engidx(engine);
spin_unlock_irqrestore(&priv->base.lock, flags);
schedule_work(&priv->fault);
}
static int
-nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
- struct nvc0_fifo_chan *chan = NULL;
- struct nouveau_handle *bind;
+ struct gf100_fifo_chan *chan = NULL;
+ struct nvkm_handle *bind;
unsigned long flags;
int ret = -EINVAL;
@@ -475,11 +467,11 @@ nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
if (unlikely(!chan))
goto out;
- bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
if (likely(bind)) {
if (!mthd || !nv_call(bind->object, mthd, data))
ret = 0;
- nouveau_namedb_put(bind);
+ nvkm_namedb_put(bind);
}
out:
@@ -487,17 +479,17 @@ out:
return ret;
}
-static const struct nouveau_enum
-nvc0_fifo_sched_reason[] = {
+static const struct nvkm_enum
+gf100_fifo_sched_reason[] = {
{ 0x0a, "CTXSW_TIMEOUT" },
{}
};
static void
-nvc0_fifo_intr_sched_ctxsw(struct nvc0_fifo_priv *priv)
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
{
- struct nouveau_engine *engine;
- struct nvc0_fifo_chan *chan;
+ struct nvkm_engine *engine;
+ struct gf100_fifo_chan *chan;
u32 engn;
for (engn = 0; engn < 6; engn++) {
@@ -512,22 +504,22 @@ nvc0_fifo_intr_sched_ctxsw(struct nvc0_fifo_priv *priv)
if (busy && unk0 && unk1) {
if (!(chan = (void *)priv->base.channel[chid]))
continue;
- if (!(engine = nvc0_fifo_engine(priv, engn)))
+ if (!(engine = gf100_fifo_engine(priv, engn)))
continue;
- nvc0_fifo_recover(priv, engine, chan);
+ gf100_fifo_recover(priv, engine, chan);
}
}
}
static void
-nvc0_fifo_intr_sched(struct nvc0_fifo_priv *priv)
+gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
{
u32 intr = nv_rd32(priv, 0x00254c);
u32 code = intr & 0x000000ff;
- const struct nouveau_enum *en;
+ const struct nvkm_enum *en;
char enunk[6] = "";
- en = nouveau_enum_find(nvc0_fifo_sched_reason, code);
+ en = nvkm_enum_find(gf100_fifo_sched_reason, code);
if (!en)
snprintf(enunk, sizeof(enunk), "UNK%02x", code);
@@ -535,32 +527,32 @@ nvc0_fifo_intr_sched(struct nvc0_fifo_priv *priv)
switch (code) {
case 0x0a:
- nvc0_fifo_intr_sched_ctxsw(priv);
+ gf100_fifo_intr_sched_ctxsw(priv);
break;
default:
break;
}
}
-static const struct nouveau_enum
-nvc0_fifo_fault_engine[] = {
+static const struct nvkm_enum
+gf100_fifo_fault_engine[] = {
{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
- { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
- { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
+ { 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
{ 0x13, "PCOUNTER" },
- { 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
- { 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
- { 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
+ { 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
+ { 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
+ { 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
{ 0x17, "PDAEMON" },
{}
};
-static const struct nouveau_enum
-nvc0_fifo_fault_reason[] = {
+static const struct nvkm_enum
+gf100_fifo_fault_reason[] = {
{ 0x00, "PT_NOT_PRESENT" },
{ 0x01, "PT_TOO_SHORT" },
{ 0x02, "PAGE_NOT_PRESENT" },
@@ -573,8 +565,8 @@ nvc0_fifo_fault_reason[] = {
{}
};
-static const struct nouveau_enum
-nvc0_fifo_fault_hubclient[] = {
+static const struct nvkm_enum
+gf100_fifo_fault_hubclient[] = {
{ 0x01, "PCOPY0" },
{ 0x02, "PCOPY1" },
{ 0x04, "DISPATCH" },
@@ -583,8 +575,8 @@ nvc0_fifo_fault_hubclient[] = {
{ 0x07, "BAR_READ" },
{ 0x08, "BAR_WRITE" },
{ 0x0b, "PVP" },
- { 0x0c, "PPPP" },
- { 0x0d, "PBSP" },
+ { 0x0c, "PMSPPP" },
+ { 0x0d, "PMSVLD" },
{ 0x11, "PCOUNTER" },
{ 0x12, "PDAEMON" },
{ 0x14, "CCACHE" },
@@ -592,8 +584,8 @@ nvc0_fifo_fault_hubclient[] = {
{}
};
-static const struct nouveau_enum
-nvc0_fifo_fault_gpcclient[] = {
+static const struct nvkm_enum
+gf100_fifo_fault_gpcclient[] = {
{ 0x01, "TEX" },
{ 0x0c, "ESETUP" },
{ 0x0e, "CTXCTL" },
@@ -602,7 +594,7 @@ nvc0_fifo_fault_gpcclient[] = {
};
static void
-nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
+gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
@@ -613,19 +605,19 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
u32 write = (stat & 0x00000080);
u32 hub = (stat & 0x00000040);
u32 reason = (stat & 0x0000000f);
- struct nouveau_object *engctx = NULL, *object;
- struct nouveau_engine *engine = NULL;
- const struct nouveau_enum *er, *eu, *ec;
+ struct nvkm_object *engctx = NULL, *object;
+ struct nvkm_engine *engine = NULL;
+ const struct nvkm_enum *er, *eu, *ec;
char erunk[6] = "";
char euunk[6] = "";
char ecunk[6] = "";
char gpcid[3] = "";
- er = nouveau_enum_find(nvc0_fifo_fault_reason, reason);
+ er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
if (!er)
snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
- eu = nouveau_enum_find(nvc0_fifo_fault_engine, unit);
+ eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
if (eu) {
switch (eu->data2) {
case NVDEV_SUBDEV_BAR:
@@ -638,9 +630,9 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nouveau_engine(priv, eu->data2);
+ engine = nvkm_engine(priv, eu->data2);
if (engine)
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
break;
}
} else {
@@ -648,9 +640,9 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
}
if (hub) {
- ec = nouveau_enum_find(nvc0_fifo_fault_hubclient, client);
+ ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
} else {
- ec = nouveau_enum_find(nvc0_fifo_fault_gpcclient, client);
+ ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
snprintf(gpcid, sizeof(gpcid), "%d", gpc);
}
@@ -662,23 +654,23 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
(u64)vahi << 32 | valo, er ? er->name : erunk,
eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
ec ? ec->name : ecunk, (u64)inst << 12,
- nouveau_client_name(engctx));
+ nvkm_client_name(engctx));
object = engctx;
while (object) {
switch (nv_mclass(object)) {
case FERMI_CHANNEL_GPFIFO:
- nvc0_fifo_recover(priv, engine, (void *)object);
+ gf100_fifo_recover(priv, engine, (void *)object);
break;
}
object = object->parent;
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
-static const struct nouveau_bitfield
-nvc0_fifo_pbdma_intr[] = {
+static const struct nvkm_bitfield
+gf100_fifo_pbdma_intr[] = {
/* { 0x00008000, "" } seen with null ib push */
{ 0x00200000, "ILLEGAL_MTHD" },
{ 0x00800000, "EMPTY_SUBC" },
@@ -686,7 +678,7 @@ nvc0_fifo_pbdma_intr[] = {
};
static void
-nvc0_fifo_intr_pbdma(struct nvc0_fifo_priv *priv, int unit)
+gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -697,18 +689,18 @@ nvc0_fifo_intr_pbdma(struct nvc0_fifo_priv *priv, int unit)
u32 show = stat;
if (stat & 0x00800000) {
- if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
+ if (!gf100_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
}
if (show) {
nv_error(priv, "PBDMA%d:", unit);
- nouveau_bitfield_print(nvc0_fifo_pbdma_intr, show);
+ nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
pr_cont("\n");
nv_error(priv,
"PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
unit, chid,
- nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ nvkm_client_name_for_fifo_chid(&priv->base, chid),
subc, mthd, data);
}
@@ -717,7 +709,7 @@ nvc0_fifo_intr_pbdma(struct nvc0_fifo_priv *priv, int unit)
}
static void
-nvc0_fifo_intr_runlist(struct nvc0_fifo_priv *priv)
+gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv)
{
u32 intr = nv_rd32(priv, 0x002a00);
@@ -734,7 +726,7 @@ nvc0_fifo_intr_runlist(struct nvc0_fifo_priv *priv)
}
static void
-nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
+gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn)
{
u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
u32 inte = nv_rd32(priv, 0x002628);
@@ -745,7 +737,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
- nouveau_fifo_uevent(&priv->base);
+ nvkm_fifo_uevent(&priv->base);
ints &= ~1;
}
if (ints) {
@@ -756,20 +748,20 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
}
static void
-nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv)
+gf100_fifo_intr_engine(struct gf100_fifo_priv *priv)
{
u32 mask = nv_rd32(priv, 0x0025a4);
while (mask) {
u32 unit = __ffs(mask);
- nvc0_fifo_intr_engine_unit(priv, unit);
+ gf100_fifo_intr_engine_unit(priv, unit);
mask &= ~(1 << unit);
}
}
static void
-nvc0_fifo_intr(struct nouveau_subdev *subdev)
+gf100_fifo_intr(struct nvkm_subdev *subdev)
{
- struct nvc0_fifo_priv *priv = (void *)subdev;
+ struct gf100_fifo_priv *priv = (void *)subdev;
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
@@ -781,7 +773,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000100) {
- nvc0_fifo_intr_sched(priv);
+ gf100_fifo_intr_sched(priv);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
@@ -804,7 +796,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- nvc0_fifo_intr_fault(priv, unit);
+ gf100_fifo_intr_fault(priv, unit);
nv_wr32(priv, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
@@ -815,7 +807,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x0025a0);
while (mask) {
u32 unit = __ffs(mask);
- nvc0_fifo_intr_pbdma(priv, unit);
+ gf100_fifo_intr_pbdma(priv, unit);
nv_wr32(priv, 0x0025a0, (1 << unit));
mask &= ~(1 << unit);
}
@@ -823,12 +815,12 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x40000000) {
- nvc0_fifo_intr_runlist(priv);
+ gf100_fifo_intr_runlist(priv);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
- nvc0_fifo_intr_engine(priv);
+ gf100_fifo_intr_engine(priv);
stat &= ~0x80000000;
}
@@ -840,94 +832,94 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nvc0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}
static void
-nvc0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}
static const struct nvkm_event_func
-nvc0_fifo_uevent_func = {
- .ctor = nouveau_fifo_uevent_ctor,
- .init = nvc0_fifo_uevent_init,
- .fini = nvc0_fifo_uevent_fini,
+gf100_fifo_uevent_func = {
+ .ctor = nvkm_fifo_uevent_ctor,
+ .init = gf100_fifo_uevent_init,
+ .fini = gf100_fifo_uevent_fini,
};
static int
-nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_fifo_priv *priv;
+ struct gf100_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- INIT_WORK(&priv->fault, nvc0_fifo_recover_work);
+ INIT_WORK(&priv->fault, gf100_fifo_recover_work);
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
- &priv->runlist.mem[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
+ &priv->runlist.mem[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
- &priv->runlist.mem[1]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
+ &priv->runlist.mem[1]);
if (ret)
return ret;
init_waitqueue_head(&priv->runlist.wait);
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
- &priv->user.mem);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
+ &priv->user.mem);
if (ret)
return ret;
- ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
+ ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
if (ret)
return ret;
- ret = nvkm_event_init(&nvc0_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nvc0_fifo_intr;
- nv_engine(priv)->cclass = &nvc0_fifo_cclass;
- nv_engine(priv)->sclass = nvc0_fifo_sclass;
+ nv_subdev(priv)->intr = gf100_fifo_intr;
+ nv_engine(priv)->cclass = &gf100_fifo_cclass;
+ nv_engine(priv)->sclass = gf100_fifo_sclass;
return 0;
}
static void
-nvc0_fifo_dtor(struct nouveau_object *object)
+gf100_fifo_dtor(struct nvkm_object *object)
{
- struct nvc0_fifo_priv *priv = (void *)object;
+ struct gf100_fifo_priv *priv = (void *)object;
- nouveau_gpuobj_unmap(&priv->user.bar);
- nouveau_gpuobj_ref(NULL, &priv->user.mem);
- nouveau_gpuobj_ref(NULL, &priv->runlist.mem[0]);
- nouveau_gpuobj_ref(NULL, &priv->runlist.mem[1]);
+ nvkm_gpuobj_unmap(&priv->user.bar);
+ nvkm_gpuobj_ref(NULL, &priv->user.mem);
+ nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
+ nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);
- nouveau_fifo_destroy(&priv->base);
+ nvkm_fifo_destroy(&priv->base);
}
static int
-nvc0_fifo_init(struct nouveau_object *object)
+gf100_fifo_init(struct nvkm_object *object)
{
- struct nvc0_fifo_priv *priv = (void *)object;
+ struct gf100_fifo_priv *priv = (void *)object;
int ret, i;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nvkm_fifo_init(&priv->base);
if (ret)
return ret;
@@ -941,8 +933,8 @@ nvc0_fifo_init(struct nouveau_object *object)
if (priv->spoon_nr >= 3) {
nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
- nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
- nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPP */
+ nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */
nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
}
@@ -963,13 +955,13 @@ nvc0_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
-nvc0_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+gf100_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_fifo_ctor,
- .dtor = nvc0_fifo_dtor,
- .init = nvc0_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_fifo_ctor,
+ .dtor = gf100_fifo_dtor,
+ .init = gf100_fifo_init,
+ .fini = _nvkm_fifo_fini,
},
};
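
The gf100_fifo_oclass definition just above publishes the engine's constructor, destructor, init and fini hooks through pointers to anonymous C99 compound literals with designated initializers. A small self-contained sketch of that same ops-table idiom, using invented demo_* names rather than the real nvkm types, could look like this (it builds as an ordinary user-space program):

	#include <stdio.h>

	/* Hypothetical stand-ins for nvkm_ofuncs / nvkm_oclass; illustration only. */
	struct demo_ofuncs {
		int  (*init)(void);
		void (*fini)(void);
	};

	struct demo_oclass {
		unsigned handle;
		const struct demo_ofuncs *ofuncs;
	};

	static int  demo_init(void) { puts("init"); return 0; }
	static void demo_fini(void) { puts("fini"); }

	/* Same shape as gf100_fifo_oclass: a pointer to an anonymous compound
	 * literal, whose ops table is itself another compound literal. */
	static const struct demo_oclass *demo_fifo_oclass = &(const struct demo_oclass) {
		.handle = 0xc0,
		.ofuncs = &(const struct demo_ofuncs) {
			.init = demo_init,
			.fini = demo_fini,
		},
	};

	int main(void)
	{
		if (demo_fifo_oclass->ofuncs->init() == 0)
			demo_fifo_oclass->ofuncs->fini();
		return 0;
	}

Compound literals at file scope have static storage duration, so their addresses are valid constant initializers; that is what lets each oclass be declared as a single expression.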
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 6a8db7c80bd1..9585539e59f2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -21,25 +21,19 @@
*
* Authors: Ben Skeggs
*/
+#include "gk104.h"
#include <core/client.h>
-#include <core/handle.h>
-#include <core/namedb.h>
-#include <core/gpuobj.h>
#include <core/engctx.h>
-#include <core/event.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
#include <core/enum.h>
-
-#include <subdev/timer.h>
+#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
-#include <subdev/vm.h>
-
-#include <engine/dmaobj.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
-#include "nve0.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
@@ -47,45 +41,45 @@ static const struct {
u64 mask;
} fifo_engine[] = {
_(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_COPY2)),
- _(NVDEV_ENGINE_VP , 0),
- _(NVDEV_ENGINE_PPP , 0),
- _(NVDEV_ENGINE_BSP , 0),
- _(NVDEV_ENGINE_COPY0 , 0),
- _(NVDEV_ENGINE_COPY1 , 0),
- _(NVDEV_ENGINE_VENC , 0),
+ (1ULL << NVDEV_ENGINE_CE2)),
+ _(NVDEV_ENGINE_MSPDEC , 0),
+ _(NVDEV_ENGINE_MSPPP , 0),
+ _(NVDEV_ENGINE_MSVLD , 0),
+ _(NVDEV_ENGINE_CE0 , 0),
+ _(NVDEV_ENGINE_CE1 , 0),
+ _(NVDEV_ENGINE_MSENC , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-struct nve0_fifo_engn {
- struct nouveau_gpuobj *runlist[2];
+struct gk104_fifo_engn {
+ struct nvkm_gpuobj *runlist[2];
int cur_runlist;
wait_queue_head_t wait;
};
-struct nve0_fifo_priv {
- struct nouveau_fifo base;
+struct gk104_fifo_priv {
+ struct nvkm_fifo base;
struct work_struct fault;
u64 mask;
- struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
+ struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
struct {
- struct nouveau_gpuobj *mem;
- struct nouveau_vma bar;
+ struct nvkm_gpuobj *mem;
+ struct nvkm_vma bar;
} user;
int spoon_nr;
};
-struct nve0_fifo_base {
- struct nouveau_fifo_base base;
- struct nouveau_gpuobj *pgd;
- struct nouveau_vm *vm;
+struct gk104_fifo_base {
+ struct nvkm_fifo_base base;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
};
-struct nve0_fifo_chan {
- struct nouveau_fifo_chan base;
+struct gk104_fifo_chan {
+ struct nvkm_fifo_chan base;
u32 engine;
enum {
STOPPED,
@@ -99,11 +93,11 @@ struct nve0_fifo_chan {
******************************************************************************/
static void
-nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
+gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nve0_fifo_engn *engn = &priv->engine[engine];
- struct nouveau_gpuobj *cur;
+ struct nvkm_bar *bar = nvkm_bar(priv);
+ struct gk104_fifo_engn *engn = &priv->engine[engine];
+ struct nvkm_gpuobj *cur;
int i, p;
mutex_lock(&nv_subdev(priv)->mutex);
@@ -111,7 +105,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
engn->cur_runlist = !engn->cur_runlist;
for (i = 0, p = 0; i < priv->base.max; i++) {
- struct nve0_fifo_chan *chan = (void *)priv->base.channel[i];
+ struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
if (chan && chan->state == RUNNING && chan->engine == engine) {
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000000);
@@ -131,34 +125,34 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
}
static int
-nve0_fifo_context_attach(struct nouveau_object *parent,
- struct nouveau_object *object)
+gk104_fifo_context_attach(struct nvkm_object *parent,
+ struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nve0_fifo_base *base = (void *)parent->parent;
- struct nouveau_engctx *ectx = (void *)object;
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gk104_fifo_base *base = (void *)parent->parent;
+ struct nvkm_engctx *ectx = (void *)object;
u32 addr;
int ret;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW :
return 0;
- case NVDEV_ENGINE_COPY0:
- case NVDEV_ENGINE_COPY1:
- case NVDEV_ENGINE_COPY2:
+ case NVDEV_ENGINE_CE0:
+ case NVDEV_ENGINE_CE1:
+ case NVDEV_ENGINE_CE2:
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_BSP : addr = 0x0270; break;
- case NVDEV_ENGINE_VP : addr = 0x0250; break;
- case NVDEV_ENGINE_PPP : addr = 0x0260; break;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
+ case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
+ case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
default:
return -EINVAL;
}
if (!ectx->vma.node) {
- ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
- NV_MEM_ACCESS_RW, &ectx->vma);
+ ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+ NV_MEM_ACCESS_RW, &ectx->vma);
if (ret)
return ret;
@@ -172,24 +166,24 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
}
static int
-nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
- struct nouveau_object *object)
+gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
+ struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nve0_fifo_priv *priv = (void *)parent->engine;
- struct nve0_fifo_base *base = (void *)parent->parent;
- struct nve0_fifo_chan *chan = (void *)parent;
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gk104_fifo_priv *priv = (void *)parent->engine;
+ struct gk104_fifo_base *base = (void *)parent->parent;
+ struct gk104_fifo_chan *chan = (void *)parent;
u32 addr;
switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_COPY0:
- case NVDEV_ENGINE_COPY1:
- case NVDEV_ENGINE_COPY2: addr = 0x0000; break;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_BSP : addr = 0x0270; break;
- case NVDEV_ENGINE_VP : addr = 0x0250; break;
- case NVDEV_ENGINE_PPP : addr = 0x0260; break;
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_CE0 :
+ case NVDEV_ENGINE_CE1 :
+ case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
+ case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
+ case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -197,7 +191,7 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nouveau_client_name(chan));
+ chan->base.chid, nvkm_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -212,18 +206,17 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
}
static int
-nve0_fifo_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct kepler_channel_gpfifo_a_v0 v0;
} *args = data;
- struct nouveau_bar *bar = nouveau_bar(parent);
- struct nve0_fifo_priv *priv = (void *)engine;
- struct nve0_fifo_base *base = (void *)parent;
- struct nve0_fifo_chan *chan;
+ struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gk104_fifo_priv *priv = (void *)engine;
+ struct gk104_fifo_base *base = (void *)parent;
+ struct gk104_fifo_chan *chan;
u64 usermem, ioffset, ilength;
int ret, i;
@@ -238,7 +231,7 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
for (i = 0; i < FIFO_ENGINE_NR; i++) {
if (args->v0.engine & (1 << i)) {
- if (nouveau_engine(parent, fifo_engine[i].subdev)) {
+ if (nvkm_engine(parent, fifo_engine[i].subdev)) {
args->v0.engine = (1 << i);
break;
}
@@ -250,18 +243,18 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
return -ENODEV;
}
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
- priv->user.bar.offset, 0x200,
- args->v0.pushbuf,
- fifo_engine[i].mask, &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
+ priv->user.bar.offset, 0x200,
+ args->v0.pushbuf,
+ fifo_engine[i].mask, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
args->v0.chid = chan->base.chid;
- nv_parent(chan)->context_attach = nve0_fifo_context_attach;
- nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+ nv_parent(chan)->context_attach = gk104_fifo_context_attach;
+ nv_parent(chan)->context_detach = gk104_fifo_context_detach;
chan->engine = i;
usermem = chan->base.chid * 0x200;
@@ -290,15 +283,15 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
}
static int
-nve0_fifo_chan_init(struct nouveau_object *object)
+gk104_fifo_chan_init(struct nvkm_object *object)
{
- struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
- struct nve0_fifo_priv *priv = (void *)object->engine;
- struct nve0_fifo_chan *chan = (void *)object;
+ struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
+ struct gk104_fifo_priv *priv = (void *)object->engine;
+ struct gk104_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
int ret;
- ret = nouveau_fifo_channel_init(&chan->base);
+ ret = nvkm_fifo_channel_init(&chan->base);
if (ret)
return ret;
@@ -307,7 +300,7 @@ nve0_fifo_chan_init(struct nouveau_object *object)
if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
- nve0_fifo_runlist_update(priv, chan->engine);
+ gk104_fifo_runlist_update(priv, chan->engine);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
}
@@ -315,36 +308,36 @@ nve0_fifo_chan_init(struct nouveau_object *object)
}
static int
-nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
- struct nve0_fifo_priv *priv = (void *)object->engine;
- struct nve0_fifo_chan *chan = (void *)object;
+ struct gk104_fifo_priv *priv = (void *)object->engine;
+ struct gk104_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
- nve0_fifo_runlist_update(priv, chan->engine);
+ gk104_fifo_runlist_update(priv, chan->engine);
}
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
- return nouveau_fifo_channel_fini(&chan->base, suspend);
+ return nvkm_fifo_channel_fini(&chan->base, suspend);
}
-static struct nouveau_ofuncs
-nve0_fifo_ofuncs = {
- .ctor = nve0_fifo_chan_ctor,
- .dtor = _nouveau_fifo_channel_dtor,
- .init = nve0_fifo_chan_init,
- .fini = nve0_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+static struct nvkm_ofuncs
+gk104_fifo_ofuncs = {
+ .ctor = gk104_fifo_chan_ctor,
+ .dtor = _nvkm_fifo_channel_dtor,
+ .init = gk104_fifo_chan_init,
+ .fini = gk104_fifo_chan_fini,
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
-nve0_fifo_sclass[] = {
- { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs },
+static struct nvkm_oclass
+gk104_fifo_sclass[] = {
+ { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_ofuncs },
{}
};
@@ -353,22 +346,21 @@ nve0_fifo_sclass[] = {
******************************************************************************/
static int
-nve0_fifo_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nve0_fifo_base *base;
+ struct gk104_fifo_base *base;
int ret;
- ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+ ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
*pobject = nv_object(base);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
- &base->pgd);
+ ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+ &base->pgd);
if (ret)
return ret;
@@ -377,7 +369,7 @@ nve0_fifo_context_ctor(struct nouveau_object *parent,
nv_wo32(base, 0x0208, 0xffffffff);
nv_wo32(base, 0x020c, 0x000000ff);
- ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
@@ -385,24 +377,24 @@ nve0_fifo_context_ctor(struct nouveau_object *parent,
}
static void
-nve0_fifo_context_dtor(struct nouveau_object *object)
+gk104_fifo_context_dtor(struct nvkm_object *object)
{
- struct nve0_fifo_base *base = (void *)object;
- nouveau_vm_ref(NULL, &base->vm, base->pgd);
- nouveau_gpuobj_ref(NULL, &base->pgd);
- nouveau_fifo_context_destroy(&base->base);
+ struct gk104_fifo_base *base = (void *)object;
+ nvkm_vm_ref(NULL, &base->vm, base->pgd);
+ nvkm_gpuobj_ref(NULL, &base->pgd);
+ nvkm_fifo_context_destroy(&base->base);
}
-static struct nouveau_oclass
-nve0_fifo_cclass = {
+static struct nvkm_oclass
+gk104_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_fifo_context_ctor,
- .dtor = nve0_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_fifo_context_ctor,
+ .dtor = gk104_fifo_context_dtor,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -411,17 +403,17 @@ nve0_fifo_cclass = {
******************************************************************************/
static inline int
-nve0_fifo_engidx(struct nve0_fifo_priv *priv, u32 engn)
+gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
{
switch (engn) {
- case NVDEV_ENGINE_GR :
- case NVDEV_ENGINE_COPY2: engn = 0; break;
- case NVDEV_ENGINE_BSP : engn = 1; break;
- case NVDEV_ENGINE_PPP : engn = 2; break;
- case NVDEV_ENGINE_VP : engn = 3; break;
- case NVDEV_ENGINE_COPY0: engn = 4; break;
- case NVDEV_ENGINE_COPY1: engn = 5; break;
- case NVDEV_ENGINE_VENC : engn = 6; break;
+ case NVDEV_ENGINE_GR :
+ case NVDEV_ENGINE_CE2 : engn = 0; break;
+ case NVDEV_ENGINE_MSVLD : engn = 1; break;
+ case NVDEV_ENGINE_MSPPP : engn = 2; break;
+ case NVDEV_ENGINE_MSPDEC: engn = 3; break;
+ case NVDEV_ENGINE_CE0 : engn = 4; break;
+ case NVDEV_ENGINE_CE1 : engn = 5; break;
+ case NVDEV_ENGINE_MSENC : engn = 6; break;
default:
return -1;
}
@@ -429,19 +421,19 @@ nve0_fifo_engidx(struct nve0_fifo_priv *priv, u32 engn)
return engn;
}
-static inline struct nouveau_engine *
-nve0_fifo_engine(struct nve0_fifo_priv *priv, u32 engn)
+static inline struct nvkm_engine *
+gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn)
{
if (engn >= ARRAY_SIZE(fifo_engine))
return NULL;
- return nouveau_engine(priv, fifo_engine[engn].subdev);
+ return nvkm_engine(priv, fifo_engine[engn].subdev);
}
static void
-nve0_fifo_recover_work(struct work_struct *work)
+gk104_fifo_recover_work(struct work_struct *work)
{
- struct nve0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
- struct nouveau_object *engine;
+ struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault);
+ struct nvkm_object *engine;
unsigned long flags;
u32 engn, engm = 0;
u64 mask, todo;
@@ -452,15 +444,15 @@ nve0_fifo_recover_work(struct work_struct *work)
spin_unlock_irqrestore(&priv->base.lock, flags);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
- engm |= 1 << nve0_fifo_engidx(priv, engn);
+ engm |= 1 << gk104_fifo_engidx(priv, engn);
nv_mask(priv, 0x002630, engm, engm);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
- if ((engine = (void *)nouveau_engine(priv, engn))) {
+ if ((engine = (void *)nvkm_engine(priv, engn))) {
nv_ofuncs(engine)->fini(engine, false);
WARN_ON(nv_ofuncs(engine)->init(engine));
}
- nve0_fifo_runlist_update(priv, nve0_fifo_engidx(priv, engn));
+ gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn));
}
nv_wr32(priv, 0x00262c, engm);
@@ -468,10 +460,9 @@ nve0_fifo_recover_work(struct work_struct *work)
}
static void
-nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
- struct nve0_fifo_chan *chan)
+gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine,
+ struct gk104_fifo_chan *chan)
{
- struct nouveau_object *engobj = nv_object(engine);
u32 chid = chan->base.chid;
unsigned long flags;
@@ -482,16 +473,16 @@ nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
chan->state = KILLED;
spin_lock_irqsave(&priv->base.lock, flags);
- priv->mask |= 1ULL << nv_engidx(engobj);
+ priv->mask |= 1ULL << nv_engidx(engine);
spin_unlock_irqrestore(&priv->base.lock, flags);
schedule_work(&priv->fault);
}
static int
-nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
- struct nve0_fifo_chan *chan = NULL;
- struct nouveau_handle *bind;
+ struct gk104_fifo_chan *chan = NULL;
+ struct nvkm_handle *bind;
unsigned long flags;
int ret = -EINVAL;
@@ -501,11 +492,11 @@ nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
if (unlikely(!chan))
goto out;
- bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
if (likely(bind)) {
if (!mthd || !nv_call(bind->object, mthd, data))
ret = 0;
- nouveau_namedb_put(bind);
+ nvkm_namedb_put(bind);
}
out:
@@ -513,8 +504,8 @@ out:
return ret;
}
-static const struct nouveau_enum
-nve0_fifo_bind_reason[] = {
+static const struct nvkm_enum
+gk104_fifo_bind_reason[] = {
{ 0x01, "BIND_NOT_UNBOUND" },
{ 0x02, "SNOOP_WITHOUT_BAR1" },
{ 0x03, "UNBIND_WHILE_RUNNING" },
@@ -525,31 +516,31 @@ nve0_fifo_bind_reason[] = {
};
static void
-nve0_fifo_intr_bind(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_bind(struct gk104_fifo_priv *priv)
{
u32 intr = nv_rd32(priv, 0x00252c);
u32 code = intr & 0x000000ff;
- const struct nouveau_enum *en;
+ const struct nvkm_enum *en;
char enunk[6] = "";
- en = nouveau_enum_find(nve0_fifo_bind_reason, code);
+ en = nvkm_enum_find(gk104_fifo_bind_reason, code);
if (!en)
snprintf(enunk, sizeof(enunk), "UNK%02x", code);
nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
}
-static const struct nouveau_enum
-nve0_fifo_sched_reason[] = {
+static const struct nvkm_enum
+gk104_fifo_sched_reason[] = {
{ 0x0a, "CTXSW_TIMEOUT" },
{}
};
static void
-nve0_fifo_intr_sched_ctxsw(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
{
- struct nouveau_engine *engine;
- struct nve0_fifo_chan *chan;
+ struct nvkm_engine *engine;
+ struct gk104_fifo_chan *chan;
u32 engn;
for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
@@ -566,22 +557,22 @@ nve0_fifo_intr_sched_ctxsw(struct nve0_fifo_priv *priv)
if (busy && chsw) {
if (!(chan = (void *)priv->base.channel[chid]))
continue;
- if (!(engine = nve0_fifo_engine(priv, engn)))
+ if (!(engine = gk104_fifo_engine(priv, engn)))
continue;
- nve0_fifo_recover(priv, engine, chan);
+ gk104_fifo_recover(priv, engine, chan);
}
}
}
static void
-nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
{
u32 intr = nv_rd32(priv, 0x00254c);
u32 code = intr & 0x000000ff;
- const struct nouveau_enum *en;
+ const struct nvkm_enum *en;
char enunk[6] = "";
- en = nouveau_enum_find(nve0_fifo_sched_reason, code);
+ en = nvkm_enum_find(gk104_fifo_sched_reason, code);
if (!en)
snprintf(enunk, sizeof(enunk), "UNK%02x", code);
@@ -589,7 +580,7 @@ nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
switch (code) {
case 0x0a:
- nve0_fifo_intr_sched_ctxsw(priv);
+ gk104_fifo_intr_sched_ctxsw(priv);
break;
default:
break;
@@ -597,7 +588,7 @@ nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
}
static void
-nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv)
{
u32 stat = nv_rd32(priv, 0x00256c);
nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
@@ -605,14 +596,14 @@ nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
}
static void
-nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv)
{
u32 stat = nv_rd32(priv, 0x00259c);
nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
}
-static const struct nouveau_enum
-nve0_fifo_fault_engine[] = {
+static const struct nvkm_enum
+gk104_fifo_fault_engine[] = {
{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
@@ -620,20 +611,20 @@ nve0_fifo_fault_engine[] = {
{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
- { 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
- { 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
+ { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
{ 0x13, "PERF" },
- { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
- { 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
- { 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
+ { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
+ { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
+ { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
{ 0x17, "PMU" },
- { 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
- { 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
+ { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
+ { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
{}
};
-static const struct nouveau_enum
-nve0_fifo_fault_reason[] = {
+static const struct nvkm_enum
+gk104_fifo_fault_reason[] = {
{ 0x00, "PDE" },
{ 0x01, "PDE_SIZE" },
{ 0x02, "PTE" },
@@ -653,8 +644,8 @@ nve0_fifo_fault_reason[] = {
{}
};
-static const struct nouveau_enum
-nve0_fifo_fault_hubclient[] = {
+static const struct nvkm_enum
+gk104_fifo_fault_hubclient[] = {
{ 0x00, "VIP" },
{ 0x01, "CE0" },
{ 0x02, "CE1" },
@@ -679,7 +670,7 @@ nve0_fifo_fault_hubclient[] = {
{ 0x15, "SCC_NB" },
{ 0x16, "SEC" },
{ 0x17, "SSYNC" },
- { 0x18, "GR_COPY" },
+ { 0x18, "GR_CE" },
{ 0x19, "CE2" },
{ 0x1a, "XV" },
{ 0x1b, "MMU_NB" },
@@ -690,8 +681,8 @@ nve0_fifo_fault_hubclient[] = {
{}
};
-static const struct nouveau_enum
-nve0_fifo_fault_gpcclient[] = {
+static const struct nvkm_enum
+gk104_fifo_fault_gpcclient[] = {
{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
@@ -717,7 +708,7 @@ nve0_fifo_fault_gpcclient[] = {
};
static void
-nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
+gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
@@ -728,19 +719,19 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
u32 write = (stat & 0x00000080);
u32 hub = (stat & 0x00000040);
u32 reason = (stat & 0x0000000f);
- struct nouveau_object *engctx = NULL, *object;
- struct nouveau_engine *engine = NULL;
- const struct nouveau_enum *er, *eu, *ec;
+ struct nvkm_object *engctx = NULL, *object;
+ struct nvkm_engine *engine = NULL;
+ const struct nvkm_enum *er, *eu, *ec;
char erunk[6] = "";
char euunk[6] = "";
char ecunk[6] = "";
char gpcid[3] = "";
- er = nouveau_enum_find(nve0_fifo_fault_reason, reason);
+ er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
if (!er)
snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
- eu = nouveau_enum_find(nve0_fifo_fault_engine, unit);
+ eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
if (eu) {
switch (eu->data2) {
case NVDEV_SUBDEV_BAR:
@@ -753,9 +744,9 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nouveau_engine(priv, eu->data2);
+ engine = nvkm_engine(priv, eu->data2);
if (engine)
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
break;
}
} else {
@@ -763,9 +754,9 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
}
if (hub) {
- ec = nouveau_enum_find(nve0_fifo_fault_hubclient, client);
+ ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
} else {
- ec = nouveau_enum_find(nve0_fifo_fault_gpcclient, client);
+ ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
snprintf(gpcid, sizeof(gpcid), "%d", gpc);
}
@@ -777,22 +768,22 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
(u64)vahi << 32 | valo, er ? er->name : erunk,
eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
ec ? ec->name : ecunk, (u64)inst << 12,
- nouveau_client_name(engctx));
+ nvkm_client_name(engctx));
object = engctx;
while (object) {
switch (nv_mclass(object)) {
case KEPLER_CHANNEL_GPFIFO_A:
- nve0_fifo_recover(priv, engine, (void *)object);
+ gk104_fifo_recover(priv, engine, (void *)object);
break;
}
object = object->parent;
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
-static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
+static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
{ 0x00000004, "MEMACK_EXTRA" },
@@ -827,7 +818,7 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
};
static void
-nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit)
{
u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
@@ -839,26 +830,26 @@ nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
u32 show = stat;
if (stat & 0x00800000) {
- if (!nve0_fifo_swmthd(priv, chid, mthd, data))
+ if (!gk104_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
}
if (show) {
nv_error(priv, "PBDMA%d:", unit);
- nouveau_bitfield_print(nve0_fifo_pbdma_intr_0, show);
+ nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
pr_cont("\n");
nv_error(priv,
"PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
unit, chid,
- nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ nvkm_client_name_for_fifo_chid(&priv->base, chid),
subc, mthd, data);
}
nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
-static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
+static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
{ 0x00000002, "HCE_RE_ALIGNB" },
{ 0x00000004, "HCE_PRIV" },
@@ -868,7 +859,7 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
};
static void
-nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit)
{
u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
@@ -876,7 +867,7 @@ nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
if (stat) {
nv_error(priv, "PBDMA%d:", unit);
- nouveau_bitfield_print(nve0_fifo_pbdma_intr_1, stat);
+ nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
pr_cont("\n");
nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
nv_rd32(priv, 0x040150 + (unit * 0x2000)),
@@ -887,7 +878,7 @@ nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
}
static void
-nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv)
{
u32 mask = nv_rd32(priv, 0x002a00);
while (mask) {
@@ -899,20 +890,20 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
}
static void
-nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
+gk104_fifo_intr_engine(struct gk104_fifo_priv *priv)
{
- nouveau_fifo_uevent(&priv->base);
+ nvkm_fifo_uevent(&priv->base);
}
static void
-nve0_fifo_intr(struct nouveau_subdev *subdev)
+gk104_fifo_intr(struct nvkm_subdev *subdev)
{
- struct nve0_fifo_priv *priv = (void *)subdev;
+ struct gk104_fifo_priv *priv = (void *)subdev;
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000001) {
- nve0_fifo_intr_bind(priv);
+ gk104_fifo_intr_bind(priv);
nv_wr32(priv, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
@@ -924,13 +915,13 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000100) {
- nve0_fifo_intr_sched(priv);
+ gk104_fifo_intr_sched(priv);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x00010000) {
- nve0_fifo_intr_chsw(priv);
+ gk104_fifo_intr_chsw(priv);
nv_wr32(priv, 0x002100, 0x00010000);
stat &= ~0x00010000;
}
@@ -948,7 +939,7 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x08000000) {
- nve0_fifo_intr_dropped_fault(priv);
+ gk104_fifo_intr_dropped_fault(priv);
nv_wr32(priv, 0x002100, 0x08000000);
stat &= ~0x08000000;
}
@@ -957,7 +948,7 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- nve0_fifo_intr_fault(priv, unit);
+ gk104_fifo_intr_fault(priv, unit);
nv_wr32(priv, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
@@ -968,8 +959,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x0025a0);
while (mask) {
u32 unit = __ffs(mask);
- nve0_fifo_intr_pbdma_0(priv, unit);
- nve0_fifo_intr_pbdma_1(priv, unit);
+ gk104_fifo_intr_pbdma_0(priv, unit);
+ gk104_fifo_intr_pbdma_1(priv, unit);
nv_wr32(priv, 0x0025a0, (1 << unit));
mask &= ~(1 << unit);
}
@@ -977,13 +968,13 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x40000000) {
- nve0_fifo_intr_runlist(priv);
+ gk104_fifo_intr_runlist(priv);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
nv_wr32(priv, 0x002100, 0x80000000);
- nve0_fifo_intr_engine(priv);
+ gk104_fifo_intr_engine(priv);
stat &= ~0x80000000;
}
@@ -995,33 +986,33 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}
static void
-nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}
static const struct nvkm_event_func
-nve0_fifo_uevent_func = {
- .ctor = nouveau_fifo_uevent_ctor,
- .init = nve0_fifo_uevent_init,
- .fini = nve0_fifo_uevent_fini,
+gk104_fifo_uevent_func = {
+ .ctor = nvkm_fifo_uevent_ctor,
+ .init = gk104_fifo_uevent_init,
+ .fini = gk104_fifo_uevent_fini,
};
int
-nve0_fifo_fini(struct nouveau_object *object, bool suspend)
+gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
- struct nve0_fifo_priv *priv = (void *)object;
+ struct gk104_fifo_priv *priv = (void *)object;
int ret;
- ret = nouveau_fifo_fini(&priv->base, suspend);
+ ret = nvkm_fifo_fini(&priv->base, suspend);
if (ret)
return ret;
@@ -1031,12 +1022,12 @@ nve0_fifo_fini(struct nouveau_object *object, bool suspend)
}
int
-nve0_fifo_init(struct nouveau_object *object)
+gk104_fifo_init(struct nvkm_object *object)
{
- struct nve0_fifo_priv *priv = (void *)object;
+ struct gk104_fifo_priv *priv = (void *)object;
int ret, i;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nvkm_fifo_init(&priv->base);
if (ret)
return ret;
@@ -1066,82 +1057,82 @@ nve0_fifo_init(struct nouveau_object *object)
}
void
-nve0_fifo_dtor(struct nouveau_object *object)
+gk104_fifo_dtor(struct nvkm_object *object)
{
- struct nve0_fifo_priv *priv = (void *)object;
+ struct gk104_fifo_priv *priv = (void *)object;
int i;
- nouveau_gpuobj_unmap(&priv->user.bar);
- nouveau_gpuobj_ref(NULL, &priv->user.mem);
+ nvkm_gpuobj_unmap(&priv->user.bar);
+ nvkm_gpuobj_ref(NULL, &priv->user.mem);
for (i = 0; i < FIFO_ENGINE_NR; i++) {
- nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
+ nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
+ nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
}
- nouveau_fifo_destroy(&priv->base);
+ nvkm_fifo_destroy(&priv->base);
}
int
-nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nve0_fifo_impl *impl = (void *)oclass;
- struct nve0_fifo_priv *priv;
+ struct gk104_fifo_impl *impl = (void *)oclass;
+ struct gk104_fifo_priv *priv;
int ret, i;
- ret = nouveau_fifo_create(parent, engine, oclass, 0,
- impl->channels - 1, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 0,
+ impl->channels - 1, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- INIT_WORK(&priv->fault, nve0_fifo_recover_work);
+ INIT_WORK(&priv->fault, gk104_fifo_recover_work);
for (i = 0; i < FIFO_ENGINE_NR; i++) {
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].runlist[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+ 0, &priv->engine[i].runlist[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].runlist[1]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+ 0, &priv->engine[i].runlist[1]);
if (ret)
return ret;
init_waitqueue_head(&priv->engine[i].wait);
}
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
if (ret)
return ret;
- ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
+ ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
if (ret)
return ret;
- ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nve0_fifo_intr;
- nv_engine(priv)->cclass = &nve0_fifo_cclass;
- nv_engine(priv)->sclass = nve0_fifo_sclass;
+ nv_subdev(priv)->intr = gk104_fifo_intr;
+ nv_engine(priv)->cclass = &gk104_fifo_cclass;
+ nv_engine(priv)->sclass = gk104_fifo_sclass;
return 0;
}
-struct nouveau_oclass *
-nve0_fifo_oclass = &(struct nve0_fifo_impl) {
+struct nvkm_oclass *
+gk104_fifo_oclass = &(struct gk104_fifo_impl) {
.base.handle = NV_ENGINE(FIFO, 0xe0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_fifo_ctor,
- .dtor = nve0_fifo_dtor,
- .init = nve0_fifo_init,
- .fini = nve0_fifo_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_fifo_ctor,
+ .dtor = gk104_fifo_dtor,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
},
.channels = 4096,
}.base;
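
Throughout gk104_fifo_intr() above, pending units are serviced with the same loop shape: read a status mask, take the lowest set bit with __ffs(), handle that unit, then clear the bit and repeat. A standalone sketch of that bit-scan dispatch loop, with __builtin_ctz standing in for the kernel's __ffs and a printf standing in for per-unit handlers such as gk104_fifo_intr_pbdma_0(), is:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for a per-unit handler. */
	static void handle_unit(unsigned unit)
	{
		printf("servicing unit %u\n", unit);
	}

	/* Dispatch every set bit in 'stat', lowest bit first, clearing each bit
	 * after it has been handled -- the same shape as the fault and PBDMA
	 * loops in the interrupt handlers above. */
	static void dispatch(uint32_t stat)
	{
		while (stat) {
			unsigned unit = __builtin_ctz(stat); /* index of lowest set bit */
			handle_unit(unit);
			stat &= ~(1u << unit);
		}
	}

	int main(void)
	{
		dispatch(0x00000106); /* units 1, 2 and 8 */
		return 0;
	}

The same idiom appears in both the gf100 and gk104 interrupt paths; only the per-unit handler and the acknowledge write differ.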
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
new file mode 100644
index 000000000000..3046e00ed6ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_FIFO_NVE0_H__
+#define __NVKM_FIFO_NVE0_H__
+#include <engine/fifo.h>
+
+int gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void gk104_fifo_dtor(struct nvkm_object *);
+int gk104_fifo_init(struct nvkm_object *);
+int gk104_fifo_fini(struct nvkm_object *, bool);
+
+struct gk104_fifo_impl {
+ struct nvkm_oclass base;
+ u32 channels;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 09362a51ba57..927092217a06 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -21,17 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "gk104.h"
-#include "nve0.h"
-
-struct nouveau_oclass *
-nv108_fifo_oclass = &(struct nve0_fifo_impl) {
+struct nvkm_oclass *
+gk208_fifo_oclass = &(struct gk104_fifo_impl) {
.base.handle = NV_ENGINE(FIFO, 0x08),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_fifo_ctor,
- .dtor = nve0_fifo_dtor,
- .init = nve0_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_fifo_ctor,
+ .dtor = gk104_fifo_dtor,
+ .init = gk104_fifo_init,
+ .fini = _nvkm_fifo_fini,
},
.channels = 1024,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index 327456eae963..b30dc87a1357 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -19,17 +19,16 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include "gk104.h"
-#include "nve0.h"
-
-struct nouveau_oclass *
-gk20a_fifo_oclass = &(struct nve0_fifo_impl) {
+struct nvkm_oclass *
+gk20a_fifo_oclass = &(struct gk104_fifo_impl) {
.base.handle = NV_ENGINE(FIFO, 0xea),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_fifo_ctor,
- .dtor = nve0_fifo_dtor,
- .init = nve0_fifo_init,
- .fini = nve0_fifo_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_fifo_ctor,
+ .dtor = gk104_fifo_dtor,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
},
.channels = 128,
}.base;
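
gk208 and gk20a above reuse gk104_fifo_ctor() unchanged and vary only gk104_fifo_impl.channels: each exports the embedded .base member of a larger impl structure, and the shared constructor casts the oclass it receives back to the impl type to read the parameter. A compilable sketch of that embedding trick, again with invented demo_* names instead of the nvkm ones:

	#include <stdio.h>

	/* Base class every implementation exports (stand-in for nvkm_oclass). */
	struct demo_oclass {
		unsigned handle;
	};

	/* Derived "impl" carrying the parameters the shared constructor needs. */
	struct demo_impl {
		struct demo_oclass base;	/* must remain the first member */
		unsigned channels;
	};

	/* Shared constructor: recovers the impl from the base pointer it is
	 * handed, the way gk104_fifo_ctor() reads impl->channels above. */
	static void demo_ctor(const struct demo_oclass *oclass)
	{
		const struct demo_impl *impl = (const struct demo_impl *)oclass;
		printf("handle %#x: %u channels\n", oclass->handle, impl->channels);
	}

	/* Two variants differing only in their parameters, as gk208/gk20a do. */
	static const struct demo_oclass *variant_a = &(const struct demo_impl) {
		.base.handle = 0x08,
		.channels    = 1024,
	}.base;

	static const struct demo_oclass *variant_b = &(const struct demo_impl) {
		.base.handle = 0xea,
		.channels    = 128,
	}.base;

	int main(void)
	{
		demo_ctor(variant_a);
		demo_ctor(variant_b);
		return 0;
	}

Keeping the base object as the first member is what makes the cast back from base to impl in the shared constructor well defined.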
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 1931057f9962..b038b6eb51db 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -21,24 +21,18 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
+#include <core/device.h>
#include <core/engctx.h>
-#include <core/namedb.h>
#include <core/handle.h>
#include <core/ramht.h>
-#include <core/event.h>
-
-#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <engine/fifo.h>
-
-#include "nv04.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
static struct ramfc_desc
nv04_ramfc[] = {
@@ -58,8 +52,8 @@ nv04_ramfc[] = {
******************************************************************************/
int
-nv04_fifo_object_attach(struct nouveau_object *parent,
- struct nouveau_object *object, u32 handle)
+nv04_fifo_object_attach(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 handle)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
@@ -90,33 +84,33 @@ nv04_fifo_object_attach(struct nouveau_object *parent,
context |= chid << 24;
mutex_lock(&nv_subdev(priv)->mutex);
- ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
mutex_unlock(&nv_subdev(priv)->mutex);
return ret;
}
void
-nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
+nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
mutex_lock(&nv_subdev(priv)->mutex);
- nouveau_ramht_remove(priv->ramht, cookie);
+ nvkm_ramht_remove(priv->ramht, cookie);
mutex_unlock(&nv_subdev(priv)->mutex);
}
int
-nv04_fifo_context_attach(struct nouveau_object *parent,
- struct nouveau_object *object)
+nv04_fifo_context_attach(struct nvkm_object *parent,
+ struct nvkm_object *object)
{
- nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
+ nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
return 0;
}
static int
-nv04_fifo_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_fifo_chan_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv03_channel_dma_v0 v0;
@@ -133,11 +127,11 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -163,7 +157,7 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
}
void
-nv04_fifo_chan_dtor(struct nouveau_object *object)
+nv04_fifo_chan_dtor(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object->engine;
struct nv04_fifo_chan *chan = (void *)object;
@@ -173,11 +167,11 @@ nv04_fifo_chan_dtor(struct nouveau_object *object)
nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
} while ((++c)->bits);
- nouveau_fifo_channel_destroy(&chan->base);
+ nvkm_fifo_channel_destroy(&chan->base);
}
int
-nv04_fifo_chan_init(struct nouveau_object *object)
+nv04_fifo_chan_init(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object->engine;
struct nv04_fifo_chan *chan = (void *)object;
@@ -185,7 +179,7 @@ nv04_fifo_chan_init(struct nouveau_object *object)
unsigned long flags;
int ret;
- ret = nouveau_fifo_channel_init(&chan->base);
+ ret = nvkm_fifo_channel_init(&chan->base);
if (ret)
return ret;
@@ -196,11 +190,11 @@ nv04_fifo_chan_init(struct nouveau_object *object)
}
int
-nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nv04_fifo_priv *priv = (void *)object->engine;
struct nv04_fifo_chan *chan = (void *)object;
- struct nouveau_gpuobj *fctx = priv->ramfc;
+ struct nvkm_gpuobj *fctx = priv->ramfc;
struct ramfc_desc *c;
unsigned long flags;
u32 data = chan->ramfc;
@@ -243,22 +237,22 @@ nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
nv_wr32(priv, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&priv->base.lock, flags);
- return nouveau_fifo_channel_fini(&chan->base, suspend);
+ return nvkm_fifo_channel_fini(&chan->base, suspend);
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
.ctor = nv04_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv04_fifo_sclass[] = {
{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
{}
@@ -269,16 +263,16 @@ nv04_fifo_sclass[] = {
******************************************************************************/
int
-nv04_fifo_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_fifo_context_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_fifo_base *base;
int ret;
- ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_HEAP, &base);
+ ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
@@ -286,16 +280,16 @@ nv04_fifo_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv04_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
- .dtor = _nouveau_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .dtor = _nvkm_fifo_context_dtor,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -304,7 +298,7 @@ nv04_fifo_cclass = {
******************************************************************************/
void
-nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
+nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
struct nv04_fifo_priv *priv = (void *)pfifo;
@@ -337,7 +331,7 @@ __acquires(priv->base.lock)
}
void
-nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
+nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
__releases(priv->base.lock)
{
struct nv04_fifo_priv *priv = (void *)pfifo;
@@ -363,7 +357,7 @@ static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
struct nv04_fifo_chan *chan = NULL;
- struct nouveau_handle *bind;
+ struct nvkm_handle *bind;
const int subc = (addr >> 13) & 0x7;
const int mthd = addr & 0x1ffc;
bool handled = false;
@@ -378,7 +372,7 @@ nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
switch (mthd) {
case 0x0000:
- bind = nouveau_namedb_get(nv_namedb(chan), data);
+ bind = nvkm_namedb_get(nv_namedb(chan), data);
if (unlikely(!bind))
break;
@@ -390,18 +384,18 @@ nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
}
- nouveau_namedb_put(bind);
+ nvkm_namedb_put(bind);
break;
default:
engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
- bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
+ bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
if (likely(bind)) {
if (!nv_call(bind->object, mthd, data))
handled = true;
- nouveau_namedb_put(bind);
+ nvkm_namedb_put(bind);
}
break;
}
@@ -412,8 +406,8 @@ out:
}
static void
-nv04_fifo_cache_error(struct nouveau_device *device,
- struct nv04_fifo_priv *priv, u32 chid, u32 get)
+nv04_fifo_cache_error(struct nvkm_device *device,
+ struct nv04_fifo_priv *priv, u32 chid, u32 get)
{
u32 mthd, data;
int ptr;
@@ -435,7 +429,7 @@ nv04_fifo_cache_error(struct nouveau_device *device,
if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
const char *client_name =
- nouveau_client_name_for_fifo_chid(&priv->base, chid);
+ nvkm_client_name_for_fifo_chid(&priv->base, chid);
nv_error(priv,
"CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
@@ -458,8 +452,8 @@ nv04_fifo_cache_error(struct nouveau_device *device,
}
static void
-nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
- u32 chid)
+nv04_fifo_dma_pusher(struct nvkm_device *device,
+ struct nv04_fifo_priv *priv, u32 chid)
{
const char *client_name;
u32 dma_get = nv_rd32(priv, 0x003244);
@@ -467,7 +461,7 @@ nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
u32 push = nv_rd32(priv, 0x003220);
u32 state = nv_rd32(priv, 0x003228);
- client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
+ client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);
if (device->card_type == NV_50) {
u32 ho_get = nv_rd32(priv, 0x003328);
@@ -504,9 +498,9 @@ nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
}
void
-nv04_fifo_intr(struct nouveau_subdev *subdev)
+nv04_fifo_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_device *device = nv_device(subdev);
+ struct nvkm_device *device = nv_device(subdev);
struct nv04_fifo_priv *priv = (void *)subdev;
uint32_t status, reassign;
int cnt = 0;
@@ -552,7 +546,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
if (status & 0x40000000) {
nv_wr32(priv, 0x002100, 0x40000000);
- nouveau_fifo_uevent(&priv->base);
+ nvkm_fifo_uevent(&priv->base);
status &= ~0x40000000;
}
}
@@ -577,22 +571,22 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
static int
-nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nouveau_ramht_ref(imem->ramht, &priv->ramht);
- nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
- nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+ nvkm_ramht_ref(imem->ramht, &priv->ramht);
+ nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
+ nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
@@ -605,22 +599,22 @@ nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
void
-nv04_fifo_dtor(struct nouveau_object *object)
+nv04_fifo_dtor(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->ramfc);
- nouveau_gpuobj_ref(NULL, &priv->ramro);
- nouveau_ramht_ref(NULL, &priv->ramht);
- nouveau_fifo_destroy(&priv->base);
+ nvkm_gpuobj_ref(NULL, &priv->ramfc);
+ nvkm_gpuobj_ref(NULL, &priv->ramro);
+ nvkm_ramht_ref(NULL, &priv->ramht);
+ nvkm_fifo_destroy(&priv->base);
}
int
-nv04_fifo_init(struct nouveau_object *object)
+nv04_fifo_init(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object;
int ret;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nvkm_fifo_init(&priv->base);
if (ret)
return ret;
@@ -629,7 +623,7 @@ nv04_fifo_init(struct nouveau_object *object)
nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((priv->ramht->bits - 9) << 16) |
- (priv->ramht->base.addr >> 8));
+ (priv->ramht->gpuobj.addr >> 8));
nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
@@ -644,13 +638,13 @@ nv04_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
-nv04_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv04_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv04_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = _nvkm_fifo_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index 496a4b4fdfaf..e0e0c47cb4ca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,6 +1,5 @@
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
-
#include <engine/fifo.h>
#define NV04_PFIFO_DELAY_0 0x00002040
@@ -141,38 +140,36 @@ struct ramfc_desc {
};
struct nv04_fifo_priv {
- struct nouveau_fifo base;
+ struct nvkm_fifo base;
struct ramfc_desc *ramfc_desc;
- struct nouveau_ramht *ramht;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
+ struct nvkm_ramht *ramht;
+ struct nvkm_gpuobj *ramro;
+ struct nvkm_gpuobj *ramfc;
};
struct nv04_fifo_base {
- struct nouveau_fifo_base base;
+ struct nvkm_fifo_base base;
};
struct nv04_fifo_chan {
- struct nouveau_fifo_chan base;
+ struct nvkm_fifo_chan base;
u32 subc[8];
u32 ramfc;
};
-int nv04_fifo_object_attach(struct nouveau_object *,
- struct nouveau_object *, u32);
-void nv04_fifo_object_detach(struct nouveau_object *, int);
-
-void nv04_fifo_chan_dtor(struct nouveau_object *);
-int nv04_fifo_chan_init(struct nouveau_object *);
-int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
+int nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
+void nv04_fifo_object_detach(struct nvkm_object *, int);
-int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
+void nv04_fifo_chan_dtor(struct nvkm_object *);
+int nv04_fifo_chan_init(struct nvkm_object *);
+int nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);
-void nv04_fifo_dtor(struct nouveau_object *);
-int nv04_fifo_init(struct nouveau_object *);
-void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
-void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
+int nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv04_fifo_dtor(struct nvkm_object *);
+int nv04_fifo_init(struct nvkm_object *);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index 2a32add51c81..48ce4af6f543 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -21,20 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
-
-#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
-#include <subdev/fb.h>
-#include <engine/fifo.h>
-
-#include "nv04.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
static struct ramfc_desc
nv10_ramfc[] = {
@@ -55,10 +50,10 @@ nv10_ramfc[] = {
******************************************************************************/
static int
-nv10_fifo_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_fifo_chan_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv03_channel_dma_v0 v0;
@@ -75,11 +70,11 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -104,19 +99,19 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv10_fifo_ofuncs = {
.ctor = nv10_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv10_fifo_sclass[] = {
{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
{}
@@ -126,16 +121,16 @@ nv10_fifo_sclass[] = {
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
-static struct nouveau_oclass
+static struct nvkm_oclass
nv10_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
- .dtor = _nouveau_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .dtor = _nvkm_fifo_context_dtor,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -144,22 +139,22 @@ nv10_fifo_cclass = {
******************************************************************************/
static int
-nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nouveau_ramht_ref(imem->ramht, &priv->ramht);
- nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
- nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+ nvkm_ramht_ref(imem->ramht, &priv->ramht);
+ nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
+ nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
@@ -171,13 +166,13 @@ nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-nv10_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv10_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv10_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv04_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = _nvkm_fifo_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index 12d76c8adb23..4a20a6fd3887 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -21,20 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
-
-#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
-#include <subdev/fb.h>
-#include <engine/fifo.h>
-
-#include "nv04.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
static struct ramfc_desc
nv17_ramfc[] = {
@@ -60,10 +55,10 @@ nv17_ramfc[] = {
******************************************************************************/
static int
-nv17_fifo_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv17_fifo_chan_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv03_channel_dma_v0 v0;
@@ -80,13 +75,13 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
- &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
+ &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -111,19 +106,19 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv17_fifo_ofuncs = {
.ctor = nv17_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv17_fifo_sclass[] = {
{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
{}
@@ -133,16 +128,16 @@ nv17_fifo_sclass[] = {
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
-static struct nouveau_oclass
+static struct nvkm_oclass
nv17_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x17),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
- .dtor = _nouveau_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .dtor = _nvkm_fifo_context_dtor,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -151,22 +146,22 @@ nv17_fifo_cclass = {
******************************************************************************/
static int
-nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nouveau_ramht_ref(imem->ramht, &priv->ramht);
- nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
- nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+ nvkm_ramht_ref(imem->ramht, &priv->ramht);
+ nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
+ nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
@@ -179,12 +174,12 @@ nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static int
-nv17_fifo_init(struct nouveau_object *object)
+nv17_fifo_init(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object;
int ret;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nvkm_fifo_init(&priv->base);
if (ret)
return ret;
@@ -193,7 +188,7 @@ nv17_fifo_init(struct nouveau_object *object)
nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((priv->ramht->bits - 9) << 16) |
- (priv->ramht->base.addr >> 8));
+ (priv->ramht->gpuobj.addr >> 8));
nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
@@ -208,13 +203,13 @@ nv17_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
-nv17_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv17_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0x17),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv17_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv17_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = _nvkm_fifo_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 9f49c3a24dc6..5bfc96265f3b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -21,20 +21,17 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
#include <core/client.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
+#include <core/device.h>
#include <core/engctx.h>
#include <core/ramht.h>
-
-#include <subdev/instmem.h>
-#include <subdev/instmem/nv04.h>
#include <subdev/fb.h>
+#include <subdev/instmem/nv04.h>
-#include <engine/fifo.h>
-
-#include "nv04.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
static struct ramfc_desc
nv40_ramfc[] = {
@@ -68,8 +65,8 @@ nv40_ramfc[] = {
******************************************************************************/
static int
-nv40_fifo_object_attach(struct nouveau_object *parent,
- struct nouveau_object *object, u32 handle)
+nv40_fifo_object_attach(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 handle)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
@@ -99,14 +96,13 @@ nv40_fifo_object_attach(struct nouveau_object *parent,
context |= chid << 23;
mutex_lock(&nv_subdev(priv)->mutex);
- ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
mutex_unlock(&nv_subdev(priv)->mutex);
return ret;
}
static int
-nv40_fifo_context_attach(struct nouveau_object *parent,
- struct nouveau_object *engctx)
+nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
@@ -142,8 +138,8 @@ nv40_fifo_context_attach(struct nouveau_object *parent,
}
static int
-nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
- struct nouveau_object *engctx)
+nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
+ struct nvkm_object *engctx)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
@@ -178,10 +174,9 @@ nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
}
static int
-nv40_fifo_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv03_channel_dma_v0 v0;
@@ -198,12 +193,12 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x1000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x1000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -230,19 +225,19 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv40_fifo_ofuncs = {
.ctor = nv40_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv40_fifo_sclass[] = {
{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
{}
@@ -252,16 +247,16 @@ nv40_fifo_sclass[] = {
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
-static struct nouveau_oclass
+static struct nvkm_oclass
nv40_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
- .dtor = _nouveau_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .dtor = _nvkm_fifo_context_dtor,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -270,22 +265,22 @@ nv40_fifo_cclass = {
******************************************************************************/
static int
-nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nouveau_ramht_ref(imem->ramht, &priv->ramht);
- nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
- nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+ nvkm_ramht_ref(imem->ramht, &priv->ramht);
+ nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
+ nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
@@ -298,13 +293,13 @@ nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static int
-nv40_fifo_init(struct nouveau_object *object)
+nv40_fifo_init(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object;
- struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_fb *pfb = nvkm_fb(object);
int ret;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nvkm_fifo_init(&priv->base);
if (ret)
return ret;
@@ -314,7 +309,7 @@ nv40_fifo_init(struct nouveau_object *object)
nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((priv->ramht->bits - 9) << 16) |
- (priv->ramht->base.addr >> 8));
+ (priv->ramht->gpuobj.addr >> 8));
nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
switch (nv_device(priv)->chipset) {
@@ -349,13 +344,13 @@ nv40_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
-nv40_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv40_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv40_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = _nvkm_fifo_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 5d1e86bc244c..f25f0fd0655d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -21,21 +21,18 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "nv04.h"
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
-#include <nvif/unpack.h>
-#include <nvif/class.h>
-
-#include <subdev/timer.h>
#include <subdev/bar.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-
-#include "nv04.h"
-#include "nv50.h"
+#include <nvif/class.h>
+#include <nvif/unpack.h>
/*******************************************************************************
* FIFO channel objects
@@ -44,8 +41,8 @@
static void
nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_gpuobj *cur;
+ struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_gpuobj *cur;
int i, p;
cur = priv->playlist[priv->cur_playlist];
@@ -72,12 +69,11 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
}
static int
-nv50_fifo_context_attach(struct nouveau_object *parent,
- struct nouveau_object *object)
+nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_base *base = (void *)parent->parent;
- struct nouveau_gpuobj *ectx = (void *)object;
+ struct nvkm_gpuobj *ectx = (void *)object;
u64 limit = ectx->addr + ectx->size - 1;
u64 start = ectx->addr;
u32 addr;
@@ -103,10 +99,10 @@ nv50_fifo_context_attach(struct nouveau_object *parent,
}
static int
-nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
- struct nouveau_object *object)
+nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
+ struct nvkm_object *object)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_priv *priv = (void *)parent->engine;
struct nv50_fifo_base *base = (void *)parent->parent;
struct nv50_fifo_chan *chan = (void *)parent;
@@ -139,7 +135,7 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
nv_error(priv, "channel %d [%s] unload timeout\n",
- chan->base.chid, nouveau_client_name(chan));
+ chan->base.chid, nvkm_client_name(chan));
if (suspend)
ret = -EBUSY;
}
@@ -159,8 +155,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
}
static int
-nv50_fifo_object_attach(struct nouveau_object *parent,
- struct nouveau_object *object, u32 handle)
+nv50_fifo_object_attach(struct nvkm_object *parent,
+ struct nvkm_object *object, u32 handle)
{
struct nv50_fifo_chan *chan = (void *)parent;
u32 context;
@@ -179,26 +175,25 @@ nv50_fifo_object_attach(struct nouveau_object *parent,
return -EINVAL;
}
- return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+ return nvkm_ramht_insert(chan->ramht, 0, handle, context);
}
void
-nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
+nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
struct nv50_fifo_chan *chan = (void *)parent;
- nouveau_ramht_remove(chan->ramht, cookie);
+ nvkm_ramht_remove(chan->ramht, cookie);
}
static int
-nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv03_channel_dma_v0 v0;
} *args = data;
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
int ret;
@@ -211,12 +206,12 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -228,8 +223,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
+ ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
@@ -246,21 +241,20 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->base.node->offset >> 4));
+ (chan->ramht->gpuobj.node->offset >> 4));
bar->flush(bar);
return 0;
}
static int
-nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nv50_channel_gpfifo_v0 v0;
} *args = data;
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
u64 ioffset, ilength;
@@ -275,12 +269,12 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
} else
return ret;
- ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), &chan);
+ ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->v0.pushbuf,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -292,8 +286,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
+ ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
@@ -310,30 +304,30 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->base.node->offset >> 4));
+ (chan->ramht->gpuobj.node->offset >> 4));
bar->flush(bar);
return 0;
}
void
-nv50_fifo_chan_dtor(struct nouveau_object *object)
+nv50_fifo_chan_dtor(struct nvkm_object *object)
{
struct nv50_fifo_chan *chan = (void *)object;
- nouveau_ramht_ref(NULL, &chan->ramht);
- nouveau_fifo_channel_destroy(&chan->base);
+ nvkm_ramht_ref(NULL, &chan->ramht);
+ nvkm_fifo_channel_destroy(&chan->base);
}
static int
-nv50_fifo_chan_init(struct nouveau_object *object)
+nv50_fifo_chan_init(struct nvkm_object *object)
{
struct nv50_fifo_priv *priv = (void *)object->engine;
struct nv50_fifo_base *base = (void *)object->parent;
struct nv50_fifo_chan *chan = (void *)object;
- struct nouveau_gpuobj *ramfc = base->ramfc;
+ struct nvkm_gpuobj *ramfc = base->ramfc;
u32 chid = chan->base.chid;
int ret;
- ret = nouveau_fifo_channel_init(&chan->base);
+ ret = nvkm_fifo_channel_init(&chan->base);
if (ret)
return ret;
@@ -343,7 +337,7 @@ nv50_fifo_chan_init(struct nouveau_object *object)
}
int
-nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_fifo_priv *priv = (void *)object->engine;
struct nv50_fifo_chan *chan = (void *)object;
@@ -354,34 +348,34 @@ nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
nv50_fifo_playlist_update(priv);
nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
- return nouveau_fifo_channel_fini(&chan->base, suspend);
+ return nvkm_fifo_channel_fini(&chan->base, suspend);
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv50_fifo_ofuncs_dma = {
.ctor = nv50_fifo_chan_ctor_dma,
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
nv50_fifo_ofuncs_ind = {
.ctor = nv50_fifo_chan_ctor_ind,
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
- .map = _nouveau_fifo_channel_map,
- .rd32 = _nouveau_fifo_channel_rd32,
- .wr32 = _nouveau_fifo_channel_wr32,
- .ntfy = _nouveau_fifo_channel_ntfy
+ .map = _nvkm_fifo_channel_map,
+ .rd32 = _nvkm_fifo_channel_rd32,
+ .wr32 = _nvkm_fifo_channel_wr32,
+ .ntfy = _nvkm_fifo_channel_ntfy
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_fifo_sclass[] = {
{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
@@ -393,36 +387,35 @@ nv50_fifo_sclass[] = {
******************************************************************************/
static int
-nv50_fifo_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_fifo_base *base;
int ret;
- ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
- 0x1000, NVOBJ_FLAG_HEAP, &base);
+ ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
- &base->pgd);
+ ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
+ &base->pgd);
if (ret)
return ret;
- ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
@@ -430,27 +423,27 @@ nv50_fifo_context_ctor(struct nouveau_object *parent,
}
void
-nv50_fifo_context_dtor(struct nouveau_object *object)
+nv50_fifo_context_dtor(struct nvkm_object *object)
{
struct nv50_fifo_base *base = (void *)object;
- nouveau_vm_ref(NULL, &base->vm, base->pgd);
- nouveau_gpuobj_ref(NULL, &base->pgd);
- nouveau_gpuobj_ref(NULL, &base->eng);
- nouveau_gpuobj_ref(NULL, &base->ramfc);
- nouveau_gpuobj_ref(NULL, &base->cache);
- nouveau_fifo_context_destroy(&base->base);
+ nvkm_vm_ref(NULL, &base->vm, base->pgd);
+ nvkm_gpuobj_ref(NULL, &base->pgd);
+ nvkm_gpuobj_ref(NULL, &base->eng);
+ nvkm_gpuobj_ref(NULL, &base->ramfc);
+ nvkm_gpuobj_ref(NULL, &base->cache);
+ nvkm_fifo_context_destroy(&base->base);
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fifo_context_ctor,
.dtor = nv50_fifo_context_dtor,
- .init = _nouveau_fifo_context_init,
- .fini = _nouveau_fifo_context_fini,
- .rd32 = _nouveau_fifo_context_rd32,
- .wr32 = _nouveau_fifo_context_wr32,
+ .init = _nvkm_fifo_context_init,
+ .fini = _nvkm_fifo_context_fini,
+ .rd32 = _nvkm_fifo_context_rd32,
+ .wr32 = _nvkm_fifo_context_wr32,
},
};
@@ -459,25 +452,25 @@ nv50_fifo_cclass = {
******************************************************************************/
static int
-nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_fifo_priv *priv;
int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+ ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[1]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[1]);
if (ret)
return ret;
@@ -491,23 +484,23 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
void
-nv50_fifo_dtor(struct nouveau_object *object)
+nv50_fifo_dtor(struct nvkm_object *object)
{
struct nv50_fifo_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
+ nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
- nouveau_fifo_destroy(&priv->base);
+ nvkm_fifo_destroy(&priv->base);
}
int
-nv50_fifo_init(struct nouveau_object *object)
+nv50_fifo_init(struct nvkm_object *object)
{
struct nv50_fifo_priv *priv = (void *)object;
int ret, i;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nvkm_fifo_init(&priv->base);
if (ret)
return ret;
@@ -529,13 +522,13 @@ nv50_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
-nv50_fifo_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv50_fifo_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(FIFO, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fifo_ctor,
.dtor = nv50_fifo_dtor,
.init = nv50_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = _nvkm_fifo_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
new file mode 100644
index 000000000000..09ed93c66567
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
+#ifndef __NV50_FIFO_H__
+#define __NV50_FIFO_H__
+#include <engine/fifo.h>
+
+struct nv50_fifo_priv {
+ struct nvkm_fifo base;
+ struct nvkm_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_base {
+ struct nvkm_fifo_base base;
+ struct nvkm_gpuobj *ramfc;
+ struct nvkm_gpuobj *cache;
+ struct nvkm_gpuobj *eng;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+};
+
+struct nv50_fifo_chan {
+ struct nvkm_fifo_chan base;
+ u32 subc[8];
+ struct nvkm_ramht *ramht;
+};
+
+void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
+
+void nv50_fifo_object_detach(struct nvkm_object *, int);
+void nv50_fifo_chan_dtor(struct nvkm_object *);
+int nv50_fifo_chan_fini(struct nvkm_object *, bool);
+
+void nv50_fifo_context_dtor(struct nvkm_object *);
+
+void nv50_fifo_dtor(struct nvkm_object *);
+int nv50_fifo_init(struct nvkm_object *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
new file mode 100644
index 000000000000..1771d944591b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -0,0 +1,36 @@
+nvkm-y += nvkm/engine/gr/ctxnv40.o
+nvkm-y += nvkm/engine/gr/ctxnv50.o
+nvkm-y += nvkm/engine/gr/ctxgf100.o
+nvkm-y += nvkm/engine/gr/ctxgf108.o
+nvkm-y += nvkm/engine/gr/ctxgf104.o
+nvkm-y += nvkm/engine/gr/ctxgf110.o
+nvkm-y += nvkm/engine/gr/ctxgf117.o
+nvkm-y += nvkm/engine/gr/ctxgf119.o
+nvkm-y += nvkm/engine/gr/ctxgk104.o
+nvkm-y += nvkm/engine/gr/ctxgk20a.o
+nvkm-y += nvkm/engine/gr/ctxgk110.o
+nvkm-y += nvkm/engine/gr/ctxgk110b.o
+nvkm-y += nvkm/engine/gr/ctxgk208.o
+nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/nv04.o
+nvkm-y += nvkm/engine/gr/nv10.o
+nvkm-y += nvkm/engine/gr/nv20.o
+nvkm-y += nvkm/engine/gr/nv25.o
+nvkm-y += nvkm/engine/gr/nv2a.o
+nvkm-y += nvkm/engine/gr/nv30.o
+nvkm-y += nvkm/engine/gr/nv34.o
+nvkm-y += nvkm/engine/gr/nv35.o
+nvkm-y += nvkm/engine/gr/nv40.o
+nvkm-y += nvkm/engine/gr/nv50.o
+nvkm-y += nvkm/engine/gr/gf100.o
+nvkm-y += nvkm/engine/gr/gf108.o
+nvkm-y += nvkm/engine/gr/gf104.o
+nvkm-y += nvkm/engine/gr/gf110.o
+nvkm-y += nvkm/engine/gr/gf117.o
+nvkm-y += nvkm/engine/gr/gf119.o
+nvkm-y += nvkm/engine/gr/gk104.o
+nvkm-y += nvkm/engine/gr/gk20a.o
+nvkm-y += nvkm/engine/gr/gk110.o
+nvkm-y += nvkm/engine/gr/gk110b.o
+nvkm-y += nvkm/engine/gr/gk208.o
+nvkm-y += nvkm/engine/gr/gm107.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index b8e5fe60a1eb..2e7ec389eea7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -21,15 +21,19 @@
*
* Authors: Ben Skeggs
*/
+#include "ctxgf100.h"
-#include "ctxnvc0.h"
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvc0_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -266,14 +270,14 @@ nvc0_grctx_init_icmd_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvc0_grctx_pack_icmd[] = {
- { nvc0_grctx_init_icmd_0 },
+const struct gf100_gr_pack
+gf100_grctx_pack_icmd[] = {
+ { gf100_grctx_init_icmd_0 },
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_9097_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_9097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -575,8 +579,8 @@ nvc0_grctx_init_9097_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_902d_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_902d_0[] = {
{ 0x000200, 1, 0x04, 0x000000cf },
{ 0x000204, 1, 0x04, 0x00000001 },
{ 0x000208, 1, 0x04, 0x00000020 },
@@ -594,8 +598,8 @@ nvc0_grctx_init_902d_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_9039_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_9039_0[] = {
{ 0x00030c, 3, 0x04, 0x00000000 },
{ 0x000320, 1, 0x04, 0x00000000 },
{ 0x000238, 2, 0x04, 0x00000000 },
@@ -603,8 +607,8 @@ nvc0_grctx_init_9039_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_90c0_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_90c0_0[] = {
{ 0x00270c, 8, 0x20, 0x00000000 },
{ 0x00030c, 1, 0x04, 0x00000001 },
{ 0x001944, 1, 0x04, 0x00000000 },
@@ -621,23 +625,23 @@ nvc0_grctx_init_90c0_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvc0_grctx_pack_mthd[] = {
- { nvc0_grctx_init_9097_0, 0x9097 },
- { nvc0_grctx_init_902d_0, 0x902d },
- { nvc0_grctx_init_9039_0, 0x9039 },
- { nvc0_grctx_init_90c0_0, 0x90c0 },
+const struct gf100_gr_pack
+gf100_grctx_pack_mthd[] = {
+ { gf100_grctx_init_9097_0, 0x9097 },
+ { gf100_grctx_init_902d_0, 0x902d },
+ { gf100_grctx_init_9039_0, 0x9039 },
+ { gf100_grctx_init_90c0_0, 0x90c0 },
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_main_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_main_0[] = {
{ 0x400204, 2, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_fe_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_fe_0[] = {
{ 0x404004, 11, 0x04, 0x00000000 },
{ 0x404044, 1, 0x04, 0x00000000 },
{ 0x404094, 13, 0x04, 0x00000000 },
@@ -657,8 +661,8 @@ nvc0_grctx_init_fe_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_pri_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_pri_0[] = {
{ 0x404404, 14, 0x04, 0x00000000 },
{ 0x404460, 2, 0x04, 0x00000000 },
{ 0x404468, 1, 0x04, 0x00ffffff },
@@ -668,8 +672,8 @@ nvc0_grctx_init_pri_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_memfmt_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_memfmt_0[] = {
{ 0x404604, 1, 0x04, 0x00000015 },
{ 0x404608, 1, 0x04, 0x00000000 },
{ 0x40460c, 1, 0x04, 0x00002e00 },
@@ -690,8 +694,8 @@ nvc0_grctx_init_memfmt_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_ds_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x078000bf },
{ 0x405830, 1, 0x04, 0x02180000 },
{ 0x405834, 2, 0x04, 0x00000000 },
@@ -702,8 +706,8 @@ nvc0_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x000103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -712,8 +716,8 @@ nvc0_grctx_init_pd_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_rstr2d_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_rstr2d_0[] = {
{ 0x407804, 1, 0x04, 0x00000023 },
{ 0x40780c, 1, 0x04, 0x0a418820 },
{ 0x407810, 1, 0x04, 0x062080e6 },
@@ -725,8 +729,8 @@ nvc0_grctx_init_rstr2d_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_scc_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_scc_0[] = {
{ 0x408000, 2, 0x04, 0x00000000 },
{ 0x408008, 1, 0x04, 0x00000018 },
{ 0x40800c, 2, 0x04, 0x00000000 },
@@ -736,8 +740,8 @@ nvc0_grctx_init_scc_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_be_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x0003e00d },
@@ -748,28 +752,28 @@ nvc0_grctx_init_be_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvc0_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nvc0_grctx_init_fe_0 },
- { nvc0_grctx_init_pri_0 },
- { nvc0_grctx_init_memfmt_0 },
- { nvc0_grctx_init_ds_0 },
- { nvc0_grctx_init_pd_0 },
- { nvc0_grctx_init_rstr2d_0 },
- { nvc0_grctx_init_scc_0 },
- { nvc0_grctx_init_be_0 },
+const struct gf100_gr_pack
+gf100_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gf100_grctx_init_fe_0 },
+ { gf100_grctx_init_pri_0 },
+ { gf100_grctx_init_memfmt_0 },
+ { gf100_grctx_init_ds_0 },
+ { gf100_grctx_init_pd_0 },
+ { gf100_grctx_init_rstr2d_0 },
+ { gf100_grctx_init_scc_0 },
+ { gf100_grctx_init_be_0 },
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_gpc_unk_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_gpc_unk_0[] = {
{ 0x418380, 1, 0x04, 0x00000016 },
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_prop_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_prop_0[] = {
{ 0x418400, 1, 0x04, 0x38004e00 },
{ 0x418404, 1, 0x04, 0x71e0ffff },
{ 0x418408, 1, 0x04, 0x00000000 },
@@ -782,8 +786,8 @@ nvc0_grctx_init_prop_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_gpc_unk_1[] = {
+const struct gf100_gr_init
+gf100_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000001f },
{ 0x418684, 1, 0x04, 0x0000000f },
{ 0x418700, 1, 0x04, 0x00000002 },
@@ -794,8 +798,8 @@ nvc0_grctx_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x0006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -807,8 +811,8 @@ nvc0_grctx_init_setup_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_zcull_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_zcull_0[] = {
{ 0x41891c, 1, 0x04, 0x00ff00ff },
{ 0x418924, 1, 0x04, 0x00000000 },
{ 0x418928, 1, 0x04, 0x00ffff00 },
@@ -816,8 +820,8 @@ nvc0_grctx_init_zcull_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_crstr_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_crstr_0[] = {
{ 0x418b00, 1, 0x04, 0x00000000 },
{ 0x418b08, 1, 0x04, 0x0a418820 },
{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -829,8 +833,8 @@ nvc0_grctx_init_crstr_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_gpm_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c80, 1, 0x04, 0x20200004 },
@@ -838,29 +842,29 @@ nvc0_grctx_init_gpm_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_gcc_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_gcc_0[] = {
{ 0x419000, 1, 0x04, 0x00000780 },
{ 0x419004, 2, 0x04, 0x00000000 },
{ 0x419014, 1, 0x04, 0x00000004 },
{}
};
-const struct nvc0_graph_pack
-nvc0_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvc0_grctx_init_prop_0 },
- { nvc0_grctx_init_gpc_unk_1 },
- { nvc0_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvc0_grctx_init_crstr_0 },
- { nvc0_grctx_init_gpm_0 },
- { nvc0_grctx_init_gcc_0 },
+const struct gf100_gr_pack
+gf100_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf100_grctx_init_prop_0 },
+ { gf100_grctx_init_gpc_unk_1 },
+ { gf100_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf100_grctx_init_crstr_0 },
+ { gf100_grctx_init_gpm_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_zcullr_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_zcullr_0[] = {
{ 0x418a00, 3, 0x04, 0x00000000 },
{ 0x418a0c, 1, 0x04, 0x00010000 },
{ 0x418a10, 3, 0x04, 0x00000000 },
@@ -888,14 +892,14 @@ nvc0_grctx_init_zcullr_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvc0_grctx_pack_zcull[] = {
- { nvc0_grctx_init_zcullr_0 },
+const struct gf100_gr_pack
+gf100_grctx_pack_zcull[] = {
+ { gf100_grctx_init_zcullr_0 },
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_pe_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_pe_0[] = {
{ 0x419818, 1, 0x04, 0x00000000 },
{ 0x41983c, 1, 0x04, 0x00038bc7 },
{ 0x419848, 1, 0x04, 0x00000000 },
@@ -904,8 +908,8 @@ nvc0_grctx_init_pe_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_tex_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -915,8 +919,8 @@ nvc0_grctx_init_tex_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_wwdx_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_wwdx_0[] = {
{ 0x419b00, 1, 0x04, 0x0a418820 },
{ 0x419b04, 1, 0x04, 0x062080e6 },
{ 0x419b08, 1, 0x04, 0x020398a4 },
@@ -929,8 +933,8 @@ nvc0_grctx_init_wwdx_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_mpc_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x00000002 },
{ 0x419c04, 1, 0x04, 0x00000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
@@ -938,23 +942,23 @@ nvc0_grctx_init_mpc_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_l1c_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_l1c_0[] = {
{ 0x419cb0, 1, 0x04, 0x00060048 },
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00000183 },
{}
};
-const struct nvc0_graph_init
-nvc0_grctx_init_tpccs_0[] = {
+const struct gf100_gr_init
+gf100_grctx_init_tpccs_0[] = {
{ 0x419d20, 1, 0x04, 0x02180000 },
{ 0x419d24, 1, 0x04, 0x00001fff },
{}
};
-static const struct nvc0_graph_init
-nvc0_grctx_init_sm_0[] = {
+static const struct gf100_gr_init
+gf100_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000002 },
{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -966,15 +970,15 @@ nvc0_grctx_init_sm_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvc0_grctx_pack_tpc[] = {
- { nvc0_grctx_init_pe_0 },
- { nvc0_grctx_init_tex_0 },
- { nvc0_grctx_init_wwdx_0 },
- { nvc0_grctx_init_mpc_0 },
- { nvc0_grctx_init_l1c_0 },
- { nvc0_grctx_init_tpccs_0 },
- { nvc0_grctx_init_sm_0 },
+const struct gf100_gr_pack
+gf100_grctx_pack_tpc[] = {
+ { gf100_grctx_init_pe_0 },
+ { gf100_grctx_init_tex_0 },
+ { gf100_grctx_init_wwdx_0 },
+ { gf100_grctx_init_mpc_0 },
+ { gf100_grctx_init_l1c_0 },
+ { gf100_grctx_init_tpccs_0 },
+ { gf100_grctx_init_sm_0 },
{}
};
@@ -983,7 +987,7 @@ nvc0_grctx_pack_tpc[] = {
******************************************************************************/
int
-nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
+gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, u32 access)
{
if (info->data) {
info->buffer[info->buffer_nr] = round_up(info->addr, align);
@@ -998,8 +1002,8 @@ nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
}
void
-nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data,
- int shift, int buffer)
+gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
+ int shift, int buffer)
{
if (info->data) {
if (shift >= 0) {
@@ -1021,9 +1025,9 @@ nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data,
}
void
-nvc0_grctx_generate_bundle(struct nvc0_grctx *info)
+gf100_grctx_generate_bundle(struct gf100_grctx *info)
{
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
@@ -1034,9 +1038,9 @@ nvc0_grctx_generate_bundle(struct nvc0_grctx *info)
}
void
-nvc0_grctx_generate_pagepool(struct nvc0_grctx *info)
+gf100_grctx_generate_pagepool(struct gf100_grctx *info)
{
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
@@ -1047,10 +1051,10 @@ nvc0_grctx_generate_pagepool(struct nvc0_grctx *info)
}
void
-nvc0_grctx_generate_attrib(struct nvc0_grctx *info)
+gf100_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct nvc0_graph_priv *priv = info->priv;
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ struct gf100_gr_priv *priv = info->priv;
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
const u32 attrib = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
@@ -1074,12 +1078,12 @@ nvc0_grctx_generate_attrib(struct nvc0_grctx *info)
}
void
-nvc0_grctx_generate_unkn(struct nvc0_graph_priv *priv)
+gf100_grctx_generate_unkn(struct gf100_gr_priv *priv)
{
}
void
-nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *priv)
+gf100_grctx_generate_tpcid(struct gf100_gr_priv *priv)
{
int gpc, tpc, id;
@@ -1100,7 +1104,7 @@ nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *priv)
}
void
-nvc0_grctx_generate_r406028(struct nvc0_graph_priv *priv)
+gf100_grctx_generate_r406028(struct gf100_gr_priv *priv)
{
u32 tmp[GPC_MAX / 8] = {}, i = 0;
for (i = 0; i < priv->gpc_nr; i++)
@@ -1112,7 +1116,7 @@ nvc0_grctx_generate_r406028(struct nvc0_graph_priv *priv)
}
void
-nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *priv)
+gf100_grctx_generate_r4060a8(struct gf100_gr_priv *priv)
{
u8 tpcnr[GPC_MAX], data[TPC_MAX];
int gpc, tpc, i;
@@ -1134,7 +1138,7 @@ nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *priv)
}
void
-nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *priv)
+gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
{
u32 data[6] = {}, data2[2] = {};
u8 tpcnr[GPC_MAX];
@@ -1192,7 +1196,7 @@ nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *priv)
}
void
-nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
+gf100_grctx_generate_r406800(struct gf100_gr_priv *priv)
{
u64 tpc_mask = 0, tpc_set = 0;
u8 tpcnr[GPC_MAX];
@@ -1225,17 +1229,17 @@ nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
}
void
-nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+gf100_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
{
- struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
- nvc0_graph_mmio(priv, oclass->hub);
- nvc0_graph_mmio(priv, oclass->gpc);
- nvc0_graph_mmio(priv, oclass->zcull);
- nvc0_graph_mmio(priv, oclass->tpc);
- nvc0_graph_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(priv, oclass->hub);
+ gf100_gr_mmio(priv, oclass->gpc);
+ gf100_gr_mmio(priv, oclass->zcull);
+ gf100_gr_mmio(priv, oclass->tpc);
+ gf100_gr_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -1244,32 +1248,32 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
oclass->attrib(info);
oclass->unkn(priv);
- nvc0_grctx_generate_tpcid(priv);
- nvc0_grctx_generate_r406028(priv);
- nvc0_grctx_generate_r4060a8(priv);
- nvc0_grctx_generate_r418bb8(priv);
- nvc0_grctx_generate_r406800(priv);
+ gf100_grctx_generate_tpcid(priv);
+ gf100_grctx_generate_r406028(priv);
+ gf100_grctx_generate_r4060a8(priv);
+ gf100_grctx_generate_r418bb8(priv);
+ gf100_grctx_generate_r406800(priv);
- nvc0_graph_icmd(priv, oclass->icmd);
+ gf100_gr_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
- nvc0_graph_mthd(priv, oclass->mthd);
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
+ gf100_gr_mthd(priv, oclass->mthd);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
}
int
-nvc0_grctx_generate(struct nvc0_graph_priv *priv)
+gf100_grctx_generate(struct gf100_gr_priv *priv)
{
- struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_gpuobj *chan;
- struct nvc0_grctx info;
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_gpuobj *chan;
+ struct gf100_grctx info;
int ret, i;
/* allocate memory for a "channel", which we'll use to generate
* the default context values
*/
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
if (ret) {
nv_error(priv, "failed to allocate channel memory, %d\n", ret);
return ret;
@@ -1353,34 +1357,34 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
}
done:
- nouveau_gpuobj_ref(NULL, &chan);
+ nvkm_gpuobj_ref(NULL, &chan);
return ret;
}
-struct nouveau_oclass *
-nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .unkn = nvc0_grctx_generate_unkn,
- .hub = nvc0_grctx_pack_hub,
- .gpc = nvc0_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvc0_grctx_pack_tpc,
- .icmd = nvc0_grctx_pack_icmd,
- .mthd = nvc0_grctx_pack_mthd,
- .bundle = nvc0_grctx_generate_bundle,
+ .main = gf100_grctx_generate_main,
+ .unkn = gf100_grctx_generate_unkn,
+ .hub = gf100_grctx_pack_hub,
+ .gpc = gf100_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gf100_grctx_pack_tpc,
+ .icmd = gf100_grctx_pack_icmd,
+ .mthd = gf100_grctx_pack_mthd,
+ .bundle = gf100_grctx_generate_bundle,
.bundle_size = 0x1800,
- .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool = gf100_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvc0_grctx_generate_attrib,
+ .attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
new file mode 100644
index 000000000000..1166b1aa1525
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -0,0 +1,199 @@
+#ifndef __NVKM_GRCTX_NVC0_H__
+#define __NVKM_GRCTX_NVC0_H__
+#include "gf100.h"
+
+struct gf100_grctx {
+ struct gf100_gr_priv *priv;
+ struct gf100_gr_data *data;
+ struct gf100_gr_mmio *mmio;
+ int buffer_nr;
+ u64 buffer[4];
+ u64 addr;
+};
+
+int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, u32 access);
+void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int);
+
+#define mmio_vram(a,b,c,d) gf100_grctx_mmio_data((a), (b), (c), (d))
+#define mmio_refn(a,b,c,d,e) gf100_grctx_mmio_item((a), (b), (c), (d), (e))
+#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
+#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
+
+struct gf100_grctx_oclass {
+ struct nvkm_oclass base;
+ /* main context generation function */
+ void (*main)(struct gf100_gr_priv *, struct gf100_grctx *);
+ /* context-specific modify-on-first-load list generation function */
+ void (*unkn)(struct gf100_gr_priv *);
+ /* mmio context data */
+ const struct gf100_gr_pack *hub;
+ const struct gf100_gr_pack *gpc;
+ const struct gf100_gr_pack *zcull;
+ const struct gf100_gr_pack *tpc;
+ const struct gf100_gr_pack *ppc;
+ /* indirect context data, generated with icmds/mthds */
+ const struct gf100_gr_pack *icmd;
+ const struct gf100_gr_pack *mthd;
+ /* bundle circular buffer */
+ void (*bundle)(struct gf100_grctx *);
+ u32 bundle_size;
+ u32 bundle_min_gpm_fifo_depth;
+ u32 bundle_token_limit;
+ /* pagepool */
+ void (*pagepool)(struct gf100_grctx *);
+ u32 pagepool_size;
+ /* attribute(/alpha) circular buffer */
+ void (*attrib)(struct gf100_grctx *);
+ u32 attrib_nr_max;
+ u32 attrib_nr;
+ u32 alpha_nr_max;
+ u32 alpha_nr;
+};
+
+static inline const struct gf100_grctx_oclass *
+gf100_grctx_impl(struct gf100_gr_priv *priv)
+{
+ return (void *)nv_engine(priv)->cclass;
+}
+
+extern struct nvkm_oclass *gf100_grctx_oclass;
+int gf100_grctx_generate(struct gf100_gr_priv *);
+void gf100_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+void gf100_grctx_generate_bundle(struct gf100_grctx *);
+void gf100_grctx_generate_pagepool(struct gf100_grctx *);
+void gf100_grctx_generate_attrib(struct gf100_grctx *);
+void gf100_grctx_generate_unkn(struct gf100_gr_priv *);
+void gf100_grctx_generate_tpcid(struct gf100_gr_priv *);
+void gf100_grctx_generate_r406028(struct gf100_gr_priv *);
+void gf100_grctx_generate_r4060a8(struct gf100_gr_priv *);
+void gf100_grctx_generate_r418bb8(struct gf100_gr_priv *);
+void gf100_grctx_generate_r406800(struct gf100_gr_priv *);
+
+extern struct nvkm_oclass *gf108_grctx_oclass;
+void gf108_grctx_generate_attrib(struct gf100_grctx *);
+void gf108_grctx_generate_unkn(struct gf100_gr_priv *);
+
+extern struct nvkm_oclass *gf104_grctx_oclass;
+extern struct nvkm_oclass *gf110_grctx_oclass;
+
+extern struct nvkm_oclass *gf117_grctx_oclass;
+void gf117_grctx_generate_attrib(struct gf100_grctx *);
+
+extern struct nvkm_oclass *gf119_grctx_oclass;
+
+extern struct nvkm_oclass *gk104_grctx_oclass;
+extern struct nvkm_oclass *gk20a_grctx_oclass;
+void gk104_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+void gk104_grctx_generate_bundle(struct gf100_grctx *);
+void gk104_grctx_generate_pagepool(struct gf100_grctx *);
+void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
+void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
+
+extern struct nvkm_oclass *gk110_grctx_oclass;
+extern struct nvkm_oclass *gk110b_grctx_oclass;
+extern struct nvkm_oclass *gk208_grctx_oclass;
+extern struct nvkm_oclass *gm107_grctx_oclass;
+
+/* context init value lists */
+
+extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
+
+extern const struct gf100_gr_pack gf100_grctx_pack_mthd[];
+extern const struct gf100_gr_init gf100_grctx_init_902d_0[];
+extern const struct gf100_gr_init gf100_grctx_init_9039_0[];
+extern const struct gf100_gr_init gf100_grctx_init_90c0_0[];
+
+extern const struct gf100_gr_pack gf100_grctx_pack_hub[];
+extern const struct gf100_gr_init gf100_grctx_init_main_0[];
+extern const struct gf100_gr_init gf100_grctx_init_fe_0[];
+extern const struct gf100_gr_init gf100_grctx_init_pri_0[];
+extern const struct gf100_gr_init gf100_grctx_init_memfmt_0[];
+extern const struct gf100_gr_init gf100_grctx_init_rstr2d_0[];
+extern const struct gf100_gr_init gf100_grctx_init_scc_0[];
+
+extern const struct gf100_gr_pack gf100_grctx_pack_gpc[];
+extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_0[];
+extern const struct gf100_gr_init gf100_grctx_init_prop_0[];
+extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_1[];
+extern const struct gf100_gr_init gf100_grctx_init_zcull_0[];
+extern const struct gf100_gr_init gf100_grctx_init_crstr_0[];
+extern const struct gf100_gr_init gf100_grctx_init_gpm_0[];
+extern const struct gf100_gr_init gf100_grctx_init_gcc_0[];
+
+extern const struct gf100_gr_pack gf100_grctx_pack_zcull[];
+
+extern const struct gf100_gr_pack gf100_grctx_pack_tpc[];
+extern const struct gf100_gr_init gf100_grctx_init_pe_0[];
+extern const struct gf100_gr_init gf100_grctx_init_wwdx_0[];
+extern const struct gf100_gr_init gf100_grctx_init_mpc_0[];
+extern const struct gf100_gr_init gf100_grctx_init_tpccs_0[];
+
+extern const struct gf100_gr_init gf104_grctx_init_tex_0[];
+extern const struct gf100_gr_init gf104_grctx_init_l1c_0[];
+extern const struct gf100_gr_init gf104_grctx_init_sm_0[];
+
+extern const struct gf100_gr_init gf108_grctx_init_9097_0[];
+
+extern const struct gf100_gr_init gf108_grctx_init_gpm_0[];
+
+extern const struct gf100_gr_init gf108_grctx_init_pe_0[];
+extern const struct gf100_gr_init gf108_grctx_init_wwdx_0[];
+extern const struct gf100_gr_init gf108_grctx_init_tpccs_0[];
+
+extern const struct gf100_gr_init gf110_grctx_init_9197_0[];
+extern const struct gf100_gr_init gf110_grctx_init_9297_0[];
+
+extern const struct gf100_gr_pack gf119_grctx_pack_icmd[];
+
+extern const struct gf100_gr_pack gf119_grctx_pack_mthd[];
+
+extern const struct gf100_gr_init gf119_grctx_init_fe_0[];
+extern const struct gf100_gr_init gf119_grctx_init_be_0[];
+
+extern const struct gf100_gr_init gf119_grctx_init_prop_0[];
+extern const struct gf100_gr_init gf119_grctx_init_gpc_unk_1[];
+extern const struct gf100_gr_init gf119_grctx_init_crstr_0[];
+
+extern const struct gf100_gr_init gf119_grctx_init_sm_0[];
+
+extern const struct gf100_gr_init gf117_grctx_init_pe_0[];
+
+extern const struct gf100_gr_init gf117_grctx_init_wwdx_0[];
+
+extern const struct gf100_gr_init gk104_grctx_init_memfmt_0[];
+extern const struct gf100_gr_init gk104_grctx_init_ds_0[];
+extern const struct gf100_gr_init gk104_grctx_init_scc_0[];
+
+extern const struct gf100_gr_init gk104_grctx_init_gpm_0[];
+
+extern const struct gf100_gr_init gk104_grctx_init_pes_0[];
+
+extern const struct gf100_gr_pack gk104_grctx_pack_hub[];
+extern const struct gf100_gr_pack gk104_grctx_pack_gpc[];
+extern const struct gf100_gr_pack gk104_grctx_pack_tpc[];
+extern const struct gf100_gr_pack gk104_grctx_pack_ppc[];
+extern const struct gf100_gr_pack gk104_grctx_pack_icmd[];
+extern const struct gf100_gr_init gk104_grctx_init_a097_0[];
+
+extern const struct gf100_gr_pack gk110_grctx_pack_icmd[];
+
+extern const struct gf100_gr_pack gk110_grctx_pack_mthd[];
+
+extern const struct gf100_gr_pack gk110_grctx_pack_hub[];
+extern const struct gf100_gr_init gk110_grctx_init_pri_0[];
+extern const struct gf100_gr_init gk110_grctx_init_cwd_0[];
+
+extern const struct gf100_gr_pack gk110_grctx_pack_gpc[];
+extern const struct gf100_gr_init gk110_grctx_init_gpc_unk_2[];
+
+extern const struct gf100_gr_init gk110_grctx_init_tex_0[];
+extern const struct gf100_gr_init gk110_grctx_init_mpc_0[];
+extern const struct gf100_gr_init gk110_grctx_init_l1c_0[];
+
+extern const struct gf100_gr_pack gk110_grctx_pack_ppc[];
+
+extern const struct gf100_gr_init gk208_grctx_init_rstr2d_0[];
+
+extern const struct gf100_gr_init gk208_grctx_init_prop_0[];
+extern const struct gf100_gr_init gk208_grctx_init_crstr_0[];
+#endif
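
The register tables throughout this patch, and the ones declared extern in the header above, are all built from the same { addr, count, pitch, data } quadruples grouped into packs that gf100_gr_mmio() walks. As a rough, self-contained illustration of that layout only — the struct and function names below are local stand-ins, the real definitions live in gf100.h (not shown in this patch), and nv_wr32() is replaced by a printf stub — a pack expands into register writes roughly like this:

/* Illustrative sketch: mirrors the { addr, count, pitch, data }
 * initialiser layout used by the tables in this patch.  The real
 * struct definitions live in gf100.h; names here are local. */
#include <stdint.h>
#include <stdio.h>

struct demo_init {          /* stand-in for gf100_gr_init */
	uint32_t addr;      /* first register offset        */
	uint8_t  count;     /* number of registers to write */
	uint32_t pitch;     /* stride between registers     */
	uint32_t data;      /* value written to each one    */
};

struct demo_pack {          /* stand-in for gf100_gr_pack */
	const struct demo_init *init;
};

static void demo_wr32(uint32_t addr, uint32_t data)
{
	/* In the driver this would be an nv_wr32() to PGRAPH. */
	printf("wr32 %06x <- %08x\n", addr, data);
}

static void demo_mmio(const struct demo_pack *pack)
{
	const struct demo_pack *p;
	const struct demo_init *i;
	uint32_t addr, n;

	for (p = pack; p->init; p++) {               /* {} terminates packs  */
		for (i = p->init; i->count; i++) {   /* {} terminates inits  */
			addr = i->addr;
			for (n = 0; n < i->count; n++, addr += i->pitch)
				demo_wr32(addr, i->data);
		}
	}
}

int main(void)
{
	/* Same shape and values as gf100_grctx_init_gcc_0[] above. */
	static const struct demo_init gcc_0[] = {
		{ 0x419000, 1, 0x04, 0x00000780 },
		{ 0x419004, 2, 0x04, 0x00000000 },
		{ 0x419014, 1, 0x04, 0x00000004 },
		{}
	};
	static const struct demo_pack gpc[] = { { gcc_0 }, {} };

	demo_mmio(gpc);
	return 0;
}

The mmio_refn()/mmio_wr32() macros declared in ctxgf100.h serve a related purpose for the context image itself, routing through gf100_grctx_mmio_item() so the writes can also be recorded in info->mmio rather than only issued directly.
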
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index 41705c60cc47..c5a8d55e2cac 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "ctxnvc0.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-const struct nvc0_graph_init
-nvc4_grctx_init_tex_0[] = {
+const struct gf100_gr_init
+gf104_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -42,16 +41,16 @@ nvc4_grctx_init_tex_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc4_grctx_init_l1c_0[] = {
+const struct gf100_gr_init
+gf104_grctx_init_l1c_0[] = {
{ 0x419cb0, 1, 0x04, 0x00020048 },
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00000183 },
{}
};
-const struct nvc0_graph_init
-nvc4_grctx_init_sm_0[] = {
+const struct gf100_gr_init
+gf104_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000002 },
{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -64,15 +63,15 @@ nvc4_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc4_grctx_pack_tpc[] = {
- { nvc0_grctx_init_pe_0 },
- { nvc4_grctx_init_tex_0 },
- { nvc0_grctx_init_wwdx_0 },
- { nvc0_grctx_init_mpc_0 },
- { nvc4_grctx_init_l1c_0 },
- { nvc0_grctx_init_tpccs_0 },
- { nvc4_grctx_init_sm_0 },
+static const struct gf100_gr_pack
+gf104_grctx_pack_tpc[] = {
+ { gf100_grctx_init_pe_0 },
+ { gf104_grctx_init_tex_0 },
+ { gf100_grctx_init_wwdx_0 },
+ { gf100_grctx_init_mpc_0 },
+ { gf104_grctx_init_l1c_0 },
+ { gf100_grctx_init_tpccs_0 },
+ { gf104_grctx_init_sm_0 },
{}
};
@@ -80,30 +79,30 @@ nvc4_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nouveau_oclass *
-nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc3),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .unkn = nvc0_grctx_generate_unkn,
- .hub = nvc0_grctx_pack_hub,
- .gpc = nvc0_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvc4_grctx_pack_tpc,
- .icmd = nvc0_grctx_pack_icmd,
- .mthd = nvc0_grctx_pack_mthd,
- .bundle = nvc0_grctx_generate_bundle,
+ .main = gf100_grctx_generate_main,
+ .unkn = gf100_grctx_generate_unkn,
+ .hub = gf100_grctx_pack_hub,
+ .gpc = gf100_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gf104_grctx_pack_tpc,
+ .icmd = gf100_grctx_pack_icmd,
+ .mthd = gf100_grctx_pack_mthd,
+ .bundle = gf100_grctx_generate_bundle,
.bundle_size = 0x1800,
- .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool = gf100_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvc0_grctx_generate_attrib,
+ .attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index c6ba8fed18f1..87c844a5f34b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -21,15 +21,16 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "ctxgf100.h"
-#include "ctxnvc0.h"
+#include <subdev/fb.h>
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvc1_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gf108_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -267,14 +268,14 @@ nvc1_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc1_grctx_pack_icmd[] = {
- { nvc1_grctx_init_icmd_0 },
+static const struct gf100_gr_pack
+gf108_grctx_pack_icmd[] = {
+ { gf108_grctx_init_icmd_0 },
{}
};
-const struct nvc0_graph_init
-nvc1_grctx_init_9097_0[] = {
+const struct gf100_gr_init
+gf108_grctx_init_9097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -575,25 +576,25 @@ nvc1_grctx_init_9097_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc1_grctx_init_9197_0[] = {
+static const struct gf100_gr_init
+gf108_grctx_init_9197_0[] = {
{ 0x003400, 128, 0x04, 0x00000000 },
{ 0x0002e4, 1, 0x04, 0x0000b001 },
{}
};
-static const struct nvc0_graph_pack
-nvc1_grctx_pack_mthd[] = {
- { nvc1_grctx_init_9097_0, 0x9097 },
- { nvc1_grctx_init_9197_0, 0x9197 },
- { nvc0_grctx_init_902d_0, 0x902d },
- { nvc0_grctx_init_9039_0, 0x9039 },
- { nvc0_grctx_init_90c0_0, 0x90c0 },
+static const struct gf100_gr_pack
+gf108_grctx_pack_mthd[] = {
+ { gf108_grctx_init_9097_0, 0x9097 },
+ { gf108_grctx_init_9197_0, 0x9197 },
+ { gf100_grctx_init_902d_0, 0x902d },
+ { gf100_grctx_init_9039_0, 0x9039 },
+ { gf100_grctx_init_90c0_0, 0x90c0 },
{}
};
-static const struct nvc0_graph_init
-nvc1_grctx_init_ds_0[] = {
+static const struct gf100_gr_init
+gf108_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180218 },
{ 0x405834, 2, 0x04, 0x00000000 },
@@ -604,8 +605,8 @@ nvc1_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc1_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gf108_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x000103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -616,8 +617,8 @@ nvc1_grctx_init_pd_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc1_grctx_init_be_0[] = {
+static const struct gf100_gr_init
+gf108_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1003e005 },
@@ -628,22 +629,22 @@ nvc1_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc1_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nvc0_grctx_init_fe_0 },
- { nvc0_grctx_init_pri_0 },
- { nvc0_grctx_init_memfmt_0 },
- { nvc1_grctx_init_ds_0 },
- { nvc1_grctx_init_pd_0 },
- { nvc0_grctx_init_rstr2d_0 },
- { nvc0_grctx_init_scc_0 },
- { nvc1_grctx_init_be_0 },
+static const struct gf100_gr_pack
+gf108_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gf100_grctx_init_fe_0 },
+ { gf100_grctx_init_pri_0 },
+ { gf100_grctx_init_memfmt_0 },
+ { gf108_grctx_init_ds_0 },
+ { gf108_grctx_init_pd_0 },
+ { gf100_grctx_init_rstr2d_0 },
+ { gf100_grctx_init_scc_0 },
+ { gf108_grctx_init_be_0 },
{}
};
-static const struct nvc0_graph_init
-nvc1_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gf108_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x0006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -655,8 +656,8 @@ nvc1_grctx_init_setup_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc1_grctx_init_gpm_0[] = {
+const struct gf100_gr_init
+gf108_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c6c, 1, 0x04, 0x00000001 },
@@ -665,21 +666,21 @@ nvc1_grctx_init_gpm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc1_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvc0_grctx_init_prop_0 },
- { nvc0_grctx_init_gpc_unk_1 },
- { nvc1_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvc0_grctx_init_crstr_0 },
- { nvc1_grctx_init_gpm_0 },
- { nvc0_grctx_init_gcc_0 },
+static const struct gf100_gr_pack
+gf108_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf100_grctx_init_prop_0 },
+ { gf100_grctx_init_gpc_unk_1 },
+ { gf108_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf100_grctx_init_crstr_0 },
+ { gf108_grctx_init_gpm_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-const struct nvc0_graph_init
-nvc1_grctx_init_pe_0[] = {
+const struct gf100_gr_init
+gf108_grctx_init_pe_0[] = {
{ 0x419818, 1, 0x04, 0x00000000 },
{ 0x41983c, 1, 0x04, 0x00038bc7 },
{ 0x419848, 1, 0x04, 0x00000000 },
@@ -688,8 +689,8 @@ nvc1_grctx_init_pe_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc1_grctx_init_wwdx_0[] = {
+const struct gf100_gr_init
+gf108_grctx_init_wwdx_0[] = {
{ 0x419b00, 1, 0x04, 0x0a418820 },
{ 0x419b04, 1, 0x04, 0x062080e6 },
{ 0x419b08, 1, 0x04, 0x020398a4 },
@@ -702,23 +703,23 @@ nvc1_grctx_init_wwdx_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc1_grctx_init_tpccs_0[] = {
+const struct gf100_gr_init
+gf108_grctx_init_tpccs_0[] = {
{ 0x419d20, 1, 0x04, 0x12180000 },
{ 0x419d24, 1, 0x04, 0x00001fff },
{ 0x419d44, 1, 0x04, 0x02180218 },
{}
};
-static const struct nvc0_graph_pack
-nvc1_grctx_pack_tpc[] = {
- { nvc1_grctx_init_pe_0 },
- { nvc4_grctx_init_tex_0 },
- { nvc1_grctx_init_wwdx_0 },
- { nvc0_grctx_init_mpc_0 },
- { nvc4_grctx_init_l1c_0 },
- { nvc1_grctx_init_tpccs_0 },
- { nvc4_grctx_init_sm_0 },
+static const struct gf100_gr_pack
+gf108_grctx_pack_tpc[] = {
+ { gf108_grctx_init_pe_0 },
+ { gf104_grctx_init_tex_0 },
+ { gf108_grctx_init_wwdx_0 },
+ { gf100_grctx_init_mpc_0 },
+ { gf104_grctx_init_l1c_0 },
+ { gf108_grctx_init_tpccs_0 },
+ { gf104_grctx_init_sm_0 },
{}
};
@@ -727,10 +728,10 @@ nvc1_grctx_pack_tpc[] = {
******************************************************************************/
void
-nvc1_grctx_generate_attrib(struct nvc0_grctx *info)
+gf108_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct nvc0_graph_priv *priv = info->priv;
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ struct gf100_gr_priv *priv = info->priv;
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
const u32 alpha = impl->alpha_nr;
const u32 beta = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
@@ -764,7 +765,7 @@ nvc1_grctx_generate_attrib(struct nvc0_grctx *info)
}
void
-nvc1_grctx_generate_unkn(struct nvc0_graph_priv *priv)
+gf108_grctx_generate_unkn(struct gf100_gr_priv *priv)
{
nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
@@ -774,30 +775,30 @@ nvc1_grctx_generate_unkn(struct nvc0_graph_priv *priv)
nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
}
-struct nouveau_oclass *
-nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc1),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .unkn = nvc1_grctx_generate_unkn,
- .hub = nvc1_grctx_pack_hub,
- .gpc = nvc1_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvc1_grctx_pack_tpc,
- .icmd = nvc1_grctx_pack_icmd,
- .mthd = nvc1_grctx_pack_mthd,
- .bundle = nvc0_grctx_generate_bundle,
+ .main = gf100_grctx_generate_main,
+ .unkn = gf108_grctx_generate_unkn,
+ .hub = gf108_grctx_pack_hub,
+ .gpc = gf108_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gf108_grctx_pack_tpc,
+ .icmd = gf108_grctx_pack_icmd,
+ .mthd = gf108_grctx_pack_mthd,
+ .bundle = gf100_grctx_generate_bundle,
.bundle_size = 0x1800,
- .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool = gf100_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvc1_grctx_generate_attrib,
+ .attrib = gf108_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 8f804cd8f9c7..b3acd931b978 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "ctxnvc0.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvc8_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gf110_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -268,20 +267,20 @@ nvc8_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc8_grctx_pack_icmd[] = {
- { nvc8_grctx_init_icmd_0 },
+static const struct gf100_gr_pack
+gf110_grctx_pack_icmd[] = {
+ { gf110_grctx_init_icmd_0 },
{}
};
-const struct nvc0_graph_init
-nvc8_grctx_init_9197_0[] = {
+const struct gf100_gr_init
+gf110_grctx_init_9197_0[] = {
{ 0x0002e4, 1, 0x04, 0x0000b001 },
{}
};
-const struct nvc0_graph_init
-nvc8_grctx_init_9297_0[] = {
+const struct gf100_gr_init
+gf110_grctx_init_9297_0[] = {
{ 0x003400, 128, 0x04, 0x00000000 },
{ 0x00036c, 2, 0x04, 0x00000000 },
{ 0x0007a4, 2, 0x04, 0x00000000 },
@@ -290,19 +289,19 @@ nvc8_grctx_init_9297_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc8_grctx_pack_mthd[] = {
- { nvc1_grctx_init_9097_0, 0x9097 },
- { nvc8_grctx_init_9197_0, 0x9197 },
- { nvc8_grctx_init_9297_0, 0x9297 },
- { nvc0_grctx_init_902d_0, 0x902d },
- { nvc0_grctx_init_9039_0, 0x9039 },
- { nvc0_grctx_init_90c0_0, 0x90c0 },
+static const struct gf100_gr_pack
+gf110_grctx_pack_mthd[] = {
+ { gf108_grctx_init_9097_0, 0x9097 },
+ { gf110_grctx_init_9197_0, 0x9197 },
+ { gf110_grctx_init_9297_0, 0x9297 },
+ { gf100_grctx_init_902d_0, 0x902d },
+ { gf100_grctx_init_9039_0, 0x9039 },
+ { gf100_grctx_init_90c0_0, 0x90c0 },
{}
};
-static const struct nvc0_graph_init
-nvc8_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gf110_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x0006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -314,16 +313,16 @@ nvc8_grctx_init_setup_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc8_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvc0_grctx_init_prop_0 },
- { nvc0_grctx_init_gpc_unk_1 },
- { nvc8_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvc0_grctx_init_crstr_0 },
- { nvc0_grctx_init_gpm_0 },
- { nvc0_grctx_init_gcc_0 },
+static const struct gf100_gr_pack
+gf110_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf100_grctx_init_prop_0 },
+ { gf100_grctx_init_gpc_unk_1 },
+ { gf110_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf100_grctx_init_crstr_0 },
+ { gf100_grctx_init_gpm_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
@@ -331,30 +330,30 @@ nvc8_grctx_pack_gpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nouveau_oclass *
-nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc8),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .unkn = nvc0_grctx_generate_unkn,
- .hub = nvc0_grctx_pack_hub,
- .gpc = nvc8_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvc0_grctx_pack_tpc,
- .icmd = nvc8_grctx_pack_icmd,
- .mthd = nvc8_grctx_pack_mthd,
- .bundle = nvc0_grctx_generate_bundle,
+ .main = gf100_grctx_generate_main,
+ .unkn = gf100_grctx_generate_unkn,
+ .hub = gf100_grctx_pack_hub,
+ .gpc = gf110_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gf100_grctx_pack_tpc,
+ .icmd = gf110_grctx_pack_icmd,
+ .mthd = gf110_grctx_pack_mthd,
+ .bundle = gf100_grctx_generate_bundle,
.bundle_size = 0x1800,
- .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool = gf100_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvc0_grctx_generate_attrib,
+ .attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index fcf534fd9e65..9bbe2c97552e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -21,15 +21,17 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "ctxgf100.h"
-#include "ctxnvc0.h"
+#include <subdev/fb.h>
+#include <subdev/mc.h>
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvd7_grctx_init_ds_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180324 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -41,8 +43,8 @@ nvd7_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd7_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x000103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -54,22 +56,22 @@ nvd7_grctx_init_pd_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd7_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nvd9_grctx_init_fe_0 },
- { nvc0_grctx_init_pri_0 },
- { nvc0_grctx_init_memfmt_0 },
- { nvd7_grctx_init_ds_0 },
- { nvd7_grctx_init_pd_0 },
- { nvc0_grctx_init_rstr2d_0 },
- { nvc0_grctx_init_scc_0 },
- { nvd9_grctx_init_be_0 },
+static const struct gf100_gr_pack
+gf117_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gf119_grctx_init_fe_0 },
+ { gf100_grctx_init_pri_0 },
+ { gf100_grctx_init_memfmt_0 },
+ { gf117_grctx_init_ds_0 },
+ { gf117_grctx_init_pd_0 },
+ { gf100_grctx_init_rstr2d_0 },
+ { gf100_grctx_init_scc_0 },
+ { gf119_grctx_init_be_0 },
{}
};
-static const struct nvc0_graph_init
-nvd7_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -81,29 +83,29 @@ nvd7_grctx_init_setup_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd7_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvd9_grctx_init_prop_0 },
- { nvd9_grctx_init_gpc_unk_1 },
- { nvd7_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvd9_grctx_init_crstr_0 },
- { nvc1_grctx_init_gpm_0 },
- { nvc0_grctx_init_gcc_0 },
+static const struct gf100_gr_pack
+gf117_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf119_grctx_init_prop_0 },
+ { gf119_grctx_init_gpc_unk_1 },
+ { gf117_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf119_grctx_init_crstr_0 },
+ { gf108_grctx_init_gpm_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-const struct nvc0_graph_init
-nvd7_grctx_init_pe_0[] = {
+const struct gf100_gr_init
+gf117_grctx_init_pe_0[] = {
{ 0x419848, 1, 0x04, 0x00000000 },
{ 0x419864, 1, 0x04, 0x00000129 },
{ 0x419888, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-nvd7_grctx_init_tex_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -116,8 +118,8 @@ nvd7_grctx_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd7_grctx_init_mpc_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000000a },
{ 0x419c04, 1, 0x04, 0x00000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
@@ -127,32 +129,32 @@ nvd7_grctx_init_mpc_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd7_grctx_pack_tpc[] = {
- { nvd7_grctx_init_pe_0 },
- { nvd7_grctx_init_tex_0 },
- { nvd7_grctx_init_mpc_0 },
- { nvc4_grctx_init_l1c_0 },
- { nvd9_grctx_init_sm_0 },
+static const struct gf100_gr_pack
+gf117_grctx_pack_tpc[] = {
+ { gf117_grctx_init_pe_0 },
+ { gf117_grctx_init_tex_0 },
+ { gf117_grctx_init_mpc_0 },
+ { gf104_grctx_init_l1c_0 },
+ { gf119_grctx_init_sm_0 },
{}
};
-static const struct nvc0_graph_init
-nvd7_grctx_init_pes_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_pes_0[] = {
{ 0x41be24, 1, 0x04, 0x00000002 },
{}
};
-static const struct nvc0_graph_init
-nvd7_grctx_init_cbm_0[] = {
+static const struct gf100_gr_init
+gf117_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x12180000 },
{ 0x41bec4, 1, 0x04, 0x00003fff },
{ 0x41bee4, 1, 0x04, 0x03240218 },
{}
};
-const struct nvc0_graph_init
-nvd7_grctx_init_wwdx_0[] = {
+const struct gf100_gr_init
+gf117_grctx_init_wwdx_0[] = {
{ 0x41bf00, 1, 0x04, 0x0a418820 },
{ 0x41bf04, 1, 0x04, 0x062080e6 },
{ 0x41bf08, 1, 0x04, 0x020398a4 },
@@ -165,11 +167,11 @@ nvd7_grctx_init_wwdx_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd7_grctx_pack_ppc[] = {
- { nvd7_grctx_init_pes_0 },
- { nvd7_grctx_init_cbm_0 },
- { nvd7_grctx_init_wwdx_0 },
+static const struct gf100_gr_pack
+gf117_grctx_pack_ppc[] = {
+ { gf117_grctx_init_pes_0 },
+ { gf117_grctx_init_cbm_0 },
+ { gf117_grctx_init_wwdx_0 },
{}
};
@@ -178,10 +180,10 @@ nvd7_grctx_pack_ppc[] = {
******************************************************************************/
void
-nvd7_grctx_generate_attrib(struct nvc0_grctx *info)
+gf117_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct nvc0_graph_priv *priv = info->priv;
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ struct gf100_gr_priv *priv = info->priv;
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
const u32 alpha = impl->alpha_nr;
const u32 beta = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
@@ -215,18 +217,18 @@ nvd7_grctx_generate_attrib(struct nvc0_grctx *info)
}
void
-nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+gf117_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
{
- struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
- nvc0_graph_mmio(priv, oclass->hub);
- nvc0_graph_mmio(priv, oclass->gpc);
- nvc0_graph_mmio(priv, oclass->zcull);
- nvc0_graph_mmio(priv, oclass->tpc);
- nvc0_graph_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(priv, oclass->hub);
+ gf100_gr_mmio(priv, oclass->gpc);
+ gf100_gr_mmio(priv, oclass->zcull);
+ gf100_gr_mmio(priv, oclass->tpc);
+ gf100_gr_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -235,46 +237,46 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
oclass->attrib(info);
oclass->unkn(priv);
- nvc0_grctx_generate_tpcid(priv);
- nvc0_grctx_generate_r406028(priv);
- nvc0_grctx_generate_r4060a8(priv);
- nve4_grctx_generate_r418bb8(priv);
- nvc0_grctx_generate_r406800(priv);
+ gf100_grctx_generate_tpcid(priv);
+ gf100_grctx_generate_r406028(priv);
+ gf100_grctx_generate_r4060a8(priv);
+ gk104_grctx_generate_r418bb8(priv);
+ gf100_grctx_generate_r406800(priv);
for (i = 0; i < 8; i++)
nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
- nvc0_graph_icmd(priv, oclass->icmd);
+ gf100_gr_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
- nvc0_graph_mthd(priv, oclass->mthd);
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
+ gf100_gr_mthd(priv, oclass->mthd);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
}
-struct nouveau_oclass *
-nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd7),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nvd7_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nvd7_grctx_pack_hub,
- .gpc = nvd7_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvd7_grctx_pack_tpc,
- .ppc = nvd7_grctx_pack_ppc,
- .icmd = nvd9_grctx_pack_icmd,
- .mthd = nvd9_grctx_pack_mthd,
- .bundle = nvc0_grctx_generate_bundle,
+ .main = gf117_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gf117_grctx_pack_hub,
+ .gpc = gf117_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gf117_grctx_pack_tpc,
+ .ppc = gf117_grctx_pack_ppc,
+ .icmd = gf119_grctx_pack_icmd,
+ .mthd = gf119_grctx_pack_mthd,
+ .bundle = gf100_grctx_generate_bundle,
.bundle_size = 0x1800,
- .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool = gf100_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvd7_grctx_generate_attrib,
+ .attrib = gf117_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index b9a301b6fd9f..8d8761443809 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "ctxnvc0.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvd9_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
{ 0x000038, 1, 0x01, 0x0fac6881 },
@@ -270,14 +269,14 @@ nvd9_grctx_init_icmd_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvd9_grctx_pack_icmd[] = {
- { nvd9_grctx_init_icmd_0 },
+const struct gf100_gr_pack
+gf119_grctx_pack_icmd[] = {
+ { gf119_grctx_init_icmd_0 },
{}
};
-static const struct nvc0_graph_init
-nvd9_grctx_init_90c0_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_90c0_0[] = {
{ 0x002700, 8, 0x20, 0x00000000 },
{ 0x002704, 8, 0x20, 0x00000000 },
{ 0x002708, 8, 0x20, 0x00000000 },
@@ -299,19 +298,19 @@ nvd9_grctx_init_90c0_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvd9_grctx_pack_mthd[] = {
- { nvc1_grctx_init_9097_0, 0x9097 },
- { nvc8_grctx_init_9197_0, 0x9197 },
- { nvc8_grctx_init_9297_0, 0x9297 },
- { nvc0_grctx_init_902d_0, 0x902d },
- { nvc0_grctx_init_9039_0, 0x9039 },
- { nvd9_grctx_init_90c0_0, 0x90c0 },
+const struct gf100_gr_pack
+gf119_grctx_pack_mthd[] = {
+ { gf108_grctx_init_9097_0, 0x9097 },
+ { gf110_grctx_init_9197_0, 0x9197 },
+ { gf110_grctx_init_9297_0, 0x9297 },
+ { gf100_grctx_init_902d_0, 0x902d },
+ { gf100_grctx_init_9039_0, 0x9039 },
+ { gf119_grctx_init_90c0_0, 0x90c0 },
{}
};
-const struct nvc0_graph_init
-nvd9_grctx_init_fe_0[] = {
+const struct gf100_gr_init
+gf119_grctx_init_fe_0[] = {
{ 0x404004, 10, 0x04, 0x00000000 },
{ 0x404044, 1, 0x04, 0x00000000 },
{ 0x404094, 13, 0x04, 0x00000000 },
@@ -331,8 +330,8 @@ nvd9_grctx_init_fe_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd9_grctx_init_ds_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180218 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -344,8 +343,8 @@ nvd9_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd9_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x000103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -356,8 +355,8 @@ nvd9_grctx_init_pd_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_grctx_init_be_0[] = {
+const struct gf100_gr_init
+gf119_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1043e005 },
@@ -368,22 +367,22 @@ nvd9_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd9_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nvd9_grctx_init_fe_0 },
- { nvc0_grctx_init_pri_0 },
- { nvc0_grctx_init_memfmt_0 },
- { nvd9_grctx_init_ds_0 },
- { nvd9_grctx_init_pd_0 },
- { nvc0_grctx_init_rstr2d_0 },
- { nvc0_grctx_init_scc_0 },
- { nvd9_grctx_init_be_0 },
+static const struct gf100_gr_pack
+gf119_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gf119_grctx_init_fe_0 },
+ { gf100_grctx_init_pri_0 },
+ { gf100_grctx_init_memfmt_0 },
+ { gf119_grctx_init_ds_0 },
+ { gf119_grctx_init_pd_0 },
+ { gf100_grctx_init_rstr2d_0 },
+ { gf100_grctx_init_scc_0 },
+ { gf119_grctx_init_be_0 },
{}
};
-const struct nvc0_graph_init
-nvd9_grctx_init_prop_0[] = {
+const struct gf100_gr_init
+gf119_grctx_init_prop_0[] = {
{ 0x418400, 1, 0x04, 0x38004e00 },
{ 0x418404, 1, 0x04, 0x71e0ffff },
{ 0x41840c, 1, 0x04, 0x00001008 },
@@ -395,8 +394,8 @@ nvd9_grctx_init_prop_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_grctx_init_gpc_unk_1[] = {
+const struct gf100_gr_init
+gf119_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000001f },
{ 0x418684, 1, 0x04, 0x0000000f },
{ 0x418700, 1, 0x04, 0x00000002 },
@@ -405,8 +404,8 @@ nvd9_grctx_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
-nvd9_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00008442 },
@@ -418,8 +417,8 @@ nvd9_grctx_init_setup_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_grctx_init_crstr_0[] = {
+const struct gf100_gr_init
+gf119_grctx_init_crstr_0[] = {
{ 0x418b00, 1, 0x04, 0x00000006 },
{ 0x418b08, 1, 0x04, 0x0a418820 },
{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -431,21 +430,21 @@ nvd9_grctx_init_crstr_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd9_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvd9_grctx_init_prop_0 },
- { nvd9_grctx_init_gpc_unk_1 },
- { nvd9_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvd9_grctx_init_crstr_0 },
- { nvc1_grctx_init_gpm_0 },
- { nvc0_grctx_init_gcc_0 },
+static const struct gf100_gr_pack
+gf119_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf119_grctx_init_prop_0 },
+ { gf119_grctx_init_gpc_unk_1 },
+ { gf119_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf119_grctx_init_crstr_0 },
+ { gf108_grctx_init_gpm_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-static const struct nvc0_graph_init
-nvd9_grctx_init_tex_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000001f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -458,8 +457,8 @@ nvd9_grctx_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd9_grctx_init_mpc_0[] = {
+static const struct gf100_gr_init
+gf119_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000000a },
{ 0x419c04, 1, 0x04, 0x00000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
@@ -469,8 +468,8 @@ nvd9_grctx_init_mpc_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_grctx_init_sm_0[] = {
+const struct gf100_gr_init
+gf119_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000002 },
{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -483,15 +482,15 @@ nvd9_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvd9_grctx_pack_tpc[] = {
- { nvc1_grctx_init_pe_0 },
- { nvd9_grctx_init_tex_0 },
- { nvc1_grctx_init_wwdx_0 },
- { nvd9_grctx_init_mpc_0 },
- { nvc4_grctx_init_l1c_0 },
- { nvc1_grctx_init_tpccs_0 },
- { nvd9_grctx_init_sm_0 },
+static const struct gf100_gr_pack
+gf119_grctx_pack_tpc[] = {
+ { gf108_grctx_init_pe_0 },
+ { gf119_grctx_init_tex_0 },
+ { gf108_grctx_init_wwdx_0 },
+ { gf119_grctx_init_mpc_0 },
+ { gf104_grctx_init_l1c_0 },
+ { gf108_grctx_init_tpccs_0 },
+ { gf119_grctx_init_sm_0 },
{}
};
@@ -499,30 +498,30 @@ nvd9_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nouveau_oclass *
-nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd9),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nvc0_grctx_generate_main,
- .unkn = nvc1_grctx_generate_unkn,
- .hub = nvd9_grctx_pack_hub,
- .gpc = nvd9_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvd9_grctx_pack_tpc,
- .icmd = nvd9_grctx_pack_icmd,
- .mthd = nvd9_grctx_pack_mthd,
- .bundle = nvc0_grctx_generate_bundle,
+ .main = gf100_grctx_generate_main,
+ .unkn = gf108_grctx_generate_unkn,
+ .hub = gf119_grctx_pack_hub,
+ .gpc = gf119_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gf119_grctx_pack_tpc,
+ .icmd = gf119_grctx_pack_icmd,
+ .mthd = gf119_grctx_pack_mthd,
+ .bundle = gf100_grctx_generate_bundle,
.bundle_size = 0x1800,
- .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool = gf100_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvc1_grctx_generate_attrib,
+ .attrib = gf108_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index ccac2ee1a1cb..b52300d8861a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -21,15 +21,17 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "ctxgf100.h"
-#include "ctxnvc0.h"
+#include <subdev/fb.h>
+#include <subdev/mc.h>
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nve4_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x000039, 3, 0x01, 0x00000000 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -272,14 +274,14 @@ nve4_grctx_init_icmd_0[] = {
{}
};
-const struct nvc0_graph_pack
-nve4_grctx_pack_icmd[] = {
- { nve4_grctx_init_icmd_0 },
+const struct gf100_gr_pack
+gk104_grctx_pack_icmd[] = {
+ { gk104_grctx_init_icmd_0 },
{}
};
-const struct nvc0_graph_init
-nve4_grctx_init_a097_0[] = {
+const struct gf100_gr_init
+gk104_grctx_init_a097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -578,15 +580,15 @@ nve4_grctx_init_a097_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nve4_grctx_pack_mthd[] = {
- { nve4_grctx_init_a097_0, 0xa097 },
- { nvc0_grctx_init_902d_0, 0x902d },
+static const struct gf100_gr_pack
+gk104_grctx_pack_mthd[] = {
+ { gk104_grctx_init_a097_0, 0xa097 },
+ { gf100_grctx_init_902d_0, 0x902d },
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_fe_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_fe_0[] = {
{ 0x404010, 5, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
{ 0x404028, 1, 0x04, 0x00000000 },
@@ -606,8 +608,8 @@ nve4_grctx_init_fe_0[] = {
{}
};
-const struct nvc0_graph_init
-nve4_grctx_init_memfmt_0[] = {
+const struct gf100_gr_init
+gk104_grctx_init_memfmt_0[] = {
{ 0x404604, 1, 0x04, 0x00000014 },
{ 0x404608, 1, 0x04, 0x00000000 },
{ 0x40460c, 1, 0x04, 0x00003fff },
@@ -632,8 +634,8 @@ nve4_grctx_init_memfmt_0[] = {
{}
};
-const struct nvc0_graph_init
-nve4_grctx_init_ds_0[] = {
+const struct gf100_gr_init
+gk104_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180648 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -645,15 +647,15 @@ nve4_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_cwd_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_cwd_0[] = {
{ 0x405b00, 1, 0x04, 0x00000000 },
{ 0x405b10, 1, 0x04, 0x00001000 },
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x004103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -667,14 +669,14 @@ nve4_grctx_init_pd_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_sked_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_sked_0[] = {
{ 0x407040, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nve4_grctx_init_scc_0[] = {
+const struct gf100_gr_init
+gk104_grctx_init_scc_0[] = {
{ 0x408000, 2, 0x04, 0x00000000 },
{ 0x408008, 1, 0x04, 0x00000030 },
{ 0x40800c, 2, 0x04, 0x00000000 },
@@ -684,8 +686,8 @@ nve4_grctx_init_scc_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_be_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x02802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1043e005 },
@@ -697,24 +699,24 @@ nve4_grctx_init_be_0[] = {
{}
};
-const struct nvc0_graph_pack
-nve4_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nve4_grctx_init_fe_0 },
- { nvc0_grctx_init_pri_0 },
- { nve4_grctx_init_memfmt_0 },
- { nve4_grctx_init_ds_0 },
- { nve4_grctx_init_cwd_0 },
- { nve4_grctx_init_pd_0 },
- { nve4_grctx_init_sked_0 },
- { nvc0_grctx_init_rstr2d_0 },
- { nve4_grctx_init_scc_0 },
- { nve4_grctx_init_be_0 },
+const struct gf100_gr_pack
+gk104_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gk104_grctx_init_fe_0 },
+ { gf100_grctx_init_pri_0 },
+ { gk104_grctx_init_memfmt_0 },
+ { gk104_grctx_init_ds_0 },
+ { gk104_grctx_init_cwd_0 },
+ { gk104_grctx_init_pd_0 },
+ { gk104_grctx_init_sked_0 },
+ { gf100_grctx_init_rstr2d_0 },
+ { gk104_grctx_init_scc_0 },
+ { gk104_grctx_init_be_0 },
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 3, 0x04, 0x00000000 },
{ 0x418828, 1, 0x04, 0x00000044 },
@@ -726,8 +728,8 @@ nve4_grctx_init_setup_0[] = {
{}
};
-const struct nvc0_graph_init
-nve4_grctx_init_gpm_0[] = {
+const struct gf100_gr_init
+gk104_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c40, 1, 0x04, 0xffffffff },
@@ -737,21 +739,21 @@ nve4_grctx_init_gpm_0[] = {
{}
};
-const struct nvc0_graph_pack
-nve4_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvd9_grctx_init_prop_0 },
- { nvd9_grctx_init_gpc_unk_1 },
- { nve4_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvd9_grctx_init_crstr_0 },
- { nve4_grctx_init_gpm_0 },
- { nvc0_grctx_init_gcc_0 },
+const struct gf100_gr_pack
+gk104_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf119_grctx_init_prop_0 },
+ { gf119_grctx_init_gpc_unk_1 },
+ { gk104_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf119_grctx_init_crstr_0 },
+ { gk104_grctx_init_gpm_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_tex_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000000f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000021 },
@@ -765,8 +767,8 @@ nve4_grctx_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_mpc_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000000a },
{ 0x419c04, 1, 0x04, 0x80000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
@@ -776,15 +778,15 @@ nve4_grctx_init_mpc_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_l1c_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_l1c_0[] = {
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00003203 },
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_sm_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00000402 },
{ 0x419e44, 1, 0x04, 0x0013eff2 },
@@ -802,35 +804,35 @@ nve4_grctx_init_sm_0[] = {
{}
};
-const struct nvc0_graph_pack
-nve4_grctx_pack_tpc[] = {
- { nvd7_grctx_init_pe_0 },
- { nve4_grctx_init_tex_0 },
- { nve4_grctx_init_mpc_0 },
- { nve4_grctx_init_l1c_0 },
- { nve4_grctx_init_sm_0 },
+const struct gf100_gr_pack
+gk104_grctx_pack_tpc[] = {
+ { gf117_grctx_init_pe_0 },
+ { gk104_grctx_init_tex_0 },
+ { gk104_grctx_init_mpc_0 },
+ { gk104_grctx_init_l1c_0 },
+ { gk104_grctx_init_sm_0 },
{}
};
-const struct nvc0_graph_init
-nve4_grctx_init_pes_0[] = {
+const struct gf100_gr_init
+gk104_grctx_init_pes_0[] = {
{ 0x41be24, 1, 0x04, 0x00000006 },
{}
};
-static const struct nvc0_graph_init
-nve4_grctx_init_cbm_0[] = {
+static const struct gf100_gr_init
+gk104_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x12180000 },
{ 0x41bec4, 1, 0x04, 0x00037f7f },
{ 0x41bee4, 1, 0x04, 0x06480430 },
{}
};
-const struct nvc0_graph_pack
-nve4_grctx_pack_ppc[] = {
- { nve4_grctx_init_pes_0 },
- { nve4_grctx_init_cbm_0 },
- { nvd7_grctx_init_wwdx_0 },
+const struct gf100_gr_pack
+gk104_grctx_pack_ppc[] = {
+ { gk104_grctx_init_pes_0 },
+ { gk104_grctx_init_cbm_0 },
+ { gf117_grctx_init_wwdx_0 },
{}
};
@@ -839,9 +841,9 @@ nve4_grctx_pack_ppc[] = {
******************************************************************************/
void
-nve4_grctx_generate_bundle(struct nvc0_grctx *info)
+gk104_grctx_generate_bundle(struct gf100_grctx *info)
{
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
impl->bundle_size / 0x20);
const u32 token_limit = impl->bundle_token_limit;
@@ -856,9 +858,9 @@ nve4_grctx_generate_bundle(struct nvc0_grctx *info)
}
void
-nve4_grctx_generate_pagepool(struct nvc0_grctx *info)
+gk104_grctx_generate_pagepool(struct gf100_grctx *info)
{
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
@@ -870,7 +872,7 @@ nve4_grctx_generate_pagepool(struct nvc0_grctx *info)
}
void
-nve4_grctx_generate_unkn(struct nvc0_graph_priv *priv)
+gk104_grctx_generate_unkn(struct gf100_gr_priv *priv)
{
nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
@@ -881,7 +883,7 @@ nve4_grctx_generate_unkn(struct nvc0_graph_priv *priv)
}
void
-nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *priv)
+gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
{
u32 data[6] = {}, data2[2] = {};
u8 tpcnr[GPC_MAX];
@@ -939,18 +941,18 @@ nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *priv)
}
void
-nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
{
- struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
- nvc0_graph_mmio(priv, oclass->hub);
- nvc0_graph_mmio(priv, oclass->gpc);
- nvc0_graph_mmio(priv, oclass->zcull);
- nvc0_graph_mmio(priv, oclass->tpc);
- nvc0_graph_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(priv, oclass->hub);
+ gf100_gr_mmio(priv, oclass->gpc);
+ gf100_gr_mmio(priv, oclass->zcull);
+ gf100_gr_mmio(priv, oclass->tpc);
+ gf100_gr_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -959,10 +961,10 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
oclass->attrib(info);
oclass->unkn(priv);
- nvc0_grctx_generate_tpcid(priv);
- nvc0_grctx_generate_r406028(priv);
- nve4_grctx_generate_r418bb8(priv);
- nvc0_grctx_generate_r406800(priv);
+ gf100_grctx_generate_tpcid(priv);
+ gf100_grctx_generate_r406028(priv);
+ gk104_grctx_generate_r418bb8(priv);
+ gf100_grctx_generate_r406800(priv);
for (i = 0; i < 8; i++)
nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -977,42 +979,42 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
}
nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
- nvc0_graph_icmd(priv, oclass->icmd);
+ gf100_gr_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
- nvc0_graph_mthd(priv, oclass->mthd);
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
+ gf100_gr_mthd(priv, oclass->mthd);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
}
-struct nouveau_oclass *
-nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xe4),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nve4_grctx_pack_hub,
- .gpc = nve4_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nve4_grctx_pack_tpc,
- .ppc = nve4_grctx_pack_ppc,
- .icmd = nve4_grctx_pack_icmd,
- .mthd = nve4_grctx_pack_mthd,
- .bundle = nve4_grctx_generate_bundle,
+ .main = gk104_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gk104_grctx_pack_hub,
+ .gpc = gk104_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gk104_grctx_pack_tpc,
+ .ppc = gk104_grctx_pack_ppc,
+ .icmd = gk104_grctx_pack_icmd,
+ .mthd = gk104_grctx_pack_mthd,
+ .bundle = gk104_grctx_generate_bundle,
.bundle_size = 0x3000,
.bundle_min_gpm_fifo_depth = 0x180,
.bundle_token_limit = 0x600,
- .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool = gk104_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvd7_grctx_generate_attrib,
+ .attrib = gf117_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index e9b0dcf95a49..b3f58be04e9c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "ctxnvc0.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvf0_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x000039, 3, 0x01, 0x00000000 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -279,14 +278,14 @@ nvf0_grctx_init_icmd_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvf0_grctx_pack_icmd[] = {
- { nvf0_grctx_init_icmd_0 },
+const struct gf100_gr_pack
+gk110_grctx_pack_icmd[] = {
+ { gk110_grctx_init_icmd_0 },
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_a197_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_a197_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
{ 0x000808, 8, 0x40, 0x00000400 },
@@ -587,15 +586,15 @@ nvf0_grctx_init_a197_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvf0_grctx_pack_mthd[] = {
- { nvf0_grctx_init_a197_0, 0xa197 },
- { nvc0_grctx_init_902d_0, 0x902d },
+const struct gf100_gr_pack
+gk110_grctx_pack_mthd[] = {
+ { gk110_grctx_init_a197_0, 0xa197 },
+ { gf100_grctx_init_902d_0, 0x902d },
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_fe_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_fe_0[] = {
{ 0x404004, 8, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
{ 0x404028, 8, 0x04, 0x00000000 },
@@ -620,8 +619,8 @@ nvf0_grctx_init_fe_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_grctx_init_pri_0[] = {
+const struct gf100_gr_init
+gk110_grctx_init_pri_0[] = {
{ 0x404404, 12, 0x04, 0x00000000 },
{ 0x404438, 1, 0x04, 0x00000000 },
{ 0x404460, 2, 0x04, 0x00000000 },
@@ -632,16 +631,16 @@ nvf0_grctx_init_pri_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_grctx_init_cwd_0[] = {
+const struct gf100_gr_init
+gk110_grctx_init_cwd_0[] = {
{ 0x405b00, 1, 0x04, 0x00000000 },
{ 0x405b10, 1, 0x04, 0x00001000 },
{ 0x405b20, 1, 0x04, 0x04000000 },
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x034103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -655,8 +654,8 @@ nvf0_grctx_init_pd_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_be_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x12802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1003e005 },
@@ -668,23 +667,23 @@ nvf0_grctx_init_be_0[] = {
{}
};
-const struct nvc0_graph_pack
-nvf0_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nvf0_grctx_init_fe_0 },
- { nvf0_grctx_init_pri_0 },
- { nve4_grctx_init_memfmt_0 },
- { nve4_grctx_init_ds_0 },
- { nvf0_grctx_init_cwd_0 },
- { nvf0_grctx_init_pd_0 },
- { nvc0_grctx_init_rstr2d_0 },
- { nve4_grctx_init_scc_0 },
- { nvf0_grctx_init_be_0 },
+const struct gf100_gr_pack
+gk110_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gk110_grctx_init_fe_0 },
+ { gk110_grctx_init_pri_0 },
+ { gk104_grctx_init_memfmt_0 },
+ { gk104_grctx_init_ds_0 },
+ { gk110_grctx_init_cwd_0 },
+ { gk110_grctx_init_pd_0 },
+ { gf100_grctx_init_rstr2d_0 },
+ { gk104_grctx_init_scc_0 },
+ { gk110_grctx_init_be_0 },
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006860a },
{ 0x418808, 1, 0x04, 0x00000000 },
{ 0x41880c, 1, 0x04, 0x00000030 },
@@ -698,28 +697,28 @@ nvf0_grctx_init_setup_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_grctx_init_gpc_unk_2[] = {
+const struct gf100_gr_init
+gk110_grctx_init_gpc_unk_2[] = {
{ 0x418d24, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_pack
-nvf0_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nvd9_grctx_init_prop_0 },
- { nvd9_grctx_init_gpc_unk_1 },
- { nvf0_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nvd9_grctx_init_crstr_0 },
- { nve4_grctx_init_gpm_0 },
- { nvf0_grctx_init_gpc_unk_2 },
- { nvc0_grctx_init_gcc_0 },
+const struct gf100_gr_pack
+gk110_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gf119_grctx_init_prop_0 },
+ { gf119_grctx_init_gpc_unk_1 },
+ { gk110_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gf119_grctx_init_crstr_0 },
+ { gk104_grctx_init_gpm_0 },
+ { gk110_grctx_init_gpc_unk_2 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-const struct nvc0_graph_init
-nvf0_grctx_init_tex_0[] = {
+const struct gf100_gr_init
+gk110_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000000f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000021 },
@@ -733,8 +732,8 @@ nvf0_grctx_init_tex_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_grctx_init_mpc_0[] = {
+const struct gf100_gr_init
+gk110_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000001a },
{ 0x419c04, 1, 0x04, 0x80000006 },
{ 0x419c08, 1, 0x04, 0x00000002 },
@@ -744,15 +743,15 @@ nvf0_grctx_init_mpc_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_grctx_init_l1c_0[] = {
+const struct gf100_gr_init
+gk110_grctx_init_l1c_0[] = {
{ 0x419ce8, 1, 0x04, 0x00000000 },
{ 0x419cf4, 1, 0x04, 0x00000203 },
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_sm_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_sm_0[] = {
{ 0x419e04, 1, 0x04, 0x00000000 },
{ 0x419e08, 1, 0x04, 0x0000001d },
{ 0x419e0c, 1, 0x04, 0x00000000 },
@@ -779,29 +778,29 @@ nvf0_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvf0_grctx_pack_tpc[] = {
- { nvd7_grctx_init_pe_0 },
- { nvf0_grctx_init_tex_0 },
- { nvf0_grctx_init_mpc_0 },
- { nvf0_grctx_init_l1c_0 },
- { nvf0_grctx_init_sm_0 },
+static const struct gf100_gr_pack
+gk110_grctx_pack_tpc[] = {
+ { gf117_grctx_init_pe_0 },
+ { gk110_grctx_init_tex_0 },
+ { gk110_grctx_init_mpc_0 },
+ { gk110_grctx_init_l1c_0 },
+ { gk110_grctx_init_sm_0 },
{}
};
-static const struct nvc0_graph_init
-nvf0_grctx_init_cbm_0[] = {
+static const struct gf100_gr_init
+gk110_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x10000000 },
{ 0x41bec4, 1, 0x04, 0x00037f7f },
{ 0x41bee4, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_pack
-nvf0_grctx_pack_ppc[] = {
- { nve4_grctx_init_pes_0 },
- { nvf0_grctx_init_cbm_0 },
- { nvd7_grctx_init_wwdx_0 },
+const struct gf100_gr_pack
+gk110_grctx_pack_ppc[] = {
+ { gk104_grctx_init_pes_0 },
+ { gk110_grctx_init_cbm_0 },
+ { gf117_grctx_init_wwdx_0 },
{}
};
@@ -809,33 +808,33 @@ nvf0_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nouveau_oclass *
-nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nvf0_grctx_pack_hub,
- .gpc = nvf0_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nvf0_grctx_pack_tpc,
- .ppc = nvf0_grctx_pack_ppc,
- .icmd = nvf0_grctx_pack_icmd,
- .mthd = nvf0_grctx_pack_mthd,
- .bundle = nve4_grctx_generate_bundle,
+ .main = gk104_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gk110_grctx_pack_hub,
+ .gpc = gk110_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gk110_grctx_pack_tpc,
+ .ppc = gk110_grctx_pack_ppc,
+ .icmd = gk110_grctx_pack_icmd,
+ .mthd = gk110_grctx_pack_mthd,
+ .bundle = gk104_grctx_generate_bundle,
.bundle_size = 0x3000,
.bundle_min_gpm_fifo_depth = 0x180,
.bundle_token_limit = 0x7c0,
- .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool = gk104_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvd7_grctx_generate_attrib,
+ .attrib = gf117_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 3adb7fe91772..b11c26794fde 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -21,14 +21,13 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "ctxnvc0.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gk110b_grctx_init_sm_0[] = {
{ 0x419e04, 1, 0x04, 0x00000000 },
{ 0x419e08, 1, 0x04, 0x0000001d },
@@ -56,12 +55,12 @@ gk110b_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gk110b_grctx_pack_tpc[] = {
- { nvd7_grctx_init_pe_0 },
- { nvf0_grctx_init_tex_0 },
- { nvf0_grctx_init_mpc_0 },
- { nvf0_grctx_init_l1c_0 },
+ { gf117_grctx_init_pe_0 },
+ { gk110_grctx_init_tex_0 },
+ { gk110_grctx_init_mpc_0 },
+ { gk110_grctx_init_l1c_0 },
{ gk110b_grctx_init_sm_0 },
{}
};
@@ -70,33 +69,33 @@ gk110b_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nouveau_oclass *
-gk110b_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf1),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nvf0_grctx_pack_hub,
- .gpc = nvf0_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
+ .main = gk104_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gk110_grctx_pack_hub,
+ .gpc = gk110_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
.tpc = gk110b_grctx_pack_tpc,
- .ppc = nvf0_grctx_pack_ppc,
- .icmd = nvf0_grctx_pack_icmd,
- .mthd = nvf0_grctx_pack_mthd,
- .bundle = nve4_grctx_generate_bundle,
+ .ppc = gk110_grctx_pack_ppc,
+ .icmd = gk110_grctx_pack_icmd,
+ .mthd = gk110_grctx_pack_mthd,
+ .bundle = gk104_grctx_generate_bundle,
.bundle_size = 0x3000,
.bundle_min_gpm_fifo_depth = 0x180,
.bundle_token_limit = 0x600,
- .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool = gk104_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvd7_grctx_generate_attrib,
+ .attrib = gf117_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index ce252adbef81..6e8ce9fc311a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "ctxnvc0.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nv108_grctx_init_icmd_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x000039, 3, 0x01, 0x00000000 },
{ 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -278,14 +277,14 @@ nv108_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nv108_grctx_pack_icmd[] = {
- { nv108_grctx_init_icmd_0 },
+static const struct gf100_gr_pack
+gk208_grctx_pack_icmd[] = {
+ { gk208_grctx_init_icmd_0 },
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_fe_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_fe_0[] = {
{ 0x404004, 8, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
{ 0x404028, 8, 0x04, 0x00000000 },
@@ -311,8 +310,8 @@ nv108_grctx_init_fe_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_ds_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8000bf },
{ 0x405830, 1, 0x04, 0x02180648 },
{ 0x405834, 1, 0x04, 0x08000000 },
@@ -325,8 +324,8 @@ nv108_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_pd_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x034103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
{ 0x4064a8, 1, 0x04, 0x00000000 },
@@ -340,8 +339,8 @@ nv108_grctx_init_pd_0[] = {
{}
};
-const struct nvc0_graph_init
-nv108_grctx_init_rstr2d_0[] = {
+const struct gf100_gr_init
+gk208_grctx_init_rstr2d_0[] = {
{ 0x407804, 1, 0x04, 0x00000063 },
{ 0x40780c, 1, 0x04, 0x0a418820 },
{ 0x407810, 1, 0x04, 0x062080e6 },
@@ -353,8 +352,8 @@ nv108_grctx_init_rstr2d_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_be_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x32802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
{ 0x408808, 1, 0x04, 0x1003e005 },
@@ -366,23 +365,23 @@ nv108_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nv108_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
- { nv108_grctx_init_fe_0 },
- { nvf0_grctx_init_pri_0 },
- { nve4_grctx_init_memfmt_0 },
- { nv108_grctx_init_ds_0 },
- { nvf0_grctx_init_cwd_0 },
- { nv108_grctx_init_pd_0 },
- { nv108_grctx_init_rstr2d_0 },
- { nve4_grctx_init_scc_0 },
- { nv108_grctx_init_be_0 },
+static const struct gf100_gr_pack
+gk208_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gk208_grctx_init_fe_0 },
+ { gk110_grctx_init_pri_0 },
+ { gk104_grctx_init_memfmt_0 },
+ { gk208_grctx_init_ds_0 },
+ { gk110_grctx_init_cwd_0 },
+ { gk208_grctx_init_pd_0 },
+ { gk208_grctx_init_rstr2d_0 },
+ { gk104_grctx_init_scc_0 },
+ { gk208_grctx_init_be_0 },
{}
};
-const struct nvc0_graph_init
-nv108_grctx_init_prop_0[] = {
+const struct gf100_gr_init
+gk208_grctx_init_prop_0[] = {
{ 0x418400, 1, 0x04, 0x38005e00 },
{ 0x418404, 1, 0x04, 0x71e0ffff },
{ 0x41840c, 1, 0x04, 0x00001008 },
@@ -394,8 +393,8 @@ nv108_grctx_init_prop_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_gpc_unk_1[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000007f },
{ 0x418684, 1, 0x04, 0x0000001f },
{ 0x418700, 1, 0x04, 0x00000002 },
@@ -404,8 +403,8 @@ nv108_grctx_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_setup_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006863a },
{ 0x418808, 1, 0x04, 0x00000000 },
{ 0x41880c, 1, 0x04, 0x00000030 },
@@ -419,8 +418,8 @@ nv108_grctx_init_setup_0[] = {
{}
};
-const struct nvc0_graph_init
-nv108_grctx_init_crstr_0[] = {
+const struct gf100_gr_init
+gk208_grctx_init_crstr_0[] = {
{ 0x418b00, 1, 0x04, 0x0000001e },
{ 0x418b08, 1, 0x04, 0x0a418820 },
{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -432,8 +431,8 @@ nv108_grctx_init_crstr_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_gpm_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_gpm_0[] = {
{ 0x418c08, 1, 0x04, 0x00000001 },
{ 0x418c10, 8, 0x04, 0x00000000 },
{ 0x418c40, 1, 0x04, 0xffffffff },
@@ -443,22 +442,22 @@ nv108_grctx_init_gpm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nv108_grctx_pack_gpc[] = {
- { nvc0_grctx_init_gpc_unk_0 },
- { nv108_grctx_init_prop_0 },
- { nv108_grctx_init_gpc_unk_1 },
- { nv108_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nv108_grctx_init_crstr_0 },
- { nv108_grctx_init_gpm_0 },
- { nvf0_grctx_init_gpc_unk_2 },
- { nvc0_grctx_init_gcc_0 },
+static const struct gf100_gr_pack
+gk208_grctx_pack_gpc[] = {
+ { gf100_grctx_init_gpc_unk_0 },
+ { gk208_grctx_init_prop_0 },
+ { gk208_grctx_init_gpc_unk_1 },
+ { gk208_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gk208_grctx_init_crstr_0 },
+ { gk208_grctx_init_gpm_0 },
+ { gk110_grctx_init_gpc_unk_2 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_tex_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000100f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
{ 0x419a08, 1, 0x04, 0x00000421 },
@@ -472,8 +471,8 @@ nv108_grctx_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_sm_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_sm_0[] = {
{ 0x419e04, 1, 0x04, 0x00000000 },
{ 0x419e08, 1, 0x04, 0x0000001d },
{ 0x419e0c, 1, 0x04, 0x00000000 },
@@ -500,18 +499,18 @@ nv108_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nv108_grctx_pack_tpc[] = {
- { nvd7_grctx_init_pe_0 },
- { nv108_grctx_init_tex_0 },
- { nvf0_grctx_init_mpc_0 },
- { nvf0_grctx_init_l1c_0 },
- { nv108_grctx_init_sm_0 },
+static const struct gf100_gr_pack
+gk208_grctx_pack_tpc[] = {
+ { gf117_grctx_init_pe_0 },
+ { gk208_grctx_init_tex_0 },
+ { gk110_grctx_init_mpc_0 },
+ { gk110_grctx_init_l1c_0 },
+ { gk208_grctx_init_sm_0 },
{}
};
-static const struct nvc0_graph_init
-nv108_grctx_init_cbm_0[] = {
+static const struct gf100_gr_init
+gk208_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x10000000 },
{ 0x41bec4, 1, 0x04, 0x00037f7f },
{ 0x41bee4, 1, 0x04, 0x00000000 },
@@ -519,11 +518,11 @@ nv108_grctx_init_cbm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nv108_grctx_pack_ppc[] = {
- { nve4_grctx_init_pes_0 },
- { nv108_grctx_init_cbm_0 },
- { nvd7_grctx_init_wwdx_0 },
+static const struct gf100_gr_pack
+gk208_grctx_pack_ppc[] = {
+ { gk104_grctx_init_pes_0 },
+ { gk208_grctx_init_cbm_0 },
+ { gf117_grctx_init_wwdx_0 },
{}
};
@@ -531,33 +530,33 @@ nv108_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nouveau_oclass *
-nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nv108_grctx_pack_hub,
- .gpc = nv108_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nv108_grctx_pack_tpc,
- .ppc = nv108_grctx_pack_ppc,
- .icmd = nv108_grctx_pack_icmd,
- .mthd = nvf0_grctx_pack_mthd,
- .bundle = nve4_grctx_generate_bundle,
+ .main = gk104_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gk208_grctx_pack_hub,
+ .gpc = gk208_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gk208_grctx_pack_tpc,
+ .ppc = gk208_grctx_pack_ppc,
+ .icmd = gk208_grctx_pack_icmd,
+ .mthd = gk110_grctx_pack_mthd,
+ .bundle = gk104_grctx_generate_bundle,
.bundle_size = 0x3000,
.bundle_min_gpm_fifo_depth = 0xc2,
.bundle_token_limit = 0x200,
- .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool = gk104_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvd7_grctx_generate_attrib,
+ .attrib = gf117_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 36fc9831cc93..2f241f6f0f0a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -19,43 +19,42 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include "ctxgf100.h"
-#include "ctxnvc0.h"
-
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gk20a_grctx_pack_mthd[] = {
- { nve4_grctx_init_a097_0, 0xa297 },
- { nvc0_grctx_init_902d_0, 0x902d },
+ { gk104_grctx_init_a097_0, 0xa297 },
+ { gf100_grctx_init_902d_0, 0x902d },
{}
};
-struct nouveau_oclass *
-gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xea),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
- .main = nve4_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
- .hub = nve4_grctx_pack_hub,
- .gpc = nve4_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
- .tpc = nve4_grctx_pack_tpc,
- .ppc = nve4_grctx_pack_ppc,
- .icmd = nve4_grctx_pack_icmd,
+ .main = gk104_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gk104_grctx_pack_hub,
+ .gpc = gk104_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gk104_grctx_pack_tpc,
+ .ppc = gk104_grctx_pack_ppc,
+ .icmd = gk104_grctx_pack_icmd,
.mthd = gk20a_grctx_pack_mthd,
- .bundle = nve4_grctx_generate_bundle,
+ .bundle = gk104_grctx_generate_bundle,
.bundle_size = 0x1800,
.bundle_min_gpm_fifo_depth = 0x62,
.bundle_token_limit = 0x100,
- .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool = gk104_grctx_generate_pagepool,
.pagepool_size = 0x8000,
- .attrib = nvd7_grctx_generate_attrib,
+ .attrib = gf117_grctx_generate_attrib,
.attrib_nr_max = 0x240,
.attrib_nr = 0x240,
.alpha_nr_max = 0x648 + (0x648 / 2),
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 62e918b9fa81..956f4dce960c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -21,14 +21,16 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "ctxgf100.h"
-#include "ctxnvc0.h"
+#include <subdev/fb.h>
+#include <subdev/mc.h>
/*******************************************************************************
* PGRAPH context register lists
******************************************************************************/
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_icmd_0[] = {
{ 0x001000, 1, 0x01, 0x00000004 },
{ 0x000039, 3, 0x01, 0x00000000 },
@@ -287,13 +289,13 @@ gm107_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gm107_grctx_pack_icmd[] = {
{ gm107_grctx_init_icmd_0 },
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_b097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
@@ -610,14 +612,14 @@ gm107_grctx_init_b097_0[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gm107_grctx_pack_mthd[] = {
{ gm107_grctx_init_b097_0, 0xb097 },
- { nvc0_grctx_init_902d_0, 0x902d },
+ { gf100_grctx_init_902d_0, 0x902d },
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_fe_0[] = {
{ 0x404004, 8, 0x04, 0x00000000 },
{ 0x404024, 1, 0x04, 0x0000e000 },
@@ -639,7 +641,7 @@ gm107_grctx_init_fe_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_ds_0[] = {
{ 0x405800, 1, 0x04, 0x0f8001bf },
{ 0x405830, 1, 0x04, 0x0aa01000 },
@@ -653,7 +655,7 @@ gm107_grctx_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_pd_0[] = {
{ 0x406020, 1, 0x04, 0x07410001 },
{ 0x406028, 4, 0x04, 0x00000001 },
@@ -669,7 +671,7 @@ gm107_grctx_init_pd_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_be_0[] = {
{ 0x408800, 1, 0x04, 0x32802a3c },
{ 0x408804, 1, 0x04, 0x00000040 },
@@ -682,28 +684,28 @@ gm107_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gm107_grctx_pack_hub[] = {
- { nvc0_grctx_init_main_0 },
+ { gf100_grctx_init_main_0 },
{ gm107_grctx_init_fe_0 },
- { nvf0_grctx_init_pri_0 },
- { nve4_grctx_init_memfmt_0 },
+ { gk110_grctx_init_pri_0 },
+ { gk104_grctx_init_memfmt_0 },
{ gm107_grctx_init_ds_0 },
- { nvf0_grctx_init_cwd_0 },
+ { gk110_grctx_init_cwd_0 },
{ gm107_grctx_init_pd_0 },
- { nv108_grctx_init_rstr2d_0 },
- { nve4_grctx_init_scc_0 },
+ { gk208_grctx_init_rstr2d_0 },
+ { gk104_grctx_init_scc_0 },
{ gm107_grctx_init_be_0 },
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_gpc_unk_0[] = {
{ 0x418380, 1, 0x04, 0x00000056 },
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_gpc_unk_1[] = {
{ 0x418600, 1, 0x04, 0x0000007f },
{ 0x418684, 1, 0x04, 0x0000001f },
@@ -714,7 +716,7 @@ gm107_grctx_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_setup_0[] = {
{ 0x418800, 1, 0x04, 0x7006863a },
{ 0x418810, 1, 0x04, 0x00000000 },
@@ -727,7 +729,7 @@ gm107_grctx_init_setup_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_gpc_unk_2[] = {
{ 0x418d24, 1, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x90000000 },
@@ -741,21 +743,21 @@ gm107_grctx_init_gpc_unk_2[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gm107_grctx_pack_gpc[] = {
{ gm107_grctx_init_gpc_unk_0 },
- { nv108_grctx_init_prop_0 },
+ { gk208_grctx_init_prop_0 },
{ gm107_grctx_init_gpc_unk_1 },
{ gm107_grctx_init_setup_0 },
- { nvc0_grctx_init_zcull_0 },
- { nv108_grctx_init_crstr_0 },
- { nve4_grctx_init_gpm_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gk208_grctx_init_crstr_0 },
+ { gk104_grctx_init_gpm_0 },
{ gm107_grctx_init_gpc_unk_2 },
- { nvc0_grctx_init_gcc_0 },
+ { gf100_grctx_init_gcc_0 },
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000300f0 },
{ 0x419a04, 1, 0x04, 0x00000005 },
@@ -771,7 +773,7 @@ gm107_grctx_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_mpc_0[] = {
{ 0x419c00, 1, 0x04, 0x0000001a },
{ 0x419c04, 1, 0x04, 0x80000006 },
@@ -785,13 +787,13 @@ gm107_grctx_init_mpc_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_l1c_0[] = {
{ 0x419c84, 1, 0x04, 0x00000020 },
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_sm_0[] = {
{ 0x419e04, 3, 0x04, 0x00000000 },
{ 0x419e10, 1, 0x04, 0x00001c02 },
@@ -812,9 +814,9 @@ gm107_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gm107_grctx_pack_tpc[] = {
- { nvd7_grctx_init_pe_0 },
+ { gf117_grctx_init_pe_0 },
{ gm107_grctx_init_tex_0 },
{ gm107_grctx_init_mpc_0 },
{ gm107_grctx_init_l1c_0 },
@@ -822,7 +824,7 @@ gm107_grctx_pack_tpc[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_cbm_0[] = {
{ 0x41bec0, 1, 0x04, 0x00000000 },
{ 0x41bec4, 1, 0x04, 0x01050000 },
@@ -832,7 +834,7 @@ gm107_grctx_init_cbm_0[] = {
{}
};
-static const struct nvc0_graph_init
+static const struct gf100_gr_init
gm107_grctx_init_wwdx_0[] = {
{ 0x41bf00, 1, 0x04, 0x0a418820 },
{ 0x41bf04, 1, 0x04, 0x062080e6 },
@@ -846,9 +848,9 @@ gm107_grctx_init_wwdx_0[] = {
{}
};
-static const struct nvc0_graph_pack
+static const struct gf100_gr_pack
gm107_grctx_pack_ppc[] = {
- { nve4_grctx_init_pes_0 },
+ { gk104_grctx_init_pes_0 },
{ gm107_grctx_init_cbm_0 },
{ gm107_grctx_init_wwdx_0 },
{}
@@ -859,9 +861,9 @@ gm107_grctx_pack_ppc[] = {
******************************************************************************/
static void
-gm107_grctx_generate_bundle(struct nvc0_grctx *info)
+gm107_grctx_generate_bundle(struct gf100_grctx *info)
{
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
impl->bundle_size / 0x20);
const u32 token_limit = impl->bundle_token_limit;
@@ -876,9 +878,9 @@ gm107_grctx_generate_bundle(struct nvc0_grctx *info)
}
static void
-gm107_grctx_generate_pagepool(struct nvc0_grctx *info)
+gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
- const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
@@ -891,10 +893,10 @@ gm107_grctx_generate_pagepool(struct nvc0_grctx *info)
}
static void
-gm107_grctx_generate_attrib(struct nvc0_grctx *info)
+gm107_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct nvc0_graph_priv *priv = info->priv;
- const struct nvc0_grctx_oclass *impl = (void *)nvc0_grctx_impl(priv);
+ struct gf100_gr_priv *priv = info->priv;
+ const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(priv);
const u32 alpha = impl->alpha_nr;
const u32 attrib = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
@@ -930,7 +932,7 @@ gm107_grctx_generate_attrib(struct nvc0_grctx *info)
}
static void
-gm107_grctx_generate_tpcid(struct nvc0_graph_priv *priv)
+gm107_grctx_generate_tpcid(struct gf100_gr_priv *priv)
{
int gpc, tpc, id;
@@ -950,16 +952,16 @@ gm107_grctx_generate_tpcid(struct nvc0_graph_priv *priv)
}
static void
-gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
{
- struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nvc0_graph_mmio(priv, oclass->hub);
- nvc0_graph_mmio(priv, oclass->gpc);
- nvc0_graph_mmio(priv, oclass->zcull);
- nvc0_graph_mmio(priv, oclass->tpc);
- nvc0_graph_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(priv, oclass->hub);
+ gf100_gr_mmio(priv, oclass->gpc);
+ gf100_gr_mmio(priv, oclass->zcull);
+ gf100_gr_mmio(priv, oclass->tpc);
+ gf100_gr_mmio(priv, oclass->ppc);
nv_wr32(priv, 0x404154, 0x00000000);
@@ -969,9 +971,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
oclass->unkn(priv);
gm107_grctx_generate_tpcid(priv);
- nvc0_grctx_generate_r406028(priv);
- nve4_grctx_generate_r418bb8(priv);
- nvc0_grctx_generate_r406800(priv);
+ gf100_grctx_generate_r406028(priv);
+ gk104_grctx_generate_r418bb8(priv);
+ gf100_grctx_generate_r406800(priv);
nv_wr32(priv, 0x4064d0, 0x00000001);
for (i = 1; i < 8; i++)
@@ -988,9 +990,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
}
- nvc0_graph_icmd(priv, oclass->icmd);
+ gf100_gr_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
- nvc0_graph_mthd(priv, oclass->mthd);
+ gf100_gr_mthd(priv, oclass->mthd);
nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
@@ -998,22 +1000,22 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
}
-struct nouveau_oclass *
-gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
+struct nvkm_oclass *
+gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_context_ctor,
- .dtor = nvc0_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
.main = gm107_grctx_generate_main,
- .unkn = nve4_grctx_generate_unkn,
+ .unkn = gk104_grctx_generate_unkn,
.hub = gm107_grctx_pack_hub,
.gpc = gm107_grctx_pack_gpc,
- .zcull = nvc0_grctx_pack_zcull,
+ .zcull = gf100_grctx_pack_zcull,
.tpc = gm107_grctx_pack_tpc,
.ppc = gm107_grctx_pack_ppc,
.icmd = gm107_grctx_pack_icmd,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index 7bbb1e1b7a8d..dc31462afe65 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -22,8 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <core/gpuobj.h>
-
/* NVIDIA context programs handle a number of other conditions which are
* not implemented in our versions. It's not clear why NVIDIA context
* programs have this code, nor whether it's strictly necessary for
@@ -111,15 +109,16 @@
#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
+#include "ctxnv40.h"
#include "nv40.h"
-#include "ctx.h"
+#include <core/device.h>
/* TODO:
* - get vs count from 0x1540
*/
static int
-nv40_graph_vs_count(struct nouveau_device *device)
+nv40_gr_vs_count(struct nvkm_device *device)
{
switch (device->chipset) {
@@ -158,9 +157,9 @@ enum cp_label {
};
static void
-nv40_graph_construct_general(struct nouveau_grctx *ctx)
+nv40_gr_construct_general(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x4000a4, 1);
@@ -208,7 +207,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x4009dc, 0x80000000);
} else {
cp_ctx(ctx, 0x400840, 20);
- if (nv44_graph_class(ctx->device)) {
+ if (nv44_gr_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
}
@@ -217,7 +216,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400888, 0x00000040);
cp_ctx(ctx, 0x400894, 11);
gr_def(ctx, 0x400894, 0x00000040);
- if (!nv44_graph_class(ctx->device)) {
+ if (!nv44_gr_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
}
@@ -264,9 +263,9 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
}
static void
-nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
+nv40_gr_construct_state3d(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
if (device->chipset == 0x40) {
@@ -369,9 +368,9 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
}
static void
-nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
+nv40_gr_construct_state3d_2(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x402000, 1);
@@ -504,8 +503,8 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
- for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
+ cp_ctx(ctx, 0x403420, nv40_gr_vs_count(ctx->device));
+ for (i = 0; i < nv40_gr_vs_count(ctx->device); i++)
gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
if (device->chipset != 0x40) {
@@ -533,9 +532,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
}
static void
-nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
+nv40_gr_construct_state3d_3(struct nvkm_grctx *ctx)
{
- int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
+ int len = nv44_gr_class(ctx->device) ? 0x0084 : 0x0684;
cp_out (ctx, 0x300000);
cp_lsr (ctx, len - 4);
@@ -548,14 +547,14 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
}
static void
-nv40_graph_construct_shader(struct nouveau_grctx *ctx)
+nv40_gr_construct_shader(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
- struct nouveau_gpuobj *obj = ctx->data;
+ struct nvkm_device *device = ctx->device;
+ struct nvkm_gpuobj *obj = ctx->data;
int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
int offset, i;
- vs_nr = nv40_graph_vs_count(ctx->device);
+ vs_nr = nv40_gr_vs_count(ctx->device);
vs_nr_b0 = 363;
vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
if (device->chipset == 0x40) {
@@ -570,16 +569,16 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
} else {
b0_offset = 0x1d40/4; /* 2200 */
b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
- vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
+ vs_len = nv44_gr_class(device) ? 0x4980/4 : 0x4a40/4;
}
cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
- cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
+ cp_out(ctx, nv44_gr_class(device) ? 0x800029 : 0x800041);
offset = ctx->ctxvals_pos;
ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
- if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ if (ctx->mode != NVKM_GRCTX_VALS)
return;
offset += 0x0280/4;
@@ -595,7 +594,7 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
}
static void
-nv40_grctx_generate(struct nouveau_grctx *ctx)
+nv40_grctx_generate(struct nvkm_grctx *ctx)
{
/* decide whether we're loading/unloading the context */
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
@@ -629,23 +628,23 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
/* general PGRAPH state */
cp_name(ctx, cp_swap_state);
cp_pos (ctx, 0x00020/4);
- nv40_graph_construct_general(ctx);
+ nv40_gr_construct_general(ctx);
cp_wait(ctx, STATUS, IDLE);
/* 3D state, block 1 */
cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
- nv40_graph_construct_state3d(ctx);
+ nv40_gr_construct_state3d(ctx);
cp_wait(ctx, STATUS, IDLE);
/* 3D state, block 2 */
- nv40_graph_construct_state3d_2(ctx);
+ nv40_gr_construct_state3d_2(ctx);
/* Some other block of "random" state */
- nv40_graph_construct_state3d_3(ctx);
+ nv40_gr_construct_state3d_3(ctx);
/* Per-vertex shader state */
cp_pos (ctx, ctx->ctxvals_pos);
- nv40_graph_construct_shader(ctx);
+ nv40_gr_construct_shader(ctx);
/* pre-exit state updates */
cp_name(ctx, cp_prepare_exit);
@@ -660,22 +659,22 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
}
void
-nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
+nv40_grctx_fill(struct nvkm_device *device, struct nvkm_gpuobj *mem)
{
- nv40_grctx_generate(&(struct nouveau_grctx) {
+ nv40_grctx_generate(&(struct nvkm_grctx) {
.device = device,
- .mode = NOUVEAU_GRCTX_VALS,
+ .mode = NVKM_GRCTX_VALS,
.data = mem,
});
}
int
-nv40_grctx_init(struct nouveau_device *device, u32 *size)
+nv40_grctx_init(struct nvkm_device *device, u32 *size)
{
u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
- struct nouveau_grctx ctx = {
+ struct nvkm_grctx ctx = {
.device = device,
- .mode = NOUVEAU_GRCTX_PROG,
+ .mode = NVKM_GRCTX_PROG,
.data = ctxprog,
.ctxprog_max = 256,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index e1947013d3bc..8a89961956af 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -1,12 +1,13 @@
-#ifndef __NOUVEAU_GRCTX_H__
-#define __NOUVEAU_GRCTX_H__
+#ifndef __NVKM_GRCTX_H__
+#define __NVKM_GRCTX_H__
+#include <core/gpuobj.h>
-struct nouveau_grctx {
- struct nouveau_device *device;
+struct nvkm_grctx {
+ struct nvkm_device *device;
enum {
- NOUVEAU_GRCTX_PROG,
- NOUVEAU_GRCTX_VALS
+ NVKM_GRCTX_PROG,
+ NVKM_GRCTX_VALS
} mode;
void *data;
@@ -19,11 +20,11 @@ struct nouveau_grctx {
};
static inline void
-cp_out(struct nouveau_grctx *ctx, u32 inst)
+cp_out(struct nvkm_grctx *ctx, u32 inst)
{
u32 *ctxprog = ctx->data;
- if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ if (ctx->mode != NVKM_GRCTX_PROG)
return;
BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
@@ -31,13 +32,13 @@ cp_out(struct nouveau_grctx *ctx, u32 inst)
}
static inline void
-cp_lsr(struct nouveau_grctx *ctx, u32 val)
+cp_lsr(struct nvkm_grctx *ctx, u32 val)
{
cp_out(ctx, CP_LOAD_SR | val);
}
static inline void
-cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
+cp_ctx(struct nvkm_grctx *ctx, u32 reg, u32 length)
{
ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
@@ -53,12 +54,12 @@ cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
}
static inline void
-cp_name(struct nouveau_grctx *ctx, int name)
+cp_name(struct nvkm_grctx *ctx, int name)
{
u32 *ctxprog = ctx->data;
int i;
- if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ if (ctx->mode != NVKM_GRCTX_PROG)
return;
ctx->ctxprog_label[name] = ctx->ctxprog_len;
@@ -73,7 +74,7 @@ cp_name(struct nouveau_grctx *ctx, int name)
}
static inline void
-_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
+_cp_bra(struct nvkm_grctx *ctx, u32 mod, int flag, int state, int name)
{
int ip = 0;
@@ -91,21 +92,21 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
static inline void
-_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
+_cp_wait(struct nvkm_grctx *ctx, int flag, int state)
{
cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
}
#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
static inline void
-_cp_set(struct nouveau_grctx *ctx, int flag, int state)
+_cp_set(struct nvkm_grctx *ctx, int flag, int state)
{
cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
}
#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
static inline void
-cp_pos(struct nouveau_grctx *ctx, int offset)
+cp_pos(struct nvkm_grctx *ctx, int offset)
{
ctx->ctxvals_pos = offset;
ctx->ctxvals_base = ctx->ctxvals_pos;
@@ -115,9 +116,9 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
}
static inline void
-gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
+gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
{
- if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ if (ctx->mode != NVKM_GRCTX_VALS)
return;
reg = (reg - 0x00400000) / 4;
@@ -125,5 +126,4 @@ gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
nv_wo32(ctx->data, reg * 4, val);
}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 1d0e33fb5f61..9c9528d2cd90 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <core/gpuobj.h>
-
#define CP_FLAG_CLEAR 0
#define CP_FLAG_SET 1
#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
@@ -107,14 +105,14 @@
#define CP_SEEK_1 0x00c000ff
#define CP_SEEK_2 0x00c800ff
-#include "nv50.h"
-#include "ctx.h"
+#include "ctxnv40.h"
+
+#include <core/device.h>
+#include <subdev/fb.h>
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
-#include <subdev/fb.h>
-
/*
* This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
* the GPU itself that does context-switching, but it needs a special
@@ -169,14 +167,14 @@ enum cp_label {
cp_exit,
};
-static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
+static void nv50_gr_construct_mmio(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_xfer1(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_xfer2(struct nvkm_grctx *ctx);
/* Main function: construct the ctxprog skeleton, call the other functions. */
static int
-nv50_grctx_generate(struct nouveau_grctx *ctx)
+nv50_grctx_generate(struct nvkm_grctx *ctx)
{
cp_set (ctx, STATE, RUNNING);
cp_set (ctx, XFER_SWITCH, ENABLE);
@@ -219,9 +217,9 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
cp_pos (ctx, 0x00004/4);
cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
cp_pos (ctx, 0x00100/4);
- nv50_graph_construct_mmio(ctx);
- nv50_graph_construct_xfer1(ctx);
- nv50_graph_construct_xfer2(ctx);
+ nv50_gr_construct_mmio(ctx);
+ nv50_gr_construct_xfer1(ctx);
+ nv50_gr_construct_xfer2(ctx);
cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
@@ -255,22 +253,22 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
}
void
-nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
+nv50_grctx_fill(struct nvkm_device *device, struct nvkm_gpuobj *mem)
{
- nv50_grctx_generate(&(struct nouveau_grctx) {
+ nv50_grctx_generate(&(struct nvkm_grctx) {
.device = device,
- .mode = NOUVEAU_GRCTX_VALS,
+ .mode = NVKM_GRCTX_VALS,
.data = mem,
});
}
int
-nv50_grctx_init(struct nouveau_device *device, u32 *size)
+nv50_grctx_init(struct nvkm_device *device, u32 *size)
{
u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
- struct nouveau_grctx ctx = {
+ struct nvkm_grctx ctx = {
.device = device,
- .mode = NOUVEAU_GRCTX_PROG,
+ .mode = NVKM_GRCTX_PROG,
.data = ctxprog,
.ctxprog_max = 512,
};
@@ -293,12 +291,12 @@ nv50_grctx_init(struct nouveau_device *device, u32 *size)
*/
static void
-nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx);
static void
-nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i, j;
int offset, base;
u32 units = nv_rd32 (ctx->device, 0x1540);
@@ -334,7 +332,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400b20, 0x0001629d);
}
- nv50_graph_construct_mmio_ddata(ctx);
+ nv50_gr_construct_mmio_ddata(ctx);
/* 0C00: VFETCH */
cp_ctx(ctx, 0x400c08, 0x2);
@@ -572,7 +570,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
else if (device->chipset < 0xa0)
gr_def(ctx, 0x407d08, 0x00390040);
else {
- if (nouveau_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
+ if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
gr_def(ctx, 0x407d08, 0x003d0040);
else
gr_def(ctx, 0x407d08, 0x003c0040);
@@ -784,18 +782,18 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
static void
-dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
+dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
- if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ if (val && ctx->mode == NVKM_GRCTX_VALS)
for (i = 0; i < num; i++)
nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
ctx->ctxvals_pos += num;
}
static void
-nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int base, num;
base = ctx->ctxvals_pos;
@@ -1156,9 +1154,9 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
*/
static void
-xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
+xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
- if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ if (val && ctx->mode == NVKM_GRCTX_VALS)
for (i = 0; i < num; i++)
nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
ctx->ctxvals_pos += num << 3;
@@ -1166,29 +1164,29 @@ xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
/* Gene declarations... */
-static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
+static void nv50_gr_construct_gene_dispatch(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_m2mf(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_ccache(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_unk10xx(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_unk14xx(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_zcull(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_clipid(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_unk24xx(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_vfetch(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_eng2d(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_csched(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_unk1cxx(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_strmout(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_unk34xx(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_ropm1(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_ropm2(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_gene_ropc(struct nvkm_grctx *ctx);
+static void nv50_gr_construct_xfer_tp(struct nvkm_grctx *ctx);
static void
-nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer1(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
int offset;
int size = 0;
@@ -1200,32 +1198,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
if (device->chipset < 0xa0) {
/* Strand 0 */
ctx->ctxvals_pos = offset;
- nv50_graph_construct_gene_dispatch(ctx);
- nv50_graph_construct_gene_m2mf(ctx);
- nv50_graph_construct_gene_unk24xx(ctx);
- nv50_graph_construct_gene_clipid(ctx);
- nv50_graph_construct_gene_zcull(ctx);
+ nv50_gr_construct_gene_dispatch(ctx);
+ nv50_gr_construct_gene_m2mf(ctx);
+ nv50_gr_construct_gene_unk24xx(ctx);
+ nv50_gr_construct_gene_clipid(ctx);
+ nv50_gr_construct_gene_zcull(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1 */
ctx->ctxvals_pos = offset + 0x1;
- nv50_graph_construct_gene_vfetch(ctx);
- nv50_graph_construct_gene_eng2d(ctx);
- nv50_graph_construct_gene_csched(ctx);
- nv50_graph_construct_gene_ropm1(ctx);
- nv50_graph_construct_gene_ropm2(ctx);
+ nv50_gr_construct_gene_vfetch(ctx);
+ nv50_gr_construct_gene_eng2d(ctx);
+ nv50_gr_construct_gene_csched(ctx);
+ nv50_gr_construct_gene_ropm1(ctx);
+ nv50_gr_construct_gene_ropm2(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2 */
ctx->ctxvals_pos = offset + 0x2;
- nv50_graph_construct_gene_ccache(ctx);
- nv50_graph_construct_gene_unk1cxx(ctx);
- nv50_graph_construct_gene_strmout(ctx);
- nv50_graph_construct_gene_unk14xx(ctx);
- nv50_graph_construct_gene_unk10xx(ctx);
- nv50_graph_construct_gene_unk34xx(ctx);
+ nv50_gr_construct_gene_ccache(ctx);
+ nv50_gr_construct_gene_unk1cxx(ctx);
+ nv50_gr_construct_gene_strmout(ctx);
+ nv50_gr_construct_gene_unk14xx(ctx);
+ nv50_gr_construct_gene_unk10xx(ctx);
+ nv50_gr_construct_gene_unk34xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1233,7 +1231,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
ctx->ctxvals_pos = offset + 3;
for (i = 0; i < 6; i++)
if (units & (1 << (i + 16)))
- nv50_graph_construct_gene_ropc(ctx);
+ nv50_gr_construct_gene_ropc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1241,74 +1239,74 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
for (i = 0; i < 4; i++) {
ctx->ctxvals_pos = offset + 4 + i;
if (units & (1 << (2 * i)))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << (2 * i + 1)))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
} else {
/* Strand 0 */
ctx->ctxvals_pos = offset;
- nv50_graph_construct_gene_dispatch(ctx);
- nv50_graph_construct_gene_m2mf(ctx);
- nv50_graph_construct_gene_unk34xx(ctx);
- nv50_graph_construct_gene_csched(ctx);
- nv50_graph_construct_gene_unk1cxx(ctx);
- nv50_graph_construct_gene_strmout(ctx);
+ nv50_gr_construct_gene_dispatch(ctx);
+ nv50_gr_construct_gene_m2mf(ctx);
+ nv50_gr_construct_gene_unk34xx(ctx);
+ nv50_gr_construct_gene_csched(ctx);
+ nv50_gr_construct_gene_unk1cxx(ctx);
+ nv50_gr_construct_gene_strmout(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1 */
ctx->ctxvals_pos = offset + 1;
- nv50_graph_construct_gene_unk10xx(ctx);
+ nv50_gr_construct_gene_unk10xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2 */
ctx->ctxvals_pos = offset + 2;
if (device->chipset == 0xa0)
- nv50_graph_construct_gene_unk14xx(ctx);
- nv50_graph_construct_gene_unk24xx(ctx);
+ nv50_gr_construct_gene_unk14xx(ctx);
+ nv50_gr_construct_gene_unk24xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 3 */
ctx->ctxvals_pos = offset + 3;
- nv50_graph_construct_gene_vfetch(ctx);
+ nv50_gr_construct_gene_vfetch(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 4 */
ctx->ctxvals_pos = offset + 4;
- nv50_graph_construct_gene_ccache(ctx);
+ nv50_gr_construct_gene_ccache(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 5 */
ctx->ctxvals_pos = offset + 5;
- nv50_graph_construct_gene_ropm2(ctx);
- nv50_graph_construct_gene_ropm1(ctx);
+ nv50_gr_construct_gene_ropm2(ctx);
+ nv50_gr_construct_gene_ropm1(ctx);
/* per-ROP context */
for (i = 0; i < 8; i++)
if (units & (1<<(i+16)))
- nv50_graph_construct_gene_ropc(ctx);
+ nv50_gr_construct_gene_ropc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 6 */
ctx->ctxvals_pos = offset + 6;
- nv50_graph_construct_gene_zcull(ctx);
- nv50_graph_construct_gene_clipid(ctx);
- nv50_graph_construct_gene_eng2d(ctx);
+ nv50_gr_construct_gene_zcull(ctx);
+ nv50_gr_construct_gene_clipid(ctx);
+ nv50_gr_construct_gene_eng2d(ctx);
if (units & (1 << 0))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 1))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 2))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 3))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1316,19 +1314,19 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
ctx->ctxvals_pos = offset + 7;
if (device->chipset == 0xa0) {
if (units & (1 << 4))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 5))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 6))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 7))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 8))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
if (units & (1 << 9))
- nv50_graph_construct_xfer_tp(ctx);
+ nv50_gr_construct_xfer_tp(ctx);
} else {
- nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_gr_construct_gene_unk14xx(ctx);
}
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1349,10 +1347,10 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
*/
static void
-nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_dispatch(struct nvkm_grctx *ctx)
{
/* start of strand 0 */
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* SEEK */
if (device->chipset == 0x50)
xf_emit(ctx, 5, 0);
@@ -1405,10 +1403,10 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_m2mf(struct nvkm_grctx *ctx)
{
/* Strand 0, right after dispatch */
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int smallm2mf = 0;
if (device->chipset < 0x92 || device->chipset == 0x98)
smallm2mf = 1;
@@ -1457,9 +1455,9 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_ccache(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* RO */
xf_emit(ctx, 0x800, 0); /* ffffffff */
switch (device->chipset) {
@@ -1525,9 +1523,9 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_unk10xx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
@@ -1585,9 +1583,9 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_unk34xx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
@@ -1610,9 +1608,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_unk14xx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
if (device->chipset != 0x50) {
xf_emit(ctx, 5, 0); /* ffffffff */
@@ -1721,9 +1719,9 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_zcull(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
/* SEEK */
xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
@@ -1782,7 +1780,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_clipid(struct nvkm_grctx *ctx)
{
/* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
/* SEEK */
@@ -1802,9 +1800,9 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_unk24xx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
/* SEEK */
@@ -1885,9 +1883,9 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_vfetch(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int acnt = 0x10, rep, i;
/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
if (IS_NVA3F(device->chipset))
@@ -2071,9 +2069,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_eng2d(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
/* SEEK */
xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
@@ -2133,9 +2131,9 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_csched(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
/* SEEK */
xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2232,9 +2230,9 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_unk1cxx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,9 +2326,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_strmout(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
@@ -2370,9 +2368,9 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_ropm1(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
xf_emit(ctx, 1, 0); /* 00000007 */
@@ -2383,9 +2381,9 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_ropm2(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
/* SEEK */
xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
@@ -2409,9 +2407,9 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
+nv50_gr_construct_gene_ropc(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int magic2;
if (device->chipset == 0x50) {
magic2 = 0x00003e60;
@@ -2644,9 +2642,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer_unk84xx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int magic3;
switch (device->chipset) {
case 0x50:
@@ -2736,9 +2734,9 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer_tprop(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int magic1, magic2;
if (device->chipset == 0x50) {
magic1 = 0x3ff;
@@ -3036,9 +3034,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer_tex(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 3 */
@@ -3082,9 +3080,9 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer_unk8cxx(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
@@ -3121,26 +3119,26 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer_tp(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
if (device->chipset < 0xa0) {
- nv50_graph_construct_xfer_unk84xx(ctx);
- nv50_graph_construct_xfer_tprop(ctx);
- nv50_graph_construct_xfer_tex(ctx);
- nv50_graph_construct_xfer_unk8cxx(ctx);
+ nv50_gr_construct_xfer_unk84xx(ctx);
+ nv50_gr_construct_xfer_tprop(ctx);
+ nv50_gr_construct_xfer_tex(ctx);
+ nv50_gr_construct_xfer_unk8cxx(ctx);
} else {
- nv50_graph_construct_xfer_tex(ctx);
- nv50_graph_construct_xfer_tprop(ctx);
- nv50_graph_construct_xfer_unk8cxx(ctx);
- nv50_graph_construct_xfer_unk84xx(ctx);
+ nv50_gr_construct_xfer_tex(ctx);
+ nv50_gr_construct_xfer_tprop(ctx);
+ nv50_gr_construct_xfer_unk8cxx(ctx);
+ nv50_gr_construct_xfer_unk84xx(ctx);
}
}
static void
-nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer_mpc(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i, mpcnt = 2;
switch (device->chipset) {
case 0x98:
@@ -3270,9 +3268,9 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
}
static void
-nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
+nv50_gr_construct_xfer2(struct nvkm_grctx *ctx)
{
- struct nouveau_device *device = ctx->device;
+ struct nvkm_device *device = ctx->device;
int i;
u32 offset;
u32 units = nv_rd32 (ctx->device, 0x1540);
@@ -3288,7 +3286,7 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
if (i == 0)
xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
if (units & (1 << i))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
@@ -3299,40 +3297,40 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
* what it's doing here. */
xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
if (units & (1 << 0))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if (units & (1 << 1))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1: TPs 2, 3 */
ctx->ctxvals_pos = offset + 1;
if (units & (1 << 2))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if (units & (1 << 3))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2: TPs 4, 5, 6 */
ctx->ctxvals_pos = offset + 2;
if (units & (1 << 4))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if (units & (1 << 5))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if (units & (1 << 6))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 3: TPs 7, 8, 9 */
ctx->ctxvals_pos = offset + 3;
if (units & (1 << 7))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if (units & (1 << 8))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if (units & (1 << 9))
- nv50_graph_construct_xfer_mpc(ctx);
+ nv50_gr_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/com.fuc
index e37d8106ae1a..64208bf954cf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/com.fuc
@@ -1,4 +1,4 @@
-/* fuc microcode util functions for nvc0 PGRAPH
+/* fuc microcode util functions for gf100 PGRAPH
*
* Copyright 2011 Red Hat Inc.
*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index 7445f12b1d9e..eaed1599b90f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -1,4 +1,4 @@
-/* fuc microcode for nvc0 PGRAPH/GPC
+/* fuc microcode for gf100 PGRAPH/GPC
*
* Copyright 2011 Red Hat Inc.
*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3
index 5ae06a2d64c9..7cf2bf9d95a2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3
@@ -27,13 +27,13 @@
#define CHIPSET GF100
#include "macros.fuc"
-.section #nvc0_grgpc_data
+.section #gf100_grgpc_data
#define INCLUDE_DATA
#include "com.fuc"
#include "gpc.fuc"
#undef INCLUDE_DATA
-.section #nvc0_grgpc_code
+.section #gf100_grgpc_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 325cc7b7b2fb..ea32f56c0a92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvc0_grgpc_data[] = {
+uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
/* 0x0004: gpc_mmio_list_tail */
@@ -36,7 +36,7 @@ uint32_t nvc0_grgpc_data[] = {
0x00000000,
};
-uint32_t nvc0_grgpc_code[] = {
+uint32_t gf100_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3
index c2f754edbd7d..c918f7d60004 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3
@@ -27,13 +27,13 @@
#define CHIPSET GF117
#include "macros.fuc"
-.section #nvd7_grgpc_data
+.section #gf117_grgpc_data
#define INCLUDE_DATA
#include "com.fuc"
#include "gpc.fuc"
#undef INCLUDE_DATA
-.section #nvd7_grgpc_code
+.section #gf117_grgpc_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index d1504a4059c6..9a36d9cbb8a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvd7_grgpc_data[] = {
+uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t nvd7_grgpc_data[] = {
0x00000000,
};
-uint32_t nvd7_grgpc_code[] = {
+uint32_t gf117_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3
index 6b906cd2a31f..b80cdfd337a9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3
@@ -27,13 +27,13 @@
#define CHIPSET GK100
#include "macros.fuc"
-.section #nve0_grgpc_data
+.section #gk104_grgpc_data
#define INCLUDE_DATA
#include "com.fuc"
#include "gpc.fuc"
#undef INCLUDE_DATA
-.section #nve0_grgpc_code
+.section #gk104_grgpc_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 855b220378f9..49020fff4317 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nve0_grgpc_data[] = {
+uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t nve0_grgpc_data[] = {
0x00000000,
};
-uint32_t nve0_grgpc_code[] = {
+uint32_t gk104_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3
index 90bbe525b626..98d85fe210e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3
@@ -27,13 +27,13 @@
#define CHIPSET GK110
#include "macros.fuc"
-.section #nvf0_grgpc_data
+.section #gk110_grgpc_data
#define INCLUDE_DATA
#include "com.fuc"
#include "gpc.fuc"
#undef INCLUDE_DATA
-.section #nvf0_grgpc_code
+.section #gk110_grgpc_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 1b803197d28b..c95b07e3bce5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvf0_grgpc_data[] = {
+uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t nvf0_grgpc_data[] = {
0x00000000,
};
-uint32_t nvf0_grgpc_code[] = {
+uint32_t gk110_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5
index bd30262d635b..8f64299a3b91 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5
@@ -27,13 +27,13 @@
#define CHIPSET GK208
#include "macros.fuc"
-.section #nv108_grgpc_data
+.section #gk208_grgpc_data
#define INCLUDE_DATA
#include "com.fuc"
#include "gpc.fuc"
#undef INCLUDE_DATA
-.section #nv108_grgpc_code
+.section #gk208_grgpc_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 31922707794f..7e1c28ee7591 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t nv108_grgpc_data[] = {
+uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t nv108_grgpc_data[] = {
0x00000000,
};
-uint32_t nv108_grgpc_code[] = {
+uint32_t gk208_grgpc_code[] = {
0x03140ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5 b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
index e730603891d7..e730603891d7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 6d53b67dd3c4..6d53b67dd3c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
index b4ad18bf5a26..87f99e38acbf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
@@ -1,4 +1,4 @@
-/* fuc microcode for nvc0 PGRAPH/HUB
+/* fuc microcode for gf100 PGRAPH/HUB
*
* Copyright 2011 Red Hat Inc.
*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3
index 3ff52badf932..2c28e7199b7f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3
@@ -25,13 +25,13 @@
#define CHIPSET GF100
#include "macros.fuc"
-.section #nvc0_grhub_data
+.section #gf100_grhub_data
#define INCLUDE_DATA
#include "com.fuc"
#include "hub.fuc"
#undef INCLUDE_DATA
-.section #nvc0_grhub_code
+.section #gf100_grhub_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 62b0c7601d8b..f6acda505677 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvd7_grhub_data[] = {
+uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t nvd7_grhub_data[] = {
0x0417e91c,
};
-uint32_t nvd7_grhub_code[] = {
+uint32_t gf100_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3
index afbe03ac9077..581b2d53ab0c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3
@@ -25,13 +25,13 @@
#define CHIPSET GF117
#include "macros.fuc"
-.section #nvd7_grhub_data
+.section #gf117_grhub_data
#define INCLUDE_DATA
#include "com.fuc"
#include "hub.fuc"
#undef INCLUDE_DATA
-.section #nvd7_grhub_code
+.section #gf117_grhub_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 92dfe6a4ac87..7cb14e59dea1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvc0_grhub_data[] = {
+uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t nvc0_grhub_data[] = {
0x0417e91c,
};
-uint32_t nvc0_grhub_code[] = {
+uint32_t gf117_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3
index d4840f1879fd..d977d393b679 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3
@@ -25,13 +25,13 @@
#define CHIPSET GK100
#include "macros.fuc"
-.section #nve0_grhub_data
+.section #gk104_grhub_data
#define INCLUDE_DATA
#include "com.fuc"
#include "hub.fuc"
#undef INCLUDE_DATA
-.section #nve0_grhub_code
+.section #gk104_grhub_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 51c3797d8537..95ac15110049 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nve0_grhub_data[] = {
+uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t nve0_grhub_data[] = {
0x0417e91c,
};
-uint32_t nve0_grhub_code[] = {
+uint32_t gk104_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3
index ec42ed29b50d..760b4632f22d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3
@@ -25,13 +25,13 @@
#define CHIPSET GK110
#include "macros.fuc"
-.section #nvf0_grhub_data
+.section #gk110_grhub_data
#define INCLUDE_DATA
#include "com.fuc"
#include "hub.fuc"
#undef INCLUDE_DATA
-.section #nvf0_grhub_code
+.section #gk110_grhub_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index a0af4b703a8e..89986878480f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvf0_grhub_data[] = {
+uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t nvf0_grhub_data[] = {
0x0417e91c,
};
-uint32_t nvf0_grhub_code[] = {
+uint32_t gk110_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5
index 7c5d25630fa8..43243a35f6dc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5
@@ -25,13 +25,13 @@
#define CHIPSET GK208
#include "macros.fuc"
-.section #nv108_grhub_data
+.section #gk208_grhub_data
#define INCLUDE_DATA
#include "com.fuc"
#include "hub.fuc"
#undef INCLUDE_DATA
-.section #nv108_grhub_code
+.section #gk208_grhub_code
#define INCLUDE_CODE
bra #init
#include "com.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index e49b5a877ae4..0e98fa4a386e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t nv108_grhub_data[] = {
+uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t nv108_grhub_data[] = {
0x0417e91c,
};
-uint32_t nv108_grhub_code[] = {
+uint32_t gk208_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5 b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5
index 27591b3086a5..27591b3086a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5f953c5c20b7..5f953c5c20b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
index 2a0b0f844299..2a0b0f844299 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
index 1718ae4e8224..1718ae4e8224 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 17251e4b9e86..1dd482e9da77 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -21,16 +21,28 @@
*
* Authors: Ben Skeggs
*/
-
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include "gf100.h"
+#include "ctxgf100.h"
+#include "fuc/os.h"
+
+#include <core/client.h>
+#include <core/device.h>
+#include <core/handle.h>
+#include <core/option.h>
+#include <engine/fifo.h>
+#include <subdev/fb.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
/*******************************************************************************
* Zero Bandwidth Clear
******************************************************************************/
static void
-nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc)
+gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc)
{
if (priv->zbc_color[zbc].format) {
nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
@@ -44,10 +56,10 @@ nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc)
}
static int
-nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format,
- const u32 ds[4], const u32 l2[4])
+gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
+ const u32 ds[4], const u32 l2[4])
{
- struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ struct nvkm_ltc *ltc = nvkm_ltc(priv);
int zbc = -ENOSPC, i;
for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
@@ -75,12 +87,12 @@ nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format,
memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
priv->zbc_color[zbc].format = format;
ltc->zbc_color_get(ltc, zbc, l2);
- nvc0_graph_zbc_clear_color(priv, zbc);
+ gf100_gr_zbc_clear_color(priv, zbc);
return zbc;
}
static void
-nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc)
+gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc)
{
if (priv->zbc_depth[zbc].format)
nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
@@ -90,10 +102,10 @@ nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc)
}
static int
-nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
- const u32 ds, const u32 l2)
+gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
+ const u32 ds, const u32 l2)
{
- struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ struct nvkm_ltc *ltc = nvkm_ltc(priv);
int zbc = -ENOSPC, i;
for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
@@ -119,7 +131,7 @@ nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
priv->zbc_depth[zbc].ds = ds;
priv->zbc_depth[zbc].l2 = l2;
ltc->zbc_depth_get(ltc, zbc, l2);
- nvc0_graph_zbc_clear_depth(priv, zbc);
+ gf100_gr_zbc_clear_depth(priv, zbc);
return zbc;
}
@@ -128,9 +140,9 @@ nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
******************************************************************************/
static int
-nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size)
+gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
{
- struct nvc0_graph_priv *priv = (void *)object->engine;
+ struct gf100_gr_priv *priv = (void *)object->engine;
union {
struct fermi_a_zbc_color_v0 v0;
} *args = data;
@@ -157,9 +169,9 @@ nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size)
case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
- ret = nvc0_graph_zbc_color_get(priv, args->v0.format,
- args->v0.ds,
- args->v0.l2);
+ ret = gf100_gr_zbc_color_get(priv, args->v0.format,
+ args->v0.ds,
+ args->v0.l2);
if (ret >= 0) {
args->v0.index = ret;
return 0;
@@ -174,9 +186,9 @@ nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size)
}
static int
-nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size)
+gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
{
- struct nvc0_graph_priv *priv = (void *)object->engine;
+ struct gf100_gr_priv *priv = (void *)object->engine;
union {
struct fermi_a_zbc_depth_v0 v0;
} *args = data;
@@ -185,9 +197,9 @@ nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size)
if (nvif_unpack(args->v0, 0, 0, false)) {
switch (args->v0.format) {
case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
- ret = nvc0_graph_zbc_depth_get(priv, args->v0.format,
- args->v0.ds,
- args->v0.l2);
+ ret = gf100_gr_zbc_depth_get(priv, args->v0.format,
+ args->v0.ds,
+ args->v0.l2);
return (ret >= 0) ? 0 : -ENOSPC;
default:
return -EINVAL;
@@ -198,33 +210,33 @@ nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size)
}
static int
-nvc0_fermi_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
+gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
switch (mthd) {
case FERMI_A_ZBC_COLOR:
- return nvc0_fermi_mthd_zbc_color(object, data, size);
+ return gf100_fermi_mthd_zbc_color(object, data, size);
case FERMI_A_ZBC_DEPTH:
- return nvc0_fermi_mthd_zbc_depth(object, data, size);
+ return gf100_fermi_mthd_zbc_depth(object, data, size);
default:
break;
}
return -EINVAL;
}
-struct nouveau_ofuncs
-nvc0_fermi_ofuncs = {
- .ctor = _nouveau_object_ctor,
- .dtor = nouveau_object_destroy,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
- .mthd = nvc0_fermi_mthd,
+struct nvkm_ofuncs
+gf100_fermi_ofuncs = {
+ .ctor = _nvkm_object_ctor,
+ .dtor = nvkm_object_destroy,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
+ .mthd = gf100_fermi_mthd,
};
static int
-nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd,
- void *pdata, u32 size)
+gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
+ void *pdata, u32 size)
{
- struct nvc0_graph_priv *priv = (void *)nv_engine(object);
+ struct gf100_gr_priv *priv = (void *)nv_engine(object);
if (size >= sizeof(u32)) {
u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
nv_wr32(priv, 0x419e44, data);
@@ -234,24 +246,24 @@ nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd,
return -EINVAL;
}
-struct nouveau_omthds
-nvc0_graph_9097_omthds[] = {
- { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
+struct nvkm_omthds
+gf100_gr_9097_omthds[] = {
+ { 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
{}
};
-struct nouveau_omthds
-nvc0_graph_90c0_omthds[] = {
- { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
+struct nvkm_omthds
+gf100_gr_90c0_omthds[] = {
+ { 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
{}
};
-struct nouveau_oclass
-nvc0_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0x9039, &nouveau_object_ofuncs },
- { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+struct nvkm_oclass
+gf100_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
@@ -260,22 +272,21 @@ nvc0_graph_sclass[] = {
******************************************************************************/
int
-nvc0_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *args, u32 size,
- struct nouveau_object **pobject)
+gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *args, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_vm *vm = nouveau_client(parent)->vm;
- struct nvc0_graph_priv *priv = (void *)engine;
- struct nvc0_graph_data *data = priv->mmio_data;
- struct nvc0_graph_mmio *mmio = priv->mmio_list;
- struct nvc0_graph_chan *chan;
+ struct nvkm_vm *vm = nvkm_client(parent)->vm;
+ struct gf100_gr_priv *priv = (void *)engine;
+ struct gf100_gr_data *data = priv->mmio_data;
+ struct gf100_gr_mmio *mmio = priv->mmio_list;
+ struct gf100_gr_chan *chan;
int ret, i;
/* allocate memory for context, and fill with default values */
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
- priv->size, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
+ priv->size, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -284,26 +295,26 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
* fuc to modify some per-context register settings on first load
* of the context.
*/
- ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
- &chan->mmio);
+ ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
+ &chan->mmio);
if (ret)
return ret;
- ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
- NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
- &chan->mmio_vma);
+ ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
+ NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+ &chan->mmio_vma);
if (ret)
return ret;
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
- ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
- data->align, 0, &chan->data[i].mem);
+ ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
+ data->align, 0, &chan->data[i].mem);
if (ret)
return ret;
- ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
- &chan->data[i].vma);
+ ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
+ &chan->data[i].vma);
if (ret)
return ret;
@@ -347,28 +358,28 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
}
void
-nvc0_graph_context_dtor(struct nouveau_object *object)
+gf100_gr_context_dtor(struct nvkm_object *object)
{
- struct nvc0_graph_chan *chan = (void *)object;
+ struct gf100_gr_chan *chan = (void *)object;
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
- nouveau_gpuobj_unmap(&chan->data[i].vma);
- nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
+ nvkm_gpuobj_unmap(&chan->data[i].vma);
+ nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
}
- nouveau_gpuobj_unmap(&chan->mmio_vma);
- nouveau_gpuobj_ref(NULL, &chan->mmio);
+ nvkm_gpuobj_unmap(&chan->mmio_vma);
+ nvkm_gpuobj_ref(NULL, &chan->mmio);
- nouveau_graph_context_destroy(&chan->base);
+ nvkm_gr_context_destroy(&chan->base);
}
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
-const struct nvc0_graph_init
-nvc0_graph_init_main_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003083c2 },
{ 0x400088, 1, 0x04, 0x00006fe7 },
{ 0x40008c, 1, 0x04, 0x00000000 },
@@ -383,53 +394,53 @@ nvc0_graph_init_main_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_fe_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_fe_0[] = {
{ 0x40415c, 1, 0x04, 0x00000000 },
{ 0x404170, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_pri_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_pri_0[] = {
{ 0x404488, 2, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_rstr2d_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_rstr2d_0[] = {
{ 0x407808, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_pd_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_pd_0[] = {
{ 0x406024, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_ds_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405908, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_scc_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_scc_0[] = {
{ 0x40803c, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_prop_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_prop_0[] = {
{ 0x4184a0, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_gpc_unk_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_gpc_unk_0[] = {
{ 0x418604, 1, 0x04, 0x00000000 },
{ 0x418680, 1, 0x04, 0x00000000 },
{ 0x418714, 1, 0x04, 0x80000000 },
@@ -437,20 +448,20 @@ nvc0_graph_init_gpc_unk_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_setup_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_setup_0[] = {
{ 0x418814, 3, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_crstr_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_crstr_0[] = {
{ 0x418b04, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_setup_1[] = {
+const struct gf100_gr_init
+gf100_gr_init_setup_1[] = {
{ 0x4188c8, 1, 0x04, 0x80000000 },
{ 0x4188cc, 1, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
@@ -458,8 +469,8 @@ nvc0_graph_init_setup_1[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_zcull_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_zcull_0[] = {
{ 0x418910, 1, 0x04, 0x00010001 },
{ 0x418914, 1, 0x04, 0x00000301 },
{ 0x418918, 1, 0x04, 0x00800000 },
@@ -468,15 +479,15 @@ nvc0_graph_init_zcull_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_gpm_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_gpm_0[] = {
{ 0x418c04, 1, 0x04, 0x00000000 },
{ 0x418c88, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_gpc_unk_1[] = {
+const struct gf100_gr_init
+gf100_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f08, 1, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000050 },
@@ -484,30 +495,30 @@ nvc0_graph_init_gpc_unk_1[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_gcc_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_gcc_0[] = {
{ 0x41900c, 1, 0x04, 0x00000000 },
{ 0x419018, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_tpccs_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_tpccs_0[] = {
{ 0x419d08, 2, 0x04, 0x00000000 },
{ 0x419d10, 1, 0x04, 0x00000014 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_tex_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
{ 0x419abc, 2, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_pe_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_pe_0[] = {
{ 0x41980c, 3, 0x04, 0x00000000 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x41984c, 1, 0x04, 0x00005bc5 },
@@ -515,8 +526,8 @@ nvc0_graph_init_pe_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_l1c_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x80000000 },
{ 0x419cb4, 1, 0x04, 0x00000000 },
@@ -526,27 +537,27 @@ nvc0_graph_init_l1c_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_wwdx_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_wwdx_0[] = {
{ 0x419bd4, 1, 0x04, 0x00800000 },
{ 0x419bdc, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_tpccs_1[] = {
+const struct gf100_gr_init
+gf100_gr_init_tpccs_1[] = {
{ 0x419d2c, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_mpc_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_mpc_0[] = {
{ 0x419c0c, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-nvc0_graph_init_sm_0[] = {
+static const struct gf100_gr_init
+gf100_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -563,8 +574,8 @@ nvc0_graph_init_sm_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_be_0[] = {
+const struct gf100_gr_init
+gf100_gr_init_be_0[] = {
{ 0x40880c, 1, 0x04, 0x00000000 },
{ 0x408910, 9, 0x04, 0x00000000 },
{ 0x408950, 1, 0x04, 0x00000000 },
@@ -575,47 +586,47 @@ nvc0_graph_init_be_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_fe_1[] = {
+const struct gf100_gr_init
+gf100_gr_init_fe_1[] = {
{ 0x4040f0, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvc0_graph_init_pe_1[] = {
+const struct gf100_gr_init
+gf100_gr_init_pe_1[] = {
{ 0x419880, 1, 0x04, 0x00000002 },
{}
};
-static const struct nvc0_graph_pack
-nvc0_graph_pack_mmio[] = {
- { nvc0_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvc0_graph_init_pd_0 },
- { nvc0_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvc0_graph_init_prop_0 },
- { nvc0_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc0_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvc0_graph_init_gpm_0 },
- { nvc0_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nvc0_graph_init_tpccs_0 },
- { nvc0_graph_init_tex_0 },
- { nvc0_graph_init_pe_0 },
- { nvc0_graph_init_l1c_0 },
- { nvc0_graph_init_wwdx_0 },
- { nvc0_graph_init_tpccs_1 },
- { nvc0_graph_init_mpc_0 },
- { nvc0_graph_init_sm_0 },
- { nvc0_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
- { nvc0_graph_init_pe_1 },
+static const struct gf100_gr_pack
+gf100_gr_pack_mmio[] = {
+ { gf100_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gf100_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gf100_gr_init_prop_0 },
+ { gf100_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf100_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gf100_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gf100_gr_init_tpccs_0 },
+ { gf100_gr_init_tex_0 },
+ { gf100_gr_init_pe_0 },
+ { gf100_gr_init_l1c_0 },
+ { gf100_gr_init_wwdx_0 },
+ { gf100_gr_init_tpccs_1 },
+ { gf100_gr_init_mpc_0 },
+ { gf100_gr_init_sm_0 },
+ { gf100_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
+ { gf100_gr_init_pe_1 },
{}
};
@@ -624,7 +635,7 @@ nvc0_graph_pack_mmio[] = {
******************************************************************************/
void
-nvc0_graph_zbc_init(struct nvc0_graph_priv *priv)
+gf100_gr_zbc_init(struct gf100_gr_priv *priv)
{
const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000 };
@@ -634,29 +645,29 @@ nvc0_graph_zbc_init(struct nvc0_graph_priv *priv)
0x00000000, 0x00000000, 0x00000000, 0x00000000 };
const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
- struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ struct nvkm_ltc *ltc = nvkm_ltc(priv);
int index;
if (!priv->zbc_color[0].format) {
- nvc0_graph_zbc_color_get(priv, 1, & zero[0], &zero[4]);
- nvc0_graph_zbc_color_get(priv, 2, & one[0], &one[4]);
- nvc0_graph_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
- nvc0_graph_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
- nvc0_graph_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
- nvc0_graph_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+ gf100_gr_zbc_color_get(priv, 1, & zero[0], &zero[4]);
+ gf100_gr_zbc_color_get(priv, 2, & one[0], &one[4]);
+ gf100_gr_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
+ gf100_gr_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
+ gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
+ gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
}
for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
- nvc0_graph_zbc_clear_color(priv, index);
+ gf100_gr_zbc_clear_color(priv, index);
for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
- nvc0_graph_zbc_clear_depth(priv, index);
+ gf100_gr_zbc_clear_depth(priv, index);
}
void
-nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
+gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
- const struct nvc0_graph_pack *pack;
- const struct nvc0_graph_init *init;
+ const struct gf100_gr_pack *pack;
+ const struct gf100_gr_init *init;
pack_for_each_init(init, pack, p) {
u32 next = init->addr + init->count * init->pitch;
@@ -669,10 +680,10 @@ nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
}
void
-nvc0_graph_icmd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
+gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
- const struct nvc0_graph_pack *pack;
- const struct nvc0_graph_init *init;
+ const struct gf100_gr_pack *pack;
+ const struct gf100_gr_init *init;
u32 data = 0;
nv_wr32(priv, 0x400208, 0x80000000);
@@ -697,10 +708,10 @@ nvc0_graph_icmd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
}
void
-nvc0_graph_mthd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
+gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
- const struct nvc0_graph_pack *pack;
- const struct nvc0_graph_init *init;
+ const struct gf100_gr_pack *pack;
+ const struct gf100_gr_init *init;
u32 data = 0;
pack_for_each_init(init, pack, p) {
@@ -721,9 +732,9 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
}
u64
-nvc0_graph_units(struct nouveau_graph *graph)
+gf100_gr_units(struct nvkm_gr *gr)
{
- struct nvc0_graph_priv *priv = (void *)graph;
+ struct gf100_gr_priv *priv = (void *)gr;
u64 cfg;
cfg = (u32)priv->gpc_nr;
@@ -733,7 +744,7 @@ nvc0_graph_units(struct nouveau_graph *graph)
return cfg;
}
-static const struct nouveau_enum nve0_sked_error[] = {
+static const struct nvkm_enum gk104_sked_error[] = {
{ 7, "CONSTANT_BUFFER_SIZE" },
{ 9, "LOCAL_MEMORY_SIZE_POS" },
{ 10, "LOCAL_MEMORY_SIZE_NEG" },
@@ -748,7 +759,7 @@ static const struct nouveau_enum nve0_sked_error[] = {
{}
};
-static const struct nouveau_enum nvc0_gpc_rop_error[] = {
+static const struct nvkm_enum gf100_gpc_rop_error[] = {
{ 1, "RT_PITCH_OVERRUN" },
{ 4, "RT_WIDTH_OVERRUN" },
{ 5, "RT_HEIGHT_OVERRUN" },
@@ -759,7 +770,7 @@ static const struct nouveau_enum nvc0_gpc_rop_error[] = {
};
static void
-nvc0_graph_trap_gpc_rop(struct nvc0_graph_priv *priv, int gpc)
+gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
{
u32 trap[4];
int i;
@@ -774,7 +785,7 @@ nvc0_graph_trap_gpc_rop(struct nvc0_graph_priv *priv, int gpc)
if (!(trap[0] & (1 << i)))
continue;
pr_cont(" ");
- nouveau_enum_print(nvc0_gpc_rop_error, i);
+ nvkm_enum_print(gf100_gpc_rop_error, i);
}
pr_cont("\n");
@@ -784,7 +795,7 @@ nvc0_graph_trap_gpc_rop(struct nvc0_graph_priv *priv, int gpc)
nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
}
-static const struct nouveau_enum nvc0_mp_warp_error[] = {
+static const struct nvkm_enum gf100_mp_warp_error[] = {
{ 0x00, "NO_ERROR" },
{ 0x01, "STACK_MISMATCH" },
{ 0x05, "MISALIGNED_PC" },
@@ -797,23 +808,23 @@ static const struct nouveau_enum nvc0_mp_warp_error[] = {
{}
};
-static const struct nouveau_bitfield nvc0_mp_global_error[] = {
+static const struct nvkm_bitfield gf100_mp_global_error[] = {
{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
{ 0x00000008, "OUT_OF_STACK_SPACE" },
{}
};
static void
-nvc0_graph_trap_mp(struct nvc0_graph_priv *priv, int gpc, int tpc)
+gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc)
{
u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648));
u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650));
nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc);
- nouveau_bitfield_print(nvc0_mp_global_error, gerr);
+ nvkm_bitfield_print(gf100_mp_global_error, gerr);
if (werr) {
pr_cont(" ");
- nouveau_enum_print(nvc0_mp_warp_error, werr & 0xffff);
+ nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff);
}
pr_cont("\n");
@@ -822,7 +833,7 @@ nvc0_graph_trap_mp(struct nvc0_graph_priv *priv, int gpc, int tpc)
}
static void
-nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
+gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc)
{
u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
@@ -834,7 +845,7 @@ nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
}
if (stat & 0x00000002) {
- nvc0_graph_trap_mp(priv, gpc, tpc);
+ gf100_gr_trap_mp(priv, gpc, tpc);
stat &= ~0x00000002;
}
@@ -858,13 +869,13 @@ nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
}
static void
-nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
+gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc)
{
u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
int tpc;
if (stat & 0x00000001) {
- nvc0_graph_trap_gpc_rop(priv, gpc);
+ gf100_gr_trap_gpc_rop(priv, gpc);
stat &= ~0x00000001;
}
@@ -892,7 +903,7 @@ nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 mask = 0x00010000 << tpc;
if (stat & mask) {
- nvc0_graph_trap_tpc(priv, gpc, tpc);
+ gf100_gr_trap_tpc(priv, gpc, tpc);
nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
stat &= ~mask;
}
@@ -904,7 +915,7 @@ nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
}
static void
-nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
+gf100_gr_trap_intr(struct gf100_gr_priv *priv)
{
u32 trap = nv_rd32(priv, 0x400108);
int rop, gpc, i;
@@ -965,7 +976,7 @@ nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
if (!(stat & (1 << i)))
continue;
pr_cont(" ");
- nouveau_enum_print(nve0_sked_error, i);
+ nvkm_enum_print(gk104_sked_error, i);
}
pr_cont("\n");
@@ -980,7 +991,7 @@ nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
u32 mask = 0x00000001 << gpc;
if (stat & mask) {
- nvc0_graph_trap_gpc(priv, gpc);
+ gf100_gr_trap_gpc(priv, gpc);
nv_wr32(priv, 0x400118, mask);
stat &= ~mask;
}
@@ -1009,7 +1020,7 @@ nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
}
static void
-nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
+gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base)
{
nv_error(priv, "%06x - done 0x%08x\n", base,
nv_rd32(priv, base + 0x400));
@@ -1022,18 +1033,18 @@ nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
}
void
-nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
+gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv)
{
u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
u32 gpc;
- nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
+ gf100_gr_ctxctl_debug_unit(priv, 0x409000);
for (gpc = 0; gpc < gpcnr; gpc++)
- nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+ gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
}
static void
-nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
+gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv)
{
u32 stat = nv_rd32(priv, 0x409c18);
@@ -1059,26 +1070,26 @@ nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
if (stat & 0x00080000) {
nv_error(priv, "FECS watchdog timeout\n");
- nvc0_graph_ctxctl_debug(priv);
+ gf100_gr_ctxctl_debug(priv);
nv_wr32(priv, 0x409c20, 0x00080000);
stat &= ~0x00080000;
}
if (stat) {
nv_error(priv, "FECS 0x%08x\n", stat);
- nvc0_graph_ctxctl_debug(priv);
+ gf100_gr_ctxctl_debug(priv);
nv_wr32(priv, 0x409c20, stat);
}
}
static void
-nvc0_graph_intr(struct nouveau_subdev *subdev)
+gf100_gr_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nouveau_handle *handle;
- struct nvc0_graph_priv *priv = (void *)subdev;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct nvkm_handle *handle;
+ struct gf100_gr_priv *priv = (void *)subdev;
u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
u32 stat = nv_rd32(priv, 0x400100);
u32 addr = nv_rd32(priv, 0x400704);
@@ -1089,18 +1100,18 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000010) {
- handle = nouveau_handle_get_class(engctx, class);
+ handle = nvkm_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
nv_error(priv,
"ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nouveau_client_name(engctx),
+ chid, inst << 12, nvkm_client_name(engctx),
subc, class, mthd, data);
}
- nouveau_handle_put(handle);
+ nvkm_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
@@ -1108,7 +1119,7 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000020) {
nv_error(priv,
"ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nouveau_client_name(engctx), subc,
+ chid, inst << 12, nvkm_client_name(engctx), subc,
class, mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
@@ -1116,9 +1127,9 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
- nouveau_enum_print(nv50_data_error_names, code);
+ nvkm_enum_print(nv50_data_error_names, code);
pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nouveau_client_name(engctx), subc,
+ chid, inst << 12, nvkm_client_name(engctx), subc,
class, mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
@@ -1126,14 +1137,14 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00200000) {
nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
- nouveau_client_name(engctx));
- nvc0_graph_trap_intr(priv);
+ nvkm_client_name(engctx));
+ gf100_gr_trap_intr(priv);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
}
if (stat & 0x00080000) {
- nvc0_graph_ctxctl_isr(priv);
+ gf100_gr_ctxctl_isr(priv);
nv_wr32(priv, 0x400100, 0x00080000);
stat &= ~0x00080000;
}
@@ -1144,12 +1155,12 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
}
nv_wr32(priv, 0x400500, 0x00010001);
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
void
-nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
- struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
+gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base,
+ struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
int i;
@@ -1170,12 +1181,12 @@ nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
}
static void
-nvc0_graph_init_csdata(struct nvc0_graph_priv *priv,
- const struct nvc0_graph_pack *pack,
- u32 falcon, u32 starstar, u32 base)
+gf100_gr_init_csdata(struct gf100_gr_priv *priv,
+ const struct gf100_gr_pack *pack,
+ u32 falcon, u32 starstar, u32 base)
{
- const struct nvc0_graph_pack *iter;
- const struct nvc0_graph_init *init;
+ const struct gf100_gr_pack *iter;
+ const struct gf100_gr_init *init;
u32 addr = ~0, prev = ~0, xfer = 0;
u32 star, temp;
@@ -1211,20 +1222,20 @@ nvc0_graph_init_csdata(struct nvc0_graph_priv *priv,
}
int
-nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
+gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
{
- struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass;
- struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
+ struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass;
+ struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
int i;
if (priv->firmware) {
/* load fuc microcode */
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
- nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
- &priv->fuc409d);
- nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
- &priv->fuc41ad);
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+ gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c,
+ &priv->fuc409d);
+ gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac,
+ &priv->fuc41ad);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
/* start both of them running */
nv_wr32(priv, 0x409840, 0xffffffff);
@@ -1297,7 +1308,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
if (priv->data == NULL) {
- int ret = nvc0_grctx_generate(priv);
+ int ret = gf100_grctx_generate(priv);
if (ret) {
nv_error(priv, "failed to construct context\n");
return ret;
@@ -1311,7 +1322,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
/* load HUB microcode */
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
nv_wr32(priv, 0x4091c0, 0x01000000);
for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
@@ -1334,26 +1345,26 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a188, i >> 6);
nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
}
- nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
+ nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
/* load register lists */
- nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
- nvc0_graph_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
- nvc0_graph_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
- nvc0_graph_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
+ gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
+ gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
+ gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
+ gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
/* start HUB ucode running, it'll init the GPCs */
nv_wr32(priv, 0x40910c, 0x00000000);
nv_wr32(priv, 0x409100, 0x00000002);
if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
nv_error(priv, "HUB_INIT timed out\n");
- nvc0_graph_ctxctl_debug(priv);
+ gf100_gr_ctxctl_debug(priv);
return -EBUSY;
}
priv->size = nv_rd32(priv, 0x409804);
if (priv->data == NULL) {
- int ret = nvc0_grctx_generate(priv);
+ int ret = gf100_grctx_generate(priv);
if (ret) {
nv_error(priv, "failed to construct context\n");
return ret;
@@ -1364,17 +1375,17 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
int
-nvc0_graph_init(struct nouveau_object *object)
+gf100_gr_init(struct nvkm_object *object)
{
- struct nvc0_graph_oclass *oclass = (void *)object->oclass;
- struct nvc0_graph_priv *priv = (void *)object;
+ struct gf100_gr_oclass *oclass = (void *)object->oclass;
+ struct gf100_gr_priv *priv = (void *)object;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int ret, i;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -1387,7 +1398,7 @@ nvc0_graph_init(struct nouveau_object *object)
nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
- nvc0_graph_mmio(priv, oclass->mmio);
+ gf100_gr_mmio(priv, oclass->mmio);
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
@@ -1470,23 +1481,23 @@ nvc0_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400054, 0x34ce3464);
- nvc0_graph_zbc_init(priv);
+ gf100_gr_zbc_init(priv);
- return nvc0_graph_init_ctxctl(priv);
+ return gf100_gr_init_ctxctl(priv);
}
static void
-nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
+gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
kfree(fuc->data);
fuc->data = NULL;
}
int
-nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
- struct nvc0_graph_fuc *fuc)
+gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
+ struct gf100_gr_fuc *fuc)
{
- struct nouveau_device *device = nv_device(priv);
+ struct nvkm_device *device = nv_device(priv);
const struct firmware *fw;
char f[32];
int ret;
@@ -1509,65 +1520,65 @@ nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
}
void
-nvc0_graph_dtor(struct nouveau_object *object)
+gf100_gr_dtor(struct nvkm_object *object)
{
- struct nvc0_graph_priv *priv = (void *)object;
+ struct gf100_gr_priv *priv = (void *)object;
kfree(priv->data);
- nvc0_graph_dtor_fw(&priv->fuc409c);
- nvc0_graph_dtor_fw(&priv->fuc409d);
- nvc0_graph_dtor_fw(&priv->fuc41ac);
- nvc0_graph_dtor_fw(&priv->fuc41ad);
+ gf100_gr_dtor_fw(&priv->fuc409c);
+ gf100_gr_dtor_fw(&priv->fuc409d);
+ gf100_gr_dtor_fw(&priv->fuc41ac);
+ gf100_gr_dtor_fw(&priv->fuc41ad);
- nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
- nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+ nvkm_gpuobj_ref(NULL, &priv->unk4188b8);
+ nvkm_gpuobj_ref(NULL, &priv->unk4188b4);
- nouveau_graph_destroy(&priv->base);
+ nvkm_gr_destroy(&priv->base);
}
int
-nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *bclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *bclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_graph_oclass *oclass = (void *)bclass;
- struct nouveau_device *device = nv_device(parent);
- struct nvc0_graph_priv *priv;
+ struct gf100_gr_oclass *oclass = (void *)bclass;
+ struct nvkm_device *device = nv_device(parent);
+ struct gf100_gr_priv *priv;
bool use_ext_fw, enable;
int ret, i, j;
- use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW",
- oclass->fecs.ucode == NULL);
+ use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
+ oclass->fecs.ucode == NULL);
enable = use_ext_fw || oclass->fecs.ucode != NULL;
- ret = nouveau_graph_create(parent, engine, bclass, enable, &priv);
+ ret = nvkm_gr_create(parent, engine, bclass, enable, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x08001000;
- nv_subdev(priv)->intr = nvc0_graph_intr;
+ nv_subdev(priv)->intr = gf100_gr_intr;
- priv->base.units = nvc0_graph_units;
+ priv->base.units = gf100_gr_units;
if (use_ext_fw) {
nv_info(priv, "using external firmware\n");
- if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
- nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
- nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
- nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+ if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+ gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+ gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+ gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
return -ENODEV;
priv->firmware = true;
}
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
- &priv->unk4188b4);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+ &priv->unk4188b4);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
- &priv->unk4188b8);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+ &priv->unk4188b8);
if (ret)
return ret;
@@ -1630,38 +1641,38 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-#include "fuc/hubnvc0.fuc.h"
+#include "fuc/hubgf100.fuc3.h"
-struct nvc0_graph_ucode
-nvc0_graph_fecs_ucode = {
- .code.data = nvc0_grhub_code,
- .code.size = sizeof(nvc0_grhub_code),
- .data.data = nvc0_grhub_data,
- .data.size = sizeof(nvc0_grhub_data),
+struct gf100_gr_ucode
+gf100_gr_fecs_ucode = {
+ .code.data = gf100_grhub_code,
+ .code.size = sizeof(gf100_grhub_code),
+ .data.data = gf100_grhub_data,
+ .data.size = sizeof(gf100_grhub_data),
};
-#include "fuc/gpcnvc0.fuc.h"
+#include "fuc/gpcgf100.fuc3.h"
-struct nvc0_graph_ucode
-nvc0_graph_gpccs_ucode = {
- .code.data = nvc0_grgpc_code,
- .code.size = sizeof(nvc0_grgpc_code),
- .data.data = nvc0_grgpc_data,
- .data.size = sizeof(nvc0_grgpc_data),
+struct gf100_gr_ucode
+gf100_gr_gpccs_ucode = {
+ .code.data = gf100_grgpc_code,
+ .code.size = sizeof(gf100_grgpc_code),
+ .data.data = gf100_grgpc_data,
+ .data.size = sizeof(gf100_grgpc_data),
};
-struct nouveau_oclass *
-nvc0_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gf100_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nvc0_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gf100_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nvc0_grctx_oclass,
- .sclass = nvc0_graph_sclass,
- .mmio = nvc0_graph_pack_mmio,
- .fecs.ucode = &nvc0_graph_fecs_ucode,
- .gpccs.ucode = &nvc0_graph_gpccs_ucode,
+ .cclass = &gf100_grctx_oclass,
+ .sclass = gf100_gr_sclass,
+ .mmio = gf100_gr_pack_mmio,
+ .fecs.ucode = &gf100_gr_fecs_ucode,
+ .gpccs.ucode = &gf100_gr_gpccs_ucode,
}.base;
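The trap handlers in gf100.c above decode raw status words through small name tables: gf100_gr_trap_gpc_rop() looks up each set bit index in gf100_gpc_rop_error[] via nvkm_enum_print(), while gf100_gr_trap_mp() feeds the whole global-error word to nvkm_bitfield_print() and the low 16 bits of the warp error to the enum printer. The stand-alone C sketch below models that value-to-name and mask-to-name lookup; the struct layouts, helper bodies and printf output are illustrative assumptions, not the nvkm implementations.

/*
 * Stand-alone model of the nvkm_enum / nvkm_bitfield lookups used by the
 * trap handlers above.  Everything here is assumed for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

struct name_enum  { uint32_t value; const char *name; };
struct name_field { uint32_t mask;  const char *name; };

/* value/name pairs, modelled on gf100_gpc_rop_error[] above */
static const struct name_enum rop_error[] = {
	{ 1, "RT_PITCH_OVERRUN" },
	{ 4, "RT_WIDTH_OVERRUN" },
	{ 5, "RT_HEIGHT_OVERRUN" },
	{}
};

/* mask/name pairs, modelled on gf100_mp_global_error[] above */
static const struct name_field mp_global_error[] = {
	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
	{ 0x00000008, "OUT_OF_STACK_SPACE" },
	{}
};

/* print the name matching an exact value (cf. nvkm_enum_print) */
static void enum_print(const struct name_enum *en, uint32_t value)
{
	for (; en->name; en++) {
		if (en->value == value) {
			printf("%s", en->name);
			return;
		}
	}
	printf("UNKNOWN(0x%x)", (unsigned)value);
}

/* print the name of every set bit (cf. nvkm_bitfield_print) */
static void bitfield_print(const struct name_field *bf, uint32_t value)
{
	for (; bf->name; bf++)
		if (value & bf->mask)
			printf(" %s", bf->name);
}

int main(void)
{
	printf("GPC/ROP trap: ");
	enum_print(rop_error, 4);                    /* one decoded bit index */
	printf("\nMP trap:");
	bitfield_print(mp_global_error, 0x0000000c); /* both global errors set */
	printf("\n");
	return 0;
}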
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
new file mode 100644
index 000000000000..aeeca1be9cf0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#ifndef __NVC0_GR_H__
+#define __NVC0_GR_H__
+#include <engine/gr.h>
+
+#include <subdev/ltc.h>
+
+#define GPC_MAX 32
+#define TPC_MAX (GPC_MAX * 8)
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define PPC_UNIT(t, m, r) (0x503000 + (t) * 0x8000 + (m) * 0x200 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct gf100_gr_data {
+ u32 size;
+ u32 align;
+ u32 access;
+};
+
+struct gf100_gr_mmio {
+ u32 addr;
+ u32 data;
+ u32 shift;
+ int buffer;
+};
+
+struct gf100_gr_fuc {
+ u32 *data;
+ u32 size;
+};
+
+struct gf100_gr_zbc_color {
+ u32 format;
+ u32 ds[4];
+ u32 l2[4];
+};
+
+struct gf100_gr_zbc_depth {
+ u32 format;
+ u32 ds;
+ u32 l2;
+};
+
+struct gf100_gr_priv {
+ struct nvkm_gr base;
+
+ struct gf100_gr_fuc fuc409c;
+ struct gf100_gr_fuc fuc409d;
+ struct gf100_gr_fuc fuc41ac;
+ struct gf100_gr_fuc fuc41ad;
+ bool firmware;
+
+ struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
+ struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
+
+ u8 rop_nr;
+ u8 gpc_nr;
+ u8 tpc_nr[GPC_MAX];
+ u8 tpc_total;
+ u8 ppc_nr[GPC_MAX];
+ u8 ppc_tpc_nr[GPC_MAX][4];
+
+ struct nvkm_gpuobj *unk4188b4;
+ struct nvkm_gpuobj *unk4188b8;
+
+ struct gf100_gr_data mmio_data[4];
+ struct gf100_gr_mmio mmio_list[4096/8];
+ u32 size;
+ u32 *data;
+
+ u8 magic_not_rop_nr;
+};
+
+struct gf100_gr_chan {
+ struct nvkm_gr_chan base;
+
+ struct nvkm_gpuobj *mmio;
+ struct nvkm_vma mmio_vma;
+ int mmio_nr;
+ struct {
+ struct nvkm_gpuobj *mem;
+ struct nvkm_vma vma;
+ } data[4];
+};
+
+int gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void gf100_gr_context_dtor(struct nvkm_object *);
+
+void gf100_gr_ctxctl_debug(struct gf100_gr_priv *);
+
+u64 gf100_gr_units(struct nvkm_gr *);
+int gf100_gr_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+void gf100_gr_dtor(struct nvkm_object *);
+int gf100_gr_init(struct nvkm_object *);
+void gf100_gr_zbc_init(struct gf100_gr_priv *);
+
+int gk104_gr_fini(struct nvkm_object *, bool);
+int gk104_gr_init(struct nvkm_object *);
+
+int gk110_gr_fini(struct nvkm_object *, bool);
+
+extern struct nvkm_ofuncs gf100_fermi_ofuncs;
+
+extern struct nvkm_oclass gf100_gr_sclass[];
+extern struct nvkm_omthds gf100_gr_9097_omthds[];
+extern struct nvkm_omthds gf100_gr_90c0_omthds[];
+extern struct nvkm_oclass gf110_gr_sclass[];
+extern struct nvkm_oclass gk110_gr_sclass[];
+
+struct gf100_gr_init {
+ u32 addr;
+ u8 count;
+ u8 pitch;
+ u32 data;
+};
+
+struct gf100_gr_pack {
+ const struct gf100_gr_init *init;
+ u32 type;
+};
+
+#define pack_for_each_init(init, pack, head) \
+ for (pack = head; pack && pack->init; pack++) \
+ for (init = pack->init; init && init->count; init++)
+
+struct gf100_gr_ucode {
+ struct gf100_gr_fuc code;
+ struct gf100_gr_fuc data;
+};
+
+extern struct gf100_gr_ucode gf100_gr_fecs_ucode;
+extern struct gf100_gr_ucode gf100_gr_gpccs_ucode;
+
+extern struct gf100_gr_ucode gk110_gr_fecs_ucode;
+extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
+
+struct gf100_gr_oclass {
+ struct nvkm_oclass base;
+ struct nvkm_oclass **cclass;
+ struct nvkm_oclass *sclass;
+ const struct gf100_gr_pack *mmio;
+ struct {
+ struct gf100_gr_ucode *ucode;
+ } fecs;
+ struct {
+ struct gf100_gr_ucode *ucode;
+ } gpccs;
+ int ppc_nr;
+};
+
+void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
+void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
+void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
+int gf100_gr_init_ctxctl(struct gf100_gr_priv *);
+
+/* register init value lists */
+
+extern const struct gf100_gr_init gf100_gr_init_main_0[];
+extern const struct gf100_gr_init gf100_gr_init_fe_0[];
+extern const struct gf100_gr_init gf100_gr_init_pri_0[];
+extern const struct gf100_gr_init gf100_gr_init_rstr2d_0[];
+extern const struct gf100_gr_init gf100_gr_init_pd_0[];
+extern const struct gf100_gr_init gf100_gr_init_ds_0[];
+extern const struct gf100_gr_init gf100_gr_init_scc_0[];
+extern const struct gf100_gr_init gf100_gr_init_prop_0[];
+extern const struct gf100_gr_init gf100_gr_init_gpc_unk_0[];
+extern const struct gf100_gr_init gf100_gr_init_setup_0[];
+extern const struct gf100_gr_init gf100_gr_init_crstr_0[];
+extern const struct gf100_gr_init gf100_gr_init_setup_1[];
+extern const struct gf100_gr_init gf100_gr_init_zcull_0[];
+extern const struct gf100_gr_init gf100_gr_init_gpm_0[];
+extern const struct gf100_gr_init gf100_gr_init_gpc_unk_1[];
+extern const struct gf100_gr_init gf100_gr_init_gcc_0[];
+extern const struct gf100_gr_init gf100_gr_init_tpccs_0[];
+extern const struct gf100_gr_init gf100_gr_init_tex_0[];
+extern const struct gf100_gr_init gf100_gr_init_pe_0[];
+extern const struct gf100_gr_init gf100_gr_init_l1c_0[];
+extern const struct gf100_gr_init gf100_gr_init_wwdx_0[];
+extern const struct gf100_gr_init gf100_gr_init_tpccs_1[];
+extern const struct gf100_gr_init gf100_gr_init_mpc_0[];
+extern const struct gf100_gr_init gf100_gr_init_be_0[];
+extern const struct gf100_gr_init gf100_gr_init_fe_1[];
+extern const struct gf100_gr_init gf100_gr_init_pe_1[];
+
+extern const struct gf100_gr_init gf104_gr_init_ds_0[];
+extern const struct gf100_gr_init gf104_gr_init_tex_0[];
+extern const struct gf100_gr_init gf104_gr_init_sm_0[];
+
+extern const struct gf100_gr_init gf108_gr_init_gpc_unk_0[];
+extern const struct gf100_gr_init gf108_gr_init_setup_1[];
+
+extern const struct gf100_gr_init gf119_gr_init_pd_0[];
+extern const struct gf100_gr_init gf119_gr_init_ds_0[];
+extern const struct gf100_gr_init gf119_gr_init_prop_0[];
+extern const struct gf100_gr_init gf119_gr_init_gpm_0[];
+extern const struct gf100_gr_init gf119_gr_init_gpc_unk_1[];
+extern const struct gf100_gr_init gf119_gr_init_tex_0[];
+extern const struct gf100_gr_init gf119_gr_init_sm_0[];
+extern const struct gf100_gr_init gf119_gr_init_fe_1[];
+
+extern const struct gf100_gr_init gf117_gr_init_pes_0[];
+extern const struct gf100_gr_init gf117_gr_init_wwdx_0[];
+extern const struct gf100_gr_init gf117_gr_init_cbm_0[];
+
+extern const struct gf100_gr_init gk104_gr_init_main_0[];
+extern const struct gf100_gr_init gk104_gr_init_tpccs_0[];
+extern const struct gf100_gr_init gk104_gr_init_pe_0[];
+extern const struct gf100_gr_init gk104_gr_init_be_0[];
+extern const struct gf100_gr_pack gk104_gr_pack_mmio[];
+
+extern const struct gf100_gr_init gk110_gr_init_fe_0[];
+extern const struct gf100_gr_init gk110_gr_init_ds_0[];
+extern const struct gf100_gr_init gk110_gr_init_sked_0[];
+extern const struct gf100_gr_init gk110_gr_init_cwd_0[];
+extern const struct gf100_gr_init gk110_gr_init_gpc_unk_1[];
+extern const struct gf100_gr_init gk110_gr_init_tex_0[];
+extern const struct gf100_gr_init gk110_gr_init_sm_0[];
+
+extern const struct gf100_gr_init gk208_gr_init_gpc_unk_0[];
+#endif
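The gf100_gr_init/gf100_gr_pack tables and the pack_for_each_init() iterator declared above drive gf100_gr_mmio(): each init entry programs "count" registers starting at "addr", spaced "pitch" bytes apart, all with the same "data" word. The stand-alone sketch below models that walk; the simplified structs, wr32() stub and printf output are assumptions for illustration, not nvkm code (the real gf100_gr_pack also carries a "type" word used by gf100_gr_icmd()/gf100_gr_mthd(), omitted here).

/*
 * Stand-alone model of the register-list walk performed by gf100_gr_mmio()
 * over gf100_gr_pack / gf100_gr_init tables.  Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

struct init { uint32_t addr; uint8_t count; uint8_t pitch; uint32_t data; };
struct pack { const struct init *init; };

/* stand-in for nv_wr32(priv, addr, data) */
static void wr32(uint32_t addr, uint32_t data)
{
	printf("wr32 0x%06x <- 0x%08x\n", (unsigned)addr, (unsigned)data);
}

static void mmio(const struct pack *p)
{
	const struct pack *pack;
	const struct init *init;

	/* same nested walk as pack_for_each_init() above */
	for (pack = p; pack && pack->init; pack++) {
		for (init = pack->init; init && init->count; init++) {
			uint32_t next = init->addr + init->count * init->pitch;
			uint32_t addr = init->addr;

			/* "count" registers from "addr", "pitch" bytes apart */
			while (addr < next) {
				wr32(addr, init->data);
				addr += init->pitch;
			}
		}
	}
}

int main(void)
{
	/* entries modelled on gf104_gr_init_pe_0[] / gf100_gr_init_pe_1[] */
	static const struct init pe[] = {
		{ 0x41980c, 3, 0x04, 0x00000000 },
		{ 0x419880, 1, 0x04, 0x00000002 },
		{}
	};
	static const struct pack pack_mmio[] = {
		{ pe },
		{}
	};

	mmio(pack_mmio);
	return 0;
}

A single entry such as { 0x41980c, 3, 0x04, 0x00000000 } (see gf104_gr_init_pe_0[] in the next file) therefore expands to three writes, at 0x41980c, 0x419810 and 0x419814.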
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index e82e70c53132..20d3b85db3b5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -21,16 +21,15 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include "gf100.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
-const struct nvc0_graph_init
-nvc4_graph_init_ds_0[] = {
+const struct gf100_gr_init
+gf104_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00002834 },
@@ -38,8 +37,8 @@ nvc4_graph_init_ds_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc4_graph_init_tex_0[] = {
+const struct gf100_gr_init
+gf104_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -47,8 +46,8 @@ nvc4_graph_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvc4_graph_init_pe_0[] = {
+static const struct gf100_gr_init
+gf104_gr_init_pe_0[] = {
{ 0x41980c, 3, 0x04, 0x00000000 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x41984c, 1, 0x04, 0x00005bc5 },
@@ -57,8 +56,8 @@ nvc4_graph_init_pe_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc4_graph_init_sm_0[] = {
+const struct gf100_gr_init
+gf104_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -76,34 +75,34 @@ nvc4_graph_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc4_graph_pack_mmio[] = {
- { nvc0_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvc0_graph_init_pd_0 },
- { nvc4_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvc0_graph_init_prop_0 },
- { nvc0_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc0_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvc0_graph_init_gpm_0 },
- { nvc0_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nvc0_graph_init_tpccs_0 },
- { nvc4_graph_init_tex_0 },
- { nvc4_graph_init_pe_0 },
- { nvc0_graph_init_l1c_0 },
- { nvc0_graph_init_wwdx_0 },
- { nvc0_graph_init_tpccs_1 },
- { nvc0_graph_init_mpc_0 },
- { nvc4_graph_init_sm_0 },
- { nvc0_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gf104_gr_pack_mmio[] = {
+ { gf100_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gf104_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gf100_gr_init_prop_0 },
+ { gf100_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf100_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gf100_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gf100_gr_init_tpccs_0 },
+ { gf104_gr_init_tex_0 },
+ { gf104_gr_init_pe_0 },
+ { gf100_gr_init_l1c_0 },
+ { gf100_gr_init_wwdx_0 },
+ { gf100_gr_init_tpccs_1 },
+ { gf100_gr_init_mpc_0 },
+ { gf104_gr_init_sm_0 },
+ { gf100_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
{}
};
@@ -111,18 +110,18 @@ nvc4_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nouveau_oclass *
-nvc4_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gf104_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc3),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nvc0_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gf100_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nvc4_grctx_oclass,
- .sclass = nvc0_graph_sclass,
- .mmio = nvc4_graph_pack_mmio,
- .fecs.ucode = &nvc0_graph_fecs_ucode,
- .gpccs.ucode = &nvc0_graph_gpccs_ucode,
+ .cclass = &gf104_grctx_oclass,
+ .sclass = gf100_gr_sclass,
+ .mmio = gf104_gr_pack_mmio,
+ .fecs.ucode = &gf100_gr_fecs_ucode,
+ .gpccs.ucode = &gf100_gr_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 93d58e5b82c2..5362c8176e64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -21,21 +21,22 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "gf100.h"
+#include "ctxgf100.h"
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nvc1_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0x9039, &nouveau_object_ofuncs },
- { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+static struct nvkm_oclass
+gf108_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
@@ -43,8 +44,8 @@ nvc1_graph_sclass[] = {
* PGRAPH register lists
******************************************************************************/
-const struct nvc0_graph_init
-nvc1_graph_init_gpc_unk_0[] = {
+const struct gf100_gr_init
+gf108_gr_init_gpc_unk_0[] = {
{ 0x418604, 1, 0x04, 0x00000000 },
{ 0x418680, 1, 0x04, 0x00000000 },
{ 0x418714, 1, 0x04, 0x00000000 },
@@ -52,16 +53,16 @@ nvc1_graph_init_gpc_unk_0[] = {
{}
};
-const struct nvc0_graph_init
-nvc1_graph_init_setup_1[] = {
+const struct gf100_gr_init
+gf108_gr_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
{ 0x4188d4, 1, 0x04, 0x00000001 },
{}
};
-static const struct nvc0_graph_init
-nvc1_graph_init_gpc_unk_1[] = {
+static const struct gf100_gr_init
+gf108_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f08, 1, 0x04, 0x00000000 },
{ 0x418e00, 1, 0x04, 0x00000003 },
@@ -69,8 +70,8 @@ nvc1_graph_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
-nvc1_graph_init_pe_0[] = {
+static const struct gf100_gr_init
+gf108_gr_init_pe_0[] = {
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419810, 1, 0x04, 0x00000000 },
{ 0x419814, 1, 0x04, 0x00000004 },
@@ -81,34 +82,34 @@ nvc1_graph_init_pe_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc1_graph_pack_mmio[] = {
- { nvc0_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvc0_graph_init_pd_0 },
- { nvc4_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvc0_graph_init_prop_0 },
- { nvc1_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvc0_graph_init_gpm_0 },
- { nvc1_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nvc0_graph_init_tpccs_0 },
- { nvc4_graph_init_tex_0 },
- { nvc1_graph_init_pe_0 },
- { nvc0_graph_init_l1c_0 },
- { nvc0_graph_init_wwdx_0 },
- { nvc0_graph_init_tpccs_1 },
- { nvc0_graph_init_mpc_0 },
- { nvc4_graph_init_sm_0 },
- { nvc0_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gf108_gr_pack_mmio[] = {
+ { gf100_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gf104_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gf100_gr_init_prop_0 },
+ { gf108_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gf108_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gf100_gr_init_tpccs_0 },
+ { gf104_gr_init_tex_0 },
+ { gf108_gr_init_pe_0 },
+ { gf100_gr_init_l1c_0 },
+ { gf100_gr_init_wwdx_0 },
+ { gf100_gr_init_tpccs_1 },
+ { gf100_gr_init_mpc_0 },
+ { gf104_gr_init_sm_0 },
+ { gf100_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
{}
};
@@ -116,18 +117,18 @@ nvc1_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nouveau_oclass *
-nvc1_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gf108_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc1),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nvc0_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gf100_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nvc1_grctx_oclass,
- .sclass = nvc1_graph_sclass,
- .mmio = nvc1_graph_pack_mmio,
- .fecs.ucode = &nvc0_graph_fecs_ucode,
- .gpccs.ucode = &nvc0_graph_gpccs_ucode,
+ .cclass = &gf108_grctx_oclass,
+ .sclass = gf108_gr_sclass,
+ .mmio = gf108_gr_pack_mmio,
+ .fecs.ucode = &gf100_gr_fecs_ucode,
+ .gpccs.ucode = &gf100_gr_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 692e1eda0eb4..88beb491b7b8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -21,22 +21,23 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "gf100.h"
+#include "ctxgf100.h"
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-struct nouveau_oclass
-nvc8_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0x9039, &nouveau_object_ofuncs },
- { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { FERMI_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+struct nvkm_oclass
+gf110_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
@@ -44,8 +45,8 @@ nvc8_graph_sclass[] = {
* PGRAPH register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvc8_graph_init_sm_0[] = {
+static const struct gf100_gr_init
+gf110_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -62,35 +63,35 @@ nvc8_graph_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvc8_graph_pack_mmio[] = {
- { nvc0_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvc0_graph_init_pd_0 },
- { nvc0_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvc0_graph_init_prop_0 },
- { nvc0_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvc0_graph_init_gpm_0 },
- { nvc0_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nvc0_graph_init_tpccs_0 },
- { nvc0_graph_init_tex_0 },
- { nvc0_graph_init_pe_0 },
- { nvc0_graph_init_l1c_0 },
- { nvc0_graph_init_wwdx_0 },
- { nvc0_graph_init_tpccs_1 },
- { nvc0_graph_init_mpc_0 },
- { nvc8_graph_init_sm_0 },
- { nvc0_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
- { nvc0_graph_init_pe_1 },
+static const struct gf100_gr_pack
+gf110_gr_pack_mmio[] = {
+ { gf100_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gf100_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gf100_gr_init_prop_0 },
+ { gf100_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gf100_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gf100_gr_init_tpccs_0 },
+ { gf100_gr_init_tex_0 },
+ { gf100_gr_init_pe_0 },
+ { gf100_gr_init_l1c_0 },
+ { gf100_gr_init_wwdx_0 },
+ { gf100_gr_init_tpccs_1 },
+ { gf100_gr_init_mpc_0 },
+ { gf110_gr_init_sm_0 },
+ { gf100_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
+ { gf100_gr_init_pe_1 },
{}
};
@@ -98,18 +99,18 @@ nvc8_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nouveau_oclass *
-nvc8_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gf110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc8),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nvc0_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gf100_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nvc8_grctx_oclass,
- .sclass = nvc8_graph_sclass,
- .mmio = nvc8_graph_pack_mmio,
- .fecs.ucode = &nvc0_graph_fecs_ucode,
- .gpccs.ucode = &nvc0_graph_gpccs_ucode,
+ .cclass = &gf110_grctx_oclass,
+ .sclass = gf110_gr_sclass,
+ .mmio = gf110_gr_pack_mmio,
+ .fecs.ucode = &gf100_gr_fecs_ucode,
+ .gpccs.ucode = &gf100_gr_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 41e8445c7eea..871ac5f806f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -21,16 +21,15 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include "gf100.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nvd7_graph_init_pe_0[] = {
+static const struct gf100_gr_init
+gf117_gr_init_pe_0[] = {
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x41984c, 1, 0x04, 0x00005bc8 },
@@ -38,8 +37,8 @@ nvd7_graph_init_pe_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd7_graph_init_pes_0[] = {
+const struct gf100_gr_init
+gf117_gr_init_pes_0[] = {
{ 0x41be04, 1, 0x04, 0x00000000 },
{ 0x41be08, 1, 0x04, 0x00000004 },
{ 0x41be0c, 1, 0x04, 0x00000000 },
@@ -48,50 +47,50 @@ nvd7_graph_init_pes_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd7_graph_init_wwdx_0[] = {
+const struct gf100_gr_init
+gf117_gr_init_wwdx_0[] = {
{ 0x41bfd4, 1, 0x04, 0x00800000 },
{ 0x41bfdc, 1, 0x04, 0x00000000 },
{ 0x41bff8, 2, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvd7_graph_init_cbm_0[] = {
+const struct gf100_gr_init
+gf117_gr_init_cbm_0[] = {
{ 0x41becc, 1, 0x04, 0x00000000 },
{ 0x41bee8, 2, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_pack
-nvd7_graph_pack_mmio[] = {
- { nvc0_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvd9_graph_init_pd_0 },
- { nvd9_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvd9_graph_init_prop_0 },
- { nvc1_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvd9_graph_init_gpm_0 },
- { nvd9_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nvc0_graph_init_tpccs_0 },
- { nvd9_graph_init_tex_0 },
- { nvd7_graph_init_pe_0 },
- { nvc0_graph_init_l1c_0 },
- { nvc0_graph_init_mpc_0 },
- { nvd9_graph_init_sm_0 },
- { nvd7_graph_init_pes_0 },
- { nvd7_graph_init_wwdx_0 },
- { nvd7_graph_init_cbm_0 },
- { nvc0_graph_init_be_0 },
- { nvd9_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gf117_gr_pack_mmio[] = {
+ { gf100_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf119_gr_init_pd_0 },
+ { gf119_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gf119_gr_init_prop_0 },
+ { gf108_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf119_gr_init_gpm_0 },
+ { gf119_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gf100_gr_init_tpccs_0 },
+ { gf119_gr_init_tex_0 },
+ { gf117_gr_init_pe_0 },
+ { gf100_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gf119_gr_init_sm_0 },
+ { gf117_gr_init_pes_0 },
+ { gf117_gr_init_wwdx_0 },
+ { gf117_gr_init_cbm_0 },
+ { gf100_gr_init_be_0 },
+ { gf119_gr_init_fe_1 },
{}
};
@@ -99,39 +98,39 @@ nvd7_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-#include "fuc/hubnvd7.fuc.h"
+#include "fuc/hubgf117.fuc3.h"
-struct nvc0_graph_ucode
-nvd7_graph_fecs_ucode = {
- .code.data = nvd7_grhub_code,
- .code.size = sizeof(nvd7_grhub_code),
- .data.data = nvd7_grhub_data,
- .data.size = sizeof(nvd7_grhub_data),
+struct gf100_gr_ucode
+gf117_gr_fecs_ucode = {
+ .code.data = gf117_grhub_code,
+ .code.size = sizeof(gf117_grhub_code),
+ .data.data = gf117_grhub_data,
+ .data.size = sizeof(gf117_grhub_data),
};
-#include "fuc/gpcnvd7.fuc.h"
+#include "fuc/gpcgf117.fuc3.h"
-struct nvc0_graph_ucode
-nvd7_graph_gpccs_ucode = {
- .code.data = nvd7_grgpc_code,
- .code.size = sizeof(nvd7_grgpc_code),
- .data.data = nvd7_grgpc_data,
- .data.size = sizeof(nvd7_grgpc_data),
+struct gf100_gr_ucode
+gf117_gr_gpccs_ucode = {
+ .code.data = gf117_grgpc_code,
+ .code.size = sizeof(gf117_grgpc_code),
+ .data.data = gf117_grgpc_data,
+ .data.size = sizeof(gf117_grgpc_data),
};
-struct nouveau_oclass *
-nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gf117_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xd7),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nvc0_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gf100_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nvd7_grctx_oclass,
- .sclass = nvc8_graph_sclass,
- .mmio = nvd7_graph_pack_mmio,
- .fecs.ucode = &nvd7_graph_fecs_ucode,
- .gpccs.ucode = &nvd7_graph_gpccs_ucode,
+ .cclass = &gf117_grctx_oclass,
+ .sclass = gf110_gr_sclass,
+ .mmio = gf117_gr_pack_mmio,
+ .fecs.ucode = &gf117_gr_fecs_ucode,
+ .gpccs.ucode = &gf117_gr_gpccs_ucode,
.ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 00fdf202fb92..e6dd651e2636 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -21,23 +21,22 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include "gf100.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
-const struct nvc0_graph_init
-nvd9_graph_init_pd_0[] = {
+const struct gf100_gr_init
+gf119_gr_init_pd_0[] = {
{ 0x406024, 1, 0x04, 0x00000000 },
{ 0x4064f0, 3, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_ds_0[] = {
+const struct gf100_gr_init
+gf119_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00002834 },
@@ -46,15 +45,15 @@ nvd9_graph_init_ds_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_prop_0[] = {
+const struct gf100_gr_init
+gf119_gr_init_prop_0[] = {
{ 0x418408, 1, 0x04, 0x00000000 },
{ 0x4184a0, 3, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_gpm_0[] = {
+const struct gf100_gr_init
+gf119_gr_init_gpm_0[] = {
{ 0x418c04, 1, 0x04, 0x00000000 },
{ 0x418c64, 2, 0x04, 0x00000000 },
{ 0x418c88, 1, 0x04, 0x00000000 },
@@ -62,8 +61,8 @@ nvd9_graph_init_gpm_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_gpc_unk_1[] = {
+const struct gf100_gr_init
+gf119_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418d28, 2, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000000 },
@@ -75,8 +74,8 @@ nvd9_graph_init_gpc_unk_1[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_tex_0[] = {
+const struct gf100_gr_init
+gf119_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -85,8 +84,8 @@ nvd9_graph_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd9_graph_init_pe_0[] = {
+static const struct gf100_gr_init
+gf119_gr_init_pe_0[] = {
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419810, 1, 0x04, 0x00000000 },
{ 0x419814, 1, 0x04, 0x00000004 },
@@ -97,23 +96,23 @@ nvd9_graph_init_pe_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvd9_graph_init_wwdx_0[] = {
+static const struct gf100_gr_init
+gf119_gr_init_wwdx_0[] = {
{ 0x419bd4, 1, 0x04, 0x00800000 },
{ 0x419bdc, 1, 0x04, 0x00000000 },
{ 0x419bf8, 2, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-nvd9_graph_init_tpccs_1[] = {
+static const struct gf100_gr_init
+gf119_gr_init_tpccs_1[] = {
{ 0x419d2c, 1, 0x04, 0x00000000 },
{ 0x419d48, 2, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_sm_0[] = {
+const struct gf100_gr_init
+gf119_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ea4, 1, 0x04, 0x00000100 },
@@ -131,42 +130,42 @@ nvd9_graph_init_sm_0[] = {
{}
};
-const struct nvc0_graph_init
-nvd9_graph_init_fe_1[] = {
+const struct gf100_gr_init
+gf119_gr_init_fe_1[] = {
{ 0x40402c, 1, 0x04, 0x00000000 },
{ 0x4040f0, 1, 0x04, 0x00000000 },
{ 0x404174, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_pack
-nvd9_graph_pack_mmio[] = {
- { nvc0_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvd9_graph_init_pd_0 },
- { nvd9_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvd9_graph_init_prop_0 },
- { nvc1_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvd9_graph_init_gpm_0 },
- { nvd9_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nvc0_graph_init_tpccs_0 },
- { nvd9_graph_init_tex_0 },
- { nvd9_graph_init_pe_0 },
- { nvc0_graph_init_l1c_0 },
- { nvd9_graph_init_wwdx_0 },
- { nvd9_graph_init_tpccs_1 },
- { nvc0_graph_init_mpc_0 },
- { nvd9_graph_init_sm_0 },
- { nvc0_graph_init_be_0 },
- { nvd9_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gf119_gr_pack_mmio[] = {
+ { gf100_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf119_gr_init_pd_0 },
+ { gf119_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gf119_gr_init_prop_0 },
+ { gf108_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf119_gr_init_gpm_0 },
+ { gf119_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gf100_gr_init_tpccs_0 },
+ { gf119_gr_init_tex_0 },
+ { gf119_gr_init_pe_0 },
+ { gf100_gr_init_l1c_0 },
+ { gf119_gr_init_wwdx_0 },
+ { gf119_gr_init_tpccs_1 },
+ { gf100_gr_init_mpc_0 },
+ { gf119_gr_init_sm_0 },
+ { gf100_gr_init_be_0 },
+ { gf119_gr_init_fe_1 },
{}
};
@@ -174,18 +173,18 @@ nvd9_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nouveau_oclass *
-nvd9_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gf119_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xd9),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nvc0_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gf100_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nvd9_grctx_oclass,
- .sclass = nvc8_graph_sclass,
- .mmio = nvd9_graph_pack_mmio,
- .fecs.ucode = &nvc0_graph_fecs_ucode,
- .gpccs.ucode = &nvc0_graph_gpccs_ucode,
+ .cclass = &gf119_grctx_oclass,
+ .sclass = gf110_gr_sclass,
+ .mmio = gf119_gr_pack_mmio,
+ .fecs.ucode = &gf100_gr_fecs_ucode,
+ .gpccs.ucode = &gf100_gr_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 0c71f5c67ae0..489fdd94b885 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -21,22 +21,23 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "gf100.h"
+#include "ctxgf100.h"
-#include <subdev/pwr.h>
+#include <subdev/pmu.h>
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nve4_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0xa040, &nouveau_object_ofuncs },
- { KEPLER_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+static struct nvkm_oclass
+gk104_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0xa040, &nvkm_object_ofuncs },
+ { KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
@@ -44,8 +45,8 @@ nve4_graph_sclass[] = {
* PGRAPH register lists
******************************************************************************/
-const struct nvc0_graph_init
-nve4_graph_init_main_0[] = {
+const struct gf100_gr_init
+gk104_gr_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003083c2 },
{ 0x400088, 1, 0x04, 0x0001ffe7 },
{ 0x40008c, 1, 0x04, 0x00000000 },
@@ -60,8 +61,8 @@ nve4_graph_init_main_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_graph_init_ds_0[] = {
+static const struct gf100_gr_init
+gk104_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x0000ff34 },
@@ -70,20 +71,20 @@ nve4_graph_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_graph_init_sked_0[] = {
+static const struct gf100_gr_init
+gk104_gr_init_sked_0[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-nve4_graph_init_cwd_0[] = {
+static const struct gf100_gr_init
+gk104_gr_init_cwd_0[] = {
{ 0x405b50, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-nve4_graph_init_gpc_unk_1[] = {
+static const struct gf100_gr_init
+gk104_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418d28, 2, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000000 },
@@ -95,15 +96,15 @@ nve4_graph_init_gpc_unk_1[] = {
{}
};
-const struct nvc0_graph_init
-nve4_graph_init_tpccs_0[] = {
+const struct gf100_gr_init
+gk104_gr_init_tpccs_0[] = {
{ 0x419d0c, 1, 0x04, 0x00000000 },
{ 0x419d10, 1, 0x04, 0x00000014 },
{}
};
-const struct nvc0_graph_init
-nve4_graph_init_pe_0[] = {
+const struct gf100_gr_init
+gk104_gr_init_pe_0[] = {
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419844, 1, 0x04, 0x00000000 },
{ 0x419850, 1, 0x04, 0x00000004 },
@@ -111,8 +112,8 @@ nve4_graph_init_pe_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_graph_init_l1c_0[] = {
+static const struct gf100_gr_init
+gk104_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x01000000 },
@@ -125,8 +126,8 @@ nve4_graph_init_l1c_0[] = {
{}
};
-static const struct nvc0_graph_init
-nve4_graph_init_sm_0[] = {
+static const struct gf100_gr_init
+gk104_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ee4, 1, 0x04, 0x00000000 },
@@ -139,8 +140,8 @@ nve4_graph_init_sm_0[] = {
{}
};
-const struct nvc0_graph_init
-nve4_graph_init_be_0[] = {
+const struct gf100_gr_init
+gk104_gr_init_be_0[] = {
{ 0x40880c, 1, 0x04, 0x00000000 },
{ 0x408850, 1, 0x04, 0x00000004 },
{ 0x408910, 9, 0x04, 0x00000000 },
@@ -153,37 +154,37 @@ nve4_graph_init_be_0[] = {
{}
};
-const struct nvc0_graph_pack
-nve4_graph_pack_mmio[] = {
- { nve4_graph_init_main_0 },
- { nvc0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvd9_graph_init_pd_0 },
- { nve4_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nve4_graph_init_sked_0 },
- { nve4_graph_init_cwd_0 },
- { nvd9_graph_init_prop_0 },
- { nvc1_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvd9_graph_init_gpm_0 },
- { nve4_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nve4_graph_init_tpccs_0 },
- { nvd9_graph_init_tex_0 },
- { nve4_graph_init_pe_0 },
- { nve4_graph_init_l1c_0 },
- { nvc0_graph_init_mpc_0 },
- { nve4_graph_init_sm_0 },
- { nvd7_graph_init_pes_0 },
- { nvd7_graph_init_wwdx_0 },
- { nvd7_graph_init_cbm_0 },
- { nve4_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
+const struct gf100_gr_pack
+gk104_gr_pack_mmio[] = {
+ { gk104_gr_init_main_0 },
+ { gf100_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf119_gr_init_pd_0 },
+ { gk104_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gk104_gr_init_sked_0 },
+ { gk104_gr_init_cwd_0 },
+ { gf119_gr_init_prop_0 },
+ { gf108_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf119_gr_init_gpm_0 },
+ { gk104_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gk104_gr_init_tpccs_0 },
+ { gf119_gr_init_tex_0 },
+ { gk104_gr_init_pe_0 },
+ { gk104_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gk104_gr_init_sm_0 },
+ { gf117_gr_init_pes_0 },
+ { gf117_gr_init_wwdx_0 },
+ { gf117_gr_init_cbm_0 },
+ { gk104_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
{}
};
@@ -192,21 +193,21 @@ nve4_graph_pack_mmio[] = {
******************************************************************************/
int
-nve4_graph_init(struct nouveau_object *object)
+gk104_gr_init(struct nvkm_object *object)
{
- struct nvc0_graph_oclass *oclass = (void *)object->oclass;
- struct nvc0_graph_priv *priv = (void *)object;
- struct nouveau_pwr *ppwr = nouveau_pwr(priv);
+ struct gf100_gr_oclass *oclass = (void *)object->oclass;
+ struct gf100_gr_priv *priv = (void *)object;
+ struct nvkm_pmu *pmu = nvkm_pmu(priv);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int ret, i;
- if (ppwr)
- ppwr->pgob(ppwr, false);
+ if (pmu)
+ pmu->pgob(pmu, false);
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -219,7 +220,7 @@ nve4_graph_init(struct nouveau_object *object)
nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
- nvc0_graph_mmio(priv, oclass->mmio);
+ gf100_gr_mmio(priv, oclass->mmio);
nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
@@ -304,44 +305,44 @@ nve4_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400054, 0x34ce3464);
- nvc0_graph_zbc_init(priv);
+ gf100_gr_zbc_init(priv);
- return nvc0_graph_init_ctxctl(priv);
+ return gf100_gr_init_ctxctl(priv);
}
-#include "fuc/hubnve0.fuc.h"
+#include "fuc/hubgk104.fuc3.h"
-static struct nvc0_graph_ucode
-nve4_graph_fecs_ucode = {
- .code.data = nve0_grhub_code,
- .code.size = sizeof(nve0_grhub_code),
- .data.data = nve0_grhub_data,
- .data.size = sizeof(nve0_grhub_data),
+static struct gf100_gr_ucode
+gk104_gr_fecs_ucode = {
+ .code.data = gk104_grhub_code,
+ .code.size = sizeof(gk104_grhub_code),
+ .data.data = gk104_grhub_data,
+ .data.size = sizeof(gk104_grhub_data),
};
-#include "fuc/gpcnve0.fuc.h"
+#include "fuc/gpcgk104.fuc3.h"
-static struct nvc0_graph_ucode
-nve4_graph_gpccs_ucode = {
- .code.data = nve0_grgpc_code,
- .code.size = sizeof(nve0_grgpc_code),
- .data.data = nve0_grgpc_data,
- .data.size = sizeof(nve0_grgpc_data),
+static struct gf100_gr_ucode
+gk104_gr_gpccs_ucode = {
+ .code.data = gk104_grgpc_code,
+ .code.size = sizeof(gk104_grgpc_code),
+ .data.data = gk104_grgpc_data,
+ .data.size = sizeof(gk104_grgpc_data),
};
-struct nouveau_oclass *
-nve4_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gk104_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xe4),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nve4_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gk104_gr_init,
+ .fini = _nvkm_gr_fini,
},
- .cclass = &nve4_grctx_oclass,
- .sclass = nve4_graph_sclass,
- .mmio = nve4_graph_pack_mmio,
- .fecs.ucode = &nve4_graph_fecs_ucode,
- .gpccs.ucode = &nve4_graph_gpccs_ucode,
+ .cclass = &gk104_grctx_oclass,
+ .sclass = gk104_gr_sclass,
+ .mmio = gk104_gr_pack_mmio,
+ .fecs.ucode = &gk104_gr_fecs_ucode,
+ .gpccs.ucode = &gk104_gr_gpccs_ucode,
.ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index c306c0f2fc84..78e03ab1608e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -21,20 +21,23 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "gf100.h"
+#include "ctxgf100.h"
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-struct nouveau_oclass
-nvf0_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0xa140, &nouveau_object_ofuncs },
- { KEPLER_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { KEPLER_COMPUTE_B, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+struct nvkm_oclass
+gk110_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0xa140, &nvkm_object_ofuncs },
+ { KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
@@ -42,16 +45,16 @@ nvf0_graph_sclass[] = {
* PGRAPH register lists
******************************************************************************/
-const struct nvc0_graph_init
-nvf0_graph_init_fe_0[] = {
+const struct gf100_gr_init
+gk110_gr_init_fe_0[] = {
{ 0x40415c, 1, 0x04, 0x00000000 },
{ 0x404170, 1, 0x04, 0x00000000 },
{ 0x4041b4, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvf0_graph_init_ds_0[] = {
+const struct gf100_gr_init
+gk110_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x0000ff00 },
@@ -60,23 +63,23 @@ nvf0_graph_init_ds_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_graph_init_sked_0[] = {
+const struct gf100_gr_init
+gk110_gr_init_sked_0[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{ 0x407040, 1, 0x04, 0x80440424 },
{ 0x407048, 1, 0x04, 0x0000000a },
{}
};
-const struct nvc0_graph_init
-nvf0_graph_init_cwd_0[] = {
+const struct gf100_gr_init
+gk110_gr_init_cwd_0[] = {
{ 0x405b44, 1, 0x04, 0x00000000 },
{ 0x405b50, 1, 0x04, 0x00000000 },
{}
};
-const struct nvc0_graph_init
-nvf0_graph_init_gpc_unk_1[] = {
+const struct gf100_gr_init
+gk110_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418d28, 2, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000400 },
@@ -88,8 +91,8 @@ nvf0_graph_init_gpc_unk_1[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_graph_init_tex_0[] = {
+const struct gf100_gr_init
+gk110_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -100,8 +103,8 @@ nvf0_graph_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nvf0_graph_init_l1c_0[] = {
+static const struct gf100_gr_init
+gk110_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x01000000 },
@@ -115,8 +118,8 @@ nvf0_graph_init_l1c_0[] = {
{}
};
-const struct nvc0_graph_init
-nvf0_graph_init_sm_0[] = {
+const struct gf100_gr_init
+gk110_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000080 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ee4, 1, 0x04, 0x00000000 },
@@ -132,37 +135,37 @@ nvf0_graph_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nvf0_graph_pack_mmio[] = {
- { nve4_graph_init_main_0 },
- { nvf0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvd9_graph_init_pd_0 },
- { nvf0_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvf0_graph_init_sked_0 },
- { nvf0_graph_init_cwd_0 },
- { nvd9_graph_init_prop_0 },
- { nvc1_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvd9_graph_init_gpm_0 },
- { nvf0_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nve4_graph_init_tpccs_0 },
- { nvf0_graph_init_tex_0 },
- { nve4_graph_init_pe_0 },
- { nvf0_graph_init_l1c_0 },
- { nvc0_graph_init_mpc_0 },
- { nvf0_graph_init_sm_0 },
- { nvd7_graph_init_pes_0 },
- { nvd7_graph_init_wwdx_0 },
- { nvd7_graph_init_cbm_0 },
- { nve4_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gk110_gr_pack_mmio[] = {
+ { gk104_gr_init_main_0 },
+ { gk110_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf119_gr_init_pd_0 },
+ { gk110_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gk110_gr_init_sked_0 },
+ { gk110_gr_init_cwd_0 },
+ { gf119_gr_init_prop_0 },
+ { gf108_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf119_gr_init_gpm_0 },
+ { gk110_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gk104_gr_init_tpccs_0 },
+ { gk110_gr_init_tex_0 },
+ { gk104_gr_init_pe_0 },
+ { gk110_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gk110_gr_init_sm_0 },
+ { gf117_gr_init_pes_0 },
+ { gf117_gr_init_wwdx_0 },
+ { gf117_gr_init_cbm_0 },
+ { gk104_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
{}
};
@@ -171,9 +174,9 @@ nvf0_graph_pack_mmio[] = {
******************************************************************************/
int
-nvf0_graph_fini(struct nouveau_object *object, bool suspend)
+gk110_gr_fini(struct nvkm_object *object, bool suspend)
{
- struct nvc0_graph_priv *priv = (void *)object;
+ struct gf100_gr_priv *priv = (void *)object;
static const struct {
u32 addr;
u32 data;
@@ -204,42 +207,42 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend)
nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
}
- return nouveau_graph_fini(&priv->base, suspend);
+ return nvkm_gr_fini(&priv->base, suspend);
}
-#include "fuc/hubnvf0.fuc.h"
+#include "fuc/hubgk110.fuc3.h"
-struct nvc0_graph_ucode
-nvf0_graph_fecs_ucode = {
- .code.data = nvf0_grhub_code,
- .code.size = sizeof(nvf0_grhub_code),
- .data.data = nvf0_grhub_data,
- .data.size = sizeof(nvf0_grhub_data),
+struct gf100_gr_ucode
+gk110_gr_fecs_ucode = {
+ .code.data = gk110_grhub_code,
+ .code.size = sizeof(gk110_grhub_code),
+ .data.data = gk110_grhub_data,
+ .data.size = sizeof(gk110_grhub_data),
};
-#include "fuc/gpcnvf0.fuc.h"
+#include "fuc/gpcgk110.fuc3.h"
-struct nvc0_graph_ucode
-nvf0_graph_gpccs_ucode = {
- .code.data = nvf0_grgpc_code,
- .code.size = sizeof(nvf0_grgpc_code),
- .data.data = nvf0_grgpc_data,
- .data.size = sizeof(nvf0_grgpc_data),
+struct gf100_gr_ucode
+gk110_gr_gpccs_ucode = {
+ .code.data = gk110_grgpc_code,
+ .code.size = sizeof(gk110_grgpc_code),
+ .data.data = gk110_grgpc_data,
+ .data.size = sizeof(gk110_grgpc_data),
};
-struct nouveau_oclass *
-nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gk110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nve4_graph_init,
- .fini = nvf0_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gk104_gr_init,
+ .fini = gk110_gr_fini,
},
- .cclass = &nvf0_grctx_oclass,
- .sclass = nvf0_graph_sclass,
- .mmio = nvf0_graph_pack_mmio,
- .fecs.ucode = &nvf0_graph_fecs_ucode,
- .gpccs.ucode = &nvf0_graph_gpccs_ucode,
+ .cclass = &gk110_grctx_oclass,
+ .sclass = gk110_gr_sclass,
+ .mmio = gk110_gr_pack_mmio,
+ .fecs.ucode = &gk110_gr_fecs_ucode,
+ .gpccs.ucode = &gk110_gr_gpccs_ucode,
.ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index d07b19dc168d..5292c5a9a38c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -21,16 +21,15 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include "gf100.h"
+#include "ctxgf100.h"
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
-static const struct nvc0_graph_init
-gk110b_graph_init_l1c_0[] = {
+static const struct gf100_gr_init
+gk110b_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x09000000 },
@@ -44,8 +43,8 @@ gk110b_graph_init_l1c_0[] = {
{}
};
-static const struct nvc0_graph_init
-gk110b_graph_init_sm_0[] = {
+static const struct gf100_gr_init
+gk110b_gr_init_sm_0[] = {
{ 0x419e00, 1, 0x04, 0x00000080 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
{ 0x419ee4, 1, 0x04, 0x00000000 },
@@ -61,37 +60,37 @@ gk110b_graph_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
-gk110b_graph_pack_mmio[] = {
- { nve4_graph_init_main_0 },
- { nvf0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvd9_graph_init_pd_0 },
- { nvf0_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvf0_graph_init_sked_0 },
- { nvf0_graph_init_cwd_0 },
- { nvd9_graph_init_prop_0 },
- { nvc1_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nvc1_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvd9_graph_init_gpm_0 },
- { nvf0_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nve4_graph_init_tpccs_0 },
- { nvf0_graph_init_tex_0 },
- { nve4_graph_init_pe_0 },
- { gk110b_graph_init_l1c_0 },
- { nvc0_graph_init_mpc_0 },
- { gk110b_graph_init_sm_0 },
- { nvd7_graph_init_pes_0 },
- { nvd7_graph_init_wwdx_0 },
- { nvd7_graph_init_cbm_0 },
- { nve4_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gk110b_gr_pack_mmio[] = {
+ { gk104_gr_init_main_0 },
+ { gk110_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf119_gr_init_pd_0 },
+ { gk110_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gk110_gr_init_sked_0 },
+ { gk110_gr_init_cwd_0 },
+ { gf119_gr_init_prop_0 },
+ { gf108_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gf108_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf119_gr_init_gpm_0 },
+ { gk110_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gk104_gr_init_tpccs_0 },
+ { gk110_gr_init_tex_0 },
+ { gk104_gr_init_pe_0 },
+ { gk110b_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gk110b_gr_init_sm_0 },
+ { gf117_gr_init_pes_0 },
+ { gf117_gr_init_wwdx_0 },
+ { gf117_gr_init_cbm_0 },
+ { gk104_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
{}
};
@@ -99,19 +98,19 @@ gk110b_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nouveau_oclass *
-gk110b_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf1),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nve4_graph_init,
- .fini = nvf0_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gk104_gr_init,
+ .fini = gk110_gr_fini,
},
.cclass = &gk110b_grctx_oclass,
- .sclass = nvf0_graph_sclass,
- .mmio = gk110b_graph_pack_mmio,
- .fecs.ucode = &nvf0_graph_fecs_ucode,
- .gpccs.ucode = &nvf0_graph_gpccs_ucode,
+ .sclass = gk110_gr_sclass,
+ .mmio = gk110b_gr_pack_mmio,
+ .fecs.ucode = &gk110_gr_fecs_ucode,
+ .gpccs.ucode = &gk110_gr_gpccs_ucode,
.ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 2b0e8f48c029..ae6b853173b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -21,20 +21,23 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "gf100.h"
+#include "ctxgf100.h"
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nv108_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0xa140, &nouveau_object_ofuncs },
- { KEPLER_B, &nvc0_fermi_ofuncs },
- { 0xa1c0, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gk208_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0xa140, &nvkm_object_ofuncs },
+ { KEPLER_B, &gf100_fermi_ofuncs },
+ { 0xa1c0, &nvkm_object_ofuncs },
{}
};
@@ -42,8 +45,8 @@ nv108_graph_sclass[] = {
* PGRAPH register lists
******************************************************************************/
-static const struct nvc0_graph_init
-nv108_graph_init_main_0[] = {
+static const struct gf100_gr_init
+gk208_gr_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003083c2 },
{ 0x400088, 1, 0x04, 0x0001bfe7 },
{ 0x40008c, 1, 0x04, 0x00000000 },
@@ -58,8 +61,8 @@ nv108_graph_init_main_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_graph_init_ds_0[] = {
+static const struct gf100_gr_init
+gk208_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00000000 },
@@ -68,8 +71,8 @@ nv108_graph_init_ds_0[] = {
{}
};
-const struct nvc0_graph_init
-nv108_graph_init_gpc_unk_0[] = {
+const struct gf100_gr_init
+gk208_gr_init_gpc_unk_0[] = {
{ 0x418604, 1, 0x04, 0x00000000 },
{ 0x418680, 1, 0x04, 0x00000000 },
{ 0x418714, 1, 0x04, 0x00000000 },
@@ -77,16 +80,16 @@ nv108_graph_init_gpc_unk_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_graph_init_setup_1[] = {
+static const struct gf100_gr_init
+gk208_gr_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
{ 0x4188d4, 1, 0x04, 0x00000201 },
{}
};
-static const struct nvc0_graph_init
-nv108_graph_init_tex_0[] = {
+static const struct gf100_gr_init
+gk208_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -96,8 +99,8 @@ nv108_graph_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-nv108_graph_init_l1c_0[] = {
+static const struct gf100_gr_init
+gk208_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419ca8, 1, 0x04, 0x00000000 },
{ 0x419cb0, 1, 0x04, 0x01000000 },
@@ -111,37 +114,37 @@ nv108_graph_init_l1c_0[] = {
{}
};
-static const struct nvc0_graph_pack
-nv108_graph_pack_mmio[] = {
- { nv108_graph_init_main_0 },
- { nvf0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvd9_graph_init_pd_0 },
- { nv108_graph_init_ds_0 },
- { nvc0_graph_init_scc_0 },
- { nvf0_graph_init_sked_0 },
- { nvf0_graph_init_cwd_0 },
- { nvd9_graph_init_prop_0 },
- { nv108_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { nv108_graph_init_setup_1 },
- { nvc0_graph_init_zcull_0 },
- { nvd9_graph_init_gpm_0 },
- { nvf0_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { nve4_graph_init_tpccs_0 },
- { nv108_graph_init_tex_0 },
- { nve4_graph_init_pe_0 },
- { nv108_graph_init_l1c_0 },
- { nvc0_graph_init_mpc_0 },
- { nvf0_graph_init_sm_0 },
- { nvd7_graph_init_pes_0 },
- { nvd7_graph_init_wwdx_0 },
- { nvd7_graph_init_cbm_0 },
- { nve4_graph_init_be_0 },
- { nvc0_graph_init_fe_1 },
+static const struct gf100_gr_pack
+gk208_gr_pack_mmio[] = {
+ { gk208_gr_init_main_0 },
+ { gk110_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf119_gr_init_pd_0 },
+ { gk208_gr_init_ds_0 },
+ { gf100_gr_init_scc_0 },
+ { gk110_gr_init_sked_0 },
+ { gk110_gr_init_cwd_0 },
+ { gf119_gr_init_prop_0 },
+ { gk208_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gk208_gr_init_setup_1 },
+ { gf100_gr_init_zcull_0 },
+ { gf119_gr_init_gpm_0 },
+ { gk110_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gk104_gr_init_tpccs_0 },
+ { gk208_gr_init_tex_0 },
+ { gk104_gr_init_pe_0 },
+ { gk208_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gk110_gr_init_sm_0 },
+ { gf117_gr_init_pes_0 },
+ { gf117_gr_init_wwdx_0 },
+ { gf117_gr_init_cbm_0 },
+ { gk104_gr_init_be_0 },
+ { gf100_gr_init_fe_1 },
{}
};
@@ -150,9 +153,9 @@ nv108_graph_pack_mmio[] = {
******************************************************************************/
static int
-nv108_graph_fini(struct nouveau_object *object, bool suspend)
+gk208_gr_fini(struct nvkm_object *object, bool suspend)
{
- struct nvc0_graph_priv *priv = (void *)object;
+ struct gf100_gr_priv *priv = (void *)object;
static const struct {
u32 addr;
u32 data;
@@ -183,42 +186,42 @@ nv108_graph_fini(struct nouveau_object *object, bool suspend)
nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
}
- return nouveau_graph_fini(&priv->base, suspend);
+ return nvkm_gr_fini(&priv->base, suspend);
}
-#include "fuc/hubnv108.fuc5.h"
+#include "fuc/hubgk208.fuc5.h"
-static struct nvc0_graph_ucode
-nv108_graph_fecs_ucode = {
- .code.data = nv108_grhub_code,
- .code.size = sizeof(nv108_grhub_code),
- .data.data = nv108_grhub_data,
- .data.size = sizeof(nv108_grhub_data),
+static struct gf100_gr_ucode
+gk208_gr_fecs_ucode = {
+ .code.data = gk208_grhub_code,
+ .code.size = sizeof(gk208_grhub_code),
+ .data.data = gk208_grhub_data,
+ .data.size = sizeof(gk208_grhub_data),
};
-#include "fuc/gpcnv108.fuc5.h"
+#include "fuc/gpcgk208.fuc5.h"
-static struct nvc0_graph_ucode
-nv108_graph_gpccs_ucode = {
- .code.data = nv108_grgpc_code,
- .code.size = sizeof(nv108_grgpc_code),
- .data.data = nv108_grgpc_data,
- .data.size = sizeof(nv108_grgpc_data),
+static struct gf100_gr_ucode
+gk208_gr_gpccs_ucode = {
+ .code.data = gk208_grgpc_code,
+ .code.size = sizeof(gk208_grgpc_code),
+ .data.data = gk208_grgpc_data,
+ .data.size = sizeof(gk208_grgpc_data),
};
-struct nouveau_oclass *
-nv108_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gk208_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x08),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nve4_graph_init,
- .fini = nv108_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gk104_gr_init,
+ .fini = gk208_gr_fini,
},
- .cclass = &nv108_grctx_oclass,
- .sclass = nv108_graph_sclass,
- .mmio = nv108_graph_pack_mmio,
- .fecs.ucode = &nv108_graph_fecs_ucode,
- .gpccs.ucode = &nv108_graph_gpccs_ucode,
+ .cclass = &gk208_grctx_oclass,
+ .sclass = gk208_gr_sclass,
+ .mmio = gk208_gr_pack_mmio,
+ .fecs.ucode = &gk208_gr_fecs_ucode,
+ .gpccs.ucode = &gk208_gr_gpccs_ucode,
.ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 7d0abe9f3fe7..213755534084 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -19,30 +19,31 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include "gf100.h"
+#include "ctxgf100.h"
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <nvif/class.h>
-static struct nouveau_oclass
-gk20a_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0xa040, &nouveau_object_ofuncs },
- { KEPLER_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+static struct nvkm_oclass
+gk20a_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0xa040, &nvkm_object_ofuncs },
+ { KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
-struct nouveau_oclass *
-gk20a_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gk20a_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xea),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = nve4_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gk104_gr_init,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk20a_grctx_oclass,
- .sclass = gk20a_graph_sclass,
- .mmio = nve4_graph_pack_mmio,
+ .sclass = gk20a_gr_sclass,
+ .mmio = gk104_gr_pack_mmio,
.ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 4bdbdab2fd9a..124492b8a2d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -21,23 +21,24 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "gf100.h"
+#include "ctxgf100.h"
#include <subdev/bios.h>
#include <subdev/bios/P0260.h>
-#include "nvc0.h"
-#include "ctxnvc0.h"
+#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-gm107_graph_sclass[] = {
- { 0x902d, &nouveau_object_ofuncs },
- { 0xa140, &nouveau_object_ofuncs },
- { MAXWELL_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
- { MAXWELL_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
+static struct nvkm_oclass
+gm107_gr_sclass[] = {
+ { 0x902d, &nvkm_object_ofuncs },
+ { 0xa140, &nvkm_object_ofuncs },
+ { MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
};
@@ -45,8 +46,8 @@ gm107_graph_sclass[] = {
* PGRAPH register lists
******************************************************************************/
-static const struct nvc0_graph_init
-gm107_graph_init_main_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_main_0[] = {
{ 0x400080, 1, 0x04, 0x003003c2 },
{ 0x400088, 1, 0x04, 0x0001bfe7 },
{ 0x40008c, 1, 0x04, 0x00060000 },
@@ -61,8 +62,8 @@ gm107_graph_init_main_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_ds_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
{ 0x405900, 1, 0x04, 0x00000000 },
@@ -70,37 +71,37 @@ gm107_graph_init_ds_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_scc_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_scc_0[] = {
{ 0x40803c, 1, 0x04, 0x00000010 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_sked_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_sked_0[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{ 0x407040, 1, 0x04, 0x40440424 },
{ 0x407048, 1, 0x04, 0x0000000a },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_prop_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_prop_0[] = {
{ 0x418408, 1, 0x04, 0x00000000 },
{ 0x4184a0, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_setup_1[] = {
+static const struct gf100_gr_init
+gm107_gr_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
{ 0x4188d4, 1, 0x04, 0x00010201 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_zcull_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_zcull_0[] = {
{ 0x418910, 1, 0x04, 0x00010001 },
{ 0x418914, 1, 0x04, 0x00000301 },
{ 0x418918, 1, 0x04, 0x00800000 },
@@ -110,8 +111,8 @@ gm107_graph_init_zcull_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_gpc_unk_1[] = {
+static const struct gf100_gr_init
+gm107_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000400 },
{ 0x418f08, 1, 0x04, 0x00000000 },
@@ -119,8 +120,8 @@ gm107_graph_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_tpccs_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_tpccs_0[] = {
{ 0x419dc4, 1, 0x04, 0x00000000 },
{ 0x419dc8, 1, 0x04, 0x00000501 },
{ 0x419dd0, 1, 0x04, 0x00000000 },
@@ -133,8 +134,8 @@ gm107_graph_init_tpccs_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_tex_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
{ 0x419abc, 1, 0x04, 0x00000000 },
@@ -147,8 +148,8 @@ gm107_graph_init_tex_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_pe_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_pe_0[] = {
{ 0x419900, 1, 0x04, 0x000000ff },
{ 0x41980c, 1, 0x04, 0x00000010 },
{ 0x419844, 1, 0x04, 0x00000000 },
@@ -159,15 +160,15 @@ gm107_graph_init_pe_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_l1c_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419cc0, 2, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_sm_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_sm_0[] = {
{ 0x419e30, 1, 0x04, 0x000000ff },
{ 0x419e00, 1, 0x04, 0x00000000 },
{ 0x419ea0, 1, 0x04, 0x00000000 },
@@ -185,16 +186,16 @@ gm107_graph_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_l1c_1[] = {
+static const struct gf100_gr_init
+gm107_gr_init_l1c_1[] = {
{ 0x419ccc, 2, 0x04, 0x00000000 },
{ 0x419c80, 1, 0x04, 0x3f006022 },
{ 0x419c88, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_pes_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_pes_0[] = {
{ 0x41be50, 1, 0x04, 0x000000ff },
{ 0x41be04, 1, 0x04, 0x00000000 },
{ 0x41be08, 1, 0x04, 0x00000004 },
@@ -205,21 +206,21 @@ gm107_graph_init_pes_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_wwdx_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_wwdx_0[] = {
{ 0x41bfd4, 1, 0x04, 0x00800000 },
{ 0x41bfdc, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_cbm_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_cbm_0[] = {
{ 0x41becc, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_be_0[] = {
+static const struct gf100_gr_init
+gm107_gr_init_be_0[] = {
{ 0x408890, 1, 0x04, 0x000000ff },
{ 0x40880c, 1, 0x04, 0x00000000 },
{ 0x408850, 1, 0x04, 0x00000004 },
@@ -244,45 +245,45 @@ gm107_graph_init_be_0[] = {
{}
};
-static const struct nvc0_graph_init
-gm107_graph_init_sm_1[] = {
+static const struct gf100_gr_init
+gm107_gr_init_sm_1[] = {
{ 0x419e5c, 1, 0x04, 0x00000000 },
{ 0x419e58, 1, 0x04, 0x00000000 },
{}
};
-static const struct nvc0_graph_pack
-gm107_graph_pack_mmio[] = {
- { gm107_graph_init_main_0 },
- { nvf0_graph_init_fe_0 },
- { nvc0_graph_init_pri_0 },
- { nvc0_graph_init_rstr2d_0 },
- { nvc0_graph_init_pd_0 },
- { gm107_graph_init_ds_0 },
- { gm107_graph_init_scc_0 },
- { gm107_graph_init_sked_0 },
- { nvf0_graph_init_cwd_0 },
- { gm107_graph_init_prop_0 },
- { nv108_graph_init_gpc_unk_0 },
- { nvc0_graph_init_setup_0 },
- { nvc0_graph_init_crstr_0 },
- { gm107_graph_init_setup_1 },
- { gm107_graph_init_zcull_0 },
- { nvc0_graph_init_gpm_0 },
- { gm107_graph_init_gpc_unk_1 },
- { nvc0_graph_init_gcc_0 },
- { gm107_graph_init_tpccs_0 },
- { gm107_graph_init_tex_0 },
- { gm107_graph_init_pe_0 },
- { gm107_graph_init_l1c_0 },
- { nvc0_graph_init_mpc_0 },
- { gm107_graph_init_sm_0 },
- { gm107_graph_init_l1c_1 },
- { gm107_graph_init_pes_0 },
- { gm107_graph_init_wwdx_0 },
- { gm107_graph_init_cbm_0 },
- { gm107_graph_init_be_0 },
- { gm107_graph_init_sm_1 },
+static const struct gf100_gr_pack
+gm107_gr_pack_mmio[] = {
+ { gm107_gr_init_main_0 },
+ { gk110_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gm107_gr_init_ds_0 },
+ { gm107_gr_init_scc_0 },
+ { gm107_gr_init_sked_0 },
+ { gk110_gr_init_cwd_0 },
+ { gm107_gr_init_prop_0 },
+ { gk208_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gm107_gr_init_setup_1 },
+ { gm107_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gm107_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gm107_gr_init_tpccs_0 },
+ { gm107_gr_init_tex_0 },
+ { gm107_gr_init_pe_0 },
+ { gm107_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gm107_gr_init_sm_0 },
+ { gm107_gr_init_l1c_1 },
+ { gm107_gr_init_pes_0 },
+ { gm107_gr_init_wwdx_0 },
+ { gm107_gr_init_cbm_0 },
+ { gm107_gr_init_be_0 },
+ { gm107_gr_init_sm_1 },
{}
};
@@ -291,7 +292,7 @@ gm107_graph_pack_mmio[] = {
******************************************************************************/
static void
-gm107_graph_init_bios(struct nvc0_graph_priv *priv)
+gm107_gr_init_bios(struct gf100_gr_priv *priv)
{
static const struct {
u32 ctrl;
@@ -303,7 +304,7 @@ gm107_graph_init_bios(struct nvc0_graph_priv *priv)
{ 0x419af0, 0x419af4 },
{ 0x419af8, 0x419afc },
};
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_P0260E infoE;
struct nvbios_P0260X infoX;
int E = -1, X;
@@ -319,17 +320,17 @@ gm107_graph_init_bios(struct nvc0_graph_priv *priv)
}
int
-gm107_graph_init(struct nouveau_object *object)
+gm107_gr_init(struct nvkm_object *object)
{
- struct nvc0_graph_oclass *oclass = (void *)object->oclass;
- struct nvc0_graph_priv *priv = (void *)object;
+ struct gf100_gr_oclass *oclass = (void *)object->oclass;
+ struct gf100_gr_priv *priv = (void *)object;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, ppc, rop;
int ret, i;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -339,9 +340,9 @@ gm107_graph_init(struct nouveau_object *object)
nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
- nvc0_graph_mmio(priv, oclass->mmio);
+ gf100_gr_mmio(priv, oclass->mmio);
- gm107_graph_init_bios(priv);
+ gm107_gr_init_bios(priv);
nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
@@ -426,15 +427,15 @@ gm107_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400054, 0x2c350f63);
- nvc0_graph_zbc_init(priv);
+ gf100_gr_zbc_init(priv);
- return nvc0_graph_init_ctxctl(priv);
+ return gf100_gr_init_ctxctl(priv);
}
#include "fuc/hubgm107.fuc5.h"
-static struct nvc0_graph_ucode
-gm107_graph_fecs_ucode = {
+static struct gf100_gr_ucode
+gm107_gr_fecs_ucode = {
.code.data = gm107_grhub_code,
.code.size = sizeof(gm107_grhub_code),
.data.data = gm107_grhub_data,
@@ -443,27 +444,27 @@ gm107_graph_fecs_ucode = {
#include "fuc/gpcgm107.fuc5.h"
-static struct nvc0_graph_ucode
-gm107_graph_gpccs_ucode = {
+static struct gf100_gr_ucode
+gm107_gr_gpccs_ucode = {
.code.data = gm107_grgpc_code,
.code.size = sizeof(gm107_grgpc_code),
.data.data = gm107_grgpc_data,
.data.size = sizeof(gm107_grgpc_data),
};
-struct nouveau_oclass *
-gm107_graph_oclass = &(struct nvc0_graph_oclass) {
+struct nvkm_oclass *
+gm107_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x07),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_graph_ctor,
- .dtor = nvc0_graph_dtor,
- .init = gm107_graph_init,
- .fini = _nouveau_graph_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gm107_gr_init,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gm107_grctx_oclass,
- .sclass = gm107_graph_sclass,
- .mmio = gm107_graph_pack_mmio,
- .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL,
- .gpccs.ucode = &gm107_graph_gpccs_ucode,
+ .sclass = gm107_gr_sclass,
+ .mmio = gm107_gr_pack_mmio,
+ .fecs.ucode = 0 ? &gm107_gr_fecs_ucode : NULL,
+ .gpccs.ucode = &gm107_gr_gpccs_ucode,
.ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index f70e2f67a4dd..2614510c28d0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -10,7 +10,7 @@
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
+ * paragr) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
@@ -21,23 +21,18 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include <engine/gr.h>
+#include "regs.h"
#include <core/client.h>
-#include <core/os.h>
+#include <core/device.h>
#include <core/handle.h>
-#include <core/namedb.h>
-
-#include <subdev/fb.h>
+#include <engine/fifo.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
-#include <engine/fifo.h>
-#include <engine/graph.h>
-
-#include "regs.h"
-
static u32
-nv04_graph_ctx_regs[] = {
+nv04_gr_ctx_regs[] = {
0x0040053c,
0x00400544,
0x00400540,
@@ -351,21 +346,21 @@ nv04_graph_ctx_regs[] = {
NV04_PGRAPH_DEBUG_3
};
-struct nv04_graph_priv {
- struct nouveau_graph base;
- struct nv04_graph_chan *chan[16];
+struct nv04_gr_priv {
+ struct nvkm_gr base;
+ struct nv04_gr_chan *chan[16];
spinlock_t lock;
};
-struct nv04_graph_chan {
- struct nouveau_object base;
+struct nv04_gr_chan {
+ struct nvkm_object base;
int chid;
- u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+ u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
};
-static inline struct nv04_graph_priv *
-nv04_graph_priv(struct nv04_graph_chan *chan)
+static inline struct nv04_gr_priv *
+nv04_gr_priv(struct nv04_gr_chan *chan)
{
return (void *)nv_object(chan)->engine;
}
@@ -449,9 +444,9 @@ nv04_graph_priv(struct nv04_graph_chan *chan)
*/
static void
-nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
+ struct nv04_gr_priv *priv = (void *)object->engine;
int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
u32 tmp;
@@ -465,7 +460,7 @@ nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
}
static void
-nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
{
int class, op, valid = 1;
u32 tmp, ctx1;
@@ -509,12 +504,12 @@ nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
break;
}
- nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
+ nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
}
static int
-nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
u32 class = nv_ro32(object, 0) & 0xff;
u32 data = *(u32 *)args;
@@ -523,17 +518,17 @@ nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
/* Old versions of the objects only accept first three operations. */
if (data > 2 && class < 0x40)
return 1;
- nv04_graph_set_ctx1(object, 0x00038000, data << 15);
+ nv04_gr_set_ctx1(object, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
- nv04_graph_set_ctx_val(object, 0, 0);
+ nv04_gr_set_ctx_val(object, 0, 0);
return 0;
}
static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_surf3d_clip_h(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
+ struct nv04_gr_priv *priv = (void *)object->engine;
u32 data = *(u32 *)args;
u32 min = data & 0xffff, max;
u32 w = data >> 16;
@@ -551,10 +546,10 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
}
static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_surf3d_clip_v(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
+ struct nv04_gr_priv *priv = (void *)object->engine;
u32 data = *(u32 *)args;
u32 min = data & 0xffff, max;
u32 w = data >> 16;
@@ -572,397 +567,396 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
}
static u16
-nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
+nv04_gr_mthd_bind_class(struct nvkm_object *object, u32 *args, u32 size)
{
- struct nouveau_instmem *imem = nouveau_instmem(object);
+ struct nvkm_instmem *imem = nvkm_instmem(object);
u32 inst = *(u32 *)args << 4;
return nv_ro32(imem, inst);
}
static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
+nv04_gr_mthd_bind_surf2d(struct nvkm_object *object, u32 mthd,
void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(object, 0x00004000, 0);
- nv04_graph_set_ctx_val(object, 0x02000000, 0);
+ nv04_gr_set_ctx1(object, 0x00004000, 0);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x42:
- nv04_graph_set_ctx1(object, 0x00004000, 0);
- nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+ nv04_gr_set_ctx1(object, 0x00004000, 0);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(object, 0x00004000, 0);
- nv04_graph_set_ctx_val(object, 0x02000000, 0);
+ nv04_gr_set_ctx1(object, 0x00004000, 0);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x42:
- nv04_graph_set_ctx1(object, 0x00004000, 0);
- nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+ nv04_gr_set_ctx1(object, 0x00004000, 0);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
case 0x52:
- nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
- nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+ nv04_gr_set_ctx1(object, 0x00004000, 0x00004000);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv01_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x08000000, 0);
+ nv04_gr_set_ctx_val(object, 0x08000000, 0);
return 0;
case 0x18:
- nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
+ nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x08000000, 0);
+ nv04_gr_set_ctx_val(object, 0x08000000, 0);
return 0;
case 0x44:
- nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
+ nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_rop(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x10000000, 0);
+ nv04_gr_set_ctx_val(object, 0x10000000, 0);
return 0;
case 0x43:
- nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
+ nv04_gr_set_ctx_val(object, 0x10000000, 0x10000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_beta1(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x20000000, 0);
+ nv04_gr_set_ctx_val(object, 0x20000000, 0);
return 0;
case 0x12:
- nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
+ nv04_gr_set_ctx_val(object, 0x20000000, 0x20000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_beta4(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x40000000, 0);
+ nv04_gr_set_ctx_val(object, 0x40000000, 0);
return 0;
case 0x72:
- nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
+ nv04_gr_set_ctx_val(object, 0x40000000, 0x40000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_surf_dst(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x02000000, 0);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x58:
- nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_surf_src(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x04000000, 0);
+ nv04_gr_set_ctx_val(object, 0x04000000, 0);
return 0;
case 0x59:
- nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
+ nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_surf_color(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x02000000, 0);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x5a:
- nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+ nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_gr_mthd_bind_surf_zeta(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(object, 0x04000000, 0);
+ nv04_gr_set_ctx_val(object, 0x04000000, 0);
return 0;
case 0x5b:
- nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
+ nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
-nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv01_gr_mthd_bind_clip(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(object, 0x2000, 0);
+ nv04_gr_set_ctx1(object, 0x2000, 0);
return 0;
case 0x19:
- nv04_graph_set_ctx1(object, 0x2000, 0x2000);
+ nv04_gr_set_ctx1(object, 0x2000, 0x2000);
return 0;
}
return 1;
}
static int
-nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv01_gr_mthd_bind_chroma(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv04_graph_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(object, 0x1000, 0);
+ nv04_gr_set_ctx1(object, 0x1000, 0);
return 0;
/* Yes, for some reason even the old versions of objects
* accept 0x57 and not 0x17. Consistency be damned.
*/
case 0x57:
- nv04_graph_set_ctx1(object, 0x1000, 0x1000);
+ nv04_gr_set_ctx1(object, 0x1000, 0x1000);
return 0;
}
return 1;
}
-static struct nouveau_omthds
-nv03_graph_gdi_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
- { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv03_gr_gdi_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_patt },
+ { 0x0188, 0x0188, nv04_gr_mthd_bind_rop },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_beta1 },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_gdi_omthds[] = {
- { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_gdi_omthds[] = {
+ { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv01_graph_blit_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv01_gr_blit_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
+ { 0x019c, 0x019c, nv04_gr_mthd_bind_surf_src },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_blit_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_blit_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_iifc_omthds[] = {
- { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
- { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_iifc_omthds[] = {
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_chroma },
+ { 0x018c, 0x018c, nv01_gr_mthd_bind_clip },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_patt },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_rop },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_beta1 },
+ { 0x019c, 0x019c, nv04_gr_mthd_bind_beta4 },
+ { 0x01a0, 0x01a0, nv04_gr_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, 0x03e4, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv01_graph_ifc_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv01_gr_ifc_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_ifc_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_ifc_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv03_graph_sifc_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv03_gr_sifc_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_sifc_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_sifc_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv03_graph_sifm_omthds[] = {
- { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv03_gr_sifm_omthds[] = {
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
+ { 0x0304, 0x0304, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_sifm_omthds[] = {
- { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_sifm_omthds[] = {
+ { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
+ { 0x0304, 0x0304, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_surf3d_omthds[] = {
- { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+static struct nvkm_omthds
+nv04_gr_surf3d_omthds[] = {
+ { 0x02f8, 0x02f8, nv04_gr_mthd_surf3d_clip_h },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_surf3d_clip_v },
{}
};
-static struct nouveau_omthds
-nv03_graph_ttri_omthds[] = {
- { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
+static struct nvkm_omthds
+nv03_gr_ttri_omthds[] = {
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_surf_color },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_surf_zeta },
{}
};
-static struct nouveau_omthds
-nv01_graph_prim_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv01_gr_prim_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
+ { 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
-static struct nouveau_omthds
-nv04_graph_prim_omthds[] = {
- { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+static struct nvkm_omthds
+nv04_gr_prim_omthds[] = {
+ { 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
+ { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
{}
};
static int
-nv04_graph_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpuobj *obj;
+ struct nvkm_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
@@ -977,59 +971,59 @@ nv04_graph_object_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_ofuncs
-nv04_graph_ofuncs = {
- .ctor = nv04_graph_object_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+struct nvkm_ofuncs
+nv04_gr_ofuncs = {
+ .ctor = nv04_gr_object_ctor,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
};
-static struct nouveau_oclass
-nv04_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
- { 0x0017, &nv04_graph_ofuncs }, /* chroma */
- { 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
- { 0x0019, &nv04_graph_ofuncs }, /* clip */
- { 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
- { 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
- { 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
- { 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
- { 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
- { 0x0030, &nv04_graph_ofuncs }, /* null */
- { 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
- { 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
- { 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
- { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
- { 0x0042, &nv04_graph_ofuncs }, /* surf2d */
- { 0x0043, &nv04_graph_ofuncs }, /* rop */
- { 0x0044, &nv04_graph_ofuncs }, /* pattern */
- { 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
- { 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
- { 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
- { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
- { 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
- { 0x0054, &nv04_graph_ofuncs }, /* ttri */
- { 0x0055, &nv04_graph_ofuncs }, /* mtri */
- { 0x0057, &nv04_graph_ofuncs }, /* chroma */
- { 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
- { 0x0059, &nv04_graph_ofuncs }, /* surf_src */
- { 0x005a, &nv04_graph_ofuncs }, /* surf_color */
- { 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
- { 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
- { 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
- { 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
- { 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
- { 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
- { 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
- { 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
- { 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
- { 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
- { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
- { 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
- { 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
+static struct nvkm_oclass
+nv04_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
+ { 0x0017, &nv04_gr_ofuncs }, /* chroma */
+ { 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
+ { 0x0019, &nv04_gr_ofuncs }, /* clip */
+ { 0x001c, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* line */
+ { 0x001d, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* tri */
+ { 0x001e, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* rect */
+ { 0x001f, &nv04_gr_ofuncs, nv01_gr_blit_omthds },
+ { 0x0021, &nv04_gr_ofuncs, nv01_gr_ifc_omthds },
+ { 0x0030, &nv04_gr_ofuncs }, /* null */
+ { 0x0036, &nv04_gr_ofuncs, nv03_gr_sifc_omthds },
+ { 0x0037, &nv04_gr_ofuncs, nv03_gr_sifm_omthds },
+ { 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
+ { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
+ { 0x0042, &nv04_gr_ofuncs }, /* surf2d */
+ { 0x0043, &nv04_gr_ofuncs }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs }, /* pattern */
+ { 0x0048, &nv04_gr_ofuncs, nv03_gr_ttri_omthds },
+ { 0x004a, &nv04_gr_ofuncs, nv04_gr_gdi_omthds },
+ { 0x004b, &nv04_gr_ofuncs, nv03_gr_gdi_omthds },
+ { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
+ { 0x0053, &nv04_gr_ofuncs, nv04_gr_surf3d_omthds },
+ { 0x0054, &nv04_gr_ofuncs }, /* ttri */
+ { 0x0055, &nv04_gr_ofuncs }, /* mtri */
+ { 0x0057, &nv04_gr_ofuncs }, /* chroma */
+ { 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
+ { 0x0059, &nv04_gr_ofuncs }, /* surf_src */
+ { 0x005a, &nv04_gr_ofuncs }, /* surf_color */
+ { 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
+ { 0x005c, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* line */
+ { 0x005d, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* tri */
+ { 0x005e, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* rect */
+ { 0x005f, &nv04_gr_ofuncs, nv04_gr_blit_omthds },
+ { 0x0060, &nv04_gr_ofuncs, nv04_gr_iifc_omthds },
+ { 0x0061, &nv04_gr_ofuncs, nv04_gr_ifc_omthds },
+ { 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
+ { 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
+ { 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
+ { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
+ { 0x0076, &nv04_gr_ofuncs, nv04_gr_sifc_omthds },
+ { 0x0077, &nv04_gr_ofuncs, nv04_gr_sifm_omthds },
{},
};
@@ -1037,10 +1031,10 @@ nv04_graph_sclass[] = {
* PGRAPH context
******************************************************************************/
-static struct nv04_graph_chan *
-nv04_graph_channel(struct nv04_graph_priv *priv)
+static struct nv04_gr_chan *
+nv04_gr_channel(struct nv04_gr_priv *priv)
{
- struct nv04_graph_chan *chan = NULL;
+ struct nv04_gr_chan *chan = NULL;
if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
if (chid < ARRAY_SIZE(priv->chan))
@@ -1050,13 +1044,13 @@ nv04_graph_channel(struct nv04_graph_priv *priv)
}
static int
-nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
+nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
{
- struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+ struct nv04_gr_priv *priv = nv04_gr_priv(chan);
int i;
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
- nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);
+ for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
+ nv_wr32(priv, nv04_gr_ctx_regs[i], chan->nv04[i]);
nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
@@ -1065,13 +1059,13 @@ nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
}
static int
-nv04_graph_unload_context(struct nv04_graph_chan *chan)
+nv04_gr_unload_context(struct nv04_gr_chan *chan)
{
- struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+ struct nv04_gr_priv *priv = nv04_gr_priv(chan);
int i;
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
- chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);
+ for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
+ chan->nv04[i] = nv_rd32(priv, nv04_gr_ctx_regs[i]);
nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
@@ -1079,36 +1073,36 @@ nv04_graph_unload_context(struct nv04_graph_chan *chan)
}
static void
-nv04_graph_context_switch(struct nv04_graph_priv *priv)
+nv04_gr_context_switch(struct nv04_gr_priv *priv)
{
- struct nv04_graph_chan *prev = NULL;
- struct nv04_graph_chan *next = NULL;
+ struct nv04_gr_chan *prev = NULL;
+ struct nv04_gr_chan *next = NULL;
unsigned long flags;
int chid;
spin_lock_irqsave(&priv->lock, flags);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
/* If previous context is valid, we need to save it */
- prev = nv04_graph_channel(priv);
+ prev = nv04_gr_channel(priv);
if (prev)
- nv04_graph_unload_context(prev);
+ nv04_gr_unload_context(prev);
/* load context for next channel */
chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
next = priv->chan[chid];
if (next)
- nv04_graph_load_context(next, chid);
+ nv04_gr_load_context(next, chid);
spin_unlock_irqrestore(&priv->lock, flags);
}
-static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
+static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
{
int i;
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
- if (nv04_graph_ctx_regs[i] == reg)
+ for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++) {
+ if (nv04_gr_ctx_regs[i] == reg)
return &chan->nv04[i];
}
@@ -1116,18 +1110,18 @@ static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
}
static int
-nv04_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_gr_context_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fifo_chan *fifo = (void *)parent;
- struct nv04_graph_priv *priv = (void *)engine;
- struct nv04_graph_chan *chan;
+ struct nvkm_fifo_chan *fifo = (void *)parent;
+ struct nv04_gr_priv *priv = (void *)engine;
+ struct nv04_gr_chan *chan;
unsigned long flags;
int ret;
- ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+ ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -1137,7 +1131,7 @@ nv04_graph_context_ctor(struct nouveau_object *parent,
*pobject = nv_object(priv->chan[fifo->chid]);
atomic_inc(&(*pobject)->refcount);
spin_unlock_irqrestore(&priv->lock, flags);
- nouveau_object_destroy(&chan->base);
+ nvkm_object_destroy(&chan->base);
return 1;
}
@@ -1150,44 +1144,44 @@ nv04_graph_context_ctor(struct nouveau_object *parent,
}
static void
-nv04_graph_context_dtor(struct nouveau_object *object)
+nv04_gr_context_dtor(struct nvkm_object *object)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
- struct nv04_graph_chan *chan = (void *)object;
+ struct nv04_gr_priv *priv = (void *)object->engine;
+ struct nv04_gr_chan *chan = (void *)object;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
priv->chan[chan->chid] = NULL;
spin_unlock_irqrestore(&priv->lock, flags);
- nouveau_object_destroy(&chan->base);
+ nvkm_object_destroy(&chan->base);
}
static int
-nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
+nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
- struct nv04_graph_chan *chan = (void *)object;
+ struct nv04_gr_priv *priv = (void *)object->engine;
+ struct nv04_gr_chan *chan = (void *)object;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (nv04_graph_channel(priv) == chan)
- nv04_graph_unload_context(chan);
+ if (nv04_gr_channel(priv) == chan)
+ nv04_gr_unload_context(chan);
nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&priv->lock, flags);
- return nouveau_object_fini(&chan->base, suspend);
+ return nvkm_object_fini(&chan->base, suspend);
}
-static struct nouveau_oclass
-nv04_graph_cclass = {
+static struct nvkm_oclass
+nv04_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_graph_context_ctor,
- .dtor = nv04_graph_context_dtor,
- .init = nouveau_object_init,
- .fini = nv04_graph_context_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv04_gr_context_ctor,
+ .dtor = nv04_gr_context_dtor,
+ .init = nvkm_object_init,
+ .fini = nv04_gr_context_fini,
},
};
@@ -1196,31 +1190,31 @@ nv04_graph_cclass = {
******************************************************************************/
bool
-nv04_graph_idle(void *obj)
+nv04_gr_idle(void *obj)
{
- struct nouveau_graph *graph = nouveau_graph(obj);
+ struct nvkm_gr *gr = nvkm_gr(obj);
u32 mask = 0xffffffff;
if (nv_device(obj)->card_type == NV_40)
mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
- if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
- nv_error(graph, "idle timed out with status 0x%08x\n",
- nv_rd32(graph, NV04_PGRAPH_STATUS));
+ if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
+ nv_error(gr, "idle timed out with status 0x%08x\n",
+ nv_rd32(gr, NV04_PGRAPH_STATUS));
return false;
}
return true;
}
-static const struct nouveau_bitfield
-nv04_graph_intr_name[] = {
+static const struct nvkm_bitfield
+nv04_gr_intr_name[] = {
{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
{}
};
-static const struct nouveau_bitfield
-nv04_graph_nstatus[] = {
+static const struct nvkm_bitfield
+nv04_gr_nstatus[] = {
{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
@@ -1228,8 +1222,8 @@ nv04_graph_nstatus[] = {
{}
};
-const struct nouveau_bitfield
-nv04_graph_nsource[] = {
+const struct nvkm_bitfield
+nv04_gr_nsource[] = {
{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
@@ -1253,12 +1247,12 @@ nv04_graph_nsource[] = {
};
static void
-nv04_graph_intr(struct nouveau_subdev *subdev)
+nv04_gr_intr(struct nvkm_subdev *subdev)
{
- struct nv04_graph_priv *priv = (void *)subdev;
- struct nv04_graph_chan *chan = NULL;
- struct nouveau_namedb *namedb = NULL;
- struct nouveau_handle *handle = NULL;
+ struct nv04_gr_priv *priv = (void *)subdev;
+ struct nv04_gr_chan *chan = NULL;
+ struct nvkm_namedb *namedb = NULL;
+ struct nvkm_handle *handle = NULL;
u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
@@ -1280,7 +1274,7 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
if (stat & NV_PGRAPH_INTR_NOTIFY) {
if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
- handle = nouveau_namedb_get_vinst(namedb, inst);
+ handle = nvkm_namedb_get_vinst(namedb, inst);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~NV_PGRAPH_INTR_NOTIFY;
}
@@ -1290,7 +1284,7 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv04_graph_context_switch(priv);
+ nv04_gr_context_switch(priv);
}
nv_wr32(priv, NV03_PGRAPH_INTR, stat);
@@ -1298,50 +1292,50 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
if (show) {
nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv04_graph_intr_name, show);
+ nvkm_bitfield_print(nv04_gr_intr_name, show);
pr_cont(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ nvkm_bitfield_print(nv04_gr_nsource, nsource);
pr_cont(" nstatus:");
- nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+ nvkm_bitfield_print(nv04_gr_nstatus, nstatus);
pr_cont("\n");
nv_error(priv,
"ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, nouveau_client_name(chan), subc, class, mthd,
+ chid, nvkm_client_name(chan), subc, class, mthd,
data);
}
- nouveau_namedb_put(handle);
+ nvkm_namedb_put(handle);
}
static int
-nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv04_graph_priv *priv;
+ struct nv04_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv04_graph_intr;
- nv_engine(priv)->cclass = &nv04_graph_cclass;
- nv_engine(priv)->sclass = nv04_graph_sclass;
+ nv_subdev(priv)->intr = nv04_gr_intr;
+ nv_engine(priv)->cclass = &nv04_gr_cclass;
+ nv_engine(priv)->sclass = nv04_gr_sclass;
spin_lock_init(&priv->lock);
return 0;
}
static int
-nv04_graph_init(struct nouveau_object *object)
+nv04_gr_init(struct nvkm_object *object)
{
- struct nouveau_engine *engine = nv_engine(object);
- struct nv04_graph_priv *priv = (void *)engine;
+ struct nvkm_engine *engine = nv_engine(object);
+ struct nv04_gr_priv *priv = (void *)engine;
int ret;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -1376,13 +1370,13 @@ nv04_graph_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv04_graph_oclass = {
+struct nvkm_oclass
+nv04_gr_oclass = {
.handle = NV_ENGINE(GR, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_graph_ctor,
- .dtor = _nouveau_graph_dtor,
- .init = nv04_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv04_gr_ctor,
+ .dtor = _nvkm_gr_dtor,
+ .init = nv04_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 2b12b09683c8..389904eb603f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -10,7 +10,7 @@
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
+ * paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
@@ -21,17 +21,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include <engine/gr.h>
+#include "regs.h"
#include <core/client.h>
-#include <core/os.h>
+#include <core/device.h>
#include <core/handle.h>
-
-#include <subdev/fb.h>
-
#include <engine/fifo.h>
-#include <engine/graph.h>
-
-#include "regs.h"
+#include <subdev/fb.h>
struct pipe_state {
u32 pipe_0x0000[0x040/4];
@@ -46,7 +43,7 @@ struct pipe_state {
u32 pipe_0x7800[0x0c0/4];
};
-static int nv10_graph_ctx_regs[] = {
+static int nv10_gr_ctx_regs[] = {
NV10_PGRAPH_CTX_SWITCH(0),
NV10_PGRAPH_CTX_SWITCH(1),
NV10_PGRAPH_CTX_SWITCH(2),
@@ -368,7 +365,7 @@ static int nv10_graph_ctx_regs[] = {
NV04_PGRAPH_VALID2,
};
-static int nv17_graph_ctx_regs[] = {
+static int nv17_gr_ctx_regs[] = {
NV10_PGRAPH_DEBUG_4,
0x004006b0,
0x00400eac,
@@ -389,24 +386,24 @@ static int nv17_graph_ctx_regs[] = {
0x00400a04,
};
-struct nv10_graph_priv {
- struct nouveau_graph base;
- struct nv10_graph_chan *chan[32];
+struct nv10_gr_priv {
+ struct nvkm_gr base;
+ struct nv10_gr_chan *chan[32];
spinlock_t lock;
};
-struct nv10_graph_chan {
- struct nouveau_object base;
+struct nv10_gr_chan {
+ struct nvkm_object base;
int chid;
- int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
- int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+ int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
+ int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
struct pipe_state pipe_state;
u32 lma_window[4];
};
-static inline struct nv10_graph_priv *
-nv10_graph_priv(struct nv10_graph_chan *chan)
+static inline struct nv10_gr_priv *
+nv10_gr_priv(struct nv10_gr_chan *chan)
{
return (void *)nv_object(chan)->engine;
}
@@ -431,58 +428,58 @@ nv10_graph_priv(struct nv10_graph_chan *chan)
nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
} while (0)
-static struct nouveau_oclass
-nv10_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs }, /* clip */
- { 0x0030, &nv04_graph_ofuncs }, /* null */
- { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs }, /* rop */
- { 0x0044, &nv04_graph_ofuncs }, /* pattern */
- { 0x004a, &nv04_graph_ofuncs }, /* gdi */
- { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
- { 0x005f, &nv04_graph_ofuncs }, /* blit */
- { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs }, /* ifc */
- { 0x009f, &nv04_graph_ofuncs }, /* blit */
- { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
- { 0x0094, &nv04_graph_ofuncs }, /* ttri */
- { 0x0095, &nv04_graph_ofuncs }, /* mtri */
- { 0x0056, &nv04_graph_ofuncs }, /* celcius */
+static struct nvkm_oclass
+nv10_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs }, /* null */
+ { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs }, /* pattern */
+ { 0x004a, &nv04_gr_ofuncs }, /* gdi */
+ { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
+ { 0x005f, &nv04_gr_ofuncs }, /* blit */
+ { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs }, /* ifc */
+ { 0x009f, &nv04_gr_ofuncs }, /* blit */
+ { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
+ { 0x0094, &nv04_gr_ofuncs }, /* ttri */
+ { 0x0095, &nv04_gr_ofuncs }, /* mtri */
+ { 0x0056, &nv04_gr_ofuncs }, /* celcius */
{},
};
-static struct nouveau_oclass
-nv15_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs }, /* clip */
- { 0x0030, &nv04_graph_ofuncs }, /* null */
- { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs }, /* rop */
- { 0x0044, &nv04_graph_ofuncs }, /* pattern */
- { 0x004a, &nv04_graph_ofuncs }, /* gdi */
- { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
- { 0x005f, &nv04_graph_ofuncs }, /* blit */
- { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs }, /* ifc */
- { 0x009f, &nv04_graph_ofuncs }, /* blit */
- { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
- { 0x0094, &nv04_graph_ofuncs }, /* ttri */
- { 0x0095, &nv04_graph_ofuncs }, /* mtri */
- { 0x0096, &nv04_graph_ofuncs }, /* celcius */
+static struct nvkm_oclass
+nv15_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs }, /* null */
+ { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs }, /* pattern */
+ { 0x004a, &nv04_gr_ofuncs }, /* gdi */
+ { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
+ { 0x005f, &nv04_gr_ofuncs }, /* blit */
+ { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs }, /* ifc */
+ { 0x009f, &nv04_gr_ofuncs }, /* blit */
+ { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
+ { 0x0094, &nv04_gr_ofuncs }, /* ttri */
+ { 0x0095, &nv04_gr_ofuncs }, /* mtri */
+ { 0x0096, &nv04_gr_ofuncs }, /* celcius */
{},
};
static int
-nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv10_graph_chan *chan = (void *)object->parent;
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_chan *chan = (void *)object->parent;
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
struct pipe_state *pipe = &chan->pipe_state;
u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
u32 xfmode0, xfmode1;
@@ -494,14 +491,14 @@ nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
if (mthd != 0x1644)
return 0;
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
PIPE_SAVE(priv, pipe_0x0040, 0x0040);
PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
PIPE_RESTORE(priv, chan->lma_window, 0x6790);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
@@ -511,7 +508,7 @@ nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
@@ -534,7 +531,7 @@ nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
@@ -549,55 +546,55 @@ nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
return 0;
}
static int
-nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv10_graph_chan *chan = (void *)object->parent;
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_chan *chan = (void *)object->parent;
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
return 0;
}
-static struct nouveau_omthds
+static struct nvkm_omthds
nv17_celcius_omthds[] = {
- { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
+ { 0x1638, 0x1638, nv17_gr_mthd_lma_window },
+ { 0x163c, 0x163c, nv17_gr_mthd_lma_window },
+ { 0x1640, 0x1640, nv17_gr_mthd_lma_window },
+ { 0x1644, 0x1644, nv17_gr_mthd_lma_window },
+ { 0x1658, 0x1658, nv17_gr_mthd_lma_enable },
{}
};
-static struct nouveau_oclass
-nv17_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs }, /* clip */
- { 0x0030, &nv04_graph_ofuncs }, /* null */
- { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs }, /* rop */
- { 0x0044, &nv04_graph_ofuncs }, /* pattern */
- { 0x004a, &nv04_graph_ofuncs }, /* gdi */
- { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
- { 0x005f, &nv04_graph_ofuncs }, /* blit */
- { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs }, /* ifc */
- { 0x009f, &nv04_graph_ofuncs }, /* blit */
- { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
- { 0x0094, &nv04_graph_ofuncs }, /* ttri */
- { 0x0095, &nv04_graph_ofuncs }, /* mtri */
- { 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds },
+static struct nvkm_oclass
+nv17_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs }, /* null */
+ { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs }, /* pattern */
+ { 0x004a, &nv04_gr_ofuncs }, /* gdi */
+ { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
+ { 0x005f, &nv04_gr_ofuncs }, /* blit */
+ { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs }, /* ifc */
+ { 0x009f, &nv04_gr_ofuncs }, /* blit */
+ { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
+ { 0x0094, &nv04_gr_ofuncs }, /* ttri */
+ { 0x0095, &nv04_gr_ofuncs }, /* mtri */
+ { 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds },
{},
};
@@ -605,10 +602,10 @@ nv17_graph_sclass[] = {
* PGRAPH context
******************************************************************************/
-static struct nv10_graph_chan *
-nv10_graph_channel(struct nv10_graph_priv *priv)
+static struct nv10_gr_chan *
+nv10_gr_channel(struct nv10_gr_priv *priv)
{
- struct nv10_graph_chan *chan = NULL;
+ struct nv10_gr_chan *chan = NULL;
if (nv_rd32(priv, 0x400144) & 0x00010000) {
int chid = nv_rd32(priv, 0x400148) >> 24;
if (chid < ARRAY_SIZE(priv->chan))
@@ -618,9 +615,9 @@ nv10_graph_channel(struct nv10_graph_priv *priv)
}
static void
-nv10_graph_save_pipe(struct nv10_graph_chan *chan)
+nv10_gr_save_pipe(struct nv10_gr_chan *chan)
{
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
struct pipe_state *pipe = &chan->pipe_state;
PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
@@ -636,14 +633,14 @@ nv10_graph_save_pipe(struct nv10_graph_chan *chan)
}
static void
-nv10_graph_load_pipe(struct nv10_graph_chan *chan)
+nv10_gr_load_pipe(struct nv10_gr_chan *chan)
{
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
struct pipe_state *pipe = &chan->pipe_state;
u32 xfmode0, xfmode1;
int i;
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
/* XXX check haiku comments */
xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
@@ -668,7 +665,7 @@ nv10_graph_load_pipe(struct nv10_graph_chan *chan)
PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
/* restore XFMODE */
nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
@@ -682,13 +679,13 @@ nv10_graph_load_pipe(struct nv10_graph_chan *chan)
PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
}
static void
-nv10_graph_create_pipe(struct nv10_graph_chan *chan)
+nv10_gr_create_pipe(struct nv10_gr_chan *chan)
{
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
struct pipe_state *pipe_state = &chan->pipe_state;
u32 *pipe_state_addr;
int i;
@@ -841,11 +838,11 @@ nv10_graph_create_pipe(struct nv10_graph_chan *chan)
}
static int
-nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
+nv10_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
{
int i;
- for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
- if (nv10_graph_ctx_regs[i] == reg)
+ for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
+ if (nv10_gr_ctx_regs[i] == reg)
return i;
}
nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg);
@@ -853,11 +850,11 @@ nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
}
static int
-nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
+nv17_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
{
int i;
- for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
- if (nv17_graph_ctx_regs[i] == reg)
+ for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
+ if (nv17_gr_ctx_regs[i] == reg)
return i;
}
nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg);
@@ -865,9 +862,9 @@ nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
}
static void
-nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
+nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
{
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
u32 ctx_user, ctx_switch[5];
int i, subchan = -1;
@@ -935,25 +932,25 @@ nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
}
static int
-nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
+nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
{
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
u32 inst;
int i;
- for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
- nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
+ for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
+ nv_wr32(priv, nv10_gr_ctx_regs[i], chan->nv10[i]);
if (nv_device(priv)->card_type >= NV_11 &&
nv_device(priv)->chipset >= 0x17) {
- for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
- nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
+ for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
+ nv_wr32(priv, nv17_gr_ctx_regs[i], chan->nv17[i]);
}
- nv10_graph_load_pipe(chan);
+ nv10_gr_load_pipe(chan);
inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
- nv10_graph_load_dma_vtxbuf(chan, chid, inst);
+ nv10_gr_load_dma_vtxbuf(chan, chid, inst);
nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
@@ -962,21 +959,21 @@ nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
}
static int
-nv10_graph_unload_context(struct nv10_graph_chan *chan)
+nv10_gr_unload_context(struct nv10_gr_chan *chan)
{
- struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct nv10_gr_priv *priv = nv10_gr_priv(chan);
int i;
- for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
- chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
+ for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
+ chan->nv10[i] = nv_rd32(priv, nv10_gr_ctx_regs[i]);
if (nv_device(priv)->card_type >= NV_11 &&
nv_device(priv)->chipset >= 0x17) {
- for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
- chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
+ for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
+ chan->nv17[i] = nv_rd32(priv, nv17_gr_ctx_regs[i]);
}
- nv10_graph_save_pipe(chan);
+ nv10_gr_save_pipe(chan);
nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
@@ -984,55 +981,54 @@ nv10_graph_unload_context(struct nv10_graph_chan *chan)
}
static void
-nv10_graph_context_switch(struct nv10_graph_priv *priv)
+nv10_gr_context_switch(struct nv10_gr_priv *priv)
{
- struct nv10_graph_chan *prev = NULL;
- struct nv10_graph_chan *next = NULL;
+ struct nv10_gr_chan *prev = NULL;
+ struct nv10_gr_chan *next = NULL;
unsigned long flags;
int chid;
spin_lock_irqsave(&priv->lock, flags);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
/* If previous context is valid, we need to save it */
- prev = nv10_graph_channel(priv);
+ prev = nv10_gr_channel(priv);
if (prev)
- nv10_graph_unload_context(prev);
+ nv10_gr_unload_context(prev);
/* load context for next channel */
chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
next = priv->chan[chid];
if (next)
- nv10_graph_load_context(next, chid);
+ nv10_gr_load_context(next, chid);
spin_unlock_irqrestore(&priv->lock, flags);
}
#define NV_WRITE_CTX(reg, val) do { \
- int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
+ int offset = nv10_gr_ctx_regs_find_offset(priv, reg); \
if (offset > 0) \
chan->nv10[offset] = val; \
} while (0)
#define NV17_WRITE_CTX(reg, val) do { \
- int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
+ int offset = nv17_gr_ctx_regs_find_offset(priv, reg); \
if (offset > 0) \
chan->nv17[offset] = val; \
} while (0)
static int
-nv10_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fifo_chan *fifo = (void *)parent;
- struct nv10_graph_priv *priv = (void *)engine;
- struct nv10_graph_chan *chan;
+ struct nvkm_fifo_chan *fifo = (void *)parent;
+ struct nv10_gr_priv *priv = (void *)engine;
+ struct nv10_gr_chan *chan;
unsigned long flags;
int ret;
- ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+ ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -1042,7 +1038,7 @@ nv10_graph_context_ctor(struct nouveau_object *parent,
*pobject = nv_object(priv->chan[fifo->chid]);
atomic_inc(&(*pobject)->refcount);
spin_unlock_irqrestore(&priv->lock, flags);
- nouveau_object_destroy(&chan->base);
+ nvkm_object_destroy(&chan->base);
return 1;
}
@@ -1066,7 +1062,7 @@ nv10_graph_context_ctor(struct nouveau_object *parent,
}
NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
- nv10_graph_create_pipe(chan);
+ nv10_gr_create_pipe(chan);
priv->chan[fifo->chid] = chan;
chan->chid = fifo->chid;
@@ -1075,44 +1071,44 @@ nv10_graph_context_ctor(struct nouveau_object *parent,
}
static void
-nv10_graph_context_dtor(struct nouveau_object *object)
+nv10_gr_context_dtor(struct nvkm_object *object)
{
- struct nv10_graph_priv *priv = (void *)object->engine;
- struct nv10_graph_chan *chan = (void *)object;
+ struct nv10_gr_priv *priv = (void *)object->engine;
+ struct nv10_gr_chan *chan = (void *)object;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
priv->chan[chan->chid] = NULL;
spin_unlock_irqrestore(&priv->lock, flags);
- nouveau_object_destroy(&chan->base);
+ nvkm_object_destroy(&chan->base);
}
static int
-nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
+nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
{
- struct nv10_graph_priv *priv = (void *)object->engine;
- struct nv10_graph_chan *chan = (void *)object;
+ struct nv10_gr_priv *priv = (void *)object->engine;
+ struct nv10_gr_chan *chan = (void *)object;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (nv10_graph_channel(priv) == chan)
- nv10_graph_unload_context(chan);
+ if (nv10_gr_channel(priv) == chan)
+ nv10_gr_unload_context(chan);
nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&priv->lock, flags);
- return nouveau_object_fini(&chan->base, suspend);
+ return nvkm_object_fini(&chan->base, suspend);
}
-static struct nouveau_oclass
-nv10_graph_cclass = {
+static struct nvkm_oclass
+nv10_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_graph_context_ctor,
- .dtor = nv10_graph_context_dtor,
- .init = nouveau_object_init,
- .fini = nv10_graph_context_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv10_gr_context_ctor,
+ .dtor = nv10_gr_context_dtor,
+ .init = nvkm_object_init,
+ .fini = nv10_gr_context_fini,
},
};
@@ -1121,15 +1117,15 @@ nv10_graph_cclass = {
******************************************************************************/
static void
-nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
+nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
{
- struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
- struct nouveau_fifo *pfifo = nouveau_fifo(engine);
- struct nv10_graph_priv *priv = (void *)engine;
+ struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
+ struct nvkm_fifo *pfifo = nvkm_fifo(engine);
+ struct nv10_gr_priv *priv = (void *)engine;
unsigned long flags;
pfifo->pause(pfifo, &flags);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
@@ -1138,13 +1134,13 @@ nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
pfifo->start(pfifo, &flags);
}
-const struct nouveau_bitfield nv10_graph_intr_name[] = {
+const struct nvkm_bitfield nv10_gr_intr_name[] = {
{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
{ NV_PGRAPH_INTR_ERROR, "ERROR" },
{}
};
-const struct nouveau_bitfield nv10_graph_nstatus[] = {
+const struct nvkm_bitfield nv10_gr_nstatus[] = {
{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
@@ -1153,12 +1149,12 @@ const struct nouveau_bitfield nv10_graph_nstatus[] = {
};
static void
-nv10_graph_intr(struct nouveau_subdev *subdev)
+nv10_gr_intr(struct nvkm_subdev *subdev)
{
- struct nv10_graph_priv *priv = (void *)subdev;
- struct nv10_graph_chan *chan = NULL;
- struct nouveau_namedb *namedb = NULL;
- struct nouveau_handle *handle = NULL;
+ struct nv10_gr_priv *priv = (void *)subdev;
+ struct nv10_gr_chan *chan = NULL;
+ struct nvkm_namedb *namedb = NULL;
+ struct nvkm_handle *handle = NULL;
u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
@@ -1179,7 +1175,7 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
if (stat & NV_PGRAPH_INTR_ERROR) {
if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
- handle = nouveau_namedb_get_class(namedb, class);
+ handle = nvkm_namedb_get_class(namedb, class);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~NV_PGRAPH_INTR_ERROR;
}
@@ -1189,7 +1185,7 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv10_graph_context_switch(priv);
+ nv10_gr_context_switch(priv);
}
nv_wr32(priv, NV03_PGRAPH_INTR, stat);
@@ -1197,68 +1193,68 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
if (show) {
nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv10_graph_intr_name, show);
+ nvkm_bitfield_print(nv10_gr_intr_name, show);
pr_cont(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ nvkm_bitfield_print(nv04_gr_nsource, nsource);
pr_cont(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
pr_cont("\n");
nv_error(priv,
"ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, nouveau_client_name(chan), subc, class, mthd,
+ chid, nvkm_client_name(chan), subc, class, mthd,
data);
}
- nouveau_namedb_put(handle);
+ nvkm_namedb_put(handle);
}
static int
-nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv10_graph_priv *priv;
+ struct nv10_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv10_graph_intr;
- nv_engine(priv)->cclass = &nv10_graph_cclass;
+ nv_subdev(priv)->intr = nv10_gr_intr;
+ nv_engine(priv)->cclass = &nv10_gr_cclass;
if (nv_device(priv)->chipset <= 0x10)
- nv_engine(priv)->sclass = nv10_graph_sclass;
+ nv_engine(priv)->sclass = nv10_gr_sclass;
else
if (nv_device(priv)->chipset < 0x17 ||
nv_device(priv)->card_type < NV_11)
- nv_engine(priv)->sclass = nv15_graph_sclass;
+ nv_engine(priv)->sclass = nv15_gr_sclass;
else
- nv_engine(priv)->sclass = nv17_graph_sclass;
+ nv_engine(priv)->sclass = nv17_gr_sclass;
- nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
+ nv_engine(priv)->tile_prog = nv10_gr_tile_prog;
spin_lock_init(&priv->lock);
return 0;
}
static void
-nv10_graph_dtor(struct nouveau_object *object)
+nv10_gr_dtor(struct nvkm_object *object)
{
- struct nv10_graph_priv *priv = (void *)object;
- nouveau_graph_destroy(&priv->base);
+ struct nv10_gr_priv *priv = (void *)object;
+ nvkm_gr_destroy(&priv->base);
}
static int
-nv10_graph_init(struct nouveau_object *object)
+nv10_gr_init(struct nvkm_object *object)
{
- struct nouveau_engine *engine = nv_engine(object);
- struct nouveau_fb *pfb = nouveau_fb(object);
- struct nv10_graph_priv *priv = (void *)engine;
+ struct nvkm_engine *engine = nv_engine(object);
+ struct nvkm_fb *pfb = nvkm_fb(object);
+ struct nv10_gr_priv *priv = (void *)engine;
int ret, i;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -1301,19 +1297,19 @@ nv10_graph_init(struct nouveau_object *object)
}
static int
-nv10_graph_fini(struct nouveau_object *object, bool suspend)
+nv10_gr_fini(struct nvkm_object *object, bool suspend)
{
- struct nv10_graph_priv *priv = (void *)object;
- return nouveau_graph_fini(&priv->base, suspend);
+ struct nv10_gr_priv *priv = (void *)object;
+ return nvkm_gr_fini(&priv->base, suspend);
}
-struct nouveau_oclass
-nv10_graph_oclass = {
+struct nvkm_oclass
+nv10_gr_oclass = {
.handle = NV_ENGINE(GR, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_graph_ctor,
- .dtor = nv10_graph_dtor,
- .init = nv10_graph_init,
- .fini = nv10_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv10_gr_ctor,
+ .dtor = nv10_gr_dtor,
+ .init = nv10_gr_init,
+ .fini = nv10_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index ceb9c746d94e..1713ffb669e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -1,39 +1,34 @@
+#include "nv20.h"
+#include "regs.h"
+
#include <core/client.h>
-#include <core/os.h>
-#include <core/engctx.h>
+#include <core/device.h>
#include <core/handle.h>
-#include <core/enum.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-#include <engine/graph.h>
#include <engine/fifo.h>
-
-#include "nv20.h"
-#include "regs.h"
+#include <subdev/fb.h>
+#include <subdev/timer.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nv20_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
- { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
- { 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
- { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
- { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+static struct nvkm_oclass
+nv20_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
+ { 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
+ { 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
+ { 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
+ { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{},
};
@@ -42,22 +37,20 @@ nv20_graph_sclass[] = {
******************************************************************************/
static int
-nv20_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_chan *chan;
+ struct nv20_gr_chan *chan;
int ret, i;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
- 0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
- &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->chid = nouveau_fifo_chan(parent)->chid;
+ chan->chid = nvkm_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x033c, 0xffff0000);
@@ -107,13 +100,13 @@ nv20_graph_context_ctor(struct nouveau_object *parent,
}
int
-nv20_graph_context_init(struct nouveau_object *object)
+nv20_gr_context_init(struct nvkm_object *object)
{
- struct nv20_graph_priv *priv = (void *)object->engine;
- struct nv20_graph_chan *chan = (void *)object;
+ struct nv20_gr_priv *priv = (void *)object->engine;
+ struct nv20_gr_chan *chan = (void *)object;
int ret;
- ret = nouveau_graph_context_init(&chan->base);
+ ret = nvkm_gr_context_init(&chan->base);
if (ret)
return ret;
@@ -122,10 +115,10 @@ nv20_graph_context_init(struct nouveau_object *object)
}
int
-nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
+nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
{
- struct nv20_graph_priv *priv = (void *)object->engine;
- struct nv20_graph_chan *chan = (void *)object;
+ struct nv20_gr_priv *priv = (void *)object->engine;
+ struct nv20_gr_chan *chan = (void *)object;
int chid = -1;
nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
@@ -141,19 +134,19 @@ nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
- return nouveau_graph_context_fini(&chan->base, suspend);
+ return nvkm_gr_context_fini(&chan->base, suspend);
}
-static struct nouveau_oclass
-nv20_graph_cclass = {
+static struct nvkm_oclass
+nv20_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x20),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv20_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = nv20_graph_context_init,
- .fini = nv20_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv20_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = nv20_gr_context_init,
+ .fini = nv20_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -162,15 +155,15 @@ nv20_graph_cclass = {
******************************************************************************/
void
-nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
+nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
{
- struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
- struct nouveau_fifo *pfifo = nouveau_fifo(engine);
- struct nv20_graph_priv *priv = (void *)engine;
+ struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
+ struct nvkm_fifo *pfifo = nvkm_fifo(engine);
+ struct nv20_gr_priv *priv = (void *)engine;
unsigned long flags;
pfifo->pause(pfifo, &flags);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
@@ -193,12 +186,12 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
}
void
-nv20_graph_intr(struct nouveau_subdev *subdev)
+nv20_gr_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nouveau_handle *handle;
- struct nv20_graph_priv *priv = (void *)subdev;
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct nvkm_handle *handle;
+ struct nv20_gr_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
@@ -210,13 +203,13 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
u32 show = stat;
- engctx = nouveau_engctx_get(engine, chid);
+ engctx = nvkm_engctx_get(engine, chid);
if (stat & NV_PGRAPH_INTR_ERROR) {
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- handle = nouveau_handle_get_class(engctx, class);
+ handle = nvkm_handle_get_class(engctx, class);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~NV_PGRAPH_INTR_ERROR;
- nouveau_handle_put(handle);
+ nvkm_handle_put(handle);
}
}
@@ -225,65 +218,65 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
if (show) {
nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv10_graph_intr_name, show);
+ nvkm_bitfield_print(nv10_gr_intr_name, show);
pr_cont(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ nvkm_bitfield_print(nv04_gr_nsource, nsource);
pr_cont(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
pr_cont("\n");
nv_error(priv,
"ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, nouveau_client_name(engctx), subc, class, mthd,
+ chid, nvkm_client_name(engctx), subc, class, mthd,
data);
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static int
-nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_priv *priv;
+ struct nv20_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_graph_intr;
- nv_engine(priv)->cclass = &nv20_graph_cclass;
- nv_engine(priv)->sclass = nv20_graph_sclass;
- nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ nv_subdev(priv)->intr = nv20_gr_intr;
+ nv_engine(priv)->cclass = &nv20_gr_cclass;
+ nv_engine(priv)->sclass = nv20_gr_sclass;
+ nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
return 0;
}
void
-nv20_graph_dtor(struct nouveau_object *object)
+nv20_gr_dtor(struct nvkm_object *object)
{
- struct nv20_graph_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->ctxtab);
- nouveau_graph_destroy(&priv->base);
+ struct nv20_gr_priv *priv = (void *)object;
+ nvkm_gpuobj_ref(NULL, &priv->ctxtab);
+ nvkm_gr_destroy(&priv->base);
}
int
-nv20_graph_init(struct nouveau_object *object)
+nv20_gr_init(struct nvkm_object *object)
{
- struct nouveau_engine *engine = nv_engine(object);
- struct nv20_graph_priv *priv = (void *)engine;
- struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_engine *engine = nv_engine(object);
+ struct nv20_gr_priv *priv = (void *)engine;
+ struct nvkm_fb *pfb = nvkm_fb(object);
u32 tmp, vramsz;
int ret, i;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -371,13 +364,13 @@ nv20_graph_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv20_graph_oclass = {
+struct nvkm_oclass
+nv20_gr_oclass = {
.handle = NV_ENGINE(GR, 0x20),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv20_graph_ctor,
- .dtor = nv20_graph_dtor,
- .init = nv20_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv20_gr_ctor,
+ .dtor = nv20_gr_dtor,
+ .init = nv20_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
new file mode 100644
index 000000000000..ac4dc048fed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -0,0 +1,26 @@
+#ifndef __NV20_GR_H__
+#define __NV20_GR_H__
+#include <engine/gr.h>
+
+struct nv20_gr_priv {
+ struct nvkm_gr base;
+ struct nvkm_gpuobj *ctxtab;
+};
+
+struct nv20_gr_chan {
+ struct nvkm_gr_chan base;
+ int chid;
+};
+
+extern struct nvkm_oclass nv25_gr_sclass[];
+int nv20_gr_context_init(struct nvkm_object *);
+int nv20_gr_context_fini(struct nvkm_object *, bool);
+
+void nv20_gr_tile_prog(struct nvkm_engine *, int);
+void nv20_gr_intr(struct nvkm_subdev *);
+
+void nv20_gr_dtor(struct nvkm_object *);
+int nv20_gr_init(struct nvkm_object *);
+
+int nv30_gr_init(struct nvkm_object *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index f8a6fdd7d5e8..bc362519cebb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,36 +1,29 @@
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-#include <engine/graph.h>
-
#include "nv20.h"
#include "regs.h"
+#include <engine/fifo.h>
+
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-struct nouveau_oclass
-nv25_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
- { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
- { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
- { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
- { 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
+struct nvkm_oclass
+nv25_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
+ { 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
+ { 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
+ { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
+ { 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
{},
};
@@ -39,21 +32,20 @@ nv25_graph_sclass[] = {
******************************************************************************/
static int
-nv25_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_chan *chan;
+ struct nv20_gr_chan *chan;
int ret, i;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->chid = nouveau_fifo_chan(parent)->chid;
+ chan->chid = nvkm_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x035c, 0xffff0000);
@@ -111,16 +103,16 @@ nv25_graph_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv25_graph_cclass = {
+static struct nvkm_oclass
+nv25_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x25),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv25_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = nv20_graph_context_init,
- .fini = nv20_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv25_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = nv20_gr_context_init,
+ .fini = nv20_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -129,38 +121,38 @@ nv25_graph_cclass = {
******************************************************************************/
static int
-nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_priv *priv;
+ struct nv20_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_graph_intr;
- nv_engine(priv)->cclass = &nv25_graph_cclass;
- nv_engine(priv)->sclass = nv25_graph_sclass;
- nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ nv_subdev(priv)->intr = nv20_gr_intr;
+ nv_engine(priv)->cclass = &nv25_gr_cclass;
+ nv_engine(priv)->sclass = nv25_gr_sclass;
+ nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
return 0;
}
-struct nouveau_oclass
-nv25_graph_oclass = {
+struct nvkm_oclass
+nv25_gr_oclass = {
.handle = NV_ENGINE(GR, 0x25),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv25_graph_ctor,
- .dtor = nv20_graph_dtor,
- .init = nv20_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv25_gr_ctor,
+ .dtor = nv20_gr_dtor,
+ .init = nv20_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 5de9caa2ef67..22a5096e283d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,35 +1,27 @@
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-#include <engine/graph.h>
-
#include "nv20.h"
#include "regs.h"
+#include <engine/fifo.h>
+
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
-nv2a_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_chan *chan;
+ struct nv20_gr_chan *chan;
int ret, i;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->chid = nouveau_fifo_chan(parent)->chid;
+ chan->chid = nvkm_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x033c, 0xffff0000);
@@ -78,16 +70,16 @@ nv2a_graph_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv2a_graph_cclass = {
+static struct nvkm_oclass
+nv2a_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x2a),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv2a_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = nv20_graph_context_init,
- .fini = nv20_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv2a_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = nv20_gr_context_init,
+ .fini = nv20_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -96,38 +88,38 @@ nv2a_graph_cclass = {
******************************************************************************/
static int
-nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_priv *priv;
+ struct nv20_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_graph_intr;
- nv_engine(priv)->cclass = &nv2a_graph_cclass;
- nv_engine(priv)->sclass = nv25_graph_sclass;
- nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ nv_subdev(priv)->intr = nv20_gr_intr;
+ nv_engine(priv)->cclass = &nv2a_gr_cclass;
+ nv_engine(priv)->sclass = nv25_gr_sclass;
+ nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
return 0;
}
-struct nouveau_oclass
-nv2a_graph_oclass = {
+struct nvkm_oclass
+nv2a_gr_oclass = {
.handle = NV_ENGINE(GR, 0x2a),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv2a_graph_ctor,
- .dtor = nv20_graph_dtor,
- .init = nv20_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv2a_gr_ctor,
+ .dtor = nv20_gr_dtor,
+ .init = nv20_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 2f9dbc709389..dcc84eb54fb6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,38 +1,33 @@
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-#include <engine/graph.h>
-
#include "nv20.h"
#include "regs.h"
+#include <core/device.h>
+#include <engine/fifo.h>
+#include <subdev/fb.h>
+
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nv30_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
- { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
- { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
- { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
- { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
- { 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
+static struct nvkm_oclass
+nv30_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
+ { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
+ { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
+ { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
+ { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
+ { 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
};
@@ -41,21 +36,20 @@ nv30_graph_sclass[] = {
******************************************************************************/
static int
-nv30_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_chan *chan;
+ struct nv20_gr_chan *chan;
int ret, i;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->chid = nouveau_fifo_chan(parent)->chid;
+ chan->chid = nvkm_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x0410, 0x00000101);
@@ -112,16 +106,16 @@ nv30_graph_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv30_graph_cclass = {
+static struct nvkm_oclass
+nv30_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x30),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv30_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = nv20_graph_context_init,
- .fini = nv20_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv30_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = nv20_gr_context_init,
+ .fini = nv20_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -130,40 +124,40 @@ nv30_graph_cclass = {
******************************************************************************/
static int
-nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_priv *priv;
+ struct nv20_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_graph_intr;
- nv_engine(priv)->cclass = &nv30_graph_cclass;
- nv_engine(priv)->sclass = nv30_graph_sclass;
- nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ nv_subdev(priv)->intr = nv20_gr_intr;
+ nv_engine(priv)->cclass = &nv30_gr_cclass;
+ nv_engine(priv)->sclass = nv30_gr_sclass;
+ nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
return 0;
}
int
-nv30_graph_init(struct nouveau_object *object)
+nv30_gr_init(struct nvkm_object *object)
{
- struct nouveau_engine *engine = nv_engine(object);
- struct nv20_graph_priv *priv = (void *)engine;
- struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_engine *engine = nv_engine(object);
+ struct nv20_gr_priv *priv = (void *)engine;
+ struct nvkm_fb *pfb = nvkm_fb(object);
int ret, i;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -225,13 +219,13 @@ nv30_graph_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv30_graph_oclass = {
+struct nvkm_oclass
+nv30_gr_oclass = {
.handle = NV_ENGINE(GR, 0x30),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv30_graph_ctor,
- .dtor = nv20_graph_dtor,
- .init = nv30_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv30_gr_ctor,
+ .dtor = nv20_gr_dtor,
+ .init = nv30_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 34dd26c70b64..985b7f3306ae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,38 +1,31 @@
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-#include <engine/graph.h>
-
#include "nv20.h"
#include "regs.h"
+#include <engine/fifo.h>
+
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nv34_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
- { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
- { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
- { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
- { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
- { 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
+static struct nvkm_oclass
+nv34_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
+ { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
+ { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
+ { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
+ { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
+ { 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
};
@@ -41,21 +34,20 @@ nv34_graph_sclass[] = {
******************************************************************************/
static int
-nv34_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_chan *chan;
+ struct nv20_gr_chan *chan;
int ret, i;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->chid = nouveau_fifo_chan(parent)->chid;
+ chan->chid = nvkm_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x040c, 0x01000101);
@@ -112,16 +104,16 @@ nv34_graph_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv34_graph_cclass = {
+static struct nvkm_oclass
+nv34_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x34),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv34_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = nv20_graph_context_init,
- .fini = nv20_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv34_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = nv20_gr_context_init,
+ .fini = nv20_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -130,38 +122,38 @@ nv34_graph_cclass = {
******************************************************************************/
static int
-nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_priv *priv;
+ struct nv20_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_graph_intr;
- nv_engine(priv)->cclass = &nv34_graph_cclass;
- nv_engine(priv)->sclass = nv34_graph_sclass;
- nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ nv_subdev(priv)->intr = nv20_gr_intr;
+ nv_engine(priv)->cclass = &nv34_gr_cclass;
+ nv_engine(priv)->sclass = nv34_gr_sclass;
+ nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
return 0;
}
-struct nouveau_oclass
-nv34_graph_oclass = {
+struct nvkm_oclass
+nv34_gr_oclass = {
.handle = NV_ENGINE(GR, 0x34),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv34_graph_ctor,
- .dtor = nv20_graph_dtor,
- .init = nv30_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv34_gr_ctor,
+ .dtor = nv20_gr_dtor,
+ .init = nv30_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 2fb5756d9f66..707625f19ff5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,36 +1,31 @@
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
#include "nv20.h"
#include "regs.h"
+#include <engine/fifo.h>
+
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
-nv35_graph_sclass[] = {
- { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
- { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
- { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
- { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
- { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
- { 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
+static struct nvkm_oclass
+nv35_gr_sclass[] = {
+ { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
+ { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
+ { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
+ { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
+ { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
+ { 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
};
@@ -39,21 +34,20 @@ nv35_graph_sclass[] = {
******************************************************************************/
static int
-nv35_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_chan *chan;
+ struct nv20_gr_chan *chan;
int ret, i;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->chid = nouveau_fifo_chan(parent)->chid;
+ chan->chid = nvkm_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x040c, 0x00000101);
@@ -110,16 +104,16 @@ nv35_graph_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv35_graph_cclass = {
+static struct nvkm_oclass
+nv35_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x35),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv35_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = nv20_graph_context_init,
- .fini = nv20_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv35_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = nv20_gr_context_init,
+ .fini = nv20_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -128,38 +122,38 @@ nv35_graph_cclass = {
******************************************************************************/
static int
-nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv20_graph_priv *priv;
+ struct nv20_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_graph_intr;
- nv_engine(priv)->cclass = &nv35_graph_cclass;
- nv_engine(priv)->sclass = nv35_graph_sclass;
- nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ nv_subdev(priv)->intr = nv20_gr_intr;
+ nv_engine(priv)->cclass = &nv35_gr_cclass;
+ nv_engine(priv)->sclass = nv35_gr_sclass;
+ nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
return 0;
}
-struct nouveau_oclass
-nv35_graph_oclass = {
+struct nvkm_oclass
+nv35_gr_oclass = {
.handle = NV_ENGINE(GR, 0x35),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv35_graph_ctor,
- .dtor = nv20_graph_dtor,
- .init = nv30_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv35_gr_ctor,
+ .dtor = nv20_gr_dtor,
+ .init = nv30_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 4f401174868d..7e1937980e3f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -21,34 +21,28 @@
*
* Authors: Ben Skeggs
*/
+#include "nv40.h"
+#include "regs.h"
#include <core/client.h>
-#include <core/os.h>
#include <core/handle.h>
-#include <core/engctx.h>
-
#include <subdev/fb.h>
#include <subdev/timer.h>
-
-#include <engine/graph.h>
#include <engine/fifo.h>
-#include "nv40.h"
-#include "regs.h"
-
-struct nv40_graph_priv {
- struct nouveau_graph base;
+struct nv40_gr_priv {
+ struct nvkm_gr base;
u32 size;
};
-struct nv40_graph_chan {
- struct nouveau_graph_chan base;
+struct nv40_gr_chan {
+ struct nvkm_gr_chan base;
};
static u64
-nv40_graph_units(struct nouveau_graph *graph)
+nv40_gr_units(struct nvkm_gr *gr)
{
- struct nv40_graph_priv *priv = (void *)graph;
+ struct nv40_gr_priv *priv = (void *)gr;
return nv_rd32(priv, 0x1540);
}
@@ -58,16 +52,15 @@ nv40_graph_units(struct nouveau_graph *graph)
******************************************************************************/
static int
-nv40_graph_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpuobj *obj;
+ struct nvkm_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
- 20, 16, 0, &obj);
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
+ 20, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
@@ -83,55 +76,55 @@ nv40_graph_object_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
-nv40_graph_ofuncs = {
- .ctor = nv40_graph_object_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+static struct nvkm_ofuncs
+nv40_gr_ofuncs = {
+ .ctor = nv40_gr_object_ctor,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
};
-static struct nouveau_oclass
-nv40_graph_sclass[] = {
- { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
- { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
- { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
- { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
- { 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
+static struct nvkm_oclass
+nv40_gr_sclass[] = {
+ { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
+ { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
+ { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
+ { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
+ { 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
{},
};
-static struct nouveau_oclass
-nv44_graph_sclass[] = {
- { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
- { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
- { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
- { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
- { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
- { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
- { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
- { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
- { 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
+static struct nvkm_oclass
+nv44_gr_sclass[] = {
+ { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
+ { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
+ { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
+ { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
+ { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
+ { 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
{},
};
@@ -140,18 +133,16 @@ nv44_graph_sclass[] = {
******************************************************************************/
static int
-nv40_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv40_graph_priv *priv = (void *)engine;
- struct nv40_graph_chan *chan;
+ struct nv40_gr_priv *priv = (void *)engine;
+ struct nv40_gr_chan *chan;
int ret;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
- priv->size, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -162,10 +153,10 @@ nv40_graph_context_ctor(struct nouveau_object *parent,
}
static int
-nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
+nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
{
- struct nv40_graph_priv *priv = (void *)object->engine;
- struct nv40_graph_chan *chan = (void *)object;
+ struct nv40_gr_priv *priv = (void *)object->engine;
+ struct nv40_gr_chan *chan = (void *)object;
u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
int ret = 0;
@@ -194,16 +185,16 @@ nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
return ret;
}
-static struct nouveau_oclass
-nv40_graph_cclass = {
+static struct nvkm_oclass
+nv40_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = nv40_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv40_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = nv40_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -212,15 +203,15 @@ nv40_graph_cclass = {
******************************************************************************/
static void
-nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
+nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
{
- struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
- struct nouveau_fifo *pfifo = nouveau_fifo(engine);
- struct nv40_graph_priv *priv = (void *)engine;
+ struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
+ struct nvkm_fifo *pfifo = nvkm_fifo(engine);
+ struct nv40_gr_priv *priv = (void *)engine;
unsigned long flags;
pfifo->pause(pfifo, &flags);
- nv04_graph_idle(priv);
+ nv04_gr_idle(priv);
switch (nv_device(priv)->chipset) {
case 0x40:
@@ -290,13 +281,13 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
}
static void
-nv40_graph_intr(struct nouveau_subdev *subdev)
+nv40_gr_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nouveau_handle *handle = NULL;
- struct nv40_graph_priv *priv = (void *)subdev;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct nvkm_handle *handle = NULL;
+ struct nv40_gr_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
@@ -309,15 +300,15 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
u32 show = stat;
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & NV_PGRAPH_INTR_ERROR) {
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- handle = nouveau_handle_get_class(engctx, class);
+ handle = nvkm_handle_get_class(engctx, class);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~NV_PGRAPH_INTR_ERROR;
- nouveau_handle_put(handle);
+ nvkm_handle_put(handle);
}
if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
@@ -330,57 +321,57 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
if (show) {
nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv10_graph_intr_name, show);
+ nvkm_bitfield_print(nv10_gr_intr_name, show);
pr_cont(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ nvkm_bitfield_print(nv04_gr_nsource, nsource);
pr_cont(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
pr_cont("\n");
nv_error(priv,
"ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 4, nouveau_client_name(engctx), subc,
+ chid, inst << 4, nvkm_client_name(engctx), subc,
class, mthd, data);
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static int
-nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv40_graph_priv *priv;
+ struct nv40_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv40_graph_intr;
- nv_engine(priv)->cclass = &nv40_graph_cclass;
- if (nv44_graph_class(priv))
- nv_engine(priv)->sclass = nv44_graph_sclass;
+ nv_subdev(priv)->intr = nv40_gr_intr;
+ nv_engine(priv)->cclass = &nv40_gr_cclass;
+ if (nv44_gr_class(priv))
+ nv_engine(priv)->sclass = nv44_gr_sclass;
else
- nv_engine(priv)->sclass = nv40_graph_sclass;
- nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
+ nv_engine(priv)->sclass = nv40_gr_sclass;
+ nv_engine(priv)->tile_prog = nv40_gr_tile_prog;
- priv->base.units = nv40_graph_units;
+ priv->base.units = nv40_gr_units;
return 0;
}
static int
-nv40_graph_init(struct nouveau_object *object)
+nv40_gr_init(struct nvkm_object *object)
{
- struct nouveau_engine *engine = nv_engine(object);
- struct nouveau_fb *pfb = nouveau_fb(object);
- struct nv40_graph_priv *priv = (void *)engine;
+ struct nvkm_engine *engine = nv_engine(object);
+ struct nvkm_fb *pfb = nvkm_fb(object);
+ struct nv40_gr_priv *priv = (void *)engine;
int ret, i, j;
u32 vramsz;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -524,13 +515,13 @@ nv40_graph_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv40_graph_oclass = {
+struct nvkm_oclass
+nv40_gr_oclass = {
.handle = NV_ENGINE(GR, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_graph_ctor,
- .dtor = _nouveau_graph_dtor,
- .init = nv40_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv40_gr_ctor,
+ .dtor = _nvkm_gr_dtor,
+ .init = nv40_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index ad8209377529..d852bd6de571 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,16 +1,17 @@
-#ifndef __NV40_GRAPH_H__
-#define __NV40_GRAPH_H__
+#ifndef __NV40_GR_H__
+#define __NV40_GR_H__
+#include <engine/gr.h>
#include <core/device.h>
-#include <core/gpuobj.h>
+struct nvkm_gpuobj;
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
*/
static inline int
-nv44_graph_class(void *priv)
+nv44_gr_class(void *priv)
{
- struct nouveau_device *device = nv_device(priv);
+ struct nvkm_device *device = nv_device(priv);
if ((device->chipset & 0xf0) == 0x60)
return 1;
@@ -18,7 +19,6 @@ nv44_graph_class(void *priv)
return !(0x0baf & (1 << (device->chipset & 0x0f)));
}
-int nv40_grctx_init(struct nouveau_device *, u32 *size);
-void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
-
+int nv40_grctx_init(struct nvkm_device *, u32 *size);
+void nv40_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 38e0aa26f1cd..270d7cd63fc7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -21,36 +21,28 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
-#include <core/os.h>
#include <core/client.h>
+#include <core/device.h>
#include <core/handle.h>
-#include <core/engctx.h>
-#include <core/enum.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/timer.h>
-
#include <engine/fifo.h>
-#include <engine/graph.h>
-
-#include "nv50.h"
+#include <subdev/timer.h>
-struct nv50_graph_priv {
- struct nouveau_graph base;
+struct nv50_gr_priv {
+ struct nvkm_gr base;
spinlock_t lock;
u32 size;
};
-struct nv50_graph_chan {
- struct nouveau_graph_chan base;
+struct nv50_gr_chan {
+ struct nvkm_gr_chan base;
};
static u64
-nv50_graph_units(struct nouveau_graph *graph)
+nv50_gr_units(struct nvkm_gr *gr)
{
- struct nv50_graph_priv *priv = (void *)graph;
+ struct nv50_gr_priv *priv = (void *)gr;
return nv_rd32(priv, 0x1540);
}
@@ -60,16 +52,15 @@ nv50_graph_units(struct nouveau_graph *graph)
******************************************************************************/
static int
-nv50_graph_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpuobj *obj;
+ struct nvkm_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
@@ -81,65 +72,65 @@ nv50_graph_object_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
-nv50_graph_ofuncs = {
- .ctor = nv50_graph_object_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+static struct nvkm_ofuncs
+nv50_gr_ofuncs = {
+ .ctor = nv50_gr_object_ctor,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
};
-static struct nouveau_oclass
-nv50_graph_sclass[] = {
- { 0x0030, &nv50_graph_ofuncs },
- { 0x502d, &nv50_graph_ofuncs },
- { 0x5039, &nv50_graph_ofuncs },
- { 0x5097, &nv50_graph_ofuncs },
- { 0x50c0, &nv50_graph_ofuncs },
+static struct nvkm_oclass
+nv50_gr_sclass[] = {
+ { 0x0030, &nv50_gr_ofuncs },
+ { 0x502d, &nv50_gr_ofuncs },
+ { 0x5039, &nv50_gr_ofuncs },
+ { 0x5097, &nv50_gr_ofuncs },
+ { 0x50c0, &nv50_gr_ofuncs },
{}
};
-static struct nouveau_oclass
-nv84_graph_sclass[] = {
- { 0x0030, &nv50_graph_ofuncs },
- { 0x502d, &nv50_graph_ofuncs },
- { 0x5039, &nv50_graph_ofuncs },
- { 0x50c0, &nv50_graph_ofuncs },
- { 0x8297, &nv50_graph_ofuncs },
+static struct nvkm_oclass
+g84_gr_sclass[] = {
+ { 0x0030, &nv50_gr_ofuncs },
+ { 0x502d, &nv50_gr_ofuncs },
+ { 0x5039, &nv50_gr_ofuncs },
+ { 0x50c0, &nv50_gr_ofuncs },
+ { 0x8297, &nv50_gr_ofuncs },
{}
};
-static struct nouveau_oclass
-nva0_graph_sclass[] = {
- { 0x0030, &nv50_graph_ofuncs },
- { 0x502d, &nv50_graph_ofuncs },
- { 0x5039, &nv50_graph_ofuncs },
- { 0x50c0, &nv50_graph_ofuncs },
- { 0x8397, &nv50_graph_ofuncs },
+static struct nvkm_oclass
+gt200_gr_sclass[] = {
+ { 0x0030, &nv50_gr_ofuncs },
+ { 0x502d, &nv50_gr_ofuncs },
+ { 0x5039, &nv50_gr_ofuncs },
+ { 0x50c0, &nv50_gr_ofuncs },
+ { 0x8397, &nv50_gr_ofuncs },
{}
};
-static struct nouveau_oclass
-nva3_graph_sclass[] = {
- { 0x0030, &nv50_graph_ofuncs },
- { 0x502d, &nv50_graph_ofuncs },
- { 0x5039, &nv50_graph_ofuncs },
- { 0x50c0, &nv50_graph_ofuncs },
- { 0x8597, &nv50_graph_ofuncs },
- { 0x85c0, &nv50_graph_ofuncs },
+static struct nvkm_oclass
+gt215_gr_sclass[] = {
+ { 0x0030, &nv50_gr_ofuncs },
+ { 0x502d, &nv50_gr_ofuncs },
+ { 0x5039, &nv50_gr_ofuncs },
+ { 0x50c0, &nv50_gr_ofuncs },
+ { 0x8597, &nv50_gr_ofuncs },
+ { 0x85c0, &nv50_gr_ofuncs },
{}
};
-static struct nouveau_oclass
-nvaf_graph_sclass[] = {
- { 0x0030, &nv50_graph_ofuncs },
- { 0x502d, &nv50_graph_ofuncs },
- { 0x5039, &nv50_graph_ofuncs },
- { 0x50c0, &nv50_graph_ofuncs },
- { 0x85c0, &nv50_graph_ofuncs },
- { 0x8697, &nv50_graph_ofuncs },
+static struct nvkm_oclass
+mcp89_gr_sclass[] = {
+ { 0x0030, &nv50_gr_ofuncs },
+ { 0x502d, &nv50_gr_ofuncs },
+ { 0x5039, &nv50_gr_ofuncs },
+ { 0x50c0, &nv50_gr_ofuncs },
+ { 0x85c0, &nv50_gr_ofuncs },
+ { 0x8697, &nv50_gr_ofuncs },
{}
};
@@ -148,18 +139,16 @@ nvaf_graph_sclass[] = {
******************************************************************************/
static int
-nv50_graph_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv50_graph_priv *priv = (void *)engine;
- struct nv50_graph_chan *chan;
+ struct nv50_gr_priv *priv = (void *)engine;
+ struct nv50_gr_chan *chan;
int ret;
- ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
- priv->size, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
+ 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -168,16 +157,16 @@ nv50_graph_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv50_graph_cclass = {
+static struct nvkm_oclass
+nv50_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_graph_context_ctor,
- .dtor = _nouveau_graph_context_dtor,
- .init = _nouveau_graph_context_init,
- .fini = _nouveau_graph_context_fini,
- .rd32 = _nouveau_graph_context_rd32,
- .wr32 = _nouveau_graph_context_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_gr_context_ctor,
+ .dtor = _nvkm_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
},
};
@@ -185,7 +174,7 @@ nv50_graph_cclass = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static const struct nouveau_bitfield nv50_pgraph_status[] = {
+static const struct nvkm_bitfield nv50_pgr_status[] = {
{ 0x00000001, "BUSY" }, /* set when any bit is set */
{ 0x00000002, "DISPATCH" },
{ 0x00000004, "UNK2" },
@@ -214,22 +203,23 @@ static const struct nouveau_bitfield nv50_pgraph_status[] = {
{}
};
-static const char *const nv50_pgraph_vstatus_0[] = {
+static const char *const nv50_pgr_vstatus_0[] = {
"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
NULL
};
-static const char *const nv50_pgraph_vstatus_1[] = {
+static const char *const nv50_pgr_vstatus_1[] = {
"TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
};
-static const char *const nv50_pgraph_vstatus_2[] = {
+static const char *const nv50_pgr_vstatus_2[] = {
"RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
"ROP", NULL
};
-static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
- const char *const units[], u32 status)
+static void
+nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
+ const char *const units[], u32 status)
{
int i;
@@ -246,10 +236,10 @@ static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
}
static int
-nv84_graph_tlb_flush(struct nouveau_engine *engine)
+g84_gr_tlb_flush(struct nvkm_engine *engine)
{
- struct nouveau_timer *ptimer = nouveau_timer(engine);
- struct nv50_graph_priv *priv = (void *)engine;
+ struct nvkm_timer *ptimer = nvkm_timer(engine);
+ struct nv50_gr_priv *priv = (void *)engine;
bool idle, timeout = false;
unsigned long flags;
u64 start;
@@ -284,15 +274,15 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
tmp = nv_rd32(priv, 0x400700);
nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
- nouveau_bitfield_print(nv50_pgraph_status, tmp);
+ nvkm_bitfield_print(nv50_pgr_status, tmp);
pr_cont("\n");
- nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
- nv_rd32(priv, 0x400380));
- nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
- nv_rd32(priv, 0x400384));
- nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
- nv_rd32(priv, 0x400388));
+ nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
+ nv_rd32(priv, 0x400380));
+ nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
+ nv_rd32(priv, 0x400384));
+ nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
+ nv_rd32(priv, 0x400388));
}
@@ -304,7 +294,7 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
return timeout ? -EBUSY : 0;
}
-static const struct nouveau_bitfield nv50_mp_exec_errors[] = {
+static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
{ 0x01, "STACK_UNDERFLOW" },
{ 0x02, "STACK_MISMATCH" },
{ 0x04, "QUADON_ACTIVE" },
@@ -315,7 +305,7 @@ static const struct nouveau_bitfield nv50_mp_exec_errors[] = {
{}
};
-static const struct nouveau_bitfield nv50_mpc_traps[] = {
+static const struct nvkm_bitfield nv50_mpc_traps[] = {
{ 0x0000001, "LOCAL_LIMIT_READ" },
{ 0x0000010, "LOCAL_LIMIT_WRITE" },
{ 0x0000040, "STACK_LIMIT" },
@@ -329,7 +319,7 @@ static const struct nouveau_bitfield nv50_mpc_traps[] = {
{}
};
-static const struct nouveau_bitfield nv50_tex_traps[] = {
+static const struct nvkm_bitfield nv50_tex_traps[] = {
{ 0x00000001, "" }, /* any bit set? */
{ 0x00000002, "FAULT" },
{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
@@ -338,30 +328,30 @@ static const struct nouveau_bitfield nv50_tex_traps[] = {
{}
};
-static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "IN" },
{ 0x00000004, "OUT" },
{}
};
-static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
{ 0x00000001, "FAULT" },
{}
};
-static const struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
{ 0x00000001, "FAULT" },
{}
};
-static const struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
{ 0x00000001, "FAULT" },
{}
};
/* There must be a *lot* of these. Will take some time to gather them up. */
-const struct nouveau_enum nv50_data_error_names[] = {
+const struct nvkm_enum nv50_data_error_names[] = {
{ 0x00000003, "INVALID_OPERATION", NULL },
{ 0x00000004, "INVALID_VALUE", NULL },
{ 0x00000005, "INVALID_ENUM", NULL },
@@ -407,7 +397,7 @@ const struct nouveau_enum nv50_data_error_names[] = {
{}
};
-static const struct nouveau_bitfield nv50_graph_intr_name[] = {
+static const struct nvkm_bitfield nv50_gr_intr_name[] = {
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "COMPUTE_QUERY" },
{ 0x00000010, "ILLEGAL_MTHD" },
@@ -421,7 +411,7 @@ static const struct nouveau_bitfield nv50_graph_intr_name[] = {
{}
};
-static const struct nouveau_bitfield nv50_graph_trap_prop[] = {
+static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
{ 0x00000004, "SURF_WIDTH_OVERRUN" },
{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
{ 0x00000010, "DST2D_FAULT" },
@@ -437,7 +427,7 @@ static const struct nouveau_bitfield nv50_graph_trap_prop[] = {
};
static void
-nv50_priv_prop_trap(struct nv50_graph_priv *priv,
+nv50_priv_prop_trap(struct nv50_gr_priv *priv,
u32 ustatus_addr, u32 ustatus, u32 tp)
{
u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
@@ -468,7 +458,7 @@ nv50_priv_prop_trap(struct nv50_graph_priv *priv,
}
if (ustatus) {
nv_error(priv, "TRAP_PROP - TP %d -", tp);
- nouveau_bitfield_print(nv50_graph_trap_prop, ustatus);
+ nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
pr_cont(" - Address %02x%08x\n", e14, e10);
}
nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
@@ -476,7 +466,7 @@ nv50_priv_prop_trap(struct nv50_graph_priv *priv,
}
static void
-nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
+nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
{
u32 units = nv_rd32(priv, 0x1540);
u32 addr, mp10, status, pc, oplow, ophigh;
@@ -500,7 +490,7 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
ophigh = nv_rd32(priv, addr + 0x74);
nv_error(priv, "TRAP_MP_EXEC - "
"TP %d MP %d:", tpid, i);
- nouveau_bitfield_print(nv50_mp_exec_errors, status);
+ nvkm_bitfield_print(nv50_mp_exec_errors, status);
pr_cont(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
@@ -515,8 +505,8 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
}
static void
-nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
- u32 ustatus_new, int display, const char *name)
+nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
+ u32 ustatus_new, int display, const char *name)
{
int tps = 0;
u32 units = nv_rd32(priv, 0x1540);
@@ -542,7 +532,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
nv_rd32(priv, r));
if (ustatus) {
nv_error(priv, "%s - TP%d:", name, i);
- nouveau_bitfield_print(nv50_tex_traps,
+ nvkm_bitfield_print(nv50_tex_traps,
ustatus);
pr_cont("\n");
ustatus = 0;
@@ -556,7 +546,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
}
if (ustatus && display) {
nv_error(priv, "%s - TP%d:", name, i);
- nouveau_bitfield_print(nv50_mpc_traps, ustatus);
+ nvkm_bitfield_print(nv50_mpc_traps, ustatus);
pr_cont("\n");
ustatus = 0;
}
@@ -580,8 +570,8 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
}
static int
-nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
- int chid, u64 inst, struct nouveau_object *engctx)
+nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
+ int chid, u64 inst, struct nvkm_object *engctx)
{
u32 status = nv_rd32(priv, 0x400108);
u32 ustatus;
@@ -617,7 +607,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv,
"ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
chid, inst,
- nouveau_client_name(engctx), subc,
+ nvkm_client_name(engctx), subc,
class, mthd, datah, datal, addr, r848);
} else
if (display) {
@@ -642,7 +632,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv,
"ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
chid, inst,
- nouveau_client_name(engctx), subc,
+ nvkm_client_name(engctx), subc,
class, mthd, data, addr);
} else
if (display) {
@@ -670,7 +660,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
if (display) {
nv_error(priv, "TRAP_M2MF");
- nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+ nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
pr_cont("\n");
nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
@@ -691,7 +681,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
if (display) {
nv_error(priv, "TRAP_VFETCH");
- nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+ nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
pr_cont("\n");
nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
@@ -708,7 +698,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
if (display) {
nv_error(priv, "TRAP_STRMOUT");
- nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+ nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
pr_cont("\n");
nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
@@ -729,7 +719,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
if (display) {
nv_error(priv, "TRAP_CCACHE");
- nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+ nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
pr_cont("\n");
nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
@@ -791,13 +781,13 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
}
static void
-nv50_graph_intr(struct nouveau_subdev *subdev)
+nv50_gr_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nouveau_handle *handle = NULL;
- struct nv50_graph_priv *priv = (void *)subdev;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct nvkm_handle *handle = NULL;
+ struct nv50_gr_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, 0x400100);
u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
u32 addr = nv_rd32(priv, 0x400704);
@@ -808,27 +798,27 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
u32 show = stat, show_bitfield = stat;
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000010) {
- handle = nouveau_handle_get_class(engctx, class);
+ handle = nvkm_handle_get_class(engctx, class);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~0x00000010;
- nouveau_handle_put(handle);
+ nvkm_handle_put(handle);
}
if (show & 0x00100000) {
u32 ecode = nv_rd32(priv, 0x400110);
nv_error(priv, "DATA_ERROR ");
- nouveau_enum_print(nv50_data_error_names, ecode);
+ nvkm_enum_print(nv50_data_error_names, ecode);
pr_cont("\n");
show_bitfield &= ~0x00100000;
}
if (stat & 0x00200000) {
- if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
- engctx))
+ if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
+ engctx))
show &= ~0x00200000;
show_bitfield &= ~0x00200000;
}
@@ -840,43 +830,43 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
show &= show_bitfield;
if (show) {
nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv50_graph_intr_name, show);
+ nvkm_bitfield_print(nv50_gr_intr_name, show);
pr_cont("\n");
}
nv_error(priv,
"ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, nouveau_client_name(engctx),
+ chid, (u64)inst << 12, nvkm_client_name(engctx),
subc, class, mthd, data);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static int
-nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv50_graph_priv *priv;
+ struct nv50_gr_priv *priv;
int ret;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00201000;
- nv_subdev(priv)->intr = nv50_graph_intr;
- nv_engine(priv)->cclass = &nv50_graph_cclass;
+ nv_subdev(priv)->intr = nv50_gr_intr;
+ nv_engine(priv)->cclass = &nv50_gr_cclass;
- priv->base.units = nv50_graph_units;
+ priv->base.units = nv50_gr_units;
switch (nv_device(priv)->chipset) {
case 0x50:
- nv_engine(priv)->sclass = nv50_graph_sclass;
+ nv_engine(priv)->sclass = nv50_gr_sclass;
break;
case 0x84:
case 0x86:
@@ -884,20 +874,20 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
case 0x94:
case 0x96:
case 0x98:
- nv_engine(priv)->sclass = nv84_graph_sclass;
+ nv_engine(priv)->sclass = g84_gr_sclass;
break;
case 0xa0:
case 0xaa:
case 0xac:
- nv_engine(priv)->sclass = nva0_graph_sclass;
+ nv_engine(priv)->sclass = gt200_gr_sclass;
break;
case 0xa3:
case 0xa5:
case 0xa8:
- nv_engine(priv)->sclass = nva3_graph_sclass;
+ nv_engine(priv)->sclass = gt215_gr_sclass;
break;
case 0xaf:
- nv_engine(priv)->sclass = nvaf_graph_sclass;
+ nv_engine(priv)->sclass = mcp89_gr_sclass;
break;
}
@@ -905,19 +895,19 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* unfortunate hw bug workaround... */
if (nv_device(priv)->chipset != 0x50 &&
nv_device(priv)->chipset != 0xac)
- nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
+ nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;
spin_lock_init(&priv->lock);
return 0;
}
static int
-nv50_graph_init(struct nouveau_object *object)
+nv50_gr_init(struct nvkm_object *object)
{
- struct nv50_graph_priv *priv = (void *)object;
+ struct nv50_gr_priv *priv = (void *)object;
int ret, units, i;
- ret = nouveau_graph_init(&priv->base);
+ ret = nvkm_gr_init(&priv->base);
if (ret)
return ret;
@@ -997,13 +987,13 @@ nv50_graph_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv50_graph_oclass = {
+struct nvkm_oclass
+nv50_gr_oclass = {
.handle = NV_ENGINE(GR, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_graph_ctor,
- .dtor = _nouveau_graph_dtor,
- .init = nv50_graph_init,
- .fini = _nouveau_graph_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_gr_ctor,
+ .dtor = _nvkm_gr_dtor,
+ .init = nv50_gr_init,
+ .fini = _nvkm_gr_fini,
},
};
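The hunks above convert this file's {bitmask, name} tables (nv50_gr_intr_name, nv50_mp_exec_errors, the TRAP_* tables) from struct nouveau_bitfield to struct nvkm_bitfield; the interrupt and trap handlers walk those tables to turn raw status words into readable log lines. Below is a small, self-contained userspace sketch of that decoding pattern; the struct and helper names are invented stand-ins rather than the nvkm API, and the sample entries are copied from nv50_gr_intr_name above.

/* Illustrative only: mirrors the {mask, name} table walk, not kernel code. */
#include <stdio.h>

struct bitfield_name {
	unsigned int mask;
	const char *name;
};

/* Entries taken from nv50_gr_intr_name in the hunk above. */
static const struct bitfield_name intr_names[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0, NULL }	/* sentinel, like the {} terminators in the tables above */
};

/* Print the name of every bit set in "value", much as the interrupt
 * handler does before clearing the status register. */
static void bitfield_print(const struct bitfield_name *tbl, unsigned int value)
{
	for (; tbl->mask; tbl++)
		if (value & tbl->mask)
			printf(" %s", tbl->name);
}

int main(void)
{
	printf("stat 0x00000011:");
	bitfield_print(intr_names, 0x00000011);	/* -> NOTIFY ILLEGAL_MTHD */
	printf("\n");
	return 0;
}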
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
new file mode 100644
index 000000000000..bcf786f6b731
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -0,0 +1,9 @@
+#ifndef __NV50_GR_H__
+#define __NV50_GR_H__
+#include <engine/gr.h>
+struct nvkm_device;
+struct nvkm_gpuobj;
+
+int nv50_grctx_init(struct nvkm_device *, u32 *size);
+void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
index fde8e24415e4..90a9873ce522 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
@@ -1,5 +1,5 @@
-#ifndef __NOUVEAU_GRAPH_REGS_H__
-#define __NOUVEAU_GRAPH_REGS_H__
+#ifndef __NVKM_GR_REGS_H__
+#define __NVKM_GR_REGS_H__
#define NV04_PGRAPH_DEBUG_0 0x00400080
#define NV04_PGRAPH_DEBUG_1 0x00400084
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild
new file mode 100644
index 000000000000..61b7b5f98f3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild
@@ -0,0 +1,5 @@
+nvkm-y += nvkm/engine/mpeg/nv31.o
+nvkm-y += nvkm/engine/mpeg/nv40.o
+nvkm-y += nvkm/engine/mpeg/nv44.o
+nvkm-y += nvkm/engine/mpeg/nv50.o
+nvkm-y += nvkm/engine/mpeg/g84.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index e9cc8b116a24..0df889fa2611 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -21,30 +21,22 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-#include <core/engctx.h>
-
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-#include <subdev/timer.h>
-
#include <engine/mpeg.h>
-struct nv84_mpeg_priv {
- struct nouveau_mpeg base;
+struct g84_mpeg_priv {
+ struct nvkm_mpeg base;
};
-struct nv84_mpeg_chan {
- struct nouveau_mpeg_chan base;
+struct g84_mpeg_chan {
+ struct nvkm_mpeg_chan base;
};
/*******************************************************************************
* MPEG object classes
******************************************************************************/
-static struct nouveau_oclass
-nv84_mpeg_sclass[] = {
+static struct nvkm_oclass
+g84_mpeg_sclass[] = {
{ 0x8274, &nv50_mpeg_ofuncs },
{}
};
@@ -53,16 +45,16 @@ nv84_mpeg_sclass[] = {
* PMPEG context
******************************************************************************/
-static struct nouveau_oclass
-nv84_mpeg_cclass = {
+static struct nvkm_oclass
+g84_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_mpeg_context_ctor,
- .dtor = _nouveau_mpeg_context_dtor,
- .init = _nouveau_mpeg_context_init,
- .fini = _nouveau_mpeg_context_fini,
- .rd32 = _nouveau_mpeg_context_rd32,
- .wr32 = _nouveau_mpeg_context_wr32,
+ .dtor = _nvkm_mpeg_context_dtor,
+ .init = _nvkm_mpeg_context_init,
+ .fini = _nvkm_mpeg_context_fini,
+ .rd32 = _nvkm_mpeg_context_rd32,
+ .wr32 = _nvkm_mpeg_context_wr32,
},
};
@@ -71,32 +63,32 @@ nv84_mpeg_cclass = {
******************************************************************************/
static int
-nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv84_mpeg_priv *priv;
+ struct g84_mpeg_priv *priv;
int ret;
- ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000002;
nv_subdev(priv)->intr = nv50_mpeg_intr;
- nv_engine(priv)->cclass = &nv84_mpeg_cclass;
- nv_engine(priv)->sclass = nv84_mpeg_sclass;
+ nv_engine(priv)->cclass = &g84_mpeg_cclass;
+ nv_engine(priv)->sclass = g84_mpeg_sclass;
return 0;
}
-struct nouveau_oclass
-nv84_mpeg_oclass = {
+struct nvkm_oclass
+g84_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_mpeg_ctor,
- .dtor = _nouveau_mpeg_dtor,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_mpeg_ctor,
+ .dtor = _nvkm_mpeg_dtor,
.init = nv50_mpeg_init,
- .fini = _nouveau_mpeg_fini,
+ .fini = _nvkm_mpeg_fini,
},
};
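Every engine converted in this series ends with the same shape of declaration: a struct nvkm_oclass whose .ofuncs member points at a table of ctor/dtor/init/fini hooks, as in g84_mpeg_oclass just above. The sketch below illustrates that function-pointer-table idiom in isolation; the obj_class/obj_funcs names are invented for the example and are not the kernel's nvkm types.

/* Illustrative only: a class descriptor wiring object hooks through a table. */
#include <stdio.h>
#include <stdlib.h>

struct object {
	const char *name;
};

struct obj_funcs {
	struct object *(*ctor)(const char *name);
	void (*dtor)(struct object *);
	int (*init)(struct object *);
	int (*fini)(struct object *);
};

struct obj_class {
	unsigned int handle;		/* cf. NV_ENGINE(MPEG, 0x84) above */
	const struct obj_funcs *ofuncs;
};

static struct object *
demo_ctor(const char *name)
{
	struct object *obj = malloc(sizeof(*obj));
	if (obj)
		obj->name = name;
	return obj;
}

static void demo_dtor(struct object *obj) { free(obj); }
static int demo_init(struct object *obj) { printf("init %s\n", obj->name); return 0; }
static int demo_fini(struct object *obj) { printf("fini %s\n", obj->name); return 0; }

static const struct obj_class demo_class = {
	.handle = 0x3184,		/* arbitrary example handle */
	.ofuncs = &(const struct obj_funcs) {
		.ctor = demo_ctor,
		.dtor = demo_dtor,
		.init = demo_init,
		.fini = demo_fini,
	},
};

int main(void)
{
	/* Callers only ever go through the table. */
	struct object *obj = demo_class.ofuncs->ctor("pmpeg");
	if (!obj)
		return 1;
	demo_class.ofuncs->init(obj);
	demo_class.ofuncs->fini(obj);
	demo_class.ofuncs->dtor(obj);
	return 0;
}

Because callers only reach an object through this table, the g84 variant above can reuse nv50_mpeg_context_ctor and nv50_mpeg_init while overriding only the hooks that differ.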
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index d88c700b2f69..b5bef0718359 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -21,35 +21,30 @@
*
* Authors: Ben Skeggs
*/
+#include "nv31.h"
#include <core/client.h>
-#include <core/os.h>
-#include <core/engctx.h>
#include <core/handle.h>
-
+#include <engine/fifo.h>
+#include <subdev/instmem.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include <subdev/instmem.h>
-
-#include <engine/fifo.h>
-#include <engine/mpeg.h>
-#include <engine/mpeg/nv31.h>
/*******************************************************************************
* MPEG object classes
******************************************************************************/
static int
-nv31_mpeg_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv31_mpeg_object_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpuobj *obj;
+ struct nvkm_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
- 20, 16, 0, &obj);
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
+ 20, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
@@ -62,9 +57,9 @@ nv31_mpeg_object_ctor(struct nouveau_object *parent,
}
static int
-nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
+nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
{
- struct nouveau_instmem *imem = nouveau_instmem(object);
+ struct nvkm_instmem *imem = nvkm_instmem(object);
struct nv31_mpeg_priv *priv = (void *)object->engine;
u32 inst = *(u32 *)arg << 4;
u32 dma0 = nv_ro32(imem, inst + 0);
@@ -100,17 +95,17 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
return 0;
}
-struct nouveau_ofuncs
+struct nvkm_ofuncs
nv31_mpeg_ofuncs = {
.ctor = nv31_mpeg_object_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
};
-static struct nouveau_omthds
+static struct nvkm_omthds
nv31_mpeg_omthds[] = {
{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
@@ -118,7 +113,7 @@ nv31_mpeg_omthds[] = {
{}
};
-struct nouveau_oclass
+struct nvkm_oclass
nv31_mpeg_sclass[] = {
{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
{}
@@ -129,17 +124,17 @@ nv31_mpeg_sclass[] = {
******************************************************************************/
static int
-nv31_mpeg_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv31_mpeg_context_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv31_mpeg_priv *priv = (void *)engine;
struct nv31_mpeg_chan *chan;
unsigned long flags;
int ret;
- ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+ ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -147,7 +142,7 @@ nv31_mpeg_context_ctor(struct nouveau_object *parent,
spin_lock_irqsave(&nv_engine(priv)->lock, flags);
if (priv->chan) {
spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
- nouveau_object_destroy(&chan->base);
+ nvkm_object_destroy(&chan->base);
*pobject = NULL;
return -EBUSY;
}
@@ -157,7 +152,7 @@ nv31_mpeg_context_ctor(struct nouveau_object *parent,
}
static void
-nv31_mpeg_context_dtor(struct nouveau_object *object)
+nv31_mpeg_context_dtor(struct nvkm_object *object)
{
struct nv31_mpeg_priv *priv = (void *)object->engine;
struct nv31_mpeg_chan *chan = (void *)object;
@@ -166,17 +161,17 @@ nv31_mpeg_context_dtor(struct nouveau_object *object)
spin_lock_irqsave(&nv_engine(priv)->lock, flags);
priv->chan = NULL;
spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
- nouveau_object_destroy(&chan->base);
+ nvkm_object_destroy(&chan->base);
}
-struct nouveau_oclass
+struct nvkm_oclass
nv31_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x31),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv31_mpeg_context_ctor,
.dtor = nv31_mpeg_context_dtor,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
},
};
@@ -185,9 +180,9 @@ nv31_mpeg_cclass = {
******************************************************************************/
void
-nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
+nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
{
- struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+ struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
struct nv31_mpeg_priv *priv = (void *)engine;
nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
@@ -196,12 +191,12 @@ nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
}
void
-nv31_mpeg_intr(struct nouveau_subdev *subdev)
+nv31_mpeg_intr(struct nvkm_subdev *subdev)
{
struct nv31_mpeg_priv *priv = (void *)subdev;
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_handle *handle;
- struct nouveau_object *engctx;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_handle *handle;
+ struct nvkm_object *engctx;
u32 stat = nv_rd32(priv, 0x00b100);
u32 type = nv_rd32(priv, 0x00b230);
u32 mthd = nv_rd32(priv, 0x00b234);
@@ -220,10 +215,10 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
}
if (type == 0x00000010 && engctx) {
- handle = nouveau_handle_get_class(engctx, 0x3174);
+ handle = nvkm_handle_get_class(engctx, 0x3174);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~0x01000000;
- nouveau_handle_put(handle);
+ nvkm_handle_put(handle);
}
}
@@ -233,21 +228,21 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
if (show) {
nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
pfifo->chid(pfifo, engctx),
- nouveau_client_name(engctx), stat, type, mthd, data);
+ nvkm_client_name(engctx), stat, type, mthd, data);
}
spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
}
static int
-nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv31_mpeg_priv *priv;
int ret;
- ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -261,14 +256,14 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
int
-nv31_mpeg_init(struct nouveau_object *object)
+nv31_mpeg_init(struct nvkm_object *object)
{
- struct nouveau_engine *engine = nv_engine(object);
+ struct nvkm_engine *engine = nv_engine(object);
struct nv31_mpeg_priv *priv = (void *)object;
- struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_fb *pfb = nvkm_fb(object);
int ret, i;
- ret = nouveau_mpeg_init(&priv->base);
+ ret = nvkm_mpeg_init(&priv->base);
if (ret)
return ret;
@@ -297,13 +292,13 @@ nv31_mpeg_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv31_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x31),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv31_mpeg_ctor,
- .dtor = _nouveau_mpeg_dtor,
+ .dtor = _nvkm_mpeg_dtor,
.init = nv31_mpeg_init,
- .fini = _nouveau_mpeg_fini,
+ .fini = _nvkm_mpeg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index d08629d0b6ad..782b796d7458 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,15 +1,13 @@
#ifndef __NV31_MPEG_H__
#define __NV31_MPEG_H__
-
#include <engine/mpeg.h>
struct nv31_mpeg_chan {
- struct nouveau_object base;
+ struct nvkm_object base;
};
struct nv31_mpeg_priv {
- struct nouveau_mpeg base;
+ struct nvkm_mpeg base;
struct nv31_mpeg_chan *chan;
};
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index bdb2f20ff7b1..9508bf9e140f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -21,25 +21,18 @@
*
* Authors: Ben Skeggs
*/
+#include "nv31.h"
-#include <core/os.h>
-#include <core/engctx.h>
-
-#include <subdev/fb.h>
-#include <subdev/timer.h>
#include <subdev/instmem.h>
-#include <engine/mpeg.h>
-#include <engine/mpeg/nv31.h>
-
/*******************************************************************************
* MPEG object classes
******************************************************************************/
static int
-nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
+nv40_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
{
- struct nouveau_instmem *imem = nouveau_instmem(object);
+ struct nvkm_instmem *imem = nvkm_instmem(object);
struct nv31_mpeg_priv *priv = (void *)object->engine;
u32 inst = *(u32 *)arg << 4;
u32 dma0 = nv_ro32(imem, inst + 0);
@@ -75,7 +68,7 @@ nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
return 0;
}
-static struct nouveau_omthds
+static struct nvkm_omthds
nv40_mpeg_omthds[] = {
{ 0x0190, 0x0190, nv40_mpeg_mthd_dma },
{ 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
@@ -83,7 +76,7 @@ nv40_mpeg_omthds[] = {
{}
};
-struct nouveau_oclass
+struct nvkm_oclass
nv40_mpeg_sclass[] = {
{ 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
{}
@@ -94,7 +87,7 @@ nv40_mpeg_sclass[] = {
******************************************************************************/
static void
-nv40_mpeg_intr(struct nouveau_subdev *subdev)
+nv40_mpeg_intr(struct nvkm_subdev *subdev)
{
struct nv31_mpeg_priv *priv = (void *)subdev;
u32 stat;
@@ -109,14 +102,14 @@ nv40_mpeg_intr(struct nouveau_subdev *subdev)
}
static int
-nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv31_mpeg_priv *priv;
int ret;
- ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -129,13 +122,13 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv40_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_mpeg_ctor,
- .dtor = _nouveau_mpeg_dtor,
+ .dtor = _nvkm_mpeg_dtor,
.init = nv31_mpeg_init,
- .fini = _nouveau_mpeg_fini,
+ .fini = _nvkm_mpeg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 72c7f33fd29b..4720ac884468 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -21,25 +21,18 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/mpeg.h>
-#include <core/os.h>
#include <core/client.h>
-#include <core/engctx.h>
#include <core/handle.h>
-
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <subdev/instmem.h>
-
#include <engine/fifo.h>
-#include <engine/mpeg.h>
struct nv44_mpeg_priv {
- struct nouveau_mpeg base;
+ struct nvkm_mpeg base;
};
struct nv44_mpeg_chan {
- struct nouveau_mpeg_chan base;
+ struct nvkm_mpeg_chan base;
};
/*******************************************************************************
@@ -47,17 +40,16 @@ struct nv44_mpeg_chan {
******************************************************************************/
static int
-nv44_mpeg_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv44_mpeg_context_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv44_mpeg_chan *chan;
int ret;
- ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
- 264 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 264 * 4,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -67,7 +59,7 @@ nv44_mpeg_context_ctor(struct nouveau_object *parent,
}
static int
-nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+nv44_mpeg_context_fini(struct nvkm_object *object, bool suspend)
{
struct nv44_mpeg_priv *priv = (void *)object->engine;
@@ -81,16 +73,16 @@ nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
return 0;
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv44_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x44),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv44_mpeg_context_ctor,
- .dtor = _nouveau_mpeg_context_dtor,
- .init = _nouveau_mpeg_context_init,
+ .dtor = _nvkm_mpeg_context_dtor,
+ .init = _nvkm_mpeg_context_init,
.fini = nv44_mpeg_context_fini,
- .rd32 = _nouveau_mpeg_context_rd32,
- .wr32 = _nouveau_mpeg_context_wr32,
+ .rd32 = _nvkm_mpeg_context_rd32,
+ .wr32 = _nvkm_mpeg_context_wr32,
},
};
@@ -99,12 +91,12 @@ nv44_mpeg_cclass = {
******************************************************************************/
static void
-nv44_mpeg_intr(struct nouveau_subdev *subdev)
+nv44_mpeg_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nouveau_handle *handle;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct nvkm_handle *handle;
struct nv44_mpeg_priv *priv = (void *)subdev;
u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
u32 stat = nv_rd32(priv, 0x00b100);
@@ -114,7 +106,7 @@ nv44_mpeg_intr(struct nouveau_subdev *subdev)
u32 show = stat;
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x01000000) {
@@ -125,10 +117,10 @@ nv44_mpeg_intr(struct nouveau_subdev *subdev)
}
if (type == 0x00000010) {
- handle = nouveau_handle_get_class(engctx, 0x3174);
+ handle = nvkm_handle_get_class(engctx, 0x3174);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~0x01000000;
- nouveau_handle_put(handle);
+ nvkm_handle_put(handle);
}
}
@@ -138,15 +130,15 @@ nv44_mpeg_intr(struct nouveau_subdev *subdev)
if (show) {
nv_error(priv,
"ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst << 4, nouveau_client_name(engctx), stat,
+ chid, inst << 4, nvkm_client_name(engctx), stat,
type, mthd, data);
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static void
-nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
+nv44_mpeg_me_intr(struct nvkm_subdev *subdev)
{
struct nv44_mpeg_priv *priv = (void *)subdev;
u32 stat;
@@ -161,14 +153,14 @@ nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
}
static int
-nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv44_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv44_mpeg_priv *priv;
int ret;
- ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -181,13 +173,13 @@ nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv44_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x44),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv44_mpeg_ctor,
- .dtor = _nouveau_mpeg_dtor,
+ .dtor = _nvkm_mpeg_dtor,
.init = nv31_mpeg_init,
- .fini = _nouveau_mpeg_fini,
+ .fini = _nvkm_mpeg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index cae33f86b11a..b3463f3739ce 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -21,22 +21,17 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/mpeg.h>
-#include <core/os.h>
-#include <core/engctx.h>
-
-#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
-#include <engine/mpeg.h>
-
struct nv50_mpeg_priv {
- struct nouveau_mpeg base;
+ struct nvkm_mpeg base;
};
struct nv50_mpeg_chan {
- struct nouveau_mpeg_chan base;
+ struct nvkm_mpeg_chan base;
};
/*******************************************************************************
@@ -44,16 +39,16 @@ struct nv50_mpeg_chan {
******************************************************************************/
static int
-nv50_mpeg_object_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_mpeg_object_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpuobj *obj;
+ struct nvkm_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
+ ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
@@ -65,17 +60,17 @@ nv50_mpeg_object_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_ofuncs
+struct nvkm_ofuncs
nv50_mpeg_ofuncs = {
.ctor = nv50_mpeg_object_ctor,
- .dtor = _nouveau_gpuobj_dtor,
- .init = _nouveau_gpuobj_init,
- .fini = _nouveau_gpuobj_fini,
- .rd32 = _nouveau_gpuobj_rd32,
- .wr32 = _nouveau_gpuobj_wr32,
+ .dtor = _nvkm_gpuobj_dtor,
+ .init = _nvkm_gpuobj_init,
+ .fini = _nvkm_gpuobj_fini,
+ .rd32 = _nvkm_gpuobj_rd32,
+ .wr32 = _nvkm_gpuobj_wr32,
};
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_mpeg_sclass[] = {
{ 0x3174, &nv50_mpeg_ofuncs },
{}
@@ -86,17 +81,17 @@ nv50_mpeg_sclass[] = {
******************************************************************************/
int
-nv50_mpeg_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_mpeg_context_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvkm_bar *bar = nvkm_bar(parent);
struct nv50_mpeg_chan *chan;
int ret;
- ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
+ 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -107,16 +102,16 @@ nv50_mpeg_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_mpeg_context_ctor,
- .dtor = _nouveau_mpeg_context_dtor,
- .init = _nouveau_mpeg_context_init,
- .fini = _nouveau_mpeg_context_fini,
- .rd32 = _nouveau_mpeg_context_rd32,
- .wr32 = _nouveau_mpeg_context_wr32,
+ .dtor = _nvkm_mpeg_context_dtor,
+ .init = _nvkm_mpeg_context_init,
+ .fini = _nvkm_mpeg_context_fini,
+ .rd32 = _nvkm_mpeg_context_rd32,
+ .wr32 = _nvkm_mpeg_context_wr32,
},
};
@@ -125,7 +120,7 @@ nv50_mpeg_cclass = {
******************************************************************************/
void
-nv50_mpeg_intr(struct nouveau_subdev *subdev)
+nv50_mpeg_intr(struct nvkm_subdev *subdev)
{
struct nv50_mpeg_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, 0x00b100);
@@ -152,7 +147,7 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
}
static void
-nv50_vpe_intr(struct nouveau_subdev *subdev)
+nv50_vpe_intr(struct nvkm_subdev *subdev)
{
struct nv50_mpeg_priv *priv = (void *)subdev;
@@ -167,14 +162,14 @@ nv50_vpe_intr(struct nouveau_subdev *subdev)
}
static int
-nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_mpeg_priv *priv;
int ret;
- ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -187,12 +182,12 @@ nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
int
-nv50_mpeg_init(struct nouveau_object *object)
+nv50_mpeg_init(struct nvkm_object *object)
{
struct nv50_mpeg_priv *priv = (void *)object;
int ret;
- ret = nouveau_mpeg_init(&priv->base);
+ ret = nvkm_mpeg_init(&priv->base);
if (ret)
return ret;
@@ -218,13 +213,13 @@ nv50_mpeg_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv50_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_mpeg_ctor,
- .dtor = _nouveau_mpeg_dtor,
+ .dtor = _nvkm_mpeg_dtor,
.init = nv50_mpeg_init,
- .fini = _nouveau_mpeg_fini,
+ .fini = _nvkm_mpeg_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
new file mode 100644
index 000000000000..c59c83a67315
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/engine/mspdec/g98.o
+nvkm-y += nvkm/engine/mspdec/gf100.o
+nvkm-y += nvkm/engine/mspdec/gk104.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index fc9ae0ff1ef5..2174577793a4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -21,53 +21,52 @@
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-
+#include <engine/mspdec.h>
#include <engine/falcon.h>
-#include <engine/vp.h>
-struct nv98_vp_priv {
- struct nouveau_falcon base;
+struct g98_mspdec_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * VP object classes
+ * MSPDEC object classes
******************************************************************************/
-static struct nouveau_oclass
-nv98_vp_sclass[] = {
- { 0x88b2, &nouveau_object_ofuncs },
- { 0x85b2, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+g98_mspdec_sclass[] = {
+ { 0x88b2, &nvkm_object_ofuncs },
+ { 0x85b2, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PVP context
+ * PMSPDEC context
******************************************************************************/
-static struct nouveau_oclass
-nv98_vp_cclass = {
- .handle = NV_ENGCTX(VP, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+g98_mspdec_cclass = {
+ .handle = NV_ENGCTX(MSPDEC, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PVP engine/subdev functions
+ * PMSPDEC engine/subdev functions
******************************************************************************/
static int
-nv98_vp_init(struct nouveau_object *object)
+g98_mspdec_init(struct nvkm_object *object)
{
- struct nv98_vp_priv *priv = (void *)object;
+ struct g98_mspdec_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -77,34 +76,34 @@ nv98_vp_init(struct nouveau_object *object)
}
static int
-nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g98_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv98_vp_priv *priv;
+ struct g98_mspdec_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
- "PVP", "vp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PMSPDEC", "mspdec", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x01020000;
- nv_engine(priv)->cclass = &nv98_vp_cclass;
- nv_engine(priv)->sclass = nv98_vp_sclass;
+ nv_engine(priv)->cclass = &g98_mspdec_cclass;
+ nv_engine(priv)->sclass = g98_mspdec_sclass;
return 0;
}
-struct nouveau_oclass
-nv98_vp_oclass = {
- .handle = NV_ENGINE(VP, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_vp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nv98_vp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+g98_mspdec_oclass = {
+ .handle = NV_ENGINE(MSPDEC, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g98_mspdec_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = g98_mspdec_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index ac1f62aace72..c814a5f65eb0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -21,52 +21,51 @@
*
* Authors: Maarten Lankhorst
*/
-
+#include <engine/mspdec.h>
#include <engine/falcon.h>
-#include <engine/vp.h>
-struct nvc0_vp_priv {
- struct nouveau_falcon base;
+struct gf100_mspdec_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * VP object classes
+ * MSPDEC object classes
******************************************************************************/
-static struct nouveau_oclass
-nvc0_vp_sclass[] = {
- { 0x90b2, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gf100_mspdec_sclass[] = {
+ { 0x90b2, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PVP context
+ * PMSPDEC context
******************************************************************************/
-static struct nouveau_oclass
-nvc0_vp_cclass = {
- .handle = NV_ENGCTX(VP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+gf100_mspdec_cclass = {
+ .handle = NV_ENGCTX(MSPDEC, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PVP engine/subdev functions
+ * PMSPDEC engine/subdev functions
******************************************************************************/
static int
-nvc0_vp_init(struct nouveau_object *object)
+gf100_mspdec_init(struct nvkm_object *object)
{
- struct nvc0_vp_priv *priv = (void *)object;
+ struct gf100_mspdec_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -76,35 +75,35 @@ nvc0_vp_init(struct nouveau_object *object)
}
static int
-nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_vp_priv *priv;
+ struct gf100_mspdec_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
- "PVP", "vp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PMSPDEC", "mspdec", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00020000;
- nv_subdev(priv)->intr = nouveau_falcon_intr;
- nv_engine(priv)->cclass = &nvc0_vp_cclass;
- nv_engine(priv)->sclass = nvc0_vp_sclass;
+ nv_subdev(priv)->intr = nvkm_falcon_intr;
+ nv_engine(priv)->cclass = &gf100_mspdec_cclass;
+ nv_engine(priv)->sclass = gf100_mspdec_sclass;
return 0;
}
-struct nouveau_oclass
-nvc0_vp_oclass = {
- .handle = NV_ENGINE(VP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_vp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nvc0_vp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+gf100_mspdec_oclass = {
+ .handle = NV_ENGINE(MSPDEC, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_mspdec_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gf100_mspdec_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index d4c3108479c9..979920650dbd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -21,52 +21,51 @@
*
* Authors: Ben Skeggs
*/
-
+#include <engine/mspdec.h>
#include <engine/falcon.h>
-#include <engine/vp.h>
-struct nve0_vp_priv {
- struct nouveau_falcon base;
+struct gk104_mspdec_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * VP object classes
+ * MSPDEC object classes
******************************************************************************/
-static struct nouveau_oclass
-nve0_vp_sclass[] = {
- { 0x95b2, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gk104_mspdec_sclass[] = {
+ { 0x95b2, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PVP context
+ * PMSPDEC context
******************************************************************************/
-static struct nouveau_oclass
-nve0_vp_cclass = {
- .handle = NV_ENGCTX(VP, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+gk104_mspdec_cclass = {
+ .handle = NV_ENGCTX(MSPDEC, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PVP engine/subdev functions
+ * PMSPDEC engine/subdev functions
******************************************************************************/
static int
-nve0_vp_init(struct nouveau_object *object)
+gk104_mspdec_init(struct nvkm_object *object)
{
- struct nve0_vp_priv *priv = (void *)object;
+ struct gk104_mspdec_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -76,35 +75,35 @@ nve0_vp_init(struct nouveau_object *object)
}
static int
-nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nve0_vp_priv *priv;
+ struct gk104_mspdec_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
- "PVP", "vp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PMSPDEC", "mspdec", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00020000;
- nv_subdev(priv)->intr = nouveau_falcon_intr;
- nv_engine(priv)->cclass = &nve0_vp_cclass;
- nv_engine(priv)->sclass = nve0_vp_sclass;
+ nv_subdev(priv)->intr = nvkm_falcon_intr;
+ nv_engine(priv)->cclass = &gk104_mspdec_cclass;
+ nv_engine(priv)->sclass = gk104_mspdec_sclass;
return 0;
}
-struct nouveau_oclass
-nve0_vp_oclass = {
- .handle = NV_ENGINE(VP, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_vp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nve0_vp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+gk104_mspdec_oclass = {
+ .handle = NV_ENGINE(MSPDEC, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_mspdec_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gk104_mspdec_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
new file mode 100644
index 000000000000..4576a9eee39d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/engine/msppp/g98.o
+nvkm-y += nvkm/engine/msppp/gf100.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 13bf31c40aa1..7a602a2dec94 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -21,53 +21,52 @@
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-
+#include <engine/msppp.h>
#include <engine/falcon.h>
-#include <engine/ppp.h>
-struct nv98_ppp_priv {
- struct nouveau_falcon base;
+struct g98_msppp_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * PPP object classes
+ * MSPPP object classes
******************************************************************************/
-static struct nouveau_oclass
-nv98_ppp_sclass[] = {
- { 0x88b3, &nouveau_object_ofuncs },
- { 0x85b3, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+g98_msppp_sclass[] = {
+ { 0x88b3, &nvkm_object_ofuncs },
+ { 0x85b3, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PPPP context
+ * PMSPPP context
******************************************************************************/
-static struct nouveau_oclass
-nv98_ppp_cclass = {
- .handle = NV_ENGCTX(PPP, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+g98_msppp_cclass = {
+ .handle = NV_ENGCTX(MSPPP, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PPPP engine/subdev functions
+ * PMSPPP engine/subdev functions
******************************************************************************/
static int
-nv98_ppp_init(struct nouveau_object *object)
+g98_msppp_init(struct nvkm_object *object)
{
- struct nv98_ppp_priv *priv = (void *)object;
+ struct g98_msppp_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -77,34 +76,34 @@ nv98_ppp_init(struct nouveau_object *object)
}
static int
-nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g98_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv98_ppp_priv *priv;
+ struct g98_msppp_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
- "PPPP", "ppp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
+ "PMSPPP", "msppp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00400002;
- nv_engine(priv)->cclass = &nv98_ppp_cclass;
- nv_engine(priv)->sclass = nv98_ppp_sclass;
+ nv_engine(priv)->cclass = &g98_msppp_cclass;
+ nv_engine(priv)->sclass = g98_msppp_sclass;
return 0;
}
-struct nouveau_oclass
-nv98_ppp_oclass = {
- .handle = NV_ENGINE(PPP, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_ppp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nv98_ppp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+g98_msppp_oclass = {
+ .handle = NV_ENGINE(MSPPP, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g98_msppp_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = g98_msppp_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 73719aaa62d6..6047baee1f75 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -21,52 +21,51 @@
*
* Authors: Maarten Lankhorst
*/
-
+#include <engine/msppp.h>
#include <engine/falcon.h>
-#include <engine/ppp.h>
-struct nvc0_ppp_priv {
- struct nouveau_falcon base;
+struct gf100_msppp_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * PPP object classes
+ * MSPPP object classes
******************************************************************************/
-static struct nouveau_oclass
-nvc0_ppp_sclass[] = {
- { 0x90b3, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gf100_msppp_sclass[] = {
+ { 0x90b3, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PPPP context
+ * PMSPPP context
******************************************************************************/
-static struct nouveau_oclass
-nvc0_ppp_cclass = {
- .handle = NV_ENGCTX(PPP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+gf100_msppp_cclass = {
+ .handle = NV_ENGCTX(MSPPP, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PPPP engine/subdev functions
+ * PMSPPP engine/subdev functions
******************************************************************************/
static int
-nvc0_ppp_init(struct nouveau_object *object)
+gf100_msppp_init(struct nvkm_object *object)
{
- struct nvc0_ppp_priv *priv = (void *)object;
+ struct gf100_msppp_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -76,35 +75,35 @@ nvc0_ppp_init(struct nouveau_object *object)
}
static int
-nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_ppp_priv *priv;
+ struct gf100_msppp_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
- "PPPP", "ppp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
+ "PMSPPP", "msppp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000002;
- nv_subdev(priv)->intr = nouveau_falcon_intr;
- nv_engine(priv)->cclass = &nvc0_ppp_cclass;
- nv_engine(priv)->sclass = nvc0_ppp_sclass;
+ nv_subdev(priv)->intr = nvkm_falcon_intr;
+ nv_engine(priv)->cclass = &gf100_msppp_cclass;
+ nv_engine(priv)->sclass = gf100_msppp_sclass;
return 0;
}
-struct nouveau_oclass
-nvc0_ppp_oclass = {
- .handle = NV_ENGINE(PPP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_ppp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nvc0_ppp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+gf100_msppp_oclass = {
+ .handle = NV_ENGINE(MSPPP, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_msppp_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gf100_msppp_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
new file mode 100644
index 000000000000..0c9811009e28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/engine/msvld/g98.o
+nvkm-y += nvkm/engine/msvld/gf100.o
+nvkm-y += nvkm/engine/msvld/gk104.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index 6b089e022fd2..c8a6b4ef52a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -21,54 +21,53 @@
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-
+#include <engine/msvld.h>
#include <engine/falcon.h>
-#include <engine/bsp.h>
-struct nv98_bsp_priv {
- struct nouveau_falcon base;
+struct g98_msvld_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * BSP object classes
+ * MSVLD object classes
******************************************************************************/
-static struct nouveau_oclass
-nv98_bsp_sclass[] = {
- { 0x88b1, &nouveau_object_ofuncs },
- { 0x85b1, &nouveau_object_ofuncs },
- { 0x86b1, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+g98_msvld_sclass[] = {
+ { 0x88b1, &nvkm_object_ofuncs },
+ { 0x85b1, &nvkm_object_ofuncs },
+ { 0x86b1, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PBSP context
+ * PMSVLD context
******************************************************************************/
-static struct nouveau_oclass
-nv98_bsp_cclass = {
- .handle = NV_ENGCTX(BSP, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+g98_msvld_cclass = {
+ .handle = NV_ENGCTX(MSVLD, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PBSP engine/subdev functions
+ * PMSVLD engine/subdev functions
******************************************************************************/
static int
-nv98_bsp_init(struct nouveau_object *object)
+g98_msvld_init(struct nvkm_object *object)
{
- struct nv98_bsp_priv *priv = (void *)object;
+ struct g98_msvld_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -78,34 +77,34 @@ nv98_bsp_init(struct nouveau_object *object)
}
static int
-nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g98_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv98_bsp_priv *priv;
+ struct g98_msvld_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
- "PBSP", "bsp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PMSVLD", "msvld", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
- nv_engine(priv)->cclass = &nv98_bsp_cclass;
- nv_engine(priv)->sclass = nv98_bsp_sclass;
+ nv_engine(priv)->cclass = &g98_msvld_cclass;
+ nv_engine(priv)->sclass = g98_msvld_sclass;
return 0;
}
-struct nouveau_oclass
-nv98_bsp_oclass = {
- .handle = NV_ENGINE(BSP, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_bsp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nv98_bsp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+g98_msvld_oclass = {
+ .handle = NV_ENGINE(MSVLD, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g98_msvld_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = g98_msvld_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index ce860de43e61..b8d1e0f521ef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -21,52 +21,51 @@
*
* Authors: Maarten Lankhorst
*/
-
+#include <engine/msvld.h>
#include <engine/falcon.h>
-#include <engine/bsp.h>
-struct nvc0_bsp_priv {
- struct nouveau_falcon base;
+struct gf100_msvld_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * BSP object classes
+ * MSVLD object classes
******************************************************************************/
-static struct nouveau_oclass
-nvc0_bsp_sclass[] = {
- { 0x90b1, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gf100_msvld_sclass[] = {
+ { 0x90b1, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PBSP context
+ * PMSVLD context
******************************************************************************/
-static struct nouveau_oclass
-nvc0_bsp_cclass = {
- .handle = NV_ENGCTX(BSP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+gf100_msvld_cclass = {
+ .handle = NV_ENGCTX(MSVLD, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PBSP engine/subdev functions
+ * PMSVLD engine/subdev functions
******************************************************************************/
static int
-nvc0_bsp_init(struct nouveau_object *object)
+gf100_msvld_init(struct nvkm_object *object)
{
- struct nvc0_bsp_priv *priv = (void *)object;
+ struct gf100_msvld_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -76,35 +75,35 @@ nvc0_bsp_init(struct nouveau_object *object)
}
static int
-nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_bsp_priv *priv;
+ struct gf100_msvld_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
- "PBSP", "bsp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PMSVLD", "msvld", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00008000;
- nv_subdev(priv)->intr = nouveau_falcon_intr;
- nv_engine(priv)->cclass = &nvc0_bsp_cclass;
- nv_engine(priv)->sclass = nvc0_bsp_sclass;
+ nv_subdev(priv)->intr = nvkm_falcon_intr;
+ nv_engine(priv)->cclass = &gf100_msvld_cclass;
+ nv_engine(priv)->sclass = gf100_msvld_sclass;
return 0;
}
-struct nouveau_oclass
-nvc0_bsp_oclass = {
- .handle = NV_ENGINE(BSP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_bsp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nvc0_bsp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+gf100_msvld_oclass = {
+ .handle = NV_ENGINE(MSVLD, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_msvld_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gf100_msvld_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index ba6aeca0285e..a0b0927834df 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -21,52 +21,51 @@
*
* Authors: Ben Skeggs
*/
-
+#include <engine/msvld.h>
#include <engine/falcon.h>
-#include <engine/bsp.h>
-struct nve0_bsp_priv {
- struct nouveau_falcon base;
+struct gk104_msvld_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
- * BSP object classes
+ * MSVLD object classes
******************************************************************************/
-static struct nouveau_oclass
-nve0_bsp_sclass[] = {
- { 0x95b1, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+gk104_msvld_sclass[] = {
+ { 0x95b1, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PBSP context
+ * PMSVLD context
******************************************************************************/
-static struct nouveau_oclass
-nve0_bsp_cclass = {
- .handle = NV_ENGCTX(BSP, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+gk104_msvld_cclass = {
+ .handle = NV_ENGCTX(MSVLD, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PBSP engine/subdev functions
+ * PMSVLD engine/subdev functions
******************************************************************************/
static int
-nve0_bsp_init(struct nouveau_object *object)
+gk104_msvld_init(struct nvkm_object *object)
{
- struct nve0_bsp_priv *priv = (void *)object;
+ struct gk104_msvld_priv *priv = (void *)object;
int ret;
- ret = nouveau_falcon_init(&priv->base);
+ ret = nvkm_falcon_init(&priv->base);
if (ret)
return ret;
@@ -76,35 +75,35 @@ nve0_bsp_init(struct nouveau_object *object)
}
static int
-nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nve0_bsp_priv *priv;
+ struct gk104_msvld_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
- "PBSP", "bsp", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PMSVLD", "msvld", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00008000;
- nv_subdev(priv)->intr = nouveau_falcon_intr;
- nv_engine(priv)->cclass = &nve0_bsp_cclass;
- nv_engine(priv)->sclass = nve0_bsp_sclass;
+ nv_subdev(priv)->intr = nvkm_falcon_intr;
+ nv_engine(priv)->cclass = &gk104_msvld_cclass;
+ nv_engine(priv)->sclass = gk104_msvld_sclass;
return 0;
}
-struct nouveau_oclass
-nve0_bsp_oclass = {
- .handle = NV_ENGINE(BSP, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_bsp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nve0_bsp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+gk104_msvld_oclass = {
+ .handle = NV_ENGINE(MSVLD, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_msvld_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = gk104_msvld_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
new file mode 100644
index 000000000000..413b6091e256
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -0,0 +1,9 @@
+nvkm-y += nvkm/engine/pm/base.o
+nvkm-y += nvkm/engine/pm/daemon.o
+nvkm-y += nvkm/engine/pm/nv40.o
+nvkm-y += nvkm/engine/pm/nv50.o
+nvkm-y += nvkm/engine/pm/g84.o
+nvkm-y += nvkm/engine/pm/gt215.o
+nvkm-y += nvkm/engine/pm/gf100.o
+nvkm-y += nvkm/engine/pm/gk104.o
+nvkm-y += nvkm/engine/pm/gk110.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 63013812f7c9..2006c445938d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -21,22 +21,21 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <core/client.h>
+#include <core/device.h>
#include <core/option.h>
-#include <nvif/unpack.h>
+
#include <nvif/class.h>
#include <nvif/ioctl.h>
-
-#include <subdev/clock.h>
-
-#include "priv.h"
+#include <nvif/unpack.h>
#define QUAD_MASK 0x0f
#define QUAD_FREE 0x01
-static struct nouveau_perfsig *
-nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
+static struct nvkm_perfsig *
+nvkm_perfsig_find_(struct nvkm_perfdom *dom, const char *name, u32 size)
{
char path[64];
int i;
@@ -58,16 +57,16 @@ nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
return NULL;
}
-struct nouveau_perfsig *
-nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
- struct nouveau_perfdom **pdom)
+struct nvkm_perfsig *
+nvkm_perfsig_find(struct nvkm_pm *ppm, const char *name, u32 size,
+ struct nvkm_perfdom **pdom)
{
- struct nouveau_perfdom *dom = *pdom;
- struct nouveau_perfsig *sig;
+ struct nvkm_perfdom *dom = *pdom;
+ struct nvkm_perfsig *sig;
if (dom == NULL) {
list_for_each_entry(dom, &ppm->domains, head) {
- sig = nouveau_perfsig_find_(dom, name, size);
+ sig = nvkm_perfsig_find_(dom, name, size);
if (sig) {
*pdom = dom;
return sig;
@@ -77,17 +76,17 @@ nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
return NULL;
}
- return nouveau_perfsig_find_(dom, name, size);
+ return nvkm_perfsig_find_(dom, name, size);
}
-struct nouveau_perfctr *
-nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
- struct nouveau_perfdom **pdom)
+struct nvkm_perfctr *
+nvkm_perfsig_wrap(struct nvkm_pm *ppm, const char *name,
+ struct nvkm_perfdom **pdom)
{
- struct nouveau_perfsig *sig;
- struct nouveau_perfctr *ctr;
+ struct nvkm_perfsig *sig;
+ struct nvkm_perfctr *ctr;
- sig = nouveau_perfsig_find(ppm, name, strlen(name), pdom);
+ sig = nvkm_perfsig_find(ppm, name, strlen(name), pdom);
if (!sig)
return NULL;
@@ -104,16 +103,16 @@ nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
* Perfmon object classes
******************************************************************************/
static int
-nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size)
+nvkm_perfctr_query(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_perfctr_query_v0 v0;
} *args = data;
- struct nouveau_device *device = nv_device(object);
- struct nouveau_perfmon *ppm = (void *)object->engine;
- struct nouveau_perfdom *dom = NULL, *chk;
- const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
- const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
+ struct nvkm_device *device = nv_device(object);
+ struct nvkm_pm *ppm = (void *)object->engine;
+ struct nvkm_perfdom *dom = NULL, *chk;
+ const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
+ const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
const char *name;
int tmp = 0, di, si;
int ret;
@@ -163,14 +162,14 @@ nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size)
}
static int
-nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size)
+nvkm_perfctr_sample(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_perfctr_sample none;
} *args = data;
- struct nouveau_perfmon *ppm = (void *)object->engine;
- struct nouveau_perfctr *ctr, *tmp;
- struct nouveau_perfdom *dom;
+ struct nvkm_pm *ppm = (void *)object->engine;
+ struct nvkm_perfctr *ctr, *tmp;
+ struct nvkm_perfdom *dom;
int ret;
nv_ioctl(object, "perfctr sample size %d\n", size);
@@ -187,7 +186,7 @@ nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size)
tmp = NULL;
while (!list_empty(&dom->list)) {
ctr = list_first_entry(&dom->list,
- typeof(*ctr), head);
+ typeof(*ctr), head);
if (ctr->slot < 0) break;
if ( tmp && tmp == ctr) break;
if (!tmp) tmp = ctr;
@@ -216,12 +215,12 @@ nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size)
}
static int
-nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size)
+nvkm_perfctr_read(struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_perfctr_read_v0 v0;
} *args = data;
- struct nouveau_perfctr *ctr = (void *)object;
+ struct nvkm_perfctr *ctr = (void *)object;
int ret;
nv_ioctl(object, "perfctr read size %d\n", size);
@@ -239,16 +238,15 @@ nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size)
}
static int
-nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nvkm_perfctr_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
switch (mthd) {
case NVIF_PERFCTR_V0_QUERY:
- return nouveau_perfctr_query(object, data, size);
+ return nvkm_perfctr_query(object, data, size);
case NVIF_PERFCTR_V0_SAMPLE:
- return nouveau_perfctr_sample(object, data, size);
+ return nvkm_perfctr_sample(object, data, size);
case NVIF_PERFCTR_V0_READ:
- return nouveau_perfctr_read(object, data, size);
+ return nvkm_perfctr_read(object, data, size);
default:
break;
}
@@ -256,27 +254,26 @@ nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd,
}
static void
-nouveau_perfctr_dtor(struct nouveau_object *object)
+nvkm_perfctr_dtor(struct nvkm_object *object)
{
- struct nouveau_perfctr *ctr = (void *)object;
+ struct nvkm_perfctr *ctr = (void *)object;
if (ctr->head.next)
list_del(&ctr->head);
- nouveau_object_destroy(&ctr->base);
+ nvkm_object_destroy(&ctr->base);
}
static int
-nouveau_perfctr_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvkm_perfctr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
struct nvif_perfctr_v0 v0;
} *args = data;
- struct nouveau_perfmon *ppm = (void *)engine;
- struct nouveau_perfdom *dom = NULL;
- struct nouveau_perfsig *sig[4] = {};
- struct nouveau_perfctr *ctr;
+ struct nvkm_pm *ppm = (void *)engine;
+ struct nvkm_perfdom *dom = NULL;
+ struct nvkm_perfsig *sig[4] = {};
+ struct nvkm_perfctr *ctr;
int ret, i;
nv_ioctl(parent, "create perfctr size %d\n", size);
@@ -287,15 +284,15 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
return ret;
for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
- sig[i] = nouveau_perfsig_find(ppm, args->v0.name[i],
- strnlen(args->v0.name[i],
- sizeof(args->v0.name[i])),
- &dom);
+ sig[i] = nvkm_perfsig_find(ppm, args->v0.name[i],
+ strnlen(args->v0.name[i],
+ sizeof(args->v0.name[i])),
+ &dom);
if (!sig[i])
return -EINVAL;
}
- ret = nouveau_object_create(parent, engine, oclass, 0, &ctr);
+ ret = nvkm_object_create(parent, engine, oclass, 0, &ctr);
*pobject = nv_object(ctr);
if (ret)
return ret;
@@ -311,19 +308,19 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_ofuncs
-nouveau_perfctr_ofuncs = {
- .ctor = nouveau_perfctr_ctor,
- .dtor = nouveau_perfctr_dtor,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
- .mthd = nouveau_perfctr_mthd,
+static struct nvkm_ofuncs
+nvkm_perfctr_ofuncs = {
+ .ctor = nvkm_perfctr_ctor,
+ .dtor = nvkm_perfctr_dtor,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
+ .mthd = nvkm_perfctr_mthd,
};
-struct nouveau_oclass
-nouveau_perfmon_sclass[] = {
+struct nvkm_oclass
+nvkm_pm_sclass[] = {
{ .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
- .ofuncs = &nouveau_perfctr_ofuncs,
+ .ofuncs = &nvkm_perfctr_ofuncs,
},
{},
};
@@ -332,27 +329,25 @@ nouveau_perfmon_sclass[] = {
* PPM context
******************************************************************************/
static void
-nouveau_perfctx_dtor(struct nouveau_object *object)
+nvkm_perfctx_dtor(struct nvkm_object *object)
{
- struct nouveau_perfmon *ppm = (void *)object->engine;
+ struct nvkm_pm *ppm = (void *)object->engine;
mutex_lock(&nv_subdev(ppm)->mutex);
- nouveau_engctx_destroy(&ppm->context->base);
+ nvkm_engctx_destroy(&ppm->context->base);
ppm->context = NULL;
mutex_unlock(&nv_subdev(ppm)->mutex);
}
static int
-nouveau_perfctx_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_perfmon *ppm = (void *)engine;
- struct nouveau_perfctx *ctx;
+ struct nvkm_pm *ppm = (void *)engine;
+ struct nvkm_perfctx *ctx;
int ret;
- ret = nouveau_engctx_create(parent, engine, oclass, NULL,
- 0, 0, 0, &ctx);
+ ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx);
*pobject = nv_object(ctx);
if (ret)
return ret;
@@ -368,14 +363,14 @@ nouveau_perfctx_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_oclass
-nouveau_perfmon_cclass = {
- .handle = NV_ENGCTX(PERFMON, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nouveau_perfctx_ctor,
- .dtor = nouveau_perfctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
+struct nvkm_oclass
+nvkm_pm_cclass = {
+ .handle = NV_ENGCTX(PM, 0x00),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nvkm_perfctx_ctor,
+ .dtor = nvkm_perfctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
},
};
@@ -383,13 +378,13 @@ nouveau_perfmon_cclass = {
* PPM engine/subdev functions
******************************************************************************/
int
-nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
- u32 base, u32 size_unit, u32 size_domain,
- const struct nouveau_specdom *spec)
+nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
+ u32 base, u32 size_unit, u32 size_domain,
+ const struct nvkm_specdom *spec)
{
- const struct nouveau_specdom *sdom;
- const struct nouveau_specsig *ssig;
- struct nouveau_perfdom *dom;
+ const struct nvkm_specdom *sdom;
+ const struct nvkm_specsig *ssig;
+ struct nvkm_perfdom *dom;
int i;
for (i = 0; i == 0 || mask; i++) {
@@ -436,44 +431,42 @@ nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
}
int
-_nouveau_perfmon_fini(struct nouveau_object *object, bool suspend)
+_nvkm_pm_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_perfmon *ppm = (void *)object;
- return nouveau_engine_fini(&ppm->base, suspend);
+ struct nvkm_pm *ppm = (void *)object;
+ return nvkm_engine_fini(&ppm->base, suspend);
}
int
-_nouveau_perfmon_init(struct nouveau_object *object)
+_nvkm_pm_init(struct nvkm_object *object)
{
- struct nouveau_perfmon *ppm = (void *)object;
- return nouveau_engine_init(&ppm->base);
+ struct nvkm_pm *ppm = (void *)object;
+ return nvkm_engine_init(&ppm->base);
}
void
-_nouveau_perfmon_dtor(struct nouveau_object *object)
+_nvkm_pm_dtor(struct nvkm_object *object)
{
- struct nouveau_perfmon *ppm = (void *)object;
- struct nouveau_perfdom *dom, *tmp;
+ struct nvkm_pm *ppm = (void *)object;
+ struct nvkm_perfdom *dom, *tmp;
list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
list_del(&dom->head);
kfree(dom);
}
- nouveau_engine_destroy(&ppm->base);
+ nvkm_engine_destroy(&ppm->base);
}
int
-nouveau_perfmon_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
+nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_perfmon *ppm;
+ struct nvkm_pm *ppm;
int ret;
- ret = nouveau_engine_create_(parent, engine, oclass, true, "PPM",
- "perfmon", length, pobject);
+ ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM",
+ "pm", length, pobject);
ppm = *pobject;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
index 50696cc7b7d7..a7a5f3a3c91b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
@@ -21,12 +21,11 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static void
-pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
- struct nouveau_perfctr *ctr)
+pwr_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+ struct nvkm_perfctr *ctr)
{
u32 mask = 0x00000000;
u32 ctrl = 0x00000001;
@@ -41,15 +40,15 @@ pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
}
static void
-pwr_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
- struct nouveau_perfctr *ctr)
+pwr_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+ struct nvkm_perfctr *ctr)
{
ctr->ctr = ppm->pwr[ctr->slot];
ctr->clk = ppm->pwr[ppm->last];
}
static void
-pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+pwr_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
{
int i;
@@ -59,16 +58,16 @@ pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
}
}
-static const struct nouveau_funcdom
+static const struct nvkm_funcdom
pwr_perfctr_func = {
.init = pwr_perfctr_init,
.read = pwr_perfctr_read,
.next = pwr_perfctr_next,
};
-const struct nouveau_specdom
-nva3_perfmon_pwr[] = {
- { 0x20, (const struct nouveau_specsig[]) {
+const struct nvkm_specdom
+gt215_pm_pwr[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
{ 0x00, "pwr_gr_idle" },
{ 0x04, "pwr_bsp_idle" },
{ 0x05, "pwr_vp_idle" },
@@ -79,9 +78,9 @@ nva3_perfmon_pwr[] = {
{}
};
-const struct nouveau_specdom
-nvc0_perfmon_pwr[] = {
- { 0x20, (const struct nouveau_specsig[]) {
+const struct nvkm_specdom
+gf100_pm_pwr[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
{ 0x00, "pwr_gr_idle" },
{ 0x04, "pwr_bsp_idle" },
{ 0x05, "pwr_vp_idle" },
@@ -93,9 +92,9 @@ nvc0_perfmon_pwr[] = {
{}
};
-const struct nouveau_specdom
-nve0_perfmon_pwr[] = {
- { 0x20, (const struct nouveau_specsig[]) {
+const struct nvkm_specdom
+gk104_pm_pwr[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
{ 0x00, "pwr_gr_idle" },
{ 0x04, "pwr_bsp_idle" },
{ 0x05, "pwr_vp_idle" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
new file mode 100644
index 000000000000..d54c6705ba17
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv40.h"
+
+static const struct nvkm_specdom
+g84_pm[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+struct nvkm_oclass *
+g84_pm_oclass = &(struct nv40_pm_oclass) {
+ .base.handle = NV_ENGINE(PM, 0x84),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv40_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = _nvkm_pm_fini,
+ },
+ .doms = g84_pm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index 74b241042502..008fed73dd82 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -21,42 +21,29 @@
*
* Authors: Ben Skeggs
*/
+#include "gf100.h"
-#include "nvc0.h"
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static const struct nouveau_specdom
-nvc0_perfmon_hub[] = {
+static const struct nvkm_specdom
+gf100_pm_hub[] = {
{}
};
-static const struct nouveau_specdom
-nvc0_perfmon_gpc[] = {
+static const struct nvkm_specdom
+gf100_pm_gpc[] = {
{}
};
-static const struct nouveau_specdom
-nvc0_perfmon_part[] = {
+static const struct nvkm_specdom
+gf100_pm_part[] = {
{}
};
static void
-nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
- struct nouveau_perfctr *ctr)
+gf100_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+ struct nvkm_perfctr *ctr)
{
- struct nvc0_perfmon_priv *priv = (void *)ppm;
- struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+ struct gf100_pm_priv *priv = (void *)ppm;
+ struct gf100_pm_cntr *cntr = (void *)ctr;
u32 log = ctr->logic_op;
u32 src = 0x00000000;
int i;
@@ -71,11 +58,11 @@ nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
}
static void
-nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
- struct nouveau_perfctr *ctr)
+gf100_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+ struct nvkm_perfctr *ctr)
{
- struct nvc0_perfmon_priv *priv = (void *)ppm;
- struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+ struct gf100_pm_priv *priv = (void *)ppm;
+ struct gf100_pm_cntr *cntr = (void *)ctr;
switch (cntr->base.slot) {
case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
@@ -87,51 +74,50 @@ nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
}
static void
-nvc0_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+gf100_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
{
- struct nvc0_perfmon_priv *priv = (void *)ppm;
+ struct gf100_pm_priv *priv = (void *)ppm;
nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
}
-const struct nouveau_funcdom
-nvc0_perfctr_func = {
- .init = nvc0_perfctr_init,
- .read = nvc0_perfctr_read,
- .next = nvc0_perfctr_next,
+const struct nvkm_funcdom
+gf100_perfctr_func = {
+ .init = gf100_perfctr_init,
+ .read = gf100_perfctr_read,
+ .next = gf100_perfctr_next,
};
int
-nvc0_perfmon_fini(struct nouveau_object *object, bool suspend)
+gf100_pm_fini(struct nvkm_object *object, bool suspend)
{
- struct nvc0_perfmon_priv *priv = (void *)object;
+ struct gf100_pm_priv *priv = (void *)object;
nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
- return nouveau_perfmon_fini(&priv->base, suspend);
+ return nvkm_pm_fini(&priv->base, suspend);
}
static int
-nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_perfmon_priv *priv;
+ struct gf100_pm_priv *priv;
u32 mask;
int ret;
- ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+ ret = nvkm_pm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
- nvc0_perfmon_pwr);
+ ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gf100_pm_pwr);
if (ret)
return ret;
/* HUB */
- ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
- nvc0_perfmon_hub);
+ ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+ gf100_pm_hub);
if (ret)
return ret;
@@ -140,8 +126,8 @@ nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
mask &= ~nv_rd32(priv, 0x022504);
mask &= ~nv_rd32(priv, 0x022584);
- ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
- 0x1000, 0x200, nvc0_perfmon_gpc);
+ ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+ 0x1000, 0x200, gf100_pm_gpc);
if (ret)
return ret;
@@ -150,24 +136,24 @@ nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
mask &= ~nv_rd32(priv, 0x022548);
mask &= ~nv_rd32(priv, 0x0225c8);
- ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
- 0x1000, 0x200, nvc0_perfmon_part);
+ ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+ 0x1000, 0x200, gf100_pm_part);
if (ret)
return ret;
- nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
- nv_engine(priv)->sclass = nouveau_perfmon_sclass;
+ nv_engine(priv)->cclass = &nvkm_pm_cclass;
+ nv_engine(priv)->sclass = nvkm_pm_sclass;
priv->base.last = 7;
return 0;
}
-struct nouveau_oclass
-nvc0_perfmon_oclass = {
- .handle = NV_ENGINE(PERFMON, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = nvc0_perfmon_fini,
+struct nvkm_oclass
+gf100_pm_oclass = {
+ .handle = NV_ENGINE(PM, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = gf100_pm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
new file mode 100644
index 000000000000..6a01fc7fec6f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -0,0 +1,15 @@
+#ifndef __NVKM_PM_NVC0_H__
+#define __NVKM_PM_NVC0_H__
+#include "priv.h"
+
+struct gf100_pm_priv {
+ struct nvkm_pm base;
+};
+
+struct gf100_pm_cntr {
+ struct nvkm_perfctr base;
+};
+
+extern const struct nvkm_funcdom gf100_perfctr_func;
+int gf100_pm_fini(struct nvkm_object *, bool);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
new file mode 100644
index 000000000000..75b9ff3d1a2c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gf100.h"
+
+static const struct nvkm_specdom
+gk104_pm_hub[] = {
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "hub00_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x40, (const struct nvkm_specsig[]) {
+ { 0x27, "hub01_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "hub02_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "hub03_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x40, (const struct nvkm_specsig[]) {
+ { 0x03, "host_mmio_rd" },
+ { 0x27, "hub04_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "hub05_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0xc0, (const struct nvkm_specsig[]) {
+ { 0x74, "host_fb_rd3x" },
+ { 0x75, "host_fb_rd3x_2" },
+ { 0xa7, "hub06_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "hub07_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ {}
+};
+
+static const struct nvkm_specdom
+gk104_pm_gpc[] = {
+ { 0xe0, (const struct nvkm_specsig[]) {
+ { 0xc7, "gpc00_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ {}
+};
+
+static const struct nvkm_specdom
+gk104_pm_part[] = {
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "part00_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ { 0x60, (const struct nvkm_specsig[]) {
+ { 0x47, "part01_user_0" },
+ {}
+ }, &gf100_perfctr_func },
+ {}
+};
+
+static int
+gk104_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gf100_pm_priv *priv;
+ u32 mask;
+ int ret;
+
+ ret = nvkm_pm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ /* PDAEMON */
+ ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
+ if (ret)
+ return ret;
+
+ /* HUB */
+ ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+ gk104_pm_hub);
+ if (ret)
+ return ret;
+
+ /* GPC */
+ mask = (1 << nv_rd32(priv, 0x022430)) - 1;
+ mask &= ~nv_rd32(priv, 0x022504);
+ mask &= ~nv_rd32(priv, 0x022584);
+
+ ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+ 0x1000, 0x200, gk104_pm_gpc);
+ if (ret)
+ return ret;
+
+ /* PART */
+ mask = (1 << nv_rd32(priv, 0x022438)) - 1;
+ mask &= ~nv_rd32(priv, 0x022548);
+ mask &= ~nv_rd32(priv, 0x0225c8);
+
+ ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+ 0x1000, 0x200, gk104_pm_part);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nvkm_pm_cclass;
+ nv_engine(priv)->sclass = nvkm_pm_sclass;
+ priv->base.last = 7;
+ return 0;
+}
+
+struct nvkm_oclass
+gk104_pm_oclass = {
+ .handle = NV_ENGINE(PM, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = gf100_pm_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
new file mode 100644
index 000000000000..6820176e5f78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gf100.h"
+
+static int
+gk110_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gf100_pm_priv *priv;
+ int ret;
+
+ ret = nvkm_pm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nvkm_pm_cclass;
+ nv_engine(priv)->sclass = nvkm_pm_sclass;
+ return 0;
+}
+
+struct nvkm_oclass
+gk110_pm_oclass = {
+ .handle = NV_ENGINE(PM, 0xf0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk110_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = gf100_pm_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index 9232c7fc6253..d065bfc59bbf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -21,58 +21,63 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv40.h"
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static const struct nouveau_specdom
-nv84_perfmon[] = {
- { 0x20, (const struct nouveau_specsig[]) {
+static const struct nvkm_specdom
+gt215_pm[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{}
};
-struct nouveau_oclass *
-nv84_perfmon_oclass = &(struct nv40_perfmon_oclass) {
- .base.handle = NV_ENGINE(PERFMON, 0x84),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = _nouveau_perfmon_fini,
+static int
+gt215_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **object)
+{
+ int ret = nv40_pm_ctor(parent, engine, oclass, data, size, object);
+ if (ret == 0) {
+ struct nv40_pm_priv *priv = (void *)*object;
+ ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+ gt215_pm_pwr);
+ if (ret)
+ return ret;
+
+ priv->base.last = 3;
+ }
+ return ret;
+}
+
+struct nvkm_oclass *
+gt215_pm_oclass = &(struct nv40_pm_oclass) {
+ .base.handle = NV_ENGINE(PM, 0xa3),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt215_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = _nvkm_pm_fini,
},
- .doms = nv84_perfmon,
+ .doms = gt215_pm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index b2a10785adb1..ff22f06b22b8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -21,27 +21,14 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv40.h"
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
static void
-nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
- struct nouveau_perfctr *ctr)
+nv40_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+ struct nvkm_perfctr *ctr)
{
- struct nv40_perfmon_priv *priv = (void *)ppm;
- struct nv40_perfmon_cntr *cntr = (void *)ctr;
+ struct nv40_pm_priv *priv = (void *)ppm;
+ struct nv40_pm_cntr *cntr = (void *)ctr;
u32 log = ctr->logic_op;
u32 src = 0x00000000;
int i;
@@ -55,11 +42,11 @@ nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
}
static void
-nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
- struct nouveau_perfctr *ctr)
+nv40_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+ struct nvkm_perfctr *ctr)
{
- struct nv40_perfmon_priv *priv = (void *)ppm;
- struct nv40_perfmon_cntr *cntr = (void *)ctr;
+ struct nv40_pm_priv *priv = (void *)ppm;
+ struct nv40_pm_cntr *cntr = (void *)ctr;
switch (cntr->base.slot) {
case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
@@ -71,73 +58,73 @@ nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
}
static void
-nv40_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+nv40_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
{
- struct nv40_perfmon_priv *priv = (void *)ppm;
+ struct nv40_pm_priv *priv = (void *)ppm;
if (priv->sequence != ppm->sequence) {
nv_wr32(priv, 0x400084, 0x00000020);
priv->sequence = ppm->sequence;
}
}
-const struct nouveau_funcdom
+const struct nvkm_funcdom
nv40_perfctr_func = {
.init = nv40_perfctr_init,
.read = nv40_perfctr_read,
.next = nv40_perfctr_next,
};
-static const struct nouveau_specdom
-nv40_perfmon[] = {
- { 0x20, (const struct nouveau_specsig[]) {
+static const struct nvkm_specdom
+nv40_pm[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nouveau_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{}
};
int
-nv40_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv40_perfmon_oclass *mclass = (void *)oclass;
- struct nv40_perfmon_priv *priv;
+ struct nv40_pm_oclass *mclass = (void *)oclass;
+ struct nv40_pm_priv *priv;
int ret;
- ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+ ret = nvkm_pm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
+ ret = nvkm_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
if (ret)
return ret;
- nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
- nv_engine(priv)->sclass = nouveau_perfmon_sclass;
+ nv_engine(priv)->cclass = &nvkm_pm_cclass;
+ nv_engine(priv)->sclass = nvkm_pm_sclass;
return 0;
}
-struct nouveau_oclass *
-nv40_perfmon_oclass = &(struct nv40_perfmon_oclass) {
- .base.handle = NV_ENGINE(PERFMON, 0x40),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_perfmon_ctor,
- .dtor = _nouveau_perfmon_dtor,
- .init = _nouveau_perfmon_init,
- .fini = _nouveau_perfmon_fini,
+struct nvkm_oclass *
+nv40_pm_oclass = &(struct nv40_pm_oclass) {
+ .base.handle = NV_ENGINE(PM, 0x40),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv40_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = _nvkm_pm_fini,
},
- .doms = nv40_perfmon,
+ .doms = nv40_pm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
new file mode 100644
index 000000000000..2338e150420e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -0,0 +1,24 @@
+#ifndef __NVKM_PM_NV40_H__
+#define __NVKM_PM_NV40_H__
+#include "priv.h"
+
+struct nv40_pm_oclass {
+ struct nvkm_oclass base;
+ const struct nvkm_specdom *doms;
+};
+
+struct nv40_pm_priv {
+ struct nvkm_pm base;
+ u32 sequence;
+};
+
+int nv40_pm_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **pobject);
+
+struct nv40_pm_cntr {
+ struct nvkm_perfctr base;
+};
+
+extern const struct nvkm_funcdom nv40_perfctr_func;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
new file mode 100644
index 000000000000..6af83b5d1b11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv40.h"
+
+static const struct nvkm_specdom
+nv50_pm[] = {
+ { 0x040, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x100, (const struct nvkm_specsig[]) {
+ { 0xc8, "gr_idle" },
+ {}
+ }, &nv40_perfctr_func },
+ { 0x100, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x020, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x040, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+struct nvkm_oclass *
+nv50_pm_oclass = &(struct nv40_pm_oclass) {
+ .base.handle = NV_ENGINE(PM, 0x50),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv40_pm_ctor,
+ .dtor = _nvkm_pm_dtor,
+ .init = _nvkm_pm_init,
+ .fini = _nvkm_pm_fini,
+ },
+ .doms = nv50_pm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
new file mode 100644
index 000000000000..1e6eff2a6d79
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -0,0 +1,90 @@
+#ifndef __NVKM_PM_PRIV_H__
+#define __NVKM_PM_PRIV_H__
+#include <engine/pm.h>
+
+struct nvkm_perfctr {
+ struct nvkm_object base;
+ struct list_head head;
+ struct nvkm_perfsig *signal[4];
+ int slot;
+ u32 logic_op;
+ u32 clk;
+ u32 ctr;
+};
+
+extern struct nvkm_oclass nvkm_pm_sclass[];
+
+#include <core/engctx.h>
+
+struct nvkm_perfctx {
+ struct nvkm_engctx base;
+};
+
+extern struct nvkm_oclass nvkm_pm_cclass;
+
+struct nvkm_specsig {
+ u8 signal;
+ const char *name;
+};
+
+struct nvkm_perfsig {
+ const char *name;
+};
+
+struct nvkm_perfdom;
+struct nvkm_perfctr *
+nvkm_perfsig_wrap(struct nvkm_pm *, const char *, struct nvkm_perfdom **);
+
+struct nvkm_specdom {
+ u16 signal_nr;
+ const struct nvkm_specsig *signal;
+ const struct nvkm_funcdom *func;
+};
+
+extern const struct nvkm_specdom gt215_pm_pwr[];
+extern const struct nvkm_specdom gf100_pm_pwr[];
+extern const struct nvkm_specdom gk104_pm_pwr[];
+
+struct nvkm_perfdom {
+ struct list_head head;
+ struct list_head list;
+ const struct nvkm_funcdom *func;
+ char name[32];
+ u32 addr;
+ u8 quad;
+ u32 signal_nr;
+ struct nvkm_perfsig signal[];
+};
+
+struct nvkm_funcdom {
+ void (*init)(struct nvkm_pm *, struct nvkm_perfdom *,
+ struct nvkm_perfctr *);
+ void (*read)(struct nvkm_pm *, struct nvkm_perfdom *,
+ struct nvkm_perfctr *);
+ void (*next)(struct nvkm_pm *, struct nvkm_perfdom *);
+};
+
+int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
+ const struct nvkm_specdom *);
+
+#define nvkm_pm_create(p,e,o,d) \
+ nvkm_pm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_pm_dtor(p) ({ \
+ struct nvkm_pm *c = (p); \
+ _nvkm_pm_dtor(nv_object(c)); \
+})
+#define nvkm_pm_init(p) ({ \
+ struct nvkm_pm *c = (p); \
+ _nvkm_pm_init(nv_object(c)); \
+})
+#define nvkm_pm_fini(p,s) ({ \
+ struct nvkm_pm *c = (p); \
+ _nvkm_pm_fini(nv_object(c), (s)); \
+})
+
+int nvkm_pm_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_pm_dtor(struct nvkm_object *);
+int _nvkm_pm_init(struct nvkm_object *);
+int _nvkm_pm_fini(struct nvkm_object *, bool);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild
new file mode 100644
index 000000000000..552d40a4641f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/engine/sec/g98.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
index 629da02dc352..06ee06071104 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
@@ -1,5 +1,5 @@
/*
- * fuc microcode for nv98 pcrypt engine
+ * fuc microcode for g98 psec engine
* Copyright (C) 2010 Marcin Kościelnicki
*
* This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-.section #nv98_pcrypt_data
+.section #g98_psec_data
ctx_dma:
ctx_dma_query: .b32 0
@@ -70,31 +70,31 @@ engine_cmd_dtable:
.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
.b32 #ctx_dst_address_high + 0x20000 ~0xff
.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
-.b32 #crypt_cmd_mode + 0x00000 ~0xf
-.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.b32 #sec_cmd_mode + 0x00000 ~0xf
+.b32 #sec_cmd_length + 0x10000 ~0x0ffffff0
.equ #engine_cmd_max 0xce
.align 4
-crypt_dtable:
-.b16 #crypt_copy_prep #crypt_do_inout
-.b16 #crypt_store_prep #crypt_do_out
-.b16 #crypt_ecb_e_prep #crypt_do_inout
-.b16 #crypt_ecb_d_prep #crypt_do_inout
-.b16 #crypt_cbc_e_prep #crypt_do_inout
-.b16 #crypt_cbc_d_prep #crypt_do_inout
-.b16 #crypt_pcbc_e_prep #crypt_do_inout
-.b16 #crypt_pcbc_d_prep #crypt_do_inout
-.b16 #crypt_cfb_e_prep #crypt_do_inout
-.b16 #crypt_cfb_d_prep #crypt_do_inout
-.b16 #crypt_ofb_prep #crypt_do_inout
-.b16 #crypt_ctr_prep #crypt_do_inout
-.b16 #crypt_cbc_mac_prep #crypt_do_in
-.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
-.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+sec_dtable:
+.b16 #sec_copy_prep #sec_do_inout
+.b16 #sec_store_prep #sec_do_out
+.b16 #sec_ecb_e_prep #sec_do_inout
+.b16 #sec_ecb_d_prep #sec_do_inout
+.b16 #sec_cbc_e_prep #sec_do_inout
+.b16 #sec_cbc_d_prep #sec_do_inout
+.b16 #sec_pcbc_e_prep #sec_do_inout
+.b16 #sec_pcbc_d_prep #sec_do_inout
+.b16 #sec_cfb_e_prep #sec_do_inout
+.b16 #sec_cfb_d_prep #sec_do_inout
+.b16 #sec_ofb_prep #sec_do_inout
+.b16 #sec_ctr_prep #sec_do_inout
+.b16 #sec_cbc_mac_prep #sec_do_in
+.b16 #sec_cmac_finish_complete_prep #sec_do_in
+.b16 #sec_cmac_finish_partial_prep #sec_do_in
.align 0x100
-.section #nv98_pcrypt_code
+.section #g98_psec_code
// $r0 is always set to 0 in our code - this allows some space savings.
clear b32 $r0
@@ -417,23 +417,23 @@ cmd_wrcache_flush:
iowr I[$r2] $r3
ret
-crypt_cmd_mode:
+sec_cmd_mode:
// if >= 0xf, INVALID_ENUM
bset $flags $p1
or $r2 2
cmpu b32 $r3 0xf
- bra nc #crypt_cmd_mode_return
+ bra nc #sec_cmd_mode_return
bclr $flags $p1
st b32 D[$r0 + #ctx_mode] $r3
- crypt_cmd_mode_return:
+ sec_cmd_mode_return:
ret
-crypt_cmd_length:
+sec_cmd_length:
// nop if length == 0
cmpu b32 $r3 0
- bra e #crypt_cmd_mode_return
+ bra e #sec_cmd_mode_return
// init key, IV
cxset 3
@@ -471,11 +471,11 @@ crypt_cmd_length:
shl b32 $r8 2
// run prep
- ld b16 $r9 D[$r8 + #crypt_dtable]
+ ld b16 $r9 D[$r8 + #sec_dtable]
call $r9
// do it
- ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+ ld b16 $r9 D[$r8 + #sec_dtable + 2]
call $r9
cxset 1
xdwait
@@ -509,25 +509,25 @@ crypt_cmd_length:
ret
-crypt_copy_prep:
+sec_copy_prep:
cs0begin 2
cxsin $c0
cxsout $c0
ret
-crypt_store_prep:
+sec_store_prep:
cs0begin 1
cxsout $c6
ret
-crypt_ecb_e_prep:
+sec_ecb_e_prep:
cs0begin 3
cxsin $c0
cenc $c0 $c0
cxsout $c0
ret
-crypt_ecb_d_prep:
+sec_ecb_d_prep:
ckexp $c7 $c7
cs0begin 3
cxsin $c0
@@ -535,7 +535,7 @@ crypt_ecb_d_prep:
cxsout $c0
ret
-crypt_cbc_e_prep:
+sec_cbc_e_prep:
cs0begin 4
cxsin $c0
cxor $c6 $c0
@@ -543,7 +543,7 @@ crypt_cbc_e_prep:
cxsout $c6
ret
-crypt_cbc_d_prep:
+sec_cbc_d_prep:
ckexp $c7 $c7
cs0begin 5
cmov $c2 $c6
@@ -553,7 +553,7 @@ crypt_cbc_d_prep:
cxsout $c0
ret
-crypt_pcbc_e_prep:
+sec_pcbc_e_prep:
cs0begin 5
cxsin $c0
cxor $c6 $c0
@@ -562,7 +562,7 @@ crypt_pcbc_e_prep:
cxor $c6 $c0
ret
-crypt_pcbc_d_prep:
+sec_pcbc_d_prep:
ckexp $c7 $c7
cs0begin 5
cxsin $c0
@@ -572,7 +572,7 @@ crypt_pcbc_d_prep:
cxor $c6 $c0
ret
-crypt_cfb_e_prep:
+sec_cfb_e_prep:
cs0begin 4
cenc $c6 $c6
cxsin $c0
@@ -580,7 +580,7 @@ crypt_cfb_e_prep:
cxsout $c6
ret
-crypt_cfb_d_prep:
+sec_cfb_d_prep:
cs0begin 4
cenc $c0 $c6
cxsin $c6
@@ -588,7 +588,7 @@ crypt_cfb_d_prep:
cxsout $c0
ret
-crypt_ofb_prep:
+sec_ofb_prep:
cs0begin 4
cenc $c6 $c6
cxsin $c0
@@ -596,7 +596,7 @@ crypt_ofb_prep:
cxsout $c0
ret
-crypt_ctr_prep:
+sec_ctr_prep:
cs0begin 5
cenc $c1 $c6
cadd $c6 1
@@ -605,14 +605,14 @@ crypt_ctr_prep:
cxsout $c0
ret
-crypt_cbc_mac_prep:
+sec_cbc_mac_prep:
cs0begin 3
cxsin $c0
cxor $c6 $c0
cenc $c6 $c6
ret
-crypt_cmac_finish_complete_prep:
+sec_cmac_finish_complete_prep:
cs0begin 7
cxsin $c0
cxor $c6 $c0
@@ -623,7 +623,7 @@ crypt_cmac_finish_complete_prep:
cenc $c6 $c6
ret
-crypt_cmac_finish_partial_prep:
+sec_cmac_finish_partial_prep:
cs0begin 8
cxsin $c0
cxor $c6 $c0
@@ -636,12 +636,12 @@ crypt_cmac_finish_partial_prep:
ret
// TODO
-crypt_do_in:
+sec_do_in:
add b32 $r3 $r5
mov $xdbase $r4
mov $r9 #swap
sethi $r9 0x20000
- crypt_do_in_loop:
+ sec_do_in_loop:
xdld $r5 $r9
xdwait
cxset 0x22
@@ -650,17 +650,17 @@ crypt_do_in:
xdwait
add b32 $r5 0x10
cmpu b32 $r5 $r3
- bra ne #crypt_do_in_loop
+ bra ne #sec_do_in_loop
cxset 1
xdwait
ret
-crypt_do_out:
+sec_do_out:
add b32 $r3 $r7
mov $xdbase $r6
mov $r9 #swap
sethi $r9 0x20000
- crypt_do_out_loop:
+ sec_do_out_loop:
cs0exec 1
cxset 0x61
xdld $r7 $r9
@@ -669,14 +669,14 @@ crypt_do_out:
xdwait
add b32 $r7 0x10
cmpu b32 $r7 $r3
- bra ne #crypt_do_out_loop
+ bra ne #sec_do_out_loop
ret
-crypt_do_inout:
+sec_do_inout:
add b32 $r3 $r5
mov $r9 #swap
sethi $r9 0x20000
- crypt_do_inout_loop:
+ sec_do_inout_loop:
mov $xdbase $r4
xdld $r5 $r9
xdwait
@@ -692,7 +692,7 @@ crypt_do_inout:
add b32 $r5 0x10
add b32 $r7 0x10
cmpu b32 $r5 $r3
- bra ne #crypt_do_inout_loop
+ bra ne #sec_do_inout_loop
ret
.align 0x100
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 38676c74e6e0..5d65c4fbb087 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t nv98_pcrypt_data[] = {
+uint32_t g98_psec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -103,7 +103,7 @@ uint32_t nv98_pcrypt_data[] = {
0xfffffff0,
0x00010285,
0xf000000f,
-/* 0x0150: crypt_dtable */
+/* 0x0150: sec_dtable */
0x04db0321,
0x04b1032f,
0x04db0339,
@@ -150,7 +150,7 @@ uint32_t nv98_pcrypt_data[] = {
0x00000000,
};
-uint32_t nv98_pcrypt_code[] = {
+uint32_t g98_psec_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
@@ -329,14 +329,14 @@ uint32_t nv98_pcrypt_code[] = {
0xbd220027,
0x0133f034,
0xf80023d0,
-/* 0x0271: crypt_cmd_mode */
+/* 0x0271: sec_cmd_mode */
0x0131f400,
0xb00225f0,
0x18f40f34,
0x0132f409,
-/* 0x0283: crypt_cmd_mode_return */
+/* 0x0283: sec_cmd_mode_return */
0xf80d0380,
-/* 0x0285: crypt_cmd_length */
+/* 0x0285: sec_cmd_length */
0x0034b000,
0xf4fb0bf4,
0x47f0033c,
@@ -376,33 +376,33 @@ uint32_t nv98_pcrypt_code[] = {
0xf05047f0,
0x04fa0643,
0xf803f805,
-/* 0x0321: crypt_copy_prep */
+/* 0x0321: sec_copy_prep */
0x203cf500,
0x003cf594,
0x003cf588,
-/* 0x032f: crypt_store_prep */
+/* 0x032f: sec_store_prep */
0xf500f88c,
0xf594103c,
0xf88c063c,
-/* 0x0339: crypt_ecb_e_prep */
+/* 0x0339: sec_ecb_e_prep */
0x303cf500,
0x003cf594,
0x003cf588,
0x003cf5d0,
-/* 0x034b: crypt_ecb_d_prep */
+/* 0x034b: sec_ecb_d_prep */
0xf500f88c,
0xf5c8773c,
0xf594303c,
0xf588003c,
0xf5d4003c,
0xf88c003c,
-/* 0x0361: crypt_cbc_e_prep */
+/* 0x0361: sec_cbc_e_prep */
0x403cf500,
0x003cf594,
0x063cf588,
0x663cf5ac,
0x063cf5d0,
-/* 0x0377: crypt_cbc_d_prep */
+/* 0x0377: sec_cbc_d_prep */
0xf500f88c,
0xf5c8773c,
0xf594503c,
@@ -411,14 +411,14 @@ uint32_t nv98_pcrypt_code[] = {
0xf5d4603c,
0xf5ac203c,
0xf88c003c,
-/* 0x0395: crypt_pcbc_e_prep */
+/* 0x0395: sec_pcbc_e_prep */
0x503cf500,
0x003cf594,
0x063cf588,
0x663cf5ac,
0x063cf5d0,
0x063cf58c,
-/* 0x03af: crypt_pcbc_d_prep */
+/* 0x03af: sec_pcbc_d_prep */
0xf500f8ac,
0xf5c8773c,
0xf594503c,
@@ -427,26 +427,26 @@ uint32_t nv98_pcrypt_code[] = {
0xf5ac163c,
0xf58c063c,
0xf8ac063c,
-/* 0x03cd: crypt_cfb_e_prep */
+/* 0x03cd: sec_cfb_e_prep */
0x403cf500,
0x663cf594,
0x003cf5d0,
0x063cf588,
0x063cf5ac,
-/* 0x03e3: crypt_cfb_d_prep */
+/* 0x03e3: sec_cfb_d_prep */
0xf500f88c,
0xf594403c,
0xf5d0603c,
0xf588063c,
0xf5ac603c,
0xf88c003c,
-/* 0x03f9: crypt_ofb_prep */
+/* 0x03f9: sec_ofb_prep */
0x403cf500,
0x663cf594,
0x003cf5d0,
0x603cf588,
0x003cf5ac,
-/* 0x040f: crypt_ctr_prep */
+/* 0x040f: sec_ctr_prep */
0xf500f88c,
0xf594503c,
0xf5d0613c,
@@ -454,12 +454,12 @@ uint32_t nv98_pcrypt_code[] = {
0xf588003c,
0xf5ac103c,
0xf88c003c,
-/* 0x0429: crypt_cbc_mac_prep */
+/* 0x0429: sec_cbc_mac_prep */
0x303cf500,
0x003cf594,
0x063cf588,
0x663cf5ac,
-/* 0x043b: crypt_cmac_finish_complete_prep */
+/* 0x043b: sec_cmac_finish_complete_prep */
0xf500f8d0,
0xf594703c,
0xf588003c,
@@ -469,7 +469,7 @@ uint32_t nv98_pcrypt_code[] = {
0xf5bc003c,
0xf5ac063c,
0xf8d0663c,
-/* 0x045d: crypt_cmac_finish_partial_prep */
+/* 0x045d: sec_cmac_finish_partial_prep */
0x803cf500,
0x003cf594,
0x063cf588,
@@ -479,12 +479,12 @@ uint32_t nv98_pcrypt_code[] = {
0x003cf5bc,
0x063cf5bc,
0x663cf5ac,
-/* 0x0483: crypt_do_in */
+/* 0x0483: sec_do_in */
0xbb00f8d0,
0x47fe0035,
0x8097f100,
0x0293f000,
-/* 0x0490: crypt_do_in_loop */
+/* 0x0490: sec_do_in_loop */
0xf80559fa,
0x223cf403,
0xf50609fa,
@@ -493,11 +493,11 @@ uint32_t nv98_pcrypt_code[] = {
0xf40453b8,
0x3cf4e91b,
0xf803f801,
-/* 0x04b1: crypt_do_out */
+/* 0x04b1: sec_do_out */
0x0037bb00,
0xf10067fe,
0xf0008097,
-/* 0x04be: crypt_do_out_loop */
+/* 0x04be: sec_do_out_loop */
0x3cf50293,
0x3cf49810,
0x0579fa61,
@@ -505,11 +505,11 @@ uint32_t nv98_pcrypt_code[] = {
0x03f8013c,
0xb81070b6,
0x1bf40473,
-/* 0x04db: crypt_do_inout */
+/* 0x04db: sec_do_inout */
0xbb00f8e8,
0x97f10035,
0x93f00080,
-/* 0x04e5: crypt_do_inout_loop */
+/* 0x04e5: sec_do_inout_loop */
0x0047fe02,
0xf80559fa,
0x213cf403,
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 5571c09534cb..9d5c1b8b1f8c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -21,57 +21,50 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/sec.h>
+#include <engine/falcon.h>
+#include "fuc/g98.fuc0s.h"
#include <core/client.h>
-#include <core/os.h>
#include <core/enum.h>
-#include <core/engctx.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-#include <engine/falcon.h>
#include <engine/fifo.h>
-#include <engine/crypt.h>
-
-#include "fuc/nv98.fuc.h"
-struct nv98_crypt_priv {
- struct nouveau_falcon base;
+struct g98_sec_priv {
+ struct nvkm_falcon base;
};
/*******************************************************************************
* Crypt object classes
******************************************************************************/
-static struct nouveau_oclass
-nv98_crypt_sclass[] = {
- { 0x88b4, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+g98_sec_sclass[] = {
+ { 0x88b4, &nvkm_object_ofuncs },
{},
};
/*******************************************************************************
- * PCRYPT context
+ * PSEC context
******************************************************************************/
-static struct nouveau_oclass
-nv98_crypt_cclass = {
- .handle = NV_ENGCTX(CRYPT, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+static struct nvkm_oclass
+g98_sec_cclass = {
+ .handle = NV_ENGCTX(SEC, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_falcon_context_ctor,
+ .dtor = _nvkm_falcon_context_dtor,
+ .init = _nvkm_falcon_context_init,
+ .fini = _nvkm_falcon_context_fini,
+ .rd32 = _nvkm_falcon_context_rd32,
+ .wr32 = _nvkm_falcon_context_wr32,
},
};
/*******************************************************************************
- * PCRYPT engine/subdev functions
+ * PSEC engine/subdev functions
******************************************************************************/
-static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
+static const struct nvkm_enum g98_sec_isr_error_name[] = {
{ 0x0000, "ILLEGAL_MTHD" },
{ 0x0001, "INVALID_BITFIELD" },
{ 0x0002, "INVALID_ENUM" },
@@ -80,12 +73,12 @@ static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
};
static void
-nv98_crypt_intr(struct nouveau_subdev *subdev)
+g98_sec_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- struct nv98_crypt_priv *priv = (void *)subdev;
+ struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
+ struct nvkm_engine *engine = nv_engine(subdev);
+ struct nvkm_object *engctx;
+ struct g98_sec_priv *priv = (void *)subdev;
u32 disp = nv_rd32(priv, 0x08701c);
u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
@@ -96,14 +89,14 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
u32 data = nv_rd32(priv, 0x087044);
int chid;
- engctx = nouveau_engctx_get(engine, inst);
+ engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
nv_error(priv, "DISPATCH_ERROR [");
- nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ nvkm_enum_print(g98_sec_isr_error_name, ssta);
pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, nouveau_client_name(engctx),
+ chid, (u64)inst << 12, nvkm_client_name(engctx),
subc, mthd, data);
nv_wr32(priv, 0x087004, 0x00000040);
stat &= ~0x00000040;
@@ -114,43 +107,43 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x087004, stat);
}
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
}
static int
-nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g98_sec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv98_crypt_priv *priv;
+ struct g98_sec_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
- "PCRYPT", "crypt", &priv);
+ ret = nvkm_falcon_create(parent, engine, oclass, 0x087000, true,
+ "PSEC", "sec", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00004000;
- nv_subdev(priv)->intr = nv98_crypt_intr;
- nv_engine(priv)->cclass = &nv98_crypt_cclass;
- nv_engine(priv)->sclass = nv98_crypt_sclass;
- nv_falcon(priv)->code.data = nv98_pcrypt_code;
- nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
- nv_falcon(priv)->data.data = nv98_pcrypt_data;
- nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
+ nv_subdev(priv)->intr = g98_sec_intr;
+ nv_engine(priv)->cclass = &g98_sec_cclass;
+ nv_engine(priv)->sclass = g98_sec_sclass;
+ nv_falcon(priv)->code.data = g98_psec_code;
+ nv_falcon(priv)->code.size = sizeof(g98_psec_code);
+ nv_falcon(priv)->data.data = g98_psec_data;
+ nv_falcon(priv)->data.size = sizeof(g98_psec_data);
return 0;
}
-struct nouveau_oclass
-nv98_crypt_oclass = {
- .handle = NV_ENGINE(CRYPT, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_crypt_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = _nouveau_falcon_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+struct nvkm_oclass
+g98_sec_oclass = {
+ .handle = NV_ENGINE(SEC, 0x98),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g98_sec_ctor,
+ .dtor = _nvkm_falcon_dtor,
+ .init = _nvkm_falcon_init,
+ .fini = _nvkm_falcon_fini,
+ .rd32 = _nvkm_falcon_rd32,
+ .wr32 = _nvkm_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
new file mode 100644
index 000000000000..bdc3a05907d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -0,0 +1,4 @@
+nvkm-y += nvkm/engine/sw/nv04.o
+nvkm-y += nvkm/engine/sw/nv10.o
+nvkm-y += nvkm/engine/sw/nv50.o
+nvkm-y += nvkm/engine/sw/gf100.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index 6af370d3a06d..533d5d8ed363 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -21,27 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/event.h>
+#include "nv50.h"
#include <subdev/bar.h>
-#include <engine/software.h>
-#include <engine/disp.h>
-
-#include "nv50.h"
-
/*******************************************************************************
* software object classes
******************************************************************************/
static int
-nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+gf100_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
u64 data = *(u32 *)args;
if (mthd == 0x0400) {
chan->vblank.offset &= 0x00ffffffffULL;
@@ -54,11 +46,11 @@ nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
}
static int
-nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+gf100_sw_mthd_mp_control(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
- struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
u32 data = *(u32 *)args;
switch (mthd) {
@@ -79,22 +71,22 @@ nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
return 0;
}
-static struct nouveau_omthds
-nvc0_software_omthds[] = {
- { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
- { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
- { 0x0408, 0x0408, nv50_software_mthd_vblsem_value },
- { 0x040c, 0x040c, nv50_software_mthd_vblsem_release },
- { 0x0500, 0x0500, nv50_software_mthd_flip },
- { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
- { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
- { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
+static struct nvkm_omthds
+gf100_sw_omthds[] = {
+ { 0x0400, 0x0400, gf100_sw_mthd_vblsem_offset },
+ { 0x0404, 0x0404, gf100_sw_mthd_vblsem_offset },
+ { 0x0408, 0x0408, nv50_sw_mthd_vblsem_value },
+ { 0x040c, 0x040c, nv50_sw_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_sw_mthd_flip },
+ { 0x0600, 0x0600, gf100_sw_mthd_mp_control },
+ { 0x0644, 0x0644, gf100_sw_mthd_mp_control },
+ { 0x06ac, 0x06ac, gf100_sw_mthd_mp_control },
{}
};
-static struct nouveau_oclass
-nvc0_software_sclass[] = {
- { 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
+static struct nvkm_oclass
+gf100_sw_sclass[] = {
+ { 0x906e, &nvkm_object_ofuncs, gf100_sw_omthds },
{}
};
@@ -103,12 +95,12 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
-nvc0_software_vblsem_release(struct nvkm_notify *notify)
+gf100_sw_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_software_chan *chan =
+ struct nv50_sw_chan *chan =
container_of(notify, typeof(*chan), vblank.notify[notify->index]);
- struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
- struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
+ struct nvkm_bar *bar = nvkm_bar(priv);
nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
bar->flush(bar);
@@ -119,31 +111,31 @@ nvc0_software_vblsem_release(struct nvkm_notify *notify)
return NVKM_NOTIFY_DROP;
}
-static struct nv50_software_cclass
-nvc0_software_cclass = {
+static struct nv50_sw_cclass
+gf100_sw_cclass = {
.base.handle = NV_ENGCTX(SW, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_software_context_ctor,
- .dtor = nv50_software_context_dtor,
- .init = _nouveau_software_context_init,
- .fini = _nouveau_software_context_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_sw_context_ctor,
+ .dtor = nv50_sw_context_dtor,
+ .init = _nvkm_sw_context_init,
+ .fini = _nvkm_sw_context_fini,
},
- .vblank = nvc0_software_vblsem_release,
+ .vblank = gf100_sw_vblsem_release,
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
-struct nouveau_oclass *
-nvc0_software_oclass = &(struct nv50_software_oclass) {
+struct nvkm_oclass *
+gf100_sw_oclass = &(struct nv50_sw_oclass) {
.base.handle = NV_ENGINE(SW, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_software_ctor,
- .dtor = _nouveau_software_dtor,
- .init = _nouveau_software_init,
- .fini = _nouveau_software_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_sw_ctor,
+ .dtor = _nvkm_sw_dtor,
+ .init = _nvkm_sw_init,
+ .fini = _nvkm_sw_fini,
},
- .cclass = &nvc0_software_cclass.base,
- .sclass = nvc0_software_sclass,
+ .cclass = &gf100_sw_cclass.base,
+ .sclass = gf100_sw_sclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 64df15c7f051..897024421d36 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -21,19 +21,15 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/os.h>
-#include <core/engctx.h>
-
-#include <engine/software.h>
+#include <engine/sw.h>
#include <engine/fifo.h>
-struct nv04_software_priv {
- struct nouveau_software base;
+struct nv04_sw_priv {
+ struct nvkm_sw base;
};
-struct nv04_software_chan {
- struct nouveau_software_chan base;
+struct nv04_sw_chan {
+ struct nvkm_sw_chan base;
};
/*******************************************************************************
@@ -41,35 +37,33 @@ struct nv04_software_chan {
******************************************************************************/
static int
-nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
- struct nouveau_object *channel = (void *)nv_engctx(object->parent);
- struct nouveau_fifo_chan *fifo = (void *)channel->parent;
+ struct nvkm_object *channel = (void *)nv_engctx(object->parent);
+ struct nvkm_fifo_chan *fifo = (void *)channel->parent;
atomic_set(&fifo->refcnt, *(u32*)data);
return 0;
}
static int
-nv04_software_flip(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
{
- struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent);
if (chan->base.flip)
return chan->base.flip(chan->base.flip_data);
return -EINVAL;
}
-static struct nouveau_omthds
-nv04_software_omthds[] = {
- { 0x0150, 0x0150, nv04_software_set_ref },
- { 0x0500, 0x0500, nv04_software_flip },
+static struct nvkm_omthds
+nv04_sw_omthds[] = {
+ { 0x0150, 0x0150, nv04_sw_set_ref },
+ { 0x0500, 0x0500, nv04_sw_flip },
{}
};
-static struct nouveau_oclass
-nv04_software_sclass[] = {
- { 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
+static struct nvkm_oclass
+nv04_sw_sclass[] = {
+ { 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds },
{}
};
@@ -78,15 +72,14 @@ nv04_software_sclass[] = {
******************************************************************************/
static int
-nv04_software_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv04_software_chan *chan;
+ struct nv04_sw_chan *chan;
int ret;
- ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -94,14 +87,14 @@ nv04_software_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv04_software_cclass = {
+static struct nvkm_oclass
+nv04_sw_cclass = {
.handle = NV_ENGCTX(SW, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_software_context_ctor,
- .dtor = _nouveau_software_context_dtor,
- .init = _nouveau_software_context_init,
- .fini = _nouveau_software_context_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv04_sw_context_ctor,
+ .dtor = _nvkm_sw_context_dtor,
+ .init = _nvkm_sw_context_init,
+ .fini = _nvkm_sw_context_fini,
},
};
@@ -110,37 +103,37 @@ nv04_software_cclass = {
******************************************************************************/
void
-nv04_software_intr(struct nouveau_subdev *subdev)
+nv04_sw_intr(struct nvkm_subdev *subdev)
{
nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
}
static int
-nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv04_software_priv *priv;
+ struct nv04_sw_priv *priv;
int ret;
- ret = nouveau_software_create(parent, engine, oclass, &priv);
+ ret = nvkm_sw_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nv_engine(priv)->cclass = &nv04_software_cclass;
- nv_engine(priv)->sclass = nv04_software_sclass;
- nv_subdev(priv)->intr = nv04_software_intr;
+ nv_engine(priv)->cclass = &nv04_sw_cclass;
+ nv_engine(priv)->sclass = nv04_sw_sclass;
+ nv_subdev(priv)->intr = nv04_sw_intr;
return 0;
}
-struct nouveau_oclass *
-nv04_software_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv04_sw_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(SW, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_software_ctor,
- .dtor = _nouveau_software_dtor,
- .init = _nouveau_software_init,
- .fini = _nouveau_software_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv04_sw_ctor,
+ .dtor = _nvkm_sw_dtor,
+ .init = _nvkm_sw_init,
+ .fini = _nvkm_sw_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index f54a2253deca..c61153a3fb8b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -21,18 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include <engine/sw.h>
-#include <core/os.h>
-#include <core/engctx.h>
-
-#include <engine/software.h>
-
-struct nv10_software_priv {
- struct nouveau_software base;
+struct nv10_sw_priv {
+ struct nvkm_sw base;
};
-struct nv10_software_chan {
- struct nouveau_software_chan base;
+struct nv10_sw_chan {
+ struct nvkm_sw_chan base;
};
/*******************************************************************************
@@ -40,24 +36,23 @@ struct nv10_software_chan {
******************************************************************************/
static int
-nv10_software_flip(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv10_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
{
- struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv10_sw_chan *chan = (void *)nv_engctx(object->parent);
if (chan->base.flip)
return chan->base.flip(chan->base.flip_data);
return -EINVAL;
}
-static struct nouveau_omthds
-nv10_software_omthds[] = {
- { 0x0500, 0x0500, nv10_software_flip },
+static struct nvkm_omthds
+nv10_sw_omthds[] = {
+ { 0x0500, 0x0500, nv10_sw_flip },
{}
};
-static struct nouveau_oclass
-nv10_software_sclass[] = {
- { 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
+static struct nvkm_oclass
+nv10_sw_sclass[] = {
+ { 0x016e, &nvkm_object_ofuncs, nv10_sw_omthds },
{}
};
@@ -66,15 +61,14 @@ nv10_software_sclass[] = {
******************************************************************************/
static int
-nv10_software_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv10_software_chan *chan;
+ struct nv10_sw_chan *chan;
int ret;
- ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -82,14 +76,14 @@ nv10_software_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nouveau_oclass
-nv10_software_cclass = {
+static struct nvkm_oclass
+nv10_sw_cclass = {
.handle = NV_ENGCTX(SW, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_software_context_ctor,
- .dtor = _nouveau_software_context_dtor,
- .init = _nouveau_software_context_init,
- .fini = _nouveau_software_context_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv10_sw_context_ctor,
+ .dtor = _nvkm_sw_context_dtor,
+ .init = _nvkm_sw_context_init,
+ .fini = _nvkm_sw_context_fini,
},
};
@@ -98,31 +92,31 @@ nv10_software_cclass = {
******************************************************************************/
static int
-nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv10_software_priv *priv;
+ struct nv10_sw_priv *priv;
int ret;
- ret = nouveau_software_create(parent, engine, oclass, &priv);
+ ret = nvkm_sw_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nv_engine(priv)->cclass = &nv10_software_cclass;
- nv_engine(priv)->sclass = nv10_software_sclass;
- nv_subdev(priv)->intr = nv04_software_intr;
+ nv_engine(priv)->cclass = &nv10_sw_cclass;
+ nv_engine(priv)->sclass = nv10_sw_sclass;
+ nv_subdev(priv)->intr = nv04_sw_intr;
return 0;
}
-struct nouveau_oclass *
-nv10_software_oclass = &(struct nouveau_oclass) {
+struct nvkm_oclass *
+nv10_sw_oclass = &(struct nvkm_oclass) {
.handle = NV_ENGINE(SW, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_software_ctor,
- .dtor = _nouveau_software_dtor,
- .init = _nouveau_software_init,
- .fini = _nouveau_software_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv10_sw_ctor,
+ .dtor = _nvkm_sw_dtor,
+ .init = _nvkm_sw_init,
+ .fini = _nvkm_sw_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index a0fec205f9db..401fcd73086b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -21,72 +21,67 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
-#include <core/os.h>
-#include <core/engctx.h>
-#include <core/namedb.h>
+#include <core/device.h>
#include <core/handle.h>
-#include <core/gpuobj.h>
-#include <core/event.h>
-#include <nvif/event.h>
-
-#include <subdev/bar.h>
-
+#include <core/namedb.h>
#include <engine/disp.h>
+#include <subdev/bar.h>
-#include "nv50.h"
+#include <nvif/event.h>
/*******************************************************************************
* software object classes
******************************************************************************/
static int
-nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv50_sw_mthd_dma_vblsem(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
- struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
- struct nouveau_handle *handle;
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
+ struct nvkm_fifo_chan *fifo = (void *)nv_object(chan)->parent;
+ struct nvkm_handle *handle;
int ret = -EINVAL;
- handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
+ handle = nvkm_namedb_get(nv_namedb(fifo), *(u32 *)args);
if (!handle)
return -ENOENT;
if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
- struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
+ struct nvkm_gpuobj *gpuobj = nv_gpuobj(handle->object);
chan->vblank.ctxdma = gpuobj->node->offset >> 4;
ret = 0;
}
- nouveau_namedb_put(handle);
+ nvkm_namedb_put(handle);
return ret;
}
static int
-nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv50_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
chan->vblank.offset = *(u32 *)args;
return 0;
}
int
-nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv50_sw_mthd_vblsem_value(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
chan->vblank.value = *(u32 *)args;
return 0;
}
int
-nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv50_sw_mthd_vblsem_release(struct nvkm_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
u32 head = *(u32 *)args;
- if (head >= nouveau_disp(chan)->vblank.index_nr)
+ if (head >= nvkm_disp(chan)->vblank.index_nr)
return -EINVAL;
nvkm_notify_get(&chan->vblank.notify[head]);
@@ -94,28 +89,27 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
}
int
-nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
+nv50_sw_mthd_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
{
- struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
if (chan->base.flip)
return chan->base.flip(chan->base.flip_data);
return -EINVAL;
}
-static struct nouveau_omthds
-nv50_software_omthds[] = {
- { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
- { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
- { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
- { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
- { 0x0500, 0x0500, nv50_software_mthd_flip },
+static struct nvkm_omthds
+nv50_sw_omthds[] = {
+ { 0x018c, 0x018c, nv50_sw_mthd_dma_vblsem },
+ { 0x0400, 0x0400, nv50_sw_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nv50_sw_mthd_vblsem_value },
+ { 0x0408, 0x0408, nv50_sw_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_sw_mthd_flip },
{}
};
-static struct nouveau_oclass
-nv50_software_sclass[] = {
- { 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
+static struct nvkm_oclass
+nv50_sw_sclass[] = {
+ { 0x506e, &nvkm_object_ofuncs, nv50_sw_omthds },
{}
};
@@ -124,12 +118,12 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
-nv50_software_vblsem_release(struct nvkm_notify *notify)
+nv50_sw_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_software_chan *chan =
+ struct nv50_sw_chan *chan =
container_of(notify, typeof(*chan), vblank.notify[notify->index]);
- struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
- struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
+ struct nvkm_bar *bar = nvkm_bar(priv);
nv_wr32(priv, 0x001704, chan->vblank.channel);
nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
@@ -147,29 +141,28 @@ nv50_software_vblsem_release(struct nvkm_notify *notify)
}
void
-nv50_software_context_dtor(struct nouveau_object *object)
+nv50_sw_context_dtor(struct nvkm_object *object)
{
- struct nv50_software_chan *chan = (void *)object;
+ struct nv50_sw_chan *chan = (void *)object;
int i;
for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
nvkm_notify_fini(&chan->vblank.notify[i]);
- nouveau_software_context_destroy(&chan->base);
+ nvkm_sw_context_destroy(&chan->base);
}
int
-nv50_software_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_disp *pdisp = nouveau_disp(parent);
- struct nv50_software_cclass *pclass = (void *)oclass;
- struct nv50_software_chan *chan;
+ struct nvkm_disp *pdisp = nvkm_disp(parent);
+ struct nv50_sw_cclass *pclass = (void *)oclass;
+ struct nv50_sw_chan *chan;
int ret, i;
- ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -191,16 +184,16 @@ nv50_software_context_ctor(struct nouveau_object *parent,
return 0;
}
-static struct nv50_software_cclass
-nv50_software_cclass = {
+static struct nv50_sw_cclass
+nv50_sw_cclass = {
.base.handle = NV_ENGCTX(SW, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_software_context_ctor,
- .dtor = nv50_software_context_dtor,
- .init = _nouveau_software_context_init,
- .fini = _nouveau_software_context_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_sw_context_ctor,
+ .dtor = nv50_sw_context_dtor,
+ .init = _nvkm_sw_context_init,
+ .fini = _nvkm_sw_context_fini,
},
- .vblank = nv50_software_vblsem_release,
+ .vblank = nv50_sw_vblsem_release,
};
/*******************************************************************************
@@ -208,34 +201,34 @@ nv50_software_cclass = {
******************************************************************************/
int
-nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv50_software_oclass *pclass = (void *)oclass;
- struct nv50_software_priv *priv;
+ struct nv50_sw_oclass *pclass = (void *)oclass;
+ struct nv50_sw_priv *priv;
int ret;
- ret = nouveau_software_create(parent, engine, oclass, &priv);
+ ret = nvkm_sw_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_engine(priv)->cclass = pclass->cclass;
nv_engine(priv)->sclass = pclass->sclass;
- nv_subdev(priv)->intr = nv04_software_intr;
+ nv_subdev(priv)->intr = nv04_sw_intr;
return 0;
}
-struct nouveau_oclass *
-nv50_software_oclass = &(struct nv50_software_oclass) {
+struct nvkm_oclass *
+nv50_sw_oclass = &(struct nv50_sw_oclass) {
.base.handle = NV_ENGINE(SW, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_software_ctor,
- .dtor = _nouveau_software_dtor,
- .init = _nouveau_software_init,
- .fini = _nouveau_software_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_sw_ctor,
+ .dtor = _nvkm_sw_dtor,
+ .init = _nvkm_sw_init,
+ .fini = _nvkm_sw_fini,
},
- .cclass = &nv50_software_cclass.base,
- .sclass = nv50_software_sclass,
+ .cclass = &nv50_sw_cclass.base,
+ .sclass = nv50_sw_sclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
new file mode 100644
index 000000000000..d8adc1108467
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -0,0 +1,45 @@
+#ifndef __NVKM_SW_NV50_H__
+#define __NVKM_SW_NV50_H__
+#include <engine/sw.h>
+#include <core/notify.h>
+
+struct nv50_sw_oclass {
+ struct nvkm_oclass base;
+ struct nvkm_oclass *cclass;
+ struct nvkm_oclass *sclass;
+};
+
+struct nv50_sw_priv {
+ struct nvkm_sw base;
+};
+
+int nv50_sw_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+
+struct nv50_sw_cclass {
+ struct nvkm_oclass base;
+ int (*vblank)(struct nvkm_notify *);
+};
+
+struct nv50_sw_chan {
+ struct nvkm_sw_chan base;
+ struct {
+ struct nvkm_notify notify[4];
+ u32 channel;
+ u32 ctxdma;
+ u64 offset;
+ u32 value;
+ } vblank;
+};
+
+int nv50_sw_context_ctor(struct nvkm_object *,
+ struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv50_sw_context_dtor(struct nvkm_object *);
+
+int nv50_sw_mthd_vblsem_value(struct nvkm_object *, u32, void *, u32);
+int nv50_sw_mthd_vblsem_release(struct nvkm_object *, u32, void *, u32);
+int nv50_sw_mthd_flip(struct nvkm_object *, u32, void *, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild
new file mode 100644
index 000000000000..6b390eb92b0e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/engine/vp/g84.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index fd6272b8cdb2..45f4e186befc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -21,17 +21,18 @@
*
* Authors: Ben Skeggs, Ilia Mirkin
*/
-
-#include <engine/xtensa.h>
#include <engine/vp.h>
+#include <engine/xtensa.h>
+
+#include <core/engctx.h>
/*******************************************************************************
* VP object classes
******************************************************************************/
-static struct nouveau_oclass
-nv84_vp_sclass[] = {
- { 0x7476, &nouveau_object_ofuncs },
+static struct nvkm_oclass
+g84_vp_sclass[] = {
+ { 0x7476, &nvkm_object_ofuncs },
{},
};
@@ -39,16 +40,16 @@ nv84_vp_sclass[] = {
* PVP context
******************************************************************************/
-static struct nouveau_oclass
-nv84_vp_cclass = {
+static struct nvkm_oclass
+g84_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_xtensa_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_xtensa_engctx_ctor,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
},
};
@@ -57,36 +58,36 @@ nv84_vp_cclass = {
******************************************************************************/
static int
-nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_vp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_xtensa *priv;
+ struct nvkm_xtensa *priv;
int ret;
- ret = nouveau_xtensa_create(parent, engine, oclass, 0xf000, true,
- "PVP", "vp", &priv);
+ ret = nvkm_xtensa_create(parent, engine, oclass, 0xf000, true,
+ "PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x01020000;
- nv_engine(priv)->cclass = &nv84_vp_cclass;
- nv_engine(priv)->sclass = nv84_vp_sclass;
+ nv_engine(priv)->cclass = &g84_vp_cclass;
+ nv_engine(priv)->sclass = g84_vp_sclass;
priv->fifo_val = 0x111;
priv->unkd28 = 0x9c544;
return 0;
}
-struct nouveau_oclass
-nv84_vp_oclass = {
+struct nvkm_oclass
+g84_vp_oclass = {
.handle = NV_ENGINE(VP, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_vp_ctor,
- .dtor = _nouveau_xtensa_dtor,
- .init = _nouveau_xtensa_init,
- .fini = _nouveau_xtensa_fini,
- .rd32 = _nouveau_xtensa_rd32,
- .wr32 = _nouveau_xtensa_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_vp_ctor,
+ .dtor = _nvkm_xtensa_dtor,
+ .init = _nvkm_xtensa_init,
+ .fini = _nvkm_xtensa_fini,
+ .rd32 = _nvkm_xtensa_rd32,
+ .wr32 = _nvkm_xtensa_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 92384759d2f5..cea90df533d9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -19,43 +19,43 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include <engine/xtensa.h>
+#include <core/device.h>
+
+#include <core/engctx.h>
u32
-_nouveau_xtensa_rd32(struct nouveau_object *object, u64 addr)
+_nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr)
{
- struct nouveau_xtensa *xtensa = (void *)object;
+ struct nvkm_xtensa *xtensa = (void *)object;
return nv_rd32(xtensa, xtensa->addr + addr);
}
void
-_nouveau_xtensa_wr32(struct nouveau_object *object, u64 addr, u32 data)
+_nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
- struct nouveau_xtensa *xtensa = (void *)object;
+ struct nvkm_xtensa *xtensa = (void *)object;
nv_wr32(xtensa, xtensa->addr + addr, data);
}
int
-_nouveau_xtensa_engctx_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_engctx *engctx;
+ struct nvkm_engctx *engctx;
int ret;
- ret = nouveau_engctx_create(parent, engine, oclass, NULL,
- 0x10000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
*pobject = nv_object(engctx);
return ret;
}
void
-_nouveau_xtensa_intr(struct nouveau_subdev *subdev)
+_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_xtensa *xtensa = (void *)subdev;
+ struct nvkm_xtensa *xtensa = (void *)subdev;
u32 unk104 = nv_ro32(xtensa, 0xd04);
u32 intr = nv_ro32(xtensa, 0xc20);
u32 chan = nv_ro32(xtensa, 0xc28);
@@ -72,39 +72,36 @@ _nouveau_xtensa_intr(struct nouveau_subdev *subdev)
}
int
-nouveau_xtensa_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 addr, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
+nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 addr, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
{
- struct nouveau_xtensa *xtensa;
+ struct nvkm_xtensa *xtensa;
int ret;
- ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
- fname, length, pobject);
+ ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
+ fname, length, pobject);
xtensa = *pobject;
if (ret)
return ret;
- nv_subdev(xtensa)->intr = _nouveau_xtensa_intr;
-
+ nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
xtensa->addr = addr;
-
return 0;
}
int
-_nouveau_xtensa_init(struct nouveau_object *object)
+_nvkm_xtensa_init(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
- struct nouveau_xtensa *xtensa = (void *)object;
+ struct nvkm_device *device = nv_device(object);
+ struct nvkm_xtensa *xtensa = (void *)object;
const struct firmware *fw;
char name[32];
int i, ret;
u32 tmp;
- ret = nouveau_engine_init(&xtensa->base);
+ ret = nvkm_engine_init(&xtensa->base);
if (ret)
return ret;
@@ -124,8 +121,8 @@ _nouveau_xtensa_init(struct nouveau_object *object)
return -EINVAL;
}
- ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
- &xtensa->gpu_fw);
+ ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
+ &xtensa->gpu_fw);
if (ret) {
release_firmware(fw);
return ret;
@@ -157,20 +154,19 @@ _nouveau_xtensa_init(struct nouveau_object *object)
nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
-
return 0;
}
int
-_nouveau_xtensa_fini(struct nouveau_object *object, bool suspend)
+_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_xtensa *xtensa = (void *)object;
+ struct nvkm_xtensa *xtensa = (void *)object;
nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */
nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */
if (!suspend)
- nouveau_gpuobj_ref(NULL, &xtensa->gpu_fw);
+ nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw);
- return nouveau_engine_fini(&xtensa->base, suspend);
+ return nvkm_engine_fini(&xtensa->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
new file mode 100644
index 000000000000..a1bb3e48739c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -0,0 +1,19 @@
+include $(src)/nvkm/subdev/bar/Kbuild
+include $(src)/nvkm/subdev/bios/Kbuild
+include $(src)/nvkm/subdev/bus/Kbuild
+include $(src)/nvkm/subdev/clk/Kbuild
+include $(src)/nvkm/subdev/devinit/Kbuild
+include $(src)/nvkm/subdev/fb/Kbuild
+include $(src)/nvkm/subdev/fuse/Kbuild
+include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/i2c/Kbuild
+include $(src)/nvkm/subdev/ibus/Kbuild
+include $(src)/nvkm/subdev/instmem/Kbuild
+include $(src)/nvkm/subdev/ltc/Kbuild
+include $(src)/nvkm/subdev/mc/Kbuild
+include $(src)/nvkm/subdev/mmu/Kbuild
+include $(src)/nvkm/subdev/mxm/Kbuild
+include $(src)/nvkm/subdev/pmu/Kbuild
+include $(src)/nvkm/subdev/therm/Kbuild
+include $(src)/nvkm/subdev/timer/Kbuild
+include $(src)/nvkm/subdev/volt/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
new file mode 100644
index 000000000000..1ab554a0b5e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -0,0 +1,4 @@
+nvkm-y += nvkm/subdev/bar/base.o
+nvkm-y += nvkm/subdev/bar/nv50.o
+nvkm-y += nvkm/subdev/bar/gf100.o
+nvkm-y += nvkm/subdev/bar/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
new file mode 100644
index 000000000000..3502d00122ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <core/device.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+
+struct nvkm_barobj {
+ struct nvkm_object base;
+ struct nvkm_vma vma;
+ void __iomem *iomem;
+};
+
+static int
+nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_bar *bar = nvkm_bar(device);
+ struct nvkm_mem *mem = data;
+ struct nvkm_barobj *barobj;
+ int ret;
+
+ ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
+ *pobject = nv_object(barobj);
+ if (ret)
+ return ret;
+
+ ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
+ if (ret)
+ return ret;
+
+ barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
+ (u32)barobj->vma.offset, mem->size << 12);
+ if (!barobj->iomem) {
+ nv_warn(bar, "PRAMIN ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+nvkm_barobj_dtor(struct nvkm_object *object)
+{
+ struct nvkm_bar *bar = nvkm_bar(object);
+ struct nvkm_barobj *barobj = (void *)object;
+ if (barobj->vma.node) {
+ if (barobj->iomem)
+ iounmap(barobj->iomem);
+ bar->unmap(bar, &barobj->vma);
+ }
+ nvkm_object_destroy(&barobj->base);
+}
+
+static u32
+nvkm_barobj_rd32(struct nvkm_object *object, u64 addr)
+{
+ struct nvkm_barobj *barobj = (void *)object;
+ return ioread32_native(barobj->iomem + addr);
+}
+
+static void
+nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ struct nvkm_barobj *barobj = (void *)object;
+ iowrite32_native(data, barobj->iomem + addr);
+}
+
+static struct nvkm_oclass
+nvkm_barobj_oclass = {
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nvkm_barobj_ctor,
+ .dtor = nvkm_barobj_dtor,
+ .init = nvkm_object_init,
+ .fini = nvkm_object_fini,
+ .rd32 = nvkm_barobj_rd32,
+ .wr32 = nvkm_barobj_wr32,
+ },
+};
+
+int
+nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
+ struct nvkm_mem *mem, struct nvkm_object **pobject)
+{
+ struct nvkm_object *gpuobj;
+ int ret = nvkm_object_ctor(parent, &parent->engine->subdev.object,
+ &nvkm_barobj_oclass, mem, 0, &gpuobj);
+ if (ret == 0)
+ *pobject = gpuobj;
+ return ret;
+}
+
+int
+nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
+{
+ struct nvkm_bar *bar;
+ int ret;
+
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "BARCTL",
+ "bar", length, pobject);
+ bar = *pobject;
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nvkm_bar_destroy(struct nvkm_bar *bar)
+{
+ nvkm_subdev_destroy(&bar->base);
+}
+
+void
+_nvkm_bar_dtor(struct nvkm_object *object)
+{
+ struct nvkm_bar *bar = (void *)object;
+ nvkm_bar_destroy(bar);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
new file mode 100644
index 000000000000..12a1aebd9a96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+
+struct gf100_bar_priv_vm {
+ struct nvkm_gpuobj *mem;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+};
+
+struct gf100_bar_priv {
+ struct nvkm_bar base;
+ spinlock_t lock;
+ struct gf100_bar_priv_vm bar[2];
+};
+
+static int
+gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
+ struct nvkm_vma *vma)
+{
+ struct gf100_bar_priv *priv = (void *)bar;
+ int ret;
+
+ ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
+ if (ret)
+ return ret;
+
+ nvkm_vm_map(vma, mem);
+ return 0;
+}
+
+static int
+gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
+ struct nvkm_vma *vma)
+{
+ struct gf100_bar_priv *priv = (void *)bar;
+ int ret;
+
+ ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12,
+ mem->page_shift, flags, vma);
+ if (ret)
+ return ret;
+
+ nvkm_vm_map(vma, mem);
+ return 0;
+}
+
+static void
+gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+{
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
+}
+
+static int
+gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
+ int bar_nr)
+{
+ struct nvkm_device *device = nv_device(&priv->base);
+ struct nvkm_vm *vm;
+ resource_size_t bar_len;
+ int ret;
+
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+ &bar_vm->mem);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+ &bar_vm->pgd);
+ if (ret)
+ return ret;
+
+ bar_len = nv_device_resource_len(device, bar_nr);
+
+ ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
+ if (ret)
+ return ret;
+
+ atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+
+ /*
+ * Bootstrap page table lookup.
+ */
+ if (bar_nr == 3) {
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+ (bar_len >> 12) * 8, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &vm->pgt[0].obj[0]);
+ vm->pgt[0].refcount[0] = 1;
+ if (ret)
+ return ret;
+ }
+
+ ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
+ nvkm_vm_ref(NULL, &vm, NULL);
+ if (ret)
+ return ret;
+
+ nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+ nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+ nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+ nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+ return 0;
+}
+
+int
+gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_device *device = nv_device(parent);
+ struct gf100_bar_priv *priv;
+ bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+ int ret;
+
+ ret = nvkm_bar_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ /* BAR3 */
+ if (has_bar3) {
+ ret = gf100_bar_ctor_vm(priv, &priv->bar[0], 3);
+ if (ret)
+ return ret;
+ }
+
+ /* BAR1 */
+ ret = gf100_bar_ctor_vm(priv, &priv->bar[1], 1);
+ if (ret)
+ return ret;
+
+ if (has_bar3) {
+ priv->base.alloc = nvkm_bar_alloc;
+ priv->base.kmap = gf100_bar_kmap;
+ }
+ priv->base.umap = gf100_bar_umap;
+ priv->base.unmap = gf100_bar_unmap;
+ priv->base.flush = g84_bar_flush;
+ spin_lock_init(&priv->lock);
+ return 0;
+}
+
+void
+gf100_bar_dtor(struct nvkm_object *object)
+{
+ struct gf100_bar_priv *priv = (void *)object;
+
+ nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
+ nvkm_gpuobj_ref(NULL, &priv->bar[1].pgd);
+ nvkm_gpuobj_ref(NULL, &priv->bar[1].mem);
+
+ if (priv->bar[0].vm) {
+ nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
+ nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
+ }
+ nvkm_gpuobj_ref(NULL, &priv->bar[0].pgd);
+ nvkm_gpuobj_ref(NULL, &priv->bar[0].mem);
+
+ nvkm_bar_destroy(&priv->base);
+}
+
+int
+gf100_bar_init(struct nvkm_object *object)
+{
+ struct gf100_bar_priv *priv = (void *)object;
+ int ret;
+
+ ret = nvkm_bar_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+
+ nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
+ if (priv->bar[0].mem)
+ nv_wr32(priv, 0x001714,
+ 0xc0000000 | priv->bar[0].mem->addr >> 12);
+ return 0;
+}
+
+struct nvkm_oclass
+gf100_bar_oclass = {
+ .handle = NV_SUBDEV(BAR, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_bar_ctor,
+ .dtor = gf100_bar_dtor,
+ .init = gf100_bar_init,
+ .fini = _nvkm_bar_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index bf877af9d3bd..148f739a276e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -19,36 +19,32 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
-#include <subdev/bar.h>
-
#include "priv.h"
int
-gk20a_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk20a_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_bar *bar;
+ struct nvkm_bar *bar;
int ret;
- ret = nvc0_bar_ctor(parent, engine, oclass, data, size, pobject);
+ ret = gf100_bar_ctor(parent, engine, oclass, data, size, pobject);
if (ret)
return ret;
- bar = (struct nouveau_bar *)*pobject;
+ bar = (struct nvkm_bar *)*pobject;
bar->iomap_uncached = true;
-
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gk20a_bar_oclass = {
.handle = NV_SUBDEV(BAR, 0xea),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gk20a_bar_ctor,
- .dtor = nvc0_bar_dtor,
- .init = nvc0_bar_init,
- .fini = _nouveau_bar_fini,
+ .dtor = gf100_bar_dtor,
+ .init = gf100_bar_init,
+ .fini = _nvkm_bar_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index f748ba49dfc8..8548adb91dcc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,66 +21,65 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include <core/device.h>
#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/vm.h>
-
-#include "priv.h"
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
struct nv50_bar_priv {
- struct nouveau_bar base;
+ struct nvkm_bar base;
spinlock_t lock;
- struct nouveau_gpuobj *mem;
- struct nouveau_gpuobj *pad;
- struct nouveau_gpuobj *pgd;
- struct nouveau_vm *bar1_vm;
- struct nouveau_gpuobj *bar1;
- struct nouveau_vm *bar3_vm;
- struct nouveau_gpuobj *bar3;
+ struct nvkm_gpuobj *mem;
+ struct nvkm_gpuobj *pad;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *bar1_vm;
+ struct nvkm_gpuobj *bar1;
+ struct nvkm_vm *bar3_vm;
+ struct nvkm_gpuobj *bar3;
};
static int
-nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
- u32 flags, struct nouveau_vma *vma)
+nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
+ struct nvkm_vma *vma)
{
struct nv50_bar_priv *priv = (void *)bar;
int ret;
- ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
+ ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
if (ret)
return ret;
- nouveau_vm_map(vma, mem);
+ nvkm_vm_map(vma, mem);
return 0;
}
static int
-nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
- u32 flags, struct nouveau_vma *vma)
+nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
+ struct nvkm_vma *vma)
{
struct nv50_bar_priv *priv = (void *)bar;
int ret;
- ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
+ ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
if (ret)
return ret;
- nouveau_vm_map(vma, mem);
+ nvkm_vm_map(vma, mem);
return 0;
}
static void
-nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
+nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
- nouveau_vm_unmap(vma);
- nouveau_vm_put(vma);
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
}
static void
-nv50_bar_flush(struct nouveau_bar *bar)
+nv50_bar_flush(struct nvkm_bar *bar)
{
struct nv50_bar_priv *priv = (void *)bar;
unsigned long flags;
@@ -92,7 +91,7 @@ nv50_bar_flush(struct nouveau_bar *bar)
}
void
-nv84_bar_flush(struct nouveau_bar *bar)
+g84_bar_flush(struct nvkm_bar *bar)
{
struct nv50_bar_priv *priv = (void *)bar;
unsigned long flags;
@@ -104,36 +103,35 @@ nv84_bar_flush(struct nouveau_bar *bar)
}
static int
-nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_object *heap;
- struct nouveau_vm *vm;
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_object *heap;
+ struct nvkm_vm *vm;
struct nv50_bar_priv *priv;
u64 start, limit;
int ret;
- ret = nouveau_bar_create(parent, engine, oclass, &priv);
+ ret = nvkm_bar_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
- NVOBJ_FLAG_HEAP, &priv->mem);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+ NVOBJ_FLAG_HEAP, &priv->mem);
heap = nv_object(priv->mem);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), heap,
- (device->chipset == 0x50) ? 0x1400 : 0x0200,
- 0, 0, &priv->pad);
+ ret = nvkm_gpuobj_new(nv_object(priv), heap,
+ (device->chipset == 0x50) ? 0x1400 : 0x0200,
+ 0, 0, &priv->pad);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
- 0, &priv->pgd);
+ ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
if (ret)
return ret;
@@ -141,25 +139,25 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
start = 0x0100000000ULL;
limit = start + nv_device_resource_len(device, 3);
- ret = nouveau_vm_new(device, start, limit, start, &vm);
+ ret = nvkm_vm_new(device, start, limit, start, &vm);
if (ret)
return ret;
atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
- ret = nouveau_gpuobj_new(nv_object(priv), heap,
- ((limit-- - start) >> 12) * 8, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), heap,
+ ((limit-- - start) >> 12) * 8, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
- ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
- nouveau_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+ nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+ ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
if (ret)
return ret;
@@ -175,18 +173,18 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
start = 0x0000000000ULL;
limit = start + nv_device_resource_len(device, 1);
- ret = nouveau_vm_new(device, start, limit--, start, &vm);
+ ret = nvkm_vm_new(device, start, limit--, start, &vm);
if (ret)
return ret;
atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
- ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
- nouveau_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+ nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+ ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
if (ret)
return ret;
@@ -198,42 +196,42 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_wo32(priv->bar1, 0x10, 0x00000000);
nv_wo32(priv->bar1, 0x14, 0x00000000);
- priv->base.alloc = nouveau_bar_alloc;
+ priv->base.alloc = nvkm_bar_alloc;
priv->base.kmap = nv50_bar_kmap;
priv->base.umap = nv50_bar_umap;
priv->base.unmap = nv50_bar_unmap;
if (device->chipset == 0x50)
priv->base.flush = nv50_bar_flush;
else
- priv->base.flush = nv84_bar_flush;
+ priv->base.flush = g84_bar_flush;
spin_lock_init(&priv->lock);
return 0;
}
static void
-nv50_bar_dtor(struct nouveau_object *object)
+nv50_bar_dtor(struct nvkm_object *object)
{
struct nv50_bar_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->bar1);
- nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
- nouveau_gpuobj_ref(NULL, &priv->bar3);
+ nvkm_gpuobj_ref(NULL, &priv->bar1);
+ nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
+ nvkm_gpuobj_ref(NULL, &priv->bar3);
if (priv->bar3_vm) {
- nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
- nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
+ nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
+ nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
}
- nouveau_gpuobj_ref(NULL, &priv->pgd);
- nouveau_gpuobj_ref(NULL, &priv->pad);
- nouveau_gpuobj_ref(NULL, &priv->mem);
- nouveau_bar_destroy(&priv->base);
+ nvkm_gpuobj_ref(NULL, &priv->pgd);
+ nvkm_gpuobj_ref(NULL, &priv->pad);
+ nvkm_gpuobj_ref(NULL, &priv->mem);
+ nvkm_bar_destroy(&priv->base);
}
static int
-nv50_bar_init(struct nouveau_object *object)
+nv50_bar_init(struct nvkm_object *object)
{
struct nv50_bar_priv *priv = (void *)object;
int ret, i;
- ret = nouveau_bar_init(&priv->base);
+ ret = nvkm_bar_init(&priv->base);
if (ret)
return ret;
@@ -255,16 +253,16 @@ nv50_bar_init(struct nouveau_object *object)
}
static int
-nv50_bar_fini(struct nouveau_object *object, bool suspend)
+nv50_bar_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_bar_priv *priv = (void *)object;
- return nouveau_bar_fini(&priv->base, suspend);
+ return nvkm_bar_fini(&priv->base, suspend);
}
-struct nouveau_oclass
+struct nvkm_oclass
nv50_bar_oclass = {
.handle = NV_SUBDEV(BAR, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_bar_ctor,
.dtor = nv50_bar_dtor,
.init = nv50_bar_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
new file mode 100644
index 000000000000..aa85f61b48c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_BAR_PRIV_H__
+#define __NVKM_BAR_PRIV_H__
+#include <subdev/bar.h>
+
+#define nvkm_bar_create(p,e,o,d) \
+ nvkm_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_bar_init(p) \
+ nvkm_subdev_init(&(p)->base)
+#define nvkm_bar_fini(p,s) \
+ nvkm_subdev_fini(&(p)->base, (s))
+
+int nvkm_bar_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void nvkm_bar_destroy(struct nvkm_bar *);
+
+void _nvkm_bar_dtor(struct nvkm_object *);
+#define _nvkm_bar_init _nvkm_subdev_init
+#define _nvkm_bar_fini _nvkm_subdev_fini
+
+int nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *,
+ struct nvkm_mem *, struct nvkm_object **);
+
+void g84_bar_flush(struct nvkm_bar *);
+
+int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void gf100_bar_dtor(struct nvkm_object *);
+int gf100_bar_init(struct nvkm_object *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
new file mode 100644
index 000000000000..64730d5e9351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -0,0 +1,37 @@
+nvkm-y += nvkm/subdev/bios/base.o
+nvkm-y += nvkm/subdev/bios/bit.o
+nvkm-y += nvkm/subdev/bios/boost.o
+nvkm-y += nvkm/subdev/bios/conn.o
+nvkm-y += nvkm/subdev/bios/cstep.o
+nvkm-y += nvkm/subdev/bios/dcb.o
+nvkm-y += nvkm/subdev/bios/disp.o
+nvkm-y += nvkm/subdev/bios/dp.o
+nvkm-y += nvkm/subdev/bios/extdev.o
+nvkm-y += nvkm/subdev/bios/fan.o
+nvkm-y += nvkm/subdev/bios/gpio.o
+nvkm-y += nvkm/subdev/bios/i2c.o
+nvkm-y += nvkm/subdev/bios/image.o
+nvkm-y += nvkm/subdev/bios/init.o
+nvkm-y += nvkm/subdev/bios/mxm.o
+nvkm-y += nvkm/subdev/bios/npde.o
+nvkm-y += nvkm/subdev/bios/pcir.o
+nvkm-y += nvkm/subdev/bios/perf.o
+nvkm-y += nvkm/subdev/bios/pll.o
+nvkm-y += nvkm/subdev/bios/pmu.o
+nvkm-y += nvkm/subdev/bios/ramcfg.o
+nvkm-y += nvkm/subdev/bios/rammap.o
+nvkm-y += nvkm/subdev/bios/shadow.o
+nvkm-y += nvkm/subdev/bios/shadowacpi.o
+nvkm-y += nvkm/subdev/bios/shadowof.o
+nvkm-y += nvkm/subdev/bios/shadowpci.o
+nvkm-y += nvkm/subdev/bios/shadowramin.o
+nvkm-y += nvkm/subdev/bios/shadowrom.o
+nvkm-y += nvkm/subdev/bios/timing.o
+nvkm-y += nvkm/subdev/bios/therm.o
+nvkm-y += nvkm/subdev/bios/vmap.o
+nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/xpio.o
+nvkm-y += nvkm/subdev/bios/M0203.o
+nvkm-y += nvkm/subdev/bios/M0205.o
+nvkm-y += nvkm/subdev/bios/M0209.o
+nvkm-y += nvkm/subdev/bios/P0260.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
index 28906b16d4e5..08eb03fbc203 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/M0203.h>
u32
-nvbios_M0203Te(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_M0203Te(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_M;
u32 data = 0x00000000;
@@ -53,7 +52,7 @@ nvbios_M0203Te(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u32
-nvbios_M0203Tp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+nvbios_M0203Tp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_M0203T *info)
{
u32 data = nvbios_M0203Te(bios, ver, hdr, cnt, len);
@@ -70,7 +69,7 @@ nvbios_M0203Tp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
}
u32
-nvbios_M0203Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+nvbios_M0203Ee(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len;
u32 data = nvbios_M0203Te(bios, ver, hdr, &cnt, &len);
@@ -83,7 +82,7 @@ nvbios_M0203Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
}
u32
-nvbios_M0203Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+nvbios_M0203Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_M0203E *info)
{
u32 data = nvbios_M0203Ee(bios, idx, ver, hdr);
@@ -101,7 +100,7 @@ nvbios_M0203Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
}
u32
-nvbios_M0203Em(struct nouveau_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
+nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
struct nvbios_M0203E *info)
{
struct nvbios_M0203T M0203T;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
index ac9617c5fc2a..e1a8ad5f3066 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/M0205.h>
u32
-nvbios_M0205Te(struct nouveau_bios *bios,
+nvbios_M0205Te(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_M;
@@ -56,7 +55,7 @@ nvbios_M0205Te(struct nouveau_bios *bios,
}
u32
-nvbios_M0205Tp(struct nouveau_bios *bios,
+nvbios_M0205Tp(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz,
struct nvbios_M0205T *info)
{
@@ -73,7 +72,7 @@ nvbios_M0205Tp(struct nouveau_bios *bios,
}
u32
-nvbios_M0205Ee(struct nouveau_bios *bios, int idx,
+nvbios_M0205Ee(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
@@ -89,7 +88,7 @@ nvbios_M0205Ee(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_M0205Ep(struct nouveau_bios *bios, int idx,
+nvbios_M0205Ep(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_M0205E *info)
{
@@ -106,7 +105,7 @@ nvbios_M0205Ep(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_M0205Se(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
+nvbios_M0205Se(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len;
@@ -120,7 +119,7 @@ nvbios_M0205Se(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
}
u32
-nvbios_M0205Sp(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
+nvbios_M0205Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
struct nvbios_M0205S *info)
{
u32 data = nvbios_M0205Se(bios, ent, idx, ver, hdr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
index b142a510e89f..3026920c3358 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/M0209.h>
u32
-nvbios_M0209Te(struct nouveau_bios *bios,
+nvbios_M0209Te(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_M;
@@ -56,7 +55,7 @@ nvbios_M0209Te(struct nouveau_bios *bios,
}
u32
-nvbios_M0209Ee(struct nouveau_bios *bios, int idx,
+nvbios_M0209Ee(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
@@ -72,9 +71,8 @@ nvbios_M0209Ee(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_M0209Ep(struct nouveau_bios *bios, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_M0209E *info)
+nvbios_M0209Ep(struct nvkm_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_M0209E *info)
{
u32 data = nvbios_M0209Ee(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
@@ -94,7 +92,7 @@ nvbios_M0209Ep(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_M0209Se(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
+nvbios_M0209Se(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len;
@@ -108,7 +106,7 @@ nvbios_M0209Se(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
}
u32
-nvbios_M0209Sp(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
+nvbios_M0209Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
struct nvbios_M0209S *info)
{
struct nvbios_M0209E M0209E;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
index 199f4e5f7488..b72edcf849b6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
-#include <subdev/bios/ramcfg.h>
#include <subdev/bios/P0260.h>
u32
-nvbios_P0260Te(struct nouveau_bios *bios,
+nvbios_P0260Te(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
struct bit_entry bit_P;
@@ -57,7 +55,7 @@ nvbios_P0260Te(struct nouveau_bios *bios,
}
u32
-nvbios_P0260Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_P0260Ee(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt, xnr, xsz;
u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, len, &xnr, &xsz);
@@ -67,7 +65,7 @@ nvbios_P0260Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
}
u32
-nvbios_P0260Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+nvbios_P0260Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_P0260E *info)
{
u32 data = nvbios_P0260Ee(bios, idx, ver, len);
@@ -83,7 +81,7 @@ nvbios_P0260Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
}
u32
-nvbios_P0260Xe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *xsz)
+nvbios_P0260Xe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *xsz)
{
u8 hdr, cnt, len, xnr;
u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, &len, &xnr, xsz);
@@ -93,7 +91,7 @@ nvbios_P0260Xe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *xsz)
}
u32
-nvbios_P0260Xp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+nvbios_P0260Xp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_P0260X *info)
{
u32 data = nvbios_P0260Xe(bios, idx, ver, hdr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 7df3a273553d..8db204f92ed3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -21,18 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/object.h>
-#include <core/device.h>
-#include <core/subdev.h>
-#include <core/option.h>
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
-#include "priv.h"
-
u8
nvbios_checksum(const u8 *data, int size)
{
@@ -59,7 +53,7 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
}
int
-nvbios_extend(struct nouveau_bios *bios, u32 length)
+nvbios_extend(struct nvkm_bios *bios, u32 length)
{
if (bios->size < length) {
u8 *prev = bios->data;
@@ -76,59 +70,58 @@ nvbios_extend(struct nouveau_bios *bios, u32 length)
}
static u8
-nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
+nvkm_bios_rd08(struct nvkm_object *object, u64 addr)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
return bios->data[addr];
}
static u16
-nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
+nvkm_bios_rd16(struct nvkm_object *object, u64 addr)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
return get_unaligned_le16(&bios->data[addr]);
}
static u32
-nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
+nvkm_bios_rd32(struct nvkm_object *object, u64 addr)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
return get_unaligned_le32(&bios->data[addr]);
}
static void
-nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
+nvkm_bios_wr08(struct nvkm_object *object, u64 addr, u8 data)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
bios->data[addr] = data;
}
static void
-nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
+nvkm_bios_wr16(struct nvkm_object *object, u64 addr, u16 data)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
put_unaligned_le16(data, &bios->data[addr]);
}
static void
-nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
+nvkm_bios_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
put_unaligned_le32(data, &bios->data[addr]);
}
static int
-nouveau_bios_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_bios *bios;
+ struct nvkm_bios *bios;
struct bit_entry bit_i;
int ret;
- ret = nouveau_subdev_create(parent, engine, oclass, 0,
- "VBIOS", "bios", &bios);
+ ret = nvkm_subdev_create(parent, engine, oclass, 0,
+ "VBIOS", "bios", &bios);
*pobject = nv_object(bios);
if (ret)
return ret;
@@ -174,40 +167,40 @@ nouveau_bios_ctor(struct nouveau_object *parent,
}
static void
-nouveau_bios_dtor(struct nouveau_object *object)
+nvkm_bios_dtor(struct nvkm_object *object)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
kfree(bios->data);
- nouveau_subdev_destroy(&bios->base);
+ nvkm_subdev_destroy(&bios->base);
}
static int
-nouveau_bios_init(struct nouveau_object *object)
+nvkm_bios_init(struct nvkm_object *object)
{
- struct nouveau_bios *bios = (void *)object;
- return nouveau_subdev_init(&bios->base);
+ struct nvkm_bios *bios = (void *)object;
+ return nvkm_subdev_init(&bios->base);
}
static int
-nouveau_bios_fini(struct nouveau_object *object, bool suspend)
+nvkm_bios_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_bios *bios = (void *)object;
- return nouveau_subdev_fini(&bios->base, suspend);
+ struct nvkm_bios *bios = (void *)object;
+ return nvkm_subdev_fini(&bios->base, suspend);
}
-struct nouveau_oclass
-nouveau_bios_oclass = {
+struct nvkm_oclass
+nvkm_bios_oclass = {
.handle = NV_SUBDEV(VBIOS, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nouveau_bios_ctor,
- .dtor = nouveau_bios_dtor,
- .init = nouveau_bios_init,
- .fini = nouveau_bios_fini,
- .rd08 = nouveau_bios_rd08,
- .rd16 = nouveau_bios_rd16,
- .rd32 = nouveau_bios_rd32,
- .wr08 = nouveau_bios_wr08,
- .wr16 = nouveau_bios_wr16,
- .wr32 = nouveau_bios_wr32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nvkm_bios_ctor,
+ .dtor = nvkm_bios_dtor,
+ .init = nvkm_bios_init,
+ .fini = nvkm_bios_fini,
+ .rd08 = nvkm_bios_rd08,
+ .rd16 = nvkm_bios_rd16,
+ .rd32 = nvkm_bios_rd32,
+ .wr08 = nvkm_bios_wr08,
+ .wr16 = nvkm_bios_wr16,
+ .wr32 = nvkm_bios_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
index 1d03a3f2b2d2..eab540496cdf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
@@ -21,14 +21,11 @@
*
* Authors: Ben Skeggs
*/
-
-#include "core/object.h"
-
-#include "subdev/bios.h"
-#include "subdev/bios/bit.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
int
-bit_entry(struct nouveau_bios *bios, u8 id, struct bit_entry *bit)
+bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
{
if (likely(bios->bit_offset)) {
u8 entries = nv_ro08(bios, bios->bit_offset + 10);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index c1835e591c44..12e958533f46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/boost.h>
u16
-nvbios_boostTe(struct nouveau_bios *bios,
+nvbios_boostTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
@@ -57,7 +56,7 @@ nvbios_boostTe(struct nouveau_bios *bios,
}
u16
-nvbios_boostEe(struct nouveau_bios *bios, int idx,
+nvbios_boostEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
@@ -73,7 +72,7 @@ nvbios_boostEe(struct nouveau_bios *bios, int idx,
}
u16
-nvbios_boostEp(struct nouveau_bios *bios, int idx,
+nvbios_boostEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
@@ -87,7 +86,7 @@ nvbios_boostEp(struct nouveau_bios *bios, int idx,
}
u16
-nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
+nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
u32 data, idx = 0;
@@ -99,7 +98,7 @@ nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
}
u16
-nvbios_boostSe(struct nouveau_bios *bios, int idx,
+nvbios_boostSe(struct nvkm_bios *bios, int idx,
u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
if (data && idx < cnt) {
@@ -111,7 +110,7 @@ nvbios_boostSe(struct nouveau_bios *bios, int idx,
}
u16
-nvbios_boostSp(struct nouveau_bios *bios, int idx,
+nvbios_boostSp(struct nvkm_bios *bios, int idx,
u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
struct nvbios_boostS *info)
{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
index 2ede3bcd96a1..706a1650a4f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
@@ -21,15 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/device.h>
-
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/conn.h>
u32
-nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
@@ -46,7 +43,7 @@ nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u32
-nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+nvbios_connTp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_connT *info)
{
u32 data = nvbios_connTe(bios, ver, hdr, cnt, len);
@@ -62,7 +59,7 @@ nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
}
u32
-nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_connEe(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u32 data = nvbios_connTe(bios, ver, &hdr, &cnt, len);
@@ -72,7 +69,7 @@ nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
}
u32
-nvbios_connEp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
struct nvbios_connE *info)
{
u32 data = nvbios_connEe(bios, idx, ver, len);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index d3b15327fbfd..16f7ad8a4f80 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/cstep.h>
u16
-nvbios_cstepTe(struct nouveau_bios *bios,
+nvbios_cstepTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
struct bit_entry bit_P;
@@ -57,7 +56,7 @@ nvbios_cstepTe(struct nouveau_bios *bios,
}
u16
-nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+nvbios_cstepEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
@@ -70,7 +69,7 @@ nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
}
u16
-nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
@@ -83,7 +82,7 @@ nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
}
u16
-nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
+nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
u32 data, idx = 0;
@@ -95,7 +94,7 @@ nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
}
u16
-nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+nvbios_cstepXe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
@@ -108,7 +107,7 @@ nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
}
u16
-nvbios_cstepXp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *info)
{
u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 96099aff8b41..8d78140f9401 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -21,16 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
-#include "core/device.h"
-
-#include "subdev/bios.h"
-#include "subdev/bios/dcb.h"
+#include <core/device.h>
u16
-dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- struct nouveau_device *device = nv_device(bios);
+ struct nvkm_device *device = nv_device(bios);
u16 dcb = 0x0000;
if (device->card_type > NV_04)
@@ -98,7 +97,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+dcb_outp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len);
@@ -120,7 +119,7 @@ dcb_outp_hashm(struct dcb_output *outp)
}
u16
-dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
struct dcb_output *outp)
{
u16 dcb = dcb_outp(bios, idx, ver, len);
@@ -194,7 +193,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
}
u16
-dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+dcb_outp_match(struct nvkm_bios *bios, u16 type, u16 mask,
u8 *ver, u8 *len, struct dcb_output *outp)
{
u16 dcb, idx = 0;
@@ -208,8 +207,8 @@ dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
}
int
-dcb_outp_foreach(struct nouveau_bios *bios, void *data,
- int (*exec)(struct nouveau_bios *, void *, int, u16))
+dcb_outp_foreach(struct nvkm_bios *bios, void *data,
+ int (*exec)(struct nvkm_bios *, void *, int, u16))
{
int ret, idx = -1;
u8 ver, len;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index 51f355599694..262c410b7ee2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/disp.h>
u16
-nvbios_disp_table(struct nouveau_bios *bios,
+nvbios_disp_table(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
{
struct bit_entry U;
@@ -57,8 +56,7 @@ nvbios_disp_table(struct nouveau_bios *bios,
}
u16
-nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
- u8 *ver, u8 *len, u8 *sub)
+nvbios_disp_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub)
{
u8 hdr, cnt;
u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
@@ -69,8 +67,7 @@ nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
}
u16
-nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
- u8 *ver, u8 *len, u8 *sub,
+nvbios_disp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub,
struct nvbios_disp *info)
{
u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
@@ -82,7 +79,7 @@ nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
}
u16
-nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+nvbios_outp_entry(struct nvkm_bios *bios, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct nvbios_disp info;
@@ -96,9 +93,8 @@ nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
}
u16
-nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_outp *info)
+nvbios_outp_parse(struct nvkm_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *info)
{
u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
if (data && *hdr >= 0x0a) {
@@ -117,9 +113,8 @@ nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
}
u16
-nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_outp *info)
+nvbios_outp_match(struct nvkm_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *info)
{
u16 data, idx = 0;
while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
@@ -132,7 +127,7 @@ nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
}
u16
-nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+nvbios_ocfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
if (idx < *cnt)
@@ -141,9 +136,8 @@ nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ocfg *info)
+nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
if (data) {
@@ -155,9 +149,8 @@ nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ocfg *info)
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
u16 data, idx = 0;
while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
@@ -168,7 +161,7 @@ nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
}
u16
-nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
{
while (cmp) {
if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index cef53f81f12b..95970faae6c8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
-
-#include "subdev/bios.h"
-#include "subdev/bios/bit.h"
-#include "subdev/bios/dp.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/dp.h>
static u16
-nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry d;
@@ -57,7 +55,7 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
static u16
-nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
@@ -86,7 +84,7 @@ nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
}
u16
-nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpout *info)
{
@@ -128,7 +126,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
}
u16
-nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+nvbios_dpout_match(struct nvkm_bios *bios, u16 type, u16 mask,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpout *info)
{
@@ -143,7 +141,7 @@ nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
}
static u16
-nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+nvbios_dpcfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
if (*ver >= 0x40) {
@@ -160,7 +158,7 @@ nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpcfg *info)
{
@@ -190,7 +188,7 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
+nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpcfg *info)
{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index 49285d4f7ca5..a8503a1854c4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -21,13 +21,12 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/extdev.h>
static u16
-extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
+extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
u8 dcb_ver, dcb_hdr, dcb_cnt, dcb_len;
u16 dcb, extdev = 0;
@@ -44,12 +43,11 @@ extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
*hdr = nv_ro08(bios, extdev + 1);
*cnt = nv_ro08(bios, extdev + 2);
*len = nv_ro08(bios, extdev + 3);
-
return extdev + *hdr;
}
static u16
-nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_extdev_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt);
@@ -59,8 +57,8 @@ nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
}
static void
-extdev_parse_entry(struct nouveau_bios *bios, u16 offset,
- struct nvbios_extdev_func *entry)
+extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
+ struct nvbios_extdev_func *entry)
{
entry->type = nv_ro08(bios, offset + 0);
entry->addr = nv_ro08(bios, offset + 1);
@@ -68,7 +66,7 @@ extdev_parse_entry(struct nouveau_bios *bios, u16 offset,
}
int
-nvbios_extdev_parse(struct nouveau_bios *bios, int idx,
+nvbios_extdev_parse(struct nvkm_bios *bios, int idx,
struct nvbios_extdev_func *func)
{
u8 ver, len;
@@ -78,12 +76,11 @@ nvbios_extdev_parse(struct nouveau_bios *bios, int idx,
return -EINVAL;
extdev_parse_entry(bios, entry, func);
-
return 0;
}
int
-nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
+nvbios_extdev_find(struct nvkm_bios *bios, enum nvbios_extdev_type type,
struct nvbios_extdev_func *func)
{
u8 ver, len, i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index e419892240f5..8dba70d9d9a9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -21,13 +21,12 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/fan.h>
u16
-nvbios_fan_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
u16 fan = 0x0000;
@@ -54,7 +53,7 @@ nvbios_fan_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-nvbios_fan_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+nvbios_fan_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len)
{
u16 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
@@ -64,7 +63,7 @@ nvbios_fan_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
}
u16
-nvbios_fan_parse(struct nouveau_bios *bios, struct nvbios_therm_fan *fan)
+nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
u8 ver, hdr, cnt, len;
@@ -89,5 +88,6 @@ nvbios_fan_parse(struct nouveau_bios *bios, struct nvbios_therm_fan *fan)
fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff;
}
+
return data;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
index 172a4f999990..8ce154d88f51 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
@@ -21,14 +21,13 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/xpio.h>
u16
-dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+dcb_gpio_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 data = 0x0000;
u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
@@ -59,7 +58,7 @@ dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
+dcb_gpio_entry(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */
u16 gpio;
@@ -71,11 +70,12 @@ dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
if (gpio && ent < cnt)
return gpio + hdr + (ent * *len);
+
return 0x0000;
}
u16
-dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
+dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *gpio)
{
u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
@@ -116,7 +116,7 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
}
u16
-dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+dcb_gpio_match(struct nvkm_bios *bios, int idx, u8 func, u8 line,
u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
{
u8 hdr, cnt, i = 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index 282320ba9264..d1a89b2bd5c1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
-
-#include "subdev/bios.h"
-#include "subdev/bios/dcb.h"
-#include "subdev/bios/i2c.h"
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
u16
-dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+dcb_i2c_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 i2c = 0x0000;
u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
@@ -60,7 +58,7 @@ dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+dcb_i2c_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 i2c = dcb_i2c_table(bios, ver, &hdr, &cnt, len);
@@ -70,7 +68,7 @@ dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
}
int
-dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
+dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
{
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 373f9a564ac9..1815540a0e8b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -21,14 +21,13 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <subdev/bios.h>
#include <subdev/bios/image.h>
#include <subdev/bios/pcir.h>
#include <subdev/bios/npde.h>
static bool
-nvbios_imagen(struct nouveau_bios *bios, struct nvbios_image *image)
+nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
{
struct nvbios_pcirT pcir;
struct nvbios_npdeT npde;
@@ -66,7 +65,7 @@ nvbios_imagen(struct nouveau_bios *bios, struct nvbios_image *image)
}
bool
-nvbios_image(struct nouveau_bios *bios, int idx, struct nvbios_image *image)
+nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
{
memset(image, 0x00, sizeof(*image));
do {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index c6579ef32cd1..f67cdae1e90a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1,19 +1,41 @@
-#include <core/engine.h>
-#include <core/device.h>
-
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
#include <subdev/bios.h>
-#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/bmp.h>
#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
#include <subdev/bios/ramcfg.h>
+
+#include <core/device.h>
#include <subdev/devinit.h>
+#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/vga.h>
-#include <subdev/gpio.h>
#define bioslog(lvl, fmt, args...) do { \
nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset, \
@@ -97,7 +119,7 @@ init_crtc(struct nvbios_init *init)
static u8
init_conn(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
struct nvbios_connE connE;
u8 ver, hdr;
u32 conn;
@@ -119,7 +141,7 @@ init_conn(struct nvbios_init *init)
static inline u32
init_nvreg(struct nvbios_init *init, u32 reg)
{
- struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
+ struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
/* C51 (at least) sometimes has the lower bits set which the VBIOS
* interprets to mean that access needs to go through certain IO
@@ -203,7 +225,7 @@ init_wrport(struct nvbios_init *init, u16 port, u8 value)
static u8
init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
{
- struct nouveau_subdev *subdev = init->subdev;
+ struct nvkm_subdev *subdev = init->subdev;
if (init_exec(init)) {
int head = init->crtc < 0 ? 0 : init->crtc;
return nv_rdvgai(subdev, head, port, index);
@@ -232,10 +254,10 @@ init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
}
}
-static struct nouveau_i2c_port *
+static struct nvkm_i2c_port *
init_i2c(struct nvbios_init *init, int index)
{
- struct nouveau_i2c *i2c = nouveau_i2c(init->bios);
+ struct nvkm_i2c *i2c = nvkm_i2c(init->bios);
if (index == 0xff) {
index = NV_I2C_DEFAULT(0);
@@ -265,7 +287,7 @@ init_i2c(struct nvbios_init *init, int index)
static int
init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
{
- struct nouveau_i2c_port *port = init_i2c(init, index);
+ struct nvkm_i2c_port *port = init_i2c(init, index);
if (port && init_exec(init))
return nv_rdi2cr(port, addr, reg);
return -ENODEV;
@@ -274,7 +296,7 @@ init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
static int
init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
{
- struct nouveau_i2c_port *port = init_i2c(init, index);
+ struct nvkm_i2c_port *port = init_i2c(init, index);
if (port && init_exec(init))
return nv_wri2cr(port, addr, reg, val);
return -ENODEV;
@@ -283,7 +305,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
static u8
init_rdauxr(struct nvbios_init *init, u32 addr)
{
- struct nouveau_i2c_port *port = init_i2c(init, -2);
+ struct nvkm_i2c_port *port = init_i2c(init, -2);
u8 data;
if (port && init_exec(init)) {
@@ -299,7 +321,7 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
- struct nouveau_i2c_port *port = init_i2c(init, -2);
+ struct nvkm_i2c_port *port = init_i2c(init, -2);
if (port && init_exec(init)) {
int ret = nv_wraux(port, addr, &data, 1);
if (ret)
@@ -312,7 +334,7 @@ init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
static void
init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
{
- struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
+ struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
if (devinit->pll_set && init_exec(init)) {
int ret = devinit->pll_set(devinit, id, freq);
if (ret)
@@ -325,7 +347,7 @@ init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
*****************************************************************************/
static u16
-init_table(struct nouveau_bios *bios, u16 *len)
+init_table(struct nvkm_bios *bios, u16 *len)
{
struct bit_entry bit_I;
@@ -345,7 +367,7 @@ init_table(struct nouveau_bios *bios, u16 *len)
static u16
init_table_(struct nvbios_init *init, u16 offset, const char *name)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 len, data = init_table(bios, &len);
if (data) {
if (len >= offset + 2) {
@@ -375,7 +397,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name)
#define init_xlat_table(b) init_table_((b), 0x10, "xlat table");
static u16
-init_script(struct nouveau_bios *bios, int index)
+init_script(struct nvkm_bios *bios, int index)
{
struct nvbios_init init = { .bios = bios };
u16 bmp_ver = bmp_version(bios), data;
@@ -396,7 +418,7 @@ init_script(struct nouveau_bios *bios, int index)
}
static u16
-init_unknown_script(struct nouveau_bios *bios)
+init_unknown_script(struct nvkm_bios *bios)
{
u16 len, data = init_table(bios, &len);
if (data && len >= 16)
@@ -429,7 +451,7 @@ init_ram_restrict(struct nvbios_init *init)
static u8
init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 table = init_xlat_table(init);
if (table) {
u16 data = nv_ro16(bios, table + (index * 2));
@@ -447,7 +469,7 @@ init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
static bool
init_condition_met(struct nvbios_init *init, u8 cond)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 table = init_condition_table(init);
if (table) {
u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
@@ -463,7 +485,7 @@ init_condition_met(struct nvbios_init *init, u8 cond)
static bool
init_io_condition_met(struct nvbios_init *init, u8 cond)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 table = init_io_condition_table(init);
if (table) {
u16 port = nv_ro16(bios, table + (cond * 5) + 0);
@@ -480,7 +502,7 @@ init_io_condition_met(struct nvbios_init *init, u8 cond)
static bool
init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 table = init_io_flag_condition_table(init);
if (table) {
u16 port = nv_ro16(bios, table + (cond * 9) + 0);
@@ -515,7 +537,6 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
* CR58 for CR57 = 0 to index a table of offsets to the basic
* 0x6808b0 address, and then flip the offset by 8.
*/
-
const int pramdac_offset[13] = {
0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
const u32 pramdac_table[4] = {
@@ -589,7 +610,7 @@ init_done(struct nvbios_init *init)
static void
init_io_restrict_prog(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 port = nv_ro16(bios, init->offset + 1);
u8 index = nv_ro08(bios, init->offset + 3);
u8 mask = nv_ro08(bios, init->offset + 4);
@@ -626,7 +647,7 @@ init_io_restrict_prog(struct nvbios_init *init)
static void
init_repeat(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 count = nv_ro08(bios, init->offset + 1);
u16 repeat = init->repeat;
@@ -652,7 +673,7 @@ init_repeat(struct nvbios_init *init)
static void
init_io_restrict_pll(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 port = nv_ro16(bios, init->offset + 1);
u8 index = nv_ro08(bios, init->offset + 3);
u8 mask = nv_ro08(bios, init->offset + 4);
@@ -708,7 +729,7 @@ init_end_repeat(struct nvbios_init *init)
static void
init_copy(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u8 shift = nv_ro08(bios, init->offset + 5);
u8 smask = nv_ro08(bios, init->offset + 6);
@@ -747,7 +768,7 @@ init_not(struct nvbios_init *init)
static void
init_io_flag_condition(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 cond = nv_ro08(bios, init->offset + 1);
trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
@@ -764,7 +785,7 @@ init_io_flag_condition(struct nvbios_init *init)
static void
init_dp_condition(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
struct nvbios_dpout info;
u8 cond = nv_ro08(bios, init->offset + 1);
u8 unkn = nv_ro08(bios, init->offset + 2);
@@ -812,7 +833,7 @@ init_dp_condition(struct nvbios_init *init)
static void
init_io_mask_or(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 or = init_or(init);
u8 data;
@@ -831,7 +852,7 @@ init_io_mask_or(struct nvbios_init *init)
static void
init_io_or(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 or = init_or(init);
u8 data;
@@ -850,7 +871,7 @@ init_io_or(struct nvbios_init *init)
static void
init_andn_reg(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u32 mask = nv_ro32(bios, init->offset + 5);
@@ -867,7 +888,7 @@ init_andn_reg(struct nvbios_init *init)
static void
init_or_reg(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u32 mask = nv_ro32(bios, init->offset + 5);
@@ -884,7 +905,7 @@ init_or_reg(struct nvbios_init *init)
static void
init_idx_addr_latched(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 creg = nv_ro32(bios, init->offset + 1);
u32 dreg = nv_ro32(bios, init->offset + 5);
u32 mask = nv_ro32(bios, init->offset + 9);
@@ -914,7 +935,7 @@ init_idx_addr_latched(struct nvbios_init *init)
static void
init_io_restrict_pll2(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 port = nv_ro16(bios, init->offset + 1);
u8 index = nv_ro08(bios, init->offset + 3);
u8 mask = nv_ro08(bios, init->offset + 4);
@@ -949,7 +970,7 @@ init_io_restrict_pll2(struct nvbios_init *init)
static void
init_pll2(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u32 freq = nv_ro32(bios, init->offset + 5);
@@ -966,7 +987,7 @@ init_pll2(struct nvbios_init *init)
static void
init_i2c_byte(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
u8 count = nv_ro08(bios, init->offset + 3);
@@ -997,7 +1018,7 @@ init_i2c_byte(struct nvbios_init *init)
static void
init_zm_i2c_byte(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
u8 count = nv_ro08(bios, init->offset + 3);
@@ -1014,7 +1035,6 @@ init_zm_i2c_byte(struct nvbios_init *init)
init_wri2cr(init, index, addr, reg, data);
}
-
}
/**
@@ -1024,7 +1044,7 @@ init_zm_i2c_byte(struct nvbios_init *init)
static void
init_zm_i2c(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
u8 count = nv_ro08(bios, init->offset + 3);
@@ -1040,7 +1060,7 @@ init_zm_i2c(struct nvbios_init *init)
}
if (init_exec(init)) {
- struct nouveau_i2c_port *port = init_i2c(init, index);
+ struct nvkm_i2c_port *port = init_i2c(init, index);
struct i2c_msg msg = {
.addr = addr, .flags = 0, .len = count, .buf = data,
};
@@ -1058,7 +1078,7 @@ init_zm_i2c(struct nvbios_init *init)
static void
init_tmds(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 tmds = nv_ro08(bios, init->offset + 1);
u8 addr = nv_ro08(bios, init->offset + 2);
u8 mask = nv_ro08(bios, init->offset + 3);
@@ -1084,7 +1104,7 @@ init_tmds(struct nvbios_init *init)
static void
init_zm_tmds_group(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 tmds = nv_ro08(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 2);
u32 reg = init_tmds_reg(init, tmds);
@@ -1111,7 +1131,7 @@ init_zm_tmds_group(struct nvbios_init *init)
static void
init_cr_idx_adr_latch(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 addr0 = nv_ro08(bios, init->offset + 1);
u8 addr1 = nv_ro08(bios, init->offset + 2);
u8 base = nv_ro08(bios, init->offset + 3);
@@ -1141,7 +1161,7 @@ init_cr_idx_adr_latch(struct nvbios_init *init)
static void
init_cr(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 addr = nv_ro08(bios, init->offset + 1);
u8 mask = nv_ro08(bios, init->offset + 2);
u8 data = nv_ro08(bios, init->offset + 3);
@@ -1161,7 +1181,7 @@ init_cr(struct nvbios_init *init)
static void
init_zm_cr(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 addr = nv_ro08(bios, init->offset + 1);
u8 data = nv_ro08(bios, init->offset + 2);
@@ -1178,7 +1198,7 @@ init_zm_cr(struct nvbios_init *init)
static void
init_zm_cr_group(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 count = nv_ro08(bios, init->offset + 1);
trace("ZM_CR_GROUP\n");
@@ -1202,7 +1222,7 @@ init_zm_cr_group(struct nvbios_init *init)
static void
init_condition_time(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 cond = nv_ro08(bios, init->offset + 1);
u8 retry = nv_ro08(bios, init->offset + 2);
u8 wait = min((u16)retry * 50, 100);
@@ -1229,7 +1249,7 @@ init_condition_time(struct nvbios_init *init)
static void
init_ltime(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 msec = nv_ro16(bios, init->offset + 1);
trace("LTIME\t0x%04x\n", msec);
@@ -1246,7 +1266,7 @@ init_ltime(struct nvbios_init *init)
static void
init_zm_reg_sequence(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 base = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
@@ -1271,7 +1291,7 @@ init_zm_reg_sequence(struct nvbios_init *init)
static void
init_sub_direct(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 addr = nv_ro16(bios, init->offset + 1);
u16 save;
@@ -1297,7 +1317,7 @@ init_sub_direct(struct nvbios_init *init)
static void
init_jump(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 offset = nv_ro16(bios, init->offset + 1);
trace("JUMP\t0x%04x\n", offset);
@@ -1315,7 +1335,7 @@ init_jump(struct nvbios_init *init)
static void
init_i2c_if(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 addr = nv_ro08(bios, init->offset + 2);
u8 reg = nv_ro08(bios, init->offset + 3);
@@ -1342,7 +1362,7 @@ init_i2c_if(struct nvbios_init *init)
static void
init_copy_nv_reg(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 sreg = nv_ro32(bios, init->offset + 1);
u8 shift = nv_ro08(bios, init->offset + 5);
u32 smask = nv_ro32(bios, init->offset + 6);
@@ -1368,7 +1388,7 @@ init_copy_nv_reg(struct nvbios_init *init)
static void
init_zm_index_io(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 port = nv_ro16(bios, init->offset + 1);
u8 index = nv_ro08(bios, init->offset + 3);
u8 data = nv_ro08(bios, init->offset + 4);
@@ -1386,7 +1406,7 @@ init_zm_index_io(struct nvbios_init *init)
static void
init_compute_mem(struct nvbios_init *init)
{
- struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
+ struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
trace("COMPUTE_MEM\n");
init->offset += 1;
@@ -1404,7 +1424,7 @@ init_compute_mem(struct nvbios_init *init)
static void
init_reset(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u32 data1 = nv_ro32(bios, init->offset + 5);
u32 data2 = nv_ro32(bios, init->offset + 9);
@@ -1440,7 +1460,7 @@ init_configure_mem_clk(struct nvbios_init *init)
static void
init_configure_mem(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 mdata, sdata;
u32 addr, data;
@@ -1490,7 +1510,7 @@ init_configure_mem(struct nvbios_init *init)
static void
init_configure_clk(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 mdata, clock;
trace("CONFIGURE_CLK\n");
@@ -1524,7 +1544,7 @@ init_configure_clk(struct nvbios_init *init)
static void
init_configure_preinit(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 strap;
trace("CONFIGURE_PREINIT\n");
@@ -1550,7 +1570,7 @@ init_configure_preinit(struct nvbios_init *init)
static void
init_io(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 port = nv_ro16(bios, init->offset + 1);
u8 mask = nv_ro16(bios, init->offset + 3);
u8 data = nv_ro16(bios, init->offset + 4);
@@ -1590,7 +1610,7 @@ init_io(struct nvbios_init *init)
static void
init_sub(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u16 addr, save;
@@ -1617,7 +1637,7 @@ init_sub(struct nvbios_init *init)
static void
init_ram_condition(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 mask = nv_ro08(bios, init->offset + 1);
u8 value = nv_ro08(bios, init->offset + 2);
@@ -1636,7 +1656,7 @@ init_ram_condition(struct nvbios_init *init)
static void
init_nv_reg(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u32 mask = nv_ro32(bios, init->offset + 5);
u32 data = nv_ro32(bios, init->offset + 9);
@@ -1654,7 +1674,7 @@ init_nv_reg(struct nvbios_init *init)
static void
init_macro(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 macro = nv_ro08(bios, init->offset + 1);
u16 table;
@@ -1690,7 +1710,7 @@ init_resume(struct nvbios_init *init)
static void
init_time(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 usec = nv_ro16(bios, init->offset + 1);
trace("TIME\t0x%04x\n", usec);
@@ -1711,7 +1731,7 @@ init_time(struct nvbios_init *init)
static void
init_condition(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 cond = nv_ro08(bios, init->offset + 1);
trace("CONDITION\t0x%02x\n", cond);
@@ -1728,7 +1748,7 @@ init_condition(struct nvbios_init *init)
static void
init_io_condition(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 cond = nv_ro08(bios, init->offset + 1);
trace("IO_CONDITION\t0x%02x\n", cond);
@@ -1745,7 +1765,7 @@ init_io_condition(struct nvbios_init *init)
static void
init_index_io(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u16 port = nv_ro16(bios, init->offset + 1);
u8 index = nv_ro16(bios, init->offset + 3);
u8 mask = nv_ro08(bios, init->offset + 4);
@@ -1767,7 +1787,7 @@ init_index_io(struct nvbios_init *init)
static void
init_pll(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u32 freq = nv_ro16(bios, init->offset + 5) * 10;
@@ -1784,7 +1804,7 @@ init_pll(struct nvbios_init *init)
static void
init_zm_reg(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 addr = nv_ro32(bios, init->offset + 1);
u32 data = nv_ro32(bios, init->offset + 5);
@@ -1804,7 +1824,7 @@ init_zm_reg(struct nvbios_init *init)
static void
init_ram_restrict_pll(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 type = nv_ro08(bios, init->offset + 1);
u8 count = init_ram_restrict_group_count(init);
u8 strap = init_ram_restrict(init);
@@ -1834,7 +1854,7 @@ init_ram_restrict_pll(struct nvbios_init *init)
static void
init_gpio(struct nvbios_init *init)
{
- struct nouveau_gpio *gpio = nouveau_gpio(init->bios);
+ struct nvkm_gpio *gpio = nvkm_gpio(init->bios);
trace("GPIO\n");
init->offset += 1;
@@ -1850,7 +1870,7 @@ init_gpio(struct nvbios_init *init)
static void
init_ram_restrict_zm_reg_group(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 addr = nv_ro32(bios, init->offset + 1);
u8 incr = nv_ro08(bios, init->offset + 5);
u8 num = nv_ro08(bios, init->offset + 6);
@@ -1888,7 +1908,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
static void
init_copy_zm_reg(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 sreg = nv_ro32(bios, init->offset + 1);
u32 dreg = nv_ro32(bios, init->offset + 5);
@@ -1905,7 +1925,7 @@ init_copy_zm_reg(struct nvbios_init *init)
static void
init_zm_reg_group(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 addr = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
@@ -1927,7 +1947,7 @@ init_zm_reg_group(struct nvbios_init *init)
static void
init_xlat(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 saddr = nv_ro32(bios, init->offset + 1);
u8 sshift = nv_ro08(bios, init->offset + 5);
u8 smask = nv_ro08(bios, init->offset + 6);
@@ -1955,7 +1975,7 @@ init_xlat(struct nvbios_init *init)
static void
init_zm_mask_add(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 addr = nv_ro32(bios, init->offset + 1);
u32 mask = nv_ro32(bios, init->offset + 5);
u32 add = nv_ro32(bios, init->offset + 9);
@@ -1976,7 +1996,7 @@ init_zm_mask_add(struct nvbios_init *init)
static void
init_auxch(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 addr = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
@@ -2000,7 +2020,7 @@ init_auxch(struct nvbios_init *init)
static void
init_zm_auxch(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u32 addr = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
@@ -2022,14 +2042,14 @@ init_zm_auxch(struct nvbios_init *init)
static void
init_i2c_long_if(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
+ struct nvkm_bios *bios = init->bios;
u8 index = nv_ro08(bios, init->offset + 1);
u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
u8 reglo = nv_ro08(bios, init->offset + 3);
u8 reghi = nv_ro08(bios, init->offset + 4);
u8 mask = nv_ro08(bios, init->offset + 5);
u8 data = nv_ro08(bios, init->offset + 6);
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c_port *port;
trace("I2C_LONG_IF\t"
"I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
@@ -2061,8 +2081,8 @@ init_i2c_long_if(struct nvbios_init *init)
static void
init_gpio_ne(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
- struct nouveau_gpio *gpio = nouveau_gpio(bios);
+ struct nvkm_bios *bios = init->bios;
+ struct nvkm_gpio *gpio = nvkm_gpio(bios);
struct dcb_gpio_func func;
u8 count = nv_ro08(bios, init->offset + 1);
u8 idx = 0, ver, len;
@@ -2185,9 +2205,9 @@ nvbios_exec(struct nvbios_init *init)
}
int
-nvbios_init(struct nouveau_subdev *subdev, bool execute)
+nvbios_init(struct nvkm_subdev *subdev, bool execute)
{
- struct nouveau_bios *bios = nouveau_bios(subdev);
+ struct nvkm_bios *bios = nvkm_bios(subdev);
int ret = 0;
int i = -1;
u16 data;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index 2610b11a99b3..c4087df4f85e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/mxm.h>
u16
-mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr)
+mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
{
struct bit_entry x;
@@ -51,28 +50,28 @@ mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr)
*
* MXM v3.x VBIOS are nicer and provide pointers to these tables.
*/
-static u8 nv84_sor_map[16] = {
+static u8 g84_sor_map[16] = {
0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-static u8 nv92_sor_map[16] = {
+static u8 g92_sor_map[16] = {
0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-static u8 nv94_sor_map[16] = {
+static u8 g94_sor_map[16] = {
0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
};
-static u8 nv98_sor_map[16] = {
+static u8 g98_sor_map[16] = {
0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
u8
-mxm_sor_map(struct nouveau_bios *bios, u8 conn)
+mxm_sor_map(struct nvkm_bios *bios, u8 conn)
{
u8 ver, hdr;
u16 mxm = mxm_table(bios, &ver, &hdr);
@@ -95,20 +94,20 @@ mxm_sor_map(struct nouveau_bios *bios, u8 conn)
}
if (bios->version.chip == 0x84 || bios->version.chip == 0x86)
- return nv84_sor_map[conn];
+ return g84_sor_map[conn];
if (bios->version.chip == 0x92)
- return nv92_sor_map[conn];
+ return g92_sor_map[conn];
if (bios->version.chip == 0x94 || bios->version.chip == 0x96)
- return nv94_sor_map[conn];
+ return g94_sor_map[conn];
if (bios->version.chip == 0x98)
- return nv98_sor_map[conn];
+ return g98_sor_map[conn];
nv_warn(bios, "missing sor map\n");
return 0x00;
}
u8
-mxm_ddc_map(struct nouveau_bios *bios, u8 port)
+mxm_ddc_map(struct nvkm_bios *bios, u8 port)
{
u8 ver, hdr;
u16 mxm = mxm_table(bios, &ver, &hdr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
index d694716a166c..fd7dd718b2bf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <subdev/bios.h>
#include <subdev/bios/npde.h>
#include <subdev/bios/pcir.h>
u32
-nvbios_npdeTe(struct nouveau_bios *bios, u32 base)
+nvbios_npdeTe(struct nvkm_bios *bios, u32 base)
{
struct nvbios_pcirT pcir;
u8 ver; u16 hdr;
@@ -47,7 +46,7 @@ nvbios_npdeTe(struct nouveau_bios *bios, u32 base)
}
u32
-nvbios_npdeTp(struct nouveau_bios *bios, u32 base, struct nvbios_npdeT *info)
+nvbios_npdeTp(struct nvkm_bios *bios, u32 base, struct nvbios_npdeT *info)
{
u32 data = nvbios_npdeTe(bios, base);
memset(info, 0x00, sizeof(*info));
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
index 91dae26bc50f..df5978753ae8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
@@ -21,12 +21,11 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <subdev/bios.h>
#include <subdev/bios/pcir.h>
u32
-nvbios_pcirTe(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr)
+nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
{
u32 data = nv_ro16(bios, base + 0x18);
if (data) {
@@ -49,7 +48,7 @@ nvbios_pcirTe(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr)
}
u32
-nvbios_pcirTp(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr,
+nvbios_pcirTp(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr,
struct nvbios_pcirT *info)
{
u32 data = nvbios_pcirTe(bios, base, ver, hdr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 675e221680aa..382ae9cdbf58 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -21,13 +21,14 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/perf.h>
+#include <core/device.h>
+
u16
-nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
@@ -76,7 +77,7 @@ nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
}
u16
-nvbios_perf_entry(struct nouveau_bios *bios, int idx,
+nvbios_perf_entry(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
@@ -92,9 +93,8 @@ nvbios_perf_entry(struct nouveau_bios *bios, int idx,
}
u16
-nvbios_perfEp(struct nouveau_bios *bios, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_perfE *info)
+nvbios_perfEp(struct nvkm_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *info)
{
u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
@@ -155,7 +155,7 @@ nvbios_perfEp(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
+nvbios_perfSe(struct nvkm_bios *bios, u32 perfE, int idx,
u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
u32 data = 0x00000000;
@@ -167,7 +167,7 @@ nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
}
u32
-nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
+nvbios_perfSp(struct nvkm_bios *bios, u32 perfE, int idx,
u8 *ver, u8 *hdr, u8 cnt, u8 len,
struct nvbios_perfS *info)
{
@@ -184,7 +184,7 @@ nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
}
int
-nvbios_perf_fan_parse(struct nouveau_bios *bios,
+nvbios_perf_fan_parse(struct nvkm_bios *bios,
struct nvbios_perf_fan *fan)
{
u8 ver, hdr, cnt, len, snr, ssz;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 1f76de597d4b..ebd402e19dbf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -21,12 +21,13 @@
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
-#include <subdev/vga.h>
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/pll.h>
+#include <subdev/vga.h>
+
+#include <core/device.h>
struct pll_mapping {
u8 type;
@@ -66,7 +67,7 @@ nv50_pll_mapping[] = {
};
static struct pll_mapping
-nv84_pll_mapping[] = {
+g84_pll_mapping[] = {
{ PLL_CORE , 0x004028 },
{ PLL_SHADER, 0x004020 },
{ PLL_MEMORY, 0x004008 },
@@ -78,7 +79,7 @@ nv84_pll_mapping[] = {
};
static u16
-pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_C;
@@ -109,7 +110,7 @@ pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
static struct pll_mapping *
-pll_map(struct nouveau_bios *bios)
+pll_map(struct nvkm_bios *bios)
{
switch (nv_device(bios)->card_type) {
case NV_04:
@@ -128,14 +129,14 @@ pll_map(struct nouveau_bios *bios)
if (nv_device(bios)->chipset < 0xa3 ||
nv_device(bios)->chipset == 0xaa ||
nv_device(bios)->chipset == 0xac)
- return nv84_pll_mapping;
+ return g84_pll_mapping;
default:
return NULL;
}
}
static u16
-pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
+pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
@@ -177,7 +178,7 @@ pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
}
static u16
-pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
+pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
@@ -219,7 +220,7 @@ pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
}
int
-nvbios_pll_parse(struct nouveau_bios *bios, u32 type, struct nvbios_pll *info)
+nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
{
u8 ver, len;
u32 reg = type;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 66c56ba07d1b..20c5ce0cd573 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -21,14 +21,13 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/image.h>
#include <subdev/bios/pmu.h>
static u32
-weirdo_pointer(struct nouveau_bios *bios, u32 data)
+weirdo_pointer(struct nvkm_bios *bios, u32 data)
{
struct nvbios_image image;
int idx = 0;
@@ -43,7 +42,7 @@ weirdo_pointer(struct nouveau_bios *bios, u32 data)
}
u32
-nvbios_pmuTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_p;
u32 data = 0;
@@ -63,7 +62,7 @@ nvbios_pmuTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u32
-nvbios_pmuTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+nvbios_pmuTp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_pmuT *info)
{
u32 data = nvbios_pmuTe(bios, ver, hdr, cnt, len);
@@ -76,7 +75,7 @@ nvbios_pmuTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
}
u32
-nvbios_pmuEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+nvbios_pmuEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len;
u32 data = nvbios_pmuTe(bios, ver, hdr, &cnt, &len);
@@ -89,7 +88,7 @@ nvbios_pmuEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
}
u32
-nvbios_pmuEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_pmuE *info)
{
u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
@@ -104,7 +103,7 @@ nvbios_pmuEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
}
bool
-nvbios_pmuRm(struct nouveau_bios *bios, u8 type, struct nvbios_pmuR *info)
+nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
{
struct nvbios_pmuE pmuE;
u8 ver, hdr, idx = 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 187d225bd1e9..95e4fa1531d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,18 +1,17 @@
#ifndef __NVKM_BIOS_PRIV_H__
#define __NVKM_BIOS_PRIV_H__
-
#include <subdev/bios.h>
struct nvbios_source {
const char *name;
- void *(*init)(struct nouveau_bios *, const char *);
+ void *(*init)(struct nvkm_bios *, const char *);
void (*fini)(void *);
- u32 (*read)(void *, u32 offset, u32 length, struct nouveau_bios *);
+ u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *);
bool rw;
};
-int nvbios_extend(struct nouveau_bios *, u32 length);
-int nvbios_shadow(struct nouveau_bios *);
+int nvbios_extend(struct nvkm_bios *, u32 length);
+int nvbios_shadow(struct nvkm_bios *);
extern const struct nvbios_source nvbios_rom;
extern const struct nvbios_source nvbios_ramin;
@@ -21,5 +20,4 @@ extern const struct nvbios_source nvbios_acpi_slow;
extern const struct nvbios_source nvbios_pcirom;
extern const struct nvbios_source nvbios_platform;
extern const struct nvbios_source nvbios_of;
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
index 1623c8dfe797..a17b221119b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/ramcfg.h>
#include <subdev/bios/M0203.h>
static u8
-nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
+nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
{
return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
}
u8
-nvbios_ramcfg_count(struct nouveau_bios *bios)
+nvbios_ramcfg_count(struct nvkm_bios *bios)
{
struct bit_entry bit_M;
@@ -49,9 +48,9 @@ nvbios_ramcfg_count(struct nouveau_bios *bios)
}
u8
-nvbios_ramcfg_index(struct nouveau_subdev *subdev)
+nvbios_ramcfg_index(struct nvkm_subdev *subdev)
{
- struct nouveau_bios *bios = nouveau_bios(subdev);
+ struct nvkm_bios *bios = nvkm_bios(subdev);
u8 strap = nvbios_ramcfg_strap(subdev);
u32 xlat = 0x00000000;
struct bit_entry bit_M;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index c5685228c322..8b17bb4b220c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
-#include <subdev/bios/ramcfg.h>
#include <subdev/bios/rammap.h>
u32
-nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
@@ -59,7 +57,7 @@ nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
}
u32
-nvbios_rammapEe(struct nouveau_bios *bios, int idx,
+nvbios_rammapEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
@@ -75,9 +73,8 @@ nvbios_rammapEe(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_rammapEp(struct nouveau_bios *bios, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ramcfg *p)
+nvbios_rammapEp(struct nvkm_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
u32 data = nvbios_rammapEe(bios, idx, ver, hdr, cnt, len), temp;
memset(p, 0x00, sizeof(*p));
@@ -118,9 +115,8 @@ nvbios_rammapEp(struct nouveau_bios *bios, int idx,
}
u32
-nvbios_rammapEm(struct nouveau_bios *bios, u16 mhz,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ramcfg *info)
+nvbios_rammapEm(struct nvkm_bios *bios, u16 mhz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *info)
{
int idx = 0;
u32 data;
@@ -132,9 +128,8 @@ nvbios_rammapEm(struct nouveau_bios *bios, u16 mhz,
}
u32
-nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
- u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
- u8 *ver, u8 *hdr)
+nvbios_rammapSe(struct nvkm_bios *bios, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, u8 *ver, u8 *hdr)
{
if (idx < ecnt) {
data = data + ehdr + (idx * elen);
@@ -146,7 +141,7 @@ nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
}
u32
-nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
+nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index bb9e0018d936..8c2b7cba5cff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -21,13 +21,15 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include "priv.h"
+
+#include <core/device.h>
#include <core/option.h>
+#include <subdev/bios.h>
#include <subdev/bios/image.h>
struct shadow {
- struct nouveau_oclass base;
+ struct nvkm_oclass base;
u32 skip;
const struct nvbios_source *func;
void *data;
@@ -36,7 +38,7 @@ struct shadow {
};
static bool
-shadow_fetch(struct nouveau_bios *bios, u32 upto)
+shadow_fetch(struct nvkm_bios *bios, u32 upto)
{
struct shadow *mthd = (void *)nv_object(bios)->oclass;
const u32 limit = (upto + 3) & ~3;
@@ -50,36 +52,36 @@ shadow_fetch(struct nouveau_bios *bios, u32 upto)
}
static u8
-shadow_rd08(struct nouveau_object *object, u64 addr)
+shadow_rd08(struct nvkm_object *object, u64 addr)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
if (shadow_fetch(bios, addr + 1))
return bios->data[addr];
return 0x00;
}
static u16
-shadow_rd16(struct nouveau_object *object, u64 addr)
+shadow_rd16(struct nvkm_object *object, u64 addr)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
if (shadow_fetch(bios, addr + 2))
return get_unaligned_le16(&bios->data[addr]);
return 0x0000;
}
static u32
-shadow_rd32(struct nouveau_object *object, u64 addr)
+shadow_rd32(struct nvkm_object *object, u64 addr)
{
- struct nouveau_bios *bios = (void *)object;
+ struct nvkm_bios *bios = (void *)object;
if (shadow_fetch(bios, addr + 4))
return get_unaligned_le32(&bios->data[addr]);
return 0x00000000;
}
-static struct nouveau_oclass
+static struct nvkm_oclass
shadow_class = {
.handle = NV_SUBDEV(VBIOS, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.rd08 = shadow_rd08,
.rd16 = shadow_rd16,
.rd32 = shadow_rd32,
@@ -87,7 +89,7 @@ shadow_class = {
};
static int
-shadow_image(struct nouveau_bios *bios, int idx, struct shadow *mthd)
+shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
{
struct nvbios_image image;
int score = 1;
@@ -126,9 +128,9 @@ shadow_image(struct nouveau_bios *bios, int idx, struct shadow *mthd)
}
static int
-shadow_score(struct nouveau_bios *bios, struct shadow *mthd)
+shadow_score(struct nvkm_bios *bios, struct shadow *mthd)
{
- struct nouveau_oclass *oclass = nv_object(bios)->oclass;
+ struct nvkm_oclass *oclass = nv_object(bios)->oclass;
int score;
nv_object(bios)->oclass = &mthd->base;
score = shadow_image(bios, 0, mthd);
@@ -138,7 +140,7 @@ shadow_score(struct nouveau_bios *bios, struct shadow *mthd)
}
static int
-shadow_method(struct nouveau_bios *bios, struct shadow *mthd, const char *name)
+shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
{
const struct nvbios_source *func = mthd->func;
if (func->name) {
@@ -163,7 +165,7 @@ shadow_method(struct nouveau_bios *bios, struct shadow *mthd, const char *name)
}
static u32
-shadow_fw_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+shadow_fw_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
const struct firmware *fw = data;
if (offset + length <= fw->size) {
@@ -174,7 +176,7 @@ shadow_fw_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
}
static void *
-shadow_fw_init(struct nouveau_bios *bios, const char *name)
+shadow_fw_init(struct nvkm_bios *bios, const char *name)
{
struct device *dev = &nv_device(bios)->pdev->dev;
const struct firmware *fw;
@@ -194,7 +196,7 @@ shadow_fw = {
};
int
-nvbios_shadow(struct nouveau_bios *bios)
+nvbios_shadow(struct nvkm_bios *bios)
{
struct shadow mthds[] = {
{ shadow_class, 0, &nvbios_of },
@@ -211,7 +213,7 @@ nvbios_shadow(struct nouveau_bios *bios)
int optlen;
/* handle user-specified bios source */
- optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+ optarg = nvkm_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
if (source) {
/* try to match one of the built-in methods */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index bc130c12ec06..1fbd93bbb561 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -20,9 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "priv.h"
+#include <core/device.h>
+
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
@@ -45,7 +46,7 @@ nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
* on some systems, such as Lenovo W530.
*/
static u32
-acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
u32 limit = (offset + length + 0xfff) & ~0xfff;
u32 start = offset & ~0x00000fff;
@@ -66,7 +67,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
* function.
*/
static u32
-acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
u32 limit = (offset + length + 0xfff) & ~0xfff;
u32 start = offset & ~0xfff;
@@ -87,7 +88,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
}
static void *
-acpi_init(struct nouveau_bios *bios, const char *name)
+acpi_init(struct nvkm_bios *bios, const char *name)
{
if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 3abe487a6025..4c19a7dba803 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -20,9 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "priv.h"
+#include <core/device.h>
+
#if defined(__powerpc__)
struct priv {
const void __iomem *data;
@@ -30,7 +31,7 @@ struct priv {
};
static u32
-of_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
struct priv *priv = data;
if (offset + length <= priv->size) {
@@ -41,7 +42,7 @@ of_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
}
static void *
-of_init(struct nouveau_bios *bios, const char *name)
+of_init(struct nvkm_bios *bios, const char *name)
{
struct pci_dev *pdev = nv_device(bios)->pdev;
struct device_node *dn;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 1d0389c0abef..1b045483dc87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -20,9 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "priv.h"
+#include <core/device.h>
+
struct priv {
struct pci_dev *pdev;
void __iomem *rom;
@@ -30,7 +31,7 @@ struct priv {
};
static u32
-pcirom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+pcirom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
struct priv *priv = data;
if (offset + length <= priv->size) {
@@ -50,7 +51,7 @@ pcirom_fini(void *data)
}
static void *
-pcirom_init(struct nouveau_bios *bios, const char *name)
+pcirom_init(struct nvkm_bios *bios, const char *name)
{
struct pci_dev *pdev = nv_device(bios)->pdev;
struct priv *priv = NULL;
@@ -82,7 +83,7 @@ nvbios_pcirom = {
};
static void *
-platform_init(struct nouveau_bios *bios, const char *name)
+platform_init(struct nvkm_bios *bios, const char *name)
{
struct pci_dev *pdev = nv_device(bios)->pdev;
struct priv *priv;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index a7a890fad1e5..abe8ae4d3a9f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -20,16 +20,17 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "priv.h"
+#include <core/device.h>
+
struct priv {
- struct nouveau_bios *bios;
+ struct nvkm_bios *bios;
u32 bar0;
};
static u32
-pramin_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
u32 i;
if (offset + length <= 0x00100000) {
@@ -51,7 +52,7 @@ pramin_fini(void *data)
}
static void *
-pramin_init(struct nouveau_bios *bios, const char *name)
+pramin_init(struct nvkm_bios *bios, const char *name)
{
struct priv *priv = NULL;
u64 addr = 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index b7992bc3ffa5..6ec3b237925e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -20,11 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "priv.h"
+#include <core/device.h>
+
static u32
-prom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
u32 i;
if (offset + length <= 0x00100000) {
@@ -38,7 +39,7 @@ prom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
static void
prom_fini(void *data)
{
- struct nouveau_bios *bios = data;
+ struct nvkm_bios *bios = data;
if (nv_device(bios)->card_type < NV_50)
nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
else
@@ -46,7 +47,7 @@ prom_fini(void *data)
}
static void *
-prom_init(struct nouveau_bios *bios, const char *name)
+prom_init(struct nvkm_bios *bios, const char *name)
{
if (nv_device(bios)->card_type < NV_50) {
if (nv_device(bios)->card_type == NV_40 &&
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index d15854094078..249ff6d583df 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -21,13 +21,14 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/therm.h>
+#include <core/device.h>
+
static u16
-therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
+therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
struct bit_entry bit_P;
u16 therm = 0;
@@ -51,12 +52,11 @@ therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
*hdr = nv_ro08(bios, therm + 1);
*len = nv_ro08(bios, therm + 2);
*cnt = nv_ro08(bios, therm + 3);
-
return therm + nv_ro08(bios, therm + 1);
}
static u16
-nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_therm_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
@@ -66,7 +66,7 @@ nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
}
int
-nvbios_therm_sensor_parse(struct nouveau_bios *bios,
+nvbios_therm_sensor_parse(struct nvkm_bios *bios,
enum nvbios_therm_domain domain,
struct nvbios_therm_sensor *sensor)
{
@@ -152,10 +152,9 @@ nvbios_therm_sensor_parse(struct nouveau_bios *bios,
}
int
-nvbios_therm_fan_parse(struct nouveau_bios *bios,
- struct nvbios_therm_fan *fan)
+nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
- struct nouveau_therm_trip_point *cur_trip = NULL;
+ struct nvbios_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
u16 entry;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 8521eca1ed9c..763fd29a58f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
-#include <subdev/bios/ramcfg.h>
#include <subdev/bios/timing.h>
u16
-nvbios_timingTe(struct nouveau_bios *bios,
+nvbios_timingTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
@@ -68,7 +66,7 @@ nvbios_timingTe(struct nouveau_bios *bios,
}
u16
-nvbios_timingEe(struct nouveau_bios *bios, int idx,
+nvbios_timingEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
@@ -84,9 +82,8 @@ nvbios_timingEe(struct nouveau_bios *bios, int idx,
}
u16
-nvbios_timingEp(struct nouveau_bios *bios, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_ramcfg *p)
+nvbios_timingEp(struct nvkm_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
p->timing_ver = *ver;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index f343a1b060e8..e95b69faa82e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -21,13 +21,12 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/vmap.h>
u16
-nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
u16 vmap = 0x0000;
@@ -55,7 +54,7 @@ nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *info)
{
u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
@@ -69,7 +68,7 @@ nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
}
u16
-nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_vmap_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
@@ -81,7 +80,7 @@ nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
}
u16
-nvbios_vmap_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *info)
{
u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index bb590de4ecb2..8454ab7c4a3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -21,13 +21,12 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/volt.h>
u16
-nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
u16 volt = 0x0000;
@@ -67,7 +66,7 @@ nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *info)
{
u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
@@ -102,7 +101,7 @@ nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
}
u16
-nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_volt_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
@@ -114,7 +113,7 @@ nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
}
u16
-nvbios_volt_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *info)
{
u16 volt = nvbios_volt_entry(bios, idx, ver, len);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
index e9b8e5d30a7a..63a5e1b5cb3c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/xpio.h>
static u16
-dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+dcb_xpiod_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
if (data && *ver >= 0x40 && *hdr >= 0x06) {
@@ -44,7 +43,7 @@ dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
u16
-dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
+dcb_xpio_table(struct nvkm_bios *bios, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
@@ -62,9 +61,8 @@ dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
}
u16
-dcb_xpio_parse(struct nouveau_bios *bios, u8 idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_xpio *info)
+dcb_xpio_parse(struct nvkm_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *info)
{
u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
if (data && *len >= 6) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
new file mode 100644
index 000000000000..83d80b13f149
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -0,0 +1,6 @@
+nvkm-y += nvkm/subdev/bus/hwsq.o
+nvkm-y += nvkm/subdev/bus/nv04.o
+nvkm-y += nvkm/subdev/bus/nv31.o
+nvkm-y += nvkm/subdev/bus/nv50.o
+nvkm-y += nvkm/subdev/bus/g94.o
+nvkm-y += nvkm/subdev/bus/gf100.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index d3659055fa4b..cbe699e82593 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -22,13 +22,12 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
+#include "nv04.h"
#include <subdev/timer.h>
-#include "nv04.h"
-
static int
-nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+g94_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
{
struct nv50_bus_priv *priv = (void *)pbus;
int i;
@@ -44,16 +43,16 @@ nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
}
-struct nouveau_oclass *
-nv94_bus_oclass = &(struct nv04_bus_impl) {
+struct nvkm_oclass *
+g94_bus_oclass = &(struct nv04_bus_impl) {
.base.handle = NV_SUBDEV(BUS, 0x94),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_bus_ctor,
- .dtor = _nouveau_bus_dtor,
+ .dtor = _nvkm_bus_dtor,
.init = nv50_bus_init,
- .fini = _nouveau_bus_fini,
+ .fini = _nvkm_bus_fini,
},
.intr = nv50_bus_intr,
- .hwsq_exec = nv94_bus_hwsq_exec,
+ .hwsq_exec = g94_bus_hwsq_exec,
.hwsq_size = 128,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index 73839d7151a7..ebc63ba968d4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -22,13 +22,12 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-
#include "nv04.h"
static void
-nvc0_bus_intr(struct nouveau_subdev *subdev)
+gf100_bus_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_bus *pbus = nouveau_bus(subdev);
+ struct nvkm_bus *pbus = nvkm_bus(subdev);
u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
if (stat & 0x0000000e) {
@@ -54,12 +53,12 @@ nvc0_bus_intr(struct nouveau_subdev *subdev)
}
static int
-nvc0_bus_init(struct nouveau_object *object)
+gf100_bus_init(struct nvkm_object *object)
{
struct nv04_bus_priv *priv = (void *)object;
int ret;
- ret = nouveau_bus_init(&priv->base);
+ ret = nvkm_bus_init(&priv->base);
if (ret)
return ret;
@@ -68,14 +67,14 @@ nvc0_bus_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
-nvc0_bus_oclass = &(struct nv04_bus_impl) {
+struct nvkm_oclass *
+gf100_bus_oclass = &(struct nv04_bus_impl) {
.base.handle = NV_SUBDEV(BUS, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_bus_ctor,
- .dtor = _nouveau_bus_dtor,
- .init = nvc0_bus_init,
- .fini = _nouveau_bus_fini,
+ .dtor = _nvkm_bus_dtor,
+ .init = gf100_bus_init,
+ .fini = _nvkm_bus_fini,
},
- .intr = nvc0_bus_intr,
+ .intr = gf100_bus_intr,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index f757470e2284..b8853bf16b23 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -21,12 +21,10 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include <subdev/timer.h>
#include <subdev/bus.h>
-struct nouveau_hwsq {
- struct nouveau_bus *pbus;
+struct nvkm_hwsq {
+ struct nvkm_bus *pbus;
u32 addr;
u32 data;
struct {
@@ -36,16 +34,16 @@ struct nouveau_hwsq {
};
static void
-hwsq_cmd(struct nouveau_hwsq *hwsq, int size, u8 data[])
+hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
{
memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
hwsq->c.size += size;
}
int
-nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
+nvkm_hwsq_init(struct nvkm_bus *pbus, struct nvkm_hwsq **phwsq)
{
- struct nouveau_hwsq *hwsq;
+ struct nvkm_hwsq *hwsq;
hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
if (hwsq) {
@@ -60,12 +58,12 @@ nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
}
int
-nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
+nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
{
- struct nouveau_hwsq *hwsq = *phwsq;
+ struct nvkm_hwsq *hwsq = *phwsq;
int ret = 0, i;
if (hwsq) {
- struct nouveau_bus *pbus = hwsq->pbus;
+ struct nvkm_bus *pbus = hwsq->pbus;
hwsq->c.size = (hwsq->c.size + 4) / 4;
if (hwsq->c.size <= pbus->hwsq_size) {
if (exec)
@@ -88,7 +86,7 @@ nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
}
void
-nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
+nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
{
nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
@@ -113,7 +111,7 @@ nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
}
void
-nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
+nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
{
nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
flag += 0x80;
@@ -125,14 +123,14 @@ nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
}
void
-nouveau_hwsq_wait(struct nouveau_hwsq *hwsq, u8 flag, u8 data)
+nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
{
nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
}
void
-nouveau_hwsq_nsec(struct nouveau_hwsq *hwsq, u32 nsec)
+nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
{
u8 shift = 0, usec = nsec / 1000;
while (usec & ~3) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 12176f9c1bc6..3394a5ea8a9f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -1,11 +1,10 @@
#ifndef __NVKM_BUS_HWSQ_H__
#define __NVKM_BUS_HWSQ_H__
-
#include <subdev/bus.h>
struct hwsq {
- struct nouveau_subdev *subdev;
- struct nouveau_hwsq *hwsq;
+ struct nvkm_subdev *subdev;
+ struct nvkm_hwsq *hwsq;
int sequence;
};
@@ -34,12 +33,12 @@ hwsq_reg(u32 addr)
}
static inline int
-hwsq_init(struct hwsq *ram, struct nouveau_subdev *subdev)
+hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
{
- struct nouveau_bus *pbus = nouveau_bus(subdev);
+ struct nvkm_bus *pbus = nvkm_bus(subdev);
int ret;
- ret = nouveau_hwsq_init(pbus, &ram->hwsq);
+ ret = nvkm_hwsq_init(pbus, &ram->hwsq);
if (ret)
return ret;
@@ -53,7 +52,7 @@ hwsq_exec(struct hwsq *ram, bool exec)
{
int ret = 0;
if (ram->subdev) {
- ret = nouveau_hwsq_fini(&ram->hwsq, exec);
+ ret = nvkm_hwsq_fini(&ram->hwsq, exec);
ram->subdev = NULL;
}
return ret;
@@ -73,8 +72,8 @@ hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
reg->sequence = ram->sequence;
reg->data = data;
if (reg->addr[0] != reg->addr[1])
- nouveau_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
- nouveau_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
+ nvkm_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
+ nvkm_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
}
static inline void
@@ -95,19 +94,18 @@ hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
static inline void
hwsq_setf(struct hwsq *ram, u8 flag, int data)
{
- nouveau_hwsq_setf(ram->hwsq, flag, data);
+ nvkm_hwsq_setf(ram->hwsq, flag, data);
}
static inline void
hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
{
- nouveau_hwsq_wait(ram->hwsq, flag, data);
+ nvkm_hwsq_wait(ram->hwsq, flag, data);
}
static inline void
hwsq_nsec(struct hwsq *ram, u32 nsec)
{
- nouveau_hwsq_nsec(ram->hwsq, nsec);
+ nvkm_hwsq_nsec(ram->hwsq, nsec);
}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 23921b5351db..19c8e50eeff7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -22,13 +22,12 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-
#include "nv04.h"
static void
-nv04_bus_intr(struct nouveau_subdev *subdev)
+nv04_bus_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_bus *pbus = nouveau_bus(subdev);
+ struct nvkm_bus *pbus = nvkm_bus(subdev);
u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
if (stat & 0x00000001) {
@@ -38,7 +37,7 @@ nv04_bus_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000110) {
- subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO);
+ subdev = nvkm_subdev(subdev, NVDEV_SUBDEV_GPIO);
if (subdev && subdev->intr)
subdev->intr(subdev);
stat &= ~0x00000110;
@@ -52,26 +51,26 @@ nv04_bus_intr(struct nouveau_subdev *subdev)
}
static int
-nv04_bus_init(struct nouveau_object *object)
+nv04_bus_init(struct nvkm_object *object)
{
struct nv04_bus_priv *priv = (void *)object;
nv_wr32(priv, 0x001100, 0xffffffff);
nv_wr32(priv, 0x001140, 0x00000111);
- return nouveau_bus_init(&priv->base);
+ return nvkm_bus_init(&priv->base);
}
int
-nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_bus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_bus_impl *impl = (void *)oclass;
struct nv04_bus_priv *priv;
int ret;
- ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ ret = nvkm_bus_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -82,14 +81,14 @@ nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv04_bus_oclass = &(struct nv04_bus_impl) {
.base.handle = NV_SUBDEV(BUS, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_bus_ctor,
- .dtor = _nouveau_bus_dtor,
+ .dtor = _nvkm_bus_dtor,
.init = nv04_bus_init,
- .fini = _nouveau_bus_fini,
+ .fini = _nvkm_bus_fini,
},
.intr = nv04_bus_intr,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
new file mode 100644
index 000000000000..3ddc8f91b1e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_BUS_NV04_H__
+#define __NVKM_BUS_NV04_H__
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+ struct nvkm_bus base;
+};
+
+int nv04_bus_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+int nv50_bus_init(struct nvkm_object *);
+void nv50_bus_intr(struct nvkm_subdev *);
+
+struct nv04_bus_impl {
+ struct nvkm_oclass base;
+ void (*intr)(struct nvkm_subdev *);
+ int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
+ u32 hwsq_size;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index 94da46f61627..c5739bce8052 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -22,18 +22,17 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-
#include "nv04.h"
static void
-nv31_bus_intr(struct nouveau_subdev *subdev)
+nv31_bus_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_bus *pbus = nouveau_bus(subdev);
+ struct nvkm_bus *pbus = nvkm_bus(subdev);
u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
if (gpio) {
- subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO);
+ subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_GPIO);
if (subdev && subdev->intr)
subdev->intr(subdev);
}
@@ -51,7 +50,7 @@ nv31_bus_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00070000) {
- subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
if (subdev && subdev->intr)
subdev->intr(subdev);
stat &= ~0x00070000;
@@ -65,12 +64,12 @@ nv31_bus_intr(struct nouveau_subdev *subdev)
}
static int
-nv31_bus_init(struct nouveau_object *object)
+nv31_bus_init(struct nvkm_object *object)
{
struct nv04_bus_priv *priv = (void *)object;
int ret;
- ret = nouveau_bus_init(&priv->base);
+ ret = nvkm_bus_init(&priv->base);
if (ret)
return ret;
@@ -79,14 +78,14 @@ nv31_bus_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv31_bus_oclass = &(struct nv04_bus_impl) {
.base.handle = NV_SUBDEV(BUS, 0x31),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_bus_ctor,
- .dtor = _nouveau_bus_dtor,
+ .dtor = _nvkm_bus_dtor,
.init = nv31_bus_init,
- .fini = _nouveau_bus_fini,
+ .fini = _nvkm_bus_fini,
},
.intr = nv31_bus_intr,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 11918f7e2aca..1987863d71ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -22,13 +22,12 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
+#include "nv04.h"
#include <subdev/timer.h>
-#include "nv04.h"
-
static int
-nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+nv50_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
{
struct nv50_bus_priv *priv = (void *)pbus;
int i;
@@ -44,9 +43,9 @@ nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
}
void
-nv50_bus_intr(struct nouveau_subdev *subdev)
+nv50_bus_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_bus *pbus = nouveau_bus(subdev);
+ struct nvkm_bus *pbus = nvkm_bus(subdev);
u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
if (stat & 0x00000008) {
@@ -62,7 +61,7 @@ nv50_bus_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00010000) {
- subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
if (subdev && subdev->intr)
subdev->intr(subdev);
stat &= ~0x00010000;
@@ -76,12 +75,12 @@ nv50_bus_intr(struct nouveau_subdev *subdev)
}
int
-nv50_bus_init(struct nouveau_object *object)
+nv50_bus_init(struct nvkm_object *object)
{
struct nv04_bus_priv *priv = (void *)object;
int ret;
- ret = nouveau_bus_init(&priv->base);
+ ret = nvkm_bus_init(&priv->base);
if (ret)
return ret;
@@ -90,14 +89,14 @@ nv50_bus_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv50_bus_oclass = &(struct nv04_bus_impl) {
.base.handle = NV_SUBDEV(BUS, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_bus_ctor,
- .dtor = _nouveau_bus_dtor,
+ .dtor = _nvkm_bus_dtor,
.init = nv50_bus_init,
- .fini = _nouveau_bus_fini,
+ .fini = _nvkm_bus_fini,
},
.intr = nv50_bus_intr,
.hwsq_exec = nv50_bus_hwsq_exec,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
new file mode 100644
index 000000000000..9c2f688c9602
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -0,0 +1,12 @@
+nvkm-y += nvkm/subdev/clk/base.o
+nvkm-y += nvkm/subdev/clk/nv04.o
+nvkm-y += nvkm/subdev/clk/nv40.o
+nvkm-y += nvkm/subdev/clk/nv50.o
+nvkm-y += nvkm/subdev/clk/g84.o
+nvkm-y += nvkm/subdev/clk/gt215.o
+nvkm-y += nvkm/subdev/clk/mcp77.o
+nvkm-y += nvkm/subdev/clk/gf100.o
+nvkm-y += nvkm/subdev/clk/gk104.o
+nvkm-y += nvkm/subdev/clk/gk20a.o
+nvkm-y += nvkm/subdev/clk/pllnv04.o
+nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index e51b72d47129..b24a9cc04b73 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,27 +21,26 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/option.h>
-
-#include <subdev/clock.h>
-#include <subdev/therm.h>
-#include <subdev/volt.h>
-#include <subdev/fb.h>
-
+#include <subdev/clk.h>
#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
+#include <subdev/fb.h>
+#include <subdev/therm.h>
+#include <subdev/volt.h>
+
+#include <core/device.h>
+#include <core/option.h>
/******************************************************************************
* misc
*****************************************************************************/
static u32
-nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
- u8 pstate, u8 domain, u32 input)
+nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
+ u8 pstate, u8 domain, u32 input)
{
- struct nouveau_bios *bios = nouveau_bios(clk);
+ struct nvkm_bios *bios = nvkm_bios(clk);
struct nvbios_boostE boostE;
u8 ver, hdr, cnt, len;
u16 data;
@@ -76,12 +75,11 @@ nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
* C-States
*****************************************************************************/
static int
-nouveau_cstate_prog(struct nouveau_clock *clk,
- struct nouveau_pstate *pstate, int cstatei)
+nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
- struct nouveau_therm *ptherm = nouveau_therm(clk);
- struct nouveau_volt *volt = nouveau_volt(clk);
- struct nouveau_cstate *cstate;
+ struct nvkm_therm *ptherm = nvkm_therm(clk);
+ struct nvkm_volt *volt = nvkm_volt(clk);
+ struct nvkm_cstate *cstate;
int ret;
if (!list_empty(&pstate->list)) {
@@ -91,7 +89,7 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
}
if (ptherm) {
- ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
+ ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1);
if (ret && ret != -ENODEV) {
nv_error(clk, "failed to raise fan speed: %d\n", ret);
return ret;
@@ -119,7 +117,7 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
}
if (ptherm) {
- ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
+ ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1);
if (ret && ret != -ENODEV)
nv_error(clk, "failed to lower fan speed: %d\n", ret);
}
@@ -128,19 +126,18 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
}
static void
-nouveau_cstate_del(struct nouveau_cstate *cstate)
+nvkm_cstate_del(struct nvkm_cstate *cstate)
{
list_del(&cstate->head);
kfree(cstate);
}
static int
-nouveau_cstate_new(struct nouveau_clock *clk, int idx,
- struct nouveau_pstate *pstate)
+nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
- struct nouveau_bios *bios = nouveau_bios(clk);
- struct nouveau_clocks *domain = clk->domains;
- struct nouveau_cstate *cstate = NULL;
+ struct nvkm_bios *bios = nvkm_bios(clk);
+ struct nvkm_domain *domain = clk->domains;
+ struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
u8 ver, hdr;
u16 data;
@@ -158,10 +155,8 @@ nouveau_cstate_new(struct nouveau_clock *clk, int idx,
while (domain && domain->name != nv_clk_src_max) {
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
- u32 freq = nouveau_clock_adjust(clk, true,
- pstate->pstate,
- domain->bios,
- cstepX.freq);
+ u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
+ domain->bios, cstepX.freq);
cstate->domain[domain->name] = freq;
}
domain++;
@@ -175,10 +170,10 @@ nouveau_cstate_new(struct nouveau_clock *clk, int idx,
* P-States
*****************************************************************************/
static int
-nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
+nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
- struct nouveau_fb *pfb = nouveau_fb(clk);
- struct nouveau_pstate *pstate;
+ struct nvkm_fb *pfb = nvkm_fb(clk);
+ struct nvkm_pstate *pstate;
int ret, idx = 0;
list_for_each_entry(pstate, &clk->states, head) {
@@ -199,13 +194,13 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
pfb->ram->tidy(pfb);
}
- return nouveau_cstate_prog(clk, pstate, 0);
+ return nvkm_cstate_prog(clk, pstate, 0);
}
static void
-nouveau_pstate_work(struct work_struct *work)
+nvkm_pstate_work(struct work_struct *work)
{
- struct nouveau_clock *clk = container_of(work, typeof(*clk), work);
+ struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
int pstate;
if (!atomic_xchg(&clk->waiting, 0))
@@ -227,7 +222,7 @@ nouveau_pstate_work(struct work_struct *work)
nv_trace(clk, "-> %d\n", pstate);
if (pstate != clk->pstate) {
- int ret = nouveau_pstate_prog(clk, pstate);
+ int ret = nvkm_pstate_prog(clk, pstate);
if (ret) {
nv_error(clk, "error setting pstate %d: %d\n",
pstate, ret);
@@ -239,7 +234,7 @@ nouveau_pstate_work(struct work_struct *work)
}
static int
-nouveau_pstate_calc(struct nouveau_clock *clk, bool wait)
+nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
atomic_set(&clk->waiting, 1);
schedule_work(&clk->work);
@@ -249,10 +244,10 @@ nouveau_pstate_calc(struct nouveau_clock *clk, bool wait)
}
static void
-nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
+nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
- struct nouveau_clocks *clock = clk->domains - 1;
- struct nouveau_cstate *cstate;
+ struct nvkm_domain *clock = clk->domains - 1;
+ struct nvkm_cstate *cstate;
char info[3][32] = { "", "", "" };
char name[4] = "--";
int i = -1;
@@ -291,12 +286,12 @@ nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
}
static void
-nouveau_pstate_del(struct nouveau_pstate *pstate)
+nvkm_pstate_del(struct nvkm_pstate *pstate)
{
- struct nouveau_cstate *cstate, *temp;
+ struct nvkm_cstate *cstate, *temp;
list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
- nouveau_cstate_del(cstate);
+ nvkm_cstate_del(cstate);
}
list_del(&pstate->head);
@@ -304,12 +299,12 @@ nouveau_pstate_del(struct nouveau_pstate *pstate)
}
static int
-nouveau_pstate_new(struct nouveau_clock *clk, int idx)
+nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
- struct nouveau_bios *bios = nouveau_bios(clk);
- struct nouveau_clocks *domain = clk->domains - 1;
- struct nouveau_pstate *pstate;
- struct nouveau_cstate *cstate;
+ struct nvkm_bios *bios = nvkm_bios(clk);
+ struct nvkm_domain *domain = clk->domains - 1;
+ struct nvkm_pstate *pstate;
+ struct nvkm_cstate *cstate;
struct nvbios_cstepE cstepE;
struct nvbios_perfE perfE;
u8 ver, hdr, cnt, len;
@@ -346,10 +341,10 @@ nouveau_pstate_new(struct nouveau_clock *clk, int idx)
continue;
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
- perfS.v40.freq = nouveau_clock_adjust(clk, false,
- pstate->pstate,
- domain->bios,
- perfS.v40.freq);
+ perfS.v40.freq = nvkm_clk_adjust(clk, false,
+ pstate->pstate,
+ domain->bios,
+ perfS.v40.freq);
}
cstate->domain[domain->name] = perfS.v40.freq;
@@ -359,11 +354,11 @@ nouveau_pstate_new(struct nouveau_clock *clk, int idx)
if (data) {
int idx = cstepE.index;
do {
- nouveau_cstate_new(clk, idx, pstate);
+ nvkm_cstate_new(clk, idx, pstate);
} while(idx--);
}
- nouveau_pstate_info(clk, pstate);
+ nvkm_pstate_info(clk, pstate);
list_add_tail(&pstate->head, &clk->states);
clk->state_nr++;
return 0;
@@ -373,9 +368,9 @@ nouveau_pstate_new(struct nouveau_clock *clk, int idx)
* Adjustment triggers
*****************************************************************************/
static int
-nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
+nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
- struct nouveau_pstate *pstate;
+ struct nvkm_pstate *pstate;
int i = 0;
if (!clk->allow_reclock)
@@ -397,17 +392,20 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
}
static int
-nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen)
+nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
int ret = 1;
+ if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
+ return -2;
+
if (strncasecmpz(mode, "disabled", arglen)) {
char save = mode[arglen];
long v;
((char *)mode)[arglen] = '\0';
if (!kstrtol(mode, 0, &v)) {
- ret = nouveau_clock_ustate_update(clk, v);
+ ret = nvkm_clk_ustate_update(clk, v);
if (ret < 0)
ret = 1;
}
@@ -418,53 +416,53 @@ nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen)
}
int
-nouveau_clock_ustate(struct nouveau_clock *clk, int req, int pwr)
+nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
- int ret = nouveau_clock_ustate_update(clk, req);
+ int ret = nvkm_clk_ustate_update(clk, req);
if (ret >= 0) {
if (ret -= 2, pwr) clk->ustate_ac = ret;
else clk->ustate_dc = ret;
- return nouveau_pstate_calc(clk, true);
+ return nvkm_pstate_calc(clk, true);
}
return ret;
}
int
-nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
+nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
if (!rel) clk->astate = req;
if ( rel) clk->astate += rel;
clk->astate = min(clk->astate, clk->state_nr - 1);
clk->astate = max(clk->astate, 0);
- return nouveau_pstate_calc(clk, true);
+ return nvkm_pstate_calc(clk, wait);
}
int
-nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
{
if (!rel) clk->tstate = req;
if ( rel) clk->tstate += rel;
clk->tstate = min(clk->tstate, 0);
clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nouveau_pstate_calc(clk, true);
+ return nvkm_pstate_calc(clk, true);
}
int
-nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
+nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
if (!rel) clk->dstate = req;
if ( rel) clk->dstate += rel;
clk->dstate = min(clk->dstate, clk->state_nr - 1);
clk->dstate = max(clk->dstate, 0);
- return nouveau_pstate_calc(clk, true);
+ return nvkm_pstate_calc(clk, true);
}
static int
-nouveau_clock_pwrsrc(struct nvkm_notify *notify)
+nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
- struct nouveau_clock *clk =
+ struct nvkm_clk *clk =
container_of(notify, typeof(*clk), pwrsrc_ntfy);
- nouveau_pstate_calc(clk, false);
+ nvkm_pstate_calc(clk, false);
return NVKM_NOTIFY_DROP;
}
@@ -473,21 +471,21 @@ nouveau_clock_pwrsrc(struct nvkm_notify *notify)
*****************************************************************************/
int
-_nouveau_clock_fini(struct nouveau_object *object, bool suspend)
+_nvkm_clk_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_clock *clk = (void *)object;
+ struct nvkm_clk *clk = (void *)object;
nvkm_notify_put(&clk->pwrsrc_ntfy);
- return nouveau_subdev_fini(&clk->base, suspend);
+ return nvkm_subdev_fini(&clk->base, suspend);
}
int
-_nouveau_clock_init(struct nouveau_object *object)
+_nvkm_clk_init(struct nvkm_object *object)
{
- struct nouveau_clock *clk = (void *)object;
- struct nouveau_clocks *clock = clk->domains;
+ struct nvkm_clk *clk = (void *)object;
+ struct nvkm_domain *clock = clk->domains;
int ret;
- ret = nouveau_subdev_init(&clk->base);
+ ret = nvkm_subdev_init(&clk->base);
if (ret)
return ret;
@@ -505,47 +503,44 @@ _nouveau_clock_init(struct nouveau_object *object)
clock++;
}
- nouveau_pstate_info(clk, &clk->bstate);
+ nvkm_pstate_info(clk, &clk->bstate);
clk->astate = clk->state_nr - 1;
clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
- nouveau_pstate_calc(clk, true);
+ nvkm_pstate_calc(clk, true);
return 0;
}
void
-_nouveau_clock_dtor(struct nouveau_object *object)
+_nvkm_clk_dtor(struct nvkm_object *object)
{
- struct nouveau_clock *clk = (void *)object;
- struct nouveau_pstate *pstate, *temp;
+ struct nvkm_clk *clk = (void *)object;
+ struct nvkm_pstate *pstate, *temp;
nvkm_notify_fini(&clk->pwrsrc_ntfy);
list_for_each_entry_safe(pstate, temp, &clk->states, head) {
- nouveau_pstate_del(pstate);
+ nvkm_pstate_del(pstate);
}
- nouveau_subdev_destroy(&clk->base);
+ nvkm_subdev_destroy(&clk->base);
}
int
-nouveau_clock_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- struct nouveau_clocks *clocks,
- struct nouveau_pstate *pstates, int nb_pstates,
- bool allow_reclock,
- int length, void **object)
+nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, struct nvkm_domain *clocks,
+ struct nvkm_pstate *pstates, int nb_pstates,
+ bool allow_reclock, int length, void **object)
{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_clock *clk;
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_clk *clk;
int ret, idx, arglen;
const char *mode;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "CLK",
- "clock", length, object);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK",
+ "clock", length, object);
clk = *object;
if (ret)
return ret;
@@ -555,7 +550,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
clk->ustate_ac = -1;
clk->ustate_dc = -1;
- INIT_WORK(&clk->work, nouveau_pstate_work);
+ INIT_WORK(&clk->work, nvkm_pstate_work);
init_waitqueue_head(&clk->wait);
atomic_set(&clk->waiting, 0);
@@ -563,7 +558,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
if (!pstates) {
idx = 0;
do {
- ret = nouveau_pstate_new(clk, idx++);
+ ret = nvkm_pstate_new(clk, idx++);
} while (ret == 0);
} else {
for (idx = 0; idx < nb_pstates; idx++)
@@ -573,25 +568,24 @@ nouveau_clock_create_(struct nouveau_object *parent,
clk->allow_reclock = allow_reclock;
- ret = nvkm_notify_init(NULL, &device->event, nouveau_clock_pwrsrc, true,
+ ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
NULL, 0, 0, &clk->pwrsrc_ntfy);
if (ret)
return ret;
- mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
+ mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
if (mode) {
- clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
- clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
+ clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
+ clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
}
- mode = nouveau_stropt(device->cfgopt, "NvClkModeAC", &arglen);
+ mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
if (mode)
- clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
+ clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
- mode = nouveau_stropt(device->cfgopt, "NvClkModeDC", &arglen);
+ mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
if (mode)
- clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
-
+ clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index b0b7c1437f10..4c90b9769d64 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include "nv50.h"
-static struct nouveau_clocks
-nv84_domains[] = {
+static struct nvkm_domain
+g84_domains[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
{ nv_clk_src_core , 0xff, 0, "core", 1000 },
@@ -35,14 +34,14 @@ nv84_domains[] = {
{ nv_clk_src_max }
};
-struct nouveau_oclass *
-nv84_clock_oclass = &(struct nv50_clock_oclass) {
- .base.handle = NV_SUBDEV(CLOCK, 0x84),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass *
+g84_clk_oclass = &(struct nv50_clk_oclass) {
+ .base.handle = NV_SUBDEV(CLK, 0x84),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
- .domains = nv84_domains,
+ .domains = g84_domains,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 1234abaab2db..3d7330d54b02 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/clk.h>
+#include "pll.h"
-#include <subdev/clock.h>
+#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-#include "pll.h"
-
-struct nvc0_clock_info {
+struct gf100_clk_info {
u32 freq;
u32 ssel;
u32 mdiv;
@@ -38,17 +38,17 @@ struct nvc0_clock_info {
u32 coef;
};
-struct nvc0_clock_priv {
- struct nouveau_clock base;
- struct nvc0_clock_info eng[16];
+struct gf100_clk_priv {
+ struct nvkm_clk base;
+ struct gf100_clk_info eng[16];
};
-static u32 read_div(struct nvc0_clock_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
static u32
-read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk_priv *priv, u32 dsrc)
{
- struct nouveau_clock *clk = &priv->base;
+ struct nvkm_clk *clk = &priv->base;
u32 ssrc = nv_rd32(priv, dsrc);
if (!(ssrc & 0x00000100))
return clk->read(clk, nv_clk_src_sppll0);
@@ -56,9 +56,9 @@ read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
}
static u32
-read_pll(struct nvc0_clock_priv *priv, u32 pll)
+read_pll(struct gf100_clk_priv *priv, u32 pll)
{
- struct nouveau_clock *clk = &priv->base;
+ struct nvkm_clk *clk = &priv->base;
u32 ctrl = nv_rd32(priv, pll + 0x00);
u32 coef = nv_rd32(priv, pll + 0x04);
u32 P = (coef & 0x003f0000) >> 16;
@@ -95,7 +95,7 @@ read_pll(struct nvc0_clock_priv *priv, u32 pll)
}
static u32
-read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
{
u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
u32 sctl = nv_rd32(priv, dctl + (doff * 4));
@@ -121,7 +121,7 @@ read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
}
static u32
-read_clk(struct nvc0_clock_priv *priv, int clk)
+read_clk(struct gf100_clk_priv *priv, int clk)
{
u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
u32 ssel = nv_rd32(priv, 0x137100);
@@ -145,10 +145,10 @@ read_clk(struct nvc0_clock_priv *priv, int clk)
}
static int
-nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct nouveau_device *device = nv_device(clk);
- struct nvc0_clock_priv *priv = (void *)clk;
+ struct nvkm_device *device = nv_device(clk);
+ struct gf100_clk_priv *priv = (void *)clk;
switch (src) {
case nv_clk_src_crystal:
@@ -196,7 +196,7 @@ nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
static u32
-calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
{
u32 div = min((ref * 2) / freq, (u32)65);
if (div < 2)
@@ -207,7 +207,7 @@ calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
}
static u32
-calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
{
u32 sclk;
@@ -236,9 +236,9 @@ calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
}
static u32
-calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll limits;
int N, M, P, ret;
@@ -250,7 +250,7 @@ calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
if (!limits.refclk)
return 0;
- ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
if (ret <= 0)
return 0;
@@ -259,10 +259,10 @@ calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
}
static int
-calc_clk(struct nvc0_clock_priv *priv,
- struct nouveau_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk_priv *priv,
+ struct nvkm_cstate *cstate, int clk, int dom)
{
- struct nvc0_clock_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &priv->eng[clk];
u32 freq = cstate->domain[dom];
u32 src0, div0, div1D, div1P = 0;
u32 clk0, clk1 = 0;
@@ -311,9 +311,9 @@ calc_clk(struct nvc0_clock_priv *priv,
}
static int
-nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct nvc0_clock_priv *priv = (void *)clk;
+ struct gf100_clk_priv *priv = (void *)clk;
int ret;
if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
@@ -330,9 +330,9 @@ nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
}
static void
-nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
{
- struct nvc0_clock_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &priv->eng[clk];
if (clk < 7 && !info->ssel) {
nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
@@ -340,16 +340,16 @@ nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
}
static void
-nvc0_clock_prog_1(struct nvc0_clock_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
{
nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
}
static void
-nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
{
- struct nvc0_clock_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &priv->eng[clk];
const u32 addr = 0x137000 + (clk * 0x20);
if (clk <= 7) {
nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
@@ -364,9 +364,9 @@ nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
}
static void
-nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
{
- struct nvc0_clock_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &priv->eng[clk];
if (info->ssel) {
nv_mask(priv, 0x137100, (1 << clk), info->ssel);
nv_wait(priv, 0x137100, (1 << clk), info->ssel);
@@ -374,24 +374,24 @@ nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
}
static void
-nvc0_clock_prog_4(struct nvc0_clock_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
{
- struct nvc0_clock_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &priv->eng[clk];
nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
static int
-nvc0_clock_prog(struct nouveau_clock *clk)
+gf100_clk_prog(struct nvkm_clk *clk)
{
- struct nvc0_clock_priv *priv = (void *)clk;
+ struct gf100_clk_priv *priv = (void *)clk;
struct {
- void (*exec)(struct nvc0_clock_priv *, int);
+ void (*exec)(struct gf100_clk_priv *, int);
} stage[] = {
- { nvc0_clock_prog_0 }, /* div programming */
- { nvc0_clock_prog_1 }, /* select div mode */
- { nvc0_clock_prog_2 }, /* (maybe) program pll */
- { nvc0_clock_prog_3 }, /* (maybe) select pll mode */
- { nvc0_clock_prog_4 }, /* final divider */
+ { gf100_clk_prog_0 }, /* div programming */
+ { gf100_clk_prog_1 }, /* select div mode */
+ { gf100_clk_prog_2 }, /* (maybe) program pll */
+ { gf100_clk_prog_3 }, /* (maybe) select pll mode */
+ { gf100_clk_prog_4 }, /* final divider */
};
int i, j;
@@ -407,14 +407,14 @@ nvc0_clock_prog(struct nouveau_clock *clk)
}
static void
-nvc0_clock_tidy(struct nouveau_clock *clk)
+gf100_clk_tidy(struct nvkm_clk *clk)
{
- struct nvc0_clock_priv *priv = (void *)clk;
+ struct gf100_clk_priv *priv = (void *)clk;
memset(priv->eng, 0x00, sizeof(priv->eng));
}
-static struct nouveau_clocks
-nvc0_domain[] = {
+static struct nvkm_domain
+gf100_domain[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
{ nv_clk_src_hubk06 , 0x00 },
@@ -430,33 +430,33 @@ nvc0_domain[] = {
};
static int
-nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_clock_priv *priv;
+ struct gf100_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, NULL, 0,
- false, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
+ NULL, 0, false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.read = nvc0_clock_read;
- priv->base.calc = nvc0_clock_calc;
- priv->base.prog = nvc0_clock_prog;
- priv->base.tidy = nvc0_clock_tidy;
+ priv->base.read = gf100_clk_read;
+ priv->base.calc = gf100_clk_calc;
+ priv->base.prog = gf100_clk_prog;
+ priv->base.tidy = gf100_clk_tidy;
return 0;
}
-struct nouveau_oclass
-nvc0_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass
+gf100_clk_oclass = {
+ .handle = NV_SUBDEV(CLK, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 7eccad57512e..e9b2310bdfbb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/clk.h>
+#include "pll.h"
-#include <subdev/clock.h>
+#include <core/device.h>
#include <subdev/timer.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-#include "pll.h"
-
-struct nve0_clock_info {
+struct gk104_clk_info {
u32 freq;
u32 ssel;
u32 mdiv;
@@ -38,16 +38,16 @@ struct nve0_clock_info {
u32 coef;
};
-struct nve0_clock_priv {
- struct nouveau_clock base;
- struct nve0_clock_info eng[16];
+struct gk104_clk_priv {
+ struct nvkm_clk base;
+ struct gk104_clk_info eng[16];
};
-static u32 read_div(struct nve0_clock_priv *, int, u32, u32);
-static u32 read_pll(struct nve0_clock_priv *, u32);
+static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
+static u32 read_pll(struct gk104_clk_priv *, u32);
static u32
-read_vco(struct nve0_clock_priv *priv, u32 dsrc)
+read_vco(struct gk104_clk_priv *priv, u32 dsrc)
{
u32 ssrc = nv_rd32(priv, dsrc);
if (!(ssrc & 0x00000100))
@@ -56,7 +56,7 @@ read_vco(struct nve0_clock_priv *priv, u32 dsrc)
}
static u32
-read_pll(struct nve0_clock_priv *priv, u32 pll)
+read_pll(struct gk104_clk_priv *priv, u32 pll)
{
u32 ctrl = nv_rd32(priv, pll + 0x00);
u32 coef = nv_rd32(priv, pll + 0x04);
@@ -101,7 +101,7 @@ read_pll(struct nve0_clock_priv *priv, u32 pll)
}
static u32
-read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
{
u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
u32 sctl = nv_rd32(priv, dctl + (doff * 4));
@@ -127,7 +127,7 @@ read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
}
static u32
-read_mem(struct nve0_clock_priv *priv)
+read_mem(struct gk104_clk_priv *priv)
{
switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
case 1: return read_pll(priv, 0x132020);
@@ -138,7 +138,7 @@ read_mem(struct nve0_clock_priv *priv)
}
static u32
-read_clk(struct nve0_clock_priv *priv, int clk)
+read_clk(struct gk104_clk_priv *priv, int clk)
{
u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
u32 sclk, sdiv;
@@ -181,10 +181,10 @@ read_clk(struct nve0_clock_priv *priv, int clk)
}
static int
-nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct nouveau_device *device = nv_device(clk);
- struct nve0_clock_priv *priv = (void *)clk;
+ struct nvkm_device *device = nv_device(clk);
+ struct gk104_clk_priv *priv = (void *)clk;
switch (src) {
case nv_clk_src_crystal:
@@ -214,7 +214,7 @@ nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
static u32
-calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
{
u32 div = min((ref * 2) / freq, (u32)65);
if (div < 2)
@@ -225,7 +225,7 @@ calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
}
static u32
-calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
{
u32 sclk;
@@ -254,9 +254,9 @@ calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
}
static u32
-calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll limits;
int N, M, P, ret;
@@ -268,7 +268,7 @@ calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
if (!limits.refclk)
return 0;
- ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
if (ret <= 0)
return 0;
@@ -277,10 +277,10 @@ calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
}
static int
-calc_clk(struct nve0_clock_priv *priv,
- struct nouveau_cstate *cstate, int clk, int dom)
+calc_clk(struct gk104_clk_priv *priv,
+ struct nvkm_cstate *cstate, int clk, int dom)
{
- struct nve0_clock_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &priv->eng[clk];
u32 freq = cstate->domain[dom];
u32 src0, div0, div1D, div1P = 0;
u32 clk0, clk1 = 0;
@@ -329,9 +329,9 @@ calc_clk(struct nve0_clock_priv *priv,
}
static int
-nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct nve0_clock_priv *priv = (void *)clk;
+ struct gk104_clk_priv *priv = (void *)clk;
int ret;
if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
@@ -347,9 +347,9 @@ nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
}
static void
-nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
{
- struct nve0_clock_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &priv->eng[clk];
if (!info->ssel) {
nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
@@ -357,22 +357,22 @@ nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
}
static void
-nve0_clock_prog_1_0(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
{
nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
}
static void
-nve0_clock_prog_1_1(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
{
nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
}
static void
-nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
{
- struct nve0_clock_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &priv->eng[clk];
const u32 addr = 0x137000 + (clk * 0x20);
nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
@@ -385,9 +385,9 @@ nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
}
static void
-nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
{
- struct nve0_clock_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &priv->eng[clk];
if (info->ssel)
nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
else
@@ -395,9 +395,9 @@ nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
}
static void
-nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
{
- struct nve0_clock_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &priv->eng[clk];
if (info->ssel) {
nv_mask(priv, 0x137100, (1 << clk), info->ssel);
nv_wait(priv, 0x137100, (1 << clk), info->ssel);
@@ -405,9 +405,9 @@ nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
}
static void
-nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
+gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
{
- struct nve0_clock_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &priv->eng[clk];
if (info->ssel) {
nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
@@ -415,20 +415,20 @@ nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
}
static int
-nve0_clock_prog(struct nouveau_clock *clk)
+gk104_clk_prog(struct nvkm_clk *clk)
{
- struct nve0_clock_priv *priv = (void *)clk;
+ struct gk104_clk_priv *priv = (void *)clk;
struct {
u32 mask;
- void (*exec)(struct nve0_clock_priv *, int);
+ void (*exec)(struct gk104_clk_priv *, int);
} stage[] = {
- { 0x007f, nve0_clock_prog_0 }, /* div programming */
- { 0x007f, nve0_clock_prog_1_0 }, /* select div mode */
- { 0xff80, nve0_clock_prog_1_1 },
- { 0x00ff, nve0_clock_prog_2 }, /* (maybe) program pll */
- { 0xff80, nve0_clock_prog_3 }, /* final divider */
- { 0x007f, nve0_clock_prog_4_0 }, /* (maybe) select pll mode */
- { 0xff80, nve0_clock_prog_4_1 },
+ { 0x007f, gk104_clk_prog_0 }, /* div programming */
+ { 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
+ { 0xff80, gk104_clk_prog_1_1 },
+ { 0x00ff, gk104_clk_prog_2 }, /* (maybe) program pll */
+ { 0xff80, gk104_clk_prog_3 }, /* final divider */
+ { 0x007f, gk104_clk_prog_4_0 }, /* (maybe) select pll mode */
+ { 0xff80, gk104_clk_prog_4_1 },
};
int i, j;
@@ -446,14 +446,14 @@ nve0_clock_prog(struct nouveau_clock *clk)
}
static void
-nve0_clock_tidy(struct nouveau_clock *clk)
+gk104_clk_tidy(struct nvkm_clk *clk)
{
- struct nve0_clock_priv *priv = (void *)clk;
+ struct gk104_clk_priv *priv = (void *)clk;
memset(priv->eng, 0x00, sizeof(priv->eng));
}
-static struct nouveau_clocks
-nve0_domain[] = {
+static struct nvkm_domain
+gk104_domain[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
{ nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
@@ -468,33 +468,33 @@ nve0_domain[] = {
};
static int
-nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nve0_clock_priv *priv;
+ struct gk104_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, NULL, 0,
- true, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
+ NULL, 0, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.read = nve0_clock_read;
- priv->base.calc = nve0_clock_calc;
- priv->base.prog = nve0_clock_prog;
- priv->base.tidy = nve0_clock_tidy;
+ priv->base.read = gk104_clk_read;
+ priv->base.calc = gk104_clk_calc;
+ priv->base.prog = gk104_clk_prog;
+ priv->base.tidy = gk104_clk_tidy;
return 0;
}
-struct nouveau_oclass
-nve0_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass
+gk104_clk_oclass = {
+ .handle = NV_SUBDEV(CLK, 0xe0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index fb4fad374bdd..65c532742b08 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,6 +22,14 @@
* Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
*
*/
+#include <subdev/clk.h>
+#include <subdev/timer.h>
+
+#include <core/device.h>
+
+#ifdef __KERNEL__
+#include <nouveau_platform.h>
+#endif
#define MHZ (1000 * 1000)
@@ -87,13 +95,6 @@
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
-
static const u8 pl_to_div[] = {
/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
@@ -116,16 +117,16 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
.min_pl = 1, .max_pl = 32,
};
-struct gk20a_clock_priv {
- struct nouveau_clock base;
+struct gk20a_clk_priv {
+ struct nvkm_clk base;
const struct gk20a_clk_pllg_params *params;
u32 m, n, pl;
u32 parent_rate;
};
-#define to_gk20a_clock(base) container_of(base, struct gk20a_clock_priv, base)
+#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
static void
-gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv)
+gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
{
u32 val;
@@ -136,7 +137,7 @@ gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv)
}
static u32
-gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv)
+gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
{
u32 rate;
u32 divider;
@@ -149,7 +150,7 @@ gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv)
}
static int
-gk20a_pllg_calc_mnp(struct gk20a_clock_priv *priv, unsigned long rate)
+gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
{
u32 target_clk_f, ref_clk_f, target_freq;
u32 min_vco_f, max_vco_f;
@@ -260,12 +261,11 @@ found_match:
nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
-
return 0;
}
static int
-gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n)
+gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
{
u32 val;
int ramp_timeout;
@@ -322,21 +322,21 @@ gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n)
}
static void
-_gk20a_pllg_enable(struct gk20a_clock_priv *priv)
+_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
{
nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
nv_rd32(priv, GPCPLL_CFG);
}
static void
-_gk20a_pllg_disable(struct gk20a_clock_priv *priv)
+_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
{
nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
nv_rd32(priv, GPCPLL_CFG);
}
static int
-_gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide)
+_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
{
u32 val, cfg;
u32 m_old, pl_old, n_lo;
@@ -402,8 +402,8 @@ _gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide)
nv_wr32(priv, GPCPLL_CFG, val);
}
- if (!nouveau_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
- GPCPLL_CFG_LOCK)) {
+ if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+ GPCPLL_CFG_LOCK)) {
nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
return -ETIMEDOUT;
}
@@ -422,7 +422,7 @@ _gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide)
}
static int
-gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv)
+gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
{
int err;
@@ -434,7 +434,7 @@ gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv)
}
static void
-gk20a_pllg_disable(struct gk20a_clock_priv *priv)
+gk20a_pllg_disable(struct gk20a_clk_priv *priv)
{
u32 val;
@@ -458,14 +458,14 @@ gk20a_pllg_disable(struct gk20a_clock_priv *priv)
#define GK20A_CLK_GPC_MDIV 1000
-static struct nouveau_clocks
+static struct nvkm_domain
gk20a_domains[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
{ nv_clk_src_max }
};
-static struct nouveau_pstate
+static struct nvkm_pstate
gk20a_pstates[] = {
{
.base = {
@@ -560,9 +560,9 @@ gk20a_pstates[] = {
};
static int
-gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct gk20a_clock_priv *priv = (void *)clk;
+ struct gk20a_clk_priv *priv = (void *)clk;
switch (src) {
case nv_clk_src_crystal:
@@ -577,34 +577,34 @@ gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
static int
-gk20a_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct gk20a_clock_priv *priv = (void *)clk;
+ struct gk20a_clk_priv *priv = (void *)clk;
return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
GK20A_CLK_GPC_MDIV);
}
static int
-gk20a_clock_prog(struct nouveau_clock *clk)
+gk20a_clk_prog(struct nvkm_clk *clk)
{
- struct gk20a_clock_priv *priv = (void *)clk;
+ struct gk20a_clk_priv *priv = (void *)clk;
return gk20a_pllg_program_mnp(priv);
}
static void
-gk20a_clock_tidy(struct nouveau_clock *clk)
+gk20a_clk_tidy(struct nvkm_clk *clk)
{
}
static int
-gk20a_clock_fini(struct nouveau_object *object, bool suspend)
+gk20a_clk_fini(struct nvkm_object *object, bool suspend)
{
- struct gk20a_clock_priv *priv = (void *)object;
+ struct gk20a_clk_priv *priv = (void *)object;
int ret;
- ret = nouveau_clock_fini(&priv->base, false);
+ ret = nvkm_clk_fini(&priv->base, false);
gk20a_pllg_disable(priv);
@@ -612,18 +612,18 @@ gk20a_clock_fini(struct nouveau_object *object, bool suspend)
}
static int
-gk20a_clock_init(struct nouveau_object *object)
+gk20a_clk_init(struct nvkm_object *object)
{
- struct gk20a_clock_priv *priv = (void *)object;
+ struct gk20a_clk_priv *priv = (void *)object;
int ret;
nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
- ret = nouveau_clock_init(&priv->base);
+ ret = nvkm_clk_init(&priv->base);
if (ret)
return ret;
- ret = gk20a_clock_prog(&priv->base);
+ ret = gk20a_clk_prog(&priv->base);
if (ret) {
nv_error(priv, "cannot initialize clock\n");
return ret;
@@ -633,11 +633,11 @@ gk20a_clock_init(struct nouveau_object *object)
}
static int
-gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct gk20a_clock_priv *priv;
+ struct gk20a_clk_priv *priv;
struct nouveau_platform_device *plat;
int ret;
int i;
@@ -648,8 +648,9 @@ gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
gk20a_pstates[i].pstate = i + 1;
}
- ret = nouveau_clock_create(parent, engine, oclass, gk20a_domains,
- gk20a_pstates, ARRAY_SIZE(gk20a_pstates), true, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
+ gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -660,21 +661,20 @@ gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->parent_rate = clk_get_rate(plat->gpu->clk);
nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
- priv->base.read = gk20a_clock_read;
- priv->base.calc = gk20a_clock_calc;
- priv->base.prog = gk20a_clock_prog;
- priv->base.tidy = gk20a_clock_tidy;
-
+ priv->base.read = gk20a_clk_read;
+ priv->base.calc = gk20a_clk_calc;
+ priv->base.prog = gk20a_clk_prog;
+ priv->base.tidy = gk20a_clk_tidy;
return 0;
}
-struct nouveau_oclass
-gk20a_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0xea),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = gk20a_clock_ctor,
- .dtor = _nouveau_subdev_dtor,
- .init = gk20a_clock_init,
- .fini = gk20a_clock_fini,
+struct nvkm_oclass
+gk20a_clk_oclass = {
+ .handle = NV_SUBDEV(CLK, 0xea),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk20a_clk_ctor,
+ .dtor = _nvkm_subdev_dtor,
+ .init = gk20a_clk_init,
+ .fini = gk20a_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 07ad01247675..822d32a28d6e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,26 +22,25 @@
* Authors: Ben Skeggs
* Roy Spliet
*/
+#include "gt215.h"
+#include "pll.h"
+#include <core/device.h>
#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-#include "pll.h"
-
-#include "nva3.h"
-
-struct nva3_clock_priv {
- struct nouveau_clock base;
- struct nva3_clock_info eng[nv_clk_src_max];
+struct gt215_clk_priv {
+ struct nvkm_clk base;
+ struct gt215_clk_info eng[nv_clk_src_max];
};
-static u32 read_clk(struct nva3_clock_priv *, int, bool);
-static u32 read_pll(struct nva3_clock_priv *, int, u32);
+static u32 read_clk(struct gt215_clk_priv *, int, bool);
+static u32 read_pll(struct gt215_clk_priv *, int, u32);
static u32
-read_vco(struct nva3_clock_priv *priv, int clk)
+read_vco(struct gt215_clk_priv *priv, int clk)
{
u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
@@ -58,7 +57,7 @@ read_vco(struct nva3_clock_priv *priv, int clk)
}
static u32
-read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
+read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
{
u32 sctl, sdiv, sclk;
@@ -104,7 +103,7 @@ read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
}
static u32
-read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
+read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
{
u32 ctrl = nv_rd32(priv, pll + 0);
u32 sclk = 0, P = 1, N = 1, M = 1;
@@ -130,13 +129,14 @@ read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
if (M * P)
return sclk * N / (M * P);
+
return 0;
}
static int
-nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct nva3_clock_priv *priv = (void *)clk;
+ struct gt215_clk_priv *priv = (void *)clk;
u32 hsrc;
switch (src) {
@@ -176,10 +176,10 @@ nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
int
-nva3_clk_info(struct nouveau_clock *clock, int clk, u32 khz,
- struct nva3_clock_info *info)
+gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
+ struct gt215_clk_info *info)
{
- struct nva3_clock_priv *priv = (void *)clock;
+ struct gt215_clk_priv *priv = (void *)clock;
u32 oclk, sclk, sdiv, diff;
info->clk = 0;
@@ -223,11 +223,11 @@ nva3_clk_info(struct nouveau_clock *clock, int clk, u32 khz,
}
int
-nva3_pll_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
- struct nva3_clock_info *info)
+gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
+ struct gt215_clk_info *info)
{
- struct nouveau_bios *bios = nouveau_bios(clock);
- struct nva3_clock_priv *priv = (void *)clock;
+ struct nvkm_bios *bios = nvkm_bios(clock);
+ struct gt215_clk_priv *priv = (void *)clock;
struct nvbios_pll limits;
int P, N, M, diff;
int ret;
@@ -236,7 +236,7 @@ nva3_pll_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
/* If we can get a within [-2, 3) MHz of a divider, we'll disable the
* PLL and use the divider instead. */
- ret = nva3_clk_info(clock, clk, khz, info);
+ ret = gt215_clk_info(clock, clk, khz, info);
diff = khz - ret;
if (!pll || (diff >= -2000 && diff < 3000)) {
goto out;
@@ -247,38 +247,37 @@ nva3_pll_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
if (ret)
return ret;
- ret = nva3_clk_info(clock, clk - 0x10, limits.refclk, info);
+ ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
if (ret != limits.refclk)
return -EINVAL;
- ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
if (ret >= 0) {
info->pll = (P << 16) | (N << 8) | M;
}
out:
info->fb_delay = max(((khz + 7566) / 15133), (u32) 18);
-
return ret ? ret : -ERANGE;
}
static int
-calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
+calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
int clk, u32 pll, int idx)
{
- int ret = nva3_pll_info(&priv->base, clk, pll, cstate->domain[idx],
- &priv->eng[idx]);
+ int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
+ &priv->eng[idx]);
if (ret >= 0)
return 0;
return ret;
}
static int
-calc_host(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate)
+calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
{
int ret = 0;
u32 kHz = cstate->domain[nv_clk_src_host];
- struct nva3_clock_info *info = &priv->eng[nv_clk_src_host];
+ struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
if (kHz == 277000) {
info->clk = 0;
@@ -288,16 +287,17 @@ calc_host(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate)
info->host_out = NVA3_HOST_CLK;
- ret = nva3_clk_info(&priv->base, 0x1d, kHz, info);
+ ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
if (ret >= 0)
return 0;
+
return ret;
}
int
-nva3_clock_pre(struct nouveau_clock *clk, unsigned long *flags)
+gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(clk);
+ struct nvkm_fifo *pfifo = nvkm_fifo(clk);
/* halt and idle execution engines */
nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
@@ -318,9 +318,9 @@ nva3_clock_pre(struct nouveau_clock *clk, unsigned long *flags)
}
void
-nva3_clock_post(struct nouveau_clock *clk, unsigned long *flags)
+gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(clk);
+ struct nvkm_fifo *pfifo = nvkm_fifo(clk);
if (pfifo && flags)
pfifo->start(pfifo, flags);
@@ -330,16 +330,16 @@ nva3_clock_post(struct nouveau_clock *clk, unsigned long *flags)
}
static void
-disable_clk_src(struct nva3_clock_priv *priv, u32 src)
+disable_clk_src(struct gt215_clk_priv *priv, u32 src)
{
nv_mask(priv, src, 0x00000100, 0x00000000);
nv_mask(priv, src, 0x00000001, 0x00000000);
}
static void
-prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
+prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
{
- struct nva3_clock_info *info = &priv->eng[idx];
+ struct gt215_clk_info *info = &priv->eng[idx];
const u32 src0 = 0x004120 + (clk * 4);
const u32 src1 = 0x004160 + (clk * 4);
const u32 ctrl = pll + 0;
@@ -377,16 +377,16 @@ prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
}
static void
-prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
+prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
{
- struct nva3_clock_info *info = &priv->eng[idx];
+ struct gt215_clk_info *info = &priv->eng[idx];
nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
}
static void
-prog_host(struct nva3_clock_priv *priv)
+prog_host(struct gt215_clk_priv *priv)
{
- struct nva3_clock_info *info = &priv->eng[nv_clk_src_host];
+ struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
u32 hsrc = (nv_rd32(priv, 0xc040));
switch (info->host_out) {
@@ -411,9 +411,9 @@ prog_host(struct nva3_clock_priv *priv)
}
static void
-prog_core(struct nva3_clock_priv *priv, int idx)
+prog_core(struct gt215_clk_priv *priv, int idx)
{
- struct nva3_clock_info *info = &priv->eng[idx];
+ struct gt215_clk_info *info = &priv->eng[idx];
u32 fb_delay = nv_rd32(priv, 0x10002c);
if (fb_delay < info->fb_delay)
@@ -426,10 +426,10 @@ prog_core(struct nva3_clock_priv *priv, int idx)
}
static int
-nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct nva3_clock_priv *priv = (void *)clk;
- struct nva3_clock_info *core = &priv->eng[nv_clk_src_core];
+ struct gt215_clk_priv *priv = (void *)clk;
+ struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
int ret;
if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
@@ -442,9 +442,9 @@ nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
/* XXX: Should be reading the highest bit in the VBIOS clock to decide
* whether to use a PLL or not... but using a PLL defeats the purpose */
if (core->pll) {
- ret = nva3_clk_info(clk, 0x10,
- cstate->domain[nv_clk_src_core_intm],
- &priv->eng[nv_clk_src_core_intm]);
+ ret = gt215_clk_info(clk, 0x10,
+ cstate->domain[nv_clk_src_core_intm],
+ &priv->eng[nv_clk_src_core_intm]);
if (ret < 0)
return ret;
}
@@ -453,15 +453,15 @@ nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
}
static int
-nva3_clock_prog(struct nouveau_clock *clk)
+gt215_clk_prog(struct nvkm_clk *clk)
{
- struct nva3_clock_priv *priv = (void *)clk;
- struct nva3_clock_info *core = &priv->eng[nv_clk_src_core];
+ struct gt215_clk_priv *priv = (void *)clk;
+ struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
int ret = 0;
unsigned long flags;
unsigned long *f = &flags;
- ret = nva3_clock_pre(clk, f);
+ ret = gt215_clk_pre(clk, f);
if (ret)
goto out;
@@ -478,18 +478,17 @@ out:
if (ret == -EBUSY)
f = NULL;
- nva3_clock_post(clk, f);
-
+ gt215_clk_post(clk, f);
return ret;
}
static void
-nva3_clock_tidy(struct nouveau_clock *clk)
+gt215_clk_tidy(struct nvkm_clk *clk)
{
}
-static struct nouveau_clocks
-nva3_domain[] = {
+static struct nvkm_domain
+gt215_domain[] = {
{ nv_clk_src_crystal , 0xff },
{ nv_clk_src_core , 0x00, 0, "core", 1000 },
{ nv_clk_src_shader , 0x01, 0, "shader", 1000 },
@@ -502,33 +501,33 @@ nva3_domain[] = {
};
static int
-nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nva3_clock_priv *priv;
+ struct gt215_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
- true, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
+ NULL, 0, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.read = nva3_clock_read;
- priv->base.calc = nva3_clock_calc;
- priv->base.prog = nva3_clock_prog;
- priv->base.tidy = nva3_clock_tidy;
+ priv->base.read = gt215_clk_read;
+ priv->base.calc = gt215_clk_calc;
+ priv->base.prog = gt215_clk_prog;
+ priv->base.tidy = gt215_clk_tidy;
return 0;
}
-struct nouveau_oclass
-nva3_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass
+gt215_clk_oclass = {
+ .handle = NV_SUBDEV(CLK, 0xa3),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt215_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
new file mode 100644
index 000000000000..b447d9cd4d37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_CLK_NVA3_H__
+#define __NVKM_CLK_NVA3_H__
+#include <subdev/clk.h>
+
+struct gt215_clk_info {
+ u32 clk;
+ u32 pll;
+ enum {
+ NVA3_HOST_277,
+ NVA3_HOST_CLK,
+ } host_out;
+ u32 fb_delay;
+};
+
+int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
+int gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
+void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 54aeab8005a0..c54417b146c7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,18 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "gt215.h"
+#include "pll.h"
-#include <engine/fifo.h>
+#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-#include <subdev/clock.h>
-
-#include "nva3.h"
-#include "pll.h"
-struct nvaa_clock_priv {
- struct nouveau_clock base;
+struct mcp77_clk_priv {
+ struct nvkm_clk base;
enum nv_clk_src csrc, ssrc, vsrc;
u32 cctrl, sctrl;
u32 ccoef, scoef;
@@ -41,13 +39,13 @@ struct nvaa_clock_priv {
};
static u32
-read_div(struct nouveau_clock *clk)
+read_div(struct nvkm_clk *clk)
{
return nv_rd32(clk, 0x004600);
}
static u32
-read_pll(struct nouveau_clock *clk, u32 base)
+read_pll(struct nvkm_clk *clk, u32 base)
{
u32 ctrl = nv_rd32(clk, base + 0);
u32 coef = nv_rd32(clk, base + 4);
@@ -78,9 +76,9 @@ read_pll(struct nouveau_clock *clk, u32 base)
}
static int
-nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct nvaa_clock_priv *priv = (void *)clk;
+ struct mcp77_clk_priv *priv = (void *)clk;
u32 mast = nv_rd32(clk, 0x00c054);
u32 P = 0;
@@ -160,12 +158,12 @@ nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
static u32
-calc_pll(struct nvaa_clock_priv *priv, u32 reg,
+calc_pll(struct mcp77_clk_priv *priv, u32 reg,
u32 clock, int *N, int *M, int *P)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll pll;
- struct nouveau_clock *clk = &priv->base;
+ struct nvkm_clk *clk = &priv->base;
int ret;
ret = nvbios_pll_parse(bios, reg, &pll);
@@ -199,9 +197,9 @@ calc_P(u32 src, u32 target, int *div)
}
static int
-nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct nvaa_clock_priv *priv = (void *)clk;
+ struct mcp77_clk_priv *priv = (void *)clk;
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -216,8 +214,7 @@ nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
/* Calculate clock * 2, so shader clock can use it too */
clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
- if (abs(core - out) <=
- abs(core - (clock >> 1))) {
+ if (abs(core - out) <= abs(core - (clock >> 1))) {
priv->csrc = nv_clk_src_hclkm4;
priv->cctrl = divs << 16;
} else {
@@ -242,9 +239,8 @@ nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
priv->ssrc = nv_clk_src_href;
} else {
clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
- if (priv->csrc == nv_clk_src_core) {
+ if (priv->csrc == nv_clk_src_core)
out = calc_P((core << 1), shader, &divs);
- }
if (abs(shader - out) <=
abs(shader - clock) &&
@@ -261,8 +257,7 @@ nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
/* vclk */
out = calc_P(core, vdec, &divs);
clock = calc_P(500000, vdec, &P1);
- if(abs(vdec - out) <=
- abs(vdec - clock)) {
+ if(abs(vdec - out) <= abs(vdec - clock)) {
priv->vsrc = nv_clk_src_cclk;
priv->vdiv = divs << 16;
} else {
@@ -297,15 +292,15 @@ nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
}
static int
-nvaa_clock_prog(struct nouveau_clock *clk)
+mcp77_clk_prog(struct nvkm_clk *clk)
{
- struct nvaa_clock_priv *priv = (void *)clk;
+ struct mcp77_clk_priv *priv = (void *)clk;
u32 pllmask = 0, mast;
unsigned long flags;
unsigned long *f = &flags;
int ret = 0;
- ret = nva3_clock_pre(clk, f);
+ ret = gt215_clk_pre(clk, f);
if (ret)
goto out;
@@ -382,18 +377,17 @@ out:
if (ret == -EBUSY)
f = NULL;
- nva3_clock_post(clk, f);
-
+ gt215_clk_post(clk, f);
return ret;
}
static void
-nvaa_clock_tidy(struct nouveau_clock *clk)
+mcp77_clk_tidy(struct nvkm_clk *clk)
{
}
-static struct nouveau_clocks
-nvaa_domains[] = {
+static struct nvkm_domain
+mcp77_domains[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
{ nv_clk_src_core , 0xff, 0, "core", 1000 },
@@ -403,33 +397,33 @@ nvaa_domains[] = {
};
static int
-nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvaa_clock_priv *priv;
+ struct mcp77_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, NULL,
- 0, true, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
+ NULL, 0, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.read = nvaa_clock_read;
- priv->base.calc = nvaa_clock_calc;
- priv->base.prog = nvaa_clock_prog;
- priv->base.tidy = nvaa_clock_tidy;
+ priv->base.read = mcp77_clk_read;
+ priv->base.calc = mcp77_clk_calc;
+ priv->base.prog = mcp77_clk_prog;
+ priv->base.tidy = mcp77_clk_tidy;
return 0;
}
-struct nouveau_oclass *
-nvaa_clock_oclass = &(struct nouveau_oclass) {
- .handle = NV_SUBDEV(CLOCK, 0xaa),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvaa_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass *
+mcp77_clk_oclass = &(struct nvkm_oclass) {
+ .handle = NV_SUBDEV(CLK, 0xaa),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = mcp77_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 4c48232686be..63dbbb575228 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,21 +21,20 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/clk.h>
+#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
#include <subdev/devinit/nv04.h>
-#include "pll.h"
-
-struct nv04_clock_priv {
- struct nouveau_clock base;
+struct nv04_clk_priv {
+ struct nvkm_clk base;
};
int
-nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
- int clk, struct nouveau_pll_vals *pv)
+nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
+ int clk, struct nvkm_pll_vals *pv)
{
int N1, M1, N2, M2, P;
int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P);
@@ -51,11 +50,10 @@ nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
}
int
-nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
- struct nouveau_pll_vals *pv)
+nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
{
- struct nouveau_devinit *devinit = nouveau_devinit(clk);
- int cv = nouveau_bios(clk)->version.chip;
+ struct nvkm_devinit *devinit = nvkm_devinit(clk);
+ int cv = nvkm_bios(clk)->version.chip;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
@@ -69,37 +67,37 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
return 0;
}
-static struct nouveau_clocks
+static struct nvkm_domain
nv04_domain[] = {
{ nv_clk_src_max }
};
static int
-nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv04_clock_priv *priv;
+ struct nv04_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, NULL, 0,
- false, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
+ NULL, 0, false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.pll_calc = nv04_clock_pll_calc;
- priv->base.pll_prog = nv04_clock_pll_prog;
+ priv->base.pll_calc = nv04_clk_pll_calc;
+ priv->base.pll_prog = nv04_clk_pll_prog;
return 0;
}
-struct nouveau_oclass
-nv04_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass
+nv04_clk_oclass = {
+ .handle = NV_SUBDEV(CLK, 0x04),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv04_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index 08368fe97029..ed838130c89d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,22 +21,22 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/clk.h>
+#include "pll.h"
-#include <subdev/clock.h>
+#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-#include "pll.h"
-
-struct nv40_clock_priv {
- struct nouveau_clock base;
+struct nv40_clk_priv {
+ struct nvkm_clk base;
u32 ctrl;
u32 npll_ctrl;
u32 npll_coef;
u32 spll;
};
-static struct nouveau_clocks
+static struct nvkm_domain
nv40_domain[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
@@ -47,7 +47,7 @@ nv40_domain[] = {
};
static u32
-read_pll_1(struct nv40_clock_priv *priv, u32 reg)
+read_pll_1(struct nv40_clk_priv *priv, u32 reg)
{
u32 ctrl = nv_rd32(priv, reg + 0x00);
int P = (ctrl & 0x00070000) >> 16;
@@ -62,7 +62,7 @@ read_pll_1(struct nv40_clock_priv *priv, u32 reg)
}
static u32
-read_pll_2(struct nv40_clock_priv *priv, u32 reg)
+read_pll_2(struct nv40_clk_priv *priv, u32 reg)
{
u32 ctrl = nv_rd32(priv, reg + 0x00);
u32 coef = nv_rd32(priv, reg + 0x04);
@@ -87,7 +87,7 @@ read_pll_2(struct nv40_clock_priv *priv, u32 reg)
}
static u32
-read_clk(struct nv40_clock_priv *priv, u32 src)
+read_clk(struct nv40_clk_priv *priv, u32 src)
{
switch (src) {
case 3:
@@ -102,9 +102,9 @@ read_clk(struct nv40_clock_priv *priv, u32 src)
}
static int
-nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct nv40_clock_priv *priv = (void *)clk;
+ struct nv40_clk_priv *priv = (void *)clk;
u32 mast = nv_rd32(priv, 0x00c040);
switch (src) {
@@ -127,10 +127,10 @@ nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
static int
-nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
- int *N1, int *M1, int *N2, int *M2, int *log2P)
+nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
+ int *N1, int *M1, int *N2, int *M2, int *log2P)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll pll;
int ret;
@@ -144,21 +144,22 @@ nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
if (ret == 0)
return -ERANGE;
+
return ret;
}
static int
-nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct nv40_clock_priv *priv = (void *)clk;
+ struct nv40_clk_priv *priv = (void *)clk;
int gclk = cstate->domain[nv_clk_src_core];
int sclk = cstate->domain[nv_clk_src_shader];
int N1, M1, N2, M2, log2P;
int ret;
/* core/geometric clock */
- ret = nv40_clock_calc_pll(priv, 0x004000, gclk,
- &N1, &M1, &N2, &M2, &log2P);
+ ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
+ &N1, &M1, &N2, &M2, &log2P);
if (ret < 0)
return ret;
@@ -172,8 +173,8 @@ nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
/* use the second pll for shader/rop clock, if it differs from core */
if (sclk && sclk != gclk) {
- ret = nv40_clock_calc_pll(priv, 0x004008, sclk,
- &N1, &M1, NULL, NULL, &log2P);
+ ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
+ &N1, &M1, NULL, NULL, &log2P);
if (ret < 0)
return ret;
@@ -188,9 +189,9 @@ nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
}
static int
-nv40_clock_prog(struct nouveau_clock *clk)
+nv40_clk_prog(struct nvkm_clk *clk)
{
- struct nv40_clock_priv *priv = (void *)clk;
+ struct nv40_clk_priv *priv = (void *)clk;
nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
nv_wr32(priv, 0x004004, priv->npll_coef);
nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
@@ -201,40 +202,40 @@ nv40_clock_prog(struct nouveau_clock *clk)
}
static void
-nv40_clock_tidy(struct nouveau_clock *clk)
+nv40_clk_tidy(struct nvkm_clk *clk)
{
}
static int
-nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv40_clock_priv *priv;
+ struct nv40_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, NULL, 0,
- true, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
+ NULL, 0, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.pll_calc = nv04_clock_pll_calc;
- priv->base.pll_prog = nv04_clock_pll_prog;
- priv->base.read = nv40_clock_read;
- priv->base.calc = nv40_clock_calc;
- priv->base.prog = nv40_clock_prog;
- priv->base.tidy = nv40_clock_tidy;
+ priv->base.pll_calc = nv04_clk_pll_calc;
+ priv->base.pll_prog = nv04_clk_pll_prog;
+ priv->base.read = nv40_clk_read;
+ priv->base.calc = nv40_clk_calc;
+ priv->base.prog = nv40_clk_prog;
+ priv->base.tidy = nv40_clk_tidy;
return 0;
}
-struct nouveau_oclass
-nv40_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass
+nv40_clk_oclass = {
+ .handle = NV_SUBDEV(CLK, 0x40),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv40_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 5070ebc260f8..9b4ffd6347ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -21,16 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
-#include <subdev/bios.h>
-#include <subdev/bios/pll.h>
-
#include "nv50.h"
#include "pll.h"
#include "seq.h"
+#include <core/device.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
static u32
-read_div(struct nv50_clock_priv *priv)
+read_div(struct nv50_clk_priv *priv)
{
switch (nv_device(priv)->chipset) {
case 0x50: /* it exists, but only has bit 31, not the dividers.. */
@@ -49,9 +49,9 @@ read_div(struct nv50_clock_priv *priv)
}
static u32
-read_pll_src(struct nv50_clock_priv *priv, u32 base)
+read_pll_src(struct nv50_clk_priv *priv, u32 base)
{
- struct nouveau_clock *clk = &priv->base;
+ struct nvkm_clk *clk = &priv->base;
u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
u32 rsel = nv_rd32(priv, 0x00e18c);
int P, N, M, id;
@@ -116,13 +116,14 @@ read_pll_src(struct nv50_clock_priv *priv, u32 base)
if (M)
return (ref * N / M) >> P;
+
return 0;
}
static u32
-read_pll_ref(struct nv50_clock_priv *priv, u32 base)
+read_pll_ref(struct nv50_clk_priv *priv, u32 base)
{
- struct nouveau_clock *clk = &priv->base;
+ struct nvkm_clk *clk = &priv->base;
u32 src, mast = nv_rd32(priv, 0x00c040);
switch (base) {
@@ -147,13 +148,14 @@ read_pll_ref(struct nv50_clock_priv *priv, u32 base)
if (src)
return clk->read(clk, nv_clk_src_href);
+
return read_pll_src(priv, base);
}
static u32
-read_pll(struct nv50_clock_priv *priv, u32 base)
+read_pll(struct nv50_clk_priv *priv, u32 base)
{
- struct nouveau_clock *clk = &priv->base;
+ struct nvkm_clk *clk = &priv->base;
u32 mast = nv_rd32(priv, 0x00c040);
u32 ctrl = nv_rd32(priv, base + 0);
u32 coef = nv_rd32(priv, base + 4);
@@ -162,7 +164,7 @@ read_pll(struct nv50_clock_priv *priv, u32 base)
int N1, N2, M1, M2;
if (base == 0x004028 && (mast & 0x00100000)) {
- /* wtf, appears to only disable post-divider on nva0 */
+ /* wtf, appears to only disable post-divider on gt200 */
if (nv_device(priv)->chipset != 0xa0)
return clk->read(clk, nv_clk_src_dom6);
}
@@ -185,9 +187,9 @@ read_pll(struct nv50_clock_priv *priv, u32 base)
}
static int
-nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
- struct nv50_clock_priv *priv = (void *)clk;
+ struct nv50_clk_priv *priv = (void *)clk;
u32 mast = nv_rd32(priv, 0x00c040);
u32 P = 0;
@@ -316,9 +318,9 @@ nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
}
static u32
-calc_pll(struct nv50_clock_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll pll;
int ret;
@@ -359,10 +361,10 @@ clk_same(u32 a, u32 b)
}
static int
-nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
- struct nv50_clock_priv *priv = (void *)clk;
- struct nv50_clock_hwsq *hwsq = &priv->hwsq;
+ struct nv50_clk_priv *priv = (void *)clk;
+ struct nv50_clk_hwsq *hwsq = &priv->hwsq;
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -484,30 +486,30 @@ nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
}
static int
-nv50_clock_prog(struct nouveau_clock *clk)
+nv50_clk_prog(struct nvkm_clk *clk)
{
- struct nv50_clock_priv *priv = (void *)clk;
+ struct nv50_clk_priv *priv = (void *)clk;
return clk_exec(&priv->hwsq, true);
}
static void
-nv50_clock_tidy(struct nouveau_clock *clk)
+nv50_clk_tidy(struct nvkm_clk *clk)
{
- struct nv50_clock_priv *priv = (void *)clk;
+ struct nv50_clk_priv *priv = (void *)clk;
clk_exec(&priv->hwsq, false);
}
int
-nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv50_clock_oclass *pclass = (void *)oclass;
- struct nv50_clock_priv *priv;
+ struct nv50_clk_oclass *pclass = (void *)oclass;
+ struct nv50_clk_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
- NULL, 0, false, &priv);
+ ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
+ NULL, 0, false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -529,14 +531,14 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->hwsq.r_mast = hwsq_reg(0x00c040);
- priv->base.read = nv50_clock_read;
- priv->base.calc = nv50_clock_calc;
- priv->base.prog = nv50_clock_prog;
- priv->base.tidy = nv50_clock_tidy;
+ priv->base.read = nv50_clk_read;
+ priv->base.calc = nv50_clk_calc;
+ priv->base.prog = nv50_clk_prog;
+ priv->base.tidy = nv50_clk_tidy;
return 0;
}
-static struct nouveau_clocks
+static struct nvkm_domain
nv50_domains[] = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
@@ -546,14 +548,14 @@ nv50_domains[] = {
{ nv_clk_src_max }
};
-struct nouveau_oclass *
-nv50_clock_oclass = &(struct nv50_clock_oclass) {
- .base.handle = NV_SUBDEV(CLOCK, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_clock_ctor,
- .dtor = _nouveau_clock_dtor,
- .init = _nouveau_clock_init,
- .fini = _nouveau_clock_fini,
+struct nvkm_oclass *
+nv50_clk_oclass = &(struct nv50_clk_oclass) {
+ .base.handle = NV_SUBDEV(CLK, 0x50),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_clk_ctor,
+ .dtor = _nvkm_clk_dtor,
+ .init = _nvkm_clk_init,
+ .fini = _nvkm_clk_fini,
},
.domains = nv50_domains,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
new file mode 100644
index 000000000000..0ead76a32f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_CLK_NV50_H__
+#define __NVKM_CLK_NV50_H__
+#include <subdev/bus/hwsq.h>
+#include <subdev/clk.h>
+
+struct nv50_clk_hwsq {
+ struct hwsq base;
+ struct hwsq_reg r_fifo;
+ struct hwsq_reg r_spll[2];
+ struct hwsq_reg r_nvpll[2];
+ struct hwsq_reg r_divs;
+ struct hwsq_reg r_mast;
+};
+
+struct nv50_clk_priv {
+ struct nvkm_clk base;
+ struct nv50_clk_hwsq hwsq;
+};
+
+int nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+
+struct nv50_clk_oclass {
+ struct nvkm_oclass base;
+ struct nvkm_domain *domains;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
new file mode 100644
index 000000000000..44020a30dee8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_PLL_H__
+#define __NVKM_PLL_H__
+#include <core/os.h>
+struct nvkm_subdev;
+struct nvbios_pll;
+
+int nv04_pll_calc(struct nvkm_subdev *, struct nvbios_pll *, u32 freq,
+ int *N1, int *M1, int *N2, int *M2, int *P);
+int gt215_pll_calc(struct nvkm_subdev *, struct nvbios_pll *, u32 freq,
+ int *N, int *fN, int *M, int *P);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 8eca457c2814..783a3e78d632 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -21,16 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include "pll.h"
-#include <subdev/clock.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-#include "pll.h"
-
int
-nva3_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info,
- u32 freq, int *pN, int *pfN, int *pM, int *P)
+gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info,
+ u32 freq, int *pN, int *pfN, int *pM, int *P)
{
u32 best_err = ~0, err;
int M, lM, hM, N, fN;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index b47d543ab2e3..f2292895a1a8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -20,14 +20,13 @@
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-#include "pll.h"
-
static int
-getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
+getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
int *pN, int *pM, int *pP)
{
/* Find M, N and P for a single stage PLL
@@ -38,7 +37,7 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- struct nouveau_bios *bios = nouveau_bios(subdev);
+ struct nvkm_bios *bios = nvkm_bios(subdev);
int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
int minM = info->vco1.min_m, maxM = info->vco1.max_m;
int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -126,7 +125,7 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
}
static int
-getMNP_double(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
+getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
{
/* Find M, N and P for a two stage PLL
@@ -137,7 +136,7 @@ getMNP_double(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- int chip_version = nouveau_bios(subdev)->version.chip;
+ int chip_version = nvkm_bios(subdev)->version.chip;
int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -225,7 +224,7 @@ getMNP_double(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
}
int
-nv04_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info, u32 freq,
+nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq,
int *N1, int *M1, int *N2, int *M2, int *P)
{
int ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
index fb33f06ebd59..d717e8b8f679 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
@@ -1,7 +1,5 @@
#ifndef __NVKM_CLK_SEQ_H__
#define __NVKM_CLK_SEQ_H__
-
-#include <subdev/bus.h>
#include <subdev/bus/hwsq.h>
#define clk_init(s,p) hwsq_init(&(s)->base, (p))
@@ -13,5 +11,4 @@
#define clk_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
#define clk_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
#define clk_nsec(s,n) hwsq_nsec(&(s)->base, (n))
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
new file mode 100644
index 000000000000..793e73d16dac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -0,0 +1,14 @@
+nvkm-y += nvkm/subdev/devinit/base.o
+nvkm-y += nvkm/subdev/devinit/nv04.o
+nvkm-y += nvkm/subdev/devinit/nv05.o
+nvkm-y += nvkm/subdev/devinit/nv10.o
+nvkm-y += nvkm/subdev/devinit/nv1a.o
+nvkm-y += nvkm/subdev/devinit/nv20.o
+nvkm-y += nvkm/subdev/devinit/nv50.o
+nvkm-y += nvkm/subdev/devinit/g84.o
+nvkm-y += nvkm/subdev/devinit/g98.o
+nvkm-y += nvkm/subdev/devinit/gt215.o
+nvkm-y += nvkm/subdev/devinit/mcp89.o
+nvkm-y += nvkm/subdev/devinit/gf100.o
+nvkm-y += nvkm/subdev/devinit/gm107.o
+nvkm-y += nvkm/subdev/devinit/gm204.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index 0e45cee82463..b0d7c5f40db1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -21,17 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include <core/device.h>
#include <core/option.h>
-
#include <subdev/vga.h>
-#include "priv.h"
-
int
-_nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
+_nvkm_devinit_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_devinit *devinit = (void *)object;
+ struct nvkm_devinit *devinit = (void *)object;
/* force full reinit on resume */
if (suspend)
@@ -40,17 +39,17 @@ _nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
/* unlock the extended vga crtc regs */
nv_lockvgac(devinit, false);
- return nouveau_subdev_fini(&devinit->base, suspend);
+ return nvkm_subdev_fini(&devinit->base, suspend);
}
int
-_nouveau_devinit_init(struct nouveau_object *object)
+_nvkm_devinit_init(struct nvkm_object *object)
{
- struct nouveau_devinit_impl *impl = (void *)object->oclass;
- struct nouveau_devinit *devinit = (void *)object;
+ struct nvkm_devinit_impl *impl = (void *)object->oclass;
+ struct nvkm_devinit *devinit = (void *)object;
int ret;
- ret = nouveau_subdev_init(&devinit->base);
+ ret = nvkm_subdev_init(&devinit->base);
if (ret)
return ret;
@@ -64,34 +63,32 @@ _nouveau_devinit_init(struct nouveau_object *object)
}
void
-_nouveau_devinit_dtor(struct nouveau_object *object)
+_nvkm_devinit_dtor(struct nvkm_object *object)
{
- struct nouveau_devinit *devinit = (void *)object;
+ struct nvkm_devinit *devinit = (void *)object;
/* lock crtc regs */
nv_lockvgac(devinit, true);
- nouveau_subdev_destroy(&devinit->base);
+ nvkm_subdev_destroy(&devinit->base);
}
int
-nouveau_devinit_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int size, void **pobject)
+nvkm_devinit_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int size, void **pobject)
{
- struct nouveau_devinit_impl *impl = (void *)oclass;
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_devinit *devinit;
+ struct nvkm_devinit_impl *impl = (void *)oclass;
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_devinit *devinit;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
- "init", size, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
+ "init", size, pobject);
devinit = *pobject;
if (ret)
return ret;
- devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
+ devinit->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
devinit->meminit = impl->meminit;
devinit->pll_set = impl->pll_set;
devinit->mmio = impl->mmio;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 6103484fea72..36684c3f9e9c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -23,9 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include <core/device.h>
-
#include <subdev/fb/regsnv04.h>
#define NV04_PFB_DEBUG_0 0x00100080
@@ -48,7 +46,7 @@
# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
static inline struct io_mapping *
-fbmem_init(struct nouveau_device *dev)
+fbmem_init(struct nvkm_device *dev)
{
return io_mapping_create_wc(nv_device_resource_start(dev, 1),
nv_device_resource_len(dev, 1));
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index a7c80ded77cd..ca776ce75f4f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -21,11 +21,13 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
static u64
-nv84_devinit_disable(struct nouveau_devinit *devinit)
+g84_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r001540 = nv_rd32(priv, 0x001540);
@@ -36,7 +38,7 @@ nv84_devinit_disable(struct nouveau_devinit *devinit)
disable |= (1ULL << NVDEV_ENGINE_MPEG);
disable |= (1ULL << NVDEV_ENGINE_VP);
disable |= (1ULL << NVDEV_ENGINE_BSP);
- disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+ disable |= (1ULL << NVDEV_ENGINE_CIPHER);
}
if (!(r00154c & 0x00000004))
@@ -44,21 +46,21 @@ nv84_devinit_disable(struct nouveau_devinit *devinit)
if (!(r00154c & 0x00000020))
disable |= (1ULL << NVDEV_ENGINE_BSP);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+ disable |= (1ULL << NVDEV_ENGINE_CIPHER);
return disable;
}
-struct nouveau_oclass *
-nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+g84_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x84),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
.pll_set = nv50_devinit_pll_set,
- .disable = nv84_devinit_disable,
+ .disable = g84_devinit_disable,
.post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index a773253a17f6..d29bacee65ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -21,11 +21,13 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
static u64
-nv98_devinit_disable(struct nouveau_devinit *devinit)
+g98_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r001540 = nv_rd32(priv, 0x001540);
@@ -33,31 +35,31 @@ nv98_devinit_disable(struct nouveau_devinit *devinit)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_VP);
- disable |= (1ULL << NVDEV_ENGINE_BSP);
- disable |= (1ULL << NVDEV_ENGINE_PPP);
+ disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
+ disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+ disable |= (1ULL << NVDEV_ENGINE_MSPPP);
}
if (!(r00154c & 0x00000004))
disable |= (1ULL << NVDEV_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_MSVLD);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+ disable |= (1ULL << NVDEV_ENGINE_SEC);
return disable;
}
-struct nouveau_oclass *
-nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+g98_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x98),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
.pll_set = nv50_devinit_pll_set,
- .disable = nv98_devinit_disable,
+ .disable = g98_devinit_disable,
.post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 80bd7f5eda3d..e8778c67578e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -21,14 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
int
-nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
+gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
struct nv50_devinit_priv *priv = (void *)devinit;
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll info;
int N, fN, M, P;
int ret;
@@ -37,7 +41,7 @@ nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
if (ret)
return ret;
- ret = nva3_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+ ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
@@ -60,7 +64,7 @@ nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
}
static u64
-nvc0_devinit_disable(struct nouveau_devinit *devinit)
+gf100_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r022500 = nv_rd32(priv, 0x022500);
@@ -70,50 +74,51 @@ nvc0_devinit_disable(struct nouveau_devinit *devinit)
disable |= (1ULL << NVDEV_ENGINE_DISP);
if (r022500 & 0x00000002) {
- disable |= (1ULL << NVDEV_ENGINE_VP);
- disable |= (1ULL << NVDEV_ENGINE_PPP);
+ disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
+ disable |= (1ULL << NVDEV_ENGINE_MSPPP);
}
if (r022500 & 0x00000004)
- disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_MSVLD);
if (r022500 & 0x00000008)
- disable |= (1ULL << NVDEV_ENGINE_VENC);
+ disable |= (1ULL << NVDEV_ENGINE_MSENC);
if (r022500 & 0x00000100)
- disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ disable |= (1ULL << NVDEV_ENGINE_CE0);
if (r022500 & 0x00000200)
- disable |= (1ULL << NVDEV_ENGINE_COPY1);
+ disable |= (1ULL << NVDEV_ENGINE_CE1);
return disable;
}
static int
-nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_devinit_priv *priv;
int ret;
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+ ret = nvkm_devinit_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
if (nv_rd32(priv, 0x022500) & 0x00000001)
priv->base.post = true;
+
return 0;
}
-struct nouveau_oclass *
-nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+gf100_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_devinit_ctor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
- .pll_set = nvc0_devinit_pll_set,
- .disable = nvc0_devinit_disable,
+ .pll_set = gf100_devinit_pll_set,
+ .disable = gf100_devinit_disable,
.post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 4ba43d6a1ec8..b345a53e881d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -21,11 +21,13 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
u64
-gm107_devinit_disable(struct nouveau_devinit *devinit)
+gm107_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r021c00 = nv_rd32(priv, 0x021c00);
@@ -33,25 +35,25 @@ gm107_devinit_disable(struct nouveau_devinit *devinit)
u64 disable = 0ULL;
if (r021c00 & 0x00000001)
- disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ disable |= (1ULL << NVDEV_ENGINE_CE0);
if (r021c00 & 0x00000004)
- disable |= (1ULL << NVDEV_ENGINE_COPY2);
+ disable |= (1ULL << NVDEV_ENGINE_CE2);
if (r021c04 & 0x00000001)
disable |= (1ULL << NVDEV_ENGINE_DISP);
return disable;
}
-struct nouveau_oclass *
-gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x07),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
- .pll_set = nvc0_devinit_pll_set,
+ .pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
.post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index e44a86662a2a..535172c5f1ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -21,17 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pmu.h>
-#include "nv50.h"
-
static void
pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
int i;
nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
@@ -50,7 +49,7 @@ pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
static void
pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
int i;
nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
@@ -78,7 +77,7 @@ static int
pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
u32 *init_addr_pmu, u32 *args_addr_pmu)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pmuR pmu;
if (!nvbios_pmuRm(bios, type, &pmu)) {
@@ -103,10 +102,10 @@ pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
}
static int
-gm204_devinit_post(struct nouveau_subdev *subdev, bool post)
+gm204_devinit_post(struct nvkm_subdev *subdev, bool post)
{
- struct nv50_devinit_priv *priv = (void *)nouveau_devinit(subdev);
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nv50_devinit_priv *priv = (void *)nvkm_devinit(subdev);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct bit_entry bit_I;
u32 init, args;
int ret;
@@ -158,16 +157,16 @@ gm204_devinit_post(struct nouveau_subdev *subdev, bool post)
return pmu_load(priv, 0x01, post, NULL, NULL);
}
-struct nouveau_oclass *
-gm204_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x07),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
- .pll_set = nvc0_devinit_pll_set,
+ .pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
.post = gm204_devinit_post,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index b9cd9e53f760..6a3e8d4efed7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -21,14 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
int
-nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
+gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
struct nv50_devinit_priv *priv = (void *)devinit;
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll info;
int N, fN, M, P;
int ret;
@@ -37,7 +41,7 @@ nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
if (ret)
return ret;
- ret = nva3_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+ ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
@@ -59,7 +63,7 @@ nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
}
static u64
-nva3_devinit_disable(struct nouveau_devinit *devinit)
+gt215_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r001540 = nv_rd32(priv, 0x001540);
@@ -67,22 +71,22 @@ nva3_devinit_disable(struct nouveau_devinit *devinit)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_VP);
- disable |= (1ULL << NVDEV_ENGINE_PPP);
+ disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
+ disable |= (1ULL << NVDEV_ENGINE_MSPPP);
}
if (!(r00154c & 0x00000004))
disable |= (1ULL << NVDEV_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_MSVLD);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ disable |= (1ULL << NVDEV_ENGINE_CE0);
return disable;
}
static u32
-nva3_devinit_mmio_part[] = {
+gt215_devinit_mmio_part[] = {
0x100720, 0x1008bc, 4,
0x100a20, 0x100adc, 4,
0x100d80, 0x100ddc, 4,
@@ -95,10 +99,10 @@ nva3_devinit_mmio_part[] = {
};
static u32
-nva3_devinit_mmio(struct nouveau_devinit *devinit, u32 addr)
+gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
{
struct nv50_devinit_priv *priv = (void *)devinit;
- u32 *mmio = nva3_devinit_mmio_part;
+ u32 *mmio = gt215_devinit_mmio_part;
/* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
* instructions which touch registers that may not even exist on
@@ -130,17 +134,17 @@ nva3_devinit_mmio(struct nouveau_devinit *devinit, u32 addr)
return addr;
}
-struct nouveau_oclass *
-nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+gt215_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0xa3),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
- .pll_set = nva3_devinit_pll_set,
- .disable = nva3_devinit_disable,
- .mmio = nva3_devinit_mmio,
+ .pll_set = gt215_devinit_pll_set,
+ .disable = gt215_devinit_disable,
+ .mmio = gt215_devinit_mmio,
.post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index 3729846a8e5c..55cf48bbca1c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -21,11 +21,13 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
static u64
-nvaf_devinit_disable(struct nouveau_devinit *devinit)
+mcp89_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r001540 = nv_rd32(priv, 0x001540);
@@ -33,32 +35,32 @@ nvaf_devinit_disable(struct nouveau_devinit *devinit)
u64 disable = 0;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_VP);
- disable |= (1ULL << NVDEV_ENGINE_PPP);
+ disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
+ disable |= (1ULL << NVDEV_ENGINE_MSPPP);
}
if (!(r00154c & 0x00000004))
disable |= (1ULL << NVDEV_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_MSVLD);
if (!(r00154c & 0x00000040))
disable |= (1ULL << NVDEV_ENGINE_VIC);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ disable |= (1ULL << NVDEV_ENGINE_CE0);
return disable;
}
-struct nouveau_oclass *
-nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+mcp89_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0xaf),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
- .pll_set = nva3_devinit_pll_set,
- .disable = nvaf_devinit_disable,
+ .pll_set = gt215_devinit_pll_set,
+ .disable = mcp89_devinit_disable,
.post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 65651c50f6ea..03a0da834244 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -23,14 +23,17 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "nv04.h"
+#include "fbmem.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
#include <subdev/vga.h>
-#include "fbmem.h"
-#include "nv04.h"
-
static void
-nv04_devinit_meminit(struct nouveau_devinit *devinit)
+nv04_devinit_meminit(struct nvkm_devinit *devinit)
{
struct nv04_devinit_priv *priv = (void *)devinit;
u32 patt = 0xdeadbeef;
@@ -136,10 +139,10 @@ powerctrl_1_shift(int chip_version, int reg)
}
void
-setPLL_single(struct nouveau_devinit *devinit, u32 reg,
- struct nouveau_pll_vals *pv)
+setPLL_single(struct nvkm_devinit *devinit, u32 reg,
+ struct nvkm_pll_vals *pv)
{
- int chip_version = nouveau_bios(devinit)->version.chip;
+ int chip_version = nvkm_bios(devinit)->version.chip;
uint32_t oldpll = nv_rd32(devinit, reg);
int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -190,10 +193,10 @@ new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
}
void
-setPLL_double_highregs(struct nouveau_devinit *devinit, u32 reg1,
- struct nouveau_pll_vals *pv)
+setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
+ struct nvkm_pll_vals *pv)
{
- int chip_version = nouveau_bios(devinit)->version.chip;
+ int chip_version = nvkm_bios(devinit)->version.chip;
bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
uint32_t oldpll1 = nv_rd32(devinit, reg1);
@@ -267,8 +270,8 @@ setPLL_double_highregs(struct nouveau_devinit *devinit, u32 reg1,
}
void
-setPLL_double_lowregs(struct nouveau_devinit *devinit, u32 NMNMreg,
- struct nouveau_pll_vals *pv)
+setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
+ struct nvkm_pll_vals *pv)
{
/* When setting PLLs, there is a merry game of disabling and enabling
* various bits of hardware during the process. This function is a
@@ -301,7 +304,7 @@ setPLL_double_lowregs(struct nouveau_devinit *devinit, u32 NMNMreg,
struct nvbios_pll info;
uint8_t Pval2;
- if (nvbios_pll_parse(nouveau_bios(devinit), Preg, &info))
+ if (nvbios_pll_parse(nvkm_bios(devinit), Preg, &info))
return;
Pval2 = pv->log2P + info.bias_p;
@@ -347,10 +350,10 @@ setPLL_double_lowregs(struct nouveau_devinit *devinit, u32 NMNMreg,
}
int
-nv04_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
+nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(devinit);
- struct nouveau_pll_vals pv;
+ struct nvkm_bios *bios = nvkm_bios(devinit);
+ struct nvkm_pll_vals pv;
struct nvbios_pll info;
int cv = bios->version.chip;
int N1, M1, N2, M2, P;
@@ -361,7 +364,7 @@ nv04_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return ret;
ret = nv04_pll_calc(nv_subdev(devinit), &info, freq,
- &N1, &M1, &N2, &M2, &P);
+ &N1, &M1, &N2, &M2, &P);
if (!ret)
return -EINVAL;
@@ -385,7 +388,7 @@ nv04_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
}
int
-nv04_devinit_fini(struct nouveau_object *object, bool suspend)
+nv04_devinit_fini(struct nvkm_object *object, bool suspend)
{
struct nv04_devinit_priv *priv = (void *)object;
int ret;
@@ -393,7 +396,7 @@ nv04_devinit_fini(struct nouveau_object *object, bool suspend)
/* make i2c busses accessible */
nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
- ret = nouveau_devinit_fini(&priv->base, suspend);
+ ret = nvkm_devinit_fini(&priv->base, suspend);
if (ret)
return ret;
@@ -401,12 +404,11 @@ nv04_devinit_fini(struct nouveau_object *object, bool suspend)
if (priv->owner < 0)
priv->owner = nv_rdvgaowner(priv);
nv_wrvgaowner(priv, 0);
-
return 0;
}
int
-nv04_devinit_init(struct nouveau_object *object)
+nv04_devinit_init(struct nvkm_object *object)
{
struct nv04_devinit_priv *priv = (void *)object;
@@ -422,29 +424,29 @@ nv04_devinit_init(struct nouveau_object *object)
}
}
- return nouveau_devinit_init(&priv->base);
+ return nvkm_devinit_init(&priv->base);
}
void
-nv04_devinit_dtor(struct nouveau_object *object)
+nv04_devinit_dtor(struct nvkm_object *object)
{
struct nv04_devinit_priv *priv = (void *)object;
/* restore vga owner saved at first init */
nv_wrvgaowner(priv, priv->owner);
- nouveau_devinit_destroy(&priv->base);
+ nvkm_devinit_destroy(&priv->base);
}
int
-nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_devinit_priv *priv;
int ret;
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+ ret = nvkm_devinit_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -453,10 +455,10 @@ nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+nv04_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
new file mode 100644
index 000000000000..14a51a9ff7d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -0,0 +1,22 @@
+#ifndef __NVKM_DEVINIT_NV04_H__
+#define __NVKM_DEVINIT_NV04_H__
+#include "priv.h"
+struct nvkm_pll_vals;
+
+struct nv04_devinit_priv {
+ struct nvkm_devinit base;
+ u8 owner;
+};
+
+int nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv04_devinit_dtor(struct nvkm_object *);
+int nv04_devinit_init(struct nvkm_object *);
+int nv04_devinit_fini(struct nvkm_object *, bool);
+int nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32);
+
+void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
+void setPLL_double_highregs(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
+void setPLL_double_lowregs(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index a2007a3efc4d..def8649216c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -23,16 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "nv04.h"
+#include "fbmem.h"
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
+#include <subdev/bios/init.h>
#include <subdev/vga.h>
-#include "fbmem.h"
-#include "nv04.h"
-
static void
-nv05_devinit_meminit(struct nouveau_devinit *devinit)
+nv05_devinit_meminit(struct nvkm_devinit *devinit)
{
static const u8 default_config_tab[][2] = {
{ 0x24, 0x00 },
@@ -45,7 +45,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit)
{ 0x00, 0x00 }
};
struct nv04_devinit_priv *priv = (void *)devinit;
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct io_mapping *fb;
u32 patt = 0xdeadbeef;
u16 data;
@@ -125,10 +125,10 @@ out:
fbmem_fini(fb);
}
-struct nouveau_oclass *
-nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+nv05_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x05),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 178b46f79b50..7aabc1bf0640 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -23,14 +23,14 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
-#include <subdev/vga.h>
-
-#include "fbmem.h"
#include "nv04.h"
+#include "fbmem.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
static void
-nv10_devinit_meminit(struct nouveau_devinit *devinit)
+nv10_devinit_meminit(struct nvkm_devinit *devinit)
{
struct nv04_devinit_priv *priv = (void *)devinit;
static const int mem_width[] = { 0x10, 0x00, 0x20 };
@@ -96,10 +96,10 @@ amount_found:
fbmem_fini(fb);
}
-struct nouveau_oclass *
-nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+nv10_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x10),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index 995dd97af3e9..9f36fff5a1c3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -21,13 +21,15 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-struct nouveau_oclass *
-nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
+struct nvkm_oclass *
+nv1a_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x1a),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 915089fb46f7..02fcfd921c42 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -23,15 +23,17 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
#include "fbmem.h"
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
static void
-nv20_devinit_meminit(struct nouveau_devinit *devinit)
+nv20_devinit_meminit(struct nvkm_devinit *devinit)
{
struct nv04_devinit_priv *priv = (void *)devinit;
- struct nouveau_device *device = nv_device(priv);
+ struct nvkm_device *device = nv_device(priv);
uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
uint32_t amount, off;
struct io_mapping *fb;
@@ -60,10 +62,10 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit)
fbmem_fini(fb);
}
-struct nouveau_oclass *
-nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+nv20_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x20),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 968334d1dca4..26b7cb13e167 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -21,21 +21,22 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
#include <subdev/ibus.h>
#include <subdev/vga.h>
-#include "nv50.h"
-
int
-nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
+nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
struct nv50_devinit_priv *priv = (void *)devinit;
- struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_bios *bios = nvkm_bios(priv);
struct nvbios_pll info;
int N1, M1, N2, M2, P;
int ret;
@@ -76,7 +77,7 @@ nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
}
static u64
-nv50_devinit_disable(struct nouveau_devinit *devinit)
+nv50_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r001540 = nv_rd32(priv, 0x001540);
@@ -89,10 +90,10 @@ nv50_devinit_disable(struct nouveau_devinit *devinit)
}
int
-nv50_devinit_init(struct nouveau_object *object)
+nv50_devinit_init(struct nvkm_object *object)
{
- struct nouveau_bios *bios = nouveau_bios(object);
- struct nouveau_ibus *ibus = nouveau_ibus(object);
+ struct nvkm_bios *bios = nvkm_bios(object);
+ struct nvkm_ibus *ibus = nvkm_ibus(object);
struct nv50_devinit_priv *priv = (void *)object;
struct nvbios_outp info;
struct dcb_output outp;
@@ -114,7 +115,7 @@ nv50_devinit_init(struct nouveau_object *object)
if (priv->base.post && ibus)
nv_ofuncs(ibus)->init(nv_object(ibus));
- ret = nouveau_devinit_init(&priv->base);
+ ret = nvkm_devinit_init(&priv->base);
if (ret)
return ret;
@@ -124,7 +125,7 @@ nv50_devinit_init(struct nouveau_object *object)
*/
while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
- &ver, &hdr, &cnt, &len, &info)) {
+ &ver, &hdr, &cnt, &len, &info)) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
@@ -143,14 +144,14 @@ nv50_devinit_init(struct nouveau_object *object)
}
int
-nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_devinit_priv *priv;
int ret;
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+ ret = nvkm_devinit_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -158,14 +159,14 @@ nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
+struct nvkm_oclass *
+nv50_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
- .dtor = _nouveau_devinit_dtor,
+ .dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
- .fini = _nouveau_devinit_fini,
+ .fini = _nvkm_devinit_fini,
},
.pll_set = nv50_devinit_pll_set,
.disable = nv50_devinit_disable,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
new file mode 100644
index 000000000000..b882b65ff3cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_DEVINIT_NV50_H__
+#define __NVKM_DEVINIT_NV50_H__
+#include "priv.h"
+
+struct nv50_devinit_priv {
+ struct nvkm_devinit base;
+ u32 r001540;
+};
+
+int nv50_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+int nv50_devinit_init(struct nvkm_object *);
+int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
+
+int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
+
+int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
+
+u64 gm107_devinit_disable(struct nvkm_devinit *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
new file mode 100644
index 000000000000..bb51a95d8012
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_DEVINIT_PRIV_H__
+#define __NVKM_DEVINIT_PRIV_H__
+#include <subdev/devinit.h>
+
+struct nvkm_devinit_impl {
+ struct nvkm_oclass base;
+ void (*meminit)(struct nvkm_devinit *);
+ int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
+ u64 (*disable)(struct nvkm_devinit *);
+ u32 (*mmio)(struct nvkm_devinit *, u32);
+ int (*post)(struct nvkm_subdev *, bool);
+};
+
+#define nvkm_devinit_create(p,e,o,d) \
+ nvkm_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_devinit_destroy(p) ({ \
+ struct nvkm_devinit *d = (p); \
+ _nvkm_devinit_dtor(nv_object(d)); \
+})
+#define nvkm_devinit_init(p) ({ \
+ struct nvkm_devinit *d = (p); \
+ _nvkm_devinit_init(nv_object(d)); \
+})
+#define nvkm_devinit_fini(p,s) ({ \
+ struct nvkm_devinit *d = (p); \
+ _nvkm_devinit_fini(nv_object(d), (s)); \
+})
+
+int nvkm_devinit_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_devinit_dtor(struct nvkm_object *);
+int _nvkm_devinit_init(struct nvkm_object *);
+int _nvkm_devinit_fini(struct nvkm_object *, bool suspend);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
new file mode 100644
index 000000000000..904d601e8a50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -0,0 +1,45 @@
+nvkm-y += nvkm/subdev/fb/base.o
+nvkm-y += nvkm/subdev/fb/nv04.o
+nvkm-y += nvkm/subdev/fb/nv10.o
+nvkm-y += nvkm/subdev/fb/nv1a.o
+nvkm-y += nvkm/subdev/fb/nv20.o
+nvkm-y += nvkm/subdev/fb/nv25.o
+nvkm-y += nvkm/subdev/fb/nv30.o
+nvkm-y += nvkm/subdev/fb/nv35.o
+nvkm-y += nvkm/subdev/fb/nv36.o
+nvkm-y += nvkm/subdev/fb/nv40.o
+nvkm-y += nvkm/subdev/fb/nv41.o
+nvkm-y += nvkm/subdev/fb/nv44.o
+nvkm-y += nvkm/subdev/fb/nv46.o
+nvkm-y += nvkm/subdev/fb/nv47.o
+nvkm-y += nvkm/subdev/fb/nv49.o
+nvkm-y += nvkm/subdev/fb/nv4e.o
+nvkm-y += nvkm/subdev/fb/nv50.o
+nvkm-y += nvkm/subdev/fb/g84.o
+nvkm-y += nvkm/subdev/fb/gt215.o
+nvkm-y += nvkm/subdev/fb/mcp77.o
+nvkm-y += nvkm/subdev/fb/mcp89.o
+nvkm-y += nvkm/subdev/fb/gf100.o
+nvkm-y += nvkm/subdev/fb/gk104.o
+nvkm-y += nvkm/subdev/fb/gk20a.o
+nvkm-y += nvkm/subdev/fb/gm107.o
+nvkm-y += nvkm/subdev/fb/ramnv04.o
+nvkm-y += nvkm/subdev/fb/ramnv10.o
+nvkm-y += nvkm/subdev/fb/ramnv1a.o
+nvkm-y += nvkm/subdev/fb/ramnv20.o
+nvkm-y += nvkm/subdev/fb/ramnv40.o
+nvkm-y += nvkm/subdev/fb/ramnv41.o
+nvkm-y += nvkm/subdev/fb/ramnv44.o
+nvkm-y += nvkm/subdev/fb/ramnv49.o
+nvkm-y += nvkm/subdev/fb/ramnv4e.o
+nvkm-y += nvkm/subdev/fb/ramnv50.o
+nvkm-y += nvkm/subdev/fb/ramgt215.o
+nvkm-y += nvkm/subdev/fb/rammcp77.o
+nvkm-y += nvkm/subdev/fb/ramgf100.o
+nvkm-y += nvkm/subdev/fb/ramgk104.o
+nvkm-y += nvkm/subdev/fb/ramgk20a.o
+nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/sddr2.o
+nvkm-y += nvkm/subdev/fb/sddr3.o
+nvkm-y += nvkm/subdev/fb/gddr3.o
+nvkm-y += nvkm/subdev/fb/gddr5.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index c866148c440f..16589fa613cd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -21,14 +21,13 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
-#include "priv.h"
-
int
-nouveau_fb_bios_memtype(struct nouveau_bios *bios)
+nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
struct nvbios_M0203E M0203E;
@@ -51,25 +50,25 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
}
int
-_nouveau_fb_fini(struct nouveau_object *object, bool suspend)
+_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_fb *pfb = (void *)object;
+ struct nvkm_fb *pfb = (void *)object;
int ret;
ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
if (ret && suspend)
return ret;
- return nouveau_subdev_fini(&pfb->base, suspend);
+ return nvkm_subdev_fini(&pfb->base, suspend);
}
int
-_nouveau_fb_init(struct nouveau_object *object)
+_nvkm_fb_init(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = (void *)object;
+ struct nvkm_fb *pfb = (void *)object;
int ret, i;
- ret = nouveau_subdev_init(&pfb->base);
+ ret = nvkm_subdev_init(&pfb->base);
if (ret)
return ret;
@@ -84,25 +83,25 @@ _nouveau_fb_init(struct nouveau_object *object)
}
void
-_nouveau_fb_dtor(struct nouveau_object *object)
+_nvkm_fb_dtor(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = (void *)object;
+ struct nvkm_fb *pfb = (void *)object;
int i;
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
- nouveau_mm_fini(&pfb->tags);
- nouveau_mm_fini(&pfb->vram);
+ nvkm_mm_fini(&pfb->tags);
+ nvkm_mm_fini(&pfb->vram);
- nouveau_object_ref(NULL, (struct nouveau_object **)&pfb->ram);
- nouveau_subdev_destroy(&pfb->base);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+ nvkm_subdev_destroy(&pfb->base);
}
int
-nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_fb_impl *impl = (void *)oclass;
+ struct nvkm_fb_impl *impl = (void *)oclass;
static const char *name[] = {
[NV_MEM_TYPE_UNKNOWN] = "unknown",
[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
@@ -116,38 +115,35 @@ nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
[NV_MEM_TYPE_GDDR4 ] = "GDDR4",
[NV_MEM_TYPE_GDDR5 ] = "GDDR5",
};
- struct nouveau_object *ram;
- struct nouveau_fb *pfb;
+ struct nvkm_object *ram;
+ struct nvkm_fb *pfb;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
- length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
+ length, pobject);
pfb = *pobject;
if (ret)
return ret;
pfb->memtype_valid = impl->memtype;
- ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb),
- impl->ram, NULL, 0, &ram);
+ ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
if (ret) {
nv_fatal(pfb, "error detecting memory configuration!!\n");
return ret;
}
- atomic_dec(&ram->parent->refcount);
- atomic_dec(&ram->engine->refcount);
pfb->ram = (void *)ram;
- if (!nouveau_mm_initialised(&pfb->vram)) {
- ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
+ if (!nvkm_mm_initialised(&pfb->vram)) {
+ ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
if (ret)
return ret;
}
- if (!nouveau_mm_initialised(&pfb->tags)) {
- ret = nouveau_mm_init(&pfb->tags, 0, pfb->ram->tags ?
- ++pfb->ram->tags : 0, 1);
+ if (!nvkm_mm_initialised(&pfb->tags)) {
+ ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ?
+ ++pfb->ram->tags : 0, 1);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index cf0e767d3833..6c968d1e98b3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -21,17 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
-struct nouveau_oclass *
-nv84_fb_oclass = &(struct nv50_fb_impl) {
+struct nvkm_oclass *
+g84_fb_oclass = &(struct nv50_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x84),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fb_ctor,
.dtor = nv50_fb_dtor,
.init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv50_fb_memtype_valid,
.base.ram = &nv50_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index d85a25d027ee..15b462ae33cb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -22,8 +22,6 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
* Roy Spliet <rspliet@eclipso.eu>
*/
-
-#include <subdev/bios.h>
#include "priv.h"
struct ramxlat {
@@ -70,7 +68,7 @@ ramgddr3_wr_lo[] = {
};
int
-nouveau_gddr3_calc(struct nouveau_ram *ram)
+nvkm_gddr3_calc(struct nvkm_ram *ram)
{
int CL, WR, CWL, DLL = 0, ODT = 0, hi;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index 7fbbe05d5c60..f6f9eee1dcd0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -21,8 +21,6 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
-#include <subdev/bios.h>
#include "priv.h"
/* binary driver only executes this path if the condition (a) is true
@@ -34,7 +32,7 @@
#define NOTE00(a) 1
int
-nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
+nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts)
{
int pd, lf, xd, vh, vr, vo, l3;
int WL, CL, WR, at[2], dt, ds;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 32f28dc73ef2..d51aa0237baf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -21,22 +21,23 @@
*
* Authors: Ben Skeggs
*/
+#include "gf100.h"
-#include "nvc0.h"
+#include <core/device.h>
-extern const u8 nvc0_pte_storage_type_map[256];
+extern const u8 gf100_pte_storage_type_map[256];
bool
-nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
+gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
- return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
+ return likely((gf100_pte_storage_type_map[memtype] != 0xff));
}
static void
-nvc0_fb_intr(struct nouveau_subdev *subdev)
+gf100_fb_intr(struct nvkm_subdev *subdev)
{
- struct nvc0_fb_priv *priv = (void *)subdev;
+ struct gf100_fb_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x000100);
if (intr & 0x08000000) {
nv_debug(priv, "PFFB intr\n");
@@ -49,26 +50,27 @@ nvc0_fb_intr(struct nouveau_subdev *subdev)
}
int
-nvc0_fb_init(struct nouveau_object *object)
+gf100_fb_init(struct nvkm_object *object)
{
- struct nvc0_fb_priv *priv = (void *)object;
+ struct gf100_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
if (priv->r100c10_page)
nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+
nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
return 0;
}
void
-nvc0_fb_dtor(struct nouveau_object *object)
+gf100_fb_dtor(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
- struct nvc0_fb_priv *priv = (void *)object;
+ struct nvkm_device *device = nv_device(object);
+ struct gf100_fb_priv *priv = (void *)object;
if (priv->r100c10_page) {
dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
@@ -76,19 +78,19 @@ nvc0_fb_dtor(struct nouveau_object *object)
__free_page(priv->r100c10_page);
}
- nouveau_fb_destroy(&priv->base);
+ nvkm_fb_destroy(&priv->base);
}
int
-nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
- struct nvc0_fb_priv *priv;
+ struct nvkm_device *device = nv_device(parent);
+ struct gf100_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -102,19 +104,19 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return -EFAULT;
}
- nv_subdev(priv)->intr = nvc0_fb_intr;
+ nv_subdev(priv)->intr = gf100_fb_intr;
return 0;
}
-struct nouveau_oclass *
-nvc0_fb_oclass = &(struct nouveau_fb_impl) {
+struct nvkm_oclass *
+gf100_fb_oclass = &(struct nvkm_fb_impl) {
.base.handle = NV_SUBDEV(FB, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_fb_ctor,
- .dtor = nvc0_fb_dtor,
- .init = nvc0_fb_init,
- .fini = _nouveau_fb_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_fb_ctor,
+ .dtor = gf100_fb_dtor,
+ .init = gf100_fb_init,
+ .fini = _nvkm_fb_fini,
},
- .memtype = nvc0_fb_memtype_valid,
- .ram = &nvc0_ram_oclass,
+ .memtype = gf100_fb_memtype_valid,
+ .ram = &gf100_ram_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
new file mode 100644
index 000000000000..0af4da259471
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_RAM_NVC0_H__
+#define __NVKM_RAM_NVC0_H__
+#include "priv.h"
+#include "nv50.h"
+
+struct gf100_fb_priv {
+ struct nvkm_fb base;
+ struct page *r100c10_page;
+ dma_addr_t r100c10;
+};
+
+int gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void gf100_fb_dtor(struct nvkm_object *);
+int gf100_fb_init(struct nvkm_object *);
+bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+
+#define gf100_ram_create(p,e,o,m,d) \
+ gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
+int gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u32, int, void **);
+int gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
+ struct nvkm_mem **);
+void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
+
+int gk104_ram_init(struct nvkm_object*);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 595db50cfef3..1c08317665bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -21,18 +21,17 @@
*
* Authors: Ben Skeggs
*/
+#include "gf100.h"
-#include "nvc0.h"
-
-struct nouveau_oclass *
-nve0_fb_oclass = &(struct nouveau_fb_impl) {
+struct nvkm_oclass *
+gk104_fb_oclass = &(struct nvkm_fb_impl) {
.base.handle = NV_SUBDEV(FB, 0xe0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_fb_ctor,
- .dtor = nvc0_fb_dtor,
- .init = nvc0_fb_init,
- .fini = _nouveau_fb_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_fb_ctor,
+ .dtor = gf100_fb_dtor,
+ .init = gf100_fb_init,
+ .fini = _nvkm_fb_fini,
},
- .memtype = nvc0_fb_memtype_valid,
- .ram = &nve0_ram_oclass,
+ .memtype = gf100_fb_memtype_valid,
+ .ram = &gk104_ram_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index fde42e4d1b56..6762847c05e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -19,20 +19,19 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
-#include "nvc0.h"
+#include "gf100.h"
struct gk20a_fb_priv {
- struct nouveau_fb base;
+ struct nvkm_fb base;
};
static int
-gk20a_fb_init(struct nouveau_object *object)
+gk20a_fb_init(struct nvkm_object *object)
{
struct gk20a_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -41,14 +40,14 @@ gk20a_fb_init(struct nouveau_object *object)
}
static int
-gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gk20a_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -56,15 +55,15 @@ gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-gk20a_fb_oclass = &(struct nouveau_fb_impl) {
+struct nvkm_oclass *
+gk20a_fb_oclass = &(struct nvkm_fb_impl) {
.base.handle = NV_SUBDEV(FB, 0xea),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gk20a_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = gk20a_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
- .memtype = nvc0_fb_memtype_valid,
+ .memtype = gf100_fb_memtype_valid,
.ram = &gk20a_ram_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index c4840aedc2dc..843f9356b360 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -21,18 +21,17 @@
*
* Authors: Ben Skeggs
*/
+#include "gf100.h"
-#include "nvc0.h"
-
-struct nouveau_oclass *
-gm107_fb_oclass = &(struct nouveau_fb_impl) {
+struct nvkm_oclass *
+gm107_fb_oclass = &(struct nvkm_fb_impl) {
.base.handle = NV_SUBDEV(FB, 0x07),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_fb_ctor,
- .dtor = nvc0_fb_dtor,
- .init = nvc0_fb_init,
- .fini = _nouveau_fb_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_fb_ctor,
+ .dtor = gf100_fb_dtor,
+ .init = gf100_fb_init,
+ .fini = _nvkm_fb_fini,
},
- .memtype = nvc0_fb_memtype_valid,
+ .memtype = gf100_fb_memtype_valid,
.ram = &gm107_ram_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index dab6e1c63d48..dd9b8a0a3c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -21,19 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
-struct nouveau_oclass *
-nva3_fb_oclass = &(struct nv50_fb_impl) {
+struct nvkm_oclass *
+gt215_fb_oclass = &(struct nv50_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0xa3),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fb_ctor,
.dtor = nv50_fb_dtor,
.init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv50_fb_memtype_valid,
- .base.ram = &nva3_ram_oclass,
+ .base.ram = &gt215_ram_oclass,
.trap = 0x000d0fff,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index cba8e6818035..7be4a47ef4ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -21,19 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
-struct nouveau_oclass *
-nvaa_fb_oclass = &(struct nv50_fb_impl) {
+struct nvkm_oclass *
+mcp77_fb_oclass = &(struct nv50_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0xaa),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fb_ctor,
.dtor = nv50_fb_dtor,
.init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv50_fb_memtype_valid,
- .base.ram = &nvaa_ram_oclass,
+ .base.ram = &mcp77_ram_oclass,
.trap = 0x001d07ff,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 5423faa2c09b..2d00656faef5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -21,19 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
-struct nouveau_oclass *
-nvaf_fb_oclass = &(struct nv50_fb_impl) {
+struct nvkm_oclass *
+mcp89_fb_oclass = &(struct nv50_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0xaf),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fb_ctor,
.dtor = nv50_fb_dtor,
.init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv50_fb_memtype_valid,
- .base.ram = &nvaa_ram_oclass,
+ .base.ram = &mcp77_ram_oclass,
.trap = 0x089d1fff,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index 8309fe33fe84..c063dec7d03a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -21,13 +21,11 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-
-#define NV04_PFB_CFG0 0x00100200
+#include "regsnv04.h"
bool
-nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
+nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
{
if (!(tile_flags & 0xff00))
return true;
@@ -36,12 +34,12 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
-nv04_fb_init(struct nouveau_object *object)
+nv04_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -54,15 +52,15 @@ nv04_fb_init(struct nouveau_object *object)
}
int
-nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_fb_impl *impl = (void *)oclass;
struct nv04_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -75,14 +73,14 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv04_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x04),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv04_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv04_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
new file mode 100644
index 000000000000..caa0d03aaacc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_FB_NV04_H__
+#define __NVKM_FB_NV04_H__
+#include "priv.h"
+
+struct nv04_fb_priv {
+ struct nvkm_fb base;
+};
+
+int nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+
+struct nv04_fb_impl {
+ struct nvkm_fb_impl base;
+ struct {
+ int regions;
+ void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+ void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *);
+ void (*fini)(struct nvkm_fb *, int i,
+ struct nvkm_fb_tile *);
+ void (*prog)(struct nvkm_fb *, int i,
+ struct nvkm_fb_tile *);
+ } tile;
+};
+
+void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+int nv30_fb_init(struct nvkm_object *);
+void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *);
+
+int nv41_fb_init(struct nvkm_object *);
+void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+int nv44_fb_init(struct nvkm_object *);
+void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index ffb7ec6d97aa..f3530e4a6760 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -23,12 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
void
-nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
- u32 flags, struct nouveau_fb_tile *tile)
+nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x80000000 | addr;
tile->limit = max(1u, addr + size) - 1;
@@ -36,7 +35,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
void
-nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
@@ -45,7 +44,7 @@ nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
}
void
-nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
@@ -53,14 +52,14 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_rd32(pfb, 0x100240 + (i * 0x10));
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv10_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x10),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
- .init = _nouveau_fb_init,
- .fini = _nouveau_fb_fini,
+ .dtor = _nvkm_fb_dtor,
+ .init = _nvkm_fb_init,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv10_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 265d1253624a..83bcb73caf0a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -23,17 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
-struct nouveau_oclass *
+struct nvkm_oclass *
nv1a_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x1a),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
- .init = _nouveau_fb_init,
- .fini = _nouveau_fb_fini,
+ .dtor = _nvkm_fb_dtor,
+ .init = _nvkm_fb_init,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv1a_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index 2209ade63339..e37084b8d05e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -23,12 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
void
-nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
- u32 flags, struct nouveau_fb_tile *tile)
+nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
@@ -40,12 +39,12 @@ nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
static void
-nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *tile)
+nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -57,17 +56,17 @@ nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
void
-nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nouveau_mm_free(&pfb->tags, &tile->tag);
+ nvkm_mm_free(&pfb->tags, &tile->tag);
}
void
-nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
@@ -76,14 +75,14 @@ nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv20_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x20),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
- .init = _nouveau_fb_init,
- .fini = _nouveau_fb_fini,
+ .dtor = _nvkm_fb_dtor,
+ .init = _nvkm_fb_init,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv20_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index e2a66c355c50..bc9f54f38fba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -23,16 +23,15 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
static void
-nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *tile)
+nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -42,14 +41,14 @@ nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv25_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x25),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
- .init = _nouveau_fb_init,
- .fini = _nouveau_fb_fini,
+ .dtor = _nvkm_fb_dtor,
+ .init = _nvkm_fb_init,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv20_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index cbec402ba5b9..09ebb9477e00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -23,12 +23,13 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
+#include <core/device.h>
+
void
-nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
- u32 flags, struct nouveau_fb_tile *tile)
+nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
if (!(flags & 4)) {
@@ -46,12 +47,12 @@ nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
static void
-nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *tile)
+nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -65,7 +66,7 @@ nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
static int
calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
{
- struct nouveau_device *device = nv_device(priv);
+ struct nvkm_device *device = nv_device(priv);
int b = (device->chipset > 0x30 ?
nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
0) & 0xf;
@@ -88,13 +89,13 @@ calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
}
int
-nv30_fb_init(struct nouveau_object *object)
+nv30_fb_init(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
+ struct nvkm_device *device = nv_device(object);
struct nv04_fb_priv *priv = (void *)object;
int ret, i, j;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -120,14 +121,14 @@ nv30_fb_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv30_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x30),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv30_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv20_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index b2cf8c69fb2e..c01dc1839ea4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -23,16 +23,15 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
static void
-nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *tile)
+nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -43,14 +42,14 @@ nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv35_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x35),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv30_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv20_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index b4cdae2a3b2f..cad75a1cef22 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -23,16 +23,15 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
static void
-nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *tile)
+nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -43,14 +42,14 @@ nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv36_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x36),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv30_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv20_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index 52814258c212..dbe5c1910c2c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -23,17 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
void
-nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *tile)
+nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / pfb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -44,12 +43,12 @@ nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
static int
-nv40_fb_init(struct nouveau_object *object)
+nv40_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -57,14 +56,14 @@ nv40_fb_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv40_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x40),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv40_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv40_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
new file mode 100644
index 000000000000..602182661820
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_FB_NV40_H__
+#define __NVKM_FB_NV40_H__
+#include "priv.h"
+
+struct nv40_ram {
+ struct nvkm_ram base;
+ u32 ctrl;
+ u32 coef;
+};
+
+int nv40_ram_calc(struct nvkm_fb *, u32);
+int nv40_ram_prog(struct nvkm_fb *);
+void nv40_ram_tidy(struct nvkm_fb *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index b239a8615599..d9e1a40a2955 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -23,11 +23,10 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
void
-nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
@@ -37,12 +36,12 @@ nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
}
int
-nv41_fb_init(struct nouveau_object *object)
+nv41_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -50,14 +49,14 @@ nv41_fb_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv41_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x41),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv41_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv41_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index d8478208a681..20b97c83c4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -23,12 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
static void
-nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
- u32 flags, struct nouveau_fb_tile *tile)
+nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001; /* mode = vram */
tile->addr |= addr;
@@ -37,7 +36,7 @@ nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
void
-nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
@@ -46,12 +45,12 @@ nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
}
int
-nv44_fb_init(struct nouveau_object *object)
+nv44_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -60,14 +59,14 @@ nv44_fb_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv44_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x44),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv44_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv44_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index a5b77514d35b..5bfac38cdf24 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -23,12 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
void
-nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
- u32 flags, struct nouveau_fb_tile *tile)
+nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
if (!(flags & 4)) tile->addr = (0 << 3);
@@ -40,14 +39,14 @@ nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv46_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x46),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv44_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv44_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index 3bea142376bc..d3b3988d1d49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -23,17 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
-struct nouveau_oclass *
+struct nvkm_oclass *
nv47_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x47),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv41_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv41_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 666cbd5d47f5..236e36c5054e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -23,17 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
-struct nouveau_oclass *
+struct nvkm_oclass *
nv49_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x49),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv41_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv49_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 42e64f364ec1..1352b6a73fb0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -23,17 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "nv04.h"
-struct nouveau_oclass *
+struct nvkm_oclass *
nv4e_fb_oclass = &(struct nv04_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x4e),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fb_ctor,
- .dtor = _nouveau_fb_dtor,
+ .dtor = _nvkm_fb_dtor,
.init = nv44_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv4e_ram_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 4150b0d10af8..0480ce52aa06 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -21,15 +21,12 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
#include <core/client.h>
-#include <core/enum.h>
+#include <core/device.h>
#include <core/engctx.h>
-#include <core/object.h>
-
-#include <subdev/bios.h>
-
-#include "nv50.h"
+#include <core/enum.h>
int
nv50_fb_memtype[0x80] = {
@@ -44,12 +41,12 @@ nv50_fb_memtype[0x80] = {
};
bool
-nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
+nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype)
{
return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
}
-static const struct nouveau_enum vm_dispatch_subclients[] = {
+static const struct nvkm_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX", NULL },
{ 0x00000001, "NOTIFY", NULL },
{ 0x00000002, "QUERY", NULL },
@@ -60,14 +57,14 @@ static const struct nouveau_enum vm_dispatch_subclients[] = {
{}
};
-static const struct nouveau_enum vm_ccache_subclients[] = {
+static const struct nvkm_enum vm_ccache_subclients[] = {
{ 0x00000000, "CB", NULL },
{ 0x00000001, "TIC", NULL },
{ 0x00000002, "TSC", NULL },
{}
};
-static const struct nouveau_enum vm_prop_subclients[] = {
+static const struct nvkm_enum vm_prop_subclients[] = {
{ 0x00000000, "RT0", NULL },
{ 0x00000001, "RT1", NULL },
{ 0x00000002, "RT2", NULL },
@@ -84,24 +81,24 @@ static const struct nouveau_enum vm_prop_subclients[] = {
{}
};
-static const struct nouveau_enum vm_pfifo_subclients[] = {
+static const struct nvkm_enum vm_pfifo_subclients[] = {
{ 0x00000000, "PUSHBUF", NULL },
{ 0x00000001, "SEMAPHORE", NULL },
{}
};
-static const struct nouveau_enum vm_bar_subclients[] = {
+static const struct nvkm_enum vm_bar_subclients[] = {
{ 0x00000000, "FB", NULL },
{ 0x00000001, "IN", NULL },
{}
};
-static const struct nouveau_enum vm_client[] = {
+static const struct nvkm_enum vm_client[] = {
{ 0x00000000, "STRMOUT", NULL },
{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
{ 0x00000004, "PFIFO_WRITE", NULL },
{ 0x00000005, "CCACHE", vm_ccache_subclients },
- { 0x00000006, "PPPP", NULL },
+ { 0x00000006, "PMSPPP", NULL },
{ 0x00000007, "CLIPID", NULL },
{ 0x00000008, "PFIFO_READ", NULL },
{ 0x00000009, "VFETCH", NULL },
@@ -115,24 +112,24 @@ static const struct nouveau_enum vm_client[] = {
{}
};
-static const struct nouveau_enum vm_engine[] = {
+static const struct nvkm_enum vm_engine[] = {
{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
{ 0x00000004, "PEEPHOLE", NULL },
{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
{ 0x00000006, "BAR", vm_bar_subclients },
- { 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x00000008, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
- { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
+ { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CIPHER },
{ 0x0000000b, "PCOUNTER", NULL },
{ 0x0000000c, "SEMAPHORE_BG", NULL },
- { 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
+ { 0x0000000d, "PCE0", NULL, NVDEV_ENGINE_CE0 },
{ 0x0000000e, "PDAEMON", NULL },
{}
};
-static const struct nouveau_enum vm_fault[] = {
+static const struct nvkm_enum vm_fault[] = {
{ 0x00000000, "PT_NOT_PRESENT", NULL },
{ 0x00000001, "PT_TOO_SHORT", NULL },
{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
@@ -146,13 +143,13 @@ static const struct nouveau_enum vm_fault[] = {
};
static void
-nv50_fb_intr(struct nouveau_subdev *subdev)
+nv50_fb_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_device *device = nv_device(subdev);
- struct nouveau_engine *engine;
+ struct nvkm_device *device = nv_device(subdev);
+ struct nvkm_engine *engine;
struct nv50_fb_priv *priv = (void *)subdev;
- const struct nouveau_enum *en, *cl;
- struct nouveau_object *engctx = NULL;
+ const struct nvkm_enum *en, *cl;
+ struct nvkm_object *engctx = NULL;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
int i;
@@ -183,14 +180,21 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
}
chan = (trap[2] << 16) | trap[1];
- en = nouveau_enum_find(vm_engine, st0);
+ en = nvkm_enum_find(vm_engine, st0);
if (en && en->data2) {
- const struct nouveau_enum *orig_en = en;
+ const struct nvkm_enum *orig_en = en;
while (en->name && en->value == st0 && en->data2) {
- engine = nouveau_engine(subdev, en->data2);
+ engine = nvkm_engine(subdev, en->data2);
+ /*XXX: clean this up */
+ if (!engine && en->data2 == NVDEV_ENGINE_BSP)
+ engine = nvkm_engine(subdev, NVDEV_ENGINE_MSVLD);
+ if (!engine && en->data2 == NVDEV_ENGINE_CIPHER)
+ engine = nvkm_engine(subdev, NVDEV_ENGINE_SEC);
+ if (!engine && en->data2 == NVDEV_ENGINE_VP)
+ engine = nvkm_engine(subdev, NVDEV_ENGINE_MSPDEC);
if (engine) {
- engctx = nouveau_engctx_get(engine, chan);
+ engctx = nvkm_engctx_get(engine, chan);
if (engctx)
break;
}
@@ -203,23 +207,23 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
(trap[5] & 0x00000100) ? "read" : "write",
trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
- nouveau_client_name(engctx));
+ nvkm_client_name(engctx));
- nouveau_engctx_put(engctx);
+ nvkm_engctx_put(engctx);
if (en)
pr_cont("%s/", en->name);
else
pr_cont("%02x/", st0);
- cl = nouveau_enum_find(vm_client, st2);
+ cl = nvkm_enum_find(vm_client, st2);
if (cl)
pr_cont("%s/", cl->name);
else
pr_cont("%02x/", st2);
- if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
- else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
+ if (cl && cl->data) cl = nvkm_enum_find(cl->data, st3);
+ else if (en && en->data) cl = nvkm_enum_find(en->data, st3);
else cl = NULL;
if (cl)
pr_cont("%s", cl->name);
@@ -227,7 +231,7 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
pr_cont("%02x", st3);
pr_cont(" reason: ");
- en = nouveau_enum_find(vm_fault, st1);
+ en = nvkm_enum_find(vm_fault, st1);
if (en)
pr_cont("%s\n", en->name);
else
@@ -235,15 +239,15 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
}
int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
+ struct nvkm_device *device = nv_device(parent);
struct nv50_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -264,9 +268,9 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
void
-nv50_fb_dtor(struct nouveau_object *object)
+nv50_fb_dtor(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
+ struct nvkm_device *device = nv_device(object);
struct nv50_fb_priv *priv = (void *)object;
if (priv->r100c08_page) {
@@ -275,17 +279,17 @@ nv50_fb_dtor(struct nouveau_object *object)
__free_page(priv->r100c08_page);
}
- nouveau_fb_destroy(&priv->base);
+ nvkm_fb_destroy(&priv->base);
}
int
-nv50_fb_init(struct nouveau_object *object)
+nv50_fb_init(struct nvkm_object *object)
{
struct nv50_fb_impl *impl = (void *)object->oclass;
struct nv50_fb_priv *priv = (void *)object;
int ret;
- ret = nouveau_fb_init(&priv->base);
+ ret = nvkm_fb_init(&priv->base);
if (ret)
return ret;
@@ -301,14 +305,14 @@ nv50_fb_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
nv50_fb_oclass = &(struct nv50_fb_impl) {
.base.base.handle = NV_SUBDEV(FB, 0x50),
- .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_fb_ctor,
.dtor = nv50_fb_dtor,
.init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
+ .fini = _nvkm_fb_fini,
},
.base.memtype = nv50_fb_memtype_valid,
.base.ram = &nv50_ram_oclass,
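The fault decoder in nv50_fb_intr() above turns the raw status fields (st0, st2, st3, st1) into names by scanning the sentinel-terminated vm_engine, vm_client and vm_fault tables, then chasing an entry's data pointer into a subclient table such as vm_bar_subclients. Below is a minimal, self-contained sketch of that style of lookup; the struct and function names are illustrative stand-ins, not the driver's own declarations.

    #include <stdint.h>
    typedef uint32_t u32;

    /* mirrors the shape of the {}-terminated nvkm_enum tables above */
    struct enum_entry {
            u32 value;
            const char *name;
            const void *data;   /* optional subclient table, e.g. vm_bar_subclients */
            u32 data2;          /* optional engine index, e.g. NVDEV_ENGINE_GR */
    };

    static const struct enum_entry *
    enum_find(const struct enum_entry *en, u32 value)
    {
            while (en->name) {              /* the trailing {} entry has a NULL name */
                    if (en->value == value)
                            return en;
                    en++;
            }
            return NULL;                    /* caller prints the raw field instead */
    }

When the first hit carries a data table, the handler simply repeats the scan on it (cl = nvkm_enum_find(cl->data, st3)), which is why unrecognised codes are still reported as bare hex.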
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
new file mode 100644
index 000000000000..f3cde3f1f511
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_FB_NV50_H__
+#define __NVKM_FB_NV50_H__
+#include "priv.h"
+
+struct nv50_fb_priv {
+ struct nvkm_fb base;
+ struct page *r100c08_page;
+ dma_addr_t r100c08;
+};
+
+int nv50_fb_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv50_fb_dtor(struct nvkm_object *);
+int nv50_fb_init(struct nvkm_object *);
+
+struct nv50_fb_impl {
+ struct nvkm_fb_impl base;
+ u32 trap;
+};
+
+#define nv50_ram_create(p,e,o,d) \
+ nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+int nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nvkm_mem **);
+void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
+void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
+extern int nv50_fb_memtype[0x80];
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
new file mode 100644
index 000000000000..d82da02daa1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -0,0 +1,74 @@
+#ifndef __NVKM_FB_PRIV_H__
+#define __NVKM_FB_PRIV_H__
+#include <subdev/fb.h>
+struct nvkm_bios;
+
+#define nvkm_ram_create(p,e,o,d) \
+ nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
+#define nvkm_ram_destroy(p) \
+ nvkm_object_destroy(&(p)->base)
+#define nvkm_ram_init(p) \
+ nvkm_object_init(&(p)->base)
+#define nvkm_ram_fini(p,s) \
+ nvkm_object_fini(&(p)->base, (s))
+
+#define nvkm_ram_create_(p,e,o,s,d) \
+ nvkm_object_create_((p), (e), (o), 0, (s), (void **)d)
+#define _nvkm_ram_dtor nvkm_object_destroy
+#define _nvkm_ram_init nvkm_object_init
+#define _nvkm_ram_fini nvkm_object_fini
+
+extern struct nvkm_oclass nv04_ram_oclass;
+extern struct nvkm_oclass nv10_ram_oclass;
+extern struct nvkm_oclass nv1a_ram_oclass;
+extern struct nvkm_oclass nv20_ram_oclass;
+extern struct nvkm_oclass nv40_ram_oclass;
+extern struct nvkm_oclass nv41_ram_oclass;
+extern struct nvkm_oclass nv44_ram_oclass;
+extern struct nvkm_oclass nv49_ram_oclass;
+extern struct nvkm_oclass nv4e_ram_oclass;
+extern struct nvkm_oclass nv50_ram_oclass;
+extern struct nvkm_oclass gt215_ram_oclass;
+extern struct nvkm_oclass mcp77_ram_oclass;
+extern struct nvkm_oclass gf100_ram_oclass;
+extern struct nvkm_oclass gk104_ram_oclass;
+extern struct nvkm_oclass gk20a_ram_oclass;
+extern struct nvkm_oclass gm107_ram_oclass;
+
+int nvkm_sddr2_calc(struct nvkm_ram *ram);
+int nvkm_sddr3_calc(struct nvkm_ram *ram);
+int nvkm_gddr3_calc(struct nvkm_ram *ram);
+int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
+
+#define nvkm_fb_create(p,e,c,d) \
+ nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
+#define nvkm_fb_destroy(p) ({ \
+ struct nvkm_fb *pfb = (p); \
+ _nvkm_fb_dtor(nv_object(pfb)); \
+})
+#define nvkm_fb_init(p) ({ \
+ struct nvkm_fb *pfb = (p); \
+ _nvkm_fb_init(nv_object(pfb)); \
+})
+#define nvkm_fb_fini(p,s) ({ \
+ struct nvkm_fb *pfb = (p); \
+ _nvkm_fb_fini(nv_object(pfb), (s)); \
+})
+
+int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_fb_dtor(struct nvkm_object *);
+int _nvkm_fb_init(struct nvkm_object *);
+int _nvkm_fb_fini(struct nvkm_object *, bool);
+
+struct nvkm_fb_impl {
+ struct nvkm_oclass base;
+ struct nvkm_oclass *ram;
+ bool (*memtype)(struct nvkm_fb *, u32);
+};
+
+bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+bool nv50_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+
+int nvkm_fb_bios_memtype(struct nvkm_bios *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index 0ac7256443bb..f343682b1387 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,11 +1,10 @@
#ifndef __NVKM_FBRAM_FUC_H__
#define __NVKM_FBRAM_FUC_H__
-
-#include <subdev/pwr.h>
+#include <subdev/pmu.h>
struct ramfuc {
- struct nouveau_memx *memx;
- struct nouveau_fb *pfb;
+ struct nvkm_memx *memx;
+ struct nvkm_fb *pfb;
int sequence;
};
@@ -55,12 +54,12 @@ ramfuc_reg(u32 addr)
}
static inline int
-ramfuc_init(struct ramfuc *ram, struct nouveau_fb *pfb)
+ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
{
- struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+ struct nvkm_pmu *pmu = nvkm_pmu(pfb);
int ret;
- ret = nouveau_memx_init(ppwr, &ram->memx);
+ ret = nvkm_memx_init(pmu, &ram->memx);
if (ret)
return ret;
@@ -74,7 +73,7 @@ ramfuc_exec(struct ramfuc *ram, bool exec)
{
int ret = 0;
if (ram->pfb) {
- ret = nouveau_memx_fini(&ram->memx, exec);
+ ret = nvkm_memx_fini(&ram->memx, exec);
ram->pfb = NULL;
}
return ret;
@@ -97,10 +96,8 @@ ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
reg->data = data;
for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
- if (mask & 1) {
- nouveau_memx_wr32(ram->memx, reg->addr+off, reg->data);
- }
-
+ if (mask & 1)
+ nvkm_memx_wr32(ram->memx, reg->addr+off, reg->data);
off += reg->stride;
}
}
@@ -125,45 +122,45 @@ ramfuc_mask(struct ramfuc *ram, struct ramfuc_reg *reg, u32 mask, u32 data)
static inline void
ramfuc_wait(struct ramfuc *ram, u32 addr, u32 mask, u32 data, u32 nsec)
{
- nouveau_memx_wait(ram->memx, addr, mask, data, nsec);
+ nvkm_memx_wait(ram->memx, addr, mask, data, nsec);
}
static inline void
ramfuc_nsec(struct ramfuc *ram, u32 nsec)
{
- nouveau_memx_nsec(ram->memx, nsec);
+ nvkm_memx_nsec(ram->memx, nsec);
}
static inline void
ramfuc_wait_vblank(struct ramfuc *ram)
{
- nouveau_memx_wait_vblank(ram->memx);
+ nvkm_memx_wait_vblank(ram->memx);
}
static inline void
ramfuc_train(struct ramfuc *ram)
{
- nouveau_memx_train(ram->memx);
+ nvkm_memx_train(ram->memx);
}
static inline int
-ramfuc_train_result(struct nouveau_fb *pfb, u32 *result, u32 rsize)
+ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize)
{
- struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+ struct nvkm_pmu *pmu = nvkm_pmu(pfb);
- return nouveau_memx_train_result(ppwr, result, rsize);
+ return nvkm_memx_train_result(pmu, result, rsize);
}
static inline void
ramfuc_block(struct ramfuc *ram)
{
- nouveau_memx_block(ram->memx);
+ nvkm_memx_block(ram->memx);
}
static inline void
ramfuc_unblock(struct ramfuc *ram)
{
- nouveau_memx_unblock(ram->memx);
+ nvkm_memx_unblock(ram->memx);
}
#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
@@ -180,5 +177,4 @@ ramfuc_unblock(struct ramfuc *ram)
#define ram_train_result(s,r,l) ramfuc_train_result((s), (r), (l))
#define ram_block(s) ramfuc_block(&(s)->base)
#define ram_unblock(s) ramfuc_unblock(&(s)->base)
-
#endif
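One easy-to-miss detail of ramfuc_wr32() above: a single logical write is fanned out to every mirror selected by reg->mask, stepping reg->stride bytes between copies, before each copy is queued through nvkm_memx_wr32(). Here is a standalone sketch of just that mask/stride walk, with printf standing in for the memx queue (the names and register values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    typedef uint32_t u32;

    /* mirrors the mask/stride loop in ramfuc_wr32() above */
    static void
    fanout_wr32(u32 addr, u32 stride, u32 mask, u32 data)
    {
            u32 off = 0;

            for (; mask > 0; mask = (mask & ~1) >> 1) {
                    if (mask & 1)
                            printf("wr32 0x%06x <- 0x%08x\n", addr + off, data);
                    off += stride;
            }
    }

    int main(void)
    {
            /* e.g. a register mirrored across two partitions 0x1000 apart */
            fanout_wr32(0x10f300, 0x1000, 0x3, 0x00000001);
            return 0;
    }

With mask 0x3 and stride 0x1000 this queues two writes, at 0x10f300 and 0x110300; the ram_wr32()/ram_mask() macros used by the reclocking code below are thin wrappers that feed named ramfuc_reg fields into this same path.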
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 735cb9580abe..de9f39569943 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -21,23 +21,20 @@
*
* Authors: Ben Skeggs
*/
+#include "gf100.h"
+#include "ramfuc.h"
+#include <core/device.h>
+#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
+#include <subdev/clk.h>
+#include <subdev/clk/pll.h>
#include <subdev/ltc.h>
-#include <subdev/clock.h>
-#include <subdev/clock/pll.h>
-
-#include <core/option.h>
-
-#include "ramfuc.h"
-
-#include "nvc0.h"
-
-struct nvc0_ramfuc {
+struct gf100_ramfuc {
struct ramfuc base;
struct ramfuc_reg r_0x10fe20;
@@ -100,18 +97,18 @@ struct nvc0_ramfuc {
struct ramfuc_reg r_0x13d8f4;
};
-struct nvc0_ram {
- struct nouveau_ram base;
- struct nvc0_ramfuc fuc;
+struct gf100_ram {
+ struct nvkm_ram base;
+ struct gf100_ramfuc fuc;
struct nvbios_pll refpll;
struct nvbios_pll mempll;
};
static void
-nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
+gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
- struct nvc0_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nouveau_fb *pfb = nouveau_fb(ram);
+ struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct nvkm_fb *pfb = nvkm_fb(ram);
u32 part = nv_rd32(pfb, 0x022438), i;
u32 mask = nv_rd32(pfb, 0x022554);
u32 addr = 0x110974;
@@ -127,12 +124,12 @@ nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
}
static int
-nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
{
- struct nouveau_clock *clk = nouveau_clock(pfb);
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nvc0_ram *ram = (void *)pfb->ram;
- struct nvc0_ramfuc *fuc = &ram->fuc;
+ struct nvkm_clk *clk = nvkm_clk(pfb);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct gf100_ram *ram = (void *)pfb->ram;
+ struct gf100_ramfuc *fuc = &ram->fuc;
struct nvbios_ramcfg cfg;
u8 ver, cnt, len, strap;
struct {
@@ -146,7 +143,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory config data relevant to the target frequency */
rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
- &cnt, &ramcfg.size, &cfg);
+ &cnt, &ramcfg.size, &cfg);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
@@ -169,7 +166,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
- &cnt, &len);
+ &cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
@@ -213,8 +210,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
if (mode == 1 && from == 0) {
/* calculate refpll */
- ret = nva3_pll_calc(nv_subdev(pfb), &ram->refpll,
- ram->mempll.refclk, &N1, NULL, &M1, &P);
+ ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
+ ram->mempll.refclk, &N1, NULL, &M1, &P);
if (ret <= 0) {
nv_error(pfb, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
@@ -228,8 +225,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
/* calculate mempll */
- ret = nva3_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
- &N1, NULL, &M1, &P);
+ ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+ &N1, NULL, &M1, &P);
if (ret <= 0) {
nv_error(pfb, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
@@ -277,7 +274,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f210, 0x00000000);
ram_nsec(fuc, 1000);
if (mode == 0)
- nvc0_ram_train(fuc, 0x000c1001);
+ gf100_ram_train(fuc, 0x000c1001);
ram_wr32(fuc, 0x10f310, 0x00000001);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f090, 0x00000061);
@@ -325,8 +322,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f348, 0x00700008);
ram_wr32(fuc, 0x61c140, 0x19240000);
ram_wr32(fuc, 0x10f830, 0x00300017);
- nvc0_ram_train(fuc, 0x80021001);
- nvc0_ram_train(fuc, 0x80081001);
+ gf100_ram_train(fuc, 0x80021001);
+ gf100_ram_train(fuc, 0x80081001);
ram_wr32(fuc, 0x10f340, 0x00500004);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f830, 0x01300017);
@@ -379,7 +376,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x13d8f4, 0x00000000);
ram_wr32(fuc, 0x61c140, 0x09a40000);
- nvc0_ram_train(fuc, 0x800e1008);
+ gf100_ram_train(fuc, 0x800e1008);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f800, 0x00001804);
@@ -392,7 +389,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f9b0, 0x05313f41);
ram_wr32(fuc, 0x10f9b4, 0x00002f50);
- nvc0_ram_train(fuc, 0x010c1001);
+ gf100_ram_train(fuc, 0x010c1001);
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
@@ -400,34 +397,35 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
if (mode == 0)
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+
return 0;
}
static int
-nvc0_ram_prog(struct nouveau_fb *pfb)
+gf100_ram_prog(struct nvkm_fb *pfb)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nvc0_ram *ram = (void *)pfb->ram;
- struct nvc0_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
+ struct nvkm_device *device = nv_device(pfb);
+ struct gf100_ram *ram = (void *)pfb->ram;
+ struct gf100_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
static void
-nvc0_ram_tidy(struct nouveau_fb *pfb)
+gf100_ram_tidy(struct nvkm_fb *pfb)
{
- struct nvc0_ram *ram = (void *)pfb->ram;
- struct nvc0_ramfuc *fuc = &ram->fuc;
+ struct gf100_ram *ram = (void *)pfb->ram;
+ struct gf100_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, false);
}
-extern const u8 nvc0_pte_storage_type_map[256];
+extern const u8 gf100_pte_storage_type_map[256];
void
-nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
{
- struct nouveau_ltc *ltc = nouveau_ltc(pfb);
- struct nouveau_mem *mem = *pmem;
+ struct nvkm_ltc *ltc = nvkm_ltc(pfb);
+ struct nvkm_mem *mem = *pmem;
*pmem = NULL;
if (unlikely(mem == NULL))
@@ -443,15 +441,15 @@ nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
int
-nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_mem **pmem)
+gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nvkm_mem **pmem)
{
- struct nouveau_mm *mm = &pfb->vram;
- struct nouveau_mm_node *r;
- struct nouveau_mem *mem;
+ struct nvkm_mm *mm = &pfb->vram;
+ struct nvkm_mm_node *r;
+ struct nvkm_mem *mem;
int type = (memtype & 0x0ff);
int back = (memtype & 0x800);
- const bool comp = nvc0_pte_storage_type_map[type] != type;
+ const bool comp = gf100_pte_storage_type_map[type] != type;
int ret;
size >>= 12;
@@ -469,7 +467,7 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mutex_lock(&pfb->base.mutex);
if (comp) {
- struct nouveau_ltc *ltc = nouveau_ltc(pfb);
+ struct nvkm_ltc *ltc = nvkm_ltc(pfb);
/* compression only works with lpages */
if (align == (1 << (17 - 12))) {
@@ -478,15 +476,15 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
}
if (unlikely(!mem->tag))
- type = nvc0_pte_storage_type_map[type];
+ type = gf100_pte_storage_type_map[type];
}
mem->memtype = type;
do {
if (back)
- ret = nouveau_mm_tail(mm, 0, 1, size, ncmin, align, &r);
+ ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
else
- ret = nouveau_mm_head(mm, 0, 1, size, ncmin, align, &r);
+ ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&pfb->base.mutex);
pfb->ram->put(pfb, &mem);
@@ -498,20 +496,20 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
} while (size);
mutex_unlock(&pfb->base.mutex);
- r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
*pmem = mem;
return 0;
}
int
-nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 maskaddr, int size,
- void **pobject)
+gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 maskaddr, int size,
+ void **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nouveau_ram *ram;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct nvkm_ram *ram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
u32 parts = nv_rd32(pfb, 0x022438);
@@ -521,7 +519,7 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
bool uniform = true;
int ret, part;
- ret = nouveau_ram_create_(parent, engine, oclass, size, pobject);
+ ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
ram = *pobject;
if (ret)
return ret;
@@ -529,7 +527,7 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
- ram->type = nouveau_fb_bios_memtype(bios);
+ ram->type = nvkm_fb_bios_memtype(bios);
ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
/* read amount of vram attached to each memory controller */
@@ -551,11 +549,11 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (uniform) {
offset = rsvd_head;
length = (ram->size >> 12) - rsvd_head - rsvd_tail;
- ret = nouveau_mm_init(&pfb->vram, offset, length, 1);
+ ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
} else {
/* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&pfb->vram, rsvd_head,
- (bsize << 8) * parts - rsvd_head, 1);
+ ret = nvkm_mm_init(&pfb->vram, rsvd_head,
+ (bsize << 8) * parts - rsvd_head, 1);
if (ret)
return ret;
@@ -563,27 +561,27 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
offset = (0x0200000000ULL >> 12) + (bsize << 8);
length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
- ret = nouveau_mm_init(&pfb->vram, offset, length, 1);
+ ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
if (ret)
- nouveau_mm_fini(&pfb->vram);
+ nvkm_mm_fini(&pfb->vram);
}
if (ret)
return ret;
- ram->get = nvc0_ram_get;
- ram->put = nvc0_ram_put;
+ ram->get = gf100_ram_get;
+ ram->put = gf100_ram_put;
return 0;
}
static int
-nvc0_ram_init(struct nouveau_object *object)
+gf100_ram_init(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = (void *)object->parent;
- struct nvc0_ram *ram = (void *)object;
+ struct nvkm_fb *pfb = (void *)object->parent;
+ struct gf100_ram *ram = (void *)object;
int ret, i;
- ret = nouveau_ram_init(&ram->base);
+ ret = nvkm_ram_init(&ram->base);
if (ret)
return ret;
@@ -624,15 +622,15 @@ nvc0_ram_init(struct nouveau_object *object)
}
static int
-nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_bios *bios = nouveau_bios(parent);
- struct nvc0_ram *ram;
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct gf100_ram *ram;
int ret;
- ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
+ ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -651,9 +649,9 @@ nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
switch (ram->base.type) {
case NV_MEM_TYPE_GDDR5:
- ram->base.calc = nvc0_ram_calc;
- ram->base.prog = nvc0_ram_prog;
- ram->base.tidy = nvc0_ram_tidy;
+ ram->base.calc = gf100_ram_calc;
+ ram->base.prog = gf100_ram_prog;
+ ram->base.tidy = gf100_ram_tidy;
break;
default:
nv_warn(ram, "reclocking of this ram type unsupported\n");
@@ -721,13 +719,13 @@ nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nvc0_ram_oclass = {
+struct nvkm_oclass
+gf100_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_ram_ctor,
- .dtor = _nouveau_ram_dtor,
- .init = nvc0_ram_init,
- .fini = _nouveau_ram_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_ram_ctor,
+ .dtor = _nvkm_ram_dtor,
+ .init = gf100_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
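For the VRAM accounting in gf100_ram_create_() above, the reserved head and tail come out to 64 and 256 pages (256 KiB of VGA memory and 1 MiB for "vbios etc", both in 4 KiB units). A tiny standalone sketch of the uniform-layout branch with a made-up 1 GiB of VRAM, just to make the arithmetic concrete:

    #include <stdio.h>

    int main(void)
    {
            /* illustrative numbers only: 1 GiB of VRAM, uniform partitions */
            const unsigned long long vram_bytes = 1ULL << 30;
            const unsigned long long rsvd_head  = ( 256 * 1024) >> 12;  /* vga memory -> 64 pages  */
            const unsigned long long rsvd_tail  = (1024 * 1024) >> 12;  /* vbios etc  -> 256 pages */
            const unsigned long long pages      = vram_bytes >> 12;

            printf("mm region: page %llu .. %llu (%llu usable pages)\n",
                   rsvd_head, pages - rsvd_tail - 1,
                   pages - rsvd_head - rsvd_tail);
            return 0;
    }

The non-uniform branch instead registers two regions with the mm: the lowest common amount from page 64 upward, plus the remainder through the high window at (0x0200000000 >> 12) + (bsize << 8) pages, still holding back the final 1 MiB.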
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 6bae474abb44..1ef15c3e6a81 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -21,29 +21,23 @@
*
* Authors: Ben Skeggs
*/
+#include "ramfuc.h"
+#include "gf100.h"
-#include <subdev/gpio.h>
-
+#include <core/device.h>
+#include <core/option.h>
#include <subdev/bios.h>
-#include <subdev/bios/pll.h>
#include <subdev/bios/init.h>
-#include <subdev/bios/rammap.h>
-#include <subdev/bios/timing.h>
#include <subdev/bios/M0205.h>
#include <subdev/bios/M0209.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+#include <subdev/clk.h>
+#include <subdev/clk/pll.h>
+#include <subdev/gpio.h>
-#include <subdev/clock.h>
-#include <subdev/clock/pll.h>
-
-#include <subdev/timer.h>
-
-#include <core/option.h>
-
-#include "nvc0.h"
-
-#include "ramfuc.h"
-
-struct nve0_ramfuc {
+struct gk104_ramfuc {
struct ramfuc base;
struct nvbios_pll refpll;
@@ -124,9 +118,9 @@ struct nve0_ramfuc {
struct ramfuc_reg r_0x100750;
};
-struct nve0_ram {
- struct nouveau_ram base;
- struct nve0_ramfuc fuc;
+struct gk104_ram {
+ struct nvkm_ram base;
+ struct gk104_ramfuc fuc;
struct list_head cfg;
u32 parts;
@@ -144,9 +138,9 @@ struct nve0_ram {
* GDDR5
******************************************************************************/
static void
-nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data)
+gk104_ram_train(struct gk104_ramfuc *fuc, u32 mask, u32 data)
{
- struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
u32 addr = 0x110974, i;
ram_mask(fuc, 0x10f910, mask, data);
@@ -160,9 +154,9 @@ nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data)
}
static void
-r1373f4_init(struct nve0_ramfuc *fuc)
+r1373f4_init(struct gk104_ramfuc *fuc)
{
- struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
@@ -210,10 +204,10 @@ r1373f4_init(struct nve0_ramfuc *fuc)
}
static void
-r1373f4_fini(struct nve0_ramfuc *fuc)
+r1373f4_fini(struct gk104_ramfuc *fuc)
{
- struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nouveau_ram_data *next = ram->base.next;
+ struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct nvkm_ram_data *next = ram->base.next;
u8 v0 = next->bios.ramcfg_11_03_c0;
u8 v1 = next->bios.ramcfg_11_03_30;
u32 tmp;
@@ -232,10 +226,10 @@ r1373f4_fini(struct nve0_ramfuc *fuc)
}
static void
-nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
- u32 _mask, u32 _data, u32 _copy)
+gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
+ u32 _mask, u32 _data, u32 _copy)
{
- struct nve0_fb_priv *priv = (void *)nouveau_fb(ram);
+ struct gk104_fb_priv *priv = (void *)nvkm_fb(ram);
struct ramfuc *fuc = &ram->fuc.base;
u32 addr = 0x110000 + (reg->addr & 0xfff);
u32 mask = _mask | _copy;
@@ -246,19 +240,19 @@ nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
if (ram->pnuts & (1 << i)) {
u32 prev = nv_rd32(priv, addr);
u32 next = (prev & ~mask) | data;
- nouveau_memx_wr32(fuc->memx, addr, next);
+ nvkm_memx_wr32(fuc->memx, addr, next);
}
}
}
#define ram_nuts(s,r,m,d,c) \
- nve0_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
+ gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
static int
-nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
+gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
{
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
- struct nouveau_ram_data *next = ram->base.next;
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ramfuc *fuc = &ram->fuc;
+ struct nvkm_ram_data *next = ram->base.next;
int vc = !next->bios.ramcfg_11_02_08;
int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
@@ -283,7 +277,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- nve0_ram_train(fuc, 0x01020000, 0x000c0000);
+ gk104_ram_train(fuc, 0x01020000, 0x000c0000);
ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
ram_nsec(fuc, 1000);
@@ -588,7 +582,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
- nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
+ gk104_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f294, temp);
}
@@ -643,7 +637,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
} else {
data = 0xa40e0000;
}
- nve0_ram_train(fuc, 0xbc0f0000, data);
+ gk104_ram_train(fuc, 0xbc0f0000, data);
if (1) /* XXX: not always? */
ram_nsec(fuc, 1000);
@@ -661,7 +655,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
}
if (next->bios.ramcfg_11_07_02)
- nve0_ram_train(fuc, 0x80020000, 0x01000000);
+ gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
@@ -680,14 +674,14 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
******************************************************************************/
static int
-nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
+gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
{
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ramfuc *fuc = &ram->fuc;
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
- struct nouveau_ram_data *next = ram->base.next;
+ struct nvkm_ram_data *next = ram->base.next;
int vc = !next->bios.ramcfg_11_02_08;
int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
@@ -932,11 +926,10 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
******************************************************************************/
static int
-nve0_ram_calc_data(struct nouveau_fb *pfb, u32 khz,
- struct nouveau_ram_data *data)
+gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
{
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nouveau_ram_data *cfg;
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct nvkm_ram_data *cfg;
u32 mhz = khz / 1000;
list_for_each_entry(cfg, &ram->cfg, head) {
@@ -953,10 +946,10 @@ nve0_ram_calc_data(struct nouveau_fb *pfb, u32 khz,
}
static int
-nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
+gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
{
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ramfuc *fuc = &ram->fuc;
int refclk, i;
int ret;
@@ -980,8 +973,8 @@ nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
refclk = fuc->mempll.refclk;
/* calculate refpll coefficients */
- ret = nva3_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
- &ram->fN1, &ram->M1, &ram->P1);
+ ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+ &ram->fN1, &ram->M1, &ram->P1);
fuc->mempll.refclk = ret;
if (ret <= 0) {
nv_error(pfb, "unable to calc refpll\n");
@@ -997,8 +990,8 @@ nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
fuc->mempll.min_p = 1;
fuc->mempll.max_p = 2;
- ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
- &ram->N2, NULL, &ram->M2, &ram->P2);
+ ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
+ &ram->N2, NULL, &ram->M2, &ram->P2);
if (ret <= 0) {
nv_error(pfb, "unable to calc mempll\n");
return -EINVAL;
@@ -1013,14 +1006,14 @@ nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
switch (ram->base.type) {
case NV_MEM_TYPE_DDR3:
- ret = nouveau_sddr3_calc(&ram->base);
+ ret = nvkm_sddr3_calc(&ram->base);
if (ret == 0)
- ret = nve0_ram_calc_sddr3(pfb, next->freq);
+ ret = gk104_ram_calc_sddr3(pfb, next->freq);
break;
case NV_MEM_TYPE_GDDR5:
- ret = nouveau_gddr5_calc(&ram->base, ram->pnuts != 0);
+ ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
if (ret == 0)
- ret = nve0_ram_calc_gddr5(pfb, next->freq);
+ ret = gk104_ram_calc_gddr5(pfb, next->freq);
break;
default:
ret = -ENOSYS;
@@ -1031,21 +1024,21 @@ nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
}
static int
-nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
{
- struct nouveau_clock *clk = nouveau_clock(pfb);
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nouveau_ram_data *xits = &ram->base.xition;
- struct nouveau_ram_data *copy;
+ struct nvkm_clk *clk = nvkm_clk(pfb);
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct nvkm_ram_data *xits = &ram->base.xition;
+ struct nvkm_ram_data *copy;
int ret;
if (ram->base.next == NULL) {
- ret = nve0_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
- &ram->base.former);
+ ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+ &ram->base.former);
if (ret)
return ret;
- ret = nve0_ram_calc_data(pfb, freq, &ram->base.target);
+ ret = gk104_ram_calc_data(pfb, freq, &ram->base.target);
if (ret)
return ret;
@@ -1069,14 +1062,14 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram->base.next = &ram->base.target;
}
- return nve0_ram_calc_xits(pfb, ram->base.next);
+ return gk104_ram_calc_xits(pfb, ram->base.next);
}
static void
-nve0_ram_prog_0(struct nouveau_fb *pfb, u32 freq)
+gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
{
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nouveau_ram_data *cfg;
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct nvkm_ram_data *cfg;
u32 mhz = freq / 1000;
u32 mask, data;
@@ -1149,35 +1142,35 @@ nve0_ram_prog_0(struct nouveau_fb *pfb, u32 freq)
}
static int
-nve0_ram_prog(struct nouveau_fb *pfb)
+gk104_ram_prog(struct nvkm_fb *pfb)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
- struct nouveau_ram_data *next = ram->base.next;
+ struct nvkm_device *device = nv_device(pfb);
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ramfuc *fuc = &ram->fuc;
+ struct nvkm_ram_data *next = ram->base.next;
- if (!nouveau_boolopt(device->cfgopt, "NvMemExec", true)) {
+ if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
ram_exec(fuc, false);
return (ram->base.next == &ram->base.xition);
}
- nve0_ram_prog_0(pfb, 1000);
+ gk104_ram_prog_0(pfb, 1000);
ram_exec(fuc, true);
- nve0_ram_prog_0(pfb, next->freq);
+ gk104_ram_prog_0(pfb, next->freq);
return (ram->base.next == &ram->base.xition);
}
static void
-nve0_ram_tidy(struct nouveau_fb *pfb)
+gk104_ram_tidy(struct nvkm_fb *pfb)
{
- struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
+ struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ramfuc *fuc = &ram->fuc;
ram->base.next = NULL;
ram_exec(fuc, false);
}
-struct nve0_ram_train {
+struct gk104_ram_train {
u16 mask;
struct nvbios_M0209S remap;
struct nvbios_M0209S type00;
@@ -1190,10 +1183,10 @@ struct nve0_ram_train {
};
static int
-nve0_ram_train_type(struct nouveau_fb *pfb, int i, u8 ramcfg,
- struct nve0_ram_train *train)
+gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
+ struct gk104_ram_train *train)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
struct nvbios_M0205E M0205E;
struct nvbios_M0205S M0205S;
struct nvbios_M0209E M0209E;
@@ -1251,7 +1244,7 @@ nve0_ram_train_type(struct nouveau_fb *pfb, int i, u8 ramcfg,
}
static int
-nve0_ram_train_init_0(struct nouveau_fb *pfb, struct nve0_ram_train *train)
+gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
{
int i, j;
@@ -1285,15 +1278,15 @@ nve0_ram_train_init_0(struct nouveau_fb *pfb, struct nve0_ram_train *train)
}
static int
-nve0_ram_train_init(struct nouveau_fb *pfb)
+gk104_ram_train_init(struct nvkm_fb *pfb)
{
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
- struct nve0_ram_train *train;
+ struct gk104_ram_train *train;
int ret = -ENOMEM, i;
if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
for (i = 0; i < 0x100; i++) {
- ret = nve0_ram_train_type(pfb, i, ramcfg, train);
+ ret = gk104_ram_train_type(pfb, i, ramcfg, train);
if (ret && ret != -ENOENT)
break;
}
@@ -1301,7 +1294,7 @@ nve0_ram_train_init(struct nouveau_fb *pfb)
switch (pfb->ram->type) {
case NV_MEM_TYPE_GDDR5:
- ret = nve0_ram_train_init_0(pfb, train);
+ ret = gk104_ram_train_init_0(pfb, train);
break;
default:
ret = 0;
@@ -1313,16 +1306,16 @@ nve0_ram_train_init(struct nouveau_fb *pfb)
}
int
-nve0_ram_init(struct nouveau_object *object)
+gk104_ram_init(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = (void *)object->parent;
- struct nve0_ram *ram = (void *)object;
- struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nvkm_fb *pfb = (void *)object->parent;
+ struct gk104_ram *ram = (void *)object;
+ struct nvkm_bios *bios = nvkm_bios(pfb);
u8 ver, hdr, cnt, len, snr, ssz;
u32 data, save;
int ret, i;
- ret = nouveau_ram_init(&ram->base);
+ ret = nvkm_ram_init(&ram->base);
if (ret)
return ret;
@@ -1360,15 +1353,15 @@ nve0_ram_init(struct nouveau_object *object)
nv_wr32(pfb, 0x10ecc0, 0xffffffff);
nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
- return nve0_ram_train_init(pfb);
+ return gk104_ram_train_init(pfb);
}
static int
-nve0_ram_ctor_data(struct nve0_ram *ram, u8 ramcfg, int i)
+gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
{
- struct nouveau_fb *pfb = (void *)nv_object(ram)->parent;
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nouveau_ram_data *cfg;
+ struct nvkm_fb *pfb = (void *)nv_object(ram)->parent;
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct nvkm_ram_data *cfg;
struct nvbios_ramcfg *d = &ram->diff;
struct nvbios_ramcfg *p, *n;
u8 ver, hdr, cnt, len;
@@ -1434,33 +1427,33 @@ done:
}
static void
-nve0_ram_dtor(struct nouveau_object *object)
+gk104_ram_dtor(struct nvkm_object *object)
{
- struct nve0_ram *ram = (void *)object;
- struct nouveau_ram_data *cfg, *tmp;
+ struct gk104_ram *ram = (void *)object;
+ struct nvkm_ram_data *cfg, *tmp;
list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
kfree(cfg);
}
- nouveau_ram_destroy(&ram->base);
+ nvkm_ram_destroy(&ram->base);
}
static int
-nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct nvkm_gpio *gpio = nvkm_gpio(pfb);
struct dcb_gpio_func func;
- struct nve0_ram *ram;
+ struct gk104_ram *ram;
int ret, i;
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
u32 tmp;
- ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
+ ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -1470,9 +1463,9 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
switch (ram->base.type) {
case NV_MEM_TYPE_DDR3:
case NV_MEM_TYPE_GDDR5:
- ram->base.calc = nve0_ram_calc;
- ram->base.prog = nve0_ram_prog;
- ram->base.tidy = nve0_ram_tidy;
+ ram->base.calc = gk104_ram_calc;
+ ram->base.prog = gk104_ram_prog;
+ ram->base.tidy = gk104_ram_tidy;
break;
default:
nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
@@ -1510,7 +1503,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
* need to treat this condition as a "don't touch" indicator.
*/
for (i = 0; !ret; i++) {
- ret = nve0_ram_ctor_data(ram, ramcfg, i);
+ ret = gk104_ram_ctor_data(ram, ramcfg, i);
if (ret && ret != -ENOENT) {
nv_error(pfb, "failed to parse ramcfg data\n");
return ret;
@@ -1634,13 +1627,13 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nve0_ram_oclass = {
+struct nvkm_oclass
+gk104_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_ram_ctor,
- .dtor = nve0_ram_dtor,
- .init = nve0_ram_init,
- .fini = _nouveau_ram_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_ram_ctor,
+ .dtor = gk104_ram_dtor,
+ .init = gk104_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
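Both gf100_ram_ctor() and gk104_ram_ctor() above expose reclocking through the same three hooks: calc builds a memx register script for the target frequency, prog executes or discards it depending on the NvMemExec option, and tidy drops whatever script state is left. A hypothetical caller, shown only to make that split concrete (this is not code from the patch, and the real clock code sequences the hooks with more care):

    /* hypothetical sketch; assumes the driver headers (e.g. <subdev/fb.h>) are in scope */
    static int
    reclock_sketch(struct nvkm_fb *pfb, u32 khz)
    {
            int ret;

            do {
                    ret = pfb->ram->calc(pfb, khz);  /* build the memx script */
                    if (ret < 0)
                            break;
                    /* gk104_ram_prog() returns nonzero while the intermediate
                     * "xition" step was just programmed, i.e. go around again */
                    ret = pfb->ram->prog(pfb);
            } while (ret > 0);

            pfb->ram->tidy(pfb);                     /* drop leftover state */
            return ret;
    }

Note that tidy stays safe to call even after a successful prog, because ramfuc_exec() clears ram->pfb once the script has been handed to memx.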
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c
index 4d77d75e4673..5f30db140b47 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c
@@ -19,20 +19,19 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
-#include <subdev/fb.h>
+#include <core/device.h>
struct gk20a_mem {
- struct nouveau_mem base;
+ struct nvkm_mem base;
void *cpuaddr;
dma_addr_t handle;
};
#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
static void
-gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+gk20a_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
{
struct device *dev = nv_device_base(nv_device(pfb));
struct gk20a_mem *mem = to_gk20a_mem(*pmem);
@@ -50,8 +49,8 @@ gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
static int
-gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_mem **pmem)
+gk20a_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nvkm_mem **pmem)
{
struct device *dev = nv_device_base(nv_device(pfb));
struct gk20a_mem *mem;
@@ -116,19 +115,18 @@ gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);
mem->base.offset = (u64)mem->base.pages[0];
-
return 0;
}
static int
-gk20a_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 datasize,
- struct nouveau_object **pobject)
+gk20a_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 datasize,
+ struct nvkm_object **pobject)
{
- struct nouveau_ram *ram;
+ struct nvkm_ram *ram;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -137,16 +135,15 @@ gk20a_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
ram->get = gk20a_ram_get;
ram->put = gk20a_ram_put;
-
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gk20a_ram_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gk20a_ram_ctor,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 4c6363595c79..a298b39f55c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -21,22 +21,21 @@
*
* Authors: Ben Skeggs
*/
-
-#include "nvc0.h"
+#include "gf100.h"
struct gm107_ram {
- struct nouveau_ram base;
+ struct nvkm_ram base;
};
static int
-gm107_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gm107_ram *ram;
int ret;
- ret = nvc0_ram_create(parent, engine, oclass, 0x021c14, &ram);
+ ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -44,13 +43,13 @@ gm107_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gm107_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm107_ram_ctor,
- .dtor = _nouveau_ram_dtor,
- .init = nve0_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = gk104_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 3b38a538845d..24176401b49b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -23,32 +23,22 @@
* Roy Spliet <rspliet@eclipso.eu>
*/
+#include "ramfuc.h"
+#include "nv50.h"
+
+#include <core/device.h>
+#include <core/option.h>
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
-#include <subdev/bios/pll.h>
-#include <subdev/bios/rammap.h>
#include <subdev/bios/M0205.h>
+#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
-
-#include <subdev/clock/nva3.h>
-#include <subdev/clock/pll.h>
-
+#include <subdev/clk/gt215.h>
#include <subdev/gpio.h>
-#include <subdev/timer.h>
-
-#include <engine/fifo.h>
-
-#include <core/option.h>
-
-#include "ramfuc.h"
-
-#include "nv50.h"
-
/* XXX: Remove when memx gains GPIO support */
extern int nv50_gpio_location(int line, u32 *reg, u32 *shift);
-struct nva3_ramfuc {
+struct gt215_ramfuc {
struct ramfuc base;
struct ramfuc_reg r_0x001610;
struct ramfuc_reg r_0x001700;
@@ -89,7 +79,7 @@ struct nva3_ramfuc {
struct ramfuc_reg r_gpioFBVREF;
};
-struct nva3_ltrain {
+struct gt215_ltrain {
enum {
NVA3_TRAIN_UNKNOWN,
NVA3_TRAIN_UNSUPPORTED,
@@ -100,17 +90,17 @@ struct nva3_ltrain {
u32 r_100720;
u32 r_1111e0;
u32 r_111400;
- struct nouveau_mem *mem;
+ struct nvkm_mem *mem;
};
-struct nva3_ram {
- struct nouveau_ram base;
- struct nva3_ramfuc fuc;
- struct nva3_ltrain ltrain;
+struct gt215_ram {
+ struct nvkm_ram base;
+ struct gt215_ramfuc fuc;
+ struct gt215_ltrain ltrain;
};
void
-nva3_link_train_calc(u32 *vals, struct nva3_ltrain *train)
+gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
{
int i, lo, hi;
u8 median[8], bins[4] = {0, 0, 0, 0}, bin = 0, qty = 0;
@@ -164,14 +154,14 @@ nva3_link_train_calc(u32 *vals, struct nva3_ltrain *train)
* Link training for (at least) DDR3
*/
int
-nva3_link_train(struct nouveau_fb *pfb)
+gt215_link_train(struct nvkm_fb *pfb)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nva3_ram *ram = (void *)pfb->ram;
- struct nouveau_clock *clk = nouveau_clock(pfb);
- struct nva3_ltrain *train = &ram->ltrain;
- struct nouveau_device *device = nv_device(pfb);
- struct nva3_ramfuc *fuc = &ram->fuc;
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct gt215_ram *ram = (void *)pfb->ram;
+ struct nvkm_clk *clk = nvkm_clk(pfb);
+ struct gt215_ltrain *train = &ram->ltrain;
+ struct nvkm_device *device = nv_device(pfb);
+ struct gt215_ramfuc *fuc = &ram->fuc;
u32 *result, r1700;
int ret, i;
struct nvbios_M0205T M0205T = { 0 };
@@ -180,7 +170,7 @@ nva3_link_train(struct nouveau_fb *pfb)
unsigned long flags;
unsigned long *f = &flags;
- if (nouveau_boolopt(device->cfgopt, "NvMemExec", true) != true)
+ if (nvkm_boolopt(device->cfgopt, "NvMemExec", true) != true)
return -ENOSYS;
/* XXX: Multiple partitions? */
@@ -197,7 +187,7 @@ nva3_link_train(struct nouveau_fb *pfb)
clk_current = clk->read(clk, nv_clk_src_mem);
- ret = nva3_clock_pre(clk, f);
+ ret = gt215_clk_pre(clk, f);
if (ret)
goto out;
@@ -252,12 +242,12 @@ nva3_link_train(struct nouveau_fb *pfb)
nv_mask(pfb, 0x616308, 0x10, 0x10);
nv_mask(pfb, 0x616b08, 0x10, 0x10);
- nva3_clock_post(clk, f);
+ gt215_clk_post(clk, f);
ram_train_result(pfb, result, 64);
for (i = 0; i < 64; i++)
nv_debug(pfb, "Train: %08x", result[i]);
- nva3_link_train_calc(result, train);
+ gt215_link_train_calc(result, train);
nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
train->r_1111e0, train->r_111400);
@@ -274,12 +264,12 @@ out:
train->state = NVA3_TRAIN_UNSUPPORTED;
- nva3_clock_post(clk, f);
+ gt215_clk_post(clk, f);
return ret;
}
int
-nva3_link_train_init(struct nouveau_fb *pfb)
+gt215_link_train_init(struct nvkm_fb *pfb)
{
static const u32 pattern[16] = {
0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@@ -287,10 +277,10 @@ nva3_link_train_init(struct nouveau_fb *pfb)
0x33333333, 0x55555555, 0x77777777, 0x66666666,
0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
};
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nva3_ram *ram = (void *)pfb->ram;
- struct nva3_ltrain *train = &ram->ltrain;
- struct nouveau_mem *mem;
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct gt215_ram *ram = (void *)pfb->ram;
+ struct gt215_ltrain *train = &ram->ltrain;
+ struct nvkm_mem *mem;
struct nvbios_M0205E M0205E;
u8 ver, hdr, cnt, len;
u32 r001700;
@@ -340,14 +330,13 @@ nva3_link_train_init(struct nouveau_fb *pfb)
train->r_100720 = nv_rd32(pfb, 0x100720);
train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
train->r_111400 = nv_rd32(pfb, 0x111400);
-
return 0;
}
void
-nva3_link_train_fini(struct nouveau_fb *pfb)
+gt215_link_train_fini(struct nvkm_fb *pfb)
{
- struct nva3_ram *ram = (void *)pfb->ram;
+ struct gt215_ram *ram = (void *)pfb->ram;
if (ram->ltrain.mem)
pfb->ram->put(pfb, &ram->ltrain.mem);
@@ -358,9 +347,9 @@ nva3_link_train_fini(struct nouveau_fb *pfb)
*/
#define T(t) cfg->timing_10_##t
static int
-nva3_ram_timing_calc(struct nouveau_fb *pfb, u32 *timing)
+gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
{
- struct nva3_ram *ram = (void *)pfb->ram;
+ struct gt215_ram *ram = (void *)pfb->ram;
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
int tUNK_base, tUNK_40_0, prevCL;
u32 cur2, cur3, cur7, cur8;
@@ -433,7 +422,7 @@ nva3_ram_timing_calc(struct nouveau_fb *pfb, u32 *timing)
#undef T
static void
-nouveau_sddr2_dll_reset(struct nva3_ramfuc *fuc)
+nvkm_sddr2_dll_reset(struct gt215_ramfuc *fuc)
{
ram_mask(fuc, mr[0], 0x100, 0x100);
ram_nsec(fuc, 1000);
@@ -442,7 +431,7 @@ nouveau_sddr2_dll_reset(struct nva3_ramfuc *fuc)
}
static void
-nouveau_sddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+nvkm_sddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr)
{
u32 mr1_old = ram_rd32(fuc, mr[1]);
@@ -454,7 +443,7 @@ nouveau_sddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
}
static void
-nouveau_gddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+nvkm_gddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr)
{
u32 mr1_old = ram_rd32(fuc, mr[1]);
@@ -465,7 +454,7 @@ nouveau_gddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
}
static void
-nva3_ram_lock_pll(struct nva3_ramfuc *fuc, struct nva3_clock_info *mclk)
+gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
{
ram_wr32(fuc, 0x004004, mclk->pll);
ram_mask(fuc, 0x004000, 0x00000001, 0x00000001);
@@ -475,9 +464,9 @@ nva3_ram_lock_pll(struct nva3_ramfuc *fuc, struct nva3_clock_info *mclk)
}
static void
-nva3_ram_fbvref(struct nva3_ramfuc *fuc, u32 val)
+gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
{
- struct nouveau_gpio *gpio = nouveau_gpio(fuc->base.pfb);
+ struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb);
struct dcb_gpio_func func;
u32 reg, sh, gpio_val;
int ret;
@@ -498,14 +487,14 @@ nva3_ram_fbvref(struct nva3_ramfuc *fuc, u32 val)
}
static int
-nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
+gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
- struct nva3_ram *ram = (void *)pfb->ram;
- struct nva3_ramfuc *fuc = &ram->fuc;
- struct nva3_ltrain *train = &ram->ltrain;
- struct nva3_clock_info mclk;
- struct nouveau_ram_data *next;
+ struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct gt215_ram *ram = (void *)pfb->ram;
+ struct gt215_ramfuc *fuc = &ram->fuc;
+ struct gt215_ltrain *train = &ram->ltrain;
+ struct gt215_clk_info mclk;
+ struct nvkm_ram_data *next;
u8 ver, hdr, cnt, len, strap;
u32 data;
u32 r004018, r100760, r100da0, r111100, ctrl;
@@ -519,12 +508,12 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram->base.next = next;
if (ram->ltrain.state == NVA3_TRAIN_ONCE)
- nva3_link_train(pfb);
+ gt215_link_train(pfb);
/* lookup memory config data relevant to the target frequency */
i = 0;
data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
- &next->bios);
+ &next->bios);
if (!data || ver != 0x10 || hdr < 0x05) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
@@ -555,13 +544,13 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
}
- ret = nva3_pll_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
+ ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk);
if (ret < 0) {
nv_error(pfb, "failed mclk calculation\n");
return ret;
}
- nva3_ram_timing_calc(pfb, timing);
+ gt215_ram_timing_calc(pfb, timing);
ret = ram_init(fuc, pfb);
if (ret)
@@ -574,13 +563,13 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
switch (ram->base.type) {
case NV_MEM_TYPE_DDR2:
- ret = nouveau_sddr2_calc(&ram->base);
+ ret = nvkm_sddr2_calc(&ram->base);
break;
case NV_MEM_TYPE_DDR3:
- ret = nouveau_sddr3_calc(&ram->base);
+ ret = nvkm_sddr3_calc(&ram->base);
break;
case NV_MEM_TYPE_GDDR3:
- ret = nouveau_gddr3_calc(&ram->base);
+ ret = nvkm_gddr3_calc(&ram->base);
break;
default:
ret = -ENOSYS;
@@ -621,7 +610,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* If switching from non-pll to pll, lock before disabling FB */
if (mclk.pll && !pll2pll) {
ram_mask(fuc, 0x004128, 0x003f3141, mclk.clk | 0x00000101);
- nva3_ram_lock_pll(fuc, &mclk);
+ gt215_ram_lock_pll(fuc, &mclk);
}
/* Start with disabling some CRTCs and PFIFO? */
@@ -643,15 +632,15 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* If we're disabling the DLL, do it now */
switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
case NV_MEM_TYPE_DDR3:
- nouveau_sddr3_dll_disable(fuc, ram->base.mr);
+ nvkm_sddr3_dll_disable(fuc, ram->base.mr);
break;
case NV_MEM_TYPE_GDDR3:
- nouveau_gddr3_dll_disable(fuc, ram->base.mr);
+ nvkm_gddr3_dll_disable(fuc, ram->base.mr);
break;
}
if (fuc->r_gpioFBVREF.addr && next->bios.timing_10_ODT)
- nva3_ram_fbvref(fuc, 0);
+ gt215_ram_fbvref(fuc, 0);
/* Brace RAM for impact */
ram_wr32(fuc, 0x1002d4, 0x00000001);
@@ -678,7 +667,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x004000, 0x00000008, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
ram_wr32(fuc, 0x004018, 0x00001000);
- nva3_ram_lock_pll(fuc, &mclk);
+ gt215_ram_lock_pll(fuc, &mclk);
}
if (mclk.pll) {
@@ -818,11 +807,11 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x111100, 0xffffffff, r111100);
if (fuc->r_gpioFBVREF.addr && !next->bios.timing_10_ODT)
- nva3_ram_fbvref(fuc, 1);
+ gt215_ram_fbvref(fuc, 1);
/* Reset DLL */
if (!next->bios.ramcfg_10_DLLoff)
- nouveau_sddr2_dll_reset(fuc);
+ nvkm_sddr2_dll_reset(fuc);
if (ram->base.type == NV_MEM_TYPE_GDDR3) {
ram_nsec(fuc, 31000);
@@ -866,12 +855,12 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
static int
-nva3_ram_prog(struct nouveau_fb *pfb)
+gt215_ram_prog(struct nvkm_fb *pfb)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nva3_ram *ram = (void *)pfb->ram;
- struct nva3_ramfuc *fuc = &ram->fuc;
- bool exec = nouveau_boolopt(device->cfgopt, "NvMemExec", true);
+ struct nvkm_device *device = nv_device(pfb);
+ struct gt215_ram *ram = (void *)pfb->ram;
+ struct gt215_ramfuc *fuc = &ram->fuc;
+ bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
if (exec) {
nv_mask(pfb, 0x001534, 0x2, 0x2);
@@ -891,49 +880,48 @@ nva3_ram_prog(struct nouveau_fb *pfb)
}
static void
-nva3_ram_tidy(struct nouveau_fb *pfb)
+gt215_ram_tidy(struct nvkm_fb *pfb)
{
- struct nva3_ram *ram = (void *)pfb->ram;
- struct nva3_ramfuc *fuc = &ram->fuc;
+ struct gt215_ram *ram = (void *)pfb->ram;
+ struct gt215_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, false);
}
static int
-nva3_ram_init(struct nouveau_object *object)
+gt215_ram_init(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = (void *)object->parent;
- struct nva3_ram *ram = (void *)object;
+ struct nvkm_fb *pfb = (void *)object->parent;
+ struct gt215_ram *ram = (void *)object;
int ret;
- ret = nouveau_ram_init(&ram->base);
+ ret = nvkm_ram_init(&ram->base);
if (ret)
return ret;
- nva3_link_train_init(pfb);
-
+ gt215_link_train_init(pfb);
return 0;
}
static int
-nva3_ram_fini(struct nouveau_object *object, bool suspend)
+gt215_ram_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_fb *pfb = (void *)object->parent;
+ struct nvkm_fb *pfb = (void *)object->parent;
if (!suspend)
- nva3_link_train_fini(pfb);
+ gt215_link_train_fini(pfb);
return 0;
}
static int
-nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 datasize,
- struct nouveau_object **pobject)
+gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 datasize,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_gpio *gpio = nvkm_gpio(pfb);
struct dcb_gpio_func func;
- struct nva3_ram *ram;
+ struct gt215_ram *ram;
int ret, i;
u32 reg, shift;
@@ -946,9 +934,9 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
case NV_MEM_TYPE_DDR2:
case NV_MEM_TYPE_DDR3:
case NV_MEM_TYPE_GDDR3:
- ram->base.calc = nva3_ram_calc;
- ram->base.prog = nva3_ram_prog;
- ram->base.tidy = nva3_ram_tidy;
+ ram->base.calc = gt215_ram_calc;
+ ram->base.prog = gt215_ram_prog;
+ ram->base.tidy = gt215_ram_tidy;
break;
default:
nv_warn(ram, "reclocking of this ram type unsupported\n");
@@ -1013,12 +1001,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nva3_ram_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_ram_ctor,
- .dtor = _nouveau_ram_dtor,
- .init = nva3_ram_init,
- .fini = nva3_ram_fini,
+struct nvkm_oclass
+gt215_ram_oclass = {
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt215_ram_ctor,
+ .dtor = _nvkm_ram_dtor,
+ .init = gt215_ram_init,
+ .fini = gt215_ram_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index 033a8e999497..abc18e89a97c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -21,26 +21,25 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
-struct nvaa_ram_priv {
- struct nouveau_ram base;
+struct mcp77_ram_priv {
+ struct nvkm_ram base;
u64 poller_base;
};
static int
-nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 datasize,
- struct nouveau_object **pobject)
+mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 datasize,
+ struct nvkm_object **pobject)
{
u32 rsvd_head = ( 256 * 1024); /* vga memory */
u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nvaa_ram_priv *priv;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct mcp77_ram_priv *priv;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &priv);
+ ret = nvkm_ram_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -52,9 +51,9 @@ nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
rsvd_tail += 0x1000;
priv->poller_base = priv->base.size - rsvd_tail;
- ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
- (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
- 1);
+ ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12,
+ (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
+ 1);
if (ret)
return ret;
@@ -64,14 +63,14 @@ nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static int
-nvaa_ram_init(struct nouveau_object *object)
+mcp77_ram_init(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = nouveau_fb(object);
- struct nvaa_ram_priv *priv = (void *)object;
+ struct nvkm_fb *pfb = nvkm_fb(object);
+ struct mcp77_ram_priv *priv = (void *)object;
int ret;
u64 dniso, hostnb, flush;
- ret = nouveau_ram_init(&priv->base);
+ ret = nvkm_ram_init(&priv->base);
if (ret)
return ret;
@@ -88,16 +87,15 @@ nvaa_ram_init(struct nouveau_object *object)
nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
nv_wr32(pfb, 0x100c24, flush);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
-
return 0;
}
-struct nouveau_oclass
-nvaa_ram_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvaa_ram_ctor,
- .dtor = _nouveau_ram_dtor,
- .init = nvaa_ram_init,
- .fini = _nouveau_ram_fini,
+struct nvkm_oclass
+mcp77_ram_oclass = {
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = mcp77_ram_ctor,
+ .dtor = _nvkm_ram_dtor,
+ .init = mcp77_ram_init,
+ .fini = _nvkm_ram_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 1972268d1410..855de1617229 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -21,22 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
-#include <subdev/fb/regsnv04.h>
-
#include "priv.h"
+#include "regsnv04.h"
static int
-nv04_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_ram *ram;
u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -65,16 +63,17 @@ nv04_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
ram->type = NV_MEM_TYPE_SGRAM;
else
ram->type = NV_MEM_TYPE_SDRAM;
+
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv04_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index 8311f3774edf..3b8a1eda5b64 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static int
-nv10_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_ram *ram;
u32 cfg0 = nv_rd32(pfb, 0x100200);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -48,14 +47,13 @@ nv10_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-
-struct nouveau_oclass
+struct nvkm_oclass
nv10_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv10_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index d0caddfb9db0..fbae05db4ffd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -21,16 +21,17 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
+#include <core/device.h>
+
static int
-nv1a_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_ram *ram;
struct pci_dev *bridge;
u32 mem, mib;
int ret;
@@ -41,7 +42,7 @@ nv1a_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return -ENODEV;
}
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -59,13 +60,13 @@ nv1a_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv1a_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv1a_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index fdc11bba226d..d9e7187bd235 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static int
-nv20_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_ram *ram;
u32 pbus1218 = nv_rd32(pfb, 0x001218);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -51,13 +50,13 @@ nv20_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv20_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv20_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 7648beb11199..3d31fa45c1a6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -21,23 +21,20 @@
*
* Authors: Ben Skeggs
*/
+#include "nv40.h"
+#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
-#include <subdev/bios/pll.h>
#include <subdev/bios/init.h>
-#include <subdev/clock.h>
-#include <subdev/clock/pll.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
#include <subdev/timer.h>
-#include <engine/fifo.h>
-
-#include "nv40.h"
-
int
-nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
+nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
struct nv40_ram *ram = (void *)pfb->ram;
struct nvbios_pll pll;
int N1, M1, N2, M2;
@@ -68,9 +65,9 @@ nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
int
-nv40_ram_prog(struct nouveau_fb *pfb)
+nv40_ram_prog(struct nvkm_fb *pfb)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
struct nv40_ram *ram = (void *)pfb->ram;
struct bit_entry M;
u32 crtc_mask = 0;
@@ -167,21 +164,21 @@ nv40_ram_prog(struct nouveau_fb *pfb)
}
void
-nv40_ram_tidy(struct nouveau_fb *pfb)
+nv40_ram_tidy(struct nvkm_fb *pfb)
{
}
static int
-nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pbus1218 = nv_rd32(pfb, 0x001218);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -203,13 +200,13 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
}
-struct nouveau_oclass
+struct nvkm_oclass
nv40_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index d64498a4d9ee..33c612b1355f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv40.h"
static int
-nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pfb474 = nv_rd32(pfb, 0x100474);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -55,13 +54,13 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv41_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv41_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index 089acac810c5..f575a7246403 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv40.h"
static int
-nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pfb474 = nv_rd32(pfb, 0x100474);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -53,13 +52,13 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv44_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv44_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index baa013afa57b..51b44cdb2732 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv40.h"
static int
-nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pfb914 = nv_rd32(pfb, 0x100914);
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -55,13 +54,13 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv49_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv49_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index 63a6aab86028..f3ed1c60d730 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -21,19 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static int
-nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_ram *ram;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
@@ -43,13 +42,13 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv4e_ram_oclass = {
.handle = 0,
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv4e_ram_create,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 64a983c96625..d2c81dd635dc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -21,21 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "nv50.h"
+#include "ramseq.h"
+#include <core/device.h>
+#include <core/option.h>
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
-#include <subdev/bios/pll.h>
#include <subdev/bios/perf.h>
+#include <subdev/bios/pll.h>
#include <subdev/bios/timing.h>
-#include <subdev/clock/pll.h>
-#include <subdev/fb.h>
-
-#include <core/option.h>
-#include <core/mm.h>
-
-#include "ramseq.h"
-
-#include "nv50.h"
+#include <subdev/clk/pll.h>
struct nv50_ramseq {
struct hwsq base;
@@ -56,16 +51,16 @@ struct nv50_ramseq {
};
struct nv50_ram {
- struct nouveau_ram base;
+ struct nvkm_ram base;
struct nv50_ramseq hwsq;
};
#define QFX5800NVA0 1
static int
-nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
+nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nvkm_bios *bios = nvkm_bios(pfb);
struct nv50_ram *ram = (void *)pfb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
struct nvbios_perfE perfE;
@@ -82,7 +77,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
i = 0;
do {
ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
- &ramcfg.size, &perfE);
+ &ramcfg.size, &perfE);
if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
(ramcfg.size < 2)) {
nv_error(pfb, "invalid/missing perftab entry\n");
@@ -103,7 +98,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
- &cnt, &len);
+ &cnt, &len);
if (!timing.data || ver != 0x10 || hdr < 0x12) {
nv_error(pfb, "invalid/missing timing entry "
"%02x %04x %02x %02x\n",
@@ -136,7 +131,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
mpll.vco2.max_freq = 0;
if (ret == 0) {
ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
- &N1, &M1, &N2, &M2, &P);
+ &N1, &M1, &N2, &M2, &P);
if (ret == 0)
ret = -EINVAL;
}
@@ -205,18 +200,18 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
static int
-nv50_ram_prog(struct nouveau_fb *pfb)
+nv50_ram_prog(struct nvkm_fb *pfb)
{
- struct nouveau_device *device = nv_device(pfb);
+ struct nvkm_device *device = nv_device(pfb);
struct nv50_ram *ram = (void *)pfb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
- ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
+ ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
static void
-nv50_ram_tidy(struct nouveau_fb *pfb)
+nv50_ram_tidy(struct nvkm_fb *pfb)
{
struct nv50_ram *ram = (void *)pfb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
@@ -224,24 +219,24 @@ nv50_ram_tidy(struct nouveau_fb *pfb)
}
void
-__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
+__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
{
- struct nouveau_mm_node *this;
+ struct nvkm_mm_node *this;
while (!list_empty(&mem->regions)) {
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
list_del(&this->rl_entry);
- nouveau_mm_free(&pfb->vram, &this);
+ nvkm_mm_free(&pfb->vram, &this);
}
- nouveau_mm_free(&pfb->tags, &mem->tag);
+ nvkm_mm_free(&pfb->tags, &mem->tag);
}
void
-nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
{
- struct nouveau_mem *mem = *pmem;
+ struct nvkm_mem *mem = *pmem;
*pmem = NULL;
if (unlikely(mem == NULL))
@@ -255,13 +250,13 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
int
-nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_mem **pmem)
+nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nvkm_mem **pmem)
{
- struct nouveau_mm *heap = &pfb->vram;
- struct nouveau_mm *tags = &pfb->tags;
- struct nouveau_mm_node *r;
- struct nouveau_mem *mem;
+ struct nvkm_mm *heap = &pfb->vram;
+ struct nvkm_mm *tags = &pfb->tags;
+ struct nvkm_mm_node *r;
+ struct nvkm_mem *mem;
int comp = (memtype & 0x300) >> 8;
int type = (memtype & 0x07f);
int back = (memtype & 0x800);
@@ -280,7 +275,7 @@ nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
if (align == 16) {
int n = (max >> 4) * comp;
- ret = nouveau_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
+ ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
if (ret)
mem->tag = NULL;
}
@@ -296,9 +291,9 @@ nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
type = nv50_fb_memtype[type];
do {
if (back)
- ret = nouveau_mm_tail(heap, 0, type, max, min, align, &r);
+ ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
else
- ret = nouveau_mm_head(heap, 0, type, max, min, align, &r);
+ ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
if (ret) {
mutex_unlock(&pfb->base.mutex);
pfb->ram->put(pfb, &mem);
@@ -310,14 +305,14 @@ nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
} while (max);
mutex_unlock(&pfb->base.mutex);
- r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
*pmem = mem;
return 0;
}
static u32
-nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
+nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
{
int colbits, rowbitsa, rowbitsb, banks;
u64 rowsize, predicted;
@@ -326,8 +321,8 @@ nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
r0 = nv_rd32(pfb, 0x100200);
r4 = nv_rd32(pfb, 0x100204);
rt = nv_rd32(pfb, 0x100250);
- nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt,
- nv_rd32(pfb, 0x001540));
+ nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ r0, r4, rt, nv_rd32(pfb, 0x001540));
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@@ -353,17 +348,17 @@ nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
}
int
-nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_ram *ram;
int ret;
- ret = nouveau_ram_create_(parent, engine, oclass, length, pobject);
+ ret = nvkm_ram_create_(parent, engine, oclass, length, pobject);
ram = *pobject;
if (ret)
return ret;
@@ -377,7 +372,7 @@ nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
case 0: ram->type = NV_MEM_TYPE_DDR1; break;
case 1:
- if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+ if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
ram->type = NV_MEM_TYPE_DDR3;
else
ram->type = NV_MEM_TYPE_DDR2;
@@ -389,9 +384,9 @@ nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
break;
}
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
- (rsvd_head + rsvd_tail),
- nv50_fb_vram_rblock(pfb, ram) >> 12);
+ ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+ (rsvd_head + rsvd_tail),
+ nv50_fb_vram_rblock(pfb, ram) >> 12);
if (ret)
return ret;
@@ -403,9 +398,9 @@ nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
}
static int
-nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 datasize,
- struct nouveau_object **pobject)
+nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 datasize,
+ struct nvkm_object **pobject)
{
struct nv50_ram *ram;
int ret, i;
@@ -459,12 +454,12 @@ nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv50_ram_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_ram_ctor,
- .dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
- .fini = _nouveau_ram_fini,
+ .dtor = _nvkm_ram_dtor,
+ .init = _nvkm_ram_init,
+ .fini = _nvkm_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
index 571077e39071..0f1f97ccd5f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
@@ -1,7 +1,5 @@
#ifndef __NVKM_FBRAM_SEQ_H__
#define __NVKM_FBRAM_SEQ_H__
-
-#include <subdev/bus.h>
#include <subdev/bus/hwsq.h>
#define ram_init(s,p) hwsq_init(&(s)->base, (p))
@@ -14,5 +12,4 @@
#define ram_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
#define ram_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
#define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n))
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
index 0f7fc0c52ab2..1f865f61504e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
@@ -1,5 +1,5 @@
-#ifndef __NOUVEAU_FB_REGS_04_H__
-#define __NOUVEAU_FB_REGS_04_H__
+#ifndef __NVKM_FB_REGS_04_H__
+#define __NVKM_FB_REGS_04_H__
#define NV04_PFB_BOOT_0 0x00100000
# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
@@ -17,5 +17,6 @@
# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
+#define NV04_PFB_CFG0 0x00100200
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index 252575f3aa29..afab42df28d4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -22,7 +22,6 @@
* Authors: Roy Spliet <rspliet@eclipso.eu>
* Ben Skeggs
*/
-
#include "priv.h"
struct ramxlat {
@@ -58,7 +57,7 @@ ramddr2_wr[] = {
};
int
-nouveau_sddr2_calc(struct nouveau_ram *ram)
+nvkm_sddr2_calc(struct nvkm_ram *ram)
{
int CL, WR, DLL = 0, ODT = 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index a2dca4869e52..10844355c3f3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
* Roy Spliet <rspliet@eclipso.eu>
*/
-
#include "priv.h"
struct ramxlat {
@@ -67,7 +66,7 @@ ramddr3_cwl[] = {
};
int
-nouveau_sddr3_calc(struct nouveau_ram *ram)
+nvkm_sddr3_calc(struct nvkm_ram *ram)
{
int CWL, CL, WR, DLL = 0, ODT = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild
new file mode 100644
index 000000000000..f3d4e6e131b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild
@@ -0,0 +1,4 @@
+nvkm-y += nvkm/subdev/fuse/base.o
+nvkm-y += nvkm/subdev/fuse/nv50.o
+nvkm-y += nvkm/subdev/fuse/gf100.o
+nvkm-y += nvkm/subdev/fuse/gm107.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index 9e8e92127715..b7b7193bbce7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -21,34 +21,31 @@
*
* Authors: Martin Peres
*/
-
#include <subdev/fuse.h>
int
-_nouveau_fuse_init(struct nouveau_object *object)
+_nvkm_fuse_init(struct nvkm_object *object)
{
- struct nouveau_fuse *fuse = (void *)object;
- return nouveau_subdev_init(&fuse->base);
+ struct nvkm_fuse *fuse = (void *)object;
+ return nvkm_subdev_init(&fuse->base);
}
void
-_nouveau_fuse_dtor(struct nouveau_object *object)
+_nvkm_fuse_dtor(struct nvkm_object *object)
{
- struct nouveau_fuse *fuse = (void *)object;
- nouveau_subdev_destroy(&fuse->base);
+ struct nvkm_fuse *fuse = (void *)object;
+ nvkm_subdev_destroy(&fuse->base);
}
int
-nouveau_fuse_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+nvkm_fuse_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_fuse *fuse;
+ struct nvkm_fuse *fuse;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "FUSE",
- "fuse", length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "FUSE",
+ "fuse", length, pobject);
fuse = *pobject;
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 5ed03f54b3d4..393ef3a0faaf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -21,63 +21,58 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
struct gf100_fuse_priv {
- struct nouveau_fuse base;
+ struct nvkm_fuse base;
spinlock_t fuse_enable_lock;
};
static u32
-gf100_fuse_rd32(struct nouveau_object *object, u64 addr)
+gf100_fuse_rd32(struct nvkm_object *object, u64 addr)
{
struct gf100_fuse_priv *priv = (void *)object;
unsigned long flags;
u32 fuse_enable, unk, val;
+ /* racy if another part of nvkm starts writing to these regs */
spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-
- /* racy if another part of nouveau start writing to these regs */
fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800);
unk = nv_mask(priv, 0x21000, 0x1, 0x1);
val = nv_rd32(priv, 0x21100 + addr);
nv_wr32(priv, 0x21000, unk);
nv_wr32(priv, 0x22400, fuse_enable);
-
spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
-
return val;
}
static int
-gf100_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gf100_fuse_priv *priv;
int ret;
- ret = nouveau_fuse_create(parent, engine, oclass, &priv);
+ ret = nvkm_fuse_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
spin_lock_init(&priv->fuse_enable_lock);
-
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gf100_fuse_oclass = {
.handle = NV_SUBDEV(FUSE, 0xC0),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_fuse_ctor,
- .dtor = _nouveau_fuse_dtor,
- .init = _nouveau_fuse_init,
- .fini = _nouveau_fuse_fini,
+ .dtor = _nvkm_fuse_dtor,
+ .init = _nvkm_fuse_init,
+ .fini = _nvkm_fuse_fini,
.rd32 = gf100_fuse_rd32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 4f1a636c6538..ba19158a5912 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -21,31 +21,29 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
struct gm107_fuse_priv {
- struct nouveau_fuse base;
+ struct nvkm_fuse base;
};
static u32
-gm107_fuse_rd32(struct nouveau_object *object, u64 addr)
+gm107_fuse_rd32(struct nvkm_object *object, u64 addr)
{
struct gf100_fuse_priv *priv = (void *)object;
-
return nv_rd32(priv, 0x21100 + addr);
}
static int
-gm107_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gm107_fuse_priv *priv;
int ret;
- ret = nouveau_fuse_create(parent, engine, oclass, &priv);
+ ret = nvkm_fuse_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -53,14 +51,14 @@ gm107_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gm107_fuse_oclass = {
.handle = NV_SUBDEV(FUSE, 0x117),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm107_fuse_ctor,
- .dtor = _nouveau_fuse_dtor,
- .init = _nouveau_fuse_init,
- .fini = _nouveau_fuse_fini,
+ .dtor = _nvkm_fuse_dtor,
+ .init = _nvkm_fuse_init,
+ .fini = _nvkm_fuse_fini,
.rd32 = gm107_fuse_rd32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index a374ade485be..0d2afc426100 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -21,61 +21,56 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
-struct g80_fuse_priv {
- struct nouveau_fuse base;
+struct nv50_fuse_priv {
+ struct nvkm_fuse base;
spinlock_t fuse_enable_lock;
};
static u32
-g80_fuse_rd32(struct nouveau_object *object, u64 addr)
+nv50_fuse_rd32(struct nvkm_object *object, u64 addr)
{
- struct g80_fuse_priv *priv = (void *)object;
+ struct nv50_fuse_priv *priv = (void *)object;
unsigned long flags;
u32 fuse_enable, val;
+ /* racy if another part of nvkm starts writing to this reg */
spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-
- /* racy if another part of nouveau start writing to this reg */
fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800);
val = nv_rd32(priv, 0x21000 + addr);
nv_wr32(priv, 0x1084, fuse_enable);
-
spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
-
return val;
}
static int
-g80_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct g80_fuse_priv *priv;
+ struct nv50_fuse_priv *priv;
int ret;
- ret = nouveau_fuse_create(parent, engine, oclass, &priv);
+ ret = nvkm_fuse_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
spin_lock_init(&priv->fuse_enable_lock);
-
return 0;
}
-struct nouveau_oclass
-g80_fuse_oclass = {
+struct nvkm_oclass
+nv50_fuse_oclass = {
.handle = NV_SUBDEV(FUSE, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = g80_fuse_ctor,
- .dtor = _nouveau_fuse_dtor,
- .init = _nouveau_fuse_init,
- .fini = _nouveau_fuse_fini,
- .rd32 = g80_fuse_rd32,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_fuse_ctor,
+ .dtor = _nvkm_fuse_dtor,
+ .init = _nvkm_fuse_init,
+ .fini = _nvkm_fuse_fini,
+ .rd32 = nv50_fuse_rd32,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
new file mode 100644
index 000000000000..7e050f789384
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_FUSE_PRIV_H__
+#define __NVKM_FUSE_PRIV_H__
+#include <subdev/fuse.h>
+
+int _nvkm_fuse_init(struct nvkm_object *object);
+void _nvkm_fuse_dtor(struct nvkm_object *object);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
new file mode 100644
index 000000000000..ea42a9ed1821
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -0,0 +1,6 @@
+nvkm-y += nvkm/subdev/gpio/base.o
+nvkm-y += nvkm/subdev/gpio/nv10.o
+nvkm-y += nvkm/subdev/gpio/nv50.o
+nvkm-y += nvkm/subdev/gpio/g94.o
+nvkm-y += nvkm/subdev/gpio/gf110.o
+nvkm-y += nvkm/subdev/gpio/gk104.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 7ad99b763f4c..dea58161ba46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -21,32 +21,30 @@
*
* Authors: Ben Skeggs
*/
-
-#include <subdev/bios.h>
-#include <subdev/bios/gpio.h>
-
#include "priv.h"
+#include <core/device.h>
+#include <core/notify.h>
+
static int
-nouveau_gpio_drive(struct nouveau_gpio *gpio,
- int idx, int line, int dir, int out)
+nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
{
- const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
}
static int
-nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
+nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
{
- const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
return impl->sense ? impl->sense(gpio, line) : -ENODEV;
}
static int
-nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- struct dcb_gpio_func *func)
+nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
+ struct dcb_gpio_func *func)
{
- struct nouveau_bios *bios = nouveau_bios(gpio);
+ struct nvkm_bios *bios = nvkm_bios(gpio);
u8 ver, len;
u16 data;
@@ -74,30 +72,30 @@ nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
}
static int
-nouveau_gpio_set(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, int state)
+nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
{
struct dcb_gpio_func func;
int ret;
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+ ret = nvkm_gpio_find(gpio, idx, tag, line, &func);
if (ret == 0) {
int dir = !!(func.log[state] & 0x02);
int out = !!(func.log[state] & 0x01);
- ret = nouveau_gpio_drive(gpio, idx, func.line, dir, out);
+ ret = nvkm_gpio_drive(gpio, idx, func.line, dir, out);
}
return ret;
}
static int
-nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
+nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
{
struct dcb_gpio_func func;
int ret;
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+ ret = nvkm_gpio_find(gpio, idx, tag, line, &func);
if (ret == 0) {
- ret = nouveau_gpio_sense(gpio, idx, func.line);
+ ret = nvkm_gpio_sense(gpio, idx, func.line);
if (ret >= 0)
ret = (ret == (func.log[1] & 1));
}
@@ -106,24 +104,24 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
}
static void
-nouveau_gpio_intr_fini(struct nvkm_event *event, int type, int index)
+nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
- const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
+ const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
impl->intr_mask(gpio, type, 1 << index, 0);
}
static void
-nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index)
+nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
- const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
+ const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
impl->intr_mask(gpio, type, 1 << index, 1 << index);
}
static int
-nouveau_gpio_intr_ctor(struct nouveau_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_gpio_intr_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
struct nvkm_gpio_ntfy_req *req = data;
if (!WARN_ON(size != sizeof(*req))) {
@@ -136,10 +134,10 @@ nouveau_gpio_intr_ctor(struct nouveau_object *object, void *data, u32 size,
}
static void
-nouveau_gpio_intr(struct nouveau_subdev *subdev)
+nvkm_gpio_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_gpio *gpio = nouveau_gpio(subdev);
- const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+ const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
u32 hi, lo, i;
impl->intr_stat(gpio, &hi, &lo);
@@ -154,23 +152,23 @@ nouveau_gpio_intr(struct nouveau_subdev *subdev)
}
static const struct nvkm_event_func
-nouveau_gpio_intr_func = {
- .ctor = nouveau_gpio_intr_ctor,
- .init = nouveau_gpio_intr_init,
- .fini = nouveau_gpio_intr_fini,
+nvkm_gpio_intr_func = {
+ .ctor = nvkm_gpio_intr_ctor,
+ .init = nvkm_gpio_intr_init,
+ .fini = nvkm_gpio_intr_fini,
};
int
-_nouveau_gpio_fini(struct nouveau_object *object, bool suspend)
+_nvkm_gpio_fini(struct nvkm_object *object, bool suspend)
{
- const struct nouveau_gpio_impl *impl = (void *)object->oclass;
- struct nouveau_gpio *gpio = nouveau_gpio(object);
+ const struct nvkm_gpio_impl *impl = (void *)object->oclass;
+ struct nvkm_gpio *gpio = nvkm_gpio(object);
u32 mask = (1 << impl->lines) - 1;
impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
impl->intr_stat(gpio, &mask, &mask);
- return nouveau_subdev_fini(&gpio->base, suspend);
+ return nvkm_subdev_fini(&gpio->base, suspend);
}
static struct dmi_system_id gpio_reset_ids[] = {
@@ -185,12 +183,12 @@ static struct dmi_system_id gpio_reset_ids[] = {
};
int
-_nouveau_gpio_init(struct nouveau_object *object)
+_nvkm_gpio_init(struct nvkm_object *object)
{
- struct nouveau_gpio *gpio = nouveau_gpio(object);
+ struct nvkm_gpio *gpio = nvkm_gpio(object);
int ret;
- ret = nouveau_subdev_init(&gpio->base);
+ ret = nvkm_subdev_init(&gpio->base);
if (ret)
return ret;
@@ -201,52 +199,50 @@ _nouveau_gpio_init(struct nouveau_object *object)
}
void
-_nouveau_gpio_dtor(struct nouveau_object *object)
+_nvkm_gpio_dtor(struct nvkm_object *object)
{
- struct nouveau_gpio *gpio = (void *)object;
+ struct nvkm_gpio *gpio = (void *)object;
nvkm_event_fini(&gpio->event);
- nouveau_subdev_destroy(&gpio->base);
+ nvkm_subdev_destroy(&gpio->base);
}
int
-nouveau_gpio_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
+nvkm_gpio_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- const struct nouveau_gpio_impl *impl = (void *)oclass;
- struct nouveau_gpio *gpio;
+ const struct nvkm_gpio_impl *impl = (void *)oclass;
+ struct nvkm_gpio *gpio;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "GPIO", "gpio",
- length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "GPIO",
+ "gpio", length, pobject);
gpio = *pobject;
if (ret)
return ret;
- gpio->find = nouveau_gpio_find;
- gpio->set = nouveau_gpio_set;
- gpio->get = nouveau_gpio_get;
+ gpio->find = nvkm_gpio_find;
+ gpio->set = nvkm_gpio_set;
+ gpio->get = nvkm_gpio_get;
gpio->reset = impl->reset;
- ret = nvkm_event_init(&nouveau_gpio_intr_func, 2, impl->lines,
+ ret = nvkm_event_init(&nvkm_gpio_intr_func, 2, impl->lines,
&gpio->event);
if (ret)
return ret;
- nv_subdev(gpio)->intr = nouveau_gpio_intr;
+ nv_subdev(gpio)->intr = nvkm_gpio_intr;
return 0;
}
int
-_nouveau_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_gpio_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_gpio *gpio;
+ struct nvkm_gpio *gpio;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &gpio);
+ ret = nvkm_gpio_create(parent, engine, oclass, &gpio);
*pobject = nv_object(gpio);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index cae404ccadac..12b3e01fca8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
void
-nv94_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
+g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
u32 intr0 = nv_rd32(gpio, 0x00e054);
u32 intr1 = nv_rd32(gpio, 0x00e074);
@@ -38,7 +37,7 @@ nv94_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
}
void
-nv94_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
+g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
u32 inte0 = nv_rd32(gpio, 0x00e050);
u32 inte1 = nv_rd32(gpio, 0x00e070);
@@ -56,18 +55,18 @@ nv94_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
nv_wr32(gpio, 0x00e070, inte1);
}
-struct nouveau_oclass *
-nv94_gpio_oclass = &(struct nouveau_gpio_impl) {
+struct nvkm_oclass *
+g94_gpio_oclass = &(struct nvkm_gpio_impl) {
.base.handle = NV_SUBDEV(GPIO, 0x94),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_gpio_ctor,
- .dtor = _nouveau_gpio_dtor,
- .init = _nouveau_gpio_init,
- .fini = _nouveau_gpio_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_gpio_ctor,
+ .dtor = _nvkm_gpio_dtor,
+ .init = _nvkm_gpio_init,
+ .fini = _nvkm_gpio_fini,
},
.lines = 32,
- .intr_stat = nv94_gpio_intr_stat,
- .intr_mask = nv94_gpio_intr_mask,
+ .intr_stat = g94_gpio_intr_stat,
+ .intr_mask = g94_gpio_intr_mask,
.drive = nv50_gpio_drive,
.sense = nv50_gpio_sense,
.reset = nv50_gpio_reset,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
index 480d6d2af770..2c3bb255d1f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
void
-nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
+gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
- struct nouveau_bios *bios = nouveau_bios(gpio);
+ struct nvkm_bios *bios = nvkm_bios(gpio);
u8 ver, len;
u16 entry;
int ent = -1;
@@ -53,7 +52,7 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
}
int
-nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+gf110_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
u32 data = ((dir ^ 1) << 13) | (out << 12);
nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
@@ -62,24 +61,24 @@ nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
int
-nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
+gf110_gpio_sense(struct nvkm_gpio *gpio, int line)
{
return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
}
-struct nouveau_oclass *
-nvd0_gpio_oclass = &(struct nouveau_gpio_impl) {
+struct nvkm_oclass *
+gf110_gpio_oclass = &(struct nvkm_gpio_impl) {
.base.handle = NV_SUBDEV(GPIO, 0xd0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_gpio_ctor,
- .dtor = _nouveau_gpio_dtor,
- .init = _nouveau_gpio_init,
- .fini = _nouveau_gpio_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_gpio_ctor,
+ .dtor = _nvkm_gpio_dtor,
+ .init = _nvkm_gpio_init,
+ .fini = _nvkm_gpio_fini,
},
.lines = 32,
- .intr_stat = nv94_gpio_intr_stat,
- .intr_mask = nv94_gpio_intr_mask,
- .drive = nvd0_gpio_drive,
- .sense = nvd0_gpio_sense,
- .reset = nvd0_gpio_reset,
+ .intr_stat = g94_gpio_intr_stat,
+ .intr_mask = g94_gpio_intr_mask,
+ .drive = gf110_gpio_drive,
+ .sense = gf110_gpio_sense,
+ .reset = gf110_gpio_reset,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index e1145b48c76c..42fd2faaaa4f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static void
-nve0_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
+gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
u32 intr0 = nv_rd32(gpio, 0x00dc00);
u32 intr1 = nv_rd32(gpio, 0x00dc80);
@@ -38,7 +37,7 @@ nve0_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
}
void
-nve0_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
+gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
u32 inte0 = nv_rd32(gpio, 0x00dc08);
u32 inte1 = nv_rd32(gpio, 0x00dc88);
@@ -56,19 +55,19 @@ nve0_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
nv_wr32(gpio, 0x00dc88, inte1);
}
-struct nouveau_oclass *
-nve0_gpio_oclass = &(struct nouveau_gpio_impl) {
+struct nvkm_oclass *
+gk104_gpio_oclass = &(struct nvkm_gpio_impl) {
.base.handle = NV_SUBDEV(GPIO, 0xe0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_gpio_ctor,
- .dtor = _nouveau_gpio_dtor,
- .init = _nouveau_gpio_init,
- .fini = _nouveau_gpio_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_gpio_ctor,
+ .dtor = _nvkm_gpio_dtor,
+ .init = _nvkm_gpio_init,
+ .fini = _nvkm_gpio_fini,
},
.lines = 32,
- .intr_stat = nve0_gpio_intr_stat,
- .intr_mask = nve0_gpio_intr_mask,
- .drive = nvd0_gpio_drive,
- .sense = nvd0_gpio_sense,
- .reset = nvd0_gpio_reset,
+ .intr_stat = gk104_gpio_intr_stat,
+ .intr_mask = gk104_gpio_intr_mask,
+ .drive = gf110_gpio_drive,
+ .sense = gf110_gpio_sense,
+ .reset = gf110_gpio_reset,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index 27ad23eaf185..2b295154247e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -23,11 +23,10 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "priv.h"
static int
-nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
+nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
{
if (line < 2) {
line = line * 16;
@@ -49,7 +48,7 @@ nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
}
static int
-nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
u32 reg, mask, data;
@@ -79,7 +78,7 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
static void
-nv10_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
+nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
u32 intr = nv_rd32(gpio, 0x001104);
u32 stat = nv_rd32(gpio, 0x001144) & intr;
@@ -89,7 +88,7 @@ nv10_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
}
static void
-nv10_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
+nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
u32 inte = nv_rd32(gpio, 0x001144);
if (type & NVKM_GPIO_LO)
@@ -99,14 +98,14 @@ nv10_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
nv_wr32(gpio, 0x001144, inte);
}
-struct nouveau_oclass *
-nv10_gpio_oclass = &(struct nouveau_gpio_impl) {
+struct nvkm_oclass *
+nv10_gpio_oclass = &(struct nvkm_gpio_impl) {
.base.handle = NV_SUBDEV(GPIO, 0x10),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_gpio_ctor,
- .dtor = _nouveau_gpio_dtor,
- .init = _nouveau_gpio_init,
- .fini = _nouveau_gpio_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_gpio_ctor,
+ .dtor = _nvkm_gpio_dtor,
+ .init = _nvkm_gpio_init,
+ .fini = _nvkm_gpio_fini,
},
.lines = 16,
.intr_stat = nv10_gpio_intr_stat,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 2e30d5a62d6e..6a031035bd27 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
void
-nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
+nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
- struct nouveau_bios *bios = nouveau_bios(gpio);
+ struct nvkm_bios *bios = nvkm_bios(gpio);
u8 ver, len;
u16 entry;
int ent = -1;
@@ -68,7 +67,7 @@ nv50_gpio_location(int line, u32 *reg, u32 *shift)
}
int
-nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
u32 reg, shift;
@@ -80,7 +79,7 @@ nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
int
-nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
+nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
{
u32 reg, shift;
@@ -91,7 +90,7 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
}
static void
-nv50_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
+nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
u32 intr = nv_rd32(gpio, 0x00e054);
u32 stat = nv_rd32(gpio, 0x00e050) & intr;
@@ -101,7 +100,7 @@ nv50_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
}
static void
-nv50_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
+nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
u32 inte = nv_rd32(gpio, 0x00e050);
if (type & NVKM_GPIO_LO)
@@ -111,14 +110,14 @@ nv50_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
nv_wr32(gpio, 0x00e050, inte);
}
-struct nouveau_oclass *
-nv50_gpio_oclass = &(struct nouveau_gpio_impl) {
+struct nvkm_oclass *
+nv50_gpio_oclass = &(struct nvkm_gpio_impl) {
.base.handle = NV_SUBDEV(GPIO, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_gpio_ctor,
- .dtor = _nouveau_gpio_dtor,
- .init = _nouveau_gpio_init,
- .fini = _nouveau_gpio_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_gpio_ctor,
+ .dtor = _nvkm_gpio_dtor,
+ .init = _nvkm_gpio_init,
+ .fini = _nvkm_gpio_fini,
},
.lines = 16,
.intr_stat = nv50_gpio_intr_stat,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
new file mode 100644
index 000000000000..382f8d44e140
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -0,0 +1,64 @@
+#ifndef __NVKM_GPIO_PRIV_H__
+#define __NVKM_GPIO_PRIV_H__
+#include <subdev/gpio.h>
+
+#define nvkm_gpio_create(p,e,o,d) \
+ nvkm_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_gpio_destroy(p) ({ \
+ struct nvkm_gpio *gpio = (p); \
+ _nvkm_gpio_dtor(nv_object(gpio)); \
+})
+#define nvkm_gpio_init(p) ({ \
+ struct nvkm_gpio *gpio = (p); \
+ _nvkm_gpio_init(nv_object(gpio)); \
+})
+#define nvkm_gpio_fini(p,s) ({ \
+ struct nvkm_gpio *gpio = (p); \
+ _nvkm_gpio_fini(nv_object(gpio), (s)); \
+})
+
+int nvkm_gpio_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+int _nvkm_gpio_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void _nvkm_gpio_dtor(struct nvkm_object *);
+int _nvkm_gpio_init(struct nvkm_object *);
+int _nvkm_gpio_fini(struct nvkm_object *, bool);
+
+struct nvkm_gpio_impl {
+ struct nvkm_oclass base;
+ int lines;
+
+ /* read and ack pending interrupts, returning only data
+ * for lines that have not been masked off, while still
+ * performing the ack for anything that was pending.
+ */
+ void (*intr_stat)(struct nvkm_gpio *, u32 *, u32 *);
+
+ /* mask on/off interrupts for hi/lo transitions on a
+ * given set of gpio lines
+ */
+ void (*intr_mask)(struct nvkm_gpio *, u32, u32, u32);
+
+ /* configure gpio direction and output value */
+ int (*drive)(struct nvkm_gpio *, int line, int dir, int out);
+
+ /* sense current state of given gpio line */
+ int (*sense)(struct nvkm_gpio *, int line);
+
+ /*XXX*/
+ void (*reset)(struct nvkm_gpio *, u8);
+};
+
+void nv50_gpio_reset(struct nvkm_gpio *, u8);
+int nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
+int nv50_gpio_sense(struct nvkm_gpio *, int);
+
+void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *);
+void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32);
+
+void gf110_gpio_reset(struct nvkm_gpio *, u8);
+int gf110_gpio_drive(struct nvkm_gpio *, int, int, int);
+int gf110_gpio_sense(struct nvkm_gpio *, int);
+#endif
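The hook table declared above is what each chipset file fills in. As a point of reference, a minimal sketch of how a hypothetical chipset class would be assembled from these declarations, modeled on the g94/gf110/gk104 classes earlier in this patch; the "gxyz" name and the 0xff handle are illustrative only and do not exist in the tree:

/* Illustrative sketch, not part of this patch: a hypothetical chipset
 * ("gxyz", handle 0xff) reusing the common ctor/dtor/init/fini helpers
 * and the per-chipset hooks declared in priv.h above.
 */
struct nvkm_oclass *
gxyz_gpio_oclass = &(struct nvkm_gpio_impl) {
	.base.handle = NV_SUBDEV(GPIO, 0xff),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = _nvkm_gpio_ctor,
		.dtor = _nvkm_gpio_dtor,
		.init = _nvkm_gpio_init,
		.fini = _nvkm_gpio_fini,
	},
	.lines     = 32,
	.intr_stat = g94_gpio_intr_stat,
	.intr_mask = g94_gpio_intr_mask,
	.drive     = gf110_gpio_drive,
	.sense     = gf110_gpio_sense,
	.reset     = gf110_gpio_reset,
}.base;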
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
new file mode 100644
index 000000000000..d68307409980
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -0,0 +1,16 @@
+nvkm-y += nvkm/subdev/i2c/base.o
+nvkm-y += nvkm/subdev/i2c/anx9805.o
+nvkm-y += nvkm/subdev/i2c/aux.o
+nvkm-y += nvkm/subdev/i2c/bit.o
+nvkm-y += nvkm/subdev/i2c/pad.o
+nvkm-y += nvkm/subdev/i2c/padnv04.o
+nvkm-y += nvkm/subdev/i2c/padg94.o
+nvkm-y += nvkm/subdev/i2c/padgm204.o
+nvkm-y += nvkm/subdev/i2c/nv04.o
+nvkm-y += nvkm/subdev/i2c/nv4e.o
+nvkm-y += nvkm/subdev/i2c/nv50.o
+nvkm-y += nvkm/subdev/i2c/g94.o
+nvkm-y += nvkm/subdev/i2c/gf110.o
+nvkm-y += nvkm/subdev/i2c/gf117.o
+nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gm204.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index 2c2731a6cf91..d17dd1cf3c34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include "port.h"
struct anx9805_i2c_port {
- struct nouveau_i2c_port base;
+ struct nvkm_i2c_port base;
u32 addr;
u32 ctrl;
};
static int
-anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
+anx9805_train(struct nvkm_i2c_port *port, int link_nr, int link_bw, bool enh)
{
struct anx9805_i2c_port *chan = (void *)port;
- struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
u8 tmp, i;
DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
@@ -62,11 +61,11 @@ anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
}
static int
-anx9805_aux(struct nouveau_i2c_port *port, bool retry,
+anx9805_aux(struct nvkm_i2c_port *port, bool retry,
u8 type, u32 addr, u8 *data, u8 size)
{
struct anx9805_i2c_port *chan = (void *)port;
- struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
int i, ret = -ETIMEDOUT;
u8 buf[16] = {};
u8 tmp;
@@ -116,25 +115,25 @@ done:
return ret;
}
-static const struct nouveau_i2c_func
+static const struct nvkm_i2c_func
anx9805_aux_func = {
.aux = anx9805_aux,
.lnk_ctl = anx9805_train,
};
static int
-anx9805_aux_chan_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+anx9805_aux_chan_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
- struct nouveau_i2c_port *mast = (void *)parent;
+ struct nvkm_i2c_port *mast = (void *)parent;
struct anx9805_i2c_port *chan;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &anx9805_aux_func,
- &chan);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_aux_algo, &anx9805_aux_func,
+ &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -156,22 +155,23 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
+
return 0;
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
anx9805_aux_ofuncs = {
.ctor = anx9805_aux_chan_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
};
static int
anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct anx9805_i2c_port *port = adap->algo_data;
- struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
+ struct nvkm_i2c_port *mast = (void *)nv_object(port)->parent;
struct i2c_msg *msg = msgs;
int ret = -ETIMEDOUT;
int i, j, cnt = num;
@@ -233,23 +233,22 @@ anx9805_i2c_algo = {
.functionality = anx9805_func
};
-static const struct nouveau_i2c_func
+static const struct nvkm_i2c_func
anx9805_i2c_func = {
};
static int
-anx9805_ddc_port_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+anx9805_ddc_port_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
- struct nouveau_i2c_port *mast = (void *)parent;
+ struct nvkm_i2c_port *mast = (void *)parent;
struct anx9805_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &anx9805_i2c_algo, &anx9805_i2c_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &anx9805_i2c_algo, &anx9805_i2c_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -271,19 +270,20 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
+
return 0;
}
-static struct nouveau_ofuncs
+static struct nvkm_ofuncs
anx9805_ddc_ofuncs = {
.ctor = anx9805_ddc_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
};
-struct nouveau_oclass
-nouveau_anx9805_sclass[] = {
+struct nvkm_oclass
+nvkm_anx9805_sclass[] = {
{ .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
{ .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
{ .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 02eb42be2e9e..1c18860f80d1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
int
-nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
+nv_rdaux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- struct nouveau_i2c *i2c = nouveau_i2c(port);
+ struct nvkm_i2c *i2c = nvkm_i2c(port);
if (port->func->aux) {
int ret = i2c->acquire(port, 0);
if (ret == 0) {
@@ -40,9 +39,9 @@ nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
}
int
-nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
+nv_wraux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- struct nouveau_i2c *i2c = nouveau_i2c(port);
+ struct nvkm_i2c *i2c = nvkm_i2c(port);
if (port->func->aux) {
int ret = i2c->acquire(port, 0);
if (ret == 0) {
@@ -57,8 +56,8 @@ nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
static int
aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *port = adap->algo_data;
- struct nouveau_i2c *i2c = nouveau_i2c(port);
+ struct nvkm_i2c_port *port = adap->algo_data;
+ struct nvkm_i2c *i2c = nvkm_i2c(port);
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
@@ -108,7 +107,7 @@ aux_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm nouveau_i2c_aux_algo = {
+const struct i2c_algorithm nvkm_i2c_aux_algo = {
.master_xfer = aux_xfer,
.functionality = aux_func
};
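nv_rdaux() and nv_wraux() above wrap the port's ->aux hook in the subdev's acquire()/release() locking, so a caller only needs a port pointer and an address. A minimal illustrative caller, not part of this patch; the helper name and the use of DPCD address 0x0000 are examples only:

/* Illustrative only: read the first DPCD bytes from a sink behind an
 * AUX-capable port using the wrapper above.
 */
static int
example_read_dpcd_rev(struct nvkm_i2c_port *port)
{
	u8 dpcd[4] = {};
	int ret = nv_rdaux(port, 0x0000, dpcd, sizeof(dpcd));
	if (ret)
		return ret;
	return dpcd[0]; /* DPCD_REV, e.g. 0x11 for DisplayPort 1.1 */
}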
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 0dc605db7ec8..9200f122c02c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -21,18 +21,14 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include "pad.h"
+#include <core/device.h>
+#include <core/notify.h>
#include <core/option.h>
-#include <core/object.h>
-#include <core/event.h>
-
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
-#include <subdev/bios/i2c.h>
-#include <subdev/vga.h>
-
-#include "priv.h"
-#include "pad.h"
/******************************************************************************
* interface to linux i2c bit-banging algorithm
@@ -45,46 +41,46 @@
#endif
static int
-nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
+nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
{
struct i2c_algo_bit_data *bit = adap->algo_data;
- struct nouveau_i2c_port *port = bit->data;
- return nouveau_i2c(port)->acquire(port, bit->timeout);
+ struct nvkm_i2c_port *port = bit->data;
+ return nvkm_i2c(port)->acquire(port, bit->timeout);
}
static void
-nouveau_i2c_post_xfer(struct i2c_adapter *adap)
+nvkm_i2c_post_xfer(struct i2c_adapter *adap)
{
struct i2c_algo_bit_data *bit = adap->algo_data;
- struct nouveau_i2c_port *port = bit->data;
- return nouveau_i2c(port)->release(port);
+ struct nvkm_i2c_port *port = bit->data;
+ return nvkm_i2c(port)->release(port);
}
static void
-nouveau_i2c_setscl(void *data, int state)
+nvkm_i2c_setscl(void *data, int state)
{
- struct nouveau_i2c_port *port = data;
+ struct nvkm_i2c_port *port = data;
port->func->drive_scl(port, state);
}
static void
-nouveau_i2c_setsda(void *data, int state)
+nvkm_i2c_setsda(void *data, int state)
{
- struct nouveau_i2c_port *port = data;
+ struct nvkm_i2c_port *port = data;
port->func->drive_sda(port, state);
}
static int
-nouveau_i2c_getscl(void *data)
+nvkm_i2c_getscl(void *data)
{
- struct nouveau_i2c_port *port = data;
+ struct nvkm_i2c_port *port = data;
return port->func->sense_scl(port);
}
static int
-nouveau_i2c_getsda(void *data)
+nvkm_i2c_getsda(void *data)
{
- struct nouveau_i2c_port *port = data;
+ struct nvkm_i2c_port *port = data;
return port->func->sense_sda(port);
}
@@ -93,42 +89,41 @@ nouveau_i2c_getsda(void *data)
*****************************************************************************/
int
-_nouveau_i2c_port_fini(struct nouveau_object *object, bool suspend)
+_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_i2c_port *port = (void *)object;
+ struct nvkm_i2c_port *port = (void *)object;
struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
nv_ofuncs(pad)->fini(nv_object(pad), suspend);
- return nouveau_object_fini(&port->base, suspend);
+ return nvkm_object_fini(&port->base, suspend);
}
void
-_nouveau_i2c_port_dtor(struct nouveau_object *object)
+_nvkm_i2c_port_dtor(struct nvkm_object *object)
{
- struct nouveau_i2c_port *port = (void *)object;
+ struct nvkm_i2c_port *port = (void *)object;
i2c_del_adapter(&port->adapter);
- nouveau_object_destroy(&port->base);
+ nvkm_object_destroy(&port->base);
}
int
-nouveau_i2c_port_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u8 index,
- const struct i2c_algorithm *algo,
- const struct nouveau_i2c_func *func,
- int size, void **pobject)
+nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u8 index,
+ const struct i2c_algorithm *algo,
+ const struct nvkm_i2c_func *func,
+ int size, void **pobject)
{
- struct nouveau_device *device = nv_device(engine);
- struct nouveau_i2c *i2c = (void *)engine;
- struct nouveau_i2c_port *port;
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_i2c *i2c = nvkm_i2c(parent);
+ struct nvkm_i2c_port *port;
int ret;
- ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
port = *pobject;
if (ret)
return ret;
snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nouveau-%s-%d", device->name, index);
+ "nvkm-%s-%d", device->name, index);
port->adapter.owner = THIS_MODULE;
port->adapter.dev.parent = nv_device_base(device);
port->index = index;
@@ -136,8 +131,8 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
port->func = func;
mutex_init(&port->mutex);
- if ( algo == &nouveau_i2c_bit_algo &&
- !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
+ if ( algo == &nvkm_i2c_bit_algo &&
+ !nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
struct i2c_algo_bit_data *bit;
bit = kzalloc(sizeof(*bit), GFP_KERNEL);
@@ -147,12 +142,12 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
bit->udelay = 10;
bit->timeout = usecs_to_jiffies(2200);
bit->data = port;
- bit->pre_xfer = nouveau_i2c_pre_xfer;
- bit->post_xfer = nouveau_i2c_post_xfer;
- bit->setsda = nouveau_i2c_setsda;
- bit->setscl = nouveau_i2c_setscl;
- bit->getsda = nouveau_i2c_getsda;
- bit->getscl = nouveau_i2c_getscl;
+ bit->pre_xfer = nvkm_i2c_pre_xfer;
+ bit->post_xfer = nvkm_i2c_post_xfer;
+ bit->setsda = nvkm_i2c_setsda;
+ bit->setscl = nvkm_i2c_setscl;
+ bit->getsda = nvkm_i2c_getsda;
+ bit->getscl = nvkm_i2c_getscl;
port->adapter.algo_data = bit;
ret = i2c_bit_add_bus(&port->adapter);
@@ -171,11 +166,11 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
* base i2c subdev class implementation
*****************************************************************************/
-static struct nouveau_i2c_port *
-nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
+static struct nvkm_i2c_port *
+nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
{
- struct nouveau_bios *bios = nouveau_bios(i2c);
- struct nouveau_i2c_port *port;
+ struct nvkm_bios *bios = nvkm_bios(i2c);
+ struct nvkm_i2c_port *port;
if (index == NV_I2C_DEFAULT(0) ||
index == NV_I2C_DEFAULT(1)) {
@@ -200,10 +195,10 @@ nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
return NULL;
}
-static struct nouveau_i2c_port *
-nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
+static struct nvkm_i2c_port *
+nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type)
{
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c_port *port;
list_for_each_entry(port, &i2c->ports, head) {
if (nv_hclass(port) == type)
@@ -214,10 +209,10 @@ nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
}
static void
-nouveau_i2c_release_pad(struct nouveau_i2c_port *port)
+nvkm_i2c_release_pad(struct nvkm_i2c_port *port)
{
struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
- struct nouveau_i2c *i2c = nouveau_i2c(port);
+ struct nvkm_i2c *i2c = nvkm_i2c(port);
if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
nv_ofuncs(pad)->fini(nv_object(pad), false);
@@ -226,18 +221,18 @@ nouveau_i2c_release_pad(struct nouveau_i2c_port *port)
}
static int
-nouveau_i2c_try_acquire_pad(struct nouveau_i2c_port *port)
+nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
{
struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
- struct nouveau_object *owner = (void *)pad->port;
+ struct nvkm_object *owner = (void *)pad->port;
do {
if (owner == (void *)port)
return 0;
owner = owner->parent;
} while(owner);
- nouveau_i2c_release_pad(port);
+ nvkm_i2c_release_pad(port);
return -EBUSY;
}
@@ -247,48 +242,48 @@ nouveau_i2c_try_acquire_pad(struct nouveau_i2c_port *port)
}
static int
-nouveau_i2c_acquire_pad(struct nouveau_i2c_port *port, unsigned long timeout)
+nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
{
- struct nouveau_i2c *i2c = nouveau_i2c(port);
+ struct nvkm_i2c *i2c = nvkm_i2c(port);
if (timeout) {
if (wait_event_timeout(i2c->wait,
- nouveau_i2c_try_acquire_pad(port) == 0,
+ nvkm_i2c_try_acquire_pad(port) == 0,
timeout) == 0)
return -EBUSY;
} else {
- wait_event(i2c->wait, nouveau_i2c_try_acquire_pad(port) == 0);
+ wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
}
return 0;
}
static void
-nouveau_i2c_release(struct nouveau_i2c_port *port)
+nvkm_i2c_release(struct nvkm_i2c_port *port)
__releases(pad->mutex)
{
- nouveau_i2c(port)->release_pad(port);
+ nvkm_i2c(port)->release_pad(port);
mutex_unlock(&port->mutex);
}
static int
-nouveau_i2c_acquire(struct nouveau_i2c_port *port, unsigned long timeout)
+nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
__acquires(pad->mutex)
{
int ret;
mutex_lock(&port->mutex);
- if ((ret = nouveau_i2c(port)->acquire_pad(port, timeout)))
+ if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
mutex_unlock(&port->mutex);
return ret;
}
static int
-nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
- struct nouveau_i2c_board_info *info,
- bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *, void *), void *data)
+nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
+ struct nvkm_i2c_board_info *info,
+ bool (*match)(struct nvkm_i2c_port *,
+ struct i2c_board_info *, void *), void *data)
{
- struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
+ struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
int i;
if (!port) {
@@ -327,27 +322,27 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
static void
-nouveau_i2c_intr_fini(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
- struct nouveau_i2c_port *port = i2c->find(i2c, index);
- const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
+ struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
+ struct nvkm_i2c_port *port = i2c->find(i2c, index);
+ const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
if (port && port->aux >= 0)
impl->aux_mask(i2c, type, 1 << port->aux, 0);
}
static void
-nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
- struct nouveau_i2c_port *port = i2c->find(i2c, index);
- const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
+ struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
+ struct nvkm_i2c_port *port = i2c->find(i2c, index);
+ const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
if (port && port->aux >= 0)
impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
}
static int
-nouveau_i2c_intr_ctor(struct nouveau_object *object, void *data, u32 size,
+nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
{
struct nvkm_i2c_ntfy_req *req = data;
@@ -361,11 +356,11 @@ nouveau_i2c_intr_ctor(struct nouveau_object *object, void *data, u32 size,
}
static void
-nouveau_i2c_intr(struct nouveau_subdev *subdev)
+nvkm_i2c_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_i2c_impl *impl = (void *)nv_oclass(subdev);
- struct nouveau_i2c *i2c = nouveau_i2c(subdev);
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_port *port;
u32 hi, lo, rq, tx, e;
if (impl->aux_stat) {
@@ -393,18 +388,18 @@ nouveau_i2c_intr(struct nouveau_subdev *subdev)
}
static const struct nvkm_event_func
-nouveau_i2c_intr_func = {
- .ctor = nouveau_i2c_intr_ctor,
- .init = nouveau_i2c_intr_init,
- .fini = nouveau_i2c_intr_fini,
+nvkm_i2c_intr_func = {
+ .ctor = nvkm_i2c_intr_ctor,
+ .init = nvkm_i2c_intr_init,
+ .fini = nvkm_i2c_intr_fini,
};
int
-_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
+_nvkm_i2c_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_i2c_impl *impl = (void *)nv_oclass(object);
- struct nouveau_i2c *i2c = (void *)object;
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c_impl *impl = (void *)nv_oclass(object);
+ struct nvkm_i2c *i2c = (void *)object;
+ struct nvkm_i2c_port *port;
u32 mask;
int ret;
@@ -419,7 +414,7 @@ _nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
}
- return nouveau_subdev_fini(&i2c->base, suspend);
+ return nvkm_subdev_fini(&i2c->base, suspend);
fail:
list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
nv_ofuncs(port)->init(nv_object(port));
@@ -429,13 +424,13 @@ fail:
}
int
-_nouveau_i2c_init(struct nouveau_object *object)
+_nvkm_i2c_init(struct nvkm_object *object)
{
- struct nouveau_i2c *i2c = (void *)object;
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c *i2c = (void *)object;
+ struct nvkm_i2c_port *port;
int ret;
- ret = nouveau_subdev_init(&i2c->base);
+ ret = nvkm_subdev_init(&i2c->base);
if (ret == 0) {
list_for_each_entry(port, &i2c->ports, head) {
ret = nv_ofuncs(port)->init(nv_object(port));
@@ -454,33 +449,33 @@ fail:
}
void
-_nouveau_i2c_dtor(struct nouveau_object *object)
+_nvkm_i2c_dtor(struct nvkm_object *object)
{
- struct nouveau_i2c *i2c = (void *)object;
- struct nouveau_i2c_port *port, *temp;
+ struct nvkm_i2c *i2c = (void *)object;
+ struct nvkm_i2c_port *port, *temp;
nvkm_event_fini(&i2c->event);
list_for_each_entry_safe(port, temp, &i2c->ports, head) {
- nouveau_object_ref(NULL, (struct nouveau_object **)&port);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&port);
}
- nouveau_subdev_destroy(&i2c->base);
+ nvkm_subdev_destroy(&i2c->base);
}
-static struct nouveau_oclass *
-nouveau_i2c_extdev_sclass[] = {
- nouveau_anx9805_sclass,
+static struct nvkm_oclass *
+nvkm_i2c_extdev_sclass[] = {
+ nvkm_anx9805_sclass,
};
static void
-nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
- struct dcb_i2c_entry *info)
+nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type,
+ struct dcb_i2c_entry *info)
{
- const struct nouveau_i2c_impl *impl = (void *)nv_oclass(i2c);
- struct nouveau_oclass *oclass;
- struct nouveau_object *parent;
- struct nouveau_object *object;
+ const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
+ struct nvkm_oclass *oclass;
+ struct nvkm_object *parent;
+ struct nvkm_object *object;
int ret, pad;
if (info->share != DCB_I2C_UNUSED) {
@@ -494,8 +489,8 @@ nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
oclass = impl->pad_x;
}
- ret = nouveau_object_ctor(NULL, nv_object(i2c), oclass, NULL, pad,
- &parent);
+ ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass,
+ NULL, pad, &parent);
if (ret < 0)
return;
@@ -503,44 +498,40 @@ nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
do {
ret = -EINVAL;
if (oclass->handle == type) {
- ret = nouveau_object_ctor(parent, nv_object(i2c),
- oclass, info, index,
- &object);
+ ret = nvkm_object_ctor(parent, NULL, oclass,
+ info, index, &object);
}
} while (ret && (++oclass)->handle);
- nouveau_object_ref(NULL, &parent);
+ nvkm_object_ref(NULL, &parent);
}
int
-nouveau_i2c_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
+nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_i2c *i2c;
- struct nouveau_object *object;
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_i2c *i2c;
+ struct nvkm_object *object;
struct dcb_i2c_entry info;
int ret, i, j, index = -1;
struct dcb_output outp;
u8 ver, hdr;
u32 data;
- ret = nouveau_subdev_create(parent, engine, oclass, 0,
- "I2C", "i2c", &i2c);
+ ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c);
*pobject = nv_object(i2c);
if (ret)
return ret;
- nv_subdev(i2c)->intr = nouveau_i2c_intr;
- i2c->find = nouveau_i2c_find;
- i2c->find_type = nouveau_i2c_find_type;
- i2c->acquire_pad = nouveau_i2c_acquire_pad;
- i2c->release_pad = nouveau_i2c_release_pad;
- i2c->acquire = nouveau_i2c_acquire;
- i2c->release = nouveau_i2c_release;
- i2c->identify = nouveau_i2c_identify;
+ nv_subdev(i2c)->intr = nvkm_i2c_intr;
+ i2c->find = nvkm_i2c_find;
+ i2c->find_type = nvkm_i2c_find_type;
+ i2c->acquire_pad = nvkm_i2c_acquire_pad;
+ i2c->release_pad = nvkm_i2c_release_pad;
+ i2c->acquire = nvkm_i2c_acquire;
+ i2c->release = nvkm_i2c_release;
+ i2c->identify = nvkm_i2c_identify;
init_waitqueue_head(&i2c->wait);
INIT_LIST_HEAD(&i2c->ports);
@@ -549,23 +540,21 @@ nouveau_i2c_create_(struct nouveau_object *parent,
case DCB_I2C_NV04_BIT:
case DCB_I2C_NV4E_BIT:
case DCB_I2C_NVIO_BIT:
- nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
- info.type, &info);
+ nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
+ info.type, &info);
break;
case DCB_I2C_NVIO_AUX:
- nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
- info.type, &info);
+ nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
+ info.type, &info);
break;
case DCB_I2C_PMGR:
if (info.drive != DCB_I2C_UNUSED) {
- nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
- DCB_I2C_NVIO_BIT,
- &info);
+ nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
+ DCB_I2C_NVIO_BIT, &info);
}
if (info.auxch != DCB_I2C_UNUSED) {
- nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
- DCB_I2C_NVIO_AUX,
- &info);
+ nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
+ DCB_I2C_NVIO_AUX, &info);
}
break;
case DCB_I2C_UNUSED:
@@ -597,20 +586,19 @@ nouveau_i2c_create_(struct nouveau_object *parent,
ret = -ENODEV;
j = -1;
- while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) {
+ while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) {
parent = nv_object(i2c->find(i2c, outp.i2c_index));
- oclass = nouveau_i2c_extdev_sclass[j];
+ oclass = nvkm_i2c_extdev_sclass[j];
do {
if (oclass->handle != info.type)
continue;
- ret = nouveau_object_ctor(parent, *pobject,
- oclass, NULL,
- index++, &object);
+ ret = nvkm_object_ctor(parent, NULL, oclass,
+ NULL, index++, &object);
} while (ret && (++oclass)->handle);
}
}
- ret = nvkm_event_init(&nouveau_i2c_intr_func, 4, index, &i2c->event);
+ ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event);
if (ret)
return ret;
@@ -618,14 +606,14 @@ nouveau_i2c_create_(struct nouveau_object *parent,
}
int
-_nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_i2c *i2c;
+ struct nvkm_i2c *i2c;
int ret;
- ret = nouveau_i2c_create(parent, engine, oclass, &i2c);
+ ret = nvkm_i2c_create(parent, engine, oclass, &i2c);
*pobject = nv_object(i2c);
if (ret)
return ret;
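Because nvkm_i2c_port_create_() registers each port's adapter with the i2c core (i2c_bit_add_bus() in the branch shown above), a port behaves like any ordinary struct i2c_adapter afterwards. A short illustrative consumer, not part of this patch; the helper name and the 0x50 DDC slave address are examples only:

/* Illustrative only: probe the first EDID byte of a DDC slave on a
 * registered port with the standard i2c core transfer helper.
 */
static int
example_probe_ddc(struct nvkm_i2c_port *port)
{
	u8 offset = 0, byte = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &byte   },
	};

	if (i2c_transfer(&port->adapter, msgs, 2) != 2)
		return -EIO;
	return byte; /* 0x00 for a valid EDID header */
}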
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
index 813ffc96e864..861a453d2a67 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
@@ -21,7 +21,6 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
@@ -30,37 +29,37 @@
#define T_HOLD 5000
static inline void
-i2c_drive_scl(struct nouveau_i2c_port *port, int state)
+i2c_drive_scl(struct nvkm_i2c_port *port, int state)
{
port->func->drive_scl(port, state);
}
static inline void
-i2c_drive_sda(struct nouveau_i2c_port *port, int state)
+i2c_drive_sda(struct nvkm_i2c_port *port, int state)
{
port->func->drive_sda(port, state);
}
static inline int
-i2c_sense_scl(struct nouveau_i2c_port *port)
+i2c_sense_scl(struct nvkm_i2c_port *port)
{
return port->func->sense_scl(port);
}
static inline int
-i2c_sense_sda(struct nouveau_i2c_port *port)
+i2c_sense_sda(struct nvkm_i2c_port *port)
{
return port->func->sense_sda(port);
}
static void
-i2c_delay(struct nouveau_i2c_port *port, u32 nsec)
+i2c_delay(struct nvkm_i2c_port *port, u32 nsec)
{
udelay((nsec + 500) / 1000);
}
static bool
-i2c_raise_scl(struct nouveau_i2c_port *port)
+i2c_raise_scl(struct nvkm_i2c_port *port)
{
u32 timeout = T_TIMEOUT / T_RISEFALL;
@@ -73,7 +72,7 @@ i2c_raise_scl(struct nouveau_i2c_port *port)
}
static int
-i2c_start(struct nouveau_i2c_port *port)
+i2c_start(struct nvkm_i2c_port *port)
{
int ret = 0;
@@ -93,7 +92,7 @@ i2c_start(struct nouveau_i2c_port *port)
}
static void
-i2c_stop(struct nouveau_i2c_port *port)
+i2c_stop(struct nvkm_i2c_port *port)
{
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 0);
@@ -106,7 +105,7 @@ i2c_stop(struct nouveau_i2c_port *port)
}
static int
-i2c_bitw(struct nouveau_i2c_port *port, int sda)
+i2c_bitw(struct nvkm_i2c_port *port, int sda)
{
i2c_drive_sda(port, sda);
i2c_delay(port, T_RISEFALL);
@@ -121,7 +120,7 @@ i2c_bitw(struct nouveau_i2c_port *port, int sda)
}
static int
-i2c_bitr(struct nouveau_i2c_port *port)
+i2c_bitr(struct nvkm_i2c_port *port)
{
int sda;
@@ -140,7 +139,7 @@ i2c_bitr(struct nouveau_i2c_port *port)
}
static int
-i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last)
+i2c_get_byte(struct nvkm_i2c_port *port, u8 *byte, bool last)
{
int i, bit;
@@ -156,7 +155,7 @@ i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last)
}
static int
-i2c_put_byte(struct nouveau_i2c_port *port, u8 byte)
+i2c_put_byte(struct nvkm_i2c_port *port, u8 byte)
{
int i, ret;
for (i = 7; i >= 0; i--) {
@@ -172,7 +171,7 @@ i2c_put_byte(struct nouveau_i2c_port *port, u8 byte)
}
static int
-i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
+i2c_addr(struct nvkm_i2c_port *port, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
if (msg->flags & I2C_M_RD)
@@ -183,11 +182,11 @@ i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *port = adap->algo_data;
+ struct nvkm_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
- ret = nouveau_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
+ ret = nvkm_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
if (ret)
return ret;
@@ -211,7 +210,7 @@ i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
}
i2c_stop(port);
- nouveau_i2c(port)->release(port);
+ nvkm_i2c(port)->release(port);
return (ret < 0) ? ret : num;
}
#else
@@ -228,7 +227,7 @@ i2c_bit_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm nouveau_i2c_bit_algo = {
+const struct i2c_algorithm nvkm_i2c_bit_algo = {
.master_xfer = i2c_bit_xfer,
.functionality = i2c_bit_func
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index e383ee81f4d2..2a2dd47b9835 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
void
-nv94_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
+g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
u32 intr = nv_rd32(i2c, 0x00e06c);
u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
@@ -39,7 +38,7 @@ nv94_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
}
void
-nv94_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
+g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
{
u32 temp = nv_rd32(i2c, 0x00e068), i;
for (i = 0; i < 8; i++) {
@@ -58,13 +57,13 @@ nv94_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
static void
-auxch_fini(struct nouveau_i2c *aux, int ch)
+auxch_fini(struct nvkm_i2c *aux, int ch)
{
nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
}
static int
-auxch_init(struct nouveau_i2c *aux, int ch)
+auxch_init(struct nvkm_i2c *aux, int ch)
{
const u32 unksel = 1; /* nfi which to use, or if it matters.. */
const u32 ureq = unksel ? 0x00100000 : 0x00200000;
@@ -99,10 +98,10 @@ auxch_init(struct nouveau_i2c *aux, int ch)
}
int
-nv94_aux(struct nouveau_i2c_port *base, bool retry,
+g94_aux(struct nvkm_i2c_port *base, bool retry,
u8 type, u32 addr, u8 *data, u8 size)
{
- struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nvkm_i2c *aux = nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
u32 ctrl, stat, timeout, retries;
u32 xbuf[4] = {};
@@ -185,8 +184,8 @@ out:
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
-static const struct nouveau_i2c_func
-nv94_i2c_func = {
+static const struct nvkm_i2c_func
+g94_i2c_func = {
.drive_scl = nv50_i2c_drive_scl,
.drive_sda = nv50_i2c_drive_sda,
.sense_scl = nv50_i2c_sense_scl,
@@ -194,17 +193,16 @@ nv94_i2c_func = {
};
static int
-nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+g94_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv50_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &nv94_i2c_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_bit_algo, &g94_i2c_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -217,23 +215,22 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static const struct nouveau_i2c_func
-nv94_aux_func = {
- .aux = nv94_aux,
+static const struct nvkm_i2c_func
+g94_aux_func = {
+ .aux = g94_aux,
};
int
-nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+g94_aux_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv50_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &nv94_aux_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_aux_algo, &g94_aux_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -243,40 +240,40 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static struct nouveau_oclass
-nv94_i2c_sclass[] = {
+static struct nvkm_oclass
+g94_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv94_i2c_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g94_i2c_port_ctor,
+ .dtor = _nvkm_i2c_port_dtor,
.init = nv50_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .fini = _nvkm_i2c_port_fini,
},
},
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv94_aux_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g94_aux_port_ctor,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
},
},
{}
};
-struct nouveau_oclass *
-nv94_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+g94_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0x94),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
- .sclass = nv94_i2c_sclass,
+ .sclass = g94_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &nv94_i2c_pad_oclass,
+ .pad_s = &g94_i2c_pad_oclass,
.aux = 4,
- .aux_stat = nv94_aux_stat,
- .aux_mask = nv94_aux_mask,
+ .aux_stat = g94_aux_stat,
+ .aux_mask = g94_aux_mask,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
index fd99380502ec..4d4ac6638140 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
@@ -21,45 +21,43 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
static int
-nvd0_i2c_sense_scl(struct nouveau_i2c_port *base)
+gf110_i2c_sense_scl(struct nvkm_i2c_port *base)
{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00000010);
}
static int
-nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
+gf110_i2c_sense_sda(struct nvkm_i2c_port *base)
{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00000020);
}
-static const struct nouveau_i2c_func
-nvd0_i2c_func = {
+static const struct nvkm_i2c_func
+gf110_i2c_func = {
.drive_scl = nv50_i2c_drive_scl,
.drive_sda = nv50_i2c_drive_sda,
- .sense_scl = nvd0_i2c_sense_scl,
- .sense_sda = nvd0_i2c_sense_sda,
+ .sense_scl = gf110_i2c_sense_scl,
+ .sense_sda = gf110_i2c_sense_sda,
};
int
-nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+gf110_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv50_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &nvd0_i2c_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_bit_algo, &gf110_i2c_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -69,40 +67,40 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nvd0_i2c_sclass[] = {
+struct nvkm_oclass
+gf110_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_i2c_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf110_i2c_port_ctor,
+ .dtor = _nvkm_i2c_port_dtor,
.init = nv50_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .fini = _nvkm_i2c_port_fini,
},
},
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv94_aux_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g94_aux_port_ctor,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
},
},
{}
};
-struct nouveau_oclass *
-nvd0_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+gf110_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0xd0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
- .sclass = nvd0_i2c_sclass,
+ .sclass = gf110_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &nv94_i2c_pad_oclass,
+ .pad_s = &g94_i2c_pad_oclass,
.aux = 4,
- .aux_stat = nv94_aux_stat,
- .aux_mask = nv94_aux_mask,
+ .aux_stat = g94_aux_stat,
+ .aux_mask = g94_aux_mask,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index fa891c39866b..e290b40f2d13 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -21,19 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
-struct nouveau_oclass *
-gf117_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+gf117_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0xd7),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
- .sclass = nvd0_i2c_sclass,
+ .sclass = gf110_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
.pad_s = &nv04_i2c_pad_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index 25fe5c2d110e..1a464903a992 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
void
-nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
+gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
u32 intr = nv_rd32(i2c, 0x00dc60);
u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
@@ -39,7 +38,7 @@ nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
}
void
-nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
+gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
{
u32 temp = nv_rd32(i2c, 0x00dc68), i;
for (i = 0; i < 8; i++) {
@@ -54,19 +53,19 @@ nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
nv_wr32(i2c, 0x00dc68, temp);
}
-struct nouveau_oclass *
-nve0_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+gk104_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0xe0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
- .sclass = nvd0_i2c_sclass,
+ .sclass = gf110_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &nv94_i2c_pad_oclass,
+ .pad_s = &g94_i2c_pad_oclass,
.aux = 4,
- .aux_stat = nve0_aux_stat,
- .aux_mask = nve0_aux_mask,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
index 06a2b87ccbf1..ab64237b3842 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
@@ -21,20 +21,19 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
static void
-auxch_fini(struct nouveau_i2c *aux, int ch)
+auxch_fini(struct nvkm_i2c *aux, int ch)
{
nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
}
static int
-auxch_init(struct nouveau_i2c *aux, int ch)
+auxch_init(struct nvkm_i2c *aux, int ch)
{
const u32 unksel = 1; /* nfi which to use, or if it matters.. */
const u32 ureq = unksel ? 0x00100000 : 0x00200000;
@@ -69,10 +68,10 @@ auxch_init(struct nouveau_i2c *aux, int ch)
}
int
-gm204_aux(struct nouveau_i2c_port *base, bool retry,
+gm204_aux(struct nvkm_i2c_port *base, bool retry,
u8 type, u32 addr, u8 *data, u8 size)
{
- struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nvkm_i2c *aux = nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
u32 ctrl, stat, timeout, retries;
u32 xbuf[4] = {};
@@ -155,24 +154,23 @@ out:
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
-static const struct nouveau_i2c_func
+static const struct nvkm_i2c_func
gm204_aux_func = {
.aux = gm204_aux,
};
int
-gm204_aux_port_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+gm204_aux_port_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv50_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &gm204_aux_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_aux_algo, &gm204_aux_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -182,40 +180,40 @@ gm204_aux_port_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gm204_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_i2c_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf110_i2c_port_ctor,
+ .dtor = _nvkm_i2c_port_dtor,
.init = nv50_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .fini = _nvkm_i2c_port_fini,
},
},
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm204_aux_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
},
},
{}
};
-struct nouveau_oclass *
-gm204_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+gm204_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0x24),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
.sclass = gm204_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
.pad_s = &gm204_i2c_pad_oclass,
.aux = 8,
- .aux_stat = nve0_aux_stat,
- .aux_mask = nve0_aux_mask,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index b1725bdea967..4cdf1c489353 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -21,25 +21,24 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/vga.h>
-#include "priv.h"
-
struct nv04_i2c_priv {
- struct nouveau_i2c base;
+ struct nvkm_i2c base;
};
struct nv04_i2c_port {
- struct nouveau_i2c_port base;
+ struct nvkm_i2c_port base;
u8 drive;
u8 sense;
};
static void
-nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+nv04_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
{
- struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv04_i2c_port *port = (void *)base;
u8 val = nv_rdvgac(priv, 0, port->drive);
if (state) val |= 0x20;
@@ -48,9 +47,9 @@ nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
}
static void
-nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+nv04_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
{
- struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv04_i2c_port *port = (void *)base;
u8 val = nv_rdvgac(priv, 0, port->drive);
if (state) val |= 0x10;
@@ -59,22 +58,22 @@ nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
}
static int
-nv04_i2c_sense_scl(struct nouveau_i2c_port *base)
+nv04_i2c_sense_scl(struct nvkm_i2c_port *base)
{
- struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv04_i2c_port *port = (void *)base;
return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
}
static int
-nv04_i2c_sense_sda(struct nouveau_i2c_port *base)
+nv04_i2c_sense_sda(struct nvkm_i2c_port *base)
{
- struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv04_i2c_port *port = (void *)base;
return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
}
-static const struct nouveau_i2c_func
+static const struct nvkm_i2c_func
nv04_i2c_func = {
.drive_scl = nv04_i2c_drive_scl,
.drive_sda = nv04_i2c_drive_sda,
@@ -83,17 +82,16 @@ nv04_i2c_func = {
};
static int
-nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+nv04_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv04_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &nv04_i2c_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_bit_algo, &nv04_i2c_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -103,27 +101,27 @@ nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv04_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_i2c_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
},
},
{}
};
-struct nouveau_oclass *
-nv04_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+nv04_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
.sclass = nv04_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
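
The nv04 port above only supplies the four tiny drive/sense callbacks and lets the shared nvkm_i2c_bit_algo layer do the actual protocol work on top of them. A rough standalone sketch of that split, with a fake register byte standing in for the VGA CRTC register the real callbacks poke (everything below is invented for illustration):

#include <stdio.h>

/* fake register: bit 0x20 drives SCL, bit 0x10 drives SDA */
static unsigned char fake_reg = 0x30;

struct i2c_bit_ops {
        void (*drive_scl)(int state);
        void (*drive_sda)(int state);
        int  (*sense_scl)(void);
        int  (*sense_sda)(void);
};

static void demo_drive_scl(int state)
{
        if (state)
                fake_reg |= 0x20;
        else
                fake_reg &= ~0x20;
}

static void demo_drive_sda(int state)
{
        if (state)
                fake_reg |= 0x10;
        else
                fake_reg &= ~0x10;
}

static int demo_sense_scl(void) { return !!(fake_reg & 0x20); }
static int demo_sense_sda(void) { return !!(fake_reg & 0x10); }

static const struct i2c_bit_ops demo_ops = {
        .drive_scl = demo_drive_scl,
        .drive_sda = demo_drive_sda,
        .sense_scl = demo_sense_scl,
        .sense_sda = demo_sense_sda,
};

/* the shared layer only knows the ops table; e.g. generating a START condition */
static void i2c_start(const struct i2c_bit_ops *ops)
{
        ops->drive_sda(1);
        ops->drive_scl(1);
        ops->drive_sda(0);              /* SDA falls while SCL is high */
        ops->drive_scl(0);
}

int main(void)
{
        i2c_start(&demo_ops);
        printf("SCL=%d SDA=%d\n", demo_ops.sense_scl(), demo_ops.sense_sda());
        return 0;
}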
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index f16c87ce5ba1..046fe5e2ea19 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -21,53 +21,52 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/vga.h>
-#include "priv.h"
-
struct nv4e_i2c_priv {
- struct nouveau_i2c base;
+ struct nvkm_i2c base;
};
struct nv4e_i2c_port {
- struct nouveau_i2c_port base;
+ struct nvkm_i2c_port base;
u32 addr;
};
static void
-nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+nv4e_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
{
- struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv4e_i2c_port *port = (void *)base;
nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
}
static void
-nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+nv4e_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
{
- struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv4e_i2c_port *port = (void *)base;
nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
}
static int
-nv4e_i2c_sense_scl(struct nouveau_i2c_port *base)
+nv4e_i2c_sense_scl(struct nvkm_i2c_port *base)
{
- struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv4e_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00040000);
}
static int
-nv4e_i2c_sense_sda(struct nouveau_i2c_port *base)
+nv4e_i2c_sense_sda(struct nvkm_i2c_port *base)
{
- struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv4e_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00080000);
}
-static const struct nouveau_i2c_func
+static const struct nvkm_i2c_func
nv4e_i2c_func = {
.drive_scl = nv4e_i2c_drive_scl,
.drive_sda = nv4e_i2c_drive_sda,
@@ -76,17 +75,16 @@ nv4e_i2c_func = {
};
static int
-nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+nv4e_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv4e_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &nv4e_i2c_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_bit_algo, &nv4e_i2c_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -95,27 +93,27 @@ nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv4e_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv4e_i2c_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
- .init = _nouveau_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .dtor = _nvkm_i2c_port_dtor,
+ .init = _nvkm_i2c_port_init,
+ .fini = _nvkm_i2c_port_fini,
},
},
{}
};
-struct nouveau_oclass *
-nv4e_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+nv4e_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0x4e),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
.sclass = nv4e_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index 7b8756d4df08..fba5b26a5682 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv50.h"
void
-nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+nv50_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
if (state) port->state |= 0x01;
else port->state &= 0xfe;
@@ -35,9 +34,9 @@ nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
}
void
-nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+nv50_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
if (state) port->state |= 0x02;
else port->state &= 0xfd;
@@ -45,22 +44,22 @@ nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
}
int
-nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
+nv50_i2c_sense_scl(struct nvkm_i2c_port *base)
{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00000001);
}
int
-nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
+nv50_i2c_sense_sda(struct nvkm_i2c_port *base)
{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
struct nv50_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00000002);
}
-static const struct nouveau_i2c_func
+static const struct nvkm_i2c_func
nv50_i2c_func = {
.drive_scl = nv50_i2c_drive_scl,
.drive_sda = nv50_i2c_drive_sda,
@@ -76,17 +75,16 @@ const u32 nv50_i2c_addr[] = {
const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
static int
-nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+nv50_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv50_i2c_port *port;
int ret;
- ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &nv50_i2c_func,
- &port);
+ ret = nvkm_i2c_port_create(parent, engine, oclass, index,
+ &nvkm_i2c_bit_algo, &nv50_i2c_func, &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -100,35 +98,35 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
int
-nv50_i2c_port_init(struct nouveau_object *object)
+nv50_i2c_port_init(struct nvkm_object *object)
{
- struct nv50_i2c_priv *priv = (void *)object->engine;
+ struct nv50_i2c_priv *priv = (void *)nvkm_i2c(object);
struct nv50_i2c_port *port = (void *)object;
nv_wr32(priv, port->addr, port->state);
- return nouveau_i2c_port_init(&port->base);
+ return nvkm_i2c_port_init(&port->base);
}
-static struct nouveau_oclass
+static struct nvkm_oclass
nv50_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_i2c_port_ctor,
- .dtor = _nouveau_i2c_port_dtor,
+ .dtor = _nvkm_i2c_port_dtor,
.init = nv50_i2c_port_init,
- .fini = _nouveau_i2c_port_fini,
+ .fini = _nvkm_i2c_port_fini,
},
},
{}
};
-struct nouveau_oclass *
-nv50_i2c_oclass = &(struct nouveau_i2c_impl) {
+struct nvkm_oclass *
+nv50_i2c_oclass = &(struct nvkm_i2c_impl) {
.base.handle = NV_SUBDEV(I2C, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_i2c_ctor,
- .dtor = _nouveau_i2c_dtor,
- .init = _nouveau_i2c_init,
- .fini = _nouveau_i2c_fini,
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_i2c_ctor,
+ .dtor = _nvkm_i2c_dtor,
+ .init = _nvkm_i2c_init,
+ .fini = _nvkm_i2c_fini,
},
.sclass = nv50_i2c_sclass,
.pad_x = &nv04_i2c_pad_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
new file mode 100644
index 000000000000..b3139e721b02
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
@@ -0,0 +1,32 @@
+#ifndef __NV50_I2C_H__
+#define __NV50_I2C_H__
+#include "priv.h"
+
+struct nv50_i2c_priv {
+ struct nvkm_i2c base;
+};
+
+struct nv50_i2c_port {
+ struct nvkm_i2c_port base;
+ u32 addr;
+ u32 state;
+};
+
+extern const u32 nv50_i2c_addr[];
+extern const int nv50_i2c_addr_nr;
+int nv50_i2c_port_init(struct nvkm_object *);
+int nv50_i2c_sense_scl(struct nvkm_i2c_port *);
+int nv50_i2c_sense_sda(struct nvkm_i2c_port *);
+void nv50_i2c_drive_scl(struct nvkm_i2c_port *, int state);
+void nv50_i2c_drive_sda(struct nvkm_i2c_port *, int state);
+
+int g94_aux_port_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void g94_i2c_acquire(struct nvkm_i2c_port *);
+void g94_i2c_release(struct nvkm_i2c_port *);
+
+int gf110_i2c_port_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
index e9e412477c12..a242eeb67829 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
@@ -21,35 +21,34 @@
*
* Authors: Ben Skeggs
*/
-
#include "pad.h"
int
-_nvkm_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+_nvkm_i2c_pad_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_i2c_pad *pad = (void *)object;
DBG("-> NULL\n");
pad->port = NULL;
- return nouveau_object_fini(&pad->base, suspend);
+ return nvkm_object_fini(&pad->base, suspend);
}
int
-_nvkm_i2c_pad_init(struct nouveau_object *object)
+_nvkm_i2c_pad_init(struct nvkm_object *object)
{
struct nvkm_i2c_pad *pad = (void *)object;
DBG("-> PORT:%02x\n", pad->next->index);
pad->port = pad->next;
- return nouveau_object_init(&pad->base);
+ return nvkm_object_init(&pad->base);
}
int
-nvkm_i2c_pad_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int index,
+nvkm_i2c_pad_create_(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int index,
int size, void **pobject)
{
- struct nouveau_i2c *i2c = (void *)engine;
- struct nouveau_i2c_port *port;
+ struct nvkm_i2c *i2c = nvkm_i2c(parent);
+ struct nvkm_i2c_port *port;
struct nvkm_i2c_pad *pad;
int ret;
@@ -62,7 +61,7 @@ nvkm_i2c_pad_create_(struct nouveau_object *parent,
}
}
- ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
pad = *pobject;
if (ret)
return ret;
@@ -72,9 +71,9 @@ nvkm_i2c_pad_create_(struct nouveau_object *parent,
}
int
-_nvkm_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+_nvkm_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct nvkm_i2c_pad *pad;
int ret;
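
The generic pad code above is essentially a mux: a pad owns one set of physical pins that several port objects may want, init routes the pins to whichever port was selected as pad->next, and fini parks them again. A minimal sketch of that selection logic (illustrative types only, not the driver's API):

#include <stdio.h>

struct demo_port { const char *name; };

/* one pad = one set of pins, shared by mutually exclusive port objects */
struct demo_pad {
        struct demo_port *next;         /* port selected for the next init */
        struct demo_port *port;         /* port the pins are currently routed to */
};

static int demo_pad_init(struct demo_pad *pad)
{
        pad->port = pad->next;
        printf("pins -> %s\n", pad->port ? pad->port->name : "none");
        return 0;
}

static int demo_pad_fini(struct demo_pad *pad)
{
        pad->port = NULL;               /* park the pins */
        printf("pins -> parked\n");
        return 0;
}

int main(void)
{
        struct demo_port aux = { "aux port" };
        struct demo_pad pad = { .next = &aux };

        demo_pad_init(&pad);
        demo_pad_fini(&pad);
        return 0;
}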
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 452ac10c3004..f3422cc6f8db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,20 +1,19 @@
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
-
#include "priv.h"
struct nvkm_i2c_pad {
- struct nouveau_object base;
+ struct nvkm_object base;
int index;
- struct nouveau_i2c_port *port;
- struct nouveau_i2c_port *next;
+ struct nvkm_i2c_port *port;
+ struct nvkm_i2c_port *next;
};
static inline struct nvkm_i2c_pad *
-nvkm_i2c_pad(struct nouveau_i2c_port *port)
+nvkm_i2c_pad(struct nvkm_i2c_port *port)
{
- struct nouveau_object *pad = nv_object(port);
- while (pad->parent)
+ struct nvkm_object *pad = nv_object(port);
+ while (!nv_iclass(pad->parent, NV_SUBDEV_CLASS))
pad = pad->parent;
return (void *)pad;
}
@@ -34,25 +33,24 @@ nvkm_i2c_pad(struct nouveau_i2c_port *port)
_nvkm_i2c_pad_fini(nv_object(_p), (s)); \
})
-int nvkm_i2c_pad_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int index, int, void **);
+int nvkm_i2c_pad_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int index, int, void **);
-int _nvkm_i2c_pad_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-#define _nvkm_i2c_pad_dtor nouveau_object_destroy
-int _nvkm_i2c_pad_init(struct nouveau_object *);
-int _nvkm_i2c_pad_fini(struct nouveau_object *, bool);
+int _nvkm_i2c_pad_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+#define _nvkm_i2c_pad_dtor nvkm_object_destroy
+int _nvkm_i2c_pad_init(struct nvkm_object *);
+int _nvkm_i2c_pad_fini(struct nvkm_object *, bool);
#ifndef MSG
#define MSG(l,f,a...) do { \
struct nvkm_i2c_pad *_pad = (void *)pad; \
- nv_##l(nv_object(_pad)->engine, "PAD:%c:%02x: "f, \
+ nv_##l(_pad, "PAD:%c:%02x: "f, \
_pad->index >= 0x100 ? 'X' : 'S', \
_pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
} while(0)
#define DBG(f,a...) MSG(debug, f, ##a)
#define ERR(f,a...) MSG(error, f, ##a)
#endif
-
#endif
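
The inline nvkm_i2c_pad() helper above climbs the object tree from a port until the next parent is the subdev itself; the node sitting directly below the subdev is, by construction, the pad. A toy version of that parent walk over an invented three-level tree, same shape of loop:

#include <stdio.h>

enum demo_class { DEMO_SUBDEV, DEMO_PAD, DEMO_PORT };

struct demo_object {
        enum demo_class oclass;
        struct demo_object *parent;
};

/* climb from a port until the parent is the subdev; that child is the pad */
static struct demo_object *demo_find_pad(struct demo_object *port)
{
        struct demo_object *cur = port;

        while (cur->parent->oclass != DEMO_SUBDEV)
                cur = cur->parent;
        return cur;
}

int main(void)
{
        struct demo_object subdev = { DEMO_SUBDEV, NULL };
        struct demo_object pad    = { DEMO_PAD,    &subdev };
        struct demo_object port   = { DEMO_PORT,   &pad };

        printf("pad found: %s\n", demo_find_pad(&port) == &pad ? "yes" : "no");
        return 0;
}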
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index 0dc6753014f0..e9832f7a7e38 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -21,28 +21,27 @@
*
* Authors: Ben Skeggs
*/
-
#include "pad.h"
-struct nv94_i2c_pad {
+struct g94_i2c_pad {
struct nvkm_i2c_pad base;
int addr;
};
static int
-nv94_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+g94_i2c_pad_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_i2c *i2c = (void *)object->engine;
- struct nv94_i2c_pad *pad = (void *)object;
+ struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
+ struct g94_i2c_pad *pad = (void *)object;
nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
return nvkm_i2c_pad_fini(&pad->base, suspend);
}
static int
-nv94_i2c_pad_init(struct nouveau_object *object)
+g94_i2c_pad_init(struct nvkm_object *object)
{
- struct nouveau_i2c *i2c = (void *)object->engine;
- struct nv94_i2c_pad *pad = (void *)object;
+ struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
+ struct g94_i2c_pad *pad = (void *)object;
switch (nv_oclass(pad->base.next)->handle) {
case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
@@ -59,11 +58,11 @@ nv94_i2c_pad_init(struct nouveau_object *object)
}
static int
-nv94_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+g94_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
- struct nv94_i2c_pad *pad;
+ struct g94_i2c_pad *pad;
int ret;
ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
@@ -75,12 +74,12 @@ nv94_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv94_i2c_pad_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv94_i2c_pad_ctor,
+struct nvkm_oclass
+g94_i2c_pad_oclass = {
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g94_i2c_pad_ctor,
.dtor = _nvkm_i2c_pad_dtor,
- .init = nv94_i2c_pad_init,
- .fini = nv94_i2c_pad_fini,
+ .init = g94_i2c_pad_init,
+ .fini = g94_i2c_pad_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
index f0e6fbbaa8cd..be590405444d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
@@ -21,7 +21,6 @@
*
* Authors: Ben Skeggs
*/
-
#include "pad.h"
struct gm204_i2c_pad {
@@ -30,18 +29,18 @@ struct gm204_i2c_pad {
};
static int
-gm204_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+gm204_i2c_pad_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_i2c *i2c = (void *)object->engine;
+ struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
struct gm204_i2c_pad *pad = (void *)object;
nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
return nvkm_i2c_pad_fini(&pad->base, suspend);
}
static int
-gm204_i2c_pad_init(struct nouveau_object *object)
+gm204_i2c_pad_init(struct nvkm_object *object)
{
- struct nouveau_i2c *i2c = (void *)object->engine;
+ struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
struct gm204_i2c_pad *pad = (void *)object;
switch (nv_oclass(pad->base.next)->handle) {
@@ -59,9 +58,9 @@ gm204_i2c_pad_init(struct nouveau_object *object)
}
static int
-gm204_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 index,
- struct nouveau_object **pobject)
+gm204_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 index,
+ struct nvkm_object **pobject)
{
struct gm204_i2c_pad *pad;
int ret;
@@ -75,9 +74,9 @@ gm204_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gm204_i2c_pad_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm204_i2c_pad_ctor,
.dtor = _nvkm_i2c_pad_dtor,
.init = gm204_i2c_pad_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
index 2c4b61296dd1..22c7daaad3a0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
@@ -21,12 +21,11 @@
*
* Authors: Ben Skeggs
*/
-
#include "pad.h"
-struct nouveau_oclass
+struct nvkm_oclass
nv04_i2c_pad_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_i2c_pad_ctor,
.dtor = _nvkm_i2c_pad_dtor,
.init = _nvkm_i2c_pad_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
index a8ff6e077af5..586f53dad813 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
@@ -1,15 +1,13 @@
#ifndef __NVKM_I2C_PORT_H__
#define __NVKM_I2C_PORT_H__
-
#include "priv.h"
#ifndef MSG
#define MSG(l,f,a...) do { \
- struct nouveau_i2c_port *_port = (void *)port; \
- nv_##l(nv_object(_port)->engine, "PORT:%02x: "f, _port->index, ##a); \
+ struct nvkm_i2c_port *_port = (void *)port; \
+ nv_##l(_port, "PORT:%02x: "f, _port->index, ##a); \
} while(0)
#define DBG(f,a...) MSG(debug, f, ##a)
#define ERR(f,a...) MSG(error, f, ##a)
#endif
-
#endif
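
The MSG/DBG/ERR macros above pick the log level by token pasting (nv_##l) and prefix every message with the index of a 'port' variable taken from the caller's scope. A host-side sketch of the same construction, with printf standing in for the real per-level helpers and assuming GNU-style named variadic macros as the kernel does:

#include <stdio.h>

struct demo_port { int index; };

/* printf stands in for the per-level helpers the real macro pastes together */
#define nv_debug(obj, fmt, ...) printf("debug: " fmt, ##__VA_ARGS__)
#define nv_error(obj, fmt, ...) printf("error: " fmt, ##__VA_ARGS__)

/* pick the level by token pasting, prefix every message with the port index */
#define MSG(l,f,a...) do {                                                    \
        struct demo_port *_port = (port);                                     \
        nv_##l(_port, "PORT:%02x: " f, _port->index, ##a);                    \
} while(0)
#define DBG(f,a...) MSG(debug, f, ##a)
#define ERR(f,a...) MSG(error, f, ##a)

int main(void)
{
        struct demo_port demo = { .index = 0x05 };
        struct demo_port *port = &demo;

        DBG("transfer of %d bytes ok\n", 16);
        ERR("timeout\n");
        return 0;
}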
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
new file mode 100644
index 000000000000..6586e1567fcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -0,0 +1,87 @@
+#ifndef __NVKM_I2C_PRIV_H__
+#define __NVKM_I2C_PRIV_H__
+#include <subdev/i2c.h>
+
+extern struct nvkm_oclass nv04_i2c_pad_oclass;
+extern struct nvkm_oclass g94_i2c_pad_oclass;
+extern struct nvkm_oclass gm204_i2c_pad_oclass;
+
+#define nvkm_i2c_port_create(p,e,o,i,a,f,d) \
+ nvkm_i2c_port_create_((p), (e), (o), (i), (a), (f), \
+ sizeof(**d), (void **)d)
+#define nvkm_i2c_port_destroy(p) ({ \
+ struct nvkm_i2c_port *port = (p); \
+ _nvkm_i2c_port_dtor(nv_object(i2c)); \
+})
+#define nvkm_i2c_port_init(p) \
+ nvkm_object_init(&(p)->base)
+#define nvkm_i2c_port_fini(p,s) \
+ nvkm_object_fini(&(p)->base, (s))
+
+int nvkm_i2c_port_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, u8,
+ const struct i2c_algorithm *,
+ const struct nvkm_i2c_func *,
+ int, void **);
+void _nvkm_i2c_port_dtor(struct nvkm_object *);
+#define _nvkm_i2c_port_init nvkm_object_init
+int _nvkm_i2c_port_fini(struct nvkm_object *, bool);
+
+#define nvkm_i2c_create(p,e,o,d) \
+ nvkm_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_i2c_destroy(p) ({ \
+ struct nvkm_i2c *i2c = (p); \
+ _nvkm_i2c_dtor(nv_object(i2c)); \
+})
+#define nvkm_i2c_init(p) ({ \
+ struct nvkm_i2c *i2c = (p); \
+ _nvkm_i2c_init(nv_object(i2c)); \
+})
+#define nvkm_i2c_fini(p,s) ({ \
+ struct nvkm_i2c *i2c = (p); \
+ _nvkm_i2c_fini(nv_object(i2c), (s)); \
+})
+
+int nvkm_i2c_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+int _nvkm_i2c_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void _nvkm_i2c_dtor(struct nvkm_object *);
+int _nvkm_i2c_init(struct nvkm_object *);
+int _nvkm_i2c_fini(struct nvkm_object *, bool);
+
+extern struct nvkm_oclass nvkm_anx9805_sclass[];
+extern struct nvkm_oclass gf110_i2c_sclass[];
+
+extern const struct i2c_algorithm nvkm_i2c_bit_algo;
+extern const struct i2c_algorithm nvkm_i2c_aux_algo;
+
+struct nvkm_i2c_impl {
+ struct nvkm_oclass base;
+
+ /* supported i2c port classes */
+ struct nvkm_oclass *sclass;
+ struct nvkm_oclass *pad_x;
+ struct nvkm_oclass *pad_s;
+
+ /* number of native dp aux channels present */
+ int aux;
+
+ /* read and ack pending interrupts, returning only data
+ * for ports that have not been masked off, while still
+ * performing the ack for anything that was pending.
+ */
+ void (*aux_stat)(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
+
+ /* mask on/off interrupt types for a given set of auxch
+ */
+ void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+};
+
+void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
+void g94_aux_mask(struct nvkm_i2c *, u32, u32, u32);
+
+void gk104_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
+void gk104_aux_mask(struct nvkm_i2c *, u32, u32, u32);
+#endif
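
The aux_stat comment above describes a specific contract: ack everything that is pending in hardware, but only report the sources that have not been masked off; aux_mask then updates which sources are enabled. A small standalone model of that contract (the struct and register behaviour below are invented and do not reflect the real g94/gk104 register layout):

#include <stdint.h>
#include <stdio.h>

/* invented stand-in for the aux interrupt state of one controller */
struct demo_aux {
        uint32_t intr;          /* pending bits (write-1-to-clear in real hw) */
        uint32_t mask;          /* currently enabled bits */
};

/* ack everything pending, but report only the unmasked sources */
static uint32_t demo_aux_stat(struct demo_aux *aux)
{
        uint32_t pending = aux->intr;

        aux->intr &= ~pending;                  /* ack all of it */
        return pending & aux->mask;             /* hide masked-off ports */
}

/* enable (data) or disable the interrupt types selected in the given set */
static void demo_aux_mask(struct demo_aux *aux, uint32_t types, uint32_t data)
{
        aux->mask = (aux->mask & ~types) | (data & types);
}

int main(void)
{
        struct demo_aux aux = { .intr = 0x0000000f, .mask = 0 };

        demo_aux_mask(&aux, 0x0000000f, 0x00000005);     /* enable ports 0 and 2 */
        printf("reported %08x\n", (unsigned)demo_aux_stat(&aux));  /* 00000005 */
        printf("pending  %08x\n", (unsigned)aux.intr);   /* 00000000: still acked */
        return 0;
}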
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
new file mode 100644
index 000000000000..a0b12d27284a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/subdev/ibus/gf100.o
+nvkm-y += nvkm/subdev/ibus/gk104.o
+nvkm-y += nvkm/subdev/ibus/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 4e977ff27e44..8e578f802f66 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/ibus.h>
-struct nvc0_ibus_priv {
- struct nouveau_ibus base;
+struct gf100_ibus_priv {
+ struct nvkm_ibus base;
};
static void
-nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
+gf100_ibus_intr_hub(struct gf100_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
@@ -39,7 +38,7 @@ nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
}
static void
-nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
+gf100_ibus_intr_rop(struct gf100_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
@@ -49,7 +48,7 @@ nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
}
static void
-nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
+gf100_ibus_intr_gpc(struct gf100_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
@@ -59,9 +58,9 @@ nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
}
static void
-nvc0_ibus_intr(struct nouveau_subdev *subdev)
+gf100_ibus_intr(struct nvkm_subdev *subdev)
{
- struct nvc0_ibus_priv *priv = (void *)subdev;
+ struct gf100_ibus_priv *priv = (void *)subdev;
u32 intr0 = nv_rd32(priv, 0x121c58);
u32 intr1 = nv_rd32(priv, 0x121c5c);
u32 hubnr = nv_rd32(priv, 0x121c70);
@@ -72,7 +71,7 @@ nvc0_ibus_intr(struct nouveau_subdev *subdev)
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- nvc0_ibus_intr_hub(priv, i);
+ gf100_ibus_intr_hub(priv, i);
intr0 &= ~stat;
}
}
@@ -80,7 +79,7 @@ nvc0_ibus_intr(struct nouveau_subdev *subdev)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- nvc0_ibus_intr_rop(priv, i);
+ gf100_ibus_intr_rop(priv, i);
intr0 &= ~stat;
}
}
@@ -88,36 +87,36 @@ nvc0_ibus_intr(struct nouveau_subdev *subdev)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- nvc0_ibus_intr_gpc(priv, i);
+ gf100_ibus_intr_gpc(priv, i);
intr1 &= ~stat;
}
}
}
static int
-nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_ibus_priv *priv;
+ struct gf100_ibus_priv *priv;
int ret;
- ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+ ret = nvkm_ibus_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nv_subdev(priv)->intr = nvc0_ibus_intr;
+ nv_subdev(priv)->intr = gf100_ibus_intr;
return 0;
}
-struct nouveau_oclass
-nvc0_ibus_oclass = {
+struct nvkm_oclass
+gf100_ibus_oclass = {
.handle = NV_SUBDEV(IBUS, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_ibus_ctor,
- .dtor = _nouveau_ibus_dtor,
- .init = _nouveau_ibus_init,
- .fini = _nouveau_ibus_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_ibus_ctor,
+ .dtor = _nvkm_ibus_dtor,
+ .init = _nvkm_ibus_init,
+ .fini = _nvkm_ibus_fini,
},
};
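
The interrupt handler above follows a common per-unit dispatch shape: read the status word and the unit counts, then walk the per-unit bits, clearing each handled bit so the loop can stop as soon as nothing in the relevant field remains. A compact standalone version of that loop for the HUB field, where bit (8 + i) flags unit i (names invented for illustration):

#include <stdint.h>
#include <stdio.h>

static void demo_intr_hub(int i)
{
        printf("hub %d reported a fault\n", i);
}

/* clear each handled bit so the loop exits once the whole field is serviced */
static void demo_dispatch(uint32_t intr, int hubnr)
{
        int i;

        for (i = 0; (intr & 0x0000ff00) && i < hubnr; i++) {
                uint32_t stat = 0x00000100 << i;
                if (intr & stat) {
                        demo_intr_hub(i);
                        intr &= ~stat;
                }
        }
}

int main(void)
{
        demo_dispatch(0x00002200, 8);   /* units 1 and 5 pending */
        return 0;
}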
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index ebef970a0645..7b6e9a6cd7b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -21,15 +21,14 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/ibus.h>
-struct nve0_ibus_priv {
- struct nouveau_ibus base;
+struct gk104_ibus_priv {
+ struct nvkm_ibus base;
};
static void
-nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i)
+gk104_ibus_intr_hub(struct gk104_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
@@ -39,7 +38,7 @@ nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i)
}
static void
-nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i)
+gk104_ibus_intr_rop(struct gk104_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
@@ -49,7 +48,7 @@ nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i)
}
static void
-nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i)
+gk104_ibus_intr_gpc(struct gk104_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
@@ -59,9 +58,9 @@ nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i)
}
static void
-nve0_ibus_intr(struct nouveau_subdev *subdev)
+gk104_ibus_intr(struct nvkm_subdev *subdev)
{
- struct nve0_ibus_priv *priv = (void *)subdev;
+ struct gk104_ibus_priv *priv = (void *)subdev;
u32 intr0 = nv_rd32(priv, 0x120058);
u32 intr1 = nv_rd32(priv, 0x12005c);
u32 hubnr = nv_rd32(priv, 0x120070);
@@ -72,7 +71,7 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- nve0_ibus_intr_hub(priv, i);
+ gk104_ibus_intr_hub(priv, i);
intr0 &= ~stat;
}
}
@@ -80,7 +79,7 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- nve0_ibus_intr_rop(priv, i);
+ gk104_ibus_intr_rop(priv, i);
intr0 &= ~stat;
}
}
@@ -88,17 +87,17 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- nve0_ibus_intr_gpc(priv, i);
+ gk104_ibus_intr_gpc(priv, i);
intr1 &= ~stat;
}
}
}
static int
-nve0_ibus_init(struct nouveau_object *object)
+gk104_ibus_init(struct nvkm_object *object)
{
- struct nve0_ibus_priv *priv = (void *)object;
- int ret = nouveau_ibus_init(&priv->base);
+ struct gk104_ibus_priv *priv = (void *)object;
+ int ret = nvkm_ibus_init(&priv->base);
if (ret == 0) {
nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
@@ -112,29 +111,29 @@ nve0_ibus_init(struct nouveau_object *object)
}
static int
-nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk104_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nve0_ibus_priv *priv;
+ struct gk104_ibus_priv *priv;
int ret;
- ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+ ret = nvkm_ibus_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nv_subdev(priv)->intr = nve0_ibus_intr;
+ nv_subdev(priv)->intr = gk104_ibus_intr;
return 0;
}
-struct nouveau_oclass
-nve0_ibus_oclass = {
+struct nvkm_oclass
+gk104_ibus_oclass = {
.handle = NV_SUBDEV(IBUS, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_ibus_ctor,
- .dtor = _nouveau_ibus_dtor,
- .init = nve0_ibus_init,
- .fini = _nouveau_ibus_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk104_ibus_ctor,
+ .dtor = _nvkm_ibus_dtor,
+ .init = gk104_ibus_init,
+ .fini = _nvkm_ibus_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 245f0ebaa6af..c0fdb89e74ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -19,12 +19,11 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include <subdev/ibus.h>
#include <subdev/timer.h>
struct gk20a_ibus_priv {
- struct nouveau_ibus base;
+ struct nvkm_ibus base;
};
static void
@@ -42,7 +41,7 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
}
static void
-gk20a_ibus_intr(struct nouveau_subdev *subdev)
+gk20a_ibus_intr(struct nvkm_subdev *subdev)
{
struct gk20a_ibus_priv *priv = (void *)subdev;
u32 status0 = nv_rd32(priv, 0x120058);
@@ -60,12 +59,12 @@ gk20a_ibus_intr(struct nouveau_subdev *subdev)
}
static int
-gk20a_ibus_init(struct nouveau_object *object)
+gk20a_ibus_init(struct nvkm_object *object)
{
struct gk20a_ibus_priv *priv = (void *)object;
int ret;
- ret = _nouveau_ibus_init(object);
+ ret = _nvkm_ibus_init(object);
if (ret)
return ret;
@@ -75,14 +74,14 @@ gk20a_ibus_init(struct nouveau_object *object)
}
static int
-gk20a_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk20a_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gk20a_ibus_priv *priv;
int ret;
- ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+ ret = nvkm_ibus_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -91,13 +90,13 @@ gk20a_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gk20a_ibus_oclass = {
.handle = NV_SUBDEV(IBUS, 0xea),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gk20a_ibus_ctor,
- .dtor = _nouveau_ibus_dtor,
+ .dtor = _nvkm_ibus_dtor,
.init = gk20a_ibus_init,
- .fini = _nouveau_ibus_fini,
+ .fini = _nvkm_ibus_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
new file mode 100644
index 000000000000..e6f35abe7879
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -0,0 +1,4 @@
+nvkm-y += nvkm/subdev/instmem/base.o
+nvkm-y += nvkm/subdev/instmem/nv04.o
+nvkm-y += nvkm/subdev/instmem/nv40.o
+nvkm-y += nvkm/subdev/instmem/nv50.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 14706d9842ca..d16358cc6cbb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -21,38 +21,37 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
+#include <core/engine.h>
+
/******************************************************************************
* instmem object base implementation
*****************************************************************************/
void
-_nouveau_instobj_dtor(struct nouveau_object *object)
+_nvkm_instobj_dtor(struct nvkm_object *object)
{
- struct nouveau_instmem *imem = (void *)object->engine;
- struct nouveau_instobj *iobj = (void *)object;
+ struct nvkm_instmem *imem = nvkm_instmem(object);
+ struct nvkm_instobj *iobj = (void *)object;
mutex_lock(&nv_subdev(imem)->mutex);
list_del(&iobj->head);
mutex_unlock(&nv_subdev(imem)->mutex);
- return nouveau_object_destroy(&iobj->base);
+ return nvkm_object_destroy(&iobj->base);
}
int
-nouveau_instobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
+nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_instmem *imem = (void *)engine;
- struct nouveau_instobj *iobj;
+ struct nvkm_instmem *imem = nvkm_instmem(parent);
+ struct nvkm_instobj *iobj;
int ret;
- ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
- length, pobject);
+ ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
+ length, pobject);
iobj = *pobject;
if (ret)
return ret;
@@ -68,27 +67,24 @@ nouveau_instobj_create_(struct nouveau_object *parent,
*****************************************************************************/
static int
-nouveau_instmem_alloc(struct nouveau_instmem *imem,
- struct nouveau_object *parent, u32 size, u32 align,
- struct nouveau_object **pobject)
+nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
+ u32 size, u32 align, struct nvkm_object **pobject)
{
- struct nouveau_object *engine = nv_object(imem);
- struct nouveau_instmem_impl *impl = (void *)engine->oclass;
- struct nouveau_instobj_args args = { .size = size, .align = align };
- return nouveau_object_ctor(parent, engine, impl->instobj, &args,
- sizeof(args), pobject);
+ struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
+ struct nvkm_instobj_args args = { .size = size, .align = align };
+ return nvkm_object_ctor(parent, &parent->engine->subdev.object,
+ impl->instobj, &args, sizeof(args), pobject);
}
int
-_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+_nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_instmem *imem = (void *)object;
- struct nouveau_instobj *iobj;
+ struct nvkm_instmem *imem = (void *)object;
+ struct nvkm_instobj *iobj;
int i, ret = 0;
if (suspend) {
mutex_lock(&imem->base.mutex);
-
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
if (!iobj->suspend) {
@@ -99,29 +95,26 @@ _nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
for (i = 0; i < iobj->size; i += 4)
iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
-
mutex_unlock(&imem->base.mutex);
-
if (ret)
return ret;
}
- return nouveau_subdev_fini(&imem->base, suspend);
+ return nvkm_subdev_fini(&imem->base, suspend);
}
int
-_nouveau_instmem_init(struct nouveau_object *object)
+_nvkm_instmem_init(struct nvkm_object *object)
{
- struct nouveau_instmem *imem = (void *)object;
- struct nouveau_instobj *iobj;
+ struct nvkm_instmem *imem = (void *)object;
+ struct nvkm_instobj *iobj;
int ret, i;
- ret = nouveau_subdev_init(&imem->base);
+ ret = nvkm_subdev_init(&imem->base);
if (ret)
return ret;
mutex_lock(&imem->base.mutex);
-
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
@@ -130,28 +123,24 @@ _nouveau_instmem_init(struct nouveau_object *object)
iobj->suspend = NULL;
}
}
-
mutex_unlock(&imem->base.mutex);
-
return 0;
}
int
-nouveau_instmem_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
+nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_instmem *imem;
+ struct nvkm_instmem *imem;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0,
- "INSTMEM", "instmem", length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "INSTMEM",
+ "instmem", length, pobject);
imem = *pobject;
if (ret)
return ret;
INIT_LIST_HEAD(&imem->list);
- imem->alloc = nouveau_instmem_alloc;
+ imem->alloc = nvkm_instmem_alloc;
return 0;
}
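
The fini/init pair above implements suspend support by brute force: before suspend every instance object's words are read out into a vmalloc'd shadow buffer, and on resume they are written back and the shadow is freed. A host-only sketch of that round trip, with malloc/memcpy standing in for vmalloc and the word-by-word accessors:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_instobj {
        uint32_t *hw;           /* stands in for the object's words in PRAMIN */
        uint32_t *suspend;      /* shadow copy held across suspend */
        size_t size;            /* bytes */
};

static int demo_instobj_suspend(struct demo_instobj *iobj)
{
        iobj->suspend = malloc(iobj->size);
        if (!iobj->suspend)
                return -1;
        /* the driver copies word by word through rd32; memcpy is equivalent here */
        memcpy(iobj->suspend, iobj->hw, iobj->size);
        return 0;
}

static void demo_instobj_resume(struct demo_instobj *iobj)
{
        if (iobj->suspend) {
                memcpy(iobj->hw, iobj->suspend, iobj->size);    /* write back */
                free(iobj->suspend);
                iobj->suspend = NULL;
        }
}

int main(void)
{
        uint32_t vram[4] = { 1, 2, 3, 4 };
        struct demo_instobj iobj = { .hw = vram, .size = sizeof(vram) };

        if (demo_instobj_suspend(&iobj))
                return 1;
        memset(vram, 0, sizeof(vram));          /* contents lost across suspend */
        demo_instobj_resume(&iobj);
        printf("restored word 2 = %u\n", (unsigned)vram[2]);
        return 0;
}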
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index e8b1401c59c0..80614f1b2074 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -21,56 +21,59 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
+#include <core/ramht.h>
+
/******************************************************************************
* instmem object implementation
*****************************************************************************/
static u32
-nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
+nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
{
+ struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv04_instobj_priv *node = (void *)object;
- return nv_ro32(object->engine, node->mem->offset + addr);
+ return nv_ro32(priv, node->mem->offset + addr);
}
static void
-nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
+ struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv04_instobj_priv *node = (void *)object;
- nv_wo32(object->engine, node->mem->offset + addr, data);
+ nv_wo32(priv, node->mem->offset + addr, data);
}
static void
-nv04_instobj_dtor(struct nouveau_object *object)
+nv04_instobj_dtor(struct nvkm_object *object)
{
- struct nv04_instmem_priv *priv = (void *)object->engine;
+ struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv04_instobj_priv *node = (void *)object;
- nouveau_mm_free(&priv->heap, &node->mem);
- nouveau_instobj_destroy(&node->base);
+ nvkm_mm_free(&priv->heap, &node->mem);
+ nvkm_instobj_destroy(&node->base);
}
static int
-nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *priv = (void *)engine;
+ struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
struct nv04_instobj_priv *node;
- struct nouveau_instobj_args *args = data;
+ struct nvkm_instobj_args *args = data;
int ret;
if (!args->align)
args->align = 1;
- ret = nouveau_instobj_create(parent, engine, oclass, &node);
+ ret = nvkm_instobj_create(parent, engine, oclass, &node);
*pobject = nv_object(node);
if (ret)
return ret;
- ret = nouveau_mm_head(&priv->heap, 0, 1, args->size, args->size,
- args->align, &node->mem);
+ ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
+ args->align, &node->mem);
if (ret)
return ret;
@@ -79,13 +82,13 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_instobj_impl
+struct nvkm_instobj_impl
nv04_instobj_oclass = {
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_instobj_ctor,
.dtor = nv04_instobj_dtor,
- .init = _nouveau_instobj_init,
- .fini = _nouveau_instobj_fini,
+ .init = _nvkm_instobj_init,
+ .fini = _nvkm_instobj_fini,
.rd32 = nv04_instobj_rd32,
.wr32 = nv04_instobj_wr32,
},
@@ -96,40 +99,40 @@ nv04_instobj_oclass = {
*****************************************************************************/
static u32
-nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
+nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
{
return nv_rd32(object, 0x700000 + addr);
}
static void
-nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
return nv_wr32(object, 0x700000 + addr, data);
}
void
-nv04_instmem_dtor(struct nouveau_object *object)
+nv04_instmem_dtor(struct nvkm_object *object)
{
struct nv04_instmem_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->ramfc);
- nouveau_gpuobj_ref(NULL, &priv->ramro);
- nouveau_ramht_ref(NULL, &priv->ramht);
- nouveau_gpuobj_ref(NULL, &priv->vbios);
- nouveau_mm_fini(&priv->heap);
+ nvkm_gpuobj_ref(NULL, &priv->ramfc);
+ nvkm_gpuobj_ref(NULL, &priv->ramro);
+ nvkm_ramht_ref(NULL, &priv->ramht);
+ nvkm_gpuobj_ref(NULL, &priv->vbios);
+ nvkm_mm_fini(&priv->heap);
if (priv->iomem)
iounmap(priv->iomem);
- nouveau_instmem_destroy(&priv->base);
+ nvkm_instmem_destroy(&priv->base);
}
static int
-nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_instmem_priv *priv;
int ret;
- ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -137,44 +140,44 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* PRAMIN aperture maps over the end of VRAM, reserve it */
priv->base.reserved = 512 * 1024;
- ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+ ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
- &priv->vbios);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+ &priv->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+ ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
/* 0x18800-0x18a00: reserve for RAMRO */
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
- &priv->ramro);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
+ &priv->ramro);
if (ret)
return ret;
return 0;
}
-struct nouveau_oclass *
-nv04_instmem_oclass = &(struct nouveau_instmem_impl) {
+struct nvkm_oclass *
+nv04_instmem_oclass = &(struct nvkm_instmem_impl) {
.base.handle = NV_SUBDEV(INSTMEM, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_instmem_ctor,
.dtor = nv04_instmem_dtor,
- .init = _nouveau_instmem_init,
- .fini = _nouveau_instmem_fini,
+ .init = _nvkm_instmem_init,
+ .fini = _nvkm_instmem_fini,
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
},
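
The nv04 constructor above carves its 512 KiB PRAMIN reservation into the fixed ranges named in the comments: vbios image, RAMHT, RAMFC and RAMRO, in that order. The real allocations go through nvkm_gpuobj_new and nvkm_ramht_new on top of nvkm_mm; a trivial carve-from-the-front allocator is enough to show the resulting offsets:

#include <stdio.h>

/* trivial carve-from-the-front allocator standing in for nvkm_mm */
struct demo_heap {
        unsigned offset;
        unsigned size;
};

static int demo_heap_alloc(struct demo_heap *heap, unsigned size, unsigned *offset)
{
        if (heap->offset + size > heap->size)
                return -1;
        *offset = heap->offset;
        heap->offset += size;
        return 0;
}

int main(void)
{
        struct demo_heap pramin = { .offset = 0, .size = 512 * 1024 };
        unsigned vbios, ramht, ramfc, ramro;

        /* same order and sizes as the nv04 constructor above */
        demo_heap_alloc(&pramin, 0x10000, &vbios);      /* probable vbios image */
        demo_heap_alloc(&pramin, 0x08000, &ramht);      /* RAMHT */
        demo_heap_alloc(&pramin, 0x00800, &ramfc);      /* RAMFC */
        demo_heap_alloc(&pramin, 0x00200, &ramro);      /* RAMRO */

        printf("vbios %05x ramht %05x ramfc %05x ramro %05x\n",
               vbios, ramht, ramfc, ramro);     /* 00000 10000 18000 18800 */
        return 0;
}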
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
new file mode 100644
index 000000000000..42b6c928047c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
@@ -0,0 +1,36 @@
+#ifndef __NV04_INSTMEM_H__
+#define __NV04_INSTMEM_H__
+#include "priv.h"
+
+#include <core/mm.h>
+
+extern struct nvkm_instobj_impl nv04_instobj_oclass;
+
+struct nv04_instmem_priv {
+ struct nvkm_instmem base;
+
+ void __iomem *iomem;
+ struct nvkm_mm heap;
+
+ struct nvkm_gpuobj *vbios;
+ struct nvkm_ramht *ramht;
+ struct nvkm_gpuobj *ramro;
+ struct nvkm_gpuobj *ramfc;
+};
+
+static inline struct nv04_instmem_priv *
+nv04_instmem(void *obj)
+{
+ return (void *)nvkm_instmem(obj);
+}
+
+struct nv04_instobj_priv {
+ struct nvkm_instobj base;
+ struct nvkm_mm_node *mem;
+};
+
+void nv04_instmem_dtor(struct nvkm_object *);
+
+int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
+ u32 size, u32 align, struct nvkm_object **pobject);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 8803809f9fc5..b42b8588fc0e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,39 +21,39 @@
*
* Authors: Ben Skeggs
*/
-
-#include <engine/graph/nv40.h>
-
#include "nv04.h"
+#include <core/ramht.h>
+#include <engine/gr/nv40.h>
+
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
static u32
-nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
+nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
{
struct nv04_instmem_priv *priv = (void *)object;
return ioread32_native(priv->iomem + addr);
}
static void
-nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv04_instmem_priv *priv = (void *)object;
iowrite32_native(data, priv->iomem + addr);
}
static int
-nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
+ struct nvkm_device *device = nv_device(parent);
struct nv04_instmem_priv *priv;
int ret, bar, vs;
- ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -73,12 +73,12 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* PRAMIN aperture maps over the end of vram, reserve enough space
* to fit graphics contexts for every channel, the magics come
- * from engine/graph/nv40.c
+ * from engine/gr/nv40.c
*/
vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
- else if (nv44_graph_class(priv)) priv->base.reserved = 0x4980 * vs;
+ else if (nv44_gr_class(priv)) priv->base.reserved = 0x4980 * vs;
else priv->base.reserved = 0x4a40 * vs;
priv->base.reserved += 16 * 1024;
priv->base.reserved *= 32; /* per-channel */
@@ -87,49 +87,48 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reserved = round_up(priv->base.reserved, 4096);
- ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+ ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
- &priv->vbios);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+ &priv->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
- &priv->ramht);
+ ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
- &priv->ramro);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
+ &priv->ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding and some unknown crap
*/
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
return 0;
}
-struct nouveau_oclass *
-nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
+struct nvkm_oclass *
+nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
.base.handle = NV_SUBDEV(INSTMEM, 0x40),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_instmem_ctor,
.dtor = nv04_instmem_dtor,
- .init = _nouveau_instmem_init,
- .fini = _nouveau_instmem_fini,
+ .init = _nvkm_instmem_init,
+ .fini = _nvkm_instmem_fini,
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
},
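
The nv40 reservation above is computed rather than fixed: count the active units reported in bits 8..15 of register 0x001540, multiply by a per-chipset context size, add per-channel slack, scale by 32 channels and round the total up to a page. A small worked example of that arithmetic, using an assumed register value and the generic 0x4a40 context size (further fixed reservations made by the constructor are not shown in this hunk and are left out here too):

#include <stdio.h>

/* population count of the low byte, i.e. what hweight8() returns */
static unsigned demo_hweight8(unsigned v)
{
        unsigned n = 0;

        for (v &= 0xff; v; v >>= 1)
                n += v & 1;
        return n;
}

static unsigned demo_round_up(unsigned x, unsigned boundary)  /* power-of-two boundary */
{
        return (x + boundary - 1) & ~(boundary - 1);
}

int main(void)
{
        unsigned reg_001540 = 0x00003f00;  /* assumed: 6 active units in bits 8..15 */
        unsigned vs = demo_hweight8((reg_001540 & 0x0000ff00) >> 8);
        unsigned reserved = 0x4a40 * vs;   /* generic nv40 per-context size */

        reserved += 16 * 1024;             /* per-channel slack */
        reserved *= 32;                    /* 32 channels */
        reserved = demo_round_up(reserved, 4096);

        printf("PRAMIN reservation: %u KiB\n", reserved / 1024);
        return 0;
}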
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 7cb3b098a08d..8404143f93ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -21,21 +21,19 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/fb.h>
-#include <core/mm.h>
-
-#include "priv.h"
struct nv50_instmem_priv {
- struct nouveau_instmem base;
+ struct nvkm_instmem base;
spinlock_t lock;
u64 addr;
};
struct nv50_instobj_priv {
- struct nouveau_instobj base;
- struct nouveau_mem *mem;
+ struct nvkm_instobj base;
+ struct nvkm_mem *mem;
};
/******************************************************************************
@@ -43,9 +41,9 @@ struct nv50_instobj_priv {
*****************************************************************************/
static u32
-nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
+nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
{
- struct nv50_instmem_priv *priv = (void *)object->engine;
+ struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv50_instobj_priv *node = (void *)object;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
@@ -63,9 +61,9 @@ nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
}
static void
-nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
+nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
{
- struct nv50_instmem_priv *priv = (void *)object->engine;
+ struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv50_instobj_priv *node = (void *)object;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
@@ -81,28 +79,28 @@ nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
}
static void
-nv50_instobj_dtor(struct nouveau_object *object)
+nv50_instobj_dtor(struct nvkm_object *object)
{
struct nv50_instobj_priv *node = (void *)object;
- struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_fb *pfb = nvkm_fb(object);
pfb->ram->put(pfb, &node->mem);
- nouveau_instobj_destroy(&node->base);
+ nvkm_instobj_destroy(&node->base);
}
static int
-nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_instobj_args *args = data;
+ struct nvkm_fb *pfb = nvkm_fb(parent);
+ struct nvkm_instobj_args *args = data;
struct nv50_instobj_priv *node;
int ret;
args->size = max((args->size + 4095) & ~4095, (u32)4096);
args->align = max((args->align + 4095) & ~4095, (u32)4096);
- ret = nouveau_instobj_create(parent, engine, oclass, &node);
+ ret = nvkm_instobj_create(parent, engine, oclass, &node);
*pobject = nv_object(node);
if (ret)
return ret;
@@ -117,13 +115,13 @@ nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static struct nouveau_instobj_impl
+static struct nvkm_instobj_impl
nv50_instobj_oclass = {
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_instobj_ctor,
.dtor = nv50_instobj_dtor,
- .init = _nouveau_instobj_init,
- .fini = _nouveau_instobj_fini,
+ .init = _nvkm_instobj_init,
+ .fini = _nvkm_instobj_fini,
.rd32 = nv50_instobj_rd32,
.wr32 = nv50_instobj_wr32,
},
@@ -134,22 +132,22 @@ nv50_instobj_oclass = {
*****************************************************************************/
static int
-nv50_instmem_fini(struct nouveau_object *object, bool suspend)
+nv50_instmem_fini(struct nvkm_object *object, bool suspend)
{
struct nv50_instmem_priv *priv = (void *)object;
priv->addr = ~0ULL;
- return nouveau_instmem_fini(&priv->base, suspend);
+ return nvkm_instmem_fini(&priv->base, suspend);
}
static int
-nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_instmem_priv *priv;
int ret;
- ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -158,13 +156,13 @@ nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-nv50_instmem_oclass = &(struct nouveau_instmem_impl) {
+struct nvkm_oclass *
+nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
.base.handle = NV_SUBDEV(INSTMEM, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_instmem_ctor,
- .dtor = _nouveau_instmem_dtor,
- .init = _nouveau_instmem_init,
+ .dtor = _nvkm_instmem_dtor,
+ .init = _nvkm_instmem_init,
.fini = nv50_instmem_fini,
},
.instobj = &nv50_instobj_oclass.base,
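
The nv50 accessors above no longer assume an object sits inside a small fixed aperture; rd32/wr32 slide a 1 MiB PRAMIN window over VRAM, reprogramming the window base only when the accessed address falls outside the currently mapped megabyte (the real code does this under priv->lock and via a window register, both elided below). A simplified model of the windowed read:

#include <stdint.h>
#include <stdio.h>

/* a 1 MiB aperture into a larger address space, repositioned on demand */
struct demo_window {
        uint64_t base;          /* currently mapped 1 MiB-aligned base, ~0 = none */
        const uint32_t *mem;    /* stands in for the VRAM behind the aperture */
};

static uint32_t demo_window_rd32(struct demo_window *win, uint64_t addr)
{
        uint64_t base = addr & ~0xfffffULL;     /* which megabyte */
        uint64_t off  = addr &  0xfffffULL;     /* offset inside it */

        if (win->base != base) {
                /* the driver would reprogram the window register here */
                win->base = base;
        }
        return win->mem[(base + off) / 4];      /* read through the aperture */
}

int main(void)
{
        static uint32_t vram[0x300000 / 4];
        struct demo_window win = { .base = ~0ULL, .mem = vram };

        vram[0x245678 / 4] = 0xcafe;
        printf("%08x\n", (unsigned)demo_window_rd32(&win, 0x245678));
        return 0;
}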
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
new file mode 100644
index 000000000000..b10e292e5607
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -0,0 +1,54 @@
+#ifndef __NVKM_INSTMEM_PRIV_H__
+#define __NVKM_INSTMEM_PRIV_H__
+#include <subdev/instmem.h>
+
+struct nvkm_instobj_impl {
+ struct nvkm_oclass base;
+};
+
+struct nvkm_instobj_args {
+ u32 size;
+ u32 align;
+};
+
+#define nvkm_instobj_create(p,e,o,d) \
+ nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_instobj_destroy(p) ({ \
+ struct nvkm_instobj *iobj = (p); \
+ _nvkm_instobj_dtor(nv_object(iobj)); \
+})
+#define nvkm_instobj_init(p) \
+ nvkm_object_init(&(p)->base)
+#define nvkm_instobj_fini(p,s) \
+ nvkm_object_fini(&(p)->base, (s))
+
+int nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_instobj_dtor(struct nvkm_object *);
+#define _nvkm_instobj_init nvkm_object_init
+#define _nvkm_instobj_fini nvkm_object_fini
+
+struct nvkm_instmem_impl {
+ struct nvkm_oclass base;
+ struct nvkm_oclass *instobj;
+};
+
+#define nvkm_instmem_create(p,e,o,d) \
+ nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_instmem_destroy(p) \
+ nvkm_subdev_destroy(&(p)->base)
+#define nvkm_instmem_init(p) ({ \
+ struct nvkm_instmem *imem = (p); \
+ _nvkm_instmem_init(nv_object(imem)); \
+})
+#define nvkm_instmem_fini(p,s) ({ \
+ struct nvkm_instmem *imem = (p); \
+ _nvkm_instmem_fini(nv_object(imem), (s)); \
+})
+
+int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+#define _nvkm_instmem_dtor _nvkm_subdev_dtor
+int _nvkm_instmem_init(struct nvkm_object *);
+int _nvkm_instmem_fini(struct nvkm_object *, bool);
+#endif
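
The create macros in the new priv.h pass `sizeof(**d)` and `(void **)d` so the shared `_create_()` helper can allocate the chip-specific structure without knowing its type: `d` is a pointer-to-pointer to the derived struct, so `sizeof(**d)` is the derived struct's size. A small user-space sketch of the same idiom; the names here are illustrative and not part of the patch:

/* User-space model of the sizeof(**d) allocation idiom used by
 * nvkm_instobj_create()/nvkm_instmem_create() and friends. */
#include <stdio.h>
#include <stdlib.h>

struct base_obj {
	const char *name;
};

/* Generic helper: allocates 'length' bytes and hands the object back
 * through an untyped pointer-to-pointer, like nvkm_instobj_create_(). */
static int
obj_create_(const char *name, int length, void **pobject)
{
	struct base_obj *obj = calloc(1, length);
	if (!obj)
		return -1;
	obj->name = name;
	*pobject = obj;
	return 0;
}

/* Macro front-end: sizeof(**d) is the size of the *derived* struct,
 * because d has type "struct derived_obj **". */
#define obj_create(n, d) obj_create_((n), sizeof(**d), (void **)(d))

struct derived_obj {
	struct base_obj base;	/* base must come first, as in the nvkm code */
	int extra[8];
};

int
main(void)
{
	struct derived_obj *node = NULL;

	if (obj_create("demo", &node))
		return 1;
	printf("allocated %zu bytes for '%s'\n",
	       sizeof(*node), node->base.name);
	free(node);
	return 0;
}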
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
new file mode 100644
index 000000000000..e5df3d865f0c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -0,0 +1,4 @@
+nvkm-y += nvkm/subdev/ltc/base.o
+nvkm-y += nvkm/subdev/ltc/gf100.o
+nvkm-y += nvkm/subdev/ltc/gk104.o
+nvkm-y += nvkm/subdev/ltc/gm107.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 7fa331516f84..2fb87fbfd11c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -21,17 +21,15 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include "priv.h"
static int
-nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n,
- struct nouveau_mm_node **pnode)
+nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
{
struct nvkm_ltc_priv *priv = (void *)ltc;
int ret;
- ret = nouveau_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
+ ret = nvkm_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
if (ret)
*pnode = NULL;
@@ -39,14 +37,14 @@ nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n,
}
static void
-nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode)
+nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
{
struct nvkm_ltc_priv *priv = (void *)ltc;
- nouveau_mm_free(&priv->tags, pnode);
+ nvkm_mm_free(&priv->tags, pnode);
}
static void
-nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count)
+nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
{
const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
struct nvkm_ltc_priv *priv = (void *)ltc;
@@ -59,7 +57,7 @@ nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count)
}
static int
-nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4])
+nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
{
const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
struct nvkm_ltc_priv *priv = (void *)ltc;
@@ -69,7 +67,7 @@ nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4])
}
static int
-nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth)
+nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
{
const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
struct nvkm_ltc_priv *priv = (void *)ltc;
@@ -79,13 +77,13 @@ nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth)
}
int
-_nvkm_ltc_init(struct nouveau_object *object)
+_nvkm_ltc_init(struct nvkm_object *object)
{
const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
struct nvkm_ltc_priv *priv = (void *)object;
int ret, i;
- ret = nouveau_subdev_init(&priv->base.base);
+ ret = nvkm_subdev_init(&priv->base.base);
if (ret)
return ret;
@@ -98,15 +96,15 @@ _nvkm_ltc_init(struct nouveau_object *object)
}
int
-nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+nvkm_ltc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
const struct nvkm_ltc_impl *impl = (void *)oclass;
struct nvkm_ltc_priv *priv;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PLTCG",
- "l2c", length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PLTCG",
+ "l2c", length, pobject);
priv = *pobject;
if (ret)
return ret;
@@ -119,7 +117,7 @@ nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.tags_free = nvkm_ltc_tags_free;
priv->base.tags_clear = nvkm_ltc_tags_clear;
priv->base.zbc_min = 1; /* reserve 0 for disabled */
- priv->base.zbc_max = min(impl->zbc, NOUVEAU_LTC_MAX_ZBC_CNT) - 1;
+ priv->base.zbc_max = min(impl->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 2db0977284f8..8e7cc6200d60 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -21,12 +21,12 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "priv.h"
-
void
gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
{
@@ -62,7 +62,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
nv_wr32(priv, 0x17ea58, depth);
}
-static const struct nouveau_bitfield
+static const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
{ 0x00000001, "IDLE_ERROR_IQ" },
{ 0x00000002, "IDLE_ERROR_CBC" },
@@ -89,7 +89,7 @@ gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
if (stat) {
nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
- nouveau_bitfield_print(gf100_ltc_lts_intr_name, stat);
+ nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat);
pr_cont("\n");
}
@@ -97,7 +97,7 @@ gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
}
void
-gf100_ltc_intr(struct nouveau_subdev *subdev)
+gf100_ltc_intr(struct nvkm_subdev *subdev)
{
struct nvkm_ltc_priv *priv = (void *)subdev;
u32 mask;
@@ -112,7 +112,7 @@ gf100_ltc_intr(struct nouveau_subdev *subdev)
}
static int
-gf100_ltc_init(struct nouveau_object *object)
+gf100_ltc_init(struct nvkm_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
@@ -130,13 +130,13 @@ gf100_ltc_init(struct nouveau_object *object)
}
void
-gf100_ltc_dtor(struct nouveau_object *object)
+gf100_ltc_dtor(struct nvkm_object *object)
{
- struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_ltc_priv *priv = (void *)object;
- nouveau_mm_fini(&priv->tags);
- nouveau_mm_free(&pfb->vram, &priv->tag_ram);
+ nvkm_mm_fini(&priv->tags);
+ nvkm_mm_free(&pfb->vram, &priv->tag_ram);
nvkm_ltc_destroy(priv);
}
@@ -144,7 +144,7 @@ gf100_ltc_dtor(struct nouveau_object *object)
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
int
-gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
+gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
{
u32 tag_size, tag_margin, tag_align;
int ret;
@@ -170,8 +170,8 @@ gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
tag_size += tag_align;
tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nouveau_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
- &priv->tag_ram);
+ ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
+ &priv->tag_ram);
if (ret) {
priv->num_tags = 0;
} else {
@@ -183,16 +183,16 @@ gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
priv->tag_base = tag_base;
}
- ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+ ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
return ret;
}
int
-gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
@@ -218,10 +218,10 @@ gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
.base.handle = NV_SUBDEV(LTC, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_ltc_ctor,
.dtor = gf100_ltc_dtor,
.init = gf100_ltc_init,
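
gf100_ltc_lts_intr() above decodes the per-LTS interrupt status by walking a { bitmask, name } table and printing the name of each set bit through nvkm_bitfield_print(). A minimal user-space version of that decode, with a trimmed copy of the table (only the first two entries of gf100_ltc_lts_intr_name[] are reproduced here):

/* User-space sketch of the { bitmask, name } interrupt decode used by
 * gf100_ltc_lts_intr(). */
#include <stdint.h>
#include <stdio.h>

struct bitfield {
	uint32_t mask;
	const char *name;
};

static const struct bitfield lts_intr_name[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	/* further bits omitted in this sketch */
	{ 0, NULL },
};

static void
bitfield_print(const struct bitfield *bf, uint32_t value)
{
	for (; bf->name; bf++) {
		if (value & bf->mask) {
			printf(" %s", bf->name);
			value &= ~bf->mask;
		}
	}
	if (value)
		printf(" (unknown bits 0x%08x)", value);
}

int
main(void)
{
	uint32_t stat = 0x00000003;

	printf("LTC0_LTS0:");
	bitfield_print(lts_intr_name, stat);
	printf("\n");
	return 0;
}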
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index b39b5d0eb8f9..d53959b5ec67 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static int
-gk104_ltc_init(struct nouveau_object *object)
+gk104_ltc_init(struct nvkm_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
@@ -42,10 +41,10 @@ gk104_ltc_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
.base.handle = NV_SUBDEV(LTC, 0xe4),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_ltc_ctor,
.dtor = gf100_ltc_dtor,
.init = gk104_ltc_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 89fc4238f50c..6b3f6f4ce107 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -21,12 +21,11 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "priv.h"
-
static void
gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
{
@@ -75,7 +74,7 @@ gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
}
static void
-gm107_ltc_intr(struct nouveau_subdev *subdev)
+gm107_ltc_intr(struct nvkm_subdev *subdev)
{
struct nvkm_ltc_priv *priv = (void *)subdev;
u32 mask;
@@ -90,7 +89,7 @@ gm107_ltc_intr(struct nouveau_subdev *subdev)
}
static int
-gm107_ltc_init(struct nouveau_object *object)
+gm107_ltc_init(struct nvkm_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
@@ -107,11 +106,11 @@ gm107_ltc_init(struct nouveau_object *object)
}
static int
-gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
@@ -136,10 +135,10 @@ gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
+struct nvkm_oclass *
gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
.base.handle = NV_SUBDEV(LTC, 0xff),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm107_ltc_ctor,
.dtor = gf100_ltc_dtor,
.init = gm107_ltc_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 41f179d93da6..09537d7b6783 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,23 +1,22 @@
#ifndef __NVKM_LTC_PRIV_H__
#define __NVKM_LTC_PRIV_H__
-
#include <subdev/ltc.h>
-#include <subdev/fb.h>
-#include <core/enum.h>
+#include <core/mm.h>
+struct nvkm_fb;
struct nvkm_ltc_priv {
- struct nouveau_ltc base;
+ struct nvkm_ltc base;
u32 ltc_nr;
u32 lts_nr;
u32 num_tags;
u32 tag_base;
- struct nouveau_mm tags;
- struct nouveau_mm_node *tag_ram;
+ struct nvkm_mm tags;
+ struct nvkm_mm_node *tag_ram;
- u32 zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT][4];
- u32 zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
+ u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
+ u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
};
#define nvkm_ltc_create(p,e,o,d) \
@@ -35,24 +34,24 @@ struct nvkm_ltc_priv {
_nvkm_ltc_fini(nv_object(_priv), (s)); \
})
-int nvkm_ltc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
+int nvkm_ltc_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
-#define _nvkm_ltc_dtor _nouveau_subdev_dtor
-int _nvkm_ltc_init(struct nouveau_object *);
-#define _nvkm_ltc_fini _nouveau_subdev_fini
+#define _nvkm_ltc_dtor _nvkm_subdev_dtor
+int _nvkm_ltc_init(struct nvkm_object *);
+#define _nvkm_ltc_fini _nvkm_subdev_fini
-int gf100_ltc_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void gf100_ltc_dtor(struct nouveau_object *);
-int gf100_ltc_init_tag_ram(struct nouveau_fb *, struct nvkm_ltc_priv *);
-int gf100_ltc_tags_alloc(struct nouveau_ltc *, u32, struct nouveau_mm_node **);
-void gf100_ltc_tags_free(struct nouveau_ltc *, struct nouveau_mm_node **);
+int gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void gf100_ltc_dtor(struct nvkm_object *);
+int gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
+int gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
+void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
struct nvkm_ltc_impl {
- struct nouveau_oclass base;
- void (*intr)(struct nouveau_subdev *);
+ struct nvkm_oclass base;
+ void (*intr)(struct nvkm_subdev *);
void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
void (*cbc_wait)(struct nvkm_ltc_priv *);
@@ -62,10 +61,9 @@ struct nvkm_ltc_impl {
void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
};
-void gf100_ltc_intr(struct nouveau_subdev *);
+void gf100_ltc_intr(struct nvkm_subdev *);
void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
new file mode 100644
index 000000000000..721643f04bb5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -0,0 +1,11 @@
+nvkm-y += nvkm/subdev/mc/base.o
+nvkm-y += nvkm/subdev/mc/nv04.o
+nvkm-y += nvkm/subdev/mc/nv40.o
+nvkm-y += nvkm/subdev/mc/nv44.o
+nvkm-y += nvkm/subdev/mc/nv4c.o
+nvkm-y += nvkm/subdev/mc/nv50.o
+nvkm-y += nvkm/subdev/mc/g94.o
+nvkm-y += nvkm/subdev/mc/g98.o
+nvkm-y += nvkm/subdev/mc/gf100.o
+nvkm-y += nvkm/subdev/mc/gf106.o
+nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index ca7cee3a314a..5b051a26653e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -21,20 +21,21 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
+
+#include <core/device.h>
#include <core/option.h>
static inline void
-nouveau_mc_unk260(struct nouveau_mc *pmc, u32 data)
+nvkm_mc_unk260(struct nvkm_mc *pmc, u32 data)
{
- const struct nouveau_mc_oclass *impl = (void *)nv_oclass(pmc);
+ const struct nvkm_mc_oclass *impl = (void *)nv_oclass(pmc);
if (impl->unk260)
impl->unk260(pmc, data);
}
static inline u32
-nouveau_mc_intr_mask(struct nouveau_mc *pmc)
+nvkm_mc_intr_mask(struct nvkm_mc *pmc)
{
u32 intr = nv_rd32(pmc, 0x000100);
if (intr == 0xffffffff) /* likely fallen off the bus */
@@ -43,25 +44,25 @@ nouveau_mc_intr_mask(struct nouveau_mc *pmc)
}
static irqreturn_t
-nouveau_mc_intr(int irq, void *arg)
+nvkm_mc_intr(int irq, void *arg)
{
- struct nouveau_mc *pmc = arg;
- const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
- const struct nouveau_mc_intr *map = oclass->intr;
- struct nouveau_subdev *unit;
+ struct nvkm_mc *pmc = arg;
+ const struct nvkm_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
+ const struct nvkm_mc_intr *map = oclass->intr;
+ struct nvkm_subdev *unit;
u32 intr;
nv_wr32(pmc, 0x000140, 0x00000000);
nv_rd32(pmc, 0x000140);
- intr = nouveau_mc_intr_mask(pmc);
+ intr = nvkm_mc_intr_mask(pmc);
if (pmc->use_msi)
oclass->msi_rearm(pmc);
if (intr) {
- u32 stat = intr = nouveau_mc_intr_mask(pmc);
+ u32 stat = intr = nvkm_mc_intr_mask(pmc);
while (map->stat) {
if (intr & map->stat) {
- unit = nouveau_subdev(pmc, map->unit);
+ unit = nvkm_subdev(pmc, map->unit);
if (unit && unit->intr)
unit->intr(unit);
stat &= ~map->stat;
@@ -78,18 +79,18 @@ nouveau_mc_intr(int irq, void *arg)
}
int
-_nouveau_mc_fini(struct nouveau_object *object, bool suspend)
+_nvkm_mc_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_mc *pmc = (void *)object;
+ struct nvkm_mc *pmc = (void *)object;
nv_wr32(pmc, 0x000140, 0x00000000);
- return nouveau_subdev_fini(&pmc->base, suspend);
+ return nvkm_subdev_fini(&pmc->base, suspend);
}
int
-_nouveau_mc_init(struct nouveau_object *object)
+_nvkm_mc_init(struct nvkm_object *object)
{
- struct nouveau_mc *pmc = (void *)object;
- int ret = nouveau_subdev_init(&pmc->base);
+ struct nvkm_mc *pmc = (void *)object;
+ int ret = nvkm_subdev_init(&pmc->base);
if (ret)
return ret;
nv_wr32(pmc, 0x000140, 0x00000001);
@@ -97,34 +98,34 @@ _nouveau_mc_init(struct nouveau_object *object)
}
void
-_nouveau_mc_dtor(struct nouveau_object *object)
+_nvkm_mc_dtor(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
- struct nouveau_mc *pmc = (void *)object;
+ struct nvkm_device *device = nv_device(object);
+ struct nvkm_mc *pmc = (void *)object;
free_irq(pmc->irq, pmc);
if (pmc->use_msi)
pci_disable_msi(device->pdev);
- nouveau_subdev_destroy(&pmc->base);
+ nvkm_subdev_destroy(&pmc->base);
}
int
-nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *bclass, int length, void **pobject)
+nvkm_mc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *bclass, int length, void **pobject)
{
- const struct nouveau_mc_oclass *oclass = (void *)bclass;
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_mc *pmc;
+ const struct nvkm_mc_oclass *oclass = (void *)bclass;
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_mc *pmc;
int ret;
- ret = nouveau_subdev_create_(parent, engine, bclass, 0, "PMC",
- "master", length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, bclass, 0, "PMC",
+ "master", length, pobject);
pmc = *pobject;
if (ret)
return ret;
- pmc->unk260 = nouveau_mc_unk260;
+ pmc->unk260 = nvkm_mc_unk260;
- if (nv_device_is_pci(device))
+ if (nv_device_is_pci(device)) {
switch (device->pdev->device & 0x0ff0) {
case 0x00f0:
case 0x02e0:
@@ -138,10 +139,11 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
default:
pmc->use_msi = true;
break;
+ }
}
- pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI",
- pmc->use_msi);
+ pmc->use_msi = nvkm_boolopt(device->cfgopt, "NvMSI",
+ pmc->use_msi);
if (pmc->use_msi && oclass->msi_rearm) {
pmc->use_msi = pci_enable_msi(device->pdev) == 0;
@@ -159,9 +161,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
pmc->irq = ret;
- ret = request_irq(pmc->irq, nouveau_mc_intr, IRQF_SHARED, "nouveau",
- pmc);
-
+ ret = request_irq(pmc->irq, nvkm_mc_intr, IRQF_SHARED, "nvkm", pmc);
if (ret < 0)
return ret;
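
nvkm_mc_intr() above reads the PMC interrupt status, walks the chipset's nvkm_mc_intr[] table, calls the ->intr() hook of each subdev whose status bit is set, and clears handled bits so leftovers can be reported as unknown. A standalone model of that dispatch with a simplified table and stub handlers, outside the kernel:

/* User-space model of the MC interrupt dispatch in nvkm_mc_intr(). */
#include <stdint.h>
#include <stdio.h>

struct mc_map {
	uint32_t stat;
	void (*intr)(void);	/* stands in for the subdev ->intr() hook */
};

static void fifo_intr(void) { printf("  FIFO handler ran\n"); }
static void gr_intr(void)   { printf("  GR handler ran\n"); }

static const struct mc_map intr_map[] = {
	{ 0x00000100, fifo_intr },
	{ 0x00001000, gr_intr },
	{ 0, NULL },
};

static void
mc_dispatch(uint32_t intr)
{
	uint32_t stat = intr;
	const struct mc_map *map;

	for (map = intr_map; map->stat; map++) {
		if (intr & map->stat) {
			map->intr();
			stat &= ~map->stat;
		}
	}
	if (stat)
		printf("  unknown intr 0x%08x\n", stat);
}

int
main(void)
{
	printf("intr = 0x00001100:\n");
	mc_dispatch(0x00001100);
	printf("intr = 0x80000100:\n");
	mc_dispatch(0x80000100);	/* 0x80000000 has no entry in this table */
	return 0;
}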
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
index 5f4541105e73..f042e7d8321d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
@@ -21,17 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-struct nouveau_oclass *
-nv94_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+g94_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x94),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
.intr = nv50_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 3c76d9038f38..8ab7f1272a14 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -21,39 +21,38 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-static const struct nouveau_mc_intr
-nv98_mc_intr[] = {
+static const struct nvkm_mc_intr
+g98_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
- { 0x00000001, NVDEV_ENGINE_PPP },
+ { 0x00000001, NVDEV_ENGINE_MSPPP },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
- { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
- { 0x00008000, NVDEV_ENGINE_BSP },
- { 0x00020000, NVDEV_ENGINE_VP },
- { 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */
+ { 0x00004000, NVDEV_ENGINE_SEC }, /* NV84:NVA3 */
+ { 0x00008000, NVDEV_ENGINE_MSVLD },
+ { 0x00020000, NVDEV_ENGINE_MSPDEC },
+ { 0x00040000, NVDEV_SUBDEV_PMU }, /* NVA3:NVC0 */
{ 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
{ 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
+ { 0x00400000, NVDEV_ENGINE_CE0 }, /* NVA3- */
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0042d101, NVDEV_SUBDEV_FB },
{},
};
-struct nouveau_oclass *
-nv98_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+g98_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x98),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
- .intr = nv98_mc_intr,
+ .intr = g98_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 15d41dc176ff..2425984b045e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -21,26 +21,25 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-const struct nouveau_mc_intr
-nvc0_mc_intr[] = {
+const struct nvkm_mc_intr
+gf100_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
- { 0x00000001, NVDEV_ENGINE_PPP },
- { 0x00000020, NVDEV_ENGINE_COPY0 },
- { 0x00000040, NVDEV_ENGINE_COPY1 },
- { 0x00000080, NVDEV_ENGINE_COPY2 },
+ { 0x00000001, NVDEV_ENGINE_MSPPP },
+ { 0x00000020, NVDEV_ENGINE_CE0 },
+ { 0x00000040, NVDEV_ENGINE_CE1 },
+ { 0x00000080, NVDEV_ENGINE_CE2 },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00002000, NVDEV_SUBDEV_FB },
- { 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00008000, NVDEV_ENGINE_MSVLD },
{ 0x00040000, NVDEV_SUBDEV_THERM },
- { 0x00020000, NVDEV_ENGINE_VP },
+ { 0x00020000, NVDEV_ENGINE_MSPDEC },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
{ 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x01000000, NVDEV_SUBDEV_PWR },
+ { 0x01000000, NVDEV_SUBDEV_PMU },
{ 0x02000000, NVDEV_SUBDEV_LTC },
{ 0x08000000, NVDEV_SUBDEV_FB },
{ 0x10000000, NVDEV_SUBDEV_BUS },
@@ -50,28 +49,28 @@ nvc0_mc_intr[] = {
};
static void
-nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
+gf100_mc_msi_rearm(struct nvkm_mc *pmc)
{
struct nv04_mc_priv *priv = (void *)pmc;
nv_wr32(priv, 0x088704, 0x00000000);
}
void
-nvc0_mc_unk260(struct nouveau_mc *pmc, u32 data)
+gf100_mc_unk260(struct nvkm_mc *pmc, u32 data)
{
nv_wr32(pmc, 0x000260, data);
}
-struct nouveau_oclass *
-nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+gf100_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
- .intr = nvc0_mc_intr,
- .msi_rearm = nvc0_mc_msi_rearm,
- .unk260 = nvc0_mc_unk260,
+ .intr = gf100_mc_intr,
+ .msi_rearm = gf100_mc_msi_rearm,
+ .unk260 = gf100_mc_unk260,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
index 68b5f61aadb5..8d2a8f457778 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
@@ -21,19 +21,18 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-struct nouveau_oclass *
-nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+gf106_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0xc3),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
- .intr = nvc0_mc_intr,
+ .intr = gf100_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
- .unk260 = nvc0_mc_unk260,
+ .unk260 = gf100_mc_unk260,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index b8d6cb435d0a..43b27742956d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -21,18 +21,17 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-struct nouveau_oclass *
-gk20a_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+gk20a_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0xea),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
- .intr = nvc0_mc_intr,
+ .intr = gf100_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 2d787e4dfefa..32713827b4dc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -21,10 +21,9 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-const struct nouveau_mc_intr
+const struct nvkm_mc_intr
nv04_mc_intr[] = {
{ 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
{ 0x00000100, NVDEV_ENGINE_FIFO },
@@ -40,25 +39,25 @@ nv04_mc_intr[] = {
};
int
-nv04_mc_init(struct nouveau_object *object)
+nv04_mc_init(struct nvkm_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
- return nouveau_mc_init(&priv->base);
+ return nvkm_mc_init(&priv->base);
}
int
-nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_mc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nvkm_mc_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -66,14 +65,14 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass *
-nv04_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+nv04_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x04),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv04_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
.intr = nv04_mc_intr,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
new file mode 100644
index 000000000000..411de3d08ab6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
@@ -0,0 +1,20 @@
+#ifndef __NVKM_MC_NV04_H__
+#define __NVKM_MC_NV04_H__
+#include "priv.h"
+
+struct nv04_mc_priv {
+ struct nvkm_mc base;
+};
+
+int nv04_mc_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+
+extern const struct nvkm_mc_intr nv04_mc_intr[];
+int nv04_mc_init(struct nvkm_object *);
+void nv40_mc_msi_rearm(struct nvkm_mc *);
+int nv44_mc_init(struct nvkm_object *object);
+int nv50_mc_init(struct nvkm_object *);
+extern const struct nvkm_mc_intr nv50_mc_intr[];
+extern const struct nvkm_mc_intr gf100_mc_intr[];
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
index 5b1faecfed2d..b7613059da08 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
@@ -21,24 +21,23 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
void
-nv40_mc_msi_rearm(struct nouveau_mc *pmc)
+nv40_mc_msi_rearm(struct nvkm_mc *pmc)
{
struct nv04_mc_priv *priv = (void *)pmc;
nv_wr08(priv, 0x088068, 0xff);
}
-struct nouveau_oclass *
-nv40_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+nv40_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x40),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv04_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
.intr = nv04_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index cc4d0d2d886e..2c7f7c701a2b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -21,11 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
int
-nv44_mc_init(struct nouveau_object *object)
+nv44_mc_init(struct nvkm_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
u32 tmp = nv_rd32(priv, 0x10020c);
@@ -37,17 +36,17 @@ nv44_mc_init(struct nouveau_object *object)
nv_wr32(priv, 0x001708, 0);
nv_wr32(priv, 0x00170c, tmp);
- return nouveau_mc_init(&priv->base);
+ return nvkm_mc_init(&priv->base);
}
-struct nouveau_oclass *
-nv44_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+nv44_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x44),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv44_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
.intr = nv04_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
index 165401c4045c..c0aac7e20d45 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
@@ -21,17 +21,16 @@
*
* Authors: Ilia Mirkin
*/
-
#include "nv04.h"
-struct nouveau_oclass *
-nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+nv4c_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv44_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
.intr = nv04_mc_intr,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 9ca93e2718f7..40e3019e1fde 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -21,16 +21,17 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
-const struct nouveau_mc_intr
+#include <core/device.h>
+
+const struct nvkm_mc_intr
nv50_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
{ 0x00000001, NVDEV_ENGINE_MPEG },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
- { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84- */
+ { 0x00004000, NVDEV_ENGINE_CIPHER }, /* NV84- */
{ 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
{ 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
@@ -43,28 +44,28 @@ nv50_mc_intr[] = {
};
static void
-nv50_mc_msi_rearm(struct nouveau_mc *pmc)
+nv50_mc_msi_rearm(struct nvkm_mc *pmc)
{
- struct nouveau_device *device = nv_device(pmc);
+ struct nvkm_device *device = nv_device(pmc);
pci_write_config_byte(device->pdev, 0x68, 0xff);
}
int
-nv50_mc_init(struct nouveau_object *object)
+nv50_mc_init(struct nvkm_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
- return nouveau_mc_init(&priv->base);
+ return nvkm_mc_init(&priv->base);
}
-struct nouveau_oclass *
-nv50_mc_oclass = &(struct nouveau_mc_oclass) {
+struct nvkm_oclass *
+nv50_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x50),
- .base.ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
- .dtor = _nouveau_mc_dtor,
+ .dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
- .fini = _nouveau_mc_fini,
+ .fini = _nvkm_mc_fini,
},
.intr = nv50_mc_intr,
.msi_rearm = nv50_mc_msi_rearm,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
new file mode 100644
index 000000000000..d2cad07afd1a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_MC_PRIV_H__
+#define __NVKM_MC_PRIV_H__
+#include <subdev/mc.h>
+
+#define nvkm_mc_create(p,e,o,d) \
+ nvkm_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_mc_destroy(p) ({ \
+ struct nvkm_mc *pmc = (p); _nvkm_mc_dtor(nv_object(pmc)); \
+})
+#define nvkm_mc_init(p) ({ \
+ struct nvkm_mc *pmc = (p); _nvkm_mc_init(nv_object(pmc)); \
+})
+#define nvkm_mc_fini(p,s) ({ \
+ struct nvkm_mc *pmc = (p); _nvkm_mc_fini(nv_object(pmc), (s)); \
+})
+
+int nvkm_mc_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+void _nvkm_mc_dtor(struct nvkm_object *);
+int _nvkm_mc_init(struct nvkm_object *);
+int _nvkm_mc_fini(struct nvkm_object *, bool);
+
+struct nvkm_mc_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nvkm_mc_oclass {
+ struct nvkm_oclass base;
+ const struct nvkm_mc_intr *intr;
+ void (*msi_rearm)(struct nvkm_mc *);
+ void (*unk260)(struct nvkm_mc *, u32);
+};
+
+void gf100_mc_unk260(struct nvkm_mc *, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
new file mode 100644
index 000000000000..012c9db687b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -0,0 +1,6 @@
+nvkm-y += nvkm/subdev/mmu/base.o
+nvkm-y += nvkm/subdev/mmu/nv04.o
+nvkm-y += nvkm/subdev/mmu/nv41.o
+nvkm-y += nvkm/subdev/mmu/nv44.o
+nvkm-y += nvkm/subdev/mmu/nv50.o
+nvkm-y += nvkm/subdev/mmu/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
new file mode 100644
index 000000000000..277b6ec04e24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <subdev/mmu.h>
+#include <subdev/fb.h>
+
+#include <core/gpuobj.h>
+
+void
+nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
+{
+ struct nvkm_vm *vm = vma->vm;
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_mm_node *r;
+ int big = vma->node->type != mmu->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 end, len;
+
+ delta = 0;
+ list_for_each_entry(r, &node->regions, rl_entry) {
+ u64 phys = (u64)r->offset << 12;
+ u32 num = r->length >> bits;
+
+ while (num) {
+ struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ mmu->map(vma, pgt, node, pte, len, phys, delta);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ phys += len << (bits + 12);
+ pde++;
+ pte = 0;
+ }
+
+ delta += (u64)len << vma->node->type;
+ }
+ }
+
+ mmu->flush(vm);
+}
+
+static void
+nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
+ struct nvkm_mem *mem)
+{
+ struct nvkm_vm *vm = vma->vm;
+ struct nvkm_mmu *mmu = vm->mmu;
+ int big = vma->node->type != mmu->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->pgt_bits - bits);
+ unsigned m, sglen;
+ u32 end, len;
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+ struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+ end = pte + sglen;
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+
+ if (num == 0)
+ goto finish;
+ }
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ if (m < sglen) {
+ for (; m < sglen; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+ if (num == 0)
+ goto finish;
+ }
+ }
+
+ }
+finish:
+ mmu->flush(vm);
+}
+
+static void
+nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
+ struct nvkm_mem *mem)
+{
+ struct nvkm_vm *vm = vma->vm;
+ struct nvkm_mmu *mmu = vm->mmu;
+ dma_addr_t *list = mem->pages;
+ int big = vma->node->type != mmu->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ mmu->map_sg(vma, pgt, mem, pte, len, list);
+
+ num -= len;
+ pte += len;
+ list += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ mmu->flush(vm);
+}
+
+void
+nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
+{
+ if (node->sg)
+ nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
+ else
+ if (node->pages)
+ nvkm_vm_map_sg(vma, 0, node->size << 12, node);
+ else
+ nvkm_vm_map_at(vma, 0, node);
+}
+
+void
+nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
+{
+ struct nvkm_vm *vm = vma->vm;
+ struct nvkm_mmu *mmu = vm->mmu;
+ int big = vma->node->type != mmu->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ mmu->unmap(pgt, pte, len);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ mmu->flush(vm);
+}
+
+void
+nvkm_vm_unmap(struct nvkm_vma *vma)
+{
+ nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_vm_pgd *vpgd;
+ struct nvkm_vm_pgt *vpgt;
+ struct nvkm_gpuobj *pgt;
+ u32 pde;
+
+ for (pde = fpde; pde <= lpde; pde++) {
+ vpgt = &vm->pgt[pde - vm->fpde];
+ if (--vpgt->refcount[big])
+ continue;
+
+ pgt = vpgt->obj[big];
+ vpgt->obj[big] = NULL;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+ nvkm_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ }
+}
+
+static int
+nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ struct nvkm_vm_pgd *vpgd;
+ struct nvkm_gpuobj *pgt;
+ int big = (type != mmu->spg_shift);
+ u32 pgt_size;
+ int ret;
+
+ pgt_size = (1 << (mmu->pgt_bits + 12)) >> type;
+ pgt_size *= 8;
+
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+ ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ if (unlikely(ret))
+ return ret;
+
+ /* someone beat us to filling the PDE while we didn't have the lock */
+ if (unlikely(vpgt->refcount[big]++)) {
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+ nvkm_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ return 0;
+ }
+
+ vpgt->obj[big] = pgt;
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ return 0;
+}
+
+int
+nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
+ struct nvkm_vma *vma)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
+ u32 align = (1 << page_shift) >> 12;
+ u32 msize = size >> 12;
+ u32 fpde, lpde, pde;
+ int ret;
+
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
+ &vma->node);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+ return ret;
+ }
+
+ fpde = (vma->node->offset >> mmu->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+
+ for (pde = fpde; pde <= lpde; pde++) {
+ struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ int big = (vma->node->type != mmu->spg_shift);
+
+ if (likely(vpgt->refcount[big])) {
+ vpgt->refcount[big]++;
+ continue;
+ }
+
+ ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
+ if (ret) {
+ if (pde != fpde)
+ nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
+ nvkm_mm_free(&vm->mm, &vma->node);
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+
+ vma->vm = NULL;
+ nvkm_vm_ref(vm, &vma->vm, NULL);
+ vma->offset = (u64)vma->node->offset << 12;
+ vma->access = access;
+ return 0;
+}
+
+void
+nvkm_vm_put(struct nvkm_vma *vma)
+{
+ struct nvkm_vm *vm = vma->vm;
+ struct nvkm_mmu *mmu = vm->mmu;
+ u32 fpde, lpde;
+
+ if (unlikely(vma->node == NULL))
+ return;
+ fpde = (vma->node->offset >> mmu->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
+ nvkm_mm_free(&vm->mm, &vma->node);
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+
+ nvkm_vm_ref(NULL, &vma->vm, NULL);
+}
+
+int
+nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+ u32 block, struct nvkm_vm **pvm)
+{
+ struct nvkm_vm *vm;
+ u64 mm_length = (offset + length) - mm_offset;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vm->pgd_list);
+ vm->mmu = mmu;
+ kref_init(&vm->refcount);
+ vm->fpde = offset >> (mmu->pgt_bits + 12);
+ vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12);
+
+ vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
+ if (!vm->pgt) {
+ kfree(vm);
+ return -ENOMEM;
+ }
+
+ ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+ block >> 12);
+ if (ret) {
+ vfree(vm->pgt);
+ kfree(vm);
+ return ret;
+ }
+
+ *pvm = vm;
+
+ return 0;
+}
+
+int
+nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
+ struct nvkm_vm **pvm)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(device);
+ return mmu->create(mmu, offset, length, mm_offset, pvm);
+}
+
+static int
+nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_vm_pgd *vpgd;
+ int i;
+
+ if (!pgd)
+ return 0;
+
+ vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+ if (!vpgd)
+ return -ENOMEM;
+
+ nvkm_gpuobj_ref(pgd, &vpgd->obj);
+
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ for (i = vm->fpde; i <= vm->lpde; i++)
+ mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ list_add(&vpgd->head, &vm->pgd_list);
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+ return 0;
+}
+
+static void
+nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_vm_pgd *vpgd, *tmp;
+ struct nvkm_gpuobj *pgd = NULL;
+
+ if (!mpgd)
+ return;
+
+ mutex_lock(&nv_subdev(mmu)->mutex);
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ if (vpgd->obj == mpgd) {
+ pgd = vpgd->obj;
+ list_del(&vpgd->head);
+ kfree(vpgd);
+ break;
+ }
+ }
+ mutex_unlock(&nv_subdev(mmu)->mutex);
+
+ nvkm_gpuobj_ref(NULL, &pgd);
+}
+
+static void
+nvkm_vm_del(struct kref *kref)
+{
+ struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
+ struct nvkm_vm_pgd *vpgd, *tmp;
+
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ nvkm_vm_unlink(vm, vpgd->obj);
+ }
+
+ nvkm_mm_fini(&vm->mm);
+ vfree(vm->pgt);
+ kfree(vm);
+}
+
+int
+nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
+{
+ if (ref) {
+ int ret = nvkm_vm_link(ref, pgd);
+ if (ret)
+ return ret;
+
+ kref_get(&ref->refcount);
+ }
+
+ if (*ptr) {
+ nvkm_vm_unlink(*ptr, pgd);
+ kref_put(&(*ptr)->refcount, nvkm_vm_del);
+ }
+
+ *ptr = ref;
+ return 0;
+}
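
The address arithmetic repeated in nvkm_vm_map_at(), nvkm_vm_map_sg() and nvkm_vm_unmap_at() above splits a VM offset (in 4 KiB units) into a page-directory index and a page-table index using mmu->pgt_bits and the allocation's page-size "type". A worked user-space example with gf100-like parameters (pgt_bits = 27 - 12, small pages of type 12, large pages of type 17, as set in gf100_mmu_ctor() later in this diff); vm->fpde is taken as 0 here for simplicity:

/* Worked example of the pde/pte split used by nvkm_vm_map_at() et al. */
#include <stdint.h>
#include <stdio.h>

static void
split(unsigned pgt_bits, unsigned type, uint64_t addr)
{
	unsigned offset = (unsigned)(addr >> 12);	/* VM offset in 4 KiB units */
	unsigned bits = type - 12;			/* extra shift for large pages */
	unsigned pde = offset >> pgt_bits;		/* relative to vm->fpde == 0 */
	unsigned pte = (offset & ((1u << pgt_bits) - 1)) >> bits;
	unsigned max = 1u << (pgt_bits - bits);		/* PTEs per page table */

	printf("addr 0x%010llx type %2u -> pde %u, pte %u (of %u)\n",
	       (unsigned long long)addr, type, pde, pte, max);
}

int
main(void)
{
	unsigned pgt_bits = 27 - 12;	/* each PDE covers 128 MiB of VM space */

	split(pgt_bits, 12, 0x00001000ULL);	/* second small page of pde 0 */
	split(pgt_bits, 12, 0x08000000ULL);	/* first page of pde 1 */
	split(pgt_bits, 17, 0x08020000ULL);	/* a 128 KiB large page */
	return 0;
}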
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2d0988755530..294cda37f068 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,25 +21,23 @@
*
* Authors: Ben Skeggs
*/
-
-#include <core/device.h>
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
+#include <subdev/mmu.h>
+#include <subdev/bar.h>
#include <subdev/fb.h>
-#include <subdev/vm.h>
#include <subdev/ltc.h>
-#include <subdev/bar.h>
+#include <subdev/timer.h>
-struct nvc0_vmmgr_priv {
- struct nouveau_vmmgr base;
+#include <core/gpuobj.h>
+
+struct gf100_mmu_priv {
+ struct nvkm_mmu base;
};
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
-const u8 nvc0_pte_storage_type_map[256] =
+const u8 gf100_pte_storage_type_map[256] =
{
0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -77,8 +75,7 @@ const u8 nvc0_pte_storage_type_map[256] =
static void
-nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
- struct nouveau_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
{
u32 pde[2] = { 0, 0 };
@@ -92,7 +89,7 @@ nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
}
static inline u64
-nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
{
phys >>= 8;
@@ -102,22 +99,20 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
phys |= ((u64)target << 32);
phys |= ((u64)memtype << 36);
-
return phys;
}
static void
-nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u64 next = 1 << (vma->node->type - 8);
- phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+ phys = gf100_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
if (mem->tag) {
- struct nouveau_ltc *ltc =
- nouveau_ltc(vma->vm->vmm->base.base.parent);
+ struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
u32 tag = mem->tag->offset + (delta >> 17);
phys |= (u64)tag << (32 + 12);
next |= (u64)1 << (32 + 12);
@@ -133,16 +128,16 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
/* compressed storage types are invalid for system memory */
- u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];
+ u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
+ u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
@@ -150,7 +145,7 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
pte <<= 3;
while (cnt--) {
@@ -161,11 +156,11 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
}
static void
-nvc0_vm_flush(struct nouveau_vm *vm)
+gf100_vm_flush(struct nvkm_vm *vm)
{
- struct nvc0_vmmgr_priv *priv = (void *)vm->vmm;
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_vm_pgd *vpgd;
+ struct gf100_mmu_priv *priv = (void *)vm->mmu;
+ struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_vm_pgd *vpgd;
u32 type;
bar->flush(bar);
@@ -197,21 +192,21 @@ nvc0_vm_flush(struct nouveau_vm *vm)
}
static int
-nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
- u64 mm_offset, struct nouveau_vm **pvm)
+gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+ struct nvkm_vm **pvm)
{
- return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
+ return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
}
static int
-nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvc0_vmmgr_priv *priv;
+ struct gf100_mmu_priv *priv;
int ret;
- ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+ ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -221,22 +216,22 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.pgt_bits = 27 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 17;
- priv->base.create = nvc0_vm_create;
- priv->base.map_pgt = nvc0_vm_map_pgt;
- priv->base.map = nvc0_vm_map;
- priv->base.map_sg = nvc0_vm_map_sg;
- priv->base.unmap = nvc0_vm_unmap;
- priv->base.flush = nvc0_vm_flush;
+ priv->base.create = gf100_vm_create;
+ priv->base.map_pgt = gf100_vm_map_pgt;
+ priv->base.map = gf100_vm_map;
+ priv->base.map_sg = gf100_vm_map_sg;
+ priv->base.unmap = gf100_vm_unmap;
+ priv->base.flush = gf100_vm_flush;
return 0;
}
-struct nouveau_oclass
-nvc0_vmmgr_oclass = {
- .handle = NV_SUBDEV(VM, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_vmmgr_ctor,
- .dtor = _nouveau_vmmgr_dtor,
- .init = _nouveau_vmmgr_init,
- .fini = _nouveau_vmmgr_fini,
+struct nvkm_oclass
+gf100_mmu_oclass = {
+ .handle = NV_SUBDEV(MMU, 0xc0),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_mmu_ctor,
+ .dtor = _nvkm_mmu_dtor,
+ .init = _nvkm_mmu_init,
+ .fini = _nvkm_mmu_fini,
},
};
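
gf100_vm_addr() above packs a PTE as the physical address shifted right by 8, with the target aperture at bit 32 and the storage type at bit 36; gf100_vm_map() additionally ORs the compression tag offset in at bit 44 (32 + 12). A small user-space encoder for just those visible fields; the low flag bits handled in the elided context lines are deliberately not modelled, and the sample values are arbitrary:

/* User-space sketch of the gf100 PTE packing visible in gf100_vm_addr()
 * and gf100_vm_map(): address >> 8, target at bit 32, memtype at bit 36,
 * compression tag at bit 44. */
#include <stdint.h>
#include <stdio.h>

static uint64_t
gf100_pte(uint64_t phys, unsigned target, unsigned memtype, unsigned tag)
{
	uint64_t pte = phys >> 8;

	pte |= (uint64_t)target << 32;
	pte |= (uint64_t)memtype << 36;
	if (tag)
		pte |= (uint64_t)tag << (32 + 12);
	return pte;
}

int
main(void)
{
	/* 256 MiB into VRAM, target 0, illustrative memtype 0xfe, tag 0x10 */
	uint64_t pte = gf100_pte(0x10000000ULL, 0, 0xfe, 0x10);

	printf("pte  = 0x%016llx\n", (unsigned long long)pte);
	printf("lo32 = 0x%08x (lower_32_bits)\n", (unsigned)(pte & 0xffffffff));
	printf("hi32 = 0x%08x (upper_32_bits)\n", (unsigned)(pte >> 32));
	return 0;
}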
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index ed45437167f2..fe93ea2711c9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -21,11 +21,11 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
+#include <core/device.h>
#include <core/gpuobj.h>
-#include "nv04.h"
-
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE ( 4 * 1024)
@@ -34,8 +34,8 @@
******************************************************************************/
static void
-nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = 0x00008 + (pte * 4);
while (cnt) {
@@ -51,7 +51,7 @@ nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
pte = 0x00008 + (pte * 4);
while (cnt--) {
@@ -61,7 +61,7 @@ nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
}
static void
-nv04_vm_flush(struct nouveau_vm *vm)
+nv04_vm_flush(struct nvkm_vm *vm)
{
}
@@ -70,27 +70,27 @@ nv04_vm_flush(struct nouveau_vm *vm)
******************************************************************************/
int
-nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
- struct nouveau_vm **pvm)
+nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
+ struct nvkm_vm **pvm)
{
return -EINVAL;
}
/*******************************************************************************
- * VMMGR subdev
+ * MMU subdev
******************************************************************************/
static int
-nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv04_vmmgr_priv *priv;
- struct nouveau_gpuobj *dma;
+ struct nv04_mmu_priv *priv;
+ struct nvkm_gpuobj *dma;
int ret;
- ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
- "pcigart", &priv);
+ ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
+ "pcigart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -105,15 +105,15 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.unmap = nv04_vm_unmap;
priv->base.flush = nv04_vm_flush;
- ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
- &priv->vm);
+ ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
+ &priv->vm);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL,
- (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
- 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->vm->pgt[0].obj[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+ (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
+ 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &priv->vm->pgt[0].obj[0]);
dma = priv->vm->pgt[0].obj[0];
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
@@ -125,27 +125,27 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
void
-nv04_vmmgr_dtor(struct nouveau_object *object)
+nv04_mmu_dtor(struct nvkm_object *object)
{
- struct nv04_vmmgr_priv *priv = (void *)object;
+ struct nv04_mmu_priv *priv = (void *)object;
if (priv->vm) {
- nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
- nouveau_vm_ref(NULL, &priv->vm, NULL);
+ nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
+ nvkm_vm_ref(NULL, &priv->vm, NULL);
}
if (priv->nullp) {
pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
priv->nullp, priv->null);
}
- nouveau_vmmgr_destroy(&priv->base);
+ nvkm_mmu_destroy(&priv->base);
}
-struct nouveau_oclass
-nv04_vmmgr_oclass = {
- .handle = NV_SUBDEV(VM, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_vmmgr_ctor,
- .dtor = nv04_vmmgr_dtor,
- .init = _nouveau_vmmgr_init,
- .fini = _nouveau_vmmgr_fini,
+struct nvkm_oclass
+nv04_mmu_oclass = {
+ .handle = NV_SUBDEV(MMU, 0x04),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv04_mmu_ctor,
+ .dtor = nv04_mmu_dtor,
+ .init = _nvkm_mmu_init,
+ .fini = _nvkm_mmu_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
new file mode 100644
index 000000000000..7bf6f4b38f1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -0,0 +1,19 @@
+#ifndef __NV04_MMU_PRIV__
+#define __NV04_MMU_PRIV__
+
+#include <subdev/mmu.h>
+
+struct nv04_mmu_priv {
+ struct nvkm_mmu base;
+ struct nvkm_vm *vm;
+ dma_addr_t null;
+ void *nullp;
+};
+
+static inline struct nv04_mmu_priv *
+nv04_mmu(void *obj)
+{
+ return (void *)nvkm_mmu(obj);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 064c76262876..61ee3ab11660 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
+#include <core/device.h>
#include <core/gpuobj.h>
#include <core/option.h>
-
#include <subdev/timer.h>
-#include <subdev/vm.h>
-
-#include "nv04.h"
#define NV41_GART_SIZE (512 * 1024 * 1024)
#define NV41_GART_PAGE ( 4 * 1024)
@@ -38,8 +36,8 @@
******************************************************************************/
static void
-nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = pte * 4;
while (cnt) {
@@ -55,7 +53,7 @@ nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
pte = pte * 4;
while (cnt--) {
@@ -65,9 +63,9 @@ nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
}
static void
-nv41_vm_flush(struct nouveau_vm *vm)
+nv41_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
+ struct nv04_mmu_priv *priv = (void *)vm->mmu;
mutex_lock(&nv_subdev(priv)->mutex);
nv_wr32(priv, 0x100810, 0x00000022);
@@ -80,26 +78,26 @@ nv41_vm_flush(struct nouveau_vm *vm)
}
/*******************************************************************************
- * VMMGR subdev
+ * MMU subdev
******************************************************************************/
static int
-nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
- struct nv04_vmmgr_priv *priv;
+ struct nvkm_device *device = nv_device(parent);
+ struct nv04_mmu_priv *priv;
int ret;
if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
- !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
- return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
- data, size, pobject);
+ !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
+ return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
+ data, size, pobject);
}
- ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
- "pciegart", &priv);
+ ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
+ "pciegart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -114,15 +112,15 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.unmap = nv41_vm_unmap;
priv->base.flush = nv41_vm_flush;
- ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
- &priv->vm);
+ ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
+ &priv->vm);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL,
- (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->vm->pgt[0].obj[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+ (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &priv->vm->pgt[0].obj[0]);
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
@@ -131,13 +129,13 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static int
-nv41_vmmgr_init(struct nouveau_object *object)
+nv41_mmu_init(struct nvkm_object *object)
{
- struct nv04_vmmgr_priv *priv = (void *)object;
- struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
+ struct nv04_mmu_priv *priv = (void *)object;
+ struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
int ret;
- ret = nouveau_vmmgr_init(&priv->base);
+ ret = nvkm_mmu_init(&priv->base);
if (ret)
return ret;
@@ -147,13 +145,13 @@ nv41_vmmgr_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv41_vmmgr_oclass = {
- .handle = NV_SUBDEV(VM, 0x41),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv41_vmmgr_ctor,
- .dtor = nv04_vmmgr_dtor,
- .init = nv41_vmmgr_init,
- .fini = _nouveau_vmmgr_fini,
+struct nvkm_oclass
+nv41_mmu_oclass = {
+ .handle = NV_SUBDEV(MMU, 0x41),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv41_mmu_ctor,
+ .dtor = nv04_mmu_dtor,
+ .init = nv41_mmu_init,
+ .fini = _nvkm_mmu_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index fae1f67d5948..b90ded1887aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -21,14 +21,12 @@
*
* Authors: Ben Skeggs
*/
+#include "nv04.h"
+#include <core/device.h>
#include <core/gpuobj.h>
#include <core/option.h>
-
#include <subdev/timer.h>
-#include <subdev/vm.h>
-
-#include "nv04.h"
#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE ( 4 * 1024)
@@ -38,7 +36,7 @@
******************************************************************************/
static void
-nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
dma_addr_t *list, u32 pte, u32 cnt)
{
u32 base = (pte << 2) & ~0x0000000f;
@@ -84,10 +82,10 @@ nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
}
static void
-nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
- struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
+ struct nv04_mmu_priv *priv = (void *)vma->vm->mmu;
u32 tmp[4];
int i;
@@ -115,9 +113,9 @@ nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
- struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
+ struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt);
if (pte & 3) {
u32 max = 4 - (pte & 3);
@@ -140,9 +138,9 @@ nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
}
static void
-nv44_vm_flush(struct nouveau_vm *vm)
+nv44_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
+ struct nv04_mmu_priv *priv = (void *)vm->mmu;
nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
nv_wr32(priv, 0x100808, 0x00000020);
if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
@@ -151,26 +149,26 @@ nv44_vm_flush(struct nouveau_vm *vm)
}
/*******************************************************************************
- * VMMGR subdev
+ * MMU subdev
******************************************************************************/
static int
-nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
- struct nv04_vmmgr_priv *priv;
+ struct nvkm_device *device = nv_device(parent);
+ struct nv04_mmu_priv *priv;
int ret;
if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
- !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
- return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
- data, size, pobject);
+ !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
+ return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
+ data, size, pobject);
}
- ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
- "pciegart", &priv);
+ ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
+ "pciegart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -191,15 +189,15 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return -ENOMEM;
}
- ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
- &priv->vm);
+ ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
+ &priv->vm);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL,
- (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
- 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->vm->pgt[0].obj[0]);
+ ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+ (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
+ 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
+ &priv->vm->pgt[0].obj[0]);
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
@@ -208,14 +206,14 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static int
-nv44_vmmgr_init(struct nouveau_object *object)
+nv44_mmu_init(struct nvkm_object *object)
{
- struct nv04_vmmgr_priv *priv = (void *)object;
- struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
+ struct nv04_mmu_priv *priv = (void *)object;
+ struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0];
u32 addr;
int ret;
- ret = nouveau_vmmgr_init(&priv->base);
+ ret = nvkm_mmu_init(&priv->base);
if (ret)
return ret;
@@ -237,13 +235,13 @@ nv44_vmmgr_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv44_vmmgr_oclass = {
- .handle = NV_SUBDEV(VM, 0x44),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv44_vmmgr_ctor,
- .dtor = nv04_vmmgr_dtor,
- .init = nv44_vmmgr_init,
- .fini = _nouveau_vmmgr_fini,
+struct nvkm_oclass
+nv44_mmu_oclass = {
+ .handle = NV_SUBDEV(MMU, 0x44),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv44_mmu_ctor,
+ .dtor = nv04_mmu_dtor,
+ .init = nv44_mmu_init,
+ .fini = _nvkm_mmu_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index a4aa81a2173b..b83550fa7f96 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,22 +21,20 @@
*
* Authors: Ben Skeggs
*/
+#include <subdev/mmu.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
-#include <core/device.h>
+#include <core/engine.h>
#include <core/gpuobj.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/bar.h>
-#include <subdev/vm.h>
-
-struct nv50_vmmgr_priv {
- struct nouveau_vmmgr base;
+struct nv50_mmu_priv {
+ struct nvkm_mmu base;
};
static void
-nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
- struct nouveau_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
{
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
@@ -64,7 +62,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
{
phys |= 1; /* present */
phys |= (u64)memtype << 40;
@@ -77,8 +75,8 @@ vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
}
static void
-nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u32 comp = (mem->memtype & 0x180) >> 7;
u32 block, target;
@@ -86,8 +84,8 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
/* IGPs don't have real VRAM, re-target to stolen system memory */
target = 0;
- if (nouveau_fb(vma->vm->vmm)->ram->stolen) {
- phys += nouveau_fb(vma->vm->vmm)->ram->stolen;
+ if (nvkm_fb(vma->vm->mmu)->ram->stolen) {
+ phys += nvkm_fb(vma->vm->mmu)->ram->stolen;
target = 3;
}
@@ -124,8 +122,8 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+ struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
pte <<= 3;
@@ -138,7 +136,7 @@ nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
}
static void
-nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
pte <<= 3;
while (cnt--) {
@@ -149,11 +147,11 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
}
static void
-nv50_vm_flush(struct nouveau_vm *vm)
+nv50_vm_flush(struct nvkm_vm *vm)
{
- struct nv50_vmmgr_priv *priv = (void *)vm->vmm;
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_engine *engine;
+ struct nv50_mmu_priv *priv = (void *)vm->mmu;
+ struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_engine *engine;
int i, vme;
bar->flush(bar);
@@ -164,21 +162,24 @@ nv50_vm_flush(struct nouveau_vm *vm)
continue;
/* unfortunate hw bug workaround... */
- engine = nouveau_engine(priv, i);
+ engine = nvkm_engine(priv, i);
if (engine && engine->tlb_flush) {
engine->tlb_flush(engine);
continue;
}
switch (i) {
- case NVDEV_ENGINE_GR : vme = 0x00; break;
- case NVDEV_ENGINE_VP : vme = 0x01; break;
- case NVDEV_SUBDEV_BAR : vme = 0x06; break;
- case NVDEV_ENGINE_PPP :
- case NVDEV_ENGINE_MPEG : vme = 0x08; break;
- case NVDEV_ENGINE_BSP : vme = 0x09; break;
- case NVDEV_ENGINE_CRYPT: vme = 0x0a; break;
- case NVDEV_ENGINE_COPY0: vme = 0x0d; break;
+ case NVDEV_ENGINE_GR : vme = 0x00; break;
+ case NVDEV_ENGINE_VP :
+ case NVDEV_ENGINE_MSPDEC: vme = 0x01; break;
+ case NVDEV_SUBDEV_BAR : vme = 0x06; break;
+ case NVDEV_ENGINE_MSPPP :
+ case NVDEV_ENGINE_MPEG : vme = 0x08; break;
+ case NVDEV_ENGINE_BSP :
+ case NVDEV_ENGINE_MSVLD : vme = 0x09; break;
+ case NVDEV_ENGINE_CIPHER:
+ case NVDEV_ENGINE_SEC : vme = 0x0a; break;
+ case NVDEV_ENGINE_CE0 : vme = 0x0d; break;
default:
continue;
}
@@ -191,25 +192,25 @@ nv50_vm_flush(struct nouveau_vm *vm)
}
static int
-nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
- u64 mm_offset, struct nouveau_vm **pvm)
+nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
+ u64 mm_offset, struct nvkm_vm **pvm)
{
- u32 block = (1 << (vmm->pgt_bits + 12));
+ u32 block = (1 << (mmu->pgt_bits + 12));
if (block > length)
block = length;
- return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
+ return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
}
static int
-nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv50_vmmgr_priv *priv;
+ struct nv50_mmu_priv *priv;
int ret;
- ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+ ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -228,13 +229,13 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv50_vmmgr_oclass = {
- .handle = NV_SUBDEV(VM, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_vmmgr_ctor,
- .dtor = _nouveau_vmmgr_dtor,
- .init = _nouveau_vmmgr_init,
- .fini = _nouveau_vmmgr_fini,
+struct nvkm_oclass
+nv50_mmu_oclass = {
+ .handle = NV_SUBDEV(MMU, 0x50),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = nv50_mmu_ctor,
+ .dtor = _nvkm_mmu_dtor,
+ .init = _nvkm_mmu_init,
+ .fini = _nvkm_mmu_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild
new file mode 100644
index 000000000000..1a479e050b54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/subdev/mxm/base.o
+nvkm-y += nvkm/subdev/mxm/mxms.o
+nvkm-y += nvkm/subdev/mxm/nv50.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 51fcf7960417..0ca9dcabb6d3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -21,18 +21,16 @@
*
* Authors: Ben Skeggs
*/
+#include "mxms.h"
+#include <core/device.h>
#include <core/option.h>
-
-#include <subdev/i2c.h>
-#include <subdev/mxm.h>
#include <subdev/bios.h>
#include <subdev/bios/mxm.h>
-
-#include "mxms.h"
+#include <subdev/i2c.h>
static bool
-mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr,
+mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
u8 offset, u8 size, u8 *data)
{
struct i2c_msg msgs[] = {
@@ -44,11 +42,11 @@ mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr,
}
static bool
-mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version)
+mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
{
- struct nouveau_bios *bios = nouveau_bios(mxm);
- struct nouveau_i2c *i2c = nouveau_i2c(mxm);
- struct nouveau_i2c_port *port = NULL;
+ struct nvkm_bios *bios = nvkm_bios(mxm);
+ struct nvkm_i2c *i2c = nvkm_i2c(mxm);
+ struct nvkm_i2c_port *port = NULL;
u8 i2cidx, mxms[6], addr, size;
i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
@@ -79,9 +77,9 @@ mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version)
#if defined(CONFIG_ACPI)
static bool
-mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
+mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
{
- struct nouveau_device *device = nv_device(mxm);
+ struct nvkm_device *device = nv_device(mxm);
static char muid[] = {
0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
@@ -129,7 +127,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
static u8
-wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version)
+wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
{
u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
@@ -158,7 +156,7 @@ wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version)
}
static bool
-mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
+mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
{
u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
@@ -186,7 +184,7 @@ mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
obj = retn.pointer;
if (obj->type == ACPI_TYPE_BUFFER) {
mxm->mxms = kmemdup(obj->buffer.pointer,
- obj->buffer.length, GFP_KERNEL);
+ obj->buffer.length, GFP_KERNEL);
}
kfree(obj);
@@ -196,7 +194,7 @@ mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
static struct mxm_shadow_h {
const char *name;
- bool (*exec)(struct nouveau_mxm *, u8 version);
+ bool (*exec)(struct nvkm_mxm *, u8 version);
} _mxm_shadow[] = {
{ "ROM", mxm_shadow_rom },
#if defined(CONFIG_ACPI)
@@ -209,7 +207,7 @@ static struct mxm_shadow_h {
};
static int
-mxm_shadow(struct nouveau_mxm *mxm, u8 version)
+mxm_shadow(struct nvkm_mxm *mxm, u8 version)
{
struct mxm_shadow_h *shadow = _mxm_shadow;
do {
@@ -225,19 +223,18 @@ mxm_shadow(struct nouveau_mxm *mxm, u8 version)
}
int
-nouveau_mxm_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nouveau_mxm *mxm;
+ struct nvkm_device *device = nv_device(parent);
+ struct nvkm_bios *bios = nvkm_bios(device);
+ struct nvkm_mxm *mxm;
u8 ver, len;
u16 data;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
- length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
+ length, pobject);
mxm = *pobject;
if (ret)
return ret;
@@ -268,7 +265,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
mxms_foreach(mxm, 0, NULL, NULL);
- if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true))
+ if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
mxm->action |= MXM_SANITISE_DCB;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 4bde7f7f7b81..a9b1d63fed58 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -21,22 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
-#include <subdev/mxm.h>
#include "mxms.h"
#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
static u8 *
-mxms_data(struct nouveau_mxm *mxm)
+mxms_data(struct nvkm_mxm *mxm)
{
return mxm->mxms;
}
u16
-mxms_version(struct nouveau_mxm *mxm)
+mxms_version(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
u16 version = (mxms[4] << 8) | mxms[5];
@@ -54,19 +52,19 @@ mxms_version(struct nouveau_mxm *mxm)
}
u16
-mxms_headerlen(struct nouveau_mxm *mxm)
+mxms_headerlen(struct nvkm_mxm *mxm)
{
return 8;
}
u16
-mxms_structlen(struct nouveau_mxm *mxm)
+mxms_structlen(struct nvkm_mxm *mxm)
{
return *(u16 *)&mxms_data(mxm)[6];
}
bool
-mxms_checksum(struct nouveau_mxm *mxm)
+mxms_checksum(struct nvkm_mxm *mxm)
{
u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
u8 *mxms = mxms_data(mxm), sum = 0;
@@ -80,7 +78,7 @@ mxms_checksum(struct nouveau_mxm *mxm)
}
bool
-mxms_valid(struct nouveau_mxm *mxm)
+mxms_valid(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
if (*(u32 *)mxms != 0x5f4d584d) {
@@ -95,8 +93,8 @@ mxms_valid(struct nouveau_mxm *mxm)
}
bool
-mxms_foreach(struct nouveau_mxm *mxm, u8 types,
- bool (*exec)(struct nouveau_mxm *, u8 *, void *), void *info)
+mxms_foreach(struct nvkm_mxm *mxm, u8 types,
+ bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
{
u8 *mxms = mxms_data(mxm);
u8 *desc = mxms + mxms_headerlen(mxm);
@@ -180,7 +178,7 @@ mxms_foreach(struct nouveau_mxm *mxm, u8 types,
}
void
-mxms_output_device(struct nouveau_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
+mxms_output_device(struct nvkm_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
{
u64 data = ROM32(pdata[0]);
if (mxms_version(mxm) >= 0x0300)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
new file mode 100644
index 000000000000..4ef804012d06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -0,0 +1,22 @@
+#ifndef __NVMXM_MXMS_H__
+#define __NVMXM_MXMS_H__
+#include <subdev/mxm.h>
+
+struct mxms_odev {
+ u8 outp_type;
+ u8 conn_type;
+ u8 ddc_port;
+ u8 dig_conn;
+};
+
+void mxms_output_device(struct nvkm_mxm *, u8 *, struct mxms_odev *);
+
+u16 mxms_version(struct nvkm_mxm *);
+u16 mxms_headerlen(struct nvkm_mxm *);
+u16 mxms_structlen(struct nvkm_mxm *);
+bool mxms_checksum(struct nvkm_mxm *);
+bool mxms_valid(struct nvkm_mxm *);
+
+bool mxms_foreach(struct nvkm_mxm *, u8,
+ bool (*)(struct nvkm_mxm *, u8 *, void *), void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index fcaabe8456e3..42cac13ca629 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -21,17 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#include "mxms.h"
-#include <subdev/mxm.h>
#include <subdev/bios.h>
#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/mxm.h>
-#include "mxms.h"
-
struct nv50_mxm_priv {
- struct nouveau_mxm base;
+ struct nvkm_mxm base;
};
struct context {
@@ -40,7 +38,7 @@ struct context {
};
static bool
-mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
+mxm_match_tmds_partner(struct nvkm_mxm *mxm, u8 *data, void *info)
{
struct context *ctx = info;
struct mxms_odev desc;
@@ -53,9 +51,9 @@ mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
}
static bool
-mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
+mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
{
- struct nouveau_bios *bios = nouveau_bios(mxm);
+ struct nvkm_bios *bios = nvkm_bios(mxm);
struct context *ctx = info;
u64 desc = *(u64 *)data;
@@ -98,9 +96,9 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
}
static int
-mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
+mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
{
- struct nouveau_mxm *mxm = data;
+ struct nvkm_mxm *mxm = data;
struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
u8 type, i2cidx, link, ver, len;
u8 *conn;
@@ -180,7 +178,7 @@ mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
}
static bool
-mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
+mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
{
u64 desc = *(u64 *)data;
if ((desc & 0xf0) != 0xf0)
@@ -189,9 +187,9 @@ mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
}
static void
-mxm_dcb_sanitise(struct nouveau_mxm *mxm)
+mxm_dcb_sanitise(struct nvkm_mxm *mxm)
{
- struct nouveau_bios *bios = nouveau_bios(mxm);
+ struct nvkm_bios *bios = nvkm_bios(mxm);
u8 ver, hdr, cnt, len;
u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
if (dcb == 0x0000 || ver != 0x40) {
@@ -204,14 +202,14 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
}
static int
-nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_mxm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_mxm_priv *priv;
int ret;
- ret = nouveau_mxm_create(parent, engine, oclass, &priv);
+ ret = nvkm_mxm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -221,13 +219,13 @@ nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv50_mxm_oclass = {
.handle = NV_SUBDEV(MXM, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_mxm_ctor,
- .dtor = _nouveau_mxm_dtor,
- .init = _nouveau_mxm_init,
- .fini = _nouveau_mxm_fini,
+ .dtor = _nvkm_mxm_dtor,
+ .init = _nvkm_mxm_init,
+ .fini = _nvkm_mxm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
new file mode 100644
index 000000000000..9a150d520225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -0,0 +1,8 @@
+nvkm-y += nvkm/subdev/pmu/base.o
+nvkm-y += nvkm/subdev/pmu/memx.o
+nvkm-y += nvkm/subdev/pmu/gt215.o
+nvkm-y += nvkm/subdev/pmu/gf100.o
+nvkm-y += nvkm/subdev/pmu/gf110.o
+nvkm-y += nvkm/subdev/pmu/gk104.o
+nvkm-y += nvkm/subdev/pmu/gk208.o
+nvkm-y += nvkm/subdev/pmu/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
new file mode 100644
index 000000000000..054b2d2eec35
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <subdev/timer.h>
+
+void
+nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
+{
+ const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
+ if (impl->pgob)
+ impl->pgob(pmu, enable);
+}
+
+static int
+nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ struct nvkm_subdev *subdev = nv_subdev(pmu);
+ u32 addr;
+
+ /* wait for a free slot in the fifo */
+ addr = nv_rd32(pmu, 0x10a4a0);
+ if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
+ return -EBUSY;
+
+ /* we currently only support a single process at a time waiting
+ * on a synchronous reply, take the PMU mutex and tell the
+ * receive handler what we're waiting for
+ */
+ if (reply) {
+ mutex_lock(&subdev->mutex);
+ pmu->recv.message = message;
+ pmu->recv.process = process;
+ }
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(pmu, 0x10a580, 0x00000001);
+ } while (nv_rd32(pmu, 0x10a580) != 0x00000001);
+
+ /* write the packet */
+ nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+ pmu->send.base));
+ nv_wr32(pmu, 0x10a1c4, process);
+ nv_wr32(pmu, 0x10a1c4, message);
+ nv_wr32(pmu, 0x10a1c4, data0);
+ nv_wr32(pmu, 0x10a1c4, data1);
+ nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nv_wr32(pmu, 0x10a580, 0x00000000);
+
+ /* wait for reply, if requested */
+ if (reply) {
+ wait_event(pmu->recv.wait, (pmu->recv.process == 0));
+ reply[0] = pmu->recv.data[0];
+ reply[1] = pmu->recv.data[1];
+ mutex_unlock(&subdev->mutex);
+ }
+
+ return 0;
+}
+
+static void
+nvkm_pmu_recv(struct work_struct *work)
+{
+ struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
+ u32 process, message, data0, data1;
+
+ /* nothing to do if GET == PUT */
+ u32 addr = nv_rd32(pmu, 0x10a4cc);
+ if (addr == nv_rd32(pmu, 0x10a4c8))
+ return;
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(pmu, 0x10a580, 0x00000002);
+ } while (nv_rd32(pmu, 0x10a580) != 0x00000002);
+
+ /* read the packet */
+ nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+ pmu->recv.base));
+ process = nv_rd32(pmu, 0x10a1c4);
+ message = nv_rd32(pmu, 0x10a1c4);
+ data0 = nv_rd32(pmu, 0x10a1c4);
+ data1 = nv_rd32(pmu, 0x10a1c4);
+ nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nv_wr32(pmu, 0x10a580, 0x00000000);
+
+ /* wake process if it's waiting on a synchronous reply */
+ if (pmu->recv.process) {
+ if (process == pmu->recv.process &&
+ message == pmu->recv.message) {
+ pmu->recv.data[0] = data0;
+ pmu->recv.data[1] = data1;
+ pmu->recv.process = 0;
+ wake_up(&pmu->recv.wait);
+ return;
+ }
+ }
+
+ /* right now there's no other expected responses from the engine,
+ * so assume that any unexpected message is an error.
+ */
+ nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (char)((process & 0x000000ff) >> 0),
+ (char)((process & 0x0000ff00) >> 8),
+ (char)((process & 0x00ff0000) >> 16),
+ (char)((process & 0xff000000) >> 24),
+ process, message, data0, data1);
+}
+
+static void
+nvkm_pmu_intr(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = (void *)subdev;
+ u32 disp = nv_rd32(pmu, 0x10a01c);
+ u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000020) {
+ u32 stat = nv_rd32(pmu, 0x10a16c);
+ if (stat & 0x80000000) {
+ nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
+ stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
+ nv_wr32(pmu, 0x10a16c, 0x00000000);
+ intr &= ~0x00000020;
+ }
+ }
+
+ if (intr & 0x00000040) {
+ schedule_work(&pmu->recv.work);
+ nv_wr32(pmu, 0x10a004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr & 0x00000080) {
+ nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
+ nv_rd32(pmu, 0x10a7a4));
+ nv_wr32(pmu, 0x10a004, 0x00000080);
+ intr &= ~0x00000080;
+ }
+
+ if (intr) {
+ nv_error(pmu, "intr 0x%08x\n", intr);
+ nv_wr32(pmu, 0x10a004, intr);
+ }
+}
+
+int
+_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_pmu *pmu = (void *)object;
+
+ nv_wr32(pmu, 0x10a014, 0x00000060);
+ flush_work(&pmu->recv.work);
+
+ return nvkm_subdev_fini(&pmu->base, suspend);
+}
+
+int
+_nvkm_pmu_init(struct nvkm_object *object)
+{
+ const struct nvkm_pmu_impl *impl = (void *)object->oclass;
+ struct nvkm_pmu *pmu = (void *)object;
+ int ret, i;
+
+ ret = nvkm_subdev_init(&pmu->base);
+ if (ret)
+ return ret;
+
+ nv_subdev(pmu)->intr = nvkm_pmu_intr;
+ pmu->message = nvkm_pmu_send;
+ pmu->pgob = nvkm_pmu_pgob;
+
+ /* prevent previous ucode from running, wait for idle, reset */
+ nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+ nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
+ nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
+ nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
+ nv_rd32(pmu, 0x000200);
+ nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
+
+ /* upload data segment */
+ nv_wr32(pmu, 0x10a1c0, 0x01000000);
+ for (i = 0; i < impl->data.size / 4; i++)
+ nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
+
+ /* upload code segment */
+ nv_wr32(pmu, 0x10a180, 0x01000000);
+ for (i = 0; i < impl->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(pmu, 0x10a188, i >> 6);
+ nv_wr32(pmu, 0x10a184, impl->code.data[i]);
+ }
+
+ /* start it running */
+ nv_wr32(pmu, 0x10a10c, 0x00000000);
+ nv_wr32(pmu, 0x10a104, 0x00000000);
+ nv_wr32(pmu, 0x10a100, 0x00000002);
+
+ /* wait for valid host->pmu ring configuration */
+ if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
+ return -EBUSY;
+ pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
+ pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
+
+ /* wait for valid pmu->host ring configuration */
+ if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
+ return -EBUSY;
+ pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
+ pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
+
+ nv_wr32(pmu, 0x10a010, 0x000000e0);
+ return 0;
+}
+
+int
+nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
+{
+ struct nvkm_pmu *pmu;
+ int ret;
+
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
+ "pmu", length, pobject);
+ pmu = *pobject;
+ if (ret)
+ return ret;
+
+ INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
+ init_waitqueue_head(&pmu->recv.wait);
+ return 0;
+}
+
+int
+_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_pmu *pmu;
+ int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
+ *pobject = nv_object(pmu);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/arith.fuc
index 214a6d9e088d..214a6d9e088d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/arith.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3
index 21bf8cc7618f..37e8407b7462 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3
@@ -32,7 +32,7 @@
#include "macros.fuc"
-.section #nvc0_pwr_data
+.section #gf100_pmu_data
#define INCLUDE_PROC
#include "kernel.fuc"
#include "arith.fuc"
@@ -56,7 +56,7 @@
#undef INCLUDE_DATA
.align 256
-.section #nvc0_pwr_code
+.section #gf100_pmu_code
#define INCLUDE_CODE
#include "kernel.fuc"
#include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 90221d973f84..302557c52d03 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nvc0_pwr_data[] = {
+uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
};
-uint32_t nvc0_pwr_code[] = {
+uint32_t gf100_pmu_code[] = {
0x039e0ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
index b85443261569..ae9c3f18ae01 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
@@ -32,7 +32,7 @@
#include "macros.fuc"
-.section #nvd0_pwr_data
+.section #gf110_pmu_data
#define INCLUDE_PROC
#include "kernel.fuc"
#include "arith.fuc"
@@ -56,7 +56,7 @@
#undef INCLUDE_DATA
.align 256
-.section #nvd0_pwr_code
+.section #gf110_pmu_code
#define INCLUDE_CODE
#include "kernel.fuc"
#include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
index 7e16aab44d85..a0c499e4543c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t nvd0_pwr_data[] = {
+uint32_t gf110_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
};
-uint32_t nvd0_pwr_code[] = {
+uint32_t gf110_pmu_code[] = {
0x034d0ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5
index b439519ec866..093dc81880f4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5
@@ -32,7 +32,7 @@
#include "macros.fuc"
-.section #nv108_pwr_data
+.section #gk208_pmu_data
#define INCLUDE_PROC
#include "kernel.fuc"
#include "arith.fuc"
@@ -56,7 +56,7 @@
#undef INCLUDE_DATA
.align 256
-.section #nv108_pwr_code
+.section #gk208_pmu_code
#define INCLUDE_CODE
#include "kernel.fuc"
#include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 713e11e2953d..fe4f63deeaab 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t nv108_pwr_data[] = {
+uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
};
-uint32_t nv108_pwr_code[] = {
+uint32_t gk208_pmu_code[] = {
0x031c0ef5,
/* 0x0004: rd32 */
0xf607a040,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3
index daa06c1c655e..393049fc8b2d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3
@@ -32,7 +32,7 @@
#include "macros.fuc"
-.section #nva3_pwr_data
+.section #gt215_pmu_data
#define INCLUDE_PROC
#include "kernel.fuc"
#include "arith.fuc"
@@ -56,7 +56,7 @@
#undef INCLUDE_DATA
.align 256
-.section #nva3_pwr_code
+.section #gt215_pmu_code
#define INCLUDE_CODE
#include "kernel.fuc"
#include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index d1f9b6cb66d7..2686f8fad0f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t nva3_pwr_data[] = {
+uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
};
-uint32_t nva3_pwr_code[] = {
+uint32_t gt215_pmu_code[] = {
0x039e0ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
index c2bb616a8da5..c2bb616a8da5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/i2c_.fuc
index 757dda700024..757dda700024 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/i2c_.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/idle.fuc
index 98f1c3738b42..98f1c3738b42 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/idle.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
index 5cf5be63cbef..5cf5be63cbef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
index 96fc984dafdc..96fc984dafdc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
index ec03f9a4290b..ec03f9a4290b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
index c8b06cb77e72..c8b06cb77e72 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/perf.fuc
index 38eadf705cbf..38eadf705cbf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/perf.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
index 0c3a71bf5459..0c3a71bf5459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 9a773e66efa4..78a4ea0101f1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -21,21 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
-#include "fuc/nvc0.fuc.h"
+#include "fuc/gf100.fuc3.h"
-struct nouveau_oclass *
-nvc0_pwr_oclass = &(struct nvkm_pwr_impl) {
- .base.handle = NV_SUBDEV(PWR, 0xc0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_pwr_ctor,
- .dtor = _nouveau_pwr_dtor,
- .init = _nouveau_pwr_init,
- .fini = _nouveau_pwr_fini,
+struct nvkm_oclass *
+gf100_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xc0),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = _nvkm_pmu_init,
+ .fini = _nvkm_pmu_fini,
},
- .code.data = nvc0_pwr_code,
- .code.size = sizeof(nvc0_pwr_code),
- .data.data = nvc0_pwr_data,
- .data.size = sizeof(nvc0_pwr_data),
+ .code.data = gf100_pmu_code,
+ .code.size = sizeof(gf100_pmu_code),
+ .data.data = gf100_pmu_data,
+ .data.size = sizeof(gf100_pmu_data),
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
index 2b29be5d08ac..6b3a23839ff0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
@@ -21,21 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
-#include "fuc/nvd0.fuc.h"
+#include "fuc/gf110.fuc4.h"
-struct nouveau_oclass *
-nvd0_pwr_oclass = &(struct nvkm_pwr_impl) {
- .base.handle = NV_SUBDEV(PWR, 0xd0),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_pwr_ctor,
- .dtor = _nouveau_pwr_dtor,
- .init = _nouveau_pwr_init,
- .fini = _nouveau_pwr_fini,
+struct nvkm_oclass *
+gf110_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xd0),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = _nvkm_pmu_init,
+ .fini = _nvkm_pmu_fini,
},
- .code.data = nvd0_pwr_code,
- .code.size = sizeof(nvd0_pwr_code),
- .data.data = nvd0_pwr_data,
- .data.size = sizeof(nvd0_pwr_data),
+ .code.data = gf110_pmu_code,
+ .code.size = sizeof(gf110_pmu_code),
+ .data.data = gf110_pmu_data,
+ .data.size = sizeof(gf110_pmu_data),
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index d76612999b9f..28fdb8ea9ed8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -21,49 +21,47 @@
*
* Authors: Ben Skeggs
*/
-
+#define gf110_pmu_code gk104_pmu_code
+#define gf110_pmu_data gk104_pmu_data
#include "priv.h"
-
-#define nvd0_pwr_code gk104_pwr_code
-#define nvd0_pwr_data gk104_pwr_data
-#include "fuc/nvd0.fuc.h"
+#include "fuc/gf110.fuc4.h"
static void
-gk104_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
+gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
- nv_mask(ppwr, 0x000200, 0x00001000, 0x00000000);
- nv_rd32(ppwr, 0x000200);
- nv_mask(ppwr, 0x000200, 0x08000000, 0x08000000);
+ nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
+ nv_rd32(pmu, 0x000200);
+ nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
msleep(50);
- nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000002);
- nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
- nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
+ nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
- nv_mask(ppwr, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+ nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
msleep(50);
- nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000000);
- nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
- nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
+ nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
- nv_mask(ppwr, 0x000200, 0x08000000, 0x00000000);
- nv_mask(ppwr, 0x000200, 0x00001000, 0x00001000);
- nv_rd32(ppwr, 0x000200);
+ nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
+ nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
+ nv_rd32(pmu, 0x000200);
}
-struct nouveau_oclass *
-gk104_pwr_oclass = &(struct nvkm_pwr_impl) {
- .base.handle = NV_SUBDEV(PWR, 0xe4),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_pwr_ctor,
- .dtor = _nouveau_pwr_dtor,
- .init = _nouveau_pwr_init,
- .fini = _nouveau_pwr_fini,
+struct nvkm_oclass *
+gk104_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xe4),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = _nvkm_pmu_init,
+ .fini = _nvkm_pmu_fini,
},
- .code.data = gk104_pwr_code,
- .code.size = sizeof(gk104_pwr_code),
- .data.data = gk104_pwr_data,
- .data.size = sizeof(gk104_pwr_data),
- .pgob = gk104_pwr_pgob,
+ .code.data = gk104_pmu_code,
+ .code.size = sizeof(gk104_pmu_code),
+ .data.data = gk104_pmu_data,
+ .data.size = sizeof(gk104_pmu_data),
+ .pgob = gk104_pmu_pgob,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index 04ff7c3c34e9..6f9c09af1a49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -21,21 +21,20 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
-#include "fuc/nv108.fuc.h"
+#include "fuc/gk208.fuc5.h"
-struct nouveau_oclass *
-nv108_pwr_oclass = &(struct nvkm_pwr_impl) {
- .base.handle = NV_SUBDEV(PWR, 0x00),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_pwr_ctor,
- .dtor = _nouveau_pwr_dtor,
- .init = _nouveau_pwr_init,
- .fini = _nouveau_pwr_fini,
+struct nvkm_oclass *
+gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0x00),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = _nvkm_pmu_init,
+ .fini = _nvkm_pmu_fini,
},
- .code.data = nv108_pwr_code,
- .code.size = sizeof(nv108_pwr_code),
- .data.data = nv108_pwr_data,
- .data.size = sizeof(nv108_pwr_data),
+ .code.data = gk208_pmu_code,
+ .code.size = sizeof(gk208_pmu_code),
+ .data.data = gk208_pmu_data,
+ .data.size = sizeof(gk208_pmu_data),
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
new file mode 100644
index 000000000000..a49934bbe637
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/clk.h>
+#include <subdev/timer.h>
+#include <subdev/volt.h>
+
+#define BUSY_SLOT 0
+#define CLK_SLOT 7
+
+struct gk20a_pmu_dvfs_data {
+ int p_load_target;
+ int p_load_max;
+ int p_smooth;
+ unsigned int avg_load;
+};
+
+struct gk20a_pmu_priv {
+ struct nvkm_pmu base;
+ struct nvkm_alarm alarm;
+ struct gk20a_pmu_dvfs_data *data;
+};
+
+struct gk20a_pmu_dvfs_dev_status {
+ unsigned long total;
+ unsigned long busy;
+ int cur_state;
+};
+
+static int
+gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state)
+{
+ struct nvkm_clk *clk = nvkm_clk(priv);
+
+ return nvkm_clk_astate(clk, *state, 0, false);
+}
+
+static int
+gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state)
+{
+ struct nvkm_clk *clk = nvkm_clk(priv);
+
+ *state = clk->pstate;
+ return 0;
+}
+
+static int
+gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
+ int *state, int load)
+{
+ struct gk20a_pmu_dvfs_data *data = priv->data;
+ struct nvkm_clk *clk = nvkm_clk(priv);
+ int cur_level, level;
+
+ /* For GK20A, the performance level is directly mapped to pstate */
+ level = cur_level = clk->pstate;
+
+ if (load > data->p_load_max) {
+ level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
+ } else {
+ level += ((load - data->p_load_target) * 10 /
+ data->p_load_target) / 2;
+ level = max(0, level);
+ level = min(clk->state_nr - 1, level);
+ }
+
+ nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level);
+
+ *state = level;
+
+ if (level == cur_level)
+ return 0;
+ else
+ return 1;
+}
+
+static int
+gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv,
+ struct gk20a_pmu_dvfs_dev_status *status)
+{
+ status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10));
+ status->total= nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10));
+ return 0;
+}
+
+static void
+gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv)
+{
+ nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
+ nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+}
+
+static void
+gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
+{
+ struct gk20a_pmu_priv *priv =
+ container_of(alarm, struct gk20a_pmu_priv, alarm);
+ struct gk20a_pmu_dvfs_data *data = priv->data;
+ struct gk20a_pmu_dvfs_dev_status status;
+ struct nvkm_clk *clk = nvkm_clk(priv);
+ struct nvkm_volt *volt = nvkm_volt(priv);
+ u32 utilization = 0;
+ int state, ret;
+
+ /*
+ * The PMU is initialized before CLK and VOLT, so we have to make sure the
+ * CLK and VOLT are ready here.
+ */
+ if (!clk || !volt)
+ goto resched;
+
+ ret = gk20a_pmu_dvfs_get_dev_status(priv, &status);
+ if (ret) {
+ nv_warn(priv, "failed to get device status\n");
+ goto resched;
+ }
+
+ if (status.total)
+ utilization = div_u64((u64)status.busy * 100, status.total);
+
+ data->avg_load = (data->p_smooth * data->avg_load) + utilization;
+ data->avg_load /= data->p_smooth + 1;
+ nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n",
+ utilization, data->avg_load);
+
+ ret = gk20a_pmu_dvfs_get_cur_state(priv, &state);
+ if (ret) {
+ nv_warn(priv, "failed to get current state\n");
+ goto resched;
+ }
+
+ if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) {
+ nv_trace(priv, "set new state to %d\n", state);
+ gk20a_pmu_dvfs_target(priv, &state);
+ }
+
+resched:
+ gk20a_pmu_dvfs_reset_dev_status(priv);
+ nvkm_timer_alarm(priv, 100000000, alarm);
+}
+
+int
+gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_pmu *pmu = (void *)object;
+ struct gk20a_pmu_priv *priv = (void *)pmu;
+
+ nvkm_timer_alarm_cancel(priv, &priv->alarm);
+
+ return nvkm_subdev_fini(&pmu->base, suspend);
+}
+
+int
+gk20a_pmu_init(struct nvkm_object *object)
+{
+ struct nvkm_pmu *pmu = (void *)object;
+ struct gk20a_pmu_priv *priv = (void *)pmu;
+ int ret;
+
+ ret = nvkm_subdev_init(&pmu->base);
+ if (ret)
+ return ret;
+
+ pmu->pgob = nvkm_pmu_pgob;
+
+ /* init pwr perf counter */
+ nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
+ nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
+ nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+
+ nvkm_timer_alarm(pmu, 2000000000, &priv->alarm);
+ return ret;
+}
+
+struct gk20a_pmu_dvfs_data gk20a_dvfs_data= {
+ .p_load_target = 70,
+ .p_load_max = 90,
+ .p_smooth = 1,
+};
+
+static int
+gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gk20a_pmu_priv *priv;
+ int ret;
+
+ ret = nvkm_pmu_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->data = &gk20a_dvfs_data;
+
+ nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work);
+ return 0;
+}
+
+struct nvkm_oclass *
+gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xea),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk20a_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = gk20a_pmu_init,
+ .fini = gk20a_pmu_fini,
+ },
+}.base;
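The load heuristic in gk20a_pmu_dvfs_get_target_state() boils down to a small proportional controller: when the busy counter exceeds p_load_max the level jumps a third of the way up the pstate table, otherwise it is nudged in proportion to the deviation from p_load_target and clamped to the table bounds. A self-contained sketch of that arithmetic (plain C, detached from the nvkm object model; the parameter names are mine):

/*
 * Standalone sketch of the GK20A DVFS level selection.  Load values are
 * percentages, num_levels is the size of the pstate table.  This mirrors
 * the arithmetic above; it is not the driver entry point itself.
 */
static int dvfs_pick_level(int cur_level, int load, int num_levels,
                           int load_target, int load_max)
{
        int level = cur_level;

        if (load > load_max) {
                /* badly overloaded: jump up by a third of the table */
                level += num_levels / 3;
        } else {
                /* proportional step around the target load */
                level += ((load - load_target) * 10 / load_target) / 2;
        }

        if (level < 0)
                level = 0;
        if (level > num_levels - 1)
                level = num_levels - 1;
        return level;
}

With the gk20a_dvfs_data defaults (p_load_target = 70, p_load_max = 90), a smoothed load of 50 % gives a step of ((50 - 70) * 10 / 70) / 2 = -1, i.e. drop one performance level.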
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 998d53076b8b..30aaeb21de41 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -21,30 +21,29 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
-#include "fuc/nva3.fuc.h"
+#include "fuc/gt215.fuc3.h"
static int
-nva3_pwr_init(struct nouveau_object *object)
+gt215_pmu_init(struct nvkm_object *object)
{
- struct nouveau_pwr *ppwr = (void *)object;
- nv_mask(ppwr, 0x022210, 0x00000001, 0x00000000);
- nv_mask(ppwr, 0x022210, 0x00000001, 0x00000001);
- return nouveau_pwr_init(ppwr);
+ struct nvkm_pmu *pmu = (void *)object;
+ nv_mask(pmu, 0x022210, 0x00000001, 0x00000000);
+ nv_mask(pmu, 0x022210, 0x00000001, 0x00000001);
+ return nvkm_pmu_init(pmu);
}
-struct nouveau_oclass *
-nva3_pwr_oclass = &(struct nvkm_pwr_impl) {
- .base.handle = NV_SUBDEV(PWR, 0xa3),
- .base.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_pwr_ctor,
- .dtor = _nouveau_pwr_dtor,
- .init = nva3_pwr_init,
- .fini = _nouveau_pwr_fini,
+struct nvkm_oclass *
+gt215_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xa3),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = gt215_pmu_init,
+ .fini = _nvkm_pmu_fini,
},
- .code.data = nva3_pwr_code,
- .code.size = sizeof(nva3_pwr_code),
- .data.data = nva3_pwr_data,
- .data.size = sizeof(nva3_pwr_data),
+ .code.data = gt215_pmu_code,
+ .code.size = sizeof(gt215_pmu_code),
+ .data.data = gt215_pmu_data,
+ .data.size = sizeof(gt215_pmu_data),
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
new file mode 100644
index 000000000000..b75c5b885980
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -0,0 +1,200 @@
+#ifndef __NVKM_PMU_MEMX_H__
+#define __NVKM_PMU_MEMX_H__
+#include "priv.h"
+
+#include <core/device.h>
+
+struct nvkm_memx {
+ struct nvkm_pmu *pmu;
+ u32 base;
+ u32 size;
+ struct {
+ u32 mthd;
+ u32 size;
+ u32 data[64];
+ } c;
+};
+
+static void
+memx_out(struct nvkm_memx *memx)
+{
+ struct nvkm_pmu *pmu = memx->pmu;
+ int i;
+
+ if (memx->c.mthd) {
+ nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+ for (i = 0; i < memx->c.size; i++)
+ nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
+ memx->c.mthd = 0;
+ memx->c.size = 0;
+ }
+}
+
+static void
+memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
+{
+ if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
+ (memx->c.mthd && memx->c.mthd != mthd))
+ memx_out(memx);
+ memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
+ memx->c.size += size;
+ memx->c.mthd = mthd;
+}
+
+int
+nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
+{
+ struct nvkm_memx *memx;
+ u32 reply[2];
+ int ret;
+
+ ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_DATA, 0);
+ if (ret)
+ return ret;
+
+ memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
+ if (!memx)
+ return -ENOMEM;
+ memx->pmu = pmu;
+ memx->base = reply[0];
+ memx->size = reply[1];
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(pmu, 0x10a580, 0x00000003);
+ } while (nv_rd32(pmu, 0x10a580) != 0x00000003);
+ nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
+ return 0;
+}
+
+int
+nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
+{
+ struct nvkm_memx *memx = *pmemx;
+ struct nvkm_pmu *pmu = memx->pmu;
+ u32 finish, reply[2];
+
+ /* flush the cache... */
+ memx_out(memx);
+
+ /* release data segment access */
+ finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
+ nv_wr32(pmu, 0x10a580, 0x00000000);
+
+ /* call MEMX process to execute the script, and wait for reply */
+ if (exec) {
+ pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+ memx->base, finish);
+ nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
+ reply[0], reply[1]);
+ }
+
+ kfree(memx);
+ return 0;
+}
+
+void
+nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
+{
+ nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+ memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
+}
+
+void
+nvkm_memx_wait(struct nvkm_memx *memx,
+ u32 addr, u32 mask, u32 data, u32 nsec)
+{
+ nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
+ addr, mask, data, nsec);
+ memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
+{
+ nv_debug(memx->pmu, " DELAY = %d ns\n", nsec);
+ memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nvkm_memx_wait_vblank(struct nvkm_memx *memx)
+{
+ struct nvkm_pmu *pmu = memx->pmu;
+ u32 heads, x, y, px = 0;
+ int i, head_sync;
+
+ if (nv_device(pmu)->chipset < 0xd0) {
+ heads = nv_rd32(pmu, 0x610050);
+ for (i = 0; i < 2; i++) {
+ /* Heuristic: sync to head with biggest resolution */
+ if (heads & (2 << (i << 3))) {
+ x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
+ y = (x & 0xffff0000) >> 16;
+ x &= 0x0000ffff;
+ if ((x * y) > px) {
+ px = (x * y);
+ head_sync = i;
+ }
+ }
+ }
+ }
+
+ if (px == 0) {
+ nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+ return;
+ }
+
+ nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+ memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nvkm_memx_train(struct nvkm_memx *memx)
+{
+ nv_debug(memx->pmu, " MEM TRAIN\n");
+ memx_cmd(memx, MEMX_TRAIN, 0, NULL);
+}
+
+int
+nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
+{
+ u32 reply[2], base, size, i;
+ int ret;
+
+ ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_TRAIN, 0);
+ if (ret)
+ return ret;
+
+ base = reply[0];
+ size = reply[1] >> 2;
+ if (size > rsize)
+ return -ENOMEM;
+
+ /* read the packet */
+ nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);
+
+ for (i = 0; i < size; i++)
+ res[i] = nv_rd32(pmu, 0x10a1c4);
+
+ return 0;
+}
+
+void
+nvkm_memx_block(struct nvkm_memx *memx)
+{
+ nv_debug(memx->pmu, " HOST BLOCKED\n");
+ memx_cmd(memx, MEMX_ENTER, 0, NULL);
+}
+
+void
+nvkm_memx_unblock(struct nvkm_memx *memx)
+{
+ nv_debug(memx->pmu, " HOST UNBLOCKED\n");
+ memx_cmd(memx, MEMX_LEAVE, 0, NULL);
+}
+#endif
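The memx helpers above accumulate register writes, polls and delays into a script in the PMU data segment (memx_cmd() batches, memx_out() flushes) and finally hand the script to the MEMX process for execution. A hypothetical caller would look roughly like the following; the register address, mask and timings are placeholders for illustration only, and error handling is trimmed:

/* Sketch of building and running a MEMX script; values are invented. */
static int example_memx_script(struct nvkm_pmu *pmu)
{
        struct nvkm_memx *memx;
        int ret;

        ret = nvkm_memx_init(pmu, &memx);
        if (ret)
                return ret;

        nvkm_memx_block(memx);                      /* stall host access */
        nvkm_memx_wr32(memx, 0x100200, 0x00000001); /* placeholder reg/value */
        nvkm_memx_wait(memx, 0x100200, 0x80000000, 0x80000000, 2000);
        nvkm_memx_nsec(memx, 1000);                 /* short settle delay */
        nvkm_memx_unblock(memx);

        /* flush the remaining commands and ask PROC_MEMX to execute */
        return nvkm_memx_fini(&memx, true);
}

Passing exec = false to nvkm_memx_fini() instead releases the data segment without running the script, which gives callers a way to back out without executing anything.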
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
new file mode 100644
index 000000000000..998410563bfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -0,0 +1,43 @@
+#ifndef __NVKM_PMU_PRIV_H__
+#define __NVKM_PMU_PRIV_H__
+#include <subdev/pmu.h>
+#include <subdev/pmu/fuc/os.h>
+
+#define nvkm_pmu_create(p, e, o, d) \
+ nvkm_pmu_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_pmu_destroy(p) \
+ nvkm_subdev_destroy(&(p)->base)
+#define nvkm_pmu_init(p) ({ \
+ struct nvkm_pmu *_pmu = (p); \
+ _nvkm_pmu_init(nv_object(_pmu)); \
+})
+#define nvkm_pmu_fini(p,s) ({ \
+ struct nvkm_pmu *_pmu = (p); \
+ _nvkm_pmu_fini(nv_object(_pmu), (s)); \
+})
+
+int nvkm_pmu_create_(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, int, void **);
+
+int _nvkm_pmu_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+#define _nvkm_pmu_dtor _nvkm_subdev_dtor
+int _nvkm_pmu_init(struct nvkm_object *);
+int _nvkm_pmu_fini(struct nvkm_object *, bool);
+void nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable);
+
+struct nvkm_pmu_impl {
+ struct nvkm_oclass base;
+ struct {
+ u32 *data;
+ u32 size;
+ } code;
+ struct {
+ u32 *data;
+ u32 size;
+ } data;
+
+ void (*pgob)(struct nvkm_pmu *, bool);
+};
+#endif
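nvkm_pmu_create() above relies on the usual nvkm double-pointer trick: the chip driver passes the address of its own priv pointer, sizeof(**d) supplies the size of that derived structure, and the generic constructor allocates it and hands it back through a void **. A generic illustration of the same idiom outside the driver (all names here are invented for the example; userspace flavour for brevity):

/* Illustration of the sizeof(**d) allocation idiom. */
#include <stdlib.h>

struct base_obj  { int handle; };
struct chip_priv { struct base_obj base; int extra_state; };

static int obj_create_(int length, void **pobject)
{
        *pobject = calloc(1, length);   /* allocate the full derived struct */
        return *pobject ? 0 : -1;
}

#define obj_create(d) obj_create_(sizeof(**(d)), (void **)(d))

static int chip_ctor(void)
{
        struct chip_priv *priv;
        int ret = obj_create(&priv);    /* allocates sizeof(struct chip_priv) */

        if (ret)
                return ret;
        priv->extra_state = 1;
        free(priv);
        return 0;
}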
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
new file mode 100644
index 000000000000..5837cf1292d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -0,0 +1,13 @@
+nvkm-y += nvkm/subdev/therm/base.o
+nvkm-y += nvkm/subdev/therm/fan.o
+nvkm-y += nvkm/subdev/therm/fannil.o
+nvkm-y += nvkm/subdev/therm/fanpwm.o
+nvkm-y += nvkm/subdev/therm/fantog.o
+nvkm-y += nvkm/subdev/therm/ic.o
+nvkm-y += nvkm/subdev/therm/temp.o
+nvkm-y += nvkm/subdev/therm/nv40.o
+nvkm-y += nvkm/subdev/therm/nv50.o
+nvkm-y += nvkm/subdev/therm/g84.o
+nvkm-y += nvkm/subdev/therm/gt215.o
+nvkm-y += nvkm/subdev/therm/gf110.o
+nvkm-y += nvkm/subdev/therm/gm107.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 9ad01da6eacb..ec327cb64a0d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -21,21 +21,17 @@
*
* Authors: Martin Peres
*/
+#include "priv.h"
-#include <core/object.h>
#include <core/device.h>
-#include <subdev/bios.h>
-
-#include "priv.h"
-
static int
-nouveau_therm_update_trip(struct nouveau_therm *therm)
+nvkm_therm_update_trip(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_therm_trip_point *trip = priv->fan->bios.trip,
- *cur_trip = NULL,
- *last_trip = priv->last_trip;
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_trip_point *trip = priv->fan->bios.trip,
+ *cur_trip = NULL,
+ *last_trip = priv->last_trip;
u8 temp = therm->temp_get(therm);
u16 duty, i;
@@ -63,9 +59,9 @@ nouveau_therm_update_trip(struct nouveau_therm *therm)
}
static int
-nouveau_therm_update_linear(struct nouveau_therm *therm)
+nvkm_therm_update_linear(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
u8 linear_min_temp = priv->fan->bios.linear_min_temp;
u8 linear_max_temp = priv->fan->bios.linear_max_temp;
u8 temp = therm->temp_get(therm);
@@ -82,15 +78,14 @@ nouveau_therm_update_linear(struct nouveau_therm *therm)
duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
duty /= (linear_max_temp - linear_min_temp);
duty += priv->fan->bios.min_duty;
-
return duty;
}
static void
-nouveau_therm_update(struct nouveau_therm *therm, int mode)
+nvkm_therm_update(struct nvkm_therm *therm, int mode)
{
- struct nouveau_timer *ptimer = nouveau_timer(therm);
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_timer *ptimer = nvkm_timer(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
unsigned long flags;
bool immd = true;
bool poll = true;
@@ -102,20 +97,20 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
priv->mode = mode;
switch (mode) {
- case NOUVEAU_THERM_CTRL_MANUAL:
+ case NVKM_THERM_CTRL_MANUAL:
ptimer->alarm_cancel(ptimer, &priv->alarm);
- duty = nouveau_therm_fan_get(therm);
+ duty = nvkm_therm_fan_get(therm);
if (duty < 0)
duty = 100;
poll = false;
break;
- case NOUVEAU_THERM_CTRL_AUTO:
+ case NVKM_THERM_CTRL_AUTO:
switch(priv->fan->bios.fan_mode) {
case NVBIOS_THERM_FAN_TRIP:
- duty = nouveau_therm_update_trip(therm);
+ duty = nvkm_therm_update_trip(therm);
break;
case NVBIOS_THERM_FAN_LINEAR:
- duty = nouveau_therm_update_linear(therm);
+ duty = nvkm_therm_update_linear(therm);
break;
case NVBIOS_THERM_FAN_OTHER:
if (priv->cstate)
@@ -125,7 +120,7 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
}
immd = false;
break;
- case NOUVEAU_THERM_CTRL_NONE:
+ case NVKM_THERM_CTRL_NONE:
default:
ptimer->alarm_cancel(ptimer, &priv->alarm);
poll = false;
@@ -137,36 +132,36 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
if (duty >= 0) {
nv_debug(therm, "FAN target request: %d%%\n", duty);
- nouveau_therm_fan_set(therm, immd, duty);
+ nvkm_therm_fan_set(therm, immd, duty);
}
}
int
-nouveau_therm_cstate(struct nouveau_therm *ptherm, int fan, int dir)
+nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
{
- struct nouveau_therm_priv *priv = (void *)ptherm;
+ struct nvkm_therm_priv *priv = (void *)ptherm;
if (!dir || (dir < 0 && fan < priv->cstate) ||
(dir > 0 && fan > priv->cstate)) {
nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
priv->cstate = fan;
- nouveau_therm_update(ptherm, -1);
+ nvkm_therm_update(ptherm, -1);
}
return 0;
}
static void
-nouveau_therm_alarm(struct nouveau_alarm *alarm)
+nvkm_therm_alarm(struct nvkm_alarm *alarm)
{
- struct nouveau_therm_priv *priv =
- container_of(alarm, struct nouveau_therm_priv, alarm);
- nouveau_therm_update(&priv->base, -1);
+ struct nvkm_therm_priv *priv =
+ container_of(alarm, struct nvkm_therm_priv, alarm);
+ nvkm_therm_update(&priv->base, -1);
}
int
-nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
+nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_device *device = nv_device(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_device *device = nv_device(therm);
static const char *name[] = {
"disabled",
"manual",
@@ -175,51 +170,51 @@ nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
/* The default PPWR ucode on fermi interferes with fan management */
if ((mode >= ARRAY_SIZE(name)) ||
- (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
- !nouveau_subdev(device, NVDEV_SUBDEV_PWR)))
+ (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
+ !nvkm_subdev(device, NVDEV_SUBDEV_PMU)))
return -EINVAL;
/* do not allow automatic fan management if the thermal sensor is
* not available */
- if (mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+ if (mode == NVKM_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
return -EINVAL;
if (priv->mode == mode)
return 0;
nv_info(therm, "fan management: %s\n", name[mode]);
- nouveau_therm_update(therm, mode);
+ nvkm_therm_update(therm, mode);
return 0;
}
int
-nouveau_therm_attr_get(struct nouveau_therm *therm,
- enum nouveau_therm_attr_type type)
+nvkm_therm_attr_get(struct nvkm_therm *therm,
+ enum nvkm_therm_attr_type type)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
switch (type) {
- case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
+ case NVKM_THERM_ATTR_FAN_MIN_DUTY:
return priv->fan->bios.min_duty;
- case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
+ case NVKM_THERM_ATTR_FAN_MAX_DUTY:
return priv->fan->bios.max_duty;
- case NOUVEAU_THERM_ATTR_FAN_MODE:
+ case NVKM_THERM_ATTR_FAN_MODE:
return priv->mode;
- case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
+ case NVKM_THERM_ATTR_THRS_FAN_BOOST:
return priv->bios_sensor.thrs_fan_boost.temp;
- case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
+ case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
return priv->bios_sensor.thrs_fan_boost.hysteresis;
- case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
+ case NVKM_THERM_ATTR_THRS_DOWN_CLK:
return priv->bios_sensor.thrs_down_clock.temp;
- case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
+ case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
return priv->bios_sensor.thrs_down_clock.hysteresis;
- case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
+ case NVKM_THERM_ATTR_THRS_CRITICAL:
return priv->bios_sensor.thrs_critical.temp;
- case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
+ case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
return priv->bios_sensor.thrs_critical.hysteresis;
- case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
+ case NVKM_THERM_ATTR_THRS_SHUTDOWN:
return priv->bios_sensor.thrs_shutdown.temp;
- case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
+ case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
return priv->bios_sensor.thrs_shutdown.hysteresis;
}
@@ -227,57 +222,57 @@ nouveau_therm_attr_get(struct nouveau_therm *therm,
}
int
-nouveau_therm_attr_set(struct nouveau_therm *therm,
- enum nouveau_therm_attr_type type, int value)
+nvkm_therm_attr_set(struct nvkm_therm *therm,
+ enum nvkm_therm_attr_type type, int value)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
switch (type) {
- case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
+ case NVKM_THERM_ATTR_FAN_MIN_DUTY:
if (value < 0)
value = 0;
if (value > priv->fan->bios.max_duty)
value = priv->fan->bios.max_duty;
priv->fan->bios.min_duty = value;
return 0;
- case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
+ case NVKM_THERM_ATTR_FAN_MAX_DUTY:
if (value < 0)
value = 0;
if (value < priv->fan->bios.min_duty)
value = priv->fan->bios.min_duty;
priv->fan->bios.max_duty = value;
return 0;
- case NOUVEAU_THERM_ATTR_FAN_MODE:
- return nouveau_therm_fan_mode(therm, value);
- case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
+ case NVKM_THERM_ATTR_FAN_MODE:
+ return nvkm_therm_fan_mode(therm, value);
+ case NVKM_THERM_ATTR_THRS_FAN_BOOST:
priv->bios_sensor.thrs_fan_boost.temp = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
+ case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
priv->bios_sensor.thrs_fan_boost.hysteresis = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
+ case NVKM_THERM_ATTR_THRS_DOWN_CLK:
priv->bios_sensor.thrs_down_clock.temp = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
+ case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
priv->bios_sensor.thrs_down_clock.hysteresis = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
+ case NVKM_THERM_ATTR_THRS_CRITICAL:
priv->bios_sensor.thrs_critical.temp = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
+ case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
priv->bios_sensor.thrs_critical.hysteresis = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
+ case NVKM_THERM_ATTR_THRS_SHUTDOWN:
priv->bios_sensor.thrs_shutdown.temp = value;
priv->sensor.program_alarms(therm);
return 0;
- case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
+ case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
priv->bios_sensor.thrs_shutdown.hysteresis = value;
priv->sensor.program_alarms(therm);
return 0;
@@ -287,88 +282,86 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
}
int
-_nouveau_therm_init(struct nouveau_object *object)
+_nvkm_therm_init(struct nvkm_object *object)
{
- struct nouveau_therm *therm = (void *)object;
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm *therm = (void *)object;
+ struct nvkm_therm_priv *priv = (void *)therm;
int ret;
- ret = nouveau_subdev_init(&therm->base);
+ ret = nvkm_subdev_init(&therm->base);
if (ret)
return ret;
if (priv->suspend >= 0) {
/* restore the pwm value only when on manual or auto mode */
if (priv->suspend > 0)
- nouveau_therm_fan_set(therm, true, priv->fan->percent);
+ nvkm_therm_fan_set(therm, true, priv->fan->percent);
- nouveau_therm_fan_mode(therm, priv->suspend);
+ nvkm_therm_fan_mode(therm, priv->suspend);
}
- nouveau_therm_sensor_init(therm);
- nouveau_therm_fan_init(therm);
+ nvkm_therm_sensor_init(therm);
+ nvkm_therm_fan_init(therm);
return 0;
}
int
-_nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+_nvkm_therm_fini(struct nvkm_object *object, bool suspend)
{
- struct nouveau_therm *therm = (void *)object;
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm *therm = (void *)object;
+ struct nvkm_therm_priv *priv = (void *)therm;
- nouveau_therm_fan_fini(therm, suspend);
- nouveau_therm_sensor_fini(therm, suspend);
+ nvkm_therm_fan_fini(therm, suspend);
+ nvkm_therm_sensor_fini(therm, suspend);
if (suspend) {
priv->suspend = priv->mode;
- priv->mode = NOUVEAU_THERM_CTRL_NONE;
+ priv->mode = NVKM_THERM_CTRL_NONE;
}
- return nouveau_subdev_fini(&therm->base, suspend);
+ return nvkm_subdev_fini(&therm->base, suspend);
}
int
-nouveau_therm_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
+nvkm_therm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_therm_priv *priv;
+ struct nvkm_therm_priv *priv;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM",
- "therm", length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PTHERM",
+ "therm", length, pobject);
priv = *pobject;
if (ret)
return ret;
- nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm);
+ nvkm_alarm_init(&priv->alarm, nvkm_therm_alarm);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->sensor.alarm_program_lock);
- priv->base.fan_get = nouveau_therm_fan_user_get;
- priv->base.fan_set = nouveau_therm_fan_user_set;
- priv->base.fan_sense = nouveau_therm_fan_sense;
- priv->base.attr_get = nouveau_therm_attr_get;
- priv->base.attr_set = nouveau_therm_attr_set;
+ priv->base.fan_get = nvkm_therm_fan_user_get;
+ priv->base.fan_set = nvkm_therm_fan_user_set;
+ priv->base.fan_sense = nvkm_therm_fan_sense;
+ priv->base.attr_get = nvkm_therm_attr_get;
+ priv->base.attr_set = nvkm_therm_attr_set;
priv->mode = priv->suspend = -1; /* undefined */
return 0;
}
int
-nouveau_therm_preinit(struct nouveau_therm *therm)
+nvkm_therm_preinit(struct nvkm_therm *therm)
{
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ nvkm_therm_sensor_ctor(therm);
+ nvkm_therm_ic_ctor(therm);
+ nvkm_therm_fan_ctor(therm);
- nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
- nouveau_therm_sensor_preinit(therm);
+ nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
+ nvkm_therm_sensor_preinit(therm);
return 0;
}
void
-_nouveau_therm_dtor(struct nouveau_object *object)
+_nvkm_therm_dtor(struct nvkm_object *object)
{
- struct nouveau_therm_priv *priv = (void *)object;
+ struct nvkm_therm_priv *priv = (void *)object;
kfree(priv->fan);
- nouveau_subdev_destroy(&priv->base.base);
+ nvkm_subdev_destroy(&priv->base.base);
}
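Of the automatic fan modes handled by nvkm_therm_update(), the linear one is just an interpolation between the VBIOS minimum and maximum temperatures. The hunk above only shows the tail of nvkm_therm_update_linear(), so here is the whole computation as a stand-alone sketch (the clamping at the endpoints is assumed from context):

/* Linear fan curve: duty cycle in percent for a given temperature. */
static int fan_linear_duty(int temp, int min_temp, int max_temp,
                           int min_duty, int max_duty)
{
        if (temp <= min_temp)
                return min_duty;
        if (temp >= max_temp)
                return max_duty;
        return min_duty + (temp - min_temp) * (max_duty - min_duty) /
                          (max_temp - min_temp);
}

For example, a 40-85 degC window mapped onto 30-100 % duty gives roughly 61 % at 60 degC.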
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 3656d605168f..434fa745ca40 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -22,23 +22,18 @@
* Authors: Ben Skeggs
* Martin Peres
*/
-
#include "priv.h"
-#include <core/object.h>
-#include <core/device.h>
-
+#include <subdev/bios/fan.h>
#include <subdev/gpio.h>
#include <subdev/timer.h>
-#include <subdev/bios/fan.h>
-
static int
-nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
+nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
{
- struct nouveau_therm *therm = fan->parent;
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_timer *ptimer = nouveau_timer(priv);
+ struct nvkm_therm *therm = fan->parent;
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_timer *ptimer = nvkm_timer(priv);
unsigned long flags;
int ret = 0;
int duty;
@@ -107,32 +102,32 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
}
static void
-nouveau_fan_alarm(struct nouveau_alarm *alarm)
+nvkm_fan_alarm(struct nvkm_alarm *alarm)
{
- struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm);
- nouveau_fan_update(fan, false, -1);
+ struct nvkm_fan *fan = container_of(alarm, struct nvkm_fan, alarm);
+ nvkm_fan_update(fan, false, -1);
}
int
-nouveau_therm_fan_get(struct nouveau_therm *therm)
+nvkm_therm_fan_get(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
return priv->fan->get(therm);
}
int
-nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent)
+nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- return nouveau_fan_update(priv->fan, immediate, percent);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ return nvkm_fan_update(priv->fan, immediate, percent);
}
int
-nouveau_therm_fan_sense(struct nouveau_therm *therm)
+nvkm_therm_fan_sense(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_timer *ptimer = nouveau_timer(therm);
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_timer *ptimer = nvkm_timer(therm);
+ struct nvkm_gpio *gpio = nvkm_gpio(therm);
u32 cycles, cur, prev;
u64 start, end, tach;
@@ -168,26 +163,26 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
}
int
-nouveau_therm_fan_user_get(struct nouveau_therm *therm)
+nvkm_therm_fan_user_get(struct nvkm_therm *therm)
{
- return nouveau_therm_fan_get(therm);
+ return nvkm_therm_fan_get(therm);
}
int
-nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
+nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
- if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL)
+ if (priv->mode != NVKM_THERM_CTRL_MANUAL)
return -EINVAL;
- return nouveau_therm_fan_set(therm, true, percent);
+ return nvkm_therm_fan_set(therm, true, percent);
}
static void
-nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
+nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
priv->fan->bios.pwm_freq = 0;
priv->fan->bios.min_duty = 0;
@@ -199,9 +194,9 @@ nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
}
static void
-nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
+nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
if (priv->fan->bios.min_duty > 100)
priv->fan->bios.min_duty = 100;
@@ -213,16 +208,16 @@ nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
}
int
-nouveau_therm_fan_init(struct nouveau_therm *therm)
+nvkm_therm_fan_init(struct nvkm_therm *therm)
{
return 0;
}
int
-nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend)
+nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_timer *ptimer = nouveau_timer(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_timer *ptimer = nvkm_timer(therm);
if (suspend)
ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
@@ -230,11 +225,11 @@ nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend)
}
int
-nouveau_therm_fan_ctor(struct nouveau_therm *therm)
+nvkm_therm_fan_ctor(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct nouveau_bios *bios = nouveau_bios(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_gpio *gpio = nvkm_gpio(therm);
+ struct nvkm_bios *bios = nvkm_bios(therm);
struct dcb_gpio_func func;
int ret;
@@ -246,15 +241,15 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
nv_debug(therm, "GPIO_FAN is in input mode\n");
ret = -EINVAL;
} else {
- ret = nouveau_fanpwm_create(therm, &func);
+ ret = nvkm_fanpwm_create(therm, &func);
if (ret != 0)
- ret = nouveau_fantog_create(therm, &func);
+ ret = nvkm_fantog_create(therm, &func);
}
}
/* no controllable fan found, create a dummy fan module */
if (ret != 0) {
- ret = nouveau_fannil_create(therm);
+ ret = nvkm_fannil_create(therm);
if (ret)
return ret;
}
@@ -262,7 +257,7 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
nv_info(therm, "FAN control: %s\n", priv->fan->type);
/* read the current speed, it is useful when resuming */
- priv->fan->percent = nouveau_therm_fan_get(therm);
+ priv->fan->percent = nvkm_therm_fan_get(therm);
/* attempt to detect a tachometer connection */
ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
@@ -271,17 +266,17 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
/* initialise fan bump/slow update handling */
priv->fan->parent = therm;
- nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm);
+ nvkm_alarm_init(&priv->fan->alarm, nvkm_fan_alarm);
spin_lock_init(&priv->fan->lock);
/* other random init... */
- nouveau_therm_fan_set_defaults(therm);
+ nvkm_therm_fan_set_defaults(therm);
nvbios_perf_fan_parse(bios, &priv->fan->perf);
if (!nvbios_fan_parse(bios, &priv->fan->bios)) {
nv_debug(therm, "parsing the fan table failed\n");
if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
nv_error(therm, "parsing both fan tables failed\n");
}
- nouveau_therm_fan_safety_checks(therm);
+ nvkm_therm_fan_safety_checks(therm);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
index b78c182e1d51..534e5970ec9c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
@@ -21,26 +21,25 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
static int
-nouveau_fannil_get(struct nouveau_therm *therm)
+nvkm_fannil_get(struct nvkm_therm *therm)
{
return -ENODEV;
}
static int
-nouveau_fannil_set(struct nouveau_therm *therm, int percent)
+nvkm_fannil_set(struct nvkm_therm *therm, int percent)
{
return -ENODEV;
}
int
-nouveau_fannil_create(struct nouveau_therm *therm)
+nvkm_fannil_create(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_fan *priv;
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_fan *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
tpriv->fan = priv;
@@ -48,7 +47,7 @@ nouveau_fannil_create(struct nouveau_therm *therm)
return -ENOMEM;
priv->type = "none / external";
- priv->get = nouveau_fannil_get;
- priv->set = nouveau_fannil_set;
+ priv->get = nvkm_fannil_get;
+ priv->set = nvkm_fannil_set;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
index c629d7f2a6a4..bde5ceaeb70a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
@@ -22,25 +22,25 @@
* Authors: Ben Skeggs
* Martin Peres
*/
+#include "priv.h"
+#include <core/device.h>
#include <core/option.h>
-#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/fan.h>
+#include <subdev/gpio.h>
-#include "priv.h"
-
-struct nouveau_fanpwm_priv {
- struct nouveau_fan base;
+struct nvkm_fanpwm_priv {
+ struct nvkm_fan base;
struct dcb_gpio_func func;
};
static int
-nouveau_fanpwm_get(struct nouveau_therm *therm)
+nvkm_fanpwm_get(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
+ struct nvkm_gpio *gpio = nvkm_gpio(therm);
int card_type = nv_device(therm)->card_type;
u32 divs, duty;
int ret;
@@ -57,10 +57,10 @@ nouveau_fanpwm_get(struct nouveau_therm *therm)
}
static int
-nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
+nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
{
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
int card_type = nv_device(therm)->card_type;
u32 divs, duty;
int ret;
@@ -84,18 +84,18 @@ nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
}
int
-nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
{
- struct nouveau_device *device = nv_device(therm);
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_bios *bios = nouveau_bios(therm);
- struct nouveau_fanpwm_priv *priv;
+ struct nvkm_device *device = nv_device(therm);
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_bios *bios = nvkm_bios(therm);
+ struct nvkm_fanpwm_priv *priv;
struct nvbios_therm_fan fan;
u32 divs, duty;
nvbios_fan_parse(bios, &fan);
- if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
+ if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
!therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE ||
therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
return -ENODEV;
@@ -106,8 +106,8 @@ nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
return -ENOMEM;
priv->base.type = "PWM";
- priv->base.get = nouveau_fanpwm_get;
- priv->base.set = nouveau_fanpwm_set;
+ priv->base.get = nvkm_fanpwm_get;
+ priv->base.set = nvkm_fanpwm_set;
priv->func = *func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index f69dab11f720..4ce041e81371 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -21,18 +21,14 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
-#include <core/object.h>
-#include <core/device.h>
-
#include <subdev/gpio.h>
#include <subdev/timer.h>
-struct nouveau_fantog_priv {
- struct nouveau_fan base;
- struct nouveau_alarm alarm;
+struct nvkm_fantog_priv {
+ struct nvkm_fan base;
+ struct nvkm_alarm alarm;
spinlock_t lock;
u32 period_us;
u32 percent;
@@ -40,11 +36,11 @@ struct nouveau_fantog_priv {
};
static void
-nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
+nvkm_fantog_update(struct nvkm_fantog_priv *priv, int percent)
{
- struct nouveau_therm_priv *tpriv = (void *)priv->base.parent;
- struct nouveau_timer *ptimer = nouveau_timer(tpriv);
- struct nouveau_gpio *gpio = nouveau_gpio(tpriv);
+ struct nvkm_therm_priv *tpriv = (void *)priv->base.parent;
+ struct nvkm_timer *ptimer = nvkm_timer(tpriv);
+ struct nvkm_gpio *gpio = nvkm_gpio(tpriv);
unsigned long flags;
int duty;
@@ -66,37 +62,37 @@ nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
}
static void
-nouveau_fantog_alarm(struct nouveau_alarm *alarm)
+nvkm_fantog_alarm(struct nvkm_alarm *alarm)
{
- struct nouveau_fantog_priv *priv =
- container_of(alarm, struct nouveau_fantog_priv, alarm);
- nouveau_fantog_update(priv, -1);
+ struct nvkm_fantog_priv *priv =
+ container_of(alarm, struct nvkm_fantog_priv, alarm);
+ nvkm_fantog_update(priv, -1);
}
static int
-nouveau_fantog_get(struct nouveau_therm *therm)
+nvkm_fantog_get(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
return priv->percent;
}
static int
-nouveau_fantog_set(struct nouveau_therm *therm, int percent)
+nvkm_fantog_set(struct nvkm_therm *therm, int percent)
{
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
if (therm->pwm_ctrl)
therm->pwm_ctrl(therm, priv->func.line, false);
- nouveau_fantog_update(priv, percent);
+ nvkm_fantog_update(priv, percent);
return 0;
}
int
-nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
{
- struct nouveau_therm_priv *tpriv = (void *)therm;
- struct nouveau_fantog_priv *priv;
+ struct nvkm_therm_priv *tpriv = (void *)therm;
+ struct nvkm_fantog_priv *priv;
int ret;
if (therm->pwm_ctrl) {
@@ -111,9 +107,9 @@ nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
return -ENOMEM;
priv->base.type = "toggle";
- priv->base.get = nouveau_fantog_get;
- priv->base.set = nouveau_fantog_set;
- nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm);
+ priv->base.get = nvkm_fantog_get;
+ priv->base.set = nvkm_fantog_set;
+ nvkm_alarm_init(&priv->alarm, nvkm_fantog_alarm);
priv->period_us = 100000; /* 10Hz */
priv->percent = 100;
priv->func = *func;
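The toggle-fan fallback created above drives the FAN GPIO as a slow software PWM: priv->period_us = 100000 fixes a 10 Hz period, and each alarm flips the pin and re-arms the timer so that the pin's on-time within the period tracks the requested percentage (the body of nvkm_fantog_update() falls outside the hunk, so the exact bookkeeping is not shown here). The timing arithmetic in isolation, as a sketch with an invented helper name:

/*
 * Software-PWM timing: given the requested duty in percent and the fixed
 * toggle period, how long should the pin hold its new state?
 */
static unsigned int fantog_next_change_us(unsigned int period_us,
                                          unsigned int percent,
                                          int pin_now_high)
{
        unsigned int on_time_us = (period_us * percent) / 100;

        /* just driven high -> stay high for on_time, else low for the rest */
        return pin_now_high ? on_time_us : period_us - on_time_us;
}

At 30 % that is 30 ms high and 70 ms low per 100 ms period.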
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 14e2e09bfc24..85b5d0c18c0b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -22,18 +22,18 @@
* Authors: Ben Skeggs
* Martin Peres
*/
-
#include "priv.h"
+
#include <subdev/fuse.h>
-struct nv84_therm_priv {
- struct nouveau_therm_priv base;
+struct g84_therm_priv {
+ struct nvkm_therm_priv base;
};
int
-nv84_temp_get(struct nouveau_therm *therm)
+g84_temp_get(struct nvkm_therm *therm)
{
- struct nouveau_fuse *fuse = nouveau_fuse(therm);
+ struct nvkm_fuse *fuse = nvkm_fuse(therm);
if (nv_ro32(fuse, 0x1a8) == 1)
return nv_rd32(therm, 0x20400);
@@ -42,9 +42,9 @@ nv84_temp_get(struct nouveau_therm *therm)
}
void
-nv84_sensor_setup(struct nouveau_therm *therm)
+g84_sensor_setup(struct nvkm_therm *therm)
{
- struct nouveau_fuse *fuse = nouveau_fuse(therm);
+ struct nvkm_fuse *fuse = nvkm_fuse(therm);
/* enable temperature reading for cards with insane defaults */
if (nv_ro32(fuse, 0x1a8) == 1) {
@@ -55,9 +55,9 @@ nv84_sensor_setup(struct nouveau_therm *therm)
}
static void
-nv84_therm_program_alarms(struct nouveau_therm *therm)
+g84_therm_program_alarms(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
unsigned long flags;
@@ -92,53 +92,53 @@ nv84_therm_program_alarms(struct nouveau_therm *therm)
/* must be called with alarm_program_lock taken ! */
static void
-nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
uint32_t thrs_reg, u8 status_bit,
const struct nvbios_therm_threshold *thrs,
- enum nouveau_therm_thrs thrs_name)
+ enum nvkm_therm_thrs thrs_name)
{
- enum nouveau_therm_thrs_direction direction;
- enum nouveau_therm_thrs_state prev_state, new_state;
+ enum nvkm_therm_thrs_direction direction;
+ enum nvkm_therm_thrs_state prev_state, new_state;
int temp, cur;
- prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+ prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
temp = nv_rd32(therm, thrs_reg);
/* program the next threshold */
if (temp == thrs->temp) {
nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
- new_state = NOUVEAU_THERM_THRS_HIGHER;
+ new_state = NVKM_THERM_THRS_HIGHER;
} else {
nv_wr32(therm, thrs_reg, thrs->temp);
- new_state = NOUVEAU_THERM_THRS_LOWER;
+ new_state = NVKM_THERM_THRS_LOWER;
}
/* fix the state (in case someone reprogrammed the alarms) */
cur = therm->temp_get(therm);
- if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
- new_state = NOUVEAU_THERM_THRS_HIGHER;
- else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+ if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
+ new_state = NVKM_THERM_THRS_HIGHER;
+ else if (new_state == NVKM_THERM_THRS_HIGHER &&
cur < thrs->temp - thrs->hysteresis)
- new_state = NOUVEAU_THERM_THRS_LOWER;
- nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+ new_state = NVKM_THERM_THRS_LOWER;
+ nvkm_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
/* find the direction */
if (prev_state < new_state)
- direction = NOUVEAU_THERM_THRS_RISING;
+ direction = NVKM_THERM_THRS_RISING;
else if (prev_state > new_state)
- direction = NOUVEAU_THERM_THRS_FALLING;
+ direction = NVKM_THERM_THRS_FALLING;
else
return;
/* advertise a change in direction */
- nouveau_therm_sensor_event(therm, thrs_name, direction);
+ nvkm_therm_sensor_event(therm, thrs_name, direction);
}
static void
-nv84_therm_intr(struct nouveau_subdev *subdev)
+g84_therm_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_therm *therm = nouveau_therm(subdev);
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm *therm = nvkm_therm(subdev);
+ struct nvkm_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
unsigned long flags;
uint32_t intr;
@@ -149,33 +149,33 @@ nv84_therm_intr(struct nouveau_subdev *subdev)
/* THRS_4: downclock */
if (intr & 0x002) {
- nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
- &sensor->thrs_down_clock,
- NOUVEAU_THERM_THRS_DOWNCLOCK);
+ g84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+ &sensor->thrs_down_clock,
+ NVKM_THERM_THRS_DOWNCLOCK);
intr &= ~0x002;
}
/* shutdown */
if (intr & 0x004) {
- nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+ g84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
&sensor->thrs_shutdown,
- NOUVEAU_THERM_THRS_SHUTDOWN);
+ NVKM_THERM_THRS_SHUTDOWN);
intr &= ~0x004;
}
/* THRS_1 : fan boost */
if (intr & 0x008) {
- nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+ g84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
&sensor->thrs_fan_boost,
- NOUVEAU_THERM_THRS_FANBOOST);
+ NVKM_THERM_THRS_FANBOOST);
intr &= ~0x008;
}
/* THRS_2 : critical */
if (intr & 0x010) {
- nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+ g84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
&sensor->thrs_critical,
- NOUVEAU_THERM_THRS_CRITICAL);
+ NVKM_THERM_THRS_CRITICAL);
intr &= ~0x010;
}
@@ -190,30 +190,28 @@ nv84_therm_intr(struct nouveau_subdev *subdev)
}
static int
-nv84_therm_init(struct nouveau_object *object)
+g84_therm_init(struct nvkm_object *object)
{
- struct nv84_therm_priv *priv = (void *)object;
+ struct g84_therm_priv *priv = (void *)object;
int ret;
- ret = nouveau_therm_init(&priv->base.base);
+ ret = nvkm_therm_init(&priv->base.base);
if (ret)
return ret;
- nv84_sensor_setup(&priv->base.base);
-
+ g84_sensor_setup(&priv->base.base);
return 0;
}
static int
-nv84_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+g84_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nv84_therm_priv *priv;
+ struct g84_therm_priv *priv;
int ret;
- ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ ret = nvkm_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -222,29 +220,29 @@ nv84_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_get = nv50_fan_pwm_get;
priv->base.base.pwm_set = nv50_fan_pwm_set;
priv->base.base.pwm_clock = nv50_fan_pwm_clock;
- priv->base.base.temp_get = nv84_temp_get;
- priv->base.sensor.program_alarms = nv84_therm_program_alarms;
- nv_subdev(priv)->intr = nv84_therm_intr;
+ priv->base.base.temp_get = g84_temp_get;
+ priv->base.sensor.program_alarms = g84_therm_program_alarms;
+ nv_subdev(priv)->intr = g84_therm_intr;
/* init the thresholds */
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_SHUTDOWN,
- NOUVEAU_THERM_THRS_LOWER);
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_FANBOOST,
- NOUVEAU_THERM_THRS_LOWER);
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_CRITICAL,
- NOUVEAU_THERM_THRS_LOWER);
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_DOWNCLOCK,
- NOUVEAU_THERM_THRS_LOWER);
-
- return nouveau_therm_preinit(&priv->base.base);
+ nvkm_therm_sensor_set_threshold_state(&priv->base.base,
+ NVKM_THERM_THRS_SHUTDOWN,
+ NVKM_THERM_THRS_LOWER);
+ nvkm_therm_sensor_set_threshold_state(&priv->base.base,
+ NVKM_THERM_THRS_FANBOOST,
+ NVKM_THERM_THRS_LOWER);
+ nvkm_therm_sensor_set_threshold_state(&priv->base.base,
+ NVKM_THERM_THRS_CRITICAL,
+ NVKM_THERM_THRS_LOWER);
+ nvkm_therm_sensor_set_threshold_state(&priv->base.base,
+ NVKM_THERM_THRS_DOWNCLOCK,
+ NVKM_THERM_THRS_LOWER);
+
+ return nvkm_therm_preinit(&priv->base.base);
}
int
-nv84_therm_fini(struct nouveau_object *object, bool suspend)
+g84_therm_fini(struct nvkm_object *object, bool suspend)
{
/* Disable PTherm IRQs */
nv_wr32(object, 0x20000, 0x00000000);
@@ -253,16 +251,16 @@ nv84_therm_fini(struct nouveau_object *object, bool suspend)
nv_wr32(object, 0x20100, 0xffffffff);
nv_wr32(object, 0x1100, 0x10000); /* PBUS */
- return _nouveau_therm_fini(object, suspend);
+ return _nvkm_therm_fini(object, suspend);
}
-struct nouveau_oclass
-nv84_therm_oclass = {
+struct nvkm_oclass
+g84_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0x84),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_therm_ctor,
- .dtor = _nouveau_therm_dtor,
- .init = nv84_therm_init,
- .fini = nv84_therm_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = g84_therm_ctor,
+ .dtor = _nvkm_therm_dtor,
+ .init = g84_therm_init,
+ .fini = g84_therm_fini,
},
};
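g84_therm_threshold_hyst_emulation() above works around hardware that has a single comparator per threshold and no hysteresis support: each time the interrupt fires it reprograms the comparator to the opposite edge (the threshold on the way up, threshold minus hysteresis on the way down), records the new LOWER/HIGHER state, and reports a RISING or FALLING event only if the state actually changed. A simplified sketch of the re-arm decision, keyed off the live temperature rather than the read-back register value the driver uses:

/* Simplified hysteresis emulation: pick the new state and re-arm point. */
enum thrs_state { THRS_LOWER, THRS_HIGHER };

static enum thrs_state
thrs_hyst_update(int cur_temp, int thrs_temp, int hyst, int *rearm_at)
{
        if (cur_temp >= thrs_temp) {
                *rearm_at = thrs_temp - hyst;   /* now wait for the drop */
                return THRS_HIGHER;
        }
        *rearm_at = thrs_temp;                  /* now wait for the rise */
        return THRS_LOWER;
}

With a 95 degC threshold and 3 degC of hysteresis, the alarm fires once at 95 degC on the way up (re-armed at 92 degC) and once at 92 degC on the way back down (re-armed at 95 degC), so each crossing produces exactly one event in each direction.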
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
index b70f7cc649b8..46b7e656a752 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
@@ -21,15 +21,16 @@
*
* Authors: Ben Skeggs
*/
-
#include "priv.h"
-struct nvd0_therm_priv {
- struct nouveau_therm_priv base;
+#include <core/device.h>
+
+struct gf110_therm_priv {
+ struct nvkm_therm_priv base;
};
static int
-pwm_info(struct nouveau_therm *therm, int line)
+pwm_info(struct nvkm_therm *therm, int line)
{
u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
@@ -53,7 +54,7 @@ pwm_info(struct nouveau_therm *therm, int line)
}
static int
-nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+gf110_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
u32 data = enable ? 0x00000040 : 0x00000000;
int indx = pwm_info(therm, line);
@@ -66,7 +67,7 @@ nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
}
static int
-nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+gf110_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
int indx = pwm_info(therm, line);
if (indx < 0)
@@ -87,7 +88,7 @@ nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
}
static int
-nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+gf110_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
int indx = pwm_info(therm, line);
if (indx < 0)
@@ -103,7 +104,7 @@ nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
}
static int
-nvd0_fan_pwm_clock(struct nouveau_therm *therm, int line)
+gf110_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
int indx = pwm_info(therm, line);
if (indx < 0)
@@ -115,12 +116,12 @@ nvd0_fan_pwm_clock(struct nouveau_therm *therm, int line)
}
int
-nvd0_therm_init(struct nouveau_object *object)
+gf110_therm_init(struct nvkm_object *object)
{
- struct nvd0_therm_priv *priv = (void *)object;
+ struct gf110_therm_priv *priv = (void *)object;
int ret;
- ret = nouveau_therm_init(&priv->base.base);
+ ret = nvkm_therm_init(&priv->base.base);
if (ret)
return ret;
@@ -137,38 +138,37 @@ nvd0_therm_init(struct nouveau_object *object)
}
static int
-nvd0_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gf110_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvd0_therm_priv *priv;
+ struct gf110_therm_priv *priv;
int ret;
- ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ ret = nvkm_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- nv84_sensor_setup(&priv->base.base);
+ g84_sensor_setup(&priv->base.base);
- priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
- priv->base.base.pwm_get = nvd0_fan_pwm_get;
- priv->base.base.pwm_set = nvd0_fan_pwm_set;
- priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
- priv->base.base.temp_get = nv84_temp_get;
- priv->base.base.fan_sense = nva3_therm_fan_sense;
- priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
- return nouveau_therm_preinit(&priv->base.base);
+ priv->base.base.pwm_ctrl = gf110_fan_pwm_ctrl;
+ priv->base.base.pwm_get = gf110_fan_pwm_get;
+ priv->base.base.pwm_set = gf110_fan_pwm_set;
+ priv->base.base.pwm_clock = gf110_fan_pwm_clock;
+ priv->base.base.temp_get = g84_temp_get;
+ priv->base.base.fan_sense = gt215_therm_fan_sense;
+ priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
+ return nvkm_therm_preinit(&priv->base.base);
}
-struct nouveau_oclass
-nvd0_therm_oclass = {
+struct nvkm_oclass
+gf110_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_therm_ctor,
- .dtor = _nouveau_therm_dtor,
- .init = nvd0_therm_init,
- .fini = nv84_therm_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf110_therm_ctor,
+ .dtor = _nvkm_therm_dtor,
+ .init = gf110_therm_init,
+ .fini = g84_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 668cf3322285..2fd110f09878 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -21,22 +21,23 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
+#include <core/device.h>
+
struct gm107_therm_priv {
- struct nouveau_therm_priv base;
+ struct nvkm_therm_priv base;
};
static int
-gm107_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
/* nothing to do, it seems hardwired */
return 0;
}
static int
-gm107_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
*divs = nv_rd32(therm, 0x10eb20) & 0x1fff;
*duty = nv_rd32(therm, 0x10eb24) & 0x1fff;
@@ -44,7 +45,7 @@ gm107_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
}
static int
-gm107_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */
nv_wr32(therm, 0x10eb14, duty | 0x80000000);
@@ -52,21 +53,20 @@ gm107_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
}
static int
-gm107_fan_pwm_clock(struct nouveau_therm *therm, int line)
+gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
return nv_device(therm)->crystal * 1000;
}
static int
-gm107_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gm107_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gm107_therm_priv *priv;
int ret;
- ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ ret = nvkm_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -75,19 +75,19 @@ gm107_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_get = gm107_fan_pwm_get;
priv->base.base.pwm_set = gm107_fan_pwm_set;
priv->base.base.pwm_clock = gm107_fan_pwm_clock;
- priv->base.base.temp_get = nv84_temp_get;
- priv->base.base.fan_sense = nva3_therm_fan_sense;
- priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
- return nouveau_therm_preinit(&priv->base.base);
+ priv->base.base.temp_get = g84_temp_get;
+ priv->base.base.fan_sense = gt215_therm_fan_sense;
+ priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
+ return nvkm_therm_preinit(&priv->base.base);
}
-struct nouveau_oclass
+struct nvkm_oclass
gm107_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0x117),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm107_therm_ctor,
- .dtor = _nouveau_therm_dtor,
- .init = nvd0_therm_init,
- .fini = nv84_therm_fini,
+ .dtor = _nvkm_therm_dtor,
+ .init = gf110_therm_init,
+ .fini = g84_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index 7893357a7e9f..e99be20332f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -21,17 +21,17 @@
*
* Authors: Ben Skeggs
*/
+#include "priv.h"
+#include <core/device.h>
#include <subdev/gpio.h>
-#include "priv.h"
-
-struct nva3_therm_priv {
- struct nouveau_therm_priv base;
+struct gt215_therm_priv {
+ struct nvkm_therm_priv base;
};
int
-nva3_therm_fan_sense(struct nouveau_therm *therm)
+gt215_therm_fan_sense(struct nvkm_therm *therm)
{
u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
u32 ctrl = nv_rd32(therm, 0x00e720);
@@ -41,17 +41,17 @@ nva3_therm_fan_sense(struct nouveau_therm *therm)
}
static int
-nva3_therm_init(struct nouveau_object *object)
+gt215_therm_init(struct nvkm_object *object)
{
- struct nva3_therm_priv *priv = (void *)object;
+ struct gt215_therm_priv *priv = (void *)object;
struct dcb_gpio_func *tach = &priv->base.fan->tach;
int ret;
- ret = nouveau_therm_init(&priv->base.base);
+ ret = nvkm_therm_init(&priv->base.base);
if (ret)
return ret;
- nv84_sensor_setup(&priv->base.base);
+ g84_sensor_setup(&priv->base.base);
/* enable fan tach, count revolutions per-second */
nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
@@ -66,15 +66,14 @@ nva3_therm_init(struct nouveau_object *object)
}
static int
-nva3_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gt215_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nva3_therm_priv *priv;
+ struct gt215_therm_priv *priv;
int ret;
- ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ ret = nvkm_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -83,19 +82,19 @@ nva3_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_get = nv50_fan_pwm_get;
priv->base.base.pwm_set = nv50_fan_pwm_set;
priv->base.base.pwm_clock = nv50_fan_pwm_clock;
- priv->base.base.temp_get = nv84_temp_get;
- priv->base.base.fan_sense = nva3_therm_fan_sense;
- priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
- return nouveau_therm_preinit(&priv->base.base);
+ priv->base.base.temp_get = g84_temp_get;
+ priv->base.base.fan_sense = gt215_therm_fan_sense;
+ priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
+ return nvkm_therm_preinit(&priv->base.base);
}
-struct nouveau_oclass
-nva3_therm_oclass = {
+struct nvkm_oclass
+gt215_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_therm_ctor,
- .dtor = _nouveau_therm_dtor,
- .init = nva3_therm_init,
- .fini = nv84_therm_fini,
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gt215_therm_ctor,
+ .dtor = _nvkm_therm_dtor,
+ .init = gt215_therm_init,
+ .fini = g84_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index ca9ad9fd47be..09fc4605e853 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -21,17 +21,16 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
-#include <subdev/i2c.h>
#include <subdev/bios/extdev.h>
+#include <subdev/i2c.h>
static bool
-probe_monitoring_device(struct nouveau_i2c_port *i2c,
+probe_monitoring_device(struct nvkm_i2c_port *i2c,
struct i2c_board_info *info, void *data)
{
- struct nouveau_therm_priv *priv = data;
+ struct nvkm_therm_priv *priv = data;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
struct i2c_client *client;
@@ -52,11 +51,10 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
"temp offset %+i C)\n",
info->type, info->addr, sensor->offset_constant);
priv->ic = client;
-
return true;
}
-static struct nouveau_i2c_board_info
+static struct nvkm_i2c_board_info
nv_board_infos[] = {
{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
@@ -82,15 +80,15 @@ nv_board_infos[] = {
};
void
-nouveau_therm_ic_ctor(struct nouveau_therm *therm)
+nvkm_therm_ic_ctor(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_bios *bios = nouveau_bios(therm);
- struct nouveau_i2c *i2c = nouveau_i2c(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_bios *bios = nvkm_bios(therm);
+ struct nvkm_i2c *i2c = nvkm_i2c(therm);
struct nvbios_extdev_func extdev_entry;
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
- struct nouveau_i2c_board_info board[] = {
+ struct nvkm_i2c_board_info board[] = {
{ { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
{ }
};
@@ -102,7 +100,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
}
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
- struct nouveau_i2c_board_info board[] = {
+ struct nvkm_i2c_board_info board[] = {
{ { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
{ }
};
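A note on the addr >> 1 in the two board[] tables above: the DCB external-device table appears to store the full 8-bit bus address, while Linux's i2c_board_info takes the 7-bit slave address, hence the shift. A hypothetical entry, only to show the arithmetic (the 0x98 value is invented for this example):

	/* DCB reports addr = 0x98; 0x98 >> 1 == 0x4c, a typical LM90 address */
	struct nvkm_i2c_board_info board[] = {
		{ { I2C_BOARD_INFO("lm90", 0x98 >> 1) }, 0 },
		{ }
	};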
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 002e51b3af93..8496fffd4688 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -22,19 +22,20 @@
* Authors: Ben Skeggs
* Martin Peres
*/
-
#include "priv.h"
+#include <core/device.h>
+
struct nv40_therm_priv {
- struct nouveau_therm_priv base;
+ struct nvkm_therm_priv base;
};
enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
static enum nv40_sensor_style
-nv40_sensor_style(struct nouveau_therm *therm)
+nv40_sensor_style(struct nvkm_therm *therm)
{
- struct nouveau_device *device = nv_device(therm);
+ struct nvkm_device *device = nv_device(therm);
switch (device->chipset) {
case 0x43:
@@ -58,7 +59,7 @@ nv40_sensor_style(struct nouveau_therm *therm)
}
static int
-nv40_sensor_setup(struct nouveau_therm *therm)
+nv40_sensor_setup(struct nvkm_therm *therm)
{
enum nv40_sensor_style style = nv40_sensor_style(therm);
@@ -77,9 +78,9 @@ nv40_sensor_setup(struct nouveau_therm *therm)
}
static int
-nv40_temp_get(struct nouveau_therm *therm)
+nv40_temp_get(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
enum nv40_sensor_style style = nv40_sensor_style(therm);
int core_temp;
@@ -110,7 +111,7 @@ nv40_temp_get(struct nouveau_therm *therm)
}
static int
-nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
u32 mask = enable ? 0x80000000 : 0x0000000;
if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
@@ -123,7 +124,7 @@ nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
}
static int
-nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
if (line == 2) {
u32 reg = nv_rd32(therm, 0x0010f0);
@@ -149,7 +150,7 @@ nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
}
static int
-nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
if (line == 2) {
nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
@@ -166,9 +167,9 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
}
void
-nv40_therm_intr(struct nouveau_subdev *subdev)
+nv40_therm_intr(struct nvkm_subdev *subdev)
{
- struct nouveau_therm *therm = nouveau_therm(subdev);
+ struct nvkm_therm *therm = nvkm_therm(subdev);
uint32_t stat = nv_rd32(therm, 0x1100);
/* processing */
@@ -180,15 +181,15 @@ nv40_therm_intr(struct nouveau_subdev *subdev)
}
static int
-nv40_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_therm_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv40_therm_priv *priv;
int ret;
- ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ ret = nvkm_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -197,28 +198,28 @@ nv40_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_get = nv40_fan_pwm_get;
priv->base.base.pwm_set = nv40_fan_pwm_set;
priv->base.base.temp_get = nv40_temp_get;
- priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
nv_subdev(priv)->intr = nv40_therm_intr;
- return nouveau_therm_preinit(&priv->base.base);
+ return nvkm_therm_preinit(&priv->base.base);
}
static int
-nv40_therm_init(struct nouveau_object *object)
+nv40_therm_init(struct nvkm_object *object)
{
- struct nouveau_therm *therm = (void *)object;
+ struct nvkm_therm *therm = (void *)object;
nv40_sensor_setup(therm);
- return _nouveau_therm_init(object);
+ return _nvkm_therm_init(object);
}
-struct nouveau_oclass
+struct nvkm_oclass
nv40_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_therm_ctor,
- .dtor = _nouveau_therm_dtor,
+ .dtor = _nvkm_therm_dtor,
.init = nv40_therm_init,
- .fini = _nouveau_therm_fini,
+ .fini = _nvkm_therm_fini,
},
};
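A note on the NV40 fan PWM writes above: the (duty << 16) | divs packing puts the divider in bits 14:0 and the duty value in bits 30:16 of 0x0010f0 (or 0x0015f4 for the other line). The numbers below are invented, purely to show the packing:

	/* hypothetical: divs = 0x0400 steps, 50% duty -> duty = 0x0200 */
	/* value written through the 0x7fff7fff mask: 0x02000400        */
	nv_mask(therm, 0x0010f0, 0x7fff7fff, (0x0200 << 16) | 0x0400);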
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 321db927d638..1ef59e8922d4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -22,15 +22,16 @@
* Authors: Ben Skeggs
* Martin Peres
*/
-
#include "priv.h"
+#include <core/device.h>
+
struct nv50_therm_priv {
- struct nouveau_therm_priv base;
+ struct nvkm_therm_priv base;
};
static int
-pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
+pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
{
if (*line == 0x04) {
*ctrl = 0x00e100;
@@ -55,7 +56,7 @@ pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
}
int
-nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
u32 data = enable ? 0x00000001 : 0x00000000;
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
@@ -65,7 +66,7 @@ nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
}
int
-nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
if (ret)
@@ -81,7 +82,7 @@ nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
}
int
-nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
if (ret)
@@ -93,7 +94,7 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
}
int
-nv50_fan_pwm_clock(struct nouveau_therm *therm, int line)
+nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
int chipset = nv_device(therm)->chipset;
int crystal = nv_device(therm)->crystal;
@@ -119,16 +120,16 @@ nv50_fan_pwm_clock(struct nouveau_therm *therm, int line)
}
static void
-nv50_sensor_setup(struct nouveau_therm *therm)
+nv50_sensor_setup(struct nvkm_therm *therm)
{
nv_mask(therm, 0x20010, 0x40000000, 0x0);
mdelay(20); /* wait for the temperature to stabilize */
}
static int
-nv50_temp_get(struct nouveau_therm *therm)
+nv50_temp_get(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
int core_temp;
@@ -151,15 +152,15 @@ nv50_temp_get(struct nouveau_therm *therm)
}
static int
-nv50_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv50_therm_ctor(struct nvkm_object *parent,
+ struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv50_therm_priv *priv;
int ret;
- ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ ret = nvkm_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -169,29 +170,29 @@ nv50_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_set = nv50_fan_pwm_set;
priv->base.base.pwm_clock = nv50_fan_pwm_clock;
priv->base.base.temp_get = nv50_temp_get;
- priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
nv_subdev(priv)->intr = nv40_therm_intr;
- return nouveau_therm_preinit(&priv->base.base);
+ return nvkm_therm_preinit(&priv->base.base);
}
static int
-nv50_therm_init(struct nouveau_object *object)
+nv50_therm_init(struct nvkm_object *object)
{
- struct nouveau_therm *therm = (void *)object;
+ struct nvkm_therm *therm = (void *)object;
nv50_sensor_setup(therm);
- return _nouveau_therm_init(object);
+ return _nvkm_therm_init(object);
}
-struct nouveau_oclass
+struct nvkm_oclass
nv50_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_therm_ctor,
- .dtor = _nouveau_therm_dtor,
+ .dtor = _nvkm_therm_dtor,
.init = nv50_therm_init,
- .fini = _nouveau_therm_fini,
+ .fini = _nvkm_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
new file mode 100644
index 000000000000..916a149efe6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -0,0 +1,153 @@
+#ifndef __NVTHERM_PRIV_H__
+#define __NVTHERM_PRIV_H__
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+#include <subdev/therm.h>
+#include <subdev/bios.h>
+#include <subdev/bios/extdev.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+struct nvkm_fan {
+ struct nvkm_therm *parent;
+ const char *type;
+
+ struct nvbios_therm_fan bios;
+ struct nvbios_perf_fan perf;
+
+ struct nvkm_alarm alarm;
+ spinlock_t lock;
+ int percent;
+
+ int (*get)(struct nvkm_therm *);
+ int (*set)(struct nvkm_therm *, int percent);
+
+ struct dcb_gpio_func tach;
+};
+
+enum nvkm_therm_thrs_direction {
+ NVKM_THERM_THRS_FALLING = 0,
+ NVKM_THERM_THRS_RISING = 1
+};
+
+enum nvkm_therm_thrs_state {
+ NVKM_THERM_THRS_LOWER = 0,
+ NVKM_THERM_THRS_HIGHER = 1
+};
+
+enum nvkm_therm_thrs {
+ NVKM_THERM_THRS_FANBOOST = 0,
+ NVKM_THERM_THRS_DOWNCLOCK = 1,
+ NVKM_THERM_THRS_CRITICAL = 2,
+ NVKM_THERM_THRS_SHUTDOWN = 3,
+ NVKM_THERM_THRS_NR
+};
+
+struct nvkm_therm_priv {
+ struct nvkm_therm base;
+
+ /* automatic thermal management */
+ struct nvkm_alarm alarm;
+ spinlock_t lock;
+ struct nvbios_therm_trip_point *last_trip;
+ int mode;
+ int cstate;
+ int suspend;
+
+ /* bios */
+ struct nvbios_therm_sensor bios_sensor;
+
+ /* fan priv */
+ struct nvkm_fan *fan;
+
+ /* alarms priv */
+ struct {
+ spinlock_t alarm_program_lock;
+ struct nvkm_alarm therm_poll_alarm;
+ enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
+ void (*program_alarms)(struct nvkm_therm *);
+ } sensor;
+
+ /* what should be done if the card overheats */
+ struct {
+ void (*downclock)(struct nvkm_therm *, bool active);
+ void (*pause)(struct nvkm_therm *, bool active);
+ } emergency;
+
+ /* ic */
+ struct i2c_client *ic;
+};
+
+int nvkm_therm_fan_mode(struct nvkm_therm *, int mode);
+int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type);
+int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+
+void nvkm_therm_ic_ctor(struct nvkm_therm *);
+
+int nvkm_therm_sensor_ctor(struct nvkm_therm *);
+
+int nvkm_therm_fan_ctor(struct nvkm_therm *);
+int nvkm_therm_fan_init(struct nvkm_therm *);
+int nvkm_therm_fan_fini(struct nvkm_therm *, bool suspend);
+int nvkm_therm_fan_get(struct nvkm_therm *);
+int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
+int nvkm_therm_fan_user_get(struct nvkm_therm *);
+int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
+
+int nvkm_therm_fan_sense(struct nvkm_therm *);
+
+int nvkm_therm_preinit(struct nvkm_therm *);
+
+int nvkm_therm_sensor_init(struct nvkm_therm *);
+int nvkm_therm_sensor_fini(struct nvkm_therm *, bool suspend);
+void nvkm_therm_sensor_preinit(struct nvkm_therm *);
+void nvkm_therm_sensor_set_threshold_state(struct nvkm_therm *,
+ enum nvkm_therm_thrs,
+ enum nvkm_therm_thrs_state);
+enum nvkm_therm_thrs_state
+nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *,
+ enum nvkm_therm_thrs);
+void nvkm_therm_sensor_event(struct nvkm_therm *, enum nvkm_therm_thrs,
+ enum nvkm_therm_thrs_direction);
+void nvkm_therm_program_alarms_polling(struct nvkm_therm *);
+
+void nv40_therm_intr(struct nvkm_subdev *);
+int nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
+int nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
+int nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
+int nv50_fan_pwm_clock(struct nvkm_therm *, int);
+int g84_temp_get(struct nvkm_therm *);
+void g84_sensor_setup(struct nvkm_therm *);
+int g84_therm_fini(struct nvkm_object *, bool suspend);
+
+int gt215_therm_fan_sense(struct nvkm_therm *);
+
+int gf110_therm_init(struct nvkm_object *);
+
+int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
+int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
+int nvkm_fannil_create(struct nvkm_therm *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index 6212537b90c5..aa13744f3854 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -21,18 +21,12 @@
*
* Authors: Martin Peres
*/
-
#include "priv.h"
-#include <core/object.h>
-#include <core/device.h>
-
-#include <subdev/bios.h>
-
static void
-nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
+nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
priv->bios_sensor.offset_constant = 0;
@@ -51,9 +45,9 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
static void
-nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
+nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *s = &priv->bios_sensor;
/* enforce a minimum hysteresis on thresholds */
@@ -64,20 +58,21 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
}
/* must be called with alarm_program_lock taken ! */
-void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
- enum nouveau_therm_thrs thrs,
- enum nouveau_therm_thrs_state st)
+void
+nvkm_therm_sensor_set_threshold_state(struct nvkm_therm *therm,
+ enum nvkm_therm_thrs thrs,
+ enum nvkm_therm_thrs_state st)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
priv->sensor.alarm_state[thrs] = st;
}
/* must be called with alarm_program_lock taken ! */
-enum nouveau_therm_thrs_state
-nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
- enum nouveau_therm_thrs thrs)
+enum nvkm_therm_thrs_state
+nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
+ enum nvkm_therm_thrs thrs)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
return priv->sensor.alarm_state[thrs];
}
@@ -88,11 +83,11 @@ nv_poweroff_work(struct work_struct *work)
kfree(work);
}
-void nouveau_therm_sensor_event(struct nouveau_therm *therm,
- enum nouveau_therm_thrs thrs,
- enum nouveau_therm_thrs_direction dir)
+void
+nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
+ enum nvkm_therm_thrs_direction dir)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
bool active;
const char *thresolds[] = {
"fanboost", "downclock", "critical", "shutdown"
@@ -102,30 +97,30 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
if (thrs < 0 || thrs > 3)
return;
- if (dir == NOUVEAU_THERM_THRS_FALLING)
+ if (dir == NVKM_THERM_THRS_FALLING)
nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
temperature, thresolds[thrs]);
else
nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
temperature, thresolds[thrs]);
- active = (dir == NOUVEAU_THERM_THRS_RISING);
+ active = (dir == NVKM_THERM_THRS_RISING);
switch (thrs) {
- case NOUVEAU_THERM_THRS_FANBOOST:
+ case NVKM_THERM_THRS_FANBOOST:
if (active) {
- nouveau_therm_fan_set(therm, true, 100);
- nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+ nvkm_therm_fan_set(therm, true, 100);
+ nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
}
break;
- case NOUVEAU_THERM_THRS_DOWNCLOCK:
+ case NVKM_THERM_THRS_DOWNCLOCK:
if (priv->emergency.downclock)
priv->emergency.downclock(therm, active);
break;
- case NOUVEAU_THERM_THRS_CRITICAL:
+ case NVKM_THERM_THRS_CRITICAL:
if (priv->emergency.pause)
priv->emergency.pause(therm, active);
break;
- case NOUVEAU_THERM_THRS_SHUTDOWN:
+ case NVKM_THERM_THRS_SHUTDOWN:
if (active) {
struct work_struct *work;
@@ -136,7 +131,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
}
}
break;
- case NOUVEAU_THERM_THRS_NR:
+ case NVKM_THERM_THRS_NR:
break;
}
@@ -144,53 +139,53 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
/* must be called with alarm_program_lock taken ! */
static void
-nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm,
- const struct nvbios_therm_threshold *thrs,
- enum nouveau_therm_thrs thrs_name)
+nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
+ const struct nvbios_therm_threshold *thrs,
+ enum nvkm_therm_thrs thrs_name)
{
- enum nouveau_therm_thrs_direction direction;
- enum nouveau_therm_thrs_state prev_state, new_state;
+ enum nvkm_therm_thrs_direction direction;
+ enum nvkm_therm_thrs_state prev_state, new_state;
int temp = therm->temp_get(therm);
- prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+ prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
- if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) {
- direction = NOUVEAU_THERM_THRS_RISING;
- new_state = NOUVEAU_THERM_THRS_HIGHER;
+ if (temp >= thrs->temp && prev_state == NVKM_THERM_THRS_LOWER) {
+ direction = NVKM_THERM_THRS_RISING;
+ new_state = NVKM_THERM_THRS_HIGHER;
} else if (temp <= thrs->temp - thrs->hysteresis &&
- prev_state == NOUVEAU_THERM_THRS_HIGHER) {
- direction = NOUVEAU_THERM_THRS_FALLING;
- new_state = NOUVEAU_THERM_THRS_LOWER;
+ prev_state == NVKM_THERM_THRS_HIGHER) {
+ direction = NVKM_THERM_THRS_FALLING;
+ new_state = NVKM_THERM_THRS_LOWER;
} else
return; /* nothing to do */
- nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
- nouveau_therm_sensor_event(therm, thrs_name, direction);
+ nvkm_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+ nvkm_therm_sensor_event(therm, thrs_name, direction);
}
static void
-alarm_timer_callback(struct nouveau_alarm *alarm)
+alarm_timer_callback(struct nvkm_alarm *alarm)
{
- struct nouveau_therm_priv *priv =
- container_of(alarm, struct nouveau_therm_priv, sensor.therm_poll_alarm);
+ struct nvkm_therm_priv *priv =
+ container_of(alarm, struct nvkm_therm_priv, sensor.therm_poll_alarm);
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
- struct nouveau_timer *ptimer = nouveau_timer(priv);
- struct nouveau_therm *therm = &priv->base;
+ struct nvkm_timer *ptimer = nvkm_timer(priv);
+ struct nvkm_therm *therm = &priv->base;
unsigned long flags;
spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
- nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
- NOUVEAU_THERM_THRS_FANBOOST);
+ nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
+ NVKM_THERM_THRS_FANBOOST);
- nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
- NOUVEAU_THERM_THRS_DOWNCLOCK);
+ nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+ NVKM_THERM_THRS_DOWNCLOCK);
- nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
- NOUVEAU_THERM_THRS_CRITICAL);
+ nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
+ NVKM_THERM_THRS_CRITICAL);
- nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
- NOUVEAU_THERM_THRS_SHUTDOWN);
+ nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
+ NVKM_THERM_THRS_SHUTDOWN);
spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
@@ -200,9 +195,9 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
}
void
-nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
+nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
nv_debug(therm,
@@ -217,18 +212,18 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
}
int
-nouveau_therm_sensor_init(struct nouveau_therm *therm)
+nvkm_therm_sensor_init(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvkm_therm_priv *priv = (void *)therm;
priv->sensor.program_alarms(therm);
return 0;
}
int
-nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend)
+nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_timer *ptimer = nouveau_timer(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_timer *ptimer = nvkm_timer(therm);
if (suspend)
ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
@@ -236,7 +231,7 @@ nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend)
}
void
-nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
+nvkm_therm_sensor_preinit(struct nvkm_therm *therm)
{
const char *sensor_avail = "yes";
@@ -247,18 +242,18 @@ nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
}
int
-nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
+nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_bios *bios = nouveau_bios(therm);
+ struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_bios *bios = nvkm_bios(therm);
- nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+ nvkm_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
- nouveau_therm_temp_set_defaults(therm);
+ nvkm_therm_temp_set_defaults(therm);
if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
&priv->bios_sensor))
nv_error(therm, "nvbios_therm_sensor_parse failed\n");
- nouveau_therm_temp_safety_checks(therm);
+ nvkm_therm_temp_safety_checks(therm);
return 0;
}
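The hysteresis logic in nvkm_therm_threshold_hyst_polling() above is easiest to read with concrete numbers; these are invented for illustration:

	/* hypothetical: thrs->temp = 90, thrs->hysteresis = 3          */
	/* LOWER  -> HIGHER (RISING event)  once temp_get() >= 90       */
	/* HIGHER -> LOWER  (FALLING event) only once temp_get() <= 87  */
	/* readings of 88 or 89 keep whatever state was set previously  */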
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
new file mode 100644
index 000000000000..d1d38b4ba30a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/subdev/timer/base.o
+nvkm-y += nvkm/subdev/timer/nv04.o
+nvkm-y += nvkm/subdev/timer/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index cf8a0e0f8ee3..d894061ced52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -21,13 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
-#include "subdev/timer.h"
+#include <subdev/timer.h>
bool
-nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+nvkm_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
{
- struct nouveau_timer *ptimer = nouveau_timer(obj);
+ struct nvkm_timer *ptimer = nvkm_timer(obj);
u64 time0;
time0 = ptimer->read(ptimer);
@@ -45,9 +44,9 @@ nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
}
bool
-nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+nvkm_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
{
- struct nouveau_timer *ptimer = nouveau_timer(obj);
+ struct nvkm_timer *ptimer = nvkm_timer(obj);
u64 time0;
time0 = ptimer->read(ptimer);
@@ -65,9 +64,9 @@ nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
}
bool
-nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
+nvkm_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
{
- struct nouveau_timer *ptimer = nouveau_timer(obj);
+ struct nvkm_timer *ptimer = nvkm_timer(obj);
u64 time0;
time0 = ptimer->read(ptimer);
@@ -80,15 +79,15 @@ nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
}
void
-nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
+nvkm_timer_alarm(void *obj, u32 nsec, struct nvkm_alarm *alarm)
{
- struct nouveau_timer *ptimer = nouveau_timer(obj);
+ struct nvkm_timer *ptimer = nvkm_timer(obj);
ptimer->alarm(ptimer, nsec, alarm);
}
void
-nouveau_timer_alarm_cancel(void *obj, struct nouveau_alarm *alarm)
+nvkm_timer_alarm_cancel(void *obj, struct nvkm_alarm *alarm)
{
- struct nouveau_timer *ptimer = nouveau_timer(obj);
+ struct nvkm_timer *ptimer = nvkm_timer(obj);
ptimer->alarm_cancel(ptimer, alarm);
}
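The wait helpers above poll a register until the masked read matches (wait_eq) or stops matching (wait_ne) the given value, giving up after nsec nanoseconds. A minimal sketch of a caller, assuming the usual convention that true means the condition was met before the timeout; the register offset and bit are invented:

	/* hypothetical: wait up to 2 s for bit 0 of 0x001234 to be set */
	if (!nvkm_timer_wait_eq(priv, 2000000000ULL, 0x001234,
				0x00000001, 0x00000001))
		nv_error(priv, "timeout waiting for idle\n");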
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 37484db1f7fc..80e38063dd9b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -21,18 +21,17 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
static int
-gk20a_timer_init(struct nouveau_object *object)
+gk20a_timer_init(struct nvkm_object *object)
{
struct nv04_timer_priv *priv = (void *)object;
u32 hi = upper_32_bits(priv->suspend_time);
u32 lo = lower_32_bits(priv->suspend_time);
int ret;
- ret = nouveau_timer_init(&priv->base);
+ ret = nvkm_timer_init(&priv->base);
if (ret)
return ret;
@@ -45,10 +44,10 @@ gk20a_timer_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gk20a_timer_oclass = {
.handle = NV_SUBDEV(TIMER, 0xff),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_timer_ctor,
.dtor = nv04_timer_dtor,
.init = gk20a_timer_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 240ed0b983a9..6b7facbe59a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -21,11 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include "nv04.h"
+#include <core/device.h>
+
static u64
-nv04_timer_read(struct nouveau_timer *ptimer)
+nv04_timer_read(struct nvkm_timer *ptimer)
{
struct nv04_timer_priv *priv = (void *)ptimer;
u32 hi, lo;
@@ -39,10 +40,10 @@ nv04_timer_read(struct nouveau_timer *ptimer)
}
static void
-nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
+nv04_timer_alarm_trigger(struct nvkm_timer *ptimer)
{
struct nv04_timer_priv *priv = (void *)ptimer;
- struct nouveau_alarm *alarm, *atemp;
+ struct nvkm_alarm *alarm, *atemp;
unsigned long flags;
LIST_HEAD(exec);
@@ -71,11 +72,10 @@ nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
}
static void
-nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
- struct nouveau_alarm *alarm)
+nv04_timer_alarm(struct nvkm_timer *ptimer, u64 time, struct nvkm_alarm *alarm)
{
struct nv04_timer_priv *priv = (void *)ptimer;
- struct nouveau_alarm *list;
+ struct nvkm_alarm *list;
unsigned long flags;
alarm->timestamp = ptimer->read(ptimer) + time;
@@ -99,8 +99,7 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
}
static void
-nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
- struct nouveau_alarm *alarm)
+nv04_timer_alarm_cancel(struct nvkm_timer *ptimer, struct nvkm_alarm *alarm)
{
struct nv04_timer_priv *priv = (void *)ptimer;
unsigned long flags;
@@ -110,7 +109,7 @@ nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
}
static void
-nv04_timer_intr(struct nouveau_subdev *subdev)
+nv04_timer_intr(struct nvkm_subdev *subdev)
{
struct nv04_timer_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
@@ -128,24 +127,24 @@ nv04_timer_intr(struct nouveau_subdev *subdev)
}
int
-nv04_timer_fini(struct nouveau_object *object, bool suspend)
+nv04_timer_fini(struct nvkm_object *object, bool suspend)
{
struct nv04_timer_priv *priv = (void *)object;
if (suspend)
priv->suspend_time = nv04_timer_read(&priv->base);
nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
- return nouveau_timer_fini(&priv->base, suspend);
+ return nvkm_timer_fini(&priv->base, suspend);
}
static int
-nv04_timer_init(struct nouveau_object *object)
+nv04_timer_init(struct nvkm_object *object)
{
- struct nouveau_device *device = nv_device(object);
+ struct nvkm_device *device = nv_device(object);
struct nv04_timer_priv *priv = (void *)object;
u32 m = 1, f, n, d, lo, hi;
int ret;
- ret = nouveau_timer_init(&priv->base);
+ ret = nvkm_timer_init(&priv->base);
if (ret)
return ret;
@@ -155,7 +154,7 @@ nv04_timer_init(struct nouveau_object *object)
/* determine base clock for timer source */
#if 0 /*XXX*/
if (device->chipset < 0x40) {
- n = nouveau_hw_get_clock(device, PLL_CORE);
+ n = nvkm_hw_get_clock(device, PLL_CORE);
} else
#endif
if (device->chipset <= 0x40) {
@@ -217,26 +216,25 @@ nv04_timer_init(struct nouveau_object *object)
nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-
return 0;
}
void
-nv04_timer_dtor(struct nouveau_object *object)
+nv04_timer_dtor(struct nvkm_object *object)
{
struct nv04_timer_priv *priv = (void *)object;
- return nouveau_timer_destroy(&priv->base);
+ return nvkm_timer_destroy(&priv->base);
}
int
-nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv04_timer_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv04_timer_priv *priv;
int ret;
- ret = nouveau_timer_create(parent, engine, oclass, &priv);
+ ret = nvkm_timer_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -252,10 +250,10 @@ nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv04_timer_oclass = {
.handle = NV_SUBDEV(TIMER, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_timer_ctor,
.dtor = nv04_timer_dtor,
.init = nv04_timer_init,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
index 4bc152697c37..89996a9826b1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
@@ -1,6 +1,5 @@
#ifndef __NVKM_TIMER_NV04_H__
#define __NVKM_TIMER_NV04_H__
-
#include "priv.h"
#define NV04_PTIMER_INTR_0 0x009100
@@ -12,16 +11,15 @@
#define NV04_PTIMER_ALARM_0 0x009420
struct nv04_timer_priv {
- struct nouveau_timer base;
+ struct nvkm_timer base;
struct list_head alarms;
spinlock_t lock;
u64 suspend_time;
};
-int nv04_timer_ctor(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32,
- struct nouveau_object **);
-void nv04_timer_dtor(struct nouveau_object *);
-int nv04_timer_fini(struct nouveau_object *, bool);
-
+int nv04_timer_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
+void nv04_timer_dtor(struct nvkm_object *);
+int nv04_timer_fini(struct nvkm_object *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 799dae3f2300..08e29a3da188 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,6 +1,4 @@
#ifndef __NVKM_TIMER_PRIV_H__
#define __NVKM_TIMER_PRIV_H__
-
#include <subdev/timer.h>
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
new file mode 100644
index 000000000000..6b46ff4213a3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -0,0 +1,4 @@
+nvkm-y += nvkm/subdev/volt/base.o
+nvkm-y += nvkm/subdev/volt/gpio.o
+nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 26ccd8df193f..39f15803f2d4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -21,15 +21,13 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/volt.h>
-
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
static int
-nouveau_volt_get(struct nouveau_volt *volt)
+nvkm_volt_get(struct nvkm_volt *volt)
{
if (volt->vid_get) {
int ret = volt->vid_get(volt), i;
@@ -46,7 +44,7 @@ nouveau_volt_get(struct nouveau_volt *volt)
}
static int
-nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
+nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
if (volt->vid_set) {
int i, ret = -EINVAL;
@@ -63,9 +61,9 @@ nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
}
static int
-nouveau_volt_map(struct nouveau_volt *volt, u8 id)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id)
{
- struct nouveau_bios *bios = nouveau_bios(volt);
+ struct nvkm_bios *bios = nvkm_bios(volt);
struct nvbios_vmap_entry info;
u8 ver, len;
u16 vmap;
@@ -73,7 +71,7 @@ nouveau_volt_map(struct nouveau_volt *volt, u8 id)
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
- int ret = nouveau_volt_map(volt, info.link);
+ int ret = nvkm_volt_map(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
@@ -85,15 +83,15 @@ nouveau_volt_map(struct nouveau_volt *volt, u8 id)
}
static int
-nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
{
- int ret = nouveau_volt_map(volt, id);
+ int ret = nvkm_volt_map(volt, id);
if (ret >= 0) {
- int prev = nouveau_volt_get(volt);
+ int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
- ret = nouveau_volt_set(volt, ret);
+ ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
}
@@ -101,8 +99,8 @@ nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
return ret;
}
-static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
- struct nouveau_volt *volt)
+static void
+nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
@@ -125,7 +123,7 @@ static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
} else if (data && info.vidmask) {
for (i = 0; i < cnt; i++) {
data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
- &ivid);
+ &ivid);
if (data) {
volt->vid[volt->vid_nr].uv = ivid.voltage;
volt->vid[volt->vid_nr].vid = ivid.vid;
@@ -137,12 +135,12 @@ static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
}
int
-_nouveau_volt_init(struct nouveau_object *object)
+_nvkm_volt_init(struct nvkm_object *object)
{
- struct nouveau_volt *volt = (void *)object;
+ struct nvkm_volt *volt = (void *)object;
int ret;
- ret = nouveau_subdev_init(&volt->base);
+ ret = nvkm_subdev_init(&volt->base);
if (ret)
return ret;
@@ -158,34 +156,33 @@ _nouveau_volt_init(struct nouveau_object *object)
}
void
-_nouveau_volt_dtor(struct nouveau_object *object)
+_nvkm_volt_dtor(struct nvkm_object *object)
{
- struct nouveau_volt *volt = (void *)object;
- nouveau_subdev_destroy(&volt->base);
+ struct nvkm_volt *volt = (void *)object;
+ nvkm_subdev_destroy(&volt->base);
}
int
-nouveau_volt_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, int length, void **pobject)
{
- struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_volt *volt;
+ struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_volt *volt;
int ret, i;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
- "voltage", length, pobject);
+ ret = nvkm_subdev_create_(parent, engine, oclass, 0, "VOLT",
+ "voltage", length, pobject);
volt = *pobject;
if (ret)
return ret;
- volt->get = nouveau_volt_get;
- volt->set = nouveau_volt_set;
- volt->set_id = nouveau_volt_set_id;
+ volt->get = nvkm_volt_get;
+ volt->set = nvkm_volt_set;
+ volt->set_id = nvkm_volt_set_id;
/* Assuming the non-bios device should build the voltage table later */
if (bios)
- nouveau_volt_parse_bios(bios, volt);
+ nvkm_volt_parse_bios(bios, volt);
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
@@ -196,10 +193,10 @@ nouveau_volt_create_(struct nouveau_object *parent,
/*XXX: this is an assumption.. there probably exist boards
* out there with i2c-connected voltage controllers too..
*/
- ret = nouveau_voltgpio_init(volt);
+ ret = nvkm_voltgpio_init(volt);
if (ret == 0) {
- volt->vid_get = nouveau_voltgpio_get;
- volt->vid_set = nouveau_voltgpio_set;
+ volt->vid_get = nvkm_voltgpio_get;
+ volt->vid_set = nvkm_voltgpio_set;
}
}
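The condition argument to nvkm_volt_set_id() above encodes when the mapped voltage may actually be programmed; presumably the callers use it around reclocking. Summarised, with a hypothetical call site:

	/* condition == 0: always program the mapped voltage             */
	/* condition  < 0: only if it is lower than the current voltage  */
	/* condition  > 0: only if it is higher than the current voltage */
	volt->set_id(volt, vid, +1);	/* e.g. raise-only, before clocking up */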
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 717368ef31ac..871fd51011db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -19,11 +19,10 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
+#include <subdev/volt.h>
#ifdef __KERNEL__
#include <nouveau_platform.h>
#endif
-#include <subdev/volt.h>
struct cvb_coef {
int c0;
@@ -35,7 +34,7 @@ struct cvb_coef {
};
struct gk20a_volt_priv {
- struct nouveau_volt base;
+ struct nvkm_volt base;
struct regulator *vdd;
};
@@ -62,8 +61,7 @@ const struct cvb_coef gk20a_cvb_coef[] = {
* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
*/
static inline int
-gk20a_volt_get_cvb_voltage(int speedo, int s_scale,
- const struct cvb_coef *coef)
+gk20a_volt_get_cvb_voltage(int speedo, int s_scale, const struct cvb_coef *coef)
{
int mv;
@@ -79,7 +77,7 @@ gk20a_volt_get_cvb_voltage(int speedo, int s_scale,
*/
static inline int
gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
- const struct cvb_coef *coef)
+ const struct cvb_coef *coef)
{
int cvb_mv, mv;
@@ -103,7 +101,7 @@ gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
}
static int
-gk20a_volt_vid_get(struct nouveau_volt *volt)
+gk20a_volt_vid_get(struct nvkm_volt *volt)
{
struct gk20a_volt_priv *priv = (void *)volt;
int i, uv;
@@ -118,7 +116,7 @@ gk20a_volt_vid_get(struct nouveau_volt *volt)
}
static int
-gk20a_volt_vid_set(struct nouveau_volt *volt, u8 vid)
+gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid)
{
struct gk20a_volt_priv *priv = (void *)volt;
@@ -127,7 +125,7 @@ gk20a_volt_vid_set(struct nouveau_volt *volt, u8 vid)
}
static int
-gk20a_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
{
struct gk20a_volt_priv *priv = (void *)volt;
int prev_uv = regulator_get_voltage(priv->vdd);
@@ -148,16 +146,16 @@ gk20a_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
}
static int
-gk20a_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+gk20a_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct gk20a_volt_priv *priv;
- struct nouveau_volt *volt;
+ struct nvkm_volt *volt;
struct nouveau_platform_device *plat;
int i, ret, uv;
- ret = nouveau_volt_create(parent, engine, oclass, &priv);
+ ret = nvkm_volt_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -187,13 +185,13 @@ gk20a_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
gk20a_volt_oclass = {
.handle = NV_SUBDEV(VOLT, 0xea),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = gk20a_volt_ctor,
- .dtor = _nouveau_volt_dtor,
- .init = _nouveau_volt_init,
- .fini = _nouveau_volt_fini,
+ .dtor = _nvkm_volt_dtor,
+ .init = _nvkm_volt_init,
+ .fini = _nvkm_volt_fini,
},
};
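The cvb_mv comment above is Horner's evaluation of a quadratic in speedo: ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) equals c2*speedo^2/s_scale^2 + c1*speedo/s_scale + c0 in real arithmetic (the per-step integer divisions make it an approximation, and also keep the intermediate products small). Invented numbers, purely to show the evaluation order:

	/* hypothetical: c2 = 2, c1 = -10, c0 = 800, speedo = 1000, s_scale = 100 */
	/* step 1: 2 * 1000 / 100         =  20                                   */
	/* step 2: (20 - 10) * 1000 / 100 = 100                                   */
	/* step 3: 100 + 800              = 900                                   */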
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index 755fa91bcd09..b778deb32d93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/volt.h>
-#include <subdev/gpio.h>
+#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
static const u8 tags[] = {
DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
@@ -32,9 +32,9 @@ static const u8 tags[] = {
};
int
-nouveau_voltgpio_get(struct nouveau_volt *volt)
+nvkm_voltgpio_get(struct nvkm_volt *volt)
{
- struct nouveau_gpio *gpio = nouveau_gpio(volt);
+ struct nvkm_gpio *gpio = nvkm_gpio(volt);
u8 vid = 0;
int i;
@@ -51,9 +51,9 @@ nouveau_voltgpio_get(struct nouveau_volt *volt)
}
int
-nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
+nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
{
- struct nouveau_gpio *gpio = nouveau_gpio(volt);
+ struct nvkm_gpio *gpio = nvkm_gpio(volt);
int i;
for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
@@ -68,9 +68,9 @@ nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
}
int
-nouveau_voltgpio_init(struct nouveau_volt *volt)
+nvkm_voltgpio_init(struct nvkm_volt *volt)
{
- struct nouveau_gpio *gpio = nouveau_gpio(volt);
+ struct nvkm_gpio *gpio = nvkm_gpio(volt);
struct dcb_gpio_func func;
int i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 87d5358376a6..0ac5a3f8c9a8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -21,22 +21,21 @@
*
* Authors: Ben Skeggs
*/
-
#include <subdev/volt.h>
struct nv40_volt_priv {
- struct nouveau_volt base;
+ struct nvkm_volt base;
};
static int
-nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
struct nv40_volt_priv *priv;
int ret;
- ret = nouveau_volt_create(parent, engine, oclass, &priv);
+ ret = nvkm_volt_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -44,13 +43,13 @@ nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
+struct nvkm_oclass
nv40_volt_oclass = {
.handle = NV_SUBDEV(VOLT, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_volt_ctor,
- .dtor = _nouveau_volt_dtor,
- .init = _nouveau_volt_init,
- .fini = _nouveau_volt_fini,
+ .dtor = _nvkm_volt_dtor,
+ .init = _nvkm_volt_init,
+ .fini = _nvkm_volt_fini,
},
};
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 8436c6857cda..d292d24b3a6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -334,17 +334,23 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
goto fail;
}
- drm_fb_helper_single_add_all_connectors(helper);
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(helper, 32);
+ ret = drm_fb_helper_initial_config(helper, 32);
+ if (ret)
+ goto fini;
priv->fbdev = helper;
return helper;
+fini:
+ drm_fb_helper_fini(helper);
fail:
kfree(fbdev);
return NULL;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 024e98ef8e4d..d84583776d50 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -10,6 +10,7 @@ menu "Display Panels"
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
help
DRM panel driver for dumb panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
@@ -31,6 +32,7 @@ config DRM_PANEL_SHARP_LQ101R1SX01
tristate "Sharp LQ101R1SX01 panel"
depends on OF
depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for Sharp LQ101R1SX01
TFT-LCD modules. The panel has a 2560x1600 resolution and uses
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 9d81759d82fc..3cce3ca19601 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -19,8 +19,6 @@
#include <video/mipi_display.h>
-#include <linux/host1x.h>
-
struct sharp_panel {
struct drm_panel base;
/* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
@@ -41,6 +39,16 @@ static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
return container_of(panel, struct sharp_panel, base);
}
+static void sharp_wait_frames(struct sharp_panel *sharp, unsigned int frames)
+{
+ unsigned int refresh = drm_mode_vrefresh(sharp->mode);
+
+ if (WARN_ON(frames > refresh))
+ return;
+
+ msleep(1000 / (refresh / frames));
+}
+
static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value)
{
u8 payload[3] = { offset >> 8, offset & 0xff, value };
@@ -106,6 +114,8 @@ static int sharp_panel_unprepare(struct drm_panel *panel)
if (!sharp->prepared)
return 0;
+ sharp_wait_frames(sharp, 4);
+
err = mipi_dsi_dcs_set_display_off(sharp->link1);
if (err < 0)
dev_err(panel->dev, "failed to set display off: %d\n", err);
@@ -170,15 +180,13 @@ static int sharp_panel_prepare(struct drm_panel *panel)
if (err < 0)
return err;
- usleep_range(10000, 20000);
-
- err = mipi_dsi_dcs_soft_reset(sharp->link1);
- if (err < 0) {
- dev_err(panel->dev, "soft reset failed: %d\n", err);
- goto poweroff;
- }
-
- msleep(120);
+ /*
+ * According to the datasheet, the panel needs around 10 ms to fully
+ * power up. At least another 120 ms is required before exiting sleep
+ * mode to make sure the panel is ready. Throw in another 20 ms for
+ * good measure.
+ */
+ msleep(150);
err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1);
if (err < 0) {
@@ -238,6 +246,9 @@ static int sharp_panel_prepare(struct drm_panel *panel)
sharp->prepared = true;
+ /* wait for 6 frames before continuing */
+ sharp_wait_frames(sharp, 6);
+
return 0;
poweroff:
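A note on the sharp_wait_frames() helper added above: msleep(1000 / (refresh / frames)) uses integer arithmetic, so the sleep is only approximately the requested number of refresh periods. Assuming the panel mode reports 60 Hz:

	/* sharp_wait_frames(sharp, 6): msleep(1000 / (60 / 6)) -> msleep(100) */
	/* sharp_wait_frames(sharp, 4): msleep(1000 / (60 / 4)) -> msleep(66)  */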
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e95385bf8356..39806c335339 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -61,6 +61,8 @@ struct panel_desc {
unsigned int disable;
unsigned int unprepare;
} delay;
+
+ u32 bus_format;
};
struct panel_simple {
@@ -111,6 +113,9 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
+ if (panel->desc->bus_format)
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel->desc->bus_format, 1);
return num;
}
@@ -443,6 +448,34 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct drm_display_mode avic_tm070ddh03_mode = {
+ .clock = 51200,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 4,
+ .htotal = 1024 + 160 + 4 + 156,
+ .vdisplay = 600,
+ .vsync_start = 600 + 17,
+ .vsync_end = 600 + 17 + 1,
+ .vtotal = 600 + 17 + 1 + 17,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc avic_tm070ddh03 = {
+ .modes = &avic_tm070ddh03_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 90,
+ },
+ .delay = {
+ .prepare = 20,
+ .enable = 200,
+ .disable = 200,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -558,6 +591,30 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
.width = 108,
.height = 65,
},
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static const struct drm_display_mode giantplus_gpg482739qs5_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 5,
+ .hsync_end = 480 + 5 + 1,
+ .htotal = 480 + 5 + 1 + 40,
+ .vdisplay = 272,
+ .vsync_start = 272 + 8,
+ .vsync_end = 272 + 8 + 1,
+ .vtotal = 272 + 8 + 1 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc giantplus_gpg482739qs5 = {
+ .modes = &giantplus_gpg482739qs5_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
};
static const struct drm_display_mode hannstar_hsd070pww1_mode = {
@@ -739,6 +796,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "avic,tm070ddh03",
+ .data = &avic_tm070ddh03,
+ }, {
.compatible = "chunghwa,claa101wa01a",
.data = &chunghwa_claa101wa01a
}, {
@@ -757,6 +817,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "giantplus,gpg482739qs5",
+ .data = &giantplus_gpg482739qs5
+ }, {
.compatible = "hannstar,hsd070pww1",
.data = &hannstar_hsd070pww1,
}, {
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 3d7c1d00a424..f778c0e8ae3c 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -686,14 +686,24 @@ int qxl_fbdev_init(struct qxl_device *qdev)
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
- if (ret) {
- kfree(qfbdev);
- return ret;
- }
+ if (ret)
+ goto free;
+
+ ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+ if (ret)
+ goto fini;
+
+ ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
- drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
- drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
return 0;
+
+fini:
+ drm_fb_helper_fini(&qfbdev->helper);
+free:
+ kfree(qfbdev);
+ return ret;
}
void qxl_fbdev_fini(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 12bc21219a0e..4605633e253b 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -2,7 +2,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include
hostprogs-y := mkregtable
clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
@@ -80,8 +80,10 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o \
- radeon_sync.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \
+ radeon_sync.o radeon_audio.o
+
+radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o
# add async DMA block
radeon-y += \
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index db42a670f995..5bf825dfaa09 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -623,10 +623,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
drm_dp_dpcd_writeb(dp_info->aux,
DP_DOWNSPREAD_CTRL, 0);
- if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
- (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+ if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
- }
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index b8cd7975f797..7c9df1eac065 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_audio.h"
#include "atom.h"
#include <linux/backlight.h>
@@ -664,6 +665,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -728,6 +731,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+ if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+ return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
} else if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
@@ -742,6 +747,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
break;
case DRM_MODE_CONNECTOR_eDP:
+ if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+ return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
@@ -1615,6 +1622,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
bool travis_quirk = false;
+ int encoder_mode;
if (connector) {
radeon_connector = to_radeon_connector(connector);
@@ -1710,6 +1718,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
}
break;
}
+
+ encoder_mode = atombios_get_encoder_mode(encoder);
+ if (radeon_audio != 0 &&
+ (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+ radeon_audio_dpms(encoder, mode);
}
static void
@@ -2123,6 +2136,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int encoder_mode;
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2149,6 +2163,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
+ encoder_mode = atombios_get_encoder_mode(encoder);
+ if (radeon_audio != 0 &&
+ (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+ radeon_audio_mode_set(encoder, adjusted_mode);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2170,13 +2188,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
-
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- if (rdev->asic->display.hdmi_enable)
- radeon_hdmi_enable(rdev, encoder, true);
- if (rdev->asic->display.hdmi_setmode)
- radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
- }
}
static bool
@@ -2442,10 +2453,6 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
disable_done:
if (radeon_encoder_is_digital(encoder)) {
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- if (rdev->asic->display.hdmi_enable)
- radeon_hdmi_enable(rdev, encoder, false);
- }
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
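
For reference, a stand-alone C sketch of the encoder-mode selection these hunks introduce: DP and eDP sinks report the dedicated DP-audio encoder mode only on DCE4-class (non-DCE5) ASICs with audio enabled, and the dpms/mode_set paths then hand HDMI and DP modes to the radeon_audio helpers. The enum values and the audio-hook predicate below are local stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

enum encoder_mode { MODE_DP, MODE_DP_AUDIO, MODE_HDMI, MODE_DVI };

static enum encoder_mode dp_sink_encoder_mode(int audio_param, bool is_dce4,
					      bool is_dce5)
{
	/* mirrors the new branch: only DCE4.x (and not DCE5+) parts pick
	 * the dedicated DP-audio encoder mode */
	if (audio_param != 0 && is_dce4 && !is_dce5)
		return MODE_DP_AUDIO;
	return MODE_DP;
}

static bool mode_wants_audio_hooks(enum encoder_mode mode)
{
	/* stand-in for "HDMI || ENCODER_MODE_IS_DP(mode)" guarding the new
	 * radeon_audio_dpms()/radeon_audio_mode_set() calls */
	return mode == MODE_HDMI || mode == MODE_DP || mode == MODE_DP_AUDIO;
}

int main(void)
{
	printf("DCE4, audio on -> mode %d\n", dp_sink_encoder_mode(1, true, false));
	printf("DCE5, audio on -> mode %d\n", dp_sink_encoder_mode(1, true, true));
	printf("DP_AUDIO wants audio hooks: %d\n",
	       mode_wants_audio_hooks(MODE_DP_AUDIO));
	return 0;
}
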
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0b2929de9f41..db08f17be76b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2277,6 +2277,7 @@ static void btc_update_requested_ps(struct radeon_device *rdev,
eg_pi->requested_rps.ps_priv = &eg_pi->requested_ps;
}
+#if 0
void btc_dpm_reset_asic(struct radeon_device *rdev)
{
rv770_restrict_performance_levels_before_switch(rdev);
@@ -2284,6 +2285,7 @@ void btc_dpm_reset_asic(struct radeon_device *rdev)
btc_set_boot_state_timing(rdev);
rv770_set_boot_state(rdev);
}
+#endif
int btc_dpm_pre_set_power_state(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f373a81ba3d5..bcd2f1fe803f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -187,6 +187,9 @@ static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter);
+static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
+static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
+
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
struct ci_power_info *pi = rdev->pm.dpm.priv;
@@ -1043,22 +1046,24 @@ static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
return -EINVAL;
}
+ pi->fan_is_controlled_by_smc = true;
return 0;
}
-#if 0
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
PPSMC_Result ret;
+ struct ci_power_info *pi = ci_get_pi(rdev);
ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
- if (ret == PPSMC_Result_OK)
+ if (ret == PPSMC_Result_OK) {
+ pi->fan_is_controlled_by_smc = false;
return 0;
- else
+ } else
return -EINVAL;
}
-static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
u32 *speed)
{
u32 duty, duty100;
@@ -1083,21 +1088,22 @@ static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
return 0;
}
-static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
+ struct ci_power_info *pi = ci_get_pi(rdev);
if (rdev->pm.no_fan)
return -ENOENT;
- if (speed > 100)
+ if (pi->fan_is_controlled_by_smc)
return -EINVAL;
- if (rdev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(rdev);
+ if (speed > 100)
+ return -EINVAL;
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
@@ -1112,11 +1118,38 @@ static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
tmp |= FDO_STATIC_DUTY(duty);
WREG32_SMC(CG_FDO_CTRL0, tmp);
- ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
-
return 0;
}
+void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(rdev);
+ ci_fan_ctrl_set_static_mode(rdev, mode);
+ } else {
+ /* restart auto-manage */
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ ci_thermal_start_smc_fan_control(rdev);
+ else
+ ci_fan_ctrl_set_default_mode(rdev);
+ }
+}
+
+u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (pi->fan_is_controlled_by_smc)
+ return 0;
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+ return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
u32 *speed)
{
@@ -1698,10 +1731,12 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
return 0;
}
+#if 0
static int ci_set_boot_state(struct radeon_device *rdev)
{
return ci_enable_sclk_mclk_dpm(rdev, false);
}
+#endif
static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
@@ -5343,10 +5378,12 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
+#if 0
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
ci_set_boot_state(rdev);
}
+#endif
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
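
A minimal user-space sketch of the fan-speed scaling that ci_fan_ctrl_set_fan_speed_percent() now guards with fan_is_controlled_by_smc: manual duty requests are refused while the SMC owns the fan, otherwise the percentage is scaled against the firmware's 100%-duty value. Register access is replaced by plain parameters; the duty100-zero guard is defensive and not taken from the hunk.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int fan_speed_percent_to_duty(uint32_t speed, uint32_t duty100,
				     bool fan_is_controlled_by_smc,
				     uint32_t *duty)
{
	uint64_t tmp64;

	if (fan_is_controlled_by_smc)
		return -EINVAL;		/* manual control refused while the SMC owns the fan */
	if (speed > 100)
		return -EINVAL;
	if (duty100 == 0)
		return -EINVAL;		/* defensive: unprogrammed FMAX_DUTY100 */

	tmp64 = (uint64_t)speed * duty100;
	*duty = (uint32_t)(tmp64 / 100);	/* value that would go into FDO_STATIC_DUTY() */
	return 0;
}

int main(void)
{
	uint32_t duty;

	if (fan_speed_percent_to_duty(40, 255, false, &duty) == 0)
		printf("40%% of duty100=255 -> duty %u\n", (unsigned int)duty);
	printf("request while SMC-controlled -> %d\n",
	       fan_speed_percent_to_duty(40, 255, true, &duty));
	return 0;
}
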
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index 84e3d3bcf9f3..723220ffbea2 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -291,6 +291,7 @@ struct ci_power_info {
struct ci_ps requested_ps;
/* fan control */
bool fan_ctrl_is_in_default_mode;
+ bool fan_is_controlled_by_smc;
u32 t_min;
u32 fan_ctrl_default_mode;
};
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index e78bcad7a43e..35c6f648ba04 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -184,6 +184,7 @@ PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
return (PPSMC_Result)tmp;
}
+#if 0
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
{
u32 tmp;
@@ -201,6 +202,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
return PPSMC_Result_OK;
}
+#endif
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dcde3798b45..e6a4ba236c70 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
@@ -3904,7 +3905,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- /* EVENT_WRITE_EOP - flush caches, send int */
+ /* Workaround for cache flush problems. First send a dummy EOP
+ * event down the pipe with seq one below.
+ */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
+ radeon_ring_write(ring, fence->seq - 1);
+ radeon_ring_write(ring, 0);
+
+ /* Then send the real EOP event down the pipe. */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
@@ -5707,6 +5722,28 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
+static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
+{
+ int i;
+ uint32_t sh_mem_bases, sh_mem_config;
+
+ sh_mem_bases = 0x6000 | 0x6000 << 16;
+ sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
+
+ mutex_lock(&rdev->srbm_mutex);
+ for (i = 8; i < 16; i++) {
+ cik_srbm_select(rdev, 0, 0, 0, i);
+ /* CP and shaders */
+ WREG32(SH_MEM_CONFIG, sh_mem_config);
+ WREG32(SH_MEM_APE1_BASE, 1);
+ WREG32(SH_MEM_APE1_LIMIT, 0);
+ WREG32(SH_MEM_BASES, sh_mem_bases);
+ }
+ cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
+}
+
/**
* cik_pcie_gart_enable - gart enable
*
@@ -5820,6 +5857,8 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
+ cik_pcie_init_compute_vmid(rdev);
+
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
@@ -6033,6 +6072,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+ WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* compute doesn't have PFP */
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
@@ -7323,7 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
- u32 thermal_int;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -7353,13 +7402,6 @@ int cik_irq_set(struct radeon_device *rdev)
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- if (rdev->flags & RADEON_IS_IGP)
- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
- ~(THERM_INTH_MASK | THERM_INTL_MASK);
- else
- thermal_int = RREG32_SMC(CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -7463,14 +7505,6 @@ int cik_irq_set(struct radeon_device *rdev)
hpd6 |= DC_HPDx_INT_EN;
}
- if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
- if (rdev->flags & RADEON_IS_IGP)
- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
- else
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- }
-
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -7517,11 +7551,6 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
- if (rdev->flags & RADEON_IS_IGP)
- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
- else
- WREG32_SMC(CG_THERMAL_INT, thermal_int);
-
return 0;
}
@@ -8482,7 +8511,7 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
- r = dce6_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r)
return r;
@@ -8540,7 +8569,7 @@ int cik_suspend(struct radeon_device *rdev)
{
radeon_kfd_suspend(rdev);
radeon_pm_suspend(rdev);
- dce6_audio_fini(rdev);
+ radeon_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
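
A stand-alone model of the double-EOP fence emission added to cik_fence_gfx_ring_emit(): a throwaway EVENT_WRITE_EOP carrying seq - 1 goes down the pipe before the real EOP that writes the fence value and raises the interrupt. The ring is a plain array and the packet body is reduced to two dwords; the opcode value and encoding macro are assumptions made only so the example compiles.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PACKET3(op, count)	((3u << 30) | (((op) & 0xffu) << 8) | ((count) & 0x3fffu))
#define PACKET3_EVENT_WRITE_EOP	0x47u	/* opcode assumed for illustration */

struct model_ring {
	uint32_t buf[32];
	unsigned int wptr;
};

static void ring_write(struct model_ring *ring, uint32_t v)
{
	ring->buf[ring->wptr++] = v;
}

static void emit_eop(struct model_ring *ring, uint32_t seq, uint32_t want_irq)
{
	/* the real packet is PACKET3(EVENT_WRITE_EOP, 4) plus cache-flush
	 * flags and the fence write-back address; the body is trimmed to
	 * two dwords here so the model stays self-consistent */
	ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 1));
	ring_write(ring, seq);
	ring_write(ring, want_irq);
}

static void emit_fence(struct model_ring *ring, uint32_t seq)
{
	emit_eop(ring, seq - 1, 0);	/* dummy EOP: seq one below, no interrupt */
	emit_eop(ring, seq, 1);		/* real EOP: fence value, interrupt enabled */
}

int main(void)
{
	struct model_ring ring = { .wptr = 0 };
	unsigned int i;

	emit_fence(&ring, 100);
	for (i = 0; i < ring.wptr; i++)
		printf("dw%u: 0x%08" PRIx32 "\n", i, ring.buf[i]);
	return 0;
}
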
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index 79c45e8a536b..f667347d8157 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,140 +147,41 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
+#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+
#define CP_HQD_IQ_RPTR 0xC970u
#define AQL_ENABLE (1U << 0)
-
-#define IDLE (1 << 2)
-
-struct cik_mqd {
- uint32_t header;
- uint32_t compute_dispatch_initiator;
- uint32_t compute_dim_x;
- uint32_t compute_dim_y;
- uint32_t compute_dim_z;
- uint32_t compute_start_x;
- uint32_t compute_start_y;
- uint32_t compute_start_z;
- uint32_t compute_num_thread_x;
- uint32_t compute_num_thread_y;
- uint32_t compute_num_thread_z;
- uint32_t compute_pipelinestat_enable;
- uint32_t compute_perfcount_enable;
- uint32_t compute_pgm_lo;
- uint32_t compute_pgm_hi;
- uint32_t compute_tba_lo;
- uint32_t compute_tba_hi;
- uint32_t compute_tma_lo;
- uint32_t compute_tma_hi;
- uint32_t compute_pgm_rsrc1;
- uint32_t compute_pgm_rsrc2;
- uint32_t compute_vmid;
- uint32_t compute_resource_limits;
- uint32_t compute_static_thread_mgmt_se0;
- uint32_t compute_static_thread_mgmt_se1;
- uint32_t compute_tmpring_size;
- uint32_t compute_static_thread_mgmt_se2;
- uint32_t compute_static_thread_mgmt_se3;
- uint32_t compute_restart_x;
- uint32_t compute_restart_y;
- uint32_t compute_restart_z;
- uint32_t compute_thread_trace_enable;
- uint32_t compute_misc_reserved;
- uint32_t compute_user_data_0;
- uint32_t compute_user_data_1;
- uint32_t compute_user_data_2;
- uint32_t compute_user_data_3;
- uint32_t compute_user_data_4;
- uint32_t compute_user_data_5;
- uint32_t compute_user_data_6;
- uint32_t compute_user_data_7;
- uint32_t compute_user_data_8;
- uint32_t compute_user_data_9;
- uint32_t compute_user_data_10;
- uint32_t compute_user_data_11;
- uint32_t compute_user_data_12;
- uint32_t compute_user_data_13;
- uint32_t compute_user_data_14;
- uint32_t compute_user_data_15;
- uint32_t cp_compute_csinvoc_count_lo;
- uint32_t cp_compute_csinvoc_count_hi;
- uint32_t cp_mqd_base_addr_lo;
- uint32_t cp_mqd_base_addr_hi;
- uint32_t cp_hqd_active;
- uint32_t cp_hqd_vmid;
- uint32_t cp_hqd_persistent_state;
- uint32_t cp_hqd_pipe_priority;
- uint32_t cp_hqd_queue_priority;
- uint32_t cp_hqd_quantum;
- uint32_t cp_hqd_pq_base_lo;
- uint32_t cp_hqd_pq_base_hi;
- uint32_t cp_hqd_pq_rptr;
- uint32_t cp_hqd_pq_rptr_report_addr_lo;
- uint32_t cp_hqd_pq_rptr_report_addr_hi;
- uint32_t cp_hqd_pq_wptr_poll_addr_lo;
- uint32_t cp_hqd_pq_wptr_poll_addr_hi;
- uint32_t cp_hqd_pq_doorbell_control;
- uint32_t cp_hqd_pq_wptr;
- uint32_t cp_hqd_pq_control;
- uint32_t cp_hqd_ib_base_addr_lo;
- uint32_t cp_hqd_ib_base_addr_hi;
- uint32_t cp_hqd_ib_rptr;
- uint32_t cp_hqd_ib_control;
- uint32_t cp_hqd_iq_timer;
- uint32_t cp_hqd_iq_rptr;
- uint32_t cp_hqd_dequeue_request;
- uint32_t cp_hqd_dma_offload;
- uint32_t cp_hqd_sema_cmd;
- uint32_t cp_hqd_msg_type;
- uint32_t cp_hqd_atomic0_preop_lo;
- uint32_t cp_hqd_atomic0_preop_hi;
- uint32_t cp_hqd_atomic1_preop_lo;
- uint32_t cp_hqd_atomic1_preop_hi;
- uint32_t cp_hqd_hq_status0;
- uint32_t cp_hqd_hq_control0;
- uint32_t cp_mqd_control;
- uint32_t cp_mqd_query_time_lo;
- uint32_t cp_mqd_query_time_hi;
- uint32_t cp_mqd_connect_start_time_lo;
- uint32_t cp_mqd_connect_start_time_hi;
- uint32_t cp_mqd_connect_end_time_lo;
- uint32_t cp_mqd_connect_end_time_hi;
- uint32_t cp_mqd_connect_end_wf_count;
- uint32_t cp_mqd_connect_end_pq_rptr;
- uint32_t cp_mqd_connect_end_pq_wptr;
- uint32_t cp_mqd_connect_end_ib_rptr;
- uint32_t reserved_96;
- uint32_t reserved_97;
- uint32_t reserved_98;
- uint32_t reserved_99;
- uint32_t iqtimer_pkt_header;
- uint32_t iqtimer_pkt_dw0;
- uint32_t iqtimer_pkt_dw1;
- uint32_t iqtimer_pkt_dw2;
- uint32_t iqtimer_pkt_dw3;
- uint32_t iqtimer_pkt_dw4;
- uint32_t iqtimer_pkt_dw5;
- uint32_t iqtimer_pkt_dw6;
- uint32_t reserved_108;
- uint32_t reserved_109;
- uint32_t reserved_110;
- uint32_t reserved_111;
- uint32_t queue_doorbell_id0;
- uint32_t queue_doorbell_id1;
- uint32_t queue_doorbell_id2;
- uint32_t queue_doorbell_id3;
- uint32_t queue_doorbell_id4;
- uint32_t queue_doorbell_id5;
- uint32_t queue_doorbell_id6;
- uint32_t queue_doorbell_id7;
- uint32_t queue_doorbell_id8;
- uint32_t queue_doorbell_id9;
- uint32_t queue_doorbell_id10;
- uint32_t queue_doorbell_id11;
- uint32_t queue_doorbell_id12;
- uint32_t queue_doorbell_id13;
- uint32_t queue_doorbell_id14;
- uint32_t queue_doorbell_id15;
-};
+#define SDMA0_RLC0_RB_CNTL 0xD400u
+#define SDMA_RB_VMID(x) (x << 24)
+#define SDMA0_RLC0_RB_BASE 0xD404u
+#define SDMA0_RLC0_RB_BASE_HI 0xD408u
+#define SDMA0_RLC0_RB_RPTR 0xD40Cu
+#define SDMA0_RLC0_RB_WPTR 0xD410u
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL 0xD414u
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0xD418u
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0xD41Cu
+#define SDMA0_RLC0_RB_RPTR_ADDR_HI 0xD420u
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO 0xD424u
+#define SDMA0_RLC0_IB_CNTL 0xD428u
+#define SDMA0_RLC0_IB_RPTR 0xD42Cu
+#define SDMA0_RLC0_IB_OFFSET 0xD430u
+#define SDMA0_RLC0_IB_BASE_LO 0xD434u
+#define SDMA0_RLC0_IB_BASE_HI 0xD438u
+#define SDMA0_RLC0_IB_SIZE 0xD43Cu
+#define SDMA0_RLC0_SKIP_CNTL 0xD440u
+#define SDMA0_RLC0_CONTEXT_STATUS 0xD444u
+#define SDMA_RLC_IDLE (1 << 2)
+#define SDMA0_RLC0_DOORBELL 0xD448u
+#define SDMA_OFFSET(x) (x << 0)
+#define SDMA_DB_ENABLE (1 << 28)
+#define SDMA0_RLC0_VIRTUAL_ADDR 0xD49Cu
+#define SDMA_ATC (1 << 0)
+#define SDMA_VA_PTR32 (1 << 4)
+#define SDMA_VA_SHARED_BASE(x) (x << 8)
+#define SDMA0_RLC0_APE1_CNTL 0xD4A0u
+#define SDMA0_RLC0_DOORBELL_LOG 0xD4A4u
+#define SDMA0_RLC0_WATERMARK 0xD4A8u
+#define SDMA0_CNTL 0xD010
+#define SDMA1_CNTL 0xD810
#endif
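
The new SDMA RLC definitions are mostly bitfield helpers; a small compile-and-run sketch of how they compose register values follows. The macro bodies mirror the ones added above (with parentheses around the arguments), the example VMID and doorbell offset are arbitrary, and the actual programming sequence lives in the KFD code, not here.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SDMA_RB_VMID(x)		((uint32_t)(x) << 24)
#define SDMA_OFFSET(x)		((uint32_t)(x) << 0)
#define SDMA_DB_ENABLE		(1u << 28)
#define SDMA_ATC		(1u << 0)
#define SDMA_VA_PTR32		(1u << 4)
#define SDMA_VA_SHARED_BASE(x)	((uint32_t)(x) << 8)

int main(void)
{
	uint32_t rb_cntl   = SDMA_RB_VMID(8);			  /* queue owned by compute VMID 8 */
	uint32_t doorbell  = SDMA_OFFSET(0x10) | SDMA_DB_ENABLE;  /* doorbell slot 0x10, enabled */
	uint32_t virt_addr = SDMA_ATC | SDMA_VA_SHARED_BASE(0x60); /* ATC path, arbitrary shared base */

	printf("RB_CNTL      = 0x%08" PRIx32 "\n", rb_cntl);
	printf("DOORBELL     = 0x%08" PRIx32 "\n", doorbell);
	printf("VIRTUAL_ADDR = 0x%08" PRIx32 "\n", virt_addr);
	return 0;
}
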
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index dde5c7e29eb2..f86eb54e7763 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -283,6 +283,33 @@ static void cik_sdma_rlc_stop(struct radeon_device *rdev)
}
/**
+ * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable preemption.
+ *
+ * Enable or disable automatic context switching for the async dma engines (CIK).
+ */
+static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
+{
+ uint32_t reg_offset, value;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ value = RREG32(SDMA0_CNTL + reg_offset);
+ if (enable)
+ value |= AUTO_CTXSW_ENABLE;
+ else
+ value &= ~AUTO_CTXSW_ENABLE;
+ WREG32(SDMA0_CNTL + reg_offset, value);
+ }
+}
+
+/**
* cik_sdma_enable - stop the async dma engines
*
* @rdev: radeon_device pointer
@@ -312,6 +339,8 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
me_cntl |= SDMA_HALT;
WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
}
+
+ cik_sdma_ctx_switch_enable(rdev, enable);
}
/**
@@ -816,7 +845,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -903,6 +931,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm_id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +974,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 1 << vm_id);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* reference */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
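
A user-space sketch of the read-modify-write pattern in the new cik_sdma_ctx_switch_enable(): AUTO_CTXSW_ENABLE is set or cleared in SDMA0_CNTL for both engines. Registers are modelled as an array indexed by engine, and the bit position below is a placeholder chosen only so the example runs, not the hardware value.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUTO_CTXSW_ENABLE	(1u << 18)	/* placeholder bit, not the hardware value */

static uint32_t sdma_cntl[2];	/* stands in for SDMA0_CNTL / SDMA1_CNTL */

static void sdma_ctx_switch_enable(bool enable)
{
	int i;

	for (i = 0; i < 2; i++) {
		uint32_t value = sdma_cntl[i];	/* RREG32(SDMA0_CNTL + reg_offset) */

		if (enable)
			value |= AUTO_CTXSW_ENABLE;
		else
			value &= ~AUTO_CTXSW_ENABLE;
		sdma_cntl[i] = value;		/* WREG32(SDMA0_CNTL + reg_offset, value) */
	}
}

int main(void)
{
	sdma_ctx_switch_enable(true);
	printf("enabled:  0x%08" PRIx32 " 0x%08" PRIx32 "\n", sdma_cntl[0], sdma_cntl[1]);
	sdma_ctx_switch_enable(false);
	printf("disabled: 0x%08" PRIx32 " 0x%08" PRIx32 "\n", sdma_cntl[0], sdma_cntl[1]);
	return 0;
}
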
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9aad0327e4d1..ca058589ddef 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2005,11 +2005,13 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
+#if 0
void cypress_dpm_reset_asic(struct radeon_device *rdev)
{
rv770_restrict_performance_levels_before_switch(rdev);
rv770_set_boot_state(rdev);
}
+#endif
void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index bafdf92a5732..f04205170b8a 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -24,37 +24,17 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "r600d.h"
-static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+void dce3_2_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
/* program the speaker allocation */
- tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@@ -62,19 +42,32 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
+ WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
}
-static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+void dce3_2_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
+ u32 tmp;
+
+ /* program the speaker allocation */
+ tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set DP mode */
+ tmp |= DP_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count)
+{
+ int i;
+ struct radeon_device *rdev = encoder->dev->dev_private;
static const u16 eld_reg_to_type[][2] = {
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
@@ -90,25 +83,6 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count <= 0) {
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- return;
- }
- BUG_ON(!sads);
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -135,110 +109,124 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- WREG32(eld_reg_to_type[i][0], value);
+ WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value);
}
-
- kfree(sads);
}
-/*
- * update the info frames with the data from the current display mode
- */
-void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+void dce3_2_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- uint32_t offset;
- ssize_t err;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ unsigned int max_ratio = clock / 24000;
+ u32 dto_phase;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
+
+ if (!crtc)
return;
- offset = dig->afmt->offset;
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = r600_audio_get_pin(rdev);
- r600_audio_enable(rdev, dig->afmt->pin, 0);
- r600_audio_set_dto(encoder, mode->clock);
+ radeon_encoder = to_radeon_encoder(crtc->encoder);
+ dig = radeon_encoder->enc_priv;
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND); /* send null packets when required */
-
- WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+ if (!dig)
+ return;
- if (ASIC_IS_DCE32(rdev)) {
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
- AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
} else {
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */
- HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
}
- if (ASIC_IS_DCE32(rdev)) {
- dce3_2_afmt_write_speaker_allocation(encoder);
- dce3_2_afmt_write_sad_regs(encoder);
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ if (dig->dig_encoder == 0) {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
+}
+
+void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
- HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
- HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
-
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND | /* send null packets when required */
- HDMI0_GC_SEND | /* send general control packets */
- HDMI0_GC_CONT); /* send general control packets every frame */
-
- /* TODO: HDMI0_AUDIO_INFO_UPDATE */
- WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
- HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
-
- WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
- HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
+ HDMI0_ACR_SOURCE | /* select SW CTS value */
+ HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+ WREG32_P(HDMI0_ACR_32_0 + offset,
+ HDMI0_ACR_CTS_32(acr->cts_32khz),
+ ~HDMI0_ACR_CTS_32_MASK);
+ WREG32_P(HDMI0_ACR_32_1 + offset,
+ HDMI0_ACR_N_32(acr->n_32khz),
+ ~HDMI0_ACR_N_32_MASK);
+
+ WREG32_P(HDMI0_ACR_44_0 + offset,
+ HDMI0_ACR_CTS_44(acr->cts_44_1khz),
+ ~HDMI0_ACR_CTS_44_MASK);
+ WREG32_P(HDMI0_ACR_44_1 + offset,
+ HDMI0_ACR_N_44(acr->n_44_1khz),
+ ~HDMI0_ACR_N_44_MASK);
+
+ WREG32_P(HDMI0_ACR_48_0 + offset,
+ HDMI0_ACR_CTS_48(acr->cts_48khz),
+ ~HDMI0_ACR_CTS_48_MASK);
+ WREG32_P(HDMI0_ACR_48_1 + offset,
+ HDMI0_ACR_N_48(acr->n_48khz),
+ ~HDMI0_ACR_N_48_MASK);
+}
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
+void dce3_2_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
- r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
- r600_hdmi_update_ACR(encoder, mode->clock);
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
- /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
- WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
- WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
- WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
- WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
- r600_hdmi_audio_workaround(encoder);
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
- /* enable audio after to setting up hw */
- r600_audio_enable(rdev, dig->afmt->pin, 0xf);
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+}
+
+void dce3_2_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (mute)
+ WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE);
+ else
+ WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE);
}
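
The DTO programming added here (and in the DCE4 variant later in this series) boils down to picking DCCG_AUDIO_DTOx_PHASE and a wall-clock ratio from the pixel clock relative to the 24 MHz reference. A runnable sketch of just that selection, printing the values instead of writing the DCCG registers:

#include <stdio.h>

struct dto_setup {
	unsigned int phase;		/* DCCG_AUDIO_DTOx_PHASE */
	unsigned int module;		/* DCCG_AUDIO_DTOx_MODULE */
	unsigned int wallclock_ratio;	/* field inside DCCG_AUDIO_DTOx_CNTL */
};

static struct dto_setup pick_audio_dto(unsigned int clock_khz)
{
	struct dto_setup s = { .module = clock_khz };
	unsigned int max_ratio = clock_khz / 24000;

	if (max_ratio >= 8) {
		s.phase = 192 * 1000;
		s.wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		s.phase = 96 * 1000;
		s.wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		s.phase = 48 * 1000;
		s.wallclock_ratio = 1;
	} else {
		s.phase = 24 * 1000;
		s.wallclock_ratio = 0;
	}
	return s;
}

int main(void)
{
	unsigned int clocks[] = { 25175, 74250, 148500, 297000 };
	unsigned int i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		struct dto_setup s = pick_audio_dto(clocks[i]);

		printf("%6u kHz -> phase %6u, module %6u, ratio %u\n",
		       clocks[i], s.phase, s.module, s.wallclock_ratio);
	}
	return 0;
}
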
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index f312edf4d50e..192c80389151 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -23,9 +23,10 @@
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_audio.h"
#include "sid.h"
-static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
+u32 dce6_endpoint_rreg(struct radeon_device *rdev,
u32 block_offset, u32 reg)
{
unsigned long flags;
@@ -39,7 +40,7 @@ static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
return r;
}
-static void dce6_endpoint_wreg(struct radeon_device *rdev,
+void dce6_endpoint_wreg(struct radeon_device *rdev,
u32 block_offset, u32 reg, u32 v)
{
unsigned long flags;
@@ -54,10 +55,6 @@ static void dce6_endpoint_wreg(struct radeon_device *rdev,
spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
}
-#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
-#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v))
-
-
static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
{
int i;
@@ -105,13 +102,11 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
}
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_connector *connector, struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0, offset;
if (!dig || !dig->afmt || !dig->afmt->pin)
@@ -119,18 +114,6 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -147,40 +130,19 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
-void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u32 offset, tmp;
- u8 *sadb = NULL;
- int sad_count;
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
- if (sad_count < 0) {
- DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
/* program the speaker allocation */
tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
@@ -191,21 +153,41 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
}
-void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
+void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
+ u32 offset, tmp;
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+ offset = dig->afmt->pin->offset;
+
+ /* program the speaker allocation */
+ tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set DP mode */
+ tmp |= DP_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count)
+{
+ u32 offset;
+ int i;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_device *rdev = encoder->dev->dev_private;
static const u16 eld_reg_to_type[][2] = {
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
@@ -226,25 +208,6 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- return;
- }
- BUG_ON(!sads);
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -273,13 +236,6 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
}
-
- kfree(sads);
-}
-
-static int dce6_audio_chipset_supported(struct radeon_device *rdev)
-{
- return !ASIC_IS_NODCE(rdev);
}
void dce6_audio_enable(struct radeon_device *rdev,
@@ -293,64 +249,76 @@ void dce6_audio_enable(struct radeon_device *rdev,
enable_mask ? AUDIO_ENABLED : 0);
}
-static const u32 pin_offsets[7] =
-{
- (0x5e00 - 0x5e00),
- (0x5e18 - 0x5e00),
- (0x5e30 - 0x5e00),
- (0x5e48 - 0x5e00),
- (0x5e60 - 0x5e00),
- (0x5e78 - 0x5e00),
- (0x5e90 - 0x5e00),
-};
-
-int dce6_audio_init(struct radeon_device *rdev)
+void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock)
{
- int i;
+ /* Two dtos; generally use dto0 for HDMI */
+ u32 value = 0;
- if (!radeon_audio || !dce6_audio_chipset_supported(rdev))
- return 0;
+ if (crtc)
+ value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
- rdev->audio.enabled = true;
+ WREG32(DCCG_AUDIO_DTO_SOURCE, value);
- if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */
- rdev->audio.num_pins = 7;
- else if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */
- rdev->audio.num_pins = 3;
- else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */
- rdev->audio.num_pins = 7;
- else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */
- rdev->audio.num_pins = 6;
- else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */
- rdev->audio.num_pins = 2;
- else /* SI: 6 streams, 6 endpoints */
- rdev->audio.num_pins = 6;
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+}
- for (i = 0; i < rdev->audio.num_pins; i++) {
- rdev->audio.pin[i].channels = -1;
- rdev->audio.pin[i].rate = -1;
- rdev->audio.pin[i].bits_per_sample = -1;
- rdev->audio.pin[i].status_bits = 0;
- rdev->audio.pin[i].category_code = 0;
- rdev->audio.pin[i].connected = false;
- rdev->audio.pin[i].offset = pin_offsets[i];
- rdev->audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
- }
+void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock)
+{
+ /* Two dtos; generally use dto1 for DP */
+ u32 value = 0;
+ value |= DCCG_AUDIO_DTO_SEL;
+
+ if (crtc)
+ value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
+
+ WREG32(DCCG_AUDIO_DTO_SOURCE, value);
- return 0;
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
-void dce6_audio_fini(struct radeon_device *rdev)
+void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
{
- int i;
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
- if (!rdev->audio.enabled)
+ if (!dig || !dig->afmt)
return;
- for (i = 0; i < rdev->audio.num_pins; i++)
- dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
+ offset = dig->afmt->offset;
+
+ if (enable) {
+ if (dig->afmt->enabled)
+ return;
+
+ WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+ WREG32(EVERGREEN_DP_SEC_CNTL + offset,
+ EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
+ EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
+ EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
+ EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
+ radeon_audio_enable(rdev, dig->afmt->pin, true);
+ } else {
+ if (!dig->afmt->enabled)
+ return;
+
+ WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
+ radeon_audio_enable(rdev, dig->afmt->pin, false);
+ }
- rdev->audio.enabled = false;
+ dig->afmt->enabled = enable;
}
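
A reduced model of the control flow in the new dce6_enable_dp_audio_packets(): redundant transitions are skipped via the afmt->enabled flag, enabling sets the four secondary-stream bits, disabling clears the register. The bit values below are placeholders; the DP_SEC_TIMESTAMP write and the radeon_audio_enable() calls are omitted.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_SEC_ASP_ENABLE	(1u << 0)	/* audio packets (placeholder) */
#define DP_SEC_ATP_ENABLE	(1u << 1)	/* audio timestamp packets (placeholder) */
#define DP_SEC_AIP_ENABLE	(1u << 2)	/* audio infoframe packets (placeholder) */
#define DP_SEC_STREAM_ENABLE	(1u << 4)	/* master secondary-stream enable (placeholder) */

struct afmt_state {
	bool enabled;
	uint32_t dp_sec_cntl;	/* stands in for EVERGREEN_DP_SEC_CNTL */
};

static void enable_dp_audio_packets(struct afmt_state *afmt, bool enable)
{
	if (afmt->enabled == enable)
		return;		/* mirrors the early returns in the hunk */

	afmt->dp_sec_cntl = enable ?
		(DP_SEC_ASP_ENABLE | DP_SEC_ATP_ENABLE |
		 DP_SEC_AIP_ENABLE | DP_SEC_STREAM_ENABLE) : 0;
	afmt->enabled = enable;
}

int main(void)
{
	struct afmt_state afmt = { false, 0 };

	enable_dp_audio_packets(&afmt, true);
	printf("enabled,  DP_SEC_CNTL = 0x%08" PRIx32 "\n", afmt.dp_sec_cntl);
	enable_dp_audio_packets(&afmt, false);
	printf("disabled, DP_SEC_CNTL = 0x%08" PRIx32 "\n", afmt.dp_sec_cntl);
	return 0;
}
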
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 85995b4e3338..78600f534c80 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
@@ -5286,7 +5287,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
@@ -5332,7 +5333,7 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
@@ -5482,7 +5483,7 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 924b1b7ab455..c9e0fbbf76a3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -83,6 +83,7 @@ struct evergreen_cs_track {
u32 htile_offset;
u32 htile_surface;
struct radeon_bo *htile_bo;
+ unsigned long indirect_draw_buffer_size;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -1896,6 +1897,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
+ case PACKET3_INDEX_BUFFER_SIZE:
+ {
+ if (pkt->count != 0) {
+ DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
+ return -EINVAL;
+ }
+ break;
+ }
case PACKET3_DRAW_INDEX:
{
uint64_t offset;
@@ -2006,6 +2015,67 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
+ case PACKET3_SET_BASE:
+ {
+ /*
+ DW 1 HEADER Header of the packet. Shader_Type in bit 1 of the Header will correspond to the shader type of the Load, see Type-3 Packet.
+ 2 BASE_INDEX Bits [3:0] BASE_INDEX - Base Index specifies which base address is specified in the last two DWs.
+ 0001: DX11 Draw_Index_Indirect Patch Table Base: Base address for Draw_Index_Indirect data.
+ 3 ADDRESS_LO Bits [31:3] - Lower bits of QWORD-Aligned Address. Bits [2:0] - Reserved
+ 4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
+ */
+ if (pkt->count != 2) {
+ DRM_ERROR("bad SET_BASE\n");
+ return -EINVAL;
+ }
+
+ /* currently only supporting setting indirect draw buffer base address */
+ if (idx_value != 1) {
+ DRM_ERROR("bad SET_BASE\n");
+ return -EINVAL;
+ }
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SET_BASE\n");
+ return -EINVAL;
+ }
+
+ track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj);
+
+ ib[idx+1] = reloc->gpu_offset;
+ ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff;
+
+ break;
+ }
+ case PACKET3_DRAW_INDIRECT:
+ case PACKET3_DRAW_INDEX_INDIRECT:
+ {
+ u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;
+
+ /*
+ DW 1 HEADER
+ 2 DATA_OFFSET Bits [31:0] + byte aligned offset where the required data structure starts. Bits 1:0 are zero
+ 3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
+ */
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DRAW_INDIRECT\n");
+ return -EINVAL;
+ }
+
+ if (idx_value + size > track->indirect_draw_buffer_size) {
+ dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
+ idx_value, size, track->indirect_draw_buffer_size);
+ return -EINVAL;
+ }
+
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ }
case PACKET3_DISPATCH_DIRECT:
if (pkt->count != 3) {
DRM_ERROR("bad DISPATCH_DIRECT\n");
@@ -3243,7 +3313,13 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
switch (pkt->opcode) {
case PACKET3_NOP:
+ break;
case PACKET3_SET_BASE:
+ if (idx_value != 1) {
+ DRM_ERROR("bad SET_BASE");
+ return -EINVAL;
+ }
+ break;
case PACKET3_CLEAR_STATE:
case PACKET3_INDEX_BUFFER_SIZE:
case PACKET3_DISPATCH_DIRECT:
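
A self-contained sketch of the bounds check behind the new DRAW_INDIRECT / DRAW_INDEX_INDIRECT cases: the packet's data offset plus the size of the indirect arguments (16 bytes for DRAW_INDIRECT, 20 for DRAW_INDEX_INDIRECT) must fit inside the buffer recorded by the preceding SET_BASE. Relocation handling and the track-check call are omitted.

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int check_draw_indirect(uint32_t data_offset, bool indexed,
			       uint64_t indirect_buffer_size)
{
	uint64_t args_size = indexed ? 20 : 16;

	if (data_offset + args_size > indirect_buffer_size) {
		fprintf(stderr,
			"DRAW_INDIRECT buffer too small %" PRIu32 " + %" PRIu64 " > %" PRIu64 "\n",
			data_offset, args_size, indirect_buffer_size);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("in range : %d\n", check_draw_indirect(0, false, 256));
	printf("too small: %d\n", check_draw_indirect(250, true, 256));
	return 0;
}
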
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 53abd9b17a50..1d9aebc79595 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -29,17 +29,12 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "evergreend.h"
#include "atom.h"
-extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
-extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
-extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
-extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
-
/* enable the audio stream */
-static void dce4_audio_enable(struct radeon_device *rdev,
+void dce4_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask)
{
@@ -69,48 +64,42 @@ static void dce4_audio_enable(struct radeon_device *rdev,
WREG32(AZ_HOT_PLUG_CONTROL, tmp);
}
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+void evergreen_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
+ int bpc = 8;
- WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
- WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
- WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
- WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+ if (bpc > 8)
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+ else
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_SOURCE | /* select SW CTS value */
+ HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr->cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr->n_32khz);
- WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
- WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr->cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr->n_44_1khz);
+
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr->cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr->n_48khz);
}
-static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_connector *connector, struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -124,38 +113,17 @@ static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
else
tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
}
- WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+ WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
}
-static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+void dce4_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
- if (sad_count < 0) {
- DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
/* program the speaker allocation */
- tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@@ -163,19 +131,32 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
+ WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
}
-static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+void dce4_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
+ u32 tmp;
+ /* program the speaker allocation */
+ tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set DP mode */
+ tmp |= DP_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count)
+{
+ int i;
+ struct radeon_device *rdev = encoder->dev->dev_private;
static const u16 eld_reg_to_type[][2] = {
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
@@ -191,25 +172,6 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- return;
- }
- BUG_ON(!sads);
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -236,25 +198,17 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- WREG32(eld_reg_to_type[i][0], value);
+ WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value);
}
-
- kfree(sads);
}
/*
- * build a HDMI Video Info Frame
+ * build an AVI Info Frame
*/
-static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
+void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
+ unsigned char *buffer, size_t size)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
WREG32(AFMT_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -263,104 +217,103 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
WREG32(AFMT_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(AFMT_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+ frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+
+ WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI_AVI_INFO_LINE_MASK);
}
-static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- u32 base_rate = 24000;
- u32 max_ratio = clock / base_rate;
+ unsigned int max_ratio = clock / 24000;
u32 dto_phase;
- u32 dto_modulo = clock;
u32 wallclock_ratio;
- u32 dto_cntl;
-
- if (!dig || !dig->afmt)
- return;
-
- if (ASIC_IS_DCE6(rdev)) {
- dto_phase = 24 * 1000;
+ u32 value;
+
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
} else {
- if (max_ratio >= 8) {
- dto_phase = 192 * 1000;
- wallclock_ratio = 3;
- } else if (max_ratio >= 4) {
- dto_phase = 96 * 1000;
- wallclock_ratio = 2;
- } else if (max_ratio >= 2) {
- dto_phase = 48 * 1000;
- wallclock_ratio = 1;
- } else {
- dto_phase = 24 * 1000;
- wallclock_ratio = 0;
- }
- dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
- dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
- WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
}
- /* XXX two dtos; generally use dto0 for hdmi */
+ value = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ value |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ value &= ~DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ WREG32(DCCG_AUDIO_DTO0_CNTL, value);
+
+ /* Two dtos; generally use dto0 for HDMI */
+ value = 0;
+
+ if (crtc)
+ value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
+
+ WREG32(DCCG_AUDIO_DTO_SOURCE, value);
+
/* Express [24MHz / target pixel clock] as an exact rational
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
- WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
}
-
-/*
- * update the info frames with the data from the current display mode
- */
-void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- uint32_t offset;
- ssize_t err;
- uint32_t val;
- int bpc = 8;
+ u32 value;
- if (!dig || !dig->afmt)
- return;
+ value = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ value |= DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ WREG32(DCCG_AUDIO_DTO1_CNTL, value);
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
- offset = dig->afmt->offset;
+ /* Two dtos; generally use dto1 for DP */
+ value = 0;
+ value |= DCCG_AUDIO_DTO_SEL;
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- bpc = radeon_crtc->bpc;
- }
+ if (crtc)
+ value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
- /* disable audio prior to setting up hw */
- if (ASIC_IS_DCE6(rdev)) {
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- dce6_audio_enable(rdev, dig->afmt->pin, 0);
- } else {
- dig->afmt->pin = r600_audio_get_pin(rdev);
- dce4_audio_enable(rdev, dig->afmt->pin, 0);
- }
+ WREG32(DCCG_AUDIO_DTO_SOURCE, value);
+
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+ */
+ WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10);
+}
- evergreen_audio_set_dto(encoder, mode->clock);
+void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
- HDMI_NULL_SEND); /* send null packets when required */
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
+}
- WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+void dce4_hdmi_set_color_depth(struct drm_encoder *encoder, u32 offset, int bpc)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ uint32_t val;
val = RREG32(HDMI_CONTROL + offset);
val &= ~HDMI_DEEP_COLOR_ENABLE;
@@ -390,113 +343,59 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
}
WREG32(HDMI_CONTROL + offset, val);
+}
- WREG32(HDMI_VBI_PACKET_CONTROL + offset,
- HDMI_NULL_SEND | /* send null packets when required */
- HDMI_GC_SEND | /* send general control packets */
- HDMI_GC_CONT); /* send general control packets every frame */
+void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
- AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
-
- /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
-
- if (bpc > 8)
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
- else
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_SOURCE | /* select SW CTS value */
- HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
-
- evergreen_hdmi_update_ACR(encoder, mode->clock);
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
WREG32(AFMT_60958_0 + offset,
- AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+ AFMT_60958_CS_CHANNEL_NUMBER_L(1));
WREG32(AFMT_60958_1 + offset,
- AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+ AFMT_60958_CS_CHANNEL_NUMBER_R(2));
WREG32(AFMT_60958_2 + offset,
- AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
- AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
- AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
- AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
- AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
- AFMT_60958_CS_CHANNEL_NUMBER_7(8));
-
- if (ASIC_IS_DCE6(rdev)) {
- dce6_afmt_write_speaker_allocation(encoder);
- } else {
- dce4_afmt_write_speaker_allocation(encoder);
- }
+ AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+ AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+ AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+ AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+ AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+ AFMT_60958_CS_CHANNEL_NUMBER_7(8));
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
- AFMT_AUDIO_CHANNEL_ENABLE(0xff));
-
- /* fglrx sets 0x40 in 0x5f80 here */
-
- if (ASIC_IS_DCE6(rdev)) {
- dce6_afmt_select_pin(encoder);
- dce6_afmt_write_sad_regs(encoder);
- dce6_afmt_write_latency_fields(encoder, mode);
- } else {
- evergreen_hdmi_write_sad_regs(encoder);
- dce4_afmt_write_latency_fields(encoder, mode);
- }
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+ AFMT_AUDIO_CHANNEL_ENABLE(0xff));
- WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
- ~HDMI_AVI_INFO_LINE_MASK);
+ /* allow 60958 channel status and send audio packets fields to be updated */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
+}
- WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
- /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
- WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
- WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
- WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
- WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+void dce4_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
- /* enable audio after to setting up hw */
- if (ASIC_IS_DCE6(rdev))
- dce6_audio_enable(rdev, dig->afmt->pin, 1);
+ if (mute)
+ WREG32_OR(HDMI_GC + offset, HDMI_GC_AVMUTE);
else
- dce4_audio_enable(rdev, dig->afmt->pin, 0xf);
+ WREG32_AND(HDMI_GC + offset, ~HDMI_GC_AVMUTE);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
@@ -516,10 +415,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
return;
if (!enable && dig->afmt->pin) {
- if (ASIC_IS_DCE6(rdev))
- dce6_audio_enable(rdev, dig->afmt->pin, 0);
- else
- dce4_audio_enable(rdev, dig->afmt->pin, 0);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
}
@@ -528,3 +424,57 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
}
+
+void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ offset = dig->afmt->offset;
+
+ if (enable) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ uint32_t val;
+
+ if (dig->afmt->enabled)
+ return;
+
+ WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+
+ if (radeon_connector->con_priv) {
+ dig_connector = radeon_connector->con_priv;
+ val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
+ val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
+
+ if (dig_connector->dp_clock == 162000)
+ val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(3);
+ else
+ val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
+
+ WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
+ }
+
+ WREG32(EVERGREEN_DP_SEC_CNTL + offset,
+ EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
+ EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
+ EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
+ EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ } else {
+ if (!dig->afmt->enabled)
+ return;
+
+ WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ }
+
+ dig->afmt->enabled = enable;
+}
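
For reference, the DP_SEC_CNTL value programmed in the enable path above, spelled out with raw bit positions (taken from the evergreen_reg.h hunk below; the variable name is illustrative):

static const unsigned int example_dp_sec_cntl =
	(1 << 0) |	/* EVERGREEN_DP_SEC_STREAM_ENABLE */
	(1 << 4) |	/* EVERGREEN_DP_SEC_ASP_ENABLE */
	(1 << 8) |	/* EVERGREEN_DP_SEC_ATP_ENABLE */
	(1 << 12);	/* EVERGREEN_DP_SEC_AIP_ENABLE */
/* example_dp_sec_cntl == 0x00001111 */
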
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 23bff590fb6e..aa939dfed3a3 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -251,4 +251,19 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* Display Port block */
+#define EVERGREEN_DP_SEC_CNTL 0x7280
+# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
+# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
+# define EVERGREEN_DP_SEC_ATP_ENABLE (1 << 8)
+# define EVERGREEN_DP_SEC_AIP_ENABLE (1 << 12)
+# define EVERGREEN_DP_SEC_GSP_ENABLE (1 << 20)
+# define EVERGREEN_DP_SEC_AVI_ENABLE (1 << 24)
+# define EVERGREEN_DP_SEC_MPG_ENABLE (1 << 28)
+#define EVERGREEN_DP_SEC_TIMESTAMP 0x72a4
+# define EVERGREEN_DP_SEC_TIMESTAMP_MODE(x) (((x) & 0x3) << 0)
+#define EVERGREEN_DP_SEC_AUD_N 0x7294
+# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
+# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+
#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b066d6711b8d..ee83d2a88750 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -509,6 +509,7 @@
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
#define DCCG_AUDIO_DTO1_LOAD 0x05c8
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
+# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index e3e9c10cfba9..0e236d067d66 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1169,6 +1169,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
}
}
+static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
+{
+ u32 thermal_int;
+
+ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
+ if (enable)
+ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+ else
+ thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
+ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+}
+
int kv_dpm_enable(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1280,8 +1293,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev)
DRM_ERROR("kv_set_thermal_temperature_range failed\n");
return ret;
}
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
+ kv_enable_thermal_int(rdev, true);
}
/* powerdown unused blocks for now */
@@ -1312,6 +1324,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
kv_stop_dpm(rdev);
kv_enable_ulv(rdev, false);
kv_reset_am(rdev);
+ kv_enable_thermal_int(rdev, false);
kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
@@ -1925,6 +1938,7 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
kv_init_sclk_t(rdev);
}
+#if 0
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1945,6 +1959,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev)
kv_set_enabled_level(rdev, pi->graphics_boot_level);
}
}
+#endif
//XXX use sumo_dpm_display_configuration_changed
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 360de9f1f491..24242a7f0ac3 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -27,6 +27,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
@@ -2097,15 +2098,9 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- if (ASIC_IS_DCE6(rdev)) {
- r = dce6_audio_init(rdev);
- if (r)
- return r;
- } else {
- r = r600_audio_init(rdev);
- if (r)
- return r;
- }
+ r = radeon_audio_init(rdev);
+ if (r)
+ return r;
return 0;
}
@@ -2140,10 +2135,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- if (ASIC_IS_DCE6(rdev))
- dce6_audio_fini(rdev);
- else
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
@@ -2516,6 +2508,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 50f88611ff60..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -463,5 +462,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+ radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0); /* value */
}
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 6d2f16cf2c1c..7bc9f8d9804a 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3862,11 +3862,13 @@ void ni_dpm_post_set_power_state(struct radeon_device *rdev)
ni_update_current_ps(rdev, new_ps);
}
+#if 0
void ni_dpm_reset_asic(struct radeon_device *rdev)
{
ni_restrict_performance_levels_before_switch(rdev);
rv770_set_boot_state(rdev);
}
+#endif
union power_info {
struct _ATOM_POWERPLAY_INFO info;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2e12e4d69253..ad7125486894 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1133,6 +1133,23 @@
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -1272,6 +1289,13 @@
(1 << 21) | \
(((n) & 0xFFFFF) << 0))
+#define DMA_SRBM_POLL_PACKET ((9 << 28) | \
+ (1 << 27) | \
+ (1 << 26))
+
+#define DMA_SRBM_READ_PACKET ((9 << 28) | \
+ (1 << 27))
+
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
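
A minimal sketch of how the new WAIT_REG_MEM field macros compose into the first data word of the packet emitted by cayman_vm_flush() above; the helper is hypothetical and only restates the definitions:

static inline unsigned int wait_reg_mem_word(unsigned int function,
					     unsigned int mem_space,
					     unsigned int engine)
{
	return (function << 0) |	/* WAIT_REG_MEM_FUNCTION, 0 = always */
	       (mem_space << 4) |	/* WAIT_REG_MEM_MEM_SPACE, 0 = register */
	       (engine << 8);		/* WAIT_REG_MEM_ENGINE, 0 = ME */
}

cayman_vm_flush() polls a register with the "always" function on the ME engine, so its first data word is wait_reg_mem_word(0, 0, 0) == 0.
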
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+ return addr;
+}
+
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+ uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = rdev->gart.ptr;
-
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
+ return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
/* on x86 we want this to be CPU endian; on powerpc
 * without HW swappers it'll get swapped on the way
 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
+ writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ef5d6066fa5b..07a71a2488c9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -33,6 +33,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
@@ -3054,7 +3055,7 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
@@ -3105,7 +3106,7 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r600_cp_stop(rdev);
if (rdev->has_uvd) {
uvd_v1_0_fini(rdev);
@@ -3224,7 +3225,7 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r600_cp_fini(rdev);
r600_irq_fini(rdev);
if (rdev->has_uvd) {
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index b90dc0eb08e6..62c91ed669ce 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "r600d.h"
#include "atom.h"
@@ -55,30 +56,6 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
- /* 32kHz 44.1kHz 48kHz */
- /* Clock N CTS N CTS N CTS */
- { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
- { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
- { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
- { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
- { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
- { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
- { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
- { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
- { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
- { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
-};
-
-
-/*
- * check if the chipset is supported
- */
-static int r600_audio_chipset_supported(struct radeon_device *rdev)
-{
- return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
-}
-
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
struct r600_audio_pin status;
@@ -191,155 +168,56 @@ void r600_audio_enable(struct radeon_device *rdev,
WREG32(AZ_HOT_PLUG_CONTROL, tmp);
}
-/*
- * initialize the audio vars
- */
-int r600_audio_init(struct radeon_device *rdev)
-{
- if (!radeon_audio || !r600_audio_chipset_supported(rdev))
- return 0;
-
- rdev->audio.enabled = true;
-
- rdev->audio.num_pins = 1;
- rdev->audio.pin[0].channels = -1;
- rdev->audio.pin[0].rate = -1;
- rdev->audio.pin[0].bits_per_sample = -1;
- rdev->audio.pin[0].status_bits = 0;
- rdev->audio.pin[0].category_code = 0;
- rdev->audio.pin[0].id = 0;
- /* disable audio. it will be set up later */
- r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
-
- return 0;
-}
-
-/*
- * release the audio timer
- * TODO: How to do this correctly on SMP systems?
- */
-void r600_audio_fini(struct radeon_device *rdev)
-{
- if (!rdev->audio.enabled)
- return;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
-
- rdev->audio.enabled = false;
-}
-
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
{
/* only one pin on 6xx-NI */
return &rdev->audio.pin[0];
}
-/*
- * calculate CTS and N values if they are not found in the table
- */
-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
-{
- int n, cts;
- unsigned long div, mul;
-
- /* Safe, but overly large values */
- n = 128 * freq;
- cts = clock * 1000;
-
- /* Smallest valid fraction */
- div = gcd(n, cts);
-
- n /= div;
- cts /= div;
-
- /*
- * The optimal N is 128*freq/1000. Calculate the closest larger
- * value that doesn't truncate any bits.
- */
- mul = ((128*freq/1000) + (n-1))/n;
-
- n *= mul;
- cts *= mul;
-
- /* Check that we are in spec (not always possible) */
- if (n < (128*freq/1500))
- printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
- if (n > (128*freq/300))
- printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
-
- *N = n;
- *CTS = cts;
-
- DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
- *N, *CTS, freq);
-}
-
-struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
-{
- struct radeon_hdmi_acr res;
- u8 i;
-
- /* Precalculated values for common clocks */
- for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
- if (r600_hdmi_predefined_acr[i].clock == clock)
- return r600_hdmi_predefined_acr[i];
- }
-
- /* And odd clocks get manually calculated */
- r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
- r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
- r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
-
- return res;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
+
+ /* DCE 3.0 uses a register that's normally used for CRC_CONTROL */
+ uint32_t acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
+ HDMI0_ACR_PACKET_CONTROL;
+ WREG32_P(acr_ctl + offset,
+ HDMI0_ACR_SOURCE | /* select SW CTS value */
+ HDMI0_ACR_AUTO_SEND, /* allow hw to send ACR packets when required */
+ ~(HDMI0_ACR_SOURCE |
+ HDMI0_ACR_AUTO_SEND));
WREG32_P(HDMI0_ACR_32_0 + offset,
- HDMI0_ACR_CTS_32(acr.cts_32khz),
- ~HDMI0_ACR_CTS_32_MASK);
+ HDMI0_ACR_CTS_32(acr->cts_32khz),
+ ~HDMI0_ACR_CTS_32_MASK);
WREG32_P(HDMI0_ACR_32_1 + offset,
- HDMI0_ACR_N_32(acr.n_32khz),
- ~HDMI0_ACR_N_32_MASK);
+ HDMI0_ACR_N_32(acr->n_32khz),
+ ~HDMI0_ACR_N_32_MASK);
WREG32_P(HDMI0_ACR_44_0 + offset,
- HDMI0_ACR_CTS_44(acr.cts_44_1khz),
- ~HDMI0_ACR_CTS_44_MASK);
+ HDMI0_ACR_CTS_44(acr->cts_44_1khz),
+ ~HDMI0_ACR_CTS_44_MASK);
WREG32_P(HDMI0_ACR_44_1 + offset,
- HDMI0_ACR_N_44(acr.n_44_1khz),
- ~HDMI0_ACR_N_44_MASK);
+ HDMI0_ACR_N_44(acr->n_44_1khz),
+ ~HDMI0_ACR_N_44_MASK);
WREG32_P(HDMI0_ACR_48_0 + offset,
- HDMI0_ACR_CTS_48(acr.cts_48khz),
- ~HDMI0_ACR_CTS_48_MASK);
+ HDMI0_ACR_CTS_48(acr->cts_48khz),
+ ~HDMI0_ACR_CTS_48_MASK);
WREG32_P(HDMI0_ACR_48_1 + offset,
- HDMI0_ACR_N_48(acr.n_48khz),
- ~HDMI0_ACR_N_48_MASK);
+ HDMI0_ACR_N_48(acr->n_48khz),
+ ~HDMI0_ACR_N_48_MASK);
}
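
As a rough aid to the values written here (and to the predefined table removed from r600_hdmi.c above): HDMI audio clock regeneration ties N and CTS to the TMDS clock via 128 * fs = f_TMDS * N / CTS. A minimal sketch, assuming the clock is given in kHz:

static unsigned int hdmi_acr_cts(unsigned int clock_khz,
				 unsigned int n, unsigned int fs_hz)
{
	/* CTS = f_TMDS * N / (128 * fs) */
	return (unsigned long long)clock_khz * 1000 * n / (128ULL * fs_hz);
}

For example, hdmi_acr_cts(74250, 6144, 48000) == 74250, matching the 48 kHz column of the 74.25 MHz entry in the removed table.
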
/*
* build a HDMI Video Info Frame
*/
-void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
- size_t size)
+void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
+ unsigned char *buffer, size_t size)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -348,7 +226,14 @@ void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(HDMI0_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+ frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
+
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
}
/*
@@ -425,188 +310,94 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
-void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 base_rate = 24000;
- u32 max_ratio = clock / base_rate;
- u32 dto_phase;
- u32 dto_modulo = clock;
- u32 wallclock_ratio;
- u32 dto_cntl;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
- if (!dig || !dig->afmt)
+ if (!crtc)
return;
- if (max_ratio >= 8) {
- dto_phase = 192 * 1000;
- wallclock_ratio = 3;
- } else if (max_ratio >= 4) {
- dto_phase = 96 * 1000;
- wallclock_ratio = 2;
- } else if (max_ratio >= 2) {
- dto_phase = 48 * 1000;
- wallclock_ratio = 1;
- } else {
- dto_phase = 24 * 1000;
- wallclock_ratio = 0;
- }
+ radeon_encoder = to_radeon_encoder(crtc->encoder);
+ dig = radeon_encoder->enc_priv;
- /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
- * doesn't matter which one you use. Just use the first one.
- */
- /* XXX two dtos; generally use dto0 for hdmi */
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
- */
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_encoder == 0) {
- dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
- dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
- WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
- WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
- } else {
- dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
- dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
- WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
- WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
- WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
- WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
- }
+ if (!dig)
+ return;
+
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, 24000 * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
} else {
- /* according to the reg specs, this should DCE3.2 only, but in
- * practice it seems to cover DCE2.0/3.0/3.1 as well.
- */
- if (dig->dig_encoder == 0) {
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
- } else {
- WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
- }
+ WREG32(DCCG_AUDIO_DTO1_PHASE, 24000 * 100);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
}
-/*
- * update the info frames with the data from the current display mode
- */
-void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- uint32_t offset;
- uint32_t acr_ctl;
- ssize_t err;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
- offset = dig->afmt->offset;
- /* disable audio prior to setting up hw */
- dig->afmt->pin = r600_audio_get_pin(rdev);
- r600_audio_enable(rdev, dig->afmt->pin, 0xf);
+ WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
+}
- r600_audio_set_dto(encoder, mode->clock);
+void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */
- HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
- ~(HDMI0_AUDIO_SAMPLE_SEND |
- HDMI0_AUDIO_DELAY_EN_MASK |
- HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
- HDMI0_60958_CS_UPDATE));
-
- /* DCE 3.0 uses register that's normally for CRC_CONTROL */
- acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
- HDMI0_ACR_PACKET_CONTROL;
- WREG32_P(acr_ctl + offset,
- HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
- HDMI0_ACR_AUTO_SEND, /* allow hw to sent ACR packets when required */
- ~(HDMI0_ACR_SOURCE |
- HDMI0_ACR_AUTO_SEND));
-
- WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND | /* send null packets when required */
- HDMI0_GC_SEND | /* send general control packets */
- HDMI0_GC_CONT); /* send general control packets every frame */
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
+ ~(HDMI0_AUDIO_SAMPLE_SEND |
+ HDMI0_AUDIO_DELAY_EN_MASK |
+ HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
+ HDMI0_60958_CS_UPDATE));
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
- HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
- HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
- ~(HDMI0_AVI_INFO_LINE_MASK |
- HDMI0_AUDIO_INFO_LINE_MASK));
-
- WREG32_AND(HDMI0_GC + offset,
- ~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- /* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */
+ HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI0_AUDIO_INFO_LINE_MASK);
WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
- ~(HDMI0_GENERIC0_SEND |
- HDMI0_GENERIC0_CONT |
- HDMI0_GENERIC0_UPDATE |
- HDMI0_GENERIC1_SEND |
- HDMI0_GENERIC1_CONT |
- HDMI0_GENERIC0_LINE_MASK |
- HDMI0_GENERIC1_LINE_MASK));
-
- r600_hdmi_update_ACR(encoder, mode->clock);
+ ~(HDMI0_GENERIC0_SEND |
+ HDMI0_GENERIC0_CONT |
+ HDMI0_GENERIC0_UPDATE |
+ HDMI0_GENERIC1_SEND |
+ HDMI0_GENERIC1_CONT |
+ HDMI0_GENERIC0_LINE_MASK |
+ HDMI0_GENERIC1_LINE_MASK));
WREG32_P(HDMI0_60958_0 + offset,
- HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
- ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
- HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
+ HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
+ ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
+ HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
WREG32_P(HDMI0_60958_1 + offset,
- HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
- ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
+ HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
+ ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
+}
- /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
- WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
- WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
- WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
- WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
- /* enable audio after to setting up hw */
- r600_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (mute)
+ WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE);
+ else
+ WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE);
}
/**
@@ -692,7 +483,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
return;
if (!enable && dig->afmt->pin) {
- r600_audio_enable(rdev, dig->afmt->pin, 0);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..5587603b4a89 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
+ uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct page **pages;
- dma_addr_t *pages_addr;
+ uint64_t *pages_entry;
bool ready;
};
@@ -1757,6 +1758,9 @@ struct r600_audio {
bool enabled;
struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
int num_pins;
+ struct radeon_audio_funcs *hdmi_funcs;
+ struct radeon_audio_funcs *dp_funcs;
+ struct radeon_audio_basic_funcs *funcs;
};
/*
@@ -1777,8 +1781,16 @@ void radeon_test_syncing(struct radeon_device *rdev);
/*
* MMU Notifier
*/
+#if defined(CONFIG_MMU_NOTIFIER)
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
void radeon_mn_unregister(struct radeon_bo *bo);
+#else
+static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
+#endif
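
The stubs above keep callers free of #ifdef CONFIG_MMU_NOTIFIER blocks; a hedged sketch of how a userptr path might consume them (the wrapper name is illustrative):

static int radeon_example_init_userptr(struct radeon_bo *bo, unsigned long addr)
{
	int r = radeon_mn_register(bo, addr);

	if (r == -ENODEV)
		DRM_DEBUG("userptr unsupported: kernel built without MMU notifiers\n");

	return r;
}
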
/*
* Debugfs
@@ -1847,8 +1859,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
+ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -1967,6 +1980,10 @@ struct radeon_asic {
bool (*vblank_too_short)(struct radeon_device *rdev);
void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
void (*enable_bapm)(struct radeon_device *rdev, bool enable);
+ void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
+ u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
+ int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
+ int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
} dpm;
/* pageflipping */
struct {
@@ -2852,7 +2869,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
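
A minimal sketch of how the split get_page_entry/set_page interface is meant to be driven by the GART binding code (the loop shape is illustrative; only the macros and callbacks come from this patch). The ASIC-specific PTE is computed once per DMA address and then written per slot, which also lets the precomputed dummy-page entry (struct radeon_dummy_page.entry above) be reused on unbind:

static void example_gart_bind(struct radeon_device *rdev, unsigned offset,
			      unsigned pages, dma_addr_t *dma_addr,
			      uint32_t flags)
{
	unsigned i;

	for (i = 0; i < pages; i++) {
		/* translate the DMA address into an ASIC-specific PTE once */
		uint64_t entry = radeon_gart_get_page_entry(dma_addr[i], flags);

		/* then write it into the GART table slot */
		radeon_gart_set_page(rdev, offset + i, entry);
	}
	radeon_gart_tlb_flush(rdev);
}
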
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 850de57069be..c0ecd128b14b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
.set_wptr = &r100_gfx_set_wptr,
};
+static struct radeon_asic_ring rv515_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -624,8 +647,6 @@ static struct radeon_asic rs600_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r100_copy_blit,
@@ -677,6 +698,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -692,8 +714,6 @@ static struct radeon_asic rs690_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r100_copy_blit,
@@ -745,10 +765,11 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -811,10 +832,11 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -905,6 +927,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -921,8 +944,6 @@ static struct radeon_asic r600_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -990,6 +1011,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1007,8 +1029,6 @@ static struct radeon_asic rv6xx_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1081,6 +1101,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1098,8 +1119,6 @@ static struct radeon_asic rs780_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1185,6 +1204,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1202,8 +1222,6 @@ static struct radeon_asic rv770_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &dce3_1_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1303,6 +1321,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1320,8 +1339,6 @@ static struct radeon_asic evergreen_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1395,6 +1412,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1412,8 +1430,6 @@ static struct radeon_asic sumo_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1486,6 +1502,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1503,8 +1520,6 @@ static struct radeon_asic btc_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1621,6 +1636,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1649,8 +1665,6 @@ static struct radeon_asic cayman_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1724,6 +1738,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1752,8 +1767,6 @@ static struct radeon_asic trinity_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1857,6 +1870,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1885,8 +1899,6 @@ static struct radeon_asic si_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1939,6 +1951,10 @@ static struct radeon_asic si_asic = {
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
+ .fan_ctrl_set_mode = &si_fan_ctrl_set_mode,
+ .fan_ctrl_get_mode = &si_fan_ctrl_get_mode,
+ .get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -2018,6 +2034,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2048,8 +2065,6 @@ static struct radeon_asic ci_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &cik_copy_cpdma,
@@ -2104,6 +2119,10 @@ static struct radeon_asic ci_asic = {
.force_performance_level = &ci_dpm_force_performance_level,
.vblank_too_short = &ci_dpm_vblank_too_short,
.powergate_uvd = &ci_dpm_powergate_uvd,
+ .fan_ctrl_set_mode = &ci_fan_ctrl_set_mode,
+ .fan_ctrl_get_mode = &ci_fan_ctrl_get_mode,
+ .get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -2125,6 +2144,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2155,8 +2175,6 @@ static struct radeon_asic kv_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
- .hdmi_enable = &evergreen_hdmi_enable,
- .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &cik_copy_cpdma,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..72bdd3bf0d8e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -390,7 +394,6 @@ void r600_irq_suspend(struct radeon_device *rdev);
void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
-int r600_audio_init(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
@@ -399,8 +402,6 @@ void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
-void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -469,8 +470,6 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
-/* hdmi */
-void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* rv7xx pm */
int rv770_dpm_init(struct radeon_device *rdev);
int rv770_dpm_enable(struct radeon_device *rdev);
@@ -540,8 +539,6 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
-void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
-void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
int evergreen_get_temp(struct radeon_device *rdev);
int sumo_get_temp(struct radeon_device *rdev);
int tn_get_temp(struct radeon_device *rdev);
@@ -680,7 +677,6 @@ void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
-int dce6_audio_init(struct radeon_device *rdev);
void dce6_audio_fini(struct radeon_device *rdev);
/*
@@ -744,6 +740,12 @@ void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int si_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed);
+int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed);
+u32 si_fan_ctrl_get_mode(struct radeon_device *rdev);
+void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
/* DCE8 - CIK */
void dce8_bandwidth_update(struct radeon_device *rdev);
@@ -861,6 +863,13 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed);
+int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed);
+u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev);
+void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
+
int kv_dpm_init(struct radeon_device *rdev);
int kv_dpm_enable(struct radeon_device *rdev);
int kv_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index dbc94f300297..fc1b3f34cf18 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -3289,6 +3289,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
new file mode 100644
index 000000000000..a3ceef6d9632
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -0,0 +1,766 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Slava Grigorev <slava.grigorev@amd.com>
+ */
+
+#include <linux/gcd.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "radeon.h"
+#include "atom.h"
+#include "radeon_audio.h"
+
+void r600_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
+ u8 enable_mask);
+void dce4_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
+ u8 enable_mask);
+void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
+ u8 enable_mask);
+u32 dce6_endpoint_rreg(struct radeon_device *rdev, u32 offset, u32 reg);
+void dce6_endpoint_wreg(struct radeon_device *rdev,
+ u32 offset, u32 reg, u32 v);
+void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count);
+void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count);
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count);
+void dce3_2_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+void dce3_2_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+void dce4_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+void dce4_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_connector *connector, struct drm_display_mode *mode);
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_connector *connector, struct drm_display_mode *mode);
+struct r600_audio_pin* r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin* dce6_audio_get_pin(struct radeon_device *rdev);
+void dce6_afmt_select_pin(struct drm_encoder *encoder);
+void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+void dce3_2_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
+ unsigned char *buffer, size_t size);
+void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
+ unsigned char *buffer, size_t size);
+void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr);
+void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr);
+void evergreen_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr);
+void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset);
+void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset);
+void dce4_hdmi_set_color_depth(struct drm_encoder *encoder,
+ u32 offset, int bpc);
+void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset);
+void dce3_2_set_audio_packet(struct drm_encoder *encoder, u32 offset);
+void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset);
+void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute);
+void dce3_2_set_mute(struct drm_encoder *encoder, u32 offset, bool mute);
+void dce4_set_mute(struct drm_encoder *encoder, u32 offset, bool mute);
+static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
+void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
+
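+/* offsets of the seven audio pins relative to the 0x5e00 base;
+ * consecutive pins are spaced 0x18 apart */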
+static const u32 pin_offsets[7] =
+{
+ (0x5e00 - 0x5e00),
+ (0x5e18 - 0x5e00),
+ (0x5e30 - 0x5e00),
+ (0x5e48 - 0x5e00),
+ (0x5e60 - 0x5e00),
+ (0x5e78 - 0x5e00),
+ (0x5e90 - 0x5e00),
+};
+
+static u32 radeon_audio_rreg(struct radeon_device *rdev, u32 offset, u32 reg)
+{
+ return RREG32(reg);
+}
+
+static void radeon_audio_wreg(struct radeon_device *rdev, u32 offset,
+ u32 reg, u32 v)
+{
+ WREG32(reg, v);
+}
+
+static struct radeon_audio_basic_funcs r600_funcs = {
+ .endpoint_rreg = radeon_audio_rreg,
+ .endpoint_wreg = radeon_audio_wreg,
+ .enable = r600_audio_enable,
+};
+
+static struct radeon_audio_basic_funcs dce32_funcs = {
+ .endpoint_rreg = radeon_audio_rreg,
+ .endpoint_wreg = radeon_audio_wreg,
+ .enable = r600_audio_enable,
+};
+
+static struct radeon_audio_basic_funcs dce4_funcs = {
+ .endpoint_rreg = radeon_audio_rreg,
+ .endpoint_wreg = radeon_audio_wreg,
+ .enable = dce4_audio_enable,
+};
+
+static struct radeon_audio_basic_funcs dce6_funcs = {
+ .endpoint_rreg = dce6_endpoint_rreg,
+ .endpoint_wreg = dce6_endpoint_wreg,
+ .enable = dce6_audio_enable,
+};
+
+static struct radeon_audio_funcs r600_hdmi_funcs = {
+ .get_pin = r600_audio_get_pin,
+ .set_dto = r600_hdmi_audio_set_dto,
+ .update_acr = r600_hdmi_update_acr,
+ .set_vbi_packet = r600_set_vbi_packet,
+ .set_avi_packet = r600_set_avi_packet,
+ .set_audio_packet = r600_set_audio_packet,
+ .set_mute = r600_set_mute,
+ .mode_set = radeon_audio_hdmi_mode_set,
+ .dpms = r600_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce32_hdmi_funcs = {
+ .get_pin = r600_audio_get_pin,
+ .write_sad_regs = dce3_2_afmt_write_sad_regs,
+ .write_speaker_allocation = dce3_2_afmt_hdmi_write_speaker_allocation,
+ .set_dto = dce3_2_audio_set_dto,
+ .update_acr = dce3_2_hdmi_update_acr,
+ .set_vbi_packet = r600_set_vbi_packet,
+ .set_avi_packet = r600_set_avi_packet,
+ .set_audio_packet = dce3_2_set_audio_packet,
+ .set_mute = dce3_2_set_mute,
+ .mode_set = radeon_audio_hdmi_mode_set,
+ .dpms = r600_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce32_dp_funcs = {
+ .get_pin = r600_audio_get_pin,
+ .write_sad_regs = dce3_2_afmt_write_sad_regs,
+ .write_speaker_allocation = dce3_2_afmt_dp_write_speaker_allocation,
+ .set_dto = dce3_2_audio_set_dto,
+ .set_avi_packet = r600_set_avi_packet,
+ .set_audio_packet = dce3_2_set_audio_packet,
+};
+
+static struct radeon_audio_funcs dce4_hdmi_funcs = {
+ .get_pin = r600_audio_get_pin,
+ .write_sad_regs = evergreen_hdmi_write_sad_regs,
+ .write_speaker_allocation = dce4_afmt_hdmi_write_speaker_allocation,
+ .write_latency_fields = dce4_afmt_write_latency_fields,
+ .set_dto = dce4_hdmi_audio_set_dto,
+ .update_acr = evergreen_hdmi_update_acr,
+ .set_vbi_packet = dce4_set_vbi_packet,
+ .set_color_depth = dce4_hdmi_set_color_depth,
+ .set_avi_packet = evergreen_set_avi_packet,
+ .set_audio_packet = dce4_set_audio_packet,
+ .set_mute = dce4_set_mute,
+ .mode_set = radeon_audio_hdmi_mode_set,
+ .dpms = evergreen_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce4_dp_funcs = {
+ .get_pin = r600_audio_get_pin,
+ .write_sad_regs = evergreen_hdmi_write_sad_regs,
+ .write_speaker_allocation = dce4_afmt_dp_write_speaker_allocation,
+ .write_latency_fields = dce4_afmt_write_latency_fields,
+ .set_dto = dce4_dp_audio_set_dto,
+ .set_avi_packet = evergreen_set_avi_packet,
+ .set_audio_packet = dce4_set_audio_packet,
+ .mode_set = radeon_audio_dp_mode_set,
+ .dpms = evergreen_enable_dp_audio_packets,
+};
+
+static struct radeon_audio_funcs dce6_hdmi_funcs = {
+ .select_pin = dce6_afmt_select_pin,
+ .get_pin = dce6_audio_get_pin,
+ .write_sad_regs = dce6_afmt_write_sad_regs,
+ .write_speaker_allocation = dce6_afmt_hdmi_write_speaker_allocation,
+ .write_latency_fields = dce6_afmt_write_latency_fields,
+ .set_dto = dce6_hdmi_audio_set_dto,
+ .update_acr = evergreen_hdmi_update_acr,
+ .set_vbi_packet = dce4_set_vbi_packet,
+ .set_color_depth = dce4_hdmi_set_color_depth,
+ .set_avi_packet = evergreen_set_avi_packet,
+ .set_audio_packet = dce4_set_audio_packet,
+ .set_mute = dce4_set_mute,
+ .mode_set = radeon_audio_hdmi_mode_set,
+ .dpms = evergreen_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce6_dp_funcs = {
+ .select_pin = dce6_afmt_select_pin,
+ .get_pin = dce6_audio_get_pin,
+ .write_sad_regs = dce6_afmt_write_sad_regs,
+ .write_speaker_allocation = dce6_afmt_dp_write_speaker_allocation,
+ .write_latency_fields = dce6_afmt_write_latency_fields,
+ .set_dto = dce6_dp_audio_set_dto,
+ .set_avi_packet = evergreen_set_avi_packet,
+ .set_audio_packet = dce4_set_audio_packet,
+ .mode_set = radeon_audio_dp_mode_set,
+ .dpms = dce6_enable_dp_audio_packets,
+};
+
+static void radeon_audio_interface_init(struct radeon_device *rdev)
+{
+ if (ASIC_IS_DCE6(rdev)) {
+ rdev->audio.funcs = &dce6_funcs;
+ rdev->audio.hdmi_funcs = &dce6_hdmi_funcs;
+ rdev->audio.dp_funcs = &dce6_dp_funcs;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ rdev->audio.funcs = &dce4_funcs;
+ rdev->audio.hdmi_funcs = &dce4_hdmi_funcs;
+ rdev->audio.dp_funcs = &dce4_dp_funcs;
+ } else if (ASIC_IS_DCE32(rdev)) {
+ rdev->audio.funcs = &dce32_funcs;
+ rdev->audio.hdmi_funcs = &dce32_hdmi_funcs;
+ rdev->audio.dp_funcs = &dce32_dp_funcs;
+ } else {
+ rdev->audio.funcs = &r600_funcs;
+ rdev->audio.hdmi_funcs = &r600_hdmi_funcs;
+ rdev->audio.dp_funcs = 0;
+ }
+}
+
+static int radeon_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
+}
+
+int radeon_audio_init(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!radeon_audio || !radeon_audio_chipset_supported(rdev))
+ return 0;
+
+ rdev->audio.enabled = true;
+
+ if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */
+ rdev->audio.num_pins = 3;
+ else if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */
+ rdev->audio.num_pins = 7;
+ else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */
+ rdev->audio.num_pins = 7;
+ else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */
+ rdev->audio.num_pins = 2;
+ else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */
+ rdev->audio.num_pins = 6;
+ else if (ASIC_IS_DCE6(rdev)) /* SI: 6 streams, 6 endpoints */
+ rdev->audio.num_pins = 6;
+ else
+ rdev->audio.num_pins = 1;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ rdev->audio.pin[i].channels = -1;
+ rdev->audio.pin[i].rate = -1;
+ rdev->audio.pin[i].bits_per_sample = -1;
+ rdev->audio.pin[i].status_bits = 0;
+ rdev->audio.pin[i].category_code = 0;
+ rdev->audio.pin[i].connected = false;
+ rdev->audio.pin[i].offset = pin_offsets[i];
+ rdev->audio.pin[i].id = i;
+ }
+
+ radeon_audio_interface_init(rdev);
+
+ /* disable audio; it will be set up later */
+ for (i = 0; i < rdev->audio.num_pins; i++)
+ radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
+
+ return 0;
+}
+
+u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, u32 offset, u32 reg)
+{
+ if (rdev->audio.funcs->endpoint_rreg)
+ return rdev->audio.funcs->endpoint_rreg(rdev, offset, reg);
+
+ return 0;
+}
+
+void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
+ u32 reg, u32 v)
+{
+ if (rdev->audio.funcs->endpoint_wreg)
+ rdev->audio.funcs->endpoint_wreg(rdev, offset, reg, v);
+}
+
+static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int sad_count;
+
+ list_for_each_entry(connector,
+ &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
+ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
+ radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
+
+ kfree(sads);
+}
+
+static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u8 *sadb = NULL;
+ int sad_count;
+
+ list_for_each_entry(connector,
+ &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(
+ radeon_connector_edid(connector), &sadb);
+ if (sad_count < 0) {
+ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
+ sad_count);
+ sad_count = 0;
+ }
+
+ if (radeon_encoder->audio && radeon_encoder->audio->write_speaker_allocation)
+ radeon_encoder->audio->write_speaker_allocation(encoder, sadb, sad_count);
+
+ kfree(sadb);
+}
+
+static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_encoder *radeon_encoder;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = 0;
+
+ list_for_each_entry(connector,
+ &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
+ radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
+}
+
+struct r600_audio_pin* radeon_audio_get_pin(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->get_pin)
+ return radeon_encoder->audio->get_pin(rdev);
+
+ return NULL;
+}
+
+static void radeon_audio_select_pin(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->select_pin)
+ radeon_encoder->audio->select_pin(encoder);
+}
+
+void radeon_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin, u8 enable_mask)
+{
+ if (rdev->audio.funcs->enable)
+ rdev->audio.funcs->enable(rdev, pin, enable_mask);
+}
+
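+/* Called from the connector detect code: selects the DP or HDMI audio
+ * function table for the encoder and enables or disables its audio pin
+ * according to the connection status. */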
+void radeon_audio_detect(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct radeon_device *rdev;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+
+ if (!connector || !connector->encoder)
+ return;
+
+ rdev = connector->encoder->dev->dev_private;
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ dig = radeon_encoder->enc_priv;
+
+ if (status == connector_status_connected) {
+ struct radeon_connector *radeon_connector;
+ int sink_type;
+
+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_encoder->audio = NULL;
+ return;
+ }
+
+ radeon_connector = to_radeon_connector(connector);
+ sink_type = radeon_dp_getsinktype(radeon_connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_encoder->audio = rdev->audio.dp_funcs;
+ else
+ radeon_encoder->audio = rdev->audio.hdmi_funcs;
+
+ radeon_audio_write_speaker_allocation(connector->encoder);
+ radeon_audio_write_sad_regs(connector->encoder);
+ if (connector->encoder->crtc)
+ radeon_audio_write_latency_fields(connector->encoder,
+ &connector->encoder->crtc->mode);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ }
+}
+
+void radeon_audio_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!rdev->audio.enabled)
+ return;
+
+ for (i = 0; i < rdev->audio.num_pins; i++)
+ radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
+
+ rdev->audio.enabled = false;
+}
+
+static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *crtc = to_radeon_crtc(encoder->crtc);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->set_dto)
+ radeon_encoder->audio->set_dto(rdev, crtc, clock);
+}
+
+static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
+ int err;
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ if (dig && dig->afmt &&
+ radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+ radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
+ buffer, sizeof(buffer));
+
+ return 0;
+}
+
+/*
+ * calculate CTS and N values if they are not found in the table
+ */
+static void radeon_audio_calc_cts(unsigned int clock, int *CTS, int *N, int freq)
+{
+ int n, cts;
+ unsigned long div, mul;
+
+ /* Safe, but overly large values */
+ n = 128 * freq;
+ cts = clock * 1000;
+
+ /* Smallest valid fraction */
+ div = gcd(n, cts);
+
+ n /= div;
+ cts /= div;
+
+ /*
+ * The optimal N is 128*freq/1000. Calculate the closest larger
+ * value that doesn't truncate any bits.
+ */
+ mul = ((128*freq/1000) + (n-1))/n;
+
+ n *= mul;
+ cts *= mul;
+
+ /* Check that we are in spec (not always possible) */
+ if (n < (128*freq/1500))
+ printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+ if (n > (128*freq/300))
+ printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+ *N = n;
+ *CTS = cts;
+
+ DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+ *N, *CTS, freq);
+}
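+
+/*
+ * Worked example: for a 25.200 MHz pixel clock and 48 kHz audio,
+ * N = 128*48000/1000 = 6144 and CTS = 25200000*6144/(128*48000) = 25200,
+ * matching the predefined 25200 kHz table entry below.
+ */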
+
+static const struct radeon_hdmi_acr* radeon_audio_acr(unsigned int clock)
+{
+ static struct radeon_hdmi_acr res;
+ u8 i;
+
+ static const struct radeon_hdmi_acr hdmi_predefined_acr[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+ { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25.20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+ { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ };
+
+ /* Precalculated values for common clocks */
+ for (i = 0; i < ARRAY_SIZE(hdmi_predefined_acr); i++)
+ if (hdmi_predefined_acr[i].clock == clock)
+ return &hdmi_predefined_acr[i];
+
+ /* And odd clocks get manually calculated */
+ radeon_audio_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ radeon_audio_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ radeon_audio_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+
+ return &res;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void radeon_audio_update_acr(struct drm_encoder *encoder, unsigned int clock)
+{
+ const struct radeon_hdmi_acr *acr = radeon_audio_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ if (radeon_encoder->audio && radeon_encoder->audio->update_acr)
+ radeon_encoder->audio->update_acr(encoder, dig->afmt->offset, acr);
+}
+
+static void radeon_audio_set_vbi_packet(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ if (radeon_encoder->audio && radeon_encoder->audio->set_vbi_packet)
+ radeon_encoder->audio->set_vbi_packet(encoder, dig->afmt->offset);
+}
+
+static void radeon_hdmi_set_color_depth(struct drm_encoder *encoder)
+{
+ int bpc = 8;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
+
+ if (radeon_encoder->audio && radeon_encoder->audio->set_color_depth)
+ radeon_encoder->audio->set_color_depth(encoder, dig->afmt->offset, bpc);
+}
+
+static void radeon_audio_set_audio_packet(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ if (radeon_encoder->audio && radeon_encoder->audio->set_audio_packet)
+ radeon_encoder->audio->set_audio_packet(encoder, dig->afmt->offset);
+}
+
+static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ if (radeon_encoder->audio && radeon_encoder->audio->set_mute)
+ radeon_encoder->audio->set_mute(encoder, dig->afmt->offset, mute);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = radeon_audio_get_pin(encoder);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+
+ radeon_audio_set_dto(encoder, mode->clock);
+ radeon_audio_set_vbi_packet(encoder);
+ radeon_hdmi_set_color_depth(encoder);
+ radeon_audio_set_mute(encoder, false);
+ radeon_audio_update_acr(encoder, mode->clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+
+ /* enable audio after setting up hw */
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+}
+
+static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = radeon_audio_get_pin(encoder);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+
+ radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+
+ /* enable audio after setting up hw */
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+}
+
+void radeon_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->mode_set)
+ radeon_encoder->audio->mode_set(encoder, mode);
+}
+
+void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->audio && radeon_encoder->audio->dpms)
+ radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
new file mode 100644
index 000000000000..c92d059ab204
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Slava Grigorev <slava.grigorev@amd.com>
+ */
+
+#ifndef __RADEON_AUDIO_H__
+#define __RADEON_AUDIO_H__
+
+#include <linux/types.h>
+
+#define RREG32_ENDPOINT(block, reg) \
+ radeon_audio_endpoint_rreg(rdev, (block), (reg))
+#define WREG32_ENDPOINT(block, reg, v) \
+ radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
+
+struct radeon_audio_basic_funcs
+{
+ u32 (*endpoint_rreg)(struct radeon_device *rdev, u32 offset, u32 reg);
+ void (*endpoint_wreg)(struct radeon_device *rdev,
+ u32 offset, u32 reg, u32 v);
+ void (*enable)(struct radeon_device *rdev,
+ struct r600_audio_pin *pin, u8 enable_mask);
+};
+
+struct radeon_audio_funcs
+{
+ void (*select_pin)(struct drm_encoder *encoder);
+ struct r600_audio_pin* (*get_pin)(struct radeon_device *rdev);
+ void (*write_latency_fields)(struct drm_encoder *encoder,
+ struct drm_connector *connector, struct drm_display_mode *mode);
+ void (*write_sad_regs)(struct drm_encoder *encoder,
+ struct cea_sad *sads, int sad_count);
+ void (*write_speaker_allocation)(struct drm_encoder *encoder,
+ u8 *sadb, int sad_count);
+ void (*set_dto)(struct radeon_device *rdev,
+ struct radeon_crtc *crtc, unsigned int clock);
+ void (*update_acr)(struct drm_encoder *encoder, long offset,
+ const struct radeon_hdmi_acr *acr);
+ void (*set_vbi_packet)(struct drm_encoder *encoder, u32 offset);
+ void (*set_color_depth)(struct drm_encoder *encoder, u32 offset, int bpc);
+ void (*set_avi_packet)(struct radeon_device *rdev, u32 offset,
+ unsigned char *buffer, size_t size);
+ void (*set_audio_packet)(struct drm_encoder *encoder, u32 offset);
+ void (*set_mute)(struct drm_encoder *encoder, u32 offset, bool mute);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+ void (*dpms)(struct drm_encoder *encoder, bool mode);
+};
+
+int radeon_audio_init(struct radeon_device *rdev);
+void radeon_audio_detect(struct drm_connector *connector,
+ enum drm_connector_status status);
+u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
+ u32 offset, u32 reg);
+void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
+ u32 offset, u32 reg, u32 v);
+struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder);
+void radeon_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin, u8 enable_mask);
+void radeon_audio_fini(struct radeon_device *rdev);
+void radeon_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 9e7f23dd14bd..87d5fb21cb61 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -34,7 +34,8 @@
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
- int flag, int n)
+ int flag, int n,
+ struct reservation_object *resv)
{
unsigned long start_jiffies;
unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
case RADEON_BENCHMARK_COPY_DMA:
fence = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ resv);
break;
case RADEON_BENCHMARK_COPY_BLIT:
fence = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ resv);
break;
default:
DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.dma) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_DMA, n);
+ RADEON_BENCHMARK_COPY_DMA, n,
+ dobj->tbo.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_BLIT, n);
+ RADEON_BENCHMARK_COPY_BLIT, n,
+ dobj->tbo.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 26baa9c05f6c..27def67cb6be 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -29,6 +29,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_audio.h"
#include "atom.h"
#include <linux/pm_runtime.h>
@@ -1332,6 +1333,9 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
+ if (radeon_audio != 0)
+ radeon_audio_detect(connector, ret);
+
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
@@ -1654,6 +1658,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
+
+ if (radeon_audio != 0)
+ radeon_audio_detect(connector, ret);
+
out:
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
+ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+ RADEON_GART_PAGE_DUMMY);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 102116902a07..913fafa597ad 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
pll->flags & RADEON_PLL_USE_REF_DIV)
ref_div_max = pll->reference_div;
+ else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+ /* fix for problems on RS880 */
+ ref_div_max = min(pll->max_ref_div, 7u);
else
ref_div_max = pll->max_ref_div;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4f50fb0e3d93..5d684beb48d3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -88,9 +88,10 @@
* 2.39.0 - Add INFO query for number of active CUs
* 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
* CS to GPU on >= r600
+ * 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 40
+#define KMS_DRIVER_MINOR 41
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 29b9220ec399..ea276ff6d174 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -390,18 +390,27 @@ int radeon_fbdev_init(struct radeon_device *rdev)
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
RADEONFB_CONN_LIMIT);
- if (ret) {
- kfree(rfbdev);
- return ret;
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
- drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&rfbdev->helper);
+free:
+ kfree(rfbdev);
+ return ret;
}
void radeon_fbdev_fini(struct radeon_device *rdev)
@@ -419,16 +428,6 @@ void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
-int radeon_fbdev_total_size(struct radeon_device *rdev)
-{
- struct radeon_bo *robj;
- int size = 0;
-
- robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
- size += radeon_bo_size(robj);
- return size;
-}
-
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
+
+ if (!r) {
+ int i;
+
+ /* We might have dropped some GART table updates while it wasn't
+ * mapped; restore all entries
+ */
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
+
return r;
}
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
- u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
- page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base,
- RADEON_GART_PAGE_DUMMY);
+ radeon_gart_set_page(rdev, t,
+ rdev->dummy_page.entry);
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
- uint64_t page_base;
+ uint64_t page_base, page_entry;
int i, j;
if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base, flags);
- page_base += RADEON_GPU_PAGE_SIZE;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+ rdev->gart.pages_entry[t] = page_entry;
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_entry);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages);
- if (rdev->gart.pages_addr == NULL) {
+ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+ rdev->gart.num_gpu_pages);
+ if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
- rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
- }
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
- vfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
- rdev->gart.pages_addr = NULL;
+ rdev->gart.pages_entry = NULL;
radeon_dummy_page_fini(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a46f73737994..ac3c1310b953 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
struct radeon_bo_va *bo_va;
int r;
- if (rdev->family < CHIP_CAYMAN) {
+ if ((rdev->family < CHIP_CAYMAN) ||
+ (!rdev->accel_working)) {
return 0;
}
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_bo_va *bo_va;
int r;
- if (rdev->family < CHIP_CAYMAN) {
+ if ((rdev->family < CHIP_CAYMAN) ||
+ (!rdev->accel_working)) {
return;
}
@@ -576,7 +578,7 @@ error_unreserve:
error_free:
drm_free_large(vm_bos);
- if (r)
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index add622008407..9590bcd321c0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1048,11 +1048,6 @@ struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
return NULL;
}
-struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
-{
- return NULL;
-}
-
void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 8bf87f1203cc..061eaa9c19c7 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -30,22 +30,22 @@
#include "radeon_kfd.h"
#include "radeon_ucode.h"
#include <linux/firmware.h>
+#include "cik_structs.h"
#define CIK_PIPE_PER_MEC (4)
struct kgd_mem {
- struct radeon_sa_bo *sa_bo;
+ struct radeon_bo *bo;
uint64_t gpu_addr;
- void *ptr;
+ void *cpu_ptr;
};
-static int init_sa_manager(struct kgd_dev *kgd, unsigned int size);
-static void fini_sa_manager(struct kgd_dev *kgd);
-static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
- enum kgd_memory_pool pool, struct kgd_mem **mem);
+static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr);
-static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem);
+static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
@@ -64,36 +64,37 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
unsigned int vmid);
-static int kgd_init_memory(struct kgd_dev *kgd);
-
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
-
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
static const struct kfd2kgd_calls kfd2kgd = {
- .init_sa_manager = init_sa_manager,
- .fini_sa_manager = fini_sa_manager,
- .allocate_mem = allocate_mem,
- .free_mem = free_mem,
+ .init_gtt_mem_allocation = alloc_gtt_mem,
+ .free_gtt_mem = free_gtt_mem,
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
.get_fw_version = get_fw_version
};
@@ -194,87 +195,78 @@ int radeon_kfd_resume(struct radeon_device *rdev)
return r;
}
-static u32 pool_to_domain(enum kgd_memory_pool p)
-{
- switch (p) {
- case KGD_POOL_FRAMEBUFFER: return RADEON_GEM_DOMAIN_VRAM;
- default: return RADEON_GEM_DOMAIN_GTT;
- }
-}
-
-static int init_sa_manager(struct kgd_dev *kgd, unsigned int size)
+static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr)
{
struct radeon_device *rdev = (struct radeon_device *)kgd;
+ struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
int r;
BUG_ON(kgd == NULL);
+ BUG_ON(gpu_addr == NULL);
+ BUG_ON(cpu_ptr == NULL);
- r = radeon_sa_bo_manager_init(rdev, &rdev->kfd_bo,
- size,
- RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_GTT_WC);
+ *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if ((*mem) == NULL)
+ return -ENOMEM;
- if (r)
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo);
+ if (r) {
+ dev_err(rdev->dev,
+ "failed to allocate BO for amdkfd (%d)\n", r);
return r;
+ }
- r = radeon_sa_bo_manager_start(rdev, &rdev->kfd_bo);
- if (r)
- radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
-
- return r;
-}
-
-static void fini_sa_manager(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- BUG_ON(kgd == NULL);
-
- radeon_sa_bo_manager_suspend(rdev, &rdev->kfd_bo);
- radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
-}
-
-static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
- enum kgd_memory_pool pool, struct kgd_mem **mem)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
- u32 domain;
- int r;
-
- BUG_ON(kgd == NULL);
-
- domain = pool_to_domain(pool);
- if (domain != RADEON_GEM_DOMAIN_GTT) {
- dev_err(rdev->dev,
- "Only allowed to allocate gart memory for kfd\n");
- return -EINVAL;
+ /* map the buffer */
+ r = radeon_bo_reserve((*mem)->bo, true);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+ goto allocate_mem_reserve_bo_failed;
}
- *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if ((*mem) == NULL)
- return -ENOMEM;
+ r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT,
+ &(*mem)->gpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+ goto allocate_mem_pin_bo_failed;
+ }
+ *gpu_addr = (*mem)->gpu_addr;
- r = radeon_sa_bo_new(rdev, &rdev->kfd_bo, &(*mem)->sa_bo, size,
- alignment);
+ r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
if (r) {
- dev_err(rdev->dev, "failed to get memory for kfd (%d)\n", r);
- return r;
+ dev_err(rdev->dev,
+ "(%d) failed to map bo to kernel for amdkfd\n", r);
+ goto allocate_mem_kmap_bo_failed;
}
+ *cpu_ptr = (*mem)->cpu_ptr;
- (*mem)->ptr = radeon_sa_bo_cpu_addr((*mem)->sa_bo);
- (*mem)->gpu_addr = radeon_sa_bo_gpu_addr((*mem)->sa_bo);
+ radeon_bo_unreserve((*mem)->bo);
return 0;
+
+allocate_mem_kmap_bo_failed:
+ radeon_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+ radeon_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+ radeon_bo_unref(&(*mem)->bo);
+
+ return r;
}
-static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem)
+static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
+ struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
- BUG_ON(kgd == NULL);
+ BUG_ON(mem == NULL);
- radeon_sa_bo_free(rdev, &mem->sa_bo, NULL);
+ radeon_bo_reserve(mem->bo, true);
+ radeon_bo_kunmap(mem->bo);
+ radeon_bo_unpin(mem->bo);
+ radeon_bo_unreserve(mem->bo);
+ radeon_bo_unref(&(mem->bo));
kfree(mem);
}
@@ -397,46 +389,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
return 0;
}
-static int kgd_init_memory(struct kgd_dev *kgd)
-{
- /*
- * Configure apertures:
- * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
- * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
- * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
- */
- int i;
- uint32_t sh_mem_bases = PRIVATE_BASE(0x6000) | SHARED_BASE(0x6000);
-
- for (i = 8; i < 16; i++) {
- uint32_t sh_mem_config;
-
- lock_srbm(kgd, 0, 0, 0, i);
-
- sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
- write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
-
- write_register(kgd, SH_MEM_BASES, sh_mem_bases);
-
- /* Scratch aperture is not supported for now. */
- write_register(kgd, SH_STATIC_MEM_CONFIG, 0);
-
- /* APE1 disabled for now. */
- write_register(kgd, SH_MEM_APE1_BASE, 1);
- write_register(kgd, SH_MEM_APE1_LIMIT, 0);
-
- unlock_srbm(kgd);
- }
-
- return 0;
-}
-
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
lock_srbm(kgd, mec, pipe, 0, 0);
@@ -451,11 +407,28 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
return 0;
}
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
+}
+
static inline struct cik_mqd *get_mqd(void *mqd)
{
return (struct cik_mqd *)mqd;
}
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
@@ -533,6 +506,45 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_BASE,
+ m->sdma_rlc_rb_base);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_BASE_HI,
+ m->sdma_rlc_rb_base_hi);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdma_rlc_rb_rptr_addr_lo);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdma_rlc_rb_rptr_addr_hi);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl);
+
+ return 0;
+}
+
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
@@ -554,6 +566,24 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
return retval;
}
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ sdma_rlc_rb_cntl = read_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA_RB_ENABLE)
+ return true;
+
+ return false;
+}
+
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
@@ -583,6 +613,39 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
return 0;
}
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout)
+{
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t temp;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ temp = read_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA_RB_ENABLE;
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = read_register(kgd, sdma_base_addr +
+ SDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA_RLC_IDLE)
+ break;
+ if (timeout == 0)
+ return -ETIME;
+ msleep(20);
+ timeout -= 20;
+ }
+
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL, 0);
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR, 0);
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_WPTR, 0);
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE, 0);
+
+ return 0;
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct radeon_device *rdev = (struct radeon_device *) kgd;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
index f90e161ca507..1103f9082f6b 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.h
+++ b/drivers/gpu/drm/radeon/radeon_kfd.h
@@ -29,7 +29,7 @@
#define RADEON_KFD_H_INCLUDED
#include <linux/types.h>
-#include "../amd/include/kgd_kfd_interface.h"
+#include "kgd_kfd_interface.h"
struct radeon_device;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3cf9c1fa6475..686411e4e4f6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- vm = &fpriv->vm;
- r = radeon_vm_init(rdev, vm);
- if (r) {
- kfree(fpriv);
- return r;
- }
-
if (rdev->accel_working) {
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
+ if (r) {
+ kfree(fpriv);
+ return r;
+ }
+
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
+ radeon_vm_fini(rdev, vm);
}
- radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 390db897f322..920a8be8abad 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -449,6 +449,7 @@ struct radeon_encoder {
int audio_polling_active;
bool is_ext_encoder;
u16 caps;
+ struct radeon_audio_funcs *audio;
};
struct radeon_connector_atom_dig {
@@ -745,8 +746,6 @@ extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connec
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
-extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
-
extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id);
@@ -925,7 +924,6 @@ void dce8_program_fmt(struct drm_encoder *encoder);
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
-int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 86fc56434b28..43e09942823e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -238,6 +238,18 @@ int radeon_bo_create(struct radeon_device *rdev,
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
bo->flags &= ~RADEON_GEM_GTT_WC;
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+ /* Don't try to enable write-combining when it can't work, or things
+ * may be slow
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+ */
+
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+ thanks to write-combining
+
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+ bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
radeon_ttm_placement_from_domain(bo, domain);
@@ -576,12 +588,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
return 0;
}
-int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
- struct vm_area_struct *vma)
-{
- return ttm_fbdev_mmap(vma, &bo->tbo);
-}
-
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
struct radeon_device *rdev = bo->rdev;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 3b0b377f76cb..d8d295ee7c12 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -143,8 +143,6 @@ extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring);
-extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
- struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
u32 tiling_flags, u32 pitch);
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 32522cc940a1..9f758d39420d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -24,6 +24,7 @@
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
+#include "r600_dpm.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -554,6 +555,100 @@ fail:
return count;
}
+static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct radeon_device *rdev = dev_get_drvdata(dev);
+ u32 pwm_mode = 0;
+
+ if (rdev->asic->dpm.fan_ctrl_get_mode)
+ pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
+
+ /* never 0 (full-speed); always fuse- or SMC-controlled, so 1 (manual) or 2 (auto) */
+ return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
+}
+
+static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct radeon_device *rdev = dev_get_drvdata(dev);
+ int err;
+ int value;
+
+ if (!rdev->asic->dpm.fan_ctrl_set_mode)
+ return -EINVAL;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
+
+ switch (value) {
+ case 1: /* manual, percent-based */
+ rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
+ break;
+ default: /* disable */
+ rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%i\n", 0);
+}
+
+static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%i\n", 255);
+}
+
+static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct radeon_device *rdev = dev_get_drvdata(dev);
+ int err;
+ u32 value;
+
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
+ value = (value * 100) / 255;
+
+ err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct radeon_device *rdev = dev_get_drvdata(dev);
+ int err;
+ u32 speed;
+
+ err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
+ if (err)
+ return err;
+
+ speed = (speed * 255) / 100;
+
+ return sprintf(buf, "%i\n", speed);
+}
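/* hwmon expresses pwm1 on a 0-255 scale while the dpm callbacks take a
 * percentage, hence the scaling above. With integer truncation the round
 * trip is slightly lossy, e.g. (a sketch, not part of the interface):
 *   set: 128 -> (128 * 100) / 255 = 50 %
 *   get:  50 -> ( 50 * 255) / 100 = 127
 */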
+
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
@@ -601,11 +696,20 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
+
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_min.dev_attr.attr,
+ &sensor_dev_attr_pwm1_max.dev_attr.attr,
NULL
};
@@ -614,6 +718,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct radeon_device *rdev = dev_get_drvdata(dev);
+ umode_t effective_mode = attr->mode;
/* Skip limit attributes if DPM is not enabled */
if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -621,7 +726,35 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
- return attr->mode;
+ /* Skip fan attributes if fan is not present */
+ if (rdev->pm.no_fan &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
+
+ /* mask fan attributes if we have no bindings for this asic to expose */
+ if ((!rdev->asic->dpm.get_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+ (!rdev->asic->dpm.fan_ctrl_get_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+ effective_mode &= ~S_IRUGO;
+
+ if ((!rdev->asic->dpm.set_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+ (!rdev->asic->dpm.fan_ctrl_set_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+ effective_mode &= ~S_IWUSR;
+
+ /* hide max/min values if we can't both query and manage the fan */
+ if ((!rdev->asic->dpm.set_fan_speed_percent &&
+ !rdev->asic->dpm.get_fan_speed_percent) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
+
+ return effective_mode;
}
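/* The visibility callback hides an attribute outright by returning 0 and
 * otherwise strips S_IRUGO/S_IWUSR from effective_mode, so pwm1 and
 * pwm1_enable degrade to read-only or write-only when the asic provides only
 * one of the corresponding fan callbacks.
 */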
static const struct attribute_group hwmon_attrgroup = {
@@ -1287,8 +1420,39 @@ dpm_failed:
return ret;
}
+struct radeon_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+ { 0, 0, 0, 0 },
+};
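/* Quirk entries are matched on the full vendor/device/subsystem tuple and the
 * list ends with an all-zero row. A hit only changes the default
 * (radeon_dpm == -1) to the profile method; radeon_dpm=1 on the command line
 * still forces dpm on, as the checks further down show.
 */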
+
int radeon_pm_init(struct radeon_device *rdev)
{
+ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+ bool disable_dpm = false;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ disable_dpm = true;
+ break;
+ }
+ ++p;
+ }
+
/* enable dpm on rv6xx+ */
switch (rdev->family) {
case CHIP_RV610:
@@ -1344,6 +1508,8 @@ int radeon_pm_init(struct radeon_device *rdev)
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (disable_dpm && (radeon_dpm == -1))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 07b506b41008..791818165c76 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
else
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
else
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..2a5a4a9e772d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
uint64_t result;
/* page table offset */
- result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+ result &= ~RADEON_GPU_PAGE_MASK;
return result;
}
@@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*/
/* NI is optimized for 256KB fragments, SI and newer for 64KB */
- uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+ uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+ (rdev->family == CHIP_ARUBA)) ?
R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
- uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+ uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+ (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
uint64_t frag_start = ALIGN(pe_start, frag_align);
uint64_t frag_end = pe_end & ~(frag_align - 1);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
- entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ u32 *gtt = rdev->gart.ptr;
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
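/* The old set_page helper is split in two: rs400_gart_get_page_entry does the
 * flag translation once (presumably so the common GART code can cache the
 * result, see the rdev->gart.pages_entry use in radeon_vm_map_gart above),
 * and rs400_gart_set_page only byte-swaps and stores the 32-bit entry.
 */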
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..d81182ad53ec 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -38,6 +38,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "atom.h"
#include "rs600d.h"
@@ -625,11 +626,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
-
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +638,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
- writeq(addr, ptr + (i * 8));
+ return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ writeq(entry, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)
@@ -1012,7 +1017,7 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing audio\n");
return r;
@@ -1053,7 +1058,7 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -1064,7 +1069,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0a2d36e81108..516ca27cfa12 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "atom.h"
#include "rs690d.h"
@@ -729,7 +730,7 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing audio\n");
return r;
@@ -770,7 +771,7 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -781,7 +782,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 372016e266d0..01ee96acb398 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "rv770d.h"
#include "atom.h"
@@ -1788,7 +1789,7 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
@@ -1829,7 +1830,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- r600_audio_fini(rdev);
+ radeon_audio_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 755a8f96fe46..306732641b23 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -231,6 +231,7 @@ u8 rv770_get_seq_value(struct radeon_device *rdev,
MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1;
}
+#if 0
int rv770_read_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 *value)
{
@@ -240,6 +241,7 @@ int rv770_read_smc_soft_register(struct radeon_device *rdev,
pi->soft_regs_start + reg_offset,
value, pi->sram_end);
}
+#endif
int rv770_write_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 value)
@@ -2075,6 +2077,7 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
+#if 0
void rv770_dpm_reset_asic(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -2087,6 +2090,7 @@ void rv770_dpm_reset_asic(struct radeon_device *rdev)
if (pi->dcodt)
rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps);
}
+#endif
void rv770_dpm_setup_asic(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index f776634840c9..d12beab7f3e6 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -278,8 +278,6 @@ void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
void rv770_get_engine_memory_ss(struct radeon_device *rdev);
/* smc */
-int rv770_read_smc_soft_register(struct radeon_device *rdev,
- u16 reg_offset, u32 *value);
int rv770_write_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 value);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 60df444bd075..73107fe9e46f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -27,6 +27,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "sid.h"
#include "atom.h"
@@ -5057,6 +5058,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
@@ -6859,7 +6870,7 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- r = dce6_audio_init(rdev);
+ r = radeon_audio_init(rdev);
if (r)
return r;
@@ -6898,7 +6909,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
- dce6_audio_fini(rdev);
+ radeon_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index f5cc777e1c5f..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -206,6 +205,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+ radeon_ring_write(ring, 0xff << 16); /* retry */
+ radeon_ring_write(ring, 1 << vm_id); /* mask */
+ radeon_ring_write(ring, 0); /* value */
+ radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
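/* Mirroring the CP change in si.c above, the DMA flush now emits a
 * POLL_REG_MEM packet on VM_INVALIDATE_REQUEST right after the SRBM write,
 * serving the same "wait for the invalidate to complete" purpose as the
 * PACKET3_WAIT_REG_MEM added to the graphics ring.
 */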
/**
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 32e354b8b0ab..7be11651b7e6 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1756,6 +1756,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
+static void si_thermal_start_smc_fan_control(struct radeon_device *rdev);
+static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev);
+
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -2908,6 +2911,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
return ret;
}
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
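/* A zero max_sclk/max_mclk field means "no cap": the PITCAIRN entry above only
 * clamps the memory clock (120000, apparently in 10 kHz units, i.e. 1200 MHz)
 * and leaves the engine clock alone.
 */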
+
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -2918,7 +2937,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u32 mclk, sclk;
u16 vddc, vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
@@ -2972,6 +3006,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
}
/* XXX validate the min clocks required for display */
@@ -3320,11 +3362,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
return 0;
}
+#if 0
static int si_set_boot_state(struct radeon_device *rdev)
{
return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
+#endif
static int si_set_sw_state(struct radeon_device *rdev)
{
@@ -5934,6 +5978,10 @@ static int si_thermal_setup_fan_table(struct radeon_device *rdev)
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+ fan_table.temp_min = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
+ fan_table.temp_med = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
+ fan_table.temp_max = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
+
fan_table.slope1 = cpu_to_be16(slope1);
fan_table.slope2 = cpu_to_be16(slope2);
@@ -5973,29 +6021,35 @@ static int si_thermal_setup_fan_table(struct radeon_device *rdev)
static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
+ struct si_power_info *si_pi = si_get_pi(rdev);
PPSMC_Result ret;
ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
- if (ret == PPSMC_Result_OK)
+ if (ret == PPSMC_Result_OK) {
+ si_pi->fan_is_controlled_by_smc = true;
return 0;
- else
+ } else {
return -EINVAL;
+ }
}
static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
+ struct si_power_info *si_pi = si_get_pi(rdev);
PPSMC_Result ret;
ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
- if (ret == PPSMC_Result_OK)
+
+ if (ret == PPSMC_Result_OK) {
+ si_pi->fan_is_controlled_by_smc = false;
return 0;
- else
+ } else {
return -EINVAL;
+ }
}
-#if 0
-static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
- u32 *speed)
+int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
@@ -6019,9 +6073,10 @@ static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
return 0;
}
-static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
- u32 speed)
+int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed)
{
+ struct si_power_info *si_pi = si_get_pi(rdev);
u32 tmp;
u32 duty, duty100;
u64 tmp64;
@@ -6029,11 +6084,11 @@ static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
if (rdev->pm.no_fan)
return -ENOENT;
- if (speed > 100)
+ if (si_pi->fan_is_controlled_by_smc)
return -EINVAL;
- if (rdev->pm.dpm.fan.ucode_fan_control)
- si_fan_ctrl_stop_smc_fan_control(rdev);
+ if (speed > 100)
+ return -EINVAL;
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
@@ -6048,11 +6103,38 @@ static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
tmp |= FDO_STATIC_DUTY(duty);
WREG32(CG_FDO_CTRL0, tmp);
- si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
-
return 0;
}
+void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+ si_fan_ctrl_set_static_mode(rdev, mode);
+ } else {
+ /* restart auto-manage */
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ si_thermal_start_smc_fan_control(rdev);
+ else
+ si_fan_ctrl_set_default_mode(rdev);
+ }
+}
+
+u32 si_fan_ctrl_get_mode(struct radeon_device *rdev)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ u32 tmp;
+
+ if (si_pi->fan_is_controlled_by_smc)
+ return 0;
+
+ tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+ return (tmp >> FDO_PWM_MODE_SHIFT);
+}
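/* fan_is_controlled_by_smc is the interlock between the two control paths:
 * while the SMC owns the fan, si_fan_ctrl_set_fan_speed_percent() rejects
 * manual duty writes with -EINVAL and si_fan_ctrl_get_mode() reports 0 (auto)
 * regardless of the FDO_PWM_MODE bits.
 */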
+
+#if 0
static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
u32 *speed)
{
@@ -6499,13 +6581,14 @@ void si_dpm_post_set_power_state(struct radeon_device *rdev)
ni_update_current_ps(rdev, new_ps);
}
-
+#if 0
void si_dpm_reset_asic(struct radeon_device *rdev)
{
si_restrict_performance_levels_before_switch(rdev);
si_disable_ulv(rdev);
si_set_boot_state(rdev);
}
+#endif
void si_dpm_display_configuration_changed(struct radeon_device *rdev)
{
@@ -6873,7 +6956,6 @@ int si_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
si_pi->fan_ctrl_is_in_default_mode = true;
- rdev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index d16bb1b5f10f..1032a68be792 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -202,6 +202,7 @@ struct si_power_info {
bool fan_ctrl_is_in_default_mode;
u32 t_min;
u32 fan_ctrl_default_mode;
+ bool fan_is_controlled_by_smc;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4069be89e585..cbd91d226f3c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -901,6 +901,16 @@
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO1_PHASE 0x05b8
+#define DCCG_AUDIO_DTO1_MODULE 0x05bc
+
#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
/* AFMT_AUDIO_SRC_SELECT
@@ -1632,6 +1642,23 @@
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
#define PACKET3_CP_DMA 0x41
@@ -1835,6 +1862,7 @@
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
#define VCE_STATUS 0x20004
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 1f8a8833e1be..25fd4ced36c8 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1338,6 +1338,7 @@ void sumo_dpm_post_set_power_state(struct radeon_device *rdev)
sumo_update_current_ps(rdev, new_ps);
}
+#if 0
void sumo_dpm_reset_asic(struct radeon_device *rdev)
{
sumo_program_bootup_state(rdev);
@@ -1349,6 +1350,7 @@ void sumo_dpm_reset_asic(struct radeon_device *rdev)
sumo_set_forced_mode_enabled(rdev);
sumo_set_forced_mode_disabled(rdev);
}
+#endif
void sumo_dpm_setup_asic(struct radeon_device *rdev)
{
@@ -1537,6 +1539,7 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
+#if 0
u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_7bit)
@@ -1550,6 +1553,7 @@ u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
}
+#endif
static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
u32 vid_2bit)
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index db1ea32a907b..07dda299c784 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,9 +202,6 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_2bit);
-u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- u32 vid_7bit);
u32 sumo_get_sleep_divider_from_id(u32 id);
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
u32 sclk,
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b4ec5c4e7969..38dacb7a3689 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1269,6 +1269,7 @@ void trinity_dpm_setup_asic(struct radeon_device *rdev)
trinity_release_mutex(rdev);
}
+#if 0
void trinity_dpm_reset_asic(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1284,6 +1285,7 @@ void trinity_dpm_reset_asic(struct radeon_device *rdev)
}
trinity_release_mutex(rdev);
}
+#endif
static u16 trinity_convert_voltage_index_to_value(struct radeon_device *rdev,
u32 vid_2bit)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 2324a526de65..11485a4a16ae 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,6 @@
config DRM_RCAR_DU
tristate "DRM Support for R-Car Display Unit"
- depends on DRM && ARM
+ depends on DRM && ARM && HAVE_DMA_ATTRS
depends on ARCH_SHMOBILE || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 23cc910951f4..25c7a998fc2c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -74,39 +74,77 @@ static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(rcrtc->extclock);
+ if (ret < 0)
+ goto error_clock;
+
ret = rcar_du_group_get(rcrtc->group);
if (ret < 0)
- clk_disable_unprepare(rcrtc->clock);
+ goto error_group;
+
+ return 0;
+error_group:
+ clk_disable_unprepare(rcrtc->extclock);
+error_clock:
+ clk_disable_unprepare(rcrtc->clock);
return ret;
}
static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
rcar_du_group_put(rcrtc->group);
+
+ clk_disable_unprepare(rcrtc->extclock);
clk_disable_unprepare(rcrtc->clock);
}
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ unsigned long mode_clock = mode->clock * 1000;
unsigned long clk;
u32 value;
+ u32 escr;
u32 div;
- /* Dot clock */
+ /* Compute the clock divisor and select the internal or external dot
+ * clock based on the requested frequency.
+ */
clk = clk_get_rate(rcrtc->clock);
- div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
+ div = DIV_ROUND_CLOSEST(clk, mode_clock);
div = clamp(div, 1U, 64U) - 1;
+ escr = div | ESCR_DCLKSEL_CLKS;
+
+ if (rcrtc->extclock) {
+ unsigned long extclk;
+ unsigned long extrate;
+ unsigned long rate;
+ u32 extdiv;
+
+ extclk = clk_get_rate(rcrtc->extclock);
+ extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
+ extdiv = clamp(extdiv, 1U, 64U) - 1;
+
+ rate = clk / (div + 1);
+ extrate = extclk / (extdiv + 1);
+
+ if (abs((long)extrate - (long)mode_clock) <
+ abs((long)rate - (long)mode_clock)) {
+ dev_dbg(rcrtc->group->dev->dev,
+ "crtc%u: using external clock\n", rcrtc->index);
+ escr = extdiv | ESCR_DCLKSEL_DCLKIN;
+ }
+ }
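	/* Whichever source gets closer to the requested dot clock wins: e.g. a
	 * 65 MHz mode with a 130 MHz DCLKIN divides exactly (extdiv 1) and is
	 * preferred over an inexact internal divider.
	 */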
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
- ESCR_DCLKSEL_CLKS | div);
+ escr);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
- | DSMR_DIPM_DE;
+ | DSMR_DIPM_DE | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
/* Display timings */
@@ -117,12 +155,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
- rcar_du_crtc_write(rcrtc, VDSR, mode->vtotal - mode->vsync_end - 2);
- rcar_du_crtc_write(rcrtc, VDER, mode->vtotal - mode->vsync_end +
- mode->vdisplay - 2);
- rcar_du_crtc_write(rcrtc, VSPR, mode->vtotal - mode->vsync_end +
- mode->vsync_start - 1);
- rcar_du_crtc_write(rcrtc, VCR, mode->vtotal - 1);
+ rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
+ mode->crtc_vsync_end - 2);
+ rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
+ mode->crtc_vsync_end +
+ mode->crtc_vdisplay - 2);
+ rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
+ mode->crtc_vsync_end +
+ mode->crtc_vsync_start - 1);
+ rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
@@ -139,9 +180,10 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
*/
rcrtc->outputs |= BIT(output);
- /* Store RGB routing to DPAD0 for R8A7790. */
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
- output == RCAR_DU_OUTPUT_DPAD0)
+ /* Store RGB routing to DPAD0; the hardware will be configured when
+ * starting the CRTC.
+ */
+ if (output == RCAR_DU_OUTPUT_DPAD0)
rcdu->dpad0_source = rcrtc->index;
}
@@ -217,6 +259,7 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
+ bool interlaced;
unsigned int i;
if (rcrtc->started)
@@ -252,7 +295,10 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
* actively driven).
*/
- rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
+ interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
+ rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
+ (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
+ DSYSR_TVM_MASTER);
rcar_du_group_start_stop(rcrtc->group, true);
@@ -308,6 +354,9 @@ static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
if (rcrtc->dpms == mode)
return;
@@ -486,7 +535,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
- if (status & DSSR_VBK) {
+ if (status & DSSR_FRM) {
drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
@@ -542,12 +591,13 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
struct drm_crtc *crtc = &rcrtc->crtc;
unsigned int irqflags;
- char clk_name[5];
+ struct clk *clk;
+ char clk_name[9];
char *name;
int irq;
int ret;
- /* Get the CRTC clock. */
+ /* Get the CRTC clock and the optional external clock. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
sprintf(clk_name, "du.%u", index);
name = clk_name;
@@ -561,6 +611,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
return PTR_ERR(rcrtc->clock);
}
+ sprintf(clk_name, "dclkin.%u", index);
+ clk = devm_clk_get(rcdu->dev, clk_name);
+ if (!IS_ERR(clk)) {
+ rcrtc->extclock = clk;
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+ dev_info(rcdu->dev, "can't get external clock %u\n", index);
+ return -EPROBE_DEFER;
+ }
+
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 984e6083699f..d2f89f7d2e5e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -26,6 +26,7 @@ struct rcar_du_crtc {
struct drm_crtc crtc;
struct clk *clock;
+ struct clk *extclock;
unsigned int mmio_offset;
unsigned int index;
bool started;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 7bfa09cf18d5..e0d74f821416 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -56,7 +56,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
.num_crtcs = 3,
.routes = {
@@ -83,7 +84,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
};
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
.routes = {
/* R8A7791 has one RGB output, one LVDS output and one
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 0a724669f02d..c5b9ea6a7eaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -27,7 +27,7 @@ struct rcar_du_device;
struct rcar_du_lvdsenc;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_DEFR8 (1 << 1) /* Has DEFR8 register */
+#define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */
#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
#define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 34a122a39664..279167f783f6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -46,6 +46,9 @@ static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
if (renc->lvds)
rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
}
@@ -190,35 +193,42 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
if (type == RCAR_DU_ENCODER_HDMI) {
- if (renc->lvds) {
- dev_err(rcdu->dev,
- "Chaining LVDS and HDMI encoders not supported\n");
- return -EINVAL;
- }
-
ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
if (ret < 0)
- return ret;
+ goto done;
} else {
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
encoder_type);
if (ret < 0)
- return ret;
+ goto done;
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
}
switch (encoder_type) {
case DRM_MODE_ENCODER_LVDS:
- return rcar_du_lvds_connector_init(rcdu, renc, con_node);
+ ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
+ break;
case DRM_MODE_ENCODER_DAC:
- return rcar_du_vga_connector_init(rcdu, renc);
+ ret = rcar_du_vga_connector_init(rcdu, renc);
+ break;
case DRM_MODE_ENCODER_TMDS:
- return rcar_du_hdmi_connector_init(rcdu, renc);
+ ret = rcar_du_hdmi_connector_init(rcdu, renc);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+done:
+ if (ret < 0) {
+ if (encoder->name)
+ encoder->funcs->destroy(encoder);
+ devm_kfree(rcdu->dev, renc);
+ }
+
+ return ret;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 4e7614b145db..1bdc0ee0c248 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -48,9 +48,6 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
{
u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
- if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
- return;
-
/* The DEFR8 register for the first group also controls RGB output
* routing to DPAD0
*/
@@ -69,7 +66,20 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
- rcar_du_group_setup_defr8(rgrp);
+ if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
+ rcar_du_group_setup_defr8(rgrp);
+
+ /* Configure input dot clock routing. We currently hardcode the
+ * configuration to routing DOTCLKINn to DUn.
+ */
+ rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE |
+ DIDSR_LCDS_DCLKIN(2) |
+ DIDSR_LCDS_DCLKIN(1) |
+ DIDSR_LCDS_DCLKIN(0) |
+ DIDSR_PDCS_CLK(2, 0) |
+ DIDSR_PDCS_CLK(1, 0) |
+ DIDSR_PDCS_CLK(0, 0));
+ }
/* Use DS1PR and DS2PR to configure planes priorities and connects the
* superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
@@ -149,6 +159,9 @@ static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
{
int ret;
+ if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
+ return 0;
+
/* RGB output routing to DPAD0 is configured in the DEFR8 register of
* the first group. As this function can be called with the DU0 and DU1
* CRTCs disabled, we need to enable the first group clock before
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 4d7d4dd46d26..ca94b029ac80 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -95,6 +95,8 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
connector = &rcon->connector;
connector->display_info.width_mm = 0;
connector->display_info.height_mm = 0;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 359bc999a9c8..221f0a17fd6a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -21,6 +21,7 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_hdmienc.h"
+#include "rcar_du_lvdsenc.h"
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
@@ -36,12 +37,21 @@ static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
if (hdmienc->dpms == mode)
return;
+ if (mode == DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
+ rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+
if (sfuncs->dpms)
sfuncs->dpms(encoder, mode);
+ if (mode != DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
+ rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+
hdmienc->dpms = mode;
}
@@ -49,8 +59,16 @@ static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ /* The internal LVDS encoder has a clock frequency operating range of
+ * 30MHz to 150MHz. Clamp the clock accordingly.
+ */
+ if (hdmienc->renc->lvds)
+ adjusted_mode->clock = clamp(adjusted_mode->clock,
+ 30000, 150000);
+
if (sfuncs->mode_fixup == NULL)
return true;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0c5ee616b5a3..cc9136e8ee9c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -346,8 +346,14 @@ static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
/* Process the output pipeline. */
ret = rcar_du_encoders_init_one(rcdu, output, &ep);
if (ret < 0) {
- of_node_put(ep_node);
- return ret;
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ dev_info(rcdu->dev,
+ "encoder initialization failed, skipping\n");
+ continue;
}
num_encoders += ret;
@@ -413,6 +419,11 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
+ if (ret == 0) {
+ dev_err(rcdu->dev, "error: no encoder could be initialized\n");
+ return -EINVAL;
+ }
+
num_encoders = ret;
/* Set the possible CRTCs and possible clones. There's always at least
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 72a7cb47bd9f..50f2f2b20d39 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -104,14 +104,22 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
{
struct rcar_du_group *rgrp = plane->group;
unsigned int index = plane->hwindex;
+ bool interlaced;
u32 mwr;
- /* Memory pitch (expressed in pixels) */
+ interlaced = plane->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+
+ /* Memory pitch (expressed in pixels). Must be doubled for interlaced
+ * operation with 32bpp formats.
+ */
if (plane->format->planes == 2)
mwr = plane->pitch;
else
mwr = plane->pitch * 8 / plane->format->bpp;
+ if (interlaced && plane->format->bpp == 32)
+ mwr *= 2;
+
rcar_du_plane_write(rgrp, index, PnMWR, mwr);
/* The Y position is expressed in raster line units and must be doubled
@@ -119,17 +127,23 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
* doubling the Y position is found in the R8A7779 datasheet, but the
* rule seems to apply there as well.
*
+ * Despite not being documented, doubling seems not to be needed when
+ * operating in interlaced mode.
+ *
* Similarly, for the second plane, NV12 and NV21 formats seem to
- * require a halved Y position value.
+ * require a halved Y position value, in both progressive and interlaced
+ * modes.
*/
rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
- (plane->format->bpp == 32 ? 2 : 1));
+ (!interlaced && plane->format->bpp == 32 ? 2 : 1));
rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
if (plane->format->planes == 2) {
index = (index + 1) % 8;
+ rcar_du_plane_write(rgrp, index, PnMWR, plane->pitch);
+
rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
(plane->format->bpp == 16 ? 2 : 1) / 2);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 73f7347f740b..70fcbc471ebd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -34,6 +34,7 @@
#define DSYSR_SCM_INT_NONE (0 << 4)
#define DSYSR_SCM_INT_SYNC (2 << 4)
#define DSYSR_SCM_INT_VIDEO (3 << 4)
+#define DSYSR_SCM_MASK (3 << 4)
#define DSMR 0x00004
#define DSMR_VSPM (1 << 28)
@@ -256,8 +257,8 @@
#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
-#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
-#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
+#define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2))
+#define DIDSR_PDCS_MASK(n) (3 << ((n) * 2))
/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 752747a5e920..9d4879921cc7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -64,6 +64,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
connector = &rcon->connector;
connector->display_info.width_mm = 0;
connector->display_info.height_mm = 0;
+ connector->interlace_allowed = true;
ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_VGA);
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index ca9f085efa92..35215f6867d3 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -1,13 +1,13 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ depends on RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_PANEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
select VIDEOMODE_HELPERS
help
Choose this option if you have a Rockchip soc chipset.
@@ -15,3 +15,13 @@ config DRM_ROCKCHIP
management to userspace. This driver does not provide
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+
+config ROCKCHIP_DW_HDMI
+ tristate "Rockchip specific extensions for Synopsys DW HDMI"
+ depends on DRM_ROCKCHIP
+ select DRM_DW_HDMI
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare HDMI driver. If you want to
+ enable HDMI on RK3288-based SoCs, you should select this
+ option.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 2cb0672f57ed..f3d8a19c641f 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -5,4 +5,6 @@
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
rockchip_drm_gem.o
+obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
+
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
new file mode 100644
index 000000000000..d236faa05b19
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/bridge/dw_hdmi.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define GRF_SOC_CON6 0x025c
+#define HDMI_SEL_VOP_LIT (1 << 4)
+
+struct rockchip_hdmi {
+ struct device *dev;
+ struct regmap *regmap;
+ struct drm_encoder encoder;
+};
+
+#define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x)
+
+static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
+ {
+ 27000000, {
+ { 0x00b3, 0x0000},
+ { 0x2153, 0x0000},
+ { 0x40f3, 0x0000}
+ },
+ }, {
+ 36000000, {
+ { 0x00b3, 0x0000},
+ { 0x2153, 0x0000},
+ { 0x40f3, 0x0000}
+ },
+ }, {
+ 40000000, {
+ { 0x00b3, 0x0000},
+ { 0x2153, 0x0000},
+ { 0x40f3, 0x0000}
+ },
+ }, {
+ 54000000, {
+ { 0x0072, 0x0001},
+ { 0x2142, 0x0001},
+ { 0x40a2, 0x0001},
+ },
+ }, {
+ 65000000, {
+ { 0x0072, 0x0001},
+ { 0x2142, 0x0001},
+ { 0x40a2, 0x0001},
+ },
+ }, {
+ 66000000, {
+ { 0x013e, 0x0003},
+ { 0x217e, 0x0002},
+ { 0x4061, 0x0002}
+ },
+ }, {
+ 74250000, {
+ { 0x0072, 0x0001},
+ { 0x2145, 0x0002},
+ { 0x4061, 0x0002}
+ },
+ }, {
+ 83500000, {
+ { 0x0072, 0x0001},
+ },
+ }, {
+ 108000000, {
+ { 0x0051, 0x0002},
+ { 0x2145, 0x0002},
+ { 0x4061, 0x0002}
+ },
+ }, {
+ 106500000, {
+ { 0x0051, 0x0002},
+ { 0x2145, 0x0002},
+ { 0x4061, 0x0002}
+ },
+ }, {
+ 146250000, {
+ { 0x0051, 0x0002},
+ { 0x2145, 0x0002},
+ { 0x4061, 0x0002}
+ },
+ }, {
+ 148500000, {
+ { 0x0051, 0x0003},
+ { 0x214c, 0x0003},
+ { 0x4064, 0x0003}
+ },
+ }, {
+ ~0UL, {
+ { 0x00a0, 0x000a },
+ { 0x2001, 0x000f },
+ { 0x4002, 0x000f },
+ },
+ }
+};
+
+static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ {
+ 40000000, { 0x0018, 0x0018, 0x0018 },
+ }, {
+ 65000000, { 0x0028, 0x0028, 0x0028 },
+ }, {
+ 66000000, { 0x0038, 0x0038, 0x0038 },
+ }, {
+ 74250000, { 0x0028, 0x0038, 0x0038 },
+ }, {
+ 83500000, { 0x0028, 0x0038, 0x0038 },
+ }, {
+ 146250000, { 0x0038, 0x0038, 0x0038 },
+ }, {
+ 148500000, { 0x0000, 0x0038, 0x0038 },
+ }, {
+ ~0UL, { 0x0000, 0x0000, 0x0000},
+ }
+};
+
+static const struct dw_hdmi_sym_term rockchip_sym_term[] = {
+ /* pixelclk symbol term */
+ { 74250000, 0x8009, 0x0004 },
+ { 148500000, 0x8029, 0x0004 },
+ { 297000000, 0x8039, 0x0005 },
+ { ~0UL, 0x0000, 0x0000 }
+};
+
+static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
+{
+ struct device_node *np = hdmi->dev->of_node;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ return 0;
+}
+
+static enum drm_mode_status
+dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg;
+ int pclk = mode->clock * 1000;
+ bool valid = false;
+ int i;
+
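+ /* Only accept modes whose pixel clock exactly matches a table entry. */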
+ for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) {
+ if (pclk == mpll_cfg[i].mpixelclock) {
+ valid = true;
+ break;
+ }
+ }
+
+ return (valid) ? MODE_OK : MODE_BAD;
+}
+
+static struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static bool
+dw_hdmi_rockchip_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+}
+
+static void dw_hdmi_rockchip_encoder_commit(struct drm_encoder *encoder)
+{
+ struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
+ u32 val;
+ int mux;
+
+ mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
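+ /*
+ * The upper 16 bits of GRF_SOC_CON6 are a write-enable mask for the
+ * lower 16 bits, so the select bit is written together with its mask.
+ */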
+ if (mux)
+ val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
+ else
+ val = HDMI_SEL_VOP_LIT << 16;
+
+ regmap_write(hdmi->regmap, GRF_SOC_CON6, val);
+ dev_dbg(hdmi->dev, "vop %s output to hdmi\n",
+ (mux) ? "LIT" : "BIG");
+}
+
+static void dw_hdmi_rockchip_encoder_prepare(struct drm_encoder *encoder)
+{
+ rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
+ ROCKCHIP_OUT_MODE_AAAA);
+}
+
+static struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
+ .mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
+ .mode_set = dw_hdmi_rockchip_encoder_mode_set,
+ .prepare = dw_hdmi_rockchip_encoder_prepare,
+ .commit = dw_hdmi_rockchip_encoder_commit,
+ .disable = dw_hdmi_rockchip_encoder_disable,
+};
+
+static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
+ .mode_valid = dw_hdmi_rockchip_mode_valid,
+ .mpll_cfg = rockchip_mpll_cfg,
+ .cur_ctr = rockchip_cur_ctr,
+ .sym_term = rockchip_sym_term,
+ .dev_type = RK3288_HDMI,
+};
+
+static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
+ { .compatible = "rockchip,rk3288-dw-hdmi",
+ .data = &rockchip_hdmi_drv_data
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_hdmi_rockchip_dt_ids);
+
+static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct dw_hdmi_plat_data *plat_data;
+ const struct of_device_id *match;
+ struct drm_device *drm = data;
+ struct drm_encoder *encoder;
+ struct rockchip_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node);
+ plat_data = match->data;
+ hdmi->dev = &pdev->dev;
+ encoder = &hdmi->encoder;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -ENXIO;
+
+ platform_set_drvdata(pdev, hdmi);
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ ret = rockchip_hdmi_parse_dt(hdmi);
+ if (ret) {
+ dev_err(hdmi->dev, "Unable to parse OF data\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+}
+
+static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ return dw_hdmi_unbind(dev, master, data);
+}
+
+static const struct component_ops dw_hdmi_rockchip_ops = {
+ .bind = dw_hdmi_rockchip_bind,
+ .unbind = dw_hdmi_rockchip_unbind,
+};
+
+static int dw_hdmi_rockchip_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dw_hdmi_rockchip_ops);
+}
+
+static int dw_hdmi_rockchip_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dw_hdmi_rockchip_ops);
+
+ return 0;
+}
+
+static struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
+ .probe = dw_hdmi_rockchip_probe,
+ .remove = dw_hdmi_rockchip_remove,
+ .driver = {
+ .name = "dwhdmi-rockchip",
+ .of_match_table = dw_hdmi_rockchip_dt_ids,
+ },
+};
+
+module_platform_driver(dw_hdmi_rockchip_pltfm_driver);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific DW-HDMI Driver Extension");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dwhdmi-rockchip");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a798c7c71f91..21a481b224eb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -390,6 +390,7 @@ int rockchip_drm_encoder_get_mux_id(struct device_node *node,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(rockchip_drm_encoder_get_mux_id);
static int compare_of(struct device *dev, void *data)
{
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index bc98a227dc76..7ca8799ef784 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -34,12 +34,9 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
&rk_obj->dma_attrs);
- if (IS_ERR(rk_obj->kvaddr)) {
- int ret = PTR_ERR(rk_obj->kvaddr);
-
- DRM_ERROR("failed to allocate %#x byte dma buffer, %d",
- obj->size, ret);
- return ret;
+ if (!rk_obj->kvaddr) {
+ DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index e7ca25b3fb38..9a5c571b95fc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -735,6 +735,7 @@ int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index a50fe0eeaa0d..b9202aa6f8ab 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,8 +1,10 @@
config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
- depends on DRM && ARM
+ depends on DRM && ARM && HAVE_DMA_ATTRS
depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index d6d6b705b8c1..fbccc105819b 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,10 +1,11 @@
config DRM_STI
tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
- depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+ depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
select RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select DRM_PANEL
select FW_LOADER_USER_HELPER_FALLBACK
help
Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index 6ba9d27c1b90..f0f1e4ee2d92 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -12,6 +12,9 @@ stihdmi-y := sti_hdmi.o \
sti_hdmi_tx3g0c55phy.o \
sti_hdmi_tx3g4c28phy.o \
+stidvo-y := sti_dvo.o \
+ sti_awg_utils.o
+
obj-$(CONFIG_DRM_STI) = \
sti_vtg.o \
sti_vtac.o \
@@ -20,4 +23,5 @@ obj-$(CONFIG_DRM_STI) = \
sti_tvout.o \
sticompositor.o \
sti_hqvdp.o \
+ stidvo.o \
sti_drm_drv.o
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
new file mode 100644
index 000000000000..6029a2e3db1d
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_awg_utils.h"
+
+#define AWG_OPCODE_OFFSET 10
+
+enum opcode {
+ SET,
+ RPTSET,
+ RPLSET,
+ SKIP,
+ STOP,
+ REPEAT,
+ REPLAY,
+ JUMP,
+ HOLD,
+};
+
+static int awg_generate_instr(enum opcode opcode,
+ long int arg,
+ long int mux_sel,
+ long int data_en,
+ struct awg_code_generation_params *fwparams)
+{
+ u32 instruction = 0;
+ u32 mux = (mux_sel << 8) & 0x1ff;
+ u32 data_enable = (data_en << 9) & 0x2ff;
+ long int arg_tmp = arg;
+
+ /* The skip, repeat and replay arguments must not exceed 1023.
+ * To go beyond that value, the instruction is duplicated and arg is
+ * adjusted for each duplicated instruction.
+ */
+
+ while (arg_tmp > 0) {
+ arg = arg_tmp;
+ if (fwparams->instruction_offset >= AWG_MAX_INST) {
+ DRM_ERROR("too many number of instructions\n");
+ return -EINVAL;
+ }
+
+ switch (opcode) {
+ case SKIP:
+ /* let 'arg' + 1 pixels elapse without changing the
+ * output bus */
+ arg--; /* pixel adjustment */
+ arg_tmp--;
+
+ if (arg < 0) {
+ /* SKIP instruction not needed */
+ return 0;
+ }
+
+ if (arg == 0) {
+ /* SKIP 0 not permitted but we want to skip 1
+ * pixel. So we transform SKIP into SET
+ * instruction */
+ opcode = SET;
+ break;
+ }
+
+ mux = 0;
+ data_enable = 0;
+ arg = (arg << 22) >> 22;
+ arg &= (0x3ff);
+ break;
+ case REPEAT:
+ case REPLAY:
+ if (arg == 0) {
+ /* REPEAT or REPLAY instruction not needed */
+ return 0;
+ }
+
+ mux = 0;
+ data_enable = 0;
+ arg = (arg << 22) >> 22;
+ arg &= (0x3ff);
+ break;
+ case JUMP:
+ mux = 0;
+ data_enable = 0;
+ arg |= 0x40; /* for jump instruction 7th bit is 1 */
+ arg = (arg << 22) >> 22;
+ arg &= 0x3ff;
+ break;
+ case STOP:
+ arg = 0;
+ break;
+ case SET:
+ case RPTSET:
+ case RPLSET:
+ case HOLD:
+ arg = (arg << 24) >> 24;
+ arg &= (0x0ff);
+ break;
+ default:
+ DRM_ERROR("instruction %d does not exist\n", opcode);
+ return -EINVAL;
+ }
+
+ arg_tmp = arg_tmp - arg;
+
+ arg = ((arg + mux) + data_enable);
+
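+ /* 14-bit instruction word: 4-bit opcode above a 10-bit argument */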
+ instruction = ((opcode) << AWG_OPCODE_OFFSET) | arg;
+ fwparams->ram_code[fwparams->instruction_offset] =
+ instruction & (0x3fff);
+ fwparams->instruction_offset++;
+ }
+ return 0;
+}
+
+int sti_awg_generate_code_data_enable_mode(
+ struct awg_code_generation_params *fwparams,
+ struct awg_timing *timing)
+{
+ long int val;
+ long int data_en;
+ int ret = 0;
+
+ if (timing->trailing_lines > 0) {
+ /* skip trailing lines */
+ val = timing->blanking_level;
+ data_en = 0;
+ ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+
+ val = timing->trailing_lines - 1;
+ data_en = 0;
+ ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+ }
+
+ if (timing->trailing_pixels > 0) {
+ /* skip trailing pixels */
+ val = timing->blanking_level;
+ data_en = 0;
+ ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+
+ val = timing->trailing_pixels - 1;
+ data_en = 0;
+ ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+ }
+
+ /* set DE signal high */
+ val = timing->blanking_level;
+ data_en = 1;
+ ret |= awg_generate_instr((timing->trailing_pixels > 0) ? SET : RPLSET,
+ val, 0, data_en, fwparams);
+
+ if (timing->blanking_pixels > 0) {
+ /* skip over the active pixels */
+ val = timing->active_pixels - 1;
+ data_en = 1;
+ ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+
+ /* set DE signal low */
+ val = timing->blanking_level;
+ data_en = 0;
+ ret |= awg_generate_instr(SET, val, 0, data_en, fwparams);
+ }
+
+ /* replay the sequence as many times as there are active lines */
+ val = timing->active_lines - 1;
+ data_en = 0;
+ ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+
+ if (timing->blanking_lines > 0) {
+ /* skip blanking lines */
+ val = timing->blanking_level;
+ data_en = 0;
+ ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+
+ val = timing->blanking_lines - 1;
+ data_en = 0;
+ ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.h b/drivers/gpu/drm/sti/sti_awg_utils.h
new file mode 100644
index 000000000000..45d599bd570a
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_awg_utils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_AWG_UTILS_H_
+#define _STI_AWG_UTILS_H_
+
+#include <drm/drmP.h>
+
+#define AWG_MAX_INST 64
+
+struct awg_code_generation_params {
+ u32 *ram_code;
+ u8 instruction_offset;
+};
+
+struct awg_timing {
+ u32 total_lines;
+ u32 active_lines;
+ u32 blanking_lines;
+ u32 trailing_lines;
+ u32 total_pixels;
+ u32 active_pixels;
+ u32 blanking_pixels;
+ u32 trailing_pixels;
+ u32 blanking_level;
+};
+
+int sti_awg_generate_code_data_enable_mode(
+ struct awg_code_generation_params *fw_gen_params,
+ struct awg_timing *timing);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index 4c651c200f20..e6f6ef7c4866 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -190,11 +190,6 @@ out:
return ret;
}
-static void sti_drm_crtc_load_lut(struct drm_crtc *crtc)
-{
- /* do nothing */
-}
-
static void sti_drm_crtc_disable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -249,7 +244,6 @@ static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.mode_fixup = sti_drm_crtc_mode_fixup,
.mode_set = sti_drm_crtc_mode_set,
.mode_set_base = sti_drm_crtc_mode_set_base,
- .load_lut = sti_drm_crtc_load_lut,
.disable = sti_drm_crtc_disable,
};
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
new file mode 100644
index 000000000000..aeb5070c8363
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "sti_awg_utils.h"
+#include "sti_mixer.h"
+
+/* DVO registers */
+#define DVO_AWG_DIGSYNC_CTRL 0x0000
+#define DVO_DOF_CFG 0x0004
+#define DVO_LUT_PROG_LOW 0x0008
+#define DVO_LUT_PROG_MID 0x000C
+#define DVO_LUT_PROG_HIGH 0x0010
+#define DVO_DIGSYNC_INSTR_I 0x0100
+
+#define DVO_AWG_CTRL_EN BIT(0)
+#define DVO_AWG_FRAME_BASED_SYNC BIT(2)
+
+#define DVO_DOF_EN_LOWBYTE BIT(0)
+#define DVO_DOF_EN_MIDBYTE BIT(1)
+#define DVO_DOF_EN_HIGHBYTE BIT(2)
+#define DVO_DOF_EN BIT(6)
+#define DVO_DOF_MOD_COUNT_SHIFT 8
+
+#define DVO_LUT_ZERO 0
+#define DVO_LUT_Y_G 1
+#define DVO_LUT_Y_G_DEL 2
+#define DVO_LUT_CB_B 3
+#define DVO_LUT_CB_B_DEL 4
+#define DVO_LUT_CR_R 5
+#define DVO_LUT_CR_R_DEL 6
+#define DVO_LUT_HOLD 7
+
+struct dvo_config {
+ u32 flags;
+ u32 lowbyte;
+ u32 midbyte;
+ u32 highbyte;
+ int (*awg_fwgen_fct)(
+ struct awg_code_generation_params *fw_gen_params,
+ struct awg_timing *timing);
+};
+
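+/* 24-bit RGB output driven with the data-enable (DE) synchro firmware */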
+static struct dvo_config rgb_24bit_de_cfg = {
+ .flags = (0L << DVO_DOF_MOD_COUNT_SHIFT),
+ .lowbyte = DVO_LUT_CR_R,
+ .midbyte = DVO_LUT_Y_G,
+ .highbyte = DVO_LUT_CB_B,
+ .awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
+};
+
+/**
+ * STI digital video output structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: dvo registers
+ * @clk_pix: pixel clock for dvo
+ * @clk: clock for dvo
+ * @clk_main_parent: dvo parent clock if main path used
+ * @clk_aux_parent: dvo parent clock if aux path used
+ * @panel_node: panel node reference from device tree
+ * @panel: reference to the panel connected to the dvo
+ * @config: selected DVO configuration (LUT values and AWG generation callback)
+ * @enabled: true if dvo is enabled else false
+ * @encoder: drm_encoder it is bound to
+ * @bridge: drm_bridge attached to the encoder
+ */
+struct sti_dvo {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ struct clk *clk_pix;
+ struct clk *clk;
+ struct clk *clk_main_parent;
+ struct clk *clk_aux_parent;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct dvo_config *config;
+ bool enabled;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+};
+
+struct sti_dvo_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_dvo *dvo;
+};
+
+#define to_sti_dvo_connector(x) \
+ container_of(x, struct sti_dvo_connector, drm_connector)
+
+#define BLANKING_LEVEL 16
+int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
+{
+ struct drm_display_mode *mode = &dvo->mode;
+ struct dvo_config *config = dvo->config;
+ struct awg_code_generation_params fw_gen_params;
+ struct awg_timing timing;
+
+ fw_gen_params.ram_code = ram_code;
+ fw_gen_params.instruction_offset = 0;
+
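+ /* Derive AWG timing from the mode: blanking is the front porch
+ * (sync start minus active), trailing covers sync plus back porch. */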
+ timing.total_lines = mode->vtotal;
+ timing.active_lines = mode->vdisplay;
+ timing.blanking_lines = mode->vsync_start - mode->vdisplay;
+ timing.trailing_lines = mode->vtotal - mode->vsync_start;
+ timing.total_pixels = mode->htotal;
+ timing.active_pixels = mode->hdisplay;
+ timing.blanking_pixels = mode->hsync_start - mode->hdisplay;
+ timing.trailing_pixels = mode->htotal - mode->hsync_start;
+ timing.blanking_level = BLANKING_LEVEL;
+
+ if (config->awg_fwgen_fct(&fw_gen_params, &timing)) {
+ DRM_ERROR("AWG firmware not properly generated\n");
+ return -EINVAL;
+ }
+
+ *ram_size = fw_gen_params.instruction_offset;
+
+ return 0;
+}
+
+/* Configure AWG, writing instructions
+ *
+ * @dvo: pointer to DVO structure
+ * @awg_ram_code: pointer to AWG instructions table
+ * @nb: number of AWG instructions
+ */
+static void dvo_awg_configure(struct sti_dvo *dvo, u32 *awg_ram_code, int nb)
+{
+ int i;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < nb; i++)
+ writel(awg_ram_code[i],
+ dvo->regs + DVO_DIGSYNC_INSTR_I + i * 4);
+ for (i = nb; i < AWG_MAX_INST; i++)
+ writel(0, dvo->regs + DVO_DIGSYNC_INSTR_I + i * 4);
+
+ writel(DVO_AWG_CTRL_EN, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
+}
+
+static void sti_dvo_disable(struct drm_bridge *bridge)
+{
+ struct sti_dvo *dvo = bridge->driver_private;
+
+ if (!dvo->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (dvo->config->awg_fwgen_fct)
+ writel(0x00000000, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
+
+ writel(0x00000000, dvo->regs + DVO_DOF_CFG);
+
+ if (dvo->panel)
+ dvo->panel->funcs->disable(dvo->panel);
+
+ /* Disable/unprepare dvo clock */
+ clk_disable_unprepare(dvo->clk_pix);
+ clk_disable_unprepare(dvo->clk);
+
+ dvo->enabled = false;
+}
+
+static void sti_dvo_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_dvo *dvo = bridge->driver_private;
+ struct dvo_config *config = dvo->config;
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (dvo->enabled)
+ return;
+
+ /* Make sure DVO is disabled */
+ writel(0x00000000, dvo->regs + DVO_DOF_CFG);
+ writel(0x00000000, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
+
+ if (config->awg_fwgen_fct) {
+ u8 nb_instr;
+ u32 awg_ram_code[AWG_MAX_INST];
+ /* Configure AWG */
+ if (!dvo_awg_generate_code(dvo, &nb_instr, awg_ram_code))
+ dvo_awg_configure(dvo, awg_ram_code, nb_instr);
+ else
+ return;
+ }
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(dvo->clk_pix))
+ DRM_ERROR("Failed to prepare/enable dvo_pix clk\n");
+ if (clk_prepare_enable(dvo->clk))
+ DRM_ERROR("Failed to prepare/enable dvo clk\n");
+
+ if (dvo->panel)
+ dvo->panel->funcs->enable(dvo->panel);
+
+ /* Set LUT */
+ writel(config->lowbyte, dvo->regs + DVO_LUT_PROG_LOW);
+ writel(config->midbyte, dvo->regs + DVO_LUT_PROG_MID);
+ writel(config->highbyte, dvo->regs + DVO_LUT_PROG_HIGH);
+
+ /* Digital output formatter config */
+ val = (config->flags | DVO_DOF_EN);
+ writel(val, dvo->regs + DVO_DOF_CFG);
+
+ dvo->enabled = true;
+}
+
+static void sti_dvo_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_dvo *dvo = bridge->driver_private;
+ struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc);
+ int rate = mode->clock * 1000;
+ struct clk *clkp;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode));
+
+ /* According to the path used (main or aux), the dvo clocks should
+ * have a different parent clock. */
+ if (mixer->id == STI_MIXER_MAIN)
+ clkp = dvo->clk_main_parent;
+ else
+ clkp = dvo->clk_aux_parent;
+
+ if (clkp) {
+ clk_set_parent(dvo->clk_pix, clkp);
+ clk_set_parent(dvo->clk, clkp);
+ }
+
+ /* DVO clocks = compositor clock */
+ ret = clk_set_rate(dvo->clk_pix, rate);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for dvo_pix clk\n", rate);
+ return;
+ }
+
+ ret = clk_set_rate(dvo->clk, rate);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for dvo clk\n", rate);
+ return;
+ }
+
+ /* For now, we only support 24bit data enable (DE) synchro format */
+ dvo->config = &rgb_24bit_de_cfg;
+}
+
+static void sti_dvo_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static const struct drm_bridge_funcs sti_dvo_bridge_funcs = {
+ .pre_enable = sti_dvo_pre_enable,
+ .enable = sti_dvo_bridge_nope,
+ .disable = sti_dvo_disable,
+ .post_disable = sti_dvo_bridge_nope,
+ .mode_set = sti_dvo_set_mode,
+};
+
+static int sti_dvo_connector_get_modes(struct drm_connector *connector)
+{
+ struct sti_dvo_connector *dvo_connector
+ = to_sti_dvo_connector(connector);
+ struct sti_dvo *dvo = dvo_connector->dvo;
+
+ if (dvo->panel)
+ return dvo->panel->funcs->get_modes(dvo->panel);
+
+ return 0;
+}
+
+#define CLK_TOLERANCE_HZ 50
+
+static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ struct sti_dvo_connector *dvo_connector
+ = to_sti_dvo_connector(connector);
+ struct sti_dvo *dvo = dvo_connector->dvo;
+
+ result = clk_round_rate(dvo->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("dvo pixclk=%d not supported\n", target);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
+{
+ struct sti_dvo_connector *dvo_connector
+ = to_sti_dvo_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return dvo_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
+ .get_modes = sti_dvo_connector_get_modes,
+ .mode_valid = sti_dvo_connector_mode_valid,
+ .best_encoder = sti_dvo_best_encoder,
+};
+
+static enum drm_connector_status
+sti_dvo_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sti_dvo_connector *dvo_connector
+ = to_sti_dvo_connector(connector);
+ struct sti_dvo *dvo = dvo_connector->dvo;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!dvo->panel)
+ dvo->panel = of_drm_find_panel(dvo->panel_node);
+
+ if (dvo->panel)
+ if (!drm_panel_attach(dvo->panel, connector))
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void sti_dvo_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_dvo_connector *dvo_connector
+ = to_sti_dvo_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(dvo_connector);
+}
+
+static struct drm_connector_funcs sti_dvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_dvo_connector_detect,
+ .destroy = sti_dvo_connector_destroy,
+};
+
+static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_dvo *dvo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_dvo_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ int err;
+
+ /* Set the drm device handle */
+ dvo->drm_dev = drm_dev;
+
+ encoder = sti_dvo_find_encoder(drm_dev);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->dvo = dvo;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = dvo;
+ bridge->funcs = &sti_dvo_bridge_funcs;
+ bridge->of_node = dvo->dev.of_node;
+ err = drm_bridge_add(bridge);
+ if (err) {
+ DRM_ERROR("Failed to add bridge\n");
+ return err;
+ }
+
+ err = drm_bridge_attach(drm_dev, bridge);
+ if (err) {
+ DRM_ERROR("Failed to attach bridge\n");
+ return err;
+ }
+
+ dvo->bridge = bridge;
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+ dvo->encoder = encoder;
+
+ drm_connector = (struct drm_connector *)connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(drm_connector,
+ &sti_dvo_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+ DRM_ERROR("Failed to attach a connector to a encoder\n");
+ goto err_sysfs;
+ }
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_remove(bridge);
+ drm_connector_cleanup(drm_connector);
+ return -EINVAL;
+}
+
+static void sti_dvo_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ struct sti_dvo *dvo = dev_get_drvdata(dev);
+
+ drm_bridge_remove(dvo->bridge);
+}
+
+static const struct component_ops sti_dvo_ops = {
+ .bind = sti_dvo_bind,
+ .unbind = sti_dvo_unbind,
+};
+
+static int sti_dvo_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_dvo *dvo;
+ struct resource *res;
+ struct device_node *np = dev->of_node;
+
+ DRM_INFO("%s\n", __func__);
+
+ dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
+ if (!dvo) {
+ DRM_ERROR("Failed to allocate memory for DVO\n");
+ return -ENOMEM;
+ }
+
+ dvo->dev = pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
+ if (!res) {
+ DRM_ERROR("Invalid dvo resource\n");
+ return -ENOMEM;
+ }
+ dvo->regs = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (IS_ERR(dvo->regs))
+ return PTR_ERR(dvo->regs);
+
+ dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
+ if (IS_ERR(dvo->clk_pix)) {
+ DRM_ERROR("Cannot get dvo_pix clock\n");
+ return PTR_ERR(dvo->clk_pix);
+ }
+
+ dvo->clk = devm_clk_get(dev, "dvo");
+ if (IS_ERR(dvo->clk)) {
+ DRM_ERROR("Cannot get dvo clock\n");
+ return PTR_ERR(dvo->clk);
+ }
+
+ dvo->clk_main_parent = devm_clk_get(dev, "main_parent");
+ if (IS_ERR(dvo->clk_main_parent)) {
+ DRM_DEBUG_DRIVER("Cannot get main_parent clock\n");
+ dvo->clk_main_parent = NULL;
+ }
+
+ dvo->clk_aux_parent = devm_clk_get(dev, "aux_parent");
+ if (IS_ERR(dvo->clk_aux_parent)) {
+ DRM_DEBUG_DRIVER("Cannot get aux_parent clock\n");
+ dvo->clk_aux_parent = NULL;
+ }
+
+ dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
+ if (!dvo->panel_node)
+ DRM_ERROR("No panel associated to the dvo output\n");
+
+ platform_set_drvdata(pdev, dvo);
+
+ return component_add(&pdev->dev, &sti_dvo_ops);
+}
+
+static int sti_dvo_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_dvo_ops);
+ return 0;
+}
+
+static struct of_device_id dvo_of_match[] = {
+ { .compatible = "st,stih407-dvo", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, dvo_of_match);
+
+struct platform_driver sti_dvo_driver = {
+ .driver = {
+ .name = "sti-dvo",
+ .owner = THIS_MODULE,
+ .of_match_table = dvo_of_match,
+ },
+ .probe = sti_dvo_probe,
+ .remove = sti_dvo_remove,
+};
+
+module_platform_driver(sti_dvo_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 32448d1d1e8f..087906fd8846 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -14,15 +14,19 @@
#include "sti_layer.h"
#include "sti_vtg.h"
+#define ALPHASWITCH BIT(6)
#define ENA_COLOR_FILL BIT(8)
+#define BIGNOTLITTLE BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)
/* GDP color formats */
#define GDP_RGB565 0x00
#define GDP_RGB888 0x01
#define GDP_RGB888_32 0x02
+#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
+#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
#define GDP_CLUT8 0x0B
@@ -103,7 +107,9 @@ struct sti_gdp {
static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
@@ -129,8 +135,12 @@ static int sti_gdp_fourcc2format(int fourcc)
switch (fourcc) {
case DRM_FORMAT_XRGB8888:
return GDP_RGB888_32;
+ case DRM_FORMAT_XBGR8888:
+ return GDP_XBGR8888;
case DRM_FORMAT_ARGB8888:
return GDP_ARGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return GDP_ABGR8888;
case DRM_FORMAT_ARGB4444:
return GDP_ARGB4444;
case DRM_FORMAT_ARGB1555:
@@ -157,6 +167,7 @@ static int sti_gdp_get_alpharange(int format)
case GDP_ARGB8565:
case GDP_ARGB8888:
case GDP_AYCBR8888:
+ case GDP_ABGR8888:
return GAM_GDP_ALPHARANGE_255;
}
return 0;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 2ae9a9b73666..a9bbb081ecad 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -508,19 +508,12 @@ static void sti_hda_bridge_nope(struct drm_bridge *bridge)
/* do nothing */
}
-static void sti_hda_brigde_destroy(struct drm_bridge *bridge)
-{
- drm_bridge_cleanup(bridge);
- kfree(bridge);
-}
-
static const struct drm_bridge_funcs sti_hda_bridge_funcs = {
.pre_enable = sti_hda_pre_enable,
.enable = sti_hda_bridge_nope,
.disable = sti_hda_disable,
.post_disable = sti_hda_bridge_nope,
.mode_set = sti_hda_set_mode,
- .destroy = sti_hda_brigde_destroy,
};
static int sti_hda_connector_get_modes(struct drm_connector *connector)
@@ -664,7 +657,8 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
bridge->driver_private = hda;
- drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs);
+ bridge->funcs = &sti_hda_bridge_funcs;
+ drm_bridge_attach(drm_dev, bridge);
encoder->bridge = bridge;
connector->encoder = encoder;
@@ -693,7 +687,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
err_sysfs:
drm_connector_unregister(drm_connector);
err_connector:
- drm_bridge_cleanup(bridge);
drm_connector_cleanup(drm_connector);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index d032e024b0b8..1485ade98710 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -42,8 +42,17 @@
#define HDMI_SW_DI_1_PKT_WORD5 0x0228
#define HDMI_SW_DI_1_PKT_WORD6 0x022C
#define HDMI_SW_DI_CFG 0x0230
+#define HDMI_SW_DI_2_HEAD_WORD 0x0600
+#define HDMI_SW_DI_2_PKT_WORD0 0x0604
+#define HDMI_SW_DI_2_PKT_WORD1 0x0608
+#define HDMI_SW_DI_2_PKT_WORD2 0x060C
+#define HDMI_SW_DI_2_PKT_WORD3 0x0610
+#define HDMI_SW_DI_2_PKT_WORD4 0x0614
+#define HDMI_SW_DI_2_PKT_WORD5 0x0618
+#define HDMI_SW_DI_2_PKT_WORD6 0x061C
#define HDMI_IFRAME_SLOT_AVI 1
+#define HDMI_IFRAME_SLOT_AUDIO 2
#define XCAT(prefix, x, suffix) prefix ## x ## suffix
#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
@@ -99,6 +108,10 @@
#define HDMI_STA_SW_RST BIT(1)
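+/* Fields of the infoframe head word written to HDMI_SW_DI_N_HEAD_WORD */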
+#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
+
struct sti_hdmi_connector {
struct drm_connector drm_connector;
struct drm_encoder *encoder;
@@ -228,6 +241,90 @@ static void hdmi_config(struct sti_hdmi *hdmi)
}
/**
+ * Helper to concatenate up to four infoframe bytes into one 32-bit word
+ *
+ * @ptr: pointer to the infoframe data to pack
+ * @size: number of bytes to pack
+ */
+static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
+{
+ unsigned long value = 0;
+ size_t i;
+
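+ /* Pack up to 'size' bytes LSB-first into a single word. */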
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
+
+ return value;
+}
+
+/**
+ * Helper to write an infoframe into the hardware packet registers
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ * @data: packed infoframe to write (header followed by payload)
+ */
+static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
+{
+ const u8 *ptr = data;
+ u32 val, slot, mode, i;
+ u32 head_offset, pack_offset;
+ size_t size;
+
+ switch (*ptr) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ slot = HDMI_IFRAME_SLOT_AVI;
+ mode = HDMI_IFRAME_FIELD;
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
+ size = HDMI_AVI_INFOFRAME_SIZE;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ slot = HDMI_IFRAME_SLOT_AUDIO;
+ mode = HDMI_IFRAME_FRAME;
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
+ size = HDMI_AUDIO_INFOFRAME_SIZE;
+ break;
+
+ default:
+ DRM_ERROR("unsupported infoframe type: %#x\n", *ptr);
+ return;
+ }
+
+ /* Disable transmission slot for updated infoframe */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ val = HDMI_INFOFRAME_HEADER_TYPE(*ptr++);
+ val |= HDMI_INFOFRAME_HEADER_VERSION(*ptr++);
+ val |= HDMI_INFOFRAME_HEADER_LEN(*ptr++);
+ writel(val, hdmi->regs + head_offset);
+
+ /*
+ * Each subpack contains 4 bytes.
+ * The first byte of the first subpack must contain the checksum,
+ * so the packet size is increased by one.
+ */
+ for (i = 0; i < size; i += sizeof(u32)) {
+ size_t num;
+
+ num = min_t(size_t, size - i, sizeof(u32));
+ val = hdmi_infoframe_subpack(ptr, num);
+ ptr += sizeof(u32);
+ writel(val, hdmi->regs + pack_offset + i);
+ }
+
+ /* Enable transmission slot for updated infoframe */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, slot);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+}
+
+/**
* Prepare and configure the AVI infoframe
*
* AVI infoframe are transmitted at least once per two video field and
@@ -243,8 +340,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
struct drm_display_mode *mode = &hdmi->mode;
struct hdmi_avi_infoframe infoframe;
u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
- u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE;
- u32 val;
int ret;
DRM_DEBUG_DRIVER("\n");
@@ -266,47 +361,43 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
return ret;
}
- /* Disable transmission slot for AVI infoframe */
- val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
- val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI);
- hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+ hdmi_infoframe_write_infopack(hdmi, buffer);
- /* Infoframe header */
- val = buffer[0];
- val |= buffer[1] << 8;
- val |= buffer[2] << 16;
- hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
-
- /* Infoframe packet bytes */
- val = buffer[3];
- val |= *(frame++) << 8;
- val |= *(frame++) << 16;
- val |= *(frame++) << 24;
- hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
-
- val = *(frame++);
- val |= *(frame++) << 8;
- val |= *(frame++) << 16;
- val |= *(frame++) << 24;
- hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
-
- val = *(frame++);
- val |= *(frame++) << 8;
- val |= *(frame++) << 16;
- val |= *(frame++) << 24;
- hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
-
- val = *(frame++);
- val |= *(frame) << 8;
- hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
-
- /* Enable transmission slot for AVI infoframe
- * According to the hdmi specification, AVI infoframe should be
- * transmitted at least once per two video fields
- */
- val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
- val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI);
- hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+ return 0;
+}
+
+/**
+ * Prepare and configure the AUDIO infoframe
+ *
+ * AUDIO infoframes are transmitted once per frame and
+ * contain information about the audio transmission mode, such as the codec,
+ * sample size, ...
+ *
+ * @hdmi: pointer on the hdmi internal structure
+ *
+ * Return negative value if error occurs
+ */
+static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+ int ret;
+
+ ret = hdmi_audio_infoframe_init(&infoframe);
+ if (ret < 0) {
+ DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
+ return ret;
+ }
+
+ infoframe.channels = 2;
+
+ ret = hdmi_audio_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+ return ret;
+ }
+
+ hdmi_infoframe_write_infopack(hdmi, buffer);
return 0;
}
@@ -427,6 +518,10 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
if (hdmi_avi_infoframe_config(hdmi))
DRM_ERROR("Unable to configure AVI infoframe\n");
+ /* Program AUDIO infoframe */
+ if (hdmi_audio_infoframe_config(hdmi))
+ DRM_ERROR("Unable to configure AUDIO infoframe\n");
+
/* Sw reset */
hdmi_swreset(hdmi);
}
@@ -463,19 +558,12 @@ static void sti_hdmi_bridge_nope(struct drm_bridge *bridge)
/* do nothing */
}
-static void sti_hdmi_brigde_destroy(struct drm_bridge *bridge)
-{
- drm_bridge_cleanup(bridge);
- kfree(bridge);
-}
-
static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
.pre_enable = sti_hdmi_pre_enable,
.enable = sti_hdmi_bridge_nope,
.disable = sti_hdmi_disable,
.post_disable = sti_hdmi_bridge_nope,
.mode_set = sti_hdmi_set_mode,
- .destroy = sti_hdmi_brigde_destroy,
};
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -635,7 +723,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_adapt;
bridge->driver_private = hdmi;
- drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
+ bridge->funcs = &sti_hdmi_bridge_funcs;
+ drm_bridge_attach(drm_dev, bridge);
encoder->bridge = bridge;
connector->encoder = encoder;
@@ -667,7 +756,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
err_sysfs:
drm_connector_unregister(drm_connector);
err_connector:
- drm_bridge_cleanup(bridge);
drm_connector_cleanup(drm_connector);
err_adapt:
put_device(&hdmi->ddc_adapt->dev);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index f3db05dab0ab..b0eb62de1b2e 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1025,7 +1025,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
/* Get clock resources */
hqvdp->clk = devm_clk_get(dev, "hqvdp");
hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
- if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk)) {
+ if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
DRM_ERROR("Cannot get clocks\n");
return -ENXIO;
}
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index cb924aa2b321..5cc53116508e 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -48,6 +48,9 @@
#define TVO_HDMI_CLIP_VALUE_R_CR 0x514
#define TVO_HDMI_SYNC_SEL 0x518
#define TVO_HDMI_DFV_OBS 0x540
+#define TVO_VIP_DVO 0x600
+#define TVO_DVO_SYNC_SEL 0x618
+#define TVO_DVO_CONFIG 0x620
#define TVO_IN_FMT_SIGNED BIT(0)
#define TVO_SYNC_EXT BIT(4)
@@ -98,6 +101,9 @@
#define TVO_SYNC_HD_DCS_SHIFT 8
+#define TVO_SYNC_DVO_PAD_HSYNC_SHIFT 8
+#define TVO_SYNC_DVO_PAD_VSYNC_SHIFT 16
+
#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
/* enum listing the supported output data format */
@@ -113,6 +119,7 @@ struct sti_tvout {
struct reset_control *reset;
struct drm_encoder *hdmi;
struct drm_encoder *hda;
+ struct drm_encoder *dvo;
};
struct sti_tvout_encoder {
@@ -262,6 +269,66 @@ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
}
/**
+ * Start VIP block for DVO output
+ *
+ * @tvout: pointer on tvout structure
+ * @main_path: true if main path has to be used in the vip configuration
+ * else aux path is used.
+ */
+static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
+ int val;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (main_path) {
+ DRM_DEBUG_DRIVER("main vip for DVO\n");
+ /* Select the input sync for dvo = VTG set 4 */
+ val = TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_4;
+ tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
+ } else {
+ DRM_DEBUG_DRIVER("aux vip for DVO\n");
+ /* Select the input sync for dvo = VTG set 4 */
+ val = TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_4;
+ tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
+ }
+
+ /* Set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_DVO,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* Set clipping mode (Limited range RGB/Y) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+
+ /* Set round mode (rounded to 8-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* Set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* Input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_DVO, main_path,
+ sel_input_logic_inverted,
+ STI_TVOUT_VIDEO_OUT_RGB);
+}
+
+/**
* Start VIP block for HDMI output
*
* @tvout: pointer on tvout structure
@@ -402,6 +469,56 @@ static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
.destroy = sti_tvout_encoder_destroy,
};
+static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+}
+
+static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* Reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_DVO);
+}
+
+static const struct drm_encoder_helper_funcs sti_dvo_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_dvo_encoder_commit,
+ .disable = sti_dvo_encoder_disable,
+};
+
+static struct drm_encoder *
+sti_tvout_create_dvo_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = (struct drm_encoder *)encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 0;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS);
+
+ drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
@@ -508,6 +625,7 @@ static void sti_tvout_create_encoders(struct drm_device *dev,
{
tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
+ tvout->dvo = sti_tvout_create_dvo_encoder(dev, tvout);
}
static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 978993fa3a36..3aaa84ae2681 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -18,9 +18,12 @@
#include "drm.h"
#include "gem.h"
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
struct tegra_dc_soc_info {
+ bool supports_border_color;
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
@@ -38,63 +41,122 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
return container_of(plane, struct tegra_plane, base);
}
-static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index)
+struct tegra_dc_state {
+ struct drm_crtc_state base;
+
+ struct clk *clk;
+ unsigned long pclk;
+ unsigned int div;
+
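+ /* WIN_*_ACT_REQ bits for the planes updated in this state */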
+ u32 planes;
+};
+
+static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
{
- u32 value = WIN_A_ACT_REQ << index;
+ if (state)
+ return container_of(state, struct tegra_dc_state, base);
- tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ return NULL;
}
-static void tegra_dc_cursor_commit(struct tegra_dc *dc)
+struct tegra_plane_state {
+ struct drm_plane_state base;
+
+ struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
+};
+
+static inline struct tegra_plane_state *
+to_tegra_plane_state(struct drm_plane_state *state)
{
- tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+ if (state)
+ return container_of(state, struct tegra_plane_state, base);
+
+ return NULL;
}
-static void tegra_dc_commit(struct tegra_dc *dc)
+/*
+ * Reads the active copy of a register. This takes the dc->lock spinlock to
+ * prevent races with the VBLANK processing which also needs access to the
+ * active copy of some registers.
+ */
+static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ value = tegra_dc_readl(dc, offset);
+ tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+ return value;
+}
+
+/*
+ * Double-buffered registers have two copies: ASSEMBLY and ACTIVE. When the
+ * *_ACT_REQ bits are set the ASSEMBLY copy is latched into the ACTIVE copy.
+ * Latching happens immediately if the display controller is in STOP mode or
+ * on the next frame boundary otherwise.
+ *
+ * Triple-buffered registers have three copies: ASSEMBLY, ARM and ACTIVE. The
+ * ASSEMBLY copy is latched into the ARM copy immediately after *_UPDATE bits
+ * are written. When the *_ACT_REQ bits are written, the ARM copy is latched
+ * into the ACTIVE copy, either immediately if the display controller is in
+ * STOP mode, or at the next frame boundary otherwise.
+ */
+void tegra_dc_commit(struct tegra_dc *dc)
{
tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
-static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
+static int tegra_dc_format(u32 fourcc, u32 *format, u32 *swap)
{
/* assume no swapping of fetched data */
if (swap)
*swap = BYTE_SWAP_NOSWAP;
- switch (format) {
+ switch (fourcc) {
case DRM_FORMAT_XBGR8888:
- return WIN_COLOR_DEPTH_R8G8B8A8;
+ *format = WIN_COLOR_DEPTH_R8G8B8A8;
+ break;
case DRM_FORMAT_XRGB8888:
- return WIN_COLOR_DEPTH_B8G8R8A8;
+ *format = WIN_COLOR_DEPTH_B8G8R8A8;
+ break;
case DRM_FORMAT_RGB565:
- return WIN_COLOR_DEPTH_B5G6R5;
+ *format = WIN_COLOR_DEPTH_B5G6R5;
+ break;
case DRM_FORMAT_UYVY:
- return WIN_COLOR_DEPTH_YCbCr422;
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ break;
case DRM_FORMAT_YUYV:
if (swap)
*swap = BYTE_SWAP_SWAP2;
- return WIN_COLOR_DEPTH_YCbCr422;
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ break;
case DRM_FORMAT_YUV420:
- return WIN_COLOR_DEPTH_YCbCr420P;
+ *format = WIN_COLOR_DEPTH_YCbCr420P;
+ break;
case DRM_FORMAT_YUV422:
- return WIN_COLOR_DEPTH_YCbCr422P;
+ *format = WIN_COLOR_DEPTH_YCbCr422P;
+ break;
default:
- break;
+ return -EINVAL;
}
- WARN(1, "unsupported pixel format %u, using default\n", format);
- return WIN_COLOR_DEPTH_B8G8R8A8;
+ return 0;
}
static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
@@ -121,6 +183,9 @@ static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
return true;
}
+ if (planar)
+ *planar = false;
+
return false;
}
@@ -164,8 +229,8 @@ static inline u32 compute_initial_dda(unsigned int in)
return dfixed_frac(inf);
}
-static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
- const struct tegra_dc_window *window)
+static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
unsigned long value, flags;
@@ -274,9 +339,11 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
break;
case TEGRA_BO_TILING_MODE_BLOCK:
- DRM_ERROR("hardware doesn't support block linear mode\n");
- spin_unlock_irqrestore(&dc->lock, flags);
- return -EINVAL;
+ /*
+ * No need to handle this here because ->atomic_check
+ * will already have filtered it out.
+ */
+ break;
}
tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
@@ -332,109 +399,245 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
break;
}
- tegra_dc_window_commit(dc, index);
-
spin_unlock_irqrestore(&dc->lock, flags);
-
- return 0;
}
-static int tegra_window_plane_disable(struct drm_plane *plane)
+static void tegra_plane_destroy(struct drm_plane *plane)
{
- struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
- unsigned long flags;
- u32 value;
- if (!plane->crtc)
- return 0;
+ drm_plane_cleanup(plane);
+ kfree(p);
+}
- spin_lock_irqsave(&dc->lock, flags);
+static const u32 tegra_primary_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+};
- value = WINDOW_A_SELECT << p->index;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+static void tegra_primary_plane_destroy(struct drm_plane *plane)
+{
+ tegra_plane_destroy(plane);
+}
- value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value &= ~WIN_ENABLE;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+static void tegra_plane_reset(struct drm_plane *plane)
+{
+ struct tegra_plane_state *state;
- tegra_dc_window_commit(dc, p->index);
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
- spin_unlock_irqrestore(&dc->lock, flags);
+ kfree(plane->state);
+ plane->state = NULL;
- return 0;
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ plane->state = &state->base;
+ plane->state->plane = plane;
+ }
}
-static void tegra_plane_destroy(struct drm_plane *plane)
+static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
+ struct tegra_plane_state *copy;
- drm_plane_cleanup(plane);
- kfree(p);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ if (copy->base.fb)
+ drm_framebuffer_reference(copy->base.fb);
+
+ return &copy->base;
}
-static const u32 tegra_primary_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
+static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(state);
+}
+
+static const struct drm_plane_funcs tegra_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = tegra_primary_plane_destroy,
+ .reset = tegra_plane_reset,
+ .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_plane_atomic_destroy_state,
};
-static int tegra_primary_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x,
- int crtc_y, unsigned int crtc_w,
- unsigned int crtc_h, uint32_t src_x,
- uint32_t src_y, uint32_t src_w,
- uint32_t src_h)
+static int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
- struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
- struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct tegra_dc_window window;
+ return 0;
+}
+
+static void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+}
+
+static int tegra_plane_state_add(struct tegra_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct tegra_dc_state *tegra;
+
+ /* Propagate errors from allocation or locking failures. */
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ tegra = to_dc_state(crtc_state);
+
+ tegra->planes |= WIN_A_ACT_REQ << plane->index;
+
+ return 0;
+}
+
+static int tegra_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
+ struct tegra_bo_tiling *tiling = &plane_state->tiling;
+ struct tegra_plane *tegra = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
int err;
- memset(&window, 0, sizeof(window));
- window.src.x = src_x >> 16;
- window.src.y = src_y >> 16;
- window.src.w = src_w >> 16;
- window.src.h = src_h >> 16;
- window.dst.x = crtc_x;
- window.dst.y = crtc_y;
- window.dst.w = crtc_w;
- window.dst.h = crtc_h;
- window.format = tegra_dc_format(fb->pixel_format, &window.swap);
- window.bits_per_pixel = fb->bits_per_pixel;
- window.bottom_up = tegra_fb_is_bottom_up(fb);
+ /* no need for further checks if the plane is being disabled */
+ if (!state->crtc)
+ return 0;
- err = tegra_fb_get_tiling(fb, &window.tiling);
+ err = tegra_dc_format(state->fb->pixel_format, &plane_state->format,
+ &plane_state->swap);
if (err < 0)
return err;
- window.base[0] = bo->paddr + fb->offsets[0];
- window.stride[0] = fb->pitches[0];
+ err = tegra_fb_get_tiling(state->fb, tiling);
+ if (err < 0)
+ return err;
+
+ if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
+ !dc->soc->supports_block_linear) {
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
- err = tegra_dc_setup_window(dc, p->index, &window);
+ /*
+ * Tegra doesn't support different strides for U and V planes so we
+ * error out if the user tries to display a framebuffer with such a
+ * configuration.
+ */
+ if (drm_format_num_planes(state->fb->pixel_format) > 2) {
+ if (state->fb->pitches[2] != state->fb->pitches[1]) {
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ err = tegra_plane_state_add(tegra, state);
if (err < 0)
return err;
return 0;
}
-static void tegra_primary_plane_destroy(struct drm_plane *plane)
+static void tegra_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- tegra_window_plane_disable(plane);
- tegra_plane_destroy(plane);
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
+ struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc_window window;
+ unsigned int i;
+
+ /* nothing to do without a CRTC and framebuffer */
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ memset(&window, 0, sizeof(window));
+ window.src.x = plane->state->src_x >> 16;
+ window.src.y = plane->state->src_y >> 16;
+ window.src.w = plane->state->src_w >> 16;
+ window.src.h = plane->state->src_h >> 16;
+ window.dst.x = plane->state->crtc_x;
+ window.dst.y = plane->state->crtc_y;
+ window.dst.w = plane->state->crtc_w;
+ window.dst.h = plane->state->crtc_h;
+ window.bits_per_pixel = fb->bits_per_pixel;
+ window.bottom_up = tegra_fb_is_bottom_up(fb);
+
+ /* copy from state */
+ window.tiling = state->tiling;
+ window.format = state->format;
+ window.swap = state->swap;
+
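+ /*
+ * Program the buffer address and pitch for each plane of the
+ * framebuffer.
+ */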
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
+
+ window.base[i] = bo->paddr + fb->offsets[i];
+ window.stride[i] = fb->pitches[i];
+ }
+
+ tegra_dc_setup_window(dc, p->index, &window);
}
-static const struct drm_plane_funcs tegra_primary_plane_funcs = {
- .update_plane = tegra_primary_plane_update,
- .disable_plane = tegra_window_plane_disable,
- .destroy = tegra_primary_plane_destroy,
+static void tegra_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc;
+ unsigned long flags;
+ u32 value;
+
+ /* nothing to do if the plane wasn't attached to a CRTC */
+ if (!old_state || !old_state->crtc)
+ return;
+
+ dc = to_tegra_dc(old_state->crtc);
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = WINDOW_A_SELECT << p->index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
+ .atomic_check = tegra_plane_atomic_check,
+ .atomic_update = tegra_plane_atomic_update,
+ .atomic_disable = tegra_plane_atomic_disable,
};
static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
+ /*
+ * Ideally this would use drm_crtc_mask(), but that requires the CRTC
+ * to already be in the mode_config's list of CRTCs. It is only added
+ * to that list in drm_crtc_init_with_planes() (called from
+ * tegra_dc_init()), which in turn requires these planes to have been
+ * registered first, which is a chicken-and-egg problem.
+ *
+ * Work around this by deriving the mask from the number of CRTCs
+ * registered so far, which should always match the value returned by
+ * drm_crtc_index() after registration.
+ */
+ unsigned long possible_crtcs = 1 << drm->mode_config.num_crtc;
struct tegra_plane *plane;
unsigned int num_formats;
const u32 *formats;
@@ -447,7 +650,7 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
num_formats = ARRAY_SIZE(tegra_primary_plane_formats);
formats = tegra_primary_plane_formats;
- err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_primary_plane_funcs, formats,
num_formats, DRM_PLANE_TYPE_PRIMARY);
if (err < 0) {
@@ -455,6 +658,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
+ drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs);
+
return &plane->base;
}
@@ -462,27 +667,49 @@ static const u32 tegra_cursor_plane_formats[] = {
DRM_FORMAT_RGBA8888,
};
-static int tegra_cursor_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x,
- int crtc_y, unsigned int crtc_w,
- unsigned int crtc_h, uint32_t src_x,
- uint32_t src_y, uint32_t src_w,
- uint32_t src_h)
+static int tegra_cursor_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
- struct tegra_dc *dc = to_tegra_dc(crtc);
- u32 value = CURSOR_CLIP_DISPLAY;
+ struct tegra_plane *tegra = to_tegra_plane(plane);
+ int err;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!state->crtc)
+ return 0;
/* scaling not supported for cursor */
- if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h))
+ if ((state->src_w >> 16 != state->crtc_w) ||
+ (state->src_h >> 16 != state->crtc_h))
return -EINVAL;
/* only square cursors supported */
- if (src_w != src_h)
+ if (state->src_w != state->src_h)
return -EINVAL;
- switch (crtc_w) {
+ if (state->crtc_w != 32 && state->crtc_w != 64 &&
+ state->crtc_w != 128 && state->crtc_w != 256)
+ return -EINVAL;
+
+ err = tegra_plane_state_add(tegra, state);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0);
+ struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
+ struct drm_plane_state *state = plane->state;
+ u32 value = CURSOR_CLIP_DISPLAY;
+
+ /* nothing to do if the plane isn't attached to a CRTC or has no framebuffer */
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ switch (state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
break;
@@ -500,7 +727,9 @@ static int tegra_cursor_plane_update(struct drm_plane *plane,
break;
default:
- return -EINVAL;
+ WARN(1, "cursor size %ux%u not supported\n", state->crtc_w,
+ state->crtc_h);
+ return;
}
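+ /*
+ * The buffer address is programmed in units of 1 KiB, which is what
+ * the shift by 10 accounts for.
+ */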
value |= (bo->paddr >> 10) & 0x3fffff;
@@ -526,38 +755,43 @@ static int tegra_cursor_plane_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
/* position the cursor */
- value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff);
+ value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
- /* apply changes */
- tegra_dc_cursor_commit(dc);
- tegra_dc_commit(dc);
-
- return 0;
}
-static int tegra_cursor_plane_disable(struct drm_plane *plane)
+static void tegra_cursor_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ struct tegra_dc *dc;
u32 value;
- if (!plane->crtc)
- return 0;
+ /* nothing to do if the plane wasn't attached to a CRTC */
+ if (!old_state || !old_state->crtc)
+ return;
+
+ dc = to_tegra_dc(old_state->crtc);
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~CURSOR_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_cursor_commit(dc);
- tegra_dc_commit(dc);
-
- return 0;
}
static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
- .update_plane = tegra_cursor_plane_update,
- .disable_plane = tegra_cursor_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = tegra_plane_destroy,
+ .reset = tegra_plane_reset,
+ .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_plane_atomic_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
+ .atomic_check = tegra_cursor_atomic_check,
+ .atomic_update = tegra_cursor_atomic_update,
+ .atomic_disable = tegra_cursor_atomic_disable,
};
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
@@ -572,6 +806,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
if (!plane)
return ERR_PTR(-ENOMEM);
+ /*
+ * We'll treat the cursor as an overlay plane with index 6 here so
+ * that the update and activation request bits in DC_CMD_STATE_CONTROL
+ * match up.
+ */
+ plane->index = 6;
+
num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
formats = tegra_cursor_plane_formats;
@@ -583,71 +824,23 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
- return &plane->base;
-}
-
-static int tegra_overlay_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x,
- int crtc_y, unsigned int crtc_w,
- unsigned int crtc_h, uint32_t src_x,
- uint32_t src_y, uint32_t src_w,
- uint32_t src_h)
-{
- struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct tegra_dc_window window;
- unsigned int i;
- int err;
-
- memset(&window, 0, sizeof(window));
- window.src.x = src_x >> 16;
- window.src.y = src_y >> 16;
- window.src.w = src_w >> 16;
- window.src.h = src_h >> 16;
- window.dst.x = crtc_x;
- window.dst.y = crtc_y;
- window.dst.w = crtc_w;
- window.dst.h = crtc_h;
- window.format = tegra_dc_format(fb->pixel_format, &window.swap);
- window.bits_per_pixel = fb->bits_per_pixel;
- window.bottom_up = tegra_fb_is_bottom_up(fb);
-
- err = tegra_fb_get_tiling(fb, &window.tiling);
- if (err < 0)
- return err;
-
- for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
-
- window.base[i] = bo->paddr + fb->offsets[i];
-
- /*
- * Tegra doesn't support different strides for U and V planes
- * so we display a warning if the user tries to display a
- * framebuffer with such a configuration.
- */
- if (i >= 2) {
- if (fb->pitches[i] != window.stride[1])
- DRM_ERROR("unsupported UV-plane configuration\n");
- } else {
- window.stride[i] = fb->pitches[i];
- }
- }
+ drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
- return tegra_dc_setup_window(dc, p->index, &window);
+ return &plane->base;
}
static void tegra_overlay_plane_destroy(struct drm_plane *plane)
{
- tegra_window_plane_disable(plane);
tegra_plane_destroy(plane);
}
static const struct drm_plane_funcs tegra_overlay_plane_funcs = {
- .update_plane = tegra_overlay_plane_update,
- .disable_plane = tegra_window_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = tegra_overlay_plane_destroy,
+ .reset = tegra_plane_reset,
+ .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_plane_atomic_destroy_state,
};
static const uint32_t tegra_overlay_plane_formats[] = {
@@ -660,6 +853,14 @@ static const uint32_t tegra_overlay_plane_formats[] = {
DRM_FORMAT_YUV422,
};
+static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
+ .atomic_check = tegra_plane_atomic_check,
+ .atomic_update = tegra_plane_atomic_update,
+ .atomic_disable = tegra_plane_atomic_disable,
+};
+
static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int index)
@@ -686,6 +887,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
+ drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs);
+
return &plane->base;
}
@@ -703,99 +906,6 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
return 0;
}
-static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
- struct drm_framebuffer *fb)
-{
- struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
- unsigned int h_offset = 0, v_offset = 0;
- struct tegra_bo_tiling tiling;
- unsigned long value, flags;
- unsigned int format, swap;
- int err;
-
- err = tegra_fb_get_tiling(fb, &tiling);
- if (err < 0)
- return err;
-
- spin_lock_irqsave(&dc->lock, flags);
-
- tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- value = fb->offsets[0] + y * fb->pitches[0] +
- x * fb->bits_per_pixel / 8;
-
- tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
- tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
-
- format = tegra_dc_format(fb->pixel_format, &swap);
- tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
- tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
-
- if (dc->soc->supports_block_linear) {
- unsigned long height = tiling.value;
-
- switch (tiling.mode) {
- case TEGRA_BO_TILING_MODE_PITCH:
- value = DC_WINBUF_SURFACE_KIND_PITCH;
- break;
-
- case TEGRA_BO_TILING_MODE_TILED:
- value = DC_WINBUF_SURFACE_KIND_TILED;
- break;
-
- case TEGRA_BO_TILING_MODE_BLOCK:
- value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
- DC_WINBUF_SURFACE_KIND_BLOCK;
- break;
- }
-
- tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
- } else {
- switch (tiling.mode) {
- case TEGRA_BO_TILING_MODE_PITCH:
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- break;
-
- case TEGRA_BO_TILING_MODE_TILED:
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
- break;
-
- case TEGRA_BO_TILING_MODE_BLOCK:
- DRM_ERROR("hardware doesn't support block linear mode\n");
- spin_unlock_irqrestore(&dc->lock, flags);
- return -EINVAL;
- }
-
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
- }
-
- /* make sure bottom-up buffers are properly displayed */
- if (tegra_fb_is_bottom_up(fb)) {
- value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value |= V_DIRECTION;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
- v_offset += fb->height - 1;
- } else {
- value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value &= ~V_DIRECTION;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
- }
-
- tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
- tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
-
- value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
- tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
- spin_unlock_irqrestore(&dc->lock, flags);
-
- return 0;
-}
-
void tegra_dc_enable_vblank(struct tegra_dc *dc)
{
unsigned long value, flags;
@@ -838,7 +948,7 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
- spin_lock_irqsave(&dc->lock, flags);
+ spin_lock(&dc->lock);
/* check if new start address has been latched */
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -846,7 +956,7 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
- spin_unlock_irqrestore(&dc->lock, flags);
+ spin_unlock(&dc->lock);
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
drm_crtc_send_vblank_event(crtc, dc->event);
@@ -874,64 +984,130 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&drm->event_lock, flags);
}
-static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+static void tegra_dc_destroy(struct drm_crtc *crtc)
{
- unsigned int pipe = drm_crtc_index(crtc);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->event)
- return -EBUSY;
+ drm_crtc_cleanup(crtc);
+}
- if (event) {
- event->pipe = pipe;
- dc->event = event;
- drm_crtc_vblank_get(crtc);
- }
+static void tegra_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tegra_dc_state *state;
- tegra_dc_set_base(dc, 0, 0, fb);
- crtc->primary->fb = fb;
+ kfree(crtc->state);
+ crtc->state = NULL;
- return 0;
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ crtc->state = &state->base;
}
-static void drm_crtc_clear(struct drm_crtc *crtc)
+static struct drm_crtc_state *
+tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
- memset(crtc, 0, sizeof(*crtc));
+ struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
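+ /*
+ * Per-commit flags and the pending event must not be carried over
+ * into the duplicated state.
+ */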
+ copy->base.mode_changed = false;
+ copy->base.planes_changed = false;
+ copy->base.event = NULL;
+
+ return &copy->base;
}
-static void tegra_dc_destroy(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- drm_crtc_cleanup(crtc);
- drm_crtc_clear(crtc);
+ kfree(state);
}
static const struct drm_crtc_funcs tegra_crtc_funcs = {
- .page_flip = tegra_dc_page_flip,
- .set_config = drm_crtc_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
.destroy = tegra_dc_destroy,
+ .reset = tegra_crtc_reset,
+ .atomic_duplicate_state = tegra_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_crtc_atomic_destroy_state,
};
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+ u32 value;
+
+ /* stop the display controller */
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+ u32 value;
+
+ value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+ return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ if (tegra_dc_idle(dc))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+ return -ETIMEDOUT;
+}
+
static void tegra_crtc_disable(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_device *drm = crtc->dev;
- struct drm_plane *plane;
+ u32 value;
- drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
- if (plane->crtc == crtc) {
- tegra_window_plane_disable(plane);
- plane->crtc = NULL;
+ if (!tegra_dc_idle(dc)) {
+ tegra_dc_stop(dc);
- if (plane->fb) {
- drm_framebuffer_unreference(plane->fb);
- plane->fb = NULL;
- }
- }
+ /*
+ * Ignore the return value, there isn't anything useful to do
+ * in case this fails.
+ */
+ tegra_dc_wait_idle(dc, 100);
+ }
+
+ /*
+ * This should really be part of the RGB encoder driver, but clearing
+ * these bits has the side-effect of stopping the display controller.
+ * When that happens no VBLANK interrupts will be raised. At the same
+ * time the encoder is disabled before the display controller, so the
+ * code above will always time out waiting for the controller to go
+ * idle.
+ *
+ * Given the close coupling between the RGB encoder and the display
+ * controller, doing it here is still reasonable. None of the other
+ * encoder drivers require these bits to be cleared.
+ *
+ * XXX: Given that the display controller is switched off at this
+ * point anyway, is clearing these bits even useful for the RGB
+ * encoder?
+ */
+ if (dc->rgb) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
}
drm_crtc_vblank_off(crtc);
- tegra_dc_commit(dc);
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -971,33 +1147,15 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
return 0;
}
-static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
+ unsigned long pclk, unsigned int div)
{
- unsigned long pclk = mode->clock * 1000;
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct tegra_output *output = NULL;
- struct drm_encoder *encoder;
- unsigned int div;
u32 value;
- long err;
-
- list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc) {
- output = encoder_to_output(encoder);
- break;
- }
-
- if (!output)
- return -ENODEV;
+ int err;
- /*
- * This assumes that the parent clock is pll_d_out0 or pll_d2_out
- * respectively, each of which divides the base pll_d by 2.
- */
- err = tegra_output_setup_clock(output, dc->clk, pclk, &div);
+ err = clk_set_parent(dc->clk, parent);
if (err < 0) {
- dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
return err;
}
@@ -1009,26 +1167,69 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
return 0;
}
-static int tegra_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted,
- int x, int y, struct drm_framebuffer *old_fb)
+int tegra_dc_state_setup_clock(struct tegra_dc *dc,
+ struct drm_crtc_state *crtc_state,
+ struct clk *clk, unsigned long pclk,
+ unsigned int div)
+{
+ struct tegra_dc_state *state = to_dc_state(crtc_state);
+
+ state->clk = clk;
+ state->pclk = pclk;
+ state->div = div;
+
+ return 0;
+}
+
+static void tegra_dc_commit_state(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
{
- struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0);
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct tegra_dc_window window;
u32 value;
int err;
- err = tegra_crtc_setup_clk(crtc, mode);
- if (err) {
- dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
- return err;
+ err = clk_set_parent(dc->clk, state->clk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+
+ /*
+ * Outputs may not want to change the parent clock rate. This is only
+ * relevant to Tegra20 where only a single display PLL is available.
+ * Since that PLL would typically be used for HDMI, an internal LVDS
+ * panel would need to be driven by some other clock such as PLL_P
+ * which is shared with other peripherals. Changing the clock rate
+ * should therefore be avoided.
+ */
+ if (state->pclk > 0) {
+ err = clk_set_rate(state->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev,
+ "failed to set clock rate to %lu Hz\n",
+ state->pclk);
}
+ DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
+ state->div);
+ DRM_DEBUG_KMS("pclk: %lu\n", state->pclk);
+
+ value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+}
+
+static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ tegra_dc_commit_state(dc, state);
+
/* program display mode */
tegra_dc_set_timings(dc, mode);
+ if (dc->soc->supports_border_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
/* interlacing isn't supported yet, so disable it */
if (dc->soc->supports_interlacing) {
value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1036,35 +1237,17 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
}
- /* setup window parameters */
- memset(&window, 0, sizeof(window));
- window.src.x = 0;
- window.src.y = 0;
- window.src.w = mode->hdisplay;
- window.src.h = mode->vdisplay;
- window.dst.x = 0;
- window.dst.y = 0;
- window.dst.w = mode->hdisplay;
- window.dst.h = mode->vdisplay;
- window.format = tegra_dc_format(crtc->primary->fb->pixel_format,
- &window.swap);
- window.bits_per_pixel = crtc->primary->fb->bits_per_pixel;
- window.stride[0] = crtc->primary->fb->pitches[0];
- window.base[0] = bo->paddr;
-
- err = tegra_dc_setup_window(dc, 0, &window);
- if (err < 0)
- dev_err(dc->dev, "failed to enable root plane\n");
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
- return 0;
-}
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- return tegra_dc_set_base(dc, x, y, crtc->primary->fb);
+ tegra_dc_commit(dc);
}
static void tegra_crtc_prepare(struct drm_crtc *crtc)
@@ -1075,10 +1258,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
- /* hardware initialization */
- reset_control_deassert(dc->rst);
- usleep_range(10000, 20000);
-
if (dc->pipe)
syncpt = SYNCPT_VBLANK1;
else
@@ -1113,24 +1292,49 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
+ drm_crtc_vblank_on(crtc);
+}
+
+static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return 0;
+}
+
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+{
struct tegra_dc *dc = to_tegra_dc(crtc);
- drm_crtc_vblank_on(crtc);
- tegra_dc_commit(dc);
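+ /*
+ * If this commit carries a page-flip event, stash it and take a
+ * vblank reference so the page-flip completion path can send it.
+ */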
+ if (crtc->state->event) {
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ dc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
}
-static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
{
+ struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
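+ /*
+ * Request an update for all windows touched by this state and then
+ * request their activation; the update request bits sit eight bits
+ * above the corresponding activation request bits.
+ */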
+ tegra_dc_writel(dc, state->planes << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, state->planes, DC_CMD_STATE_CONTROL);
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
- .mode_set = tegra_crtc_mode_set,
- .mode_set_base = tegra_crtc_mode_set_base,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_nofb = tegra_crtc_mode_set_nofb,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
- .load_lut = tegra_crtc_load_lut,
+ .atomic_check = tegra_crtc_atomic_check,
+ .atomic_begin = tegra_crtc_atomic_begin,
+ .atomic_flush = tegra_crtc_atomic_flush,
};
static irqreturn_t tegra_dc_irq(int irq, void *data)
@@ -1576,6 +1780,7 @@ static const struct host1x_client_ops dc_client_ops = {
};
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
+ .supports_border_color = true,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
@@ -1584,6 +1789,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
+ .supports_border_color = true,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
@@ -1592,6 +1798,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
+ .supports_border_color = true,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
@@ -1600,6 +1807,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
+ .supports_border_color = false,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d4f827593dfa..7dd328d77996 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -10,6 +10,9 @@
#include <linux/host1x.h>
#include <linux/iommu.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+
#include "drm.h"
#include "gem.h"
@@ -24,6 +27,92 @@ struct tegra_drm_file {
struct list_head contexts;
};
+static void tegra_atomic_schedule(struct tegra_drm *tegra,
+ struct drm_atomic_state *state)
+{
+ tegra->commit.state = state;
+ schedule_work(&tegra->commit.work);
+}
+
+static void tegra_atomic_complete(struct tegra_drm *tegra,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = tegra->drm;
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: It must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel with the asynchronous completion of the previous
+ * update, which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ drm_atomic_helper_commit_pre_planes(drm, state);
+ drm_atomic_helper_commit_planes(drm, state);
+ drm_atomic_helper_commit_post_planes(drm, state);
+
+ drm_atomic_helper_wait_for_vblanks(drm, state);
+
+ drm_atomic_helper_cleanup_planes(drm, state);
+ drm_atomic_state_free(state);
+}
+
+static void tegra_atomic_work(struct work_struct *work)
+{
+ struct tegra_drm *tegra = container_of(work, struct tegra_drm,
+ commit.work);
+
+ tegra_atomic_complete(tegra, tegra->commit.state);
+}
+
+static int tegra_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state, bool async)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ err = drm_atomic_helper_prepare_planes(drm, state);
+ if (err)
+ return err;
+
+ /* serialize outstanding asynchronous commits */
+ mutex_lock(&tegra->commit.lock);
+ flush_work(&tegra->commit.work);
+
+ /*
+ * This is the point of no return - nothing below this point fails
+ * unless the hardware is seriously misbehaving, which means we can
+ * commit the new state on the software side now.
+ */
+
+ drm_atomic_helper_swap_state(drm, state);
+
+ if (async)
+ tegra_atomic_schedule(tegra, state);
+ else
+ tegra_atomic_complete(tegra, state);
+
+ mutex_unlock(&tegra->commit.lock);
+ return 0;
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+ .fb_create = tegra_fb_create,
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ .output_poll_changed = tegra_fb_output_poll_changed,
+#endif
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = tegra_atomic_commit,
+};
+
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
struct host1x_device *device = to_host1x_device(drm->dev);
@@ -36,8 +125,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (iommu_present(&platform_bus_type)) {
tegra->domain = iommu_domain_alloc(&platform_bus_type);
- if (IS_ERR(tegra->domain)) {
- err = PTR_ERR(tegra->domain);
+ if (!tegra->domain) {
+ err = -ENOMEM;
goto free;
}
@@ -47,11 +136,23 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
+
+ mutex_init(&tegra->commit.lock);
+ INIT_WORK(&tegra->commit.work, tegra_atomic_work);
+
drm->dev_private = tegra;
tegra->drm = drm;
drm_mode_config_init(drm);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
err = tegra_drm_fb_prepare(drm);
if (err < 0)
goto config;
@@ -62,6 +163,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
goto fbdev;
+ drm_mode_config_reset(drm);
+
/*
* We don't use the drm_irq_install() helpers provided by the DRM
* core, so we need to set this manually in order to allow the
@@ -106,8 +209,8 @@ static int tegra_drm_unload(struct drm_device *drm)
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_exit(drm);
- drm_vblank_cleanup(drm);
drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
err = host1x_device_exit(device);
if (err < 0)
@@ -190,7 +293,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
- err = get_user(dest->target.offset, &src->cmdbuf.offset);
+ err = get_user(dest->target.offset, &src->target.offset);
if (err < 0)
return err;
@@ -893,6 +996,30 @@ static int host1x_drm_remove(struct host1x_device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int host1x_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
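+ /* stop connector status polling while the device is suspended */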
+ drm_kms_helper_poll_disable(drm);
+
+ return 0;
+}
+
+static int host1x_drm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops host1x_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
+};
+
static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra20-dc", },
{ .compatible = "nvidia,tegra20-hdmi", },
@@ -912,7 +1039,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
};
static struct host1x_driver host1x_drm_driver = {
- .name = "drm",
+ .driver = {
+ .name = "drm",
+ .pm = &host1x_drm_pm_ops,
+ },
.probe = host1x_drm_probe,
.remove = host1x_drm_remove,
.subdevs = host1x_drm_subdevs,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 3a3b2e7b5b3f..8cb2dfeaa957 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -50,6 +50,12 @@ struct tegra_drm {
#endif
unsigned int pitch_align;
+
+ struct {
+ struct drm_atomic_state *state;
+ struct work_struct work;
+ struct mutex lock;
+ } commit;
};
struct tegra_drm_client;
@@ -164,45 +170,31 @@ struct tegra_dc_window {
unsigned int h;
} dst;
unsigned int bits_per_pixel;
- unsigned int format;
- unsigned int swap;
unsigned int stride[2];
unsigned long base[3];
bool bottom_up;
struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
};
/* from dc.c */
void tegra_dc_enable_vblank(struct tegra_dc *dc);
void tegra_dc_disable_vblank(struct tegra_dc *dc);
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
-
-struct tegra_output_ops {
- int (*enable)(struct tegra_output *output);
- int (*disable)(struct tegra_output *output);
- int (*setup_clock)(struct tegra_output *output, struct clk *clk,
- unsigned long pclk, unsigned int *div);
- int (*check_mode)(struct tegra_output *output,
- struct drm_display_mode *mode,
- enum drm_mode_status *status);
- enum drm_connector_status (*detect)(struct tegra_output *output);
-};
-
-enum tegra_output_type {
- TEGRA_OUTPUT_RGB,
- TEGRA_OUTPUT_HDMI,
- TEGRA_OUTPUT_DSI,
- TEGRA_OUTPUT_EDP,
-};
+void tegra_dc_commit(struct tegra_dc *dc);
+int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
+ unsigned long pclk, unsigned int div);
+int tegra_dc_state_setup_clock(struct tegra_dc *dc,
+ struct drm_crtc_state *crtc_state,
+ struct clk *clk, unsigned long pclk,
+ unsigned int div);
struct tegra_output {
struct device_node *of_node;
struct device *dev;
- const struct tegra_output_ops *ops;
- enum tegra_output_type type;
-
struct drm_panel *panel;
struct i2c_adapter *ddc;
const struct edid *edid;
@@ -223,42 +215,6 @@ static inline struct tegra_output *connector_to_output(struct drm_connector *c)
return container_of(c, struct tegra_output, connector);
}
-static inline int tegra_output_enable(struct tegra_output *output)
-{
- if (output && output->ops && output->ops->enable)
- return output->ops->enable(output);
-
- return output ? -ENOSYS : -EINVAL;
-}
-
-static inline int tegra_output_disable(struct tegra_output *output)
-{
- if (output && output->ops && output->ops->disable)
- return output->ops->disable(output);
-
- return output ? -ENOSYS : -EINVAL;
-}
-
-static inline int tegra_output_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk,
- unsigned int *div)
-{
- if (output && output->ops && output->ops->setup_clock)
- return output->ops->setup_clock(output, clk, pclk, div);
-
- return output ? -ENOSYS : -EINVAL;
-}
-
-static inline int tegra_output_check_mode(struct tegra_output *output,
- struct drm_display_mode *mode,
- enum drm_mode_status *status)
-{
- if (output && output->ops && output->ops->check_mode)
- return output->ops->check_mode(output, mode, status);
-
- return output ? -ENOSYS : -EINVAL;
-}
-
/* from rgb.c */
int tegra_dc_rgb_probe(struct tegra_dc *dc);
int tegra_dc_rgb_remove(struct tegra_dc *dc);
@@ -267,9 +223,18 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
/* from output.c */
int tegra_output_probe(struct tegra_output *output);
-int tegra_output_remove(struct tegra_output *output);
+void tegra_output_remove(struct tegra_output *output);
int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
-int tegra_output_exit(struct tegra_output *output);
+void tegra_output_exit(struct tegra_output *output);
+
+int tegra_output_connector_get_modes(struct drm_connector *connector);
+struct drm_encoder *
+tegra_output_connector_best_encoder(struct drm_connector *connector);
+enum drm_connector_status
+tegra_output_connector_detect(struct drm_connector *connector, bool force);
+void tegra_output_connector_destroy(struct drm_connector *connector);
+
+void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
struct tegra_dpaux;
@@ -291,12 +256,16 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling);
+struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+ struct drm_file *file,
+ struct drm_mode_fb_cmd2 *cmd);
int tegra_drm_fb_prepare(struct drm_device *drm);
void tegra_drm_fb_free(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
#ifdef CONFIG_DRM_TEGRA_FBDEV
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+void tegra_fb_output_poll_changed(struct drm_device *drm);
#endif
extern struct platform_driver tegra_dc_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 33f67fd601c6..ed970f622903 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -27,6 +28,28 @@
#include "dsi.h"
#include "mipi-phy.h"
+struct tegra_dsi_state {
+ struct drm_connector_state base;
+
+ struct mipi_dphy_timing timing;
+ unsigned long period;
+
+ unsigned int vrefresh;
+ unsigned int lanes;
+ unsigned long pclk;
+ unsigned long bclk;
+
+ enum tegra_dsi_format format;
+ unsigned int mul;
+ unsigned int div;
+};
+
+static inline struct tegra_dsi_state *
+to_dsi_state(struct drm_connector_state *state)
+{
+ return container_of(state, struct tegra_dsi_state, base);
+}
+
struct tegra_dsi {
struct host1x_client client;
struct tegra_output output;
@@ -51,7 +74,6 @@ struct tegra_dsi {
struct mipi_dsi_host host;
struct regulator *vdd;
- bool enabled;
unsigned int video_fifo_depth;
unsigned int host_fifo_depth;
@@ -77,13 +99,17 @@ static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
return container_of(output, struct tegra_dsi, output);
}
-static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi,
- unsigned long reg)
+static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi)
+{
+ return to_dsi_state(dsi->output.connector.state);
+}
+
+static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned long reg)
{
return readl(dsi->regs + (reg << 2));
}
-static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value,
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value,
unsigned long reg)
{
writel(value, dsi->regs + (reg << 2));
@@ -95,7 +121,7 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
struct tegra_dsi *dsi = node->info_ent->data;
#define DUMP_REG(name) \
- seq_printf(s, "%-32s %#05x %08lx\n", #name, name, \
+ seq_printf(s, "%-32s %#05x %08x\n", #name, name, \
tegra_dsi_readl(dsi, name))
DUMP_REG(DSI_INCR_SYNCPT);
@@ -230,7 +256,7 @@ remove:
return err;
}
-static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+static void tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
{
drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
dsi->minor);
@@ -241,8 +267,6 @@ static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
debugfs_remove(dsi->debugfs);
dsi->debugfs = NULL;
-
- return 0;
}
#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
@@ -338,61 +362,36 @@ static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
[11] = 0,
};
-static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
+static void tegra_dsi_set_phy_timing(struct tegra_dsi *dsi,
+ unsigned long period,
+ const struct mipi_dphy_timing *timing)
{
- struct mipi_dphy_timing timing;
- unsigned long value, period;
- long rate;
- int err;
-
- rate = clk_get_rate(dsi->clk);
- if (rate < 0)
- return rate;
-
- period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2);
-
- err = mipi_dphy_timing_get_default(&timing, period);
- if (err < 0)
- return err;
-
- err = mipi_dphy_timing_validate(&timing, period);
- if (err < 0) {
- dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
- return err;
- }
-
- /*
- * The D-PHY timing fields below are expressed in byte-clock cycles,
- * so multiply the period by 8.
- */
- period *= 8;
+ u32 value;
- value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
- DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
- DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
- DSI_TIMING_FIELD(timing.hsprepare, period, 1);
+ value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 |
+ DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 |
+ DSI_TIMING_FIELD(timing->hsprepare, period, 1);
tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
- value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
- DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
- DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
- DSI_TIMING_FIELD(timing.lpx, period, 1);
+ value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing->lpx, period, 1);
tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
- value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
- DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
+ value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 |
DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
- value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
- DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
- DSI_TIMING_FIELD(timing.tago, period, 1);
+ value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing->tago, period, 1);
tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
if (dsi->slave)
- return tegra_dsi_set_phy_timing(dsi->slave);
-
- return 0;
+ tegra_dsi_set_phy_timing(dsi->slave, period, timing);
}
static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
@@ -484,14 +483,22 @@ static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
return dsi->lanes;
}
-static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
- const struct drm_display_mode *mode)
+static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
+ const struct drm_display_mode *mode)
{
unsigned int hact, hsw, hbp, hfp, i, mul, div;
- enum tegra_dsi_format format;
+ struct tegra_dsi_state *state;
const u32 *pkt_seq;
u32 value;
- int err;
+
+ /* XXX: pass the state into this function? */
+ if (dsi->master)
+ state = tegra_dsi_get_state(dsi->master);
+ else
+ state = tegra_dsi_get_state(dsi);
+
+ mul = state->mul;
+ div = state->div;
if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
@@ -504,15 +511,8 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
pkt_seq = pkt_seq_command_mode;
}
- err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
- if (err < 0)
- return err;
-
- err = tegra_dsi_get_format(dsi->format, &format);
- if (err < 0)
- return err;
-
- value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
+ value = DSI_CONTROL_CHANNEL(0) |
+ DSI_CONTROL_FORMAT(state->format) |
DSI_CONTROL_LANES(dsi->lanes - 1) |
DSI_CONTROL_SOURCE(pipe);
tegra_dsi_writel(dsi, value, DSI_CONTROL);
@@ -591,8 +591,8 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
/* set SOL delay */
if (dsi->master || dsi->slave) {
- unsigned int lanes = tegra_dsi_get_lanes(dsi);
unsigned long delay, bclk, bclk_ganged;
+ unsigned int lanes = state->lanes;
/* SOL to valid, valid to FIFO and FIFO write delay */
delay = 4 + 4 + 2;
@@ -612,9 +612,7 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
}
if (dsi->slave) {
- err = tegra_dsi_configure(dsi->slave, pipe, mode);
- if (err < 0)
- return err;
+ tegra_dsi_configure(dsi->slave, pipe, mode);
/*
* TODO: Support modes other than symmetrical left-right
@@ -624,49 +622,6 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
mode->hdisplay / 2);
}
-
- return 0;
-}
-
-static int tegra_output_dsi_enable(struct tegra_output *output)
-{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- const struct drm_display_mode *mode = &dc->base.mode;
- struct tegra_dsi *dsi = to_dsi(output);
- u32 value;
- int err;
-
- if (dsi->enabled)
- return 0;
-
- err = tegra_dsi_configure(dsi, dc->pipe, mode);
- if (err < 0)
- return err;
-
- /* enable display controller */
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= DSI_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- value |= DISP_CTRL_MODE_C_DISPLAY;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- /* enable DSI controller */
- tegra_dsi_enable(dsi);
-
- dsi->enabled = true;
-
- return 0;
}
static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
@@ -705,6 +660,29 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
}
+static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
+ unsigned int vrefresh)
+{
+ unsigned int timeout;
+ u32 value;
+
+ /* one frame high-speed transmission timeout */
+ timeout = (bclk / vrefresh) / 512;
+ value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+ /* 2 ms peripheral timeout for panel */
+ timeout = 2 * bclk / 512 * 1000;
+ value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+ value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+ tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+ if (dsi->slave)
+ tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+}
+
static void tegra_dsi_disable(struct tegra_dsi *dsi)
{
u32 value;
@@ -724,15 +702,149 @@ static void tegra_dsi_disable(struct tegra_dsi *dsi)
usleep_range(5000, 10000);
}
-static int tegra_output_dsi_disable(struct tegra_output *output)
+static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
+{
+ u32 value;
+
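+ /*
+ * Toggle the enable bit in DSI_POWER_CONTROL to reset the controller
+ * and clear any pending host trigger.
+ */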
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(300, 1000);
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(300, 1000);
+
+ value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+ if (value)
+ tegra_dsi_writel(dsi, 0, DSI_TRIGGER);
+
+ if (dsi->slave)
+ tegra_dsi_soft_reset(dsi->slave);
+}
+
+static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static void tegra_dsi_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_dsi_state *state;
+
+ kfree(connector->state);
+ connector->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ connector->state = &state->base;
+}
+
+static struct drm_connector_state *
+tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_dsi_state *state = to_dsi_state(connector->state);
+ struct tegra_dsi_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ return &copy->base;
+}
+
+static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
+ .dpms = tegra_dsi_connector_dpms,
+ .reset = tegra_dsi_connector_reset,
+ .detect = tegra_output_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = tegra_dsi_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static enum drm_mode_status
+tegra_dsi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
+ .get_modes = tegra_output_connector_get_modes,
+ .mode_valid = tegra_dsi_connector_mode_valid,
+ .best_encoder = tegra_output_connector_best_encoder,
+};
+
+static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
+ .destroy = tegra_output_encoder_destroy,
+};
+
+static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+}
+
+static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ struct tegra_dsi_state *state;
+ u32 value;
+
+ state = tegra_dsi_get_state(dsi);
+
+ tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
+
+ /*
+ * The D-PHY timing fields are expressed in byte-clock cycles, so
+ * multiply the period by 8.
+ */
+ tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ tegra_dsi_configure(dsi, dc->pipe, mode);
+
+ /* enable display controller */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ /* enable DSI controller */
+ tegra_dsi_enable(dsi);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+
+ return;
+}
+
+static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_dsi *dsi = to_dsi(output);
- unsigned long value;
+ u32 value;
int err;
- if (!dsi->enabled)
- return 0;
+ if (output->panel)
+ drm_panel_disable(output->panel);
tegra_dsi_video_disable(dsi);
@@ -741,85 +853,78 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
* sure it's only executed when the output is attached to one.
*/
if (dc) {
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~DSI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ tegra_dc_commit(dc);
}
err = tegra_dsi_wait_idle(dsi, 100);
if (err < 0)
dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
- tegra_dsi_disable(dsi);
+ tegra_dsi_soft_reset(dsi);
- dsi->enabled = false;
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
- return 0;
-}
-
-static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
- unsigned int vrefresh)
-{
- unsigned int timeout;
- u32 value;
-
- /* one frame high-speed transmission timeout */
- timeout = (bclk / vrefresh) / 512;
- value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
- tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
-
- /* 2 ms peripheral timeout for panel */
- timeout = 2 * bclk / 512 * 1000;
- value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
- tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
-
- value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
- tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+ tegra_dsi_disable(dsi);
- if (dsi->slave)
- tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+ return;
}
-static int tegra_output_dsi_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk,
- unsigned int *divp)
+static int
+tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dsi_state *state = to_dsi_state(conn_state);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
struct tegra_dsi *dsi = to_dsi(output);
- unsigned int mul, div, vrefresh, lanes;
- unsigned long bclk, plld;
+ unsigned int scdiv;
+ unsigned long plld;
int err;
- lanes = tegra_dsi_get_lanes(dsi);
+ state->pclk = crtc_state->mode.clock * 1000;
+
+ err = tegra_dsi_get_muldiv(dsi->format, &state->mul, &state->div);
+ if (err < 0)
+ return err;
+
+ state->lanes = tegra_dsi_get_lanes(dsi);
- err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+ err = tegra_dsi_get_format(dsi->format, &state->format);
if (err < 0)
return err;
- DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes);
- vrefresh = drm_mode_vrefresh(mode);
- DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
+ state->vrefresh = drm_mode_vrefresh(&crtc_state->mode);
/* compute byte clock */
- bclk = (pclk * mul) / (div * lanes);
+ state->bclk = (state->pclk * state->mul) / (state->div * state->lanes);
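+ /*
+ * mul/div express the bytes-per-pixel ratio of the DSI pixel format
+ * (for example 3/1 for RGB888), so the result is the byte clock per
+ * lane.
+ */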
+
+ DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", state->mul, state->div,
+ state->lanes);
+ DRM_DEBUG_KMS("format: %u, vrefresh: %u\n", state->format,
+ state->vrefresh);
+ DRM_DEBUG_KMS("bclk: %lu\n", state->bclk);
/*
* Compute bit clock and round up to the next MHz.
*/
- plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
+ plld = DIV_ROUND_UP(state->bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
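+ /* bit clock period in nanoseconds, used for the D-PHY timing setup */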
+ state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld);
+
+ err = mipi_dphy_timing_get_default(&state->timing, state->period);
+ if (err < 0)
+ return err;
+
+ err = mipi_dphy_timing_validate(&state->timing, state->period);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+ return err;
+ }
/*
* We divide the frequency by two here, but we make up for that by
@@ -828,19 +933,6 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
*/
plld /= 2;
- err = clk_set_parent(clk, dsi->clk_parent);
- if (err < 0) {
- dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
- return err;
- }
-
- err = clk_set_rate(dsi->clk_parent, plld);
- if (err < 0) {
- dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
- plld);
- return err;
- }
-
/*
* Derive pixel clock from bit clock using the shift clock divider.
* Note that this is only half of what we would expect, but we need
@@ -851,44 +943,30 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
* not working properly otherwise. Perhaps the PLLs cannot generate
* frequencies sufficiently high.
*/
- *divp = ((8 * mul) / (div * lanes)) - 2;
+ scdiv = ((8 * state->mul) / (state->div * state->lanes)) - 2;
- /*
- * XXX: Move the below somewhere else so that we don't need to have
- * access to the vrefresh in this function?
- */
- tegra_dsi_set_timeout(dsi, bclk, vrefresh);
-
- err = tegra_dsi_set_phy_timing(dsi);
- if (err < 0)
+ err = tegra_dc_state_setup_clock(dc, crtc_state, dsi->clk_parent,
+ plld, scdiv);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
+ }
- return 0;
-}
-
-static int tegra_output_dsi_check_mode(struct tegra_output *output,
- struct drm_display_mode *mode,
- enum drm_mode_status *status)
-{
- /*
- * FIXME: For now, always assume that the mode is okay.
- */
-
- *status = MODE_OK;
-
- return 0;
+ return err;
}
-static const struct tegra_output_ops dsi_ops = {
- .enable = tegra_output_dsi_enable,
- .disable = tegra_output_dsi_disable,
- .setup_clock = tegra_output_dsi_setup_clock,
- .check_mode = tegra_output_dsi_check_mode,
+static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
+ .dpms = tegra_dsi_encoder_dpms,
+ .prepare = tegra_dsi_encoder_prepare,
+ .commit = tegra_dsi_encoder_commit,
+ .mode_set = tegra_dsi_encoder_mode_set,
+ .disable = tegra_dsi_encoder_disable,
+ .atomic_check = tegra_dsi_encoder_atomic_check,
};
static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
{
- unsigned long value;
+ u32 value;
value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
@@ -923,17 +1001,44 @@ static int tegra_dsi_init(struct host1x_client *client)
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
+ reset_control_deassert(dsi->rst);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ goto reset;
+ }
+
/* Gangsters must not register their own outputs. */
if (!dsi->master) {
- dsi->output.type = TEGRA_OUTPUT_DSI;
dsi->output.dev = client->dev;
- dsi->output.ops = &dsi_ops;
+
+ drm_connector_init(drm, &dsi->output.connector,
+ &tegra_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(&dsi->output.connector,
+ &tegra_dsi_connector_helper_funcs);
+ dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_encoder_init(drm, &dsi->output.encoder,
+ &tegra_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI);
+ drm_encoder_helper_add(&dsi->output.encoder,
+ &tegra_dsi_encoder_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&dsi->output.connector,
+ &dsi->output.encoder);
+ drm_connector_register(&dsi->output.connector);
err = tegra_output_init(drm, &dsi->output);
if (err < 0) {
- dev_err(client->dev, "output setup failed: %d\n", err);
- return err;
+ dev_err(client->dev,
+ "failed to initialize output: %d\n",
+ err);
+ goto reset;
}
+
+ dsi->output.encoder.possible_crtcs = 0x3;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
@@ -943,34 +1048,22 @@ static int tegra_dsi_init(struct host1x_client *client)
}
return 0;
+
+reset:
+ reset_control_assert(dsi->rst);
+ return err;
}
static int tegra_dsi_exit(struct host1x_client *client)
{
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
- int err;
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dsi_debugfs_exit(dsi);
- if (err < 0)
- dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
- }
+ tegra_output_exit(&dsi->output);
- if (!dsi->master) {
- err = tegra_output_disable(&dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output failed to disable: %d\n",
- err);
- return err;
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_dsi_debugfs_exit(dsi);
- err = tegra_output_exit(&dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output cleanup failed: %d\n",
- err);
- return err;
- }
- }
+ reset_control_assert(dsi->rst);
return 0;
}
@@ -1398,13 +1491,6 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->rst))
return PTR_ERR(dsi->rst);
- err = reset_control_deassert(dsi->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n",
- err);
- return err;
- }
-
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
dev_err(&pdev->dev, "cannot get DSI clock\n");
@@ -1470,12 +1556,6 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto disable_vdd;
}
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0) {
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
- goto mipi_free;
- }
-
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1527,6 +1607,8 @@ static int tegra_dsi_remove(struct platform_device *pdev)
return err;
}
+ tegra_output_remove(&dsi->output);
+
mipi_dsi_host_unregister(&dsi->host);
tegra_mipi_free(dsi->mipi);
@@ -1535,12 +1617,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
clk_disable_unprepare(dsi->clk);
reset_control_assert(dsi->rst);
- err = tegra_output_remove(&dsi->output);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to remove output: %d\n", err);
- return err;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e9c715d89261..397fb34d5d5b 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -129,9 +129,9 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
return fb;
}
-static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
- struct drm_file *file,
- struct drm_mode_fb_cmd2 *cmd)
+struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+ struct drm_file *file,
+ struct drm_mode_fb_cmd2 *cmd)
{
unsigned int hsub, vsub, i;
struct tegra_bo *planes[4];
@@ -377,7 +377,7 @@ void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base);
}
-static void tegra_fb_output_poll_changed(struct drm_device *drm)
+void tegra_fb_output_poll_changed(struct drm_device *drm)
{
struct tegra_drm *tegra = drm->dev_private;
@@ -386,28 +386,11 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm)
}
#endif
-static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
- .fb_create = tegra_fb_create,
-#ifdef CONFIG_DRM_TEGRA_FBDEV
- .output_poll_changed = tegra_fb_output_poll_changed,
-#endif
-};
-
int tegra_drm_fb_prepare(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
-#endif
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.funcs = &tegra_drm_mode_funcs;
-
-#ifdef CONFIG_DRM_TEGRA_FBDEV
tegra->fbdev = tegra_fbdev_create(drm);
if (IS_ERR(tegra->fbdev))
return PTR_ERR(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 8777b7f75791..cfb481943b6b 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -92,36 +92,6 @@ static const struct host1x_bo_ops tegra_bo_ops = {
.kunmap = tegra_bo_kunmap,
};
-/*
- * A generic iommu_map_sg() function is being reviewed and will hopefully be
- * merged soon. At that point this function can be dropped in favour of the
- * one provided by the IOMMU API.
- */
-static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents,
- int prot)
-{
- struct scatterlist *s;
- size_t offset = 0;
- unsigned int i;
- int err;
-
- for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = page_to_phys(sg_page(s));
- size_t length = s->offset + s->length;
-
- err = iommu_map(domain, iova + offset, phys, length, prot);
- if (err < 0) {
- iommu_unmap(domain, iova, offset);
- return err;
- }
-
- offset += length;
- }
-
- return offset;
-}
-
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
int prot = IOMMU_READ | IOMMU_WRITE;
@@ -144,8 +114,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
bo->paddr = bo->mm->start;
- err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
- bo->sgt->nents, prot);
+ err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->sgt->nents, prot);
if (err < 0) {
dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
goto remove;
@@ -244,10 +214,8 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
for_each_sg(sgt->sgl, s, sgt->nents, i)
sg_dma_address(s) = sg_phys(s);
- if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
- sgt = ERR_PTR(-ENOMEM);
+ if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0)
goto release_sgt;
- }
bo->sgt = sgt;
@@ -256,6 +224,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
release_sgt:
sg_free_table(sgt);
kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(sgt);
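The last gem.c hunk is a subtle ordering fix: previously sgt was overwritten with ERR_PTR(-ENOMEM) before the jump, so sg_free_table() and kfree() then operated on an error pointer. A minimal kernel-style sketch of the corrected pattern (the function and parameter names are illustrative only):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_error_path(struct sg_table *sgt, int map_failed)
{
	if (map_failed)
		goto release_sgt;

	return 0;

release_sgt:
	sg_free_table(sgt);	/* still a valid pointer here */
	kfree(sgt);
	sgt = ERR_PTR(-ENOMEM);	/* repurposed only after the frees */
	return PTR_ERR(sgt);
}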
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ffe26547328d..7e06657ae58b 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -9,10 +9,15 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/gpio.h>
#include <linux/hdmi.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
@@ -31,7 +36,7 @@ struct tegra_hdmi_config {
unsigned int num_tmds;
unsigned long fuse_override_offset;
- unsigned long fuse_override_value;
+ u32 fuse_override_value;
bool has_sor_io_peak_current;
};
@@ -40,7 +45,6 @@ struct tegra_hdmi {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
- bool enabled;
struct regulator *hdmi;
struct regulator *pll;
@@ -85,16 +89,16 @@ enum {
HDA,
};
-static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
- unsigned long reg)
+static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+ unsigned long offset)
{
- return readl(hdmi->regs + (reg << 2));
+ return readl(hdmi->regs + (offset << 2));
}
-static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
- unsigned long reg)
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value,
+ unsigned long offset)
{
- writel(val, hdmi->regs + (reg << 2));
+ writel(value, hdmi->regs + (offset << 2));
}
struct tegra_hdmi_audio_config {
@@ -455,8 +459,8 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
unsigned int f = freqs[i];
unsigned int eight_half;
- unsigned long value;
unsigned int delta;
+ u32 value;
if (f > 96000)
delta = 2;
@@ -477,7 +481,7 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
struct device_node *node = hdmi->dev->of_node;
const struct tegra_hdmi_audio_config *config;
unsigned int offset = 0;
- unsigned long value;
+ u32 value;
switch (hdmi->audio_source) {
case HDA:
@@ -571,9 +575,9 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
return 0;
}
-static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size)
+static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
{
- unsigned long value = 0;
+ u32 value = 0;
size_t i;
for (i = size; i > 0; i--)
@@ -587,8 +591,8 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
{
const u8 *ptr = data;
unsigned long offset;
- unsigned long value;
size_t i, j;
+ u32 value;
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -707,9 +711,9 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
{
struct hdmi_vendor_infoframe frame;
- unsigned long value;
u8 buffer[10];
ssize_t err;
+ u32 value;
if (!hdmi->stereo) {
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
@@ -738,7 +742,7 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
const struct tmds_config *tmds)
{
- unsigned long value;
+ u32 value;
tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
@@ -768,21 +772,78 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
return drm_detect_hdmi_monitor(edid);
}
-static int tegra_output_hdmi_enable(struct tegra_output *output)
+static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+}
+
+static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
+ .dpms = tegra_hdmi_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = tegra_output_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static enum drm_mode_status
+tegra_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long pclk = mode->clock * 1000;
+ enum drm_mode_status status = MODE_OK;
+ struct clk *parent;
+ long err;
+
+ parent = clk_get_parent(hdmi->clk_parent);
+
+ err = clk_round_rate(parent, pclk * 4);
+ if (err <= 0)
+ status = MODE_NOCLOCK;
+
+ return status;
+}
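The connector-level mode filter above only checks clock feasibility: the parent of the HDMI clock must be able to round to four times the pixel clock. For a standard 1080p60 mode (148.5 MHz pixel clock, a CEA-861 figure used purely as an example, not a value from the patch) that amounts to asking for 594 MHz:

#include <stdio.h>

int main(void)
{
	unsigned long mode_clock = 148500;	/* kHz, 1080p60 */
	unsigned long pclk = mode_clock * 1000;	/* Hz */

	/* the rate passed to clk_round_rate() on the parent clock */
	printf("requested parent rate: %lu Hz\n", pclk * 4);
	return 0;
}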
+
+static const struct drm_connector_helper_funcs
+tegra_hdmi_connector_helper_funcs = {
+ .get_modes = tegra_output_connector_get_modes,
+ .mode_valid = tegra_hdmi_connector_mode_valid,
+ .best_encoder = tegra_output_connector_best_encoder,
+};
+
+static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
+ .destroy = tegra_output_encoder_destroy,
+};
+
+static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
{
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct device_node *node = output->dev->of_node;
struct tegra_hdmi *hdmi = to_hdmi(output);
- struct device_node *node = hdmi->dev->of_node;
unsigned int pulse_start, div82, pclk;
- unsigned long value;
int retries = 1000;
+ u32 value;
int err;
- if (hdmi->enabled)
- return 0;
-
hdmi->dvi = !tegra_output_is_hdmi(output);
pclk = mode->clock * 1000;
@@ -790,32 +851,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = regulator_enable(hdmi->pll);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
- return err;
- }
-
- err = regulator_enable(hdmi->vdd);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
- }
-
- err = clk_set_rate(hdmi->clk, pclk);
- if (err < 0)
- return err;
-
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- reset_control_assert(hdmi->rst);
- usleep_range(1000, 2000);
- reset_control_deassert(hdmi->rst);
-
/* power up sequence */
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
value &= ~SOR_PLL_PDBG;
@@ -987,123 +1022,57 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
value |= HDMI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- value |= DISP_CTRL_MODE_C_DISPLAY;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ tegra_dc_commit(dc);
/* TODO: add HDCP support */
-
- hdmi->enabled = true;
-
- return 0;
}
-static int tegra_output_hdmi_disable(struct tegra_output *output)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- struct tegra_hdmi *hdmi = to_hdmi(output);
- unsigned long value;
-
- if (!hdmi->enabled)
- return 0;
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ u32 value;
/*
* The following accesses registers of the display controller, so make
* sure it's only executed when the output is attached to one.
*/
if (dc) {
- /*
- * XXX: We can't do this here because it causes HDMI to go
- * into an erroneous state with the result that HDMI won't
- * properly work once disabled. See also a similar symptom
- * for the SOR output.
- */
- /*
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
- */
-
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~HDMI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ tegra_dc_commit(dc);
}
-
- clk_disable_unprepare(hdmi->clk);
- reset_control_assert(hdmi->rst);
- regulator_disable(hdmi->vdd);
- regulator_disable(hdmi->pll);
-
- hdmi->enabled = false;
-
- return 0;
}
-static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk,
- unsigned int *div)
+static int
+tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_hdmi *hdmi = to_hdmi(output);
int err;
- err = clk_set_parent(clk, hdmi->clk_parent);
+ err = tegra_dc_state_setup_clock(dc, crtc_state, hdmi->clk_parent,
+ pclk, 0);
if (err < 0) {
- dev_err(output->dev, "failed to set parent: %d\n", err);
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
- err = clk_set_rate(hdmi->clk_parent, pclk);
- if (err < 0)
- dev_err(output->dev, "failed to set clock rate to %lu Hz\n",
- pclk);
-
- *div = 0;
-
- return 0;
-}
-
-static int tegra_output_hdmi_check_mode(struct tegra_output *output,
- struct drm_display_mode *mode,
- enum drm_mode_status *status)
-{
- struct tegra_hdmi *hdmi = to_hdmi(output);
- unsigned long pclk = mode->clock * 1000;
- struct clk *parent;
- long err;
-
- parent = clk_get_parent(hdmi->clk_parent);
-
- err = clk_round_rate(parent, pclk * 4);
- if (err <= 0)
- *status = MODE_NOCLOCK;
- else
- *status = MODE_OK;
-
- return 0;
+ return err;
}
-static const struct tegra_output_ops hdmi_ops = {
- .enable = tegra_output_hdmi_enable,
- .disable = tegra_output_hdmi_disable,
- .setup_clock = tegra_output_hdmi_setup_clock,
- .check_mode = tegra_output_hdmi_check_mode,
+static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
+ .dpms = tegra_hdmi_encoder_dpms,
+ .prepare = tegra_hdmi_encoder_prepare,
+ .commit = tegra_hdmi_encoder_commit,
+ .mode_set = tegra_hdmi_encoder_mode_set,
+ .disable = tegra_hdmi_encoder_disable,
+ .atomic_check = tegra_hdmi_encoder_atomic_check,
};
static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
@@ -1117,8 +1086,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
return err;
#define DUMP_REG(name) \
- seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
- tegra_hdmi_readl(hdmi, name))
+ seq_printf(s, "%-56s %#05x %08x\n", #name, name, \
+ tegra_hdmi_readl(hdmi, name))
DUMP_REG(HDMI_CTXSW);
DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
@@ -1330,7 +1299,7 @@ remove:
return err;
}
-static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+static void tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
{
drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
hdmi->minor);
@@ -1341,8 +1310,6 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
debugfs_remove(hdmi->debugfs);
hdmi->debugfs = NULL;
-
- return 0;
}
static int tegra_hdmi_init(struct host1x_client *client)
@@ -1351,16 +1318,32 @@ static int tegra_hdmi_init(struct host1x_client *client)
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
- hdmi->output.type = TEGRA_OUTPUT_HDMI;
hdmi->output.dev = client->dev;
- hdmi->output.ops = &hdmi_ops;
+
+ drm_connector_init(drm, &hdmi->output.connector,
+ &tegra_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(&hdmi->output.connector,
+ &tegra_hdmi_connector_helper_funcs);
+ hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&hdmi->output.encoder,
+ &tegra_hdmi_encoder_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&hdmi->output.connector,
+ &hdmi->output.encoder);
+ drm_connector_register(&hdmi->output.connector);
err = tegra_output_init(drm, &hdmi->output);
if (err < 0) {
- dev_err(client->dev, "output setup failed: %d\n", err);
+ dev_err(client->dev, "failed to initialize output: %d\n", err);
return err;
}
+ hdmi->output.encoder.possible_crtcs = 0x3;
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
if (err < 0)
@@ -1374,34 +1357,44 @@ static int tegra_hdmi_init(struct host1x_client *client)
return err;
}
+ err = regulator_enable(hdmi->pll);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ reset_control_deassert(hdmi->rst);
+
return 0;
}
static int tegra_hdmi_exit(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
- int err;
- regulator_disable(hdmi->hdmi);
+ tegra_output_exit(&hdmi->output);
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_hdmi_debugfs_exit(hdmi);
- if (err < 0)
- dev_err(client->dev, "debugfs cleanup failed: %d\n",
- err);
- }
+ clk_disable_unprepare(hdmi->clk);
+ reset_control_assert(hdmi->rst);
- err = tegra_output_disable(&hdmi->output);
- if (err < 0) {
- dev_err(client->dev, "output failed to disable: %d\n", err);
- return err;
- }
+ regulator_disable(hdmi->vdd);
+ regulator_disable(hdmi->pll);
+ regulator_disable(hdmi->hdmi);
- err = tegra_output_exit(&hdmi->output);
- if (err < 0) {
- dev_err(client->dev, "output cleanup failed: %d\n", err);
- return err;
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_hdmi_debugfs_exit(hdmi);
return 0;
}
@@ -1559,11 +1552,7 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
return err;
}
- err = tegra_output_remove(&hdmi->output);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to remove output: %d\n", err);
- return err;
- }
+ tegra_output_remove(&hdmi->output);
clk_disable_unprepare(hdmi->clk_parent);
clk_disable_unprepare(hdmi->clk);
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
index 486d19d589c8..ba2ae6511957 100644
--- a/drivers/gpu/drm/tegra/mipi-phy.c
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -12,9 +12,9 @@
#include "mipi-phy.h"
/*
- * Default D-PHY timings based on MIPI D-PHY specification. Derived from
- * the valid ranges specified in Section 5.9 of the D-PHY specification
- * with minor adjustments.
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from the
+ * valid ranges specified in Section 6.9, Table 14, Page 40 of the D-PHY
+ * specification (v1.2) with minor adjustments.
*/
int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
unsigned long period)
@@ -34,7 +34,20 @@ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
timing->hszero = 145 + 5 * period;
timing->hssettle = 85 + 6 * period;
timing->hsskip = 40;
- timing->hstrail = max(8 * period, 60 + 4 * period);
+
+ /*
+ * The MIPI D-PHY specification (Section 6.9, v1.2, Table 14, Page 40)
+ * gives this formula as:
+ *
+ * T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period)
+ *
+ * where n = 1 for forward-direction HS mode and n = 4 for reverse-
+ * direction HS mode. There's only one setting and this function does
+ * not parameterize on anything other than the period, so this code
+ * assumes that reverse-direction HS mode is supported and uses n = 4.
+ */
+ timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period);
+
timing->init = 100000;
timing->lpx = 60;
timing->taget = 5 * timing->lpx;
@@ -46,8 +59,8 @@ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
}
/*
- * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY,
- * Section 5.9 "Global Operation Timing Parameters".
+ * Validate D-PHY timing according to the MIPI D-PHY specification (v1.2,
+ * Section 6.9 "Global Operation Timing Parameters").
*/
int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
unsigned long period)
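As a worked example of the new T_HS-TRAIL default (the 2 ns unit interval is an assumption for illustration, not a value from the patch): with n = 1 the formula gives max(16, 68) = 68 ns, with n = 4 it gives max(64, 92) = 92 ns, so using n = 4 always yields the longer, safer trail time. A tiny self-contained check:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long period = 2;	/* ns per unit interval, illustrative */
	unsigned long forward = max(1 * 8 * period, 60 + 1 * 4 * period);
	unsigned long reverse = max(4 * 8 * period, 60 + 4 * 4 * period);

	printf("T_HS-TRAIL: n=1 -> %lu ns, n=4 -> %lu ns\n", forward, reverse);
	return 0;
}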
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 6a5c7b81fbc5..37db47975d48 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -9,10 +9,11 @@
#include <linux/of_gpio.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
#include "drm.h"
-static int tegra_connector_get_modes(struct drm_connector *connector)
+int tegra_output_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
struct edid *edid = NULL;
@@ -43,43 +44,20 @@ static int tegra_connector_get_modes(struct drm_connector *connector)
return err;
}
-static int tegra_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tegra_output *output = connector_to_output(connector);
- enum drm_mode_status status = MODE_OK;
- int err;
-
- err = tegra_output_check_mode(output, mode, &status);
- if (err < 0)
- return MODE_ERROR;
-
- return status;
-}
-
-static struct drm_encoder *
-tegra_connector_best_encoder(struct drm_connector *connector)
+struct drm_encoder *
+tegra_output_connector_best_encoder(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
return &output->encoder;
}
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = tegra_connector_get_modes,
- .mode_valid = tegra_connector_mode_valid,
- .best_encoder = tegra_connector_best_encoder,
-};
-
-static enum drm_connector_status
-tegra_connector_detect(struct drm_connector *connector, bool force)
+enum drm_connector_status
+tegra_output_connector_detect(struct drm_connector *connector, bool force)
{
struct tegra_output *output = connector_to_output(connector);
enum drm_connector_status status = connector_status_unknown;
- if (output->ops->detect)
- return output->ops->detect(output);
-
if (gpio_is_valid(output->hpd_gpio)) {
if (gpio_get_value(output->hpd_gpio) == 0)
status = connector_status_disconnected;
@@ -90,95 +68,22 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
status = connector_status_disconnected;
else
status = connector_status_connected;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
- status = connector_status_connected;
}
return status;
}
-static void drm_connector_clear(struct drm_connector *connector)
-{
- memset(connector, 0, sizeof(*connector));
-}
-
-static void tegra_connector_destroy(struct drm_connector *connector)
+void tegra_output_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- drm_connector_clear(connector);
}
-static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .detect = tegra_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = tegra_connector_destroy,
-};
-
-static void drm_encoder_clear(struct drm_encoder *encoder)
-{
- memset(encoder, 0, sizeof(*encoder));
-}
-
-static void tegra_encoder_destroy(struct drm_encoder *encoder)
+void tegra_output_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
- drm_encoder_clear(encoder);
}
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = tegra_encoder_destroy,
-};
-
-static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct drm_panel *panel = output->panel;
-
- if (mode != DRM_MODE_DPMS_ON) {
- drm_panel_disable(panel);
- tegra_output_disable(output);
- drm_panel_unprepare(panel);
- } else {
- drm_panel_prepare(panel);
- tegra_output_enable(output);
- drm_panel_enable(panel);
- }
-}
-
-static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
-{
- return true;
-}
-
-static void tegra_encoder_prepare(struct drm_encoder *encoder)
-{
- tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tegra_encoder_commit(struct drm_encoder *encoder)
-{
- tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static void tegra_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
-{
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = tegra_encoder_dpms,
- .mode_fixup = tegra_encoder_mode_fixup,
- .prepare = tegra_encoder_prepare,
- .commit = tegra_encoder_commit,
- .mode_set = tegra_encoder_mode_set,
-};
-
static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
@@ -268,7 +173,7 @@ int tegra_output_probe(struct tegra_output *output)
return 0;
}
-int tegra_output_remove(struct tegra_output *output)
+void tegra_output_remove(struct tegra_output *output)
{
if (gpio_is_valid(output->hpd_gpio)) {
free_irq(output->hpd_irq, output);
@@ -277,56 +182,17 @@ int tegra_output_remove(struct tegra_output *output)
if (output->ddc)
put_device(&output->ddc->dev);
-
- return 0;
}
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
- int connector, encoder;
-
- switch (output->type) {
- case TEGRA_OUTPUT_RGB:
- connector = DRM_MODE_CONNECTOR_LVDS;
- encoder = DRM_MODE_ENCODER_LVDS;
- break;
-
- case TEGRA_OUTPUT_HDMI:
- connector = DRM_MODE_CONNECTOR_HDMIA;
- encoder = DRM_MODE_ENCODER_TMDS;
- break;
-
- case TEGRA_OUTPUT_DSI:
- connector = DRM_MODE_CONNECTOR_DSI;
- encoder = DRM_MODE_ENCODER_DSI;
- break;
-
- case TEGRA_OUTPUT_EDP:
- connector = DRM_MODE_CONNECTOR_eDP;
- encoder = DRM_MODE_ENCODER_TMDS;
- break;
-
- default:
- connector = DRM_MODE_CONNECTOR_Unknown;
- encoder = DRM_MODE_ENCODER_NONE;
- break;
- }
-
- drm_connector_init(drm, &output->connector, &connector_funcs,
- connector);
- drm_connector_helper_add(&output->connector, &connector_helper_funcs);
- output->connector.dpms = DRM_MODE_DPMS_OFF;
-
- if (output->panel)
- drm_panel_attach(output->panel, &output->connector);
-
- drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
- drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
-
- drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
- drm_connector_register(&output->connector);
+ int err;
- output->encoder.possible_crtcs = 0x3;
+ if (output->panel) {
+ err = drm_panel_attach(output->panel, &output->connector);
+ if (err < 0)
+ return err;
+ }
/*
* The connector is now registered and ready to receive hotplug events
@@ -338,7 +204,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
return 0;
}
-int tegra_output_exit(struct tegra_output *output)
+void tegra_output_exit(struct tegra_output *output)
{
/*
* The connector is going away, so the interrupt must be disabled to
@@ -349,6 +215,4 @@ int tegra_output_exit(struct tegra_output *output)
if (output->panel)
drm_panel_detach(output->panel);
-
- return 0;
}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index d6af9be48f42..7cd833f5b5b5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -9,6 +9,9 @@
#include <linux/clk.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_panel.h>
+
#include "drm.h"
#include "dc.h"
@@ -85,13 +88,65 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
tegra_dc_writel(dc, table[i].value, table[i].offset);
}
-static int tegra_output_rgb_enable(struct tegra_output *output)
+static void tegra_rgb_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+}
+
+static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
+ .dpms = tegra_rgb_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = tegra_output_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static enum drm_mode_status
+tegra_rgb_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay. There are
+ * unresolved issues with clk_round_rate(), which doesn't always
+ * reliably report whether a frequency can be set or not.
+ */
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
+ .get_modes = tegra_output_connector_get_modes,
+ .mode_valid = tegra_rgb_connector_mode_valid,
+ .best_encoder = tegra_output_connector_best_encoder,
+};
+
+static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
+ .destroy = tegra_output_encoder_destroy,
+};
+
+static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
{
+}
+
+static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
- unsigned long value;
+ u32 value;
- if (rgb->enabled)
- return 0;
+ if (output->panel)
+ drm_panel_prepare(output->panel);
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
@@ -113,64 +168,39 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
- value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- value |= DISP_CTRL_MODE_C_DISPLAY;
- tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_commit(rgb->dc);
- value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
- tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- rgb->enabled = true;
-
- return 0;
+ if (output->panel)
+ drm_panel_enable(output->panel);
}
-static int tegra_output_rgb_disable(struct tegra_output *output)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
+ struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
- unsigned long value;
-
- if (!rgb->enabled)
- return 0;
-
- value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
- tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
- value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
- tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ if (output->panel)
+ drm_panel_disable(output->panel);
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_commit(rgb->dc);
- rgb->enabled = false;
-
- return 0;
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
}
-static int tegra_output_rgb_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk,
- unsigned int *div)
+static int
+tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_rgb *rgb = to_rgb(output);
+ unsigned int div;
int err;
- err = clk_set_parent(clk, rgb->clk_parent);
- if (err < 0) {
- dev_err(output->dev, "failed to set parent: %d\n", err);
- return err;
- }
-
/*
* We may not want to change the frequency of the parent clock, since
* it may be a parent for other peripherals. This is due to the fact
@@ -187,32 +217,26 @@ static int tegra_output_rgb_setup_clock(struct tegra_output *output,
* and hope that the desired frequency can be matched (or at least
* matched sufficiently close that the panel will still work).
*/
+ div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
+ pclk = 0;
- *div = ((clk_get_rate(clk) * 2) / pclk) - 2;
-
- return 0;
-}
-
-static int tegra_output_rgb_check_mode(struct tegra_output *output,
- struct drm_display_mode *mode,
- enum drm_mode_status *status)
-{
- /*
- * FIXME: For now, always assume that the mode is okay. There are
- * unresolved issues with clk_round_rate(), which doesn't always
- * reliably report whether a frequency can be set or not.
- */
-
- *status = MODE_OK;
+ err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent,
+ pclk, div);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
- return 0;
+ return err;
}
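The RGB atomic check deliberately requests no parent-clock rate change (pclk is forced to 0) and only programs the shift clock divider; inverting the expression, the achieved pixel clock is rate * 2 / (div + 2). A worked example with assumed rates (216 MHz display clock, 72 MHz panel clock; neither number comes from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 216000000UL;	/* assumed current rgb->clk rate */
	unsigned long pclk = 72000000UL;	/* assumed desired pixel clock */
	unsigned long div = ((rate * 2) / pclk) - 2;

	/* achieved pixel clock, from inverting the divider formula */
	printf("div=%lu -> %lu Hz\n", div, (rate * 2) / (div + 2));
	return 0;
}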
-static const struct tegra_output_ops rgb_ops = {
- .enable = tegra_output_rgb_enable,
- .disable = tegra_output_rgb_disable,
- .setup_clock = tegra_output_rgb_setup_clock,
- .check_mode = tegra_output_rgb_check_mode,
+static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
+ .dpms = tegra_rgb_encoder_dpms,
+ .prepare = tegra_rgb_encoder_prepare,
+ .commit = tegra_rgb_encoder_commit,
+ .mode_set = tegra_rgb_encoder_mode_set,
+ .disable = tegra_rgb_encoder_disable,
+ .atomic_check = tegra_rgb_encoder_atomic_check,
};
int tegra_dc_rgb_probe(struct tegra_dc *dc)
@@ -262,64 +286,58 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int tegra_dc_rgb_remove(struct tegra_dc *dc)
{
- int err;
-
if (!dc->rgb)
return 0;
- err = tegra_output_remove(dc->rgb);
- if (err < 0)
- return err;
+ tegra_output_remove(dc->rgb);
+ dc->rgb = NULL;
return 0;
}
int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
{
- struct tegra_rgb *rgb = to_rgb(dc->rgb);
+ struct tegra_output *output = dc->rgb;
int err;
if (!dc->rgb)
return -ENODEV;
- rgb->output.type = TEGRA_OUTPUT_RGB;
- rgb->output.ops = &rgb_ops;
+ drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(&output->connector,
+ &tegra_rgb_connector_helper_funcs);
+ output->connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(&output->encoder,
+ &tegra_rgb_encoder_helper_funcs);
- err = tegra_output_init(dc->base.dev, &rgb->output);
+ drm_mode_connector_attach_encoder(&output->connector,
+ &output->encoder);
+ drm_connector_register(&output->connector);
+
+ err = tegra_output_init(drm, output);
if (err < 0) {
- dev_err(dc->dev, "output setup failed: %d\n", err);
+ dev_err(output->dev, "failed to initialize output: %d\n", err);
return err;
}
/*
- * By default, outputs can be associated with each display controller.
- * RGB outputs are an exception, so we make sure they can be attached
- * to only their parent display controller.
+ * Other outputs can be attached to either display controller. The RGB
+ * outputs are an exception and work only with their parent display
+ * controller.
*/
- rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base);
+ output->encoder.possible_crtcs = drm_crtc_mask(&dc->base);
return 0;
}
int tegra_dc_rgb_exit(struct tegra_dc *dc)
{
- if (dc->rgb) {
- int err;
-
- err = tegra_output_disable(dc->rgb);
- if (err < 0) {
- dev_err(dc->dev, "output failed to disable: %d\n", err);
- return err;
- }
-
- err = tegra_output_exit(dc->rgb);
- if (err < 0) {
- dev_err(dc->dev, "output cleanup failed: %d\n", err);
- return err;
- }
-
- dc->rgb = NULL;
- }
+ if (dc->rgb)
+ tegra_output_exit(dc->rgb);
return 0;
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7829e81f065d..2afe478ded3b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -8,13 +8,16 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
#include "dc.h"
#include "drm.h"
@@ -258,18 +261,8 @@ static int tegra_sor_attach(struct tegra_sor *sor)
static int tegra_sor_wakeup(struct tegra_sor *sor)
{
- struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
unsigned long value, timeout;
- /* enable display controller outputs */
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
timeout = jiffies + msecs_to_jiffies(250);
/* wait for head to wake up */
@@ -482,10 +475,317 @@ static int tegra_sor_calc_config(struct tegra_sor *sor,
return 0;
}
-static int tegra_output_sor_enable(struct tegra_output *output)
+static int tegra_sor_detach(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+
+ /* switch to safe mode */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value &= ~SOR_SUPER_STATE_MODE_NORMAL;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if (value & SOR_PWR_MODE_SAFE)
+ break;
+ }
+
+ if ((value & SOR_PWR_MODE_SAFE) == 0)
+ return -ETIMEDOUT;
+
+ /* go to sleep */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ /* detach */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value &= ~SOR_SUPER_STATE_ATTACHED;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ if ((value & SOR_TEST_ATTACHED) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_TEST_ATTACHED) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+ int err;
+
+ value = tegra_sor_readl(sor, SOR_PWR);
+ value &= ~SOR_PWR_NORMAL_STATE_PU;
+ value |= SOR_PWR_TRIGGER;
+ tegra_sor_writel(sor, value, SOR_PWR);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if ((value & SOR_PWR_TRIGGER) == 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_PWR_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ err = clk_set_parent(sor->clk, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+
+ /* stop lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value |= SOR_PLL_2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL_0);
+ value |= SOR_PLL_0_POWER_OFF;
+ value |= SOR_PLL_0_VCOPD;
+ tegra_sor_writel(sor, value, SOR_PLL_0);
+
+ value = tegra_sor_readl(sor, SOR_PLL_2);
+ value |= SOR_PLL_2_SEQ_PLLCAPPD;
+ value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL_2);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_crc_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static int tegra_sor_crc_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_CRC_A);
+ if (value & SOR_CRC_A_VALID)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
+ size_t size, loff_t *ppos)
+{
+ struct tegra_sor *sor = file->private_data;
+ ssize_t num, err;
+ char buf[10];
+ u32 value;
+
+ mutex_lock(&sor->lock);
+
+ if (!sor->enabled) {
+ err = -EAGAIN;
+ goto unlock;
+ }
+
+ value = tegra_sor_readl(sor, SOR_STATE_1);
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE_1);
+
+ value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
+ value |= SOR_CRC_CNTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_CRC_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_TEST);
+ value &= ~SOR_TEST_CRC_POST_SERIALIZE;
+ tegra_sor_writel(sor, value, SOR_TEST);
+
+ err = tegra_sor_crc_wait(sor, 100);
+ if (err < 0)
+ goto unlock;
+
+ tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
+ value = tegra_sor_readl(sor, SOR_CRC_B);
+
+ num = scnprintf(buf, sizeof(buf), "%08x\n", value);
+
+ err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+
+unlock:
+ mutex_unlock(&sor->lock);
+ return err;
+}
+
+static const struct file_operations tegra_sor_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_sor_crc_open,
+ .read = tegra_sor_crc_read,
+ .release = tegra_sor_crc_release,
+};
+
+static int tegra_sor_debugfs_init(struct tegra_sor *sor,
+ struct drm_minor *minor)
+{
+ struct dentry *entry;
+ int err = 0;
+
+ sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+ if (!sor->debugfs)
+ return -ENOMEM;
+
+ entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
+ &tegra_sor_crc_fops);
+ if (!entry) {
+ dev_err(sor->dev,
+ "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
+ minor->debugfs_root->d_name.name);
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ return err;
+
+remove:
+ debugfs_remove(sor->debugfs);
+ sor->debugfs = NULL;
+ return err;
+}
+
+static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
+{
+ debugfs_remove_recursive(sor->debugfs);
+ sor->debugfs = NULL;
+}
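The CRC file registered above is read from userspace; a minimal sketch follows. The DRM minor number 0 and the default debugfs mount point are assumptions, and the driver returns -EAGAIN while the SOR output is disabled.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = { 0 };
	int fd = open("/sys/kernel/debug/dri/0/sor/crc", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("SOR CRC: %s", buf);

	close(fd);
	return 0;
}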
+
+static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static enum drm_connector_status
+tegra_sor_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_sor *sor = to_sor(output);
+
+ if (sor->dpaux)
+ return tegra_dpaux_detect(sor->dpaux);
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs tegra_sor_connector_funcs = {
+ .dpms = tegra_sor_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = tegra_sor_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tegra_sor_connector_get_modes(struct drm_connector *connector)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_sor *sor = to_sor(output);
+ int err;
+
+ if (sor->dpaux)
+ tegra_dpaux_enable(sor->dpaux);
+
+ err = tegra_output_connector_get_modes(connector);
+
+ if (sor->dpaux)
+ tegra_dpaux_disable(sor->dpaux);
+
+ return err;
+}
+
+static enum drm_mode_status
+tegra_sor_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
+ .get_modes = tegra_sor_connector_get_modes,
+ .mode_valid = tegra_sor_connector_mode_valid,
+ .best_encoder = tegra_output_connector_best_encoder,
+};
+
+static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
+ .destroy = tegra_output_encoder_destroy,
+};
+
+static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static void tegra_sor_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_sor_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
@@ -505,6 +805,9 @@ static int tegra_output_sor_enable(struct tegra_output *output)
reset_control_deassert(sor->rst);
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
/* FIXME: properly convert to struct drm_dp_aux */
aux = (struct drm_dp_aux *)sor->dpaux;
@@ -800,18 +1103,6 @@ static int tegra_output_sor_enable(struct tegra_output *output)
goto unlock;
}
- /* start display controller in continuous mode */
- value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
- value |= WRITE_MUX;
- tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS);
-
- tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
- tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
-
- value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
- value &= ~WRITE_MUX;
- tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS);
-
/*
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
* raster, associate with display controller)
@@ -886,11 +1177,13 @@ static int tegra_output_sor_enable(struct tegra_output *output)
goto unlock;
}
+ tegra_sor_update(sor);
+
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= SOR_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- tegra_sor_update(sor);
+ tegra_dc_commit(dc);
err = tegra_sor_attach(sor);
if (err < 0) {
@@ -904,145 +1197,31 @@ static int tegra_output_sor_enable(struct tegra_output *output)
goto unlock;
}
+ if (output->panel)
+ drm_panel_enable(output->panel);
+
sor->enabled = true;
unlock:
mutex_unlock(&sor->lock);
- return err;
}
-static int tegra_sor_detach(struct tegra_sor *sor)
+static void tegra_sor_encoder_disable(struct drm_encoder *encoder)
{
- unsigned long value, timeout;
-
- /* switch to safe mode */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
- value &= ~SOR_SUPER_STATE_MODE_NORMAL;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
- tegra_sor_super_update(sor);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_PWR);
- if (value & SOR_PWR_MODE_SAFE)
- break;
- }
-
- if ((value & SOR_PWR_MODE_SAFE) == 0)
- return -ETIMEDOUT;
-
- /* go to sleep */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
- value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
- tegra_sor_super_update(sor);
-
- /* detach */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
- value &= ~SOR_SUPER_STATE_ATTACHED;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
- tegra_sor_super_update(sor);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_TEST);
- if ((value & SOR_TEST_ATTACHED) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_TEST_ATTACHED) != 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int tegra_sor_power_down(struct tegra_sor *sor)
-{
- unsigned long value, timeout;
- int err;
-
- value = tegra_sor_readl(sor, SOR_PWR);
- value &= ~SOR_PWR_NORMAL_STATE_PU;
- value |= SOR_PWR_TRIGGER;
- tegra_sor_writel(sor, value, SOR_PWR);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_PWR);
- if ((value & SOR_PWR_TRIGGER) == 0)
- return 0;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_PWR_TRIGGER) != 0)
- return -ETIMEDOUT;
-
- err = clk_set_parent(sor->clk, sor->clk_safe);
- if (err < 0)
- dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
-
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
-
- /* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
- SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
- return -ETIMEDOUT;
-
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
-
- usleep_range(20, 100);
-
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value |= SOR_PLL_0_POWER_OFF;
- value |= SOR_PLL_0_VCOPD;
- tegra_sor_writel(sor, value, SOR_PLL_0);
-
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD;
- value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
-
- usleep_range(20, 100);
-
- return 0;
-}
-
-static int tegra_output_sor_disable(struct tegra_output *output)
-{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
- unsigned long value;
- int err = 0;
+ u32 value;
+ int err;
mutex_lock(&sor->lock);
if (!sor->enabled)
goto unlock;
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
err = tegra_sor_detach(sor);
if (err < 0) {
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
@@ -1057,31 +1236,11 @@ static int tegra_output_sor_disable(struct tegra_output *output)
* sure it's only executed when the output is attached to one.
*/
if (dc) {
- /*
- * XXX: We can't do this here because it causes the SOR to go
- * into an erroneous state and the output will look scrambled
- * the next time it is enabled. Presumably this is because we
- * should be doing this only on the next VBLANK. A possible
- * solution would be to queue a "power-off" event to trigger
- * this code to be run during the next VBLANK.
- */
- /*
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
- */
-
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~SOR_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ tegra_dc_commit(dc);
}
err = tegra_sor_power_down(sor);
@@ -1104,187 +1263,48 @@ static int tegra_output_sor_disable(struct tegra_output *output)
goto unlock;
}
- reset_control_assert(sor->rst);
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
clk_disable_unprepare(sor->clk);
+ reset_control_assert(sor->rst);
sor->enabled = false;
unlock:
mutex_unlock(&sor->lock);
- return err;
}
-static int tegra_output_sor_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk,
- unsigned int *div)
+static int
+tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
int err;
- err = clk_set_parent(clk, sor->clk_parent);
- if (err < 0) {
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
- return err;
- }
-
- err = clk_set_rate(sor->clk_parent, pclk);
+ err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
+ pclk, 0);
if (err < 0) {
- dev_err(sor->dev, "failed to set clock rate to %lu Hz\n", pclk);
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
- *div = 0;
-
return 0;
}
-static int tegra_output_sor_check_mode(struct tegra_output *output,
- struct drm_display_mode *mode,
- enum drm_mode_status *status)
-{
- /*
- * FIXME: For now, always assume that the mode is okay.
- */
-
- *status = MODE_OK;
-
- return 0;
-}
-
-static enum drm_connector_status
-tegra_output_sor_detect(struct tegra_output *output)
-{
- struct tegra_sor *sor = to_sor(output);
-
- if (sor->dpaux)
- return tegra_dpaux_detect(sor->dpaux);
-
- return connector_status_unknown;
-}
-
-static const struct tegra_output_ops sor_ops = {
- .enable = tegra_output_sor_enable,
- .disable = tegra_output_sor_disable,
- .setup_clock = tegra_output_sor_setup_clock,
- .check_mode = tegra_output_sor_check_mode,
- .detect = tegra_output_sor_detect,
+static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = {
+ .dpms = tegra_sor_encoder_dpms,
+ .prepare = tegra_sor_encoder_prepare,
+ .commit = tegra_sor_encoder_commit,
+ .mode_set = tegra_sor_encoder_mode_set,
+ .disable = tegra_sor_encoder_disable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
};
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
-
- return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
-{
- u32 value;
-
- timeout = jiffies + msecs_to_jiffies(timeout);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_CRC_A);
- if (value & SOR_CRC_A_VALID)
- return 0;
-
- usleep_range(100, 200);
- }
-
- return -ETIMEDOUT;
-}
-
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
- size_t size, loff_t *ppos)
-{
- struct tegra_sor *sor = file->private_data;
- ssize_t num, err;
- char buf[10];
- u32 value;
-
- mutex_lock(&sor->lock);
-
- if (!sor->enabled) {
- err = -EAGAIN;
- goto unlock;
- }
-
- value = tegra_sor_readl(sor, SOR_STATE_1);
- value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
- tegra_sor_writel(sor, value, SOR_STATE_1);
-
- value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
- value |= SOR_CRC_CNTRL_ENABLE;
- tegra_sor_writel(sor, value, SOR_CRC_CNTRL);
-
- value = tegra_sor_readl(sor, SOR_TEST);
- value &= ~SOR_TEST_CRC_POST_SERIALIZE;
- tegra_sor_writel(sor, value, SOR_TEST);
-
- err = tegra_sor_crc_wait(sor, 100);
- if (err < 0)
- goto unlock;
-
- tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
- value = tegra_sor_readl(sor, SOR_CRC_B);
-
- num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
- err = simple_read_from_buffer(buffer, size, ppos, buf, num);
-
-unlock:
- mutex_unlock(&sor->lock);
- return err;
-}
-
-static const struct file_operations tegra_sor_crc_fops = {
- .owner = THIS_MODULE,
- .open = tegra_sor_crc_open,
- .read = tegra_sor_crc_read,
- .release = tegra_sor_crc_release,
-};
-
-static int tegra_sor_debugfs_init(struct tegra_sor *sor,
- struct drm_minor *minor)
-{
- struct dentry *entry;
- int err = 0;
-
- sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
- if (!sor->debugfs)
- return -ENOMEM;
-
- entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
- &tegra_sor_crc_fops);
- if (!entry) {
- dev_err(sor->dev,
- "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
- minor->debugfs_root->d_name.name);
- err = -ENOMEM;
- goto remove;
- }
-
- return err;
-
-remove:
- debugfs_remove(sor->debugfs);
- sor->debugfs = NULL;
- return err;
-}
-
-static int tegra_sor_debugfs_exit(struct tegra_sor *sor)
-{
- debugfs_remove_recursive(sor->debugfs);
- sor->debugfs = NULL;
-
- return 0;
-}
-
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1294,17 +1314,32 @@ static int tegra_sor_init(struct host1x_client *client)
if (!sor->dpaux)
return -ENODEV;
- sor->output.type = TEGRA_OUTPUT_EDP;
-
sor->output.dev = sor->dev;
- sor->output.ops = &sor_ops;
+
+ drm_connector_init(drm, &sor->output.connector,
+ &tegra_sor_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ drm_connector_helper_add(&sor->output.connector,
+ &tegra_sor_connector_helper_funcs);
+ sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&sor->output.encoder,
+ &tegra_sor_encoder_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&sor->output.connector,
+ &sor->output.encoder);
+ drm_connector_register(&sor->output.connector);
err = tegra_output_init(drm, &sor->output);
if (err < 0) {
- dev_err(sor->dev, "output setup failed: %d\n", err);
+ dev_err(client->dev, "failed to initialize output: %d\n", err);
return err;
}
+ sor->output.encoder.possible_crtcs = 0x3;
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_sor_debugfs_init(sor, drm->primary);
if (err < 0)
@@ -1319,6 +1354,20 @@ static int tegra_sor_init(struct host1x_client *client)
}
}
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(sor->clk_safe);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(sor->clk_dp);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -1327,11 +1376,7 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
- err = tegra_output_disable(&sor->output);
- if (err < 0) {
- dev_err(sor->dev, "output failed to disable: %d\n", err);
- return err;
- }
+ tegra_output_exit(&sor->output);
if (sor->dpaux) {
err = tegra_dpaux_detach(sor->dpaux);
@@ -1341,17 +1386,12 @@ static int tegra_sor_exit(struct host1x_client *client)
}
}
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_sor_debugfs_exit(sor);
- if (err < 0)
- dev_err(sor->dev, "debugfs cleanup failed: %d\n", err);
- }
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk_dp);
+ clk_disable_unprepare(sor->clk);
- err = tegra_output_exit(&sor->output);
- if (err < 0) {
- dev_err(sor->dev, "output cleanup failed: %d\n", err);
- return err;
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_sor_debugfs_exit(sor);
return 0;
}
@@ -1404,26 +1444,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (IS_ERR(sor->clk_parent))
return PTR_ERR(sor->clk_parent);
- err = clk_prepare_enable(sor->clk_parent);
- if (err < 0)
- return err;
-
sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
if (IS_ERR(sor->clk_safe))
return PTR_ERR(sor->clk_safe);
- err = clk_prepare_enable(sor->clk_safe);
- if (err < 0)
- return err;
-
sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(sor->clk_dp))
return PTR_ERR(sor->clk_dp);
- err = clk_prepare_enable(sor->clk_dp);
- if (err < 0)
- return err;
-
INIT_LIST_HEAD(&sor->client.list);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
@@ -1454,10 +1482,7 @@ static int tegra_sor_remove(struct platform_device *pdev)
return err;
}
- clk_disable_unprepare(sor->clk_parent);
- clk_disable_unprepare(sor->clk_safe);
- clk_disable_unprepare(sor->clk_dp);
- clk_disable_unprepare(sor->clk);
+ tegra_output_remove(&sor->output);
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7c3ef79fcb37..8394a0b3993e 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -1,6 +1,6 @@
config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
- depends on DRM && OF && ARM
+ depends on DRM && OF && ARM && HAVE_DMA_ATTRS
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 8cbcb4589bd3..5fc16cecd3ba 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -589,19 +589,27 @@ int udl_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &ufbdev->helper,
1, 1);
- if (ret) {
- kfree(ufbdev);
- return ret;
-
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&ufbdev->helper);
+free:
+ kfree(ufbdev);
+ return ret;
}
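
The udl_fbdev_init() hunk above converts the error handling to the kernel's usual goto-unwind idiom: each failure point jumps to a label that releases only what was already set up, in reverse order. A minimal standalone sketch of the same idiom follows; the names (bringup, state) and the libc calls are illustrative stand-ins, not the DRM helpers themselves.

#include <errno.h>
#include <stdlib.h>

/* Two-stage bring-up mirroring helper_init -> initial_config, with unwind labels. */
static int bringup(int fail_stage2)
{
	int ret = 0;
	char *state = malloc(64);		/* stage 1: allocate */

	if (!state)
		return -ENOMEM;

	if (fail_stage2) {			/* stage 2: may fail */
		ret = -EIO;
		goto free;			/* unwind stage 1 only */
	}

	return 0;	/* success: 'state' stays alive, as ufbdev does */

free:
	free(state);
	return ret;
}

int main(void)
{
	if (bringup(0) != 0)			/* success path keeps the allocation */
		return 1;
	return bringup(1) == -EIO ? 0 : 1;	/* failure path unwinds it */
}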
void udl_fbdev_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 1701f1dfb23f..677190a65e82 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -340,11 +340,11 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
wrptr = udl_dummy_render(wrptr);
- ufb->active_16 = true;
if (old_fb) {
struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
uold_fb->active_16 = false;
}
+ ufb->active_16 = true;
udl->mode_buf_len = wrptr - buf;
/* damage all of it */
@@ -373,6 +373,13 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
unsigned long flags;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ if (old_fb) {
+ struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
+ uold_fb->active_16 = false;
+ }
+ ufb->active_16 = true;
+
udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
spin_lock_irqsave(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index f343db73e095..917dcb978c2c 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -82,12 +82,14 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
((pixel >> 8) & 0xf800));
}
-static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
+static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
{
+ u16 pixel_val16 = 0;
if (bpp == 2)
- return *(const uint16_t *)pixel == repeat;
- else
- return *(const uint32_t *)pixel == repeat;
+ pixel_val16 = *(const uint16_t *)pixel;
+ else if (bpp == 4)
+ pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
+ return pixel_val16;
}
/*
@@ -134,6 +136,7 @@ static void udl_compress_hline16(
uint8_t *cmd_pixels_count_byte = NULL;
const u8 *raw_pixel_start = NULL;
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
+ uint16_t pixel_val16;
prefetchw((void *) cmd); /* pull in one cache line at least */
@@ -154,33 +157,29 @@ static void udl_compress_hline16(
(int)(cmd_buffer_end - cmd) / 2))) * bpp;
prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+ pixel_val16 = get_pixel_val16(pixel, bpp);
while (pixel < cmd_pixel_end) {
const u8 *const start = pixel;
- u32 repeating_pixel;
-
- if (bpp == 2) {
- repeating_pixel = *(uint16_t *)pixel;
- *(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
- } else {
- repeating_pixel = *(uint32_t *)pixel;
- *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
- }
+ const uint16_t repeating_pixel_val16 = pixel_val16;
+
+ *(uint16_t *)cmd = cpu_to_be16(pixel_val16);
cmd += 2;
pixel += bpp;
- if (unlikely((pixel < cmd_pixel_end) &&
- (pixel_repeats(pixel, repeating_pixel, bpp)))) {
+ while (pixel < cmd_pixel_end) {
+ pixel_val16 = get_pixel_val16(pixel, bpp);
+ if (pixel_val16 != repeating_pixel_val16)
+ break;
+ pixel += bpp;
+ }
+
+ if (unlikely(pixel > start + bpp)) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = (((start -
raw_pixel_start) / bpp) + 1) & 0xFF;
- while ((pixel < cmd_pixel_end) &&
- (pixel_repeats(pixel, repeating_pixel, bpp))) {
- pixel += bpp;
- }
-
/* immediately after raw data is repeat byte */
*cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
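
The rewritten compression loop above reduces both pixel depths to a single 16-bit value via get_pixel_val16() and then scans forward while that value repeats, emitting a raw-pixel count followed by a repeat count. A simplified, standalone sketch of just the run-detection step, operating on a plain uint16_t buffer rather than the driver's command stream (run_length() is a hypothetical helper, not part of udl):

#include <stdint.h>
#include <stdio.h>

/* Count how many consecutive pixels, starting at 'start', equal the first one. */
static size_t run_length(const uint16_t *px, size_t start, size_t count)
{
	const uint16_t first = px[start];
	size_t i = start + 1;

	while (i < count && px[i] == first)
		i++;

	return i - start;
}

int main(void)
{
	const uint16_t line[] = { 7, 7, 7, 9, 9, 3 };

	printf("%zu\n", run_length(line, 0, 6));	/* 3 */
	printf("%zu\n", run_length(line, 3, 6));	/* 2 */
	return 0;
}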
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}
mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
+ else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
- mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->hw_lock);
+ spin_lock_init(&dev_priv->waiter_lock);
+ spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv);
- if (unlikely(ret != 0)) {
- mutex_unlock(&dev_priv->hw_mutex);
+ if (unlikely(ret != 0))
goto out_err0;
- }
/*
* Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;
- mutex_unlock(&dev_priv->hw_mutex);
-
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- mutex_unlock(&dev_priv->hw_mutex);
}
if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- mutex_unlock(&dev_priv->hw_mutex);
/**
* Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size;
bool has_gmr;
bool has_mob;
- struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ spinlock_t cap_lock;
/*
* VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ spinlock_t waiter_lock;
+ int fence_queue_waiters; /* Protected by waiter_lock */
+ int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv;
}
+/*
+ * The locking here is fine-grained, so it is taken and released once for
+ * every read and write operation. This is of course costly, but we don't
+ * perform much register access in the timing-critical paths anyway, and we
+ * get the extra benefit of never forgetting the hardware lock around
+ * register accesses.
+ */
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- uint32_t val;
+ unsigned long irq_flags;
+ u32 val;
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
return val;
}
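
With the spinlock folded into vmw_read()/vmw_write() above, callers no longer wrap individual register accesses in a lock; the accessors themselves guarantee that the write to the index port and the access to the value port happen as one indivisible pair. A userspace analogue of that pattern is sketched below, with a pthread mutex standing in for the irq-safe spinlock and a plain array standing in for the indexed register file (reg_read/reg_write and the variables are hypothetical, not the SVGA interface):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t regs[16];	/* stand-in for the indexed register file */
static uint32_t index_port;	/* stand-in for the index port */

/* Select-then-access must not be interleaved by another thread. */
static void reg_write(unsigned int offset, uint32_t value)
{
	pthread_mutex_lock(&hw_lock);
	index_port = offset & 0xf;	/* select register (demo keeps it in range) */
	regs[index_port] = value;	/* write its value */
	pthread_mutex_unlock(&hw_lock);
}

static uint32_t reg_read(unsigned int offset)
{
	uint32_t val;

	pthread_mutex_lock(&hw_lock);
	index_port = offset & 0xf;	/* select register */
	val = regs[index_port];		/* read its value */
	pthread_mutex_unlock(&hw_lock);

	return val;
}

int main(void)
{
	reg_write(3, 0xdeadbeef);
	printf("0x%08x\n", (unsigned)reg_read(3));	/* 0xdeadbeef */
	return 0;
}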
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work, ping_work;
+ struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga";
}
-static void vmw_fence_ping_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, ping_work);
-
- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- if (mutex_trylock(&dev_priv->hw_mutex)) {
- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
- } else
- schedule_work(&fman->ping_work);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
return true;
}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- (void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob)
return false;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return (result != 0);
}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
- mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
}
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ static DEFINE_SPINLOCK(ping_lock);
+ unsigned long irq_flags;
+ /*
+ * The ping_lock is needed because we don't have an atomic
+ * test-and-set of the SVGA_FIFO_BUSY register.
+ */
+ spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_fifo_ping_host_locked(dev_priv, reason);
-
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock_irqrestore(&ping_lock, irq_flags);
}
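
The comment above explains the reason for the dedicated ping_lock: marking the FIFO busy word and issuing the sync must behave like a single test-and-set, and the register offers no atomic primitive for that, so the lock provides it. A userspace analogue of a lock-built test-and-set, with plain variables standing in for the FIFO busy word and the sync register (ping_host and both variables are illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ping_lock = PTHREAD_MUTEX_INITIALIZER;
static int fifo_busy;		/* stand-in for the FIFO busy word */
static int sync_requests;	/* stand-in for writes to the sync register */

/* Only the caller that flips busy from 0 to 1 issues the sync request. */
static void ping_host(void)
{
	pthread_mutex_lock(&ping_lock);
	if (!fifo_busy) {
		fifo_busy = 1;
		sync_requests++;
	}
	pthread_mutex_unlock(&ping_lock);
}

int main(void)
{
	ping_host();
	ping_host();
	printf("%d\n", sync_requests);	/* 1: the second ping found busy already set */
	return 0;
}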
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
if (interruptible)
ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return 0;
}
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
- uint32_t busy;
- mutex_lock(&dev_priv->hw_mutex);
- busy = vmw_read(dev_priv, SVGA_REG_BUSY);
- mutex_unlock(&dev_priv->hw_mutex);
-
- return (busy == 0);
+ return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
- mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
- mutex_unlock(&dev_priv->hw_mutex);
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index aaf54859adb0..4a99c6416e6a 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -72,13 +72,14 @@ static void host1x_subdev_del(struct host1x_subdev *subdev)
/**
* host1x_device_parse_dt() - scan device tree and add matching subdevices
*/
-static int host1x_device_parse_dt(struct host1x_device *device)
+static int host1x_device_parse_dt(struct host1x_device *device,
+ struct host1x_driver *driver)
{
struct device_node *np;
int err;
for_each_child_of_node(device->dev.parent->of_node, np) {
- if (of_match_node(device->driver->subdevs, np) &&
+ if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, np);
if (err < 0)
@@ -109,14 +110,12 @@ static void host1x_subdev_register(struct host1x_device *device,
mutex_unlock(&device->clients_lock);
mutex_unlock(&device->subdevs_lock);
- /*
- * When all subdevices have been registered, the composite device is
- * ready to be probed.
- */
if (list_empty(&device->subdevs)) {
- err = device->driver->probe(device);
+ err = device_add(&device->dev);
if (err < 0)
- dev_err(&device->dev, "probe failed: %d\n", err);
+ dev_err(&device->dev, "failed to add: %d\n", err);
+ else
+ device->registered = true;
}
}
@@ -124,16 +123,16 @@ static void __host1x_subdev_unregister(struct host1x_device *device,
struct host1x_subdev *subdev)
{
struct host1x_client *client = subdev->client;
- int err;
/*
* If all subdevices have been activated, we're about to remove the
* first active subdevice, so unload the driver first.
*/
if (list_empty(&device->subdevs)) {
- err = device->driver->remove(device);
- if (err < 0)
- dev_err(&device->dev, "remove failed: %d\n", err);
+ if (device->registered) {
+ device->registered = false;
+ device_del(&device->dev);
+ }
}
/*
@@ -260,24 +259,113 @@ static int host1x_del_client(struct host1x *host1x,
return -ENODEV;
}
-static struct bus_type host1x_bus_type = {
- .name = "host1x",
-};
+static int host1x_device_match(struct device *dev, struct device_driver *drv)
+{
+ return strcmp(dev_name(dev), drv->name) == 0;
+}
+
+static int host1x_device_probe(struct device *dev)
+{
+ struct host1x_driver *driver = to_host1x_driver(dev->driver);
+ struct host1x_device *device = to_host1x_device(dev);
+
+ if (driver->probe)
+ return driver->probe(device);
+
+ return 0;
+}
-int host1x_bus_init(void)
+static int host1x_device_remove(struct device *dev)
{
- return bus_register(&host1x_bus_type);
+ struct host1x_driver *driver = to_host1x_driver(dev->driver);
+ struct host1x_device *device = to_host1x_device(dev);
+
+ if (driver->remove)
+ return driver->remove(device);
+
+ return 0;
+}
+
+static void host1x_device_shutdown(struct device *dev)
+{
+ struct host1x_driver *driver = to_host1x_driver(dev->driver);
+ struct host1x_device *device = to_host1x_device(dev);
+
+ if (driver->shutdown)
+ driver->shutdown(device);
}
-void host1x_bus_exit(void)
+static const struct dev_pm_ops host1x_device_pm_ops = {
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+};
+
+struct bus_type host1x_bus_type = {
+ .name = "host1x",
+ .match = host1x_device_match,
+ .probe = host1x_device_probe,
+ .remove = host1x_device_remove,
+ .shutdown = host1x_device_shutdown,
+ .pm = &host1x_device_pm_ops,
+};
+
+static void __host1x_device_del(struct host1x_device *device)
{
- bus_unregister(&host1x_bus_type);
+ struct host1x_subdev *subdev, *sd;
+ struct host1x_client *client, *cl;
+
+ mutex_lock(&device->subdevs_lock);
+
+ /* unregister subdevices */
+ list_for_each_entry_safe(subdev, sd, &device->active, list) {
+ /*
+ * host1x_subdev_unregister() will remove the client from
+ * any lists, so we'll need to manually add it back to the
+ * list of idle clients.
+ *
+ * XXX: Alternatively, perhaps don't remove the client from
+ * any lists in host1x_subdev_unregister() and instead do
+ * that explicitly from host1x_unregister_client()?
+ */
+ client = subdev->client;
+
+ __host1x_subdev_unregister(device, subdev);
+
+ /* add the client to the list of idle clients */
+ mutex_lock(&clients_lock);
+ list_add_tail(&client->list, &clients);
+ mutex_unlock(&clients_lock);
+ }
+
+ /* remove subdevices */
+ list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
+ host1x_subdev_del(subdev);
+
+ mutex_unlock(&device->subdevs_lock);
+
+ /* move clients to idle list */
+ mutex_lock(&clients_lock);
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry_safe(client, cl, &device->clients, list)
+ list_move_tail(&client->list, &clients);
+
+ mutex_unlock(&device->clients_lock);
+ mutex_unlock(&clients_lock);
+
+ /* finally remove the device */
+ list_del_init(&device->list);
}
static void host1x_device_release(struct device *dev)
{
struct host1x_device *device = to_host1x_device(dev);
+ __host1x_device_del(device);
kfree(device);
}
@@ -293,6 +381,8 @@ static int host1x_device_add(struct host1x *host1x,
if (!device)
return -ENOMEM;
+ device_initialize(&device->dev);
+
mutex_init(&device->subdevs_lock);
INIT_LIST_HEAD(&device->subdevs);
INIT_LIST_HEAD(&device->active);
@@ -303,24 +393,18 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ dev_set_name(&device->dev, "%s", driver->driver.name);
device->dev.release = host1x_device_release;
- dev_set_name(&device->dev, "%s", driver->name);
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
- err = device_register(&device->dev);
- if (err < 0)
- return err;
-
- err = host1x_device_parse_dt(device);
+ err = host1x_device_parse_dt(device, driver);
if (err < 0) {
- device_unregister(&device->dev);
+ kfree(device);
return err;
}
- mutex_lock(&host1x->devices_lock);
list_add_tail(&device->list, &host1x->devices);
- mutex_unlock(&host1x->devices_lock);
mutex_lock(&clients_lock);
@@ -347,51 +431,12 @@ static int host1x_device_add(struct host1x *host1x,
static void host1x_device_del(struct host1x *host1x,
struct host1x_device *device)
{
- struct host1x_subdev *subdev, *sd;
- struct host1x_client *client, *cl;
-
- mutex_lock(&device->subdevs_lock);
-
- /* unregister subdevices */
- list_for_each_entry_safe(subdev, sd, &device->active, list) {
- /*
- * host1x_subdev_unregister() will remove the client from
- * any lists, so we'll need to manually add it back to the
- * list of idle clients.
- *
- * XXX: Alternatively, perhaps don't remove the client from
- * any lists in host1x_subdev_unregister() and instead do
- * that explicitly from host1x_unregister_client()?
- */
- client = subdev->client;
-
- __host1x_subdev_unregister(device, subdev);
-
- /* add the client to the list of idle clients */
- mutex_lock(&clients_lock);
- list_add_tail(&client->list, &clients);
- mutex_unlock(&clients_lock);
+ if (device->registered) {
+ device->registered = false;
+ device_del(&device->dev);
}
- /* remove subdevices */
- list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
- host1x_subdev_del(subdev);
-
- mutex_unlock(&device->subdevs_lock);
-
- /* move clients to idle list */
- mutex_lock(&clients_lock);
- mutex_lock(&device->clients_lock);
-
- list_for_each_entry_safe(client, cl, &device->clients, list)
- list_move_tail(&client->list, &clients);
-
- mutex_unlock(&device->clients_lock);
- mutex_unlock(&clients_lock);
-
- /* finally remove the device */
- list_del_init(&device->list);
- device_unregister(&device->dev);
+ put_device(&device->dev);
}
static void host1x_attach_driver(struct host1x *host1x,
@@ -409,11 +454,11 @@ static void host1x_attach_driver(struct host1x *host1x,
}
}
- mutex_unlock(&host1x->devices_lock);
-
err = host1x_device_add(host1x, driver);
if (err < 0)
dev_err(host1x->dev, "failed to allocate device: %d\n", err);
+
+ mutex_unlock(&host1x->devices_lock);
}
static void host1x_detach_driver(struct host1x *host1x,
@@ -466,7 +511,8 @@ int host1x_unregister(struct host1x *host1x)
return 0;
}
-int host1x_driver_register(struct host1x_driver *driver)
+int host1x_driver_register_full(struct host1x_driver *driver,
+ struct module *owner)
{
struct host1x *host1x;
@@ -483,9 +529,12 @@ int host1x_driver_register(struct host1x_driver *driver)
mutex_unlock(&devices_lock);
- return 0;
+ driver->driver.bus = &host1x_bus_type;
+ driver->driver.owner = owner;
+
+ return driver_register(&driver->driver);
}
-EXPORT_SYMBOL(host1x_driver_register);
+EXPORT_SYMBOL(host1x_driver_register_full);
void host1x_driver_unregister(struct host1x_driver *driver)
{
diff --git a/drivers/gpu/host1x/bus.h b/drivers/gpu/host1x/bus.h
index 4099e99212c8..88fb1c4aac68 100644
--- a/drivers/gpu/host1x/bus.h
+++ b/drivers/gpu/host1x/bus.h
@@ -18,10 +18,10 @@
#ifndef HOST1X_BUS_H
#define HOST1X_BUS_H
+struct bus_type;
struct host1x;
-int host1x_bus_init(void);
-void host1x_bus_exit(void);
+extern struct bus_type host1x_bus_type;
int host1x_register(struct host1x *host1x);
int host1x_unregister(struct host1x *host1x);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2529908d304b..53d3d1d45b48 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -216,7 +216,7 @@ static int __init tegra_host1x_init(void)
{
int err;
- err = host1x_bus_init();
+ err = bus_register(&host1x_bus_type);
if (err < 0)
return err;
@@ -233,7 +233,7 @@ static int __init tegra_host1x_init(void)
unregister_host1x:
platform_driver_unregister(&tegra_host1x_driver);
unregister_bus:
- host1x_bus_exit();
+ bus_unregister(&host1x_bus_type);
return err;
}
module_init(tegra_host1x_init);
@@ -242,7 +242,7 @@ static void __exit tegra_host1x_exit(void)
{
platform_driver_unregister(&tegra_mipi_driver);
platform_driver_unregister(&tegra_host1x_driver);
- host1x_bus_exit();
+ bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index f707d25ae78f..67bab5c36056 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -742,7 +742,7 @@ static struct ipu_devtype ipu_type_imx51 = {
.tpm_ofs = 0x1f060000,
.csi0_ofs = 0x1f030000,
.csi1_ofs = 0x1f038000,
- .ic_ofs = 0x1f020000,
+ .ic_ofs = 0x1e020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
.dc_tmpl_ofs = 0x1f080000,
@@ -758,7 +758,7 @@ static struct ipu_devtype ipu_type_imx53 = {
.tpm_ofs = 0x07060000,
.csi0_ofs = 0x07030000,
.csi1_ofs = 0x07038000,
- .ic_ofs = 0x07020000,
+ .ic_ofs = 0x06020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,
.dc_tmpl_ofs = 0x07080000,
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 2326c752d89b..4864f8300797 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -114,6 +114,7 @@ struct ipu_dc_priv {
struct completion comp;
int dc_irq;
int dp_irq;
+ int use_count;
};
static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
@@ -232,7 +233,16 @@ EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
void ipu_dc_enable(struct ipu_soc *ipu)
{
- ipu_module_enable(ipu, IPU_CONF_DC_EN);
+ struct ipu_dc_priv *priv = ipu->dc_priv;
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
+
+ priv->use_count++;
+
+ mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dc_enable);
@@ -267,7 +277,8 @@ static irqreturn_t dc_irq_handler(int irq, void *dev_id)
void ipu_dc_disable_channel(struct ipu_dc *dc)
{
struct ipu_dc_priv *priv = dc->priv;
- int irq, ret;
+ int irq;
+ unsigned long ret;
u32 val;
/* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
@@ -282,7 +293,7 @@ void ipu_dc_disable_channel(struct ipu_dc *dc)
enable_irq(irq);
ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
disable_irq(irq);
- if (ret <= 0) {
+ if (ret == 0) {
dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
val = readl(dc->base + DC_WR_CH_CONF);
@@ -294,7 +305,18 @@ EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
void ipu_dc_disable(struct ipu_soc *ipu)
{
- ipu_module_disable(ipu, IPU_CONF_DC_EN);
+ struct ipu_dc_priv *priv = ipu->dc_priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_DC_EN);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dc_disable);
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index c490ba4384fc..b61d6be97602 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -207,10 +207,10 @@ static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config,
static void ipu_di_sync_config_interlaced(struct ipu_di *di,
struct ipu_di_signal_cfg *sig)
{
- u32 h_total = sig->width + sig->h_sync_width +
- sig->h_start_width + sig->h_end_width;
- u32 v_total = sig->height + sig->v_sync_width +
- sig->v_start_width + sig->v_end_width;
+ u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
+ sig->mode.hback_porch + sig->mode.hfront_porch;
+ u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
+ sig->mode.vback_porch + sig->mode.vfront_porch;
u32 reg;
struct di_sync_config cfg[] = {
{
@@ -229,13 +229,13 @@ static void ipu_di_sync_config_interlaced(struct ipu_di *di,
}, {
.run_count = v_total / 2 - 1,
.run_src = DI_SYNC_HSYNC,
- .offset_count = sig->v_start_width,
+ .offset_count = sig->mode.vback_porch,
.offset_src = DI_SYNC_HSYNC,
.repeat_count = 2,
.cnt_clr_src = DI_SYNC_VSYNC,
}, {
.run_src = DI_SYNC_HSYNC,
- .repeat_count = sig->height / 2,
+ .repeat_count = sig->mode.vactive / 2,
.cnt_clr_src = 4,
}, {
.run_count = v_total - 1,
@@ -249,9 +249,9 @@ static void ipu_di_sync_config_interlaced(struct ipu_di *di,
.cnt_clr_src = DI_SYNC_VSYNC,
}, {
.run_src = DI_SYNC_CLK,
- .offset_count = sig->h_start_width,
+ .offset_count = sig->mode.hback_porch,
.offset_src = DI_SYNC_CLK,
- .repeat_count = sig->width,
+ .repeat_count = sig->mode.hactive,
.cnt_clr_src = 5,
}, {
.run_count = v_total - 1,
@@ -277,10 +277,10 @@ static void ipu_di_sync_config_interlaced(struct ipu_di *di,
static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
struct ipu_di_signal_cfg *sig, int div)
{
- u32 h_total = sig->width + sig->h_sync_width + sig->h_start_width +
- sig->h_end_width;
- u32 v_total = sig->height + sig->v_sync_width + sig->v_start_width +
- sig->v_end_width;
+ u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
+ sig->mode.hback_porch + sig->mode.hfront_porch;
+ u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
+ sig->mode.vback_porch + sig->mode.vfront_porch;
struct di_sync_config cfg[] = {
{
/* 1: INT_HSYNC */
@@ -294,27 +294,29 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
.offset_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
- .cnt_down = sig->h_sync_width * 2,
+ .cnt_down = sig->mode.hsync_len * 2,
} , {
/* PIN3: VSYNC */
.run_count = v_total - 1,
.run_src = DI_SYNC_INT_HSYNC,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
- .cnt_down = sig->v_sync_width * 2,
+ .cnt_down = sig->mode.vsync_len * 2,
} , {
/* 4: Line Active */
.run_src = DI_SYNC_HSYNC,
- .offset_count = sig->v_sync_width + sig->v_start_width,
+ .offset_count = sig->mode.vsync_len +
+ sig->mode.vback_porch,
.offset_src = DI_SYNC_HSYNC,
- .repeat_count = sig->height,
+ .repeat_count = sig->mode.vactive,
.cnt_clr_src = DI_SYNC_VSYNC,
} , {
/* 5: Pixel Active, referenced by DC */
.run_src = DI_SYNC_CLK,
- .offset_count = sig->h_sync_width + sig->h_start_width,
+ .offset_count = sig->mode.hsync_len +
+ sig->mode.hback_porch,
.offset_src = DI_SYNC_CLK,
- .repeat_count = sig->width,
+ .repeat_count = sig->mode.hactive,
.cnt_clr_src = 5, /* Line Active */
} , {
/* unused */
@@ -339,9 +341,10 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
} , {
/* 3: Line Active */
.run_src = DI_SYNC_INT_HSYNC,
- .offset_count = sig->v_sync_width + sig->v_start_width,
+ .offset_count = sig->mode.vsync_len +
+ sig->mode.vback_porch,
.offset_src = DI_SYNC_INT_HSYNC,
- .repeat_count = sig->height,
+ .repeat_count = sig->mode.vactive,
.cnt_clr_src = 3 /* VSYNC */,
} , {
/* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */
@@ -351,13 +354,14 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
.offset_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
- .cnt_down = sig->h_sync_width * 2,
+ .cnt_down = sig->mode.hsync_len * 2,
} , {
/* 5: Pixel Active signal to DC */
.run_src = DI_SYNC_CLK,
- .offset_count = sig->h_sync_width + sig->h_start_width,
+ .offset_count = sig->mode.hsync_len +
+ sig->mode.hback_porch,
.offset_src = DI_SYNC_CLK,
- .repeat_count = sig->width,
+ .repeat_count = sig->mode.hactive,
.cnt_clr_src = 4, /* Line Active */
} , {
/* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */
@@ -367,7 +371,7 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
.offset_src = DI_SYNC_INT_HSYNC,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
- .cnt_down = sig->v_sync_width * 2,
+ .cnt_down = sig->mode.vsync_len * 2,
} , {
/* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */
.run_count = h_total - 1,
@@ -376,7 +380,7 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
.offset_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
- .cnt_down = sig->h_sync_width * 2,
+ .cnt_down = sig->mode.hsync_len * 2,
} , {
/* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */
.run_count = v_total - 1,
@@ -385,7 +389,7 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
.offset_src = DI_SYNC_INT_HSYNC,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
- .cnt_down = sig->v_sync_width * 2,
+ .cnt_down = sig->mode.vsync_len * 2,
} , {
/* unused */
},
@@ -433,10 +437,10 @@ static void ipu_di_config_clock(struct ipu_di *di,
unsigned long in_rate;
unsigned div;
- clk_set_rate(clk, sig->pixelclock);
+ clk_set_rate(clk, sig->mode.pixelclock);
in_rate = clk_get_rate(clk);
- div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
if (div == 0)
div = 1;
@@ -454,10 +458,10 @@ static void ipu_di_config_clock(struct ipu_di *di,
unsigned div, error;
clkrate = clk_get_rate(di->clk_ipu);
- div = (clkrate + sig->pixelclock / 2) / sig->pixelclock;
+ div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
rate = clkrate / div;
- error = rate / (sig->pixelclock / 1000);
+ error = rate / (sig->mode.pixelclock / 1000);
dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
rate, div, (signed)(error - 1000) / 10, error % 10);
@@ -473,10 +477,10 @@ static void ipu_di_config_clock(struct ipu_di *di,
clk = di->clk_di;
- clk_set_rate(clk, sig->pixelclock);
+ clk_set_rate(clk, sig->mode.pixelclock);
in_rate = clk_get_rate(clk);
- div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
if (div == 0)
div = 1;
@@ -504,35 +508,58 @@ static void ipu_di_config_clock(struct ipu_di *di,
ipu_di_write(di, val, DI_GENERAL);
dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
- sig->pixelclock,
+ sig->mode.pixelclock,
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
clk == di->clk_di ? "DI" : "IPU",
clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
}
+/*
+ * This function is called to adjust a video mode to IPU restrictions.
+ * It is meant to be called from drm crtc mode_fixup() methods.
+ */
+int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
+{
+ u32 diff;
+
+ if (mode->vfront_porch >= 2)
+ return 0;
+
+ diff = 2 - mode->vfront_porch;
+
+ if (mode->vback_porch >= diff) {
+ mode->vfront_porch = 2;
+ mode->vback_porch -= diff;
+ } else if (mode->vsync_len > diff) {
+ mode->vfront_porch = 2;
+ mode->vsync_len = mode->vsync_len - diff;
+ } else {
+ dev_warn(di->ipu->dev, "failed to adjust videomode\n");
+ return -EINVAL;
+ }
+
+ dev_warn(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
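
The helper above enforces a vertical front porch of at least two lines, borrowing the missing lines first from the back porch and otherwise from the vsync length, so the total frame height stays unchanged. A standalone sketch of that adjustment on a plain timing struct (vtimings and adjust_vfront are hypothetical names mirroring the relevant struct videomode fields):

#include <stdio.h>

struct vtimings {
	unsigned int vfront_porch;
	unsigned int vback_porch;
	unsigned int vsync_len;
};

/* Ensure at least two lines of vertical front porch without changing v_total. */
static int adjust_vfront(struct vtimings *t)
{
	unsigned int diff;

	if (t->vfront_porch >= 2)
		return 0;

	diff = 2 - t->vfront_porch;

	if (t->vback_porch >= diff) {
		t->vfront_porch = 2;
		t->vback_porch -= diff;
	} else if (t->vsync_len > diff) {
		t->vfront_porch = 2;
		t->vsync_len -= diff;
	} else {
		return -1;	/* nothing left to borrow from */
	}

	return 0;
}

int main(void)
{
	struct vtimings t = { .vfront_porch = 0, .vback_porch = 5, .vsync_len = 2 };

	if (adjust_vfront(&t) == 0)
		printf("fp=%u bp=%u sync=%u\n", t.vfront_porch, t.vback_porch, t.vsync_len);

	return 0;	/* prints fp=2 bp=3 sync=2 */
}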
+
int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
{
u32 reg;
u32 di_gen, vsync_cnt;
u32 div;
- u32 h_total, v_total;
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
- di->id, sig->width, sig->height);
+ di->id, sig->mode.hactive, sig->mode.vactive);
- if ((sig->v_sync_width == 0) || (sig->h_sync_width == 0))
+ if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
return -EINVAL;
- h_total = sig->width + sig->h_sync_width + sig->h_start_width +
- sig->h_end_width;
- v_total = sig->height + sig->v_sync_width + sig->v_start_width +
- sig->v_end_width;
-
dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
- sig->pixelclock);
+ sig->mode.pixelclock);
mutex_lock(&di_mutex);
@@ -551,7 +578,7 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT;
di_gen |= DI_GEN_DI_VSYNC_EXT;
- if (sig->interlaced) {
+ if (sig->mode.flags & DISPLAY_FLAGS_INTERLACED) {
ipu_di_sync_config_interlaced(di, sig);
/* set y_sel = 1 */
@@ -561,9 +588,9 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
vsync_cnt = 7;
- if (sig->Hsync_pol)
+ if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
di_gen |= DI_GEN_POLARITY_3;
- if (sig->Vsync_pol)
+ if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
di_gen |= DI_GEN_POLARITY_2;
} else {
ipu_di_sync_config_noninterlaced(di, sig, div);
@@ -577,7 +604,7 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
vsync_cnt = 6;
- if (sig->Hsync_pol) {
+ if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH) {
if (sig->hsync_pin == 2)
di_gen |= DI_GEN_POLARITY_2;
else if (sig->hsync_pin == 4)
@@ -585,7 +612,7 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
else if (sig->hsync_pin == 7)
di_gen |= DI_GEN_POLARITY_7;
}
- if (sig->Vsync_pol) {
+ if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH) {
if (sig->vsync_pin == 3)
di_gen |= DI_GEN_POLARITY_3;
else if (sig->vsync_pin == 6)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index dfdc26970022..152b006833cd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -147,6 +147,16 @@ config HID_BELKIN
---help---
Support for Belkin Flip KVM and Wireless keyboard.
+config HID_BETOP_FF
+ tristate "Betop Production Inc. force feedback support"
+ depends on USB_HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you want to enable force feedback support for devices by
+ BETOP Production Ltd.
+ Currently the following devices are known to be supported:
+ - BETOP 2185 PC & BFM MODE
+
config HID_CHERRY
tristate "Cherry Cymotion keyboard" if EXPERT
depends on HID
@@ -389,7 +399,7 @@ config HID_LOGITECH_HIDPP
Say Y if you want support for Logitech devices relying on the HID++
specification. Such devices are the various Logitech Touchpads (T650,
T651, TK820), some mice (Zone Touch mouse), or even keyboards (Solar
- Keayboard).
+ Keyboard).
config LOGITECH_FF
bool "Logitech force feedback support"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index debd15b44b59..6f19958dfc38 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -2,10 +2,7 @@
# Makefile for the HID driver
#
hid-y := hid-core.o hid-input.o
-
-ifdef CONFIG_DEBUG_FS
- hid-objs += hid-debug.o
-endif
+hid-$(CONFIG_DEBUG_FS) += hid-debug.o
obj-$(CONFIG_HID) += hid.o
obj-$(CONFIG_UHID) += uhid.o
@@ -15,23 +12,13 @@ obj-$(CONFIG_HID_GENERIC) += hid-generic.o
hid-$(CONFIG_HIDRAW) += hidraw.o
hid-logitech-y := hid-lg.o
-ifdef CONFIG_LOGITECH_FF
- hid-logitech-y += hid-lgff.o
-endif
-ifdef CONFIG_LOGIRUMBLEPAD2_FF
- hid-logitech-y += hid-lg2ff.o
-endif
-ifdef CONFIG_LOGIG940_FF
- hid-logitech-y += hid-lg3ff.o
-endif
-ifdef CONFIG_LOGIWHEELS_FF
- hid-logitech-y += hid-lg4ff.o
-endif
+hid-logitech-$(CONFIG_LOGITECH_FF) += hid-lgff.o
+hid-logitech-$(CONFIG_LOGIRUMBLEPAD2_FF) += hid-lg2ff.o
+hid-logitech-$(CONFIG_LOGIG940_FF) += hid-lg3ff.o
+hid-logitech-$(CONFIG_LOGIWHEELS_FF) += hid-lg4ff.o
hid-wiimote-y := hid-wiimote-core.o hid-wiimote-modules.o
-ifdef CONFIG_DEBUG_FS
- hid-wiimote-y += hid-wiimote-debug.o
-endif
+hid-wiimote-$(CONFIG_DEBUG_FS) += hid-wiimote-debug.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
@@ -39,6 +26,7 @@ obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
+obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
@@ -76,24 +64,12 @@ obj-$(CONFIG_HID_PENMOUNT) += hid-penmount.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
hid-picolcd-y += hid-picolcd_core.o
-ifdef CONFIG_HID_PICOLCD_FB
-hid-picolcd-y += hid-picolcd_fb.o
-endif
-ifdef CONFIG_HID_PICOLCD_BACKLIGHT
-hid-picolcd-y += hid-picolcd_backlight.o
-endif
-ifdef CONFIG_HID_PICOLCD_LCD
-hid-picolcd-y += hid-picolcd_lcd.o
-endif
-ifdef CONFIG_HID_PICOLCD_LEDS
-hid-picolcd-y += hid-picolcd_leds.o
-endif
-ifdef CONFIG_HID_PICOLCD_CIR
-hid-picolcd-y += hid-picolcd_cir.o
-endif
-ifdef CONFIG_DEBUG_FS
-hid-picolcd-y += hid-picolcd_debugfs.o
-endif
+hid-picolcd-$(CONFIG_HID_PICOLCD_FB) += hid-picolcd_fb.o
+hid-picolcd-$(CONFIG_HID_PICOLCD_BACKLIGHT) += hid-picolcd_backlight.o
+hid-picolcd-$(CONFIG_HID_PICOLCD_LCD) += hid-picolcd_lcd.o
+hid-picolcd-$(CONFIG_HID_PICOLCD_LEDS) += hid-picolcd_leds.o
+hid-picolcd-$(CONFIG_HID_PICOLCD_CIR) += hid-picolcd_cir.o
+hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o
obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
new file mode 100644
index 000000000000..69cfc8dc6af1
--- /dev/null
+++ b/drivers/hid/hid-betopff.c
@@ -0,0 +1,160 @@
+/*
+ * Force feedback support for Betop based devices
+ *
+ * The devices are distributed under various names and the same USB device ID
+ * can be used in both adapters and actual game controllers.
+ *
+ * 0x11c2:0x2208 "BTP2185 BFM mode Joystick"
+ * - tested with BTP2185 BFM Mode.
+ *
+ * 0x11C0:0x5506 "BTP2185 PC mode Joystick"
+ * - tested with BTP2185 PC Mode.
+ *
+ * 0x8380:0x1850 "BTP2185 V2 PC mode USB Gamepad"
+ * - tested with BTP2185 PC Mode with another version.
+ *
+ * 0x20bc:0x5500 "BTP2185 V2 BFM mode Joystick"
+ * - tested with BTP2171s.
+ * Copyright (c) 2014 Huang Bo <huangbobupt@163.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/hid.h>
+
+#include "hid-ids.h"
+
+struct betopff_device {
+ struct hid_report *report;
+};
+
+static int hid_betopff_play(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct betopff_device *betopff = data;
+ __u16 left, right;
+
+ left = effect->u.rumble.strong_magnitude;
+ right = effect->u.rumble.weak_magnitude;
+
+ betopff->report->field[2]->value[0] = left / 256;
+ betopff->report->field[3]->value[0] = right / 256;
+
+ hid_hw_request(hid, betopff->report, HID_REQ_SET_REPORT);
+
+ return 0;
+}
+
+static int betopff_init(struct hid_device *hid)
+{
+ struct betopff_device *betopff;
+ struct hid_report *report;
+ struct hid_input *hidinput =
+ list_first_entry(&hid->inputs, struct hid_input, list);
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int field_count = 0;
+ int error;
+ int i, j;
+
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output reports found\n");
+ return -ENODEV;
+ }
+
+ report = list_first_entry(report_list, struct hid_report, list);
+ /*
+ * There are actually 4 fields, one per byte, laid out as below:
+ * -----------------------------------------
+ * Byte0 Byte1 Byte2 Byte3
+ * 0x00 0x00 left_motor right_motor
+ * -----------------------------------------
+ * Initialize them all to the default value 0x00.
+ */
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] = 0x00;
+ field_count++;
+ }
+ }
+
+ if (field_count < 4) {
+ hid_err(hid, "not enough fields in the report: %d\n",
+ field_count);
+ return -ENODEV;
+ }
+
+ betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
+ if (!betopff)
+ return -ENOMEM;
+
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, betopff, hid_betopff_play);
+ if (error) {
+ kfree(betopff);
+ return error;
+ }
+
+ betopff->report = report;
+ hid_hw_request(hid, betopff->report, HID_REQ_SET_REPORT);
+
+ hid_info(hid, "Force feedback for betop devices by huangbo <huangbobupt@163.com>\n");
+
+ return 0;
+}
+
+static int betop_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ if (id->driver_data)
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err;
+ }
+
+ betopff_init(hdev);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct hid_device_id betop_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, betop_devices);
+
+static struct hid_driver betop_driver = {
+ .name = "betop",
+ .id_table = betop_devices,
+ .probe = betop_probe,
+};
+module_hid_driver(betop_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8b638792cb43..db4fb6e1cc5b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -698,15 +698,25 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
struct hid_device *hid = parser->device;
+ int i;
if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
type == HID_COLLECTION_PHYSICAL)
hid->group = HID_GROUP_SENSOR_HUB;
if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 &&
+ (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
+
+ if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
+ for (i = 0; i < parser->local.usage_index; i++)
+ if (parser->local.usage[i] == HID_GD_POINTER)
+ parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
+
+ if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
+ parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
}
static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
@@ -792,11 +802,14 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_WACOM;
break;
case USB_VENDOR_ID_SYNAPTICS:
- if ((hid->group == HID_GROUP_GENERIC) &&
- (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE))
- /* hid-rmi should only bind to the mouse interface of
- * composite USB devices */
- hid->group = HID_GROUP_RMI;
+ if (hid->group == HID_GROUP_GENERIC)
+ if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
+ && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
+ /*
+ * hid-rmi should take care of them,
+ * not hid-generic
+ */
+ hid->group = HID_GROUP_RMI;
break;
}
@@ -1757,6 +1770,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1861,6 +1878,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
@@ -1971,6 +1989,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
{ }
};
@@ -2328,6 +2347,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 31fad641b744..6039f071fab1 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -381,7 +381,7 @@ static void mousevsc_on_channel_callback(void *context)
static int mousevsc_connect_to_vsp(struct hv_device *device)
{
int ret = 0;
- int t;
+ unsigned long t;
struct mousevsc_dev *input_dev = hv_get_drvdata(device);
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9243359c1821..46edb4d3ed28 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -189,6 +189,11 @@
#define USB_VENDOR_ID_BERKSHIRE 0x0c98
#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
+#define USB_VENDOR_ID_BETOP_2185BFM 0x11c2
+#define USB_VENDOR_ID_BETOP_2185PC 0x11c0
+#define USB_VENDOR_ID_BETOP_2185V2PC 0x8380
+#define USB_VENDOR_ID_BETOP_2185V2BFM 0x20bc
+
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
@@ -638,6 +643,7 @@
#define USB_DEVICE_ID_PICKIT2 0x0033
#define USB_DEVICE_ID_PICOLCD 0xc002
#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
+#define USB_DEVICE_ID_PICK16F1454 0x0042
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
@@ -654,6 +660,7 @@
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc
+#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -768,6 +775,9 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define USB_VENDOR_ID_RAZER 0x1532
+#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
+
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9505605b6e22..052869d0ab78 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -306,10 +306,13 @@ static enum power_supply_property hidinput_battery_props[] = {
static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
- USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
- HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
- USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
@@ -1104,6 +1107,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}
+ /*
+ * Ignore reports for absolute data if the data didn't change. This is
+ * not only an optimization but also fixes 'dead' key reports. Some
+ * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
+ * 0x31 and 0x32) report multiple keys, even though a localized keyboard
+ * can only have one of them physically available. The 'dead' keys
+ * report constant 0. As all map to the same keycode, they'd confuse
+ * the input layer. If we filter the 'dead' keys on the HID level, we
+ * skip the keycode translation and only forward real events.
+ */
+ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
+ HID_MAIN_ITEM_BUFFERED_BYTE)) &&
+ (field->flags & HID_MAIN_ITEM_VARIABLE) &&
+ usage->usage_index < field->maxusage &&
+ value == field->value[usage->usage_index])
+ return;
+
/* report the usage code as scancode if the key status has changed */
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 4c55f4d95798..c4c3f0952521 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -38,6 +38,7 @@ struct lenovo_drvdata_tpkbd {
struct lenovo_drvdata_cptkbd {
bool fn_lock;
+ int sensitivity;
};
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -91,6 +92,38 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
case 0x00fa: /* Fn-Esc: Fn-lock toggle */
map_key_clear(KEY_FN_ESC);
return 1;
+ case 0x00fb: /* Middle mouse button (in native mode) */
+ map_key_clear(BTN_MIDDLE);
+ return 1;
+ }
+ }
+
+ /* Compatibility middle/wheel mappings should be ignored */
+ if (usage->hid == HID_GD_WHEEL)
+ return -1;
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON &&
+ (usage->hid & HID_USAGE) == 0x003)
+ return -1;
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER &&
+ (usage->hid & HID_USAGE) == 0x238)
+ return -1;
+
+ /* Map wheel emulation reports: 0xffa1 = USB, 0xff10 = BT */
+ if ((usage->hid & HID_USAGE_PAGE) == 0xff100000 ||
+ (usage->hid & HID_USAGE_PAGE) == 0xffa10000) {
+ field->flags |= HID_MAIN_ITEM_RELATIVE | HID_MAIN_ITEM_VARIABLE;
+ field->logical_minimum = -127;
+ field->logical_maximum = 127;
+
+ switch (usage->hid & HID_USAGE) {
+ case 0x0000:
+ hid_map_usage(hi, usage, bit, max, EV_REL, 0x06);
+ return 1;
+ case 0x0001:
+ hid_map_usage(hi, usage, bit, max, EV_REL, 0x08);
+ return 1;
+ default:
+ return -1;
}
}
@@ -145,6 +178,7 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x02, cptkbd_data->sensitivity);
if (ret)
hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
}
@@ -179,13 +213,50 @@ static ssize_t attr_fn_lock_store_cptkbd(struct device *dev,
return count;
}
+static ssize_t attr_sensitivity_show_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ cptkbd_data->sensitivity);
+}
+
+static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ cptkbd_data->sensitivity = value;
+ lenovo_features_set_cptkbd(hdev);
+
+ return count;
+}
+
+
static struct device_attribute dev_attr_fn_lock_cptkbd =
__ATTR(fn_lock, S_IWUSR | S_IRUGO,
attr_fn_lock_show_cptkbd,
attr_fn_lock_store_cptkbd);
+static struct device_attribute dev_attr_sensitivity_cptkbd =
+ __ATTR(sensitivity, S_IWUSR | S_IRUGO,
+ attr_sensitivity_show_cptkbd,
+ attr_sensitivity_store_cptkbd);
+
+
static struct attribute *lenovo_attributes_cptkbd[] = {
&dev_attr_fn_lock_cptkbd.attr,
+ &dev_attr_sensitivity_cptkbd.attr,
NULL
};
@@ -594,8 +665,14 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
if (ret)
hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
- /* Turn Fn-Lock on by default */
+ /* Switch middle button to native mode */
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+ if (ret)
+ hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
+
+ /* Set keyboard settings to known state */
cptkbd_data->fn_lock = true;
+ cptkbd_data->sensitivity = 0x05;
lenovo_features_set_cptkbd(hdev);
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 7835717bc020..db0dd9b17e53 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -49,10 +49,6 @@
static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
-static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
-static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
-
-static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IROTH, lg4ff_range_show, lg4ff_range_store);
struct lg4ff_device_entry {
__u32 product_id;
@@ -416,7 +412,8 @@ static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_n
}
/* Read current range and display it in terminal */
-static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t range_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct hid_device *hid = to_hid_device(dev);
struct lg4ff_device_entry *entry;
@@ -441,7 +438,8 @@ static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *att
/* Set range to user specified value, call appropriate function
* according to the type of the wheel */
-static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t range_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hid_device *hid = to_hid_device(dev);
struct lg4ff_device_entry *entry;
@@ -472,6 +470,7 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
return count;
}
+static DEVICE_ATTR_RW(range);
#ifdef CONFIG_LEDS_CLASS
static void lg4ff_set_leds(struct hid_device *hid, __u8 leds)
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index a93cefe0e522..e77658cd037c 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -89,6 +89,7 @@ struct hidpp_device {
struct hid_device *hid_dev;
struct mutex send_mutex;
void *send_receive_buf;
+ char *name; /* will never be NULL and should not be freed */
wait_queue_head_t wait;
bool answer_available;
u8 protocol_major;
@@ -105,6 +106,7 @@ struct hidpp_device {
};
+/* HID++ 1.0 error codes */
#define HIDPP_ERROR 0x8f
#define HIDPP_ERROR_SUCCESS 0x00
#define HIDPP_ERROR_INVALID_SUBID 0x01
@@ -119,6 +121,8 @@ struct hidpp_device {
#define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a
#define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b
#define HIDPP_ERROR_WRONG_PIN_CODE 0x0c
+/* HID++ 2.0 error codes */
+#define HIDPP20_ERROR 0xff
static void hidpp_connect_event(struct hidpp_device *hidpp_dev);
@@ -192,9 +196,16 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
}
if (response->report_id == REPORT_ID_HIDPP_SHORT &&
- response->fap.feature_index == HIDPP_ERROR) {
+ response->rap.sub_id == HIDPP_ERROR) {
+ ret = response->rap.params[1];
+ dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
+ goto exit;
+ }
+
+ if (response->report_id == REPORT_ID_HIDPP_LONG &&
+ response->fap.feature_index == HIDPP20_ERROR) {
ret = response->fap.params[1];
- dbg_hid("__hidpp_send_report got hidpp error %02X\n", ret);
+ dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
goto exit;
}
@@ -271,7 +282,8 @@ static inline bool hidpp_match_answer(struct hidpp_report *question,
static inline bool hidpp_match_error(struct hidpp_report *question,
struct hidpp_report *answer)
{
- return (answer->fap.feature_index == HIDPP_ERROR) &&
+ return ((answer->rap.sub_id == HIDPP_ERROR) ||
+ (answer->fap.feature_index == HIDPP20_ERROR)) &&
(answer->fap.funcindex_clientid == question->fap.feature_index) &&
(answer->fap.params[0] == question->fap.funcindex_clientid);
}
@@ -903,24 +915,24 @@ static int wtp_allocate(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
};
-static void wtp_connect(struct hid_device *hdev, bool connected)
+static int wtp_connect(struct hid_device *hdev, bool connected)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
struct wtp_data *wd = hidpp->private_data;
int ret;
if (!connected)
- return;
+ return 0;
if (!wd->x_size) {
ret = wtp_get_config(hidpp);
if (ret) {
hid_err(hdev, "Can not get wtp config: %d\n", ret);
- return;
+ return ret;
}
}
- hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index,
+ return hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index,
true, true);
}
@@ -965,7 +977,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
/*
* If the mutex is locked then we have a pending answer from a
- * previoulsly sent command
+ * previously sent command.
*/
if (unlikely(mutex_is_locked(&hidpp->send_mutex))) {
/*
@@ -996,9 +1008,6 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
return 1;
}
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
- return wtp_raw_event(hidpp->hid_dev, data, size);
-
return 0;
}
@@ -1006,7 +1015,9 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ int ret = 0;
+ /* Generic HID++ processing. */
switch (data[0]) {
case REPORT_ID_HIDPP_LONG:
if (size != HIDPP_REPORT_LONG_LENGTH) {
@@ -1014,16 +1025,23 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
size);
return 1;
}
- return hidpp_raw_hidpp_event(hidpp, data, size);
+ ret = hidpp_raw_hidpp_event(hidpp, data, size);
+ break;
case REPORT_ID_HIDPP_SHORT:
if (size != HIDPP_REPORT_SHORT_LENGTH) {
hid_err(hdev, "received hid++ report of bad size (%d)",
size);
return 1;
}
- return hidpp_raw_hidpp_event(hidpp, data, size);
+ ret = hidpp_raw_hidpp_event(hidpp, data, size);
+ break;
}
+ /* If no report is available for further processing, skip calling
+ * raw_event of subclasses. */
+ if (ret != 0)
+ return ret;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
return wtp_raw_event(hdev, data, size);
@@ -1070,6 +1088,7 @@ static void hidpp_input_close(struct input_dev *dev)
static struct input_dev *hidpp_allocate_input(struct hid_device *hdev)
{
struct input_dev *input_dev = devm_input_allocate_device(&hdev->dev);
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
if (!input_dev)
return NULL;
@@ -1078,7 +1097,7 @@ static struct input_dev *hidpp_allocate_input(struct hid_device *hdev)
input_dev->open = hidpp_input_open;
input_dev->close = hidpp_input_close;
- input_dev->name = hdev->name;
+ input_dev->name = hidpp->name;
input_dev->phys = hdev->phys;
input_dev->uniq = hdev->uniq;
input_dev->id.bustype = hdev->bus;
@@ -1098,8 +1117,11 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
struct input_dev *input;
char *name, *devm_name;
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
- wtp_connect(hdev, connected);
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
+ ret = wtp_connect(hdev, connected);
+ if (ret)
+ return;
+ }
if (!connected || hidpp->delayed_input)
return;
@@ -1117,22 +1139,28 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
hid_info(hdev, "HID++ %u.%u device connected.\n",
hidpp->protocol_major, hidpp->protocol_minor);
+ if (!hidpp->name || hidpp->name == hdev->name) {
+ name = hidpp_get_device_name(hidpp);
+ if (!name) {
+ hid_err(hdev,
+ "unable to retrieve the name of the device");
+ return;
+ }
+
+ devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
+ kfree(name);
+ if (!devm_name)
+ return;
+
+ hidpp->name = devm_name;
+ }
+
input = hidpp_allocate_input(hdev);
if (!input) {
hid_err(hdev, "cannot allocate new input device: %d\n", ret);
return;
}
- name = hidpp_get_device_name(hidpp);
- if (!name) {
- hid_err(hdev, "unable to retrieve the name of the device");
- } else {
- devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
- if (devm_name)
- input->name = devm_name;
- kfree(name);
- }
-
hidpp_populate_input(hidpp, input, false);
ret = input_register_device(input);
@@ -1155,6 +1183,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
return -ENOMEM;
hidpp->hid_dev = hdev;
+ hidpp->name = hdev->name;
hid_set_drvdata(hdev, hidpp);
hidpp->quirks = id->driver_data;
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index cacda43f6a6f..fbaea6eb882e 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -276,6 +276,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_DUPLICATE_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
.driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP),
+ .driver_data = MS_HIDINPUT },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index b51200fe2f33..49d4fe4f5987 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -33,6 +33,10 @@
#define RMI_READ_DATA_PENDING BIT(1)
#define RMI_STARTED BIT(2)
+/* device flags */
+#define RMI_DEVICE BIT(0)
+#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
+
enum rmi_mode_type {
RMI_MODE_OFF = 0,
RMI_MODE_ATTN_REPORTS = 1,
@@ -118,6 +122,8 @@ struct rmi_data {
struct work_struct reset_work;
struct hid_device *hdev;
+
+ unsigned long device_flags;
};
#define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
@@ -452,9 +458,32 @@ static int rmi_raw_event(struct hid_device *hdev,
return rmi_read_data_event(hdev, data, size);
case RMI_ATTN_REPORT_ID:
return rmi_input_event(hdev, data, size);
- case RMI_MOUSE_REPORT_ID:
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int rmi_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct rmi_data *data = hid_get_drvdata(hdev);
+
+ if ((data->device_flags & RMI_DEVICE) &&
+ (field->application == HID_GD_POINTER ||
+ field->application == HID_GD_MOUSE)) {
+ if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS) {
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
+ return 0;
+
+ if ((usage->hid == HID_GD_X || usage->hid == HID_GD_Y)
+ && !value)
+ return 1;
+ }
+
rmi_schedule_reset(hdev);
- break;
+ return 1;
}
return 0;
@@ -856,6 +885,9 @@ static void rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (ret)
return;
+ if (!(data->device_flags & RMI_DEVICE))
+ return;
+
/* Allow incoming hid reports */
hid_device_io_start(hdev);
@@ -914,8 +946,38 @@ static int rmi_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
{
- /* we want to make HID ignore the advertised HID collection */
- return -1;
+ struct rmi_data *data = hid_get_drvdata(hdev);
+
+ /*
+ * we want to make HID ignore the advertised HID collection
+ * for RMI devices
+ */
+ if (data->device_flags & RMI_DEVICE) {
+ if ((data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS) &&
+ ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON))
+ return 0;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int rmi_check_valid_report_id(struct hid_device *hdev, unsigned type,
+ unsigned id, struct hid_report **report)
+{
+ int i;
+
+ *report = hdev->report_enum[type].report_id_hash[id];
+ if (*report) {
+ for (i = 0; i < (*report)->maxfield; i++) {
+ unsigned app = (*report)->field[i]->application;
+ if ((app & HID_USAGE_PAGE) >= HID_UP_MSVENDOR)
+ return 1;
+ }
+ }
+
+ return 0;
}
static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -925,6 +987,7 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
size_t alloc_size;
struct hid_report *input_report;
struct hid_report *output_report;
+ struct hid_report *feature_report;
data = devm_kzalloc(&hdev->dev, sizeof(struct rmi_data), GFP_KERNEL);
if (!data)
@@ -943,27 +1006,37 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- input_report = hdev->report_enum[HID_INPUT_REPORT]
- .report_id_hash[RMI_ATTN_REPORT_ID];
- if (!input_report) {
- hid_err(hdev, "device does not have expected input report\n");
- ret = -ENODEV;
- return ret;
+ if (id->driver_data)
+ data->device_flags = id->driver_data;
+
+ /*
+ * Check for the RMI-specific report ids. If they are missing
+ * simply return and let the events be processed by hid-input
+ */
+ if (!rmi_check_valid_report_id(hdev, HID_FEATURE_REPORT,
+ RMI_SET_RMI_MODE_REPORT_ID, &feature_report)) {
+ hid_dbg(hdev, "device does not have set mode feature report\n");
+ goto start;
}
- data->input_report_size = (input_report->size >> 3) + 1 /* report id */;
+ if (!rmi_check_valid_report_id(hdev, HID_INPUT_REPORT,
+ RMI_ATTN_REPORT_ID, &input_report)) {
+ hid_dbg(hdev, "device does not have attention input report\n");
+ goto start;
+ }
- output_report = hdev->report_enum[HID_OUTPUT_REPORT]
- .report_id_hash[RMI_WRITE_REPORT_ID];
- if (!output_report) {
- hid_err(hdev, "device does not have expected output report\n");
- ret = -ENODEV;
- return ret;
+ data->input_report_size = hid_report_len(input_report);
+
+ if (!rmi_check_valid_report_id(hdev, HID_OUTPUT_REPORT,
+ RMI_WRITE_REPORT_ID, &output_report)) {
+ hid_dbg(hdev,
+ "device does not have rmi write output report\n");
+ goto start;
}
- data->output_report_size = (output_report->size >> 3)
- + 1 /* report id */;
+ data->output_report_size = hid_report_len(output_report);
+ data->device_flags |= RMI_DEVICE;
alloc_size = data->output_report_size + data->input_report_size;
data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL);
@@ -978,13 +1051,15 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
mutex_init(&data->page_mutex);
+start:
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
return ret;
}
- if (!test_bit(RMI_STARTED, &data->flags))
+ if ((data->device_flags & RMI_DEVICE) &&
+ !test_bit(RMI_STARTED, &data->flags))
/*
* The device may be in the bootloader if rmi_input_configured
* failed to find F11 in the PDT. Print an error, but don't
@@ -1007,6 +1082,8 @@ static void rmi_remove(struct hid_device *hdev)
}
static const struct hid_device_id rmi_id[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
+ .driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
@@ -1017,6 +1094,7 @@ static struct hid_driver rmi_driver = {
.id_table = rmi_id,
.probe = rmi_probe,
.remove = rmi_remove,
+ .event = rmi_event,
.raw_event = rmi_raw_event,
.input_mapping = rmi_input_mapping,
.input_configured = rmi_input_configured,
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index db3cf31c6fa1..890f2914a8ff 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -2,17 +2,9 @@
# Makefile for the USB input drivers
#
-# Multipart objects.
usbhid-y := hid-core.o hid-quirks.o
-
-# Optional parts of multipart objects.
-
-ifeq ($(CONFIG_USB_HIDDEV),y)
- usbhid-y += hiddev.o
-endif
-ifeq ($(CONFIG_HID_PID),y)
- usbhid-y += hid-pidff.o
-endif
+usbhid-$(CONFIG_USB_HIDDEV) += hiddev.o
+usbhid-$(CONFIG_HID_PID) += hid-pidff.o
obj-$(CONFIG_USB_HID) += usbhid.o
obj-$(CONFIG_USB_KBD) += usbkbd.o
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index 10b616702780..0b531c6a76a5 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -1252,6 +1252,8 @@ int hid_pidff_init(struct hid_device *hid)
pidff->hid = hid;
+ hid_device_io_start(hid);
+
pidff_find_reports(hid, HID_OUTPUT_REPORT, pidff);
pidff_find_reports(hid, HID_FEATURE_REPORT, pidff);
@@ -1315,9 +1317,13 @@ int hid_pidff_init(struct hid_device *hid)
hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_device_io_stop(hid);
+
return 0;
fail:
+ hid_device_io_stop(hid);
+
kfree(pidff);
return error;
}
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b27b3d33ebab..9be99a67bfe2 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,6 +80,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 654202941d30..f0568a7e6de9 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -173,10 +173,8 @@ static void wacom_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_features *features = &wacom->wacom_wac.features;
- bool finger = (field->logical == HID_DG_FINGER) ||
- (field->physical == HID_DG_FINGER);
- bool pen = (field->logical == HID_DG_STYLUS) ||
- (field->physical == HID_DG_STYLUS);
+ bool finger = WACOM_FINGER_FIELD(field);
+ bool pen = WACOM_PEN_FIELD(field);
/*
* Requiring Stylus Usage will ignore boot mouse
@@ -405,6 +403,9 @@ static int wacom_query_tablet_data(struct hid_device *hdev,
else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) {
return wacom_set_device_mode(hdev, 18, 3, 2);
}
+ else if (features->type == WACOM_27QHDT) {
+ return wacom_set_device_mode(hdev, 131, 3, 2);
+ }
} else if (features->device_type == BTN_TOOL_PEN) {
if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
return wacom_set_device_mode(hdev, 2, 2, 2);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index ac7447c7b82e..1a6507999a65 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -15,7 +15,6 @@
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
-#include <linux/hid.h>
/* resolution for penabled devices */
#define WACOM_PL_RES 20
@@ -444,9 +443,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
- if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
- wacom->shared->stylus_in_proximity = true;
-
/* serial number of the tool */
wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
(data[4] << 20) + (data[5] << 12) +
@@ -535,24 +531,46 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
return 1;
}
+ /*
+ * don't report events for invalid data
+ */
/* older I4 styli don't work with new Cintiqs */
- if (!((wacom->id[idx] >> 20) & 0x01) &&
- (features->type == WACOM_21UX2))
+ if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ (features->type == WACOM_21UX2)) ||
+ /* Only large Intuos models support the Lens Cursor */
+ (wacom->tool[idx] == BTN_TOOL_LENS &&
+ (features->type == INTUOS3 ||
+ features->type == INTUOS3S ||
+ features->type == INTUOS4 ||
+ features->type == INTUOS4S ||
+ features->type == INTUOS5 ||
+ features->type == INTUOS5S ||
+ features->type == INTUOSPM ||
+ features->type == INTUOSPS)) ||
+ /* Cintiq doesn't send data when RDY bit isn't set */
+ (features->type == CINTIQ && !(data[1] & 0x40)))
return 1;
- /* Range Report */
- if ((data[1] & 0xfe) == 0x20) {
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
+ wacom->shared->stylus_in_proximity = true;
+
+ /* in Range while exiting */
+ if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
input_report_key(input, BTN_TOUCH, 0);
input_report_abs(input, ABS_PRESSURE, 0);
input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
- if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
- wacom->shared->stylus_in_proximity = true;
+ return 2;
}
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
wacom->shared->stylus_in_proximity = false;
+ wacom->reporting_data = false;
+
+ /* don't report exit if we don't know the ID */
+ if (!wacom->id[idx])
+ return 1;
/*
* Reset all states otherwise we lose the initial states
@@ -586,6 +604,11 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
wacom->id[idx] = 0;
return 2;
}
+
+ /* don't report other events if we don't know the ID */
+ if (!wacom->id[idx])
+ return 1;
+
return 0;
}
@@ -633,6 +656,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
data[0] != WACOM_REPORT_INTUOSREAD &&
data[0] != WACOM_REPORT_INTUOSWRITE &&
data[0] != WACOM_REPORT_INTUOSPAD &&
+ data[0] != WACOM_REPORT_CINTIQ &&
+ data[0] != WACOM_REPORT_CINTIQPAD &&
data[0] != WACOM_REPORT_INTUOS5PAD) {
dev_dbg(input->dev.parent,
"%s: received unknown report #%d\n", __func__, data[0]);
@@ -644,7 +669,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
idx = data[1] & 0x01;
/* pad packets. Works as a second tool and is always in prox */
- if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD) {
+ if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD ||
+ data[0] == WACOM_REPORT_CINTIQPAD) {
input = wacom->pad_input;
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
input_report_key(input, BTN_0, (data[2] & 0x01));
@@ -744,6 +770,14 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else {
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type == WACOM_27QHD) {
+ input_report_key(input, KEY_PROG1, data[2] & 0x01);
+ input_report_key(input, KEY_PROG2, data[2] & 0x02);
+ input_report_key(input, KEY_PROG3, data[2] & 0x04);
+
+ input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
+ input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
+ input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
} else if (features->type == CINTIQ_HYBRID) {
/*
* Do not send hardware buttons under Android. They
@@ -760,6 +794,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */
input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */
input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */
+
+ if (data[4] | (data[3] & 0x01)) {
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
int i;
@@ -843,28 +883,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
if (result)
return result - 1;
- /* don't proceed if we don't know the ID */
- if (!wacom->id[idx])
- return 0;
-
- /* Only large Intuos support Lense Cursor */
- if (wacom->tool[idx] == BTN_TOOL_LENS &&
- (features->type == INTUOS3 ||
- features->type == INTUOS3S ||
- features->type == INTUOS4 ||
- features->type == INTUOS4S ||
- features->type == INTUOS5 ||
- features->type == INTUOS5S ||
- features->type == INTUOSPM ||
- features->type == INTUOSPS)) {
-
- return 0;
- }
-
- /* Cintiq doesn't send data when RDY bit isn't set */
- if (features->type == CINTIQ && !(data[1] & 0x40))
- return 0;
-
if (features->type >= INTUOS3S) {
input_report_abs(input, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
input_report_abs(input, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
@@ -951,6 +969,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
input_report_key(input, wacom->tool[idx], 1);
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ wacom->reporting_data = true;
return 1;
}
@@ -1019,8 +1038,20 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int i;
- int current_num_contacts = data[61];
+ int current_num_contacts = 0;
int contacts_to_send = 0;
+ int num_contacts_left = 4; /* maximum contacts per packet */
+ int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
+ int y_offset = 2;
+
+ if (wacom->features.type == WACOM_27QHDT) {
+ current_num_contacts = data[63];
+ num_contacts_left = 10;
+ byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
+ y_offset = 0;
+ } else {
+ current_num_contacts = data[61];
+ }
/*
* First packet resets the counter since only the first
@@ -1029,12 +1060,11 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
if (current_num_contacts)
wacom->num_contacts_left = current_num_contacts;
- /* There are at most 4 contacts per packet */
- contacts_to_send = min(4, wacom->num_contacts_left);
+ contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
for (i = 0; i < contacts_to_send; i++) {
- int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
- bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
+ int offset = (byte_per_packet * i) + 1;
+ bool touch = (data[offset] & 0x1) && !wacom->shared->stylus_in_proximity;
int slot = input_mt_get_slot_by_key(input, data[offset + 1]);
if (slot < 0)
@@ -1044,18 +1074,23 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
if (touch) {
int t_x = get_unaligned_le16(&data[offset + 2]);
- int c_x = get_unaligned_le16(&data[offset + 4]);
- int t_y = get_unaligned_le16(&data[offset + 6]);
- int c_y = get_unaligned_le16(&data[offset + 8]);
- int w = get_unaligned_le16(&data[offset + 10]);
- int h = get_unaligned_le16(&data[offset + 12]);
+ int t_y = get_unaligned_le16(&data[offset + 4 + y_offset]);
input_report_abs(input, ABS_MT_POSITION_X, t_x);
input_report_abs(input, ABS_MT_POSITION_Y, t_y);
- input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h));
- input_report_abs(input, ABS_MT_WIDTH_MAJOR, min(w, h) + int_dist(t_x, t_y, c_x, c_y));
- input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
- input_report_abs(input, ABS_MT_ORIENTATION, w > h);
+
+ if (wacom->features.type != WACOM_27QHDT) {
+ int c_x = get_unaligned_le16(&data[offset + 4]);
+ int c_y = get_unaligned_le16(&data[offset + 8]);
+ int w = get_unaligned_le16(&data[offset + 10]);
+ int h = get_unaligned_le16(&data[offset + 12]);
+
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h));
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ min(w, h) + int_dist(t_x, t_y, c_x, c_y));
+ input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
+ input_report_abs(input, ABS_MT_ORIENTATION, w > h);
+ }
}
}
input_mt_report_pointer_emulation(input, true);
@@ -1064,6 +1099,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
if (wacom->num_contacts_left <= 0)
wacom->num_contacts_left = 0;
+ wacom->shared->touch_down = (wacom->num_contacts_left > 0);
return 1;
}
@@ -1092,7 +1128,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
for (i = 0; i < contacts_to_send; i++) {
int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
- bool touch = data[offset] & 0x1;
+ bool touch = (data[offset] & 0x1) && !wacom->shared->stylus_in_proximity;
int id = get_unaligned_le16(&data[offset + 1]);
int slot = input_mt_get_slot_by_key(input, id);
@@ -1114,6 +1150,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
if (wacom->num_contacts_left < 0)
wacom->num_contacts_left = 0;
+ wacom->shared->touch_down = (wacom->num_contacts_left > 0);
return 1;
}
@@ -1514,13 +1551,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(hdev);
}
-#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
- ((f)->physical == HID_DG_STYLUS) || \
- ((f)->application == HID_DG_PEN))
-#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
- ((f)->physical == HID_DG_FINGER) || \
- ((f)->application == HID_DG_TOUCHSCREEN))
-
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -1891,6 +1921,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case WACOM_21UX2:
case WACOM_22HD:
case WACOM_24HD:
+ case WACOM_27QHD:
case DTK:
case CINTIQ_HYBRID:
sync = wacom_intuos_irq(wacom_wac);
@@ -1901,6 +1932,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
break;
case WACOM_24HDT:
+ case WACOM_27QHDT:
sync = wacom_24hdt_irq(wacom_wac);
break;
@@ -2086,32 +2118,17 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
wacom_abs_set_axis(input_dev, wacom_wac);
switch (features->type) {
- case WACOM_MO:
- case WACOM_G4:
- /* fall through */
-
- case GRAPHIRE:
- input_set_capability(input_dev, EV_REL, REL_WHEEL);
-
- __set_bit(BTN_LEFT, input_dev->keybit);
- __set_bit(BTN_RIGHT, input_dev->keybit);
- __set_bit(BTN_MIDDLE, input_dev->keybit);
-
- __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
- __set_bit(BTN_TOOL_PEN, input_dev->keybit);
- __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
- __set_bit(BTN_STYLUS, input_dev->keybit);
- __set_bit(BTN_STYLUS2, input_dev->keybit);
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- break;
-
case GRAPHIRE_BT:
__clear_bit(ABS_MISC, input_dev->absbit);
+
+ case WACOM_MO:
+ case WACOM_G4:
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
0, 0);
+ /* fall through */
+ case GRAPHIRE:
input_set_capability(input_dev, EV_REL, REL_WHEEL);
__set_bit(BTN_LEFT, input_dev->keybit);
@@ -2127,31 +2144,15 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case WACOM_27QHD:
case WACOM_24HD:
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- input_abs_set_res(input_dev, ABS_Z, 287);
- input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
- /* fall through */
-
case DTK:
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
- wacom_setup_cintiq(wacom_wac);
- break;
-
case WACOM_22HD:
case WACOM_21UX2:
case WACOM_BEE:
case CINTIQ:
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- input_abs_set_res(input_dev, ABS_Z, 287);
-
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
- wacom_setup_cintiq(wacom_wac);
- break;
-
case WACOM_13HD:
+ case CINTIQ_HYBRID:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_abs_set_res(input_dev, ABS_Z, 287);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
@@ -2161,6 +2162,10 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
case INTUOS3:
case INTUOS3L:
case INTUOS3S:
+ case INTUOS4:
+ case INTUOS4WL:
+ case INTUOS4L:
+ case INTUOS4S:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_abs_set_res(input_dev, ABS_Z, 287);
/* fall through */
@@ -2199,17 +2204,6 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
}
break;
- case INTUOS4:
- case INTUOS4WL:
- case INTUOS4L:
- case INTUOS4S:
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- input_abs_set_res(input_dev, ABS_Z, 287);
- wacom_setup_intuos(wacom_wac);
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- break;
-
case WACOM_24HDT:
if (features->device_type == BTN_TOOL_FINGER) {
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
@@ -2219,6 +2213,7 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
}
/* fall through */
+ case WACOM_27QHDT:
case MTSCREEN:
case MTTPC:
case MTTPC_B:
@@ -2305,14 +2300,6 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
0, 0);
}
break;
-
- case CINTIQ_HYBRID:
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- input_abs_set_res(input_dev, ABS_Z, 287);
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
- wacom_setup_cintiq(wacom_wac);
- break;
}
return 0;
}
@@ -2374,6 +2361,19 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
break;
+ case WACOM_27QHD:
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_X, -2048, 2048, 0, 0);
+ input_abs_set_res(input_dev, ABS_X, 1024); /* points/g */
+ input_set_abs_params(input_dev, ABS_Y, -2048, 2048, 0, 0);
+ input_abs_set_res(input_dev, ABS_Y, 1024);
+ input_set_abs_params(input_dev, ABS_Z, -2048, 2048, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 1024);
+ __set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+ break;
+
case DTK:
for (i = 0; i < 6; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
@@ -2724,6 +2724,18 @@ static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x32A =
+ { "Wacom Cintiq 27QHD", 119740, 67520, 2047,
+ 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x32B =
+ { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+ WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
+static const struct wacom_features wacom_features_0x32C =
+ { "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32B, .touch_max = 10 };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", 87200, 65600, 1023, 63,
CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -3090,6 +3102,9 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x315) },
{ USB_DEVICE_WACOM(0x317) },
{ USB_DEVICE_WACOM(0x323) },
+ { USB_DEVICE_WACOM(0x32A) },
+ { USB_DEVICE_WACOM(0x32B) },
+ { USB_DEVICE_WACOM(0x32C) },
{ USB_DEVICE_WACOM(0x32F) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index bfad815cda8a..021ee1c1980a 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -10,9 +10,10 @@
#define WACOM_WAC_H
#include <linux/types.h>
+#include <linux/hid.h>
/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 68
+#define WACOM_PKGLEN_MAX 192
#define WACOM_NAME_MAX 64
@@ -36,6 +37,7 @@
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
#define WACOM_BYTES_PER_24HDT_PACKET 14
+#define WACOM_BYTES_PER_QHDTHID_PACKET 6
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -57,6 +59,8 @@
#define WACOM_REPORT_TPCMT 13
#define WACOM_REPORT_TPCMT2 3
#define WACOM_REPORT_TPCHID 15
+#define WACOM_REPORT_CINTIQ 16
+#define WACOM_REPORT_CINTIQPAD 17
#define WACOM_REPORT_TPCST 16
#define WACOM_REPORT_DTUS 17
#define WACOM_REPORT_TPC1FGE 18
@@ -71,6 +75,14 @@
#define WACOM_QUIRK_MONITOR 0x0008
#define WACOM_QUIRK_BATTERY 0x0010
+#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
+ ((f)->physical == HID_DG_STYLUS) || \
+ ((f)->physical == HID_DG_PEN) || \
+ ((f)->application == HID_DG_PEN))
+#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
+ ((f)->physical == HID_DG_FINGER) || \
+ ((f)->application == HID_DG_TOUCHSCREEN))
+
enum {
PENPARTNER = 0,
GRAPHIRE,
@@ -100,6 +112,7 @@ enum {
WACOM_22HD,
DTK,
WACOM_24HD,
+ WACOM_27QHD,
CINTIQ_HYBRID,
CINTIQ,
WACOM_BEE,
@@ -108,6 +121,7 @@ enum {
WIRELESS,
BAMBOO_PT,
WACOM_24HDT,
+ WACOM_27QHDT,
TABLETPC, /* add new TPC below */
TABLETPCE,
TABLETPC2FG,
@@ -180,6 +194,7 @@ struct wacom_wac {
int tool[2];
int id[2];
__u32 serial[2];
+ bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
struct input_dev *input;
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index f0c21458962c..eb4dc63dbc93 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -162,6 +162,7 @@ static int nokia_modem_probe(struct device *dev)
return -ENOMEM;
}
dev_set_drvdata(dev, modem);
+ modem->device = dev;
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 433f72a1c006..2978f5ee8d2a 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,14 +73,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
unsigned long flags;
int ret, t, err = 0;
- spin_lock_irqsave(&newchannel->sc_lock, flags);
+ spin_lock_irqsave(&newchannel->lock, flags);
if (newchannel->state == CHANNEL_OPEN_STATE) {
newchannel->state = CHANNEL_OPENING_STATE;
} else {
- spin_unlock_irqrestore(&newchannel->sc_lock, flags);
+ spin_unlock_irqrestore(&newchannel->lock, flags);
return -EINVAL;
}
- spin_unlock_irqrestore(&newchannel->sc_lock, flags);
+ spin_unlock_irqrestore(&newchannel->lock, flags);
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
unsigned long flags;
int ret = 0;
- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
- atomic_inc(&vmbus_connection.next_gpadl_handle);
+ next_gpadl_handle =
+ (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
if (ret)
@@ -686,6 +686,50 @@ EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
/*
* vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
* using a GPADL Direct packet type.
+ * The buffer includes the vmbus descriptor.
+ */
+int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ struct vmbus_packet_mpb_array *desc,
+ u32 desc_size,
+ void *buffer, u32 bufferlen, u64 requestid)
+{
+ int ret;
+ u32 packetlen;
+ u32 packetlen_aligned;
+ struct kvec bufferlist[3];
+ u64 aligned_data = 0;
+ bool signal = false;
+
+ packetlen = desc_size + bufferlen;
+ packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+
+ /* Setup the descriptor */
+ desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
+ desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ desc->dataoffset8 = desc_size >> 3; /* in 8-byte granularity */
+ desc->length8 = (u16)(packetlen_aligned >> 3);
+ desc->transactionid = requestid;
+ desc->rangecount = 1;
+
+ bufferlist[0].iov_base = desc;
+ bufferlist[0].iov_len = desc_size;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
+
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+
+ if (ret == 0 && signal)
+ vmbus_setevent(channel);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
+
+/*
+ * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * using a GPADL Direct packet type.
*/
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *multi_pagebuffer,
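vmbus_sendpacket_mpb_desc() above rounds the combined descriptor-plus-payload length up to a multiple of 8 bytes; the third kvec entry points at a zero quadword and carries only the pad length, so the ring buffer always receives 8-byte aligned packets. A minimal sketch of that padding arithmetic, with hypothetical sizes:

    #include <stdio.h>

    #define ALIGN8(x) (((x) + 7UL) & ~7UL)

    int main(void)
    {
            unsigned long desc_size = 0x26; /* hypothetical descriptor size */
            unsigned long bufferlen = 0x15; /* hypothetical payload size */
            unsigned long packetlen = desc_size + bufferlen;        /* 0x3b */
            unsigned long packetlen_aligned = ALIGN8(packetlen);    /* 0x40 */

            /* this many zero bytes ride in the third kvec entry */
            printf("pad = %lu bytes\n", packetlen_aligned - packetlen);
            return 0;
    }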
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2c59f030546b..3736f71bdec5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -146,7 +146,7 @@ static struct vmbus_channel *alloc_channel(void)
return NULL;
spin_lock_init(&channel->inbound_lock);
- spin_lock_init(&channel->sc_lock);
+ spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->sc_list);
INIT_LIST_HEAD(&channel->percpu_list);
@@ -246,9 +246,9 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
} else {
primary_channel = channel->primary_channel;
- spin_lock_irqsave(&primary_channel->sc_lock, flags);
+ spin_lock_irqsave(&primary_channel->lock, flags);
list_del(&channel->sc_list);
- spin_unlock_irqrestore(&primary_channel->sc_lock, flags);
+ spin_unlock_irqrestore(&primary_channel->lock, flags);
}
free_channel(channel);
}
@@ -279,9 +279,6 @@ static void vmbus_process_offer(struct work_struct *work)
int ret;
unsigned long flags;
- /* The next possible work is rescind handling */
- INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
-
/* Make sure this is a new offer */
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
@@ -323,9 +320,9 @@ static void vmbus_process_offer(struct work_struct *work)
* Process the sub-channel.
*/
newchannel->primary_channel = channel;
- spin_lock_irqsave(&channel->sc_lock, flags);
+ spin_lock_irqsave(&channel->lock, flags);
list_add_tail(&newchannel->sc_list, &channel->sc_list);
- spin_unlock_irqrestore(&channel->sc_lock, flags);
+ spin_unlock_irqrestore(&channel->lock, flags);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
@@ -341,11 +338,10 @@ static void vmbus_process_offer(struct work_struct *work)
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
- return;
+ goto done_init_rescind;
}
- free_channel(newchannel);
- return;
+ goto err_free_chan;
}
/*
@@ -364,6 +360,8 @@ static void vmbus_process_offer(struct work_struct *work)
&newchannel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_instance,
newchannel);
+ if (!newchannel->device_obj)
+ goto err_free_chan;
/*
* Add the new device to the bus. This will kick off device-driver
@@ -379,9 +377,19 @@ static void vmbus_process_offer(struct work_struct *work)
list_del(&newchannel->listentry);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
kfree(newchannel->device_obj);
-
- free_channel(newchannel);
+ goto err_free_chan;
}
+done_init_rescind:
+ spin_lock_irqsave(&newchannel->lock, flags);
+ /* The next possible work is rescind handling */
+ INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
+ /* Check if rescind offer was already received */
+ if (newchannel->rescind)
+ queue_work(newchannel->controlwq, &newchannel->work);
+ spin_unlock_irqrestore(&newchannel->lock, flags);
+ return;
+err_free_chan:
+ free_channel(newchannel);
}
enum {
@@ -516,6 +524,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
+ unsigned long flags;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
channel = relid2channel(rescind->child_relid);
@@ -524,11 +533,20 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
/* Just return here, no channel found */
return;
+ spin_lock_irqsave(&channel->lock, flags);
channel->rescind = true;
+ /*
+ * channel->work.func != vmbus_process_rescind_offer means we are still
+ * processing the offer request and the rescind offer processing should
+ * be postponed. It will be done at the very end of vmbus_process_offer()
+ * as the rescind flag is checked there.
+ */
+ if (channel->work.func == vmbus_process_rescind_offer)
+ /* work is initialized for vmbus_process_rescind_offer() from
+ * vmbus_process_offer() where the channel got created */
+ queue_work(channel->controlwq, &channel->work);
- /* work is initialized for vmbus_process_rescind_offer() from
- * vmbus_process_offer() where the channel got created */
- queue_work(channel->controlwq, &channel->work);
+ spin_unlock_irqrestore(&channel->lock, flags);
}
/*
@@ -815,7 +833,7 @@ cleanup:
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
struct list_head *cur, *tmp;
- int cur_cpu = hv_context.vp_index[smp_processor_id()];
+ int cur_cpu;
struct vmbus_channel *cur_channel;
struct vmbus_channel *outgoing_channel = primary;
int cpu_distance, new_cpu_distance;
@@ -823,6 +841,8 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
if (list_empty(&primary->sc_list))
return outgoing_channel;
+ cur_cpu = hv_context.vp_index[get_cpu()];
+ put_cpu();
list_for_each_safe(cur, tmp, &primary->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
if (cur_channel->state != CHANNEL_OPENED_STATE)
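The channel_mgmt.c hunks above close a race between offer and rescind processing: vmbus_onoffer_rescind() now records channel->rescind under channel->lock and only queues the work if work.func already points at vmbus_process_rescind_offer, while vmbus_process_offer() queues it itself at done_init_rescind when the flag is already set. A minimal user-space sketch of the same hand-off (pthreads standing in for the VMBus workqueue; all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct channel {
        pthread_mutex_t lock;
        bool rescind;        /* rescind message already seen */
        bool work_ready;     /* offer processing finished initialising the work */
};

static void queue_rescind_work(struct channel *ch)
{
        /* stands in for queue_work(channel->controlwq, &channel->work) */
        printf("rescind work queued\n");
}

/* mirrors vmbus_onoffer_rescind() */
static void on_rescind(struct channel *ch)
{
        pthread_mutex_lock(&ch->lock);
        ch->rescind = true;
        if (ch->work_ready)          /* offer path already done: queue now */
                queue_rescind_work(ch);
        /* else: the offer path will queue it when it finishes */
        pthread_mutex_unlock(&ch->lock);
}

/* mirrors the done_init_rescind tail of vmbus_process_offer() */
static void finish_offer(struct channel *ch)
{
        pthread_mutex_lock(&ch->lock);
        ch->work_ready = true;       /* INIT_WORK(..., vmbus_process_rescind_offer) */
        if (ch->rescind)             /* rescind arrived while we were still busy */
                queue_rescind_work(ch);
        pthread_mutex_unlock(&ch->lock);
}

int main(void)
{
        struct channel ch = { PTHREAD_MUTEX_INITIALIZER, false, false };

        on_rescind(&ch);   /* early rescind: nothing queued yet */
        finish_offer(&ch); /* queued exactly once, here */
        return 0;
}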
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index e206619b946e..a63a795300b9 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -80,8 +80,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
- if (version == VERSION_WIN8_1)
- msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+ if (version == VERSION_WIN8_1) {
+ msg->target_vcpu = hv_context.vp_index[get_cpu()];
+ put_cpu();
+ }
/*
* Add to list before we send the request since we may
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3e4235c7a47f..50e51a51ff8b 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -28,7 +28,9 @@
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
+#include <linux/clockchips.h>
#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
/* The one and only */
@@ -37,6 +39,10 @@ struct hv_context hv_context = {
.hypercall_page = NULL,
};
+#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
+#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
+#define HV_MIN_DELTA_TICKS 1
+
/*
* query_hypervisor_info - Get version info of the windows hypervisor
*/
@@ -144,6 +150,8 @@ int hv_init(void)
sizeof(int) * NR_CPUS);
memset(hv_context.event_dpc, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.clk_evt, 0,
+ sizeof(void *) * NR_CPUS);
max_leaf = query_hypervisor_info();
@@ -258,10 +266,63 @@ u16 hv_signal_event(void *con_id)
return status;
}
+static int hv_ce_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ cycle_t current_tick;
+
+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+ current_tick += delta;
+ wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
+ return 0;
+}
+
+static void hv_ce_setmode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ union hv_timer_config timer_cfg;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* unsupported */
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ timer_cfg.enable = 1;
+ timer_cfg.auto_enable = 1;
+ timer_cfg.sintx = VMBUS_MESSAGE_SINT;
+ wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
+ wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
+{
+ dev->name = "Hyper-V clockevent";
+ dev->features = CLOCK_EVT_FEAT_ONESHOT;
+ dev->cpumask = cpumask_of(cpu);
+ dev->rating = 1000;
+ dev->owner = THIS_MODULE;
+
+ dev->set_mode = hv_ce_setmode;
+ dev->set_next_event = hv_ce_set_next_event;
+}
+
int hv_synic_alloc(void)
{
size_t size = sizeof(struct tasklet_struct);
+ size_t ced_size = sizeof(struct clock_event_device);
int cpu;
for_each_online_cpu(cpu) {
@@ -272,6 +333,13 @@ int hv_synic_alloc(void)
}
tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+ hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
+ if (hv_context.clk_evt[cpu] == NULL) {
+ pr_err("Unable to allocate clock event device\n");
+ goto err;
+ }
+ hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
+
hv_context.synic_message_page[cpu] =
(void *)get_zeroed_page(GFP_ATOMIC);
@@ -305,6 +373,7 @@ err:
static void hv_synic_free_cpu(int cpu)
{
kfree(hv_context.event_dpc[cpu]);
+ kfree(hv_context.clk_evt[cpu]);
if (hv_context.synic_event_page[cpu])
free_page((unsigned long)hv_context.synic_event_page[cpu]);
if (hv_context.synic_message_page[cpu])
@@ -388,6 +457,15 @@ void hv_synic_init(void *arg)
hv_context.vp_index[cpu] = (u32)vp_index;
INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
+
+ /*
+ * Register the per-cpu clockevent source.
+ */
+ if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ clockevents_config_and_register(hv_context.clk_evt[cpu],
+ HV_TIMER_FREQUENCY,
+ HV_MIN_DELTA_TICKS,
+ HV_MAX_MAX_DELTA_TICKS);
return;
}
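The hv.c additions program STIMER0 with an absolute expiry: the current reference count (HV_X64_MSR_TIME_REF_COUNT) plus the requested delta, in HV_TIMER_FREQUENCY ticks of 100 ns each, clamped by the min/max deltas passed to clockevents_config_and_register(). A small host-side sketch (not kernel code) of the unit conversion:

#include <stdio.h>
#include <stdint.h>

#define HV_TIMER_FREQUENCY      (10 * 1000 * 1000)  /* ticks per second, 100 ns each */
#define HV_MAX_MAX_DELTA_TICKS  0xffffffffULL
#define HV_MIN_DELTA_TICKS      1ULL

/* Convert a delay in microseconds into STIMER ticks, clamped to the
 * limits registered with the clockevents core above. */
static uint64_t delay_us_to_ticks(uint64_t us)
{
        uint64_t ticks = us * (HV_TIMER_FREQUENCY / 1000000); /* 10 ticks per us */

        if (ticks < HV_MIN_DELTA_TICKS)
                ticks = HV_MIN_DELTA_TICKS;
        if (ticks > HV_MAX_MAX_DELTA_TICKS)
                ticks = HV_MAX_MAX_DELTA_TICKS;
        return ticks;
}

int main(void)
{
        /* 1 ms -> 10000 ticks, 4 ms -> 40000 ticks */
        printf("%llu\n", (unsigned long long)delay_us_to_ticks(1000));
        printf("%llu\n", (unsigned long long)delay_us_to_ticks(4000));
        return 0;
}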
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b958ded8ac7e..ff169386b2c7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -533,6 +533,9 @@ struct hv_dynmem_device {
*/
struct task_struct *thread;
+ struct mutex ha_region_mutex;
+ struct completion waiter_event;
+
/*
* A list of hot-add regions.
*/
@@ -549,7 +552,59 @@ struct hv_dynmem_device {
static struct hv_dynmem_device dm_device;
static void post_status(struct hv_dynmem_device *dm);
+
#ifdef CONFIG_MEMORY_HOTPLUG
+static void acquire_region_mutex(bool trylock)
+{
+ if (trylock) {
+ reinit_completion(&dm_device.waiter_event);
+ while (!mutex_trylock(&dm_device.ha_region_mutex))
+ wait_for_completion(&dm_device.waiter_event);
+ } else {
+ mutex_lock(&dm_device.ha_region_mutex);
+ }
+}
+
+static void release_region_mutex(bool trylock)
+{
+ if (trylock) {
+ mutex_unlock(&dm_device.ha_region_mutex);
+ } else {
+ mutex_unlock(&dm_device.ha_region_mutex);
+ complete(&dm_device.waiter_event);
+ }
+}
+
+static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ switch (val) {
+ case MEM_GOING_ONLINE:
+ acquire_region_mutex(true);
+ break;
+
+ case MEM_ONLINE:
+ case MEM_CANCEL_ONLINE:
+ release_region_mutex(true);
+ if (dm_device.ha_waiting) {
+ dm_device.ha_waiting = false;
+ complete(&dm_device.ol_waitevent);
+ }
+ break;
+
+ case MEM_GOING_OFFLINE:
+ case MEM_OFFLINE:
+ case MEM_CANCEL_OFFLINE:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hv_memory_nb = {
+ .notifier_call = hv_memory_notifier,
+ .priority = 0
+};
+
static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
@@ -591,6 +646,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
init_completion(&dm_device.ol_waitevent);
dm_device.ha_waiting = true;
+ release_region_mutex(false);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
(HA_CHUNK << PAGE_SHIFT));
@@ -619,6 +675,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
* have not been "onlined" within the allowed time.
*/
wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
+ acquire_region_mutex(false);
post_status(&dm_device);
}
@@ -632,11 +689,6 @@ static void hv_online_page(struct page *pg)
unsigned long cur_start_pgp;
unsigned long cur_end_pgp;
- if (dm_device.ha_waiting) {
- dm_device.ha_waiting = false;
- complete(&dm_device.ol_waitevent);
- }
-
list_for_each(cur, &dm_device.ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
cur_start_pgp = (unsigned long)
@@ -834,6 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
resp.hdr.size = sizeof(struct dm_hot_add_response);
#ifdef CONFIG_MEMORY_HOTPLUG
+ acquire_region_mutex(false);
pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
@@ -865,6 +918,7 @@ static void hot_add_req(struct work_struct *dummy)
if (do_hot_add)
resp.page_count = process_hot_add(pg_start, pfn_cnt,
rg_start, rg_sz);
+ release_region_mutex(false);
#endif
/*
* The result field of the response structure has the
@@ -928,9 +982,8 @@ static unsigned long compute_balloon_floor(void)
* 128 72 (1/2)
* 512 168 (1/4)
* 2048 360 (1/8)
- * 8192 552 (1/32)
- * 32768 1320
- * 131072 4392
+ * 8192 768 (1/16)
+ * 32768 1536 (1/32)
*/
if (totalram_pages < MB2PAGES(128))
min_pages = MB2PAGES(8) + (totalram_pages >> 1);
@@ -938,8 +991,10 @@ static unsigned long compute_balloon_floor(void)
min_pages = MB2PAGES(40) + (totalram_pages >> 2);
else if (totalram_pages < MB2PAGES(2048))
min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+ else if (totalram_pages < MB2PAGES(8192))
+ min_pages = MB2PAGES(256) + (totalram_pages >> 4);
else
- min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+ min_pages = MB2PAGES(512) + (totalram_pages >> 5);
#undef MB2PAGES
return min_pages;
}
@@ -1171,7 +1226,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
- post_status(&dm_device);
+ complete(&dm_device.config_event);
}
if (req->more_pages == 1)
@@ -1195,19 +1250,16 @@ static void balloon_onchannelcallback(void *context);
static int dm_thread_func(void *dm_dev)
{
struct hv_dynmem_device *dm = dm_dev;
- int t;
while (!kthread_should_stop()) {
- t = wait_for_completion_interruptible_timeout(
+ wait_for_completion_interruptible_timeout(
&dm_device.config_event, 1*HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
*/
-
- if (t == 0)
- post_status(dm);
-
+ reinit_completion(&dm_device.config_event);
+ post_status(dm);
}
return 0;
@@ -1387,7 +1439,9 @@ static int balloon_probe(struct hv_device *dev,
dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
init_completion(&dm_device.host_event);
init_completion(&dm_device.config_event);
+ init_completion(&dm_device.waiter_event);
INIT_LIST_HEAD(&dm_device.ha_region_list);
+ mutex_init(&dm_device.ha_region_mutex);
INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
dm_device.host_specified_ha_region = false;
@@ -1401,6 +1455,7 @@ static int balloon_probe(struct hv_device *dev,
#ifdef CONFIG_MEMORY_HOTPLUG
set_online_page_callback(&hv_online_page);
+ register_memory_notifier(&hv_memory_nb);
#endif
hv_set_drvdata(dev, &dm_device);
@@ -1519,6 +1574,7 @@ static int balloon_remove(struct hv_device *dev)
kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
restore_online_page_callback(&hv_online_page);
+ unregister_memory_notifier(&hv_memory_nb);
#endif
list_for_each_safe(cur, tmp, &dm->ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
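The compute_balloon_floor() hunk inserts a 1/16 band for guests between 2 GiB and 8 GiB and raises the constant of the top 1/32 band from 296 MiB to 512 MiB. A standalone sketch of the new piecewise floor, assuming the usual 4 KiB page size so that MB2PAGES(mb) == mb * 256:

#include <stdio.h>

#define MB2PAGES(mb) ((mb) * 256UL)   /* 4 KiB pages assumed */

/* Mirrors the updated compute_balloon_floor(), returning pages. */
static unsigned long balloon_floor_pages(unsigned long totalram_pages)
{
        unsigned long min_pages;

        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(256) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(512) + (totalram_pages >> 5);

        return min_pages;
}

int main(void)
{
        /* A 6 GiB guest keeps a floor of 256 + 6144/16 = 640 MiB,
         * a 32 GiB guest 512 + 32768/32 = 1536 MiB. */
        printf("%lu MiB\n", balloon_floor_pages(MB2PAGES(6144)) / 256);
        printf("%lu MiB\n", balloon_floor_pages(MB2PAGES(32768)) / 256);
        return 0;
}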
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 23b2ce294c4c..cd453e4b2a07 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -86,6 +86,18 @@ static void fcopy_work_func(struct work_struct *dummy)
* process the pending transaction.
*/
fcopy_respond_to_host(HV_E_FAIL);
+
+ /* In case the user-space daemon crashes, hangs, or is killed, we
+ * need to down the semaphore, otherwise, after the daemon starts next
+ * time, the obsolete data in fcopy_transaction.message or
+ * fcopy_transaction.fcopy_msg will be used immediately.
+ *
+ * NOTE: fcopy_read() happens to get the semaphore (very rare)? We're
+ * still OK, because we've reported the failure to the host.
+ */
+ if (down_trylock(&fcopy_transaction.read_sema))
+ ;
+
}
static int fcopy_handle_handshake(u32 version)
@@ -344,6 +356,14 @@ static int fcopy_open(struct inode *inode, struct file *f)
return 0;
}
+/* XXX: there are still some tricky corner cases, e.g.,
+ * 1) In a SMP guest, when fcopy_release() runs between
+ * schedule_delayed_work() and fcopy_send_data(), there is
+ * still a chance an obsolete message will be queued.
+ *
+ * 2) When the fcopy daemon is running, if we unload the driver,
+ * we'll notice a kernel oops when we kill the daemon later.
+ */
static int fcopy_release(struct inode *inode, struct file *f)
{
/*
@@ -351,6 +371,13 @@ static int fcopy_release(struct inode *inode, struct file *f)
*/
in_hand_shake = true;
opened = false;
+
+ if (cancel_delayed_work_sync(&fcopy_work)) {
+ /* We haven't up()-ed the semaphore (very rare)? */
+ if (down_trylock(&fcopy_transaction.read_sema))
+ ;
+ fcopy_respond_to_host(HV_E_FAIL);
+ }
return 0;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index c386d8dc7223..44b1c9424712 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -178,6 +178,23 @@ struct hv_message_header {
};
};
+/*
+ * Timer configuration register.
+ */
+union hv_timer_config {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 periodic:1;
+ u64 lazy:1;
+ u64 auto_enable:1;
+ u64 reserved_z0:12;
+ u64 sintx:4;
+ u64 reserved_z1:44;
+ };
+};
+
+
/* Define timer message payload structure. */
struct hv_timer_message_payload {
u32 timer_index;
@@ -519,6 +536,10 @@ struct hv_context {
* buffer to post messages to the host.
*/
void *post_msg_page[NR_CPUS];
+ /*
+ * Support PV clockevent device.
+ */
+ struct clock_event_device *clk_evt[NR_CPUS];
};
extern struct hv_context hv_context;
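union hv_timer_config added above packs the STIMER0 configuration MSR: enable in bit 0, periodic/lazy/auto_enable in bits 1-3, and the target SINT in bits 16-19, so the one-shot setup in hv_ce_setmode() amounts to enable | auto_enable | (sintx << 16). A portable sketch using explicit shifts (bitfield layout is compiler-specific); the SINT value of 2 is only illustrative:

#include <stdio.h>
#include <stdint.h>

/* Bit positions taken from the field widths in union hv_timer_config:
 * enable:1, periodic:1, lazy:1, auto_enable:1, reserved:12, sintx:4. */
#define HV_STIMER_ENABLE       (1ULL << 0)
#define HV_STIMER_PERIODIC     (1ULL << 1)
#define HV_STIMER_LAZY         (1ULL << 2)
#define HV_STIMER_AUTO_ENABLE  (1ULL << 3)
#define HV_STIMER_SINT_SHIFT   16

static uint64_t stimer_oneshot_config(unsigned int sintx)
{
        /* Same fields hv_ce_setmode() sets for CLOCK_EVT_MODE_ONESHOT. */
        return HV_STIMER_ENABLE | HV_STIMER_AUTO_ENABLE |
               ((uint64_t)sintx << HV_STIMER_SINT_SHIFT);
}

int main(void)
{
        /* With an illustrative SINT of 2 this prints 0x20009. */
        printf("0x%llx\n", (unsigned long long)stimer_oneshot_config(2));
        return 0;
}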
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4d6b26979fbd..f518b8d7a5b5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -32,6 +32,7 @@
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
@@ -578,6 +579,34 @@ static void vmbus_onmessage_work(struct work_struct *work)
kfree(ctx);
}
+static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
+{
+ struct clock_event_device *dev = hv_context.clk_evt[cpu];
+
+ if (dev->event_handler)
+ dev->event_handler(dev);
+
+ msg->header.message_type = HVMSG_NONE;
+
+ /*
+ * Make sure the write to MessageType (ie set to
+ * HVMSG_NONE) happens before we read the
+ * MessagePending and EOMing. Otherwise, the EOMing
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ wrmsrl(HV_X64_MSR_EOM, 0);
+ }
+}
+
static void vmbus_on_msg_dpc(unsigned long data)
{
int cpu = smp_processor_id();
@@ -667,8 +696,12 @@ static void vmbus_isr(void)
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
- if (msg->header.message_type != HVMSG_NONE)
- tasklet_schedule(&msg_dpc);
+ if (msg->header.message_type != HVMSG_NONE) {
+ if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
+ hv_process_timer_expiration(msg, cpu);
+ else
+ tasklet_schedule(&msg_dpc);
+ }
}
/*
@@ -861,8 +894,8 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
- hyperv_mmio.start = res->data.address64.minimum;
- hyperv_mmio.end = res->data.address64.maximum;
+ hyperv_mmio.start = res->data.address64.address.minimum;
+ hyperv_mmio.end = res->data.address64.address.maximum;
break;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6529c09c46f0..d931cbbed240 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
for those channels specified in the map. This map can be provided
either via platform data or the device tree bindings.
+config SENSORS_I5500
+ tristate "Intel 5500/5520/X58 temperature sensor"
+ depends on X86 && PCI
+ help
+ If you say yes here you get support for the temperature
+ sensor inside the Intel 5500, 5520 and X58 chipsets.
+
+ This driver can also be built as a module. If so, the module
+ will be called i5500_temp.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86
@@ -1379,6 +1389,7 @@ config SENSORS_ADS1015
config SENSORS_ADS7828
tristate "Texas Instruments ADS7828 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for Texas Instruments ADS7828 and
ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while
@@ -1420,8 +1431,8 @@ config SENSORS_INA2XX
tristate "Texas Instruments INA219 and compatibles"
depends on I2C
help
- If you say yes here you get support for INA219, INA220, INA226, and
- INA230 power monitor chips.
+ If you say yes here you get support for INA219, INA220, INA226,
+ INA230, and INA231 power monitor chips.
The INA2xx driver is configured for the default configuration of
the part as described in the datasheet.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 67280643bcf0..6c941472e707 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_HTU21) += htu21.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
+obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 13875968c844..6cb89c0ebab6 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev,
struct abx500_temp *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- return sprintf(buf, "%ld\n", data->min[attr->index]);
+ return sprintf(buf, "%lu\n", data->min[attr->index]);
}
static ssize_t show_max(struct device *dev,
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev,
struct abx500_temp *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- return sprintf(buf, "%ld\n", data->max[attr->index]);
+ return sprintf(buf, "%lu\n", data->max[attr->index]);
}
static ssize_t show_max_hyst(struct device *dev,
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev,
struct abx500_temp *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- return sprintf(buf, "%ld\n", data->max_hyst[attr->index]);
+ return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
}
static ssize_t show_min_alarm(struct device *dev,
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index f4f9b219bf16..11955467fc0f 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/bitops.h>
/*
* AD7314 temperature masks
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
- data = (data << 6) >> 6;
+ data = sign_extend32(data, 9);
return sprintf(buf, "%d\n", 250 * data);
case adt7301:
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
* register. 1lsb - 31.25 milli degrees centigrade
*/
data = ret & ADT7301_TEMP_MASK;
- data = (data << 2) >> 2;
+ data = sign_extend32(data, 13);
return sprintf(buf, "%d\n",
DIV_ROUND_CLOSEST(data * 3125, 100));
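The ad7314 hunk replaces the shift-up/shift-down sign extension of the 10-bit ad7314 and 14-bit adt7301 samples with sign_extend32(value, sign_bit_index). A user-space stand-in for that helper, checked against the old (data << 6) >> 6 idiom for the 10-bit case:

#include <stdio.h>
#include <stdint.h>

/* Behaves like the kernel helper: sign-extend 'value' from bit 'index'. */
static int32_t sign_extend32(uint32_t value, int index)
{
        uint8_t shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
}

int main(void)
{
        uint32_t raw = 0x3FF;            /* 10-bit two's complement -1 */
        int16_t  old = (int16_t)raw;

        old = (int16_t)(old << 6) >> 6;  /* the idiom the patch removes */

        printf("old: %d  new: %d\n", old, (int)sign_extend32(raw, 9));
        /* both print -1; multiplied by 250 that is -250 millidegrees */
        return 0;
}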
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 0625e50d7a6e..ad2b47e40345 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
+#include <linux/bitops.h>
/* Addresses to scan
* The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev,
if (IS_ERR(data))
return PTR_ERR(data);
- temp = (data->temp[index] << 7) >> 7; /* sign extend */
+ temp = sign_extend32(data->temp[index], 8);
return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
}
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index a622d40eec17..bce4e9ff21bf 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -30,14 +30,12 @@
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/init.h>
-#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/platform_data/ads7828.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
/* The ADS7828 registers */
-#define ADS7828_NCH 8 /* 8 channels supported */
#define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */
#define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */
#define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 };
/* Client specific data */
struct ads7828_data {
- struct i2c_client *client;
- struct mutex update_lock; /* Mutex protecting updates */
- unsigned long last_updated; /* Last updated time (in jiffies) */
- u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH samples */
- bool valid; /* Validity flag */
- bool diff_input; /* Differential input */
- bool ext_vref; /* External voltage reference */
- unsigned int vref_mv; /* voltage reference value */
+ struct regmap *regmap;
u8 cmd_byte; /* Command byte without channel bits */
unsigned int lsb_resol; /* Resolution of the ADC sample LSB */
- s32 (*read_channel)(const struct i2c_client *client, u8 command);
};
/* Command byte C2,C1,C0 - see datasheet */
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch)
return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
}
-/* Update data for the device (all 8 channels) */
-static struct ads7828_data *ads7828_update_device(struct device *dev)
-{
- struct ads7828_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- unsigned int ch;
- dev_dbg(&client->dev, "Starting ads7828 update\n");
-
- for (ch = 0; ch < ADS7828_NCH; ch++) {
- u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
- data->adc_input[ch] = data->read_channel(client, cmd);
- }
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
/* sysfs callback function */
static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct ads7828_data *data = ads7828_update_device(dev);
- unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] *
- data->lsb_resol, 1000);
+ struct ads7828_data *data = dev_get_drvdata(dev);
+ u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index);
+ unsigned int regval;
+ int err;
- return sprintf(buf, "%d\n", value);
+ err = regmap_read(data->regmap, cmd, &regval);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%d\n",
+ DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000));
}
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0);
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = {
ATTRIBUTE_GROUPS(ads7828);
+static const struct regmap_config ads2828_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
+
+static const struct regmap_config ads2830_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int ads7828_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client,
struct ads7828_platform_data *pdata = dev_get_platdata(dev);
struct ads7828_data *data;
struct device *hwmon_dev;
+ unsigned int vref_mv = ADS7828_INT_VREF_MV;
+ bool diff_input = false;
+ bool ext_vref = false;
data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (pdata) {
- data->diff_input = pdata->diff_input;
- data->ext_vref = pdata->ext_vref;
- if (data->ext_vref)
- data->vref_mv = pdata->vref_mv;
+ diff_input = pdata->diff_input;
+ ext_vref = pdata->ext_vref;
+ if (ext_vref && pdata->vref_mv)
+ vref_mv = pdata->vref_mv;
}
- /* Bound Vref with min/max values if it was provided */
- if (data->vref_mv)
- data->vref_mv = clamp_val(data->vref_mv,
- ADS7828_EXT_VREF_MV_MIN,
- ADS7828_EXT_VREF_MV_MAX);
- else
- data->vref_mv = ADS7828_INT_VREF_MV;
+ /* Bound Vref with min/max values */
+ vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
+ ADS7828_EXT_VREF_MV_MAX);
/* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */
if (id->driver_data == ads7828) {
- data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096);
- data->read_channel = i2c_smbus_read_word_swapped;
+ data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096);
+ data->regmap = devm_regmap_init_i2c(client,
+ &ads2828_regmap_config);
} else {
- data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256);
- data->read_channel = i2c_smbus_read_byte_data;
+ data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256);
+ data->regmap = devm_regmap_init_i2c(client,
+ &ads2830_regmap_config);
}
- data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
- if (!data->diff_input)
+ data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
+ if (!diff_input)
data->cmd_byte |= ADS7828_CMD_SD_SE;
- data->client = client;
- mutex_init(&data->update_lock);
-
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data,
ads7828_groups);
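ads7828_cmd_byte() encodes the channel into bits C2..C0 of the command byte, and with the regmap conversion that byte now doubles as the register address passed to regmap_read(). A quick table of the command bytes produced for single-ended inputs with the internal reference enabled:

#include <stdio.h>
#include <stdint.h>

#define ADS7828_CMD_SD_SE 0x80  /* single-ended inputs */
#define ADS7828_CMD_PD3   0x0C  /* internal vref ON && A/D ON */

/* Same channel encoding as ads7828_cmd_byte() in the driver. */
static uint8_t ads7828_cmd_byte(uint8_t cmd, int ch)
{
        return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
}

int main(void)
{
        uint8_t base = ADS7828_CMD_SD_SE | ADS7828_CMD_PD3;  /* 0x8c */
        int ch;

        /* Prints 0x8c 0xcc 0x9c 0xdc 0xac 0xec 0xbc 0xfc for ch 0..7 */
        for (ch = 0; ch < 8; ch++)
                printf("in%d_input -> command byte 0x%02x\n",
                       ch, (unsigned int)ads7828_cmd_byte(base, ch));
        return 0;
}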
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644
index 000000000000..3e3ccbf18b4e
--- /dev/null
+++ b/drivers/hwmon/i5500_temp.c
@@ -0,0 +1,149 @@
+/*
+ * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
+ *
+ * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Register definitions from datasheet */
+#define REG_TSTHRCATA 0xE2
+#define REG_TSCTRL 0xE8
+#define REG_TSTHRRPEX 0xEB
+#define REG_TSTHRLO 0xEC
+#define REG_TSTHRHI 0xEE
+#define REG_CTHINT 0xF0
+#define REG_TSFSC 0xF3
+#define REG_CTSTS 0xF4
+#define REG_TSTHRRQPI 0xF5
+#define REG_CTCTRL 0xF7
+#define REG_TSTIMER 0xF8
+
+/*
+ * Sysfs stuff
+ */
+
+/* Sensor resolution : 0.5 degree C */
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ long temp;
+ u16 tsthrhi;
+ s8 tsfsc;
+
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ temp = ((long)tsthrhi - tsfsc) * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_thresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int reg = to_sensor_dev_attr(devattr)->index;
+ long temp;
+ u16 tsthr;
+
+ pci_read_config_word(pdev, reg, &tsthr);
+ temp = tsthr * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ u8 ctsts;
+
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+
+static struct attribute *i5500_temp_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(i5500_temp);
+
+static const struct pci_device_id i5500_temp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
+
+static int i5500_temp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct device *hwmon_dev;
+ u32 tstimer;
+ s8 tsfsc;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable device\n");
+ return err;
+ }
+
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
+ if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
+ dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "intel5500", NULL,
+ i5500_temp_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct pci_driver i5500_temp_driver = {
+ .name = "i5500_temp",
+ .id_table = i5500_temp_ids,
+ .probe = i5500_temp_probe,
+};
+
+module_pci_driver(i5500_temp_driver);
+
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
+MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e01feba909c3..d1542b7d4bc3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -35,6 +35,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/of.h>
+#include <linux/delay.h>
#include <linux/platform_data/ina2xx.h>
@@ -51,7 +52,6 @@
#define INA226_ALERT_LIMIT 0x07
#define INA226_DIE_ID 0xFF
-
/* register count */
#define INA219_REGISTERS 6
#define INA226_REGISTERS 8
@@ -64,6 +64,24 @@
/* worst case is 68.10 ms (~14.6Hz, ina219) */
#define INA2XX_CONVERSION_RATE 15
+#define INA2XX_MAX_DELAY 69 /* worst case delay in ms */
+
+#define INA2XX_RSHUNT_DEFAULT 10000
+
+/* bit mask for reading the averaging setting in the configuration register */
+#define INA226_AVG_RD_MASK 0x0E00
+
+#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
+#define INA226_SHIFT_AVG(val) ((val) << 9)
+
+/* common attrs, ina226 attrs and NULL */
+#define INA2XX_MAX_ATTRIBUTE_GROUPS 3
+
+/*
+ * Both bus voltage and shunt voltage conversion times for ina226 are set
+ * to 0b0100 on POR, which translates to 2200 microseconds in total.
+ */
+#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
enum ina2xx_ids { ina219, ina226 };
@@ -81,11 +99,16 @@ struct ina2xx_data {
struct i2c_client *client;
const struct ina2xx_config *config;
+ long rshunt;
+ u16 curr_config;
+
struct mutex update_lock;
bool valid;
unsigned long last_updated;
+ int update_interval; /* in jiffies */
int kind;
+ const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
u16 regs[INA2XX_MAX_REGISTERS];
};
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = {
},
};
-static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+/*
+ * Available averaging rates for ina226. The indices correspond with
+ * the bit values expected by the chip (according to the ina226 datasheet,
+ * table 3 AVG bit settings, found at
+ * http://www.ti.com/lit/ds/symlink/ina226.pdf).
+ */
+static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
+
+static int ina226_avg_bits(int avg)
+{
+ int i;
+
+ /* Get the closest average from the tab. */
+ for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
+ if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
+ break;
+ }
+
+ return i; /* Return 0b0111 for values greater than 1024. */
+}
+
+static int ina226_reg_to_interval(u16 config)
+{
+ int avg = ina226_avg_tab[INA226_READ_AVG(config)];
+
+ /*
+ * Multiply the total conversion time by the number of averages.
+ * Return the result in milliseconds.
+ */
+ return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
+}
+
+static u16 ina226_interval_to_reg(int interval, u16 config)
+{
+ int avg, avg_bits;
+
+ avg = DIV_ROUND_CLOSEST(interval * 1000,
+ INA226_TOTAL_CONV_TIME_DEFAULT);
+ avg_bits = ina226_avg_bits(avg);
+
+ return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
+}
+
+static void ina226_set_update_interval(struct ina2xx_data *data)
+{
+ int ms;
+
+ ms = ina226_reg_to_interval(data->curr_config);
+ data->update_interval = msecs_to_jiffies(ms);
+}
+
+static int ina2xx_calibrate(struct ina2xx_data *data)
+{
+ u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+ data->rshunt);
+
+ return i2c_smbus_write_word_swapped(data->client,
+ INA2XX_CALIBRATION, val);
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int ina2xx_init(struct ina2xx_data *data)
{
- struct ina2xx_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- struct ina2xx_data *ret = data;
+ int ret;
- mutex_lock(&data->update_lock);
+ /* device configuration */
+ ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
+ data->curr_config);
+ if (ret < 0)
+ return ret;
- if (time_after(jiffies, data->last_updated +
- HZ / INA2XX_CONVERSION_RATE) || !data->valid) {
+ /*
+ * Set current LSB to 1mA, shunt is in uOhms
+ * (equation 13 in datasheet).
+ */
+ return ina2xx_calibrate(data);
+}
- int i;
+static int ina2xx_do_update(struct device *dev)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int i, rv, retry;
- dev_dbg(&client->dev, "Starting ina2xx update\n");
+ dev_dbg(&client->dev, "Starting ina2xx update\n");
+ for (retry = 5; retry; retry--) {
/* Read all registers */
for (i = 0; i < data->config->registers; i++) {
- int rv = i2c_smbus_read_word_swapped(client, i);
- if (rv < 0) {
- ret = ERR_PTR(rv);
- goto abort;
- }
+ rv = i2c_smbus_read_word_swapped(client, i);
+ if (rv < 0)
+ return rv;
data->regs[i] = rv;
}
+
+ /*
+ * If the current value in the calibration register is 0, the
+ * power and current registers will also remain at 0. In case
+ * the chip has been reset let's check the calibration
+ * register and reinitialize if needed.
+ */
+ if (data->regs[INA2XX_CALIBRATION] == 0) {
+ dev_warn(dev, "chip not calibrated, reinitializing\n");
+
+ rv = ina2xx_init(data);
+ if (rv < 0)
+ return rv;
+
+ /*
+ * Let's make sure the power and current registers
+ * have been updated before trying again.
+ */
+ msleep(INA2XX_MAX_DELAY);
+ continue;
+ }
+
data->last_updated = jiffies;
data->valid = 1;
+
+ return 0;
}
-abort:
+
+ /*
+ * If we're here then although all write operations succeeded, the
+ * chip still returns 0 in the calibration register. Nothing more we
+ * can do here.
+ */
+ dev_err(dev, "unable to reinitialize the chip\n");
+ return -ENODEV;
+}
+
+static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct ina2xx_data *ret = data;
+ unsigned long after;
+ int rv;
+
+ mutex_lock(&data->update_lock);
+
+ after = data->last_updated + data->update_interval;
+ if (time_after(jiffies, after) || !data->valid) {
+ rv = ina2xx_do_update(dev);
+ if (rv < 0)
+ ret = ERR_PTR(rv);
+ }
+
mutex_unlock(&data->update_lock);
return ret;
}
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
/* signed register, LSB=1mA (selected), in mA */
val = (s16)data->regs[reg];
break;
+ case INA2XX_CALIBRATION:
+ val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+ data->regs[reg]);
+ break;
default:
/* programmer goofed */
WARN_ON_ONCE(1);
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index));
}
+static ssize_t ina2xx_set_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct ina2xx_data *data = ina2xx_update_device(dev);
+ unsigned long val;
+ int status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = kstrtoul(buf, 10, &val);
+ if (status < 0)
+ return status;
+
+ if (val == 0 ||
+ /* Values greater than the calibration factor make no sense. */
+ val > data->config->calibration_factor)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->rshunt = val;
+ status = ina2xx_calibrate(data);
+ mutex_unlock(&data->update_lock);
+ if (status < 0)
+ return status;
+
+ return count;
+}
+
+static ssize_t ina226_set_interval(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int status;
+
+ status = kstrtoul(buf, 10, &val);
+ if (status < 0)
+ return status;
+
+ if (val > INT_MAX || val == 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->curr_config = ina226_interval_to_reg(val,
+ data->regs[INA2XX_CONFIG]);
+ status = i2c_smbus_write_word_swapped(data->client,
+ INA2XX_CONFIG,
+ data->curr_config);
+
+ ina226_set_update_interval(data);
+ /* Make sure the next access re-reads all registers. */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+ if (status < 0)
+ return status;
+
+ return count;
+}
+
+static ssize_t ina226_show_interval(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ina2xx_data *data = ina2xx_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /*
+ * We don't use data->update_interval here as we want to display
+ * the actual interval used by the chip and jiffies_to_msecs()
+ * doesn't seem to be accurate enough.
+ */
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
+}
+
/* shunt voltage */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL,
INA2XX_SHUNT_VOLTAGE);
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL,
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
INA2XX_POWER);
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
+ ina2xx_show_value, ina2xx_set_shunt,
+ INA2XX_CALIBRATION);
+
+/* update interval (ina226 only) */
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+ ina226_show_interval, ina226_set_interval, 0);
+
/* pointers to created device attributes */
static struct attribute *ina2xx_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
+ &sensor_dev_attr_shunt_resistor.dev_attr.attr,
NULL,
};
-ATTRIBUTE_GROUPS(ina2xx);
+
+static const struct attribute_group ina2xx_group = {
+ .attrs = ina2xx_attrs,
+};
+
+static struct attribute *ina226_attrs[] = {
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ina226_group = {
+ .attrs = ina226_attrs,
+};
static int ina2xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ina2xx_data *data;
struct device *hwmon_dev;
- long shunt = 10000; /* default shunt value 10mOhms */
u32 val;
- int ret;
+ int ret, group = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client,
if (dev_get_platdata(dev)) {
pdata = dev_get_platdata(dev);
- shunt = pdata->shunt_uohms;
+ data->rshunt = pdata->shunt_uohms;
} else if (!of_property_read_u32(dev->of_node,
"shunt-resistor", &val)) {
- shunt = val;
+ data->rshunt = val;
+ } else {
+ data->rshunt = INA2XX_RSHUNT_DEFAULT;
}
- if (shunt <= 0)
- return -ENODEV;
-
/* set the device type */
data->kind = id->driver_data;
data->config = &ina2xx_config[data->kind];
-
- /* device configuration */
- ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
- data->config->config_default);
- if (ret < 0) {
- dev_err(dev,
- "error writing to the config register: %d", ret);
- return -ENODEV;
- }
+ data->curr_config = data->config->config_default;
+ data->client = client;
/*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
+ * Ina226 has a variable update_interval. For ina219 we
+ * use a constant value.
*/
- ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
- data->config->calibration_factor / shunt);
+ if (data->kind == ina226)
+ ina226_set_update_interval(data);
+ else
+ data->update_interval = HZ / INA2XX_CONVERSION_RATE;
+
+ if (data->rshunt <= 0 ||
+ data->rshunt > data->config->calibration_factor)
+ return -ENODEV;
+
+ ret = ina2xx_init(data);
if (ret < 0) {
- dev_err(dev,
- "error writing to the calibration register: %d", ret);
+ dev_err(dev, "error configuring the device: %d\n", ret);
return -ENODEV;
}
- data->client = client;
mutex_init(&data->update_lock);
+ data->groups[group++] = &ina2xx_group;
+ if (data->kind == ina226)
+ data->groups[group++] = &ina226_group;
+
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, ina2xx_groups);
+ data, data->groups);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
- id->name, shunt);
+ id->name, data->rshunt);
return 0;
}
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = {
{ "ina220", ina219 },
{ "ina226", ina226 },
{ "ina230", ina226 },
+ { "ina231", ina226 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
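The new ina226 update_interval attribute works purely through the AVG field: each averaged sample pair takes INA226_TOTAL_CONV_TIME_DEFAULT (2200 us) at the power-on conversion times, so the interval is avg * 2.2 ms, and a requested interval is mapped back to the nearest entry of ina226_avg_tab. A standalone sketch of that round trip for a requested 500 ms:

#include <stdio.h>

#define INA226_TOTAL_CONV_TIME_DEFAULT 2200  /* us, at POR conversion times */

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

/* Same "closest entry" search as ina226_avg_bits() in the driver. */
static int ina226_avg_bits(int avg)
{
        int i;

        for (i = 0; i < 7; i++)
                if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
                        break;
        return i;
}

static int interval_ms_for(int avg)
{
        return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
}

int main(void)
{
        int requested_ms = 500;
        int avg = DIV_ROUND_CLOSEST(requested_ms * 1000,
                                    INA226_TOTAL_CONV_TIME_DEFAULT);
        int bits = ina226_avg_bits(avg);

        /* 500 ms asks for 227 averages, the closest step is 256 (AVG = 0b101),
         * and the interval reported back by the chip settings is 563 ms. */
        printf("avg=%d -> AVG bits=%d -> %d ms\n",
               avg, bits, interval_ms_for(ina226_avg_tab[bits]));
        return 0;
}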
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 388f8bcd898e..996bdfd5cf25 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -201,7 +201,7 @@ struct jc42_data {
#define JC42_TEMP_MIN 0
#define JC42_TEMP_MAX 125000
-static u16 jc42_temp_to_reg(int temp, bool extended)
+static u16 jc42_temp_to_reg(long temp, bool extended)
{
int ntemp = clamp_val(temp,
extended ? JC42_TEMP_MIN_EXTENDED :
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended)
static int jc42_temp_from_reg(s16 reg)
{
- reg &= 0x1fff;
-
- /* sign extend register */
- if (reg & 0x1000)
- reg |= 0xf000;
+ reg = sign_extend32(reg, 12);
/* convert from 0.0625 to 0.001 resolution */
return reg * 125 / 2;
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
const char *buf, size_t count)
{
struct jc42_data *data = dev_get_drvdata(dev);
- unsigned long val;
+ long val;
int diff, hyst;
int err;
int ret = count;
- if (kstrtoul(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
+ val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
+ JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
+
hyst = 0;
if (diff > 0) {
if (diff < 2250)
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index ec5678289e4a..55765790907b 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
return reg != REG_BANK && reg <= 0x20;
}
-static struct regmap_config nct7802_regmap_config = {
+static const struct regmap_config nct7802_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index ba9f478f64ee..9da2735f1424 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int tmp102_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev)
config &= ~TMP102_CONF_SD;
return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
}
-
-static const struct dev_pm_ops tmp102_dev_pm_ops = {
- .suspend = tmp102_suspend,
- .resume = tmp102_resume,
-};
-
-#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
-#else
-#define TMP102_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
+
static const struct i2c_device_id tmp102_id[] = {
{ "tmp102", 0 },
{ }
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id);
static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
- .driver.pm = TMP102_DEV_PM_OPS,
+ .driver.pm = &tmp102_dev_pm_ops,
.probe = tmp102_probe,
.remove = tmp102_remove,
.id_table = tmp102_id,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
int ret;
pm_runtime_get_sync(&adap->dev);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN) {
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return ret;
}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
udelay(100);
}
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return -EREMOTEIO;
}
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
if (ret != 0) {
dev_err(&pdev->dev, "I2C controller init failed\n");
return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ clk_unprepare(i2c->clk);
return ret;
}
}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ clk_unprepare(i2c->clk);
+
pm_runtime_disable(&i2c->adap.dev);
pm_runtime_disable(&pdev->dev);
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
if (!IS_ERR(i2c->sysreg))
regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
int pos;
int sr;
bool send_stop;
+ bool stop_after_dma;
struct resource *res;
struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
if (pd->pos == pd->msg->len) {
/* Send stop if we haven't yet (DMA case) */
- if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+ if (pd->send_stop && pd->stop_after_dma)
i2c_op(pd, OP_TX_STOP, 0);
return 1;
}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
real_pos = pd->pos - 2;
if (pd->pos == pd->msg->len) {
+ if (pd->stop_after_dma) {
+ /* Simulate PIO end condition after DMA transfer */
+ i2c_op(pd, OP_RX_STOP, 0);
+ pd->pos++;
+ break;
+ }
+
if (real_pos < 0) {
i2c_op(pd, OP_RX_STOP, 0);
break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
sh_mobile_i2c_dma_unmap(pd);
pd->pos = pd->msg->len;
+ pd->stop_after_dma = true;
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
bool do_start = pd->send_stop || !i;
msg = &msgs[i];
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+ pd->stop_after_dma = false;
err = start_ch(pd, msg, do_start);
if (err)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
return ret;
}
EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9cceacb92f9d..1bc0c170f12a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x46, idle_cpu_hsw),
ICPU(0x4d, idle_cpu_avn),
ICPU(0x3d, idle_cpu_bdw),
+ ICPU(0x47, idle_cpu_bdw),
ICPU(0x4f, idle_cpu_bdw),
ICPU(0x56, idle_cpu_bdw),
{}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 345395e9dc6e..4132935dc929 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -27,7 +27,6 @@ boolean "IIO callback buffer used for push in-kernel interfaces"
usage. That is, those where the data is pushed to the consumer.
config IIO_KFIFO_BUF
- select IIO_TRIGGER
tristate "Industrial I/O buffering based on kfifo"
help
A simple fifo based on kfifo. Note that this currently provides
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 9b9be8725e9d..7c9a9a94a8ce 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -43,6 +43,9 @@ config HID_SENSOR_ACCEL_3D
Say yes here to build support for the HID SENSOR
accelerometers 3D.
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-accel-3d.
+
config IIO_ST_ACCEL_3AXIS
tristate "STMicroelectronics accelerometers 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
@@ -80,6 +83,9 @@ config KXSD9
Say yes here to build support for the Kionix KXSD9 accelerometer.
Currently this only supports the device via an SPI interface.
+ To compile this driver as a module, choose M here: the module
+ will be called kxsd9.
+
config MMA8452
tristate "Freescale MMA8452Q Accelerometer Driver"
depends on I2C
@@ -105,4 +111,29 @@ config KXCJK1013
To compile this driver as a module, choose M here: the module will
be called kxcjk-1013.
+config MMA9551_CORE
+ tristate
+
+config MMA9551
+ tristate "Freescale MMA9551L Intelligent Motion-Sensing Platform Driver"
+ depends on I2C
+ select MMA9551_CORE
+
+ help
+ Say yes here to build support for the Freescale MMA9551L
+ Intelligent Motion-Sensing Platform Driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mma9551.
+
+config MMA9553
+ tristate "Freescale MMA9553L Intelligent Pedometer Platform Driver"
+ depends on I2C
+ select MMA9551_CORE
+ help
+ Say yes here to build support for the Freescale MMA9553L
+ Intelligent Pedometer Platform Driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mma9553.
endmenu
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index a593996c6539..99d89e46cad1 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -10,6 +10,12 @@ obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
obj-$(CONFIG_KXSD9) += kxsd9.o
obj-$(CONFIG_MMA8452) += mma8452.o
+obj-$(CONFIG_MMA9551_CORE) += mma9551_core.o
+obj-$(CONFIG_MMA9551) += mma9551.o
+obj-$(CONFIG_MMA9553) += mma9553.o
+
+obj-$(CONFIG_IIO_SSP_SENSORS_COMMONS) += ssp_accel_sensor.o
+
obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
st_accel-y := st_accel_core.o
st_accel-$(CONFIG_IIO_BUFFER) += st_accel_buffer.o
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index d5d95317003a..df6a593bd4bd 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -111,19 +111,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
switch (mask) {
case 0:
- poll_value = hid_sensor_read_poll_value(
- &accel_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
-
hid_sensor_power_state(&accel_state->common_attributes, true);
- msleep_interruptible(poll_value * 2);
report_id = accel_state->accel[chan->scan_index].report_id;
address = accel_3d_addresses[chan->scan_index];
if (report_id >= 0)
@@ -419,6 +412,7 @@ static struct platform_driver hid_accel_3d_platform_driver = {
.id_table = hid_accel_3d_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_accel_3d_probe,
.remove = hid_accel_3d_remove,
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index da2fe93739a2..567de269cc00 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -108,6 +108,7 @@ struct kxcjk1013_data {
bool motion_trigger_on;
int64_t timestamp;
enum kx_chipset chipset;
+ bool is_smo8500_device;
};
enum kxcjk1013_axis {
@@ -377,6 +378,7 @@ static int kxcjk1013_get_startup_times(struct kxcjk1013_data *data)
static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
{
+#ifdef CONFIG_PM
int ret;
if (on)
@@ -388,8 +390,11 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: kxcjk1013_set_power_state for %d\n", on);
+ if (on)
+ pm_runtime_put_noidle(&data->client->dev);
return ret;
}
+#endif
return 0;
}
@@ -858,6 +863,8 @@ static int kxcjk1013_write_event_config(struct iio_dev *indio_dev,
ret = kxcjk1013_setup_any_motion_interrupt(data, state);
if (ret < 0) {
+ kxcjk1013_set_power_state(data, false);
+ data->ev_enable_state = 0;
mutex_unlock(&data->mutex);
return ret;
}
@@ -1008,6 +1015,7 @@ static int kxcjk1013_data_rdy_trigger_set_state(struct iio_trigger *trig,
else
ret = kxcjk1013_setup_new_data_interrupt(data, state);
if (ret < 0) {
+ kxcjk1013_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
@@ -1131,12 +1139,16 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
}
static const char *kxcjk1013_match_acpi_device(struct device *dev,
- enum kx_chipset *chipset)
+ enum kx_chipset *chipset,
+ bool *is_smo8500_device)
{
const struct acpi_device_id *id;
+
id = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!id)
return NULL;
+ if (strcmp(id->id, "SMO8500") == 0)
+ *is_smo8500_device = true;
*chipset = (enum kx_chipset)id->driver_data;
return dev_name(dev);
@@ -1151,6 +1163,8 @@ static int kxcjk1013_gpio_probe(struct i2c_client *client,
if (!client)
return -EINVAL;
+ if (data->is_smo8500_device)
+ return -ENOTSUPP;
dev = &client->dev;
@@ -1200,7 +1214,8 @@ static int kxcjk1013_probe(struct i2c_client *client,
name = id->name;
} else if (ACPI_HANDLE(&client->dev)) {
name = kxcjk1013_match_acpi_device(&client->dev,
- &data->chipset);
+ &data->chipset,
+ &data->is_smo8500_device);
} else
return -ENODEV;
@@ -1228,21 +1243,25 @@ static int kxcjk1013_probe(struct i2c_client *client,
KXCJK1013_IRQ_NAME,
indio_dev);
if (ret)
- return ret;
+ goto err_poweroff;
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
indio_dev->name,
indio_dev->id);
- if (!data->dready_trig)
- return -ENOMEM;
+ if (!data->dready_trig) {
+ ret = -ENOMEM;
+ goto err_poweroff;
+ }
data->motion_trig = devm_iio_trigger_alloc(&client->dev,
"%s-any-motion-dev%d",
indio_dev->name,
indio_dev->id);
- if (!data->motion_trig)
- return -ENOMEM;
+ if (!data->motion_trig) {
+ ret = -ENOMEM;
+ goto err_poweroff;
+ }
data->dready_trig->dev.parent = &client->dev;
data->dready_trig->ops = &kxcjk1013_trigger_ops;
@@ -1251,7 +1270,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
iio_trigger_get(indio_dev->trig);
ret = iio_trigger_register(data->dready_trig);
if (ret)
- return ret;
+ goto err_poweroff;
data->motion_trig->dev.parent = &client->dev;
data->motion_trig->ops = &kxcjk1013_trigger_ops;
@@ -1300,6 +1319,8 @@ err_trigger_unregister:
iio_trigger_unregister(data->dready_trig);
if (data->motion_trig)
iio_trigger_unregister(data->motion_trig);
+err_poweroff:
+ kxcjk1013_set_mode(data, STANDBY);
return ret;
}
@@ -1349,10 +1370,7 @@ static int kxcjk1013_resume(struct device *dev)
int ret = 0;
mutex_lock(&data->mutex);
- /* Check, if the suspend occured while active */
- if (data->dready_trigger_on || data->motion_trigger_on ||
- data->ev_enable_state)
- ret = kxcjk1013_set_mode(data, OPERATION);
+ ret = kxcjk1013_set_mode(data, OPERATION);
mutex_unlock(&data->mutex);
return ret;
@@ -1364,8 +1382,14 @@ static int kxcjk1013_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct kxcjk1013_data *data = iio_priv(indio_dev);
+ int ret;
- return kxcjk1013_set_mode(data, STANDBY);
+ ret = kxcjk1013_set_mode(data, STANDBY);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "powering off device failed\n");
+ return -EAGAIN;
+ }
+ return 0;
}
static int kxcjk1013_runtime_resume(struct device *dev)
@@ -1399,6 +1423,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
{"KXCJ1013", KXCJK1013},
{"KXCJ1008", KXCJ91008},
{"KXTJ1009", KXTJ21009},
+ {"SMO8500", KXCJ91008},
{ },
};
MODULE_DEVICE_TABLE(acpi, kx_acpi_match);
@@ -1407,6 +1432,7 @@ static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcjk1013", KXCJK1013},
{"kxcj91008", KXCJ91008},
{"kxtj21009", KXTJ21009},
+ {"SMO8500", KXCJ91008},
{}
};
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 3c12d4966376..5b80657883bb 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,7 +111,7 @@ static const int mma8452_samp_freq[8][2] = {
{6, 250000}, {1, 560000}
};
-/*
+/*
* Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
* The userspace interface uses m/s^2 and we declare micro units
* So scale factor is given by:
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
new file mode 100644
index 000000000000..46c38351c6a3
--- /dev/null
+++ b/drivers/iio/accel/mma9551.c
@@ -0,0 +1,637 @@
+/*
+ * Freescale MMA9551L Intelligent Motion-Sensing Platform driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/pm_runtime.h>
+#include "mma9551_core.h"
+
+#define MMA9551_DRV_NAME "mma9551"
+#define MMA9551_IRQ_NAME "mma9551_event"
+#define MMA9551_GPIO_NAME "mma9551_int"
+#define MMA9551_GPIO_COUNT 4
+
+/* Tilt application (inclination in IIO terms). */
+#define MMA9551_TILT_XZ_ANG_REG 0x00
+#define MMA9551_TILT_YZ_ANG_REG 0x01
+#define MMA9551_TILT_XY_ANG_REG 0x02
+#define MMA9551_TILT_ANGFLG BIT(7)
+#define MMA9551_TILT_QUAD_REG 0x03
+#define MMA9551_TILT_XY_QUAD_SHIFT 0
+#define MMA9551_TILT_YZ_QUAD_SHIFT 2
+#define MMA9551_TILT_XZ_QUAD_SHIFT 4
+#define MMA9551_TILT_CFG_REG 0x01
+#define MMA9551_TILT_ANG_THRESH_MASK GENMASK(3, 0)
+
+#define MMA9551_DEFAULT_SAMPLE_RATE 122 /* Hz */
+
+/* Tilt events are mapped to the first three GPIO pins. */
+enum mma9551_tilt_axis {
+ mma9551_x = 0,
+ mma9551_y,
+ mma9551_z,
+};
+
+struct mma9551_data {
+ struct i2c_client *client;
+ struct mutex mutex;
+ int event_enabled[3];
+ int irqs[MMA9551_GPIO_COUNT];
+};
+
+static int mma9551_read_incli_chan(struct i2c_client *client,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ u8 quad_shift, angle, quadrant;
+ u16 reg_addr;
+ int ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg_addr = MMA9551_TILT_YZ_ANG_REG;
+ quad_shift = MMA9551_TILT_YZ_QUAD_SHIFT;
+ break;
+ case IIO_MOD_Y:
+ reg_addr = MMA9551_TILT_XZ_ANG_REG;
+ quad_shift = MMA9551_TILT_XZ_QUAD_SHIFT;
+ break;
+ case IIO_MOD_Z:
+ reg_addr = MMA9551_TILT_XY_ANG_REG;
+ quad_shift = MMA9551_TILT_XY_QUAD_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mma9551_set_power_state(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = mma9551_read_status_byte(client, MMA9551_APPID_TILT,
+ reg_addr, &angle);
+ if (ret < 0)
+ goto out_poweroff;
+
+ ret = mma9551_read_status_byte(client, MMA9551_APPID_TILT,
+ MMA9551_TILT_QUAD_REG, &quadrant);
+ if (ret < 0)
+ goto out_poweroff;
+
+ angle &= ~MMA9551_TILT_ANGFLG;
+ quadrant = (quadrant >> quad_shift) & 0x03;
+
+ if (quadrant == 1 || quadrant == 3)
+ *val = 90 * (quadrant + 1) - angle;
+ else
+ *val = angle + 90 * quadrant;
+
+ ret = IIO_VAL_INT;
+
+out_poweroff:
+ mma9551_set_power_state(client, false);
+ return ret;
+}
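+
+/*
+ * A worked example of the quadrant handling above (illustrative numbers,
+ * not taken from the datasheet): angle = 30 in quadrant 1 is reported as
+ * 90 * (1 + 1) - 30 = 150 degrees, while angle = 30 in quadrant 2 is
+ * reported as 30 + 90 * 2 = 210 degrees, so the returned inclination
+ * increases monotonically from 0 to 360 over a full rotation.
+ */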
+
+static int mma9551_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_INCLI:
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_incli_chan(data->client, chan, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_accel_chan(data->client,
+ chan, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ return mma9551_read_accel_scale(val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9551_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct mma9551_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_INCLI:
+ /* IIO counts axes from 1, because IIO_NO_MOD is 0. */
+ return data->event_enabled[chan->channel2 - 1];
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9551_config_incli_event(struct iio_dev *indio_dev,
+ enum iio_modifier axis,
+ int state)
+{
+ struct mma9551_data *data = iio_priv(indio_dev);
+ enum mma9551_tilt_axis mma_axis;
+ int ret;
+
+ /* IIO counts axes from 1, because IIO_NO_MOD is 0. */
+ mma_axis = axis - 1;
+
+ if (data->event_enabled[mma_axis] == state)
+ return 0;
+
+ if (state == 0) {
+ ret = mma9551_gpio_config(data->client,
+ (enum mma9551_gpio_pin)mma_axis,
+ MMA9551_APPID_NONE, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mma9551_set_power_state(data->client, false);
+ if (ret < 0)
+ return ret;
+ } else {
+ int bitnum;
+
+ /* Bit 7 of each angle register holds the angle flag. */
+ switch (axis) {
+ case IIO_MOD_X:
+ bitnum = 7 + 8 * MMA9551_TILT_YZ_ANG_REG;
+ break;
+ case IIO_MOD_Y:
+ bitnum = 7 + 8 * MMA9551_TILT_XZ_ANG_REG;
+ break;
+ case IIO_MOD_Z:
+ bitnum = 7 + 8 * MMA9551_TILT_XY_ANG_REG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mma9551_set_power_state(data->client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = mma9551_gpio_config(data->client,
+ (enum mma9551_gpio_pin)mma_axis,
+ MMA9551_APPID_TILT, bitnum, 0);
+ if (ret < 0) {
+ mma9551_set_power_state(data->client, false);
+ return ret;
+ }
+ }
+
+ data->event_enabled[mma_axis] = state;
+
+ return ret;
+}
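+
+/*
+ * To make the mapping above concrete (using the defines at the top of this
+ * file): enabling the X-axis event picks the YZ angle register at offset
+ * 0x01, so bitnum = 7 + 8 * 0x01 = 15, i.e. the ANGFLG bit of that register,
+ * and it is routed to pin mma9551_gpio6 because mma_axis is 0 for IIO_MOD_X.
+ */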
+
+static int mma9551_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INCLI:
+ mutex_lock(&data->mutex);
+ ret = mma9551_config_incli_event(indio_dev,
+ chan->channel2, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9551_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INCLI:
+ if (val2 != 0 || val < 1 || val > 10)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = mma9551_update_config_bits(data->client,
+ MMA9551_APPID_TILT,
+ MMA9551_TILT_CFG_REG,
+ MMA9551_TILT_ANG_THRESH_MASK,
+ val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9551_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 tmp;
+
+ switch (chan->type) {
+ case IIO_INCLI:
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_config_byte(data->client,
+ MMA9551_APPID_TILT,
+ MMA9551_TILT_CFG_REG, &tmp);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+ *val = tmp & MMA9551_TILT_ANG_THRESH_MASK;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_event_spec mma9551_incli_event = {
+ .type = IIO_EV_TYPE_ROC,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+};
+
+#define MMA9551_INCLI_CHANNEL(axis) { \
+ .type = IIO_INCLI, \
+ .modified = 1, \
+ .channel2 = axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .event_spec = &mma9551_incli_event, \
+ .num_event_specs = 1, \
+}
+
+static const struct iio_chan_spec mma9551_channels[] = {
+ MMA9551_ACCEL_CHANNEL(IIO_MOD_X),
+ MMA9551_ACCEL_CHANNEL(IIO_MOD_Y),
+ MMA9551_ACCEL_CHANNEL(IIO_MOD_Z),
+
+ MMA9551_INCLI_CHANNEL(IIO_MOD_X),
+ MMA9551_INCLI_CHANNEL(IIO_MOD_Y),
+ MMA9551_INCLI_CHANNEL(IIO_MOD_Z),
+};
+
+static const struct iio_info mma9551_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mma9551_read_raw,
+ .read_event_config = mma9551_read_event_config,
+ .write_event_config = mma9551_write_event_config,
+ .read_event_value = mma9551_read_event_value,
+ .write_event_value = mma9551_write_event_value,
+};
+
+static irqreturn_t mma9551_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int i, ret, mma_axis = -1;
+ u16 reg;
+ u8 val;
+
+ mutex_lock(&data->mutex);
+
+ for (i = 0; i < 3; i++)
+ if (irq == data->irqs[i]) {
+ mma_axis = i;
+ break;
+ }
+
+ if (mma_axis == -1) {
+ /* IRQ was triggered on 4th line, which we don't use. */
+ dev_warn(&data->client->dev,
+ "irq triggered on unused line %d\n", data->irqs[3]);
+ goto out;
+ }
+
+ switch (mma_axis) {
+ case mma9551_x:
+ reg = MMA9551_TILT_YZ_ANG_REG;
+ break;
+ case mma9551_y:
+ reg = MMA9551_TILT_XZ_ANG_REG;
+ break;
+ case mma9551_z:
+ reg = MMA9551_TILT_XY_ANG_REG;
+ break;
+ }
+
+ /*
+ * Read the angle even though we don't use it, otherwise we
+ * won't get any further interrupts.
+ */
+ ret = mma9551_read_status_byte(data->client, MMA9551_APPID_TILT,
+ reg, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "error %d reading tilt register in IRQ\n", ret);
+ goto out;
+ }
+
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_INCLI, 0, (mma_axis + 1),
+ IIO_EV_TYPE_ROC, IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int mma9551_init(struct mma9551_data *data)
+{
+ int ret;
+
+ ret = mma9551_read_version(data->client);
+ if (ret)
+ return ret;
+
+ return mma9551_set_device_state(data->client, true);
+}
+
+static int mma9551_gpio_probe(struct iio_dev *indio_dev)
+{
+ struct gpio_desc *gpio;
+ int i, ret;
+ struct mma9551_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+
+ for (i = 0; i < MMA9551_GPIO_COUNT; i++) {
+ gpio = devm_gpiod_get_index(dev, MMA9551_GPIO_NAME, i);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "acpi gpio get index failed\n");
+ return PTR_ERR(gpio);
+ }
+
+ ret = gpiod_direction_input(gpio);
+ if (ret)
+ return ret;
+
+ data->irqs[i] = gpiod_to_irq(gpio);
+ ret = devm_request_threaded_irq(dev, data->irqs[i],
+ NULL, mma9551_event_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ MMA9551_IRQ_NAME, indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "request irq %d failed\n", data->irqs[i]);
+ return ret;
+ }
+
+ dev_dbg(dev, "gpio resource, no:%d irq:%d\n",
+ desc_to_gpio(gpio), data->irqs[i]);
+ }
+
+ return 0;
+}
+
+static const char *mma9551_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ return dev_name(dev);
+}
+
+static int mma9551_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mma9551_data *data;
+ struct iio_dev *indio_dev;
+ const char *name = NULL;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ if (id)
+ name = id->name;
+ else if (ACPI_HANDLE(&client->dev))
+ name = mma9551_match_acpi_device(&client->dev);
+
+ ret = mma9551_init(data);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&data->mutex);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = mma9551_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mma9551_channels);
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mma9551_info;
+
+ ret = mma9551_gpio_probe(indio_dev);
+ if (ret < 0)
+ goto out_poweroff;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+ goto out_poweroff;
+ }
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto out_iio_unregister;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ MMA9551_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ return 0;
+
+out_iio_unregister:
+ iio_device_unregister(indio_dev);
+out_poweroff:
+ mma9551_set_device_state(client, false);
+
+ return ret;
+}
+
+static int mma9551_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mma9551_data *data = iio_priv(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_device_unregister(indio_dev);
+ mutex_lock(&data->mutex);
+ mma9551_set_device_state(data->client, false);
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mma9551_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_device_state(data->client, false);
+ mutex_unlock(&data->mutex);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "powering off device failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int mma9551_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = mma9551_set_device_state(data->client, true);
+ if (ret < 0)
+ return ret;
+
+ mma9551_sleep(MMA9551_DEFAULT_SAMPLE_RATE);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int mma9551_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_device_state(data->client, false);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int mma9551_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9551_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_device_state(data->client, true);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops mma9551_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mma9551_suspend, mma9551_resume)
+ SET_RUNTIME_PM_OPS(mma9551_runtime_suspend,
+ mma9551_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id mma9551_acpi_match[] = {
+ {"MMA9551", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, mma9551_acpi_match);
+
+static const struct i2c_device_id mma9551_id[] = {
+ {"mma9551", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mma9551_id);
+
+static struct i2c_driver mma9551_driver = {
+ .driver = {
+ .name = MMA9551_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(mma9551_acpi_match),
+ .pm = &mma9551_pm_ops,
+ },
+ .probe = mma9551_probe,
+ .remove = mma9551_remove,
+ .id_table = mma9551_id,
+};
+
+module_i2c_driver(mma9551_driver);
+
+MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>");
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMA9551L motion-sensing platform driver");
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
new file mode 100644
index 000000000000..7f55a6d7cd03
--- /dev/null
+++ b/drivers/iio/accel/mma9551_core.c
@@ -0,0 +1,798 @@
+/*
+ * Common code for Freescale MMA955x Intelligent Sensor Platform drivers
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/pm_runtime.h>
+#include "mma9551_core.h"
+
+/* Command masks for mailbox write command */
+#define MMA9551_CMD_READ_VERSION_INFO 0x00
+#define MMA9551_CMD_READ_CONFIG 0x10
+#define MMA9551_CMD_WRITE_CONFIG 0x20
+#define MMA9551_CMD_READ_STATUS 0x30
+
+/* Mailbox read command */
+#define MMA9551_RESPONSE_COCO BIT(7)
+
+/* Error-Status codes returned in mailbox read command */
+#define MMA9551_MCI_ERROR_NONE 0x00
+#define MMA9551_MCI_ERROR_PARAM 0x04
+#define MMA9551_MCI_INVALID_COUNT 0x19
+#define MMA9551_MCI_ERROR_COMMAND 0x1C
+#define MMA9551_MCI_ERROR_INVALID_LENGTH 0x21
+#define MMA9551_MCI_ERROR_FIFO_BUSY 0x22
+#define MMA9551_MCI_ERROR_FIFO_ALLOCATED 0x23
+#define MMA9551_MCI_ERROR_FIFO_OVERSIZE 0x24
+
+/* GPIO Application */
+#define MMA9551_GPIO_POL_MSB 0x08
+#define MMA9551_GPIO_POL_LSB 0x09
+
+/* Sleep/Wake application */
+#define MMA9551_SLEEP_CFG 0x06
+#define MMA9551_SLEEP_CFG_SNCEN BIT(0)
+#define MMA9551_SLEEP_CFG_FLEEN BIT(1)
+#define MMA9551_SLEEP_CFG_SCHEN BIT(2)
+
+/* AFE application */
+#define MMA9551_AFE_X_ACCEL_REG 0x00
+#define MMA9551_AFE_Y_ACCEL_REG 0x02
+#define MMA9551_AFE_Z_ACCEL_REG 0x04
+
+/* Reset/Suspend/Clear application */
+#define MMA9551_RSC_RESET 0x00
+#define MMA9551_RSC_OFFSET(mask) (3 - (ffs(mask) - 1) / 8)
+#define MMA9551_RSC_VAL(mask) (mask >> (((ffs(mask) - 1) / 8) * 8))
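+
+/*
+ * For example (derived from the definitions above, shown only for clarity):
+ * MMA9551_RSC_PED is BIT(21), so ffs() returns 22, MMA9551_RSC_OFFSET()
+ * evaluates to 3 - (22 - 1) / 8 = 1 and MMA9551_RSC_VAL() to
+ * BIT(21) >> 16 = 0x20; resetting the pedometer therefore writes 0x20 to
+ * the reset register at offset 1.
+ */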
+
+/*
+ * A response is composed of:
+ * - control registers: MB0-3
+ * - data registers: MB4-31
+ *
+ * A request is composed of:
+ * - mbox to write to (always 0)
+ * - control registers: MB1-4
+ * - data registers: MB5-31
+ */
+#define MMA9551_MAILBOX_CTRL_REGS 4
+#define MMA9551_MAX_MAILBOX_DATA_REGS 28
+#define MMA9551_MAILBOX_REGS 32
+
+#define MMA9551_I2C_READ_RETRIES 5
+#define MMA9551_I2C_READ_DELAY 50 /* us */
+
+struct mma9551_mbox_request {
+ u8 start_mbox; /* Always 0. */
+ u8 app_id;
+ /*
+ * See Section 5.3.1 of the MMA955xL Software Reference Manual.
+ *
+ * Bit 7: reserved, always 0
+ * Bits 6-4: command
+ * Bits 3-0: upper bits of register offset
+ */
+ u8 cmd_off;
+ u8 lower_off;
+ u8 nbytes;
+ u8 buf[MMA9551_MAX_MAILBOX_DATA_REGS - 1];
+} __packed;
+
+struct mma9551_mbox_response {
+ u8 app_id;
+ /*
+ * See Section 5.3.3 of the MMA955xL Software Reference Manual.
+ *
+ * Bit 7: COCO
+ * Bits 6-0: Error code.
+ */
+ u8 coco_err;
+ u8 nbytes;
+ u8 req_bytes;
+ u8 buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+} __packed;
+
+struct mma9551_version_info {
+ __be32 device_id;
+ u8 rom_version[2];
+ u8 fw_version[2];
+ u8 hw_version[2];
+ u8 fw_build[2];
+};
+
+static int mma9551_transfer(struct i2c_client *client,
+ u8 app_id, u8 command, u16 offset,
+ u8 *inbytes, int num_inbytes,
+ u8 *outbytes, int num_outbytes)
+{
+ struct mma9551_mbox_request req;
+ struct mma9551_mbox_response rsp;
+ struct i2c_msg in, out;
+ u8 req_len, err_code;
+ int ret, retries;
+
+ if (offset >= 1 << 12) {
+ dev_err(&client->dev, "register offset too large\n");
+ return -EINVAL;
+ }
+
+ req_len = 1 + MMA9551_MAILBOX_CTRL_REGS + num_inbytes;
+ req.start_mbox = 0;
+ req.app_id = app_id;
+ req.cmd_off = command | (offset >> 8);
+ req.lower_off = offset;
+
+ if (command == MMA9551_CMD_WRITE_CONFIG)
+ req.nbytes = num_inbytes;
+ else
+ req.nbytes = num_outbytes;
+ if (num_inbytes)
+ memcpy(req.buf, inbytes, num_inbytes);
+
+ out.addr = client->addr;
+ out.flags = 0;
+ out.len = req_len;
+ out.buf = (u8 *)&req;
+
+ ret = i2c_transfer(client->adapter, &out, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c write failed\n");
+ return ret;
+ }
+
+ retries = MMA9551_I2C_READ_RETRIES;
+ do {
+ udelay(MMA9551_I2C_READ_DELAY);
+
+ in.addr = client->addr;
+ in.flags = I2C_M_RD;
+ in.len = sizeof(rsp);
+ in.buf = (u8 *)&rsp;
+
+ ret = i2c_transfer(client->adapter, &in, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c read failed\n");
+ return ret;
+ }
+
+ if (rsp.coco_err & MMA9551_RESPONSE_COCO)
+ break;
+ } while (--retries > 0);
+
+ if (retries == 0) {
+ dev_err(&client->dev,
+ "timed out while waiting for command response\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rsp.app_id != app_id) {
+ dev_err(&client->dev,
+ "app_id mismatch in response got %02x expected %02x\n",
+ rsp.app_id, app_id);
+ return -EINVAL;
+ }
+
+ err_code = rsp.coco_err & ~MMA9551_RESPONSE_COCO;
+ if (err_code != MMA9551_MCI_ERROR_NONE) {
+ dev_err(&client->dev, "read returned error %x\n", err_code);
+ return -EINVAL;
+ }
+
+ if (rsp.nbytes != rsp.req_bytes) {
+ dev_err(&client->dev,
+ "output length mismatch got %d expected %d\n",
+ rsp.nbytes, rsp.req_bytes);
+ return -EINVAL;
+ }
+
+ if (num_outbytes)
+ memcpy(outbytes, rsp.buf, num_outbytes);
+
+ return 0;
+}
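+
+/*
+ * As an example of the resulting wire format (illustrative offsets, the
+ * exact registers depend on the caller): reading one configuration byte at
+ * offset 0x01 of the Tilt application (app ID 0x0B) sends
+ *
+ *   .start_mbox = 0x00
+ *   .app_id     = 0x0B
+ *   .cmd_off    = MMA9551_CMD_READ_CONFIG | (0x01 >> 8) = 0x10
+ *   .lower_off  = 0x01
+ *   .nbytes     = 1
+ *
+ * and then polls the response mailboxes until the COCO bit is set.
+ */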
+
+/**
+ * mma9551_read_config_byte() - read 1 configuration byte
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @val: Pointer to store value read
+ *
+ * Read one configuration byte from the device using MMA955xL command format.
+ * Commands to the MMA955xL platform consist of a write followed
+ * by one or more reads.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_config_byte(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 *val)
+{
+ return mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
+ reg, NULL, 0, val, 1);
+}
+EXPORT_SYMBOL(mma9551_read_config_byte);
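+
+/*
+ * Minimal usage sketch for the helper above (hypothetical caller, included
+ * only to illustrate the locking rule from the kernel-doc; data->client and
+ * data->mutex are assumed driver state, as in the drivers below):
+ *
+ *   u8 thresh;
+ *   int err;
+ *
+ *   mutex_lock(&data->mutex);
+ *   err = mma9551_read_config_byte(data->client, MMA9551_APPID_TILT,
+ *                                  0x01, &thresh);
+ *   mutex_unlock(&data->mutex);
+ *   if (err < 0)
+ *           return err;
+ */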
+
+/**
+ * mma9551_write_config_byte() - write 1 configuration byte
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @val: Value to write
+ *
+ * Write one configuration byte to the device using MMA955xL command format.
+ * Commands to the MMA955xL platform consist of a write followed by one or
+ * more reads.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_write_config_byte(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 val)
+{
+ return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG, reg,
+ &val, 1, NULL, 0);
+}
+EXPORT_SYMBOL(mma9551_write_config_byte);
+
+/**
+ * mma9551_read_status_byte() - read 1 status byte
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @val: Pointer to store value read
+ *
+ * Read one status byte from the device using MMA955xL command format.
+ * Commands to the MMA955xL platform consist of a write followed by one or
+ * more reads.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_status_byte(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 *val)
+{
+ return mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
+ reg, NULL, 0, val, 1);
+}
+EXPORT_SYMBOL(mma9551_read_status_byte);
+
+/**
+ * mma9551_read_config_word() - read 1 config word
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @val: Pointer to store value read
+ *
+ * Read one configuration word from the device using MMA955xL command format.
+ * Commands to the MMA955xL platform consist of a write followed by one or
+ * more reads.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
+ u16 reg, u16 *val)
+{
+ int ret;
+ __be16 v;
+
+ ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
+ reg, NULL, 0, (u8 *)&v, 2);
+ *val = be16_to_cpu(v);
+
+ return ret;
+}
+EXPORT_SYMBOL(mma9551_read_config_word);
+
+/**
+ * mma9551_write_config_word() - write 1 config word
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @val: Value to write
+ *
+ * Write one configuration word to the device using MMA955xL command format.
+ * Commands to the MMA955xL platform consist of a write followed by one or
+ * more reads.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_write_config_word(struct i2c_client *client, u8 app_id,
+ u16 reg, u16 val)
+{
+ __be16 v = cpu_to_be16(val);
+
+ return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG, reg,
+ (u8 *) &v, 2, NULL, 0);
+}
+EXPORT_SYMBOL(mma9551_write_config_word);
+
+/**
+ * mma9551_read_status_word() - read 1 status word
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @val: Pointer to store value read
+ *
+ * Read one status word from the device using MMA955xL command format.
+ * Commands to the MMA955xL platform consist of a write followed by one or
+ * more reads.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
+ u16 reg, u16 *val)
+{
+ int ret;
+ __be16 v;
+
+ ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
+ reg, NULL, 0, (u8 *)&v, 2);
+ *val = be16_to_cpu(v);
+
+ return ret;
+}
+EXPORT_SYMBOL(mma9551_read_status_word);
+
+/**
+ * mma9551_read_config_words() - read multiple config words
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @len: Length of array to read in bytes
+ * @buf: Array of words to read
+ *
+ * Read multiple configuration registers (word-sized registers).
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 len, u16 *buf)
+{
+ int ret, i;
+ int len_words = len / sizeof(u16);
+ __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+
+ ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
+ reg, NULL, 0, (u8 *) be_buf, len);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < len_words; i++)
+ buf[i] = be16_to_cpu(be_buf[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(mma9551_read_config_words);
+
+/**
+ * mma9551_read_status_words() - read multiple status words
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @len: Length of array to read in bytes
+ * @buf: Array of words to read
+ *
+ * Read multiple status registers (word-sized registers).
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 len, u16 *buf)
+{
+ int ret, i;
+ int len_words = len / sizeof(u16);
+ __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+
+ ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
+ reg, NULL, 0, (u8 *) be_buf, len);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < len_words; i++)
+ buf[i] = be16_to_cpu(be_buf[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(mma9551_read_status_words);
+
+/**
+ * mma9551_write_config_words() - write multiple config words
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @len: Length of array to write in bytes
+ * @buf: Array of words to write
+ *
+ * Write multiple configuration registers (word-sized registers).
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 len, u16 *buf)
+{
+ int i;
+ int len_words = len / sizeof(u16);
+ __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+
+ for (i = 0; i < len_words; i++)
+ be_buf[i] = cpu_to_be16(buf[i]);
+
+ return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG,
+ reg, (u8 *) be_buf, len, NULL, 0);
+}
+EXPORT_SYMBOL(mma9551_write_config_words);
+
+/**
+ * mma9551_update_config_bits() - update bits in register
+ * @client: I2C client
+ * @app_id: Application ID
+ * @reg: Application register
+ * @mask: Mask for the bits to update
+ * @val: Value of the bits to update
+ *
+ * Update bits in the given register using a bit mask.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_update_config_bits(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 mask, u8 val)
+{
+ int ret;
+ u8 tmp, orig;
+
+ ret = mma9551_read_config_byte(client, app_id, reg, &orig);
+ if (ret < 0)
+ return ret;
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp == orig)
+ return 0;
+
+ return mma9551_write_config_byte(client, app_id, reg, tmp);
+}
+EXPORT_SYMBOL(mma9551_update_config_bits);
+
+/**
+ * mma9551_gpio_config() - configure gpio
+ * @client: I2C client
+ * @pin: GPIO pin to configure
+ * @app_id: Application ID
+ * @bitnum: Bit number of status register being assigned to the GPIO pin.
+ * @polarity: The polarity parameter is described in section 6.2.2, page 66,
+ * of the Software Reference Manual. Basically, polarity=0 means
+ * the interrupt line has the same value as the selected bit,
+ * while polarity=1 means the line is inverted.
+ *
+ * Assign a bit from an application’s status register to a specific GPIO pin.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_gpio_config(struct i2c_client *client, enum mma9551_gpio_pin pin,
+ u8 app_id, u8 bitnum, int polarity)
+{
+ u8 reg, pol_mask, pol_val;
+ int ret;
+
+ if (pin > mma9551_gpio_max) {
+ dev_err(&client->dev, "bad GPIO pin\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Pin 6 is configured by regs 0x00 and 0x01, pin 7 by 0x02 and
+ * 0x03, and so on.
+ */
+ reg = pin * 2;
+
+ ret = mma9551_write_config_byte(client, MMA9551_APPID_GPIO,
+ reg, app_id);
+ if (ret < 0) {
+ dev_err(&client->dev, "error setting GPIO app_id\n");
+ return ret;
+ }
+
+ ret = mma9551_write_config_byte(client, MMA9551_APPID_GPIO,
+ reg + 1, bitnum);
+ if (ret < 0) {
+ dev_err(&client->dev, "error setting GPIO bit number\n");
+ return ret;
+ }
+
+ switch (pin) {
+ case mma9551_gpio6:
+ reg = MMA9551_GPIO_POL_LSB;
+ pol_mask = 1 << 6;
+ break;
+ case mma9551_gpio7:
+ reg = MMA9551_GPIO_POL_LSB;
+ pol_mask = 1 << 7;
+ break;
+ case mma9551_gpio8:
+ reg = MMA9551_GPIO_POL_MSB;
+ pol_mask = 1 << 0;
+ break;
+ case mma9551_gpio9:
+ reg = MMA9551_GPIO_POL_MSB;
+ pol_mask = 1 << 1;
+ break;
+ }
+ pol_val = polarity ? pol_mask : 0;
+
+ ret = mma9551_update_config_bits(client, MMA9551_APPID_GPIO, reg,
+ pol_mask, pol_val);
+ if (ret < 0)
+ dev_err(&client->dev, "error setting GPIO polarity\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(mma9551_gpio_config);
+
+/**
+ * mma9551_read_version() - read device version information
+ * @client: I2C client
+ *
+ * Read version information and print device id and firmware version.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_read_version(struct i2c_client *client)
+{
+ struct mma9551_version_info info;
+ int ret;
+
+ ret = mma9551_transfer(client, MMA9551_APPID_VERSION, 0x00, 0x00,
+ NULL, 0, (u8 *)&info, sizeof(info));
+ if (ret < 0)
+ return ret;
+
+ dev_info(&client->dev, "device ID 0x%x, firmware version %02x.%02x\n",
+ be32_to_cpu(info.device_id), info.fw_version[0],
+ info.fw_version[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(mma9551_read_version);
+
+/**
+ * mma9551_set_device_state() - sets HW power mode
+ * @client: I2C client
+ * @enable: Use true to power on device, false to cause the device
+ * to enter sleep.
+ *
+ * Set power on/off for device using the Sleep/Wake Application.
+ * When enable is true, power on chip and enable doze mode.
+ * When enable is false, enter sleep mode (device remains in the
+ * lowest-power mode).
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_set_device_state(struct i2c_client *client, bool enable)
+{
+ return mma9551_update_config_bits(client, MMA9551_APPID_SLEEP_WAKE,
+ MMA9551_SLEEP_CFG,
+ MMA9551_SLEEP_CFG_SNCEN |
+ MMA9551_SLEEP_CFG_FLEEN |
+ MMA9551_SLEEP_CFG_SCHEN,
+ enable ? MMA9551_SLEEP_CFG_SCHEN |
+ MMA9551_SLEEP_CFG_FLEEN :
+ MMA9551_SLEEP_CFG_SNCEN);
+}
+EXPORT_SYMBOL(mma9551_set_device_state);
+
+/**
+ * mma9551_set_power_state() - sets runtime PM state
+ * @client: I2C client
+ * @on: Use true to power on device, false to power off
+ *
+ * Resume or suspend the device using Runtime PM.
+ * The device will suspend after the autosuspend delay.
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_set_power_state(struct i2c_client *client, bool on)
+{
+#ifdef CONFIG_PM
+ int ret;
+
+ if (on)
+ ret = pm_runtime_get_sync(&client->dev);
+ else {
+ pm_runtime_mark_last_busy(&client->dev);
+ ret = pm_runtime_put_autosuspend(&client->dev);
+ }
+
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed to change power state to %d\n", on);
+ if (on)
+ pm_runtime_put_noidle(&client->dev);
+
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(mma9551_set_power_state);
+
+/**
+ * mma9551_sleep() - sleep
+ * @freq: Application frequency
+ *
+ * Firmware applications run at a certain frequency on the
+ * device. Sleep for one application cycle to make sure the
+ * application had time to run once and initialize set values.
+ */
+void mma9551_sleep(int freq)
+{
+ int sleep_val = 1000 / freq;
+
+ if (sleep_val < 20)
+ usleep_range(sleep_val * 1000, 20000);
+ else
+ msleep_interruptible(sleep_val);
+}
+EXPORT_SYMBOL(mma9551_sleep);
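+
+/*
+ * For instance, at the 122 Hz default rate used by the mma9551 driver the
+ * helper above sleeps 1000 / 122 = 8 ms via usleep_range(), while the
+ * mma9553 pedometer's 30 Hz default gives 1000 / 30 = 33 ms and therefore
+ * takes the msleep_interruptible() path (both values truncated by integer
+ * division).
+ */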
+
+/**
+ * mma9551_read_accel_chan() - read accelerometer channel
+ * @client: I2C client
+ * @chan: IIO channel
+ * @val: Pointer to the accelerometer value read
+ * @val2: Unused
+ *
+ * Read accelerometer value for the specified channel.
+ *
+ * Locking note: This function must be called with the device lock held.
+ * Locking is not handled inside the function. Callers should ensure they
+ * serialize access to the HW.
+ *
+ * Returns: IIO_VAL_INT on success, negative value on failure.
+ */
+int mma9551_read_accel_chan(struct i2c_client *client,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2)
+{
+ u16 reg_addr;
+ s16 raw_accel;
+ int ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg_addr = MMA9551_AFE_X_ACCEL_REG;
+ break;
+ case IIO_MOD_Y:
+ reg_addr = MMA9551_AFE_Y_ACCEL_REG;
+ break;
+ case IIO_MOD_Z:
+ reg_addr = MMA9551_AFE_Z_ACCEL_REG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mma9551_set_power_state(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = mma9551_read_status_word(client, MMA9551_APPID_AFE,
+ reg_addr, &raw_accel);
+ if (ret < 0)
+ goto out_poweroff;
+
+ *val = raw_accel;
+
+ ret = IIO_VAL_INT;
+
+out_poweroff:
+ mma9551_set_power_state(client, false);
+ return ret;
+}
+EXPORT_SYMBOL(mma9551_read_accel_chan);
+
+/**
+ * mma9551_read_accel_scale() - read accelerometer scale
+ * @val: Pointer to the accelerometer scale (int value)
+ * @val2: Pointer to the accelerometer scale (micro value)
+ *
+ * Read accelerometer scale.
+ *
+ * Returns: IIO_VAL_INT_PLUS_MICRO.
+ */
+int mma9551_read_accel_scale(int *val, int *val2)
+{
+ *val = 0;
+ *val2 = 2440;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+EXPORT_SYMBOL(mma9551_read_accel_scale);
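+
+/*
+ * In other words, raw accelerometer counts are reported with a scale of
+ * 0.002440 m/s^2 per LSB, so a raw reading of 4096 corresponds to about
+ * 4096 * 0.002440 = 9.99 m/s^2, i.e. roughly 1 g, matching the 0.244 mg/LSB
+ * resolution quoted in the MMA9553 driver.
+ */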
+
+/**
+ * mma9551_app_reset() - reset application
+ * @client: I2C client
+ * @app_mask: Application to reset
+ *
+ * Reset the given application (using the Reset/Suspend/Clear
+ * Control Application)
+ *
+ * Returns: 0 on success, negative value on failure.
+ */
+int mma9551_app_reset(struct i2c_client *client, u32 app_mask)
+{
+ return mma9551_write_config_byte(client, MMA9551_APPID_RCS,
+ MMA9551_RSC_RESET +
+ MMA9551_RSC_OFFSET(app_mask),
+ MMA9551_RSC_VAL(app_mask));
+}
+EXPORT_SYMBOL(mma9551_app_reset);
+
+MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>");
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMA955xL sensors core");
diff --git a/drivers/iio/accel/mma9551_core.h b/drivers/iio/accel/mma9551_core.h
new file mode 100644
index 000000000000..edaa56b1078e
--- /dev/null
+++ b/drivers/iio/accel/mma9551_core.h
@@ -0,0 +1,81 @@
+/*
+ * Common code for Freescale MMA955x Intelligent Sensor Platform drivers
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _MMA9551_CORE_H_
+#define _MMA9551_CORE_H_
+
+/* Applications IDs */
+#define MMA9551_APPID_VERSION 0x00
+#define MMA9551_APPID_GPIO 0x03
+#define MMA9551_APPID_AFE 0x06
+#define MMA9551_APPID_TILT 0x0B
+#define MMA9551_APPID_SLEEP_WAKE 0x12
+#define MMA9551_APPID_PEDOMETER 0x15
+#define MMA9551_APPID_RCS 0x17
+#define MMA9551_APPID_NONE 0xff
+
+/* Reset/Suspend/Clear application app masks */
+#define MMA9551_RSC_PED BIT(21)
+
+#define MMA9551_AUTO_SUSPEND_DELAY_MS 2000
+
+enum mma9551_gpio_pin {
+ mma9551_gpio6 = 0,
+ mma9551_gpio7,
+ mma9551_gpio8,
+ mma9551_gpio9,
+ mma9551_gpio_max = mma9551_gpio9,
+};
+
+#define MMA9551_ACCEL_CHANNEL(axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+int mma9551_read_config_byte(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 *val);
+int mma9551_write_config_byte(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 val);
+int mma9551_read_status_byte(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 *val);
+int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
+ u16 reg, u16 *val);
+int mma9551_write_config_word(struct i2c_client *client, u8 app_id,
+ u16 reg, u16 val);
+int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
+ u16 reg, u16 *val);
+int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 len, u16 *buf);
+int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 len, u16 *buf);
+int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 len, u16 *buf);
+int mma9551_update_config_bits(struct i2c_client *client, u8 app_id,
+ u16 reg, u8 mask, u8 val);
+int mma9551_gpio_config(struct i2c_client *client, enum mma9551_gpio_pin pin,
+ u8 app_id, u8 bitnum, int polarity);
+int mma9551_read_version(struct i2c_client *client);
+int mma9551_set_device_state(struct i2c_client *client, bool enable);
+int mma9551_set_power_state(struct i2c_client *client, bool on);
+void mma9551_sleep(int freq);
+int mma9551_read_accel_chan(struct i2c_client *client,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2);
+int mma9551_read_accel_scale(int *val, int *val2);
+int mma9551_app_reset(struct i2c_client *client, u32 app_mask);
+
+#endif /* _MMA9551_CORE_H_ */
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
new file mode 100644
index 000000000000..d23ebf192f63
--- /dev/null
+++ b/drivers/iio/accel/mma9553.c
@@ -0,0 +1,1334 @@
+/*
+ * Freescale MMA9553L Intelligent Pedometer driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/pm_runtime.h>
+#include "mma9551_core.h"
+
+#define MMA9553_DRV_NAME "mma9553"
+#define MMA9553_IRQ_NAME "mma9553_event"
+#define MMA9553_GPIO_NAME "mma9553_int"
+
+/* Pedometer configuration registers (R/W) */
+#define MMA9553_REG_CONF_SLEEPMIN 0x00
+#define MMA9553_REG_CONF_SLEEPMAX 0x02
+#define MMA9553_REG_CONF_SLEEPTHD 0x04
+#define MMA9553_MASK_CONF_WORD GENMASK(15, 0)
+
+#define MMA9553_REG_CONF_CONF_STEPLEN 0x06
+#define MMA9553_MASK_CONF_CONFIG BIT(15)
+#define MMA9553_MASK_CONF_ACT_DBCNTM BIT(14)
+#define MMA9553_MASK_CONF_SLP_DBCNTM BIT(13)
+#define MMA9553_MASK_CONF_STEPLEN GENMASK(7, 0)
+
+#define MMA9553_REG_CONF_HEIGHT_WEIGHT 0x08
+#define MMA9553_MASK_CONF_HEIGHT GENMASK(15, 8)
+#define MMA9553_MASK_CONF_WEIGHT GENMASK(7, 0)
+
+#define MMA9553_REG_CONF_FILTER 0x0A
+#define MMA9553_MASK_CONF_FILTSTEP GENMASK(15, 8)
+#define MMA9553_MASK_CONF_MALE BIT(7)
+#define MMA9553_MASK_CONF_FILTTIME GENMASK(6, 0)
+
+#define MMA9553_REG_CONF_SPEED_STEP 0x0C
+#define MMA9553_MASK_CONF_SPDPRD GENMASK(15, 8)
+#define MMA9553_MASK_CONF_STEPCOALESCE GENMASK(7, 0)
+
+#define MMA9553_REG_CONF_ACTTHD 0x0E
+
+/* Pedometer status registers (R-only) */
+#define MMA9553_REG_STATUS 0x00
+#define MMA9553_MASK_STATUS_MRGFL BIT(15)
+#define MMA9553_MASK_STATUS_SUSPCHG BIT(14)
+#define MMA9553_MASK_STATUS_STEPCHG BIT(13)
+#define MMA9553_MASK_STATUS_ACTCHG BIT(12)
+#define MMA9553_MASK_STATUS_SUSP BIT(11)
+#define MMA9553_MASK_STATUS_ACTIVITY (BIT(10) | BIT(9) | BIT(8))
+#define MMA9553_MASK_STATUS_VERSION 0x00FF
+
+#define MMA9553_REG_STEPCNT 0x02
+#define MMA9553_REG_DISTANCE 0x04
+#define MMA9553_REG_SPEED 0x06
+#define MMA9553_REG_CALORIES 0x08
+#define MMA9553_REG_SLEEPCNT 0x0A
+
+/* Pedometer events are always mapped to this pin. */
+#define MMA9553_DEFAULT_GPIO_PIN mma9551_gpio6
+#define MMA9553_DEFAULT_GPIO_POLARITY 0
+
+/* Bitnum used for gpio configuration = bit number in high status byte */
+#define STATUS_TO_BITNUM(bit) (ffs(bit) - 9)
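+
+/*
+ * For example, MMA9553_MASK_STATUS_STEPCHG is BIT(13), so ffs() returns 14
+ * and STATUS_TO_BITNUM() yields 5: bit 5 of the high status byte, which is
+ * the value later handed to mma9551_gpio_config() as the bit number.
+ */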
+
+#define MMA9553_DEFAULT_SAMPLE_RATE 30 /* Hz */
+
+/*
+ * The internal activity level must be stable for ACTTHD samples before
+ * ACTIVITY is updated. The ACTIVITY variable contains the current activity
+ * level and is updated every time a step is detected or once a second
+ * if there are no steps.
+ */
+#define MMA9553_ACTIVITY_THD_TO_SEC(thd) ((thd) / MMA9553_DEFAULT_SAMPLE_RATE)
+#define MMA9553_ACTIVITY_SEC_TO_THD(sec) ((sec) * MMA9553_DEFAULT_SAMPLE_RATE)
+
+/*
+ * Autonomously suspend pedometer if acceleration vector magnitude
+ * is near 1g (4096 at 0.244 mg/LSB resolution) for 30 seconds.
+ */
+#define MMA9553_DEFAULT_SLEEPMIN 3688 /* 0.9 g */
+#define MMA9553_DEFAULT_SLEEPMAX 4508 /* 1.1 g */
+#define MMA9553_DEFAULT_SLEEPTHD (MMA9553_DEFAULT_SAMPLE_RATE * 30)
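+
+/*
+ * At the 0.244 mg/LSB resolution quoted above, 3688 * 0.244 mg is roughly
+ * 900 mg and 4508 * 0.244 mg roughly 1100 mg, so the window indeed spans
+ * 0.9 g to 1.1 g, and the SLEEPTHD of 30 Hz * 30 = 900 samples corresponds
+ * to the 30 seconds mentioned in the comment.
+ */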
+
+#define MMA9553_CONFIG_RETRIES 2
+
+/* Status register - activity field */
+enum activity_level {
+ ACTIVITY_UNKNOWN,
+ ACTIVITY_REST,
+ ACTIVITY_WALKING,
+ ACTIVITY_JOGGING,
+ ACTIVITY_RUNNING,
+};
+
+static struct mma9553_event_info {
+ enum iio_chan_type type;
+ enum iio_modifier mod;
+ enum iio_event_direction dir;
+} mma9553_events_info[] = {
+ {
+ .type = IIO_STEPS,
+ .mod = IIO_NO_MOD,
+ .dir = IIO_EV_DIR_NONE,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_STILL,
+ .dir = IIO_EV_DIR_RISING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_STILL,
+ .dir = IIO_EV_DIR_FALLING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_WALKING,
+ .dir = IIO_EV_DIR_RISING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_WALKING,
+ .dir = IIO_EV_DIR_FALLING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_JOGGING,
+ .dir = IIO_EV_DIR_RISING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_JOGGING,
+ .dir = IIO_EV_DIR_FALLING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_RUNNING,
+ .dir = IIO_EV_DIR_RISING,
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .mod = IIO_MOD_RUNNING,
+ .dir = IIO_EV_DIR_FALLING,
+ },
+};
+
+#define MMA9553_EVENTS_INFO_SIZE ARRAY_SIZE(mma9553_events_info)
+
+struct mma9553_event {
+ struct mma9553_event_info *info;
+ bool enabled;
+};
+
+struct mma9553_conf_regs {
+ u16 sleepmin;
+ u16 sleepmax;
+ u16 sleepthd;
+ u16 config;
+ u16 height_weight;
+ u16 filter;
+ u16 speed_step;
+ u16 actthd;
+} __packed;
+
+struct mma9553_data {
+ struct i2c_client *client;
+ struct mutex mutex;
+ struct mma9553_conf_regs conf;
+ struct mma9553_event events[MMA9553_EVENTS_INFO_SIZE];
+ int num_events;
+ u8 gpio_bitnum;
+ /*
+ * This is used for all features that depend on step count:
+ * step count, distance, speed, calories.
+ */
+ bool stepcnt_enabled;
+ u16 stepcnt;
+ u8 activity;
+ s64 timestamp;
+};
+
+static u8 mma9553_get_bits(u16 val, u16 mask)
+{
+ return (val & mask) >> (ffs(mask) - 1);
+}
+
+static u16 mma9553_set_bits(u16 current_val, u16 val, u16 mask)
+{
+ return (current_val & ~mask) | (val << (ffs(mask) - 1));
+}
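+
+/*
+ * Example with illustrative values: MMA9553_MASK_CONF_HEIGHT is
+ * GENMASK(15, 8), so mma9553_get_bits(0xAB07, MMA9553_MASK_CONF_HEIGHT)
+ * returns 0xAB and mma9553_set_bits(0xAB07, 0xCD, MMA9553_MASK_CONF_HEIGHT)
+ * returns 0xCD07.
+ */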
+
+static enum iio_modifier mma9553_activity_to_mod(enum activity_level activity)
+{
+ switch (activity) {
+ case ACTIVITY_RUNNING:
+ return IIO_MOD_RUNNING;
+ case ACTIVITY_JOGGING:
+ return IIO_MOD_JOGGING;
+ case ACTIVITY_WALKING:
+ return IIO_MOD_WALKING;
+ case ACTIVITY_REST:
+ return IIO_MOD_STILL;
+ case ACTIVITY_UNKNOWN:
+ default:
+ return IIO_NO_MOD;
+ }
+}
+
+static void mma9553_init_events(struct mma9553_data *data)
+{
+ int i;
+
+ data->num_events = MMA9553_EVENTS_INFO_SIZE;
+ for (i = 0; i < data->num_events; i++) {
+ data->events[i].info = &mma9553_events_info[i];
+ data->events[i].enabled = false;
+ }
+}
+
+static struct mma9553_event *mma9553_get_event(struct mma9553_data *data,
+ enum iio_chan_type type,
+ enum iio_modifier mod,
+ enum iio_event_direction dir)
+{
+ int i;
+
+ for (i = 0; i < data->num_events; i++)
+ if (data->events[i].info->type == type &&
+ data->events[i].info->mod == mod &&
+ data->events[i].info->dir == dir)
+ return &data->events[i];
+
+ return NULL;
+}
+
+static bool mma9553_is_any_event_enabled(struct mma9553_data *data,
+ bool check_type,
+ enum iio_chan_type type)
+{
+ int i;
+
+ for (i = 0; i < data->num_events; i++)
+ if ((check_type && data->events[i].info->type == type &&
+ data->events[i].enabled) ||
+ (!check_type && data->events[i].enabled))
+ return true;
+
+ return false;
+}
+
+static int mma9553_set_config(struct mma9553_data *data, u16 reg,
+ u16 *p_reg_val, u16 val, u16 mask)
+{
+ int ret, retries;
+ u16 reg_val, config;
+
+ reg_val = *p_reg_val;
+ if (val == mma9553_get_bits(reg_val, mask))
+ return 0;
+
+ reg_val = mma9553_set_bits(reg_val, val, mask);
+ ret = mma9551_write_config_word(data->client, MMA9551_APPID_PEDOMETER,
+ reg, reg_val);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "error writing config register 0x%x\n", reg);
+ return ret;
+ }
+
+ *p_reg_val = reg_val;
+
+ /* Reinitializes the pedometer with current configuration values */
+ config = mma9553_set_bits(data->conf.config, 1,
+ MMA9553_MASK_CONF_CONFIG);
+
+ ret = mma9551_write_config_word(data->client, MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CONF_CONF_STEPLEN, config);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "error writing config register 0x%x\n",
+ MMA9553_REG_CONF_CONF_STEPLEN);
+ return ret;
+ }
+
+ retries = MMA9553_CONFIG_RETRIES;
+ do {
+ mma9551_sleep(MMA9553_DEFAULT_SAMPLE_RATE);
+ ret = mma9551_read_config_word(data->client,
+ MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CONF_CONF_STEPLEN,
+ &config);
+ if (ret < 0)
+ return ret;
+ } while (mma9553_get_bits(config, MMA9553_MASK_CONF_CONFIG) &&
+ --retries > 0);
+
+ return 0;
+}
+
+static int mma9553_read_activity_stepcnt(struct mma9553_data *data,
+ u8 *activity, u16 *stepcnt)
+{
+ u32 status_stepcnt;
+ u16 status;
+ int ret;
+
+ ret = mma9551_read_status_words(data->client, MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_STATUS, sizeof(u32),
+ (u16 *) &status_stepcnt);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "error reading status and stepcnt\n");
+ return ret;
+ }
+
+ status = status_stepcnt & MMA9553_MASK_CONF_WORD;
+ *activity = mma9553_get_bits(status, MMA9553_MASK_STATUS_ACTIVITY);
+ *stepcnt = status_stepcnt >> 16;
+
+ return 0;
+}
+
+static int mma9553_conf_gpio(struct mma9553_data *data)
+{
+ u8 bitnum = 0, appid = MMA9551_APPID_PEDOMETER;
+ int ret;
+ struct mma9553_event *ev_step_detect;
+ bool activity_enabled;
+
+ activity_enabled =
+ mma9553_is_any_event_enabled(data, true, IIO_ACTIVITY);
+ ev_step_detect =
+ mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD, IIO_EV_DIR_NONE);
+
+ /*
+ * If both step detector and activity are enabled, use the MRGFL bit.
+ * This bit is the logical OR of the SUSPCHG, STEPCHG, and ACTCHG flags.
+ */
+ if (activity_enabled && ev_step_detect->enabled)
+ bitnum = STATUS_TO_BITNUM(MMA9553_MASK_STATUS_MRGFL);
+ else if (ev_step_detect->enabled)
+ bitnum = STATUS_TO_BITNUM(MMA9553_MASK_STATUS_STEPCHG);
+ else if (activity_enabled)
+ bitnum = STATUS_TO_BITNUM(MMA9553_MASK_STATUS_ACTCHG);
+ else /* Reset */
+ appid = MMA9551_APPID_NONE;
+
+ if (data->gpio_bitnum == bitnum)
+ return 0;
+
+ /* Save initial values for activity and stepcnt */
+ if (activity_enabled || ev_step_detect->enabled)
+ mma9553_read_activity_stepcnt(data, &data->activity,
+ &data->stepcnt);
+
+ ret = mma9551_gpio_config(data->client,
+ MMA9553_DEFAULT_GPIO_PIN,
+ appid, bitnum, MMA9553_DEFAULT_GPIO_POLARITY);
+ if (ret < 0)
+ return ret;
+ data->gpio_bitnum = bitnum;
+
+ return 0;
+}
+
+static int mma9553_init(struct mma9553_data *data)
+{
+ int ret;
+
+ ret = mma9551_read_version(data->client);
+ if (ret)
+ return ret;
+
+ /*
+ * Read all the pedometer configuration registers. This is used as
+ * a device identification command to differentiate the MMA9553L
+ * from the MMA9550L.
+ */
+ ret =
+ mma9551_read_config_words(data->client, MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CONF_SLEEPMIN,
+ sizeof(data->conf), (u16 *) &data->conf);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "device is not MMA9553L: failed to read cfg regs\n");
+ return ret;
+ }
+
+ /* Reset gpio */
+ data->gpio_bitnum = -1;
+ ret = mma9553_conf_gpio(data);
+ if (ret < 0)
+ return ret;
+
+ ret = mma9551_app_reset(data->client, MMA9551_RSC_PED);
+ if (ret < 0)
+ return ret;
+
+ /* Init config registers */
+ data->conf.sleepmin = MMA9553_DEFAULT_SLEEPMIN;
+ data->conf.sleepmax = MMA9553_DEFAULT_SLEEPMAX;
+ data->conf.sleepthd = MMA9553_DEFAULT_SLEEPTHD;
+ data->conf.config =
+ mma9553_set_bits(data->conf.config, 1, MMA9553_MASK_CONF_CONFIG);
+ /*
+ * Clear the activity debounce counter when the activity level changes,
+ * so that the confidence level applies for any activity level.
+ */
+ data->conf.config = mma9553_set_bits(data->conf.config, 1,
+ MMA9553_MASK_CONF_ACT_DBCNTM);
+ ret =
+ mma9551_write_config_words(data->client, MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CONF_SLEEPMIN,
+ sizeof(data->conf), (u16 *) &data->conf);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to write configuration registers\n");
+ return ret;
+ }
+
+ return mma9551_set_device_state(data->client, true);
+}
+
+static int mma9553_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret;
+ u16 tmp;
+ u8 activity;
+ bool powered_on;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_STEPS:
+ /*
+ * The HW only counts steps and other dependent
+ * parameters (speed, distance, calories, activity)
+ * if power is on (from enabling an event or the
+ * step counter).
+ */
+ powered_on =
+ mma9553_is_any_event_enabled(data, false, 0) ||
+ data->stepcnt_enabled;
+ if (!powered_on) {
+ dev_err(&data->client->dev,
+ "No channels enabled\n");
+ return -EINVAL;
+ }
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_status_word(data->client,
+ MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_STEPCNT,
+ &tmp);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+ *val = tmp;
+ return IIO_VAL_INT;
+ case IIO_DISTANCE:
+ powered_on =
+ mma9553_is_any_event_enabled(data, false, 0) ||
+ data->stepcnt_enabled;
+ if (!powered_on) {
+ dev_err(&data->client->dev,
+ "No channels enabled\n");
+ return -EINVAL;
+ }
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_status_word(data->client,
+ MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_DISTANCE,
+ &tmp);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+ *val = tmp;
+ return IIO_VAL_INT;
+ case IIO_ACTIVITY:
+ powered_on =
+ mma9553_is_any_event_enabled(data, false, 0) ||
+ data->stepcnt_enabled;
+ if (!powered_on) {
+ dev_err(&data->client->dev,
+ "No channels enabled\n");
+ return -EINVAL;
+ }
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_status_word(data->client,
+ MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_STATUS,
+ &tmp);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+
+ activity =
+ mma9553_get_bits(tmp, MMA9553_MASK_STATUS_ACTIVITY);
+
+ /*
+ * The device does not support confidence value levels,
+ * so we will always have 100% for current activity and
+ * 0% for the others.
+ */
+ if (chan->channel2 == mma9553_activity_to_mod(activity))
+ *val = 100;
+ else
+ *val = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VELOCITY: /* m/h */
+ if (chan->channel2 != IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z)
+ return -EINVAL;
+ powered_on =
+ mma9553_is_any_event_enabled(data, false, 0) ||
+ data->stepcnt_enabled;
+ if (!powered_on) {
+ dev_err(&data->client->dev,
+ "No channels enabled\n");
+ return -EINVAL;
+ }
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_status_word(data->client,
+ MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_SPEED, &tmp);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+ *val = tmp;
+ return IIO_VAL_INT;
+ case IIO_ENERGY: /* Cal or kcal */
+ powered_on =
+ mma9553_is_any_event_enabled(data, false, 0) ||
+ data->stepcnt_enabled;
+ if (!powered_on) {
+ dev_err(&data->client->dev,
+ "No channels enabled\n");
+ return -EINVAL;
+ }
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_status_word(data->client,
+ MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CALORIES,
+ &tmp);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+ *val = tmp;
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ mutex_lock(&data->mutex);
+ ret = mma9551_read_accel_chan(data->client,
+ chan, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VELOCITY: /* m/h to m/s */
+ if (chan->channel2 != IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z)
+ return -EINVAL;
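+ /* 1 m/h = 1/3600 m/s, i.e. approximately 0.000277 m/s */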
+ *val = 0;
+ *val2 = 277; /* 0.000277 */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ENERGY: /* Cal or kcal to J */
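+ /* 1 kcal = 4184 J */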
+ *val = 4184;
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ return mma9551_read_accel_scale(val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_ENABLE:
+ *val = data->stepcnt_enabled;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBHEIGHT:
+ tmp = mma9553_get_bits(data->conf.height_weight,
+ MMA9553_MASK_CONF_HEIGHT);
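+ /* Height is stored in cm, e.g. tmp = 178 reads back as 1.780000 m */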
+ *val = tmp / 100; /* cm to m */
+ *val2 = (tmp % 100) * 10000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBWEIGHT:
+ *val = mma9553_get_bits(data->conf.height_weight,
+ MMA9553_MASK_CONF_WEIGHT);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_DEBOUNCE_COUNT:
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = mma9553_get_bits(data->conf.filter,
+ MMA9553_MASK_CONF_FILTSTEP);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_DEBOUNCE_TIME:
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = mma9553_get_bits(data->conf.filter,
+ MMA9553_MASK_CONF_FILTTIME);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_VELOCITY:
+ if (chan->channel2 != IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z)
+ return -EINVAL;
+ *val = mma9553_get_bits(data->conf.speed_step,
+ MMA9553_MASK_CONF_SPDPRD);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9553_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret, tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_ENABLE:
+ if (data->stepcnt_enabled == !!val)
+ return 0;
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_power_state(data->client, val);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ data->stepcnt_enabled = val;
+ mutex_unlock(&data->mutex);
+ return 0;
+ case IIO_CHAN_INFO_CALIBHEIGHT:
+ /* m to cm */
+ tmp = val * 100 + val2 / 10000;
+ if (tmp < 0 || tmp > 255)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data,
+ MMA9553_REG_CONF_HEIGHT_WEIGHT,
+ &data->conf.height_weight,
+ tmp, MMA9553_MASK_CONF_HEIGHT);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_CALIBWEIGHT:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data,
+ MMA9553_REG_CONF_HEIGHT_WEIGHT,
+ &data->conf.height_weight,
+ val, MMA9553_MASK_CONF_WEIGHT);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_DEBOUNCE_COUNT:
+ switch (chan->type) {
+ case IIO_STEPS:
+ /*
+ * Set to 0 to disable step filtering. If the value
+ * specified is greater than 6, then 6 will be used.
+ */
+ if (val < 0)
+ return -EINVAL;
+ if (val > 6)
+ val = 6;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data, MMA9553_REG_CONF_FILTER,
+ &data->conf.filter, val,
+ MMA9553_MASK_CONF_FILTSTEP);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_DEBOUNCE_TIME:
+ switch (chan->type) {
+ case IIO_STEPS:
+ if (val < 0 || val > 127)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data, MMA9553_REG_CONF_FILTER,
+ &data->conf.filter, val,
+ MMA9553_MASK_CONF_FILTTIME);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_VELOCITY:
+ if (chan->channel2 != IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z)
+ return -EINVAL;
+ /*
+ * If set to a value greater than 5, then 5 will be
+ * used. Warning: Do not set SPDPRD to 0 or 1 as
+ * this may cause undesirable behavior.
+ */
+ if (val < 2)
+ return -EINVAL;
+ if (val > 5)
+ val = 5;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data,
+ MMA9553_REG_CONF_SPEED_STEP,
+ &data->conf.speed_step, val,
+ MMA9553_MASK_CONF_SPDPRD);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9553_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+
+ struct mma9553_data *data = iio_priv(indio_dev);
+ struct mma9553_event *event;
+
+ event = mma9553_get_event(data, chan->type, chan->channel2, dir);
+ if (!event)
+ return -EINVAL;
+
+ return event->enabled;
+}
+
+static int mma9553_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+ struct mma9553_event *event;
+ int ret;
+
+ event = mma9553_get_event(data, chan->type, chan->channel2, dir);
+ if (!event)
+ return -EINVAL;
+
+ if (event->enabled == state)
+ return 0;
+
+ mutex_lock(&data->mutex);
+
+ ret = mma9551_set_power_state(data->client, state);
+ if (ret < 0)
+ goto err_out;
+ event->enabled = state;
+
+ ret = mma9553_conf_gpio(data);
+ if (ret < 0)
+ goto err_conf_gpio;
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+
+err_conf_gpio:
+ if (state) {
+ event->enabled = false;
+ mma9551_set_power_state(data->client, false);
+ }
+err_out:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int mma9553_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+
+ *val2 = 0;
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = mma9553_get_bits(data->conf.speed_step,
+ MMA9553_MASK_CONF_STEPCOALESCE);
+ return IIO_VAL_INT;
+ case IIO_ACTIVITY:
+ /*
+ * The device does not support confidence value levels,
+ * so a fixed value of 50% is reported.
+ */
+ *val = 50;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (chan->type) {
+ case IIO_ACTIVITY:
+ *val = MMA9553_ACTIVITY_THD_TO_SEC(data->conf.actthd);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9553_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (chan->type) {
+ case IIO_STEPS:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data,
+ MMA9553_REG_CONF_SPEED_STEP,
+ &data->conf.speed_step, val,
+ MMA9553_MASK_CONF_STEPCOALESCE);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (chan->type) {
+ case IIO_ACTIVITY:
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data, MMA9553_REG_CONF_ACTTHD,
+ &data->conf.actthd,
+ MMA9553_ACTIVITY_SEC_TO_THD
+ (val), MMA9553_MASK_CONF_WORD);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma9553_get_calibgender_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+ u8 gender;
+
+ gender = mma9553_get_bits(data->conf.filter, MMA9553_MASK_CONF_MALE);
+ /*
+ * HW expects 0 for female and 1 for male,
+ * while iio index is 0 for male and 1 for female
+ */
+ return !gender;
+}
+
+static int mma9553_set_calibgender_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct mma9553_data *data = iio_priv(indio_dev);
+ u8 gender = !mode;
+ int ret;
+
+ if ((mode != 0) && (mode != 1))
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = mma9553_set_config(data, MMA9553_REG_CONF_FILTER,
+ &data->conf.filter, gender,
+ MMA9553_MASK_CONF_MALE);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_event_spec mma9553_step_event = {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+};
+
+static const struct iio_event_spec mma9553_activity_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
+static const char * const calibgender_modes[] = { "male", "female" };
+
+static const struct iio_enum mma9553_calibgender_enum = {
+ .items = calibgender_modes,
+ .num_items = ARRAY_SIZE(calibgender_modes),
+ .get = mma9553_get_calibgender_mode,
+ .set = mma9553_set_calibgender_mode,
+};
+
+static const struct iio_chan_spec_ext_info mma9553_ext_info[] = {
+ IIO_ENUM("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
+ IIO_ENUM_AVAILABLE("calibgender", &mma9553_calibgender_enum),
+ {},
+};
+
+#define MMA9553_PEDOMETER_CHANNEL(_type, _mask) { \
+ .type = _type, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_ENABLE) | \
+ BIT(IIO_CHAN_INFO_CALIBHEIGHT) | \
+ _mask, \
+ .ext_info = mma9553_ext_info, \
+}
+
+#define MMA9553_ACTIVITY_CHANNEL(_chan2) { \
+ .type = IIO_ACTIVITY, \
+ .modified = 1, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT), \
+ .event_spec = mma9553_activity_events, \
+ .num_event_specs = ARRAY_SIZE(mma9553_activity_events), \
+ .ext_info = mma9553_ext_info, \
+}
+
+static const struct iio_chan_spec mma9553_channels[] = {
+ MMA9551_ACCEL_CHANNEL(IIO_MOD_X),
+ MMA9551_ACCEL_CHANNEL(IIO_MOD_Y),
+ MMA9551_ACCEL_CHANNEL(IIO_MOD_Z),
+
+ {
+ .type = IIO_STEPS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_DEBOUNCE_COUNT) |
+ BIT(IIO_CHAN_INFO_DEBOUNCE_TIME),
+ .event_spec = &mma9553_step_event,
+ .num_event_specs = 1,
+ },
+
+ MMA9553_PEDOMETER_CHANNEL(IIO_DISTANCE, BIT(IIO_CHAN_INFO_PROCESSED)),
+ {
+ .type = IIO_VELOCITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_ENABLE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT),
+ .ext_info = mma9553_ext_info,
+ },
+ MMA9553_PEDOMETER_CHANNEL(IIO_ENERGY, BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBWEIGHT)),
+
+ MMA9553_ACTIVITY_CHANNEL(IIO_MOD_RUNNING),
+ MMA9553_ACTIVITY_CHANNEL(IIO_MOD_JOGGING),
+ MMA9553_ACTIVITY_CHANNEL(IIO_MOD_WALKING),
+ MMA9553_ACTIVITY_CHANNEL(IIO_MOD_STILL),
+};
+
+static const struct iio_info mma9553_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mma9553_read_raw,
+ .write_raw = mma9553_write_raw,
+ .read_event_config = mma9553_read_event_config,
+ .write_event_config = mma9553_write_event_config,
+ .read_event_value = mma9553_read_event_value,
+ .write_event_value = mma9553_write_event_value,
+};
+
+static irqreturn_t mma9553_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct mma9553_data *data = iio_priv(indio_dev);
+
+ data->timestamp = iio_get_time_ns();
+ /*
+ * Since we only configure the interrupt pin when an
+ * event is enabled, we are sure we have at least
+ * one event enabled at this point.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mma9553_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct mma9553_data *data = iio_priv(indio_dev);
+ u16 stepcnt;
+ u8 activity;
+ struct mma9553_event *ev_activity, *ev_prev_activity, *ev_step_detect;
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9553_read_activity_stepcnt(data, &activity, &stepcnt);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return IRQ_HANDLED;
+ }
+
+ ev_prev_activity =
+ mma9553_get_event(data, IIO_ACTIVITY,
+ mma9553_activity_to_mod(data->activity),
+ IIO_EV_DIR_FALLING);
+ ev_activity =
+ mma9553_get_event(data, IIO_ACTIVITY,
+ mma9553_activity_to_mod(activity),
+ IIO_EV_DIR_RISING);
+ ev_step_detect =
+ mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD, IIO_EV_DIR_NONE);
+
+ if (ev_step_detect->enabled && (stepcnt != data->stepcnt)) {
+ data->stepcnt = stepcnt;
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
+ IIO_EV_DIR_NONE, IIO_EV_TYPE_CHANGE, 0, 0, 0),
+ data->timestamp);
+ }
+
+ if (activity != data->activity) {
+ data->activity = activity;
+ /* ev_activity can be NULL if activity == ACTIVITY_UNKNOWN */
+ if (ev_prev_activity && ev_prev_activity->enabled)
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_ACTIVITY, 0,
+ ev_prev_activity->info->mod,
+ IIO_EV_DIR_FALLING,
+ IIO_EV_TYPE_THRESH, 0, 0, 0),
+ data->timestamp);
+
+ if (ev_activity && ev_activity->enabled)
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_ACTIVITY, 0,
+ ev_activity->info->mod,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH, 0, 0, 0),
+ data->timestamp);
+ }
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int mma9553_gpio_probe(struct i2c_client *client)
+{
+ struct device *dev;
+ struct gpio_desc *gpio;
+ int ret;
+
+ if (!client)
+ return -EINVAL;
+
+ dev = &client->dev;
+
+ /* data ready gpio interrupt pin */
+ gpio = devm_gpiod_get_index(dev, MMA9553_GPIO_NAME, 0);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "acpi gpio get index failed\n");
+ return PTR_ERR(gpio);
+ }
+
+ ret = gpiod_direction_input(gpio);
+ if (ret)
+ return ret;
+
+ ret = gpiod_to_irq(gpio);
+
+ dev_dbg(dev, "gpio resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+ return ret;
+}
+
+static const char *mma9553_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ return dev_name(dev);
+}
+
+static int mma9553_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mma9553_data *data;
+ struct iio_dev *indio_dev;
+ const char *name = NULL;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ if (id)
+ name = id->name;
+ else if (ACPI_HANDLE(&client->dev))
+ name = mma9553_match_acpi_device(&client->dev);
+ else
+ return -ENOSYS;
+
+ mutex_init(&data->mutex);
+ mma9553_init_events(data);
+
+ ret = mma9553_init(data);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = mma9553_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mma9553_channels);
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mma9553_info;
+
+ if (client->irq < 0)
+ client->irq = mma9553_gpio_probe(client);
+
+ if (client->irq >= 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ mma9553_irq_handler,
+ mma9553_event_handler,
+ IRQF_TRIGGER_RISING,
+ MMA9553_IRQ_NAME, indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "request irq %d failed\n",
+ client->irq);
+ goto out_poweroff;
+ }
+
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+ goto out_poweroff;
+ }
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto out_iio_unregister;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ MMA9551_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
+
+ return 0;
+
+out_iio_unregister:
+ iio_device_unregister(indio_dev);
+out_poweroff:
+ mma9551_set_device_state(client, false);
+ return ret;
+}
+
+static int mma9553_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mma9553_data *data = iio_priv(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_device_unregister(indio_dev);
+ mutex_lock(&data->mutex);
+ mma9551_set_device_state(data->client, false);
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mma9553_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_device_state(data->client, false);
+ mutex_unlock(&data->mutex);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "powering off device failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int mma9553_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = mma9551_set_device_state(data->client, true);
+ if (ret < 0)
+ return ret;
+
+ mma9551_sleep(MMA9553_DEFAULT_SAMPLE_RATE);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int mma9553_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_device_state(data->client, false);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int mma9553_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma9553_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = mma9551_set_device_state(data->client, true);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops mma9553_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mma9553_suspend, mma9553_resume)
+ SET_RUNTIME_PM_OPS(mma9553_runtime_suspend,
+ mma9553_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id mma9553_acpi_match[] = {
+ {"MMA9553", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, mma9553_acpi_match);
+
+static const struct i2c_device_id mma9553_id[] = {
+ {"mma9553", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, mma9553_id);
+
+static struct i2c_driver mma9553_driver = {
+ .driver = {
+ .name = MMA9553_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(mma9553_acpi_match),
+ .pm = &mma9553_pm_ops,
+ },
+ .probe = mma9553_probe,
+ .remove = mma9553_remove,
+ .id_table = mma9553_id,
+};
+
+module_i2c_driver(mma9553_driver);
+
+MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMA9553L pedometer platform driver");
diff --git a/drivers/iio/accel/ssp_accel_sensor.c b/drivers/iio/accel/ssp_accel_sensor.c
new file mode 100644
index 000000000000..4ae05fce9f24
--- /dev/null
+++ b/drivers/iio/accel/ssp_accel_sensor.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/iio/common/ssp_sensors.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../common/ssp_sensors/ssp_iio_sensor.h"
+
+#define SSP_CHANNEL_COUNT 3
+
+#define SSP_ACCEL_NAME "ssp-accelerometer"
+static const char ssp_accel_device_name[] = SSP_ACCEL_NAME;
+
+enum ssp_accel_3d_channel {
+ SSP_CHANNEL_SCAN_INDEX_X,
+ SSP_CHANNEL_SCAN_INDEX_Y,
+ SSP_CHANNEL_SCAN_INDEX_Z,
+ SSP_CHANNEL_SCAN_INDEX_TIME,
+};
+
+static int ssp_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ u32 t;
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ t = ssp_get_sensor_delay(data, SSP_ACCELEROMETER_SENSOR);
+ ssp_convert_to_freq(t, val, val2);
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ssp_accel_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ int ret;
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ssp_convert_to_time(val, val2);
+ ret = ssp_change_delay(data, SSP_ACCELEROMETER_SENSOR, ret);
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "accel sensor enable fail\n");
+
+ return ret;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static struct iio_info ssp_accel_iio_info = {
+ .read_raw = &ssp_accel_read_raw,
+ .write_raw = &ssp_accel_write_raw,
+};
+
+static const unsigned long ssp_accel_scan_mask[] = { 0x7, 0, };
+
+static const struct iio_chan_spec ssp_acc_channels[] = {
+ SSP_CHANNEL_AG(IIO_ACCEL, IIO_MOD_X, SSP_CHANNEL_SCAN_INDEX_X),
+ SSP_CHANNEL_AG(IIO_ACCEL, IIO_MOD_Y, SSP_CHANNEL_SCAN_INDEX_Y),
+ SSP_CHANNEL_AG(IIO_ACCEL, IIO_MOD_Z, SSP_CHANNEL_SCAN_INDEX_Z),
+ SSP_CHAN_TIMESTAMP(SSP_CHANNEL_SCAN_INDEX_TIME),
+};
+
+static int ssp_process_accel_data(struct iio_dev *indio_dev, void *buf,
+ int64_t timestamp)
+{
+ return ssp_common_process_data(indio_dev, buf, SSP_ACCELEROMETER_SIZE,
+ timestamp);
+}
+
+static const struct iio_buffer_setup_ops ssp_accel_buffer_ops = {
+ .postenable = &ssp_common_buffer_postenable,
+ .postdisable = &ssp_common_buffer_postdisable,
+};
+
+static int ssp_accel_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct ssp_sensor_data *spd;
+ struct iio_buffer *buffer;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*spd));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ spd = iio_priv(indio_dev);
+
+ spd->process_data = ssp_process_accel_data;
+ spd->type = SSP_ACCELEROMETER_SENSOR;
+
+ indio_dev->name = ssp_accel_device_name;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &ssp_accel_iio_info;
+ indio_dev->modes = INDIO_BUFFER_SOFTWARE;
+ indio_dev->channels = ssp_acc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ssp_acc_channels);
+ indio_dev->available_scan_masks = ssp_accel_scan_mask;
+
+ buffer = devm_iio_kfifo_allocate(&pdev->dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ indio_dev->setup_ops = &ssp_accel_buffer_ops;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /* SSP registration should be done after all IIO setup is complete */
+ ssp_register_consumer(indio_dev, SSP_ACCELEROMETER_SENSOR);
+
+ return 0;
+}
+
+static int ssp_accel_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver ssp_accel_driver = {
+ .driver = {
+ .name = SSP_ACCEL_NAME,
+ },
+ .probe = ssp_accel_probe,
+ .remove = ssp_accel_remove,
+};
+
+module_platform_driver(ssp_accel_driver);
+
+MODULE_AUTHOR("Karol Wrona <k.wrona@samsung.com>");
+MODULE_DESCRIPTION("Samsung sensorhub accelerometers driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 0f79e4725763..202daf889be2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -135,6 +135,17 @@ config AXP288_ADC
device. Depending on platform configuration, this general purpose ADC can
be used for sampling sensors such as thermal resistors.
+config CC10001_ADC
+ tristate "Cosmic Circuits 10001 ADC driver"
+ depends on HAS_IOMEM && HAVE_CLK && REGULATOR
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Cosmic Circuits 10001 ADC.
+
+ This driver can also be built as a module. If so, the module will be
+ called cc10001_adc.
+
config EXYNOS_ADC
tristate "Exynos ADC driver support"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -228,6 +239,20 @@ config QCOM_SPMI_IADC
To compile this driver as a module, choose M here: the module will
be called qcom-spmi-iadc.
+config QCOM_SPMI_VADC
+ tristate "Qualcomm SPMI PMIC voltage ADC"
+ depends on SPMI
+ select REGMAP_SPMI
+ help
+ This is the IIO Voltage ADC driver for the Qualcomm QPNP VADC chip.
+
+ The driver supports reading multiple channels. The VADC is a 15-bit
+ sigma-delta ADC. Some of the channels are used internally for
+ calibration.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-spmi-vadc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 701fdb7c96aa..0315af640866 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
+obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1027) += max1027.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
+obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index e37412da15f5..b99de00e57b8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
case ad7998:
return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
val);
- default:
+ case ad7992:
+ case ad7993:
+ case ad7994:
return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
val);
+ default:
+ /* Will be written when doing a conversion */
+ st->config = val;
+ return 0;
}
}
@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
case ad7997:
case ad7998:
return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
- default:
+ case ad7992:
+ case ad7993:
+ case ad7994:
return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
+ default:
+ /* No readback support */
+ return st->config;
}
}
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
new file mode 100644
index 000000000000..51e2a83c9404
--- /dev/null
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* Registers */
+#define CC10001_ADC_CONFIG 0x00
+#define CC10001_ADC_START_CONV BIT(4)
+#define CC10001_ADC_MODE_SINGLE_CONV BIT(5)
+
+#define CC10001_ADC_DDATA_OUT 0x04
+#define CC10001_ADC_EOC 0x08
+#define CC10001_ADC_EOC_SET BIT(0)
+
+#define CC10001_ADC_CHSEL_SAMPLED 0x0c
+#define CC10001_ADC_POWER_UP 0x10
+#define CC10001_ADC_POWER_UP_SET BIT(0)
+#define CC10001_ADC_DEBUG 0x14
+#define CC10001_ADC_DATA_COUNT 0x20
+
+#define CC10001_ADC_DATA_MASK GENMASK(9, 0)
+#define CC10001_ADC_NUM_CHANNELS 8
+#define CC10001_ADC_CH_MASK GENMASK(2, 0)
+
+#define CC10001_INVALID_SAMPLED 0xffff
+#define CC10001_MAX_POLL_COUNT 20
+
+/*
+ * As per the device specification, wait six clock cycles after power-up
+ * before activating START. Two additional cycles of margin are added here
+ * intentionally, since the extra delay has a negligible performance impact.
+ */
+#define CC10001_WAIT_CYCLES 8
+
+struct cc10001_adc_device {
+ void __iomem *reg_base;
+ struct clk *adc_clk;
+ struct regulator *reg;
+ u16 *buf;
+
+ struct mutex lock;
+ unsigned long channel_map;
+ unsigned int start_delay_ns;
+ unsigned int eoc_delay_ns;
+};
+
+static inline void cc10001_adc_write_reg(struct cc10001_adc_device *adc_dev,
+ u32 reg, u32 val)
+{
+ writel(val, adc_dev->reg_base + reg);
+}
+
+static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev,
+ u32 reg)
+{
+ return readl(adc_dev->reg_base + reg);
+}
+
+static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
+ unsigned int channel)
+{
+ u32 val;
+
+ /* Channel selection and mode of operation */
+ val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV;
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
+
+ val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG);
+ val = val | CC10001_ADC_START_CONV;
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
+}
+
+static u16 cc10001_adc_poll_done(struct iio_dev *indio_dev,
+ unsigned int channel,
+ unsigned int delay)
+{
+ struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int poll_count = 0;
+
+ while (!(cc10001_adc_read_reg(adc_dev, CC10001_ADC_EOC) &
+ CC10001_ADC_EOC_SET)) {
+
+ ndelay(delay);
+ if (poll_count++ == CC10001_MAX_POLL_COUNT)
+ return CC10001_INVALID_SAMPLED;
+ }
+
+ poll_count = 0;
+ while ((cc10001_adc_read_reg(adc_dev, CC10001_ADC_CHSEL_SAMPLED) &
+ CC10001_ADC_CH_MASK) != channel) {
+
+ ndelay(delay);
+ if (poll_count++ == CC10001_MAX_POLL_COUNT)
+ return CC10001_INVALID_SAMPLED;
+ }
+
+ /* Read the 10 bit output register */
+ return cc10001_adc_read_reg(adc_dev, CC10001_ADC_DDATA_OUT) &
+ CC10001_ADC_DATA_MASK;
+}
+
+static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
+{
+ struct cc10001_adc_device *adc_dev;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev;
+ unsigned int delay_ns;
+ unsigned int channel;
+ bool sample_invalid;
+ u16 *data;
+ int i;
+
+ indio_dev = pf->indio_dev;
+ adc_dev = iio_priv(indio_dev);
+ data = adc_dev->buf;
+
+ mutex_lock(&adc_dev->lock);
+
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
+ CC10001_ADC_POWER_UP_SET);
+
+ /* Wait for 8 (6+2) clock cycles before activating START */
+ ndelay(adc_dev->start_delay_ns);
+
+ /* Calculate delay step for eoc and sampled data */
+ delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
+
+ i = 0;
+ sample_invalid = false;
+ for_each_set_bit(channel, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+
+ cc10001_adc_start(adc_dev, channel);
+
+ data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns);
+ if (data[i] == CC10001_INVALID_SAMPLED) {
+ dev_warn(&indio_dev->dev,
+ "invalid sample on channel %d\n", channel);
+ sample_invalid = true;
+ goto done;
+ }
+ i++;
+ }
+
+done:
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
+
+ mutex_unlock(&adc_dev->lock);
+
+ if (!sample_invalid)
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns());
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int delay_ns;
+ u16 val;
+
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
+ CC10001_ADC_POWER_UP_SET);
+
+ /* Wait for 8 (6+2) clock cycles before activating START */
+ ndelay(adc_dev->start_delay_ns);
+
+ /* Calculate delay step for eoc and sampled data */
+ delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
+
+ cc10001_adc_start(adc_dev, chan->channel);
+
+ val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
+
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
+
+ return val;
+}
+
+static int cc10001_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ mutex_lock(&adc_dev->lock);
+ *val = cc10001_adc_read_raw_voltage(indio_dev, chan);
+ mutex_unlock(&adc_dev->lock);
+
+ if (*val == CC10001_INVALID_SAMPLED)
+ return -EIO;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(adc_dev->reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cc10001_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+
+ kfree(adc_dev->buf);
+ adc_dev->buf = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (!adc_dev->buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct iio_info cc10001_adc_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cc10001_adc_read_raw,
+ .update_scan_mode = &cc10001_update_scan_mode,
+};
+
+static int cc10001_adc_channel_init(struct iio_dev *indio_dev)
+{
+ struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+ struct iio_chan_spec *chan_array, *timestamp;
+ unsigned int bit, idx = 0;
+
+ indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map,
+ CC10001_ADC_NUM_CHANNELS);
+
+ chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1,
+ sizeof(struct iio_chan_spec),
+ GFP_KERNEL);
+ if (!chan_array)
+ return -ENOMEM;
+
+ for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) {
+ struct iio_chan_spec *chan = &chan_array[idx];
+
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = bit;
+ chan->scan_index = idx;
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 10;
+ chan->scan_type.storagebits = 16;
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ idx++;
+ }
+
+ timestamp = &chan_array[idx];
+ timestamp->type = IIO_TIMESTAMP;
+ timestamp->channel = -1;
+ timestamp->scan_index = idx;
+ timestamp->scan_type.sign = 's';
+ timestamp->scan_type.realbits = 64;
+ timestamp->scan_type.storagebits = 64;
+
+ indio_dev->channels = chan_array;
+
+ return 0;
+}
+
+static int cc10001_adc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct cc10001_adc_device *adc_dev;
+ unsigned long adc_clk_rate;
+ struct resource *res;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ adc_dev = iio_priv(indio_dev);
+
+ adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
+ if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
+ adc_dev->channel_map &= ~ret;
+
+ adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(adc_dev->reg))
+ return PTR_ERR(adc_dev->reg);
+
+ ret = regulator_enable(adc_dev->reg);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->info = &cc10001_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adc_dev->reg_base)) {
+ ret = PTR_ERR(adc_dev->reg_base);
+ goto err_disable_reg;
+ }
+
+ adc_dev->adc_clk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(adc_dev->adc_clk)) {
+ dev_err(&pdev->dev, "failed to get the clock\n");
+ ret = PTR_ERR(adc_dev->adc_clk);
+ goto err_disable_reg;
+ }
+
+ ret = clk_prepare_enable(adc_dev->adc_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable the clock\n");
+ goto err_disable_reg;
+ }
+
+ adc_clk_rate = clk_get_rate(adc_dev->adc_clk);
+ if (!adc_clk_rate) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "null clock rate!\n");
+ goto err_disable_clk;
+ }
+
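+ /*
+ * One ADC clock period in ns, e.g. a 1 MHz ADC clock gives an EOC
+ * delay step of 1000 ns and a start delay of 8 * 1000 = 8000 ns.
+ */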
+ adc_dev->eoc_delay_ns = NSEC_PER_SEC / adc_clk_rate;
+ adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
+
+ /* Setup the ADC channels available on the device */
+ ret = cc10001_adc_channel_init(indio_dev);
+ if (ret < 0)
+ goto err_disable_clk;
+
+ mutex_init(&adc_dev->lock);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &cc10001_adc_trigger_h, NULL);
+ if (ret < 0)
+ goto err_disable_clk;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err_cleanup_buffer;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_disable_clk:
+ clk_disable_unprepare(adc_dev->adc_clk);
+err_disable_reg:
+ regulator_disable(adc_dev->reg);
+ return ret;
+}
+
+static int cc10001_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ clk_disable_unprepare(adc_dev->adc_clk);
+ regulator_disable(adc_dev->reg);
+
+ return 0;
+}
+
+static const struct of_device_id cc10001_adc_dt_ids[] = {
+ { .compatible = "cosmic,10001-adc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cc10001_adc_dt_ids);
+
+static struct platform_driver cc10001_adc_driver = {
+ .driver = {
+ .name = "cc10001-adc",
+ .of_match_table = cc10001_adc_dt_ids,
+ },
+ .probe = cc10001_adc_probe,
+ .remove = cc10001_adc_remove,
+};
+module_platform_driver(cc10001_adc_driver);
+
+MODULE_AUTHOR("Phani Movva <Phani.Movva@imgtec.com>");
+MODULE_DESCRIPTION("Cosmic Circuits ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
new file mode 100644
index 000000000000..3211729bcb0b
--- /dev/null
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+/* VADC register and bit definitions */
+#define VADC_REVISION2 0x1
+#define VADC_REVISION2_SUPPORTED_VADC 1
+
+#define VADC_PERPH_TYPE 0x4
+#define VADC_PERPH_TYPE_ADC 8
+
+#define VADC_PERPH_SUBTYPE 0x5
+#define VADC_PERPH_SUBTYPE_VADC 1
+
+#define VADC_STATUS1 0x8
+#define VADC_STATUS1_OP_MODE 4
+#define VADC_STATUS1_REQ_STS BIT(1)
+#define VADC_STATUS1_EOC BIT(0)
+#define VADC_STATUS1_REQ_STS_EOC_MASK 0x3
+
+#define VADC_MODE_CTL 0x40
+#define VADC_OP_MODE_SHIFT 3
+#define VADC_OP_MODE_NORMAL 0
+#define VADC_AMUX_TRIM_EN BIT(1)
+#define VADC_ADC_TRIM_EN BIT(0)
+
+#define VADC_EN_CTL1 0x46
+#define VADC_EN_CTL1_SET BIT(7)
+
+#define VADC_ADC_CH_SEL_CTL 0x48
+
+#define VADC_ADC_DIG_PARAM 0x50
+#define VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT 2
+
+#define VADC_HW_SETTLE_DELAY 0x51
+
+#define VADC_CONV_REQ 0x52
+#define VADC_CONV_REQ_SET BIT(7)
+
+#define VADC_FAST_AVG_CTL 0x5a
+#define VADC_FAST_AVG_EN 0x5b
+#define VADC_FAST_AVG_EN_SET BIT(7)
+
+#define VADC_ACCESS 0xd0
+#define VADC_ACCESS_DATA 0xa5
+
+#define VADC_PERH_RESET_CTL3 0xda
+#define VADC_FOLLOW_WARM_RB BIT(2)
+
+#define VADC_DATA 0x60 /* 16 bits */
+
+#define VADC_CONV_TIME_MIN_US 2000
+#define VADC_CONV_TIME_MAX_US 2100
+
+/* Min ADC code represents 0V */
+#define VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define VADC_MAX_ADC_CODE 0xa800
+
+#define VADC_ABSOLUTE_RANGE_UV 625000
+#define VADC_RATIOMETRIC_RANGE_UV 1800000
+
+#define VADC_DEF_PRESCALING 0 /* 1:1 */
+#define VADC_DEF_DECIMATION 0 /* 512 */
+#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
+
+#define VADC_DECIMATION_MIN 512
+#define VADC_DECIMATION_MAX 4096
+
+#define VADC_HW_SETTLE_DELAY_MAX 10000
+#define VADC_AVG_SAMPLES_MAX 512
+
+#define KELVINMIL_CELSIUSMIL 273150
+
+#define VADC_CHAN_MIN VADC_USBIN
+#define VADC_CHAN_MAX VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM
+
+/*
+ * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
+ * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
+ * calibration.
+ */
+enum vadc_calibration {
+ VADC_CALIB_ABSOLUTE = 0,
+ VADC_CALIB_RATIOMETRIC
+};
+
+/**
+ * struct vadc_linear_graph - Represent ADC characteristics.
+ * @dy: numerator slope to calculate the gain.
+ * @dx: denominator slope to calculate the gain.
+ * @gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are
+ * computed to calibrate the device.
+ */
+struct vadc_linear_graph {
+ s32 dy;
+ s32 dx;
+ s32 gnd;
+};
+
+/**
+ * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
+ * @num: the inverse numerator of the gain applied to the input channel.
+ * @den: the inverse denominator of the gain applied to the input channel.
+ */
+struct vadc_prescale_ratio {
+ u32 num;
+ u32 den;
+};
+
+/**
+ * struct vadc_channel_prop - VADC channel property.
+ * @channel: channel number, refer to the channel list.
+ * @calibration: calibration type.
+ * @decimation: sampling rate supported for the channel.
+ * @prescale: channel scaling performed on the input signal.
+ * @hw_settle_time: the time between AMUX being configured and the
+ * start of conversion.
+ * @avg_samples: averaging setting; the ADC provides a single result
+ * that is an average of multiple measurements.
+ */
+struct vadc_channel_prop {
+ unsigned int channel;
+ enum vadc_calibration calibration;
+ unsigned int decimation;
+ unsigned int prescale;
+ unsigned int hw_settle_time;
+ unsigned int avg_samples;
+};
+
+/**
+ * struct vadc_priv - VADC private structure.
+ * @regmap: pointer to struct regmap.
+ * @dev: pointer to struct device.
+ * @base: base address for the ADC peripheral.
+ * @nchannels: number of VADC channels.
+ * @chan_props: array of VADC channel properties.
+ * @iio_chans: array of IIO channels specification.
+ * @are_ref_measured: are reference points measured.
+ * @poll_eoc: use polling instead of interrupt.
+ * @complete: VADC result notification after interrupt is received.
+ * @graph: store parameters for calibration.
+ * @lock: ADC lock for access to the peripheral.
+ */
+struct vadc_priv {
+ struct regmap *regmap;
+ struct device *dev;
+ u16 base;
+ unsigned int nchannels;
+ struct vadc_channel_prop *chan_props;
+ struct iio_chan_spec *iio_chans;
+ bool are_ref_measured;
+ bool poll_eoc;
+ struct completion complete;
+ struct vadc_linear_graph graph[2];
+ struct mutex lock;
+};
+
+static const struct vadc_prescale_ratio vadc_prescale_ratios[] = {
+ {.num = 1, .den = 1},
+ {.num = 1, .den = 3},
+ {.num = 1, .den = 4},
+ {.num = 1, .den = 6},
+ {.num = 1, .den = 20},
+ {.num = 1, .den = 8},
+ {.num = 10, .den = 81},
+ {.num = 1, .den = 10}
+};
+
+static int vadc_read(struct vadc_priv *vadc, u16 offset, u8 *data)
+{
+ return regmap_bulk_read(vadc->regmap, vadc->base + offset, data, 1);
+}
+
+static int vadc_write(struct vadc_priv *vadc, u16 offset, u8 data)
+{
+ return regmap_write(vadc->regmap, vadc->base + offset, data);
+}
+
+static int vadc_reset(struct vadc_priv *vadc)
+{
+ u8 data;
+ int ret;
+
+ ret = vadc_write(vadc, VADC_ACCESS, VADC_ACCESS_DATA);
+ if (ret)
+ return ret;
+
+ ret = vadc_read(vadc, VADC_PERH_RESET_CTL3, &data);
+ if (ret)
+ return ret;
+
+ ret = vadc_write(vadc, VADC_ACCESS, VADC_ACCESS_DATA);
+ if (ret)
+ return ret;
+
+ data |= VADC_FOLLOW_WARM_RB;
+
+ return vadc_write(vadc, VADC_PERH_RESET_CTL3, data);
+}
+
+static int vadc_set_state(struct vadc_priv *vadc, bool state)
+{
+ return vadc_write(vadc, VADC_EN_CTL1, state ? VADC_EN_CTL1_SET : 0);
+}
+
+static void vadc_show_status(struct vadc_priv *vadc)
+{
+ u8 mode, sta1, chan, dig, en, req;
+ int ret;
+
+ ret = vadc_read(vadc, VADC_MODE_CTL, &mode);
+ if (ret)
+ return;
+
+ ret = vadc_read(vadc, VADC_ADC_DIG_PARAM, &dig);
+ if (ret)
+ return;
+
+ ret = vadc_read(vadc, VADC_ADC_CH_SEL_CTL, &chan);
+ if (ret)
+ return;
+
+ ret = vadc_read(vadc, VADC_CONV_REQ, &req);
+ if (ret)
+ return;
+
+ ret = vadc_read(vadc, VADC_STATUS1, &sta1);
+ if (ret)
+ return;
+
+ ret = vadc_read(vadc, VADC_EN_CTL1, &en);
+ if (ret)
+ return;
+
+ dev_err(vadc->dev,
+ "mode:%02x en:%02x chan:%02x dig:%02x req:%02x sta1:%02x\n",
+ mode, en, chan, dig, req, sta1);
+}
+
+static int vadc_configure(struct vadc_priv *vadc,
+ struct vadc_channel_prop *prop)
+{
+ u8 decimation, mode_ctrl;
+ int ret;
+
+ /* Mode selection */
+ mode_ctrl = (VADC_OP_MODE_NORMAL << VADC_OP_MODE_SHIFT) |
+ VADC_ADC_TRIM_EN | VADC_AMUX_TRIM_EN;
+ ret = vadc_write(vadc, VADC_MODE_CTL, mode_ctrl);
+ if (ret)
+ return ret;
+
+ /* Channel selection */
+ ret = vadc_write(vadc, VADC_ADC_CH_SEL_CTL, prop->channel);
+ if (ret)
+ return ret;
+
+ /* Digital parameter setup */
+ decimation = prop->decimation << VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT;
+ ret = vadc_write(vadc, VADC_ADC_DIG_PARAM, decimation);
+ if (ret)
+ return ret;
+
+ /* HW settle time delay */
+ ret = vadc_write(vadc, VADC_HW_SETTLE_DELAY, prop->hw_settle_time);
+ if (ret)
+ return ret;
+
+ ret = vadc_write(vadc, VADC_FAST_AVG_CTL, prop->avg_samples);
+ if (ret)
+ return ret;
+
+ if (prop->avg_samples)
+ ret = vadc_write(vadc, VADC_FAST_AVG_EN, VADC_FAST_AVG_EN_SET);
+ else
+ ret = vadc_write(vadc, VADC_FAST_AVG_EN, 0);
+
+ return ret;
+}
+
+static int vadc_poll_wait_eoc(struct vadc_priv *vadc, unsigned int interval_us)
+{
+ unsigned int count, retry;
+ u8 sta1;
+ int ret;
+
+ retry = interval_us / VADC_CONV_TIME_MIN_US;
+
+ for (count = 0; count < retry; count++) {
+ ret = vadc_read(vadc, VADC_STATUS1, &sta1);
+ if (ret)
+ return ret;
+
+ sta1 &= VADC_STATUS1_REQ_STS_EOC_MASK;
+ if (sta1 == VADC_STATUS1_EOC)
+ return 0;
+
+ usleep_range(VADC_CONV_TIME_MIN_US, VADC_CONV_TIME_MAX_US);
+ }
+
+ vadc_show_status(vadc);
+
+ return -ETIMEDOUT;
+}
+
+static int vadc_read_result(struct vadc_priv *vadc, u16 *data)
+{
+ int ret;
+
+ ret = regmap_bulk_read(vadc->regmap, vadc->base + VADC_DATA, data, 2);
+ if (ret)
+ return ret;
+
+ *data = clamp_t(u16, *data, VADC_MIN_ADC_CODE, VADC_MAX_ADC_CODE);
+
+ return 0;
+}
+
+static struct vadc_channel_prop *vadc_get_channel(struct vadc_priv *vadc,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < vadc->nchannels; i++)
+ if (vadc->chan_props[i].channel == num)
+ return &vadc->chan_props[i];
+
+ dev_dbg(vadc->dev, "no such channel %02x\n", num);
+
+ return NULL;
+}
+
+static int vadc_do_conversion(struct vadc_priv *vadc,
+ struct vadc_channel_prop *prop, u16 *data)
+{
+ unsigned int timeout;
+ int ret;
+
+ mutex_lock(&vadc->lock);
+
+ ret = vadc_configure(vadc, prop);
+ if (ret)
+ goto unlock;
+
+ if (!vadc->poll_eoc)
+ reinit_completion(&vadc->complete);
+
+ ret = vadc_set_state(vadc, true);
+ if (ret)
+ goto unlock;
+
+ ret = vadc_write(vadc, VADC_CONV_REQ, VADC_CONV_REQ_SET);
+ if (ret)
+ goto err_disable;
+
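+ /*
+ * avg_samples holds log2 of the averaged sample count (see
+ * vadc_avg_samples_from_dt), so BIT() recovers the count; allow
+ * twice the minimum conversion time per averaged sample.
+ */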
+ timeout = BIT(prop->avg_samples) * VADC_CONV_TIME_MIN_US * 2;
+
+ if (vadc->poll_eoc) {
+ ret = vadc_poll_wait_eoc(vadc, timeout);
+ } else {
+ ret = wait_for_completion_timeout(&vadc->complete, timeout);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto err_disable;
+ }
+
+ /* Double check conversion status */
+ ret = vadc_poll_wait_eoc(vadc, VADC_CONV_TIME_MIN_US);
+ if (ret)
+ goto err_disable;
+ }
+
+ ret = vadc_read_result(vadc, data);
+
+err_disable:
+ vadc_set_state(vadc, false);
+ if (ret)
+ dev_err(vadc->dev, "conversion failed\n");
+unlock:
+ mutex_unlock(&vadc->lock);
+ return ret;
+}
+
+static int vadc_measure_ref_points(struct vadc_priv *vadc)
+{
+ struct vadc_channel_prop *prop;
+ u16 read_1, read_2;
+ int ret;
+
+ vadc->graph[VADC_CALIB_RATIOMETRIC].dx = VADC_RATIOMETRIC_RANGE_UV;
+ vadc->graph[VADC_CALIB_ABSOLUTE].dx = VADC_ABSOLUTE_RANGE_UV;
+
+ prop = vadc_get_channel(vadc, VADC_REF_1250MV);
+ ret = vadc_do_conversion(vadc, prop, &read_1);
+ if (ret)
+ goto err;
+
+ /* Try with buffered 625mV channel first */
+ prop = vadc_get_channel(vadc, VADC_SPARE1);
+ if (!prop)
+ prop = vadc_get_channel(vadc, VADC_REF_625MV);
+
+ ret = vadc_do_conversion(vadc, prop, &read_2);
+ if (ret)
+ goto err;
+
+ if (read_1 == read_2) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vadc->graph[VADC_CALIB_ABSOLUTE].dy = read_1 - read_2;
+ vadc->graph[VADC_CALIB_ABSOLUTE].gnd = read_2;
+
+ /* Ratiometric calibration */
+ prop = vadc_get_channel(vadc, VADC_VDD_VADC);
+ ret = vadc_do_conversion(vadc, prop, &read_1);
+ if (ret)
+ goto err;
+
+ prop = vadc_get_channel(vadc, VADC_GND_REF);
+ ret = vadc_do_conversion(vadc, prop, &read_2);
+ if (ret)
+ goto err;
+
+ if (read_1 == read_2) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vadc->graph[VADC_CALIB_RATIOMETRIC].dy = read_1 - read_2;
+ vadc->graph[VADC_CALIB_RATIOMETRIC].gnd = read_2;
+err:
+ if (ret)
+ dev_err(vadc->dev, "measure reference points failed\n");
+
+ return ret;
+}
+
+static s32 vadc_calibrate(struct vadc_priv *vadc,
+ const struct vadc_channel_prop *prop, u16 adc_code)
+{
+ const struct vadc_prescale_ratio *prescale;
+ s32 voltage;
+
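+ /*
+ * Convert the raw code to microvolts using the calibration graph:
+ * uV = (code - gnd) * dx / dy. For absolute calibration the dx
+ * offset is added back, then the channel prescaling is undone.
+ */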
+ voltage = adc_code - vadc->graph[prop->calibration].gnd;
+ voltage *= vadc->graph[prop->calibration].dx;
+ voltage = voltage / vadc->graph[prop->calibration].dy;
+
+ if (prop->calibration == VADC_CALIB_ABSOLUTE)
+ voltage += vadc->graph[prop->calibration].dx;
+
+ if (voltage < 0)
+ voltage = 0;
+
+ prescale = &vadc_prescale_ratios[prop->prescale];
+
+ voltage = voltage * prescale->den;
+
+ return voltage / prescale->num;
+}
+
+static int vadc_decimation_from_dt(u32 value)
+{
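+ /* Ratios 512, 1024, 2048 and 4096 are encoded as 0, 1, 2 and 3 */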
+ if (!is_power_of_2(value) || value < VADC_DECIMATION_MIN ||
+ value > VADC_DECIMATION_MAX)
+ return -EINVAL;
+
+ return __ffs64(value / VADC_DECIMATION_MIN);
+}
+
+static int vadc_prescaling_from_dt(u32 num, u32 den)
+{
+ unsigned int pre;
+
+ for (pre = 0; pre < ARRAY_SIZE(vadc_prescale_ratios); pre++)
+ if (vadc_prescale_ratios[pre].num == num &&
+ vadc_prescale_ratios[pre].den == den)
+ break;
+
+ if (pre == ARRAY_SIZE(vadc_prescale_ratios))
+ return -EINVAL;
+
+ return pre;
+}
+
+static int vadc_hw_settle_time_from_dt(u32 value)
+{
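+ /*
+ * Settle times up to 1000 us are programmed in 100 us steps
+ * (codes 0-10), longer times in 2000 us steps starting at code 11.
+ */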
+ if ((value <= 1000 && value % 100) || (value > 1000 && value % 2000))
+ return -EINVAL;
+
+ if (value <= 1000)
+ value /= 100;
+ else
+ value = value / 2000 + 10;
+
+ return value;
+}
+
+static int vadc_avg_samples_from_dt(u32 value)
+{
+ if (!is_power_of_2(value) || value > VADC_AVG_SAMPLES_MAX)
+ return -EINVAL;
+
+ return __ffs64(value);
+}
+
+static int vadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct vadc_priv *vadc = iio_priv(indio_dev);
+ struct vadc_channel_prop *prop;
+ u16 adc_code;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ prop = &vadc->chan_props[chan->address];
+ ret = vadc_do_conversion(vadc, prop, &adc_code);
+ if (ret)
+ break;
+
+ *val = vadc_calibrate(vadc, prop, adc_code);
+
+ /* 2mV/K, return milli Celsius */
+ *val /= 2;
+ *val -= KELVINMIL_CELSIUSMIL;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ prop = &vadc->chan_props[chan->address];
+ ret = vadc_do_conversion(vadc, prop, &adc_code);
+ if (ret)
+ break;
+
+ *val = vadc_calibrate(vadc, prop, adc_code);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int vadc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ struct vadc_priv *vadc = iio_priv(indio_dev);
+ unsigned int i;
+
+ for (i = 0; i < vadc->nchannels; i++)
+ if (vadc->iio_chans[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+static const struct iio_info vadc_info = {
+ .read_raw = vadc_read_raw,
+ .of_xlate = vadc_of_xlate,
+ .driver_module = THIS_MODULE,
+};
+
+struct vadc_channels {
+ const char *datasheet_name;
+ unsigned int prescale_index;
+ enum iio_chan_type type;
+ long info_mask;
+};
+
+#define VADC_CHAN(_dname, _type, _mask, _pre) \
+ [VADC_##_dname] = { \
+ .datasheet_name = __stringify(_dname), \
+ .prescale_index = _pre, \
+ .type = _type, \
+ .info_mask = _mask \
+ }, \
+
+#define VADC_CHAN_TEMP(_dname, _pre) \
+ VADC_CHAN(_dname, IIO_TEMP, BIT(IIO_CHAN_INFO_PROCESSED), _pre) \
+
+#define VADC_CHAN_VOLT(_dname, _pre) \
+ VADC_CHAN(_dname, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+ _pre) \
+
+/*
+ * The array represents all possible ADC channels found in the supported PMICs.
+ * Every index in the array is equal to the channel number per datasheet. The
+ * gaps in the array should be treated as reserved channels.
+ */
+static const struct vadc_channels vadc_chans[] = {
+ VADC_CHAN_VOLT(USBIN, 4)
+ VADC_CHAN_VOLT(DCIN, 4)
+ VADC_CHAN_VOLT(VCHG_SNS, 3)
+ VADC_CHAN_VOLT(SPARE1_03, 1)
+ VADC_CHAN_VOLT(USB_ID_MV, 1)
+ VADC_CHAN_VOLT(VCOIN, 1)
+ VADC_CHAN_VOLT(VBAT_SNS, 1)
+ VADC_CHAN_VOLT(VSYS, 1)
+ VADC_CHAN_TEMP(DIE_TEMP, 0)
+ VADC_CHAN_VOLT(REF_625MV, 0)
+ VADC_CHAN_VOLT(REF_1250MV, 0)
+ VADC_CHAN_VOLT(CHG_TEMP, 0)
+ VADC_CHAN_VOLT(SPARE1, 0)
+ VADC_CHAN_VOLT(SPARE2, 0)
+ VADC_CHAN_VOLT(GND_REF, 0)
+ VADC_CHAN_VOLT(VDD_VADC, 0)
+
+ VADC_CHAN_VOLT(P_MUX1_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX2_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX3_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX4_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX5_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX6_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX7_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX8_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX9_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX10_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX11_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX12_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX13_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX14_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX15_1_1, 0)
+ VADC_CHAN_VOLT(P_MUX16_1_1, 0)
+
+ VADC_CHAN_VOLT(P_MUX1_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX2_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX3_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX4_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX5_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX6_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX7_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX8_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX9_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX10_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX11_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX12_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX13_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX14_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX15_1_3, 1)
+ VADC_CHAN_VOLT(P_MUX16_1_3, 1)
+
+ VADC_CHAN_VOLT(LR_MUX1_BAT_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_XO_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX4_AMUX_THM1, 0)
+ VADC_CHAN_VOLT(LR_MUX5_AMUX_THM2, 0)
+ VADC_CHAN_VOLT(LR_MUX6_AMUX_THM3, 0)
+ VADC_CHAN_VOLT(LR_MUX7_HW_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX8_AMUX_THM4, 0)
+ VADC_CHAN_VOLT(LR_MUX9_AMUX_THM5, 0)
+ VADC_CHAN_VOLT(LR_MUX10_USB_ID, 0)
+ VADC_CHAN_VOLT(AMUX_PU1, 0)
+ VADC_CHAN_VOLT(AMUX_PU2, 0)
+ VADC_CHAN_VOLT(LR_MUX3_BUF_XO_THERM, 0)
+
+ VADC_CHAN_VOLT(LR_MUX1_PU1_BAT_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX2_PU1_BAT_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_PU1_XO_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX4_PU1_AMUX_THM1, 0)
+ VADC_CHAN_VOLT(LR_MUX5_PU1_AMUX_THM2, 0)
+ VADC_CHAN_VOLT(LR_MUX6_PU1_AMUX_THM3, 0)
+ VADC_CHAN_VOLT(LR_MUX7_PU1_AMUX_HW_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX8_PU1_AMUX_THM4, 0)
+ VADC_CHAN_VOLT(LR_MUX9_PU1_AMUX_THM5, 0)
+ VADC_CHAN_VOLT(LR_MUX10_PU1_AMUX_USB_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_BUF_PU1_XO_THERM, 0)
+
+ VADC_CHAN_VOLT(LR_MUX1_PU2_BAT_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX2_PU2_BAT_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_PU2_XO_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX4_PU2_AMUX_THM1, 0)
+ VADC_CHAN_VOLT(LR_MUX5_PU2_AMUX_THM2, 0)
+ VADC_CHAN_VOLT(LR_MUX6_PU2_AMUX_THM3, 0)
+ VADC_CHAN_VOLT(LR_MUX7_PU2_AMUX_HW_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX8_PU2_AMUX_THM4, 0)
+ VADC_CHAN_VOLT(LR_MUX9_PU2_AMUX_THM5, 0)
+ VADC_CHAN_VOLT(LR_MUX10_PU2_AMUX_USB_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_BUF_PU2_XO_THERM, 0)
+
+ VADC_CHAN_VOLT(LR_MUX1_PU1_PU2_BAT_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX2_PU1_PU2_BAT_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_PU1_PU2_XO_THERM, 0)
+ VADC_CHAN_VOLT(LR_MUX4_PU1_PU2_AMUX_THM1, 0)
+ VADC_CHAN_VOLT(LR_MUX5_PU1_PU2_AMUX_THM2, 0)
+ VADC_CHAN_VOLT(LR_MUX6_PU1_PU2_AMUX_THM3, 0)
+ VADC_CHAN_VOLT(LR_MUX7_PU1_PU2_AMUX_HW_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX8_PU1_PU2_AMUX_THM4, 0)
+ VADC_CHAN_VOLT(LR_MUX9_PU1_PU2_AMUX_THM5, 0)
+ VADC_CHAN_VOLT(LR_MUX10_PU1_PU2_AMUX_USB_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX3_BUF_PU1_PU2_XO_THERM, 0)
+};
+
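For reference, a single entry of the table expands as follows under the VADC_CHAN_VOLT()/VADC_CHAN() macros defined above; this is shown only to make the table easier to read.

/* VADC_CHAN_VOLT(VBAT_SNS, 1) expands to: */
[VADC_VBAT_SNS] = {
	.datasheet_name = "VBAT_SNS",
	.prescale_index = 1,
	.type = IIO_VOLTAGE,
	.info_mask = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
},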
+static int vadc_get_dt_channel_data(struct device *dev,
+ struct vadc_channel_prop *prop,
+ struct device_node *node)
+{
+ const char *name = node->name;
+ u32 chan, value, varr[2];
+ int ret;
+
+ ret = of_property_read_u32(node, "reg", &chan);
+ if (ret) {
+ dev_err(dev, "invalid channel number %s\n", name);
+ return ret;
+ }
+
+ if (chan > VADC_CHAN_MAX || chan < VADC_CHAN_MIN) {
+ dev_err(dev, "%s invalid channel number %d\n", name, chan);
+ return -EINVAL;
+ }
+
+ /* the channel has DT description */
+ prop->channel = chan;
+
+ ret = of_property_read_u32(node, "qcom,decimation", &value);
+ if (!ret) {
+ ret = vadc_decimation_from_dt(value);
+ if (ret < 0) {
+ dev_err(dev, "%02x invalid decimation %d\n",
+ chan, value);
+ return ret;
+ }
+ prop->decimation = ret;
+ } else {
+ prop->decimation = VADC_DEF_DECIMATION;
+ }
+
+ ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
+ if (!ret) {
+ ret = vadc_prescaling_from_dt(varr[0], varr[1]);
+ if (ret < 0) {
+ dev_err(dev, "%02x invalid pre-scaling <%d %d>\n",
+ chan, varr[0], varr[1]);
+ return ret;
+ }
+ prop->prescale = ret;
+ } else {
+ prop->prescale = vadc_chans[prop->channel].prescale_index;
+ }
+
+ ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
+ if (!ret) {
+ ret = vadc_hw_settle_time_from_dt(value);
+ if (ret < 0) {
+ dev_err(dev, "%02x invalid hw-settle-time %d us\n",
+ chan, value);
+ return ret;
+ }
+ prop->hw_settle_time = ret;
+ } else {
+ prop->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
+ }
+
+ ret = of_property_read_u32(node, "qcom,avg-samples", &value);
+ if (!ret) {
+ ret = vadc_avg_samples_from_dt(value);
+ if (ret < 0) {
+ dev_err(dev, "%02x invalid avg-samples %d\n",
+ chan, value);
+ return ret;
+ }
+ prop->avg_samples = ret;
+ } else {
+ prop->avg_samples = VADC_DEF_AVG_SAMPLES;
+ }
+
+ if (of_property_read_bool(node, "qcom,ratiometric"))
+ prop->calibration = VADC_CALIB_RATIOMETRIC;
+ else
+ prop->calibration = VADC_CALIB_ABSOLUTE;
+
+ dev_dbg(dev, "%02x name %s\n", chan, name);
+
+ return 0;
+}
+
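A minimal sketch of what the parser above produces for a child node that sets only "reg"; every optional property then falls back to its default. The helper name is hypothetical and exists only to spell out the resulting values.

/* Hypothetical illustration: a node with reg = <VADC_VSYS> and nothing else. */
static void example_defaults(struct vadc_channel_prop *prop)
{
	prop->channel        = VADC_VSYS;
	prop->decimation     = VADC_DEF_DECIMATION;
	prop->prescale       = vadc_chans[VADC_VSYS].prescale_index; /* 1 per the table */
	prop->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
	prop->avg_samples    = VADC_DEF_AVG_SAMPLES;
	prop->calibration    = VADC_CALIB_ABSOLUTE; /* no qcom,ratiometric property */
}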
+static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
+{
+ const struct vadc_channels *vadc_chan;
+ struct iio_chan_spec *iio_chan;
+ struct vadc_channel_prop prop;
+ struct device_node *child;
+ unsigned int index = 0;
+ int ret;
+
+ vadc->nchannels = of_get_available_child_count(node);
+ if (!vadc->nchannels)
+ return -EINVAL;
+
+ vadc->iio_chans = devm_kcalloc(vadc->dev, vadc->nchannels,
+ sizeof(*vadc->iio_chans), GFP_KERNEL);
+ if (!vadc->iio_chans)
+ return -ENOMEM;
+
+ vadc->chan_props = devm_kcalloc(vadc->dev, vadc->nchannels,
+ sizeof(*vadc->chan_props), GFP_KERNEL);
+ if (!vadc->chan_props)
+ return -ENOMEM;
+
+ iio_chan = vadc->iio_chans;
+
+ for_each_available_child_of_node(node, child) {
+ ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
+ if (ret)
+ return ret;
+
+ vadc->chan_props[index] = prop;
+
+ vadc_chan = &vadc_chans[prop.channel];
+
+ iio_chan->channel = prop.channel;
+ iio_chan->datasheet_name = vadc_chan->datasheet_name;
+ iio_chan->info_mask_separate = vadc_chan->info_mask;
+ iio_chan->type = vadc_chan->type;
+ iio_chan->indexed = 1;
+ iio_chan->address = index++;
+
+ iio_chan++;
+ }
+
+ /* These channels are mandatory; they are used as reference points. */
+ if (!vadc_get_channel(vadc, VADC_REF_1250MV)) {
+ dev_err(vadc->dev, "Please define 1.25V channel\n");
+ return -ENODEV;
+ }
+
+ if (!vadc_get_channel(vadc, VADC_REF_625MV)) {
+ dev_err(vadc->dev, "Please define 0.625V channel\n");
+ return -ENODEV;
+ }
+
+ if (!vadc_get_channel(vadc, VADC_VDD_VADC)) {
+ dev_err(vadc->dev, "Please define VDD channel\n");
+ return -ENODEV;
+ }
+
+ if (!vadc_get_channel(vadc, VADC_GND_REF)) {
+ dev_err(vadc->dev, "Please define GND channel\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static irqreturn_t vadc_isr(int irq, void *dev_id)
+{
+ struct vadc_priv *vadc = dev_id;
+
+ complete(&vadc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int vadc_check_revision(struct vadc_priv *vadc)
+{
+ u8 val;
+ int ret;
+
+ ret = vadc_read(vadc, VADC_PERPH_TYPE, &val);
+ if (ret)
+ return ret;
+
+ if (val < VADC_PERPH_TYPE_ADC) {
+ dev_err(vadc->dev, "%d is not ADC\n", val);
+ return -ENODEV;
+ }
+
+ ret = vadc_read(vadc, VADC_PERPH_SUBTYPE, &val);
+ if (ret)
+ return ret;
+
+ if (val < VADC_PERPH_SUBTYPE_VADC) {
+ dev_err(vadc->dev, "%d is not VADC\n", val);
+ return -ENODEV;
+ }
+
+ ret = vadc_read(vadc, VADC_REVISION2, &val);
+ if (ret)
+ return ret;
+
+ if (val < VADC_REVISION2_SUPPORTED_VADC) {
+ dev_err(vadc->dev, "revision %d not supported\n", val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int vadc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct vadc_priv *vadc;
+ struct regmap *regmap;
+ int ret, irq_eoc;
+ u32 reg;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret < 0)
+ return ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*vadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ vadc = iio_priv(indio_dev);
+ vadc->regmap = regmap;
+ vadc->dev = dev;
+ vadc->base = reg;
+ vadc->are_ref_measured = false;
+ init_completion(&vadc->complete);
+ mutex_init(&vadc->lock);
+
+ ret = vadc_check_revision(vadc);
+ if (ret)
+ return ret;
+
+ ret = vadc_get_dt_data(vadc, node);
+ if (ret)
+ return ret;
+
+ irq_eoc = platform_get_irq(pdev, 0);
+ if (irq_eoc < 0) {
+ if (irq_eoc == -EPROBE_DEFER || irq_eoc == -EINVAL)
+ return irq_eoc;
+ vadc->poll_eoc = true;
+ } else {
+ ret = devm_request_irq(dev, irq_eoc, vadc_isr, 0,
+ "spmi-vadc", vadc);
+ if (ret)
+ return ret;
+ }
+
+ ret = vadc_reset(vadc);
+ if (ret) {
+ dev_err(dev, "reset failed\n");
+ return ret;
+ }
+
+ ret = vadc_measure_ref_points(vadc);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = node;
+ indio_dev->name = pdev->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &vadc_info;
+ indio_dev->channels = vadc->iio_chans;
+ indio_dev->num_channels = vadc->nchannels;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id vadc_match_table[] = {
+ { .compatible = "qcom,spmi-vadc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, vadc_match_table);
+
+static struct platform_driver vadc_driver = {
+ .driver = {
+ .name = "qcom-spmi-vadc",
+ .of_match_table = vadc_match_table,
+ },
+ .probe = vadc_probe,
+};
+module_platform_driver(vadc_driver);
+
+MODULE_ALIAS("platform:qcom-spmi-vadc");
+MODULE_DESCRIPTION("Qualcomm SPMI PMIC voltage ADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stanimir Varbanov <svarbanov@mm-sol.com>");
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b730864731e8..2e5cc4409f78 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -86,19 +86,18 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
unsigned int stepconfig;
- int i, steps;
+ int i, steps = 0;
/*
* There are 16 configurable steps and 8 analog input
* lines available which are shared between Touchscreen and ADC.
*
- * Steps backwards i.e. from 16 towards 0 are used by ADC
+ * Steps forward, i.e. from 0 towards 16, are used by the ADC
* depending on number of input lines needed.
* Channel would represent which analog input
* needs to be given to ADC to digitalize data.
*/
- steps = TOTAL_STEPS - adc_dev->channels;
if (iio_buffer_enabled(indio_dev))
stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1
| STEPCONFIG_MODE_SWCNT;
@@ -250,7 +249,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
struct iio_buffer *buffer;
int ret;
- buffer = iio_kfifo_allocate(indio_dev);
+ buffer = iio_kfifo_allocate();
if (!buffer)
return -ENOMEM;
@@ -264,16 +263,8 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
- ret = iio_buffer_register(indio_dev,
- indio_dev->channels,
- indio_dev->num_channels);
- if (ret)
- goto error_free_irq;
-
return 0;
-error_free_irq:
- free_irq(irq, indio_dev);
error_kfifo_free:
iio_kfifo_free(indio_dev->buffer);
return ret;
@@ -285,7 +276,6 @@ static void tiadc_iio_buffered_hardware_remove(struct iio_dev *indio_dev)
free_irq(adc_dev->mfd_tscadc->irq, indio_dev);
iio_kfifo_free(indio_dev->buffer);
- iio_buffer_unregister(indio_dev);
}
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index ba6f6a91dfff..c0d364ebaea8 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -31,7 +31,7 @@ struct ad8366_state {
};
static int ad8366_write(struct iio_dev *indio_dev,
- unsigned char ch_a, char unsigned ch_b)
+ unsigned char ch_a, unsigned char ch_b)
{
struct ad8366_state *st = iio_priv(indio_dev);
int ret;
@@ -166,7 +166,7 @@ static int ad8366_probe(struct spi_device *spi)
if (ret)
goto error_disable_reg;
- ad8366_write(indio_dev, 0 , 0);
+ ad8366_write(indio_dev, 0, 0);
return 0;
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 0b6e97d18fa0..790f106d719c 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -3,4 +3,5 @@
#
source "drivers/iio/common/hid-sensors/Kconfig"
+source "drivers/iio/common/ssp_sensors/Kconfig"
source "drivers/iio/common/st_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 3112df0060e9..b1e4d9c9591c 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -8,4 +8,5 @@
# When adding new entries keep the list in alphabetical order
obj-y += hid-sensors/
+obj-y += ssp_sensors/
obj-y += st_sensors/
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 92068cdbf8c7..2f1d535b94c4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -22,16 +22,18 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/sysfs.h>
#include "hid-sensor-trigger.h"
-int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
{
int state_val;
int report_val;
+ s32 poll_value = 0;
if (state) {
if (sensor_hub_device_open(st->hsdev))
@@ -47,6 +49,8 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
st->report_state.report_id,
st->report_state.index,
HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+
+ poll_value = hid_sensor_read_poll_value(st);
} else {
if (!atomic_dec_and_test(&st->data_ready))
return 0;
@@ -78,10 +82,36 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
&state_val);
+ if (state && poll_value)
+ msleep_interruptible(poll_value * 2);
+
return 0;
}
EXPORT_SYMBOL(hid_sensor_power_state);
+int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+{
+#ifdef CONFIG_PM
+ int ret;
+
+ if (state)
+ ret = pm_runtime_get_sync(&st->pdev->dev);
+ else {
+ pm_runtime_mark_last_busy(&st->pdev->dev);
+ ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+ }
+ if (ret < 0) {
+ if (state)
+ pm_runtime_put_noidle(&st->pdev->dev);
+ return ret;
+ }
+
+ return 0;
+#else
+ return _hid_sensor_power_state(st, state);
+#endif
+}
+
static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
@@ -125,8 +155,21 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
attrb->trigger = trig;
indio_dev->trig = iio_trigger_get(trig);
- return ret;
+ ret = pm_runtime_set_active(&indio_dev->dev);
+ if (ret)
+ goto error_unreg_trigger;
+ iio_device_set_drvdata(indio_dev, attrb);
+ pm_suspend_ignore_children(&attrb->pdev->dev, true);
+ pm_runtime_enable(&attrb->pdev->dev);
+ /* Default to 3 seconds, but can be changed from sysfs */
+ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
+ 3000);
+ pm_runtime_use_autosuspend(&attrb->pdev->dev);
+
+ return ret;
+error_unreg_trigger:
+ iio_trigger_unregister(trig);
error_free_trig:
iio_trigger_free(trig);
error_ret:
@@ -134,6 +177,34 @@ error_ret:
}
EXPORT_SYMBOL(hid_sensor_setup_trigger);
+#ifdef CONFIG_PM
+static int hid_sensor_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
+
+ return _hid_sensor_power_state(attrb, false);
+}
+
+static int hid_sensor_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
+
+ return _hid_sensor_power_state(attrb, true);
+}
+
+#endif
+
+const struct dev_pm_ops hid_sensor_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hid_sensor_suspend, hid_sensor_resume)
+ SET_RUNTIME_PM_OPS(hid_sensor_suspend,
+ hid_sensor_resume, NULL)
+};
+EXPORT_SYMBOL(hid_sensor_pm_ops);
+
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_DESCRIPTION("HID Sensor trigger processing");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index 0f8e78c249d3..9f4713f42ecb 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -19,6 +19,11 @@
#ifndef _HID_SENSOR_TRIGGER_H
#define _HID_SENSOR_TRIGGER_H
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+extern const struct dev_pm_ops hid_sensor_pm_ops;
+
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
struct hid_sensor_common *attrb);
void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
diff --git a/drivers/iio/common/ssp_sensors/Kconfig b/drivers/iio/common/ssp_sensors/Kconfig
new file mode 100644
index 000000000000..0ea4faf016d8
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/Kconfig
@@ -0,0 +1,26 @@
+#
+# SSP sensor drivers and commons configuration
+#
+menu "SSP Sensor Common"
+
+config IIO_SSP_SENSORS_COMMONS
+ tristate "Commons for all SSP Sensor IIO drivers"
+ depends on IIO_SSP_SENSORHUB
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say yes here to build commons for SSP sensors.
+ To compile this as a module, choose M here: the module
+ will be called ssp_iio.
+
+config IIO_SSP_SENSORHUB
+ tristate "Samsung Sensorhub driver"
+ depends on SPI
+ select MFD_CORE
+ help
+ Say yes here to build support for the Samsung sensorhub (SSP)
+ connected over SPI.
+ To compile this driver as a module, choose M here: the
+ module will be called sensorhub.
+
+endmenu
diff --git a/drivers/iio/common/ssp_sensors/Makefile b/drivers/iio/common/ssp_sensors/Makefile
new file mode 100644
index 000000000000..1e0389eb0905
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for SSP sensor drivers and commons.
+#
+
+sensorhub-objs := ssp_dev.o ssp_spi.o
+obj-$(CONFIG_IIO_SSP_SENSORHUB) += sensorhub.o
+
+obj-$(CONFIG_IIO_SSP_SENSORS_COMMONS) += ssp_iio.o
diff --git a/drivers/iio/common/ssp_sensors/ssp.h b/drivers/iio/common/ssp_sensors/ssp.h
new file mode 100644
index 000000000000..b910e91d7c0d
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/ssp.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SSP_SENSORHUB_H__
+#define __SSP_SENSORHUB_H__
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/iio/common/ssp_sensors.h>
+#include <linux/iio/iio.h>
+#include <linux/spi/spi.h>
+
+#define SSP_DEVICE_ID 0x55
+
+#ifdef SSP_DBG
+#define ssp_dbg(format, ...) pr_info("[SSP] "format, ##__VA_ARGS__)
+#else
+#define ssp_dbg(format, ...)
+#endif
+
+#define SSP_SW_RESET_TIME 3000
+/* Sensor polling in ms */
+#define SSP_DEFAULT_POLLING_DELAY 200
+#define SSP_DEFAULT_RETRIES 3
+#define SSP_DATA_PACKET_SIZE 960
+#define SSP_HEADER_BUFFER_SIZE 4
+
+enum {
+ SSP_KERNEL_BINARY = 0,
+ SSP_KERNEL_CRASHED_BINARY,
+};
+
+enum {
+ SSP_INITIALIZATION_STATE = 0,
+ SSP_NO_SENSOR_STATE,
+ SSP_ADD_SENSOR_STATE,
+ SSP_RUNNING_SENSOR_STATE,
+};
+
+/* Firmware download STATE */
+enum {
+ SSP_FW_DL_STATE_FAIL = -1,
+ SSP_FW_DL_STATE_NONE = 0,
+ SSP_FW_DL_STATE_NEED_TO_SCHEDULE,
+ SSP_FW_DL_STATE_SCHEDULED,
+ SSP_FW_DL_STATE_DOWNLOADING,
+ SSP_FW_DL_STATE_SYNC,
+ SSP_FW_DL_STATE_DONE,
+};
+
+#define SSP_INVALID_REVISION 99999
+#define SSP_INVALID_REVISION2 0xffffff
+
+/* AP -> SSP Instruction */
+#define SSP_MSG2SSP_INST_BYPASS_SENSOR_ADD 0xa1
+#define SSP_MSG2SSP_INST_BYPASS_SENSOR_RM 0xa2
+#define SSP_MSG2SSP_INST_REMOVE_ALL 0xa3
+#define SSP_MSG2SSP_INST_CHANGE_DELAY 0xa4
+#define SSP_MSG2SSP_INST_LIBRARY_ADD 0xb1
+#define SSP_MSG2SSP_INST_LIBRARY_REMOVE 0xb2
+#define SSP_MSG2SSP_INST_LIB_NOTI 0xb4
+#define SSP_MSG2SSP_INST_LIB_DATA 0xc1
+
+#define SSP_MSG2SSP_AP_MCU_SET_GYRO_CAL 0xcd
+#define SSP_MSG2SSP_AP_MCU_SET_ACCEL_CAL 0xce
+#define SSP_MSG2SSP_AP_STATUS_SHUTDOWN 0xd0
+#define SSP_MSG2SSP_AP_STATUS_WAKEUP 0xd1
+#define SSP_MSG2SSP_AP_STATUS_SLEEP 0xd2
+#define SSP_MSG2SSP_AP_STATUS_RESUME 0xd3
+#define SSP_MSG2SSP_AP_STATUS_SUSPEND 0xd4
+#define SSP_MSG2SSP_AP_STATUS_RESET 0xd5
+#define SSP_MSG2SSP_AP_STATUS_POW_CONNECTED 0xd6
+#define SSP_MSG2SSP_AP_STATUS_POW_DISCONNECTED 0xd7
+#define SSP_MSG2SSP_AP_TEMPHUMIDITY_CAL_DONE 0xda
+#define SSP_MSG2SSP_AP_MCU_SET_DUMPMODE 0xdb
+#define SSP_MSG2SSP_AP_MCU_DUMP_CHECK 0xdc
+#define SSP_MSG2SSP_AP_MCU_BATCH_FLUSH 0xdd
+#define SSP_MSG2SSP_AP_MCU_BATCH_COUNT 0xdf
+
+#define SSP_MSG2SSP_AP_WHOAMI 0x0f
+#define SSP_MSG2SSP_AP_FIRMWARE_REV 0xf0
+#define SSP_MSG2SSP_AP_SENSOR_FORMATION 0xf1
+#define SSP_MSG2SSP_AP_SENSOR_PROXTHRESHOLD 0xf2
+#define SSP_MSG2SSP_AP_SENSOR_BARCODE_EMUL 0xf3
+#define SSP_MSG2SSP_AP_SENSOR_SCANNING 0xf4
+#define SSP_MSG2SSP_AP_SET_MAGNETIC_HWOFFSET 0xf5
+#define SSP_MSG2SSP_AP_GET_MAGNETIC_HWOFFSET 0xf6
+#define SSP_MSG2SSP_AP_SENSOR_GESTURE_CURRENT 0xf7
+#define SSP_MSG2SSP_AP_GET_THERM 0xf8
+#define SSP_MSG2SSP_AP_GET_BIG_DATA 0xf9
+#define SSP_MSG2SSP_AP_SET_BIG_DATA 0xfa
+#define SSP_MSG2SSP_AP_START_BIG_DATA 0xfb
+#define SSP_MSG2SSP_AP_SET_MAGNETIC_STATIC_MATRIX 0xfd
+#define SSP_MSG2SSP_AP_SENSOR_TILT 0xea
+#define SSP_MSG2SSP_AP_MCU_SET_TIME 0xfe
+#define SSP_MSG2SSP_AP_MCU_GET_TIME 0xff
+
+#define SSP_MSG2SSP_AP_FUSEROM 0x01
+
+/* voice data */
+#define SSP_TYPE_WAKE_UP_VOICE_SERVICE 0x01
+#define SSP_TYPE_WAKE_UP_VOICE_SOUND_SOURCE_AM 0x01
+#define SSP_TYPE_WAKE_UP_VOICE_SOUND_SOURCE_GRAMMER 0x02
+
+/* Factory Test */
+#define SSP_ACCELEROMETER_FACTORY 0x80
+#define SSP_GYROSCOPE_FACTORY 0x81
+#define SSP_GEOMAGNETIC_FACTORY 0x82
+#define SSP_PRESSURE_FACTORY 0x85
+#define SSP_GESTURE_FACTORY 0x86
+#define SSP_TEMPHUMIDITY_CRC_FACTORY 0x88
+#define SSP_GYROSCOPE_TEMP_FACTORY 0x8a
+#define SSP_GYROSCOPE_DPS_FACTORY 0x8b
+#define SSP_MCU_FACTORY 0x8c
+#define SSP_MCU_SLEEP_FACTORY 0x8d
+
+/* SSP -> AP ACK about write CMD */
+#define SSP_MSG_ACK 0x80 /* ACK from SSP to AP */
+#define SSP_MSG_NAK 0x70 /* NAK from SSP to AP */
+
+struct ssp_sensorhub_info {
+ char *fw_name;
+ char *fw_crashed_name;
+ unsigned int fw_rev;
+ const u8 * const mag_table;
+ const unsigned int mag_length;
+};
+
+/* ssp_msg options bit */
+#define SSP_RW 0
+#define SSP_INDEX 3
+
+#define SSP_AP2HUB_READ 0
+#define SSP_AP2HUB_WRITE 1
+#define SSP_HUB2AP_WRITE 2
+#define SSP_AP2HUB_READY 3
+#define SSP_AP2HUB_RETURN 4
+
+/**
+ * struct ssp_data - ssp platformdata structure
+ * @spi: spi device
+ * @sensorhub_info: info about sensorhub board specific features
+ * @wdt_timer: watchdog timer
+ * @work_wdt: watchdog work
+ * @work_firmware: firmware upgrade work queue
+ * @work_refresh: refresh work queue for reset request from MCU
+ * @shut_down: shut down flag
+ * @mcu_dump_mode: mcu dump mode for debug
+ * @time_syncing: time syncing indication flag
+ * @timestamp: previous time in ns calculated for time syncing
+ * @check_status: status table for each sensor
+ * @com_fail_cnt: communication fail count
+ * @reset_cnt: reset count
+ * @timeout_cnt: timeout count
+ * @available_sensors: available sensors seen by sensorhub (bit array)
+ * @cur_firm_rev: cached current firmware revision
+ * @last_resume_state: last AP resume/suspend state used to handle the PM
+ * state of ssp
+ * @last_ap_state: (obsolete) sleep notification for MCU
+ * @sensor_enable: sensor enable mask
+ * @delay_buf: data acquisition intervals table
+ * @batch_latency_buf: purpose not yet known; present in the communication protocol
+ * @batch_opt_buf: purpose not yet known; present in the communication protocol
+ * @accel_position: purpose not yet known; present in the communication protocol
+ * @mag_position: purpose not yet known; present in the communication protocol
+ * @fw_dl_state: firmware download state
+ * @comm_lock: lock protecting the handshake
+ * @pending_lock: lock protecting pending list and completion
+ * @mcu_reset_gpio: mcu reset line
+ * @ap_mcu_gpio: ap to mcu gpio line
+ * @mcu_ap_gpio: mcu to ap gpio line
+ * @pending_list: pending list for messages queued to be sent/read
+ * @sensor_devs: registered IIO devices table
+ * @enable_refcount: enable reference count for wdt (watchdog timer)
+ * @header_buffer: cache aligned buffer for packet header
+ */
+struct ssp_data {
+ struct spi_device *spi;
+ struct ssp_sensorhub_info *sensorhub_info;
+ struct timer_list wdt_timer;
+ struct work_struct work_wdt;
+ struct delayed_work work_refresh;
+
+ bool shut_down;
+ bool mcu_dump_mode;
+ bool time_syncing;
+ int64_t timestamp;
+
+ int check_status[SSP_SENSOR_MAX];
+
+ unsigned int com_fail_cnt;
+ unsigned int reset_cnt;
+ unsigned int timeout_cnt;
+
+ unsigned int available_sensors;
+ unsigned int cur_firm_rev;
+
+ char last_resume_state;
+ char last_ap_state;
+
+ unsigned int sensor_enable;
+ u32 delay_buf[SSP_SENSOR_MAX];
+ s32 batch_latency_buf[SSP_SENSOR_MAX];
+ s8 batch_opt_buf[SSP_SENSOR_MAX];
+
+ int accel_position;
+ int mag_position;
+ int fw_dl_state;
+
+ struct mutex comm_lock;
+ struct mutex pending_lock;
+
+ int mcu_reset_gpio;
+ int ap_mcu_gpio;
+ int mcu_ap_gpio;
+
+ struct list_head pending_list;
+
+ struct iio_dev *sensor_devs[SSP_SENSOR_MAX];
+ atomic_t enable_refcount;
+
+ __le16 header_buffer[SSP_HEADER_BUFFER_SIZE / sizeof(__le16)]
+ ____cacheline_aligned;
+};
+
+void ssp_clean_pending_list(struct ssp_data *data);
+
+int ssp_command(struct ssp_data *data, char command, int arg);
+
+int ssp_send_instruction(struct ssp_data *data, u8 inst, u8 sensor_type,
+ u8 *send_buf, u8 length);
+
+int ssp_irq_msg(struct ssp_data *data);
+
+int ssp_get_chipid(struct ssp_data *data);
+
+int ssp_set_magnetic_matrix(struct ssp_data *data);
+
+unsigned int ssp_get_sensor_scanning_info(struct ssp_data *data);
+
+unsigned int ssp_get_firmware_rev(struct ssp_data *data);
+
+int ssp_queue_ssp_refresh_task(struct ssp_data *data, unsigned int delay);
+
+#endif /* __SSP_SENSORHUB_H__ */
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
new file mode 100644
index 000000000000..52d70435f5a1
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -0,0 +1,712 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include "ssp.h"
+
+#define SSP_WDT_TIME 10000
+#define SSP_LIMIT_RESET_CNT 20
+#define SSP_LIMIT_TIMEOUT_CNT 3
+
+/* This is probably the maximum SPI clock rate for version 1.0 of the bootcode */
+#define SSP_BOOT_SPI_HZ 400000
+
+/*
+ * The field names look enigmatic because this structure is mainly used to
+ * flatten a few values whose meaning depends on the command type.
+ */
+struct ssp_instruction {
+ __le32 a;
+ __le32 b;
+ u8 c;
+} __attribute__((__packed__));
+
+static const u8 ssp_magnitude_table[] = {110, 85, 171, 71, 203, 195, 0, 67,
+ 208, 56, 175, 244, 206, 213, 0, 92, 250, 0, 55, 48, 189, 252, 171,
+ 243, 13, 45, 250};
+
+static const struct ssp_sensorhub_info ssp_rinato_info = {
+ .fw_name = "ssp_B2.fw",
+ .fw_crashed_name = "ssp_crashed.fw",
+ .fw_rev = 14052300,
+ .mag_table = ssp_magnitude_table,
+ .mag_length = ARRAY_SIZE(ssp_magnitude_table),
+};
+
+static const struct ssp_sensorhub_info ssp_thermostat_info = {
+ .fw_name = "thermostat_B2.fw",
+ .fw_crashed_name = "ssp_crashed.fw",
+ .fw_rev = 14080600,
+ .mag_table = ssp_magnitude_table,
+ .mag_length = ARRAY_SIZE(ssp_magnitude_table),
+};
+
+static const struct mfd_cell sensorhub_sensor_devs[] = {
+ {
+ .name = "ssp-accelerometer",
+ },
+ {
+ .name = "ssp-gyroscope",
+ },
+};
+
+static void ssp_toggle_mcu_reset_gpio(struct ssp_data *data)
+{
+ gpio_set_value(data->mcu_reset_gpio, 0);
+ usleep_range(1000, 1200);
+ gpio_set_value(data->mcu_reset_gpio, 1);
+ msleep(50);
+}
+
+static void ssp_sync_available_sensors(struct ssp_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < SSP_SENSOR_MAX; ++i) {
+ if (data->available_sensors & BIT(i)) {
+ ret = ssp_enable_sensor(data, i, data->delay_buf[i]);
+ if (ret < 0) {
+ dev_err(&data->spi->dev,
+ "Sync sensor nr: %d fail\n", i);
+ continue;
+ }
+ }
+ }
+
+ ret = ssp_command(data, SSP_MSG2SSP_AP_MCU_SET_DUMPMODE,
+ data->mcu_dump_mode);
+ if (ret < 0)
+ dev_err(&data->spi->dev,
+ "SSP_MSG2SSP_AP_MCU_SET_DUMPMODE failed\n");
+}
+
+static void ssp_enable_mcu(struct ssp_data *data, bool enable)
+{
+ dev_info(&data->spi->dev, "current shutdown = %d, old = %d\n", enable,
+ data->shut_down);
+
+ if (enable && data->shut_down) {
+ data->shut_down = false;
+ enable_irq(data->spi->irq);
+ enable_irq_wake(data->spi->irq);
+ } else if (!enable && !data->shut_down) {
+ data->shut_down = true;
+ disable_irq(data->spi->irq);
+ disable_irq_wake(data->spi->irq);
+ } else {
+ dev_warn(&data->spi->dev, "current shutdown = %d, old = %d\n",
+ enable, data->shut_down);
+ }
+}
+
+/*
+ * This is the first function that communicates with the MCU, so the first
+ * attempt may fail.
+ */
+static int ssp_check_fwbl(struct ssp_data *data)
+{
+ int retries = 0;
+
+ while (retries++ < 5) {
+ data->cur_firm_rev = ssp_get_firmware_rev(data);
+ if (data->cur_firm_rev == SSP_INVALID_REVISION ||
+ data->cur_firm_rev == SSP_INVALID_REVISION2) {
+ dev_warn(&data->spi->dev,
+ "Invalid revision, trying %d time\n", retries);
+ } else {
+ break;
+ }
+ }
+
+ if (data->cur_firm_rev == SSP_INVALID_REVISION ||
+ data->cur_firm_rev == SSP_INVALID_REVISION2) {
+ dev_err(&data->spi->dev, "SSP_INVALID_REVISION\n");
+ return SSP_FW_DL_STATE_NEED_TO_SCHEDULE;
+ }
+
+ dev_info(&data->spi->dev,
+ "MCU Firm Rev : Old = %8u, New = %8u\n",
+ data->cur_firm_rev,
+ data->sensorhub_info->fw_rev);
+
+ if (data->cur_firm_rev != data->sensorhub_info->fw_rev)
+ return SSP_FW_DL_STATE_NEED_TO_SCHEDULE;
+
+ return SSP_FW_DL_STATE_NONE;
+}
+
+static void ssp_reset_mcu(struct ssp_data *data)
+{
+ ssp_enable_mcu(data, false);
+ ssp_clean_pending_list(data);
+ ssp_toggle_mcu_reset_gpio(data);
+ ssp_enable_mcu(data, true);
+}
+
+static void ssp_wdt_work_func(struct work_struct *work)
+{
+ struct ssp_data *data = container_of(work, struct ssp_data, work_wdt);
+
+ dev_err(&data->spi->dev, "%s - Sensor state: 0x%x, RC: %u, CC: %u\n",
+ __func__, data->available_sensors, data->reset_cnt,
+ data->com_fail_cnt);
+
+ ssp_reset_mcu(data);
+ data->com_fail_cnt = 0;
+ data->timeout_cnt = 0;
+}
+
+static void ssp_wdt_timer_func(unsigned long ptr)
+{
+ struct ssp_data *data = (struct ssp_data *)ptr;
+
+ switch (data->fw_dl_state) {
+ case SSP_FW_DL_STATE_FAIL:
+ case SSP_FW_DL_STATE_DOWNLOADING:
+ case SSP_FW_DL_STATE_SYNC:
+ goto _mod;
+ }
+
+ if (data->timeout_cnt > SSP_LIMIT_TIMEOUT_CNT ||
+ data->com_fail_cnt > SSP_LIMIT_RESET_CNT)
+ queue_work(system_power_efficient_wq, &data->work_wdt);
+_mod:
+ mod_timer(&data->wdt_timer, jiffies + msecs_to_jiffies(SSP_WDT_TIME));
+}
+
+static void ssp_enable_wdt_timer(struct ssp_data *data)
+{
+ mod_timer(&data->wdt_timer, jiffies + msecs_to_jiffies(SSP_WDT_TIME));
+}
+
+static void ssp_disable_wdt_timer(struct ssp_data *data)
+{
+ del_timer_sync(&data->wdt_timer);
+ cancel_work_sync(&data->work_wdt);
+}
+
+/**
+ * ssp_get_sensor_delay() - gets sensor data acquisition period
+ * @data: sensorhub structure
+ * @type: SSP sensor type
+ *
+ * Returns acquisition period in ms
+ */
+u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type type)
+{
+ return data->delay_buf[type];
+}
+EXPORT_SYMBOL(ssp_get_sensor_delay);
+
+/**
+ * ssp_enable_sensor() - enables data acquisition for sensor
+ * @data: sensorhub structure
+ * @type: SSP sensor type
+ * @delay: delay in ms
+ *
+ * Returns 0 or negative value in case of error
+ */
+int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay)
+{
+ int ret;
+ struct ssp_instruction to_send;
+
+ to_send.a = cpu_to_le32(delay);
+ to_send.b = cpu_to_le32(data->batch_latency_buf[type]);
+ to_send.c = data->batch_opt_buf[type];
+
+ switch (data->check_status[type]) {
+ case SSP_INITIALIZATION_STATE:
+ /* do calibration step, now just enable */
+ case SSP_ADD_SENSOR_STATE:
+ ret = ssp_send_instruction(data,
+ SSP_MSG2SSP_INST_BYPASS_SENSOR_ADD,
+ type,
+ (u8 *)&to_send, sizeof(to_send));
+ if (ret < 0) {
+ dev_err(&data->spi->dev, "Enabling sensor failed\n");
+ data->check_status[type] = SSP_NO_SENSOR_STATE;
+ goto derror;
+ }
+
+ data->sensor_enable |= BIT(type);
+ data->check_status[type] = SSP_RUNNING_SENSOR_STATE;
+ break;
+ case SSP_RUNNING_SENSOR_STATE:
+ ret = ssp_send_instruction(data,
+ SSP_MSG2SSP_INST_CHANGE_DELAY, type,
+ (u8 *)&to_send, sizeof(to_send));
+ if (ret < 0) {
+ dev_err(&data->spi->dev,
+ "Changing sensor delay failed\n");
+ goto derror;
+ }
+ break;
+ default:
+ data->check_status[type] = SSP_ADD_SENSOR_STATE;
+ break;
+ }
+
+ data->delay_buf[type] = delay;
+
+ if (atomic_inc_return(&data->enable_refcount) == 1)
+ ssp_enable_wdt_timer(data);
+
+ return 0;
+
+derror:
+ return ret;
+}
+EXPORT_SYMBOL(ssp_enable_sensor);
+
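A minimal consumer-side sketch, mirroring what ssp_common_buffer_postenable() does later in this patch; the helper names are hypothetical.

/* Hypothetical consumer: start acquisition at the currently cached period. */
static int example_start(struct ssp_data *data, enum ssp_sensor_type type)
{
	return ssp_enable_sensor(data, type, ssp_get_sensor_delay(data, type));
}

/* Hypothetical consumer: later change the period to 100 ms without re-enabling. */
static int example_set_100ms(struct ssp_data *data, enum ssp_sensor_type type)
{
	return ssp_change_delay(data, type, 100);
}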
+/**
+ * ssp_change_delay() - changes the data acquisition period for a sensor
+ * @data: sensorhub structure
+ * @type: SSP sensor type
+ * @delay: delay in ms
+ *
+ * Returns 0 or negative value in case of error
+ */
+int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay)
+{
+ int ret;
+ struct ssp_instruction to_send;
+
+ to_send.a = cpu_to_le32(delay);
+ to_send.b = cpu_to_le32(data->batch_latency_buf[type]);
+ to_send.c = data->batch_opt_buf[type];
+
+ ret = ssp_send_instruction(data, SSP_MSG2SSP_INST_CHANGE_DELAY, type,
+ (u8 *)&to_send, sizeof(to_send));
+ if (ret < 0) {
+ dev_err(&data->spi->dev, "Changing sensor delay failed\n");
+ return ret;
+ }
+
+ data->delay_buf[type] = delay;
+
+ return 0;
+}
+EXPORT_SYMBOL(ssp_change_delay);
+
+/**
+ * ssp_disable_sensor() - disables sensor
+ *
+ * @data: sensorhub structure
+ * @type: SSP sensor type
+ *
+ * Returns 0 or negative value in case of error
+ */
+int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type)
+{
+ int ret;
+ __le32 command;
+
+ if (data->sensor_enable & BIT(type)) {
+ command = cpu_to_le32(data->delay_buf[type]);
+
+ ret = ssp_send_instruction(data,
+ SSP_MSG2SSP_INST_BYPASS_SENSOR_RM,
+ type, (u8 *)&command,
+ sizeof(command));
+ if (ret < 0) {
+ dev_err(&data->spi->dev, "Remove sensor fail\n");
+ return ret;
+ }
+
+ data->sensor_enable &= ~BIT(type);
+ }
+
+ data->check_status[type] = SSP_ADD_SENSOR_STATE;
+
+ if (atomic_dec_and_test(&data->enable_refcount))
+ ssp_disable_wdt_timer(data);
+
+ return 0;
+}
+EXPORT_SYMBOL(ssp_disable_sensor);
+
+static irqreturn_t ssp_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ssp_data *data = dev_id;
+
+ /*
+ * This wrapper preserves the error path of ssp_irq_msg(), which is
+ * defined in a different file.
+ */
+ ssp_irq_msg(data);
+
+ return IRQ_HANDLED;
+}
+
+static int ssp_initialize_mcu(struct ssp_data *data)
+{
+ int ret;
+
+ ssp_clean_pending_list(data);
+
+ ret = ssp_get_chipid(data);
+ if (ret != SSP_DEVICE_ID) {
+ dev_err(&data->spi->dev, "%s - MCU %s ret = %d\n", __func__,
+ ret < 0 ? "is not working" : "identification failed",
+ ret);
+ return ret < 0 ? ret : -ENODEV;
+ }
+
+ dev_info(&data->spi->dev, "MCU device ID = %d\n", ret);
+
+ /*
+ * Needs clarification; for now we do not want to export all transfer
+ * methods to the sensor drivers.
+ */
+ ret = ssp_set_magnetic_matrix(data);
+ if (ret < 0) {
+ dev_err(&data->spi->dev,
+ "%s - ssp_set_magnetic_matrix failed\n", __func__);
+ return ret;
+ }
+
+ data->available_sensors = ssp_get_sensor_scanning_info(data);
+ if (data->available_sensors == 0) {
+ dev_err(&data->spi->dev,
+ "%s - ssp_get_sensor_scanning_info failed\n", __func__);
+ return -EIO;
+ }
+
+ data->cur_firm_rev = ssp_get_firmware_rev(data);
+ dev_info(&data->spi->dev, "MCU Firm Rev : New = %8u\n",
+ data->cur_firm_rev);
+
+ return ssp_command(data, SSP_MSG2SSP_AP_MCU_DUMP_CHECK, 0);
+}
+
+/*
+ * The sensorhub can request its own reinitialization as a drastic and rare
+ * error-handling measure. The request comes from the MCU.
+ */
+static void ssp_refresh_task(struct work_struct *work)
+{
+ struct ssp_data *data = container_of((struct delayed_work *)work,
+ struct ssp_data, work_refresh);
+
+ dev_info(&data->spi->dev, "refreshing\n");
+
+ data->reset_cnt++;
+
+ if (ssp_initialize_mcu(data) >= 0) {
+ ssp_sync_available_sensors(data);
+ if (data->last_ap_state != 0)
+ ssp_command(data, data->last_ap_state, 0);
+
+ if (data->last_resume_state != 0)
+ ssp_command(data, data->last_resume_state, 0);
+
+ data->timeout_cnt = 0;
+ data->com_fail_cnt = 0;
+ }
+}
+
+int ssp_queue_ssp_refresh_task(struct ssp_data *data, unsigned int delay)
+{
+ cancel_delayed_work_sync(&data->work_refresh);
+
+ return queue_delayed_work(system_power_efficient_wq,
+ &data->work_refresh,
+ msecs_to_jiffies(delay));
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id ssp_of_match[] = {
+ {
+ .compatible = "samsung,sensorhub-rinato",
+ .data = &ssp_rinato_info,
+ }, {
+ .compatible = "samsung,sensorhub-thermostat",
+ .data = &ssp_thermostat_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ssp_of_match);
+
+static struct ssp_data *ssp_parse_dt(struct device *dev)
+{
+ int ret;
+ struct ssp_data *data;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->mcu_ap_gpio = of_get_named_gpio(node, "mcu-ap-gpios", 0);
+ if (data->mcu_ap_gpio < 0)
+ goto err_free_pd;
+
+ data->ap_mcu_gpio = of_get_named_gpio(node, "ap-mcu-gpios", 0);
+ if (data->ap_mcu_gpio < 0)
+ goto err_free_pd;
+
+ data->mcu_reset_gpio = of_get_named_gpio(node, "mcu-reset-gpios", 0);
+ if (data->mcu_reset_gpio < 0)
+ goto err_free_pd;
+
+ ret = devm_gpio_request_one(dev, data->ap_mcu_gpio, GPIOF_OUT_INIT_HIGH,
+ "ap-mcu-gpios");
+ if (ret)
+ goto err_free_pd;
+
+ ret = devm_gpio_request_one(dev, data->mcu_reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "mcu-reset-gpios");
+ if (ret)
+ goto err_ap_mcu;
+
+ match = of_match_node(ssp_of_match, node);
+ if (!match)
+ goto err_mcu_reset_gpio;
+
+ data->sensorhub_info = (struct ssp_sensorhub_info *)match->data;
+
+ dev_set_drvdata(dev, data);
+
+ return data;
+
+err_mcu_reset_gpio:
+ devm_gpio_free(dev, data->mcu_reset_gpio);
+err_ap_mcu:
+ devm_gpio_free(dev, data->ap_mcu_gpio);
+err_free_pd:
+ devm_kfree(dev, data);
+ return NULL;
+}
+#else
+static struct ssp_data *ssp_parse_dt(struct device *pdev)
+{
+ return NULL;
+}
+#endif
+
+/**
+ * ssp_register_consumer() - registers iio consumer in ssp framework
+ *
+ * @indio_dev: consumer iio device
+ * @type: ssp sensor type
+ */
+void ssp_register_consumer(struct iio_dev *indio_dev, enum ssp_sensor_type type)
+{
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ data->sensor_devs[type] = indio_dev;
+}
+EXPORT_SYMBOL(ssp_register_consumer);
+
+static int ssp_probe(struct spi_device *spi)
+{
+ int ret, i;
+ struct ssp_data *data;
+
+ data = ssp_parse_dt(&spi->dev);
+ if (!data) {
+ dev_err(&spi->dev, "Failed to find platform data\n");
+ return -ENODEV;
+ }
+
+ ret = mfd_add_devices(&spi->dev, -1, sensorhub_sensor_devs,
+ ARRAY_SIZE(sensorhub_sensor_devs), NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "mfd add devices fail\n");
+ return ret;
+ }
+
+ spi->mode = SPI_MODE_1;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to setup spi\n");
+ return ret;
+ }
+
+ data->fw_dl_state = SSP_FW_DL_STATE_NONE;
+ data->spi = spi;
+ spi_set_drvdata(spi, data);
+
+ mutex_init(&data->comm_lock);
+
+ for (i = 0; i < SSP_SENSOR_MAX; ++i) {
+ data->delay_buf[i] = SSP_DEFAULT_POLLING_DELAY;
+ data->batch_latency_buf[i] = 0;
+ data->batch_opt_buf[i] = 0;
+ data->check_status[i] = SSP_INITIALIZATION_STATE;
+ }
+
+ data->delay_buf[SSP_BIO_HRM_LIB] = 100;
+
+ data->time_syncing = true;
+
+ mutex_init(&data->pending_lock);
+ INIT_LIST_HEAD(&data->pending_list);
+
+ atomic_set(&data->enable_refcount, 0);
+
+ INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
+ INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
+
+ setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+
+ ret = request_threaded_irq(data->spi->irq, NULL,
+ ssp_irq_thread_fn,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "SSP_Int", data);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Irq request fail\n");
+ goto err_setup_irq;
+ }
+
+ /* Start with the IRQ enabled so the enable/disable calls stay balanced */
+ data->shut_down = false;
+
+ /* just to avoid an unbalanced IRQ wake enable/disable */
+ enable_irq_wake(data->spi->irq);
+
+ data->fw_dl_state = ssp_check_fwbl(data);
+ if (data->fw_dl_state == SSP_FW_DL_STATE_NONE) {
+ ret = ssp_initialize_mcu(data);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Initialize_mcu failed\n");
+ goto err_read_reg;
+ }
+ } else {
+ dev_err(&spi->dev, "Firmware version not supported\n");
+ ret = -EPERM;
+ goto err_read_reg;
+ }
+
+ return 0;
+
+err_read_reg:
+ free_irq(data->spi->irq, data);
+err_setup_irq:
+ mutex_destroy(&data->pending_lock);
+ mutex_destroy(&data->comm_lock);
+
+ dev_err(&spi->dev, "Probe failed!\n");
+
+ return ret;
+}
+
+static int ssp_remove(struct spi_device *spi)
+{
+ struct ssp_data *data = spi_get_drvdata(spi);
+
+ if (ssp_command(data, SSP_MSG2SSP_AP_STATUS_SHUTDOWN, 0) < 0)
+ dev_err(&data->spi->dev,
+ "SSP_MSG2SSP_AP_STATUS_SHUTDOWN failed\n");
+
+ ssp_enable_mcu(data, false);
+ ssp_disable_wdt_timer(data);
+
+ ssp_clean_pending_list(data);
+
+ free_irq(data->spi->irq, data);
+
+ del_timer_sync(&data->wdt_timer);
+ cancel_work_sync(&data->work_wdt);
+
+ mutex_destroy(&data->comm_lock);
+ mutex_destroy(&data->pending_lock);
+
+ mfd_remove_devices(&spi->dev);
+
+ return 0;
+}
+
+static int ssp_suspend(struct device *dev)
+{
+ int ret;
+ struct ssp_data *data = spi_get_drvdata(to_spi_device(dev));
+
+ data->last_resume_state = SSP_MSG2SSP_AP_STATUS_SUSPEND;
+
+ if (atomic_read(&data->enable_refcount) > 0)
+ ssp_disable_wdt_timer(data);
+
+ ret = ssp_command(data, SSP_MSG2SSP_AP_STATUS_SUSPEND, 0);
+ if (ret < 0) {
+ dev_err(&data->spi->dev,
+ "%s SSP_MSG2SSP_AP_STATUS_SUSPEND failed\n", __func__);
+
+ ssp_enable_wdt_timer(data);
+ return ret;
+ }
+
+ data->time_syncing = false;
+ disable_irq(data->spi->irq);
+
+ return 0;
+}
+
+static int ssp_resume(struct device *dev)
+{
+ int ret;
+ struct ssp_data *data = spi_get_drvdata(to_spi_device(dev));
+
+ enable_irq(data->spi->irq);
+
+ if (atomic_read(&data->enable_refcount) > 0)
+ ssp_enable_wdt_timer(data);
+
+ ret = ssp_command(data, SSP_MSG2SSP_AP_STATUS_RESUME, 0);
+ if (ret < 0) {
+ dev_err(&data->spi->dev,
+ "%s SSP_MSG2SSP_AP_STATUS_RESUME failed\n", __func__);
+ ssp_disable_wdt_timer(data);
+ return ret;
+ }
+
+ /* timesyncing is set by MCU */
+ data->last_resume_state = SSP_MSG2SSP_AP_STATUS_RESUME;
+
+ return 0;
+}
+
+static const struct dev_pm_ops ssp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume)
+};
+
+static struct spi_driver ssp_driver = {
+ .probe = ssp_probe,
+ .remove = ssp_remove,
+ .driver = {
+ .pm = &ssp_pm_ops,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ssp_of_match),
+ .name = "sensorhub"
+ },
+};
+
+module_spi_driver(ssp_driver);
+
+MODULE_DESCRIPTION("ssp sensorhub driver");
+MODULE_AUTHOR("Samsung Electronics");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
new file mode 100644
index 000000000000..a3ae165f8d9f
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/iio/common/ssp_sensors.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ssp_iio_sensor.h"
+
+/**
+ * ssp_common_buffer_postenable() - generic postenable callback for ssp buffer
+ *
+ * @indio_dev: iio device
+ *
+ * Returns 0 or negative value in case of error
+ */
+int ssp_common_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ssp_sensor_data *spd = iio_priv(indio_dev);
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ /*
+ * The allocation is done in the postenable callback because the scan
+ * size is only known at this point.
+ */
+ spd->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL | GFP_DMA);
+ if (!spd->buffer)
+ return -ENOMEM;
+
+ return ssp_enable_sensor(data, spd->type,
+ ssp_get_sensor_delay(data, spd->type));
+}
+EXPORT_SYMBOL(ssp_common_buffer_postenable);
+
+/**
+ * ssp_common_buffer_postdisable() - generic postdisable callback for ssp buffer
+ *
+ * @indio_dev: iio device
+ *
+ * Returns 0 or negative value in case of error
+ */
+int ssp_common_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct ssp_sensor_data *spd = iio_priv(indio_dev);
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ ret = ssp_disable_sensor(data, spd->type);
+ if (ret < 0)
+ return ret;
+
+ kfree(spd->buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL(ssp_common_buffer_postdisable);
+
+/**
+ * ssp_common_process_data() - Common process data callback for ssp sensors
+ *
+ * @indio_dev: iio device
+ * @buf: source buffer
+ * @len: sensor data length
+ * @timestamp: system timestamp
+ *
+ * Returns 0 or negative value in case of error
+ */
+int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
+ unsigned int len, int64_t timestamp)
+{
+ __le32 time;
+ int64_t calculated_time;
+ struct ssp_sensor_data *spd = iio_priv(indio_dev);
+
+ if (indio_dev->scan_bytes == 0)
+ return 0;
+
+ /*
+ * The hub always sends the full set of samples; keep the available
+ * scan masks in mind.
+ */
+ memcpy(spd->buffer, buf, len);
+
+ if (indio_dev->scan_timestamp) {
+ memcpy(&time, &((char *)buf)[len], SSP_TIME_SIZE);
+ calculated_time =
+ timestamp + (int64_t)le32_to_cpu(time) * 1000000;
+ }
+
+ return iio_push_to_buffers_with_timestamp(indio_dev, spd->buffer,
+ calculated_time);
+}
+EXPORT_SYMBOL(ssp_common_process_data);
+
+MODULE_AUTHOR("Karol Wrona <k.wrona@samsung.com>");
+MODULE_DESCRIPTION("Samsung sensorhub commons");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h b/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h
new file mode 100644
index 000000000000..541c6590d69c
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h
@@ -0,0 +1,71 @@
+#ifndef __SSP_IIO_SENSOR_H__
+#define __SSP_IIO_SENSOR_H__
+
+#define SSP_CHANNEL_AG(_type, _mod, _index) \
+{ \
+ .type = _type,\
+ .modified = 1,\
+ .channel2 = _mod,\
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .scan_index = _index,\
+ .scan_type = {\
+ .sign = 's',\
+ .realbits = 16,\
+ .storagebits = 16,\
+ .shift = 0,\
+ .endianness = IIO_LE,\
+ },\
+}
+
+/* It is defined here as it is a mixed timestamp */
+#define SSP_CHAN_TIMESTAMP(_si) { \
+ .type = IIO_TIMESTAMP, \
+ .channel = -1, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 64, \
+ .storagebits = 64, \
+ }, \
+}
+
+#define SSP_MS_PER_S 1000
+#define SSP_INVERTED_SCALING_FACTOR 1000000U
+
+#define SSP_FACTOR_WITH_MS \
+ (SSP_INVERTED_SCALING_FACTOR * SSP_MS_PER_S)
+
+int ssp_common_buffer_postenable(struct iio_dev *indio_dev);
+
+int ssp_common_buffer_postdisable(struct iio_dev *indio_dev);
+
+int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
+ unsigned int len, int64_t timestamp);
+
+/* Converts time in ms to frequency */
+static inline void ssp_convert_to_freq(u32 time, int *integer_part,
+ int *fractional)
+{
+ if (time == 0) {
+ *fractional = 0;
+ *integer_part = 0;
+ return;
+ }
+
+ *integer_part = SSP_FACTOR_WITH_MS / time;
+ *fractional = *integer_part % SSP_INVERTED_SCALING_FACTOR;
+ *integer_part = *integer_part / SSP_INVERTED_SCALING_FACTOR;
+}
+
+/* Converts frequency to time in ms */
+static inline int ssp_convert_to_time(int integer_part, int fractional)
+{
+ u64 value;
+
+ value = (u64)integer_part * SSP_INVERTED_SCALING_FACTOR + fractional;
+ if (value == 0)
+ return 0;
+
+ return div64_u64((u64)SSP_FACTOR_WITH_MS, value);
+}
+#endif /* __SSP_IIO_SENSOR_H__ */
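A quick user-space check of the two conversions above, using the same constants (SSP_INVERTED_SCALING_FACTOR of 1000000 and SSP_MS_PER_S of 1000); a 200 ms period maps to exactly 5 Hz and back. The program is illustrative only.

#include <stdio.h>

#define INV_SCALE	1000000ULL
#define FACTOR_MS	(INV_SCALE * 1000ULL)	/* SSP_FACTOR_WITH_MS */

int main(void)
{
	unsigned int time_ms = 200;

	/* ssp_convert_to_freq(): 1e9 / 200 = 5000000 -> 5 Hz, fractional part 0 */
	unsigned long long v = FACTOR_MS / time_ms;
	unsigned int integer_part = v / INV_SCALE;
	unsigned int fractional = v % INV_SCALE;

	/* ssp_convert_to_time(): 1e9 / (5 * 1e6 + 0) = 200 ms again */
	unsigned long long back = FACTOR_MS / (integer_part * INV_SCALE + fractional);

	printf("%u ms -> %u.%06u Hz -> %llu ms\n",
	       time_ms, integer_part, fractional, back);

	return 0;	/* prints "200 ms -> 5.000000 Hz -> 200 ms" */
}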
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
new file mode 100644
index 000000000000..704284a475ae
--- /dev/null
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "ssp.h"
+
+#define SSP_DEV (&data->spi->dev)
+#define SSP_GET_MESSAGE_TYPE(data) (data & (3 << SSP_RW))
+
+/*
+ * SSP -> AP Instruction
+ * These tell what packet type to expect. In the future there will
+ * be fewer of them. BYPASS means common sensor packets with accel, gyro,
+ * HRM etc. data. LIBRARY and META are mock-ups for now.
+ */
+#define SSP_MSG2AP_INST_BYPASS_DATA 0x37
+#define SSP_MSG2AP_INST_LIBRARY_DATA 0x01
+#define SSP_MSG2AP_INST_DEBUG_DATA 0x03
+#define SSP_MSG2AP_INST_BIG_DATA 0x04
+#define SSP_MSG2AP_INST_META_DATA 0x05
+#define SSP_MSG2AP_INST_TIME_SYNC 0x06
+#define SSP_MSG2AP_INST_RESET 0x07
+
+#define SSP_UNIMPLEMENTED -1
+
+struct ssp_msg_header {
+ u8 cmd;
+ __le16 length;
+ __le16 options;
+ __le32 data;
+} __attribute__((__packed__));
+
+struct ssp_msg {
+ u16 length;
+ u16 options;
+ struct list_head list;
+ struct completion *done;
+ struct ssp_msg_header *h;
+ char *buffer;
+};
+
+static const int ssp_offset_map[SSP_SENSOR_MAX] = {
+ [SSP_ACCELEROMETER_SENSOR] = SSP_ACCELEROMETER_SIZE +
+ SSP_TIME_SIZE,
+ [SSP_GYROSCOPE_SENSOR] = SSP_GYROSCOPE_SIZE +
+ SSP_TIME_SIZE,
+ [SSP_GEOMAGNETIC_UNCALIB_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_GEOMAGNETIC_RAW] = SSP_UNIMPLEMENTED,
+ [SSP_GEOMAGNETIC_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_PRESSURE_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_GESTURE_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_PROXIMITY_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_TEMPERATURE_HUMIDITY_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_LIGHT_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_PROXIMITY_RAW] = SSP_UNIMPLEMENTED,
+ [SSP_ORIENTATION_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_STEP_DETECTOR] = SSP_UNIMPLEMENTED,
+ [SSP_SIG_MOTION_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_GYRO_UNCALIB_SENSOR] = SSP_UNIMPLEMENTED,
+ [SSP_GAME_ROTATION_VECTOR] = SSP_UNIMPLEMENTED,
+ [SSP_ROTATION_VECTOR] = SSP_UNIMPLEMENTED,
+ [SSP_STEP_COUNTER] = SSP_UNIMPLEMENTED,
+ [SSP_BIO_HRM_RAW] = SSP_BIO_HRM_RAW_SIZE +
+ SSP_TIME_SIZE,
+ [SSP_BIO_HRM_RAW_FAC] = SSP_BIO_HRM_RAW_FAC_SIZE +
+ SSP_TIME_SIZE,
+ [SSP_BIO_HRM_LIB] = SSP_BIO_HRM_LIB_SIZE +
+ SSP_TIME_SIZE,
+};
+
+#define SSP_HEADER_SIZE (sizeof(struct ssp_msg_header))
+#define SSP_HEADER_SIZE_ALIGNED (ALIGN(SSP_HEADER_SIZE, 4))
+
+static struct ssp_msg *ssp_create_msg(u8 cmd, u16 len, u16 opt, u32 data)
+{
+ struct ssp_msg_header h;
+ struct ssp_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ h.cmd = cmd;
+ h.length = cpu_to_le16(len);
+ h.options = cpu_to_le16(opt);
+ h.data = cpu_to_le32(data);
+
+ msg->buffer = kzalloc(SSP_HEADER_SIZE_ALIGNED + len,
+ GFP_KERNEL | GFP_DMA);
+ if (!msg->buffer) {
+ kfree(msg);
+ return NULL;
+ }
+
+ msg->length = len;
+ msg->options = opt;
+
+ memcpy(msg->buffer, &h, SSP_HEADER_SIZE);
+
+ return msg;
+}
+
+/*
+ * It is a bit heavy to do it this way, but the function is typically used to
+ * compose a message from smaller chunks placed on the stack. The chunks are
+ * usually small, so the memcpy should be well optimized.
+ */
+static inline void ssp_fill_buffer(struct ssp_msg *m, unsigned int offset,
+ const void *src, unsigned int len)
+{
+ memcpy(&m->buffer[SSP_HEADER_SIZE_ALIGNED + offset], src, len);
+}
+
+static inline void ssp_get_buffer(struct ssp_msg *m, unsigned int offset,
+ void *dest, unsigned int len)
+{
+ memcpy(dest, &m->buffer[SSP_HEADER_SIZE_ALIGNED + offset], len);
+}
+
+#define SSP_GET_BUFFER_AT_INDEX(m, index) \
+ (m->buffer[SSP_HEADER_SIZE_ALIGNED + index])
+#define SSP_SET_BUFFER_AT_INDEX(m, index, val) \
+ (m->buffer[SSP_HEADER_SIZE_ALIGNED + index] = val)
+
+static void ssp_clean_msg(struct ssp_msg *m)
+{
+ kfree(m->buffer);
+ kfree(m);
+}
+
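A hedged sketch of how these framing helpers are typically combined to issue an AP-to-hub write; the command byte, payload and timeout are made up, and ssp_spi_sync() is the synchronous transfer helper defined further down in this file.

/* Hypothetical example: send a one-byte payload as an AP -> hub write. */
static int example_write_byte(struct ssp_data *data, u8 cmd, u8 payload)
{
	struct ssp_msg *msg;
	int ret;

	msg = ssp_create_msg(cmd, sizeof(payload), SSP_AP2HUB_WRITE, 0);
	if (!msg)
		return -ENOMEM;

	ssp_fill_buffer(msg, 0, &payload, sizeof(payload));

	ret = ssp_spi_sync(data, msg, 1000);	/* 1 s timeout, arbitrary */
	ssp_clean_msg(msg);

	return ret;
}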
+static int ssp_print_mcu_debug(char *data_frame, int *data_index,
+ int received_len)
+{
+ int length = data_frame[(*data_index)++];
+
+ if (length > received_len - *data_index || length <= 0) {
+ ssp_dbg("[SSP]: MSG From MCU-invalid debug length(%d/%d)\n",
+ length, received_len);
+ return length ? length : -EPROTO;
+ }
+
+ ssp_dbg("[SSP]: MSG From MCU - %s\n", &data_frame[*data_index]);
+
+ *data_index += length;
+
+ return 0;
+}
+
+/*
+ * It was designed this way: the extra GPIO lines implement a kind of
+ * handshake. Please do not ask why; only the firmware author knows.
+ */
+static int ssp_check_lines(struct ssp_data *data, bool state)
+{
+ int delay_cnt = 0;
+
+ gpio_set_value_cansleep(data->ap_mcu_gpio, state);
+
+ while (gpio_get_value_cansleep(data->mcu_ap_gpio) != state) {
+ usleep_range(3000, 3500);
+
+ if (data->shut_down || delay_cnt++ > 500) {
+ dev_err(SSP_DEV, "%s:timeout, hw ack wait fail %d\n",
+ __func__, state);
+
+ if (!state)
+ gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int ssp_do_transfer(struct ssp_data *data, struct ssp_msg *msg,
+ struct completion *done, int timeout)
+{
+ int status;
+ /*
+ * Check whether this is a short one-way message or whether the whole
+ * transfer has a second part following an interrupt.
+ */
+ const bool use_no_irq = msg->length == 0;
+
+ if (data->shut_down)
+ return -EPERM;
+
+ msg->done = done;
+
+ mutex_lock(&data->comm_lock);
+
+ status = ssp_check_lines(data, false);
+ if (status < 0)
+ goto _error_locked;
+
+ status = spi_write(data->spi, msg->buffer, SSP_HEADER_SIZE);
+ if (status < 0) {
+ gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+ dev_err(SSP_DEV, "%s spi_write fail\n", __func__);
+ goto _error_locked;
+ }
+
+ if (!use_no_irq) {
+ mutex_lock(&data->pending_lock);
+ list_add_tail(&msg->list, &data->pending_list);
+ mutex_unlock(&data->pending_lock);
+ }
+
+ status = ssp_check_lines(data, true);
+ if (status < 0) {
+ if (!use_no_irq) {
+ mutex_lock(&data->pending_lock);
+ list_del(&msg->list);
+ mutex_unlock(&data->pending_lock);
+ }
+ goto _error_locked;
+ }
+
+ mutex_unlock(&data->comm_lock);
+
+ if (!use_no_irq && done)
+ if (wait_for_completion_timeout(done,
+ msecs_to_jiffies(timeout)) ==
+ 0) {
+ mutex_lock(&data->pending_lock);
+ list_del(&msg->list);
+ mutex_unlock(&data->pending_lock);
+
+ data->timeout_cnt++;
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+_error_locked:
+ mutex_unlock(&data->comm_lock);
+ data->timeout_cnt++;
+ return status;
+}
+
+static inline int ssp_spi_sync_command(struct ssp_data *data,
+ struct ssp_msg *msg)
+{
+ return ssp_do_transfer(data, msg, NULL, 0);
+}
+
+static int ssp_spi_sync(struct ssp_data *data, struct ssp_msg *msg,
+ int timeout)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (WARN_ON(!msg->length))
+ return -EPERM;
+
+ return ssp_do_transfer(data, msg, &done, timeout);
+}
+
+static int ssp_handle_big_data(struct ssp_data *data, char *dataframe, int *idx)
+{
+ /* mock-up; it will be changed when other sensor types are added */
+ *idx += 8;
+ return 0;
+}
+
+static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
+{
+ int idx, sd;
+ struct timespec ts;
+ struct ssp_sensor_data *spd;
+ struct iio_dev **indio_devs = data->sensor_devs;
+
+ getnstimeofday(&ts);
+
+ for (idx = 0; idx < len;) {
+ switch (dataframe[idx++]) {
+ case SSP_MSG2AP_INST_BYPASS_DATA:
+ sd = dataframe[idx++];
+ if (sd < 0 || sd >= SSP_SENSOR_MAX) {
+ dev_err(SSP_DEV,
+ "Mcu data frame1 error %d\n", sd);
+ return -EPROTO;
+ }
+
+ if (indio_devs[sd]) {
+ spd = iio_priv(indio_devs[sd]);
+ if (spd->process_data)
+ spd->process_data(indio_devs[sd],
+ &dataframe[idx],
+ data->timestamp);
+ } else {
+ dev_err(SSP_DEV, "no client for frame\n");
+ }
+
+ idx += ssp_offset_map[sd];
+ break;
+ case SSP_MSG2AP_INST_DEBUG_DATA:
+ sd = ssp_print_mcu_debug(dataframe, &idx, len);
+ if (sd) {
+ dev_err(SSP_DEV,
+ "Mcu data frame3 error %d\n", sd);
+ return sd;
+ }
+ break;
+ case SSP_MSG2AP_INST_LIBRARY_DATA:
+ idx += len;
+ break;
+ case SSP_MSG2AP_INST_BIG_DATA:
+ ssp_handle_big_data(data, dataframe, &idx);
+ break;
+ case SSP_MSG2AP_INST_TIME_SYNC:
+ data->time_syncing = true;
+ break;
+ case SSP_MSG2AP_INST_RESET:
+ ssp_queue_ssp_refresh_task(data, 0);
+ break;
+ }
+ }
+
+ if (data->time_syncing)
+ data->timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+
+ return 0;
+}
+
+/* threaded irq */
+int ssp_irq_msg(struct ssp_data *data)
+{
+ bool found = false;
+ char *buffer;
+ u8 msg_type;
+ int ret;
+ u16 length, msg_options;
+ struct ssp_msg *msg, *n;
+
+ ret = spi_read(data->spi, data->header_buffer, SSP_HEADER_BUFFER_SIZE);
+ if (ret < 0) {
+ dev_err(SSP_DEV, "header read fail\n");
+ return ret;
+ }
+
+ length = le16_to_cpu(data->header_buffer[1]);
+ msg_options = le16_to_cpu(data->header_buffer[0]);
+
+ if (length == 0) {
+ dev_err(SSP_DEV, "length received from mcu is 0\n");
+ return -EINVAL;
+ }
+
+ msg_type = SSP_GET_MESSAGE_TYPE(msg_options);
+
+ switch (msg_type) {
+ case SSP_AP2HUB_READ:
+ case SSP_AP2HUB_WRITE:
+ /*
+ * This is a small list with only a few elements; the packets can
+ * be received in any order.
+ */
+ mutex_lock(&data->pending_lock);
+ list_for_each_entry_safe(msg, n, &data->pending_list, list) {
+ if (msg->options == msg_options) {
+ list_del(&msg->list);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * Handling of dead messages could be implemented here, but the
+ * slave should not send any. Until that is verified, handle
+ * this case gracefully.
+ */
+ buffer = kmalloc(length, GFP_KERNEL | GFP_DMA);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto _unlock;
+ }
+
+ /* a dead packet was received, so this is always an error */
+ ret = spi_read(data->spi, buffer, length);
+ if (ret >= 0)
+ ret = -EPROTO;
+
+ kfree(buffer);
+
+ dev_err(SSP_DEV, "No match error %x\n",
+ msg_options);
+
+ goto _unlock;
+ }
+
+ if (msg_type == SSP_AP2HUB_READ)
+ ret = spi_read(data->spi,
+ &msg->buffer[SSP_HEADER_SIZE_ALIGNED],
+ msg->length);
+
+ if (msg_type == SSP_AP2HUB_WRITE) {
+ ret = spi_write(data->spi,
+ &msg->buffer[SSP_HEADER_SIZE_ALIGNED],
+ msg->length);
+ if (msg_options & SSP_AP2HUB_RETURN) {
+ msg->options =
+ SSP_AP2HUB_READ | SSP_AP2HUB_RETURN;
+ msg->length = 1;
+
+ list_add_tail(&msg->list, &data->pending_list);
+ goto _unlock;
+ }
+ }
+
+ if (msg->done)
+ if (!completion_done(msg->done))
+ complete(msg->done);
+_unlock:
+ mutex_unlock(&data->pending_lock);
+ break;
+ case SSP_HUB2AP_WRITE:
+ buffer = kzalloc(length, GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = spi_read(data->spi, buffer, length);
+ if (ret < 0) {
+ dev_err(SSP_DEV, "spi read fail\n");
+ kfree(buffer);
+ break;
+ }
+
+ ret = ssp_parse_dataframe(data, buffer, length);
+
+ kfree(buffer);
+ break;
+
+ default:
+ dev_err(SSP_DEV, "unknown msg type\n");
+ return -EPROTO;
+ }
+
+ return ret;
+}
+
+void ssp_clean_pending_list(struct ssp_data *data)
+{
+ struct ssp_msg *msg, *n;
+
+ mutex_lock(&data->pending_lock);
+ list_for_each_entry_safe(msg, n, &data->pending_list, list) {
+ list_del(&msg->list);
+
+ if (msg->done)
+ if (!completion_done(msg->done))
+ complete(msg->done);
+ }
+ mutex_unlock(&data->pending_lock);
+}
+
+int ssp_command(struct ssp_data *data, char command, int arg)
+{
+ int ret;
+ struct ssp_msg *msg;
+
+ msg = ssp_create_msg(command, 0, SSP_AP2HUB_WRITE, arg);
+ if (!msg)
+ return -ENOMEM;
+
+ ssp_dbg("%s - command 0x%x %d\n", __func__, command, arg);
+
+ ret = ssp_spi_sync_command(data, msg);
+ ssp_clean_msg(msg);
+
+ return ret;
+}
+
+int ssp_send_instruction(struct ssp_data *data, u8 inst, u8 sensor_type,
+ u8 *send_buf, u8 length)
+{
+ int ret;
+ struct ssp_msg *msg;
+
+ if (data->fw_dl_state == SSP_FW_DL_STATE_DOWNLOADING) {
+ dev_err(SSP_DEV, "%s - Skip Inst! DL state = %d\n",
+ __func__, data->fw_dl_state);
+ return -EBUSY;
+ } else if (!(data->available_sensors & BIT(sensor_type)) &&
+ (inst <= SSP_MSG2SSP_INST_CHANGE_DELAY)) {
+ dev_err(SSP_DEV, "%s - Bypass Inst Skip! - %u\n",
+ __func__, sensor_type);
+ return -EIO; /* just fail */
+ }
+
+ msg = ssp_create_msg(inst, length + 2, SSP_AP2HUB_WRITE, 0);
+ if (!msg)
+ return -ENOMEM;
+
+ ssp_fill_buffer(msg, 0, &sensor_type, 1);
+ ssp_fill_buffer(msg, 1, send_buf, length);
+
+ ssp_dbg("%s - Inst = 0x%x, Sensor Type = 0x%x, data = %u\n",
+ __func__, inst, sensor_type, send_buf[1]);
+
+ ret = ssp_spi_sync(data, msg, 1000);
+ ssp_clean_msg(msg);
+
+ return ret;
+}
+
+int ssp_get_chipid(struct ssp_data *data)
+{
+ int ret;
+ char buffer;
+ struct ssp_msg *msg;
+
+ msg = ssp_create_msg(SSP_MSG2SSP_AP_WHOAMI, 1, SSP_AP2HUB_READ, 0);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = ssp_spi_sync(data, msg, 1000);
+
+ buffer = SSP_GET_BUFFER_AT_INDEX(msg, 0);
+
+ ssp_clean_msg(msg);
+
+ return ret < 0 ? ret : buffer;
+}
+
+int ssp_set_magnetic_matrix(struct ssp_data *data)
+{
+ int ret;
+ struct ssp_msg *msg;
+
+ msg = ssp_create_msg(SSP_MSG2SSP_AP_SET_MAGNETIC_STATIC_MATRIX,
+ data->sensorhub_info->mag_length, SSP_AP2HUB_WRITE,
+ 0);
+ if (!msg)
+ return -ENOMEM;
+
+ ssp_fill_buffer(msg, 0, data->sensorhub_info->mag_table,
+ data->sensorhub_info->mag_length);
+
+ ret = ssp_spi_sync(data, msg, 1000);
+ ssp_clean_msg(msg);
+
+ return ret;
+}
+
+unsigned int ssp_get_sensor_scanning_info(struct ssp_data *data)
+{
+ int ret;
+ __le32 result;
+ u32 cpu_result = 0;
+
+ struct ssp_msg *msg = ssp_create_msg(SSP_MSG2SSP_AP_SENSOR_SCANNING, 4,
+ SSP_AP2HUB_READ, 0);
+ if (!msg)
+ return 0;
+
+ ret = ssp_spi_sync(data, msg, 1000);
+ if (ret < 0) {
+ dev_err(SSP_DEV, "%s - spi read fail %d\n", __func__, ret);
+ goto _exit;
+ }
+
+ ssp_get_buffer(msg, 0, &result, 4);
+ cpu_result = le32_to_cpu(result);
+
+ dev_info(SSP_DEV, "%s state: 0x%08x\n", __func__, cpu_result);
+
+_exit:
+ ssp_clean_msg(msg);
+ return cpu_result;
+}
+
+unsigned int ssp_get_firmware_rev(struct ssp_data *data)
+{
+ int ret;
+ __le32 result;
+
+ struct ssp_msg *msg = ssp_create_msg(SSP_MSG2SSP_AP_FIRMWARE_REV, 4,
+ SSP_AP2HUB_READ, 0);
+ if (!msg)
+ return SSP_INVALID_REVISION;
+
+ ret = ssp_spi_sync(data, msg, 1000);
+ if (ret < 0) {
+ dev_err(SSP_DEV, "%s - transfer fail %d\n", __func__, ret);
+ ret = SSP_INVALID_REVISION;
+ goto _exit;
+ }
+
+ ssp_get_buffer(msg, 0, &result, 4);
+ ret = le32_to_cpu(result);
+
+_exit:
+ ssp_clean_msg(msg);
+ return ret;
+}
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 78a6a1ab3ece..5b377373f48d 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -54,7 +54,7 @@ static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb,
if (err)
goto acc_spi_read_error;
- memcpy(data, tb->rx_buf, len*sizeof(u8));
+ memcpy(data, tb->rx_buf, len);
mutex_unlock(&tb->buf_lock);
return len;
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 7c5245d9f99c..50ed8d1ca45a 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -445,7 +445,7 @@ static int ad9523_store_eeprom(struct iio_dev *indio_dev)
tmp = 4;
do {
- msleep(16);
+ msleep(20);
ret = ad9523_read(indio_dev,
AD9523_EEPROM_DATA_XFER_STATUS);
if (ret < 0)
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 63a25d9e1204..10a0dfc3b01f 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -387,10 +387,8 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "could not allocate memory for platform data\n");
+ if (!pdata)
return NULL;
- }
strncpy(&pdata->name[0], np->name, SPI_NAME_SIZE - 1);
@@ -613,9 +611,8 @@ static int adf4350_remove(struct spi_device *spi)
if (st->clk)
clk_disable_unprepare(st->clk);
- if (!IS_ERR(reg)) {
+ if (!IS_ERR(reg))
regulator_disable(reg);
- }
return 0;
}
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index 36a38776f739..f46341b39139 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -16,6 +16,8 @@ itg3200-y := itg3200_core.o
itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
obj-$(CONFIG_ITG3200) += itg3200.o
+obj-$(CONFIG_IIO_SSP_SENSORS_COMMONS) += ssp_gyro_sensor.o
+
obj-$(CONFIG_IIO_ST_GYRO_3AXIS) += st_gyro.o
st_gyro-y := st_gyro_core.o
st_gyro-$(CONFIG_IIO_BUFFER) += st_gyro_buffer.o
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index a3ea1e8785d7..a3c3e19de527 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -111,19 +111,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
switch (mask) {
case 0:
- poll_value = hid_sensor_read_poll_value(
- &gyro_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
-
hid_sensor_power_state(&gyro_state->common_attributes, true);
- msleep_interruptible(poll_value * 2);
report_id = gyro_state->gyro[chan->scan_index].report_id;
address = gyro_3d_addresses[chan->scan_index];
if (report_id >= 0)
@@ -416,6 +409,7 @@ static struct platform_driver hid_gyro_3d_platform_driver = {
.id_table = hid_gyro_3d_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_gyro_3d_probe,
.remove = hid_gyro_3d_remove,
diff --git a/drivers/iio/gyro/ssp_gyro_sensor.c b/drivers/iio/gyro/ssp_gyro_sensor.c
new file mode 100644
index 000000000000..0a8afdd21728
--- /dev/null
+++ b/drivers/iio/gyro/ssp_gyro_sensor.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/iio/common/ssp_sensors.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../common/ssp_sensors/ssp_iio_sensor.h"
+
+#define SSP_CHANNEL_COUNT 3
+
+#define SSP_GYROSCOPE_NAME "ssp-gyroscope"
+static const char ssp_gyro_name[] = SSP_GYROSCOPE_NAME;
+
+enum ssp_gyro_3d_channel {
+ SSP_CHANNEL_SCAN_INDEX_X,
+ SSP_CHANNEL_SCAN_INDEX_Y,
+ SSP_CHANNEL_SCAN_INDEX_Z,
+ SSP_CHANNEL_SCAN_INDEX_TIME,
+};
+
+static int ssp_gyro_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ u32 t;
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ t = ssp_get_sensor_delay(data, SSP_GYROSCOPE_SENSOR);
+ ssp_convert_to_freq(t, val, val2);
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ssp_gyro_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ int ret;
+ struct ssp_data *data = dev_get_drvdata(indio_dev->dev.parent->parent);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ssp_convert_to_time(val, val2);
+ ret = ssp_change_delay(data, SSP_GYROSCOPE_SENSOR, ret);
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "gyro sensor enable fail\n");
+
+ return ret;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static struct iio_info ssp_gyro_iio_info = {
+ .read_raw = &ssp_gyro_read_raw,
+ .write_raw = &ssp_gyro_write_raw,
+};
+
+static const unsigned long ssp_gyro_scan_mask[] = { 0x07, 0, };
+
+static const struct iio_chan_spec ssp_gyro_channels[] = {
+ SSP_CHANNEL_AG(IIO_ANGL_VEL, IIO_MOD_X, SSP_CHANNEL_SCAN_INDEX_X),
+ SSP_CHANNEL_AG(IIO_ANGL_VEL, IIO_MOD_Y, SSP_CHANNEL_SCAN_INDEX_Y),
+ SSP_CHANNEL_AG(IIO_ANGL_VEL, IIO_MOD_Z, SSP_CHANNEL_SCAN_INDEX_Z),
+ SSP_CHAN_TIMESTAMP(SSP_CHANNEL_SCAN_INDEX_TIME),
+};
+
+static int ssp_process_gyro_data(struct iio_dev *indio_dev, void *buf,
+ int64_t timestamp)
+{
+ return ssp_common_process_data(indio_dev, buf, SSP_GYROSCOPE_SIZE,
+ timestamp);
+}
+
+static const struct iio_buffer_setup_ops ssp_gyro_buffer_ops = {
+ .postenable = &ssp_common_buffer_postenable,
+ .postdisable = &ssp_common_buffer_postdisable,
+};
+
+static int ssp_gyro_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct ssp_sensor_data *spd;
+ struct iio_buffer *buffer;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*spd));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ spd = iio_priv(indio_dev);
+
+ spd->process_data = ssp_process_gyro_data;
+ spd->type = SSP_GYROSCOPE_SENSOR;
+
+ indio_dev->name = ssp_gyro_name;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &ssp_gyro_iio_info;
+ indio_dev->modes = INDIO_BUFFER_SOFTWARE;
+ indio_dev->channels = ssp_gyro_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ssp_gyro_channels);
+ indio_dev->available_scan_masks = ssp_gyro_scan_mask;
+
+ buffer = devm_iio_kfifo_allocate(&pdev->dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ indio_dev->setup_ops = &ssp_gyro_buffer_ops;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /* SSP registration must be done after all IIO setup is complete */
+ ssp_register_consumer(indio_dev, SSP_GYROSCOPE_SENSOR);
+
+ return 0;
+}
+
+static int ssp_gyro_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver ssp_gyro_driver = {
+ .driver = {
+ .name = SSP_GYROSCOPE_NAME,
+ },
+ .probe = ssp_gyro_probe,
+ .remove = ssp_gyro_remove,
+};
+
+module_platform_driver(ssp_gyro_driver);
+
+MODULE_AUTHOR("Karol Wrona <k.wrona@samsung.com>");
+MODULE_DESCRIPTION("Samsung sensorhub gyroscopes driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 5f0ea77fe717..359883525ab7 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -48,6 +48,8 @@ unsigned int iio_buffer_poll(struct file *filp,
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
+int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev);
+void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev);
#define iio_buffer_poll_addr (&iio_buffer_poll)
#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
@@ -60,6 +62,13 @@ void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
+static inline int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev) {}
+
static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
static inline void iio_buffer_wakeup_poll(struct iio_dev *indio_dev) {}
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 2b0e45133e9d..5e610f7de5aa 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -25,6 +25,17 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+config KMX61
+ tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build a driver for Kionix KMX61 6-axis
+ accelerometer and magnetometer.
+ To compile this driver as module, choose M here: the module will
+ be called kmx61.
+
source "drivers/iio/imu/inv_mpu6050/Kconfig"
endmenu
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 114d2c17cbe2..e1e6e3d70e26 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -14,3 +14,5 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += inv_mpu6050/
+
+obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 2d0608ba88d7..48fbc0bc7e2a 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -7,6 +7,7 @@ config INV_MPU6050_IIO
depends on I2C && SYSFS
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select I2C_MUX
help
This driver supports the Invensense MPU6050 devices.
This driver can also support MPU6500 in MPU6050 compatibility mode
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b75519deac1a..f73e60b7a796 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -23,6 +23,8 @@
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/iio/iio.h>
+#include <linux/i2c-mux.h>
+#include <linux/acpi.h>
#include "inv_mpu_iio.h"
/*
@@ -52,6 +54,7 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
.int_enable = INV_MPU6050_REG_INT_ENABLE,
.pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
.pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+ .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
};
static const struct inv_mpu6050_chip_config chip_config_6050 = {
@@ -77,6 +80,83 @@ int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
}
+/*
+ * The i2c read/write needs to happen in unlocked mode because the
+ * parent adapter is shared. Using the locked versions would fail,
+ * since the mux adapter locks the parent i2c adapter while calling
+ * the select/deselect functions.
+ */
+static int inv_mpu6050_write_reg_unlocked(struct inv_mpu6050_state *st,
+ u8 reg, u8 d)
+{
+ int ret;
+ u8 buf[2];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = st->client->addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ buf[0] = reg;
+ buf[1] = d;
+ ret = __i2c_transfer(st->client->adapter, msg, 1);
+ if (ret != 1)
+ return ret;
+
+ return 0;
+}
+
+static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan_id)
+{
+ struct iio_dev *indio_dev = mux_priv;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ /* Use the same mutex that is used elsewhere to protect power operations */
+ mutex_lock(&indio_dev->mlock);
+ if (!st->powerup_count) {
+ ret = inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
+ 0);
+ if (ret)
+ goto write_error;
+
+ msleep(INV_MPU6050_REG_UP_TIME);
+ }
+ if (!ret) {
+ st->powerup_count++;
+ ret = inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
+ st->client->irq |
+ INV_MPU6050_BIT_BYPASS_EN);
+ }
+write_error:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
+ void *mux_priv, u32 chan_id)
+{
+ struct iio_dev *indio_dev = mux_priv;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ /* It does not really matter if any of these calls fails */
+ inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
+ st->client->irq);
+ st->powerup_count--;
+ if (!st->powerup_count)
+ inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
{
u8 d, mgmt_1;
@@ -133,13 +213,22 @@ int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
{
- int result;
+ int result = 0;
+
+ if (power_on) {
+ /* Already holding the indio_dev->mlock mutex */
+ if (!st->powerup_count)
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+ 0);
+ if (!result)
+ st->powerup_count++;
+ } else {
+ st->powerup_count--;
+ if (!st->powerup_count)
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
+ }
- if (power_on)
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, 0);
- else
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
if (result)
return result;
@@ -673,6 +762,7 @@ static int inv_mpu_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
st->client = client;
+ st->powerup_count = 0;
pdata = dev_get_platdata(&client->dev);
if (pdata)
st->plat_data = *pdata;
@@ -720,8 +810,21 @@ static int inv_mpu_probe(struct i2c_client *client,
goto out_remove_trigger;
}
+ st->mux_adapter = i2c_add_mux_adapter(client->adapter,
+ &client->dev,
+ indio_dev,
+ 0, 0, 0,
+ inv_mpu6050_select_bypass,
+ inv_mpu6050_deselect_bypass);
+ if (!st->mux_adapter) {
+ result = -ENODEV;
+ goto out_unreg_device;
+ }
+
return 0;
+out_unreg_device:
+ iio_device_unregister(indio_dev);
out_remove_trigger:
inv_mpu6050_remove_trigger(st);
out_unreg_ring:
@@ -734,6 +837,7 @@ static int inv_mpu_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ i2c_del_mux_adapter(st->mux_adapter);
iio_device_unregister(indio_dev);
inv_mpu6050_remove_trigger(st);
iio_triggered_buffer_cleanup(indio_dev);
@@ -772,6 +876,13 @@ static const struct i2c_device_id inv_mpu_id[] = {
MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+static const struct acpi_device_id inv_acpi_match[] = {
+ {"INVN6500", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
static struct i2c_driver inv_mpu_driver = {
.probe = inv_mpu_probe,
.remove = inv_mpu_remove,
@@ -780,6 +891,7 @@ static struct i2c_driver inv_mpu_driver = {
.owner = THIS_MODULE,
.name = "inv-mpu6050",
.pm = INV_MPU6050_PMOPS,
+ .acpi_match_table = ACPI_PTR(inv_acpi_match),
},
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index e7799315d4dc..aa837de57079 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -54,6 +54,7 @@ struct inv_mpu6050_reg_map {
u8 int_enable;
u8 pwr_mgmt_1;
u8 pwr_mgmt_2;
+ u8 int_pin_cfg;
};
/*device enum */
@@ -119,6 +120,8 @@ struct inv_mpu6050_state {
enum inv_devices chip_type;
spinlock_t time_stamp_lock;
struct i2c_client *client;
+ struct i2c_adapter *mux_adapter;
+ unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
};
@@ -179,6 +182,9 @@ struct inv_mpu6050_state {
/* 6 + 6 round up and plus 8 */
#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+#define INV_MPU6050_REG_INT_PIN_CFG 0x37
+#define INV_MPU6050_BIT_BYPASS_EN 0x2
+
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
#define INV_MPU6050_TIME_STAMP_TOR 5
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 926fccea8de0..844610c3a3a9 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -116,40 +116,35 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
int ret;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
- st->trig = iio_trigger_alloc("%s-dev%d",
- indio_dev->name,
- indio_dev->id);
- if (st->trig == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = request_irq(st->client->irq, &iio_trigger_generic_data_rdy_poll,
- IRQF_TRIGGER_RISING,
- "inv_mpu",
- st->trig);
+ st->trig = devm_iio_trigger_alloc(&indio_dev->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!st->trig)
+ return -ENOMEM;
+
+ ret = devm_request_irq(&indio_dev->dev, st->client->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING,
+ "inv_mpu",
+ st->trig);
if (ret)
- goto error_free_trig;
+ return ret;
+
st->trig->dev.parent = &st->client->dev;
st->trig->ops = &inv_mpu_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
+
ret = iio_trigger_register(st->trig);
if (ret)
- goto error_free_irq;
+ return ret;
+
indio_dev->trig = iio_trigger_get(st->trig);
return 0;
-
-error_free_irq:
- free_irq(st->client->irq, st->trig);
-error_free_trig:
- iio_trigger_free(st->trig);
-error_ret:
- return ret;
}
void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st)
{
iio_trigger_unregister(st->trig);
- free_irq(st->client->irq, st->trig);
- iio_trigger_free(st->trig);
}
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
new file mode 100644
index 000000000000..5cc3692acf37
--- /dev/null
+++ b/drivers/iio/imu/kmx61.c
@@ -0,0 +1,1595 @@
+/*
+ * KMX61 - Kionix 6-axis Accelerometer/Magnetometer
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for KMX61 (7-bit I2C slave address 0x0E or 0x0F).
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define KMX61_DRV_NAME "kmx61"
+#define KMX61_GPIO_NAME "kmx61_int"
+#define KMX61_IRQ_NAME "kmx61_event"
+
+#define KMX61_REG_WHO_AM_I 0x00
+#define KMX61_REG_INS1 0x01
+#define KMX61_REG_INS2 0x02
+
+/*
+ * Three 16-bit accelerometer output registers for the X/Y/Z axes.
+ * Only XOUT_L is used as a base register; all other addresses can
+ * be obtained by applying an offset and are provided here only for
+ * clarity.
+ */
+#define KMX61_ACC_XOUT_L 0x0A
+#define KMX61_ACC_XOUT_H 0x0B
+#define KMX61_ACC_YOUT_L 0x0C
+#define KMX61_ACC_YOUT_H 0x0D
+#define KMX61_ACC_ZOUT_L 0x0E
+#define KMX61_ACC_ZOUT_H 0x0F
+
+/*
+ * one 16-bit temperature output register
+ */
+#define KMX61_TEMP_L 0x10
+#define KMX61_TEMP_H 0x11
+
+/*
+ * Three 16-bit magnetometer output registers for the X/Y/Z axes.
+ */
+#define KMX61_MAG_XOUT_L 0x12
+#define KMX61_MAG_XOUT_H 0x13
+#define KMX61_MAG_YOUT_L 0x14
+#define KMX61_MAG_YOUT_H 0x15
+#define KMX61_MAG_ZOUT_L 0x16
+#define KMX61_MAG_ZOUT_H 0x17
+
+#define KMX61_REG_INL 0x28
+#define KMX61_REG_STBY 0x29
+#define KMX61_REG_CTRL1 0x2A
+#define KMX61_REG_CTRL2 0x2B
+#define KMX61_REG_ODCNTL 0x2C
+#define KMX61_REG_INC1 0x2D
+
+#define KMX61_REG_WUF_THRESH 0x3D
+#define KMX61_REG_WUF_TIMER 0x3E
+
+#define KMX61_ACC_STBY_BIT BIT(0)
+#define KMX61_MAG_STBY_BIT BIT(1)
+#define KMX61_ACT_STBY_BIT BIT(7)
+
+#define KMX61_ALL_STBY (KMX61_ACC_STBY_BIT | KMX61_MAG_STBY_BIT)
+
+#define KMX61_REG_INS1_BIT_WUFS BIT(1)
+
+#define KMX61_REG_INS2_BIT_ZP BIT(0)
+#define KMX61_REG_INS2_BIT_ZN BIT(1)
+#define KMX61_REG_INS2_BIT_YP BIT(2)
+#define KMX61_REG_INS2_BIT_YN BIT(3)
+#define KMX61_REG_INS2_BIT_XP BIT(4)
+#define KMX61_REG_INS2_BIT_XN BIT(5)
+
+#define KMX61_REG_CTRL1_GSEL_MASK 0x03
+
+#define KMX61_REG_CTRL1_BIT_RES BIT(4)
+#define KMX61_REG_CTRL1_BIT_DRDYE BIT(5)
+#define KMX61_REG_CTRL1_BIT_WUFE BIT(6)
+#define KMX61_REG_CTRL1_BIT_BTSE BIT(7)
+
+#define KMX61_REG_INC1_BIT_WUFS BIT(0)
+#define KMX61_REG_INC1_BIT_DRDYM BIT(1)
+#define KMX61_REG_INC1_BIT_DRDYA BIT(2)
+#define KMX61_REG_INC1_BIT_IEN BIT(5)
+
+#define KMX61_ACC_ODR_SHIFT 0
+#define KMX61_MAG_ODR_SHIFT 4
+#define KMX61_ACC_ODR_MASK 0x0F
+#define KMX61_MAG_ODR_MASK 0xF0
+
+#define KMX61_OWUF_MASK 0x7
+
+#define KMX61_DEFAULT_WAKE_THRESH 1
+#define KMX61_DEFAULT_WAKE_DURATION 1
+
+#define KMX61_SLEEP_DELAY_MS 2000
+
+#define KMX61_CHIP_ID 0x12
+
+/* KMX61 devices */
+#define KMX61_ACC 0x01
+#define KMX61_MAG 0x02
+
+struct kmx61_data {
+ struct i2c_client *client;
+
+ /* serialize access to non-atomic ops, e.g. set_mode */
+ struct mutex lock;
+
+ /* standby state */
+ bool acc_stby;
+ bool mag_stby;
+
+ /* power state */
+ bool acc_ps;
+ bool mag_ps;
+
+ /* config bits */
+ u8 range;
+ u8 odr_bits;
+ u8 wake_thresh;
+ u8 wake_duration;
+
+ /* accelerometer specific data */
+ struct iio_dev *acc_indio_dev;
+ struct iio_trigger *acc_dready_trig;
+ struct iio_trigger *motion_trig;
+ bool acc_dready_trig_on;
+ bool motion_trig_on;
+ bool ev_enable_state;
+
+ /* magnetometer specific data */
+ struct iio_dev *mag_indio_dev;
+ struct iio_trigger *mag_dready_trig;
+ bool mag_dready_trig_on;
+};
+
+enum kmx61_range {
+ KMX61_RANGE_2G,
+ KMX61_RANGE_4G,
+ KMX61_RANGE_8G,
+};
+
+enum kmx61_axis {
+ KMX61_AXIS_X,
+ KMX61_AXIS_Y,
+ KMX61_AXIS_Z,
+};
+
+static const u16 kmx61_uscale_table[] = {9582, 19163, 38326};
+
+static const struct {
+ int val;
+ int val2;
+ u8 odr_bits;
+} kmx61_samp_freq_table[] = { {12, 500000, 0x00},
+ {25, 0, 0x01},
+ {50, 0, 0x02},
+ {100, 0, 0x03},
+ {200, 0, 0x04},
+ {400, 0, 0x05},
+ {800, 0, 0x06},
+ {1600, 0, 0x07},
+ {0, 781000, 0x08},
+ {1, 563000, 0x09},
+ {3, 125000, 0x0A},
+ {6, 250000, 0x0B} };
+
+static const struct {
+ int val;
+ int val2;
+ int odr_bits;
+} kmx61_wake_up_odr_table[] = { {0, 781000, 0x00},
+ {1, 563000, 0x01},
+ {3, 125000, 0x02},
+ {6, 250000, 0x03},
+ {12, 500000, 0x04},
+ {25, 0, 0x05},
+ {50, 0, 0x06},
+ {100, 0, 0x06},
+ {200, 0, 0x06},
+ {400, 0, 0x06},
+ {800, 0, 0x06},
+ {1600, 0, 0x06} };
+
+static IIO_CONST_ATTR(accel_scale_available, "0.009582 0.019163 0.038326");
+static IIO_CONST_ATTR(magn_scale_available, "0.001465");
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800");
+
+static struct attribute *kmx61_acc_attributes[] = {
+ &iio_const_attr_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute *kmx61_mag_attributes[] = {
+ &iio_const_attr_magn_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group kmx61_acc_attribute_group = {
+ .attrs = kmx61_acc_attributes,
+};
+
+static const struct attribute_group kmx61_mag_attribute_group = {
+ .attrs = kmx61_mag_attributes,
+};
+
+static const struct iio_event_spec kmx61_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+};
+
+#define KMX61_ACC_CHAN(_axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = KMX61_ACC, \
+ .scan_index = KMX61_AXIS_ ## _axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_LE, \
+ }, \
+ .event_spec = &kmx61_event, \
+ .num_event_specs = 1 \
+}
+
+#define KMX61_MAG_CHAN(_axis) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _axis, \
+ .address = KMX61_MAG, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = KMX61_AXIS_ ## _axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 14, \
+ .storagebits = 16, \
+ .shift = 2, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+static const struct iio_chan_spec kmx61_acc_channels[] = {
+ KMX61_ACC_CHAN(X),
+ KMX61_ACC_CHAN(Y),
+ KMX61_ACC_CHAN(Z),
+};
+
+static const struct iio_chan_spec kmx61_mag_channels[] = {
+ KMX61_MAG_CHAN(X),
+ KMX61_MAG_CHAN(Y),
+ KMX61_MAG_CHAN(Z),
+};
+
+static void kmx61_set_data(struct iio_dev *indio_dev, struct kmx61_data *data)
+{
+ struct kmx61_data **priv = iio_priv(indio_dev);
+
+ *priv = data;
+}
+
+static struct kmx61_data *kmx61_get_data(struct iio_dev *indio_dev)
+{
+ return *(struct kmx61_data **)iio_priv(indio_dev);
+}
+
+static int kmx61_convert_freq_to_bit(int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kmx61_samp_freq_table); i++)
+ if (val == kmx61_samp_freq_table[i].val &&
+ val2 == kmx61_samp_freq_table[i].val2)
+ return kmx61_samp_freq_table[i].odr_bits;
+ return -EINVAL;
+}
+
+static int kmx61_convert_bit_to_freq(u8 odr_bits, int *val, int *val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kmx61_samp_freq_table); i++)
+ if (odr_bits == kmx61_samp_freq_table[i].odr_bits) {
+ *val = kmx61_samp_freq_table[i].val;
+ *val2 = kmx61_samp_freq_table[i].val2;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+
+static int kmx61_convert_wake_up_odr_to_bit(int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kmx61_wake_up_odr_table); ++i)
+ if (kmx61_wake_up_odr_table[i].val == val &&
+ kmx61_wake_up_odr_table[i].val2 == val2)
+ return kmx61_wake_up_odr_table[i].odr_bits;
+ return -EINVAL;
+}
+
+/**
+ * kmx61_set_mode() - set KMX61 device operating mode
+ * @data: kmx61 device private data pointer
+ * @mode: bitmask indicating the operating mode for @device
+ * @device: bitmask indicating the device for which @mode needs to be set
+ * @update: update the stby bits stored in the device's private @data
+ *
+ * Each sensor (accelerometer/magnetometer) has two operating modes:
+ * STANDBY and OPERATION. Neither the accelerometer nor the magnetometer can
+ * be disabled independently when both are enabled. The internal sensor state
+ * is saved in the acc_stby and mag_stby members of the driver's private @data.
+ */
+static int kmx61_set_mode(struct kmx61_data *data, u8 mode, u8 device,
+ bool update)
+{
+ int ret;
+ int acc_stby = -1, mag_stby = -1;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_STBY);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_stby\n");
+ return ret;
+ }
+ if (device & KMX61_ACC) {
+ if (mode & KMX61_ACC_STBY_BIT) {
+ ret |= KMX61_ACC_STBY_BIT;
+ acc_stby = 1;
+ } else {
+ ret &= ~KMX61_ACC_STBY_BIT;
+ acc_stby = 0;
+ }
+ }
+
+ if (device & KMX61_MAG) {
+ if (mode & KMX61_MAG_STBY_BIT) {
+ ret |= KMX61_MAG_STBY_BIT;
+ mag_stby = 1;
+ } else {
+ ret &= ~KMX61_MAG_STBY_BIT;
+ mag_stby = 0;
+ }
+ }
+
+ if (mode & KMX61_ACT_STBY_BIT)
+ ret |= KMX61_ACT_STBY_BIT;
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_STBY, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_stby\n");
+ return ret;
+ }
+
+ if (acc_stby != -1 && update)
+ data->acc_stby = acc_stby;
+ if (mag_stby != -1 && update)
+ data->mag_stby = mag_stby;
+
+ return 0;
+}
+
+static int kmx61_get_mode(struct kmx61_data *data, u8 *mode, u8 device)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_STBY);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_stby\n");
+ return ret;
+ }
+ *mode = 0;
+
+ if (device & KMX61_ACC) {
+ if (ret & KMX61_ACC_STBY_BIT)
+ *mode |= KMX61_ACC_STBY_BIT;
+ else
+ *mode &= ~KMX61_ACC_STBY_BIT;
+ }
+
+ if (device & KMX61_MAG) {
+ if (ret & KMX61_MAG_STBY_BIT)
+ *mode |= KMX61_MAG_STBY_BIT;
+ else
+ *mode &= ~KMX61_MAG_STBY_BIT;
+ }
+
+ return 0;
+}
+
+static int kmx61_set_wake_up_odr(struct kmx61_data *data, int val, int val2)
+{
+ int ret, odr_bits;
+
+ odr_bits = kmx61_convert_wake_up_odr_to_bit(val, val2);
+ if (odr_bits < 0)
+ return odr_bits;
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_CTRL2,
+ odr_bits);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error writing reg_ctrl2\n");
+ return ret;
+}
+
+static int kmx61_set_odr(struct kmx61_data *data, int val, int val2, u8 device)
+{
+ int ret;
+ u8 mode;
+ int lodr_bits, odr_bits;
+
+ ret = kmx61_get_mode(data, &mode, KMX61_ACC | KMX61_MAG);
+ if (ret < 0)
+ return ret;
+
+ lodr_bits = kmx61_convert_freq_to_bit(val, val2);
+ if (lodr_bits < 0)
+ return lodr_bits;
+
+ /* To change ODR, accel and magn must be in STDBY */
+ ret = kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG,
+ true);
+ if (ret < 0)
+ return ret;
+
+ odr_bits = 0;
+ if (device & KMX61_ACC)
+ odr_bits |= lodr_bits << KMX61_ACC_ODR_SHIFT;
+ if (device & KMX61_MAG)
+ odr_bits |= lodr_bits << KMX61_MAG_ODR_SHIFT;
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_ODCNTL,
+ odr_bits);
+ if (ret < 0)
+ return ret;
+
+ data->odr_bits = odr_bits;
+
+ if (device & KMX61_ACC) {
+ ret = kmx61_set_wake_up_odr(data, val, val2);
+ if (ret)
+ return ret;
+ }
+
+ return kmx61_set_mode(data, mode, KMX61_ACC | KMX61_MAG, true);
+}
+
+static int kmx61_get_odr(struct kmx61_data *data, int *val, int *val2,
+ u8 device)
+{
+ int i;
+ u8 lodr_bits;
+
+ if (device & KMX61_ACC)
+ lodr_bits = (data->odr_bits >> KMX61_ACC_ODR_SHIFT) &
+ KMX61_ACC_ODR_MASK;
+ else if (device & KMX61_MAG)
+ lodr_bits = (data->odr_bits >> KMX61_MAG_ODR_SHIFT) &
+ KMX61_MAG_ODR_MASK;
+ else
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(kmx61_samp_freq_table); i++)
+ if (lodr_bits == kmx61_samp_freq_table[i].odr_bits) {
+ *val = kmx61_samp_freq_table[i].val;
+ *val2 = kmx61_samp_freq_table[i].val2;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int kmx61_set_range(struct kmx61_data *data, u8 range)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ ret &= ~KMX61_REG_CTRL1_GSEL_MASK;
+ ret |= range & KMX61_REG_CTRL1_GSEL_MASK;
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_CTRL1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ data->range = range;
+
+ return 0;
+}
+
+static int kmx61_set_scale(struct kmx61_data *data, u16 uscale)
+{
+ int ret, i;
+ u8 mode;
+
+ for (i = 0; i < ARRAY_SIZE(kmx61_uscale_table); i++) {
+ if (kmx61_uscale_table[i] == uscale) {
+ ret = kmx61_get_mode(data, &mode,
+ KMX61_ACC | KMX61_MAG);
+ if (ret < 0)
+ return ret;
+
+ ret = kmx61_set_mode(data, KMX61_ALL_STBY,
+ KMX61_ACC | KMX61_MAG, true);
+ if (ret < 0)
+ return ret;
+
+ ret = kmx61_set_range(data, i);
+ if (ret < 0)
+ return ret;
+
+ return kmx61_set_mode(data, mode,
+ KMX61_ACC | KMX61_MAG, true);
+ }
+ }
+ return -EINVAL;
+}
+
+static int kmx61_chip_init(struct kmx61_data *data)
+{
+ int ret, val, val2;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_WHO_AM_I);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading who_am_i\n");
+ return ret;
+ }
+
+ if (ret != KMX61_CHIP_ID) {
+ dev_err(&data->client->dev,
+ "Wrong chip id, got %x expected %x\n",
+ ret, KMX61_CHIP_ID);
+ return -EINVAL;
+ }
+
+ /* set accel 12bit, 4g range */
+ ret = kmx61_set_range(data, KMX61_RANGE_4G);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_ODCNTL);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_odcntl\n");
+ return ret;
+ }
+ data->odr_bits = ret;
+
+ /* set output data rate for wake up (motion detection) function */
+ ret = kmx61_convert_bit_to_freq(data->odr_bits, &val, &val2);
+ if (ret < 0)
+ return ret;
+
+ ret = kmx61_set_wake_up_odr(data, val, val2);
+ if (ret < 0)
+ return ret;
+
+ /* set acc/magn to OPERATION mode */
+ ret = kmx61_set_mode(data, 0, KMX61_ACC | KMX61_MAG, true);
+ if (ret < 0)
+ return ret;
+
+ data->wake_thresh = KMX61_DEFAULT_WAKE_THRESH;
+ data->wake_duration = KMX61_DEFAULT_WAKE_DURATION;
+
+ return 0;
+}
+
+static int kmx61_setup_new_data_interrupt(struct kmx61_data *data,
+ bool status, u8 device)
+{
+ u8 mode;
+ int ret;
+
+ ret = kmx61_get_mode(data, &mode, KMX61_ACC | KMX61_MAG);
+ if (ret < 0)
+ return ret;
+
+ ret = kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_INC1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_inc1\n");
+ return ret;
+ }
+
+ if (status) {
+ ret |= KMX61_REG_INC1_BIT_IEN;
+ if (device & KMX61_ACC)
+ ret |= KMX61_REG_INC1_BIT_DRDYA;
+ if (device & KMX61_MAG)
+ ret |= KMX61_REG_INC1_BIT_DRDYM;
+ } else {
+ ret &= ~KMX61_REG_INC1_BIT_IEN;
+ if (device & KMX61_ACC)
+ ret &= ~KMX61_REG_INC1_BIT_DRDYA;
+ if (device & KMX61_MAG)
+ ret &= ~KMX61_REG_INC1_BIT_DRDYM;
+ }
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_INC1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_inc1\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ if (status)
+ ret |= KMX61_REG_CTRL1_BIT_DRDYE;
+ else
+ ret &= ~KMX61_REG_CTRL1_BIT_DRDYE;
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_CTRL1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ return kmx61_set_mode(data, mode, KMX61_ACC | KMX61_MAG, true);
+}
+
+static int kmx61_chip_update_thresholds(struct kmx61_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KMX61_REG_WUF_TIMER,
+ data->wake_duration);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_wuf_timer\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KMX61_REG_WUF_THRESH,
+ data->wake_thresh);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error writing reg_wuf_thresh\n");
+
+ return ret;
+}
+
+static int kmx61_setup_any_motion_interrupt(struct kmx61_data *data,
+ bool status)
+{
+ u8 mode;
+ int ret;
+
+ ret = kmx61_get_mode(data, &mode, KMX61_ACC | KMX61_MAG);
+ if (ret < 0)
+ return ret;
+
+ ret = kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
+ if (ret < 0)
+ return ret;
+
+ ret = kmx61_chip_update_thresholds(data);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_INC1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_inc1\n");
+ return ret;
+ }
+ if (status)
+ ret |= (KMX61_REG_INC1_BIT_IEN | KMX61_REG_INC1_BIT_WUFS);
+ else
+ ret &= ~(KMX61_REG_INC1_BIT_IEN | KMX61_REG_INC1_BIT_WUFS);
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_INC1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_inc1\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ if (status)
+ ret |= KMX61_REG_CTRL1_BIT_WUFE | KMX61_REG_CTRL1_BIT_BTSE;
+ else
+ ret &= ~(KMX61_REG_CTRL1_BIT_WUFE | KMX61_REG_CTRL1_BIT_BTSE);
+
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_CTRL1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+ mode |= KMX61_ACT_STBY_BIT;
+ return kmx61_set_mode(data, mode, KMX61_ACC | KMX61_MAG, true);
+}
+
+/**
+ * kmx61_set_power_state() - set power state for kmx61 @device
+ * @data: kmx61 device private pointer
+ * @on: power state to be set for @device
+ * @device: bitmask indicating the device for which the @on state is set
+ *
+ * Note that when the ACC power state needs to be set to ON while MAG is in
+ * OPERATION, kmx61_runtime_resume has already been called, so the ACC
+ * OPERATION mode must be set here. The same applies when the MAG power
+ * state needs to be set to ON while ACC is in OPERATION.
+ */
+static int kmx61_set_power_state(struct kmx61_data *data, bool on, u8 device)
+{
+#ifdef CONFIG_PM
+ int ret;
+
+ if (device & KMX61_ACC) {
+ if (on && !data->acc_ps && !data->mag_stby) {
+ ret = kmx61_set_mode(data, 0, KMX61_ACC, true);
+ if (ret < 0)
+ return ret;
+ }
+ data->acc_ps = on;
+ }
+ if (device & KMX61_MAG) {
+ if (on && !data->mag_ps && !data->acc_stby) {
+ ret = kmx61_set_mode(data, 0, KMX61_MAG, true);
+ if (ret < 0)
+ return ret;
+ }
+ data->mag_ps = on;
+ }
+
+ if (on) {
+ ret = pm_runtime_get_sync(&data->client->dev);
+ } else {
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
+ }
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed: kmx61_set_power_state for %d, ret %d\n",
+ on, ret);
+ if (on)
+ pm_runtime_put_noidle(&data->client->dev);
+
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+static int kmx61_read_measurement(struct kmx61_data *data, u8 base, u8 offset)
+{
+ int ret;
+ u8 reg = base + offset * 2;
+
+ ret = i2c_smbus_read_word_data(data->client, reg);
+ if (ret < 0)
+ dev_err(&data->client->dev, "failed to read reg at %x\n", reg);
+
+ return ret;
+}
+
+static int kmx61_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ u8 base_reg;
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ base_reg = KMX61_ACC_XOUT_L;
+ break;
+ case IIO_MAGN:
+ base_reg = KMX61_MAG_XOUT_L;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mutex_lock(&data->lock);
+
+ ret = kmx61_set_power_state(data, true, chan->address);
+ if (ret) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ ret = kmx61_read_measurement(data, base_reg, chan->scan_index);
+ if (ret < 0) {
+ kmx61_set_power_state(data, false, chan->address);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ ret = kmx61_set_power_state(data, false, chan->address);
+
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = kmx61_uscale_table[data->range];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ /* 14 bits res, 1465 microGauss per magn count */
+ *val = 0;
+ *val2 = 1465;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type != IIO_ACCEL && chan->type != IIO_MAGN)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ ret = kmx61_get_odr(data, val, val2, chan->address);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return -EINVAL;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static int kmx61_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ int ret;
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type != IIO_ACCEL && chan->type != IIO_MAGN)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ ret = kmx61_set_odr(data, val, val2, chan->address);
+ mutex_unlock(&data->lock);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ if (val != 0)
+ return -EINVAL;
+ mutex_lock(&data->lock);
+ ret = kmx61_set_scale(data, val2);
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kmx61_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ *val2 = 0;
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = data->wake_thresh;
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ *val = data->wake_duration;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kmx61_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ if (data->ev_enable_state)
+ return -EBUSY;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ data->wake_thresh = val;
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ data->wake_duration = val;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kmx61_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ return data->ev_enable_state;
+}
+
+static int kmx61_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+ int ret = 0;
+
+ if (state && data->ev_enable_state)
+ return 0;
+
+ mutex_lock(&data->lock);
+
+ if (!state && data->motion_trig_on) {
+ data->ev_enable_state = false;
+ goto err_unlock;
+ }
+
+ ret = kmx61_set_power_state(data, state, KMX61_ACC);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = kmx61_setup_any_motion_interrupt(data, state);
+ if (ret < 0) {
+ kmx61_set_power_state(data, false, KMX61_ACC);
+ goto err_unlock;
+ }
+
+ data->ev_enable_state = state;
+
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int kmx61_acc_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ if (data->acc_dready_trig != trig && data->motion_trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kmx61_mag_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ if (data->mag_dready_trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct iio_info kmx61_acc_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = kmx61_read_raw,
+ .write_raw = kmx61_write_raw,
+ .attrs = &kmx61_acc_attribute_group,
+ .read_event_value = kmx61_read_event,
+ .write_event_value = kmx61_write_event,
+ .read_event_config = kmx61_read_event_config,
+ .write_event_config = kmx61_write_event_config,
+ .validate_trigger = kmx61_acc_validate_trigger,
+};
+
+static const struct iio_info kmx61_mag_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = kmx61_read_raw,
+ .write_raw = kmx61_write_raw,
+ .attrs = &kmx61_mag_attribute_group,
+ .validate_trigger = kmx61_mag_validate_trigger,
+};
+
+
+static int kmx61_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ int ret = 0;
+ u8 device;
+
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+
+ mutex_lock(&data->lock);
+
+ if (!state && data->ev_enable_state && data->motion_trig_on) {
+ data->motion_trig_on = false;
+ goto err_unlock;
+ }
+
+ if (data->acc_dready_trig == trig || data->motion_trig == trig)
+ device = KMX61_ACC;
+ else
+ device = KMX61_MAG;
+
+ ret = kmx61_set_power_state(data, state, device);
+ if (ret < 0)
+ goto err_unlock;
+
+ if (data->acc_dready_trig == trig || data->mag_dready_trig == trig)
+ ret = kmx61_setup_new_data_interrupt(data, state, device);
+ else
+ ret = kmx61_setup_any_motion_interrupt(data, state);
+ if (ret < 0) {
+ kmx61_set_power_state(data, false, device);
+ goto err_unlock;
+ }
+
+ if (data->acc_dready_trig == trig)
+ data->acc_dready_trig_on = state;
+ else if (data->mag_dready_trig == trig)
+ data->mag_dready_trig_on = state;
+ else
+ data->motion_trig_on = state;
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int kmx61_trig_try_reenable(struct iio_trigger *trig)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_INL);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_inl\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops kmx61_trigger_ops = {
+ .set_trigger_state = kmx61_data_rdy_trigger_set_state,
+ .try_reenable = kmx61_trig_try_reenable,
+ .owner = THIS_MODULE,
+};
+
+static irqreturn_t kmx61_event_handler(int irq, void *private)
+{
+ struct kmx61_data *data = private;
+ struct iio_dev *indio_dev = data->acc_indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_INS1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ins1\n");
+ goto ack_intr;
+ }
+
+ if (ret & KMX61_REG_INS1_BIT_WUFS) {
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_INS2);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ins2\n");
+ goto ack_intr;
+ }
+
+ if (ret & KMX61_REG_INS2_BIT_XN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ 0);
+
+ if (ret & KMX61_REG_INS2_BIT_XP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ 0);
+
+ if (ret & KMX61_REG_INS2_BIT_YN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ 0);
+
+ if (ret & KMX61_REG_INS2_BIT_YP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ 0);
+
+ if (ret & KMX61_REG_INS2_BIT_ZN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ 0);
+
+ if (ret & KMX61_REG_INS2_BIT_ZP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ 0);
+ }
+
+ack_intr:
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_CTRL1);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+
+ ret |= KMX61_REG_CTRL1_BIT_RES;
+ ret = i2c_smbus_write_byte_data(data->client, KMX61_REG_CTRL1, ret);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+
+ ret = i2c_smbus_read_byte_data(data->client, KMX61_REG_INL);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error reading reg_inl\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kmx61_data_rdy_trig_poll(int irq, void *private)
+{
+ struct kmx61_data *data = private;
+
+ if (data->acc_dready_trig_on)
+ iio_trigger_poll(data->acc_dready_trig);
+ if (data->mag_dready_trig_on)
+ iio_trigger_poll(data->mag_dready_trig);
+
+ if (data->motion_trig_on)
+ iio_trigger_poll(data->motion_trig);
+
+ if (data->ev_enable_state)
+ return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kmx61_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct kmx61_data *data = kmx61_get_data(indio_dev);
+ int bit, ret, i = 0;
+ u8 base;
+ s16 buffer[8];
+
+ if (indio_dev == data->acc_indio_dev)
+ base = KMX61_ACC_XOUT_L;
+ else
+ base = KMX61_MAG_XOUT_L;
+
+ mutex_lock(&data->lock);
+ for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ indio_dev->masklength) {
+ ret = kmx61_read_measurement(data, base, bit);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto err;
+ }
+ buffer[i++] = ret;
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers(indio_dev, buffer);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const char *kmx61_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+ return dev_name(dev);
+}
+
+static int kmx61_gpio_probe(struct i2c_client *client, struct kmx61_data *data)
+{
+ struct device *dev;
+ struct gpio_desc *gpio;
+ int ret;
+
+ if (!client)
+ return -EINVAL;
+
+ dev = &client->dev;
+
+ /* data ready gpio interrupt pin */
+ gpio = devm_gpiod_get_index(dev, KMX61_GPIO_NAME, 0);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "acpi gpio get index failed\n");
+ return PTR_ERR(gpio);
+ }
+
+ ret = gpiod_direction_input(gpio);
+ if (ret)
+ return ret;
+
+ ret = gpiod_to_irq(gpio);
+
+ dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+ return ret;
+}
+
+static struct iio_dev *kmx61_indiodev_setup(struct kmx61_data *data,
+ const struct iio_info *info,
+ const struct iio_chan_spec *chan,
+ int num_channels,
+ const char *name)
+{
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&data->client->dev, sizeof(data));
+ if (!indio_dev)
+ return ERR_PTR(-ENOMEM);
+
+ kmx61_set_data(indio_dev, data);
+
+ indio_dev->dev.parent = &data->client->dev;
+ indio_dev->channels = chan;
+ indio_dev->num_channels = num_channels;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = info;
+
+ return indio_dev;
+}
+
+static struct iio_trigger *kmx61_trigger_setup(struct kmx61_data *data,
+ struct iio_dev *indio_dev,
+ const char *tag)
+{
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = devm_iio_trigger_alloc(&data->client->dev,
+ "%s-%s-dev%d",
+ indio_dev->name,
+ tag,
+ indio_dev->id);
+ if (!trig)
+ return ERR_PTR(-ENOMEM);
+
+ trig->dev.parent = &data->client->dev;
+ trig->ops = &kmx61_trigger_ops;
+ iio_trigger_set_drvdata(trig, indio_dev);
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trig;
+}
+
+static int kmx61_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct kmx61_data *data;
+ const char *name = NULL;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ if (id)
+ name = id->name;
+ else if (ACPI_HANDLE(&client->dev))
+ name = kmx61_match_acpi_device(&client->dev);
+ else
+ return -ENODEV;
+
+ data->acc_indio_dev =
+ kmx61_indiodev_setup(data, &kmx61_acc_info,
+ kmx61_acc_channels,
+ ARRAY_SIZE(kmx61_acc_channels),
+ name);
+ if (IS_ERR(data->acc_indio_dev))
+ return PTR_ERR(data->acc_indio_dev);
+
+ data->mag_indio_dev =
+ kmx61_indiodev_setup(data, &kmx61_mag_info,
+ kmx61_mag_channels,
+ ARRAY_SIZE(kmx61_mag_channels),
+ name);
+ if (IS_ERR(data->mag_indio_dev))
+ return PTR_ERR(data->mag_indio_dev);
+
+ ret = kmx61_chip_init(data);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq < 0)
+ client->irq = kmx61_gpio_probe(client, data);
+
+ if (client->irq >= 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ kmx61_data_rdy_trig_poll,
+ kmx61_event_handler,
+ IRQF_TRIGGER_RISING,
+ KMX61_IRQ_NAME,
+ data);
+ if (ret)
+ goto err_chip_uninit;
+
+ data->acc_dready_trig =
+ kmx61_trigger_setup(data, data->acc_indio_dev,
+ "dready");
+ if (IS_ERR(data->acc_dready_trig)) {
+ ret = PTR_ERR(data->acc_dready_trig);
+ goto err_chip_uninit;
+ }
+
+ data->mag_dready_trig =
+ kmx61_trigger_setup(data, data->mag_indio_dev,
+ "dready");
+ if (IS_ERR(data->mag_dready_trig)) {
+ ret = PTR_ERR(data->mag_dready_trig);
+ goto err_trigger_unregister_acc_dready;
+ }
+
+ data->motion_trig =
+ kmx61_trigger_setup(data, data->acc_indio_dev,
+ "any-motion");
+ if (IS_ERR(data->motion_trig)) {
+ ret = PTR_ERR(data->motion_trig);
+ goto err_trigger_unregister_mag_dready;
+ }
+
+ ret = iio_triggered_buffer_setup(data->acc_indio_dev,
+ &iio_pollfunc_store_time,
+ kmx61_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed to setup acc triggered buffer\n");
+ goto err_trigger_unregister_motion;
+ }
+
+ ret = iio_triggered_buffer_setup(data->mag_indio_dev,
+ &iio_pollfunc_store_time,
+ kmx61_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed to setup mag triggered buffer\n");
+ goto err_buffer_cleanup_acc;
+ }
+ }
+
+ ret = iio_device_register(data->acc_indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register acc iio device\n");
+ goto err_buffer_cleanup_mag;
+ }
+
+ ret = iio_device_register(data->mag_indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register mag iio device\n");
+ goto err_iio_unregister_acc;
+ }
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto err_iio_unregister_mag;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, KMX61_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ return 0;
+
+err_iio_unregister_mag:
+ iio_device_unregister(data->mag_indio_dev);
+err_iio_unregister_acc:
+ iio_device_unregister(data->acc_indio_dev);
+err_buffer_cleanup_mag:
+ if (client->irq >= 0)
+ iio_triggered_buffer_cleanup(data->mag_indio_dev);
+err_buffer_cleanup_acc:
+ if (client->irq >= 0)
+ iio_triggered_buffer_cleanup(data->acc_indio_dev);
+err_trigger_unregister_motion:
+ iio_trigger_unregister(data->motion_trig);
+err_trigger_unregister_mag_dready:
+ iio_trigger_unregister(data->mag_dready_trig);
+err_trigger_unregister_acc_dready:
+ iio_trigger_unregister(data->acc_dready_trig);
+err_chip_uninit:
+ kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
+ return ret;
+}
+
+static int kmx61_remove(struct i2c_client *client)
+{
+ struct kmx61_data *data = i2c_get_clientdata(client);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_device_unregister(data->acc_indio_dev);
+ iio_device_unregister(data->mag_indio_dev);
+
+ if (client->irq >= 0) {
+ iio_triggered_buffer_cleanup(data->acc_indio_dev);
+ iio_triggered_buffer_cleanup(data->mag_indio_dev);
+ iio_trigger_unregister(data->acc_dready_trig);
+ iio_trigger_unregister(data->mag_dready_trig);
+ iio_trigger_unregister(data->motion_trig);
+ }
+
+ mutex_lock(&data->lock);
+ kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
+ mutex_unlock(&data->lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int kmx61_suspend(struct device *dev)
+{
+ int ret;
+ struct kmx61_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ mutex_lock(&data->lock);
+ ret = kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG,
+ false);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int kmx61_resume(struct device *dev)
+{
+ u8 stby = 0;
+ struct kmx61_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ if (data->acc_stby)
+ stby |= KMX61_ACC_STBY_BIT;
+ if (data->mag_stby)
+ stby |= KMX61_MAG_STBY_BIT;
+
+ return kmx61_set_mode(data, stby, KMX61_ACC | KMX61_MAG, true);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int kmx61_runtime_suspend(struct device *dev)
+{
+ struct kmx61_data *data = i2c_get_clientdata(to_i2c_client(dev));
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int kmx61_runtime_resume(struct device *dev)
+{
+ struct kmx61_data *data = i2c_get_clientdata(to_i2c_client(dev));
+ u8 stby = 0;
+
+ if (!data->acc_ps)
+ stby |= KMX61_ACC_STBY_BIT;
+ if (!data->mag_ps)
+ stby |= KMX61_MAG_STBY_BIT;
+
+ return kmx61_set_mode(data, stby, KMX61_ACC | KMX61_MAG, true);
+}
+#endif
+
+static const struct dev_pm_ops kmx61_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(kmx61_suspend, kmx61_resume)
+ SET_RUNTIME_PM_OPS(kmx61_runtime_suspend, kmx61_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id kmx61_acpi_match[] = {
+ {"KMX61021", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, kmx61_acpi_match);
+
+static const struct i2c_device_id kmx61_id[] = {
+ {"kmx611021", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, kmx61_id);
+
+static struct i2c_driver kmx61_driver = {
+ .driver = {
+ .name = KMX61_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(kmx61_acpi_match),
+ .pm = &kmx61_pm_ops,
+ },
+ .probe = kmx61_probe,
+ .remove = kmx61_remove,
+ .id_table = kmx61_id,
+};
+
+module_i2c_driver(kmx61_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("KMX61 accelerometer/magnetometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index f971f79103ec..71333140d42c 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -178,6 +178,80 @@ static ssize_t iio_scan_el_show(struct device *dev,
return sprintf(buf, "%d\n", ret);
}
+/* Note NULL used as error indicator as it doesn't make sense. */
+static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
+ unsigned int masklength,
+ const unsigned long *mask)
+{
+ if (bitmap_empty(mask, masklength))
+ return NULL;
+ while (*av_masks) {
+ if (bitmap_subset(mask, av_masks, masklength))
+ return av_masks;
+ av_masks += BITS_TO_LONGS(masklength);
+ }
+ return NULL;
+}
+
+static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
+ const unsigned long *mask)
+{
+ if (!indio_dev->setup_ops->validate_scan_mask)
+ return true;
+
+ return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
+}
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffers request is plausible.
+ */
+static int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
+{
+ const unsigned long *mask;
+ unsigned long *trialmask;
+
+ trialmask = kmalloc(sizeof(*trialmask)*
+ BITS_TO_LONGS(indio_dev->masklength),
+ GFP_KERNEL);
+
+ if (trialmask == NULL)
+ return -ENOMEM;
+ if (!indio_dev->masklength) {
+ WARN_ON("Trying to set scanmask prior to registering buffer\n");
+ goto err_invalid_mask;
+ }
+ bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+ set_bit(bit, trialmask);
+
+ if (!iio_validate_scan_mask(indio_dev, trialmask))
+ goto err_invalid_mask;
+
+ if (indio_dev->available_scan_masks) {
+ mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ trialmask);
+ if (!mask)
+ goto err_invalid_mask;
+ }
+ bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+
+ kfree(trialmask);
+
+ return 0;
+
+err_invalid_mask:
+ kfree(trialmask);
+ return -EINVAL;
+}
+
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
clear_bit(bit, buffer->scan_mask);
@@ -309,115 +383,19 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
}
-static const char * const iio_scan_elements_group_name = "scan_elements";
-
-int iio_buffer_register(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels,
- int num_channels)
-{
- struct iio_dev_attr *p;
- struct attribute **attr;
- struct iio_buffer *buffer = indio_dev->buffer;
- int ret, i, attrn, attrcount, attrcount_orig = 0;
-
- if (buffer->attrs)
- indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
-
- if (buffer->scan_el_attrs != NULL) {
- attr = buffer->scan_el_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
- INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
- if (channels) {
- /* new magic */
- for (i = 0; i < num_channels; i++) {
- if (channels[i].scan_index < 0)
- continue;
-
- /* Establish necessary mask length */
- if (channels[i].scan_index >
- (int)indio_dev->masklength - 1)
- indio_dev->masklength
- = channels[i].scan_index + 1;
-
- ret = iio_buffer_add_channel_sysfs(indio_dev,
- &channels[i]);
- if (ret < 0)
- goto error_cleanup_dynamic;
- attrcount += ret;
- if (channels[i].type == IIO_TIMESTAMP)
- indio_dev->scan_index_timestamp =
- channels[i].scan_index;
- }
- if (indio_dev->masklength && buffer->scan_mask == NULL) {
- buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*buffer->scan_mask),
- GFP_KERNEL);
- if (buffer->scan_mask == NULL) {
- ret = -ENOMEM;
- goto error_cleanup_dynamic;
- }
- }
- }
-
- buffer->scan_el_group.name = iio_scan_elements_group_name;
-
- buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
- sizeof(buffer->scan_el_group.attrs[0]),
- GFP_KERNEL);
- if (buffer->scan_el_group.attrs == NULL) {
- ret = -ENOMEM;
- goto error_free_scan_mask;
- }
- if (buffer->scan_el_attrs)
- memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
- sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
- attrn = attrcount_orig;
-
- list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
- buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
- indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
-
- return 0;
-
-error_free_scan_mask:
- kfree(buffer->scan_mask);
-error_cleanup_dynamic:
- iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
-
- return ret;
-}
-EXPORT_SYMBOL(iio_buffer_register);
-
-void iio_buffer_unregister(struct iio_dev *indio_dev)
-{
- kfree(indio_dev->buffer->scan_mask);
- kfree(indio_dev->buffer->scan_el_group.attrs);
- iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
-}
-EXPORT_SYMBOL(iio_buffer_unregister);
-
-ssize_t iio_buffer_read_length(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t iio_buffer_read_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
- if (buffer->access->get_length)
- return sprintf(buf, "%d\n",
- buffer->access->get_length(buffer));
-
- return 0;
+ return sprintf(buf, "%d\n", buffer->length);
}
-EXPORT_SYMBOL(iio_buffer_read_length);
-ssize_t iio_buffer_write_length(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static ssize_t iio_buffer_write_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
@@ -428,47 +406,28 @@ ssize_t iio_buffer_write_length(struct device *dev,
if (ret)
return ret;
- if (buffer->access->get_length)
- if (val == buffer->access->get_length(buffer))
- return len;
+ if (val == buffer->length)
+ return len;
mutex_lock(&indio_dev->mlock);
if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
} else {
- if (buffer->access->set_length)
- buffer->access->set_length(buffer, val);
+ buffer->access->set_length(buffer, val);
ret = 0;
}
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
-EXPORT_SYMBOL(iio_buffer_write_length);
-ssize_t iio_buffer_show_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t iio_buffer_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
-EXPORT_SYMBOL(iio_buffer_show_enable);
-
-/* Note NULL used as error indicator as it doesn't make sense. */
-static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
- unsigned int masklength,
- const unsigned long *mask)
-{
- if (bitmap_empty(mask, masklength))
- return NULL;
- while (*av_masks) {
- if (bitmap_subset(mask, av_masks, masklength))
- return av_masks;
- av_masks += BITS_TO_LONGS(masklength);
- }
- return NULL;
-}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
@@ -680,6 +639,8 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+ } else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
+ indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
} else { /* Should never be reached */
ret = -EINVAL;
goto error_run_postdisable;
@@ -755,10 +716,10 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
-ssize_t iio_buffer_store_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
int ret;
bool requested_state;
@@ -790,83 +751,146 @@ done:
mutex_unlock(&indio_dev->mlock);
return (ret < 0) ? ret : len;
}
-EXPORT_SYMBOL(iio_buffer_store_enable);
-/**
- * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
- * @indio_dev: the iio device
- * @mask: scan mask to be checked
- *
- * Return true if exactly one bit is set in the scan mask, false otherwise. It
- * can be used for devices where only one channel can be active for sampling at
- * a time.
- */
-bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
- const unsigned long *mask)
-{
- return bitmap_weight(mask, indio_dev->masklength) == 1;
-}
-EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
-
-static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
- const unsigned long *mask)
-{
- if (!indio_dev->setup_ops->validate_scan_mask)
- return true;
+static const char * const iio_scan_elements_group_name = "scan_elements";
- return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
-}
+static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
+ iio_buffer_write_length);
+static struct device_attribute dev_attr_length_ro = __ATTR(length,
+ S_IRUGO, iio_buffer_read_length, NULL);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+ iio_buffer_show_enable, iio_buffer_store_enable);
-/**
- * iio_scan_mask_set() - set particular bit in the scan mask
- * @indio_dev: the iio device
- * @buffer: the buffer whose scan mask we are interested in
- * @bit: the bit to be set.
- *
- * Note that at this point we have no way of knowing what other
- * buffers might request, hence this code only verifies that the
- * individual buffers request is plausible.
- */
-int iio_scan_mask_set(struct iio_dev *indio_dev,
- struct iio_buffer *buffer, int bit)
+int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
- const unsigned long *mask;
- unsigned long *trialmask;
+ struct iio_dev_attr *p;
+ struct attribute **attr;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, i, attrn, attrcount, attrcount_orig = 0;
+ const struct iio_chan_spec *channels;
- trialmask = kmalloc(sizeof(*trialmask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
+ if (!buffer)
+ return 0;
- if (trialmask == NULL)
+ attrcount = 0;
+ if (buffer->attrs) {
+ while (buffer->attrs[attrcount] != NULL)
+ attrcount++;
+ }
+
+ buffer->buffer_group.name = "buffer";
+ buffer->buffer_group.attrs = kcalloc(attrcount + 3,
+ sizeof(*buffer->buffer_group.attrs), GFP_KERNEL);
+ if (!buffer->buffer_group.attrs)
return -ENOMEM;
- if (!indio_dev->masklength) {
- WARN_ON("Trying to set scanmask prior to registering buffer\n");
- goto err_invalid_mask;
+
+ if (buffer->access->set_length)
+ buffer->buffer_group.attrs[0] = &dev_attr_length.attr;
+ else
+ buffer->buffer_group.attrs[0] = &dev_attr_length_ro.attr;
+ buffer->buffer_group.attrs[1] = &dev_attr_enable.attr;
+ if (buffer->attrs)
+ memcpy(&buffer->buffer_group.attrs[2], buffer->attrs,
+ sizeof(*&buffer->buffer_group.attrs) * attrcount);
+ buffer->buffer_group.attrs[attrcount+2] = NULL;
+
+ indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
+
+ if (buffer->scan_el_attrs != NULL) {
+ attr = buffer->scan_el_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
}
- bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
- set_bit(bit, trialmask);
+ attrcount = attrcount_orig;
+ INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+ channels = indio_dev->channels;
+ if (channels) {
+ /* new magic */
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ if (channels[i].scan_index < 0)
+ continue;
- if (!iio_validate_scan_mask(indio_dev, trialmask))
- goto err_invalid_mask;
+ /* Establish necessary mask length */
+ if (channels[i].scan_index >
+ (int)indio_dev->masklength - 1)
+ indio_dev->masklength
+ = channels[i].scan_index + 1;
- if (indio_dev->available_scan_masks) {
- mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- trialmask);
- if (!mask)
- goto err_invalid_mask;
+ ret = iio_buffer_add_channel_sysfs(indio_dev,
+ &channels[i]);
+ if (ret < 0)
+ goto error_cleanup_dynamic;
+ attrcount += ret;
+ if (channels[i].type == IIO_TIMESTAMP)
+ indio_dev->scan_index_timestamp =
+ channels[i].scan_index;
+ }
+ if (indio_dev->masklength && buffer->scan_mask == NULL) {
+ buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*buffer->scan_mask),
+ GFP_KERNEL);
+ if (buffer->scan_mask == NULL) {
+ ret = -ENOMEM;
+ goto error_cleanup_dynamic;
+ }
+ }
}
- bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
- kfree(trialmask);
+ buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+ buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+ sizeof(buffer->scan_el_group.attrs[0]),
+ GFP_KERNEL);
+ if (buffer->scan_el_group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_scan_mask;
+ }
+ if (buffer->scan_el_attrs)
+ memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
+ sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+ attrn = attrcount_orig;
+
+ list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+ buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
return 0;
-err_invalid_mask:
- kfree(trialmask);
- return -EINVAL;
+error_free_scan_mask:
+ kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+ iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
+ kfree(indio_dev->buffer->buffer_group.attrs);
+
+ return ret;
+}
+
+void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
+{
+ if (!indio_dev->buffer)
+ return;
+
+ kfree(indio_dev->buffer->scan_mask);
+ kfree(indio_dev->buffer->buffer_group.attrs);
+ kfree(indio_dev->buffer->scan_el_group.attrs);
+ iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+}
+
+/**
+ * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
+ * @indio_dev: the iio device
+ * @mask: scan mask to be checked
+ *
+ * Return true if exactly one bit is set in the scan mask, false otherwise. It
+ * can be used for devices where only one channel can be active for sampling at
+ * a time.
+ */
+bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
+ const unsigned long *mask)
+{
+ return bitmap_weight(mask, indio_dev->masklength) == 1;
}
-EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
int iio_scan_mask_query(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
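For reference, a minimal sketch of how a driver would supply the validate_scan_mask hook consumed by iio_scan_mask_set() above, for example via iio_validate_scan_mask_onehot() on parts that can sample only one channel at a time. The foo_* names and the trigger handler are hypothetical and not taken from this series:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

/* Driver's own pollfunc bottom half (hypothetical). */
static irqreturn_t foo_trigger_handler(int irq, void *p);

/* Reject any scan mask with more than one channel enabled. */
static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
};

static int foo_setup_buffer(struct iio_dev *indio_dev)
{
	return iio_triggered_buffer_setup(indio_dev,
					  &iio_pollfunc_store_time,
					  foo_trigger_handler,
					  &foo_buffer_setup_ops);
}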
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index af3e76d652ba..aaba9d3d980e 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -70,6 +70,11 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
[IIO_HUMIDITYRELATIVE] = "humidityrelative",
+ [IIO_ACTIVITY] = "activity",
+ [IIO_STEPS] = "steps",
+ [IIO_ENERGY] = "energy",
+ [IIO_DISTANCE] = "distance",
+ [IIO_VELOCITY] = "velocity",
};
static const char * const iio_modifier_names[] = {
@@ -91,6 +96,11 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_NORTH_TRUE] = "from_north_true",
[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
+ [IIO_MOD_RUNNING] = "running",
+ [IIO_MOD_JOGGING] = "jogging",
+ [IIO_MOD_WALKING] = "walking",
+ [IIO_MOD_STILL] = "still",
+ [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
};
/* relies on pairs of these shared then separate */
@@ -113,6 +123,11 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
[IIO_CHAN_INFO_INT_TIME] = "integration_time",
+ [IIO_CHAN_INFO_ENABLE] = "en",
+ [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
+ [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
+ [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
+ [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
};
/**
@@ -1035,7 +1050,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
if (!ptr)
return NULL;
- /* use raw alloc_dr for kmalloc caller tracing */
iio_dev = iio_device_alloc(sizeof_priv);
if (iio_dev) {
*ptr = iio_dev;
@@ -1127,6 +1141,29 @@ static const struct file_operations iio_buffer_fileops = {
.compat_ioctl = iio_ioctl,
};
+static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
+{
+ int i, j;
+ const struct iio_chan_spec *channels = indio_dev->channels;
+
+ if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
+ return 0;
+
+ for (i = 0; i < indio_dev->num_channels - 1; i++) {
+ if (channels[i].scan_index < 0)
+ continue;
+ for (j = i + 1; j < indio_dev->num_channels; j++)
+ if (channels[i].scan_index == channels[j].scan_index) {
+ dev_err(&indio_dev->dev,
+ "Duplicate scan index %d\n",
+ channels[i].scan_index);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
/**
@@ -1141,6 +1178,10 @@ int iio_device_register(struct iio_dev *indio_dev)
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
+ ret = iio_check_unique_scan_index(indio_dev);
+ if (ret < 0)
+ return ret;
+
/* configure elements for the chrdev */
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
@@ -1150,11 +1191,19 @@ int iio_device_register(struct iio_dev *indio_dev)
"Failed to register debugfs interfaces\n");
return ret;
}
+
+ ret = iio_buffer_alloc_sysfs_and_mask(indio_dev);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to create buffer sysfs interfaces\n");
+ goto error_unreg_debugfs;
+ }
+
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register sysfs interfaces\n");
- goto error_unreg_debugfs;
+ goto error_buffer_free_sysfs;
}
ret = iio_device_register_eventset(indio_dev);
if (ret) {
@@ -1187,6 +1236,8 @@ error_unreg_eventset:
iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
iio_device_unregister_sysfs(indio_dev);
+error_buffer_free_sysfs:
+ iio_buffer_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
iio_device_unregister_debugfs(indio_dev);
return ret;
@@ -1215,6 +1266,8 @@ void iio_device_unregister(struct iio_dev *indio_dev)
iio_buffer_wakeup_poll(indio_dev);
mutex_unlock(&indio_dev->info_exist_lock);
+
+ iio_buffer_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 0c1e37e3120a..a4b397048f71 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -197,6 +197,7 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_ROC] = "roc",
[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+ [IIO_EV_TYPE_CHANGE] = "change",
};
static const char * const iio_ev_dir_text[] = {
@@ -327,9 +328,15 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
for_each_set_bit(i, mask, sizeof(*mask)*8) {
if (i >= ARRAY_SIZE(iio_ev_info_text))
return -EINVAL;
- postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
- iio_ev_type_text[type], iio_ev_dir_text[dir],
- iio_ev_info_text[i]);
+ if (dir != IIO_EV_DIR_NONE)
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_ev_type_text[type],
+ iio_ev_dir_text[dir],
+ iio_ev_info_text[i]);
+ else
+ postfix = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_ev_type_text[type],
+ iio_ev_info_text[i]);
if (postfix == NULL)
return -ENOMEM;
@@ -404,7 +411,7 @@ static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
int j, ret, attrcount = 0;
- /* Dynically created from the channels array */
+ /* Dynamically created from the channels array */
for (j = 0; j < indio_dev->num_channels; j++) {
ret = iio_device_add_event_sysfs(indio_dev,
&indio_dev->channels[j]);
diff --git a/drivers/iio/industrialio-triggered-buffer.c b/drivers/iio/industrialio-triggered-buffer.c
index d6f54930b34a..15a5341b5e7b 100644
--- a/drivers/iio/industrialio-triggered-buffer.c
+++ b/drivers/iio/industrialio-triggered-buffer.c
@@ -32,7 +32,7 @@ static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
*
* This function combines some common tasks which will normally be performed
* when setting up a triggered buffer. It will allocate the buffer and the
- * pollfunc, as well as register the buffer with the IIO core.
+ * pollfunc.
*
* Before calling this function the indio_dev structure should already be
* completely initialized, but not yet registered. In practice this means that
@@ -49,7 +49,7 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
struct iio_buffer *buffer;
int ret;
- buffer = iio_kfifo_allocate(indio_dev);
+ buffer = iio_kfifo_allocate();
if (!buffer) {
ret = -ENOMEM;
goto error_ret;
@@ -78,16 +78,8 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
- ret = iio_buffer_register(indio_dev,
- indio_dev->channels,
- indio_dev->num_channels);
- if (ret)
- goto error_dealloc_pollfunc;
-
return 0;
-error_dealloc_pollfunc:
- iio_dealloc_pollfunc(indio_dev->pollfunc);
error_kfifo_free:
iio_kfifo_free(indio_dev->buffer);
error_ret:
@@ -101,7 +93,6 @@ EXPORT_SYMBOL(iio_triggered_buffer_setup);
*/
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
{
- iio_buffer_unregister(indio_dev);
iio_dealloc_pollfunc(indio_dev->pollfunc);
iio_kfifo_free(indio_dev->buffer);
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 866fe904cba2..c8bad3cf891d 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -116,8 +116,11 @@ static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
if (!iiospec->args_count)
return 0;
- if (iiospec->args[0] >= indio_dev->num_channels)
+ if (iiospec->args[0] >= indio_dev->num_channels) {
+ dev_err(&indio_dev->dev, "invalid channel index %u\n",
+ iiospec->args[0]);
return -EINVAL;
+ }
return iiospec->args[0];
}
@@ -449,6 +452,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
if (val2 == NULL)
val2 = &unused;
+ if (!iio_channel_has_info(chan->channel, info))
+ return -EINVAL;
+
if (chan->indio_dev->info->read_raw_multi) {
ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
chan->channel, INDIO_MAX_RAW_ELEMENTS,
@@ -631,3 +637,28 @@ err_unlock:
return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
+
+static int iio_channel_write(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum info)
+{
+ return chan->indio_dev->info->write_raw(chan->indio_dev,
+ chan->channel, val, val2, info);
+}
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_write_channel_raw);
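A minimal consumer-side sketch of the iio_write_channel_raw() export added above; the "aux-dac" channel name and foo_set_level() are made up for illustration:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/iio/consumer.h>

static int foo_set_level(struct device *dev, int code)
{
	struct iio_channel *chan;
	int ret;

	/* The "aux-dac" mapping would come from platform data or DT. */
	chan = iio_channel_get(dev, "aux-dac");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_write_channel_raw(chan, code);

	iio_channel_release(chan);
	return ret;
}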
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 7134e8ada09a..b2beea01c49b 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -47,30 +47,6 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
return ret;
}
-static int iio_get_length_kfifo(struct iio_buffer *r)
-{
- return r->length;
-}
-
-static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_LENGTH_ATTR;
-
-static struct attribute *iio_kfifo_attributes[] = {
- &dev_attr_length.attr,
- &dev_attr_enable.attr,
- NULL,
-};
-
-static struct attribute_group iio_kfifo_attribute_group = {
- .attrs = iio_kfifo_attributes,
- .name = "buffer",
-};
-
-static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
-{
- return r->bytes_per_datum;
-}
-
static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
struct iio_kfifo *kf = iio_to_kfifo(r);
@@ -159,26 +135,25 @@ static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.read_first_n = &iio_read_first_n_kfifo,
.data_available = iio_kfifo_buf_data_available,
.request_update = &iio_request_update_kfifo,
- .get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
- .get_length = &iio_get_length_kfifo,
.set_length = &iio_set_length_kfifo,
.release = &iio_kfifo_buffer_release,
};
-struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+struct iio_buffer *iio_kfifo_allocate(void)
{
struct iio_kfifo *kf;
- kf = kzalloc(sizeof *kf, GFP_KERNEL);
+ kf = kzalloc(sizeof(*kf), GFP_KERNEL);
if (!kf)
return NULL;
+
kf->update_needed = true;
iio_buffer_init(&kf->buffer);
- kf->buffer.attrs = &iio_kfifo_attribute_group;
kf->buffer.access = &kfifo_access_funcs;
kf->buffer.length = 2;
mutex_init(&kf->user_lock);
+
return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);
@@ -189,4 +164,58 @@ void iio_kfifo_free(struct iio_buffer *r)
}
EXPORT_SYMBOL(iio_kfifo_free);
+static void devm_iio_kfifo_release(struct device *dev, void *res)
+{
+ iio_kfifo_free(*(struct iio_buffer **)res);
+}
+
+static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
+{
+ struct iio_buffer **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+/**
+ * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
+ * @dev: Device to allocate kfifo buffer for
+ *
+ * RETURNS:
+ * Pointer to allocated iio_buffer on success, NULL on failure.
+ */
+struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
+{
+ struct iio_buffer **ptr, *r;
+
+ ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ r = iio_kfifo_allocate();
+ if (r) {
+ *ptr = r;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(devm_iio_kfifo_allocate);
+
+/**
+ * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
+ * @dev: Device the buffer belongs to
+ * @r: The buffer associated with the device
+ */
+void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
+{
+ WARN_ON(devres_release(dev, devm_iio_kfifo_release,
+ devm_iio_kfifo_match, r));
+}
+EXPORT_SYMBOL(devm_iio_kfifo_free);
+
MODULE_LICENSE("GPL");
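A rough probe-time sketch of how a driver might pair the new devm_iio_kfifo_allocate() with iio_device_attach_buffer(); illustrative only, not code from this series, and foo_setup_fifo() is a hypothetical helper:

#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

static int foo_setup_fifo(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_kfifo_allocate(dev);
	if (!buffer)
		return -ENOMEM;

	/* The kfifo is released automatically through devres. */
	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;

	return 0;
}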
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 5bea821adcae..ae68c64bdad3 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -48,6 +48,17 @@ config CM32181
To compile this driver as a module, choose M here:
the module will be called cm32181.
+config CM3232
+ depends on I2C
+ tristate "CM3232 ambient light sensor"
+ help
+ Say Y here if you use the cm3232.
+ This option enables the Capella Microsystems cm3232
+ ambient light sensor driver.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cm3232.
+
config CM36651
depends on I2C
tristate "CM36651 driver"
@@ -95,6 +106,9 @@ config HID_SENSOR_ALS
Say yes here to build support for the HID SENSOR
Ambient light sensor.
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-als.
+
config HID_SENSOR_PROX
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -109,6 +123,16 @@ config HID_SENSOR_PROX
To compile this driver as a module, choose M here: the
module will be called hid-sensor-prox.
+config JSA1212
+ tristate "JSA1212 ALS and proximity sensor driver"
+ depends on I2C
+ help
+ Say Y here if you want to build an IIO driver for the JSA1212
+ proximity & ALS sensor device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called jsa1212.
+
config SENSORS_LM3533
tristate "LM3533 ambient light sensor"
depends on MFD_LM3533
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 47877a36cc12..b12a5160d9e0 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -7,11 +7,13 @@ obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_CM32181) += cm32181.o
+obj-$(CONFIG_CM3232) += cm3232.o
obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
obj-$(CONFIG_ISL29125) += isl29125.o
+obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index ad36b294e4d5..5d12ae54d088 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -169,7 +169,7 @@ static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
* @cm32181: pointer of struct cm32181.
*
* Convert sensor raw data to lux. It depends on integration
- * time and claibscale variable.
+ * time and calibscale variable.
*
* Return: Positive value is lux, otherwise is error code.
*/
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
new file mode 100644
index 000000000000..90e3519a91de
--- /dev/null
+++ b/drivers/iio/light/cm3232.c
@@ -0,0 +1,403 @@
+/*
+ * CM3232 Ambient Light Sensor
+ *
+ * Copyright (C) 2014-2015 Capella Microsystems Inc.
+ * Author: Kevin Tsai <ktsai@capellamicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published
+ * by the Free Software Foundation.
+ *
+ * IIO driver for CM3232 (7-bit I2C slave address 0x10).
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/init.h>
+
+/* Registers Address */
+#define CM3232_REG_ADDR_CMD 0x00
+#define CM3232_REG_ADDR_ALS 0x50
+#define CM3232_REG_ADDR_ID 0x53
+
+#define CM3232_CMD_ALS_DISABLE BIT(0)
+
+#define CM3232_CMD_ALS_IT_SHIFT 2
+#define CM3232_CMD_ALS_IT_MASK (BIT(2) | BIT(3) | BIT(4))
+#define CM3232_CMD_ALS_IT_DEFAULT (0x01 << CM3232_CMD_ALS_IT_SHIFT)
+
+#define CM3232_CMD_ALS_RESET BIT(6)
+
+#define CM3232_CMD_DEFAULT CM3232_CMD_ALS_IT_DEFAULT
+
+#define CM3232_HW_ID 0x32
+#define CM3232_CALIBSCALE_DEFAULT 100000
+#define CM3232_CALIBSCALE_RESOLUTION 100000
+#define CM3232_MLUX_PER_LUX 1000
+
+#define CM3232_MLUX_PER_BIT_DEFAULT 64
+#define CM3232_MLUX_PER_BIT_BASE_IT 100000
+
+static const struct {
+ int val;
+ int val2;
+ u8 it;
+} cm3232_als_it_scales[] = {
+ {0, 100000, 0}, /* 0.100000 */
+ {0, 200000, 1}, /* 0.200000 */
+ {0, 400000, 2}, /* 0.400000 */
+ {0, 800000, 3}, /* 0.800000 */
+ {1, 600000, 4}, /* 1.600000 */
+ {3, 200000, 5}, /* 3.200000 */
+};
+
+struct cm3232_als_info {
+ u8 regs_cmd_default;
+ u8 hw_id;
+ int calibscale;
+ int mlux_per_bit;
+ int mlux_per_bit_base_it;
+};
+
+static struct cm3232_als_info cm3232_als_info_default = {
+ .regs_cmd_default = CM3232_CMD_DEFAULT,
+ .hw_id = CM3232_HW_ID,
+ .calibscale = CM3232_CALIBSCALE_DEFAULT,
+ .mlux_per_bit = CM3232_MLUX_PER_BIT_DEFAULT,
+ .mlux_per_bit_base_it = CM3232_MLUX_PER_BIT_BASE_IT,
+};
+
+struct cm3232_chip {
+ struct i2c_client *client;
+ struct cm3232_als_info *als_info;
+ u8 regs_cmd;
+ u16 regs_als;
+};
+
+/**
+ * cm3232_reg_init() - Initialize CM3232
+ * @chip: pointer of struct cm3232_chip.
+ *
+ * Check and initialize CM3232 ambient light sensor.
+ *
+ * Return: 0 for success; otherwise for error code.
+ */
+static int cm3232_reg_init(struct cm3232_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ chip->als_info = &cm3232_als_info_default;
+
+ /* Identify device */
+ ret = i2c_smbus_read_word_data(client, CM3232_REG_ADDR_ID);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "Error reading addr_id\n");
+ return ret;
+ }
+
+ if ((ret & 0xFF) != chip->als_info->hw_id)
+ return -ENODEV;
+
+ /* Disable and reset device */
+ chip->regs_cmd = CM3232_CMD_ALS_DISABLE | CM3232_CMD_ALS_RESET;
+ ret = i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
+ chip->regs_cmd);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "Error writing reg_cmd\n");
+ return ret;
+ }
+
+ /* Register default value */
+ chip->regs_cmd = chip->als_info->regs_cmd_default;
+
+ /* Configure register */
+ ret = i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
+ chip->regs_cmd);
+ if (ret < 0)
+ dev_err(&chip->client->dev, "Error writing reg_cmd\n");
+
+ return 0;
+}
+
+/**
+ * cm3232_read_als_it() - Get sensor integration time
+ * @chip: pointer of struct cm3232_chip
+ * @val: pointer of int to load the integration time (second).
+ * @val2: pointer of int to load the integration time (microsecond).
+ *
+ * Report the current integration time.
+ *
+ * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
+ */
+static int cm3232_read_als_it(struct cm3232_chip *chip, int *val, int *val2)
+{
+ u16 als_it;
+ int i;
+
+ als_it = chip->regs_cmd;
+ als_it &= CM3232_CMD_ALS_IT_MASK;
+ als_it >>= CM3232_CMD_ALS_IT_SHIFT;
+ for (i = 0; i < ARRAY_SIZE(cm3232_als_it_scales); i++) {
+ if (als_it == cm3232_als_it_scales[i].it) {
+ *val = cm3232_als_it_scales[i].val;
+ *val2 = cm3232_als_it_scales[i].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm3232_write_als_it() - Write sensor integration time
+ * @chip: pointer of struct cm3232_chip.
+ * @val: integration time in seconds.
+ * @val2: integration time in microseconds.
+ *
+ * Convert integration time to sensor value.
+ *
+ * Return: i2c_smbus_write_byte_data command return value.
+ */
+static int cm3232_write_als_it(struct cm3232_chip *chip, int val, int val2)
+{
+ struct i2c_client *client = chip->client;
+ u16 als_it, cmd;
+ int i;
+ s32 ret;
+
+ for (i = 0; i < ARRAY_SIZE(cm3232_als_it_scales); i++) {
+ if (val == cm3232_als_it_scales[i].val &&
+ val2 == cm3232_als_it_scales[i].val2) {
+
+ als_it = cm3232_als_it_scales[i].it;
+ als_it <<= CM3232_CMD_ALS_IT_SHIFT;
+
+ cmd = chip->regs_cmd & ~CM3232_CMD_ALS_IT_MASK;
+ cmd |= als_it;
+ ret = i2c_smbus_write_byte_data(client,
+ CM3232_REG_ADDR_CMD,
+ cmd);
+ if (ret < 0)
+ return ret;
+ chip->regs_cmd = cmd;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/**
+ * cm3232_get_lux() - report current lux value
+ * @chip: pointer of struct cm3232_chip.
+ *
+ * Convert sensor data to lux. It depends on integration
+ * time and calibscale variable.
+ *
+ * Return: Zero or positive value is lux, otherwise error code.
+ */
+static int cm3232_get_lux(struct cm3232_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ struct cm3232_als_info *als_info = chip->als_info;
+ int ret;
+ int val, val2;
+ int als_it;
+ u64 lux;
+
+ /* Calculate mlux per bit based on als_it */
+ ret = cm3232_read_als_it(chip, &val, &val2);
+ if (ret < 0)
+ return -EINVAL;
+ als_it = val * 1000000 + val2;
+ lux = (__force u64)als_info->mlux_per_bit;
+ lux *= als_info->mlux_per_bit_base_it;
+ lux = div_u64(lux, als_it);
+
+ ret = i2c_smbus_read_word_data(client, CM3232_REG_ADDR_ALS);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading reg_addr_als\n");
+ return ret;
+ }
+
+ chip->regs_als = (u16)ret;
+ lux *= chip->regs_als;
+ lux *= als_info->calibscale;
+ lux = div_u64(lux, CM3232_CALIBSCALE_RESOLUTION);
+ lux = div_u64(lux, CM3232_MLUX_PER_LUX);
+
+ if (lux > 0xFFFF)
+ lux = 0xFFFF;
+
+ return (int)lux;
+}
+
+static int cm3232_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cm3232_chip *chip = iio_priv(indio_dev);
+ struct cm3232_als_info *als_info = chip->als_info;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = cm3232_get_lux(chip);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = als_info->calibscale;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ return cm3232_read_als_it(chip, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+static int cm3232_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cm3232_chip *chip = iio_priv(indio_dev);
+ struct cm3232_als_info *als_info = chip->als_info;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ als_info->calibscale = val;
+ return 0;
+ case IIO_CHAN_INFO_INT_TIME:
+ return cm3232_write_als_it(chip, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm3232_get_it_available() - Get available ALS IT value
+ * @dev: pointer of struct device.
+ * @attr: pointer of struct device_attribute.
+ * @buf: pointer of return string buffer.
+ *
+ * Display the available integration times in seconds.
+ *
+ * Return: string length.
+ */
+static ssize_t cm3232_get_it_available(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len;
+
+ for (i = 0, len = 0; i < ARRAY_SIZE(cm3232_als_it_scales); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ",
+ cm3232_als_it_scales[i].val,
+ cm3232_als_it_scales[i].val2);
+ return len + scnprintf(buf + len, PAGE_SIZE - len, "\n");
+}
+
+static const struct iio_chan_spec cm3232_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ }
+};
+
+static IIO_DEVICE_ATTR(in_illuminance_integration_time_available,
+ S_IRUGO, cm3232_get_it_available, NULL, 0);
+
+static struct attribute *cm3232_attributes[] = {
+ &iio_dev_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm3232_attribute_group = {
+ .attrs = cm3232_attributes
+};
+
+static const struct iio_info cm3232_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cm3232_read_raw,
+ .write_raw = &cm3232_write_raw,
+ .attrs = &cm3232_attribute_group,
+};
+
+static int cm3232_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm3232_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = cm3232_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cm3232_channels);
+ indio_dev->info = &cm3232_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = cm3232_reg_init(chip);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: register init failed\n",
+ __func__);
+ return ret;
+ }
+
+ return iio_device_register(indio_dev);
+}
+
+static int cm3232_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
+ CM3232_CMD_ALS_DISABLE);
+
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id cm3232_id[] = {
+ {"cm3232", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cm3232_id);
+
+static const struct of_device_id cm3232_of_match[] = {
+ {.compatible = "capella,cm3232"},
+ {}
+};
+
+static struct i2c_driver cm3232_driver = {
+ .driver = {
+ .name = "cm3232",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cm3232_of_match),
+ },
+ .id_table = cm3232_id,
+ .probe = cm3232_probe,
+ .remove = cm3232_remove,
+};
+
+module_i2c_driver(cm3232_driver);
+
+MODULE_AUTHOR("Kevin Tsai <ktsai@capellamicro.com>");
+MODULE_DESCRIPTION("CM3232 ambient light sensor driver");
+MODULE_LICENSE("GPL");
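As a worked example of the cm3232_get_lux() arithmetic above (the raw count is chosen purely for illustration): at the default 200 ms integration time the per-count weight is 64 * 100000 / 200000 = 32 mlux, so a raw ALS reading of 1000 counts gives 32000 mlux, and with the default calibscale of 100000 (a factor of 1.0) the reported value is 32000 / 1000 = 32 lux.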
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index a5283d75c096..948acfc38b8c 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -80,7 +80,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
@@ -97,15 +96,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
break;
}
if (report_id >= 0) {
- poll_value = hid_sensor_read_poll_value(
- &als_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
-
hid_sensor_power_state(&als_state->common_attributes,
true);
- msleep_interruptible(poll_value * 2);
-
*val = sensor_hub_input_attr_get_raw_value(
als_state->common_attributes.hsdev,
HID_USAGE_SENSOR_ALS, address,
@@ -381,6 +373,7 @@ static struct platform_driver hid_als_platform_driver = {
.id_table = hid_als_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_als_probe,
.remove = hid_als_remove,
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index f5a514698fd8..3ecf79ed08ac 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -75,7 +75,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
@@ -92,16 +91,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
break;
}
if (report_id >= 0) {
- poll_value = hid_sensor_read_poll_value(
- &prox_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
-
hid_sensor_power_state(&prox_state->common_attributes,
true);
-
- msleep_interruptible(poll_value * 2);
-
*val = sensor_hub_input_attr_get_raw_value(
prox_state->common_attributes.hsdev,
HID_USAGE_SENSOR_PROX, address,
@@ -373,6 +364,7 @@ static struct platform_driver hid_prox_platform_driver = {
.id_table = hid_prox_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_prox_probe,
.remove = hid_prox_remove,
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
new file mode 100644
index 000000000000..29de7e7d9562
--- /dev/null
+++ b/drivers/iio/light/jsa1212.c
@@ -0,0 +1,471 @@
+/*
+ * JSA1212 Ambient Light & Proximity Sensor Driver
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * JSA1212 I2C slave address: 0x44(ADDR tied to GND), 0x45(ADDR tied to VDD)
+ *
+ * TODO: Interrupt support, thresholds, range support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* JSA1212 reg address */
+#define JSA1212_CONF_REG 0x01
+#define JSA1212_INT_REG 0x02
+#define JSA1212_PXS_LT_REG 0x03
+#define JSA1212_PXS_HT_REG 0x04
+#define JSA1212_ALS_TH1_REG 0x05
+#define JSA1212_ALS_TH2_REG 0x06
+#define JSA1212_ALS_TH3_REG 0x07
+#define JSA1212_PXS_DATA_REG 0x08
+#define JSA1212_ALS_DT1_REG 0x09
+#define JSA1212_ALS_DT2_REG 0x0A
+#define JSA1212_ALS_RNG_REG 0x0B
+#define JSA1212_MAX_REG 0x0C
+
+/* JSA1212 reg masks */
+#define JSA1212_CONF_MASK 0xFF
+#define JSA1212_INT_MASK 0xFF
+#define JSA1212_PXS_LT_MASK 0xFF
+#define JSA1212_PXS_HT_MASK 0xFF
+#define JSA1212_ALS_TH1_MASK 0xFF
+#define JSA1212_ALS_TH2_LT_MASK 0x0F
+#define JSA1212_ALS_TH2_HT_MASK 0xF0
+#define JSA1212_ALS_TH3_MASK 0xFF
+#define JSA1212_PXS_DATA_MASK 0xFF
+#define JSA1212_ALS_DATA_MASK 0x0FFF
+#define JSA1212_ALS_DT1_MASK 0xFF
+#define JSA1212_ALS_DT2_MASK 0x0F
+#define JSA1212_ALS_RNG_MASK 0x07
+
+/* JSA1212 CONF REG bits */
+#define JSA1212_CONF_PXS_MASK 0x80
+#define JSA1212_CONF_PXS_ENABLE 0x80
+#define JSA1212_CONF_PXS_DISABLE 0x00
+#define JSA1212_CONF_ALS_MASK 0x04
+#define JSA1212_CONF_ALS_ENABLE 0x04
+#define JSA1212_CONF_ALS_DISABLE 0x00
+#define JSA1212_CONF_IRDR_MASK 0x08
+/* Proximity sensing IRDR current sink settings */
+#define JSA1212_CONF_IRDR_200MA 0x08
+#define JSA1212_CONF_IRDR_100MA 0x00
+#define JSA1212_CONF_PXS_SLP_MASK 0x70
+#define JSA1212_CONF_PXS_SLP_0MS 0x70
+#define JSA1212_CONF_PXS_SLP_12MS 0x60
+#define JSA1212_CONF_PXS_SLP_50MS 0x50
+#define JSA1212_CONF_PXS_SLP_75MS 0x40
+#define JSA1212_CONF_PXS_SLP_100MS 0x30
+#define JSA1212_CONF_PXS_SLP_200MS 0x20
+#define JSA1212_CONF_PXS_SLP_400MS 0x10
+#define JSA1212_CONF_PXS_SLP_800MS 0x00
+
+/* JSA1212 INT REG bits */
+#define JSA1212_INT_CTRL_MASK 0x01
+#define JSA1212_INT_CTRL_EITHER 0x00
+#define JSA1212_INT_CTRL_BOTH 0x01
+#define JSA1212_INT_ALS_PRST_MASK 0x06
+#define JSA1212_INT_ALS_PRST_1CONV 0x00
+#define JSA1212_INT_ALS_PRST_4CONV 0x02
+#define JSA1212_INT_ALS_PRST_8CONV 0x04
+#define JSA1212_INT_ALS_PRST_16CONV 0x06
+#define JSA1212_INT_ALS_FLAG_MASK 0x08
+#define JSA1212_INT_ALS_FLAG_CLR 0x00
+#define JSA1212_INT_PXS_PRST_MASK 0x60
+#define JSA1212_INT_PXS_PRST_1CONV 0x00
+#define JSA1212_INT_PXS_PRST_4CONV 0x20
+#define JSA1212_INT_PXS_PRST_8CONV 0x40
+#define JSA1212_INT_PXS_PRST_16CONV 0x60
+#define JSA1212_INT_PXS_FLAG_MASK 0x80
+#define JSA1212_INT_PXS_FLAG_CLR 0x00
+
+/* JSA1212 ALS RNG REG bits */
+#define JSA1212_ALS_RNG_0_2048 0x00
+#define JSA1212_ALS_RNG_0_1024 0x01
+#define JSA1212_ALS_RNG_0_512 0x02
+#define JSA1212_ALS_RNG_0_256 0x03
+#define JSA1212_ALS_RNG_0_128 0x04
+
+/* JSA1212 INT threshold range */
+#define JSA1212_ALS_TH_MIN 0x0000
+#define JSA1212_ALS_TH_MAX 0x0FFF
+#define JSA1212_PXS_TH_MIN 0x00
+#define JSA1212_PXS_TH_MAX 0xFF
+
+#define JSA1212_ALS_DELAY_MS 200
+#define JSA1212_PXS_DELAY_MS 100
+
+#define JSA1212_DRIVER_NAME "jsa1212"
+#define JSA1212_REGMAP_NAME "jsa1212_regmap"
+
+enum jsa1212_op_mode {
+ JSA1212_OPMODE_ALS_EN,
+ JSA1212_OPMODE_PXS_EN,
+};
+
+struct jsa1212_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 als_rng_idx;
+ bool als_en; /* ALS enable status */
+ bool pxs_en; /* proximity enable status */
+ struct regmap *regmap;
+};
+
+/* ALS range idx to val mapping */
+static const int jsa1212_als_range_val[] = {2048, 1024, 512, 256, 128,
+ 128, 128, 128};
+
+/* Enables or disables ALS function based on status */
+static int jsa1212_als_enable(struct jsa1212_data *data, u8 status)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, JSA1212_CONF_REG,
+ JSA1212_CONF_ALS_MASK,
+ status);
+ if (ret < 0)
+ return ret;
+
+ data->als_en = !!status;
+
+ return 0;
+}
+
+/* Enables or disables PXS function based on status */
+static int jsa1212_pxs_enable(struct jsa1212_data *data, u8 status)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, JSA1212_CONF_REG,
+ JSA1212_CONF_PXS_MASK,
+ status);
+ if (ret < 0)
+ return ret;
+
+ data->pxs_en = !!status;
+
+ return 0;
+}
+
+static int jsa1212_read_als_data(struct jsa1212_data *data,
+ unsigned int *val)
+{
+ int ret;
+ __le16 als_data;
+
+ ret = jsa1212_als_enable(data, JSA1212_CONF_ALS_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ /* Delay for data output */
+ msleep(JSA1212_ALS_DELAY_MS);
+
+ /* Read 12 bit data */
+ ret = regmap_bulk_read(data->regmap, JSA1212_ALS_DT1_REG, &als_data, 2);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "als data read err\n");
+ goto als_data_read_err;
+ }
+
+ *val = le16_to_cpu(als_data);
+
+als_data_read_err:
+ return jsa1212_als_enable(data, JSA1212_CONF_ALS_DISABLE);
+}
+
+static int jsa1212_read_pxs_data(struct jsa1212_data *data,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int pxs_data;
+
+ ret = jsa1212_pxs_enable(data, JSA1212_CONF_PXS_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ /* Delay for data output */
+ msleep(JSA1212_PXS_DELAY_MS);
+
+ /* Read out all data */
+ ret = regmap_read(data->regmap, JSA1212_PXS_DATA_REG, &pxs_data);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "pxs data read err\n");
+ goto pxs_data_read_err;
+ }
+
+ *val = pxs_data & JSA1212_PXS_DATA_MASK;
+
+pxs_data_read_err:
+ return jsa1212_pxs_enable(data, JSA1212_CONF_PXS_DISABLE);
+}
+
+static int jsa1212_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct jsa1212_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = jsa1212_read_als_data(data, val);
+ break;
+ case IIO_PROXIMITY:
+ ret = jsa1212_read_pxs_data(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+ return ret < 0 ? ret : IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val = jsa1212_als_range_val[data->als_rng_idx];
+ *val2 = BIT(12); /* Max 12 bit value */
+ return IIO_VAL_FRACTIONAL;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec jsa1212_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }
+};
+
+static const struct iio_info jsa1212_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &jsa1212_read_raw,
+};
+
+static int jsa1212_chip_init(struct jsa1212_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, JSA1212_CONF_REG,
+ (JSA1212_CONF_PXS_SLP_50MS |
+ JSA1212_CONF_IRDR_200MA));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, JSA1212_INT_REG,
+ JSA1212_INT_ALS_PRST_4CONV);
+ if (ret < 0)
+ return ret;
+
+ data->als_rng_idx = JSA1212_ALS_RNG_0_2048;
+
+ return 0;
+}
+
+static bool jsa1212_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case JSA1212_PXS_DATA_REG:
+ case JSA1212_ALS_DT1_REG:
+ case JSA1212_ALS_DT2_REG:
+ case JSA1212_INT_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config jsa1212_regmap_config = {
+ .name = JSA1212_REGMAP_NAME,
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = JSA1212_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = jsa1212_is_volatile_reg,
+};
+
+static int jsa1212_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct jsa1212_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &jsa1212_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Regmap initialization failed.\n");
+ return PTR_ERR(regmap);
+ }
+
+ data = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ mutex_init(&data->lock);
+
+ ret = jsa1212_chip_init(data);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = jsa1212_channels;
+ indio_dev->num_channels = ARRAY_SIZE(jsa1212_channels);
+ indio_dev->name = JSA1212_DRIVER_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ indio_dev->info = &jsa1212_info;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ dev_err(&client->dev, "%s: register device failed\n", __func__);
+
+ return ret;
+}
+
+ /* power off the device */
+static int jsa1212_power_off(struct jsa1212_data *data)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = regmap_update_bits(data->regmap, JSA1212_CONF_REG,
+ JSA1212_CONF_ALS_MASK |
+ JSA1212_CONF_PXS_MASK,
+ JSA1212_CONF_ALS_DISABLE |
+ JSA1212_CONF_PXS_DISABLE);
+
+ if (ret < 0)
+ dev_err(&data->client->dev, "power off cmd failed\n");
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int jsa1212_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct jsa1212_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ return jsa1212_power_off(data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int jsa1212_suspend(struct device *dev)
+{
+ struct jsa1212_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return jsa1212_power_off(data);
+}
+
+static int jsa1212_resume(struct device *dev)
+{
+ int ret = 0;
+ struct jsa1212_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ mutex_lock(&data->lock);
+
+ if (data->als_en) {
+ ret = jsa1212_als_enable(data, JSA1212_CONF_ALS_ENABLE);
+ if (ret < 0) {
+ dev_err(dev, "als resume failed\n");
+ goto unlock_and_ret;
+ }
+ }
+
+ if (data->pxs_en) {
+ ret = jsa1212_pxs_enable(data, JSA1212_CONF_PXS_ENABLE);
+ if (ret < 0)
+ dev_err(dev, "pxs resume failed\n");
+ }
+
+unlock_and_ret:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(jsa1212_pm_ops, jsa1212_suspend, jsa1212_resume);
+
+#define JSA1212_PM_OPS (&jsa1212_pm_ops)
+#else
+#define JSA1212_PM_OPS NULL
+#endif
+
+static const struct acpi_device_id jsa1212_acpi_match[] = {
+ {"JSA1212", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, jsa1212_acpi_match);
+
+static const struct i2c_device_id jsa1212_id[] = {
+ { JSA1212_DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, jsa1212_id);
+
+static struct i2c_driver jsa1212_driver = {
+ .driver = {
+ .name = JSA1212_DRIVER_NAME,
+ .pm = JSA1212_PM_OPS,
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(jsa1212_acpi_match),
+ },
+ .probe = jsa1212_probe,
+ .remove = jsa1212_remove,
+ .id_table = jsa1212_id,
+};
+module_i2c_driver(jsa1212_driver);
+
+MODULE_AUTHOR("Sathya Kuppuswamy <sathyanarayanan.kuppuswamy@linux.intel.com>");
+MODULE_DESCRIPTION("JSA1212 proximity/ambient light sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index ae3c71bdd6c6..076bc46fad03 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -657,7 +657,7 @@ static ALS_HYSTERESIS_ATTR_RO(3);
#define ILLUMINANCE_ATTR_RO(_name) \
DEVICE_ATTR(in_illuminance0_##_name, S_IRUGO, show_##_name, NULL)
#define ILLUMINANCE_ATTR_RW(_name) \
- DEVICE_ATTR(in_illuminance0_##_name, S_IRUGO | S_IWUSR , \
+ DEVICE_ATTR(in_illuminance0_##_name, S_IRUGO | S_IWUSR, \
show_##_name, store_##_name)
/*
* ALS Zone threshold-event enable
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index a9e449b0be0c..71c2bde275aa 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -149,8 +149,8 @@ static int tcs3414_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- i = (data->gain & TCS3414_GAIN_MASK) >> TCS3414_GAIN_SHIFT;
- *val = tcs3414_scales[i][0];
+ i = (data->gain & TCS3414_GAIN_MASK) >> TCS3414_GAIN_SHIFT;
+ *val = tcs3414_scales[i][0];
*val2 = tcs3414_scales[i][1];
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_INT_TIME:
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index b2dba9e506ab..4c7a4c52dd06 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -6,26 +6,21 @@
menu "Magnetometer sensors"
config AK8975
- tristate "Asahi Kasei AK8975 3-Axis Magnetometer"
+ tristate "Asahi Kasei AK 3-Axis Magnetometer"
depends on I2C
depends on GPIOLIB
help
- Say yes here to build support for Asahi Kasei AK8975 3-Axis
- Magnetometer. This driver can also support AK8963, if i2c
- device name is identified as ak8963.
+ Say yes here to build support for Asahi Kasei AK8975, AK8963,
+ AK09911 or AK09912 3-Axis Magnetometer.
To compile this driver as a module, choose M here: the module
will be called ak8975.
config AK09911
tristate "Asahi Kasei AK09911 3-axis Compass"
- depends on I2C
+ select AK8975
help
- Say yes here to build support for Asahi Kasei AK09911 3-Axis
- Magnetometer.
-
- To compile this driver as a module, choose M here: the module
- will be called ak09911.
+ Deprecated: AK09911 is now supported by AK8975 driver.
+ Deprecated: AK09911 is now supported by the AK8975 driver.
config MAG3110
tristate "Freescale MAG3110 3-Axis Magnetometer"
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index b91315e0b826..0f5d3c985799 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -3,7 +3,6 @@
#
# When adding new entries keep the list in alphabetical order
-obj-$(CONFIG_AK09911) += ak09911.o
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_MAG3110) += mag3110.o
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
diff --git a/drivers/iio/magnetometer/ak09911.c b/drivers/iio/magnetometer/ak09911.c
deleted file mode 100644
index b2bc942ff6b8..000000000000
--- a/drivers/iio/magnetometer/ak09911.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * AK09911 3-axis compass driver
- * Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
-#include <linux/iio/iio.h>
-
-#define AK09911_REG_WIA1 0x00
-#define AK09911_REG_WIA2 0x01
-#define AK09911_WIA1_VALUE 0x48
-#define AK09911_WIA2_VALUE 0x05
-
-#define AK09911_REG_ST1 0x10
-#define AK09911_REG_HXL 0x11
-#define AK09911_REG_HXH 0x12
-#define AK09911_REG_HYL 0x13
-#define AK09911_REG_HYH 0x14
-#define AK09911_REG_HZL 0x15
-#define AK09911_REG_HZH 0x16
-
-#define AK09911_REG_ASAX 0x60
-#define AK09911_REG_ASAY 0x61
-#define AK09911_REG_ASAZ 0x62
-
-#define AK09911_REG_CNTL1 0x30
-#define AK09911_REG_CNTL2 0x31
-#define AK09911_REG_CNTL3 0x32
-
-#define AK09911_MODE_SNG_MEASURE 0x01
-#define AK09911_MODE_SELF_TEST 0x10
-#define AK09911_MODE_FUSE_ACCESS 0x1F
-#define AK09911_MODE_POWERDOWN 0x00
-#define AK09911_RESET_DATA 0x01
-
-#define AK09911_REG_CNTL1 0x30
-#define AK09911_REG_CNTL2 0x31
-#define AK09911_REG_CNTL3 0x32
-
-#define AK09911_RAW_TO_GAUSS(asa) ((((asa) + 128) * 6000) / 256)
-
-#define AK09911_MAX_CONVERSION_TIMEOUT_MS 500
-#define AK09911_CONVERSION_DONE_POLL_TIME_MS 10
-
-struct ak09911_data {
- struct i2c_client *client;
- struct mutex lock;
- u8 asa[3];
- long raw_to_gauss[3];
-};
-
-static const int ak09911_index_to_reg[] = {
- AK09911_REG_HXL, AK09911_REG_HYL, AK09911_REG_HZL,
-};
-
-static int ak09911_set_mode(struct i2c_client *client, u8 mode)
-{
- int ret;
-
- switch (mode) {
- case AK09911_MODE_SNG_MEASURE:
- case AK09911_MODE_SELF_TEST:
- case AK09911_MODE_FUSE_ACCESS:
- case AK09911_MODE_POWERDOWN:
- ret = i2c_smbus_write_byte_data(client,
- AK09911_REG_CNTL2, mode);
- if (ret < 0) {
- dev_err(&client->dev, "set_mode error\n");
- return ret;
- }
- /* After mode change wait atleast 100us */
- usleep_range(100, 500);
- break;
- default:
- dev_err(&client->dev,
- "%s: Unknown mode(%d).", __func__, mode);
- return -EINVAL;
- }
-
- return ret;
-}
-
-/* Get Sensitivity Adjustment value */
-static int ak09911_get_asa(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ak09911_data *data = iio_priv(indio_dev);
- int ret;
-
- ret = ak09911_set_mode(client, AK09911_MODE_FUSE_ACCESS);
- if (ret < 0)
- return ret;
-
- /* Get asa data and store in the device data. */
- ret = i2c_smbus_read_i2c_block_data(client, AK09911_REG_ASAX,
- 3, data->asa);
- if (ret < 0) {
- dev_err(&client->dev, "Not able to read asa data\n");
- return ret;
- }
-
- ret = ak09911_set_mode(client, AK09911_MODE_POWERDOWN);
- if (ret < 0)
- return ret;
-
- data->raw_to_gauss[0] = AK09911_RAW_TO_GAUSS(data->asa[0]);
- data->raw_to_gauss[1] = AK09911_RAW_TO_GAUSS(data->asa[1]);
- data->raw_to_gauss[2] = AK09911_RAW_TO_GAUSS(data->asa[2]);
-
- return 0;
-}
-
-static int ak09911_verify_chip_id(struct i2c_client *client)
-{
- u8 wia_val[2];
- int ret;
-
- ret = i2c_smbus_read_i2c_block_data(client, AK09911_REG_WIA1,
- 2, wia_val);
- if (ret < 0) {
- dev_err(&client->dev, "Error reading WIA\n");
- return ret;
- }
-
- dev_dbg(&client->dev, "WIA %02x %02x\n", wia_val[0], wia_val[1]);
-
- if (wia_val[0] != AK09911_WIA1_VALUE ||
- wia_val[1] != AK09911_WIA2_VALUE) {
- dev_err(&client->dev, "Device ak09911 not found\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int wait_conversion_complete_polled(struct ak09911_data *data)
-{
- struct i2c_client *client = data->client;
- u8 read_status;
- u32 timeout_ms = AK09911_MAX_CONVERSION_TIMEOUT_MS;
- int ret;
-
- /* Wait for the conversion to complete. */
- while (timeout_ms) {
- msleep_interruptible(AK09911_CONVERSION_DONE_POLL_TIME_MS);
- ret = i2c_smbus_read_byte_data(client, AK09911_REG_ST1);
- if (ret < 0) {
- dev_err(&client->dev, "Error in reading ST1\n");
- return ret;
- }
- read_status = ret & 0x01;
- if (read_status)
- break;
- timeout_ms -= AK09911_CONVERSION_DONE_POLL_TIME_MS;
- }
- if (!timeout_ms) {
- dev_err(&client->dev, "Conversion timeout happened\n");
- return -EIO;
- }
-
- return read_status;
-}
-
-static int ak09911_read_axis(struct iio_dev *indio_dev, int index, int *val)
-{
- struct ak09911_data *data = iio_priv(indio_dev);
- struct i2c_client *client = data->client;
- int ret;
-
- mutex_lock(&data->lock);
-
- ret = ak09911_set_mode(client, AK09911_MODE_SNG_MEASURE);
- if (ret < 0)
- goto fn_exit;
-
- ret = wait_conversion_complete_polled(data);
- if (ret < 0)
- goto fn_exit;
-
- /* Read data */
- ret = i2c_smbus_read_word_data(client, ak09911_index_to_reg[index]);
- if (ret < 0) {
- dev_err(&client->dev, "Read axis data fails\n");
- goto fn_exit;
- }
-
- mutex_unlock(&data->lock);
-
- /* Clamp to valid range. */
- *val = sign_extend32(clamp_t(s16, ret, -8192, 8191), 13);
-
- return IIO_VAL_INT;
-
-fn_exit:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int ak09911_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct ak09911_data *data = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return ak09911_read_axis(indio_dev, chan->address, val);
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT_PLUS_MICRO;
- }
-
- return -EINVAL;
-}
-
-#define AK09911_CHANNEL(axis, index) \
- { \
- .type = IIO_MAGN, \
- .modified = 1, \
- .channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- .address = index, \
- }
-
-static const struct iio_chan_spec ak09911_channels[] = {
- AK09911_CHANNEL(X, 0), AK09911_CHANNEL(Y, 1), AK09911_CHANNEL(Z, 2),
-};
-
-static const struct iio_info ak09911_info = {
- .read_raw = &ak09911_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-static const struct acpi_device_id ak_acpi_match[] = {
- {"AK009911", 0},
- { },
-};
-MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
-
-static int ak09911_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct iio_dev *indio_dev;
- struct ak09911_data *data;
- const char *name;
- int ret;
-
- ret = ak09911_verify_chip_id(client);
- if (ret) {
- dev_err(&client->dev, "AK00911 not detected\n");
- return -ENODEV;
- }
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (indio_dev == NULL)
- return -ENOMEM;
-
- data = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
-
- data->client = client;
- mutex_init(&data->lock);
-
- ret = ak09911_get_asa(client);
- if (ret)
- return ret;
-
- if (id)
- name = id->name;
- else if (ACPI_HANDLE(&client->dev))
- name = dev_name(&client->dev);
- else
- return -ENODEV;
-
- dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
-
- indio_dev->dev.parent = &client->dev;
- indio_dev->channels = ak09911_channels;
- indio_dev->num_channels = ARRAY_SIZE(ak09911_channels);
- indio_dev->info = &ak09911_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = name;
-
- return devm_iio_device_register(&client->dev, indio_dev);
-}
-
-static const struct i2c_device_id ak09911_id[] = {
- {"ak09911", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ak09911_id);
-
-static struct i2c_driver ak09911_driver = {
- .driver = {
- .name = "ak09911",
- .acpi_match_table = ACPI_PTR(ak_acpi_match),
- },
- .probe = ak09911_probe,
- .id_table = ak09911_id,
-};
-module_i2c_driver(ak09911_driver);
-
-MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("AK09911 Compass driver");
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index bf5ef077e791..b13936dacc78 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -64,10 +64,10 @@
#define AK8975_REG_CNTL 0x0A
#define AK8975_REG_CNTL_MODE_SHIFT 0
#define AK8975_REG_CNTL_MODE_MASK (0xF << AK8975_REG_CNTL_MODE_SHIFT)
-#define AK8975_REG_CNTL_MODE_POWER_DOWN 0
-#define AK8975_REG_CNTL_MODE_ONCE 1
-#define AK8975_REG_CNTL_MODE_SELF_TEST 8
-#define AK8975_REG_CNTL_MODE_FUSE_ROM 0xF
+#define AK8975_REG_CNTL_MODE_POWER_DOWN 0x00
+#define AK8975_REG_CNTL_MODE_ONCE 0x01
+#define AK8975_REG_CNTL_MODE_SELF_TEST 0x08
+#define AK8975_REG_CNTL_MODE_FUSE_ROM 0x0F
#define AK8975_REG_RSVC 0x0B
#define AK8975_REG_ASTC 0x0C
@@ -81,18 +81,278 @@
#define AK8975_MAX_REGS AK8975_REG_ASAZ
/*
+ * AK09912 Register definitions
+ */
+#define AK09912_REG_WIA1 0x00
+#define AK09912_REG_WIA2 0x01
+#define AK09912_DEVICE_ID 0x04
+#define AK09911_DEVICE_ID 0x05
+
+#define AK09911_REG_INFO1 0x02
+#define AK09911_REG_INFO2 0x03
+
+#define AK09912_REG_ST1 0x10
+
+#define AK09912_REG_ST1_DRDY_SHIFT 0
+#define AK09912_REG_ST1_DRDY_MASK (1 << AK09912_REG_ST1_DRDY_SHIFT)
+
+#define AK09912_REG_HXL 0x11
+#define AK09912_REG_HXH 0x12
+#define AK09912_REG_HYL 0x13
+#define AK09912_REG_HYH 0x14
+#define AK09912_REG_HZL 0x15
+#define AK09912_REG_HZH 0x16
+#define AK09912_REG_TMPS 0x17
+
+#define AK09912_REG_ST2 0x18
+#define AK09912_REG_ST2_HOFL_SHIFT 3
+#define AK09912_REG_ST2_HOFL_MASK (1 << AK09912_REG_ST2_HOFL_SHIFT)
+
+#define AK09912_REG_CNTL1 0x30
+
+#define AK09912_REG_CNTL2 0x31
+#define AK09912_REG_CNTL_MODE_POWER_DOWN 0x00
+#define AK09912_REG_CNTL_MODE_ONCE 0x01
+#define AK09912_REG_CNTL_MODE_SELF_TEST 0x10
+#define AK09912_REG_CNTL_MODE_FUSE_ROM 0x1F
+#define AK09912_REG_CNTL2_MODE_SHIFT 0
+#define AK09912_REG_CNTL2_MODE_MASK (0x1F << AK09912_REG_CNTL2_MODE_SHIFT)
+
+#define AK09912_REG_CNTL3 0x32
+
+#define AK09912_REG_TS1 0x33
+#define AK09912_REG_TS2 0x34
+#define AK09912_REG_TS3 0x35
+#define AK09912_REG_I2CDIS 0x36
+#define AK09912_REG_TS4 0x37
+
+#define AK09912_REG_ASAX 0x60
+#define AK09912_REG_ASAY 0x61
+#define AK09912_REG_ASAZ 0x62
+
+#define AK09912_MAX_REGS AK09912_REG_ASAZ
+
+/*
* Miscellaneous values.
*/
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
-#define RAW_TO_GAUSS_8975(asa) ((((asa) + 128) * 3000) / 256)
-#define RAW_TO_GAUSS_8963(asa) ((((asa) + 128) * 6000) / 256)
+
+/*
+ * Precalculate scale factor (in Gauss units) for each axis and
+ * store in the device data.
+ *
+ * This scale factor is axis-dependent, and is derived from 3 calibration
+ * factors ASA(x), ASA(y), and ASA(z).
+ *
+ * These ASA values are read from the sensor device at start of day, and
+ * cached in the device context struct.
+ *
+ * Adjusting the flux value with the sensitivity adjustment value should be
+ * done via the following formula:
+ *
+ * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 )
+ * where H is the raw value, ASA is the sensitivity adjustment, and Hadj
+ * is the resultant adjusted value.
+ *
+ * We reduce the formula to:
+ *
+ * Hadj = H * (ASA + 128) / 256
+ *
+ * H is in the range of -4096 to 4095. The magnetometer has a range of
+ * +-1229uT. To go from the raw value to uT is:
+ *
+ * HuT = H * 1229/4096, or roughly, 3/10.
+ *
+ * Since 1uT = 0.01 gauss, our final scale factor becomes:
+ *
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
+ *
+ * Since ASA doesn't change, we cache the resultant scale factor into the
+ * device context in ak8975_setup().
+ *
+ * Given we use IIO_VAL_INT_PLUS_MICRO bit when displaying the scale, we
+ * multiply the stored scale value by 1e6.
+ */
+static long ak8975_raw_to_gauss(u16 data)
+{
+ return (((long)data + 128) * 3000) / 256;
+}
+
+/*
+ * For AK8963 and AK09911, same calculation, but the device is less sensitive:
+ *
+ * H is in the range of +-8190. The magnetometer has a range of
+ * +-4912uT. To go from the raw value to uT is:
+ *
+ * HuT = H * 4912/8190, or roughly, 6/10, instead of 3/10.
+ */
+
+static long ak8963_09911_raw_to_gauss(u16 data)
+{
+ return (((long)data + 128) * 6000) / 256;
+}
+
+/*
+ * For AK09912, same calculation, except the device is more sensitive:
+ *
+ * H is in the range of -32752 to 32752. The magnetometer has a range of
+ * +-4912uT. To go from the raw value to uT is:
+ *
+ * HuT = H * 4912/32752, or roughly, 3/20, instead of 3/10.
+ */
+static long ak09912_raw_to_gauss(u16 data)
+{
+ return (((long)data + 128) * 1500) / 256;
+}
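A quick worked check of the three helpers above for the nominal sensitivity adjustment ASA = 128: the cached values come out as 3000, 6000 and 1500, which read back through IIO_VAL_INT_PLUS_MICRO as 0.003, 0.006 and 0.0015 Gauss per LSB, matching the 3/10, 6/10 and 3/20 ratios in the comments. A standalone sketch of the same arithmetic:

	#include <stdio.h>

	/* Worked check of the cached scale factors for the nominal ASA value 128. */
	static long raw_to_gauss(long asa, long mult)
	{
		return ((asa + 128) * mult) / 256;
	}

	int main(void)
	{
		/* multiplier 3000: AK8975, 6000: AK8963/AK09911, 1500: AK09912 */
		printf("AK8975:  %ld -> 0.%06ld G/LSB\n",
		       raw_to_gauss(128, 3000), raw_to_gauss(128, 3000));
		printf("AK8963:  %ld -> 0.%06ld G/LSB\n",
		       raw_to_gauss(128, 6000), raw_to_gauss(128, 6000));
		printf("AK09912: %ld -> 0.%06ld G/LSB\n",
		       raw_to_gauss(128, 1500), raw_to_gauss(128, 1500));
		return 0;
	}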
/* Compatible Asahi Kasei Compass parts */
enum asahi_compass_chipset {
AK8975,
AK8963,
+ AK09911,
+ AK09912,
+ AK_MAX_TYPE
+};
+
+enum ak_ctrl_reg_addr {
+ ST1,
+ ST2,
+ CNTL,
+ ASA_BASE,
+ MAX_REGS,
+ REGS_END,
+};
+
+enum ak_ctrl_reg_mask {
+ ST1_DRDY,
+ ST2_HOFL,
+ ST2_DERR,
+ CNTL_MODE,
+ MASK_END,
+};
+
+enum ak_ctrl_mode {
+ POWER_DOWN,
+ MODE_ONCE,
+ SELF_TEST,
+ FUSE_ROM,
+ MODE_END,
+};
+
+struct ak_def {
+ enum asahi_compass_chipset type;
+ long (*raw_to_gauss)(u16 data);
+ u16 range;
+ u8 ctrl_regs[REGS_END];
+ u8 ctrl_masks[MASK_END];
+ u8 ctrl_modes[MODE_END];
+ u8 data_regs[3];
+};
+
+static struct ak_def ak_def_array[AK_MAX_TYPE] = {
+ {
+ .type = AK8975,
+ .raw_to_gauss = ak8975_raw_to_gauss,
+ .range = 4096,
+ .ctrl_regs = {
+ AK8975_REG_ST1,
+ AK8975_REG_ST2,
+ AK8975_REG_CNTL,
+ AK8975_REG_ASAX,
+ AK8975_MAX_REGS},
+ .ctrl_masks = {
+ AK8975_REG_ST1_DRDY_MASK,
+ AK8975_REG_ST2_HOFL_MASK,
+ AK8975_REG_ST2_DERR_MASK,
+ AK8975_REG_CNTL_MODE_MASK},
+ .ctrl_modes = {
+ AK8975_REG_CNTL_MODE_POWER_DOWN,
+ AK8975_REG_CNTL_MODE_ONCE,
+ AK8975_REG_CNTL_MODE_SELF_TEST,
+ AK8975_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK8975_REG_HXL,
+ AK8975_REG_HYL,
+ AK8975_REG_HZL},
+ },
+ {
+ .type = AK8963,
+ .raw_to_gauss = ak8963_09911_raw_to_gauss,
+ .range = 8190,
+ .ctrl_regs = {
+ AK8975_REG_ST1,
+ AK8975_REG_ST2,
+ AK8975_REG_CNTL,
+ AK8975_REG_ASAX,
+ AK8975_MAX_REGS},
+ .ctrl_masks = {
+ AK8975_REG_ST1_DRDY_MASK,
+ AK8975_REG_ST2_HOFL_MASK,
+ 0,
+ AK8975_REG_CNTL_MODE_MASK},
+ .ctrl_modes = {
+ AK8975_REG_CNTL_MODE_POWER_DOWN,
+ AK8975_REG_CNTL_MODE_ONCE,
+ AK8975_REG_CNTL_MODE_SELF_TEST,
+ AK8975_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK8975_REG_HXL,
+ AK8975_REG_HYL,
+ AK8975_REG_HZL},
+ },
+ {
+ .type = AK09911,
+ .raw_to_gauss = ak8963_09911_raw_to_gauss,
+ .range = 8192,
+ .ctrl_regs = {
+ AK09912_REG_ST1,
+ AK09912_REG_ST2,
+ AK09912_REG_CNTL2,
+ AK09912_REG_ASAX,
+ AK09912_MAX_REGS},
+ .ctrl_masks = {
+ AK09912_REG_ST1_DRDY_MASK,
+ AK09912_REG_ST2_HOFL_MASK,
+ 0,
+ AK09912_REG_CNTL2_MODE_MASK},
+ .ctrl_modes = {
+ AK09912_REG_CNTL_MODE_POWER_DOWN,
+ AK09912_REG_CNTL_MODE_ONCE,
+ AK09912_REG_CNTL_MODE_SELF_TEST,
+ AK09912_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK09912_REG_HXL,
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
+ },
+ {
+ .type = AK09912,
+ .raw_to_gauss = ak09912_raw_to_gauss,
+ .range = 32752,
+ .ctrl_regs = {
+ AK09912_REG_ST1,
+ AK09912_REG_ST2,
+ AK09912_REG_CNTL2,
+ AK09912_REG_ASAX,
+ AK09912_MAX_REGS},
+ .ctrl_masks = {
+ AK09912_REG_ST1_DRDY_MASK,
+ AK09912_REG_ST2_HOFL_MASK,
+ 0,
+ AK09912_REG_CNTL2_MODE_MASK},
+ .ctrl_modes = {
+ AK09912_REG_CNTL_MODE_POWER_DOWN,
+ AK09912_REG_CNTL_MODE_ONCE,
+ AK09912_REG_CNTL_MODE_SELF_TEST,
+ AK09912_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK09912_REG_HXL,
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
+ }
};
/*
@@ -100,40 +360,82 @@ enum asahi_compass_chipset {
*/
struct ak8975_data {
struct i2c_client *client;
+ struct ak_def *def;
struct attribute_group attrs;
struct mutex lock;
u8 asa[3];
long raw_to_gauss[3];
- u8 reg_cache[AK8975_MAX_REGS];
int eoc_gpio;
int eoc_irq;
wait_queue_head_t data_ready_queue;
unsigned long flags;
- enum asahi_compass_chipset chipset;
+ u8 cntl_cache;
};
-static const int ak8975_index_to_reg[] = {
- AK8975_REG_HXL, AK8975_REG_HYL, AK8975_REG_HZL,
-};
+/*
+ * Return 0 if the i2c device is the one we expect,
+ * or a negative error number otherwise.
+ */
+static int ak8975_who_i_am(struct i2c_client *client,
+ enum asahi_compass_chipset type)
+{
+ u8 wia_val[2];
+ int ret;
+
+ /*
+ * Signature for each device:
+ * Device | WIA1 | WIA2
+ * AK09912 | DEVICE_ID | AK09912_DEVICE_ID
+ * AK09911 | DEVICE_ID | AK09911_DEVICE_ID
+ * AK8975 | DEVICE_ID | NA
+ * AK8963 | DEVICE_ID | NA
+ */
+ ret = i2c_smbus_read_i2c_block_data(client, AK09912_REG_WIA1,
+ 2, wia_val);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading WIA\n");
+ return ret;
+ }
+
+ if (wia_val[0] != AK8975_DEVICE_ID)
+ return -ENODEV;
+
+ switch (type) {
+ case AK8975:
+ case AK8963:
+ return 0;
+ case AK09911:
+ if (wia_val[1] == AK09911_DEVICE_ID)
+ return 0;
+ break;
+ case AK09912:
+ if (wia_val[1] == AK09912_DEVICE_ID)
+ return 0;
+ break;
+ default:
+ dev_err(&client->dev, "Type %d unknown\n", type);
+ }
+ return -ENODEV;
+}
/*
- * Helper function to write to the I2C device's registers.
+ * Helper function to write to CNTL register.
*/
-static int ak8975_write_data(struct i2c_client *client,
- u8 reg, u8 val, u8 mask, u8 shift)
+static int ak8975_set_mode(struct ak8975_data *data, enum ak_ctrl_mode mode)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ak8975_data *data = iio_priv(indio_dev);
u8 regval;
int ret;
- regval = (data->reg_cache[reg] & ~mask) | (val << shift);
- ret = i2c_smbus_write_byte_data(client, reg, regval);
+ regval = (data->cntl_cache & ~data->def->ctrl_masks[CNTL_MODE]) |
+ data->def->ctrl_modes[mode];
+ ret = i2c_smbus_write_byte_data(data->client,
+ data->def->ctrl_regs[CNTL], regval);
if (ret < 0) {
- dev_err(&client->dev, "Write to device fails status %x\n", ret);
return ret;
}
- data->reg_cache[reg] = regval;
+ data->cntl_cache = regval;
+ /* After mode change wait at least 100us */
+ usleep_range(100, 500);
return 0;
}
@@ -166,8 +468,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
irq = gpio_to_irq(data->eoc_gpio);
rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- dev_name(&client->dev), data);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(&client->dev), data);
if (rc < 0) {
dev_err(&client->dev,
"irq %d request failed, (gpio %d): %d\n",
@@ -191,34 +493,18 @@ static int ak8975_setup(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ak8975_data *data = iio_priv(indio_dev);
- u8 device_id;
int ret;
- /* Confirm that the device we're talking to is really an AK8975. */
- ret = i2c_smbus_read_byte_data(client, AK8975_REG_WIA);
- if (ret < 0) {
- dev_err(&client->dev, "Error reading WIA\n");
- return ret;
- }
- device_id = ret;
- if (device_id != AK8975_DEVICE_ID) {
- dev_err(&client->dev, "Device ak8975 not found\n");
- return -ENODEV;
- }
-
/* Write the fused rom access mode. */
- ret = ak8975_write_data(client,
- AK8975_REG_CNTL,
- AK8975_REG_CNTL_MODE_FUSE_ROM,
- AK8975_REG_CNTL_MODE_MASK,
- AK8975_REG_CNTL_MODE_SHIFT);
+ ret = ak8975_set_mode(data, FUSE_ROM);
if (ret < 0) {
dev_err(&client->dev, "Error in setting fuse access mode\n");
return ret;
}
/* Get asa data and store in the device data. */
- ret = i2c_smbus_read_i2c_block_data(client, AK8975_REG_ASAX,
+ ret = i2c_smbus_read_i2c_block_data(client,
+ data->def->ctrl_regs[ASA_BASE],
3, data->asa);
if (ret < 0) {
dev_err(&client->dev, "Not able to read asa data\n");
@@ -226,13 +512,13 @@ static int ak8975_setup(struct i2c_client *client)
}
/* After reading fuse ROM data set power-down mode */
- ret = ak8975_write_data(client,
- AK8975_REG_CNTL,
- AK8975_REG_CNTL_MODE_POWER_DOWN,
- AK8975_REG_CNTL_MODE_MASK,
- AK8975_REG_CNTL_MODE_SHIFT);
+ ret = ak8975_set_mode(data, POWER_DOWN);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting power-down mode\n");
+ return ret;
+ }
- if (data->eoc_gpio > 0 || client->irq) {
+ if (data->eoc_gpio > 0 || client->irq > 0) {
ret = ak8975_setup_irq(data);
if (ret < 0) {
dev_err(&client->dev,
@@ -241,61 +527,9 @@ static int ak8975_setup(struct i2c_client *client)
}
}
- if (ret < 0) {
- dev_err(&client->dev, "Error in setting power-down mode\n");
- return ret;
- }
-
-/*
- * Precalculate scale factor (in Gauss units) for each axis and
- * store in the device data.
- *
- * This scale factor is axis-dependent, and is derived from 3 calibration
- * factors ASA(x), ASA(y), and ASA(z).
- *
- * These ASA values are read from the sensor device at start of day, and
- * cached in the device context struct.
- *
- * Adjusting the flux value with the sensitivity adjustment value should be
- * done via the following formula:
- *
- * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 )
- *
- * where H is the raw value, ASA is the sensitivity adjustment, and Hadj
- * is the resultant adjusted value.
- *
- * We reduce the formula to:
- *
- * Hadj = H * (ASA + 128) / 256
- *
- * H is in the range of -4096 to 4095. The magnetometer has a range of
- * +-1229uT. To go from the raw value to uT is:
- *
- * HuT = H * 1229/4096, or roughly, 3/10.
- *
- * Since 1uT = 0.01 gauss, our final scale factor becomes:
- *
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
- * Hadj = H * ((ASA + 128) * 0.003) / 256
- *
- * Since ASA doesn't change, we cache the resultant scale factor into the
- * device context in ak8975_setup().
- */
- if (data->chipset == AK8963) {
- /*
- * H range is +-8190 and magnetometer range is +-4912.
- * So HuT using the above explanation for 8975,
- * 4912/8190 = ~ 6/10.
- * So the Hadj should use 6/10 instead of 3/10.
- */
- data->raw_to_gauss[0] = RAW_TO_GAUSS_8963(data->asa[0]);
- data->raw_to_gauss[1] = RAW_TO_GAUSS_8963(data->asa[1]);
- data->raw_to_gauss[2] = RAW_TO_GAUSS_8963(data->asa[2]);
- } else {
- data->raw_to_gauss[0] = RAW_TO_GAUSS_8975(data->asa[0]);
- data->raw_to_gauss[1] = RAW_TO_GAUSS_8975(data->asa[1]);
- data->raw_to_gauss[2] = RAW_TO_GAUSS_8975(data->asa[2]);
- }
+ data->raw_to_gauss[0] = data->def->raw_to_gauss(data->asa[0]);
+ data->raw_to_gauss[1] = data->def->raw_to_gauss(data->asa[1]);
+ data->raw_to_gauss[2] = data->def->raw_to_gauss(data->asa[2]);
return 0;
}
@@ -318,7 +552,7 @@ static int wait_conversion_complete_gpio(struct ak8975_data *data)
return -EINVAL;
}
- ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+ ret = i2c_smbus_read_byte_data(client, data->def->ctrl_regs[ST1]);
if (ret < 0)
dev_err(&client->dev, "Error in reading ST1\n");
@@ -335,7 +569,8 @@ static int wait_conversion_complete_polled(struct ak8975_data *data)
/* Wait for the conversion to complete. */
while (timeout_ms) {
msleep(AK8975_CONVERSION_DONE_POLL_TIME);
- ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+ ret = i2c_smbus_read_byte_data(client,
+ data->def->ctrl_regs[ST1]);
if (ret < 0) {
dev_err(&client->dev, "Error in reading ST1\n");
return ret;
@@ -378,11 +613,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
mutex_lock(&data->lock);
/* Set up the device for taking a sample. */
- ret = ak8975_write_data(client,
- AK8975_REG_CNTL,
- AK8975_REG_CNTL_MODE_ONCE,
- AK8975_REG_CNTL_MODE_MASK,
- AK8975_REG_CNTL_MODE_SHIFT);
+ ret = ak8975_set_mode(data, MODE_ONCE);
if (ret < 0) {
dev_err(&client->dev, "Error in setting operating mode\n");
goto exit;
@@ -399,14 +630,15 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
goto exit;
/* This will be executed only for non-interrupt based waiting case */
- if (ret & AK8975_REG_ST1_DRDY_MASK) {
- ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST2);
+ if (ret & data->def->ctrl_masks[ST1_DRDY]) {
+ ret = i2c_smbus_read_byte_data(client,
+ data->def->ctrl_regs[ST2]);
if (ret < 0) {
dev_err(&client->dev, "Error in reading ST2\n");
goto exit;
}
- if (ret & (AK8975_REG_ST2_DERR_MASK |
- AK8975_REG_ST2_HOFL_MASK)) {
+ if (ret & (data->def->ctrl_masks[ST2_DERR] |
+ data->def->ctrl_masks[ST2_HOFL])) {
dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
ret = -EINVAL;
goto exit;
@@ -415,7 +647,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
/* Read the flux value from the appropriate register
(the register is specified in the iio device attributes). */
- ret = i2c_smbus_read_word_data(client, ak8975_index_to_reg[index]);
+ ret = i2c_smbus_read_word_data(client, data->def->data_regs[index]);
if (ret < 0) {
dev_err(&client->dev, "Read axis data fails\n");
goto exit;
@@ -424,7 +656,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- *val = clamp_t(s16, ret, -4096, 4095);
+ *val = clamp_t(s16, ret, -data->def->range, data->def->range);
return IIO_VAL_INT;
exit:
@@ -473,6 +705,8 @@ static const struct acpi_device_id ak_acpi_match[] = {
{"AK8975", AK8975},
{"AK8963", AK8963},
{"INVN6500", AK8963},
+ {"AK09911", AK09911},
+ {"AK09912", AK09912},
{ },
};
MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
@@ -498,6 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
int eoc_gpio;
int err;
const char *name = NULL;
+ enum asahi_compass_chipset chipset;
/* Grab and set up the supplied GPIO. */
if (client->dev.platform_data)
@@ -537,42 +772,50 @@ static int ak8975_probe(struct i2c_client *client,
/* id will be NULL when enumerated via ACPI */
if (id) {
- data->chipset =
- (enum asahi_compass_chipset)(id->driver_data);
+ chipset = (enum asahi_compass_chipset)(id->driver_data);
name = id->name;
} else if (ACPI_HANDLE(&client->dev))
- name = ak8975_match_acpi_device(&client->dev, &data->chipset);
+ name = ak8975_match_acpi_device(&client->dev, &chipset);
else
return -ENOSYS;
+ if (chipset >= AK_MAX_TYPE) {
+ dev_err(&client->dev, "AKM device type unsupported: %d\n",
+ chipset);
+ return -ENODEV;
+ }
+
+ data->def = &ak_def_array[chipset];
+ err = ak8975_who_i_am(client, data->def->type);
+ if (err < 0) {
+ dev_err(&client->dev, "Unexpected device\n");
+ return err;
+ }
dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
/* Perform some basic start-of-day setup of the device. */
err = ak8975_setup(client);
if (err < 0) {
- dev_err(&client->dev, "AK8975 initialization fails\n");
+ dev_err(&client->dev, "%s initialization fails\n", name);
return err;
}
- data->client = client;
mutex_init(&data->lock);
- data->eoc_gpio = eoc_gpio;
indio_dev->dev.parent = &client->dev;
indio_dev->channels = ak8975_channels;
indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
indio_dev->info = &ak8975_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->name = name;
- err = devm_iio_device_register(&client->dev, indio_dev);
- if (err < 0)
- return err;
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id ak8975_id[] = {
{"ak8975", AK8975},
{"ak8963", AK8963},
+ {"AK8963", AK8963},
+ {"ak09911", AK09911},
+ {"ak09912", AK09912},
{}
};
@@ -581,14 +824,20 @@ MODULE_DEVICE_TABLE(i2c, ak8975_id);
static const struct of_device_id ak8975_of_match[] = {
{ .compatible = "asahi-kasei,ak8975", },
{ .compatible = "ak8975", },
- { }
+ { .compatible = "asahi-kasei,ak8963", },
+ { .compatible = "ak8963", },
+ { .compatible = "asahi-kasei,ak09911", },
+ { .compatible = "ak09911", },
+ { .compatible = "asahi-kasei,ak09912", },
+ { .compatible = "ak09912", },
+ {}
};
MODULE_DEVICE_TABLE(of, ak8975_of_match);
static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
- .of_match_table = ak8975_of_match,
+ .of_match_table = of_match_ptr(ak8975_of_match),
.acpi_match_table = ACPI_PTR(ak_acpi_match),
},
.probe = ak8975_probe,
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 6294575d2777..d22993b4066a 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -157,20 +157,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
switch (mask) {
case 0:
- poll_value = hid_sensor_read_poll_value(
- &magn_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
-
hid_sensor_power_state(&magn_state->common_attributes, true);
- msleep_interruptible(poll_value * 2);
-
report_id =
magn_state->magn[chan->address].report_id;
address = magn_3d_addresses[chan->address];
@@ -530,6 +522,7 @@ static struct platform_driver hid_magn_3d_platform_driver = {
.id_table = hid_magn_3d_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_magn_3d_probe,
.remove = hid_magn_3d_remove,
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 1ff181bbbcef..73854460bb2c 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -111,20 +111,12 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- poll_value = hid_sensor_read_poll_value(
- &incl_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
-
hid_sensor_power_state(&incl_state->common_attributes, true);
- msleep_interruptible(poll_value * 2);
-
report_id =
incl_state->incl[chan->scan_index].report_id;
address = incl_3d_addresses[chan->scan_index];
@@ -437,6 +429,7 @@ static struct platform_driver hid_incl_3d_platform_driver = {
.id_table = hid_incl_3d_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_incl_3d_probe,
.remove = hid_incl_3d_remove,
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index 75038dacfff1..7c623e2bd633 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -80,16 +80,12 @@ struct bmp280_data {
s32 t_fine;
};
-/* Compensation parameters. */
-struct bmp280_comp_temp {
- u16 dig_t1;
- s16 dig_t2, dig_t3;
-};
-
-struct bmp280_comp_press {
- u16 dig_p1;
- s16 dig_p2, dig_p3, dig_p4, dig_p5, dig_p6, dig_p7, dig_p8, dig_p9;
-};
+/*
+ * These enums are used for indexing into the array of compensation
+ * parameters.
+ */
+enum { T1, T2, T3 };
+enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
static const struct iio_chan_spec bmp280_channels[] = {
{
@@ -141,54 +137,6 @@ static const struct regmap_config bmp280_regmap_config = {
.volatile_reg = bmp280_is_volatile_reg,
};
-static int bmp280_read_compensation_temp(struct bmp280_data *data,
- struct bmp280_comp_temp *comp)
-{
- int ret;
- __le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
-
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
- buf, BMP280_COMP_TEMP_REG_COUNT);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "failed to read temperature calibration parameters\n");
- return ret;
- }
-
- comp->dig_t1 = (u16) le16_to_cpu(buf[0]);
- comp->dig_t2 = (s16) le16_to_cpu(buf[1]);
- comp->dig_t3 = (s16) le16_to_cpu(buf[2]);
-
- return 0;
-}
-
-static int bmp280_read_compensation_press(struct bmp280_data *data,
- struct bmp280_comp_press *comp)
-{
- int ret;
- __le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
-
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
- buf, BMP280_COMP_PRESS_REG_COUNT);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "failed to read pressure calibration parameters\n");
- return ret;
- }
-
- comp->dig_p1 = (u16) le16_to_cpu(buf[0]);
- comp->dig_p2 = (s16) le16_to_cpu(buf[1]);
- comp->dig_p3 = (s16) le16_to_cpu(buf[2]);
- comp->dig_p4 = (s16) le16_to_cpu(buf[3]);
- comp->dig_p5 = (s16) le16_to_cpu(buf[4]);
- comp->dig_p6 = (s16) le16_to_cpu(buf[5]);
- comp->dig_p7 = (s16) le16_to_cpu(buf[6]);
- comp->dig_p8 = (s16) le16_to_cpu(buf[7]);
- comp->dig_p9 = (s16) le16_to_cpu(buf[8]);
-
- return 0;
-}
-
/*
* Returns temperature in DegC, resolution is 0.01 DegC. Output value of
* "5123" equals 51.23 DegC. t_fine carries fine temperature as global
@@ -197,21 +145,35 @@ static int bmp280_read_compensation_press(struct bmp280_data *data,
* Taken from datasheet, Section 3.11.3, "Compensation formula".
*/
static s32 bmp280_compensate_temp(struct bmp280_data *data,
- struct bmp280_comp_temp *comp,
s32 adc_temp)
{
- s32 var1, var2, t;
+ int ret;
+ s32 var1, var2;
+ __le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
- var1 = (((adc_temp >> 3) - ((s32) comp->dig_t1 << 1)) *
- ((s32) comp->dig_t2)) >> 11;
- var2 = (((((adc_temp >> 4) - ((s32) comp->dig_t1)) *
- ((adc_temp >> 4) - ((s32) comp->dig_t1))) >> 12) *
- ((s32) comp->dig_t3)) >> 14;
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+ buf, BMP280_COMP_TEMP_REG_COUNT);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
- data->t_fine = var1 + var2;
- t = (data->t_fine * 5 + 128) >> 8;
+ /*
+ * The double casts are necessary because le16_to_cpu returns an
+ * unsigned 16-bit value. Casting that value directly to a
+ * signed 32-bit will not do proper sign extension.
+ *
+ * Conversely, T1 and P1 are unsigned values, so they can be
+ * cast straight to the larger type.
+ */
+ var1 = (((adc_temp >> 3) - ((s32)le16_to_cpu(buf[T1]) << 1)) *
+ ((s32)(s16)le16_to_cpu(buf[T2]))) >> 11;
+ var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) *
+ ((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) *
+ ((s32)(s16)le16_to_cpu(buf[T3]))) >> 14;
- return t;
+ return (data->t_fine * 5 + 128) >> 8;
}
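The double-cast comment above is easy to verify in isolation: a negative calibration word loses its sign if the unsigned 16-bit value returned by le16_to_cpu() is widened directly to 32 bits. A minimal sketch, using a made-up calibration value:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * A hypothetical negative calibration word, -100, is stored as 0xFF9C.
	 * Widening the u16 directly keeps 65436; going through int16_t first
	 * sign-extends it back to -100.
	 */
	int main(void)
	{
		uint16_t dig_t2 = 0xFF9C;                 /* made-up calibration word */
		int32_t wrong = (int32_t)dig_t2;          /* 65436 */
		int32_t right = (int32_t)(int16_t)dig_t2; /* -100 */

		printf("wrong = %d, right = %d\n", (int)wrong, (int)right);
		return 0;
	}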
/*
@@ -222,29 +184,38 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
* Taken from datasheet, Section 3.11.3, "Compensation formula".
*/
static u32 bmp280_compensate_press(struct bmp280_data *data,
- struct bmp280_comp_press *comp,
s32 adc_press)
{
+ int ret;
s64 var1, var2, p;
+ __le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+ buf, BMP280_COMP_PRESS_REG_COUNT);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read pressure calibration parameters\n");
+ return ret;
+ }
- var1 = ((s64) data->t_fine) - 128000;
- var2 = var1 * var1 * (s64) comp->dig_p6;
- var2 = var2 + ((var1 * (s64) comp->dig_p5) << 17);
- var2 = var2 + (((s64) comp->dig_p4) << 35);
- var1 = ((var1 * var1 * (s64) comp->dig_p3) >> 8) +
- ((var1 * (s64) comp->dig_p2) << 12);
- var1 = (((((s64) 1) << 47) + var1)) * ((s64) comp->dig_p1) >> 33;
+ var1 = ((s64)data->t_fine) - 128000;
+ var2 = var1 * var1 * (s64)(s16)le16_to_cpu(buf[P6]);
+ var2 += (var1 * (s64)(s16)le16_to_cpu(buf[P5])) << 17;
+ var2 += ((s64)(s16)le16_to_cpu(buf[P4])) << 35;
+ var1 = ((var1 * var1 * (s64)(s16)le16_to_cpu(buf[P3])) >> 8) +
+ ((var1 * (s64)(s16)le16_to_cpu(buf[P2])) << 12);
+ var1 = ((((s64)1) << 47) + var1) * ((s64)le16_to_cpu(buf[P1])) >> 33;
if (var1 == 0)
return 0;
- p = ((((s64) 1048576 - adc_press) << 31) - var2) * 3125;
+ p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
p = div64_s64(p, var1);
- var1 = (((s64) comp->dig_p9) * (p >> 13) * (p >> 13)) >> 25;
- var2 = (((s64) comp->dig_p8) * p) >> 19;
- p = ((p + var1 + var2) >> 8) + (((s64) comp->dig_p7) << 4);
+ var1 = (((s64)(s16)le16_to_cpu(buf[P9])) * (p >> 13) * (p >> 13)) >> 25;
+ var2 = (((s64)(s16)le16_to_cpu(buf[P8])) * p) >> 19;
+ p = ((p + var1 + var2) >> 8) + (((s64)(s16)le16_to_cpu(buf[P7])) << 4);
- return (u32) p;
+ return (u32)p;
}
static int bmp280_read_temp(struct bmp280_data *data,
@@ -253,11 +224,6 @@ static int bmp280_read_temp(struct bmp280_data *data,
int ret;
__be32 tmp = 0;
s32 adc_temp, comp_temp;
- struct bmp280_comp_temp comp;
-
- ret = bmp280_read_compensation_temp(data, &comp);
- if (ret < 0)
- return ret;
ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
(u8 *) &tmp, 3);
@@ -267,7 +233,7 @@ static int bmp280_read_temp(struct bmp280_data *data,
}
adc_temp = be32_to_cpu(tmp) >> 12;
- comp_temp = bmp280_compensate_temp(data, &comp, adc_temp);
+ comp_temp = bmp280_compensate_temp(data, adc_temp);
/*
* val might be NULL if we're called by the read_press routine,
@@ -288,11 +254,6 @@ static int bmp280_read_press(struct bmp280_data *data,
__be32 tmp = 0;
s32 adc_press;
u32 comp_press;
- struct bmp280_comp_press comp;
-
- ret = bmp280_read_compensation_press(data, &comp);
- if (ret < 0)
- return ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp280_read_temp(data, NULL);
@@ -307,7 +268,7 @@ static int bmp280_read_press(struct bmp280_data *data,
}
adc_press = be32_to_cpu(tmp) >> 12;
- comp_press = bmp280_compensate_press(data, &comp, adc_press);
+ comp_press = bmp280_compensate_press(data, adc_press);
*val = comp_press;
*val2 = 256000;
@@ -366,7 +327,7 @@ static int bmp280_chip_init(struct bmp280_data *data)
BMP280_MODE_NORMAL);
if (ret < 0) {
dev_err(&data->client->dev,
- "failed to write config register\n");
+ "failed to write ctrl_meas register\n");
return ret;
}
@@ -394,7 +355,6 @@ static int bmp280_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
- i2c_set_clientdata(client, indio_dev);
data = iio_priv(indio_dev);
mutex_init(&data->lock);
data->client = client;
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 764928682df2..1af314926ebd 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -79,7 +79,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
- s32 poll_value;
*val = 0;
*val2 = 0;
@@ -96,15 +95,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
break;
}
if (report_id >= 0) {
- poll_value = hid_sensor_read_poll_value(
- &press_state->common_attributes);
- if (poll_value < 0)
- return -EINVAL;
hid_sensor_power_state(&press_state->common_attributes,
true);
-
- msleep_interruptible(poll_value * 2);
-
*val = sensor_hub_input_attr_get_raw_value(
press_state->common_attributes.hsdev,
HID_USAGE_SENSOR_PRESSURE, address,
@@ -382,6 +374,7 @@ static struct platform_driver hid_press_platform_driver = {
.id_table = hid_press_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_press_probe,
.remove = hid_press_remove,
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 0c8cdf58f6a1..41a8d8ffa0de 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -17,3 +17,20 @@ config AS3935
module will be called as3935
endmenu
+
+menu "Proximity sensors"
+
+config SX9500
+ tristate "SX9500 Semtech proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for Semtech's SX9500 capacitive
+ proximity/button sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx9500.
+
+endmenu
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 743adee1c8bf..9818dc562abd 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -4,3 +4,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
+obj-$(CONFIG_SX9500) += sx9500.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 466aa4314667..bc0d68efd455 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -273,9 +273,9 @@ static void calibrate_as3935(struct as3935_state *st)
}
#ifdef CONFIG_PM_SLEEP
-static int as3935_suspend(struct spi_device *spi, pm_message_t msg)
+static int as3935_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct as3935_state *st = iio_priv(indio_dev);
int val, ret;
@@ -293,9 +293,9 @@ err_suspend:
return ret;
}
-static int as3935_resume(struct spi_device *spi)
+static int as3935_resume(struct device *dev)
{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct as3935_state *st = iio_priv(indio_dev);
int val, ret;
@@ -311,9 +311,12 @@ err_resume:
return ret;
}
+
+static SIMPLE_DEV_PM_OPS(as3935_pm_ops, as3935_suspend, as3935_resume);
+#define AS3935_PM_OPS (&as3935_pm_ops)
+
#else
-#define as3935_suspend NULL
-#define as3935_resume NULL
+#define AS3935_PM_OPS NULL
#endif
static int as3935_probe(struct spi_device *spi)
@@ -441,12 +444,11 @@ static struct spi_driver as3935_driver = {
.driver = {
.name = "as3935",
.owner = THIS_MODULE,
+ .pm = AS3935_PM_OPS,
},
.probe = as3935_probe,
.remove = as3935_remove,
.id_table = as3935_id,
- .suspend = as3935_suspend,
- .resume = as3935_resume,
};
module_spi_driver(as3935_driver);
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
new file mode 100644
index 000000000000..74dff4e4a11a
--- /dev/null
+++ b/drivers/iio/proximity/sx9500.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Driver for Semtech's SX9500 capacitive proximity/button solution.
+ * Datasheet available at
+ * <http://www.semtech.com/images/datasheet/sx9500.pdf>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define SX9500_DRIVER_NAME "sx9500"
+#define SX9500_IRQ_NAME "sx9500_event"
+#define SX9500_GPIO_NAME "sx9500_gpio"
+
+/* Register definitions. */
+#define SX9500_REG_IRQ_SRC 0x00
+#define SX9500_REG_STAT 0x01
+#define SX9500_REG_IRQ_MSK 0x03
+
+#define SX9500_REG_PROX_CTRL0 0x06
+#define SX9500_REG_PROX_CTRL1 0x07
+#define SX9500_REG_PROX_CTRL2 0x08
+#define SX9500_REG_PROX_CTRL3 0x09
+#define SX9500_REG_PROX_CTRL4 0x0a
+#define SX9500_REG_PROX_CTRL5 0x0b
+#define SX9500_REG_PROX_CTRL6 0x0c
+#define SX9500_REG_PROX_CTRL7 0x0d
+#define SX9500_REG_PROX_CTRL8 0x0e
+
+#define SX9500_REG_SENSOR_SEL 0x20
+#define SX9500_REG_USE_MSB 0x21
+#define SX9500_REG_USE_LSB 0x22
+#define SX9500_REG_AVG_MSB 0x23
+#define SX9500_REG_AVG_LSB 0x24
+#define SX9500_REG_DIFF_MSB 0x25
+#define SX9500_REG_DIFF_LSB 0x26
+#define SX9500_REG_OFFSET_MSB 0x27
+#define SX9500_REG_OFFSET_LSB 0x28
+
+#define SX9500_REG_RESET 0x7f
+
+/* Write this to REG_RESET to do a soft reset. */
+#define SX9500_SOFT_RESET 0xde
+
+#define SX9500_SCAN_PERIOD_MASK GENMASK(6, 4)
+#define SX9500_SCAN_PERIOD_SHIFT 4
+
+/*
+ * These serve for identifying IRQ source in the IRQ_SRC register, and
+ * These serve for identifying the IRQ source in the IRQ_SRC register, and
+ * also for masking the IRQs in the IRQ_MSK register.
+ */
+#define SX9500_CLOSE_IRQ BIT(6)
+#define SX9500_FAR_IRQ BIT(5)
+#define SX9500_CONVDONE_IRQ BIT(3)
+
+#define SX9500_PROXSTAT_SHIFT 4
+
+#define SX9500_NUM_CHANNELS 4
+
+struct sx9500_data {
+ struct mutex mutex;
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ /*
+ * Last reading of the proximity status for each channel. We
+ * only send an event to user space when this changes.
+ */
+ bool prox_stat[SX9500_NUM_CHANNELS];
+ bool event_enabled[SX9500_NUM_CHANNELS];
+ bool trigger_enabled;
+ u16 *buffer;
+};
+
+static const struct iio_event_spec sx9500_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define SX9500_CHANNEL(idx) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .event_spec = sx9500_events, \
+ .num_event_specs = ARRAY_SIZE(sx9500_events), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0, \
+ }, \
+ }
+
+static const struct iio_chan_spec sx9500_channels[] = {
+ SX9500_CHANNEL(0),
+ SX9500_CHANNEL(1),
+ SX9500_CHANNEL(2),
+ SX9500_CHANNEL(3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct {
+ int val;
+ int val2;
+} sx9500_samp_freq_table[] = {
+ {33, 333333},
+ {16, 666666},
+ {11, 111111},
+ {8, 333333},
+ {6, 666666},
+ {5, 0},
+ {3, 333333},
+ {2, 500000},
+};
+
+static const struct regmap_range sx9500_writable_reg_ranges[] = {
+ regmap_reg_range(SX9500_REG_IRQ_MSK, SX9500_REG_IRQ_MSK),
+ regmap_reg_range(SX9500_REG_PROX_CTRL0, SX9500_REG_PROX_CTRL8),
+ regmap_reg_range(SX9500_REG_SENSOR_SEL, SX9500_REG_SENSOR_SEL),
+ regmap_reg_range(SX9500_REG_OFFSET_MSB, SX9500_REG_OFFSET_LSB),
+ regmap_reg_range(SX9500_REG_RESET, SX9500_REG_RESET),
+};
+
+static const struct regmap_access_table sx9500_writeable_regs = {
+ .yes_ranges = sx9500_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9500_writable_reg_ranges),
+};
+
+/*
+ * All allocated registers are readable, so we just list unallocated
+ * ones.
+ */
+static const struct regmap_range sx9500_non_readable_reg_ranges[] = {
+ regmap_reg_range(SX9500_REG_STAT + 1, SX9500_REG_STAT + 1),
+ regmap_reg_range(SX9500_REG_IRQ_MSK + 1, SX9500_REG_PROX_CTRL0 - 1),
+ regmap_reg_range(SX9500_REG_PROX_CTRL8 + 1, SX9500_REG_SENSOR_SEL - 1),
+ regmap_reg_range(SX9500_REG_OFFSET_LSB + 1, SX9500_REG_RESET - 1),
+};
+
+static const struct regmap_access_table sx9500_readable_regs = {
+ .no_ranges = sx9500_non_readable_reg_ranges,
+ .n_no_ranges = ARRAY_SIZE(sx9500_non_readable_reg_ranges),
+};
+
+static const struct regmap_range sx9500_volatile_reg_ranges[] = {
+ regmap_reg_range(SX9500_REG_IRQ_SRC, SX9500_REG_STAT),
+ regmap_reg_range(SX9500_REG_USE_MSB, SX9500_REG_OFFSET_LSB),
+ regmap_reg_range(SX9500_REG_RESET, SX9500_REG_RESET),
+};
+
+static const struct regmap_access_table sx9500_volatile_regs = {
+ .yes_ranges = sx9500_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9500_volatile_reg_ranges),
+};
+
+static const struct regmap_config sx9500_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SX9500_REG_RESET,
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &sx9500_writeable_regs,
+ .rd_table = &sx9500_readable_regs,
+ .volatile_table = &sx9500_volatile_regs,
+};
+
+static int sx9500_read_proximity(struct sx9500_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+ __be16 regval;
+
+ ret = regmap_write(data->regmap, SX9500_REG_SENSOR_SEL, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, SX9500_REG_USE_MSB, &regval, 2);
+ if (ret < 0)
+ return ret;
+
+ *val = 32767 - (s16)be16_to_cpu(regval);
+
+ return IIO_VAL_INT;
+}
+
+static int sx9500_read_samp_freq(struct sx9500_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ unsigned int regval;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX9500_REG_PROX_CTRL0, &regval);
+ mutex_unlock(&data->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & SX9500_SCAN_PERIOD_MASK) >> SX9500_SCAN_PERIOD_SHIFT;
+ *val = sx9500_samp_freq_table[regval].val;
+ *val2 = sx9500_samp_freq_table[regval].val2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int sx9500_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct sx9500_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ mutex_lock(&data->mutex);
+ ret = sx9500_read_proximity(data, chan, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9500_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sx9500_set_samp_freq(struct sx9500_data *data,
+ int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx9500_samp_freq_table); i++)
+ if (val == sx9500_samp_freq_table[i].val &&
+ val2 == sx9500_samp_freq_table[i].val2)
+ break;
+
+ if (i == ARRAY_SIZE(sx9500_samp_freq_table))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
+ SX9500_SCAN_PERIOD_MASK,
+ i << SX9500_SCAN_PERIOD_SHIFT);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
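Both helpers above treat the three SCAN_PERIOD bits of PROX_CTRL0 as an index into sx9500_samp_freq_table: the read path extracts the field and looks the frequency up, and the write path searches the table and writes the index back. A standalone sketch of the read direction, with an arbitrary register value:

	#include <stdio.h>

	/*
	 * Sketch of the scan-period lookup used by sx9500_read_samp_freq();
	 * the register readback value is illustrative only.
	 */
	static const struct { int val; int val2; } samp_freq_table[] = {
		{33, 333333}, {16, 666666}, {11, 111111}, {8, 333333},
		{6, 666666}, {5, 0}, {3, 333333}, {2, 500000},
	};

	#define SCAN_PERIOD_MASK	(0x7 << 4)	/* GENMASK(6, 4) */
	#define SCAN_PERIOD_SHIFT	4

	int main(void)
	{
		unsigned int ctrl0 = 0x10;	/* hypothetical PROX_CTRL0 readback */
		unsigned int idx = (ctrl0 & SCAN_PERIOD_MASK) >> SCAN_PERIOD_SHIFT;

		printf("field %u -> %d.%06d Hz\n", idx,
		       samp_freq_table[idx].val, samp_freq_table[idx].val2);
		return 0;
	}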
+
+static int sx9500_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct sx9500_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9500_set_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t sx9500_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9500_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ /*
+ * Even if no event is enabled, we need to wake the thread to
+ * clear the interrupt state by reading SX9500_REG_IRQ_SRC. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t sx9500_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9500_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val, chan;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_read(data->regmap, SX9500_REG_IRQ_SRC, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
+ if (!(val & (SX9500_CLOSE_IRQ | SX9500_FAR_IRQ)))
+ goto out;
+
+ ret = regmap_read(data->regmap, SX9500_REG_STAT, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
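+ /*
+ * Compare the new proximity status against the cached state and
+ * push an event only for channels that actually changed.
+ */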
+ val >>= SX9500_PROXSTAT_SHIFT;
+ for (chan = 0; chan < SX9500_NUM_CHANNELS; chan++) {
+ int dir;
+ u64 ev;
+ bool new_prox = val & BIT(chan);
+
+ if (!data->event_enabled[chan])
+ continue;
+ if (new_prox == data->prox_stat[chan])
+ /* No change on this channel. */
+ continue;
+
+ dir = new_prox ? IIO_EV_DIR_FALLING :
+ IIO_EV_DIR_RISING;
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
+ chan,
+ IIO_EV_TYPE_THRESH,
+ dir);
+ iio_push_event(indio_dev, ev, iio_get_time_ns());
+ data->prox_stat[chan] = new_prox;
+ }
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9500_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sx9500_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY || type != IIO_EV_TYPE_THRESH ||
+ dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ return data->event_enabled[chan->channel];
+}
+
+static int sx9500_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct sx9500_data *data = iio_priv(indio_dev);
+ int ret, i;
+ bool any_active = false;
+ unsigned int irqmask;
+
+ if (chan->type != IIO_PROXIMITY || type != IIO_EV_TYPE_THRESH ||
+ dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ data->event_enabled[chan->channel] = state;
+
+ for (i = 0; i < SX9500_NUM_CHANNELS; i++)
+ if (data->event_enabled[i]) {
+ any_active = true;
+ break;
+ }
+
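+ /* Keep close/far interrupts enabled only while at least one channel has its event enabled. */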
+ irqmask = SX9500_CLOSE_IRQ | SX9500_FAR_IRQ;
+ if (any_active)
+ ret = regmap_update_bits(data->regmap, SX9500_REG_IRQ_MSK,
+ irqmask, irqmask);
+ else
+ ret = regmap_update_bits(data->regmap, SX9500_REG_IRQ_MSK,
+ irqmask, 0);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9500_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct sx9500_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ kfree(data->buffer);
+ data->buffer = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ mutex_unlock(&data->mutex);
+
+ if (data->buffer == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "2.500000 3.333333 5 6.666666 8.333333 11.111111 16.666666 33.333333");
+
+static struct attribute *sx9500_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sx9500_attribute_group = {
+ .attrs = sx9500_attributes,
+};
+
+static const struct iio_info sx9500_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &sx9500_attribute_group,
+ .read_raw = &sx9500_read_raw,
+ .write_raw = &sx9500_write_raw,
+ .read_event_config = &sx9500_read_event_config,
+ .write_event_config = &sx9500_write_event_config,
+ .update_scan_mode = &sx9500_update_scan_mode,
+};
+
+static int sx9500_set_trigger_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct sx9500_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX9500_REG_IRQ_MSK,
+ SX9500_CONVDONE_IRQ,
+ state ? SX9500_CONVDONE_IRQ : 0);
+ if (ret == 0)
+ data->trigger_enabled = state;
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops sx9500_trigger_ops = {
+ .set_trigger_state = sx9500_set_trigger_state,
+ .owner = THIS_MODULE,
+};
+
+static irqreturn_t sx9500_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sx9500_data *data = iio_priv(indio_dev);
+ int val, bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+
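+ /* Read each enabled channel in scan order into the buffer, then push it with a timestamp. */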
+ for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ indio_dev->masklength) {
+ ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
+ &val);
+ if (ret < 0)
+ goto out;
+
+ data->buffer[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+out:
+ mutex_unlock(&data->mutex);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+struct sx9500_reg_default {
+ u8 reg;
+ u8 def;
+};
+
+static const struct sx9500_reg_default sx9500_default_regs[] = {
+ {
+ .reg = SX9500_REG_PROX_CTRL1,
+ /* Shield enabled, small range. */
+ .def = 0x43,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL2,
+ /* x8 gain, 167kHz frequency, finest resolution. */
+ .def = 0x77,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL3,
+ /* Doze enabled, 2x scan period doze, no raw filter. */
+ .def = 0x40,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL4,
+ /* Average threshold. */
+ .def = 0x30,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL5,
+ /*
+ * Debouncer off, lowest average negative filter,
+ * highest average positive filter.
+ */
+ .def = 0x0f,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL6,
+ /* Proximity detection threshold: 280 */
+ .def = 0x0e,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL7,
+ /*
+ * No automatic compensation, compensate each pin
+ * independently, proximity hysteresis: 32, close
+ * debouncer off, far debouncer off.
+ */
+ .def = 0x00,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL8,
+ /* No stuck timeout, no periodic compensation. */
+ .def = 0x00,
+ },
+ {
+ .reg = SX9500_REG_PROX_CTRL0,
+ /* Scan period: 30ms, all sensors enabled. */
+ .def = 0x0f,
+ },
+};
+
+static int sx9500_init_device(struct iio_dev *indio_dev)
+{
+ struct sx9500_data *data = iio_priv(indio_dev);
+ int ret, i;
+ unsigned int val;
+
+ ret = regmap_write(data->regmap, SX9500_REG_IRQ_MSK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, SX9500_REG_RESET,
+ SX9500_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
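+ /* Reading the IRQ source register acknowledges any interrupt pending after the reset. */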
+ ret = regmap_read(data->regmap, SX9500_REG_IRQ_SRC, &val);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx9500_default_regs); i++) {
+ ret = regmap_write(data->regmap,
+ sx9500_default_regs[i].reg,
+ sx9500_default_regs[i].def);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sx9500_gpio_probe(struct i2c_client *client,
+ struct sx9500_data *data)
+{
+ struct device *dev;
+ struct gpio_desc *gpio;
+ int ret;
+
+ if (!client)
+ return -EINVAL;
+
+ dev = &client->dev;
+
+ /* data ready gpio interrupt pin */
+ gpio = devm_gpiod_get_index(dev, SX9500_GPIO_NAME, 0);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "acpi gpio get index failed\n");
+ return PTR_ERR(gpio);
+ }
+
+ ret = gpiod_direction_input(gpio);
+ if (ret)
+ return ret;
+
+ ret = gpiod_to_irq(gpio);
+
+ dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+ return ret;
+}
+
+static int sx9500_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct sx9500_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+ data->trigger_enabled = false;
+
+ data->regmap = devm_regmap_init_i2c(client, &sx9500_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ ret = sx9500_init_device(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = SX9500_DRIVER_NAME;
+ indio_dev->channels = sx9500_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sx9500_channels);
+ indio_dev->info = &sx9500_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
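+ /* Fall back to an ACPI-described GPIO interrupt if the I2C core did not provide one. */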
+ if (client->irq <= 0)
+ client->irq = sx9500_gpio_probe(client, data);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ sx9500_irq_handler, sx9500_irq_thread_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ SX9500_IRQ_NAME, indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d", indio_dev->name, indio_dev->id);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->dev.parent = &client->dev;
+ data->trig->ops = &sx9500_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = iio_trigger_register(data->trig);
+ if (ret)
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ sx9500_trigger_handler, NULL);
+ if (ret < 0)
+ goto out_trigger_unregister;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto out_buffer_cleanup;
+
+ return 0;
+
+out_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+out_trigger_unregister:
+ if (client->irq > 0)
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int sx9500_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct sx9500_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (client->irq > 0)
+ iio_trigger_unregister(data->trig);
+ kfree(data->buffer);
+
+ return 0;
+}
+
+static const struct acpi_device_id sx9500_acpi_match[] = {
+ {"SSX9500", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sx9500_acpi_match);
+
+static const struct i2c_device_id sx9500_id[] = {
+ {"sx9500", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, sx9500_id);
+
+static struct i2c_driver sx9500_driver = {
+ .driver = {
+ .name = SX9500_DRIVER_NAME,
+ .acpi_match_table = ACPI_PTR(sx9500_acpi_match),
+ },
+ .probe = sx9500_probe,
+ .remove = sx9500_remove,
+ .id_table = sx9500_id,
+};
+module_i2c_driver(sx9500_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Semtech SX9500 proximity sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 254c7e906127..3dfab2bc6d69 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -135,6 +135,7 @@ static int iio_sysfs_trigger_probe(int id)
struct iio_sysfs_trig *t;
int ret;
bool foundit = false;
+
mutex_lock(&iio_sysfs_trig_list_mut);
list_for_each_entry(t, &iio_sysfs_trig_list, l)
if (id == t->id) {
@@ -185,6 +186,7 @@ static int iio_sysfs_trigger_remove(int id)
{
bool foundit = false;
struct iio_sysfs_trig *t;
+
mutex_lock(&iio_sysfs_trig_list_mut);
list_for_each_entry(t, &iio_sysfs_trig_list, l)
if (id == t->id) {
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b716b0815644..643c08a025a5 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
-IB_UVERBS_DECLARE_EX_CMD(query_device);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 532d8eba8b02..b7943ff16ed3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,52 +400,6 @@ err:
return ret;
}
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
- struct ib_uverbs_query_device_resp *resp,
- struct ib_device_attr *attr)
-{
- resp->fw_ver = attr->fw_ver;
- resp->node_guid = file->device->ib_dev->node_guid;
- resp->sys_image_guid = attr->sys_image_guid;
- resp->max_mr_size = attr->max_mr_size;
- resp->page_size_cap = attr->page_size_cap;
- resp->vendor_id = attr->vendor_id;
- resp->vendor_part_id = attr->vendor_part_id;
- resp->hw_ver = attr->hw_ver;
- resp->max_qp = attr->max_qp;
- resp->max_qp_wr = attr->max_qp_wr;
- resp->device_cap_flags = attr->device_cap_flags;
- resp->max_sge = attr->max_sge;
- resp->max_sge_rd = attr->max_sge_rd;
- resp->max_cq = attr->max_cq;
- resp->max_cqe = attr->max_cqe;
- resp->max_mr = attr->max_mr;
- resp->max_pd = attr->max_pd;
- resp->max_qp_rd_atom = attr->max_qp_rd_atom;
- resp->max_ee_rd_atom = attr->max_ee_rd_atom;
- resp->max_res_rd_atom = attr->max_res_rd_atom;
- resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
- resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
- resp->atomic_cap = attr->atomic_cap;
- resp->max_ee = attr->max_ee;
- resp->max_rdd = attr->max_rdd;
- resp->max_mw = attr->max_mw;
- resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
- resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
- resp->max_mcast_grp = attr->max_mcast_grp;
- resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
- resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
- resp->max_ah = attr->max_ah;
- resp->max_fmr = attr->max_fmr;
- resp->max_map_per_fmr = attr->max_map_per_fmr;
- resp->max_srq = attr->max_srq;
- resp->max_srq_wr = attr->max_srq_wr;
- resp->max_srq_sge = attr->max_srq_sge;
- resp->max_pkeys = attr->max_pkeys;
- resp->local_ca_ack_delay = attr->local_ca_ack_delay;
- resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
-}
-
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return ret;
memset(&resp, 0, sizeof resp);
- copy_query_dev_fields(file, &resp, &attr);
+
+ resp.fw_ver = attr.fw_ver;
+ resp.node_guid = file->device->ib_dev->node_guid;
+ resp.sys_image_guid = attr.sys_image_guid;
+ resp.max_mr_size = attr.max_mr_size;
+ resp.page_size_cap = attr.page_size_cap;
+ resp.vendor_id = attr.vendor_id;
+ resp.vendor_part_id = attr.vendor_part_id;
+ resp.hw_ver = attr.hw_ver;
+ resp.max_qp = attr.max_qp;
+ resp.max_qp_wr = attr.max_qp_wr;
+ resp.device_cap_flags = attr.device_cap_flags;
+ resp.max_sge = attr.max_sge;
+ resp.max_sge_rd = attr.max_sge_rd;
+ resp.max_cq = attr.max_cq;
+ resp.max_cqe = attr.max_cqe;
+ resp.max_mr = attr.max_mr;
+ resp.max_pd = attr.max_pd;
+ resp.max_qp_rd_atom = attr.max_qp_rd_atom;
+ resp.max_ee_rd_atom = attr.max_ee_rd_atom;
+ resp.max_res_rd_atom = attr.max_res_rd_atom;
+ resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
+ resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
+ resp.atomic_cap = attr.atomic_cap;
+ resp.max_ee = attr.max_ee;
+ resp.max_rdd = attr.max_rdd;
+ resp.max_mw = attr.max_mw;
+ resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
+ resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
+ resp.max_mcast_grp = attr.max_mcast_grp;
+ resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
+ resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
+ resp.max_ah = attr.max_ah;
+ resp.max_fmr = attr.max_fmr;
+ resp.max_map_per_fmr = attr.max_map_per_fmr;
+ resp.max_srq = attr.max_srq;
+ resp.max_srq_wr = attr.max_srq_wr;
+ resp.max_srq_sge = attr.max_srq_sge;
+ resp.max_pkeys = attr.max_pkeys;
+ resp.local_ca_ack_delay = attr.local_ca_ack_delay;
+ resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
-
-int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
- struct ib_udata *ucore,
- struct ib_udata *uhw)
-{
- struct ib_uverbs_ex_query_device_resp resp;
- struct ib_uverbs_ex_query_device cmd;
- struct ib_device_attr attr;
- struct ib_device *device;
- int err;
-
- device = file->device->ib_dev;
- if (ucore->inlen < sizeof(cmd))
- return -EINVAL;
-
- err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
- if (err)
- return err;
-
- if (cmd.reserved)
- return -EINVAL;
-
- err = device->query_device(device, &attr);
- if (err)
- return err;
-
- memset(&resp, 0, sizeof(resp));
- copy_query_dev_fields(file, &resp.base, &attr);
- resp.comp_mask = 0;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
- resp.odp_caps.general_caps = attr.odp_caps.general_caps;
- resp.odp_caps.per_transport_caps.rc_odp_caps =
- attr.odp_caps.per_transport_caps.rc_odp_caps;
- resp.odp_caps.per_transport_caps.uc_odp_caps =
- attr.odp_caps.per_transport_caps.uc_odp_caps;
- resp.odp_caps.per_transport_caps.ud_odp_caps =
- attr.odp_caps.per_transport_caps.ud_odp_caps;
- resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
- }
-#endif
-
- err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
- if (err)
- return err;
-
- return 0;
-}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e6c23b9eab33..5db1a8cc388d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
[IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
- [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device
};
static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9edc200b311d..57176ddd4c50 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
- ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+ ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
((AF_INET == ep->com.remote_addr.ss_family) ?
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
sizeof(struct tcphdr);
ep->mss = ep->emss;
- if (GET_TCPOPT_TSTAMP(opt))
+ if (TCPOPT_TSTAMP_G(opt))
ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
- PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
ep->mss, ep->emss);
}
@@ -652,29 +652,29 @@ static int send_connect(struct c4iw_ep *ep)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- opt0 = (nocong ? NO_CONG(1) : 0) |
+ opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
- CCTRL_ECN(enable_ecn) |
+ CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
- opt2 |= TSTAMPS_EN(1);
+ opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack)
- opt2 |= SACK_EN(1);
+ opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
opt2 |= T5_OPT_2_VALID_F;
- opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
- unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
ep = lookup_atid(t, atid);
@@ -1258,8 +1258,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
- F_RX_DACK_CHANGE |
- V_RX_DACK_MODE(dack_mode));
+ RX_DACK_CHANGE_F |
+ RX_DACK_MODE_V(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+ req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
- (nocong ? NO_CONG(1) : 0) |
+ req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+ (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
- req->tcb.opt2 = (__force __be32) (PACE(1) |
- TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+ req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+ TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL_V(0) |
- CCTRL_ECN(enable_ecn) |
+ CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
if (enable_tcp_sack)
- req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32)SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_open_rpl *rpl = cplhdr(skb);
- unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
- ntohl(rpl->atid_status)));
+ unsigned int atid = TID_TID_G(AOPEN_ATID_G(
+ ntohl(rpl->atid_status)));
struct tid_info *t = dev->rdev.lldi.tids;
- int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+ int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
struct sockaddr_in *la;
struct sockaddr_in *ra;
struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.local_addr.ss_family == AF_INET &&
dev->rdev.lldi.enable_fw_ofld_conn) {
send_fw_act_open_req(ep,
- GET_TID_TID(GET_AOPEN_ATID(
+ TID_TID_G(AOPEN_ATID_G(
ntohl(rpl->atid_status))));
return 0;
}
@@ -2181,39 +2181,39 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- opt0 = (nocong ? NO_CONG(1) : 0) |
+ opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos >> 2) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
- opt2 |= TSTAMPS_EN(1);
+ opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack && req->tcpopt.sack)
- opt2 |= SACK_EN(1);
+ opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
- tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
- G_IP_HDR_LEN(hlen);
+ tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+ IP_HDR_LEN_G(hlen);
if (tcph->ece && tcph->cwr)
- opt2 |= CCTRL_ECN(1);
+ opt2 |= CCTRL_ECN_V(1);
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
- opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
@@ -2245,8 +2245,8 @@ static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
__u8 *local_ip, __u8 *peer_ip,
__be16 *local_port, __be16 *peer_port)
{
- int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
- int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+ int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
struct tcphdr *tcp = (struct tcphdr *)
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+ unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port,
- GET_POPEN_TOS(ntohl(req->tos_stid)));
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS(ntohl(req->tos_stid)),
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
- child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+ child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
child_ep->dst = dst;
child_ep->hwtid = hwtid;
@@ -3500,24 +3500,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
- V_SYN_MAC_IDX(G_RX_MACIDX(
+ req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
+ SYN_MAC_IDX_V(RX_MACIDX_G(
(__force int) htonl(l2info))) |
- F_SYN_XACT_MATCH);
+ SYN_XACT_MATCH_F);
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
- G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
- req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+ RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+ RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+ req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
(__force int) htonl(l2info))) |
- V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+ TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
(__force int) htons(hdr_len))) |
- V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+ IP_HDR_LEN_V(RX_IPHDR_LEN_G(
(__force int) htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+ ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
- req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
- PASS_OPEN_TOS(tos));
+ req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+ PASS_OPEN_TOS_V(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
if (tmp_opt.wscale_ok)
req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+ req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
FW_OFLD_CONNECTION_WR_ASTID_V(
- GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+ PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
/*
* We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct neighbour *neigh;
/* Drop all non-SYN packets */
- if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+ if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
goto reject;
/*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
}
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
- G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+ RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+ RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e9fd3a029296..ab7692ac2044 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -52,7 +52,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- V_FW_RI_RES_WR_NRES(1) |
+ FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@@ -122,7 +122,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- V_FW_RI_RES_WR_NRES(1) |
+ FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@@ -131,17 +131,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res->u.cq.op = FW_RI_RES_OP_WRITE;
res->u.cq.iqid = cpu_to_be32(cq->cqid);
res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
- V_FW_RI_RES_WR_IQANUS(0) |
- V_FW_RI_RES_WR_IQANUD(1) |
- F_FW_RI_RES_WR_IQANDST |
- V_FW_RI_RES_WR_IQANDSTINDEX(
+ FW_RI_RES_WR_IQANUS_V(0) |
+ FW_RI_RES_WR_IQANUD_V(1) |
+ FW_RI_RES_WR_IQANDST_F |
+ FW_RI_RES_WR_IQANDSTINDEX_V(
rdev->lldi.ciq_ids[cq->vector]));
res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
- F_FW_RI_RES_WR_IQDROPRSS |
- V_FW_RI_RES_WR_IQPCIECH(2) |
- V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
- F_FW_RI_RES_WR_IQO |
- V_FW_RI_RES_WR_IQESIZE(1));
+ FW_RI_RES_WR_IQDROPRSS_F |
+ FW_RI_RES_WR_IQPCIECH_V(2) |
+ FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
+ FW_RI_RES_WR_IQO_F |
+ FW_RI_RES_WR_IQESIZE_V(1));
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
@@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
- V_CQE_OPCODE(FW_RI_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->sq.qid));
- cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(FW_RI_SEND) |
+ CQE_TYPE_V(0) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(wq->sq.qid));
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
- V_CQE_OPCODE(swcqe->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->sq.qid));
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(swcqe->opcode) |
+ CQE_TYPE_V(1) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(wq->sq.qid));
CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
- cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
*/
PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
__func__, cidx, cq->sw_pidx);
- swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+ swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
swsqe->flushed = 1;
@@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
{
read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
read_cqe->len = htonl(wq->sq.oldest_read->read_len);
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(hw_cqe)) |
- V_CQE_OPCODE(FW_RI_READ_REQ) |
- V_CQE_TYPE(1));
+ read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
+ CQE_SWCQE_V(SW_CQE(hw_cqe)) |
+ CQE_OPCODE_V(FW_RI_READ_REQ) |
+ CQE_TYPE_V(1));
read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
@@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
} else {
swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
*swcqe = *hw_cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+ swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
t4_swcq_produce(&chp->cq);
}
next_cqe:
@@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
t4_set_wq_in_error(wq);
- hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+ hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
goto proc_cqe;
}
goto proc_cqe;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index eb5df4e62703..aafdbcd84fc4 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -380,12 +380,12 @@ static int dump_stag(int id, void *p, void *data)
"stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
(u32)id<<8,
- G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
- G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
if (cc < space)
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c9df0549f51d..794555dc86a5 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -50,12 +50,12 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
stag & 0xffffff00,
- G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
- G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index cb43c2299ac0..6791fd16272c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
- req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+ req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_NSGE(1));
+ ULPTX_NSGE_V(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
@@ -286,17 +286,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
else {
- tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
- V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
- V_FW_RI_TPTE_STAGSTATE(stag_state) |
- V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
- tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
- (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
- V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+ tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
+ FW_RI_TPTE_STAGSTATE_V(stag_state) |
+ FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
+ tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
+ FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
- V_FW_RI_TPTE_PS(page_size));
+ FW_RI_TPTE_PS_V(page_size));
tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
- V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+ FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
tpt.va_hi = cpu_to_be32((u32)(to >> 32));
tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bb85d479e66e..15cae5a31018 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -272,7 +272,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- V_FW_RI_RES_WR_NRES(2) |
+ FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@@ -287,19 +287,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
- V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
- V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
- (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
- V_FW_RI_RES_WR_IQID(scq->cqid));
+ FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
+ FW_RI_RES_WR_IQID_V(scq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- V_FW_RI_RES_WR_DCAEN(0) |
- V_FW_RI_RES_WR_DCACPU(0) |
- V_FW_RI_RES_WR_FBMIN(2) |
- V_FW_RI_RES_WR_FBMAX(2) |
- V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
- V_FW_RI_RES_WR_CIDXFTHRESH(0) |
- V_FW_RI_RES_WR_EQSIZE(eqsize));
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
res++;
@@ -312,18 +312,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
- V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
- V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
- V_FW_RI_RES_WR_IQID(rcq->cqid));
+ FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ FW_RI_RES_WR_IQID_V(rcq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- V_FW_RI_RES_WR_DCAEN(0) |
- V_FW_RI_RES_WR_DCACPU(0) |
- V_FW_RI_RES_WR_FBMIN(2) |
- V_FW_RI_RES_WR_FBMAX(2) |
- V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
- V_FW_RI_RES_WR_CIDXFTHRESH(0) |
- V_FW_RI_RES_WR_EQSIZE(eqsize));
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
@@ -444,19 +444,19 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
case IB_WR_SEND:
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
else
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
wqe->send.stag_inv = 0;
break;
case IB_WR_SEND_WITH_INV:
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
else
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
break;
@@ -1283,8 +1283,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
- V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
- V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+ FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
+ FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
if (qhp->attr.mpa_attr.recv_marker_enabled)
wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
@@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (mm5) {
mm5->key = uresp.ma_sync_key;
mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
- + A_PCIE_MA_SYNC) & PAGE_MASK;
+ + PCIE_MA_SYNC_A) & PAGE_MASK;
mm5->len = PAGE_SIZE;
insert_mmap(ucontext, mm5);
}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index c04e5134b30c..871cdcac7be2 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,7 +41,7 @@
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
@@ -184,44 +184,44 @@ struct t4_cqe {
/* macros for flit 0 of the cqe */
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0xFFFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define CQE_QPID_S 12
+#define CQE_QPID_M 0xFFFFF
+#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x) ((x)<<CQE_QPID_S)
+
+#define CQE_SWCQE_S 11
+#define CQE_SWCQE_M 0x1
+#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
+
+#define CQE_STATUS_S 5
+#define CQE_STATUS_M 0x1F
+#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S)
+
+#define CQE_TYPE_S 4
+#define CQE_TYPE_M 0x1
+#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S)
+
+#define CQE_OPCODE_S 0
+#define CQE_OPCODE_M 0xF
+#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
+
+#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header)))
#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))
@@ -237,25 +237,25 @@ struct t4_cqe {
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
/* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT 63
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+#define CQE_GENBIT_S 63
+#define CQE_GENBIT_M 0x1
+#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
-#define S_CQE_OVFBIT 62
-#define M_CQE_OVFBIT 0x1
-#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+#define CQE_OVFBIT_S 62
+#define CQE_OVFBIT_M 0x1
+#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
-#define S_CQE_IQTYPE 60
-#define M_CQE_IQTYPE 0x3
-#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+#define CQE_IQTYPE_S 60
+#define CQE_IQTYPE_M 0x3
+#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
-#define M_CQE_TS 0x0fffffffffffffffULL
-#define G_CQE_TS(x) ((x) & M_CQE_TS)
+#define CQE_TS_M 0x0fffffffffffffffULL
+#define CQE_TS_G(x) ((x) & CQE_TS_M)
-#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
struct t4_swsqe {
u64 wr_id;
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx);
- writel(PIDX_T5(inc), wq->sq.udb);
+ writel(PIDX_T5_V(inc), wq->sq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
- writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+ writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx);
- writel(PIDX_T5(inc), wq->rq.udb);
+ writel(PIDX_T5_V(inc), wq->rq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
- writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+ writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
u32 val;
set_bit(CQ_ARMED, &cq->flags);
- while (cq->cidx_inc > CIDXINC_MASK) {
- val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
+ while (cq->cidx_inc > CIDXINC_M) {
+ val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
- cq->cidx_inc -= CIDXINC_MASK;
+ cq->cidx_inc -= CIDXINC_M;
}
- val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
- INGRESSQID(cq->cqid);
+ val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
- if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+ if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val;
- val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
+ val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5709e77faf7c..5e53327fc647 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -162,102 +162,102 @@ struct fw_ri_tpte {
__be32 len_hi;
};
-#define S_FW_RI_TPTE_VALID 31
-#define M_FW_RI_TPTE_VALID 0x1
-#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
-#define G_FW_RI_TPTE_VALID(x) \
- (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
-#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
-
-#define S_FW_RI_TPTE_STAGKEY 23
-#define M_FW_RI_TPTE_STAGKEY 0xff
-#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
-#define G_FW_RI_TPTE_STAGKEY(x) \
- (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
-
-#define S_FW_RI_TPTE_STAGSTATE 22
-#define M_FW_RI_TPTE_STAGSTATE 0x1
-#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
-#define G_FW_RI_TPTE_STAGSTATE(x) \
- (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
-#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
-
-#define S_FW_RI_TPTE_STAGTYPE 20
-#define M_FW_RI_TPTE_STAGTYPE 0x3
-#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
-#define G_FW_RI_TPTE_STAGTYPE(x) \
- (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
-
-#define S_FW_RI_TPTE_PDID 0
-#define M_FW_RI_TPTE_PDID 0xfffff
-#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
-#define G_FW_RI_TPTE_PDID(x) \
- (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
-
-#define S_FW_RI_TPTE_PERM 28
-#define M_FW_RI_TPTE_PERM 0xf
-#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
-#define G_FW_RI_TPTE_PERM(x) \
- (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
-
-#define S_FW_RI_TPTE_REMINVDIS 27
-#define M_FW_RI_TPTE_REMINVDIS 0x1
-#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
-#define G_FW_RI_TPTE_REMINVDIS(x) \
- (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
-#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
-
-#define S_FW_RI_TPTE_ADDRTYPE 26
-#define M_FW_RI_TPTE_ADDRTYPE 1
-#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
-#define G_FW_RI_TPTE_ADDRTYPE(x) \
- (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
-#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
-
-#define S_FW_RI_TPTE_MWBINDEN 25
-#define M_FW_RI_TPTE_MWBINDEN 0x1
-#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
-#define G_FW_RI_TPTE_MWBINDEN(x) \
- (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
-#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
-
-#define S_FW_RI_TPTE_PS 20
-#define M_FW_RI_TPTE_PS 0x1f
-#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
-#define G_FW_RI_TPTE_PS(x) \
- (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
-
-#define S_FW_RI_TPTE_QPID 0
-#define M_FW_RI_TPTE_QPID 0xfffff
-#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
-#define G_FW_RI_TPTE_QPID(x) \
- (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
-
-#define S_FW_RI_TPTE_NOSNOOP 30
-#define M_FW_RI_TPTE_NOSNOOP 0x1
-#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
-#define G_FW_RI_TPTE_NOSNOOP(x) \
- (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
-#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
-
-#define S_FW_RI_TPTE_PBLADDR 0
-#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
-#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
-#define G_FW_RI_TPTE_PBLADDR(x) \
- (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
-
-#define S_FW_RI_TPTE_DCA 24
-#define M_FW_RI_TPTE_DCA 0x1f
-#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
-#define G_FW_RI_TPTE_DCA(x) \
- (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
-
-#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
-#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
-#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
- ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
-#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
- (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
+#define FW_RI_TPTE_VALID_S 31
+#define FW_RI_TPTE_VALID_M 0x1
+#define FW_RI_TPTE_VALID_V(x) ((x) << FW_RI_TPTE_VALID_S)
+#define FW_RI_TPTE_VALID_G(x) \
+ (((x) >> FW_RI_TPTE_VALID_S) & FW_RI_TPTE_VALID_M)
+#define FW_RI_TPTE_VALID_F FW_RI_TPTE_VALID_V(1U)
+
+#define FW_RI_TPTE_STAGKEY_S 23
+#define FW_RI_TPTE_STAGKEY_M 0xff
+#define FW_RI_TPTE_STAGKEY_V(x) ((x) << FW_RI_TPTE_STAGKEY_S)
+#define FW_RI_TPTE_STAGKEY_G(x) \
+ (((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M)
+
+#define FW_RI_TPTE_STAGSTATE_S 22
+#define FW_RI_TPTE_STAGSTATE_M 0x1
+#define FW_RI_TPTE_STAGSTATE_V(x) ((x) << FW_RI_TPTE_STAGSTATE_S)
+#define FW_RI_TPTE_STAGSTATE_G(x) \
+ (((x) >> FW_RI_TPTE_STAGSTATE_S) & FW_RI_TPTE_STAGSTATE_M)
+#define FW_RI_TPTE_STAGSTATE_F FW_RI_TPTE_STAGSTATE_V(1U)
+
+#define FW_RI_TPTE_STAGTYPE_S 20
+#define FW_RI_TPTE_STAGTYPE_M 0x3
+#define FW_RI_TPTE_STAGTYPE_V(x) ((x) << FW_RI_TPTE_STAGTYPE_S)
+#define FW_RI_TPTE_STAGTYPE_G(x) \
+ (((x) >> FW_RI_TPTE_STAGTYPE_S) & FW_RI_TPTE_STAGTYPE_M)
+
+#define FW_RI_TPTE_PDID_S 0
+#define FW_RI_TPTE_PDID_M 0xfffff
+#define FW_RI_TPTE_PDID_V(x) ((x) << FW_RI_TPTE_PDID_S)
+#define FW_RI_TPTE_PDID_G(x) \
+ (((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M)
+
+#define FW_RI_TPTE_PERM_S 28
+#define FW_RI_TPTE_PERM_M 0xf
+#define FW_RI_TPTE_PERM_V(x) ((x) << FW_RI_TPTE_PERM_S)
+#define FW_RI_TPTE_PERM_G(x) \
+ (((x) >> FW_RI_TPTE_PERM_S) & FW_RI_TPTE_PERM_M)
+
+#define FW_RI_TPTE_REMINVDIS_S 27
+#define FW_RI_TPTE_REMINVDIS_M 0x1
+#define FW_RI_TPTE_REMINVDIS_V(x) ((x) << FW_RI_TPTE_REMINVDIS_S)
+#define FW_RI_TPTE_REMINVDIS_G(x) \
+ (((x) >> FW_RI_TPTE_REMINVDIS_S) & FW_RI_TPTE_REMINVDIS_M)
+#define FW_RI_TPTE_REMINVDIS_F FW_RI_TPTE_REMINVDIS_V(1U)
+
+#define FW_RI_TPTE_ADDRTYPE_S 26
+#define FW_RI_TPTE_ADDRTYPE_M 1
+#define FW_RI_TPTE_ADDRTYPE_V(x) ((x) << FW_RI_TPTE_ADDRTYPE_S)
+#define FW_RI_TPTE_ADDRTYPE_G(x) \
+ (((x) >> FW_RI_TPTE_ADDRTYPE_S) & FW_RI_TPTE_ADDRTYPE_M)
+#define FW_RI_TPTE_ADDRTYPE_F FW_RI_TPTE_ADDRTYPE_V(1U)
+
+#define FW_RI_TPTE_MWBINDEN_S 25
+#define FW_RI_TPTE_MWBINDEN_M 0x1
+#define FW_RI_TPTE_MWBINDEN_V(x) ((x) << FW_RI_TPTE_MWBINDEN_S)
+#define FW_RI_TPTE_MWBINDEN_G(x) \
+ (((x) >> FW_RI_TPTE_MWBINDEN_S) & FW_RI_TPTE_MWBINDEN_M)
+#define FW_RI_TPTE_MWBINDEN_F FW_RI_TPTE_MWBINDEN_V(1U)
+
+#define FW_RI_TPTE_PS_S 20
+#define FW_RI_TPTE_PS_M 0x1f
+#define FW_RI_TPTE_PS_V(x) ((x) << FW_RI_TPTE_PS_S)
+#define FW_RI_TPTE_PS_G(x) \
+ (((x) >> FW_RI_TPTE_PS_S) & FW_RI_TPTE_PS_M)
+
+#define FW_RI_TPTE_QPID_S 0
+#define FW_RI_TPTE_QPID_M 0xfffff
+#define FW_RI_TPTE_QPID_V(x) ((x) << FW_RI_TPTE_QPID_S)
+#define FW_RI_TPTE_QPID_G(x) \
+ (((x) >> FW_RI_TPTE_QPID_S) & FW_RI_TPTE_QPID_M)
+
+#define FW_RI_TPTE_NOSNOOP_S 30
+#define FW_RI_TPTE_NOSNOOP_M 0x1
+#define FW_RI_TPTE_NOSNOOP_V(x) ((x) << FW_RI_TPTE_NOSNOOP_S)
+#define FW_RI_TPTE_NOSNOOP_G(x) \
+ (((x) >> FW_RI_TPTE_NOSNOOP_S) & FW_RI_TPTE_NOSNOOP_M)
+#define FW_RI_TPTE_NOSNOOP_F FW_RI_TPTE_NOSNOOP_V(1U)
+
+#define FW_RI_TPTE_PBLADDR_S 0
+#define FW_RI_TPTE_PBLADDR_M 0x1fffffff
+#define FW_RI_TPTE_PBLADDR_V(x) ((x) << FW_RI_TPTE_PBLADDR_S)
+#define FW_RI_TPTE_PBLADDR_G(x) \
+ (((x) >> FW_RI_TPTE_PBLADDR_S) & FW_RI_TPTE_PBLADDR_M)
+
+#define FW_RI_TPTE_DCA_S 24
+#define FW_RI_TPTE_DCA_M 0x1f
+#define FW_RI_TPTE_DCA_V(x) ((x) << FW_RI_TPTE_DCA_S)
+#define FW_RI_TPTE_DCA_G(x) \
+ (((x) >> FW_RI_TPTE_DCA_S) & FW_RI_TPTE_DCA_M)
+
+#define FW_RI_TPTE_MWBCNT_PSTAG_S 0
+#define FW_RI_TPTE_MWBCNT_PSTAG_M 0xffffff
+#define FW_RI_TPTE_MWBCNT_PSTAG_V(x) \
+ ((x) << FW_RI_TPTE_MWBCNT_PSTAG_S)
+#define FW_RI_TPTE_MWBCNT_PSTAG_G(x) \
+ (((x) >> FW_RI_TPTE_MWBCNT_PSTAG_S) & FW_RI_TPTE_MWBCNT_PSTAG_M)
enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
@@ -308,222 +308,222 @@ struct fw_ri_res_wr {
#endif
};
-#define S_FW_RI_RES_WR_NRES 0
-#define M_FW_RI_RES_WR_NRES 0xff
-#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
-#define G_FW_RI_RES_WR_NRES(x) \
- (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
-
-#define S_FW_RI_RES_WR_FETCHSZM 26
-#define M_FW_RI_RES_WR_FETCHSZM 0x1
-#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
-#define G_FW_RI_RES_WR_FETCHSZM(x) \
- (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
-#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
-
-#define S_FW_RI_RES_WR_STATUSPGNS 25
-#define M_FW_RI_RES_WR_STATUSPGNS 0x1
-#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
-#define G_FW_RI_RES_WR_STATUSPGNS(x) \
- (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
-#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
-
-#define S_FW_RI_RES_WR_STATUSPGRO 24
-#define M_FW_RI_RES_WR_STATUSPGRO 0x1
-#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
-#define G_FW_RI_RES_WR_STATUSPGRO(x) \
- (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
-#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
-
-#define S_FW_RI_RES_WR_FETCHNS 23
-#define M_FW_RI_RES_WR_FETCHNS 0x1
-#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
-#define G_FW_RI_RES_WR_FETCHNS(x) \
- (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
-#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
-
-#define S_FW_RI_RES_WR_FETCHRO 22
-#define M_FW_RI_RES_WR_FETCHRO 0x1
-#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
-#define G_FW_RI_RES_WR_FETCHRO(x) \
- (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
-#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
-
-#define S_FW_RI_RES_WR_HOSTFCMODE 20
-#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
-#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
-#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
- (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
-
-#define S_FW_RI_RES_WR_CPRIO 19
-#define M_FW_RI_RES_WR_CPRIO 0x1
-#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
-#define G_FW_RI_RES_WR_CPRIO(x) \
- (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
-#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
-
-#define S_FW_RI_RES_WR_ONCHIP 18
-#define M_FW_RI_RES_WR_ONCHIP 0x1
-#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
-#define G_FW_RI_RES_WR_ONCHIP(x) \
- (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
-#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
-
-#define S_FW_RI_RES_WR_PCIECHN 16
-#define M_FW_RI_RES_WR_PCIECHN 0x3
-#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
-#define G_FW_RI_RES_WR_PCIECHN(x) \
- (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
-
-#define S_FW_RI_RES_WR_IQID 0
-#define M_FW_RI_RES_WR_IQID 0xffff
-#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
-#define G_FW_RI_RES_WR_IQID(x) \
- (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
-
-#define S_FW_RI_RES_WR_DCAEN 31
-#define M_FW_RI_RES_WR_DCAEN 0x1
-#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
-#define G_FW_RI_RES_WR_DCAEN(x) \
- (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
-#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
-
-#define S_FW_RI_RES_WR_DCACPU 26
-#define M_FW_RI_RES_WR_DCACPU 0x1f
-#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
-#define G_FW_RI_RES_WR_DCACPU(x) \
- (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
-
-#define S_FW_RI_RES_WR_FBMIN 23
-#define M_FW_RI_RES_WR_FBMIN 0x7
-#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
-#define G_FW_RI_RES_WR_FBMIN(x) \
- (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
-
-#define S_FW_RI_RES_WR_FBMAX 20
-#define M_FW_RI_RES_WR_FBMAX 0x7
-#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
-#define G_FW_RI_RES_WR_FBMAX(x) \
- (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
-
-#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
-#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
-#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
-#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
- (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
-#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
-
-#define S_FW_RI_RES_WR_CIDXFTHRESH 16
-#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
-#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
-#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
- (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
-
-#define S_FW_RI_RES_WR_EQSIZE 0
-#define M_FW_RI_RES_WR_EQSIZE 0xffff
-#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
-#define G_FW_RI_RES_WR_EQSIZE(x) \
- (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
-
-#define S_FW_RI_RES_WR_IQANDST 15
-#define M_FW_RI_RES_WR_IQANDST 0x1
-#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
-#define G_FW_RI_RES_WR_IQANDST(x) \
- (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
-#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
-
-#define S_FW_RI_RES_WR_IQANUS 14
-#define M_FW_RI_RES_WR_IQANUS 0x1
-#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
-#define G_FW_RI_RES_WR_IQANUS(x) \
- (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
-#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
-
-#define S_FW_RI_RES_WR_IQANUD 12
-#define M_FW_RI_RES_WR_IQANUD 0x3
-#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
-#define G_FW_RI_RES_WR_IQANUD(x) \
- (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
-
-#define S_FW_RI_RES_WR_IQANDSTINDEX 0
-#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
-#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
-#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
- (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
-
-#define S_FW_RI_RES_WR_IQDROPRSS 15
-#define M_FW_RI_RES_WR_IQDROPRSS 0x1
-#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
-#define G_FW_RI_RES_WR_IQDROPRSS(x) \
- (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
-#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
-
-#define S_FW_RI_RES_WR_IQGTSMODE 14
-#define M_FW_RI_RES_WR_IQGTSMODE 0x1
-#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
-#define G_FW_RI_RES_WR_IQGTSMODE(x) \
- (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
-#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
-
-#define S_FW_RI_RES_WR_IQPCIECH 12
-#define M_FW_RI_RES_WR_IQPCIECH 0x3
-#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
-#define G_FW_RI_RES_WR_IQPCIECH(x) \
- (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
-
-#define S_FW_RI_RES_WR_IQDCAEN 11
-#define M_FW_RI_RES_WR_IQDCAEN 0x1
-#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
-#define G_FW_RI_RES_WR_IQDCAEN(x) \
- (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
-#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
-
-#define S_FW_RI_RES_WR_IQDCACPU 6
-#define M_FW_RI_RES_WR_IQDCACPU 0x1f
-#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
-#define G_FW_RI_RES_WR_IQDCACPU(x) \
- (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
-
-#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
-#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
-#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
- ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
-#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
- (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
-
-#define S_FW_RI_RES_WR_IQO 3
-#define M_FW_RI_RES_WR_IQO 0x1
-#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
-#define G_FW_RI_RES_WR_IQO(x) \
- (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
-#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
-
-#define S_FW_RI_RES_WR_IQCPRIO 2
-#define M_FW_RI_RES_WR_IQCPRIO 0x1
-#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
-#define G_FW_RI_RES_WR_IQCPRIO(x) \
- (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
-#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
-
-#define S_FW_RI_RES_WR_IQESIZE 0
-#define M_FW_RI_RES_WR_IQESIZE 0x3
-#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
-#define G_FW_RI_RES_WR_IQESIZE(x) \
- (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
-
-#define S_FW_RI_RES_WR_IQNS 31
-#define M_FW_RI_RES_WR_IQNS 0x1
-#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
-#define G_FW_RI_RES_WR_IQNS(x) \
- (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
-#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
-
-#define S_FW_RI_RES_WR_IQRO 30
-#define M_FW_RI_RES_WR_IQRO 0x1
-#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
-#define G_FW_RI_RES_WR_IQRO(x) \
- (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
-#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
+#define FW_RI_RES_WR_NRES_S 0
+#define FW_RI_RES_WR_NRES_M 0xff
+#define FW_RI_RES_WR_NRES_V(x) ((x) << FW_RI_RES_WR_NRES_S)
+#define FW_RI_RES_WR_NRES_G(x) \
+ (((x) >> FW_RI_RES_WR_NRES_S) & FW_RI_RES_WR_NRES_M)
+
+#define FW_RI_RES_WR_FETCHSZM_S 26
+#define FW_RI_RES_WR_FETCHSZM_M 0x1
+#define FW_RI_RES_WR_FETCHSZM_V(x) ((x) << FW_RI_RES_WR_FETCHSZM_S)
+#define FW_RI_RES_WR_FETCHSZM_G(x) \
+ (((x) >> FW_RI_RES_WR_FETCHSZM_S) & FW_RI_RES_WR_FETCHSZM_M)
+#define FW_RI_RES_WR_FETCHSZM_F FW_RI_RES_WR_FETCHSZM_V(1U)
+
+#define FW_RI_RES_WR_STATUSPGNS_S 25
+#define FW_RI_RES_WR_STATUSPGNS_M 0x1
+#define FW_RI_RES_WR_STATUSPGNS_V(x) ((x) << FW_RI_RES_WR_STATUSPGNS_S)
+#define FW_RI_RES_WR_STATUSPGNS_G(x) \
+ (((x) >> FW_RI_RES_WR_STATUSPGNS_S) & FW_RI_RES_WR_STATUSPGNS_M)
+#define FW_RI_RES_WR_STATUSPGNS_F FW_RI_RES_WR_STATUSPGNS_V(1U)
+
+#define FW_RI_RES_WR_STATUSPGRO_S 24
+#define FW_RI_RES_WR_STATUSPGRO_M 0x1
+#define FW_RI_RES_WR_STATUSPGRO_V(x) ((x) << FW_RI_RES_WR_STATUSPGRO_S)
+#define FW_RI_RES_WR_STATUSPGRO_G(x) \
+ (((x) >> FW_RI_RES_WR_STATUSPGRO_S) & FW_RI_RES_WR_STATUSPGRO_M)
+#define FW_RI_RES_WR_STATUSPGRO_F FW_RI_RES_WR_STATUSPGRO_V(1U)
+
+#define FW_RI_RES_WR_FETCHNS_S 23
+#define FW_RI_RES_WR_FETCHNS_M 0x1
+#define FW_RI_RES_WR_FETCHNS_V(x) ((x) << FW_RI_RES_WR_FETCHNS_S)
+#define FW_RI_RES_WR_FETCHNS_G(x) \
+ (((x) >> FW_RI_RES_WR_FETCHNS_S) & FW_RI_RES_WR_FETCHNS_M)
+#define FW_RI_RES_WR_FETCHNS_F FW_RI_RES_WR_FETCHNS_V(1U)
+
+#define FW_RI_RES_WR_FETCHRO_S 22
+#define FW_RI_RES_WR_FETCHRO_M 0x1
+#define FW_RI_RES_WR_FETCHRO_V(x) ((x) << FW_RI_RES_WR_FETCHRO_S)
+#define FW_RI_RES_WR_FETCHRO_G(x) \
+ (((x) >> FW_RI_RES_WR_FETCHRO_S) & FW_RI_RES_WR_FETCHRO_M)
+#define FW_RI_RES_WR_FETCHRO_F FW_RI_RES_WR_FETCHRO_V(1U)
+
+#define FW_RI_RES_WR_HOSTFCMODE_S 20
+#define FW_RI_RES_WR_HOSTFCMODE_M 0x3
+#define FW_RI_RES_WR_HOSTFCMODE_V(x) ((x) << FW_RI_RES_WR_HOSTFCMODE_S)
+#define FW_RI_RES_WR_HOSTFCMODE_G(x) \
+ (((x) >> FW_RI_RES_WR_HOSTFCMODE_S) & FW_RI_RES_WR_HOSTFCMODE_M)
+
+#define FW_RI_RES_WR_CPRIO_S 19
+#define FW_RI_RES_WR_CPRIO_M 0x1
+#define FW_RI_RES_WR_CPRIO_V(x) ((x) << FW_RI_RES_WR_CPRIO_S)
+#define FW_RI_RES_WR_CPRIO_G(x) \
+ (((x) >> FW_RI_RES_WR_CPRIO_S) & FW_RI_RES_WR_CPRIO_M)
+#define FW_RI_RES_WR_CPRIO_F FW_RI_RES_WR_CPRIO_V(1U)
+
+#define FW_RI_RES_WR_ONCHIP_S 18
+#define FW_RI_RES_WR_ONCHIP_M 0x1
+#define FW_RI_RES_WR_ONCHIP_V(x) ((x) << FW_RI_RES_WR_ONCHIP_S)
+#define FW_RI_RES_WR_ONCHIP_G(x) \
+ (((x) >> FW_RI_RES_WR_ONCHIP_S) & FW_RI_RES_WR_ONCHIP_M)
+#define FW_RI_RES_WR_ONCHIP_F FW_RI_RES_WR_ONCHIP_V(1U)
+
+#define FW_RI_RES_WR_PCIECHN_S 16
+#define FW_RI_RES_WR_PCIECHN_M 0x3
+#define FW_RI_RES_WR_PCIECHN_V(x) ((x) << FW_RI_RES_WR_PCIECHN_S)
+#define FW_RI_RES_WR_PCIECHN_G(x) \
+ (((x) >> FW_RI_RES_WR_PCIECHN_S) & FW_RI_RES_WR_PCIECHN_M)
+
+#define FW_RI_RES_WR_IQID_S 0
+#define FW_RI_RES_WR_IQID_M 0xffff
+#define FW_RI_RES_WR_IQID_V(x) ((x) << FW_RI_RES_WR_IQID_S)
+#define FW_RI_RES_WR_IQID_G(x) \
+ (((x) >> FW_RI_RES_WR_IQID_S) & FW_RI_RES_WR_IQID_M)
+
+#define FW_RI_RES_WR_DCAEN_S 31
+#define FW_RI_RES_WR_DCAEN_M 0x1
+#define FW_RI_RES_WR_DCAEN_V(x) ((x) << FW_RI_RES_WR_DCAEN_S)
+#define FW_RI_RES_WR_DCAEN_G(x) \
+ (((x) >> FW_RI_RES_WR_DCAEN_S) & FW_RI_RES_WR_DCAEN_M)
+#define FW_RI_RES_WR_DCAEN_F FW_RI_RES_WR_DCAEN_V(1U)
+
+#define FW_RI_RES_WR_DCACPU_S 26
+#define FW_RI_RES_WR_DCACPU_M 0x1f
+#define FW_RI_RES_WR_DCACPU_V(x) ((x) << FW_RI_RES_WR_DCACPU_S)
+#define FW_RI_RES_WR_DCACPU_G(x) \
+ (((x) >> FW_RI_RES_WR_DCACPU_S) & FW_RI_RES_WR_DCACPU_M)
+
+#define FW_RI_RES_WR_FBMIN_S 23
+#define FW_RI_RES_WR_FBMIN_M 0x7
+#define FW_RI_RES_WR_FBMIN_V(x) ((x) << FW_RI_RES_WR_FBMIN_S)
+#define FW_RI_RES_WR_FBMIN_G(x) \
+ (((x) >> FW_RI_RES_WR_FBMIN_S) & FW_RI_RES_WR_FBMIN_M)
+
+#define FW_RI_RES_WR_FBMAX_S 20
+#define FW_RI_RES_WR_FBMAX_M 0x7
+#define FW_RI_RES_WR_FBMAX_V(x) ((x) << FW_RI_RES_WR_FBMAX_S)
+#define FW_RI_RES_WR_FBMAX_G(x) \
+ (((x) >> FW_RI_RES_WR_FBMAX_S) & FW_RI_RES_WR_FBMAX_M)
+
+#define FW_RI_RES_WR_CIDXFTHRESHO_S 19
+#define FW_RI_RES_WR_CIDXFTHRESHO_M 0x1
+#define FW_RI_RES_WR_CIDXFTHRESHO_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESHO_S)
+#define FW_RI_RES_WR_CIDXFTHRESHO_G(x) \
+ (((x) >> FW_RI_RES_WR_CIDXFTHRESHO_S) & FW_RI_RES_WR_CIDXFTHRESHO_M)
+#define FW_RI_RES_WR_CIDXFTHRESHO_F FW_RI_RES_WR_CIDXFTHRESHO_V(1U)
+
+#define FW_RI_RES_WR_CIDXFTHRESH_S 16
+#define FW_RI_RES_WR_CIDXFTHRESH_M 0x7
+#define FW_RI_RES_WR_CIDXFTHRESH_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESH_S)
+#define FW_RI_RES_WR_CIDXFTHRESH_G(x) \
+ (((x) >> FW_RI_RES_WR_CIDXFTHRESH_S) & FW_RI_RES_WR_CIDXFTHRESH_M)
+
+#define FW_RI_RES_WR_EQSIZE_S 0
+#define FW_RI_RES_WR_EQSIZE_M 0xffff
+#define FW_RI_RES_WR_EQSIZE_V(x) ((x) << FW_RI_RES_WR_EQSIZE_S)
+#define FW_RI_RES_WR_EQSIZE_G(x) \
+ (((x) >> FW_RI_RES_WR_EQSIZE_S) & FW_RI_RES_WR_EQSIZE_M)
+
+#define FW_RI_RES_WR_IQANDST_S 15
+#define FW_RI_RES_WR_IQANDST_M 0x1
+#define FW_RI_RES_WR_IQANDST_V(x) ((x) << FW_RI_RES_WR_IQANDST_S)
+#define FW_RI_RES_WR_IQANDST_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANDST_S) & FW_RI_RES_WR_IQANDST_M)
+#define FW_RI_RES_WR_IQANDST_F FW_RI_RES_WR_IQANDST_V(1U)
+
+#define FW_RI_RES_WR_IQANUS_S 14
+#define FW_RI_RES_WR_IQANUS_M 0x1
+#define FW_RI_RES_WR_IQANUS_V(x) ((x) << FW_RI_RES_WR_IQANUS_S)
+#define FW_RI_RES_WR_IQANUS_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANUS_S) & FW_RI_RES_WR_IQANUS_M)
+#define FW_RI_RES_WR_IQANUS_F FW_RI_RES_WR_IQANUS_V(1U)
+
+#define FW_RI_RES_WR_IQANUD_S 12
+#define FW_RI_RES_WR_IQANUD_M 0x3
+#define FW_RI_RES_WR_IQANUD_V(x) ((x) << FW_RI_RES_WR_IQANUD_S)
+#define FW_RI_RES_WR_IQANUD_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANUD_S) & FW_RI_RES_WR_IQANUD_M)
+
+#define FW_RI_RES_WR_IQANDSTINDEX_S 0
+#define FW_RI_RES_WR_IQANDSTINDEX_M 0xfff
+#define FW_RI_RES_WR_IQANDSTINDEX_V(x) ((x) << FW_RI_RES_WR_IQANDSTINDEX_S)
+#define FW_RI_RES_WR_IQANDSTINDEX_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANDSTINDEX_S) & FW_RI_RES_WR_IQANDSTINDEX_M)
+
+#define FW_RI_RES_WR_IQDROPRSS_S 15
+#define FW_RI_RES_WR_IQDROPRSS_M 0x1
+#define FW_RI_RES_WR_IQDROPRSS_V(x) ((x) << FW_RI_RES_WR_IQDROPRSS_S)
+#define FW_RI_RES_WR_IQDROPRSS_G(x) \
+ (((x) >> FW_RI_RES_WR_IQDROPRSS_S) & FW_RI_RES_WR_IQDROPRSS_M)
+#define FW_RI_RES_WR_IQDROPRSS_F FW_RI_RES_WR_IQDROPRSS_V(1U)
+
+#define FW_RI_RES_WR_IQGTSMODE_S 14
+#define FW_RI_RES_WR_IQGTSMODE_M 0x1
+#define FW_RI_RES_WR_IQGTSMODE_V(x) ((x) << FW_RI_RES_WR_IQGTSMODE_S)
+#define FW_RI_RES_WR_IQGTSMODE_G(x) \
+ (((x) >> FW_RI_RES_WR_IQGTSMODE_S) & FW_RI_RES_WR_IQGTSMODE_M)
+#define FW_RI_RES_WR_IQGTSMODE_F FW_RI_RES_WR_IQGTSMODE_V(1U)
+
+#define FW_RI_RES_WR_IQPCIECH_S 12
+#define FW_RI_RES_WR_IQPCIECH_M 0x3
+#define FW_RI_RES_WR_IQPCIECH_V(x) ((x) << FW_RI_RES_WR_IQPCIECH_S)
+#define FW_RI_RES_WR_IQPCIECH_G(x) \
+ (((x) >> FW_RI_RES_WR_IQPCIECH_S) & FW_RI_RES_WR_IQPCIECH_M)
+
+#define FW_RI_RES_WR_IQDCAEN_S 11
+#define FW_RI_RES_WR_IQDCAEN_M 0x1
+#define FW_RI_RES_WR_IQDCAEN_V(x) ((x) << FW_RI_RES_WR_IQDCAEN_S)
+#define FW_RI_RES_WR_IQDCAEN_G(x) \
+ (((x) >> FW_RI_RES_WR_IQDCAEN_S) & FW_RI_RES_WR_IQDCAEN_M)
+#define FW_RI_RES_WR_IQDCAEN_F FW_RI_RES_WR_IQDCAEN_V(1U)
+
+#define FW_RI_RES_WR_IQDCACPU_S 6
+#define FW_RI_RES_WR_IQDCACPU_M 0x1f
+#define FW_RI_RES_WR_IQDCACPU_V(x) ((x) << FW_RI_RES_WR_IQDCACPU_S)
+#define FW_RI_RES_WR_IQDCACPU_G(x) \
+ (((x) >> FW_RI_RES_WR_IQDCACPU_S) & FW_RI_RES_WR_IQDCACPU_M)
+
+#define FW_RI_RES_WR_IQINTCNTTHRESH_S 4
+#define FW_RI_RES_WR_IQINTCNTTHRESH_M 0x3
+#define FW_RI_RES_WR_IQINTCNTTHRESH_V(x) \
+ ((x) << FW_RI_RES_WR_IQINTCNTTHRESH_S)
+#define FW_RI_RES_WR_IQINTCNTTHRESH_G(x) \
+ (((x) >> FW_RI_RES_WR_IQINTCNTTHRESH_S) & FW_RI_RES_WR_IQINTCNTTHRESH_M)
+
+#define FW_RI_RES_WR_IQO_S 3
+#define FW_RI_RES_WR_IQO_M 0x1
+#define FW_RI_RES_WR_IQO_V(x) ((x) << FW_RI_RES_WR_IQO_S)
+#define FW_RI_RES_WR_IQO_G(x) \
+ (((x) >> FW_RI_RES_WR_IQO_S) & FW_RI_RES_WR_IQO_M)
+#define FW_RI_RES_WR_IQO_F FW_RI_RES_WR_IQO_V(1U)
+
+#define FW_RI_RES_WR_IQCPRIO_S 2
+#define FW_RI_RES_WR_IQCPRIO_M 0x1
+#define FW_RI_RES_WR_IQCPRIO_V(x) ((x) << FW_RI_RES_WR_IQCPRIO_S)
+#define FW_RI_RES_WR_IQCPRIO_G(x) \
+ (((x) >> FW_RI_RES_WR_IQCPRIO_S) & FW_RI_RES_WR_IQCPRIO_M)
+#define FW_RI_RES_WR_IQCPRIO_F FW_RI_RES_WR_IQCPRIO_V(1U)
+
+#define FW_RI_RES_WR_IQESIZE_S 0
+#define FW_RI_RES_WR_IQESIZE_M 0x3
+#define FW_RI_RES_WR_IQESIZE_V(x) ((x) << FW_RI_RES_WR_IQESIZE_S)
+#define FW_RI_RES_WR_IQESIZE_G(x) \
+ (((x) >> FW_RI_RES_WR_IQESIZE_S) & FW_RI_RES_WR_IQESIZE_M)
+
+#define FW_RI_RES_WR_IQNS_S 31
+#define FW_RI_RES_WR_IQNS_M 0x1
+#define FW_RI_RES_WR_IQNS_V(x) ((x) << FW_RI_RES_WR_IQNS_S)
+#define FW_RI_RES_WR_IQNS_G(x) \
+ (((x) >> FW_RI_RES_WR_IQNS_S) & FW_RI_RES_WR_IQNS_M)
+#define FW_RI_RES_WR_IQNS_F FW_RI_RES_WR_IQNS_V(1U)
+
+#define FW_RI_RES_WR_IQRO_S 30
+#define FW_RI_RES_WR_IQRO_M 0x1
+#define FW_RI_RES_WR_IQRO_V(x) ((x) << FW_RI_RES_WR_IQRO_S)
+#define FW_RI_RES_WR_IQRO_G(x) \
+ (((x) >> FW_RI_RES_WR_IQRO_S) & FW_RI_RES_WR_IQRO_M)
+#define FW_RI_RES_WR_IQRO_F FW_RI_RES_WR_IQRO_V(1U)
struct fw_ri_rdma_write_wr {
__u8 opcode;
@@ -562,11 +562,11 @@ struct fw_ri_send_wr {
#endif
};
-#define S_FW_RI_SEND_WR_SENDOP 0
-#define M_FW_RI_SEND_WR_SENDOP 0xf
-#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
-#define G_FW_RI_SEND_WR_SENDOP(x) \
- (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+#define FW_RI_SEND_WR_SENDOP_S 0
+#define FW_RI_SEND_WR_SENDOP_M 0xf
+#define FW_RI_SEND_WR_SENDOP_V(x) ((x) << FW_RI_SEND_WR_SENDOP_S)
+#define FW_RI_SEND_WR_SENDOP_G(x) \
+ (((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
struct fw_ri_rdma_read_wr {
__u8 opcode;
@@ -612,25 +612,25 @@ struct fw_ri_bind_mw_wr {
__be64 r4;
};
-#define S_FW_RI_BIND_MW_WR_QPBINDE 6
-#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
-#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
-#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
- (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
-#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+#define FW_RI_BIND_MW_WR_QPBINDE_S 6
+#define FW_RI_BIND_MW_WR_QPBINDE_M 0x1
+#define FW_RI_BIND_MW_WR_QPBINDE_V(x) ((x) << FW_RI_BIND_MW_WR_QPBINDE_S)
+#define FW_RI_BIND_MW_WR_QPBINDE_G(x) \
+ (((x) >> FW_RI_BIND_MW_WR_QPBINDE_S) & FW_RI_BIND_MW_WR_QPBINDE_M)
+#define FW_RI_BIND_MW_WR_QPBINDE_F FW_RI_BIND_MW_WR_QPBINDE_V(1U)
-#define S_FW_RI_BIND_MW_WR_NS 5
-#define M_FW_RI_BIND_MW_WR_NS 0x1
-#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
-#define G_FW_RI_BIND_MW_WR_NS(x) \
- (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
-#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
+#define FW_RI_BIND_MW_WR_NS_S 5
+#define FW_RI_BIND_MW_WR_NS_M 0x1
+#define FW_RI_BIND_MW_WR_NS_V(x) ((x) << FW_RI_BIND_MW_WR_NS_S)
+#define FW_RI_BIND_MW_WR_NS_G(x) \
+ (((x) >> FW_RI_BIND_MW_WR_NS_S) & FW_RI_BIND_MW_WR_NS_M)
+#define FW_RI_BIND_MW_WR_NS_F FW_RI_BIND_MW_WR_NS_V(1U)
-#define S_FW_RI_BIND_MW_WR_DCACPU 0
-#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
-#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
-#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
- (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+#define FW_RI_BIND_MW_WR_DCACPU_S 0
+#define FW_RI_BIND_MW_WR_DCACPU_M 0x1f
+#define FW_RI_BIND_MW_WR_DCACPU_V(x) ((x) << FW_RI_BIND_MW_WR_DCACPU_S)
+#define FW_RI_BIND_MW_WR_DCACPU_G(x) \
+ (((x) >> FW_RI_BIND_MW_WR_DCACPU_S) & FW_RI_BIND_MW_WR_DCACPU_M)
struct fw_ri_fr_nsmr_wr {
__u8 opcode;
@@ -649,25 +649,25 @@ struct fw_ri_fr_nsmr_wr {
__be32 va_lo_fbo;
};
-#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
-#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
-#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
-#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
- (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
-#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+#define FW_RI_FR_NSMR_WR_QPBINDE_S 6
+#define FW_RI_FR_NSMR_WR_QPBINDE_M 0x1
+#define FW_RI_FR_NSMR_WR_QPBINDE_V(x) ((x) << FW_RI_FR_NSMR_WR_QPBINDE_S)
+#define FW_RI_FR_NSMR_WR_QPBINDE_G(x) \
+ (((x) >> FW_RI_FR_NSMR_WR_QPBINDE_S) & FW_RI_FR_NSMR_WR_QPBINDE_M)
+#define FW_RI_FR_NSMR_WR_QPBINDE_F FW_RI_FR_NSMR_WR_QPBINDE_V(1U)
-#define S_FW_RI_FR_NSMR_WR_NS 5
-#define M_FW_RI_FR_NSMR_WR_NS 0x1
-#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
-#define G_FW_RI_FR_NSMR_WR_NS(x) \
- (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
-#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
+#define FW_RI_FR_NSMR_WR_NS_S 5
+#define FW_RI_FR_NSMR_WR_NS_M 0x1
+#define FW_RI_FR_NSMR_WR_NS_V(x) ((x) << FW_RI_FR_NSMR_WR_NS_S)
+#define FW_RI_FR_NSMR_WR_NS_G(x) \
+ (((x) >> FW_RI_FR_NSMR_WR_NS_S) & FW_RI_FR_NSMR_WR_NS_M)
+#define FW_RI_FR_NSMR_WR_NS_F FW_RI_FR_NSMR_WR_NS_V(1U)
-#define S_FW_RI_FR_NSMR_WR_DCACPU 0
-#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
-#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
-#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
- (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+#define FW_RI_FR_NSMR_WR_DCACPU_S 0
+#define FW_RI_FR_NSMR_WR_DCACPU_M 0x1f
+#define FW_RI_FR_NSMR_WR_DCACPU_V(x) ((x) << FW_RI_FR_NSMR_WR_DCACPU_S)
+#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
+ (((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
struct fw_ri_inv_lstag_wr {
__u8 opcode;
@@ -740,18 +740,18 @@ struct fw_ri_wr {
} u;
};
-#define S_FW_RI_WR_MPAREQBIT 7
-#define M_FW_RI_WR_MPAREQBIT 0x1
-#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
-#define G_FW_RI_WR_MPAREQBIT(x) \
- (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
-#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
+#define FW_RI_WR_MPAREQBIT_S 7
+#define FW_RI_WR_MPAREQBIT_M 0x1
+#define FW_RI_WR_MPAREQBIT_V(x) ((x) << FW_RI_WR_MPAREQBIT_S)
+#define FW_RI_WR_MPAREQBIT_G(x) \
+ (((x) >> FW_RI_WR_MPAREQBIT_S) & FW_RI_WR_MPAREQBIT_M)
+#define FW_RI_WR_MPAREQBIT_F FW_RI_WR_MPAREQBIT_V(1U)
-#define S_FW_RI_WR_P2PTYPE 0
-#define M_FW_RI_WR_P2PTYPE 0xf
-#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
-#define G_FW_RI_WR_P2PTYPE(x) \
- (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#define FW_RI_WR_P2PTYPE_S 0
+#define FW_RI_WR_P2PTYPE_M 0xf
+#define FW_RI_WR_P2PTYPE_V(x) ((x) << FW_RI_WR_P2PTYPE_S)
+#define FW_RI_WR_P2PTYPE_G(x) \
+ (((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
struct tcp_options {
__be16 mss;
@@ -783,58 +783,58 @@ struct cpl_pass_accept_req {
};
/* cpl_pass_accept_req.hdr_len fields */
-#define S_SYN_RX_CHAN 0
-#define M_SYN_RX_CHAN 0xF
-#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
-#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
-
-#define S_TCP_HDR_LEN 10
-#define M_TCP_HDR_LEN 0x3F
-#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
-#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
-
-#define S_IP_HDR_LEN 16
-#define M_IP_HDR_LEN 0x3FF
-#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
-#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
-
-#define S_ETH_HDR_LEN 26
-#define M_ETH_HDR_LEN 0x1F
-#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
-#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+#define SYN_RX_CHAN_S 0
+#define SYN_RX_CHAN_M 0xF
+#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
+#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
+
+#define TCP_HDR_LEN_S 10
+#define TCP_HDR_LEN_M 0x3F
+#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
+#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define IP_HDR_LEN_S 16
+#define IP_HDR_LEN_M 0x3FF
+#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
+#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define ETH_HDR_LEN_S 26
+#define ETH_HDR_LEN_M 0x1F
+#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
+#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
/* cpl_pass_accept_req.l2info fields */
-#define S_SYN_MAC_IDX 0
-#define M_SYN_MAC_IDX 0x1FF
-#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
-#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+#define SYN_MAC_IDX_S 0
+#define SYN_MAC_IDX_M 0x1FF
+#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
+#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-#define S_SYN_XACT_MATCH 9
-#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
-#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
+#define SYN_XACT_MATCH_S 9
+#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
+#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
-#define S_SYN_INTF 12
-#define M_SYN_INTF 0xF
-#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
-#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+#define SYN_INTF_S 12
+#define SYN_INTF_M 0xF
+#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
+#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
struct ulptx_idata {
__be32 cmd_more;
__be32 len;
};
-#define S_ULPTX_NSGE 0
-#define M_ULPTX_NSGE 0xFFFF
-#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_M 0xFFFF
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-#define S_RX_DACK_MODE 29
-#define M_RX_DACK_MODE 0x3
-#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
-#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+#define RX_DACK_MODE_S 29
+#define RX_DACK_MODE_M 0x3
+#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
+#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-#define S_RX_DACK_CHANGE 31
-#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
-#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+#define RX_DACK_CHANGE_S 31
+#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
+#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
enum { /* TCP congestion control algorithms */
CONG_ALG_RENO,
@@ -843,10 +843,10 @@ enum { /* TCP congestion control algorithms */
CONG_ALG_HIGHSPEED
};
-#define S_CONG_CNTRL 14
-#define M_CONG_CNTRL 0x3
-#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
-#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+#define CONG_CNTRL_S 14
+#define CONG_CNTRL_M 0x3
+#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
+#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
#define CONG_CNTRL_VALID (1 << 18)
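/*
 * Editor's illustrative sketch (not part of the commit): how the renamed
 * FOO_S/_M/_V/_G/_F macro family in the hunks above is typically composed.
 * The three macro groups are copied from the hunk; the field values and the
 * standalone program around them are made up purely for demonstration (the
 * driver itself works on __be32 words with cpu_to_be32/be32_to_cpu).
 */
#include <stdio.h>
#include <stdint.h>

#define FW_RI_TPTE_VALID_S      31
#define FW_RI_TPTE_VALID_M      0x1
#define FW_RI_TPTE_VALID_V(x)   ((x) << FW_RI_TPTE_VALID_S)
#define FW_RI_TPTE_VALID_F      FW_RI_TPTE_VALID_V(1U)

#define FW_RI_TPTE_STAGKEY_S    23
#define FW_RI_TPTE_STAGKEY_M    0xff
#define FW_RI_TPTE_STAGKEY_V(x) ((x) << FW_RI_TPTE_STAGKEY_S)
#define FW_RI_TPTE_STAGKEY_G(x) (((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M)

#define FW_RI_TPTE_PDID_S       0
#define FW_RI_TPTE_PDID_M       0xfffff
#define FW_RI_TPTE_PDID_V(x)    ((x) << FW_RI_TPTE_PDID_S)
#define FW_RI_TPTE_PDID_G(x)    (((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M)

int main(void)
{
	/* _V() shifts a field value into position, _F is the one-bit flag form */
	uint32_t w = FW_RI_TPTE_VALID_F |
		     FW_RI_TPTE_STAGKEY_V(0x5aU) |
		     FW_RI_TPTE_PDID_V(0x1234U);

	/* _G() shifts back down and masks with _M to extract a field */
	printf("stagkey=0x%x pdid=0x%x\n",
	       (unsigned)FW_RI_TPTE_STAGKEY_G(w),
	       (unsigned)FW_RI_TPTE_PDID_G(w));
	return 0;
}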
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 2d8c3397774f..f50a546224ad 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0eb141c41416..a31e031afd87 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
continue;
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
- if (slave_id >= dev->dev->num_vfs + 1)
+ if (slave_id >= dev->dev->persist->num_vfs + 1)
return;
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035..543ecdd8667b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
+ INIT_LIST_HEAD(&cq->send_qp_list);
+ INIT_LIST_HEAD(&cq->recv_qp_list);
if (context) {
struct mlx4_ib_create_cq ucmd;
@@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
return 0;
}
+static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled, int is_send)
+{
+ struct mlx4_ib_wq *wq;
+ unsigned cur;
+ int i;
+
+ wq = is_send ? &qp->sq : &qp->rq;
+ cur = wq->head - wq->tail;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && *npolled < num_entries; i++) {
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ (*npolled)++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+}
+
+static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx4_ib_qp *qp;
+
+ *npolled = 0;
+ /* Find uncompleted WQEs belonging to that cq and return
+ * simulated FLUSH_ERR completions
+ */
+ list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+ mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
+ if (*npolled >= num_entries)
+ goto out;
+ }
+
+ list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
+ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
+ if (*npolled >= num_entries)
+ goto out;
+ }
+
+out:
+ return;
+}
+
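/*
 * Editor's illustrative sketch (not part of the commit): the software-flush
 * pattern used by mlx4_ib_qp_sw_comp() above, reduced to a standalone
 * program.  The simple_wq type and the sample values are made up; the point
 * is the head/tail arithmetic that turns every still-posted work request
 * into a simulated FLUSH_ERR completion once the device is in internal error.
 */
#include <stdio.h>

struct simple_wq {
	unsigned head;		/* incremented when a WR is posted */
	unsigned tail;		/* incremented when a WR is completed */
	unsigned wqe_cnt;	/* ring size, power of two */
	unsigned long wrid[8];	/* caller-supplied work-request IDs */
};

/* Emit up to num_entries flush completions for the outstanding WRs. */
static int flush_outstanding(struct simple_wq *wq, int num_entries)
{
	unsigned cur = wq->head - wq->tail;	/* outstanding WRs; wraps safely */
	unsigned i;
	int polled = 0;

	for (i = 0; i < cur && polled < num_entries; i++) {
		printf("wr_id %lu -> FLUSH_ERR\n",
		       wq->wrid[wq->tail & (wq->wqe_cnt - 1)]);
		wq->tail++;
		polled++;
	}
	return polled;
}

int main(void)
{
	struct simple_wq wq = { .head = 5, .tail = 2, .wqe_cnt = 8,
				.wrid = { 10, 11, 12, 13, 14, 15, 16, 17 } };

	/* three WRs (wr_id 12, 13, 14) are still outstanding */
	flush_outstanding(&wq, 16);
	return 0;
}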
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
unsigned long flags;
int npolled;
int err = 0;
+ struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ goto out;
+ }
for (npolled = 0; npolled < num_entries; ++npolled) {
err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
@@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
mlx4_cq_set_ci(&cq->mcq);
+out:
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 82a7dd87089b..c7619716c31d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
ctx->ib_dev = &dev->ib_dev;
for (i = 0;
- i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+ i < min(dev->dev->caps.sqp_demux,
+ (u16)(dev->dev->persist->num_vfs + 1));
i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev->dev, i);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57ecc5b204f3..eb8e215f1613 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
- props->vendor_part_id = dev->dev->pdev->device;
+ props->vendor_part_id = dev->dev->persist->pdev->device;
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
enum ib_mtu tmp;
struct mlx4_cmd_mailbox *mailbox;
int err = 0;
+ int is_bonded = mlx4_is_bonded(mdev->dev);
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
props->active_mtu = IB_MTU_256;
+ if (is_bonded)
+ rtnl_lock(); /* required to get upper dev */
spin_lock_bh(&iboe->lock);
ndev = iboe->netdevs[port - 1];
+ if (ndev && is_bonded)
+ ndev = netdev_master_upper_dev_get(ndev);
if (!ndev)
goto out_unlock;
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = state_to_phys_state(props->state);
out_unlock:
spin_unlock_bh(&iboe->lock);
+ if (is_bonded)
+ rtnl_unlock();
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
struct mlx4_ib_steering {
struct list_head list;
- u64 reg_id;
+ struct mlx4_flow_reg_id reg_id;
union ib_gid gid;
};
@@ -1114,7 +1121,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
struct mlx4_dev *dev = to_mdev(qp->device)->dev;
int err = 0;
- if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
ib_flow = flow_attr + 1;
@@ -1134,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
{
- int err = 0, i = 0;
+ int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
enum mlx4_net_trans_promisc_mode type[2];
+ struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+ int is_bonded = mlx4_is_bonded(dev);
memset(type, 0, sizeof(type));
@@ -1171,26 +1181,58 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
while (i < ARRAY_SIZE(type) && type[i]) {
err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
- &mflow->reg_id[i]);
+ &mflow->reg_id[i].id);
if (err)
goto err_create_flow;
i++;
+ if (is_bonded) {
+ /* Application always sees one port so the mirror rule
+ * must be on port #2
+ */
+ flow_attr->port = 2;
+ err = __mlx4_ib_create_flow(qp, flow_attr,
+ domain, type[j],
+ &mflow->reg_id[j].mirror);
+ flow_attr->port = 1;
+ if (err)
+ goto err_create_flow;
+ j++;
+ }
+
}
if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+ &mflow->reg_id[i].id);
if (err)
goto err_create_flow;
i++;
+ if (is_bonded) {
+ flow_attr->port = 2;
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+ &mflow->reg_id[j].mirror);
+ flow_attr->port = 1;
+ if (err)
+ goto err_create_flow;
+ j++;
+ }
+ /* function to create mirror rule */
}
return &mflow->ibflow;
err_create_flow:
while (i) {
- (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+ (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+ mflow->reg_id[i].id);
i--;
}
+
+ while (j) {
+ (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+ mflow->reg_id[j].mirror);
+ j--;
+ }
err_free:
kfree(mflow);
return ERR_PTR(err);
@@ -1203,10 +1245,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
struct mlx4_ib_flow *mflow = to_mflow(flow_id);
- while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
- err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+ while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+ err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
if (err)
ret = err;
+ if (mflow->reg_id[i].mirror) {
+ err = __mlx4_ib_destroy_flow(mdev->dev,
+ mflow->reg_id[i].mirror);
+ if (err)
+ ret = err;
+ }
i++;
}
@@ -1218,11 +1266,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_dev *dev = mdev->dev;
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
- u64 reg_id;
struct mlx4_ib_steering *ib_steering = NULL;
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+ struct mlx4_flow_reg_id reg_id;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1234,10 +1283,21 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- prot, &reg_id);
+ prot, &reg_id.id);
if (err)
goto err_malloc;
+ reg_id.mirror = 0;
+ if (mlx4_is_bonded(dev)) {
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+ (mqp->port == 1) ? 2 : 1,
+ !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ prot, &reg_id.mirror);
+ if (err)
+ goto err_add;
+ }
+
err = add_gid_entry(ibqp, gid);
if (err)
goto err_add;
@@ -1253,7 +1313,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err_add:
mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- prot, reg_id);
+ prot, reg_id.id);
+ if (reg_id.mirror)
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ prot, reg_id.mirror);
err_malloc:
kfree(ib_steering);
@@ -1280,10 +1343,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_dev *dev = mdev->dev;
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
- u64 reg_id = 0;
+ struct mlx4_flow_reg_id reg_id = {0, 0};
+
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
@@ -1308,10 +1373,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- prot, reg_id);
+ prot, reg_id.id);
if (err)
return err;
+ if (mlx4_is_bonded(dev)) {
+ err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ prot, reg_id.mirror);
+ if (err)
+ return err;
+ }
+
mutex_lock(&mqp->mutex);
ge = find_gid_entry(mqp, gid->raw);
if (ge) {
@@ -1375,7 +1447,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
+ return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -1439,6 +1511,7 @@ static void update_gids_task(struct work_struct *work)
union ib_gid *gids;
int err;
struct mlx4_dev *dev = gw->dev->dev;
+ int is_bonded = mlx4_is_bonded(dev);
if (!gw->dev->ib_active)
return;
@@ -1458,7 +1531,10 @@ static void update_gids_task(struct work_struct *work)
if (err)
pr_warn("set port command failed\n");
else
- mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+ if ((gw->port == 1) || !is_bonded)
+ mlx4_ib_dispatch_event(gw->dev,
+ is_bonded ? 1 : gw->port,
+ IB_EVENT_GID_CHANGE);
mlx4_free_cmd_mailbox(dev, mailbox);
kfree(gw);
@@ -1874,7 +1950,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
* don't want the bond IP based gids in the table since
* flows that select port by gid may get the down port.
*/
- if (port_state == IB_PORT_DOWN) {
+ if (port_state == IB_PORT_DOWN &&
+ !mlx4_is_bonded(ibdev->dev)) {
reset_gid_table(ibdev, port);
mlx4_ib_set_default_gid(ibdev,
curr_netdev,
@@ -1937,7 +2014,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
int i;
if (mlx4_is_master(ibdev->dev)) {
- for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+ for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
+ ++slave) {
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
for (i = 0;
i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1994,7 +2072,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
for (j = 0; j < eq_per_port; j++) {
snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
- i, j, dev->pdev->bus->name);
+ i, j, dev->persist->pdev->bus->name);
/* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(dev, name, NULL,
&ibdev->eq_table[eq])) {
@@ -2045,6 +2123,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int err;
struct mlx4_ib_iboe *iboe;
int ib_num_ports = 0;
+ int num_req_counters;
pr_info_once("%s", mlx4_ib_version);
@@ -2058,7 +2137,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
- dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+ dev_err(&dev->persist->pdev->dev,
+ "Device struct alloc failed\n");
return NULL;
}
@@ -2077,15 +2157,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
ibdev->dev = dev;
+ ibdev->bond_next_port = 0;
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
ibdev->num_ports = num_ports;
- ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
+ ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
+ 1 : ibdev->num_ports;
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
- ibdev->ib_dev.dma_device = &dev->pdev->dev;
+ ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
if (dev->caps.userspace_caps)
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2204,7 +2286,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
- for (i = 0; i < ibdev->num_ports; ++i) {
+ num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+ for (i = 0; i < num_req_counters; ++i) {
mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
@@ -2215,12 +2298,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
}
+ if (mlx4_is_bonded(dev))
+ for (i = 1; i < ibdev->num_ports ; ++i)
+ ibdev->counters[i] = ibdev->counters[0];
+
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
+ INIT_LIST_HEAD(&ibdev->qp_list);
+ spin_lock_init(&ibdev->reset_flow_resource_lock);
if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
ib_num_ports) {
@@ -2236,7 +2325,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
sizeof(long),
GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap) {
- dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+ dev_err(&dev->persist->pdev->dev,
+ "bit map alloc failed\n");
goto err_steer_qp_release;
}
@@ -2534,6 +2624,99 @@ out:
return;
}
+static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
+{
+ struct mlx4_ib_qp *mqp;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ struct mlx4_ib_cq *send_mcq, *recv_mcq;
+ struct list_head cq_notify_list;
+ struct mlx4_cq *mcq;
+ unsigned long flags;
+
+ pr_warn("mlx4_ib_handle_catas_error was started\n");
+ INIT_LIST_HEAD(&cq_notify_list);
+
+ /* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_notify_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ /* Now, handle the QP's receive queue */
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_notify_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+
+ list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
+ mcq->comp(mcq);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+ pr_warn("mlx4_ib_handle_catas_error ended\n");
+}
+
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+ struct ib_event_work *ew =
+ container_of(work, struct ib_event_work, work);
+ struct mlx4_ib_dev *ibdev = ew->ib_dev;
+ enum ib_port_state bonded_port_state = IB_PORT_NOP;
+ int i;
+ struct ib_event ibev;
+
+ kfree(ew);
+ spin_lock_bh(&ibdev->iboe.lock);
+ for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+ struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+
+ enum ib_port_state curr_port_state =
+ (netif_running(curr_netdev) &&
+ netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+ bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+ curr_port_state : IB_PORT_ACTIVE;
+ }
+ spin_unlock_bh(&ibdev->iboe.lock);
+
+ ibev.device = &ibdev->ib_dev;
+ ibev.element.port_num = 1;
+ ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+ ib_dispatch_event(&ibev);
+}
+
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, unsigned long param)
{
@@ -2543,6 +2726,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
struct ib_event_work *ew;
int p = 0;
+ if (mlx4_is_bonded(dev) &&
+ ((event == MLX4_DEV_EVENT_PORT_UP) ||
+ (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+ ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+ if (!ew)
+ return;
+ INIT_WORK(&ew->work, handle_bonded_port_state_event);
+ ew->ib_dev = ibdev;
+ queue_work(wq, &ew->work);
+ return;
+ }
+
if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
eqe = (struct mlx4_eqe *)param;
else
@@ -2569,6 +2764,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx4_ib_handle_catas_error(ibdev);
break;
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
@@ -2603,7 +2799,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
ibev.device = ibdev_ptr;
- ibev.element.port_num = (u8) p;
+ ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
ib_dispatch_event(&ibev);
}
@@ -2612,7 +2808,8 @@ static struct mlx4_interface mlx4_ib_interface = {
.add = mlx4_ib_add,
.remove = mlx4_ib_remove,
.event = mlx4_ib_event,
- .protocol = MLX4_PROT_IB_IPV6
+ .protocol = MLX4_PROT_IB_IPV6,
+ .flags = MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6f..f829fd935b79 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,9 @@ struct mlx4_ib_cq {
struct mutex resize_mutex;
struct ib_umem *umem;
struct ib_umem *resize_umem;
+ /* List of qps that it serves.*/
+ struct list_head send_qp_list;
+ struct list_head recv_qp_list;
};
struct mlx4_ib_mr {
@@ -134,10 +137,17 @@ struct mlx4_ib_fmr {
struct mlx4_fmr mfmr;
};
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+ u64 id;
+ u64 mirror;
+};
+
struct mlx4_ib_flow {
struct ib_flow ibflow;
/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
- u64 reg_id[2];
+ struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};
struct mlx4_ib_wq {
@@ -293,6 +303,9 @@ struct mlx4_ib_qp {
struct mlx4_roce_smac_vlan_info pri;
struct mlx4_roce_smac_vlan_info alt;
u64 reg_id;
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
};
struct mlx4_ib_srq {
@@ -527,6 +540,10 @@ struct mlx4_ib_dev {
struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
/* lock when destroying qp1_proxy and getting netdev events */
struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
+ u8 bond_next_port;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
};
struct ib_event_work {
@@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
return container_of(ibah, struct mlx4_ib_ah, ibah);
}
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+ dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+ return dev->bond_next_port + 1;
+}
+
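/*
 * Editor's illustrative sketch (not part of the commit): the round-robin
 * behaviour of mlx4_ib_bond_next_port() above.  The mini_dev type is made
 * up; with num_ports == 2 and bond_next_port initialised to 0 (as in
 * mlx4_ib_add), the helper alternates between the two physical ports so
 * QPs created on a bonded device are spread across both.
 */
#include <stdio.h>

struct mini_dev {
	unsigned char bond_next_port;
	int num_ports;
};

static unsigned char bond_next_port(struct mini_dev *dev)
{
	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
	return dev->bond_next_port + 1;	/* port numbers are 1-based */
}

int main(void)
{
	struct mini_dev dev = { .bond_next_port = 0, .num_ports = 2 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%d ", bond_next_port(&dev));	/* prints 2 1 2 1 2 1 */
	printf("\n");
	return 0;
}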
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index c36ccbd9a644..e0d271782d0a 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
if (!mfrpl->ibfrpl.page_list)
goto err_free;
- mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+ mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
+ pdev->dev,
size, &mfrpl->map,
GFP_KERNEL);
if (!mfrpl->mapped_page_list)
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
int size = page_list->max_page_list_len * sizeof (u64);
- dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
+ dma_free_coherent(&dev->dev->persist->pdev->dev, size,
+ mfrpl->mapped_page_list,
mfrpl->map);
kfree(mfrpl->ibfrpl.page_list);
kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64f..dfc6ca128a7e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -40,11 +40,17 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
+#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
+static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
+ struct mlx4_ib_cq *recv_cq);
+static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
+ struct mlx4_ib_cq *recv_cq);
+
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
};
@@ -93,17 +99,6 @@ enum {
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -628,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct mlx4_ib_sqp *sqp;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
+ struct mlx4_ib_cq *mcq;
+ unsigned long flags;
/* When tunneling special qps, we use a plain UD qp */
if (sqpn) {
@@ -838,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->mqp.event = mlx4_ib_qp_event;
if (!*caller_qp)
*caller_qp = qp;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ /* Keep the QP on the device's qp_list; needed for later handling
+ * via the reset flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Keep the QP on its send/recv CQ lists; needed for later handling
+ * via the reset flow
+ */
+ mcq = to_mcq(init_attr->send_cq);
+ list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+ mcq = to_mcq(init_attr->recv_cq);
+ list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+ mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_qpn:
@@ -896,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
if (send_cq == recv_cq) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
@@ -912,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
{
if (send_cq == recv_cq) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
}
@@ -963,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
struct mlx4_ib_cq *send_cq, *recv_cq;
+ unsigned long flags;
if (qp->state != IB_QPS_RESET) {
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
@@ -994,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
get_cqs(qp, &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ list_del(&qp->cq_send_list);
+ list_del(&qp->cq_recv_list);
if (!is_user) {
__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@@ -1006,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_qp_remove(dev->dev, &qp->mqp);
mlx4_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
mlx4_qp_free(dev->dev, &qp->mqp);
@@ -1915,6 +1937,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
+ if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+ if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+ if ((ibqp->qp_type == IB_QPT_RC) ||
+ (ibqp->qp_type == IB_QPT_UD) ||
+ (ibqp->qp_type == IB_QPT_UC) ||
+ (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+ (ibqp->qp_type == IB_QPT_XRC_INI)) {
+ attr->port_num = mlx4_ib_bond_next_port(dev);
+ }
+ } else {
+ /* no sense in changing port_num
+ * when ports are bonded */
+ attr_mask &= ~IB_QP_PORT;
+ }
+ }
+
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > dev->num_ports)) {
pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +2003,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+ attr->port_num = 1;
+
out:
mutex_unlock(&qp->mutex);
return err;
@@ -2609,8 +2650,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 uninitialized_var(lso_hdr_sz);
__be32 blh;
int i;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
spin_lock_irqsave(&qp->sq.lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
ind = qp->sq_next_wqe;
@@ -2908,10 +2956,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int ind;
int max_gs;
int i;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
max_gs = qp->rq.max_gs;
spin_lock_irqsave(&qp->rq.lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 62d9285300af..dce5dfe3a70e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
int err = 0;
int nreq;
int i;
+ struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
spin_lock_irqsave(&srq->lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
+out:
spin_unlock_irqrestore(&srq->lock, flags);
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index cb4c66e723b5..d10c2b8a5dad 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
char base_name[9];
/* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
- strlcpy(name, pci_name(dev->dev->pdev), max);
+ strlcpy(name, pci_name(dev->dev->persist->pdev), max);
strncpy(base_name, name, 8); /*till xxxx:yy:*/
base_name[8] = '\0';
/* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return 0;
- for (i = 0; i <= device->dev->num_vfs; ++i)
+ for (i = 0; i <= device->dev->persist->num_vfs; ++i)
register_one_pkey_tree(device, i);
return 0;
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return;
- for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+ for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
list_for_each_entry_safe(p, t,
&device->pkeys.pkey_port_list[slave],
entry) {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8a87404e9c76..03bf81211a54 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
- dev->ib_dev.uverbs_ex_cmd_mask =
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b56e4c5593ee..611a9fdf2f38 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
- m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+ m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
skip = 1 << m;
mask = skip - 1;
base = pfn;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 49eb5111d2cd..70acda91eb2a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, vlan_tx_tag_get(skb));
+ netdev->name, skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;
@@ -576,11 +576,12 @@ tso_sq_no_longer_full:
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, vlan_tx_tag_get(skb) );
+ netdev->name,
+ skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8ba80a6d3a46..d7562beb5423 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,15 +98,9 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- /*
- * For IPOIB_MCAST_FLAG_BUSY
- * When set, in flight join and mcast->mc is unreliable
- * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
- * haven't started yet
- * When clear and mcast->mc is valid pointer, join was successful
- */
- IPOIB_MCAST_FLAG_BUSY = 2,
+ IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
+ IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
- struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(priv->wq,
+ queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(priv->wq, &priv->cm.rx_reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(priv->wq, &priv->cm.rx_reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(priv->wq, &priv->cm.reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(priv->wq, &priv->cm.reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(priv->wq, &priv->cm.start_task);
+ queue_work(ipoib_workqueue, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(priv->wq, &priv->cm.reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(priv->wq, &priv->cm.skb_task);
+ queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(priv->wq,
+ queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe65abb5150c..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev);
+ ipoib_ib_dev_stop(dev, flush);
return -1;
}
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, flush);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
/* Wait for all AHs to be reaped */
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
- flush_workqueue(priv->wq);
+ if (flush)
+ flush_workqueue(ipoib_workqueue);
begin = jiffies;
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev)) {
+ if (ipoib_ib_dev_open(dev, 1)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_down(dev, 0);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev) != 0)
+ ipoib_ib_dev_stop(dev, 0);
+ if (ipoib_ib_dev_open(dev, 0) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6bad17d4d588..58b5aa3b6f2d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev)) {
+ if (ipoib_ib_dev_open(dev, 1)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev);
+ ipoib_ib_dev_stop(dev, 1);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev);
- ipoib_ib_dev_stop(dev);
+ ipoib_ib_dev_down(dev, 1);
+ ipoib_ib_dev_stop(dev, 0);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(priv->wq, &priv->restart_task);
+ queue_work(ipoib_workqueue, &priv->restart_task);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out;
+ goto out_neigh_hash_cleanup;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
- /*
- * Must be after ipoib_ib_dev_init so we can allocate a per
- * device wq there and use it here
- */
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out_dev_uninit;
-
return 0;
-out_dev_uninit:
- ipoib_ib_dev_cleanup(dev);
-
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
+out_neigh_hash_cleanup:
+ ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
- /*
- * Must be before ipoib_ib_dev_cleanup or we delete an in use
- * work queue
- */
- ipoib_neigh_hash_uninit(dev);
-
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
+
+ ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
+ flush_workqueue(ipoib_workqueue);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
+ flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
* unregister_netdev() and linkwatch_event take the rtnl lock,
* so flush_scheduled_work() can deadlock during device
* removal.
- *
- * In addition, bringing one device up and another down at the
- * same time can deadlock a single workqueue, so we have this
- * global fallback workqueue, but we also attempt to open a
- * per device workqueue each time we bring an interface up
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bc50dd0d0e4d..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
+
+ if (!ipoib_cm_admin_enabled(dev)) {
+ rtnl_lock();
+ dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+ rtnl_unlock();
+ }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
- /*
- * We have to take the mutex to force mcast_sendonly_join to
- * return from ib_sa_multicast_join and set mcast->mc to a
- * valid value. Otherwise we were racing with ourselves in
- * that we might fail here, but get a valid return from
- * ib_sa_multicast_join after we had cleared mcast->mc here,
- * resulting in mis-matched joins and leaves and a deadlock
- */
- mutex_lock(&mcast_mutex);
-
/* We trap for port events ourselves. */
if (status == -ENETRESET)
- goto out;
+ return 0;
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (status) {
if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
- "join failed for %pI6, status %d\n",
+ ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
mcast->mcmember.mgid.raw, status);
/* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_unlock_bh(dev);
+
+ /* Clear the busy flag so we try again */
+ status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+ &mcast->flags);
}
-out:
- clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- if (status)
- mcast->mc = NULL;
- complete(&mcast->done);
- if (status == -ENETRESET)
- status = 0;
- mutex_unlock(&mcast_mutex);
return status;
}
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
int ret = 0;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
- "multicast joins\n");
+ ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
return -ENODEV;
}
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
- "sendonly join\n");
+ if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
return -EBUSY;
}
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
rec.port_gid = priv->local_gid;
rec.pkey = cpu_to_be16(priv->pkey);
- mutex_lock(&mcast_mutex);
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
if (IS_ERR(mcast->mc)) {
ret = PTR_ERR(mcast->mc);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- complete(&mcast->done);
- ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
- "failed (ret = %d)\n", ret);
+ ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+ ret);
} else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
- "sendonly join\n", mcast->mcmember.mgid.raw);
+ ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+ mcast->mcmember.mgid.raw);
}
- mutex_unlock(&mcast_mutex);
return ret;
}
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
carrier_on_task);
struct ib_port_attr attr;
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed.
+ */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed. However, ipoib_stop() will attempt to flush
- * the workqueue while holding the rtnl lock, so loop
- * on trylock until either we get the lock or we see
- * FLAG_ADMIN_UP go away as that signals that we are bailing
- * and can safely ignore the carrier on work.
- */
- while (!rtnl_trylock()) {
- if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- return;
- else
- msleep(20);
- }
- if (!ipoib_cm_admin_enabled(priv->dev))
- dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+ rtnl_lock();
netif_carrier_on(priv->dev);
rtnl_unlock();
}
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
mcast->mcmember.mgid.raw, status);
- /*
- * We have to take the mutex to force mcast_join to
- * return from ib_sa_multicast_join and set mcast->mc to a
- * valid value. Otherwise we were racing with ourselves in
- * that we might fail here, but get a valid return from
- * ib_sa_multicast_join after we had cleared mcast->mc here,
- * resulting in mis-matched joins and leaves and a deadlock
- */
- mutex_lock(&mcast_mutex);
-
/* We trap for port events ourselves. */
- if (status == -ENETRESET)
+ if (status == -ENETRESET) {
+ status = 0;
goto out;
+ }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (!status) {
mcast->backoff = 1;
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, 0);
+ mutex_unlock(&mcast_mutex);
/*
- * Defer carrier on work to priv->wq to avoid a
+ * Defer carrier on work to ipoib_workqueue to avoid a
* deadlock on rtnl_lock here.
*/
if (mcast == priv->broadcast)
- queue_work(priv->wq, &priv->carrier_on_task);
- } else {
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- } else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- }
- }
+ queue_work(ipoib_workqueue, &priv->carrier_on_task);
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ status = 0;
+ goto out;
}
-out:
+
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ }
+ }
+
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+ /* Clear the busy flag so we try again */
+ status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+ mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
- clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- if (status)
- mcast->mc = NULL;
- complete(&mcast->done);
- if (status == -ENETRESET)
- status = 0;
- if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task,
+ if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-
+out:
+ complete(&mcast->done);
return status;
}
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- mutex_lock(&mcast_mutex);
- init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task,
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task,
mcast->backoff * HZ);
+ mutex_unlock(&mcast_mutex);
}
- mutex_unlock(&mcast_mutex);
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
ipoib_warn(priv, "failed to allocate broadcast group\n");
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task,
- HZ);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, HZ);
mutex_unlock(&mcast_mutex);
return;
}
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
- !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+ if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
ipoib_mcast_join(dev, priv->broadcast, 0);
return;
}
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
while (1) {
struct ipoib_mcast *mcast = NULL;
- /*
- * Need the mutex so our flags are consistent, need the
- * priv->lock so we don't race with list removals in either
- * mcast_dev_flush or mcast_restart_task
- */
- mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (IS_ERR_OR_NULL(mcast->mc) &&
- !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
- !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+ && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+ && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
/* Found the next unjoined group */
break;
}
}
spin_unlock_irq(&priv->lock);
- mutex_unlock(&mcast_mutex);
if (&mcast->list == &priv->multicast_list) {
/* All done */
break;
}
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
- else
- ipoib_mcast_join(dev, mcast, 1);
+ ipoib_mcast_join(dev, mcast, 1);
return;
}
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
cancel_delayed_work(&priv->mcast_task);
mutex_unlock(&mcast_mutex);
- flush_workqueue(priv->wq);
+ if (flush)
+ flush_workqueue(ipoib_workqueue);
return 0;
}
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
- if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
- if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}
if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
ipoib_dbg_mcast(priv, "no address vector, "
"but multicast join already started\n");
+ else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_mcast_sendonly_join(mcast);
/*
* If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
+ /* separate the wait from the leave */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
+ ipoib_mcast_stop_thread(dev, 0);
+
local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
- /*
- * We have to cancel outside of the spinlock, but we have to
- * take the rtnl lock or else we race with the removal of
- * entries from the remove list in mcast_dev_flush as part
- * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
- * to signal that we have hit this particular race, and we
- * return since we know we don't need to do anything else
- * anyway.
- */
- while (!rtnl_trylock()) {
- if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- return;
- else
- msleep(20);
- }
+ /* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
- /*
- * Restart our join task if needed
- */
- ipoib_mcast_start_thread(dev);
- rtnl_unlock();
+
+ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ ipoib_mcast_start_thread(dev);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b72a753eb41d..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
- /*
- * the various IPoIB tasks assume they will never race against
- * themselves, so always use a single thread workqueue
- */
- priv->wq = create_singlethread_workqueue("ipoib_wq");
- if (!priv->wq) {
- printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
- return -ENODEV;
- }
-
priv->pd = ib_alloc_pd(priv->ca);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
- goto out_free_wq;
+ return -ENODEV;
}
priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
out_free_pd:
ib_dealloc_pd(priv->pd);
-
-out_free_wq:
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
return -ENODEV;
}
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
- if (priv->wq) {
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
- }
}
void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 18d4b2c8fe55..a18f41b89b6a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -62,26 +62,6 @@ struct evdev_client {
struct input_event buffer[];
};
-static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
-{
- switch (clkid) {
-
- case CLOCK_REALTIME:
- client->clk_type = EV_CLK_REAL;
- break;
- case CLOCK_MONOTONIC:
- client->clk_type = EV_CLK_MONO;
- break;
- case CLOCK_BOOTTIME:
- client->clk_type = EV_CLK_BOOT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
@@ -128,10 +108,8 @@ static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
client->head = head;
}
-/* queue SYN_DROPPED event */
-static void evdev_queue_syn_dropped(struct evdev_client *client)
+static void __evdev_queue_syn_dropped(struct evdev_client *client)
{
- unsigned long flags;
struct input_event ev;
ktime_t time;
@@ -146,8 +124,6 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
ev.code = SYN_DROPPED;
ev.value = 0;
- spin_lock_irqsave(&client->buffer_lock, flags);
-
client->buffer[client->head++] = ev;
client->head &= client->bufsize - 1;
@@ -156,8 +132,53 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
client->tail = (client->head - 1) & (client->bufsize - 1);
client->packet_head = client->tail;
}
+}
+
+static void evdev_queue_syn_dropped(struct evdev_client *client)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->buffer_lock, flags);
+ __evdev_queue_syn_dropped(client);
+ spin_unlock_irqrestore(&client->buffer_lock, flags);
+}
+
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+ unsigned long flags;
+
+ if (client->clk_type == clkid)
+ return 0;
+
+ switch (clkid) {
+
+ case CLOCK_REALTIME:
+ client->clk_type = EV_CLK_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ client->clk_type = EV_CLK_MONO;
+ break;
+ case CLOCK_BOOTTIME:
+ client->clk_type = EV_CLK_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Flush pending events and queue SYN_DROPPED event,
+ * but only if the queue is not empty.
+ */
+ spin_lock_irqsave(&client->buffer_lock, flags);
+
+ if (client->head != client->tail) {
+ client->packet_head = client->head = client->tail;
+ __evdev_queue_syn_dropped(client);
+ }
spin_unlock_irqrestore(&client->buffer_lock, flags);
+
+ return 0;
}
static void __pass_event(struct evdev_client *client,
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index fbe29fcb15c5..cb150a1dbaff 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -293,7 +293,7 @@ void input_mt_sync_frame(struct input_dev *dev)
}
EXPORT_SYMBOL(input_mt_sync_frame);
-static int adjust_dual(int *begin, int step, int *end, int eq)
+static int adjust_dual(int *begin, int step, int *end, int eq, int mu)
{
int f, *p, s, c;
@@ -311,9 +311,10 @@ static int adjust_dual(int *begin, int step, int *end, int eq)
s = *p;
c = (f + s + 1) / 2;
- if (c == 0 || (c > 0 && !eq))
+ if (c == 0 || (c > mu && (!eq || mu > 0)))
return 0;
- if (s < 0)
+ /* Improve convergence for positive matrices by penalizing overcovers */
+ if (s < 0 && mu <= 0)
c *= 2;
for (p = begin; p != end; p += step)
@@ -322,23 +323,24 @@ static int adjust_dual(int *begin, int step, int *end, int eq)
return (c < s && s <= 0) || (f >= 0 && f < c);
}
-static void find_reduced_matrix(int *w, int nr, int nc, int nrc)
+static void find_reduced_matrix(int *w, int nr, int nc, int nrc, int mu)
{
int i, k, sum;
for (k = 0; k < nrc; k++) {
for (i = 0; i < nr; i++)
- adjust_dual(w + i, nr, w + i + nrc, nr <= nc);
+ adjust_dual(w + i, nr, w + i + nrc, nr <= nc, mu);
sum = 0;
for (i = 0; i < nrc; i += nr)
- sum += adjust_dual(w + i, 1, w + i + nr, nc <= nr);
+ sum += adjust_dual(w + i, 1, w + i + nr, nc <= nr, mu);
if (!sum)
break;
}
}
static int input_mt_set_matrix(struct input_mt *mt,
- const struct input_mt_pos *pos, int num_pos)
+ const struct input_mt_pos *pos, int num_pos,
+ int mu)
{
const struct input_mt_pos *p;
struct input_mt_slot *s;
@@ -352,7 +354,7 @@ static int input_mt_set_matrix(struct input_mt *mt,
y = input_mt_get_value(s, ABS_MT_POSITION_Y);
for (p = pos; p != pos + num_pos; p++) {
int dx = x - p->x, dy = y - p->y;
- *w++ = dx * dx + dy * dy;
+ *w++ = dx * dx + dy * dy - mu;
}
}
@@ -393,17 +395,24 @@ static void input_mt_set_slots(struct input_mt *mt,
* @slots: the slot assignment to be filled
* @pos: the position array to match
* @num_pos: number of positions
+ * @dmax: maximum ABS_MT_POSITION displacement (zero for infinite)
*
* Performs a best match against the current contacts and returns
* the slot assignment list. New contacts are assigned to unused
* slots.
*
+ * The assignments are balanced so that all coordinate displacements are
+ * below the euclidean distance dmax. If no such assignment can be found,
+ * some contacts are assigned to unused slots.
+ *
* Returns zero on success, or negative error in case of failure.
*/
int input_mt_assign_slots(struct input_dev *dev, int *slots,
- const struct input_mt_pos *pos, int num_pos)
+ const struct input_mt_pos *pos, int num_pos,
+ int dmax)
{
struct input_mt *mt = dev->mt;
+ int mu = 2 * dmax * dmax;
int nrc;
if (!mt || !mt->red)
@@ -413,8 +422,8 @@ int input_mt_assign_slots(struct input_dev *dev, int *slots,
if (num_pos < 1)
return 0;
- nrc = input_mt_set_matrix(mt, pos, num_pos);
- find_reduced_matrix(mt->red, num_pos, nrc / num_pos, nrc);
+ nrc = input_mt_set_matrix(mt, pos, num_pos, mu);
+ find_reduced_matrix(mt->red, num_pos, nrc / num_pos, nrc, mu);
input_mt_set_slots(mt, slots, num_pos);
return 0;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 213e3a1903ee..cc357f1516a7 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -100,23 +100,24 @@ static unsigned int input_to_handler(struct input_handle *handle,
struct input_value *end = vals;
struct input_value *v;
- for (v = vals; v != vals + count; v++) {
- if (handler->filter &&
- handler->filter(handle, v->type, v->code, v->value))
- continue;
- if (end != v)
- *end = *v;
- end++;
+ if (handler->filter) {
+ for (v = vals; v != vals + count; v++) {
+ if (handler->filter(handle, v->type, v->code, v->value))
+ continue;
+ if (end != v)
+ *end = *v;
+ end++;
+ }
+ count = end - vals;
}
- count = end - vals;
if (!count)
return 0;
if (handler->events)
handler->events(handle, vals, count);
else if (handler->event)
- for (v = vals; v != end; v++)
+ for (v = vals; v != vals + count; v++)
handler->event(handle, v->type, v->code, v->value);
return count;
@@ -143,8 +144,11 @@ static void input_pass_values(struct input_dev *dev,
count = input_to_handler(handle, vals, count);
} else {
list_for_each_entry_rcu(handle, &dev->h_list, d_node)
- if (handle->open)
+ if (handle->open) {
count = input_to_handler(handle, vals, count);
+ if (!count)
+ break;
+ }
}
rcu_read_unlock();
@@ -152,12 +156,14 @@ static void input_pass_values(struct input_dev *dev,
add_input_randomness(vals->type, vals->code, vals->value);
/* trigger auto repeat for key events */
- for (v = vals; v != vals + count; v++) {
- if (v->type == EV_KEY && v->value != 2) {
- if (v->value)
- input_start_autorepeat(dev, v->code);
- else
- input_stop_autorepeat(dev);
+ if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
+ for (v = vals; v != vals + count; v++) {
+ if (v->type == EV_KEY && v->value != 2) {
+ if (v->value)
+ input_start_autorepeat(dev, v->code);
+ else
+ input_stop_autorepeat(dev);
+ }
}
}
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a5d9b3f3c871..a89ba7cb96f1 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -568,6 +568,16 @@ config KEYBOARD_STMPE
To compile this driver as a module, choose M here: the module will be
called stmpe-keypad.
+config KEYBOARD_SUN4I_LRADC
+ tristate "Allwinner sun4i low res adc attached tablet keys support"
+ depends on ARCH_SUNXI
+ help
+ This selects support for the Allwinner low res adc attached tablet
+ keys found on Allwinner sunxi SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun4i-lradc-keys.
+
config KEYBOARD_DAVINCI
tristate "TI DaVinci Key Scan"
depends on ARCH_DAVINCI_DM365
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index febafa527eb6..470767884bd8 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_KEYBOARD_SPEAR) += spear-keyboard.o
obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_ST_KEYSCAN) += st-keyscan.o
+obj-$(CONFIG_KEYBOARD_SUN4I_LRADC) += sun4i-lradc-keys.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o
obj-$(CONFIG_KEYBOARD_TEGRA) += tegra-kbc.o
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 10bcd4ae5402..f1235831283d 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -170,7 +170,7 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
- [96] = KEY_BACKSLASH, /* FIXME: '<' */
+ [96] = KEY_102ND,
[97] = KEY_KPASTERISK, /* FIXME */
[98] = KEY_KPSLASH,
[99] = KEY_KPLEFTPAREN,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index e27a25892db4..387c51f4b4e4 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1399,8 +1399,8 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
static ssize_t atkbd_show_force_release(struct atkbd *atkbd, char *buf)
{
- size_t len = bitmap_scnlistprintf(buf, PAGE_SIZE - 2,
- atkbd->force_release_mask, ATKBD_KEYMAP_SIZE);
+ size_t len = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ ATKBD_KEYMAP_SIZE, atkbd->force_release_mask);
buf[len++] = '\n';
buf[len] = '\0';
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 4f59f0bab28f..f07461a64d85 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -370,7 +370,6 @@ static struct i2c_driver cap11xx_i2c_driver = {
module_i2c_driver(cap11xx_i2c_driver);
-MODULE_ALIAS("platform:cap11xx");
MODULE_DESCRIPTION("Microchip CAP11XX driver");
MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 883d6aed5b9a..ddf4045de084 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -190,7 +190,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
__set_bit(bdata->button->code, bits);
}
- ret = bitmap_scnlistprintf(buf, PAGE_SIZE - 2, bits, n_events);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", n_events, bits);
buf[ret++] = '\n';
buf[ret] = '\0';
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index e53f232eda0e..2e855e6f3565 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -448,8 +448,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}
- keypad = devm_kzalloc(&pdev->dev, sizeof(struct imx_keypad),
- GFP_KERNEL);
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
if (!keypad) {
dev_err(&pdev->dev, "not enough memory for driver data\n");
return -ENOMEM;
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a90d6bdc499e..a89488aa1aa4 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -28,10 +29,6 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <mach/hardware.h>
#include <linux/platform_data/keypad-pxa27x.h>
/*
* Keypad Controller registers
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
new file mode 100644
index 000000000000..cc8f7ddcee53
--- /dev/null
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -0,0 +1,286 @@
+/*
+ * Allwinner sun4i low res adc attached tablet keys driver
+ *
+ * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Allwinner sunxi SoCs have an lradc which is specifically designed to have
+ * various (tablet) keys (i.e. home, back, search, etc.) attached to it using
+ * a resistor network. This driver is for the keys on such boards.
+ *
+ * There are two channels; currently this driver only supports channel 0, since
+ * there are no boards known to use channel 1.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define LRADC_CTRL 0x00
+#define LRADC_INTC 0x04
+#define LRADC_INTS 0x08
+#define LRADC_DATA0 0x0c
+#define LRADC_DATA1 0x10
+
+/* LRADC_CTRL bits */
+#define FIRST_CONVERT_DLY(x) ((x) << 24) /* 8 bits */
+#define CHAN_SELECT(x) ((x) << 22) /* 2 bits */
+#define CONTINUE_TIME_SEL(x) ((x) << 16) /* 4 bits */
+#define KEY_MODE_SEL(x) ((x) << 12) /* 2 bits */
+#define LEVELA_B_CNT(x) ((x) << 8) /* 4 bits */
+#define HOLD_EN(x) ((x) << 6)
+#define LEVELB_VOL(x) ((x) << 4) /* 2 bits */
+#define SAMPLE_RATE(x) ((x) << 2) /* 2 bits */
+#define ENABLE(x) ((x) << 0)
+
+/* LRADC_INTC and LRADC_INTS bits */
+#define CHAN1_KEYUP_IRQ BIT(12)
+#define CHAN1_ALRDY_HOLD_IRQ BIT(11)
+#define CHAN1_HOLD_IRQ BIT(10)
+#define CHAN1_KEYDOWN_IRQ BIT(9)
+#define CHAN1_DATA_IRQ BIT(8)
+#define CHAN0_KEYUP_IRQ BIT(4)
+#define CHAN0_ALRDY_HOLD_IRQ BIT(3)
+#define CHAN0_HOLD_IRQ BIT(2)
+#define CHAN0_KEYDOWN_IRQ BIT(1)
+#define CHAN0_DATA_IRQ BIT(0)
+
+struct sun4i_lradc_keymap {
+ u32 voltage;
+ u32 keycode;
+};
+
+struct sun4i_lradc_data {
+ struct device *dev;
+ struct input_dev *input;
+ void __iomem *base;
+ struct regulator *vref_supply;
+ struct sun4i_lradc_keymap *chan0_map;
+ u32 chan0_map_count;
+ u32 chan0_keycode;
+ u32 vref;
+};
+
+static irqreturn_t sun4i_lradc_irq(int irq, void *dev_id)
+{
+ struct sun4i_lradc_data *lradc = dev_id;
+ u32 i, ints, val, voltage, diff, keycode = 0, closest = 0xffffffff;
+
+ ints = readl(lradc->base + LRADC_INTS);
+
+ /*
+ * The lradc supports only one keypress at a time; a release does not give
+ * any info as to which key was released, so we cache the keycode.
+ */
+
+ if (ints & CHAN0_KEYUP_IRQ) {
+ input_report_key(lradc->input, lradc->chan0_keycode, 0);
+ lradc->chan0_keycode = 0;
+ }
+
+ if ((ints & CHAN0_KEYDOWN_IRQ) && lradc->chan0_keycode == 0) {
+ val = readl(lradc->base + LRADC_DATA0) & 0x3f;
+ voltage = val * lradc->vref / 63;
+
+ for (i = 0; i < lradc->chan0_map_count; i++) {
+ diff = abs(lradc->chan0_map[i].voltage - voltage);
+ if (diff < closest) {
+ closest = diff;
+ keycode = lradc->chan0_map[i].keycode;
+ }
+ }
+
+ lradc->chan0_keycode = keycode;
+ input_report_key(lradc->input, lradc->chan0_keycode, 1);
+ }
+
+ input_sync(lradc->input);
+
+ writel(ints, lradc->base + LRADC_INTS);
+
+ return IRQ_HANDLED;
+}
+
+static int sun4i_lradc_open(struct input_dev *dev)
+{
+ struct sun4i_lradc_data *lradc = input_get_drvdata(dev);
+ int error;
+
+ error = regulator_enable(lradc->vref_supply);
+ if (error)
+ return error;
+
+ /* lradc Vref internally is divided by 2/3 */
+ lradc->vref = regulator_get_voltage(lradc->vref_supply) * 2 / 3;
+
+ /*
+ * Set sample time to 4 ms / 250 Hz. Wait 2 * 4 ms for key to
+ * stabilize on press, wait (1 + 1) * 4 ms for key release
+ */
+ writel(FIRST_CONVERT_DLY(2) | LEVELA_B_CNT(1) | HOLD_EN(1) |
+ SAMPLE_RATE(0) | ENABLE(1), lradc->base + LRADC_CTRL);
+
+ writel(CHAN0_KEYUP_IRQ | CHAN0_KEYDOWN_IRQ, lradc->base + LRADC_INTC);
+
+ return 0;
+}
+
+static void sun4i_lradc_close(struct input_dev *dev)
+{
+ struct sun4i_lradc_data *lradc = input_get_drvdata(dev);
+
+ /* Disable lradc, leave other settings unchanged */
+ writel(FIRST_CONVERT_DLY(2) | LEVELA_B_CNT(1) | HOLD_EN(1) |
+ SAMPLE_RATE(2), lradc->base + LRADC_CTRL);
+ writel(0, lradc->base + LRADC_INTC);
+
+ regulator_disable(lradc->vref_supply);
+}
+
+static int sun4i_lradc_load_dt_keymap(struct device *dev,
+ struct sun4i_lradc_data *lradc)
+{
+ struct device_node *np, *pp;
+ int i;
+ int error;
+
+ np = dev->of_node;
+ if (!np)
+ return -EINVAL;
+
+ lradc->chan0_map_count = of_get_child_count(np);
+ if (lradc->chan0_map_count == 0) {
+ dev_err(dev, "keymap is missing in device tree\n");
+ return -EINVAL;
+ }
+
+ lradc->chan0_map = devm_kmalloc_array(dev, lradc->chan0_map_count,
+ sizeof(struct sun4i_lradc_keymap),
+ GFP_KERNEL);
+ if (!lradc->chan0_map)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node(np, pp) {
+ struct sun4i_lradc_keymap *map = &lradc->chan0_map[i];
+ u32 channel;
+
+ error = of_property_read_u32(pp, "channel", &channel);
+ if (error || channel != 0) {
+ dev_err(dev, "%s: Inval channel prop\n", pp->name);
+ return -EINVAL;
+ }
+
+ error = of_property_read_u32(pp, "voltage", &map->voltage);
+ if (error) {
+ dev_err(dev, "%s: Inval voltage prop\n", pp->name);
+ return -EINVAL;
+ }
+
+ error = of_property_read_u32(pp, "linux,code", &map->keycode);
+ if (error) {
+ dev_err(dev, "%s: Inval linux,code prop\n", pp->name);
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int sun4i_lradc_probe(struct platform_device *pdev)
+{
+ struct sun4i_lradc_data *lradc;
+ struct device *dev = &pdev->dev;
+ int i;
+ int error;
+
+ lradc = devm_kzalloc(dev, sizeof(struct sun4i_lradc_data), GFP_KERNEL);
+ if (!lradc)
+ return -ENOMEM;
+
+ error = sun4i_lradc_load_dt_keymap(dev, lradc);
+ if (error)
+ return error;
+
+ lradc->vref_supply = devm_regulator_get(dev, "vref");
+ if (IS_ERR(lradc->vref_supply))
+ return PTR_ERR(lradc->vref_supply);
+
+ lradc->dev = dev;
+ lradc->input = devm_input_allocate_device(dev);
+ if (!lradc->input)
+ return -ENOMEM;
+
+ lradc->input->name = pdev->name;
+ lradc->input->phys = "sun4i_lradc/input0";
+ lradc->input->open = sun4i_lradc_open;
+ lradc->input->close = sun4i_lradc_close;
+ lradc->input->id.bustype = BUS_HOST;
+ lradc->input->id.vendor = 0x0001;
+ lradc->input->id.product = 0x0001;
+ lradc->input->id.version = 0x0100;
+
+ __set_bit(EV_KEY, lradc->input->evbit);
+ for (i = 0; i < lradc->chan0_map_count; i++)
+ __set_bit(lradc->chan0_map[i].keycode, lradc->input->keybit);
+
+ input_set_drvdata(lradc->input, lradc);
+
+ lradc->base = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(lradc->base))
+ return PTR_ERR(lradc->base);
+
+ error = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ sun4i_lradc_irq, 0,
+ "sun4i-a10-lradc-keys", lradc);
+ if (error)
+ return error;
+
+ error = input_register_device(lradc->input);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, lradc);
+ return 0;
+}
+
+static const struct of_device_id sun4i_lradc_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-lradc-keys", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_lradc_of_match);
+
+static struct platform_driver sun4i_lradc_driver = {
+ .driver = {
+ .name = "sun4i-a10-lradc-keys",
+ .of_match_table = of_match_ptr(sun4i_lradc_of_match),
+ },
+ .probe = sun4i_lradc_probe,
+};
+
+module_platform_driver(sun4i_lradc_driver);
+
+MODULE_DESCRIPTION("Allwinner sun4i low res adc attached tablet keys driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 23297ab6163f..6deb8dae3205 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -93,6 +93,16 @@ config INPUT_BMA150
To compile this driver as a module, choose M here: the
module will be called bma150.
+config INPUT_E3X0_BUTTON
+ tristate "NI Ettus Research USRP E3x0 Button support."
+ default n
+ help
+ Say Y here to enable support for the NI Ettus Research
+ USRP E3x0 Button.
+
+ To compile this driver as a module, choose M here: the
+ module will be called e3x0_button.
+
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -394,6 +404,18 @@ config INPUT_CM109
To compile this driver as a module, choose M here: the module will be
called cm109.
+config INPUT_REGULATOR_HAPTIC
+ tristate "Regulator haptics support"
+ depends on REGULATOR
+ select INPUT_FF_MEMLESS
+ help
+ This option enables device driver support for a haptic device
+ controlled by a regulator. This driver supports the ff-memless
+ interface from the input framework.
+
+ To compile this driver as a module, choose M here: the
+ module will be called regulator-haptic.
+
config INPUT_RETU_PWRBUTTON
tristate "Retu Power button Driver"
depends on MFD_RETU
@@ -404,6 +426,27 @@ config INPUT_RETU_PWRBUTTON
To compile this driver as a module, choose M here. The module will
be called retu-pwrbutton.
+config INPUT_TPS65218_PWRBUTTON
+ tristate "TPS65218 Power button driver"
+ depends on MFD_TPS65218
+ help
+ Say Y here if you want to enable power button reporting for
+ the TPS65218 Power Management IC device.
+
+ To compile this driver as a module, choose M here. The module will
+ be called tps65218-pwrbutton.
+
+config INPUT_AXP20X_PEK
+ tristate "X-Powers AXP20X power button driver"
+ depends on MFD_AXP20X
+ help
+ Say Y here if you want to enable power key reporting via the
+ AXP20X PMIC.
+
+ To compile this driver as a module, choose M here. The module will
+ be called axp20x-pek.
+
+
config INPUT_TWL4030_PWRBUTTON
tristate "TWL4030 Power button Driver"
depends on TWL4030_CORE
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 19c760361f80..403a1a54a76c 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
+obj-$(CONFIG_INPUT_E3X0_BUTTON) += e3x0-button.o
obj-$(CONFIG_INPUT_DRV260X_HAPTICS) += drv260x.o
obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
@@ -53,12 +54,15 @@ obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
+obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o
obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
+obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
+obj-$(CONFIG_INPUT_TPS65218_PWRBUTTON) += tps65218-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
new file mode 100644
index 000000000000..f1c844739cd7
--- /dev/null
+++ b/drivers/input/misc/axp20x-pek.c
@@ -0,0 +1,290 @@
+/*
+ * axp20x power button driver.
+ *
+ * Copyright (C) 2013 Carlo Caione <carlo@caione.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_PEK_STARTUP_MASK (0xc0)
+#define AXP20X_PEK_SHUTDOWN_MASK (0x03)
+
+struct axp20x_pek {
+ struct axp20x_dev *axp20x;
+ struct input_dev *input;
+ int irq_dbr;
+ int irq_dbf;
+};
+
+struct axp20x_time {
+ unsigned int time;
+ unsigned int idx;
+};
+
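+/* Supported PEK press times, in milliseconds, and their register field encodings. */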
+static const struct axp20x_time startup_time[] = {
+ { .time = 128, .idx = 0 },
+ { .time = 1000, .idx = 2 },
+ { .time = 3000, .idx = 1 },
+ { .time = 2000, .idx = 3 },
+};
+
+static const struct axp20x_time shutdown_time[] = {
+ { .time = 4000, .idx = 0 },
+ { .time = 6000, .idx = 1 },
+ { .time = 8000, .idx = 2 },
+ { .time = 10000, .idx = 3 },
+};
+
+struct axp20x_pek_ext_attr {
+ const struct axp20x_time *p_time;
+ unsigned int mask;
+};
+
+static struct axp20x_pek_ext_attr axp20x_pek_startup_ext_attr = {
+ .p_time = startup_time,
+ .mask = AXP20X_PEK_STARTUP_MASK,
+};
+
+static struct axp20x_pek_ext_attr axp20x_pek_shutdown_ext_attr = {
+ .p_time = shutdown_time,
+ .mask = AXP20X_PEK_SHUTDOWN_MASK,
+};
+
+static struct axp20x_pek_ext_attr *get_axp_ext_attr(struct device_attribute *attr)
+{
+ return container_of(attr, struct dev_ext_attribute, attr)->var;
+}
+
+static ssize_t axp20x_show_ext_attr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+ struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr);
+ unsigned int val;
+ int ret, i;
+
+ ret = regmap_read(axp20x_pek->axp20x->regmap, AXP20X_PEK_KEY, &val);
+ if (ret != 0)
+ return ret;
+
+ val &= axp20x_ea->mask;
+ val >>= ffs(axp20x_ea->mask) - 1;
+
+ for (i = 0; i < 4; i++)
+ if (val == axp20x_ea->p_time[i].idx)
+ val = axp20x_ea->p_time[i].time;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t axp20x_store_ext_attr(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+ struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr);
+ char val_str[20];
+ size_t len;
+ int ret, i;
+ unsigned int val, idx = 0;
+ unsigned int best_err = UINT_MAX;
+
+ val_str[sizeof(val_str) - 1] = '\0';
+ strncpy(val_str, buf, sizeof(val_str) - 1);
+ len = strlen(val_str);
+
+ if (len && val_str[len - 1] == '\n')
+ val_str[len - 1] = '\0';
+
+ ret = kstrtouint(val_str, 10, &val);
+ if (ret)
+ return ret;
+
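+ /* Pick the supported time closest to the requested value. */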
+ for (i = 3; i >= 0; i--) {
+ unsigned int err;
+
+ err = abs(axp20x_ea->p_time[i].time - val);
+ if (err < best_err) {
+ best_err = err;
+ idx = axp20x_ea->p_time[i].idx;
+ }
+
+ if (!err)
+ break;
+ }
+
+ idx <<= ffs(axp20x_ea->mask) - 1;
+ ret = regmap_update_bits(axp20x_pek->axp20x->regmap,
+ AXP20X_PEK_KEY,
+ axp20x_ea->mask, idx);
+ if (ret != 0)
+ return -EINVAL;
+
+ return count;
+}
+
+static struct dev_ext_attribute axp20x_dev_attr_startup = {
+ .attr = __ATTR(startup, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr),
+ .var = &axp20x_pek_startup_ext_attr,
+};
+
+static struct dev_ext_attribute axp20x_dev_attr_shutdown = {
+ .attr = __ATTR(shutdown, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr),
+ .var = &axp20x_pek_shutdown_ext_attr,
+};
+
+static struct attribute *axp20x_attributes[] = {
+ &axp20x_dev_attr_startup.attr.attr,
+ &axp20x_dev_attr_shutdown.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group axp20x_attribute_group = {
+ .attrs = axp20x_attributes,
+};
+
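+/* PEK_DBR fires when the power key is pressed, PEK_DBF when it is released. */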
+static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
+{
+ struct input_dev *idev = pwr;
+ struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
+
+ if (irq == axp20x_pek->irq_dbr)
+ input_report_key(idev, KEY_POWER, true);
+ else if (irq == axp20x_pek->irq_dbf)
+ input_report_key(idev, KEY_POWER, false);
+
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static void axp20x_remove_sysfs_group(void *_data)
+{
+ struct device *dev = _data;
+
+ sysfs_remove_group(&dev->kobj, &axp20x_attribute_group);
+}
+
+static int axp20x_pek_probe(struct platform_device *pdev)
+{
+ struct axp20x_pek *axp20x_pek;
+ struct axp20x_dev *axp20x;
+ struct input_dev *idev;
+ int error;
+
+ axp20x_pek = devm_kzalloc(&pdev->dev, sizeof(struct axp20x_pek),
+ GFP_KERNEL);
+ if (!axp20x_pek)
+ return -ENOMEM;
+
+ axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
+ axp20x = axp20x_pek->axp20x;
+
+ axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+ if (axp20x_pek->irq_dbr < 0) {
+ dev_err(&pdev->dev, "No IRQ for PEK_DBR, error=%d\n",
+ axp20x_pek->irq_dbr);
+ return axp20x_pek->irq_dbr;
+ }
+ axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
+ axp20x_pek->irq_dbr);
+
+ axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+ if (axp20x_pek->irq_dbf < 0) {
+ dev_err(&pdev->dev, "No IRQ for PEK_DBF, error=%d\n",
+ axp20x_pek->irq_dbf);
+ return axp20x_pek->irq_dbf;
+ }
+ axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
+ axp20x_pek->irq_dbf);
+
+ axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
+ if (!axp20x_pek->input)
+ return -ENOMEM;
+
+ idev = axp20x_pek->input;
+
+ idev->name = "axp20x-pek";
+ idev->phys = "m1kbd/input2";
+ idev->dev.parent = &pdev->dev;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+
+ input_set_drvdata(idev, axp20x_pek);
+
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbr", idev);
+ if (error < 0) {
+ dev_err(axp20x->dev, "Failed to request dbr IRQ#%d: %d\n",
+ axp20x_pek->irq_dbr, error);
+ return error;
+ }
+
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbf", idev);
+ if (error < 0) {
+ dev_err(axp20x->dev, "Failed to request dbf IRQ#%d: %d\n",
+ axp20x_pek->irq_dbf, error);
+ return error;
+ }
+
+ error = sysfs_create_group(&pdev->dev.kobj, &axp20x_attribute_group);
+ if (error) {
+ dev_err(axp20x->dev, "Failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&pdev->dev,
+ axp20x_remove_sysfs_group, &pdev->dev);
+ if (error) {
+ axp20x_remove_sysfs_group(&pdev->dev);
+ dev_err(&pdev->dev, "Failed to add sysfs cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(axp20x->dev, "Can't register input device: %d\n",
+ error);
+ return error;
+ }
+
+ platform_set_drvdata(pdev, axp20x_pek);
+
+ return 0;
+}
+
+static struct platform_driver axp20x_pek_driver = {
+ .probe = axp20x_pek_probe,
+ .driver = {
+ .name = "axp20x-pek",
+ },
+};
+module_platform_driver(axp20x_pek_driver);
+
+MODULE_DESCRIPTION("axp20x Power Button");
+MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index a364e109ca7c..599578042ea0 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -733,7 +733,6 @@ static struct i2c_driver drv260x_driver = {
};
module_i2c_driver(drv260x_driver);
-MODULE_ALIAS("platform:drv260x-haptics");
MODULE_DESCRIPTION("TI DRV260x haptics driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index a021744e608c..fc0fddf0896a 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -492,7 +492,6 @@ static struct i2c_driver drv2667_driver = {
};
module_i2c_driver(drv2667_driver);
-MODULE_ALIAS("platform:drv2667-haptics");
MODULE_DESCRIPTION("TI DRV2667 haptics driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
diff --git a/drivers/input/misc/e3x0-button.c b/drivers/input/misc/e3x0-button.c
new file mode 100644
index 000000000000..13bfca8a7b16
--- /dev/null
+++ b/drivers/input/misc/e3x0-button.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2014, National Instruments Corp. All rights reserved.
+ *
+ * Driver for the NI Ettus Research USRP E3x0 button
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+static irqreturn_t e3x0_button_release_handler(int irq, void *data)
+{
+ struct input_dev *idev = data;
+
+ input_report_key(idev, KEY_POWER, 0);
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e3x0_button_press_handler(int irq, void *data)
+{
+ struct input_dev *idev = data;
+
+ input_report_key(idev, KEY_POWER, 1);
+ pm_wakeup_event(idev->dev.parent, 0);
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static int __maybe_unused e3x0_button_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(platform_get_irq_byname(pdev, "press"));
+
+ return 0;
+}
+
+static int __maybe_unused e3x0_button_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(platform_get_irq_byname(pdev, "press"));
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(e3x0_button_pm_ops,
+ e3x0_button_suspend, e3x0_button_resume);
+
+static int e3x0_button_probe(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ int irq_press, irq_release;
+ int error;
+
+ irq_press = platform_get_irq_byname(pdev, "press");
+ if (irq_press < 0) {
+ dev_err(&pdev->dev, "No IRQ for 'press', error=%d\n",
+ irq_press);
+ return irq_press;
+ }
+
+ irq_release = platform_get_irq_byname(pdev, "release");
+ if (irq_release < 0) {
+ dev_err(&pdev->dev, "No IRQ for 'release', error=%d\n",
+ irq_release);
+ return irq_release;
+ }
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "NI Ettus Research USRP E3x0 Button Driver";
+ input->phys = "e3x0_button/input0";
+ input->dev.parent = &pdev->dev;
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+
+ error = devm_request_irq(&pdev->dev, irq_press,
+ e3x0_button_press_handler, 0,
+ "e3x0-button", input);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request 'press' IRQ#%d: %d\n",
+ irq_press, error);
+ return error;
+ }
+
+ error = devm_request_irq(&pdev->dev, irq_release,
+ e3x0_button_release_handler, 0,
+ "e3x0-button", input);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request 'release' IRQ#%d: %d\n",
+ irq_release, error);
+ return error;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "Can't register input device: %d\n", error);
+ return error;
+ }
+
+ platform_set_drvdata(pdev, input);
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+}
+
+static int e3x0_button_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id e3x0_button_match[] = {
+ { .compatible = "ettus,e3x0-button", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, e3x0_button_match);
+#endif
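+
+/*
+ * An illustrative device tree node matched by the table above; the
+ * interrupt specifiers below are hypothetical:
+ *
+ *	button {
+ *		compatible = "ettus,e3x0-button";
+ *		interrupt-parent = <&intc>;
+ *		interrupts = <0 30 1>, <0 31 1>;
+ *		interrupt-names = "press", "release";
+ *	};
+ */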
+
+static struct platform_driver e3x0_button_driver = {
+ .driver = {
+ .name = "e3x0-button",
+ .of_match_table = of_match_ptr(e3x0_button_match),
+ .pm = &e3x0_button_pm_ops,
+ },
+ .probe = e3x0_button_probe,
+ .remove = e3x0_button_remove,
+};
+
+module_platform_driver(e3x0_button_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
+MODULE_DESCRIPTION("NI Ettus Research USRP E3x0 Button driver");
+MODULE_ALIAS("platform:e3x0-button");
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
new file mode 100644
index 000000000000..132eb914ea3e
--- /dev/null
+++ b/drivers/input/misc/regulator-haptic.c
@@ -0,0 +1,266 @@
+/*
+ * Regulator haptic driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Hyunhee Kim <hyunhee.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/regulator-haptic.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define MAX_MAGNITUDE_SHIFT 16
+
+struct regulator_haptic {
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct regulator *regulator;
+
+ struct work_struct work;
+ struct mutex mutex;
+
+ bool active;
+ bool suspended;
+
+ unsigned int max_volt;
+ unsigned int min_volt;
+ unsigned int magnitude;
+};
+
+static int regulator_haptic_toggle(struct regulator_haptic *haptic, bool on)
+{
+ int error;
+
+ if (haptic->active != on) {
+
+ error = on ? regulator_enable(haptic->regulator) :
+ regulator_disable(haptic->regulator);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to switch regulator %s: %d\n",
+ on ? "on" : "off", error);
+ return error;
+ }
+
+ haptic->active = on;
+ }
+
+ return 0;
+}
+
+static int regulator_haptic_set_voltage(struct regulator_haptic *haptic,
+ unsigned int magnitude)
+{
+ u64 volt_mag_multi;
+ unsigned int intensity;
+ int error;
+
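+ /* Scale the 16-bit magnitude linearly into the [min_volt, max_volt] range. */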
+ volt_mag_multi = (u64)(haptic->max_volt - haptic->min_volt) * magnitude;
+ intensity = (unsigned int)(volt_mag_multi >> MAX_MAGNITUDE_SHIFT);
+
+ error = regulator_set_voltage(haptic->regulator,
+ intensity + haptic->min_volt,
+ haptic->max_volt);
+ if (error) {
+ dev_err(haptic->dev, "cannot set regulator voltage to %d: %d\n",
+ intensity + haptic->min_volt, error);
+ return error;
+ }
+
+ regulator_haptic_toggle(haptic, !!magnitude);
+
+ return 0;
+}
+
+static void regulator_haptic_work(struct work_struct *work)
+{
+ struct regulator_haptic *haptic = container_of(work,
+ struct regulator_haptic, work);
+
+ mutex_lock(&haptic->mutex);
+
+ if (!haptic->suspended)
+ regulator_haptic_set_voltage(haptic, haptic->magnitude);
+
+ mutex_unlock(&haptic->mutex);
+}
+
+static int regulator_haptic_play_effect(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct regulator_haptic *haptic = input_get_drvdata(input);
+
+ haptic->magnitude = effect->u.rumble.strong_magnitude;
+ if (!haptic->magnitude)
+ haptic->magnitude = effect->u.rumble.weak_magnitude;
+
+ schedule_work(&haptic->work);
+
+ return 0;
+}
+
+static void regulator_haptic_close(struct input_dev *input)
+{
+ struct regulator_haptic *haptic = input_get_drvdata(input);
+
+ cancel_work_sync(&haptic->work);
+ regulator_haptic_set_voltage(haptic, 0);
+}
+
+static int __maybe_unused
+regulator_haptic_parse_dt(struct device *dev, struct regulator_haptic *haptic)
+{
+ struct device_node *node;
+ int error;
+
+ node = dev->of_node;
+ if (!node) {
+ dev_err(dev, "Missing device tree data\n");
+ return -EINVAL;
+ }
+
+ error = of_property_read_u32(node, "max-microvolt", &haptic->max_volt);
+ if (error) {
+ dev_err(dev, "cannot parse max-microvolt\n");
+ return error;
+ }
+
+ error = of_property_read_u32(node, "min-microvolt", &haptic->min_volt);
+ if (error) {
+ dev_err(dev, "cannot parse min-microvolt\n");
+ return error;
+ }
+
+ return 0;
+}
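+
+/*
+ * An illustrative device tree node for the parser above; the supply
+ * phandle and voltage values are hypothetical:
+ *
+ *	haptics {
+ *		compatible = "regulator-haptic";
+ *		haptic-supply = <&motor_regulator>;
+ *		max-microvolt = <2700000>;
+ *		min-microvolt = <1100000>;
+ *	};
+ */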
+
+static int regulator_haptic_probe(struct platform_device *pdev)
+{
+ const struct regulator_haptic_data *pdata = dev_get_platdata(&pdev->dev);
+ struct regulator_haptic *haptic;
+ struct input_dev *input_dev;
+ int error;
+
+ haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
+ if (!haptic)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, haptic);
+ haptic->dev = &pdev->dev;
+ mutex_init(&haptic->mutex);
+ INIT_WORK(&haptic->work, regulator_haptic_work);
+
+ if (pdata) {
+ haptic->max_volt = pdata->max_volt;
+ haptic->min_volt = pdata->min_volt;
+ } else if (IS_ENABLED(CONFIG_OF)) {
+ error = regulator_haptic_parse_dt(&pdev->dev, haptic);
+ if (error)
+ return error;
+ } else {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ return -EINVAL;
+ }
+
+ haptic->regulator = devm_regulator_get_exclusive(&pdev->dev, "haptic");
+ if (IS_ERR(haptic->regulator)) {
+ dev_err(&pdev->dev, "failed to get regulator\n");
+ return PTR_ERR(haptic->regulator);
+ }
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ haptic->input_dev = input_dev;
+ haptic->input_dev->name = "regulator-haptic";
+ haptic->input_dev->dev.parent = &pdev->dev;
+ haptic->input_dev->close = regulator_haptic_close;
+ input_set_drvdata(haptic->input_dev, haptic);
+ input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(input_dev, NULL,
+ regulator_haptic_play_effect);
+ if (error) {
+ dev_err(&pdev->dev, "failed to create force-feedback\n");
+ return error;
+ }
+
+ error = input_register_device(haptic->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused regulator_haptic_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct regulator_haptic *haptic = platform_get_drvdata(pdev);
+ int error;
+
+ error = mutex_lock_interruptible(&haptic->mutex);
+ if (error)
+ return error;
+
+ regulator_haptic_set_voltage(haptic, 0);
+
+ haptic->suspended = true;
+
+ mutex_unlock(&haptic->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused regulator_haptic_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct regulator_haptic *haptic = platform_get_drvdata(pdev);
+ unsigned int magnitude;
+
+ mutex_lock(&haptic->mutex);
+
+ haptic->suspended = false;
+
+ magnitude = ACCESS_ONCE(haptic->magnitude);
+ if (magnitude)
+ regulator_haptic_set_voltage(haptic, magnitude);
+
+ mutex_unlock(&haptic->mutex);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(regulator_haptic_pm_ops,
+ regulator_haptic_suspend, regulator_haptic_resume);
+
+static struct of_device_id regulator_haptic_dt_match[] = {
+ { .compatible = "regulator-haptic" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver regulator_haptic_driver = {
+ .probe = regulator_haptic_probe,
+ .driver = {
+ .name = "regulator-haptic",
+ .of_match_table = regulator_haptic_dt_match,
+ .pm = &regulator_haptic_pm_ops,
+ },
+};
+module_platform_driver(regulator_haptic_driver);
+
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_AUTHOR("Hyunhee Kim <hyunhee.kim@samsung.com>");
+MODULE_DESCRIPTION("Regulator haptic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
new file mode 100644
index 000000000000..54508dec4eb3
--- /dev/null
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -0,0 +1,126 @@
+/*
+ * Texas Instruments' TPS65218 Power Button Input Driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps65218.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct tps65218_pwrbutton {
+ struct device *dev;
+ struct tps65218 *tps;
+ struct input_dev *idev;
+};
+
+static irqreturn_t tps65218_pwr_irq(int irq, void *_pwr)
+{
+ struct tps65218_pwrbutton *pwr = _pwr;
+ unsigned int reg;
+ int error;
+
+ error = tps65218_reg_read(pwr->tps, TPS65218_REG_STATUS, &reg);
+ if (error) {
+ dev_err(pwr->dev, "can't read register: %d\n", error);
+ goto out;
+ }
+
+ if (reg & TPS65218_STATUS_PB_STATE) {
+ input_report_key(pwr->idev, KEY_POWER, 1);
+ pm_wakeup_event(pwr->dev, 0);
+ } else {
+ input_report_key(pwr->idev, KEY_POWER, 0);
+ }
+
+ input_sync(pwr->idev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int tps65218_pwron_probe(struct platform_device *pdev)
+{
+ struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct tps65218_pwrbutton *pwr;
+ struct input_dev *idev;
+ int error;
+ int irq;
+
+ pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = "tps65218_pwrbutton";
+ idev->phys = "tps65218_pwrbutton/input0";
+ idev->dev.parent = dev;
+ idev->id.bustype = BUS_I2C;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+
+ pwr->tps = tps;
+ pwr->dev = dev;
+ pwr->idev = idev;
+ platform_set_drvdata(pdev, pwr);
+ device_init_wakeup(dev, true);
+
+ irq = platform_get_irq(pdev, 0);
+ error = devm_request_threaded_irq(dev, irq, NULL, tps65218_pwr_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "tps65218-pwrbutton", pwr);
+ if (error) {
+ dev_err(dev, "failed to request IRQ #%d: %d\n",
+ irq, error);
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(dev, "Can't register power button: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static struct of_device_id of_tps65218_pwr_match[] = {
+ { .compatible = "ti,tps65218-pwrbutton" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_tps65218_pwr_match);
+
+static struct platform_driver tps65218_pwron_driver = {
+ .probe = tps65218_pwron_probe,
+ .driver = {
+ .name = "tps65218_pwrbutton",
+ .of_match_table = of_tps65218_pwr_match,
+ },
+};
+module_platform_driver(tps65218_pwron_driver);
+
+MODULE_DESCRIPTION("TPS65218 Power Button");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index d8b46b0f2dbe..4658b5d41dd7 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -105,19 +105,12 @@ config MOUSE_PS2_ELANTECH
Say Y here if you have an Elantech PS/2 touchpad connected
to your system.
- Note that if you enable this driver you will need an updated
- X.org Synaptics driver that does not require ABS_PRESSURE
- reports from the touchpad (i.e. post 1.5.0 version). You can
- grab a patch for the driver here:
-
- http://userweb.kernel.org/~dtor/synaptics-no-abspressure.patch
-
- If unsure, say N.
-
This driver exposes some configuration registers via sysfs
entries. For further information,
see <file:Documentation/input/elantech.txt>.
+ If unsure, say N.
+
config MOUSE_PS2_SENTELIC
bool "Sentelic Finger Sensing Pad PS/2 protocol extension"
depends on MOUSE_PS2
@@ -146,6 +139,16 @@ config MOUSE_PS2_OLPC
If unsure, say N.
+config MOUSE_PS2_FOCALTECH
+ bool "FocalTech PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a FocalTech PS/2 TouchPad connected to
+ your system.
+
+ If unsure, say Y.
+
config MOUSE_SERIAL
tristate "Serial mouse"
select SERIO
@@ -206,6 +209,7 @@ config MOUSE_BCM5974
config MOUSE_CYAPA
tristate "Cypress APA I2C Trackpad support"
depends on I2C
+ select CRC_ITU_T
help
This driver adds support for Cypress All Points Addressable (APA)
I2C Trackpads, including the ones used in 2012 Samsung Chromebooks.
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 560003dcac37..8a9c98e76d9c 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
-obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
+obj-$(CONFIG_MOUSE_CYAPA) += cyapatp.o
obj-$(CONFIG_MOUSE_ELAN_I2C) += elan_i2c.o
obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
@@ -24,6 +24,7 @@ obj-$(CONFIG_MOUSE_SYNAPTICS_I2C) += synaptics_i2c.o
obj-$(CONFIG_MOUSE_SYNAPTICS_USB) += synaptics_usb.o
obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
+cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o
psmouse-objs := psmouse-base.o synaptics.o focaltech.o
psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d88d73d83552..f205b8be2ce4 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -435,7 +435,7 @@ static void alps_report_mt_data(struct psmouse *psmouse, int n)
struct alps_fields *f = &priv->f;
int i, slot[MAX_TOUCHES];
- input_mt_assign_slots(dev, slot, f->mt, n);
+ input_mt_assign_slots(dev, slot, f->mt, n, 0);
for (i = 0; i < n; i++)
alps_set_slot(dev, slot[i], f->mt[i].x, f->mt[i].y);
@@ -475,6 +475,13 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
struct input_dev *dev = priv->dev2;
int x, y, z, left, right, middle;
+ /* A trackstick packet should only come from a DualPoint device */
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ return;
+ }
+
/* Sanity check packet */
if (!(packet[0] & 0x40)) {
psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
@@ -699,7 +706,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
alps_report_semi_mt_data(psmouse, fingers);
- if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
+ if ((priv->flags & ALPS_DUALPOINT) &&
+ !(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
input_report_key(dev2, BTN_LEFT, f->ts_left);
input_report_key(dev2, BTN_RIGHT, f->ts_right);
input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
@@ -743,8 +751,11 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
*/
if (packet[5] == 0x7F) {
/* It should be a DualPoint when received Trackpoint packet */
- if (!(priv->flags & ALPS_DUALPOINT))
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
return;
+ }
/* Trackpoint packet */
x = packet[1] | ((packet[3] & 0x20) << 2);
@@ -1026,6 +1037,13 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
struct input_dev *dev2 = priv->dev2;
int x, y, z, left, right, middle;
+ /* A trackstick packet should only come from a DualPoint device */
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ return;
+ }
+
/*
* b7 b6 b5 b4 b3 b2 b1 b0
* Byte0 0 1 0 0 1 0 0 0
@@ -2443,14 +2461,24 @@ int alps_init(struct psmouse *psmouse)
dev1->keybit[BIT_WORD(BTN_MIDDLE)] |= BIT_MASK(BTN_MIDDLE);
}
+ if (priv->flags & ALPS_DUALPOINT) {
+ /*
+ * format of input device name is: "protocol vendor name"
+ * see function psmouse_switch_protocol() in psmouse-base.c
+ */
+ dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
+ dev2->id.product = PSMOUSE_ALPS;
+ dev2->id.version = priv->proto_version;
+ } else {
+ dev2->name = "PS/2 ALPS Mouse";
+ dev2->id.product = PSMOUSE_PS2;
+ dev2->id.version = 0x0000;
+ }
+
snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
- dev2->name = (priv->flags & ALPS_DUALPOINT) ?
- "DualPoint Stick" : "ALPS PS/2 Device";
dev2->id.bustype = BUS_I8042;
dev2->id.vendor = 0x0002;
- dev2->id.product = PSMOUSE_ALPS;
- dev2->id.version = 0x0000;
dev2->dev.parent = &psmouse->ps2dev.serio->dev;
dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index c329cdb0b91a..b10709f04615 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -564,7 +564,7 @@ static int report_tp_state(struct bcm5974 *dev, int size)
dev->index[n++] = &f[i];
}
- input_mt_assign_slots(input, dev->slots, dev->pos, n);
+ input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
for (i = 0; i < n; i++)
report_finger_data(input, dev->slots[i],
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index 1bece8cad46f..58f4f6fa4857 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -20,408 +20,131 @@
#include <linux/input/mt.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include "cyapa.h"
-/* APA trackpad firmware generation */
-#define CYAPA_GEN3 0x03 /* support MT-protocol B with tracking ID. */
-
-#define CYAPA_NAME "Cypress APA Trackpad (cyapa)"
-
-/* commands for read/write registers of Cypress trackpad */
-#define CYAPA_CMD_SOFT_RESET 0x00
-#define CYAPA_CMD_POWER_MODE 0x01
-#define CYAPA_CMD_DEV_STATUS 0x02
-#define CYAPA_CMD_GROUP_DATA 0x03
-#define CYAPA_CMD_GROUP_CMD 0x04
-#define CYAPA_CMD_GROUP_QUERY 0x05
-#define CYAPA_CMD_BL_STATUS 0x06
-#define CYAPA_CMD_BL_HEAD 0x07
-#define CYAPA_CMD_BL_CMD 0x08
-#define CYAPA_CMD_BL_DATA 0x09
-#define CYAPA_CMD_BL_ALL 0x0a
-#define CYAPA_CMD_BLK_PRODUCT_ID 0x0b
-#define CYAPA_CMD_BLK_HEAD 0x0c
-
-/* report data start reg offset address. */
-#define DATA_REG_START_OFFSET 0x0000
-
-#define BL_HEAD_OFFSET 0x00
-#define BL_DATA_OFFSET 0x10
-
-/*
- * Operational Device Status Register
- *
- * bit 7: Valid interrupt source
- * bit 6 - 4: Reserved
- * bit 3 - 2: Power status
- * bit 1 - 0: Device status
- */
-#define REG_OP_STATUS 0x00
-#define OP_STATUS_SRC 0x80
-#define OP_STATUS_POWER 0x0c
-#define OP_STATUS_DEV 0x03
-#define OP_STATUS_MASK (OP_STATUS_SRC | OP_STATUS_POWER | OP_STATUS_DEV)
-
-/*
- * Operational Finger Count/Button Flags Register
- *
- * bit 7 - 4: Number of touched finger
- * bit 3: Valid data
- * bit 2: Middle Physical Button
- * bit 1: Right Physical Button
- * bit 0: Left physical Button
- */
-#define REG_OP_DATA1 0x01
-#define OP_DATA_VALID 0x08
-#define OP_DATA_MIDDLE_BTN 0x04
-#define OP_DATA_RIGHT_BTN 0x02
-#define OP_DATA_LEFT_BTN 0x01
-#define OP_DATA_BTN_MASK (OP_DATA_MIDDLE_BTN | OP_DATA_RIGHT_BTN | \
- OP_DATA_LEFT_BTN)
-
-/*
- * Bootloader Status Register
- *
- * bit 7: Busy
- * bit 6 - 5: Reserved
- * bit 4: Bootloader running
- * bit 3 - 1: Reserved
- * bit 0: Checksum valid
- */
-#define REG_BL_STATUS 0x01
-#define BL_STATUS_BUSY 0x80
-#define BL_STATUS_RUNNING 0x10
-#define BL_STATUS_DATA_VALID 0x08
-#define BL_STATUS_CSUM_VALID 0x01
-
-/*
- * Bootloader Error Register
- *
- * bit 7: Invalid
- * bit 6: Invalid security key
- * bit 5: Bootloading
- * bit 4: Command checksum
- * bit 3: Flash protection error
- * bit 2: Flash checksum error
- * bit 1 - 0: Reserved
- */
-#define REG_BL_ERROR 0x02
-#define BL_ERROR_INVALID 0x80
-#define BL_ERROR_INVALID_KEY 0x40
-#define BL_ERROR_BOOTLOADING 0x20
-#define BL_ERROR_CMD_CSUM 0x10
-#define BL_ERROR_FLASH_PROT 0x08
-#define BL_ERROR_FLASH_CSUM 0x04
-
-#define BL_STATUS_SIZE 3 /* length of bootloader status registers */
-#define BLK_HEAD_BYTES 32
-
-#define PRODUCT_ID_SIZE 16
-#define QUERY_DATA_SIZE 27
-#define REG_PROTOCOL_GEN_QUERY_OFFSET 20
-
-#define REG_OFFSET_DATA_BASE 0x0000
-#define REG_OFFSET_COMMAND_BASE 0x0028
-#define REG_OFFSET_QUERY_BASE 0x002a
-
-#define CAPABILITY_LEFT_BTN_MASK (0x01 << 3)
-#define CAPABILITY_RIGHT_BTN_MASK (0x01 << 4)
-#define CAPABILITY_MIDDLE_BTN_MASK (0x01 << 5)
-#define CAPABILITY_BTN_MASK (CAPABILITY_LEFT_BTN_MASK | \
- CAPABILITY_RIGHT_BTN_MASK | \
- CAPABILITY_MIDDLE_BTN_MASK)
-
-#define CYAPA_OFFSET_SOFT_RESET REG_OFFSET_COMMAND_BASE
-
-#define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1)
-
-#define PWR_MODE_MASK 0xfc
-#define PWR_MODE_FULL_ACTIVE (0x3f << 2)
-#define PWR_MODE_IDLE (0x05 << 2) /* default sleep time is 50 ms. */
-#define PWR_MODE_OFF (0x00 << 2)
-
-#define PWR_STATUS_MASK 0x0c
-#define PWR_STATUS_ACTIVE (0x03 << 2)
-#define PWR_STATUS_IDLE (0x02 << 2)
-#define PWR_STATUS_OFF (0x00 << 2)
-
-/*
- * CYAPA trackpad device states.
- * Used in register 0x00, bit1-0, DeviceStatus field.
- * Other values indicate device is in an abnormal state and must be reset.
- */
-#define CYAPA_DEV_NORMAL 0x03
-#define CYAPA_DEV_BUSY 0x01
-
-enum cyapa_state {
- CYAPA_STATE_OP,
- CYAPA_STATE_BL_IDLE,
- CYAPA_STATE_BL_ACTIVE,
- CYAPA_STATE_BL_BUSY,
- CYAPA_STATE_NO_DEVICE,
-};
-
-
-struct cyapa_touch {
- /*
- * high bits or x/y position value
- * bit 7 - 4: high 4 bits of x position value
- * bit 3 - 0: high 4 bits of y position value
- */
- u8 xy_hi;
- u8 x_lo; /* low 8 bits of x position value. */
- u8 y_lo; /* low 8 bits of y position value. */
- u8 pressure;
- /* id range is 1 - 15. It is incremented with every new touch. */
- u8 id;
-} __packed;
-
-/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
-#define CYAPA_MAX_MT_SLOTS 15
-
-struct cyapa_reg_data {
- /*
- * bit 0 - 1: device status
- * bit 3 - 2: power mode
- * bit 6 - 4: reserved
- * bit 7: interrupt valid bit
- */
- u8 device_status;
- /*
- * bit 7 - 4: number of fingers currently touching pad
- * bit 3: valid data check bit
- * bit 2: middle mechanism button state if exists
- * bit 1: right mechanism button state if exists
- * bit 0: left mechanism button state if exists
- */
- u8 finger_btn;
- /* CYAPA reports up to 5 touches per packet. */
- struct cyapa_touch touches[5];
-} __packed;
-
-/* The main device structure */
-struct cyapa {
- enum cyapa_state state;
-
- struct i2c_client *client;
- struct input_dev *input;
- char phys[32]; /* device physical location */
- bool irq_wake; /* irq wake is enabled */
- bool smbus;
-
- /* read from query data region. */
- char product_id[16];
- u8 btn_capability;
- u8 gen;
- int max_abs_x;
- int max_abs_y;
- int physical_size_x;
- int physical_size_y;
-};
-
-static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03,
- 0x04, 0x05, 0x06, 0x07 };
-static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04,
- 0x05, 0x06, 0x07 };
-
-struct cyapa_cmd_len {
- u8 cmd;
- u8 len;
-};
#define CYAPA_ADAPTER_FUNC_NONE 0
#define CYAPA_ADAPTER_FUNC_I2C 1
#define CYAPA_ADAPTER_FUNC_SMBUS 2
#define CYAPA_ADAPTER_FUNC_BOTH 3
-/*
- * macros for SMBus communication
- */
-#define SMBUS_READ 0x01
-#define SMBUS_WRITE 0x00
-#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
-#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
-#define SMBUS_BYTE_BLOCK_CMD_MASK 0x80
-#define SMBUS_GROUP_BLOCK_CMD_MASK 0x40
-
- /* for byte read/write command */
-#define CMD_RESET 0
-#define CMD_POWER_MODE 1
-#define CMD_DEV_STATUS 2
-#define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1)
-#define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET)
-#define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE)
-#define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS)
-
- /* for group registers read/write command */
-#define REG_GROUP_DATA 0
-#define REG_GROUP_CMD 2
-#define REG_GROUP_QUERY 3
-#define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3))
-#define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA)
-#define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD)
-#define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY)
-
- /* for register block read/write command */
-#define CMD_BL_STATUS 0
-#define CMD_BL_HEAD 1
-#define CMD_BL_CMD 2
-#define CMD_BL_DATA 3
-#define CMD_BL_ALL 4
-#define CMD_BLK_PRODUCT_ID 5
-#define CMD_BLK_HEAD 6
-#define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1))
-
-/* register block read/write command in bootloader mode */
-#define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS)
-#define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD)
-#define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD)
-#define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA)
-#define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL)
-
-/* register block read/write command in operational mode */
-#define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID)
-#define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD)
-
-static const struct cyapa_cmd_len cyapa_i2c_cmds[] = {
- { CYAPA_OFFSET_SOFT_RESET, 1 },
- { REG_OFFSET_COMMAND_BASE + 1, 1 },
- { REG_OFFSET_DATA_BASE, 1 },
- { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) },
- { REG_OFFSET_COMMAND_BASE, 0 },
- { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE },
- { BL_HEAD_OFFSET, 3 },
- { BL_HEAD_OFFSET, 16 },
- { BL_HEAD_OFFSET, 16 },
- { BL_DATA_OFFSET, 16 },
- { BL_HEAD_OFFSET, 32 },
- { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE },
- { REG_OFFSET_DATA_BASE, 32 }
-};
+#define CYAPA_FW_NAME "cyapa.bin"
-static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
- { CYAPA_SMBUS_RESET, 1 },
- { CYAPA_SMBUS_POWER_MODE, 1 },
- { CYAPA_SMBUS_DEV_STATUS, 1 },
- { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) },
- { CYAPA_SMBUS_GROUP_CMD, 2 },
- { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE },
- { CYAPA_SMBUS_BL_STATUS, 3 },
- { CYAPA_SMBUS_BL_HEAD, 16 },
- { CYAPA_SMBUS_BL_CMD, 16 },
- { CYAPA_SMBUS_BL_DATA, 16 },
- { CYAPA_SMBUS_BL_ALL, 32 },
- { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE },
- { CYAPA_SMBUS_BLK_HEAD, 16 },
-};
+const char product_id[] = "CYTRA";
-static ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
- u8 *values)
+static int cyapa_reinitialize(struct cyapa *cyapa);
+
+static inline bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
{
- return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values);
+ if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_BL)
+ return true;
+
+ if (cyapa->gen == CYAPA_GEN3 &&
+ cyapa->state >= CYAPA_STATE_BL_BUSY &&
+ cyapa->state <= CYAPA_STATE_BL_ACTIVE)
+ return true;
+
+ return false;
}
-static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg,
- size_t len, const u8 *values)
+static inline bool cyapa_is_operational_mode(struct cyapa *cyapa)
{
- return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values);
+ if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+ return true;
+
+ if (cyapa->gen == CYAPA_GEN3 && cyapa->state == CYAPA_STATE_OP)
+ return true;
+
+ return false;
}
-/*
- * cyapa_smbus_read_block - perform smbus block read command
- * @cyapa - private data structure of the driver
- * @cmd - the properly encoded smbus command
- * @len - expected length of smbus command result
- * @values - buffer to store smbus command result
- *
- * Returns negative errno, else the number of bytes written.
- *
- * Note:
- * In trackpad device, the memory block allocated for I2C register map
- * is 256 bytes, so the max read block for I2C bus is 256 bytes.
- */
-static ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
- u8 *values)
+/* Returns 0 on success, else negative errno on failure. */
+static ssize_t cyapa_i2c_read(struct cyapa *cyapa, u8 reg, size_t len,
+ u8 *values)
{
- ssize_t ret;
- u8 index;
- u8 smbus_cmd;
- u8 *buf;
struct i2c_client *client = cyapa->client;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = values,
+ },
+ };
+ int ret;
- if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd))
- return -EINVAL;
-
- if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) {
- /* read specific block registers command. */
- smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
- ret = i2c_smbus_read_block_data(client, smbus_cmd, values);
- goto out;
- }
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- ret = 0;
- for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) {
- smbus_cmd = SMBUS_ENCODE_IDX(cmd, index);
- smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ);
- buf = values + I2C_SMBUS_BLOCK_MAX * index;
- ret = i2c_smbus_read_block_data(client, smbus_cmd, buf);
- if (ret < 0)
- goto out;
- }
+ if (ret != ARRAY_SIZE(msgs))
+ return ret < 0 ? ret : -EIO;
-out:
- return ret > 0 ? len : ret;
+ return 0;
}
-static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx)
+/**
+ * cyapa_i2c_write - Execute i2c block data write operation
+ * @cyapa: Handle to this driver
+ * @reg: Offset in the register map where the data is to be written
+ * @len: number of bytes to write
+ * @values: Data to be written
+ *
+ * Returns zero on success, or a negative errno code on error.
+ */
+static int cyapa_i2c_write(struct cyapa *cyapa, u8 reg,
+ size_t len, const void *values)
{
- u8 cmd;
+ struct i2c_client *client = cyapa->client;
+ char buf[32];
+ int ret;
- if (cyapa->smbus) {
- cmd = cyapa_smbus_cmds[cmd_idx].cmd;
- cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
- } else {
- cmd = cyapa_i2c_cmds[cmd_idx].cmd;
- }
- return i2c_smbus_read_byte_data(cyapa->client, cmd);
-}
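+ /* buf holds one register-offset byte followed by up to 31 bytes of payload. */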
+ if (len > sizeof(buf) - 1)
+ return -ENOMEM;
-static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value)
-{
- u8 cmd;
+ buf[0] = reg;
+ memcpy(&buf[1], values, len);
- if (cyapa->smbus) {
- cmd = cyapa_smbus_cmds[cmd_idx].cmd;
- cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE);
- } else {
- cmd = cyapa_i2c_cmds[cmd_idx].cmd;
- }
- return i2c_smbus_write_byte_data(cyapa->client, cmd, value);
+ ret = i2c_master_send(client, buf, len + 1);
+ if (ret != len + 1)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
}
-static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
+static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
{
- u8 cmd;
- size_t len;
+ u8 ret = CYAPA_ADAPTER_FUNC_NONE;
- if (cyapa->smbus) {
- cmd = cyapa_smbus_cmds[cmd_idx].cmd;
- len = cyapa_smbus_cmds[cmd_idx].len;
- return cyapa_smbus_read_block(cyapa, cmd, len, values);
- } else {
- cmd = cyapa_i2c_cmds[cmd_idx].cmd;
- len = cyapa_i2c_cmds[cmd_idx].len;
- return cyapa_i2c_reg_read_block(cyapa, cmd, len, values);
- }
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ ret |= CYAPA_ADAPTER_FUNC_I2C;
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ ret |= CYAPA_ADAPTER_FUNC_SMBUS;
+ return ret;
}
/*
* Query device for its current operating state.
- *
*/
static int cyapa_get_state(struct cyapa *cyapa)
{
u8 status[BL_STATUS_SIZE];
+ u8 cmd[32];
+ /* The i2c address of gen4 and gen5 trackpad devices must be even. */
+ bool even_addr = ((cyapa->client->addr & 0x0001) == 0);
+ bool smbus = false;
+ int retries = 2;
int error;
cyapa->state = CYAPA_STATE_NO_DEVICE;
@@ -433,39 +156,74 @@ static int cyapa_get_state(struct cyapa *cyapa)
*
*/
error = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
- status);
+ status);
/*
* On smbus systems in OP mode, the i2c_reg_read will fail with
* -ETIMEDOUT. In this case, try again using the smbus equivalent
* command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
*/
- if (cyapa->smbus && (error == -ETIMEDOUT || error == -ENXIO))
- error = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+ if (cyapa->smbus && (error == -ETIMEDOUT || error == -ENXIO)) {
+ if (!even_addr)
+ error = cyapa_read_block(cyapa,
+ CYAPA_CMD_BL_STATUS, status);
+ smbus = true;
+ }
if (error != BL_STATUS_SIZE)
goto error;
- if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
- switch (status[REG_OP_STATUS] & OP_STATUS_DEV) {
- case CYAPA_DEV_NORMAL:
- case CYAPA_DEV_BUSY:
- cyapa->state = CYAPA_STATE_OP;
- break;
- default:
- error = -EAGAIN;
- goto error;
+ /*
+ * Detect trackpad protocol based on characteristic registers and bits.
+ */
+ do {
+ cyapa->status[REG_OP_STATUS] = status[REG_OP_STATUS];
+ cyapa->status[REG_BL_STATUS] = status[REG_BL_STATUS];
+ cyapa->status[REG_BL_ERROR] = status[REG_BL_ERROR];
+
+ if (cyapa->gen == CYAPA_GEN_UNKNOWN ||
+ cyapa->gen == CYAPA_GEN3) {
+ error = cyapa_gen3_ops.state_parse(cyapa,
+ status, BL_STATUS_SIZE);
+ if (!error)
+ goto out_detected;
}
- } else {
- if (status[REG_BL_STATUS] & BL_STATUS_BUSY)
- cyapa->state = CYAPA_STATE_BL_BUSY;
- else if (status[REG_BL_ERROR] & BL_ERROR_BOOTLOADING)
- cyapa->state = CYAPA_STATE_BL_ACTIVE;
- else
- cyapa->state = CYAPA_STATE_BL_IDLE;
- }
+ if ((cyapa->gen == CYAPA_GEN_UNKNOWN ||
+ cyapa->gen == CYAPA_GEN5) &&
+ !smbus && even_addr) {
+ error = cyapa_gen5_ops.state_parse(cyapa,
+ status, BL_STATUS_SIZE);
+ if (!error)
+ goto out_detected;
+ }
+
+ /*
+ * Write 0x00 0x00 to the trackpad device to force it to update its
+ * status, then retry the detection.
+ */
+ if (!smbus) {
+ cmd[0] = 0x00;
+ cmd[1] = 0x00;
+ error = cyapa_i2c_write(cyapa, 0, 2, cmd);
+ if (error)
+ goto error;
+
+ msleep(50);
+
+ error = cyapa_i2c_read(cyapa, BL_HEAD_OFFSET,
+ BL_STATUS_SIZE, status);
+ if (error)
+ goto error;
+ }
+ } while (--retries > 0 && !smbus);
+
+ goto error;
+out_detected:
+ if (cyapa->state <= CYAPA_STATE_BL_BUSY)
+ return -EAGAIN;
return 0;
+
error:
return (error < 0) ? error : -EAGAIN;
}
@@ -482,364 +240,939 @@ error:
* Returns:
* 0 when the device eventually responds with a valid non-busy state.
* -ETIMEDOUT if device never responds (too many -EAGAIN)
- * < 0 other errors
+ * -EAGAIN if the bootloader is busy or the state is unknown.
+ * < 0 other errors
*/
-static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
+int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
{
int error;
int tries = timeout / 100;
- error = cyapa_get_state(cyapa);
- while ((error || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
- msleep(100);
+ do {
error = cyapa_get_state(cyapa);
- }
- return (error == -EAGAIN || error == -ETIMEDOUT) ? -ETIMEDOUT : error;
-}
+ if (!error && cyapa->state > CYAPA_STATE_BL_BUSY)
+ return 0;
-static int cyapa_bl_deactivate(struct cyapa *cyapa)
-{
- int error;
-
- error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
- bl_deactivate);
- if (error)
- return error;
+ msleep(100);
+ } while (tries--);
- /* wait for bootloader to switch to idle state; should take < 100ms */
- msleep(100);
- error = cyapa_poll_state(cyapa, 500);
- if (error)
- return error;
- if (cyapa->state != CYAPA_STATE_BL_IDLE)
- return -EAGAIN;
- return 0;
+ return (error == -EAGAIN || error == -ETIMEDOUT) ? -ETIMEDOUT : error;
}
/*
- * Exit bootloader
+ * Check if device is operational.
*
- * Send bl_exit command, then wait 50 - 100 ms to let device transition to
- * operational mode. If this is the first time the device's firmware is
- * running, it can take up to 2 seconds to calibrate its sensors. So, poll
- * the device's new state for up to 2 seconds.
+ * An operational device is responding, has exited bootloader, and has
+ * firmware supported by this driver.
*
* Returns:
+ * -ENODEV no device
+ * -EBUSY no device or in bootloader
* -EIO failure while reading from device
- * -EAGAIN device is stuck in bootloader, b/c it has invalid firmware
- * 0 device is supported and in operational mode
+ * -ETIMEDOUT timed out waiting for the bus to go idle or to respond
+ * -EAGAIN device is still in bootloader
+ * if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware
+ * -EINVAL device is in operational mode, but not supported by this driver
+ * 0 device is supported
*/
-static int cyapa_bl_exit(struct cyapa *cyapa)
+static int cyapa_check_is_operational(struct cyapa *cyapa)
{
int error;
- error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ error = cyapa_poll_state(cyapa, 4000);
if (error)
return error;
- /*
- * Wait for bootloader to exit, and operation mode to start.
- * Normally, this takes at least 50 ms.
- */
- usleep_range(50000, 100000);
- /*
- * In addition, when a device boots for the first time after being
- * updated to new firmware, it must first calibrate its sensors, which
- * can take up to an additional 2 seconds.
- */
- error = cyapa_poll_state(cyapa, 2000);
- if (error < 0)
- return error;
- if (cyapa->state != CYAPA_STATE_OP)
- return -EAGAIN;
+ switch (cyapa->gen) {
+ case CYAPA_GEN5:
+ cyapa->ops = &cyapa_gen5_ops;
+ break;
+ case CYAPA_GEN3:
+ cyapa->ops = &cyapa_gen3_ops;
+ break;
+ default:
+ return -ENODEV;
+ }
- return 0;
+ error = cyapa->ops->operational_check(cyapa);
+ if (!error && cyapa_is_operational_mode(cyapa))
+ cyapa->operational = true;
+ else
+ cyapa->operational = false;
+
+ return error;
}
+
/*
- * Set device power mode
- *
+ * Returns 0 if a device is detected, or a negative errno if not.
+ * When the device is detected and operational, it is automatically
+ * reset to full-power active mode.
*/
-static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
+static int cyapa_detect(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
- int ret;
- u8 power;
-
- if (cyapa->state != CYAPA_STATE_OP)
- return 0;
+ int error;
- ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE);
- if (ret < 0)
- return ret;
+ error = cyapa_check_is_operational(cyapa);
+ if (error) {
+ if (error != -ETIMEDOUT && error != -ENODEV &&
+ cyapa_is_bootloader_mode(cyapa)) {
+ dev_warn(dev, "device detected but not operational\n");
+ return 0;
+ }
- power = ret & ~PWR_MODE_MASK;
- power |= power_mode & PWR_MODE_MASK;
- ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
- if (ret < 0) {
- dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
- power_mode, ret);
- return ret;
+ dev_err(dev, "no device detected: %d\n", error);
+ return error;
}
return 0;
}
-static int cyapa_get_query_data(struct cyapa *cyapa)
+static int cyapa_open(struct input_dev *input)
{
- u8 query_data[QUERY_DATA_SIZE];
- int ret;
+ struct cyapa *cyapa = input_get_drvdata(input);
+ struct i2c_client *client = cyapa->client;
+ int error;
- if (cyapa->state != CYAPA_STATE_OP)
- return -EBUSY;
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
- ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data);
- if (ret < 0)
- return ret;
- if (ret != QUERY_DATA_SIZE)
- return -EIO;
+ if (cyapa->operational) {
+ /*
+ * Even if setting full active power mode fails, the device
+ * may still be able to work at a lower scan rate while in
+ * operational mode.
+ */
+ error = cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0);
+ if (error) {
+ dev_warn(&client->dev,
+ "set active power failed: %d\n", error);
+ goto out;
+ }
+ } else {
+ error = cyapa_reinitialize(cyapa);
+ if (error || !cyapa->operational) {
+ error = error ? error : -EAGAIN;
+ goto out;
+ }
+ }
+
+ enable_irq(client->irq);
+ if (!pm_runtime_enabled(&client->dev)) {
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ }
+out:
+ mutex_unlock(&cyapa->state_sync_lock);
+ return error;
+}
+
+static void cyapa_close(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+ struct i2c_client *client = cyapa->client;
+
+ mutex_lock(&cyapa->state_sync_lock);
+
+ disable_irq(client->irq);
+ if (pm_runtime_enabled(&client->dev))
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ if (cyapa->operational)
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
- memcpy(&cyapa->product_id[0], &query_data[0], 5);
- cyapa->product_id[5] = '-';
- memcpy(&cyapa->product_id[6], &query_data[5], 6);
- cyapa->product_id[12] = '-';
- memcpy(&cyapa->product_id[13], &query_data[11], 2);
- cyapa->product_id[15] = '\0';
+ mutex_unlock(&cyapa->state_sync_lock);
+}
- cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK;
+static int cyapa_create_input_dev(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct input_dev *input;
+ int error;
- cyapa->gen = query_data[20] & 0x0f;
+ if (!cyapa->physical_size_x || !cyapa->physical_size_y)
+ return -EINVAL;
- cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22];
- cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23];
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "failed to allocate memory for input device.\n");
+ return -ENOMEM;
+ }
+
+ input->name = CYAPA_NAME;
+ input->phys = cyapa->phys;
+ input->id.bustype = BUS_I2C;
+ input->id.version = 1;
+ input->id.product = 0; /* Means any product in eventcomm. */
+ input->dev.parent = &cyapa->client->dev;
+
+ input->open = cyapa_open;
+ input->close = cyapa_close;
+
+ input_set_drvdata(input, cyapa);
+
+ __set_bit(EV_ABS, input->evbit);
+
+ /* Finger position */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
+ 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
+ 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, cyapa->max_z, 0, 0);
+ if (cyapa->gen > CYAPA_GEN3) {
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
+ /*
+ * Orientation is the angle between the vertical axis and
+ * the major axis of the contact ellipse.
+ * The range is -127 to 127.
+ * The positive direction is clockwise from the vertical axis.
+ * If the ellipse of contact degenerates into a circle,
+ * orientation is reported as 0.
+ *
+ * Also, for Gen5 trackpads the reported orientation is only
+ * accurate to within +/-30 of the actual value.
+ */
+ input_set_abs_params(input, ABS_MT_ORIENTATION,
+ -127, 127, 0, 0);
+ }
+ if (cyapa->gen >= CYAPA_GEN5) {
+ input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 255, 0, 0);
+ }
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ cyapa->max_abs_x / cyapa->physical_size_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ cyapa->max_abs_y / cyapa->physical_size_y);
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+ __set_bit(BTN_LEFT, input->keybit);
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+ __set_bit(BTN_MIDDLE, input->keybit);
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+ __set_bit(BTN_RIGHT, input->keybit);
+
+ if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ /* Handle pointer emulation and unused slots in core */
+ error = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
- cyapa->physical_size_x =
- ((query_data[24] & 0xf0) << 4) | query_data[25];
- cyapa->physical_size_y =
- ((query_data[24] & 0x0f) << 8) | query_data[26];
+ /* Register the device in input subsystem */
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d\n", error);
+ return error;
+ }
+ cyapa->input = input;
return 0;
}
+static void cyapa_enable_irq_for_cmd(struct cyapa *cyapa)
+{
+ struct input_dev *input = cyapa->input;
+
+ if (!input || !input->users) {
+ /*
+ * When input is NULL, the TP must be in deep sleep mode.
+ * In this mode, any later non-power I2C command will fail
+ * unless the device is first brought out of deep sleep,
+ * so switch the TP to active mode here.
+ */
+ if (!input || cyapa->operational)
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0);
+ /* Gen3 always uses polling mode for commands. */
+ if (cyapa->gen >= CYAPA_GEN5)
+ enable_irq(cyapa->client->irq);
+ }
+}
+
+static void cyapa_disable_irq_for_cmd(struct cyapa *cyapa)
+{
+ struct input_dev *input = cyapa->input;
+
+ if (!input || !input->users) {
+ if (cyapa->gen >= CYAPA_GEN5)
+ disable_irq(cyapa->client->irq);
+ if (!input || cyapa->operational)
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+ }
+}
+
/*
- * Check if device is operational.
+ * cyapa_sleep_time_to_pwr_cmd and cyapa_pwr_cmd_to_sleep_time
*
- * An operational device is responding, has exited bootloader, and has
- * firmware supported by this driver.
+ * These are helper functions that convert between integer idle
+ * times and the register settings written to the PowerMode register.
+ * The trackpad supports scan intervals between 20 ms and 1000 ms.
+ * The interval increases in 10 ms steps from 20 ms to 100 ms,
+ * and in 20 ms steps from 100 ms to 1000 ms.
*
- * Returns:
- * -EBUSY no device or in bootloader
- * -EIO failure while reading from device
- * -EAGAIN device is still in bootloader
- * if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware
- * -EINVAL device is in operational mode, but not supported by this driver
- * 0 device is supported
+ * When Idle_Time < 100, the formula to convert Idle_Time to Idle_Command is:
+ * Idle_Command = Idle_Time / 10;
+ * When Idle_Time >= 100, the formula to convert Idle_Time to Idle_Command is:
+ * Idle_Command = Idle_Time / 20 + 5;
*/
-static int cyapa_check_is_operational(struct cyapa *cyapa)
+u8 cyapa_sleep_time_to_pwr_cmd(u16 sleep_time)
{
- struct device *dev = &cyapa->client->dev;
- static const char unique_str[] = "CYTRA";
- int error;
+ u16 encoded_time;
- error = cyapa_poll_state(cyapa, 2000);
+ sleep_time = clamp_val(sleep_time, 20, 1000);
+ encoded_time = sleep_time < 100 ? sleep_time / 10 : sleep_time / 20 + 5;
+ return (encoded_time << 2) & PWR_MODE_MASK;
+}
+
+u16 cyapa_pwr_cmd_to_sleep_time(u8 pwr_mode)
+{
+ u8 encoded_time = pwr_mode >> 2;
+
+ return (encoded_time < 10) ? encoded_time * 10
+ : (encoded_time - 5) * 20;
+}
+
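+/*
+ * Worked example of the conversion helpers above (the values follow
+ * directly from the formulas and are illustrative only):
+ *   sleep_time = 50ms  -> 50/10 = 5       -> pwr_cmd = (5 << 2)  = 0x14
+ *                                            (i.e. PWR_MODE_SLEEP)
+ *   sleep_time = 200ms -> 200/20 + 5 = 15 -> pwr_cmd = (15 << 2) = 0x3c
+ * and back again:
+ *   pwr_cmd = 0x3c -> encoded_time = 15 -> (15 - 5) * 20 = 200ms
+ * Note the encoding quantizes: e.g. 55ms encodes to 5 and decodes to 50ms.
+ */
+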
+/* Returns 0 on successful driver initialization and device detection, negative on failure. */
+static int cyapa_initialize(struct cyapa *cyapa)
+{
+ int error = 0;
+
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ cyapa->gen = CYAPA_GEN_UNKNOWN;
+ mutex_init(&cyapa->state_sync_lock);
+
+ /*
+ * Set hard-coded defaults; they will be updated with the trackpad's
+ * own default values after probe and initialization.
+ */
+ cyapa->suspend_power_mode = PWR_MODE_SLEEP;
+ cyapa->suspend_sleep_time =
+ cyapa_pwr_cmd_to_sleep_time(cyapa->suspend_power_mode);
+
+ /* ops.initialize() prepares the module for communication. */
+ error = cyapa_gen3_ops.initialize(cyapa);
+ if (!error)
+ error = cyapa_gen5_ops.initialize(cyapa);
if (error)
return error;
- switch (cyapa->state) {
- case CYAPA_STATE_BL_ACTIVE:
- error = cyapa_bl_deactivate(cyapa);
- if (error)
- return error;
- /* Fallthrough state */
- case CYAPA_STATE_BL_IDLE:
- error = cyapa_bl_exit(cyapa);
- if (error)
- return error;
+ error = cyapa_detect(cyapa);
+ if (error)
+ return error;
- /* Fallthrough state */
- case CYAPA_STATE_OP:
- error = cyapa_get_query_data(cyapa);
- if (error)
- return error;
+ /* Power down the device until we need it. */
+ if (cyapa->operational)
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
- /* only support firmware protocol gen3 */
- if (cyapa->gen != CYAPA_GEN3) {
- dev_err(dev, "unsupported protocol version (%d)",
- cyapa->gen);
- return -EINVAL;
- }
+ return 0;
+}
+
+static int cyapa_reinitialize(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct input_dev *input = cyapa->input;
+ int error;
+
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
- /* only support product ID starting with CYTRA */
- if (memcmp(cyapa->product_id, unique_str,
- sizeof(unique_str) - 1) != 0) {
- dev_err(dev, "unsupported product ID (%s)\n",
- cyapa->product_id);
- return -EINVAL;
+ /* Avoid command failures when TP was in OFF state. */
+ if (cyapa->operational)
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+
+ error = cyapa_detect(cyapa);
+ if (error)
+ goto out;
+
+ if (!input && cyapa->operational) {
+ error = cyapa_create_input_dev(cyapa);
+ if (error) {
+ dev_err(dev, "create input_dev instance failed: %d\n",
+ error);
+ goto out;
}
- return 0;
+ }
- default:
- return -EIO;
+out:
+ if (!input || !input->users) {
+ /* Reset to power OFF state to save power when no users have it open. */
+ if (cyapa->operational)
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+ } else if (!error && cyapa->operational) {
+ /*
+ * Only enable runtime PM when the device is in
+ * operational mode and input->users > 0.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
- return 0;
+
+ return error;
}
static irqreturn_t cyapa_irq(int irq, void *dev_id)
{
struct cyapa *cyapa = dev_id;
struct device *dev = &cyapa->client->dev;
- struct input_dev *input = cyapa->input;
- struct cyapa_reg_data data;
- int i;
- int ret;
- int num_fingers;
+ pm_runtime_get_sync(dev);
if (device_may_wakeup(dev))
pm_wakeup_event(dev, 0);
- ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
- if (ret != sizeof(data))
- goto out;
+ /* The interrupt event may be caused by a host command to the trackpad. */
+ if (cyapa->ops->irq_cmd_handler(cyapa)) {
+ /*
+ * The interrupt event may be from trackpad device input reporting.
+ */
+ if (!cyapa->input) {
+ /*
+ * Still probing, or a firmware image update
+ * or read is in progress.
+ */
+ cyapa->ops->sort_empty_output_data(cyapa,
+ NULL, NULL, NULL);
+ goto out;
+ }
- if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
- (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
- (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
- goto out;
+ if (!cyapa->operational || cyapa->ops->irq_handler(cyapa)) {
+ if (!mutex_trylock(&cyapa->state_sync_lock)) {
+ cyapa->ops->sort_empty_output_data(cyapa,
+ NULL, NULL, NULL);
+ goto out;
+ }
+ cyapa_reinitialize(cyapa);
+ mutex_unlock(&cyapa->state_sync_lock);
+ }
}
- num_fingers = (data.finger_btn >> 4) & 0x0f;
- for (i = 0; i < num_fingers; i++) {
- const struct cyapa_touch *touch = &data.touches[i];
- /* Note: touch->id range is 1 to 15; slots are 0 to 14. */
- int slot = touch->id - 1;
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
+ return IRQ_HANDLED;
+}
+
+/*
+ **************************************************************
+ * sysfs interface
+ **************************************************************
+*/
+#ifdef CONFIG_PM_SLEEP
+static ssize_t cyapa_show_suspend_scanrate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ u8 pwr_cmd = cyapa->suspend_power_mode;
+ u16 sleep_time;
+ int len;
+ int error;
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+
+ pwr_cmd = cyapa->suspend_power_mode;
+ sleep_time = cyapa->suspend_sleep_time;
+
+ mutex_unlock(&cyapa->state_sync_lock);
+
+ switch (pwr_cmd) {
+ case PWR_MODE_BTN_ONLY:
+ len = scnprintf(buf, PAGE_SIZE, "%s\n", BTN_ONLY_MODE_NAME);
+ break;
- input_mt_slot(input, slot);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
- input_report_abs(input, ABS_MT_POSITION_X,
- ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
- input_report_abs(input, ABS_MT_POSITION_Y,
- ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
- input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
+ case PWR_MODE_OFF:
+ len = scnprintf(buf, PAGE_SIZE, "%s\n", OFF_MODE_NAME);
+ break;
+
+ default:
+ len = scnprintf(buf, PAGE_SIZE, "%u\n",
+ cyapa->gen == CYAPA_GEN3 ?
+ cyapa_pwr_cmd_to_sleep_time(pwr_cmd) :
+ sleep_time);
+ break;
}
- input_mt_sync_frame(input);
+ return len;
+}
- if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
- input_report_key(input, BTN_LEFT,
- data.finger_btn & OP_DATA_LEFT_BTN);
+static ssize_t cyapa_update_suspend_scanrate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ u16 sleep_time;
+ int error;
- if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
- input_report_key(input, BTN_MIDDLE,
- data.finger_btn & OP_DATA_MIDDLE_BTN);
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
- if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
- input_report_key(input, BTN_RIGHT,
- data.finger_btn & OP_DATA_RIGHT_BTN);
+ if (sysfs_streq(buf, BTN_ONLY_MODE_NAME)) {
+ cyapa->suspend_power_mode = PWR_MODE_BTN_ONLY;
+ } else if (sysfs_streq(buf, OFF_MODE_NAME)) {
+ cyapa->suspend_power_mode = PWR_MODE_OFF;
+ } else if (!kstrtou16(buf, 10, &sleep_time)) {
+ cyapa->suspend_sleep_time = min_t(u16, sleep_time, 1000);
+ cyapa->suspend_power_mode =
+ cyapa_sleep_time_to_pwr_cmd(cyapa->suspend_sleep_time);
+ } else {
+ count = -EINVAL;
+ }
- input_sync(input);
+ mutex_unlock(&cyapa->state_sync_lock);
-out:
- return IRQ_HANDLED;
+ return count;
}
-static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
+static DEVICE_ATTR(suspend_scanrate_ms, S_IRUGO|S_IWUSR,
+ cyapa_show_suspend_scanrate,
+ cyapa_update_suspend_scanrate);
+
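+/*
+ * Values accepted when writing suspend_scanrate_ms (merged into the
+ * device's sysfs "power" group, so typically something like
+ * .../power/suspend_scanrate_ms; the exact path depends on the bus layout):
+ *   "buttononly" - suspend into button-only mode (PWR_MODE_BTN_ONLY)
+ *   "off"        - power the trackpad off while suspended (PWR_MODE_OFF)
+ *   "<ms>"       - decimal scan interval in ms, encoded with
+ *                  cyapa_sleep_time_to_pwr_cmd() (effective range 20-1000ms)
+ */
+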
+static struct attribute *cyapa_power_wakeup_entries[] = {
+ &dev_attr_suspend_scanrate_ms.attr,
+ NULL,
+};
+
+static const struct attribute_group cyapa_power_wakeup_group = {
+ .name = power_group_name,
+ .attrs = cyapa_power_wakeup_entries,
+};
+
+static void cyapa_remove_power_wakeup_group(void *data)
{
- u8 ret = CYAPA_ADAPTER_FUNC_NONE;
+ struct cyapa *cyapa = data;
- if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- ret |= CYAPA_ADAPTER_FUNC_I2C;
- if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK))
- ret |= CYAPA_ADAPTER_FUNC_SMBUS;
- return ret;
+ sysfs_unmerge_group(&cyapa->client->dev.kobj,
+ &cyapa_power_wakeup_group);
}
-static int cyapa_open(struct input_dev *input)
+static int cyapa_prepare_wakeup_controls(struct cyapa *cyapa)
{
- struct cyapa *cyapa = input_get_drvdata(input);
struct i2c_client *client = cyapa->client;
+ struct device *dev = &client->dev;
+ int error;
+
+ if (device_can_wakeup(dev)) {
+ error = sysfs_merge_group(&client->dev.kobj,
+ &cyapa_power_wakeup_group);
+ if (error) {
+ dev_err(dev, "failed to add power wakeup group: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(dev,
+ cyapa_remove_power_wakeup_group, cyapa);
+ if (error) {
+ cyapa_remove_power_wakeup_group(cyapa);
+ dev_err(dev, "failed to add power cleanup action: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+#else
+static inline int cyapa_prepare_wakeup_controls(struct cyapa *cyapa)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static ssize_t cyapa_show_rt_suspend_scanrate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ u8 pwr_cmd;
+ u16 sleep_time;
+ int error;
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+
+ pwr_cmd = cyapa->runtime_suspend_power_mode;
+ sleep_time = cyapa->runtime_suspend_sleep_time;
+
+ mutex_unlock(&cyapa->state_sync_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ cyapa->gen == CYAPA_GEN3 ?
+ cyapa_pwr_cmd_to_sleep_time(pwr_cmd) :
+ sleep_time);
+}
+
+static ssize_t cyapa_update_rt_suspend_scanrate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ u16 time;
+ int error;
+
+ if (buf == NULL || count == 0 || kstrtou16(buf, 10, &time)) {
+ dev_err(dev, "invalid runtime suspend scanrate ms parameter\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When the suspend scanrate is changed, call pm_runtime_get to resume
+ * a potentially suspended device, update the new pwr_cmd, and then
+ * pm_runtime_put to suspend it into the new power mode.
+ */
+ pm_runtime_get_sync(dev);
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+
+ cyapa->runtime_suspend_sleep_time = min_t(u16, time, 1000);
+ cyapa->runtime_suspend_power_mode =
+ cyapa_sleep_time_to_pwr_cmd(cyapa->runtime_suspend_sleep_time);
+
+ mutex_unlock(&cyapa->state_sync_lock);
+
+ pm_runtime_put_sync_autosuspend(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR(runtime_suspend_scanrate_ms, S_IRUGO|S_IWUSR,
+ cyapa_show_rt_suspend_scanrate,
+ cyapa_update_rt_suspend_scanrate);
+
+static struct attribute *cyapa_power_runtime_entries[] = {
+ &dev_attr_runtime_suspend_scanrate_ms.attr,
+ NULL,
+};
+
+static const struct attribute_group cyapa_power_runtime_group = {
+ .name = power_group_name,
+ .attrs = cyapa_power_runtime_entries,
+};
+
+static void cyapa_remove_power_runtime_group(void *data)
+{
+ struct cyapa *cyapa = data;
+
+ sysfs_unmerge_group(&cyapa->client->dev.kobj,
+ &cyapa_power_runtime_group);
+}
+
+static int cyapa_start_runtime(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
int error;
- error = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ cyapa->runtime_suspend_power_mode = PWR_MODE_IDLE;
+ cyapa->runtime_suspend_sleep_time =
+ cyapa_pwr_cmd_to_sleep_time(cyapa->runtime_suspend_power_mode);
+
+ error = sysfs_merge_group(&dev->kobj, &cyapa_power_runtime_group);
if (error) {
- dev_err(&client->dev, "set active power failed: %d\n", error);
+ dev_err(dev,
+ "failed to create power runtime group: %d\n", error);
return error;
}
- enable_irq(client->irq);
+ error = devm_add_action(dev, cyapa_remove_power_runtime_group, cyapa);
+ if (error) {
+ cyapa_remove_power_runtime_group(cyapa);
+ dev_err(dev,
+ "failed to add power runtime cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ /* Runtime PM is not enabled until the device is operational and opened. */
+ pm_runtime_set_suspended(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY);
+
return 0;
}
+#else
+static inline int cyapa_start_runtime(struct cyapa *cyapa)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
-static void cyapa_close(struct input_dev *input)
+static ssize_t cyapa_show_fm_ver(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct cyapa *cyapa = input_get_drvdata(input);
+ int error;
+ struct cyapa *cyapa = dev_get_drvdata(dev);
- disable_irq(cyapa->client->irq);
- cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+ error = scnprintf(buf, PAGE_SIZE, "%d.%d\n", cyapa->fw_maj_ver,
+ cyapa->fw_min_ver);
+ mutex_unlock(&cyapa->state_sync_lock);
+ return error;
}
-static int cyapa_create_input_dev(struct cyapa *cyapa)
+static ssize_t cyapa_show_product_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int size;
+ int error;
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+ size = scnprintf(buf, PAGE_SIZE, "%s\n", cyapa->product_id);
+ mutex_unlock(&cyapa->state_sync_lock);
+ return size;
+}
+
+static int cyapa_firmware(struct cyapa *cyapa, const char *fw_name)
{
struct device *dev = &cyapa->client->dev;
- struct input_dev *input;
+ const struct firmware *fw;
int error;
- if (!cyapa->physical_size_x || !cyapa->physical_size_y)
- return -EINVAL;
+ error = request_firmware(&fw, fw_name, dev);
+ if (error) {
+ dev_err(dev, "Could not load firmware from %s: %d\n",
+ fw_name, error);
+ return error;
+ }
- input = devm_input_allocate_device(dev);
- if (!input) {
- dev_err(dev, "failed to allocate memory for input device.\n");
- return -ENOMEM;
+ error = cyapa->ops->check_fw(cyapa, fw);
+ if (error) {
+ dev_err(dev, "Invalid CYAPA firmware image: %s\n",
+ fw_name);
+ goto done;
}
- input->name = CYAPA_NAME;
- input->phys = cyapa->phys;
- input->id.bustype = BUS_I2C;
- input->id.version = 1;
- input->id.product = 0; /* Means any product in eventcomm. */
- input->dev.parent = &cyapa->client->dev;
+ /*
+ * Resume the potentially suspended device because a firmware
+ * update on a device that is not in FULL_ACTIVE mode has a
+ * chance to fail.
+ */
+ pm_runtime_get_sync(dev);
- input->open = cyapa_open;
- input->close = cyapa_close;
+ /* Require IRQ support for firmware update commands. */
+ cyapa_enable_irq_for_cmd(cyapa);
- input_set_drvdata(input, cyapa);
+ error = cyapa->ops->bl_enter(cyapa);
+ if (error) {
+ dev_err(dev, "bl_enter failed, %d\n", error);
+ goto err_detect;
+ }
- __set_bit(EV_ABS, input->evbit);
+ error = cyapa->ops->bl_activate(cyapa);
+ if (error) {
+ dev_err(dev, "bl_activate failed, %d\n", error);
+ goto err_detect;
+ }
- /* Finger position */
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
- 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
- 0);
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ error = cyapa->ops->bl_initiate(cyapa, fw);
+ if (error) {
+ dev_err(dev, "bl_initiate failed, %d\n", error);
+ goto err_detect;
+ }
- input_abs_set_res(input, ABS_MT_POSITION_X,
- cyapa->max_abs_x / cyapa->physical_size_x);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- cyapa->max_abs_y / cyapa->physical_size_y);
+ error = cyapa->ops->update_fw(cyapa, fw);
+ if (error) {
+ dev_err(dev, "update_fw failed, %d\n", error);
+ goto err_detect;
+ }
- if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
- __set_bit(BTN_LEFT, input->keybit);
- if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
- __set_bit(BTN_MIDDLE, input->keybit);
- if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
- __set_bit(BTN_RIGHT, input->keybit);
+err_detect:
+ cyapa_disable_irq_for_cmd(cyapa);
+ pm_runtime_put_noidle(dev);
- if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+done:
+ release_firmware(fw);
+ return error;
+}
- /* Handle pointer emulation and unused slots in core */
- error = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
- INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+static ssize_t cyapa_update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ char fw_name[NAME_MAX];
+ int ret, error;
+
+ if (count >= NAME_MAX) {
+ dev_err(dev, "File name too long\n");
+ return -EINVAL;
+ }
+
+ memcpy(fw_name, buf, count);
+ if (fw_name[count - 1] == '\n')
+ fw_name[count - 1] = '\0';
+ else
+ fw_name[count] = '\0';
+
+ if (cyapa->input) {
+ /*
+ * Force the input device to be re-registered after the firmware
+ * image is updated, so that any parameters changed in the new
+ * firmware image take effect immediately.
+ */
+ input_unregister_device(cyapa->input);
+ cyapa->input = NULL;
+ }
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
if (error) {
- dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ /*
+ * Regardless of the error, reinitialize to try to restore the
+ * trackpad to the state it was in before entering firmware update.
+ */
+ cyapa_reinitialize(cyapa);
return error;
}
- cyapa->input = input;
- return 0;
+ error = cyapa_firmware(cyapa, fw_name);
+ if (error)
+ dev_err(dev, "firmware update failed: %d\n", error);
+ else
+ dev_dbg(dev, "firmware update successfully done.\n");
+
+ /*
+ * Redetect the trackpad device state because the firmware update
+ * process resets the trackpad device into bootloader mode.
+ */
+ ret = cyapa_reinitialize(cyapa);
+ if (ret) {
+ dev_err(dev, "failed to redetect after updated: %d\n", ret);
+ error = error ? error : ret;
+ }
+
+ mutex_unlock(&cyapa->state_sync_lock);
+
+ return error ? error : count;
+}
+
+static ssize_t cyapa_calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error;
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+
+ if (cyapa->operational) {
+ cyapa_enable_irq_for_cmd(cyapa);
+ error = cyapa->ops->calibrate_store(dev, attr, buf, count);
+ cyapa_disable_irq_for_cmd(cyapa);
+ } else {
+ error = -EBUSY; /* Still running in bootloader mode. */
+ }
+
+ mutex_unlock(&cyapa->state_sync_lock);
+ return error < 0 ? error : count;
+}
+
+static ssize_t cyapa_show_baseline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ ssize_t error;
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+
+ if (cyapa->operational) {
+ cyapa_enable_irq_for_cmd(cyapa);
+ error = cyapa->ops->show_baseline(dev, attr, buf);
+ cyapa_disable_irq_for_cmd(cyapa);
+ } else {
+ error = -EBUSY; /* Still running in bootloader mode. */
+ }
+
+ mutex_unlock(&cyapa->state_sync_lock);
+ return error;
+}
+
+static char *cyapa_state_to_string(struct cyapa *cyapa)
+{
+ switch (cyapa->state) {
+ case CYAPA_STATE_BL_BUSY:
+ return "bootloader busy";
+ case CYAPA_STATE_BL_IDLE:
+ return "bootloader idle";
+ case CYAPA_STATE_BL_ACTIVE:
+ return "bootloader active";
+ case CYAPA_STATE_GEN5_BL:
+ return "bootloader";
+ case CYAPA_STATE_OP:
+ case CYAPA_STATE_GEN5_APP:
+ return "operational"; /* Normal valid state. */
+ default:
+ return "invalid mode";
+ }
+}
+
+static ssize_t cyapa_show_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int size;
+ int error;
+
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error)
+ return error;
+
+ size = scnprintf(buf, PAGE_SIZE, "gen%d %s\n",
+ cyapa->gen, cyapa_state_to_string(cyapa));
+
+ mutex_unlock(&cyapa->state_sync_lock);
+ return size;
+}
+
+static DEVICE_ATTR(firmware_version, S_IRUGO, cyapa_show_fm_ver, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, cyapa_show_product_id, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, cyapa_update_fw_store);
+static DEVICE_ATTR(baseline, S_IRUGO, cyapa_show_baseline, NULL);
+static DEVICE_ATTR(calibrate, S_IWUSR, NULL, cyapa_calibrate_store);
+static DEVICE_ATTR(mode, S_IRUGO, cyapa_show_mode, NULL);
+
+static struct attribute *cyapa_sysfs_entries[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_update_fw.attr,
+ &dev_attr_baseline.attr,
+ &dev_attr_calibrate.attr,
+ &dev_attr_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group cyapa_sysfs_group = {
+ .attrs = cyapa_sysfs_entries,
+};
+
+static void cyapa_remove_sysfs_group(void *data)
+{
+ struct cyapa *cyapa = data;
+
+ sysfs_remove_group(&cyapa->client->dev.kobj, &cyapa_sysfs_group);
}
static int cyapa_probe(struct i2c_client *client,
@@ -848,6 +1181,7 @@ static int cyapa_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct cyapa *cyapa;
u8 adapter_func;
+ union i2c_smbus_data dummy;
int error;
adapter_func = cyapa_check_adapter_functionality(client);
@@ -856,38 +1190,54 @@ static int cyapa_probe(struct i2c_client *client,
return -EIO;
}
+ /* Make sure there is something at this address */
+ if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0)
+ return -ENODEV;
+
cyapa = devm_kzalloc(dev, sizeof(struct cyapa), GFP_KERNEL);
if (!cyapa)
return -ENOMEM;
- cyapa->gen = CYAPA_GEN3;
+ /* i2c isn't supported, use smbus */
+ if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
+ cyapa->smbus = true;
+
cyapa->client = client;
i2c_set_clientdata(client, cyapa);
sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
client->addr);
- /* i2c isn't supported, use smbus */
- if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
- cyapa->smbus = true;
+ error = cyapa_initialize(cyapa);
+ if (error) {
+ dev_err(dev, "failed to detect and initialize tp device.\n");
+ return error;
+ }
- cyapa->state = CYAPA_STATE_NO_DEVICE;
+ error = sysfs_create_group(&client->dev.kobj, &cyapa_sysfs_group);
+ if (error) {
+ dev_err(dev, "failed to create sysfs entries: %d\n", error);
+ return error;
+ }
- error = cyapa_check_is_operational(cyapa);
+ error = devm_add_action(dev, cyapa_remove_sysfs_group, cyapa);
if (error) {
- dev_err(dev, "device not operational, %d\n", error);
+ cyapa_remove_sysfs_group(cyapa);
+ dev_err(dev, "failed to add sysfs cleanup action: %d\n", error);
return error;
}
- /* Power down the device until we need it */
- error = cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ error = cyapa_prepare_wakeup_controls(cyapa);
if (error) {
- dev_err(dev, "failed to quiesce the device: %d\n", error);
+ dev_err(dev, "failed to prepare wakeup controls: %d\n", error);
return error;
}
- error = cyapa_create_input_dev(cyapa);
- if (error)
+ error = cyapa_start_runtime(cyapa);
+ if (error) {
+ dev_err(dev, "failed to start pm_runtime: %d\n", error);
return error;
+ }
error = devm_request_threaded_irq(dev, client->irq,
NULL, cyapa_irq,
@@ -901,11 +1251,18 @@ static int cyapa_probe(struct i2c_client *client,
/* Disable IRQ until the device is opened */
disable_irq(client->irq);
- /* Register the device in input subsystem */
- error = input_register_device(cyapa->input);
- if (error) {
- dev_err(dev, "failed to register input device: %d\n", error);
- return error;
+ /*
+ * Register the device in the input subsystem when it's operational.
+ * Otherwise, keep it within this driver so it can be recovered or updated
+ * through the sysfs mode and update_fw interfaces by the user or applications.
+ */
+ if (cyapa->operational) {
+ error = cyapa_create_input_dev(cyapa);
+ if (error) {
+ dev_err(dev, "create input_dev instance failed: %d\n",
+ error);
+ return error;
+ }
}
return 0;
@@ -915,32 +1272,40 @@ static int __maybe_unused cyapa_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct cyapa *cyapa = i2c_get_clientdata(client);
- struct input_dev *input = cyapa->input;
u8 power_mode;
int error;
- error = mutex_lock_interruptible(&input->mutex);
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
if (error)
return error;
+ /*
+ * Runtime PM is enabled only when the device is in operational mode
+ * and has users, so check whether it is enabled before disabling it
+ * to avoid an unbalanced disable warning.
+ */
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
disable_irq(client->irq);
/*
* Set trackpad device to idle mode if wakeup is allowed,
* otherwise turn off.
*/
- power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
- : PWR_MODE_OFF;
- error = cyapa_set_power_mode(cyapa, power_mode);
- if (error)
- dev_err(dev, "resume: set power mode to %d failed: %d\n",
- power_mode, error);
+ if (cyapa->operational) {
+ power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
+ : PWR_MODE_OFF;
+ error = cyapa->ops->set_power_mode(cyapa, power_mode,
+ cyapa->suspend_sleep_time);
+ if (error)
+ dev_err(dev, "suspend set power mode failed: %d\n",
+ error);
+ }
if (device_may_wakeup(dev))
cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
- mutex_unlock(&input->mutex);
-
+ mutex_unlock(&cyapa->state_sync_lock);
return 0;
}
@@ -948,29 +1313,56 @@ static int __maybe_unused cyapa_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct cyapa *cyapa = i2c_get_clientdata(client);
- struct input_dev *input = cyapa->input;
- u8 power_mode;
int error;
- mutex_lock(&input->mutex);
+ mutex_lock(&cyapa->state_sync_lock);
- if (device_may_wakeup(dev) && cyapa->irq_wake)
+ if (device_may_wakeup(dev) && cyapa->irq_wake) {
disable_irq_wake(client->irq);
+ cyapa->irq_wake = false;
+ }
- power_mode = input->users ? PWR_MODE_FULL_ACTIVE : PWR_MODE_OFF;
- error = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ /* Update device states and runtime PM states. */
+ error = cyapa_reinitialize(cyapa);
if (error)
- dev_warn(dev, "resume: set power mode to %d failed: %d\n",
- power_mode, error);
+ dev_warn(dev, "failed to reinitialize TP device: %d\n", error);
enable_irq(client->irq);
- mutex_unlock(&input->mutex);
+ mutex_unlock(&cyapa->state_sync_lock);
+ return 0;
+}
+
+static int __maybe_unused cyapa_runtime_suspend(struct device *dev)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error;
+
+ error = cyapa->ops->set_power_mode(cyapa,
+ cyapa->runtime_suspend_power_mode,
+ cyapa->runtime_suspend_sleep_time);
+ if (error)
+ dev_warn(dev, "runtime suspend failed: %d\n", error);
+
+ return 0;
+}
+
+static int __maybe_unused cyapa_runtime_resume(struct device *dev)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error;
+
+ error = cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+ if (error)
+ dev_warn(dev, "runtime resume failed: %d\n", error);
return 0;
}
-static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
+static const struct dev_pm_ops cyapa_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cyapa_suspend, cyapa_resume)
+ SET_RUNTIME_PM_OPS(cyapa_runtime_suspend, cyapa_runtime_resume, NULL)
+};
static const struct i2c_device_id cyapa_id_table[] = {
{ "cyapa", 0 },
@@ -978,11 +1370,21 @@ static const struct i2c_device_id cyapa_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, cyapa_id_table);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cyapa_acpi_id[] = {
+ { "CYAP0000", 0 }, /* Gen3 trackpad with 0x67 I2C address. */
+ { "CYAP0001", 0 }, /* Gen5 trackpad with 0x24 I2C address. */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cyapa_acpi_id);
+#endif
+
static struct i2c_driver cyapa_driver = {
.driver = {
.name = "cyapa",
.owner = THIS_MODULE,
.pm = &cyapa_pm_ops,
+ .acpi_match_table = ACPI_PTR(cyapa_acpi_id),
},
.probe = cyapa_probe,
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
new file mode 100644
index 000000000000..adc9ed5dcb0e
--- /dev/null
+++ b/drivers/input/mouse/cyapa.h
@@ -0,0 +1,301 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ *
+ * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef _CYAPA_H
+#define _CYAPA_H
+
+#include <linux/firmware.h>
+
+/* APA trackpad firmware generation number. */
+#define CYAPA_GEN_UNKNOWN 0x00 /* unknown protocol. */
+#define CYAPA_GEN3 0x03 /* support MT-protocol B with tracking ID. */
+#define CYAPA_GEN5 0x05 /* support TrueTouch GEN5 trackpad device. */
+
+#define CYAPA_NAME "Cypress APA Trackpad (cyapa)"
+
+/*
+ * Macros for SMBus communication
+ */
+#define SMBUS_READ 0x01
+#define SMBUS_WRITE 0x00
+#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
+#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
+#define SMBUS_BYTE_BLOCK_CMD_MASK 0x80
+#define SMBUS_GROUP_BLOCK_CMD_MASK 0x40
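+
+/*
+ * Example of how the helpers above compose an SMBus command byte
+ * (purely illustrative; the encoded commands themselves live in
+ * cyapa_gen3.c). For a block command base of 0xc8, reading 32-byte
+ * chunk index 1:
+ *   SMBUS_ENCODE_IDX(0xc8, 1)         = 0xc8 | (1 << 1) = 0xca
+ *   SMBUS_ENCODE_RW(0xca, SMBUS_READ) = 0xca | 0x01     = 0xcb
+ */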
+
+/* Commands for read/write registers of Cypress trackpad */
+#define CYAPA_CMD_SOFT_RESET 0x00
+#define CYAPA_CMD_POWER_MODE 0x01
+#define CYAPA_CMD_DEV_STATUS 0x02
+#define CYAPA_CMD_GROUP_DATA 0x03
+#define CYAPA_CMD_GROUP_CMD 0x04
+#define CYAPA_CMD_GROUP_QUERY 0x05
+#define CYAPA_CMD_BL_STATUS 0x06
+#define CYAPA_CMD_BL_HEAD 0x07
+#define CYAPA_CMD_BL_CMD 0x08
+#define CYAPA_CMD_BL_DATA 0x09
+#define CYAPA_CMD_BL_ALL 0x0a
+#define CYAPA_CMD_BLK_PRODUCT_ID 0x0b
+#define CYAPA_CMD_BLK_HEAD 0x0c
+#define CYAPA_CMD_MAX_BASELINE 0x0d
+#define CYAPA_CMD_MIN_BASELINE 0x0e
+
+#define BL_HEAD_OFFSET 0x00
+#define BL_DATA_OFFSET 0x10
+
+#define BL_STATUS_SIZE 3 /* Length of gen3 bootloader status registers */
+#define CYAPA_REG_MAP_SIZE 256
+
+/*
+ * Gen3 Operational Device Status Register
+ *
+ * bit 7: Valid interrupt source
+ * bit 6 - 4: Reserved
+ * bit 3 - 2: Power status
+ * bit 1 - 0: Device status
+ */
+#define REG_OP_STATUS 0x00
+#define OP_STATUS_SRC 0x80
+#define OP_STATUS_POWER 0x0c
+#define OP_STATUS_DEV 0x03
+#define OP_STATUS_MASK (OP_STATUS_SRC | OP_STATUS_POWER | OP_STATUS_DEV)
+
+/*
+ * Operational Finger Count/Button Flags Register
+ *
+ * bit 7 - 4: Number of fingers currently touching the pad
+ * bit 3: Valid data
+ * bit 2: Middle Physical Button
+ * bit 1: Right Physical Button
+ * bit 0: Left physical Button
+ */
+#define REG_OP_DATA1 0x01
+#define OP_DATA_VALID 0x08
+#define OP_DATA_MIDDLE_BTN 0x04
+#define OP_DATA_RIGHT_BTN 0x02
+#define OP_DATA_LEFT_BTN 0x01
+#define OP_DATA_BTN_MASK (OP_DATA_MIDDLE_BTN | OP_DATA_RIGHT_BTN | \
+ OP_DATA_LEFT_BTN)
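+
+/*
+ * For example, a REG_OP_DATA1 value of 0x29 decodes (per the bit layout
+ * above) as: two fingers touching (bits 7-4 = 2), data valid
+ * (OP_DATA_VALID) and the left physical button pressed (OP_DATA_LEFT_BTN).
+ */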
+
+/*
+ * Write-only command file register used to issue commands and
+ * parameters to the bootloader.
+ * The default value read from it is always 0x00.
+ */
+#define REG_BL_FILE 0x00
+#define BL_FILE 0x00
+
+/*
+ * Bootloader Status Register
+ *
+ * bit 7: Busy
+ * bit 6 - 5: Reserved
+ * bit 4: Bootloader running
+ * bit 3 - 2: Reserved
+ * bit 1: Watchdog Reset
+ * bit 0: Checksum valid
+ */
+#define REG_BL_STATUS 0x01
+#define BL_STATUS_REV_6_5 0x60
+#define BL_STATUS_BUSY 0x80
+#define BL_STATUS_RUNNING 0x10
+#define BL_STATUS_REV_3_2 0x0c
+#define BL_STATUS_WATCHDOG 0x02
+#define BL_STATUS_CSUM_VALID 0x01
+#define BL_STATUS_REV_MASK (BL_STATUS_WATCHDOG | BL_STATUS_REV_3_2 | \
+ BL_STATUS_REV_6_5)
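+
+/*
+ * For example, a REG_BL_STATUS value of 0x11 means the bootloader is
+ * running (BL_STATUS_RUNNING) and the firmware image checksum is valid
+ * (BL_STATUS_CSUM_VALID); 0x10 means the bootloader is running but the
+ * checksum is invalid; 0x91 additionally has BL_STATUS_BUSY set.
+ */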
+
+/*
+ * Bootloader Error Register
+ *
+ * bit 7: Invalid
+ * bit 6: Invalid security key
+ * bit 5: Bootloading
+ * bit 4: Command checksum
+ * bit 3: Flash protection error
+ * bit 2: Flash checksum error
+ * bit 1 - 0: Reserved
+ */
+#define REG_BL_ERROR 0x02
+#define BL_ERROR_INVALID 0x80
+#define BL_ERROR_INVALID_KEY 0x40
+#define BL_ERROR_BOOTLOADING 0x20
+#define BL_ERROR_CMD_CSUM 0x10
+#define BL_ERROR_FLASH_PROT 0x08
+#define BL_ERROR_FLASH_CSUM 0x04
+#define BL_ERROR_RESERVED 0x03
+#define BL_ERROR_NO_ERR_IDLE 0x00
+#define BL_ERROR_NO_ERR_ACTIVE (BL_ERROR_BOOTLOADING)
+
+#define CAPABILITY_BTN_SHIFT 3
+#define CAPABILITY_LEFT_BTN_MASK (0x01 << 3)
+#define CAPABILITY_RIGHT_BTN_MASK (0x01 << 4)
+#define CAPABILITY_MIDDLE_BTN_MASK (0x01 << 5)
+#define CAPABILITY_BTN_MASK (CAPABILITY_LEFT_BTN_MASK | \
+ CAPABILITY_RIGHT_BTN_MASK | \
+ CAPABILITY_MIDDLE_BTN_MASK)
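+
+/*
+ * For example, a btn_capability of 0x08 (left button only) makes
+ * cyapa_create_input_dev() mark the device as a clickpad
+ * (INPUT_PROP_BUTTONPAD), while 0x38 advertises left, right and
+ * middle physical buttons.
+ */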
+
+#define PWR_MODE_MASK 0xfc
+#define PWR_MODE_FULL_ACTIVE (0x3f << 2)
+#define PWR_MODE_IDLE (0x03 << 2) /* Default rt suspend scanrate: 30ms */
+#define PWR_MODE_SLEEP (0x05 << 2) /* Default suspend scanrate: 50ms */
+#define PWR_MODE_BTN_ONLY (0x01 << 2)
+#define PWR_MODE_OFF (0x00 << 2)
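+
+/*
+ * For the idle/sleep power commands, bits 7-2 encode the scan interval
+ * as decoded by cyapa_pwr_cmd_to_sleep_time(): e.g. PWR_MODE_IDLE
+ * (0x03 << 2) decodes to 3 * 10 = 30ms and PWR_MODE_SLEEP (0x05 << 2)
+ * to 50ms, matching the default scanrates noted above.
+ */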
+
+#define PWR_STATUS_MASK 0x0c
+#define PWR_STATUS_ACTIVE (0x03 << 2)
+#define PWR_STATUS_IDLE (0x02 << 2)
+#define PWR_STATUS_BTN_ONLY (0x01 << 2)
+#define PWR_STATUS_OFF (0x00 << 2)
+
+#define AUTOSUSPEND_DELAY 2000 /* unit : ms */
+
+#define UNINIT_SLEEP_TIME 0xFFFF
+#define UNINIT_PWR_MODE 0xFF
+
+#define BTN_ONLY_MODE_NAME "buttononly"
+#define OFF_MODE_NAME "off"
+
+/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
+#define CYAPA_MAX_MT_SLOTS 15
+
+struct cyapa;
+
+typedef bool (*cb_sort)(struct cyapa *, u8 *, int);
+
+struct cyapa_dev_ops {
+ int (*check_fw)(struct cyapa *, const struct firmware *);
+ int (*bl_enter)(struct cyapa *);
+ int (*bl_activate)(struct cyapa *);
+ int (*bl_initiate)(struct cyapa *, const struct firmware *);
+ int (*update_fw)(struct cyapa *, const struct firmware *);
+ int (*bl_deactivate)(struct cyapa *);
+
+ ssize_t (*show_baseline)(struct device *,
+ struct device_attribute *, char *);
+ ssize_t (*calibrate_store)(struct device *,
+ struct device_attribute *, const char *, size_t);
+
+ int (*initialize)(struct cyapa *cyapa);
+
+ int (*state_parse)(struct cyapa *cyapa, u8 *reg_status, int len);
+ int (*operational_check)(struct cyapa *cyapa);
+
+ int (*irq_handler)(struct cyapa *);
+ bool (*irq_cmd_handler)(struct cyapa *);
+ int (*sort_empty_output_data)(struct cyapa *,
+ u8 *, int *, cb_sort);
+
+ int (*set_power_mode)(struct cyapa *, u8, u16);
+};
+
+struct cyapa_gen5_cmd_states {
+ struct mutex cmd_lock;
+ struct completion cmd_ready;
+ atomic_t cmd_issued;
+ u8 in_progress_cmd;
+ bool is_irq_mode;
+
+ cb_sort resp_sort_func;
+ u8 *resp_data;
+ int *resp_len;
+
+ u8 irq_cmd_buf[CYAPA_REG_MAP_SIZE];
+ u8 empty_buf[CYAPA_REG_MAP_SIZE];
+};
+
+union cyapa_cmd_states {
+ struct cyapa_gen5_cmd_states gen5;
+};
+
+enum cyapa_state {
+ CYAPA_STATE_NO_DEVICE,
+ CYAPA_STATE_BL_BUSY,
+ CYAPA_STATE_BL_IDLE,
+ CYAPA_STATE_BL_ACTIVE,
+ CYAPA_STATE_OP,
+ CYAPA_STATE_GEN5_BL,
+ CYAPA_STATE_GEN5_APP,
+};
+
+/* The main device structure */
+struct cyapa {
+ enum cyapa_state state;
+ u8 status[BL_STATUS_SIZE];
+ bool operational; /* true: ready for data reporting; false: not. */
+
+ struct i2c_client *client;
+ struct input_dev *input;
+ char phys[32]; /* Device physical location */
+ bool irq_wake; /* Irq wake is enabled */
+ bool smbus;
+
+ /* power mode settings */
+ u8 suspend_power_mode;
+ u16 suspend_sleep_time;
+ u8 runtime_suspend_power_mode;
+ u16 runtime_suspend_sleep_time;
+ u8 dev_pwr_mode;
+ u16 dev_sleep_time;
+
+ /* Read from query data region. */
+ char product_id[16];
+ u8 fw_maj_ver; /* Firmware major version. */
+ u8 fw_min_ver; /* Firmware minor version. */
+ u8 btn_capability;
+ u8 gen;
+ int max_abs_x;
+ int max_abs_y;
+ int physical_size_x;
+ int physical_size_y;
+
+ /* Used in ttsp and truetouch based trackpad devices. */
+ u8 x_origin; /* X Axis Origin: 0 = left side; 1 = right side. */
+ u8 y_origin; /* Y Axis Origin: 0 = top; 1 = bottom. */
+ int electrodes_x; /* Number of electrodes on the X Axis */
+ int electrodes_y; /* Number of electrodes on the Y Axis */
+ int electrodes_rx; /* Number of Rx electrodes */
+ int aligned_electrodes_rx; /* Rx electrodes aligned to a multiple of 4 */
+ int max_z;
+
+ /*
+ * Used to synchronize access to and updates of the device state.
+ * Since the firmware update and firmware image read processes can
+ * take quite a long time (possibly more than 10 seconds), the mutex
+ * also serializes them with the other interfaces and with detection.
+ */
+ struct mutex state_sync_lock;
+
+ const struct cyapa_dev_ops *ops;
+
+ union cyapa_cmd_states cmd_states;
+};
+
+
+ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
+ u8 *values);
+ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
+ u8 *values);
+
+ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values);
+
+int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout);
+
+u8 cyapa_sleep_time_to_pwr_cmd(u16 sleep_time);
+u16 cyapa_pwr_cmd_to_sleep_time(u8 pwr_mode);
+
+
+extern const char product_id[];
+extern const struct cyapa_dev_ops cyapa_gen3_ops;
+extern const struct cyapa_dev_ops cyapa_gen5_ops;
+
+#endif
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
new file mode 100644
index 000000000000..77e9d70a986b
--- /dev/null
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -0,0 +1,1247 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ * Further cleanup and restructuring by:
+ * Daniel Kurtz <djkurtz@chromium.org>
+ * Benson Leung <bleung@chromium.org>
+ *
+ * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2012 Google, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/unaligned/access_ok.h>
+#include "cyapa.h"
+
+
+#define GEN3_MAX_FINGERS 5
+#define GEN3_FINGER_NUM(x) (((x) >> 4) & 0x07)
+
+#define BLK_HEAD_BYTES 32
+
+/* Macro for register map group offset. */
+#define PRODUCT_ID_SIZE 16
+#define QUERY_DATA_SIZE 27
+#define REG_PROTOCOL_GEN_QUERY_OFFSET 20
+
+#define REG_OFFSET_DATA_BASE 0x0000
+#define REG_OFFSET_COMMAND_BASE 0x0028
+#define REG_OFFSET_QUERY_BASE 0x002a
+
+#define CYAPA_OFFSET_SOFT_RESET REG_OFFSET_COMMAND_BASE
+#define OP_RECALIBRATION_MASK 0x80
+#define OP_REPORT_BASELINE_MASK 0x40
+#define REG_OFFSET_MAX_BASELINE 0x0026
+#define REG_OFFSET_MIN_BASELINE 0x0027
+
+#define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1)
+#define SET_POWER_MODE_DELAY 10000 /* Unit: us */
+#define SET_POWER_MODE_TRIES 5
+
+#define GEN3_BL_CMD_CHECKSUM_SEED 0xff
+#define GEN3_BL_CMD_INITIATE_BL 0x38
+#define GEN3_BL_CMD_WRITE_BLOCK 0x39
+#define GEN3_BL_CMD_VERIFY_BLOCK 0x3a
+#define GEN3_BL_CMD_TERMINATE_BL 0x3b
+#define GEN3_BL_CMD_LAUNCH_APP 0xa5
+
+/*
+ * CYAPA trackpad device states.
+ * Used in register 0x00, bit1-0, DeviceStatus field.
+ * Other values indicate device is in an abnormal state and must be reset.
+ */
+#define CYAPA_DEV_NORMAL 0x03
+#define CYAPA_DEV_BUSY 0x01
+
+#define CYAPA_FW_BLOCK_SIZE 64
+#define CYAPA_FW_READ_SIZE 16
+#define CYAPA_FW_HDR_START 0x0780
+#define CYAPA_FW_HDR_BLOCK_COUNT 2
+#define CYAPA_FW_HDR_BLOCK_START (CYAPA_FW_HDR_START / CYAPA_FW_BLOCK_SIZE)
+#define CYAPA_FW_HDR_SIZE (CYAPA_FW_HDR_BLOCK_COUNT * \
+ CYAPA_FW_BLOCK_SIZE)
+#define CYAPA_FW_DATA_START 0x0800
+#define CYAPA_FW_DATA_BLOCK_COUNT 480
+#define CYAPA_FW_DATA_BLOCK_START (CYAPA_FW_DATA_START / CYAPA_FW_BLOCK_SIZE)
+#define CYAPA_FW_DATA_SIZE (CYAPA_FW_DATA_BLOCK_COUNT * \
+ CYAPA_FW_BLOCK_SIZE)
+#define CYAPA_FW_SIZE (CYAPA_FW_HDR_SIZE + CYAPA_FW_DATA_SIZE)
+#define CYAPA_CMD_LEN 16
+
+#define GEN3_BL_IDLE_FW_MAJ_VER_OFFSET 0x0b
+#define GEN3_BL_IDLE_FW_MIN_VER_OFFSET (GEN3_BL_IDLE_FW_MAJ_VER_OFFSET + 1)
+
+
+struct cyapa_touch {
+ /*
+ * high bits of x/y position value
+ * bit 7 - 4: high 4 bits of x position value
+ * bit 3 - 0: high 4 bits of y position value
+ */
+ u8 xy_hi;
+ u8 x_lo; /* low 8 bits of x position value. */
+ u8 y_lo; /* low 8 bits of y position value. */
+ u8 pressure;
+ /* id range is 1 - 15. It is incremented with every new touch. */
+ u8 id;
+} __packed;
+
+struct cyapa_reg_data {
+ /*
+ * bit 0 - 1: device status
+ * bit 3 - 2: power mode
+ * bit 6 - 4: reserved
+ * bit 7: interrupt valid bit
+ */
+ u8 device_status;
+ /*
+ * bit 7 - 4: number of fingers currently touching pad
+ * bit 3: valid data check bit
+ * bit 2: middle mechanism button state if exists
+ * bit 1: right mechanism button state if exists
+ * bit 0: left mechanism button state if exists
+ */
+ u8 finger_btn;
+ /* CYAPA reports up to 5 touches per packet. */
+ struct cyapa_touch touches[5];
+} __packed;
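+
+/*
+ * Illustrative decoding of one cyapa_touch record: with xy_hi = 0x4a,
+ * x_lo = 0xcd and y_lo = 0x10 the 12-bit coordinates are
+ *   x = ((0x4a & 0xf0) << 4) | 0xcd = 0x4cd (1229)
+ *   y = ((0x4a & 0x0f) << 8) | 0x10 = 0xa10 (2576)
+ * and touch->id - 1 selects the MT slot used for reporting.
+ */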
+
+struct gen3_write_block_cmd {
+ u8 checksum_seed; /* Always 0xff */
+ u8 cmd_code; /* command code: 0x39 */
+ u8 key[8]; /* 8-byte security key */
+ __be16 block_num;
+ u8 block_data[CYAPA_FW_BLOCK_SIZE];
+ u8 block_checksum; /* Calculated using bytes 12 - 75 */
+ u8 cmd_checksum; /* Calculated using bytes 0-76 */
+} __packed;
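+
+/*
+ * Byte layout of the 78-byte command above (illustrative):
+ *   0: 0xff seed, 1: 0x39 cmd, 2-9: key, 10-11: block number (big-endian),
+ *   12-75: block data, 76: block checksum, 77: command checksum.
+ * cyapa_gen3_write_buffer() sends it as five I2C writes of at most
+ * CYAPA_CMD_LEN payload bytes each, prefixed with the offsets
+ * 0x00, 0x10, 0x20, 0x30 and 0x40 (the last write carries 14 bytes).
+ */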
+
+static const u8 security_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
+static const u8 bl_activate[] = { 0x00, 0xff, 0x38, 0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07 };
+static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07 };
+static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07 };
+
+
+ /* for byte read/write command */
+#define CMD_RESET 0
+#define CMD_POWER_MODE 1
+#define CMD_DEV_STATUS 2
+#define CMD_REPORT_MAX_BASELINE 3
+#define CMD_REPORT_MIN_BASELINE 4
+#define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1)
+#define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET)
+#define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE)
+#define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS)
+#define CYAPA_SMBUS_MAX_BASELINE SMBUS_BYTE_CMD(CMD_REPORT_MAX_BASELINE)
+#define CYAPA_SMBUS_MIN_BASELINE SMBUS_BYTE_CMD(CMD_REPORT_MIN_BASELINE)
+
+ /* for group registers read/write command */
+#define REG_GROUP_DATA 0
+#define REG_GROUP_CMD 2
+#define REG_GROUP_QUERY 3
+#define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3))
+#define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA)
+#define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD)
+#define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY)
+
+ /* for register block read/write command */
+#define CMD_BL_STATUS 0
+#define CMD_BL_HEAD 1
+#define CMD_BL_CMD 2
+#define CMD_BL_DATA 3
+#define CMD_BL_ALL 4
+#define CMD_BLK_PRODUCT_ID 5
+#define CMD_BLK_HEAD 6
+#define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1))
+
+/* register block read/write command in bootloader mode */
+#define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS)
+#define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD)
+#define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD)
+#define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA)
+#define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL)
+
+/* register block read/write command in operational mode */
+#define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID)
+#define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD)
+
+
+struct cyapa_cmd_len {
+ u8 cmd;
+ u8 len;
+};
+
+/* maps generic CYAPA_CMD_* code to the I2C equivalent */
+static const struct cyapa_cmd_len cyapa_i2c_cmds[] = {
+ { CYAPA_OFFSET_SOFT_RESET, 1 }, /* CYAPA_CMD_SOFT_RESET */
+ { REG_OFFSET_COMMAND_BASE + 1, 1 }, /* CYAPA_CMD_POWER_MODE */
+ { REG_OFFSET_DATA_BASE, 1 }, /* CYAPA_CMD_DEV_STATUS */
+ { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) },
+ /* CYAPA_CMD_GROUP_DATA */
+ { REG_OFFSET_COMMAND_BASE, 0 }, /* CYAPA_CMD_GROUP_CMD */
+ { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE }, /* CYAPA_CMD_GROUP_QUERY */
+ { BL_HEAD_OFFSET, 3 }, /* CYAPA_CMD_BL_STATUS */
+ { BL_HEAD_OFFSET, 16 }, /* CYAPA_CMD_BL_HEAD */
+ { BL_HEAD_OFFSET, 16 }, /* CYAPA_CMD_BL_CMD */
+ { BL_DATA_OFFSET, 16 }, /* CYAPA_CMD_BL_DATA */
+ { BL_HEAD_OFFSET, 32 }, /* CYAPA_CMD_BL_ALL */
+ { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE },
+ /* CYAPA_CMD_BLK_PRODUCT_ID */
+ { REG_OFFSET_DATA_BASE, 32 }, /* CYAPA_CMD_BLK_HEAD */
+ { REG_OFFSET_MAX_BASELINE, 1 }, /* CYAPA_CMD_MAX_BASELINE */
+ { REG_OFFSET_MIN_BASELINE, 1 }, /* CYAPA_CMD_MIN_BASELINE */
+};
+
+static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
+ { CYAPA_SMBUS_RESET, 1 }, /* CYAPA_CMD_SOFT_RESET */
+ { CYAPA_SMBUS_POWER_MODE, 1 }, /* CYAPA_CMD_POWER_MODE */
+ { CYAPA_SMBUS_DEV_STATUS, 1 }, /* CYAPA_CMD_DEV_STATUS */
+ { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) },
+ /* CYAPA_CMD_GROUP_DATA */
+ { CYAPA_SMBUS_GROUP_CMD, 2 }, /* CYAPA_CMD_GROUP_CMD */
+ { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE },
+ /* CYAPA_CMD_GROUP_QUERY */
+ { CYAPA_SMBUS_BL_STATUS, 3 }, /* CYAPA_CMD_BL_STATUS */
+ { CYAPA_SMBUS_BL_HEAD, 16 }, /* CYAPA_CMD_BL_HEAD */
+ { CYAPA_SMBUS_BL_CMD, 16 }, /* CYAPA_CMD_BL_CMD */
+ { CYAPA_SMBUS_BL_DATA, 16 }, /* CYAPA_CMD_BL_DATA */
+ { CYAPA_SMBUS_BL_ALL, 32 }, /* CYAPA_CMD_BL_ALL */
+ { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE },
+ /* CYAPA_CMD_BLK_PRODUCT_ID */
+ { CYAPA_SMBUS_BLK_HEAD, 16 }, /* CYAPA_CMD_BLK_HEAD */
+ { CYAPA_SMBUS_MAX_BASELINE, 1 }, /* CYAPA_CMD_MAX_BASELINE */
+ { CYAPA_SMBUS_MIN_BASELINE, 1 }, /* CYAPA_CMD_MIN_BASELINE */
+};
+
+
+/*
+ * cyapa_smbus_read_block - perform smbus block read command
+ * @cyapa - private data structure of the driver
+ * @cmd - the properly encoded smbus command
+ * @len - expected length of smbus command result
+ * @values - buffer to store smbus command result
+ *
+ * Returns negative errno, else the number of bytes read.
+ *
+ * Note:
+ * In the trackpad device, the memory block allocated for the I2C register
+ * map is 256 bytes, so the maximum block read over the I2C bus is 256 bytes.
+ */
+ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
+ u8 *values)
+{
+ ssize_t ret;
+ u8 index;
+ u8 smbus_cmd;
+ u8 *buf;
+ struct i2c_client *client = cyapa->client;
+
+ if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd))
+ return -EINVAL;
+
+ if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) {
+ /* read specific block registers command. */
+ smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+ ret = i2c_smbus_read_block_data(client, smbus_cmd, values);
+ goto out;
+ }
+
+ ret = 0;
+ for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) {
+ smbus_cmd = SMBUS_ENCODE_IDX(cmd, index);
+ smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ);
+ buf = values + I2C_SMBUS_BLOCK_MAX * index;
+ ret = i2c_smbus_read_block_data(client, smbus_cmd, buf);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ return ret > 0 ? len : ret;
+}
+
+static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx)
+{
+ u8 cmd;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ }
+ return i2c_smbus_read_byte_data(cyapa->client, cmd);
+}
+
+static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value)
+{
+ u8 cmd;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ }
+ return i2c_smbus_write_byte_data(cyapa->client, cmd, value);
+}
+
+ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
+ u8 *values)
+{
+ return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg,
+ size_t len, const u8 *values)
+{
+ return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
+{
+ u8 cmd;
+ size_t len;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ len = cyapa_smbus_cmds[cmd_idx].len;
+ return cyapa_smbus_read_block(cyapa, cmd, len, values);
+ }
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ len = cyapa_i2c_cmds[cmd_idx].len;
+ return cyapa_i2c_reg_read_block(cyapa, cmd, len, values);
+}
+
+/*
+ * Determine the Gen3 trackpad device's current operating state.
+ */
+static int cyapa_gen3_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
+{
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+ /* Parse based on Gen3 characteristic registers and bits */
+ if (reg_data[REG_BL_FILE] == BL_FILE &&
+ reg_data[REG_BL_ERROR] == BL_ERROR_NO_ERR_IDLE &&
+ (reg_data[REG_BL_STATUS] ==
+ (BL_STATUS_RUNNING | BL_STATUS_CSUM_VALID) ||
+ reg_data[REG_BL_STATUS] == BL_STATUS_RUNNING)) {
+ /*
+ * Normal state after power on or reset,
+ * REG_BL_STATUS == 0x11, firmware image checksum is valid.
+ * REG_BL_STATUS == 0x10, firmware image checksum is invalid.
+ */
+ cyapa->gen = CYAPA_GEN3;
+ cyapa->state = CYAPA_STATE_BL_IDLE;
+ } else if (reg_data[REG_BL_FILE] == BL_FILE &&
+ (reg_data[REG_BL_STATUS] & BL_STATUS_RUNNING) ==
+ BL_STATUS_RUNNING) {
+ cyapa->gen = CYAPA_GEN3;
+ if (reg_data[REG_BL_STATUS] & BL_STATUS_BUSY) {
+ cyapa->state = CYAPA_STATE_BL_BUSY;
+ } else {
+ if ((reg_data[REG_BL_ERROR] & BL_ERROR_BOOTLOADING) ==
+ BL_ERROR_BOOTLOADING)
+ cyapa->state = CYAPA_STATE_BL_ACTIVE;
+ else
+ cyapa->state = CYAPA_STATE_BL_IDLE;
+ }
+ } else if ((reg_data[REG_OP_STATUS] & OP_STATUS_SRC) &&
+ (reg_data[REG_OP_DATA1] & OP_DATA_VALID)) {
+ /*
+ * Normal state when running in operational mode; the device
+ * may also not be in full power state, or may be busy
+ * processing a command.
+ */
+ if (GEN3_FINGER_NUM(reg_data[REG_OP_DATA1]) <=
+ GEN3_MAX_FINGERS) {
+ /* Finger number data is valid. */
+ cyapa->gen = CYAPA_GEN3;
+ cyapa->state = CYAPA_STATE_OP;
+ }
+ } else if (reg_data[REG_OP_STATUS] == 0x0C &&
+ reg_data[REG_OP_DATA1] == 0x08) {
+ /* Op state when first two registers overwritten with 0x00 */
+ cyapa->gen = CYAPA_GEN3;
+ cyapa->state = CYAPA_STATE_OP;
+ } else if (reg_data[REG_BL_STATUS] &
+ (BL_STATUS_RUNNING | BL_STATUS_BUSY)) {
+ cyapa->gen = CYAPA_GEN3;
+ cyapa->state = CYAPA_STATE_BL_BUSY;
+ }
+
+ if (cyapa->gen == CYAPA_GEN3 && (cyapa->state == CYAPA_STATE_OP ||
+ cyapa->state == CYAPA_STATE_BL_IDLE ||
+ cyapa->state == CYAPA_STATE_BL_ACTIVE ||
+ cyapa->state == CYAPA_STATE_BL_BUSY))
+ return 0;
+
+ return -EAGAIN;
+}
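+
+/*
+ * Example of the parsing above: after power-on or reset the first three
+ * registers typically read 0x00 (BL_FILE), 0x11 (bootloader running,
+ * checksum valid) and 0x00 (no error), which maps to CYAPA_GEN3 /
+ * CYAPA_STATE_BL_IDLE. In operational mode, OP_STATUS_SRC in register
+ * 0x00 plus OP_DATA_VALID in register 0x01 yields CYAPA_STATE_OP.
+ */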
+
+/*
+ * Enter bootloader by soft resetting the device.
+ *
+ * If device is already in the bootloader, the function just returns.
+ * Otherwise, reset the device; after reset, device enters bootloader idle
+ * state immediately.
+ *
+ * Returns:
+ * 0 on success
+ * -EAGAIN device was reset, but is not now in bootloader idle state
+ * < 0 if the device never responds within the timeout
+ */
+static int cyapa_gen3_bl_enter(struct cyapa *cyapa)
+{
+ int error;
+ int waiting_time;
+
+ error = cyapa_poll_state(cyapa, 500);
+ if (error)
+ return error;
+ if (cyapa->state == CYAPA_STATE_BL_IDLE) {
+ /* Already in BL_IDLE. Skipping reset. */
+ return 0;
+ }
+
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EAGAIN;
+
+ cyapa->operational = false;
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ error = cyapa_write_byte(cyapa, CYAPA_CMD_SOFT_RESET, 0x01);
+ if (error)
+ return -EIO;
+
+ usleep_range(25000, 50000);
+ waiting_time = 2000; /* For some chipsets, the max waiting time is 1~2s. */
+ do {
+ error = cyapa_poll_state(cyapa, 500);
+ if (error) {
+ if (error == -ETIMEDOUT) {
+ waiting_time -= 500;
+ continue;
+ }
+ return error;
+ }
+
+ if ((cyapa->state == CYAPA_STATE_BL_IDLE) &&
+ !(cyapa->status[REG_BL_STATUS] & BL_STATUS_WATCHDOG))
+ break;
+
+ msleep(100);
+ waiting_time -= 100;
+ } while (waiting_time > 0);
+
+ if ((cyapa->state != CYAPA_STATE_BL_IDLE) ||
+ (cyapa->status[REG_BL_STATUS] & BL_STATUS_WATCHDOG))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int cyapa_gen3_bl_activate(struct cyapa *cyapa)
+{
+ int error;
+
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_activate),
+ bl_activate);
+ if (error)
+ return error;
+
+ /* Wait for bootloader to activate; takes between 2 and 12 seconds */
+ msleep(2000);
+ error = cyapa_poll_state(cyapa, 11000);
+ if (error)
+ return error;
+ if (cyapa->state != CYAPA_STATE_BL_ACTIVE)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int cyapa_gen3_bl_deactivate(struct cyapa *cyapa)
+{
+ int error;
+
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+ bl_deactivate);
+ if (error)
+ return error;
+
+ /* Wait for bootloader to switch to idle state; should take < 100ms */
+ msleep(100);
+ error = cyapa_poll_state(cyapa, 500);
+ if (error)
+ return error;
+ if (cyapa->state != CYAPA_STATE_BL_IDLE)
+ return -EAGAIN;
+ return 0;
+}
+
+/*
+ * Exit bootloader
+ *
+ * Send bl_exit command, then wait 50 - 100 ms to let device transition to
+ * operational mode. If this is the first time the device's firmware is
+ * running, it can take up to 2 seconds to calibrate its sensors. So, poll
+ * the device's new state for up to 2 seconds.
+ *
+ * Returns:
+ * -EIO failure while reading from device
+ * -EAGAIN device is stuck in bootloader, b/c it has invalid firmware
+ * 0 device is supported and in operational mode
+ */
+static int cyapa_gen3_bl_exit(struct cyapa *cyapa)
+{
+ int error;
+
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ if (error)
+ return error;
+
+ /*
+ * Wait for the bootloader to exit and operational mode to start.
+ * Normally, this takes at least 50 ms.
+ */
+ usleep_range(50000, 100000);
+ /*
+ * In addition, when a device boots for the first time after being
+ * updated to new firmware, it must first calibrate its sensors, which
+ * can take up to an additional 2 seconds. If the device power is
+ * running low, this may take even longer.
+ */
+ error = cyapa_poll_state(cyapa, 4000);
+ if (error < 0)
+ return error;
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static u16 cyapa_gen3_csum(const u8 *buf, size_t count)
+{
+ int i;
+ u16 csum = 0;
+
+ for (i = 0; i < count; i++)
+ csum += buf[i];
+
+ return csum;
+}
+
+/*
+ * Verify the integrity of a CYAPA firmware image file.
+ *
+ * The firmware image file is 30848 bytes, composed of 482 64-byte blocks.
+ *
+ * The first 2 blocks are the firmware header.
+ * The next 480 blocks are the firmware image.
+ *
+ * The first two bytes of the header hold the header checksum, computed by
+ * summing the other 126 bytes of the header.
+ * The last two bytes of the header hold the firmware image checksum, computed
+ * by summing the 30720 bytes of the image modulo 0xffff.
+ *
+ * Both checksums are stored big-endian (most significant byte first).
+ */
+static int cyapa_gen3_check_fw(struct cyapa *cyapa, const struct firmware *fw)
+{
+ struct device *dev = &cyapa->client->dev;
+ u16 csum;
+ u16 csum_expected;
+
+ /* Firmware must match exact 30848 bytes = 482 64-byte blocks. */
+ if (fw->size != CYAPA_FW_SIZE) {
+ dev_err(dev, "invalid firmware size = %zu, expected %u.\n",
+ fw->size, CYAPA_FW_SIZE);
+ return -EINVAL;
+ }
+
+ /* Verify header block */
+ csum_expected = (fw->data[0] << 8) | fw->data[1];
+ csum = cyapa_gen3_csum(&fw->data[2], CYAPA_FW_HDR_SIZE - 2);
+ if (csum != csum_expected) {
+ dev_err(dev, "%s %04x, expected: %04x\n",
+ "invalid firmware header checksum = ",
+ csum, csum_expected);
+ return -EINVAL;
+ }
+
+ /* Verify firmware image */
+ csum_expected = (fw->data[CYAPA_FW_HDR_SIZE - 2] << 8) |
+ fw->data[CYAPA_FW_HDR_SIZE - 1];
+ csum = cyapa_gen3_csum(&fw->data[CYAPA_FW_HDR_SIZE],
+ CYAPA_FW_DATA_SIZE);
+ if (csum != csum_expected) {
+ dev_err(dev, "%s %04x, expected: %04x\n",
+ "invalid firmware header checksum = ",
+ csum, csum_expected);
+ return -EINVAL;
+ }
+ return 0;
+}
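+
+/*
+ * Illustrative sketch, not part of the original driver: re-checking a raw
+ * gen3 image buffer against the layout described above, mirroring the
+ * checks done in cyapa_gen3_check_fw(). The CYAPA_FW_* constants and the
+ * u8/u16 types are assumed to come from cyapa.h, as elsewhere in this file.
+ */
+static int __maybe_unused cyapa_gen3_verify_raw_image(const u8 *data,
+ size_t size)
+{
+ u16 csum, csum_expected;
+
+ if (size != CYAPA_FW_SIZE)
+ return -EINVAL;
+
+ /* The header checksum covers the 126 header bytes after the field. */
+ csum_expected = (data[0] << 8) | data[1];
+ csum = cyapa_gen3_csum(&data[2], CYAPA_FW_HDR_SIZE - 2);
+ if (csum != csum_expected)
+ return -EINVAL;
+
+ /* The image checksum covers the 30720 image bytes after the header. */
+ csum_expected = (data[CYAPA_FW_HDR_SIZE - 2] << 8) |
+ data[CYAPA_FW_HDR_SIZE - 1];
+ csum = cyapa_gen3_csum(&data[CYAPA_FW_HDR_SIZE], CYAPA_FW_DATA_SIZE);
+ if (csum != csum_expected)
+ return -EINVAL;
+
+ return 0;
+}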
+
+/*
+ * Write a |len|-byte buffer |buf| to the device by splitting it into a
+ * sequence of smaller, |CYAPA_CMD_LEN|-byte write commands.
+ *
+ * The data bytes for a write command are prepended with the 1-byte offset
+ * of the data relative to the start of |buf|.
+ */
+static int cyapa_gen3_write_buffer(struct cyapa *cyapa,
+ const u8 *buf, size_t len)
+{
+ int error;
+ size_t i;
+ unsigned char cmd[CYAPA_CMD_LEN + 1];
+ size_t cmd_len;
+
+ for (i = 0; i < len; i += CYAPA_CMD_LEN) {
+ const u8 *payload = &buf[i];
+
+ cmd_len = (len - i >= CYAPA_CMD_LEN) ? CYAPA_CMD_LEN : len - i;
+ cmd[0] = i;
+ memcpy(&cmd[1], payload, cmd_len);
+
+ error = cyapa_i2c_reg_write_block(cyapa, 0, cmd_len + 1, cmd);
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+/*
+ * A firmware block write command writes 64 bytes of data to a single flash
+ * page in the device. The 78-byte block write command has the format:
+ * <0xff> <CMD> <Key> <Block> <Data> <Data Checksum> <CMD Checksum>
+ *
+ * <0xff> - every command starts with 0xff
+ * <CMD> - the write command value is 0x39
+ * <Key> - write commands include an 8-byte key: { 00 01 02 03 04 05 06 07 }
+ * <Block> - Memory Block number (address / 64) (16-bit, big-endian)
+ * <Data> - 64 bytes of firmware image data
+ * <Data Checksum> - sum of 64 <Data> bytes, modulo 0xff
+ * <CMD Checksum> - sum of 77 bytes, from 0xff to <Data Checksum>
+ *
+ * Each write command is split into 5 i2c write transactions of up to 16 bytes.
+ * Each transaction starts with an i2c register offset: (00, 10, 20, 30, 40).
+ */
+static int cyapa_gen3_write_fw_block(struct cyapa *cyapa,
+ u16 block, const u8 *data)
+{
+ int ret;
+ struct gen3_write_block_cmd write_block_cmd;
+ u8 status[BL_STATUS_SIZE];
+ int tries;
+ u8 bl_status, bl_error;
+
+ /* Set write command and security key bytes. */
+ write_block_cmd.checksum_seed = GEN3_BL_CMD_CHECKSUM_SEED;
+ write_block_cmd.cmd_code = GEN3_BL_CMD_WRITE_BLOCK;
+ memcpy(write_block_cmd.key, security_key, sizeof(security_key));
+ put_unaligned_be16(block, &write_block_cmd.block_num);
+ memcpy(write_block_cmd.block_data, data, CYAPA_FW_BLOCK_SIZE);
+ write_block_cmd.block_checksum = cyapa_gen3_csum(
+ write_block_cmd.block_data, CYAPA_FW_BLOCK_SIZE);
+ write_block_cmd.cmd_checksum = cyapa_gen3_csum((u8 *)&write_block_cmd,
+ sizeof(write_block_cmd) - 1);
+
+ ret = cyapa_gen3_write_buffer(cyapa, (u8 *)&write_block_cmd,
+ sizeof(write_block_cmd));
+ if (ret)
+ return ret;
+
+ /* Wait for write to finish */
+ tries = 11; /* Programming for one block can take about 100ms. */
+ do {
+ usleep_range(10000, 20000);
+
+ /* Check block write command result status. */
+ ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET,
+ BL_STATUS_SIZE, status);
+ if (ret != BL_STATUS_SIZE)
+ return (ret < 0) ? ret : -EIO;
+ } while ((status[REG_BL_STATUS] & BL_STATUS_BUSY) && --tries);
+
+ /* Ignore WATCHDOG bit and reserved bits. */
+ bl_status = status[REG_BL_STATUS] & ~BL_STATUS_REV_MASK;
+ bl_error = status[REG_BL_ERROR] & ~BL_ERROR_RESERVED;
+
+ if (bl_status & BL_STATUS_BUSY)
+ ret = -ETIMEDOUT;
+ else if (bl_status != BL_STATUS_RUNNING ||
+ bl_error != BL_ERROR_BOOTLOADING)
+ ret = -EIO;
+ else
+ ret = 0;
+
+ return ret;
+}
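+
+/*
+ * Illustrative sketch, not part of the original driver: packing the same
+ * 78-byte block write command into a flat byte buffer, following the layout
+ * described above. It reuses GEN3_BL_CMD_CHECKSUM_SEED (0xff per the format
+ * above), GEN3_BL_CMD_WRITE_BLOCK (0x39), security_key and
+ * CYAPA_FW_BLOCK_SIZE exactly as cyapa_gen3_write_fw_block() does; the
+ * explicit offsets only serve to make the wire format visible.
+ */
+static void __maybe_unused cyapa_gen3_pack_write_block(u8 cmd[78], u16 block,
+ const u8 *data)
+{
+ cmd[0] = GEN3_BL_CMD_CHECKSUM_SEED; /* <0xff> */
+ cmd[1] = GEN3_BL_CMD_WRITE_BLOCK; /* <CMD> */
+ memcpy(&cmd[2], security_key, sizeof(security_key)); /* <Key> */
+ put_unaligned_be16(block, &cmd[10]); /* <Block> */
+ memcpy(&cmd[12], data, CYAPA_FW_BLOCK_SIZE); /* <Data> */
+ cmd[76] = cyapa_gen3_csum(data, CYAPA_FW_BLOCK_SIZE); /* <Data Checksum> */
+ cmd[77] = cyapa_gen3_csum(cmd, 77); /* <CMD Checksum> */
+}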
+
+static int cyapa_gen3_write_blocks(struct cyapa *cyapa,
+ size_t start_block, size_t block_count,
+ const u8 *image_data)
+{
+ int error;
+ int i;
+
+ for (i = 0; i < block_count; i++) {
+ size_t block = start_block + i;
+ size_t addr = i * CYAPA_FW_BLOCK_SIZE;
+ const u8 *data = &image_data[addr];
+
+ error = cyapa_gen3_write_fw_block(cyapa, block, data);
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+static int cyapa_gen3_do_fw_update(struct cyapa *cyapa,
+ const struct firmware *fw)
+{
+ struct device *dev = &cyapa->client->dev;
+ int error;
+
+ /* First write data, starting at byte 128 of fw->data */
+ error = cyapa_gen3_write_blocks(cyapa,
+ CYAPA_FW_DATA_BLOCK_START, CYAPA_FW_DATA_BLOCK_COUNT,
+ &fw->data[CYAPA_FW_HDR_BLOCK_COUNT * CYAPA_FW_BLOCK_SIZE]);
+ if (error) {
+ dev_err(dev, "FW update aborted, write image: %d\n", error);
+ return error;
+ }
+
+ /* Then write the firmware header blocks, which hold the checksums */
+ error = cyapa_gen3_write_blocks(cyapa,
+ CYAPA_FW_HDR_BLOCK_START, CYAPA_FW_HDR_BLOCK_COUNT,
+ &fw->data[0]);
+ if (error) {
+ dev_err(dev, "FW update aborted, write checksum: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static ssize_t cyapa_gen3_do_calibrate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int tries;
+ int ret;
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS);
+ if (ret < 0) {
+ dev_err(dev, "Error reading dev status: %d\n", ret);
+ goto out;
+ }
+ if ((ret & CYAPA_DEV_NORMAL) != CYAPA_DEV_NORMAL) {
+ dev_warn(dev, "Trackpad device is busy, device state: 0x%02x\n",
+ ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = cyapa_write_byte(cyapa, CYAPA_CMD_SOFT_RESET,
+ OP_RECALIBRATION_MASK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to send calibrate command: %d\n",
+ ret);
+ goto out;
+ }
+
+ tries = 20; /* max recalibration timeout 2s. */
+ do {
+ /*
+ * For this recalibration, the max time will not exceed 2s.
+ * The average time is approximately 500 - 700 ms, and we
+ * will check the status every 100 - 200ms.
+ */
+ usleep_range(100000, 200000);
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS);
+ if (ret < 0) {
+ dev_err(dev, "Error reading dev status: %d\n",
+ ret);
+ goto out;
+ }
+ if ((ret & CYAPA_DEV_NORMAL) == CYAPA_DEV_NORMAL)
+ break;
+ } while (--tries);
+
+ if (tries == 0) {
+ dev_err(dev, "Failed to calibrate. Timeout.\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ dev_dbg(dev, "Calibration successful.\n");
+
+out:
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t cyapa_gen3_show_baseline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int max_baseline, min_baseline;
+ int tries;
+ int ret;
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS);
+ if (ret < 0) {
+ dev_err(dev, "Error reading dev status. err = %d\n", ret);
+ goto out;
+ }
+ if ((ret & CYAPA_DEV_NORMAL) != CYAPA_DEV_NORMAL) {
+ dev_warn(dev, "Trackpad device is busy. device state = 0x%x\n",
+ ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = cyapa_write_byte(cyapa, CYAPA_CMD_SOFT_RESET,
+ OP_REPORT_BASELINE_MASK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to send report baseline command. %d\n",
+ ret);
+ goto out;
+ }
+
+ tries = 3; /* Try for 30 to 60 ms */
+ do {
+ usleep_range(10000, 20000);
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS);
+ if (ret < 0) {
+ dev_err(dev, "Error reading dev status. err = %d\n",
+ ret);
+ goto out;
+ }
+ if ((ret & CYAPA_DEV_NORMAL) == CYAPA_DEV_NORMAL)
+ break;
+ } while (--tries);
+
+ if (tries == 0) {
+ dev_err(dev, "Device timed out going to Normal state.\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_MAX_BASELINE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read max baseline. err = %d\n", ret);
+ goto out;
+ }
+ max_baseline = ret;
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_MIN_BASELINE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read min baseline. err = %d\n", ret);
+ goto out;
+ }
+ min_baseline = ret;
+
+ dev_dbg(dev, "Baseline report successful. Max: %d Min: %d\n",
+ max_baseline, min_baseline);
+ ret = scnprintf(buf, PAGE_SIZE, "%d %d\n", max_baseline, min_baseline);
+
+out:
+ return ret;
+}
+
+/*
+ * cyapa_get_wait_time_for_pwr_cmd
+ *
+ * Compute the amount of time we need to wait after updating the touchpad
+ * power mode. The touchpad needs to consume the incoming power mode set
+ * command at the current clock rate.
+ */
+static u16 cyapa_get_wait_time_for_pwr_cmd(u8 pwr_mode)
+{
+ switch (pwr_mode) {
+ case PWR_MODE_FULL_ACTIVE: return 20;
+ case PWR_MODE_BTN_ONLY: return 20;
+ case PWR_MODE_OFF: return 20;
+ default: return cyapa_pwr_cmd_to_sleep_time(pwr_mode) + 50;
+ }
+}
+
+/*
+ * Set device power mode
+ *
+ * Write to the field to configure the power state. Power states include:
+ * Full : Max scans and report rate.
+ * Idle : Report rate set by a user-specified time.
+ * ButtonOnly : No scans for fingers. When the button is triggered,
+ * a slave interrupt is asserted to notify the host to wake up.
+ * Off : Only awake for i2c commands from the host. No function for the
+ * button or touch sensors.
+ *
+ * The power_mode command value should be one of the following:
+ * Full : 0x3f
+ * Idle : Configurable from 20 to 1000 ms; see
+ * cyapa_sleep_time_to_pwr_cmd and cyapa_pwr_cmd_to_sleep_time.
+ * ButtonOnly : 0x01
+ * Off : 0x00
+ *
+ * Device power mode can only be set when device is in operational mode.
+ */
+static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
+ u16 always_unused)
+{
+ int ret;
+ u8 power;
+ int tries;
+ u16 sleep_time;
+
+ always_unused = 0;
+ if (cyapa->state != CYAPA_STATE_OP)
+ return 0;
+
+ tries = SET_POWER_MODE_TRIES;
+ while (tries--) {
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE);
+ if (ret >= 0)
+ break;
+ usleep_range(SET_POWER_MODE_DELAY, 2 * SET_POWER_MODE_DELAY);
+ }
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Return early if the power mode to set is the same as the current
+ * one.
+ */
+ if ((ret & PWR_MODE_MASK) == power_mode)
+ return 0;
+
+ sleep_time = cyapa_get_wait_time_for_pwr_cmd(ret & PWR_MODE_MASK);
+ power = ret;
+ power &= ~PWR_MODE_MASK;
+ power |= power_mode & PWR_MODE_MASK;
+ tries = SET_POWER_MODE_TRIES;
+ while (tries--) {
+ ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
+ if (!ret)
+ break;
+ usleep_range(SET_POWER_MODE_DELAY, 2 * SET_POWER_MODE_DELAY);
+ }
+
+ /*
+ * Wait for the newly set power command to be processed at the previous
+ * clock speed (scan rate) used by the touchpad firmware. Issuing the
+ * next command before this completes may result in errors, depending
+ * on the command's content.
+ */
+ msleep(sleep_time);
+ return ret;
+}
+
+static int cyapa_gen3_get_query_data(struct cyapa *cyapa)
+{
+ u8 query_data[QUERY_DATA_SIZE];
+ int ret;
+
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EBUSY;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data);
+ if (ret != QUERY_DATA_SIZE)
+ return (ret < 0) ? ret : -EIO;
+
+ memcpy(&cyapa->product_id[0], &query_data[0], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &query_data[5], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &query_data[11], 2);
+ cyapa->product_id[15] = '\0';
+
+ cyapa->fw_maj_ver = query_data[15];
+ cyapa->fw_min_ver = query_data[16];
+
+ cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK;
+
+ cyapa->gen = query_data[20] & 0x0f;
+
+ cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22];
+ cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23];
+
+ cyapa->physical_size_x =
+ ((query_data[24] & 0xf0) << 4) | query_data[25];
+ cyapa->physical_size_y =
+ ((query_data[24] & 0x0f) << 8) | query_data[26];
+
+ cyapa->max_z = 255;
+
+ return 0;
+}
+
+static int cyapa_gen3_bl_query_data(struct cyapa *cyapa)
+{
+ u8 bl_data[CYAPA_CMD_LEN];
+ int ret;
+
+ ret = cyapa_i2c_reg_read_block(cyapa, 0, CYAPA_CMD_LEN, bl_data);
+ if (ret != CYAPA_CMD_LEN)
+ return (ret < 0) ? ret : -EIO;
+
+ /*
+ * These values are updated again once the device enters application
+ * mode. If the trackpad fails to enter application mode, they can
+ * still be used as a reference. They are only valid when the firmware
+ * image checksum is valid.
+ */
+ if (bl_data[REG_BL_STATUS] ==
+ (BL_STATUS_RUNNING | BL_STATUS_CSUM_VALID)) {
+ cyapa->fw_maj_ver = bl_data[GEN3_BL_IDLE_FW_MAJ_VER_OFFSET];
+ cyapa->fw_min_ver = bl_data[GEN3_BL_IDLE_FW_MIN_VER_OFFSET];
+ }
+
+ return 0;
+}
+
+/*
+ * Check if device is operational.
+ *
+ * An operational device is responding, has exited bootloader, and has
+ * firmware supported by this driver.
+ *
+ * Returns:
+ * -EBUSY no device or in bootloader
+ * -EIO failure while reading from device
+ * -EAGAIN device is still in bootloader
+ * if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware
+ * -EINVAL device is in operational mode, but not supported by this driver
+ * 0 device is supported
+ */
+static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ int error;
+
+ switch (cyapa->state) {
+ case CYAPA_STATE_BL_ACTIVE:
+ error = cyapa_gen3_bl_deactivate(cyapa);
+ if (error) {
+ dev_err(dev, "failed to bl_deactivate: %d\n", error);
+ return error;
+ }
+
+ /* Fallthrough state */
+ case CYAPA_STATE_BL_IDLE:
+ /* Try to get firmware version in bootloader mode. */
+ cyapa_gen3_bl_query_data(cyapa);
+
+ error = cyapa_gen3_bl_exit(cyapa);
+ if (error) {
+ dev_err(dev, "failed to bl_exit: %d\n", error);
+ return error;
+ }
+
+ /* Fallthrough state */
+ case CYAPA_STATE_OP:
+ /*
+ * Reading query data before going back to the full mode
+ * may cause problems, so we set the power mode first here.
+ */
+ error = cyapa_gen3_set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0);
+ if (error)
+ dev_err(dev, "%s: set full power mode failed: %d\n",
+ __func__, error);
+ error = cyapa_gen3_get_query_data(cyapa);
+ if (error < 0)
+ return error;
+
+ /* Only support firmware protocol gen3 */
+ if (cyapa->gen != CYAPA_GEN3) {
+ dev_err(dev, "unsupported protocol version (%d)",
+ cyapa->gen);
+ return -EINVAL;
+ }
+
+ /* Only support product ID starting with CYTRA */
+ if (memcmp(cyapa->product_id, product_id,
+ strlen(product_id)) != 0) {
+ dev_err(dev, "unsupported product ID (%s)\n",
+ cyapa->product_id);
+ return -EINVAL;
+ }
+
+ return 0;
+
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * Return false to stop further processing of this interrupt;
+ * return true to continue processing.
+ */
+static bool cyapa_gen3_irq_cmd_handler(struct cyapa *cyapa)
+{
+ /* Not a gen3 command response; continue normal processing. */
+ if (cyapa->gen != CYAPA_GEN3)
+ return true;
+
+ if (cyapa->operational)
+ return true;
+
+ /*
+ * The driver is still detecting the device or servicing another
+ * interface function, so stop cyapa_gen3_irq_handler() from
+ * continuing, to avoid spurious error detection and handling.
+ *
+ * This also prevents the periodically asserted interrupts from being
+ * processed as touch inputs when gen3 fails to launch into
+ * application mode and stays in bootloader mode.
+ */
+ return false;
+}
+
+static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
+{
+ struct input_dev *input = cyapa->input;
+ struct device *dev = &cyapa->client->dev;
+ struct cyapa_reg_data data;
+ int num_fingers;
+ int ret;
+ int i;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data)) {
+ dev_err(dev, "failed to read report data, (%d)\n", ret);
+ return -EINVAL;
+ }
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
+ dev_err(dev, "invalid device state bytes, %02x %02x\n",
+ data.device_status, data.finger_btn);
+ return -EINVAL;
+ }
+
+ num_fingers = (data.finger_btn >> 4) & 0x0f;
+ for (i = 0; i < num_fingers; i++) {
+ const struct cyapa_touch *touch = &data.touches[i];
+ /* Note: touch->id range is 1 to 15; slots are 0 to 14. */
+ int slot = touch->id - 1;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X,
+ ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
+ input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
+ }
+
+ input_mt_sync_frame(input);
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+ input_report_key(input, BTN_LEFT,
+ !!(data.finger_btn & OP_DATA_LEFT_BTN));
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+ input_report_key(input, BTN_MIDDLE,
+ !!(data.finger_btn & OP_DATA_MIDDLE_BTN));
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+ input_report_key(input, BTN_RIGHT,
+ !!(data.finger_btn & OP_DATA_RIGHT_BTN));
+ input_sync(input);
+
+ return 0;
+}
+
+static int cyapa_gen3_initialize(struct cyapa *cyapa) { return 0; }
+static int cyapa_gen3_bl_initiate(struct cyapa *cyapa,
+ const struct firmware *fw) { return 0; }
+static int cyapa_gen3_empty_output_data(struct cyapa *cyapa,
+ u8 *buf, int *len, cb_sort func) { return 0; }
+
+const struct cyapa_dev_ops cyapa_gen3_ops = {
+ .check_fw = cyapa_gen3_check_fw,
+ .bl_enter = cyapa_gen3_bl_enter,
+ .bl_activate = cyapa_gen3_bl_activate,
+ .update_fw = cyapa_gen3_do_fw_update,
+ .bl_deactivate = cyapa_gen3_bl_deactivate,
+ .bl_initiate = cyapa_gen3_bl_initiate,
+
+ .show_baseline = cyapa_gen3_show_baseline,
+ .calibrate_store = cyapa_gen3_do_calibrate,
+
+ .initialize = cyapa_gen3_initialize,
+
+ .state_parse = cyapa_gen3_state_parse,
+ .operational_check = cyapa_gen3_do_operational_check,
+
+ .irq_handler = cyapa_gen3_irq_handler,
+ .irq_cmd_handler = cyapa_gen3_irq_cmd_handler,
+ .sort_empty_output_data = cyapa_gen3_empty_output_data,
+ .set_power_mode = cyapa_gen3_set_power_mode,
+};
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
new file mode 100644
index 000000000000..ddf5393a1180
--- /dev/null
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -0,0 +1,2777 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ *
+ * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/crc-itu-t.h>
+#include "cyapa.h"
+
+
+/* Gen5 macros */
+#define RECORD_EVENT_NONE 0
+#define RECORD_EVENT_TOUCHDOWN 1
+#define RECORD_EVENT_DISPLACE 2
+#define RECORD_EVENT_LIFTOFF 3
+
+#define CYAPA_TSG_FLASH_MAP_BLOCK_SIZE 0x80
+#define CYAPA_TSG_IMG_FW_HDR_SIZE 13
+#define CYAPA_TSG_FW_ROW_SIZE (CYAPA_TSG_FLASH_MAP_BLOCK_SIZE)
+#define CYAPA_TSG_IMG_START_ROW_NUM 0x002e
+#define CYAPA_TSG_IMG_END_ROW_NUM 0x01fe
+#define CYAPA_TSG_IMG_APP_INTEGRITY_ROW_NUM 0x01ff
+#define CYAPA_TSG_IMG_MAX_RECORDS (CYAPA_TSG_IMG_END_ROW_NUM - \
+ CYAPA_TSG_IMG_START_ROW_NUM + 1 + 1)
+#define CYAPA_TSG_IMG_READ_SIZE (CYAPA_TSG_FLASH_MAP_BLOCK_SIZE / 2)
+#define CYAPA_TSG_START_OF_APPLICATION 0x1700
+#define CYAPA_TSG_APP_INTEGRITY_SIZE 60
+#define CYAPA_TSG_FLASH_MAP_METADATA_SIZE 60
+#define CYAPA_TSG_BL_KEY_SIZE 8
+
+#define CYAPA_TSG_MAX_CMD_SIZE 256
+
+#define GEN5_BL_CMD_VERIFY_APP_INTEGRITY 0x31
+#define GEN5_BL_CMD_GET_BL_INFO 0x38
+#define GEN5_BL_CMD_PROGRAM_VERIFY_ROW 0x39
+#define GEN5_BL_CMD_LAUNCH_APP 0x3b
+#define GEN5_BL_CMD_INITIATE_BL 0x48
+
+#define GEN5_HID_DESCRIPTOR_ADDR 0x0001
+#define GEN5_REPORT_DESCRIPTOR_ADDR 0x0002
+#define GEN5_INPUT_REPORT_ADDR 0x0003
+#define GEN5_OUTPUT_REPORT_ADDR 0x0004
+#define GEN5_CMD_DATA_ADDR 0x0006
+
+#define GEN5_TOUCH_REPORT_HEAD_SIZE 7
+#define GEN5_TOUCH_REPORT_MAX_SIZE 127
+#define GEN5_BTN_REPORT_HEAD_SIZE 6
+#define GEN5_BTN_REPORT_MAX_SIZE 14
+#define GEN5_WAKEUP_EVENT_SIZE 4
+#define GEN5_RAW_DATA_HEAD_SIZE 24
+
+#define GEN5_BL_CMD_REPORT_ID 0x40
+#define GEN5_BL_RESP_REPORT_ID 0x30
+#define GEN5_APP_CMD_REPORT_ID 0x2f
+#define GEN5_APP_RESP_REPORT_ID 0x1f
+
+#define GEN5_APP_DEEP_SLEEP_REPORT_ID 0xf0
+#define GEN5_DEEP_SLEEP_RESP_LENGTH 5
+
+#define GEN5_CMD_GET_PARAMETER 0x05
+#define GEN5_CMD_SET_PARAMETER 0x06
+#define GEN5_PARAMETER_ACT_INTERVL_ID 0x4d
+#define GEN5_PARAMETER_ACT_INTERVL_SIZE 1
+#define GEN5_PARAMETER_ACT_LFT_INTERVL_ID 0x4f
+#define GEN5_PARAMETER_ACT_LFT_INTERVL_SIZE 2
+#define GEN5_PARAMETER_LP_INTRVL_ID 0x4c
+#define GEN5_PARAMETER_LP_INTRVL_SIZE 2
+
+#define GEN5_PARAMETER_DISABLE_PIP_REPORT 0x08
+
+#define GEN5_POWER_STATE_ACTIVE 0x01
+#define GEN5_POWER_STATE_LOOK_FOR_TOUCH 0x02
+#define GEN5_POWER_STATE_READY 0x03
+#define GEN5_POWER_STATE_IDLE 0x04
+#define GEN5_POWER_STATE_BTN_ONLY 0x05
+#define GEN5_POWER_STATE_OFF 0x06
+
+#define GEN5_DEEP_SLEEP_STATE_MASK 0x03
+#define GEN5_DEEP_SLEEP_STATE_ON 0x00
+#define GEN5_DEEP_SLEEP_STATE_OFF 0x01
+
+#define GEN5_DEEP_SLEEP_OPCODE 0x08
+#define GEN5_DEEP_SLEEP_OPCODE_MASK 0x0f
+
+#define GEN5_POWER_READY_MAX_INTRVL_TIME 50 /* Unit: ms */
+#define GEN5_POWER_IDLE_MAX_INTRVL_TIME 250 /* Unit: ms */
+
+#define GEN5_CMD_REPORT_ID_OFFSET 4
+
+#define GEN5_RESP_REPORT_ID_OFFSET 2
+#define GEN5_RESP_RSVD_OFFSET 3
+#define GEN5_RESP_RSVD_KEY 0x00
+#define GEN5_RESP_BL_SOP_OFFSET 4
+#define GEN5_SOP_KEY 0x01 /* Start of Packet */
+#define GEN5_EOP_KEY 0x17 /* End of Packet */
+#define GEN5_RESP_APP_CMD_OFFSET 4
+#define GET_GEN5_CMD_CODE(reg) ((reg) & 0x7f)
+
+#define VALID_CMD_RESP_HEADER(resp, cmd) \
+ (((resp)[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID) && \
+ ((resp)[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) && \
+ (GET_GEN5_CMD_CODE((resp)[GEN5_RESP_APP_CMD_OFFSET]) == (cmd)))
+
+#define GEN5_MIN_BL_CMD_LENGTH 13
+#define GEN5_MIN_BL_RESP_LENGTH 11
+#define GEN5_MIN_APP_CMD_LENGTH 7
+#define GEN5_MIN_APP_RESP_LENGTH 5
+#define GEN5_UNSUPPORTED_CMD_RESP_LENGTH 6
+
+#define GEN5_RESP_LENGTH_OFFSET 0x00
+#define GEN5_RESP_LENGTH_SIZE 2
+
+#define GEN5_HID_DESCRIPTOR_SIZE 32
+#define GEN5_BL_HID_REPORT_ID 0xff
+#define GEN5_APP_HID_REPORT_ID 0xf7
+#define GEN5_BL_MAX_OUTPUT_LENGTH 0x0100
+#define GEN5_APP_MAX_OUTPUT_LENGTH 0x00fe
+
+#define GEN5_BL_REPORT_DESCRIPTOR_SIZE 0x1d
+#define GEN5_BL_REPORT_DESCRIPTOR_ID 0xfe
+#define GEN5_APP_REPORT_DESCRIPTOR_SIZE 0xee
+#define GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE 0xfa
+#define GEN5_APP_REPORT_DESCRIPTOR_ID 0xf6
+
+#define GEN5_TOUCH_REPORT_ID 0x01
+#define GEN5_BTN_REPORT_ID 0x03
+#define GEN5_WAKEUP_EVENT_REPORT_ID 0x04
+#define GEN5_OLD_PUSH_BTN_REPORT_ID 0x05
+#define GEN5_PUSH_BTN_REPORT_ID 0x06
+
+#define GEN5_CMD_COMPLETE_SUCCESS(status) ((status) == 0x00)
+
+#define GEN5_BL_INITIATE_RESP_LEN 11
+#define GEN5_BL_FAIL_EXIT_RESP_LEN 11
+#define GEN5_BL_FAIL_EXIT_STATUS_CODE 0x0c
+#define GEN5_BL_VERIFY_INTEGRITY_RESP_LEN 12
+#define GEN5_BL_INTEGRITY_CHEKC_PASS 0x00
+#define GEN5_BL_BLOCK_WRITE_RESP_LEN 11
+#define GEN5_BL_READ_APP_INFO_RESP_LEN 31
+#define GEN5_CMD_CALIBRATE 0x28
+#define CYAPA_SENSING_MODE_MUTUAL_CAP_FINE 0x00
+#define CYAPA_SENSING_MODE_SELF_CAP 0x02
+
+#define GEN5_CMD_RETRIEVE_DATA_STRUCTURE 0x24
+#define GEN5_RETRIEVE_MUTUAL_PWC_DATA 0x00
+#define GEN5_RETRIEVE_SELF_CAP_PWC_DATA 0x01
+
+#define GEN5_RETRIEVE_DATA_ELEMENT_SIZE_MASK 0x07
+
+#define GEN5_CMD_EXECUTE_PANEL_SCAN 0x2a
+#define GEN5_CMD_RETRIEVE_PANEL_SCAN 0x2b
+#define GEN5_PANEL_SCAN_MUTUAL_RAW_DATA 0x00
+#define GEN5_PANEL_SCAN_MUTUAL_BASELINE 0x01
+#define GEN5_PANEL_SCAN_MUTUAL_DIFFCOUNT 0x02
+#define GEN5_PANEL_SCAN_SELF_RAW_DATA 0x03
+#define GEN5_PANEL_SCAN_SELF_BASELINE 0x04
+#define GEN5_PANEL_SCAN_SELF_DIFFCOUNT 0x05
+
+/* The offset is only valid for retrieve PWC and panel scan commands */
+#define GEN5_RESP_DATA_STRUCTURE_OFFSET 10
+#define GEN5_PWC_DATA_ELEMENT_SIZE_MASK 0x07
+
+#define GEN5_NUMBER_OF_TOUCH_OFFSET 5
+#define GEN5_NUMBER_OF_TOUCH_MASK 0x1f
+#define GEN5_BUTTONS_OFFSET 5
+#define GEN5_BUTTONS_MASK 0x0f
+#define GEN5_GET_EVENT_ID(reg) (((reg) >> 5) & 0x03)
+#define GEN5_GET_TOUCH_ID(reg) ((reg) & 0x1f)
+
+#define GEN5_PRODUCT_FAMILY_MASK 0xf000
+#define GEN5_PRODUCT_FAMILY_TRACKPAD 0x1000
+
+#define TSG_INVALID_CMD 0xff
+
+struct cyapa_gen5_touch_record {
+ /*
+ * Bit 7 - 3: reserved
+ * Bit 2 - 0: touch type;
+ * 0 : standard finger;
+ * 1 - 7 : reserved.
+ */
+ u8 touch_type;
+
+ /*
+ * Bit 7: indicates touch liftoff status.
+ * 0 : touch is currently on the panel.
+ * 1 : touch record indicates a liftoff.
+ * Bit 6 - 5: indicates an event associated with this touch instance
+ * 0 : no event
+ * 1 : touchdown
+ * 2 : significant displacement (> active distance)
+ * 3 : liftoff (record reports last known coordinates)
+ * Bit 4 - 0: An arbitrary ID tag associated with a finger
+ * to allow tracking a touch as it moves around the panel.
+ */
+ u8 touch_tip_event_id;
+
+ /* Bit 7 - 0 of X-axis coordinate of the touch in pixel. */
+ u8 x_lo;
+
+ /* Bit 15 - 8 of X-axis coordinate of the touch in pixel. */
+ u8 x_hi;
+
+ /* Bit 7 - 0 of Y-axis coordinate of the touch in pixel. */
+ u8 y_lo;
+
+ /* Bit 15 - 8 of Y-axis coordinate of the touch in pixel. */
+ u8 y_hi;
+
+ /* Touch intensity in counts, pressure value. */
+ u8 z;
+
+ /*
+ * The length of the major axis of the ellipse of contact between
+ * the finger and the panel (ABS_MT_TOUCH_MAJOR).
+ */
+ u8 major_axis_len;
+
+ /*
+ * The length of the minor axis of the ellipse of contact between
+ * the finger and the panel (ABS_MT_TOUCH_MINOR).
+ */
+ u8 minor_axis_len;
+
+ /*
+ * The length of the major axis of the approaching tool.
+ * (ABS_MT_WIDTH_MAJOR)
+ */
+ u8 major_tool_len;
+
+ /*
+ * The length of the minor axis of the approaching tool.
+ * (ABS_MT_WIDTH_MINOR)
+ */
+ u8 minor_tool_len;
+
+ /*
+ * The angle between the panel vertical axis and
+ * the major axis of the contact ellipse. This value is an 8-bit
+ * signed integer. The range is -127 to +127 (corresponding to
+ * -90 degree and +90 degree respectively).
+ * The positive direction is clockwise from the vertical axis.
+ * If the ellipse of contact degenerates into a circle,
+ * orientation is reported as 0.
+ */
+ u8 orientation;
+} __packed;
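+
+/*
+ * Illustrative sketch, not part of the original driver: decoding one touch
+ * record into a tracking id, liftoff flag and coordinates, using the bit
+ * layout documented in the structure above together with the
+ * GEN5_GET_TOUCH_ID()/GEN5_GET_EVENT_ID() helpers and the RECORD_EVENT_*
+ * values defined earlier in this file.
+ */
+static void __maybe_unused cyapa_gen5_decode_touch_record(
+ const struct cyapa_gen5_touch_record *record,
+ int *id, bool *lifted, int *x, int *y)
+{
+ *id = GEN5_GET_TOUCH_ID(record->touch_tip_event_id);
+ *lifted = (record->touch_tip_event_id & 0x80) ||
+ GEN5_GET_EVENT_ID(record->touch_tip_event_id) == RECORD_EVENT_LIFTOFF;
+ *x = (record->x_hi << 8) | record->x_lo;
+ *y = (record->y_hi << 8) | record->y_lo;
+}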
+
+struct cyapa_gen5_report_data {
+ u8 report_head[GEN5_TOUCH_REPORT_HEAD_SIZE];
+ struct cyapa_gen5_touch_record touch_records[10];
+} __packed;
+
+struct cyapa_tsg_bin_image_head {
+ u8 head_size; /* Unit: bytes, including itself. */
+ u8 ttda_driver_major_version; /* Reserved as 0. */
+ u8 ttda_driver_minor_version; /* Reserved as 0. */
+ u8 fw_major_version;
+ u8 fw_minor_version;
+ u8 fw_revision_control_number[8];
+} __packed;
+
+struct cyapa_tsg_bin_image_data_record {
+ u8 flash_array_id;
+ __be16 row_number;
+ /* The number of bytes of flash data contained in this record. */
+ __be16 record_len;
+ /* The flash program data. */
+ u8 record_data[CYAPA_TSG_FW_ROW_SIZE];
+} __packed;
+
+struct cyapa_tsg_bin_image {
+ struct cyapa_tsg_bin_image_head image_head;
+ struct cyapa_tsg_bin_image_data_record records[0];
+} __packed;
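+
+/*
+ * Illustrative sketch, not part of the original driver: locating the
+ * APP_INTEGRITY record in a gen5 firmware image. It relies only on the
+ * layout above (a fixed-size image head followed by fixed-size row records,
+ * with the APP_INTEGRITY row stored last), which is the same arithmetic
+ * used later by cyapa_gen5_bl_initiate() and cyapa_gen5_check_fw().
+ */
+static __maybe_unused const struct cyapa_tsg_bin_image_data_record *
+cyapa_gen5_app_integrity_record(const struct firmware *fw)
+{
+ const struct cyapa_tsg_bin_image *image =
+ (const struct cyapa_tsg_bin_image *)fw->data;
+ size_t records = (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
+ sizeof(struct cyapa_tsg_bin_image_data_record);
+
+ return records ? &image->records[records - 1] : NULL;
+}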
+
+struct gen5_bl_packet_start {
+ u8 sop; /* Start of packet, must be 01h */
+ u8 cmd_code;
+ __le16 data_length; /* Size of data parameter start from data[0] */
+} __packed;
+
+struct gen5_bl_packet_end {
+ __le16 crc;
+ u8 eop; /* End of packet, must be 17h */
+} __packed;
+
+struct gen5_bl_cmd_head {
+ __le16 addr; /* Output report register address, must be 0004h */
+ /* Size of packet not including output report register address */
+ __le16 length;
+ u8 report_id; /* Bootloader output report id, must be 40h */
+ u8 rsvd; /* Reserved, must be 0 */
+ struct gen5_bl_packet_start packet_start;
+ u8 data[0]; /* Command data variable based on commands */
+} __packed;
+
+/* Initiate bootload command data structure. */
+struct gen5_bl_initiate_cmd_data {
+ /* Key must be "A5h 01h 02h 03h FFh FEh FDh 5Ah" */
+ u8 key[CYAPA_TSG_BL_KEY_SIZE];
+ u8 metadata_raw_parameter[CYAPA_TSG_FLASH_MAP_METADATA_SIZE];
+ __le16 metadata_crc;
+} __packed;
+
+struct gen5_bl_metadata_row_params {
+ __le16 size;
+ __le16 maximum_size;
+ __le32 app_start;
+ __le16 app_len;
+ __le16 app_crc;
+ __le32 app_entry;
+ __le32 upgrade_start;
+ __le16 upgrade_len;
+ __le16 entry_row_crc;
+ u8 padding[36]; /* Padding data must be 0 */
+ __le16 metadata_crc; /* CRC starts at offset of 60 */
+} __packed;
+
+/* Bootload program and verify row command data structure */
+struct gen5_bl_flash_row_head {
+ u8 flash_array_id;
+ __le16 flash_row_id;
+ u8 flash_data[0];
+} __packed;
+
+struct gen5_app_cmd_head {
+ __le16 addr; /* Output report register address, must be 0004h */
+ /* Size of packet not including output report register address */
+ __le16 length;
+ u8 report_id; /* Application output report id, must be 2Fh */
+ u8 rsvd; /* Reserved, must be 0 */
+ /*
+ * Bit 7: reserved, must be 0.
+ * Bit 6-0: command code.
+ */
+ u8 cmd_code;
+ u8 parameter_data[0]; /* Parameter data variable based on cmd_code */
+} __packed;
+
+/* Application get/set parameter command data structure */
+struct gen5_app_set_parameter_data {
+ u8 parameter_id;
+ u8 parameter_size;
+ __le32 value;
+} __packed;
+
+struct gen5_app_get_parameter_data {
+ u8 parameter_id;
+} __packed;
+
+struct gen5_retrieve_panel_scan_data {
+ __le16 read_offset;
+ __le16 read_elements;
+ u8 data_id;
+} __packed;
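+
+/*
+ * Illustrative sketch, not part of the original driver: the hard-coded
+ * command arrays used later in this file (for example the bootloader enter
+ * command { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x01 } in
+ * cyapa_gen5_bl_enter()) are simply a gen5_app_cmd_head serialized
+ * little-endian; filling one in explicitly makes the field layout visible.
+ */
+static void __maybe_unused cyapa_gen5_fill_app_cmd_head(
+ struct gen5_app_cmd_head *head, u8 cmd_code, u16 parameter_len)
+{
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &head->addr);
+ /* The length counts everything after the 2-byte register address. */
+ put_unaligned_le16(sizeof(*head) - 2 + parameter_len, &head->length);
+ head->report_id = GEN5_APP_CMD_REPORT_ID;
+ head->rsvd = 0;
+ head->cmd_code = cmd_code;
+}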
+
+/* Variables to record latest gen5 trackpad power states. */
+#define GEN5_DEV_SET_PWR_STATE(cyapa, s) ((cyapa)->dev_pwr_mode = (s))
+#define GEN5_DEV_GET_PWR_STATE(cyapa) ((cyapa)->dev_pwr_mode)
+#define GEN5_DEV_SET_SLEEP_TIME(cyapa, t) ((cyapa)->dev_sleep_time = (t))
+#define GEN5_DEV_GET_SLEEP_TIME(cyapa) ((cyapa)->dev_sleep_time)
+#define GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) \
+ (((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+
+
+static u8 cyapa_gen5_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
+ 0xff, 0xfe, 0xfd, 0x5a };
+
+static int cyapa_gen5_initialize(struct cyapa *cyapa)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+
+ init_completion(&gen5_pip->cmd_ready);
+ atomic_set(&gen5_pip->cmd_issued, 0);
+ mutex_init(&gen5_pip->cmd_lock);
+
+ gen5_pip->resp_sort_func = NULL;
+ gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
+ gen5_pip->resp_data = NULL;
+ gen5_pip->resp_len = NULL;
+
+ cyapa->dev_pwr_mode = UNINIT_PWR_MODE;
+ cyapa->dev_sleep_time = UNINIT_SLEEP_TIME;
+
+ return 0;
+}
+
+/* Return negative errno, or else the number of bytes read. */
+static ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
+{
+ int ret;
+
+ if (size == 0)
+ return 0;
+
+ if (!buf || size > CYAPA_REG_MAP_SIZE)
+ return -EINVAL;
+
+ ret = i2c_master_recv(cyapa->client, buf, size);
+
+ if (ret != size)
+ return (ret < 0) ? ret : -EIO;
+
+ return size;
+}
+
+/* Return zero on success, or a negative errno code on failure. */
+static ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
+{
+ int ret;
+
+ if (!buf || !size)
+ return -EINVAL;
+
+ ret = i2c_master_send(cyapa->client, buf, size);
+
+ if (ret != size)
+ return (ret < 0) ? ret : -EIO;
+
+ return 0;
+}
+
+/*
+ * Dump any unread data from the Gen5 trackpad before sending a command;
+ * otherwise the interrupt line will remain blocked (asserted).
+ */
+static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+ u8 *buf, int *len, cb_sort func)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int length;
+ int report_count;
+ int empty_count;
+ int buf_len;
+ int error;
+
+ buf_len = 0;
+ if (len) {
+ buf_len = (*len < CYAPA_REG_MAP_SIZE) ?
+ *len : CYAPA_REG_MAP_SIZE;
+ *len = 0;
+ }
+
+ report_count = 8; /* up to 7 pending reports before the command response */
+ empty_count = 0;
+ do {
+ /*
+ * Testing of the cyapa driver shows that the firmware can
+ * insert up to 5 "02 00" packets between two valid buffered
+ * data reports. So, in order to dump all buffered data and
+ * let the interrupt line be released and reasserted, the
+ * empty_count limit must be greater than 5. Otherwise, in
+ * some situations (e.g. during EFT and ESD testing), the
+ * interrupt line may never reactivate and the trackpad
+ * will stop reporting data.
+ */
+ if (empty_count > 5)
+ return 0;
+
+ error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf,
+ GEN5_RESP_LENGTH_SIZE);
+ if (error < 0)
+ return error;
+
+ length = get_unaligned_le16(gen5_pip->empty_buf);
+ if (length == GEN5_RESP_LENGTH_SIZE) {
+ empty_count++;
+ continue;
+ } else if (length > CYAPA_REG_MAP_SIZE) {
+ /* Should not happen */
+ return -EINVAL;
+ } else if (length == 0) {
+ /* Application or bootloader launch data polled out. */
+ length = GEN5_RESP_LENGTH_SIZE;
+ if (buf && buf_len && func &&
+ func(cyapa, gen5_pip->empty_buf, length)) {
+ length = min(buf_len, length);
+ memcpy(buf, gen5_pip->empty_buf, length);
+ *len = length;
+ /* Response found, success. */
+ return 0;
+ }
+ continue;
+ }
+
+ error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+ if (error < 0)
+ return error;
+
+ report_count--;
+ empty_count = 0;
+ length = get_unaligned_le16(gen5_pip->empty_buf);
+ if (length <= GEN5_RESP_LENGTH_SIZE) {
+ empty_count++;
+ } else if (buf && buf_len && func &&
+ func(cyapa, gen5_pip->empty_buf, length)) {
+ length = min(buf_len, length);
+ memcpy(buf, gen5_pip->empty_buf, length);
+ *len = length;
+ /* Response found, success. */
+ return 0;
+ }
+
+ error = -EINVAL;
+ } while (report_count);
+
+ return error;
+}
+
+static int cyapa_do_i2c_pip_cmd_irq_sync(
+ struct cyapa *cyapa,
+ u8 *cmd, size_t cmd_len,
+ unsigned long timeout)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int error;
+
+ /* Wait for interrupt to set ready completion */
+ init_completion(&gen5_pip->cmd_ready);
+
+ atomic_inc(&gen5_pip->cmd_issued);
+ error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
+ if (error) {
+ atomic_dec(&gen5_pip->cmd_issued);
+ return (error < 0) ? error : -EIO;
+ }
+
+ /* Wait for interrupt to indicate command is completed. */
+ timeout = wait_for_completion_timeout(&gen5_pip->cmd_ready,
+ msecs_to_jiffies(timeout));
+ if (timeout == 0) {
+ atomic_dec(&gen5_pip->cmd_issued);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int cyapa_do_i2c_pip_cmd_polling(
+ struct cyapa *cyapa,
+ u8 *cmd, size_t cmd_len,
+ u8 *resp_data, int *resp_len,
+ unsigned long timeout,
+ cb_sort func)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int tries;
+ int length;
+ int error;
+
+ atomic_inc(&gen5_pip->cmd_issued);
+ error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
+ if (error) {
+ atomic_dec(&gen5_pip->cmd_issued);
+ return error < 0 ? error : -EIO;
+ }
+
+ length = resp_len ? *resp_len : 0;
+ if (resp_data && resp_len && length != 0 && func) {
+ tries = timeout / 5;
+ do {
+ usleep_range(3000, 5000);
+ *resp_len = length;
+ error = cyapa_empty_pip_output_data(cyapa,
+ resp_data, resp_len, func);
+ if (error || *resp_len == 0)
+ continue;
+ else
+ break;
+ } while (--tries > 0);
+ if ((error || *resp_len == 0) || tries <= 0)
+ error = error ? error : -ETIMEDOUT;
+ }
+
+ atomic_dec(&gen5_pip->cmd_issued);
+ return error;
+}
+
+static int cyapa_i2c_pip_cmd_irq_sync(
+ struct cyapa *cyapa,
+ u8 *cmd, int cmd_len,
+ u8 *resp_data, int *resp_len,
+ unsigned long timeout,
+ cb_sort func,
+ bool irq_mode)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int error;
+
+ if (!cmd || !cmd_len)
+ return -EINVAL;
+
+ /* Commands must be serialized. */
+ error = mutex_lock_interruptible(&gen5_pip->cmd_lock);
+ if (error)
+ return error;
+
+ gen5_pip->resp_sort_func = func;
+ gen5_pip->resp_data = resp_data;
+ gen5_pip->resp_len = resp_len;
+
+ if (cmd_len >= GEN5_MIN_APP_CMD_LENGTH &&
+ cmd[4] == GEN5_APP_CMD_REPORT_ID) {
+ /* Application command */
+ gen5_pip->in_progress_cmd = cmd[6] & 0x7f;
+ } else if (cmd_len >= GEN5_MIN_BL_CMD_LENGTH &&
+ cmd[4] == GEN5_BL_CMD_REPORT_ID) {
+ /* Bootloader command */
+ gen5_pip->in_progress_cmd = cmd[7];
+ }
+
+ /* Send command data, wait and read output response data's length. */
+ if (irq_mode) {
+ gen5_pip->is_irq_mode = true;
+ error = cyapa_do_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
+ timeout);
+ if (error == -ETIMEDOUT && resp_data &&
+ resp_len && *resp_len != 0 && func) {
+ /*
+ * Some old firmware versions do not raise an interrupt for
+ * the command response data, so poll here to try to fetch
+ * the response.
+ */
+ error = cyapa_empty_pip_output_data(cyapa,
+ resp_data, resp_len, func);
+ if (error || *resp_len == 0)
+ error = error ? error : -ETIMEDOUT;
+ }
+ } else {
+ gen5_pip->is_irq_mode = false;
+ error = cyapa_do_i2c_pip_cmd_polling(cyapa, cmd, cmd_len,
+ resp_data, resp_len, timeout, func);
+ }
+
+ gen5_pip->resp_sort_func = NULL;
+ gen5_pip->resp_data = NULL;
+ gen5_pip->resp_len = NULL;
+ gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
+
+ mutex_unlock(&gen5_pip->cmd_lock);
+ return error;
+}
+
+static bool cyapa_gen5_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
+ u8 *data, int len)
+{
+ if (!data || len < GEN5_MIN_BL_RESP_LENGTH)
+ return false;
+
+ /* Bootloader input report id 30h */
+ if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_RESP_REPORT_ID &&
+ data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
+ data[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY)
+ return true;
+
+ return false;
+}
+
+static bool cyapa_gen5_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
+ u8 *data, int len)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int resp_len;
+
+ if (!data || len < GEN5_MIN_APP_RESP_LENGTH)
+ return false;
+
+ if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID &&
+ data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) {
+ resp_len = get_unaligned_le16(&data[GEN5_RESP_LENGTH_OFFSET]);
+ if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) == 0x00 &&
+ resp_len == GEN5_UNSUPPORTED_CMD_RESP_LENGTH &&
+ data[5] == gen5_pip->in_progress_cmd) {
+ /* Unsupported command code */
+ return false;
+ } else if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) ==
+ gen5_pip->in_progress_cmd) {
+ /* Correct command response received */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool cyapa_gen5_sort_application_launch_data(struct cyapa *cyapa,
+ u8 *buf, int len)
+{
+ if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+ return false;
+
+ /*
+ * After reset or power on, the trackpad device always reports
+ * 0x00 0x00 to indicate a reset or power-on event.
+ */
+ if (buf[0] == 0 && buf[1] == 0)
+ return true;
+
+ return false;
+}
+
+static bool cyapa_gen5_sort_hid_descriptor_data(struct cyapa *cyapa,
+ u8 *buf, int len)
+{
+ int resp_len;
+ int max_output_len;
+
+ /* Check hid descriptor. */
+ if (len != GEN5_HID_DESCRIPTOR_SIZE)
+ return false;
+
+ resp_len = get_unaligned_le16(&buf[GEN5_RESP_LENGTH_OFFSET]);
+ max_output_len = get_unaligned_le16(&buf[16]);
+ if (resp_len == GEN5_HID_DESCRIPTOR_SIZE) {
+ if (buf[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_HID_REPORT_ID &&
+ max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
+ /* BL mode HID Descriptor */
+ return true;
+ } else if ((buf[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_APP_HID_REPORT_ID) &&
+ max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
+ /* APP mode HID Descriptor */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool cyapa_gen5_sort_deep_sleep_data(struct cyapa *cyapa,
+ u8 *buf, int len)
+{
+ if (len == GEN5_DEEP_SLEEP_RESP_LENGTH &&
+ buf[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_APP_DEEP_SLEEP_REPORT_ID &&
+ (buf[4] & GEN5_DEEP_SLEEP_OPCODE_MASK) ==
+ GEN5_DEEP_SLEEP_OPCODE)
+ return true;
+ return false;
+}
+
+static int gen5_idle_state_parse(struct cyapa *cyapa)
+{
+ u8 resp_data[GEN5_HID_DESCRIPTOR_SIZE];
+ int max_output_len;
+ int length;
+ u8 cmd[2];
+ int ret;
+ int error;
+
+ /*
+ * First dump all buffered data, to cover the case where the
+ * trackpad has just been powered on when we get here.
+ */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ memset(resp_data, 0, sizeof(resp_data));
+ ret = cyapa_i2c_pip_read(cyapa, resp_data, 3);
+ if (ret != 3)
+ return ret < 0 ? ret : -EIO;
+
+ length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+ if (length == GEN5_RESP_LENGTH_SIZE) {
+ /* Normal Gen5 state with no response data pending */
+ cyapa->gen = CYAPA_GEN5;
+
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ /* Read the HID descriptor from the trackpad device */
+ cmd[0] = 0x01;
+ cmd[1] = 0x00;
+ length = GEN5_HID_DESCRIPTOR_SIZE;
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, GEN5_RESP_LENGTH_SIZE,
+ resp_data, &length,
+ 300,
+ cyapa_gen5_sort_hid_descriptor_data,
+ false);
+ if (error)
+ return error;
+
+ length = get_unaligned_le16(
+ &resp_data[GEN5_RESP_LENGTH_OFFSET]);
+ max_output_len = get_unaligned_le16(&resp_data[16]);
+ if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
+ length == GEN5_RESP_LENGTH_SIZE) &&
+ (resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_BL_HID_REPORT_ID) &&
+ max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
+ /* BL mode HID Description read */
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ } else if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
+ length == GEN5_RESP_LENGTH_SIZE) &&
+ (resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_APP_HID_REPORT_ID) &&
+ max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
+ /* APP mode HID Description read */
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ } else {
+ /* Should not happen!!! */
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ }
+ }
+
+ return 0;
+}
+
+static int gen5_hid_description_header_parse(struct cyapa *cyapa, u8 *reg_data)
+{
+ int length;
+ u8 resp_data[32];
+ int max_output_len;
+ int ret;
+
+ /* 0x20 0x00 0xF7 is the Gen5 Application HID Descriptor header;
+ * 0x20 0x00 0xFF is the Gen5 Bootloader HID Descriptor header.
+ *
+ * The HID Descriptor content must be read out completely,
+ * otherwise the Gen5 trackpad cannot respond to the next command
+ * or report any touch or button data.
+ */
+ ret = cyapa_i2c_pip_read(cyapa, resp_data,
+ GEN5_HID_DESCRIPTOR_SIZE);
+ if (ret != GEN5_HID_DESCRIPTOR_SIZE)
+ return ret < 0 ? ret : -EIO;
+ length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+ max_output_len = get_unaligned_le16(&resp_data[16]);
+ if (length == GEN5_RESP_LENGTH_SIZE) {
+ if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_BL_HID_REPORT_ID) {
+ /*
+ * BL mode HID Description has been previously
+ * read out.
+ */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ } else {
+ /*
+ * APP mode HID Description has been previously
+ * read out.
+ */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ }
+ } else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
+ resp_data[2] == GEN5_BL_HID_REPORT_ID &&
+ max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
+ /* BL mode HID Description read. */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ } else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
+ (resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_APP_HID_REPORT_ID) &&
+ max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
+ /* APP mode HID Description read. */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ } else {
+ /* Should not happen!!! */
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ }
+
+ return 0;
+}
+
+static int gen5_report_data_header_parse(struct cyapa *cyapa, u8 *reg_data)
+{
+ int length;
+
+ length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
+ switch (reg_data[GEN5_RESP_REPORT_ID_OFFSET]) {
+ case GEN5_TOUCH_REPORT_ID:
+ if (length < GEN5_TOUCH_REPORT_HEAD_SIZE ||
+ length > GEN5_TOUCH_REPORT_MAX_SIZE)
+ return -EINVAL;
+ break;
+ case GEN5_BTN_REPORT_ID:
+ case GEN5_OLD_PUSH_BTN_REPORT_ID:
+ case GEN5_PUSH_BTN_REPORT_ID:
+ if (length < GEN5_BTN_REPORT_HEAD_SIZE ||
+ length > GEN5_BTN_REPORT_MAX_SIZE)
+ return -EINVAL;
+ break;
+ case GEN5_WAKEUP_EVENT_REPORT_ID:
+ if (length != GEN5_WAKEUP_EVENT_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ return 0;
+}
+
+static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int length;
+ int ret;
+
+ /*
+ * The report data must be read out completely, otherwise the
+ * Gen5 trackpad cannot respond to the next command or report
+ * any touch or button data.
+ */
+ length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
+ ret = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+ if (ret != length)
+ return ret < 0 ? ret : -EIO;
+
+ if (length == GEN5_RESP_LENGTH_SIZE) {
+ /* The previous command already read the data out completely. */
+ if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_BL_RESP_REPORT_ID) {
+ /* Gen5 BL command response data detected */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ } else {
+ /* Gen5 APP command response data detected */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ }
+ } else if ((gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_BL_RESP_REPORT_ID) &&
+ (gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
+ GEN5_RESP_RSVD_KEY) &&
+ (gen5_pip->empty_buf[GEN5_RESP_BL_SOP_OFFSET] ==
+ GEN5_SOP_KEY) &&
+ (gen5_pip->empty_buf[length - 1] ==
+ GEN5_EOP_KEY)) {
+ /* Gen5 BL command response data detected */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ } else if (gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_APP_RESP_REPORT_ID &&
+ gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
+ GEN5_RESP_RSVD_KEY) {
+ /* Gen5 APP command response data detected */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ } else {
+ /* Should not happen!!! */
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ }
+
+ return 0;
+}
+
+static int cyapa_gen5_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
+{
+ int length;
+
+ if (!reg_data || len < 3)
+ return -EINVAL;
+
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+ /* Parse based on Gen5 characteristic registers and bits */
+ length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
+ if (length == 0 || length == GEN5_RESP_LENGTH_SIZE) {
+ gen5_idle_state_parse(cyapa);
+ } else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
+ (reg_data[2] == GEN5_BL_HID_REPORT_ID ||
+ reg_data[2] == GEN5_APP_HID_REPORT_ID)) {
+ gen5_hid_description_header_parse(cyapa, reg_data);
+ } else if ((length == GEN5_APP_REPORT_DESCRIPTOR_SIZE ||
+ length == GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE) &&
+ reg_data[2] == GEN5_APP_REPORT_DESCRIPTOR_ID) {
+ /* 0xEE 0x00 0xF6 is the Gen5 APP report descriptor header. */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+ } else if (length == GEN5_BL_REPORT_DESCRIPTOR_SIZE &&
+ reg_data[2] == GEN5_BL_REPORT_DESCRIPTOR_ID) {
+ /* 0x1D 0x00 0xFE is the Gen5 BL report descriptor header. */
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ } else if (reg_data[2] == GEN5_TOUCH_REPORT_ID ||
+ reg_data[2] == GEN5_BTN_REPORT_ID ||
+ reg_data[2] == GEN5_OLD_PUSH_BTN_REPORT_ID ||
+ reg_data[2] == GEN5_PUSH_BTN_REPORT_ID ||
+ reg_data[2] == GEN5_WAKEUP_EVENT_REPORT_ID) {
+ gen5_report_data_header_parse(cyapa, reg_data);
+ } else if (reg_data[2] == GEN5_BL_RESP_REPORT_ID ||
+ reg_data[2] == GEN5_APP_RESP_REPORT_ID) {
+ gen5_cmd_resp_header_parse(cyapa, reg_data);
+ }
+
+ if (cyapa->gen == CYAPA_GEN5) {
+ /*
+ * The remaining content (e.g. the report descriptor) must be
+ * read out from the trackpad device completely. Otherwise, the
+ * Gen5 trackpad cannot respond to the next command or report
+ * any touch or button data later.
+ */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ if (cyapa->state == CYAPA_STATE_GEN5_APP ||
+ cyapa->state == CYAPA_STATE_GEN5_BL)
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
+static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
+ const struct firmware *fw)
+{
+ struct cyapa_tsg_bin_image *image;
+ struct gen5_bl_cmd_head *bl_cmd_head;
+ struct gen5_bl_packet_start *bl_packet_start;
+ struct gen5_bl_initiate_cmd_data *cmd_data;
+ struct gen5_bl_packet_end *bl_packet_end;
+ u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
+ int cmd_len;
+ u16 cmd_data_len;
+ u16 cmd_crc = 0;
+ u16 meta_data_crc = 0;
+ u8 resp_data[11];
+ int resp_len;
+ int records_num;
+ u8 *data;
+ int error;
+
+ /* Try to dump all buffered report data before sending any command. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
+ bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+ cmd_data_len = CYAPA_TSG_BL_KEY_SIZE + CYAPA_TSG_FLASH_MAP_BLOCK_SIZE;
+ cmd_len = sizeof(struct gen5_bl_cmd_head) + cmd_data_len +
+ sizeof(struct gen5_bl_packet_end);
+
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+ put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
+ bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
+
+ bl_packet_start = &bl_cmd_head->packet_start;
+ bl_packet_start->sop = GEN5_SOP_KEY;
+ bl_packet_start->cmd_code = GEN5_BL_CMD_INITIATE_BL;
+ /* 8 key bytes and 128 bytes block size */
+ put_unaligned_le16(cmd_data_len, &bl_packet_start->data_length);
+
+ cmd_data = (struct gen5_bl_initiate_cmd_data *)bl_cmd_head->data;
+ memcpy(cmd_data->key, cyapa_gen5_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
+
+ /* Copy 60 bytes Meta Data Row Parameters */
+ image = (struct cyapa_tsg_bin_image *)fw->data;
+ records_num = (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
+ sizeof(struct cyapa_tsg_bin_image_data_record);
+ /* APP_INTEGRITY row is always the last row block */
+ data = image->records[records_num - 1].record_data;
+ memcpy(cmd_data->metadata_raw_parameter, data,
+ CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
+
+ meta_data_crc = crc_itu_t(0xffff, cmd_data->metadata_raw_parameter,
+ CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
+ put_unaligned_le16(meta_data_crc, &cmd_data->metadata_crc);
+
+ bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+ cmd_data_len);
+ cmd_crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
+ sizeof(struct gen5_bl_packet_start) + cmd_data_len);
+ put_unaligned_le16(cmd_crc, &bl_packet_end->crc);
+ bl_packet_end->eop = GEN5_EOP_KEY;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, cmd_len,
+ resp_data, &resp_len, 12000,
+ cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
+ if (error || resp_len != GEN5_BL_INITIATE_RESP_LEN ||
+ resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ return error ? error : -EAGAIN;
+
+ return 0;
+}
+
+static bool cyapa_gen5_sort_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
+{
+ if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+ return false;
+
+ if (buf[0] == 0 && buf[1] == 0)
+ return true;
+
+ /* Exit bootloader failed for some reason. */
+ if (len == GEN5_BL_FAIL_EXIT_RESP_LEN &&
+ buf[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_BL_RESP_REPORT_ID &&
+ buf[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
+ buf[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY &&
+ buf[10] == GEN5_EOP_KEY)
+ return true;
+
+ return false;
+}
+
+static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
+{
+
+ u8 bl_gen5_bl_exit[] = { 0x04, 0x00,
+ 0x0B, 0x00, 0x40, 0x00, 0x01, 0x3b, 0x00, 0x00,
+ 0x20, 0xc7, 0x17
+ };
+ u8 resp_data[11];
+ int resp_len;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ bl_gen5_bl_exit, sizeof(bl_gen5_bl_exit),
+ resp_data, &resp_len,
+ 5000, cyapa_gen5_sort_bl_exit_data, false);
+ if (error)
+ return error;
+
+ if (resp_len == GEN5_BL_FAIL_EXIT_RESP_LEN ||
+ resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
+ GEN5_BL_RESP_REPORT_ID)
+ return -EAGAIN;
+
+ if (resp_data[0] == 0x00 && resp_data[1] == 0x00)
+ return 0;
+
+ return -ENODEV;
+}
+
+static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2F, 0x00, 0x01 };
+ u8 resp_data[2];
+ int resp_len;
+ int error;
+
+ error = cyapa_poll_state(cyapa, 500);
+ if (error < 0)
+ return error;
+ if (cyapa->gen != CYAPA_GEN5)
+ return -EINVAL;
+
+ /* Already in Gen5 BL; nothing more to do. */
+ if (cyapa->state == CYAPA_STATE_GEN5_BL)
+ return 0;
+
+ if (cyapa->state != CYAPA_STATE_GEN5_APP)
+ return -EAGAIN;
+
+ /* Try to dump all buffered report data before sending any command. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ /*
+ * Send the bootloader enter command to the trackpad device; after
+ * entering the bootloader, the response data is the two bytes 0x00 0x00.
+ */
+ resp_len = sizeof(resp_data);
+ memset(resp_data, 0, resp_len);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 5000, cyapa_gen5_sort_application_launch_data,
+ true);
+ if (error || resp_data[0] != 0x00 || resp_data[1] != 0x00)
+ return error < 0 ? error : -EAGAIN;
+
+ cyapa->operational = false;
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ return 0;
+}
+
+static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
+{
+ struct device *dev = &cyapa->client->dev;
+ const struct cyapa_tsg_bin_image *image = (const void *)fw->data;
+ const struct cyapa_tsg_bin_image_data_record *app_integrity;
+ const struct gen5_bl_metadata_row_params *metadata;
+ size_t flash_records_count;
+ u32 fw_app_start, fw_upgrade_start;
+ u16 fw_app_len, fw_upgrade_len;
+ u16 app_crc;
+ u16 app_integrity_crc;
+ int record_index;
+ int i;
+
+ flash_records_count = (fw->size -
+ sizeof(struct cyapa_tsg_bin_image_head)) /
+ sizeof(struct cyapa_tsg_bin_image_data_record);
+
+ /*
+ * APP_INTEGRITY row is always the last row block,
+ * and the row id must be 0x01ff.
+ */
+ app_integrity = &image->records[flash_records_count - 1];
+
+ if (app_integrity->flash_array_id != 0x00 ||
+ get_unaligned_be16(&app_integrity->row_number) != 0x01ff) {
+ dev_err(dev, "%s: invalid app_integrity data.\n", __func__);
+ return -EINVAL;
+ }
+
+ metadata = (const void *)app_integrity->record_data;
+
+ /* Verify app_integrity crc */
+ app_integrity_crc = crc_itu_t(0xffff, app_integrity->record_data,
+ CYAPA_TSG_APP_INTEGRITY_SIZE);
+ if (app_integrity_crc != get_unaligned_le16(&metadata->metadata_crc)) {
+ dev_err(dev, "%s: invalid app_integrity crc.\n", __func__);
+ return -EINVAL;
+ }
+
+ fw_app_start = get_unaligned_le32(&metadata->app_start);
+ fw_app_len = get_unaligned_le16(&metadata->app_len);
+ fw_upgrade_start = get_unaligned_le32(&metadata->upgrade_start);
+ fw_upgrade_len = get_unaligned_le16(&metadata->upgrade_len);
+
+ if (fw_app_start % CYAPA_TSG_FW_ROW_SIZE ||
+ fw_app_len % CYAPA_TSG_FW_ROW_SIZE ||
+ fw_upgrade_start % CYAPA_TSG_FW_ROW_SIZE ||
+ fw_upgrade_len % CYAPA_TSG_FW_ROW_SIZE) {
+ dev_err(dev, "%s: invalid image alignment.\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Verify application image CRC
+ */
+ record_index = fw_app_start / CYAPA_TSG_FW_ROW_SIZE -
+ CYAPA_TSG_IMG_START_ROW_NUM;
+ app_crc = 0xffffU;
+ for (i = 0; i < fw_app_len / CYAPA_TSG_FW_ROW_SIZE; i++) {
+ const u8 *data = image->records[record_index + i].record_data;
+ app_crc = crc_itu_t(app_crc, data, CYAPA_TSG_FW_ROW_SIZE);
+ }
+
+ if (app_crc != get_unaligned_le16(&metadata->app_crc)) {
+ dev_err(dev, "%s: invalid firmware app crc check.\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cyapa_gen5_write_fw_block(struct cyapa *cyapa,
+ struct cyapa_tsg_bin_image_data_record *flash_record)
+{
+ struct gen5_bl_cmd_head *bl_cmd_head;
+ struct gen5_bl_packet_start *bl_packet_start;
+ struct gen5_bl_flash_row_head *flash_row_head;
+ struct gen5_bl_packet_end *bl_packet_end;
+ u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
+ u16 cmd_len;
+ u8 flash_array_id;
+ u16 flash_row_id;
+ u16 record_len;
+ u8 *record_data;
+ u16 data_len;
+ u16 crc;
+ u8 resp_data[11];
+ int resp_len;
+ int error;
+
+ flash_array_id = flash_record->flash_array_id;
+ flash_row_id = get_unaligned_be16(&flash_record->row_number);
+ record_len = get_unaligned_be16(&flash_record->record_len);
+ record_data = flash_record->record_data;
+
+ memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
+ bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+ bl_packet_start = &bl_cmd_head->packet_start;
+ cmd_len = sizeof(struct gen5_bl_cmd_head) +
+ sizeof(struct gen5_bl_flash_row_head) +
+ CYAPA_TSG_FLASH_MAP_BLOCK_SIZE +
+ sizeof(struct gen5_bl_packet_end);
+
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+ /* Don't include 2 bytes register address */
+ put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
+ bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
+ bl_packet_start->sop = GEN5_SOP_KEY;
+ bl_packet_start->cmd_code = GEN5_BL_CMD_PROGRAM_VERIFY_ROW;
+
+ /* 1 (Flash Array ID) + 2 (Flash Row ID) + 128 (flash data) */
+ data_len = sizeof(struct gen5_bl_flash_row_head) + record_len;
+ put_unaligned_le16(data_len, &bl_packet_start->data_length);
+
+ flash_row_head = (struct gen5_bl_flash_row_head *)bl_cmd_head->data;
+ flash_row_head->flash_array_id = flash_array_id;
+ put_unaligned_le16(flash_row_id, &flash_row_head->flash_row_id);
+ memcpy(flash_row_head->flash_data, record_data, record_len);
+
+ bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+ data_len);
+ crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
+ sizeof(struct gen5_bl_packet_start) + data_len);
+ put_unaligned_le16(crc, &bl_packet_end->crc);
+ bl_packet_end->eop = GEN5_EOP_KEY;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
+ if (error || resp_len != GEN5_BL_BLOCK_WRITE_RESP_LEN ||
+ resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ return error < 0 ? error : -EAGAIN;
+
+ return 0;
+}
+
+static int cyapa_gen5_do_fw_update(struct cyapa *cyapa,
+ const struct firmware *fw)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct cyapa_tsg_bin_image_data_record *flash_record;
+ struct cyapa_tsg_bin_image *image =
+ (struct cyapa_tsg_bin_image *)fw->data;
+ int flash_records_count;
+ int i;
+ int error;
+
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ flash_records_count =
+ (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
+ sizeof(struct cyapa_tsg_bin_image_data_record);
+ /*
+ * The last flash row 0x01ff has been written through bl_initiate
+ * command, so DO NOT write flash 0x01ff to trackpad device.
+ */
+ for (i = 0; i < (flash_records_count - 1); i++) {
+ flash_record = &image->records[i];
+ error = cyapa_gen5_write_fw_block(cyapa, flash_record);
+ if (error) {
+ dev_err(dev, "%s: Gen5 FW update aborted: %d\n",
+ __func__, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int cyapa_gen5_change_power_state(struct cyapa *cyapa, u8 power_state)
+{
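+ /*
+ * Raw PIP application command: output register address, payload
+ * length, report id, reserved byte, command code 0x08 (set power
+ * state) and the power state value, which is filled in below.
+ */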
+ u8 cmd[8] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, 0x08, 0x01 };
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ cmd[7] = power_state;
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x08) ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ return error < 0 ? error : -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
+ u8 parameter_id, u16 interval_time)
+{
+ struct gen5_app_cmd_head *app_cmd_head;
+ struct gen5_app_set_parameter_data *parameter_data;
+ u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
+ int cmd_len;
+ u8 resp_data[7];
+ int resp_len;
+ u8 parameter_size;
+ int error;
+
+ memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
+ app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ parameter_data = (struct gen5_app_set_parameter_data *)
+ app_cmd_head->parameter_data;
+ cmd_len = sizeof(struct gen5_app_cmd_head) +
+ sizeof(struct gen5_app_set_parameter_data);
+
+ switch (parameter_id) {
+ case GEN5_PARAMETER_ACT_INTERVL_ID:
+ parameter_size = GEN5_PARAMETER_ACT_INTERVL_SIZE;
+ break;
+ case GEN5_PARAMETER_ACT_LFT_INTERVL_ID:
+ parameter_size = GEN5_PARAMETER_ACT_LFT_INTERVL_SIZE;
+ break;
+ case GEN5_PARAMETER_LP_INTRVL_ID:
+ parameter_size = GEN5_PARAMETER_LP_INTRVL_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ /*
+ * Don't include the unused parameter value bytes or the
+ * 2-byte register address.
+ */
+ put_unaligned_le16(cmd_len - (4 - parameter_size) - 2,
+ &app_cmd_head->length);
+ app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
+ parameter_data->parameter_id = parameter_id;
+ parameter_data->parameter_size = parameter_size;
+ put_unaligned_le32((u32)interval_time, &parameter_data->value);
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ if (error || resp_data[5] != parameter_id ||
+ resp_data[6] != parameter_size ||
+ !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER))
+ return error < 0 ? error : -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
+ u8 parameter_id, u16 *interval_time)
+{
+ struct gen5_app_cmd_head *app_cmd_head;
+ struct gen5_app_get_parameter_data *parameter_data;
+ u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
+ int cmd_len;
+ u8 resp_data[11];
+ int resp_len;
+ u8 parameter_size;
+ u16 mask, i;
+ int error;
+
+ memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
+ app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ parameter_data = (struct gen5_app_get_parameter_data *)
+ app_cmd_head->parameter_data;
+ cmd_len = sizeof(struct gen5_app_cmd_head) +
+ sizeof(struct gen5_app_get_parameter_data);
+
+ *interval_time = 0;
+ switch (parameter_id) {
+ case GEN5_PARAMETER_ACT_INTERVL_ID:
+ parameter_size = GEN5_PARAMETER_ACT_INTERVL_SIZE;
+ break;
+ case GEN5_PARAMETER_ACT_LFT_INTERVL_ID:
+ parameter_size = GEN5_PARAMETER_ACT_LFT_INTERVL_SIZE;
+ break;
+ case GEN5_PARAMETER_LP_INTRVL_ID:
+ parameter_size = GEN5_PARAMETER_LP_INTRVL_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+ /* Don't include the 2-byte register address. */
+ put_unaligned_le16(cmd_len - 2, &app_cmd_head->length);
+ app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = GEN5_CMD_GET_PARAMETER;
+ parameter_data->parameter_id = parameter_id;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ if (error || resp_data[5] != parameter_id || resp_data[6] == 0 ||
+ !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_GET_PARAMETER))
+ return error < 0 ? error : -EINVAL;
+
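+ /* Keep only the bytes that belong to this parameter. */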
+ mask = 0;
+ for (i = 0; i < parameter_size; i++)
+ mask |= (0xff << (i * 8));
+ *interval_time = get_unaligned_le16(&resp_data[7]) & mask;
+
+ return 0;
+}
+
+static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
+{
+ struct gen5_app_cmd_head *app_cmd_head;
+ u8 cmd[10];
+ u8 resp_data[7];
+ int resp_len;
+ int error;
+
+ memset(cmd, 0, sizeof(cmd));
+ app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+
+ put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
+ app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
+ app_cmd_head->parameter_data[0] = GEN5_PARAMETER_DISABLE_PIP_REPORT;
+ app_cmd_head->parameter_data[1] = 0x01;
+ app_cmd_head->parameter_data[2] = 0x01;
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ if (error || resp_data[5] != GEN5_PARAMETER_DISABLE_PIP_REPORT ||
+ !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER) ||
+ resp_data[6] != 0x01)
+ return error < 0 ? error : -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen5_deep_sleep(struct cyapa *cyapa, u8 state)
+{
+ u8 cmd[] = { 0x05, 0x00, 0x00, 0x08};
+ u8 resp_data[5];
+ int resp_len;
+ int error;
+
+ cmd[2] = state & GEN5_DEEP_SLEEP_STATE_MASK;
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_deep_sleep_data, false);
+ if (error || ((resp_data[3] & GEN5_DEEP_SLEEP_STATE_MASK) != state))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
+ u8 power_mode, u16 sleep_time)
+{
+ struct device *dev = &cyapa->client->dev;
+ u8 power_state;
+ int error;
+
+ if (cyapa->state != CYAPA_STATE_GEN5_APP)
+ return 0;
+
+ /* Dump all the report data before issuing power mode commands. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ if (GEN5_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+ /*
+ * Assume the trackpad is in deep sleep mode when the driver is
+ * loaded. This avoids command I/O failures after the driver is
+ * unloaded and reloaded, since unloading leaves the trackpad in
+ * deep sleep mode.
+ */
+ GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+ }
+
+ if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+ GEN5_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+ if (cyapa_gen5_get_interval_time(cyapa,
+ GEN5_PARAMETER_LP_INTRVL_ID,
+ &cyapa->dev_sleep_time) != 0)
+ GEN5_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+
+ if (GEN5_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+ if (power_mode == PWR_MODE_OFF ||
+ power_mode == PWR_MODE_FULL_ACTIVE ||
+ power_mode == PWR_MODE_BTN_ONLY ||
+ GEN5_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+ /* Already in the correct power mode state; return early. */
+ return 0;
+ }
+ }
+
+ if (power_mode == PWR_MODE_OFF) {
+ error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_OFF);
+ if (error) {
+ dev_err(dev, "enter deep sleep fail: %d\n", error);
+ return error;
+ }
+
+ GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+ return 0;
+ }
+
+ /*
+ * When the trackpad is in the power off mode, it cannot switch
+ * to another power state directly; it must first be woken from
+ * deep sleep before the next power state change can be made.
+ */
+ if (GEN5_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+ error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_ON);
+ if (error) {
+ dev_err(dev, "deep sleep wake fail: %d\n", error);
+ return error;
+ }
+ }
+
+ if (power_mode == PWR_MODE_FULL_ACTIVE) {
+ error = cyapa_gen5_change_power_state(cyapa,
+ GEN5_POWER_STATE_ACTIVE);
+ if (error) {
+ dev_err(dev, "change to active fail: %d\n", error);
+ return error;
+ }
+
+ GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+ } else if (power_mode == PWR_MODE_BTN_ONLY) {
+ error = cyapa_gen5_change_power_state(cyapa,
+ GEN5_POWER_STATE_BTN_ONLY);
+ if (error) {
+ dev_err(dev, "fail to button only mode: %d\n", error);
+ return error;
+ }
+
+ GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+ } else {
+ /*
+ * Continue with the power mode change even if setting the
+ * interval time failed; the only consequence is that the
+ * sleep interval time may not be correct.
+ */
+ if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) ||
+ sleep_time != GEN5_DEV_GET_SLEEP_TIME(cyapa))
+ if (cyapa_gen5_set_interval_time(cyapa,
+ GEN5_PARAMETER_LP_INTRVL_ID,
+ sleep_time) == 0)
+ GEN5_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+
+ if (sleep_time <= GEN5_POWER_READY_MAX_INTRVL_TIME)
+ power_state = GEN5_POWER_STATE_READY;
+ else
+ power_state = GEN5_POWER_STATE_IDLE;
+ error = cyapa_gen5_change_power_state(cyapa, power_state);
+ if (error) {
+ dev_err(dev, "set power state to 0x%02x failed: %d\n",
+ power_state, error);
+ return error;
+ }
+
+ /*
+ * Disable the PIP report for a short time; the firmware will
+ * re-enable it automatically. This works around an issue where
+ * the trackpad cannot assert a wakeup signal if the user touches
+ * it while the system is still suspending: report data buffered
+ * during suspend may leave the interrupt line unable to be
+ * asserted again.
+ */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+ cyapa_gen5_disable_pip_report(cyapa);
+
+ GEN5_DEV_SET_PWR_STATE(cyapa,
+ cyapa_sleep_time_to_pwr_cmd(sleep_time));
+ }
+
+ return 0;
+}
+
+static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x04 };
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ /* Try to dump all buffered data before doing command. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x04))
+ return -EINVAL;
+
+ /* Try to dump all buffered data when resuming scanning. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ return 0;
+}
+
+static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x03 };
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ /* Try to dump all buffered data before doing command. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x03))
+ return -EINVAL;
+
+ /* Try to dump all buffered data when suspending scanning. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ return 0;
+}
+
+static int cyapa_gen5_calibrate_pwcs(struct cyapa *cyapa,
+ u8 calibrate_sensing_mode_type)
+{
+ struct gen5_app_cmd_head *app_cmd_head;
+ u8 cmd[8];
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ /* Try to dump all buffered data before doing command. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ memset(cmd, 0, sizeof(cmd));
+ app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
+ app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = GEN5_CMD_CALIBRATE;
+ app_cmd_head->parameter_data[0] = calibrate_sensing_mode_type;
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 5000, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_CALIBRATE) ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ return error < 0 ? error : -EAGAIN;
+
+ return 0;
+}
+
+static ssize_t cyapa_gen5_do_calibrate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error, calibrate_error;
+
+ /* 1. Suspend scanning. */
+ error = cyapa_gen5_suspend_scanning(cyapa);
+ if (error)
+ return error;
+
+ /* 2. Do the mutual capacitance fine calibration. */
+ calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
+ CYAPA_SENSING_MODE_MUTUAL_CAP_FINE);
+ if (calibrate_error)
+ goto resume_scanning;
+
+ /* 3. Do the self capacitance calibration. */
+ calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
+ CYAPA_SENSING_MODE_SELF_CAP);
+ if (calibrate_error)
+ goto resume_scanning;
+
+resume_scanning:
+ /* 4. Resume scanning. */
+ error = cyapa_gen5_resume_scanning(cyapa);
+ if (error || calibrate_error)
+ return error ? error : calibrate_error;
+
+ return count;
+}
+
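+/*
+ * Sign-extend a @num_bits wide two's complement value to a full s32.
+ * For example, with num_bits = 8 the raw value 0xf0 becomes -16.
+ */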
+static s32 twos_complement_to_s32(s32 value, int num_bits)
+{
+ if (value >> (num_bits - 1))
+ value |= -1 << num_bits;
+ return value;
+}
+
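+/*
+ * Decode one element from a "retrieve data structure" response buffer.
+ * The low three bits of @data_format give the element size in bytes,
+ * a cleared 0x10 bit means big endian byte order, and a cleared 0x20
+ * bit means the value is unsigned.
+ */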
+static s32 cyapa_parse_structure_data(u8 data_format, u8 *buf, int buf_len)
+{
+ int data_size;
+ bool big_endian;
+ bool unsigned_type;
+ s32 value;
+
+ data_size = (data_format & 0x07);
+ big_endian = ((data_format & 0x10) == 0x00);
+ unsigned_type = ((data_format & 0x20) == 0x00);
+
+ if (buf_len < data_size)
+ return 0;
+
+ switch (data_size) {
+ case 1:
+ value = buf[0];
+ break;
+ case 2:
+ if (big_endian)
+ value = get_unaligned_be16(buf);
+ else
+ value = get_unaligned_le16(buf);
+ break;
+ case 4:
+ if (big_endian)
+ value = get_unaligned_be32(buf);
+ else
+ value = get_unaligned_le32(buf);
+ break;
+ default:
+ /* Should not happen; handled as the default case. */
+ value = 0;
+ break;
+ }
+
+ if (!unsigned_type)
+ value = twos_complement_to_s32(value, data_size * 8);
+
+ return value;
+}
+
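+/*
+ * Work out the Rx/Tx electrode counts. If the Rx count is already known,
+ * Tx is the other axis; otherwise assume Rx corresponds to the larger of
+ * the X/Y electrode counts and Tx to the smaller one.
+ */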
+static void cyapa_gen5_guess_electrodes(struct cyapa *cyapa,
+ int *electrodes_rx, int *electrodes_tx)
+{
+ if (cyapa->electrodes_rx != 0) {
+ *electrodes_rx = cyapa->electrodes_rx;
+ *electrodes_tx = (cyapa->electrodes_x == *electrodes_rx) ?
+ cyapa->electrodes_y : cyapa->electrodes_x;
+ } else {
+ *electrodes_tx = min(cyapa->electrodes_x, cyapa->electrodes_y);
+ *electrodes_rx = max(cyapa->electrodes_x, cyapa->electrodes_y);
+ }
+}
+
+/*
+ * Read either the global mutual/self idac data or the local mutual/self
+ * PWC data, selected by @idac_data_type.
+ * If @data_size is 0 on entry, the global mutual or self idac data is
+ * read. For global mutual idac data, @idac_max, @idac_min and @idac_ave
+ * return the maximum, minimum and average values respectively. For
+ * global self idac data, @idac_max returns the global self cap idac data
+ * in the Rx direction, @idac_min returns it in the Tx direction, and
+ * @idac_ave is not used.
+ * If @data_size is non-zero on entry, the local mutual or self PWC data
+ * is read, and @idac_max, @idac_min and @idac_ave return the maximum,
+ * minimum and average values of that data.
+ * Note: to read the mutual local PWC data, this function must first be
+ * called to read the mutual global idac data so that the correct Rx
+ * count is set; otherwise the mutual idac and PWC data read may not be
+ * correct.
+ */
+static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
+ u8 cmd_code, u8 idac_data_type, int *data_size,
+ int *idac_max, int *idac_min, int *idac_ave)
+{
+ struct gen5_app_cmd_head *cmd_head;
+ u8 cmd[12];
+ u8 resp_data[256];
+ int resp_len;
+ int read_len;
+ int value;
+ u16 offset;
+ int read_elements;
+ bool read_global_idac;
+ int sum, count, max_element_cnt;
+ int tmp_max, tmp_min, tmp_ave, tmp_sum, tmp_count;
+ int electrodes_rx, electrodes_tx;
+ int i;
+ int error;
+
+ if (cmd_code != GEN5_CMD_RETRIEVE_DATA_STRUCTURE ||
+ (idac_data_type != GEN5_RETRIEVE_MUTUAL_PWC_DATA &&
+ idac_data_type != GEN5_RETRIEVE_SELF_CAP_PWC_DATA) ||
+ !data_size || !idac_max || !idac_min || !idac_ave)
+ return -EINVAL;
+
+ *idac_max = INT_MIN;
+ *idac_min = INT_MAX;
+ sum = count = tmp_count = 0;
+ electrodes_rx = electrodes_tx = 0;
+ if (*data_size == 0) {
+ /*
+ * Read the global idac values first.
+ * Currently, no idac data element exceeds 4 bytes.
+ */
+ read_global_idac = true;
+ offset = 0;
+ *data_size = 4;
+ tmp_max = INT_MIN;
+ tmp_min = INT_MAX;
+ tmp_ave = tmp_sum = tmp_count = 0;
+
+ if (idac_data_type == GEN5_RETRIEVE_MUTUAL_PWC_DATA) {
+ if (cyapa->aligned_electrodes_rx == 0) {
+ cyapa_gen5_guess_electrodes(cyapa,
+ &electrodes_rx, &electrodes_tx);
+ cyapa->aligned_electrodes_rx =
+ (electrodes_rx + 3) & ~3u;
+ }
+ max_element_cnt =
+ (cyapa->aligned_electrodes_rx + 7) & ~7u;
+ } else {
+ max_element_cnt = 2;
+ }
+ } else {
+ read_global_idac = false;
+ if (*data_size > 4)
+ *data_size = 4;
+ /* Calculate the start offset in bytes of local PWC data. */
+ if (idac_data_type == GEN5_RETRIEVE_MUTUAL_PWC_DATA) {
+ offset = cyapa->aligned_electrodes_rx * (*data_size);
+ if (cyapa->electrodes_rx == cyapa->electrodes_x)
+ electrodes_tx = cyapa->electrodes_y;
+ else
+ electrodes_tx = cyapa->electrodes_x;
+ max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
+ ~7u) * electrodes_tx;
+ } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) {
+ offset = 2;
+ max_element_cnt = cyapa->electrodes_x +
+ cyapa->electrodes_y;
+ max_element_cnt = (max_element_cnt + 3) & ~3u;
+ }
+ }
+
+ memset(cmd, 0, sizeof(cmd));
+ cmd_head = (struct gen5_app_cmd_head *)cmd;
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &cmd_head->addr);
+ put_unaligned_le16(sizeof(cmd) - 2, &cmd_head->length);
+ cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ cmd_head->cmd_code = cmd_code;
+ do {
+ read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) /
+ (*data_size);
+ read_elements = min(read_elements, max_element_cnt - count);
+ read_len = read_elements * (*data_size);
+
+ put_unaligned_le16(offset, &cmd_head->parameter_data[0]);
+ put_unaligned_le16(read_len, &cmd_head->parameter_data[2]);
+ cmd_head->parameter_data[4] = idac_data_type;
+ resp_len = GEN5_RESP_DATA_STRUCTURE_OFFSET + read_len;
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data,
+ true);
+ if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
+ !VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+ resp_data[6] != idac_data_type)
+ return (error < 0) ? error : -EAGAIN;
+ read_len = get_unaligned_le16(&resp_data[7]);
+ if (read_len == 0)
+ break;
+
+ *data_size = (resp_data[9] & GEN5_PWC_DATA_ELEMENT_SIZE_MASK);
+ if (read_len < *data_size)
+ return -EINVAL;
+
+ if (read_global_idac &&
+ idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) {
+ /* Rx's self global idac data. */
+ *idac_max = cyapa_parse_structure_data(
+ resp_data[9],
+ &resp_data[GEN5_RESP_DATA_STRUCTURE_OFFSET],
+ *data_size);
+ /* Tx's self global idac data. */
+ *idac_min = cyapa_parse_structure_data(
+ resp_data[9],
+ &resp_data[GEN5_RESP_DATA_STRUCTURE_OFFSET +
+ *data_size],
+ *data_size);
+ break;
+ }
+
+ /* Read mutual global idac or local mutual/self PWC data. */
+ offset += read_len;
+ for (i = 10; i < (read_len + GEN5_RESP_DATA_STRUCTURE_OFFSET);
+ i += *data_size) {
+ value = cyapa_parse_structure_data(resp_data[9],
+ &resp_data[i], *data_size);
+ *idac_min = min(value, *idac_min);
+ *idac_max = max(value, *idac_max);
+
+ if (idac_data_type == GEN5_RETRIEVE_MUTUAL_PWC_DATA &&
+ tmp_count < cyapa->aligned_electrodes_rx &&
+ read_global_idac) {
+ /*
+ * The gap between the global and local mutual
+ * idac values must be bigger than 50%.
+ * Normally the global values are greater than 50
+ * and the local values are less than 10.
+ */
+ if (!tmp_ave || value > tmp_ave / 2) {
+ tmp_min = min(value, tmp_min);
+ tmp_max = max(value, tmp_max);
+ tmp_sum += value;
+ tmp_count++;
+
+ tmp_ave = tmp_sum / tmp_count;
+ }
+ }
+
+ sum += value;
+ count++;
+
+ if (count >= max_element_cnt)
+ goto out;
+ }
+ } while (true);
+
+out:
+ *idac_ave = count ? (sum / count) : 0;
+
+ if (read_global_idac &&
+ idac_data_type == GEN5_RETRIEVE_MUTUAL_PWC_DATA) {
+ if (tmp_count == 0)
+ return 0;
+
+ if (tmp_count == cyapa->aligned_electrodes_rx) {
+ cyapa->electrodes_rx = cyapa->electrodes_rx ?
+ cyapa->electrodes_rx : electrodes_rx;
+ } else if (tmp_count == electrodes_rx) {
+ cyapa->electrodes_rx = cyapa->electrodes_rx ?
+ cyapa->electrodes_rx : electrodes_rx;
+ cyapa->aligned_electrodes_rx = electrodes_rx;
+ } else {
+ cyapa->electrodes_rx = cyapa->electrodes_rx ?
+ cyapa->electrodes_rx : electrodes_tx;
+ cyapa->aligned_electrodes_rx = tmp_count;
+ }
+
+ *idac_min = tmp_min;
+ *idac_max = tmp_max;
+ *idac_ave = tmp_ave;
+ }
+
+ return 0;
+}
+
+static int cyapa_gen5_read_mutual_idac_data(struct cyapa *cyapa,
+ int *gidac_mutual_max, int *gidac_mutual_min, int *gidac_mutual_ave,
+ int *lidac_mutual_max, int *lidac_mutual_min, int *lidac_mutual_ave)
+{
+ int data_size;
+ int error;
+
+ *gidac_mutual_max = *gidac_mutual_min = *gidac_mutual_ave = 0;
+ *lidac_mutual_max = *lidac_mutual_min = *lidac_mutual_ave = 0;
+
+ data_size = 0;
+ error = cyapa_gen5_read_idac_data(cyapa,
+ GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ GEN5_RETRIEVE_MUTUAL_PWC_DATA,
+ &data_size,
+ gidac_mutual_max, gidac_mutual_min, gidac_mutual_ave);
+ if (error)
+ return error;
+
+ error = cyapa_gen5_read_idac_data(cyapa,
+ GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ GEN5_RETRIEVE_MUTUAL_PWC_DATA,
+ &data_size,
+ lidac_mutual_max, lidac_mutual_min, lidac_mutual_ave);
+ return error;
+}
+
+static int cyapa_gen5_read_self_idac_data(struct cyapa *cyapa,
+ int *gidac_self_rx, int *gidac_self_tx,
+ int *lidac_self_max, int *lidac_self_min, int *lidac_self_ave)
+{
+ int data_size;
+ int error;
+
+ *gidac_self_rx = *gidac_self_tx = 0;
+ *lidac_self_max = *lidac_self_min = *lidac_self_ave = 0;
+
+ data_size = 0;
+ error = cyapa_gen5_read_idac_data(cyapa,
+ GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
+ &data_size,
+ lidac_self_max, lidac_self_min, lidac_self_ave);
+ if (error)
+ return error;
+ *gidac_self_rx = *lidac_self_max;
+ *gidac_self_tx = *lidac_self_min;
+
+ error = cyapa_gen5_read_idac_data(cyapa,
+ GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
+ &data_size,
+ lidac_self_max, lidac_self_min, lidac_self_ave);
+ return error;
+}
+
+static ssize_t cyapa_gen5_execute_panel_scan(struct cyapa *cyapa)
+{
+ struct gen5_app_cmd_head *app_cmd_head;
+ u8 cmd[7];
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ memset(cmd, 0, sizeof(cmd));
+ app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
+ app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = GEN5_CMD_EXECUTE_PANEL_SCAN;
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ if (error || resp_len != sizeof(resp_data) ||
+ !VALID_CMD_RESP_HEADER(resp_data,
+ GEN5_CMD_EXECUTE_PANEL_SCAN) ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ return error ? error : -EAGAIN;
+
+ return 0;
+}
+
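+/*
+ * Retrieve one type of panel scan data (@raw_data_type) in chunks of up
+ * to 256 bytes and track the minimum, maximum and average element values.
+ * When @buffer is not NULL, the decoded elements are also stored there as
+ * little-endian 32-bit values.
+ */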
+static int cyapa_gen5_read_panel_scan_raw_data(struct cyapa *cyapa,
+ u8 cmd_code, u8 raw_data_type, int raw_data_max_num,
+ int *raw_data_max, int *raw_data_min, int *raw_data_ave,
+ u8 *buffer)
+{
+ struct gen5_app_cmd_head *app_cmd_head;
+ struct gen5_retrieve_panel_scan_data *panel_scan_data;
+ u8 cmd[12];
+ u8 resp_data[256]; /* Maximum number of bytes that can be transferred at one time. */
+ int resp_len;
+ int read_elements;
+ int read_len;
+ u16 offset;
+ s32 value;
+ int sum, count;
+ int data_size;
+ s32 *intp;
+ int i;
+ int error;
+
+ if (cmd_code != GEN5_CMD_RETRIEVE_PANEL_SCAN ||
+ (raw_data_type > GEN5_PANEL_SCAN_SELF_DIFFCOUNT) ||
+ !raw_data_max || !raw_data_min || !raw_data_ave)
+ return -EINVAL;
+
+ intp = (s32 *)buffer;
+ *raw_data_max = INT_MIN;
+ *raw_data_min = INT_MAX;
+ sum = count = 0;
+ offset = 0;
+ /* Assume max element size is 4 currently. */
+ read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) / 4;
+ read_len = read_elements * 4;
+ app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
+ app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = cmd_code;
+ panel_scan_data = (struct gen5_retrieve_panel_scan_data *)
+ app_cmd_head->parameter_data;
+ do {
+ put_unaligned_le16(offset, &panel_scan_data->read_offset);
+ put_unaligned_le16(read_elements,
+ &panel_scan_data->read_elements);
+ panel_scan_data->data_id = raw_data_type;
+
+ resp_len = GEN5_RESP_DATA_STRUCTURE_OFFSET + read_len;
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
+ !VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+ resp_data[6] != raw_data_type)
+ return error ? error : -EAGAIN;
+
+ read_elements = get_unaligned_le16(&resp_data[7]);
+ if (read_elements == 0)
+ break;
+
+ data_size = (resp_data[9] & GEN5_PWC_DATA_ELEMENT_SIZE_MASK);
+ offset += read_elements;
+ if (read_elements) {
+ for (i = GEN5_RESP_DATA_STRUCTURE_OFFSET;
+ i < (read_elements * data_size +
+ GEN5_RESP_DATA_STRUCTURE_OFFSET);
+ i += data_size) {
+ value = cyapa_parse_structure_data(resp_data[9],
+ &resp_data[i], data_size);
+ *raw_data_min = min(value, *raw_data_min);
+ *raw_data_max = max(value, *raw_data_max);
+
+ if (intp)
+ put_unaligned_le32(value, &intp[count]);
+
+ sum += value;
+ count++;
+
+ }
+ }
+
+ if (count >= raw_data_max_num)
+ break;
+
+ read_elements = (sizeof(resp_data) -
+ GEN5_RESP_DATA_STRUCTURE_OFFSET) / data_size;
+ read_len = read_elements * data_size;
+ } while (true);
+
+ *raw_data_ave = count ? (sum / count) : 0;
+
+ return 0;
+}
+
+static ssize_t cyapa_gen5_show_baseline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ int gidac_mutual_max, gidac_mutual_min, gidac_mutual_ave;
+ int lidac_mutual_max, lidac_mutual_min, lidac_mutual_ave;
+ int gidac_self_rx, gidac_self_tx;
+ int lidac_self_max, lidac_self_min, lidac_self_ave;
+ int raw_cap_mutual_max, raw_cap_mutual_min, raw_cap_mutual_ave;
+ int raw_cap_self_max, raw_cap_self_min, raw_cap_self_ave;
+ int mutual_diffdata_max, mutual_diffdata_min, mutual_diffdata_ave;
+ int self_diffdata_max, self_diffdata_min, self_diffdata_ave;
+ int mutual_baseline_max, mutual_baseline_min, mutual_baseline_ave;
+ int self_baseline_max, self_baseline_min, self_baseline_ave;
+ int error, resume_error;
+ int size;
+
+ if (cyapa->state != CYAPA_STATE_GEN5_APP)
+ return -EBUSY;
+
+ /* 1. Suspend scanning. */
+ error = cyapa_gen5_suspend_scanning(cyapa);
+ if (error)
+ return error;
+
+ /* 2. Read global and local mutual IDAC data. */
+ gidac_self_rx = gidac_self_tx = 0;
+ error = cyapa_gen5_read_mutual_idac_data(cyapa,
+ &gidac_mutual_max, &gidac_mutual_min,
+ &gidac_mutual_ave, &lidac_mutual_max,
+ &lidac_mutual_min, &lidac_mutual_ave);
+ if (error)
+ goto resume_scanning;
+
+ /* 3. Read global and local self IDAC data. */
+ error = cyapa_gen5_read_self_idac_data(cyapa,
+ &gidac_self_rx, &gidac_self_tx,
+ &lidac_self_max, &lidac_self_min,
+ &lidac_self_ave);
+ if (error)
+ goto resume_scanning;
+
+ /* 4. Execute panel scan. It must be done before reading the data. */
+ error = cyapa_gen5_execute_panel_scan(cyapa);
+ if (error)
+ goto resume_scanning;
+
+ /* 5. Retrieve panel scan, mutual cap raw data. */
+ error = cyapa_gen5_read_panel_scan_raw_data(cyapa,
+ GEN5_CMD_RETRIEVE_PANEL_SCAN,
+ GEN5_PANEL_SCAN_MUTUAL_RAW_DATA,
+ cyapa->electrodes_x * cyapa->electrodes_y,
+ &raw_cap_mutual_max, &raw_cap_mutual_min,
+ &raw_cap_mutual_ave,
+ NULL);
+ if (error)
+ goto resume_scanning;
+
+ /* 6. Retrieve panel scan, self cap raw data. */
+ error = cyapa_gen5_read_panel_scan_raw_data(cyapa,
+ GEN5_CMD_RETRIEVE_PANEL_SCAN,
+ GEN5_PANEL_SCAN_SELF_RAW_DATA,
+ cyapa->electrodes_x + cyapa->electrodes_y,
+ &raw_cap_self_max, &raw_cap_self_min,
+ &raw_cap_self_ave,
+ NULL);
+ if (error)
+ goto resume_scanning;
+
+ /* 7. Retrieve panel scan, mutual cap diffcount raw data. */
+ error = cyapa_gen5_read_panel_scan_raw_data(cyapa,
+ GEN5_CMD_RETRIEVE_PANEL_SCAN,
+ GEN5_PANEL_SCAN_MUTUAL_DIFFCOUNT,
+ cyapa->electrodes_x * cyapa->electrodes_y,
+ &mutual_diffdata_max, &mutual_diffdata_min,
+ &mutual_diffdata_ave,
+ NULL);
+ if (error)
+ goto resume_scanning;
+
+ /* 8. Retrieve panel scan, self cap diffcount raw data. */
+ error = cyapa_gen5_read_panel_scan_raw_data(cyapa,
+ GEN5_CMD_RETRIEVE_PANEL_SCAN,
+ GEN5_PANEL_SCAN_SELF_DIFFCOUNT,
+ cyapa->electrodes_x + cyapa->electrodes_y,
+ &self_diffdata_max, &self_diffdata_min,
+ &self_diffdata_ave,
+ NULL);
+ if (error)
+ goto resume_scanning;
+
+ /* 9. Retrieve panel scan, mutual cap baseline raw data. */
+ error = cyapa_gen5_read_panel_scan_raw_data(cyapa,
+ GEN5_CMD_RETRIEVE_PANEL_SCAN,
+ GEN5_PANEL_SCAN_MUTUAL_BASELINE,
+ cyapa->electrodes_x * cyapa->electrodes_y,
+ &mutual_baseline_max, &mutual_baseline_min,
+ &mutual_baseline_ave,
+ NULL);
+ if (error)
+ goto resume_scanning;
+
+ /* 10. Retrieve panel scan, self cap baseline raw data. */
+ error = cyapa_gen5_read_panel_scan_raw_data(cyapa,
+ GEN5_CMD_RETRIEVE_PANEL_SCAN,
+ GEN5_PANEL_SCAN_SELF_BASELINE,
+ cyapa->electrodes_x + cyapa->electrodes_y,
+ &self_baseline_max, &self_baseline_min,
+ &self_baseline_ave,
+ NULL);
+ if (error)
+ goto resume_scanning;
+
+resume_scanning:
+ /* 11. Resume scanning. */
+ resume_error = cyapa_gen5_resume_scanning(cyapa);
+ if (resume_error || error)
+ return resume_error ? resume_error : error;
+
+ /* 12. Output data strings */
+ size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d %d %d %d %d %d ",
+ gidac_mutual_min, gidac_mutual_max, gidac_mutual_ave,
+ lidac_mutual_min, lidac_mutual_max, lidac_mutual_ave,
+ gidac_self_rx, gidac_self_tx,
+ lidac_self_min, lidac_self_max, lidac_self_ave);
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ "%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
+ raw_cap_mutual_min, raw_cap_mutual_max, raw_cap_mutual_ave,
+ raw_cap_self_min, raw_cap_self_max, raw_cap_self_ave,
+ mutual_diffdata_min, mutual_diffdata_max, mutual_diffdata_ave,
+ self_diffdata_min, self_diffdata_max, self_diffdata_ave,
+ mutual_baseline_min, mutual_baseline_max, mutual_baseline_ave,
+ self_baseline_min, self_baseline_max, self_baseline_ave);
+ return size;
+}
+
+static bool cyapa_gen5_sort_system_info_data(struct cyapa *cyapa,
+ u8 *buf, int len)
+{
+ /* Check the report id and command code */
+ if (VALID_CMD_RESP_HEADER(buf, 0x02))
+ return true;
+
+ return false;
+}
+
+static int cyapa_gen5_bl_query_data(struct cyapa *cyapa)
+{
+ u8 bl_query_data_cmd[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
+ 0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
+ };
+ u8 resp_data[GEN5_BL_READ_APP_INFO_RESP_LEN];
+ int resp_len;
+ int error;
+
+ resp_len = GEN5_BL_READ_APP_INFO_RESP_LEN;
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ bl_query_data_cmd, sizeof(bl_query_data_cmd),
+ resp_data, &resp_len,
+ 500, cyapa_gen5_sort_tsg_pip_bl_resp_data, false);
+ if (error || resp_len != GEN5_BL_READ_APP_INFO_RESP_LEN ||
+ !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ return error ? error : -EIO;
+
+ memcpy(&cyapa->product_id[0], &resp_data[8], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &resp_data[13], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &resp_data[19], 2);
+ cyapa->product_id[15] = '\0';
+
+ cyapa->fw_maj_ver = resp_data[22];
+ cyapa->fw_min_ver = resp_data[23];
+
+ return 0;
+}
+
+static int cyapa_gen5_get_query_data(struct cyapa *cyapa)
+{
+ u8 get_system_information[] = {
+ 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02
+ };
+ u8 resp_data[71];
+ int resp_len;
+ u16 product_family;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ get_system_information, sizeof(get_system_information),
+ resp_data, &resp_len,
+ 2000, cyapa_gen5_sort_system_info_data, false);
+ if (error || resp_len < sizeof(resp_data))
+ return error ? error : -EIO;
+
+ product_family = get_unaligned_le16(&resp_data[7]);
+ if ((product_family & GEN5_PRODUCT_FAMILY_MASK) !=
+ GEN5_PRODUCT_FAMILY_TRACKPAD)
+ return -EINVAL;
+
+ cyapa->fw_maj_ver = resp_data[15];
+ cyapa->fw_min_ver = resp_data[16];
+
+ cyapa->electrodes_x = resp_data[52];
+ cyapa->electrodes_y = resp_data[53];
+
+ cyapa->physical_size_x = get_unaligned_le16(&resp_data[54]) / 100;
+ cyapa->physical_size_y = get_unaligned_le16(&resp_data[56]) / 100;
+
+ cyapa->max_abs_x = get_unaligned_le16(&resp_data[58]);
+ cyapa->max_abs_y = get_unaligned_le16(&resp_data[60]);
+
+ cyapa->max_z = get_unaligned_le16(&resp_data[62]);
+
+ cyapa->x_origin = resp_data[64] & 0x01;
+ cyapa->y_origin = resp_data[65] & 0x01;
+
+ cyapa->btn_capability = (resp_data[70] << 3) & CAPABILITY_BTN_MASK;
+
+ memcpy(&cyapa->product_id[0], &resp_data[33], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &resp_data[38], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &resp_data[44], 2);
+ cyapa->product_id[15] = '\0';
+
+ if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
+ !cyapa->physical_size_x || !cyapa->physical_size_y ||
+ !cyapa->max_abs_x || !cyapa->max_abs_y || !cyapa->max_z)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ int error;
+
+ if (cyapa->gen != CYAPA_GEN5)
+ return -ENODEV;
+
+ switch (cyapa->state) {
+ case CYAPA_STATE_GEN5_BL:
+ error = cyapa_gen5_bl_exit(cyapa);
+ if (error) {
+ /* Try to update trackpad product information. */
+ cyapa_gen5_bl_query_data(cyapa);
+ goto out;
+ }
+
+ cyapa->state = CYAPA_STATE_GEN5_APP;
+
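+ /* Fall through */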
+ case CYAPA_STATE_GEN5_APP:
+ /*
+ * If the trackpad device is in deep sleep mode, application
+ * commands will fail, so always try to bring the device back
+ * to the fully active state when its state is re-queried.
+ */
+ error = cyapa_gen5_set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0);
+ if (error)
+ dev_warn(dev, "%s: failed to set power active mode.\n",
+ __func__);
+
+ /* Get trackpad product information. */
+ error = cyapa_gen5_get_query_data(cyapa);
+ if (error)
+ goto out;
+ /* Only support product ID starting with CYTRA */
+ if (memcmp(cyapa->product_id, product_id,
+ strlen(product_id)) != 0) {
+ dev_err(dev, "%s: unknown product ID (%s)\n",
+ __func__, cyapa->product_id);
+ error = -EINVAL;
+ }
+ break;
+ default:
+ error = -EINVAL;
+ }
+
+out:
+ return error;
+}
+
+/*
+ * Return false: do not continue processing.
+ * Return true: continue processing.
+ */
+static bool cyapa_gen5_irq_cmd_handler(struct cyapa *cyapa)
+{
+ struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ int length;
+
+ if (atomic_read(&gen5_pip->cmd_issued)) {
+ /* Polling command response data. */
+ if (gen5_pip->is_irq_mode == false)
+ return false;
+
+ /*
+ * Read out all non-command response data. Such output data may
+ * be generated by a finger touching the trackpad while the host
+ * is waiting for the command response.
+ */
+ cyapa_i2c_pip_read(cyapa, gen5_pip->irq_cmd_buf,
+ GEN5_RESP_LENGTH_SIZE);
+ length = get_unaligned_le16(gen5_pip->irq_cmd_buf);
+ length = (length <= GEN5_RESP_LENGTH_SIZE) ?
+ GEN5_RESP_LENGTH_SIZE : length;
+ if (length > GEN5_RESP_LENGTH_SIZE)
+ cyapa_i2c_pip_read(cyapa,
+ gen5_pip->irq_cmd_buf, length);
+
+ if (!(gen5_pip->resp_sort_func &&
+ gen5_pip->resp_sort_func(cyapa,
+ gen5_pip->irq_cmd_buf, length))) {
+ /*
+ * Work around a Gen5 V1 firmware issue: no interrupt
+ * is asserted to notify the host to read the command
+ * response when a finger keeps touching the trackpad
+ * while the command is being issued to the device.
+ * This typically happens when the user keeps fingers
+ * on the trackpad while booting or rebooting the
+ * Chromebook.
+ */
+ length = 0;
+ if (gen5_pip->resp_len)
+ length = *gen5_pip->resp_len;
+ cyapa_empty_pip_output_data(cyapa,
+ gen5_pip->resp_data,
+ &length,
+ gen5_pip->resp_sort_func);
+ if (gen5_pip->resp_len && length != 0) {
+ *gen5_pip->resp_len = length;
+ atomic_dec(&gen5_pip->cmd_issued);
+ complete(&gen5_pip->cmd_ready);
+ }
+ return false;
+ }
+
+ if (gen5_pip->resp_data && gen5_pip->resp_len) {
+ *gen5_pip->resp_len = (*gen5_pip->resp_len < length) ?
+ *gen5_pip->resp_len : length;
+ memcpy(gen5_pip->resp_data, gen5_pip->irq_cmd_buf,
+ *gen5_pip->resp_len);
+ }
+ atomic_dec(&gen5_pip->cmd_issued);
+ complete(&gen5_pip->cmd_ready);
+ return false;
+ }
+
+ return true;
+}
+
+static void cyapa_gen5_report_buttons(struct cyapa *cyapa,
+ const struct cyapa_gen5_report_data *report_data)
+{
+ struct input_dev *input = cyapa->input;
+ u8 buttons = report_data->report_head[GEN5_BUTTONS_OFFSET];
+
+ buttons = (buttons << CAPABILITY_BTN_SHIFT) & CAPABILITY_BTN_MASK;
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK) {
+ input_report_key(input, BTN_LEFT,
+ !!(buttons & CAPABILITY_LEFT_BTN_MASK));
+ }
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK) {
+ input_report_key(input, BTN_MIDDLE,
+ !!(buttons & CAPABILITY_MIDDLE_BTN_MASK));
+ }
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK) {
+ input_report_key(input, BTN_RIGHT,
+ !!(buttons & CAPABILITY_RIGHT_BTN_MASK));
+ }
+
+ input_sync(input);
+}
+
+static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
+ const struct cyapa_gen5_touch_record *touch)
+{
+ struct input_dev *input = cyapa->input;
+ u8 event_id = GEN5_GET_EVENT_ID(touch->touch_tip_event_id);
+ int slot = GEN5_GET_TOUCH_ID(touch->touch_tip_event_id);
+ int x, y;
+
+ if (event_id == RECORD_EVENT_LIFTOFF)
+ return;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ x = (touch->x_hi << 8) | touch->x_lo;
+ if (cyapa->x_origin)
+ x = cyapa->max_abs_x - x;
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ y = (touch->y_hi << 8) | touch->y_lo;
+ if (cyapa->y_origin)
+ y = cyapa->max_abs_y - y;
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_PRESSURE,
+ touch->z);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ touch->major_axis_len);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ touch->minor_axis_len);
+
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ touch->major_tool_len);
+ input_report_abs(input, ABS_MT_WIDTH_MINOR,
+ touch->minor_tool_len);
+
+ input_report_abs(input, ABS_MT_ORIENTATION,
+ touch->orientation);
+}
+
+static void cyapa_gen5_report_touches(struct cyapa *cyapa,
+ const struct cyapa_gen5_report_data *report_data)
+{
+ struct input_dev *input = cyapa->input;
+ unsigned int touch_num;
+ int i;
+
+ touch_num = report_data->report_head[GEN5_NUMBER_OF_TOUCH_OFFSET] &
+ GEN5_NUMBER_OF_TOUCH_MASK;
+
+ for (i = 0; i < touch_num; i++)
+ cyapa_gen5_report_slot_data(cyapa,
+ &report_data->touch_records[i]);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct cyapa_gen5_report_data report_data;
+ int ret;
+ u8 report_id;
+ unsigned int report_len;
+
+ if (cyapa->gen != CYAPA_GEN5 ||
+ cyapa->state != CYAPA_STATE_GEN5_APP) {
+ dev_err(dev, "invalid device state, gen=%d, state=0x%02x\n",
+ cyapa->gen, cyapa->state);
+ return -EINVAL;
+ }
+
+ ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data,
+ GEN5_RESP_LENGTH_SIZE);
+ if (ret != GEN5_RESP_LENGTH_SIZE) {
+ dev_err(dev, "failed to read length bytes, (%d)\n", ret);
+ return -EINVAL;
+ }
+
+ report_len = get_unaligned_le16(
+ &report_data.report_head[GEN5_RESP_LENGTH_OFFSET]);
+ if (report_len < GEN5_RESP_LENGTH_SIZE) {
+ /* Invalid length or an internal reset happened. */
+ dev_err(dev, "invalid report_len=%d. bytes: %02x %02x\n",
+ report_len, report_data.report_head[0],
+ report_data.report_head[1]);
+ return -EINVAL;
+ }
+
+ /* Idle, no data for report. */
+ if (report_len == GEN5_RESP_LENGTH_SIZE)
+ return 0;
+
+ ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data, report_len);
+ if (ret != report_len) {
+ dev_err(dev, "failed to read %d bytes report data, (%d)\n",
+ report_len, ret);
+ return -EINVAL;
+ }
+
+ report_id = report_data.report_head[GEN5_RESP_REPORT_ID_OFFSET];
+ if (report_id == GEN5_WAKEUP_EVENT_REPORT_ID &&
+ report_len == GEN5_WAKEUP_EVENT_SIZE) {
+ /*
+ * Device wake event from deep sleep mode for touch.
+ * This interrupt event is used to wake system up.
+ */
+ return 0;
+ } else if (report_id != GEN5_TOUCH_REPORT_ID &&
+ report_id != GEN5_BTN_REPORT_ID &&
+ report_id != GEN5_OLD_PUSH_BTN_REPORT_ID &&
+ report_id != GEN5_PUSH_BTN_REPORT_ID) {
+ /* Running in BL mode or unknown response data read. */
+ dev_err(dev, "invalid report_id=0x%02x\n", report_id);
+ return -EINVAL;
+ }
+
+ if (report_id == GEN5_TOUCH_REPORT_ID &&
+ (report_len < GEN5_TOUCH_REPORT_HEAD_SIZE ||
+ report_len > GEN5_TOUCH_REPORT_MAX_SIZE)) {
+ /* Invalid report data length for finger packet. */
+ dev_err(dev, "invalid touch packet length=%d\n", report_len);
+ return 0;
+ }
+
+ if ((report_id == GEN5_BTN_REPORT_ID ||
+ report_id == GEN5_OLD_PUSH_BTN_REPORT_ID ||
+ report_id == GEN5_PUSH_BTN_REPORT_ID) &&
+ (report_len < GEN5_BTN_REPORT_HEAD_SIZE ||
+ report_len > GEN5_BTN_REPORT_MAX_SIZE)) {
+ /* Invalid report data length of button packet. */
+ dev_err(dev, "invalid button packet length=%d\n", report_len);
+ return 0;
+ }
+
+ if (report_id == GEN5_TOUCH_REPORT_ID)
+ cyapa_gen5_report_touches(cyapa, &report_data);
+ else
+ cyapa_gen5_report_buttons(cyapa, &report_data);
+
+ return 0;
+}
+
+static int cyapa_gen5_bl_activate(struct cyapa *cyapa) { return 0; }
+static int cyapa_gen5_bl_deactivate(struct cyapa *cyapa) { return 0; }
+
+const struct cyapa_dev_ops cyapa_gen5_ops = {
+ .check_fw = cyapa_gen5_check_fw,
+ .bl_enter = cyapa_gen5_bl_enter,
+ .bl_initiate = cyapa_gen5_bl_initiate,
+ .update_fw = cyapa_gen5_do_fw_update,
+ .bl_activate = cyapa_gen5_bl_activate,
+ .bl_deactivate = cyapa_gen5_bl_deactivate,
+
+ .show_baseline = cyapa_gen5_show_baseline,
+ .calibrate_store = cyapa_gen5_do_calibrate,
+
+ .initialize = cyapa_gen5_initialize,
+
+ .state_parse = cyapa_gen5_state_parse,
+ .operational_check = cyapa_gen5_do_operational_check,
+
+ .irq_handler = cyapa_gen5_irq_handler,
+ .irq_cmd_handler = cyapa_gen5_irq_cmd_handler,
+ .sort_empty_output_data = cyapa_empty_pip_output_data,
+ .set_power_mode = cyapa_gen5_set_power_mode,
+};
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 8af34ffe208b..9118a1861a45 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -538,7 +538,7 @@ static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
pos[i].y = contact->y;
}
- input_mt_assign_slots(input, slots, pos, n);
+ input_mt_assign_slots(input, slots, pos, n, 0);
for (i = 0; i < n; i++) {
contact = &report_data.contacts[i];
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 2e838626205f..e100c1b31597 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -4,7 +4,6 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.5
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -33,8 +32,9 @@
#define ETP_FW_IAP_PAGE_ERR (1 << 5)
#define ETP_FW_IAP_INTF_ERR (1 << 4)
#define ETP_FW_PAGE_SIZE 64
-#define ETP_FW_PAGE_COUNT 768
-#define ETP_FW_SIZE (ETP_FW_PAGE_SIZE * ETP_FW_PAGE_COUNT)
+#define ETP_FW_VAILDPAGE_COUNT 768
+#define ETP_FW_SIGNATURE_SIZE 6
+#define ETP_FW_SIGNATURE_ADDRESS 0xBFFA
struct i2c_client;
struct completion;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0cb2be48d537..7ce8bfe22d7e 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,7 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.5
+ * Version: 1.5.6
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +40,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.5.5"
+#define ELAN_DRIVER_VERSION "1.5.6"
#define ETP_PRESSURE_OFFSET 25
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
@@ -312,7 +312,7 @@ static int __elan_update_firmware(struct elan_tp_data *data,
iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
- for (i = boot_page_count; i < ETP_FW_PAGE_COUNT; i++) {
+ for (i = boot_page_count; i < ETP_FW_VAILDPAGE_COUNT; i++) {
u16 checksum = 0;
const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
@@ -434,10 +434,11 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct elan_tp_data *data = i2c_get_clientdata(client);
+ struct elan_tp_data *data = dev_get_drvdata(dev);
const struct firmware *fw;
int error;
+ const u8 *fw_signature;
+ static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
error = request_firmware(&fw, ETP_FW_NAME, dev);
if (error) {
@@ -446,10 +447,12 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
return error;
}
- /* Firmware must be exactly PAGE_NUM * PAGE_SIZE bytes */
- if (fw->size != ETP_FW_SIZE) {
- dev_err(dev, "invalid firmware size = %zu, expected %d.\n",
- fw->size, ETP_FW_SIZE);
+ /* Firmware file must match signature data */
+ fw_signature = &fw->data[ETP_FW_SIGNATURE_ADDRESS];
+ if (memcmp(fw_signature, signature, sizeof(signature)) != 0) {
+ dev_err(dev, "signature mismatch (expected %*ph, got %*ph)\n",
+ (int)sizeof(signature), signature,
+ (int)sizeof(signature), fw_signature);
error = -EBADF;
goto out_release_fw;
}
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 97d4937fc244..029941f861af 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -4,7 +4,6 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.5
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 359bf8583d54..06a2bcd1cda2 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -4,7 +4,6 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.5
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -71,7 +70,7 @@ static int elan_smbus_initialize(struct i2c_client *client)
/* compare hello packet */
if (memcmp(values, check, ETP_SMBUS_HELLOPACKET_LEN)) {
- dev_err(&client->dev, "hello packet fail [%*px]\n",
+ dev_err(&client->dev, "hello packet fail [%*ph]\n",
ETP_SMBUS_HELLOPACKET_LEN, values);
return -ENXIO;
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index f2b978026407..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+ },
+ },
#endif
{ }
};
@@ -1520,6 +1536,8 @@ static int elantech_set_properties(struct elantech_data *etd)
case 7:
case 8:
case 9:
+ case 10:
+ case 13:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index f4d657ee1cc0..fca38ba63bbe 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -2,6 +2,7 @@
* Focaltech TouchPad PS/2 mouse driver
*
* Copyright (c) 2014 Red Hat Inc.
+ * Copyright (c) 2014 Mathias Gottschlag <mgottschlag@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,15 +14,14 @@
* Hans de Goede <hdegoede@redhat.com>
*/
-/*
- * The Focaltech PS/2 touchpad protocol is unknown. This drivers deals with
- * detection only, to avoid further detection attempts confusing the touchpad
- * this way it at least works in PS/2 mouse compatibility mode.
- */
#include <linux/device.h>
#include <linux/libps2.h>
+#include <linux/input/mt.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
#include "psmouse.h"
+#include "focaltech.h"
static const char * const focaltech_pnp_ids[] = {
"FLT0101",
@@ -30,6 +30,12 @@ static const char * const focaltech_pnp_ids[] = {
NULL
};
+/*
+ * Even if the kernel is built without support for Focaltech PS/2 touchpads (or
+ * when the real driver fails to recognize the device), we still have to detect
+ * them in order to avoid further detection attempts confusing the touchpad.
+ * This way it at least works in PS/2 mouse compatibility mode.
+ */
int focaltech_detect(struct psmouse *psmouse, bool set_properties)
{
if (!psmouse_matches_pnp_id(psmouse, focaltech_pnp_ids))
@@ -37,16 +43,404 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
if (set_properties) {
psmouse->vendor = "FocalTech";
- psmouse->name = "FocalTech Touchpad in mouse emulation mode";
+ psmouse->name = "FocalTech Touchpad";
}
return 0;
}
-int focaltech_init(struct psmouse *psmouse)
+static void focaltech_reset(struct psmouse *psmouse)
{
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
psmouse_reset(psmouse);
+}
+
+#ifdef CONFIG_MOUSE_PS2_FOCALTECH
+
+/*
+ * Packet types - the numbers are not consecutive, so we might be missing
+ * something here.
+ */
+#define FOC_TOUCH 0x3 /* bitmap of active fingers */
+#define FOC_ABS 0x6 /* absolute position of one finger */
+#define FOC_REL 0x9 /* relative position of 1-2 fingers */
+
+#define FOC_MAX_FINGERS 5
+
+#define FOC_MAX_X 2431
+#define FOC_MAX_Y 1663
+
+/*
+ * Current state of a single finger on the touchpad.
+ */
+struct focaltech_finger_state {
+ /* The touchpad has generated a touch event for the finger */
+ bool active;
+
+ /*
+ * The touchpad has sent position data for the finger. The
+ * flag is 0 when the finger is not active, and there is a
+ * time between the first touch event for the finger and the
+ * following absolute position packet for the finger where the
+ * touchpad has declared the finger to be valid, but we do not
+ * have any valid position yet.
+ */
+ bool valid;
+
+ /*
+ * Absolute position (from the bottom left corner) of the
+ * finger.
+ */
+ unsigned int x;
+ unsigned int y;
+};
+
+/*
+ * Description of the current state of the touchpad hardware.
+ */
+struct focaltech_hw_state {
+ /*
+ * The touchpad tracks the positions of the fingers for us,
+ * the array indices correspond to the finger indices returned
+ * in the report packages.
+ */
+ struct focaltech_finger_state fingers[FOC_MAX_FINGERS];
+
+ /* True if the clickpad has been pressed. */
+ bool pressed;
+};
+
+struct focaltech_data {
+ unsigned int x_max, y_max;
+ struct focaltech_hw_state state;
+};
+
+static void focaltech_report_state(struct psmouse *psmouse)
+{
+ struct focaltech_data *priv = psmouse->private;
+ struct focaltech_hw_state *state = &priv->state;
+ struct input_dev *dev = psmouse->dev;
+ int i;
+
+ for (i = 0; i < FOC_MAX_FINGERS; i++) {
+ struct focaltech_finger_state *finger = &state->fingers[i];
+ bool active = finger->active && finger->valid;
+
+ input_mt_slot(dev, i);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+ if (active) {
+ input_report_abs(dev, ABS_MT_POSITION_X, finger->x);
+ input_report_abs(dev, ABS_MT_POSITION_Y,
+ FOC_MAX_Y - finger->y);
+ }
+ }
+ input_mt_report_pointer_emulation(dev, true);
+
+ input_report_key(psmouse->dev, BTN_LEFT, state->pressed);
+ input_sync(psmouse->dev);
+}
+
+static void focaltech_process_touch_packet(struct psmouse *psmouse,
+ unsigned char *packet)
+{
+ struct focaltech_data *priv = psmouse->private;
+ struct focaltech_hw_state *state = &priv->state;
+ unsigned char fingers = packet[1];
+ int i;
+
+ state->pressed = (packet[0] >> 4) & 1;
+
+ /* the second byte contains a bitmap of all fingers touching the pad */
+ for (i = 0; i < FOC_MAX_FINGERS; i++) {
+ state->fingers[i].active = fingers & 0x1;
+ if (!state->fingers[i].active) {
+ /*
+ * Even when the finger becomes active again, we still
+ * will have to wait for the first valid position.
+ */
+ state->fingers[i].valid = false;
+ }
+ fingers >>= 1;
+ }
+}
+
+static void focaltech_process_abs_packet(struct psmouse *psmouse,
+ unsigned char *packet)
+{
+ struct focaltech_data *priv = psmouse->private;
+ struct focaltech_hw_state *state = &priv->state;
+ unsigned int finger;
+
+ finger = (packet[1] >> 4) - 1;
+ if (finger >= FOC_MAX_FINGERS) {
+ psmouse_err(psmouse, "Invalid finger in abs packet: %d\n",
+ finger);
+ return;
+ }
+
+ state->pressed = (packet[0] >> 4) & 1;
+
+ /*
+ * packet[5] contains some kind of tool size in the most
+ * significant nibble. 0xff is a special value (latching) that
+ * signals a large contact area.
+ */
+ if (packet[5] == 0xff) {
+ state->fingers[finger].valid = false;
+ return;
+ }
+
+ state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2];
+ state->fingers[finger].y = (packet[3] << 8) | packet[4];
+ state->fingers[finger].valid = true;
+}
+
+static void focaltech_process_rel_packet(struct psmouse *psmouse,
+ unsigned char *packet)
+{
+ struct focaltech_data *priv = psmouse->private;
+ struct focaltech_hw_state *state = &priv->state;
+ int finger1, finger2;
+
+ state->pressed = packet[0] >> 7;
+ finger1 = ((packet[0] >> 4) & 0x7) - 1;
+ if (finger1 >= 0 && finger1 < FOC_MAX_FINGERS) {
+ state->fingers[finger1].x += (char)packet[1];
+ state->fingers[finger1].y += (char)packet[2];
+ } else {
+ psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
+ finger1);
+ }
+
+ /*
+ * If there is an odd number of fingers, the last relative
+ * packet only contains one finger. In this case, the second
+ * finger index in the packet is 0; after subtracting 1 to form
+ * the array index it becomes negative and is skipped by the
+ * range check below.
+ */
+ finger2 = ((packet[3] >> 4) & 0x7) - 1;
+ if (finger2 >= 0 && finger2 < FOC_MAX_FINGERS) {
+ state->fingers[finger2].x += (char)packet[4];
+ state->fingers[finger2].y += (char)packet[5];
+ }
+}
+
+static void focaltech_process_packet(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ switch (packet[0] & 0xf) {
+ case FOC_TOUCH:
+ focaltech_process_touch_packet(psmouse, packet);
+ break;
+
+ case FOC_ABS:
+ focaltech_process_abs_packet(psmouse, packet);
+ break;
+
+ case FOC_REL:
+ focaltech_process_rel_packet(psmouse, packet);
+ break;
+
+ default:
+ psmouse_err(psmouse, "Unknown packet type: %02x\n", packet[0]);
+ break;
+ }
+
+ focaltech_report_state(psmouse);
+}
+
+static psmouse_ret_t focaltech_process_byte(struct psmouse *psmouse)
+{
+ if (psmouse->pktcnt >= 6) { /* Full packet received */
+ focaltech_process_packet(psmouse);
+ return PSMOUSE_FULL_PACKET;
+ }
+
+ /*
+ * We might want to do some validation of the data here, but
+ * we do not know the protocol well enough.
+ */
+ return PSMOUSE_GOOD_DATA;
+}
+
+static int focaltech_switch_protocol(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[3];
+
+ param[0] = 0;
+ if (ps2_command(ps2dev, param, 0x10f8))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, 0x10f8))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, 0x10f8))
+ return -EIO;
+
+ param[0] = 1;
+ if (ps2_command(ps2dev, param, 0x10f8))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETSCALE11))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_ENABLE))
+ return -EIO;
+
+ return 0;
+}
+
+static void focaltech_disconnect(struct psmouse *psmouse)
+{
+ focaltech_reset(psmouse);
+ kfree(psmouse->private);
+ psmouse->private = NULL;
+}
+
+static int focaltech_reconnect(struct psmouse *psmouse)
+{
+ int error;
+
+ focaltech_reset(psmouse);
+
+ error = focaltech_switch_protocol(psmouse);
+ if (error) {
+ psmouse_err(psmouse, "Unable to initialize the device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static void focaltech_set_input_params(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct focaltech_data *priv = psmouse->private;
+
+ /*
+ * Undo part of setup done for us by psmouse core since touchpad
+ * is not a relative device.
+ */
+ __clear_bit(EV_REL, dev->evbit);
+ __clear_bit(REL_X, dev->relbit);
+ __clear_bit(REL_Y, dev->relbit);
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+
+ /*
+ * Now set up our capabilities.
+ */
+ __set_bit(EV_ABS, dev->evbit);
+ input_set_abs_params(dev, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
+ input_mt_init_slots(dev, 5, INPUT_MT_POINTER);
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+}
+
+static int focaltech_read_register(struct ps2dev *ps2dev, int reg,
+ unsigned char *param)
+{
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETSCALE11))
+ return -EIO;
+
+ param[0] = 0;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -EIO;
+
+ param[0] = reg;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -EIO;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -EIO;
+
+ return 0;
+}
+
+static int focaltech_read_size(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct focaltech_data *priv = psmouse->private;
+ char param[3];
+
+ if (focaltech_read_register(ps2dev, 2, param))
+ return -EIO;
+
+ /* not sure whether this is 100% correct */
+ priv->x_max = (unsigned char)param[1] * 128;
+ priv->y_max = (unsigned char)param[2] * 128;
+
+ return 0;
+}
+int focaltech_init(struct psmouse *psmouse)
+{
+ struct focaltech_data *priv;
+ int error;
+
+ psmouse->private = priv = kzalloc(sizeof(struct focaltech_data),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ focaltech_reset(psmouse);
+
+ error = focaltech_read_size(psmouse);
+ if (error) {
+ psmouse_err(psmouse,
+ "Unable to read the size of the touchpad\n");
+ goto fail;
+ }
+
+ error = focaltech_switch_protocol(psmouse);
+ if (error) {
+ psmouse_err(psmouse, "Unable to initialize the device\n");
+ goto fail;
+ }
+
+ focaltech_set_input_params(psmouse);
+
+ psmouse->protocol_handler = focaltech_process_byte;
+ psmouse->pktsize = 6;
+ psmouse->disconnect = focaltech_disconnect;
+ psmouse->reconnect = focaltech_reconnect;
+ psmouse->cleanup = focaltech_reset;
+ /* resync is not supported yet */
+ psmouse->resync_time = 0;
return 0;
+
+fail:
+ focaltech_reset(psmouse);
+ kfree(priv);
+ return error;
}
+
+bool focaltech_supported(void)
+{
+ return true;
+}
+
+#else /* CONFIG_MOUSE_PS2_FOCALTECH */
+
+int focaltech_init(struct psmouse *psmouse)
+{
+ focaltech_reset(psmouse);
+
+ return 0;
+}
+
+bool focaltech_supported(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_MOUSE_PS2_FOCALTECH */
diff --git a/drivers/input/mouse/focaltech.h b/drivers/input/mouse/focaltech.h
index 498650c61e28..71870a9b548a 100644
--- a/drivers/input/mouse/focaltech.h
+++ b/drivers/input/mouse/focaltech.h
@@ -2,6 +2,7 @@
* Focaltech TouchPad PS/2 mouse driver
*
* Copyright (c) 2014 Red Hat Inc.
+ * Copyright (c) 2014 Mathias Gottschlag <mgottschlag@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,5 +19,6 @@
int focaltech_detect(struct psmouse *psmouse, bool set_properties);
int focaltech_init(struct psmouse *psmouse);
+bool focaltech_supported(void);
#endif
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 95a3a6e2faf6..68469feda470 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -725,16 +725,19 @@ static int psmouse_extensions(struct psmouse *psmouse,
/* Always check for focaltech, this is safe as it uses pnp-id matching */
if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
- if (!set_properties || focaltech_init(psmouse) == 0) {
- /*
- * Not supported yet, use bare protocol.
- * Note that we need to also restrict
- * psmouse_max_proto so that psmouse_initialize()
- * does not try to reset rate and resolution,
- * because even that upsets the device.
- */
- psmouse_max_proto = PSMOUSE_PS2;
- return PSMOUSE_PS2;
+ if (max_proto > PSMOUSE_IMEX) {
+ if (!set_properties || focaltech_init(psmouse) == 0) {
+ if (focaltech_supported())
+ return PSMOUSE_FOCALTECH;
+ /*
+ * Note that we need to also restrict
+ * psmouse_max_proto so that psmouse_initialize()
+ * does not try to reset rate and resolution,
+ * because even that upsets the device.
+ */
+ psmouse_max_proto = PSMOUSE_PS2;
+ return PSMOUSE_PS2;
+ }
}
}
@@ -1063,6 +1066,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.alias = "cortps",
.detect = cortron_detect,
},
+#ifdef CONFIG_MOUSE_PS2_FOCALTECH
+ {
+ .type = PSMOUSE_FOCALTECH,
+ .name = "FocalTechPS/2",
+ .alias = "focaltech",
+ .detect = focaltech_detect,
+ .init = focaltech_init,
+ },
+#endif
{
.type = PSMOUSE_AUTO,
.name = "auto",
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index f4cf664c7db3..c2ff137ecbdb 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -96,6 +96,7 @@ enum psmouse_type {
PSMOUSE_FSP,
PSMOUSE_SYNAPTICS_RELATIVE,
PSMOUSE_CYPRESS,
+ PSMOUSE_FOCALTECH,
PSMOUSE_AUTO /* This one should always be last */
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..7e705ee90b86 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,6 +67,9 @@
#define X_MAX_POSITIVE 8176
#define Y_MAX_POSITIVE 8176
+/* maximum ABS_MT_POSITION displacement (in mm) */
+#define DMAX 10
+
/*****************************************************************************
* Stuff we need even when we do not want native Synaptics support
****************************************************************************/
@@ -135,8 +138,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN0039",
- "LEN2002", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+ "LEN0039", "LEN2002", "LEN2004",
+ NULL},
1024, 5112, 2024, 4832
},
{
@@ -165,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
- "LEN0037",
+ "LEN0037", /* X1 Carbon 2nd */
"LEN0038",
"LEN0039", /* T440s */
"LEN0041",
@@ -574,14 +578,6 @@ static void synaptics_pt_create(struct psmouse *psmouse)
* Functions to interpret the absolute mode packets
****************************************************************************/
-static void synaptics_mt_state_set(struct synaptics_mt_state *state, int count,
- int sgm, int agm)
-{
- state->count = count;
- state->sgm = sgm;
- state->agm = agm;
-}
-
static void synaptics_parse_agm(const unsigned char buf[],
struct synaptics_data *priv,
struct synaptics_hw_state *hw)
@@ -600,16 +596,13 @@ static void synaptics_parse_agm(const unsigned char buf[],
break;
case 2:
- /* AGM-CONTACT packet: (count, sgm, agm) */
- synaptics_mt_state_set(&agm->mt_state, buf[1], buf[2], buf[4]);
+ /* AGM-CONTACT packet: we are only interested in the count */
+ priv->agm_count = buf[1];
break;
default:
break;
}
-
- /* Record that at least one AGM has been received since last SGM */
- priv->agm_pending = true;
}
static bool is_forcepad;
@@ -803,424 +796,68 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
}
-static void synaptics_report_slot(struct input_dev *dev, int slot,
- const struct synaptics_hw_state *hw)
-{
- input_mt_slot(dev, slot);
- input_mt_report_slot_state(dev, MT_TOOL_FINGER, (hw != NULL));
- if (!hw)
- return;
-
- input_report_abs(dev, ABS_MT_POSITION_X, hw->x);
- input_report_abs(dev, ABS_MT_POSITION_Y, synaptics_invert_y(hw->y));
- input_report_abs(dev, ABS_MT_PRESSURE, hw->z);
-}
-
static void synaptics_report_mt_data(struct psmouse *psmouse,
- struct synaptics_mt_state *mt_state,
- const struct synaptics_hw_state *sgm)
+ const struct synaptics_hw_state *sgm,
+ int num_fingers)
{
struct input_dev *dev = psmouse->dev;
struct synaptics_data *priv = psmouse->private;
- struct synaptics_hw_state *agm = &priv->agm;
- struct synaptics_mt_state *old = &priv->mt_state;
+ const struct synaptics_hw_state *hw[2] = { sgm, &priv->agm };
+ struct input_mt_pos pos[2];
+ int slot[2], nsemi, i;
- switch (mt_state->count) {
- case 0:
- synaptics_report_slot(dev, 0, NULL);
- synaptics_report_slot(dev, 1, NULL);
- break;
- case 1:
- if (mt_state->sgm == -1) {
- synaptics_report_slot(dev, 0, NULL);
- synaptics_report_slot(dev, 1, NULL);
- } else if (mt_state->sgm == 0) {
- synaptics_report_slot(dev, 0, sgm);
- synaptics_report_slot(dev, 1, NULL);
- } else {
- synaptics_report_slot(dev, 0, NULL);
- synaptics_report_slot(dev, 1, sgm);
- }
- break;
- default:
- /*
- * If the finger slot contained in SGM is valid, and either
- * hasn't changed, or is new, or the old SGM has now moved to
- * AGM, then report SGM in MTB slot 0.
- * Otherwise, empty MTB slot 0.
- */
- if (mt_state->sgm != -1 &&
- (mt_state->sgm == old->sgm ||
- old->sgm == -1 || mt_state->agm == old->sgm))
- synaptics_report_slot(dev, 0, sgm);
- else
- synaptics_report_slot(dev, 0, NULL);
+ nsemi = clamp_val(num_fingers, 0, 2);
- /*
- * If the finger slot contained in AGM is valid, and either
- * hasn't changed, or is new, then report AGM in MTB slot 1.
- * Otherwise, empty MTB slot 1.
- *
- * However, in the case where the AGM is new, make sure that
- * that it is either the same as the old SGM, or there was no
- * SGM.
- *
- * Otherwise, if the SGM was just 1, and the new AGM is 2, then
- * the new AGM will keep the old SGM's tracking ID, which can
- * cause apparent drumroll. This happens if in the following
- * valid finger sequence:
- *
- * Action SGM AGM (MTB slot:Contact)
- * 1. Touch contact 0 (0:0)
- * 2. Touch contact 1 (0:0, 1:1)
- * 3. Lift contact 0 (1:1)
- * 4. Touch contacts 2,3 (0:2, 1:3)
- *
- * In step 4, contact 3, in AGM must not be given the same
- * tracking ID as contact 1 had in step 3. To avoid this,
- * the first agm with contact 3 is dropped and slot 1 is
- * invalidated (tracking ID = -1).
- */
- if (mt_state->agm != -1 &&
- (mt_state->agm == old->agm ||
- (old->agm == -1 &&
- (old->sgm == -1 || mt_state->agm == old->sgm))))
- synaptics_report_slot(dev, 1, agm);
- else
- synaptics_report_slot(dev, 1, NULL);
- break;
+ for (i = 0; i < nsemi; i++) {
+ pos[i].x = hw[i]->x;
+ pos[i].y = synaptics_invert_y(hw[i]->y);
}
+ input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res);
+
+ for (i = 0; i < nsemi; i++) {
+ input_mt_slot(dev, slot[i]);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
+ input_report_abs(dev, ABS_MT_POSITION_X, pos[i].x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, pos[i].y);
+ input_report_abs(dev, ABS_MT_PRESSURE, hw[i]->z);
+ }
+
+ input_mt_drop_unused(dev);
+
/* Don't use active slot count to generate BTN_TOOL events. */
input_mt_report_pointer_emulation(dev, false);
/* Send the number of fingers reported by touchpad itself. */
- input_mt_report_finger_count(dev, mt_state->count);
+ input_mt_report_finger_count(dev, num_fingers);
synaptics_report_buttons(psmouse, sgm);
input_sync(dev);
}
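/*
 * Note on DMAX above: x_res is reported by the pad in units/mm, so
 * DMAX * priv->x_res converts the 10 mm limit into device units for
 * input_mt_assign_slots(). For a hypothetical pad reporting
 * 40 units/mm this allows roughly 10 * 40 = 400 units of movement per
 * frame before a contact is considered a new slot.
 */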
-/* Handle case where mt_state->count = 0 */
-static void synaptics_image_sensor_0f(struct synaptics_data *priv,
- struct synaptics_mt_state *mt_state)
-{
- synaptics_mt_state_set(mt_state, 0, -1, -1);
- priv->mt_state_lost = false;
-}
-
-/* Handle case where mt_state->count = 1 */
-static void synaptics_image_sensor_1f(struct synaptics_data *priv,
- struct synaptics_mt_state *mt_state)
-{
- struct synaptics_hw_state *agm = &priv->agm;
- struct synaptics_mt_state *old = &priv->mt_state;
-
- /*
- * If the last AGM was (0,0,0), and there is only one finger left,
- * then we absolutely know that SGM contains slot 0, and all other
- * fingers have been removed.
- */
- if (priv->agm_pending && agm->z == 0) {
- synaptics_mt_state_set(mt_state, 1, 0, -1);
- priv->mt_state_lost = false;
- return;
- }
-
- switch (old->count) {
- case 0:
- synaptics_mt_state_set(mt_state, 1, 0, -1);
- break;
- case 1:
- /*
- * If mt_state_lost, then the previous transition was 3->1,
- * and SGM now contains either slot 0 or 1, but we don't know
- * which. So, we just assume that the SGM now contains slot 1.
- *
- * If pending AGM and either:
- * (a) the previous SGM slot contains slot 0, or
- * (b) there was no SGM slot
- * then, the SGM now contains slot 1
- *
- * Case (a) happens with very rapid "drum roll" gestures, where
- * slot 0 finger is lifted and a new slot 1 finger touches
- * within one reporting interval.
- *
- * Case (b) happens if initially two or more fingers tap
- * briefly, and all but one lift before the end of the first
- * reporting interval.
- *
- * (In both these cases, slot 0 will becomes empty, so SGM
- * contains slot 1 with the new finger)
- *
- * Else, if there was no previous SGM, it now contains slot 0.
- *
- * Otherwise, SGM still contains the same slot.
- */
- if (priv->mt_state_lost ||
- (priv->agm_pending && old->sgm <= 0))
- synaptics_mt_state_set(mt_state, 1, 1, -1);
- else if (old->sgm == -1)
- synaptics_mt_state_set(mt_state, 1, 0, -1);
- break;
- case 2:
- /*
- * If mt_state_lost, we don't know which finger SGM contains.
- *
- * So, report 1 finger, but with both slots empty.
- * We will use slot 1 on subsequent 1->1
- */
- if (priv->mt_state_lost) {
- synaptics_mt_state_set(mt_state, 1, -1, -1);
- break;
- }
- /*
- * Since the last AGM was NOT (0,0,0), it was the finger in
- * slot 0 that has been removed.
- * So, SGM now contains previous AGM's slot, and AGM is now
- * empty.
- */
- synaptics_mt_state_set(mt_state, 1, old->agm, -1);
- break;
- case 3:
- /*
- * Since last AGM was not (0,0,0), we don't know which finger
- * is left.
- *
- * So, report 1 finger, but with both slots empty.
- * We will use slot 1 on subsequent 1->1
- */
- synaptics_mt_state_set(mt_state, 1, -1, -1);
- priv->mt_state_lost = true;
- break;
- case 4:
- case 5:
- /* mt_state was updated by AGM-CONTACT packet */
- break;
- }
-}
-
-/* Handle case where mt_state->count = 2 */
-static void synaptics_image_sensor_2f(struct synaptics_data *priv,
- struct synaptics_mt_state *mt_state)
-{
- struct synaptics_mt_state *old = &priv->mt_state;
-
- switch (old->count) {
- case 0:
- synaptics_mt_state_set(mt_state, 2, 0, 1);
- break;
- case 1:
- /*
- * If previous SGM contained slot 1 or higher, SGM now contains
- * slot 0 (the newly touching finger) and AGM contains SGM's
- * previous slot.
- *
- * Otherwise, SGM still contains slot 0 and AGM now contains
- * slot 1.
- */
- if (old->sgm >= 1)
- synaptics_mt_state_set(mt_state, 2, 0, old->sgm);
- else
- synaptics_mt_state_set(mt_state, 2, 0, 1);
- break;
- case 2:
- /*
- * If mt_state_lost, SGM now contains either finger 1 or 2, but
- * we don't know which.
- * So, we just assume that the SGM contains slot 0 and AGM 1.
- */
- if (priv->mt_state_lost)
- synaptics_mt_state_set(mt_state, 2, 0, 1);
- /*
- * Otherwise, use the same mt_state, since it either hasn't
- * changed, or was updated by a recently received AGM-CONTACT
- * packet.
- */
- break;
- case 3:
- /*
- * 3->2 transitions have two unsolvable problems:
- * 1) no indication is given which finger was removed
- * 2) no way to tell if agm packet was for finger 3
- * before 3->2, or finger 2 after 3->2.
- *
- * So, report 2 fingers, but empty all slots.
- * We will guess slots [0,1] on subsequent 2->2.
- */
- synaptics_mt_state_set(mt_state, 2, -1, -1);
- priv->mt_state_lost = true;
- break;
- case 4:
- case 5:
- /* mt_state was updated by AGM-CONTACT packet */
- break;
- }
-}
-
-/* Handle case where mt_state->count = 3 */
-static void synaptics_image_sensor_3f(struct synaptics_data *priv,
- struct synaptics_mt_state *mt_state)
-{
- struct synaptics_mt_state *old = &priv->mt_state;
-
- switch (old->count) {
- case 0:
- synaptics_mt_state_set(mt_state, 3, 0, 2);
- break;
- case 1:
- /*
- * If previous SGM contained slot 2 or higher, SGM now contains
- * slot 0 (one of the newly touching fingers) and AGM contains
- * SGM's previous slot.
- *
- * Otherwise, SGM now contains slot 0 and AGM contains slot 2.
- */
- if (old->sgm >= 2)
- synaptics_mt_state_set(mt_state, 3, 0, old->sgm);
- else
- synaptics_mt_state_set(mt_state, 3, 0, 2);
- break;
- case 2:
- /*
- * If the AGM previously contained slot 3 or higher, then the
- * newly touching finger is in the lowest available slot.
- *
- * If SGM was previously 1 or higher, then the new SGM is
- * now slot 0 (with a new finger), otherwise, the new finger
- * is now in a hidden slot between 0 and AGM's slot.
- *
- * In all such cases, the SGM now contains slot 0, and the AGM
- * continues to contain the same slot as before.
- */
- if (old->agm >= 3) {
- synaptics_mt_state_set(mt_state, 3, 0, old->agm);
- break;
- }
-
- /*
- * After some 3->1 and all 3->2 transitions, we lose track
- * of which slot is reported by SGM and AGM.
- *
- * For 2->3 in this state, report 3 fingers, but empty all
- * slots, and we will guess (0,2) on a subsequent 0->3.
- *
- * To userspace, the resulting transition will look like:
- * 2:[0,1] -> 3:[-1,-1] -> 3:[0,2]
- */
- if (priv->mt_state_lost) {
- synaptics_mt_state_set(mt_state, 3, -1, -1);
- break;
- }
-
- /*
- * If the (SGM,AGM) really previously contained slots (0, 1),
- * then we cannot know what slot was just reported by the AGM,
- * because the 2->3 transition can occur either before or after
- * the AGM packet. Thus, this most recent AGM could contain
- * either the same old slot 1 or the new slot 2.
- * Subsequent AGMs will be reporting slot 2.
- *
- * To userspace, the resulting transition will look like:
- * 2:[0,1] -> 3:[0,-1] -> 3:[0,2]
- */
- synaptics_mt_state_set(mt_state, 3, 0, -1);
- break;
- case 3:
- /*
- * If, for whatever reason, the previous agm was invalid,
- * Assume SGM now contains slot 0, AGM now contains slot 2.
- */
- if (old->agm <= 2)
- synaptics_mt_state_set(mt_state, 3, 0, 2);
- /*
- * mt_state either hasn't changed, or was updated by a recently
- * received AGM-CONTACT packet.
- */
- break;
-
- case 4:
- case 5:
- /* mt_state was updated by AGM-CONTACT packet */
- break;
- }
-}
-
-/* Handle case where mt_state->count = 4, or = 5 */
-static void synaptics_image_sensor_45f(struct synaptics_data *priv,
- struct synaptics_mt_state *mt_state)
-{
- /* mt_state was updated correctly by AGM-CONTACT packet */
- priv->mt_state_lost = false;
-}
-
static void synaptics_image_sensor_process(struct psmouse *psmouse,
struct synaptics_hw_state *sgm)
{
struct synaptics_data *priv = psmouse->private;
- struct synaptics_hw_state *agm = &priv->agm;
- struct synaptics_mt_state mt_state;
-
- /* Initialize using current mt_state (as updated by last agm) */
- mt_state = agm->mt_state;
+ int num_fingers;
/*
* Update mt_state using the new finger count and current mt_state.
*/
if (sgm->z == 0)
- synaptics_image_sensor_0f(priv, &mt_state);
+ num_fingers = 0;
else if (sgm->w >= 4)
- synaptics_image_sensor_1f(priv, &mt_state);
+ num_fingers = 1;
else if (sgm->w == 0)
- synaptics_image_sensor_2f(priv, &mt_state);
- else if (sgm->w == 1 && mt_state.count <= 3)
- synaptics_image_sensor_3f(priv, &mt_state);
+ num_fingers = 2;
+ else if (sgm->w == 1)
+ num_fingers = priv->agm_count ? priv->agm_count : 3;
else
- synaptics_image_sensor_45f(priv, &mt_state);
+ num_fingers = 4;
/* Send resulting input events to user space */
- synaptics_report_mt_data(psmouse, &mt_state, sgm);
-
- /* Store updated mt_state */
- priv->mt_state = agm->mt_state = mt_state;
- priv->agm_pending = false;
-}
-
-static void synaptics_profile_sensor_process(struct psmouse *psmouse,
- struct synaptics_hw_state *sgm,
- int num_fingers)
-{
- struct input_dev *dev = psmouse->dev;
- struct synaptics_data *priv = psmouse->private;
- struct synaptics_hw_state *hw[2] = { sgm, &priv->agm };
- struct input_mt_pos pos[2];
- int slot[2], nsemi, i;
-
- nsemi = clamp_val(num_fingers, 0, 2);
-
- for (i = 0; i < nsemi; i++) {
- pos[i].x = hw[i]->x;
- pos[i].y = synaptics_invert_y(hw[i]->y);
- }
-
- input_mt_assign_slots(dev, slot, pos, nsemi);
-
- for (i = 0; i < nsemi; i++) {
- input_mt_slot(dev, slot[i]);
- input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
- input_report_abs(dev, ABS_MT_POSITION_X, pos[i].x);
- input_report_abs(dev, ABS_MT_POSITION_Y, pos[i].y);
- input_report_abs(dev, ABS_MT_PRESSURE, hw[i]->z);
- }
-
- input_mt_drop_unused(dev);
- input_mt_report_pointer_emulation(dev, false);
- input_mt_report_finger_count(dev, num_fingers);
-
- synaptics_report_buttons(psmouse, sgm);
-
- input_sync(dev);
+ synaptics_report_mt_data(psmouse, sgm, num_fingers);
}
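/*
 * Example trace of the mapping above (numbers invented): a packet
 * with sgm->z > 0 and sgm->w == 1, preceded by an AGM-CONTACT packet
 * that reported a count of 3, yields num_fingers = 3; the SGM and AGM
 * coordinates fill two MT slots, and input_mt_report_finger_count()
 * still reports BTN_TOOL_TRIPLETAP so userspace sees that three
 * fingers are down.
 */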
/*
@@ -1287,7 +924,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
}
if (cr48_profile_sensor) {
- synaptics_profile_sensor_process(psmouse, &hw, num_fingers);
+ synaptics_report_mt_data(psmouse, &hw, num_fingers);
return;
}
@@ -1444,7 +1081,7 @@ static void set_input_params(struct psmouse *psmouse,
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
- input_mt_init_slots(dev, 2, INPUT_MT_POINTER);
+ input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 1bd01f21783b..6faf9bb7c117 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -119,16 +119,6 @@
#define SYN_REDUCED_FILTER_FUZZ 8
/*
- * A structure to describe which internal touchpad finger slots are being
- * reported in raw packets.
- */
-struct synaptics_mt_state {
- int count; /* num fingers being tracked */
- int sgm; /* which slot is reported by sgm pkt */
- int agm; /* which slot is reported by agm pkt*/
-};
-
-/*
* A structure to describe the state of the touchpad hardware (buttons and pad)
*/
struct synaptics_hw_state {
@@ -143,9 +133,6 @@ struct synaptics_hw_state {
unsigned int down:1;
unsigned char ext_buttons;
signed char scroll;
-
- /* As reported in last AGM-CONTACT packets */
- struct synaptics_mt_state mt_state;
};
struct synaptics_data {
@@ -170,15 +157,12 @@ struct synaptics_data {
struct serio *pt_port; /* Pass-through serio port */
- struct synaptics_mt_state mt_state; /* Current mt finger state */
- bool mt_state_lost; /* mt_state may be incorrect */
-
/*
* Last received Advanced Gesture Mode (AGM) packet. An AGM packet
* contains position data for a second contact, at half resolution.
*/
struct synaptics_hw_state agm;
- bool agm_pending; /* new AGM packet received */
+ unsigned int agm_count; /* finger count reported by agm */
/* ForcePad handling */
unsigned long press_start;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index bc2d47431bdc..77833d7a004b 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -281,4 +281,14 @@ config HYPERV_KEYBOARD
To compile this driver as a module, choose M here: the module will
be called hyperv_keyboard.
+config SERIO_SUN4I_PS2
+ tristate "Allwinner A10 PS/2 controller support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ This selects support for the PS/2 Host Controller on
+ Allwinner A10.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun4i-ps2.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 815d874fe724..c600089b7a34 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o
obj-$(CONFIG_SERIO_APBPS2) += apbps2.o
obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o
obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o
+obj-$(CONFIG_SERIO_SUN4I_PS2) += sun4i-ps2.o
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 8d9ba0c3827c..94ab494a6ade 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -40,7 +40,6 @@
MODULE_AUTHOR("Laurent Canet <canetl@esiee.fr>, Thibaut Varene <varenet@parisc-linux.org>, Helge Deller <deller@gmx.de>");
MODULE_DESCRIPTION("HP GSC PS2 port driver");
MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(parisc, gscps2_device_tbl);
#define PFX "gscps2.c: "
@@ -439,6 +438,7 @@ static struct parisc_device_id gscps2_device_tbl[] = {
#endif
{ 0, } /* 0 terminated list */
};
+MODULE_DEVICE_TABLE(parisc, gscps2_device_tbl);
static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c66d1b53843e..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Medion Akoya E7225 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
/* Blue FB5601 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
@@ -415,6 +423,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Acer Aspire 7738 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
@@ -745,6 +760,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{ }
};
+/*
+ * Some laptops need a keyboard reset before probing for the trackpad so
+ * that it gets detected, initialised and finally works.
+ */
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ {
+ /* Gigabyte P35 v2 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ },
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ },
+ {
+ /* Gigabyte P34 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ },
+ { }
+};
+
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@@ -1040,6 +1084,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
+ i8042_kbdreset = true;
+
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 924e4bf357fb..986a71c614b0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
module_param_named(notimeout, i8042_notimeout, bool, 0);
MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+static bool i8042_kbdreset;
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
+
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
return -1;
/*
+ * Reset keyboard (needed on some laptops to successfully detect
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
+ * touchpads).
+ */
+ if (i8042_kbdreset) {
+ pr_warn("Attempting to reset device connected to KBD port\n");
+ i8042_kbd_write(NULL, (unsigned char) 0xff);
+ }
+
+/*
* Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
* used it for a PCI card or something else.
*/
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
new file mode 100644
index 000000000000..04b96fe39339
--- /dev/null
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -0,0 +1,340 @@
+/*
+ * Driver for Allwinner A10 PS2 host controller
+ *
+ * Author: Vishnu Patekar <vishnupatekar0510@gmail.com>
+ * Aaron.maoye <leafy.myeh@newbietech.com>
+ */
+
+#include <linux/module.h>
+#include <linux/serio.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "sun4i-ps2"
+
+/* register offset definitions */
+#define PS2_REG_GCTL 0x00 /* PS2 Module Global Control Reg */
+#define PS2_REG_DATA 0x04 /* PS2 Module Data Reg */
+#define PS2_REG_LCTL 0x08 /* PS2 Module Line Control Reg */
+#define PS2_REG_LSTS 0x0C /* PS2 Module Line Status Reg */
+#define PS2_REG_FCTL 0x10 /* PS2 Module FIFO Control Reg */
+#define PS2_REG_FSTS 0x14 /* PS2 Module FIFO Status Reg */
+#define PS2_REG_CLKDR 0x18 /* PS2 Module Clock Divider Reg*/
+
+/* PS2 GLOBAL CONTROL REGISTER PS2_GCTL */
+#define PS2_GCTL_INTFLAG BIT(4)
+#define PS2_GCTL_INTEN BIT(3)
+#define PS2_GCTL_RESET BIT(2)
+#define PS2_GCTL_MASTER BIT(1)
+#define PS2_GCTL_BUSEN BIT(0)
+
+/* PS2 LINE CONTROL REGISTER */
+#define PS2_LCTL_NOACK BIT(18)
+#define PS2_LCTL_TXDTOEN BIT(8)
+#define PS2_LCTL_STOPERREN BIT(3)
+#define PS2_LCTL_ACKERREN BIT(2)
+#define PS2_LCTL_PARERREN BIT(1)
+#define PS2_LCTL_RXDTOEN BIT(0)
+
+/* PS2 LINE STATUS REGISTER */
+#define PS2_LSTS_TXTDO BIT(8)
+#define PS2_LSTS_STOPERR BIT(3)
+#define PS2_LSTS_ACKERR BIT(2)
+#define PS2_LSTS_PARERR BIT(1)
+#define PS2_LSTS_RXTDO BIT(0)
+
+#define PS2_LINE_ERROR_BIT \
+ (PS2_LSTS_TXTDO | PS2_LSTS_STOPERR | PS2_LSTS_ACKERR | \
+ PS2_LSTS_PARERR | PS2_LSTS_RXTDO)
+
+/* PS2 FIFO CONTROL REGISTER */
+#define PS2_FCTL_TXRST BIT(17)
+#define PS2_FCTL_RXRST BIT(16)
+#define PS2_FCTL_TXUFIEN BIT(10)
+#define PS2_FCTL_TXOFIEN BIT(9)
+#define PS2_FCTL_TXRDYIEN BIT(8)
+#define PS2_FCTL_RXUFIEN BIT(2)
+#define PS2_FCTL_RXOFIEN BIT(1)
+#define PS2_FCTL_RXRDYIEN BIT(0)
+
+/* PS2 FIFO STATUS REGISTER */
+#define PS2_FSTS_TXUF BIT(10)
+#define PS2_FSTS_TXOF BIT(9)
+#define PS2_FSTS_TXRDY BIT(8)
+#define PS2_FSTS_RXUF BIT(2)
+#define PS2_FSTS_RXOF BIT(1)
+#define PS2_FSTS_RXRDY BIT(0)
+
+#define PS2_FIFO_ERROR_BIT \
+ (PS2_FSTS_TXUF | PS2_FSTS_TXOF | PS2_FSTS_RXUF | PS2_FSTS_RXOF)
+
+#define PS2_SAMPLE_CLK 1000000
+#define PS2_SCLK 125000
+
+struct sun4i_ps2data {
+ struct serio *serio;
+ struct device *dev;
+
+ /* IO mapping base */
+ void __iomem *reg_base;
+
+ /* clock management */
+ struct clk *clk;
+
+ /* irq */
+ spinlock_t lock;
+ int irq;
+};
+
+static irqreturn_t sun4i_ps2_interrupt(int irq, void *dev_id)
+{
+ struct sun4i_ps2data *drvdata = dev_id;
+ u32 intr_status;
+ u32 fifo_status;
+ unsigned char byte;
+ unsigned int rxflags = 0;
+ u32 rval;
+
+ spin_lock(&drvdata->lock);
+
+ /* Get the PS/2 interrupts and clear them */
+ intr_status = readl(drvdata->reg_base + PS2_REG_LSTS);
+ fifo_status = readl(drvdata->reg_base + PS2_REG_FSTS);
+
+ /* Check line status register */
+ if (intr_status & PS2_LINE_ERROR_BIT) {
+ rxflags = (intr_status & PS2_LINE_ERROR_BIT) ? SERIO_FRAME : 0;
+ rxflags |= (intr_status & PS2_LSTS_PARERR) ? SERIO_PARITY : 0;
+ rxflags |= (intr_status & PS2_LSTS_PARERR) ? SERIO_TIMEOUT : 0;
+
+ rval = PS2_LSTS_TXTDO | PS2_LSTS_STOPERR | PS2_LSTS_ACKERR |
+ PS2_LSTS_PARERR | PS2_LSTS_RXTDO;
+ writel(rval, drvdata->reg_base + PS2_REG_LSTS);
+ }
+
+ /* Check FIFO status register */
+ if (fifo_status & PS2_FIFO_ERROR_BIT) {
+ rval = PS2_FSTS_TXUF | PS2_FSTS_TXOF | PS2_FSTS_TXRDY |
+ PS2_FSTS_RXUF | PS2_FSTS_RXOF | PS2_FSTS_RXRDY;
+ writel(rval, drvdata->reg_base + PS2_REG_FSTS);
+ }
+
+ rval = (fifo_status >> 16) & 0x3;
+ while (rval--) {
+ byte = readl(drvdata->reg_base + PS2_REG_DATA) & 0xff;
+ serio_interrupt(drvdata->serio, byte, rxflags);
+ }
+
+ writel(intr_status, drvdata->reg_base + PS2_REG_LSTS);
+ writel(fifo_status, drvdata->reg_base + PS2_REG_FSTS);
+
+ spin_unlock(&drvdata->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sun4i_ps2_open(struct serio *serio)
+{
+ struct sun4i_ps2data *drvdata = serio->port_data;
+ u32 src_clk = 0;
+ u32 clk_scdf;
+ u32 clk_pcdf;
+ u32 rval;
+ unsigned long flags;
+
+ /* Set line control and enable interrupt */
+ rval = PS2_LCTL_STOPERREN | PS2_LCTL_ACKERREN
+ | PS2_LCTL_PARERREN | PS2_LCTL_RXDTOEN;
+ writel(rval, drvdata->reg_base + PS2_REG_LCTL);
+
+ /* Reset FIFO */
+ rval = PS2_FCTL_TXRST | PS2_FCTL_RXRST | PS2_FCTL_TXUFIEN
+ | PS2_FCTL_TXOFIEN | PS2_FCTL_RXUFIEN
+ | PS2_FCTL_RXOFIEN | PS2_FCTL_RXRDYIEN;
+
+ writel(rval, drvdata->reg_base + PS2_REG_FCTL);
+
+ src_clk = clk_get_rate(drvdata->clk);
+ /* Set clock divider register */
+ clk_scdf = src_clk / PS2_SAMPLE_CLK - 1;
+ clk_pcdf = PS2_SAMPLE_CLK / PS2_SCLK - 1;
+ rval = (clk_scdf << 8) | clk_pcdf;
+ writel(rval, drvdata->reg_base + PS2_REG_CLKDR);
+
+ /* Set global control register */
+ rval = PS2_GCTL_RESET | PS2_GCTL_INTEN | PS2_GCTL_MASTER
+ | PS2_GCTL_BUSEN;
+
+ spin_lock_irqsave(&drvdata->lock, flags);
+ writel(rval, drvdata->reg_base + PS2_REG_GCTL);
+ spin_unlock_irqrestore(&drvdata->lock, flags);
+
+ return 0;
+}
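/*
 * Worked example of the divider setup above, assuming a 24 MHz bus
 * clock feeding the block (the real rate comes from clk_get_rate()
 * and depends on the SoC configuration):
 *
 *   clk_scdf = 24000000 / 1000000 - 1 = 23
 *   clk_pcdf = 1000000 / 125000 - 1   = 7
 *   rval     = (23 << 8) | 7          = 0x1707
 *
 * which nominally gives a 1 MHz sample clock and a 125 kHz PS/2 SCLK.
 */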
+
+static void sun4i_ps2_close(struct serio *serio)
+{
+ struct sun4i_ps2data *drvdata = serio->port_data;
+ u32 rval;
+
+ /* Shut off the interrupt */
+ rval = readl(drvdata->reg_base + PS2_REG_GCTL);
+ writel(rval & ~(PS2_GCTL_INTEN), drvdata->reg_base + PS2_REG_GCTL);
+
+ synchronize_irq(drvdata->irq);
+}
+
+static int sun4i_ps2_write(struct serio *serio, unsigned char val)
+{
+ unsigned long expire = jiffies + msecs_to_jiffies(10000);
+ struct sun4i_ps2data *drvdata = serio->port_data;
+
+ do {
+ if (readl(drvdata->reg_base + PS2_REG_FSTS) & PS2_FSTS_TXRDY) {
+ writel(val, drvdata->reg_base + PS2_REG_DATA);
+ return 0;
+ }
+ } while (time_before(jiffies, expire));
+
+ return SERIO_TIMEOUT;
+}
+
+static int sun4i_ps2_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct sun4i_ps2data *drvdata;
+ struct serio *serio;
+ struct device *dev = &pdev->dev;
+ unsigned int irq;
+ int error;
+
+ drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!drvdata || !serio) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ spin_lock_init(&drvdata->lock);
+
+ /* IO */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to locate registers\n");
+ error = -ENXIO;
+ goto err_free_mem;
+ }
+
+ drvdata->reg_base = ioremap(res->start, resource_size(res));
+ if (!drvdata->reg_base) {
+ dev_err(dev, "failed to map registers\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ drvdata->clk = clk_get(dev, NULL);
+ if (IS_ERR(drvdata->clk)) {
+ error = PTR_ERR(drvdata->clk);
+ dev_err(dev, "couldn't get clock %d\n", error);
+ goto err_ioremap;
+ }
+
+ error = clk_prepare_enable(drvdata->clk);
+ if (error) {
+ dev_err(dev, "failed to enable clock %d\n", error);
+ goto err_clk;
+ }
+
+ serio->id.type = SERIO_8042;
+ serio->write = sun4i_ps2_write;
+ serio->open = sun4i_ps2_open;
+ serio->close = sun4i_ps2_close;
+ serio->port_data = drvdata;
+ serio->dev.parent = dev;
+ strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
+ strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));
+
+ /* shutoff interrupt */
+ writel(0, drvdata->reg_base + PS2_REG_GCTL);
+
+ /* Get IRQ for the device */
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(dev, "no IRQ found\n");
+ error = -ENXIO;
+ goto err_disable_clk;
+ }
+
+ drvdata->irq = irq;
+ drvdata->serio = serio;
+ drvdata->dev = dev;
+
+ error = request_irq(drvdata->irq, sun4i_ps2_interrupt, 0,
+ DRIVER_NAME, drvdata);
+ if (error) {
+ dev_err(drvdata->dev, "failed to allocate interrupt %d: %d\n",
+ drvdata->irq, error);
+ goto err_disable_clk;
+ }
+
+ serio_register_port(serio);
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0; /* success */
+
+err_disable_clk:
+ clk_disable_unprepare(drvdata->clk);
+err_clk:
+ clk_put(drvdata->clk);
+err_ioremap:
+ iounmap(drvdata->reg_base);
+err_free_mem:
+ kfree(serio);
+ kfree(drvdata);
+ return error;
+}
+
+static int sun4i_ps2_remove(struct platform_device *pdev)
+{
+ struct sun4i_ps2data *drvdata = platform_get_drvdata(pdev);
+
+ serio_unregister_port(drvdata->serio);
+
+ free_irq(drvdata->irq, drvdata);
+
+ clk_disable_unprepare(drvdata->clk);
+ clk_put(drvdata->clk);
+
+ iounmap(drvdata->reg_base);
+
+ kfree(drvdata);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_ps2_match[] = {
+ { .compatible = "allwinner,sun4i-a10-ps2", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, sun4i_ps2_match);
+
+static struct platform_driver sun4i_ps2_driver = {
+ .probe = sun4i_ps2_probe,
+ .remove = sun4i_ps2_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = sun4i_ps2_match,
+ },
+};
+module_platform_driver(sun4i_ps2_driver);
+
+MODULE_AUTHOR("Vishnu Patekar <vishnupatekar0510@gmail.com>");
+MODULE_AUTHOR("Aaron.maoye <leafy.myeh@newbietech.com>");
+MODULE_DESCRIPTION("Allwinner A10/Sun4i PS/2 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 858045694e9d..3a7f3a4a4396 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -59,7 +59,7 @@ Scott Hill shill@gtcocalcomp.com
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-
+#include <linux/bitops.h>
#include <linux/usb/input.h>
@@ -614,7 +614,6 @@ static void gtco_urb_callback(struct urb *urbinfo)
struct input_dev *inputdev;
int rc;
u32 val = 0;
- s8 valsigned = 0;
char le_buffer[2];
inputdev = device->inputdevice;
@@ -665,20 +664,11 @@ static void gtco_urb_callback(struct urb *urbinfo)
/* Fall thru */
case 4:
/* Tilt */
+ input_report_abs(inputdev, ABS_TILT_X,
+ sign_extend32(device->buffer[6], 6));
- /* Sign extend these 7 bit numbers. */
- if (device->buffer[6] & 0x40)
- device->buffer[6] |= 0x80;
-
- if (device->buffer[7] & 0x40)
- device->buffer[7] |= 0x80;
-
-
- valsigned = (device->buffer[6]);
- input_report_abs(inputdev, ABS_TILT_X, (s32)valsigned);
-
- valsigned = (device->buffer[7]);
- input_report_abs(inputdev, ABS_TILT_Y, (s32)valsigned);
+ input_report_abs(inputdev, ABS_TILT_Y,
+ sign_extend32(device->buffer[7], 6));
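/*
 * Worked example (values invented): sign_extend32(0x45, 6) treats
 * bit 6 as the sign bit of the 7-bit tilt value, giving
 * 0x45 - 0x80 = -59, while sign_extend32(0x23, 6) stays at 35.
 */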
/* Fall thru */
case 2:
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index a510f7ef9b66..926c58e540c0 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -33,10 +33,8 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/buffer_head.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/firmware.h>
-#include <linux/version.h>
#include <linux/input/mt.h>
#include <linux/acpi.h>
#include <linux/of.h>
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 4fb5537fdd42..2c2107147319 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -126,7 +126,7 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
pos[i].y = touch->y;
}
- input_mt_assign_slots(ts->input, slots, pos, n);
+ input_mt_assign_slots(ts->input, slots, pos, n, 0);
}
for (i = 0; i < n; i++) {
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 28a06749ae42..b93a28b955fd 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -34,6 +34,7 @@
#include <linux/err.h>
#include <linux/hwmon.h>
+#include <linux/thermal.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -71,6 +72,9 @@
#define TP_ADC_SELECT(x) ((x) << 3)
#define ADC_CHAN_SELECT(x) ((x) << 0) /* 3 bits */
+/* on sun6i, bits 3~6 are left shifted by 1 to 4~7 */
+#define SUN6I_TP_MODE_EN(x) ((x) << 5)
+
/* TP_CTRL2 bits */
#define TP_SENSITIVE_ADJUST(x) ((x) << 28) /* 4 bits */
#define TP_MODE_SELECT(x) ((x) << 26) /* 2 bits */
@@ -107,10 +111,13 @@
struct sun4i_ts_data {
struct device *dev;
struct input_dev *input;
+ struct thermal_zone_device *tz;
void __iomem *base;
unsigned int irq;
bool ignore_fifo_data;
int temp_data;
+ int temp_offset;
+ int temp_step;
};
static void sun4i_ts_irq_handle_input(struct sun4i_ts_data *ts, u32 reg_val)
@@ -180,16 +187,38 @@ static void sun4i_ts_close(struct input_dev *dev)
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
}
+static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp)
+{
+ /* No temp_data until the first irq */
+ if (ts->temp_data == -1)
+ return -EAGAIN;
+
+ *temp = (ts->temp_data - ts->temp_offset) * ts->temp_step;
+
+ return 0;
+}
+
+static int sun4i_get_tz_temp(void *data, long *temp)
+{
+ return sun4i_get_temp(data, temp);
+}
+
+static struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+ .get_temp = sun4i_get_tz_temp,
+};
+
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sun4i_ts_data *ts = dev_get_drvdata(dev);
+ long temp;
+ int error;
- /* No temp_data until the first irq */
- if (ts->temp_data == -1)
- return -EAGAIN;
+ error = sun4i_get_temp(ts, &temp);
+ if (error)
+ return error;
- return sprintf(buf, "%d\n", (ts->temp_data - 1447) * 100);
+ return sprintf(buf, "%ld\n", temp);
}
static ssize_t show_temp_label(struct device *dev,
@@ -215,6 +244,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device *hwmon;
int error;
+ u32 reg;
bool ts_attached;
ts = devm_kzalloc(dev, sizeof(struct sun4i_ts_data), GFP_KERNEL);
@@ -224,6 +254,25 @@ static int sun4i_ts_probe(struct platform_device *pdev)
ts->dev = dev;
ts->ignore_fifo_data = true;
ts->temp_data = -1;
+ if (of_device_is_compatible(np, "allwinner,sun6i-a31-ts")) {
+ /* Allwinner SDK has temperature = -271 + (value / 6) (C) */
+ ts->temp_offset = 1626;
+ ts->temp_step = 167;
+ } else {
+ /*
+ * The user manuals do not contain the formula for calculating
+ * the temperature. The formula used here is from the AXP209,
+ * which is designed by X-Powers, an affiliate of Allwinner:
+ *
+ * temperature = -144.7 + (value * 0.1)
+ *
+ * Allwinner does not have any documentation whatsoever for
+ * this hardware. Moreover, it is claimed that the sensor
+ * is inaccurate and cannot work properly.
+ */
+ ts->temp_offset = 1447;
+ ts->temp_step = 100;
+ }
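/*
 * Worked example of the scaling above (raw value invented): on the
 * sun4i/sun5i path a reading of 1900 becomes
 * (1900 - 1447) * 100 = 45300, i.e. 45.3 degrees Celsius expressed in
 * the millidegrees that hwmon and the thermal core expect.
 */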
ts_attached = of_property_read_bool(np, "allwinner,ts-attached");
if (ts_attached) {
@@ -280,20 +329,34 @@ static int sun4i_ts_probe(struct platform_device *pdev)
* Set stylus up debounce to approx 10 ms, enable debounce, and
* finally enable tp mode.
*/
- writel(STYLUS_UP_DEBOUN(5) | STYLUS_UP_DEBOUN_EN(1) | TP_MODE_EN(1),
- ts->base + TP_CTRL1);
+ reg = STYLUS_UP_DEBOUN(5) | STYLUS_UP_DEBOUN_EN(1);
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-ts"))
+ reg |= TP_MODE_EN(1);
+ else
+ reg |= SUN6I_TP_MODE_EN(1);
+ writel(reg, ts->base + TP_CTRL1);
+ /*
+ * The thermal core does not register hwmon devices for DT-based
+ * thermal zone sensors, such as this one.
+ */
hwmon = devm_hwmon_device_register_with_groups(ts->dev, "sun4i_ts",
ts, sun4i_ts_groups);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
+ ts->tz = thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(ts->tz))
+ ts->tz = NULL;
+
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
if (ts_attached) {
error = input_register_device(ts->input);
if (error) {
writel(0, ts->base + TP_INT_FIFOC);
+ thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
return error;
}
}
@@ -310,6 +373,8 @@ static int sun4i_ts_remove(struct platform_device *pdev)
if (ts->input)
input_unregister_device(ts->input);
+ thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
+
/* Deactivate all IRQs */
writel(0, ts->base + TP_INT_FIFOC);
@@ -318,6 +383,7 @@ static int sun4i_ts_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_ts_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ts", },
+ { .compatible = "allwinner,sun6i-a31-ts", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_ts_of_match);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 004f1346a957..191a1b87895f 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/sort.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -52,6 +53,7 @@ struct titsc {
u32 bit_xp, bit_xn, bit_yp, bit_yn;
u32 inp_xp, inp_xn, inp_yp, inp_yn;
u32 step_mask;
+ u32 charge_delay;
};
static unsigned int titsc_readl(struct titsc *ts, unsigned int reg)
@@ -121,7 +123,7 @@ static void titsc_step_config(struct titsc *ts_dev)
{
unsigned int config;
int i;
- int end_step;
+ int end_step, first_step, tsc_steps;
u32 stepenable;
config = STEPCONFIG_MODE_HWSYNC |
@@ -140,9 +142,11 @@ static void titsc_step_config(struct titsc *ts_dev)
break;
}
- /* 1 … coordinate_readouts is for X */
- end_step = ts_dev->coordinate_readouts;
- for (i = 0; i < end_step; i++) {
+ tsc_steps = ts_dev->coordinate_readouts * 2 + 2;
+ first_step = TOTAL_STEPS - tsc_steps;
+ /* Steps 16 to 16-coordinate_readouts is for X */
+ end_step = first_step + tsc_steps;
+ for (i = end_step - ts_dev->coordinate_readouts; i < end_step; i++) {
titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
}
@@ -164,22 +168,20 @@ static void titsc_step_config(struct titsc *ts_dev)
break;
}
- /* coordinate_readouts … coordinate_readouts * 2 is for Y */
- end_step = ts_dev->coordinate_readouts * 2;
- for (i = ts_dev->coordinate_readouts; i < end_step; i++) {
+ /* 1 ... coordinate_readouts is for Y */
+ end_step = first_step + ts_dev->coordinate_readouts;
+ for (i = first_step; i < end_step; i++) {
titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
}
- /* Charge step configuration */
- config = ts_dev->bit_xp | ts_dev->bit_yn |
- STEPCHARGE_RFP_XPUL | STEPCHARGE_RFM_XNUR |
- STEPCHARGE_INM_AN1 | STEPCHARGE_INP(ts_dev->inp_yp);
+ /* Make CHARGECONFIG same as IDLECONFIG */
+ config = titsc_readl(ts_dev, REG_IDLECONFIG);
titsc_writel(ts_dev, REG_CHARGECONFIG, config);
- titsc_writel(ts_dev, REG_CHARGEDELAY, CHARGEDLY_OPENDLY);
+ titsc_writel(ts_dev, REG_CHARGEDELAY, ts_dev->charge_delay);
- /* coordinate_readouts * 2 … coordinate_readouts * 2 + 2 is for Z */
+ /* coordinate_readouts + 1 ... coordinate_readouts + 2 is for Z */
config = STEPCONFIG_MODE_HWSYNC |
STEPCONFIG_AVG_16 | ts_dev->bit_yp |
ts_dev->bit_xn | STEPCONFIG_INM_ADCREFM |
@@ -194,73 +196,104 @@ static void titsc_step_config(struct titsc *ts_dev)
titsc_writel(ts_dev, REG_STEPDELAY(end_step),
STEPCONFIG_OPENDLY);
- /* The steps1 … end and bit 0 for TS_Charge */
- stepenable = (1 << (end_step + 2)) - 1;
+ /* The steps end ... end - readouts * 2 + 2 and bit 0 for TS_Charge */
+ stepenable = 1;
+ for (i = 0; i < tsc_steps; i++)
+ stepenable |= 1 << (first_step + i + 1);
+
ts_dev->step_mask = stepenable;
am335x_tsc_se_set_cache(ts_dev->mfd_tscadc, ts_dev->step_mask);
}
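/*
 * Worked example of the step layout above, assuming the default of
 * 5 coordinate readouts and TOTAL_STEPS == 16 (as on AM335x):
 *
 *   tsc_steps  = 5 * 2 + 2 = 12
 *   first_step = 16 - 12   = 4
 *
 * so the touchscreen occupies ADC steps 4..15 (coordinate readouts
 * for both axes plus the two Z steps), presumably leaving the
 * low-numbered steps free for the ADC side of the MFD.
 */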
+static int titsc_cmp_coord(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
static void titsc_read_coordinates(struct titsc *ts_dev,
u32 *x, u32 *y, u32 *z1, u32 *z2)
{
- unsigned int fifocount = titsc_readl(ts_dev, REG_FIFO0CNT);
- unsigned int prev_val_x = ~0, prev_val_y = ~0;
- unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
- unsigned int read, diff;
- unsigned int i, channel;
+ unsigned int yvals[7], xvals[7];
+ unsigned int i, xsum = 0, ysum = 0;
unsigned int creads = ts_dev->coordinate_readouts;
- *z1 = *z2 = 0;
- if (fifocount % (creads * 2 + 2))
- fifocount -= fifocount % (creads * 2 + 2);
- /*
- * Delta filter is used to remove large variations in sampled
- * values from ADC. The filter tries to predict where the next
- * coordinate could be. This is done by taking a previous
- * coordinate and subtracting it form current one. Further the
- * algorithm compares the difference with that of a present value,
- * if true the value is reported to the sub system.
- */
- for (i = 0; i < fifocount; i++) {
- read = titsc_readl(ts_dev, REG_FIFO0);
-
- channel = (read & 0xf0000) >> 16;
- read &= 0xfff;
- if (channel < creads) {
- diff = abs(read - prev_val_x);
- if (diff < prev_diff_x) {
- prev_diff_x = diff;
- *x = read;
- }
- prev_val_x = read;
+ for (i = 0; i < creads; i++) {
+ yvals[i] = titsc_readl(ts_dev, REG_FIFO0);
+ yvals[i] &= 0xfff;
+ }
- } else if (channel < creads * 2) {
- diff = abs(read - prev_val_y);
- if (diff < prev_diff_y) {
- prev_diff_y = diff;
- *y = read;
- }
- prev_val_y = read;
+ *z1 = titsc_readl(ts_dev, REG_FIFO0);
+ *z1 &= 0xfff;
+ *z2 = titsc_readl(ts_dev, REG_FIFO0);
+ *z2 &= 0xfff;
- } else if (channel < creads * 2 + 1) {
- *z1 = read;
+ for (i = 0; i < creads; i++) {
+ xvals[i] = titsc_readl(ts_dev, REG_FIFO0);
+ xvals[i] &= 0xfff;
+ }
- } else if (channel < creads * 2 + 2) {
- *z2 = read;
+ /*
+ * If the number of coordinate readouts is less than 4,
+ * report the plain average. With 4 or more readouts, sort
+ * the coordinate samples, drop the min and max values, and
+ * report the average of the remaining values.
+ */
+ if (creads <= 3) {
+ for (i = 0; i < creads; i++) {
+ ysum += yvals[i];
+ xsum += xvals[i];
}
+ ysum /= creads;
+ xsum /= creads;
+ } else {
+ sort(yvals, creads, sizeof(unsigned int),
+ titsc_cmp_coord, NULL);
+ sort(xvals, creads, sizeof(unsigned int),
+ titsc_cmp_coord, NULL);
+ for (i = 1; i < creads - 1; i++) {
+ ysum += yvals[i];
+ xsum += xvals[i];
+ }
+ ysum /= creads - 2;
+ xsum /= creads - 2;
}
+ *y = ysum;
+ *x = xsum;
}
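/*
 * Worked example of the filtering above (samples invented): with
 * 5 readouts sorted to { 1000, 1004, 1006, 1009, 1200 }, the min and
 * max are dropped and the reported value is
 * (1004 + 1006 + 1009) / 3 = 1006 (integer division), which discards
 * the 1200 outlier.
 */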
static irqreturn_t titsc_irq(int irq, void *dev)
{
struct titsc *ts_dev = dev;
struct input_dev *input_dev = ts_dev->input;
- unsigned int status, irqclr = 0;
+ unsigned int fsm, status, irqclr = 0;
unsigned int x = 0, y = 0;
unsigned int z1, z2, z;
- unsigned int fsm;
- status = titsc_readl(ts_dev, REG_IRQSTATUS);
+ status = titsc_readl(ts_dev, REG_RAWIRQSTATUS);
+ if (status & IRQENB_HW_PEN) {
+ ts_dev->pen_down = true;
+ titsc_writel(ts_dev, REG_IRQWAKEUP, 0x00);
+ titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
+ irqclr |= IRQENB_HW_PEN;
+ }
+
+ if (status & IRQENB_PENUP) {
+ fsm = titsc_readl(ts_dev, REG_ADCFSM);
+ if (fsm == ADCFSM_STEPID) {
+ ts_dev->pen_down = false;
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ } else {
+ ts_dev->pen_down = true;
+ }
+ irqclr |= IRQENB_PENUP;
+ }
+
+ if (status & IRQENB_EOS)
+ irqclr |= IRQENB_EOS;
+
/*
* ADC and touchscreen share the IRQ line.
* FIFO1 interrupts are used by ADC. Handle FIFO0 IRQs here only
@@ -291,37 +324,11 @@ static irqreturn_t titsc_irq(int irq, void *dev)
}
irqclr |= IRQENB_FIFO0THRES;
}
-
- /*
- * Time for sequencer to settle, to read
- * correct state of the sequencer.
- */
- udelay(SEQ_SETTLE);
-
- status = titsc_readl(ts_dev, REG_RAWIRQSTATUS);
- if (status & IRQENB_PENUP) {
- /* Pen up event */
- fsm = titsc_readl(ts_dev, REG_ADCFSM);
- if (fsm == ADCFSM_STEPID) {
- ts_dev->pen_down = false;
- input_report_key(input_dev, BTN_TOUCH, 0);
- input_report_abs(input_dev, ABS_PRESSURE, 0);
- input_sync(input_dev);
- } else {
- ts_dev->pen_down = true;
- }
- irqclr |= IRQENB_PENUP;
- }
-
- if (status & IRQENB_HW_PEN) {
-
- titsc_writel(ts_dev, REG_IRQWAKEUP, 0x00);
- titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
- }
-
if (irqclr) {
titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
- am335x_tsc_se_set_cache(ts_dev->mfd_tscadc, ts_dev->step_mask);
+ if (status & IRQENB_EOS)
+ am335x_tsc_se_set_cache(ts_dev->mfd_tscadc,
+ ts_dev->step_mask);
return IRQ_HANDLED;
}
return IRQ_NONE;
@@ -368,6 +375,23 @@ static int titsc_parse_dt(struct platform_device *pdev,
if (err < 0)
return err;
+ if (ts_dev->coordinate_readouts <= 0) {
+ dev_warn(&pdev->dev,
+ "invalid co-ordinate readouts, resetting it to 5\n");
+ ts_dev->coordinate_readouts = 5;
+ }
+
+ err = of_property_read_u32(node, "ti,charge-delay",
+ &ts_dev->charge_delay);
+ /*
+ * If ti,charge-delay value is not specified, then use
+ * CHARGEDLY_OPENDLY as the default value.
+ */
+ if (err < 0) {
+ ts_dev->charge_delay = CHARGEDLY_OPENDLY;
+ dev_warn(&pdev->dev, "ti,charge-delay not specified\n");
+ }
+
return of_property_read_u32_array(node, "ti,wire-config",
ts_dev->config_inp, ARRAY_SIZE(ts_dev->config_inp));
}
@@ -411,6 +435,7 @@ static int titsc_probe(struct platform_device *pdev)
}
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
+ titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_EOS);
err = titsc_config_wires(ts_dev);
if (err) {
dev_err(&pdev->dev, "wrong i/p wire configuration\n");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 325188eef1c1..baa0d9786f50 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -4,6 +4,7 @@ config IOMMU_API
menuconfig IOMMU_SUPPORT
bool "IOMMU Hardware Support"
+ depends on MMU
default y
---help---
Say Y here if you want to compile device drivers for IO Memory
@@ -13,13 +14,43 @@ menuconfig IOMMU_SUPPORT
if IOMMU_SUPPORT
+menu "Generic IOMMU Pagetable Support"
+
+# Selected by the actual pagetable implementations
+config IOMMU_IO_PGTABLE
+ bool
+
+config IOMMU_IO_PGTABLE_LPAE
+ bool "ARMv7/v8 Long Descriptor Format"
+ select IOMMU_IO_PGTABLE
+ help
+ Enable support for the ARM long descriptor pagetable format.
+ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
+ sizes at both stage-1 and stage-2, as well as address spaces
+ up to 48-bits in size.
+
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+ bool "LPAE selftests"
+ depends on IOMMU_IO_PGTABLE_LPAE
+ help
+ Enable self-tests for LPAE page table allocator. This performs
+ a series of page-table consistency checks during boot.
+
+ If unsure, say N here.
+
+endmenu
+
+config IOMMU_IOVA
+ bool
+
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
config FSL_PAMU
bool "Freescale IOMMU support"
- depends on PPC_E500MC
+ depends on PPC32
+ depends on PPC_E500MC || COMPILE_TEST
select IOMMU_API
select GENERIC_ALLOCATOR
help
@@ -30,7 +61,8 @@ config FSL_PAMU
# MSM IOMMU support
config MSM_IOMMU
bool "MSM IOMMU Support"
- depends on ARCH_MSM8X60 || ARCH_MSM8960
+ depends on ARM
+ depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -91,6 +123,7 @@ config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
select IOMMU_API
+ select IOMMU_IOVA
select DMAR_TABLE
help
DMA remapping (DMAR) devices support enables independent address
@@ -140,7 +173,8 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARCH_OMAP2PLUS
+ depends on ARM && MMU
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
config OMAP_IOMMU_DEBUG
@@ -187,7 +221,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && ARM
+ depends on ARCH_EXYNOS && ARM && MMU
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
@@ -216,7 +250,7 @@ config SHMOBILE_IPMMU_TLB
config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
- depends on ARM
+ depends on ARM && MMU
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -287,6 +321,7 @@ config IPMMU_VMSA
depends on ARM_LPAE
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
help
Support for the Renesas VMSA-compatible IPMMU found in the
@@ -304,13 +339,13 @@ config SPAPR_TCE_IOMMU
config ARM_SMMU
bool "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || (ARM_LPAE && OF)
+ depends on (ARM64 || ARM) && MMU
select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
help
Support for implementations of the ARM System MMU architecture
- versions 1 and 2. The driver supports both v7l and v8l table
- formats with 4k and 64k page sizes.
+ versions 1 and 2.
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
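The Kconfig entries above introduce a generic page-table layer: drivers select IOMMU_IO_PGTABLE_LPAE and drive it through a small ops structure instead of walking their own tables. A sketch of the client-side flow, assuming only the calls and structure fields visible in the arm-smmu conversion later in this patch; the TLB callbacks, geometry and addresses are placeholders, not a real SoC configuration:

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

#include "io-pgtable.h"

/* Placeholder TLB callbacks; a real driver issues hardware invalidations. */
static void my_tlb_flush_all(void *cookie) { }
static void my_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
			     void *cookie) { }
static void my_tlb_sync(void *cookie) { }
static void my_flush_pgtable(void *addr, size_t size, void *cookie) { }

static struct iommu_gather_ops my_tlb_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_add_flush	= my_tlb_add_flush,
	.tlb_sync	= my_tlb_sync,
	.flush_pgtable	= my_flush_pgtable,
};

static int my_domain_init(void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,  /* 4K granule */
		.ias		= 48,	/* input (IOVA) address bits */
		.oas		= 48,	/* output (physical) address bits */
		.tlb		= &my_tlb_ops,
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	/* Map one 4K page read/write, then unmap it and free the tables. */
	if (!ops->map(ops, 0x1000, 0x80001000, SZ_4K,
		      IOMMU_READ | IOMMU_WRITE))
		ops->unmap(ops, 0x1000, SZ_4K);

	free_io_pgtable_ops(ops);
	return 0;
}

The same .map/.unmap/.iova_to_phys entry points are what arm_smmu_map() and friends end up wrapping below.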
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 7b976f294a69..080ffab4ed1c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,13 +1,16 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98024856df07..48882c126245 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -843,10 +843,10 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
size_t size, u16 domid, int pde)
{
u64 pages;
- int s;
+ bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
+ s = false;
if (pages > 1) {
/*
@@ -854,7 +854,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
+ s = true;
}
address &= PAGE_MASK;
@@ -874,10 +874,10 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size)
{
u64 pages;
- int s;
+ bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
+ s = false;
if (pages > 1) {
/*
@@ -885,7 +885,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
+ s = true;
}
address &= PAGE_MASK;
@@ -4284,7 +4284,6 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id)
}
struct irq_remap_ops amd_iommu_irq_ops = {
- .supported = amd_iommu_supported,
.prepare = amd_iommu_prepare,
.enable = amd_iommu_enable,
.disable = amd_iommu_disable,
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b0522f15730f..450ef5001a65 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -2014,9 +2014,6 @@ static bool detect_ivrs(void)
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
- if (!disable_irq_remap)
- amd_iommu_irq_remap = true;
-
return true;
}
@@ -2123,12 +2120,14 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
- return iommu_go_to_state(IOMMU_ACPI_FINISHED);
-}
+ int ret;
-int __init amd_iommu_supported(void)
-{
- return amd_iommu_irq_remap ? 1 : 0;
+ amd_iommu_irq_remap = true;
+
+ ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
+ if (ret)
+ return ret;
+ return amd_iommu_irq_remap ? 0 : -ENODEV;
}
int __init amd_iommu_enable(void)
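With amd_iommu_supported() removed, availability is now reported through the return value of ->prepare(): it sets amd_iommu_irq_remap, runs the ACPI stage, and returns -ENODEV when remapping cannot be used. The generic irq-remapping caller is not part of this diff, so the following is only an illustrative sketch of how a caller can collapse the old two-step probe into a single call:

/* Illustrative only; field names mirror struct irq_remap_ops as used above. */
static int probe_irq_remapping(struct irq_remap_ops *ops)
{
	int ret;

	ret = ops->prepare();	/* now also answers "is this supported?" */
	if (ret)
		return ret;	/* -ENODEV: fall back to non-remapped IRQs */

	return ops->enable();
}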
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 95ed6deae47f..72b0fd455e24 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -33,7 +33,6 @@ extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);
/* Needed for interrupt remapping */
-extern int amd_iommu_supported(void);
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index cec51a8ba844..c4fffb710c58 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90f70d0e1141..6d5a5c44453b 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -31,7 +31,7 @@
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
+MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
@@ -151,18 +151,6 @@ static void put_device_state(struct device_state *dev_state)
wake_up(&dev_state->wq);
}
-static void put_device_state_wait(struct device_state *dev_state)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_dec_and_test(&dev_state->count))
- schedule();
- finish_wait(&dev_state->wq, &wait);
-
- free_device_state(dev_state);
-}
-
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
@@ -278,14 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
-
- if (!atomic_dec_and_test(&pasid_state->count))
- schedule();
-
- finish_wait(&pasid_state->wq, &wait);
+ wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@@ -851,7 +832,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
- put_device_state_wait(dev_state);
+ put_device_state(dev_state);
+ /*
+ * Wait until the last reference is dropped before freeing
+ * the device state.
+ */
+ wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+ free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
@@ -921,7 +908,7 @@ static int __init amd_iommu_v2_init(void)
{
int ret;
- pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+ pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
if (!amd_iommu_v2_supported()) {
pr_info("AMD IOMMUv2 functionality not available on this system\n");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6cd47b75286f..fc13dd56953e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -23,8 +23,6 @@
* - Stream-matching and stream-indexing
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
- * - 4k and 64k pages, with contiguous pte hints.
- * - Up to 48-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/
@@ -36,7 +34,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
-#include <linux/mm.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
@@ -46,7 +44,7 @@
#include <linux/amba/bus.h>
-#include <asm/pgalloc.h>
+#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
@@ -71,40 +69,6 @@
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
? 0x400 : 0))
-/* Page table bits */
-#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
-#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
-#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
-#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
-#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
-#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
-
-#if PAGE_SIZE == SZ_4K
-#define ARM_SMMU_PTE_CONT_ENTRIES 16
-#elif PAGE_SIZE == SZ_64K
-#define ARM_SMMU_PTE_CONT_ENTRIES 32
-#else
-#define ARM_SMMU_PTE_CONT_ENTRIES 1
-#endif
-
-#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
-#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-
-/* Stage-1 PTE */
-#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
-#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
-
-/* Stage-2 PTE */
-#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
-#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2)
-#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2)
-#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2)
-
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
@@ -132,17 +96,12 @@
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
-#define ARM_SMMU_GR0_PIDR0 0xfe0
-#define ARM_SMMU_GR0_PIDR1 0xfe4
-#define ARM_SMMU_GR0_PIDR2 0xfe8
#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
-#define ID0_PTFS_SHIFT 24
-#define ID0_PTFS_MASK 0x2
-#define ID0_PTFS_V8_ONLY 0x2
+#define ID0_ATOSNS (1 << 26)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
@@ -169,11 +128,7 @@
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
-#define PIDR2_ARCH_SHIFT 4
-#define PIDR2_ARCH_MASK 0xf
-
/* Global TLB invalidation */
-#define ARM_SMMU_GR0_STLBIALL 0x60
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
@@ -231,13 +186,25 @@
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0_LO 0x20
#define ARM_SMMU_CB_TTBR0_HI 0x24
+#define ARM_SMMU_CB_TTBR1_LO 0x28
+#define ARM_SMMU_CB_TTBR1_HI 0x2c
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+#define ARM_SMMU_CB_PAR_LO 0x50
+#define ARM_SMMU_CB_PAR_HI 0x54
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR_LO 0x60
#define ARM_SMMU_CB_FAR_HI 0x64
#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_ATS1PR_LO 0x800
+#define ARM_SMMU_CB_ATS1PR_HI 0x804
+#define ARM_SMMU_CB_ATSR 0x8f0
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
@@ -249,47 +216,16 @@
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
-#define RESUME_RETRY (0 << 0)
-#define RESUME_TERMINATE (1 << 0)
-
-#define TTBCR_EAE (1 << 31)
+#define CB_PAR_F (1 << 0)
-#define TTBCR_PASIZE_SHIFT 16
-#define TTBCR_PASIZE_MASK 0x7
+#define ATSR_ACTIVE (1 << 0)
-#define TTBCR_TG0_4K (0 << 14)
-#define TTBCR_TG0_64K (1 << 14)
-
-#define TTBCR_SH0_SHIFT 12
-#define TTBCR_SH0_MASK 0x3
-#define TTBCR_SH_NS 0
-#define TTBCR_SH_OS 2
-#define TTBCR_SH_IS 3
-
-#define TTBCR_ORGN0_SHIFT 10
-#define TTBCR_IRGN0_SHIFT 8
-#define TTBCR_RGN_MASK 0x3
-#define TTBCR_RGN_NC 0
-#define TTBCR_RGN_WBWA 1
-#define TTBCR_RGN_WT 2
-#define TTBCR_RGN_WB 3
-
-#define TTBCR_SL0_SHIFT 6
-#define TTBCR_SL0_MASK 0x3
-#define TTBCR_SL0_LVL_2 0
-#define TTBCR_SL0_LVL_1 1
-
-#define TTBCR_T1SZ_SHIFT 16
-#define TTBCR_T0SZ_SHIFT 0
-#define TTBCR_SZ_MASK 0xf
+#define RESUME_RETRY (0 << 0)
+#define RESUME_TERMINATE (1 << 0)
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_MASK 0x7
-#define TTBCR2_PASIZE_SHIFT 0
-#define TTBCR2_PASIZE_MASK 0x7
-
-/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32 0
#define TTBCR2_ADDR_36 1
#define TTBCR2_ADDR_40 2
@@ -297,16 +233,7 @@
#define TTBCR2_ADDR_44 4
#define TTBCR2_ADDR_48 5
-#define TTBRn_HI_ASID_SHIFT 16
-
-#define MAIR_ATTR_SHIFT(n) ((n) << 3)
-#define MAIR_ATTR_MASK 0xff
-#define MAIR_ATTR_DEVICE 0x04
-#define MAIR_ATTR_NC 0x44
-#define MAIR_ATTR_WBRWA 0xff
-#define MAIR_ATTR_IDX_NC 0
-#define MAIR_ATTR_IDX_CACHE 1
-#define MAIR_ATTR_IDX_DEV 2
+#define TTBRn_HI_ASID_SHIFT 16
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
@@ -366,6 +293,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,10 +308,9 @@ struct arm_smmu_device {
u32 num_mapping_groups;
DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
- unsigned long s1_input_size;
- unsigned long s1_output_size;
- unsigned long s2_input_size;
- unsigned long s2_output_size;
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
u32 num_global_irqs;
u32 num_context_irqs;
@@ -397,7 +324,6 @@ struct arm_smmu_cfg {
u8 cbndx;
u8 irptndx;
u32 cbar;
- pgd_t *pgd;
};
#define INVALID_IRPTNDX 0xff
@@ -412,11 +338,15 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
+ struct io_pgtable_ops *pgtbl_ops;
+ spinlock_t pgtbl_lock;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
- spinlock_t lock;
+ struct mutex init_mutex; /* Protects smmu pointer */
};
+static struct iommu_ops arm_smmu_ops;
+
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
@@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}
/* Wait for any pending TLB invalidations to complete */
-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
int count = 0;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
@@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
}
}
-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_tlb_sync(void *cookie)
{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ __arm_smmu_tlb_sync(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *base = ARM_SMMU_GR0(smmu);
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *base;
if (stage1) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
base + ARM_SMMU_GR0_TLBIVMID);
}
- arm_smmu_tlb_sync(smmu);
+ __arm_smmu_tlb_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *reg;
+
+ if (stage1) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+ iova &= ~12UL;
+ iova |= ARM_SMMU_CB_ASID(cfg);
+ writel_relaxed(iova, reg);
+#ifdef CONFIG_64BIT
+ } else {
+ iova >>= 12;
+ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+ writeq_relaxed(iova, reg);
+#endif
+ }
+#ifdef CONFIG_64BIT
+ } else if (smmu->version == ARM_SMMU_V2) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
+ ARM_SMMU_CB_S2_TLBIIPAS2;
+ writeq_relaxed(iova >> 12, reg);
+#endif
+ } else {
+ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
+ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+ }
+}
+
+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb(ishst);
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
}
+static struct iommu_gather_ops arm_smmu_gather_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context,
+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync,
+ .flush_pgtable = arm_smmu_flush_pgtable,
+};
+
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
int flags, ret;
@@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
return IRQ_HANDLED;
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
- dsb(ishst);
- } else {
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of
- * recursion here as the SMMU table walker will not be wired
- * through another SMMU.
- */
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
- }
-}
-
-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg)
{
u32 reg;
bool stage1;
@@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
#else
reg = CBA2R_RW64_32BIT;
#endif
- writel_relaxed(reg,
- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-
- /* TTBCR2 */
- switch (smmu->s1_input_size) {
- case 32:
- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
- break;
- case 36:
- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
- break;
- case 39:
- case 40:
- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
- break;
- case 42:
- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
- break;
- case 44:
- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
- break;
- case 48:
- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
- break;
- }
-
- switch (smmu->s1_output_size) {
- case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
- break;
- case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
- break;
- case 39:
- case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
- break;
- case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
- break;
- case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
- break;
- case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
- break;
- }
-
- if (stage1)
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
- /* TTBR0 */
- arm_smmu_flush_pgtable(smmu, cfg->pgd,
- PTRS_PER_PGD * sizeof(pgd_t));
- reg = __pa(cfg->pgd);
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
- if (stage1)
+ /* TTBRs */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
- /*
- * TTBCR
- * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
- */
- if (smmu->version > ARM_SMMU_V1) {
- if (PAGE_SIZE == SZ_4K)
- reg = TTBCR_TG0_4K;
- else
- reg = TTBCR_TG0_64K;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
- if (!stage1) {
- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+ }
- switch (smmu->s2_output_size) {
+ /* TTBCR */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ if (smmu->version > ARM_SMMU_V1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+ switch (smmu->va_size) {
case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
break;
case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
break;
case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
break;
case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
break;
case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
break;
case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
break;
}
- } else {
- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
- reg = 0;
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
}
- reg |= TTBCR_EAE |
- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
-
- if (!stage1)
- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
-
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-
- /* MAIR0 (stage-1 only) */
+ /* MAIRs (stage-1 only) */
if (stage1) {
- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
}
/* SCTLR */
@@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_device *smmu)
{
int irq, start, ret = 0;
- unsigned long flags;
+ unsigned long ias, oas;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_cfg pgtbl_cfg;
+ enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- spin_lock_irqsave(&smmu_domain->lock, flags);
+ mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
goto out_unlock;
@@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
+ ias = smmu->va_size;
+ oas = smmu->ipa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S1;
+ else
+ fmt = ARM_32_LPAE_S1;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -949,6 +885,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
+ ias = smmu->ipa_size;
+ oas = smmu->pa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S2;
+ else
+ fmt = ARM_32_LPAE_S2;
break;
default:
ret = -EINVAL;
@@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
- ACCESS_ONCE(smmu_domain->smmu) = smmu;
- arm_smmu_init_context_bank(smmu_domain);
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .tlb = &arm_smmu_gather_ops,
+ };
+
+ smmu_domain->smmu = smmu;
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+ if (!pgtbl_ops) {
+ ret = -ENOMEM;
+ goto out_clear_smmu;
+ }
+
+ /* Update our supported page sizes to reflect the page table format */
+ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ /* Initialise the context bank with our page table cfg */
+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+
+ /*
+ * Request context fault interrupt. Do this last to avoid the
+ * handler seeing a half-initialised domain state.
+ */
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
@@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = INVALID_IRPTNDX;
}
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+ smmu_domain->pgtbl_ops = pgtbl_ops;
return 0;
+out_clear_smmu:
+ smmu_domain->smmu = NULL;
out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+ mutex_unlock(&smmu_domain->init_mutex);
return ret;
}
@@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (!smmu)
return;
- /* Disable the context bank and nuke the TLB before freeing it. */
+ /*
+ * Disable the context bank and free its page tables before
+ * releasing the bank back to the allocator.
+ */
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- arm_smmu_tlb_inv_context(smmu_domain);
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
free_irq(irq, domain);
}
+ if (smmu_domain->pgtbl_ops)
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain;
- pgd_t *pgd;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
if (!smmu_domain)
return -ENOMEM;
- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
- if (!pgd)
- goto out_free_domain;
- smmu_domain->cfg.pgd = pgd;
-
- spin_lock_init(&smmu_domain->lock);
+ mutex_init(&smmu_domain->init_mutex);
+ spin_lock_init(&smmu_domain->pgtbl_lock);
domain->priv = smmu_domain;
return 0;
-
-out_free_domain:
- kfree(smmu_domain);
- return -ENOMEM;
-}
-
-static void arm_smmu_free_ptes(pmd_t *pmd)
-{
- pgtable_t table = pmd_pgtable(*pmd);
-
- __free_page(table);
-}
-
-static void arm_smmu_free_pmds(pud_t *pud)
-{
- int i;
- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
-
- pmd = pmd_base;
- for (i = 0; i < PTRS_PER_PMD; ++i) {
- if (pmd_none(*pmd))
- continue;
-
- arm_smmu_free_ptes(pmd);
- pmd++;
- }
-
- pmd_free(NULL, pmd_base);
-}
-
-static void arm_smmu_free_puds(pgd_t *pgd)
-{
- int i;
- pud_t *pud, *pud_base = pud_offset(pgd, 0);
-
- pud = pud_base;
- for (i = 0; i < PTRS_PER_PUD; ++i) {
- if (pud_none(*pud))
- continue;
-
- arm_smmu_free_pmds(pud);
- pud++;
- }
-
- pud_free(NULL, pud_base);
-}
-
-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
-{
- int i;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- pgd_t *pgd, *pgd_base = cfg->pgd;
-
- /*
- * Recursively free the page tables for this domain. We don't
- * care about speculative TLB filling because the tables should
- * not be active in any context bank at this point (SCTLR.M is 0).
- */
- pgd = pgd_base;
- for (i = 0; i < PTRS_PER_PGD; ++i) {
- if (pgd_none(*pgd))
- continue;
- arm_smmu_free_puds(pgd);
- pgd++;
- }
-
- kfree(pgd_base);
}
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
@@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain)
* already been detached.
*/
arm_smmu_destroy_domain_context(domain);
- arm_smmu_free_pgtables(smmu_domain);
kfree(smmu_domain);
}
@@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu, *dom_smmu;
+ struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
smmu = find_smmu_for_device(dev);
@@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EEXIST;
}
+ /* Ensure that the domain is finalised */
+ ret = arm_smmu_init_domain_context(domain, smmu);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
/*
* Sanity check the domain. We don't support domains across
* different SMMUs.
*/
- dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
- if (!dom_smmu) {
- /* Now that we have a master, we can finalise the domain */
- ret = arm_smmu_init_domain_context(domain, smmu);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- dom_smmu = smmu_domain->smmu;
- }
-
- if (dom_smmu != smmu) {
+ if (smmu_domain->smmu != smmu) {
dev_err(dev,
"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
@@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_domain_remove_master(smmu_domain, cfg);
}
-static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
- unsigned long end)
-{
- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
- (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
-}
-
-static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned long pfn, int prot, int stage)
-{
- pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
-
- if (pmd_none(*pmd)) {
- /* Allocate a new set of tables */
- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
-
- if (!table)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
- pmd_populate(NULL, pmd, table);
- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
- }
-
- if (stage == 1) {
- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
- pteval |= ARM_SMMU_PTE_AP_RDONLY;
-
- if (prot & IOMMU_CACHE)
- pteval |= (MAIR_ATTR_IDX_CACHE <<
- ARM_SMMU_PTE_ATTRINDX_SHIFT);
- } else {
- pteval |= ARM_SMMU_PTE_HAP_FAULT;
- if (prot & IOMMU_READ)
- pteval |= ARM_SMMU_PTE_HAP_READ;
- if (prot & IOMMU_WRITE)
- pteval |= ARM_SMMU_PTE_HAP_WRITE;
- if (prot & IOMMU_CACHE)
- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
- else
- pteval |= ARM_SMMU_PTE_MEMATTR_NC;
- }
-
- if (prot & IOMMU_NOEXEC)
- pteval |= ARM_SMMU_PTE_XN;
-
- /* If no access, create a faulting entry to avoid TLB fills */
- if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- pteval &= ~ARM_SMMU_PTE_PAGE;
-
- pteval |= ARM_SMMU_PTE_SH_IS;
- start = pmd_page_vaddr(*pmd) + pte_index(addr);
- pte = start;
-
- /*
- * Install the page table entries. This is fairly complicated
- * since we attempt to make use of the contiguous hint in the
- * ptes where possible. The contiguous hint indicates a series
- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
- * contiguous region with the following constraints:
- *
- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
- * - Each pte in the region has the contiguous hint bit set
- *
- * This complicates unmapping (also handled by this code, when
- * neither IOMMU_READ or IOMMU_WRITE are set) because it is
- * possible, yet highly unlikely, that a client may unmap only
- * part of a contiguous range. This requires clearing of the
- * contiguous hint bits in the range before installing the new
- * faulting entries.
- *
- * Note that re-mapping an address range without first unmapping
- * it is not supported, so TLB invalidation is not required here
- * and is instead performed at unmap and domain-init time.
- */
- do {
- int i = 1;
-
- pteval &= ~ARM_SMMU_PTE_CONT;
-
- if (arm_smmu_pte_is_contiguous_range(addr, end)) {
- i = ARM_SMMU_PTE_CONT_ENTRIES;
- pteval |= ARM_SMMU_PTE_CONT;
- } else if (pte_val(*pte) &
- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
- int j;
- pte_t *cont_start;
- unsigned long idx = pte_index(addr);
-
- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
- cont_start = pmd_page_vaddr(*pmd) + idx;
- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
- pte_val(*(cont_start + j)) &=
- ~ARM_SMMU_PTE_CONT;
-
- arm_smmu_flush_pgtable(smmu, cont_start,
- sizeof(*pte) *
- ARM_SMMU_PTE_CONT_ENTRIES);
- }
-
- do {
- *pte = pfn_pte(pfn, __pgprot(pteval));
- } while (pte++, pfn++, addr += PAGE_SIZE, --i);
- } while (addr != end);
-
- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
- return 0;
-}
-
-static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
- unsigned long addr, unsigned long end,
- phys_addr_t phys, int prot, int stage)
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
{
int ret;
- pmd_t *pmd;
- unsigned long next, pfn = __phys_to_pfn(phys);
-
-#ifndef __PAGETABLE_PMD_FOLDED
- if (pud_none(*pud)) {
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pmd)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
-
- pmd += pmd_index(addr);
- } else
-#endif
- pmd = pmd_offset(pud, addr);
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- do {
- next = pmd_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
- prot, stage);
- phys += next - addr;
- pfn = __phys_to_pfn(phys);
- } while (pmd++, addr = next, addr < end);
+ if (!ops)
+ return -ENODEV;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->map(ops, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
}
-static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- phys_addr_t phys, int prot, int stage)
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
- int ret = 0;
- pud_t *pud;
- unsigned long next;
-
-#ifndef __PAGETABLE_PUD_FOLDED
- if (pgd_none(*pgd)) {
- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pud)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
- pgd_populate(NULL, pgd, pud);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
-
- pud += pud_index(addr);
- } else
-#endif
- pud = pud_offset(pgd, addr);
+ size_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- do {
- next = pud_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
- prot, stage);
- phys += next - addr;
- } while (pud++, addr = next, addr < end);
+ if (!ops)
+ return 0;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->unmap(ops, iova, size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
}
-static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ dma_addr_t iova)
{
- int ret, stage;
- unsigned long end;
- phys_addr_t input_mask, output_mask;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- pgd_t *pgd = cfg->pgd;
- unsigned long flags;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct device *dev = smmu->dev;
+ void __iomem *cb_base;
+ u32 tmp;
+ u64 phys;
- if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
- stage = 2;
- input_mask = (1ULL << smmu->s2_input_size) - 1;
- output_mask = (1ULL << smmu->s2_output_size) - 1;
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+ if (smmu->version == 1) {
+ u32 reg = iova & ~0xfff;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
} else {
- stage = 1;
- input_mask = (1ULL << smmu->s1_input_size) - 1;
- output_mask = (1ULL << smmu->s1_output_size) - 1;
+ u32 reg = iova & ~0xfff;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
+ reg = ((u64)iova & ~0xfff) >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
}
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- if ((phys_addr_t)iova & ~input_mask)
- return -ERANGE;
-
- if (paddr & ~output_mask)
- return -ERANGE;
-
- spin_lock_irqsave(&smmu_domain->lock, flags);
- pgd += pgd_index(iova);
- end = iova + size;
- do {
- unsigned long next = pgd_addr_end(iova, end);
-
- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
- prot, stage);
- if (ret)
- goto out_unlock;
-
- paddr += next - iova;
- iova = next;
- } while (pgd++, iova != end);
-
-out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
-
- return ret;
-}
-
-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- struct arm_smmu_domain *smmu_domain = domain->priv;
-
- if (!smmu_domain)
- return -ENODEV;
+ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
+ !(tmp & ATSR_ACTIVE), 5, 50)) {
+ dev_err(dev,
+ "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
+ &iova);
+ return ops->iova_to_phys(ops, iova);
+ }
- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
-}
+ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
+ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
-{
- int ret;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ if (phys & CB_PAR_F) {
+ dev_err(dev, "translation fault!\n");
+ dev_err(dev, "PAR = 0x%llx\n", phys);
+ return 0;
+ }
- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
- arm_smmu_tlb_inv_context(smmu_domain);
- return ret ? 0 : size;
+ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+ dma_addr_t iova)
{
- pgd_t *pgdp, pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
+ phys_addr_t ret;
+ unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- pgdp = cfg->pgd;
- if (!pgdp)
+ if (!ops)
return 0;
- pgd = *(pgdp + pgd_index(iova));
- if (pgd_none(pgd))
- return 0;
-
- pud = *pud_offset(&pgd, iova);
- if (pud_none(pud))
- return 0;
-
- pmd = *pmd_offset(&pud, iova);
- if (pmd_none(pmd))
- return 0;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+ ret = arm_smmu_iova_to_phys_hard(domain, iova);
+ else
+ ret = ops->iova_to_phys(ops, iova);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
- if (pte_none(pte))
- return 0;
-
- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+ return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
@@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
+ int ret = 0;
struct arm_smmu_domain *smmu_domain = domain->priv;
+ mutex_lock(&smmu_domain->init_mutex);
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu)
- return -EPERM;
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- return 0;
+ break;
default:
- return -ENODEV;
+ ret = -ENODEV;
}
+
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
}
-static const struct iommu_ops arm_smmu_ops = {
+static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
@@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = {
.remove_device = arm_smmu_remove_device,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
- .pgsize_bitmap = (SECTION_SIZE |
- ARM_SMMU_PTE_CONT_SIZE |
- PAGE_SIZE),
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
@@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
/* Push the button */
- arm_smmu_tlb_sync(smmu);
+ __arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
@@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID0 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
-#ifndef CONFIG_64BIT
- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
- dev_err(smmu->dev, "\tno v7 descriptor support!\n");
- return -ENODEV;
- }
-#endif
/* Restrict available stages based on module parameter */
if (force_stage == 1)
@@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
+ if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+ dev_notice(smmu->dev, "\taddress translation ops\n");
+ }
+
if (id & ID0_CTTW) {
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
dev_notice(smmu->dev, "\tcoherent table walk\n");
@@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
/* Check for size mismatch of SMMU address space from mapped region */
- size = 1 <<
- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
size *= 2 << smmu->pgshift;
if (smmu->size != size)
dev_warn(smmu->dev,
"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
size, smmu->size);
- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
- ID1_NUMS2CB_MASK;
+ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
if (smmu->num_s2_context_banks > smmu->num_context_banks) {
dev_err(smmu->dev, "impossible number of S2 context banks!\n");
@@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID2 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
-
- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
-#ifdef CONFIG_64BIT
- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
-#else
- smmu->s2_input_size = min(32UL, size);
-#endif
+ smmu->ipa_size = size;
- /* The stage-2 output mask is also applied for bypass */
+ /* The output mask is also applied for bypass */
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
+ smmu->pa_size = size;
if (smmu->version == ARM_SMMU_V1) {
- smmu->s1_input_size = 32;
+ smmu->va_size = smmu->ipa_size;
+ size = SZ_4K | SZ_2M | SZ_1G;
} else {
-#ifdef CONFIG_64BIT
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
- size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
-#else
- size = 32;
+ smmu->va_size = arm_smmu_id_size_to_bits(size);
+#ifndef CONFIG_64BIT
+ smmu->va_size = min(32UL, smmu->va_size);
#endif
- smmu->s1_input_size = size;
-
- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
- PAGE_SIZE);
- return -ENODEV;
- }
+ size = 0;
+ if (id & ID2_PTFS_4K)
+ size |= SZ_4K | SZ_2M | SZ_1G;
+ if (id & ID2_PTFS_16K)
+ size |= SZ_16K | SZ_32M;
+ if (id & ID2_PTFS_64K)
+ size |= SZ_64K | SZ_512M;
}
+ arm_smmu_ops.pgsize_bitmap &= size;
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+
if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
- smmu->s1_input_size, smmu->s1_output_size);
+ smmu->va_size, smmu->ipa_size);
if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
- smmu->s2_input_size, smmu->s2_output_size);
+ smmu->ipa_size, smmu->pa_size);
return 0;
}
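The net effect of the arm-smmu conversion is that map/unmap are now thin spinlock-protected wrappers around the io-pgtable ops, and iova_to_phys can use the hardware ATS1PR translation when ARM_SMMU_FEAT_TRANS_OPS is detected. Nothing changes for consumers, which still go through the generic IOMMU API; a hedged sketch of that unchanged flow (the device pointer and addresses are placeholders):

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Illustrative consumer flow; "dev" and the addresses are placeholders. */
static int use_smmu_domain(struct device *dev)
{
	struct iommu_domain *domain;
	phys_addr_t phys;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* finalises the domain */
	if (ret)
		goto out_free;

	/* Ends up in arm_smmu_map() -> ops->map() under pgtbl_lock. */
	ret = iommu_map(domain, 0x10000, 0x80000000, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* Uses ATS1PR when available, the software table walk otherwise. */
	phys = iommu_iova_to_phys(domain, 0x10000);
	pr_info("iova 0x10000 -> %pa\n", &phys);

	iommu_unmap(domain, 0x10000, SZ_4K);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}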
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 80ac68d884c5..abeedc9a78c2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -18,22 +18,13 @@
#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
-#include <linux/init.h>
-#include <linux/iommu.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
+#include "fsl_pamu.h"
+
#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/of_platform.h>
-#include <linux/bootmem.h>
#include <linux/genalloc.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-#include <asm/fsl_guts.h>
-#include "fsl_pamu.h"
+#include <asm/mpc85xx.h>
+#include <asm/fsl_guts.h>
/* define indexes for each operation mapping scenario */
#define OMI_QMAN 0x00
@@ -44,13 +35,13 @@
#define make64(high, low) (((u64)(high) << 32) | (low))
struct pamu_isr_data {
- void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
+ void __iomem *pamu_reg_base; /* Base address of PAMU regs */
unsigned int count; /* The number of PAMUs */
};
static struct paace *ppaact;
static struct paace *spaact;
-static struct ome *omt;
+static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@@ -58,14 +49,13 @@ static struct ome *omt;
* "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
-*/
-static const struct of_device_id guts_device_ids[] = {
+ */
+static const struct of_device_id guts_device_ids[] __initconst = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
};
-
/*
* Table for matching compatible strings, for device tree
* L3 cache controller node.
@@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = {
* "fsl,b4860-l3-cache-controller" corresponds to B4 &
* "fsl,p4080-l3-cache-controller" corresponds to other,
* SOCs.
-*/
+ */
static const struct of_device_id l3_device_ids[] = {
{ .compatible = "fsl,t4240-l3-cache-controller", },
{ .compatible = "fsl,b4860-l3-cache-controller", },
@@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = {
static u32 max_subwindow_count;
/* Pool for fspi allocation */
-struct gen_pool *spaace_pool;
+static struct gen_pool *spaace_pool;
/**
* pamu_get_max_subwin_cnt() - Return the maximum supported
@@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
- BUG_ON((addrspace_size & (addrspace_size - 1)));
+ BUG_ON(addrspace_size & (addrspace_size - 1));
/* window size is 2^(WSE+1) bytes */
return fls64(addrspace_size) - 2;
@@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
/* Derive the PAACE window count encoding for the subwindow count */
static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
{
- /* window count is 2^(WCE+1) bytes */
- return __ffs(subwindow_cnt) - 1;
+ /* window count is 2^(WCE+1) bytes */
+ return __ffs(subwindow_cnt) - 1;
}
/*
@@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
* If no SPAACE entry is available or the allocator can not reserve the required
* number of contiguous entries function returns ULONG_MAX indicating a failure.
*
-*/
+ */
static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
{
unsigned long spaace_addr;
@@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace) {
+ if (!paace)
return -ENOENT;
- }
}
set_bf(paace->impl_attr, PAACE_IA_CID, value);
@@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace) {
+ if (!paace)
return -ENOENT;
- }
- set_bf(paace->addr_bitfields, PAACE_AF_V,
- PAACE_V_INVALID);
+ set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
} else {
set_bf(paace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_DENIED);
+ PAACE_AP_PERMS_DENIED);
}
mb();
@@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin)
return 0;
}
-
/**
* pamu_config_paace() - Sets up PPAACE entry for specified liodn
*
@@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
unsigned long fspi;
if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
- pr_debug("window size too small or not a power of two %llx\n", win_size);
+ pr_debug("window size too small or not a power of two %pa\n",
+ &win_size);
return -EINVAL;
}
@@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
}
ppaace = pamu_get_ppaace(liodn);
- if (!ppaace) {
+ if (!ppaace)
return -ENOENT;
- }
/* window size is 2^(WSE+1) bytes */
set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
- map_addrspace_size_to_wse(win_size));
+ map_addrspace_size_to_wse(win_size));
pamu_init_ppaace(ppaace);
@@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
{
struct paace *paace;
-
/* setup sub-windows */
if (!subwin_cnt) {
pr_debug("Invalid subwindow count\n");
@@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
}
/**
-* get_ome_index() - Returns the index in the operation mapping table
-* for device.
-* @*omi_index: pointer for storing the index value
-*
-*/
+ * get_ome_index() - Returns the index in the operation mapping table
+ * for device.
+ * @*omi_index: pointer for storing the index value
+ *
+ */
void get_ome_index(u32 *omi_index, struct device *dev)
{
if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
@@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
node = of_find_matching_node(NULL, l3_device_ids);
if (node) {
- prop = of_get_property(node, "cache-stash-id", 0);
+ prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
- pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ pr_debug("missing cache-stash-id at %s\n",
+ node->full_name);
of_node_put(node);
return ~(u32)0;
}
@@ -570,9 +556,10 @@ found_cpu_node:
/* find the hwnode that represents the cache */
for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
if (stash_dest_hint == cache_level) {
- prop = of_get_property(node, "cache-stash-id", 0);
+ prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
- pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ pr_debug("missing cache-stash-id at %s\n",
+ node->full_name);
of_node_put(node);
return ~(u32)0;
}
@@ -580,10 +567,10 @@ found_cpu_node:
return be32_to_cpup(prop);
}
- prop = of_get_property(node, "next-level-cache", 0);
+ prop = of_get_property(node, "next-level-cache", NULL);
if (!prop) {
pr_debug("can't find next-level-cache at %s\n",
- node->full_name);
+ node->full_name);
of_node_put(node);
return ~(u32)0; /* can't traverse any further */
}
@@ -598,7 +585,7 @@ found_cpu_node:
}
pr_debug("stash dest not found for %d on vcpu %d\n",
- stash_dest_hint, vcpu);
+ stash_dest_hint, vcpu);
return ~(u32)0;
}
@@ -612,7 +599,7 @@ found_cpu_node:
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
case QMAN_PORTAL_PAACE:
set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
ppaace->op_encode.index_ot.omi = OMI_QMAN;
- /*Set DQRR and Frame stashing for the L3 cache */
+ /* Set DQRR and Frame stashing for the L3 cache */
set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
break;
case BMAN_PAACE:
@@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void get_pamu_cap_values(unsigned long pamu_reg_base)
+static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
*/
out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
- PAMU_ACCESS_VIOLATION_ENABLE);
+ PAMU_ACCESS_VIOLATION_ENABLE);
out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
return 0;
}
@@ -757,9 +744,9 @@ static void __init setup_liodns(void)
ppaace->wbah = 0;
set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
set_bf(ppaace->impl_attr, PAACE_IA_ATM,
- PAACE_ATM_NO_XLATE);
+ PAACE_ATM_NO_XLATE);
set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_ALL);
+ PAACE_AP_PERMS_ALL);
if (of_device_is_compatible(node, "fsl,qman-portal"))
setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
if (of_device_is_compatible(node, "fsl,qman"))
@@ -772,7 +759,7 @@ static void __init setup_liodns(void)
}
}
-irqreturn_t pamu_av_isr(int irq, void *arg)
+static irqreturn_t pamu_av_isr(int irq, void *arg)
{
struct pamu_isr_data *data = arg;
phys_addr_t phys;
@@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
pr_emerg("AVS1=%08x\n", avs1);
pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
- pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
- in_be32(p + PAMU_AVAL)));
+ pr_emerg("AVA=%016llx\n",
+ make64(in_be32(p + PAMU_AVAH),
+ in_be32(p + PAMU_AVAL)));
pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
- pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
- in_be32(p + PAMU_POEAL)));
+ pr_emerg("POEA=%016llx\n",
+ make64(in_be32(p + PAMU_POEAH),
+ in_be32(p + PAMU_POEAL)));
phys = make64(in_be32(p + PAMU_POEAH),
- in_be32(p + PAMU_POEAL));
+ in_be32(p + PAMU_POEAL));
/* Assume that POEA points to a PAACE */
if (phys) {
@@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Only the first four words are relevant */
for (j = 0; j < 4; j++)
- pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
+ pr_emerg("PAACE[%u]=%08x\n",
+ j, in_be32(paace + j));
}
/* clear access violation condition */
- out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
+ out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(!paace);
/* check if we got a violation for a disabled LIODN */
@@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Disable the LIODN */
ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(ret);
- pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
+ pr_emerg("Disabling liodn %x\n",
+ avs1 >> PAMU_AVS1_LIODN_SHIFT);
}
out_be32((p + PAMU_PICS), pics);
}
}
-
return IRQ_HANDLED;
}
@@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
}
if (i == 0 || i == num_laws) {
- /* This should never happen*/
+ /* This should never happen */
ret = -ENOENT;
goto error;
}
@@ -998,26 +988,27 @@ error:
static const struct {
u32 svr;
u32 port_id;
-} port_id_map[] = {
- {0x82100010, 0xFF000000}, /* P2040 1.0 */
- {0x82100011, 0xFF000000}, /* P2040 1.1 */
- {0x82100110, 0xFF000000}, /* P2041 1.0 */
- {0x82100111, 0xFF000000}, /* P2041 1.1 */
- {0x82110310, 0xFF000000}, /* P3041 1.0 */
- {0x82110311, 0xFF000000}, /* P3041 1.1 */
- {0x82010020, 0xFFF80000}, /* P4040 2.0 */
- {0x82000020, 0xFFF80000}, /* P4080 2.0 */
- {0x82210010, 0xFC000000}, /* P5010 1.0 */
- {0x82210020, 0xFC000000}, /* P5010 2.0 */
- {0x82200010, 0xFC000000}, /* P5020 1.0 */
- {0x82050010, 0xFF800000}, /* P5021 1.0 */
- {0x82040010, 0xFF800000}, /* P5040 1.0 */
+} port_id_map[] __initconst = {
+ {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
+ {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
+ {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
+ {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */
+ {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */
+ {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */
+ {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */
+ {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */
+ {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */
+ {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */
+ {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */
+ {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */
+ {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */
};
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
static int __init fsl_pamu_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
struct ccsr_guts __iomem *guts_regs = NULL;
u32 pamubypenr, pamu_counter;
@@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
* NOTE : All PAMUs share the same LIODN tables.
*/
- pamu_regs = of_iomap(pdev->dev.of_node, 0);
+ pamu_regs = of_iomap(dev->of_node, 0);
if (!pamu_regs) {
- dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
+ dev_err(dev, "ioremap of PAMU node failed\n");
return -ENOMEM;
}
- of_get_address(pdev->dev.of_node, 0, &size, NULL);
+ of_get_address(dev->of_node, 0, &size, NULL);
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (irq == NO_IRQ) {
- dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
+ dev_warn(dev, "no interrupts listed in PAMU node\n");
goto error;
}
- data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
- dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
ret = -ENOMEM;
goto error;
}
@@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* The ISR needs access to the regs, so we won't iounmap them */
ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
if (ret < 0) {
- dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
- ret, irq);
+ dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
goto error;
}
guts_node = of_find_matching_node(NULL, guts_device_ids);
if (!guts_node) {
- dev_err(&pdev->dev, "could not find GUTS node %s\n",
- pdev->dev.of_node->full_name);
+ dev_err(dev, "could not find GUTS node %s\n",
+ dev->of_node->full_name);
ret = -ENODEV;
goto error;
}
@@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
guts_regs = of_iomap(guts_node, 0);
of_node_put(guts_node);
if (!guts_regs) {
- dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
+ dev_err(dev, "ioremap of GUTS node failed\n");
ret = -ENODEV;
goto error;
}
@@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
- dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
+ dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
ret = -ENOMEM;
goto error;
}
@@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* Make sure the memory is naturally aligned */
if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
- dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
+ dev_err(dev, "PAACT/OMT block is unaligned\n");
ret = -ENOMEM;
goto error;
}
@@ -1121,8 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
- dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
- (unsigned long long) ppaact_phys);
+ dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
/* Check to see if we need to implement the work-around on this SOC */
@@ -1130,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
csd_port_id = port_id_map[i].port_id;
- dev_dbg(&pdev->dev, "found matching SVR %08x\n",
+ dev_dbg(dev, "found matching SVR %08x\n",
port_id_map[i].svr);
break;
}
}
if (csd_port_id) {
- dev_dbg(&pdev->dev, "creating coherency subdomain at address "
- "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
- mem_size, csd_port_id);
+ dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
+ &ppaact_phys, mem_size, csd_port_id);
ret = create_csd(ppaact_phys, mem_size, csd_port_id);
if (ret) {
- dev_err(&pdev->dev, "could not create coherence "
- "subdomain\n");
+ dev_err(dev, "could not create coherence subdomain\n");
return ret;
}
}
@@ -1155,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
if (!spaace_pool) {
ret = -ENOMEM;
- dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
+ dev_err(dev, "Failed to allocate spaace gen pool\n");
goto error;
}
@@ -1168,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
- pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
+ pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
- spaact_phys, omt_phys);
+ spaact_phys, omt_phys);
/* Disable PAMU bypass for this PAMU */
pamubypenr &= ~pamu_counter;
}
@@ -1182,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
iounmap(guts_regs);
- /* Enable DMA for the LIODNs in the device tree*/
+ /* Enable DMA for the LIODNs in the device tree */
setup_liodns();
@@ -1214,17 +1200,7 @@ error:
return ret;
}
-static const struct of_device_id fsl_of_pamu_ids[] = {
- {
- .compatible = "fsl,p4080-pamu",
- },
- {
- .compatible = "fsl,pamu",
- },
- {},
-};
-
-static struct platform_driver fsl_of_pamu_driver = {
+static struct platform_driver fsl_of_pamu_driver __initdata = {
.driver = {
.name = "fsl-of-pamu",
},
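
For reference, a minimal stand-alone sketch (not part of the patch) of how the rewritten port_id_map is meant to be matched in fsl_pamu_probe(): the SoC's SVR is read, the Security (E) bit is masked off, and the table is scanned for an exact match. The numeric value of SVR_P2040 used below (0x821000) is an assumption; in the kernel the macro presumably comes from asm/mpc85xx.h, so (SVR_P2040 << 8) | 0x10 reproduces the old literal 0x82100010.

/* Hedged illustration only; compiles as ordinary user-space C. */
#include <stdint.h>
#include <stdio.h>

#define SVR_P2040     0x821000   /* assumed value of the kernel macro */
#define SVR_SECURITY  0x80000    /* the Security (E) bit, as in the patch */

static uint32_t lookup_csd_port_id(uint32_t svr)
{
	static const struct { uint32_t svr, port_id; } map[] = {
		{ (SVR_P2040 << 8) | 0x10, 0xFF000000 },  /* P2040 1.0 */
		{ (SVR_P2040 << 8) | 0x11, 0xFF000000 },  /* P2040 1.1 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].svr == (svr & ~SVR_SECURITY))  /* ignore the E bit */
			return map[i].port_id;
	return 0;                                         /* no CSD work-around needed */
}

int main(void)
{
	/* A P2040E rev 1.0 reports SVR 0x82180010; the E bit is masked off. */
	printf("port id = 0x%08x\n", lookup_csd_port_id(0x82180010));
	return 0;
}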
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index 8fc1a125b16e..aab723f91f12 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -19,13 +19,15 @@
#ifndef __FSL_PAMU_H
#define __FSL_PAMU_H
+#include <linux/iommu.h>
+
#include <asm/fsl_pamu_stash.h>
/* Bit Field macros
* v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
*/
-#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
-#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
+#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
+#define get_bf(v, m) (((v) & (m)) >> m##_SHIFT)
/* PAMU CCSR space */
#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
@@ -65,7 +67,7 @@ struct pamu_mmap_regs {
#define PAMU_AVS1_GCV 0x2000
#define PAMU_AVS1_PDV 0x4000
#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
- | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
+ | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
#define PAMU_AVS1_LIODN_SHIFT 16
#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
@@ -198,8 +200,7 @@ struct pamu_mmap_regs {
#define PAACE_ATM_NO_XLATE 0x00
#define PAACE_ATM_WINDOW_XLATE 0x01
#define PAACE_ATM_PAGE_XLATE 0x02
-#define PAACE_ATM_WIN_PG_XLATE \
- (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
+#define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
#define PAACE_OTM_NO_XLATE 0x00
#define PAACE_OTM_IMMEDIATE 0x01
#define PAACE_OTM_INDEXED 0x02
@@ -219,7 +220,7 @@ struct pamu_mmap_regs {
#define PAACE_TCEF_FORMAT0_8B 0x00
#define PAACE_TCEF_FORMAT1_RSVD 0x01
/*
- * Hard coded value for the PAACT size to accomodate
+ * Hard coded value for the PAACT size to accommodate
* maximum LIODN value generated by u-boot.
*/
#define PAACE_NUMBER_ENTRIES 0x500
@@ -332,7 +333,7 @@ struct paace {
#define NUM_MOE 128
struct ome {
u8 moe[NUM_MOE];
-} __attribute__((packed));
+} __packed;
#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c828f80d48b0..ceebd287b660 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -19,26 +19,10 @@
#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
-#include <linux/init.h>
-#include <linux/iommu.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/of_platform.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-
-#include <asm/pci-bridge.h>
-#include <sysdev/fsl_pci.h>
-
#include "fsl_pamu_domain.h"
+#include <sysdev/fsl_pci.h>
+
/*
* Global spinlock that needs to be held while
* configuring PAMU.
@@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock);
static int __init iommu_init_mempool(void)
{
-
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
- sizeof(struct fsl_dma_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
-
- NULL);
+ sizeof(struct fsl_dma_domain),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
if (!fsl_pamu_domain_cache) {
pr_debug("Couldn't create fsl iommu_domain cache\n");
return -ENOMEM;
}
iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
+ sizeof(struct device_domain_info),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
if (!iommu_devinfo_cache) {
pr_debug("Couldn't create devinfo cache\n");
kmem_cache_destroy(fsl_pamu_domain_cache);
@@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void)
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
u32 win_cnt = dma_domain->win_cnt;
- struct dma_window *win_ptr =
- &dma_domain->win_arr[0];
+ struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
geom = &dma_domain->iommu_domain->geometry;
@@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
}
if (win_ptr->valid)
- return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
+ return win_ptr->paddr + (iova & (win_ptr->size - 1));
return 0;
}
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
- struct dma_window *sub_win_ptr =
- &dma_domain->win_arr[0];
+ struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
int i, ret;
unsigned long rpn, flags;
for (i = 0; i < dma_domain->win_cnt; i++) {
if (sub_win_ptr[i].valid) {
- rpn = sub_win_ptr[i].paddr >>
- PAMU_PAGE_SHIFT;
+ rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
sub_win_ptr[i].size,
@@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
sub_win_ptr[i].prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
+ pr_debug("SPAACE configuration failed for liodn %d\n",
liodn);
return ret;
}
@@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
0, wnd->prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret)
- pr_debug("PAMU PAACE configuration failed for liodn %d\n",
- liodn);
+ pr_debug("PAACE configuration failed for liodn %d\n", liodn);
return ret;
}
@@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
return map_subwins(liodn, dma_domain);
else
return map_win(liodn, dma_domain);
-
}
/* Update window/subwindow mapping for the LIODN */
@@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
(wnd_nr > 0) ? 1 : 0,
wnd->prot);
if (ret)
- pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
+ pr_debug("Subwindow reconfiguration failed for liodn %d\n",
+ liodn);
} else {
phys_addr_t wnd_addr;
@@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
wnd->size,
~(u32)0,
wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
+ dma_domain->snoop_id, dma_domain->stash_id,
+ 0, wnd->prot);
if (ret)
- pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
+ pr_debug("Window reconfiguration failed for liodn %d\n",
+ liodn);
}
spin_unlock_irqrestore(&iommu_lock, flags);
@@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
}
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
- u32 val)
+ u32 val)
{
int ret = 0, i;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
if (!dma_domain->win_arr) {
- pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
+ pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
+ liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return -EINVAL;
}
@@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
for (i = 0; i < dma_domain->win_cnt; i++) {
ret = pamu_update_paace_stash(liodn, i, val);
if (ret) {
- pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
+ pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
+ i, liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
@@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
- struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
+ struct fsl_dma_domain *dma_domain,
+ struct iommu_domain_geometry *geom_attr,
+ u32 win_cnt)
{
phys_addr_t window_addr, window_size;
phys_addr_t subwin_size;
@@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
dma_domain->stash_id, win_cnt, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
+ pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
+ liodn, win_cnt);
return ret;
}
@@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
0, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
+ pr_debug("SPAACE configuration failed for liodn %d\n",
+ liodn);
return ret;
}
}
@@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova)
* to PAMU page size.
*/
if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
- pr_debug("%s: size too small or not a power of two\n", __func__);
+ pr_debug("Size too small or not a power of two\n");
return -EINVAL;
}
- /* iova must be page size aligned*/
+ /* iova must be page size aligned */
if (iova & (size - 1)) {
- pr_debug("%s: address is not aligned with window size\n", __func__);
+ pr_debug("Address is not aligned with window size\n");
return -EINVAL;
}
@@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
-
}
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+ dma_addr_t iova)
{
struct fsl_dma_domain *dma_domain = domain->priv;
- if ((iova < domain->geometry.aperture_start) ||
- iova > (domain->geometry.aperture_end))
+ if (iova < domain->geometry.aperture_start ||
+ iova > domain->geometry.aperture_end)
return 0;
return get_phys_addr(dma_domain, iova);
@@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
list_for_each_entry(info, &dma_domain->devices, link) {
ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
- geom_attr, win_cnt);
+ geom_attr, win_cnt);
if (ret)
break;
}
@@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
}
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
if (size > win_size) {
- pr_debug("Invalid window size \n");
+ pr_debug("Invalid window size\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
@@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
* and window mappings.
*/
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
- struct device *dev, const u32 *liodn,
- int num)
+ struct device *dev, const u32 *liodn,
+ int num)
{
unsigned long flags;
struct iommu_domain *domain = dma_domain->iommu_domain;
@@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&dma_domain->domain_lock, flags);
for (i = 0; i < num; i++) {
-
/* Ensure that LIODN value is valid */
if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
pr_debug("Invalid liodn %d, attach device failed for %s\n",
- liodn[i], dev->of_node->full_name);
+ liodn[i], dev->of_node->full_name);
ret = -EINVAL;
break;
}
@@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
*/
if (dma_domain->win_arr) {
u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
+
ret = pamu_set_liodn(liodn[i], dev, dma_domain,
- &domain->geometry,
- win_cnt);
+ &domain->geometry, win_cnt);
if (ret)
break;
if (dma_domain->mapped) {
@@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
if (liodn) {
liodn_cnt = len / sizeof(u32);
- ret = handle_attach_device(dma_domain, dev,
- liodn, liodn_cnt);
+ ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
} else {
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
- ret = -EINVAL;
+ dev->of_node->full_name);
+ ret = -EINVAL;
}
return ret;
}
static void fsl_pamu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
const u32 *prop;
@@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
+ dev->of_node->full_name);
}
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data)
* DMA outside of the geometry.
*/
if (check_size(geom_size, geom_attr->aperture_start) ||
- !geom_attr->force_aperture) {
- pr_debug("Invalid PAMU geometry attributes\n");
- return -EINVAL;
- }
+ !geom_attr->force_aperture) {
+ pr_debug("Invalid PAMU geometry attributes\n");
+ return -EINVAL;
+ }
spin_lock_irqsave(&dma_domain->domain_lock, flags);
if (dma_domain->enabled) {
@@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
spin_lock_irqsave(&dma_domain->domain_lock, flags);
memcpy(&dma_domain->dma_stash, stash_attr,
- sizeof(struct pamu_stash_attribute));
+ sizeof(struct pamu_stash_attribute));
dma_domain->stash_id = get_stash_id(stash_attr->cache,
stash_attr->cpu);
@@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
return ret;
}
-/* Configure domain dma state i.e. enable/disable DMA*/
+/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
struct device_domain_info *info;
@@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
dma_domain->enabled = enable;
- list_for_each_entry(info, &dma_domain->devices,
- link) {
+ list_for_each_entry(info, &dma_domain->devices, link) {
ret = (enable) ? pamu_enable_liodn(info->liodn) :
pamu_disable_liodn(info->liodn);
if (ret)
@@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
+ enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
-
switch (attr_type) {
case DOMAIN_ATTR_GEOMETRY:
ret = configure_domain_geometry(domain, data);
@@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
- };
+ }
return ret;
}
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
+ enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
-
switch (attr_type) {
case DOMAIN_ATTR_FSL_PAMU_STASH:
- memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
- sizeof(struct pamu_stash_attribute));
+ memcpy(data, &dma_domain->dma_stash,
+ sizeof(struct pamu_stash_attribute));
break;
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
*(int *)data = dma_domain->enabled;
@@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
- };
+ }
return ret;
}
@@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
/* Check the PCI controller version number by readding BRR1 register */
version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
version &= PCI_FSL_BRR1_VER;
- /* If PCI controller version is >= 0x204 we can partition endpoints*/
- if (version >= 0x204)
- return 1;
-
- return 0;
+ /* If PCI controller version is >= 0x204 we can partition endpoints */
+ return version >= 0x204;
}
/* Get iommu group information from peer devices or devices on the parent bus */
@@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
if (pci_ctl->parent->iommu_group) {
group = get_device_iommu_group(pci_ctl->parent);
iommu_group_remove_device(pci_ctl->parent);
- } else
+ } else {
group = get_shared_pci_device_group(pdev);
+ }
}
if (!group)
@@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
}
ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- ((w_count > 1) ? w_count : 0));
+ w_count > 1 ? w_count : 0);
if (!ret) {
kfree(dma_domain->win_arr);
- dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
- w_count, GFP_ATOMIC);
+ dma_domain->win_arr = kcalloc(w_count,
+ sizeof(*dma_domain->win_arr),
+ GFP_ATOMIC);
if (!dma_domain->win_arr) {
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -ENOMEM;
@@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.remove_device = fsl_pamu_remove_device,
};
-int pamu_domain_init(void)
+int __init pamu_domain_init(void)
{
int ret = 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 40dfbc0444c0..ae4c1a854e57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -71,6 +71,9 @@
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
@@ -485,7 +488,6 @@ __setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
-static struct kmem_cache *iommu_iova_cache;
static inline void *alloc_pgtable_page(int node)
{
@@ -523,16 +525,6 @@ static inline void free_devinfo_mem(void *vaddr)
kmem_cache_free(iommu_devinfo_cache, vaddr);
}
-struct iova *alloc_iova_mem(void)
-{
- return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
- kmem_cache_free(iommu_iova_cache, iova);
-}
-
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -1643,7 +1635,8 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
- init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
+ DMA_32BIT_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1701,7 +1694,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
+ DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@@ -3427,23 +3421,6 @@ static inline int iommu_devinfo_cache_init(void)
return ret;
}
-static inline int iommu_iova_cache_init(void)
-{
- int ret = 0;
-
- iommu_iova_cache = kmem_cache_create("iommu_iova",
- sizeof(struct iova),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_iova_cache) {
- printk(KERN_ERR "Couldn't create iova cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
static int __init iommu_init_mempool(void)
{
int ret;
@@ -3461,7 +3438,7 @@ static int __init iommu_init_mempool(void)
kmem_cache_destroy(iommu_domain_cache);
domain_error:
- kmem_cache_destroy(iommu_iova_cache);
+ iommu_iova_cache_destroy();
return -ENOMEM;
}
@@ -3470,8 +3447,7 @@ static void __init iommu_exit_mempool(void)
{
kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache);
- kmem_cache_destroy(iommu_iova_cache);
-
+ iommu_iova_cache_destroy();
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
@@ -4342,7 +4318,8 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
+ DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index a55b207b9425..14de1ab223c8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -32,8 +32,9 @@ struct hpet_scope {
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
-#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
+#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
+static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
@@ -481,11 +482,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (iommu->ir_table)
return 0;
- ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+ ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
if (!ir_table)
return -ENOMEM;
- pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
INTR_REMAP_PAGE_ORDER);
if (!pages) {
@@ -566,13 +567,27 @@ static int __init dmar_x2apic_optout(void)
return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
-static int __init intel_irq_remapping_supported(void)
+static void __init intel_cleanup_irq_remapping(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ for_each_iommu(iommu, drhd) {
+ if (ecap_ir_support(iommu->ecap)) {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ }
+ }
+
+ if (x2apic_supported())
+ pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+}
+
+static int __init intel_prepare_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- if (disable_irq_remap)
- return 0;
if (irq_remap_broken) {
printk(KERN_WARNING
"This system BIOS has enabled interrupt remapping\n"
@@ -581,38 +596,45 @@ static int __init intel_irq_remapping_supported(void)
"interrupt remapping is being disabled. Please\n"
"contact your BIOS vendor for an update\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- disable_irq_remap = 1;
- return 0;
+ return -ENODEV;
}
+ if (dmar_table_init() < 0)
+ return -ENODEV;
+
if (!dmar_ir_support())
- return 0;
+ return -ENODEV;
+
+ if (parse_ioapics_under_ir() != 1) {
+ printk(KERN_INFO "Not enabling interrupt remapping\n");
+ goto error;
+ }
+ /* First make sure all IOMMUs support IRQ remapping */
for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
- return 0;
+ goto error;
- return 1;
+ /* Do the allocations early */
+ for_each_iommu(iommu, drhd)
+ if (intel_setup_irq_remapping(iommu))
+ goto error;
+
+ return 0;
+
+error:
+ intel_cleanup_irq_remapping();
+ return -ENODEV;
}
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- bool x2apic_present;
int setup = 0;
int eim = 0;
- x2apic_present = x2apic_supported();
-
- if (parse_ioapics_under_ir() != 1) {
- printk(KERN_INFO "Not enable interrupt remapping\n");
- goto error;
- }
-
- if (x2apic_present) {
- pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
+ if (x2apic_supported()) {
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
@@ -646,16 +668,15 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
- for_each_iommu(iommu, drhd) {
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
+ for_each_iommu(iommu, drhd)
if (eim && !ecap_eim_support(iommu->ecap)) {
printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
" ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
- goto error;
+ eim = 0;
}
- }
+ eim_mode = eim;
+ if (eim)
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
/*
* Enable queued invalidation for all the DRHD's.
@@ -675,12 +696,6 @@ static int __init intel_enable_irq_remapping(void)
* Setup Interrupt-remapping for all the DRHD's now.
*/
for_each_iommu(iommu, drhd) {
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
- if (intel_setup_irq_remapping(iommu))
- goto error;
-
iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
@@ -702,15 +717,7 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
- for_each_iommu(iommu, drhd)
- if (ecap_ir_support(iommu->ecap)) {
- iommu_disable_irq_remapping(iommu);
- intel_teardown_irq_remapping(iommu);
- }
-
- if (x2apic_present)
- pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
-
+ intel_cleanup_irq_remapping();
return -1;
}
@@ -1199,8 +1206,7 @@ static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
}
struct irq_remap_ops intel_irq_remap_ops = {
- .supported = intel_irq_remapping_supported,
- .prepare = dmar_table_init,
+ .prepare = intel_prepare_irq_remapping,
.enable = intel_enable_irq_remapping,
.disable = disable_irq_remapping,
.reenable = reenable_irq_remapping,
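
A hedged illustration of why IRTE_DEST() now keys off the runtime eim_mode instead of x2apic_mode: with EIM (x2APIC) enabled, the IRTE destination field carries the full 32-bit APIC ID, while in legacy xAPIC mode the 8-bit APIC ID sits in bits 15:8 of that field, hence the << 8. The snippet below only mirrors that encoding as a stand-alone sketch; it is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the IRTE destination encoding selected by eim_mode. */
static uint32_t irte_dest(int eim_mode, uint32_t dest)
{
	/* EIM (x2APIC): full 32-bit destination; xAPIC: APIC ID in bits 15:8 */
	return eim_mode ? dest : dest << 8;
}

int main(void)
{
	printf("xAPIC  APIC ID 0x12 -> 0x%08x\n", irte_dest(0, 0x12));   /* 0x00001200 */
	printf("x2APIC APIC ID 0x12 -> 0x%08x\n", irte_dest(1, 0x12));   /* 0x00000012 */
	return 0;
}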
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..5a500edf00cc
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -0,0 +1,986 @@
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+#define ARM_LPAE_MAX_ADDR_BITS 48
+#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
+#define ARM_LPAE_MAX_LEVELS 4
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct arm_lpae_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+ container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+/*
+ * For consistency with the architecture, we always consider
+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
+ */
+#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
+
+/*
+ * Calculate the right shift amount to get to the portion describing level l
+ * in a virtual address mapped by the pagetable in d.
+ */
+#define ARM_LPAE_LVL_SHIFT(l,d) \
+ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
+ * (d)->bits_per_level) + (d)->pg_shift)
+
+#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
+
+/*
+ * Calculate the index at level l used to map virtual address a using the
+ * pagetable in d.
+ */
+#define ARM_LPAE_PGD_IDX(l,d) \
+ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+
+#define ARM_LPAE_LVL_IDX(a,l,d) \
+ (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
+ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+
+/* Calculate the block/page mapping size at level l for pagetable in d. */
+#define ARM_LPAE_BLOCK_SIZE(l,d) \
+ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+
+/* Page table bits */
+#define ARM_LPAE_PTE_TYPE_SHIFT 0
+#define ARM_LPAE_PTE_TYPE_MASK 0x3
+
+#define ARM_LPAE_PTE_TYPE_BLOCK 1
+#define ARM_LPAE_PTE_TYPE_TABLE 3
+#define ARM_LPAE_PTE_TYPE_PAGE 3
+
+#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
+#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
+#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
+#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
+#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
+#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
+#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
+#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
+
+#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
+/* Ignore the contiguous bit for block splitting */
+#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
+#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
+ ARM_LPAE_PTE_ATTR_HI_MASK)
+
+/* Stage-1 PTE */
+#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
+#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
+
+/* Stage-2 PTE */
+#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
+#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
+#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
+#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
+
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE (1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+
+#define ARM_LPAE_TCR_TG0_4K (0 << 14)
+#define ARM_LPAE_TCR_TG0_64K (1 << 14)
+#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+
+#define ARM_LPAE_TCR_SH0_SHIFT 12
+#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH_NS 0
+#define ARM_LPAE_TCR_SH_OS 2
+#define ARM_LPAE_TCR_SH_IS 3
+
+#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_RGN_MASK 0x3
+#define ARM_LPAE_TCR_RGN_NC 0
+#define ARM_LPAE_TCR_RGN_WBWA 1
+#define ARM_LPAE_TCR_RGN_WT 2
+#define ARM_LPAE_TCR_RGN_WB 3
+
+#define ARM_LPAE_TCR_SL0_SHIFT 6
+#define ARM_LPAE_TCR_SL0_MASK 0x3
+
+#define ARM_LPAE_TCR_T0SZ_SHIFT 0
+#define ARM_LPAE_TCR_SZ_MASK 0xf
+
+#define ARM_LPAE_TCR_PS_SHIFT 16
+#define ARM_LPAE_TCR_PS_MASK 0x7
+
+#define ARM_LPAE_TCR_IPS_SHIFT 32
+#define ARM_LPAE_TCR_IPS_MASK 0x7
+
+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+
+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
+#define ARM_LPAE_MAIR_ATTR_MASK 0xff
+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
+#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+
+/* IOPTE accessors */
+#define iopte_deref(pte,d) \
+ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
+ & ~((1ULL << (d)->pg_shift) - 1)))
+
+#define iopte_type(pte,l) \
+ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
+
+#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
+
+#define iopte_leaf(pte,l) \
+ (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
+
+#define iopte_to_pfn(pte,d) \
+ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
+
+#define pfn_to_iopte(pfn,d) \
+ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
+
+struct arm_lpae_io_pgtable {
+ struct io_pgtable iop;
+
+ int levels;
+ size_t pgd_size;
+ unsigned long pg_shift;
+ unsigned long bits_per_level;
+
+ void *pgd;
+};
+
+typedef u64 arm_lpae_iopte;
+
+static bool selftest_running = false;
+
+static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte = prot;
+
+ /* We require an unmap first */
+ if (iopte_leaf(*ptep, lvl)) {
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ }
+
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NS;
+
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ pte |= ARM_LPAE_PTE_TYPE_PAGE;
+ else
+ pte |= ARM_LPAE_PTE_TYPE_BLOCK;
+
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
+
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+ return 0;
+}
+
+static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
+ phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
+ int lvl, arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *cptep, pte;
+ void *cookie = data->iop.cookie;
+ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ /* Find our entry at the current level */
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+
+ /* If we can install a leaf entry at this level, then do so */
+ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+
+ /* We can't allocate tables at the final level */
+ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
+ return -EINVAL;
+
+ /* Grab a pointer to the next level */
+ pte = *ptep;
+ if (!pte) {
+ cptep = alloc_pages_exact(1UL << data->pg_shift,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!cptep)
+ return -ENOMEM;
+
+ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
+ cookie);
+ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NSTABLE;
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ } else {
+ cptep = iopte_deref(pte, data);
+ }
+
+ /* Rinse, repeat */
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+}
+
+static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
+ int prot)
+{
+ arm_lpae_iopte pte;
+
+ if (data->iop.fmt == ARM_64_LPAE_S1 ||
+ data->iop.fmt == ARM_32_LPAE_S1) {
+ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+
+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+ pte |= ARM_LPAE_PTE_AP_RDONLY;
+
+ if (prot & IOMMU_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ } else {
+ pte = ARM_LPAE_PTE_HAP_FAULT;
+ if (prot & IOMMU_READ)
+ pte |= ARM_LPAE_PTE_HAP_READ;
+ if (prot & IOMMU_WRITE)
+ pte |= ARM_LPAE_PTE_HAP_WRITE;
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
+ else
+ pte |= ARM_LPAE_PTE_MEMATTR_NC;
+ }
+
+ if (prot & IOMMU_NOEXEC)
+ pte |= ARM_LPAE_PTE_XN;
+
+ return pte;
+}
+
+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+ arm_lpae_iopte prot;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+}
+
+static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *start, *end;
+ unsigned long table_size;
+
+ /* Only leaf entries at the last level */
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ return;
+
+ if (lvl == ARM_LPAE_START_LVL(data))
+ table_size = data->pgd_size;
+ else
+ table_size = 1UL << data->pg_shift;
+
+ start = ptep;
+ end = (void *)ptep + table_size;
+
+ while (ptep != end) {
+ arm_lpae_iopte pte = *ptep++;
+
+ if (!pte || iopte_leaf(pte, lvl))
+ continue;
+
+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+ }
+
+ free_pages_exact(start, table_size);
+}
+
+static void arm_lpae_free_pgtable(struct io_pgtable *iop)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ kfree(data);
+}
+
+static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep, size_t blk_size)
+{
+ unsigned long blk_start, blk_end;
+ phys_addr_t blk_paddr;
+ arm_lpae_iopte table = 0;
+ void *cookie = data->iop.cookie;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+
+ blk_start = iova & ~(blk_size - 1);
+ blk_end = blk_start + blk_size;
+ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
+
+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
+ arm_lpae_iopte *tablep;
+
+ /* Unmap! */
+ if (blk_start == iova)
+ continue;
+
+ /* __arm_lpae_map expects a pointer to the start of the table */
+ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
+ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
+ tablep) < 0) {
+ if (table) {
+ /* Free the table we allocated */
+ tablep = iopte_deref(table, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, tablep);
+ }
+ return 0; /* Bytes unmapped */
+ }
+ }
+
+ *ptep = table;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ iova &= ~(blk_size - 1);
+ tlb->tlb_add_flush(iova, blk_size, true, cookie);
+ return size;
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+ void *cookie = data->iop.cookie;
+ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ pte = *ptep;
+
+ /* Something went horribly wrong and we ran out of page table */
+ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
+ return 0;
+
+ /* If the size matches this level, we're in the right place */
+ if (size == blk_size) {
+ *ptep = 0;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+
+ if (!iopte_leaf(pte, lvl)) {
+ /* Also flush any partial walks */
+ tlb->tlb_add_flush(iova, size, false, cookie);
+ tlb->tlb_sync(data->iop.cookie);
+ ptep = iopte_deref(pte, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else {
+ tlb->tlb_add_flush(iova, size, true, cookie);
+ }
+
+ return size;
+ } else if (iopte_leaf(pte, lvl)) {
+ /*
+ * Insert a table at the next level to map the old region,
+ * minus the part we want to unmap
+ */
+ return arm_lpae_split_blk_unmap(data, iova, size,
+ iopte_prot(pte), lvl, ptep,
+ blk_size);
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte, data);
+ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+}
+
+static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped;
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
+ if (unmapped)
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+
+ return unmapped;
+}
+
+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte, *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ do {
+ /* Valid IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ /* Grab the IOPTE we're interested in */
+ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
+
+ /* Valid entry? */
+ if (!pte)
+ return 0;
+
+ /* Leaf entry? */
+ if (iopte_leaf(pte,lvl))
+ goto found_translation;
+
+ /* Take it to the next level */
+ ptep = iopte_deref(pte, data);
+ } while (++lvl < ARM_LPAE_MAX_LEVELS);
+
+ /* Ran out of page tables to walk */
+ return 0;
+
+found_translation:
+ iova &= ((1 << data->pg_shift) - 1);
+ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+}
+
+static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
+{
+ unsigned long granule;
+
+ /*
+ * We need to restrict the supported page sizes to match the
+ * translation regime for a particular granule. Aim to match
+ * the CPU page size if possible, otherwise prefer smaller sizes.
+ * While we're at it, restrict the block sizes to match the
+ * chosen granule.
+ */
+ if (cfg->pgsize_bitmap & PAGE_SIZE)
+ granule = PAGE_SIZE;
+ else if (cfg->pgsize_bitmap & ~PAGE_MASK)
+ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
+ else if (cfg->pgsize_bitmap & PAGE_MASK)
+ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
+ else
+ granule = 0;
+
+ switch (granule) {
+ case SZ_4K:
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ break;
+ case SZ_16K:
+ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
+ break;
+ case SZ_64K:
+ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
+ break;
+ default:
+ cfg->pgsize_bitmap = 0;
+ }
+}
+
+static struct arm_lpae_io_pgtable *
+arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ unsigned long va_bits, pgd_bits;
+ struct arm_lpae_io_pgtable *data;
+
+ arm_lpae_restrict_pgsizes(cfg);
+
+ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
+ return NULL;
+
+ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+
+ va_bits = cfg->ias - data->pg_shift;
+ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+
+ /* Calculate the actual size of our pgd (without concatenation) */
+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = arm_lpae_map,
+ .unmap = arm_lpae_unmap,
+ .iova_to_phys = arm_lpae_iova_to_phys,
+ };
+
+ return data;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /* TCR */
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ cfg->arm_lpae_s1_cfg.tcr = reg;
+
+ /* MAIRs */
+ reg = (ARM_LPAE_MAIR_ATTR_NC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_LPAE_MAIR_ATTR_WBRWA
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_LPAE_MAIR_ATTR_DEVICE
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+
+ cfg->arm_lpae_s1_cfg.mair[0] = reg;
+ cfg->arm_lpae_s1_cfg.mair[1] = 0;
+
+ /* Looking good; allocate a pgd */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* TTBRs */
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg, sl;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /*
+ * Concatenate PGDs at level 1 if possible in order to reduce
+ * the depth of the stage-2 walk.
+ */
+ if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ unsigned long pgd_pages;
+
+ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
+ data->pgd_size = pgd_pages << data->pg_shift;
+ data->levels--;
+ }
+ }
+
+ /* VTCR */
+ reg = ARM_64_LPAE_S2_TCR_RES1 |
+ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ sl = ARM_LPAE_START_LVL(data);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ sl++; /* SL0 format is different for 4K granule size */
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
+ cfg->arm_lpae_s2_cfg.vtcr = reg;
+
+ /* Allocate pgd pages */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* VTTBR */
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 32 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
+ if (iop) {
+ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
+ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
+ }
+
+ return iop;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 40 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
+ if (iop)
+ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
+
+ return iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_sync = dummy_tlb_sync,
+ .flush_pgtable = dummy_flush_pgtable,
+};
+
+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+ cfg->pgsize_bitmap, cfg->ias);
+ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
+ data->levels, data->pgd_size, data->pg_shift,
+ data->bits_per_level, data->pgd);
+}
+
+#define __FAIL(ops, i) ({ \
+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
+ arm_lpae_dump_ops(ops); \
+ selftest_running = false; \
+ -EFAULT; \
+})
+
+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+{
+ static const enum io_pgtable_fmt fmts[] = {
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ };
+
+ int i, j;
+ unsigned long iova;
+ size_t size;
+ struct io_pgtable_ops *ops;
+
+ selftest_running = true;
+
+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+ cfg_cookie = cfg;
+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+ if (!ops) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(ops, i);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->map(ops, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+ return __FAIL(ops, i);
+
+ /* Overlapping mappings */
+ if (!ops->map(ops, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ /* Partial unmap */
+ size = 1UL << __ffs(cfg->pgsize_bitmap);
+ if (ops->unmap(ops, SZ_1G + size, size) != size)
+ return __FAIL(ops, i);
+
+ /* Remap of partial unmap */
+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+ return __FAIL(ops, i);
+
+ /* Full unmap */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->unmap(ops, iova, size) != size)
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(ops, i);
+
+ /* Remap full block */
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ free_io_pgtable_ops(ops);
+ }
+
+ selftest_running = false;
+ return 0;
+}
+
+static int __init arm_lpae_do_selftests(void)
+{
+ static const unsigned long pgsize[] = {
+ SZ_4K | SZ_2M | SZ_1G,
+ SZ_16K | SZ_32M,
+ SZ_64K | SZ_512M,
+ };
+
+ static const unsigned int ias[] = {
+ 32, 36, 40, 42, 44, 48,
+ };
+
+ int i, j, pass = 0, fail = 0;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .oas = 48,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
+ for (j = 0; j < ARRAY_SIZE(ias); ++j) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = ias[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
+ pgsize[i], ias[j]);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
+ }
+
+ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ return fail ? -EFAULT : 0;
+}
+subsys_initcall(arm_lpae_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
new file mode 100644
index 000000000000..6436fe24bc2f
--- /dev/null
+++ b/drivers/iommu/io-pgtable.c
@@ -0,0 +1,82 @@
+/*
+ * Generic page table allocator for IOMMUs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
+static const struct io_pgtable_init_fns *
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
+{
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
+ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
+ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
+ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
+ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
+#endif
+};
+
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
+{
+ struct io_pgtable *iop;
+ const struct io_pgtable_init_fns *fns;
+
+ if (fmt >= IO_PGTABLE_NUM_FMTS)
+ return NULL;
+
+ fns = io_pgtable_init_table[fmt];
+ if (!fns)
+ return NULL;
+
+ iop = fns->alloc(cfg, cookie);
+ if (!iop)
+ return NULL;
+
+ iop->fmt = fmt;
+ iop->cookie = cookie;
+ iop->cfg = *cfg;
+
+ return &iop->ops;
+}
+
+/*
+ * It is the IOMMU driver's responsibility to ensure that the page table
+ * is no longer accessible to the walker by this point.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops)
+{
+ struct io_pgtable *iop;
+
+ if (!ops)
+ return;
+
+ iop = container_of(ops, struct io_pgtable, ops);
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_init_table[iop->fmt]->free(iop);
+}
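
For orientation, here is a minimal driver-side sketch of the allocator API above; the gather-ops bodies, cookie handling and configuration values are illustrative placeholders rather than code from this series:

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

#include "io-pgtable.h"

/* Stub TLB callbacks; a real driver would issue hardware invalidations. */
static void example_tlb_flush_all(void *cookie) { }
static void example_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				  void *cookie) { }
static void example_tlb_sync(void *cookie) { }
static void example_flush_pgtable(void *ptr, size_t size, void *cookie) { }

static const struct iommu_gather_ops example_gather_ops = {
	.tlb_flush_all	= example_tlb_flush_all,
	.tlb_add_flush	= example_tlb_add_flush,
	.tlb_sync	= example_tlb_sync,
	.flush_pgtable	= example_flush_pgtable,
};

static int example_init_pgtable(void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &example_gather_ops,
	};
	struct io_pgtable_ops *ops;
	int ret;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	/* cfg.arm_lpae_s1_cfg.{ttbr,tcr,mair} now hold the values to program. */
	ret = ops->map(ops, SZ_1G, SZ_1G, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (!ret && ops->iova_to_phys(ops, SZ_1G) != SZ_1G)
		ret = -EFAULT;

	ops->unmap(ops, SZ_1G, SZ_4K);
	free_io_pgtable_ops(ops);
	return ret;
}

The LPAE selftest in io-pgtable-arm.c above exercises exactly this pattern, using dummy gather ops and the cfg itself as the cookie.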
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
new file mode 100644
index 000000000000..10e32f69c668
--- /dev/null
+++ b/drivers/iommu/io-pgtable.h
@@ -0,0 +1,143 @@
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
+ * @tlb_sync:      Ensure any queued TLB invalidation has taken effect.
+ * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_gather_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
+ void *cookie);
+ void (*tlb_sync)(void *cookie);
+ void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
+};
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @tlb: TLB management callbacks for this set of tables.
+ */
+struct io_pgtable_cfg {
+ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
+ int quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ const struct iommu_gather_ops *tlb;
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr[2];
+ u64 tcr;
+ u64 mair[2];
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ u64 vtcr;
+ } arm_lpae_s2_cfg;
+ };
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map: Map a physically contiguous memory region.
+ * @unmap: Unmap a physically contiguous memory region.
+ * @iova_to_phys: Translate iova to physical address.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+};
+
+#endif /* __IO_PGTABLE_H */
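
The io_pgtable_init_fns hook is all a new format back-end needs to export. A hypothetical format could therefore be added along the following lines; the names are invented for illustration, and a real addition would also need a new enum io_pgtable_fmt value plus an entry in io_pgtable_init_table in io-pgtable.c:

#include <linux/kernel.h>
#include <linux/slab.h>

#include "io-pgtable.h"

/* Format-private descriptor; the generic struct io_pgtable is embedded. */
struct example_io_pgtable {
	struct io_pgtable	iop;
	void			*pgd;
};

static struct io_pgtable *example_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct example_io_pgtable *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	/*
	 * A real back-end would restrict cfg->pgsize_bitmap to what it
	 * supports, populate data->iop.ops with its map/unmap/iova_to_phys
	 * handlers, and allocate and flush its top-level table here.
	 */
	return &data->iop;
}

static void example_free_pgtable(struct io_pgtable *iop)
{
	kfree(container_of(iop, struct example_io_pgtable, iop));
}

struct io_pgtable_init_fns io_pgtable_example_init_fns = {
	.alloc	= example_alloc_pgtable,
	.free	= example_free_pgtable,
};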
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f7718d73e984..72e683df0731 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -1084,7 +1084,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
- trace_map(iova, paddr, size);
+ trace_map(orig_iova, paddr, orig_size);
return ret;
}
@@ -1094,6 +1094,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
+ unsigned long orig_iova = iova;
if (unlikely(domain->ops->unmap == NULL ||
domain->ops->pgsize_bitmap == 0UL))
@@ -1133,7 +1134,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}
- trace_unmap(iova, 0, size);
+ trace_unmap(orig_iova, size, unmapped);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f6b17e6af2fb..9dd8208312c2 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,13 +18,58 @@
*/
#include <linux/iova.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *iommu_iova_cache;
+
+int iommu_iova_cache_init(void)
+{
+ int ret = 0;
+
+ iommu_iova_cache = kmem_cache_create("iommu_iova",
+ sizeof(struct iova),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu_iova_cache) {
+ pr_err("Couldn't create iova cache\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+void iommu_iova_cache_destroy(void)
+{
+ kmem_cache_destroy(iommu_iova_cache);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+ return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+ kmem_cache_free(iommu_iova_cache, iova);
+}
void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ unsigned long start_pfn, unsigned long pfn_32bit)
{
+ /*
+ * IOVA granularity will normally be equal to the smallest
+ * supported IOMMU page size; both *must* be capable of
+ * representing individual CPU pages exactly.
+ */
+ BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
+ iovad->granule = granule;
+ iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
}
@@ -127,7 +172,7 @@ move_left:
if (!curr) {
if (size_aligned)
pad_size = iova_get_pad_size(size, limit_pfn);
- if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+ if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return -ENOMEM;
}
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
* @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
* flag is set then the allocated address iova->pfn_lo will be naturally
* aligned on roundup_power_of_two(size).
*/
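
With the iova allocator now taking the granule and start PFN explicitly, and owning the iova kmem_cache, existing users update along these lines; the EXAMPLE_* values below are placeholders standing in for whatever a given caller previously hard-coded, not constants from this series:

#include <linux/iova.h>
#include <linux/sizes.h>

#define EXAMPLE_START_PFN	1UL		/* first usable IOVA PFN */
#define EXAMPLE_DMA_32BIT_PFN	0x100000UL	/* first PFN above 4 GiB */

static int example_setup_iova(struct iova_domain *iovad)
{
	int ret;

	/* The iova cache is now created by the core rather than each user. */
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	/* Old call: init_iova_domain(iovad, EXAMPLE_DMA_32BIT_PFN); */
	init_iova_domain(iovad, SZ_4K, EXAMPLE_START_PFN,
			 EXAMPLE_DMA_32BIT_PFN);
	return 0;
}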
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 748693192c20..10186cac7716 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/platform_data/ipmmu-vmsa.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -24,12 +24,13 @@
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
+#include "io-pgtable.h"
+
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct list_head list;
- const struct ipmmu_vmsa_platform_data *pdata;
unsigned int num_utlbs;
struct dma_iommu_mapping *mapping;
@@ -39,14 +40,17 @@ struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
struct iommu_domain *io_domain;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+
unsigned int context_id;
spinlock_t lock; /* Protects mappings */
- pgd_t *pgd;
};
struct ipmmu_vmsa_archdata {
struct ipmmu_vmsa_device *mmu;
- unsigned int utlb;
+ unsigned int *utlbs;
+ unsigned int num_utlbs;
};
static DEFINE_SPINLOCK(ipmmu_devices_lock);
@@ -58,6 +62,8 @@ static LIST_HEAD(ipmmu_devices);
* Registers Definition
*/
+#define IM_NS_ALIAS_OFFSET 0x800
+
#define IM_CTX_SIZE 0x40
#define IMCTR 0x0000
@@ -171,52 +177,6 @@ static LIST_HEAD(ipmmu_devices);
#define IMUASID_ASID0_SHIFT 0
/* -----------------------------------------------------------------------------
- * Page Table Bits
- */
-
-/*
- * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
- * Long-descriptor format" that the NStable bit being set in a table descriptor
- * will result in the NStable and NS bits of all child entries being ignored and
- * considered as being set. The IPMMU seems not to comply with this, as it
- * generates a secure access page fault if any of the NStable and NS bits isn't
- * set when running in non-secure mode.
- */
-#ifndef PMD_NSTABLE
-#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63)
-#endif
-
-#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53)
-#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52)
-#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10)
-#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8)
-#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8)
-#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5)
-#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0)
-
-/* Stage-1 PTE */
-#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11)
-#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6)
-#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6)
-#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6)
-#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2)
-#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2
-
-#define ARM_VMSA_PTE_ATTRS_MASK \
- (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
- ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
- ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
-
-#define ARM_VMSA_PTE_CONT_ENTRIES 16
-#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
-
-#define IPMMU_PTRS_PER_PTE 512
-#define IPMMU_PTRS_PER_PMD 512
-#define IPMMU_PTRS_PER_PGD 4
-
-/* -----------------------------------------------------------------------------
* Read/Write Access
*/
@@ -305,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
ipmmu_write(mmu, IMUCTR(utlb), 0);
}
-static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
- size_t size)
+static void ipmmu_tlb_flush_all(void *cookie)
+{
+ struct ipmmu_vmsa_domain *domain = cookie;
+
+ ipmmu_tlb_invalidate(domain);
+}
+
+static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ /* The hardware doesn't support selective TLB flush. */
+}
+
+static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct ipmmu_vmsa_domain *domain = cookie;
/*
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling.
*/
- dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
+ dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
+ DMA_TO_DEVICE);
}
+static struct iommu_gather_ops ipmmu_gather_ops = {
+ .tlb_flush_all = ipmmu_tlb_flush_all,
+ .tlb_add_flush = ipmmu_tlb_add_flush,
+ .tlb_sync = ipmmu_tlb_flush_all,
+ .flush_pgtable = ipmmu_flush_pgtable,
+};
+
/* -----------------------------------------------------------------------------
* Domain/Context Management
*/
@@ -324,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
phys_addr_t ttbr;
- u32 reg;
+
+ /*
+ * Allocate the page table operations.
+ *
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+ * access, Long-descriptor format" that the NStable bit being set in a
+ * table descriptor will result in the NStable and NS bits of all child
+ * entries being ignored and considered as being set. The IPMMU seems
+ * not to comply with this, as it generates a secure access page fault
+ * if any of the NStable and NS bits isn't set when running in
+ * non-secure mode.
+ */
+ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+ domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+ domain->cfg.ias = 32;
+ domain->cfg.oas = 40;
+ domain->cfg.tlb = &ipmmu_gather_ops;
+
+ domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+ domain);
+ if (!domain->iop)
+ return -EINVAL;
/*
* TODO: When adding support for multiple contexts, find an unused
@@ -333,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->context_id = 0;
/* TTBR0 */
- ipmmu_flush_pgtable(domain->mmu, domain->pgd,
- IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
- ttbr = __pa(domain->pgd);
+ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
@@ -348,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
- /*
- * MAIR0
- * We need three attributes only, non-cacheable, write-back read/write
- * allocate and device memory.
- */
- reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
- | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
- | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
- ipmmu_ctx_write(domain, IMMAIR0, reg);
+ /* MAIR0 */
+ ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
/* IMBUSCR */
ipmmu_ctx_write(domain, IMBUSCR,
@@ -461,396 +454,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
}
/* -----------------------------------------------------------------------------
- * Page Table Management
- */
-
-#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-
-static void ipmmu_free_ptes(pmd_t *pmd)
-{
- pgtable_t table = pmd_pgtable(*pmd);
- __free_page(table);
-}
-
-static void ipmmu_free_pmds(pud_t *pud)
-{
- pmd_t *pmd = pmd_offset(pud, 0);
- pgtable_t table;
- unsigned int i;
-
- for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
- if (!pmd_table(*pmd))
- continue;
-
- ipmmu_free_ptes(pmd);
- pmd++;
- }
-
- table = pud_pgtable(*pud);
- __free_page(table);
-}
-
-static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
-{
- pgd_t *pgd, *pgd_base = domain->pgd;
- unsigned int i;
-
- /*
- * Recursively free the page tables for this domain. We don't care about
- * speculative TLB filling, because the TLB will be nuked next time this
- * context bank is re-allocated and no devices currently map to these
- * tables.
- */
- pgd = pgd_base;
- for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
- if (pgd_none(*pgd))
- continue;
- ipmmu_free_pmds((pud_t *)pgd);
- pgd++;
- }
-
- kfree(pgd_base);
-}
-
-/*
- * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
- * functions as they would flush the CPU TLB.
- */
-
-static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova)
-{
- pte_t *pte;
-
- if (!pmd_none(*pmd))
- return pte_offset_kernel(pmd, iova);
-
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pte)
- return NULL;
-
- ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
- *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return pte + pte_index(iova);
-}
-
-static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
- unsigned long iova)
-{
- pud_t *pud = (pud_t *)pgd;
- pmd_t *pmd;
-
- if (!pud_none(*pud))
- return pmd_offset(pud, iova);
-
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pmd)
- return NULL;
-
- ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
- *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-
- return pmd + pmd_index(iova);
-}
-
-static u64 ipmmu_page_prot(unsigned int prot, u64 type)
-{
- u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
- | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
- | ARM_VMSA_PTE_NS | type;
-
- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
- pgprot |= ARM_VMSA_PTE_AP_RDONLY;
-
- if (prot & IOMMU_CACHE)
- pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
-
- if (prot & IOMMU_NOEXEC)
- pgprot |= ARM_VMSA_PTE_XN;
- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- /* If no access create a faulting entry to avoid TLB fills. */
- pgprot &= ~ARM_VMSA_PTE_PAGE;
-
- return pgprot;
-}
-
-static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova, unsigned long pfn,
- size_t size, int prot)
-{
- pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
- unsigned int num_ptes = 1;
- pte_t *pte, *start;
- unsigned int i;
-
- pte = ipmmu_alloc_pte(mmu, pmd, iova);
- if (!pte)
- return -ENOMEM;
-
- start = pte;
-
- /*
- * Install the page table entries. We can be called both for a single
- * page or for a block of 16 physically contiguous pages. In the latter
- * case set the PTE contiguous hint.
- */
- if (size == SZ_64K) {
- pteval |= ARM_VMSA_PTE_CONT;
- num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
- }
-
- for (i = num_ptes; i; --i)
- *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
- ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
-
- return 0;
-}
-
-static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova, unsigned long pfn,
- int prot)
-{
- pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
-
- *pmd = pfn_pmd(pfn, __pgprot(pmdval));
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return 0;
-}
-
-static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
- pgd_t *pgd = domain->pgd;
- unsigned long flags;
- unsigned long pfn;
- pmd_t *pmd;
- int ret;
-
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- if (paddr & ~((1ULL << 40) - 1))
- return -ERANGE;
-
- pfn = __phys_to_pfn(paddr);
- pgd += pgd_index(iova);
-
- /* Update the page tables. */
- spin_lock_irqsave(&domain->lock, flags);
-
- pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
- if (!pmd) {
- ret = -ENOMEM;
- goto done;
- }
-
- switch (size) {
- case SZ_2M:
- ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
- break;
- case SZ_64K:
- case SZ_4K:
- ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
-done:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (!ret)
- ipmmu_tlb_invalidate(domain);
-
- return ret;
-}
-
-static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
-{
- /* Free the page table. */
- pgtable_t table = pud_pgtable(*pud);
- __free_page(table);
-
- /* Clear the PUD. */
- *pud = __pud(0);
- ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-}
-
-static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
- pmd_t *pmd)
-{
- unsigned int i;
-
- /* Free the page table. */
- if (pmd_table(*pmd)) {
- pgtable_t table = pmd_pgtable(*pmd);
- __free_page(table);
- }
-
- /* Clear the PMD. */
- *pmd = __pmd(0);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- /* Check whether the PUD is still needed. */
- pmd = pmd_offset(pud, 0);
- for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
- if (!pmd_none(pmd[i]))
- return;
- }
-
- /* Clear the parent PUD. */
- ipmmu_clear_pud(mmu, pud);
-}
-
-static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
- pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
-{
- unsigned int i;
-
- /* Clear the PTE. */
- for (i = num_ptes; i; --i)
- pte[i-1] = __pte(0);
-
- ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
-
- /* Check whether the PMD is still needed. */
- pte = pte_offset_kernel(pmd, 0);
- for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
- if (!pte_none(pte[i]))
- return;
- }
-
- /* Clear the parent PMD. */
- ipmmu_clear_pmd(mmu, pud, pmd);
-}
-
-static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
-{
- pte_t *pte, *start;
- pteval_t pteval;
- unsigned long pfn;
- unsigned int i;
-
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pte)
- return -ENOMEM;
-
- /* Copy the PMD attributes. */
- pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
- | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
-
- pfn = pmd_pfn(*pmd);
- start = pte;
-
- for (i = IPMMU_PTRS_PER_PTE; i; --i)
- *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
- ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
- *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return 0;
-}
-
-static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
-{
- unsigned int i;
-
- for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
- pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
-
- ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
-}
-
-static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
- unsigned long iova, size_t size)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
- unsigned long flags;
- pgd_t *pgd = domain->pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int ret = 0;
-
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- pgd += pgd_index(iova);
- pud = (pud_t *)pgd;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- /* If there's no PUD or PMD we're done. */
- if (pud_none(*pud))
- goto done;
-
- pmd = pmd_offset(pud, iova);
- if (pmd_none(*pmd))
- goto done;
-
- /*
- * When freeing a 2MB block just clear the PMD. In the unlikely case the
- * block is mapped as individual pages this will free the corresponding
- * PTE page table.
- */
- if (size == SZ_2M) {
- ipmmu_clear_pmd(mmu, pud, pmd);
- goto done;
- }
-
- /*
- * If the PMD has been mapped as a section remap it as pages to allow
- * freeing individual pages.
- */
- if (pmd_sect(*pmd))
- ipmmu_split_pmd(mmu, pmd);
-
- pte = pte_offset_kernel(pmd, iova);
-
- /*
- * When freeing a 64kB block just clear the PTE entries. We don't have
- * to care about the contiguous hint of the surrounding entries.
- */
- if (size == SZ_64K) {
- ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
- goto done;
- }
-
- /*
- * If the PTE has been mapped with the contiguous hint set remap it and
- * its surrounding PTEs to allow unmapping a single page.
- */
- if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
- ipmmu_split_pte(mmu, pte);
-
- /* Clear the PTE. */
- ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
-
-done:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (ret)
- ipmmu_tlb_invalidate(domain);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* IOMMU Operations
*/
@@ -864,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain)
spin_lock_init(&domain->lock);
- domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- if (!domain->pgd) {
- kfree(domain);
- return -ENOMEM;
- }
-
io_domain->priv = domain;
domain->io_domain = io_domain;
@@ -885,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
* been detached.
*/
ipmmu_domain_destroy_context(domain);
- ipmmu_free_pgtables(domain);
+ free_io_pgtable_ops(domain->iop);
kfree(domain);
}
@@ -896,6 +493,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
unsigned long flags;
+ unsigned int i;
int ret = 0;
if (!mmu) {
@@ -924,7 +522,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
if (ret < 0)
return ret;
- ipmmu_utlb_enable(domain, archdata->utlb);
+ for (i = 0; i < archdata->num_utlbs; ++i)
+ ipmmu_utlb_enable(domain, archdata->utlbs[i]);
return 0;
}
@@ -934,8 +533,10 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ unsigned int i;
- ipmmu_utlb_disable(domain, archdata->utlb);
+ for (i = 0; i < archdata->num_utlbs; ++i)
+ ipmmu_utlb_disable(domain, archdata->utlbs[i]);
/*
* TODO: Optimize by disabling the context when no device is attached.
@@ -950,76 +551,61 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
- return ipmmu_create_mapping(domain, iova, paddr, size, prot);
+ return domain->iop->map(domain->iop, iova, paddr, size, prot);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
- int ret;
- ret = ipmmu_clear_mapping(domain, iova, size);
- return ret ? 0 : size;
+ return domain->iop->unmap(domain->iop, iova, size);
}
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
- pgd_t pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
/* TODO: Is locking needed ? */
- if (!domain->pgd)
- return 0;
-
- pgd = *(domain->pgd + pgd_index(iova));
- if (pgd_none(pgd))
- return 0;
-
- pud = *pud_offset(&pgd, iova);
- if (pud_none(pud))
- return 0;
+ return domain->iop->iova_to_phys(domain->iop, iova);
+}
- pmd = *pmd_offset(&pud, iova);
- if (pmd_none(pmd))
- return 0;
+static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
+ unsigned int *utlbs, unsigned int num_utlbs)
+{
+ unsigned int i;
- if (pmd_sect(pmd))
- return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
+ for (i = 0; i < num_utlbs; ++i) {
+ struct of_phandle_args args;
+ int ret;
- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
- if (pte_none(pte))
- return 0;
+ ret = of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells", i, &args);
+ if (ret < 0)
+ return ret;
- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
-}
+ of_node_put(args.np);
-static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev)
-{
- const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
- const char *devname = dev_name(dev);
- unsigned int i;
+ if (args.np != mmu->dev->of_node || args.args_count != 1)
+ return -EINVAL;
- for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) {
- if (strcmp(master->name, devname) == 0)
- return master->utlb;
+ utlbs[i] = args.args[0];
}
- return -1;
+ return 0;
}
static int ipmmu_add_device(struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata;
struct ipmmu_vmsa_device *mmu;
- struct iommu_group *group;
- int utlb = -1;
- int ret;
+ struct iommu_group *group = NULL;
+ unsigned int *utlbs;
+ unsigned int i;
+ int num_utlbs;
+ int ret = -ENODEV;
if (dev->archdata.iommu) {
dev_warn(dev, "IOMMU driver already assigned to device %s\n",
@@ -1028,11 +614,21 @@ static int ipmmu_add_device(struct device *dev)
}
/* Find the master corresponding to the device. */
+
+ num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells");
+ if (num_utlbs < 0)
+ return -ENODEV;
+
+ utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
+ if (!utlbs)
+ return -ENOMEM;
+
spin_lock(&ipmmu_devices_lock);
list_for_each_entry(mmu, &ipmmu_devices, list) {
- utlb = ipmmu_find_utlb(mmu, dev);
- if (utlb >= 0) {
+ ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
+ if (!ret) {
/*
* TODO Take a reference to the MMU to protect
* against device removal.
@@ -1043,17 +639,22 @@ static int ipmmu_add_device(struct device *dev)
spin_unlock(&ipmmu_devices_lock);
- if (utlb < 0)
+ if (ret < 0)
return -ENODEV;
- if (utlb >= mmu->num_utlbs)
- return -EINVAL;
+ for (i = 0; i < num_utlbs; ++i) {
+ if (utlbs[i] >= mmu->num_utlbs) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
/* Create a device group and add the device to it. */
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
+ ret = PTR_ERR(group);
+ goto error;
}
ret = iommu_group_add_device(group, dev);
@@ -1061,7 +662,8 @@ static int ipmmu_add_device(struct device *dev)
if (ret < 0) {
dev_err(dev, "Failed to add device to IPMMU group\n");
- return ret;
+ group = NULL;
+ goto error;
}
archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
@@ -1071,7 +673,8 @@ static int ipmmu_add_device(struct device *dev)
}
archdata->mmu = mmu;
- archdata->utlb = utlb;
+ archdata->utlbs = utlbs;
+ archdata->num_utlbs = num_utlbs;
dev->archdata.iommu = archdata;
/*
@@ -1090,7 +693,8 @@ static int ipmmu_add_device(struct device *dev)
SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
- return PTR_ERR(mapping);
+ ret = PTR_ERR(mapping);
+ goto error;
}
mmu->mapping = mapping;
@@ -1106,17 +710,29 @@ static int ipmmu_add_device(struct device *dev)
return 0;
error:
+ arm_iommu_release_mapping(mmu->mapping);
+
kfree(dev->archdata.iommu);
+ kfree(utlbs);
+
dev->archdata.iommu = NULL;
- iommu_group_remove_device(dev);
+
+ if (!IS_ERR_OR_NULL(group))
+ iommu_group_remove_device(dev);
+
return ret;
}
static void ipmmu_remove_device(struct device *dev)
{
+ struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+
arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
- kfree(dev->archdata.iommu);
+
+ kfree(archdata->utlbs);
+ kfree(archdata);
+
dev->archdata.iommu = NULL;
}
@@ -1131,7 +747,7 @@ static const struct iommu_ops ipmmu_ops = {
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
- .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+ .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};
/* -----------------------------------------------------------------------------
@@ -1154,7 +770,7 @@ static int ipmmu_probe(struct platform_device *pdev)
int irq;
int ret;
- if (!pdev->dev.platform_data) {
+ if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
dev_err(&pdev->dev, "missing platform data\n");
return -EINVAL;
}
@@ -1166,7 +782,6 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
- mmu->pdata = pdev->dev.platform_data;
mmu->num_utlbs = 32;
/* Map I/O memory and request IRQ. */
@@ -1175,6 +790,20 @@ static int ipmmu_probe(struct platform_device *pdev)
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
+ /*
+ * The IPMMU has two register banks, for secure and non-secure modes.
+ * The bank mapped at the beginning of the IPMMU address space
+ * corresponds to the running mode of the CPU. When running in secure
+ * mode the non-secure register bank is also available at an offset.
+ *
+ * Secure mode operation isn't clearly documented and is thus currently
+ * not implemented in the driver. Furthermore, preliminary tests of
+ * non-secure operation with the main register bank were not successful.
+ * Offset the registers base unconditionally to point to the non-secure
+ * alias space for now.
+ */
+ mmu->base += IM_NS_ALIAS_OFFSET;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
@@ -1220,9 +849,14 @@ static int ipmmu_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ipmmu_of_ids[] = {
+ { .compatible = "renesas,ipmmu-vmsa", },
+ { /* terminating entry */ },
+};
+
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
+ .of_match_table = of_match_ptr(ipmmu_of_ids),
},
.probe = ipmmu_probe,
.remove = ipmmu_remove,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 89c4846683be..390079ee1350 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -17,12 +17,11 @@
#include "irq_remapping.h"
int irq_remapping_enabled;
-
-int disable_irq_remap;
int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;
+static int disable_irq_remap;
static struct irq_remap_ops *remap_ops;
static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
@@ -194,45 +193,32 @@ static __init int setup_irqremap(char *str)
}
early_param("intremap", setup_irqremap);
-void __init setup_irq_remapping_ops(void)
-{
- remap_ops = &intel_irq_remap_ops;
-
-#ifdef CONFIG_AMD_IOMMU
- if (amd_iommu_irq_ops.prepare() == 0)
- remap_ops = &amd_iommu_irq_ops;
-#endif
-}
-
void set_irq_remapping_broken(void)
{
irq_remap_broken = 1;
}
-int irq_remapping_supported(void)
+int __init irq_remapping_prepare(void)
{
if (disable_irq_remap)
- return 0;
-
- if (!remap_ops || !remap_ops->supported)
- return 0;
-
- return remap_ops->supported();
-}
+ return -ENOSYS;
-int __init irq_remapping_prepare(void)
-{
- if (!remap_ops || !remap_ops->prepare)
- return -ENODEV;
+ if (intel_irq_remap_ops.prepare() == 0)
+ remap_ops = &intel_irq_remap_ops;
+ else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
+ amd_iommu_irq_ops.prepare() == 0)
+ remap_ops = &amd_iommu_irq_ops;
+ else
+ return -ENOSYS;
- return remap_ops->prepare();
+ return 0;
}
int __init irq_remapping_enable(void)
{
int ret;
- if (!remap_ops || !remap_ops->enable)
+ if (!remap_ops->enable)
return -ENODEV;
ret = remap_ops->enable();
@@ -245,22 +231,16 @@ int __init irq_remapping_enable(void)
void irq_remapping_disable(void)
{
- if (!irq_remapping_enabled ||
- !remap_ops ||
- !remap_ops->disable)
- return;
-
- remap_ops->disable();
+ if (irq_remapping_enabled && remap_ops->disable)
+ remap_ops->disable();
}
int irq_remapping_reenable(int mode)
{
- if (!irq_remapping_enabled ||
- !remap_ops ||
- !remap_ops->reenable)
- return 0;
+ if (irq_remapping_enabled && remap_ops->reenable)
+ return remap_ops->reenable(mode);
- return remap_ops->reenable(mode);
+ return 0;
}
int __init irq_remap_enable_fault_handling(void)
@@ -268,7 +248,7 @@ int __init irq_remap_enable_fault_handling(void)
if (!irq_remapping_enabled)
return 0;
- if (!remap_ops || !remap_ops->enable_faulting)
+ if (!remap_ops->enable_faulting)
return -ENODEV;
return remap_ops->enable_faulting();
@@ -279,7 +259,7 @@ int setup_ioapic_remapped_entry(int irq,
unsigned int destination, int vector,
struct io_apic_irq_attr *attr)
{
- if (!remap_ops || !remap_ops->setup_ioapic_entry)
+ if (!remap_ops->setup_ioapic_entry)
return -ENODEV;
return remap_ops->setup_ioapic_entry(irq, entry, destination,
@@ -289,8 +269,7 @@ int setup_ioapic_remapped_entry(int irq,
static int set_remapped_irq_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- if (!config_enabled(CONFIG_SMP) || !remap_ops ||
- !remap_ops->set_affinity)
+ if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity)
return 0;
return remap_ops->set_affinity(data, mask, force);
@@ -300,10 +279,7 @@ void free_remapped_irq(int irq)
{
struct irq_cfg *cfg = irq_cfg(irq);
- if (!remap_ops || !remap_ops->free_irq)
- return;
-
- if (irq_remapped(cfg))
+ if (irq_remapped(cfg) && remap_ops->free_irq)
remap_ops->free_irq(irq);
}
@@ -315,13 +291,13 @@ void compose_remapped_msi_msg(struct pci_dev *pdev,
if (!irq_remapped(cfg))
native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
- else if (remap_ops && remap_ops->compose_msi_msg)
+ else if (remap_ops->compose_msi_msg)
remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
}
static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
{
- if (!remap_ops || !remap_ops->msi_alloc_irq)
+ if (!remap_ops->msi_alloc_irq)
return -ENODEV;
return remap_ops->msi_alloc_irq(pdev, irq, nvec);
@@ -330,7 +306,7 @@ static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
int index, int sub_handle)
{
- if (!remap_ops || !remap_ops->msi_setup_irq)
+ if (!remap_ops->msi_setup_irq)
return -ENODEV;
return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
@@ -340,7 +316,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
{
int ret;
- if (!remap_ops || !remap_ops->alloc_hpet_msi)
+ if (!remap_ops->alloc_hpet_msi)
return -ENODEV;
ret = remap_ops->alloc_hpet_msi(irq, id);
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index fde250f86e60..7c70cc29ffe6 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -31,16 +31,12 @@ struct cpumask;
struct pci_dev;
struct msi_msg;
-extern int disable_irq_remap;
extern int irq_remap_broken;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
extern int irq_remapping_enabled;
struct irq_remap_ops {
- /* Check whether Interrupt Remapping is supported */
- int (*supported)(void);
-
/* Initializes hardware and makes it ready for remapping interrupts */
int (*prepare)(void);
@@ -89,7 +85,6 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
#else /* CONFIG_IRQ_REMAP */
#define irq_remapping_enabled 0
-#define disable_irq_remap 1
#define irq_remap_broken 0
#endif /* CONFIG_IRQ_REMAP */
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bbb7dcef02d3..f59f857b702e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1126,7 +1126,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return -EINVAL;
}
- dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+ dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
iotlb_init_entry(&e, da, pa, omap_pgsz);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
+ .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
return 0;
}
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 9516a324be6d..42965d2476bb 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o
obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o
+obj-$(CONFIG_ARCH_DIGICOLOR) += irq-digicolor.o
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index d111ac779c40..63cd031b2c28 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -28,7 +28,7 @@
#define AT91_AIC_IRQ_MIN_PRIORITY 0
#define AT91_AIC_IRQ_MAX_PRIORITY 7
-#define AT91_AIC_SRCTYPE GENMASK(7, 6)
+#define AT91_AIC_SRCTYPE GENMASK(6, 5)
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
return -EINVAL;
}
- *val &= AT91_AIC_SRCTYPE;
+ *val &= ~AT91_AIC_SRCTYPE;
*val |= aic_type;
return 0;
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
new file mode 100644
index 000000000000..930a2a2fac7f
--- /dev/null
+++ b/drivers/irqchip/irq-digicolor.c
@@ -0,0 +1,120 @@
+/*
+ * Conexant Digicolor SoCs IRQ chip driver
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2014 Paradox Innovation Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define UC_IRQ_CONTROL 0x04
+
+#define IC_FLAG_CLEAR_LO 0x00
+#define IC_FLAG_CLEAR_XLO 0x04
+#define IC_INT0ENABLE_LO 0x10
+#define IC_INT0ENABLE_XLO 0x14
+#define IC_INT0STATUS_LO 0x18
+#define IC_INT0STATUS_XLO 0x1c
+
+static struct irq_domain *digicolor_irq_domain;
+
+static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs)
+{
+ struct irq_domain_chip_generic *dgc = digicolor_irq_domain->gc;
+ struct irq_chip_generic *gc = dgc->gc[0];
+ u32 status, hwirq;
+
+ do {
+ status = irq_reg_readl(gc, IC_INT0STATUS_LO);
+ if (status) {
+ hwirq = ffs(status) - 1;
+ } else {
+ status = irq_reg_readl(gc, IC_INT0STATUS_XLO);
+ if (status)
+ hwirq = ffs(status) - 1 + 32;
+ else
+ return;
+ }
+
+ handle_domain_irq(digicolor_irq_domain, hwirq, regs);
+ } while (1);
+}
+
+static void digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
+ unsigned en_reg, unsigned ack_reg)
+{
+ struct irq_chip_generic *gc;
+
+ gc = irq_get_domain_generic_chip(digicolor_irq_domain, irq_base);
+ gc->reg_base = reg_base;
+ gc->chip_types[0].regs.ack = ack_reg;
+ gc->chip_types[0].regs.mask = en_reg;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+}
+
+static int __init digicolor_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ static void __iomem *reg_base;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct regmap *ucregs;
+ int ret;
+
+ reg_base = of_iomap(node, 0);
+ if (!reg_base) {
+ pr_err("%s: unable to map IC registers\n", node->full_name);
+ return -ENXIO;
+ }
+
+ /* disable all interrupts */
+ writel(0, reg_base + IC_INT0ENABLE_LO);
+ writel(0, reg_base + IC_INT0ENABLE_XLO);
+
+ ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
+ if (IS_ERR(ucregs)) {
+ pr_err("%s: unable to map UC registers\n", node->full_name);
+ return PTR_ERR(ucregs);
+ }
+ /* channel 1, regular IRQs */
+ regmap_write(ucregs, UC_IRQ_CONTROL, 1);
+
+ digicolor_irq_domain =
+ irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
+ if (!digicolor_irq_domain) {
+ pr_err("%s: unable to create IRQ domain\n", node->full_name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(digicolor_irq_domain, 32, 1,
+ "digicolor_irq", handle_level_irq,
+ clr, 0, 0);
+ if (ret) {
+ pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
+ return ret;
+ }
+
+ digicolor_set_gc(reg_base, 0, IC_INT0ENABLE_LO, IC_FLAG_CLEAR_LO);
+ digicolor_set_gc(reg_base, 32, IC_INT0ENABLE_XLO, IC_FLAG_CLEAR_XLO);
+
+ set_handle_irq(digicolor_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(conexant_digicolor_ic, "cnxt,cx92755-ic", digicolor_of_init);
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 61541ff24397..ad96ebb0c7ab 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -21,7 +21,7 @@
#include "irq-gic-common.h"
-void gic_configure_irq(unsigned int irq, unsigned int type,
+int gic_configure_irq(unsigned int irq, unsigned int type,
void __iomem *base, void (*sync_access)(void))
{
u32 enablemask = 1 << (irq % 32);
@@ -29,16 +29,17 @@ void gic_configure_irq(unsigned int irq, unsigned int type,
u32 confmask = 0x2 << ((irq % 16) * 2);
u32 confoff = (irq / 16) * 4;
bool enabled = false;
- u32 val;
+ u32 val, oldval;
+ int ret = 0;
/*
* Read current configuration register, and insert the config
* for "irq", depending on "type".
*/
- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
- if (type == IRQ_TYPE_LEVEL_HIGH)
+ val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ if (type & IRQ_TYPE_LEVEL_MASK)
val &= ~confmask;
- else if (type == IRQ_TYPE_EDGE_RISING)
+ else if (type & IRQ_TYPE_EDGE_BOTH)
val |= confmask;
/*
@@ -54,15 +55,20 @@ void gic_configure_irq(unsigned int irq, unsigned int type,
/*
* Write back the new configuration, and possibly re-enable
- * the interrupt.
+ * the interrupt. If we tried to write a new configuration and failed,
+ * return an error.
*/
writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
+ ret = -EINVAL;
if (enabled)
writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
if (sync_access)
sync_access();
+
+ return ret;
}
void __init gic_dist_config(void __iomem *base, int gic_irqs,
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index b41f02481c3a..35a9884778bd 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -20,7 +20,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
-void gic_configure_irq(unsigned int irq, unsigned int type,
+int gic_configure_irq(unsigned int irq, unsigned int type,
void __iomem *base, void (*sync_access)(void));
void gic_dist_config(void __iomem *base, int gic_irqs,
void (*sync_access)(void));
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 86e4684adeb1..d8996bdf0f61 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* of two entries. No, the architecture doesn't let you
* express an ITT with a single entry.
*/
- nr_ites = max(2, roundup_pow_of_two(nvecs));
+ nr_ites = max(2UL, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kmalloc(sz, GFP_KERNEL);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1a146ccee701..1c6dea2fbc34 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -238,7 +238,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
if (irq < 16)
return -EINVAL;
- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ /* SPIs have restrictions on the supported types */
+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+ type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
if (gic_irq_in_rdist(d)) {
@@ -249,9 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
rwp_wait = gic_dist_wait_for_rwp;
}
- gic_configure_irq(irq, type, base, rwp_wait);
-
- return 0;
+ return gic_configure_irq(irq, type, base, rwp_wait);
}
static u64 gic_mpidr_to_affinity(u64 mpidr)
@@ -481,15 +481,19 @@ out:
return tlist;
}
+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
+ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
+ << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
+
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
u64 val;
- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
- irq << 24 |
- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
- tlist);
+ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
+ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
+ irq << ICC_SGI1R_SGI_ID_SHIFT |
+ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+ tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
gic_write_sgi1r(val);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d617ee5a3d8a..4634cf7d0ec3 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -188,12 +188,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
+ int ret;
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
return -EINVAL;
- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ /* SPIs have restrictions on the supported types */
+ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+ type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
raw_spin_lock(&irq_controller_lock);
@@ -201,11 +204,11 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
- gic_configure_irq(gicirq, type, base, NULL);
+ ret = gic_configure_irq(gicirq, type, base, NULL);
raw_spin_unlock(&irq_controller_lock);
- return 0;
+ return ret;
}
static int gic_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 29b8f21b74d0..7d6ffb5de84f 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -120,21 +120,24 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = hip04_dist_base(d);
unsigned int irq = hip04_irq(d);
+ int ret;
/* Interrupt configuration for SGIs can't be changed */
if (irq < 16)
return -EINVAL;
- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ /* SPIs have restrictions on the supported types */
+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+ type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
raw_spin_lock(&irq_controller_lock);
- gic_configure_irq(irq, type, base, NULL);
+ ret = gic_configure_irq(irq, type, base, NULL);
raw_spin_unlock(&irq_controller_lock);
- return 0;
+ return ret;
}
#ifdef CONFIG_SMP
@@ -381,7 +384,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
* It will be refined as each CPU probes its ID.
*/
for (i = 0; i < NR_HIP04_CPU_IF; i++)
- hip04_cpu_map[i] = 0xff;
+ hip04_cpu_map[i] = 0xffff;
/*
* Find out how many interrupts are supported.
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 2b0468e3df6a..1daa7ca04577 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
+static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static void __gic_irq_dispatch(void);
@@ -234,9 +235,9 @@ int gic_get_c0_perfcount_int(void)
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
-static unsigned int gic_get_int(void)
+static void gic_handle_shared_int(void)
{
- unsigned int i;
+ unsigned int i, intr, virq;
unsigned long *pcpu_mask;
unsigned long pending_reg, intrmask_reg;
DECLARE_BITMAP(pending, GIC_MAX_INTRS);
@@ -258,7 +259,16 @@ static unsigned int gic_get_int(void)
bitmap_and(pending, pending, intrmask, gic_shared_intrs);
bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
- return find_first_bit(pending, gic_shared_intrs);
+ intr = find_first_bit(pending, gic_shared_intrs);
+ while (intr != gic_shared_intrs) {
+ virq = irq_linear_revmap(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
+ do_IRQ(virq);
+
+ /* go to next pending bit */
+ bitmap_clear(pending, intr, 1);
+ intr = find_first_bit(pending, gic_shared_intrs);
+ }
}
static void gic_mask_irq(struct irq_data *d)
@@ -385,16 +395,26 @@ static struct irq_chip gic_edge_irq_controller = {
#endif
};
-static unsigned int gic_get_local_int(void)
+static void gic_handle_local_int(void)
{
unsigned long pending, masked;
+ unsigned int intr, virq;
pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
- return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
+ intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
+ while (intr != GIC_NUM_LOCAL_INTRS) {
+ virq = irq_linear_revmap(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr));
+ do_IRQ(virq);
+
+ /* go to next pending bit */
+ bitmap_clear(&pending, intr, 1);
+ intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
+ }
}
static void gic_mask_local_irq(struct irq_data *d)
@@ -453,19 +473,8 @@ static struct irq_chip gic_all_vpes_local_irq_controller = {
static void __gic_irq_dispatch(void)
{
- unsigned int intr, virq;
-
- while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
- virq = irq_linear_revmap(gic_irq_domain,
- GIC_LOCAL_TO_HWIRQ(intr));
- do_IRQ(virq);
- }
-
- while ((intr = gic_get_int()) != gic_shared_intrs) {
- virq = irq_linear_revmap(gic_irq_domain,
- GIC_SHARED_TO_HWIRQ(intr));
- do_IRQ(virq);
- }
+ gic_handle_local_int();
+ gic_handle_shared_int();
}
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
@@ -616,6 +625,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
break;
case GIC_LOCAL_INT_TIMER:
+ /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+ val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
break;
case GIC_LOCAL_INT_PERFCTR:
@@ -713,12 +724,36 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (cpu_has_veic) {
/* Always use vector 1 in EIC mode */
gic_cpu_pin = 0;
+ timer_cpu_pin = gic_cpu_pin;
set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
__gic_irq_dispatch);
} else {
gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
gic_irq_dispatch);
+ /*
+ * With the CMP implementation of SMP (deprecated), other CPUs
+ * are started by the bootloader and put into a timer based
+ * waiting poll loop. We must not re-route those CPU's local
+ * timer interrupts as the wait instruction will never finish,
+ * so just handle whatever CPU interrupt it is routed to by
+ * default.
+ *
+ * This workaround should be removed when CMP support is
+ * dropped.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMP) &&
+ gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
+ timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+ GIC_VPE_TIMER_MAP)) &
+ GIC_MAP_MSK;
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
+ GIC_CPU_PIN_OFFSET +
+ timer_cpu_pin,
+ gic_irq_dispatch);
+ } else {
+ timer_cpu_pin = gic_cpu_pin;
+ }
}
gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
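The gic_handle_shared_int()/gic_handle_local_int() rework above folds the old "return first pending bit" helpers into loops that service every pending source in one pass. A self-contained sketch of that iterate-and-clear pattern; the callback is a stand-in for the irq_linear_revmap() + do_IRQ() pair, not the driver's actual code:

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* Illustrative only: handle_one() represents the virq lookup and dispatch. */
static void dispatch_all_pending(unsigned long *pending, unsigned int nbits,
				 void (*handle_one)(unsigned int bit))
{
	unsigned int bit = find_first_bit(pending, nbits);

	while (bit != nbits) {
		handle_one(bit);
		/* clear the bit we just serviced and look for the next one */
		bitmap_clear(pending, bit, 1);
		bit = find_first_bit(pending, nbits);
	}
}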
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 7e342df6a62f..eaf0a710e98a 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -23,8 +23,6 @@
#include "irqchip.h"
-#define MT6577_SYS_INTPOL_NUM (224)
-
struct mtk_sysirq_chip_data {
spinlock_t lock;
void __iomem *intpol_base;
@@ -124,7 +122,8 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
{
struct irq_domain *domain, *domain_parent;
struct mtk_sysirq_chip_data *chip_data;
- int ret = 0;
+ int ret, size, intpol_num;
+ struct resource res;
domain_parent = irq_find_host(parent);
if (!domain_parent) {
@@ -132,19 +131,24 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
return -EINVAL;
}
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret)
+ return ret;
+
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
if (!chip_data)
return -ENOMEM;
- chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
+ size = resource_size(&res);
+ intpol_num = size * 8;
+ chip_data->intpol_base = ioremap(res.start, size);
if (!chip_data->intpol_base) {
pr_err("mtk_sysirq: unable to map sysirq register\n");
ret = -ENOMEM;
goto out_free;
}
- domain = irq_domain_add_hierarchy(domain_parent, 0,
- MT6577_SYS_INTPOL_NUM, node,
+ domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node,
&sysirq_domain_ops, chip_data);
if (!domain) {
ret = -ENOMEM;
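The MT6577 constant above is replaced by a size derived from the mapped register block; the size * 8 arithmetic implies one polarity bit per interrupt line. A tiny sketch of the same derivation, where the 0x20-byte block is an assumed example rather than a value from any binding:

#include <linux/ioport.h>

/* Assumed example: a 0x20-byte INTPOL block -> 0x20 * 8 = 256 interrupts,
 * i.e. one polarity bit per interrupt line. */
static unsigned int example_intpol_count(const struct resource *res)
{
	return resource_size(res) * 8;
}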
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 28718d3e8281..a569c6dbd1d1 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
return ret;
}
-static int __init omap_init_irq_legacy(u32 base)
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
int j, irq_base;
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
irq_base = 0;
}
- domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
+ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
{
int ret;
- if (node)
+ /*
+ * FIXME: the legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
+ * is still not ready for linear IRQ domains; because of that we need to
+ * temporarily "blacklist" OMAP2 and OMAP3 devices from using linear IRQ
+ * domains until that driver is finally fixed.
+ */
+ if (of_device_is_compatible(node, "ti,omap2-intc") ||
+ of_device_is_compatible(node, "ti,omap3-intc")) {
+ struct resource res;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -ENOMEM;
+
+ base = res.start;
+ ret = omap_init_irq_legacy(base, node);
+ } else if (node) {
ret = omap_init_irq_of(node);
- else
- ret = omap_init_irq_legacy(base);
+ } else {
+ ret = omap_init_irq_legacy(base, NULL);
+ }
if (ret == 0)
omap_irq_enable_protection();
@@ -348,14 +364,6 @@ out:
omap_ack_irq(NULL);
}
-void __init omap2_init_irq(void)
-{
- omap_nr_irqs = 96;
- omap_nr_pending = 3;
- omap_init_irq(OMAP24XX_IC_BASE, NULL);
- set_handle_irq(omap_intc_handle_irq);
-}
-
void __init omap3_init_irq(void)
{
omap_nr_irqs = 96;
@@ -364,14 +372,6 @@ void __init omap3_init_irq(void)
set_handle_irq(omap_intc_handle_irq);
}
-void __init ti81xx_init_irq(void)
-{
- omap_nr_irqs = 96;
- omap_nr_pending = 4;
- omap_init_irq(OMAP34XX_IC_BASE, NULL);
- set_handle_irq(omap_intc_handle_irq);
-}
-
static int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
@@ -383,7 +383,9 @@ static int __init intc_of_init(struct device_node *node,
if (WARN_ON(!node))
return -ENODEV;
- if (of_device_is_compatible(node, "ti,am33xx-intc")) {
+ if (of_device_is_compatible(node, "ti,dm814-intc") ||
+ of_device_is_compatible(node, "ti,dm816-intc") ||
+ of_device_is_compatible(node, "ti,am33xx-intc")) {
omap_nr_irqs = 128;
omap_nr_pending = 4;
}
@@ -399,4 +401,6 @@ static int __init intc_of_init(struct device_node *node,
IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
+IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
+IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 078cac5e2d08..9a0767b9c89d 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -30,6 +30,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_data/irq-renesas-intc-irqpin.h>
#include <linux/pm_runtime.h>
@@ -40,7 +41,9 @@
#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
-#define INTC_IRQPIN_REG_NR 5
+#define INTC_IRQPIN_REG_NR_MANDATORY 5
+#define INTC_IRQPIN_REG_IRLM 5 /* ICR0 with IRLM bit (optional) */
+#define INTC_IRQPIN_REG_NR 6
/* INTC external IRQ PIN hardware register access:
*
@@ -82,6 +85,10 @@ struct intc_irqpin_priv {
u8 shared_irq_mask;
};
+struct intc_irqpin_irlm_config {
+ unsigned int irlm_bit;
+};
+
static unsigned long intc_irqpin_read32(void __iomem *iomem)
{
return ioread32(iomem);
@@ -345,10 +352,23 @@ static struct irq_domain_ops intc_irqpin_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a7779 = {
+ .irlm_bit = 23, /* ICR0.IRLM0 */
+};
+
+static const struct of_device_id intc_irqpin_dt_ids[] = {
+ { .compatible = "renesas,intc-irqpin", },
+ { .compatible = "renesas,intc-irqpin-r8a7779",
+ .data = &intc_irqpin_irlm_r8a7779 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
+
static int intc_irqpin_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct renesas_intc_irqpin_config *pdata = dev->platform_data;
+ const struct of_device_id *of_id;
struct intc_irqpin_priv *p;
struct intc_irqpin_iomem *i;
struct resource *io[INTC_IRQPIN_REG_NR];
@@ -391,10 +411,11 @@ static int intc_irqpin_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- /* get hold of manadatory IOMEM */
+ /* get hold of register banks */
+ memset(io, 0, sizeof(io));
for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
- if (!io[k]) {
+ if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
dev_err(dev, "not enough IOMEM resources\n");
ret = -EINVAL;
goto err0;
@@ -422,6 +443,10 @@ static int intc_irqpin_probe(struct platform_device *pdev)
for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
i = &p->iomem[k];
+ /* handle optional registers */
+ if (!io[k])
+ continue;
+
switch (resource_size(io[k])) {
case 1:
i->width = 8;
@@ -448,6 +473,19 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
}
+ /* configure "individual IRQ mode" where needed */
+ of_id = of_match_device(intc_irqpin_dt_ids, dev);
+ if (of_id && of_id->data) {
+ const struct intc_irqpin_irlm_config *irlm_config = of_id->data;
+
+ if (io[INTC_IRQPIN_REG_IRLM])
+ intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
+ irlm_config->irlm_bit,
+ 1, 1);
+ else
+ dev_warn(dev, "unable to select IRLM mode\n");
+ }
+
/* mask all interrupts using priority */
for (k = 0; k < p->number_of_irqs; k++)
intc_irqpin_mask_unmask_prio(p, k, 1);
@@ -550,12 +588,6 @@ static int intc_irqpin_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id intc_irqpin_dt_ids[] = {
- { .compatible = "renesas,intc-irqpin", },
- {},
-};
-MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
-
static struct platform_driver intc_irqpin_device_driver = {
.probe = intc_irqpin_probe,
.remove = intc_irqpin_remove,
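The r8a7779 IRLM handling above uses the common of_device_id .data lookup to attach per-SoC configuration to a compatible string. A generic sketch of that pattern with made-up names; the vendor strings, struct and fields are illustrative only:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-SoC configuration attached via of_device_id .data. */
struct example_soc_config {
	unsigned int quirk_bit;
};

static const struct example_soc_config example_cfg_soc_a = {
	.quirk_bit = 23,
};

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example" },
	{ .compatible = "vendor,example-soc-a", .data = &example_cfg_soc_a },
	{ /* sentinel */ },
};

/* Returns the SoC-specific quirk bit, or -1 when the match carries no data. */
static int example_quirk_bit(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(example_dt_ids, &pdev->dev);

	if (of_id && of_id->data) {
		const struct example_soc_config *cfg = of_id->data;

		return cfg->quirk_bit;
	}

	return -1;
}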
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index a82e542ffc21..d7c286656a25 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
add_ai(plci, &parms[5]);
sig_req(plci, REJECT, 0);
}
- else if (Reject == 1 || Reject > 9)
+ else if (Reject == 1 || Reject >= 9)
{
add_ai(plci, &parms[5]);
sig_req(plci, HANGUP, 0);
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+ byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
byte force_mt_info = false;
byte dir;
dword d;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index ccd7d851be26..a77eea594b69 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -754,10 +754,10 @@ dbusy_timer_handler(struct isac_hw *isac)
}
static int
-open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
- isac->dch.dev.id, __builtin_return_address(1));
+ isac->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
@@ -771,6 +771,12 @@ open_dchannel(struct isac_hw *isac, struct channel_req *rq)
return 0;
}
+static int
+open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+{
+ return open_dchannel_caller(isac, rq, __builtin_return_address(0));
+}
+
static const char *ISACVer[] =
{"2086/2186 V1.1", "2085 B1", "2085 B2",
"2085 V2.3"};
@@ -1548,7 +1554,7 @@ ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
- err = open_dchannel(isac, rq);
+ err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
else
err = open_bchannel(ipac, rq);
if (err)
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index de69f6828c76..741675525b53 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1176,10 +1176,10 @@ w6692_l1callback(struct dchannel *dch, u32 cmd)
}
static int
-open_dchannel(struct w6692_hw *card, struct channel_req *rq)
+open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
- card->dch.dev.id, __builtin_return_address(1));
+ card->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
@@ -1207,7 +1207,7 @@ w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
- err = open_dchannel(card, rq);
+ err = open_dchannel(card, rq, __builtin_return_address(0));
else
err = open_bchannel(card, rq);
if (err)
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index fc9f9d03fa13..0e5d673871c0 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -225,20 +225,6 @@ fWrite_hfc8(hfc4s8s_hw *a, u_char c)
}
static inline void
-Write_hfc16(hfc4s8s_hw *a, u_char b, u_short c)
-{
- SetRegAddr(a, b);
- outw(c, a->iobase);
-}
-
-static inline void
-Write_hfc32(hfc4s8s_hw *a, u_char b, u_long c)
-{
- SetRegAddr(a, b);
- outl(c, a->iobase);
-}
-
-static inline void
fWrite_hfc32(hfc4s8s_hw *a, u_long c)
{
outl(c, a->iobase);
@@ -266,13 +252,6 @@ Read_hfc16(hfc4s8s_hw *a, u_char b)
}
static inline u_long
-Read_hfc32(hfc4s8s_hw *a, u_char b)
-{
- SetRegAddr(a, b);
- return (inl((volatile u_int) a->iobase));
-}
-
-static inline u_long
fRead_hfc32(hfc4s8s_hw *a)
{
return (inl((volatile u_int) a->iobase));
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5a4da94aefb0..ef9c8e4f1fa2 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -59,7 +59,8 @@ isdnloop_bchan_send(isdnloop_card *card, int ch)
isdn_ctrl cmd;
while (card->sndcount[ch]) {
- if ((skb = skb_dequeue(&card->bqueue[ch]))) {
+ skb = skb_dequeue(&card->bqueue[ch]);
+ if (skb) {
len = skb->len;
card->sndcount[ch] -= len;
ack = *(skb->head); /* used as scratch area */
@@ -149,8 +150,7 @@ typedef struct isdnloop_stat {
int action;
} isdnloop_stat;
/* *INDENT-OFF* */
-static isdnloop_stat isdnloop_stat_table[] =
-{
+static isdnloop_stat isdnloop_stat_table[] = {
{"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
{"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
{"DCON_", ISDN_STAT_DCONN, 0}, /* D-Channel connected */
@@ -317,7 +317,8 @@ isdnloop_polldchan(unsigned long data)
u_char *p;
isdn_ctrl cmd;
- if ((skb = skb_dequeue(&card->dqueue)))
+ skb = skb_dequeue(&card->dqueue);
+ if (skb)
avail = skb->len;
else
avail = 0;
@@ -471,8 +472,8 @@ isdnloop_fake(isdnloop_card *card, char *s, int ch)
{
struct sk_buff *skb;
int len = strlen(s) + ((ch >= 0) ? 3 : 0);
-
- if (!(skb = dev_alloc_skb(len))) {
+ skb = dev_alloc_skb(len);
+ if (!skb) {
printk(KERN_WARNING "isdnloop: Out of memory in isdnloop_fake\n");
return 1;
}
@@ -483,8 +484,7 @@ isdnloop_fake(isdnloop_card *card, char *s, int ch)
return 0;
}
/* *INDENT-OFF* */
-static isdnloop_stat isdnloop_cmd_table[] =
-{
+static isdnloop_stat isdnloop_cmd_table[] = {
{"BCON_R", 0, 1}, /* B-Channel connect */
{"BCON_I", 0, 17}, /* B-Channel connect ind */
{"BDIS_R", 0, 2}, /* B-Channel disconnect */
@@ -525,10 +525,8 @@ isdnloop_fake_err(isdnloop_card *card)
isdnloop_fake(card, "NAK", -1);
}
-static u_char ctable_eu[] =
-{0x00, 0x11, 0x01, 0x12};
-static u_char ctable_1t[] =
-{0x00, 0x3b, 0x01, 0x3a};
+static u_char ctable_eu[] = {0x00, 0x11, 0x01, 0x12};
+static u_char ctable_1t[] = {0x00, 0x3b, 0x01, 0x3a};
/*
* Assemble a simplified cause message depending on the
@@ -554,9 +552,9 @@ isdnloop_unicause(isdnloop_card *card, int loc, int cau)
sprintf(buf, "%02X44", ctable_1t[cau]);
break;
default:
- return ("0000");
+ return "0000";
}
- return (buf);
+ return buf;
}
/*
@@ -647,10 +645,8 @@ isdnloop_kill_ctimer(isdnloop_card *card, int ch)
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
-static u_char si2bit[] =
-{0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
-static u_char bit2si[] =
-{1, 5, 7};
+static u_char si2bit[] = {0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
+static u_char bit2si[] = {1, 5, 7};
/*
* Try finding a listener for an outgoing call.
@@ -754,17 +750,17 @@ isdnloop_vstphone(isdnloop_card *card, char *phone, int caller)
if (caller) {
for (i = 0; i < 2; i++)
if (!(strcmp(card->s0num[i], phone)))
- return (phone);
- return (card->s0num[0]);
+ return phone;
+ return card->s0num[0];
}
- return (phone);
+ return phone;
break;
case ISDN_PTYPE_1TR6:
if (caller) {
sprintf(nphone, "%s%c", card->s0num[0], phone[0]);
- return (nphone);
+ return nphone;
} else
- return (&phone[strlen(phone) - 1]);
+ return &phone[strlen(phone) - 1];
break;
}
return "";
@@ -1148,14 +1144,14 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
case ISDNLOOP_IOCTL_STARTUP:
if (!access_ok(VERIFY_READ, (void *) a, sizeof(isdnloop_sdef)))
return -EFAULT;
- return (isdnloop_start(card, (isdnloop_sdef *) a));
+ return isdnloop_start(card, (isdnloop_sdef *) a);
break;
case ISDNLOOP_IOCTL_ADDCARD:
if (copy_from_user((char *)&cdef,
(char *)a,
sizeof(cdef)))
return -EFAULT;
- return (isdnloop_addcard(cdef.id1));
+ return isdnloop_addcard(cdef.id1);
break;
case ISDNLOOP_IOCTL_LEASEDCFG:
if (a) {
@@ -1377,7 +1373,7 @@ if_command(isdn_ctrl *c)
isdnloop_card *card = isdnloop_findcard(c->driver);
if (card)
- return (isdnloop_command(c, card));
+ return isdnloop_command(c, card);
printk(KERN_ERR
"isdnloop: if_command called with invalid driverId!\n");
return -ENODEV;
@@ -1391,7 +1387,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
- return (isdnloop_writecmd(buf, len, 1, card));
+ return isdnloop_writecmd(buf, len, 1, card);
}
printk(KERN_ERR
"isdnloop: if_writecmd called with invalid driverId!\n");
@@ -1406,7 +1402,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
- return (isdnloop_readstatus(buf, len, card));
+ return isdnloop_readstatus(buf, len, card);
}
printk(KERN_ERR
"isdnloop: if_readstatus called with invalid driverId!\n");
@@ -1423,7 +1419,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
return -ENODEV;
/* ack request stored in skb scratch area */
*(skb->head) = ack;
- return (isdnloop_sendbuf(channel, skb, card));
+ return isdnloop_sendbuf(channel, skb, card);
}
printk(KERN_ERR
"isdnloop: if_sendbuf called with invalid driverId!\n");
@@ -1439,8 +1435,8 @@ isdnloop_initcard(char *id)
{
isdnloop_card *card;
int i;
-
- if (!(card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL))) {
+ card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL);
+ if (!card) {
printk(KERN_WARNING
"isdnloop: (%s) Could not allocate card-struct.\n", id);
return (isdnloop_card *) 0;
@@ -1489,8 +1485,8 @@ static int
isdnloop_addcard(char *id1)
{
isdnloop_card *card;
-
- if (!(card = isdnloop_initcard(id1))) {
+ card = isdnloop_initcard(id1);
+ if (!card) {
return -EIO;
}
printk(KERN_INFO
@@ -1503,7 +1499,7 @@ static int __init
isdnloop_init(void)
{
if (isdnloop_id)
- return (isdnloop_addcard(isdnloop_id));
+ return isdnloop_addcard(isdnloop_id);
return 0;
}
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index d6f19b168e8a..3597ef47b28a 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -30,7 +30,7 @@ static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCo
static unsigned int io[] = {0, 0, 0, 0};
static unsigned char irq[] = {0, 0, 0, 0};
static unsigned long ram[] = {0, 0, 0, 0};
-static bool do_reset = 0;
+static bool do_reset;
module_param_array(io, int, NULL, 0);
module_param_array(irq, byte, NULL, 0);
@@ -104,13 +104,12 @@ static int __init sc_init(void)
io[b] + 0x400 * EXP_PAGE0);
continue;
}
- }
- else {
+ } else {
/*
* Yes, probe for I/O Base
*/
if (probe_exhasted) {
- pr_debug("All probe addresses exhasted, skipping\n");
+ pr_debug("All probe addresses exhausted, skipping\n");
continue;
}
pr_debug("Probing for I/O...\n");
@@ -169,8 +168,7 @@ static int __init sc_init(void)
model = identify_board(ram[b], io[b]);
release_region(ram[b], SRAM_PAGESIZE);
}
- }
- else {
+ } else {
/*
* Yes, probe for free RAM and look for
* a signature and id the board model
@@ -187,7 +185,7 @@ static int __init sc_init(void)
ram[b] = i;
break;
}
- pr_debug(" Unidentifed or inaccessible\n");
+ pr_debug(" Unidentified or inaccessible\n");
continue;
}
pr_debug(" request failed\n");
@@ -337,8 +335,7 @@ static int __init sc_init(void)
sc_adapter[cinst]->interrupt = irq[b];
if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
0, interface->id,
- (void *)(unsigned long) cinst))
- {
+ (void *)(unsigned long) cinst)) {
kfree(sc_adapter[cinst]->channel);
indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL); /* Fix me */
kfree(interface);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a6c3d2f153f3..25b320d64e26 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -22,6 +22,16 @@ config LEDS_CLASS
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
+config LEDS_CLASS_FLASH
+ tristate "LED Flash Class Support"
+ depends on LEDS_CLASS
+ help
+ This option enables the flash LED sysfs class in /sys/class/leds.
+ It wraps the LED class and adds flash-LED-specific sysfs attributes
+ and a kernel-internal API to it. You'll need this to provide support
+ for the flash-related features of an LED device. It can be built
+ as a module.
+
comment "LED drivers"
config LEDS_88PM860X
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 1c65a191d907..cbba921b6f1c 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -2,6 +2,7 @@
# LED Core
obj-$(CONFIG_NEW_LEDS) += led-core.o
obj-$(CONFIG_LEDS_CLASS) += led-class.o
+obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
new file mode 100644
index 000000000000..4a19fd44f93f
--- /dev/null
+++ b/drivers/leds/led-class-flash.c
@@ -0,0 +1,486 @@
+/*
+ * LED Flash class interface
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/led-class-flash.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "leds.h"
+
+#define has_flash_op(fled_cdev, op) \
+ (fled_cdev && fled_cdev->ops->op)
+
+#define call_flash_op(fled_cdev, op, args...) \
+ ((has_flash_op(fled_cdev, op)) ? \
+ (fled_cdev->ops->op(fled_cdev, args)) : \
+ -EINVAL)
+
+static const char * const led_flash_fault_names[] = {
+ "led-over-voltage",
+ "flash-timeout-exceeded",
+ "controller-over-temperature",
+ "controller-short-circuit",
+ "led-power-supply-over-current",
+ "indicator-led-fault",
+ "led-under-voltage",
+ "controller-under-voltage",
+ "led-over-temperature",
+};
+
+static ssize_t flash_brightness_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ unsigned long state;
+ ssize_t ret;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ goto unlock;
+
+ ret = led_set_flash_brightness(fled_cdev, state);
+ if (ret < 0)
+ goto unlock;
+
+ ret = size;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
+}
+
+static ssize_t flash_brightness_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+
+ /* no lock needed for this */
+ led_update_flash_brightness(fled_cdev);
+
+ return sprintf(buf, "%u\n", fled_cdev->brightness.val);
+}
+static DEVICE_ATTR_RW(flash_brightness);
+
+static ssize_t max_flash_brightness_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+
+ return sprintf(buf, "%u\n", fled_cdev->brightness.max);
+}
+static DEVICE_ATTR_RO(max_flash_brightness);
+
+static ssize_t flash_strobe_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ unsigned long state;
+ ssize_t ret = -EINVAL;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ goto unlock;
+
+ if (state < 0 || state > 1) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = led_set_flash_strobe(fled_cdev, state);
+ if (ret < 0)
+ goto unlock;
+ ret = size;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
+}
+
+static ssize_t flash_strobe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ bool state;
+ int ret;
+
+ /* no lock needed for this */
+ ret = led_get_flash_strobe(fled_cdev, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%u\n", state);
+}
+static DEVICE_ATTR_RW(flash_strobe);
+
+static ssize_t flash_timeout_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ unsigned long flash_timeout;
+ ssize_t ret;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = kstrtoul(buf, 10, &flash_timeout);
+ if (ret)
+ goto unlock;
+
+ ret = led_set_flash_timeout(fled_cdev, flash_timeout);
+ if (ret < 0)
+ goto unlock;
+
+ ret = size;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
+}
+
+static ssize_t flash_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+
+ return sprintf(buf, "%u\n", fled_cdev->timeout.val);
+}
+static DEVICE_ATTR_RW(flash_timeout);
+
+static ssize_t max_flash_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+
+ return sprintf(buf, "%u\n", fled_cdev->timeout.max);
+}
+static DEVICE_ATTR_RO(max_flash_timeout);
+
+static ssize_t flash_fault_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ u32 fault, mask = 0x1;
+ char *pbuf = buf;
+ int i, ret, buf_len;
+
+ ret = led_get_flash_fault(fled_cdev, &fault);
+ if (ret < 0)
+ return -EINVAL;
+
+ *buf = '\0';
+
+ for (i = 0; i < LED_NUM_FLASH_FAULTS; ++i) {
+ if (fault & mask) {
+ buf_len = sprintf(pbuf, "%s ",
+ led_flash_fault_names[i]);
+ pbuf += buf_len;
+ }
+ mask <<= 1;
+ }
+
+ return sprintf(buf, "%s\n", buf);
+}
+static DEVICE_ATTR_RO(flash_fault);
+
+static ssize_t available_sync_leds_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ char *pbuf = buf;
+ int i, buf_len;
+
+ buf_len = sprintf(pbuf, "[0: none] ");
+ pbuf += buf_len;
+
+ for (i = 0; i < fled_cdev->num_sync_leds; ++i) {
+ buf_len = sprintf(pbuf, "[%d: %s] ", i + 1,
+ fled_cdev->sync_leds[i]->led_cdev.name);
+ pbuf += buf_len;
+ }
+
+ return sprintf(buf, "%s\n", buf);
+}
+static DEVICE_ATTR_RO(available_sync_leds);
+
+static ssize_t flash_sync_strobe_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ unsigned long led_id;
+ ssize_t ret;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = kstrtoul(buf, 10, &led_id);
+ if (ret)
+ goto unlock;
+
+ if (led_id > fled_cdev->num_sync_leds) {
+ ret = -ERANGE;
+ goto unlock;
+ }
+
+ fled_cdev->sync_led_id = led_id;
+
+ ret = size;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
+}
+
+static ssize_t flash_sync_strobe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ int sled_id = fled_cdev->sync_led_id;
+ char *sync_led_name = "none";
+
+ if (fled_cdev->sync_led_id > 0)
+ sync_led_name = (char *)
+ fled_cdev->sync_leds[sled_id - 1]->led_cdev.name;
+
+ return sprintf(buf, "[%d: %s]\n", sled_id, sync_led_name);
+}
+static DEVICE_ATTR_RW(flash_sync_strobe);
+
+static struct attribute *led_flash_strobe_attrs[] = {
+ &dev_attr_flash_strobe.attr,
+ NULL,
+};
+
+static struct attribute *led_flash_timeout_attrs[] = {
+ &dev_attr_flash_timeout.attr,
+ &dev_attr_max_flash_timeout.attr,
+ NULL,
+};
+
+static struct attribute *led_flash_brightness_attrs[] = {
+ &dev_attr_flash_brightness.attr,
+ &dev_attr_max_flash_brightness.attr,
+ NULL,
+};
+
+static struct attribute *led_flash_fault_attrs[] = {
+ &dev_attr_flash_fault.attr,
+ NULL,
+};
+
+static struct attribute *led_flash_sync_strobe_attrs[] = {
+ &dev_attr_available_sync_leds.attr,
+ &dev_attr_flash_sync_strobe.attr,
+ NULL,
+};
+
+static const struct attribute_group led_flash_strobe_group = {
+ .attrs = led_flash_strobe_attrs,
+};
+
+static const struct attribute_group led_flash_timeout_group = {
+ .attrs = led_flash_timeout_attrs,
+};
+
+static const struct attribute_group led_flash_brightness_group = {
+ .attrs = led_flash_brightness_attrs,
+};
+
+static const struct attribute_group led_flash_fault_group = {
+ .attrs = led_flash_fault_attrs,
+};
+
+static const struct attribute_group led_flash_sync_strobe_group = {
+ .attrs = led_flash_sync_strobe_attrs,
+};
+
+static void led_flash_resume(struct led_classdev *led_cdev)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+
+ call_flash_op(fled_cdev, flash_brightness_set,
+ fled_cdev->brightness.val);
+ call_flash_op(fled_cdev, timeout_set, fled_cdev->timeout.val);
+}
+
+static void led_flash_init_sysfs_groups(struct led_classdev_flash *fled_cdev)
+{
+ struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+ const struct led_flash_ops *ops = fled_cdev->ops;
+ const struct attribute_group **flash_groups = fled_cdev->sysfs_groups;
+
+ int num_sysfs_groups = 0;
+
+ flash_groups[num_sysfs_groups++] = &led_flash_strobe_group;
+
+ if (ops->flash_brightness_set)
+ flash_groups[num_sysfs_groups++] = &led_flash_brightness_group;
+
+ if (ops->timeout_set)
+ flash_groups[num_sysfs_groups++] = &led_flash_timeout_group;
+
+ if (ops->fault_get)
+ flash_groups[num_sysfs_groups++] = &led_flash_fault_group;
+
+ if (led_cdev->flags & LED_DEV_CAP_SYNC_STROBE)
+ flash_groups[num_sysfs_groups++] = &led_flash_sync_strobe_group;
+
+ led_cdev->groups = flash_groups;
+}
+
+int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ struct led_classdev *led_cdev;
+ const struct led_flash_ops *ops;
+ int ret;
+
+ if (!fled_cdev)
+ return -EINVAL;
+
+ led_cdev = &fled_cdev->led_cdev;
+
+ if (led_cdev->flags & LED_DEV_CAP_FLASH) {
+ if (!led_cdev->brightness_set_sync)
+ return -EINVAL;
+
+ ops = fled_cdev->ops;
+ if (!ops || !ops->strobe_set)
+ return -EINVAL;
+
+ led_cdev->flash_resume = led_flash_resume;
+
+ /* Select the sysfs attributes to be created for the device */
+ led_flash_init_sysfs_groups(fled_cdev);
+ }
+
+ /* Register led class device */
+ ret = led_classdev_register(parent, led_cdev);
+ if (ret < 0)
+ return ret;
+
+ /* Setting a torch brightness needs to have immediate effect */
+ led_cdev->flags &= ~SET_BRIGHTNESS_ASYNC;
+ led_cdev->flags |= SET_BRIGHTNESS_SYNC;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_classdev_flash_register);
+
+void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
+{
+ if (!fled_cdev)
+ return;
+
+ led_classdev_unregister(&fled_cdev->led_cdev);
+}
+EXPORT_SYMBOL_GPL(led_classdev_flash_unregister);
+
+static void led_clamp_align(struct led_flash_setting *s)
+{
+ u32 v, offset;
+
+ v = s->val + s->step / 2;
+ v = clamp(v, s->min, s->max);
+ offset = v - s->min;
+ offset = s->step * (offset / s->step);
+ s->val = s->min + offset;
+}
+
+int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout)
+{
+ struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+ struct led_flash_setting *s = &fled_cdev->timeout;
+
+ s->val = timeout;
+ led_clamp_align(s);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ return call_flash_op(fled_cdev, timeout_set, s->val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_set_flash_timeout);
+
+int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault)
+{
+ return call_flash_op(fled_cdev, fault_get, fault);
+}
+EXPORT_SYMBOL_GPL(led_get_flash_fault);
+
+int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness)
+{
+ struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+ struct led_flash_setting *s = &fled_cdev->brightness;
+
+ s->val = brightness;
+ led_clamp_align(s);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ return call_flash_op(fled_cdev, flash_brightness_set, s->val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_set_flash_brightness);
+
+int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
+{
+ struct led_flash_setting *s = &fled_cdev->brightness;
+ u32 brightness;
+
+ if (has_flash_op(fled_cdev, flash_brightness_get)) {
+ int ret = call_flash_op(fled_cdev, flash_brightness_get,
+ &brightness);
+ if (ret < 0)
+ return ret;
+
+ s->val = brightness;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_update_flash_brightness);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("LED Flash class interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index dbeebac38d31..795ec994c663 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -179,6 +179,10 @@ EXPORT_SYMBOL_GPL(led_classdev_suspend);
void led_classdev_resume(struct led_classdev *led_cdev)
{
led_cdev->brightness_set(led_cdev, led_cdev->brightness);
+
+ if (led_cdev->flash_resume)
+ led_cdev->flash_resume(led_cdev);
+
led_cdev->flags &= ~LED_SUSPENDED;
}
EXPORT_SYMBOL_GPL(led_classdev_resume);
@@ -239,9 +243,8 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
- init_timer(&led_cdev->blink_timer);
- led_cdev->blink_timer.function = led_timer_function;
- led_cdev->blink_timer.data = (unsigned long)led_cdev;
+ setup_timer(&led_cdev->blink_timer, led_timer_function,
+ (unsigned long)led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 7ea1ea42c2d2..d26af0a79a90 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -187,6 +187,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
led.gpiod = devm_get_gpiod_from_child(dev, child);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
+ ret = PTR_ERR(led.gpiod);
goto err;
}
@@ -229,7 +230,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
err:
for (count = priv->num_leds - 2; count >= 0; count--)
delete_gpio_led(&priv->leds[count]);
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(ret);
}
static const struct of_device_id of_gpio_leds_match[] = {
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 85c3714e1b5a..e2b847fe22a1 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -134,9 +134,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
if (!pdata)
return ERR_PTR(-ENOMEM);
- of_node_get(dev->parent->of_node);
-
- parent = of_find_node_by_name(dev->parent->of_node, "leds");
+ parent = of_get_child_by_name(dev->parent->of_node, "leds");
if (!parent)
goto out_node_put;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 26515c27ea8c..25e419752a7b 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
led_dat->sata = 0;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- /*
- * If available, expose the SATA activity blink capability through
- * a "sata" sysfs attribute.
- */
- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
- led_dat->cdev.groups = netxbig_led_groups;
led_dat->mode_addr = template->mode_addr;
led_dat->mode_val = template->mode_val;
led_dat->bright_addr = template->bright_addr;
led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
led_dat->timer = pdata->timer;
led_dat->num_timer = pdata->num_timer;
+ /*
+ * If available, expose the SATA activity blink capability through
+ * a "sata" sysfs attribute.
+ */
+ if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+ led_dat->cdev.groups = netxbig_led_groups;
return led_classdev_register(&pdev->dev, &led_dat->cdev);
}
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 2348dbda5269..79efe57c7405 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -20,7 +20,8 @@
static inline void led_set_brightness_async(struct led_classdev *led_cdev,
enum led_brightness value)
{
- led_cdev->brightness = min(value, led_cdev->max_brightness);
+ value = min(value, led_cdev->max_brightness);
+ led_cdev->brightness = value;
if (!(led_cdev->flags & LED_SUSPENDED))
led_cdev->brightness_set(led_cdev, value);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 922a1acbf652..6adfd7ba4c97 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -47,6 +47,7 @@
#include <asm/lguest.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
+#include <asm/tlbflush.h>
#include "../lg.h"
static int cpu_had_pge;
@@ -452,9 +453,9 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
static void adjust_pge(void *on)
{
if (on)
- write_cr4(read_cr4() | X86_CR4_PGE);
+ cr4_set_bits(X86_CR4_PGE);
else
- write_cr4(read_cr4() & ~X86_CR4_PGE);
+ cr4_clear_bits(X86_CR4_PGE);
}
/*H:020
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c04fed9eb15d..84325f267acf 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -45,4 +45,10 @@ config PCC
states). Select this driver if your platform implements the
PCC clients mentioned above.
+config ALTERA_MBOX
+ tristate "Altera Mailbox"
+ help
+ An implementation of the Altera Mailbox soft core. It is used
+ to send messages between processors. Say Y here if you want to use
+ Altera mailbox support.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index dd412c22208b..2e79231154cf 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
obj-$(CONFIG_PCC) += pcc.o
+
+obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
new file mode 100644
index 000000000000..a266265677d3
--- /dev/null
+++ b/drivers/mailbox/mailbox-altera.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright Altera Corporation (C) 2013-2014. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "altera-mailbox"
+
+#define MAILBOX_CMD_REG 0x00
+#define MAILBOX_PTR_REG 0x04
+#define MAILBOX_STS_REG 0x08
+#define MAILBOX_INTMASK_REG 0x0C
+
+#define INT_PENDING_MSK 0x1
+#define INT_SPACE_MSK 0x2
+
+#define STS_PENDING_MSK 0x1
+#define STS_FULL_MSK 0x2
+#define STS_FULL_OFT 0x1
+
+#define MBOX_PENDING(status) (((status) & STS_PENDING_MSK))
+#define MBOX_FULL(status) (((status) & STS_FULL_MSK) >> STS_FULL_OFT)
+
+enum altera_mbox_msg {
+ MBOX_CMD = 0,
+ MBOX_PTR,
+};
+
+#define MBOX_POLLING_MS 5 /* polling interval 5ms */
+
+struct altera_mbox {
+ bool is_sender; /* 1-sender, 0-receiver */
+ bool intr_mode;
+ int irq;
+ void __iomem *mbox_base;
+ struct device *dev;
+ struct mbox_controller controller;
+
+ /* If the controller supports only RX polling mode */
+ struct timer_list rxpoll_timer;
+};
+
+static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct altera_mbox *)chan->con_priv;
+}
+
+static inline int altera_mbox_full(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_FULL(status);
+}
+
+static inline int altera_mbox_pending(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_PENDING(status);
+}
+
+static void altera_mbox_rx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_PENDING_MSK;
+ else
+ mask &= ~INT_PENDING_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static void altera_mbox_tx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_SPACE_MSK;
+ else
+ mask &= ~INT_SPACE_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static bool altera_mbox_is_sender(struct altera_mbox *mbox)
+{
+ u32 reg;
+ /* Write a magic number to PTR register and read back this register.
+ * This register is read-write if it is a sender.
+ */
+ #define MBOX_MAGIC 0xA5A5AA55
+ writel_relaxed(MBOX_MAGIC, mbox->mbox_base + MAILBOX_PTR_REG);
+ reg = readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ if (reg == MBOX_MAGIC) {
+ /* Clear to 0 */
+ writel_relaxed(0, mbox->mbox_base + MAILBOX_PTR_REG);
+ return true;
+ }
+ return false;
+}
+
+static void altera_mbox_rx_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 data[2];
+
+ if (altera_mbox_pending(mbox)) {
+ data[MBOX_PTR] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ data[MBOX_CMD] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
+ mbox_chan_received_data(chan, (void *)data);
+ }
+}
+
+static void altera_mbox_poll_rx(unsigned long data)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)data;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ altera_mbox_rx_data(chan);
+
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+}
+
+static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ altera_mbox_tx_intmask(mbox, false);
+ mbox_chan_txdone(chan, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+
+ altera_mbox_rx_data(chan);
+ return IRQ_HANDLED;
+}
+
+static int altera_mbox_startup_sender(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_tx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ dev_err(mbox->dev,
+ "failed to register mailbox interrupt:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int altera_mbox_startup_receiver(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_rx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ mbox->intr_mode = false;
+ goto polling; /* use polling if failed */
+ }
+
+ altera_mbox_rx_intmask(mbox, true);
+ return 0;
+ }
+
+polling:
+ /* Setup polling timer */
+ setup_timer(&mbox->rxpoll_timer, altera_mbox_poll_rx,
+ (unsigned long)chan);
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+
+ return 0;
+}
+
+static int altera_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 *udata = (u32 *)data;
+
+ if (!mbox || !data)
+ return -EINVAL;
+ if (!mbox->is_sender) {
+ dev_warn(mbox->dev,
+ "failed to send. This is receiver mailbox.\n");
+ return -EINVAL;
+ }
+
+ if (altera_mbox_full(mbox))
+ return -EBUSY;
+
+ /* Enable interrupt before send */
+ if (mbox->intr_mode)
+ altera_mbox_tx_intmask(mbox, true);
+
+ /* The pointer register must be written before the command register */
+ writel_relaxed(udata[MBOX_PTR], mbox->mbox_base + MAILBOX_PTR_REG);
+ writel_relaxed(udata[MBOX_CMD], mbox->mbox_base + MAILBOX_CMD_REG);
+
+ return 0;
+}
+
+static bool altera_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ /* Return false if the mailbox is full */
+ return !altera_mbox_full(mbox);
+}
+
+static bool altera_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ return altera_mbox_pending(mbox);
+}
+
+static int altera_mbox_startup(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ int ret = 0;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (mbox->is_sender)
+ ret = altera_mbox_startup_sender(chan);
+ else
+ ret = altera_mbox_startup_receiver(chan);
+
+ return ret;
+}
+
+static void altera_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ /* Unmask all interrupt masks */
+ writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
+ free_irq(mbox->irq, chan);
+ } else if (!mbox->is_sender) {
+ del_timer_sync(&mbox->rxpoll_timer);
+ }
+}
+
+static struct mbox_chan_ops altera_mbox_ops = {
+ .send_data = altera_mbox_send_data,
+ .startup = altera_mbox_startup,
+ .shutdown = altera_mbox_shutdown,
+ .last_tx_done = altera_mbox_last_tx_done,
+ .peek_data = altera_mbox_peek_data,
+};
+
+static int altera_mbox_probe(struct platform_device *pdev)
+{
+ struct altera_mbox *mbox;
+ struct resource *regs;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mbox->mbox_base))
+ return PTR_ERR(mbox->mbox_base);
+
+ /* Check whether this is a sender or a receiver */
+ mbox->is_sender = altera_mbox_is_sender(mbox);
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq >= 0)
+ mbox->intr_mode = true;
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &altera_mbox_ops;
+
+ if (mbox->is_sender) {
+ if (mbox->intr_mode) {
+ mbox->controller.txdone_irq = true;
+ } else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = MBOX_POLLING_MS;
+ }
+ }
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Register mailbox failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+err:
+ return ret;
+}
+
+static int altera_mbox_remove(struct platform_device *pdev)
+{
+ struct altera_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (!mbox)
+ return -EINVAL;
+
+ mbox_controller_unregister(&mbox->controller);
+
+ return 0;
+}
+
+static const struct of_device_id altera_mbox_match[] = {
+ { .compatible = "altr,mailbox-1.0" },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, altera_mbox_match);
+
+static struct platform_driver altera_mbox_driver = {
+ .probe = altera_mbox_probe,
+ .remove = altera_mbox_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = altera_mbox_match,
+ },
+};
+
+module_platform_driver(altera_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Altera mailbox specific functions");
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_ALIAS("platform:altera-mailbox");
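For the consumer side, a rough sketch of a hypothetical mailbox client sending the two-word message the driver expects (word 0 is the command, word 1 the pointer); the channel index, timeout and payload values are assumptions:

#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

/* Hypothetical client: sends one command/pointer pair through channel 0. */
static int example_send(struct platform_device *pdev)
{
	struct mbox_client cl = {
		.dev = &pdev->dev,
		.tx_block = true,
		.tx_tout = 500,			/* ms */
	};
	u32 msg[2] = { 0x1 /* command word */, 0x1000 /* pointer word */ };
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}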
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 59aad4d5da53..19b491d2964f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -87,7 +87,7 @@ static void msg_submit(struct mbox_chan *chan)
exit:
spin_unlock_irqrestore(&chan->lock, flags);
- if (!err && chan->txdone_method == TXDONE_BY_POLL)
+ if (!err && (chan->txdone_method & TXDONE_BY_POLL))
poll_txdone((unsigned long)chan->mbox);
}
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 6dbf6fcbdfaf..977c814cdf6f 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -386,16 +386,16 @@ static int __init pcc_init(void)
ret = acpi_pcc_probe();
if (ret) {
- pr_err("ACPI PCC probe failed.\n");
+ pr_debug("ACPI PCC probe failed.\n");
return -ENODEV;
}
pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
pcc_mbox_probe, NULL, 0, NULL, 0);
- if (!pcc_pdev) {
- pr_err("Err creating PCC platform bundle\n");
- return -ENODEV;
+ if (IS_ERR(pcc_pdev)) {
+ pr_debug("Err creating PCC platform bundle\n");
+ return PTR_ERR(pcc_pdev);
}
return 0;
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index f956ef26c0ce..fb7493dcfb79 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -7,6 +7,7 @@
#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
#define CHAMELEON_FILENAME_LEN 12
#define CHAMELEONV2_MAGIC 0xabce
+#define CHAM_HEADER_SIZE 0x200
enum chameleon_descriptor_type {
CHAMELEON_DTYPE_GENERAL = 0x0,
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index b59181965643..0af7361e377f 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -17,6 +17,7 @@
struct priv {
struct mcb_bus *bus;
+ phys_addr_t mapbase;
void __iomem *base;
};
@@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev)
static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct resource *res;
struct priv *priv;
- phys_addr_t mapbase;
int ret;
int num_cells;
unsigned long flags;
@@ -47,23 +48,25 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- mapbase = pci_resource_start(pdev, 0);
- if (!mapbase) {
+ priv->mapbase = pci_resource_start(pdev, 0);
+ if (!priv->mapbase) {
dev_err(&pdev->dev, "No PCI resource\n");
- goto err_start;
+ goto out_disable;
}
- ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request PCI BARs\n");
- goto err_start;
+ res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
+ KBUILD_MODNAME);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to request PCI memory\n");
+ ret = -EBUSY;
+ goto out_disable;
}
- priv->base = pci_iomap(pdev, 0, 0);
+ priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
- goto err_ioremap;
+ goto out_release;
}
flags = pci_resource_flags(pdev, 0);
@@ -71,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = -ENOTSUPP;
dev_err(&pdev->dev,
"IO mapped PCI devices are not supported\n");
- goto err_ioremap;
+ goto out_release;
}
pci_set_drvdata(pdev, priv);
@@ -79,25 +82,27 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv->bus = mcb_alloc_bus(&pdev->dev);
if (IS_ERR(priv->bus)) {
ret = PTR_ERR(priv->bus);
- goto err_drvdata;
+ goto out_iounmap;
}
priv->bus->get_irq = mcb_pci_get_irq;
- ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+ ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
if (ret < 0)
- goto err_drvdata;
+ goto out_iounmap;
num_cells = ret;
dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
mcb_bus_add_devices(priv->bus);
-err_drvdata:
- pci_iounmap(pdev, priv->base);
-err_ioremap:
+ return 0;
+
+out_iounmap:
+ iounmap(priv->base);
+out_release:
pci_release_region(pdev, 0);
-err_start:
+out_disable:
pci_disable_device(pdev);
return ret;
}
@@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev)
struct priv *priv = pci_get_drvdata(pdev);
mcb_release_bus(priv->bus);
+
+ iounmap(priv->base);
+ release_region(priv->mapbase, CHAM_HEADER_SIZE);
+ pci_disable_device(pdev);
}
static const struct pci_device_id mcb_pci_tbl[] = {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5bdedf6df153..c39644478aa4 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -5,6 +5,7 @@
menuconfig MD
bool "Multiple devices driver support (RAID and LVM)"
depends on BLOCK
+ select SRCU
help
Support multiple physical spindles through a single logical device.
Required for RAID and logical volume management.
@@ -230,9 +231,8 @@ config DM_CRYPT
transparently encrypts the data on it. You'll need to activate
the ciphers you're going to use in the cryptoapi configuration.
- Information on how to use dm-crypt can be found on
-
- <http://www.saout.de/misc/dm-crypt/>
+ For further information on dm-crypt and userspace tools see:
+ <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
To compile this code as a module, choose M here: the module will
be called dm-crypt.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index da3604e73e8a..3a5767968ba0 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
/* this page has not been allocated yet */
spin_unlock_irq(&bitmap->lock);
+ /* It is possible that this is being called inside a
+ * prepare_to_wait/finish_wait loop from raid5.c:make_request().
+ * In general it is not permitted to sleep in that context as it
+ * can cause the loop to spin freely.
+ * That doesn't apply here as we can only reach this point
+ * once with any loop.
+ * When this function completes, either bp[page].map or
+ * bp[page].hijacked will be set. In either case, this function will
+ * abort before getting to this point again. So there is
+ * no risk of a free-spin, and so it is safe to assert
+ * that sleeping here is allowed.
+ */
+ sched_annotate_sleep();
mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
spin_lock_irq(&bitmap->lock);
@@ -1606,7 +1619,9 @@ void bitmap_destroy(struct mddev *mddev)
return;
mutex_lock(&mddev->bitmap_info.mutex);
+ spin_lock(&mddev->lock);
mddev->bitmap = NULL; /* disconnect from the md device */
+ spin_unlock(&mddev->lock);
mutex_unlock(&mddev->bitmap_info.mutex);
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
@@ -2196,11 +2211,13 @@ __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
int len;
+ spin_lock(&mddev->lock);
if (mddev->bitmap)
len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
"false" : "true"));
else
len = sprintf(page, "\n");
+ spin_unlock(&mddev->lock);
return len;
}
@@ -2225,10 +2242,15 @@ __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
+ ssize_t ret;
+ spin_lock(&mddev->lock);
if (mddev->bitmap == NULL)
- return sprintf(page, "0\n");
- return sprintf(page, "%lu\n",
- mddev->bitmap->behind_writes_used);
+ ret = sprintf(page, "0\n");
+ else
+ ret = sprintf(page, "%lu\n",
+ mddev->bitmap->behind_writes_used);
+ spin_unlock(&mddev->lock);
+ return ret;
}
static ssize_t
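Aside: the long comment added above relies on how prepare_to_wait()/finish_wait() loops behave. A sleep inside the loop body resets the task state the loop has just set, which can turn the wait into a busy spin, and with CONFIG_DEBUG_ATOMIC_SLEEP it triggers the warning that sched_annotate_sleep() suppresses for a known-safe one-shot case. A hedged, generic sketch of the loop shape in question, with an invented wait condition:

#include <linux/sched.h>
#include <linux/wait.h>

/* Wait until *flag becomes non-zero; woken via wake_up(wq) elsewhere. */
static void wait_for_flag(wait_queue_head_t *wq, int *flag)
{
	DEFINE_WAIT(w);

	for (;;) {
		prepare_to_wait(wq, &w, TASK_UNINTERRUPTIBLE);
		if (*flag)
			break;
		/*
		 * Anything that sleeps here (e.g. a GFP_KERNEL allocation)
		 * puts the task back to TASK_RUNNING, so the schedule()
		 * below returns immediately and the loop spins.
		 */
		schedule();
	}
	finish_wait(wq, &w);
}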
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c33b49792b87..86dbbc737402 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -11,6 +11,7 @@
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
@@ -1739,7 +1740,7 @@ static unsigned get_max_age_hz(void)
static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
- return (jiffies - b->last_accessed) >= age_hz;
+ return time_after_eq(jiffies, b->last_accessed + age_hz);
}
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
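Aside: the dm-bufio change above, like the later need_commit_due_to_time() hunks, moves raw jiffies arithmetic over to the time_after_eq()/time_in_range() helpers, which compare via a signed difference and therefore behave sanely when the stored timestamp is ahead of jiffies or the counter has wrapped. A small illustrative comparison with invented function names:

#include <linux/jiffies.h>
#include <linux/types.h>

static bool expired_raw(unsigned long then, unsigned long age)
{
	/*
	 * Unsigned subtraction: if 'then' is in the future, the difference
	 * is huge and the test fires immediately.
	 */
	return (jiffies - then) >= age;
}

static bool expired_safe(unsigned long then, unsigned long age)
{
	/* Expands to (long)(jiffies - (then + age)) >= 0. */
	return time_after_eq(jiffies, then + age);
}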
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9fc616c2755e..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
+ atomic_t ref_count;
+ struct list_head list;
+
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
/*----------------------------------------------------------------*/
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
- sector_t data_block_size,
- bool may_format_device,
- size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
{
int r;
struct dm_cache_metadata *cmd;
@@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
DMERR("could not allocate metadata struct");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ atomic_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
return cmd;
}
+/*
+ * We keep a little list of ref counted metadata objects to prevent two
+ * different target instances from creating separate bufio instances. This
+ * would be an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+ struct dm_cache_metadata *cmd;
+
+ list_for_each_entry(cmd, &table, list)
+ if (cmd->bdev == bdev) {
+ atomic_inc(&cmd->ref_count);
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd, *cmd2;
+
+ mutex_lock(&table_lock);
+ cmd = lookup(bdev);
+ mutex_unlock(&table_lock);
+
+ if (cmd)
+ return cmd;
+
+ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+ if (!IS_ERR(cmd)) {
+ mutex_lock(&table_lock);
+ cmd2 = lookup(bdev);
+ if (cmd2) {
+ mutex_unlock(&table_lock);
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ return cmd2;
+ }
+ list_add(&cmd->list, &table);
+ mutex_unlock(&table_lock);
+ }
+
+ return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+ if (cmd->data_block_size != data_block_size) {
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ (unsigned long long) data_block_size,
+ (unsigned long long) cmd->data_block_size);
+ return false;
+ }
+
+ return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+ may_format_device, policy_hint_size);
+
+ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cmd;
+}
+
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- __destroy_persistent_data_objects(cmd);
- kfree(cmd);
+ if (atomic_dec_and_test(&cmd->ref_count)) {
+ mutex_lock(&table_lock);
+ list_del(&cmd->list);
+ mutex_unlock(&table_lock);
+
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ }
}
/*
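Aside: the dm-cache-metadata change above introduces a per-bdev, ref-counted singleton: look the object up under a mutex, do the expensive open with the mutex dropped, then re-check under the mutex so that a racing opener wins and the duplicate is discarded. A generic sketch of that shape, assuming an invented struct obj and treating kzalloc() as the "open" step:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	void *key;
	atomic_t ref;
	struct list_head list;
};

static DEFINE_MUTEX(obj_lock);
static LIST_HEAD(obj_table);

/* Caller must hold obj_lock; takes a reference on a hit. */
static struct obj *obj_lookup(void *key)
{
	struct obj *o;

	list_for_each_entry(o, &obj_table, list)
		if (o->key == key) {
			atomic_inc(&o->ref);
			return o;
		}
	return NULL;
}

static struct obj *obj_get(void *key)
{
	struct obj *o, *o2;

	mutex_lock(&obj_lock);
	o = obj_lookup(key);
	mutex_unlock(&obj_lock);
	if (o)
		return o;

	o = kzalloc(sizeof(*o), GFP_KERNEL);	/* the expensive "open" step */
	if (!o)
		return ERR_PTR(-ENOMEM);
	o->key = key;
	atomic_set(&o->ref, 1);

	mutex_lock(&obj_lock);
	o2 = obj_lookup(key);			/* did someone beat us to it? */
	if (o2) {
		mutex_unlock(&obj_lock);
		kfree(o);			/* discard the duplicate */
		return o2;
	}
	list_add(&o->list, &obj_table);
	mutex_unlock(&obj_lock);
	return o;
}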
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1e96d7889f51..7755af351867 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -11,6 +11,7 @@
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
+#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
@@ -221,7 +222,13 @@ struct cache {
struct list_head need_commit_migrations;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
- atomic_t nr_migrations;
+ atomic_t nr_allocated_migrations;
+
+ /*
+ * The number of in-flight migrations that are performing
+ * background I/O, e.g. promotion or writeback.
+ */
+ atomic_t nr_io_migrations;
wait_queue_head_t quiescing_wait;
atomic_t quiescing;
@@ -258,7 +265,6 @@ struct cache {
struct dm_deferred_set *all_io_ds;
mempool_t *migration_pool;
- struct dm_cache_migration *next_migration;
struct dm_cache_policy *policy;
unsigned policy_nr_args;
@@ -350,10 +356,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
dm_bio_prison_free_cell(cache->prison, cell);
}
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
+{
+ struct dm_cache_migration *mg;
+
+ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (mg) {
+ mg->cache = cache;
+ atomic_inc(&mg->cache->nr_allocated_migrations);
+ }
+
+ return mg;
+}
+
+static void free_migration(struct dm_cache_migration *mg)
+{
+ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
+ wake_up(&mg->cache->migration_wait);
+
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
if (!p->mg) {
- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ p->mg = alloc_migration(cache);
if (!p->mg)
return -ENOMEM;
}
@@ -382,7 +409,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
free_prison_cell(cache, p->cell1);
if (p->mg)
- mempool_free(p->mg, cache->migration_pool);
+ free_migration(p->mg);
}
static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +881,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
* Migration covers moving data from the origin device to the cache, or
* vice versa.
*--------------------------------------------------------------*/
-static void free_migration(struct dm_cache_migration *mg)
-{
- mempool_free(mg, mg->cache->migration_pool);
-}
-
-static void inc_nr_migrations(struct cache *cache)
+static void inc_io_migrations(struct cache *cache)
{
- atomic_inc(&cache->nr_migrations);
+ atomic_inc(&cache->nr_io_migrations);
}
-static void dec_nr_migrations(struct cache *cache)
+static void dec_io_migrations(struct cache *cache)
{
- atomic_dec(&cache->nr_migrations);
-
- /*
- * Wake the worker in case we're suspending the target.
- */
- wake_up(&cache->migration_wait);
+ atomic_dec(&cache->nr_io_migrations);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +911,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
wake_worker(cache);
}
-static void cleanup_migration(struct dm_cache_migration *mg)
+static void free_io_migration(struct dm_cache_migration *mg)
{
- struct cache *cache = mg->cache;
+ dec_io_migrations(mg->cache);
free_migration(mg);
- dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +939,7 @@ static void migration_failure(struct dm_cache_migration *mg)
cell_defer(cache, mg->new_ocell, true);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +950,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
if (mg->writeback) {
clear_dirty(cache, mg->old_oblock, mg->cblock);
cell_defer(cache, mg->old_ocell, false);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
} else if (mg->demote) {
@@ -944,14 +960,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
mg->old_oblock);
if (mg->promote)
cell_defer(cache, mg->new_ocell, true);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
} else {
if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
}
@@ -984,7 +1000,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
} else {
if (mg->invalidate)
policy_remove_mapping(cache->policy, mg->old_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
}
} else {
@@ -999,7 +1015,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
}
@@ -1251,7 +1267,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = cell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1275,7 +1291,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1302,7 +1318,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = new_ocell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1330,7 +1346,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1412,7 +1428,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
static bool spare_migration_bandwidth(struct cache *cache)
{
- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
cache->sectors_per_block;
return current_volume < cache->migration_threshold;
}
@@ -1547,8 +1563,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
static int need_commit_due_to_time(struct cache *cache)
{
- return jiffies < cache->last_commit_jiffies ||
- jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
+ return !time_in_range(jiffies, cache->last_commit_jiffies,
+ cache->last_commit_jiffies + COMMIT_PERIOD);
}
static int commit_if_needed(struct cache *cache)
@@ -1764,7 +1780,7 @@ static void stop_quiescing(struct cache *cache)
static void wait_for_migrations(struct cache *cache)
{
- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}
static void stop_worker(struct cache *cache)
@@ -1876,9 +1892,6 @@ static void destroy(struct cache *cache)
{
unsigned i;
- if (cache->next_migration)
- mempool_free(cache->next_migration, cache->migration_pool);
-
if (cache->migration_pool)
mempool_destroy(cache->migration_pool);
@@ -2424,7 +2437,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
- atomic_set(&cache->nr_migrations, 0);
+ atomic_set(&cache->nr_allocated_migrations, 0);
+ atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2501,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->next_migration = NULL;
-
cache->need_tick_bio = true;
cache->sized = false;
cache->invalidate = false;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 73f791bb9ea4..c8a18e4ee9dc 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -639,8 +639,8 @@ static int check_name(const char *name)
/*
* On successful return, the caller must not attempt to acquire
- * _hash_lock without first calling dm_table_put, because dm_table_destroy
- * waits for this dm_table_put and could be called under this lock.
+ * _hash_lock without first calling dm_put_live_table, because dm_table_destroy
+ * waits for this dm_put_live_table and could be called under this lock.
*/
static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
{
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index b953db6cc229..03177ca0b009 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -6,6 +6,7 @@
#include <linux/bio.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
@@ -829,7 +830,7 @@ static int userspace_is_remote_recovering(struct dm_dirty_log *log,
int r;
uint64_t region64 = region;
struct log_c *lc = log->context;
- static unsigned long long limit;
+ static unsigned long limit;
struct {
int64_t is_recovering;
uint64_t in_sync_hint;
@@ -845,7 +846,7 @@ static int userspace_is_remote_recovering(struct dm_dirty_log *log,
*/
if (region < lc->in_sync_hint)
return 0;
- else if (jiffies < limit)
+ else if (time_after(limit, jiffies))
return 1;
limit = jiffies + (HZ / 4);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7b6b0f0f831a..d376dc87716e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -11,6 +11,7 @@
#include "dm-path-selector.h"
#include "dm-uevent.h"
+#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
@@ -378,18 +379,18 @@ static int __must_push_back(struct multipath *m)
/*
* Map cloned requests
*/
-static int multipath_map(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
+static int __multipath_map(struct dm_target *ti, struct request *clone,
+ union map_info *map_context,
+ struct request *rq, struct request **__clone)
{
struct multipath *m = (struct multipath *) ti->private;
int r = DM_MAPIO_REQUEUE;
- size_t nr_bytes = blk_rq_bytes(clone);
- unsigned long flags;
+ size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
struct pgpath *pgpath;
struct block_device *bdev;
struct dm_mpath_io *mpio;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
/* Do we need to select a new pgpath? */
if (!m->current_pgpath ||
@@ -411,25 +412,61 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
/* ENOMEM, requeue */
goto out_unlock;
- bdev = pgpath->path.dev->bdev;
- clone->q = bdev_get_queue(bdev);
- clone->rq_disk = bdev->bd_disk;
- clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
mpio = map_context->ptr;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
+
+ bdev = pgpath->path.dev->bdev;
+
+ spin_unlock_irq(&m->lock);
+
+ if (clone) {
+ /* Old request-based interface: allocated clone is passed in */
+ clone->q = bdev_get_queue(bdev);
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ } else {
+ /* blk-mq request-based interface */
+ *__clone = blk_get_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), GFP_KERNEL);
+ if (IS_ERR(*__clone))
+ /* ENOMEM, requeue */
+ return r;
+ (*__clone)->bio = (*__clone)->biotail = NULL;
+ (*__clone)->rq_disk = bdev->bd_disk;
+ (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ }
+
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
&pgpath->path,
nr_bytes);
- r = DM_MAPIO_REMAPPED;
+ return DM_MAPIO_REMAPPED;
out_unlock:
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
return r;
}
+static int multipath_map(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
+{
+ return __multipath_map(ti, clone, map_context, NULL, NULL);
+}
+
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+ union map_info *map_context,
+ struct request **clone)
+{
+ return __multipath_map(ti, NULL, map_context, rq, clone);
+}
+
+static void multipath_release_clone(struct request *clone)
+{
+ blk_put_request(clone);
+}
+
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
@@ -1666,11 +1703,13 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
.map_rq = multipath_map,
+ .clone_and_map_rq = multipath_clone_and_map,
+ .release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
.postsuspend = multipath_postsuspend,
@@ -1694,16 +1733,15 @@ static int __init dm_multipath_init(void)
r = dm_register_target(&multipath_target);
if (r < 0) {
DMERR("register failed %d", r);
- kmem_cache_destroy(_mpio_cache);
- return -EINVAL;
+ r = -EINVAL;
+ goto bad_register_target;
}
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
- dm_unregister_target(&multipath_target);
- kmem_cache_destroy(_mpio_cache);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto bad_alloc_kmultipathd;
}
/*
@@ -1716,16 +1754,23 @@ static int __init dm_multipath_init(void)
WQ_MEM_RECLAIM);
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
- destroy_workqueue(kmultipathd);
- dm_unregister_target(&multipath_target);
- kmem_cache_destroy(_mpio_cache);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto bad_alloc_kmpath_handlerd;
}
DMINFO("version %u.%u.%u loaded",
multipath_target.version[0], multipath_target.version[1],
multipath_target.version[2]);
+ return 0;
+
+bad_alloc_kmpath_handlerd:
+ destroy_workqueue(kmultipathd);
+bad_alloc_kmultipathd:
+ dm_unregister_target(&multipath_target);
+bad_register_target:
+ kmem_cache_destroy(_mpio_cache);
+
return r;
}
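Aside: the dm_multipath_init() rework above replaces duplicated cleanup with a single unwind ladder: every failure jumps to a label that tears down only what has already been set up, in reverse order. Below is a self-contained module sketch of the same shape; the cache and workqueue are stand-ins, not the multipath objects.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct kmem_cache *example_cache;
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	int r = -ENOMEM;

	example_cache = kmem_cache_create("example", 64, 0, 0, NULL);
	if (!example_cache)
		goto bad_cache;

	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		goto bad_wq;

	return 0;

bad_wq:
	kmem_cache_destroy(example_cache);
bad_cache:
	return r;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");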
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 07c0fa0fa284..88e4c7f24986 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -746,13 +746,7 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
- if (rs->raid_type->level == 1)
- return md_raid1_congested(&rs->md, bits);
-
- if (rs->raid_type->level == 10)
- return md_raid10_congested(&rs->md, bits);
-
- return md_raid5_congested(&rs->md, bits);
+ return mddev_congested(&rs->md, bits);
}
/*
@@ -1243,7 +1237,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
argv++;
/* Skip over RAID params for now and find out # of devices */
- if (num_raid_params + 1 > argc) {
+ if (num_raid_params >= argc) {
ti->error = "Arguments do not agree with counts given";
return -EINVAL;
}
@@ -1254,6 +1248,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
return -EINVAL;
}
+ argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
+ if (argc != (num_raid_devs * 2)) {
+ ti->error = "Supplied RAID devices does not match the count given";
+ return -EINVAL;
+ }
+
rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
if (IS_ERR(rs))
return PTR_ERR(rs);
@@ -1262,16 +1262,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (ret)
goto bad;
- ret = -EINVAL;
-
- argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
argv += num_raid_params + 1;
- if (argc != (num_raid_devs * 2)) {
- ti->error = "Supplied RAID devices does not match the count given";
- goto bad;
- }
-
ret = dev_parms(rs, argv);
if (ret)
goto bad;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d6e88178d22c..808b8419bc48 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -200,16 +200,11 @@ err_area:
static void free_area(struct pstore *ps)
{
- if (ps->area)
- vfree(ps->area);
+ vfree(ps->area);
ps->area = NULL;
-
- if (ps->zero_area)
- vfree(ps->zero_area);
+ vfree(ps->zero_area);
ps->zero_area = NULL;
-
- if (ps->header_area)
- vfree(ps->header_area);
+ vfree(ps->header_area);
ps->header_area = NULL;
}
@@ -605,8 +600,7 @@ static void persistent_dtr(struct dm_exception_store *store)
free_area(ps);
/* Allocated in persistent_read_metadata */
- if (ps->callbacks)
- vfree(ps->callbacks);
+ vfree(ps->callbacks);
kfree(ps);
}
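Aside: the dm-snap-persistent cleanup above works because vfree() and kfree() are defined to be no-ops on a NULL pointer, so the "if (ptr)" guards add nothing. Minimal illustration with an invented helper:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void free_areas(void *area, void *zero_area)
{
	vfree(area);		/* safe even when area == NULL */
	kfree(zero_area);	/* likewise for kfree() */
}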
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3afae9e062f8..6554d9148927 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -827,10 +827,11 @@ static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
+ bool use_blk_mq = false;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices;
- unsigned live_md_type;
+ unsigned live_md_type = dm_get_md_type(t->md);
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
@@ -854,8 +855,8 @@ static int dm_table_set_type(struct dm_table *t)
* Determine the type from the live device.
* Default to bio-based if device is new.
*/
- live_md_type = dm_get_md_type(t->md);
- if (live_md_type == DM_TYPE_REQUEST_BASED)
+ if (live_md_type == DM_TYPE_REQUEST_BASED ||
+ live_md_type == DM_TYPE_MQ_REQUEST_BASED)
request_based = 1;
else
bio_based = 1;
@@ -869,16 +870,6 @@ static int dm_table_set_type(struct dm_table *t)
BUG_ON(!request_based); /* No targets in this table */
- /* Non-request-stackable devices can't be used for request-based dm */
- devices = dm_table_get_devices(t);
- list_for_each_entry(dd, devices, list) {
- if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) {
- DMWARN("table load rejected: including"
- " non-request-stackable devices");
- return -EINVAL;
- }
- }
-
/*
* Request-based dm supports only tables that have a single target now.
* To support multiple targets, request splitting support is needed,
@@ -890,7 +881,37 @@ static int dm_table_set_type(struct dm_table *t)
return -EINVAL;
}
- t->type = DM_TYPE_REQUEST_BASED;
+ /* Non-request-stackable devices can't be used for request-based dm */
+ devices = dm_table_get_devices(t);
+ list_for_each_entry(dd, devices, list) {
+ struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
+
+ if (!blk_queue_stackable(q)) {
+ DMERR("table load rejected: including"
+ " non-request-stackable devices");
+ return -EINVAL;
+ }
+
+ if (q->mq_ops)
+ use_blk_mq = true;
+ }
+
+ if (use_blk_mq) {
+ /* verify _all_ devices in the table are blk-mq devices */
+ list_for_each_entry(dd, devices, list)
+ if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
+ DMERR("table load rejected: not all devices"
+ " are blk-mq request-stackable");
+ return -EINVAL;
+ }
+ t->type = DM_TYPE_MQ_REQUEST_BASED;
+
+ } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+ /* inherit live MD type */
+ t->type = live_md_type;
+
+ } else
+ t->type = DM_TYPE_REQUEST_BASED;
return 0;
}
@@ -907,7 +928,15 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
bool dm_table_request_based(struct dm_table *t)
{
- return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+ unsigned table_type = dm_table_get_type(t);
+
+ return (table_type == DM_TYPE_REQUEST_BASED ||
+ table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
+bool dm_table_mq_request_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
}
static int dm_table_alloc_md_mempools(struct dm_table *t)
@@ -1360,6 +1389,14 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
+static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
+}
+
static bool dm_table_all_devices_attribute(struct dm_table *t,
iterate_devices_callout_fn func)
{
@@ -1480,6 +1517,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+ if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
+ queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 242e3cec397a..925ec1b15e75 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -137,13 +137,26 @@ static int io_err_map_rq(struct dm_target *ti, struct request *clone,
return -EIO;
}
+static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
+ union map_info *map_context,
+ struct request **clone)
+{
+ return -EIO;
+}
+
+static void io_err_release_clone_rq(struct request *clone)
+{
+}
+
static struct target_type error_target = {
.name = "error",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
.map_rq = io_err_map_rq,
+ .clone_and_map_rq = io_err_clone_and_map_rq,
+ .release_clone_rq = io_err_release_clone_rq,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 43adbb863f5a..79f694120ddf 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1635,15 +1635,6 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
return r;
}
-int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result)
-{
- down_read(&pmd->root_lock);
- *result = pmd->data_block_size;
- up_read(&pmd->root_lock);
-
- return 0;
-}
-
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 921d15ee56a0..fac01a96d303 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -182,8 +182,6 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
dm_block_t *result);
-int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
-
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 493478989dbd..654773cb1eee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -11,6 +11,7 @@
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
+#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
@@ -1700,8 +1701,8 @@ static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell
*/
static int need_commit_due_to_time(struct pool *pool)
{
- return jiffies < pool->last_commit_jiffies ||
- jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
+ return !time_in_range(jiffies, pool->last_commit_jiffies,
+ pool->last_commit_jiffies + COMMIT_PERIOD);
}
#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
@@ -3385,6 +3386,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+ dm_device_name(pool->pool_md));
+ return -EINVAL;
+ }
+
if (!strcasecmp(argv[0], "create_thin"))
r = process_create_thin_mesg(argc, argv, pool);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b98cd9d84435..ec1444f49de1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -20,6 +20,7 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
+#include <linux/kthread.h>
#include <trace/events/block.h>
@@ -78,7 +79,8 @@ struct dm_io {
struct dm_rq_target_io {
struct mapped_device *md;
struct dm_target *ti;
- struct request *orig, clone;
+ struct request *orig, *clone;
+ struct kthread_work work;
int error;
union map_info info;
};
@@ -179,6 +181,7 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
+ mempool_t *rq_pool;
struct bio_set *bs;
@@ -206,7 +209,13 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
+
struct dm_stats stats;
+
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
};
/*
@@ -214,6 +223,7 @@ struct mapped_device {
*/
struct dm_md_mempools {
mempool_t *io_pool;
+ mempool_t *rq_pool;
struct bio_set *bs;
};
@@ -228,6 +238,7 @@ struct table_device {
#define RESERVED_MAX_IOS 1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
+static struct kmem_cache *_rq_cache;
/*
* Bio-based DM's mempools' reserved IOs set by the user.
@@ -285,9 +296,14 @@ static int __init local_init(void)
if (!_rq_tio_cache)
goto out_free_io_cache;
+ _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
+ __alignof__(struct request), 0, NULL);
+ if (!_rq_cache)
+ goto out_free_rq_tio_cache;
+
r = dm_uevent_init();
if (r)
- goto out_free_rq_tio_cache;
+ goto out_free_rq_cache;
deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
if (!deferred_remove_workqueue) {
@@ -309,6 +325,8 @@ out_free_workqueue:
destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
dm_uevent_exit();
+out_free_rq_cache:
+ kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
@@ -322,6 +340,7 @@ static void local_exit(void)
flush_scheduled_work();
destroy_workqueue(deferred_remove_workqueue);
+ kmem_cache_destroy(_rq_cache);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -574,6 +593,17 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
mempool_free(tio, tio->md->io_pool);
}
+static struct request *alloc_clone_request(struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ return mempool_alloc(md->rq_pool, gfp_mask);
+}
+
+static void free_clone_request(struct mapped_device *md, struct request *rq)
+{
+ mempool_free(rq, md->rq_pool);
+}
+
static int md_in_flight(struct mapped_device *md)
{
return atomic_read(&md->pending[READ]) +
@@ -989,7 +1019,7 @@ static void end_clone_bio(struct bio *clone, int error)
* the md may be freed in dm_put() at the end of this function.
* Or do dm_get() before calling this function and dm_put() later.
*/
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
atomic_dec(&md->pending[rw]);
@@ -1017,12 +1047,17 @@ static void free_rq_clone(struct request *clone)
struct dm_rq_target_io *tio = clone->end_io_data;
blk_rq_unprep_clone(clone);
+ if (clone->q && clone->q->mq_ops)
+ tio->ti->type->release_clone_rq(clone);
+ else
+ free_clone_request(tio->md, clone);
free_rq_tio(tio);
}
/*
* Complete the clone and the original request.
- * Must be called without queue lock.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
*/
static void dm_end_request(struct request *clone, int error)
{
@@ -1051,23 +1086,23 @@ static void dm_end_request(struct request *clone, int error)
static void dm_unprep_request(struct request *rq)
{
- struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = rq->special;
+ struct request *clone = tio->clone;
rq->special = NULL;
rq->cmd_flags &= ~REQ_DONTPREP;
- free_rq_clone(clone);
+ if (clone)
+ free_rq_clone(clone);
}
/*
* Requeue the original request of a clone.
*/
-void dm_requeue_unmapped_request(struct request *clone)
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+ struct request *rq)
{
- int rw = rq_data_dir(clone);
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
- struct request *rq = tio->orig;
+ int rw = rq_data_dir(rq);
struct request_queue *q = rq->q;
unsigned long flags;
@@ -1077,9 +1112,15 @@ void dm_requeue_unmapped_request(struct request *clone)
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
- rq_completed(md, rw, 0);
+ rq_completed(md, rw, false);
+}
+
+static void dm_requeue_unmapped_request(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ dm_requeue_unmapped_original_request(tio->md, tio->orig);
}
-EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)
{
@@ -1148,8 +1189,15 @@ static void dm_done(struct request *clone, int error, bool mapped)
static void dm_softirq_done(struct request *rq)
{
bool mapped = true;
- struct request *clone = rq->completion_data;
- struct dm_rq_target_io *tio = clone->end_io_data;
+ struct dm_rq_target_io *tio = rq->special;
+ struct request *clone = tio->clone;
+
+ if (!clone) {
+ blk_end_request_all(rq, tio->error);
+ rq_completed(tio->md, rq_data_dir(rq), false);
+ free_rq_tio(tio);
+ return;
+ }
if (rq->cmd_flags & REQ_FAILED)
mapped = false;
@@ -1161,13 +1209,11 @@ static void dm_softirq_done(struct request *rq)
* Complete the clone and the original request with the error status
* through softirq context.
*/
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
{
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct request *rq = tio->orig;
+ struct dm_rq_target_io *tio = rq->special;
tio->error = error;
- rq->completion_data = clone;
blk_complete_request(rq);
}
@@ -1175,40 +1221,40 @@ static void dm_complete_request(struct request *clone, int error)
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
- * This may be used when the target's map_rq() function fails.
+ * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
*/
-void dm_kill_unmapped_request(struct request *clone, int error)
+static void dm_kill_unmapped_request(struct request *rq, int error)
{
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct request *rq = tio->orig;
-
rq->cmd_flags |= REQ_FAILED;
- dm_complete_request(clone, error);
+ dm_complete_request(rq, error);
}
-EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
- * Called with the queue lock held
+ * Called with the clone's queue lock held
*/
static void end_clone_request(struct request *clone, int error)
{
- /*
- * For just cleaning up the information of the queue in which
- * the clone was dispatched.
- * The clone is *NOT* freed actually here because it is alloced from
- * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
- */
- __blk_put_request(clone->q, clone);
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ if (!clone->q->mq_ops) {
+ /*
+ * For just cleaning up the information of the queue in which
+ * the clone was dispatched.
+ * The clone is *NOT* freed actually here because it is alloced
+ * from dm own mempool (REQ_ALLOCED isn't set).
+ */
+ __blk_put_request(clone->q, clone);
+ }
/*
* Actual request completion is done in a softirq context which doesn't
- * hold the queue lock. Otherwise, deadlock could occur because:
+ * hold the clone's queue lock. Otherwise, deadlock could occur because:
* - another request may be submitted by the upper level driver
* of the stacking during the completion
* - the submission which requires queue lock may be done
- * against this queue
+ * against this clone's queue
*/
- dm_complete_request(clone, error);
+ dm_complete_request(tio->orig, error);
}
/*
@@ -1686,19 +1732,19 @@ static void dm_request(struct request_queue *q, struct bio *bio)
_dm_request(q, bio);
}
-void dm_dispatch_request(struct request *rq)
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
int r;
- if (blk_queue_io_stat(rq->q))
- rq->cmd_flags |= REQ_IO_STAT;
+ if (blk_queue_io_stat(clone->q))
+ clone->cmd_flags |= REQ_IO_STAT;
- rq->start_time = jiffies;
- r = blk_insert_cloned_request(rq->q, rq);
+ clone->start_time = jiffies;
+ r = blk_insert_cloned_request(clone->q, clone);
if (r)
+ /* must complete clone in terms of original request */
dm_complete_request(rq, r);
}
-EXPORT_SYMBOL_GPL(dm_dispatch_request);
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
@@ -1715,11 +1761,11 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
}
static int setup_clone(struct request *clone, struct request *rq,
- struct dm_rq_target_io *tio)
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
int r;
- r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+ r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
dm_rq_bio_constructor, tio);
if (r)
return r;
@@ -1730,14 +1776,37 @@ static int setup_clone(struct request *clone, struct request *rq,
clone->end_io = end_clone_request;
clone->end_io_data = tio;
+ tio->clone = clone;
+
return 0;
}
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
- gfp_t gfp_mask)
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+ struct request *clone = alloc_clone_request(md, gfp_mask);
+
+ if (!clone)
+ return NULL;
+
+ blk_rq_init(NULL, clone);
+ if (setup_clone(clone, rq, tio, gfp_mask)) {
+ /* -ENOMEM */
+ free_clone_request(md, clone);
+ return NULL;
+ }
+
+ return clone;
+}
+
+static void map_tio_request(struct kthread_work *work);
+
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+ struct mapped_device *md, gfp_t gfp_mask)
{
- struct request *clone;
struct dm_rq_target_io *tio;
+ int srcu_idx;
+ struct dm_table *table;
tio = alloc_rq_tio(md, gfp_mask);
if (!tio)
@@ -1745,18 +1814,23 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
tio->md = md;
tio->ti = NULL;
+ tio->clone = NULL;
tio->orig = rq;
tio->error = 0;
memset(&tio->info, 0, sizeof(tio->info));
-
- clone = &tio->clone;
- if (setup_clone(clone, rq, tio)) {
- /* -ENOMEM */
- free_rq_tio(tio);
- return NULL;
+ init_kthread_work(&tio->work, map_tio_request);
+
+ table = dm_get_live_table(md, &srcu_idx);
+ if (!dm_table_mq_request_based(table)) {
+ if (!clone_rq(rq, md, tio, gfp_mask)) {
+ dm_put_live_table(md, srcu_idx);
+ free_rq_tio(tio);
+ return NULL;
+ }
}
+ dm_put_live_table(md, srcu_idx);
- return clone;
+ return tio;
}
/*
@@ -1765,18 +1839,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
- struct request *clone;
+ struct dm_rq_target_io *tio;
if (unlikely(rq->special)) {
DMWARN("Already has something in rq->special.");
return BLKPREP_KILL;
}
- clone = clone_rq(rq, md, GFP_ATOMIC);
- if (!clone)
+ tio = prep_tio(rq, md, GFP_ATOMIC);
+ if (!tio)
return BLKPREP_DEFER;
- rq->special = clone;
+ rq->special = tio;
rq->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
@@ -1784,17 +1858,36 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
/*
* Returns:
- * 0 : the request has been processed (not requeued)
- * !0 : the request has been requeued
+ * 0 : the request has been processed
+ * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * < 0 : the request was completed due to failure
*/
-static int map_request(struct dm_target *ti, struct request *clone,
+static int map_request(struct dm_target *ti, struct request *rq,
struct mapped_device *md)
{
- int r, requeued = 0;
- struct dm_rq_target_io *tio = clone->end_io_data;
+ int r;
+ struct dm_rq_target_io *tio = rq->special;
+ struct request *clone = NULL;
+
+ if (tio->clone) {
+ clone = tio->clone;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ } else {
+ r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+ if (r < 0) {
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(rq, r);
+ return r;
+ }
+ if (IS_ERR(clone))
+ return DM_MAPIO_REQUEUE;
+ if (setup_clone(clone, rq, tio, GFP_KERNEL)) {
+ /* -ENOMEM */
+ ti->type->release_clone_rq(clone);
+ return DM_MAPIO_REQUEUE;
+ }
+ }
- tio->ti = ti;
- r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
@@ -1802,13 +1895,12 @@ static int map_request(struct dm_target *ti, struct request *clone,
case DM_MAPIO_REMAPPED:
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
- blk_rq_pos(tio->orig));
- dm_dispatch_request(clone);
+ blk_rq_pos(rq));
+ dm_dispatch_clone_request(clone, rq);
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
- requeued = 1;
break;
default:
if (r > 0) {
@@ -1817,20 +1909,27 @@ static int map_request(struct dm_target *ti, struct request *clone,
}
/* The target wants to complete the I/O */
- dm_kill_unmapped_request(clone, r);
- break;
+ dm_kill_unmapped_request(rq, r);
+ return r;
}
- return requeued;
+ return 0;
}
-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void map_tio_request(struct kthread_work *work)
{
- struct request *clone;
+ struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
+ struct request *rq = tio->orig;
+ struct mapped_device *md = tio->md;
+ if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE)
+ dm_requeue_unmapped_original_request(md, rq);
+}
+
+static void dm_start_request(struct mapped_device *md, struct request *orig)
+{
blk_start_request(orig);
- clone = orig->special;
- atomic_inc(&md->pending[rq_data_dir(clone)]);
+ atomic_inc(&md->pending[rq_data_dir(orig)]);
/*
* Hold the md reference here for the in-flight I/O.
@@ -1840,8 +1939,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
* See the comment in rq_completed() too.
*/
dm_get(md);
-
- return clone;
}
/*
@@ -1854,7 +1951,8 @@ static void dm_request_fn(struct request_queue *q)
int srcu_idx;
struct dm_table *map = dm_get_live_table(md, &srcu_idx);
struct dm_target *ti;
- struct request *rq, *clone;
+ struct request *rq;
+ struct dm_rq_target_io *tio;
sector_t pos;
/*
@@ -1876,34 +1974,29 @@ static void dm_request_fn(struct request_queue *q)
ti = dm_table_find_target(map, pos);
if (!dm_target_is_valid(ti)) {
/*
- * Must perform setup, that dm_done() requires,
+ * Must perform setup, that rq_completed() requires,
* before calling dm_kill_unmapped_request
*/
DMERR_LIMIT("request attempted access beyond the end of device");
- clone = dm_start_request(md, rq);
- dm_kill_unmapped_request(clone, -EIO);
+ dm_start_request(md, rq);
+ dm_kill_unmapped_request(rq, -EIO);
continue;
}
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
- clone = dm_start_request(md, rq);
-
- spin_unlock(q->queue_lock);
- if (map_request(ti, clone, md))
- goto requeued;
+ dm_start_request(md, rq);
+ tio = rq->special;
+ /* Establish tio->ti before queuing work (map_tio_request) */
+ tio->ti = ti;
+ queue_kthread_work(&md->kworker, &tio->work);
BUG_ON(!irqs_disabled());
- spin_lock(q->queue_lock);
}
goto out;
-requeued:
- BUG_ON(!irqs_disabled());
- spin_lock(q->queue_lock);
-
delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
@@ -2089,6 +2182,7 @@ static struct mapped_device *alloc_dev(int minor)
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
+ md->kworker_task = NULL;
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2149,8 +2243,13 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
+
+ if (md->kworker_task)
+ kthread_stop(md->kworker_task);
if (md->io_pool)
mempool_destroy(md->io_pool);
+ if (md->rq_pool)
+ mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
blk_integrity_unregister(md->disk);
@@ -2184,23 +2283,24 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
bioset_free(md->bs);
md->bs = p->bs;
p->bs = NULL;
- } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
- /*
- * There's no need to reload with request-based dm
- * because the size of front_pad doesn't change.
- * Note for future: If you are to reload bioset,
- * prep-ed requests in the queue may refer
- * to bio from the old bioset, so you must walk
- * through the queue to unprep.
- */
}
+ /*
+ * There's no need to reload with request-based dm
+ * because the size of front_pad doesn't change.
+ * Note for future: If you are to reload bioset,
+ * prep-ed requests in the queue may refer
+ * to bio from the old bioset, so you must walk
+ * through the queue to unprep.
+ */
goto out;
}
- BUG_ON(!p || md->io_pool || md->bs);
+ BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
+ md->rq_pool = p->rq_pool;
+ p->rq_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
@@ -2403,6 +2503,14 @@ unsigned dm_get_md_type(struct mapped_device *md)
return md->type;
}
+static bool dm_md_type_request_based(struct mapped_device *md)
+{
+ unsigned table_type = dm_get_md_type(md);
+
+ return (table_type == DM_TYPE_REQUEST_BASED ||
+ table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
return md->immutable_target_type;
@@ -2440,6 +2548,11 @@ static int dm_init_request_based_queue(struct mapped_device *md)
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
+ /* Also initialize the request-based DM worker thread */
+ init_kthread_worker(&md->kworker);
+ md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+ "kdmwork-%s", dm_device_name(md));
+
elv_register_queue(md->queue);
return 1;
@@ -2450,8 +2563,7 @@ static int dm_init_request_based_queue(struct mapped_device *md)
*/
int dm_setup_md_queue(struct mapped_device *md)
{
- if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
- !dm_init_request_based_queue(md)) {
+ if (dm_md_type_request_based(md) && !dm_init_request_based_queue(md)) {
DMWARN("Cannot initialize queue for request-based mapped device");
return -EINVAL;
}
@@ -2530,6 +2642,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
+ if (dm_request_based(md))
+ flush_kthread_worker(&md->kworker);
+
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
@@ -2773,8 +2888,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md))
+ if (dm_request_based(md)) {
stop_queue(md->queue);
+ flush_kthread_worker(&md->kworker);
+ }
flush_workqueue(md->wq);
@@ -2928,7 +3045,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
{
struct dm_table *map = NULL;
- if (dm_suspended_internally_md(md))
+ if (md->internal_suspend_count++)
return; /* nested internal suspend */
if (dm_suspended_md(md)) {
@@ -2953,7 +3070,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
static void __dm_internal_resume(struct mapped_device *md)
{
- if (!dm_suspended_internally_md(md))
+ BUG_ON(!md->internal_suspend_count);
+
+ if (--md->internal_suspend_count)
return; /* resume from nested internal suspend */
if (dm_suspended_md(md))
@@ -3118,24 +3237,35 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
{
struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
struct kmem_cache *cachep;
- unsigned int pool_size;
+ unsigned int pool_size = 0;
unsigned int front_pad;
if (!pools)
return NULL;
- if (type == DM_TYPE_BIO_BASED) {
+ switch (type) {
+ case DM_TYPE_BIO_BASED:
cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
- } else if (type == DM_TYPE_REQUEST_BASED) {
- cachep = _rq_tio_cache;
+ break;
+ case DM_TYPE_REQUEST_BASED:
pool_size = dm_get_reserved_rq_based_ios();
+ pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+ if (!pools->rq_pool)
+ goto out;
+ /* fall through to set up remaining rq-based pools */
+ case DM_TYPE_MQ_REQUEST_BASED:
+ cachep = _rq_tio_cache;
+ if (!pool_size)
+ pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_bio_data_size is not used. See __bind_mempools(). */
WARN_ON(per_bio_data_size != 0);
- } else
+ break;
+ default:
goto out;
+ }
pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
if (!pools->io_pool)
@@ -3164,6 +3294,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (pools->io_pool)
mempool_destroy(pools->io_pool);
+ if (pools->rq_pool)
+ mempool_destroy(pools->rq_pool);
+
if (pools->bs)
bioset_free(pools->bs);
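Aside: request-based dm above moves the map step onto a per-device kthread_worker: the worker thread is created at queue-init time, map_tio_request() work items are queued from dm_request_fn(), and the worker is flushed before suspend and stopped in free_dev(). A hedged sketch of that worker lifecycle with invented names; the API spellings match the ones used in the hunk.

#include <linux/err.h>
#include <linux/kthread.h>

struct example_dev {
	struct kthread_worker kworker;
	struct task_struct *kworker_task;
	struct kthread_work work;
};

static void example_work_fn(struct kthread_work *work)
{
	/* runs in process context on the dedicated worker thread */
}

static int example_start(struct example_dev *d)
{
	init_kthread_worker(&d->kworker);
	d->kworker_task = kthread_run(kthread_worker_fn, &d->kworker,
				      "example-worker");
	if (IS_ERR(d->kworker_task))
		return PTR_ERR(d->kworker_task);

	init_kthread_work(&d->work, example_work_fn);
	return 0;
}

static void example_submit(struct example_dev *d)
{
	queue_kthread_work(&d->kworker, &d->work);
}

static void example_stop(struct example_dev *d)
{
	flush_kthread_worker(&d->kworker);	/* wait for queued work */
	kthread_stop(d->kworker_task);
}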
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 84b0f9e4ba6c..59f53e79db82 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -34,9 +34,10 @@
/*
* Type of table and mapped_device's mempool
*/
-#define DM_TYPE_NONE 0
-#define DM_TYPE_BIO_BASED 1
-#define DM_TYPE_REQUEST_BASED 2
+#define DM_TYPE_NONE 0
+#define DM_TYPE_BIO_BASED 1
+#define DM_TYPE_REQUEST_BASED 2
+#define DM_TYPE_MQ_REQUEST_BASED 3
/*
* List of devices that a metadevice uses and should open/close.
@@ -73,6 +74,7 @@ int dm_table_any_busy_target(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
@@ -99,7 +101,8 @@ int dm_setup_md_queue(struct mapped_device *md);
/*
* To check whether the target type is request-based or not (bio-based).
*/
-#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
+ ((t)->type->clone_and_map_rq != NULL))
/*
* To check whether the target type is a hybrid (capable of being
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index e8b4574956c7..1277eb26b58a 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -332,13 +332,11 @@ static int run(struct mddev *mddev)
return 0;
}
-static int stop(struct mddev *mddev)
+static void faulty_free(struct mddev *mddev, void *priv)
{
- struct faulty_conf *conf = mddev->private;
+ struct faulty_conf *conf = priv;
kfree(conf);
- mddev->private = NULL;
- return 0;
}
static struct md_personality faulty_personality =
@@ -348,7 +346,7 @@ static struct md_personality faulty_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
- .stop = stop,
+ .free = faulty_free,
.status = status,
.check_reshape = reshape,
.size = faulty_size,
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 64713b77df1c..fa7d577f3d12 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -34,7 +34,7 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
lo = 0;
hi = mddev->raid_disks - 1;
- conf = rcu_dereference(mddev->private);
+ conf = mddev->private;
/*
* Binary Search
@@ -60,18 +60,16 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
*
* Return amount of bytes we can take at this offset
*/
-static int linear_mergeable_bvec(struct request_queue *q,
+static int linear_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct dev_info *dev0;
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int maxbytes = biovec->bv_len;
struct request_queue *subq;
- rcu_read_lock();
dev0 = which_dev(mddev, sector);
maxsectors = dev0->end_sector - sector;
subq = bdev_get_queue(dev0->rdev->bdev);
@@ -81,7 +79,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
biovec));
}
- rcu_read_unlock();
if (maxsectors < bio_sectors)
maxsectors = 0;
@@ -97,24 +94,18 @@ static int linear_mergeable_bvec(struct request_queue *q,
return maxsectors << 9;
}
-static int linear_congested(void *data, int bits)
+static int linear_congested(struct mddev *mddev, int bits)
{
- struct mddev *mddev = data;
struct linear_conf *conf;
int i, ret = 0;
- if (mddev_congested(mddev, bits))
- return 1;
-
- rcu_read_lock();
- conf = rcu_dereference(mddev->private);
+ conf = mddev->private;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
- rcu_read_unlock();
return ret;
}
@@ -123,12 +114,10 @@ static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disk
struct linear_conf *conf;
sector_t array_sectors;
- rcu_read_lock();
- conf = rcu_dereference(mddev->private);
+ conf = mddev->private;
WARN_ONCE(sectors || raid_disks,
"%s does not support generic reshape\n", __func__);
array_sectors = conf->array_sectors;
- rcu_read_unlock();
return array_sectors;
}
@@ -217,10 +206,6 @@ static int linear_run (struct mddev *mddev)
mddev->private = conf;
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
- blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
- mddev->queue->backing_dev_info.congested_fn = linear_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
-
ret = md_integrity_register(mddev);
if (ret) {
kfree(conf);
@@ -252,38 +237,23 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
if (!newconf)
return -ENOMEM;
- oldconf = rcu_dereference_protected(mddev->private,
- lockdep_is_held(
- &mddev->reconfig_mutex));
+ mddev_suspend(mddev);
+ oldconf = mddev->private;
mddev->raid_disks++;
- rcu_assign_pointer(mddev->private, newconf);
+ mddev->private = newconf;
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
+ mddev_resume(mddev);
revalidate_disk(mddev->gendisk);
- kfree_rcu(oldconf, rcu);
+ kfree(oldconf);
return 0;
}
-static int linear_stop (struct mddev *mddev)
+static void linear_free(struct mddev *mddev, void *priv)
{
- struct linear_conf *conf =
- rcu_dereference_protected(mddev->private,
- lockdep_is_held(
- &mddev->reconfig_mutex));
+ struct linear_conf *conf = priv;
- /*
- * We do not require rcu protection here since
- * we hold reconfig_mutex for both linear_add and
- * linear_stop, so they cannot race.
- * We should make sure any old 'conf's are properly
- * freed though.
- */
- rcu_barrier();
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
- mddev->private = NULL;
-
- return 0;
}
static void linear_make_request(struct mddev *mddev, struct bio *bio)
@@ -299,16 +269,12 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- rcu_read_lock();
-
tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev;
- rcu_read_unlock();
-
if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
bio->bi_iter.bi_sector < start_sector))
goto out_of_bounds;
@@ -355,6 +321,10 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
+static void linear_quiesce(struct mddev *mddev, int state)
+{
+}
+
static struct md_personality linear_personality =
{
.name = "linear",
@@ -362,10 +332,13 @@ static struct md_personality linear_personality =
.owner = THIS_MODULE,
.make_request = linear_make_request,
.run = linear_run,
- .stop = linear_stop,
+ .free = linear_free,
.status = linear_status,
.hot_add_disk = linear_add,
.size = linear_size,
+ .quiesce = linear_quiesce,
+ .congested = linear_congested,
+ .mergeable_bvec = linear_mergeable_bvec,
};
static int __init linear_init (void)
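The linear.c changes above convert the personality to the new interface introduced later in this patch: mddev->private is no longer RCU-protected (linear_add() now brackets the conf swap with mddev_suspend()/mddev_resume() and frees the old conf with a plain kfree()), the queue-level congested_fn/merge_bvec_fn registration moves out of linear_run(), and linear_stop() becomes linear_free(), which only frees the conf. An empty linear_quiesce() is added because mddev_suspend()/mddev_resume() call into ->quiesce(). A rough sketch of the shape the new hooks take (illustrative stubs, not the exact code above; the example_* names are hypothetical):

        static int example_congested(struct mddev *mddev, int bits)
        {
                /* conf is stable: the core suspends I/O around any swap of ->private */
                struct linear_conf *conf = mddev->private;
                int i, ret = 0;

                for (i = 0; i < mddev->raid_disks && !ret; i++) {
                        struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
                        ret |= bdi_congested(&q->backing_dev_info, bits);
                }
                return ret;
        }

        static void example_free(struct mddev *mddev, void *priv)
        {
                kfree(priv);    /* the core has already detached the thread and queue */
        }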
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 709755fb6d7b..c8d2bac4e28b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -72,6 +72,7 @@ static struct workqueue_struct *md_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
+static void mddev_detach(struct mddev *mddev);
/*
* Default number of read corrections we'll attempt on an rdev
@@ -292,8 +293,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
/* mddev_suspend makes sure no new requests are submitted
* to the device, and that any requests that have been submitted
* are completely handled.
- * Once ->stop is called and completes, the module will be completely
- * unused.
+ * Once mddev_detach() is called and completes, the module will be
+ * completely unused.
*/
void mddev_suspend(struct mddev *mddev)
{
@@ -321,10 +322,47 @@ EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(struct mddev *mddev, int bits)
{
- return mddev->suspended;
+ struct md_personality *pers = mddev->pers;
+ int ret = 0;
+
+ rcu_read_lock();
+ if (mddev->suspended)
+ ret = 1;
+ else if (pers && pers->congested)
+ ret = pers->congested(mddev, bits);
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mddev_congested);
+static int md_congested(void *data, int bits)
+{
+ struct mddev *mddev = data;
+ return mddev_congested(mddev, bits);
}
-EXPORT_SYMBOL(mddev_congested);
+static int md_mergeable_bvec(struct request_queue *q,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec)
+{
+ struct mddev *mddev = q->queuedata;
+ int ret;
+ rcu_read_lock();
+ if (mddev->suspended) {
+ /* Must always allow one vec */
+ if (bvm->bi_size == 0)
+ ret = biovec->bv_len;
+ else
+ ret = 0;
+ } else {
+ struct md_personality *pers = mddev->pers;
+ if (pers && pers->mergeable_bvec)
+ ret = pers->mergeable_bvec(mddev, bvm, biovec);
+ else
+ ret = biovec->bv_len;
+ }
+ rcu_read_unlock();
+ return ret;
+}
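The two wrappers added above centralise what every personality used to do by hand: md_congested() reads ->suspended and ->pers under rcu_read_lock() and forwards to the personality's new ->congested hook, and md_mergeable_bvec() answers for a suspended array itself (allow exactly one vec) before forwarding to ->mergeable_bvec. The per-queue registration that the personalities delete in this patch is done once in md_run(), as in the hunk further down:

        if (mddev->queue) {
                mddev->queue->backing_dev_info.congested_data = mddev;
                mddev->queue->backing_dev_info.congested_fn = md_congested;
                blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
        }

With this in place the mddev_congested(mddev, bits) call at the top of each personality's congested function becomes redundant and is removed.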
/*
* Generic flush handling for md
*/
@@ -397,12 +435,12 @@ static void md_submit_flush_data(struct work_struct *ws)
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
- spin_lock_irq(&mddev->write_lock);
+ spin_lock_irq(&mddev->lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
- mddev->write_lock);
+ mddev->lock);
mddev->flush_bio = bio;
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock_irq(&mddev->lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
@@ -465,7 +503,7 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
- spin_lock_init(&mddev->write_lock);
+ spin_lock_init(&mddev->lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
@@ -552,32 +590,9 @@ static struct mddev *mddev_find(dev_t unit)
goto retry;
}
-static inline int __must_check mddev_lock(struct mddev *mddev)
-{
- return mutex_lock_interruptible(&mddev->reconfig_mutex);
-}
-
-/* Sometimes we need to take the lock in a situation where
- * failure due to interrupts is not acceptable.
- */
-static inline void mddev_lock_nointr(struct mddev *mddev)
-{
- mutex_lock(&mddev->reconfig_mutex);
-}
-
-static inline int mddev_is_locked(struct mddev *mddev)
-{
- return mutex_is_locked(&mddev->reconfig_mutex);
-}
-
-static inline int mddev_trylock(struct mddev *mddev)
-{
- return mutex_trylock(&mddev->reconfig_mutex);
-}
-
static struct attribute_group md_redundancy_group;
-static void mddev_unlock(struct mddev *mddev)
+void mddev_unlock(struct mddev *mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
@@ -619,6 +634,7 @@ static void mddev_unlock(struct mddev *mddev)
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
+EXPORT_SYMBOL_GPL(mddev_unlock);
static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
@@ -2230,7 +2246,7 @@ repeat:
return;
}
- spin_lock_irq(&mddev->write_lock);
+ spin_lock(&mddev->lock);
mddev->utime = get_seconds();
@@ -2287,7 +2303,7 @@ repeat:
}
sync_sbs(mddev, nospares);
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
@@ -2326,15 +2342,15 @@ repeat:
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
- spin_lock_irq(&mddev->write_lock);
+ spin_lock(&mddev->lock);
if (mddev->in_sync != sync_req ||
test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
goto repeat;
}
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
@@ -2381,40 +2397,41 @@ state_show(struct md_rdev *rdev, char *page)
{
char *sep = "";
size_t len = 0;
+ unsigned long flags = ACCESS_ONCE(rdev->flags);
- if (test_bit(Faulty, &rdev->flags) ||
+ if (test_bit(Faulty, &flags) ||
rdev->badblocks.unacked_exist) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
- if (test_bit(In_sync, &rdev->flags)) {
+ if (test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
- if (test_bit(WriteMostly, &rdev->flags)) {
+ if (test_bit(WriteMostly, &flags)) {
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
- if (test_bit(Blocked, &rdev->flags) ||
+ if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
- && !test_bit(Faulty, &rdev->flags))) {
+ && !test_bit(Faulty, &flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
- if (!test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags)) {
+ if (!test_bit(Faulty, &flags) &&
+ !test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
- if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (test_bit(WriteErrorSeen, &flags)) {
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
- if (test_bit(WantReplacement, &rdev->flags)) {
+ if (test_bit(WantReplacement, &flags)) {
len += sprintf(page+len, "%swant_replacement", sep);
sep = ",";
}
- if (test_bit(Replacement, &rdev->flags)) {
+ if (test_bit(Replacement, &flags)) {
len += sprintf(page+len, "%sreplacement", sep);
sep = ",";
}
@@ -2927,21 +2944,12 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
- struct mddev *mddev = rdev->mddev;
- ssize_t rv;
if (!entry->show)
return -EIO;
-
- rv = mddev ? mddev_lock(mddev) : -EBUSY;
- if (!rv) {
- if (rdev->mddev == NULL)
- rv = -EBUSY;
- else
- rv = entry->show(rdev, page);
- mddev_unlock(mddev);
- }
- return rv;
+ if (!rdev->mddev)
+ return -EBUSY;
+ return entry->show(rdev, page);
}
static ssize_t
@@ -3212,11 +3220,13 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
mddev->safemode_delay = 0;
else {
unsigned long old_delay = mddev->safemode_delay;
- mddev->safemode_delay = (msec*HZ)/1000;
- if (mddev->safemode_delay == 0)
- mddev->safemode_delay = 1;
- if (mddev->safemode_delay < old_delay || old_delay == 0)
- md_safemode_timeout((unsigned long)mddev);
+ unsigned long new_delay = (msec*HZ)/1000;
+
+ if (new_delay == 0)
+ new_delay = 1;
+ mddev->safemode_delay = new_delay;
+ if (new_delay < old_delay || old_delay == 0)
+ mod_timer(&mddev->safemode_timer, jiffies+1);
}
return len;
}
@@ -3226,41 +3236,52 @@ __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
{
- struct md_personality *p = mddev->pers;
+ struct md_personality *p;
+ int ret;
+ spin_lock(&mddev->lock);
+ p = mddev->pers;
if (p)
- return sprintf(page, "%s\n", p->name);
+ ret = sprintf(page, "%s\n", p->name);
else if (mddev->clevel[0])
- return sprintf(page, "%s\n", mddev->clevel);
+ ret = sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
- return sprintf(page, "%d\n", mddev->level);
+ ret = sprintf(page, "%d\n", mddev->level);
else
- return 0;
+ ret = 0;
+ spin_unlock(&mddev->lock);
+ return ret;
}
static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
char clevel[16];
- ssize_t rv = len;
- struct md_personality *pers;
+ ssize_t rv;
+ size_t slen = len;
+ struct md_personality *pers, *oldpers;
long level;
- void *priv;
+ void *priv, *oldpriv;
struct md_rdev *rdev;
+ if (slen == 0 || slen >= sizeof(clevel))
+ return -EINVAL;
+
+ rv = mddev_lock(mddev);
+ if (rv)
+ return rv;
+
if (mddev->pers == NULL) {
- if (len == 0)
- return 0;
- if (len >= sizeof(mddev->clevel))
- return -ENOSPC;
- strncpy(mddev->clevel, buf, len);
- if (mddev->clevel[len-1] == '\n')
- len--;
- mddev->clevel[len] = 0;
+ strncpy(mddev->clevel, buf, slen);
+ if (mddev->clevel[slen-1] == '\n')
+ slen--;
+ mddev->clevel[slen] = 0;
mddev->level = LEVEL_NONE;
- return rv;
+ rv = len;
+ goto out_unlock;
}
+ rv = -EROFS;
if (mddev->ro)
- return -EROFS;
+ goto out_unlock;
/* request to change the personality. Need to ensure:
* - array is not engaged in resync/recovery/reshape
@@ -3268,25 +3289,25 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
* - new personality will access other array.
*/
+ rv = -EBUSY;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
- return -EBUSY;
+ goto out_unlock;
+ rv = -EINVAL;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
- return -EINVAL;
+ goto out_unlock;
}
/* Now find the new personality */
- if (len == 0 || len >= sizeof(clevel))
- return -EINVAL;
- strncpy(clevel, buf, len);
- if (clevel[len-1] == '\n')
- len--;
- clevel[len] = 0;
+ strncpy(clevel, buf, slen);
+ if (clevel[slen-1] == '\n')
+ slen--;
+ clevel[slen] = 0;
if (kstrtol(clevel, 10, &level))
level = LEVEL_NONE;
@@ -3297,20 +3318,23 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
- return -EINVAL;
+ rv = -EINVAL;
+ goto out_unlock;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
- return rv;
+ rv = len;
+ goto out_unlock;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
- return -EINVAL;
+ rv = -EINVAL;
+ goto out_unlock;
}
rdev_for_each(rdev, mddev)
@@ -3330,30 +3354,29 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
- return PTR_ERR(priv);
+ rv = PTR_ERR(priv);
+ goto out_unlock;
}
/* Looks like we have a winner */
mddev_suspend(mddev);
- mddev->pers->stop(mddev);
+ mddev_detach(mddev);
- if (mddev->pers->sync_request == NULL &&
- pers->sync_request != NULL) {
- /* need to add the md_redundancy_group */
- if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
- }
- if (mddev->pers->sync_request != NULL &&
- pers->sync_request == NULL) {
- /* need to remove the md_redundancy_group */
- if (mddev->to_remove == NULL)
- mddev->to_remove = &md_redundancy_group;
- }
+ spin_lock(&mddev->lock);
+ oldpers = mddev->pers;
+ oldpriv = mddev->private;
+ mddev->pers = pers;
+ mddev->private = priv;
+ strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ mddev->level = mddev->new_level;
+ mddev->layout = mddev->new_layout;
+ mddev->chunk_sectors = mddev->new_chunk_sectors;
+ mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
+ mddev->degraded = 0;
+ spin_unlock(&mddev->lock);
- if (mddev->pers->sync_request == NULL &&
+ if (oldpers->sync_request == NULL &&
mddev->external) {
/* We are converting from a no-redundancy array
* to a redundancy array and metadata is managed
@@ -3367,6 +3390,24 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->safemode = 0;
}
+ oldpers->free(mddev, oldpriv);
+
+ if (oldpers->sync_request == NULL &&
+ pers->sync_request != NULL) {
+ /* need to add the md_redundancy_group */
+ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ printk(KERN_WARNING
+ "md: cannot register extra attributes for %s\n",
+ mdname(mddev));
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ }
+ if (oldpers->sync_request != NULL &&
+ pers->sync_request == NULL) {
+ /* need to remove the md_redundancy_group */
+ if (mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ }
+
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
@@ -3392,17 +3433,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
}
- module_put(mddev->pers->owner);
- mddev->pers = pers;
- mddev->private = priv;
- strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
- mddev->level = mddev->new_level;
- mddev->layout = mddev->new_layout;
- mddev->chunk_sectors = mddev->new_chunk_sectors;
- mddev->delta_disks = 0;
- mddev->reshape_backwards = 0;
- mddev->degraded = 0;
- if (mddev->pers->sync_request == NULL) {
+ if (pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
@@ -3417,6 +3448,9 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
+ rv = len;
+out_unlock:
+ mddev_unlock(mddev);
return rv;
}
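level_store() now serialises itself with mddev_lock() (every exit funnels through out_unlock) and performs the personality switch with the array suspended: mddev_detach() replaces the old ->stop() call, the new pers/private and the geometry fields are installed together under mddev->lock, and only then is the old conf handed back via oldpers->free(). Condensed from the hunks above (error handling and the sysfs redundancy-group shuffling omitted):

        mddev_suspend(mddev);
        mddev_detach(mddev);                    /* quiesce, stop the thread, sync the queue */

        spin_lock(&mddev->lock);
        oldpers = mddev->pers;
        oldpriv = mddev->private;
        mddev->pers = pers;                     /* new personality and conf become visible together */
        mddev->private = priv;
        mddev->level = mddev->new_level;        /* geometry switched under the same lock */
        spin_unlock(&mddev->lock);

        oldpers->free(mddev, oldpriv);          /* old conf released only after the switch */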
@@ -3439,28 +3473,32 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
+ int err;
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (mddev->pers) {
- int err;
if (mddev->pers->check_reshape == NULL)
- return -EBUSY;
- if (mddev->ro)
- return -EROFS;
- mddev->new_layout = n;
- err = mddev->pers->check_reshape(mddev);
- if (err) {
- mddev->new_layout = mddev->layout;
- return err;
+ err = -EBUSY;
+ else if (mddev->ro)
+ err = -EROFS;
+ else {
+ mddev->new_layout = n;
+ err = mddev->pers->check_reshape(mddev);
+ if (err)
+ mddev->new_layout = mddev->layout;
}
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
mddev->layout = n;
}
- return len;
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
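layout_store() above shows the pattern the rest of the converted *_store handlers in this patch follow now that md_attr_store()/md_attr_show() (see the hunks further down) no longer take the reconfig mutex on the attribute's behalf: lock first, keep a single unlock point, and return "err ?: len". A minimal sketch of the shape (the example_store name is hypothetical):

        static ssize_t example_store(struct mddev *mddev, const char *buf, size_t len)
        {
                int err = mddev_lock(mddev);

                if (err)
                        return err;
                err = 0;
                /* validate input and update mddev fields here */
                mddev_unlock(mddev);
                return err ?: len;
        }

Attributes that only need the new mddev->lock spinlock (min_sync, max_sync, the clean/active fast path in array_state below) use spin_lock(&mddev->lock) instead and never touch reconfig_mutex.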
@@ -3483,32 +3521,39 @@ static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
- int rv = 0;
+ int err;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (mddev->pers)
- rv = update_raid_disks(mddev, n);
+ err = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
struct md_rdev *rdev;
int olddisks = mddev->raid_disks - mddev->delta_disks;
+ err = -EINVAL;
rdev_for_each(rdev, mddev) {
if (olddisks < n &&
rdev->data_offset < rdev->new_data_offset)
- return -EINVAL;
+ goto out_unlock;
if (olddisks > n &&
rdev->data_offset > rdev->new_data_offset)
- return -EINVAL;
+ goto out_unlock;
}
+ err = 0;
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
mddev->reshape_backwards = (mddev->delta_disks < 0);
} else
mddev->raid_disks = n;
- return rv ? rv : len;
+out_unlock:
+ mddev_unlock(mddev);
+ return err ? err : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
@@ -3527,30 +3572,34 @@ chunk_size_show(struct mddev *mddev, char *page)
static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
+ int err;
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (mddev->pers) {
- int err;
if (mddev->pers->check_reshape == NULL)
- return -EBUSY;
- if (mddev->ro)
- return -EROFS;
- mddev->new_chunk_sectors = n >> 9;
- err = mddev->pers->check_reshape(mddev);
- if (err) {
- mddev->new_chunk_sectors = mddev->chunk_sectors;
- return err;
+ err = -EBUSY;
+ else if (mddev->ro)
+ err = -EROFS;
+ else {
+ mddev->new_chunk_sectors = n >> 9;
+ err = mddev->pers->check_reshape(mddev);
+ if (err)
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else {
mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
mddev->chunk_sectors = n >> 9;
}
- return len;
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
@@ -3566,20 +3615,27 @@ resync_start_show(struct mddev *mddev, char *page)
static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
+ int err;
char *e;
unsigned long long n = simple_strtoull(buf, &e, 10);
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
- return -EBUSY;
- if (cmd_match(buf, "none"))
+ err = -EBUSY;
+ else if (cmd_match(buf, "none"))
n = MaxSector;
else if (!*buf || (*e && *e != '\n'))
- return -EINVAL;
+ err = -EINVAL;
- mddev->recovery_cp = n;
- if (mddev->pers)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- return len;
+ if (!err) {
+ mddev->recovery_cp = n;
+ if (mddev->pers)
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ }
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
@@ -3677,8 +3733,39 @@ static int restart_array(struct mddev *mddev);
static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
- int err = -EINVAL;
+ int err;
enum array_state st = match_word(buf, array_states);
+
+ if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
+ /* don't take reconfig_mutex when toggling between
+ * clean and active
+ */
+ spin_lock(&mddev->lock);
+ if (st == active) {
+ restart_array(mddev);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ err = 0;
+ } else /* st == clean */ {
+ restart_array(mddev);
+ if (atomic_read(&mddev->writes_pending) == 0) {
+ if (mddev->in_sync == 0) {
+ mddev->in_sync = 1;
+ if (mddev->safemode == 1)
+ mddev->safemode = 0;
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ }
+ err = 0;
+ } else
+ err = -EBUSY;
+ }
+ spin_unlock(&mddev->lock);
+ return err;
+ }
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ err = -EINVAL;
switch(st) {
case bad_word:
break;
@@ -3722,7 +3809,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case clean:
if (mddev->pers) {
restart_array(mddev);
- spin_lock_irq(&mddev->write_lock);
+ spin_lock(&mddev->lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
@@ -3733,7 +3820,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = 0;
} else
err = -EBUSY;
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
} else
err = -EINVAL;
break;
@@ -3754,14 +3841,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
/* these cannot be set */
break;
}
- if (err)
- return err;
- else {
+
+ if (!err) {
if (mddev->hold_active == UNTIL_IOCTL)
mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
- return len;
}
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
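array_state_store() gains a fast path: switching a running, non-read-only array between 'active' and 'clean' is handled entirely under the mddev->lock spinlock, so those transitions can no longer be blocked by whoever currently holds reconfig_mutex; every other state change still goes through mddev_lock() as before. Shape of the dispatch (the handle_* helpers are hypothetical stand-ins for the inline code above):

        st = match_word(buf, array_states);
        if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
                spin_lock(&mddev->lock);
                err = (st == active) ? handle_active(mddev) : handle_clean(mddev);
                spin_unlock(&mddev->lock);
                return err;
        }
        err = mddev_lock(mddev);        /* everything else still serialises here */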
@@ -3822,6 +3909,11 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
minor != MINOR(dev))
return -EOVERFLOW;
+ flush_workqueue(md_misc_wq);
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
@@ -3845,6 +3937,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
out:
if (err)
export_rdev(rdev);
+ mddev_unlock(mddev);
return err ? err : len;
}
@@ -3856,7 +3949,11 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
char *end;
unsigned long chunk, end_chunk;
+ int err;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (!mddev->bitmap)
goto out;
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
@@ -3874,6 +3971,7 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
+ mddev_unlock(mddev);
return len;
}
@@ -3901,6 +3999,9 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
if (err < 0)
return err;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (mddev->pers) {
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
@@ -3911,6 +4012,7 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
else
err = -ENOSPC;
}
+ mddev_unlock(mddev);
return err ? err : len;
}
@@ -3940,21 +4042,28 @@ metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
+ int err;
/* Changing the details of 'external' metadata is
* always permitted. Otherwise there must be
* no devices attached to the array.
*/
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ err = -EBUSY;
if (mddev->external && strncmp(buf, "external:", 9) == 0)
;
else if (!list_empty(&mddev->disks))
- return -EBUSY;
+ goto out_unlock;
+ err = 0;
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
mddev->external = 0;
mddev->major_version = 0;
mddev->minor_version = 90;
- return len;
+ goto out_unlock;
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
@@ -3968,22 +4077,27 @@ metadata_store(struct mddev *mddev, const char *buf, size_t len)
mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
- return len;
+ goto out_unlock;
}
major = simple_strtoul(buf, &e, 10);
+ err = -EINVAL;
if (e==buf || *e != '.')
- return -EINVAL;
+ goto out_unlock;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
- return -EINVAL;
+ goto out_unlock;
+ err = -ENOENT;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
- return -ENOENT;
+ goto out_unlock;
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
mddev->external = 0;
- return len;
+ err = 0;
+out_unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_metadata =
@@ -3993,20 +4107,21 @@ static ssize_t
action_show(struct mddev *mddev, char *page)
{
char *type = "idle";
- if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ unsigned long recovery = mddev->recovery;
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
type = "frozen";
- else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+ (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
type = "reshape";
- else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+ if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
type = "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_CHECK, &recovery))
type = "check";
else
type = "repair";
- } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
@@ -4027,7 +4142,10 @@ action_store(struct mddev *mddev, const char *page, size_t len)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ if (mddev_lock(mddev) == 0) {
+ md_reap_sync_thread(mddev);
+ mddev_unlock(mddev);
+ }
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -4041,7 +4159,11 @@ action_store(struct mddev *mddev, const char *page, size_t len)
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
- err = mddev->pers->start_reshape(mddev);
+ err = mddev_lock(mddev);
+ if (!err) {
+ err = mddev->pers->start_reshape(mddev);
+ mddev_unlock(mddev);
+ }
if (err)
return err;
sysfs_notify(&mddev->kobj, NULL, "degraded");
@@ -4225,22 +4347,36 @@ static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
+ int err;
+ int chunk;
+
if (kstrtoull(buf, 10, &min))
return -EINVAL;
+
+ spin_lock(&mddev->lock);
+ err = -EINVAL;
if (min > mddev->resync_max)
- return -EINVAL;
+ goto out_unlock;
+
+ err = -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- return -EBUSY;
+ goto out_unlock;
/* Must be a multiple of chunk_size */
- if (mddev->chunk_sectors) {
+ chunk = mddev->chunk_sectors;
+ if (chunk) {
sector_t temp = min;
- if (sector_div(temp, mddev->chunk_sectors))
- return -EINVAL;
+
+ err = -EINVAL;
+ if (sector_div(temp, chunk))
+ goto out_unlock;
}
mddev->resync_min = min;
+ err = 0;
- return len;
+out_unlock:
+ spin_unlock(&mddev->lock);
+ return err ?: len;
}
static struct md_sysfs_entry md_min_sync =
@@ -4258,29 +4394,42 @@ max_sync_show(struct mddev *mddev, char *page)
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
+ int err;
+ spin_lock(&mddev->lock);
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
unsigned long long max;
+ int chunk;
+
+ err = -EINVAL;
if (kstrtoull(buf, 10, &max))
- return -EINVAL;
+ goto out_unlock;
if (max < mddev->resync_min)
- return -EINVAL;
+ goto out_unlock;
+
+ err = -EBUSY;
if (max < mddev->resync_max &&
mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- return -EBUSY;
+ goto out_unlock;
/* Must be a multiple of chunk_size */
- if (mddev->chunk_sectors) {
+ chunk = mddev->chunk_sectors;
+ if (chunk) {
sector_t temp = max;
- if (sector_div(temp, mddev->chunk_sectors))
- return -EINVAL;
+
+ err = -EINVAL;
+ if (sector_div(temp, chunk))
+ goto out_unlock;
}
mddev->resync_max = max;
}
wake_up(&mddev->recovery_wait);
- return len;
+ err = 0;
+out_unlock:
+ spin_unlock(&mddev->lock);
+ return err ?: len;
}
static struct md_sysfs_entry md_max_sync =
@@ -4297,14 +4446,20 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- unsigned long long old = mddev->suspend_lo;
+ unsigned long long old;
+ int err;
- if (mddev->pers == NULL ||
- mddev->pers->quiesce == NULL)
- return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ err = -EINVAL;
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
+ goto unlock;
+ old = mddev->suspend_lo;
mddev->suspend_lo = new;
if (new >= old)
/* Shrinking suspended region */
@@ -4314,7 +4469,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
- return len;
+ err = 0;
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
@@ -4330,14 +4488,20 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- unsigned long long old = mddev->suspend_hi;
+ unsigned long long old;
+ int err;
- if (mddev->pers == NULL ||
- mddev->pers->quiesce == NULL)
- return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ err = -EINVAL;
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
+ goto unlock;
+ old = mddev->suspend_hi;
mddev->suspend_hi = new;
if (new <= old)
/* Shrinking suspended region */
@@ -4347,7 +4511,10 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
- return len;
+ err = 0;
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
@@ -4367,11 +4534,17 @@ reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
struct md_rdev *rdev;
char *e;
+ int err;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers)
- return -EBUSY;
+
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ err = -EBUSY;
+ if (mddev->pers)
+ goto unlock;
mddev->reshape_position = new;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
@@ -4380,7 +4553,10 @@ reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
mddev->new_chunk_sectors = mddev->chunk_sectors;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
- return len;
+ err = 0;
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_reshape_position =
@@ -4398,6 +4574,8 @@ static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
int backwards = 0;
+ int err;
+
if (cmd_match(buf, "forwards"))
backwards = 0;
else if (cmd_match(buf, "backwards"))
@@ -4407,16 +4585,19 @@ reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->reshape_backwards == backwards)
return len;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
/* check if we are allowed to change */
if (mddev->delta_disks)
- return -EBUSY;
-
- if (mddev->persistent &&
+ err = -EBUSY;
+ else if (mddev->persistent &&
mddev->major_version == 0)
- return -EINVAL;
-
- mddev->reshape_backwards = backwards;
- return len;
+ err = -EINVAL;
+ else
+ mddev->reshape_backwards = backwards;
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_reshape_direction =
@@ -4437,6 +4618,11 @@ static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
sector_t sectors;
+ int err;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
@@ -4447,19 +4633,22 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
mddev->external_size = 0;
} else {
if (strict_blocks_to_sectors(buf, &sectors) < 0)
- return -EINVAL;
- if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
- return -E2BIG;
-
- mddev->external_size = 1;
+ err = -EINVAL;
+ else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
+ err = -E2BIG;
+ else
+ mddev->external_size = 1;
}
- mddev->array_sectors = sectors;
- if (mddev->pers) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (!err) {
+ mddev->array_sectors = sectors;
+ if (mddev->pers) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
}
- return len;
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry md_array_size =
@@ -4523,11 +4712,7 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
- rv = mddev_lock(mddev);
- if (!rv) {
- rv = entry->show(mddev, page);
- mddev_unlock(mddev);
- }
+ rv = entry->show(mddev, page);
mddev_put(mddev);
return rv;
}
@@ -4551,13 +4736,7 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
- if (entry->store == new_dev_store)
- flush_workqueue(md_misc_wq);
- rv = mddev_lock(mddev);
- if (!rv) {
- rv = entry->store(mddev, page, length);
- mddev_unlock(mddev);
- }
+ rv = entry->store(mddev, page, length);
mddev_put(mddev);
return rv;
}
@@ -4825,7 +5004,6 @@ int md_run(struct mddev *mddev)
mddev->clevel);
return -EINVAL;
}
- mddev->pers = pers;
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
mddev->level = pers->level;
@@ -4836,7 +5014,6 @@ int md_run(struct mddev *mddev)
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
- mddev->pers = NULL;
module_put(pers->owner);
return -EINVAL;
}
@@ -4880,35 +5057,38 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
- err = mddev->pers->run(mddev);
+ err = pers->run(mddev);
if (err)
printk(KERN_ERR "md: pers->run() failed ...\n");
- else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
+ else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
" but 'external_size' not in effect?\n", __func__);
printk(KERN_ERR
"md: invalid array_size %llu > default size %llu\n",
(unsigned long long)mddev->array_sectors / 2,
- (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
+ (unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
- mddev->pers->stop(mddev);
}
- if (err == 0 && mddev->pers->sync_request &&
+ if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
err = bitmap_create(mddev);
- if (err) {
+ if (err)
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
- mddev->pers->stop(mddev);
- }
}
if (err) {
- module_put(mddev->pers->owner);
- mddev->pers = NULL;
+ mddev_detach(mddev);
+ pers->free(mddev, mddev->private);
+ module_put(pers->owner);
bitmap_destroy(mddev);
return err;
}
- if (mddev->pers->sync_request) {
+ if (mddev->queue) {
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ mddev->queue->backing_dev_info.congested_fn = md_congested;
+ blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
+ }
+ if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
@@ -4927,7 +5107,10 @@ int md_run(struct mddev *mddev)
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
+ spin_lock(&mddev->lock);
+ mddev->pers = pers;
mddev->ready = 1;
+ spin_unlock(&mddev->lock);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
@@ -5070,14 +5253,38 @@ void md_stop_writes(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(md_stop_writes);
+static void mddev_detach(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+ /* wait for behind writes to complete */
+ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+ printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
+ /* need to kick something here to make sure I/O goes? */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
+ if (mddev->pers && mddev->pers->quiesce) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
+ md_unregister_thread(&mddev->thread);
+ if (mddev->queue)
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+}
+
static void __md_stop(struct mddev *mddev)
{
+ struct md_personality *pers = mddev->pers;
+ mddev_detach(mddev);
+ spin_lock(&mddev->lock);
mddev->ready = 0;
- mddev->pers->stop(mddev);
- if (mddev->pers->sync_request && mddev->to_remove == NULL)
- mddev->to_remove = &md_redundancy_group;
- module_put(mddev->pers->owner);
mddev->pers = NULL;
+ spin_unlock(&mddev->lock);
+ pers->free(mddev, mddev->private);
+ if (pers->sync_request && mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
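Array teardown is now split between the core and the personality: mddev_detach() does the parts that used to be copied into every ->stop() (wait for bitmap behind-writes, ->quiesce(1)/->quiesce(0), unregister the array thread, blk_sync_queue()), and __md_stop() clears ->pers under mddev->lock before handing the private data to the new ->free() hook. The resulting ordering, condensed from the two functions above:

        mddev_detach(mddev);            /* behind-writes, quiesce, thread, queue sync */

        spin_lock(&mddev->lock);
        mddev->ready = 0;
        mddev->pers = NULL;             /* lock-protected, per the md.h comment below */
        spin_unlock(&mddev->lock);

        pers->free(mddev, mddev->private);
        module_put(pers->owner);

This is why the per-personality free routines later in the patch shrink to plain kfree()s.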
@@ -5226,8 +5433,11 @@ static int do_md_stop(struct mddev *mddev, int mode,
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
- fput(mddev->bitmap_info.file);
+ struct file *f = mddev->bitmap_info.file;
+ spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
+ spin_unlock(&mddev->lock);
+ fput(f);
}
mddev->bitmap_info.offset = 0;
@@ -5436,37 +5646,31 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
- char *ptr, *buf = NULL;
- int err = -ENOMEM;
+ char *ptr;
+ int err;
file = kmalloc(sizeof(*file), GFP_NOIO);
-
if (!file)
- goto out;
+ return -ENOMEM;
+ err = 0;
+ spin_lock(&mddev->lock);
/* bitmap disabled, zero the first byte and copy out */
- if (!mddev->bitmap || !mddev->bitmap->storage.file) {
+ if (!mddev->bitmap_info.file)
file->pathname[0] = '\0';
- goto copy_out;
- }
-
- buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
- if (!buf)
- goto out;
-
- ptr = d_path(&mddev->bitmap->storage.file->f_path,
- buf, sizeof(file->pathname));
- if (IS_ERR(ptr))
- goto out;
-
- strcpy(file->pathname, ptr);
+ else if ((ptr = d_path(&mddev->bitmap_info.file->f_path,
+ file->pathname, sizeof(file->pathname))),
+ IS_ERR(ptr))
+ err = PTR_ERR(ptr);
+ else
+ memmove(file->pathname, ptr,
+ sizeof(file->pathname)-(ptr-file->pathname));
+ spin_unlock(&mddev->lock);
-copy_out:
- err = 0;
- if (copy_to_user(arg, file, sizeof(*file)))
+ if (err == 0 &&
+ copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
-out:
- kfree(buf);
+
kfree(file);
return err;
}
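get_bitmap_file() now snapshots the path under the mddev->lock spinlock and reads mddev->bitmap_info.file rather than chasing mddev->bitmap->storage.file, which removes the temporary kmalloc() and the dependence on the bitmap still existing. The memmove() is needed because d_path() builds the name from the end of the supplied buffer and returns a pointer into it; sliding the result to the front leaves file->pathname ready to copy to user space. Generic illustration of the call pattern:

        ptr = d_path(&f->f_path, buf, buflen);                  /* pointer inside buf, or ERR_PTR */
        if (!IS_ERR(ptr))
                memmove(buf, ptr, buflen - (ptr - buf));        /* path and its NUL now start at buf[0] */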
@@ -5789,22 +5993,24 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (fd >= 0) {
struct inode *inode;
- if (mddev->bitmap)
+ struct file *f;
+
+ if (mddev->bitmap || mddev->bitmap_info.file)
return -EEXIST; /* cannot add when bitmap is present */
- mddev->bitmap_info.file = fget(fd);
+ f = fget(fd);
- if (mddev->bitmap_info.file == NULL) {
+ if (f == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
- inode = mddev->bitmap_info.file->f_mapping->host;
+ inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
mdname(mddev));
err = -EBADF;
- } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
+ } else if (!(f->f_mode & FMODE_WRITE)) {
printk(KERN_ERR "%s: error: bitmap file must open for write\n",
mdname(mddev));
err = -EBADF;
@@ -5814,10 +6020,10 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
err = -EBUSY;
}
if (err) {
- fput(mddev->bitmap_info.file);
- mddev->bitmap_info.file = NULL;
+ fput(f);
return err;
}
+ mddev->bitmap_info.file = f;
mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
@@ -5836,9 +6042,13 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
- if (mddev->bitmap_info.file)
- fput(mddev->bitmap_info.file);
- mddev->bitmap_info.file = NULL;
+ struct file *f = mddev->bitmap_info.file;
+ if (f) {
+ spin_lock(&mddev->lock);
+ mddev->bitmap_info.file = NULL;
+ spin_unlock(&mddev->lock);
+ fput(f);
+ }
}
return err;
@@ -6251,6 +6461,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
goto out;
+
+ case GET_BITMAP_FILE:
+ err = get_bitmap_file(mddev, argp);
+ goto out;
+
}
if (cmd == ADD_NEW_DISK)
@@ -6342,10 +6557,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* Commands even a read-only array can execute:
*/
switch (cmd) {
- case GET_BITMAP_FILE:
- err = get_bitmap_file(mddev, argp);
- goto unlock;
-
case RESTART_ARRAY_RW:
err = restart_array(mddev);
goto unlock;
@@ -6873,9 +7084,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
return 0;
}
- if (mddev_lock(mddev) < 0)
- return -EINTR;
-
+ spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
@@ -6888,7 +7097,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
}
sectors = 0;
- rdev_for_each(rdev, mddev) {
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -6904,6 +7114,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
+ rcu_read_unlock();
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
@@ -6946,7 +7157,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
- mddev_unlock(mddev);
+ spin_unlock(&mddev->lock);
return 0;
}
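md_seq_show() no longer takes reconfig_mutex at all, so reading /proc/mdstat can neither block behind a long reconfiguration nor fail with -EINTR: it runs under the mddev->lock spinlock and walks the device list with rdev_for_each_rcu() inside an RCU read-side section, as in the hunk above:

        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev) {
                /* only fields safe to read without reconfig_mutex are used here */
                sectors += rdev->sectors;
        }
        rcu_read_unlock();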
@@ -7102,7 +7313,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
if (mddev->safemode == 1)
mddev->safemode = 0;
if (mddev->in_sync) {
- spin_lock_irq(&mddev->write_lock);
+ spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
@@ -7110,7 +7321,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
md_wakeup_thread(mddev->thread);
did_change = 1;
}
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
}
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -7148,7 +7359,7 @@ int md_allow_write(struct mddev *mddev)
if (!mddev->pers->sync_request)
return 0;
- spin_lock_irq(&mddev->write_lock);
+ spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
@@ -7156,11 +7367,11 @@ int md_allow_write(struct mddev *mddev)
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
md_update_sb(mddev, 0);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
@@ -7513,6 +7724,7 @@ void md_do_sync(struct md_thread *thread)
skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
@@ -7521,6 +7733,8 @@ void md_do_sync(struct md_thread *thread)
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
+ spin_unlock(&mddev->lock);
+
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -7688,7 +7902,7 @@ void md_check_recovery(struct mddev *mddev)
if (!mddev->external) {
int did_change = 0;
- spin_lock_irq(&mddev->write_lock);
+ spin_lock(&mddev->lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
@@ -7699,7 +7913,7 @@ void md_check_recovery(struct mddev *mddev)
}
if (mddev->safemode == 1)
mddev->safemode = 0;
- spin_unlock_irq(&mddev->write_lock);
+ spin_unlock(&mddev->lock);
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
@@ -7721,7 +7935,9 @@ void md_check_recovery(struct mddev *mddev)
* any transients in the value of "sync_action".
*/
mddev->curr_resync_completed = 0;
+ spin_lock(&mddev->lock);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ spin_unlock(&mddev->lock);
/* Clear some bits that don't mean anything, but
* might be left set
*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 03cec5bdcaae..318ca8fd430f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -386,7 +386,18 @@ struct mddev {
struct work_struct del_work; /* used for delayed sysfs removal */
- spinlock_t write_lock;
+ /* "lock" protects:
+ * flush_bio transition from NULL to !NULL
+ * rdev superblocks, events
+ * clearing MD_CHANGE_*
+ * in_sync - and related safemode and MD_CHANGE changes
+ * pers (also protected by reconfig_mutex and pending IO).
+ * clearing ->bitmap
+ * clearing ->bitmap_info.file
+ * changing ->resync_{min,max}
+ * setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
+ */
+ spinlock_t lock;
wait_queue_head_t sb_wait; /* for waiting on superblock updates */
atomic_t pending_writes; /* number of active superblock writes */
@@ -439,13 +450,30 @@ struct mddev {
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};
-static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
+static inline int __must_check mddev_lock(struct mddev *mddev)
{
- int faulty = test_bit(Faulty, &rdev->flags);
- if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ return mutex_lock_interruptible(&mddev->reconfig_mutex);
+}
+
+/* Sometimes we need to take the lock in a situation where
+ * failure due to interrupts is not acceptable.
+ */
+static inline void mddev_lock_nointr(struct mddev *mddev)
+{
+ mutex_lock(&mddev->reconfig_mutex);
+}
+
+static inline int mddev_is_locked(struct mddev *mddev)
+{
+ return mutex_is_locked(&mddev->reconfig_mutex);
}
+static inline int mddev_trylock(struct mddev *mddev)
+{
+ return mutex_trylock(&mddev->reconfig_mutex);
+}
+extern void mddev_unlock(struct mddev *mddev);
+
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
@@ -459,7 +487,7 @@ struct md_personality
struct module *owner;
void (*make_request)(struct mddev *mddev, struct bio *bio);
int (*run)(struct mddev *mddev);
- int (*stop)(struct mddev *mddev);
+ void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
* if appropriate, and should abort recovery if needed
@@ -490,6 +518,13 @@ struct md_personality
* array.
*/
void *(*takeover) (struct mddev *mddev);
+ /* congested implements bdi.congested_fn().
+ * Will not be called while array is 'suspended' */
+ int (*congested)(struct mddev *mddev, int bits);
+ /* mergeable_bvec is use to implement ->merge_bvec_fn */
+ int (*mergeable_bvec)(struct mddev *mddev,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec);
};
struct md_sysfs_entry {
@@ -624,4 +659,14 @@ static inline int mddev_check_plugged(struct mddev *mddev)
return !!blk_check_plugged(md_unplug, mddev,
sizeof(struct blk_plug_cb));
}
+
+static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
+{
+ int faulty = test_bit(Faulty, &rdev->flags);
+ if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+}
+
#endif /* _MD_MD_H */
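The md.h hunks collect the interface changes the rest of the patch relies on: the old write_lock becomes the broader mddev->lock with a comment listing exactly what it protects; the mddev_lock()/mddev_lock_nointr()/mddev_is_locked()/mddev_trylock() helpers move here (with mddev_unlock() exported from md.c) so personalities and sysfs handlers can take the reconfig mutex themselves; struct md_personality replaces int ->stop() with void ->free(mddev, priv) and gains the optional ->congested and ->mergeable_bvec hooks; and rdev_dec_pending() now also wakes the array thread when the last reference to a faulty device drops. A personality built against this interface is wired up roughly like the linear conversion above (illustrative; the example_* names are hypothetical):

        static struct md_personality example_personality = {
                .name           = "example",
                .owner          = THIS_MODULE,
                .make_request   = example_make_request,
                .run            = example_run,
                .free           = example_free,          /* was .stop; the core detaches first */
                .status         = example_status,
                .size           = example_size,
                .quiesce        = example_quiesce,
                .congested      = example_congested,     /* optional: bdi congestion */
                .mergeable_bvec = example_mergeable_bvec, /* optional: replaces the queue merge_bvec_fn */
        };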
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 399272f9c042..ac3ede2bd00e 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -153,15 +153,11 @@ static void multipath_status (struct seq_file *seq, struct mddev *mddev)
seq_printf (seq, "]");
}
-static int multipath_congested(void *data, int bits)
+static int multipath_congested(struct mddev *mddev, int bits)
{
- struct mddev *mddev = data;
struct mpconf *conf = mddev->private;
int i, ret = 0;
- if (mddev_congested(mddev, bits))
- return 1;
-
rcu_read_lock();
for (i = 0; i < mddev->raid_disks ; i++) {
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
@@ -403,7 +399,7 @@ static int multipath_run (struct mddev *mddev)
/*
* copy the already verified devices into our private MULTIPATH
* bookkeeping area. [whatever we allocate in multipath_run(),
- * should be freed in multipath_stop()]
+ * should be freed in multipath_free()]
*/
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
@@ -489,9 +485,6 @@ static int multipath_run (struct mddev *mddev)
*/
md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
- mddev->queue->backing_dev_info.congested_fn = multipath_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
-
if (md_integrity_register(mddev))
goto out_free_conf;
@@ -507,17 +500,13 @@ out:
return -EIO;
}
-static int multipath_stop (struct mddev *mddev)
+static void multipath_free(struct mddev *mddev, void *priv)
{
- struct mpconf *conf = mddev->private;
+ struct mpconf *conf = priv;
- md_unregister_thread(&mddev->thread);
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
mempool_destroy(conf->pool);
kfree(conf->multipaths);
kfree(conf);
- mddev->private = NULL;
- return 0;
}
static struct md_personality multipath_personality =
@@ -527,12 +516,13 @@ static struct md_personality multipath_personality =
.owner = THIS_MODULE,
.make_request = multipath_make_request,
.run = multipath_run,
- .stop = multipath_stop,
+ .free = multipath_free,
.status = multipath_status,
.error_handler = multipath_error,
.hot_add_disk = multipath_add_disk,
.hot_remove_disk= multipath_remove_disk,
.size = multipath_size,
+ .congested = multipath_congested,
};
static int __init multipath_init (void)
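multipath follows the same conversion: multipath_congested() takes the mddev directly and drops the leading mddev_congested() call while keeping its own rcu_read_lock() around the per-path rdev pointers, the congested_fn/congested_data wiring disappears from multipath_run(), and multipath_stop() becomes multipath_free(). For contrast with the old stop routine, the entire teardown that remains in the personality is:

        static void multipath_free(struct mddev *mddev, void *priv)
        {
                struct mpconf *conf = priv;

                /* thread unregistration and blk_sync_queue() now happen in mddev_detach() */
                mempool_destroy(conf->pool);
                kfree(conf->multipaths);
                kfree(conf);
        }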
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ba6b85de96d2..a13f738a7b39 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,17 +25,13 @@
#include "raid0.h"
#include "raid5.h"
-static int raid0_congested(void *data, int bits)
+static int raid0_congested(struct mddev *mddev, int bits)
{
- struct mddev *mddev = data;
struct r0conf *conf = mddev->private;
struct md_rdev **devlist = conf->devlist;
int raid_disks = conf->strip_zone[0].nb_dev;
int i, ret = 0;
- if (mddev_congested(mddev, bits))
- return 1;
-
for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
@@ -263,8 +259,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
mdname(mddev),
(unsigned long long)smallest->sectors);
}
- mddev->queue->backing_dev_info.congested_fn = raid0_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
/*
* now since we have the hard sector sizes, we can make sure
@@ -356,17 +350,16 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
/**
* raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
+ * @mddev: the md device
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* Return amount of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(struct request_queue *q,
+static int raid0_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct r0conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
sector_t sector_offset = sector;
@@ -422,7 +415,7 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
return array_sectors;
}
-static int raid0_stop(struct mddev *mddev);
+static void raid0_free(struct mddev *mddev, void *priv);
static int raid0_run(struct mddev *mddev)
{
@@ -471,26 +464,22 @@ static int raid0_run(struct mddev *mddev)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
- blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
dump_zones(mddev);
ret = md_integrity_register(mddev);
if (ret)
- raid0_stop(mddev);
+ raid0_free(mddev, conf);
return ret;
}
-static int raid0_stop(struct mddev *mddev)
+static void raid0_free(struct mddev *mddev, void *priv)
{
- struct r0conf *conf = mddev->private;
+ struct r0conf *conf = priv;
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
- return 0;
}
/*
@@ -724,11 +713,13 @@ static struct md_personality raid0_personality=
.owner = THIS_MODULE,
.make_request = raid0_make_request,
.run = raid0_run,
- .stop = raid0_stop,
+ .free = raid0_free,
.status = raid0_status,
.size = raid0_size,
.takeover = raid0_takeover,
.quiesce = raid0_quiesce,
+ .congested = raid0_congested,
+ .mergeable_bvec = raid0_mergeable_bvec,
};
static int __init raid0_init (void)
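raid0 is converted the same way: raid0_congested() and raid0_mergeable_bvec() take the mddev, the queue wiring is removed from create_strip_zones()/raid0_run(), and raid0_stop() becomes raid0_free(). Since raid0 runs no array thread, its error path can simply free the conf when md_integrity_register() fails, as the hunk above does:

        ret = md_integrity_register(mddev);
        if (ret)
                raid0_free(mddev, conf);        /* no ->stop(); the conf is torn down directly */
        return ret;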
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 40b35be34f8d..5dd0c2e59ab9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -701,11 +701,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
return best_disk;
}
-static int raid1_mergeable_bvec(struct request_queue *q,
+static int raid1_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct r1conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max = biovec->bv_len;
@@ -734,7 +733,7 @@ static int raid1_mergeable_bvec(struct request_queue *q,
}
-int md_raid1_congested(struct mddev *mddev, int bits)
+static int raid1_congested(struct mddev *mddev, int bits)
{
struct r1conf *conf = mddev->private;
int i, ret = 0;
@@ -763,15 +762,6 @@ int md_raid1_congested(struct mddev *mddev, int bits)
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(md_raid1_congested);
-
-static int raid1_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
-
- return mddev_congested(mddev, bits) ||
- md_raid1_congested(mddev, bits);
-}
static void flush_pending_writes(struct r1conf *conf)
{
@@ -2882,7 +2872,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
-static int stop(struct mddev *mddev);
+static void raid1_free(struct mddev *mddev, void *priv);
static int run(struct mddev *mddev)
{
struct r1conf *conf;
@@ -2904,7 +2894,7 @@ static int run(struct mddev *mddev)
/*
* copy the already verified devices into our private RAID1
* bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
+ * should be freed in raid1_free()]
*/
if (mddev->private == NULL)
conf = setup_conf(mddev);
@@ -2955,10 +2945,6 @@ static int run(struct mddev *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
if (mddev->queue) {
- mddev->queue->backing_dev_info.congested_fn = raid1_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
- blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
-
if (discard_supported)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
@@ -2968,37 +2954,23 @@ static int run(struct mddev *mddev)
}
ret = md_integrity_register(mddev);
- if (ret)
- stop(mddev);
+ if (ret) {
+ md_unregister_thread(&mddev->thread);
+ raid1_free(mddev, conf);
+ }
return ret;
}
-static int stop(struct mddev *mddev)
+static void raid1_free(struct mddev *mddev, void *priv)
{
- struct r1conf *conf = mddev->private;
- struct bitmap *bitmap = mddev->bitmap;
+ struct r1conf *conf = priv;
- /* wait for behind writes to complete */
- if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
- mdname(mddev));
- /* need to kick something here to make sure I/O goes? */
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
- }
-
- freeze_array(conf, 0);
- unfreeze_array(conf);
-
- md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
kfree(conf);
- mddev->private = NULL;
- return 0;
}
static int raid1_resize(struct mddev *mddev, sector_t sectors)
@@ -3181,7 +3153,7 @@ static struct md_personality raid1_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
- .stop = stop,
+ .free = raid1_free,
.status = status,
.error_handler = error,
.hot_add_disk = raid1_add_disk,
@@ -3193,6 +3165,8 @@ static struct md_personality raid1_personality =
.check_reshape = raid1_reshape,
.quiesce = raid1_quiesce,
.takeover = raid1_takeover,
+ .congested = raid1_congested,
+ .mergeable_bvec = raid1_mergeable_bvec,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 33bda55ef9f7..14ebb288c1ef 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -170,7 +170,4 @@ struct r1bio {
*/
#define R1BIO_MadeGood 7
#define R1BIO_WriteError 8
-
-extern int md_raid1_congested(struct mddev *mddev, int bits);
-
#endif
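The raid1 hunks above move the congestion and bvec-merge hooks out of the request queue and into the md_personality table (.congested, .mergeable_bvec), and replace ->stop() with ->free(), which now only releases the personality's own state. A minimal standalone C sketch of that dispatch pattern follows; every name in it (struct personality, struct mydev, dev_congested) is invented for illustration and is not the md core API.

#include <stdio.h>

struct mydev;                                   /* stand-in for struct mddev */

struct personality {                            /* stand-in for struct md_personality */
	int  (*congested)(struct mydev *dev, int bits);
	void (*free)(struct mydev *dev, void *priv);
};

struct mydev {
	const struct personality *pers;
	void *private;                          /* per-personality state ("conf") */
};

/* Core-side helper: ask the active personality, default to "not congested". */
static int dev_congested(struct mydev *dev, int bits)
{
	if (dev->pers && dev->pers->congested)
		return dev->pers->congested(dev, bits);
	return 0;
}

static int raid1_like_congested(struct mydev *dev, int bits)
{
	(void)bits;
	return dev->private != NULL;            /* pretend: congested while conf exists */
}

static void raid1_like_free(struct mydev *dev, void *priv)
{
	(void)dev;
	printf("freeing conf %p\n", priv);      /* personality frees only its own state */
}

static const struct personality raid1_like = {
	.congested = raid1_like_congested,
	.free      = raid1_like_free,
};

int main(void)
{
	int conf = 42;
	struct mydev dev = { .pers = &raid1_like, .private = &conf };

	printf("congested=%d\n", dev_congested(&dev, 0));
	dev.pers->free(&dev, dev.private);
	return 0;
}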
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 32e282f4c83c..b8d76b1fba64 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -674,7 +674,7 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
/**
* raid10_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
+ * @mddev: the md device
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
@@ -682,11 +682,10 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
* This requires checking for end-of-chunk if near_copies != raid_disks,
* and for subordinate merge_bvec_fns if merge_check_needed.
*/
-static int raid10_mergeable_bvec(struct request_queue *q,
+static int raid10_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct r10conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
@@ -910,7 +909,7 @@ retry:
return rdev;
}
-int md_raid10_congested(struct mddev *mddev, int bits)
+static int raid10_congested(struct mddev *mddev, int bits)
{
struct r10conf *conf = mddev->private;
int i, ret = 0;
@@ -934,15 +933,6 @@ int md_raid10_congested(struct mddev *mddev, int bits)
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(md_raid10_congested);
-
-static int raid10_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
-
- return mddev_congested(mddev, bits) ||
- md_raid10_congested(mddev, bits);
-}
static void flush_pending_writes(struct r10conf *conf)
{
@@ -3757,8 +3747,6 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
int stripe = conf->geo.raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- mddev->queue->backing_dev_info.congested_fn = raid10_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
/* Calculate max read-ahead size.
* We need to readahead at least twice a whole stripe....
@@ -3767,7 +3755,6 @@ static int run(struct mddev *mddev)
stripe /= conf->geo.near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
}
if (md_integrity_register(mddev))
@@ -3811,17 +3798,9 @@ out:
return -EIO;
}
-static int stop(struct mddev *mddev)
+static void raid10_free(struct mddev *mddev, void *priv)
{
- struct r10conf *conf = mddev->private;
-
- raise_barrier(conf, 0);
- lower_barrier(conf);
-
- md_unregister_thread(&mddev->thread);
- if (mddev->queue)
- /* the unplug fn references 'conf'*/
- blk_sync_queue(mddev->queue);
+ struct r10conf *conf = priv;
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
@@ -3830,8 +3809,6 @@ static int stop(struct mddev *mddev)
kfree(conf->mirrors_old);
kfree(conf->mirrors_new);
kfree(conf);
- mddev->private = NULL;
- return 0;
}
static void raid10_quiesce(struct mddev *mddev, int state)
@@ -3895,7 +3872,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
return 0;
}
-static void *raid10_takeover_raid0(struct mddev *mddev)
+static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
{
struct md_rdev *rdev;
struct r10conf *conf;
@@ -3905,6 +3882,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev)
mdname(mddev));
return ERR_PTR(-EINVAL);
}
+ sector_div(size, devs);
/* Set new parameters */
mddev->new_level = 10;
@@ -3915,12 +3893,15 @@ static void *raid10_takeover_raid0(struct mddev *mddev)
mddev->raid_disks *= 2;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ mddev->dev_sectors = size;
conf = setup_conf(mddev);
if (!IS_ERR(conf)) {
rdev_for_each(rdev, mddev)
- if (rdev->raid_disk >= 0)
+ if (rdev->raid_disk >= 0) {
rdev->new_raid_disk = rdev->raid_disk * 2;
+ rdev->sectors = size;
+ }
conf->barrier = 1;
}
@@ -3943,7 +3924,9 @@ static void *raid10_takeover(struct mddev *mddev)
mdname(mddev));
return ERR_PTR(-EINVAL);
}
- return raid10_takeover_raid0(mddev);
+ return raid10_takeover_raid0(mddev,
+ raid0_conf->strip_zone->zone_end,
+ raid0_conf->strip_zone->nb_dev);
}
return ERR_PTR(-EINVAL);
}
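With the extra arguments, raid10_takeover_raid0() can size the new array itself: the raid0 zone end divided by the member count gives the per-device sector count that is stored in mddev->dev_sectors and each rdev->sectors. A tiny standalone sketch of that arithmetic, with purely hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

/* Toy model of the raid0 -> raid10 takeover sizing: the raid0 strip zone
 * records the array end in sectors; dividing by the number of member
 * devices gives the per-device size each rdev must be clamped to. */
int main(void)
{
	uint64_t zone_end = 7814037168ULL;   /* hypothetical raid0 zone_end, in sectors */
	unsigned int nb_dev = 2;             /* hypothetical member count */

	uint64_t per_dev_sectors = zone_end / nb_dev;   /* sector_div(size, devs) */

	printf("per-device size: %llu sectors (%.1f GiB)\n",
	       (unsigned long long)per_dev_sectors,
	       per_dev_sectors / 2.0 / 1024.0 / 1024.0);
	return 0;
}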
@@ -4713,7 +4696,7 @@ static struct md_personality raid10_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
- .stop = stop,
+ .free = raid10_free,
.status = status,
.error_handler = error,
.hot_add_disk = raid10_add_disk,
@@ -4727,6 +4710,8 @@ static struct md_personality raid10_personality =
.check_reshape = raid10_check_reshape,
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
+ .congested = raid10_congested,
+ .mergeable_bvec = raid10_mergeable_bvec,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 157d69e83ff4..5ee6473ddc2c 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -150,7 +150,4 @@ enum r10bio_state {
*/
R10BIO_Previous,
};
-
-extern int md_raid10_congested(struct mddev *mddev, int bits);
-
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c1b0d52bfcb0..aa76865b804b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -296,12 +296,9 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state) &&
- !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
- if (atomic_read(&conf->preread_active_stripes)
- < IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
else {
@@ -2898,31 +2895,102 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
* Returns 1 when no more member devices need to be checked, otherwise returns
* 0 to tell the loop in handle_stripe_fill to continue
*/
-static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
- int disk_idx, int disks)
+
+static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
+ int disk_idx, int disks)
{
struct r5dev *dev = &sh->dev[disk_idx];
struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
&sh->dev[s->failed_num[1]] };
+ int i;
+
+
+ if (test_bit(R5_LOCKED, &dev->flags) ||
+ test_bit(R5_UPTODATE, &dev->flags))
+ /* No point reading this as we already have it or have
+ * decided to get it.
+ */
+ return 0;
+
+ if (dev->toread ||
+ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
+ /* We need this block to directly satisfy a request */
+ return 1;
+
+ if (s->syncing || s->expanding ||
+ (s->replacing && want_replace(sh, disk_idx)))
+ /* When syncing, or expanding we read everything.
+ * When replacing, we need the replaced block.
+ */
+ return 1;
+
+ if ((s->failed >= 1 && fdev[0]->toread) ||
+ (s->failed >= 2 && fdev[1]->toread))
+ /* If we want to read from a failed device, then
+ * we need to actually read every other device.
+ */
+ return 1;
+
+ /* Sometimes neither read-modify-write nor reconstruct-write
+ * cycles can work. In those cases we read every block we
+ * can. Then the parity-update is certain to have enough to
+ * work with.
+ * This can only be a problem when we need to write something,
+ * and some device has failed. If either of those tests
+ * fails we need look no further.
+ */
+ if (!s->failed || !s->to_write)
+ return 0;
+
+ if (test_bit(R5_Insync, &dev->flags) &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ /* Pre-reads are not permitted until after a short delay
+ * to gather multiple requests. However if this
+ * device is not Insync, the block could only be computed
+ * and there is no need to delay that.
+ */
+ return 0;
+
+ for (i = 0; i < s->failed; i++) {
+ if (fdev[i]->towrite &&
+ !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
+ !test_bit(R5_OVERWRITE, &fdev[i]->flags))
+ /* If we have a partial write to a failed
+ * device, then we will need to reconstruct
+ * the content of that device, so all other
+ * devices must be read.
+ */
+ return 1;
+ }
+
+ /* If we are forced to do a reconstruct-write, either because
+ * the current RAID6 implementation only supports that, or
+ * because parity cannot be trusted and we are currently
+ * recovering it, there is extra need to be careful.
+ * If one of the devices that we would need to read, because
+ * it is not being overwritten (and maybe not written at all)
+ * is missing/faulty, then we need to read everything we can.
+ */
+ if (sh->raid_conf->level != 6 &&
+ sh->sector < sh->raid_conf->mddev->recovery_cp)
+ /* reconstruct-write isn't being forced */
+ return 0;
+ for (i = 0; i < s->failed; i++) {
+ if (!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
+ !test_bit(R5_OVERWRITE, &fdev[i]->flags))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
+ int disk_idx, int disks)
+{
+ struct r5dev *dev = &sh->dev[disk_idx];
/* is the data in this block needed, and can we get it? */
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- s->syncing || s->expanding ||
- (s->replacing && want_replace(sh, disk_idx)) ||
- (s->failed >= 1 && fdev[0]->toread) ||
- (s->failed >= 2 && fdev[1]->toread) ||
- (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
- (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
- !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
- ((sh->raid_conf->level == 6 ||
- sh->sector >= sh->raid_conf->mddev->recovery_cp)
- && s->failed && s->to_write &&
- (s->to_write - s->non_overwrite <
- sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) &&
- (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
+ if (need_this_block(sh, s, disk_idx, disks)) {
/* we would like to get this block, possibly by computing it,
* otherwise read it if the backing disk is insync
*/
@@ -3195,6 +3263,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector,
rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
}
+
+ if (rcw > disks && rmw > disks &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ set_bit(STRIPE_DELAYED, &sh->state);
+
/* now if nothing is locked, and if we have enough data,
* we can start a write request
*/
@@ -4076,7 +4149,7 @@ static void activate_bit_delay(struct r5conf *conf,
}
}
-int md_raid5_congested(struct mddev *mddev, int bits)
+static int raid5_congested(struct mddev *mddev, int bits)
{
struct r5conf *conf = mddev->private;
@@ -4093,24 +4166,14 @@ int md_raid5_congested(struct mddev *mddev, int bits)
return 0;
}
-EXPORT_SYMBOL_GPL(md_raid5_congested);
-
-static int raid5_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
-
- return mddev_congested(mddev, bits) ||
- md_raid5_congested(mddev, bits);
-}
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(struct request_queue *q,
+static int raid5_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
unsigned int chunk_sectors = mddev->chunk_sectors;
@@ -5291,11 +5354,14 @@ static void raid5d(struct md_thread *thread)
static ssize_t
raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
+ int ret = 0;
+ spin_lock(&mddev->lock);
+ conf = mddev->private;
if (conf)
- return sprintf(page, "%d\n", conf->max_nr_stripes);
- else
- return 0;
+ ret = sprintf(page, "%d\n", conf->max_nr_stripes);
+ spin_unlock(&mddev->lock);
+ return ret;
}
int
@@ -5334,21 +5400,25 @@ EXPORT_SYMBOL(raid5_set_cache_size);
static ssize_t
raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
unsigned long new;
int err;
if (len >= PAGE_SIZE)
return -EINVAL;
- if (!conf)
- return -ENODEV;
-
if (kstrtoul(page, 10, &new))
return -EINVAL;
- err = raid5_set_cache_size(mddev, new);
+ err = mddev_lock(mddev);
if (err)
return err;
- return len;
+ conf = mddev->private;
+ if (!conf)
+ err = -ENODEV;
+ else
+ err = raid5_set_cache_size(mddev, new);
+ mddev_unlock(mddev);
+
+ return err ?: len;
}
static struct md_sysfs_entry
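The sysfs handlers above stop trusting a conf pointer fetched before any locking: show paths take mddev->lock, store paths take mddev_lock(), and both re-read mddev->private afterwards, returning -ENODEV if the array has gone away. A standalone pthread sketch of that lock-then-revalidate pattern (struct mydev, store_threshold and friends are made-up names, not md code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Toy model of the store pattern: never cache the private pointer across
 * the lock; re-read it under the lock and fail with -ENODEV if the array
 * has been stopped in the meantime. */
struct conf { int bypass_threshold; int max_nr_stripes; };

struct mydev {
	pthread_mutex_t lock;
	struct conf *private;        /* may become NULL at any time */
};

static int store_threshold(struct mydev *dev, int new)
{
	struct conf *conf;
	int err = 0;

	pthread_mutex_lock(&dev->lock);
	conf = dev->private;                 /* revalidate under the lock */
	if (!conf)
		err = -ENODEV;
	else if (new > conf->max_nr_stripes)
		err = -EINVAL;
	else
		conf->bypass_threshold = new;
	pthread_mutex_unlock(&dev->lock);

	return err;                          /* caller returns err ?: len */
}

int main(void)
{
	struct conf c = { .bypass_threshold = 1, .max_nr_stripes = 256 };
	struct mydev dev = { .lock = PTHREAD_MUTEX_INITIALIZER, .private = &c };
	int err;

	err = store_threshold(&dev, 8);
	printf("store=%d threshold=%d\n", err, c.bypass_threshold);

	dev.private = NULL;                  /* array stopped */
	printf("store=%d\n", store_threshold(&dev, 8));
	return 0;
}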
@@ -5359,29 +5429,40 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
+ int ret = 0;
+ spin_lock(&mddev->lock);
+ conf = mddev->private;
if (conf)
- return sprintf(page, "%d\n", conf->bypass_threshold);
- else
- return 0;
+ ret = sprintf(page, "%d\n", conf->bypass_threshold);
+ spin_unlock(&mddev->lock);
+ return ret;
}
static ssize_t
raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
unsigned long new;
+ int err;
+
if (len >= PAGE_SIZE)
return -EINVAL;
- if (!conf)
- return -ENODEV;
-
if (kstrtoul(page, 10, &new))
return -EINVAL;
- if (new > conf->max_nr_stripes)
- return -EINVAL;
- conf->bypass_threshold = new;
- return len;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf)
+ err = -ENODEV;
+ else if (new > conf->max_nr_stripes)
+ err = -EINVAL;
+ else
+ conf->bypass_threshold = new;
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry
@@ -5393,39 +5474,48 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
static ssize_t
raid5_show_skip_copy(struct mddev *mddev, char *page)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
+ int ret = 0;
+ spin_lock(&mddev->lock);
+ conf = mddev->private;
if (conf)
- return sprintf(page, "%d\n", conf->skip_copy);
- else
- return 0;
+ ret = sprintf(page, "%d\n", conf->skip_copy);
+ spin_unlock(&mddev->lock);
+ return ret;
}
static ssize_t
raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
unsigned long new;
+ int err;
+
if (len >= PAGE_SIZE)
return -EINVAL;
- if (!conf)
- return -ENODEV;
-
if (kstrtoul(page, 10, &new))
return -EINVAL;
new = !!new;
- if (new == conf->skip_copy)
- return len;
- mddev_suspend(mddev);
- conf->skip_copy = new;
- if (new)
- mddev->queue->backing_dev_info.capabilities |=
- BDI_CAP_STABLE_WRITES;
- else
- mddev->queue->backing_dev_info.capabilities &=
- ~BDI_CAP_STABLE_WRITES;
- mddev_resume(mddev);
- return len;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf)
+ err = -ENODEV;
+ else if (new != conf->skip_copy) {
+ mddev_suspend(mddev);
+ conf->skip_copy = new;
+ if (new)
+ mddev->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+ else
+ mddev->queue->backing_dev_info.capabilities &=
+ ~BDI_CAP_STABLE_WRITES;
+ mddev_resume(mddev);
+ }
+ mddev_unlock(mddev);
+ return err ?: len;
}
static struct md_sysfs_entry
@@ -5449,11 +5539,14 @@ raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
static ssize_t
raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
+ int ret = 0;
+ spin_lock(&mddev->lock);
+ conf = mddev->private;
if (conf)
- return sprintf(page, "%d\n", conf->worker_cnt_per_group);
- else
- return 0;
+ ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
+ spin_unlock(&mddev->lock);
+ return ret;
}
static int alloc_thread_groups(struct r5conf *conf, int cnt,
@@ -5463,7 +5556,7 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt,
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
unsigned long new;
int err;
struct r5worker_group *new_groups, *old_groups;
@@ -5471,41 +5564,41 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
if (len >= PAGE_SIZE)
return -EINVAL;
- if (!conf)
- return -ENODEV;
-
if (kstrtoul(page, 10, &new))
return -EINVAL;
- if (new == conf->worker_cnt_per_group)
- return len;
-
- mddev_suspend(mddev);
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf)
+ err = -ENODEV;
+ else if (new != conf->worker_cnt_per_group) {
+ mddev_suspend(mddev);
- old_groups = conf->worker_groups;
- if (old_groups)
- flush_workqueue(raid5_wq);
+ old_groups = conf->worker_groups;
+ if (old_groups)
+ flush_workqueue(raid5_wq);
- err = alloc_thread_groups(conf, new,
- &group_cnt, &worker_cnt_per_group,
- &new_groups);
- if (!err) {
- spin_lock_irq(&conf->device_lock);
- conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
- conf->worker_groups = new_groups;
- spin_unlock_irq(&conf->device_lock);
+ err = alloc_thread_groups(conf, new,
+ &group_cnt, &worker_cnt_per_group,
+ &new_groups);
+ if (!err) {
+ spin_lock_irq(&conf->device_lock);
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_groups;
+ spin_unlock_irq(&conf->device_lock);
- if (old_groups)
- kfree(old_groups[0].workers);
- kfree(old_groups);
+ if (old_groups)
+ kfree(old_groups[0].workers);
+ kfree(old_groups);
+ }
+ mddev_resume(mddev);
}
+ mddev_unlock(mddev);
- mddev_resume(mddev);
-
- if (err)
- return err;
- return len;
+ return err ?: len;
}
static struct md_sysfs_entry
@@ -6173,11 +6266,6 @@ static int run(struct mddev *mddev)
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
-
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
blk_queue_io_opt(mddev->queue, chunk_size *
@@ -6255,17 +6343,12 @@ abort:
return -EIO;
}
-static int stop(struct mddev *mddev)
+static void raid5_free(struct mddev *mddev, void *priv)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf = priv;
- md_unregister_thread(&mddev->thread);
- if (mddev->queue)
- mddev->queue->backing_dev_info.congested_fn = NULL;
free_conf(conf);
- mddev->private = NULL;
mddev->to_remove = &raid5_attrs_group;
- return 0;
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -7039,7 +7122,7 @@ static struct md_personality raid6_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
- .stop = stop,
+ .free = raid5_free,
.status = status,
.error_handler = error,
.hot_add_disk = raid5_add_disk,
@@ -7053,6 +7136,8 @@ static struct md_personality raid6_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
+ .congested = raid5_congested,
+ .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid5_personality =
{
@@ -7061,7 +7146,7 @@ static struct md_personality raid5_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
- .stop = stop,
+ .free = raid5_free,
.status = status,
.error_handler = error,
.hot_add_disk = raid5_add_disk,
@@ -7075,6 +7160,8 @@ static struct md_personality raid5_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
+ .congested = raid5_congested,
+ .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid4_personality =
@@ -7084,7 +7171,7 @@ static struct md_personality raid4_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
- .stop = stop,
+ .free = raid5_free,
.status = status,
.error_handler = error,
.hot_add_disk = raid5_add_disk,
@@ -7098,6 +7185,8 @@ static struct md_personality raid4_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
+ .congested = raid5_congested,
+ .mergeable_bvec = raid5_mergeable_bvec,
};
static int __init raid5_init(void)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d59f5ca743cd..983e18a83db1 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -558,7 +558,6 @@ static inline int algorithm_is_DDF(int layout)
return layout >= 8 && layout <= 10;
}
-extern int md_raid5_congested(struct mddev *mddev, int bits);
extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
#endif
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index b85f88c8ddbd..21154dd87b0b 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -8,10 +8,6 @@ comment "common driver options"
config VIDEO_CX2341X
tristate
-config VIDEO_BTCX
- depends on PCI
- tristate
-
config VIDEO_TVEEPROM
tristate
depends on I2C
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index d208de3b7cc0..89b795df2cdd 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,5 +1,4 @@
obj-y += b2c2/ saa7146/ siano/
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
-obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
diff --git a/drivers/media/common/btcx-risc.h b/drivers/media/common/btcx-risc.h
index f8bc6e8e7b51..03583ef90506 100644
--- a/drivers/media/common/btcx-risc.h
+++ b/drivers/media/common/btcx-risc.h
@@ -26,9 +26,3 @@ void btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips);
void btcx_calc_skips(int line, int width, int *maxy,
struct btcx_skiplist *skips, unsigned int *nskips,
const struct v4l2_clip *clips, unsigned int nclips);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index e4041f074909..686d3277dad1 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -68,13 +68,6 @@
#include "dvb_demux.h"
#include "dvb_net.h"
-static int dvb_net_debug;
-module_param(dvb_net_debug, int, 0444);
-MODULE_PARM_DESC(dvb_net_debug, "enable debug messages");
-
-#define dprintk(x...) do { if (dvb_net_debug) printk(x); } while (0)
-
-
static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
{
unsigned int j;
@@ -90,36 +83,9 @@ static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
#ifdef ULE_DEBUG
-#define MAC_ADDR_PRINTFMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"
-#define MAX_ADDR_PRINTFMT_ARGS(macap) (macap)[0],(macap)[1],(macap)[2],(macap)[3],(macap)[4],(macap)[5]
-
-#define isprint(c) ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9'))
-
-static void hexdump( const unsigned char *buf, unsigned short len )
+static void hexdump(const unsigned char *buf, unsigned short len)
{
- char str[80], octet[10];
- int ofs, i, l;
-
- for (ofs = 0; ofs < len; ofs += 16) {
- sprintf( str, "%03d: ", ofs );
-
- for (i = 0; i < 16; i++) {
- if ((i + ofs) < len)
- sprintf( octet, "%02x ", buf[ofs + i] );
- else
- strcpy( octet, " " );
-
- strcat( str, octet );
- }
- strcat( str, " " );
- l = strlen( str );
-
- for (i = 0; (i < 16) && ((i + ofs) < len); i++)
- str[l++] = isprint( buf[ofs + i] ) ? buf[ofs + i] : '.';
-
- str[l] = '\0';
- printk( KERN_WARNING "%s\n", str );
- }
+ print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
}
#endif
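The driver-private hexdump() and its local isprint() macro are replaced by one print_hex_dump_debug() call. For reference, a self-contained userspace routine that produces roughly the same layout as the removed helper (offset prefix, 16 hex bytes, printable ASCII); the sample frame contents below are made up:

#include <ctype.h>
#include <stdio.h>

/* Userspace approximation of the removed hexdump(): offset prefix,
 * 16 bytes of hex per row, then the printable ASCII of those bytes. */
static void hexdump(const unsigned char *buf, unsigned short len)
{
	for (unsigned ofs = 0; ofs < len; ofs += 16) {
		printf("%03u: ", ofs);
		for (unsigned i = 0; i < 16; i++) {
			if (ofs + i < len)
				printf("%02x ", buf[ofs + i]);
			else
				printf("   ");
		}
		printf(" ");
		for (unsigned i = 0; i < 16 && ofs + i < len; i++)
			putchar(isprint(buf[ofs + i]) ? buf[ofs + i] : '.');
		putchar('\n');
	}
}

int main(void)
{
	unsigned char frame[] = "ULE SNDU\x01\x02\x03\xff payload";

	hexdump(frame, sizeof(frame));
	return 0;
}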
@@ -315,9 +281,9 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
return l; /* Stop extension header processing and discard SNDU. */
total_ext_len += l;
#ifdef ULE_DEBUG
- dprintk("handle_ule_extensions: ule_next_hdr=%p, ule_sndu_type=%i, "
- "l=%i, total_ext_len=%i\n", p->ule_next_hdr,
- (int) p->ule_sndu_type, l, total_ext_len);
+ pr_debug("ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\n",
+ p->ule_next_hdr, (int)p->ule_sndu_type,
+ l, total_ext_len);
#endif
} while (p->ule_sndu_type < ETH_P_802_3_MIN);
@@ -700,8 +666,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
if (drop) {
#ifdef ULE_DEBUG
- dprintk("Dropping SNDU: MAC destination address does not match: dest addr: "MAC_ADDR_PRINTFMT", dev addr: "MAC_ADDR_PRINTFMT"\n",
- MAX_ADDR_PRINTFMT_ARGS(priv->ule_skb->data), MAX_ADDR_PRINTFMT_ARGS(dev->dev_addr));
+ netdev_dbg(dev, "Dropping SNDU: MAC destination address does not match: dest addr: %pM, dev addr: %pM\n",
+ priv->ule_skb->data, dev->dev_addr);
#endif
dev_kfree_skb(priv->ule_skb);
goto sndu_done;
@@ -964,8 +930,7 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
(*secfilter)->filter_mask[10] = mac_mask[1];
(*secfilter)->filter_mask[11]=mac_mask[0];
- dprintk("%s: filter mac=%pM\n", dev->name, mac);
- dprintk("%s: filter mask=%pM\n", dev->name, mac_mask);
+ netdev_dbg(dev, "filter mac=%pM mask=%pM\n", mac, mac_mask);
return 0;
}
@@ -977,7 +942,7 @@ static int dvb_net_feed_start(struct net_device *dev)
struct dmx_demux *demux = priv->demux;
unsigned char *mac = (unsigned char *) dev->dev_addr;
- dprintk("%s: rx_mode %i\n", __func__, priv->rx_mode);
+ netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
mutex_lock(&priv->mutex);
if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
printk("%s: BUG %d\n", __func__, __LINE__);
@@ -987,7 +952,7 @@ static int dvb_net_feed_start(struct net_device *dev)
priv->tsfeed = NULL;
if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
- dprintk("%s: alloc secfeed\n", __func__);
+ netdev_dbg(dev, "alloc secfeed\n");
ret=demux->allocate_section_feed(demux, &priv->secfeed,
dvb_net_sec_callback);
if (ret<0) {
@@ -1005,38 +970,38 @@ static int dvb_net_feed_start(struct net_device *dev)
}
if (priv->rx_mode != RX_MODE_PROMISC) {
- dprintk("%s: set secfilter\n", __func__);
+ netdev_dbg(dev, "set secfilter\n");
dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_normal);
}
switch (priv->rx_mode) {
case RX_MODE_MULTI:
for (i = 0; i < priv->multi_num; i++) {
- dprintk("%s: set multi_secfilter[%d]\n", __func__, i);
+ netdev_dbg(dev, "set multi_secfilter[%d]\n", i);
dvb_net_filter_sec_set(dev, &priv->multi_secfilter[i],
priv->multi_macs[i], mask_normal);
}
break;
case RX_MODE_ALL_MULTI:
priv->multi_num=1;
- dprintk("%s: set multi_secfilter[0]\n", __func__);
+ netdev_dbg(dev, "set multi_secfilter[0]\n");
dvb_net_filter_sec_set(dev, &priv->multi_secfilter[0],
mac_allmulti, mask_allmulti);
break;
case RX_MODE_PROMISC:
priv->multi_num=0;
- dprintk("%s: set secfilter\n", __func__);
+ netdev_dbg(dev, "set secfilter\n");
dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_promisc);
break;
}
- dprintk("%s: start filtering\n", __func__);
+ netdev_dbg(dev, "start filtering\n");
priv->secfeed->start_filtering(priv->secfeed);
} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
struct timespec timeout = { 0, 10000000 }; // 10 msec
/* we have payloads encapsulated in TS */
- dprintk("%s: alloc tsfeed\n", __func__);
+ netdev_dbg(dev, "alloc tsfeed\n");
ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
if (ret < 0) {
printk("%s: could not allocate ts feed\n", dev->name);
@@ -1060,7 +1025,7 @@ static int dvb_net_feed_start(struct net_device *dev)
goto error;
}
- dprintk("%s: start filtering\n", __func__);
+ netdev_dbg(dev, "start filtering\n");
priv->tsfeed->start_filtering(priv->tsfeed);
} else
ret = -EINVAL;
@@ -1075,17 +1040,16 @@ static int dvb_net_feed_stop(struct net_device *dev)
struct dvb_net_priv *priv = netdev_priv(dev);
int i, ret = 0;
- dprintk("%s\n", __func__);
mutex_lock(&priv->mutex);
if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
if (priv->secfeed) {
if (priv->secfeed->is_filtering) {
- dprintk("%s: stop secfeed\n", __func__);
+ netdev_dbg(dev, "stop secfeed\n");
priv->secfeed->stop_filtering(priv->secfeed);
}
if (priv->secfilter) {
- dprintk("%s: release secfilter\n", __func__);
+ netdev_dbg(dev, "release secfilter\n");
priv->secfeed->release_filter(priv->secfeed,
priv->secfilter);
priv->secfilter=NULL;
@@ -1093,8 +1057,8 @@ static int dvb_net_feed_stop(struct net_device *dev)
for (i=0; i<priv->multi_num; i++) {
if (priv->multi_secfilter[i]) {
- dprintk("%s: release multi_filter[%d]\n",
- __func__, i);
+ netdev_dbg(dev, "release multi_filter[%d]\n",
+ i);
priv->secfeed->release_filter(priv->secfeed,
priv->multi_secfilter[i]);
priv->multi_secfilter[i] = NULL;
@@ -1108,7 +1072,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
if (priv->tsfeed) {
if (priv->tsfeed->is_filtering) {
- dprintk("%s: stop tsfeed\n", __func__);
+ netdev_dbg(dev, "stop tsfeed\n");
priv->tsfeed->stop_filtering(priv->tsfeed);
}
priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
@@ -1148,16 +1112,16 @@ static void wq_set_multicast_list (struct work_struct *work)
netif_addr_lock_bh(dev);
if (dev->flags & IFF_PROMISC) {
- dprintk("%s: promiscuous mode\n", dev->name);
+ netdev_dbg(dev, "promiscuous mode\n");
priv->rx_mode = RX_MODE_PROMISC;
} else if ((dev->flags & IFF_ALLMULTI)) {
- dprintk("%s: allmulti mode\n", dev->name);
+ netdev_dbg(dev, "allmulti mode\n");
priv->rx_mode = RX_MODE_ALL_MULTI;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
- dprintk("%s: set_mc_list, %d entries\n",
- dev->name, netdev_mc_count(dev));
+ netdev_dbg(dev, "set_mc_list, %d entries\n",
+ netdev_mc_count(dev));
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 6c75418222e2..bb76727d924e 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -443,7 +443,8 @@ config DVB_CXD2820R
config DVB_RTL2830
tristate "Realtek RTL2830 DVB-T"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && I2C_MUX
+ select REGMAP
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
@@ -451,6 +452,7 @@ config DVB_RTL2830
config DVB_RTL2832
tristate "Realtek RTL2832 DVB-T"
depends on DVB_CORE && I2C && I2C_MUX
+ select REGMAP
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index 83fe9a615619..612251958855 100644
--- a/drivers/media/dvb-frontends/au8522.h
+++ b/drivers/media/dvb-frontends/au8522.h
@@ -91,8 +91,3 @@ enum au8522_audio_input {
};
#endif /* __AU8522_H__ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 61e31f2d2f71..8c6663b6399d 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -1263,7 +1263,8 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
struct dib8000_state *state = fe->demodulator_priv;
enum frontend_tune_state *tune_state = &state->tune_state;
int ret = 0;
- u16 reg, upd_demod_gain_period = 0x8000;
+ u16 reg;
+ u32 upd_demod_gain_period = 0x8000;
switch (*tune_state) {
case CT_AGC_START:
diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
index d7b9d549156d..67c8e6df42e8 100644
--- a/drivers/media/dvb-frontends/hd29l2.c
+++ b/drivers/media/dvb-frontends/hd29l2.c
@@ -22,20 +22,24 @@
#include "hd29l2_priv.h"
+#define HD29L2_MAX_LEN (3)
+
/* write multiple registers */
static int hd29l2_wr_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
- u8 buf[2 + len];
+ u8 buf[2 + HD29L2_MAX_LEN];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg.i2c_addr,
.flags = 0,
- .len = sizeof(buf),
+ .len = 2 + len,
.buf = buf,
}
};
+ if (len > HD29L2_MAX_LEN)
+ return -EINVAL;
buf[0] = 0x00;
buf[1] = reg;
memcpy(&buf[2], val, len);
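The hunk above removes a variable-length array: the buffer is now a fixed 2 + HD29L2_MAX_LEN bytes, oversized writes are rejected with -EINVAL, and the I2C message length is computed from 2 + len instead of sizeof(buf). A standalone sketch of that bounded-buffer pattern (wr_regs here is a stand-in, not the driver function):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_LEN 3                 /* mirrors HD29L2_MAX_LEN */

/* Bounded-buffer write: reject oversized requests up front instead of
 * relying on a variable-length array, and report only 2 + len bytes
 * as the message length, not sizeof(buf). */
static int wr_regs(unsigned char reg, const unsigned char *val, int len)
{
	unsigned char buf[2 + MAX_LEN];

	if (len < 0 || len > MAX_LEN)
		return -EINVAL;

	buf[0] = 0x00;
	buf[1] = reg;
	memcpy(&buf[2], val, len);

	printf("sending %d bytes for reg 0x%02x\n", 2 + len, reg);
	return 0;
}

int main(void)
{
	unsigned char v[] = { 0xaa, 0xbb };

	printf("ok=%d\n", wr_regs(0x10, v, 2));
	printf("too big=%d\n", wr_regs(0x10, v, 8));
	return 0;
}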
@@ -118,7 +122,7 @@ static int hd29l2_wr_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int hd29l2_rd_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 *val, u8 mask)
+static int hd29l2_rd_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 5fd14f840ab0..99efeba3c31a 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1456,9 +1456,3 @@ MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.3");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 92c891a571ab..d08570af1c10 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -236,12 +236,13 @@ static inline int lgdt3305_mpeg_mode(struct lgdt3305_state *state,
return lgdt3305_set_reg_bit(state, LGDT3305_TP_CTRL_1, 5, mode);
}
-static int lgdt3305_mpeg_mode_polarity(struct lgdt3305_state *state,
- enum lgdt3305_tp_clock_edge edge,
- enum lgdt3305_tp_valid_polarity valid)
+static int lgdt3305_mpeg_mode_polarity(struct lgdt3305_state *state)
{
u8 val;
int ret;
+ enum lgdt3305_tp_clock_edge edge = state->cfg->tpclk_edge;
+ enum lgdt3305_tp_clock_mode mode = state->cfg->tpclk_mode;
+ enum lgdt3305_tp_valid_polarity valid = state->cfg->tpvalid_polarity;
lg_dbg("edge = %d, valid = %d\n", edge, valid);
@@ -253,6 +254,8 @@ static int lgdt3305_mpeg_mode_polarity(struct lgdt3305_state *state,
if (edge)
val |= 0x08;
+ if (mode)
+ val |= 0x40;
if (valid)
val |= 0x01;
@@ -740,9 +743,7 @@ static int lgdt3304_set_parameters(struct dvb_frontend *fe)
goto fail;
/* lgdt3305_mpeg_mode_polarity calls lgdt3305_soft_reset */
- ret = lgdt3305_mpeg_mode_polarity(state,
- state->cfg->tpclk_edge,
- state->cfg->tpvalid_polarity);
+ ret = lgdt3305_mpeg_mode_polarity(state);
fail:
return ret;
}
@@ -806,9 +807,7 @@ static int lgdt3305_set_parameters(struct dvb_frontend *fe)
goto fail;
/* lgdt3305_mpeg_mode_polarity calls lgdt3305_soft_reset */
- ret = lgdt3305_mpeg_mode_polarity(state,
- state->cfg->tpclk_edge,
- state->cfg->tpvalid_polarity);
+ ret = lgdt3305_mpeg_mode_polarity(state);
fail:
return ret;
}
@@ -1215,9 +1214,3 @@ MODULE_DESCRIPTION("LG Electronics LGDT3304/5 ATSC/QAM-B Demodulator Driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.2");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/lgdt3305.h b/drivers/media/dvb-frontends/lgdt3305.h
index d9ab556c1b27..9c03e530e01b 100644
--- a/drivers/media/dvb-frontends/lgdt3305.h
+++ b/drivers/media/dvb-frontends/lgdt3305.h
@@ -37,6 +37,11 @@ enum lgdt3305_tp_clock_edge {
LGDT3305_TPCLK_FALLING_EDGE = 1,
};
+enum lgdt3305_tp_clock_mode {
+ LGDT3305_TPCLK_GATED = 0,
+ LGDT3305_TPCLK_FIXED = 1,
+};
+
enum lgdt3305_tp_valid_polarity {
LGDT3305_TP_VALID_LOW = 0,
LGDT3305_TP_VALID_HIGH = 1,
@@ -70,6 +75,7 @@ struct lgdt3305_config {
enum lgdt3305_mpeg_mode mpeg_mode;
enum lgdt3305_tp_clock_edge tpclk_edge;
+ enum lgdt3305_tp_clock_mode tpclk_mode;
enum lgdt3305_tp_valid_polarity tpvalid_polarity;
enum lgdt_demod_chip_type demod_chip;
};
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index e046622df0e4..2e1a61893fc1 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -823,9 +823,3 @@ MODULE_AUTHOR("Wilson Michaels");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(lgdt330x_attach);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/lgdt330x.h b/drivers/media/dvb-frontends/lgdt330x.h
index ca0eab562e1e..8bb332219fc4 100644
--- a/drivers/media/dvb-frontends/lgdt330x.h
+++ b/drivers/media/dvb-frontends/lgdt330x.h
@@ -65,9 +65,3 @@ static inline struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config*
#endif // CONFIG_DVB_LGDT330X
#endif /* LGDT330X_H */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/lgdt330x_priv.h b/drivers/media/dvb-frontends/lgdt330x_priv.h
index 38c76695abfe..1922f09a02d0 100644
--- a/drivers/media/dvb-frontends/lgdt330x_priv.h
+++ b/drivers/media/dvb-frontends/lgdt330x_priv.h
@@ -69,9 +69,3 @@ enum I2C_REG {
};
#endif /* _LGDT330X_PRIV_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index e6f165a5b90d..8f54c39ca63f 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -22,10 +22,6 @@
#define NUM_LAYERS 3
-static int debug = 1;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-
enum mb86a20s_bandwidth {
MB86A20S_13SEG = 0,
MB86A20S_13SEG_PARTIAL = 1,
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
index da4558bce60f..e4e0b80d3091 100644
--- a/drivers/media/dvb-frontends/mn88472.h
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -33,6 +33,12 @@ struct mn88472_config {
* DVB frontend.
*/
struct dvb_frontend **fe;
+
+ /*
+ * Xtal frequency.
+ * Hz
+ */
+ u32 xtal;
};
#endif
diff --git a/drivers/media/dvb-frontends/nxt200x.h b/drivers/media/dvb-frontends/nxt200x.h
index b518d545609e..e38d01fb6c2b 100644
--- a/drivers/media/dvb-frontends/nxt200x.h
+++ b/drivers/media/dvb-frontends/nxt200x.h
@@ -55,9 +55,3 @@ static inline struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* c
#endif // CONFIG_DVB_NXT200X
#endif /* NXT200X_H */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 5ef921823c15..cbbd259eacfe 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -623,9 +623,3 @@ MODULE_AUTHOR("Trent Piepho");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(or51132_attach);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/or51132.h b/drivers/media/dvb-frontends/or51132.h
index 938958386cb1..cdb5be3c65d6 100644
--- a/drivers/media/dvb-frontends/or51132.h
+++ b/drivers/media/dvb-frontends/or51132.h
@@ -47,9 +47,3 @@ static inline struct dvb_frontend* or51132_attach(const struct or51132_config* c
#endif // CONFIG_DVB_OR51132
#endif // OR51132_H
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 50e8b63e5169..e1b8df62bd59 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -13,261 +13,154 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-
-/*
- * Driver implements own I2C-adapter for tuner I2C access. That's since chip
- * have unusual I2C-gate control which closes gate automatically after each
- * I2C transfer. Using own I2C adapter we can workaround that.
*/
#include "rtl2830_priv.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
-/* write multiple hardware registers */
-static int rtl2830_wr(struct rtl2830_priv *priv, u8 reg, const u8 *val, int len)
+/* Our regmap bypasses the I2C adapter lock, so take it ourselves. */
+static int rtl2830_bulk_write(struct i2c_client *client, unsigned int reg,
+ const void *val, size_t val_count)
{
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->cfg.i2c_addr,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
-
- if (1 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
+ i2c_lock_adapter(client->adapter);
+ ret = regmap_bulk_write(dev->regmap, reg, val, val_count);
+ i2c_unlock_adapter(client->adapter);
return ret;
}
-/* read multiple hardware registers */
-static int rtl2830_rd(struct rtl2830_priv *priv, u8 reg, u8 *val, int len)
+static int rtl2830_update_bits(struct i2c_client *client, unsigned int reg,
+ unsigned int mask, unsigned int val)
{
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- struct i2c_msg msg[2] = {
- {
- .addr = priv->cfg.i2c_addr,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = priv->cfg.i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = val,
- }
- };
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
+ i2c_lock_adapter(client->adapter);
+ ret = regmap_update_bits(dev->regmap, reg, mask, val);
+ i2c_unlock_adapter(client->adapter);
return ret;
}
-/* write multiple registers */
-static int rtl2830_wr_regs(struct rtl2830_priv *priv, u16 reg, const u8 *val,
- int len)
+static int rtl2830_bulk_read(struct i2c_client *client, unsigned int reg,
+ void *val, size_t val_count)
{
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- u8 reg2 = (reg >> 0) & 0xff;
- u8 page = (reg >> 8) & 0xff;
-
- /* switch bank if needed */
- if (page != priv->page) {
- ret = rtl2830_wr(priv, 0x00, &page, 1);
- if (ret)
- return ret;
-
- priv->page = page;
- }
-
- return rtl2830_wr(priv, reg2, val, len);
-}
-
-/* read multiple registers */
-static int rtl2830_rd_regs(struct rtl2830_priv *priv, u16 reg, u8 *val, int len)
-{
- int ret;
- u8 reg2 = (reg >> 0) & 0xff;
- u8 page = (reg >> 8) & 0xff;
-
- /* switch bank if needed */
- if (page != priv->page) {
- ret = rtl2830_wr(priv, 0x00, &page, 1);
- if (ret)
- return ret;
-
- priv->page = page;
- }
- return rtl2830_rd(priv, reg2, val, len);
-}
-
-/* read single register */
-static int rtl2830_rd_reg(struct rtl2830_priv *priv, u16 reg, u8 *val)
-{
- return rtl2830_rd_regs(priv, reg, val, 1);
-}
-
-/* write single register with mask */
-static int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
-{
- int ret;
- u8 tmp;
-
- /* no need for read if whole reg is written */
- if (mask != 0xff) {
- ret = rtl2830_rd_regs(priv, reg, &tmp, 1);
- if (ret)
- return ret;
-
- val &= mask;
- tmp &= ~mask;
- val |= tmp;
- }
-
- return rtl2830_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register with mask */
-static int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
-{
- int ret, i;
- u8 tmp;
-
- ret = rtl2830_rd_regs(priv, reg, &tmp, 1);
- if (ret)
- return ret;
-
- tmp &= mask;
-
- /* find position of the first bit */
- for (i = 0; i < 8; i++) {
- if ((mask >> i) & 0x01)
- break;
- }
- *val = tmp >> i;
-
- return 0;
+ i2c_lock_adapter(client->adapter);
+ ret = regmap_bulk_read(dev->regmap, reg, val, val_count);
+ i2c_unlock_adapter(client->adapter);
+ return ret;
}
static int rtl2830_init(struct dvb_frontend *fe)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
int ret, i;
struct rtl2830_reg_val_mask tab[] = {
- { 0x00d, 0x01, 0x03 },
- { 0x00d, 0x10, 0x10 },
- { 0x104, 0x00, 0x1e },
- { 0x105, 0x80, 0x80 },
- { 0x110, 0x02, 0x03 },
- { 0x110, 0x08, 0x0c },
- { 0x17b, 0x00, 0x40 },
- { 0x17d, 0x05, 0x0f },
- { 0x17d, 0x50, 0xf0 },
- { 0x18c, 0x08, 0x0f },
- { 0x18d, 0x00, 0xc0 },
- { 0x188, 0x05, 0x0f },
- { 0x189, 0x00, 0xfc },
- { 0x2d5, 0x02, 0x02 },
- { 0x2f1, 0x02, 0x06 },
- { 0x2f1, 0x20, 0xf8 },
- { 0x16d, 0x00, 0x01 },
- { 0x1a6, 0x00, 0x80 },
- { 0x106, priv->cfg.vtop, 0x3f },
- { 0x107, priv->cfg.krf, 0x3f },
- { 0x112, 0x28, 0xff },
- { 0x103, priv->cfg.agc_targ_val, 0xff },
- { 0x00a, 0x02, 0x07 },
- { 0x140, 0x0c, 0x3c },
- { 0x140, 0x40, 0xc0 },
- { 0x15b, 0x05, 0x07 },
- { 0x15b, 0x28, 0x38 },
- { 0x15c, 0x05, 0x07 },
- { 0x15c, 0x28, 0x38 },
- { 0x115, priv->cfg.spec_inv, 0x01 },
- { 0x16f, 0x01, 0x07 },
- { 0x170, 0x18, 0x38 },
- { 0x172, 0x0f, 0x0f },
- { 0x173, 0x08, 0x38 },
- { 0x175, 0x01, 0x07 },
- { 0x176, 0x00, 0xc0 },
+ {0x00d, 0x01, 0x03},
+ {0x00d, 0x10, 0x10},
+ {0x104, 0x00, 0x1e},
+ {0x105, 0x80, 0x80},
+ {0x110, 0x02, 0x03},
+ {0x110, 0x08, 0x0c},
+ {0x17b, 0x00, 0x40},
+ {0x17d, 0x05, 0x0f},
+ {0x17d, 0x50, 0xf0},
+ {0x18c, 0x08, 0x0f},
+ {0x18d, 0x00, 0xc0},
+ {0x188, 0x05, 0x0f},
+ {0x189, 0x00, 0xfc},
+ {0x2d5, 0x02, 0x02},
+ {0x2f1, 0x02, 0x06},
+ {0x2f1, 0x20, 0xf8},
+ {0x16d, 0x00, 0x01},
+ {0x1a6, 0x00, 0x80},
+ {0x106, dev->pdata->vtop, 0x3f},
+ {0x107, dev->pdata->krf, 0x3f},
+ {0x112, 0x28, 0xff},
+ {0x103, dev->pdata->agc_targ_val, 0xff},
+ {0x00a, 0x02, 0x07},
+ {0x140, 0x0c, 0x3c},
+ {0x140, 0x40, 0xc0},
+ {0x15b, 0x05, 0x07},
+ {0x15b, 0x28, 0x38},
+ {0x15c, 0x05, 0x07},
+ {0x15c, 0x28, 0x38},
+ {0x115, dev->pdata->spec_inv, 0x01},
+ {0x16f, 0x01, 0x07},
+ {0x170, 0x18, 0x38},
+ {0x172, 0x0f, 0x0f},
+ {0x173, 0x08, 0x38},
+ {0x175, 0x01, 0x07},
+ {0x176, 0x00, 0xc0},
};
for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = rtl2830_wr_reg_mask(priv, tab[i].reg, tab[i].val,
- tab[i].mask);
+ ret = rtl2830_update_bits(client, tab[i].reg, tab[i].mask,
+ tab[i].val);
if (ret)
goto err;
}
- ret = rtl2830_wr_regs(priv, 0x18f, "\x28\x00", 2);
+ ret = rtl2830_bulk_write(client, 0x18f, "\x28\x00", 2);
if (ret)
goto err;
- ret = rtl2830_wr_regs(priv, 0x195,
- "\x04\x06\x0a\x12\x0a\x12\x1e\x28", 8);
+ ret = rtl2830_bulk_write(client, 0x195,
+ "\x04\x06\x0a\x12\x0a\x12\x1e\x28", 8);
if (ret)
goto err;
/* TODO: spec init */
/* soft reset */
- ret = rtl2830_wr_reg_mask(priv, 0x101, 0x04, 0x04);
+ ret = rtl2830_update_bits(client, 0x101, 0x04, 0x04);
if (ret)
goto err;
- ret = rtl2830_wr_reg_mask(priv, 0x101, 0x00, 0x04);
+ ret = rtl2830_update_bits(client, 0x101, 0x04, 0x00);
if (ret)
goto err;
- priv->sleeping = false;
+ /* init stats here in order to signal the app which stats are supported */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ /* start statistics polling */
+ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
+
+ dev->sleeping = false;
return ret;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2830_sleep(struct dvb_frontend *fe)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
- priv->sleeping = true;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
+
+ dev->sleeping = true;
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&dev->stat_work);
+ dev->fe_status = 0;
+
return 0;
}
static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *s)
+ struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
s->step_size = fe->ops.info.frequency_stepsize * 2;
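From here on the driver goes through regmap, wrapping each call with i2c_lock_adapter()/i2c_unlock_adapter() because its regmap bypasses the adapter lock, and the hand-rolled masked-write helper gives way to regmap_update_bits(). A standalone sketch of the read-modify-write masking those helpers perform (the register file and update_bits below are illustrative only):

#include <stdio.h>

static unsigned char regs[256];          /* fake register file */

/* Read-modify-write with a mask, as the removed rtl2830_wr_reg_mask()
 * did by hand and regmap_update_bits() now does for the driver:
 * only the bits selected by 'mask' take the new value. */
static void update_bits(unsigned reg, unsigned char mask, unsigned char val)
{
	unsigned char tmp = regs[reg];

	tmp &= ~mask;
	tmp |= val & mask;
	regs[reg] = tmp;
}

int main(void)
{
	regs[0x08] = 0xf1;
	update_bits(0x08, 0x06, 1 << 1);     /* set bandwidth field [2:1] to 1 */
	printf("reg 0x08 = 0x%02x\n", regs[0x08]);   /* prints 0xf3 */
	return 0;
}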
@@ -278,11 +171,12 @@ static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
static int rtl2830_set_frontend(struct dvb_frontend *fe)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u64 num;
- u8 buf[3], tmp;
+ u8 buf[3], u8tmp;
u32 if_ctl, if_frequency;
static const u8 bw_params1[3][34] = {
{
@@ -308,9 +202,8 @@ static int rtl2830_set_frontend(struct dvb_frontend *fe)
{0xae, 0xba, 0xf3, 0x26, 0x66, 0x64}, /* 8 MHz */
};
- dev_dbg(&priv->i2c->dev,
- "%s: frequency=%d bandwidth_hz=%d inversion=%d\n",
- __func__, c->frequency, c->bandwidth_hz, c->inversion);
+ dev_dbg(&client->dev, "frequency=%u bandwidth_hz=%u inversion=%u\n",
+ c->frequency, c->bandwidth_hz, c->inversion);
/* program tuner */
if (fe->ops.tuner_ops.set_params)
@@ -327,11 +220,12 @@ static int rtl2830_set_frontend(struct dvb_frontend *fe)
i = 2;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid bandwidth\n", __func__);
+ dev_err(&client->dev, "invalid bandwidth_hz %u\n",
+ c->bandwidth_hz);
return -EINVAL;
}
- ret = rtl2830_wr_reg_mask(priv, 0x008, i << 1, 0x06);
+ ret = rtl2830_update_bits(client, 0x008, 0x06, i << 1);
if (ret)
goto err;
@@ -340,70 +234,71 @@ static int rtl2830_set_frontend(struct dvb_frontend *fe)
ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
else
ret = -EINVAL;
-
- if (ret < 0)
+ if (ret)
goto err;
- num = if_frequency % priv->cfg.xtal;
+ num = if_frequency % dev->pdata->clk;
num *= 0x400000;
- num = div_u64(num, priv->cfg.xtal);
+ num = div_u64(num, dev->pdata->clk);
num = -num;
if_ctl = num & 0x3fffff;
- dev_dbg(&priv->i2c->dev, "%s: if_frequency=%d if_ctl=%08x\n",
- __func__, if_frequency, if_ctl);
+ dev_dbg(&client->dev, "if_frequency=%d if_ctl=%08x\n",
+ if_frequency, if_ctl);
- ret = rtl2830_rd_reg_mask(priv, 0x119, &tmp, 0xc0); /* b[7:6] */
+ buf[0] = (if_ctl >> 16) & 0x3f;
+ buf[1] = (if_ctl >> 8) & 0xff;
+ buf[2] = (if_ctl >> 0) & 0xff;
+
+ ret = rtl2830_bulk_read(client, 0x119, &u8tmp, 1);
if (ret)
goto err;
- buf[0] = tmp << 6;
- buf[0] |= (if_ctl >> 16) & 0x3f;
- buf[1] = (if_ctl >> 8) & 0xff;
- buf[2] = (if_ctl >> 0) & 0xff;
+ buf[0] |= u8tmp & 0xc0; /* [7:6] */
- ret = rtl2830_wr_regs(priv, 0x119, buf, 3);
+ ret = rtl2830_bulk_write(client, 0x119, buf, 3);
if (ret)
goto err;
/* 1/2 split I2C write */
- ret = rtl2830_wr_regs(priv, 0x11c, &bw_params1[i][0], 17);
+ ret = rtl2830_bulk_write(client, 0x11c, &bw_params1[i][0], 17);
if (ret)
goto err;
/* 2/2 split I2C write */
- ret = rtl2830_wr_regs(priv, 0x12d, &bw_params1[i][17], 17);
+ ret = rtl2830_bulk_write(client, 0x12d, &bw_params1[i][17], 17);
if (ret)
goto err;
- ret = rtl2830_wr_regs(priv, 0x19d, bw_params2[i], 6);
+ ret = rtl2830_bulk_write(client, 0x19d, bw_params2[i], 6);
if (ret)
goto err;
return ret;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2830_get_frontend(struct dvb_frontend *fe)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[3];
- if (priv->sleeping)
+ if (dev->sleeping)
return 0;
- ret = rtl2830_rd_regs(priv, 0x33c, buf, 2);
+ ret = rtl2830_bulk_read(client, 0x33c, buf, 2);
if (ret)
goto err;
- ret = rtl2830_rd_reg(priv, 0x351, &buf[2]);
+ ret = rtl2830_bulk_read(client, 0x351, &buf[2], 1);
if (ret)
goto err;
- dev_dbg(&priv->i2c->dev, "%s: TPS=%*ph\n", __func__, 3, buf);
+ dev_dbg(&client->dev, "TPS=%*ph\n", 3, buf);
switch ((buf[0] >> 2) & 3) {
case 0:
@@ -493,280 +388,543 @@ static int rtl2830_get_frontend(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2830_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- u8 tmp;
+ u8 u8tmp;
+
*status = 0;
- if (priv->sleeping)
+ if (dev->sleeping)
return 0;
- ret = rtl2830_rd_reg_mask(priv, 0x351, &tmp, 0x78); /* [6:3] */
+ ret = rtl2830_bulk_read(client, 0x351, &u8tmp, 1);
if (ret)
goto err;
- if (tmp == 11) {
+ u8tmp = (u8tmp >> 3) & 0x0f; /* [6:3] */
+ if (u8tmp == 11) {
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
- } else if (tmp == 10) {
+ } else if (u8tmp == 10) {
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
}
+ dev->fe_status = *status;
+
return ret;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2830_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
- int ret, hierarchy, constellation;
- u8 buf[2], tmp;
- u16 tmp16;
-#define CONSTELLATION_NUM 3
-#define HIERARCHY_NUM 4
- static const u32 snr_constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
- { 70705899, 70705899, 70705899, 70705899 },
- { 82433173, 82433173, 87483115, 94445660 },
- { 92888734, 92888734, 95487525, 99770748 },
- };
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- if (priv->sleeping)
- return 0;
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ else
+ *snr = 0;
- /* reports SNR in resolution of 0.1 dB */
+ return 0;
+}
- ret = rtl2830_rd_reg(priv, 0x33c, &tmp);
- if (ret)
- goto err;
+static int rtl2830_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
- constellation = (tmp >> 2) & 0x03; /* [3:2] */
- if (constellation > CONSTELLATION_NUM - 1)
- goto err;
+ *ber = (dev->post_bit_error - dev->post_bit_error_prev);
+ dev->post_bit_error_prev = dev->post_bit_error;
- hierarchy = (tmp >> 4) & 0x07; /* [6:4] */
- if (hierarchy > HIERARCHY_NUM - 1)
- goto err;
+ return 0;
+}
- ret = rtl2830_rd_regs(priv, 0x40c, buf, 2);
- if (ret)
- goto err;
+static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ *ucblocks = 0;
- tmp16 = buf[0] << 8 | buf[1];
+ return 0;
+}
+
+static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- if (tmp16)
- *snr = (snr_constant[constellation][hierarchy] -
- intlog10(tmp16)) / ((1 << 24) / 100);
+ if (c->strength.stat[0].scale == FE_SCALE_RELATIVE)
+ *strength = c->strength.stat[0].uvalue;
else
- *snr = 0;
+ *strength = 0;
return 0;
+}
+
+static struct dvb_frontend_ops rtl2830_ops = {
+ .delsys = {SYS_DVBT},
+ .info = {
+ .name = "Realtek RTL2830 (DVB-T)",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_RECOVER |
+ FE_CAN_MUTE_TS
+ },
+
+ .init = rtl2830_init,
+ .sleep = rtl2830_sleep,
+
+ .get_tune_settings = rtl2830_get_tune_settings,
+
+ .set_frontend = rtl2830_set_frontend,
+ .get_frontend = rtl2830_get_frontend,
+
+ .read_status = rtl2830_read_status,
+ .read_snr = rtl2830_read_snr,
+ .read_ber = rtl2830_read_ber,
+ .read_ucblocks = rtl2830_read_ucblocks,
+ .read_signal_strength = rtl2830_read_signal_strength,
+};
+
+static void rtl2830_stat_work(struct work_struct *work)
+{
+ struct rtl2830_dev *dev = container_of(work, struct rtl2830_dev, stat_work.work);
+ struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ int ret, tmp;
+ u8 u8tmp, buf[2];
+ u16 u16tmp;
+
+ dev_dbg(&client->dev, "\n");
+
+ /* signal strength */
+ if (dev->fe_status & FE_HAS_SIGNAL) {
+ struct {signed int x:14; } s;
+
+ /* read IF AGC */
+ ret = rtl2830_bulk_read(client, 0x359, buf, 2);
+ if (ret)
+ goto err;
+
+ u16tmp = buf[0] << 8 | buf[1] << 0;
+ u16tmp &= 0x3fff; /* [13:0] */
+ tmp = s.x = u16tmp; /* sign-extend 14-bit two's-complement value */
+ u16tmp = clamp_val(-4 * tmp + 32767, 0x0000, 0xffff);
+
+ dev_dbg(&client->dev, "IF AGC=%d\n", tmp);
+
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = u16tmp;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (dev->fe_status & FE_HAS_VITERBI) {
+ unsigned hierarchy, constellation;
+ #define CONSTELLATION_NUM 3
+ #define HIERARCHY_NUM 4
+ static const u32 constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
+ {70705899, 70705899, 70705899, 70705899},
+ {82433173, 82433173, 87483115, 94445660},
+ {92888734, 92888734, 95487525, 99770748},
+ };
+
+ ret = rtl2830_bulk_read(client, 0x33c, &u8tmp, 1);
+ if (ret)
+ goto err;
+
+ constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
+ if (constellation > CONSTELLATION_NUM - 1)
+ goto err_schedule_delayed_work;
+
+ hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */
+ if (hierarchy > HIERARCHY_NUM - 1)
+ goto err_schedule_delayed_work;
+
+ ret = rtl2830_bulk_read(client, 0x40c, buf, 2);
+ if (ret)
+ goto err;
+
+ u16tmp = buf[0] << 8 | buf[1] << 0;
+ if (u16tmp)
+ tmp = (constant[constellation][hierarchy] -
+ intlog10(u16tmp)) / ((1 << 24) / 10000);
+ else
+ tmp = 0;
+
+ dev_dbg(&client->dev, "CNR raw=%u\n", u16tmp);
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = tmp;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* BER */
+ if (dev->fe_status & FE_HAS_LOCK) {
+ ret = rtl2830_bulk_read(client, 0x34e, buf, 2);
+ if (ret)
+ goto err;
+
+ u16tmp = buf[0] << 8 | buf[1] << 0;
+ dev->post_bit_error += u16tmp;
+ dev->post_bit_count += 1000000;
+
+ dev_dbg(&client->dev, "BER errors=%u total=1000000\n", u16tmp);
+
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+err_schedule_delayed_work:
+ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
+ return;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
}
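For reference, a sketch of the scaling behind the CNR computation above, assuming intlog10() from dvb_math.h returns log10(x) scaled by 2^24 (the constants K in the table are pre-scaled the same way):

\[
\mathrm{CNR}_{\text{mdB}} = \frac{K - 2^{24}\log_{10}x}{2^{24}/10000} = 10^{4}\left(\frac{K}{2^{24}} - \log_{10}x\right)
\]

so c->cnr.stat[0].svalue is stored in units of 0.001 dB, which is why rtl2830_read_snr() above divides it by 100 to report the legacy 0.1 dB resolution.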
-static int rtl2830_read_ber(struct dvb_frontend *fe, u32 *ber)
+static int rtl2830_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
int ret;
- u8 buf[2];
+ u8 u8tmp;
- if (priv->sleeping)
- return 0;
+ dev_dbg(&client->dev, "onoff=%d\n", onoff);
+
+ /* enable / disable PID filter */
+ if (onoff)
+ u8tmp = 0x80;
+ else
+ u8tmp = 0x00;
- ret = rtl2830_rd_regs(priv, 0x34e, buf, 2);
+ ret = rtl2830_update_bits(client, 0x061, 0x80, u8tmp);
if (ret)
goto err;
- *ber = buf[0] << 8 | buf[1];
-
return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int onoff)
{
- *ucblocks = 0;
- return 0;
-}
-
-static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
-{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- u8 buf[2];
- u16 if_agc_raw, if_agc;
+ u8 buf[4];
- if (priv->sleeping)
+ dev_dbg(&client->dev, "index=%d pid=%04x onoff=%d\n",
+ index, pid, onoff);
+
+ /* skip invalid PIDs (0x2000) */
+ if (pid > 0x1fff || index > 32)
return 0;
- ret = rtl2830_rd_regs(priv, 0x359, buf, 2);
+ if (onoff)
+ set_bit(index, &dev->filters);
+ else
+ clear_bit(index, &dev->filters);
+
+ /* enable / disable PIDs */
+ buf[0] = (dev->filters >> 0) & 0xff;
+ buf[1] = (dev->filters >> 8) & 0xff;
+ buf[2] = (dev->filters >> 16) & 0xff;
+ buf[3] = (dev->filters >> 24) & 0xff;
+ ret = rtl2830_bulk_write(client, 0x062, buf, 4);
if (ret)
goto err;
- if_agc_raw = (buf[0] << 8 | buf[1]) & 0x3fff;
-
- if (if_agc_raw & (1 << 9))
- if_agc = -(~(if_agc_raw - 1) & 0x1ff);
- else
- if_agc = if_agc_raw;
-
- *strength = (u8) (55 - if_agc / 182);
- *strength |= *strength << 8;
+ /* add PID */
+ buf[0] = (pid >> 8) & 0xff;
+ buf[1] = (pid >> 0) & 0xff;
+ ret = rtl2830_bulk_write(client, 0x066 + 2 * index, buf, 2);
+ if (ret)
+ goto err;
return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
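As a concrete illustration of the register layout used above (values follow directly from the code, the call itself is hypothetical):

/*
 * Worked example: rtl2830_pid_filter(fe, 0, 0x0100, 1) on an otherwise empty
 * filter sets bit 0 of dev->filters and performs two writes:
 *
 *   0x062..0x065 <- 01 00 00 00   (filter enable bitmap, LSB first)
 *   0x066..0x067 <- 01 00         (PID table slot 0, PID high byte first)
 */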
-static struct dvb_frontend_ops rtl2830_ops;
-
-static u32 rtl2830_tuner_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C;
-}
-
-static int rtl2830_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg msg[], int num)
+/*
+ * I2C gate/mux/repeater logic
+ * We must use the unlocked __i2c_transfer() here (through regmap) because the
+ * I2C adapter lock is already taken by the tuner driver.
+ * The gate is closed automatically after a single I2C transfer.
+ */
+static int rtl2830_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
{
- struct rtl2830_priv *priv = i2c_get_adapdata(i2c_adap);
+ struct i2c_client *client = mux_priv;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- /* open i2c-gate */
- ret = rtl2830_wr_reg_mask(priv, 0x101, 0x08, 0x08);
+ dev_dbg(&client->dev, "\n");
+
+ /* open the I2C repeater for one transfer; it closes automatically */
+ /* XXX: regmap_update_bits() does not lock the I2C adapter */
+ ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x08);
if (ret)
goto err;
- ret = i2c_transfer(priv->i2c, msg, num);
- if (ret < 0)
- dev_warn(&priv->i2c->dev, "%s: tuner i2c failed=%d\n",
- KBUILD_MODNAME, ret);
-
- return ret;
+ return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
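A minimal usage sketch (tuner address and names are illustrative, not from this patch): a tuner driver behind the gate simply issues a normal transfer on the muxed adapter created in probe(); the I2C mux core calls rtl2830_select() first, so the repeater is open for exactly that one message and then closes itself.

static int example_tuner_read_reg(struct i2c_adapter *muxed_adap, u8 tuner_addr,
				  u8 *val)
{
	struct i2c_msg msg = {
		.addr = tuner_addr,	/* hypothetical tuner address */
		.flags = I2C_M_RD,
		.len = 1,
		.buf = val,
	};

	/* muxed_adap is the adapter returned by rtl2830_get_i2c_adapter() */
	return i2c_transfer(muxed_adap, &msg, 1) == 1 ? 0 : -EREMOTEIO;
}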
-static struct i2c_algorithm rtl2830_tuner_i2c_algo = {
- .master_xfer = rtl2830_tuner_i2c_xfer,
- .functionality = rtl2830_tuner_i2c_func,
-};
+static struct dvb_frontend *rtl2830_get_dvb_frontend(struct i2c_client *client)
+{
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
-struct i2c_adapter *rtl2830_get_tuner_i2c_adapter(struct dvb_frontend *fe)
+ dev_dbg(&client->dev, "\n");
+
+ return &dev->fe;
+}
+
+static struct i2c_adapter *rtl2830_get_i2c_adapter(struct i2c_client *client)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
- return &priv->tuner_i2c_adapter;
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ return dev->adapter;
}
-EXPORT_SYMBOL(rtl2830_get_tuner_i2c_adapter);
-static void rtl2830_release(struct dvb_frontend *fe)
+/*
+ * We implement our own I2C access routines for regmap in order to get manual
+ * control over the I2C adapter lock, which is needed for the I2C mux adapter.
+ */
+static int rtl2830_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
{
- struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = context;
+ int ret;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = reg_size,
+ .buf = (u8 *)reg_buf,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = val_size,
+ .buf = val_buf,
+ }
+ };
- i2c_del_adapter(&priv->tuner_i2c_adapter);
- kfree(priv);
+ ret = __i2c_transfer(client->adapter, msg, 2);
+ if (ret != 2) {
+ dev_warn(&client->dev, "i2c reg read failed %d\n", ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+ return ret;
+ }
+ return 0;
}
-struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg,
- struct i2c_adapter *i2c)
+static int rtl2830_regmap_write(void *context, const void *data, size_t count)
{
- struct rtl2830_priv *priv = NULL;
- int ret = 0;
- u8 tmp;
+ struct i2c_client *client = context;
+ int ret;
+ struct i2c_msg msg[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = count,
+ .buf = (u8 *)data,
+ }
+ };
+
+ ret = __i2c_transfer(client->adapter, msg, 1);
+ if (ret != 1) {
+ dev_warn(&client->dev, "i2c reg write failed %d\n", ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+ return ret;
+ }
+ return 0;
+}
+
+static int rtl2830_regmap_gather_write(void *context, const void *reg,
+ size_t reg_len, const void *val,
+ size_t val_len)
+{
+ struct i2c_client *client = context;
+ int ret;
+ u8 buf[256];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1 + val_len,
+ .buf = buf,
+ }
+ };
+
+ buf[0] = *(u8 const *)reg;
+ memcpy(&buf[1], val, val_len);
+
+ ret = __i2c_transfer(client->adapter, msg, 1);
+ if (ret != 1) {
+ dev_warn(&client->dev, "i2c reg write failed %d\n", ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+ return ret;
+ }
+ return 0;
+}
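The three bus ops above issue raw, unlocked __i2c_transfer() calls; the adapter locking is expected to happen in the bulk helpers. A sketch of rtl2830_bulk_read() under that assumption (the helper sits earlier in this file, outside the quoted hunk; the rtl2832 helpers further down in this patch follow exactly this pattern, and rtl2830_bulk_write()/rtl2830_update_bits() would be analogous):

static int rtl2830_bulk_read(struct i2c_client *client, unsigned int reg,
			     void *val, size_t val_count)
{
	struct rtl2830_dev *dev = i2c_get_clientdata(client);
	int ret;

	/* take the adapter lock manually, then let regmap use __i2c_transfer() */
	i2c_lock_adapter(client->adapter);
	ret = regmap_bulk_read(dev->regmap, reg, val, val_count);
	i2c_unlock_adapter(client->adapter);
	return ret;
}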
+
+static int rtl2830_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rtl2830_platform_data *pdata = client->dev.platform_data;
+ struct rtl2830_dev *dev;
+ int ret;
+ u8 u8tmp;
+ static const struct regmap_bus regmap_bus = {
+ .read = rtl2830_regmap_read,
+ .write = rtl2830_regmap_write,
+ .gather_write = rtl2830_regmap_gather_write,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ };
+ static const struct regmap_range_cfg regmap_range_cfg[] = {
+ {
+ .selector_reg = 0x00,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ .range_min = 0 * 0x100,
+ .range_max = 5 * 0x100,
+ },
+ };
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 5 * 0x100,
+ .ranges = regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(regmap_range_cfg),
+ };
+
+ dev_dbg(&client->dev, "\n");
+
+ if (pdata == NULL) {
+ ret = -EINVAL;
+ goto err;
+ }
/* allocate memory for the internal state */
- priv = kzalloc(sizeof(struct rtl2830_priv), GFP_KERNEL);
- if (priv == NULL)
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
goto err;
+ }
- /* setup the priv */
- priv->i2c = i2c;
- memcpy(&priv->cfg, cfg, sizeof(struct rtl2830_config));
+ /* setup the state */
+ i2c_set_clientdata(client, dev);
+ dev->client = client;
+ dev->pdata = client->dev.platform_data;
+ dev->sleeping = true;
+ INIT_DELAYED_WORK(&dev->stat_work, rtl2830_stat_work);
+ dev->regmap = regmap_init(&client->dev, &regmap_bus, client,
+ &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err_kfree;
+ }
/* check if the demod is there */
- ret = rtl2830_rd_reg(priv, 0x000, &tmp);
+ ret = rtl2830_bulk_read(client, 0x000, &u8tmp, 1);
if (ret)
- goto err;
-
- /* create dvb_frontend */
- memcpy(&priv->fe.ops, &rtl2830_ops, sizeof(struct dvb_frontend_ops));
- priv->fe.demodulator_priv = priv;
-
- /* create tuner i2c adapter */
- strlcpy(priv->tuner_i2c_adapter.name, "RTL2830 tuner I2C adapter",
- sizeof(priv->tuner_i2c_adapter.name));
- priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo;
- priv->tuner_i2c_adapter.algo_data = NULL;
- priv->tuner_i2c_adapter.dev.parent = &i2c->dev;
- i2c_set_adapdata(&priv->tuner_i2c_adapter, priv);
- if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) {
- dev_err(&i2c->dev,
- "%s: tuner i2c bus could not be initialized\n",
- KBUILD_MODNAME);
- goto err;
+ goto err_regmap_exit;
+
+ /* create muxed i2c adapter for tuner */
+ dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
+ client, 0, 0, 0, rtl2830_select, NULL);
+ if (dev->adapter == NULL) {
+ ret = -ENODEV;
+ goto err_regmap_exit;
}
- priv->sleeping = true;
+ /* create dvb frontend */
+ memcpy(&dev->fe.ops, &rtl2830_ops, sizeof(dev->fe.ops));
+ dev->fe.demodulator_priv = client;
+
+ /* setup callbacks */
+ pdata->get_dvb_frontend = rtl2830_get_dvb_frontend;
+ pdata->get_i2c_adapter = rtl2830_get_i2c_adapter;
+ pdata->pid_filter = rtl2830_pid_filter;
+ pdata->pid_filter_ctrl = rtl2830_pid_filter_ctrl;
- return &priv->fe;
+ dev_info(&client->dev, "Realtek RTL2830 successfully attached\n");
+
+ return 0;
+err_regmap_exit:
+ regmap_exit(dev->regmap);
+err_kfree:
+ kfree(dev);
err:
- dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
- kfree(priv);
- return NULL;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
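A hedged sketch of how a bridge driver is expected to consume this probe-based interface; the I2C address, clock and AGC values are illustrative only, and the platform data must outlive the client because probe() stores the pointer:

struct example_bridge_state {
	struct rtl2830_platform_data pdata;	/* must not live on the stack */
	struct i2c_client *demod_client;
	struct dvb_frontend *fe;
};

static int example_bridge_frontend_attach(struct example_bridge_state *st,
					   struct i2c_adapter *demod_i2c)
{
	struct i2c_board_info board_info = {
		.type = "rtl2830",
		.addr = 0x10,			/* hypothetical demod address */
	};

	st->pdata.clk = 28800000;		/* must match the board crystal */
	st->pdata.spec_inv = 1;
	st->pdata.vtop = 0x20;			/* illustrative AGC settings */
	st->pdata.krf = 0x04;
	st->pdata.agc_targ_val = 0x2d;
	board_info.platform_data = &st->pdata;

	st->demod_client = i2c_new_device(demod_i2c, &board_info);
	if (st->demod_client == NULL || st->demod_client->dev.driver == NULL)
		return -ENODEV;

	/* rtl2830_probe() filled in the platform data callbacks */
	st->fe = st->pdata.get_dvb_frontend(st->demod_client);
	return 0;
}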
-EXPORT_SYMBOL(rtl2830_attach);
-static struct dvb_frontend_ops rtl2830_ops = {
- .delsys = { SYS_DVBT },
- .info = {
- .name = "Realtek RTL2830 (DVB-T)",
- .caps = FE_CAN_FEC_1_2 |
- FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_RECOVER |
- FE_CAN_MUTE_TS
- },
+static int rtl2830_remove(struct i2c_client *client)
+{
+ struct rtl2830_dev *dev = i2c_get_clientdata(client);
- .release = rtl2830_release,
+ dev_dbg(&client->dev, "\n");
- .init = rtl2830_init,
- .sleep = rtl2830_sleep,
+ i2c_del_mux_adapter(dev->adapter);
+ regmap_exit(dev->regmap);
+ kfree(dev);
- .get_tune_settings = rtl2830_get_tune_settings,
+ return 0;
+}
- .set_frontend = rtl2830_set_frontend,
- .get_frontend = rtl2830_get_frontend,
+static const struct i2c_device_id rtl2830_id_table[] = {
+ {"rtl2830", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, rtl2830_id_table);
- .read_status = rtl2830_read_status,
- .read_snr = rtl2830_read_snr,
- .read_ber = rtl2830_read_ber,
- .read_ucblocks = rtl2830_read_ucblocks,
- .read_signal_strength = rtl2830_read_signal_strength,
+static struct i2c_driver rtl2830_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rtl2830",
+ },
+ .probe = rtl2830_probe,
+ .remove = rtl2830_remove,
+ .id_table = rtl2830_id_table,
};
+module_i2c_driver(rtl2830_driver);
+
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Realtek RTL2830 DVB-T demodulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/rtl2830.h b/drivers/media/dvb-frontends/rtl2830.h
index 3313847fb0be..0cde151e6608 100644
--- a/drivers/media/dvb-frontends/rtl2830.h
+++ b/drivers/media/dvb-frontends/rtl2830.h
@@ -13,78 +13,37 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef RTL2830_H
#define RTL2830_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
-struct rtl2830_config {
- /*
- * Demodulator I2C address.
- */
- u8 i2c_addr;
-
- /*
- * Xtal frequency.
- * Hz
- * 4000000, 16000000, 25000000, 28800000
- */
- u32 xtal;
-
- /*
- * TS output mode.
- */
- u8 ts_mode;
+/**
+ * struct rtl2830_platform_data - Platform data for the rtl2830 driver
+ * @clk: Clock frequency (4000000, 16000000, 25000000, 28800000).
+ * @spec_inv: Spectrum inversion.
+ * @vtop: AGC take-over point.
+ * @krf: AGC ratio.
+ * @agc_targ_val: AGC target value.
+ * @get_dvb_frontend: Get DVB frontend.
+ * @get_i2c_adapter: Get I2C adapter.
+ * @pid_filter: Set a PID to the PID filter.
+ * @pid_filter_ctrl: Control PID filter.
+ */
- /*
- * Spectrum inversion.
- */
+struct rtl2830_platform_data {
+ u32 clk;
bool spec_inv;
-
- /*
- */
u8 vtop;
-
- /*
- */
u8 krf;
-
- /*
- */
u8 agc_targ_val;
-};
-
-#if IS_ENABLED(CONFIG_DVB_RTL2830)
-extern struct dvb_frontend *rtl2830_attach(
- const struct rtl2830_config *config,
- struct i2c_adapter *i2c
-);
-extern struct i2c_adapter *rtl2830_get_tuner_i2c_adapter(
- struct dvb_frontend *fe
-);
-#else
-static inline struct dvb_frontend *rtl2830_attach(
- const struct rtl2830_config *config,
- struct i2c_adapter *i2c
-)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-
-static inline struct i2c_adapter *rtl2830_get_tuner_i2c_adapter(
- struct dvb_frontend *fe
-)
-{
- return NULL;
-}
-#endif
+ struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
+ struct i2c_adapter* (*get_i2c_adapter)(struct i2c_client *);
+ int (*pid_filter)(struct dvb_frontend *, u8, u16, int);
+ int (*pid_filter_ctrl)(struct dvb_frontend *, int);
+};
#endif /* RTL2830_H */
diff --git a/drivers/media/dvb-frontends/rtl2830_priv.h b/drivers/media/dvb-frontends/rtl2830_priv.h
index fab10ecb3c3b..d50d5376c9c5 100644
--- a/drivers/media/dvb-frontends/rtl2830_priv.h
+++ b/drivers/media/dvb-frontends/rtl2830_priv.h
@@ -13,9 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef RTL2830_PRIV_H
@@ -24,16 +21,23 @@
#include "dvb_frontend.h"
#include "dvb_math.h"
#include "rtl2830.h"
+#include <linux/i2c-mux.h>
+#include <linux/math64.h>
+#include <linux/regmap.h>
-struct rtl2830_priv {
- struct i2c_adapter *i2c;
+struct rtl2830_dev {
+ struct rtl2830_platform_data *pdata;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct i2c_adapter *adapter;
struct dvb_frontend fe;
- struct rtl2830_config cfg;
- struct i2c_adapter tuner_i2c_adapter;
-
bool sleeping;
-
- u8 page; /* active register page */
+ unsigned long filters;
+ struct delayed_work stat_work;
+ fe_status_t fe_status;
+ u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
+ u64 post_bit_error;
+ u64 post_bit_count;
};
struct rtl2830_reg_val_mask {
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 9026e1aee163..5d2d8f45b4b6 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -2,6 +2,7 @@
* Realtek RTL2832 DVB-T demodulator driver
*
* Copyright (C) 2012 Thomas Mair <thomas.mair86@gmail.com>
+ * Copyright (C) 2012-2014 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,280 +20,191 @@
*/
#include "rtl2832_priv.h"
-#include "dvb_math.h"
-#include <linux/bitops.h>
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
#define REG_MASK(b) (BIT(b + 1) - 1)
static const struct rtl2832_reg_entry registers[] = {
- [DVBT_SOFT_RST] = {0x1, 0x1, 2, 2},
- [DVBT_IIC_REPEAT] = {0x1, 0x1, 3, 3},
- [DVBT_TR_WAIT_MIN_8K] = {0x1, 0x88, 11, 2},
- [DVBT_RSD_BER_FAIL_VAL] = {0x1, 0x8f, 15, 0},
- [DVBT_EN_BK_TRK] = {0x1, 0xa6, 7, 7},
- [DVBT_AD_EN_REG] = {0x0, 0x8, 7, 7},
- [DVBT_AD_EN_REG1] = {0x0, 0x8, 6, 6},
- [DVBT_EN_BBIN] = {0x1, 0xb1, 0, 0},
- [DVBT_MGD_THD0] = {0x1, 0x95, 7, 0},
- [DVBT_MGD_THD1] = {0x1, 0x96, 7, 0},
- [DVBT_MGD_THD2] = {0x1, 0x97, 7, 0},
- [DVBT_MGD_THD3] = {0x1, 0x98, 7, 0},
- [DVBT_MGD_THD4] = {0x1, 0x99, 7, 0},
- [DVBT_MGD_THD5] = {0x1, 0x9a, 7, 0},
- [DVBT_MGD_THD6] = {0x1, 0x9b, 7, 0},
- [DVBT_MGD_THD7] = {0x1, 0x9c, 7, 0},
- [DVBT_EN_CACQ_NOTCH] = {0x1, 0x61, 4, 4},
- [DVBT_AD_AV_REF] = {0x0, 0x9, 6, 0},
- [DVBT_REG_PI] = {0x0, 0xa, 2, 0},
- [DVBT_PIP_ON] = {0x0, 0x21, 3, 3},
- [DVBT_SCALE1_B92] = {0x2, 0x92, 7, 0},
- [DVBT_SCALE1_B93] = {0x2, 0x93, 7, 0},
- [DVBT_SCALE1_BA7] = {0x2, 0xa7, 7, 0},
- [DVBT_SCALE1_BA9] = {0x2, 0xa9, 7, 0},
- [DVBT_SCALE1_BAA] = {0x2, 0xaa, 7, 0},
- [DVBT_SCALE1_BAB] = {0x2, 0xab, 7, 0},
- [DVBT_SCALE1_BAC] = {0x2, 0xac, 7, 0},
- [DVBT_SCALE1_BB0] = {0x2, 0xb0, 7, 0},
- [DVBT_SCALE1_BB1] = {0x2, 0xb1, 7, 0},
- [DVBT_KB_P1] = {0x1, 0x64, 3, 1},
- [DVBT_KB_P2] = {0x1, 0x64, 6, 4},
- [DVBT_KB_P3] = {0x1, 0x65, 2, 0},
- [DVBT_OPT_ADC_IQ] = {0x0, 0x6, 5, 4},
- [DVBT_AD_AVI] = {0x0, 0x9, 1, 0},
- [DVBT_AD_AVQ] = {0x0, 0x9, 3, 2},
- [DVBT_K1_CR_STEP12] = {0x2, 0xad, 9, 4},
- [DVBT_TRK_KS_P2] = {0x1, 0x6f, 2, 0},
- [DVBT_TRK_KS_I2] = {0x1, 0x70, 5, 3},
- [DVBT_TR_THD_SET2] = {0x1, 0x72, 3, 0},
- [DVBT_TRK_KC_P2] = {0x1, 0x73, 5, 3},
- [DVBT_TRK_KC_I2] = {0x1, 0x75, 2, 0},
- [DVBT_CR_THD_SET2] = {0x1, 0x76, 7, 6},
- [DVBT_PSET_IFFREQ] = {0x1, 0x19, 21, 0},
- [DVBT_SPEC_INV] = {0x1, 0x15, 0, 0},
- [DVBT_RSAMP_RATIO] = {0x1, 0x9f, 27, 2},
- [DVBT_CFREQ_OFF_RATIO] = {0x1, 0x9d, 23, 4},
- [DVBT_FSM_STAGE] = {0x3, 0x51, 6, 3},
- [DVBT_RX_CONSTEL] = {0x3, 0x3c, 3, 2},
- [DVBT_RX_HIER] = {0x3, 0x3c, 6, 4},
- [DVBT_RX_C_RATE_LP] = {0x3, 0x3d, 2, 0},
- [DVBT_RX_C_RATE_HP] = {0x3, 0x3d, 5, 3},
- [DVBT_GI_IDX] = {0x3, 0x51, 1, 0},
- [DVBT_FFT_MODE_IDX] = {0x3, 0x51, 2, 2},
- [DVBT_RSD_BER_EST] = {0x3, 0x4e, 15, 0},
- [DVBT_CE_EST_EVM] = {0x4, 0xc, 15, 0},
- [DVBT_RF_AGC_VAL] = {0x3, 0x5b, 13, 0},
- [DVBT_IF_AGC_VAL] = {0x3, 0x59, 13, 0},
- [DVBT_DAGC_VAL] = {0x3, 0x5, 7, 0},
- [DVBT_SFREQ_OFF] = {0x3, 0x18, 13, 0},
- [DVBT_CFREQ_OFF] = {0x3, 0x5f, 17, 0},
- [DVBT_POLAR_RF_AGC] = {0x0, 0xe, 1, 1},
- [DVBT_POLAR_IF_AGC] = {0x0, 0xe, 0, 0},
- [DVBT_AAGC_HOLD] = {0x1, 0x4, 5, 5},
- [DVBT_EN_RF_AGC] = {0x1, 0x4, 6, 6},
- [DVBT_EN_IF_AGC] = {0x1, 0x4, 7, 7},
- [DVBT_IF_AGC_MIN] = {0x1, 0x8, 7, 0},
- [DVBT_IF_AGC_MAX] = {0x1, 0x9, 7, 0},
- [DVBT_RF_AGC_MIN] = {0x1, 0xa, 7, 0},
- [DVBT_RF_AGC_MAX] = {0x1, 0xb, 7, 0},
- [DVBT_IF_AGC_MAN] = {0x1, 0xc, 6, 6},
- [DVBT_IF_AGC_MAN_VAL] = {0x1, 0xc, 13, 0},
- [DVBT_RF_AGC_MAN] = {0x1, 0xe, 6, 6},
- [DVBT_RF_AGC_MAN_VAL] = {0x1, 0xe, 13, 0},
- [DVBT_DAGC_TRG_VAL] = {0x1, 0x12, 7, 0},
- [DVBT_AGC_TARG_VAL_0] = {0x1, 0x2, 0, 0},
- [DVBT_AGC_TARG_VAL_8_1] = {0x1, 0x3, 7, 0},
- [DVBT_AAGC_LOOP_GAIN] = {0x1, 0xc7, 5, 1},
- [DVBT_LOOP_GAIN2_3_0] = {0x1, 0x4, 4, 1},
- [DVBT_LOOP_GAIN2_4] = {0x1, 0x5, 7, 7},
- [DVBT_LOOP_GAIN3] = {0x1, 0xc8, 4, 0},
- [DVBT_VTOP1] = {0x1, 0x6, 5, 0},
- [DVBT_VTOP2] = {0x1, 0xc9, 5, 0},
- [DVBT_VTOP3] = {0x1, 0xca, 5, 0},
- [DVBT_KRF1] = {0x1, 0xcb, 7, 0},
- [DVBT_KRF2] = {0x1, 0x7, 7, 0},
- [DVBT_KRF3] = {0x1, 0xcd, 7, 0},
- [DVBT_KRF4] = {0x1, 0xce, 7, 0},
- [DVBT_EN_GI_PGA] = {0x1, 0xe5, 0, 0},
- [DVBT_THD_LOCK_UP] = {0x1, 0xd9, 8, 0},
- [DVBT_THD_LOCK_DW] = {0x1, 0xdb, 8, 0},
- [DVBT_THD_UP1] = {0x1, 0xdd, 7, 0},
- [DVBT_THD_DW1] = {0x1, 0xde, 7, 0},
- [DVBT_INTER_CNT_LEN] = {0x1, 0xd8, 3, 0},
- [DVBT_GI_PGA_STATE] = {0x1, 0xe6, 3, 3},
- [DVBT_EN_AGC_PGA] = {0x1, 0xd7, 0, 0},
- [DVBT_CKOUTPAR] = {0x1, 0x7b, 5, 5},
- [DVBT_CKOUT_PWR] = {0x1, 0x7b, 6, 6},
- [DVBT_SYNC_DUR] = {0x1, 0x7b, 7, 7},
- [DVBT_ERR_DUR] = {0x1, 0x7c, 0, 0},
- [DVBT_SYNC_LVL] = {0x1, 0x7c, 1, 1},
- [DVBT_ERR_LVL] = {0x1, 0x7c, 2, 2},
- [DVBT_VAL_LVL] = {0x1, 0x7c, 3, 3},
- [DVBT_SERIAL] = {0x1, 0x7c, 4, 4},
- [DVBT_SER_LSB] = {0x1, 0x7c, 5, 5},
- [DVBT_CDIV_PH0] = {0x1, 0x7d, 3, 0},
- [DVBT_CDIV_PH1] = {0x1, 0x7d, 7, 4},
- [DVBT_MPEG_IO_OPT_2_2] = {0x0, 0x6, 7, 7},
- [DVBT_MPEG_IO_OPT_1_0] = {0x0, 0x7, 7, 6},
- [DVBT_CKOUTPAR_PIP] = {0x0, 0xb7, 4, 4},
- [DVBT_CKOUT_PWR_PIP] = {0x0, 0xb7, 3, 3},
- [DVBT_SYNC_LVL_PIP] = {0x0, 0xb7, 2, 2},
- [DVBT_ERR_LVL_PIP] = {0x0, 0xb7, 1, 1},
- [DVBT_VAL_LVL_PIP] = {0x0, 0xb7, 0, 0},
- [DVBT_CKOUTPAR_PID] = {0x0, 0xb9, 4, 4},
- [DVBT_CKOUT_PWR_PID] = {0x0, 0xb9, 3, 3},
- [DVBT_SYNC_LVL_PID] = {0x0, 0xb9, 2, 2},
- [DVBT_ERR_LVL_PID] = {0x0, 0xb9, 1, 1},
- [DVBT_VAL_LVL_PID] = {0x0, 0xb9, 0, 0},
- [DVBT_SM_PASS] = {0x1, 0x93, 11, 0},
- [DVBT_AD7_SETTING] = {0x0, 0x11, 15, 0},
- [DVBT_RSSI_R] = {0x3, 0x1, 6, 0},
- [DVBT_ACI_DET_IND] = {0x3, 0x12, 0, 0},
- [DVBT_REG_MON] = {0x0, 0xd, 1, 0},
- [DVBT_REG_MONSEL] = {0x0, 0xd, 2, 2},
- [DVBT_REG_GPE] = {0x0, 0xd, 7, 7},
- [DVBT_REG_GPO] = {0x0, 0x10, 0, 0},
- [DVBT_REG_4MSEL] = {0x0, 0x13, 0, 0},
+ [DVBT_SOFT_RST] = {0x101, 2, 2},
+ [DVBT_IIC_REPEAT] = {0x101, 3, 3},
+ [DVBT_TR_WAIT_MIN_8K] = {0x188, 11, 2},
+ [DVBT_RSD_BER_FAIL_VAL] = {0x18f, 15, 0},
+ [DVBT_EN_BK_TRK] = {0x1a6, 7, 7},
+ [DVBT_AD_EN_REG] = {0x008, 7, 7},
+ [DVBT_AD_EN_REG1] = {0x008, 6, 6},
+ [DVBT_EN_BBIN] = {0x1b1, 0, 0},
+ [DVBT_MGD_THD0] = {0x195, 7, 0},
+ [DVBT_MGD_THD1] = {0x196, 7, 0},
+ [DVBT_MGD_THD2] = {0x197, 7, 0},
+ [DVBT_MGD_THD3] = {0x198, 7, 0},
+ [DVBT_MGD_THD4] = {0x199, 7, 0},
+ [DVBT_MGD_THD5] = {0x19a, 7, 0},
+ [DVBT_MGD_THD6] = {0x19b, 7, 0},
+ [DVBT_MGD_THD7] = {0x19c, 7, 0},
+ [DVBT_EN_CACQ_NOTCH] = {0x161, 4, 4},
+ [DVBT_AD_AV_REF] = {0x009, 6, 0},
+ [DVBT_REG_PI] = {0x00a, 2, 0},
+ [DVBT_PIP_ON] = {0x021, 3, 3},
+ [DVBT_SCALE1_B92] = {0x292, 7, 0},
+ [DVBT_SCALE1_B93] = {0x293, 7, 0},
+ [DVBT_SCALE1_BA7] = {0x2a7, 7, 0},
+ [DVBT_SCALE1_BA9] = {0x2a9, 7, 0},
+ [DVBT_SCALE1_BAA] = {0x2aa, 7, 0},
+ [DVBT_SCALE1_BAB] = {0x2ab, 7, 0},
+ [DVBT_SCALE1_BAC] = {0x2ac, 7, 0},
+ [DVBT_SCALE1_BB0] = {0x2b0, 7, 0},
+ [DVBT_SCALE1_BB1] = {0x2b1, 7, 0},
+ [DVBT_KB_P1] = {0x164, 3, 1},
+ [DVBT_KB_P2] = {0x164, 6, 4},
+ [DVBT_KB_P3] = {0x165, 2, 0},
+ [DVBT_OPT_ADC_IQ] = {0x006, 5, 4},
+ [DVBT_AD_AVI] = {0x009, 1, 0},
+ [DVBT_AD_AVQ] = {0x009, 3, 2},
+ [DVBT_K1_CR_STEP12] = {0x2ad, 9, 4},
+ [DVBT_TRK_KS_P2] = {0x16f, 2, 0},
+ [DVBT_TRK_KS_I2] = {0x170, 5, 3},
+ [DVBT_TR_THD_SET2] = {0x172, 3, 0},
+ [DVBT_TRK_KC_P2] = {0x173, 5, 3},
+ [DVBT_TRK_KC_I2] = {0x175, 2, 0},
+ [DVBT_CR_THD_SET2] = {0x176, 7, 6},
+ [DVBT_PSET_IFFREQ] = {0x119, 21, 0},
+ [DVBT_SPEC_INV] = {0x115, 0, 0},
+ [DVBT_RSAMP_RATIO] = {0x19f, 27, 2},
+ [DVBT_CFREQ_OFF_RATIO] = {0x19d, 23, 4},
+ [DVBT_FSM_STAGE] = {0x351, 6, 3},
+ [DVBT_RX_CONSTEL] = {0x33c, 3, 2},
+ [DVBT_RX_HIER] = {0x33c, 6, 4},
+ [DVBT_RX_C_RATE_LP] = {0x33d, 2, 0},
+ [DVBT_RX_C_RATE_HP] = {0x33d, 5, 3},
+ [DVBT_GI_IDX] = {0x351, 1, 0},
+ [DVBT_FFT_MODE_IDX] = {0x351, 2, 2},
+ [DVBT_RSD_BER_EST] = {0x34e, 15, 0},
+ [DVBT_CE_EST_EVM] = {0x40c, 15, 0},
+ [DVBT_RF_AGC_VAL] = {0x35b, 13, 0},
+ [DVBT_IF_AGC_VAL] = {0x359, 13, 0},
+ [DVBT_DAGC_VAL] = {0x305, 7, 0},
+ [DVBT_SFREQ_OFF] = {0x318, 13, 0},
+ [DVBT_CFREQ_OFF] = {0x35f, 17, 0},
+ [DVBT_POLAR_RF_AGC] = {0x00e, 1, 1},
+ [DVBT_POLAR_IF_AGC] = {0x00e, 0, 0},
+ [DVBT_AAGC_HOLD] = {0x104, 5, 5},
+ [DVBT_EN_RF_AGC] = {0x104, 6, 6},
+ [DVBT_EN_IF_AGC] = {0x104, 7, 7},
+ [DVBT_IF_AGC_MIN] = {0x108, 7, 0},
+ [DVBT_IF_AGC_MAX] = {0x109, 7, 0},
+ [DVBT_RF_AGC_MIN] = {0x10a, 7, 0},
+ [DVBT_RF_AGC_MAX] = {0x10b, 7, 0},
+ [DVBT_IF_AGC_MAN] = {0x10c, 6, 6},
+ [DVBT_IF_AGC_MAN_VAL] = {0x10c, 13, 0},
+ [DVBT_RF_AGC_MAN] = {0x10e, 6, 6},
+ [DVBT_RF_AGC_MAN_VAL] = {0x10e, 13, 0},
+ [DVBT_DAGC_TRG_VAL] = {0x112, 7, 0},
+ [DVBT_AGC_TARG_VAL_0] = {0x102, 0, 0},
+ [DVBT_AGC_TARG_VAL_8_1] = {0x103, 7, 0},
+ [DVBT_AAGC_LOOP_GAIN] = {0x1c7, 5, 1},
+ [DVBT_LOOP_GAIN2_3_0] = {0x104, 4, 1},
+ [DVBT_LOOP_GAIN2_4] = {0x105, 7, 7},
+ [DVBT_LOOP_GAIN3] = {0x1c8, 4, 0},
+ [DVBT_VTOP1] = {0x106, 5, 0},
+ [DVBT_VTOP2] = {0x1c9, 5, 0},
+ [DVBT_VTOP3] = {0x1ca, 5, 0},
+ [DVBT_KRF1] = {0x1cb, 7, 0},
+ [DVBT_KRF2] = {0x107, 7, 0},
+ [DVBT_KRF3] = {0x1cd, 7, 0},
+ [DVBT_KRF4] = {0x1ce, 7, 0},
+ [DVBT_EN_GI_PGA] = {0x1e5, 0, 0},
+ [DVBT_THD_LOCK_UP] = {0x1d9, 8, 0},
+ [DVBT_THD_LOCK_DW] = {0x1db, 8, 0},
+ [DVBT_THD_UP1] = {0x1dd, 7, 0},
+ [DVBT_THD_DW1] = {0x1de, 7, 0},
+ [DVBT_INTER_CNT_LEN] = {0x1d8, 3, 0},
+ [DVBT_GI_PGA_STATE] = {0x1e6, 3, 3},
+ [DVBT_EN_AGC_PGA] = {0x1d7, 0, 0},
+ [DVBT_CKOUTPAR] = {0x17b, 5, 5},
+ [DVBT_CKOUT_PWR] = {0x17b, 6, 6},
+ [DVBT_SYNC_DUR] = {0x17b, 7, 7},
+ [DVBT_ERR_DUR] = {0x17c, 0, 0},
+ [DVBT_SYNC_LVL] = {0x17c, 1, 1},
+ [DVBT_ERR_LVL] = {0x17c, 2, 2},
+ [DVBT_VAL_LVL] = {0x17c, 3, 3},
+ [DVBT_SERIAL] = {0x17c, 4, 4},
+ [DVBT_SER_LSB] = {0x17c, 5, 5},
+ [DVBT_CDIV_PH0] = {0x17d, 3, 0},
+ [DVBT_CDIV_PH1] = {0x17d, 7, 4},
+ [DVBT_MPEG_IO_OPT_2_2] = {0x006, 7, 7},
+ [DVBT_MPEG_IO_OPT_1_0] = {0x007, 7, 6},
+ [DVBT_CKOUTPAR_PIP] = {0x0b7, 4, 4},
+ [DVBT_CKOUT_PWR_PIP] = {0x0b7, 3, 3},
+ [DVBT_SYNC_LVL_PIP] = {0x0b7, 2, 2},
+ [DVBT_ERR_LVL_PIP] = {0x0b7, 1, 1},
+ [DVBT_VAL_LVL_PIP] = {0x0b7, 0, 0},
+ [DVBT_CKOUTPAR_PID] = {0x0b9, 4, 4},
+ [DVBT_CKOUT_PWR_PID] = {0x0b9, 3, 3},
+ [DVBT_SYNC_LVL_PID] = {0x0b9, 2, 2},
+ [DVBT_ERR_LVL_PID] = {0x0b9, 1, 1},
+ [DVBT_VAL_LVL_PID] = {0x0b9, 0, 0},
+ [DVBT_SM_PASS] = {0x193, 11, 0},
+ [DVBT_AD7_SETTING] = {0x011, 15, 0},
+ [DVBT_RSSI_R] = {0x301, 6, 0},
+ [DVBT_ACI_DET_IND] = {0x312, 0, 0},
+ [DVBT_REG_MON] = {0x00d, 1, 0},
+ [DVBT_REG_MONSEL] = {0x00d, 2, 2},
+ [DVBT_REG_GPE] = {0x00d, 7, 7},
+ [DVBT_REG_GPO] = {0x010, 0, 0},
+ [DVBT_REG_4MSEL] = {0x013, 0, 0},
};
-/* write multiple hardware registers */
-static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
+/* Our regmap bypasses the I2C adapter lock, so we take it here ourselves */
+static int rtl2832_bulk_write(struct i2c_client *client, unsigned int reg,
+ const void *val, size_t val_count)
{
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->cfg.i2c_addr,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
- if (1 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(priv->i2c_adapter, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
+ i2c_lock_adapter(client->adapter);
+ ret = regmap_bulk_write(dev->regmap, reg, val, val_count);
+ i2c_unlock_adapter(client->adapter);
return ret;
}
-/* read multiple hardware registers */
-static int rtl2832_rd(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
+static int rtl2832_update_bits(struct i2c_client *client, unsigned int reg,
+ unsigned int mask, unsigned int val)
{
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
int ret;
- struct i2c_msg msg[2] = {
- {
- .addr = priv->cfg.i2c_addr,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = priv->cfg.i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = val,
- }
- };
- ret = i2c_transfer(priv->i2c_adapter, msg, 2);
- if (ret == 2) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rd failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
+ i2c_lock_adapter(client->adapter);
+ ret = regmap_update_bits(dev->regmap, reg, mask, val);
+ i2c_unlock_adapter(client->adapter);
return ret;
}
-/* write multiple registers */
-static int rtl2832_wr_regs(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val,
- int len)
-{
- int ret;
-
- /* switch bank if needed */
- if (page != priv->page) {
- ret = rtl2832_wr(priv, 0x00, &page, 1);
- if (ret)
- return ret;
-
- priv->page = page;
-}
-
-return rtl2832_wr(priv, reg, val, len);
-}
-
-/* read multiple registers */
-static int rtl2832_rd_regs(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val,
- int len)
+static int rtl2832_bulk_read(struct i2c_client *client, unsigned int reg,
+ void *val, size_t val_count)
{
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
int ret;
- /* switch bank if needed */
- if (page != priv->page) {
- ret = rtl2832_wr(priv, 0x00, &page, 1);
- if (ret)
- return ret;
-
- priv->page = page;
- }
-
- return rtl2832_rd(priv, reg, val, len);
-}
-
-/* write single register */
-static int rtl2832_wr_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 val)
-{
- return rtl2832_wr_regs(priv, reg, page, &val, 1);
-}
-
-/* read single register */
-static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
-{
- return rtl2832_rd_regs(priv, reg, page, val, 1);
+ i2c_lock_adapter(client->adapter);
+ ret = regmap_bulk_read(dev->regmap, reg, val, val_count);
+ i2c_unlock_adapter(client->adapter);
+ return ret;
}
-static int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
+static int rtl2832_rd_demod_reg(struct rtl2832_dev *dev, int reg, u32 *val)
{
- int ret;
-
- u8 reg_start_addr;
- u8 msb, lsb;
- u8 page;
- u8 reading[4];
- u32 reading_tmp;
- int i;
-
- u8 len;
- u32 mask;
+ struct i2c_client *client = dev->client;
+ int ret, i;
+ u16 reg_start_addr;
+ u8 msb, lsb, reading[4], len;
+ u32 reading_tmp, mask;
reg_start_addr = registers[reg].start_address;
msb = registers[reg].msb;
lsb = registers[reg].lsb;
- page = registers[reg].page;
-
len = (msb >> 3) + 1;
mask = REG_MASK(msb - lsb);
- ret = rtl2832_rd_regs(priv, reg_start_addr, page, &reading[0], len);
+ ret = rtl2832_bulk_read(client, reg_start_addr, reading, len);
if (ret)
goto err;
@@ -302,40 +214,27 @@ static int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
*val = (reading_tmp >> lsb) & mask;
- return ret;
-
+ return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
-
}
-static int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
+static int rtl2832_wr_demod_reg(struct rtl2832_dev *dev, int reg, u32 val)
{
+ struct i2c_client *client = dev->client;
int ret, i;
- u8 len;
- u8 reg_start_addr;
- u8 msb, lsb;
- u8 page;
- u32 mask;
-
-
- u8 reading[4];
- u8 writing[4];
- u32 reading_tmp;
- u32 writing_tmp;
-
+ u16 reg_start_addr;
+ u8 msb, lsb, reading[4], writing[4], len;
+ u32 reading_tmp, writing_tmp, mask;
reg_start_addr = registers[reg].start_address;
msb = registers[reg].msb;
lsb = registers[reg].lsb;
- page = registers[reg].page;
-
len = (msb >> 3) + 1;
mask = REG_MASK(msb - lsb);
-
- ret = rtl2832_rd_regs(priv, reg_start_addr, page, &reading[0], len);
+ ret = rtl2832_bulk_read(client, reg_start_addr, reading, len);
if (ret)
goto err;
@@ -346,49 +245,23 @@ static int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
writing_tmp = reading_tmp & ~(mask << lsb);
writing_tmp |= ((val & mask) << lsb);
-
for (i = 0; i < len; i++)
writing[i] = (writing_tmp >> ((len - 1 - i) * 8)) & 0xff;
- ret = rtl2832_wr_regs(priv, reg_start_addr, page, &writing[0], len);
- if (ret)
- goto err;
-
- return ret;
-
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-
-}
-
-static int rtl2832_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- int ret;
- struct rtl2832_priv *priv = fe->demodulator_priv;
-
- dev_dbg(&priv->i2c->dev, "%s: enable=%d\n", __func__, enable);
-
- /* gate already open or close */
- if (priv->i2c_gate_state == enable)
- return 0;
-
- ret = rtl2832_wr_demod_reg(priv, DVBT_IIC_REPEAT, (enable ? 0x1 : 0x0));
+ ret = rtl2832_bulk_write(client, reg_start_addr, writing, len);
if (ret)
goto err;
- priv->i2c_gate_state = enable;
-
- return ret;
+ return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-
static int rtl2832_set_if(struct dvb_frontend *fe, u32 if_freq)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
int ret;
u64 pset_iffreq;
u8 en_bbin = (if_freq == 0 ? 0x1 : 0x0);
@@ -397,30 +270,35 @@ static int rtl2832_set_if(struct dvb_frontend *fe, u32 if_freq)
* PSET_IFFREQ = - floor((IfFreqHz % CrystalFreqHz) * pow(2, 22)
* / CrystalFreqHz)
*/
-
- pset_iffreq = if_freq % priv->cfg.xtal;
+ pset_iffreq = if_freq % dev->pdata->clk;
pset_iffreq *= 0x400000;
- pset_iffreq = div_u64(pset_iffreq, priv->cfg.xtal);
+ pset_iffreq = div_u64(pset_iffreq, dev->pdata->clk);
pset_iffreq = -pset_iffreq;
pset_iffreq = pset_iffreq & 0x3fffff;
- dev_dbg(&priv->i2c->dev, "%s: if_frequency=%d pset_iffreq=%08x\n",
- __func__, if_freq, (unsigned)pset_iffreq);
+ dev_dbg(&client->dev, "if_frequency=%d pset_iffreq=%08x\n",
+ if_freq, (unsigned)pset_iffreq);
- ret = rtl2832_wr_demod_reg(priv, DVBT_EN_BBIN, en_bbin);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_EN_BBIN, en_bbin);
if (ret)
- return ret;
+ goto err;
- ret = rtl2832_wr_demod_reg(priv, DVBT_PSET_IFFREQ, pset_iffreq);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_PSET_IFFREQ, pset_iffreq);
+ if (ret)
+ goto err;
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
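Restating the IF computation above, including the final 22-bit wrap that the in-code comment leaves implicit (this is a reformulation of the code, not an independent datasheet formula):

\[
\mathrm{PSET\_IFFREQ} = \left(-\left\lfloor \frac{(f_{IF} \bmod f_{clk}) \cdot 2^{22}}{f_{clk}} \right\rfloor\right) \bmod 2^{22}
\]

i.e. the negated ratio is stored as a 22-bit two's-complement value, while en_bbin selects the zero-IF baseband input when if_freq == 0.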
static int rtl2832_init(struct dvb_frontend *fe)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
const struct rtl2832_reg_value *init;
int i, ret, len;
-
/* initialization values for the demodulator registers */
struct rtl2832_reg_value rtl2832_initial_regs[] = {
{DVBT_AD_EN_REG, 0x1},
@@ -467,19 +345,19 @@ static int rtl2832_init(struct dvb_frontend *fe)
{DVBT_CR_THD_SET2, 0x1},
};
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
for (i = 0; i < ARRAY_SIZE(rtl2832_initial_regs); i++) {
- ret = rtl2832_wr_demod_reg(priv, rtl2832_initial_regs[i].reg,
+ ret = rtl2832_wr_demod_reg(dev, rtl2832_initial_regs[i].reg,
rtl2832_initial_regs[i].value);
if (ret)
goto err;
}
/* load tuner specific settings */
- dev_dbg(&priv->i2c->dev, "%s: load settings for tuner=%02x\n",
- __func__, priv->cfg.tuner);
- switch (priv->cfg.tuner) {
+ dev_dbg(&client->dev, "load settings for tuner=%02x\n",
+ dev->pdata->tuner);
+ switch (dev->pdata->tuner) {
case RTL2832_TUNER_FC0012:
case RTL2832_TUNER_FC0013:
len = ARRAY_SIZE(rtl2832_tuner_init_fc0012);
@@ -504,51 +382,60 @@ static int rtl2832_init(struct dvb_frontend *fe)
}
for (i = 0; i < len; i++) {
- ret = rtl2832_wr_demod_reg(priv, init[i].reg, init[i].value);
+ ret = rtl2832_wr_demod_reg(dev, init[i].reg, init[i].value);
if (ret)
goto err;
}
- /*
- * r820t NIM code does a software reset here at the demod -
- * may not be needed, as there's already a software reset at
- * set_params()
- */
-#if 1
- /* soft reset */
- ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
- if (ret)
- goto err;
-
- ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x0);
- if (ret)
- goto err;
-#endif
-
- priv->sleeping = false;
-
- return ret;
+ /* init stats here in order to signal the app which stats are supported */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ /* start statistics polling */
+ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
+ dev->sleeping = false;
+ return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2832_sleep(struct dvb_frontend *fe)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
+ int ret;
+
+ dev_dbg(&client->dev, "\n");
+
+ dev->sleeping = true;
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&dev->stat_work);
+ dev->fe_status = 0;
+
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
+ if (ret)
+ goto err;
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
- priv->sleeping = true;
return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
s->min_delay_ms = 1000;
s->step_size = fe->ops.info.frequency_stepsize * 2;
s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
@@ -557,7 +444,8 @@ static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
static int rtl2832_set_frontend(struct dvb_frontend *fe)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, j;
u64 bw_mode, num, num2;
@@ -588,17 +476,15 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
},
};
-
- dev_dbg(&priv->i2c->dev,
- "%s: frequency=%d bandwidth_hz=%d inversion=%d\n",
- __func__, c->frequency, c->bandwidth_hz, c->inversion);
+ dev_dbg(&client->dev, "frequency=%u bandwidth_hz=%u inversion=%u\n",
+ c->frequency, c->bandwidth_hz, c->inversion);
/* program tuner */
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
/* PIP mode related */
- ret = rtl2832_wr_regs(priv, 0x92, 1, "\x00\x0f\xff", 3);
+ ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
if (ret)
goto err;
@@ -629,12 +515,14 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
bw_mode = 64000000;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid bandwidth\n", __func__);
- return -EINVAL;
+ dev_err(&client->dev, "invalid bandwidth_hz %u\n",
+ c->bandwidth_hz);
+ ret = -EINVAL;
+ goto err;
}
for (j = 0; j < sizeof(bw_params[0]); j++) {
- ret = rtl2832_wr_regs(priv, 0x1c+j, 1, &bw_params[i][j], 1);
+ ret = rtl2832_bulk_write(client, 0x11c + j, &bw_params[i][j], 1);
if (ret)
goto err;
}
@@ -643,11 +531,11 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
* RSAMP_RATIO = floor(CrystalFreqHz * 7 * pow(2, 22)
* / ConstWithBandwidthMode)
*/
- num = priv->cfg.xtal * 7;
+ num = dev->pdata->clk * 7;
num *= 0x400000;
num = div_u64(num, bw_mode);
resamp_ratio = num & 0x3ffffff;
- ret = rtl2832_wr_demod_reg(priv, DVBT_RSAMP_RATIO, resamp_ratio);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_RSAMP_RATIO, resamp_ratio);
if (ret)
goto err;
@@ -656,48 +544,49 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
* / (CrystalFreqHz * 7))
*/
num = bw_mode << 20;
- num2 = priv->cfg.xtal * 7;
+ num2 = dev->pdata->clk * 7;
num = div_u64(num, num2);
num = -num;
cfreq_off_ratio = num & 0xfffff;
- ret = rtl2832_wr_demod_reg(priv, DVBT_CFREQ_OFF_RATIO, cfreq_off_ratio);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_CFREQ_OFF_RATIO, cfreq_off_ratio);
if (ret)
goto err;
/* soft reset */
- ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
if (ret)
goto err;
- ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x0);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
if (ret)
goto err;
- return ret;
+ return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2832_get_frontend(struct dvb_frontend *fe)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[3];
- if (priv->sleeping)
+ if (dev->sleeping)
return 0;
- ret = rtl2832_rd_regs(priv, 0x3c, 3, buf, 2);
+ ret = rtl2832_bulk_read(client, 0x33c, buf, 2);
if (ret)
goto err;
- ret = rtl2832_rd_reg(priv, 0x51, 3, &buf[2]);
+ ret = rtl2832_bulk_read(client, 0x351, &buf[2], 1);
if (ret)
goto err;
- dev_dbg(&priv->i2c->dev, "%s: TPS=%*ph\n", __func__, 3, buf);
+ dev_dbg(&client->dev, "TPS=%*ph\n", 3, buf);
switch ((buf[0] >> 2) & 3) {
case 0:
@@ -787,403 +676,652 @@ static int rtl2832_get_frontend(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2832_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
int ret;
u32 tmp;
- *status = 0;
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
- if (priv->sleeping)
+ dev_dbg(&client->dev, "\n");
+
+ *status = 0;
+ if (dev->sleeping)
return 0;
- ret = rtl2832_rd_demod_reg(priv, DVBT_FSM_STAGE, &tmp);
+ ret = rtl2832_rd_demod_reg(dev, DVBT_FSM_STAGE, &tmp);
if (ret)
goto err;
if (tmp == 11) {
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
- }
- /* TODO find out if this is also true for rtl2832? */
- /*else if (tmp == 10) {
+ } else if (tmp == 10) {
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
- }*/
+ }
- return ret;
+ dev->fe_status = *status;
+ return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2832_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
- int ret, hierarchy, constellation;
- u8 buf[2], tmp;
- u16 tmp16;
-#define CONSTELLATION_NUM 3
-#define HIERARCHY_NUM 4
- static const u32 snr_constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
- { 85387325, 85387325, 85387325, 85387325 },
- { 86676178, 86676178, 87167949, 87795660 },
- { 87659938, 87659938, 87885178, 88241743 },
- };
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- /* reports SNR in resolution of 0.1 dB */
+ /* report SNR in resolution of 0.1 dB */
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ else
+ *snr = 0;
- ret = rtl2832_rd_reg(priv, 0x3c, 3, &tmp);
- if (ret)
- goto err;
+ return 0;
+}
- constellation = (tmp >> 2) & 0x03; /* [3:2] */
- if (constellation > CONSTELLATION_NUM - 1)
- goto err;
+static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct rtl2832_dev *dev = fe->demodulator_priv;
- hierarchy = (tmp >> 4) & 0x07; /* [6:4] */
- if (hierarchy > HIERARCHY_NUM - 1)
- goto err;
+ *ber = (dev->post_bit_error - dev->post_bit_error_prev);
+ dev->post_bit_error_prev = dev->post_bit_error;
- ret = rtl2832_rd_regs(priv, 0x0c, 4, buf, 2);
- if (ret)
- goto err;
+ return 0;
+}
- tmp16 = buf[0] << 8 | buf[1];
+static void rtl2832_stat_work(struct work_struct *work)
+{
+ struct rtl2832_dev *dev = container_of(work, struct rtl2832_dev, stat_work.work);
+ struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ int ret, tmp;
+ u8 u8tmp, buf[2];
+ u16 u16tmp;
+
+ dev_dbg(&client->dev, "\n");
+
+ /* signal strength */
+ if (dev->fe_status & FE_HAS_SIGNAL) {
+ /* read digital AGC */
+ ret = rtl2832_bulk_read(client, 0x305, &u8tmp, 1);
+ if (ret)
+ goto err;
- if (tmp16)
- *snr = (snr_constant[constellation][hierarchy] -
- intlog10(tmp16)) / ((1 << 24) / 100);
- else
- *snr = 0;
+ dev_dbg(&client->dev, "digital agc=%02x", u8tmp);
- return 0;
+ u8tmp = ~u8tmp;
+ u16tmp = u8tmp << 8 | u8tmp << 0;
+
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = u16tmp;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (dev->fe_status & FE_HAS_VITERBI) {
+ unsigned hierarchy, constellation;
+ #define CONSTELLATION_NUM 3
+ #define HIERARCHY_NUM 4
+ static const u32 constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
+ {85387325, 85387325, 85387325, 85387325},
+ {86676178, 86676178, 87167949, 87795660},
+ {87659938, 87659938, 87885178, 88241743},
+ };
+
+ ret = rtl2832_bulk_read(client, 0x33c, &u8tmp, 1);
+ if (ret)
+ goto err;
+
+ constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
+ if (constellation > CONSTELLATION_NUM - 1)
+ goto err_schedule_delayed_work;
+
+ hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */
+ if (hierarchy > HIERARCHY_NUM - 1)
+ goto err_schedule_delayed_work;
+
+ ret = rtl2832_bulk_read(client, 0x40c, buf, 2);
+ if (ret)
+ goto err;
+
+ u16tmp = buf[0] << 8 | buf[1] << 0;
+ if (u16tmp)
+ tmp = (constant[constellation][hierarchy] -
+ intlog10(u16tmp)) / ((1 << 24) / 10000);
+ else
+ tmp = 0;
+
+ dev_dbg(&client->dev, "cnr raw=%u\n", u16tmp);
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = tmp;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* BER */
+ if (dev->fe_status & FE_HAS_LOCK) {
+ ret = rtl2832_bulk_read(client, 0x34e, buf, 2);
+ if (ret)
+ goto err;
+
+ u16tmp = buf[0] << 8 | buf[1] << 0;
+ dev->post_bit_error += u16tmp;
+ dev->post_bit_count += 1000000;
+
+ dev_dbg(&client->dev, "ber errors=%u total=1000000\n", u16tmp);
+
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+err_schedule_delayed_work:
+ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
+ return;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
}
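A short worked example of the relative-strength mapping above, for a digital AGC readback of 0x40 (the numbers follow directly from the code):

/*
 *   u8tmp  = ~0x40            = 0xbf
 *   u16tmp = 0xbf << 8 | 0xbf = 0xbfbf
 *   -> c->strength.stat[0].uvalue = 0xbfbf, roughly 75% of the 0xffff full scale
 */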
-static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
+/*
+ * I2C gate/mux/repeater logic
+ * We must use the unlocked __i2c_transfer() here (through regmap) because the
+ * I2C adapter lock is already taken by the tuner driver.
+ * There is a delay mechanism to avoid unneeded I2C gate open / close cycles.
+ * Gate close is delayed here a little bit in order to see whether a sequence
+ * of I2C messages is being sent to the same I2C bus.
+ */
+static void rtl2832_i2c_gate_work(struct work_struct *work)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = container_of(work, struct rtl2832_dev, i2c_gate_work.work);
+ struct i2c_client *client = dev->client;
int ret;
- u8 buf[2];
- ret = rtl2832_rd_regs(priv, 0x4e, 3, buf, 2);
+ /* close gate */
+ ret = rtl2832_update_bits(dev->client, 0x101, 0x08, 0x00);
if (ret)
goto err;
- *ber = buf[0] << 8 | buf[1];
+ return;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+}
+
+static int rtl2832_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+{
+ struct rtl2832_dev *dev = mux_priv;
+ struct i2c_client *client = dev->client;
+ int ret;
+
+ /* terminate possible gate closing */
+ cancel_delayed_work(&dev->i2c_gate_work);
+
+ /*
+ * I2C adapter lock is already taken and due to that we will use
+ * regmap_update_bits() which does not lock again I2C adapter.
+ */
+ ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x08);
+ if (ret)
+ goto err;
return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static struct dvb_frontend_ops rtl2832_ops;
+static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan_id)
+{
+ struct rtl2832_dev *dev = mux_priv;
+
+ schedule_delayed_work(&dev->i2c_gate_work, usecs_to_jiffies(100));
+ return 0;
+}
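A sketch of the resulting call sequence for two back-to-back tuner transfers (ordering inferred from the code above; the select/deselect calls are made by the I2C mux core):

/*
 * rtl2832_select()    -> gate opened (repeater bit 0x08 set)
 * __i2c_transfer()    -> tuner message 1
 * rtl2832_deselect()  -> gate close scheduled ~100 us later
 * rtl2832_select()    -> cancel_delayed_work(): gate stays open
 * __i2c_transfer()    -> tuner message 2
 * rtl2832_deselect()  -> close scheduled again; the work fires and
 *                        rtl2832_i2c_gate_work() clears bit 0x08
 */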
+
+static struct dvb_frontend_ops rtl2832_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "Realtek RTL2832 (DVB-T)",
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ .frequency_stepsize = 166667,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_RECOVER |
+ FE_CAN_MUTE_TS
+ },
-static void rtl2832_release(struct dvb_frontend *fe)
+ .init = rtl2832_init,
+ .sleep = rtl2832_sleep,
+
+ .get_tune_settings = rtl2832_get_tune_settings,
+
+ .set_frontend = rtl2832_set_frontend,
+ .get_frontend = rtl2832_get_frontend,
+
+ .read_status = rtl2832_read_status,
+ .read_snr = rtl2832_read_snr,
+ .read_ber = rtl2832_read_ber,
+};
+
+static bool rtl2832_volatile_reg(struct device *dev, unsigned int reg)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ switch (reg) {
+ case 0x305:
+ case 0x33c:
+ case 0x34e:
+ case 0x351:
+ case 0x40c ... 0x40d:
+ return true;
+ default:
+ break;
+ }
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
- cancel_delayed_work_sync(&priv->i2c_gate_work);
- i2c_del_mux_adapter(priv->i2c_adapter_tuner);
- i2c_del_mux_adapter(priv->i2c_adapter);
- kfree(priv);
+ return false;
}
/*
- * Delay mechanism to avoid unneeded I2C gate open / close. Gate close is
- * delayed here a little bit in order to see if there is sequence of I2C
- * messages sent to same I2C bus.
- * We must use unlocked version of __i2c_transfer() in order to avoid deadlock
- * as lock is already taken by calling muxed i2c_transfer().
+ * We implement our own I2C access routines for regmap in order to get manual
+ * control over the I2C adapter lock, which is needed for the I2C mux adapter.
*/
-static void rtl2832_i2c_gate_work(struct work_struct *work)
+static int rtl2832_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
{
- struct rtl2832_priv *priv = container_of(work,
- struct rtl2832_priv, i2c_gate_work.work);
- struct i2c_adapter *adap = priv->i2c;
+ struct i2c_client *client = context;
int ret;
- u8 buf[2];
- struct i2c_msg msg[1] = {
+ struct i2c_msg msg[2] = {
{
- .addr = priv->cfg.i2c_addr,
+ .addr = client->addr,
.flags = 0,
- .len = sizeof(buf),
- .buf = buf,
+ .len = reg_size,
+ .buf = (u8 *)reg_buf,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = val_size,
+ .buf = val_buf,
}
};
- /* select reg bank 1 */
- buf[0] = 0x00;
- buf[1] = 0x01;
- ret = __i2c_transfer(adap, msg, 1);
- if (ret != 1)
- goto err;
-
- priv->page = 1;
-
- /* close I2C repeater gate */
- buf[0] = 0x01;
- buf[1] = 0x10;
- ret = __i2c_transfer(adap, msg, 1);
- if (ret != 1)
- goto err;
-
- priv->i2c_gate_state = false;
-
- return;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-
- return;
+ ret = __i2c_transfer(client->adapter, msg, 2);
+ if (ret != 2) {
+ dev_warn(&client->dev, "i2c reg read failed %d\n", ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+ return ret;
+ }
+ return 0;
}
-static int rtl2832_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+static int rtl2832_regmap_write(void *context, const void *data, size_t count)
{
- struct rtl2832_priv *priv = mux_priv;
+ struct i2c_client *client = context;
int ret;
- u8 buf[2], val;
struct i2c_msg msg[1] = {
{
- .addr = priv->cfg.i2c_addr,
+ .addr = client->addr,
.flags = 0,
- .len = sizeof(buf),
- .buf = buf,
+ .len = count,
+ .buf = (u8 *)data,
}
};
- struct i2c_msg msg_rd[2] = {
+
+ ret = __i2c_transfer(client->adapter, msg, 1);
+ if (ret != 1) {
+ dev_warn(&client->dev, "i2c reg write failed %d\n", ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+ return ret;
+ }
+ return 0;
+}
+
+static int rtl2832_regmap_gather_write(void *context, const void *reg,
+ size_t reg_len, const void *val,
+ size_t val_len)
+{
+ struct i2c_client *client = context;
+ int ret;
+ u8 buf[256];
+ struct i2c_msg msg[1] = {
{
- .addr = priv->cfg.i2c_addr,
+ .addr = client->addr,
.flags = 0,
- .len = 1,
- .buf = "\x01",
- }, {
- .addr = priv->cfg.i2c_addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = &val,
+ .len = 1 + val_len,
+ .buf = buf,
}
};
- /* terminate possible gate closing */
- cancel_delayed_work_sync(&priv->i2c_gate_work);
+ buf[0] = *(u8 const *)reg;
+ memcpy(&buf[1], val, val_len);
- if (priv->i2c_gate_state == chan_id)
- return 0;
-
- /* select reg bank 1 */
- buf[0] = 0x00;
- buf[1] = 0x01;
- ret = __i2c_transfer(adap, msg, 1);
- if (ret != 1)
- goto err;
-
- priv->page = 1;
+ ret = __i2c_transfer(client->adapter, msg, 1);
+ if (ret != 1) {
+ dev_warn(&client->dev, "i2c reg write failed %d\n", ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+ return ret;
+ }
+ return 0;
+}
- /* we must read that register, otherwise there will be errors */
- ret = __i2c_transfer(adap, msg_rd, 2);
- if (ret != 2)
- goto err;
+/*
+ * FIXME: Hack. Implement our own regmap locking in order to silence a lockdep
+ * recursive lock warning. That happens when the regmap I2C client calls the
+ * I2C mux adapter, which leads to enabling the demod I2C repeater via the
+ * demod regmap. The operation takes two regmap locks recursively - but those
+ * are different regmap instances in two different I2C drivers, so it is not a
+ * deadlock. The proper fix is to make regmap aware of lockdep.
+ */
+static void rtl2832_regmap_lock(void *__dev)
+{
+ struct rtl2832_dev *dev = __dev;
+ struct i2c_client *client = dev->client;
- /* open or close I2C repeater gate */
- buf[0] = 0x01;
- if (chan_id == 1)
- buf[1] = 0x18; /* open */
- else
- buf[1] = 0x10; /* close */
+ dev_dbg(&client->dev, "\n");
+ mutex_lock(&dev->regmap_mutex);
+}
- ret = __i2c_transfer(adap, msg, 1);
- if (ret != 1)
- goto err;
+static void rtl2832_regmap_unlock(void *__dev)
+{
+ struct rtl2832_dev *dev = __dev;
+ struct i2c_client *client = dev->client;
- priv->i2c_gate_state = chan_id;
+ dev_dbg(&client->dev, "\n");
+ mutex_unlock(&dev->regmap_mutex);
+}
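A sketch of how these callbacks are presumably wired up in rtl2832_probe() (the actual rtl2832 regmap_config sits outside the quoted hunk; reg_bits/val_bits are assumed to match the rtl2830 config shown earlier, and dev->regmap_mutex would need a mutex_init() in probe as well):

	struct regmap_config regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.lock = rtl2832_regmap_lock,
		.unlock = rtl2832_regmap_unlock,
		.lock_arg = dev,	/* passed back as __dev in the callbacks */
	};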
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+static struct dvb_frontend *rtl2832_get_dvb_frontend(struct i2c_client *client)
+{
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
- return -EREMOTEIO;
+ dev_dbg(&client->dev, "\n");
+ return &dev->fe;
}
-static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
- u32 chan_id)
+static struct i2c_adapter *rtl2832_get_i2c_adapter(struct i2c_client *client)
{
- struct rtl2832_priv *priv = mux_priv;
- schedule_delayed_work(&priv->i2c_gate_work, usecs_to_jiffies(100));
- return 0;
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+ return dev->i2c_adapter_tuner;
}
-int rtl2832_enable_external_ts_if(struct dvb_frontend *fe)
+static int rtl2832_enable_slave_ts(struct i2c_client *client)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
int ret;
- dev_dbg(&priv->i2c->dev, "%s: setting PIP mode\n", __func__);
+ dev_dbg(&client->dev, "\n");
- ret = rtl2832_wr_regs(priv, 0x0c, 1, "\x5f\xff", 2);
+ ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
if (ret)
goto err;
- ret = rtl2832_wr_demod_reg(priv, DVBT_PIP_ON, 0x1);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
if (ret)
goto err;
- ret = rtl2832_wr_reg(priv, 0xbc, 0, 0x18);
+ ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
if (ret)
goto err;
- ret = rtl2832_wr_reg(priv, 0x22, 0, 0x01);
+ ret = rtl2832_bulk_write(client, 0x022, "\x01", 1);
if (ret)
goto err;
- ret = rtl2832_wr_reg(priv, 0x26, 0, 0x1f);
+ ret = rtl2832_bulk_write(client, 0x026, "\x1f", 1);
if (ret)
goto err;
- ret = rtl2832_wr_reg(priv, 0x27, 0, 0xff);
+ ret = rtl2832_bulk_write(client, 0x027, "\xff", 1);
if (ret)
goto err;
- ret = rtl2832_wr_regs(priv, 0x92, 1, "\x7f\xf7\xff", 3);
+ ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
if (ret)
goto err;
/* soft reset */
- ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
if (ret)
goto err;
- ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x0);
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
if (ret)
goto err;
return 0;
err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
-
}
-EXPORT_SYMBOL(rtl2832_enable_external_ts_if);
-struct i2c_adapter *rtl2832_get_i2c_adapter(struct dvb_frontend *fe)
+static int rtl2832_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
- return priv->i2c_adapter_tuner;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
+ int ret;
+ u8 u8tmp;
+
+ dev_dbg(&client->dev, "onoff=%d\n", onoff);
+
+ /* enable / disable PID filter */
+ if (onoff)
+ u8tmp = 0x80;
+ else
+ u8tmp = 0x00;
+
+ ret = rtl2832_update_bits(client, 0x061, 0xc0, u8tmp);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
-EXPORT_SYMBOL(rtl2832_get_i2c_adapter);
-struct i2c_adapter *rtl2832_get_private_i2c_adapter(struct dvb_frontend *fe)
+static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
+ int onoff)
{
- struct rtl2832_priv *priv = fe->demodulator_priv;
- return priv->i2c_adapter;
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
+ int ret;
+ u8 buf[4];
+
+ dev_dbg(&client->dev, "index=%d pid=%04x onoff=%d\n",
+ index, pid, onoff);
+
+ /* skip invalid PIDs (0x2000) */
+ if (pid > 0x1fff || index > 32)
+ return 0;
+
+ if (onoff)
+ set_bit(index, &dev->filters);
+ else
+ clear_bit(index, &dev->filters);
+
+ /* enable / disable PIDs */
+ buf[0] = (dev->filters >> 0) & 0xff;
+ buf[1] = (dev->filters >> 8) & 0xff;
+ buf[2] = (dev->filters >> 16) & 0xff;
+ buf[3] = (dev->filters >> 24) & 0xff;
+ ret = rtl2832_bulk_write(client, 0x062, buf, 4);
+ if (ret)
+ goto err;
+
+ /* add PID */
+ buf[0] = (pid >> 8) & 0xff;
+ buf[1] = (pid >> 0) & 0xff;
+ ret = rtl2832_bulk_write(client, 0x066 + 2 * index, buf, 2);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
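For reference, the PID filter above keeps a 32-bit enable bitmap that is written to registers 0x062..0x065 (bit N enables hardware slot N) and stores each slot's 13-bit PID big-endian at 0x066 + 2*N. A standalone sketch of that layout; the helper name is illustrative and not part of the patch:

/* Sketch only: register layout used by rtl2832_pid_filter() above. */
#include <stdio.h>

static void pid_filter_layout(unsigned int index, unsigned int pid)
{
	unsigned int enable_reg = 0x062 + index / 8;	/* bitmap byte */
	unsigned int enable_bit = index % 8;		/* bit inside that byte */
	unsigned int pid_reg    = 0x066 + 2 * index;	/* PID high byte */

	printf("slot %u: enable bit %u of reg 0x%03x, PID 0x%04x at 0x%03x/0x%03x\n",
	       index, enable_bit, enable_reg, pid, pid_reg, pid_reg + 1);
}

int main(void)
{
	pid_filter_layout(0, 0x0000);	/* e.g. PAT */
	pid_filter_layout(5, 0x1fff);
	return 0;
}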
-EXPORT_SYMBOL(rtl2832_get_private_i2c_adapter);
-struct dvb_frontend *rtl2832_attach(const struct rtl2832_config *cfg,
- struct i2c_adapter *i2c)
+static int rtl2832_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- struct rtl2832_priv *priv = NULL;
- int ret = 0;
+ struct rtl2832_platform_data *pdata = client->dev.platform_data;
+ struct i2c_adapter *i2c = client->adapter;
+ struct rtl2832_dev *dev;
+ int ret;
u8 tmp;
+ static const struct regmap_bus regmap_bus = {
+ .read = rtl2832_regmap_read,
+ .write = rtl2832_regmap_write,
+ .gather_write = rtl2832_regmap_gather_write,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ };
+ static const struct regmap_range_cfg regmap_range_cfg[] = {
+ {
+ .selector_reg = 0x00,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ .range_min = 0 * 0x100,
+ .range_max = 5 * 0x100,
+ },
+ };
- dev_dbg(&i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* allocate memory for the internal state */
- priv = kzalloc(sizeof(struct rtl2832_priv), GFP_KERNEL);
- if (priv == NULL)
+ dev = kzalloc(sizeof(struct rtl2832_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
goto err;
+ }
- /* setup the priv */
- priv->i2c = i2c;
- priv->tuner = cfg->tuner;
- memcpy(&priv->cfg, cfg, sizeof(struct rtl2832_config));
- INIT_DELAYED_WORK(&priv->i2c_gate_work, rtl2832_i2c_gate_work);
-
- /* create muxed i2c adapter for demod itself */
- priv->i2c_adapter = i2c_add_mux_adapter(i2c, &i2c->dev, priv, 0, 0, 0,
- rtl2832_select, NULL);
- if (priv->i2c_adapter == NULL)
- goto err;
+ /* setup the state */
+ i2c_set_clientdata(client, dev);
+ dev->client = client;
+ dev->pdata = client->dev.platform_data;
+ dev->sleeping = true;
+ INIT_DELAYED_WORK(&dev->i2c_gate_work, rtl2832_i2c_gate_work);
+ INIT_DELAYED_WORK(&dev->stat_work, rtl2832_stat_work);
+ /* create regmap */
+ mutex_init(&dev->regmap_mutex);
+ dev->regmap_config.reg_bits = 8,
+ dev->regmap_config.val_bits = 8,
+ dev->regmap_config.lock = rtl2832_regmap_lock,
+ dev->regmap_config.unlock = rtl2832_regmap_unlock,
+ dev->regmap_config.lock_arg = dev,
+ dev->regmap_config.volatile_reg = rtl2832_volatile_reg,
+ dev->regmap_config.max_register = 5 * 0x100,
+ dev->regmap_config.ranges = regmap_range_cfg,
+ dev->regmap_config.num_ranges = ARRAY_SIZE(regmap_range_cfg),
+ dev->regmap_config.cache_type = REGCACHE_RBTREE,
+ dev->regmap = regmap_init(&client->dev, &regmap_bus, client,
+ &dev->regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err_kfree;
+ }
/* check if the demod is there */
- ret = rtl2832_rd_reg(priv, 0x00, 0x0, &tmp);
+ ret = rtl2832_bulk_read(client, 0x000, &tmp, 1);
if (ret)
- goto err;
+ goto err_regmap_exit;
/* create muxed i2c adapter for demod tuner bus */
- priv->i2c_adapter_tuner = i2c_add_mux_adapter(i2c, &i2c->dev, priv,
- 0, 1, 0, rtl2832_select, rtl2832_deselect);
- if (priv->i2c_adapter_tuner == NULL)
- goto err;
+ dev->i2c_adapter_tuner = i2c_add_mux_adapter(i2c, &i2c->dev, dev,
+ 0, 0, 0, rtl2832_select, rtl2832_deselect);
+ if (dev->i2c_adapter_tuner == NULL) {
+ ret = -ENODEV;
+ goto err_regmap_exit;
+ }
/* create dvb_frontend */
- memcpy(&priv->fe.ops, &rtl2832_ops, sizeof(struct dvb_frontend_ops));
- priv->fe.demodulator_priv = priv;
-
- /* TODO implement sleep mode */
- priv->sleeping = true;
-
- return &priv->fe;
+ memcpy(&dev->fe.ops, &rtl2832_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = dev;
+
+ /* setup callbacks */
+ pdata->get_dvb_frontend = rtl2832_get_dvb_frontend;
+ pdata->get_i2c_adapter = rtl2832_get_i2c_adapter;
+ pdata->enable_slave_ts = rtl2832_enable_slave_ts;
+ pdata->pid_filter = rtl2832_pid_filter;
+ pdata->pid_filter_ctrl = rtl2832_pid_filter_ctrl;
+ pdata->bulk_read = rtl2832_bulk_read;
+ pdata->bulk_write = rtl2832_bulk_write;
+ pdata->update_bits = rtl2832_update_bits;
+
+ dev_info(&client->dev, "Realtek RTL2832 successfully attached\n");
+ return 0;
+err_regmap_exit:
+ regmap_exit(dev->regmap);
+err_kfree:
+ kfree(dev);
err:
- dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
- if (priv && priv->i2c_adapter)
- i2c_del_mux_adapter(priv->i2c_adapter);
- kfree(priv);
- return NULL;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
-EXPORT_SYMBOL(rtl2832_attach);
-static struct dvb_frontend_ops rtl2832_ops = {
- .delsys = { SYS_DVBT },
- .info = {
- .name = "Realtek RTL2832 (DVB-T)",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
- .caps = FE_CAN_FEC_1_2 |
- FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_RECOVER |
- FE_CAN_MUTE_TS
- },
+static int rtl2832_remove(struct i2c_client *client)
+{
+ struct rtl2832_dev *dev = i2c_get_clientdata(client);
- .release = rtl2832_release,
+ dev_dbg(&client->dev, "\n");
- .init = rtl2832_init,
- .sleep = rtl2832_sleep,
+ cancel_delayed_work_sync(&dev->i2c_gate_work);
- .get_tune_settings = rtl2832_get_tune_settings,
+ i2c_del_mux_adapter(dev->i2c_adapter_tuner);
- .set_frontend = rtl2832_set_frontend,
- .get_frontend = rtl2832_get_frontend,
+ regmap_exit(dev->regmap);
- .read_status = rtl2832_read_status,
- .read_snr = rtl2832_read_snr,
- .read_ber = rtl2832_read_ber,
+ kfree(dev);
- .i2c_gate_ctrl = rtl2832_i2c_gate_ctrl,
+ return 0;
+}
+
+static const struct i2c_device_id rtl2832_id_table[] = {
+ {"rtl2832", 0},
+ {}
};
+MODULE_DEVICE_TABLE(i2c, rtl2832_id_table);
+
+static struct i2c_driver rtl2832_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rtl2832",
+ },
+ .probe = rtl2832_probe,
+ .remove = rtl2832_remove,
+ .id_table = rtl2832_id_table,
+};
+
+module_i2c_driver(rtl2832_driver);
MODULE_AUTHOR("Thomas Mair <mair.thomas86@gmail.com>");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Realtek RTL2832 DVB-T demodulator driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.5");
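The 16-bit register addresses used by the new helpers (0x10c, 0x192, 0x061, ...) carry the register page in the high byte and the in-page offset in the low byte; the regmap_range_cfg set up in rtl2832_probe() maps them onto 0x100-byte windows selected through register 0x00, the same page-select register the removed rtl2832_wr_regs()/rtl2832_rd_reg() helpers programmed by hand. A standalone sketch of that address split, with an illustrative helper name that is not part of the patch:

/* Sketch only: how a 16-bit rtl2832 register address splits into a
 * page-select value (written to register 0x00) and an in-page offset.
 */
#include <stdio.h>

static void rtl2832_reg_decode(unsigned int reg)
{
	unsigned int page   = reg >> 8;		/* selector value for reg 0x00 */
	unsigned int offset = reg & 0xff;	/* register inside the page */

	printf("reg 0x%03x -> page %u, offset 0x%02x\n", reg, page, offset);
}

int main(void)
{
	rtl2832_reg_decode(0x10c);	/* page 1, offset 0x0c */
	rtl2832_reg_decode(0x192);	/* page 1, offset 0x92 */
	rtl2832_reg_decode(0x061);	/* page 0, offset 0x61 */
	return 0;
}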
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 5254c1dfc8de..a8e912e679a5 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -2,6 +2,7 @@
* Realtek RTL2832 DVB-T demodulator driver
*
* Copyright (C) 2012 Thomas Mair <thomas.mair86@gmail.com>
+ * Copyright (C) 2012-2014 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,86 +22,42 @@
#ifndef RTL2832_H
#define RTL2832_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
+#include <linux/i2c-mux.h>
+
+/**
+ * struct rtl2832_platform_data - Platform data for the rtl2832 driver
+ * @clk: Clock frequency in Hz (4000000, 16000000, 25000000, 28800000).
+ * @tuner: Tuner model in use.
+ * @get_dvb_frontend: Get the DVB frontend.
+ * @get_i2c_adapter: Get the I2C adapter.
+ * @enable_slave_ts: Enable the slave TS interface.
+ * @pid_filter: Add or remove a PID from the PID filter.
+ * @pid_filter_ctrl: Enable/disable the PID filter.
+ */
-struct rtl2832_config {
- /*
- * Demodulator I2C address.
- */
- u8 i2c_addr;
-
- /*
- * Xtal frequency.
- * Hz
- * 4000000, 16000000, 25000000, 28800000
- */
- u32 xtal;
-
+struct rtl2832_platform_data {
+ u32 clk;
/*
- * tuner
- * XXX: This must be keep sync with dvb_usb_rtl28xxu demod driver.
+ * XXX: This list must be kept in sync with the dvb_usb_rtl28xxu USB IF driver.
*/
#define RTL2832_TUNER_TUA9001 0x24
#define RTL2832_TUNER_FC0012 0x26
#define RTL2832_TUNER_E4000 0x27
#define RTL2832_TUNER_FC0013 0x29
-#define RTL2832_TUNER_R820T 0x2a
-#define RTL2832_TUNER_R828D 0x2b
+#define RTL2832_TUNER_R820T 0x2a
+#define RTL2832_TUNER_R828D 0x2b
u8 tuner;
-};
-
-#if IS_ENABLED(CONFIG_DVB_RTL2832)
-struct dvb_frontend *rtl2832_attach(
- const struct rtl2832_config *cfg,
- struct i2c_adapter *i2c
-);
-
-extern struct i2c_adapter *rtl2832_get_i2c_adapter(
- struct dvb_frontend *fe
-);
-
-extern struct i2c_adapter *rtl2832_get_private_i2c_adapter(
- struct dvb_frontend *fe
-);
-
-extern int rtl2832_enable_external_ts_if(
- struct dvb_frontend *fe
-);
-
-#else
-
-static inline struct dvb_frontend *rtl2832_attach(
- const struct rtl2832_config *config,
- struct i2c_adapter *i2c
-)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-
-static inline struct i2c_adapter *rtl2832_get_i2c_adapter(
- struct dvb_frontend *fe
-)
-{
- return NULL;
-}
-
-static inline struct i2c_adapter *rtl2832_get_private_i2c_adapter(
- struct dvb_frontend *fe
-)
-{
- return NULL;
-}
-
-static inline int rtl2832_enable_external_ts_if(
- struct dvb_frontend *fe
-)
-{
- return -ENODEV;
-}
-
-#endif
+ struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
+ struct i2c_adapter* (*get_i2c_adapter)(struct i2c_client *);
+ int (*enable_slave_ts)(struct i2c_client *);
+ int (*pid_filter)(struct dvb_frontend *, u8, u16, int);
+ int (*pid_filter_ctrl)(struct dvb_frontend *, int);
+/* private: Register access for SDR module use only */
+ int (*bulk_read)(struct i2c_client *, unsigned int, void *, size_t);
+ int (*bulk_write)(struct i2c_client *, unsigned int, const void *, size_t);
+ int (*update_bits)(struct i2c_client *, unsigned int, unsigned int, unsigned int);
+};
#endif /* RTL2832_H */
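With rtl2832_attach() gone, a parent driver binds the demod by registering an I2C client named "rtl2832" that carries a struct rtl2832_platform_data, then reads back the callback pointers rtl2832_probe() fills in. A minimal, hedged sketch of that binding; the function name, variable names and the 0x10 demod address are illustrative assumptions, not taken from this patch:

/* Sketch only: how a parent (USB interface) driver could bind the rtl2832
 * demod through platform data. Names and the I2C address are examples.
 */
#include <linux/i2c.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include "rtl2832.h"

static int example_attach_demod(struct i2c_adapter *adap,
				struct rtl2832_platform_data *pdata,
				struct i2c_client **client_out)
{
	struct i2c_board_info board_info = {
		.platform_data = pdata,
	};
	struct i2c_client *client;

	pdata->clk = 28800000;			/* 28.8 MHz crystal */
	pdata->tuner = RTL2832_TUNER_R820T;

	strlcpy(board_info.type, "rtl2832", I2C_NAME_SIZE);
	board_info.addr = 0x10;			/* example demod address */

	request_module("rtl2832");		/* make sure the driver is loaded */
	client = i2c_new_device(adap, &board_info);
	if (!client || !client->dev.driver) {
		if (client)
			i2c_unregister_device(client);
		return -ENODEV;
	}

	/* rtl2832_probe() has now filled in pdata->get_dvb_frontend etc. */
	*client_out = client;
	return 0;
}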
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index ae469f032fe6..c3a922c37903 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -2,6 +2,7 @@
* Realtek RTL2832 DVB-T demodulator driver
*
* Copyright (C) 2012 Thomas Mair <thomas.mair86@gmail.com>
+ * Copyright (C) 2012-2014 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,28 +22,34 @@
#ifndef RTL2832_PRIV_H
#define RTL2832_PRIV_H
+#include <linux/regmap.h>
+#include <linux/math64.h>
+#include <linux/bitops.h>
+
#include "dvb_frontend.h"
+#include "dvb_math.h"
#include "rtl2832.h"
-#include <linux/i2c-mux.h>
-struct rtl2832_priv {
- struct i2c_adapter *i2c;
- struct i2c_adapter *i2c_adapter;
+struct rtl2832_dev {
+ struct rtl2832_platform_data *pdata;
+ struct i2c_client *client;
+ struct mutex regmap_mutex;
+ struct regmap_config regmap_config;
+ struct regmap *regmap;
struct i2c_adapter *i2c_adapter_tuner;
struct dvb_frontend fe;
- struct rtl2832_config cfg;
-
- bool i2c_gate_state;
+ struct delayed_work stat_work;
+ fe_status_t fe_status;
+ u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
+ u64 post_bit_error;
+ u64 post_bit_count;
bool sleeping;
-
- u8 tuner;
- u8 page; /* active register page */
struct delayed_work i2c_gate_work;
+ unsigned long filters; /* PID filter */
};
struct rtl2832_reg_entry {
- u8 page;
- u8 start_address;
+ u16 start_address;
u8 msb;
u8 lsb;
};
@@ -52,7 +59,6 @@ struct rtl2832_reg_value {
u32 value;
};
-
/* Demod register bit names */
enum DVBT_REG_BIT_NAME {
DVBT_SOFT_RST,
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 2896b47c29d8..3ff8806ca584 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -22,7 +22,6 @@
*
*/
-#include "dvb_frontend.h"
#include "rtl2832_sdr.h"
#include "dvb_usb.h"
@@ -32,6 +31,7 @@
#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
+#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
@@ -107,16 +107,12 @@ struct rtl2832_sdr_frame_buf {
struct list_head list;
};
-struct rtl2832_sdr_state {
+struct rtl2832_sdr_dev {
#define POWER_ON (1 << 1)
#define URB_BUF (1 << 2)
unsigned long flags;
- const struct rtl2832_config *cfg;
- struct dvb_frontend *fe;
- struct dvb_usb_device *d;
- struct i2c_adapter *i2c;
- u8 bank;
+ struct platform_device *pdev;
struct video_device vdev;
struct v4l2_device v4l2_dev;
@@ -160,200 +156,77 @@ struct rtl2832_sdr_state {
unsigned long jiffies_next;
};
-/* write multiple hardware registers */
-static int rtl2832_sdr_wr(struct rtl2832_sdr_state *s, u8 reg, const u8 *val,
- int len)
-{
- int ret;
-#define MAX_WR_LEN 24
-#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
- u8 buf[MAX_WR_XFER_LEN];
- struct i2c_msg msg[1] = {
- {
- .addr = s->cfg->i2c_addr,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
-
- if (WARN_ON(len > MAX_WR_LEN))
- return -EINVAL;
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(s->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_err(&s->i2c->dev,
- "%s: I2C wr failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* read multiple hardware registers */
-static int rtl2832_sdr_rd(struct rtl2832_sdr_state *s, u8 reg, u8 *val, int len)
-{
- int ret;
- struct i2c_msg msg[2] = {
- {
- .addr = s->cfg->i2c_addr,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = s->cfg->i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = val,
- }
- };
-
- ret = i2c_transfer(s->i2c, msg, 2);
- if (ret == 2) {
- ret = 0;
- } else {
- dev_err(&s->i2c->dev,
- "%s: I2C rd failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
/* write multiple registers */
-static int rtl2832_sdr_wr_regs(struct rtl2832_sdr_state *s, u16 reg,
+static int rtl2832_sdr_wr_regs(struct rtl2832_sdr_dev *dev, u16 reg,
const u8 *val, int len)
{
- int ret;
- u8 reg2 = (reg >> 0) & 0xff;
- u8 bank = (reg >> 8) & 0xff;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct i2c_client *client = pdata->i2c_client;
- /* switch bank if needed */
- if (bank != s->bank) {
- ret = rtl2832_sdr_wr(s, 0x00, &bank, 1);
- if (ret)
- return ret;
-
- s->bank = bank;
- }
-
- return rtl2832_sdr_wr(s, reg2, val, len);
+ return pdata->bulk_write(client, reg, val, len);
}
+#if 0
/* read multiple registers */
-static int rtl2832_sdr_rd_regs(struct rtl2832_sdr_state *s, u16 reg, u8 *val,
+static int rtl2832_sdr_rd_regs(struct rtl2832_sdr_dev *dev, u16 reg, u8 *val,
int len)
{
- int ret;
- u8 reg2 = (reg >> 0) & 0xff;
- u8 bank = (reg >> 8) & 0xff;
-
- /* switch bank if needed */
- if (bank != s->bank) {
- ret = rtl2832_sdr_wr(s, 0x00, &bank, 1);
- if (ret)
- return ret;
-
- s->bank = bank;
- }
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct i2c_client *client = pdata->i2c_client;
- return rtl2832_sdr_rd(s, reg2, val, len);
+ return pdata->bulk_read(client, reg, val, len);
}
+#endif
/* write single register */
-static int rtl2832_sdr_wr_reg(struct rtl2832_sdr_state *s, u16 reg, u8 val)
+static int rtl2832_sdr_wr_reg(struct rtl2832_sdr_dev *dev, u16 reg, u8 val)
{
- return rtl2832_sdr_wr_regs(s, reg, &val, 1);
+ return rtl2832_sdr_wr_regs(dev, reg, &val, 1);
}
-#if 0
-/* read single register */
-static int rtl2832_sdr_rd_reg(struct rtl2832_sdr_state *s, u16 reg, u8 *val)
-{
- return rtl2832_sdr_rd_regs(s, reg, val, 1);
-}
-#endif
-
/* write single register with mask */
-static int rtl2832_sdr_wr_reg_mask(struct rtl2832_sdr_state *s, u16 reg,
+static int rtl2832_sdr_wr_reg_mask(struct rtl2832_sdr_dev *dev, u16 reg,
u8 val, u8 mask)
{
- int ret;
- u8 tmp;
-
- /* no need for read if whole reg is written */
- if (mask != 0xff) {
- ret = rtl2832_sdr_rd_regs(s, reg, &tmp, 1);
- if (ret)
- return ret;
-
- val &= mask;
- tmp &= ~mask;
- val |= tmp;
- }
-
- return rtl2832_sdr_wr_regs(s, reg, &val, 1);
-}
-
-#if 0
-/* read single register with mask */
-static int rtl2832_sdr_rd_reg_mask(struct rtl2832_sdr_state *s, u16 reg,
- u8 *val, u8 mask)
-{
- int ret, i;
- u8 tmp;
-
- ret = rtl2832_sdr_rd_regs(s, reg, &tmp, 1);
- if (ret)
- return ret;
-
- tmp &= mask;
-
- /* find position of the first bit */
- for (i = 0; i < 8; i++) {
- if ((mask >> i) & 0x01)
- break;
- }
- *val = tmp >> i;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct i2c_client *client = pdata->i2c_client;
- return 0;
+ return pdata->update_bits(client, reg, mask, val);
}
-#endif
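The masked write now delegates to the demod driver's update_bits() callback (note the argument order: mask, then value), which performs the same read-modify-write the removed code open-coded. A standalone sketch of those semantics, illustrative only:

/* Sketch only: the masked-update behind update_bits(reg, mask, val),
 * equivalent to the removed open-coded read/modify/write above.
 */
#include <assert.h>

static unsigned char apply_update_bits(unsigned char old,
				       unsigned char mask,
				       unsigned char val)
{
	return (old & ~mask) | (val & mask);	/* untouched bits keep old value */
}

int main(void)
{
	/* e.g. the software reset below toggles only bit 2 of register 0x101 */
	assert(apply_update_bits(0xf0, 0x04, 0x04) == 0xf4);
	assert(apply_update_bits(0xf4, 0x04, 0x00) == 0xf0);
	return 0;
}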
/* Private functions */
static struct rtl2832_sdr_frame_buf *rtl2832_sdr_get_next_fill_buf(
- struct rtl2832_sdr_state *s)
+ struct rtl2832_sdr_dev *dev)
{
unsigned long flags;
struct rtl2832_sdr_frame_buf *buf = NULL;
- spin_lock_irqsave(&s->queued_bufs_lock, flags);
- if (list_empty(&s->queued_bufs))
+ spin_lock_irqsave(&dev->queued_bufs_lock, flags);
+ if (list_empty(&dev->queued_bufs))
goto leave;
- buf = list_entry(s->queued_bufs.next,
+ buf = list_entry(dev->queued_bufs.next,
struct rtl2832_sdr_frame_buf, list);
list_del(&buf->list);
leave:
- spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+ spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
return buf;
}
-static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_state *s,
+static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_dev *dev,
void *dst, const u8 *src, unsigned int src_len)
{
+ struct platform_device *pdev = dev->pdev;
unsigned int dst_len;
- if (s->pixelformat == V4L2_SDR_FMT_CU8) {
+ if (dev->pixelformat == V4L2_SDR_FMT_CU8) {
/* native stream, no need to convert */
memcpy(dst, src, src_len);
dst_len = src_len;
- } else if (s->pixelformat == V4L2_SDR_FMT_CU16LE) {
+ } else if (dev->pixelformat == V4L2_SDR_FMT_CU16LE) {
/* convert u8 to u16 */
unsigned int i;
u16 *u16dst = dst;
@@ -366,22 +239,21 @@ static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_state *s,
}
 /* calculate sample rate and output it in 10 second intervals */
- if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
+ if (unlikely(time_is_before_jiffies(dev->jiffies_next))) {
#define MSECS 10000UL
unsigned int msecs = jiffies_to_msecs(jiffies -
- s->jiffies_next + msecs_to_jiffies(MSECS));
- unsigned int samples = s->sample - s->sample_measured;
-
- s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
- s->sample_measured = s->sample;
- dev_dbg(&s->udev->dev,
- "slen=%u samples=%u msecs=%u sample rate=%lu\n",
- src_len, samples, msecs,
- samples * 1000UL / msecs);
+ dev->jiffies_next + msecs_to_jiffies(MSECS));
+ unsigned int samples = dev->sample - dev->sample_measured;
+
+ dev->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
+ dev->sample_measured = dev->sample;
+ dev_dbg(&pdev->dev,
+ "slen=%u samples=%u msecs=%u sample rate=%lu\n",
+ src_len, samples, msecs, samples * 1000UL / msecs);
}
/* total number of I+Q pairs */
- s->sample += src_len / 2;
+ dev->sample += src_len / 2;
return dst_len;
}
@@ -392,13 +264,13 @@ static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_state *s,
*/
static void rtl2832_sdr_urb_complete(struct urb *urb)
{
- struct rtl2832_sdr_state *s = urb->context;
+ struct rtl2832_sdr_dev *dev = urb->context;
+ struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_frame_buf *fbuf;
- dev_dbg_ratelimited(&s->udev->dev,
- "status=%d length=%d/%d errors=%d\n",
- urb->status, urb->actual_length,
- urb->transfer_buffer_length, urb->error_count);
+ dev_dbg_ratelimited(&pdev->dev, "status=%d length=%d/%d errors=%d\n",
+ urb->status, urb->actual_length,
+ urb->transfer_buffer_length, urb->error_count);
switch (urb->status) {
case 0: /* success */
@@ -409,8 +281,7 @@ static void rtl2832_sdr_urb_complete(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_err_ratelimited(&s->udev->dev, "urb failed=%d\n",
- urb->status);
+ dev_err_ratelimited(&pdev->dev, "urb failed=%d\n", urb->status);
break;
}
@@ -418,204 +289,192 @@ static void rtl2832_sdr_urb_complete(struct urb *urb)
void *ptr;
unsigned int len;
/* get free framebuffer */
- fbuf = rtl2832_sdr_get_next_fill_buf(s);
+ fbuf = rtl2832_sdr_get_next_fill_buf(dev);
if (unlikely(fbuf == NULL)) {
- s->vb_full++;
- dev_notice_ratelimited(&s->udev->dev,
- "videobuf is full, %d packets dropped\n",
- s->vb_full);
+ dev->vb_full++;
+ dev_notice_ratelimited(&pdev->dev,
+ "videobuf is full, %d packets dropped\n",
+ dev->vb_full);
goto skip;
}
/* fill framebuffer */
ptr = vb2_plane_vaddr(&fbuf->vb, 0);
- len = rtl2832_sdr_convert_stream(s, ptr, urb->transfer_buffer,
+ len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer,
urb->actual_length);
vb2_set_plane_payload(&fbuf->vb, 0, len);
v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
- fbuf->vb.v4l2_buf.sequence = s->sequence++;
+ fbuf->vb.v4l2_buf.sequence = dev->sequence++;
vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
}
skip:
usb_submit_urb(urb, GFP_ATOMIC);
}
-static int rtl2832_sdr_kill_urbs(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_kill_urbs(struct rtl2832_sdr_dev *dev)
{
+ struct platform_device *pdev = dev->pdev;
int i;
- for (i = s->urbs_submitted - 1; i >= 0; i--) {
- dev_dbg(&s->udev->dev, "kill urb=%d\n", i);
+ for (i = dev->urbs_submitted - 1; i >= 0; i--) {
+ dev_dbg(&pdev->dev, "kill urb=%d\n", i);
/* stop the URB */
- usb_kill_urb(s->urb_list[i]);
+ usb_kill_urb(dev->urb_list[i]);
}
- s->urbs_submitted = 0;
+ dev->urbs_submitted = 0;
return 0;
}
-static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_dev *dev)
{
+ struct platform_device *pdev = dev->pdev;
int i, ret;
- for (i = 0; i < s->urbs_initialized; i++) {
- dev_dbg(&s->udev->dev, "submit urb=%d\n", i);
- ret = usb_submit_urb(s->urb_list[i], GFP_ATOMIC);
+ for (i = 0; i < dev->urbs_initialized; i++) {
+ dev_dbg(&pdev->dev, "submit urb=%d\n", i);
+ ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
if (ret) {
- dev_err(&s->udev->dev,
- "Could not submit urb no. %d - get them all back\n",
- i);
- rtl2832_sdr_kill_urbs(s);
+ dev_err(&pdev->dev,
+ "Could not submit urb no. %d - get them all back\n",
+ i);
+ rtl2832_sdr_kill_urbs(dev);
return ret;
}
- s->urbs_submitted++;
+ dev->urbs_submitted++;
}
return 0;
}
-static int rtl2832_sdr_free_stream_bufs(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_free_stream_bufs(struct rtl2832_sdr_dev *dev)
{
- if (s->flags & USB_STATE_URB_BUF) {
- while (s->buf_num) {
- s->buf_num--;
- dev_dbg(&s->udev->dev, "free buf=%d\n", s->buf_num);
- usb_free_coherent(s->udev, s->buf_size,
- s->buf_list[s->buf_num],
- s->dma_addr[s->buf_num]);
+ struct platform_device *pdev = dev->pdev;
+
+ if (dev->flags & USB_STATE_URB_BUF) {
+ while (dev->buf_num) {
+ dev->buf_num--;
+ dev_dbg(&pdev->dev, "free buf=%d\n", dev->buf_num);
+ usb_free_coherent(dev->udev, dev->buf_size,
+ dev->buf_list[dev->buf_num],
+ dev->dma_addr[dev->buf_num]);
}
}
- s->flags &= ~USB_STATE_URB_BUF;
+ dev->flags &= ~USB_STATE_URB_BUF;
return 0;
}
-static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_dev *dev)
{
- s->buf_num = 0;
- s->buf_size = BULK_BUFFER_SIZE;
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "all in all I will use %u bytes for streaming\n",
- MAX_BULK_BUFS * BULK_BUFFER_SIZE);
+ dev->buf_num = 0;
+ dev->buf_size = BULK_BUFFER_SIZE;
- for (s->buf_num = 0; s->buf_num < MAX_BULK_BUFS; s->buf_num++) {
- s->buf_list[s->buf_num] = usb_alloc_coherent(s->udev,
+ dev_dbg(&pdev->dev, "all in all I will use %u bytes for streaming\n",
+ MAX_BULK_BUFS * BULK_BUFFER_SIZE);
+
+ for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
+ dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
BULK_BUFFER_SIZE, GFP_ATOMIC,
- &s->dma_addr[s->buf_num]);
- if (!s->buf_list[s->buf_num]) {
- dev_dbg(&s->udev->dev, "alloc buf=%d failed\n",
- s->buf_num);
- rtl2832_sdr_free_stream_bufs(s);
+ &dev->dma_addr[dev->buf_num]);
+ if (!dev->buf_list[dev->buf_num]) {
+ dev_dbg(&pdev->dev, "alloc buf=%d failed\n",
+ dev->buf_num);
+ rtl2832_sdr_free_stream_bufs(dev);
return -ENOMEM;
}
- dev_dbg(&s->udev->dev, "alloc buf=%d %p (dma %llu)\n",
- s->buf_num, s->buf_list[s->buf_num],
- (long long)s->dma_addr[s->buf_num]);
- s->flags |= USB_STATE_URB_BUF;
+ dev_dbg(&pdev->dev, "alloc buf=%d %p (dma %llu)\n",
+ dev->buf_num, dev->buf_list[dev->buf_num],
+ (long long)dev->dma_addr[dev->buf_num]);
+ dev->flags |= USB_STATE_URB_BUF;
}
return 0;
}
-static int rtl2832_sdr_free_urbs(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_free_urbs(struct rtl2832_sdr_dev *dev)
{
+ struct platform_device *pdev = dev->pdev;
int i;
- rtl2832_sdr_kill_urbs(s);
+ rtl2832_sdr_kill_urbs(dev);
- for (i = s->urbs_initialized - 1; i >= 0; i--) {
- if (s->urb_list[i]) {
- dev_dbg(&s->udev->dev, "free urb=%d\n", i);
+ for (i = dev->urbs_initialized - 1; i >= 0; i--) {
+ if (dev->urb_list[i]) {
+ dev_dbg(&pdev->dev, "free urb=%d\n", i);
/* free the URBs */
- usb_free_urb(s->urb_list[i]);
+ usb_free_urb(dev->urb_list[i]);
}
}
- s->urbs_initialized = 0;
+ dev->urbs_initialized = 0;
return 0;
}
-static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
{
+ struct platform_device *pdev = dev->pdev;
int i, j;
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
- dev_dbg(&s->udev->dev, "alloc urb=%d\n", i);
- s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
- if (!s->urb_list[i]) {
- dev_dbg(&s->udev->dev, "failed\n");
+ dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
+ dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!dev->urb_list[i]) {
+ dev_dbg(&pdev->dev, "failed\n");
for (j = 0; j < i; j++)
- usb_free_urb(s->urb_list[j]);
+ usb_free_urb(dev->urb_list[j]);
return -ENOMEM;
}
- usb_fill_bulk_urb(s->urb_list[i],
- s->udev,
- usb_rcvbulkpipe(s->udev, 0x81),
- s->buf_list[i],
+ usb_fill_bulk_urb(dev->urb_list[i],
+ dev->udev,
+ usb_rcvbulkpipe(dev->udev, 0x81),
+ dev->buf_list[i],
BULK_BUFFER_SIZE,
- rtl2832_sdr_urb_complete, s);
+ rtl2832_sdr_urb_complete, dev);
- s->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
- s->urb_list[i]->transfer_dma = s->dma_addr[i];
- s->urbs_initialized++;
+ dev->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ dev->urb_list[i]->transfer_dma = dev->dma_addr[i];
+ dev->urbs_initialized++;
}
return 0;
}
 /* Must be called with vb_queue_lock held */
-static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_state *s)
+static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_dev *dev)
{
+ struct platform_device *pdev = dev->pdev;
unsigned long flags;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
- spin_lock_irqsave(&s->queued_bufs_lock, flags);
- while (!list_empty(&s->queued_bufs)) {
+ spin_lock_irqsave(&dev->queued_bufs_lock, flags);
+ while (!list_empty(&dev->queued_bufs)) {
struct rtl2832_sdr_frame_buf *buf;
- buf = list_entry(s->queued_bufs.next,
+ buf = list_entry(dev->queued_bufs.next,
struct rtl2832_sdr_frame_buf, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
- spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
-}
-
-/* The user yanked out the cable... */
-static void rtl2832_sdr_release_sec(struct dvb_frontend *fe)
-{
- struct rtl2832_sdr_state *s = fe->sec_priv;
-
- dev_dbg(&s->udev->dev, "\n");
-
- mutex_lock(&s->vb_queue_lock);
- mutex_lock(&s->v4l2_lock);
- /* No need to keep the urbs around after disconnection */
- s->udev = NULL;
-
- v4l2_device_disconnect(&s->v4l2_dev);
- video_unregister_device(&s->vdev);
- mutex_unlock(&s->v4l2_lock);
- mutex_unlock(&s->vb_queue_lock);
-
- v4l2_device_put(&s->v4l2_dev);
-
- fe->sec_priv = NULL;
+ spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}
static int rtl2832_sdr_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
- strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
- usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
+ strlcpy(cap->card, dev->vdev.name, sizeof(cap->card));
+ usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -627,26 +486,26 @@ static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *fmt, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
- struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
+ struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "nbuffers=%d\n", *nbuffers);
+ dev_dbg(&pdev->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
if (vq->num_buffers + *nbuffers < 8)
*nbuffers = 8 - vq->num_buffers;
*nplanes = 1;
- sizes[0] = PAGE_ALIGN(s->buffersize);
- dev_dbg(&s->udev->dev, "nbuffers=%d sizes[0]=%d\n",
- *nbuffers, sizes[0]);
+ sizes[0] = PAGE_ALIGN(dev->buffersize);
+ dev_dbg(&pdev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
return 0;
}
static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
{
- struct rtl2832_sdr_state *s = vb2_get_drv_priv(vb->vb2_queue);
+ struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
 /* Don't allow queueing new buffers after device disconnection */
- if (!s->udev)
+ if (!dev->udev)
return -ENODEV;
return 0;
@@ -654,46 +513,48 @@ static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
{
- struct rtl2832_sdr_state *s = vb2_get_drv_priv(vb->vb2_queue);
+ struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct rtl2832_sdr_frame_buf *buf =
container_of(vb, struct rtl2832_sdr_frame_buf, vb);
unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
- if (!s->udev) {
+ if (!dev->udev) {
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
return;
}
- spin_lock_irqsave(&s->queued_bufs_lock, flags);
- list_add_tail(&buf->list, &s->queued_bufs);
- spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+ spin_lock_irqsave(&dev->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &dev->queued_bufs);
+ spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}
-static int rtl2832_sdr_set_adc(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_set_adc(struct rtl2832_sdr_dev *dev)
{
- struct dvb_frontend *fe = s->fe;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_frontend *fe = pdata->dvb_frontend;
int ret;
unsigned int f_sr, f_if;
u8 buf[4], u8tmp1, u8tmp2;
u64 u64tmp;
u32 u32tmp;
- dev_dbg(&s->udev->dev, "f_adc=%u\n", s->f_adc);
+ dev_dbg(&pdev->dev, "f_adc=%u\n", dev->f_adc);
- if (!test_bit(POWER_ON, &s->flags))
+ if (!test_bit(POWER_ON, &dev->flags))
return 0;
- if (s->f_adc == 0)
+ if (dev->f_adc == 0)
return 0;
- f_sr = s->f_adc;
+ f_sr = dev->f_adc;
- ret = rtl2832_sdr_wr_regs(s, 0x13e, "\x00\x00", 2);
+ ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x00\x00", 2);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(s, 0x115, "\x00\x00\x00\x00", 4);
+ ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x00\x00\x00\x00", 4);
if (ret)
goto err;
@@ -707,19 +568,19 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_state *s)
goto err;
/* program IF */
- u64tmp = f_if % s->cfg->xtal;
+ u64tmp = f_if % pdata->clk;
u64tmp *= 0x400000;
- u64tmp = div_u64(u64tmp, s->cfg->xtal);
+ u64tmp = div_u64(u64tmp, pdata->clk);
u64tmp = -u64tmp;
u32tmp = u64tmp & 0x3fffff;
- dev_dbg(&s->udev->dev, "f_if=%u if_ctl=%08x\n", f_if, u32tmp);
+ dev_dbg(&pdev->dev, "f_if=%u if_ctl=%08x\n", f_if, u32tmp);
buf[0] = (u32tmp >> 16) & 0xff;
buf[1] = (u32tmp >> 8) & 0xff;
buf[2] = (u32tmp >> 0) & 0xff;
- ret = rtl2832_sdr_wr_regs(s, 0x119, buf, 3);
+ ret = rtl2832_sdr_wr_regs(dev, 0x119, buf, 3);
if (ret)
goto err;
@@ -733,208 +594,212 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_state *s)
u8tmp2 = 0xcd; /* enable ADC I, ADC Q */
}
- ret = rtl2832_sdr_wr_reg(s, 0x1b1, u8tmp1);
+ ret = rtl2832_sdr_wr_reg(dev, 0x1b1, u8tmp1);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_reg(s, 0x008, u8tmp2);
+ ret = rtl2832_sdr_wr_reg(dev, 0x008, u8tmp2);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_reg(s, 0x006, 0x80);
+ ret = rtl2832_sdr_wr_reg(dev, 0x006, 0x80);
if (ret)
goto err;
/* program sampling rate (resampling down) */
- u32tmp = div_u64(s->cfg->xtal * 0x400000ULL, f_sr * 4U);
+ u32tmp = div_u64(pdata->clk * 0x400000ULL, f_sr * 4U);
u32tmp <<= 2;
buf[0] = (u32tmp >> 24) & 0xff;
buf[1] = (u32tmp >> 16) & 0xff;
buf[2] = (u32tmp >> 8) & 0xff;
buf[3] = (u32tmp >> 0) & 0xff;
- ret = rtl2832_sdr_wr_regs(s, 0x19f, buf, 4);
+ ret = rtl2832_sdr_wr_regs(dev, 0x19f, buf, 4);
if (ret)
goto err;
/* low-pass filter */
- ret = rtl2832_sdr_wr_regs(s, 0x11c,
+ ret = rtl2832_sdr_wr_regs(dev, 0x11c,
"\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
20);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(s, 0x017, "\x11\x10", 2);
+ ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
if (ret)
goto err;
/* mode */
- ret = rtl2832_sdr_wr_regs(s, 0x019, "\x05", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x05", 1);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(s, 0x01a, "\x1b\x16\x0d\x06\x01\xff", 6);
+ ret = rtl2832_sdr_wr_regs(dev, 0x01a, "\x1b\x16\x0d\x06\x01\xff", 6);
if (ret)
goto err;
/* FSM */
- ret = rtl2832_sdr_wr_regs(s, 0x192, "\x00\xf0\x0f", 3);
+ ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\xf0\x0f", 3);
if (ret)
goto err;
/* PID filter */
- ret = rtl2832_sdr_wr_regs(s, 0x061, "\x60", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x061, "\x60", 1);
if (ret)
goto err;
/* used RF tuner based settings */
- switch (s->cfg->tuner) {
- case RTL2832_TUNER_E4000:
- ret = rtl2832_sdr_wr_regs(s, 0x112, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x103, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c7, "\x30", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x104, "\xd0", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c8, "\x18", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x10a, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x011, "\xd4", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1e5, "\xf0", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1d9, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1db, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1dd, "\x14", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1de, "\xec", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1d8, "\x0c", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1e6, "\x02", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1d7, "\x09", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00d, "\x83", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x010, "\x49", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00d, "\x87", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00d, "\x85", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x013, "\x02", 1);
+ switch (pdata->tuner) {
+ case RTL2832_SDR_TUNER_E4000:
+ ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x30", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xd0", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x18", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xd4", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x14", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xec", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x83", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x010, "\x49", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x87", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x85", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x013, "\x02", 1);
break;
- case RTL2832_TUNER_FC0012:
- case RTL2832_TUNER_FC0013:
- ret = rtl2832_sdr_wr_regs(s, 0x112, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x103, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c7, "\x2c", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x104, "\xcc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c8, "\x16", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x10a, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x011, "\xe9\xbf", 2);
- ret = rtl2832_sdr_wr_regs(s, 0x1e5, "\xf0", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1d9, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1db, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1dd, "\x11", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1de, "\xef", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1d8, "\x0c", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1e6, "\x02", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1d7, "\x09", 1);
+ case RTL2832_SDR_TUNER_FC0012:
+ case RTL2832_SDR_TUNER_FC0013:
+ ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xbf", 2);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x11", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xef", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
break;
- case RTL2832_TUNER_R820T:
- ret = rtl2832_sdr_wr_regs(s, 0x112, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x115, "\x01", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x103, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c7, "\x24", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x104, "\xcc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c8, "\x14", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x10a, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(s, 0x011, "\xf4", 1);
+ case RTL2832_SDR_TUNER_R820T:
+ case RTL2832_SDR_TUNER_R828D:
+ ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x01", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x24", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x14", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xf4", 1);
break;
default:
- dev_notice(&s->udev->dev, "Unsupported tuner\n");
+ dev_notice(&pdev->dev, "Unsupported tuner\n");
}
/* software reset */
- ret = rtl2832_sdr_wr_reg_mask(s, 0x101, 0x04, 0x04);
+ ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x04, 0x04);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_reg_mask(s, 0x101, 0x00, 0x04);
+ ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x00, 0x04);
if (ret)
goto err;
err:
return ret;
};
-static void rtl2832_sdr_unset_adc(struct rtl2832_sdr_state *s)
+static void rtl2832_sdr_unset_adc(struct rtl2832_sdr_dev *dev)
{
+ struct platform_device *pdev = dev->pdev;
int ret;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
/* PID filter */
- ret = rtl2832_sdr_wr_regs(s, 0x061, "\xe0", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x061, "\xe0", 1);
if (ret)
goto err;
/* mode */
- ret = rtl2832_sdr_wr_regs(s, 0x019, "\x20", 1);
+ ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x20", 1);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(s, 0x017, "\x11\x10", 2);
+ ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
if (ret)
goto err;
/* FSM */
- ret = rtl2832_sdr_wr_regs(s, 0x192, "\x00\x0f\xff", 3);
+ ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\x0f\xff", 3);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(s, 0x13e, "\x40\x00", 2);
+ ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x40\x00", 2);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(s, 0x115, "\x06\x3f\xce\xcc", 4);
+ ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x06\x3f\xce\xcc", 4);
if (ret)
goto err;
err:
return;
};
-static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_dev *dev)
{
- struct dvb_frontend *fe = s->fe;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_frontend *fe = pdata->dvb_frontend;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct v4l2_ctrl *bandwidth_auto;
struct v4l2_ctrl *bandwidth;
@@ -942,29 +807,29 @@ static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_state *s)
/*
* tuner RF (Hz)
*/
- if (s->f_tuner == 0)
+ if (dev->f_tuner == 0)
return 0;
/*
* bandwidth (Hz)
*/
- bandwidth_auto = v4l2_ctrl_find(&s->hdl,
+ bandwidth_auto = v4l2_ctrl_find(&dev->hdl,
V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
- bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
+ bandwidth = v4l2_ctrl_find(&dev->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
- c->bandwidth_hz = s->f_adc;
- v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
+ c->bandwidth_hz = dev->f_adc;
+ v4l2_ctrl_s_ctrl(bandwidth, dev->f_adc);
} else {
c->bandwidth_hz = v4l2_ctrl_g_ctrl(bandwidth);
}
- c->frequency = s->f_tuner;
+ c->frequency = dev->f_tuner;
c->delivery_system = SYS_DVBT;
- dev_dbg(&s->udev->dev, "frequency=%u bandwidth=%d\n",
- c->frequency, c->bandwidth_hz);
+ dev_dbg(&pdev->dev, "frequency=%u bandwidth=%d\n",
+ c->frequency, c->bandwidth_hz);
- if (!test_bit(POWER_ON, &s->flags))
+ if (!test_bit(POWER_ON, &dev->flags))
return 0;
if (fe->ops.tuner_ops.set_params)
@@ -973,11 +838,13 @@ static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_state *s)
return 0;
};
-static int rtl2832_sdr_set_tuner(struct rtl2832_sdr_state *s)
+static int rtl2832_sdr_set_tuner(struct rtl2832_sdr_dev *dev)
{
- struct dvb_frontend *fe = s->fe;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_frontend *fe = pdata->dvb_frontend;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
if (fe->ops.tuner_ops.init)
fe->ops.tuner_ops.init(fe);
@@ -985,11 +852,13 @@ static int rtl2832_sdr_set_tuner(struct rtl2832_sdr_state *s)
return 0;
};
-static void rtl2832_sdr_unset_tuner(struct rtl2832_sdr_state *s)
+static void rtl2832_sdr_unset_tuner(struct rtl2832_sdr_dev *dev)
{
- struct dvb_frontend *fe = s->fe;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_frontend *fe = pdata->dvb_frontend;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
if (fe->ops.tuner_ops.sleep)
fe->ops.tuner_ops.sleep(fe);
@@ -999,83 +868,89 @@ static void rtl2832_sdr_unset_tuner(struct rtl2832_sdr_state *s)
static int rtl2832_sdr_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
+ struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_usb_device *d = pdata->dvb_usb_device;
int ret;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
- if (!s->udev)
+ if (!dev->udev)
return -ENODEV;
- if (mutex_lock_interruptible(&s->v4l2_lock))
+ if (mutex_lock_interruptible(&dev->v4l2_lock))
return -ERESTARTSYS;
- if (s->d->props->power_ctrl)
- s->d->props->power_ctrl(s->d, 1);
+ if (d->props->power_ctrl)
+ d->props->power_ctrl(d, 1);
/* enable ADC */
- if (s->d->props->frontend_ctrl)
- s->d->props->frontend_ctrl(s->fe, 1);
+ if (d->props->frontend_ctrl)
+ d->props->frontend_ctrl(pdata->dvb_frontend, 1);
- set_bit(POWER_ON, &s->flags);
+ set_bit(POWER_ON, &dev->flags);
- ret = rtl2832_sdr_set_tuner(s);
+ ret = rtl2832_sdr_set_tuner(dev);
if (ret)
goto err;
- ret = rtl2832_sdr_set_tuner_freq(s);
+ ret = rtl2832_sdr_set_tuner_freq(dev);
if (ret)
goto err;
- ret = rtl2832_sdr_set_adc(s);
+ ret = rtl2832_sdr_set_adc(dev);
if (ret)
goto err;
- ret = rtl2832_sdr_alloc_stream_bufs(s);
+ ret = rtl2832_sdr_alloc_stream_bufs(dev);
if (ret)
goto err;
- ret = rtl2832_sdr_alloc_urbs(s);
+ ret = rtl2832_sdr_alloc_urbs(dev);
if (ret)
goto err;
- s->sequence = 0;
+ dev->sequence = 0;
- ret = rtl2832_sdr_submit_urbs(s);
+ ret = rtl2832_sdr_submit_urbs(dev);
if (ret)
goto err;
err:
- mutex_unlock(&s->v4l2_lock);
+ mutex_unlock(&dev->v4l2_lock);
return ret;
}
static void rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
{
- struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
+ struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_usb_device *d = pdata->dvb_usb_device;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
- mutex_lock(&s->v4l2_lock);
+ mutex_lock(&dev->v4l2_lock);
- rtl2832_sdr_kill_urbs(s);
- rtl2832_sdr_free_urbs(s);
- rtl2832_sdr_free_stream_bufs(s);
- rtl2832_sdr_cleanup_queued_bufs(s);
- rtl2832_sdr_unset_adc(s);
- rtl2832_sdr_unset_tuner(s);
+ rtl2832_sdr_kill_urbs(dev);
+ rtl2832_sdr_free_urbs(dev);
+ rtl2832_sdr_free_stream_bufs(dev);
+ rtl2832_sdr_cleanup_queued_bufs(dev);
+ rtl2832_sdr_unset_adc(dev);
+ rtl2832_sdr_unset_tuner(dev);
- clear_bit(POWER_ON, &s->flags);
+ clear_bit(POWER_ON, &dev->flags);
/* disable ADC */
- if (s->d->props->frontend_ctrl)
- s->d->props->frontend_ctrl(s->fe, 0);
+ if (d->props->frontend_ctrl)
+ d->props->frontend_ctrl(pdata->dvb_frontend, 0);
- if (s->d->props->power_ctrl)
- s->d->props->power_ctrl(s->d, 0);
+ if (d->props->power_ctrl)
+ d->props->power_ctrl(d, 0);
- mutex_unlock(&s->v4l2_lock);
+ mutex_unlock(&dev->v4l2_lock);
}
static struct vb2_ops rtl2832_sdr_vb2_ops = {
@@ -1091,9 +966,10 @@ static struct vb2_ops rtl2832_sdr_vb2_ops = {
static int rtl2832_sdr_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "index=%d type=%d\n", v->index, v->type);
+ dev_dbg(&pdev->dev, "index=%d type=%d\n", v->index, v->type);
if (v->index == 0) {
strlcpy(v->name, "ADC: Realtek RTL2832", sizeof(v->name));
@@ -1117,9 +993,10 @@ static int rtl2832_sdr_g_tuner(struct file *file, void *priv,
static int rtl2832_sdr_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *v)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
if (v->index > 1)
return -EINVAL;
@@ -1129,10 +1006,11 @@ static int rtl2832_sdr_s_tuner(struct file *file, void *priv,
static int rtl2832_sdr_enum_freq_bands(struct file *file, void *priv,
struct v4l2_frequency_band *band)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "tuner=%d type=%d index=%d\n",
- band->tuner, band->type, band->index);
+ dev_dbg(&pdev->dev, "tuner=%d type=%d index=%d\n",
+ band->tuner, band->type, band->index);
if (band->tuner == 0) {
if (band->index >= ARRAY_SIZE(bands_adc))
@@ -1154,17 +1032,17 @@ static int rtl2832_sdr_enum_freq_bands(struct file *file, void *priv,
static int rtl2832_sdr_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
int ret = 0;
- dev_dbg(&s->udev->dev, "tuner=%d type=%d\n",
- f->tuner, f->type);
+ dev_dbg(&pdev->dev, "tuner=%d type=%d\n", f->tuner, f->type);
if (f->tuner == 0) {
- f->frequency = s->f_adc;
+ f->frequency = dev->f_adc;
f->type = V4L2_TUNER_ADC;
} else if (f->tuner == 1) {
- f->frequency = s->f_tuner;
+ f->frequency = dev->f_tuner;
f->type = V4L2_TUNER_RF;
} else {
return -EINVAL;
@@ -1176,11 +1054,12 @@ static int rtl2832_sdr_g_frequency(struct file *file, void *priv,
static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
int ret, band;
- dev_dbg(&s->udev->dev, "tuner=%d type=%d frequency=%u\n",
- f->tuner, f->type, f->frequency);
+ dev_dbg(&pdev->dev, "tuner=%d type=%d frequency=%u\n",
+ f->tuner, f->type, f->frequency);
/* ADC band midpoints */
#define BAND_ADC_0 ((bands_adc[0].rangehigh + bands_adc[1].rangelow) / 2)
@@ -1194,19 +1073,19 @@ static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
else
band = 2;
- s->f_adc = clamp_t(unsigned int, f->frequency,
+ dev->f_adc = clamp_t(unsigned int, f->frequency,
bands_adc[band].rangelow,
bands_adc[band].rangehigh);
- dev_dbg(&s->udev->dev, "ADC frequency=%u Hz\n", s->f_adc);
- ret = rtl2832_sdr_set_adc(s);
+ dev_dbg(&pdev->dev, "ADC frequency=%u Hz\n", dev->f_adc);
+ ret = rtl2832_sdr_set_adc(dev);
} else if (f->tuner == 1) {
- s->f_tuner = clamp_t(unsigned int, f->frequency,
+ dev->f_tuner = clamp_t(unsigned int, f->frequency,
bands_fm[0].rangelow,
bands_fm[0].rangehigh);
- dev_dbg(&s->udev->dev, "RF frequency=%u Hz\n", f->frequency);
+ dev_dbg(&pdev->dev, "RF frequency=%u Hz\n", f->frequency);
- ret = rtl2832_sdr_set_tuner_freq(s);
+ ret = rtl2832_sdr_set_tuner_freq(dev);
} else {
ret = -EINVAL;
}
@@ -1217,11 +1096,12 @@ static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
static int rtl2832_sdr_enum_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
- if (f->index >= s->num_formats)
+ if (f->index >= dev->num_formats)
return -EINVAL;
strlcpy(f->description, formats[f->index].name, sizeof(f->description));
@@ -1233,12 +1113,13 @@ static int rtl2832_sdr_enum_fmt_sdr_cap(struct file *file, void *priv,
static int rtl2832_sdr_g_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
- dev_dbg(&s->udev->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
- f->fmt.sdr.pixelformat = s->pixelformat;
- f->fmt.sdr.buffersize = s->buffersize;
+ f->fmt.sdr.pixelformat = dev->pixelformat;
+ f->fmt.sdr.buffersize = dev->buffersize;
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
@@ -1248,28 +1129,29 @@ static int rtl2832_sdr_g_fmt_sdr_cap(struct file *file, void *priv,
static int rtl2832_sdr_s_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
- struct vb2_queue *q = &s->vb_queue;
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
+ struct vb2_queue *q = &dev->vb_queue;
int i;
- dev_dbg(&s->udev->dev, "pixelformat fourcc %4.4s\n",
- (char *)&f->fmt.sdr.pixelformat);
+ dev_dbg(&pdev->dev, "pixelformat fourcc %4.4s\n",
+ (char *)&f->fmt.sdr.pixelformat);
if (vb2_is_busy(q))
return -EBUSY;
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
- for (i = 0; i < s->num_formats; i++) {
+ for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
- s->pixelformat = formats[i].pixelformat;
- s->buffersize = formats[i].buffersize;
+ dev->pixelformat = formats[i].pixelformat;
+ dev->buffersize = formats[i].buffersize;
f->fmt.sdr.buffersize = formats[i].buffersize;
return 0;
}
}
- s->pixelformat = formats[0].pixelformat;
- s->buffersize = formats[0].buffersize;
+ dev->pixelformat = formats[0].pixelformat;
+ dev->buffersize = formats[0].buffersize;
f->fmt.sdr.pixelformat = formats[0].pixelformat;
f->fmt.sdr.buffersize = formats[0].buffersize;
@@ -1279,14 +1161,15 @@ static int rtl2832_sdr_s_fmt_sdr_cap(struct file *file, void *priv,
static int rtl2832_sdr_try_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rtl2832_sdr_state *s = video_drvdata(file);
+ struct rtl2832_sdr_dev *dev = video_drvdata(file);
+ struct platform_device *pdev = dev->pdev;
int i;
- dev_dbg(&s->udev->dev, "pixelformat fourcc %4.4s\n",
- (char *)&f->fmt.sdr.pixelformat);
+ dev_dbg(&pdev->dev, "pixelformat fourcc %4.4s\n",
+ (char *)&f->fmt.sdr.pixelformat);
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
- for (i = 0; i < s->num_formats; i++) {
+ for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
return 0;
@@ -1348,37 +1231,38 @@ static struct video_device rtl2832_sdr_template = {
static int rtl2832_sdr_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct rtl2832_sdr_state *s =
- container_of(ctrl->handler, struct rtl2832_sdr_state,
+ struct rtl2832_sdr_dev *dev =
+ container_of(ctrl->handler, struct rtl2832_sdr_dev,
hdl);
- struct dvb_frontend *fe = s->fe;
+ struct platform_device *pdev = dev->pdev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_frontend *fe = pdata->dvb_frontend;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dev_dbg(&s->udev->dev,
- "id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
- ctrl->id, ctrl->name, ctrl->val,
- ctrl->minimum, ctrl->maximum, ctrl->step);
+ dev_dbg(&pdev->dev, "id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
+ ctrl->id, ctrl->name, ctrl->val, ctrl->minimum, ctrl->maximum,
+ ctrl->step);
switch (ctrl->id) {
case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
case V4L2_CID_RF_TUNER_BANDWIDTH:
/* TODO: these controls should be moved to tuner drivers */
- if (s->bandwidth_auto->val) {
+ if (dev->bandwidth_auto->val) {
/* Round towards the closest legal value */
- s32 val = s->f_adc + div_u64(s->bandwidth->step, 2);
+ s32 val = dev->f_adc + div_u64(dev->bandwidth->step, 2);
u32 offset;
- val = clamp_t(s32, val, s->bandwidth->minimum,
- s->bandwidth->maximum);
- offset = val - s->bandwidth->minimum;
- offset = s->bandwidth->step *
- div_u64(offset, s->bandwidth->step);
- s->bandwidth->val = s->bandwidth->minimum + offset;
+ val = clamp_t(s32, val, dev->bandwidth->minimum,
+ dev->bandwidth->maximum);
+ offset = val - dev->bandwidth->minimum;
+ offset = dev->bandwidth->step *
+ div_u64(offset, dev->bandwidth->step);
+ dev->bandwidth->val = dev->bandwidth->minimum + offset;
}
- c->bandwidth_hz = s->bandwidth->val;
+ c->bandwidth_hz = dev->bandwidth->val;
- if (!test_bit(POWER_ON, &s->flags))
+ if (!test_bit(POWER_ON, &dev->flags))
return 0;
if (fe->ops.tuner_ops.set_params)
@@ -1399,154 +1283,195 @@ static const struct v4l2_ctrl_ops rtl2832_sdr_ctrl_ops = {
static void rtl2832_sdr_video_release(struct v4l2_device *v)
{
- struct rtl2832_sdr_state *s =
- container_of(v, struct rtl2832_sdr_state, v4l2_dev);
+ struct rtl2832_sdr_dev *dev =
+ container_of(v, struct rtl2832_sdr_dev, v4l2_dev);
+ struct platform_device *pdev = dev->pdev;
+
+ dev_dbg(&pdev->dev, "\n");
- v4l2_ctrl_handler_free(&s->hdl);
- v4l2_device_unregister(&s->v4l2_dev);
- kfree(s);
+ v4l2_ctrl_handler_free(&dev->hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
}
-struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
- struct v4l2_subdev *sd)
+/* Platform driver interface */
+static int rtl2832_sdr_probe(struct platform_device *pdev)
{
- int ret;
- struct rtl2832_sdr_state *s;
+ struct rtl2832_sdr_dev *dev;
+ struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
const struct v4l2_ctrl_ops *ops = &rtl2832_sdr_ctrl_ops;
- struct dvb_usb_device *d = i2c_get_adapdata(i2c);
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "\n");
- s = kzalloc(sizeof(struct rtl2832_sdr_state), GFP_KERNEL);
- if (s == NULL) {
- dev_err(&d->udev->dev,
- "Could not allocate memory for rtl2832_sdr_state\n");
- return NULL;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Cannot proceed without platform data\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ if (!pdev->dev.parent->driver) {
+ dev_dbg(&pdev->dev, "No parent device\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ /* try to refcount the host driver since we are the consumer */
+ if (!try_module_get(pdev->dev.parent->driver->owner)) {
+ dev_err(&pdev->dev, "Refcount fail");
+ ret = -EINVAL;
+ goto err;
+ }
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
+ goto err_module_put;
}
/* setup the state */
- s->fe = fe;
- s->d = d;
- s->udev = d->udev;
- s->i2c = i2c;
- s->cfg = cfg;
- s->f_adc = bands_adc[0].rangelow;
- s->f_tuner = bands_fm[0].rangelow;
- s->pixelformat = formats[0].pixelformat;
- s->buffersize = formats[0].buffersize;
- s->num_formats = NUM_FORMATS;
+ subdev = pdata->v4l2_subdev;
+ dev->pdev = pdev;
+ dev->udev = pdata->dvb_usb_device->udev;
+ dev->f_adc = bands_adc[0].rangelow;
+ dev->f_tuner = bands_fm[0].rangelow;
+ dev->pixelformat = formats[0].pixelformat;
+ dev->buffersize = formats[0].buffersize;
+ dev->num_formats = NUM_FORMATS;
if (!rtl2832_sdr_emulated_fmt)
- s->num_formats -= 1;
+ dev->num_formats -= 1;
- mutex_init(&s->v4l2_lock);
- mutex_init(&s->vb_queue_lock);
- spin_lock_init(&s->queued_bufs_lock);
- INIT_LIST_HEAD(&s->queued_bufs);
+ mutex_init(&dev->v4l2_lock);
+ mutex_init(&dev->vb_queue_lock);
+ spin_lock_init(&dev->queued_bufs_lock);
+ INIT_LIST_HEAD(&dev->queued_bufs);
/* Init videobuf2 queue structure */
- s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
- s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
- s->vb_queue.drv_priv = s;
- s->vb_queue.buf_struct_size = sizeof(struct rtl2832_sdr_frame_buf);
- s->vb_queue.ops = &rtl2832_sdr_vb2_ops;
- s->vb_queue.mem_ops = &vb2_vmalloc_memops;
- s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- ret = vb2_queue_init(&s->vb_queue);
+ dev->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ dev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ dev->vb_queue.drv_priv = dev;
+ dev->vb_queue.buf_struct_size = sizeof(struct rtl2832_sdr_frame_buf);
+ dev->vb_queue.ops = &rtl2832_sdr_vb2_ops;
+ dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&dev->vb_queue);
if (ret) {
- dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
- goto err_free_mem;
+ dev_err(&pdev->dev, "Could not initialize vb2 queue\n");
+ goto err_kfree;
}
/* Register controls */
- switch (s->cfg->tuner) {
- case RTL2832_TUNER_E4000:
- v4l2_ctrl_handler_init(&s->hdl, 9);
- if (sd)
- v4l2_ctrl_add_handler(&s->hdl, sd->ctrl_handler, NULL);
+ switch (pdata->tuner) {
+ case RTL2832_SDR_TUNER_E4000:
+ v4l2_ctrl_handler_init(&dev->hdl, 9);
+ if (subdev)
+ v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, NULL);
break;
- case RTL2832_TUNER_R820T:
- v4l2_ctrl_handler_init(&s->hdl, 2);
- s->bandwidth_auto = v4l2_ctrl_new_std(&s->hdl, ops,
- V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
- 0, 1, 1, 1);
- s->bandwidth = v4l2_ctrl_new_std(&s->hdl, ops,
- V4L2_CID_RF_TUNER_BANDWIDTH,
- 0, 8000000, 100000, 0);
- v4l2_ctrl_auto_cluster(2, &s->bandwidth_auto, 0, false);
+ case RTL2832_SDR_TUNER_R820T:
+ case RTL2832_SDR_TUNER_R828D:
+ v4l2_ctrl_handler_init(&dev->hdl, 2);
+ dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
+ 0, 1, 1, 1);
+ dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH,
+ 0, 8000000, 100000, 0);
+ v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
break;
- case RTL2832_TUNER_FC0012:
- case RTL2832_TUNER_FC0013:
- v4l2_ctrl_handler_init(&s->hdl, 2);
- s->bandwidth_auto = v4l2_ctrl_new_std(&s->hdl, ops,
- V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
- 0, 1, 1, 1);
- s->bandwidth = v4l2_ctrl_new_std(&s->hdl, ops,
- V4L2_CID_RF_TUNER_BANDWIDTH,
- 6000000, 8000000, 1000000,
- 6000000);
- v4l2_ctrl_auto_cluster(2, &s->bandwidth_auto, 0, false);
+ case RTL2832_SDR_TUNER_FC0012:
+ case RTL2832_SDR_TUNER_FC0013:
+ v4l2_ctrl_handler_init(&dev->hdl, 2);
+ dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
+ 0, 1, 1, 1);
+ dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH,
+ 6000000, 8000000, 1000000,
+ 6000000);
+ v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
break;
default:
- v4l2_ctrl_handler_init(&s->hdl, 0);
- dev_notice(&s->udev->dev, "%s: Unsupported tuner\n",
- KBUILD_MODNAME);
- goto err_free_controls;
+ v4l2_ctrl_handler_init(&dev->hdl, 0);
+ dev_err(&pdev->dev, "Unsupported tuner\n");
+ goto err_v4l2_ctrl_handler_free;
}
-
- if (s->hdl.error) {
- ret = s->hdl.error;
- dev_err(&s->udev->dev, "Could not initialize controls\n");
- goto err_free_controls;
+ if (dev->hdl.error) {
+ ret = dev->hdl.error;
+ dev_err(&pdev->dev, "Could not initialize controls\n");
+ goto err_v4l2_ctrl_handler_free;
}
/* Init video_device structure */
- s->vdev = rtl2832_sdr_template;
- s->vdev.queue = &s->vb_queue;
- s->vdev.queue->lock = &s->vb_queue_lock;
- video_set_drvdata(&s->vdev, s);
+ dev->vdev = rtl2832_sdr_template;
+ dev->vdev.queue = &dev->vb_queue;
+ dev->vdev.queue->lock = &dev->vb_queue_lock;
+ video_set_drvdata(&dev->vdev, dev);
/* Register the v4l2_device structure */
- s->v4l2_dev.release = rtl2832_sdr_video_release;
- ret = v4l2_device_register(&s->udev->dev, &s->v4l2_dev);
+ dev->v4l2_dev.release = rtl2832_sdr_video_release;
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
- dev_err(&s->udev->dev,
- "Failed to register v4l2-device (%d)\n", ret);
- goto err_free_controls;
+ dev_err(&pdev->dev, "Failed to register v4l2-device %d\n", ret);
+ goto err_v4l2_ctrl_handler_free;
}
- s->v4l2_dev.ctrl_handler = &s->hdl;
- s->vdev.v4l2_dev = &s->v4l2_dev;
- s->vdev.lock = &s->v4l2_lock;
- s->vdev.vfl_dir = VFL_DIR_RX;
+ dev->v4l2_dev.ctrl_handler = &dev->hdl;
+ dev->vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->vdev.lock = &dev->v4l2_lock;
+ dev->vdev.vfl_dir = VFL_DIR_RX;
- ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
+ ret = video_register_device(&dev->vdev, VFL_TYPE_SDR, -1);
if (ret) {
- dev_err(&s->udev->dev,
- "Failed to register as video device (%d)\n",
- ret);
- goto err_unregister_v4l2_dev;
+ dev_err(&pdev->dev, "Failed to register as video device %d\n",
+ ret);
+ goto err_v4l2_device_unregister;
}
- dev_info(&s->udev->dev, "Registered as %s\n",
- video_device_node_name(&s->vdev));
-
- fe->sec_priv = s;
- fe->ops.release_sec = rtl2832_sdr_release_sec;
-
- dev_info(&s->i2c->dev, "%s: Realtek RTL2832 SDR attached\n",
- KBUILD_MODNAME);
- dev_notice(&s->udev->dev,
- "%s: SDR API is still slightly experimental and functionality changes may follow\n",
- KBUILD_MODNAME);
- return fe;
-
-err_unregister_v4l2_dev:
- v4l2_device_unregister(&s->v4l2_dev);
-err_free_controls:
- v4l2_ctrl_handler_free(&s->hdl);
-err_free_mem:
- kfree(s);
- return NULL;
+ dev_info(&pdev->dev, "Registered as %s\n",
+ video_device_node_name(&dev->vdev));
+ dev_info(&pdev->dev, "Realtek RTL2832 SDR attached\n");
+ dev_notice(&pdev->dev,
+ "SDR API is still slightly experimental and functionality changes may follow\n");
+ platform_set_drvdata(pdev, dev);
+ return 0;
+err_v4l2_device_unregister:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&dev->hdl);
+err_kfree:
+ kfree(dev);
+err_module_put:
+ module_put(pdev->dev.parent->driver->owner);
+err:
+ return ret;
}
-EXPORT_SYMBOL(rtl2832_sdr_attach);
+
+static int rtl2832_sdr_remove(struct platform_device *pdev)
+{
+ struct rtl2832_sdr_dev *dev = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "\n");
+
+ mutex_lock(&dev->vb_queue_lock);
+ mutex_lock(&dev->v4l2_lock);
+ /* No need to keep the urbs around after disconnection */
+ dev->udev = NULL;
+ v4l2_device_disconnect(&dev->v4l2_dev);
+ video_unregister_device(&dev->vdev);
+ mutex_unlock(&dev->v4l2_lock);
+ mutex_unlock(&dev->vb_queue_lock);
+ v4l2_device_put(&dev->v4l2_dev);
+ module_put(pdev->dev.parent->driver->owner);
+
+ return 0;
+}
+
+static struct platform_driver rtl2832_sdr_driver = {
+ .driver = {
+ .name = "rtl2832_sdr",
+ .owner = THIS_MODULE,
+ },
+ .probe = rtl2832_sdr_probe,
+ .remove = rtl2832_sdr_remove,
+};
+module_platform_driver(rtl2832_sdr_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Realtek RTL2832 SDR driver");
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.h b/drivers/media/dvb-frontends/rtl2832_sdr.h
index b865fadf184f..d2594768bff2 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.h
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.h
@@ -20,35 +20,48 @@
* GNU Radio plugin "gr-kernel" for device usage will be on:
* http://git.linuxtv.org/anttip/gr-kernel.git
*
- * TODO:
- * Help is very highly welcome for these + all the others you could imagine:
- * - move controls to V4L2 API
- * - use libv4l2 for stream format conversions
- * - gr-kernel: switch to v4l2_mmap (current read eats a lot of cpu)
- * - SDRSharp support
*/
#ifndef RTL2832_SDR_H
#define RTL2832_SDR_H
-#include <linux/kconfig.h>
+#include <linux/i2c.h>
#include <media/v4l2-subdev.h>
+#include "dvb_frontend.h"
-/* for config struct */
-#include "rtl2832.h"
+/**
+ * struct rtl2832_sdr_platform_data - Platform data for the rtl2832_sdr driver
+ * @clk: Clock frequency (4000000, 16000000, 25000000, 28800000).
+ * @tuner: Used tuner model.
+ * @i2c_client: rtl2832 demod driver I2C client.
+ * @bulk_read: rtl2832 driver private I/O interface.
+ * @bulk_write: rtl2832 driver private I/O interface.
+ * @update_bits: rtl2832 driver private I/O interface.
+ * @dvb_frontend: rtl2832 DVB frontend.
+ * @v4l2_subdev: Tuner v4l2 controls.
+ * @dvb_usb_device: DVB USB interface for USB streaming.
+ */
+
+struct rtl2832_sdr_platform_data {
+ u32 clk;
+ /*
+ * XXX: This list must be kept in sync with the dvb_usb_rtl28xxu USB IF driver.
+ */
+#define RTL2832_SDR_TUNER_TUA9001 0x24
+#define RTL2832_SDR_TUNER_FC0012 0x26
+#define RTL2832_SDR_TUNER_E4000 0x27
+#define RTL2832_SDR_TUNER_FC0013 0x29
+#define RTL2832_SDR_TUNER_R820T 0x2a
+#define RTL2832_SDR_TUNER_R828D 0x2b
+ u8 tuner;
-#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
-extern struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
- struct v4l2_subdev *sd);
-#else
-static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
- struct v4l2_subdev *sd)
-{
- dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
+ struct i2c_client *i2c_client;
+ int (*bulk_read)(struct i2c_client *, unsigned int, void *, size_t);
+ int (*bulk_write)(struct i2c_client *, unsigned int, const void *, size_t);
+ int (*update_bits)(struct i2c_client *, unsigned int, unsigned int, unsigned int);
+ struct dvb_frontend *dvb_frontend;
+ struct v4l2_subdev *v4l2_subdev;
+ struct dvb_usb_device *dvb_usb_device;
+};
#endif /* RTL2832_SDR_H */
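For context: with rtl2832_sdr_attach() gone, a bridge driver (dvb_usb_rtl28xxu in practice) is expected to fill this platform data and register a matching "rtl2832_sdr" platform device under the demod's parent. The sketch below is illustrative only and not part of this patch; the helper name and the PLATFORM_DEVID_AUTO id are assumptions.

/* Hedged sketch: handing the platform data to the rtl2832_sdr platform driver */
#include <linux/platform_device.h>
#include "rtl2832_sdr.h"

static struct platform_device *
example_bind_rtl2832_sdr(struct device *parent,
			 struct rtl2832_sdr_platform_data *pdata)
{
	/* "rtl2832_sdr" must match rtl2832_sdr_driver.driver.name */
	return platform_device_register_data(parent, "rtl2832_sdr",
					     PLATFORM_DEVID_AUTO,
					     pdata, sizeof(*pdata));
}

platform_device_register_data() copies the data, so rtl2832_sdr_probe() reads it back from pdev->dev.platform_data exactly as the .c hunks above show.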
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index f71b06221e14..5ff474a7ff29 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -1021,9 +1021,3 @@ static struct dvb_frontend_ops s5h1409_ops = {
MODULE_DESCRIPTION("Samsung S5H1409 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/dvb-frontends/s5h1409.h b/drivers/media/dvb-frontends/s5h1409.h
index 63b1e0a34e4e..9e143f5c8107 100644
--- a/drivers/media/dvb-frontends/s5h1409.h
+++ b/drivers/media/dvb-frontends/s5h1409.h
@@ -81,8 +81,3 @@ static inline struct dvb_frontend *s5h1409_attach(
#endif /* CONFIG_DVB_S5H1409 */
#endif /* __S5H1409_H__ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 6cc4b7a9dd60..64f35fed7ae1 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -944,8 +944,3 @@ MODULE_PARM_DESC(debug, "Enable verbose debug messages");
MODULE_DESCRIPTION("Samsung S5H1411 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/dvb-frontends/s5h1411.h b/drivers/media/dvb-frontends/s5h1411.h
index e4f56871f982..1d7deb615674 100644
--- a/drivers/media/dvb-frontends/s5h1411.h
+++ b/drivers/media/dvb-frontends/s5h1411.h
@@ -83,8 +83,3 @@ static inline struct dvb_frontend *s5h1411_attach(
#endif /* CONFIG_DVB_S5H1411 */
#endif /* __S5H1411_H__ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index ce9ab442b4b6..5db588ebfc24 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -19,16 +19,17 @@
static const struct dvb_frontend_ops si2168_ops;
/* execute firmware command */
-static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
+static int si2168_cmd_execute(struct i2c_client *client, struct si2168_cmd *cmd)
{
+ struct si2168_dev *dev = i2c_get_clientdata(client);
int ret;
unsigned long timeout;
- mutex_lock(&s->i2c_mutex);
+ mutex_lock(&dev->i2c_mutex);
if (cmd->wlen) {
/* write cmd and args for firmware */
- ret = i2c_master_send(s->client, cmd->args, cmd->wlen);
+ ret = i2c_master_send(client, cmd->args, cmd->wlen);
if (ret < 0) {
goto err_mutex_unlock;
} else if (ret != cmd->wlen) {
@@ -39,10 +40,10 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
if (cmd->rlen) {
/* wait for cmd execution to terminate */
- #define TIMEOUT 50
+ #define TIMEOUT 70
timeout = jiffies + msecs_to_jiffies(TIMEOUT);
while (!time_after(jiffies, timeout)) {
- ret = i2c_master_recv(s->client, cmd->args, cmd->rlen);
+ ret = i2c_master_recv(client, cmd->args, cmd->rlen);
if (ret < 0) {
goto err_mutex_unlock;
} else if (ret != cmd->rlen) {
@@ -55,7 +56,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
break;
}
- dev_dbg(&s->client->dev, "cmd execution took %d ms\n",
+ dev_dbg(&client->dev, "cmd execution took %d ms\n",
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
@@ -65,29 +66,26 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
}
}
- ret = 0;
+ mutex_unlock(&dev->i2c_mutex);
+ return 0;
err_mutex_unlock:
- mutex_unlock(&s->i2c_mutex);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ mutex_unlock(&dev->i2c_mutex);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2168_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
- struct si2168 *s = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
struct si2168_cmd cmd;
*status = 0;
- if (!s->active) {
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
@@ -113,21 +111,10 @@ static int si2168_read_status(struct dvb_frontend *fe, fe_status_t *status)
goto err;
}
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- /*
- * Possible values seen, in order from strong signal to weak:
- * 16 0001 0110 full lock
- * 1e 0001 1110 partial lock
- * 1a 0001 1010 partial lock
- * 18 0001 1000 no lock
- *
- * [b3:b1] lock bits
- * [b4] statistics ready? Set in a few secs after lock is gained.
- */
-
switch ((cmd.args[2] >> 1) & 0x03) {
case 0x01:
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
@@ -138,7 +125,7 @@ static int si2168_read_status(struct dvb_frontend *fe, fe_status_t *status)
break;
}
- s->fe_status = *status;
+ dev->fe_status = *status;
if (*status & FE_HAS_LOCK) {
c->cnr.len = 1;
@@ -149,30 +136,31 @@ static int si2168_read_status(struct dvb_frontend *fe, fe_status_t *status)
c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- dev_dbg(&s->client->dev, "status=%02x args=%*ph\n",
+ dev_dbg(&client->dev, "status=%02x args=%*ph\n",
*status, cmd.rlen, cmd.args);
return 0;
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2168_set_frontend(struct dvb_frontend *fe)
{
- struct si2168 *s = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
struct si2168_cmd cmd;
u8 bandwidth, delivery_system;
- dev_dbg(&s->client->dev,
- "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%u, stream_id=%d\n",
- c->delivery_system, c->modulation,
- c->frequency, c->bandwidth_hz, c->symbol_rate,
- c->inversion, c->stream_id);
+ dev_dbg(&client->dev,
+ "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%u stream_id=%u\n",
+ c->delivery_system, c->modulation, c->frequency,
+ c->bandwidth_hz, c->symbol_rate, c->inversion,
+ c->stream_id);
- if (!s->active) {
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
@@ -192,7 +180,12 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
goto err;
}
- if (c->bandwidth_hz <= 5000000)
+ if (c->bandwidth_hz == 0) {
+ ret = -EINVAL;
+ goto err;
+ } else if (c->bandwidth_hz <= 2000000)
+ bandwidth = 0x02;
+ else if (c->bandwidth_hz <= 5000000)
bandwidth = 0x05;
else if (c->bandwidth_hz <= 6000000)
bandwidth = 0x06;
@@ -217,7 +210,7 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
memcpy(cmd.args, "\x88\x02\x02\x02\x02", 5);
cmd.wlen = 5;
cmd.rlen = 5;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -230,7 +223,7 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
memcpy(cmd.args, "\x89\x21\x06\x11\x89\x20", 6);
cmd.wlen = 6;
cmd.rlen = 3;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -241,7 +234,7 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
cmd.args[2] = c->stream_id == NO_STREAM_ID_FILTER ? 0 : 1;
cmd.wlen = 3;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
}
@@ -249,35 +242,35 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
memcpy(cmd.args, "\x51\x03", 2);
cmd.wlen = 2;
cmd.rlen = 12;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x12\x08\x04", 3);
cmd.wlen = 3;
cmd.rlen = 3;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x0c\x10\x12\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x06\x10\x24\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x07\x10\x00\x24", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -285,18 +278,18 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
cmd.args[4] = delivery_system | bandwidth;
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
/* set DVB-C symbol rate */
if (c->delivery_system == SYS_DVBC_ANNEX_A) {
memcpy(cmd.args, "\x14\x00\x02\x11", 4);
- cmd.args[4] = (c->symbol_rate / 1000) & 0xff;
+ cmd.args[4] = ((c->symbol_rate / 1000) >> 0) & 0xff;
cmd.args[5] = ((c->symbol_rate / 1000) >> 8) & 0xff;
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
}
@@ -304,88 +297,88 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
memcpy(cmd.args, "\x14\x00\x0f\x10\x10\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x08", 6);
- cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
+ cmd.args[5] |= dev->ts_clock_inv ? 0x00 : 0x10;
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x05", 6);
- cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
+ cmd.args[5] |= dev->ts_clock_inv ? 0x00 : 0x10;
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x01\x12\x00\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x14\x00\x01\x03\x0c\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x85", 1);
cmd.wlen = 1;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- s->delivery_system = c->delivery_system;
+ dev->delivery_system = c->delivery_system;
return 0;
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2168_init(struct dvb_frontend *fe)
{
- struct si2168 *s = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
int ret, len, remaining;
- const struct firmware *fw = NULL;
- u8 *fw_file;
- const unsigned int i2c_wr_max = 8;
+ const struct firmware *fw;
+ const char *fw_name;
struct si2168_cmd cmd;
unsigned int chip_id;
- dev_dbg(&s->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
/* initialize */
memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
cmd.wlen = 13;
cmd.rlen = 0;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- if (s->fw_loaded) {
+ if (dev->fw_loaded) {
/* resume */
memcpy(cmd.args, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8);
cmd.wlen = 8;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
memcpy(cmd.args, "\x85", 1);
cmd.wlen = 1;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -396,7 +389,7 @@ static int si2168_init(struct dvb_frontend *fe)
memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
cmd.wlen = 8;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -404,7 +397,7 @@ static int si2168_init(struct dvb_frontend *fe)
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
cmd.rlen = 13;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -417,50 +410,48 @@ static int si2168_init(struct dvb_frontend *fe)
switch (chip_id) {
case SI2168_A20:
- fw_file = SI2168_A20_FIRMWARE;
+ fw_name = SI2168_A20_FIRMWARE;
break;
case SI2168_A30:
- fw_file = SI2168_A30_FIRMWARE;
+ fw_name = SI2168_A30_FIRMWARE;
break;
case SI2168_B40:
- fw_file = SI2168_B40_FIRMWARE;
+ fw_name = SI2168_B40_FIRMWARE;
break;
default:
- dev_err(&s->client->dev,
- "unknown chip version Si21%d-%c%c%c\n",
+ dev_err(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
cmd.args[2], cmd.args[1],
cmd.args[3], cmd.args[4]);
ret = -EINVAL;
goto err;
}
- /* cold state - try to download firmware */
- dev_info(&s->client->dev, "found a '%s' in cold state\n",
- si2168_ops.info.name);
+ dev_info(&client->dev, "found a 'Silicon Labs Si21%d-%c%c%c'\n",
+ cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
/* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &s->client->dev);
+ ret = request_firmware(&fw, fw_name, &client->dev);
if (ret) {
/* fallback mechanism to handle old name for Si2168 B40 fw */
if (chip_id == SI2168_B40) {
- fw_file = SI2168_B40_FIRMWARE_FALLBACK;
- ret = request_firmware(&fw, fw_file, &s->client->dev);
+ fw_name = SI2168_B40_FIRMWARE_FALLBACK;
+ ret = request_firmware(&fw, fw_name, &client->dev);
}
if (ret == 0) {
- dev_notice(&s->client->dev,
+ dev_notice(&client->dev,
"please install firmware file '%s'\n",
SI2168_B40_FIRMWARE);
} else {
- dev_err(&s->client->dev,
+ dev_err(&client->dev,
"firmware file '%s' not found\n",
- fw_file);
- goto error_fw_release;
+ fw_name);
+ goto err_release_firmware;
}
}
- dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
- fw_file);
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ fw_name);
if ((fw->size % 17 == 0) && (fw->data[0] > 5)) {
/* firmware is in the new format */
@@ -469,41 +460,37 @@ static int si2168_init(struct dvb_frontend *fe)
memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
cmd.wlen = len;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret) {
- dev_err(&s->client->dev,
- "firmware download failed=%d\n",
- ret);
- goto error_fw_release;
- }
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ break;
}
- } else {
+ } else if (fw->size % 8 == 0) {
/* firmware is in the old format */
- for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) {
- len = remaining;
- if (len > i2c_wr_max)
- len = i2c_wr_max;
-
+ for (remaining = fw->size; remaining > 0; remaining -= 8) {
+ len = 8;
memcpy(cmd.args, &fw->data[fw->size - remaining], len);
cmd.wlen = len;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret) {
- dev_err(&s->client->dev,
- "firmware download failed=%d\n",
- ret);
- goto error_fw_release;
- }
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ break;
}
+ } else {
+ /* bad or unknown firmware format */
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(&client->dev, "firmware download failed %d\n", ret);
+ goto err_release_firmware;
}
release_firmware(fw);
- fw = NULL;
memcpy(cmd.args, "\x01\x01", 2);
cmd.wlen = 2;
cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -511,58 +498,56 @@ static int si2168_init(struct dvb_frontend *fe)
memcpy(cmd.args, "\x11", 1);
cmd.wlen = 1;
cmd.rlen = 10;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- dev_dbg(&s->client->dev, "firmware version: %c.%c.%d\n",
+ dev_info(&client->dev, "firmware version: %c.%c.%d\n",
cmd.args[6], cmd.args[7], cmd.args[8]);
/* set ts mode */
memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
- cmd.args[4] |= s->ts_mode;
+ cmd.args[4] |= dev->ts_mode;
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- s->fw_loaded = true;
-
- dev_info(&s->client->dev, "found a '%s' in warm state\n",
- si2168_ops.info.name);
+ dev->fw_loaded = true;
warm:
- s->active = true;
+ dev->active = true;
return 0;
-error_fw_release:
+err_release_firmware:
release_firmware(fw);
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2168_sleep(struct dvb_frontend *fe)
{
- struct si2168 *s = fe->demodulator_priv;
+ struct i2c_client *client = fe->demodulator_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
int ret;
struct si2168_cmd cmd;
- dev_dbg(&s->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
- s->active = false;
+ dev->active = false;
memcpy(cmd.args, "\x13", 1);
cmd.wlen = 1;
cmd.rlen = 0;
- ret = si2168_cmd_execute(s, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
return 0;
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -581,21 +566,22 @@ static int si2168_get_tune_settings(struct dvb_frontend *fe,
*/
static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
{
- struct si2168 *s = mux_priv;
+ struct i2c_client *client = mux_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
int ret;
struct i2c_msg gate_open_msg = {
- .addr = s->client->addr,
+ .addr = client->addr,
.flags = 0,
.len = 3,
.buf = "\xc0\x0d\x01",
};
- mutex_lock(&s->i2c_mutex);
+ mutex_lock(&dev->i2c_mutex);
/* open tuner I2C gate */
- ret = __i2c_transfer(s->client->adapter, &gate_open_msg, 1);
+ ret = __i2c_transfer(client->adapter, &gate_open_msg, 1);
if (ret != 1) {
- dev_warn(&s->client->dev, "i2c write failed=%d\n", ret);
+ dev_warn(&client->dev, "i2c write failed=%d\n", ret);
if (ret >= 0)
ret = -EREMOTEIO;
} else {
@@ -607,26 +593,27 @@ static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
static int si2168_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
{
- struct si2168 *s = mux_priv;
+ struct i2c_client *client = mux_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
int ret;
struct i2c_msg gate_close_msg = {
- .addr = s->client->addr,
+ .addr = client->addr,
.flags = 0,
.len = 3,
.buf = "\xc0\x0d\x00",
};
/* close tuner I2C gate */
- ret = __i2c_transfer(s->client->adapter, &gate_close_msg, 1);
+ ret = __i2c_transfer(client->adapter, &gate_close_msg, 1);
if (ret != 1) {
- dev_warn(&s->client->dev, "i2c write failed=%d\n", ret);
+ dev_warn(&client->dev, "i2c write failed=%d\n", ret);
if (ret >= 0)
ret = -EREMOTEIO;
} else {
ret = 0;
}
- mutex_unlock(&s->i2c_mutex);
+ mutex_unlock(&dev->i2c_mutex);
return ret;
}
@@ -635,6 +622,8 @@ static const struct dvb_frontend_ops si2168_ops = {
.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Silicon Labs Si2168",
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 7200000,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -670,71 +659,69 @@ static int si2168_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct si2168_config *config = client->dev.platform_data;
- struct si2168 *s;
+ struct si2168_dev *dev;
int ret;
dev_dbg(&client->dev, "\n");
- s = kzalloc(sizeof(struct si2168), GFP_KERNEL);
- if (!s) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
ret = -ENOMEM;
dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
- s->client = client;
- mutex_init(&s->i2c_mutex);
+ mutex_init(&dev->i2c_mutex);
/* create mux i2c adapter for tuner */
- s->adapter = i2c_add_mux_adapter(client->adapter, &client->dev, s,
- 0, 0, 0, si2168_select, si2168_deselect);
- if (s->adapter == NULL) {
+ dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
+ client, 0, 0, 0, si2168_select, si2168_deselect);
+ if (dev->adapter == NULL) {
ret = -ENODEV;
- goto err;
+ goto err_kfree;
}
/* create dvb_frontend */
- memcpy(&s->fe.ops, &si2168_ops, sizeof(struct dvb_frontend_ops));
- s->fe.demodulator_priv = s;
-
- *config->i2c_adapter = s->adapter;
- *config->fe = &s->fe;
- s->ts_mode = config->ts_mode;
- s->ts_clock_inv = config->ts_clock_inv;
- s->fw_loaded = false;
+ memcpy(&dev->fe.ops, &si2168_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = client;
+ *config->i2c_adapter = dev->adapter;
+ *config->fe = &dev->fe;
+ dev->ts_mode = config->ts_mode;
+ dev->ts_clock_inv = config->ts_clock_inv;
+ dev->fw_loaded = false;
- i2c_set_clientdata(client, s);
+ i2c_set_clientdata(client, dev);
- dev_info(&s->client->dev,
- "Silicon Labs Si2168 successfully attached\n");
+ dev_info(&client->dev, "Silicon Labs Si2168 successfully attached\n");
return 0;
+err_kfree:
+ kfree(dev);
err:
- kfree(s);
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2168_remove(struct i2c_client *client)
{
- struct si2168 *s = i2c_get_clientdata(client);
+ struct si2168_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
- i2c_del_mux_adapter(s->adapter);
+ i2c_del_mux_adapter(dev->adapter);
- s->fe.ops.release = NULL;
- s->fe.demodulator_priv = NULL;
+ dev->fe.ops.release = NULL;
+ dev->fe.demodulator_priv = NULL;
- kfree(s);
+ kfree(dev);
return 0;
}
-static const struct i2c_device_id si2168_id[] = {
+static const struct i2c_device_id si2168_id_table[] = {
{"si2168", 0},
{}
};
-MODULE_DEVICE_TABLE(i2c, si2168_id);
+MODULE_DEVICE_TABLE(i2c, si2168_id_table);
static struct i2c_driver si2168_driver = {
.driver = {
@@ -743,7 +730,7 @@ static struct i2c_driver si2168_driver = {
},
.probe = si2168_probe,
.remove = si2168_remove,
- .id_table = si2168_id,
+ .id_table = si2168_id_table,
};
module_i2c_driver(si2168_driver);
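Two details from the hunks above are worth spelling out. First, fe->demodulator_priv now carries the i2c_client, and the driver state is recovered with i2c_get_clientdata(); second, the DVB-C symbol rate is programmed in kSym/s as two little-endian bytes. A worked example of the byte split (illustrative values, not from the patch):

/* 6.9 MSym/s, a common DVB-C rate, encoded as in si2168_set_frontend() */
u32 symbol_rate = 6900000;          /* Sym/s from dtv_frontend_properties */
u32 ksyms = symbol_rate / 1000;     /* 6900 == 0x1af4 */
u8 lo = (ksyms >> 0) & 0xff;        /* 0xf4 -> cmd.args[4] */
u8 hi = (ksyms >> 8) & 0xff;        /* 0x1a -> cmd.args[5] */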
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 87bc12146667..70d702ae6f49 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -36,14 +36,12 @@ struct si2168_config {
struct i2c_adapter **i2c_adapter;
/* TS mode */
+#define SI2168_TS_PARALLEL 0x06
+#define SI2168_TS_SERIAL 0x03
u8 ts_mode;
/* TS clock inverted */
bool ts_clock_inv;
-
};
-#define SI2168_TS_PARALLEL 0x06
-#define SI2168_TS_SERIAL 0x03
-
#endif
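For context, a bridge driver consumes this config as I2C board-info platform data when it instantiates the demod; the two pointer members are filled back in by si2168_probe() with the created frontend and the tuner-side mux adapter. A minimal sketch under that assumption (the 0x64 address, function name and serial TS mode are illustrative, not mandated by this patch):

/* Hedged sketch: attaching si2168 from a bridge driver */
#include <linux/i2c.h>
#include "si2168.h"

static struct i2c_client *example_attach_si2168(struct i2c_adapter *adap,
						struct dvb_frontend **fe,
						struct i2c_adapter **tuner_i2c)
{
	struct si2168_config cfg = {
		.fe = fe,                   /* set by si2168_probe() */
		.i2c_adapter = tuner_i2c,   /* tuner mux adapter, also set by probe */
		.ts_mode = SI2168_TS_SERIAL,
		.ts_clock_inv = false,
	};
	struct i2c_board_info info = {
		.type = "si2168",
		.addr = 0x64,               /* illustrative slave address */
		.platform_data = &cfg,
	};

	/* assumes the si2168 module is loaded so probing runs here; NULL on failure */
	return i2c_new_device(adap, &info);
}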
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 60bc3349b6c3..aadd1367673f 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -28,8 +28,7 @@
#define SI2168_B40_FIRMWARE_FALLBACK "dvb-demod-si2168-02.fw"
/* state struct */
-struct si2168 {
- struct i2c_client *client;
+struct si2168_dev {
struct i2c_adapter *adapter;
struct mutex i2c_mutex;
struct dvb_frontend fe;
diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c
index 93596e0e640b..3012f196e9bd 100644
--- a/drivers/media/dvb-frontends/stb0899_algo.c
+++ b/drivers/media/dvb-frontends/stb0899_algo.c
@@ -19,6 +19,7 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitops.h>
#include "stb0899_drv.h"
#include "stb0899_priv.h"
#include "stb0899_reg.h"
@@ -1490,9 +1491,7 @@ enum stb0899_status stb0899_dvbs2_algo(struct stb0899_state *state)
/* Store signal parameters */
offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
- /* sign extend 30 bit value before using it in calculations */
- if (offsetfreq & (1 << 29))
- offsetfreq |= -1 << 30;
+ offsetfreq = sign_extend32(offsetfreq, 29);
offsetfreq = offsetfreq / ((1 << 30) / 1000);
offsetfreq *= (internal->master_clk / 1000000);
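The removed open-coded sign extension and sign_extend32(value, 29) are equivalent: bit 29 is treated as the sign bit of the 30-bit CRL_FREQ field. A quick check, illustrative only:

/* Both forms agree for a 30-bit value with bit 29 set */
u32 raw = 0x20000000;                   /* negative frequency offset */
s32 old = raw;
if (old & (1 << 29))
	old |= -1 << 30;                /* the previous open-coded form */
s32 ext = sign_extend32(raw, 29);       /* from <linux/bitops.h>, included above */
/* old == ext == (s32)0xe0000000 == -536870912 */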
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 19646fbb061d..c73899d3a53d 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -20,6 +20,7 @@
*/
#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -691,7 +692,7 @@ static int stb0899_wait_diseqc_fifo_empty(struct stb0899_state *state, int timeo
reg = stb0899_read_reg(state, STB0899_DISSTATUS);
if (!STB0899_GETFIELD(FIFOFULL, reg))
break;
- if ((jiffies - start) > timeout) {
+ if (time_after(jiffies, start + timeout)) {
dprintk(state->verbose, FE_ERROR, 1, "timed out !!");
return -ETIMEDOUT;
}
@@ -733,7 +734,7 @@ static int stb0899_wait_diseqc_rxidle(struct stb0899_state *state, int timeout)
while (!STB0899_GETFIELD(RXEND, reg)) {
reg = stb0899_read_reg(state, STB0899_DISRX_ST0);
- if (jiffies - start > timeout) {
+ if (time_after(jiffies, start + timeout)) {
dprintk(state->verbose, FE_ERROR, 1, "timed out!!");
return -ETIMEDOUT;
}
@@ -782,7 +783,7 @@ static int stb0899_wait_diseqc_txidle(struct stb0899_state *state, int timeout)
while (!STB0899_GETFIELD(TXIDLE, reg)) {
reg = stb0899_read_reg(state, STB0899_DISSTATUS);
- if (jiffies - start > timeout) {
+ if (time_after(jiffies, start + timeout)) {
dprintk(state->verbose, FE_ERROR, 1, "timed out!!");
return -ETIMEDOUT;
}
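The timeout checks above now use time_after() from <linux/jiffies.h>, the kernel's idiomatic, wrap-safe way of comparing jiffies. The general shape of such a poll loop, with placeholder names (a sketch, not code from this driver):

/* Hedged sketch of the time_after() polling pattern */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int example_poll(bool (*condition_met)(void), unsigned int timeout_ms)
{
	unsigned long start = jiffies;

	while (!condition_met()) {
		if (time_after(jiffies, start + msecs_to_jiffies(timeout_ms)))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);   /* back off between reads */
	}
	return 0;
}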
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index b35d65c9cc05..dce22ce35d20 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -214,6 +214,7 @@ static int tc90522s_get_frontend(struct dvb_frontend *fe)
state = fe->demodulator_priv;
c = &fe->dtv_property_cache;
c->delivery_system = SYS_ISDBS;
+ c->symbol_rate = 28860000;
layers = 0;
ret = reg_read(state, 0xe6, val, 5);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 205d71364343..da58c9bb67c2 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -177,7 +177,7 @@ comment "Video decoders"
config VIDEO_ADV7180
tristate "Analog Devices ADV7180 decoder"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
---help---
Support for the Analog Devices ADV7180 video decoder.
@@ -196,7 +196,7 @@ config VIDEO_ADV7183
config VIDEO_ADV7604
tristate "Analog Devices ADV7604 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
---help---
Support for the Analog Devices ADV7604 video decoder.
@@ -208,7 +208,8 @@ config VIDEO_ADV7604
config VIDEO_ADV7842
tristate "Analog Devices ADV7842 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ select HDMI
---help---
Support for the Analog Devices ADV7842 video decoder.
@@ -422,7 +423,7 @@ config VIDEO_ADV7393
config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
---help---
Support for the Analog Devices ADV7511 video encoder.
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index bffe6eb528a3..b75878c27c2a 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -30,56 +30,60 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <linux/mutex.h>
-
-#define ADV7180_INPUT_CONTROL_REG 0x00
-#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM 0x00
-#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM_PED 0x10
-#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_J_SECAM 0x20
-#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_M_SECAM 0x30
-#define ADV7180_INPUT_CONTROL_NTSC_J 0x40
-#define ADV7180_INPUT_CONTROL_NTSC_M 0x50
-#define ADV7180_INPUT_CONTROL_PAL60 0x60
-#define ADV7180_INPUT_CONTROL_NTSC_443 0x70
-#define ADV7180_INPUT_CONTROL_PAL_BG 0x80
-#define ADV7180_INPUT_CONTROL_PAL_N 0x90
-#define ADV7180_INPUT_CONTROL_PAL_M 0xa0
-#define ADV7180_INPUT_CONTROL_PAL_M_PED 0xb0
-#define ADV7180_INPUT_CONTROL_PAL_COMB_N 0xc0
-#define ADV7180_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
-#define ADV7180_INPUT_CONTROL_PAL_SECAM 0xe0
-#define ADV7180_INPUT_CONTROL_PAL_SECAM_PED 0xf0
+#include <linux/delay.h>
+
+#define ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM 0x0
+#define ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM_PED 0x1
+#define ADV7180_STD_AD_PAL_N_NTSC_J_SECAM 0x2
+#define ADV7180_STD_AD_PAL_N_NTSC_M_SECAM 0x3
+#define ADV7180_STD_NTSC_J 0x4
+#define ADV7180_STD_NTSC_M 0x5
+#define ADV7180_STD_PAL60 0x6
+#define ADV7180_STD_NTSC_443 0x7
+#define ADV7180_STD_PAL_BG 0x8
+#define ADV7180_STD_PAL_N 0x9
+#define ADV7180_STD_PAL_M 0xa
+#define ADV7180_STD_PAL_M_PED 0xb
+#define ADV7180_STD_PAL_COMB_N 0xc
+#define ADV7180_STD_PAL_COMB_N_PED 0xd
+#define ADV7180_STD_PAL_SECAM 0xe
+#define ADV7180_STD_PAL_SECAM_PED 0xf
+
+#define ADV7180_REG_INPUT_CONTROL 0x0000
#define ADV7180_INPUT_CONTROL_INSEL_MASK 0x0f
-#define ADV7180_EXTENDED_OUTPUT_CONTROL_REG 0x04
+#define ADV7182_REG_INPUT_VIDSEL 0x0002
+
+#define ADV7180_REG_EXTENDED_OUTPUT_CONTROL 0x0004
#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
-#define ADV7180_AUTODETECT_ENABLE_REG 0x07
+#define ADV7180_REG_AUTODETECT_ENABLE 0x07
#define ADV7180_AUTODETECT_DEFAULT 0x7f
/* Contrast */
-#define ADV7180_CON_REG 0x08 /*Unsigned */
+#define ADV7180_REG_CON 0x0008 /*Unsigned */
#define ADV7180_CON_MIN 0
#define ADV7180_CON_DEF 128
#define ADV7180_CON_MAX 255
/* Brightness*/
-#define ADV7180_BRI_REG 0x0a /*Signed */
+#define ADV7180_REG_BRI 0x000a /*Signed */
#define ADV7180_BRI_MIN -128
#define ADV7180_BRI_DEF 0
#define ADV7180_BRI_MAX 127
/* Hue */
-#define ADV7180_HUE_REG 0x0b /*Signed, inverted */
+#define ADV7180_REG_HUE 0x000b /*Signed, inverted */
#define ADV7180_HUE_MIN -127
#define ADV7180_HUE_DEF 0
#define ADV7180_HUE_MAX 128
-#define ADV7180_ADI_CTRL_REG 0x0e
-#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
+#define ADV7180_REG_CTRL 0x000e
+#define ADV7180_CTRL_IRQ_SPACE 0x20
-#define ADV7180_PWR_MAN_REG 0x0f
+#define ADV7180_REG_PWR_MAN 0x0f
#define ADV7180_PWR_MAN_ON 0x04
#define ADV7180_PWR_MAN_OFF 0x24
#define ADV7180_PWR_MAN_RES 0x80
-#define ADV7180_STATUS1_REG 0x10
+#define ADV7180_REG_STATUS1 0x0010
#define ADV7180_STATUS1_IN_LOCK 0x01
#define ADV7180_STATUS1_AUTOD_MASK 0x70
#define ADV7180_STATUS1_AUTOD_NTSM_M_J 0x00
@@ -91,49 +95,161 @@
#define ADV7180_STATUS1_AUTOD_PAL_COMB 0x60
#define ADV7180_STATUS1_AUTOD_SECAM_525 0x70
-#define ADV7180_IDENT_REG 0x11
+#define ADV7180_REG_IDENT 0x0011
#define ADV7180_ID_7180 0x18
-#define ADV7180_ICONF1_ADI 0x40
+#define ADV7180_REG_ICONF1 0x0040
#define ADV7180_ICONF1_ACTIVE_LOW 0x01
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
/* Saturation */
-#define ADV7180_SD_SAT_CB_REG 0xe3 /*Unsigned */
-#define ADV7180_SD_SAT_CR_REG 0xe4 /*Unsigned */
+#define ADV7180_REG_SD_SAT_CB 0x00e3 /*Unsigned */
+#define ADV7180_REG_SD_SAT_CR 0x00e4 /*Unsigned */
#define ADV7180_SAT_MIN 0
#define ADV7180_SAT_DEF 128
#define ADV7180_SAT_MAX 255
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
-#define ADV7180_ISR1_ADI 0x42
-#define ADV7180_ICR1_ADI 0x43
-#define ADV7180_IMR1_ADI 0x44
-#define ADV7180_IMR2_ADI 0x48
+#define ADV7180_REG_ISR1 0x0042
+#define ADV7180_REG_ICR1 0x0043
+#define ADV7180_REG_IMR1 0x0044
+#define ADV7180_REG_IMR2 0x0048
#define ADV7180_IRQ3_AD_CHANGE 0x08
-#define ADV7180_ISR3_ADI 0x4A
-#define ADV7180_ICR3_ADI 0x4B
-#define ADV7180_IMR3_ADI 0x4C
-#define ADV7180_IMR4_ADI 0x50
+#define ADV7180_REG_ISR3 0x004A
+#define ADV7180_REG_ICR3 0x004B
+#define ADV7180_REG_IMR3 0x004C
+#define ADV7180_REG_IMR4 0x50
-#define ADV7180_NTSC_V_BIT_END_REG 0xE6
+#define ADV7180_REG_NTSC_V_BIT_END 0x00E6
#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
+#define ADV7180_REG_VPP_SLAVE_ADDR 0xFD
+#define ADV7180_REG_CSI_SLAVE_ADDR 0xFE
+
+#define ADV7180_REG_FLCONTROL 0x40e0
+#define ADV7180_FLCONTROL_FL_ENABLE 0x1
+
+#define ADV7180_CSI_REG_PWRDN 0x00
+#define ADV7180_CSI_PWRDN 0x80
+
+#define ADV7180_INPUT_CVBS_AIN1 0x00
+#define ADV7180_INPUT_CVBS_AIN2 0x01
+#define ADV7180_INPUT_CVBS_AIN3 0x02
+#define ADV7180_INPUT_CVBS_AIN4 0x03
+#define ADV7180_INPUT_CVBS_AIN5 0x04
+#define ADV7180_INPUT_CVBS_AIN6 0x05
+#define ADV7180_INPUT_SVIDEO_AIN1_AIN2 0x06
+#define ADV7180_INPUT_SVIDEO_AIN3_AIN4 0x07
+#define ADV7180_INPUT_SVIDEO_AIN5_AIN6 0x08
+#define ADV7180_INPUT_YPRPB_AIN1_AIN2_AIN3 0x09
+#define ADV7180_INPUT_YPRPB_AIN4_AIN5_AIN6 0x0a
+
+#define ADV7182_INPUT_CVBS_AIN1 0x00
+#define ADV7182_INPUT_CVBS_AIN2 0x01
+#define ADV7182_INPUT_CVBS_AIN3 0x02
+#define ADV7182_INPUT_CVBS_AIN4 0x03
+#define ADV7182_INPUT_CVBS_AIN5 0x04
+#define ADV7182_INPUT_CVBS_AIN6 0x05
+#define ADV7182_INPUT_CVBS_AIN7 0x06
+#define ADV7182_INPUT_CVBS_AIN8 0x07
+#define ADV7182_INPUT_SVIDEO_AIN1_AIN2 0x08
+#define ADV7182_INPUT_SVIDEO_AIN3_AIN4 0x09
+#define ADV7182_INPUT_SVIDEO_AIN5_AIN6 0x0a
+#define ADV7182_INPUT_SVIDEO_AIN7_AIN8 0x0b
+#define ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3 0x0c
+#define ADV7182_INPUT_YPRPB_AIN4_AIN5_AIN6 0x0d
+#define ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2 0x0e
+#define ADV7182_INPUT_DIFF_CVBS_AIN3_AIN4 0x0f
+#define ADV7182_INPUT_DIFF_CVBS_AIN5_AIN6 0x10
+#define ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8 0x11
+
+#define ADV7180_DEFAULT_CSI_I2C_ADDR 0x44
+#define ADV7180_DEFAULT_VPP_I2C_ADDR 0x42
+
+#define V4L2_CID_ADV_FAST_SWITCH (V4L2_CID_USER_ADV7180_BASE + 0x00)
+
+struct adv7180_state;
+
+#define ADV7180_FLAG_RESET_POWERED BIT(0)
+#define ADV7180_FLAG_V2 BIT(1)
+#define ADV7180_FLAG_MIPI_CSI2 BIT(2)
+#define ADV7180_FLAG_I2P BIT(3)
+
+struct adv7180_chip_info {
+ unsigned int flags;
+ unsigned int valid_input_mask;
+ int (*set_std)(struct adv7180_state *st, unsigned int std);
+ int (*select_input)(struct adv7180_state *st, unsigned int input);
+ int (*init)(struct adv7180_state *state);
+};
+
struct adv7180_state {
struct v4l2_ctrl_handler ctrl_hdl;
struct v4l2_subdev sd;
+ struct media_pad pad;
struct mutex mutex; /* mutual excl. when accessing chip */
int irq;
v4l2_std_id curr_norm;
bool autodetect;
bool powered;
u8 input;
+
+ struct i2c_client *client;
+ unsigned int register_page;
+ struct i2c_client *csi_client;
+ struct i2c_client *vpp_client;
+ const struct adv7180_chip_info *chip_info;
+ enum v4l2_field field;
};
#define to_adv7180_sd(_ctrl) (&container_of(_ctrl->handler, \
struct adv7180_state, \
ctrl_hdl)->sd)
+static int adv7180_select_page(struct adv7180_state *state, unsigned int page)
+{
+ if (state->register_page != page) {
+ i2c_smbus_write_byte_data(state->client, ADV7180_REG_CTRL,
+ page);
+ state->register_page = page;
+ }
+
+ return 0;
+}
+
+static int adv7180_write(struct adv7180_state *state, unsigned int reg,
+ unsigned int value)
+{
+ lockdep_assert_held(&state->mutex);
+ adv7180_select_page(state, reg >> 8);
+ return i2c_smbus_write_byte_data(state->client, reg & 0xff, value);
+}
+
+static int adv7180_read(struct adv7180_state *state, unsigned int reg)
+{
+ lockdep_assert_held(&state->mutex);
+ adv7180_select_page(state, reg >> 8);
+ return i2c_smbus_read_byte_data(state->client, reg & 0xff);
+}
+
+static int adv7180_csi_write(struct adv7180_state *state, unsigned int reg,
+ unsigned int value)
+{
+ return i2c_smbus_write_byte_data(state->csi_client, reg, value);
+}
+
+static int adv7180_set_video_standard(struct adv7180_state *state,
+ unsigned int std)
+{
+ return state->chip_info->set_std(state, std);
+}
+
+static int adv7180_vpp_write(struct adv7180_state *state, unsigned int reg,
+ unsigned int value)
+{
+ return i2c_smbus_write_byte_data(state->vpp_client, reg, value);
+}
+
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
{
/* in case V4L2_IN_ST_NO_SIGNAL */
@@ -165,22 +281,22 @@ static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
static int v4l2_std_to_adv7180(v4l2_std_id std)
{
if (std == V4L2_STD_PAL_60)
- return ADV7180_INPUT_CONTROL_PAL60;
+ return ADV7180_STD_PAL60;
if (std == V4L2_STD_NTSC_443)
- return ADV7180_INPUT_CONTROL_NTSC_443;
+ return ADV7180_STD_NTSC_443;
if (std == V4L2_STD_PAL_N)
- return ADV7180_INPUT_CONTROL_PAL_N;
+ return ADV7180_STD_PAL_N;
if (std == V4L2_STD_PAL_M)
- return ADV7180_INPUT_CONTROL_PAL_M;
+ return ADV7180_STD_PAL_M;
if (std == V4L2_STD_PAL_Nc)
- return ADV7180_INPUT_CONTROL_PAL_COMB_N;
+ return ADV7180_STD_PAL_COMB_N;
if (std & V4L2_STD_PAL)
- return ADV7180_INPUT_CONTROL_PAL_BG;
+ return ADV7180_STD_PAL_BG;
if (std & V4L2_STD_NTSC)
- return ADV7180_INPUT_CONTROL_NTSC_M;
+ return ADV7180_STD_NTSC_M;
if (std & V4L2_STD_SECAM)
- return ADV7180_INPUT_CONTROL_PAL_SECAM;
+ return ADV7180_STD_PAL_SECAM;
return -EINVAL;
}
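A note on the helpers introduced earlier in this hunk: register addresses passed to adv7180_read()/adv7180_write() are 16-bit values whose high byte is the sub-map page (selected by writing it to ADV7180_REG_CTRL) and whose low byte is the register offset actually addressed on I2C, which is how a single call reaches e.g. the fast-switch control at 0x40e0. Illustrative decode, not from the patch:

/* How adv7180_write() splits a paged register address */
unsigned int reg = 0x40e0;         /* ADV7180_REG_FLCONTROL */
unsigned int page = reg >> 8;      /* 0x40, applied via adv7180_select_page() */
unsigned int offset = reg & 0xff;  /* 0xe0, the register byte written over I2C */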
@@ -193,10 +309,10 @@ static u32 adv7180_status_to_v4l2(u8 status1)
return 0;
}
-static int __adv7180_status(struct i2c_client *client, u32 *status,
+static int __adv7180_status(struct adv7180_state *state, u32 *status,
v4l2_std_id *std)
{
- int status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
+ int status1 = adv7180_read(state, ADV7180_REG_STATUS1);
if (status1 < 0)
return status1;
@@ -225,7 +341,7 @@ static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
if (!state->autodetect || state->irq > 0)
*std = state->curr_norm;
else
- err = __adv7180_status(v4l2_get_subdevdata(sd), NULL, std);
+ err = __adv7180_status(state, NULL, std);
mutex_unlock(&state->mutex);
return err;
@@ -236,26 +352,19 @@ static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
{
struct adv7180_state *state = to_state(sd);
int ret = mutex_lock_interruptible(&state->mutex);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
if (ret)
return ret;
- /* We cannot discriminate between LQFP and 40-pin LFCSP, so accept
- * all inputs and let the card driver take care of validation
- */
- if ((input & ADV7180_INPUT_CONTROL_INSEL_MASK) != input)
+ if (input > 31 || !(BIT(input) & state->chip_info->valid_input_mask)) {
+ ret = -EINVAL;
goto out;
+ }
- ret = i2c_smbus_read_byte_data(client, ADV7180_INPUT_CONTROL_REG);
-
- if (ret < 0)
- goto out;
+ ret = state->chip_info->select_input(state, input);
- ret &= ~ADV7180_INPUT_CONTROL_INSEL_MASK;
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_INPUT_CONTROL_REG, ret | input);
- state->input = input;
+ if (ret == 0)
+ state->input = input;
out:
mutex_unlock(&state->mutex);
return ret;
@@ -268,74 +377,104 @@ static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
if (ret)
return ret;
- ret = __adv7180_status(v4l2_get_subdevdata(sd), status, NULL);
+ ret = __adv7180_status(state, status, NULL);
mutex_unlock(&state->mutex);
return ret;
}
+static int adv7180_program_std(struct adv7180_state *state)
+{
+ int ret;
+
+ if (state->autodetect) {
+ ret = adv7180_set_video_standard(state,
+ ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
+ if (ret < 0)
+ return ret;
+
+ __adv7180_status(state, NULL, &state->curr_norm);
+ } else {
+ ret = v4l2_std_to_adv7180(state->curr_norm);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_set_video_standard(state, ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7180_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = mutex_lock_interruptible(&state->mutex);
+
if (ret)
return ret;
/* all standards -> autodetect */
if (std == V4L2_STD_ALL) {
- ret =
- i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
- ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
- | state->input);
- if (ret < 0)
- goto out;
-
- __adv7180_status(client, NULL, &state->curr_norm);
state->autodetect = true;
} else {
+ /* Make sure we can support this std */
ret = v4l2_std_to_adv7180(std);
if (ret < 0)
goto out;
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_INPUT_CONTROL_REG,
- ret | state->input);
- if (ret < 0)
- goto out;
-
state->curr_norm = std;
state->autodetect = false;
}
- ret = 0;
+
+ ret = adv7180_program_std(state);
out:
mutex_unlock(&state->mutex);
return ret;
}
-static int adv7180_set_power(struct adv7180_state *state,
- struct i2c_client *client, bool on)
+static int adv7180_set_power(struct adv7180_state *state, bool on)
{
u8 val;
+ int ret;
if (on)
val = ADV7180_PWR_MAN_ON;
else
val = ADV7180_PWR_MAN_OFF;
- return i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG, val);
+ ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
+ if (ret)
+ return ret;
+
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ if (on) {
+ adv7180_csi_write(state, 0xDE, 0x02);
+ adv7180_csi_write(state, 0xD2, 0xF7);
+ adv7180_csi_write(state, 0xD8, 0x65);
+ adv7180_csi_write(state, 0xE0, 0x09);
+ adv7180_csi_write(state, 0x2C, 0x00);
+ if (state->field == V4L2_FIELD_NONE)
+ adv7180_csi_write(state, 0x1D, 0x80);
+ adv7180_csi_write(state, 0x00, 0x00);
+ } else {
+ adv7180_csi_write(state, 0x00, 0x80);
+ }
+ }
+
+ return 0;
}
static int adv7180_s_power(struct v4l2_subdev *sd, int on)
{
struct adv7180_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
ret = mutex_lock_interruptible(&state->mutex);
if (ret)
return ret;
- ret = adv7180_set_power(state, client, on);
+ ret = adv7180_set_power(state, on);
if (ret == 0)
state->powered = on;
@@ -347,7 +486,6 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
struct adv7180_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = mutex_lock_interruptible(&state->mutex);
int val;
@@ -356,26 +494,36 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
val = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- ret = i2c_smbus_write_byte_data(client, ADV7180_BRI_REG, val);
+ ret = adv7180_write(state, ADV7180_REG_BRI, val);
break;
case V4L2_CID_HUE:
/*Hue is inverted according to HSL chart */
- ret = i2c_smbus_write_byte_data(client, ADV7180_HUE_REG, -val);
+ ret = adv7180_write(state, ADV7180_REG_HUE, -val);
break;
case V4L2_CID_CONTRAST:
- ret = i2c_smbus_write_byte_data(client, ADV7180_CON_REG, val);
+ ret = adv7180_write(state, ADV7180_REG_CON, val);
break;
case V4L2_CID_SATURATION:
/*
*This could be V4L2_CID_BLUE_BALANCE/V4L2_CID_RED_BALANCE
*Let's not confuse the user, everybody understands saturation
*/
- ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CB_REG,
- val);
+ ret = adv7180_write(state, ADV7180_REG_SD_SAT_CB, val);
if (ret < 0)
break;
- ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CR_REG,
- val);
+ ret = adv7180_write(state, ADV7180_REG_SD_SAT_CR, val);
+ break;
+ case V4L2_CID_ADV_FAST_SWITCH:
+ if (ctrl->val) {
+ /* ADI required write */
+ adv7180_write(state, 0x80d9, 0x44);
+ adv7180_write(state, ADV7180_REG_FLCONTROL,
+ ADV7180_FLCONTROL_FL_ENABLE);
+ } else {
+ /* ADI required write */
+ adv7180_write(state, 0x80d9, 0xc4);
+ adv7180_write(state, ADV7180_REG_FLCONTROL, 0x00);
+ }
break;
default:
ret = -EINVAL;
@@ -389,6 +537,16 @@ static const struct v4l2_ctrl_ops adv7180_ctrl_ops = {
.s_ctrl = adv7180_s_ctrl,
};
+static const struct v4l2_ctrl_config adv7180_ctrl_fast_switch = {
+ .ops = &adv7180_ctrl_ops,
+ .id = V4L2_CID_ADV_FAST_SWITCH,
+ .name = "Fast Switching",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
static int adv7180_init_controls(struct adv7180_state *state)
{
v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
@@ -405,6 +563,8 @@ static int adv7180_init_controls(struct adv7180_state *state)
v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
V4L2_CID_HUE, ADV7180_HUE_MIN,
ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
+ v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
+
state->sd.ctrl_handler = &state->ctrl_hdl;
if (state->ctrl_hdl.error) {
int err = state->ctrl_hdl.error;
@@ -421,13 +581,14 @@ static void adv7180_exit_controls(struct adv7180_state *state)
v4l2_ctrl_handler_free(&state->ctrl_hdl);
}
-static int adv7180_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
- u32 *code)
+static int adv7180_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
{
- if (index > 0)
+ if (code->index != 0)
return -EINVAL;
- *code = MEDIA_BUS_FMT_YUYV8_2X8;
+ code->code = MEDIA_BUS_FMT_YUYV8_2X8;
return 0;
}
@@ -439,23 +600,118 @@ static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
- fmt->field = V4L2_FIELD_INTERLACED;
fmt->width = 720;
fmt->height = state->curr_norm & V4L2_STD_525_60 ? 480 : 576;
return 0;
}
+static int adv7180_set_field_mode(struct adv7180_state *state)
+{
+ if (!(state->chip_info->flags & ADV7180_FLAG_I2P))
+ return 0;
+
+ if (state->field == V4L2_FIELD_NONE) {
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ adv7180_csi_write(state, 0x01, 0x20);
+ adv7180_csi_write(state, 0x02, 0x28);
+ adv7180_csi_write(state, 0x03, 0x38);
+ adv7180_csi_write(state, 0x04, 0x30);
+ adv7180_csi_write(state, 0x05, 0x30);
+ adv7180_csi_write(state, 0x06, 0x80);
+ adv7180_csi_write(state, 0x07, 0x70);
+ adv7180_csi_write(state, 0x08, 0x50);
+ }
+ adv7180_vpp_write(state, 0xa3, 0x00);
+ adv7180_vpp_write(state, 0x5b, 0x00);
+ adv7180_vpp_write(state, 0x55, 0x80);
+ } else {
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ adv7180_csi_write(state, 0x01, 0x18);
+ adv7180_csi_write(state, 0x02, 0x18);
+ adv7180_csi_write(state, 0x03, 0x30);
+ adv7180_csi_write(state, 0x04, 0x20);
+ adv7180_csi_write(state, 0x05, 0x28);
+ adv7180_csi_write(state, 0x06, 0x40);
+ adv7180_csi_write(state, 0x07, 0x58);
+ adv7180_csi_write(state, 0x08, 0x30);
+ }
+ adv7180_vpp_write(state, 0xa3, 0x70);
+ adv7180_vpp_write(state, 0x5b, 0x80);
+ adv7180_vpp_write(state, 0x55, 0x00);
+ }
+
+ return 0;
+}
+
+static int adv7180_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct adv7180_state *state = to_state(sd);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format->format = *v4l2_subdev_get_try_format(fh, 0);
+ } else {
+ adv7180_mbus_fmt(sd, &format->format);
+ format->format.field = state->field;
+ }
+
+ return 0;
+}
+
+static int adv7180_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct adv7180_state *state = to_state(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ switch (format->format.field) {
+ case V4L2_FIELD_NONE:
+ if (!(state->chip_info->flags & ADV7180_FLAG_I2P))
+ format->format.field = V4L2_FIELD_INTERLACED;
+ break;
+ default:
+ format->format.field = V4L2_FIELD_INTERLACED;
+ break;
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ framefmt = &format->format;
+ if (state->field != format->format.field) {
+ state->field = format->format.field;
+ adv7180_set_power(state, false);
+ adv7180_set_field_mode(state);
+ adv7180_set_power(state, true);
+ }
+ } else {
+ framefmt = v4l2_subdev_get_try_format(fh, 0);
+ *framefmt = format->format;
+ }
+
+ return adv7180_mbus_fmt(sd, framefmt);
+}
+
static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
- /*
- * The ADV7180 sensor supports BT.601/656 output modes.
- * The BT.656 is default and not yet configurable by s/w.
- */
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_BT656;
+ struct adv7180_state *state = to_state(sd);
+
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ cfg->type = V4L2_MBUS_CSI2;
+ cfg->flags = V4L2_MBUS_CSI2_1_LANE |
+ V4L2_MBUS_CSI2_CHANNEL_0 |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+ } else {
+ /*
+ * The ADV7180 sensor supports BT.601/656 output modes.
+ * BT.656 is the default and is not yet configurable by s/w.
+ */
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_BT656;
+ }
return 0;
}
@@ -465,139 +721,439 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
- .enum_mbus_fmt = adv7180_enum_mbus_fmt,
- .try_mbus_fmt = adv7180_mbus_fmt,
- .g_mbus_fmt = adv7180_mbus_fmt,
- .s_mbus_fmt = adv7180_mbus_fmt,
.g_mbus_config = adv7180_g_mbus_config,
};
+
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.s_power = adv7180_s_power,
};
+static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
+ .enum_mbus_code = adv7180_enum_mbus_code,
+ .set_fmt = adv7180_set_pad_format,
+ .get_fmt = adv7180_get_pad_format,
+};
+
static const struct v4l2_subdev_ops adv7180_ops = {
.core = &adv7180_core_ops,
.video = &adv7180_video_ops,
+ .pad = &adv7180_pad_ops,
};
static irqreturn_t adv7180_irq(int irq, void *devid)
{
struct adv7180_state *state = devid;
- struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
u8 isr3;
mutex_lock(&state->mutex);
- i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- ADV7180_ADI_CTRL_IRQ_SPACE);
- isr3 = i2c_smbus_read_byte_data(client, ADV7180_ISR3_ADI);
+ isr3 = adv7180_read(state, ADV7180_REG_ISR3);
/* clear */
- i2c_smbus_write_byte_data(client, ADV7180_ICR3_ADI, isr3);
- i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG, 0);
+ adv7180_write(state, ADV7180_REG_ICR3, isr3);
if (isr3 & ADV7180_IRQ3_AD_CHANGE && state->autodetect)
- __adv7180_status(client, NULL, &state->curr_norm);
+ __adv7180_status(state, NULL, &state->curr_norm);
mutex_unlock(&state->mutex);
return IRQ_HANDLED;
}
-static int init_device(struct i2c_client *client, struct adv7180_state *state)
+static int adv7180_init(struct adv7180_state *state)
{
int ret;
- /* Initialize adv7180 */
- /* Enable autodetection */
- if (state->autodetect) {
- ret =
- i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
- ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
- | state->input);
- if (ret < 0)
- return ret;
-
- ret =
- i2c_smbus_write_byte_data(client,
- ADV7180_AUTODETECT_ENABLE_REG,
- ADV7180_AUTODETECT_DEFAULT);
- if (ret < 0)
- return ret;
- } else {
- ret = v4l2_std_to_adv7180(state->curr_norm);
- if (ret < 0)
- return ret;
-
- ret =
- i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
- ret | state->input);
- if (ret < 0)
- return ret;
-
- }
/* ITU-R BT.656-4 compatible */
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
+ ret = adv7180_write(state, ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
if (ret < 0)
return ret;
/* Manually set V bit end position in NTSC mode */
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_NTSC_V_BIT_END_REG,
+ return adv7180_write(state, ADV7180_REG_NTSC_V_BIT_END,
ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
+}
+
+static int adv7180_set_std(struct adv7180_state *state, unsigned int std)
+{
+ return adv7180_write(state, ADV7180_REG_INPUT_CONTROL,
+ (std << 4) | state->input);
+}
+
+static int adv7180_select_input(struct adv7180_state *state, unsigned int input)
+{
+ int ret;
+
+ ret = adv7180_read(state, ADV7180_REG_INPUT_CONTROL);
if (ret < 0)
return ret;
- /* read current norm */
- __adv7180_status(client, NULL, &state->curr_norm);
+ ret &= ~ADV7180_INPUT_CONTROL_INSEL_MASK;
+ ret |= input;
+ return adv7180_write(state, ADV7180_REG_INPUT_CONTROL, ret);
+}
- /* register for interrupts */
- if (state->irq > 0) {
- ret = request_threaded_irq(state->irq, NULL, adv7180_irq,
- IRQF_ONESHOT, KBUILD_MODNAME, state);
- if (ret)
- return ret;
+static int adv7182_init(struct adv7180_state *state)
+{
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2)
+ adv7180_write(state, ADV7180_REG_CSI_SLAVE_ADDR,
+ ADV7180_DEFAULT_CSI_I2C_ADDR << 1);
+
+ if (state->chip_info->flags & ADV7180_FLAG_I2P)
+ adv7180_write(state, ADV7180_REG_VPP_SLAVE_ADDR,
+ ADV7180_DEFAULT_VPP_I2C_ADDR << 1);
+
+ if (state->chip_info->flags & ADV7180_FLAG_V2) {
+ /* ADI recommended writes for improved video quality */
+ adv7180_write(state, 0x0080, 0x51);
+ adv7180_write(state, 0x0081, 0x51);
+ adv7180_write(state, 0x0082, 0x68);
+ }
- ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- ADV7180_ADI_CTRL_IRQ_SPACE);
- if (ret < 0)
- goto err;
+ /* ADI required writes */
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ adv7180_write(state, 0x0003, 0x4e);
+ adv7180_write(state, 0x0004, 0x57);
+ adv7180_write(state, 0x001d, 0xc0);
+ } else {
+ if (state->chip_info->flags & ADV7180_FLAG_V2)
+ adv7180_write(state, 0x0004, 0x17);
+ else
+ adv7180_write(state, 0x0004, 0x07);
+ adv7180_write(state, 0x0003, 0x0c);
+ adv7180_write(state, 0x001d, 0x40);
+ }
+
+ adv7180_write(state, 0x0013, 0x00);
+
+ return 0;
+}
+
+static int adv7182_set_std(struct adv7180_state *state, unsigned int std)
+{
+ return adv7180_write(state, ADV7182_REG_INPUT_VIDSEL, std << 4);
+}
+
+enum adv7182_input_type {
+ ADV7182_INPUT_TYPE_CVBS,
+ ADV7182_INPUT_TYPE_DIFF_CVBS,
+ ADV7182_INPUT_TYPE_SVIDEO,
+ ADV7182_INPUT_TYPE_YPBPR,
+};
+
+static enum adv7182_input_type adv7182_get_input_type(unsigned int input)
+{
+ switch (input) {
+ case ADV7182_INPUT_CVBS_AIN1:
+ case ADV7182_INPUT_CVBS_AIN2:
+ case ADV7182_INPUT_CVBS_AIN3:
+ case ADV7182_INPUT_CVBS_AIN4:
+ case ADV7182_INPUT_CVBS_AIN5:
+ case ADV7182_INPUT_CVBS_AIN6:
+ case ADV7182_INPUT_CVBS_AIN7:
+ case ADV7182_INPUT_CVBS_AIN8:
+ return ADV7182_INPUT_TYPE_CVBS;
+ case ADV7182_INPUT_SVIDEO_AIN1_AIN2:
+ case ADV7182_INPUT_SVIDEO_AIN3_AIN4:
+ case ADV7182_INPUT_SVIDEO_AIN5_AIN6:
+ case ADV7182_INPUT_SVIDEO_AIN7_AIN8:
+ return ADV7182_INPUT_TYPE_SVIDEO;
+ case ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3:
+ case ADV7182_INPUT_YPRPB_AIN4_AIN5_AIN6:
+ return ADV7182_INPUT_TYPE_YPBPR;
+ case ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2:
+ case ADV7182_INPUT_DIFF_CVBS_AIN3_AIN4:
+ case ADV7182_INPUT_DIFF_CVBS_AIN5_AIN6:
+ case ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8:
+ return ADV7182_INPUT_TYPE_DIFF_CVBS;
+ default: /* Will never happen */
+ return 0;
+ }
+}
+
+/* ADI recommended writes to registers 0x52, 0x53, 0x54 */
+static unsigned int adv7182_lbias_settings[][3] = {
+ [ADV7182_INPUT_TYPE_CVBS] = { 0xCB, 0x4E, 0x80 },
+ [ADV7182_INPUT_TYPE_DIFF_CVBS] = { 0xC0, 0x4E, 0x80 },
+ [ADV7182_INPUT_TYPE_SVIDEO] = { 0x0B, 0xCE, 0x80 },
+ [ADV7182_INPUT_TYPE_YPBPR] = { 0x0B, 0x4E, 0xC0 },
+};
+
+static unsigned int adv7280_lbias_settings[][3] = {
+ [ADV7182_INPUT_TYPE_CVBS] = { 0xCD, 0x4E, 0x80 },
+ [ADV7182_INPUT_TYPE_DIFF_CVBS] = { 0xC0, 0x4E, 0x80 },
+ [ADV7182_INPUT_TYPE_SVIDEO] = { 0x0B, 0xCE, 0x80 },
+ [ADV7182_INPUT_TYPE_YPBPR] = { 0x0B, 0x4E, 0xC0 },
+};
+
+static int adv7182_select_input(struct adv7180_state *state, unsigned int input)
+{
+ enum adv7182_input_type input_type;
+ unsigned int *lbias;
+ unsigned int i;
+ int ret;
+
+ ret = adv7180_write(state, ADV7180_REG_INPUT_CONTROL, input);
+ if (ret)
+ return ret;
+
+ /* Reset clamp circuitry - ADI recommended writes */
+ adv7180_write(state, 0x809c, 0x00);
+ adv7180_write(state, 0x809c, 0xff);
+
+ input_type = adv7182_get_input_type(input);
+
+ switch (input_type) {
+ case ADV7182_INPUT_TYPE_CVBS:
+ case ADV7182_INPUT_TYPE_DIFF_CVBS:
+ /* ADI recommends using the SH1 filter */
+ adv7180_write(state, 0x0017, 0x41);
+ break;
+ default:
+ adv7180_write(state, 0x0017, 0x01);
+ break;
+ }
+
+ if (state->chip_info->flags & ADV7180_FLAG_V2)
+ lbias = adv7280_lbias_settings[input_type];
+ else
+ lbias = adv7182_lbias_settings[input_type];
+
+ for (i = 0; i < ARRAY_SIZE(adv7182_lbias_settings[0]); i++)
+ adv7180_write(state, 0x0052 + i, lbias[i]);
+
+ if (input_type == ADV7182_INPUT_TYPE_DIFF_CVBS) {
+ /* ADI required writes to make differential CVBS work */
+ adv7180_write(state, 0x005f, 0xa8);
+ adv7180_write(state, 0x005a, 0x90);
+ adv7180_write(state, 0x0060, 0xb0);
+ adv7180_write(state, 0x80b6, 0x08);
+ adv7180_write(state, 0x80c0, 0xa0);
+ } else {
+ adv7180_write(state, 0x005f, 0xf0);
+ adv7180_write(state, 0x005a, 0xd0);
+ adv7180_write(state, 0x0060, 0x10);
+ adv7180_write(state, 0x80b6, 0x9c);
+ adv7180_write(state, 0x80c0, 0x00);
+ }
+
+ return 0;
+}
+
+static const struct adv7180_chip_info adv7180_info = {
+ .flags = ADV7180_FLAG_RESET_POWERED,
+ /* We cannot discriminate between LQFP and 40-pin LFCSP, so accept
+ * all inputs and let the card driver take care of validation
+ */
+ .valid_input_mask = BIT(ADV7180_INPUT_CVBS_AIN1) |
+ BIT(ADV7180_INPUT_CVBS_AIN2) |
+ BIT(ADV7180_INPUT_CVBS_AIN3) |
+ BIT(ADV7180_INPUT_CVBS_AIN4) |
+ BIT(ADV7180_INPUT_CVBS_AIN5) |
+ BIT(ADV7180_INPUT_CVBS_AIN6) |
+ BIT(ADV7180_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7180_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7180_INPUT_SVIDEO_AIN5_AIN6) |
+ BIT(ADV7180_INPUT_YPRPB_AIN1_AIN2_AIN3) |
+ BIT(ADV7180_INPUT_YPRPB_AIN4_AIN5_AIN6),
+ .init = adv7180_init,
+ .set_std = adv7180_set_std,
+ .select_input = adv7180_select_input,
+};
+
+static const struct adv7180_chip_info adv7182_info = {
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN3) |
+ BIT(ADV7182_INPUT_CVBS_AIN4) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN3_AIN4),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static const struct adv7180_chip_info adv7280_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN3) |
+ BIT(ADV7182_INPUT_CVBS_AIN4) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static const struct adv7180_chip_info adv7280_m_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN3) |
+ BIT(ADV7182_INPUT_CVBS_AIN4) |
+ BIT(ADV7182_INPUT_CVBS_AIN5) |
+ BIT(ADV7182_INPUT_CVBS_AIN6) |
+ BIT(ADV7182_INPUT_CVBS_AIN7) |
+ BIT(ADV7182_INPUT_CVBS_AIN8) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN5_AIN6) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN7_AIN8) |
+ BIT(ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3) |
+ BIT(ADV7182_INPUT_YPRPB_AIN4_AIN5_AIN6),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static const struct adv7180_chip_info adv7281_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN7) |
+ BIT(ADV7182_INPUT_CVBS_AIN8) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN7_AIN8) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static const struct adv7180_chip_info adv7281_m_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN3) |
+ BIT(ADV7182_INPUT_CVBS_AIN4) |
+ BIT(ADV7182_INPUT_CVBS_AIN7) |
+ BIT(ADV7182_INPUT_CVBS_AIN8) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN7_AIN8) |
+ BIT(ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+static const struct adv7180_chip_info adv7281_ma_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN3) |
+ BIT(ADV7182_INPUT_CVBS_AIN4) |
+ BIT(ADV7182_INPUT_CVBS_AIN5) |
+ BIT(ADV7182_INPUT_CVBS_AIN6) |
+ BIT(ADV7182_INPUT_CVBS_AIN7) |
+ BIT(ADV7182_INPUT_CVBS_AIN8) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN5_AIN6) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN7_AIN8) |
+ BIT(ADV7182_INPUT_YPRPB_AIN1_AIN2_AIN3) |
+ BIT(ADV7182_INPUT_YPRPB_AIN4_AIN5_AIN6) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN5_AIN6) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static const struct adv7180_chip_info adv7282_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN7) |
+ BIT(ADV7182_INPUT_CVBS_AIN8) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN7_AIN8) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static const struct adv7180_chip_info adv7282_m_info = {
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ BIT(ADV7182_INPUT_CVBS_AIN2) |
+ BIT(ADV7182_INPUT_CVBS_AIN3) |
+ BIT(ADV7182_INPUT_CVBS_AIN4) |
+ BIT(ADV7182_INPUT_CVBS_AIN7) |
+ BIT(ADV7182_INPUT_CVBS_AIN8) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_SVIDEO_AIN7_AIN8) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN1_AIN2) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN3_AIN4) |
+ BIT(ADV7182_INPUT_DIFF_CVBS_AIN7_AIN8),
+ .init = adv7182_init,
+ .set_std = adv7182_set_std,
+ .select_input = adv7182_select_input,
+};
+
+static int init_device(struct adv7180_state *state)
+{
+ int ret;
+
+ mutex_lock(&state->mutex);
+
+ adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
+ usleep_range(2000, 10000);
+
+ ret = state->chip_info->init(state);
+ if (ret)
+ goto out_unlock;
+
+ ret = adv7180_program_std(state);
+ if (ret)
+ goto out_unlock;
+
+ adv7180_set_field_mode(state);
+
+ /* register for interrupts */
+ if (state->irq > 0) {
/* config the Interrupt pin to be active low */
- ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
+ ret = adv7180_write(state, ADV7180_REG_ICONF1,
ADV7180_ICONF1_ACTIVE_LOW |
ADV7180_ICONF1_PSYNC_ONLY);
if (ret < 0)
- goto err;
+ goto out_unlock;
- ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
+ ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
if (ret < 0)
- goto err;
+ goto out_unlock;
- ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
+ ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
if (ret < 0)
- goto err;
+ goto out_unlock;
/* enable AD change interrupts */
- ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
+ ret = adv7180_write(state, ADV7180_REG_IMR3,
ADV7180_IRQ3_AD_CHANGE);
if (ret < 0)
- goto err;
+ goto out_unlock;
- ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
+ ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
if (ret < 0)
- goto err;
-
- ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- 0);
- if (ret < 0)
- goto err;
+ goto out_unlock;
}
- return 0;
+out_unlock:
+ mutex_unlock(&state->mutex);
-err:
- free_irq(state->irq, state);
return ret;
}
@@ -616,26 +1172,63 @@ static int adv7180_probe(struct i2c_client *client,
client->addr, client->adapter->name);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
- if (state == NULL) {
- ret = -ENOMEM;
- goto err;
+ if (state == NULL)
+ return -ENOMEM;
+
+ state->client = client;
+ state->field = V4L2_FIELD_INTERLACED;
+ state->chip_info = (struct adv7180_chip_info *)id->driver_data;
+
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ state->csi_client = i2c_new_dummy(client->adapter,
+ ADV7180_DEFAULT_CSI_I2C_ADDR);
+ if (!state->csi_client)
+ return -ENOMEM;
+ }
+
+ if (state->chip_info->flags & ADV7180_FLAG_I2P) {
+ state->vpp_client = i2c_new_dummy(client->adapter,
+ ADV7180_DEFAULT_VPP_I2C_ADDR);
+ if (!state->vpp_client) {
+ ret = -ENOMEM;
+ goto err_unregister_csi_client;
+ }
}
state->irq = client->irq;
mutex_init(&state->mutex);
state->autodetect = true;
- state->powered = true;
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ state->powered = true;
+ else
+ state->powered = false;
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
ret = adv7180_init_controls(state);
if (ret)
- goto err_unreg_subdev;
- ret = init_device(client, state);
+ goto err_unregister_vpp_client;
+
+ state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.flags |= MEDIA_ENT_T_V4L2_SUBDEV_DECODER;
+ ret = media_entity_init(&sd->entity, 1, &state->pad, 0);
if (ret)
goto err_free_ctrl;
+ ret = init_device(state);
+ if (ret)
+ goto err_media_entity_cleanup;
+
+ if (state->irq) {
+ ret = request_threaded_irq(client->irq, NULL, adv7180_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ KBUILD_MODNAME, state);
+ if (ret)
+ goto err_media_entity_cleanup;
+ }
+
ret = v4l2_async_register_subdev(sd);
if (ret)
goto err_free_irq;
@@ -645,11 +1238,17 @@ static int adv7180_probe(struct i2c_client *client,
err_free_irq:
if (state->irq > 0)
free_irq(client->irq, state);
+err_media_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
err_free_ctrl:
adv7180_exit_controls(state);
-err_unreg_subdev:
+err_unregister_vpp_client:
+ if (state->chip_info->flags & ADV7180_FLAG_I2P)
+ i2c_unregister_device(state->vpp_client);
+err_unregister_csi_client:
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2)
+ i2c_unregister_device(state->csi_client);
mutex_destroy(&state->mutex);
-err:
return ret;
}
@@ -663,15 +1262,32 @@ static int adv7180_remove(struct i2c_client *client)
if (state->irq > 0)
free_irq(client->irq, state);
+ media_entity_cleanup(&sd->entity);
adv7180_exit_controls(state);
+
+ if (state->chip_info->flags & ADV7180_FLAG_I2P)
+ i2c_unregister_device(state->vpp_client);
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2)
+ i2c_unregister_device(state->csi_client);
+
mutex_destroy(&state->mutex);
+
return 0;
}
static const struct i2c_device_id adv7180_id[] = {
- {KBUILD_MODNAME, 0},
+ { "adv7180", (kernel_ulong_t)&adv7180_info },
+ { "adv7182", (kernel_ulong_t)&adv7182_info },
+ { "adv7280", (kernel_ulong_t)&adv7280_info },
+ { "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
+ { "adv7281", (kernel_ulong_t)&adv7281_info },
+ { "adv7281-m", (kernel_ulong_t)&adv7281_m_info },
+ { "adv7281-ma", (kernel_ulong_t)&adv7281_ma_info },
+ { "adv7282", (kernel_ulong_t)&adv7282_info },
+ { "adv7282-m", (kernel_ulong_t)&adv7282_m_info },
{},
};
+MODULE_DEVICE_TABLE(i2c, adv7180_id);
#ifdef CONFIG_PM_SLEEP
static int adv7180_suspend(struct device *dev)
@@ -680,7 +1296,7 @@ static int adv7180_suspend(struct device *dev)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7180_state *state = to_state(sd);
- return adv7180_set_power(state, client, false);
+ return adv7180_set_power(state, false);
}
static int adv7180_resume(struct device *dev)
@@ -690,14 +1306,14 @@ static int adv7180_resume(struct device *dev)
struct adv7180_state *state = to_state(sd);
int ret;
- if (state->powered) {
- ret = adv7180_set_power(state, client, true);
- if (ret)
- return ret;
- }
- ret = init_device(client, state);
+ ret = init_device(state);
if (ret < 0)
return ret;
+
+ ret = adv7180_set_power(state, state->powered);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -708,8 +1324,6 @@ static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
#define ADV7180_PM_OPS NULL
#endif
-MODULE_DEVICE_TABLE(i2c, adv7180_id);
-
static struct i2c_driver adv7180_driver = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index e43dd2e2a38a..d228b7c82310 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -333,21 +333,11 @@ static inline struct adv7604_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct adv7604_state, sd);
}
-static inline unsigned hblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_WIDTH(t);
-}
-
static inline unsigned htotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_WIDTH(t);
}
-static inline unsigned vblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_HEIGHT(t);
-}
-
static inline unsigned vtotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_HEIGHT(t);
@@ -466,11 +456,6 @@ static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state, ADV7604_PAGE_CEC, reg, val);
}
-static inline int cec_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
-{
- return cec_write(sd, reg, (cec_read(sd, reg) & ~mask) | val);
-}
-
static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv7604_state *state = to_state(sd);
@@ -486,34 +471,6 @@ static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
reg, val);
}
-static inline int esdp_read(struct v4l2_subdev *sd, u8 reg)
-{
- struct adv7604_state *state = to_state(sd);
-
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_ESDP, reg);
-}
-
-static inline int esdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
-{
- struct adv7604_state *state = to_state(sd);
-
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_ESDP, reg, val);
-}
-
-static inline int dpp_read(struct v4l2_subdev *sd, u8 reg)
-{
- struct adv7604_state *state = to_state(sd);
-
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_DPP, reg);
-}
-
-static inline int dpp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
-{
- struct adv7604_state *state = to_state(sd);
-
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_DPP, reg, val);
-}
-
static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv7604_state *state = to_state(sd);
@@ -561,32 +518,6 @@ static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state, ADV7604_PAGE_EDID, reg, val);
}
-static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
-{
- struct adv7604_state *state = to_state(sd);
- struct i2c_client *client = state->i2c_clients[ADV7604_PAGE_EDID];
- u8 msgbuf0[1] = { 0 };
- u8 msgbuf1[256];
- struct i2c_msg msg[2] = {
- {
- .addr = client->addr,
- .len = 1,
- .buf = msgbuf0
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = msgbuf1
- },
- };
-
- if (i2c_transfer(client->adapter, msg, 2) < 0)
- return -EIO;
- memcpy(val, msgbuf1, len);
- return 0;
-}
-
static inline int edid_write_block(struct v4l2_subdev *sd,
unsigned len, const u8 *val)
{
@@ -652,13 +583,6 @@ static inline int hdmi_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8
return hdmi_write(sd, reg, (hdmi_read(sd, reg) & ~mask) | val);
}
-static inline int test_read(struct v4l2_subdev *sd, u8 reg)
-{
- struct adv7604_state *state = to_state(sd);
-
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_TEST, reg);
-}
-
static inline int test_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv7604_state *state = to_state(sd);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 75d26dfd0939..7c215ee142c4 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -38,6 +38,7 @@
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <linux/v4l2-dv-timings.h>
+#include <linux/hdmi.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dv-timings.h>
@@ -220,21 +221,11 @@ static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
return &container_of(ctrl->handler, struct adv7842_state, hdl)->sd;
}
-static inline unsigned hblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_WIDTH(t);
-}
-
static inline unsigned htotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_WIDTH(t);
}
-static inline unsigned vblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_HEIGHT(t);
-}
-
static inline unsigned vtotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_HEIGHT(t);
@@ -2108,149 +2099,65 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
return err;
}
-/*********** avi info frame CEA-861-E **************/
-/* TODO move to common library */
-
-struct avi_info_frame {
- uint8_t f17;
- uint8_t y10;
- uint8_t a0;
- uint8_t b10;
- uint8_t s10;
- uint8_t c10;
- uint8_t m10;
- uint8_t r3210;
- uint8_t itc;
- uint8_t ec210;
- uint8_t q10;
- uint8_t sc10;
- uint8_t f47;
- uint8_t vic;
- uint8_t yq10;
- uint8_t cn10;
- uint8_t pr3210;
- uint16_t etb;
- uint16_t sbb;
- uint16_t elb;
- uint16_t srb;
-};
-
-static const char *y10_txt[4] = {
- "RGB",
- "YCbCr 4:2:2",
- "YCbCr 4:4:4",
- "Future",
-};
-
-static const char *c10_txt[4] = {
- "No Data",
- "SMPTE 170M",
- "ITU-R 709",
- "Extended Colorimetry information valied",
-};
-
-static const char *itc_txt[2] = {
- "No Data",
- "IT content",
-};
-
-static const char *ec210_txt[8] = {
- "xvYCC601",
- "xvYCC709",
- "sYCC601",
- "AdobeYCC601",
- "AdobeRGB",
- "5 reserved",
- "6 reserved",
- "7 reserved",
+struct adv7842_cfg_read_infoframe {
+ const char *desc;
+ u8 present_mask;
+ u8 head_addr;
+ u8 payload_addr;
};
-static const char *q10_txt[4] = {
- "Default",
- "Limited Range",
- "Full Range",
- "Reserved",
-};
-
-static void parse_avi_infoframe(struct v4l2_subdev *sd, uint8_t *buf,
- struct avi_info_frame *avi)
-{
- avi->f17 = (buf[1] >> 7) & 0x1;
- avi->y10 = (buf[1] >> 5) & 0x3;
- avi->a0 = (buf[1] >> 4) & 0x1;
- avi->b10 = (buf[1] >> 2) & 0x3;
- avi->s10 = buf[1] & 0x3;
- avi->c10 = (buf[2] >> 6) & 0x3;
- avi->m10 = (buf[2] >> 4) & 0x3;
- avi->r3210 = buf[2] & 0xf;
- avi->itc = (buf[3] >> 7) & 0x1;
- avi->ec210 = (buf[3] >> 4) & 0x7;
- avi->q10 = (buf[3] >> 2) & 0x3;
- avi->sc10 = buf[3] & 0x3;
- avi->f47 = (buf[4] >> 7) & 0x1;
- avi->vic = buf[4] & 0x7f;
- avi->yq10 = (buf[5] >> 6) & 0x3;
- avi->cn10 = (buf[5] >> 4) & 0x3;
- avi->pr3210 = buf[5] & 0xf;
- avi->etb = buf[6] + 256*buf[7];
- avi->sbb = buf[8] + 256*buf[9];
- avi->elb = buf[10] + 256*buf[11];
- avi->srb = buf[12] + 256*buf[13];
-}
-
-static void print_avi_infoframe(struct v4l2_subdev *sd)
+static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infoframe *cri)
{
int i;
- uint8_t buf[14];
- u8 avi_len;
- u8 avi_ver;
- struct avi_info_frame avi;
+ uint8_t buffer[32];
+ union hdmi_infoframe frame;
+ u8 len;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
- if (!(hdmi_read(sd, 0x05) & 0x80)) {
- v4l2_info(sd, "receive DVI-D signal (AVI infoframe not supported)\n");
- return;
- }
- if (!(io_read(sd, 0x60) & 0x01)) {
- v4l2_info(sd, "AVI infoframe not received\n");
+ if (!(io_read(sd, 0x60) & cri->present_mask)) {
+ v4l2_info(sd, "%s infoframe not received\n", cri->desc);
return;
}
- if (io_read(sd, 0x88) & 0x10) {
- v4l2_info(sd, "AVI infoframe checksum error has occurred earlier\n");
- io_write(sd, 0x8a, 0x10); /* clear AVI_INF_CKS_ERR_RAW */
- if (io_read(sd, 0x88) & 0x10) {
- v4l2_info(sd, "AVI infoframe checksum error still present\n");
- io_write(sd, 0x8a, 0x10); /* clear AVI_INF_CKS_ERR_RAW */
- }
- }
+ for (i = 0; i < 3; i++)
+ buffer[i] = infoframe_read(sd, cri->head_addr + i);
- avi_len = infoframe_read(sd, 0xe2);
- avi_ver = infoframe_read(sd, 0xe1);
- v4l2_info(sd, "AVI infoframe version %d (%d byte)\n",
- avi_ver, avi_len);
+ len = buffer[2] + 1;
- if (avi_ver != 0x02)
+ if (len + 3 > sizeof(buffer)) {
+ v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
return;
+ }
+
+ for (i = 0; i < len; i++)
+ buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
- for (i = 0; i < 14; i++)
- buf[i] = infoframe_read(sd, i);
+ if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
+ return;
+ }
- v4l2_info(sd, "\t%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
- buf[8], buf[9], buf[10], buf[11], buf[12], buf[13]);
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+}
- parse_avi_infoframe(sd, buf, &avi);
+static void adv7842_log_infoframes(struct v4l2_subdev *sd)
+{
+ int i;
+ struct adv7842_cfg_read_infoframe cri[] = {
+ { "AVI", 0x01, 0xe0, 0x00 },
+ { "Audio", 0x02, 0xe3, 0x1c },
+ { "SDP", 0x04, 0xe6, 0x2a },
+ { "Vendor", 0x10, 0xec, 0x54 }
+ };
- if (avi.vic)
- v4l2_info(sd, "\tVIC: %d\n", avi.vic);
- if (avi.itc)
- v4l2_info(sd, "\t%s\n", itc_txt[avi.itc]);
+ if (!(hdmi_read(sd, 0x05) & 0x80)) {
+ v4l2_info(sd, "receive DVI-D signal, no infoframes\n");
+ return;
+ }
- if (avi.y10)
- v4l2_info(sd, "\t%s %s\n", y10_txt[avi.y10], !avi.c10 ? "" :
- (avi.c10 == 0x3 ? ec210_txt[avi.ec210] : c10_txt[avi.c10]));
- else
- v4l2_info(sd, "\t%s %s\n", y10_txt[avi.y10], q10_txt[avi.q10]);
+ for (i = 0; i < ARRAY_SIZE(cri); i++)
+ log_infoframe(sd, &cri[i]);
}
static const char * const prim_mode_txt[] = {
@@ -2464,7 +2371,8 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Deep color mode: %s\n",
deep_color_mode_txt[hdmi_read(sd, 0x0b) >> 6]);
- print_avi_infoframe(sd);
+ adv7842_log_infoframes(sd);
+
return 0;
}
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 2820f7c38cba..6ed16e569bbf 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -125,9 +125,9 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
if (length == 1)
return *data;
else if (length == 2)
- return be16_to_cpu(*((u16 *)data));
+ return be16_to_cpu(*((__be16 *)data));
else
- return be32_to_cpu(*((u32 *)data));
+ return be32_to_cpu(*((__be32 *)data));
}
/**
@@ -454,11 +454,6 @@ static int m5mols_get_version(struct v4l2_subdev *sd)
return ret;
}
- ver->fw = be16_to_cpu(ver->fw);
- ver->hw = be16_to_cpu(ver->hw);
- ver->param = be16_to_cpu(ver->param);
- ver->awb = be16_to_cpu(ver->awb);
-
v4l2_info(sd, "Manufacturer\t[%s]\n",
is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
"Samsung Electro-Machanics" :
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 4d9c6bc34265..dcc68ec71732 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -904,11 +904,3 @@ static struct i2c_driver msp_driver = {
};
module_i2c_driver(msp_driver);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 45b3fca188ca..76431223f0ff 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -422,22 +422,25 @@ done:
return ret;
}
-static int mt9m032_get_pad_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
mutex_lock(&sensor->lock);
- crop->rect = *__mt9m032_get_pad_crop(sensor, fh, crop->which);
+ sel->r = *__mt9m032_get_pad_crop(sensor, fh, sel->which);
mutex_unlock(&sensor->lock);
return 0;
}
-static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
struct v4l2_mbus_framefmt *format;
@@ -445,9 +448,12 @@ static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev,
struct v4l2_rect rect;
int ret = 0;
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
mutex_lock(&sensor->lock);
- if (sensor->streaming && crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (sensor->streaming && sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
ret = -EBUSY;
goto done;
}
@@ -455,13 +461,13 @@ static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev,
/* Clamp the crop rectangle boundaries and align them to a multiple of 2
* pixels to ensure a GRBG Bayer pattern.
*/
- rect.left = clamp(ALIGN(crop->rect.left, 2), MT9M032_COLUMN_START_MIN,
+ rect.left = clamp(ALIGN(sel->r.left, 2), MT9M032_COLUMN_START_MIN,
MT9M032_COLUMN_START_MAX);
- rect.top = clamp(ALIGN(crop->rect.top, 2), MT9M032_ROW_START_MIN,
+ rect.top = clamp(ALIGN(sel->r.top, 2), MT9M032_ROW_START_MIN,
MT9M032_ROW_START_MAX);
- rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
MT9M032_COLUMN_SIZE_MIN, MT9M032_COLUMN_SIZE_MAX);
- rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
MT9M032_ROW_SIZE_MIN, MT9M032_ROW_SIZE_MAX);
rect.width = min_t(unsigned int, rect.width,
@@ -469,21 +475,21 @@ static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9m032_get_pad_crop(sensor, fh, crop->which);
+ __crop = __mt9m032_get_pad_crop(sensor, fh, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- format = __mt9m032_get_pad_format(sensor, fh, crop->which);
+ format = __mt9m032_get_pad_format(sensor, fh, sel->which);
format->width = rect.width;
format->height = rect.height;
}
*__crop = rect;
- crop->rect = rect;
+ sel->r = rect;
- if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
ret = mt9m032_update_geom_timing(sensor);
done:
@@ -690,8 +696,8 @@ static const struct v4l2_subdev_pad_ops mt9m032_pad_ops = {
.enum_frame_size = mt9m032_enum_frame_size,
.get_fmt = mt9m032_get_pad_format,
.set_fmt = mt9m032_set_pad_format,
- .set_crop = mt9m032_set_pad_crop,
- .get_crop = mt9m032_get_pad_crop,
+ .set_selection = mt9m032_set_pad_selection,
+ .get_selection = mt9m032_get_pad_selection,
};
static const struct v4l2_subdev_ops mt9m032_ops = {
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index edb76bd33d16..e3acae9a2ec3 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -581,37 +581,42 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
return 0;
}
-static int mt9p031_get_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9p031_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
- crop->rect = *__mt9p031_get_pad_crop(mt9p031, fh, crop->pad,
- crop->which);
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sel->r = *__mt9p031_get_pad_crop(mt9p031, fh, sel->pad, sel->which);
return 0;
}
-static int mt9p031_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9p031_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
struct v4l2_mbus_framefmt *__format;
struct v4l2_rect *__crop;
struct v4l2_rect rect;
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
/* Clamp the crop rectangle boundaries and align them to a multiple of 2
* pixels to ensure a GRBG Bayer pattern.
*/
- rect.left = clamp(ALIGN(crop->rect.left, 2), MT9P031_COLUMN_START_MIN,
+ rect.left = clamp(ALIGN(sel->r.left, 2), MT9P031_COLUMN_START_MIN,
MT9P031_COLUMN_START_MAX);
- rect.top = clamp(ALIGN(crop->rect.top, 2), MT9P031_ROW_START_MIN,
+ rect.top = clamp(ALIGN(sel->r.top, 2), MT9P031_ROW_START_MIN,
MT9P031_ROW_START_MAX);
- rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
MT9P031_WINDOW_WIDTH_MIN,
MT9P031_WINDOW_WIDTH_MAX);
- rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
MT9P031_WINDOW_HEIGHT_MIN,
MT9P031_WINDOW_HEIGHT_MAX);
@@ -620,20 +625,20 @@ static int mt9p031_set_crop(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9p031_get_pad_crop(mt9p031, fh, crop->pad, crop->which);
+ __crop = __mt9p031_get_pad_crop(mt9p031, fh, sel->pad, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9p031_get_pad_format(mt9p031, fh, crop->pad,
- crop->which);
+ __format = __mt9p031_get_pad_format(mt9p031, fh, sel->pad,
+ sel->which);
__format->width = rect.width;
__format->height = rect.height;
}
*__crop = rect;
- crop->rect = rect;
+ sel->r = rect;
return 0;
}
@@ -980,8 +985,8 @@ static struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
.enum_frame_size = mt9p031_enum_frame_size,
.get_fmt = mt9p031_get_format,
.set_fmt = mt9p031_set_format,
- .get_crop = mt9p031_get_crop,
- .set_crop = mt9p031_set_crop,
+ .get_selection = mt9p031_get_selection,
+ .set_selection = mt9p031_set_selection,
};
static struct v4l2_subdev_ops mt9p031_subdev_ops = {
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index d9e9889b579f..f6ca636b538d 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -401,39 +401,44 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
return 0;
}
-static int mt9t001_get_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9t001_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
- crop->rect = *__mt9t001_get_pad_crop(mt9t001, fh, crop->pad,
- crop->which);
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sel->r = *__mt9t001_get_pad_crop(mt9t001, fh, sel->pad, sel->which);
return 0;
}
-static int mt9t001_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9t001_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
struct v4l2_mbus_framefmt *__format;
struct v4l2_rect *__crop;
struct v4l2_rect rect;
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
/* Clamp the crop rectangle boundaries and align them to a multiple of 2
* pixels.
*/
- rect.left = clamp(ALIGN(crop->rect.left, 2),
+ rect.left = clamp(ALIGN(sel->r.left, 2),
MT9T001_COLUMN_START_MIN,
MT9T001_COLUMN_START_MAX);
- rect.top = clamp(ALIGN(crop->rect.top, 2),
+ rect.top = clamp(ALIGN(sel->r.top, 2),
MT9T001_ROW_START_MIN,
MT9T001_ROW_START_MAX);
- rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
MT9T001_WINDOW_WIDTH_MIN + 1,
MT9T001_WINDOW_WIDTH_MAX + 1);
- rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
MT9T001_WINDOW_HEIGHT_MIN + 1,
MT9T001_WINDOW_HEIGHT_MAX + 1);
@@ -442,20 +447,20 @@ static int mt9t001_set_crop(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which);
+ __crop = __mt9t001_get_pad_crop(mt9t001, fh, sel->pad, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9t001_get_pad_format(mt9t001, fh, crop->pad,
- crop->which);
+ __format = __mt9t001_get_pad_format(mt9t001, fh, sel->pad,
+ sel->which);
__format->width = rect.width;
__format->height = rect.height;
}
*__crop = rect;
- crop->rect = rect;
+ sel->r = rect;
return 0;
}
@@ -819,8 +824,8 @@ static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = {
.enum_frame_size = mt9t001_enum_frame_size,
.get_fmt = mt9t001_get_format,
.set_fmt = mt9t001_set_format,
- .get_crop = mt9t001_get_crop,
- .set_crop = mt9t001_set_crop,
+ .get_selection = mt9t001_get_selection,
+ .set_selection = mt9t001_set_selection,
};
static struct v4l2_subdev_ops mt9t001_subdev_ops = {
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 93687c1e4097..bd3f979a4d49 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -552,39 +552,44 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
return 0;
}
-static int mt9v032_get_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9v032_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- crop->rect = *__mt9v032_get_pad_crop(mt9v032, fh, crop->pad,
- crop->which);
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sel->r = *__mt9v032_get_pad_crop(mt9v032, fh, sel->pad, sel->which);
return 0;
}
-static int mt9v032_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int mt9v032_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
struct v4l2_mbus_framefmt *__format;
struct v4l2_rect *__crop;
struct v4l2_rect rect;
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
/* Clamp the crop rectangle boundaries and align them to a non multiple
* of 2 pixels to ensure a GRBG Bayer pattern.
*/
- rect.left = clamp(ALIGN(crop->rect.left + 1, 2) - 1,
+ rect.left = clamp(ALIGN(sel->r.left + 1, 2) - 1,
MT9V032_COLUMN_START_MIN,
MT9V032_COLUMN_START_MAX);
- rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
+ rect.top = clamp(ALIGN(sel->r.top + 1, 2) - 1,
MT9V032_ROW_START_MIN,
MT9V032_ROW_START_MAX);
- rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
MT9V032_WINDOW_WIDTH_MIN,
MT9V032_WINDOW_WIDTH_MAX);
- rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
MT9V032_WINDOW_HEIGHT_MIN,
MT9V032_WINDOW_HEIGHT_MAX);
@@ -593,17 +598,17 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int,
rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9v032_get_pad_crop(mt9v032, fh, crop->pad, crop->which);
+ __crop = __mt9v032_get_pad_crop(mt9v032, fh, sel->pad, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9v032_get_pad_format(mt9v032, fh, crop->pad,
- crop->which);
+ __format = __mt9v032_get_pad_format(mt9v032, fh, sel->pad,
+ sel->which);
__format->width = rect.width;
__format->height = rect.height;
- if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
mt9v032->hratio = 1;
mt9v032->vratio = 1;
mt9v032_configure_pixel_rate(mt9v032);
@@ -611,7 +616,7 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
}
*__crop = rect;
- crop->rect = rect;
+ sel->r = rect;
return 0;
}
@@ -844,8 +849,8 @@ static struct v4l2_subdev_pad_ops mt9v032_subdev_pad_ops = {
.enum_frame_size = mt9v032_enum_frame_size,
.get_fmt = mt9v032_get_format,
.set_fmt = mt9v032_set_format,
- .get_crop = mt9v032_get_crop,
- .set_crop = mt9v032_set_crop,
+ .get_selection = mt9v032_get_selection,
+ .set_selection = mt9v032_set_selection,
};
static struct v4l2_subdev_ops mt9v032_subdev_ops = {
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index d1c50c9d43ae..70071314789e 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -220,7 +220,7 @@ static int s5k4ecgx_i2c_read(struct i2c_client *client, u16 addr, u16 *val)
msg[1].buf = rbuf;
ret = i2c_transfer(client->adapter, msg, 2);
- *val = be16_to_cpu(*((u16 *)rbuf));
+ *val = be16_to_cpu(*((__be16 *)rbuf));
v4l2_dbg(4, debug, client, "i2c_read: 0x%04X : 0x%04x\n", addr, *val);
@@ -341,7 +341,7 @@ static int s5k4ecgx_load_firmware(struct v4l2_subdev *sd)
v4l2_err(sd, "Failed to read firmware %s\n", S5K4ECGX_FIRMWARE);
return err;
}
- regs_num = le32_to_cpu(get_unaligned_le32(fw->data));
+ regs_num = get_unaligned_le32(fw->data);
v4l2_dbg(3, debug, sd, "FW: %s size %zu register sets %d\n",
S5K4ECGX_FIRMWARE, fw->size, regs_num);
@@ -351,8 +351,7 @@ static int s5k4ecgx_load_firmware(struct v4l2_subdev *sd)
err = -EINVAL;
goto fw_out;
}
- crc_file = le32_to_cpu(get_unaligned_le32(fw->data +
- regs_num * FW_RECORD_SIZE));
+ crc_file = get_unaligned_le32(fw->data + regs_num * FW_RECORD_SIZE);
crc = crc32_le(~0, fw->data, regs_num * FW_RECORD_SIZE);
if (crc != crc_file) {
v4l2_err(sd, "FW: invalid crc (%#x:%#x)\n", crc, crc_file);
@@ -361,9 +360,9 @@ static int s5k4ecgx_load_firmware(struct v4l2_subdev *sd)
}
ptr = fw->data + FW_RECORD_SIZE;
for (i = 1; i < regs_num; i++) {
- addr = le32_to_cpu(get_unaligned_le32(ptr));
+ addr = get_unaligned_le32(ptr);
ptr += sizeof(u32);
- val = le16_to_cpu(get_unaligned_le16(ptr));
+ val = get_unaligned_le16(ptr);
ptr += sizeof(u16);
if (addr - addr_inc != 2)
err = s5k4ecgx_write(client, addr, val);
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 60a74d8d38d5..a3d7d0391302 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -353,7 +353,7 @@ static struct v4l2_rect s5k5baf_cis_rect = {
*
*/
static int s5k5baf_fw_parse(struct device *dev, struct s5k5baf_fw **fw,
- size_t count, const u16 *data)
+ size_t count, const __le16 *data)
{
struct s5k5baf_fw *f;
u16 *d, i, *end;
@@ -421,6 +421,7 @@ static u16 s5k5baf_i2c_read(struct s5k5baf *state, u16 addr)
{
struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
__be16 w, r;
+ u16 res;
struct i2c_msg msg[] = {
{ .addr = c->addr, .flags = 0,
.len = 2, .buf = (u8 *)&w },
@@ -434,15 +435,15 @@ static u16 s5k5baf_i2c_read(struct s5k5baf *state, u16 addr)
w = cpu_to_be16(addr);
ret = i2c_transfer(c->adapter, msg, 2);
- r = be16_to_cpu(r);
+ res = be16_to_cpu(r);
- v4l2_dbg(3, debug, c, "i2c_read: 0x%04x : 0x%04x\n", addr, r);
+ v4l2_dbg(3, debug, c, "i2c_read: 0x%04x : 0x%04x\n", addr, res);
if (ret != 2) {
v4l2_err(c, "i2c_read: error during transfer (%d)\n", ret);
state->error = ret;
}
- return r;
+ return res;
}
static void s5k5baf_i2c_write(struct s5k5baf *state, u16 addr, u16 val)
@@ -1037,7 +1038,7 @@ static int s5k5baf_load_setfile(struct s5k5baf *state)
}
ret = s5k5baf_fw_parse(&c->dev, &state->fw, fw->size / 2,
- (u16 *)fw->data);
+ (__le16 *)fw->data);
release_firmware(fw);
@@ -1793,7 +1794,7 @@ static const struct v4l2_subdev_ops s5k5baf_subdev_ops = {
static int s5k5baf_configure_gpios(struct s5k5baf *state)
{
- static const char const *name[] = { "S5K5BAF_STBY", "S5K5BAF_RST" };
+ static const char * const name[] = { "S5K5BAF_STBY", "S5K5BAF_RST" };
struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
struct s5k5baf_gpio *g = state->gpios;
int ret, i;
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 2851581e0061..b1c583239dab 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -348,7 +348,7 @@ static int s5k6aa_i2c_read(struct i2c_client *client, u16 addr, u16 *val)
msg[1].buf = rbuf;
ret = i2c_transfer(client->adapter, msg, 2);
- *val = be16_to_cpu(*((u16 *)rbuf));
+ *val = be16_to_cpu(*((__be16 *)rbuf));
v4l2_dbg(3, debug, client, "i2c_read: 0x%04X : 0x%04x\n", addr, *val);
@@ -1161,17 +1161,21 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return ret;
}
-static int s5k6aa_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int s5k6aa_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
struct v4l2_rect *rect;
- memset(crop->reserved, 0, sizeof(crop->reserved));
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ memset(sel->reserved, 0, sizeof(sel->reserved));
mutex_lock(&s5k6aa->lock);
- rect = __s5k6aa_get_crop_rect(s5k6aa, fh, crop->which);
- crop->rect = *rect;
+ rect = __s5k6aa_get_crop_rect(s5k6aa, fh, sel->which);
+ sel->r = *rect;
mutex_unlock(&s5k6aa->lock);
v4l2_dbg(1, debug, sd, "Current crop rectangle: (%d,%d)/%dx%d\n",
@@ -1180,35 +1184,39 @@ static int s5k6aa_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
-static int s5k6aa_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int s5k6aa_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
struct v4l2_mbus_framefmt *mf;
unsigned int max_x, max_y;
struct v4l2_rect *crop_r;
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
mutex_lock(&s5k6aa->lock);
- crop_r = __s5k6aa_get_crop_rect(s5k6aa, fh, crop->which);
+ crop_r = __s5k6aa_get_crop_rect(s5k6aa, fh, sel->which);
- if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
mf = &s5k6aa->preset->mbus_fmt;
s5k6aa->apply_crop = 1;
} else {
mf = v4l2_subdev_get_try_format(fh, 0);
}
- v4l_bound_align_image(&crop->rect.width, mf->width,
+ v4l_bound_align_image(&sel->r.width, mf->width,
S5K6AA_WIN_WIDTH_MAX, 1,
- &crop->rect.height, mf->height,
+ &sel->r.height, mf->height,
S5K6AA_WIN_HEIGHT_MAX, 1, 0);
- max_x = (S5K6AA_WIN_WIDTH_MAX - crop->rect.width) & ~1;
- max_y = (S5K6AA_WIN_HEIGHT_MAX - crop->rect.height) & ~1;
+ max_x = (S5K6AA_WIN_WIDTH_MAX - sel->r.width) & ~1;
+ max_y = (S5K6AA_WIN_HEIGHT_MAX - sel->r.height) & ~1;
- crop->rect.left = clamp_t(unsigned int, crop->rect.left, 0, max_x);
- crop->rect.top = clamp_t(unsigned int, crop->rect.top, 0, max_y);
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, max_x);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, max_y);
- *crop_r = crop->rect;
+ *crop_r = sel->r;
mutex_unlock(&s5k6aa->lock);
@@ -1224,8 +1232,8 @@ static const struct v4l2_subdev_pad_ops s5k6aa_pad_ops = {
.enum_frame_interval = s5k6aa_enum_frame_interval,
.get_fmt = s5k6aa_get_fmt,
.set_fmt = s5k6aa_set_fmt,
- .get_crop = s5k6aa_get_crop,
- .set_crop = s5k6aa_set_crop,
+ .get_selection = s5k6aa_get_selection,
+ .set_selection = s5k6aa_set_selection,
};
static const struct v4l2_subdev_video_ops s5k6aa_video_ops = {
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index e40d9027df3d..e3348db56c46 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -14,14 +14,9 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
+#include <linux/device.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/module.h>
diff --git a/drivers/media/i2c/smiapp-pll.h b/drivers/media/i2c/smiapp-pll.h
index e8f035a50c76..b98d143b64e1 100644
--- a/drivers/media/i2c/smiapp-pll.h
+++ b/drivers/media/i2c/smiapp-pll.h
@@ -14,19 +14,11 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef SMIAPP_PLL_H
#define SMIAPP_PLL_H
-#include <linux/device.h>
-
/* CSI-2 or CCP-2 */
#define SMIAPP_PLL_BUS_TYPE_CSI2 0x00
#define SMIAPP_PLL_BUS_TYPE_PARALLEL 0x01
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 0df5070e73c7..d47eff5d3101 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -18,12 +18,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#include <linux/clk.h>
@@ -31,11 +25,13 @@
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/smiapp.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-of.h>
#include "smiapp.h"
@@ -523,14 +519,12 @@ static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
static int smiapp_init_controls(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
- sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
- unsigned int max, i;
int rval;
rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 12);
if (rval)
return rval;
+
sensor->pixel_array->ctrl_handler.lock = &sensor->mutex;
sensor->analog_gain = v4l2_ctrl_new_std(
@@ -576,21 +570,11 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
ARRAY_SIZE(smiapp_test_patterns) - 1,
0, 0, smiapp_test_patterns);
- for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) {
- int max_value = (1 << sensor->csi_format->width) - 1;
- sensor->test_data[i] =
- v4l2_ctrl_new_std(
- &sensor->pixel_array->ctrl_handler,
- &smiapp_ctrl_ops, V4L2_CID_TEST_PATTERN_RED + i,
- 0, max_value, 1, max_value);
- }
-
if (sensor->pixel_array->ctrl_handler.error) {
dev_err(&client->dev,
"pixel array controls initialization failed (%d)\n",
sensor->pixel_array->ctrl_handler.error);
- rval = sensor->pixel_array->ctrl_handler.error;
- goto error;
+ return sensor->pixel_array->ctrl_handler.error;
}
sensor->pixel_array->sd.ctrl_handler =
@@ -600,15 +584,9 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
rval = v4l2_ctrl_handler_init(&sensor->src->ctrl_handler, 0);
if (rval)
- goto error;
- sensor->src->ctrl_handler.lock = &sensor->mutex;
-
- for (max = 0; sensor->platform_data->op_sys_clock[max + 1]; max++);
+ return rval;
- sensor->link_freq = v4l2_ctrl_new_int_menu(
- &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
- V4L2_CID_LINK_FREQ, __fls(*valid_link_freqs),
- __ffs(*valid_link_freqs), sensor->platform_data->op_sys_clock);
+ sensor->src->ctrl_handler.lock = &sensor->mutex;
sensor->pixel_rate_csi = v4l2_ctrl_new_std(
&sensor->src->ctrl_handler, &smiapp_ctrl_ops,
@@ -618,20 +596,41 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
dev_err(&client->dev,
"src controls initialization failed (%d)\n",
sensor->src->ctrl_handler.error);
- rval = sensor->src->ctrl_handler.error;
- goto error;
+ return sensor->src->ctrl_handler.error;
}
- sensor->src->sd.ctrl_handler =
- &sensor->src->ctrl_handler;
+ sensor->src->sd.ctrl_handler = &sensor->src->ctrl_handler;
return 0;
+}
+
+/*
+ * For controls that require information on available media bus codes
+ * and link frequencies.
+ */
+static int smiapp_init_late_controls(struct smiapp_sensor *sensor)
+{
+ unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
+ sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
+ unsigned int max, i;
-error:
- v4l2_ctrl_handler_free(&sensor->pixel_array->ctrl_handler);
- v4l2_ctrl_handler_free(&sensor->src->ctrl_handler);
+ for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) {
+ int max_value = (1 << sensor->csi_format->width) - 1;
- return rval;
+ sensor->test_data[i] = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler,
+ &smiapp_ctrl_ops, V4L2_CID_TEST_PATTERN_RED + i,
+ 0, max_value, 1, max_value);
+ }
+
+ for (max = 0; sensor->platform_data->op_sys_clock[max + 1]; max++);
+
+ sensor->link_freq = v4l2_ctrl_new_int_menu(
+ &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_LINK_FREQ, __fls(*valid_link_freqs),
+ __ffs(*valid_link_freqs), sensor->platform_data->op_sys_clock);
+
+ return sensor->src->ctrl_handler.error;
}
static void smiapp_free_controls(struct smiapp_sensor *sensor)
@@ -1487,7 +1486,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if (rval < 0)
goto out;
- if ((sensor->flash_capability &
+ if ((sensor->limits[SMIAPP_LIMIT_FLASH_MODE_CAPABILITY] &
(SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE |
SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) &&
sensor->platform_data->strobe_setup != NULL &&
@@ -2338,10 +2337,9 @@ static DEVICE_ATTR(ident, S_IRUGO, smiapp_sysfs_ident_read, NULL);
* V4L2 subdev core operations
*/
-static int smiapp_identify_module(struct v4l2_subdev *subdev)
+static int smiapp_identify_module(struct smiapp_sensor *sensor)
{
- struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct smiapp_module_info *minfo = &sensor->minfo;
unsigned int i;
int rval = 0;
@@ -2464,8 +2462,6 @@ static int smiapp_identify_module(struct v4l2_subdev *subdev)
minfo->name, minfo->manufacturer_id, minfo->model_id,
minfo->revision_number_major);
- strlcpy(subdev->name, sensor->minfo.name, sizeof(subdev->name));
-
return 0;
}
@@ -2473,13 +2469,71 @@ static const struct v4l2_subdev_ops smiapp_ops;
static const struct v4l2_subdev_internal_ops smiapp_internal_ops;
static const struct media_entity_operations smiapp_entity_ops;
-static int smiapp_registered(struct v4l2_subdev *subdev)
+static int smiapp_register_subdevs(struct smiapp_sensor *sensor)
{
- struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct smiapp_subdev *ssds[] = {
+ sensor->scaler,
+ sensor->binner,
+ sensor->pixel_array,
+ };
+ unsigned int i;
+ int rval;
+
+ for (i = 0; i < SMIAPP_SUBDEVS - 1; i++) {
+ struct smiapp_subdev *this = ssds[i + 1];
+ struct smiapp_subdev *last = ssds[i];
+
+ if (!last)
+ continue;
+
+ rval = media_entity_init(&this->sd.entity,
+ this->npads, this->pads, 0);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_init failed\n");
+ return rval;
+ }
+
+ rval = media_entity_create_link(&this->sd.entity,
+ this->source_pad,
+ &last->sd.entity,
+ last->sink_pad,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_create_link failed\n");
+ return rval;
+ }
+
+ rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
+ &this->sd);
+ if (rval) {
+ dev_err(&client->dev,
+ "v4l2_device_register_subdev failed\n");
+ return rval;
+ }
+ }
+
+ return 0;
+}
+
+static void smiapp_cleanup(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+
+ device_remove_file(&client->dev, &dev_attr_nvm);
+ device_remove_file(&client->dev, &dev_attr_ident);
+
+ smiapp_free_controls(sensor);
+}
+
+static int smiapp_init(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct smiapp_pll *pll = &sensor->pll;
struct smiapp_subdev *last = NULL;
- u32 tmp;
unsigned int i;
int rval;
@@ -2490,7 +2544,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
}
if (!sensor->platform_data->set_xclk) {
- sensor->ext_clk = devm_clk_get(&client->dev, "ext_clk");
+ sensor->ext_clk = devm_clk_get(&client->dev, NULL);
if (IS_ERR(sensor->ext_clk)) {
dev_err(&client->dev, "could not get clock\n");
return PTR_ERR(sensor->ext_clk);
@@ -2522,7 +2576,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
if (rval)
return -ENODEV;
- rval = smiapp_identify_module(subdev);
+ rval = smiapp_identify_module(sensor);
if (rval) {
rval = -ENODEV;
goto out_power_off;
@@ -2602,13 +2656,13 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
if (sensor->nvm == NULL) {
dev_err(&client->dev, "nvm buf allocation failed\n");
rval = -ENOMEM;
- goto out_ident_release;
+ goto out_cleanup;
}
if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
dev_err(&client->dev, "sysfs nvm entry failed\n");
rval = -EBUSY;
- goto out_ident_release;
+ goto out_cleanup;
}
}
@@ -2643,18 +2697,11 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
pll->csi2.lanes = sensor->platform_data->lanes;
pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
- pll->flags = smiapp_call_quirk(sensor, pll_flags);
pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
/* Profile 0 sensors have no separate OP clock branch. */
if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
- rval = smiapp_get_mbus_formats(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_nvm_release;
- }
-
for (i = 0; i < SMIAPP_SUBDEVS; i++) {
struct {
struct smiapp_subdev *ssd;
@@ -2711,34 +2758,6 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
this->sd.owner = THIS_MODULE;
v4l2_set_subdevdata(&this->sd, client);
- rval = media_entity_init(&this->sd.entity,
- this->npads, this->pads, 0);
- if (rval) {
- dev_err(&client->dev,
- "media_entity_init failed\n");
- goto out_nvm_release;
- }
-
- rval = media_entity_create_link(&this->sd.entity,
- this->source_pad,
- &last->sd.entity,
- last->sink_pad,
- MEDIA_LNK_FL_ENABLED |
- MEDIA_LNK_FL_IMMUTABLE);
- if (rval) {
- dev_err(&client->dev,
- "media_entity_create_link failed\n");
- goto out_nvm_release;
- }
-
- rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
- &this->sd);
- if (rval) {
- dev_err(&client->dev,
- "v4l2_device_register_subdev failed\n");
- goto out_nvm_release;
- }
-
last = this;
}
@@ -2750,40 +2769,66 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
smiapp_read_frame_fmt(sensor);
rval = smiapp_init_controls(sensor);
if (rval < 0)
- goto out_nvm_release;
+ goto out_cleanup;
+
+ rval = smiapp_call_quirk(sensor, init);
+ if (rval)
+ goto out_cleanup;
+
+ rval = smiapp_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
+
+ rval = smiapp_init_late_controls(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
mutex_lock(&sensor->mutex);
rval = smiapp_update_mode(sensor);
mutex_unlock(&sensor->mutex);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
- goto out_nvm_release;
+ goto out_cleanup;
}
sensor->streaming = false;
sensor->dev_init_done = true;
- /* check flash capability */
- rval = smiapp_read(sensor, SMIAPP_REG_U8_FLASH_MODE_CAPABILITY, &tmp);
- sensor->flash_capability = tmp;
- if (rval)
- goto out_nvm_release;
-
smiapp_power_off(sensor);
return 0;
-out_nvm_release:
- device_remove_file(&client->dev, &dev_attr_nvm);
-
-out_ident_release:
- device_remove_file(&client->dev, &dev_attr_ident);
+out_cleanup:
+ smiapp_cleanup(sensor);
out_power_off:
smiapp_power_off(sensor);
return rval;
}
+static int smiapp_registered(struct v4l2_subdev *subdev)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int rval;
+
+ if (!client->dev.of_node) {
+ rval = smiapp_init(sensor);
+ if (rval)
+ return rval;
+ }
+
+ rval = smiapp_register_subdevs(sensor);
+ if (rval)
+ smiapp_cleanup(sensor);
+
+ return rval;
+}
+
static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
@@ -2927,19 +2972,125 @@ static int smiapp_resume(struct device *dev)
#endif /* CONFIG_PM */
+static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
+{
+ struct smiapp_platform_data *pdata;
+ struct v4l2_of_endpoint bus_cfg;
+ struct device_node *ep;
+ struct property *prop;
+ __be32 *val;
+ uint32_t asize;
+#ifdef CONFIG_OF
+ unsigned int i;
+#endif
+ int rval;
+
+ if (!dev->of_node)
+ return dev->platform_data;
+
+ ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!ep)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ rval = -ENOMEM;
+ goto out_err;
+ }
+
+ v4l2_of_parse_endpoint(ep, &bus_cfg);
+
+ switch (bus_cfg.bus_type) {
+ case V4L2_MBUS_CSI2:
+ pdata->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2;
+ break;
+ /* FIXME: add CCP2 support. */
+ default:
+ rval = -EINVAL;
+ goto out_err;
+ }
+
+ pdata->lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ dev_dbg(dev, "lanes %u\n", pdata->lanes);
+
+ /* xshutdown GPIO is optional */
+ pdata->xshutdown = of_get_named_gpio(dev->of_node, "reset-gpios", 0);
+
+ /* NVM size is not mandatory */
+ of_property_read_u32(dev->of_node, "nokia,nvm-size",
+ &pdata->nvm_size);
+
+ rval = of_property_read_u32(dev->of_node, "clock-frequency",
+ &pdata->ext_clk);
+ if (rval) {
+ dev_warn(dev, "can't get clock-frequency\n");
+ goto out_err;
+ }
+
+ dev_dbg(dev, "reset %d, nvm %d, clk %d, csi %d\n", pdata->xshutdown,
+ pdata->nvm_size, pdata->ext_clk, pdata->csi_signalling_mode);
+
+ rval = of_get_property(
+ dev->of_node, "link-frequencies", &asize) ? 0 : -ENOENT;
+ if (rval) {
+ dev_warn(dev, "can't get link-frequencies array size\n");
+ goto out_err;
+ }
+
+ pdata->op_sys_clock = devm_kzalloc(dev, asize, GFP_KERNEL);
+ if (!pdata->op_sys_clock) {
+ rval = -ENOMEM;
+ goto out_err;
+ }
+
+ asize /= sizeof(*pdata->op_sys_clock);
+ /*
+ * Read a 64-bit array --- this will be replaced with an
+ * of_property_read_u64_array() once it's merged.
+ */
+ prop = of_find_property(dev->of_node, "link-frequencies", NULL);
+ if (!prop)
+ goto out_err;
+ if (!prop->value)
+ goto out_err;
+ if (asize * sizeof(*pdata->op_sys_clock) > prop->length)
+ goto out_err;
+ val = prop->value;
+ if (IS_ERR(val))
+ goto out_err;
+
+#ifdef CONFIG_OF
+ for (i = 0; i < asize; i++)
+ pdata->op_sys_clock[i] = of_read_number(val + i * 2, 2);
+#endif
+
+ for (; asize > 0; asize--)
+ dev_dbg(dev, "freq %d: %lld\n", asize - 1,
+ pdata->op_sys_clock[asize - 1]);
+
+ of_node_put(ep);
+ return pdata;
+
+out_err:
+ of_node_put(ep);
+ return NULL;
+}
+
static int smiapp_probe(struct i2c_client *client,
const struct i2c_device_id *devid)
{
struct smiapp_sensor *sensor;
+ struct smiapp_platform_data *pdata = smiapp_get_pdata(&client->dev);
+ int rval;
- if (client->dev.platform_data == NULL)
+ if (pdata == NULL)
return -ENODEV;
sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
if (sensor == NULL)
return -ENOMEM;
- sensor->platform_data = client->dev.platform_data;
+ sensor->platform_data = pdata;
mutex_init(&sensor->mutex);
mutex_init(&sensor->power_mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
@@ -2950,8 +3101,27 @@ static int smiapp_probe(struct i2c_client *client,
sensor->src->sensor = sensor;
sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
- return media_entity_init(&sensor->src->sd.entity, 2,
+ rval = media_entity_init(&sensor->src->sd.entity, 2,
sensor->src->pads, 0);
+ if (rval < 0)
+ return rval;
+
+ if (client->dev.of_node) {
+ rval = smiapp_init(sensor);
+ if (rval)
+ goto out_media_entity_cleanup;
+ }
+
+ rval = v4l2_async_register_subdev(&sensor->src->sd);
+ if (rval < 0)
+ goto out_media_entity_cleanup;
+
+ return 0;
+
+out_media_entity_cleanup:
+ media_entity_cleanup(&sensor->src->sd.entity);
+
+ return rval;
}
static int smiapp_remove(struct i2c_client *client)
@@ -2960,6 +3130,8 @@ static int smiapp_remove(struct i2c_client *client)
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
unsigned int i;
+ v4l2_async_unregister_subdev(subdev);
+
if (sensor->power_count) {
if (gpio_is_valid(sensor->platform_data->xshutdown))
gpio_set_value(sensor->platform_data->xshutdown, 0);
@@ -2970,19 +3142,20 @@ static int smiapp_remove(struct i2c_client *client)
sensor->power_count = 0;
}
- device_remove_file(&client->dev, &dev_attr_ident);
- if (sensor->nvm)
- device_remove_file(&client->dev, &dev_attr_nvm);
-
for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
media_entity_cleanup(&sensor->ssds[i].sd.entity);
}
- smiapp_free_controls(sensor);
+ smiapp_cleanup(sensor);
return 0;
}
+static const struct of_device_id smiapp_of_table[] = {
+ { .compatible = "nokia,smia" },
+ { },
+};
+
static const struct i2c_device_id smiapp_id_table[] = {
{ SMIAPP_NAME, 0 },
{ },
@@ -2996,6 +3169,7 @@ static const struct dev_pm_ops smiapp_pm_ops = {
static struct i2c_driver smiapp_i2c_driver = {
.driver = {
+ .of_match_table = smiapp_of_table,
.name = SMIAPP_NAME,
.pm = &smiapp_pm_ops,
},
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.c b/drivers/media/i2c/smiapp/smiapp-limits.c
index 847cb235e198..784b114d3f8b 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.c
+++ b/drivers/media/i2c/smiapp/smiapp-limits.c
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#include "smiapp.h"
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.h b/drivers/media/i2c/smiapp/smiapp-limits.h
index 343e9c3827fc..b20124862a14 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.h
+++ b/drivers/media/i2c/smiapp/smiapp-limits.h
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#define SMIAPP_LIMIT_ANALOGUE_GAIN_CAPABILITY 0
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index e0bee8752122..abf9ea7a0fb7 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#include <linux/delay.h>
@@ -220,9 +214,11 @@ static int jt8ev1_post_streamoff(struct smiapp_sensor *sensor)
return smiapp_write_8(sensor, 0x3328, 0x80);
}
-static unsigned long jt8ev1_pll_flags(struct smiapp_sensor *sensor)
+static int jt8ev1_init(struct smiapp_sensor *sensor)
{
- return SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+ sensor->pll.flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+
+ return 0;
}
const struct smiapp_quirk smiapp_jt8ev1_quirk = {
@@ -230,7 +226,7 @@ const struct smiapp_quirk smiapp_jt8ev1_quirk = {
.post_poweron = jt8ev1_post_poweron,
.pre_streamon = jt8ev1_pre_streamon,
.post_streamoff = jt8ev1_post_streamoff,
- .pll_flags = jt8ev1_pll_flags,
+ .init = jt8ev1_init,
};
static int tcm8500md_limits(struct smiapp_sensor *sensor)
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.h b/drivers/media/i2c/smiapp/smiapp-quirk.h
index 46e9ea8bfa08..dac5566a2f7a 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.h
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.h
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __SMIAPP_QUIRK__
@@ -35,6 +29,9 @@ struct smiapp_sensor;
* @post_poweron: Called always after the sensor has been fully powered on.
* @pre_streamon: Called just before streaming is enabled.
 * @post_streamoff: Called right after stopping streaming.
+ * @pll_flags: Return flags for the PLL calculator.
+ * @init: Quirk initialisation, called last in probe(). This is
+ * also appropriate for adding sensor-specific controls, for instance.
* @reg_access: Register access quirk. The quirk may divert the access
* to another register, or no register at all.
*
@@ -53,6 +50,7 @@ struct smiapp_quirk {
int (*pre_streamon)(struct smiapp_sensor *sensor);
int (*post_streamoff)(struct smiapp_sensor *sensor);
unsigned long (*pll_flags)(struct smiapp_sensor *sensor);
+ int (*init)(struct smiapp_sensor *sensor);
int (*reg_access)(struct smiapp_sensor *sensor, bool write, u32 *reg,
u32 *val);
unsigned long flags;
@@ -74,14 +72,14 @@ void smiapp_replace_limit(struct smiapp_sensor *sensor,
.val = _val, \
}
-#define smiapp_call_quirk(_sensor, _quirk, ...) \
- (_sensor->minfo.quirk && \
- _sensor->minfo.quirk->_quirk ? \
- _sensor->minfo.quirk->_quirk(_sensor, ##__VA_ARGS__) : 0)
+#define smiapp_call_quirk(sensor, _quirk, ...) \
+ ((sensor)->minfo.quirk && \
+ (sensor)->minfo.quirk->_quirk ? \
+ (sensor)->minfo.quirk->_quirk(sensor, ##__VA_ARGS__) : 0)
-#define smiapp_needs_quirk(_sensor, _quirk) \
- (_sensor->minfo.quirk ? \
- _sensor->minfo.quirk->flags & _quirk : 0)
+#define smiapp_needs_quirk(sensor, _quirk) \
+ ((sensor)->minfo.quirk ? \
+ (sensor)->minfo.quirk->flags & _quirk : 0)
extern const struct smiapp_quirk smiapp_jt8ev1_quirk;
extern const struct smiapp_quirk smiapp_imx125es_quirk;
diff --git a/drivers/media/i2c/smiapp/smiapp-reg-defs.h b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
index c488ef028074..f928d4cc8e26 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg-defs.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#define SMIAPP_REG_MK_U8(r) ((SMIAPP_REG_8BIT << 16) | (r))
#define SMIAPP_REG_MK_U16(r) ((SMIAPP_REG_16BIT << 16) | (r))
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index b0dcbb8fa5e2..4c8b40614969 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __SMIAPP_REG_H_
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index a2098007fb70..6b6c20b61397 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#include <linux/delay.h>
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.h b/drivers/media/i2c/smiapp/smiapp-regs.h
index 35521125a2cc..6dd0e499c845 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.h
+++ b/drivers/media/i2c/smiapp/smiapp-regs.h
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef SMIAPP_REGS_H
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index f88f8ec344d3..ed010a8a49d7 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -14,12 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __SMIAPP_PRIV_H_
@@ -222,7 +216,6 @@ struct smiapp_sensor {
u8 scaling_mode;
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
- u8 flash_capability;
u8 frame_skip;
int power_count;
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 6f2dd9093d94..1fdce2f6f880 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -25,6 +25,7 @@
#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-image-sizes.h>
#define VAL_SET(x, mask, rshift, lshift) \
((((x) >> rshift) & mask) << lshift)
@@ -268,33 +269,10 @@ struct regval_list {
u8 value;
};
-/* Supported resolutions */
-enum ov2640_width {
- W_QCIF = 176,
- W_QVGA = 320,
- W_CIF = 352,
- W_VGA = 640,
- W_SVGA = 800,
- W_XGA = 1024,
- W_SXGA = 1280,
- W_UXGA = 1600,
-};
-
-enum ov2640_height {
- H_QCIF = 144,
- H_QVGA = 240,
- H_CIF = 288,
- H_VGA = 480,
- H_SVGA = 600,
- H_XGA = 768,
- H_SXGA = 1024,
- H_UXGA = 1200,
-};
-
struct ov2640_win_size {
char *name;
- enum ov2640_width width;
- enum ov2640_height height;
+ u32 width;
+ u32 height;
const struct regval_list *regs;
};
@@ -495,17 +473,17 @@ static const struct regval_list ov2640_init_regs[] = {
static const struct regval_list ov2640_size_change_preamble_regs[] = {
{ BANK_SEL, BANK_SEL_DSP },
{ RESET, RESET_DVP },
- { HSIZE8, HSIZE8_SET(W_UXGA) },
- { VSIZE8, VSIZE8_SET(H_UXGA) },
+ { HSIZE8, HSIZE8_SET(UXGA_WIDTH) },
+ { VSIZE8, VSIZE8_SET(UXGA_HEIGHT) },
{ CTRL2, CTRL2_DCW_EN | CTRL2_SDE_EN |
CTRL2_UV_AVG_EN | CTRL2_CMX_EN | CTRL2_UV_ADJ_EN },
- { HSIZE, HSIZE_SET(W_UXGA) },
- { VSIZE, VSIZE_SET(H_UXGA) },
+ { HSIZE, HSIZE_SET(UXGA_WIDTH) },
+ { VSIZE, VSIZE_SET(UXGA_HEIGHT) },
{ XOFFL, XOFFL_SET(0) },
{ YOFFL, YOFFL_SET(0) },
- { VHYX, VHYX_HSIZE_SET(W_UXGA) | VHYX_VSIZE_SET(H_UXGA) |
+ { VHYX, VHYX_HSIZE_SET(UXGA_WIDTH) | VHYX_VSIZE_SET(UXGA_HEIGHT) |
VHYX_XOFF_SET(0) | VHYX_YOFF_SET(0)},
- { TEST, TEST_HSIZE_SET(W_UXGA) },
+ { TEST, TEST_HSIZE_SET(UXGA_WIDTH) },
ENDMARKER,
};
@@ -519,45 +497,45 @@ static const struct regval_list ov2640_size_change_preamble_regs[] = {
{ RESET, 0x00}
static const struct regval_list ov2640_qcif_regs[] = {
- PER_SIZE_REG_SEQ(W_QCIF, H_QCIF, 3, 3, 4),
+ PER_SIZE_REG_SEQ(QCIF_WIDTH, QCIF_HEIGHT, 3, 3, 4),
ENDMARKER,
};
static const struct regval_list ov2640_qvga_regs[] = {
- PER_SIZE_REG_SEQ(W_QVGA, H_QVGA, 2, 2, 4),
+ PER_SIZE_REG_SEQ(QVGA_WIDTH, QVGA_HEIGHT, 2, 2, 4),
ENDMARKER,
};
static const struct regval_list ov2640_cif_regs[] = {
- PER_SIZE_REG_SEQ(W_CIF, H_CIF, 2, 2, 8),
+ PER_SIZE_REG_SEQ(CIF_WIDTH, CIF_HEIGHT, 2, 2, 8),
ENDMARKER,
};
static const struct regval_list ov2640_vga_regs[] = {
- PER_SIZE_REG_SEQ(W_VGA, H_VGA, 0, 0, 2),
+ PER_SIZE_REG_SEQ(VGA_WIDTH, VGA_HEIGHT, 0, 0, 2),
ENDMARKER,
};
static const struct regval_list ov2640_svga_regs[] = {
- PER_SIZE_REG_SEQ(W_SVGA, H_SVGA, 1, 1, 2),
+ PER_SIZE_REG_SEQ(SVGA_WIDTH, SVGA_HEIGHT, 1, 1, 2),
ENDMARKER,
};
static const struct regval_list ov2640_xga_regs[] = {
- PER_SIZE_REG_SEQ(W_XGA, H_XGA, 0, 0, 2),
+ PER_SIZE_REG_SEQ(XGA_WIDTH, XGA_HEIGHT, 0, 0, 2),
{ CTRLI, 0x00},
ENDMARKER,
};
static const struct regval_list ov2640_sxga_regs[] = {
- PER_SIZE_REG_SEQ(W_SXGA, H_SXGA, 0, 0, 2),
+ PER_SIZE_REG_SEQ(SXGA_WIDTH, SXGA_HEIGHT, 0, 0, 2),
{ CTRLI, 0x00},
{ R_DVP_SP, 2 | R_DVP_SP_AUTO_MODE },
ENDMARKER,
};
static const struct regval_list ov2640_uxga_regs[] = {
- PER_SIZE_REG_SEQ(W_UXGA, H_UXGA, 0, 0, 0),
+ PER_SIZE_REG_SEQ(UXGA_WIDTH, UXGA_HEIGHT, 0, 0, 0),
{ CTRLI, 0x00},
{ R_DVP_SP, 0 | R_DVP_SP_AUTO_MODE },
ENDMARKER,
@@ -567,14 +545,14 @@ static const struct regval_list ov2640_uxga_regs[] = {
{.name = n, .width = w , .height = h, .regs = r }
static const struct ov2640_win_size ov2640_supported_win_sizes[] = {
- OV2640_SIZE("QCIF", W_QCIF, H_QCIF, ov2640_qcif_regs),
- OV2640_SIZE("QVGA", W_QVGA, H_QVGA, ov2640_qvga_regs),
- OV2640_SIZE("CIF", W_CIF, H_CIF, ov2640_cif_regs),
- OV2640_SIZE("VGA", W_VGA, H_VGA, ov2640_vga_regs),
- OV2640_SIZE("SVGA", W_SVGA, H_SVGA, ov2640_svga_regs),
- OV2640_SIZE("XGA", W_XGA, H_XGA, ov2640_xga_regs),
- OV2640_SIZE("SXGA", W_SXGA, H_SXGA, ov2640_sxga_regs),
- OV2640_SIZE("UXGA", W_UXGA, H_UXGA, ov2640_uxga_regs),
+ OV2640_SIZE("QCIF", QCIF_WIDTH, QCIF_HEIGHT, ov2640_qcif_regs),
+ OV2640_SIZE("QVGA", QVGA_WIDTH, QVGA_HEIGHT, ov2640_qvga_regs),
+ OV2640_SIZE("CIF", CIF_WIDTH, CIF_HEIGHT, ov2640_cif_regs),
+ OV2640_SIZE("VGA", VGA_WIDTH, VGA_HEIGHT, ov2640_vga_regs),
+ OV2640_SIZE("SVGA", SVGA_WIDTH, SVGA_HEIGHT, ov2640_svga_regs),
+ OV2640_SIZE("XGA", XGA_WIDTH, XGA_HEIGHT, ov2640_xga_regs),
+ OV2640_SIZE("SXGA", SXGA_WIDTH, SXGA_HEIGHT, ov2640_sxga_regs),
+ OV2640_SIZE("UXGA", UXGA_WIDTH, UXGA_HEIGHT, ov2640_uxga_regs),
};
/*
@@ -867,7 +845,7 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
struct ov2640_priv *priv = to_ov2640(client);
if (!priv->win) {
- u32 width = W_SVGA, height = H_SVGA;
+ u32 width = SVGA_WIDTH, height = SVGA_HEIGHT;
priv->win = ov2640_select_win(&width, &height);
priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
}
@@ -954,8 +932,8 @@ static int ov2640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
a->c.left = 0;
a->c.top = 0;
- a->c.width = W_UXGA;
- a->c.height = H_UXGA;
+ a->c.width = UXGA_WIDTH;
+ a->c.height = UXGA_HEIGHT;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
return 0;
@@ -965,8 +943,8 @@ static int ov2640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
a->bounds.left = 0;
a->bounds.top = 0;
- a->bounds.width = W_UXGA;
- a->bounds.height = H_UXGA;
+ a->bounds.width = UXGA_WIDTH;
+ a->bounds.height = UXGA_HEIGHT;
a->defrect = a->bounds;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 656d889c1c79..4ebd329d7b42 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -58,21 +58,11 @@ static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct ths8200_state, sd);
}
-static inline unsigned hblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_WIDTH(t);
-}
-
static inline unsigned htotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_WIDTH(t);
}
-static inline unsigned vblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_HEIGHT(t);
-}
-
static inline unsigned vtotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_HEIGHT(t);
diff --git a/drivers/media/mmc/siano/Kconfig b/drivers/media/mmc/siano/Kconfig
index aa05ad3c1ccb..7693487e2f63 100644
--- a/drivers/media/mmc/siano/Kconfig
+++ b/drivers/media/mmc/siano/Kconfig
@@ -6,6 +6,8 @@ config SMS_SDIO_DRV
tristate "Siano SMS1xxx based MDTV via SDIO interface"
depends on DVB_CORE && HAS_DMA
depends on MMC
+ depends on !RC_CORE || RC_CORE
select MEDIA_COMMON_OPTIONS
+ select SMS_SIANO_MDTV
---help---
Choose if you would like to have Siano's support for SDIO interface
diff --git a/drivers/media/pci/bt8xx/Kconfig b/drivers/media/pci/bt8xx/Kconfig
index 61d09e010814..4a93f6ded100 100644
--- a/drivers/media/pci/bt8xx/Kconfig
+++ b/drivers/media/pci/bt8xx/Kconfig
@@ -2,15 +2,17 @@ config VIDEO_BT848
tristate "BT848 Video For Linux"
depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L2
select I2C_ALGOBIT
- select VIDEO_BTCX
select VIDEOBUF_DMA_SG
depends on RC_CORE
+ depends on MEDIA_RADIO_SUPPORT
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVAUDIO if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TDA7432 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_SAA6588 if MEDIA_SUBDRV_AUTOSELECT
+ select RADIO_ADAPTERS
+ select RADIO_TEA575X
---help---
Support for BT848 based frame grabber/overlay boards. This includes
the Miro, Hauppauge and STB boards. Please read the material in
diff --git a/drivers/media/pci/bt8xx/Makefile b/drivers/media/pci/bt8xx/Makefile
index f9fe7c4e7d53..2d4c3dd88be1 100644
--- a/drivers/media/pci/bt8xx/Makefile
+++ b/drivers/media/pci/bt8xx/Makefile
@@ -1,6 +1,6 @@
bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
bttv-risc.o bttv-vbi.o bttv-i2c.o bttv-gpio.o \
- bttv-input.o bttv-audio-hook.o
+ bttv-input.o bttv-audio-hook.o btcx-risc.o
obj-$(CONFIG_VIDEO_BT848) += bttv.o
obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 11765835d7b2..0939d399b774 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -590,9 +590,3 @@ module_init(bt878_init_module);
module_exit(bt878_cleanup_module);
MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/common/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index ac1b2687a20d..00f0880b6d66 100644
--- a/drivers/media/common/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -32,13 +32,9 @@
#include "btcx-risc.h"
-MODULE_DESCRIPTION("some code shared by bttv and cx88xx drivers");
-MODULE_AUTHOR("Gerd Knorr");
-MODULE_LICENSE("GPL");
-
-static unsigned int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"debug messages, default is 0 (no)");
+static unsigned int btcx_debug;
+module_param(btcx_debug, int, 0644);
+MODULE_PARM_DESC(btcx_debug,"debug messages, default is 0 (no)");
/* ---------------------------------------------------------- */
/* allocate/free risc memory */
@@ -50,7 +46,7 @@ void btcx_riscmem_free(struct pci_dev *pci,
{
if (NULL == risc->cpu)
return;
- if (debug) {
+ if (btcx_debug) {
memcnt--;
printk("btcx: riscmem free [%d] dma=%lx\n",
memcnt, (unsigned long)risc->dma);
@@ -75,7 +71,7 @@ int btcx_riscmem_alloc(struct pci_dev *pci,
risc->cpu = cpu;
risc->dma = dma;
risc->size = size;
- if (debug) {
+ if (btcx_debug) {
memcnt++;
printk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
memcnt, (unsigned long)dma, cpu, size);
@@ -141,7 +137,7 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
dx = nx - win->left;
win->left = nx;
win->width = nw;
- if (debug)
+ if (btcx_debug)
printk(KERN_DEBUG "btcx: window align %dx%d+%d+%d [dx=%d]\n",
win->width, win->height, win->left, win->top, dx);
@@ -153,7 +149,7 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
nw += mask+1;
clips[i].c.left = nx;
clips[i].c.width = nw;
- if (debug)
+ if (btcx_debug)
printk(KERN_DEBUG "btcx: clip align %dx%d+%d+%d\n",
clips[i].c.width, clips[i].c.height,
clips[i].c.left, clips[i].c.top);
@@ -234,7 +230,7 @@ btcx_calc_skips(int line, int width, int *maxy,
*nskips = skip;
*maxy = maxline;
- if (debug) {
+ if (btcx_debug) {
printk(KERN_DEBUG "btcx: skips line %d-%d:",line,maxline);
for (skip = 0; skip < *nskips; skip++) {
printk(" %d-%d",skips[skip].start,skips[skip].end);
@@ -242,19 +238,3 @@ btcx_calc_skips(int line, int width, int *maxy,
printk("\n");
}
}
-
-/* ---------------------------------------------------------- */
-
-EXPORT_SYMBOL(btcx_riscmem_alloc);
-EXPORT_SYMBOL(btcx_riscmem_free);
-
-EXPORT_SYMBOL(btcx_screen_clips);
-EXPORT_SYMBOL(btcx_align);
-EXPORT_SYMBOL(btcx_sort_clips);
-EXPORT_SYMBOL(btcx_calc_skips);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/btcx-risc.h b/drivers/media/pci/bt8xx/btcx-risc.h
new file mode 100644
index 000000000000..1ed7a000160a
--- /dev/null
+++ b/drivers/media/pci/bt8xx/btcx-risc.h
@@ -0,0 +1,26 @@
+struct btcx_riscmem {
+ unsigned int size;
+ __le32 *cpu;
+ __le32 *jmp;
+ dma_addr_t dma;
+};
+
+struct btcx_skiplist {
+ int start;
+ int end;
+};
+
+int btcx_riscmem_alloc(struct pci_dev *pci,
+ struct btcx_riscmem *risc,
+ unsigned int size);
+void btcx_riscmem_free(struct pci_dev *pci,
+ struct btcx_riscmem *risc);
+
+int btcx_screen_clips(int swidth, int sheight, struct v4l2_rect *win,
+ struct v4l2_clip *clips, unsigned int n);
+int btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips,
+ unsigned int n, int mask);
+void btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips);
+void btcx_calc_skips(int line, int width, int *maxy,
+ struct btcx_skiplist *skips, unsigned int *nskips,
+ const struct v4l2_clip *clips, unsigned int nclips);
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 41055606b969..4654fb65ca21 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -84,8 +84,7 @@ static void gv800s_init(struct bttv *btv);
static void td3116_muxsel(struct bttv *btv, unsigned int input);
static int terratec_active_radio_upgrade(struct bttv *btv);
-static int tea5757_read(struct bttv *btv);
-static int tea5757_write(struct bttv *btv, int value);
+static int tea575x_init(struct bttv *btv);
static void identify_by_eeprom(struct bttv *btv,
unsigned char eeprom_data[256]);
static int pvr_boot(struct bttv *btv);
@@ -3085,12 +3084,12 @@ static void miro_pinnacle_gpio(struct bttv *btv)
if (0 == (gpio & 0x20)) {
btv->has_radio = 1;
if (!miro_fmtuner[id]) {
- btv->has_matchbox = 1;
- btv->mbox_we = (1<<6);
- btv->mbox_most = (1<<7);
- btv->mbox_clk = (1<<8);
- btv->mbox_data = (1<<9);
- btv->mbox_mask = (1<<6)|(1<<7)|(1<<8)|(1<<9);
+ btv->has_tea575x = 1;
+ btv->tea_gpio.wren = 6;
+ btv->tea_gpio.most = 7;
+ btv->tea_gpio.clk = 8;
+ btv->tea_gpio.data = 9;
+ tea575x_init(btv);
}
} else {
btv->has_radio = 0;
@@ -3104,7 +3103,7 @@ static void miro_pinnacle_gpio(struct bttv *btv)
pr_info("%d: miro: id=%d tuner=%d radio=%s stereo=%s\n",
btv->c.nr, id+1, btv->tuner_type,
!btv->has_radio ? "no" :
- (btv->has_matchbox ? "matchbox" : "fmtuner"),
+ (btv->has_tea575x ? "tea575x" : "fmtuner"),
(-1 == msp) ? "no" : "yes");
} else {
/* new cards with microtune tuner */
@@ -3382,12 +3381,12 @@ void bttv_init_card2(struct bttv *btv)
break;
case BTTV_BOARD_VHX:
btv->has_radio = 1;
- btv->has_matchbox = 1;
- btv->mbox_we = 0x20;
- btv->mbox_most = 0;
- btv->mbox_clk = 0x08;
- btv->mbox_data = 0x10;
- btv->mbox_mask = 0x38;
+ btv->has_tea575x = 1;
+ btv->tea_gpio.wren = 5;
+ btv->tea_gpio.most = 6;
+ btv->tea_gpio.clk = 3;
+ btv->tea_gpio.data = 4;
+ tea575x_init(btv);
break;
case BTTV_BOARD_VOBIS_BOOSTAR:
case BTTV_BOARD_TERRATV:
@@ -3745,33 +3744,112 @@ static void hauppauge_eeprom(struct bttv *btv)
btv->radio_uses_msp_demodulator = 1;
}
-static int terratec_active_radio_upgrade(struct bttv *btv)
+/* ----------------------------------------------------------------------- */
+
+static void bttv_tea575x_set_pins(struct snd_tea575x *tea, u8 pins)
+{
+ struct bttv *btv = tea->private_data;
+ struct bttv_tea575x_gpio gpio = btv->tea_gpio;
+ u16 val = 0;
+
+ val |= (pins & TEA575X_DATA) ? (1 << gpio.data) : 0;
+ val |= (pins & TEA575X_CLK) ? (1 << gpio.clk) : 0;
+ val |= (pins & TEA575X_WREN) ? (1 << gpio.wren) : 0;
+
+ gpio_bits((1 << gpio.data) | (1 << gpio.clk) | (1 << gpio.wren), val);
+ if (btv->mbox_ior) {
+ /* IOW and CSEL active */
+ gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
+ udelay(5);
+ /* all inactive */
+ gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
+ btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
+ }
+}
+
+static u8 bttv_tea575x_get_pins(struct snd_tea575x *tea)
+{
+ struct bttv *btv = tea->private_data;
+ struct bttv_tea575x_gpio gpio = btv->tea_gpio;
+ u8 ret = 0;
+ u16 val;
+
+ if (btv->mbox_ior) {
+ /* IOR and CSEL active */
+ gpio_bits(btv->mbox_ior | btv->mbox_csel, 0);
+ udelay(5);
+ }
+ val = gpio_read();
+ if (btv->mbox_ior) {
+ /* all inactive */
+ gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
+ btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
+ }
+
+ if (val & (1 << gpio.data))
+ ret |= TEA575X_DATA;
+ if (val & (1 << gpio.most))
+ ret |= TEA575X_MOST;
+
+ return ret;
+}
+
+static void bttv_tea575x_set_direction(struct snd_tea575x *tea, bool output)
{
- int freq;
+ struct bttv *btv = tea->private_data;
+ struct bttv_tea575x_gpio gpio = btv->tea_gpio;
+ u32 mask = (1 << gpio.clk) | (1 << gpio.wren) | (1 << gpio.data) |
+ (1 << gpio.most);
+
+ if (output)
+ gpio_inout(mask, (1 << gpio.data) | (1 << gpio.clk) |
+ (1 << gpio.wren));
+ else
+ gpio_inout(mask, (1 << gpio.clk) | (1 << gpio.wren));
+}
+
+static struct snd_tea575x_ops bttv_tea_ops = {
+ .set_pins = bttv_tea575x_set_pins,
+ .get_pins = bttv_tea575x_get_pins,
+ .set_direction = bttv_tea575x_set_direction,
+};
+
+static int tea575x_init(struct bttv *btv)
+{
+ btv->tea.private_data = btv;
+ btv->tea.ops = &bttv_tea_ops;
+ if (!snd_tea575x_hw_init(&btv->tea)) {
+ pr_info("%d: detected TEA575x radio\n", btv->c.nr);
+ btv->tea.mute = false;
+ return 0;
+ }
+
+ btv->has_tea575x = 0;
+ btv->has_radio = 0;
+ return -ENODEV;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int terratec_active_radio_upgrade(struct bttv *btv)
+{
btv->has_radio = 1;
- btv->has_matchbox = 1;
- btv->mbox_we = 0x10;
- btv->mbox_most = 0x20;
- btv->mbox_clk = 0x08;
- btv->mbox_data = 0x04;
- btv->mbox_mask = 0x3c;
+ btv->has_tea575x = 1;
+ btv->tea_gpio.wren = 4;
+ btv->tea_gpio.most = 5;
+ btv->tea_gpio.clk = 3;
+ btv->tea_gpio.data = 2;
btv->mbox_iow = 1 << 8;
btv->mbox_ior = 1 << 9;
btv->mbox_csel = 1 << 10;
- freq=88000/62.5;
- tea5757_write(btv, 5 * freq + 0x358); /* write 0x1ed8 */
- if (0x1ed8 == tea5757_read(btv)) {
+ if (!tea575x_init(btv)) {
pr_info("%d: Terratec Active Radio Upgrade found\n", btv->c.nr);
- btv->has_radio = 1;
- btv->has_saa6588 = 1;
- btv->has_matchbox = 1;
- } else {
- btv->has_radio = 0;
- btv->has_matchbox = 0;
+ btv->has_saa6588 = 1;
}
+
return 0;
}
@@ -4292,181 +4370,6 @@ init_PCI8604PW(struct bttv *btv)
}
}
-
-
-/* ----------------------------------------------------------------------- */
-/* Miro Pro radio stuff -- the tea5757 is connected to some GPIO ports */
-/*
- * Copyright (c) 1999 Csaba Halasz <qgehali@uni-miskolc.hu>
- * This code is placed under the terms of the GNU General Public License
- *
- * Brutally hacked by Dan Sheridan <dan.sheridan@contact.org.uk> djs52 8/3/00
- */
-
-static void bus_low(struct bttv *btv, int bit)
-{
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
- btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
- udelay(5);
- }
-
- gpio_bits(bit,0);
- udelay(5);
-
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
- udelay(5);
- }
-}
-
-static void bus_high(struct bttv *btv, int bit)
-{
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
- btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
- udelay(5);
- }
-
- gpio_bits(bit,bit);
- udelay(5);
-
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
- udelay(5);
- }
-}
-
-static int bus_in(struct bttv *btv, int bit)
-{
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
- btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
- udelay(5);
-
- gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
- udelay(5);
- }
- return gpio_read() & (bit);
-}
-
-/* TEA5757 register bits */
-#define TEA_FREQ 0:14
-#define TEA_BUFFER 15:15
-
-#define TEA_SIGNAL_STRENGTH 16:17
-
-#define TEA_PORT1 18:18
-#define TEA_PORT0 19:19
-
-#define TEA_BAND 20:21
-#define TEA_BAND_FM 0
-#define TEA_BAND_MW 1
-#define TEA_BAND_LW 2
-#define TEA_BAND_SW 3
-
-#define TEA_MONO 22:22
-#define TEA_ALLOW_STEREO 0
-#define TEA_FORCE_MONO 1
-
-#define TEA_SEARCH_DIRECTION 23:23
-#define TEA_SEARCH_DOWN 0
-#define TEA_SEARCH_UP 1
-
-#define TEA_STATUS 24:24
-#define TEA_STATUS_TUNED 0
-#define TEA_STATUS_SEARCHING 1
-
-/* Low-level stuff */
-static int tea5757_read(struct bttv *btv)
-{
- unsigned long timeout;
- int value = 0;
- int i;
-
- /* better safe than sorry */
- gpio_inout(btv->mbox_mask, btv->mbox_clk | btv->mbox_we);
-
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
- btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
- udelay(5);
- }
-
- if (bttv_gpio)
- bttv_gpio_tracking(btv,"tea5757 read");
-
- bus_low(btv,btv->mbox_we);
- bus_low(btv,btv->mbox_clk);
-
- udelay(10);
- timeout= jiffies + msecs_to_jiffies(1000);
-
- /* wait for DATA line to go low; error if it doesn't */
- while (bus_in(btv,btv->mbox_data) && time_before(jiffies, timeout))
- schedule();
- if (bus_in(btv,btv->mbox_data)) {
- pr_warn("%d: tea5757: read timeout\n", btv->c.nr);
- return -1;
- }
-
- dprintk("%d: tea5757:", btv->c.nr);
- for (i = 0; i < 24; i++) {
- udelay(5);
- bus_high(btv,btv->mbox_clk);
- udelay(5);
- dprintk_cont("%c",
- bus_in(btv, btv->mbox_most) == 0 ? 'T' : '-');
- bus_low(btv,btv->mbox_clk);
- value <<= 1;
- value |= (bus_in(btv,btv->mbox_data) == 0)?0:1; /* MSB first */
- dprintk_cont("%c",
- bus_in(btv, btv->mbox_most) == 0 ? 'S' : 'M');
- }
- dprintk_cont("\n");
- dprintk("%d: tea5757: read 0x%X\n", btv->c.nr, value);
- return value;
-}
-
-static int tea5757_write(struct bttv *btv, int value)
-{
- int i;
- int reg = value;
-
- gpio_inout(btv->mbox_mask, btv->mbox_clk | btv->mbox_we | btv->mbox_data);
-
- if (btv->mbox_ior) {
- gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
- btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
- udelay(5);
- }
- if (bttv_gpio)
- bttv_gpio_tracking(btv,"tea5757 write");
-
- dprintk("%d: tea5757: write 0x%X\n", btv->c.nr, value);
- bus_low(btv,btv->mbox_clk);
- bus_high(btv,btv->mbox_we);
- for (i = 0; i < 25; i++) {
- if (reg & 0x1000000)
- bus_high(btv,btv->mbox_data);
- else
- bus_low(btv,btv->mbox_data);
- reg <<= 1;
- bus_high(btv,btv->mbox_clk);
- udelay(10);
- bus_low(btv,btv->mbox_clk);
- udelay(10);
- }
- bus_low(btv,btv->mbox_we); /* unmute !!! */
- return 0;
-}
-
-void tea5757_set_freq(struct bttv *btv, unsigned short freq)
-{
- dprintk("tea5757_set_freq %d\n",freq);
- tea5757_write(btv, 5 * freq + 0x358); /* add 10.7MHz (see docs) */
-}
-
/* RemoteVision MX (rv605) muxsel helper [Miguel Freitas]
*
* This is needed because rv605 don't use a normal multiplex, but a crosspoint
@@ -5048,10 +4951,3 @@ int bttv_handle_chipset(struct bttv *btv)
pci_write_config_byte(btv->c.pci, PCI_LATENCY_TIMER, latency);
return 0;
}
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 4a8176c09fc9..4ec2a3c3f23c 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1874,8 +1874,10 @@ static void bttv_set_frequency(struct bttv *btv, const struct v4l2_frequency *f)
if (new_freq.type == V4L2_TUNER_RADIO) {
radio_enable(btv);
btv->radio_freq = new_freq.frequency;
- if (btv->has_matchbox)
- tea5757_set_freq(btv, btv->radio_freq);
+ if (btv->has_tea575x) {
+ btv->tea.freq = btv->radio_freq;
+ snd_tea575x_set_freq(&btv->tea);
+ }
} else {
btv->tv_freq = new_freq.frequency;
}
@@ -2513,6 +2515,8 @@ static int bttv_querycap(struct file *file, void *priv,
if (btv->has_saa6588)
cap->device_caps |= V4L2_CAP_READWRITE |
V4L2_CAP_RDS_CAPTURE;
+ if (btv->has_tea575x)
+ cap->device_caps |= V4L2_CAP_HW_FREQ_SEEK;
}
return 0;
}
@@ -3242,6 +3246,9 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (btv->audio_mode_gpio)
btv->audio_mode_gpio(btv, t, 0);
+ if (btv->has_tea575x)
+ return snd_tea575x_g_tuner(&btv->tea, t);
+
return 0;
}
@@ -3259,6 +3266,30 @@ static int radio_s_tuner(struct file *file, void *priv,
return 0;
}
+static int radio_s_hw_freq_seek(struct file *file, void *priv,
+ const struct v4l2_hw_freq_seek *a)
+{
+ struct bttv_fh *fh = priv;
+ struct bttv *btv = fh->btv;
+
+ if (btv->has_tea575x)
+ return snd_tea575x_s_hw_freq_seek(file, &btv->tea, a);
+
+ return -ENOTTY;
+}
+
+static int radio_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct bttv_fh *fh = priv;
+ struct bttv *btv = fh->btv;
+
+ if (btv->has_tea575x)
+ return snd_tea575x_enum_freq_bands(&btv->tea, band);
+
+ return -ENOTTY;
+}
+
static ssize_t radio_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
@@ -3316,6 +3347,8 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
.vidioc_s_tuner = radio_s_tuner,
.vidioc_g_frequency = bttv_g_frequency,
.vidioc_s_frequency = bttv_s_frequency,
+ .vidioc_s_hw_freq_seek = radio_s_hw_freq_seek,
+ .vidioc_enum_freq_bands = radio_enum_freq_bands,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -3884,7 +3917,6 @@ static struct video_device *vdev_init(struct bttv *btv,
*vfd = *template;
vfd->v4l2_dev = &btv->c.v4l2_dev;
vfd->release = video_device_release;
- vfd->debug = bttv_debug;
video_set_drvdata(vfd, btv);
snprintf(vfd->name, sizeof(vfd->name), "BT%d%s %s (%s)",
btv->id, (btv->id==848 && btv->revision==0x12) ? "A" : "",
@@ -4429,9 +4461,3 @@ static void __exit bttv_cleanup_module(void)
module_init(bttv_init_module);
module_exit(bttv_cleanup_module);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 3f364b7062b9..25b9916906d5 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -181,9 +181,3 @@ void bttv_gpio_bits(struct bttv_core *core, u32 mask, u32 bits)
btwrite(data,BT848_GPIO_DATA);
spin_unlock_irqrestore(&btv->gpio_lock,flags);
}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttv-if.c b/drivers/media/pci/bt8xx/bttv-if.c
index a6a540dc9e4b..538652e16a5c 100644
--- a/drivers/media/pci/bt8xx/bttv-if.c
+++ b/drivers/media/pci/bt8xx/bttv-if.c
@@ -113,9 +113,3 @@ int bttv_write_gpio(unsigned int card, unsigned long mask, unsigned long data)
bttv_gpio_tracking(btv,"extern write");
return 0;
}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 4d3f05a19af3..3859dde98be2 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -901,9 +901,3 @@ bttv_overlay_risc(struct bttv *btv,
buf->vb.field = ov->field;
return 0;
}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index b433267d9aa9..e77129c92fa0 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -450,10 +450,3 @@ void bttv_vbi_fmt_reset(struct bttv_vbi_fmt *f, unsigned int norm)
/* See bttv_vbi_fmt_set(). */
f->end = tvnorm->vbistart[0] * 2 + 2;
}
-
-/* ----------------------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttv.h b/drivers/media/pci/bt8xx/bttv.h
index f08126244662..91301c3cad1e 100644
--- a/drivers/media/pci/bt8xx/bttv.h
+++ b/drivers/media/pci/bt8xx/bttv.h
@@ -378,8 +378,3 @@ extern void bttv_input_fini(struct bttv *dev);
extern void bttv_input_irq(struct bttv *dev);
#endif /* _BTTV_H_ */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 9fe19488b30b..bc048c586b1f 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -42,6 +42,7 @@
#include <media/tveeprom.h>
#include <media/rc-core.h>
#include <media/ir-kbd-i2c.h>
+#include <media/tea575x.h>
#include "bt848.h"
#include "bttv.h"
@@ -359,6 +360,10 @@ struct bttv_suspend_state {
struct bttv_buffer *vbi;
};
+struct bttv_tea575x_gpio {
+ u8 data, clk, wren, most;
+};
+
struct bttv {
struct bttv_core c;
@@ -445,12 +450,9 @@ struct bttv {
/* miro/pinnacle + Aimslab VHX
philips matchbox (tea5757 radio tuner) support */
- int has_matchbox;
- int mbox_we;
- int mbox_data;
- int mbox_clk;
- int mbox_most;
- int mbox_mask;
+ int has_tea575x;
+ struct bttv_tea575x_gpio tea_gpio;
+ struct snd_tea575x tea;
/* ISA stuff (Terratec Active Radio Upgrade) */
int mbox_ior;
@@ -531,9 +533,3 @@ static inline unsigned int bttv_muxsel(const struct bttv *btv,
#define btaor(dat,mask,adr) btwrite((dat) | ((mask) & btread(adr)), adr)
#endif /* _BTTVP_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index f613314b360b..74d774e5227b 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -41,6 +41,7 @@ config VIDEO_CX23885
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for Conexant 23885 based
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index db99ca2613ba..f384f295676e 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR4400] = {
- .name = "Hauppauge WinTV-HVR4400",
+ .name = "Hauppauge WinTV-HVR4400/HVR5500",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
.tuner_addr = 0x60, /* 0xc0 >> 1 */
.tuner_bus = 1,
},
+ [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
+ .name = "Hauppauge WinTV Starburst",
+ .portb = CX23885_MPEG_DVB,
+ },
[CX23885_BOARD_AVERMEDIA_HC81R] = {
.name = "AVerTV Hybrid Express Slim HC81R",
.tuner_type = TUNER_XC2028,
@@ -706,6 +710,11 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_HAUPPAUGE_HVR5525] = {
+ .name = "Hauppauge WinTV-HVR5525",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -936,19 +945,19 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0xc108,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc138,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc12a,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc1f8,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x1461,
.subdevice = 0xd939,
@@ -989,6 +998,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x4254,
.subdevice = 0x0982,
.card = CX23885_BOARD_DVBSKY_T982,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xf038,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR5525,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1161,6 +1174,8 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
case 85721:
/* WinTV-HVR1290 (PCIe, OEM, RCA in, IR,
Dual channel ATSC and Basic analog */
+ case 150329:
+ /* WinTV-HVR5525 (PCIe, DVB-S/S2, DVB-T/T2/C) */
break;
default:
printk(KERN_WARNING "%s: warning: "
@@ -1545,8 +1560,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
/* GPIO-8 tda10071 demod reset */
- /* GPIO-9 si2165 demod reset */
+ /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/
/* Put the parts into reset and back */
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1632,6 +1648,29 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
msleep(100);
cx23885_gpio_set(dev, GPIO_2);
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ /*
+ * GPIO-00 IR_WIDE
+ * GPIO-02 wake#
+ * GPIO-03 VAUX Pres.
+ * GPIO-07 PROG#
+ * GPIO-08 SAT_RESN
+ * GPIO-09 TER_RESN
+ * GPIO-10 B2_SENSE
+ * GPIO-11 B1_SENSE
+ * GPIO-15 IR_LED_STATUS
+ * GPIO-19 IR_NARROW
+ * GPIO-20 Blaster1
+ * ALTGPIO VAUX_SWITCH
+ * AUX_PLL_CLK : Blaster2
+ */
+ /* Put the parts into reset and back */
+ cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
+ cx23885_gpio_clear(dev, GPIO_8 | GPIO_9);
+ msleep(100);
+ cx23885_gpio_set(dev, GPIO_8 | GPIO_9);
+ msleep(100);
+ break;
}
}
@@ -1872,7 +1911,9 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
@@ -1980,6 +2021,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_T982:
ts1->gen_ctrl_val = 0x5; /* Parallel */
@@ -1997,6 +2043,14 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ ts1->gen_ctrl_val = 0x5; /* Parallel */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 1d9d0f86ca8c..1ad49946d7fa 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
cx23885_shutdown(dev);
- pci_disable_device(pci_dev);
-
/* unregister stuff */
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
+
cx23885_dev_unregister(dev);
vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index c47d18270cfc..45fbe1e4d2d0 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -74,6 +74,7 @@
#include "sp2.h"
#include "m88ds3103.h"
#include "m88ts2022.h"
+#include "m88rs6000t.h"
static unsigned int debug;
@@ -915,6 +916,16 @@ static const struct m88ds3103_config dvbsky_s952_portc_m88ds3103_config = {
.agc = 0x99,
};
+static const struct m88ds3103_config hauppauge_hvr5525_m88ds3103_config = {
+ .i2c_addr = 0x69,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .ts_mode = M88DS3103_TS_PARALLEL,
+ .ts_clk = 16000,
+ .ts_clk_pol = 1,
+ .agc = 0x99,
+};
+
static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -1058,6 +1069,116 @@ static struct dib7000p_config dib7070p_dib7000p_config = {
.hostbus_diversity = 1,
};
+static int dvb_register_ci_mac(struct cx23885_tsport *port)
+{
+ struct cx23885_dev *dev = port->dev;
+ struct i2c_client *client_ci = NULL;
+ struct vb2_dvb_frontend *fe0;
+
+ fe0 = vb2_dvb_get_frontend(&port->frontends, 1);
+ if (!fe0)
+ return -EINVAL;
+
+ switch (dev->board) {
+ case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: {
+ static struct netup_card_info cinfo;
+
+ netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
+ memcpy(port->frontends.adapter.proposed_mac,
+ cinfo.port[port->nr - 1].mac, 6);
+ printk(KERN_INFO "NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
+ port->nr, port->frontends.adapter.proposed_mac);
+
+ netup_ci_init(port);
+ return 0;
+ }
+ case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: {
+ struct altera_ci_config netup_ci_cfg = {
+ .dev = dev,/* magic number to identify*/
+ .adapter = &port->frontends.adapter,/* for CI */
+ .demux = &fe0->dvb.demux,/* for hw pid filter */
+ .fpga_rw = netup_altera_fpga_rw,
+ };
+
+ altera_ci_init(&netup_ci_cfg, port->nr);
+ return 0;
+ }
+ case CX23885_BOARD_TEVII_S470: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ if (port->nr != 1)
+ return 0;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
+ printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
+ return 0;
+ }
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ if (port->nr > 2)
+ return 0;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
+ sizeof(eeprom));
+ printk(KERN_INFO "%s port %d MAC address: %pM\n",
+ cx23885_boards[dev->board].name, port->nr,
+ eeprom + 0xc0 + (port->nr-1) * 8);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
+ (port->nr-1) * 8, 6);
+ return 0;
+ }
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_TT_CT2_4500_CI: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+ struct sp2_config sp2_config;
+ struct i2c_board_info info;
+ struct cx23885_i2c *i2c_bus2 = &dev->i2c_bus[1];
+
+ /* attach CI */
+ memset(&sp2_config, 0, sizeof(sp2_config));
+ sp2_config.dvb_adap = &port->frontends.adapter;
+ sp2_config.priv = port;
+ sp2_config.ci_control = cx23885_sp2_ci_ctrl;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
+ info.addr = 0x40;
+ info.platform_data = &sp2_config;
+ request_module(info.type);
+ client_ci = i2c_new_device(&i2c_bus2->i2c_adap, &info);
+ if (client_ci == NULL || client_ci->dev.driver == NULL)
+ return -ENODEV;
+ if (!try_module_get(client_ci->dev.driver->owner)) {
+ i2c_unregister_device(client_ci);
+ return -ENODEV;
+ }
+ port->i2c_client_ci = client_ci;
+
+ if (port->nr != 1)
+ return 0;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
+ sizeof(eeprom));
+ printk(KERN_INFO "%s MAC address: %pM\n",
+ cx23885_boards[dev->board].name, eeprom + 0xc0);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
+ return 0;
+ }
+ }
+ return 0;
+}
+
static int dvb_register(struct cx23885_tsport *port)
{
struct dib7000p_ops dib7000p_ops;
@@ -1066,11 +1187,10 @@ static int dvb_register(struct cx23885_tsport *port)
struct vb2_dvb_frontend *fe0, *fe1 = NULL;
struct si2168_config si2168_config;
struct si2157_config si2157_config;
- struct sp2_config sp2_config;
struct m88ts2022_config m88ts2022_config;
struct i2c_board_info info;
struct i2c_adapter *adapter;
- struct i2c_client *client_demod = NULL, *client_tuner = NULL, *client_ci = NULL;
+ struct i2c_client *client_demod = NULL, *client_tuner = NULL;
const struct m88ds3103_config *p_m88ds3103_config = NULL;
int (*p_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage) = NULL;
int mfe_shared = 0; /* bus not shared by default */
@@ -1710,6 +1830,17 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10071_attach,
+ &hauppauge_tda10071_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config);
+ }
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_S950:
i2c_bus = &dev->i2c_bus[0];
@@ -1790,15 +1921,11 @@ static int dvb_register(struct cx23885_tsport *port)
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
+ client_tuner->dev.driver == NULL)
goto frontend_detach;
- }
+
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
@@ -1821,8 +1948,7 @@ static int dvb_register(struct cx23885_tsport *port)
info.platform_data = &si2168_config;
request_module(info.type);
client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_demod == NULL ||
- client_demod->dev.driver == NULL)
+ if (client_demod == NULL || client_demod->dev.driver == NULL)
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -1840,15 +1966,10 @@ static int dvb_register(struct cx23885_tsport *port)
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
+ client_tuner->dev.driver == NULL)
goto frontend_detach;
- }
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
@@ -1874,8 +1995,7 @@ static int dvb_register(struct cx23885_tsport *port)
info.platform_data = &m88ts2022_config;
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL)
+ if (client_tuner == NULL || client_tuner->dev.driver == NULL)
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -1921,8 +2041,7 @@ static int dvb_register(struct cx23885_tsport *port)
info.platform_data = &m88ts2022_config;
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL)
+ if (client_tuner == NULL || client_tuner->dev.driver == NULL)
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -1967,8 +2086,7 @@ static int dvb_register(struct cx23885_tsport *port)
info.platform_data = &si2168_config;
request_module(info.type);
client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_demod == NULL ||
- client_demod->dev.driver == NULL)
+ if (client_demod == NULL || client_demod->dev.driver == NULL)
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -1986,20 +2104,101 @@ static int dvb_register(struct cx23885_tsport *port)
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
+ client_tuner->dev.driver == NULL)
goto frontend_detach;
- }
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- port->i2c_client_demod = NULL;
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ switch (port->nr) {
+ struct m88rs6000t_config m88rs6000t_config;
+
+ /* port b - satellite */
+ case 1:
+ /* attach frontend */
+ fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
+ &hauppauge_hvr5525_m88ds3103_config,
+ &dev->i2c_bus[0].i2c_adap, &adapter);
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ /* attach SEC */
+ if (!dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &dev->i2c_bus[0].i2c_adap,
+ &hauppauge_a8293_config))
+ goto frontend_detach;
+
+ /* attach tuner */
+ memset(&m88rs6000t_config, 0, sizeof(m88rs6000t_config));
+ m88rs6000t_config.fe = fe0->dvb.frontend;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "m88rs6000t", I2C_NAME_SIZE);
+ info.addr = 0x21;
+ info.platform_data = &m88rs6000t_config;
+ request_module("%s", info.type);
+ client_tuner = i2c_new_device(adapter, &info);
+ if (!client_tuner || !client_tuner->dev.driver)
+ goto frontend_detach;
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+
+ /* delegate signal strength measurement to tuner */
+ fe0->dvb.frontend->ops.read_signal_strength =
+ fe0->dvb.frontend->ops.tuner_ops.get_rf_strength;
+ break;
+ /* port c - terrestrial/cable */
+ case 2:
+ /* attach frontend */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &fe0->dvb.frontend;
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module("%s", info.type);
+ client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
+ if (!client_demod || !client_demod->dev.driver)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module("%s", info.type);
+ client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!client_tuner || !client_tuner->dev.driver) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
@@ -2036,123 +2235,29 @@ static int dvb_register(struct cx23885_tsport *port)
if (ret)
goto frontend_detach;
- /* init CI & MAC */
- switch (dev->board) {
- case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: {
- static struct netup_card_info cinfo;
-
- netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
- memcpy(port->frontends.adapter.proposed_mac,
- cinfo.port[port->nr - 1].mac, 6);
- printk(KERN_INFO "NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
- port->nr, port->frontends.adapter.proposed_mac);
-
- netup_ci_init(port);
- break;
- }
- case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: {
- struct altera_ci_config netup_ci_cfg = {
- .dev = dev,/* magic number to identify*/
- .adapter = &port->frontends.adapter,/* for CI */
- .demux = &fe0->dvb.demux,/* for hw pid filter */
- .fpga_rw = netup_altera_fpga_rw,
- };
-
- altera_ci_init(&netup_ci_cfg, port->nr);
- break;
- }
- case CX23885_BOARD_TEVII_S470: {
- u8 eeprom[256]; /* 24C02 i2c eeprom */
-
- if (port->nr != 1)
- break;
-
- /* Read entire EEPROM */
- dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
- tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
- printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
- memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
- break;
- }
- case CX23885_BOARD_DVBSKY_T9580:
- case CX23885_BOARD_DVBSKY_S950:
- case CX23885_BOARD_DVBSKY_S952:
- case CX23885_BOARD_DVBSKY_T982: {
- u8 eeprom[256]; /* 24C02 i2c eeprom */
-
- if (port->nr > 2)
- break;
-
- /* Read entire EEPROM */
- dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
- tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
- sizeof(eeprom));
- printk(KERN_INFO "%s port %d MAC address: %pM\n",
- cx23885_boards[dev->board].name, port->nr,
- eeprom + 0xc0 + (port->nr-1) * 8);
- memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
- (port->nr-1) * 8, 6);
- break;
- }
- case CX23885_BOARD_DVBSKY_S950C:
- case CX23885_BOARD_DVBSKY_T980C:
- case CX23885_BOARD_TT_CT2_4500_CI: {
- u8 eeprom[256]; /* 24C02 i2c eeprom */
-
- /* attach CI */
- memset(&sp2_config, 0, sizeof(sp2_config));
- sp2_config.dvb_adap = &port->frontends.adapter;
- sp2_config.priv = port;
- sp2_config.ci_control = cx23885_sp2_ci_ctrl;
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "sp2", I2C_NAME_SIZE);
- info.addr = 0x40;
- info.platform_data = &sp2_config;
- request_module(info.type);
- client_ci = i2c_new_device(&i2c_bus2->i2c_adap, &info);
- if (client_ci == NULL ||
- client_ci->dev.driver == NULL) {
- if (client_tuner) {
- module_put(client_tuner->dev.driver->owner);
- i2c_unregister_device(client_tuner);
- }
- if (client_demod) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- }
- goto frontend_detach;
- }
- if (!try_module_get(client_ci->dev.driver->owner)) {
- i2c_unregister_device(client_ci);
- if (client_tuner) {
- module_put(client_tuner->dev.driver->owner);
- i2c_unregister_device(client_tuner);
- }
- if (client_demod) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- }
- goto frontend_detach;
- }
- port->i2c_client_ci = client_ci;
+ ret = dvb_register_ci_mac(port);
+ if (ret)
+ goto frontend_detach;
- if (port->nr != 1)
- break;
+ return 0;
- /* Read entire EEPROM */
- dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
- tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
- sizeof(eeprom));
- printk(KERN_INFO "%s MAC address: %pM\n",
- cx23885_boards[dev->board].name, eeprom + 0xc0);
- memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
- break;
- }
+frontend_detach:
+ /* remove I2C client for tuner */
+ client_tuner = port->i2c_client_tuner;
+ if (client_tuner) {
+ module_put(client_tuner->dev.driver->owner);
+ i2c_unregister_device(client_tuner);
+ port->i2c_client_tuner = NULL;
}
- return ret;
+ /* remove I2C client for demodulator */
+ client_demod = port->i2c_client_demod;
+ if (client_demod) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ }
-frontend_detach:
port->gate_ctrl = NULL;
vb2_dvb_dealloc_frontends(&port->frontends);
return -EINVAL;
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index fd71306af6e2..1135ea3f6ce5 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -300,8 +300,8 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
rc = i2c_master_recv(c, &buf, 0);
if (rc < 0)
continue;
- printk(KERN_INFO "%s: i2c scan: found device @ 0x%x [%s]\n",
- name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
+ printk(KERN_INFO "%s: i2c scan: found device @ 0x%04x [%s]\n",
+ name, i, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index f55cd12da0fd..aeda8d3990ae 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -99,6 +99,8 @@
#define CX23885_BOARD_DVBSKY_S950 49
#define CX23885_BOARD_DVBSKY_S952 50
#define CX23885_BOARD_DVBSKY_T982 51
+#define CX23885_BOARD_HAUPPAUGE_HVR5525 52
+#define CX23885_BOARD_HAUPPAUGE_STARBURST 53
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx25821/Kconfig b/drivers/media/pci/cx25821/Kconfig
index 6439a847680c..1755d3d2feaa 100644
--- a/drivers/media/pci/cx25821/Kconfig
+++ b/drivers/media/pci/cx25821/Kconfig
@@ -2,8 +2,7 @@ config VIDEO_CX25821
tristate "Conexant cx25821 support"
depends on VIDEO_DEV && PCI && I2C
select I2C_ALGOBIT
- select VIDEO_BTCX
- select VIDEOBUF_DMA_SG
+ select VIDEOBUF2_DMA_SG
---help---
This is a video4linux driver for Conexant 25821 based
TV cards.
diff --git a/drivers/media/pci/cx25821/Makefile b/drivers/media/pci/cx25821/Makefile
index fb76c3d3713a..c8f8598a2b86 100644
--- a/drivers/media/pci/cx25821/Makefile
+++ b/drivers/media/pci/cx25821/Makefile
@@ -1,9 +1,8 @@
cx25821-y := cx25821-core.o cx25821-cards.o cx25821-i2c.o \
cx25821-gpio.o cx25821-medusa-video.o \
- cx25821-video.o cx25821-video-upstream.o
+ cx25821-video.o
obj-$(CONFIG_VIDEO_CX25821) += cx25821.o
obj-$(CONFIG_VIDEO_CX25821_ALSA) += cx25821-alsa.o
ccflags-y += -Idrivers/media/i2c
-ccflags-y += -Idrivers/media/common
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 2dd5bcaa7e53..24f964bcc53a 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -63,8 +63,11 @@ static int devno;
struct cx25821_audio_buffer {
unsigned int bpl;
- struct btcx_riscmem risc;
- struct videobuf_dmabuf dma;
+ struct cx25821_riscmem risc;
+ void *vaddr;
+ struct scatterlist *sglist;
+ int sglen;
+ int nr_pages;
};
struct cx25821_audio_dev {
@@ -87,8 +90,6 @@ struct cx25821_audio_dev {
unsigned int period_size;
unsigned int num_periods;
- struct videobuf_dmabuf *dma_risc;
-
struct cx25821_audio_buffer *buf;
struct snd_pcm_substream *substream;
@@ -142,6 +143,83 @@ MODULE_PARM_DESC(debug, "enable debug messages");
#define PCI_MSK_AUD_EXT (1 << 4)
#define PCI_MSK_AUD_INT (1 << 3)
+
+static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip, int nr_pages)
+{
+ struct cx25821_audio_buffer *buf = chip->buf;
+ struct page *pg;
+ int i;
+
+ buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
+ if (NULL == buf->vaddr) {
+ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ return -ENOMEM;
+ }
+
+ dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
+ (unsigned long)buf->vaddr,
+ nr_pages << PAGE_SHIFT);
+
+ memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
+ buf->nr_pages = nr_pages;
+
+ buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
+ if (NULL == buf->sglist)
+ goto vzalloc_err;
+
+ sg_init_table(buf->sglist, buf->nr_pages);
+ for (i = 0; i < buf->nr_pages; i++) {
+ pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
+ if (NULL == pg)
+ goto vmalloc_to_page_err;
+ sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return 0;
+
+vmalloc_to_page_err:
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+vzalloc_err:
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return -ENOMEM;
+}
+
+static int cx25821_alsa_dma_map(struct cx25821_audio_dev *dev)
+{
+ struct cx25821_audio_buffer *buf = dev->buf;
+
+ buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
+ buf->nr_pages, PCI_DMA_FROMDEVICE);
+
+ if (0 == buf->sglen) {
+ pr_warn("%s: cx25821_alsa_map_sg failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int cx25821_alsa_dma_unmap(struct cx25821_audio_dev *dev)
+{
+ struct cx25821_audio_buffer *buf = dev->buf;
+
+ if (!buf->sglen)
+ return 0;
+
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ buf->sglen = 0;
+ return 0;
+}
+
+static int cx25821_alsa_dma_free(struct cx25821_audio_buffer *buf)
+{
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return 0;
+}
+
/*
* BOARD Specific: Sets audio DMA
*/
@@ -330,15 +408,17 @@ out:
static int dsp_buffer_free(struct cx25821_audio_dev *chip)
{
+ struct cx25821_riscmem *risc = &chip->buf->risc;
+
BUG_ON(!chip->dma_size);
dprintk(2, "Freeing buffer\n");
- videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc);
- videobuf_dma_free(chip->dma_risc);
- btcx_riscmem_free(chip->pci, &chip->buf->risc);
+ cx25821_alsa_dma_unmap(chip);
+ cx25821_alsa_dma_free(chip->buf);
+ pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
kfree(chip->buf);
- chip->dma_risc = NULL;
+ chip->buf = NULL;
chip->dma_size = 0;
return 0;
@@ -430,8 +510,6 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
- struct videobuf_dmabuf *dma;
-
struct cx25821_audio_buffer *buf;
int ret;
@@ -455,19 +533,18 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
chip->period_size = AUDIO_LINE_SIZE;
buf->bpl = chip->period_size;
+ chip->buf = buf;
- dma = &buf->dma;
- videobuf_dma_init(dma);
- ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
+ ret = cx25821_alsa_dma_init(chip,
(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
- ret = videobuf_dma_map(&chip->pci->dev, dma);
+ ret = cx25821_alsa_dma_map(chip);
if (ret < 0)
goto error;
- ret = cx25821_risc_databuffer_audio(chip->pci, &buf->risc, dma->sglist,
+ ret = cx25821_risc_databuffer_audio(chip->pci, &buf->risc, buf->sglist,
chip->period_size, chip->num_periods, 1);
if (ret < 0) {
pr_info("DEBUG: ERROR after cx25821_risc_databuffer_audio()\n");
@@ -479,16 +556,14 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
- chip->buf = buf;
- chip->dma_risc = dma;
-
- substream->runtime->dma_area = chip->dma_risc->vaddr;
+ substream->runtime->dma_area = chip->buf->vaddr;
substream->runtime->dma_bytes = chip->dma_size;
substream->runtime->dma_addr = 0;
return 0;
error:
+ chip->buf = NULL;
kfree(buf);
return ret;
}
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 389fffd2f36f..559f8293c53a 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -874,10 +874,9 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
- return -1;
- } else {
- pr_info("Athena Hardware device = 0x%02x\n", dev->pci->device);
+ return -ENODEV;
}
+ pr_info("Athena Hardware device = 0x%02x\n", dev->pci->device);
/* Apply a sensible clock frequency for the PCIe bridge */
dev->clk_freq = 28000000;
@@ -966,11 +965,15 @@ void cx25821_dev_unregister(struct cx25821_dev *dev)
release_mem_region(dev->base_io_addr, pci_resource_len(dev->pci, 0));
- for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; i++) {
+ for (i = 0; i < MAX_VID_CAP_CHANNEL_NUM - 1; i++) {
if (i == SRAM_CH08) /* audio channel */
continue;
+ /*
+ * TODO: enable when video output is properly
+ * supported.
if (i == SRAM_CH09 || i == SRAM_CH10)
cx25821_free_mem_upstream(&dev->channels[i]);
+ */
cx25821_video_unregister(dev, i);
}
@@ -979,14 +982,41 @@ void cx25821_dev_unregister(struct cx25821_dev *dev)
}
EXPORT_SYMBOL(cx25821_dev_unregister);
+int cx25821_riscmem_alloc(struct pci_dev *pci,
+ struct cx25821_riscmem *risc,
+ unsigned int size)
+{
+ __le32 *cpu;
+ dma_addr_t dma = 0;
+
+ if (NULL != risc->cpu && risc->size < size)
+ pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
+ if (NULL == risc->cpu) {
+ cpu = pci_zalloc_consistent(pci, size, &dma);
+ if (NULL == cpu)
+ return -ENOMEM;
+ risc->cpu = cpu;
+ risc->dma = dma;
+ risc->size = size;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cx25821_riscmem_alloc);
+
static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
- unsigned int lines)
+ unsigned int lines, bool jump)
{
struct scatterlist *sg;
unsigned int line, todo;
+ if (jump) {
+ *(rp++) = cpu_to_le32(RISC_JUMP);
+ *(rp++) = cpu_to_le32(0);
+ *(rp++) = cpu_to_le32(0); /* bits 63-32 */
+ }
+
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
@@ -1035,7 +1065,7 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
return rp;
}
-int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+int cx25821_risc_buffer(struct pci_dev *pci, struct cx25821_riscmem *risc,
struct scatterlist *sglist, unsigned int top_offset,
unsigned int bottom_offset, unsigned int bpl,
unsigned int padding, unsigned int lines)
@@ -1052,14 +1082,14 @@ int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
fields++;
/* estimate risc mem: worst case is one write per page border +
- one write per scan line + syncs + jump (all 2 dwords). Padding
+ one write per scan line + syncs + jump (all 3 dwords). Padding
can cause next bpl to start close to a page border. First DMA
region may be smaller than PAGE_SIZE */
/* write and jump need an extra dword */
instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE +
lines);
- instructions += 2;
- rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
+ instructions += 5;
+ rc = cx25821_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
@@ -1069,17 +1099,17 @@ int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
if (UNSET != top_offset) {
rp = cx25821_risc_field(rp, sglist, top_offset, 0, bpl, padding,
- lines);
+ lines, true);
}
if (UNSET != bottom_offset) {
rp = cx25821_risc_field(rp, sglist, bottom_offset, 0x200, bpl,
- padding, lines);
+ padding, lines, UNSET == top_offset);
}
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
+ BUG_ON((risc->jmp - risc->cpu + 3) * sizeof(*risc->cpu) > risc->size);
return 0;
}
@@ -1146,7 +1176,7 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
}
int cx25821_risc_databuffer_audio(struct pci_dev *pci,
- struct btcx_riscmem *risc,
+ struct cx25821_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines, unsigned int lpi)
@@ -1163,7 +1193,7 @@ int cx25821_risc_databuffer_audio(struct pci_dev *pci,
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 1;
- rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
+ rc = cx25821_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
@@ -1179,40 +1209,14 @@ int cx25821_risc_databuffer_audio(struct pci_dev *pci,
}
EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
-int cx25821_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value)
-{
- __le32 *rp;
- int rc;
-
- rc = btcx_riscmem_alloc(pci, risc, 4 * 16);
-
- if (rc < 0)
- return rc;
-
- /* write risc instructions */
- rp = risc->cpu;
-
- *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ1);
- *(rp++) = cpu_to_le32(reg);
- *(rp++) = cpu_to_le32(value);
- *(rp++) = cpu_to_le32(mask);
- *(rp++) = cpu_to_le32(RISC_JUMP);
- *(rp++) = cpu_to_le32(risc->dma);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
- return 0;
-}
-
-void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf)
+void cx25821_free_buffer(struct cx25821_dev *dev, struct cx25821_buffer *buf)
{
- struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
-
BUG_ON(in_interrupt());
- videobuf_waiton(q, &buf->vb, 0, 0);
- videobuf_dma_unmap(q->dev, dma);
- videobuf_dma_free(dma);
- btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ if (WARN_ON(buf->risc.size == 0))
+ return;
+ pci_free_consistent(dev->pci,
+ buf->risc.size, buf->risc.cpu, buf->risc.dma);
+ memset(&buf->risc, 0, sizeof(buf->risc));
}
static irqreturn_t cx25821_irq(int irq, void *dev_id)
@@ -1297,14 +1301,15 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
goto fail_unregister_device;
}
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_unregister_pci;
+ }
err = cx25821_dev_setup(dev);
- if (err) {
- if (err == -EBUSY)
- goto fail_unregister_device;
- else
- goto fail_unregister_pci;
- }
+ if (err)
+ goto fail_free_ctx;
/* print pci info */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
@@ -1334,6 +1339,8 @@ fail_irq:
pr_info("cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
+fail_free_ctx:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail_unregister_pci:
pci_disable_device(pci_dev);
fail_unregister_device:
@@ -1357,6 +1364,7 @@ static void cx25821_finidev(struct pci_dev *pci_dev)
free_irq(pci_dev->irq, dev);
cx25821_dev_unregister(dev);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
}
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 3a419f134584..7bc495e4ece2 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <shu.lin@conexant.com>, <hiep.huynh@conexant.com>
- * Based on Steven Toth <stoth@linuxtv.org> cx23885 driver
+ * Based on Steven Toth <stoth@linuxtv.org> cx25821 driver
* Parts adapted/taken from Eduardo Moscoso Rubino
* Copyright (C) 2009 Eduardo Moscoso Rubino <moscoso@TopoLogica.com>
*
@@ -46,10 +46,6 @@ static unsigned int irq_debug;
module_param(irq_debug, int, 0644);
MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
-static unsigned int vid_limit = 16;
-module_param(vid_limit, int, 0644);
-MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
-
#define FORMAT_FLAGS_PACKED 0x01
static const struct cx25821_fmt formats[] = {
@@ -76,41 +72,6 @@ static const struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
return NULL;
}
-void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
- u32 count)
-{
- struct cx25821_buffer *buf;
- int bc;
-
- for (bc = 0;; bc++) {
- if (list_empty(&q->active)) {
- dprintk(1, "bc=%d (=0: active empty)\n", bc);
- break;
- }
-
- buf = list_entry(q->active.next, struct cx25821_buffer,
- vb.queue);
-
- /* count comes from the hw and it is 16bit wide --
- * this trick handles wrap-arounds correctly for
- * up to 32767 buffers in flight... */
- if ((s16) (count - buf->count) < 0)
- break;
-
- v4l2_get_timestamp(&buf->vb.ts);
- buf->vb.state = VIDEOBUF_DONE;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
- }
-
- if (list_empty(&q->active))
- del_timer(&q->timeout);
- else
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- if (bc != 1)
- pr_err("%s: %d buffers handled (should be 1)\n", __func__, bc);
-}
-
int cx25821_start_video_dma(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q,
struct cx25821_buffer *buf,
@@ -123,7 +84,6 @@ int cx25821_start_video_dma(struct cx25821_dev *dev,
/* reset counter */
cx_write(channel->gpcnt_ctl, 3);
- q->count = 1;
/* enable irq */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << channel->i));
@@ -139,86 +99,8 @@ int cx25821_start_video_dma(struct cx25821_dev *dev,
return 0;
}
-static int cx25821_restart_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q,
- const struct sram_channel *channel)
-{
- struct cx25821_buffer *buf, *prev;
- struct list_head *item;
-
- if (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx25821_buffer,
- vb.queue);
-
- cx25821_start_video_dma(dev, q, buf, channel);
-
- list_for_each(item, &q->active) {
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
- buf->count = q->count++;
- }
-
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- return 0;
- }
-
- prev = NULL;
- for (;;) {
- if (list_empty(&q->queued))
- return 0;
-
- buf = list_entry(q->queued.next, struct cx25821_buffer,
- vb.queue);
-
- if (NULL == prev) {
- list_move_tail(&buf->vb.queue, &q->active);
- cx25821_start_video_dma(dev, q, buf, channel);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- } else if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_move_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63 - 32 */
- } else {
- return 0;
- }
- prev = buf;
- }
-}
-
-static void cx25821_vid_timeout(unsigned long data)
-{
- struct cx25821_data *timeout_data = (struct cx25821_data *)data;
- struct cx25821_dev *dev = timeout_data->dev;
- const struct sram_channel *channel = timeout_data->channel;
- struct cx25821_dmaqueue *q = &dev->channels[channel->i].dma_vidq;
- struct cx25821_buffer *buf;
- unsigned long flags;
-
- /* cx25821_sram_channel_dump(dev, channel); */
- cx_clear(channel->dma_ctl, 0x11);
-
- spin_lock_irqsave(&dev->slock, flags);
- while (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx25821_buffer,
- vb.queue);
- list_del(&buf->vb.queue);
-
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- }
-
- cx25821_restart_video_queue(dev, q, channel);
- spin_unlock_irqrestore(&dev->slock, flags);
-}
-
int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
{
- u32 count = 0;
int handled = 0;
u32 mask;
const struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -239,317 +121,201 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
/* risc1 y */
if (status & FLD_VID_DST_RISC1) {
- spin_lock(&dev->slock);
- count = cx_read(channel->gpcnt);
- cx25821_video_wakeup(dev, &dev->channels[channel->i].dma_vidq,
- count);
- spin_unlock(&dev->slock);
- handled++;
- }
+ struct cx25821_dmaqueue *dmaq =
+ &dev->channels[channel->i].dma_vidq;
+ struct cx25821_buffer *buf;
- /* risc2 y */
- if (status & 0x10) {
- dprintk(2, "stopper video\n");
spin_lock(&dev->slock);
- cx25821_restart_video_queue(dev,
- &dev->channels[channel->i].dma_vidq, channel);
+ if (!list_empty(&dmaq->active)) {
+ buf = list_entry(dmaq->active.next,
+ struct cx25821_buffer, queue);
+
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.sequence = dmaq->count++;
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ }
spin_unlock(&dev->slock);
handled++;
}
return handled;
}
-static int cx25821_buffer_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
+static int cx25821_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx25821_channel *chan = q->priv_data;
-
- *size = chan->fmt->depth * chan->width * chan->height >> 3;
+ struct cx25821_channel *chan = q->drv_priv;
+ unsigned size = (chan->fmt->depth * chan->width * chan->height) >> 3;
- if (0 == *count)
- *count = 32;
-
- if (*size * *count > vid_limit * 1024 * 1024)
- *count = (vid_limit * 1024 * 1024) / *size;
+ if (fmt && fmt->fmt.pix.sizeimage < size)
+ return -EINVAL;
+ *num_planes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : size;
+ alloc_ctxs[0] = chan->dev->alloc_ctx;
return 0;
}
-static int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int cx25821_buffer_prepare(struct vb2_buffer *vb)
{
- struct cx25821_channel *chan = q->priv_data;
+ struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
- int rc, init_buffer = 0;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
u32 line0_offset;
- struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int bpl_local = LINE_SIZE_D1;
+ int ret;
- BUG_ON(NULL == chan->fmt);
- if (chan->width < 48 || chan->width > 720 ||
- chan->height < 32 || chan->height > 576)
- return -EINVAL;
-
- buf->vb.size = (chan->width * chan->height * chan->fmt->depth) >> 3;
+ if (chan->pixel_formats == PIXEL_FRMT_411)
+ buf->bpl = (chan->fmt->depth * chan->width) >> 3;
+ else
+ buf->bpl = (chan->fmt->depth >> 3) * chan->width;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < chan->height * buf->bpl)
return -EINVAL;
+ vb2_set_plane_payload(vb, 0, chan->height * buf->bpl);
+ buf->vb.v4l2_buf.field = chan->field;
- if (buf->fmt != chan->fmt ||
- buf->vb.width != chan->width ||
- buf->vb.height != chan->height || buf->vb.field != field) {
- buf->fmt = chan->fmt;
- buf->vb.width = chan->width;
- buf->vb.height = chan->height;
- buf->vb.field = field;
- init_buffer = 1;
- }
+ if (chan->pixel_formats == PIXEL_FRMT_411) {
+ bpl_local = buf->bpl;
+ } else {
+ bpl_local = buf->bpl; /* Default */
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- init_buffer = 1;
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (0 != rc) {
- printk(KERN_DEBUG pr_fmt("videobuf_iolock failed!\n"));
- goto fail;
+ if (chan->use_cif_resolution) {
+ if (dev->tvnorm & V4L2_STD_625_50)
+ bpl_local = 352 << 1;
+ else
+ bpl_local = chan->cif_width << 1;
}
}
- dprintk(1, "init_buffer=%d\n", init_buffer);
-
- if (init_buffer) {
- if (chan->pixel_formats == PIXEL_FRMT_411)
- buf->bpl = (buf->fmt->depth * buf->vb.width) >> 3;
- else
- buf->bpl = (buf->fmt->depth >> 3) * (buf->vb.width);
-
- if (chan->pixel_formats == PIXEL_FRMT_411) {
- bpl_local = buf->bpl;
- } else {
- bpl_local = buf->bpl; /* Default */
-
- if (chan->use_cif_resolution) {
- if (dev->tvnorm & V4L2_STD_625_50)
- bpl_local = 352 << 1;
- else
- bpl_local = chan->cif_width << 1;
- }
- }
-
- switch (buf->vb.field) {
- case V4L2_FIELD_TOP:
- cx25821_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, 0, UNSET,
- buf->bpl, 0, buf->vb.height);
- break;
- case V4L2_FIELD_BOTTOM:
- cx25821_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, UNSET, 0,
- buf->bpl, 0, buf->vb.height);
- break;
- case V4L2_FIELD_INTERLACED:
- /* All other formats are top field first */
- line0_offset = 0;
- dprintk(1, "top field first\n");
-
- cx25821_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, line0_offset,
- bpl_local, bpl_local, bpl_local,
- buf->vb.height >> 1);
- break;
- case V4L2_FIELD_SEQ_TB:
- cx25821_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- 0, buf->bpl * (buf->vb.height >> 1),
- buf->bpl, 0, buf->vb.height >> 1);
- break;
- case V4L2_FIELD_SEQ_BT:
- cx25821_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- buf->bpl * (buf->vb.height >> 1), 0,
- buf->bpl, 0, buf->vb.height >> 1);
- break;
- default:
- BUG();
- }
+ switch (chan->field) {
+ case V4L2_FIELD_TOP:
+ ret = cx25821_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, UNSET,
+ buf->bpl, 0, chan->height);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ ret = cx25821_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, UNSET, 0,
+ buf->bpl, 0, chan->height);
+ break;
+ case V4L2_FIELD_INTERLACED:
+ /* All other formats are top field first */
+ line0_offset = 0;
+ dprintk(1, "top field first\n");
+
+ ret = cx25821_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, line0_offset,
+ bpl_local, bpl_local, bpl_local,
+ chan->height >> 1);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ ret = cx25821_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, buf->bpl * (chan->height >> 1),
+ buf->bpl, 0, chan->height >> 1);
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ ret = cx25821_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ buf->bpl * (chan->height >> 1), 0,
+ buf->bpl, 0, chan->height >> 1);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ break;
}
dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.i, chan->width, chan->height, chan->fmt->depth,
- chan->fmt->name, (unsigned long)buf->risc.dma);
-
- buf->vb.state = VIDEOBUF_PREPARED;
-
- return 0;
+ buf, buf->vb.v4l2_buf.index, chan->width, chan->height,
+ chan->fmt->depth, chan->fmt->name,
+ (unsigned long)buf->risc.dma);
-fail:
- cx25821_free_buffer(q, buf);
- return rc;
+ return ret;
}
-static void cx25821_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void cx25821_buffer_finish(struct vb2_buffer *vb)
{
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
+ struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
+ struct cx25821_dev *dev = chan->dev;
- cx25821_free_buffer(q, buf);
-}
-
-static int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx25821_channel *chan = video_drvdata(file);
-
- return videobuf_mmap_mapper(&chan->vidq, vma);
+ cx25821_free_buffer(dev, buf);
}
-
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void cx25821_buffer_queue(struct vb2_buffer *vb)
{
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
- struct cx25821_buffer *prev;
- struct cx25821_channel *chan = vq->priv_data;
+ struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
+ struct cx25821_buffer *prev;
struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
- buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
-
- dprintk(2, "jmp to stopper (0x%x)\n", buf->risc.jmp[1]);
-
- if (!list_empty(&q->queued)) {
- list_add_tail(&buf->vb.queue, &q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf,
- buf->vb.i);
-
- } else if (list_empty(&q->active)) {
- list_add_tail(&buf->vb.queue, &q->active);
- cx25821_start_video_dma(dev, q, buf, chan->sram_channels);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- dprintk(2, "[%p/%d] buffer_queue - first active, buf cnt = %d, q->count = %d\n",
- buf, buf->vb.i, buf->count, q->count);
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
+ buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
+
+ if (list_empty(&q->active)) {
+ list_add_tail(&buf->queue, &q->active);
} else {
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(q->active.prev, struct cx25821_buffer,
- vb.queue);
- if (prev->vb.width == buf->vb.width
- && prev->vb.height == buf->vb.height
- && prev->fmt == buf->fmt) {
- list_add_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
-
- /* 64 bit bits 63-32 */
- prev->risc.jmp[2] = cpu_to_le32(0);
- dprintk(2, "[%p/%d] buffer_queue - append to active, buf->count=%d\n",
- buf, buf->vb.i, buf->count);
-
- } else {
- list_add_tail(&buf->vb.queue, &q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf,
- buf->vb.i);
- }
+ queue);
+ list_add_tail(&buf->queue, &q->active);
+ prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
}
-
- if (list_empty(&q->active))
- dprintk(2, "active queue empty!\n");
}
-static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = cx25821_buffer_setup,
- .buf_prepare = cx25821_buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = cx25821_buffer_release,
-};
-
-static ssize_t video_read(struct file *file, char __user * data, size_t count,
- loff_t *ppos)
+static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct v4l2_fh *fh = file->private_data;
- struct cx25821_channel *chan = video_drvdata(file);
+ struct cx25821_channel *chan = q->drv_priv;
struct cx25821_dev *dev = chan->dev;
- int err = 0;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- if (chan->streaming_fh && chan->streaming_fh != fh) {
- err = -EBUSY;
- goto unlock;
- }
- chan->streaming_fh = fh;
+ struct cx25821_dmaqueue *dmaq = &dev->channels[chan->id].dma_vidq;
+ struct cx25821_buffer *buf = list_entry(dmaq->active.next,
+ struct cx25821_buffer, queue);
- err = videobuf_read_one(&chan->vidq, data, count, ppos,
- file->f_flags & O_NONBLOCK);
-unlock:
- mutex_unlock(&dev->lock);
- return err;
-}
-
-static unsigned int video_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct cx25821_channel *chan = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
-
- if (req_events & (POLLIN | POLLRDNORM))
- res |= videobuf_poll_stream(file, &chan->vidq, wait);
- return res;
-
- /* This doesn't belong in poll(). This can be done
- * much better with vb2. We keep this code here as a
- * reminder.
- if ((res & POLLIN) && buf->vb.state == VIDEOBUF_DONE) {
- struct cx25821_dev *dev = chan->dev;
-
- if (dev && chan->use_cif_resolution) {
- u8 cam_id = *((char *)buf->vb.baddr + 3);
- memcpy((char *)buf->vb.baddr,
- (char *)buf->vb.baddr + (chan->width * 2),
- (chan->width * 2));
- *((char *)buf->vb.baddr + 3) = cam_id;
- }
- }
- */
+ dmaq->count = 0;
+ cx25821_start_video_dma(dev, dmaq, buf, chan->sram_channels);
+ return 0;
}
-static int video_release(struct file *file)
+static void cx25821_stop_streaming(struct vb2_queue *q)
{
- struct cx25821_channel *chan = video_drvdata(file);
- struct v4l2_fh *fh = file->private_data;
+ struct cx25821_channel *chan = q->drv_priv;
struct cx25821_dev *dev = chan->dev;
- const struct sram_channel *sram_ch =
- dev->channels[0].sram_channels;
-
- mutex_lock(&dev->lock);
- /* stop the risc engine and fifo */
- cx_write(sram_ch->dma_ctl, 0); /* FIFO and RISC disable */
+ struct cx25821_dmaqueue *dmaq = &dev->channels[chan->id].dma_vidq;
+ unsigned long flags;
- /* stop video capture */
- if (chan->streaming_fh == fh) {
- videobuf_queue_cancel(&chan->vidq);
- chan->streaming_fh = NULL;
- }
+ cx_write(chan->sram_channels->dma_ctl, 0); /* FIFO and RISC disable */
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx25821_buffer *buf = list_entry(dmaq->active.next,
+ struct cx25821_buffer, queue);
- if (chan->vidq.read_buf) {
- cx25821_buffer_release(&chan->vidq, chan->vidq.read_buf);
- kfree(chan->vidq.read_buf);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
-
- videobuf_mmap_free(&chan->vidq);
- mutex_unlock(&dev->lock);
-
- return v4l2_fh_release(file);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
+static struct vb2_ops cx25821_video_qops = {
+ .queue_setup = cx25821_queue_setup,
+ .buf_prepare = cx25821_buffer_prepare,
+ .buf_finish = cx25821_buffer_finish,
+ .buf_queue = cx25821_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = cx25821_start_streaming,
+ .stop_streaming = cx25821_stop_streaming,
+};
+
/* VIDEO IOCTLS */
static int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
@@ -571,7 +337,7 @@ static int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width = chan->width;
f->fmt.pix.height = chan->height;
- f->fmt.pix.field = chan->vidq.field;
+ f->fmt.pix.field = chan->field;
f->fmt.pix.pixelformat = chan->fmt->fourcc;
f->fmt.pix.bytesperline = (chan->width * chan->fmt->depth) >> 3;
f->fmt.pix.sizeimage = chan->height * f->fmt.pix.bytesperline;
@@ -632,7 +398,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return err;
chan->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
- chan->vidq.field = f->fmt.pix.field;
+ chan->field = f->fmt.pix.field;
chan->width = f->fmt.pix.width;
chan->height = f->fmt.pix.height;
@@ -654,47 +420,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx25821_channel *chan = video_drvdata(file);
-
- if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (chan->streaming_fh && chan->streaming_fh != priv)
- return -EBUSY;
- chan->streaming_fh = priv;
-
- return videobuf_streamon(&chan->vidq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx25821_channel *chan = video_drvdata(file);
-
- if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (chan->streaming_fh && chan->streaming_fh != priv)
- return -EBUSY;
- if (chan->streaming_fh == NULL)
- return 0;
-
- chan->streaming_fh = NULL;
- return videobuf_streamoff(&chan->vidq);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- int ret_val = 0;
- struct cx25821_channel *chan = video_drvdata(file);
-
- ret_val = videobuf_dqbuf(&chan->vidq, p, file->f_flags & O_NONBLOCK);
- p->sequence = chan->dma_vidq.count;
-
- return ret_val;
-}
-
static int vidioc_log_status(struct file *file, void *priv)
{
struct cx25821_channel *chan = video_drvdata(file);
@@ -729,29 +454,6 @@ static int cx25821_vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int cx25821_vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx25821_channel *chan = video_drvdata(file);
-
- return videobuf_reqbufs(&chan->vidq, p);
-}
-
-static int cx25821_vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx25821_channel *chan = video_drvdata(file);
-
- return videobuf_querybuf(&chan->vidq, p);
-}
-
-static int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct cx25821_channel *chan = video_drvdata(file);
-
- return videobuf_qbuf(&chan->vidq, p);
-}
-
static int cx25821_vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorms)
{
struct cx25821_channel *chan = video_drvdata(file);
@@ -880,7 +582,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return err;
chan->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
- chan->vidq.field = f->fmt.pix.field;
+ chan->field = f->fmt.pix.field;
chan->width = f->fmt.pix.width;
chan->height = f->fmt.pix.height;
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P)
@@ -890,52 +592,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return 0;
}
-static ssize_t video_write(struct file *file, const char __user *data, size_t count,
- loff_t *ppos)
-{
- struct cx25821_channel *chan = video_drvdata(file);
- struct cx25821_dev *dev = chan->dev;
- struct v4l2_fh *fh = file->private_data;
- int err = 0;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- if (chan->streaming_fh && chan->streaming_fh != fh) {
- err = -EBUSY;
- goto unlock;
- }
- if (!chan->streaming_fh) {
- err = cx25821_vidupstream_init(chan, chan->pixel_formats);
- if (err)
- goto unlock;
- chan->streaming_fh = fh;
- }
-
- err = cx25821_write_frame(chan, data, count);
- count -= err;
- *ppos += err;
-
-unlock:
- mutex_unlock(&dev->lock);
- return err;
-}
-
-static int video_out_release(struct file *file)
-{
- struct cx25821_channel *chan = video_drvdata(file);
- struct cx25821_dev *dev = chan->dev;
- struct v4l2_fh *fh = file->private_data;
-
- mutex_lock(&dev->lock);
- if (chan->streaming_fh == fh) {
- cx25821_stop_upstream_video(chan);
- chan->streaming_fh = NULL;
- }
- mutex_unlock(&dev->lock);
-
- return v4l2_fh_release(file);
-}
-
static const struct v4l2_ctrl_ops cx25821_ctrl_ops = {
.s_ctrl = cx25821_s_ctrl,
};
@@ -943,11 +599,11 @@ static const struct v4l2_ctrl_ops cx25821_ctrl_ops = {
static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
- .release = video_release,
- .read = video_read,
- .poll = video_poll,
- .mmap = cx25821_video_mmap,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
};
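/*
 * Illustrative sketch (editorial aside, not part of the kernel patch above):
 * a minimal userspace MMAP streaming loop that the vb2_fop_* and
 * vb2_ioctl_* helpers wired up above now service on the driver's behalf.
 * The device node, buffer count and frame count are assumptions, and error
 * checking is omitted for brevity.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int capture_frames(void)
{
	int fd = open("/dev/video0", O_RDWR);		/* assumed device node */
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers req = {
		.count = 4,				/* >= q->min_buffers_needed */
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	struct v4l2_buffer buf;
	void *mem[4];
	unsigned int i;

	ioctl(fd, VIDIOC_REQBUFS, &req);		/* vb2_ioctl_reqbufs */
	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		ioctl(fd, VIDIOC_QUERYBUF, &buf);	/* vb2_ioctl_querybuf */
		mem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			      MAP_SHARED, fd, buf.m.offset); /* vb2_fop_mmap */
		ioctl(fd, VIDIOC_QBUF, &buf);		/* vb2_ioctl_qbuf */
	}
	ioctl(fd, VIDIOC_STREAMON, &type);		/* vb2_ioctl_streamon */
	for (i = 0; i < 100; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		ioctl(fd, VIDIOC_DQBUF, &buf);		/* vb2_ioctl_dqbuf */
		/* mem[buf.index] now holds buf.bytesused bytes of video data */
		ioctl(fd, VIDIOC_QBUF, &buf);
	}
	ioctl(fd, VIDIOC_STREAMOFF, &type);		/* vb2_ioctl_streamoff */
	return 0;
}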
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -956,17 +612,19 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = cx25821_vidioc_reqbufs,
- .vidioc_querybuf = cx25821_vidioc_querybuf,
- .vidioc_qbuf = cx25821_vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_std = cx25821_vidioc_g_std,
.vidioc_s_std = cx25821_vidioc_s_std,
.vidioc_enum_input = cx25821_vidioc_enum_input,
.vidioc_g_input = cx25821_vidioc_g_input,
.vidioc_s_input = cx25821_vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -984,9 +642,11 @@ static const struct video_device cx25821_video_device = {
static const struct v4l2_file_operations video_out_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
- .write = video_write,
- .release = video_out_release,
+ .release = vb2_fop_release,
+ .write = vb2_fop_write,
+ .poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops video_out_ioctl_ops = {
@@ -1019,9 +679,6 @@ void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
if (video_is_registered(&dev->channels[chan_num].vdev)) {
video_unregister_device(&dev->channels[chan_num].vdev);
v4l2_ctrl_handler_free(&dev->channels[chan_num].hdl);
-
- btcx_riscmem_free(dev->pci,
- &dev->channels[chan_num].dma_vidq.stopper);
}
}
@@ -1035,10 +692,11 @@ int cx25821_video_register(struct cx25821_dev *dev)
spin_lock_init(&dev->slock);
- for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) {
+ for (i = 0; i < MAX_VID_CAP_CHANNEL_NUM - 1; ++i) {
struct cx25821_channel *chan = &dev->channels[i];
struct video_device *vdev = &chan->vdev;
struct v4l2_ctrl_handler *hdl = &chan->hdl;
+ struct vb2_queue *q;
bool is_output = i > SRAM_CH08;
if (i == SRAM_CH08) /* audio channel */
@@ -1066,11 +724,9 @@ int cx25821_video_register(struct cx25821_dev *dev)
chan->out->chan = chan;
}
- cx25821_risc_stopper(dev->pci, &chan->dma_vidq.stopper,
- chan->sram_channels->dma_ctl, 0x11, 0);
-
chan->sram_channels = &cx25821_sram_channels[i];
chan->width = 720;
+ chan->field = V4L2_FIELD_INTERLACED;
if (dev->tvnorm & V4L2_STD_625_50)
chan->height = 576;
else
@@ -1084,19 +740,27 @@ int cx25821_video_register(struct cx25821_dev *dev)
cx_write(chan->sram_channels->int_stat, 0xffffffff);
INIT_LIST_HEAD(&chan->dma_vidq.active);
- INIT_LIST_HEAD(&chan->dma_vidq.queued);
- chan->timeout_data.dev = dev;
- chan->timeout_data.channel = &cx25821_sram_channels[i];
- chan->dma_vidq.timeout.function = cx25821_vid_timeout;
- chan->dma_vidq.timeout.data = (unsigned long)&chan->timeout_data;
- init_timer(&chan->dma_vidq.timeout);
+ q = &chan->vidq;
+
+ q->type = is_output ? V4L2_BUF_TYPE_VIDEO_OUTPUT :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->io_modes |= is_output ? VB2_WRITE : VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = chan;
+ q->buf_struct_size = sizeof(struct cx25821_buffer);
+ q->ops = &cx25821_video_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->lock;
- if (!is_output)
- videobuf_queue_sg_init(&chan->vidq, &cx25821_video_qops, &dev->pci->dev,
- &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED, sizeof(struct cx25821_buffer),
- chan, &dev->lock);
+ if (!is_output) {
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_unreg;
+ }
/* register v4l devices */
*vdev = is_output ? cx25821_video_out_device : cx25821_video_device;
@@ -1106,6 +770,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
else
vdev->vfl_dir = VFL_DIR_TX;
vdev->lock = &dev->lock;
+ vdev->queue = q;
snprintf(vdev->name, sizeof(vdev->name), "%s #%d", dev->name, i);
video_set_drvdata(vdev, chan);
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index 90bdc196929f..d81a08a2df4f 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -34,9 +34,8 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/videobuf-dma-sg.h>
+#include <media/videobuf2-dma-sg.h>
-#include "btcx-risc.h"
#include "cx25821-reg.h"
#include "cx25821-medusa-reg.h"
#include "cx25821-sram.h"
@@ -89,6 +88,13 @@
#define CX25821_BOARD_CONEXANT_ATHENA10 1
#define MAX_VID_CHANNEL_NUM 12
+
+/*
+ * Maximum capture-only channels. This can go away once video/audio output
+ * is fully supported in this driver.
+ */
+#define MAX_VID_CAP_CHANNEL_NUM 10
+
#define VID_CHANNEL_NUM 8
struct cx25821_fmt {
@@ -111,16 +117,23 @@ enum cx25821_src_sel_type {
CX25821_SRC_SEL_PARALLEL_MPEG_VIDEO
};
+struct cx25821_riscmem {
+ unsigned int size;
+ __le32 *cpu;
+ __le32 *jmp;
+ dma_addr_t dma;
+};
+
/* buffer for one video frame */
struct cx25821_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head queue;
/* cx25821 specific */
unsigned int bpl;
- struct btcx_riscmem risc;
+ struct cx25821_riscmem risc;
const struct cx25821_fmt *fmt;
- u32 count;
};
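/*
 * Illustrative sketch (editorial aside, not part of the kernel patch above):
 * because the vb2_buffer is the first member of struct cx25821_buffer, the
 * driver's queue ops can recover the full buffer from the core's
 * vb2_buffer with container_of(). The helper name below is hypothetical;
 * it only shows the usual idiom.
 */
static inline struct cx25821_buffer *to_cx25821_buffer(struct vb2_buffer *vb)
{
	return container_of(vb, struct cx25821_buffer, vb);
}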
enum port {
@@ -159,17 +172,9 @@ struct cx25821_i2c {
struct cx25821_dmaqueue {
struct list_head active;
- struct list_head queued;
- struct timer_list timeout;
- struct btcx_riscmem stopper;
u32 count;
};
-struct cx25821_data {
- struct cx25821_dev *dev;
- const struct sram_channel *channel;
-};
-
struct cx25821_dev;
struct cx25821_channel;
@@ -207,18 +212,17 @@ struct cx25821_video_out_data {
struct cx25821_channel {
unsigned id;
struct cx25821_dev *dev;
- struct v4l2_fh *streaming_fh;
struct v4l2_ctrl_handler hdl;
- struct cx25821_data timeout_data;
struct video_device vdev;
struct cx25821_dmaqueue dma_vidq;
- struct videobuf_queue vidq;
+ struct vb2_queue vidq;
const struct sram_channel *sram_channels;
const struct cx25821_fmt *fmt;
+ unsigned field;
unsigned int width, height;
int pixel_formats;
int use_cif_resolution;
@@ -244,6 +248,7 @@ struct cx25821_dev {
int hwrevision;
/* used by cx25821-alsa */
struct snd_card *card;
+ void *alloc_ctx;
u32 clk_freq;
@@ -405,21 +410,22 @@ extern int cx25821_sram_channel_setup(struct cx25821_dev *dev,
const struct sram_channel *ch, unsigned int bpl,
u32 risc);
-extern int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+extern int cx25821_riscmem_alloc(struct pci_dev *pci,
+ struct cx25821_riscmem *risc,
+ unsigned int size);
+extern int cx25821_risc_buffer(struct pci_dev *pci, struct cx25821_riscmem *risc,
struct scatterlist *sglist,
unsigned int top_offset,
unsigned int bottom_offset,
unsigned int bpl,
unsigned int padding, unsigned int lines);
extern int cx25821_risc_databuffer_audio(struct pci_dev *pci,
- struct btcx_riscmem *risc,
+ struct cx25821_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines, unsigned int lpi);
-extern void cx25821_free_buffer(struct videobuf_queue *q,
+extern void cx25821_free_buffer(struct cx25821_dev *dev,
struct cx25821_buffer *buf);
-extern int cx25821_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value);
extern void cx25821_sram_channel_dump(struct cx25821_dev *dev,
const struct sram_channel *ch);
extern void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index d3c79d964f2c..b6be46e94289 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1234,6 +1234,3 @@ static void __exit blackbird_fini(void)
module_init(blackbird_init);
module_exit(blackbird_fini);
-
-module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644);
-MODULE_PARM_DESC(debug,"enable debug messages [video]");
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index dee177ed5fe9..c38d5a12e277 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -1091,10 +1091,3 @@ EXPORT_SYMBOL(cx88_core_put);
EXPORT_SYMBOL(cx88_ir_start);
EXPORT_SYMBOL(cx88_ir_stop);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 5780e2f013b4..1b2ed238cdb6 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1504,8 +1504,8 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(stv0288_attach,
&tevii_tuner_earda_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
+ if (fe0->dvb.frontend != NULL) {
+ if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
&core->i2c_adap))
goto frontend_detach;
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 1c1f69e6b0b9..a369b0840acf 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -833,10 +833,3 @@ EXPORT_SYMBOL(cx8802_start_dma);
EXPORT_SYMBOL(cx8802_register_driver);
EXPORT_SYMBOL(cx8802_unregister_driver);
EXPORT_SYMBOL(cx8802_get_driver);
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/pci/cx88/cx88-tvaudio.c b/drivers/media/pci/cx88/cx88-tvaudio.c
index 424fd97495dc..6bbce6ad6295 100644
--- a/drivers/media/pci/cx88/cx88-tvaudio.c
+++ b/drivers/media/pci/cx88/cx88-tvaudio.c
@@ -1050,10 +1050,3 @@ EXPORT_SYMBOL(cx88_newstation);
EXPORT_SYMBOL(cx88_set_stereo);
EXPORT_SYMBOL(cx88_get_stereo);
EXPORT_SYMBOL(cx88_audio_thread);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index ab6d5d25aa6f..e7d701777e53 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -357,7 +357,6 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
int y_done = 0;
int bytes_written = 0;
- unsigned long flags = 0;
int idx = 0;
IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
@@ -407,16 +406,21 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
/* Sync Hardware SG List of buffers */
ivtv_stream_sync_for_device(s);
- if (lock)
+ if (lock) {
+ unsigned long flags = 0;
+
spin_lock_irqsave(&itv->dma_reg_lock, flags);
- if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
- ivtv_dma_dec_start(s);
- }
- else {
- set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
- }
- if (lock)
+ if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
+ ivtv_dma_dec_start(s);
+ else
+ set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
+ } else {
+ if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
+ ivtv_dma_dec_start(s);
+ else
+ set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
+ }
}
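/*
 * Illustrative sketch (editorial aside, not part of the kernel patch above):
 * the shape of the locking change in the hunk above. Declaring 'flags'
 * inside the branch that actually takes the lock avoids a function-wide
 * 'unsigned long flags = 0' and keeps the lock/unlock pair visibly
 * balanced. All names here are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ctx {
	spinlock_t lock;
	int pending;
};

static void do_work_locked(struct example_ctx *ctx)
{
	ctx->pending++;		/* stands in for the real critical section */
}

static void do_work(struct example_ctx *ctx, bool take_lock)
{
	if (take_lock) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		do_work_locked(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	} else {
		do_work_locked(ctx);	/* caller already holds ctx->lock */
	}
}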
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index bee2329e0b2e..24152accc66c 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,10 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- down_read(&current->mm->mmap_sem);
- err = get_user_pages(current, current->mm,
- user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
- up_read(&current->mm->mmap_sem);
+ err = get_user_pages_unlocked(current, current->mm,
+ user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/mantis/mantis_core.c b/drivers/media/pci/mantis/mantis_core.c
index 684d9061fe2a..82220ea72dd3 100644
--- a/drivers/media/pci/mantis/mantis_core.c
+++ b/drivers/media/pci/mantis/mantis_core.c
@@ -56,29 +56,6 @@ static int read_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
return 0;
}
-static int write_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
-{
- int err;
-
- struct i2c_msg msg = {
- .addr = 0x50,
- .flags = 0,
- .buf = data,
- .len = length
- };
-
- err = i2c_transfer(&mantis->adapter, &msg, 1);
- if (err < 0) {
- dprintk(verbose, MANTIS_ERROR, 1,
- "ERROR: i2c write: < err=%i length=0x%02x d0=0x%02x, d1=0x%02x >",
- err, length, data[0], data[1]);
-
- return err;
- }
-
- return 0;
-}
-
static int get_mac_address(struct mantis_pci *mantis)
{
int err;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 701b52f34689..99d09a7566d3 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1084,11 +1084,6 @@ static int saa7134_s_ctrl(struct v4l2_ctrl *ctrl)
/* ------------------------------------------------------------------ */
-static inline struct vb2_queue *saa7134_queue(struct file *file)
-{
- return video_devdata(file)->queue;
-}
-
static int video_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/pci/smipcie/smipcie.c b/drivers/media/pci/smipcie/smipcie.c
index f773350e67b9..36c8ed77309c 100644
--- a/drivers/media/pci/smipcie/smipcie.c
+++ b/drivers/media/pci/smipcie/smipcie.c
@@ -448,16 +448,19 @@ static void smi_port_exit(struct smi_port *port)
port->enable = 0;
}
-static void smi_port_irq(struct smi_port *port, u32 int_status)
+static int smi_port_irq(struct smi_port *port, u32 int_status)
{
u32 port_req_irq = port->_dmaInterruptCH0 | port->_dmaInterruptCH1;
+ int handled = 0;
if (int_status & port_req_irq) {
smi_port_disableInterrupt(port);
port->_int_status = int_status;
smi_port_clearInterrupt(port);
tasklet_schedule(&port->tasklet);
+ handled = 1;
}
+ return handled;
}
static irqreturn_t smi_irq_handler(int irq, void *dev_id)
@@ -465,18 +468,19 @@ static irqreturn_t smi_irq_handler(int irq, void *dev_id)
struct smi_dev *dev = dev_id;
struct smi_port *port0 = &dev->ts_port[0];
struct smi_port *port1 = &dev->ts_port[1];
+ int handled = 0;
u32 intr_status = smi_read(MSI_INT_STATUS);
/* ts0 interrupt.*/
if (dev->info->ts_0)
- smi_port_irq(port0, intr_status);
+ handled += smi_port_irq(port0, intr_status);
/* ts1 interrupt.*/
if (dev->info->ts_1)
- smi_port_irq(port1, intr_status);
+ handled += smi_port_irq(port1, intr_status);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
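/*
 * Illustrative sketch (editorial aside, not part of the kernel patch above):
 * why IRQ_RETVAL() matters. A handler should only claim an interrupt its
 * device actually raised, so the core can detect spurious or stuck lines
 * and shared lines keep working. Struct and register names are
 * hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_INT_STATUS	0x00
#define EXAMPLE_INT_SOURCES	0x03

struct example_dev {
	void __iomem *regs;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	u32 status = readl(dev->regs + EXAMPLE_INT_STATUS);
	int handled = 0;

	if (status & EXAMPLE_INT_SOURCES) {
		writel(status, dev->regs + EXAMPLE_INT_STATUS);	/* ack */
		handled = 1;
	}

	return IRQ_RETVAL(handled);	/* IRQ_HANDLED if handled, else IRQ_NONE */
}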
static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 8cbe6b49f4c2..570d119ea18b 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -182,7 +182,7 @@ static ssize_t eeprom_store(struct device *dev, struct device_attribute *attr,
{
struct solo_dev *solo_dev =
container_of(dev, struct solo_dev, dev);
- unsigned short *p = (unsigned short *)buf;
+ u16 *p = (u16 *)buf;
int i;
if (count & 0x1)
@@ -212,7 +212,7 @@ static ssize_t eeprom_show(struct device *dev, struct device_attribute *attr,
{
struct solo_dev *solo_dev =
container_of(dev, struct solo_dev, dev);
- unsigned short *p = (unsigned short *)buf;
+ u16 *p = (u16 *)buf;
int count = (full_eeprom ? 128 : 64);
int i;
diff --git a/drivers/media/pci/solo6x10/solo6x10-eeprom.c b/drivers/media/pci/solo6x10/solo6x10-eeprom.c
index da25ce4a6952..8e81186dc785 100644
--- a/drivers/media/pci/solo6x10/solo6x10-eeprom.c
+++ b/drivers/media/pci/solo6x10/solo6x10-eeprom.c
@@ -103,7 +103,7 @@ unsigned int solo_eeprom_ewen(struct solo_dev *solo_dev, int w_en)
__be16 solo_eeprom_read(struct solo_dev *solo_dev, int loc)
{
int read_cmd = loc | (EE_READ_CMD << ADDR_LEN);
- unsigned short retval = 0;
+ u16 retval = 0;
int i;
solo_eeprom_cmd(solo_dev, read_cmd);
diff --git a/drivers/media/pci/solo6x10/solo6x10-enc.c b/drivers/media/pci/solo6x10/solo6x10-enc.c
index d19c0aef5abc..d28211bb9674 100644
--- a/drivers/media/pci/solo6x10/solo6x10-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-enc.c
@@ -136,11 +136,11 @@ static void solo_capture_config(struct solo_dev *solo_dev)
int solo_osd_print(struct solo_enc_dev *solo_enc)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
- unsigned char *str = solo_enc->osd_text;
+ u8 *str = solo_enc->osd_text;
u8 *buf = solo_enc->osd_buf;
u32 reg;
const struct font_desc *vga = find_font("VGA8x16");
- const unsigned char *vga_data;
+ const u8 *vga_data;
int i, j;
if (WARN_ON_ONCE(!vga))
@@ -154,7 +154,7 @@ int solo_osd_print(struct solo_enc_dev *solo_enc)
}
memset(buf, 0, SOLO_OSD_WRITE_SIZE);
- vga_data = (const unsigned char *)vga->data;
+ vga_data = (const u8 *)vga->data;
for (i = 0; *str; i++, str++) {
for (j = 0; j < 16; j++) {
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index c7141f2e63bd..7ddc76709caa 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -56,8 +56,8 @@
struct solo_snd_pcm {
int on;
spinlock_t lock;
- struct solo_dev *solo_dev;
- unsigned char *g723_buf;
+ struct solo_dev *solo_dev;
+ u8 *g723_buf;
dma_addr_t g723_dma;
};
diff --git a/drivers/media/pci/solo6x10/solo6x10-jpeg.h b/drivers/media/pci/solo6x10/solo6x10-jpeg.h
index 1c66a46da514..3c611bd3f2d8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-jpeg.h
+++ b/drivers/media/pci/solo6x10/solo6x10-jpeg.h
@@ -21,7 +21,7 @@
#ifndef __SOLO6X10_JPEG_H
#define __SOLO6X10_JPEG_H
-static const unsigned char jpeg_header[] = {
+static const u8 jpeg_header[] = {
0xff, 0xd8, 0xff, 0xfe, 0x00, 0x0d, 0x42, 0x6c,
0x75, 0x65, 0x63, 0x68, 0x65, 0x72, 0x72, 0x79,
0x20, 0xff, 0xdb, 0x00, 0x43, 0x00, 0x20, 0x16,
@@ -106,7 +106,7 @@ static const unsigned char jpeg_header[] = {
/* This is the byte marker for the start of the DQT */
#define DQT_START 17
#define DQT_LEN 138
-static const unsigned char jpeg_dqt[4][DQT_LEN] = {
+static const u8 jpeg_dqt[4][DQT_LEN] = {
{
0xff, 0xdb, 0x00, 0x43, 0x00,
0x08, 0x06, 0x06, 0x07, 0x06, 0x05, 0x08, 0x07,
diff --git a/drivers/media/pci/solo6x10/solo6x10-tw28.c b/drivers/media/pci/solo6x10/solo6x10-tw28.c
index edd0781ee4b5..0632d3f7c73c 100644
--- a/drivers/media/pci/solo6x10/solo6x10-tw28.c
+++ b/drivers/media/pci/solo6x10/solo6x10-tw28.c
@@ -510,7 +510,7 @@ static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr)
#define FIRST_ACTIVE_LINE 0x0008
#define LAST_ACTIVE_LINE 0x0102
-static void saa712x_write_regs(struct solo_dev *dev, const uint8_t *vals,
+static void saa712x_write_regs(struct solo_dev *dev, const u8 *vals,
int start, int n)
{
for (; start < n; start++, vals++) {
@@ -532,7 +532,7 @@ static void saa712x_write_regs(struct solo_dev *dev, const uint8_t *vals,
static void saa712x_setup(struct solo_dev *dev)
{
const int reg_start = 0x26;
- const uint8_t saa7128_regs_ntsc[] = {
+ const u8 saa7128_regs_ntsc[] = {
/* :0x26 */
0x0d, 0x00,
/* :0x28 */
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 6e933d383fa2..53fff5425c13 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -38,28 +38,28 @@
#define DMA_ALIGN 4096
/* 6010 M4V */
-static unsigned char vop_6010_ntsc_d1[] = {
+static u8 vop_6010_ntsc_d1[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x1d, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x58, 0x10, 0xf0, 0x71, 0x18, 0x3f,
};
-static unsigned char vop_6010_ntsc_cif[] = {
+static u8 vop_6010_ntsc_cif[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x1d, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x2c, 0x10, 0x78, 0x51, 0x18, 0x3f,
};
-static unsigned char vop_6010_pal_d1[] = {
+static u8 vop_6010_pal_d1[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x15, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x58, 0x11, 0x20, 0x71, 0x18, 0x3f,
};
-static unsigned char vop_6010_pal_cif[] = {
+static u8 vop_6010_pal_cif[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x15, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
@@ -67,25 +67,25 @@ static unsigned char vop_6010_pal_cif[] = {
};
/* 6110 h.264 */
-static unsigned char vop_6110_ntsc_d1[] = {
+static u8 vop_6110_ntsc_d1[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x05, 0x81, 0xec, 0x80, 0x00, 0x00,
0x00, 0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00,
};
-static unsigned char vop_6110_ntsc_cif[] = {
+static u8 vop_6110_ntsc_cif[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x0b, 0x0f, 0xc8, 0x00, 0x00, 0x00,
0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00,
};
-static unsigned char vop_6110_pal_d1[] = {
+static u8 vop_6110_pal_d1[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x05, 0x80, 0x93, 0x20, 0x00, 0x00,
0x00, 0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00,
};
-static unsigned char vop_6110_pal_cif[] = {
+static u8 vop_6110_pal_cif[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x0b, 0x04, 0xb2, 0x00, 0x00, 0x00,
0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00,
@@ -149,7 +149,7 @@ void solo_update_mode(struct solo_enc_dev *solo_enc)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
int vop_len;
- unsigned char *vop;
+ u8 *vop;
solo_enc->interlaced = (solo_enc->mode & 0x08) ? 1 : 0;
solo_enc->bw_weight = max(solo_dev->fps / solo_enc->interval, 1);
@@ -239,8 +239,6 @@ static int solo_enc_on(struct solo_enc_dev *solo_enc)
if (solo_enc->bw_weight > solo_dev->enc_bw_remain)
return -EBUSY;
solo_enc->sequence = 0;
- solo_enc->motion_last_state = false;
- solo_enc->frames_since_last_motion = 0;
solo_dev->enc_bw_remain -= solo_enc->bw_weight;
if (solo_enc->type == SOLO_ENC_TYPE_EXT)
@@ -529,36 +527,12 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
}
if (!ret) {
- bool send_event = false;
-
vb->v4l2_buf.sequence = solo_enc->sequence++;
vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh);
vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh);
/* Check for motion flags */
- if (solo_is_motion_on(solo_enc)) {
- /* It takes a few frames for the hardware to detect
- * motion. Once it does it clears the motion detection
- * register and it takes again a few frames before
- * motion is seen. This means in practice that when the
- * motion field is 1, it will go back to 0 for the next
- * frame. This leads to motion detection event being
- * sent all the time, which is not what we want.
- * Instead wait a few frames before deciding that the
- * motion has halted. After some experimentation it
- * turns out that waiting for 5 frames works well.
- */
- if (enc_buf->motion == 0 &&
- solo_enc->motion_last_state &&
- solo_enc->frames_since_last_motion++ > 5)
- send_event = true;
- else if (enc_buf->motion) {
- solo_enc->frames_since_last_motion = 0;
- send_event = !solo_enc->motion_last_state;
- }
- }
-
- if (send_event) {
+ if (solo_is_motion_on(solo_enc) && enc_buf->motion) {
struct v4l2_event ev = {
.type = V4L2_EVENT_MOTION_DET,
.u.motion_det = {
@@ -568,8 +542,6 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
},
};
- solo_enc->motion_last_state = enc_buf->motion;
- solo_enc->frames_since_last_motion = 0;
v4l2_event_queue(solo_enc->vfd, &ev);
}
}
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index bd8edfa319b8..1ca54b08b3aa 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -159,8 +159,6 @@ struct solo_enc_dev {
u16 motion_thresh;
bool motion_global;
bool motion_enabled;
- bool motion_last_state;
- u8 frames_since_last_motion;
u16 width;
u16 height;
@@ -170,9 +168,9 @@ struct solo_enc_dev {
__aligned(4);
/* VOP stuff */
- unsigned char vop[64];
+ u8 vop[64];
int vop_len;
- unsigned char jpeg_header[1024];
+ u8 jpeg_header[1024];
int jpeg_len;
u32 fmt;
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index f6f30abc088b..e03587b1af71 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -5,6 +5,7 @@ config STA2X11_VIP
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
select VIDEOBUF2_DMA_CONTIG
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
+ depends on VIDEO_V4L2_SUBDEV_API
depends on I2C
help
Say Y for support for STA2X11 VIP (Video Input Port) capture
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index c1f0617a6973..45199a12b9d9 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -1219,11 +1219,14 @@ static int stop_ts_capture(struct av7110 *budget)
static int start_ts_capture(struct av7110 *budget)
{
+ unsigned y;
+
dprintk(2, "budget: %p\n", budget);
if (budget->feeding1)
return ++budget->feeding1;
- memset(budget->grabbing, 0x00, TS_BUFLEN);
+ for (y = 0; y < TS_HEIGHT; y++)
+ memset(budget->grabbing + y * TS_WIDTH, 0x00, TS_WIDTH);
budget->ttbp = 0;
SAA7146_ISR_CLEAR(budget->dev, MASK_10); /* VPE */
SAA7146_IER_ENABLE(budget->dev, MASK_10); /* VPE */
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 37d02fe09137..23e05499b509 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -231,63 +231,59 @@ static void vpeirq(unsigned long data)
}
-int ttpci_budget_debiread(struct budget *budget, u32 config, int addr, int count,
- int uselocks, int nobusyloop)
+static int ttpci_budget_debiread_nolock(struct budget *budget, u32 config,
+ int addr, int count, int nobusyloop)
{
struct saa7146_dev *saa = budget->dev;
- int result = 0;
- unsigned long flags = 0;
-
- if (count > 4 || count <= 0)
- return 0;
-
- if (uselocks)
- spin_lock_irqsave(&budget->debilock, flags);
+ int result;
- if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
- if (uselocks)
- spin_unlock_irqrestore(&budget->debilock, flags);
+ result = saa7146_wait_for_debi_done(saa, nobusyloop);
+ if (result < 0)
return result;
- }
saa7146_write(saa, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff));
saa7146_write(saa, DEBI_CONFIG, config);
saa7146_write(saa, DEBI_PAGE, 0);
saa7146_write(saa, MC2, (2 << 16) | 2);
- if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
- if (uselocks)
- spin_unlock_irqrestore(&budget->debilock, flags);
+ result = saa7146_wait_for_debi_done(saa, nobusyloop);
+ if (result < 0)
return result;
- }
result = saa7146_read(saa, DEBI_AD);
result &= (0xffffffffUL >> ((4 - count) * 8));
-
- if (uselocks)
- spin_unlock_irqrestore(&budget->debilock, flags);
-
return result;
}
-int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr,
- int count, u32 value, int uselocks, int nobusyloop)
+int ttpci_budget_debiread(struct budget *budget, u32 config, int addr, int count,
+ int uselocks, int nobusyloop)
{
- struct saa7146_dev *saa = budget->dev;
- unsigned long flags = 0;
- int result;
-
if (count > 4 || count <= 0)
return 0;
- if (uselocks)
- spin_lock_irqsave(&budget->debilock, flags);
+ if (uselocks) {
+ unsigned long flags;
+ int result;
- if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
- if (uselocks)
- spin_unlock_irqrestore(&budget->debilock, flags);
+ spin_lock_irqsave(&budget->debilock, flags);
+ result = ttpci_budget_debiread_nolock(budget, config, addr,
+ count, nobusyloop);
+ spin_unlock_irqrestore(&budget->debilock, flags);
return result;
}
+ return ttpci_budget_debiread_nolock(budget, config, addr,
+ count, nobusyloop);
+}
+
+static int ttpci_budget_debiwrite_nolock(struct budget *budget, u32 config,
+ int addr, int count, u32 value, int nobusyloop)
+{
+ struct saa7146_dev *saa = budget->dev;
+ int result;
+
+ result = saa7146_wait_for_debi_done(saa, nobusyloop);
+ if (result < 0)
+ return result;
saa7146_write(saa, DEBI_COMMAND, (count << 17) | 0x00000 | (addr & 0xffff));
saa7146_write(saa, DEBI_CONFIG, config);
@@ -295,15 +291,28 @@ int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr,
saa7146_write(saa, DEBI_AD, value);
saa7146_write(saa, MC2, (2 << 16) | 2);
- if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
- if (uselocks)
- spin_unlock_irqrestore(&budget->debilock, flags);
- return result;
- }
+ result = saa7146_wait_for_debi_done(saa, nobusyloop);
+ return result < 0 ? result : 0;
+}
- if (uselocks)
+int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr,
+ int count, u32 value, int uselocks, int nobusyloop)
+{
+ if (count > 4 || count <= 0)
+ return 0;
+
+ if (uselocks) {
+ unsigned long flags;
+ int result;
+
+ spin_lock_irqsave(&budget->debilock, flags);
+ result = ttpci_budget_debiwrite_nolock(budget, config, addr,
+ count, value, nobusyloop);
spin_unlock_irqrestore(&budget->debilock, flags);
- return 0;
+ return result;
+ }
+ return ttpci_budget_debiwrite_nolock(budget, config, addr,
+ count, value, nobusyloop);
}
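/*
 * Illustrative sketch (editorial aside, not part of the kernel patch above):
 * the general pattern of the DEBI refactor above -- a *_nolock worker that
 * may return early on error, plus a thin wrapper that owns the spinlock,
 * so no early return can leave the lock held. Names are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_bus {
	spinlock_t lock;
	bool ready;
	u32 last;
};

static int example_xfer_nolock(struct example_bus *bus, u32 val)
{
	if (!bus->ready)
		return -EBUSY;		/* early return, no lock to clean up */
	bus->last = val;		/* stands in for the hardware access */
	return 0;
}

static int example_xfer(struct example_bus *bus, u32 val, bool uselocks)
{
	unsigned long flags;
	int ret;

	if (!uselocks)
		return example_xfer_nolock(bus, val);

	spin_lock_irqsave(&bus->lock, flags);
	ret = example_xfer_nolock(bus, val);
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}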
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 7a7501bd165f..93f2335e004b 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -25,7 +25,6 @@
* GNU General Public License for more details.
*/
-#include <linux/version.h>
#include <linux/pci.h>
#include <linux/videodev2.h>
#include <linux/notifier.h>
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 765bffb49a72..d9b872b9285a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -56,10 +56,8 @@ config VIDEO_VIU
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C && DMADEVICES
- depends on MFD_TIMBERDALE || COMPILE_TEST
- select DMA_ENGINE
- select TIMB_DMA
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST
select VIDEO_ADV7180
select VIDEOBUF_DMA_CONTIG
---help---
@@ -118,6 +116,7 @@ config VIDEO_S3C_CAMIF
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/exynos4-is/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
+source "drivers/media/platform/am437x/Kconfig"
endif # V4L_PLATFORM_DRIVERS
@@ -140,6 +139,7 @@ config VIDEO_CODA
depends on HAS_DMA
select SRAM
select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select GENERIC_ALLOCATOR
---help---
@@ -213,7 +213,6 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
- depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -223,7 +222,7 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on (ARCH_SHMOBILE && OF) || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index a49936b8ce8a..3ec154742083 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -46,4 +46,6 @@ obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
obj-y += omap/
+obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
+
ccflags-y += -I$(srctree)/drivers/media/i2c
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/am437x/Kconfig
new file mode 100644
index 000000000000..7b023a76e32e
--- /dev/null
+++ b/drivers/media/platform/am437x/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_AM437X_VPFE
+ tristate "TI AM437x VPFE video capture driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on SOC_AM43XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for AM437x Video Processing Front End based Video
+ Capture Driver.
+
+ To compile this driver as a module, choose M here. The module
+ will be called am437x-vpfe.
diff --git a/drivers/media/platform/am437x/Makefile b/drivers/media/platform/am437x/Makefile
new file mode 100644
index 000000000000..d11fff16f260
--- /dev/null
+++ b/drivers/media/platform/am437x/Makefile
@@ -0,0 +1,3 @@
+# Makefile for AM437x VPFE driver
+
+obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x-vpfe.o
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
new file mode 100644
index 000000000000..56a5cb0d2152
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -0,0 +1,2776 @@
+/*
+ * TI VPFE capture Driver
+ *
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-of.h>
+
+#include "am437x-vpfe.h"
+
+#define VPFE_MODULE_NAME "vpfe"
+#define VPFE_VERSION "0.1.0"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-8");
+
+#define vpfe_dbg(level, dev, fmt, arg...) \
+ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
+#define vpfe_info(dev, fmt, arg...) \
+ v4l2_info(&dev->v4l2_dev, fmt, ##arg)
+#define vpfe_err(dev, fmt, arg...) \
+ v4l2_err(&dev->v4l2_dev, fmt, ##arg)
+
+/* standard information */
+struct vpfe_standard {
+ v4l2_std_id std_id;
+ unsigned int width;
+ unsigned int height;
+ struct v4l2_fract pixelaspect;
+ int frame_format;
+};
+
+static const struct vpfe_standard vpfe_standards[] = {
+ {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+ {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
+
+struct bus_format {
+ unsigned int width;
+ unsigned int bpp;
+};
+
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * @name: V4L2 format description
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @code: V4L2 media bus format code
+ * @l: bus width in bits and bytes per pixel when a 10-bit bus is used
+ * @s: bus width in bits and bytes per pixel when an 8-bit bus is used
+ * @supported: indicates whether the format is supported by the subdev
+ * @index: format index
+ */
+struct vpfe_fmt {
+ const char *name;
+ u32 fourcc;
+ u32 code;
+ struct bus_format l;
+ struct bus_format s;
+ bool supported;
+ u32 index;
+};
+
+static struct vpfe_fmt formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "RAW8 BGGR",
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 GBRG",
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 GRBG",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 RGGB",
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RGB565 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ },
+};
+
+static int
+__vpfe_get_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp);
+
+static struct vpfe_fmt *find_format_by_code(unsigned int code)
+{
+ struct vpfe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->code == code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+{
+ struct vpfe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void
+mbus_to_pix(struct vpfe_device *vpfe,
+ const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix, unsigned int *bpp)
+{
+ struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
+ unsigned int bus_width = sdinfo->vpfe_param.bus_width;
+ struct vpfe_fmt *fmt;
+
+ fmt = find_format_by_code(mbus->code);
+ if (WARN_ON(fmt == NULL)) {
+ pr_err("Invalid mbus code set\n");
+ *bpp = 1;
+ return;
+ }
+
+ memset(pix, 0, sizeof(*pix));
+ v4l2_fill_pix_format(pix, mbus);
+ pix->pixelformat = fmt->fourcc;
+ *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+
+ /* pitch should be 32 bytes aligned */
+ pix->bytesperline = ALIGN(pix->width * *bpp, 32);
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
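/*
 * Worked example (editorial aside, not part of the kernel patch above):
 * for 720x480 UYVY on an 8-bit bus, *bpp = 2, so
 * bytesperline = ALIGN(720 * 2, 32) = 1440 (already a multiple of 32) and
 * sizeimage = 1440 * 480 = 691200 bytes. A 721-pixel-wide line would give
 * 721 * 2 = 1442 bytes, rounded up to 1472 by the 32-byte pitch alignment.
 */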
+
+static void pix_to_mbus(struct vpfe_device *vpfe,
+ struct v4l2_pix_format *pix_fmt,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct vpfe_fmt *fmt;
+
+ fmt = find_format_by_pix(pix_fmt->pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ pix_fmt->pixelformat);
+ fmt = &formats[0];
+ }
+
+ memset(mbus_fmt, 0, sizeof(*mbus_fmt));
+ v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+}
+
+/* Print Four-character-code (FOURCC) */
+static char *print_fourcc(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
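/*
 * Worked example (editorial aside, not part of the kernel patch above):
 * for V4L2_PIX_FMT_YUYV (0x56595559) the bytes are extracted LSB first, so
 * code[] becomes 'Y' (0x59), 'U' (0x55), 'Y' (0x59), 'V' (0x56) and the
 * function returns "YUYV". The static buffer means the result must be
 * consumed before the next call.
 */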
+
+static int
+cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
+{
+ return lhs->type == rhs->type &&
+ lhs->fmt.pix.width == rhs->fmt.pix.width &&
+ lhs->fmt.pix.height == rhs->fmt.pix.height &&
+ lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
+ lhs->fmt.pix.field == rhs->fmt.pix.field &&
+ lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
+ lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
+ lhs->fmt.pix.quantization == rhs->fmt.pix.quantization;
+}
+
+static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
+{
+ return ioread32(ccdc->ccdc_cfg.base_addr + offset);
+}
+
+static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
+{
+ iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
+}
+
+static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
+{
+ return container_of(ccdc, struct vpfe_device, ccdc);
+}
+
+static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct vpfe_cap_buffer, vb);
+}
+
+static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
+{
+ vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
+}
+
+static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
+{
+ unsigned int cfg;
+
+ if (!flag) {
+ cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
+ cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
+ } else {
+ cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
+ }
+
+ vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
+}
+
+static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt,
+ int bpp)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int val, mid_img;
+
+ /*
+ * ppc - per pixel count. Indicates how many pixels per cell are
+ * output to SDRAM. For example, for YCbCr it is one Y and one C,
+ * so 2; for raw capture it is 1.
+ */
+ horz_start = image_win->left * bpp;
+ horz_nr_pixels = (image_win->width * bpp) - 1;
+ vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
+ horz_nr_pixels, VPFE_HORZ_INFO);
+
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* Since the first line doesn't have any data */
+ vert_start += 1;
+ /* configure VDINT0 */
+ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
+ } else {
+ /* Since the first line doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /*
+ * configure VDINT0 and VDINT1. VDINT1 will be at half
+ * of image height
+ */
+ mid_img = vert_start + (image_win->height / 2);
+ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
+ (mid_img & VPFE_VDINT_VDINT1_MASK);
+ }
+
+ vpfe_reg_write(ccdc, val, VPFE_VDINT);
+
+ vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
+ vert_start, VPFE_VERT_START);
+ vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
+}
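/*
 * Worked example (editorial aside, not part of the kernel patch above):
 * for an interlaced 720x480 window with left = 0, top = 0 and bpp = 2,
 * the code above programs horz_start = 0,
 * horz_nr_pixels = 720 * 2 - 1 = 1439,
 * vert_nr_lines = (480 >> 1) - 1 = 239 and vert_start = (0 >> 1) + 1 = 1.
 */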
+
+static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
+
+ vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
+ vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
+ vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
+ vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
+ vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
+ vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
+ vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_SYNMODE));
+ vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
+ vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
+ vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_VERT_START));
+ vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_VERT_LINES));
+}
+
+static int
+vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_config_params_raw *ccdcparam)
+{
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
+ u8 max_gamma, max_data;
+
+ if (!ccdcparam->alaw.enable)
+ return 0;
+
+ max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
+ max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
+
+ if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
+ ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
+ max_gamma > max_data) {
+ vpfe_dbg(1, vpfe, "Invalid data line select\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_config_params_raw *raw_params)
+{
+ struct vpfe_ccdc_config_params_raw *config_params =
+ &ccdc->ccdc_cfg.bayer.config_params;
+
+ *config_params = *raw_params;
+}
+
+/*
+ * vpfe_ccdc_restore_defaults()
+ * This function will write defaults to all CCDC registers
+ */
+static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
+{
+ int i;
+
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+
+ /* set all registers to default value */
+ for (i = 4; i <= 0x94; i += 4)
+ vpfe_reg_write(ccdc, 0, i);
+
+ vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
+ vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
+}
+
+static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
+{
+ int dma_cntl, i, pcr;
+
+ /* If the CCDC module is still busy wait for it to be done */
+ for (i = 0; i < 10; i++) {
+ usleep_range(5000, 6000);
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (!pcr)
+ break;
+
+ /* make sure it is disabled */
+ vpfe_pcr_enable(ccdc, 0);
+ }
+
+ /* Disable CCDC by resetting all registers to default POR values */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /*
+ * If the DMA_CNTL overflow bit is set, clear it. It appears to take
+ * a while (~20 ms) for this to become quiescent.
+ */
+ for (i = 0; i < 10; i++) {
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
+ break;
+
+ /* Clear the overflow bit */
+ vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
+ usleep_range(5000, 6000);
+ }
+
+ /* Disable the module at the CONFIG level */
+ vpfe_config_enable(ccdc, 0);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_ccdc_config_params_raw raw_params;
+ int x;
+
+ if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
+ return -EINVAL;
+
+ x = copy_from_user(&raw_params, params, sizeof(raw_params));
+ if (x) {
+ vpfe_dbg(1, vpfe,
+ "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
+ x);
+ return -EFAULT;
+ }
+
+ if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
+ vpfe_ccdc_update_raw_params(ccdc, &raw_params);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * vpfe_ccdc_config_ycbcr()
+ * This function will configure CCDC for YCbCr video capture
+ */
+static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
+ u32 syn_mode;
+
+ vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
+ /*
+ * First restore the CCDC registers to their default values. This is
+ * important since we assume default values in many registers that we
+ * don't touch.
+ */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /*
+ * configure pixel format and video frame format, enable output to
+ * SDRAM, enable the internal timing generator and 8-bit pack mode
+ */
+ syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
+ VPFE_SYN_MODE_INPMOD_SHIFT) |
+ ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
+ VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
+ VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
+
+ /*
+ * configure the FID, VD, HD pin polarity,
+ * fld,hd pol positive, vd negative, 8-bit data
+ */
+ syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= VPFE_SYN_MODE_10BITS;
+ else
+ syn_mode |= VPFE_SYN_MODE_8BITS;
+ } else {
+ /* y/c external sync mode */
+ syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
+ VPFE_FID_POL_SHIFT) |
+ ((params->hd_pol & VPFE_HD_POL_MASK) <<
+ VPFE_HD_POL_SHIFT) |
+ ((params->vd_pol & VPFE_VD_POL_MASK) <<
+ VPFE_VD_POL_SHIFT));
+ }
+ vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
+
+ /* configure video window */
+ vpfe_ccdc_setwin(ccdc, &params->win,
+ params->frm_fmt, params->bytesperpixel);
+
+ /*
+ * configure the order of Y/Cb/Cr in SDRAM, and disable latching of
+ * internal registers on VSYNC
+ */
+ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ vpfe_reg_write(ccdc,
+ (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
+ VPFE_LATCH_ON_VSYNC_DISABLE |
+ VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
+ else
+ vpfe_reg_write(ccdc,
+ (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
+ VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
+
+ /*
+ * configure the horizontal line offset. This should be on a
+ * 32 byte boundary, so clear the 5 LSBs.
+ */
+ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
+ VPFE_SDOFST);
+}
+
+static void
+vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->enable) {
+ /* configure DCSub */
+ val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
+ vpfe_reg_write(ccdc, val, VPFE_DCSUB);
+ vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
+ return;
+ }
+ /*
+ * Configure gain, start pixel, number of lines to average,
+ * number of pixels per line to average, and enable black clamping
+ */
+ val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
+ ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
+ VPFE_BLK_ST_PXL_SHIFT) |
+ ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
+ VPFE_BLK_SAMPLE_LINE_SHIFT) |
+ ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
+ VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
+ vpfe_reg_write(ccdc, val, VPFE_CLAMP);
+ /* If black clamping is enabled then make DCSub 0 */
+ vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
+}
+
+static void
+vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
+ ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_GB_COMP_SHIFT) |
+ ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_R_COMP_SHIFT));
+ vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
+}
+
+/*
+ * vpfe_ccdc_config_raw()
+ * This function will configure CCDC for Raw capture mode
+ */
+static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_ccdc_config_params_raw *config_params =
+ &ccdc->ccdc_cfg.bayer.config_params;
+ struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
+ unsigned int syn_mode;
+ unsigned int val;
+
+ vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
+
+ /* Reset CCDC */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /* Disable latching function registers on VSYNC */
+ vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
+
+ /*
+ * Configure the vertical sync polarity(SYN_MODE.VDPOL),
+ * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
+ * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
+ * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output
+ * SDRAM, enable internal timing generator
+ */
+ syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
+ ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
+ ((params->fid_pol & VPFE_FID_POL_MASK) <<
+ VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
+ VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
+ ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
+ VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
+ VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
+ VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val = ((config_params->alaw.gamma_wd &
+ VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
+ vpfe_reg_write(ccdc, val, VPFE_ALAW);
+ vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
+ }
+
+ /* Configure video window */
+ vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
+ params->bytesperpixel);
+
+ /* Configure Black Clamp */
+ vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
+
+ /* Configure Black level compensation */
+ vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
+
+ /* If data size is 8 bit then pack the data */
+ if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ syn_mode |= VPFE_DATA_PACK_ENABLE;
+
+ /*
+ * Configure Horizontal offset register. If pack 8 is enabled then
+ * 1 pixel will take 1 byte
+ */
+ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
+
+ vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
+ params->bytesperline, params->bytesperline);
+
+ /* Set value for SDOFST */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+ /* For interlace inverse mode */
+ vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
+ VPFE_SDOFST);
+ } else {
+ /* For interlace non inverse mode */
+ vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
+ VPFE_SDOFST);
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
+ VPFE_SDOFST);
+ }
+
+ vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
+
+ vpfe_reg_dump(ccdc);
+}
+
+static inline int
+vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
+ enum ccdc_buftype buf_type)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc->ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
+
+ return 0;
+}
+
+static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.buf_type;
+
+ return ccdc->ccdc_cfg.ycbcr.buf_type;
+}
+
+static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+
+ vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
+ ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ /*
+ * Need to clear it in case it was left on
+ * after the last capture.
+ */
+ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_SBGGR8:
+ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
+ break;
+
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_RGB565X:
+ break;
+
+ case V4L2_PIX_FMT_SBGGR16:
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_YUYV:
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ break;
+
+ case V4L2_PIX_FMT_UYVY:
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
+{
+ u32 pixfmt;
+
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ } else {
+ if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+
+ return pixfmt;
+}
+
+static int
+vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *win, unsigned int bpp)
+{
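+ /* the CCDC line stride must be 32-byte aligned, hence the ALIGN below */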
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc->ccdc_cfg.bayer.win = *win;
+ ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
+ ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
+ } else {
+ ccdc->ccdc_cfg.ycbcr.win = *win;
+ ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
+ ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
+ }
+
+ return 0;
+}
+
+static inline void
+vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *win)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc->ccdc_cfg.bayer.win;
+ else
+ *win = ccdc->ccdc_cfg.ycbcr.win;
+}
+
+static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.bytesperline;
+
+ return ccdc->ccdc_cfg.ycbcr.bytesperline;
+}
+
+static inline int
+vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
+ enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+
+ return 0;
+}
+
+static inline enum ccdc_frmfmt
+vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.frm_fmt;
+
+ return ccdc->ccdc_cfg.ycbcr.frm_fmt;
+}
+
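+/* Read the current field id from the SYNMODE register (bit 15) */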
+static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
+{
+ return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
+}
+
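+/* Program the capture buffer address; the low bits are masked as the CCDC requires 32-byte alignment */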
+static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
+{
+ vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
+}
+
+static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
+ struct vpfe_hw_if_param *params)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+
+ ccdc->ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
+ ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+
+ case VPFE_RAW_BAYER:
+ ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
+ ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
+ if (params->bus_width == 10)
+ ccdc->ccdc_cfg.bayer.config_params.data_sz =
+ VPFE_CCDC_DATA_10BITS;
+ else
+ ccdc->ccdc_cfg.bayer.config_params.data_sz =
+ VPFE_CCDC_DATA_8BITS;
+ vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
+ params->bus_width);
+ vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
+ ccdc->ccdc_cfg.bayer.config_params.data_sz);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
+{
+ unsigned int vpfe_int_status;
+
+ vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
+
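+ /* IRQ_STS is write-1-to-clear: set the bit(s) to acknowledge */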
+ switch (vdint) {
+ /* VD0 interrupt */
+ case VPFE_VDINT0:
+ vpfe_int_status &= ~VPFE_VDINT0;
+ vpfe_int_status |= VPFE_VDINT0;
+ break;
+
+ /* VD1 interrupt */
+ case VPFE_VDINT1:
+ vpfe_int_status &= ~VPFE_VDINT1;
+ vpfe_int_status |= VPFE_VDINT1;
+ break;
+
+ /* VD2 interrupt */
+ case VPFE_VDINT2:
+ vpfe_int_status &= ~VPFE_VDINT2;
+ vpfe_int_status |= VPFE_VDINT2;
+ break;
+
+ /* Clear all interrupts */
+ default:
+ vpfe_int_status &= ~(VPFE_VDINT0 |
+ VPFE_VDINT1 |
+ VPFE_VDINT2);
+ vpfe_int_status |= (VPFE_VDINT0 |
+ VPFE_VDINT1 |
+ VPFE_VDINT2);
+ break;
+ }
+ /* Clear specific VDINT from the status register */
+ vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
+
+ vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
+
+ /* Acknowledge that we are done with all interrupts */
+ vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
+}
+
+static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
+{
+ ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
+
+ ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
+ ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
+ ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
+
+ ccdc->ccdc_cfg.ycbcr.win.left = 0;
+ ccdc->ccdc_cfg.ycbcr.win.top = 0;
+ ccdc->ccdc_cfg.ycbcr.win.width = 720;
+ ccdc->ccdc_cfg.ycbcr.win.height = 576;
+ ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
+
+ ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
+
+ ccdc->ccdc_cfg.bayer.win.left = 0;
+ ccdc->ccdc_cfg.bayer.win.top = 0;
+ ccdc->ccdc_cfg.bayer.win.width = 800;
+ ccdc->ccdc_cfg.bayer.win.height = 600;
+ ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
+ ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
+ VPFE_CCDC_GAMMA_BITS_09_0;
+}
+
+/*
+ * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
+ */
+static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
+ struct v4l2_format *f)
+{
+ struct v4l2_rect image_win;
+ enum ccdc_buftype buf_type;
+ enum ccdc_frmfmt frm_fmt;
+
+ memset(f, 0, sizeof(*f));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
+ f->fmt.pix.width = image_win.width;
+ f->fmt.pix.height = image_win.height;
+ f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
+ f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
+ frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+
+ if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ } else {
+ vpfe_err(vpfe, "Invalid buf_type\n");
+ return -EINVAL;
+ }
+ } else {
+ vpfe_err(vpfe, "Invalid frm_fmt\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
+{
+ enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
+
+ vpfe_dbg(1, vpfe, "pixelformat: %s\n",
+ print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
+
+ if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
+ vpfe->fmt.fmt.pix.pixelformat) < 0) {
+ vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
+ return -EINVAL;
+ }
+
+ /* configure the image window */
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+
+ switch (vpfe->fmt.fmt.pix.field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = vpfe_ccdc_set_buftype(
+ &vpfe->ccdc,
+ CCDC_BUFTYPE_FLD_INTERLEAVED);
+ break;
+
+ case V4L2_FIELD_NONE:
+ frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+
+ case V4L2_FIELD_SEQ_TB:
+ ret = vpfe_ccdc_set_buftype(
+ &vpfe->ccdc,
+ CCDC_BUFTYPE_FLD_SEPARATED);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
+}
+
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default
+ * pix format & crop values in the vpfe device and ccdc. It first
+ * starts with default values based on the standard table.
+ * It then checks if the sub device supports g_mbus_fmt and overrides the
+ * values based on that. Crop values are set to match the scan resolution
+ * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
+ * values in the ccdc.
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe,
+ v4l2_std_id std_id)
+{
+ struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+ if (vpfe_standards[i].std_id & std_id) {
+ vpfe->std_info.active_pixels =
+ vpfe_standards[i].width;
+ vpfe->std_info.active_lines =
+ vpfe_standards[i].height;
+ vpfe->std_info.frame_format =
+ vpfe_standards[i].frame_format;
+ vpfe->std_index = i;
+
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vpfe_standards)) {
+ vpfe_err(vpfe, "standard not supported\n");
+ return -EINVAL;
+ }
+
+ vpfe->crop.top = vpfe->crop.left = 0;
+ vpfe->crop.width = vpfe->std_info.active_pixels;
+ vpfe->crop.height = vpfe->std_info.active_lines;
+ pix->width = vpfe->crop.width;
+ pix->height = vpfe->crop.height;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+
+ /* first field and frame format based on standard frame format */
+ if (vpfe->std_info.frame_format)
+ pix->field = V4L2_FIELD_INTERLACED;
+ else
+ pix->field = V4L2_FIELD_NONE;
+
+ ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ if (ret)
+ return ret;
+
+ /* Update the crop window based on found values */
+ vpfe->crop.width = pix->width;
+ vpfe->crop.height = pix->height;
+
+ return vpfe_config_ccdc_image_format(vpfe);
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe)
+{
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ sdinfo = &vpfe->cfg->sub_devs[0];
+ sdinfo->sd = vpfe->sd[0];
+ vpfe->current_input = 0;
+ vpfe->std_index = 0;
+ /* Configure the default format information */
+ ret = vpfe_config_image_format(vpfe,
+ vpfe_standards[vpfe->std_index].std_id);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(vpfe->pdev);
+
+ vpfe_config_enable(&vpfe->ccdc, 1);
+
+ vpfe_ccdc_restore_defaults(&vpfe->ccdc);
+
+ /* Clear all VPFE interrupts */
+ vpfe_clear_intr(&vpfe->ccdc, -1);
+
+ return ret;
+}
+
+/*
+ * vpfe_release : This function is based on the vb2_fop_release
+ * helper function.
+ * It has been augmented to handle module power management by
+ * disabling/enabling the h/w module functional clock when necessary.
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vpfe->lock);
+
+ if (v4l2_fh_is_singular_file(file))
+ vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+ ret = _vb2_fop_release(file, NULL);
+
+ mutex_unlock(&vpfe->lock);
+
+ return ret;
+}
+
+/*
+ * vpfe_open : This function is based on the v4l2_fh_open helper function.
+ * It has been augmented to handle module power management by
+ * disabling/enabling the h/w module functional clock when necessary.
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vpfe->lock);
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ vpfe_err(vpfe, "v4l2_fh_open failed\n");
+ goto unlock;
+ }
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ if (vpfe_initialize_device(vpfe)) {
+ v4l2_fh_release(file);
+ ret = -ENODEV;
+ }
+
+unlock:
+ mutex_unlock(&vpfe->lock);
+ return ret;
+}
+
+/**
+ * vpfe_schedule_next_buffer: set next buffer address for capture
+ * @vpfe: ptr to vpfe device
+ *
+ * This function gets the next buffer from the DMA queue and
+ * sets its address in the VPFE register for capture.
+ * The buffer is marked active.
+ *
+ * Assumes the caller is already holding vpfe->dma_queue_lock.
+ */
+static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+{
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&vpfe->next_frm->list);
+
+ vpfe_set_sdr_addr(&vpfe->ccdc,
+ vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0));
+}
+
+static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
+{
+ unsigned long addr;
+
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0) +
+ vpfe->field_off;
+
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
+}
+
+/*
+ * vpfe_process_buffer_complete: process a completed buffer
+ * @vpfe: ptr to vpfe device
+ *
+ * This function timestamps the buffer and marks it as DONE. It also
+ * wakes up any process waiting on the queue and makes the next buffer
+ * the current one.
+ */
+static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
+{
+ v4l2_get_timestamp(&vpfe->cur_frm->vb.v4l2_buf.timestamp);
+ vpfe->cur_frm->vb.v4l2_buf.field = vpfe->fmt.fmt.pix.field;
+ vpfe->cur_frm->vb.v4l2_buf.sequence = vpfe->sequence++;
+ vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_DONE);
+ vpfe->cur_frm = vpfe->next_frm;
+}
+
+/*
+ * vpfe_isr : ISR handler for vpfe capture (VINT0)
+ * @irq: irq number
+ * @dev: ptr to the vpfe device
+ *
+ * It updates the status of the captured buffer, takes the next buffer from
+ * the queue and sets its address in the VPFE registers
+ */
+static irqreturn_t vpfe_isr(int irq, void *dev)
+{
+ struct vpfe_device *vpfe = (struct vpfe_device *)dev;
+ enum v4l2_field field;
+ int intr_status;
+ int fid;
+
+ intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
+
+ if (intr_status & VPFE_VDINT0) {
+ field = vpfe->fmt.fmt.pix.field;
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+ goto next_intr;
+ }
+
+ /* interlaced or TB capture: check which field the hardware is in */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+ /*
+ * based on whether the two fields are stored
+ * interleave or separately in memory,
+ * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+
+ goto next_intr;
+ }
+ /*
+ * If one field has just been captured, configure
+ * the next frame: get the next frame from the DMA
+ * queue; if no frame is available, hold on to the
+ * current buffer.
+ */
+ spin_lock(&vpfe->dma_queue_lock);
+ if (!list_empty(&vpfe->dma_queue) &&
+ vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ spin_unlock(&vpfe->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * Out of sync. Recover from the hardware out-of-sync
+ * condition; we may lose one frame.
+ */
+ vpfe->field = fid;
+ }
+ }
+
+next_intr:
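+ /* VDINT1: for progressive capture, program the next buffer if one is queued */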
+ if (intr_status & VPFE_VDINT1) {
+ spin_lock(&vpfe->dma_queue_lock);
+ if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
+ !list_empty(&vpfe->dma_queue) &&
+ vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ spin_unlock(&vpfe->dma_queue_lock);
+ }
+
+ vpfe_clear_intr(&vpfe->ccdc, intr_status);
+
+ return IRQ_HANDLED;
+}
+
+static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
+{
+ unsigned int intr = VPFE_VDINT0;
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ intr |= VPFE_VDINT1;
+
+ vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
+}
+
+static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
+{
+ unsigned int intr = VPFE_VDINT0;
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
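+ /* VDINT1 is only used for progressive capture */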
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ intr |= VPFE_VDINT1;
+
+ vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
+}
+
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_querycap\n");
+
+ strlcpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", vpfe->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+/* get the format set at output pad of the adjacent subdev */
+static int __vpfe_get_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_subdev_format fmt;
+ int ret;
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+
+ ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (!ret) {
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ } else {
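+ /* fall back to the legacy g_mbus_fmt video op */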
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
+ sdinfo->grp_id,
+ video, g_mbus_fmt,
+ &mbus_fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+ v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
+ mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
+ }
+
+ format->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe,
+ "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
+ __func__, format->fmt.pix.width, format->fmt.pix.height,
+ print_fourcc(format->fmt.pix.pixelformat),
+ format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+
+ return 0;
+}
+
+/* set the format at output pad of the adjacent subdev */
+static int __vpfe_set_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_subdev_format fmt;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+
+ pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+
+ ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (!ret) {
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ } else {
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
+ sdinfo->grp_id,
+ video, s_mbus_fmt,
+ &mbus_fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
+ mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
+ }
+
+ format->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe,
+ "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
+ __func__, format->fmt.pix.width, format->fmt.pix.height,
+ print_fourcc(format->fmt.pix.pixelformat),
+ format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+
+ return 0;
+}
+
+static int vpfe_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
+
+ *fmt = vpfe->fmt;
+
+ return 0;
+}
+
+static int vpfe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt = NULL;
+ unsigned int k;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
+ f->index);
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ if (formats[k].index == f->index) {
+ fmt = &formats[k];
+ break;
+ }
+ }
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ f->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
+ f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);
+
+ return 0;
+}
+
+static int vpfe_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ unsigned int bpp;
+
+ vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+
+ return __vpfe_get_format(vpfe, fmt, &bpp);
+}
+
+static int vpfe_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_format format;
+ unsigned int bpp;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = vpfe_try_fmt(file, priv, fmt);
+ if (ret)
+ return ret;
+
+ /* fetch the current sub device format to compare against the request */
+ ret = __vpfe_get_format(vpfe, &format, &bpp);
+ if (ret)
+ return ret;
+
+ if (!cmp_v4l2_format(fmt, &format)) {
+ /* Sensor format is different from the requested format
+ * so we need to change it
+ */
+ ret = __vpfe_set_format(vpfe, fmt, &bpp);
+ if (ret)
+ return ret;
+ } else /* Just make sure all of the fields are consistent */
+ *fmt = format;
+
+ /* First detach any IRQ if currently attached */
+ vpfe_detach_irq(vpfe);
+ vpfe->fmt = *fmt;
+ vpfe->bpp = bpp;
+
+ /* Update the crop window based on found values */
+ vpfe->crop.width = fmt->fmt.pix.width;
+ vpfe->crop.height = fmt->fmt.pix.height;
+
+ /* set image capture parameters in the ccdc */
+ return vpfe_config_ccdc_image_format(vpfe);
+}
+
+static int vpfe_enum_size(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_subdev_frame_size_enum fse;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_mbus_framefmt mbus;
+ struct v4l2_pix_format pix;
+ struct vpfe_fmt *fmt;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
+
+ /* check for valid format */
+ fmt = find_format_by_pix(fsize->pixel_format);
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ memset(&pix, 0x0, sizeof(pix));
+ /* Construct pix from parameter and use default for the rest */
+ pix.pixelformat = fsize->pixel_format;
+ pix.width = 640;
+ pix.height = 480;
+ pix.colorspace = V4L2_COLORSPACE_SRGB;
+ pix.field = V4L2_FIELD_NONE;
+ pix_to_mbus(vpfe, &pix, &mbus);
+
+ memset(&fse, 0x0, sizeof(fse));
+ fse.index = fsize->index;
+ fse.pad = 0;
+ fse.code = mbus.code;
+ ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return -EINVAL;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
+ fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
+
+ return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
+ */
+static int
+vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
+ int *subdev_index,
+ int *subdev_input_index,
+ int app_input_index)
+{
+ struct vpfe_config *cfg = vpfe->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
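+ /* each sub device currently exposes a single input */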
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (app_input_index < (j + 1)) {
+ *subdev_index = i;
+ *subdev_input_index = app_input_index - j;
+ return 0;
+ }
+ j++;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_get_app_input_index - Get app input index for a given subdev input index
+ * The driver stores the input index of the current sub device and translates
+ * it when the application requests the current input.
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
+ int *app_input_index)
+{
+ struct vpfe_config *cfg = vpfe->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (!strcmp(sdinfo->name, vpfe->current_subdev->name)) {
+ if (vpfe->current_input >= 1)
+ return -1;
+ *app_input_index = j + vpfe->current_input;
+ return 0;
+ }
+ j++;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int subdev, index;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
+
+ if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
+ inp->index) < 0) {
+ vpfe_dbg(1, vpfe,
+ "input information not found for the subdev\n");
+ return -EINVAL;
+ }
+ sdinfo = &vpfe->cfg->sub_devs[subdev];
+ *inp = sdinfo->inputs[index];
+
+ return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_g_input\n");
+
+ return vpfe_get_app_input_index(vpfe, index);
+}
+
+/* Assumes the caller is holding vpfe->lock */
+static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
+{
+ int subdev_index = 0, inp_index = 0;
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_route *route;
+ u32 input, output;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+ ret = vpfe_get_subdev_input_index(vpfe,
+ &subdev_index,
+ &inp_index,
+ index);
+ if (ret < 0) {
+ vpfe_err(vpfe, "invalid input index: %d\n", index);
+ goto get_out;
+ }
+
+ sdinfo = &vpfe->cfg->sub_devs[subdev_index];
+ sdinfo->sd = vpfe->sd[subdev_index];
+ route = &sdinfo->routes[inp_index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ if (sdinfo->sd) {
+ ret = v4l2_subdev_call(sdinfo->sd, video,
+ s_routing, input, output, 0);
+ if (ret) {
+ vpfe_err(vpfe, "s_routing failed\n");
+ ret = -EINVAL;
+ goto get_out;
+ }
+ }
+
+ }
+
+ vpfe->current_subdev = sdinfo;
+ if (sdinfo->sd)
+ vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
+ vpfe->current_input = index;
+ vpfe->std_index = 0;
+
+ /* set the bus/interface parameter for the sub device in ccdc */
+ ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
+ if (ret)
+ return ret;
+
+ /* set the default image parameters in the device */
+ return vpfe_config_image_format(vpfe,
+ vpfe_standards[vpfe->std_index].std_id);
+
+get_out:
+ return ret;
+}
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe,
+ "vpfe_s_input: index: %d\n", index);
+
+ return vpfe_set_input(vpfe, index);
+}
+
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+
+ vpfe_dbg(2, vpfe, "vpfe_querystd\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ /* Call querystd function of decoder device */
+ return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+}
+
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_s_std\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
+ video, s_std, std_id);
+ if (ret < 0) {
+ vpfe_err(vpfe, "Failed to set standard\n");
+ return ret;
+ }
+ ret = vpfe_config_image_format(vpfe, std_id);
+
+ return ret;
+}
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+
+ vpfe_dbg(2, vpfe, "vpfe_g_std\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
+ return -ENODATA;
+
+ *std_id = vpfe_standards[vpfe->std_index].std_id;
+
+ return 0;
+}
+
+/*
+ * vpfe_calculate_offsets : This function calculates the buffer offset
+ * for the top and bottom fields
+ */
+static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
+{
+ struct v4l2_rect image_win;
+
+ vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
+
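+ /* field_off is added to the buffer address to reach the bottom field (SEQ_TB) */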
+ vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
+ vpfe->field_off = image_win.height * image_win.width;
+}
+
+/*
+ * vpfe_queue_setup - Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when reqbuf() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpfe_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+
+ if (fmt && fmt->fmt.pix.sizeimage < vpfe->fmt.fmt.pix.sizeimage)
+ return -EINVAL;
+
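+ /* make sure at least three buffers are allocated in total */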
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ *nplanes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : vpfe->fmt.fmt.pix.sizeimage;
+ alloc_ctxs[0] = vpfe->alloc_ctx;
+
+ vpfe_dbg(1, vpfe,
+ "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
+
+ /* Calculate field offset */
+ vpfe_calculate_offsets(vpfe);
+
+ return 0;
+}
+
+/*
+ * vpfe_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This callback is invoked when vb2_qbuf() is called on the buffer.
+ * It sets the plane payload and checks that it fits within the
+ * allocated plane size.
+ */
+static int vpfe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
+
+ vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
+
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vb->v4l2_buf.field = vpfe->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+/*
+ * vpfe_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ */
+static void vpfe_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_cap_buffer *buf = to_vpfe_buffer(vb);
+ unsigned long flags = 0;
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_add_tail(&buf->list, &vpfe->dma_queue);
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
+/*
+ * vpfe_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ struct vpfe_cap_buffer *buf, *tmp;
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long flags;
+ unsigned long addr;
+ int ret;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+
+ vpfe->field = 0;
+ vpfe->sequence = 0;
+
+ sdinfo = vpfe->current_subdev;
+
+ vpfe_attach_irq(vpfe);
+
+ if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ vpfe_ccdc_config_raw(&vpfe->ccdc);
+ else
+ vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
+
+ /* Get the next frame from the buffer queue */
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ vpfe->cur_frm = vpfe->next_frm;
+ /* Remove buffer from the buffer queue */
+ list_del(&vpfe->cur_frm->list);
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb, 0);
+
+ vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
+
+ vpfe_pcr_enable(&vpfe->ccdc, 1);
+
+ ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
+ if (ret < 0) {
+ vpfe_err(vpfe, "Error starting the sub device stream\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+}
+
+/*
+ * vpfe_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and any remaining buffers
+ * in the DMA queue are released.
+ */
+static void vpfe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long flags;
+ int ret;
+
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
+
+ vpfe_detach_irq(vpfe);
+
+ sdinfo = vpfe->current_subdev;
+ ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
+
+ /* release all active buffers */
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ if (vpfe->cur_frm == vpfe->next_frm) {
+ vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (vpfe->cur_frm != NULL)
+ vb2_buffer_done(&vpfe->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (vpfe->next_frm != NULL)
+ vb2_buffer_done(&vpfe->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vpfe->dma_queue)) {
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&vpfe->next_frm->list);
+ vb2_buffer_done(&vpfe->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
+static int vpfe_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_cropcap\n");
+
+ if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
+ return -EINVAL;
+
+ memset(crop, 0, sizeof(struct v4l2_cropcap));
+
+ crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ crop->defrect.width = vpfe_standards[vpfe->std_index].width;
+ crop->bounds.width = crop->defrect.width;
+ crop->defrect.height = vpfe_standards[vpfe->std_index].height;
+ crop->bounds.height = crop->defrect.height;
+ crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;
+
+ return 0;
+}
+
+static int
+vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vpfe->crop.width;
+ s->r.height = vpfe->crop.height;
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ s->r = vpfe->crop;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
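+/* Return true if rectangle @a lies entirely within rectangle @b */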
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int
+vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_rect cr = vpfe->crop;
+ struct v4l2_rect r = s->r;
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ v4l_bound_align_image(&r.width, 0, cr.width, 0,
+ &r.height, 0, cr.height, 0, 0);
+
+ r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
+ r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
+ return -ERANGE;
+
+ s->r = vpfe->crop = r;
+
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ vpfe->fmt.fmt.pix.width = r.width;
+ vpfe->fmt.fmt.pix.height = r.height;
+ vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
+ vpfe->fmt.fmt.pix.height;
+
+ vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
+ r.left, r.top, r.width, r.height, cr.width, cr.height);
+
+ return 0;
+}
+
+static long vpfe_ioctl_default(struct file *file, void *priv,
+ bool valid_prio, unsigned int cmd, void *param)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
+
+ if (!valid_prio) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ switch (cmd) {
+ case VIDIOC_AM437X_CCDC_CFG:
+ ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
+ if (ret) {
+ vpfe_dbg(2, vpfe,
+ "Error setting parameters in CCDC\n");
+ return ret;
+ }
+ ret = vpfe_get_ccdc_image_format(vpfe,
+ &vpfe->fmt);
+ if (ret < 0) {
+ vpfe_dbg(2, vpfe,
+ "Invalid image format at CCDC\n");
+ return ret;
+ }
+ break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct vb2_ops vpfe_video_qops = {
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .queue_setup = vpfe_queue_setup,
+ .buf_prepare = vpfe_buffer_prepare,
+ .buf_queue = vpfe_buffer_queue,
+ .start_streaming = vpfe_start_streaming,
+ .stop_streaming = vpfe_stop_streaming,
+};
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
+
+ .vidioc_enum_framesizes = vpfe_enum_size,
+
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_selection = vpfe_g_selection,
+ .vidioc_s_selection = vpfe_s_selection,
+
+ .vidioc_default = vpfe_ioctl_default,
+};
+
+static int
+vpfe_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ struct vpfe_device, v4l2_dev);
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ struct vpfe_subdev_info *sdinfo;
+ bool found = false;
+ int i, j;
+
+ vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &vpfe->cfg->sub_devs[i];
+
+ if (!strcmp(sdinfo->name, subdev->name)) {
+ vpfe->sd[i] = subdev;
+ vpfe_info(vpfe,
+ "v4l2 sub device %s registered\n",
+ subdev->name);
+ vpfe->sd[i]->grp_id =
+ sdinfo->grp_id;
+ /* update tvnorms from the sub devices */
+ for (j = 0; j < 1; j++)
+ vpfe->video_dev->tvnorms |=
+ sdinfo->inputs[j].std;
+
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
+ return -EINVAL;
+ }
+
+ /* setup the supported formats & indexes */
+ for (j = 0, i = 0; ; ++j) {
+ struct vpfe_fmt *fmt;
+ int ret;
+
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = j;
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret)
+ break;
+
+ fmt = find_format_by_code(mbus_code.code);
+ if (!fmt)
+ continue;
+
+ fmt->supported = true;
+ fmt->index = i++;
+ }
+
+ return 0;
+}
+
+static int vpfe_probe_complete(struct vpfe_device *vpfe)
+{
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ int err;
+
+ spin_lock_init(&vpfe->dma_queue_lock);
+ mutex_init(&vpfe->lock);
+
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ /* set first sub device as current one */
+ vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
+ vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
+
+ err = vpfe_set_input(vpfe, 0);
+ if (err)
+ goto probe_out;
+
+ /* Initialize videobuf2 queue as per the buffer type */
+ vpfe->alloc_ctx = vb2_dma_contig_init_ctx(vpfe->pdev);
+ if (IS_ERR(vpfe->alloc_ctx)) {
+ vpfe_err(vpfe, "Failed to get the context\n");
+ err = PTR_ERR(vpfe->alloc_ctx);
+ goto probe_out;
+ }
+
+ q = &vpfe->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = vpfe;
+ q->ops = &vpfe_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &vpfe->lock;
+ q->min_buffers_needed = 1;
+
+ err = vb2_queue_init(q);
+ if (err) {
+ vpfe_err(vpfe, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(vpfe->alloc_ctx);
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&vpfe->dma_queue);
+
+ vdev = vpfe->video_dev;
+ strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vpfe_fops;
+ vdev->ioctl_ops = &vpfe_ioctl_ops;
+ vdev->v4l2_dev = &vpfe->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &vpfe->lock;
+ video_set_drvdata(vdev, vpfe);
+ err = video_register_device(vpfe->video_dev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ vpfe_err(vpfe,
+ "Unable to register video device.\n");
+ goto probe_out;
+ }
+
+ return 0;
+
+probe_out:
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ return err;
+}
+
+static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ struct vpfe_device, v4l2_dev);
+
+ return vpfe_probe_complete(vpfe);
+}
+
+static struct vpfe_config *
+vpfe_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *endpoint = NULL, *rem = NULL;
+ struct v4l2_of_endpoint bus_cfg;
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_config *pdata;
+ unsigned int flags;
+ unsigned int i;
+ int err;
+
+ dev_dbg(&pdev->dev, "vpfe_get_pdata\n");
+
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ for (i = 0; ; i++) {
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ break;
+
+ sdinfo = &pdata->sub_devs[i];
+ sdinfo->grp_id = 0;
+
+ /* we only support camera */
+ sdinfo->inputs[0].index = i;
+ strcpy(sdinfo->inputs[0].name, "Camera");
+ sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
+ sdinfo->inputs[0].std = V4L2_STD_ALL;
+ sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
+
+ sdinfo->can_route = 0;
+ sdinfo->routes = NULL;
+
+ of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
+ &sdinfo->vpfe_param.if_type);
+ if (sdinfo->vpfe_param.if_type < 0 ||
+ sdinfo->vpfe_param.if_type > 4) {
+ sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
+ }
+
+ err = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ goto done;
+ }
+
+ sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
+
+ if (sdinfo->vpfe_param.bus_width < 8 ||
+ sdinfo->vpfe_param.bus_width > 16) {
+ dev_err(&pdev->dev, "Invalid bus width.\n");
+ goto done;
+ }
+
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ sdinfo->vpfe_param.hdpol = 1;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ sdinfo->vpfe_param.vdpol = 1;
+
+ rem = of_graph_get_remote_port_parent(endpoint);
+ if (!rem) {
+ dev_err(&pdev->dev, "Remote device at %s not found\n",
+ endpoint->full_name);
+ goto done;
+ }
+
+ strncpy(sdinfo->name, rem->name, sizeof(sdinfo->name));
+
+ pdata->asd[i] = devm_kzalloc(&pdev->dev,
+ sizeof(struct v4l2_async_subdev),
+ GFP_KERNEL);
+ if (!pdata->asd[i])
+ goto done;
+
+ pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
+ pdata->asd[i]->match.of.node = rem;
+ of_node_put(endpoint);
+ of_node_put(rem);
+ }
+
+ of_node_put(endpoint);
+ return pdata;
+
+done:
+ of_node_put(endpoint);
+ of_node_put(rem);
+ return NULL;
+}
+
+/*
+ * vpfe_probe : This function creates device entries by registering
+ * itself to the V4L2 driver and initializes the fields of each
+ * device object
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
+ struct vpfe_device *vpfe;
+ struct vpfe_ccdc *ccdc;
+ struct resource *res;
+ int ret;
+
+ if (!vpfe_cfg) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
+ if (!vpfe)
+ return -ENOMEM;
+
+ vpfe->pdev = &pdev->dev;
+ vpfe->cfg = vpfe_cfg;
+ ccdc = &vpfe->ccdc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ccdc->ccdc_cfg.base_addr))
+ return PTR_ERR(ccdc->ccdc_cfg.base_addr);
+
+ vpfe->irq = platform_get_irq(pdev, 0);
+ if (vpfe->irq <= 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
+ "vpfe_capture0", vpfe);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request interrupt\n");
+ return -EINVAL;
+ }
+
+ vpfe->video_dev = video_device_alloc();
+ if (!vpfe->video_dev) {
+ dev_err(&pdev->dev, "Unable to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
+ if (ret) {
+ vpfe_err(vpfe,
+ "Unable to register v4l2 device.\n");
+ goto probe_out_video_release;
+ }
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe);
+ /* Enabling module functional clock */
+ pm_runtime_enable(&pdev->dev);
+
+ /* for now just enable it here instead of waiting for the open */
+ pm_runtime_get_sync(&pdev->dev);
+
+ vpfe_ccdc_config_defaults(ccdc);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ vpfe->sd = devm_kzalloc(&pdev->dev, sizeof(struct v4l2_subdev *) *
+ ARRAY_SIZE(vpfe->cfg->asd), GFP_KERNEL);
+ if (!vpfe->sd) {
+ ret = -ENOMEM;
+ goto probe_out_v4l2_unregister;
+ }
+
+ vpfe->notifier.subdevs = vpfe->cfg->asd;
+ vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
+ vpfe->notifier.bound = vpfe_async_bound;
+ vpfe->notifier.complete = vpfe_async_complete;
+ ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
+ &vpfe->notifier);
+ if (ret) {
+ vpfe_err(vpfe, "Error registering async notifier\n");
+ ret = -EINVAL;
+ goto probe_out_v4l2_unregister;
+ }
+
+ return 0;
+
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+probe_out_video_release:
+ if (!video_is_registered(vpfe->video_dev))
+ video_device_release(vpfe->video_dev);
+ return ret;
+}
+
+/*
+ * vpfe_remove : It unregisters the device from the V4L2 driver
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+
+ vpfe_dbg(2, vpfe, "vpfe_remove\n");
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_notifier_unregister(&vpfe->notifier);
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ video_unregister_device(vpfe->video_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static void vpfe_save_context(struct vpfe_ccdc *ccdc)
+{
+ ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
+ ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
+ ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
+ ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
+ ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
+ ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
+ ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
+ ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
+ ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
+ ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
+ ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
+ ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
+ ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
+ ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HD_VD_WID);
+ ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
+ VPFE_PIX_LINES);
+ ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HORZ_INFO);
+ ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
+ VPFE_VERT_START);
+ ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
+ VPFE_VERT_LINES);
+ ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HSIZE_OFF);
+}
+
+static int vpfe_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+ struct vpfe_ccdc *ccdc = &vpfe->ccdc;
+
+ /* if streaming has not started we don't care */
+ if (!vb2_start_streaming_called(&vpfe->buffer_queue))
+ return 0;
+
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
+
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
+
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
+
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
+{
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
+ VPFE_HD_VD_WID);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
+ VPFE_PIX_LINES);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
+ VPFE_HORZ_INFO);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
+ VPFE_VERT_START);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
+ VPFE_VERT_LINES);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
+ VPFE_HSIZE_OFF);
+}
+
+static int vpfe_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+ struct vpfe_ccdc *ccdc = &vpfe->ccdc;
+
+ /* if streaming has not started we don't care */
+ if (!vb2_start_streaming_called(&vpfe->buffer_queue))
+ return 0;
+
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
+
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
+
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
+
+static const struct of_device_id vpfe_of_match[] = {
+ { .compatible = "ti,am437x-vpfe", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vpfe_of_match);
+
+static struct platform_driver vpfe_driver = {
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+ .driver = {
+ .name = VPFE_MODULE_NAME,
+ .pm = &vpfe_pm_ops,
+ .of_match_table = of_match_ptr(vpfe_of_match),
+ },
+};
+
+module_platform_driver(vpfe_driver);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("TI AM437x VPFE driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPFE_VERSION);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
new file mode 100644
index 000000000000..0f557352313d
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AM437X_VPFE_H
+#define AM437X_VPFE_H
+
+#include <linux/am437x-vpfe.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "am437x-vpfe_regs.h"
+
+enum vpfe_pin_pol {
+ VPFE_PINPOL_POSITIVE = 0,
+ VPFE_PINPOL_NEGATIVE,
+};
+
+enum vpfe_hw_if_type {
+ /* Raw Bayer */
+ VPFE_RAW_BAYER = 0,
+ /* BT656 - 8 bit */
+ VPFE_BT656,
+ /* BT656 - 10 bit */
+ VPFE_BT656_10BIT,
+ /* YCbCr - 8 bit with external sync */
+ VPFE_YCBCR_SYNC_8,
+ /* YCbCr - 16 bit with external sync */
+ VPFE_YCBCR_SYNC_16,
+};
+
+/* interface description */
+struct vpfe_hw_if_param {
+ enum vpfe_hw_if_type if_type;
+ enum vpfe_pin_pol hdpol;
+ enum vpfe_pin_pol vdpol;
+ unsigned int bus_width;
+};
+
+#define VPFE_MAX_SUBDEV 1
+#define VPFE_MAX_INPUTS 1
+
+struct vpfe_pixel_format {
+ struct v4l2_fmtdesc fmtdesc;
+ /* bytes per pixel */
+ int bpp;
+};
+
+struct vpfe_std_info {
+ int active_pixels;
+ int active_lines;
+ /* current frame format */
+ int frame_format;
+};
+
+struct vpfe_route {
+ u32 input;
+ u32 output;
+};
+
+struct vpfe_subdev_info {
+ char name[32];
+ /* Sub device group id */
+ int grp_id;
+ /* inputs available at the sub device */
+ struct v4l2_input inputs[VPFE_MAX_INPUTS];
+ /* Sub dev routing information for each input */
+ struct vpfe_route *routes;
+ /* check if sub dev supports routing */
+ int can_route;
+ /* ccdc bus/interface configuration */
+ struct vpfe_hw_if_param vpfe_param;
+ struct v4l2_subdev *sd;
+};
+
+struct vpfe_config {
+ /* information about each subdev */
+ struct vpfe_subdev_info sub_devs[VPFE_MAX_SUBDEV];
+ /* Flat array, arranged in groups */
+ struct v4l2_async_subdev *asd[VPFE_MAX_SUBDEV];
+};
+
+struct vpfe_cap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+enum ccdc_pixfmt {
+ CCDC_PIXFMT_RAW = 0,
+ CCDC_PIXFMT_YCBCR_16BIT,
+ CCDC_PIXFMT_YCBCR_8BIT,
+};
+
+enum ccdc_frmfmt {
+ CCDC_FRMFMT_PROGRESSIVE = 0,
+ CCDC_FRMFMT_INTERLACED,
+};
+
+/* PIXEL ORDER IN MEMORY from LSB to MSB */
+/* only applicable for 8-bit input mode */
+enum ccdc_pixorder {
+ CCDC_PIXORDER_YCBYCR,
+ CCDC_PIXORDER_CBYCRY,
+};
+
+enum ccdc_buftype {
+ CCDC_BUFTYPE_FLD_INTERLEAVED,
+ CCDC_BUFTYPE_FLD_SEPARATED
+};
+
+
+/* returns the highest bit used for the gamma */
+static inline u8 ccdc_gamma_width_max_bit(enum vpfe_ccdc_gamma_width width)
+{
+ return 15 - width;
+}
+
+/* returns the highest bit used for this data size */
+static inline u8 ccdc_data_size_max_bit(enum vpfe_ccdc_data_size sz)
+{
+ return sz == VPFE_CCDC_DATA_8BITS ? 7 : 15 - sz;
+}
+
+/* Structure for CCDC configuration parameters for raw capture mode */
+struct ccdc_params_raw {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ struct v4l2_rect win;
+ /* Current Format Bytes Per Pixels */
+ unsigned int bytesperpixel;
+ /* Current Format Bytes per Lines
+ * (Aligned to 32 bytes) used for HORZ_INFO
+ */
+ unsigned int bytesperline;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+ /*
+ * enable to store the image in inverse
+ * order in memory(bottom to top)
+ */
+ unsigned char image_invert_enable;
+ /* configurable parameters */
+ struct vpfe_ccdc_config_params_raw config_params;
+};
+
+struct ccdc_params_ycbcr {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ struct v4l2_rect win;
+ /* Current Format Bytes Per Pixels */
+ unsigned int bytesperpixel;
+ /* Current Format Bytes per Lines
+ * (Aligned to 32 bytes) used for HORZ_INFO
+ */
+ unsigned int bytesperline;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* enable BT.656 embedded sync mode */
+ int bt656_enable;
+ /* cb:y:cr:y or y:cb:y:cr in memory */
+ enum ccdc_pixorder pix_order;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+};
+
+/*
+ * CCDC operational configuration
+ */
+struct ccdc_config {
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+};
+
+struct vpfe_ccdc {
+ struct ccdc_config ccdc_cfg;
+ u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
+};
+
+struct vpfe_device {
+ /* V4l2 specific parameters */
+ /* Identifies video device for this channel */
+ struct video_device *video_dev;
+ /* sub devices */
+ struct v4l2_subdev **sd;
+ /* vpfe cfg */
+ struct vpfe_config *cfg;
+ /* V4l2 device */
+ struct v4l2_device v4l2_dev;
+ /* parent device */
+ struct device *pdev;
+ /* subdevice async Notifier */
+ struct v4l2_async_notifier notifier;
+ /* Indicates id of the field which is being displayed */
+ unsigned field;
+ unsigned sequence;
+ /* current interface type */
+ struct vpfe_hw_if_param vpfe_if_params;
+ /* ptr to currently selected sub device */
+ struct vpfe_subdev_info *current_subdev;
+ /* current input at the sub device */
+ int current_input;
+ /* Keeps track of the information about the standard */
+ struct vpfe_std_info std_info;
+ /* std index into std table */
+ int std_index;
+ /* IRQs used when CCDC output to SDRAM */
+ unsigned int irq;
+ /* Pointer pointing to current v4l2_buffer */
+ struct vpfe_cap_buffer *cur_frm;
+ /* Pointer pointing to next v4l2_buffer */
+ struct vpfe_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ /* Used to store current bytes per pixel based on current format */
+ unsigned int bpp;
+ /*
+ * used when IMP is chained to store the crop window which
+ * is different from the image window
+ */
+ struct v4l2_rect crop;
+ /* Buffer queue used in video-buf */
+ struct vb2_queue buffer_queue;
+ /* Allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ /* IRQ lock for DMA queue */
+ spinlock_t dma_queue_lock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /*
+ * offset where second field starts from the starting of the
+ * buffer for field separated YCbCr formats
+ */
+ u32 field_off;
+ struct vpfe_ccdc ccdc;
+};
+
+#endif /* AM437X_VPFE_H */
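Aside on the two inline helpers added in this header: ccdc_gamma_width_max_bit() and ccdc_data_size_max_bit() map the uapi gamma-width and data-size enums to the index of the most significant bit they cover. A minimal standalone sketch of that arithmetic follows; the enum values in it are assumptions chosen to match the "15 - width" formula (the real definitions live in include/uapi/linux/am437x-vpfe.h and are not part of this hunk):

/*
 * Illustration only -- the enum values below are assumed for the sake of
 * the example; the driver uses include/uapi/linux/am437x-vpfe.h.
 */
#include <stdio.h>

enum gamma_width {
	GAMMA_BITS_15_6 = 0,	/* gamma applied to bits 15..6 */
	GAMMA_BITS_14_5,
	GAMMA_BITS_13_4,
};

static unsigned int gamma_width_max_bit(enum gamma_width width)
{
	return 15 - width;	/* 15_6 -> 15, 14_5 -> 14, 13_4 -> 13 */
}

int main(void)
{
	printf("%u %u %u\n",
	       gamma_width_max_bit(GAMMA_BITS_15_6),
	       gamma_width_max_bit(GAMMA_BITS_14_5),
	       gamma_width_max_bit(GAMMA_BITS_13_4));
	return 0;
}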
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
new file mode 100644
index 000000000000..4a0ed29723e8
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -0,0 +1,140 @@
+/*
+ * TI AM437x Image Sensor Interface Registers
+ *
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef AM437X_VPFE_REGS_H
+#define AM437X_VPFE_REGS_H
+
+/* VPFE module register offset */
+#define VPFE_REVISION 0x0
+#define VPFE_PCR 0x4
+#define VPFE_SYNMODE 0x8
+#define VPFE_HD_VD_WID 0xc
+#define VPFE_PIX_LINES 0x10
+#define VPFE_HORZ_INFO 0x14
+#define VPFE_VERT_START 0x18
+#define VPFE_VERT_LINES 0x1c
+#define VPFE_CULLING 0x20
+#define VPFE_HSIZE_OFF 0x24
+#define VPFE_SDOFST 0x28
+#define VPFE_SDR_ADDR 0x2c
+#define VPFE_CLAMP 0x30
+#define VPFE_DCSUB 0x34
+#define VPFE_COLPTN 0x38
+#define VPFE_BLKCMP 0x3c
+#define VPFE_VDINT 0x48
+#define VPFE_ALAW 0x4c
+#define VPFE_REC656IF 0x50
+#define VPFE_CCDCFG 0x54
+#define VPFE_DMA_CNTL 0x98
+#define VPFE_SYSCONFIG 0x104
+#define VPFE_CONFIG 0x108
+#define VPFE_IRQ_EOI 0x110
+#define VPFE_IRQ_STS_RAW 0x114
+#define VPFE_IRQ_STS 0x118
+#define VPFE_IRQ_EN_SET 0x11c
+#define VPFE_IRQ_EN_CLR 0x120
+#define VPFE_REG_END 0x124
+
+/* Define bit fields within selected registers */
+#define VPFE_FID_POL_MASK 1
+#define VPFE_FID_POL_SHIFT 4
+#define VPFE_HD_POL_MASK 1
+#define VPFE_HD_POL_SHIFT 3
+#define VPFE_VD_POL_MASK 1
+#define VPFE_VD_POL_SHIFT 2
+#define VPFE_HSIZE_OFF_MASK 0xffffffe0
+#define VPFE_32BYTE_ALIGN_VAL 31
+#define VPFE_FRM_FMT_MASK 0x1
+#define VPFE_FRM_FMT_SHIFT 7
+#define VPFE_DATA_SZ_MASK 7
+#define VPFE_DATA_SZ_SHIFT 8
+#define VPFE_PIX_FMT_MASK 3
+#define VPFE_PIX_FMT_SHIFT 12
+#define VPFE_VP2SDR_DISABLE 0xfffbffff
+#define VPFE_WEN_ENABLE (1 << 17)
+#define VPFE_SDR2RSZ_DISABLE 0xfff7ffff
+#define VPFE_VDHDEN_ENABLE (1 << 16)
+#define VPFE_LPF_ENABLE (1 << 14)
+#define VPFE_ALAW_ENABLE (1 << 3)
+#define VPFE_ALAW_GAMMA_WD_MASK 7
+#define VPFE_BLK_CLAMP_ENABLE (1 << 31)
+#define VPFE_BLK_SGAIN_MASK 0x1f
+#define VPFE_BLK_ST_PXL_MASK 0x7fff
+#define VPFE_BLK_ST_PXL_SHIFT 10
+#define VPFE_BLK_SAMPLE_LN_MASK 7
+#define VPFE_BLK_SAMPLE_LN_SHIFT 28
+#define VPFE_BLK_SAMPLE_LINE_MASK 7
+#define VPFE_BLK_SAMPLE_LINE_SHIFT 25
+#define VPFE_BLK_DC_SUB_MASK 0x03fff
+#define VPFE_BLK_COMP_MASK 0xff
+#define VPFE_BLK_COMP_GB_COMP_SHIFT 8
+#define VPFE_BLK_COMP_GR_COMP_SHIFT 16
+#define VPFE_BLK_COMP_R_COMP_SHIFT 24
+#define VPFE_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define VPFE_DATA_PACK_ENABLE (1 << 11)
+#define VPFE_HORZ_INFO_SPH_SHIFT 16
+#define VPFE_VERT_START_SLV0_SHIFT 16
+#define VPFE_VDINT_VDINT0_SHIFT 16
+#define VPFE_VDINT_VDINT1_MASK 0xffff
+#define VPFE_PPC_RAW 1
+#define VPFE_DCSUB_DEFAULT_VAL 0
+#define VPFE_CLAMP_DEFAULT_VAL 0
+#define VPFE_COLPTN_VAL 0xbb11bb11
+#define VPFE_TWO_BYTES_PER_PIXEL 2
+#define VPFE_INTERLACED_IMAGE_INVERT 0x4b6d
+#define VPFE_INTERLACED_NO_IMAGE_INVERT 0x0249
+#define VPFE_PROGRESSIVE_IMAGE_INVERT 0x4000
+#define VPFE_PROGRESSIVE_NO_IMAGE_INVERT 0
+#define VPFE_INTERLACED_HEIGHT_SHIFT 1
+#define VPFE_SYN_MODE_INPMOD_SHIFT 12
+#define VPFE_SYN_MODE_INPMOD_MASK 3
+#define VPFE_SYN_MODE_8BITS (7 << 8)
+#define VPFE_SYN_MODE_10BITS (6 << 8)
+#define VPFE_SYN_MODE_11BITS (5 << 8)
+#define VPFE_SYN_MODE_12BITS (4 << 8)
+#define VPFE_SYN_MODE_13BITS (3 << 8)
+#define VPFE_SYN_MODE_14BITS (2 << 8)
+#define VPFE_SYN_MODE_15BITS (1 << 8)
+#define VPFE_SYN_MODE_16BITS (0 << 8)
+#define VPFE_SYN_FLDMODE_MASK 1
+#define VPFE_SYN_FLDMODE_SHIFT 7
+#define VPFE_REC656IF_BT656_EN 3
+#define VPFE_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define VPFE_CCDCFG_Y8POS_SHIFT 11
+#define VPFE_CCDCFG_BW656_10BIT (1 << 5)
+#define VPFE_SDOFST_FIELD_INTERLEAVED 0x249
+#define VPFE_NO_CULLING 0xffff00ff
+#define VPFE_VDINT0 (1 << 0)
+#define VPFE_VDINT1 (1 << 1)
+#define VPFE_VDINT2 (1 << 2)
+#define VPFE_DMA_CNTL_OVERFLOW (1 << 31)
+
+#define VPFE_CONFIG_PCLK_INV_SHIFT 0
+#define VPFE_CONFIG_PCLK_INV_MASK 1
+#define VPFE_CONFIG_PCLK_INV_NOT_INV 0
+#define VPFE_CONFIG_PCLK_INV_INV 1
+#define VPFE_CONFIG_EN_SHIFT 1
+#define VPFE_CONFIG_EN_MASK 2
+#define VPFE_CONFIG_EN_DISABLE 0
+#define VPFE_CONFIG_EN_ENABLE 1
+#define VPFE_CONFIG_ST_SHIFT 2
+#define VPFE_CONFIG_ST_MASK 4
+#define VPFE_CONFIG_ST_OCP_ACTIVE 0
+#define VPFE_CONFIG_ST_OCP_STANDBY 1
+
+#endif /* AM437X_VPFE_REGS_H */
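The *_MASK / *_SHIFT pairs defined above are consumed with the usual read-modify-write idiom. A generic sketch of that idiom on a plain 32-bit value (illustration only, not code taken from the driver; which VPFE register a given field lives in is determined by the .c file and the hardware manual):

/*
 * Illustration only: generic <mask, shift> field update, e.g. for
 * VPFE_DATA_SZ_MASK / VPFE_DATA_SZ_SHIFT above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	reg &= ~(mask << shift);	/* clear the field */
	reg |= (val & mask) << shift;	/* insert the new value */
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	/* program a 3-bit field at bit 8, as DATA_SZ is laid out above */
	reg = field_set(reg, 0x7, 8, 0x5);
	printf("0x%08x\n", reg);	/* prints 0x00000500 */
	return 0;
}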
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index b4029ae293d3..856b542b35b9 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -718,6 +718,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
struct vb2_buffer *buf;
int gamma, ret, value;
u32 dst_fourcc;
+ int num_fb;
u32 stride;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
@@ -983,12 +984,14 @@ static int coda_start_encoding(struct coda_ctx *ctx)
v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
goto out;
}
+ num_fb = 2;
stride = q_data_src->bytesperline;
} else {
ctx->num_internal_frames = 0;
+ num_fb = 0;
stride = 0;
}
- coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, num_fb, CODA_CMD_SET_FRAME_BUF_NUM);
coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
if (dev->devtype->product == CODA_7541) {
@@ -1316,8 +1319,10 @@ static void coda_seq_end_work(struct work_struct *work)
static void coda_bit_release(struct coda_ctx *ctx)
{
+ mutex_lock(&ctx->buffer_mutex);
coda_free_framebuffers(ctx);
coda_free_context_buffers(ctx);
+ mutex_unlock(&ctx->buffer_mutex);
}
const struct coda_context_ops coda_bit_encode_ops = {
@@ -1431,9 +1436,10 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
height = val & CODA7_PICHEIGHT_MASK;
}
- if (width > q_data_dst->width || height > q_data_dst->height) {
+ if (width > q_data_dst->bytesperline || height > q_data_dst->height) {
v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
- width, height, q_data_dst->width, q_data_dst->height);
+ width, height, q_data_dst->bytesperline,
+ q_data_dst->height);
return -EINVAL;
}
@@ -1565,6 +1571,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
struct vb2_buffer *dst_buf;
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
+ struct coda_buffer_meta *meta;
u32 reg_addr, reg_stride;
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1643,12 +1650,12 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
- if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
- struct coda_buffer_meta *meta;
+ meta = list_first_entry_or_null(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+
+ if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
/* If this is the last buffer in the bitstream, add padding */
- meta = list_first_entry(&ctx->buffer_meta_list,
- struct coda_buffer_meta, list);
if (meta->end == (ctx->bitstream_fifo.kfifo.in &
ctx->bitstream_fifo.kfifo.mask)) {
static unsigned char buf[512];
@@ -1665,6 +1672,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_kfifo_sync_to_device_full(ctx);
+ /* Clear decode success flag */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
return 0;
@@ -1821,6 +1831,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
memset(&ctx->frame_metas[decoded_idx], 0,
sizeof(struct coda_buffer_meta));
ctx->frame_metas[decoded_idx].sequence = val;
+ ctx->sequence_offset++;
}
mutex_unlock(&ctx->bitstream_mutex);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 39330a70f752..6f32e6d6b156 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -37,6 +37,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
#include "coda.h"
@@ -180,6 +181,7 @@ struct coda_video_device {
const char *name;
enum coda_inst_type type;
const struct coda_context_ops *ops;
+ bool direct;
u32 src_formats[CODA_MAX_FORMATS];
u32 dst_formats[CODA_MAX_FORMATS];
};
@@ -468,6 +470,18 @@ static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
return 0;
}
+static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage,
+ u32 width, u32 height)
+{
+ /*
+ * This is a rough estimate for sensible compressed buffer
+ * sizes (between 1 and 16 bits per pixel). This could be
+ * improved by better format specific worst case estimates.
+ */
+ return round_up(clamp(sizeimage, width * height / 8,
+ width * height * 2), PAGE_SIZE);
+}
+
static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
struct v4l2_format *f)
{
@@ -513,15 +527,10 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
f->fmt.pix.bytesperline = 0;
- /*
- * This is a rough estimate for sensible compressed buffer
- * sizes (between 1 and 16 bits per pixel). This could be
- * improved by better format specific worst case estimates.
- */
- f->fmt.pix.sizeimage = round_up(clamp(f->fmt.pix.sizeimage,
- f->fmt.pix.width * f->fmt.pix.height / 8,
- f->fmt.pix.width * f->fmt.pix.height * 2),
- PAGE_SIZE);
+ f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx,
+ f->fmt.pix.sizeimage,
+ f->fmt.pix.width,
+ f->fmt.pix.height);
break;
default:
BUG();
@@ -592,7 +601,11 @@ static int coda_try_fmt_vid_out(struct file *file, void *priv,
if (ret < 0)
return ret;
- if (!f->fmt.pix.colorspace) {
+ switch (f->fmt.pix.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_JPEG:
+ break;
+ default:
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
else
@@ -670,6 +683,7 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
ctx->colorspace = f->fmt.pix.colorspace;
+ memset(&f_cap, 0, sizeof(f_cap));
f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
coda_g_fmt(file, priv, &f_cap);
f_cap.fmt.pix.width = f->fmt.pix.width;
@@ -908,7 +922,8 @@ static void coda_pic_run_work(struct work_struct *work)
ctx->ops->finish_run(ctx);
}
- if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out))
+ if ((ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) &&
+ ctx->ops->seq_end_work)
queue_work(dev->workqueue, &ctx->seq_end_work);
mutex_unlock(&dev->coda_mutex);
@@ -939,15 +954,43 @@ static int coda_job_ready(void *m2m_priv)
return 0;
}
- if (ctx->hold ||
- ((ctx->inst_type == CODA_INST_DECODER) &&
- !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
- (coda_get_bitstream_payload(ctx) < 512) &&
- !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: not enough bitstream data.\n",
- ctx->idx);
- return 0;
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
+ struct list_head *meta;
+ bool stream_end;
+ int num_metas;
+ int src_bufs;
+
+ if (ctx->hold && !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: on hold for more buffers.\n",
+ ctx->idx);
+ return 0;
+ }
+
+ stream_end = ctx->bit_stream_param &
+ CODA_BIT_STREAM_END_FLAG;
+
+ num_metas = 0;
+ list_for_each(meta, &ctx->buffer_meta_list)
+ num_metas++;
+
+ src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
+
+ if (!stream_end && (num_metas + src_bufs) < 2) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: need 2 buffers available (%d, %d)\n",
+ ctx->idx, num_metas, src_bufs);
+ return 0;
+ }
+
+
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
+ !stream_end && (coda_get_bitstream_payload(ctx) < 512)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: not enough bitstream data (%d).\n",
+ ctx->idx, coda_get_bitstream_payload(ctx));
+ return 0;
+ }
}
if (ctx->aborting) {
@@ -1023,13 +1066,14 @@ static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
static void set_default_params(struct coda_ctx *ctx)
{
- unsigned int max_w, max_h, size;
+ unsigned int max_w, max_h, usize, csize;
ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0],
ctx->cvd->dst_formats[0]);
max_w = min(ctx->codec->max_w, 1920U);
max_h = min(ctx->codec->max_h, 1088U);
- size = max_w * max_h * 3 / 2;
+ usize = max_w * max_h * 3 / 2;
+ csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h);
ctx->params.codec_mode = ctx->codec->mode;
ctx->colorspace = V4L2_COLORSPACE_REC709;
@@ -1044,14 +1088,14 @@ static void set_default_params(struct coda_ctx *ctx)
ctx->q_data[V4L2_M2M_DST].height = max_h;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = usize;
ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
- ctx->q_data[V4L2_M2M_DST].sizeimage = round_up(size, PAGE_SIZE);
+ ctx->q_data[V4L2_M2M_DST].sizeimage = csize;
} else {
ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = round_up(size, PAGE_SIZE);
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = csize;
ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = usize;
}
ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
@@ -1080,6 +1124,7 @@ static int coda_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
+ /* Set to vb2-dma-contig allocator context, ignored by vb2-vmalloc */
alloc_ctxs[0] = ctx->dev->alloc_ctx;
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
@@ -1109,6 +1154,7 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
static void coda_buf_queue(struct vb2_buffer *vb)
{
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *vq = vb->vb2_queue;
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
@@ -1117,8 +1163,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
* In the decoder case, immediately try to copy the buffer into the
* bitstream ringbuffer and mark it as ready to be dequeued.
*/
- if (ctx->inst_type == CODA_INST_DECODER &&
- vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (ctx->bitstream.size && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/*
* For backwards compatibility, queuing an empty buffer marks
* the stream end
@@ -1218,7 +1263,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
/* Allow BIT decoder device_run with no new buffers queued */
- if (ctx->inst_type == CODA_INST_DECODER)
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
ctx->gopcounter = ctx->params.gop_size - 1;
@@ -1271,7 +1316,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
coda_bit_stream_end_flag(ctx);
- ctx->isequence = 0;
+ ctx->qsequence = 0;
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
@@ -1290,6 +1335,10 @@ static void coda_stop_streaming(struct vb2_queue *q)
if (!ctx->streamon_out && !ctx->streamon_cap) {
struct coda_buffer_meta *meta;
+ if (ctx->ops->seq_end_work) {
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+ flush_work(&ctx->seq_end_work);
+ }
mutex_lock(&ctx->bitstream_mutex);
while (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
@@ -1300,6 +1349,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
mutex_unlock(&ctx->bitstream_mutex);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
+ ctx->initialized = 0;
ctx->runcounter = 0;
ctx->aborting = 0;
}
@@ -1521,8 +1571,8 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
- src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
ret = coda_queue_init(priv, src_vq);
if (ret)
@@ -1577,9 +1627,11 @@ static int coda_open(struct file *file)
ctx->cvd = to_coda_video_device(vdev);
ctx->inst_type = ctx->cvd->type;
ctx->ops = ctx->cvd->ops;
+ ctx->use_bit = !ctx->cvd->direct;
init_completion(&ctx->completion);
INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
- INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
+ if (ctx->ops->seq_end_work)
+ INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
@@ -1630,22 +1682,25 @@ static int coda_open(struct file *file)
ctx->fh.ctrl_handler = &ctx->ctrls;
- ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
- CODA_PARA_BUF_SIZE, "parabuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
- goto err_dma_alloc;
+ if (ctx->use_bit) {
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
+ CODA_PARA_BUF_SIZE, "parabuf");
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+ goto err_dma_alloc;
+ }
}
-
- ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
- ctx->bitstream.vaddr = dma_alloc_writecombine(
- &dev->plat_dev->dev, ctx->bitstream.size,
- &ctx->bitstream.paddr, GFP_KERNEL);
- if (!ctx->bitstream.vaddr) {
- v4l2_err(&dev->v4l2_dev,
- "failed to allocate bitstream ringbuffer");
- ret = -ENOMEM;
- goto err_dma_writecombine;
+ if (ctx->use_bit && ctx->inst_type == CODA_INST_DECODER) {
+ ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
+ ctx->bitstream.vaddr = dma_alloc_writecombine(
+ &dev->plat_dev->dev, ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate bitstream ringbuffer");
+ ret = -ENOMEM;
+ goto err_dma_writecombine;
+ }
}
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
@@ -1693,16 +1748,14 @@ static int coda_release(struct file *file)
v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
ctx);
- debugfs_remove_recursive(ctx->debugfs_entry);
-
- if (ctx->inst_type == CODA_INST_DECODER)
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
coda_bit_stream_end_flag(ctx);
/* If this instance is running, call .job_abort and wait for it to end */
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
/* In case the instance was not running, we still need to call SEQ_END */
- if (ctx->initialized) {
+ if (ctx->initialized && ctx->ops->seq_end_work) {
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
@@ -1728,6 +1781,7 @@ static int coda_release(struct file *file)
clear_bit(ctx->idx, &dev->instance_mask);
if (ctx->ops->release)
ctx->ops->release(ctx);
+ debugfs_remove_recursive(ctx->debugfs_entry);
kfree(ctx);
return 0;
@@ -1844,10 +1898,11 @@ static int coda_register_device(struct coda_dev *dev, int i)
{
struct video_device *vfd = &dev->vfd[i];
- if (i > ARRAY_SIZE(dev->vfd))
+ if (i >= dev->devtype->num_vdevs)
return -EINVAL;
- snprintf(vfd->name, sizeof(vfd->name), dev->devtype->vdevs[i]->name);
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ dev->devtype->vdevs[i]->name);
vfd->fops = &coda_fops;
vfd->ioctl_ops = &coda_ioctl_ops;
vfd->release = video_device_release_empty,
@@ -2001,7 +2056,6 @@ static const struct coda_devtype coda_devdata[] = {
static struct platform_device_id coda_platform_ids[] = {
{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
- { .name = "coda-imx53", .driver_data = CODA_IMX53 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, coda_platform_ids);
@@ -2142,6 +2196,7 @@ static int coda_probe(struct platform_device *pdev)
if (!dev->iram.vaddr) {
dev_warn(&pdev->dev, "unable to alloc iram\n");
} else {
+ memset(dev->iram.vaddr, 0, dev->iram.size);
dev->iram.blob.data = dev->iram.vaddr;
dev->iram.blob.size = dev->iram.size;
dev->iram.dentry = debugfs_create_blob("iram", 0644,
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 5dd47e5f97c1..0c35cd5032ff 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -198,7 +198,6 @@ struct coda_ctx {
int initialized;
int streamon_out;
int streamon_cap;
- u32 isequence;
u32 qsequence;
u32 osequence;
u32 sequence_offset;
@@ -236,6 +235,7 @@ struct coda_ctx {
u32 frame_mem_ctrl;
int display_idx;
struct dentry *debugfs_entry;
+ bool use_bit;
};
extern int coda_debug;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 8e015b8aa8fa..7d026241171b 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -304,9 +304,9 @@
#define CODA_RATECONTROL_AUTOSKIP_OFFSET 31
#define CODA_RATECONTROL_AUTOSKIP_MASK 0x01
#define CODA_RATECONTROL_INITIALDELAY_OFFSET 16
-#define CODA_RATECONTROL_INITIALDELAY_MASK 0x7f
+#define CODA_RATECONTROL_INITIALDELAY_MASK 0x7fff
#define CODA_RATECONTROL_BITRATE_OFFSET 1
-#define CODA_RATECONTROL_BITRATE_MASK 0x7f
+#define CODA_RATECONTROL_BITRATE_MASK 0x7fff
#define CODA_RATECONTROL_ENABLE_OFFSET 0
#define CODA_RATECONTROL_ENABLE_MASK 0x01
#define CODA_CMD_ENC_SEQ_RC_BUF_SIZE 0x1b0
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index d9e1ddb586b1..469e9d28cec0 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_DAVINCI_VPIF_DISPLAY
tristate "TI DaVinci VPIF V4L2-Display driver"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
depends on ARCH_DAVINCI || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
@@ -16,7 +16,7 @@ config VIDEO_DAVINCI_VPIF_DISPLAY
config VIDEO_DAVINCI_VPIF_CAPTURE
tristate "TI DaVinci VPIF video capture driver"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
depends on ARCH_DAVINCI || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
@@ -75,7 +75,7 @@ config VIDEO_DM365_ISIF
config VIDEO_DAVINCI_VPBE_DISPLAY
tristate "TI DaVinci VPBE V4L2-Display driver"
- depends on ARCH_DAVINCI
+ depends on VIDEO_V4L2 && ARCH_DAVINCI
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 0abdb17fb19c..fa572aacdb3f 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -466,18 +466,6 @@ static inline void gsc_hw_clear_irq(struct gsc_dev *dev, int irq)
writel(cfg, dev->regs + GSC_IRQ);
}
-static inline void gsc_lock(struct vb2_queue *vq)
-{
- struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_lock(&ctx->gsc_dev->lock);
-}
-
-static inline void gsc_unlock(struct vb2_queue *vq)
-{
- struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->gsc_dev->lock);
-}
-
static inline bool gsc_ctx_state_is_set(u32 mask, struct gsc_ctx *ctx)
{
unsigned long flags;
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 74e1de637e8f..d5cffef2e227 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -267,8 +267,8 @@ static struct vb2_ops gsc_m2m_qops = {
.queue_setup = gsc_m2m_queue_setup,
.buf_prepare = gsc_m2m_buf_prepare,
.buf_queue = gsc_m2m_buf_queue,
- .wait_prepare = gsc_unlock,
- .wait_finish = gsc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = gsc_m2m_stop_streaming,
.start_streaming = gsc_m2m_start_streaming,
};
@@ -590,6 +590,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->gsc_dev->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -603,6 +604,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->gsc_dev->lock;
return vb2_queue_init(dst_vq);
}
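From here on, several drivers in this series drop their hand-rolled wait_prepare/wait_finish callbacks in favour of the videobuf2 helpers plus a per-queue lock, as gsc-m2m.c does above. A minimal sketch of the pattern under assumed names ("my_dev", "my_qops" and the queue-init helper are placeholders; vb2_ops_wait_prepare() and vb2_ops_wait_finish() are the real vb2 symbols, which unlock and relock the mutex in vb2_queue::lock around blocking waits):

/*
 * Sketch only, not from any driver in this series.
 */
#include <linux/mutex.h>
#include <media/videobuf2-core.h>

struct my_dev {
	struct mutex lock;
	struct vb2_queue queue;
};

static const struct vb2_ops my_qops = {
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
	/* .queue_setup, .buf_queue, ... as before */
};

static int my_queue_init(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->ops  = &my_qops;
	q->lock = &dev->lock;	/* replaces the custom lock/unlock callbacks */
	return vb2_queue_init(q);
}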
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
index 6265d36adceb..4bf5bd1e90d6 100644
--- a/drivers/media/platform/marvell-ccic/Kconfig
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -5,6 +5,7 @@ config VIDEO_CAFE_CCIC
select VIDEO_OV7670
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
---help---
This is a video4linux2 driver for the Marvell 88ALP01 integrated
CMOS camera controller. This is the controller found on first-
@@ -13,7 +14,7 @@ config VIDEO_CAFE_CCIC
config VIDEO_MMP_CAMERA
tristate "Marvell Armada 610 integrated camera controller support"
depends on ARCH_MMP && I2C && VIDEO_V4L2
- depends on HAS_DMA
+ depends on HAS_DMA && BROKEN
select VIDEO_OV7670
select I2C_GPIO
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 193373ff268d..dd5b1415f974 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1913,7 +1913,6 @@ int mccic_register(struct mcam_camera *cam)
mutex_lock(&cam->s_mutex);
cam->vdev = mcam_v4l_template;
- cam->vdev.debug = 0;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
video_set_drvdata(&cam->vdev, cam);
ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 51c2129bdcc6..deca80903c3a 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -220,6 +220,9 @@ static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
return ISPTCTRL_CTRL_DIV_BYPASS;
}
+ if (*rate == 0)
+ *rate = 1;
+
divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index b463fe172d16..3fe9047ef466 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->card, video->video.name, sizeof(cap->card));
strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index aa40c8269ab8..54479d60cc0d 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -536,24 +536,12 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&camif->slock, flags);
}
-static void camif_lock(struct vb2_queue *vq)
-{
- struct camif_vp *vp = vb2_get_drv_priv(vq);
- mutex_lock(&vp->camif->lock);
-}
-
-static void camif_unlock(struct vb2_queue *vq)
-{
- struct camif_vp *vp = vb2_get_drv_priv(vq);
- mutex_unlock(&vp->camif->lock);
-}
-
static const struct vb2_ops s3c_camif_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .wait_prepare = camif_unlock,
- .wait_finish = camif_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
};
@@ -1161,6 +1149,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
q->buf_struct_size = sizeof(struct camif_buffer);
q->drv_priv = vp;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &vp->camif->lock;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 47ba8fbb0426..ec3e1248923d 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index fbfdf03b9054..8e44a59d8ec2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -810,6 +810,7 @@ static int s5p_mfc_open(struct file *file)
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->drv_priv = &ctx->fh;
+ q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index c6c3452ccca1..aebe4fd7f03a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-ctrls.h>
@@ -813,7 +812,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
unsigned long flags;
switch (cmd->cmd) {
- case V4L2_ENC_CMD_STOP:
+ case V4L2_DEC_CMD_STOP:
if (cmd->flags != 0)
return -EINVAL;
@@ -944,22 +943,6 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
return 0;
}
-static void s5p_mfc_unlock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_unlock(&dev->mfc_mutex);
-}
-
-static void s5p_mfc_lock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_lock(&dev->mfc_mutex);
-}
-
static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -1107,8 +1090,8 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
static struct vb2_ops s5p_mfc_dec_qops = {
.queue_setup = s5p_mfc_queue_setup,
- .wait_prepare = s5p_mfc_unlock,
- .wait_finish = s5p_mfc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.buf_init = s5p_mfc_buf_init,
.start_streaming = s5p_mfc_start_streaming,
.stop_streaming = s5p_mfc_stop_streaming,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index bd64f1dcbdb5..e65993f4b901 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
@@ -1867,22 +1866,6 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
return 0;
}
-static void s5p_mfc_unlock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_unlock(&dev->mfc_mutex);
-}
-
-static void s5p_mfc_lock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_lock(&dev->mfc_mutex);
-}
-
static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -2052,8 +2035,8 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
static struct vb2_ops s5p_mfc_enc_qops = {
.queue_setup = s5p_mfc_queue_setup,
- .wait_prepare = s5p_mfc_unlock,
- .wait_finish = s5p_mfc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.buf_init = s5p_mfc_buf_init,
.buf_prepare = s5p_mfc_buf_prepare,
.start_streaming = s5p_mfc_start_streaming,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 9aea179943ce..d826c58b5d53 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1340,11 +1340,7 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
/* FMO_ASO_CTRL - 0: Enable, 1: Disable */
reg |= (fmo_aso_ctrl << S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6);
- /* When user sets desplay_delay to 0,
- * It works as "display_delay enable" and delay set to 0.
- * If user wants display_delay disable, It should be
- * set to negative value. */
- if (ctx->display_delay >= 0) {
+ if (ctx->display_delay_enable) {
reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6);
writel(ctx->display_delay, mfc_regs->d_display_delay);
}
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index b4d2696501e4..72d4f2e1efc0 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -926,22 +926,6 @@ static void buf_queue(struct vb2_buffer *vb)
mxr_dbg(mdev, "queuing buffer\n");
}
-static void wait_lock(struct vb2_queue *vq)
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
-
- mxr_dbg(layer->mdev, "%s\n", __func__);
- mutex_lock(&layer->mutex);
-}
-
-static void wait_unlock(struct vb2_queue *vq)
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
-
- mxr_dbg(layer->mdev, "%s\n", __func__);
- mutex_unlock(&layer->mutex);
-}
-
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct mxr_layer *layer = vb2_get_drv_priv(vq);
@@ -1040,8 +1024,8 @@ static void stop_streaming(struct vb2_queue *vq)
static struct vb2_ops mxr_video_qops = {
.queue_setup = queue_setup,
.buf_queue = buf_queue,
- .wait_prepare = wait_unlock,
- .wait_finish = wait_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
};
@@ -1122,6 +1106,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
.ops = &mxr_video_qops,
.min_buffers_needed = 1,
.mem_ops = &vb2_dma_contig_memops,
+ .lock = &layer->mutex,
};
return layer;
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index aaa1f6f25a29..a901b6248557 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -242,20 +242,6 @@ static void sh_veu_job_abort(void *priv)
veu->aborting = true;
}
-static void sh_veu_lock(void *priv)
-{
- struct sh_veu_dev *veu = priv;
-
- mutex_lock(&veu->fop_lock);
-}
-
-static void sh_veu_unlock(void *priv)
-{
- struct sh_veu_dev *veu = priv;
-
- mutex_unlock(&veu->fop_lock);
-}
-
static void sh_veu_process(struct sh_veu_dev *veu,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf)
@@ -950,36 +936,28 @@ static void sh_veu_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
}
-static void sh_veu_wait_prepare(struct vb2_queue *q)
-{
- sh_veu_unlock(vb2_get_drv_priv(q));
-}
-
-static void sh_veu_wait_finish(struct vb2_queue *q)
-{
- sh_veu_lock(vb2_get_drv_priv(q));
-}
-
static const struct vb2_ops sh_veu_qops = {
.queue_setup = sh_veu_queue_setup,
.buf_prepare = sh_veu_buf_prepare,
.buf_queue = sh_veu_buf_queue,
- .wait_prepare = sh_veu_wait_prepare,
- .wait_finish = sh_veu_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
+ struct sh_veu_dev *veu = priv;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- src_vq->drv_priv = priv;
+ src_vq->drv_priv = veu;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &sh_veu_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->lock = &veu->fop_lock;
ret = vb2_queue_init(src_vq);
if (ret < 0)
@@ -988,10 +966,11 @@ static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- dst_vq->drv_priv = priv;
+ dst_vq->drv_priv = veu;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &sh_veu_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->lock = &veu->fop_lock;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 8efe40337608..8526bf5c8429 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -455,8 +455,8 @@ static struct vb2_ops isi_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -465,6 +465,8 @@ static struct vb2_ops isi_video_qops = {
static int isi_camera_init_videobuf(struct vb2_queue *q,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP;
q->drv_priv = icd;
@@ -472,6 +474,7 @@ static int isi_camera_init_videobuf(struct vb2_queue *q,
q->ops = &isi_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
return vb2_queue_init(q);
}
@@ -760,8 +763,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
{
strcpy(cap->driver, "atmel-isi");
strcpy(cap->card, "Atmel Image Sensor Interface");
- cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ce72bd26a6ac..192377f55840 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index a60c3bb0e4cc..3435fd2ca8ec 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -435,14 +435,16 @@ static struct vb2_ops mx3_videobuf_ops = {
.buf_queue = mx3_videobuf_queue,
.buf_cleanup = mx3_videobuf_release,
.buf_init = mx3_videobuf_init,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = mx3_stop_streaming,
};
static int mx3_camera_init_videobuf(struct vb2_queue *q,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->drv_priv = icd;
@@ -450,6 +452,7 @@ static int mx3_camera_init_videobuf(struct vb2_queue *q,
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct mx3_camera_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
return vb2_queue_init(q);
}
@@ -967,7 +970,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index e6b93281f246..16f65ecb70a3 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 951226af0eba..8d6e343fec0f 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 0c1f55648106..279ab9f6ae38 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -804,62 +804,26 @@ error:
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
-static void rcar_vin_videobuf_release(struct vb2_buffer *vb)
+/*
+ * Wait for capture to stop and all in-flight buffers to be finished with by
+ * the video hardware. This must be called under &priv->lock
+ *
+ */
+static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- unsigned int i;
- int buf_in_use = 0;
-
- spin_lock_irq(&priv->lock);
-
- /* Is the buffer in use by the VIN hardware? */
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- if (priv->queue_buf[i] == vb) {
- buf_in_use = 1;
- break;
- }
- }
-
- if (buf_in_use) {
- while (priv->state != STOPPED) {
-
- /* issue stop if running */
- if (priv->state == RUNNING)
- rcar_vin_request_capture_stop(priv);
+ while (priv->state != STOPPED) {
+ /* issue stop if running */
+ if (priv->state == RUNNING)
+ rcar_vin_request_capture_stop(priv);
- /* wait until capturing has been stopped */
- if (priv->state == STOPPING) {
- priv->request_to_stop = true;
- spin_unlock_irq(&priv->lock);
- wait_for_completion(&priv->capture_stop);
- spin_lock_irq(&priv->lock);
- }
- }
- /*
- * Capturing has now stopped. The buffer we have been asked
- * to release could be any of the current buffers in use, so
- * release all buffers that are in use by HW
- */
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- if (priv->queue_buf[i]) {
- vb2_buffer_done(priv->queue_buf[i],
- VB2_BUF_STATE_ERROR);
- priv->queue_buf[i] = NULL;
- }
+ /* wait until capturing has been stopped */
+ if (priv->state == STOPPING) {
+ priv->request_to_stop = true;
+ spin_unlock_irq(&priv->lock);
+ wait_for_completion(&priv->capture_stop);
+ spin_lock_irq(&priv->lock);
}
- } else {
- list_del_init(to_buf_list(vb));
}
-
- spin_unlock_irq(&priv->lock);
-}
-
-static int rcar_vin_videobuf_init(struct vb2_buffer *vb)
-{
- INIT_LIST_HEAD(to_buf_list(vb));
- return 0;
}
static void rcar_vin_stop_streaming(struct vb2_queue *vq)
@@ -868,21 +832,34 @@ static void rcar_vin_stop_streaming(struct vb2_queue *vq)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
struct list_head *buf_head, *tmp;
+ int i;
spin_lock_irq(&priv->lock);
- list_for_each_safe(buf_head, tmp, &priv->capture)
+ rcar_vin_wait_stop_streaming(priv);
+
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ if (priv->queue_buf[i]) {
+ vb2_buffer_done(priv->queue_buf[i],
+ VB2_BUF_STATE_ERROR);
+ priv->queue_buf[i] = NULL;
+ }
+ }
+
+ list_for_each_safe(buf_head, tmp, &priv->capture) {
+ vb2_buffer_done(&list_entry(buf_head,
+ struct rcar_vin_buffer, list)->vb,
+ VB2_BUF_STATE_ERROR);
list_del_init(buf_head);
+ }
spin_unlock_irq(&priv->lock);
}
static struct vb2_ops rcar_vin_vb2_ops = {
.queue_setup = rcar_vin_videobuf_setup,
- .buf_init = rcar_vin_videobuf_init,
- .buf_cleanup = rcar_vin_videobuf_release,
.buf_queue = rcar_vin_videobuf_queue,
.stop_streaming = rcar_vin_stop_streaming,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static irqreturn_t rcar_vin_irq(int irq, void *data)
@@ -1799,13 +1776,17 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vq->io_modes = VB2_MMAP | VB2_USERPTR;
vq->drv_priv = icd;
@@ -1813,6 +1794,7 @@ static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
vq->mem_ops = &vb2_dma_contig_memops;
vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &ici->host_lock;
return vb2_queue_init(vq);
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8b27b3eb2b25..9ce202f53934 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -496,8 +496,8 @@ static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
.buf_queue = sh_mobile_ceu_videobuf_queue,
.buf_cleanup = sh_mobile_ceu_videobuf_release,
.buf_init = sh_mobile_ceu_videobuf_init,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = sh_mobile_ceu_stop_streaming,
};
@@ -1652,13 +1652,17 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->drv_priv = icd;
@@ -1666,6 +1670,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index b3db51c82bde..cee7b56f8404 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -843,22 +843,6 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
return res;
}
-void soc_camera_lock(struct vb2_queue *vq)
-{
- struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- mutex_lock(&ici->host_lock);
-}
-EXPORT_SYMBOL(soc_camera_lock);
-
-void soc_camera_unlock(struct vb2_queue *vq)
-{
- struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- mutex_unlock(&ici->host_lock);
-}
-EXPORT_SYMBOL(soc_camera_unlock);
-
static struct v4l2_file_operations soc_camera_fops = {
.owner = THIS_MODULE,
.open = soc_camera_open,
@@ -1813,8 +1797,6 @@ eadddev:
mutex_unlock(&ici->clk_lock);
}
eadd:
- video_device_release(icd->vdev);
- icd->vdev = NULL;
if (icd->vdev) {
video_device_release(icd->vdev);
icd->vdev = NULL;
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index d628d1a7cf9e..c44760b705da 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
@@ -74,7 +75,7 @@
#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
/*
- * each VPE context can need up to 3 config desciptors, 7 input descriptors,
+ * each VPE context can need up to 3 config descriptors, 7 input descriptors,
* 3 output descriptors, and 10 control descriptors
*/
#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
@@ -373,7 +374,6 @@ struct vpe_dev {
struct vpe_ctx {
struct v4l2_fh fh;
struct vpe_dev *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_ctrl_handler hdl;
unsigned int field; /* current field */
@@ -887,10 +887,10 @@ static int job_ready(void *priv)
if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
needed += 2; /* need additional two most recent fields */
- if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
return 0;
- if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
return 0;
return 1;
@@ -1100,15 +1100,15 @@ static void device_run(void *priv)
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
- ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[2] == NULL);
- ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[1] == NULL);
}
- ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[0] == NULL);
- ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->dst_vb == NULL);
/* config descriptors */
@@ -1334,7 +1334,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
finished:
vpe_dbg(ctx->dev, "finishing transaction\n");
ctx->bufs_completed = 0;
- v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
handled:
return IRQ_HANDLED;
}
@@ -1395,7 +1395,7 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct vpe_q_data *q_data;
int i;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -1527,7 +1527,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
int i;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -1739,52 +1739,6 @@ static int vpe_s_selection(struct file *file, void *fh,
return set_srcdst_params(ctx);
}
-static int vpe_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- vpe_dump_regs(ctx->dev);
- vpdma_dump_regs(ctx->dev->vpdma);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
/*
* defines number of buffers/frames a context can process with VPE before
* switching to a different context. default value is 1 buffer per context
@@ -1814,14 +1768,14 @@ static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
};
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
- .vidioc_querycap = vpe_querycap,
+ .vidioc_querycap = vpe_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
+ .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
.vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
- .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
.vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
.vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
.vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
@@ -1829,16 +1783,15 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
.vidioc_g_selection = vpe_g_selection,
.vidioc_s_selection = vpe_s_selection,
- .vidioc_reqbufs = vpe_reqbufs,
- .vidioc_querybuf = vpe_querybuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- .vidioc_qbuf = vpe_qbuf,
- .vidioc_dqbuf = vpe_dqbuf,
-
- .vidioc_streamon = vpe_streamon,
- .vidioc_streamoff = vpe_streamoff,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
@@ -1910,33 +1863,40 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
static void vpe_buf_queue(struct vb2_buffer *vb)
{
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
-static void vpe_wait_prepare(struct vb2_queue *q)
+static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct vpe_ctx *ctx = vb2_get_drv_priv(q);
- vpe_unlock(ctx);
+ /* currently we do nothing here */
+
+ return 0;
}
-static void vpe_wait_finish(struct vb2_queue *q)
+static void vpe_stop_streaming(struct vb2_queue *q)
{
struct vpe_ctx *ctx = vb2_get_drv_priv(q);
- vpe_lock(ctx);
+
+ vpe_dump_regs(ctx->dev);
+ vpdma_dump_regs(ctx->dev->vpdma);
}
static struct vb2_ops vpe_qops = {
.queue_setup = vpe_queue_setup,
.buf_prepare = vpe_buf_prepare,
.buf_queue = vpe_buf_queue,
- .wait_prepare = vpe_wait_prepare,
- .wait_finish = vpe_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vpe_start_streaming,
+ .stop_streaming = vpe_stop_streaming,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
@@ -1947,6 +1907,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &vpe_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &dev->dev_mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1960,6 +1921,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &vpe_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &dev->dev_mutex;
return vb2_queue_init(dst_vq);
}
@@ -1981,9 +1943,9 @@ static const struct v4l2_ctrl_config vpe_bufs_per_job = {
static int vpe_open(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = NULL;
struct vpe_q_data *s_q_data;
struct v4l2_ctrl_handler *hdl;
+ struct vpe_ctx *ctx;
int ret;
vpe_dbg(dev, "vpe_open\n");
@@ -2056,10 +2018,10 @@ static int vpe_open(struct file *file)
if (ret)
goto exit_fh;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
goto exit_fh;
}
@@ -2078,7 +2040,7 @@ static int vpe_open(struct file *file)
ctx->load_mmrs = true;
vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
- ctx, ctx->m2m_ctx);
+ ctx, ctx->fh.m2m_ctx);
mutex_unlock(&dev->dev_mutex);
@@ -2116,7 +2078,7 @@ static int vpe_release(struct file *file)
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
kfree(ctx);
@@ -2133,39 +2095,13 @@ static int vpe_release(struct file *file)
return 0;
}
-static unsigned int vpe_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct vpe_ctx *ctx = file2ctx(file);
- struct vpe_dev *dev = ctx->dev;
- int ret;
-
- mutex_lock(&dev->dev_mutex);
- ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&dev->dev_mutex);
- return ret;
-}
-
-static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct vpe_ctx *ctx = file2ctx(file);
- struct vpe_dev *dev = ctx->dev;
- int ret;
-
- if (mutex_lock_interruptible(&dev->dev_mutex))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&dev->dev_mutex);
- return ret;
-}
-
static const struct v4l2_file_operations vpe_fops = {
.owner = THIS_MODULE,
.open = vpe_open,
.release = vpe_release,
- .poll = vpe_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = vpe_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static struct video_device vpe_videodev = {
@@ -2367,8 +2303,6 @@ static const struct of_device_id vpe_of_match[] = {
},
{},
};
-#else
-#define vpe_of_match NULL
#endif
static struct platform_driver vpe_pdrv = {
@@ -2376,7 +2310,7 @@ static struct platform_driver vpe_pdrv = {
.remove = vpe_remove,
.driver = {
.name = VPE_MODULE_NAME,
- .of_match_table = vpe_of_match,
+ .of_match_table = of_match_ptr(vpe_of_match),
},
};
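
Editor's note: the vpe.c hunks above follow the standard mem2mem helper conversion — the hand-rolled reqbufs/qbuf/poll/mmap wrappers are dropped in favour of the core v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers, which locate the m2m context through the struct v4l2_fh stored in file->private_data. A minimal sketch of the layout those helpers assume follows; the "foo" names are illustrative and not taken from the driver.

/*
 * Minimal sketch, assuming the stock helpers: v4l2_m2m_ioctl_* and
 * v4l2_m2m_fop_* read file->private_data as a struct v4l2_fh and use
 * fh->m2m_ctx, so open() only has to set those two things up.
 */
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>

struct foo_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct foo_ctx {
	struct v4l2_fh fh;	/* file->private_data points here */
	struct foo_dev *dev;
};

static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
			  struct vb2_queue *dst_vq)
{
	return 0;		/* real vb2 queue setup elided in this sketch */
}

static int foo_open(struct file *file)
{
	struct foo_dev *dev = video_drvdata(file);
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->dev = dev;
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &foo_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		int ret = PTR_ERR(ctx->fh.m2m_ctx);

		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return ret;
	}
	v4l2_fh_add(&ctx->fh);
	return 0;
}

With that layout in place the file and ioctl ops can point straight at v4l2_m2m_fop_poll, v4l2_m2m_fop_mmap and the v4l2_m2m_ioctl_* helpers, which is exactly what the hunks above switch to.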
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 857e7866e8bc..32a798f2d953 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -689,7 +689,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = {
static const char * const vivid_ctrl_colorspace_strings[] = {
"SMPTE 170M",
- "REC 709",
+ "Rec. 709",
"sRGB",
"AdobeRGB",
"BT.2020",
@@ -716,7 +716,7 @@ static const char * const vivid_ctrl_ycbcr_enc_strings[] = {
"xvYCC 601",
"xvYCC 709",
"sYCC",
- "BT.2020 Non-Constant Luminance",
+ "BT.2020",
"BT.2020 Constant Luminance",
"SMPTE 240M",
NULL,
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index fc9c6536ba02..34493f435d5a 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -352,13 +352,14 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
{ COEFF(0.5, 224), COEFF(-0.4629, 224), COEFF(-0.0405, 224) },
};
bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+ unsigned y_offset = full ? 0 : 16;
int lin_y, yc;
switch (tpg->real_ycbcr_enc) {
case V4L2_YCBCR_ENC_601:
case V4L2_YCBCR_ENC_XV601:
case V4L2_YCBCR_ENC_SYCC:
- rgb2ycbcr(full ? bt601_full : bt601, r, g, b, 16, y, cb, cr);
+ rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
break;
case V4L2_YCBCR_ENC_BT2020:
rgb2ycbcr(bt2020, r, g, b, 16, y, cb, cr);
@@ -384,7 +385,7 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
case V4L2_YCBCR_ENC_709:
case V4L2_YCBCR_ENC_XV709:
default:
- rgb2ycbcr(full ? rec709_full : rec709, r, g, b, 0, y, cb, cr);
+ rgb2ycbcr(full ? rec709_full : rec709, r, g, b, y_offset, y, cb, cr);
break;
}
}
@@ -439,13 +440,14 @@ static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
{ COEFF(1, 219), COEFF(1.8814, 224), COEFF(0, 224) },
};
bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+ unsigned y_offset = full ? 0 : 16;
int lin_r, lin_g, lin_b, lin_y;
switch (tpg->real_ycbcr_enc) {
case V4L2_YCBCR_ENC_601:
case V4L2_YCBCR_ENC_XV601:
case V4L2_YCBCR_ENC_SYCC:
- ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, 16, r, g, b);
+ ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
break;
case V4L2_YCBCR_ENC_BT2020:
ycbcr2rgb(bt2020, y, cb, cr, 16, r, g, b);
@@ -480,7 +482,7 @@ static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
case V4L2_YCBCR_ENC_709:
case V4L2_YCBCR_ENC_XV709:
default:
- ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, 16, r, g, b);
+ ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, y_offset, r, g, b);
break;
}
}
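
Editor's note: the two vivid-tpg.c conversions above stop hard-coding the +16 luma offset for the Rec. 601/709 paths and derive it from the selected quantization instead — limited-range Y occupies [16, 235] (Y = 16 + 219*y for normalised y), while full range uses the whole [0, 255] scale. A minimal restatement of the rule, illustrative only and not driver code:

/* Illustrative only: luma offset as a function of the quantization range. */
static inline int tpg_luma_offset(bool full_range)
{
	return full_range ? 0 : 16;	/* limited range: Y in [16, 235] */
}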
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
index 9dc463a40ed3..bd8b1c760b3f 100644
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -20,7 +20,6 @@
#ifndef _VIVID_TPG_H_
#define _VIVID_TPG_H_
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/random.h>
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 12467191dff4..989e96f7e360 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/platform_data/vsp1.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -40,9 +39,20 @@ struct vsp1_uds;
#define VSP1_MAX_UDS 3
#define VSP1_MAX_WPF 4
+#define VSP1_HAS_LIF (1 << 0)
+#define VSP1_HAS_LUT (1 << 1)
+#define VSP1_HAS_SRU (1 << 2)
+
+struct vsp1_platform_data {
+ unsigned int features;
+ unsigned int rpf_count;
+ unsigned int uds_count;
+ unsigned int wpf_count;
+};
+
struct vsp1_device {
struct device *dev;
- struct vsp1_platform_data *pdata;
+ struct vsp1_platform_data pdata;
void __iomem *mmio;
struct clk *clock;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index b21f381a9862..401e2b77a0b6 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -20,7 +20,7 @@
#include "vsp1_bru.h"
#include "vsp1_rwpf.h"
-#define BRU_MIN_SIZE 4U
+#define BRU_MIN_SIZE 1U
#define BRU_MAX_SIZE 8190U
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 5eb16e87d53f..913485a90e97 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -40,7 +40,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
unsigned int i;
- for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
struct vsp1_rwpf *wpf = vsp1->wpf[i];
struct vsp1_pipeline *pipe;
u32 status;
@@ -181,7 +181,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- if (vsp1->pdata->features & VSP1_HAS_LIF) {
+ if (vsp1->pdata.features & VSP1_HAS_LIF) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
ret = PTR_ERR(vsp1->lif);
@@ -191,7 +191,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
}
- if (vsp1->pdata->features & VSP1_HAS_LUT) {
+ if (vsp1->pdata.features & VSP1_HAS_LUT) {
vsp1->lut = vsp1_lut_create(vsp1);
if (IS_ERR(vsp1->lut)) {
ret = PTR_ERR(vsp1->lut);
@@ -201,7 +201,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata->rpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.rpf_count; ++i) {
struct vsp1_rwpf *rpf;
rpf = vsp1_rpf_create(vsp1, i);
@@ -214,7 +214,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
}
- if (vsp1->pdata->features & VSP1_HAS_SRU) {
+ if (vsp1->pdata.features & VSP1_HAS_SRU) {
vsp1->sru = vsp1_sru_create(vsp1);
if (IS_ERR(vsp1->sru)) {
ret = PTR_ERR(vsp1->sru);
@@ -224,7 +224,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata->uds_count; ++i) {
+ for (i = 0; i < vsp1->pdata.uds_count; ++i) {
struct vsp1_uds *uds;
uds = vsp1_uds_create(vsp1, i);
@@ -237,7 +237,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&uds->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
struct vsp1_rwpf *wpf;
wpf = vsp1_wpf_create(vsp1, i);
@@ -261,7 +261,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
goto done;
}
- if (vsp1->pdata->features & VSP1_HAS_LIF) {
+ if (vsp1->pdata.features & VSP1_HAS_LIF) {
ret = media_entity_create_link(
&vsp1->wpf[0]->entity.subdev.entity, RWPF_PAD_SOURCE,
&vsp1->lif->entity.subdev.entity, LIF_PAD_SINK, 0);
@@ -294,7 +294,7 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
/* Reset any channel that might be running. */
status = vsp1_read(vsp1, VI6_STATUS);
- for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
unsigned int timeout;
if (!(status & VI6_STATUS_SYS_ACT(i)))
@@ -318,10 +318,10 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
(8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
- for (i = 0; i < vsp1->pdata->rpf_count; ++i)
+ for (i = 0; i < vsp1->pdata.rpf_count; ++i)
vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
- for (i = 0; i < vsp1->pdata->uds_count; ++i)
+ for (i = 0; i < vsp1->pdata.uds_count; ++i)
vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
@@ -428,28 +428,36 @@ static const struct dev_pm_ops vsp1_pm_ops = {
* Platform Driver
*/
-static int vsp1_validate_platform_data(struct platform_device *pdev,
- struct vsp1_platform_data *pdata)
+static int vsp1_parse_dt(struct vsp1_device *vsp1)
{
- if (pdata == NULL) {
- dev_err(&pdev->dev, "missing platform data\n");
- return -EINVAL;
- }
+ struct device_node *np = vsp1->dev->of_node;
+ struct vsp1_platform_data *pdata = &vsp1->pdata;
+
+ if (of_property_read_bool(np, "renesas,has-lif"))
+ pdata->features |= VSP1_HAS_LIF;
+ if (of_property_read_bool(np, "renesas,has-lut"))
+ pdata->features |= VSP1_HAS_LUT;
+ if (of_property_read_bool(np, "renesas,has-sru"))
+ pdata->features |= VSP1_HAS_SRU;
+
+ of_property_read_u32(np, "renesas,#rpf", &pdata->rpf_count);
+ of_property_read_u32(np, "renesas,#uds", &pdata->uds_count);
+ of_property_read_u32(np, "renesas,#wpf", &pdata->wpf_count);
if (pdata->rpf_count <= 0 || pdata->rpf_count > VSP1_MAX_RPF) {
- dev_err(&pdev->dev, "invalid number of RPF (%u)\n",
+ dev_err(vsp1->dev, "invalid number of RPF (%u)\n",
pdata->rpf_count);
return -EINVAL;
}
if (pdata->uds_count <= 0 || pdata->uds_count > VSP1_MAX_UDS) {
- dev_err(&pdev->dev, "invalid number of UDS (%u)\n",
+ dev_err(vsp1->dev, "invalid number of UDS (%u)\n",
pdata->uds_count);
return -EINVAL;
}
if (pdata->wpf_count <= 0 || pdata->wpf_count > VSP1_MAX_WPF) {
- dev_err(&pdev->dev, "invalid number of WPF (%u)\n",
+ dev_err(vsp1->dev, "invalid number of WPF (%u)\n",
pdata->wpf_count);
return -EINVAL;
}
@@ -457,33 +465,6 @@ static int vsp1_validate_platform_data(struct platform_device *pdev,
return 0;
}
-static struct vsp1_platform_data *
-vsp1_get_platform_data(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct vsp1_platform_data *pdata;
-
- if (!IS_ENABLED(CONFIG_OF) || np == NULL)
- return pdev->dev.platform_data;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (pdata == NULL)
- return NULL;
-
- if (of_property_read_bool(np, "renesas,has-lif"))
- pdata->features |= VSP1_HAS_LIF;
- if (of_property_read_bool(np, "renesas,has-lut"))
- pdata->features |= VSP1_HAS_LUT;
- if (of_property_read_bool(np, "renesas,has-sru"))
- pdata->features |= VSP1_HAS_SRU;
-
- of_property_read_u32(np, "renesas,#rpf", &pdata->rpf_count);
- of_property_read_u32(np, "renesas,#uds", &pdata->uds_count);
- of_property_read_u32(np, "renesas,#wpf", &pdata->wpf_count);
-
- return pdata;
-}
-
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
@@ -499,11 +480,7 @@ static int vsp1_probe(struct platform_device *pdev)
mutex_init(&vsp1->lock);
INIT_LIST_HEAD(&vsp1->entities);
- vsp1->pdata = vsp1_get_platform_data(pdev);
- if (vsp1->pdata == NULL)
- return -ENODEV;
-
- ret = vsp1_validate_platform_data(pdev, vsp1->pdata);
+ ret = vsp1_parse_dt(vsp1);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 80bedc554ee3..0bc0471746c9 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -26,11 +26,6 @@
* Device Access
*/
-static inline u32 vsp1_hsit_read(struct vsp1_hsit *hsit, u32 reg)
-{
- return vsp1_read(hsit->entity.vsp1, reg);
-}
-
static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
{
vsp1_write(hsit->entity.vsp1, reg, data);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 55f163d32d15..da3c573e1efc 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -43,12 +43,12 @@
#define VI6_DISP_IRQ_ENB 0x0078
#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
-#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << ((n) + 4))
+#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
#define VI6_DISP_IRQ_STA 0x007c
#define VI6_DISP_IRQ_STA_DSE (1 << 8)
#define VI6_DISP_IRQ_STA_MAE (1 << 5)
-#define VI6_DISP_IRQ_STA_LNE(n) (1 << ((n) + 4))
+#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
#define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4)
#define VI6_WPF_LINE_COUNT_MASK (0x1fffff << 0)
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index d14d26b718ef..3294529a3108 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -106,11 +106,22 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
+ crop->left * fmtinfo->bpp[0] / 8;
pstride = format->plane_fmt[0].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
+
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
+ rpf->buf_addr[0] + rpf->offsets[0]);
+
if (format->num_planes > 1) {
rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
+ crop->left * fmtinfo->bpp[1] / 8;
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
+ rpf->buf_addr[1] + rpf->offsets[1]);
+
+ if (format->num_planes > 2)
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
+ rpf->buf_addr[2] + rpf->offsets[1]);
}
vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
@@ -179,6 +190,13 @@ static void rpf_vdev_queue(struct vsp1_video *video,
struct vsp1_video_buffer *buf)
{
struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
+ unsigned int i;
+
+ for (i = 0; i < 3; ++i)
+ rpf->buf_addr[i] = buf->addr[i];
+
+ if (!vsp1_entity_is_streaming(&rpf->entity))
+ return;
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
buf->addr[0] + rpf->offsets[0]);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 28dd9e7b3838..2cf1f13d3bf9 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -39,6 +39,7 @@ struct vsp1_rwpf {
struct v4l2_rect crop;
unsigned int offsets[2];
+ dma_addr_t buf_addr[3];
};
static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 6e057762c933..1d2b3a2f1573 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -92,19 +92,20 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
- /* Sources. If the pipeline has a single input configure it as the
- * master layer. Otherwise configure all inputs as sub-layers and
- * select the virtual RPF as the master layer.
+ /* Sources. If the pipeline has a single input and BRU is not used,
+ * configure it as the master layer. Otherwise configure all
+ * inputs as sub-layers and select the virtual RPF as the master
+ * layer.
*/
for (i = 0; i < pipe->num_inputs; ++i) {
struct vsp1_rwpf *input = pipe->inputs[i];
- srcrpf |= pipe->num_inputs == 1
+ srcrpf |= (!pipe->bru && pipe->num_inputs == 1)
? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
: VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
}
- if (pipe->num_inputs > 1)
+ if (pipe->bru || pipe->num_inputs > 1)
srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST;
vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
@@ -280,7 +281,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
* except for the WPF0 source link if a LIF is present.
*/
flags = MEDIA_LNK_FL_ENABLED;
- if (!(vsp1->pdata->features & VSP1_HAS_LIF) || index != 0)
+ if (!(vsp1->pdata.features & VSP1_HAS_LIF) || index != 0)
flags |= MEDIA_LNK_FL_IMMUTABLE;
ret = media_entity_create_link(&wpf->entity.subdev.entity,
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index a739ad492e7b..ea9308796741 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -129,11 +129,11 @@ static int rtrack_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol)
} else if (curvol < vol) {
outb(0x98, isa->io); /* volume up + sigstr + on */
for (; curvol < vol; curvol++)
- udelay(3000);
+ mdelay(3);
} else if (curvol > vol) {
outb(0x58, isa->io); /* volume down + sigstr + on */
for (; curvol > vol; curvol--)
- udelay(3000);
+ mdelay(3);
}
outb(0xd8, isa->io); /* volume steady + sigstr + on */
rt->curvol = vol;
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index f1a0867789fe..43d1ea53cb66 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -247,10 +247,9 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_freq_bands(struct file *file, void *priv,
- struct v4l2_frequency_band *band)
+int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea,
+ struct v4l2_frequency_band *band)
{
- struct snd_tea575x *tea = video_drvdata(file);
int index;
if (band->tuner != 0)
@@ -279,18 +278,25 @@ static int vidioc_enum_freq_bands(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL(snd_tea575x_enum_freq_bands);
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
+static int vidioc_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
{
struct snd_tea575x *tea = video_drvdata(file);
+
+ return snd_tea575x_enum_freq_bands(tea, band);
+}
+
+int snd_tea575x_g_tuner(struct snd_tea575x *tea, struct v4l2_tuner *v)
+{
struct v4l2_frequency_band band_fm = { 0, };
if (v->index > 0)
return -EINVAL;
snd_tea575x_read(tea);
- vidioc_enum_freq_bands(file, priv, &band_fm);
+ snd_tea575x_enum_freq_bands(tea, &band_fm);
memset(v, 0, sizeof(*v));
strlcpy(v->name, tea->has_am ? "FM/AM" : "FM", sizeof(v->name));
@@ -304,6 +310,15 @@ static int vidioc_g_tuner(struct file *file, void *priv,
v->signal = tea->tuned ? 0xffff : 0;
return 0;
}
+EXPORT_SYMBOL(snd_tea575x_g_tuner);
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct snd_tea575x *tea = video_drvdata(file);
+
+ return snd_tea575x_g_tuner(tea, v);
+}
static int vidioc_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *v)
@@ -356,10 +371,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_hw_freq_seek(struct file *file, void *fh,
- const struct v4l2_hw_freq_seek *a)
+int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
+ const struct v4l2_hw_freq_seek *a)
{
- struct snd_tea575x *tea = video_drvdata(file);
unsigned long timeout;
int i, spacing;
@@ -442,6 +456,15 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *fh,
snd_tea575x_set_freq(tea);
return -ENODATA;
}
+EXPORT_SYMBOL(snd_tea575x_s_hw_freq_seek);
+
+static int vidioc_s_hw_freq_seek(struct file *file, void *fh,
+ const struct v4l2_hw_freq_seek *a)
+{
+ struct snd_tea575x *tea = video_drvdata(file);
+
+ return snd_tea575x_s_hw_freq_seek(file, tea, a);
+}
static int tea575x_s_ctrl(struct v4l2_ctrl *ctrl)
{
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index 09632cb26cb6..cfaeb2417fbb 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -785,22 +785,6 @@ int fm_rx_set_rds_system(struct fmdev *fmdev, u8 rds_mode)
return 0;
}
-/* Returns current RDS operation mode */
-int fm_rx_get_rds_system(struct fmdev *fmdev, u8 *rds_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (rds_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *rds_mode = fmdev->rx.rds_mode;
-
- return 0;
-}
-
/* Configures Alternate Frequency switch mode */
int fm_rx_set_af_switch(struct fmdev *fmdev, u8 af_mode)
{
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.h b/drivers/media/radio/wl128x/fmdrv_rx.h
index 32add81f8d87..23922188882f 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.h
+++ b/drivers/media/radio/wl128x/fmdrv_rx.h
@@ -40,7 +40,6 @@ void fm_rx_reset_station_info(struct fmdev *);
int fm_rx_seek(struct fmdev *, u32, u32, u32);
int fm_rx_get_rds_mode(struct fmdev *, u8 *);
-int fm_rx_get_rds_system(struct fmdev *, u8 *);
int fm_rx_get_mute_mode(struct fmdev *, u8 *);
int fm_rx_get_volume(struct fmdev *, u16 *);
int fm_rx_get_band_freq_range(struct fmdev *,
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
index 580715c7fc5e..a896d3c83a1c 100644
--- a/drivers/media/rc/img-ir/Kconfig
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -60,3 +60,18 @@ config IR_IMG_SANYO
help
Say Y here to enable support for the Sanyo protocol (used by Sanyo,
Aiwa, Chinon remotes) in the ImgTec infrared decoder block.
+
+config IR_IMG_RC5
+ bool "Philips RC5 protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the RC5 protocol in the ImgTec
+ infrared decoder block.
+
+config IR_IMG_RC6
+ bool "Philips RC6 protocol support"
+ depends on IR_IMG_HW
+ help
+ Say Y here to enable support for the RC6 protocol in the ImgTec
+ infrared decoder block.
+ Note: This version only supports mode 0.
diff --git a/drivers/media/rc/img-ir/Makefile b/drivers/media/rc/img-ir/Makefile
index 92a459d99509..8e6d458e66ad 100644
--- a/drivers/media/rc/img-ir/Makefile
+++ b/drivers/media/rc/img-ir/Makefile
@@ -6,6 +6,8 @@ img-ir-$(CONFIG_IR_IMG_JVC) += img-ir-jvc.o
img-ir-$(CONFIG_IR_IMG_SONY) += img-ir-sony.o
img-ir-$(CONFIG_IR_IMG_SHARP) += img-ir-sharp.o
img-ir-$(CONFIG_IR_IMG_SANYO) += img-ir-sanyo.o
+img-ir-$(CONFIG_IR_IMG_RC5) += img-ir-rc5.o
+img-ir-$(CONFIG_IR_IMG_RC6) += img-ir-rc6.o
img-ir-objs := $(img-ir-y)
obj-$(CONFIG_IR_IMG) += img-ir.o
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 2fd47c9bf5d8..7bb71bc9f534 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -42,6 +42,12 @@ static struct img_ir_decoder *img_ir_decoders[] = {
#ifdef CONFIG_IR_IMG_SANYO
&img_ir_sanyo,
#endif
+#ifdef CONFIG_IR_IMG_RC5
+ &img_ir_rc5,
+#endif
+#ifdef CONFIG_IR_IMG_RC6
+ &img_ir_rc6,
+#endif
NULL
};
@@ -52,6 +58,11 @@ static struct img_ir_decoder *img_ir_decoders[] = {
#define IMG_IR_QUIRK_CODE_BROKEN 0x1 /* Decode is broken */
#define IMG_IR_QUIRK_CODE_LEN_INCR 0x2 /* Bit length needs increment */
+/*
+ * The decoder generates rapid interrupts without actually having
+ * received any new data after an incomplete IR code is decoded.
+ */
+#define IMG_IR_QUIRK_CODE_IRQ 0x4
/* functions for preprocessing timings, ensuring max is set */
@@ -542,6 +553,7 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
*/
spin_unlock_irq(&priv->lock);
del_timer_sync(&hw->end_timer);
+ del_timer_sync(&hw->suspend_timer);
spin_lock_irq(&priv->lock);
hw->stopping = false;
@@ -806,20 +818,24 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
struct img_ir_priv_hw *hw = &priv->hw;
const struct img_ir_decoder *dec = hw->decoder;
int ret = IMG_IR_SCANCODE;
- u32 scancode;
- enum rc_type protocol = RC_TYPE_UNKNOWN;
+ struct img_ir_scancode_req request;
+
+ request.protocol = RC_TYPE_UNKNOWN;
+ request.toggle = 0;
if (dec->scancode)
- ret = dec->scancode(len, raw, &protocol, &scancode, hw->enabled_protocols);
+ ret = dec->scancode(len, raw, hw->enabled_protocols, &request);
else if (len >= 32)
- scancode = (u32)raw;
+ request.scancode = (u32)raw;
else if (len < 32)
- scancode = (u32)raw & ((1 << len)-1);
+ request.scancode = (u32)raw & ((1 << len)-1);
dev_dbg(priv->dev, "data (%u bits) = %#llx\n",
len, (unsigned long long)raw);
if (ret == IMG_IR_SCANCODE) {
- dev_dbg(priv->dev, "decoded scan code %#x\n", scancode);
- rc_keydown(hw->rdev, protocol, scancode, 0);
+ dev_dbg(priv->dev, "decoded scan code %#x, toggle %u\n",
+ request.scancode, request.toggle);
+ rc_keydown(hw->rdev, request.protocol, request.scancode,
+ request.toggle);
img_ir_end_repeat(priv);
} else if (ret == IMG_IR_REPEATCODE) {
if (hw->mode == IMG_IR_M_REPEATING) {
@@ -857,6 +873,29 @@ static void img_ir_end_timer(unsigned long arg)
spin_unlock_irq(&priv->lock);
}
+/*
+ * Timer function to re-enable the current protocol after it had been
+ * cleared when invalid interrupts were generated due to a quirk in the
+ * img-ir decoder.
+ */
+static void img_ir_suspend_timer(unsigned long arg)
+{
+ struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+
+ spin_lock_irq(&priv->lock);
+ /*
+ * Don't overwrite enabled valid/match IRQs if they have already been
+ * changed by e.g. a filter change.
+ */
+ if ((priv->hw.quirk_suspend_irq & IMG_IR_IRQ_EDGE) ==
+ img_ir_read(priv, IMG_IR_IRQ_ENABLE))
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE,
+ priv->hw.quirk_suspend_irq);
+ /* enable */
+ img_ir_write(priv, IMG_IR_CONTROL, priv->hw.reg_timings.ctrl);
+ spin_unlock_irq(&priv->lock);
+}
+
#ifdef CONFIG_COMMON_CLK
static void img_ir_change_frequency(struct img_ir_priv *priv,
struct clk_notifier_data *change)
@@ -922,15 +961,38 @@ void img_ir_isr_hw(struct img_ir_priv *priv, u32 irq_status)
if (!hw->decoder)
return;
+ ct = hw->decoder->control.code_type;
+
ir_status = img_ir_read(priv, IMG_IR_STATUS);
- if (!(ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)))
+ if (!(ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2))) {
+ if (!(priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_IRQ) ||
+ hw->stopping)
+ return;
+ /*
+ * The functionality below is added as a workaround to stop the
+ * multiple interrupts generated when an incomplete IR code is
+ * received by the decoder.
+ * The decoder generates rapid interrupts without actually
+ * having received any new data. After a single interrupt it's
+ * expected to clear up, but instead multiple interrupts are
+ * rapidly generated. The only way to get out of this loop is to
+ * reset the control register after a short delay.
+ */
+ img_ir_write(priv, IMG_IR_CONTROL, 0);
+ hw->quirk_suspend_irq = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+ img_ir_write(priv, IMG_IR_IRQ_ENABLE,
+ hw->quirk_suspend_irq & IMG_IR_IRQ_EDGE);
+
+ /* Timer activated to re-enable the protocol. */
+ mod_timer(&hw->suspend_timer,
+ jiffies + msecs_to_jiffies(5));
return;
+ }
ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
img_ir_write(priv, IMG_IR_STATUS, ir_status);
len = (ir_status & IMG_IR_RXDLEN) >> IMG_IR_RXDLEN_SHIFT;
/* some versions report wrong length for certain code types */
- ct = hw->decoder->control.code_type;
if (hw->ct_quirks[ct] & IMG_IR_QUIRK_CODE_LEN_INCR)
++len;
@@ -972,7 +1034,7 @@ static void img_ir_probe_hw_caps(struct img_ir_priv *priv)
hw->ct_quirks[IMG_IR_CODETYPE_PULSELEN]
|= IMG_IR_QUIRK_CODE_LEN_INCR;
hw->ct_quirks[IMG_IR_CODETYPE_BIPHASE]
- |= IMG_IR_QUIRK_CODE_BROKEN;
+ |= IMG_IR_QUIRK_CODE_IRQ;
hw->ct_quirks[IMG_IR_CODETYPE_2BITPULSEPOS]
|= IMG_IR_QUIRK_CODE_BROKEN;
}
@@ -991,6 +1053,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
/* Set up the end timer */
setup_timer(&hw->end_timer, img_ir_end_timer, (unsigned long)priv);
+ setup_timer(&hw->suspend_timer, img_ir_suspend_timer,
+ (unsigned long)priv);
/* Register a clock notifier */
if (!IS_ERR(priv->clk)) {
diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
index 5c2b216c5fe3..91a297731661 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.h
+++ b/drivers/media/rc/img-ir/img-ir-hw.h
@@ -133,6 +133,20 @@ struct img_ir_timing_regvals {
#define IMG_IR_REPEATCODE 1 /* repeat the previous code */
/**
+ * struct img_ir_scancode_req - Scancode request data.
+ * @protocol: Protocol code of received message (defaults to
+ * RC_TYPE_UNKNOWN).
+ * @scancode: Scan code of received message (must be written by
+ * handler if IMG_IR_SCANCODE is returned).
+ * @toggle: Toggle bit (defaults to 0).
+ */
+struct img_ir_scancode_req {
+ enum rc_type protocol;
+ u32 scancode;
+ u8 toggle;
+};
+
+/**
* struct img_ir_decoder - Decoder settings for an IR protocol.
* @type: Protocol types bitmap.
* @tolerance: Timing tolerance as a percentage (default 10%).
@@ -162,8 +176,8 @@ struct img_ir_decoder {
struct img_ir_control control;
/* scancode logic */
- int (*scancode)(int len, u64 raw, enum rc_type *protocol,
- u32 *scancode, u64 enabled_protocols);
+ int (*scancode)(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request);
int (*filter)(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols);
};
@@ -173,6 +187,8 @@ extern struct img_ir_decoder img_ir_jvc;
extern struct img_ir_decoder img_ir_sony;
extern struct img_ir_decoder img_ir_sharp;
extern struct img_ir_decoder img_ir_sanyo;
+extern struct img_ir_decoder img_ir_rc5;
+extern struct img_ir_decoder img_ir_rc6;
/**
* struct img_ir_reg_timings - Reg values for decoder timings at clock rate.
@@ -204,6 +220,7 @@ enum img_ir_mode {
* @rdev: Remote control device
* @clk_nb: Notifier block for clock notify events.
* @end_timer: Timer until repeat timeout.
+ * @suspend_timer: Timer to re-enable protocol.
* @decoder: Current decoder settings.
* @enabled_protocols: Currently enabled protocols.
* @clk_hz: Current core clock rate in Hz.
@@ -214,12 +231,14 @@ enum img_ir_mode {
* @stopping: Indicates that decoder is being taken down and timers
* should not be restarted.
* @suspend_irqen: Saved IRQ enable mask over suspend.
+ * @quirk_suspend_irq: Saved IRQ enable mask over quirk suspend timer.
*/
struct img_ir_priv_hw {
unsigned int ct_quirks[4];
struct rc_dev *rdev;
struct notifier_block clk_nb;
struct timer_list end_timer;
+ struct timer_list suspend_timer;
const struct img_ir_decoder *decoder;
u64 enabled_protocols;
unsigned long clk_hz;
@@ -230,6 +249,7 @@ struct img_ir_priv_hw {
enum img_ir_mode mode;
bool stopping;
u32 suspend_irqen;
+ u32 quirk_suspend_irq;
};
static inline bool img_ir_hw_enabled(struct img_ir_priv_hw *hw)
diff --git a/drivers/media/rc/img-ir/img-ir-jvc.c b/drivers/media/rc/img-ir/img-ir-jvc.c
index a60dda8bf706..d3e2fc0bcfe1 100644
--- a/drivers/media/rc/img-ir/img-ir-jvc.c
+++ b/drivers/media/rc/img-ir/img-ir-jvc.c
@@ -12,8 +12,8 @@
#include "img-ir-hw.h"
/* Convert JVC data to a scancode */
-static int img_ir_jvc_scancode(int len, u64 raw, enum rc_type *protocol,
- u32 *scancode, u64 enabled_protocols)
+static int img_ir_jvc_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
{
unsigned int cust, data;
@@ -23,8 +23,8 @@ static int img_ir_jvc_scancode(int len, u64 raw, enum rc_type *protocol,
cust = (raw >> 0) & 0xff;
data = (raw >> 8) & 0xff;
- *protocol = RC_TYPE_JVC;
- *scancode = cust << 8 | data;
+ request->protocol = RC_TYPE_JVC;
+ request->scancode = cust << 8 | data;
return IMG_IR_SCANCODE;
}
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index 739897549b5b..27a7ea8f1260 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -13,8 +13,8 @@
#include <linux/bitrev.h>
/* Convert NEC data to a scancode */
-static int img_ir_nec_scancode(int len, u64 raw, enum rc_type *protocol,
- u32 *scancode, u64 enabled_protocols)
+static int img_ir_nec_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
{
unsigned int addr, addr_inv, data, data_inv;
/* a repeat code has no data */
@@ -30,23 +30,23 @@ static int img_ir_nec_scancode(int len, u64 raw, enum rc_type *protocol,
if ((data_inv ^ data) != 0xff) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
/* scan encoding: as transmitted, MSBit = first received bit */
- *scancode = bitrev8(addr) << 24 |
- bitrev8(addr_inv) << 16 |
- bitrev8(data) << 8 |
- bitrev8(data_inv);
+ request->scancode = bitrev8(addr) << 24 |
+ bitrev8(addr_inv) << 16 |
+ bitrev8(data) << 8 |
+ bitrev8(data_inv);
} else if ((addr_inv ^ addr) != 0xff) {
/* Extended NEC */
/* scan encoding: AAaaDD */
- *scancode = addr << 16 |
- addr_inv << 8 |
- data;
+ request->scancode = addr << 16 |
+ addr_inv << 8 |
+ data;
} else {
/* Normal NEC */
/* scan encoding: AADD */
- *scancode = addr << 8 |
- data;
+ request->scancode = addr << 8 |
+ data;
}
- *protocol = RC_TYPE_NEC;
+ request->protocol = RC_TYPE_NEC;
return IMG_IR_SCANCODE;
}
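
Editor's note: the per-protocol changes above (and the new RC-5/RC-6 decoders that follow) all move to the struct img_ir_scancode_req callback signature — instead of writing through separate protocol/scancode pointers, a handler fills in the request and may also set the toggle bit, which the biphase protocols need. A hypothetical handler using the new signature is sketched below; the "acme" protocol and its 16-bit layout are invented purely for illustration.

#include "img-ir-hw.h"

/* Hypothetical decoder callback using the new signature. */
static int img_ir_acme_scancode(int len, u64 raw, u64 enabled_protocols,
				struct img_ir_scancode_req *request)
{
	if (len != 16)
		return -EINVAL;

	request->protocol = RC_TYPE_OTHER;
	request->scancode = (u32)raw & 0xffff;
	/* protocols without a toggle simply leave the default of 0 */
	request->toggle = (raw >> 15) & 0x01;
	return IMG_IR_SCANCODE;
}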
diff --git a/drivers/media/rc/img-ir/img-ir-rc5.c b/drivers/media/rc/img-ir/img-ir-rc5.c
new file mode 100644
index 000000000000..a8a28a377eee
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-rc5.c
@@ -0,0 +1,88 @@
+/*
+ * ImgTec IR Decoder setup for Philips RC-5 protocol.
+ *
+ * Copyright 2012-2014 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert RC5 data to a scancode */
+static int img_ir_rc5_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
+{
+ unsigned int addr, cmd, tgl, start;
+
+ /* A quirk in the decoder shifts everything two bits to the left. */
+ raw >>= 2;
+
+ start = (raw >> 13) & 0x01;
+ tgl = (raw >> 11) & 0x01;
+ addr = (raw >> 6) & 0x1f;
+ cmd = raw & 0x3f;
+ /*
+ * The 12th bit is used to extend the command in extended RC5 and has
+ * no effect on standard RC5.
+ */
+ cmd += ((raw >> 12) & 0x01) ? 0 : 0x40;
+
+ if (!start)
+ return -EINVAL;
+
+ request->protocol = RC_TYPE_RC5;
+ request->scancode = addr << 8 | cmd;
+ request->toggle = tgl;
+ return IMG_IR_SCANCODE;
+}
+
+/* Convert RC5 scancode to RC5 data filter */
+static int img_ir_rc5_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ /* Not supported by the hw. */
+ return -EINVAL;
+}
+
+/*
+ * RC-5 decoder
+ * see http://www.sbprojects.com/knowledge/ir/rc5.php
+ */
+struct img_ir_decoder img_ir_rc5 = {
+ .type = RC_BIT_RC5,
+ .control = {
+ .bitoriend2 = 1,
+ .code_type = IMG_IR_CODETYPE_BIPHASE,
+ .decodend2 = 1,
+ },
+ /* main timings */
+ .tolerance = 16,
+ .unit = 888888, /* 1/36kHz * 32 = 888.888 microseconds */
+ .timings = {
+ /* 10 symbol */
+ .s10 = {
+ .pulse = { 1 },
+ .space = { 1 },
+ },
+
+ /* 11 symbol */
+ .s11 = {
+ .pulse = { 1 },
+ .space = { 1 },
+ },
+
+ /* free time */
+ .ft = {
+ .minlen = 14,
+ .maxlen = 14,
+ .ft_min = 5,
+ },
+ },
+
+ /* scancode logic */
+ .scancode = img_ir_rc5_scancode,
+ .filter = img_ir_rc5_filter,
+};
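
Editor's note: for reference, the 14-bit RC-5 frame unpacked above is, MSB first: start bit, field bit (the inverted command bit 6 in extended RC-5), toggle bit, 5 address bits, 6 command bits. A stand-alone restatement of the extraction, without the 2-bit hardware shift quirk (illustrative only):

/* Illustrative stand-alone decode of a 14-bit RC-5 word, MSB first. */
static int rc5_decode(unsigned int word, unsigned int *addr,
		      unsigned int *cmd, unsigned int *toggle)
{
	if (!((word >> 13) & 0x01))		/* start bit must be 1 */
		return -1;

	*toggle = (word >> 11) & 0x01;
	*addr   = (word >>  6) & 0x1f;
	*cmd    =  word        & 0x3f;
	if (!((word >> 12) & 0x01))		/* inverted field bit */
		*cmd += 0x40;			/* extended RC-5 command */
	return 0;
}

For example, the word 0x3123 decodes to address 0x04, command 0x23, toggle 0.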
diff --git a/drivers/media/rc/img-ir/img-ir-rc6.c b/drivers/media/rc/img-ir/img-ir-rc6.c
new file mode 100644
index 000000000000..de1e27534968
--- /dev/null
+++ b/drivers/media/rc/img-ir/img-ir-rc6.c
@@ -0,0 +1,117 @@
+/*
+ * ImgTec IR Decoder setup for Philips RC-6 protocol.
+ *
+ * Copyright 2012-2014 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include "img-ir-hw.h"
+
+/* Convert RC6 data to a scancode */
+static int img_ir_rc6_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
+{
+ unsigned int addr, cmd, mode, trl1, trl2;
+
+ /*
+ * Due to a side effect of the decoder handling the double length
+ * Trailer bit, the header information is a bit scrambled, and the
+ * raw data is shifted incorrectly.
+ * This workaround effectively recovers the header bits.
+ *
+ * The Header field should look like this:
+ *
+ * StartBit ModeBit2 ModeBit1 ModeBit0 TrailerBit
+ *
+ * But what we get is:
+ *
+ * ModeBit2 ModeBit1 ModeBit0 TrailerBit1 TrailerBit2
+ *
+ * The start bit is not important to recover the scancode.
+ */
+
+ raw >>= 27;
+
+ trl1 = (raw >> 17) & 0x01;
+ trl2 = (raw >> 16) & 0x01;
+
+ mode = (raw >> 18) & 0x07;
+ addr = (raw >> 8) & 0xff;
+ cmd = raw & 0xff;
+
+ /*
+ * Due to the irregularity explained above, the trailer bits cannot
+ * have the same value.
+ */
+ if (trl1 == trl2)
+ return -EINVAL;
+
+ /* Only mode 0 supported for now */
+ if (mode)
+ return -EINVAL;
+
+ request->protocol = RC_TYPE_RC6_0;
+ request->scancode = addr << 8 | cmd;
+ request->toggle = trl2;
+ return IMG_IR_SCANCODE;
+}
+
+/* Convert RC6 scancode to RC6 data filter */
+static int img_ir_rc6_filter(const struct rc_scancode_filter *in,
+ struct img_ir_filter *out, u64 protocols)
+{
+ /* Not supported by the hw. */
+ return -EINVAL;
+}
+
+/*
+ * RC-6 decoder
+ * see http://www.sbprojects.com/knowledge/ir/rc6.php
+ */
+struct img_ir_decoder img_ir_rc6 = {
+ .type = RC_BIT_RC6_0,
+ .control = {
+ .bitorien = 1,
+ .code_type = IMG_IR_CODETYPE_BIPHASE,
+ .decoden = 1,
+ .decodinpol = 1,
+ },
+ /* main timings */
+ .tolerance = 20,
+ /*
+ * Due to a quirk in the img-ir decoder, default header values do
+ * not work; the values described below were extracted from
+ * successful RTL test cases.
+ */
+ .timings = {
+ /* leader symbol */
+ .ldr = {
+ .pulse = { 650 },
+ .space = { 660 },
+ },
+ /* 0 symbol */
+ .s00 = {
+ .pulse = { 370 },
+ .space = { 370 },
+ },
+ /* 01 symbol */
+ .s01 = {
+ .pulse = { 370 },
+ .space = { 370 },
+ },
+ /* free time */
+ .ft = {
+ .minlen = 21,
+ .maxlen = 21,
+ .ft_min = 2666, /* 2.666 ms */
+ },
+ },
+
+ /* scancode logic */
+ .scancode = img_ir_rc6_scancode,
+ .filter = img_ir_rc6_filter,
+};
diff --git a/drivers/media/rc/img-ir/img-ir-sanyo.c b/drivers/media/rc/img-ir/img-ir-sanyo.c
index 6b0653ecdf5a..f394994ffc22 100644
--- a/drivers/media/rc/img-ir/img-ir-sanyo.c
+++ b/drivers/media/rc/img-ir/img-ir-sanyo.c
@@ -23,8 +23,8 @@
#include "img-ir-hw.h"
/* Convert Sanyo data to a scancode */
-static int img_ir_sanyo_scancode(int len, u64 raw, enum rc_type *protocol,
- u32 *scancode, u64 enabled_protocols)
+static int img_ir_sanyo_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
{
unsigned int addr, addr_inv, data, data_inv;
/* a repeat code has no data */
@@ -44,8 +44,8 @@ static int img_ir_sanyo_scancode(int len, u64 raw, enum rc_type *protocol,
return -EINVAL;
/* Normal Sanyo */
- *protocol = RC_TYPE_SANYO;
- *scancode = addr << 8 | data;
+ request->protocol = RC_TYPE_SANYO;
+ request->scancode = addr << 8 | data;
return IMG_IR_SCANCODE;
}
diff --git a/drivers/media/rc/img-ir/img-ir-sharp.c b/drivers/media/rc/img-ir/img-ir-sharp.c
index 3300a38802ac..fe5acc4f030e 100644
--- a/drivers/media/rc/img-ir/img-ir-sharp.c
+++ b/drivers/media/rc/img-ir/img-ir-sharp.c
@@ -12,8 +12,8 @@
#include "img-ir-hw.h"
/* Convert Sharp data to a scancode */
-static int img_ir_sharp_scancode(int len, u64 raw, enum rc_type *protocol,
- u32 *scancode, u64 enabled_protocols)
+static int img_ir_sharp_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
{
unsigned int addr, cmd, exp, chk;
@@ -32,8 +32,8 @@ static int img_ir_sharp_scancode(int len, u64 raw, enum rc_type *protocol,
/* probably the second half of the message */
return -EINVAL;
- *protocol = RC_TYPE_SHARP;
- *scancode = addr << 8 | cmd;
+ request->protocol = RC_TYPE_SHARP;
+ request->scancode = addr << 8 | cmd;
return IMG_IR_SCANCODE;
}
diff --git a/drivers/media/rc/img-ir/img-ir-sony.c b/drivers/media/rc/img-ir/img-ir-sony.c
index 3a0f17b0752c..7f7375f82ed6 100644
--- a/drivers/media/rc/img-ir/img-ir-sony.c
+++ b/drivers/media/rc/img-ir/img-ir-sony.c
@@ -12,8 +12,8 @@
#include "img-ir-hw.h"
/* Convert Sony data to a scancode */
-static int img_ir_sony_scancode(int len, u64 raw, enum rc_type *protocol,
- u32 *scancode, u64 enabled_protocols)
+static int img_ir_sony_scancode(int len, u64 raw, u64 enabled_protocols,
+ struct img_ir_scancode_req *request)
{
unsigned int dev, subdev, func;
@@ -25,7 +25,7 @@ static int img_ir_sony_scancode(int len, u64 raw, enum rc_type *protocol,
raw >>= 7;
dev = raw & 0x1f; /* next 5 bits */
subdev = 0;
- *protocol = RC_TYPE_SONY12;
+ request->protocol = RC_TYPE_SONY12;
break;
case 15:
if (!(enabled_protocols & RC_BIT_SONY15))
@@ -34,7 +34,7 @@ static int img_ir_sony_scancode(int len, u64 raw, enum rc_type *protocol,
raw >>= 7;
dev = raw & 0xff; /* next 8 bits */
subdev = 0;
- *protocol = RC_TYPE_SONY15;
+ request->protocol = RC_TYPE_SONY15;
break;
case 20:
if (!(enabled_protocols & RC_BIT_SONY20))
@@ -44,12 +44,12 @@ static int img_ir_sony_scancode(int len, u64 raw, enum rc_type *protocol,
dev = raw & 0x1f; /* next 5 bits */
raw >>= 5;
subdev = raw & 0xff; /* next 8 bits */
- *protocol = RC_TYPE_SONY20;
+ request->protocol = RC_TYPE_SONY20;
break;
default:
return -EINVAL;
}
- *scancode = dev << 16 | subdev << 8 | func;
+ request->scancode = dev << 16 | subdev << 8 | func;
return IMG_IR_SCANCODE;
}
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 1e0545a67959..4de0e85af805 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -553,14 +553,14 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
if (!ir->attached)
return POLLERR;
- poll_wait(file, &ir->buf->wait_poll, wait);
+ if (ir->buf) {
+ poll_wait(file, &ir->buf->wait_poll, wait);
- if (ir->buf)
if (lirc_buffer_empty(ir->buf))
ret = 0;
else
ret = POLLIN | POLLRDNORM;
- else
+ } else
ret = POLLERR;
dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n",
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 86ffcd54339e..f8c5e47a30aa 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1021,16 +1021,16 @@ static ssize_t store_protocols(struct device *device,
goto out;
}
- if (new_protocols == old_protocols) {
- rc = len;
- goto out;
+ if (new_protocols != old_protocols) {
+ *current_protocols = new_protocols;
+ IR_dprintk(1, "Protocols changed to 0x%llx\n",
+ (long long)new_protocols);
}
- *current_protocols = new_protocols;
- IR_dprintk(1, "Protocols changed to 0x%llx\n", (long long)new_protocols);
-
/*
- * If the protocol is changed the filter needs updating.
+ * If a protocol change was attempted, the filter may need updating, even
+ * if the actual protocol mask hasn't changed (since the driver may have
+ * cleared the filter).
* Try setting the same filter with the new protocol (if any).
* Fall back to clearing the filter.
*/
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 340f7f51eed4..7830aef3db45 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/reset.h>
#include <media/rc-core.h>
#define SUNXI_IR_DEV "sunxi-ir"
@@ -55,12 +56,12 @@
#define REG_RXINT_RAI_EN BIT(4)
/* Rx FIFO available byte level */
-#define REG_RXINT_RAL(val) (((val) << 8) & (GENMASK(11, 8)))
+#define REG_RXINT_RAL(val) ((val) << 8)
/* Rx Interrupt Status */
#define SUNXI_IR_RXSTA_REG 0x30
/* RX FIFO Get Available Counter */
-#define REG_RXSTA_GET_AC(val) (((val) >> 8) & (GENMASK(5, 0)))
+#define REG_RXSTA_GET_AC(val) (((val) >> 8) & (ir->fifo_size * 2 - 1))
/* Clear all interrupt status value */
#define REG_RXSTA_CLEARALL 0xff
@@ -71,10 +72,6 @@
/* CIR_REG register idle threshold */
#define REG_CIR_ITHR(val) (((val) << 8) & (GENMASK(15, 8)))
-/* Hardware supported fifo size */
-#define SUNXI_IR_FIFO_SIZE 16
-/* How many messages in FIFO trigger IRQ */
-#define TRIGGER_LEVEL 8
/* Required frequency for IR0 or IR1 clock in CIR mode */
#define SUNXI_IR_BASE_CLK 8000000
/* Frequency after IR internal divider */
@@ -93,8 +90,10 @@ struct sunxi_ir {
struct rc_dev *rc;
void __iomem *base;
int irq;
+ int fifo_size;
struct clk *clk;
struct clk *apb_clk;
+ struct reset_control *rst;
const char *map_name;
};
@@ -113,11 +112,11 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
/* clean all pending statuses */
writel(status | REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
- if (status & REG_RXINT_RAI_EN) {
+ if (status & (REG_RXINT_RAI_EN | REG_RXINT_RPEI_EN)) {
/* How many messages in fifo */
rc = REG_RXSTA_GET_AC(status);
/* Sanity check */
- rc = rc > SUNXI_IR_FIFO_SIZE ? SUNXI_IR_FIFO_SIZE : rc;
+ rc = rc > ir->fifo_size ? ir->fifo_size : rc;
/* If we have data */
for (cnt = 0; cnt < rc; cnt++) {
/* for each bit in fifo */
@@ -154,6 +153,11 @@ static int sunxi_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
+ if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
+ ir->fifo_size = 64;
+ else
+ ir->fifo_size = 16;
+
/* Clock */
ir->apb_clk = devm_clk_get(dev, "apb");
if (IS_ERR(ir->apb_clk)) {
@@ -166,15 +170,29 @@ static int sunxi_ir_probe(struct platform_device *pdev)
return PTR_ERR(ir->clk);
}
+ /* Reset (optional) */
+ ir->rst = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(ir->rst)) {
+ ret = PTR_ERR(ir->rst);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ ir->rst = NULL;
+ } else {
+ ret = reset_control_deassert(ir->rst);
+ if (ret)
+ return ret;
+ }
+
ret = clk_set_rate(ir->clk, SUNXI_IR_BASE_CLK);
if (ret) {
dev_err(dev, "set ir base clock failed!\n");
- return ret;
+ goto exit_reset_assert;
}
if (clk_prepare_enable(ir->apb_clk)) {
dev_err(dev, "try to enable apb_ir_clk failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_reset_assert;
}
if (clk_prepare_enable(ir->clk)) {
@@ -255,7 +273,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
* level
*/
writel(REG_RXINT_ROI_EN | REG_RXINT_RPEI_EN |
- REG_RXINT_RAI_EN | REG_RXINT_RAL(TRIGGER_LEVEL - 1),
+ REG_RXINT_RAI_EN | REG_RXINT_RAL(ir->fifo_size / 2 - 1),
ir->base + SUNXI_IR_RXINT_REG);
/* Enable IR Module */
@@ -271,6 +289,9 @@ exit_clkdisable_clk:
clk_disable_unprepare(ir->clk);
exit_clkdisable_apb_clk:
clk_disable_unprepare(ir->apb_clk);
+exit_reset_assert:
+ if (ir->rst)
+ reset_control_assert(ir->rst);
return ret;
}
@@ -282,6 +303,8 @@ static int sunxi_ir_remove(struct platform_device *pdev)
clk_disable_unprepare(ir->clk);
clk_disable_unprepare(ir->apb_clk);
+ if (ir->rst)
+ reset_control_assert(ir->rst);
spin_lock_irqsave(&ir->ir_lock, flags);
/* disable IR IRQ */
@@ -298,6 +321,7 @@ static int sunxi_ir_remove(struct platform_device *pdev)
static const struct of_device_id sunxi_ir_match[] = {
{ .compatible = "allwinner,sun4i-a10-ir", },
+ { .compatible = "allwinner,sun5i-a13-ir", },
{},
};
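
Editor's note: the sunxi-cir changes above replace the fixed 16-byte FIFO assumptions with a per-SoC fifo_size (16 on sun4i-a10, 64 on sun5i-a13) — the interrupt trigger level becomes half the FIFO, and the available-counter field is masked with fifo_size * 2 - 1 instead of a fixed GENMASK. A restatement of the derived values, illustrative and not driver code:

/* Illustrative restatement of the values derived from the FIFO size. */
#define CIR_TRIGGER_LEVEL(fifo_size)	((fifo_size) / 2 - 1)	/* 7 for 16 B, 31 for 64 B */
#define CIR_AC_MASK(fifo_size)		((fifo_size) * 2 - 1)	/* 0x1f for 16 B, 0x7f for 64 B */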
diff --git a/drivers/media/tuners/mt20xx.c b/drivers/media/tuners/mt20xx.c
index 0e74e97e0d1a..9e031040c13f 100644
--- a/drivers/media/tuners/mt20xx.c
+++ b/drivers/media/tuners/mt20xx.c
@@ -660,11 +660,3 @@ EXPORT_SYMBOL_GPL(microtune_attach);
MODULE_DESCRIPTION("Microtune tuner driver");
MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
MODULE_LICENSE("GPL");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index f83b0c1ea6c8..6e2cdd2b6175 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -294,8 +294,3 @@ EXPORT_SYMBOL(mt2131_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/tuners/mt2131.h b/drivers/media/tuners/mt2131.h
index 09ceaf68e47c..837c854b9c65 100644
--- a/drivers/media/tuners/mt2131.h
+++ b/drivers/media/tuners/mt2131.h
@@ -47,8 +47,3 @@ static inline struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
#endif /* CONFIG_MEDIA_TUNER_MT2131 */
#endif /* __MT2131_H__ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/tuners/mt2131_priv.h b/drivers/media/tuners/mt2131_priv.h
index 62aeedf5c550..91283b599cb3 100644
--- a/drivers/media/tuners/mt2131_priv.h
+++ b/drivers/media/tuners/mt2131_priv.h
@@ -41,8 +41,3 @@ struct mt2131_priv {
};
#endif /* __MT2131_PRIV_H__ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 1810ad66888e..f4ae04c3328a 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -938,11 +938,3 @@ MODULE_DESCRIPTION("MaxLinear MxL5007T Silicon IC tuner driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.2");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/mxl5007t.h b/drivers/media/tuners/mxl5007t.h
index 37b0942e2385..ae7037d681c5 100644
--- a/drivers/media/tuners/mxl5007t.h
+++ b/drivers/media/tuners/mxl5007t.h
@@ -93,12 +93,3 @@ static inline struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
#endif
#endif /* __MXL5007T_H__ */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
-
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 2180de9d654a..fcf139dfdec6 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -19,16 +19,17 @@
static const struct dvb_tuner_ops si2157_ops;
/* execute firmware command */
-static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
+static int si2157_cmd_execute(struct i2c_client *client, struct si2157_cmd *cmd)
{
+ struct si2157_dev *dev = i2c_get_clientdata(client);
int ret;
unsigned long timeout;
- mutex_lock(&s->i2c_mutex);
+ mutex_lock(&dev->i2c_mutex);
if (cmd->wlen) {
/* write cmd and args for firmware */
- ret = i2c_master_send(s->client, cmd->args, cmd->wlen);
+ ret = i2c_master_send(client, cmd->args, cmd->wlen);
if (ret < 0) {
goto err_mutex_unlock;
} else if (ret != cmd->wlen) {
@@ -42,7 +43,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
#define TIMEOUT 80
timeout = jiffies + msecs_to_jiffies(TIMEOUT);
while (!time_after(jiffies, timeout)) {
- ret = i2c_master_recv(s->client, cmd->args, cmd->rlen);
+ ret = i2c_master_recv(client, cmd->args, cmd->rlen);
if (ret < 0) {
goto err_mutex_unlock;
} else if (ret != cmd->rlen) {
@@ -55,7 +56,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
break;
}
- dev_dbg(&s->client->dev, "cmd execution took %d ms\n",
+ dev_dbg(&client->dev, "cmd execution took %d ms\n",
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
@@ -65,35 +66,32 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
}
}
- ret = 0;
+ mutex_unlock(&dev->i2c_mutex);
+ return 0;
err_mutex_unlock:
- mutex_unlock(&s->i2c_mutex);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ mutex_unlock(&dev->i2c_mutex);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2157_init(struct dvb_frontend *fe)
{
- struct si2157 *s = fe->tuner_priv;
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
int ret, len, remaining;
struct si2157_cmd cmd;
- const struct firmware *fw = NULL;
- u8 *fw_file;
+ const struct firmware *fw;
+ const char *fw_name;
unsigned int chip_id;
- dev_dbg(&s->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
- if (s->fw_loaded)
+ if (dev->fw_loaded)
goto warm;
/* power up */
- if (s->chiptype == SI2157_CHIPTYPE_SI2146) {
+ if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
cmd.wlen = 9;
} else {
@@ -101,7 +99,7 @@ static int si2157_init(struct dvb_frontend *fe)
cmd.wlen = 15;
}
cmd.rlen = 1;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -109,7 +107,7 @@ static int si2157_init(struct dvb_frontend *fe)
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
cmd.rlen = 13;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -125,121 +123,133 @@ static int si2157_init(struct dvb_frontend *fe)
switch (chip_id) {
case SI2158_A20:
case SI2148_A20:
- fw_file = SI2158_A20_FIRMWARE;
+ fw_name = SI2158_A20_FIRMWARE;
break;
case SI2157_A30:
case SI2147_A30:
case SI2146_A10:
- goto skip_fw_download;
+ fw_name = NULL;
+ break;
default:
- dev_err(&s->client->dev,
- "unknown chip version Si21%d-%c%c%c\n",
+ dev_err(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
cmd.args[2], cmd.args[1],
cmd.args[3], cmd.args[4]);
ret = -EINVAL;
goto err;
}
- /* cold state - try to download firmware */
- dev_info(&s->client->dev, "found a '%s' in cold state\n",
- si2157_ops.info.name);
+ dev_info(&client->dev, "found a 'Silicon Labs Si21%d-%c%c%c'\n",
+ cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
+
+ if (fw_name == NULL)
+ goto skip_fw_download;
/* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &s->client->dev);
+ ret = request_firmware(&fw, fw_name, &client->dev);
if (ret) {
- dev_err(&s->client->dev, "firmware file '%s' not found\n",
- fw_file);
+ dev_err(&client->dev, "firmware file '%s' not found\n",
+ fw_name);
goto err;
}
/* firmware should be n chunks of 17 bytes */
if (fw->size % 17 != 0) {
- dev_err(&s->client->dev, "firmware file '%s' is invalid\n",
- fw_file);
+ dev_err(&client->dev, "firmware file '%s' is invalid\n",
+ fw_name);
ret = -EINVAL;
- goto fw_release_exit;
+ goto err_release_firmware;
}
- dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
- fw_file);
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ fw_name);
for (remaining = fw->size; remaining > 0; remaining -= 17) {
len = fw->data[fw->size - remaining];
memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
cmd.wlen = len;
cmd.rlen = 1;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret) {
- dev_err(&s->client->dev,
- "firmware download failed=%d\n",
+ dev_err(&client->dev, "firmware download failed %d\n",
ret);
- goto fw_release_exit;
+ goto err_release_firmware;
}
}
release_firmware(fw);
- fw = NULL;
skip_fw_download:
/* reboot the tuner with new firmware? */
memcpy(cmd.args, "\x01\x01", 2);
cmd.wlen = 2;
cmd.rlen = 1;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
- s->fw_loaded = true;
+ /* query firmware version */
+ memcpy(cmd.args, "\x11", 1);
+ cmd.wlen = 1;
+ cmd.rlen = 10;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ dev_info(&client->dev, "firmware version: %c.%c.%d\n",
+ cmd.args[6], cmd.args[7], cmd.args[8]);
+
+ dev->fw_loaded = true;
warm:
- s->active = true;
+ dev->active = true;
return 0;
-fw_release_exit:
+err_release_firmware:
release_firmware(fw);
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2157_sleep(struct dvb_frontend *fe)
{
- struct si2157 *s = fe->tuner_priv;
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
int ret;
struct si2157_cmd cmd;
- dev_dbg(&s->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
- s->active = false;
+ dev->active = false;
/* standby */
memcpy(cmd.args, "\x16\x00", 2);
cmd.wlen = 2;
cmd.rlen = 1;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
return 0;
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int si2157_set_params(struct dvb_frontend *fe)
{
- struct si2157 *s = fe->tuner_priv;
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
struct si2157_cmd cmd;
u8 bandwidth, delivery_system;
- dev_dbg(&s->client->dev,
+ dev_dbg(&client->dev,
"delivery_system=%d frequency=%u bandwidth_hz=%u\n",
- c->delivery_system, c->frequency,
- c->bandwidth_hz);
+ c->delivery_system, c->frequency, c->bandwidth_hz);
- if (!s->active) {
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
@@ -274,21 +284,21 @@ static int si2157_set_params(struct dvb_frontend *fe)
memcpy(cmd.args, "\x14\x00\x03\x07\x00\x00", 6);
cmd.args[4] = delivery_system | bandwidth;
- if (s->inversion)
+ if (dev->inversion)
cmd.args[5] = 0x01;
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
- if (s->chiptype == SI2157_CHIPTYPE_SI2146)
+ if (dev->chiptype == SI2157_CHIPTYPE_SI2146)
memcpy(cmd.args, "\x14\x00\x02\x07\x00\x01", 6);
else
memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -300,13 +310,13 @@ static int si2157_set_params(struct dvb_frontend *fe)
cmd.args[7] = (c->frequency >> 24) & 0xff;
cmd.wlen = 8;
cmd.rlen = 1;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
goto err;
return 0;
err:
- dev_dbg(&s->client->dev, "failed=%d\n", ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -334,70 +344,67 @@ static int si2157_probe(struct i2c_client *client,
{
struct si2157_config *cfg = client->dev.platform_data;
struct dvb_frontend *fe = cfg->fe;
- struct si2157 *s;
+ struct si2157_dev *dev;
struct si2157_cmd cmd;
int ret;
- s = kzalloc(sizeof(struct si2157), GFP_KERNEL);
- if (!s) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
ret = -ENOMEM;
dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
- s->client = client;
- s->fe = cfg->fe;
- s->inversion = cfg->inversion;
- s->fw_loaded = false;
- s->chiptype = (u8)id->driver_data;
- mutex_init(&s->i2c_mutex);
+ i2c_set_clientdata(client, dev);
+ dev->fe = cfg->fe;
+ dev->inversion = cfg->inversion;
+ dev->fw_loaded = false;
+ dev->chiptype = (u8)id->driver_data;
+ mutex_init(&dev->i2c_mutex);
/* check if the tuner is there */
cmd.wlen = 0;
cmd.rlen = 1;
- ret = si2157_cmd_execute(s, &cmd);
+ ret = si2157_cmd_execute(client, &cmd);
if (ret)
- goto err;
+ goto err_kfree;
- fe->tuner_priv = s;
- memcpy(&fe->ops.tuner_ops, &si2157_ops,
- sizeof(struct dvb_tuner_ops));
+ memcpy(&fe->ops.tuner_ops, &si2157_ops, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = client;
- i2c_set_clientdata(client, s);
-
- dev_info(&s->client->dev,
- "Silicon Labs %s successfully attached\n",
- s->chiptype == SI2157_CHIPTYPE_SI2146 ?
+ dev_info(&client->dev, "Silicon Labs %s successfully attached\n",
+ dev->chiptype == SI2157_CHIPTYPE_SI2146 ?
"Si2146" : "Si2147/2148/2157/2158");
return 0;
+
+err_kfree:
+ kfree(dev);
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
- kfree(s);
-
return ret;
}
static int si2157_remove(struct i2c_client *client)
{
- struct si2157 *s = i2c_get_clientdata(client);
- struct dvb_frontend *fe = s->fe;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = dev->fe;
dev_dbg(&client->dev, "\n");
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
- kfree(s);
+ kfree(dev);
return 0;
}
-static const struct i2c_device_id si2157_id[] = {
- {"si2157", 0},
- {"si2146", 1},
+static const struct i2c_device_id si2157_id_table[] = {
+ {"si2157", SI2157_CHIPTYPE_SI2157},
+ {"si2146", SI2157_CHIPTYPE_SI2146},
{}
};
-MODULE_DEVICE_TABLE(i2c, si2157_id);
+MODULE_DEVICE_TABLE(i2c, si2157_id_table);
static struct i2c_driver si2157_driver = {
.driver = {
@@ -406,7 +413,7 @@ static struct i2c_driver si2157_driver = {
},
.probe = si2157_probe,
.remove = si2157_remove,
- .id_table = si2157_id,
+ .id_table = si2157_id_table,
};
module_i2c_driver(si2157_driver);
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index d6e07cdd2a07..7aa53bce5593 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -21,9 +21,8 @@
#include "si2157.h"
/* state struct */
-struct si2157 {
+struct si2157_dev {
struct mutex i2c_mutex;
- struct i2c_client *client;
struct dvb_frontend *fe;
bool active;
bool fw_loaded;
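
[Editorial sketch, not part of the patch: the si2157 rework above drops the driver-private pointer passing in favour of the standard i2c clientdata pattern -- probe() attaches the state to the i2c_client, fe->tuner_priv now carries only the client, and every callback recovers the state from the client. The "my_" names below are hypothetical.]

#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_state {			/* hypothetical driver-private state */
	struct mutex i2c_mutex;
	bool active;
};

static int my_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	mutex_init(&st->i2c_mutex);
	i2c_set_clientdata(client, st);		/* state travels with the client */
	return 0;
}

static int my_remove(struct i2c_client *client)
{
	kfree(i2c_get_clientdata(client));	/* recover and free the state */
	return 0;
}
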
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 4995b890c164..f8620741bb5f 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -1355,11 +1355,3 @@ MODULE_DESCRIPTION("NXP TDA18271HD analog / digital tuner driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.4");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index b62e925f643f..1e89dd93c4bb 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1305,11 +1305,3 @@ int tda18271_assign_map_layout(struct dvb_frontend *fe)
return ret;
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/tda18271-priv.h b/drivers/media/tuners/tda18271-priv.h
index b36a7b754772..cc80f544af34 100644
--- a/drivers/media/tuners/tda18271-priv.h
+++ b/drivers/media/tuners/tda18271-priv.h
@@ -226,11 +226,3 @@ extern int tda18271_calc_ir_measure(struct dvb_frontend *fe, u32 *freq);
extern int tda18271_calc_rf_cal(struct dvb_frontend *fe, u32 *freq);
#endif /* __TDA18271_PRIV_H__ */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 73453a255cdc..edcb4a723aa1 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -907,11 +907,3 @@ MODULE_DESCRIPTION("DVB TDA827x driver");
MODULE_AUTHOR("Hartmut Hackmann <hartmut.hackmann@t-online.de>");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/tda8290.c b/drivers/media/tuners/tda8290.c
index ab4106c17b4c..998e82bba9c0 100644
--- a/drivers/media/tuners/tda8290.c
+++ b/drivers/media/tuners/tda8290.c
@@ -881,11 +881,3 @@ EXPORT_SYMBOL_GPL(tda829x_probe);
MODULE_DESCRIPTION("Philips/NXP TDA8290/TDA8295 analog IF demodulator driver");
MODULE_AUTHOR("Gerd Knorr, Hartmut Hackmann, Michael Krufky");
MODULE_LICENSE("GPL");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c
index 9823248d743f..56be6c29399b 100644
--- a/drivers/media/tuners/tda9887.c
+++ b/drivers/media/tuners/tda9887.c
@@ -707,11 +707,3 @@ struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(tda9887_attach);
MODULE_LICENSE("GPL");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index ca274c2d8c70..8e9ce144da9a 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -1148,11 +1148,3 @@ EXPORT_SYMBOL_GPL(simple_tuner_attach);
MODULE_DESCRIPTION("Simple 4-control-bytes style tuner driver");
MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
MODULE_LICENSE("GPL");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig
index 1d410ac8f9a8..78b797e0b434 100644
--- a/drivers/media/usb/au0828/Kconfig
+++ b/drivers/media/usb/au0828/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_AU0828
depends on I2C && INPUT && DVB_CORE && USB
select I2C_ALGOBIT
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select DVB_AU8522_DTV if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index da87f1cc31a9..edc27355f271 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -44,7 +44,7 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
struct au0828_board au0828_boards[] = {
[AU0828_BOARD_UNKNOWN] = {
.name = "Unknown board",
- .tuner_type = UNSET,
+ .tuner_type = -1U,
.tuner_addr = ADDR_UNSET,
},
[AU0828_BOARD_HAUPPAUGE_HVR850] = {
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index 932d24f42b24..f67247cf1a5a 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -28,111 +28,67 @@
#include <linux/init.h>
#include <linux/slab.h>
-static unsigned int vbibufs = 5;
-module_param(vbibufs, int, 0644);
-MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
-
/* ------------------------------------------------------------------ */
-static void
-free_buffer(struct videobuf_queue *vq, struct au0828_buffer *buf)
+static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct au0828_fh *fh = vq->priv_data;
- struct au0828_dev *dev = fh->dev;
- unsigned long flags = 0;
- if (in_interrupt())
- BUG();
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->slock, flags);
- if (dev->isoc_ctl.vbi_buf == buf)
- dev->isoc_ctl.vbi_buf = NULL;
- spin_unlock_irqrestore(&dev->slock, flags);
+ struct au0828_dev *dev = vb2_get_drv_priv(vq);
+ unsigned long img_size = dev->vbi_width * dev->vbi_height * 2;
+ unsigned long size;
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int
-vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-{
- struct au0828_fh *fh = q->priv_data;
- struct au0828_dev *dev = fh->dev;
+ size = fmt ? (fmt->fmt.vbi.samples_per_line *
+ (fmt->fmt.vbi.count[0] + fmt->fmt.vbi.count[1])) : img_size;
+ if (size < img_size)
+ return -EINVAL;
- *size = dev->vbi_width * dev->vbi_height * 2;
+ *nplanes = 1;
+ sizes[0] = size;
- if (0 == *count)
- *count = vbibufs;
- if (*count < 2)
- *count = 2;
- if (*count > 32)
- *count = 32;
return 0;
}
-static int
-vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buffer_prepare(struct vb2_buffer *vb)
{
- struct au0828_fh *fh = q->priv_data;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
- int rc = 0;
+ unsigned long size;
- buf->vb.size = dev->vbi_width * dev->vbi_height * 2;
+ size = dev->vbi_width * dev->vbi_height * 2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size) {
+ pr_err("%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
-
- buf->vb.width = dev->vbi_width;
- buf->vb.height = dev->vbi_height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
}
+ vb2_set_plane_payload(&buf->vb, 0, size);
- buf->vb.state = VIDEOBUF_PREPARED;
return 0;
-
-fail:
- free_buffer(q, buf);
- return rc;
}
static void
-vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
-{
- struct au0828_buffer *buf = container_of(vb,
- struct au0828_buffer,
- vb);
- struct au0828_fh *fh = vq->priv_data;
- struct au0828_dev *dev = fh->dev;
- struct au0828_dmaqueue *vbiq = &dev->vbiq;
-
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vbiq->active);
-}
-
-static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+vbi_buffer_queue(struct vb2_buffer *vb)
{
+ struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
- free_buffer(q, buf);
+ struct au0828_dmaqueue *vbiq = &dev->vbiq;
+ unsigned long flags = 0;
+
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vbiq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-struct videobuf_queue_ops au0828_vbi_qops = {
- .buf_setup = vbi_setup,
- .buf_prepare = vbi_prepare,
- .buf_queue = vbi_queue,
- .buf_release = vbi_release,
+struct vb2_ops au0828_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buffer_prepare,
+ .buf_queue = vbi_buffer_queue,
+ .start_streaming = au0828_start_analog_streaming,
+ .stop_streaming = au0828_stop_vbi_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
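
[Editorial sketch, not part of the patch: the au0828 VBI conversion above (and the video-queue conversion that follows) replaces videobuf with videobuf2. The minimal shape is a driver buffer embedding struct vb2_buffer plus a list_head, a queue_setup() that reports one image-sized plane, a buf_prepare() that validates the plane, and a buf_queue() that links the buffer onto the driver's active list under the same spinlock the URB handler takes. The "my_" names are hypothetical; the vb2 signatures match the kernel version this patch targets.]

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <media/videobuf2-vmalloc.h>

struct my_buffer {
	struct vb2_buffer vb;		/* must be first */
	struct list_head list;
};

struct my_dev {
	spinlock_t slock;
	struct list_head active;
	unsigned long img_size;
};

static int my_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
			  unsigned int *nbuffers, unsigned int *nplanes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	struct my_dev *dev = vb2_get_drv_priv(vq);

	*nplanes = 1;
	sizes[0] = dev->img_size;	/* one vmalloc'ed plane per buffer */
	return 0;
}

static int my_buf_prepare(struct vb2_buffer *vb)
{
	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	if (vb2_plane_size(vb, 0) < dev->img_size)
		return -EINVAL;		/* plane too small for a frame */
	vb2_set_plane_payload(vb, 0, dev->img_size);
	return 0;
}

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct my_buffer *buf = container_of(vb, struct my_buffer, vb);
	unsigned long flags;

	spin_lock_irqsave(&dev->slock, flags);
	list_add_tail(&buf->list, &dev->active);	/* hand off to the ISR side */
	spin_unlock_irqrestore(&dev->slock, flags);
}

static const struct vb2_ops my_qops = {
	.queue_setup	= my_queue_setup,
	.buf_prepare	= my_buf_prepare,
	.buf_queue	= my_buf_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};
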
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 5f337b118bff..a27cb5fcdef8 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -22,7 +22,6 @@
/* Developer Notes:
*
- * VBI support is not yet working
* The hardware scaler supported is unimplemented
* AC97 audio support is unimplemented (only i2s audio mode)
*
@@ -219,9 +218,6 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
au0828_isocdbg("au0828: called au0828_prepare_isoc\n");
- /* De-allocates all pending stuff */
- au0828_uninit_isoc(dev);
-
dev->isoc_ctl.isoc_copy = isoc_copy;
dev->isoc_ctl.num_bufs = num_bufs;
@@ -285,8 +281,6 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
}
}
- init_waitqueue_head(&dma_q->wq);
-
/* submit urbs and enables IRQ */
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC);
@@ -309,16 +303,12 @@ static inline void buffer_filled(struct au0828_dev *dev,
struct au0828_buffer *buf)
{
/* Advice that buffer was filled */
- au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
-
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
- dev->isoc_ctl.buf = NULL;
-
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.v4l2_buf.sequence = dev->frame_count++;
+ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
static inline void vbi_buffer_filled(struct au0828_dev *dev,
@@ -326,16 +316,12 @@ static inline void vbi_buffer_filled(struct au0828_dev *dev,
struct au0828_buffer *buf)
{
/* Advice that buffer was filled */
- au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
-
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
- dev->isoc_ctl.vbi_buf = NULL;
-
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.v4l2_buf.sequence = dev->vbi_frame_count++;
+ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
/*
@@ -354,8 +340,8 @@ static void au0828_copy_video(struct au0828_dev *dev,
if (len == 0)
return;
- if (dma_q->pos + len > buf->vb.size)
- len = buf->vb.size - dma_q->pos;
+ if (dma_q->pos + len > buf->length)
+ len = buf->length - dma_q->pos;
startread = p;
remain = len;
@@ -373,11 +359,11 @@ static void au0828_copy_video(struct au0828_dev *dev,
lencopy = bytesperline - currlinedone;
lencopy = lencopy > remain ? remain : lencopy;
- if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) {
+ if ((char *)startwrite + lencopy > (char *)outp + buf->length) {
au0828_isocdbg("Overflow of %zi bytes past buffer end (1)\n",
((char *)startwrite + lencopy) -
- ((char *)outp + buf->vb.size));
- remain = (char *)outp + buf->vb.size - (char *)startwrite;
+ ((char *)outp + buf->length));
+ remain = (char *)outp + buf->length - (char *)startwrite;
lencopy = remain;
}
if (lencopy <= 0)
@@ -395,11 +381,11 @@ static void au0828_copy_video(struct au0828_dev *dev,
lencopy = bytesperline;
if ((char *)startwrite + lencopy > (char *)outp +
- buf->vb.size) {
+ buf->length) {
au0828_isocdbg("Overflow %zi bytes past buf end (2)\n",
((char *)startwrite + lencopy) -
- ((char *)outp + buf->vb.size));
- lencopy = remain = (char *)outp + buf->vb.size -
+ ((char *)outp + buf->length));
+ lencopy = remain = (char *)outp + buf->length -
(char *)startwrite;
}
if (lencopy <= 0)
@@ -435,7 +421,11 @@ static inline void get_next_buf(struct au0828_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct au0828_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct au0828_buffer, list);
+ /* Cleans up buffer - Useful for testing for frame/URB loss */
+ list_del(&(*buf)->list);
+ dma_q->pos = 0;
+ (*buf)->vb_buf = (*buf)->mem;
dev->isoc_ctl.buf = *buf;
return;
@@ -473,8 +463,8 @@ static void au0828_copy_vbi(struct au0828_dev *dev,
bytesperline = dev->vbi_width;
- if (dma_q->pos + len > buf->vb.size)
- len = buf->vb.size - dma_q->pos;
+ if (dma_q->pos + len > buf->length)
+ len = buf->length - dma_q->pos;
startread = p;
startwrite = outp + (dma_q->pos / 2);
@@ -497,7 +487,6 @@ static inline void vbi_get_next_buf(struct au0828_dmaqueue *dma_q,
struct au0828_buffer **buf)
{
struct au0828_dev *dev = container_of(dma_q, struct au0828_dev, vbiq);
- char *outp;
if (list_empty(&dma_q->active)) {
au0828_isocdbg("No active queue to serve\n");
@@ -507,13 +496,12 @@ static inline void vbi_get_next_buf(struct au0828_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct au0828_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct au0828_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0x00, (*buf)->vb.size);
-
+ list_del(&(*buf)->list);
+ dma_q->pos = 0;
+ (*buf)->vb_buf = (*buf)->mem;
dev->isoc_ctl.vbi_buf = *buf;
-
return;
}
@@ -549,11 +537,11 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
buf = dev->isoc_ctl.buf;
if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
+ outp = vb2_plane_vaddr(&buf->vb, 0);
vbi_buf = dev->isoc_ctl.vbi_buf;
if (vbi_buf != NULL)
- vbioutp = videobuf_to_vmalloc(&vbi_buf->vb);
+ vbioutp = vb2_plane_vaddr(&vbi_buf->vb, 0);
for (i = 0; i < urb->number_of_packets; i++) {
int status = urb->iso_frame_desc[i].status;
@@ -593,8 +581,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (vbi_buf == NULL)
vbioutp = NULL;
else
- vbioutp = videobuf_to_vmalloc(
- &vbi_buf->vb);
+ vbioutp = vb2_plane_vaddr(
+ &vbi_buf->vb, 0);
/* Video */
if (buf != NULL)
@@ -603,7 +591,7 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (buf == NULL)
outp = NULL;
else
- outp = videobuf_to_vmalloc(&buf->vb);
+ outp = vb2_plane_vaddr(&buf->vb, 0);
/* As long as isoc traffic is arriving, keep
resetting the timer */
@@ -657,130 +645,59 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
return rc;
}
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct au0828_fh *fh = vq->priv_data;
- *size = (fh->dev->width * fh->dev->height * 16 + 7) >> 3;
+ struct au0828_dev *dev = vb2_get_drv_priv(vq);
+ unsigned long img_size = dev->height * dev->bytesperline;
+ unsigned long size;
- if (0 == *count)
- *count = AU0828_DEF_BUF;
+ size = fmt ? fmt->fmt.pix.sizeimage : img_size;
+ if (size < img_size)
+ return -EINVAL;
- if (*count < AU0828_MIN_BUF)
- *count = AU0828_MIN_BUF;
- return 0;
-}
+ *nplanes = 1;
+ sizes[0] = size;
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct au0828_buffer *buf)
-{
- struct au0828_fh *fh = vq->priv_data;
- struct au0828_dev *dev = fh->dev;
- unsigned long flags = 0;
- if (in_interrupt())
- BUG();
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->slock, flags);
- if (dev->isoc_ctl.buf == buf)
- dev->isoc_ctl.buf = NULL;
- spin_unlock_irqrestore(&dev->slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+buffer_prepare(struct vb2_buffer *vb)
{
- struct au0828_fh *fh = vq->priv_data;
struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
- struct au0828_dev *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- buf->vb.size = (fh->dev->width * fh->dev->height * 16 + 7) >> 3;
+ buf->length = dev->height * dev->bytesperline;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < buf->length) {
+ pr_err("%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), buf->length);
return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0) {
- pr_info("videobuf_iolock failed\n");
- goto fail;
- }
}
-
- if (!dev->isoc_ctl.num_bufs)
- urb_init = 1;
-
- if (urb_init) {
- rc = au0828_init_isoc(dev, AU0828_ISO_PACKETS_PER_URB,
- AU0828_MAX_ISO_BUFS, dev->max_pkt_size,
- au0828_isoc_copy);
- if (rc < 0) {
- pr_info("au0828_init_isoc failed\n");
- goto fail;
- }
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(&buf->vb, 0, buf->length);
return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
}
static void
-buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+buffer_queue(struct vb2_buffer *vb)
{
struct au0828_buffer *buf = container_of(vb,
struct au0828_buffer,
vb);
- struct au0828_fh *fh = vq->priv_data;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct au0828_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
-}
-
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct au0828_buffer *buf = container_of(vb,
- struct au0828_buffer,
- vb);
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
- free_buffer(vq, buf);
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct videobuf_queue_ops au0828_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
-};
-
-/* ------------------------------------------------------------------
- V4L2 interface
- ------------------------------------------------------------------*/
-
static int au0828_i2s_init(struct au0828_dev *dev)
{
/* Enable i2s mode */
@@ -829,7 +746,7 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
return 0;
}
-int au0828_analog_stream_disable(struct au0828_dev *d)
+static int au0828_analog_stream_disable(struct au0828_dev *d)
{
dprintk(1, "au0828_analog_stream_disable called\n");
au0828_writereg(d, AU0828_SENSORCTRL_100, 0x0);
@@ -862,78 +779,133 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
return 0;
}
-/*
- * au0828_release_resources
- * unregister v4l2 devices
- */
-void au0828_analog_unregister(struct au0828_dev *dev)
+int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
{
- dprintk(1, "au0828_release_resources called\n");
- mutex_lock(&au0828_sysfs_lock);
+ struct au0828_dev *dev = vb2_get_drv_priv(vq);
+ int rc = 0;
- if (dev->vdev)
- video_unregister_device(dev->vdev);
- if (dev->vbi_dev)
- video_unregister_device(dev->vbi_dev);
+ dprintk(1, "au0828_start_analog_streaming called %d\n",
+ dev->streaming_users);
- mutex_unlock(&au0828_sysfs_lock);
-}
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ dev->frame_count = 0;
+ else
+ dev->vbi_frame_count = 0;
+
+ if (dev->streaming_users == 0) {
+ /* If we were doing ac97 instead of i2s, it would go here...*/
+ au0828_i2s_init(dev);
+ rc = au0828_init_isoc(dev, AU0828_ISO_PACKETS_PER_URB,
+ AU0828_MAX_ISO_BUFS, dev->max_pkt_size,
+ au0828_isoc_copy);
+ if (rc < 0) {
+ pr_info("au0828_init_isoc failed\n");
+ return rc;
+ }
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video,
+ s_stream, 1);
+ dev->vid_timeout_running = 1;
+ mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
+ } else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ dev->vbi_timeout_running = 1;
+ mod_timer(&dev->vbi_timeout, jiffies + (HZ / 10));
+ }
+ }
+ dev->streaming_users++;
+ return rc;
+}
-/* Usage lock check functions */
-static int res_get(struct au0828_fh *fh, unsigned int bit)
+static void au0828_stop_streaming(struct vb2_queue *vq)
{
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = vb2_get_drv_priv(vq);
+ struct au0828_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
- if (fh->resources & bit)
- /* have it already allocated */
- return 1;
+ dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
- /* is it free? */
- if (dev->resources & bit) {
- /* no, someone else uses it */
- return 0;
+ if (dev->streaming_users-- == 1)
+ au0828_uninit_isoc(dev);
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+ dev->vid_timeout_running = 0;
+ del_timer_sync(&dev->vid_timeout);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ if (dev->isoc_ctl.buf != NULL) {
+ vb2_buffer_done(&dev->isoc_ctl.buf->vb, VB2_BUF_STATE_ERROR);
+ dev->isoc_ctl.buf = NULL;
}
- /* it's free, grab it */
- fh->resources |= bit;
- dev->resources |= bit;
- dprintk(1, "res: get %d\n", bit);
+ while (!list_empty(&vidq->active)) {
+ struct au0828_buffer *buf;
- return 1;
+ buf = list_entry(vidq->active.next, struct au0828_buffer, list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-static int res_check(struct au0828_fh *fh, unsigned int bit)
+void au0828_stop_vbi_streaming(struct vb2_queue *vq)
{
- return fh->resources & bit;
-}
+ struct au0828_dev *dev = vb2_get_drv_priv(vq);
+ struct au0828_dmaqueue *vbiq = &dev->vbiq;
+ unsigned long flags = 0;
-static int res_locked(struct au0828_dev *dev, unsigned int bit)
-{
- return dev->resources & bit;
-}
+ dprintk(1, "au0828_stop_vbi_streaming called %d\n",
+ dev->streaming_users);
-static void res_free(struct au0828_fh *fh, unsigned int bits)
-{
- struct au0828_dev *dev = fh->dev;
+ if (dev->streaming_users-- == 1)
+ au0828_uninit_isoc(dev);
- BUG_ON((fh->resources & bits) != bits);
+ spin_lock_irqsave(&dev->slock, flags);
+ if (dev->isoc_ctl.vbi_buf != NULL) {
+ vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb,
+ VB2_BUF_STATE_ERROR);
+ dev->isoc_ctl.vbi_buf = NULL;
+ }
+ while (!list_empty(&vbiq->active)) {
+ struct au0828_buffer *buf;
+
+ buf = list_entry(vbiq->active.next, struct au0828_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
- fh->resources &= ~bits;
- dev->resources &= ~bits;
- dprintk(1, "res: put %d\n", bits);
+ dev->vbi_timeout_running = 0;
+ del_timer_sync(&dev->vbi_timeout);
}
-static int get_ressource(struct au0828_fh *fh)
+static struct vb2_ops au0828_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .start_streaming = au0828_start_analog_streaming,
+ .stop_streaming = au0828_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/* ------------------------------------------------------------------
+ V4L2 interface
+ ------------------------------------------------------------------*/
+/*
+ * au0828_analog_unregister
+ * unregister v4l2 devices
+ */
+void au0828_analog_unregister(struct au0828_dev *dev)
{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return AU0828_RESOURCE_VIDEO;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return AU0828_RESOURCE_VBI;
- default:
- BUG();
- return 0;
- }
+ dprintk(1, "au0828_analog_unregister called\n");
+ mutex_lock(&au0828_sysfs_lock);
+
+ if (dev->vdev)
+ video_unregister_device(dev->vdev);
+ if (dev->vbi_dev)
+ video_unregister_device(dev->vbi_dev);
+
+ mutex_unlock(&au0828_sysfs_lock);
}
/* This function ensures that video frames continue to be delivered even if
@@ -951,8 +923,8 @@ static void au0828_vid_buffer_timeout(unsigned long data)
buf = dev->isoc_ctl.buf;
if (buf != NULL) {
- vid_data = videobuf_to_vmalloc(&buf->vb);
- memset(vid_data, 0x00, buf->vb.size); /* Blank green frame */
+ vid_data = vb2_plane_vaddr(&buf->vb, 0);
+ memset(vid_data, 0x00, buf->length); /* Blank green frame */
buffer_filled(dev, dma_q, buf);
}
get_next_buf(dma_q, &buf);
@@ -975,8 +947,8 @@ static void au0828_vbi_buffer_timeout(unsigned long data)
buf = dev->isoc_ctl.vbi_buf;
if (buf != NULL) {
- vbi_data = videobuf_to_vmalloc(&buf->vb);
- memset(vbi_data, 0x00, buf->vb.size);
+ vbi_data = vb2_plane_vaddr(&buf->vb, 0);
+ memset(vbi_data, 0x00, buf->length);
vbi_buffer_filled(dev, dma_q, buf);
}
vbi_get_next_buf(dma_q, &buf);
@@ -986,105 +958,65 @@ static void au0828_vbi_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-
static int au0828_v4l2_open(struct file *filp)
{
- int ret = 0;
- struct video_device *vdev = video_devdata(filp);
struct au0828_dev *dev = video_drvdata(filp);
- struct au0828_fh *fh;
- int type;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- default:
- return -EINVAL;
- }
-
- fh = kzalloc(sizeof(struct au0828_fh), GFP_KERNEL);
- if (NULL == fh) {
- dprintk(1, "Failed allocate au0828_fh struct!\n");
- return -ENOMEM;
- }
+ int ret;
- fh->type = type;
- fh->dev = dev;
- v4l2_fh_init(&fh->fh, vdev);
- filp->private_data = fh;
+ dprintk(1,
+ "%s called std_set %d dev_state %d stream users %d users %d\n",
+ __func__, dev->std_set_in_tuner_core, dev->dev_state,
+ dev->streaming_users, dev->users);
- if (mutex_lock_interruptible(&dev->lock)) {
- kfree(fh);
+ if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(filp);
+ if (ret) {
+ au0828_isocdbg("%s: v4l2_fh_open() returned error %d\n",
+ __func__, ret);
+ mutex_unlock(&dev->lock);
+ return ret;
}
+
if (dev->users == 0) {
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
-
- /* If we were doing ac97 instead of i2s, it would go here...*/
- au0828_i2s_init(dev);
-
dev->stream_state = STREAM_OFF;
dev->dev_state |= DEV_INITIALIZED;
}
-
dev->users++;
mutex_unlock(&dev->lock);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &au0828_video_qops,
- NULL, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct au0828_buffer), fh,
- &dev->lock);
-
- /* VBI Setup */
- videobuf_queue_vmalloc_init(&fh->vb_vbiq, &au0828_vbi_qops,
- NULL, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct au0828_buffer), fh,
- &dev->lock);
- v4l2_fh_add(&fh->fh);
return ret;
}
static int au0828_v4l2_close(struct file *filp)
{
int ret;
- struct au0828_fh *fh = filp->private_data;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+
+ dprintk(1,
+ "%s called std_set %d dev_state %d stream users %d users %d\n",
+ __func__, dev->std_set_in_tuner_core, dev->dev_state,
+ dev->streaming_users, dev->users);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
mutex_lock(&dev->lock);
- if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
+ if (vdev->vfl_type == VFL_TYPE_GRABBER && dev->vid_timeout_running) {
/* Cancel timeout thread in case they didn't call streamoff */
dev->vid_timeout_running = 0;
del_timer_sync(&dev->vid_timeout);
-
- videobuf_stop(&fh->vb_vidq);
- res_free(fh, AU0828_RESOURCE_VIDEO);
- }
-
- if (res_check(fh, AU0828_RESOURCE_VBI)) {
+ } else if (vdev->vfl_type == VFL_TYPE_VBI &&
+ dev->vbi_timeout_running) {
/* Cancel timeout thread in case they didn't call streamoff */
dev->vbi_timeout_running = 0;
del_timer_sync(&dev->vbi_timeout);
-
- videobuf_stop(&fh->vb_vbiq);
- res_free(fh, AU0828_RESOURCE_VBI);
}
- if (dev->users == 1 && video_is_registered(video_devdata(filp))) {
- au0828_analog_stream_disable(dev);
-
- au0828_uninit_isoc(dev);
+ if (dev->dev_state == DEV_DISCONNECTED)
+ goto end;
+ if (dev->users == 1) {
/* Save some power by putting tuner to sleep */
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
dev->std_set_in_tuner_core = 0;
@@ -1095,13 +1027,10 @@ static int au0828_v4l2_close(struct file *filp)
if (ret < 0)
pr_info("Au0828 can't set alternate to 0!\n");
}
- mutex_unlock(&dev->lock);
-
- videobuf_mmap_free(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vbiq);
- kfree(fh);
+end:
+ _vb2_fop_release(filp, NULL);
dev->users--;
- wake_up_interruptible_nr(&dev->open, 1);
+ mutex_unlock(&dev->lock);
return 0;
}
@@ -1113,6 +1042,9 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
if (dev->std_set_in_tuner_core)
return;
dev->std_set_in_tuner_core = 1;
@@ -1125,98 +1057,6 @@ static void au0828_init_tuner(struct au0828_dev *dev)
i2c_gate_ctrl(dev, 0);
}
-static ssize_t au0828_v4l2_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct au0828_fh *fh = filp->private_data;
- struct au0828_dev *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- au0828_init_tuner(dev);
- mutex_unlock(&dev->lock);
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (res_locked(dev, AU0828_RESOURCE_VIDEO))
- return -EBUSY;
-
- return videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- }
-
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (!res_get(fh, AU0828_RESOURCE_VBI))
- return -EBUSY;
-
- if (dev->vbi_timeout_running == 0) {
- /* Handle case where caller tries to read without
- calling streamon first */
- dev->vbi_timeout_running = 1;
- mod_timer(&dev->vbi_timeout, jiffies + (HZ / 10));
- }
-
- return videobuf_read_stream(&fh->vb_vbiq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- }
-
- return 0;
-}
-
-static unsigned int au0828_v4l2_poll(struct file *filp, poll_table *wait)
-{
- struct au0828_fh *fh = filp->private_data;
- struct au0828_dev *dev = fh->dev;
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res;
-
- if (check_dev(dev) < 0)
- return POLLERR;
-
- res = v4l2_ctrl_poll(filp, wait);
- if (!(req_events & (POLLIN | POLLRDNORM)))
- return res;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- au0828_init_tuner(dev);
- mutex_unlock(&dev->lock);
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (!res_get(fh, AU0828_RESOURCE_VIDEO))
- return POLLERR;
- return res | videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- }
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (!res_get(fh, AU0828_RESOURCE_VBI))
- return POLLERR;
- return res | videobuf_poll_stream(filp, &fh->vb_vbiq, wait);
- }
- return POLLERR;
-}
-
-static int au0828_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct au0828_fh *fh = filp->private_data;
- struct au0828_dev *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_mmap_mapper(&fh->vb_vbiq, vma);
-
- return rc;
-}
-
static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
struct v4l2_format *format)
{
@@ -1268,13 +1108,14 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
return 0;
}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
strlcpy(cap->card, dev->board.name, sizeof(cap->card));
@@ -1300,6 +1141,8 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index)
return -EINVAL;
+ dprintk(1, "%s called\n", __func__);
+
f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
strcpy(f->description, "Packed YUV2");
@@ -1312,8 +1155,10 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
@@ -1329,8 +1174,10 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
}
@@ -1338,15 +1185,17 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
int rc;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
rc = check_dev(dev);
if (rc < 0)
return rc;
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
+ if (vb2_is_busy(&dev->vb_vidq)) {
pr_info("%s queue busy\n", __func__);
rc = -EBUSY;
goto out;
@@ -1359,8 +1208,16 @@ out:
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
+ if (norm == dev->std)
+ return 0;
+
+ if (dev->streaming_users > 0)
+ return -EBUSY;
dev->std = norm;
@@ -1383,8 +1240,10 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
return 0;
@@ -1393,8 +1252,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
unsigned int tmp;
static const char *inames[] = {
@@ -1407,6 +1265,9 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_DEBUG] = "tv debug"
};
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
tmp = input->index;
if (tmp >= AU0828_MAX_INPUT)
@@ -1432,8 +1293,11 @@ static int vidioc_enum_input(struct file *file, void *priv,
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
*i = dev->ctrl_input;
return 0;
}
@@ -1442,6 +1306,9 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
switch (AUVI_INPUT(index).type) {
case AU0828_VMUX_SVIDEO:
dev->input_type = AU0828_VMUX_SVIDEO;
@@ -1491,8 +1358,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "VIDIOC_S_INPUT in function %s, input=%d\n", __func__,
index);
@@ -1510,6 +1376,8 @@ static int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *a)
if (a->index > 1)
return -EINVAL;
+ dprintk(1, "%s called\n", __func__);
+
if (a->index == 0)
strcpy(a->name, "Television");
else
@@ -1521,8 +1389,10 @@ static int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *a)
static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
if (a->index == 0)
@@ -1536,22 +1406,26 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *a)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
if (a->index != dev->ctrl_ainput)
return -EINVAL;
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
if (t->index != 0)
return -EINVAL;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
strcpy(t->name, "Auvitek tuner");
au0828_init_tuner(dev);
@@ -1564,12 +1438,14 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
static int vidioc_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
if (t->index != 0)
return -EINVAL;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
au0828_init_tuner(dev);
i2c_gate_ctrl(dev, 1);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
@@ -1585,11 +1461,12 @@ static int vidioc_s_tuner(struct file *file, void *priv,
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *freq)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
if (freq->tuner != 0)
return -EINVAL;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
}
@@ -1597,13 +1474,15 @@ static int vidioc_g_frequency(struct file *file, void *priv,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *freq)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
struct v4l2_frequency new_freq = *freq;
if (freq->tuner != 0)
return -EINVAL;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
au0828_init_tuner(dev);
i2c_gate_ctrl(dev, 1);
@@ -1625,8 +1504,10 @@ static int vidioc_s_frequency(struct file *file, void *priv,
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
format->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
@@ -1646,12 +1527,14 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cc)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
+
cc->bounds.left = 0;
cc->bounds.top = 0;
cc->bounds.width = dev->width;
@@ -1665,105 +1548,14 @@ static int vidioc_cropcap(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
- int rc = -EINVAL;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (unlikely(type != fh->type))
- return -EINVAL;
-
- dprintk(1, "vidioc_streamon fh=%p t=%d fh->res=%d dev->res=%d\n",
- fh, type, fh->resources, dev->resources);
-
- if (unlikely(!res_get(fh, get_ressource(fh))))
- return -EBUSY;
-
- au0828_init_tuner(dev);
- if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- au0828_analog_stream_enable(dev);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
- }
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- rc = videobuf_streamon(&fh->vb_vidq);
- dev->vid_timeout_running = 1;
- mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- rc = videobuf_streamon(&fh->vb_vbiq);
- dev->vbi_timeout_running = 1;
- mod_timer(&dev->vbi_timeout, jiffies + (HZ / 10));
- }
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
- int rc;
- int i;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- if (type != fh->type)
- return -EINVAL;
-
- dprintk(1, "vidioc_streamoff fh=%p t=%d fh->res=%d dev->res=%d\n",
- fh, type, fh->resources, dev->resources);
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev->vid_timeout_running = 0;
- del_timer_sync(&dev->vid_timeout);
-
- au0828_analog_stream_disable(dev);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
- rc = au0828_stream_interrupt(dev);
- if (rc != 0)
- return rc;
-
- for (i = 0; i < AU0828_MAX_INPUT; i++) {
- if (AUVI_INPUT(i).audio_setup == NULL)
- continue;
- (AUVI_INPUT(i).audio_setup)(dev, 0);
- }
-
- if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh, AU0828_RESOURCE_VIDEO);
- }
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- dev->vbi_timeout_running = 0;
- del_timer_sync(&dev->vbi_timeout);
-
- if (res_check(fh, AU0828_RESOURCE_VBI)) {
- videobuf_streamoff(&fh->vb_vbiq);
- res_free(fh, AU0828_RESOURCE_VBI);
- }
- }
-
- return 0;
-}
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
reg->size = 1;
@@ -1773,8 +1565,10 @@ static int vidioc_g_register(struct file *file, void *priv,
static int vidioc_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
+ struct au0828_dev *dev = video_drvdata(file);
+
+ dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);
}
@@ -1784,93 +1578,13 @@ static int vidioc_log_status(struct file *file, void *fh)
{
struct video_device *vdev = video_devdata(file);
+ dprintk(1, "%s called\n", __func__);
+
v4l2_ctrl_log_status(file, fh);
v4l2_device_call_all(vdev->v4l2_dev, 0, core, log_status);
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_reqbufs(&fh->vb_vidq, rb);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
-
- return rc;
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_querybuf(&fh->vb_vidq, b);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_querybuf(&fh->vb_vbiq, b);
-
- return rc;
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_qbuf(&fh->vb_vidq, b);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_qbuf(&fh->vb_vbiq, b);
-
- return rc;
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct au0828_fh *fh = priv;
- struct au0828_dev *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- /* Workaround for a bug in the au0828 hardware design that sometimes
- results in the colorspace being inverted */
- if (dev->greenscreen_detected == 1) {
- dprintk(1, "Detected green frame. Resetting stream...\n");
- au0828_analog_stream_reset(dev);
- dev->greenscreen_detected = 0;
- }
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
-
- return rc;
-}
-
void au0828_v4l2_suspend(struct au0828_dev *dev)
{
struct urb *urb;
@@ -1938,9 +1652,9 @@ static struct v4l2_file_operations au0828_v4l_fops = {
.owner = THIS_MODULE,
.open = au0828_v4l2_open,
.release = au0828_v4l2_close,
- .read = au0828_v4l2_read,
- .poll = au0828_v4l2_poll,
- .mmap = au0828_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1957,17 +1671,24 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_audio = vidioc_g_audio,
.vidioc_s_audio = vidioc_s_audio,
.vidioc_cropcap = vidioc_cropcap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
@@ -1988,6 +1709,42 @@ static const struct video_device au0828_video_template = {
.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
};
+static int au0828_vb2_setup(struct au0828_dev *dev)
+{
+ int rc;
+ struct vb2_queue *q;
+
+ /* Setup Videobuf2 for Video capture */
+ q = &dev->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct au0828_buffer);
+ q->ops = &au0828_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ rc = vb2_queue_init(q);
+ if (rc < 0)
+ return rc;
+
+ /* Setup Videobuf2 for VBI capture */
+ q = &dev->vb_vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct au0828_buffer);
+ q->ops = &au0828_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ rc = vb2_queue_init(q);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
/**************************************************************************/
int au0828_analog_register(struct au0828_dev *dev,
@@ -2030,7 +1787,6 @@ int au0828_analog_register(struct au0828_dev *dev,
}
if (!(dev->isoc_in_endpointaddr)) {
pr_info("Could not locate isoc endpoint\n");
- kfree(dev);
return -ENODEV;
}
@@ -2039,17 +1795,12 @@ int au0828_analog_register(struct au0828_dev *dev,
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vidq.queued);
INIT_LIST_HEAD(&dev->vbiq.active);
- INIT_LIST_HEAD(&dev->vbiq.queued);
-
- dev->vid_timeout.function = au0828_vid_buffer_timeout;
- dev->vid_timeout.data = (unsigned long) dev;
- init_timer(&dev->vid_timeout);
- dev->vbi_timeout.function = au0828_vbi_buffer_timeout;
- dev->vbi_timeout.data = (unsigned long) dev;
- init_timer(&dev->vbi_timeout);
+ setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
+ (unsigned long)dev);
+ setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
+ (unsigned long)dev);
dev->width = NTSC_STD_W;
dev->height = NTSC_STD_H;
@@ -2078,18 +1829,34 @@ int au0828_analog_register(struct au0828_dev *dev,
goto err_vdev;
}
+ mutex_init(&dev->vb_queue_lock);
+ mutex_init(&dev->vb_vbi_queue_lock);
+
/* Fill the video capture device struct */
*dev->vdev = au0828_video_template;
dev->vdev->v4l2_dev = &dev->v4l2_dev;
dev->vdev->lock = &dev->lock;
+ dev->vdev->queue = &dev->vb_vidq;
+ dev->vdev->queue->lock = &dev->vb_queue_lock;
strcpy(dev->vdev->name, "au0828a video");
/* Setup the VBI device */
*dev->vbi_dev = au0828_video_template;
dev->vbi_dev->v4l2_dev = &dev->v4l2_dev;
dev->vbi_dev->lock = &dev->lock;
+ dev->vbi_dev->queue = &dev->vb_vbiq;
+ dev->vbi_dev->queue->lock = &dev->vb_vbi_queue_lock;
strcpy(dev->vbi_dev->name, "au0828a vbi");
+ /* initialize videobuf2 stuff */
+ retval = au0828_vb2_setup(dev);
+ if (retval != 0) {
+ dprintk(1, "unable to setup videobuf2 queues (error = %d).\n",
+ retval);
+ ret = -ENODEV;
+ goto err_vbi_dev;
+ }
+
/* Register the v4l2 device */
video_set_drvdata(dev->vdev, dev);
retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
@@ -2097,7 +1864,7 @@ int au0828_analog_register(struct au0828_dev *dev,
dprintk(1, "unable to register video device (error = %d).\n",
retval);
ret = -ENODEV;
- goto err_vbi_dev;
+ goto err_reg_vdev;
}
/* Register the vbi device */
@@ -2107,13 +1874,18 @@ int au0828_analog_register(struct au0828_dev *dev,
dprintk(1, "unable to register vbi device (error = %d).\n",
retval);
ret = -ENODEV;
- goto err_vbi_dev;
+ goto err_reg_vbi_dev;
}
dprintk(1, "%s completed!\n", __func__);
return 0;
+err_reg_vbi_dev:
+ video_unregister_device(dev->vdev);
+err_reg_vdev:
+ vb2_queue_release(&dev->vb_vidq);
+ vb2_queue_release(&dev->vb_vbiq);
err_vbi_dev:
video_device_release(dev->vbi_dev);
err_vdev:
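
The au0828_vb2_setup() hunk above captures the core of the videobuf2 conversion: each capture queue is a struct vb2_queue whose drv_priv, buf_struct_size, ops and mem_ops fields hand buffer management over to the vb2 core. A minimal sketch of the same queue-initialization pattern, using hypothetical my_* names rather than anything from this patch:

#include <linux/videodev2.h>
#include <media/videobuf2-vmalloc.h>

/* hypothetical buffer and device types, mirroring au0828_buffer/au0828_dev */
struct my_buffer {
	struct vb2_buffer vb;		/* must be first */
	struct list_head list;
};

struct my_dev {
	struct vb2_queue vidq;
	struct mutex queue_lock;
};

extern const struct vb2_ops my_qops;	/* queue_setup/buf_queue/start_streaming/... */

static int my_vb2_setup(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->vidq;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->drv_priv = dev;			/* handed back via vb2_get_drv_priv() */
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_qops;
	q->mem_ops = &vb2_vmalloc_memops;	/* vmalloc-backed, typical for USB capture */
	q->lock = &dev->queue_lock;		/* serializes the vb2_ioctl_* helpers */

	return vb2_queue_init(q);
}

With the queue assigned to vdev->queue and its lock set, the generic vb2_ioctl_*/vb2_fop_* helpers used elsewhere in this patch can replace the hand-rolled reqbufs/qbuf/streamon handlers.
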
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 36815a369c68..eb1518742ae6 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -28,7 +28,7 @@
/* Analog */
#include <linux/videodev2.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
@@ -126,17 +126,7 @@ enum au0828_dev_state {
DEV_MISCONFIGURED = 0x04
};
-struct au0828_fh {
- /* must be the first field of this struct! */
- struct v4l2_fh fh;
-
- struct au0828_dev *dev;
- unsigned int resources;
-
- struct videobuf_queue vb_vidq;
- struct videobuf_queue vb_vbiq;
- enum v4l2_buf_type type;
-};
+struct au0828_dev;
struct au0828_usb_isoc_ctl {
/* max packet size of isoc transaction */
@@ -177,21 +167,20 @@ struct au0828_usb_isoc_ctl {
/* buffer for one video frame */
struct au0828_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head list;
- struct list_head frame;
+ void *mem;
+ unsigned long length;
int top_field;
- int receiving;
+ /* pointer to vmalloc memory address in vb */
+ char *vb_buf;
};
struct au0828_dmaqueue {
struct list_head active;
- struct list_head queued;
-
- wait_queue_head_t wq;
-
/* Counters to control buffer fill */
- int pos;
+ int pos;
};
struct au0828_dev {
@@ -220,14 +209,26 @@ struct au0828_dev {
struct au0828_rc *ir;
#endif
- int users;
- unsigned int resources; /* resources in use */
struct video_device *vdev;
struct video_device *vbi_dev;
+
+ /* Videobuf2 */
+ struct vb2_queue vb_vidq;
+ struct vb2_queue vb_vbiq;
+ struct mutex vb_queue_lock;
+ struct mutex vb_vbi_queue_lock;
+
+ unsigned int frame_count;
+ unsigned int vbi_frame_count;
+
struct timer_list vid_timeout;
int vid_timeout_running;
struct timer_list vbi_timeout;
int vbi_timeout_running;
+
+ int users;
+ int streaming_users;
+
int width;
int height;
int vbi_width;
@@ -242,7 +243,6 @@ struct au0828_dev {
__u8 isoc_in_endpointaddr;
u8 isoc_init_ok;
int greenscreen_detected;
- unsigned int frame_count;
int ctrl_freq;
int input_type;
int std_set_in_tuner_core;
@@ -277,6 +277,7 @@ struct au0828_dev {
char *dig_transfer_buffer[URB_COUNT];
};
+
/* ----------------------------------------------------------- */
#define au0828_read(dev, reg) au0828_readreg(dev, reg)
#define au0828_write(dev, reg, value) au0828_writereg(dev, reg, value)
@@ -309,13 +310,15 @@ extern int au0828_i2c_unregister(struct au0828_dev *dev);
/* ----------------------------------------------------------- */
/* au0828-video.c */
-int au0828_analog_register(struct au0828_dev *dev,
+extern int au0828_analog_register(struct au0828_dev *dev,
struct usb_interface *interface);
-int au0828_analog_stream_disable(struct au0828_dev *d);
-void au0828_analog_unregister(struct au0828_dev *dev);
+extern void au0828_analog_unregister(struct au0828_dev *dev);
+extern int au0828_start_analog_streaming(struct vb2_queue *vq,
+ unsigned int count);
+extern void au0828_stop_vbi_streaming(struct vb2_queue *vq);
#ifdef CONFIG_VIDEO_AU0828_V4L2
-void au0828_v4l2_suspend(struct au0828_dev *dev);
-void au0828_v4l2_resume(struct au0828_dev *dev);
+extern void au0828_v4l2_suspend(struct au0828_dev *dev);
+extern void au0828_v4l2_resume(struct au0828_dev *dev);
#else
static inline void au0828_v4l2_suspend(struct au0828_dev *dev) { };
static inline void au0828_v4l2_resume(struct au0828_dev *dev) { };
@@ -329,7 +332,7 @@ void au0828_dvb_suspend(struct au0828_dev *dev);
void au0828_dvb_resume(struct au0828_dev *dev);
/* au0828-vbi.c */
-extern struct videobuf_queue_ops au0828_vbi_qops;
+extern struct vb2_ops au0828_vbi_qops;
#define dprintk(level, fmt, arg...)\
do { if (au0828_debug & level)\
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index ae05d591f228..da03733690bd 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1403,7 +1403,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
struct usb_interface_assoc_descriptor *assoc_desc;
ifnum = interface->altsetting[0].desc.bInterfaceNumber;
- udev = usb_get_dev(interface_to_usbdev(interface));
/*
* Interface number 0 - IR interface (handled by mceusb driver)
@@ -1424,11 +1423,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
} while (test_and_set_bit(nr, &cx231xx_devused));
+ udev = usb_get_dev(interface_to_usbdev(interface));
+
/* allocate memory for our device state and initialize it */
dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
- clear_bit(nr, &cx231xx_devused);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_if;
}
snprintf(dev->name, 29, "cx231xx #%d", nr);
@@ -1582,7 +1583,7 @@ err_v4l2:
usb_set_intfdata(interface, NULL);
err_if:
usb_put_dev(udev);
- clear_bit(dev->devno, &cx231xx_devused);
+ clear_bit(nr, &cx231xx_devused);
return retval;
}
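
The cx231xx probe fix above is an ordering and cleanup issue: the usb_get_dev() reference is now taken only after a device slot has been claimed, and the error path releases exactly what was acquired (clear_bit(nr, ...) instead of reading dev->devno from a structure that may not exist yet). A schematic of that acquire/release ordering, with hypothetical names (my_devused, MY_MAX_DEVS and struct my_dev are not from this patch):

#include <linux/bitops.h>
#include <linux/usb.h>

#define MY_MAX_DEVS 8
static unsigned long my_devused;	/* bitmap of claimed device slots */

struct my_dev {
	int nr;
};

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_device *udev;
	struct my_dev *dev;
	int nr, retval;

	/* claim a device slot first */
	do {
		nr = find_first_zero_bit(&my_devused, MY_MAX_DEVS);
		if (nr >= MY_MAX_DEVS)
			return -ENOMEM;
	} while (test_and_set_bit(nr, &my_devused));

	/* only now take the reference that the error path must drop */
	udev = usb_get_dev(interface_to_usbdev(intf));

	dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto err_slot;
	}
	dev->nr = nr;

	/* ... remaining initialization ... */
	return 0;

err_slot:
	usb_put_dev(udev);
	clear_bit(nr, &my_devused);	/* undo in reverse order of acquisition */
	return retval;
}
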
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 53ca12c1ff69..ecea76fe07f6 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2062,7 +2062,6 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
- vfd->debug = video_debug;
vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index f9e262eb0db9..6d6f3ee812f6 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -532,15 +532,7 @@ struct cx231xx_video_mode {
unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
u16 end_point_addr;
};
-/*
-struct cx23885_dmaqueue {
- struct list_head active;
- struct list_head queued;
- struct timer_list timeout;
- struct btcx_riscmem stopper;
- u32 count;
-};
-*/
+
struct cx231xx_tsport {
struct cx231xx *dev;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 14e111e13e54..41c6363dff08 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -354,6 +354,7 @@ struct dvb_usb_adapter {
* @name: device name
* @rc_map: name of rc codes table
* @rc_polling_active: set when RC polling is active
+ * @intf: pointer to the device's struct usb_interface
* @udev: pointer to the device's struct usb_device
* @rc: remote controller configuration
* @powered: indicates whether the device is powered or not
@@ -370,6 +371,7 @@ struct dvb_usb_device {
const char *name;
const char *rc_map;
bool rc_polling_active;
+ struct usb_interface *intf;
struct usb_device *udev;
struct dvb_usb_rc rc;
int powered;
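
Keeping the probing usb_interface in struct dvb_usb_device is what lets the later rtl28xxu hunks log with dev_dbg(&d->intf->dev, ...) instead of dev_dbg(&d->udev->dev, "%s: ...", __func__): messages are then attributed to the specific USB interface, and dynamic debug can add the function name when wanted. A condensed illustration with a hypothetical private struct (not the real dvb_usb API):

#include <linux/usb.h>

struct my_usb_dev {			/* hypothetical, stands in for dvb_usb_device */
	struct usb_interface *intf;
	struct usb_device *udev;
};

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct my_usb_dev *d;

	d = devm_kzalloc(&intf->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->intf = intf;			/* remember the interface we bound to */
	d->udev = interface_to_usbdev(intf);
	usb_set_intfdata(intf, d);

	/* log against the interface, not the whole USB device */
	dev_dbg(&d->intf->dev, "probed\n");
	return 0;
}
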
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 1950f37df835..9913e0f59485 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -868,6 +868,7 @@ int dvb_usbv2_probe(struct usb_interface *intf,
goto err;
}
+ d->intf = intf;
d->name = driver_info->name;
d->rc_map = driver_info->rc_map;
d->udev = udev;
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 994de53a574b..5de6f7c04d09 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -126,9 +126,9 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct lme2510_state {
unsigned long int_urb_due;
+ fe_status_t lock_status;
u8 id;
u8 tuner_config;
- u8 signal_lock;
u8 signal_level;
u8 signal_sn;
u8 time_key;
@@ -143,6 +143,12 @@ struct lme2510_state {
void *buffer;
struct urb *lme_urb;
void *usb_buffer;
+ /* Frontend original calls */
+ int (*fe_read_status)(struct dvb_frontend *, fe_status_t *);
+ int (*fe_read_signal_strength)(struct dvb_frontend *, u16 *);
+ int (*fe_read_snr)(struct dvb_frontend *, u16 *);
+ int (*fe_read_ber)(struct dvb_frontend *, u32 *);
+ int (*fe_read_ucblocks)(struct dvb_frontend *, u32 *);
int (*fe_set_voltage)(struct dvb_frontend *, fe_sec_voltage_t);
u8 dvb_usb_lme2510_firmware;
};
@@ -258,6 +264,7 @@ static void lme2510_int_response(struct urb *lme_urb)
static u8 *ibuf, *rbuf;
int i = 0, offset;
u32 key;
+ u8 signal_lock = 0;
switch (lme_urb->status) {
case 0:
@@ -298,8 +305,7 @@ static void lme2510_int_response(struct urb *lme_urb)
case 0xbb:
switch (st->tuner_config) {
case TUNER_LG:
- if (ibuf[2] > 0)
- st->signal_lock = ibuf[2];
+ signal_lock = ibuf[2] & BIT(5);
st->signal_level = ibuf[4];
st->signal_sn = ibuf[3];
st->time_key = ibuf[7];
@@ -308,29 +314,29 @@ static void lme2510_int_response(struct urb *lme_urb)
case TUNER_S0194:
/* Tweak for earlier firmware*/
if (ibuf[1] == 0x03) {
- if (ibuf[2] > 1)
- st->signal_lock = ibuf[2];
+ signal_lock = ibuf[2] & BIT(4);
st->signal_level = ibuf[3];
st->signal_sn = ibuf[4];
} else {
st->signal_level = ibuf[4];
st->signal_sn = ibuf[5];
- st->signal_lock =
- (st->signal_lock & 0xf7) +
- ((ibuf[2] & 0x01) << 0x03);
}
break;
case TUNER_RS2000:
- if (ibuf[2] & 0x1)
- st->signal_lock = 0xff;
- else
- st->signal_lock = 0x00;
+ signal_lock = ibuf[2] & 0xee;
st->signal_level = ibuf[5];
st->signal_sn = ibuf[4];
st->time_key = ibuf[7];
default:
break;
}
+
+ /* The interrupt may also report just BIT(0) as the lock flag */
+ signal_lock |= ibuf[2] & BIT(0);
+
+ if (!signal_lock)
+ st->lock_status &= ~FE_HAS_LOCK;
+
debug_data_snipet(5, "INT Remote data snipet in", ibuf);
break;
case 0xcc:
@@ -344,15 +350,17 @@ static void lme2510_int_response(struct urb *lme_urb)
usb_submit_urb(lme_urb, GFP_ATOMIC);
- /* interrupt urb is due every 48 msecs while streaming
- * add 12msecs for system lag */
- st->int_urb_due = jiffies + msecs_to_jiffies(60);
+ /* The interrupt urb is due every 48 msecs while streaming; the buffer
+ * stores up to 4 periods if missed. Allow 200 msecs for the next interrupt.
+ */
+ st->int_urb_due = jiffies + msecs_to_jiffies(200);
}
static int lme2510_int_read(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
struct lme2510_state *lme_int = adap_to_priv(adap);
+ struct usb_host_endpoint *ep;
lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -374,6 +382,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
adap,
8);
+ /* Quirk: the pipe reports PIPE_BULK but behaves as an interrupt pipe */
+ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
+
lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
@@ -449,170 +463,13 @@ static int lme2510_return_status(struct dvb_usb_device *d)
static int lme2510_msg(struct dvb_usb_device *d,
u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int ret = 0;
struct lme2510_state *st = d->priv;
- if (st->i2c_talk_onoff == 1) {
-
- ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
-
- switch (st->tuner_config) {
- case TUNER_LG:
- if (wbuf[2] == 0x1c) {
- if (wbuf[3] == 0x0e) {
- st->signal_lock = rbuf[1];
- if ((st->stream_on & 1) &&
- (st->signal_lock & 0x10)) {
- lme2510_stream_restart(d);
- st->i2c_talk_onoff = 0;
- }
- msleep(80);
- }
- }
- break;
- case TUNER_S7395:
- if (wbuf[2] == 0xd0) {
- if (wbuf[3] == 0x24) {
- st->signal_lock = rbuf[1];
- if ((st->stream_on & 1) &&
- (st->signal_lock & 0x8)) {
- lme2510_stream_restart(d);
- st->i2c_talk_onoff = 0;
- }
- }
- }
- break;
- case TUNER_S0194:
- if (wbuf[2] == 0xd0) {
- if (wbuf[3] == 0x1b) {
- st->signal_lock = rbuf[1];
- if ((st->stream_on & 1) &&
- (st->signal_lock & 0x8)) {
- lme2510_stream_restart(d);
- st->i2c_talk_onoff = 0;
- }
- }
- }
- break;
- case TUNER_RS2000:
- default:
- break;
- }
- } else {
- /* TODO rewrite this section */
- switch (st->tuner_config) {
- case TUNER_LG:
- switch (wbuf[3]) {
- case 0x0e:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_lock;
- break;
- case 0x43:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_level;
- break;
- case 0x1c:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_sn;
- break;
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x18:
- rbuf[0] = 0x55;
- rbuf[1] = 0x00;
- break;
- default:
- lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
- st->i2c_talk_onoff = 1;
- break;
- }
- break;
- case TUNER_S7395:
- switch (wbuf[3]) {
- case 0x10:
- rbuf[0] = 0x55;
- rbuf[1] = (st->signal_level & 0x80)
- ? 0 : (st->signal_level * 2);
- break;
- case 0x2d:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_sn;
- break;
- case 0x24:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_lock;
- break;
- case 0x2e:
- case 0x26:
- case 0x27:
- rbuf[0] = 0x55;
- rbuf[1] = 0x00;
- break;
- default:
- lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
- st->i2c_talk_onoff = 1;
- break;
- }
- break;
- case TUNER_S0194:
- switch (wbuf[3]) {
- case 0x18:
- rbuf[0] = 0x55;
- rbuf[1] = (st->signal_level & 0x80)
- ? 0 : (st->signal_level * 2);
- break;
- case 0x24:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_sn;
- break;
- case 0x1b:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_lock;
- break;
- case 0x19:
- case 0x25:
- case 0x1e:
- case 0x1d:
- rbuf[0] = 0x55;
- rbuf[1] = 0x00;
- break;
- default:
- lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
- st->i2c_talk_onoff = 1;
- break;
- }
- break;
- case TUNER_RS2000:
- switch (wbuf[3]) {
- case 0x8c:
- rbuf[0] = 0x55;
- rbuf[1] = st->signal_lock;
-
- /* If int_urb_due overdue
- * set rbuf[1] to 0 to clear lock */
- if (time_after(jiffies, st->int_urb_due))
- rbuf[1] = 0;
-
- break;
- default:
- lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
- st->i2c_talk_onoff = 1;
- break;
- }
- default:
- break;
- }
-
- deb_info(4, "I2C From Interrupt Message out(%02x) in(%02x)",
- wbuf[3], rbuf[1]);
-
- }
+ st->i2c_talk_onoff = 1;
- return ret;
+ return lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
}
-
static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
@@ -935,26 +792,8 @@ static struct stv0299_config sharp_z0194_config = {
.set_symbol_rate = sharp_z0194a_set_symbol_rate,
};
-static int dm04_rs2000_set_ts_param(struct dvb_frontend *fe,
- int caller)
-{
- struct dvb_usb_adapter *adap = fe_to_adap(fe);
- struct dvb_usb_device *d = adap_to_d(adap);
- struct lme2510_state *st = d->priv;
-
- mutex_lock(&d->i2c_mutex);
- if ((st->i2c_talk_onoff == 1) && (st->stream_on & 1)) {
- st->i2c_talk_onoff = 0;
- lme2510_stream_restart(d);
- }
- mutex_unlock(&d->i2c_mutex);
-
- return 0;
-}
-
static struct m88rs2000_config m88rs2000_config = {
- .demod_addr = 0x68,
- .set_ts_params = dm04_rs2000_set_ts_param,
+ .demod_addr = 0x68
};
static struct ts2020_config ts2020_config = {
@@ -998,27 +837,101 @@ static int dm04_lme2510_set_voltage(struct dvb_frontend *fe,
return (ret < 0) ? -ENODEV : 0;
}
-static int dm04_rs2000_read_signal_strength(struct dvb_frontend *fe,
- u16 *strength)
+static int dm04_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+ struct lme2510_state *st = d->priv;
+ int ret = 0;
+
+ if (st->i2c_talk_onoff) {
+ if (st->fe_read_status) {
+ ret = st->fe_read_status(fe, status);
+ if (ret < 0)
+ return ret;
+ }
+
+ st->lock_status = *status;
+
+ if (*status & FE_HAS_LOCK && st->stream_on) {
+ mutex_lock(&d->i2c_mutex);
+
+ st->i2c_talk_onoff = 0;
+ ret = lme2510_stream_restart(d);
+
+ mutex_unlock(&d->i2c_mutex);
+ }
+
+ return ret;
+ }
+
+ /* Timeout of interrupt reached on RS2000 */
+ if (st->tuner_config == TUNER_RS2000 &&
+ time_after(jiffies, st->int_urb_due))
+ st->lock_status &= ~FE_HAS_LOCK;
+
+ *status = st->lock_status;
+
+ if (!(*status & FE_HAS_LOCK))
+ st->i2c_talk_onoff = 1;
+
+ return ret;
+}
+
+static int dm04_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct lme2510_state *st = fe_to_priv(fe);
- *strength = (u16)((u32)st->signal_level * 0xffff / 0xff);
+ if (st->fe_read_signal_strength && !st->stream_on)
+ return st->fe_read_signal_strength(fe, strength);
+
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ *strength = 0xff - st->signal_level;
+ *strength |= *strength << 8;
+ break;
+ case TUNER_S7395:
+ case TUNER_S0194:
+ *strength = 0xffff - (((st->signal_level * 2) << 8) * 5 / 4);
+ break;
+ case TUNER_RS2000:
+ *strength = (u16)((u32)st->signal_level * 0xffff / 0xff);
+ }
return 0;
}
-static int dm04_rs2000_read_snr(struct dvb_frontend *fe, u16 *snr)
+static int dm04_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct lme2510_state *st = fe_to_priv(fe);
- *snr = (u16)((u32)st->signal_sn * 0xffff / 0x7f);
+ if (st->fe_read_snr && !st->stream_on)
+ return st->fe_read_snr(fe, snr);
+
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ *snr = 0xff - st->signal_sn;
+ *snr |= *snr << 8;
+ break;
+ case TUNER_S7395:
+ case TUNER_S0194:
+ *snr = (u16)((0xff - st->signal_sn - 0xa1) * 3) << 8;
+ break;
+ case TUNER_RS2000:
+ *snr = (u16)((u32)st->signal_sn * 0xffff / 0x7f);
+ }
return 0;
}
static int dm04_read_ber(struct dvb_frontend *fe, u32 *ber)
{
+ struct lme2510_state *st = fe_to_priv(fe);
+
+ if (st->fe_read_ber && !st->stream_on)
+ return st->fe_read_ber(fe, ber);
+
*ber = 0;
return 0;
@@ -1026,6 +939,11 @@ static int dm04_read_ber(struct dvb_frontend *fe, u32 *ber)
static int dm04_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
+ struct lme2510_state *st = fe_to_priv(fe);
+
+ if (st->fe_read_ucblocks && !st->stream_on)
+ return st->fe_read_ucblocks(fe, ucblocks);
+
*ucblocks = 0;
return 0;
@@ -1119,15 +1037,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
st->tuner_config = TUNER_RS2000;
st->fe_set_voltage =
adap->fe[0]->ops.set_voltage;
-
- adap->fe[0]->ops.read_signal_strength =
- dm04_rs2000_read_signal_strength;
- adap->fe[0]->ops.read_snr =
- dm04_rs2000_read_snr;
- adap->fe[0]->ops.read_ber =
- dm04_read_ber;
- adap->fe[0]->ops.read_ucblocks =
- dm04_read_ucblocks;
}
break;
}
@@ -1146,7 +1055,19 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
}
+ st->fe_read_status = adap->fe[0]->ops.read_status;
+ st->fe_read_signal_strength = adap->fe[0]->ops.read_signal_strength;
+ st->fe_read_snr = adap->fe[0]->ops.read_snr;
+ st->fe_read_ber = adap->fe[0]->ops.read_ber;
+ st->fe_read_ucblocks = adap->fe[0]->ops.read_ucblocks;
+
+ adap->fe[0]->ops.read_status = dm04_read_status;
+ adap->fe[0]->ops.read_signal_strength = dm04_read_signal_strength;
+ adap->fe[0]->ops.read_snr = dm04_read_snr;
+ adap->fe[0]->ops.read_ber = dm04_read_ber;
+ adap->fe[0]->ops.read_ucblocks = dm04_read_ucblocks;
adap->fe[0]->ops.set_voltage = dm04_lme2510_set_voltage;
+
ret = lme_name(adap);
return ret;
}
@@ -1288,7 +1209,6 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
if (st->usb_buffer != NULL) {
st->i2c_talk_onoff = 1;
- st->signal_lock = 0;
st->signal_level = 0;
st->signal_sn = 0;
buffer = st->usb_buffer;
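
The lmedm04 rework above replaces the per-tuner register spoofing in lme2510_msg() with a generic wrap: the original frontend read_* ops are saved in the driver state at attach time and substituted with wrappers that call the real demod when I2C traffic is allowed, and otherwise serve values cached from the interrupt URB. A reduced sketch of that save-and-wrap idea for a single op (hypothetical my_state/my_wrap_frontend names; placing the state in demodulator_priv is an assumption, the driver itself uses fe_to_priv()):

#include "dvb_frontend.h"	/* dvb-core header, as used by the drivers above */

struct my_state {
	int (*orig_read_status)(struct dvb_frontend *fe, fe_status_t *status);
	fe_status_t cached_status;	/* kept up to date by the interrupt URB */
	bool streaming;
};

static int my_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
	struct my_state *st = fe->demodulator_priv;	/* assumed placement */
	int ret;

	if (!st->streaming) {
		/* idle: ask the real demod and remember the answer */
		ret = st->orig_read_status(fe, status);
		if (ret == 0)
			st->cached_status = *status;
		return ret;
	}

	/* streaming: avoid I2C traffic, serve the cached status */
	*status = st->cached_status;
	return 0;
}

static void my_wrap_frontend(struct dvb_frontend *fe, struct my_state *st)
{
	st->orig_read_status = fe->ops.read_status;
	fe->ops.read_status = my_read_status;
}
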
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 0a98d04c53e4..ecefa5c477fa 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -604,9 +604,3 @@ MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 2d4530f5be54..0bd83e52669c 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -47,9 +47,3 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
#endif /* CONFIG_DVB_USB_MXL111SF */
#endif /* __MXL111SF_DEMOD_H__ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index a619410adde4..2180c13a6dcc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -755,9 +755,3 @@ int mxl111sf_gpio_mode_switch(struct mxl111sf_state *state, unsigned int mode)
}
return 0;
}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index b85a5772d771..16fa4d4daf88 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -48,9 +48,3 @@ int mxl111sf_config_pin_mux_modes(struct mxl111sf_state *state,
enum mxl111sf_mux_config pin_mux_config);
#endif /* _DVB_USB_MXL111SF_GPIO_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index a101d06eb143..283495c84ba3 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -842,9 +842,3 @@ int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
return i == num ? num : -EREMOTEIO;
}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index 465762145ad2..c486fe02f018 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -27,9 +27,3 @@ int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg msg[], int num);
#endif /* _DVB_USB_MXL111SF_I2C_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index f6b348024bec..5b0191178f9f 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -335,9 +335,3 @@ int mxl111sf_idac_config(struct mxl111sf_state *state,
return ret;
}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index 0643738de7de..25aa4a1ea755 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -45,9 +45,3 @@ int mxl111sf_idac_config(struct mxl111sf_state *state,
u8 current_value, u8 hysteresis_value);
#endif /* _DVB_USB_MXL111SF_PHY_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 89bf115e927e..1f4bfbcdbabb 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -171,9 +171,3 @@
#define V6_DIG_RF_PWR_MSB_REG 0x47
#endif /* _DVB_USB_MXL111SF_REG_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index a8d2c7053674..444579be0b77 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -515,11 +515,3 @@ MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 2046db22519e..e6caab21a197 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -77,12 +77,3 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
#endif
#endif /* __MXL111SF_TUNER_H__ */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
-
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index c3447eaf1104..bec12b0e076b 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1425,9 +1425,3 @@ MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 8516c011b7cc..ee70df1f1e94 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -152,9 +152,3 @@ extern int dvb_usb_mxl111sf_debug;
})
#endif /* _DVB_USB_MXL111SF_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 896a225ee011..77dcfdf547ac 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -22,42 +22,6 @@
#include "rtl28xxu.h"
-#include "rtl2830.h"
-#include "rtl2832.h"
-#include "rtl2832_sdr.h"
-#include "mn88472.h"
-#include "mn88473.h"
-
-#include "qt1010.h"
-#include "mt2060.h"
-#include "mxl5005s.h"
-#include "fc0012.h"
-#include "fc0013.h"
-#include "e4000.h"
-#include "fc2580.h"
-#include "tua9001.h"
-#include "r820t.h"
-
-
-#ifdef CONFIG_MEDIA_ATTACH
-#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
- void *__r = NULL; \
- typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
- if (__a) { \
- __r = (void *) __a(ARGS); \
- if (__r == NULL) \
- symbol_put(FUNCTION); \
- } \
- __r; \
-})
-
-#else
-#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
- FUNCTION(ARGS); \
-})
-
-#endif
-
static int rtl28xxu_disable_rc;
module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
@@ -65,20 +29,14 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
{
+ struct rtl28xxu_dev *dev = d->priv;
int ret;
unsigned int pipe;
u8 requesttype;
- u8 *buf;
-
- buf = kmalloc(req->size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
if (req->index & CMD_WR_FLAG) {
/* write */
- memcpy(buf, req->data, req->size);
+ memcpy(dev->buf, req->data, req->size);
requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT);
pipe = usb_sndctrlpipe(d->udev, 0);
} else {
@@ -88,30 +46,23 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
}
ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
- req->index, buf, req->size, 1000);
-
+ req->index, dev->buf, req->size, 1000);
dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value,
- req->index, buf, req->size);
-
- if (ret > 0)
- ret = 0;
+ req->index, dev->buf, req->size);
+ if (ret < 0)
+ goto err;
/* read request, copy returned data to return buf */
- if (!ret && requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
- memcpy(req->data, buf, req->size);
+ if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
+ memcpy(req->data, dev->buf, req->size);
- kfree(buf);
-
- if (ret)
- goto err;
-
- return ret;
+ return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
-static int rtl28xx_wr_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
+static int rtl28xxu_wr_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
{
struct rtl28xxu_req req;
@@ -129,7 +80,7 @@ static int rtl28xx_wr_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
return rtl28xxu_ctrl_msg(d, &req);
}
-static int rtl2831_rd_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
+static int rtl28xxu_rd_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
{
struct rtl28xxu_req req;
@@ -147,17 +98,17 @@ static int rtl2831_rd_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
return rtl28xxu_ctrl_msg(d, &req);
}
-static int rtl28xx_wr_reg(struct dvb_usb_device *d, u16 reg, u8 val)
+static int rtl28xxu_wr_reg(struct dvb_usb_device *d, u16 reg, u8 val)
{
- return rtl28xx_wr_regs(d, reg, &val, 1);
+ return rtl28xxu_wr_regs(d, reg, &val, 1);
}
-static int rtl28xx_rd_reg(struct dvb_usb_device *d, u16 reg, u8 *val)
+static int rtl28xxu_rd_reg(struct dvb_usb_device *d, u16 reg, u8 *val)
{
- return rtl2831_rd_regs(d, reg, val, 1);
+ return rtl28xxu_rd_regs(d, reg, val, 1);
}
-static int rtl28xx_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val,
+static int rtl28xxu_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val,
u8 mask)
{
int ret;
@@ -165,7 +116,7 @@ static int rtl28xx_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val,
/* no need for read if whole reg is written */
if (mask != 0xff) {
- ret = rtl28xx_rd_reg(d, reg, &tmp);
+ ret = rtl28xxu_rd_reg(d, reg, &tmp);
if (ret)
return ret;
@@ -174,7 +125,7 @@ static int rtl28xx_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val,
val |= tmp;
}
- return rtl28xx_wr_reg(d, reg, val);
+ return rtl28xxu_wr_reg(d, reg, val);
}
/* I2C */
@@ -183,7 +134,7 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
int ret;
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- struct rtl28xxu_priv *priv = d->priv;
+ struct rtl28xxu_dev *dev = d->priv;
struct rtl28xxu_req req;
/*
@@ -220,7 +171,7 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
} else if (msg[0].addr == 0x10) {
/* method 1 - integrated demod */
req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
- req.index = CMD_DEMOD_RD | priv->page;
+ req.index = CMD_DEMOD_RD | dev->page;
req.size = msg[1].len;
req.data = &msg[1].buf[0];
ret = rtl28xxu_ctrl_msg(d, &req);
@@ -256,12 +207,12 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
/* method 1 - integrated demod */
if (msg[0].buf[0] == 0x00) {
/* save demod page for later demod access */
- priv->page = msg[0].buf[1];
+ dev->page = msg[0].buf[1];
ret = 0;
} else {
req.value = (msg[0].buf[0] << 8) |
(msg[0].addr << 1);
- req.index = CMD_DEMOD_WR | priv->page;
+ req.index = CMD_DEMOD_WR | dev->page;
req.size = msg[0].len-1;
req.data = &msg[0].buf[1];
ret = rtl28xxu_ctrl_msg(d, &req);
@@ -303,7 +254,7 @@ static struct i2c_algorithm rtl28xxu_i2c_algo = {
static int rtl2831u_read_config(struct dvb_usb_device *d)
{
- struct rtl28xxu_priv *priv = d_to_priv(d);
+ struct rtl28xxu_dev *dev = d_to_priv(d);
int ret;
u8 buf[1];
/* open RTL2831U/RTL2830 I2C gate */
@@ -312,7 +263,7 @@ static int rtl2831u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_mt2060 = {0x00c0, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_qt1010 = {0x0fc4, CMD_I2C_RD, 1, buf};
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&d->intf->dev, "\n");
/*
* RTL2831U GPIOs
@@ -323,12 +274,12 @@ static int rtl2831u_read_config(struct dvb_usb_device *d)
*/
/* GPIO direction */
- ret = rtl28xx_wr_reg(d, SYS_GPIO_DIR, 0x0a);
+ ret = rtl28xxu_wr_reg(d, SYS_GPIO_DIR, 0x0a);
if (ret)
goto err;
/* enable as output GPIO0, GPIO2, GPIO4 */
- ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_EN, 0x15);
+ ret = rtl28xxu_wr_reg(d, SYS_GPIO_OUT_EN, 0x15);
if (ret)
goto err;
@@ -340,7 +291,7 @@ static int rtl2831u_read_config(struct dvb_usb_device *d)
/* demod needs some time to wake up */
msleep(20);
- priv->tuner_name = "NONE";
+ dev->tuner_name = "NONE";
/* open demod I2C gate */
ret = rtl28xxu_ctrl_msg(d, &req_gate_open);
@@ -350,8 +301,8 @@ static int rtl2831u_read_config(struct dvb_usb_device *d)
/* check QT1010 ID(?) register; reg=0f val=2c */
ret = rtl28xxu_ctrl_msg(d, &req_qt1010);
if (ret == 0 && buf[0] == 0x2c) {
- priv->tuner = TUNER_RTL2830_QT1010;
- priv->tuner_name = "QT1010";
+ dev->tuner = TUNER_RTL2830_QT1010;
+ dev->tuner_name = "QT1010";
goto found;
}
@@ -363,28 +314,28 @@ static int rtl2831u_read_config(struct dvb_usb_device *d)
/* check MT2060 ID register; reg=00 val=63 */
ret = rtl28xxu_ctrl_msg(d, &req_mt2060);
if (ret == 0 && buf[0] == 0x63) {
- priv->tuner = TUNER_RTL2830_MT2060;
- priv->tuner_name = "MT2060";
+ dev->tuner = TUNER_RTL2830_MT2060;
+ dev->tuner_name = "MT2060";
goto found;
}
/* assume MXL5005S */
- priv->tuner = TUNER_RTL2830_MXL5005S;
- priv->tuner_name = "MXL5005S";
+ dev->tuner = TUNER_RTL2830_MXL5005S;
+ dev->tuner_name = "MXL5005S";
goto found;
found:
- dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name);
+ dev_dbg(&d->intf->dev, "tuner=%s\n", dev->tuner_name);
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
static int rtl2832u_read_config(struct dvb_usb_device *d)
{
- struct rtl28xxu_priv *priv = d_to_priv(d);
+ struct rtl28xxu_dev *dev = d_to_priv(d);
int ret;
u8 buf[2];
/* open RTL2832U/RTL2832 I2C gate */
@@ -407,14 +358,14 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_mn88472 = {0xff38, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_mn88473 = {0xff38, CMD_I2C_RD, 1, buf};
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&d->intf->dev, "\n");
/* enable GPIO3 and GPIO6 as output */
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x40);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x40);
if (ret)
goto err;
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x48, 0x48);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x48, 0x48);
if (ret)
goto err;
@@ -428,134 +379,134 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret)
goto err;
- priv->tuner_name = "NONE";
+ dev->tuner_name = "NONE";
/* check FC0012 ID register; reg=00 val=a1 */
ret = rtl28xxu_ctrl_msg(d, &req_fc0012);
if (ret == 0 && buf[0] == 0xa1) {
- priv->tuner = TUNER_RTL2832_FC0012;
- priv->tuner_name = "FC0012";
+ dev->tuner = TUNER_RTL2832_FC0012;
+ dev->tuner_name = "FC0012";
goto tuner_found;
}
/* check FC0013 ID register; reg=00 val=a3 */
ret = rtl28xxu_ctrl_msg(d, &req_fc0013);
if (ret == 0 && buf[0] == 0xa3) {
- priv->tuner = TUNER_RTL2832_FC0013;
- priv->tuner_name = "FC0013";
+ dev->tuner = TUNER_RTL2832_FC0013;
+ dev->tuner_name = "FC0013";
goto tuner_found;
}
/* check MT2266 ID register; reg=00 val=85 */
ret = rtl28xxu_ctrl_msg(d, &req_mt2266);
if (ret == 0 && buf[0] == 0x85) {
- priv->tuner = TUNER_RTL2832_MT2266;
- priv->tuner_name = "MT2266";
+ dev->tuner = TUNER_RTL2832_MT2266;
+ dev->tuner_name = "MT2266";
goto tuner_found;
}
/* check FC2580 ID register; reg=01 val=56 */
ret = rtl28xxu_ctrl_msg(d, &req_fc2580);
if (ret == 0 && buf[0] == 0x56) {
- priv->tuner = TUNER_RTL2832_FC2580;
- priv->tuner_name = "FC2580";
+ dev->tuner = TUNER_RTL2832_FC2580;
+ dev->tuner_name = "FC2580";
goto tuner_found;
}
/* check MT2063 ID register; reg=00 val=9e || 9c */
ret = rtl28xxu_ctrl_msg(d, &req_mt2063);
if (ret == 0 && (buf[0] == 0x9e || buf[0] == 0x9c)) {
- priv->tuner = TUNER_RTL2832_MT2063;
- priv->tuner_name = "MT2063";
+ dev->tuner = TUNER_RTL2832_MT2063;
+ dev->tuner_name = "MT2063";
goto tuner_found;
}
/* check MAX3543 ID register; reg=00 val=38 */
ret = rtl28xxu_ctrl_msg(d, &req_max3543);
if (ret == 0 && buf[0] == 0x38) {
- priv->tuner = TUNER_RTL2832_MAX3543;
- priv->tuner_name = "MAX3543";
+ dev->tuner = TUNER_RTL2832_MAX3543;
+ dev->tuner_name = "MAX3543";
goto tuner_found;
}
/* check TUA9001 ID register; reg=7e val=2328 */
ret = rtl28xxu_ctrl_msg(d, &req_tua9001);
if (ret == 0 && buf[0] == 0x23 && buf[1] == 0x28) {
- priv->tuner = TUNER_RTL2832_TUA9001;
- priv->tuner_name = "TUA9001";
+ dev->tuner = TUNER_RTL2832_TUA9001;
+ dev->tuner_name = "TUA9001";
goto tuner_found;
}
/* check MXL5007R ID register; reg=d9 val=14 */
ret = rtl28xxu_ctrl_msg(d, &req_mxl5007t);
if (ret == 0 && buf[0] == 0x14) {
- priv->tuner = TUNER_RTL2832_MXL5007T;
- priv->tuner_name = "MXL5007T";
+ dev->tuner = TUNER_RTL2832_MXL5007T;
+ dev->tuner_name = "MXL5007T";
goto tuner_found;
}
/* check E4000 ID register; reg=02 val=40 */
ret = rtl28xxu_ctrl_msg(d, &req_e4000);
if (ret == 0 && buf[0] == 0x40) {
- priv->tuner = TUNER_RTL2832_E4000;
- priv->tuner_name = "E4000";
+ dev->tuner = TUNER_RTL2832_E4000;
+ dev->tuner_name = "E4000";
goto tuner_found;
}
/* check TDA18272 ID register; reg=00 val=c760 */
ret = rtl28xxu_ctrl_msg(d, &req_tda18272);
if (ret == 0 && (buf[0] == 0xc7 || buf[1] == 0x60)) {
- priv->tuner = TUNER_RTL2832_TDA18272;
- priv->tuner_name = "TDA18272";
+ dev->tuner = TUNER_RTL2832_TDA18272;
+ dev->tuner_name = "TDA18272";
goto tuner_found;
}
/* check R820T ID register; reg=00 val=69 */
ret = rtl28xxu_ctrl_msg(d, &req_r820t);
if (ret == 0 && buf[0] == 0x69) {
- priv->tuner = TUNER_RTL2832_R820T;
- priv->tuner_name = "R820T";
+ dev->tuner = TUNER_RTL2832_R820T;
+ dev->tuner_name = "R820T";
goto tuner_found;
}
/* check R828D ID register; reg=00 val=69 */
ret = rtl28xxu_ctrl_msg(d, &req_r828d);
if (ret == 0 && buf[0] == 0x69) {
- priv->tuner = TUNER_RTL2832_R828D;
- priv->tuner_name = "R828D";
+ dev->tuner = TUNER_RTL2832_R828D;
+ dev->tuner_name = "R828D";
goto tuner_found;
}
tuner_found:
- dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name);
+ dev_dbg(&d->intf->dev, "tuner=%s\n", dev->tuner_name);
/* probe slave demod */
- if (priv->tuner == TUNER_RTL2832_R828D) {
+ if (dev->tuner == TUNER_RTL2832_R828D) {
/* power on MN88472 demod on GPIO0 */
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x01, 0x01);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x01, 0x01);
if (ret)
goto err;
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
if (ret)
goto err;
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
if (ret)
goto err;
/* check MN88472 answers */
ret = rtl28xxu_ctrl_msg(d, &req_mn88472);
if (ret == 0 && buf[0] == 0x02) {
- dev_dbg(&d->udev->dev, "%s: MN88472 found\n", __func__);
- priv->slave_demod = SLAVE_DEMOD_MN88472;
+ dev_dbg(&d->intf->dev, "MN88472 found\n");
+ dev->slave_demod = SLAVE_DEMOD_MN88472;
goto demod_found;
}
ret = rtl28xxu_ctrl_msg(d, &req_mn88473);
if (ret == 0 && buf[0] == 0x03) {
- dev_dbg(&d->udev->dev, "%s: MN88473 found\n", __func__);
- priv->slave_demod = SLAVE_DEMOD_MN88473;
+ dev_dbg(&d->intf->dev, "MN88473 found\n");
+ dev->slave_demod = SLAVE_DEMOD_MN88473;
goto demod_found;
}
}
@@ -568,14 +519,51 @@ demod_found:
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
-static const struct rtl2830_config rtl28xxu_rtl2830_mt2060_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
- .ts_mode = 0,
+static int rtl28xxu_read_config(struct dvb_usb_device *d)
+{
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U)
+ return rtl2831u_read_config(d);
+ else
+ return rtl2832u_read_config(d);
+}
+
+static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
+{
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+ int ret;
+ struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
+
+ dev_dbg(&d->intf->dev, "\n");
+
+ /*
+ * Detect the chip type using an I2C command that is not supported
+ * by the old RTL2831U.
+ */
+ ret = rtl28xxu_ctrl_msg(d, &req_demod_i2c);
+ if (ret == -EPIPE) {
+ dev->chip_id = CHIP_ID_RTL2831U;
+ } else if (ret == 0) {
+ dev->chip_id = CHIP_ID_RTL2832U;
+ } else {
+ dev_err(&d->intf->dev, "chip type detection failed %d\n", ret);
+ goto err;
+ }
+ dev_dbg(&d->intf->dev, "chip_id=%u\n", dev->chip_id);
+
+ return WARM;
+err:
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static const struct rtl2830_platform_data rtl2830_mt2060_platform_data = {
+ .clk = 28800000,
.spec_inv = 1,
.vtop = 0x20,
.krf = 0x04,
@@ -583,20 +571,16 @@ static const struct rtl2830_config rtl28xxu_rtl2830_mt2060_config = {
};
-static const struct rtl2830_config rtl28xxu_rtl2830_qt1010_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
- .ts_mode = 0,
+static const struct rtl2830_platform_data rtl2830_qt1010_platform_data = {
+ .clk = 28800000,
.spec_inv = 1,
.vtop = 0x20,
.krf = 0x04,
.agc_targ_val = 0x2d,
};
-static const struct rtl2830_config rtl28xxu_rtl2830_mxl5005s_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
- .ts_mode = 0,
+static const struct rtl2830_platform_data rtl2830_mxl5005s_platform_data = {
+ .clk = 28800000,
.spec_inv = 0,
.vtop = 0x3f,
.krf = 0x04,
@@ -606,69 +590,81 @@ static const struct rtl2830_config rtl28xxu_rtl2830_mxl5005s_config = {
static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
- struct rtl28xxu_priv *priv = d_to_priv(d);
- const struct rtl2830_config *rtl2830_config;
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+ struct rtl2830_platform_data *pdata = &dev->rtl2830_platform_data;
+ struct i2c_board_info board_info;
+ struct i2c_client *client;
int ret;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&d->intf->dev, "\n");
- switch (priv->tuner) {
+ switch (dev->tuner) {
case TUNER_RTL2830_QT1010:
- rtl2830_config = &rtl28xxu_rtl2830_qt1010_config;
+ *pdata = rtl2830_qt1010_platform_data;
break;
case TUNER_RTL2830_MT2060:
- rtl2830_config = &rtl28xxu_rtl2830_mt2060_config;
+ *pdata = rtl2830_mt2060_platform_data;
break;
case TUNER_RTL2830_MXL5005S:
- rtl2830_config = &rtl28xxu_rtl2830_mxl5005s_config;
+ *pdata = rtl2830_mxl5005s_platform_data;
break;
default:
- dev_err(&d->udev->dev, "%s: unknown tuner=%s\n",
- KBUILD_MODNAME, priv->tuner_name);
+ dev_err(&d->intf->dev, "unknown tuner %s\n", dev->tuner_name);
ret = -ENODEV;
goto err;
}
/* attach demodulator */
- adap->fe[0] = dvb_attach(rtl2830_attach, rtl2830_config, &d->i2c_adap);
- if (!adap->fe[0]) {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "rtl2830", I2C_NAME_SIZE);
+ board_info.addr = 0x10;
+ board_info.platform_data = pdata;
+ request_module("%s", board_info.type);
+ client = i2c_new_device(&d->i2c_adap, &board_info);
+ if (client == NULL || client->dev.driver == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
ret = -ENODEV;
goto err;
}
+ adap->fe[0] = pdata->get_dvb_frontend(client);
+ dev->demod_i2c_adapter = pdata->get_i2c_adapter(client);
+
+ dev->i2c_client_demod = client;
+
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
-static const struct rtl2832_config rtl28xxu_rtl2832_fc0012_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
+static const struct rtl2832_platform_data rtl2832_fc0012_platform_data = {
+ .clk = 28800000,
.tuner = TUNER_RTL2832_FC0012
};
-static const struct rtl2832_config rtl28xxu_rtl2832_fc0013_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
+static const struct rtl2832_platform_data rtl2832_fc0013_platform_data = {
+ .clk = 28800000,
.tuner = TUNER_RTL2832_FC0013
};
-static const struct rtl2832_config rtl28xxu_rtl2832_tua9001_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
+static const struct rtl2832_platform_data rtl2832_tua9001_platform_data = {
+ .clk = 28800000,
.tuner = TUNER_RTL2832_TUA9001,
};
-static const struct rtl2832_config rtl28xxu_rtl2832_e4000_config = {
- .i2c_addr = 0x10, /* 0x20 */
- .xtal = 28800000,
+static const struct rtl2832_platform_data rtl2832_e4000_platform_data = {
+ .clk = 28800000,
.tuner = TUNER_RTL2832_E4000,
};
-static const struct rtl2832_config rtl28xxu_rtl2832_r820t_config = {
- .i2c_addr = 0x10,
- .xtal = 28800000,
+static const struct rtl2832_platform_data rtl2832_r820t_platform_data = {
+ .clk = 28800000,
.tuner = TUNER_RTL2832_R820T,
};
@@ -678,12 +674,12 @@ static int rtl2832u_fc0012_tuner_callback(struct dvb_usb_device *d,
int ret;
u8 val;
- dev_dbg(&d->udev->dev, "%s: cmd=%d arg=%d\n", __func__, cmd, arg);
+ dev_dbg(&d->intf->dev, "cmd=%d arg=%d\n", cmd, arg);
switch (cmd) {
case FC_FE_CALLBACK_VHF_ENABLE:
/* set output values */
- ret = rtl28xx_rd_reg(d, SYS_GPIO_OUT_VAL, &val);
+ ret = rtl28xxu_rd_reg(d, SYS_GPIO_OUT_VAL, &val);
if (ret)
goto err;
@@ -693,7 +689,7 @@ static int rtl2832u_fc0012_tuner_callback(struct dvb_usb_device *d,
val |= 0x40; /* set GPIO6 high */
- ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_VAL, val);
+ ret = rtl28xxu_wr_reg(d, SYS_GPIO_OUT_VAL, val);
if (ret)
goto err;
break;
@@ -703,7 +699,7 @@ static int rtl2832u_fc0012_tuner_callback(struct dvb_usb_device *d,
}
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -713,7 +709,7 @@ static int rtl2832u_tua9001_tuner_callback(struct dvb_usb_device *d,
int ret;
u8 val;
- dev_dbg(&d->udev->dev, "%s: cmd=%d arg=%d\n", __func__, cmd, arg);
+ dev_dbg(&d->intf->dev, "cmd=%d arg=%d\n", cmd, arg);
/*
* CEN always enabled by hardware wiring
@@ -728,7 +724,7 @@ static int rtl2832u_tua9001_tuner_callback(struct dvb_usb_device *d,
else
val = (0 << 4);
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, val, 0x10);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, val, 0x10);
if (ret)
goto err;
break;
@@ -738,7 +734,7 @@ static int rtl2832u_tua9001_tuner_callback(struct dvb_usb_device *d,
else
val = (0 << 1);
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, val, 0x02);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, val, 0x02);
if (ret)
goto err;
break;
@@ -746,40 +742,46 @@ static int rtl2832u_tua9001_tuner_callback(struct dvb_usb_device *d,
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
-static int rtl2832u_tuner_callback(struct dvb_usb_device *d, int cmd, int arg)
-{
- struct rtl28xxu_priv *priv = d->priv;
-
- switch (priv->tuner) {
- case TUNER_RTL2832_FC0012:
- return rtl2832u_fc0012_tuner_callback(d, cmd, arg);
- case TUNER_RTL2832_TUA9001:
- return rtl2832u_tua9001_tuner_callback(d, cmd, arg);
- default:
- break;
- }
-
- return 0;
-}
-
static int rtl2832u_frontend_callback(void *adapter_priv, int component,
int cmd, int arg)
{
- struct i2c_adapter *adap = adapter_priv;
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct i2c_adapter *adapter = adapter_priv;
+ struct device *parent = adapter->dev.parent;
+ struct i2c_adapter *parent_adapter;
+ struct dvb_usb_device *d;
+ struct rtl28xxu_dev *dev;
+
+ /*
+ * All tuners are connected to the demod's muxed I2C adapter. We have to
+ * resolve its parent adapter in order to get a handle on this driver's
+ * private data. That is a bit of a hackish solution; a GPIO or a direct
+ * driver callback would be better...
+ */
+ if (parent != NULL && parent->type == &i2c_adapter_type)
+ parent_adapter = to_i2c_adapter(parent);
+ else
+ return -EINVAL;
+
+ d = i2c_get_adapdata(parent_adapter);
+ dev = d->priv;
- dev_dbg(&d->udev->dev, "%s: component=%d cmd=%d arg=%d\n",
- __func__, component, cmd, arg);
+ dev_dbg(&d->intf->dev, "component=%d cmd=%d arg=%d\n",
+ component, cmd, arg);
switch (component) {
case DVB_FRONTEND_COMPONENT_TUNER:
- return rtl2832u_tuner_callback(d, cmd, arg);
+ switch (dev->tuner) {
+ case TUNER_RTL2832_FC0012:
+ return rtl2832u_fc0012_tuner_callback(d, cmd, arg);
+ case TUNER_RTL2832_TUA9001:
+ return rtl2832u_tua9001_tuner_callback(d, cmd, arg);
+ }
default:
- break;
+ return -EINVAL;
}
return 0;
@@ -787,57 +789,70 @@ static int rtl2832u_frontend_callback(void *adapter_priv, int component,
static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
{
- int ret;
struct dvb_usb_device *d = adap_to_d(adap);
- struct rtl28xxu_priv *priv = d_to_priv(d);
- const struct rtl2832_config *rtl2832_config;
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+ struct rtl2832_platform_data *pdata = &dev->rtl2832_platform_data;
+ struct i2c_board_info board_info;
+ struct i2c_client *client;
+ int ret;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&d->intf->dev, "\n");
- switch (priv->tuner) {
+ switch (dev->tuner) {
case TUNER_RTL2832_FC0012:
- rtl2832_config = &rtl28xxu_rtl2832_fc0012_config;
+ *pdata = rtl2832_fc0012_platform_data;
break;
case TUNER_RTL2832_FC0013:
- rtl2832_config = &rtl28xxu_rtl2832_fc0013_config;
+ *pdata = rtl2832_fc0013_platform_data;
break;
case TUNER_RTL2832_FC2580:
/* FIXME: do not abuse fc0012 settings */
- rtl2832_config = &rtl28xxu_rtl2832_fc0012_config;
+ *pdata = rtl2832_fc0012_platform_data;
break;
case TUNER_RTL2832_TUA9001:
- rtl2832_config = &rtl28xxu_rtl2832_tua9001_config;
+ *pdata = rtl2832_tua9001_platform_data;
break;
case TUNER_RTL2832_E4000:
- rtl2832_config = &rtl28xxu_rtl2832_e4000_config;
+ *pdata = rtl2832_e4000_platform_data;
break;
case TUNER_RTL2832_R820T:
case TUNER_RTL2832_R828D:
- rtl2832_config = &rtl28xxu_rtl2832_r820t_config;
+ *pdata = rtl2832_r820t_platform_data;
break;
default:
- dev_err(&d->udev->dev, "%s: unknown tuner=%s\n",
- KBUILD_MODNAME, priv->tuner_name);
+ dev_err(&d->intf->dev, "unknown tuner %s\n", dev->tuner_name);
ret = -ENODEV;
goto err;
}
/* attach demodulator */
- adap->fe[0] = dvb_attach(rtl2832_attach, rtl2832_config, &d->i2c_adap);
- if (!adap->fe[0]) {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "rtl2832", I2C_NAME_SIZE);
+ board_info.addr = 0x10;
+ board_info.platform_data = pdata;
+ request_module("%s", board_info.type);
+ client = i2c_new_device(&d->i2c_adap, &board_info);
+ if (client == NULL || client->dev.driver == NULL) {
ret = -ENODEV;
goto err;
}
- /* RTL2832 I2C repeater */
- priv->demod_i2c_adapter = rtl2832_get_i2c_adapter(adap->fe[0]);
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ adap->fe[0] = pdata->get_dvb_frontend(client);
+ dev->demod_i2c_adapter = pdata->get_i2c_adapter(client);
+
+ dev->i2c_client_demod = client;
/* set fe callback */
adap->fe[0]->callback = rtl2832u_frontend_callback;
- if (priv->slave_demod) {
+ if (dev->slave_demod) {
struct i2c_board_info info = {};
- struct i2c_client *client;
/*
* We continue in reduced mode, without DVB-T2/C, using master
@@ -846,28 +861,29 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
ret = 0;
/* attach slave demodulator */
- if (priv->slave_demod == SLAVE_DEMOD_MN88472) {
+ if (dev->slave_demod == SLAVE_DEMOD_MN88472) {
struct mn88472_config mn88472_config = {};
mn88472_config.fe = &adap->fe[1];
mn88472_config.i2c_wr_max = 22;
strlcpy(info.type, "mn88472", I2C_NAME_SIZE);
+ mn88472_config.xtal = 20500000;
info.addr = 0x18;
info.platform_data = &mn88472_config;
request_module(info.type);
- client = i2c_new_device(priv->demod_i2c_adapter, &info);
+ client = i2c_new_device(&d->i2c_adap, &info);
if (client == NULL || client->dev.driver == NULL) {
- priv->slave_demod = SLAVE_DEMOD_NONE;
+ dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
- priv->slave_demod = SLAVE_DEMOD_NONE;
+ dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
- priv->i2c_client_slave_demod = client;
+ dev->i2c_client_slave_demod = client;
} else {
struct mn88473_config mn88473_config = {};
@@ -877,29 +893,64 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.addr = 0x18;
info.platform_data = &mn88473_config;
request_module(info.type);
- client = i2c_new_device(priv->demod_i2c_adapter, &info);
+ client = i2c_new_device(&d->i2c_adap, &info);
if (client == NULL || client->dev.driver == NULL) {
- priv->slave_demod = SLAVE_DEMOD_NONE;
+ dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
- priv->slave_demod = SLAVE_DEMOD_NONE;
+ dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
- priv->i2c_client_slave_demod = client;
+ dev->i2c_client_slave_demod = client;
}
}
return 0;
err_slave_demod_failed:
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
+static int rtl28xxu_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct rtl28xxu_dev *dev = adap_to_priv(adap);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U)
+ return rtl2831u_frontend_attach(adap);
+ else
+ return rtl2832u_frontend_attach(adap);
+}
+
+static int rtl28xxu_frontend_detach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap_to_d(adap);
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+ struct i2c_client *client;
+
+ dev_dbg(&d->intf->dev, "\n");
+
+ /* remove I2C slave demod */
+ client = dev->i2c_client_slave_demod;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ /* remove I2C demod */
+ client = dev->i2c_client_demod;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ return 0;
+}
+
static struct qt1010_config rtl28xxu_qt1010_config = {
.i2c_address = 0x62, /* 0xc4 */
};
@@ -930,33 +981,30 @@ static int rtl2831u_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
- struct rtl28xxu_priv *priv = d_to_priv(d);
- struct i2c_adapter *rtl2830_tuner_i2c;
+ struct rtl28xxu_dev *dev = d_to_priv(d);
struct dvb_frontend *fe;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
-
- /* use rtl2830 driver I2C adapter, for more info see rtl2830 driver */
- rtl2830_tuner_i2c = rtl2830_get_tuner_i2c_adapter(adap->fe[0]);
+ dev_dbg(&d->intf->dev, "\n");
- switch (priv->tuner) {
+ switch (dev->tuner) {
case TUNER_RTL2830_QT1010:
fe = dvb_attach(qt1010_attach, adap->fe[0],
- rtl2830_tuner_i2c, &rtl28xxu_qt1010_config);
+ dev->demod_i2c_adapter,
+ &rtl28xxu_qt1010_config);
break;
case TUNER_RTL2830_MT2060:
fe = dvb_attach(mt2060_attach, adap->fe[0],
- rtl2830_tuner_i2c, &rtl28xxu_mt2060_config,
- 1220);
+ dev->demod_i2c_adapter,
+ &rtl28xxu_mt2060_config, 1220);
break;
case TUNER_RTL2830_MXL5005S:
fe = dvb_attach(mxl5005s_attach, adap->fe[0],
- rtl2830_tuner_i2c, &rtl28xxu_mxl5005s_config);
+ dev->demod_i2c_adapter,
+ &rtl28xxu_mxl5005s_config);
break;
default:
fe = NULL;
- dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
- priv->tuner);
+ dev_err(&d->intf->dev, "unknown tuner %d\n", dev->tuner);
}
if (fe == NULL) {
@@ -966,7 +1014,7 @@ static int rtl2831u_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1002,45 +1050,38 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
- struct rtl28xxu_priv *priv = d_to_priv(d);
+ struct rtl28xxu_dev *dev = d_to_priv(d);
struct dvb_frontend *fe = NULL;
struct i2c_board_info info;
struct i2c_client *client;
+ struct v4l2_subdev *subdev = NULL;
+ struct platform_device *pdev;
+ struct rtl2832_sdr_platform_data pdata;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&d->intf->dev, "\n");
memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&pdata, 0, sizeof(pdata));
- switch (priv->tuner) {
+ switch (dev->tuner) {
case TUNER_RTL2832_FC0012:
fe = dvb_attach(fc0012_attach, adap->fe[0],
- &d->i2c_adap, &rtl2832u_fc0012_config);
+ dev->demod_i2c_adapter, &rtl2832u_fc0012_config);
/* since fc0012 includes reading the signal strength, delegate
* that to the tuner driver */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
-
- /* attach SDR */
- dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
- &rtl28xxu_rtl2832_fc0012_config, NULL);
break;
case TUNER_RTL2832_FC0013:
fe = dvb_attach(fc0013_attach, adap->fe[0],
- &d->i2c_adap, 0xc6>>1, 0, FC_XTAL_28_8_MHZ);
+ dev->demod_i2c_adapter, 0xc6>>1, 0, FC_XTAL_28_8_MHZ);
/* fc0013 also supports signal strength reading */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
-
- /* attach SDR */
- dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
- &rtl28xxu_rtl2832_fc0013_config, NULL);
break;
case TUNER_RTL2832_E4000: {
- struct v4l2_subdev *sd;
- struct i2c_adapter *i2c_adap_internal =
- rtl2832_get_private_i2c_adapter(adap->fe[0]);
struct e4000_config e4000_config = {
.fe = adap->fe[0],
.clock = 28800000,
@@ -1051,7 +1092,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
info.platform_data = &e4000_config;
request_module(info.type);
- client = i2c_new_device(priv->demod_i2c_adapter, &info);
+ client = i2c_new_device(dev->demod_i2c_adapter, &info);
if (client == NULL || client->dev.driver == NULL)
break;
@@ -1060,157 +1101,183 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
break;
}
- priv->i2c_client_tuner = client;
- sd = i2c_get_clientdata(client);
- i2c_set_adapdata(i2c_adap_internal, d);
-
- /* attach SDR */
- dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
- i2c_adap_internal,
- &rtl28xxu_rtl2832_e4000_config, sd);
+ dev->i2c_client_tuner = client;
+ subdev = i2c_get_clientdata(client);
}
break;
case TUNER_RTL2832_FC2580:
- fe = dvb_attach(fc2580_attach, adap->fe[0], &d->i2c_adap,
+ fe = dvb_attach(fc2580_attach, adap->fe[0],
+ dev->demod_i2c_adapter,
&rtl2832u_fc2580_config);
break;
case TUNER_RTL2832_TUA9001:
/* enable GPIO1 and GPIO4 as output */
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x12);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x12);
if (ret)
goto err;
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x12, 0x12);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x12, 0x12);
if (ret)
goto err;
- fe = dvb_attach(tua9001_attach, adap->fe[0], &d->i2c_adap,
+ fe = dvb_attach(tua9001_attach, adap->fe[0],
+ dev->demod_i2c_adapter,
&rtl2832u_tua9001_config);
break;
case TUNER_RTL2832_R820T:
- fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap,
+ fe = dvb_attach(r820t_attach, adap->fe[0],
+ dev->demod_i2c_adapter,
&rtl2832u_r820t_config);
/* Use tuner to get the signal strength */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
-
- /* attach SDR */
- dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
- &rtl28xxu_rtl2832_r820t_config, NULL);
break;
case TUNER_RTL2832_R828D:
fe = dvb_attach(r820t_attach, adap->fe[0],
- priv->demod_i2c_adapter,
+ dev->demod_i2c_adapter,
&rtl2832u_r828d_config);
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
if (adap->fe[1]) {
fe = dvb_attach(r820t_attach, adap->fe[1],
- priv->demod_i2c_adapter,
+ dev->demod_i2c_adapter,
&rtl2832u_r828d_config);
adap->fe[1]->ops.read_signal_strength =
adap->fe[1]->ops.tuner_ops.get_rf_strength;
}
-
- /* attach SDR */
- dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
- &rtl28xxu_rtl2832_r820t_config, NULL);
break;
default:
- dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
- priv->tuner);
+ dev_err(&d->intf->dev, "unknown tuner %d\n", dev->tuner);
}
-
- if (fe == NULL && priv->i2c_client_tuner == NULL) {
+ if (fe == NULL && dev->i2c_client_tuner == NULL) {
ret = -ENODEV;
goto err;
}
+ /* register SDR */
+ switch (dev->tuner) {
+ case TUNER_RTL2832_FC0012:
+ case TUNER_RTL2832_FC0013:
+ case TUNER_RTL2832_E4000:
+ case TUNER_RTL2832_R820T:
+ case TUNER_RTL2832_R828D:
+ pdata.clk = dev->rtl2832_platform_data.clk;
+ pdata.tuner = dev->tuner;
+ pdata.i2c_client = dev->i2c_client_demod;
+ pdata.bulk_read = dev->rtl2832_platform_data.bulk_read;
+ pdata.bulk_write = dev->rtl2832_platform_data.bulk_write;
+ pdata.update_bits = dev->rtl2832_platform_data.update_bits;
+ pdata.dvb_frontend = adap->fe[0];
+ pdata.dvb_usb_device = d;
+ pdata.v4l2_subdev = subdev;
+
+ request_module("%s", "rtl2832_sdr");
+ pdev = platform_device_register_data(&d->intf->dev,
+ "rtl2832_sdr",
+ PLATFORM_DEVID_AUTO,
+ &pdata, sizeof(pdata));
+ if (pdev == NULL || pdev->dev.driver == NULL)
+ break;
+ dev->platform_device_sdr = pdev;
+ break;
+ default:
+ dev_dbg(&d->intf->dev, "no SDR for tuner=%d\n", dev->tuner);
+ }
+
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
+static int rtl28xxu_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct rtl28xxu_dev *dev = adap_to_priv(adap);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U)
+ return rtl2831u_tuner_attach(adap);
+ else
+ return rtl2832u_tuner_attach(adap);
+}
+
+static int rtl28xxu_tuner_detach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap_to_d(adap);
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+ struct i2c_client *client;
+ struct platform_device *pdev;
+
+ dev_dbg(&d->intf->dev, "\n");
+
+ /* remove platform SDR */
+ pdev = dev->platform_device_sdr;
+ if (pdev)
+ platform_device_unregister(pdev);
+
+ /* remove I2C tuner */
+ client = dev->i2c_client_tuner;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ return 0;
+}
+
static int rtl28xxu_init(struct dvb_usb_device *d)
{
int ret;
u8 val;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&d->intf->dev, "\n");
/* init USB endpoints */
- ret = rtl28xx_rd_reg(d, USB_SYSCTL_0, &val);
+ ret = rtl28xxu_rd_reg(d, USB_SYSCTL_0, &val);
if (ret)
goto err;
/* enable DMA and Full Packet Mode*/
val |= 0x09;
- ret = rtl28xx_wr_reg(d, USB_SYSCTL_0, val);
+ ret = rtl28xxu_wr_reg(d, USB_SYSCTL_0, val);
if (ret)
goto err;
/* set EPA maximum packet size to 0x0200 */
- ret = rtl28xx_wr_regs(d, USB_EPA_MAXPKT, "\x00\x02\x00\x00", 4);
+ ret = rtl28xxu_wr_regs(d, USB_EPA_MAXPKT, "\x00\x02\x00\x00", 4);
if (ret)
goto err;
/* change EPA FIFO length */
- ret = rtl28xx_wr_regs(d, USB_EPA_FIFO_CFG, "\x14\x00\x00\x00", 4);
+ ret = rtl28xxu_wr_regs(d, USB_EPA_FIFO_CFG, "\x14\x00\x00\x00", 4);
if (ret)
goto err;
return ret;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
-static void rtl28xxu_exit(struct dvb_usb_device *d)
-{
- struct rtl28xxu_priv *priv = d->priv;
- struct i2c_client *client;
-
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
-
- /* remove I2C tuner */
- client = priv->i2c_client_tuner;
- if (client) {
- module_put(client->dev.driver->owner);
- i2c_unregister_device(client);
- }
-
- /* remove I2C slave demod */
- client = priv->i2c_client_slave_demod;
- if (client) {
- module_put(client->dev.driver->owner);
- i2c_unregister_device(client);
- }
-
- return;
-}
-
static int rtl2831u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
u8 gpio, sys0, epa_ctl[2];
- dev_dbg(&d->udev->dev, "%s: onoff=%d\n", __func__, onoff);
+ dev_dbg(&d->intf->dev, "onoff=%d\n", onoff);
/* demod adc */
- ret = rtl28xx_rd_reg(d, SYS_SYS0, &sys0);
+ ret = rtl28xxu_rd_reg(d, SYS_SYS0, &sys0);
if (ret)
goto err;
/* tuner power, read GPIOs */
- ret = rtl28xx_rd_reg(d, SYS_GPIO_OUT_VAL, &gpio);
+ ret = rtl28xxu_rd_reg(d, SYS_GPIO_OUT_VAL, &gpio);
if (ret)
goto err;
- dev_dbg(&d->udev->dev, "%s: RD SYS0=%02x GPIO_OUT_VAL=%02x\n", __func__,
- sys0, gpio);
+ dev_dbg(&d->intf->dev, "RD SYS0=%02x GPIO_OUT_VAL=%02x\n", sys0, gpio);
if (onoff) {
gpio |= 0x01; /* GPIO0 = 1 */
@@ -1229,21 +1296,20 @@ static int rtl2831u_power_ctrl(struct dvb_usb_device *d, int onoff)
epa_ctl[1] = 0x02; /* set reset */
}
- dev_dbg(&d->udev->dev, "%s: WR SYS0=%02x GPIO_OUT_VAL=%02x\n", __func__,
- sys0, gpio);
+ dev_dbg(&d->intf->dev, "WR SYS0=%02x GPIO_OUT_VAL=%02x\n", sys0, gpio);
/* demod adc */
- ret = rtl28xx_wr_reg(d, SYS_SYS0, sys0);
+ ret = rtl28xxu_wr_reg(d, SYS_SYS0, sys0);
if (ret)
goto err;
/* tuner power, write GPIOs */
- ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_VAL, gpio);
+ ret = rtl28xxu_wr_reg(d, SYS_GPIO_OUT_VAL, gpio);
if (ret)
goto err;
/* streaming EP: stall & reset */
- ret = rtl28xx_wr_regs(d, USB_EPA_CTL, epa_ctl, 2);
+ ret = rtl28xxu_wr_regs(d, USB_EPA_CTL, epa_ctl, 2);
if (ret)
goto err;
@@ -1252,7 +1318,7 @@ static int rtl2831u_power_ctrl(struct dvb_usb_device *d, int onoff)
return ret;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1260,31 +1326,31 @@ static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
- dev_dbg(&d->udev->dev, "%s: onoff=%d\n", __func__, onoff);
+ dev_dbg(&d->intf->dev, "onoff=%d\n", onoff);
if (onoff) {
/* GPIO3=1, GPIO4=0 */
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x08, 0x18);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x08, 0x18);
if (ret)
goto err;
/* suspend? */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL1, 0x00, 0x10);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL1, 0x00, 0x10);
if (ret)
goto err;
/* enable PLL */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x80, 0x80);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, 0x80, 0x80);
if (ret)
goto err;
/* disable reset */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x20, 0x20);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, 0x20, 0x20);
if (ret)
goto err;
/* streaming EP: clear stall & reset */
- ret = rtl28xx_wr_regs(d, USB_EPA_CTL, "\x00\x00", 2);
+ ret = rtl28xxu_wr_regs(d, USB_EPA_CTL, "\x00\x00", 2);
if (ret)
goto err;
@@ -1293,35 +1359,49 @@ static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
goto err;
} else {
/* GPIO4=1 */
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x10, 0x10);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x10, 0x10);
if (ret)
goto err;
/* disable PLL */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x80);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x80);
if (ret)
goto err;
/* streaming EP: set stall & reset */
- ret = rtl28xx_wr_regs(d, USB_EPA_CTL, "\x10\x02", 2);
+ ret = rtl28xxu_wr_regs(d, USB_EPA_CTL, "\x10\x02", 2);
if (ret)
goto err;
}
return ret;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
-static int rtl2832u_frontend_ctrl(struct dvb_frontend *fe, int onoff)
+static int rtl28xxu_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U)
+ return rtl2831u_power_ctrl(d, onoff);
+ else
+ return rtl2832u_power_ctrl(d, onoff);
+}
+
+static int rtl28xxu_frontend_ctrl(struct dvb_frontend *fe, int onoff)
{
struct dvb_usb_device *d = fe_to_d(fe);
- struct dvb_usb_adapter *adap = fe_to_adap(fe);
+ struct rtl28xxu_dev *dev = fe_to_priv(fe);
+ struct rtl2832_platform_data *pdata = &dev->rtl2832_platform_data;
int ret;
u8 val;
- dev_dbg(&d->udev->dev, "%s: fe=%d onoff=%d\n", __func__, fe->id, onoff);
+ dev_dbg(&d->intf->dev, "fe=%d onoff=%d\n", fe->id, onoff);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U)
+ return 0;
/* control internal demod ADC */
if (fe->id == 0 && onoff)
@@ -1329,20 +1409,20 @@ static int rtl2832u_frontend_ctrl(struct dvb_frontend *fe, int onoff)
else
val = 0x00; /* disable ADC */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
if (ret)
goto err;
/* bypass slave demod TS through master demod */
if (fe->id == 1 && onoff) {
- ret = rtl2832_enable_external_ts_if(adap->fe[0]);
+ ret = pdata->enable_slave_ts(dev->i2c_client_demod);
if (ret)
goto err;
}
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1350,7 +1430,7 @@ err:
static int rtl2831u_rc_query(struct dvb_usb_device *d)
{
int ret, i;
- struct rtl28xxu_priv *priv = d->priv;
+ struct rtl28xxu_dev *dev = d->priv;
u8 buf[5];
u32 rc_code;
struct rtl28xxu_reg_val rc_nec_tab[] = {
@@ -1371,17 +1451,17 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
};
/* init remote controller */
- if (!priv->rc_active) {
+ if (!dev->rc_active) {
for (i = 0; i < ARRAY_SIZE(rc_nec_tab); i++) {
- ret = rtl28xx_wr_reg(d, rc_nec_tab[i].reg,
+ ret = rtl28xxu_wr_reg(d, rc_nec_tab[i].reg,
rc_nec_tab[i].val);
if (ret)
goto err;
}
- priv->rc_active = true;
+ dev->rc_active = true;
}
- ret = rtl2831_rd_regs(d, SYS_IRRC_RP, buf, 5);
+ ret = rtl28xxu_rd_regs(d, SYS_IRRC_RP, buf, 5);
if (ret)
goto err;
@@ -1403,19 +1483,19 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
rc_keydown(d->rc_dev, RC_TYPE_NEC, rc_code, 0);
- ret = rtl28xx_wr_reg(d, SYS_IRRC_SR, 1);
+ ret = rtl28xxu_wr_reg(d, SYS_IRRC_SR, 1);
if (ret)
goto err;
/* repeated intentionally to avoid extra keypress */
- ret = rtl28xx_wr_reg(d, SYS_IRRC_SR, 1);
+ ret = rtl28xxu_wr_reg(d, SYS_IRRC_SR, 1);
if (ret)
goto err;
}
return ret;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1433,7 +1513,7 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
static int rtl2832u_rc_query(struct dvb_usb_device *d)
{
int ret, i, len;
- struct rtl28xxu_priv *priv = d->priv;
+ struct rtl28xxu_dev *dev = d->priv;
struct ir_raw_event ev;
u8 buf[128];
static const struct rtl28xxu_reg_val_mask refresh_tab[] = {
@@ -1443,7 +1523,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
};
/* init remote controller */
- if (!priv->rc_active) {
+ if (!dev->rc_active) {
static const struct rtl28xxu_reg_val_mask init_tab[] = {
{SYS_DEMOD_CTL1, 0x00, 0x04},
{SYS_DEMOD_CTL1, 0x00, 0x08},
@@ -1464,36 +1544,36 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
};
for (i = 0; i < ARRAY_SIZE(init_tab); i++) {
- ret = rtl28xx_wr_reg_mask(d, init_tab[i].reg,
+ ret = rtl28xxu_wr_reg_mask(d, init_tab[i].reg,
init_tab[i].val, init_tab[i].mask);
if (ret)
goto err;
}
- priv->rc_active = true;
+ dev->rc_active = true;
}
- ret = rtl28xx_rd_reg(d, IR_RX_IF, &buf[0]);
+ ret = rtl28xxu_rd_reg(d, IR_RX_IF, &buf[0]);
if (ret)
goto err;
if (buf[0] != 0x83)
goto exit;
- ret = rtl28xx_rd_reg(d, IR_RX_BC, &buf[0]);
+ ret = rtl28xxu_rd_reg(d, IR_RX_BC, &buf[0]);
if (ret)
goto err;
len = buf[0];
/* read raw code from hw */
- ret = rtl2831_rd_regs(d, IR_RX_BUF, buf, len);
+ ret = rtl28xxu_rd_regs(d, IR_RX_BUF, buf, len);
if (ret)
goto err;
/* let hw receive new code */
for (i = 0; i < ARRAY_SIZE(refresh_tab); i++) {
- ret = rtl28xx_wr_reg_mask(d, refresh_tab[i].reg,
+ ret = rtl28xxu_wr_reg_mask(d, refresh_tab[i].reg,
refresh_tab[i].val, refresh_tab[i].mask);
if (ret)
goto err;
@@ -1514,7 +1594,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
exit:
return ret;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&d->intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1523,7 +1603,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
{
/* disable IR interrupts in order to avoid SDR sample loss */
if (rtl28xxu_disable_rc)
- return rtl28xx_wr_reg(d, IR_RX_IE, 0x00);
+ return rtl28xxu_wr_reg(d, IR_RX_IE, 0x00);
/* load empty to enable rc */
if (!rc->map_name)
@@ -1535,52 +1615,80 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
return 0;
}
+
+static int rtl28xxu_get_rc_config(struct dvb_usb_device *d,
+ struct dvb_usb_rc *rc)
+{
+ struct rtl28xxu_dev *dev = d_to_priv(d);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U)
+ return rtl2831u_get_rc_config(d, rc);
+ else
+ return rtl2832u_get_rc_config(d, rc);
+}
#else
-#define rtl2831u_get_rc_config NULL
-#define rtl2832u_get_rc_config NULL
+#define rtl28xxu_get_rc_config NULL
#endif
-static const struct dvb_usb_device_properties rtl2831u_props = {
- .driver_name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- .adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct rtl28xxu_priv),
+static int rtl28xxu_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct rtl28xxu_dev *dev = adap_to_priv(adap);
- .power_ctrl = rtl2831u_power_ctrl,
- .i2c_algo = &rtl28xxu_i2c_algo,
- .read_config = rtl2831u_read_config,
- .frontend_attach = rtl2831u_frontend_attach,
- .tuner_attach = rtl2831u_tuner_attach,
- .init = rtl28xxu_init,
- .get_rc_config = rtl2831u_get_rc_config,
+ if (dev->chip_id == CHIP_ID_RTL2831U) {
+ struct rtl2830_platform_data *pdata = &dev->rtl2830_platform_data;
- .num_adapters = 1,
- .adapter = {
- {
- .stream = DVB_USB_STREAM_BULK(0x81, 6, 8 * 512),
- },
- },
-};
+ return pdata->pid_filter_ctrl(adap->fe[0], onoff);
+ } else {
+ struct rtl2832_platform_data *pdata = &dev->rtl2832_platform_data;
-static const struct dvb_usb_device_properties rtl2832u_props = {
+ return pdata->pid_filter_ctrl(adap->fe[0], onoff);
+ }
+}
+
+static int rtl28xxu_pid_filter(struct dvb_usb_adapter *adap, int index,
+ u16 pid, int onoff)
+{
+ struct rtl28xxu_dev *dev = adap_to_priv(adap);
+
+ if (dev->chip_id == CHIP_ID_RTL2831U) {
+ struct rtl2830_platform_data *pdata = &dev->rtl2830_platform_data;
+
+ return pdata->pid_filter(adap->fe[0], index, pid, onoff);
+ } else {
+ struct rtl2832_platform_data *pdata = &dev->rtl2832_platform_data;
+
+ return pdata->pid_filter(adap->fe[0], index, pid, onoff);
+ }
+}
+
+static const struct dvb_usb_device_properties rtl28xxu_props = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct rtl28xxu_priv),
+ .size_of_priv = sizeof(struct rtl28xxu_dev),
- .power_ctrl = rtl2832u_power_ctrl,
- .frontend_ctrl = rtl2832u_frontend_ctrl,
+ .identify_state = rtl28xxu_identify_state,
+ .power_ctrl = rtl28xxu_power_ctrl,
+ .frontend_ctrl = rtl28xxu_frontend_ctrl,
.i2c_algo = &rtl28xxu_i2c_algo,
- .read_config = rtl2832u_read_config,
- .frontend_attach = rtl2832u_frontend_attach,
- .tuner_attach = rtl2832u_tuner_attach,
+ .read_config = rtl28xxu_read_config,
+ .frontend_attach = rtl28xxu_frontend_attach,
+ .frontend_detach = rtl28xxu_frontend_detach,
+ .tuner_attach = rtl28xxu_tuner_attach,
+ .tuner_detach = rtl28xxu_tuner_detach,
.init = rtl28xxu_init,
- .exit = rtl28xxu_exit,
- .get_rc_config = rtl2832u_get_rc_config,
+ .get_rc_config = rtl28xxu_get_rc_config,
.num_adapters = 1,
.adapter = {
{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter_ctrl = rtl28xxu_pid_filter_ctrl,
+ .pid_filter = rtl28xxu_pid_filter,
+
.stream = DVB_USB_STREAM_BULK(0x81, 6, 8 * 512),
},
},
@@ -1589,69 +1697,69 @@ static const struct dvb_usb_device_properties rtl2832u_props = {
static const struct usb_device_id rtl28xxu_id_table[] = {
/* RTL2831U devices: */
{ DVB_USB_DEVICE(USB_VID_REALTEK, USB_PID_REALTEK_RTL2831U,
- &rtl2831u_props, "Realtek RTL2831U reference design", NULL) },
+ &rtl28xxu_props, "Realtek RTL2831U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT,
- &rtl2831u_props, "Freecom USB2.0 DVB-T", NULL) },
+ &rtl28xxu_props, "Freecom USB2.0 DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT_2,
- &rtl2831u_props, "Freecom USB2.0 DVB-T", NULL) },
+ &rtl28xxu_props, "Freecom USB2.0 DVB-T", NULL) },
/* RTL2832U devices: */
{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2832,
- &rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
+ &rtl28xxu_props, "Realtek RTL2832U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2838,
- &rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
+ &rtl28xxu_props, "Realtek RTL2832U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1,
- &rtl2832u_props, "TerraTec Cinergy T Stick Black", RC_MAP_TERRATEC_SLIM) },
+ &rtl28xxu_props, "TerraTec Cinergy T Stick Black", RC_MAP_TERRATEC_SLIM) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_DELOCK_USB2_DVBT,
- &rtl2832u_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
+ &rtl28xxu_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK,
- &rtl2832u_props, "TerraTec NOXON DAB Stick", NULL) },
+ &rtl28xxu_props, "TerraTec NOXON DAB Stick", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
- &rtl2832u_props, "TerraTec NOXON DAB Stick (rev 2)", NULL) },
+ &rtl28xxu_props, "TerraTec NOXON DAB Stick (rev 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV3,
- &rtl2832u_props, "TerraTec NOXON DAB Stick (rev 3)", NULL) },
+ &rtl28xxu_props, "TerraTec NOXON DAB Stick (rev 3)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
- &rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
+ &rtl28xxu_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
- &rtl2832u_props, "Dexatek DK DVB-T Dongle", NULL) },
+ &rtl28xxu_props, "Dexatek DK DVB-T Dongle", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6680,
- &rtl2832u_props, "DigitalNow Quad DVB-T Receiver", NULL) },
+ &rtl28xxu_props, "DigitalNow Quad DVB-T Receiver", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_MINID,
- &rtl2832u_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
+ &rtl28xxu_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
- &rtl2832u_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
+ &rtl28xxu_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
- &rtl2832u_props, "Dexatek DK mini DVB-T Dongle", NULL) },
+ &rtl28xxu_props, "Dexatek DK mini DVB-T Dongle", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d7,
- &rtl2832u_props, "TerraTec Cinergy T Stick+", NULL) },
+ &rtl28xxu_props, "TerraTec Cinergy T Stick+", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd3a8,
- &rtl2832u_props, "ASUS My Cinema-U3100Mini Plus V2", NULL) },
+ &rtl28xxu_props, "ASUS My Cinema-U3100Mini Plus V2", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd393,
- &rtl2832u_props, "GIGABYTE U7300", NULL) },
+ &rtl28xxu_props, "GIGABYTE U7300", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1104,
- &rtl2832u_props, "MSI DIGIVOX Micro HD", NULL) },
+ &rtl28xxu_props, "MSI DIGIVOX Micro HD", NULL) },
{ DVB_USB_DEVICE(USB_VID_COMPRO, 0x0620,
- &rtl2832u_props, "Compro VideoMate U620F", NULL) },
+ &rtl28xxu_props, "Compro VideoMate U620F", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd394,
- &rtl2832u_props, "MaxMedia HU394-T", NULL) },
+ &rtl28xxu_props, "MaxMedia HU394-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a03,
- &rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) },
+ &rtl28xxu_props, "Leadtek WinFast DTV Dongle mini", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A,
- &rtl2832u_props, "Crypto ReDi PC 50 A", NULL) },
+ &rtl28xxu_props, "Crypto ReDi PC 50 A", NULL) },
{ DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
- &rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
+ &rtl28xxu_props, "Genius TVGo DVB-T03", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd395,
- &rtl2832u_props, "Peak DVB-T USB", NULL) },
+ &rtl28xxu_props, "Peak DVB-T USB", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20_RTL2832U,
- &rtl2832u_props, "Sveon STV20", NULL) },
+ &rtl28xxu_props, "Sveon STV20", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV21,
- &rtl2832u_props, "Sveon STV21", NULL) },
+ &rtl28xxu_props, "Sveon STV21", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV27,
- &rtl2832u_props, "Sveon STV27", NULL) },
+ &rtl28xxu_props, "Sveon STV27", NULL) },
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
- &rtl2832u_props, "Astrometa DVB-T2", NULL) },
+ &rtl28xxu_props, "Astrometa DVB-T2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 3e3ea9d64a38..1b5d7ffb685e 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -22,8 +22,26 @@
#ifndef RTL28XXU_H
#define RTL28XXU_H
+#include <linux/platform_device.h>
+
#include "dvb_usb.h"
+#include "rtl2830.h"
+#include "rtl2832.h"
+#include "rtl2832_sdr.h"
+#include "mn88472.h"
+#include "mn88473.h"
+
+#include "qt1010.h"
+#include "mt2060.h"
+#include "mxl5005s.h"
+#include "fc0012.h"
+#include "fc0013.h"
+#include "e4000.h"
+#include "fc2580.h"
+#include "tua9001.h"
+#include "r820t.h"
+
/*
* USB commands
* (usb_control_msg() index parameter)
@@ -50,19 +68,26 @@
#define CMD_I2C_DA_WR 0x0610
-struct rtl28xxu_priv {
+struct rtl28xxu_dev {
+ u8 buf[28];
u8 chip_id;
u8 tuner;
char *tuner_name;
u8 page; /* integrated demod active register page */
struct i2c_adapter *demod_i2c_adapter;
bool rc_active;
+ struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
struct i2c_client *i2c_client_slave_demod;
+ struct platform_device *platform_device_sdr;
#define SLAVE_DEMOD_NONE 0
#define SLAVE_DEMOD_MN88472 1
#define SLAVE_DEMOD_MN88473 2
unsigned int slave_demod:2;
+ union {
+ struct rtl2830_platform_data rtl2830_platform_data;
+ struct rtl2832_platform_data rtl2832_platform_data;
+ };
};
enum rtl28xxu_chip_id {
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 0f345b1f9014..f327c49d7e09 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
{
"Mygica T230 DVB-T/T2/C",
{ NULL },
- { &cxusb_table[22], NULL },
+ { &cxusb_table[20], NULL },
},
}
};
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index abf8ab2e02e5..eafc5c82467f 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -1269,8 +1269,3 @@ MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>");
MODULE_DESCRIPTION("DVB Driver for ULI M920x");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- */
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 44ae1e0661e6..49a5f9532bd8 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -820,7 +820,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
if (urb_size > ep_size * npackets)
npackets = DIV_ROUND_UP(urb_size, ep_size);
- em28xx_info("Number of URBs: %d, with %d packets and %d size",
+ em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
num_urb, npackets, urb_size);
/* Estimate the bytes per period */
@@ -981,7 +981,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
return 0;
}
- em28xx_info("Closing audio extension");
+ em28xx_info("Closing audio extension\n");
if (dev->adev.sndcard) {
snd_card_disconnect(dev->adev.sndcard);
@@ -1005,7 +1005,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
- em28xx_info("Suspending audio extension");
+ em28xx_info("Suspending audio extension\n");
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->adev.stream_started, 0);
return 0;
@@ -1019,7 +1019,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
- em28xx_info("Resuming audio extension");
+ em28xx_info("Resuming audio extension\n");
/* Nothing to do other than schedule_work() ?? */
schedule_work(&dev->adev.wq_trigger);
return 0;
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 86461a708abe..37456079f490 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -1125,7 +1125,7 @@ int em28xx_suspend_extension(struct em28xx *dev)
{
const struct em28xx_ops *ops = NULL;
- em28xx_info("Suspending extensions");
+ em28xx_info("Suspending extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->suspend)
@@ -1139,7 +1139,7 @@ int em28xx_resume_extension(struct em28xx *dev)
{
const struct em28xx_ops *ops = NULL;
- em28xx_info("Resuming extensions");
+ em28xx_info("Resuming extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->resume)
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 9877b699c6bc..aee70d483264 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -1724,7 +1724,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
if (!dev->dvb)
return 0;
- em28xx_info("Closing DVB extension");
+ em28xx_info("Closing DVB extension\n");
dvb = dev->dvb;
client = dvb->i2c_client_tuner;
@@ -1775,17 +1775,17 @@ static int em28xx_dvb_suspend(struct em28xx *dev)
if (!dev->board.has_dvb)
return 0;
- em28xx_info("Suspending DVB extension");
+ em28xx_info("Suspending DVB extension\n");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
if (dvb->fe[0]) {
ret = dvb_frontend_suspend(dvb->fe[0]);
- em28xx_info("fe0 suspend %d", ret);
+ em28xx_info("fe0 suspend %d\n", ret);
}
if (dvb->fe[1]) {
dvb_frontend_suspend(dvb->fe[1]);
- em28xx_info("fe1 suspend %d", ret);
+ em28xx_info("fe1 suspend %d\n", ret);
}
}
@@ -1802,18 +1802,18 @@ static int em28xx_dvb_resume(struct em28xx *dev)
if (!dev->board.has_dvb)
return 0;
- em28xx_info("Resuming DVB extension");
+ em28xx_info("Resuming DVB extension\n");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
if (dvb->fe[0]) {
ret = dvb_frontend_resume(dvb->fe[0]);
- em28xx_info("fe0 resume %d", ret);
+ em28xx_info("fe0 resume %d\n", ret);
}
if (dvb->fe[1]) {
ret = dvb_frontend_resume(dvb->fe[1]);
- em28xx_info("fe1 resume %d", ret);
+ em28xx_info("fe1 resume %d\n", ret);
}
}
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index d8dc03aadfbd..4007356d991d 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -654,8 +654,6 @@ next_button:
if (dev->num_button_polling_addresses) {
memset(dev->button_polling_last_values, 0,
EM28XX_NUM_BUTTON_ADDRESSES_MAX);
- INIT_DELAYED_WORK(&dev->buttons_query_work,
- em28xx_query_buttons);
schedule_delayed_work(&dev->buttons_query_work,
msecs_to_jiffies(dev->button_polling_interval));
}
@@ -689,6 +687,7 @@ static int em28xx_ir_init(struct em28xx *dev)
}
kref_get(&dev->ref);
+ INIT_DELAYED_WORK(&dev->buttons_query_work, em28xx_query_buttons);
if (dev->board.buttons)
em28xx_init_buttons(dev);
@@ -833,7 +832,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
return 0;
}
- em28xx_info("Closing input extension");
+ em28xx_info("Closing input extension\n");
em28xx_shutdown_buttons(dev);
@@ -862,7 +861,7 @@ static int em28xx_ir_suspend(struct em28xx *dev)
if (dev->is_audio_only)
return 0;
- em28xx_info("Suspending input extension");
+ em28xx_info("Suspending input extension\n");
if (ir)
cancel_delayed_work_sync(&ir->work);
cancel_delayed_work_sync(&dev->buttons_query_work);
@@ -879,7 +878,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
if (dev->is_audio_only)
return 0;
- em28xx_info("Resuming input extension");
+ em28xx_info("Resuming input extension\n");
/* if suspend calls ir_raw_event_unregister(), then resume should call
ir_raw_event_register() */
if (ir)
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index cf7f58b76292..9ecf65629b3d 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1958,7 +1958,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
if (v4l2 == NULL)
return 0;
- em28xx_info("Closing video extension");
+ em28xx_info("Closing video extension\n");
mutex_lock(&dev->lock);
@@ -2007,7 +2007,7 @@ static int em28xx_v4l2_suspend(struct em28xx *dev)
if (!dev->has_video)
return 0;
- em28xx_info("Suspending video extension");
+ em28xx_info("Suspending video extension\n");
em28xx_stop_urbs(dev);
return 0;
}
@@ -2020,7 +2020,7 @@ static int em28xx_v4l2_resume(struct em28xx *dev)
if (!dev->has_video)
return 0;
- em28xx_info("Resuming video extension");
+ em28xx_info("Resuming video extension\n");
/* what do we do here */
return 0;
}
@@ -2192,7 +2192,6 @@ static struct video_device
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2->v4l2_dev;
- vfd->debug = video_debug;
vfd->lock = &dev->lock;
if (dev->board.is_webcam)
vfd->tvnorms = 0;
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index eed10d782535..60af3b167f3b 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -395,6 +395,16 @@ config USB_GSPCA_TOPRO
To compile this driver as a module, choose M here: the
module will be called gspca_topro.
+config USB_GSPCA_TOUPTEK
+ tristate "Touptek USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the ToupTek UCMOS
+ / AmScope MU series.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_touptek.
+
config USB_GSPCA_TV8532
tristate "TV8532 USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/usb/gspca/Makefile b/drivers/media/usb/gspca/Makefile
index f46975e4c82d..9f5ccecb9c8a 100644
--- a/drivers/media/usb/gspca/Makefile
+++ b/drivers/media/usb/gspca/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_USB_GSPCA_STK1135) += gspca_stk1135.o
obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
obj-$(CONFIG_USB_GSPCA_TOPRO) += gspca_topro.o
+obj-$(CONFIG_USB_GSPCA_TOUPTEK) += gspca_touptek.o
obj-$(CONFIG_USB_GSPCA_TV8532) += gspca_tv8532.o
obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o
obj-$(CONFIG_USB_GSPCA_VICAM) += gspca_vicam.o
@@ -86,6 +87,7 @@ gspca_stv0680-objs := stv0680.o
gspca_sunplus-objs := sunplus.o
gspca_t613-objs := t613.o
gspca_topro-objs := topro.o
+gspca_touptek-objs := touptek.o
gspca_tv8532-objs := tv8532.o
gspca_vc032x-objs := vc032x.o
gspca_vicam-objs := vicam.o
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 43d65057a5fe..e54cee856a80 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1562,7 +1562,7 @@ static int vidioc_s_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
struct gspca_dev *gspca_dev = video_drvdata(filp);
- int n;
+ unsigned int n;
n = parm->parm.capture.readbuffers;
if (n == 0 || n >= GSPCA_MAX_FRAMES)
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 90f0d637cd9d..a9c866d6d82d 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -12,6 +12,8 @@
* PS3 Eye camera enhanced by Richard Kaswy http://kaswy.free.fr
* PS3 Eye camera - brightness, contrast, awb, agc, aec controls
* added by Max Thrun <bear24rw@gmail.com>
+ * PS3 Eye camera - FPS range extended by Joseph Howse
+ * <josephhowse@nummist.com> http://nummist.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -116,7 +118,7 @@ static const struct v4l2_pix_format ov767x_mode[] = {
.colorspace = V4L2_COLORSPACE_JPEG},
};
-static const u8 qvga_rates[] = {125, 100, 75, 60, 50, 40, 30};
+static const u8 qvga_rates[] = {187, 150, 137, 125, 100, 75, 60, 50, 37, 30};
static const u8 vga_rates[] = {60, 50, 40, 30, 15};
static const struct framerates ov772x_framerates[] = {
@@ -769,12 +771,16 @@ static void set_frame_rate(struct gspca_dev *gspca_dev)
{15, 0x03, 0x41, 0x04},
};
static const struct rate_s rate_1[] = { /* 320x240 */
+/* {205, 0x01, 0xc1, 0x02}, * 205 FPS: video is partly corrupt */
+ {187, 0x01, 0x81, 0x02}, /* 187 FPS or below: video is valid */
+ {150, 0x01, 0xc1, 0x04},
+ {137, 0x02, 0xc1, 0x02},
{125, 0x02, 0x81, 0x02},
{100, 0x02, 0xc1, 0x04},
{75, 0x03, 0xc1, 0x04},
{60, 0x04, 0xc1, 0x04},
{50, 0x02, 0x41, 0x04},
- {40, 0x03, 0x41, 0x04},
+ {37, 0x03, 0x41, 0x04},
{30, 0x04, 0x41, 0x04},
};
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 49d209bbf9ee..6ac93d8db427 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -505,13 +505,13 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
{
int ret = -EINVAL;
- if (len == 1 && data[0] == 0x80) {
+ if (len == 1 && (data[0] == 0x80 || data[0] == 0x10)) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
input_sync(gspca_dev->input_dev);
ret = 0;
}
- if (len == 1 && data[0] == 0x88) {
+ if (len == 1 && (data[0] == 0x88 || data[0] == 0x11)) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
input_sync(gspca_dev->input_dev);
ret = 0;
diff --git a/drivers/media/usb/gspca/touptek.c b/drivers/media/usb/gspca/touptek.c
new file mode 100644
index 000000000000..7bac6bc96063
--- /dev/null
+++ b/drivers/media/usb/gspca/touptek.c
@@ -0,0 +1,731 @@
+/*
+ * ToupTek UCMOS / AmScope MU series camera driver
+ * TODO: contrast with ScopeTek / AmScope MDC cameras
+ *
+ * Copyright (C) 2012-2014 John McMaster <JohnDMcMaster@gmail.com>
+ *
+ * Special thanks to Bushing for helping with the decrypt algorithm and
+ * Sean O'Sullivan / the Rensselaer Center for Open Source
+ * Software (RCOS) for helping me learn kernel development
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "gspca.h"
+
+#define MODULE_NAME "touptek"
+
+MODULE_AUTHOR("John McMaster");
+MODULE_DESCRIPTION("ToupTek UCMOS / Amscope MU microscope camera driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Exposure reg is linear with exposure time
+ * Exposure (sec), E (reg)
+ * 0.000400, 0x0002
+ * 0.001000, 0x0005
+ * 0.005000, 0x0019
+ * 0.020000, 0x0064
+ * 0.080000, 0x0190
+ * 0.400000, 0x07D0
+ * 1.000000, 0x1388
+ * 2.000000, 0x2710
+ *
+ * Three gain stages
+ * 0x1000: master channel enable bit
+ * 0x007F: low gain bits
+ * 0x0080: medium gain bit
+ * 0x0100: high gain bit
+ * gain = enable * (1 + regH) * (1 + regM) * z * regL
+ *
+ * Gain implementation
+ * Want to do something similar to mt9v011.c's set_balance
+ *
+ * Gain does not vary with resolution (checked 640x480 vs 1600x1200)
+ *
+ * Constant derivation:
+ *
+ * Raw data:
+ * Gain, GTOP, B, R, GBOT
+ * 1.00, 0x105C, 0x1068, 0x10C8, 0x105C
+ * 1.20, 0x106E, 0x107E, 0x10D6, 0x106E
+ * 1.40, 0x10C0, 0x10CA, 0x10E5, 0x10C0
+ * 1.60, 0x10C9, 0x10D4, 0x10F3, 0x10C9
+ * 1.80, 0x10D2, 0x10DE, 0x11C1, 0x10D2
+ * 2.00, 0x10DC, 0x10E9, 0x11C8, 0x10DC
+ * 2.20, 0x10E5, 0x10F3, 0x11CF, 0x10E5
+ * 2.40, 0x10EE, 0x10FE, 0x11D7, 0x10EE
+ * 2.60, 0x10F7, 0x11C4, 0x11DE, 0x10F7
+ * 2.80, 0x11C0, 0x11CA, 0x11E5, 0x11C0
+ * 3.00, 0x11C5, 0x11CF, 0x11ED, 0x11C5
+ *
+ * zR = 0.0069605943152454778
+ * about 3/431 = 0.0069605568445475635
+ * zB = 0.0095695970695970703
+ * about 6/627 = 0.0095693779904306216
+ * zG = 0.010889328063241107
+ * about 6/551 = 0.010889292196007259
+ * about 10 bits for the constant + 7 bits for the value => at least a 17 bit
+ * intermediate; with 32 bit ints overflow etc should be fine
+ * Essentially gains are in range 0-0x001FF
+ *
+ * However, V4L expects a main gain channel + R and B balance
+ * To keep things simple, for now saturate the values if balance is too high/low
+ * This isn't really ideal, but it is an easy way to fit the Linux model
+ *
+ * Converted using the gain model, the data turns out to be quite linear:
+ * Gain, GTOP, B, R, GBOT
+ * 1.00, 92, 104, 144, 92
+ * 1.20, 110, 126, 172, 110
+ * 1.40, 128, 148, 202, 128
+ * 1.60, 146, 168, 230, 146
+ * 1.80, 164, 188, 260, 164
+ * 2.00, 184, 210, 288, 184
+ * 2.20, 202, 230, 316, 202
+ * 2.40, 220, 252, 348, 220
+ * 2.60, 238, 272, 376, 238
+ * 2.80, 256, 296, 404, 256
+ * 3.00, 276, 316, 436, 276
+ *
+ * Maximum gain is 0x7FF * 2 * 2 => 0x1FFC (8188),
+ * or about 13 effective bits of gain
+ * The highest the commercial driver goes in my setup is 436
+ * However, because higher values could *maybe* damage circuits,
+ * limit the gain until there is a reason to go higher
+ * Solution: gain is clipped and a warning is emitted
+ */
+#define GAIN_MAX 511
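+
+/*
+ * Worked check of the three-stage encoding above, using only the two
+ * tables: at gain 1.60 the converted R value is 230, which is > 0x7F but
+ * <= 0xFF, so it takes the medium-gain form
+ * 0x1000 | 0x0080 | (230 / 2) = 0x1080 | 0x73 = 0x10F3,
+ * which matches the raw R register 0x10F3 logged at gain 1.60. This is
+ * the same arithmetic gainify() below performs.
+ */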
+
+/* Frame sync is a short read */
+#define BULK_SIZE 0x4000
+
+/* MT9E001 reg names to give a rough approximation */
+#define REG_COARSE_INTEGRATION_TIME_ 0x3012
+#define REG_GROUPED_PARAMETER_HOLD_ 0x3022
+#define REG_MODE_SELECT 0x0100
+#define REG_OP_SYS_CLK_DIV 0x030A
+#define REG_VT_SYS_CLK_DIV 0x0302
+#define REG_PRE_PLL_CLK_DIV 0x0304
+#define REG_VT_PIX_CLK_DIV 0x0300
+#define REG_OP_PIX_CLK_DIV 0x0308
+#define REG_PLL_MULTIPLIER 0x0306
+#define REG_FRAME_LENGTH_LINES 0x0340
+#define REG_FRAME_LENGTH_LINES_ 0x300A
+#define REG_GREEN1_GAIN 0x3056
+#define REG_GREEN2_GAIN 0x305C
+#define REG_GROUPED_PARAMETER_HOLD 0x0104
+#define REG_LINE_LENGTH_PCK_ 0x300C
+#define REG_READ_MODE 0x3040
+#define REG_BLUE_GAIN 0x3058
+#define REG_RED_GAIN 0x305A
+#define REG_RESET_REGISTER 0x301A
+#define REG_SCALE_M 0x0404
+#define REG_SCALING_MODE 0x0400
+#define REG_SOFTWARE_RESET 0x0103
+#define REG_X_ADDR_END 0x0348
+#define REG_X_ADDR_START 0x0344
+#define REG_X_OUTPUT_SIZE 0x034C
+#define REG_Y_ADDR_END 0x034A
+#define REG_Y_ADDR_START 0x0346
+#define REG_Y_OUTPUT_SIZE 0x034E
+
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ /* How many bytes of this frame have been received so far */
+ unsigned int this_f;
+
+ /*
+  * Device has separate gains for each Bayer quadrant
+  * V4L supports master gain which is referenced to G1/G2 and supplies
+  * individual balance controls for R/B
+  */
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *red;
+};
+
+/* Used to simplify reg write error handling */
+struct cmd {
+ u16 value;
+ u16 index;
+};
+
+static const struct v4l2_pix_format vga_mode[] = {
+ {800, 600,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .bytesperline = 800,
+ .sizeimage = 800 * 600,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1600, 1200,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .bytesperline = 1600,
+ .sizeimage = 1600 * 1200,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {3264, 2448,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .bytesperline = 3264,
+ .sizeimage = 3264 * 2448,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+/*
+ * As there's no known frame sync, the only way to keep synced is to try hard
+ * to never miss any packets
+ */
+#if MAX_NURBS < 4
+#error "Not enough URBs in the gspca table"
+#endif
+
+static int val_reply(struct gspca_dev *gspca_dev, const char *reply, int rc)
+{
+ if (rc < 0) {
+ PERR("reply has error %d", rc);
+ return -EIO;
+ }
+ if (rc != 1) {
+ PERR("Bad reply size %d", rc);
+ return -EIO;
+ }
+ if (reply[0] != 0x08) {
+ PERR("Bad reply 0x%02X", reply[0]);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ char buff[1];
+ int rc;
+
+ PDEBUG(D_USBO,
+ "reg_w bReq=0x0B, bReqT=0xC0, wVal=0x%04X, wInd=0x%04X\n",
+ value, index);
+ rc = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0),
+ 0x0B, 0xC0, value, index, buff, 1, 500);
+ PDEBUG(D_USBO, "rc=%d, ret={0x%02X}", rc, buff[0]);
+ if (rc < 0) {
+ PERR("Failed reg_w(0x0B, 0xC0, 0x%04X, 0x%04X) w/ rc %d\n",
+ value, index, rc);
+ gspca_dev->usb_err = rc;
+ return;
+ }
+ if (val_reply(gspca_dev, buff, rc)) {
+ PERR("Bad reply to reg_w(0x0B, 0xC0, 0x%04X, 0x%04X)\n",
+ value, index);
+ gspca_dev->usb_err = -EIO;
+ }
+}
+
+static void reg_w_buf(struct gspca_dev *gspca_dev,
+ const struct cmd *p, int l)
+{
+ do {
+ reg_w(gspca_dev, p->value, p->index);
+ p++;
+ } while (--l > 0);
+}
+
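+/*
+ * Sanity check against the exposure table in the header comment: that
+ * table corresponds to reg ~= 5000 * seconds (one count ~= 200 us), so,
+ * assuming it applies unchanged at 800x600, the default control value of
+ * 350 programs 350 * 5 = 1750, i.e. roughly 0.35 s. This is an inferred
+ * relationship, not one documented by the vendor driver.
+ */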
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
+{
+ u16 value;
+ unsigned int w = gspca_dev->pixfmt.width;
+
+ if (w == 800)
+ value = val * 5;
+ else if (w == 1600)
+ value = val * 3;
+ else if (w == 3264)
+ value = val * 3 / 2;
+ else {
+ PERR("Invalid width %u\n", w);
+ gspca_dev->usb_err = -EINVAL;
+ return;
+ }
+ PDEBUG(D_STREAM, "exposure: 0x%04X ms\n", value);
+ /* Wonder if there's a good reason for sending it twice */
+ /* probably not, but leave it in because... why not */
+ reg_w(gspca_dev, value, REG_COARSE_INTEGRATION_TIME_);
+ reg_w(gspca_dev, value, REG_COARSE_INTEGRATION_TIME_);
+}
+
+static int gainify(int in)
+{
+ /*
+ * TODO: check if there are any issues with corner cases
+ * 0x000 (0):0x07F (127): regL
+ * 0x080 (128) - 0x0FF (255): regM, regL
+ * 0x100 (256) - max: regH, regM, regL
+ */
+ if (in <= 0x7F)
+ return 0x1000 | in;
+ else if (in <= 0xFF)
+ return 0x1080 | in / 2;
+ else
+ return 0x1180 | in / 4;
+}
+
+static void setggain(struct gspca_dev *gspca_dev, u16 global_gain)
+{
+ u16 normalized;
+
+ normalized = gainify(global_gain);
+ PDEBUG(D_STREAM, "gain G1/G2 (0x%04X): 0x%04X (src 0x%04X)\n",
+ REG_GREEN1_GAIN,
+ normalized, global_gain);
+
+ reg_w(gspca_dev, normalized, REG_GREEN1_GAIN);
+ reg_w(gspca_dev, normalized, REG_GREEN2_GAIN);
+}
+
+static void setbgain(struct gspca_dev *gspca_dev,
+ u16 gain, u16 global_gain)
+{
+ u16 normalized;
+
+ normalized = global_gain +
+ ((u32)global_gain) * gain / GAIN_MAX;
+ if (normalized > GAIN_MAX) {
+ PDEBUG(D_STREAM, "Truncating blue 0x%04X w/ value 0x%04X\n",
+ GAIN_MAX, normalized);
+ normalized = GAIN_MAX;
+ }
+ normalized = gainify(normalized);
+ PDEBUG(D_STREAM, "gain B (0x%04X): 0x%04X w/ source 0x%04X\n",
+ REG_BLUE_GAIN, normalized, gain);
+
+ reg_w(gspca_dev, normalized, REG_BLUE_GAIN);
+}
+
+static void setrgain(struct gspca_dev *gspca_dev,
+ u16 gain, u16 global_gain)
+{
+ u16 normalized;
+
+ normalized = global_gain +
+ ((u32)global_gain) * gain / GAIN_MAX;
+ if (normalized > GAIN_MAX) {
+ PDEBUG(D_STREAM, "Truncating gain 0x%04X w/ value 0x%04X\n",
+ GAIN_MAX, normalized);
+ normalized = GAIN_MAX;
+ }
+ normalized = gainify(normalized);
+ PDEBUG(D_STREAM, "gain R (0x%04X): 0x%04X w/ source 0x%04X\n",
+ REG_RED_GAIN, normalized, gain);
+
+ reg_w(gspca_dev, normalized, REG_RED_GAIN);
+}
+
+static void configure_wh(struct gspca_dev *gspca_dev)
+{
+ unsigned int w = gspca_dev->pixfmt.width;
+
+ PDEBUG(D_STREAM, "configure_wh\n");
+
+ if (w == 800) {
+ static const struct cmd reg_init_res[] = {
+ {0x0060, REG_X_ADDR_START},
+ {0x0CD9, REG_X_ADDR_END},
+ {0x0036, REG_Y_ADDR_START},
+ {0x098F, REG_Y_ADDR_END},
+ {0x07C7, REG_READ_MODE},
+ };
+
+ reg_w_buf(gspca_dev,
+ reg_init_res, ARRAY_SIZE(reg_init_res));
+ } else if (w == 1600) {
+ static const struct cmd reg_init_res[] = {
+ {0x009C, REG_X_ADDR_START},
+ {0x0D19, REG_X_ADDR_END},
+ {0x0068, REG_Y_ADDR_START},
+ {0x09C5, REG_Y_ADDR_END},
+ {0x06C3, REG_READ_MODE},
+ };
+
+ reg_w_buf(gspca_dev,
+ reg_init_res, ARRAY_SIZE(reg_init_res));
+ } else if (w == 3264) {
+ static const struct cmd reg_init_res[] = {
+ {0x00E8, REG_X_ADDR_START},
+ {0x0DA7, REG_X_ADDR_END},
+ {0x009E, REG_Y_ADDR_START},
+ {0x0A2D, REG_Y_ADDR_END},
+ {0x0241, REG_READ_MODE},
+ };
+
+ reg_w_buf(gspca_dev,
+ reg_init_res, ARRAY_SIZE(reg_init_res));
+ } else {
+ PERR("bad width %u\n", w);
+ gspca_dev->usb_err = -EINVAL;
+ return;
+ }
+
+ reg_w(gspca_dev, 0x0000, REG_SCALING_MODE);
+ reg_w(gspca_dev, 0x0010, REG_SCALE_M);
+ reg_w(gspca_dev, w, REG_X_OUTPUT_SIZE);
+ reg_w(gspca_dev, gspca_dev->pixfmt.height, REG_Y_OUTPUT_SIZE);
+
+ if (w == 800) {
+ reg_w(gspca_dev, 0x0384, REG_FRAME_LENGTH_LINES_);
+ reg_w(gspca_dev, 0x0960, REG_LINE_LENGTH_PCK_);
+ } else if (w == 1600) {
+ reg_w(gspca_dev, 0x0640, REG_FRAME_LENGTH_LINES_);
+ reg_w(gspca_dev, 0x0FA0, REG_LINE_LENGTH_PCK_);
+ } else if (w == 3264) {
+ reg_w(gspca_dev, 0x0B4B, REG_FRAME_LENGTH_LINES_);
+ reg_w(gspca_dev, 0x1F40, REG_LINE_LENGTH_PCK_);
+ } else {
+ PERR("bad width %u\n", w);
+ gspca_dev->usb_err = -EINVAL;
+ return;
+ }
+}
+
+/* Packets that were encrypted, no idea if the grouping is significant */
+static void configure_encrypted(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd reg_init_begin[] = {
+ {0x0100, REG_SOFTWARE_RESET},
+ {0x0000, REG_MODE_SELECT},
+ {0x0100, REG_GROUPED_PARAMETER_HOLD},
+ {0x0004, REG_VT_PIX_CLK_DIV},
+ {0x0001, REG_VT_SYS_CLK_DIV},
+ {0x0008, REG_OP_PIX_CLK_DIV},
+ {0x0001, REG_OP_SYS_CLK_DIV},
+ {0x0004, REG_PRE_PLL_CLK_DIV},
+ {0x0040, REG_PLL_MULTIPLIER},
+ {0x0000, REG_GROUPED_PARAMETER_HOLD},
+ {0x0100, REG_GROUPED_PARAMETER_HOLD},
+ };
+ static const struct cmd reg_init_end[] = {
+ {0x0000, REG_GROUPED_PARAMETER_HOLD},
+ {0x0301, 0x31AE},
+ {0x0805, 0x3064},
+ {0x0071, 0x3170},
+ {0x10DE, REG_RESET_REGISTER},
+ {0x0000, REG_MODE_SELECT},
+ {0x0010, REG_PLL_MULTIPLIER},
+ {0x0100, REG_MODE_SELECT},
+ };
+
+ PDEBUG(D_STREAM, "Encrypted begin, w = %u\n", gspca_dev->pixfmt.width);
+ reg_w_buf(gspca_dev, reg_init_begin, ARRAY_SIZE(reg_init_begin));
+ configure_wh(gspca_dev);
+ reg_w_buf(gspca_dev, reg_init_end, ARRAY_SIZE(reg_init_end));
+ reg_w(gspca_dev, 0x0100, REG_GROUPED_PARAMETER_HOLD);
+ reg_w(gspca_dev, 0x0000, REG_GROUPED_PARAMETER_HOLD);
+
+ PDEBUG(D_STREAM, "Encrypted end\n");
+}
+
+static int configure(struct gspca_dev *gspca_dev)
+{
+ int rc;
+ uint8_t buff[4];
+
+ PDEBUG(D_STREAM, "configure()\n");
+
+ /*
+ * First the driver sets a sort of encryption key
+ * A number of future requests of this type have wValue and wIndex
+ * encrypted as follows:
+ * -Compute key = this wValue rotated left by 4 bits
+ * (decrypt.py rotates right because we are decrypting)
+ * -Later packets are encrypted by XOR'ing them with the key
+ * XOR encrypt/decrypt is symmetrical
+ * wValue and wIndex are encrypted
+ * bRequest is not, and bRequestType is always 0xC0
+ * This allows resyncing if the key is unknown?
+ * By setting 0 we XOR with 0, and the shifting and XOR drop out
+ */
+ rc = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0),
+ 0x16, 0xC0, 0x0000, 0x0000, buff, 2, 500);
+ if (val_reply(gspca_dev, buff, rc)) {
+ PERR("failed key req");
+ return -EIO;
+ }
+
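+ /*
+ * Example of the scheme above with made-up numbers: had the key
+ * request used wValue 0x1234, key = 0x1234 rotated left by 4 bits
+ * = 0x2341, and a later wIndex of 0x3012 would go on the wire as
+ * 0x3012 ^ 0x2341 = 0x1353. With wValue 0 the key is 0, so every
+ * later wValue/wIndex is effectively sent in the clear.
+ */
+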
+ /*
+ * Next comes some sort of 2 packet challenge / response
+ * evidence suggests it's an Atmel I2C crypto part, but nobody cares to
+ * look
+ * (to make sure it's not cloned hardware?)
+ * Ignore it: I want to work with their hardware, not clone it
+ * 16 bytes out challenge, requestType: 0x40
+ * 16 bytes in response, requestType: 0xC0
+ */
+
+ rc = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0),
+ 0x01, 0x40, 0x0001, 0x000F, NULL, 0, 500);
+ if (rc < 0) {
+ PERR("failed to replay packet 176 w/ rc %d\n", rc);
+ return rc;
+ }
+
+ rc = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0),
+ 0x01, 0x40, 0x0000, 0x000F, NULL, 0, 500);
+ if (rc < 0) {
+ PERR("failed to replay packet 178 w/ rc %d\n", rc);
+ return rc;
+ }
+
+ rc = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0),
+ 0x01, 0x40, 0x0001, 0x000F, NULL, 0, 500);
+ if (rc < 0) {
+ PERR("failed to replay packet 180 w/ rc %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Serial number? Doesn't seem to be required
+ * cam1: \xE6\x0D\x00\x00, cam2: \x70\x19\x00\x00
+ * rc = usb_control_msg(gspca_dev->dev,
+ * usb_rcvctrlpipe(gspca_dev->dev, 0),
+ * 0x20, 0xC0, 0x0000, 0x0000, buff, 4, 500);
+ */
+
+ /* Large (EEPROM?) read, skip it since we have no idea what to do with it */
+ gspca_dev->usb_err = 0;
+ configure_encrypted(gspca_dev);
+ if (gspca_dev->usb_err)
+ return gspca_dev->usb_err;
+
+ /* This was omitted by accident at first; the camera does not work without it */
+ rc = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0),
+ 0x01, 0x40, 0x0003, 0x000F, NULL, 0, 500);
+ if (rc < 0) {
+ PERR("failed to replay final packet w/ rc %d\n", rc);
+ return rc;
+ }
+
+ PDEBUG(D_STREAM, "Configure complete\n");
+ return 0;
+}
+
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ gspca_dev->cam.cam_mode = vga_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
+
+ /* Yes we want URBs and we want them now! */
+ gspca_dev->cam.no_urb_create = 0;
+ gspca_dev->cam.bulk_nurbs = 4;
+ /* Largest size the Windows driver uses */
+ gspca_dev->cam.bulk_size = BULK_SIZE;
+ /* Definitely need to use bulk transfers */
+ gspca_dev->cam.bulk = 1;
+
+ return 0;
+}
+
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int rc;
+
+ sd->this_f = 0;
+
+ rc = configure(gspca_dev);
+ if (rc < 0) {
+ PERR("Failed configure");
+ return rc;
+ }
+ /* First two frames have messed-up gains.
+ * Drop them to avoid special cases in user apps? */
+ return 0;
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, /* bulk data packet */
+ int len) /* bulk packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (len != BULK_SIZE) {
+ /* can we finish a frame? */
+ if (sd->this_f + len == gspca_dev->pixfmt.sizeimage) {
+ gspca_frame_add(gspca_dev, LAST_PACKET, data, len);
+ PDEBUG(D_FRAM, "finish frame sz %u/%u w/ len %u\n",
+ sd->this_f, gspca_dev->pixfmt.sizeimage, len);
+ /* lost some data, discard the frame */
+ } else {
+ gspca_frame_add(gspca_dev, DISCARD_PACKET, NULL, 0);
+ PDEBUG(D_FRAM, "abort frame sz %u/%u w/ len %u\n",
+ sd->this_f, gspca_dev->pixfmt.sizeimage, len);
+ }
+ sd->this_f = 0;
+ } else {
+ if (sd->this_f == 0)
+ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
+ else
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+ sd->this_f += len;
+ }
+}
+
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ return 0;
+}
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ /* gspca_dev->gain automatically updated */
+ setggain(gspca_dev, gspca_dev->gain->val);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ sd->blue->val = ctrl->val;
+ setbgain(gspca_dev, sd->blue->val, gspca_dev->gain->val);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ sd->red->val = ctrl->val;
+ setrgain(gspca_dev, sd->red->val, gspca_dev->gain->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ /* Mostly limited by URB timeouts */
+ /* XXX: make dynamic based on frame rate? */
+ V4L2_CID_EXPOSURE, 0, 800, 1, 350);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 511, 1, 128);
+ sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 1023, 1, 80);
+ sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 1023, 1, 295);
+
+ if (hdl->error) {
+ PERR("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
+}
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .config = sd_config,
+ .init = sd_init,
+ .init_controls = sd_init_controls,
+ .start = sd_start,
+ .pkt_scan = sd_pkt_scan,
+};
+
+/* Table of supported USB devices */
+static const struct usb_device_id device_table[] = {
+ /* Commented-out devices are believed to be related models */
+ /* AS: AmScope, TT: ToupTek */
+ /* { USB_DEVICE(0x0547, 0x6035) }, TT UCMOS00350KPA */
+ /* { USB_DEVICE(0x0547, 0x6130) }, TT UCMOS01300KPA */
+ /* { USB_DEVICE(0x0547, 0x6200) }, TT UCMOS02000KPA */
+ /* { USB_DEVICE(0x0547, 0x6310) }, TT UCMOS03100KPA */
+ /* { USB_DEVICE(0x0547, 0x6510) }, TT UCMOS05100KPA */
+ /* { USB_DEVICE(0x0547, 0x6800) }, TT UCMOS08000KPA */
+ /* { USB_DEVICE(0x0547, 0x6801) }, TT UCMOS08000KPB */
+ { USB_DEVICE(0x0547, 0x6801) }, /* TT UCMOS08000KPB, AS MU800 */
+ /* { USB_DEVICE(0x0547, 0x6900) }, TT UCMOS09000KPA */
+ /* { USB_DEVICE(0x0547, 0x6901) }, TT UCMOS09000KPB */
+ /* { USB_DEVICE(0x0547, 0x6010) }, TT UCMOS10000KPA */
+ /* { USB_DEVICE(0x0547, 0x6014) }, TT UCMOS14000KPA */
+ /* { USB_DEVICE(0x0547, 0x6131) }, TT UCMOS01300KMA */
+ /* { USB_DEVICE(0x0547, 0x6511) }, TT UCMOS05100KMA */
+ /* { USB_DEVICE(0x0547, 0x8080) }, TT UHCCD00800KPA */
+ /* { USB_DEVICE(0x0547, 0x8140) }, TT UHCCD01400KPA */
+ /* { USB_DEVICE(0x0547, 0x8141) }, TT EXCCD01400KPA */
+ /* { USB_DEVICE(0x0547, 0x8200) }, TT UHCCD02000KPA */
+ /* { USB_DEVICE(0x0547, 0x8201) }, TT UHCCD02000KPB */
+ /* { USB_DEVICE(0x0547, 0x8310) }, TT UHCCD03100KPA */
+ /* { USB_DEVICE(0x0547, 0x8500) }, TT UHCCD05000KPA */
+ /* { USB_DEVICE(0x0547, 0x8510) }, TT UHCCD05100KPA */
+ /* { USB_DEVICE(0x0547, 0x8600) }, TT UHCCD06000KPA */
+ /* { USB_DEVICE(0x0547, 0x8800) }, TT UHCCD08000KPA */
+ /* { USB_DEVICE(0x0547, 0x8315) }, TT UHCCD03150KPA */
+ /* { USB_DEVICE(0x0547, 0x7800) }, TT UHCCD00800KMA */
+ /* { USB_DEVICE(0x0547, 0x7140) }, TT UHCCD01400KMA */
+ /* { USB_DEVICE(0x0547, 0x7141) }, TT UHCCD01400KMB */
+ /* { USB_DEVICE(0x0547, 0x7200) }, TT UHCCD02000KMA */
+ /* { USB_DEVICE(0x0547, 0x7315) }, TT UHCCD03150KMA */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+};
+
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
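The configure() comment above describes the control-transfer obfuscation: vendor request 0x16 seeds a key, the key is that wValue rotated left by 4 bits, and later vendor requests carry wValue and wIndex XOR'ed with it. The driver writes 0, which makes the whole scheme a no-op. A minimal sketch of that transform, using hypothetical helper names that are not part of the patch:

	/* Hypothetical illustration of the wValue/wIndex obfuscation described in
	 * configure(); the driver itself just seeds the key with 0. */
	#include <stdint.h>

	static uint16_t tt_key;	/* derived from the wValue of vendor request 0x16 */

	static void tt_set_key(uint16_t seed)
	{
		/* key = seed rotated left by 4 bits (decrypt.py rotates right) */
		tt_key = (uint16_t)((seed << 4) | (seed >> 12));
	}

	static uint16_t tt_obfuscate(uint16_t val)
	{
		/* XOR is symmetric, so the same call encrypts and decrypts */
		return val ^ tt_key;
	}

With a seed of 0 the rotation yields 0 and the XOR is the identity, which is why this driver's register writes can go out in the clear.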
diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
index c00ac57de510..b4efb2fb36fa 100644
--- a/drivers/media/usb/gspca/vc032x.c
+++ b/drivers/media/usb/gspca/vc032x.c
@@ -68,12 +68,12 @@ enum sensors {
static const struct v4l2_pix_format vc0321_mode[] = {
{320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
- .bytesperline = 320,
+ .bytesperline = 320 * 2,
.sizeimage = 320 * 240 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
- .bytesperline = 640,
+ .bytesperline = 640 * 2,
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
@@ -97,17 +97,17 @@ static const struct v4l2_pix_format vc0323_mode[] = {
};
static const struct v4l2_pix_format bi_mode[] = {
{320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
- .bytesperline = 320,
+ .bytesperline = 320 * 2,
.sizeimage = 320 * 240 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
{640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
- .bytesperline = 640,
+ .bytesperline = 640 * 2,
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{1280, 1024, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
- .bytesperline = 1280,
+ .bytesperline = 1280 * 2,
.sizeimage = 1280 * 1024 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.c b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
index cc06d5e4adcc..45276c628482 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
@@ -84,13 +84,3 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0);
}
}
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.h b/drivers/media/usb/pvrusb2/pvrusb2-audio.h
index e3e63d750891..27cefb5cb170 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.h
@@ -25,13 +25,3 @@
#include "pvrusb2-hdw-internal.h"
void pvr2_msp3400_subdev_update(struct pvr2_hdw *, struct v4l2_subdev *);
#endif /* __PVRUSB2_AUDIO_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index c8761c71c9d2..924fc4c6019a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -418,14 +418,3 @@ struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key));
return cp;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.h b/drivers/media/usb/pvrusb2/pvrusb2-context.h
index d657e53bbfa3..1c1d442d9ea3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.h
@@ -83,12 +83,3 @@ int pvr2_context_global_init(void);
void pvr2_context_global_done(void);
#endif /* __PVRUSB2_CONTEXT_H */
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
index 88320900dbd4..f82f0f0f2c04 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
@@ -82,14 +82,3 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sd->ops->audio->s_routing(sd, input, 0, 0);
}
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h
index 53ba548b72a7..86c17bee56f9 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h
@@ -36,13 +36,3 @@
void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *, struct v4l2_subdev *);
#endif /* __PVRUSB2_AUDIO_CS53L32A_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
index 7d5a7139a45a..958db170a048 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
@@ -596,14 +596,3 @@ int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr,
} while(0); LOCK_GIVE(cptr->hdw->big_lock);
return ret;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h
index 794ff90121c7..c175571868a3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h
@@ -110,13 +110,3 @@ int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *,
unsigned int *len);
#endif /* __PVRUSB2_CTRL_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
index c514d0b9ffdc..1a81aa70509b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -152,15 +152,3 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sd->ops->audio->s_routing(sd, (u32)aud_input, 0, 0);
}
}
-
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h
index e35c2322a08c..2eed7b7ee25e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h
@@ -40,13 +40,3 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *, struct v4l2_subdev *sd);
#endif /* __PVRUSB2_CX2584X_V4L_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debug.h b/drivers/media/usb/pvrusb2/pvrusb2-debug.h
index be79249f8628..4ef2ebcd97a5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debug.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debug.h
@@ -57,13 +57,3 @@ extern int pvrusb2_debug;
#endif /* __PVRUSB2_HDW_INTERNAL_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
index 4279ebb811a1..e4022bcb155b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
@@ -322,14 +322,3 @@ int pvr2_debugifc_docmd(struct pvr2_hdw *hdw,const char *buf,
return 0;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h
index 2f8d46761cd0..a8dfc55f136f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h
@@ -40,13 +40,3 @@ int pvr2_debugifc_docmd(struct pvr2_hdw *,
const char *buf_ptr,unsigned int buf_size);
#endif /* __PVRUSB2_DEBUGIFC_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-devattr.c b/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
index adc501d3c287..06c4c3dabcde 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
@@ -564,13 +564,3 @@ MODULE_FIRMWARE(PVR2_FIRMWARE_29xxx);
MODULE_FIRMWARE(PVR2_FIRMWARE_24xxx);
MODULE_FIRMWARE(PVR2_FIRMWARE_73xxx);
MODULE_FIRMWARE(PVR2_FIRMWARE_75xxx);
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-devattr.h b/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
index 273c8d4b3853..5aeefb6a991f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
@@ -187,13 +187,3 @@ struct pvr2_device_desc {
extern struct usb_device_id pvr2_device_table[];
#endif /* __PVRUSB2_HDW_INTERNAL_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index 9515f3a68f8f..e1907cd0c3b7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -152,13 +152,3 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
return 0;
}
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h
index cca3216f94cc..f1e33c807f46 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h
@@ -27,13 +27,3 @@ struct pvr2_hdw;
int pvr2_eeprom_analyze(struct pvr2_hdw *);
#endif /* __PVRUSB2_EEPROM_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index f7702aeeda3f..593b3e9b6bfd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -538,14 +538,3 @@ int pvr2_encoder_stop(struct pvr2_hdw *hdw)
return status;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.h b/drivers/media/usb/pvrusb2/pvrusb2-encoder.h
index 232fefbcd1ac..a2bfb48f1ecd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.h
@@ -30,13 +30,3 @@ int pvr2_encoder_start(struct pvr2_hdw *);
int pvr2_encoder_stop(struct pvr2_hdw *);
#endif /* __PVRUSB2_ENCODER_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h b/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
index 614755ea2ea3..06a15a68bcfd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
@@ -60,13 +60,3 @@
#define FX2CMD_ONAIR_DTV_POWER_OFF 0xa3u
#endif /* _PVRUSB2_FX2_CMD_H_ */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
index 036952f2a3cb..1f9c02801cee 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
@@ -394,13 +394,3 @@ unsigned long pvr2_hdw_get_cur_freq(struct pvr2_hdw *);
void pvr2_hdw_status_poll(struct pvr2_hdw *);
#endif /* __PVRUSB2_HDW_INTERNAL_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 2fd9b5e0e2a9..930593d7028d 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2425,22 +2425,18 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
}
if (!hdw) goto fail;
- init_timer(&hdw->quiescent_timer);
- hdw->quiescent_timer.data = (unsigned long)hdw;
- hdw->quiescent_timer.function = pvr2_hdw_quiescent_timeout;
+ setup_timer(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout,
+ (unsigned long)hdw);
- init_timer(&hdw->decoder_stabilization_timer);
- hdw->decoder_stabilization_timer.data = (unsigned long)hdw;
- hdw->decoder_stabilization_timer.function =
- pvr2_hdw_decoder_stabilization_timeout;
+ setup_timer(&hdw->decoder_stabilization_timer,
+ pvr2_hdw_decoder_stabilization_timeout,
+ (unsigned long)hdw);
- init_timer(&hdw->encoder_wait_timer);
- hdw->encoder_wait_timer.data = (unsigned long)hdw;
- hdw->encoder_wait_timer.function = pvr2_hdw_encoder_wait_timeout;
+ setup_timer(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout,
+ (unsigned long)hdw);
- init_timer(&hdw->encoder_run_timer);
- hdw->encoder_run_timer.data = (unsigned long)hdw;
- hdw->encoder_run_timer.function = pvr2_hdw_encoder_run_timeout;
+ setup_timer(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout,
+ (unsigned long)hdw);
hdw->master_state = PVR2_STATE_DEAD;
@@ -3680,10 +3676,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
hdw->ctl_timeout_flag = 0;
hdw->ctl_write_pend_flag = 0;
hdw->ctl_read_pend_flag = 0;
- init_timer(&timer);
+ setup_timer(&timer, pvr2_ctl_timeout, (unsigned long)hdw);
timer.expires = jiffies + timeout;
- timer.data = (unsigned long)hdw;
- timer.function = pvr2_ctl_timeout;
if (write_len) {
hdw->cmd_debug_state = 2;
@@ -4035,11 +4029,6 @@ int pvr2_hdw_cmd_powerup(struct pvr2_hdw *hdw)
}
-int pvr2_hdw_cmd_powerdown(struct pvr2_hdw *hdw)
-{
- return pvr2_issue_simple_cmd(hdw,FX2CMD_POWER_OFF);
-}
-
int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
{
@@ -4301,9 +4290,8 @@ static int state_eval_encoder_config(struct pvr2_hdw *hdw)
the encoder. */
if (!hdw->state_encoder_waitok) {
hdw->encoder_wait_timer.expires =
- jiffies +
- (HZ * TIME_MSEC_ENCODER_WAIT
- / 1000);
+ jiffies + msecs_to_jiffies(
+ TIME_MSEC_ENCODER_WAIT);
add_timer(&hdw->encoder_wait_timer);
}
}
@@ -4426,8 +4414,8 @@ static int state_eval_encoder_run(struct pvr2_hdw *hdw)
if (pvr2_encoder_start(hdw) < 0) return !0;
hdw->state_encoder_run = !0;
if (!hdw->state_encoder_runok) {
- hdw->encoder_run_timer.expires =
- jiffies + (HZ * TIME_MSEC_ENCODER_OK / 1000);
+ hdw->encoder_run_timer.expires = jiffies +
+ msecs_to_jiffies(TIME_MSEC_ENCODER_OK);
add_timer(&hdw->encoder_run_timer);
}
}
@@ -4518,9 +4506,8 @@ static int state_eval_decoder_run(struct pvr2_hdw *hdw)
but before we did the pending check. */
if (!hdw->state_decoder_quiescent) {
hdw->quiescent_timer.expires =
- jiffies +
- (HZ * TIME_MSEC_DECODER_WAIT
- / 1000);
+ jiffies + msecs_to_jiffies(
+ TIME_MSEC_DECODER_WAIT);
add_timer(&hdw->quiescent_timer);
}
}
@@ -4544,9 +4531,8 @@ static int state_eval_decoder_run(struct pvr2_hdw *hdw)
hdw->state_decoder_run = !0;
if (hdw->decoder_client_id == PVR2_CLIENT_ID_SAA7115) {
hdw->decoder_stabilization_timer.expires =
- jiffies +
- (HZ * TIME_MSEC_DECODER_STABILIZATION_WAIT /
- 1000);
+ jiffies + msecs_to_jiffies(
+ TIME_MSEC_DECODER_STABILIZATION_WAIT);
add_timer(&hdw->decoder_stabilization_timer);
} else {
hdw->state_decoder_ready = !0;
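The pvrusb2-hdw.c hunks above are mechanical conversions: each open-coded init_timer()/.data/.function triple becomes a single setup_timer() call, and the hand-rolled "HZ * msec / 1000" arithmetic becomes msecs_to_jiffies(). A minimal sketch of the resulting pattern, with made-up function names for illustration:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void example_timeout(unsigned long data)
	{
		/* hypothetical no-op callback, stands in for the real handlers */
	}

	static void example_arm(struct timer_list *t, unsigned long ctx, unsigned int msec)
	{
		/* replaces: init_timer(t); t->data = ctx; t->function = example_timeout; */
		setup_timer(t, example_timeout, ctx);
		/* replaces: t->expires = jiffies + (HZ * msec / 1000); */
		t->expires = jiffies + msecs_to_jiffies(msec);
		add_timer(t);
	}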
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
index 41847076f51a..a82a00dd7329 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
@@ -271,9 +271,6 @@ int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *);
/* Execute simple reset command */
int pvr2_hdw_cmd_powerup(struct pvr2_hdw *);
-/* suspend */
-int pvr2_hdw_cmd_powerdown(struct pvr2_hdw *);
-
/* Order decoder to reset */
int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *);
@@ -343,13 +340,3 @@ void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw);
int pvr2_upload_firmware2(struct pvr2_hdw *hdw);
#endif /* __PVRUSB2_HDW_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index b5e929f1bf82..4baa9d632a4e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -686,13 +686,3 @@ void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
hdw->i2c_linked = 0;
}
}
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h
index 6a75769200bd..a10a3e8e9345 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h
@@ -27,14 +27,3 @@ void pvr2_i2c_core_done(struct pvr2_hdw *);
#endif /* __PVRUSB2_I2C_ADAPTER_H */
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index 1e354747de3f..0c08f22bdfce 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -682,14 +682,3 @@ int pvr2_buffer_get_id(struct pvr2_buffer *bp)
{
return bp->id;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.h b/drivers/media/usb/pvrusb2/pvrusb2-io.h
index afb7e87c0394..0c47c6a95ab2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.h
@@ -90,13 +90,3 @@ int pvr2_buffer_get_id(struct pvr2_buffer *);
int pvr2_buffer_queue(struct pvr2_buffer *);
#endif /* __PVRUSB2_IO_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
index bba6115c9ae8..cd995b54732e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
@@ -499,14 +499,3 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
cp,req_cnt,ret);
return ret;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.h b/drivers/media/usb/pvrusb2/pvrusb2-ioread.h
index 100e0780e1aa..0b1f0fbc3438 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.h
@@ -36,13 +36,3 @@ int pvr2_ioread_read(struct pvr2_ioread *,void __user *buf,unsigned int cnt);
int pvr2_ioread_avail(struct pvr2_ioread *);
#endif /* __PVRUSB2_IOREAD_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-main.c b/drivers/media/usb/pvrusb2/pvrusb2-main.c
index c1d9bb61cd77..86be902a0049 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-main.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-main.c
@@ -169,14 +169,3 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION("0.9.1");
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 453627b07833..9a596a3a4c27 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -398,14 +398,3 @@ v4l2_std_id pvr2_std_get_usable(void)
{
return CSTD_ALL;
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.h b/drivers/media/usb/pvrusb2/pvrusb2-std.h
index a35c53d0b320..ed4ec0474429 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.h
@@ -47,13 +47,3 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
v4l2_std_id pvr2_std_get_usable(void);
#endif /* __PVRUSB2_STD_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index 6ef1335b2858..06fe63ced58c 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -848,14 +848,3 @@ static ssize_t debugcmd_store(struct device *class_dev,
return count;
}
#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h
index 6d875bfe7991..6f0579e1e07b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h
@@ -34,13 +34,3 @@ struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *,
struct pvr2_sysfs_class *);
#endif /* __PVRUSB2_SYSFS_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-util.h b/drivers/media/usb/pvrusb2/pvrusb2-util.h
index 92b75544ee2e..5465bf9cd73e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-util.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-util.h
@@ -50,13 +50,3 @@
#endif /* __PVRUSB2_UTIL_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 1b158f1167ed..35e4ea530494 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
module_param_array(vbi_nr, int, NULL, 0444);
MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
-static struct v4l2_capability pvr_capability ={
- .driver = "pvrusb2",
- .card = "Hauppauge WinTV pvr-usb2",
- .bus_info = "usb",
- .version = LINUX_VERSION_CODE,
- .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE),
-};
-
static struct v4l2_fmtdesc pvr_fmtdesc [] = {
{
.index = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
sizeof(cap->bus_info));
strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+ V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+ switch (fh->pdi->devbase.vfl_type) {
+ case VFL_TYPE_GRABBER:
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+ break;
+ case VFL_TYPE_RADIO:
+ cap->device_caps = V4L2_CAP_RADIO;
+ break;
+ }
+ cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
return 0;
}
@@ -1360,13 +1362,3 @@ struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
pvr2_v4l2_destroy_no_lock(vp);
return NULL;
}
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h
index 34c011a7b107..e455c9515841 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h
@@ -27,13 +27,3 @@ struct pvr2_v4l2;
struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *);
#endif /* __PVRUSB2_V4L2_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 75 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
index 2e205c99eb96..139b39740534 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
@@ -101,14 +101,3 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sd->ops->video->s_routing(sd, input, 0, 0);
}
}
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h
index 3b0bd5db602b..dacf3ec7f9e1 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h
@@ -36,13 +36,3 @@
void pvr2_saa7115_subdev_update(struct pvr2_hdw *, struct v4l2_subdev *);
#endif /* __PVRUSB2_VIDEO_V4L_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
index 3ac8d751a5c0..f1df94a2436f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
@@ -56,15 +56,3 @@ void pvr2_wm8775_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sd->ops->audio->s_routing(sd, input, 0, 0);
}
}
-
-
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h
index 0577bc7246fb..a4ee12e28d5c 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h
@@ -40,13 +40,3 @@ void pvr2_wm8775_subdev_update(struct pvr2_hdw *, struct v4l2_subdev *sd);
#endif /* __PVRUSB2_WM8775_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2.h b/drivers/media/usb/pvrusb2/pvrusb2.h
index 240de9b35661..95f98a87abb3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2.h
@@ -30,13 +30,3 @@
#define PVR_NUM 20
#endif /* __PVRUSB2_H */
-
-/*
- Stuff for Emacs to see, in order to encourage consistent editing style:
- *** Local Variables: ***
- *** mode: c ***
- *** fill-column: 70 ***
- *** tab-width: 8 ***
- *** c-basic-offset: 8 ***
- *** End: ***
- */
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 15b754da4a2c..702267e208ba 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -508,7 +508,8 @@ static void pwc_isoc_cleanup(struct pwc_device *pdev)
}
/* Must be called with vb_queue_lock hold */
-static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
+static void pwc_cleanup_queued_bufs(struct pwc_device *pdev,
+ enum vb2_buffer_state state)
{
unsigned long flags = 0;
@@ -519,7 +520,7 @@ static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb, state);
}
spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
@@ -674,7 +675,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
/* And cleanup any queued bufs!! */
- pwc_cleanup_queued_bufs(pdev);
+ pwc_cleanup_queued_bufs(pdev, VB2_BUF_STATE_QUEUED);
}
mutex_unlock(&pdev->v4l2_lock);
@@ -692,7 +693,9 @@ static void stop_streaming(struct vb2_queue *vq)
pwc_isoc_cleanup(pdev);
}
- pwc_cleanup_queued_bufs(pdev);
+ pwc_cleanup_queued_bufs(pdev, VB2_BUF_STATE_ERROR);
+ if (pdev->fill_buf)
+ vb2_buffer_done(&pdev->fill_buf->vb, VB2_BUF_STATE_ERROR);
mutex_unlock(&pdev->v4l2_lock);
}
@@ -1125,7 +1128,6 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
if (pdev->vb_queue.streaming)
pwc_isoc_cleanup(pdev);
pdev->udev = NULL;
- pwc_cleanup_queued_bufs(pdev);
v4l2_device_disconnect(&pdev->v4l2_dev);
video_unregister_device(&pdev->vdev);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index de55e96fed15..0f3c34d47ec3 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -2274,9 +2274,7 @@ static int s2255_probe(struct usb_interface *interface,
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
- init_timer(&dev->timer);
- dev->timer.function = s2255_timer;
- dev->timer.data = (unsigned long)dev->fw_data;
+ setup_timer(&dev->timer, s2255_timer, (unsigned long)dev->fw_data);
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_vc *vc = &dev->vc[i];
diff --git a/drivers/media/usb/siano/Kconfig b/drivers/media/usb/siano/Kconfig
index 5afbd9a4b55c..d37b742d4f7a 100644
--- a/drivers/media/usb/siano/Kconfig
+++ b/drivers/media/usb/siano/Kconfig
@@ -5,7 +5,9 @@
config SMS_USB_DRV
tristate "Siano SMS1xxx based MDTV receiver"
depends on DVB_CORE && HAS_DMA
+ depends on !RC_CORE || RC_CORE
select MEDIA_COMMON_OPTIONS
+ select SMS_SIANO_MDTV
---help---
Choose if you would like to have Siano's support for USB interface
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index a47629108c1b..65a326c5128f 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -38,10 +38,6 @@
#include "stk1160.h"
#include "stk1160-reg.h"
-static unsigned int vidioc_debug;
-module_param(vidioc_debug, int, 0644);
-MODULE_PARM_DESC(vidioc_debug, "enable debug messages [vidioc]");
-
static bool keep_buffers;
module_param(keep_buffers, bool, 0644);
MODULE_PARM_DESC(keep_buffers, "don't release buffers upon stop streaming");
@@ -659,7 +655,6 @@ int stk1160_video_register(struct stk1160 *dev)
/* Initialize video_device with a template structure */
dev->vdev = v4l_template;
- dev->vdev.debug = vidioc_debug;
dev->vdev.queue = &dev->vb_vidq;
/*
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 3588dc38db87..e08fa587332f 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1262,7 +1262,6 @@ static int stk_register_video_device(struct stk_camera *dev)
dev->vdev = stk_v4l_data;
dev->vdev.lock = &dev->lock;
- dev->vdev.debug = debug;
dev->vdev.v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(&dev->vdev, dev);
err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 793577fc4633..0f14d3ccc7b4 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -941,7 +941,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt) {
- dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Fourcc format (0x%08x)"
+ dprintk(dev, 2, "Fourcc format (0x%08x)"
" invalid.\n", f->fmt.pix.pixelformat);
return -EINVAL;
}
@@ -1622,7 +1622,6 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
- vfd->debug = tm6000_debug;
vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 302aa07c458f..44b0c28d69b6 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -2194,9 +2194,8 @@ static void usbvision_power_off_timer(unsigned long data)
void usbvision_init_power_off_timer(struct usb_usbvision *usbvision)
{
- init_timer(&usbvision->power_off_timer);
- usbvision->power_off_timer.data = (long)usbvision;
- usbvision->power_off_timer.function = usbvision_power_off_timer;
+ setup_timer(&usbvision->power_off_timer, usbvision_power_off_timer,
+ (unsigned long)usbvision);
}
void usbvision_set_power_off_timer(struct usb_usbvision *usbvision)
@@ -2502,11 +2501,3 @@ int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
usbvision_set_audio(usbvision, audio[channel]);
return 0;
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index ba262a32bd3a..26dbcb1146af 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -445,11 +445,3 @@ static struct i2c_adapter i2c_adap_template = {
.owner = THIS_MODULE,
.name = "usbvision",
};
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 693d5f409138..cd2fbf11e3b4 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1716,11 +1716,3 @@ static void __exit usbvision_exit(void)
module_init(usbvision_init);
module_exit(usbvision_exit);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index a0c73cf1517c..77aeb1ed9a81 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -517,11 +517,3 @@ int usbvision_power_off(struct usb_usbvision *usbvision);
int usbvision_power_on(struct usb_usbvision *usbvision);
#endif /* __LINUX_USBVISION_H */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 6a4b0b8cd270..cf27006c29dc 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -138,6 +138,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_RGB565,
},
{
+ .name = "BGR 8:8:8 (BGR3)",
+ .guid = UVC_GUID_FORMAT_BGR3,
+ .fcc = V4L2_PIX_FMT_BGR24,
+ },
+ {
.name = "H.264",
.guid = UVC_GUID_FORMAT_H264,
.fcc = V4L2_PIX_FMT_H264,
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index cc960723b926..10c554e7655c 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -143,20 +143,6 @@ static void uvc_buffer_finish(struct vb2_buffer *vb)
uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
}
-static void uvc_wait_prepare(struct vb2_queue *vq)
-{
- struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
-
- mutex_unlock(&queue->mutex);
-}
-
-static void uvc_wait_finish(struct vb2_queue *vq)
-{
- struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
-
- mutex_lock(&queue->mutex);
-}
-
static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
@@ -195,8 +181,8 @@ static struct vb2_ops uvc_queue_qops = {
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
.buf_finish = uvc_buffer_finish,
- .wait_prepare = uvc_wait_prepare,
- .wait_finish = uvc_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = uvc_start_streaming,
.stop_streaming = uvc_stop_streaming,
};
@@ -214,6 +200,7 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
queue->queue.mem_ops = &vb2_vmalloc_memops;
queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
| V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
+ queue->queue.lock = &queue->mutex;
ret = vb2_queue_init(&queue->queue);
if (ret)
return ret;
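The uvc_queue.c hunk drops the driver's private wait_prepare/wait_finish callbacks in favour of the generic videobuf2 helpers, which only work once the queue's lock pointer is set (hence the added "queue->queue.lock = &queue->mutex"). Roughly, the helpers reduce to the same unlock/lock pair the driver used to open-code; this is a sketch, not the exact videobuf2 source:

	/* Approximate behaviour of vb2_ops_wait_prepare()/vb2_ops_wait_finish(). */
	#include <media/videobuf2-core.h>

	void vb2_ops_wait_prepare(struct vb2_queue *vq)
	{
		mutex_unlock(vq->lock);	/* drop the queue lock while sleeping for a buffer */
	}

	void vb2_ops_wait_finish(struct vb2_queue *vq)
	{
		mutex_lock(vq->lock);	/* take it back once the wait completes */
	}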
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 9c5cbcf16529..43e953f73e02 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -13,7 +13,6 @@
#include <linux/compat.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 9637e8b86949..20ccc9d315dc 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1734,13 +1734,13 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
uvc_video_clock_reset(stream);
+ if (!uvc_queue_streaming(&stream->queue))
+ return 0;
+
ret = uvc_commit_video(stream, &stream->ctrl);
if (ret < 0)
return ret;
- if (!uvc_queue_streaming(&stream->queue))
- return 0;
-
return uvc_init_video(stream, GFP_NOIO);
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f0a04b532ede..c63e5b55e143 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -109,6 +109,9 @@
#define UVC_GUID_FORMAT_RGBP \
{ 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BGR3 \
+ { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
#define UVC_GUID_FORMAT_M420 \
{ 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 5c006277b8b1..ca850316d379 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1454,8 +1454,6 @@ static int zr364xx_probe(struct usb_interface *intf,
cam->vdev.v4l2_dev = &cam->v4l2_dev;
cam->vdev.ctrl_handler = &cam->ctrl_handler;
video_set_drvdata(&cam->vdev, cam);
- if (debug)
- cam->vdev.debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
cam->udev = udev;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 9aa530a8bea9..86bb93fd7db8 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -47,15 +47,15 @@ static ssize_t index_show(struct device *cd,
}
static DEVICE_ATTR_RO(index);
-static ssize_t debug_show(struct device *cd,
+static ssize_t dev_debug_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
- return sprintf(buf, "%i\n", vdev->debug);
+ return sprintf(buf, "%i\n", vdev->dev_debug);
}
-static ssize_t debug_store(struct device *cd, struct device_attribute *attr,
+static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr,
const char *buf, size_t len)
{
struct video_device *vdev = to_video_device(cd);
@@ -66,10 +66,10 @@ static ssize_t debug_store(struct device *cd, struct device_attribute *attr,
if (res)
return res;
- vdev->debug = value;
+ vdev->dev_debug = value;
return len;
}
-static DEVICE_ATTR_RW(debug);
+static DEVICE_ATTR_RW(dev_debug);
static ssize_t name_show(struct device *cd,
struct device_attribute *attr, char *buf)
@@ -82,7 +82,7 @@ static DEVICE_ATTR_RO(name);
static struct attribute *video_device_attrs[] = {
&dev_attr_name.attr,
- &dev_attr_debug.attr,
+ &dev_attr_dev_debug.attr,
&dev_attr_index.attr,
NULL,
};
@@ -304,7 +304,8 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
return -EINVAL;
if (video_is_registered(vdev))
ret = vdev->fops->read(filp, buf, sz, off);
- if (vdev->debug)
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
printk(KERN_DEBUG "%s: read: %zd (%d)\n",
video_device_node_name(vdev), sz, ret);
return ret;
@@ -320,7 +321,8 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
return -EINVAL;
if (video_is_registered(vdev))
ret = vdev->fops->write(filp, buf, sz, off);
- if (vdev->debug)
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
printk(KERN_DEBUG "%s: write: %zd (%d)\n",
video_device_node_name(vdev), sz, ret);
return ret;
@@ -335,7 +337,7 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
return DEFAULT_POLLMASK;
if (video_is_registered(vdev))
res = vdev->fops->poll(filp, poll);
- if (vdev->debug > 2)
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
printk(KERN_DEBUG "%s: poll: %08x\n",
video_device_node_name(vdev), res);
return res;
@@ -404,7 +406,7 @@ static unsigned long v4l2_get_unmapped_area(struct file *filp,
if (!video_is_registered(vdev))
return -ENODEV;
ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
- if (vdev->debug)
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
printk(KERN_DEBUG "%s: get_unmapped_area (%d)\n",
video_device_node_name(vdev), ret);
return ret;
@@ -420,7 +422,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
return -ENODEV;
if (video_is_registered(vdev))
ret = vdev->fops->mmap(filp, vm);
- if (vdev->debug)
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
printk(KERN_DEBUG "%s: mmap (%d)\n",
video_device_node_name(vdev), ret);
return ret;
@@ -450,7 +452,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
ret = -ENODEV;
}
- if (vdev->debug)
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
printk(KERN_DEBUG "%s: open (%d)\n",
video_device_node_name(vdev), ret);
/* decrease the refcount in case of an error */
@@ -467,7 +469,7 @@ static int v4l2_release(struct inode *inode, struct file *filp)
if (vdev->fops->release)
ret = vdev->fops->release(filp);
- if (vdev->debug)
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
printk(KERN_DEBUG "%s: release\n",
video_device_node_name(vdev));
@@ -1033,10 +1035,3 @@ MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index faac2f4e0f3a..b08407225db1 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2339,7 +2339,7 @@ static long __video_do_ioctl(struct file *file,
const struct v4l2_ioctl_info *info;
void *fh = file->private_data;
struct v4l2_fh *vfh = NULL;
- int debug = vfd->debug;
+ int dev_debug = vfd->dev_debug;
long ret = -ENOTTY;
if (ops == NULL) {
@@ -2388,11 +2388,15 @@ static long __video_do_ioctl(struct file *file,
}
done:
- if (debug) {
+ if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) {
+ if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) &&
+ (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF))
+ return ret;
+
v4l_printk_ioctl(video_device_node_name(vfd), cmd);
if (ret < 0)
pr_cont(": error %ld", ret);
- if (debug == V4L2_DEBUG_IOCTL)
+ if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG))
pr_cont("\n");
else if (_IOC_DIR(cmd) == _IOC_NONE)
info->debug(arg, write_only);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 543631c3557a..19a034e79be4 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -283,10 +283,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (rval)
return rval;
- rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
- if (rval != -ENOIOCTLCMD)
- return rval;
-
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
@@ -308,10 +304,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (rval)
return rval;
- rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
- if (rval != -ENOIOCTLCMD)
- return rval;
-
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 3ff15f1c9d70..f669cedca8bd 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -145,12 +145,11 @@ struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
}
EXPORT_SYMBOL_GPL(videobuf_to_dma);
-void videobuf_dma_init(struct videobuf_dmabuf *dma)
+static void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
memset(dma, 0, sizeof(*dma));
dma->magic = MAGIC_DMABUF;
}
-EXPORT_SYMBOL_GPL(videobuf_dma_init);
static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
int direction, unsigned long data, unsigned long size)
@@ -195,7 +194,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
return 0;
}
-int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
+static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
unsigned long data, unsigned long size)
{
int ret;
@@ -206,9 +205,8 @@ int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
return ret;
}
-EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
-int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
+static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
int nr_pages)
{
int i;
@@ -267,9 +265,8 @@ out_free_pages:
return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
-int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
+static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
dma_addr_t addr, int nr_pages)
{
dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
@@ -284,9 +281,8 @@ int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
return 0;
}
-EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
+static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
{
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(0 == dma->nr_pages);
@@ -328,7 +324,6 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
return 0;
}
-EXPORT_SYMBOL_GPL(videobuf_dma_map);
int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index d09a8916e940..bc08a829bc13 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
prequeue--;
} else {
call_void_qop(q, wait_finish, q);
- ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ if (!threadio->stop)
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
call_void_qop(q, wait_prepare, q);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
}
- if (threadio->stop)
- break;
- if (ret)
+ if (ret || threadio->stop)
break;
try_to_freeze();
vb = q->bufs[fileio->b.index];
if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
- ret = threadio->fnc(vb, threadio->priv);
- if (ret)
- break;
+ if (threadio->fnc(vb, threadio->priv))
+ break;
call_void_qop(q, wait_finish, q);
if (set_timestamp)
v4l2_get_timestamp(&fileio->b.timestamp);
- ret = vb2_internal_qbuf(q, &fileio->b);
+ if (!threadio->stop)
+ ret = vb2_internal_qbuf(q, &fileio->b);
call_void_qop(q, wait_prepare, q);
- if (ret)
+ if (ret || threadio->stop)
break;
}
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
threadio->stop = true;
vb2_internal_streamoff(q, q->type);
call_void_qop(q, wait_prepare, q);
+ err = kthread_stop(threadio->thread);
q->fileio = NULL;
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
- err = kthread_stop(threadio->thread);
threadio->thread = NULL;
kfree(threadio);
q->fileio = NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index fba944e50227..bcde88572429 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -95,7 +95,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
goto fail_pages_array_alloc;
buf->vma = vma;
- buf->vaddr = ioremap_nocache(physp, size);
+ buf->vaddr = (__force void *)ioremap_nocache(physp, size);
if (!buf->vaddr)
goto fail_pages_array_alloc;
} else {
@@ -155,7 +155,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
kfree(buf->pages);
} else {
vb2_put_vma(buf->vma);
- iounmap(buf->vaddr);
+ iounmap((__force void __iomem *)buf->vaddr);
}
kfree(buf);
}
@@ -211,6 +211,7 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
}
+#ifdef CONFIG_HAS_DMA
/*********************************************/
/* DMABUF ops for exporters */
/*********************************************/
@@ -380,6 +381,8 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
return dbuf;
}
+#endif /* CONFIG_HAS_DMA */
+
/*********************************************/
/* callbacks for DMABUF buffers */
@@ -437,7 +440,9 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+#ifdef CONFIG_HAS_DMA
.get_dmabuf = vb2_vmalloc_get_dmabuf,
+#endif
.map_dmabuf = vb2_vmalloc_map_dmabuf,
.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index fc7ab5a3561e..d708ded5457b 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -27,18 +27,29 @@ enum ccf_version {
struct ccf_info {
enum ccf_version version;
int err_reg_offs;
+ bool has_brr;
};
static const struct ccf_info ccf1_info = {
.version = CCF1,
.err_reg_offs = 0xa00,
+ .has_brr = false,
};
static const struct ccf_info ccf2_info = {
.version = CCF2,
.err_reg_offs = 0xe40,
+ .has_brr = true,
};
+/*
+ * This register is present but not documented, with different values for
+ * IP_ID, on other chips with fsl,corenet2-cf such as t4240 and b4860.
+ */
+#define CCF_BRR 0xbf8
+#define CCF_BRR_IPID 0xffff0000
+#define CCF_BRR_IPID_T1040 0x09310000
+
static const struct of_device_id ccf_matches[] = {
{
.compatible = "fsl,corenet1-cf",
@@ -66,6 +77,8 @@ struct ccf_err_regs {
/* LAE/CV also valid for errdis and errinten */
#define ERRDET_LAE (1 << 0) /* Local Access Error */
#define ERRDET_CV (1 << 1) /* Coherency Violation */
+#define ERRDET_UTID (1 << 2) /* Unavailable Target ID (t1040) */
+#define ERRDET_MCST (1 << 3) /* Multicast Stash (t1040) */
#define ERRDET_CTYPE_SHIFT 26 /* Capture Type (ccf2 only) */
#define ERRDET_CTYPE_MASK (0x1f << ERRDET_CTYPE_SHIFT)
#define ERRDET_CAP (1 << 31) /* Capture Valid (ccf2 only) */
@@ -84,6 +97,7 @@ struct ccf_private {
struct device *dev;
void __iomem *regs;
struct ccf_err_regs __iomem *err_regs;
+ bool t1040;
};
static irqreturn_t ccf_irq(int irq, void *dev_id)
@@ -142,6 +156,12 @@ static irqreturn_t ccf_irq(int irq, void *dev_id)
if (errdet & ERRDET_CV)
dev_crit(ccf->dev, "Coherency Violation\n");
+ if (errdet & ERRDET_UTID)
+ dev_crit(ccf->dev, "Unavailable Target ID\n");
+
+ if (errdet & ERRDET_MCST)
+ dev_crit(ccf->dev, "Multicast Stash\n");
+
if (cap_valid) {
dev_crit(ccf->dev, "address 0x%09llx, src id 0x%x\n",
addr, src_id);
@@ -157,6 +177,7 @@ static int ccf_probe(struct platform_device *pdev)
struct ccf_private *ccf;
struct resource *r;
const struct of_device_id *match;
+ u32 errinten;
int ret, irq;
match = of_match_device(ccf_matches, &pdev->dev);
@@ -183,6 +204,13 @@ static int ccf_probe(struct platform_device *pdev)
ccf->info = match->data;
ccf->err_regs = ccf->regs + ccf->info->err_reg_offs;
+ if (ccf->info->has_brr) {
+ u32 brr = ioread32be(ccf->regs + CCF_BRR);
+
+ if ((brr & CCF_BRR_IPID) == CCF_BRR_IPID_T1040)
+ ccf->t1040 = true;
+ }
+
dev_set_drvdata(&pdev->dev, ccf);
irq = platform_get_irq(pdev, 0);
@@ -197,15 +225,19 @@ static int ccf_probe(struct platform_device *pdev)
return ret;
}
+ errinten = ERRDET_LAE | ERRDET_CV;
+ if (ccf->t1040)
+ errinten |= ERRDET_UTID | ERRDET_MCST;
+
switch (ccf->info->version) {
case CCF1:
/* On CCF1 this register enables rather than disables. */
- iowrite32be(ERRDET_LAE | ERRDET_CV, &ccf->err_regs->errdis);
+ iowrite32be(errinten, &ccf->err_regs->errdis);
break;
case CCF2:
iowrite32be(0, &ccf->err_regs->errdis);
- iowrite32be(ERRDET_LAE | ERRDET_CV, &ccf->err_regs->errinten);
+ iowrite32be(errinten, &ccf->err_regs->errinten);
break;
}
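As a hedged aside (not part of the patch): the T1040 detection above keys off the IP_ID field in the upper 16 bits of the undocumented BRR register, so it could equally be written as a small helper. A sketch using only the constants defined in this patch:

	static bool ccf_is_t1040(struct ccf_private *ccf)
	{
		/* IP_ID occupies BRR bits 31:16; 0x0931 identifies the T1040. */
		u32 brr = ioread32be(ccf->regs + CCF_BRR);

		return (brr & CCF_BRR_IPID) == CCF_BRR_IPID_T1040;
	}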
diff --git a/drivers/message/Makefile b/drivers/message/Makefile
index 97ef5a01ad11..755676ded67c 100644
--- a/drivers/message/Makefile
+++ b/drivers/message/Makefile
@@ -2,5 +2,4 @@
# Makefile for MPT based block devices
#
-obj-$(CONFIG_I2O) += i2o/
obj-$(CONFIG_FUSION) += fusion/
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 52a0c2f6264f..ae498b53ee40 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
return ret;
}
- ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO,
+ da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
if (ret) {
dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index dbdd0faeb6ce..210d1f85679e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
#ifdef CONFIG_PM
static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct rtsx_ucr *ucr =
- (struct rtsx_ucr *)usb_get_intfdata(intf);
-
dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
__func__, message.event);
- /*
- * Call to make sure LED is off during suspend to save more power.
- * It is NOT a permanent state and could be turned on anytime later.
- * Thus no need to call turn_on when resunming.
- */
- mutex_lock(&ucr->dev_mutex);
- rtsx_usb_turn_off_led(ucr);
- mutex_unlock(&ucr->dev_mutex);
-
return 0;
}
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 0d256cb002eb..d6b764349f9d 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65218_clear_bits);
+static const struct regmap_range tps65218_yes_ranges[] = {
+ regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2),
+ regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS),
+};
+
+static const struct regmap_access_table tps65218_volatile_table = {
+ .yes_ranges = tps65218_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
+};
+
static struct regmap_config tps65218_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65218_volatile_table,
};
static const struct regmap_irq tps65218_irqs[] = {
@@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = {
.num_regs = 2,
.mask_base = TPS65218_REG_INT_MASK1,
+ .status_base = TPS65218_REG_INT1,
};
static const struct of_device_id of_tps65218_match_table[] = {
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 9da04ede04f3..f4c82eafa8e5 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -15,18 +15,21 @@
static int write8(void *client, u8 val)
{
u8 data = val;
+
return spi_write(client, &data, 1);
}
static int write16(void *client, u8 reg, u8 val)
{
u8 data[2] = {reg, val};
+
return spi_write(client, data, 2);
}
static int write24(void *client, u8 reg, u16 val)
{
u8 data[3] = {reg, val >> 8, val};
+
return spi_write(client, data, 3);
}
@@ -34,6 +37,7 @@ static int read8(void *client)
{
int ret;
u8 data;
+
ret = spi_read(client, &data, 1);
if (ret < 0)
return ret;
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index a43053daad0e..15e88078ba1e 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -176,6 +176,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
{
int value;
unsigned ctrl = 0;
+
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
case DPOT_UID(AD5247_ID):
@@ -333,7 +334,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
case DPOT_UID(AD5246_ID):
case DPOT_UID(AD5247_ID):
return dpot_write_d8(dpot, value);
- break;
case DPOT_UID(AD5245_ID):
case DPOT_UID(AD5241_ID):
@@ -345,7 +345,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
0 : DPOT_AD5282_RDAC_AB;
return dpot_write_r8d8(dpot, ctrl, value);
- break;
case DPOT_UID(AD5171_ID):
case DPOT_UID(AD5273_ID):
if (reg & DPOT_ADDR_OTP) {
@@ -355,7 +354,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
ctrl = DPOT_AD5273_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
- break;
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
@@ -367,7 +365,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
ctrl |= DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
- break;
case DPOT_UID(AD5170_ID):
if (reg & DPOT_ADDR_OTP) {
tmp = dpot_read_r8d16(dpot, tmp);
@@ -376,7 +373,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
ctrl = DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
- break;
case DPOT_UID(AD5272_ID):
case DPOT_UID(AD5274_ID):
dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
@@ -391,7 +387,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
(value >> 8), value & 0xFF);
- break;
default:
if (reg & DPOT_ADDR_CMD)
return dpot_write_d8(dpot, reg);
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 165e98fef2c2..edb494d3ff27 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,6 @@
-cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o
+cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o trace.o
obj-$(CONFIG_CXL) += cxl.o
obj-$(CONFIG_CXL_BASE) += base.o
+
+# For tracepoints to include our trace.h from tracepoint infrastructure:
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct cxl_context *ctx = vma->vm_file->private_data;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ u64 area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+
+ pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+ __func__, ctx->pe, address, offset);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+ if (offset > ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+ if (offset > ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&ctx->status_mutex);
+
+ if (ctx->status != STARTED) {
+ mutex_unlock(&ctx->status_mutex);
+ pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+ mutex_unlock(&ctx->status_mutex);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+ .fault = cxl_mmap_fault,
+};
+
/*
* Map a per-context mmio space into the given vma.
*/
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
- }
+ if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
- /* make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
}
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->psn_phys, len);
+ vma->vm_ops = &cxl_mmap_vmops;
+ return 0;
}
/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
-
- /* Release Problem State Area mapping */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
}
/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
* created and torn down after the IDR removed
*/
__detach_context(ctx);
+
+ /*
+ * We are force detaching - remove any active PSA mappings so
+ * userspace cannot interfere with the card if it comes back.
+ * Easiest way to exercise this is to unbind and rebind the
+ * driver via sysfs while it is in use.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 28078f8894a5..a1cee4767ec6 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -287,6 +287,13 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */
#define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */
+/****** CXL_PSL_RXCTL_An (Implementation Specific) **************************
+ * Controls AFU Hang Pulse, which sets the timeout for the AFU to respond to
+ * the PSL for any response (except MMIO). Timeouts will occur between 1x to 2x
+ * of the hang pulse frequency.
+ */
+#define CXL_PSL_RXCTL_AFUHP_4S 0x7000000000000000ULL
+
/* SPA->sw_command_status */
#define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL
#define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL
@@ -375,6 +382,10 @@ struct cxl_afu {
int slice;
int modes_supported;
int current_mode;
+ int crs_num;
+ u64 crs_len;
+ u64 crs_offset;
+ struct list_head crs;
enum prefault_modes prefault_mode;
bool psa;
bool pp_psa;
@@ -481,6 +492,8 @@ void cxl_release_one_irq(struct cxl *adapter, int hwirq);
int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
+int cxl_update_image_control(struct cxl *adapter);
+int cxl_reset(struct cxl *adapter);
/* common == phyp + powernv */
struct cxl_process_element_common {
@@ -542,6 +555,15 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg
#define cxl_p2n_read(afu, reg) \
in_be64(_cxl_p2n_addr(afu, reg))
+
+#define cxl_afu_cr_read64(afu, cr, off) \
+ in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+#define cxl_afu_cr_read32(afu, cr, off) \
+ in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
+u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
+
+
struct cxl_calls {
void (*cxl_slbia)(struct mm_struct *mm);
struct module *owner;
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index f8684bca2d79..5286b8b704f5 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -20,6 +20,7 @@
#include <asm/mmu.h>
#include "cxl.h"
+#include "trace.h"
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
@@ -75,6 +76,7 @@ static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
sste - ctx->sstp, slb->vsid, slb->esid);
+ trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
sste->vsid_data = cpu_to_be64(slb->vsid);
sste->esid_data = cpu_to_be64(slb->esid);
@@ -116,6 +118,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
int rc;
pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
+ trace_cxl_ste_miss(ctx, ea);
if ((rc = cxl_fault_segment(ctx, mm, ea)))
cxl_ack_ae(ctx);
@@ -135,6 +138,8 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
int result;
unsigned long access, flags, inv_flags = 0;
+ trace_cxl_pte_miss(ctx, dsisr, dar);
+
if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
pr_devel("copro_handle_mm_fault failed: %#x\n", result);
return cxl_ack_ae(ctx);
@@ -180,6 +185,12 @@ void cxl_handle_fault(struct work_struct *fault_work)
return;
}
+ /* Early return if the context is being / has been detached */
+ if (ctx->status == CLOSED) {
+ cxl_ack_ae(ctx);
+ return;
+ }
+
pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb37..2364bcadb9a9 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -23,6 +23,7 @@
#include <asm/copro.h>
#include "cxl.h"
+#include "trace.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */
#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
@@ -140,18 +141,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT;
goto out;
}
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
/*
* if any of the reserved fields are set or any of the unused
* flags are set it's invalid
@@ -184,9 +187,13 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
*/
ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+ trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+
if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
- amr)))
+ amr))) {
+ afu_release_irqs(ctx);
goto out;
+ }
ctx->status = STARTED;
rc = 0;
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index c294925f73ee..c8929c526691 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -17,6 +17,7 @@
#include <misc/cxl.h>
#include "cxl.h"
+#include "trace.h"
/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
@@ -100,6 +101,8 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
dsisr = irq_info->dsisr;
dar = irq_info->dar;
+ trace_cxl_psl_irq(ctx, irq, dsisr, dar);
+
pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
if (dsisr & CXL_PSL_DSISR_An_DS) {
@@ -167,6 +170,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
}
cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+ return IRQ_HANDLED;
}
if (dsisr & CXL_PSL_DSISR_An_OC)
pr_devel("CXL interrupt: OS Context Warning\n");
@@ -237,6 +241,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
return IRQ_HANDLED;
}
+ trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
afu_irq, ctx->pe, irq, hwirq);
@@ -436,7 +441,7 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
*/
INIT_LIST_HEAD(&ctx->irq_names);
for (r = 1; r < CXL_IRQ_RANGES; r++) {
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ for (i = 0; i < ctx->irqs.range[r]; i++) {
irq_name = kmalloc(sizeof(struct cxl_irq_name),
GFP_KERNEL);
if (!irq_name)
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 4cde9b661642..8ccddceead66 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -23,6 +23,7 @@
#include <misc/cxl.h>
#include "cxl.h"
+#include "trace.h"
static DEFINE_SPINLOCK(adapter_idr_lock);
static DEFINE_IDR(cxl_adapter_idr);
@@ -48,6 +49,7 @@ static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
spin_lock_irqsave(&ctx->sste_lock, flags);
+ trace_cxl_slbia(ctx);
memset(ctx->sstp, 0, ctx->sst_size);
spin_unlock_irqrestore(&ctx->sste_lock, flags);
mb();
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index f2b37b41a0da..29185fc61276 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -18,24 +18,28 @@
#include <misc/cxl.h>
#include "cxl.h"
+#include "trace.h"
static int afu_control(struct cxl_afu *afu, u64 command,
u64 result, u64 mask, bool enabled)
{
u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+ int rc = 0;
spin_lock(&afu->afu_cntl_lock);
pr_devel("AFU command starting: %llx\n", command);
+ trace_cxl_afu_ctrl(afu, command);
+
cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
while ((AFU_Cntl & mask) != result) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
- spin_unlock(&afu->afu_cntl_lock);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
AFU_Cntl | command);
@@ -44,9 +48,11 @@ static int afu_control(struct cxl_afu *afu, u64 command,
};
pr_devel("AFU command complete: %llx\n", command);
afu->enabled = enabled;
+out:
+ trace_cxl_afu_ctrl_done(afu, command, rc);
spin_unlock(&afu->afu_cntl_lock);
- return 0;
+ return rc;
}
static int afu_enable(struct cxl_afu *afu)
@@ -91,6 +97,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
u64 dsisr, dar;
u64 start, end;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+ int rc = 0;
+
+ trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
pr_devel("PSL purge request\n");
@@ -107,7 +116,8 @@ int cxl_psl_purge(struct cxl_afu *afu)
== CXL_PSL_SCNTL_An_Ps_Pending) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
@@ -128,7 +138,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
- return 0;
+out:
+ trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
+ return rc;
}
static int spa_max_procs(int spa_size)
@@ -185,6 +197,7 @@ static int alloc_spa(struct cxl_afu *afu)
static void release_spa(struct cxl_afu *afu)
{
+ cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
free_pages((unsigned long) afu->spa, afu->spa_order);
}
@@ -278,6 +291,9 @@ static int do_process_element_cmd(struct cxl_context *ctx,
{
u64 state;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+ int rc = 0;
+
+ trace_cxl_llcmd(ctx, cmd);
WARN_ON(!ctx->afu->enabled);
@@ -289,12 +305,14 @@ static int do_process_element_cmd(struct cxl_context *ctx,
while (1) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
state = be64_to_cpup(ctx->afu->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
- return -1;
+ rc = -1;
+ goto out;
}
if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
(cmd | (cmd >> 16) | ctx->pe))
@@ -309,7 +327,9 @@ static int do_process_element_cmd(struct cxl_context *ctx,
schedule();
}
- return 0;
+out:
+ trace_cxl_llcmd_done(ctx, cmd, rc);
+ return rc;
}
static int add_process_element(struct cxl_context *ctx)
@@ -629,6 +649,8 @@ static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
int cxl_detach_process(struct cxl_context *ctx)
{
+ trace_cxl_detach(ctx);
+
if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
return detach_process_native_dedicated(ctx);
@@ -667,6 +689,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
+ trace_cxl_psl_irq_ack(ctx, tfc);
if (tfc)
cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
if (psl_reset_mask)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 0f2cc9f8b4db..1ef01647265f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -21,6 +21,7 @@
#include <asm/msi_bitmap.h>
#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
+#include <asm/io.h>
#include "cxl.h"
@@ -113,6 +114,24 @@
#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
+u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val;
+
+ val = cxl_afu_cr_read32(afu, cr, aligned_off);
+ return (val >> ((off & 0x2) * 8)) & 0xffff;
+}
+
+u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val;
+
+ val = cxl_afu_cr_read32(afu, cr, aligned_off);
+ return (val >> ((off & 0x3) * 8)) & 0xff;
+}
+
static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
@@ -316,7 +335,7 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
u64 psl_dsnctl;
u64 chipid;
- if (!(np = pnv_pci_to_phb_node(dev)))
+ if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
@@ -348,7 +367,7 @@ static int init_implementation_afu_regs(struct cxl_afu *afu)
cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
/* for debugging with trace arrays */
cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
- cxl_p1n_write(afu, CXL_PSL_RXCTL_A, 0xF000000000000000ULL);
+ cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);
return 0;
}
@@ -361,6 +380,41 @@ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}
+int cxl_update_image_control(struct cxl *adapter)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+ int rc;
+ int vsec;
+ u8 image_state;
+
+ if (!(vsec = find_cxl_vsec(dev))) {
+ dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
+ return -ENODEV;
+ }
+
+ if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
+ dev_err(&dev->dev, "failed to read image state: %i\n", rc);
+ return rc;
+ }
+
+ if (adapter->perst_loads_image)
+ image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
+ else
+ image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
+
+ if (adapter->perst_select_user)
+ image_state |= CXL_VSEC_PERST_SELECT_USER;
+ else
+ image_state &= ~CXL_VSEC_PERST_SELECT_USER;
+
+ if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
+ dev_err(&dev->dev, "failed to update image control: %i\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
int cxl_alloc_one_irq(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
@@ -520,6 +574,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
val = AFUD_READ_INFO(afu);
afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
+ afu->crs_num = AFUD_NUM_CRS(val);
if (AFUD_AFU_DIRECTED(val))
afu->modes_supported |= CXL_MODE_DIRECTED;
@@ -534,11 +589,17 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
+ val = AFUD_READ_CR(afu);
+ afu->crs_len = AFUD_CR_LEN(val) * 256;
+ afu->crs_offset = AFUD_READ_CR_OFF(afu);
+
return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
+ int i;
+
if (afu->psa && afu->adapter->ps_size <
(afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
@@ -548,6 +609,13 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
+ for (i = 0; i < afu->crs_num; i++) {
+ if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
+ dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -706,6 +774,42 @@ static void cxl_remove_afu(struct cxl_afu *afu)
device_unregister(&afu->dev);
}
+int cxl_reset(struct cxl *adapter)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+ int rc;
+ int i;
+ u32 val;
+
+ dev_info(&dev->dev, "CXL reset\n");
+
+ for (i = 0; i < adapter->slices; i++)
+ cxl_remove_afu(adapter->afu[i]);
+
+ /* pcie_warm_reset requests a fundamental pci reset which includes a
+ * PERST assert/deassert. PERST triggers a loading of the image
+ * if "user" or "factory" is selected in sysfs */
+ if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
+ dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
+ return rc;
+ }
+
+ /* the PERST done above fences the PHB. So, reset depends on EEH
+ * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
+ * the driver. Do an mmio read explicitly to ensure EEH notices the
+ * fenced PHB. Retry for a few seconds before giving up. */
+ i = 0;
+ while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
+ (i < 5)) {
+ msleep(500);
+ i++;
+ }
+
+ if (val != 0xffffffff)
+ dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
+
+ return rc;
+}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
@@ -770,8 +874,8 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
- adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
- adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER);
+ adapter->perst_loads_image = true;
+ adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
@@ -879,6 +983,9 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
if ((rc = cxl_vsec_looks_ok(adapter, dev)))
goto err2;
+ if ((rc = cxl_update_image_control(adapter)))
+ goto err2;
+
if ((rc = cxl_map_adapter_regs(adapter, dev)))
goto err2;
@@ -888,9 +995,15 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
if ((rc = init_implementation_adapter_regs(adapter, dev)))
goto err3;
- if ((rc = pnv_phb_to_cxl(dev)))
+ if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
goto err3;
+ /* If recovery happened, the last step is to turn on snooping.
+ * In the non-recovery case this has no effect */
+ if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
+ goto err3;
+ }
+
if ((rc = cxl_register_psl_err_irq(adapter)))
goto err3;
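A short editor's sketch (not part of the patch) showing how the new sub-word configuration-record accessors are expected to be used; PCI_VENDOR_ID/PCI_DEVICE_ID come from <linux/pci_regs.h> and the dev_info() call is only for illustration:

	static void cxl_dump_cr0_ids(struct cxl_afu *afu)
	{
		/* Offset 0x0 is already aligned; offset 0x2 triggers one aligned
		 * 32-bit read at 0x0 and selects the upper two byte lanes. */
		u16 vendor = cxl_afu_cr_read16(afu, 0, PCI_VENDOR_ID);
		u16 device = cxl_afu_cr_read16(afu, 0, PCI_DEVICE_ID);

		dev_info(&afu->dev, "CR0 vendor=0x%.4x device=0x%.4x\n",
			 vendor, device);
	}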
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 461bdbd5d483..d0c38c7bc0c4 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/pci_regs.h>
#include "cxl.h"
@@ -56,11 +57,68 @@ static ssize_t image_loaded_show(struct device *device,
return scnprintf(buf, PAGE_SIZE, "factory\n");
}
+static ssize_t reset_adapter_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+ int rc;
+ int val;
+
+ rc = sscanf(buf, "%i", &val);
+ if ((rc != 1) || (val != 1))
+ return -EINVAL;
+
+ if ((rc = cxl_reset(adapter)))
+ return rc;
+ return count;
+}
+
+static ssize_t load_image_on_perst_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ if (!adapter->perst_loads_image)
+ return scnprintf(buf, PAGE_SIZE, "none\n");
+
+ if (adapter->perst_select_user)
+ return scnprintf(buf, PAGE_SIZE, "user\n");
+ return scnprintf(buf, PAGE_SIZE, "factory\n");
+}
+
+static ssize_t load_image_on_perst_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+ int rc;
+
+ if (!strncmp(buf, "none", 4))
+ adapter->perst_loads_image = false;
+ else if (!strncmp(buf, "user", 4)) {
+ adapter->perst_select_user = true;
+ adapter->perst_loads_image = true;
+ } else if (!strncmp(buf, "factory", 7)) {
+ adapter->perst_select_user = false;
+ adapter->perst_loads_image = true;
+ } else
+ return -EINVAL;
+
+ if ((rc = cxl_update_image_control(adapter)))
+ return rc;
+
+ return count;
+}
+
static struct device_attribute adapter_attrs[] = {
__ATTR_RO(caia_version),
__ATTR_RO(psl_revision),
__ATTR_RO(base_image),
__ATTR_RO(image_loaded),
+ __ATTR_RW(load_image_on_perst),
+ __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};
@@ -310,8 +368,6 @@ static struct device_attribute afu_attrs[] = {
__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};
-
-
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
int i, rc;
@@ -334,31 +390,191 @@ void cxl_sysfs_adapter_remove(struct cxl *adapter)
device_remove_file(&adapter->dev, &adapter_attrs[i]);
}
+struct afu_config_record {
+ struct kobject kobj;
+ struct bin_attribute config_attr;
+ struct list_head list;
+ int cr;
+ u16 device;
+ u16 vendor;
+ u32 class;
+};
+
+#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
+
+static ssize_t vendor_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct afu_config_record *cr = to_cr(kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
+}
+
+static ssize_t device_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct afu_config_record *cr = to_cr(kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
+}
+
+static ssize_t class_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct afu_config_record *cr = to_cr(kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
+}
+
+static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct afu_config_record *cr = to_cr(kobj);
+ struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+
+ u64 i, j, val, size = afu->crs_len;
+
+ if (off > size)
+ return 0;
+ if (off + count > size)
+ count = size - off;
+
+ for (i = 0; i < count;) {
+ val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
+ for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
+ buf[i] = (val >> (j * 8)) & 0xff;
+ }
+
+ return count;
+}
+
+static struct kobj_attribute vendor_attribute =
+ __ATTR_RO(vendor);
+static struct kobj_attribute device_attribute =
+ __ATTR_RO(device);
+static struct kobj_attribute class_attribute =
+ __ATTR_RO(class);
+
+static struct attribute *afu_cr_attrs[] = {
+ &vendor_attribute.attr,
+ &device_attribute.attr,
+ &class_attribute.attr,
+ NULL,
+};
+
+static void release_afu_config_record(struct kobject *kobj)
+{
+ struct afu_config_record *cr = to_cr(kobj);
+
+ kfree(cr);
+}
+
+static struct kobj_type afu_config_record_type = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = release_afu_config_record,
+ .default_attrs = afu_cr_attrs,
+};
+
+static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
+{
+ struct afu_config_record *cr;
+ int rc;
+
+ cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
+ if (!cr)
+ return ERR_PTR(-ENOMEM);
+
+ cr->cr = cr_idx;
+ cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID);
+ cr->vendor = cxl_afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID);
+ cr->class = cxl_afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION) >> 8;
+
+ /*
+ * Export raw AFU PCIe like config record. For now this is read only by
+ * root - we can expand that later to be readable by non-root and maybe
+ * even writable provided we have a good use-case. Once we support
+ * exposing AFUs through a virtual PHB they will get that for free from
+ * Linux' PCI infrastructure, but until then it's not clear that we
+ * need it for anything since the main use case is just identifying
+ * AFUs, which can be done via the vendor, device and class attributes.
+ */
+ sysfs_bin_attr_init(&cr->config_attr);
+ cr->config_attr.attr.name = "config";
+ cr->config_attr.attr.mode = S_IRUSR;
+ cr->config_attr.size = afu->crs_len;
+ cr->config_attr.read = afu_read_config;
+
+ rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
+ &afu->dev.kobj, "cr%i", cr->cr);
+ if (rc)
+ goto err;
+
+ rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
+ if (rc)
+ goto err1;
+
+ rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
+ if (rc)
+ goto err2;
+
+ return cr;
+err2:
+ sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
+err1:
+ kobject_put(&cr->kobj);
+ return ERR_PTR(rc);
+err:
+ kfree(cr);
+ return ERR_PTR(rc);
+}
+
+void cxl_sysfs_afu_remove(struct cxl_afu *afu)
+{
+ struct afu_config_record *cr, *tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
+ device_remove_file(&afu->dev, &afu_attrs[i]);
+
+ list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
+ sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
+ kobject_put(&cr->kobj);
+ }
+}
+
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
+ struct afu_config_record *cr;
int i, rc;
+ INIT_LIST_HEAD(&afu->crs);
+
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
goto err;
}
+ for (i = 0; i < afu->crs_num; i++) {
+ cr = cxl_sysfs_afu_new_cr(afu, i);
+ if (IS_ERR(cr)) {
+ rc = PTR_ERR(cr);
+ goto err1;
+ }
+ list_add(&cr->list, &afu->crs);
+ }
+
return 0;
+err1:
+ cxl_sysfs_afu_remove(afu);
+ return rc;
err:
for (i--; i >= 0; i--)
device_remove_file(&afu->dev, &afu_attrs[i]);
return rc;
}
-void cxl_sysfs_afu_remove(struct cxl_afu *afu)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
- device_remove_file(&afu->dev, &afu_attrs[i]);
-}
-
int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
int i, rc;
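An illustrative userspace sketch (not part of the patch) for the two new adapter attributes; the /sys/class/cxl/card0 path is an assumption based on the existing cxl card naming and should be adjusted for the card in use:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));
		close(fd);
		return n == (ssize_t)strlen(val) ? 0 : -1;
	}

	int main(void)
	{
		/* Select the user image for the next PERST, then reset the card. */
		if (write_attr("/sys/class/cxl/card0/load_image_on_perst", "user"))
			return 1;
		return write_attr("/sys/class/cxl/card0/reset", "1") ? 1 : 0;
	}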
diff --git a/drivers/misc/cxl/trace.c b/drivers/misc/cxl/trace.c
new file mode 100644
index 000000000000..c2b06d319e6e
--- /dev/null
+++ b/drivers/misc/cxl/trace.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#endif
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
new file mode 100644
index 000000000000..ae434d87887e
--- /dev/null
+++ b/drivers/misc/cxl/trace.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cxl
+
+#if !defined(_CXL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CXL_TRACE_H
+
+#include <linux/tracepoint.h>
+
+#include "cxl.h"
+
+#define DSISR_FLAGS \
+ { CXL_PSL_DSISR_An_DS, "DS" }, \
+ { CXL_PSL_DSISR_An_DM, "DM" }, \
+ { CXL_PSL_DSISR_An_ST, "ST" }, \
+ { CXL_PSL_DSISR_An_UR, "UR" }, \
+ { CXL_PSL_DSISR_An_PE, "PE" }, \
+ { CXL_PSL_DSISR_An_AE, "AE" }, \
+ { CXL_PSL_DSISR_An_OC, "OC" }, \
+ { CXL_PSL_DSISR_An_M, "M" }, \
+ { CXL_PSL_DSISR_An_P, "P" }, \
+ { CXL_PSL_DSISR_An_A, "A" }, \
+ { CXL_PSL_DSISR_An_S, "S" }, \
+ { CXL_PSL_DSISR_An_K, "K" }
+
+#define TFC_FLAGS \
+ { CXL_PSL_TFC_An_A, "A" }, \
+ { CXL_PSL_TFC_An_C, "C" }, \
+ { CXL_PSL_TFC_An_AE, "AE" }, \
+ { CXL_PSL_TFC_An_R, "R" }
+
+#define LLCMD_NAMES \
+ { CXL_SPA_SW_CMD_TERMINATE, "TERMINATE" }, \
+ { CXL_SPA_SW_CMD_REMOVE, "REMOVE" }, \
+ { CXL_SPA_SW_CMD_SUSPEND, "SUSPEND" }, \
+ { CXL_SPA_SW_CMD_RESUME, "RESUME" }, \
+ { CXL_SPA_SW_CMD_ADD, "ADD" }, \
+ { CXL_SPA_SW_CMD_UPDATE, "UPDATE" }
+
+#define AFU_COMMANDS \
+ { 0, "DISABLE" }, \
+ { CXL_AFU_Cntl_An_E, "ENABLE" }, \
+ { CXL_AFU_Cntl_An_RA, "RESET" }
+
+#define PSL_COMMANDS \
+ { CXL_PSL_SCNTL_An_Pc, "PURGE" }, \
+ { CXL_PSL_SCNTL_An_Sc, "SUSPEND" }
+
+
+DECLARE_EVENT_CLASS(cxl_pe_class,
+ TP_PROTO(struct cxl_context *ctx),
+
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ ),
+
+ TP_printk("afu%i.%i pe=%i",
+ __entry->card,
+ __entry->afu,
+ __entry->pe
+ )
+);
+
+
+TRACE_EVENT(cxl_attach,
+ TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr),
+
+ TP_ARGS(ctx, wed, num_interrupts, amr),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(pid_t, pid)
+ __field(u64, wed)
+ __field(u64, amr)
+ __field(s16, num_interrupts)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->pid = pid_nr(ctx->pid);
+ __entry->wed = wed;
+ __entry->amr = amr;
+ __entry->num_interrupts = num_interrupts;
+ ),
+
+ TP_printk("afu%i.%i pid=%i pe=%i wed=0x%.16llx irqs=%i amr=0x%llx",
+ __entry->card,
+ __entry->afu,
+ __entry->pid,
+ __entry->pe,
+ __entry->wed,
+ __entry->num_interrupts,
+ __entry->amr
+ )
+);
+
+DEFINE_EVENT(cxl_pe_class, cxl_detach,
+ TP_PROTO(struct cxl_context *ctx),
+ TP_ARGS(ctx)
+);
+
+TRACE_EVENT(cxl_afu_irq,
+ TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq),
+
+ TP_ARGS(ctx, afu_irq, virq, hwirq),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(u16, afu_irq)
+ __field(int, virq)
+ __field(irq_hw_number_t, hwirq)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->afu_irq = afu_irq;
+ __entry->virq = virq;
+ __entry->hwirq = hwirq;
+ ),
+
+ TP_printk("afu%i.%i pe=%i afu_irq=%i virq=%i hwirq=0x%lx",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __entry->afu_irq,
+ __entry->virq,
+ __entry->hwirq
+ )
+);
+
+TRACE_EVENT(cxl_psl_irq,
+ TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
+
+ TP_ARGS(ctx, irq, dsisr, dar),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(int, irq)
+ __field(u64, dsisr)
+ __field(u64, dar)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->irq = irq;
+ __entry->dsisr = dsisr;
+ __entry->dar = dar;
+ ),
+
+ TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%.16llx",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __entry->irq,
+ __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
+ __entry->dar
+ )
+);
+
+TRACE_EVENT(cxl_psl_irq_ack,
+ TP_PROTO(struct cxl_context *ctx, u64 tfc),
+
+ TP_ARGS(ctx, tfc),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(u64, tfc)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->tfc = tfc;
+ ),
+
+ TP_printk("afu%i.%i pe=%i tfc=%s",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __print_flags(__entry->tfc, "|", TFC_FLAGS)
+ )
+);
+
+TRACE_EVENT(cxl_ste_miss,
+ TP_PROTO(struct cxl_context *ctx, u64 dar),
+
+ TP_ARGS(ctx, dar),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(u64, dar)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->dar = dar;
+ ),
+
+ TP_printk("afu%i.%i pe=%i dar=0x%.16llx",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __entry->dar
+ )
+);
+
+TRACE_EVENT(cxl_ste_write,
+ TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v),
+
+ TP_ARGS(ctx, idx, e, v),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(unsigned int, idx)
+ __field(u64, e)
+ __field(u64, v)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->idx = idx;
+ __entry->e = e;
+ __entry->v = v;
+ ),
+
+ TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%.16llx V=0x%.16llx",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __entry->idx,
+ __entry->e,
+ __entry->v
+ )
+);
+
+TRACE_EVENT(cxl_pte_miss,
+ TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar),
+
+ TP_ARGS(ctx, dsisr, dar),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(u64, dsisr)
+ __field(u64, dar)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->dsisr = dsisr;
+ __entry->dar = dar;
+ ),
+
+ TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%.16llx",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
+ __entry->dar
+ )
+);
+
+TRACE_EVENT(cxl_llcmd,
+ TP_PROTO(struct cxl_context *ctx, u64 cmd),
+
+ TP_ARGS(ctx, cmd),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(u64, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("afu%i.%i pe=%i cmd=%s",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __print_symbolic_u64(__entry->cmd, LLCMD_NAMES)
+ )
+);
+
+TRACE_EVENT(cxl_llcmd_done,
+ TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc),
+
+ TP_ARGS(ctx, cmd, rc),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(u64, cmd)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->rc = rc;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("afu%i.%i pe=%i cmd=%s rc=%i",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __print_symbolic_u64(__entry->cmd, LLCMD_NAMES),
+ __entry->rc
+ )
+);
+
+DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl,
+ TP_PROTO(struct cxl_afu *afu, u64 cmd),
+
+ TP_ARGS(afu, cmd),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u64, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->card = afu->adapter->adapter_num;
+ __entry->afu = afu->slice;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("afu%i.%i cmd=%s",
+ __entry->card,
+ __entry->afu,
+ __print_symbolic_u64(__entry->cmd, AFU_COMMANDS)
+ )
+);
+
+DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl_done,
+ TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
+
+ TP_ARGS(afu, cmd, rc),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u64, cmd)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->card = afu->adapter->adapter_num;
+ __entry->afu = afu->slice;
+ __entry->rc = rc;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("afu%i.%i cmd=%s rc=%i",
+ __entry->card,
+ __entry->afu,
+ __print_symbolic_u64(__entry->cmd, AFU_COMMANDS),
+ __entry->rc
+ )
+);
+
+DEFINE_EVENT(cxl_afu_psl_ctrl, cxl_afu_ctrl,
+ TP_PROTO(struct cxl_afu *afu, u64 cmd),
+ TP_ARGS(afu, cmd)
+);
+
+DEFINE_EVENT(cxl_afu_psl_ctrl_done, cxl_afu_ctrl_done,
+ TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
+ TP_ARGS(afu, cmd, rc)
+);
+
+DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl, cxl_psl_ctrl,
+ TP_PROTO(struct cxl_afu *afu, u64 cmd),
+ TP_ARGS(afu, cmd),
+
+ TP_printk("psl%i.%i cmd=%s",
+ __entry->card,
+ __entry->afu,
+ __print_symbolic_u64(__entry->cmd, PSL_COMMANDS)
+ )
+);
+
+DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl_done, cxl_psl_ctrl_done,
+ TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
+ TP_ARGS(afu, cmd, rc),
+
+ TP_printk("psl%i.%i cmd=%s rc=%i",
+ __entry->card,
+ __entry->afu,
+ __print_symbolic_u64(__entry->cmd, PSL_COMMANDS),
+ __entry->rc
+ )
+);
+
+DEFINE_EVENT(cxl_pe_class, cxl_slbia,
+ TP_PROTO(struct cxl_context *ctx),
+ TP_ARGS(ctx)
+);
+
+#endif /* _CXL_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 180a5442fd4b..38552a31304a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -145,8 +145,11 @@ enclosure_register(struct device *dev, const char *name, int components,
if (err)
goto err;
- for (i = 0; i < components; i++)
+ for (i = 0; i < components; i++) {
edev->component[i].number = -1;
+ edev->component[i].slot = -1;
+ edev->component[i].power_status = 1;
+ }
mutex_lock(&container_list_lock);
list_add_tail(&edev->node, &container_list);
@@ -273,27 +276,26 @@ enclosure_component_find_by_name(struct enclosure_device *edev,
static const struct attribute_group *enclosure_component_groups[];
/**
- * enclosure_component_register - add a particular component to an enclosure
+ * enclosure_component_alloc - prepare a new enclosure component
* @edev: the enclosure to add the component
* @num: the device number
* @type: the type of component being added
* @name: an optional name to appear in sysfs (leave NULL if none)
*
- * Registers the component. The name is optional for enclosures that
- * give their components a unique name. If not, leave the field NULL
- * and a name will be assigned.
+ * The name is optional for enclosures that give their components a unique
+ * name. If not, leave the field NULL and a name will be assigned.
*
* Returns a pointer to the enclosure component or an error.
*/
struct enclosure_component *
-enclosure_component_register(struct enclosure_device *edev,
- unsigned int number,
- enum enclosure_component_type type,
- const char *name)
+enclosure_component_alloc(struct enclosure_device *edev,
+ unsigned int number,
+ enum enclosure_component_type type,
+ const char *name)
{
struct enclosure_component *ecomp;
struct device *cdev;
- int err, i;
+ int i;
char newname[COMPONENT_NAME_SIZE];
if (number >= edev->components)
@@ -327,14 +329,30 @@ enclosure_component_register(struct enclosure_device *edev,
cdev->release = enclosure_component_release;
cdev->groups = enclosure_component_groups;
+ return ecomp;
+}
+EXPORT_SYMBOL_GPL(enclosure_component_alloc);
+
+/**
+ * enclosure_component_register - publishes an initialized enclosure component
+ * @ecomp: component to add
+ *
+ * Returns 0 on successful registration, releases the component otherwise
+ */
+int enclosure_component_register(struct enclosure_component *ecomp)
+{
+ struct device *cdev;
+ int err;
+
+ cdev = &ecomp->cdev;
err = device_register(cdev);
if (err) {
ecomp->number = -1;
put_device(cdev);
- return ERR_PTR(err);
+ return err;
}
- return ecomp;
+ return 0;
}
EXPORT_SYMBOL_GPL(enclosure_component_register);
@@ -417,8 +435,21 @@ static ssize_t components_show(struct device *cdev,
}
static DEVICE_ATTR_RO(components);
+static ssize_t id_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev);
+
+ if (edev->cb->show_id)
+ return edev->cb->show_id(edev, buf);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RO(id);
+
static struct attribute *enclosure_class_attrs[] = {
&dev_attr_components.attr,
+ &dev_attr_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(enclosure_class);
@@ -553,6 +584,40 @@ static ssize_t set_component_locate(struct device *cdev,
return count;
}
+static ssize_t get_component_power_status(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_power_status)
+ edev->cb->get_power_status(edev, ecomp);
+ return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
+}
+
+static ssize_t set_component_power_status(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val;
+
+ if (strncmp(buf, "on", 2) == 0 &&
+ (buf[2] == '\n' || buf[2] == '\0'))
+ val = 1;
+ else if (strncmp(buf, "off", 3) == 0 &&
+ (buf[3] == '\n' || buf[3] == '\0'))
+ val = 0;
+ else
+ return -EINVAL;
+
+ if (edev->cb->set_power_status)
+ edev->cb->set_power_status(edev, ecomp, val);
+ return count;
+}
+
static ssize_t get_component_type(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -561,6 +626,20 @@ static ssize_t get_component_type(struct device *cdev,
return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
}
+static ssize_t get_component_slot(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int slot;
+
+ /* if the enclosure does not override then use 'number' as a stand-in */
+ if (ecomp->slot >= 0)
+ slot = ecomp->slot;
+ else
+ slot = ecomp->number;
+
+ return snprintf(buf, 40, "%d\n", slot);
+}
static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
set_component_fault);
@@ -570,14 +649,19 @@ static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
set_component_active);
static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
set_component_locate);
+static DEVICE_ATTR(power_status, S_IRUGO | S_IWUSR, get_component_power_status,
+ set_component_power_status);
static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+static DEVICE_ATTR(slot, S_IRUGO, get_component_slot, NULL);
static struct attribute *enclosure_component_attrs[] = {
&dev_attr_fault.attr,
&dev_attr_status.attr,
&dev_attr_active.attr,
&dev_attr_locate.attr,
+ &dev_attr_power_status.attr,
&dev_attr_type.attr,
+ &dev_attr_slot.attr,
NULL
};
ATTRIBUTE_GROUPS(enclosure_component);
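A hedged sketch (not part of the patch) of the new two-step registration flow from a hypothetical enclosure driver, showing why the alloc/register split exists: driver-private fields such as the new slot value can be filled in before the component device becomes visible:

	static int example_add_component(struct enclosure_device *edev, int num)
	{
		struct enclosure_component *ecomp;

		ecomp = enclosure_component_alloc(edev, num,
						  ENCLOSURE_COMPONENT_DEVICE,
						  NULL);
		if (IS_ERR(ecomp))
			return PTR_ERR(ecomp);

		/* Initialise fields before the device is published. */
		ecomp->slot = num;

		return enclosure_component_register(ecomp);
	}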
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index c64d7cad1085..e7353449874b 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -34,7 +34,6 @@
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 2c33fbca9225..6ab31eff0536 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -24,7 +24,6 @@
* debugging, please also see the debugfs interfaces of this driver.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 3336ddca45ac..8758d033db23 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -144,9 +144,9 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
union ioc4_int_out int_out;
union ioc4_gpcr gpcr;
- unsigned int state, last_state = 1;
+ unsigned int state, last_state;
uint64_t start, end, period;
- unsigned int count = 0;
+ unsigned int count;
/* Enable output */
gpcr.raw = 0;
@@ -167,19 +167,20 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
mmiowb();
/* Check square wave period averaged over some number of cycles */
- do {
- int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
- state = int_out.fields.int_out;
- if (!last_state && state) {
- count++;
- if (count == IOC4_CALIBRATE_END) {
- end = ktime_get_ns();
- break;
- } else if (count == IOC4_CALIBRATE_DISCARD)
- start = ktime_get_ns();
- }
- last_state = state;
- } while (1);
+ start = ktime_get_ns();
+ state = 1; /* make sure the first read isn't a rising edge */
+ for (count = 0; count <= IOC4_CALIBRATE_END; count++) {
+ do { /* wait for a rising edge */
+ last_state = state;
+ int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
+ state = int_out.fields.int_out;
+ } while (last_state || !state);
+
+ /* discard the first few cycles */
+ if (count == IOC4_CALIBRATE_DISCARD)
+ start = ktime_get_ns();
+ }
+ end = ktime_get_ns();
/* Calculation rearranged to preserve intermediate precision.
* Logically:
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 79f53941779d..c4cb9a984a5f 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -97,23 +97,25 @@ int mei_amthif_host_init(struct mei_device *dev)
/* allocate storage for ME message buffer */
msg_buf = kcalloc(dev->iamthif_mtu,
sizeof(unsigned char), GFP_KERNEL);
- if (!msg_buf)
- return -ENOMEM;
+ if (!msg_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
dev->iamthif_msg_buf = msg_buf;
ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
-
if (ret < 0) {
- dev_err(dev->dev,
- "amthif: failed link client %d\n", ret);
- return ret;
+ dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
+ goto out;
}
ret = mei_cl_connect(cl, NULL);
dev->iamthif_state = MEI_IAMTHIF_IDLE;
+out:
+ mei_me_cl_put(me_cl);
return ret;
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index b3a72bca5242..be767f4db26a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -224,46 +224,53 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver)
}
EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
-static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking)
{
struct mei_device *dev;
- struct mei_me_client *me_cl;
- struct mei_cl_cb *cb;
- int rets;
+ struct mei_me_client *me_cl = NULL;
+ struct mei_cl_cb *cb = NULL;
+ ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
- if (cl->state != MEI_FILE_CONNECTED)
- return -ENODEV;
+ mutex_lock(&dev->device_lock);
+ if (cl->state != MEI_FILE_CONNECTED) {
+ rets = -ENODEV;
+ goto out;
+ }
/* Check if we have an ME client device */
me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl)
- return -ENOTTY;
+ if (!me_cl) {
+ rets = -ENOTTY;
+ goto out;
+ }
- if (length > me_cl->props.max_msg_length)
- return -EFBIG;
+ if (length > me_cl->props.max_msg_length) {
+ rets = -EFBIG;
+ goto out;
+ }
cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
rets = mei_io_cb_alloc_req_buf(cb, length);
- if (rets < 0) {
- mei_io_cb_free(cb);
- return rets;
- }
+ if (rets < 0)
+ goto out;
memcpy(cb->request_buffer.data, buf, length);
- mutex_lock(&dev->device_lock);
-
rets = mei_cl_write(cl, cb, blocking);
+out:
+ mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
if (rets < 0)
mei_io_cb_free(cb);
@@ -271,12 +278,12 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
return rets;
}
-int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
size_t r_length;
- int err;
+ ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -286,11 +293,9 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
mutex_lock(&dev->device_lock);
if (!cl->read_cb) {
- err = mei_cl_read_start(cl, length);
- if (err < 0) {
- mutex_unlock(&dev->device_lock);
- return err;
- }
+ rets = mei_cl_read_start(cl, length);
+ if (rets < 0)
+ goto out;
}
if (cl->reading_state != MEI_READ_COMPLETE &&
@@ -313,13 +318,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
cb = cl->read_cb;
if (cl->reading_state != MEI_READ_COMPLETE) {
- r_length = 0;
+ rets = 0;
goto out;
}
r_length = min_t(size_t, length, cb->buf_idx);
-
memcpy(buf, cb->response_buffer.data, r_length);
+ rets = r_length;
mei_io_cb_free(cb);
cl->reading_state = MEI_IDLE;
@@ -328,20 +333,20 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
out:
mutex_unlock(&dev->device_lock);
- return r_length;
+ return rets;
}
-inline int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length)
+inline ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length)
{
return ___mei_cl_send(cl, buf, length, 0);
}
-inline int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length)
+inline ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length)
{
return ___mei_cl_send(cl, buf, length, 1);
}
-int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
+ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
{
struct mei_cl *cl = device->cl;
@@ -355,7 +360,7 @@ int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
}
EXPORT_SYMBOL_GPL(mei_cl_send);
-int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
+ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
{
struct mei_cl *cl = device->cl;
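A hedged usage sketch for the ssize_t conversion (hypothetical bus-client code, not part of the patch): the return value of mei_cl_recv()/mei_cl_send() is now "bytes transferred or negative errno" and can be stored without truncation:

#include <linux/device.h>
#include <linux/mei_cl_bus.h>

static void example_poll_device(struct mei_cl_device *device)
{
	u8 buf[128];
	ssize_t ret;

	ret = mei_cl_recv(device, buf, sizeof(buf));
	if (ret < 0) {
		dev_err(&device->dev, "recv failed: %zd\n", ret);
		return;
	}

	/* ret bytes of payload are now available in buf */
}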
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1382d551d7ed..dfbddfe1c7a0 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -27,7 +27,63 @@
#include "client.h"
/**
+ * mei_me_cl_init - initialize me client
+ *
+ * @me_cl: me client
+ */
+void mei_me_cl_init(struct mei_me_client *me_cl)
+{
+ INIT_LIST_HEAD(&me_cl->list);
+ kref_init(&me_cl->refcnt);
+}
+
+/**
+ * mei_me_cl_get - increases me client refcount
+ *
+ * @me_cl: me client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: me client or NULL
+ */
+struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
+{
+ if (me_cl)
+ kref_get(&me_cl->refcnt);
+
+ return me_cl;
+}
+
+/**
+ * mei_me_cl_release - unlink and free me client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * @ref: me_client refcount
+ */
+static void mei_me_cl_release(struct kref *ref)
+{
+ struct mei_me_client *me_cl =
+ container_of(ref, struct mei_me_client, refcnt);
+ list_del(&me_cl->list);
+ kfree(me_cl);
+}
+/**
+ * mei_me_cl_put - decrease me client refcount and free client if necessary
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * @me_cl: me client
+ */
+void mei_me_cl_put(struct mei_me_client *me_cl)
+{
+ if (me_cl)
+ kref_put(&me_cl->refcnt, mei_me_cl_release);
+}
+
+/**
* mei_me_cl_by_uuid - locate me client by uuid
+ * increases ref count
*
* @dev: mei device
* @uuid: me client uuid
@@ -43,13 +99,14 @@ struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
list_for_each_entry(me_cl, &dev->me_clients, list)
if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
- return me_cl;
+ return mei_me_cl_get(me_cl);
return NULL;
}
/**
* mei_me_cl_by_id - locate me client by client id
+ * increases ref count
*
* @dev: the device structure
* @client_id: me client id
@@ -65,12 +122,14 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
list_for_each_entry(me_cl, &dev->me_clients, list)
if (me_cl->client_id == client_id)
- return me_cl;
+ return mei_me_cl_get(me_cl);
+
return NULL;
}
/**
* mei_me_cl_by_uuid_id - locate me client by client id and uuid
+ * increases ref count
*
* @dev: the device structure
* @uuid: me client uuid
@@ -88,31 +147,67 @@ struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
list_for_each_entry(me_cl, &dev->me_clients, list)
if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
me_cl->client_id == client_id)
- return me_cl;
+ return mei_me_cl_get(me_cl);
+
return NULL;
}
/**
- * mei_me_cl_remove - remove me client matching uuid and client_id
+ * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
*
* @dev: the device structure
* @uuid: me client uuid
- * @client_id: me client address
+ *
+ * Locking: called under "dev->device_lock" lock
*/
-void mei_me_cl_remove(struct mei_device *dev, const uuid_le *uuid, u8 client_id)
+void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
struct mei_me_client *me_cl, *next;
+ dev_dbg(dev->dev, "remove %pUl\n", uuid);
+ list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
+ if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
+ mei_me_cl_put(me_cl);
+}
+
+/**
+ * mei_me_cl_rm_by_uuid_id - remove all me clients matching uuid and client id
+ *
+ * @dev: the device structure
+ * @uuid: me client uuid
+ * @id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ */
+void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
+{
+ struct mei_me_client *me_cl, *next;
+ const uuid_le *pn;
+
+ dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
- if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
- me_cl->client_id == client_id) {
- list_del(&me_cl->list);
- kfree(me_cl);
- break;
- }
+ pn = &me_cl->props.protocol_name;
+ if (me_cl->client_id == id && uuid_le_cmp(*uuid, *pn) == 0)
+ mei_me_cl_put(me_cl);
}
}
+/**
+ * mei_me_cl_rm_all - remove all me clients
+ *
+ * @dev: the device structure
+ *
+ * Locking: called under "dev->device_lock" lock
+ */
+void mei_me_cl_rm_all(struct mei_device *dev)
+{
+ struct mei_me_client *me_cl, *next;
+
+ list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
+ mei_me_cl_put(me_cl);
+}
+
+
/**
* mei_cl_cmp_id - tells if the clients are the same
@@ -695,6 +790,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_me_client *me_cl;
+ int rets = 0;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -704,18 +800,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
if (cl->mei_flow_ctrl_creds > 0)
return 1;
- me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
+ me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
if (!me_cl) {
cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
return -ENOENT;
}
- if (me_cl->mei_flow_ctrl_creds) {
+ if (me_cl->mei_flow_ctrl_creds > 0) {
+ rets = 1;
if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
- return 1;
+ rets = -EINVAL;
}
- return 0;
+ mei_me_cl_put(me_cl);
+ return rets;
}
/**
@@ -732,28 +829,36 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_me_client *me_cl;
+ int rets;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
- me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
+ me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
if (!me_cl) {
cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
return -ENOENT;
}
if (me_cl->props.single_recv_buf) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
+ if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) {
+ rets = -EINVAL;
+ goto out;
+ }
me_cl->mei_flow_ctrl_creds--;
} else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) {
+ rets = -EINVAL;
+ goto out;
+ }
cl->mei_flow_ctrl_creds--;
}
- return 0;
+ rets = 0;
+out:
+ mei_me_cl_put(me_cl);
+ return rets;
}
/**
@@ -788,6 +893,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
return -ENOTTY;
}
+ /* always allocate at least client max message */
+ length = max_t(size_t, length, me_cl->props.max_msg_length);
+ mei_me_cl_put(me_cl);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
@@ -802,8 +910,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
goto out;
}
- /* always allocate at least client max message */
- length = max_t(size_t, length, me_cl->props.max_msg_length);
rets = mei_io_cb_alloc_resp_buf(cb, length);
if (rets)
goto out;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index d9d0c1525259..cfcde8e97fc4 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -24,15 +24,22 @@
#include "mei_dev.h"
+/*
+ * reference counting base function
+ */
+void mei_me_cl_init(struct mei_me_client *me_cl);
+void mei_me_cl_put(struct mei_me_client *me_cl);
+struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl);
+
struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
- const uuid_le *cuuid);
+ const uuid_le *uuid);
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
-
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id);
-
-void mei_me_cl_remove(struct mei_device *dev,
- const uuid_le *uuid, u8 client_id);
+void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid);
+void mei_me_cl_rm_by_uuid_id(struct mei_device *dev,
+ const uuid_le *uuid, u8 id);
+void mei_me_cl_rm_all(struct mei_device *dev);
/*
* MEI IO Functions
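A short usage sketch for the refcounted lookup API declared above (illustrative; the function shown is hypothetical). Every mei_me_cl_by_*() lookup now returns a counted reference that must be dropped with mei_me_cl_put() when the caller is done, normally while dev->device_lock is held:

#include "mei_dev.h"
#include "client.h"

static int example_query_me_client(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	int ret = 0;

	mutex_lock(&dev->device_lock);

	me_cl = mei_me_cl_by_uuid(dev, uuid);	/* takes a reference */
	if (!me_cl) {
		ret = -ENOTTY;
		goto out;
	}

	/* safe to use me_cl->props and me_cl->client_id here */

	mei_me_cl_put(me_cl);			/* drop the reference */
out:
	mutex_unlock(&dev->device_lock);
	return ret;
}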
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index b60b4263cf0f..b125380ee871 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -21,20 +21,22 @@
#include <linux/mei.h>
#include "mei_dev.h"
+#include "client.h"
#include "hw.h"
static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct mei_device *dev = fp->private_data;
- struct mei_me_client *me_cl;
+ struct mei_me_client *me_cl, *n;
size_t bufsz = 1;
char *buf;
int i = 0;
int pos = 0;
int ret;
-#define HDR " |id|fix| UUID |con|msg len|sb|\n"
+#define HDR \
+" |id|fix| UUID |con|msg len|sb|refc|\n"
mutex_lock(&dev->device_lock);
@@ -54,16 +56,22 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
- list_for_each_entry(me_cl, &dev->me_clients, list) {
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|\n",
- i++, me_cl->client_id,
- me_cl->props.fixed_address,
- &me_cl->props.protocol_name,
- me_cl->props.max_number_of_connections,
- me_cl->props.max_msg_length,
- me_cl->props.single_recv_buf);
+ list_for_each_entry_safe(me_cl, n, &dev->me_clients, list) {
+
+ me_cl = mei_me_cl_get(me_cl);
+ if (me_cl) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
+ i++, me_cl->client_id,
+ me_cl->props.fixed_address,
+ &me_cl->props.protocol_name,
+ me_cl->props.max_number_of_connections,
+ me_cl->props.max_msg_length,
+ me_cl->props.single_recv_buf,
+ atomic_read(&me_cl->refcnt.refcount));
+ }
+
+ mei_me_cl_put(me_cl);
}
out:
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 239d7f5d6a92..c8412d41e4f1 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -105,21 +105,6 @@ void mei_hbm_idle(struct mei_device *dev)
}
/**
- * mei_me_cl_remove_all - remove all me clients
- *
- * @dev: the device structure
- */
-static void mei_me_cl_remove_all(struct mei_device *dev)
-{
- struct mei_me_client *me_cl, *next;
-
- list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
- list_del(&me_cl->list);
- kfree(me_cl);
- }
-}
-
-/**
* mei_hbm_reset - reset hbm counters and bookkeeping data structures
*
* @dev: the device structure
@@ -128,7 +113,7 @@ void mei_hbm_reset(struct mei_device *dev)
{
dev->me_client_index = 0;
- mei_me_cl_remove_all(dev);
+ mei_me_cl_rm_all(dev);
mei_hbm_idle(dev);
}
@@ -339,11 +324,16 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
struct hbm_props_response *res)
{
struct mei_me_client *me_cl;
+ const uuid_le *uuid = &res->client_properties.protocol_name;
+
+ mei_me_cl_rm_by_uuid(dev, uuid);
me_cl = kzalloc(sizeof(struct mei_me_client), GFP_KERNEL);
if (!me_cl)
return -ENOMEM;
+ mei_me_cl_init(me_cl);
+
me_cl->props = res->client_properties;
me_cl->client_id = res->me_addr;
me_cl->mei_flow_ctrl_creds = 0;
@@ -484,6 +474,7 @@ static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
struct hbm_flow_control *flow)
{
struct mei_me_client *me_cl;
+ int rets;
me_cl = mei_me_cl_by_id(dev, flow->me_addr);
if (!me_cl) {
@@ -492,14 +483,19 @@ static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
return -ENOENT;
}
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
+ if (WARN_ON(me_cl->props.single_recv_buf == 0)) {
+ rets = -EINVAL;
+ goto out;
+ }
me_cl->mei_flow_ctrl_creds++;
dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
flow->me_addr, me_cl->mei_flow_ctrl_creds);
- return 0;
+ rets = 0;
+out:
+ mei_me_cl_put(me_cl);
+ return rets;
}
/**
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index ff2755062b44..f8fd503dfbd6 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
+ /* H_RST may already be set before a reset is started,
+ * for example if a preceding reset flow hasn't completed.
+ * In that case asserting H_RST again would be ignored, therefore
+ * we need to clear the H_RST bit first to start a successful
+ * reset sequence.
+ */
+ if ((hcsr & H_RST) == H_RST) {
+ dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ hcsr &= ~H_RST;
+ mei_hcsr_set(hw, hcsr);
+ hcsr = mei_hcsr_read(hw);
+ }
+
hcsr |= H_RST | H_IG | H_IS;
if (intr_enable)
@@ -323,6 +335,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
return -ETIME;
}
+ mei_me_hw_reset_release(dev);
dev->recvd_hw_ready = false;
return 0;
}
@@ -719,9 +732,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
- mei_me_hw_reset_release(dev);
dev_dbg(dev->dev, "we need to start the dev.\n");
-
dev->recvd_hw_ready = true;
wake_up(&dev->wait_hw_ready);
} else {
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index ae56ba6ca0e3..3c019c0e60eb 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -303,7 +303,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
- struct mei_me_client *me_cl;
+ struct mei_me_client *me_cl = NULL;
struct mei_cl_cb *write_cb = NULL;
struct mei_device *dev;
unsigned long timeout = 0;
@@ -399,12 +399,14 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
"amthif write failed with status = %d\n", rets);
goto out;
}
+ mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
return length;
}
rets = mei_cl_write(cl, write_cb, false);
out:
+ mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
if (rets < 0)
mei_io_cb_free(write_cb);
@@ -433,24 +435,19 @@ static int mei_ioctl_connect_client(struct file *file,
cl = file->private_data;
dev = cl->dev;
- if (dev->dev_state != MEI_DEV_ENABLED) {
- rets = -ENODEV;
- goto end;
- }
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ return -ENODEV;
if (cl->state != MEI_FILE_INITIALIZING &&
- cl->state != MEI_FILE_DISCONNECTED) {
- rets = -EBUSY;
- goto end;
- }
+ cl->state != MEI_FILE_DISCONNECTED)
+ return -EBUSY;
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
if (!me_cl || me_cl->props.fixed_address) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
&data->in_client_uuid);
- rets = -ENOTTY;
- goto end;
+ return -ENOTTY;
}
cl->me_client_id = me_cl->client_id;
@@ -487,17 +484,16 @@ static int mei_ioctl_connect_client(struct file *file,
goto end;
}
-
/* prepare the output buffer */
client = &data->out_client_properties;
client->max_msg_length = me_cl->props.max_msg_length;
client->protocol_version = me_cl->props.protocol_version;
dev_dbg(dev->dev, "Can connect?\n");
-
rets = mei_cl_connect(cl, file);
end:
+ mei_me_cl_put(me_cl);
return rets;
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 3dad74a8d496..6c6ce9381535 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -172,12 +172,14 @@ struct mei_fw_status {
* struct mei_me_client - representation of me (fw) client
*
* @list: link in me client list
+ * @refcnt: struct reference count
* @props: client properties
* @client_id: me client id
* @mei_flow_ctrl_creds: flow control credits
*/
struct mei_me_client {
struct list_head list;
+ struct kref refcnt;
struct mei_client_properties props;
u8 client_id;
u8 mei_flow_ctrl_creds;
@@ -345,9 +347,9 @@ struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
struct mei_cl_ops *ops);
void mei_cl_remove_device(struct mei_cl_device *device);
-int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
-int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
-int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
+ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
void mei_cl_bus_rx_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *dev);
int mei_cl_bus_init(void);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 60ca9240368e..bb61a119b8bb 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -521,6 +521,7 @@ int mei_nfc_host_init(struct mei_device *dev)
cl_info->me_client_id = me_cl->client_id;
cl_info->cl_uuid = me_cl->props.protocol_name;
+ mei_me_cl_put(me_cl);
ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY);
if (ret)
@@ -539,6 +540,7 @@ int mei_nfc_host_init(struct mei_device *dev)
cl->me_client_id = me_cl->client_id;
cl->cl_uuid = me_cl->props.protocol_name;
+ mei_me_cl_put(me_cl);
ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
if (ret)
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index b1d892cea94d..475f1dea45bf 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -76,6 +76,7 @@ int mei_wd_host_init(struct mei_device *dev)
cl->me_client_id = me_cl->client_id;
cl->cl_uuid = me_cl->props.protocol_name;
+ mei_me_cl_put(me_cl);
ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 54be83d3efdd..c8c6a363069c 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -343,12 +343,26 @@ void st_int_recv(void *disc_data,
/* Unknown packet? */
default:
type = *ptr;
- if (st_gdata->list[type] == NULL) {
- pr_err("chip/interface misbehavior dropping"
- " frame starting with 0x%02x", type);
- goto done;
+ /* The default case handles non-HCILL packets.  A frame is
+ * accepted only if its channel ID is below ST_MAX_CHANNELS
+ * and a protocol has been registered for that channel
+ * (st_gdata->list[type] != NULL); frames with an invalid
+ * channel ID or an unregistered protocol are dropped.
+ */
+
+ if ((type >= ST_MAX_CHANNELS) ||
+ (st_gdata->list[type] == NULL)) {
+ pr_err("chip/interface misbehavior: "
+ "dropping frame starting "
+ "with 0x%02x\n", type);
+ goto done;
}
+
st_gdata->rx_skb = alloc_skb(
st_gdata->list[type]->max_frame_size,
GFP_ATOMIC);
@@ -893,5 +907,3 @@ void st_core_exit(struct st_data_s *st_gdata)
kfree(st_gdata);
}
}
-
-
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index e4b7ee4f57b8..18e7a03985d4 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -36,7 +36,8 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
-
+#include <linux/of.h>
+#include <linux/of_device.h>
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
@@ -44,6 +45,9 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
+struct ti_st_plat_data *dt_pdata;
+static struct ti_st_plat_data *get_platform_data(struct device *dev);
+
/**
* st_get_plat_device -
* function which returns the reference to the platform device
@@ -215,6 +219,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
+ long timeout;
pr_debug("%s", __func__);
@@ -224,10 +229,11 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
return -EIO;
}
- if (!wait_for_completion_interruptible_timeout(
- &kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
- pr_err(" waiting for ver info- timed out ");
- return -ETIMEDOUT;
+ timeout = wait_for_completion_interruptible_timeout(
+ &kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME));
+ if (timeout <= 0) {
+ pr_err(" waiting for ver info- timed out or received signal");
+ return timeout ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
/* the positions 12 & 13 in the response buffer provide with the
@@ -391,13 +397,14 @@ static long download_firmware(struct kim_data_s *kim_gdata)
break;
case ACTION_WAIT_EVENT: /* wait */
pr_debug("W");
- if (!wait_for_completion_interruptible_timeout(
+ err = wait_for_completion_interruptible_timeout(
&kim_gdata->kim_rcvd,
- msecs_to_jiffies(CMD_RESP_TIME))) {
- pr_err("response timeout during fw download ");
+ msecs_to_jiffies(CMD_RESP_TIME));
+ if (err <= 0) {
+ pr_err("response timeout/signaled during fw download ");
/* timed out */
release_firmware(kim_gdata->fw_entry);
- return -ETIMEDOUT;
+ return err ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
break;
@@ -462,7 +469,12 @@ long st_kim_start(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
pr_info(" %s", __func__);
- pdata = kim_gdata->kim_pdev->dev.platform_data;
+ if (kim_gdata->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+ }
do {
/* platform specific enabling code here */
@@ -522,12 +534,18 @@ long st_kim_stop(void *kim_data)
{
long err = 0;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
- struct ti_st_plat_data *pdata =
- kim_gdata->kim_pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
struct tty_struct *tty = kim_gdata->core_data->tty;
reinit_completion(&kim_gdata->ldisc_installed);
+ if (kim_gdata->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+
+
if (tty) { /* can be called before ldisc is installed */
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
@@ -620,7 +638,7 @@ static ssize_t show_baud_rate(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
- return sprintf(buf, "%ld\n", kim_data->baud_rate);
+ return sprintf(buf, "%d\n", kim_data->baud_rate);
}
static ssize_t show_flow_cntrl(struct device *dev,
@@ -676,12 +694,16 @@ void st_kim_ref(struct st_data_s **core_data, int id)
struct kim_data_s *kim_gdata;
/* get kim_gdata reference from platform device */
pdev = st_get_plat_device(id);
- if (!pdev) {
- *core_data = NULL;
- return;
- }
+ if (!pdev)
+ goto err;
kim_gdata = platform_get_drvdata(pdev);
+ if (!kim_gdata)
+ goto err;
+
*core_data = kim_gdata->core_data;
+ return;
+err:
+ *core_data = NULL;
}
static int kim_version_open(struct inode *i, struct file *f)
@@ -715,13 +737,53 @@ static const struct file_operations list_debugfs_fops = {
* board-*.c file
*/
+static const struct of_device_id kim_of_match[] = {
+{
+ .compatible = "kim",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kim_of_match);
+
+static struct ti_st_plat_data *get_platform_data(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ const u32 *dt_property;
+ int len;
+
+ dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
+ if (!dt_pdata) {
+ pr_err("Can't allocate device_tree platform data\n");
+ return NULL;
+ }
+
+ dt_property = of_get_property(np, "dev_name", &len);
+ if (dt_property)
+ memcpy(&dt_pdata->dev_name, dt_property, len);
+ of_property_read_u32(np, "nshutdown_gpio",
+ &dt_pdata->nshutdown_gpio);
+ of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
+ of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
+
+ return dt_pdata;
+}
+
static struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
struct kim_data_s *kim_gdata;
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
int err;
+ if (pdev->dev.of_node)
+ pdata = get_platform_data(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Platform Data is missing\n");
+ return -ENXIO;
+ }
+
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
/* multiple devices could exist */
st_kim_devices[pdev->id] = pdev;
@@ -750,14 +812,14 @@ static int kim_probe(struct platform_device *pdev)
kim_gdata->nshutdown = pdata->nshutdown_gpio;
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
- pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
+ pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
return err;
}
/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
- pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
+ pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
return err;
}
/* get reference of pdev for request_firmware
@@ -781,8 +843,7 @@ static int kim_probe(struct platform_device *pdev)
kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
if (!kim_debugfs_dir) {
pr_err(" debugfs entries creation failed ");
- err = -EIO;
- goto err_debugfs_dir;
+ return 0;
}
debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
@@ -791,9 +852,6 @@ static int kim_probe(struct platform_device *pdev)
kim_gdata, &list_debugfs_fops);
return 0;
-err_debugfs_dir:
- sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
-
err_sysfs_group:
st_core_exit(kim_gdata->core_data);
@@ -806,9 +864,16 @@ err_core_init:
static int kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
struct kim_data_s *kim_gdata;
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
+
kim_gdata = platform_get_drvdata(pdev);
/* Free the Bluetooth/FM/GPIO
@@ -826,27 +891,44 @@ static int kim_remove(struct platform_device *pdev)
kfree(kim_gdata);
kim_gdata = NULL;
+ kfree(dt_pdata);
+ dt_pdata = NULL;
+
return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
+
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
if (pdata->suspend)
return pdata->suspend(pdev, state);
- return -EOPNOTSUPP;
+ return 0;
}
static int kim_resume(struct platform_device *pdev)
{
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
+
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
if (pdata->resume)
return pdata->resume(pdev);
- return -EOPNOTSUPP;
+ return 0;
}
/**********************************************************************/
@@ -858,6 +940,8 @@ static struct platform_driver kim_platform_driver = {
.resume = kim_resume,
.driver = {
.name = "kim",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(kim_of_match),
},
};
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 93b4d67cc4a3..518e1b7f2f95 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -26,6 +26,7 @@
#include <linux/ti_wilink_st.h>
/**********************************************************************/
+
/* internal functions */
static void send_ll_cmd(struct st_data_s *st_data,
unsigned char cmd)
@@ -53,7 +54,13 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
/* communicate to platform about chip asleep */
kim_data = st_data->kim_data;
- pdata = kim_data->kim_pdev->dev.platform_data;
+ if (kim_data->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ }
+
if (pdata->chip_asleep)
pdata->chip_asleep(NULL);
}
@@ -86,7 +93,13 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
/* communicate to platform about chip wakeup */
kim_data = st_data->kim_data;
- pdata = kim_data->kim_pdev->dev.platform_data;
+ if (kim_data->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ }
+
if (pdata->chip_awake)
pdata->chip_awake(NULL);
}
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 3dee7ae123e7..032d35cf93ca 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.0.0-k");
+MODULE_VERSION("1.1.1.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 1723a6e4f2e8..66fc9921fc85 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -218,13 +218,12 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva,
}
/*
- * Sets up a given context for notify to work. Calls drv_map_bool_ptr()
- * which maps the notify boolean in user VA in kernel space.
+ * Sets up a given context for notify to work. Maps the notify
+ * boolean in user VA into kernel space.
*/
static int vmci_host_setup_notify(struct vmci_ctx *context,
unsigned long uva)
{
- struct page *page;
int retval;
if (context->notify_page) {
@@ -243,14 +242,16 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
/*
* Lock physical page backing a given user VA.
*/
- retval = get_user_pages_fast(PAGE_ALIGN(uva), 1, 1, &page);
- if (retval != 1)
+ retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
+ if (retval != 1) {
+ context->notify_page = NULL;
return VMCI_ERROR_GENERIC;
+ }
/*
* Map the locked page and set up notify pointer.
*/
- context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+ context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
vmci_ctx_check_signal_notify(context);
return VMCI_SUCCESS;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 7aaaf51e1596..35f19a683822 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
to_copy = size - bytes_copied;
if (is_iovec) {
- struct iovec *iov = (struct iovec *)src;
+ struct msghdr *msg = (struct msghdr *)src;
int err;
/* The iovec will track bytes_copied internally. */
- err = memcpy_fromiovec((u8 *)va + page_offset,
- iov, to_copy);
+ err = memcpy_from_msg((u8 *)va + page_offset,
+ msg, to_copy);
if (err != 0) {
if (kernel_if->host)
kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
*/
static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
u64 queue_offset,
- const void *src,
+ const void *msg,
size_t src_offset, size_t size)
{
@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
* We ignore src_offset because src is really a struct iovec * and will
* maintain offset internally.
*/
- return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+ return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
}
/*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
* of bytes enqueued or < 0 on error.
*/
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
- void *iov,
+ struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
- if (!qpair || !iov)
+ if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
- iov, iov_size,
+ msg, iov_size,
qp_memcpy_to_queue_iov);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
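Caller-side sketch for the iovec-to-msghdr conversion (hypothetical transport code, not from the patch): producers now forward the struct msghdr handed down from the socket layer, and the message's msg_iter tracks the copy position internally:

#include <linux/socket.h>
#include <linux/vmw_vmci_api.h>

static ssize_t example_transport_enqueue(struct vmci_qp *qpair,
					 struct msghdr *msg, size_t len)
{
	/* no manual iovec bookkeeping; memcpy_from_msg() advances msg_iter */
	return vmci_qpair_enquev(qpair, msg, len, 0);
}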
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4409d79ed650..c69afb5e264e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2147,7 +2147,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%d%s", md->name_idx, subname ? subname : "");
+ "mmcblk%u%s", md->name_idx, subname ? subname : "");
if (mmc_card_mmc(card))
blk_queue_logical_block_size(md->queue.queue,
@@ -2193,7 +2193,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
sector_t size;
- struct mmc_blk_data *md;
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
@@ -2209,9 +2208,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
- md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+ return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
MMC_BLK_DATA_AREA_MAIN);
- return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 0a7430f94d29..7dac4695163b 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2342,20 +2342,16 @@ static int mmc_test_hw_reset(struct mmc_test_card *test)
struct mmc_host *host = card->host;
int err;
- err = mmc_hw_reset_check(host);
+ if (!mmc_card_mmc(card) || !mmc_can_reset(card))
+ return RESULT_UNSUP_CARD;
+
+ err = mmc_hw_reset(host);
if (!err)
return RESULT_OK;
+ else if (err == -EOPNOTSUPP)
+ return RESULT_UNSUP_HOST;
- if (err == -ENOSYS)
- return RESULT_FAIL;
-
- if (err != -EOPNOTSUPP)
- return err;
-
- if (!mmc_can_reset(card))
- return RESULT_UNSUP_CARD;
-
- return RESULT_UNSUP_HOST;
+ return RESULT_FAIL;
}
static const struct mmc_test_case mmc_test_cases[] = {
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 38ed210ce2f3..2c25138f28b7 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,5 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
quirks.o slot-gpio.o
-
+mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 86d271148528..c5ef10065a4a 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
@@ -321,6 +322,8 @@ int mmc_add_card(struct mmc_card *card)
#endif
mmc_init_context_info(card->host);
+ card->dev.of_node = mmc_of_find_child_device(card->host, 0);
+
ret = device_add(&card->dev);
if (ret)
return ret;
@@ -349,6 +352,7 @@ void mmc_remove_card(struct mmc_card *card)
mmc_hostname(card->host), card->rca);
}
device_del(&card->dev);
+ of_node_put(card->dev.of_node);
}
put_device(&card->dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9584bffa8b22..23f10f72e5f3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -40,6 +40,7 @@
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
+#include "pwrseq.h"
#include "mmc_ops.h"
#include "sd_ops.h"
@@ -185,13 +186,14 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
EXPORT_SYMBOL(mmc_request_done);
-static void
-mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
unsigned int i, sz;
struct scatterlist *sg;
#endif
+ if (mmc_card_removed(host->card))
+ return -ENOMEDIUM;
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -251,6 +253,8 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
host->ops->request(host, mrq);
+
+ return 0;
}
/**
@@ -271,7 +275,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
BUG_ON(!card);
- if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
+ if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
return;
err = mmc_read_bkops_status(card);
@@ -345,29 +349,34 @@ static void mmc_wait_done(struct mmc_request *mrq)
*/
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
+ int err;
+
mrq->done = mmc_wait_data_done;
mrq->host = host;
- if (mmc_card_removed(host->card)) {
- mrq->cmd->error = -ENOMEDIUM;
+
+ err = mmc_start_request(host, mrq);
+ if (err) {
+ mrq->cmd->error = err;
mmc_wait_data_done(mrq);
- return -ENOMEDIUM;
}
- mmc_start_request(host, mrq);
- return 0;
+ return err;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
+ int err;
+
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
- if (mmc_card_removed(host->card)) {
- mrq->cmd->error = -ENOMEDIUM;
+
+ err = mmc_start_request(host, mrq);
+ if (err) {
+ mrq->cmd->error = err;
complete(&mrq->completion);
- return -ENOMEDIUM;
}
- mmc_start_request(host, mrq);
- return 0;
+
+ return err;
}
/*
@@ -1077,6 +1086,30 @@ void mmc_set_ungated(struct mmc_host *host)
}
#endif
+int mmc_execute_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 opcode;
+ int err;
+
+ if (!host->ops->execute_tuning)
+ return 0;
+
+ if (mmc_card_mmc(card))
+ opcode = MMC_SEND_TUNING_BLOCK_HS200;
+ else
+ opcode = MMC_SEND_TUNING_BLOCK;
+
+ mmc_host_clk_hold(host);
+ err = host->ops->execute_tuning(host, opcode);
+ mmc_host_clk_release(host);
+
+ if (err)
+ pr_err("%s: tuning execution failed\n", mmc_hostname(host));
+
+ return err;
+}
+
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
@@ -1232,6 +1265,34 @@ EXPORT_SYMBOL(mmc_of_parse_voltage);
#endif /* CONFIG_OF */
+static int mmc_of_get_func_num(struct device_node *node)
+{
+ u32 reg;
+ int ret;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret < 0)
+ return ret;
+
+ return reg;
+}
+
+struct device_node *mmc_of_find_child_device(struct mmc_host *host,
+ unsigned func_num)
+{
+ struct device_node *node;
+
+ if (!host->parent || !host->parent->of_node)
+ return NULL;
+
+ for_each_child_of_node(host->parent->of_node, node) {
+ if (mmc_of_get_func_num(node) == func_num)
+ return node;
+ }
+
+ return NULL;
+}
+
#ifdef CONFIG_REGULATOR
/**
@@ -1555,6 +1616,8 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
mmc_host_clk_hold(host);
+ mmc_pwrseq_pre_power_on(host);
+
host->ios.vdd = fls(ocr) - 1;
host->ios.power_mode = MMC_POWER_UP;
/* Set initial state and call mmc_set_ios */
@@ -1574,6 +1637,8 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
*/
mmc_delay(10);
+ mmc_pwrseq_post_power_on(host);
+
host->ios.clock = host->f_init;
host->ios.power_mode = MMC_POWER_ON;
@@ -1595,6 +1660,8 @@ void mmc_power_off(struct mmc_host *host)
mmc_host_clk_hold(host);
+ mmc_pwrseq_power_off(host);
+
host->ios.clock = 0;
host->ios.vdd = 0;
@@ -2245,67 +2312,28 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
mmc_host_clk_release(host);
}
-int mmc_can_reset(struct mmc_card *card)
-{
- u8 rst_n_function;
-
- if (!mmc_card_mmc(card))
- return 0;
- rst_n_function = card->ext_csd.rst_n_function;
- if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
- return 0;
- return 1;
-}
-EXPORT_SYMBOL(mmc_can_reset);
-
-static int mmc_do_hw_reset(struct mmc_host *host, int check)
+int mmc_hw_reset(struct mmc_host *host)
{
- struct mmc_card *card = host->card;
-
- if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
- return -EOPNOTSUPP;
+ int ret;
- if (!card)
+ if (!host->card)
return -EINVAL;
- if (!mmc_can_reset(card))
+ mmc_bus_get(host);
+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
+ mmc_bus_put(host);
return -EOPNOTSUPP;
-
- mmc_host_clk_hold(host);
- mmc_set_clock(host, host->f_init);
-
- host->ops->hw_reset(host);
-
- /* If the reset has happened, then a status command will fail */
- if (check) {
- u32 status;
-
- if (!mmc_send_status(card, &status)) {
- mmc_host_clk_release(host);
- return -ENOSYS;
- }
}
- /* Set initial state and call mmc_set_ios */
- mmc_set_initial_state(host);
+ ret = host->bus_ops->reset(host);
+ mmc_bus_put(host);
- mmc_host_clk_release(host);
+ pr_warn("%s: tried to reset card\n", mmc_hostname(host));
- return host->bus_ops->power_restore(host);
-}
-
-int mmc_hw_reset(struct mmc_host *host)
-{
- return mmc_do_hw_reset(host, 0);
+ return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);
-int mmc_hw_reset_check(struct mmc_host *host)
-{
- return mmc_do_hw_reset(host, 1);
-}
-EXPORT_SYMBOL(mmc_hw_reset_check);
-
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index d76597c65e3a..cfba3c05aab1 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -27,11 +27,15 @@ struct mmc_bus_ops {
int (*power_restore)(struct mmc_host *);
int (*alive)(struct mmc_host *);
int (*shutdown)(struct mmc_host *);
+ int (*reset)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
void mmc_detach_bus(struct mmc_host *host);
+struct device_node *mmc_of_find_child_device(struct mmc_host *host,
+ unsigned func_num);
+
void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
@@ -82,5 +86,8 @@ void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
void mmc_init_context_info(struct mmc_host *host);
+
+int mmc_execute_tuning(struct mmc_card *card);
+
#endif
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 270d58a4c43d..8be0df758e68 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -29,13 +29,20 @@
#include "core.h"
#include "host.h"
+#include "slot-gpio.h"
+#include "pwrseq.h"
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+static DEFINE_IDR(mmc_host_idr);
+static DEFINE_SPINLOCK(mmc_host_lock);
+
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
- mutex_destroy(&host->slot.lock);
+ spin_lock(&mmc_host_lock);
+ idr_remove(&mmc_host_idr, host->index);
+ spin_unlock(&mmc_host_lock);
kfree(host);
}
@@ -54,9 +61,6 @@ void mmc_unregister_host_class(void)
class_unregister(&mmc_host_class);
}
-static DEFINE_IDR(mmc_host_idr);
-static DEFINE_SPINLOCK(mmc_host_lock);
-
#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -367,16 +371,10 @@ int mmc_of_parse(struct mmc_host *host)
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
0, &cd_gpio_invert);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- return ret;
- if (ret != -ENOENT) {
- dev_err(host->parent,
- "Failed to request CD GPIO: %d\n",
- ret);
- }
- } else
+ if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
+ else if (ret != -ENOENT)
+ return ret;
/*
* There are two ways to flag that the CD line is inverted:
@@ -397,16 +395,10 @@ int mmc_of_parse(struct mmc_host *host)
ro_cap_invert = of_property_read_bool(np, "wp-inverted");
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- goto out;
- if (ret != -ENOENT) {
- dev_err(host->parent,
- "Failed to request WP GPIO: %d\n",
- ret);
- }
- } else
+ if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
+ else if (ret != -ENOENT)
+ return ret;
/* See the comment on CD inversion above */
if (ro_cap_invert ^ ro_gpio_invert)
@@ -457,11 +449,7 @@ int mmc_of_parse(struct mmc_host *host)
host->dsr_req = 0;
}
- return 0;
-
-out:
- mmc_gpio_free_cd(host);
- return ret;
+ return mmc_pwrseq_alloc(host);
}
EXPORT_SYMBOL(mmc_of_parse);
@@ -491,8 +479,10 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->index = err;
spin_unlock(&mmc_host_lock);
idr_preload_end();
- if (err < 0)
- goto free;
+ if (err < 0) {
+ kfree(host);
+ return NULL;
+ }
dev_set_name(&host->class_dev, "mmc%d", host->index);
@@ -501,10 +491,12 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
- mmc_host_clk_init(host);
+ if (mmc_gpio_alloc(host)) {
+ put_device(&host->class_dev);
+ return NULL;
+ }
- mutex_init(&host->slot.lock);
- host->slot.cd_irq = -EINVAL;
+ mmc_host_clk_init(host);
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
@@ -525,10 +517,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_count = PAGE_CACHE_SIZE / 512;
return host;
-
-free:
- kfree(host);
- return NULL;
}
EXPORT_SYMBOL(mmc_alloc_host);
@@ -601,10 +589,7 @@ EXPORT_SYMBOL(mmc_remove_host);
*/
void mmc_free_host(struct mmc_host *host)
{
- spin_lock(&mmc_host_lock);
- idr_remove(&mmc_host_idr, host->index);
- spin_unlock(&mmc_host_lock);
-
+ mmc_pwrseq_free(host);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7466ce098e60..1d41e8541f38 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -483,11 +483,13 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
/* check whether the eMMC card supports BKOPS */
if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
card->ext_csd.bkops = 1;
- card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
+ card->ext_csd.man_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+ EXT_CSD_MANUAL_BKOPS_MASK);
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
- if (!card->ext_csd.bkops_en)
- pr_info("%s: BKOPS_EN bit is not set\n",
+ if (!card->ext_csd.man_bkops_en)
+ pr_info("%s: MAN_BKOPS_EN bit is not set\n",
mmc_hostname(card->host));
}
@@ -1155,38 +1157,6 @@ bus_speed:
return err;
}
-const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
- 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
- 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
- 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
- 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
- 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
- 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
- 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
- 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
-};
-EXPORT_SYMBOL(tuning_blk_pattern_4bit);
-
-const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
- 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
- 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
- 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
- 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
- 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
- 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
- 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
- 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
- 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
- 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
- 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
- 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
- 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
- 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
- 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
- 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
-};
-EXPORT_SYMBOL(tuning_blk_pattern_8bit);
-
/*
* Execute tuning sequence to seek the proper bus operating
* conditions for HS200 and HS400, which sends CMD21 to the device.
@@ -1194,7 +1164,6 @@ EXPORT_SYMBOL(tuning_blk_pattern_8bit);
static int mmc_hs200_tuning(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- int err = 0;
/*
* Timing should be adjusted to the HS400 target
@@ -1205,18 +1174,7 @@ static int mmc_hs200_tuning(struct mmc_card *card)
if (host->ops->prepare_hs400_tuning)
host->ops->prepare_hs400_tuning(host, &host->ios);
- if (host->ops->execute_tuning) {
- mmc_host_clk_hold(host);
- err = host->ops->execute_tuning(host,
- MMC_SEND_TUNING_BLOCK_HS200);
- mmc_host_clk_release(host);
-
- if (err)
- pr_err("%s: tuning execution failed\n",
- mmc_hostname(host));
- }
-
- return err;
+ return mmc_execute_tuning(card);
}
/*
@@ -1297,6 +1255,12 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!mmc_host_is_spi(host)) {
@@ -1821,6 +1785,46 @@ static int mmc_power_restore(struct mmc_host *host)
return ret;
}
+int mmc_can_reset(struct mmc_card *card)
+{
+ u8 rst_n_function;
+
+ rst_n_function = card->ext_csd.rst_n_function;
+ if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_can_reset);
+
+static int mmc_reset(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u32 status;
+
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return -EOPNOTSUPP;
+
+ if (!mmc_can_reset(card))
+ return -EOPNOTSUPP;
+
+ mmc_host_clk_hold(host);
+ mmc_set_clock(host, host->f_init);
+
+ host->ops->hw_reset(host);
+
+ /* If the reset has happened, then a status command will fail */
+ if (!mmc_send_status(card, &status)) {
+ mmc_host_clk_release(host);
+ return -ENOSYS;
+ }
+
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ mmc_host_clk_release(host);
+
+ return mmc_power_restore(host);
+}
+
static const struct mmc_bus_ops mmc_ops = {
.remove = mmc_remove,
.detect = mmc_detect,
@@ -1831,6 +1835,7 @@ static const struct mmc_bus_ops mmc_ops = {
.power_restore = mmc_power_restore,
.alive = mmc_alive,
.shutdown = mmc_shutdown,
+ .reset = mmc_reset,
};
/*
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 3b044c5b029c..0ea042dc7443 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -23,6 +23,36 @@
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+static const u8 tuning_blk_pattern_4bit[] = {
+ 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
+};
+
+static const u8 tuning_blk_pattern_8bit[] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+};
+
static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
bool ignore_crc)
{
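
[Editor's note] The tuning patterns become private to mmc_ops.c because the callers below switch to a new core helper that reads a tuning block and compares it against these patterns. A rough sketch of that helper, matching the one-argument mmc_send_tuning(mmc) call used later in this diff; command flags and error handling are simplified and are assumptions, not copied from the series:

/* Rough sketch of mmc_send_tuning(); the real body is elsewhere in the series. */
int mmc_send_tuning(struct mmc_host *host)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        const u8 *pattern;
        int size, err = 0;
        u8 *buf;

        if (host->ios.bus_width == MMC_BUS_WIDTH_8) {
                pattern = tuning_blk_pattern_8bit;
                size = sizeof(tuning_blk_pattern_8bit);
                cmd.opcode = MMC_SEND_TUNING_BLOCK_HS200;       /* CMD21 */
        } else if (host->ios.bus_width == MMC_BUS_WIDTH_4) {
                pattern = tuning_blk_pattern_4bit;
                size = sizeof(tuning_blk_pattern_4bit);
                cmd.opcode = MMC_SEND_TUNING_BLOCK;             /* CMD19 */
        } else {
                return -EINVAL;
        }

        buf = kzalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
        data.blksz = size;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, buf, size);
        mrq.cmd = &cmd;
        mrq.data = &data;

        mmc_wait_for_req(host, &mrq);

        if (cmd.error)
                err = cmd.error;
        else if (data.error)
                err = data.error;
        else if (memcmp(buf, pattern, size))
                err = -EIO;     /* this sample point returned a corrupted block */

        kfree(buf);
        return err;
}
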
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
new file mode 100644
index 000000000000..862356123d78
--- /dev/null
+++ b/drivers/mmc/core/pwrseq.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * MMC power sequence management
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_match {
+ const char *compatible;
+ int (*alloc)(struct mmc_host *host, struct device *dev);
+};
+
+static struct mmc_pwrseq_match pwrseq_match[] = {
+ {
+ .compatible = "mmc-pwrseq-simple",
+ .alloc = mmc_pwrseq_simple_alloc,
+ }, {
+ .compatible = "mmc-pwrseq-emmc",
+ .alloc = mmc_pwrseq_emmc_alloc,
+ },
+};
+
+static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
+{
+ struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
+ if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
+ match = &pwrseq_match[i];
+ break;
+ }
+ }
+
+ return match;
+}
+
+int mmc_pwrseq_alloc(struct mmc_host *host)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct mmc_pwrseq_match *match;
+ int ret = 0;
+
+ np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
+ if (!np)
+ return 0;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ match = mmc_pwrseq_find(np);
+ if (IS_ERR(match)) {
+ ret = PTR_ERR(match);
+ goto err;
+ }
+
+ ret = match->alloc(host, &pdev->dev);
+ if (!ret)
+ dev_info(host->parent, "allocated mmc-pwrseq\n");
+
+err:
+ of_node_put(np);
+ return ret;
+}
+
+void mmc_pwrseq_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->pre_power_on)
+ pwrseq->ops->pre_power_on(host);
+}
+
+void mmc_pwrseq_post_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->post_power_on)
+ pwrseq->ops->post_power_on(host);
+}
+
+void mmc_pwrseq_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->power_off)
+ pwrseq->ops->power_off(host);
+}
+
+void mmc_pwrseq_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->free)
+ pwrseq->ops->free(host);
+}
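
[Editor's note] The three hooks above are meant to bracket the host's power sequence. A rough sketch of where they would be called from the core power path; mmc_power_up()/mmc_power_off() in core.c are outside this diff, so the surrounding steps are assumptions:

/* Sketch: expected call sites of the new pwrseq hooks in the core. */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
        if (host->ios.power_mode == MMC_POWER_ON)
                return;

        mmc_pwrseq_pre_power_on(host);  /* e.g. assert reset GPIOs, enable ext clock */

        /* ... raise VDD, start the bus clock, wait for the supply to settle ... */

        mmc_pwrseq_post_power_on(host); /* e.g. de-assert reset GPIOs */
}

void mmc_power_off(struct mmc_host *host)
{
        if (host->ios.power_mode == MMC_POWER_OFF)
                return;

        mmc_pwrseq_power_off(host);     /* re-assert resets, gate the external clock */

        /* ... cut VDD and reset the ios state ... */
}
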
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
new file mode 100644
index 000000000000..aba3409e8d6e
--- /dev/null
+++ b/drivers/mmc/core/pwrseq.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _MMC_CORE_PWRSEQ_H
+#define _MMC_CORE_PWRSEQ_H
+
+struct mmc_pwrseq_ops {
+ void (*pre_power_on)(struct mmc_host *host);
+ void (*post_power_on)(struct mmc_host *host);
+ void (*power_off)(struct mmc_host *host);
+ void (*free)(struct mmc_host *host);
+};
+
+struct mmc_pwrseq {
+ struct mmc_pwrseq_ops *ops;
+};
+
+#ifdef CONFIG_OF
+
+int mmc_pwrseq_alloc(struct mmc_host *host);
+void mmc_pwrseq_pre_power_on(struct mmc_host *host);
+void mmc_pwrseq_post_power_on(struct mmc_host *host);
+void mmc_pwrseq_power_off(struct mmc_host *host);
+void mmc_pwrseq_free(struct mmc_host *host);
+
+int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev);
+int mmc_pwrseq_emmc_alloc(struct mmc_host *host, struct device *dev);
+
+#else
+
+static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
+static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
+static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
+static inline void mmc_pwrseq_power_off(struct mmc_host *host) {}
+static inline void mmc_pwrseq_free(struct mmc_host *host) {}
+
+#endif
+
+#endif
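
[Editor's note] Beyond the two providers registered in pwrseq_match[], a new sequence type would follow the same shape: embed struct mmc_pwrseq, fill an ops table, and point host->pwrseq at it from an alloc callback. A minimal, purely hypothetical provider (the "dummy" name and behaviour are made up for illustration; it would also need an entry in pwrseq_match[] with its own compatible string):

#include <linux/slab.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"

struct mmc_pwrseq_dummy {
        struct mmc_pwrseq pwrseq;       /* embedded so container_of() works */
};

static void mmc_pwrseq_dummy_pre_power_on(struct mmc_host *host)
{
        /* board-specific work before VDD is raised would go here */
}

static void mmc_pwrseq_dummy_free(struct mmc_host *host)
{
        struct mmc_pwrseq_dummy *pwrseq = container_of(host->pwrseq,
                                        struct mmc_pwrseq_dummy, pwrseq);

        kfree(pwrseq);
        host->pwrseq = NULL;
}

static struct mmc_pwrseq_ops mmc_pwrseq_dummy_ops = {
        .pre_power_on = mmc_pwrseq_dummy_pre_power_on,
        .free = mmc_pwrseq_dummy_free,
};

int mmc_pwrseq_dummy_alloc(struct mmc_host *host, struct device *dev)
{
        struct mmc_pwrseq_dummy *pwrseq;

        pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
        if (!pwrseq)
                return -ENOMEM;

        pwrseq->pwrseq.ops = &mmc_pwrseq_dummy_ops;
        host->pwrseq = &pwrseq->pwrseq;
        return 0;
}
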
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
new file mode 100644
index 000000000000..a2d545904fbf
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Simple eMMC hardware reset provider
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/reboot.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_emmc {
+ struct mmc_pwrseq pwrseq;
+ struct notifier_block reset_nb;
+ struct gpio_desc *reset_gpio;
+};
+
+static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
+{
+ gpiod_set_value(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value(pwrseq->reset_gpio, 0);
+ udelay(200);
+}
+
+static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_emmc, pwrseq);
+
+ __mmc_pwrseq_emmc_reset(pwrseq);
+}
+
+static void mmc_pwrseq_emmc_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_emmc, pwrseq);
+
+ unregister_restart_handler(&pwrseq->reset_nb);
+ gpiod_put(pwrseq->reset_gpio);
+ kfree(pwrseq);
+ host->pwrseq = NULL;
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .post_power_on = mmc_pwrseq_emmc_reset,
+ .free = mmc_pwrseq_emmc_free,
+};
+
+static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(this,
+ struct mmc_pwrseq_emmc, reset_nb);
+
+ __mmc_pwrseq_emmc_reset(pwrseq);
+ return NOTIFY_DONE;
+}
+
+int mmc_pwrseq_emmc_alloc(struct mmc_host *host, struct device *dev)
+{
+ struct mmc_pwrseq_emmc *pwrseq;
+ int ret = 0;
+
+ pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio)) {
+ ret = PTR_ERR(pwrseq->reset_gpio);
+ goto free;
+ }
+
+ /*
+ * Register a restart handler to ensure that the eMMC is also reset
+ * from emergency_reboot(); priority 129 schedules it just before
+ * the system reboot.
+ */
+ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+ pwrseq->reset_nb.priority = 129;
+ register_restart_handler(&pwrseq->reset_nb);
+
+ pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ host->pwrseq = &pwrseq->pwrseq;
+
+ return 0;
+free:
+ kfree(pwrseq);
+ return ret;
+}

diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
new file mode 100644
index 000000000000..e9f1d8d84613
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Simple MMC power sequence management
+ */
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_simple {
+ struct mmc_pwrseq pwrseq;
+ bool clk_enabled;
+ struct clk *ext_clk;
+ int nr_gpios;
+ struct gpio_desc *reset_gpios[0];
+};
+
+static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
+ int value)
+{
+ int i;
+
+ for (i = 0; i < pwrseq->nr_gpios; i++)
+ if (!IS_ERR(pwrseq->reset_gpios[i]))
+ gpiod_set_value_cansleep(pwrseq->reset_gpios[i], value);
+}
+
+static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+
+ if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
+ clk_prepare_enable(pwrseq->ext_clk);
+ pwrseq->clk_enabled = true;
+ }
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+}
+
+static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
+}
+
+static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+
+ if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
+ clk_disable_unprepare(pwrseq->ext_clk);
+ pwrseq->clk_enabled = false;
+ }
+}
+
+static void mmc_pwrseq_simple_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+ int i;
+
+ for (i = 0; i < pwrseq->nr_gpios; i++)
+ if (!IS_ERR(pwrseq->reset_gpios[i]))
+ gpiod_put(pwrseq->reset_gpios[i]);
+
+ if (!IS_ERR(pwrseq->ext_clk))
+ clk_put(pwrseq->ext_clk);
+
+ kfree(pwrseq);
+ host->pwrseq = NULL;
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
+ .pre_power_on = mmc_pwrseq_simple_pre_power_on,
+ .post_power_on = mmc_pwrseq_simple_post_power_on,
+ .power_off = mmc_pwrseq_simple_power_off,
+ .free = mmc_pwrseq_simple_free,
+};
+
+int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev)
+{
+ struct mmc_pwrseq_simple *pwrseq;
+ int i, nr_gpios, ret = 0;
+
+ nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
+ if (nr_gpios < 0)
+ nr_gpios = 0;
+
+ pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
+ sizeof(struct gpio_desc *), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->ext_clk = clk_get(dev, "ext_clock");
+ if (IS_ERR(pwrseq->ext_clk) &&
+ PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
+ ret = PTR_ERR(pwrseq->ext_clk);
+ goto free;
+ }
+
+ for (i = 0; i < nr_gpios; i++) {
+ pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pwrseq->reset_gpios[i]) &&
+ PTR_ERR(pwrseq->reset_gpios[i]) != -ENOENT &&
+ PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
+ ret = PTR_ERR(pwrseq->reset_gpios[i]);
+
+ while (--i)
+ gpiod_put(pwrseq->reset_gpios[i]);
+
+ goto clk_put;
+ }
+ }
+
+ pwrseq->nr_gpios = nr_gpios;
+ pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+ host->pwrseq = &pwrseq->pwrseq;
+
+ return 0;
+clk_put:
+ if (!IS_ERR(pwrseq->ext_clk))
+ clk_put(pwrseq->ext_clk);
+free:
+ kfree(pwrseq);
+ return ret;
+}
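
[Editor's note] struct mmc_pwrseq_simple ends in a zero-length array, so the GPIO descriptor table is carved out of the same allocation as the struct itself. A small standalone illustration of that allocation idiom (plain userspace C, hypothetical names; the kernel code uses the older "[0]" spelling where C99 uses "[]"):

#include <stdio.h>
#include <stdlib.h>

/* Header followed by a variable number of pointers in one allocation. */
struct pwrseq_like {
        int nr_gpios;
        void *reset_gpios[];    /* flexible array member */
};

static struct pwrseq_like *pwrseq_like_alloc(int nr_gpios)
{
        struct pwrseq_like *p;

        p = calloc(1, sizeof(*p) + nr_gpios * sizeof(p->reset_gpios[0]));
        if (!p)
                return NULL;
        p->nr_gpios = nr_gpios;
        return p;
}

int main(void)
{
        struct pwrseq_like *p = pwrseq_like_alloc(3);

        if (!p)
                return 1;
        printf("one block holds the header plus %d pointer slots\n", p->nr_gpios);
        free(p);        /* a single free() releases header and table together */
        return 0;
}
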
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d90a6de7901d..ad4d43eae99d 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -660,15 +660,10 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
* SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
- if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning &&
- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
- card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
- mmc_host_clk_hold(card->host);
- err = card->host->ops->execute_tuning(card->host,
- MMC_SEND_TUNING_BLOCK);
- mmc_host_clk_release(card->host);
- }
-
+ if (!mmc_host_is_spi(card->host) &&
+ (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+ card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
+ err = mmc_execute_tuning(card);
out:
kfree(status);
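
[Editor's note] The eMMC, SD and SDIO hunks all funnel through mmc_execute_tuning(), which is added to core.c elsewhere in this series. A sketch of what the consolidated helper plausibly looks like, folding in the opcode selection, clock hold/release and error print that the removed open-coded blocks used to do; the body below is an approximation, not copied from this diff:

/* Sketch of the consolidated tuning helper (real one in core/core.c). */
int mmc_execute_tuning(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        u32 opcode;
        int err;

        if (!host->ops->execute_tuning)
                return 0;

        if (mmc_card_mmc(card))
                opcode = MMC_SEND_TUNING_BLOCK_HS200;   /* CMD21 for eMMC HS200 */
        else
                opcode = MMC_SEND_TUNING_BLOCK;         /* CMD19 for SD/SDIO */

        mmc_host_clk_hold(host);
        err = host->ops->execute_tuning(host, opcode);
        mmc_host_clk_release(host);

        if (err)
                pr_err("%s: tuning execution failed\n", mmc_hostname(host));

        return err;
}
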
@@ -933,6 +928,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
* For native busses: get card RCA and quit open drain mode.
*/
if (!mmc_host_is_spi(host)) {
@@ -1191,6 +1192,12 @@ static int mmc_sd_power_restore(struct mmc_host *host)
return ret;
}
+static int mmc_sd_reset(struct mmc_host *host)
+{
+ mmc_power_cycle(host, host->card->ocr);
+ return mmc_sd_power_restore(host);
+}
+
static const struct mmc_bus_ops mmc_sd_ops = {
.remove = mmc_sd_remove,
.detect = mmc_sd_detect,
@@ -1201,6 +1208,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.power_restore = mmc_sd_power_restore,
.alive = mmc_sd_alive,
.shutdown = mmc_sd_suspend,
+ .reset = mmc_sd_reset,
};
/*
@@ -1271,4 +1279,3 @@ err:
return err;
}
-
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index fd0750b5a634..ce6cc47206b0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -567,17 +567,11 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
* SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
- if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning &&
- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104))) {
- mmc_host_clk_hold(card->host);
- err = card->host->ops->execute_tuning(card->host,
- MMC_SEND_TUNING_BLOCK);
- mmc_host_clk_release(card->host);
- }
-
+ if (!mmc_host_is_spi(card->host) &&
+ ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
+ err = mmc_execute_tuning(card);
out:
-
return err;
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 60885316afba..bee02e644d62 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -22,7 +22,9 @@
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/of.h>
+#include "core.h"
#include "sdio_cis.h"
#include "sdio_bus.h"
@@ -295,6 +297,13 @@ static void sdio_acpi_set_handle(struct sdio_func *func)
static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
#endif
+static void sdio_set_of_node(struct sdio_func *func)
+{
+ struct mmc_host *host = func->card->host;
+
+ func->dev.of_node = mmc_of_find_child_device(host, func->num);
+}
+
/*
* Register a new SDIO function with the driver model.
*/
@@ -304,6 +313,7 @@ int sdio_add_func(struct sdio_func *func)
dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
+ sdio_set_of_node(func);
sdio_acpi_set_handle(func);
ret = device_add(&func->dev);
if (ret == 0) {
@@ -327,6 +337,7 @@ void sdio_remove_func(struct sdio_func *func)
dev_pm_domain_detach(&func->dev, false);
device_del(&func->dev);
+ of_node_put(func->dev.of_node);
put_device(&func->dev);
}
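
[Editor's note] sdio_set_of_node() relies on mmc_of_find_child_device(), which this series adds to the core but which is not shown here. Roughly, it walks the host controller's DT node for a child whose reg property equals the function number; the sketch below assumes that behaviour:

#include <linux/of.h>

/* Sketch: match an SDIO function to a child node of the controller,
 * e.g. a node like  wifi@1 { reg = <1>; };  under the host's DT node. */
struct device_node *mmc_of_find_child_device(struct mmc_host *host,
                                             unsigned func_num)
{
        struct device_node *node;

        if (!host->parent || !host->parent->of_node)
                return NULL;

        for_each_child_of_node(host->parent->of_node, node) {
                u32 reg;

                if (of_property_read_u32(node, "reg", &reg))
                        continue;
                if (reg == func_num)
                        return node;    /* reference dropped later via of_node_put() */
        }

        return NULL;
}
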
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 69bbf2adb329..27117ba47073 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -18,11 +18,14 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include "slot-gpio.h"
+
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
bool override_ro_active_level;
bool override_cd_active_level;
+ irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
char cd_label[0];
};
@@ -38,32 +41,20 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mmc_gpio_alloc(struct mmc_host *host)
+int mmc_gpio_alloc(struct mmc_host *host)
{
size_t len = strlen(dev_name(host->parent)) + 4;
- struct mmc_gpio *ctx;
-
- mutex_lock(&host->slot.lock);
-
- ctx = host->slot.handler_priv;
- if (!ctx) {
- /*
- * devm_kzalloc() can be called after device_initialize(), even
- * before device_add(), i.e., between mmc_alloc_host() and
- * mmc_add_host()
- */
- ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len,
- GFP_KERNEL);
- if (ctx) {
- ctx->ro_label = ctx->cd_label + len;
- snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
- snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
- host->slot.handler_priv = ctx;
- }
+ struct mmc_gpio *ctx = devm_kzalloc(host->parent,
+ sizeof(*ctx) + 2 * len, GFP_KERNEL);
+
+ if (ctx) {
+ ctx->ro_label = ctx->cd_label + len;
+ snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
+ snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
}
- mutex_unlock(&host->slot.lock);
-
return ctx ? 0 : -ENOMEM;
}
@@ -103,29 +94,19 @@ EXPORT_SYMBOL(mmc_gpio_get_cd);
* @gpio: gpio number requested
*
* As devm_* managed functions are used in mmc_gpio_request_ro(), client
- * drivers do not need to explicitly call mmc_gpio_free_ro() for freeing up,
- * if the requesting and freeing are only needed at probing and unbinding time
- * for once. However, if client drivers do something special like runtime
- * switching for write-protection, they are responsible for calling
- * mmc_gpio_request_ro() and mmc_gpio_free_ro() as a pair on their own.
+ * drivers do not need to worry about freeing up memory.
*
* Returns zero on success, else an error.
*/
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
{
- struct mmc_gpio *ctx;
+ struct mmc_gpio *ctx = host->slot.handler_priv;
int ret;
if (!gpio_is_valid(gpio))
return -EINVAL;
- ret = mmc_gpio_alloc(host);
- if (ret < 0)
- return ret;
-
- ctx = host->slot.handler_priv;
-
- ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+ ret = devm_gpio_request_one(host->parent, gpio, GPIOF_DIR_IN,
ctx->ro_label);
if (ret < 0)
return ret;
@@ -156,8 +137,10 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
irq = -EINVAL;
if (irq >= 0) {
- ret = devm_request_threaded_irq(&host->class_dev, irq,
- NULL, mmc_gpio_cd_irqt,
+ if (!ctx->cd_gpio_isr)
+ ctx->cd_gpio_isr = mmc_gpio_cd_irqt;
+ ret = devm_request_threaded_irq(host->parent, irq,
+ NULL, ctx->cd_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
ctx->cd_label, host);
if (ret < 0)
@@ -171,6 +154,19 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
+/* Register an alternate interrupt service routine for
+ * the card-detect GPIO.
+ */
+void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id))
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ WARN_ON(ctx->cd_gpio_isr);
+ ctx->cd_gpio_isr = isr;
+}
+EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
+
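
[Editor's note] A host driver that needs its own card-detect handler registers it before requesting the CD GPIO, which is exactly what the omap_hsmmc conversion further down in this diff does. In isolation the call sequence looks like this; the driver and handler names are placeholders:

#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

/* Placeholder driver code: override the default CD threaded handler. */
static irqreturn_t my_host_cd_isr(int irq, void *dev_id)
{
        struct mmc_host *mmc = dev_id;

        /* driver-specific debounce / cover-switch handling would go here */
        mmc_detect_change(mmc, msecs_to_jiffies(200));
        return IRQ_HANDLED;
}

static int my_host_setup_cd(struct mmc_host *mmc, unsigned int cd_gpio)
{
        /* must run before the GPIO (and hence its IRQ) is requested */
        mmc_gpio_set_cd_isr(mmc, my_host_cd_isr);

        return mmc_gpio_request_cd(mmc, cd_gpio, 0);
}
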
/**
* mmc_gpio_request_cd - request a gpio for card-detection
* @host: mmc host
@@ -178,11 +174,7 @@ EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
* @debounce: debounce time in microseconds
*
* As devm_* managed functions are used in mmc_gpio_request_cd(), client
- * drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
- * if the requesting and freeing are only needed at probing and unbinding time
- * for once. However, if client drivers do something special like runtime
- * switching for card-detection, they are responsible for calling
- * mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
+ * drivers do not need to worry about freeing up memory.
*
* If GPIO debouncing is desired, set the debounce parameter to a non-zero
* value. The caller is responsible for ensuring that the GPIO driver associated
@@ -193,16 +185,10 @@ EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
unsigned int debounce)
{
- struct mmc_gpio *ctx;
+ struct mmc_gpio *ctx = host->slot.handler_priv;
int ret;
- ret = mmc_gpio_alloc(host);
- if (ret < 0)
- return ret;
-
- ctx = host->slot.handler_priv;
-
- ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+ ret = devm_gpio_request_one(host->parent, gpio, GPIOF_DIR_IN,
ctx->cd_label);
if (ret < 0)
/*
@@ -226,55 +212,6 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
EXPORT_SYMBOL(mmc_gpio_request_cd);
/**
- * mmc_gpio_free_ro - free the write-protection gpio
- * @host: mmc host
- *
- * It's provided only for cases that client drivers need to manually free
- * up the write-protection gpio requested by mmc_gpio_request_ro().
- */
-void mmc_gpio_free_ro(struct mmc_host *host)
-{
- struct mmc_gpio *ctx = host->slot.handler_priv;
- int gpio;
-
- if (!ctx || !ctx->ro_gpio)
- return;
-
- gpio = desc_to_gpio(ctx->ro_gpio);
- ctx->ro_gpio = NULL;
-
- devm_gpio_free(&host->class_dev, gpio);
-}
-EXPORT_SYMBOL(mmc_gpio_free_ro);
-
-/**
- * mmc_gpio_free_cd - free the card-detection gpio
- * @host: mmc host
- *
- * It's provided only for cases that client drivers need to manually free
- * up the card-detection gpio requested by mmc_gpio_request_cd().
- */
-void mmc_gpio_free_cd(struct mmc_host *host)
-{
- struct mmc_gpio *ctx = host->slot.handler_priv;
- int gpio;
-
- if (!ctx || !ctx->cd_gpio)
- return;
-
- if (host->slot.cd_irq >= 0) {
- devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
- host->slot.cd_irq = -EINVAL;
- }
-
- gpio = desc_to_gpio(ctx->cd_gpio);
- ctx->cd_gpio = NULL;
-
- devm_gpio_free(&host->class_dev, gpio);
-}
-EXPORT_SYMBOL(mmc_gpio_free_cd);
-
-/**
* mmc_gpiod_request_cd - request a gpio descriptor for card-detection
* @host: mmc host
* @con_id: function within the GPIO consumer
@@ -285,8 +222,7 @@ EXPORT_SYMBOL(mmc_gpio_free_cd);
* to NULL to ignore
*
* Use this function in place of mmc_gpio_request_cd() to use the GPIO
- * descriptor API. Note that it is paired with mmc_gpiod_free_cd() not
- * mmc_gpio_free_cd(). Note also that it must be called prior to mmc_add_host()
+ * descriptor API. Note that it must be called prior to mmc_add_host()
* otherwise the caller must also call mmc_gpiod_request_cd_irq().
*
* Returns zero on success, else an error.
@@ -295,16 +231,10 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce, bool *gpio_invert)
{
- struct mmc_gpio *ctx;
+ struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
int ret;
- ret = mmc_gpio_alloc(host);
- if (ret < 0)
- return ret;
-
- ctx = host->slot.handler_priv;
-
if (!con_id)
con_id = ctx->cd_label;
@@ -339,8 +269,7 @@ EXPORT_SYMBOL(mmc_gpiod_request_cd);
* set to NULL to ignore
*
* Use this function in place of mmc_gpio_request_ro() to use the GPIO
- * descriptor API. Note that it is paired with mmc_gpiod_free_ro() not
- * mmc_gpio_free_ro().
+ * descriptor API.
*
* Returns zero on success, else an error.
*/
@@ -348,16 +277,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce, bool *gpio_invert)
{
- struct mmc_gpio *ctx;
+ struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
int ret;
- ret = mmc_gpio_alloc(host);
- if (ret < 0)
- return ret;
-
- ctx = host->slot.handler_priv;
-
if (!con_id)
con_id = ctx->ro_label;
@@ -380,28 +303,3 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
-
-/**
- * mmc_gpiod_free_cd - free the card-detection gpio descriptor
- * @host: mmc host
- *
- * It's provided only for cases that client drivers need to manually free
- * up the card-detection gpio requested by mmc_gpiod_request_cd().
- */
-void mmc_gpiod_free_cd(struct mmc_host *host)
-{
- struct mmc_gpio *ctx = host->slot.handler_priv;
-
- if (!ctx || !ctx->cd_gpio)
- return;
-
- if (host->slot.cd_irq >= 0) {
- devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
- host->slot.cd_irq = -EINVAL;
- }
-
- devm_gpiod_put(host->parent, ctx->cd_gpio);
-
- ctx->cd_gpio = NULL;
-}
-EXPORT_SYMBOL(mmc_gpiod_free_cd);
diff --git a/drivers/mmc/core/slot-gpio.h b/drivers/mmc/core/slot-gpio.h
new file mode 100644
index 000000000000..8c1854dc5d58
--- /dev/null
+++ b/drivers/mmc/core/slot-gpio.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _MMC_CORE_SLOTGPIO_H
+#define _MMC_CORE_SLOTGPIO_H
+
+int mmc_gpio_alloc(struct mmc_host *host);
+
+#endif
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2d6fbdd11803..61ac63a3776a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -57,6 +57,7 @@ config MMC_SDHCI_IO_ACCESSORS
config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
bool
+ depends on MMC_SDHCI
select MMC_SDHCI_IO_ACCESSORS
help
This option is selected by drivers running on big endian hosts
@@ -82,6 +83,7 @@ config MMC_SDHCI_PCI
config MMC_RICOH_MMC
bool "Ricoh MMC Controller Disabler"
depends on MMC_SDHCI_PCI
+ default y
help
This adds a pci quirk to disable Ricoh MMC Controller. This
proprietary controller is unnecessary because the SDHCI driver
@@ -228,6 +230,7 @@ config MMC_SDHCI_PXAV3
tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
depends on CLKDEV_LOOKUP
depends on MMC_SDHCI_PLTFM
+ depends on ARCH_MMP || COMPILE_TEST
default CPU_MMP2
help
This selects the Marvell(R) PXAV3 SD Host Controller.
@@ -240,6 +243,7 @@ config MMC_SDHCI_PXAV2
tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
depends on CLKDEV_LOOKUP
depends on MMC_SDHCI_PLTFM
+ depends on ARCH_MMP || COMPILE_TEST
default CPU_PXA910
help
This selects the Marvell(R) PXAV2 SD Host Controller.
@@ -292,6 +296,17 @@ config MMC_SDHCI_BCM2835
If unsure, say N.
+config MMC_SDHCI_F_SDH30
+ tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ needed by some Fujitsu SoCs for MMC / SD / SDIO support.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f7b0a77cf419..6a7cfe0de332 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
+obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 509365cb22c6..fe32948c6114 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -21,43 +21,7 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-
-#define NUM_PINS(x) (x + 2)
-
-#define SDMMC_CLKSEL 0x09C
-#define SDMMC_CLKSEL64 0x0A8
-#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0)
-#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16)
-#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24)
-#define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7)
-#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
- SDMMC_CLKSEL_CCLK_DRIVE(y) | \
- SDMMC_CLKSEL_CCLK_DIVIDER(z))
-#define SDMMC_CLKSEL_WAKEUP_INT BIT(11)
-
-#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
-#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
-
-/* Block number in eMMC */
-#define DWMCI_BLOCK_NUM 0xFFFFFFFF
-
-#define SDMMC_EMMCP_BASE 0x1000
-#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010)
-#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200)
-#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204)
-#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C)
-
-/* SMU control bits */
-#define DWMCI_MPSCTRL_SECURE_READ_BIT BIT(7)
-#define DWMCI_MPSCTRL_SECURE_WRITE_BIT BIT(6)
-#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT BIT(5)
-#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4)
-#define DWMCI_MPSCTRL_USE_FUSE_KEY BIT(3)
-#define DWMCI_MPSCTRL_ECB_MODE BIT(2)
-#define DWMCI_MPSCTRL_ENCRYPTION BIT(1)
-#define DWMCI_MPSCTRL_VALID BIT(0)
-
-#define EXYNOS_CCLKIN_MIN 50000000 /* unit: HZ */
+#include "dw_mmc-exynos.h"
/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
@@ -114,11 +78,11 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
mci_writel(host, MPSBEGIN0, 0);
- mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM);
- mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT |
- DWMCI_MPSCTRL_NON_SECURE_READ_BIT |
- DWMCI_MPSCTRL_VALID |
- DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT);
+ mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
+ mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
+ SDMMC_MPSCTRL_NON_SECURE_READ_BIT |
+ SDMMC_MPSCTRL_VALID |
+ SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
}
return 0;
@@ -127,9 +91,9 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
static int dw_mci_exynos_setup_clock(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
- unsigned long rate = clk_get_rate(host->ciu_clk);
- host->bus_hz = rate / (priv->ciu_div + 1);
+ host->bus_hz /= (priv->ciu_div + 1);
+
return 0;
}
@@ -232,8 +196,11 @@ static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
mci_writel(host, CLKSEL, priv->sdr_timing);
}
- /* Don't care if wanted clock is zero */
- if (!wanted)
+ /*
+ * Don't care if wanted clock is zero or
+ * ciu clock is unavailable
+ */
+ if (!wanted || IS_ERR(host->ciu_clk))
return;
/* Guaranteed minimum frequency for cclkin */
@@ -263,10 +230,8 @@ static int dw_mci_exynos_parse_dt(struct dw_mci *host)
int ret;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(host->dev, "mem alloc failed for private data\n");
+ if (!priv)
return -ENOMEM;
- }
for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
if (of_device_is_compatible(np, exynos_compat[idx].compatible))
@@ -375,64 +340,23 @@ out:
return loc;
}
-static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
- struct dw_mci_tuning_data *tuning_data)
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
{
struct dw_mci *host = slot->host;
struct mmc_host *mmc = slot->mmc;
- const u8 *blk_pattern = tuning_data->blk_pattern;
- u8 *blk_test;
- unsigned int blksz = tuning_data->blksz;
u8 start_smpl, smpl, candiates = 0;
s8 found = -1;
int ret = 0;
- blk_test = kmalloc(blksz, GFP_KERNEL);
- if (!blk_test)
- return -ENOMEM;
-
start_smpl = dw_mci_exynos_get_clksmpl(host);
do {
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
- struct scatterlist sg;
-
- cmd.opcode = opcode;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-
- stop.opcode = MMC_STOP_TRANSMISSION;
- stop.arg = 0;
- stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
-
- data.blksz = blksz;
- data.blocks = 1;
- data.flags = MMC_DATA_READ;
- data.sg = &sg;
- data.sg_len = 1;
-
- sg_init_one(&sg, blk_test, blksz);
- mrq.cmd = &cmd;
- mrq.stop = &stop;
- mrq.data = &data;
- host->mrq = &mrq;
-
mci_writel(host, TMOUT, ~0);
smpl = dw_mci_exynos_move_next_clksmpl(host);
- mmc_wait_for_req(mmc, &mrq);
+ if (!mmc_send_tuning(mmc))
+ candiates |= (1 << smpl);
- if (!cmd.error && !data.error) {
- if (!memcmp(blk_pattern, blk_test, blksz))
- candiates |= (1 << smpl);
- } else {
- dev_dbg(host->dev,
- "Tuning error: cmd.error:%d, data.error:%d\n",
- cmd.error, data.error);
- }
} while (start_smpl != smpl);
found = dw_mci_exynos_get_best_clksmpl(candiates);
@@ -441,7 +365,6 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
else
ret = -EIO;
- kfree(blk_test);
return ret;
}
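
[Editor's note] The loop above only records, per sample phase, whether the tuning block came back clean (one bit per phase in the candiates mask); dw_mci_exynos_get_best_clksmpl(), untouched by this hunk, then has to pick a phase from that mask. A standalone illustration of the underlying idea, choosing the middle of the longest run of set bits in an 8-bit mask (simplified, not the driver's exact algorithm, which also handles wrap-around between bit 7 and bit 0):

#include <stdio.h>

/* Bit i set means "sample phase i passed tuning". */
static int best_sample_phase(unsigned char mask)
{
        int best_start = -1, best_len = 0;
        int start = -1, len = 0;

        for (int i = 0; i < 8; i++) {
                if (mask & (1u << i)) {
                        if (len == 0)
                                start = i;
                        len++;
                        if (len > best_len) {
                                best_len = len;
                                best_start = start;
                        }
                } else {
                        len = 0;
                }
        }

        return best_len ? best_start + best_len / 2 : -1;
}

int main(void)
{
        /* phases 2..5 passed, phase 7 passed in isolation */
        unsigned char candidates = 0xBC;        /* 1011 1100 */

        printf("best phase: %d\n", best_sample_phase(candidates));      /* prints 4 */
        return 0;
}
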
@@ -499,7 +422,7 @@ static const struct dev_pm_ops dw_mci_exynos_pmops = {
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
- .remove = __exit_p(dw_mci_pltfm_remove),
+ .remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_exynos",
.of_match_table = dw_mci_exynos_match,
diff --git a/drivers/mmc/host/dw_mmc-exynos.h b/drivers/mmc/host/dw_mmc-exynos.h
new file mode 100644
index 000000000000..7872ce586b55
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-exynos.h
@@ -0,0 +1,56 @@
+/*
+ * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
+ *
+ * Copyright (C) 2012-2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_EXYNOS_H_
+#define _DW_MMC_EXYNOS_H_
+
+/* Extended Register's Offset */
+#define SDMMC_CLKSEL 0x09C
+#define SDMMC_CLKSEL64 0x0A8
+
+/* CLKSEL register defines */
+#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0)
+#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16)
+#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24)
+#define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7)
+#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
+ SDMMC_CLKSEL_CCLK_DRIVE(y) | \
+ SDMMC_CLKSEL_CCLK_DIVIDER(z))
+#define SDMMC_CLKSEL_WAKEUP_INT BIT(11)
+
+/* Protector Register */
+#define SDMMC_EMMCP_BASE 0x1000
+#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010)
+#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200)
+#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204)
+#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C)
+
+/* SMU control defines */
+#define SDMMC_MPSCTRL_SECURE_READ_BIT BIT(7)
+#define SDMMC_MPSCTRL_SECURE_WRITE_BIT BIT(6)
+#define SDMMC_MPSCTRL_NON_SECURE_READ_BIT BIT(5)
+#define SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4)
+#define SDMMC_MPSCTRL_USE_FUSE_KEY BIT(3)
+#define SDMMC_MPSCTRL_ECB_MODE BIT(2)
+#define SDMMC_MPSCTRL_ENCRYPTION BIT(1)
+#define SDMMC_MPSCTRL_VALID BIT(0)
+
+/* Maximum number of Ending sector */
+#define SDMMC_ENDING_SEC_NR_MAX 0xFFFFFFFF
+
+/* Fixed clock divider */
+#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
+#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
+
+/* Minimal required clock frequency for cclkin, unit: HZ */
+#define EXYNOS_CCLKIN_MIN 50000000
+
+#endif /* _DW_MMC_EXYNOS_H_ */
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 5650ac488cf3..e2a726a503ee 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -133,7 +133,7 @@ static SIMPLE_DEV_PM_OPS(dw_mci_rockchip_pmops,
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.probe = dw_mci_rockchip_probe,
- .remove = __exit_p(dw_mci_pltfm_remove),
+ .remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_rockchip",
.of_match_table = dw_mci_rockchip_match,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 67c04518ec4c..4d2e3c2e1830 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -27,6 +27,7 @@
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
@@ -313,7 +314,9 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
if (cmdr == MMC_READ_SINGLE_BLOCK ||
cmdr == MMC_READ_MULTIPLE_BLOCK ||
cmdr == MMC_WRITE_BLOCK ||
- cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
+ cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
+ cmdr == MMC_SEND_TUNING_BLOCK ||
+ cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
stop->opcode = MMC_STOP_TRANSMISSION;
stop->arg = 0;
stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
@@ -758,6 +761,7 @@ disable:
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
+ unsigned long irqflags;
int sg_len;
u32 temp;
@@ -794,9 +798,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
mci_writel(host, CTRL, temp);
/* Disable RX/TX IRQs, let DMA handle it */
+ spin_lock_irqsave(&host->irq_lock, irqflags);
temp = mci_readl(host, INTMASK);
temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
mci_writel(host, INTMASK, temp);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
host->dma_ops->start(host, sg_len);
@@ -805,6 +811,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
+ unsigned long irqflags;
u32 temp;
data->error = -EINPROGRESS;
@@ -833,9 +840,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->part_buf_count = 0;
mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_DMA_ENABLE;
@@ -926,7 +936,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
/* enable clock; only low power if no SDIO */
clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
- if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->sdio_id)))
+ if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
mci_writel(host, CLKENA, clk_en_a);
@@ -1109,6 +1119,12 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
}
+ set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+ regs = mci_readl(slot->host, PWREN);
+ regs |= (1 << slot->id);
+ mci_writel(slot->host, PWREN, regs);
+ break;
+ case MMC_POWER_ON:
if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret < 0)
@@ -1117,10 +1133,6 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
slot->host->vqmmc_enabled = true;
}
- set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
- regs = mci_readl(slot->host, PWREN);
- regs |= (1 << slot->id);
- mci_writel(slot->host, PWREN, regs);
break;
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
@@ -1245,27 +1257,37 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
-/*
- * Disable lower power mode.
- *
- * Low power mode will stop the card clock when idle. According to the
- * description of the CLKENA register we should disable low power mode
- * for SDIO cards if we need SDIO interrupts to work.
- *
- * This function is fast if low power mode is already disabled.
- */
-static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
+static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
- u32 clk_en_a;
- const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
- clk_en_a = mci_readl(host, CLKENA);
+ /*
+ * Low power mode will stop the card clock when idle. According to the
+ * description of the CLKENA register we should disable low power mode
+ * for SDIO cards if we need SDIO interrupts to work.
+ */
+ if (mmc->caps & MMC_CAP_SDIO_IRQ) {
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+ u32 clk_en_a_old;
+ u32 clk_en_a;
- if (clk_en_a & clken_low_pwr) {
- mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
+ clk_en_a_old = mci_readl(host, CLKENA);
+
+ if (card->type == MMC_TYPE_SDIO ||
+ card->type == MMC_TYPE_SD_COMBO) {
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old & ~clken_low_pwr;
+ } else {
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old | clken_low_pwr;
+ }
+
+ if (clk_en_a != clk_en_a_old) {
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+ SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
}
}
@@ -1273,25 +1295,20 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ unsigned long irqflags;
u32 int_mask;
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
- if (enb) {
- /*
- * Turn off low power mode if it was enabled. This is a bit of
- * a heavy operation and we disable / enable IRQs a lot, so
- * we'll leave low power mode disabled and it will get
- * re-enabled again in dw_mci_setup_bus().
- */
- dw_mci_disable_low_power(slot);
+ if (enb)
+ int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
+ else
+ int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
+ mci_writel(host, INTMASK, int_mask);
- mci_writel(host, INTMASK,
- (int_mask | SDMMC_INT_SDIO(slot->sdio_id)));
- } else {
- mci_writel(host, INTMASK,
- (int_mask & ~SDMMC_INT_SDIO(slot->sdio_id)));
- }
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
@@ -1299,30 +1316,10 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- struct dw_mci_tuning_data tuning_data;
int err = -ENOSYS;
- if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
- if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
- tuning_data.blk_pattern = tuning_blk_pattern_8bit;
- tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
- } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
- tuning_data.blk_pattern = tuning_blk_pattern_4bit;
- tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
- } else {
- return -EINVAL;
- }
- } else if (opcode == MMC_SEND_TUNING_BLOCK) {
- tuning_data.blk_pattern = tuning_blk_pattern_4bit;
- tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
- } else {
- dev_err(host->dev,
- "Undefined command(%d) for tuning\n", opcode);
- return -EINVAL;
- }
-
if (drv_data && drv_data->execute_tuning)
- err = drv_data->execute_tuning(slot, opcode, &tuning_data);
+ err = drv_data->execute_tuning(slot);
return err;
}
@@ -1337,7 +1334,7 @@ static const struct mmc_host_ops dw_mci_ops = {
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
-
+ .init_card = dw_mci_init_card,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -2319,9 +2316,9 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
#ifdef CONFIG_MMC_DW_IDMAC
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65536;
- mmc->max_blk_count = host->ring_size;
mmc->max_seg_size = 0x1000;
- mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ mmc->max_blk_count = mmc->max_req_size / 512;
#else
mmc->max_segs = 64;
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
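
[Editor's note] The IDMAC branch above now derives max_blk_count from max_req_size instead of capping it at the ring size. With a hypothetical ring of 128 descriptors the numbers work out as follows (worked example; the ring size is an assumption, not a value from this diff):

#include <stdio.h>

int main(void)
{
        unsigned int ring_size     = 128;                       /* hypothetical IDMAC ring */
        unsigned int max_seg_size  = 0x1000;                    /* 4 KiB per descriptor */

        unsigned int max_segs      = ring_size;
        unsigned int max_req_size  = max_seg_size * ring_size;  /* 524288 = 512 KiB */
        unsigned int max_blk_count = max_req_size / 512;        /* 1024 blocks */

        printf("max_segs=%u max_req_size=%u max_blk_count=%u\n",
               max_segs, max_req_size, max_blk_count);
        return 0;
}
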
@@ -2533,10 +2530,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
u32 clock_frequency;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
/* find out number of slots supported */
if (of_property_read_u32(dev->of_node, "num-slots",
@@ -2660,6 +2655,7 @@ int dw_mci_probe(struct dw_mci *host)
host->quirks = host->pdata->quirks;
spin_lock_init(&host->lock);
+ spin_lock_init(&host->irq_lock);
INIT_LIST_HEAD(&host->queue);
/*
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 0d0f7a271d63..18c4afe683b8 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -244,15 +244,11 @@ struct dw_mci_slot {
unsigned long flags;
#define DW_MMC_CARD_PRESENT 0
#define DW_MMC_CARD_NEED_INIT 1
+#define DW_MMC_CARD_NO_LOW_PWR 2
int id;
int sdio_id;
};
-struct dw_mci_tuning_data {
- const u8 *blk_pattern;
- unsigned int blksz;
-};
-
/**
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
@@ -274,7 +270,6 @@ struct dw_mci_drv_data {
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
- int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
- struct dw_mci_tuning_data *tuning_data);
+ int (*execute_tuning)(struct dw_mci_slot *slot);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 8232e9a02d40..7fe16194ebc8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -430,7 +430,6 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
static void mmci_dma_setup(struct mmci_host *host)
{
const char *rxname, *txname;
- dma_cap_mask_t mask;
struct variant_data *variant = host->variant;
host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
@@ -439,10 +438,6 @@ static void mmci_dma_setup(struct mmci_host *host)
/* initialize pre request cookie */
host->next_data.cookie = 1;
- /* Try to acquire a generic DMA engine slave channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
/*
* If only an RX channel is specified, the driver will
* attempt to use it bidirectionally, however if it is
@@ -1739,10 +1734,10 @@ static int mmci_probe(struct amba_device *dev,
pm_runtime_set_autosuspend_delay(&dev->dev, 50);
pm_runtime_use_autosuspend(&dev->dev);
- pm_runtime_put(&dev->dev);
mmc_add_host(mmc);
+ pm_runtime_put(&dev->dev);
return 0;
clk_disable:
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index f3e18d08e852..006f1862444b 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
@@ -562,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev)
struct dma_slave_config cfg;
struct clk *clk;
void __iomem *reg_mmc;
- dma_cap_mask_t mask;
int irq, ret;
u32 i;
@@ -586,9 +586,8 @@ static int moxart_probe(struct platform_device *pdev)
goto out;
}
- clk = of_clk_get(node, 0);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
- dev_err(dev, "of_clk_get failed\n");
ret = PTR_ERR(clk);
goto out;
}
@@ -599,10 +598,9 @@ static int moxart_probe(struct platform_device *pdev)
goto out;
}
- mmc_of_parse(mmc);
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -611,8 +609,8 @@ static int moxart_probe(struct platform_device *pdev)
host->timeout = msecs_to_jiffies(1000);
host->sysclk = clk_get_rate(clk);
host->fifo_width = readl(host->base + REG_FEATURE) << 2;
- host->dma_chan_tx = of_dma_request_slave_channel(node, "tx");
- host->dma_chan_rx = of_dma_request_slave_channel(node, "rx");
+ host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
+ host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");
spin_lock_init(&host->lock);
@@ -622,6 +620,11 @@ static int moxart_probe(struct platform_device *pdev)
mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. */
if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
+ if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
+ PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
dev_dbg(dev, "PIO mode transfer enabled\n");
host->have_dma = false;
} else {
@@ -700,9 +703,6 @@ static int moxart_remove(struct platform_device *pdev)
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
}
-
- kfree(host);
-
return 0;
}
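
[Editor's note] dma_request_slave_channel_reason() returns an ERR_PTR instead of NULL, which is what lets the moxart probe above propagate -EPROBE_DEFER rather than silently falling back to PIO before the DMA controller has registered. The general shape of that pattern, reduced to a single channel in a hypothetical probe function:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Sketch: defer probing until the DMA provider is up; otherwise use PIO. */
static int myhost_probe(struct platform_device *pdev)
{
        struct dma_chan *chan;

        chan = dma_request_slave_channel_reason(&pdev->dev, "tx");
        if (IS_ERR(chan)) {
                if (PTR_ERR(chan) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;   /* DMA driver not ready yet */
                chan = NULL;                    /* no DMA available: fall back to PIO */
        }

        /* ... set up the host, using DMA only when chan != NULL ... */
        return 0;
}
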
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 4f8618f4522d..a448498e3af2 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -25,7 +25,6 @@
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/pinctrl/consumer.h>
#include <asm/sizes.h>
#include <asm/unaligned.h>
@@ -704,7 +703,6 @@ static int mvsd_probe(struct platform_device *pdev)
const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
- struct pinctrl *pinctrl;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -721,10 +719,6 @@ static int mvsd_probe(struct platform_device *pdev)
host->mmc = mmc;
host->dev = &pdev->dev;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "no pins associated\n");
-
/*
* Some non-DT platforms do not pass a clock, and the clock
* frequency is passed through platform_data. On DT platforms,
@@ -828,8 +822,6 @@ static int mvsd_probe(struct platform_device *pdev)
out:
if (mmc) {
- mmc_gpio_free_cd(mmc);
- mmc_gpio_free_ro(mmc);
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
@@ -844,8 +836,6 @@ static int mvsd_remove(struct platform_device *pdev)
struct mvsd_host *host = mmc_priv(mmc);
- mmc_gpio_free_cd(mmc);
- mmc_gpio_free_ro(mmc);
mmc_remove_host(mmc);
del_timer_sync(&host->timer);
mvsd_power_down(host);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 60c4ca97a727..a82411a2c024 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -677,8 +677,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
return 0;
out_free_dma:
- if (ssp->dmach)
- dma_release_channel(ssp->dmach);
+ dma_release_channel(ssp->dmach);
out_clk_disable:
clk_disable_unprepare(ssp->clk);
out_mmc_free:
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7c71dcdcba8b..f84cfb01716d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -36,6 +36,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
@@ -251,55 +252,24 @@ static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
static int omap_hsmmc_card_detect(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
- struct omap_hsmmc_platform_data *mmc = host->pdata;
- /* NOTE: assumes card detect signal is active-low */
- return !gpio_get_value_cansleep(mmc->switch_pin);
+ return mmc_gpio_get_cd(host->mmc);
}
static int omap_hsmmc_get_wp(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
- struct omap_hsmmc_platform_data *mmc = host->pdata;
- /* NOTE: assumes write protect signal is active-high */
- return gpio_get_value_cansleep(mmc->gpio_wp);
+ return mmc_gpio_get_ro(host->mmc);
}
static int omap_hsmmc_get_cover_state(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
- struct omap_hsmmc_platform_data *mmc = host->pdata;
- /* NOTE: assumes card detect signal is active-low */
- return !gpio_get_value_cansleep(mmc->switch_pin);
+ return mmc_gpio_get_cd(host->mmc);
}
-#ifdef CONFIG_PM
-
-static int omap_hsmmc_suspend_cdirq(struct device *dev)
-{
- struct omap_hsmmc_host *host = dev_get_drvdata(dev);
-
- disable_irq(host->card_detect_irq);
- return 0;
-}
-
-static int omap_hsmmc_resume_cdirq(struct device *dev)
-{
- struct omap_hsmmc_host *host = dev_get_drvdata(dev);
-
- enable_irq(host->card_detect_irq);
- return 0;
-}
-
-#else
-
-#define omap_hsmmc_suspend_cdirq NULL
-#define omap_hsmmc_resume_cdirq NULL
-
-#endif
-
#ifdef CONFIG_REGULATOR
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
@@ -464,7 +434,10 @@ static inline int omap_hsmmc_have_reg(void)
#endif
-static int omap_hsmmc_gpio_init(struct omap_hsmmc_host *host,
+static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id);
+
+static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
+ struct omap_hsmmc_host *host,
struct omap_hsmmc_platform_data *pdata)
{
int ret;
@@ -477,46 +450,24 @@ static int omap_hsmmc_gpio_init(struct omap_hsmmc_host *host,
host->card_detect = omap_hsmmc_card_detect;
host->card_detect_irq =
gpio_to_irq(pdata->switch_pin);
- ret = gpio_request(pdata->switch_pin, "mmc_cd");
+ mmc_gpio_set_cd_isr(mmc, omap_hsmmc_detect);
+ ret = mmc_gpio_request_cd(mmc, pdata->switch_pin, 0);
if (ret)
return ret;
- ret = gpio_direction_input(pdata->switch_pin);
- if (ret)
- goto err_free_sp;
} else {
pdata->switch_pin = -EINVAL;
}
if (gpio_is_valid(pdata->gpio_wp)) {
host->get_ro = omap_hsmmc_get_wp;
- ret = gpio_request(pdata->gpio_wp, "mmc_wp");
- if (ret)
- goto err_free_cd;
- ret = gpio_direction_input(pdata->gpio_wp);
+ ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
if (ret)
- goto err_free_wp;
+ return ret;
} else {
pdata->gpio_wp = -EINVAL;
}
return 0;
-
-err_free_wp:
- gpio_free(pdata->gpio_wp);
-err_free_cd:
- if (gpio_is_valid(pdata->switch_pin))
-err_free_sp:
- gpio_free(pdata->switch_pin);
- return ret;
-}
-
-static void omap_hsmmc_gpio_free(struct omap_hsmmc_host *host,
- struct omap_hsmmc_platform_data *pdata)
-{
- if (gpio_is_valid(pdata->gpio_wp))
- gpio_free(pdata->gpio_wp);
- if (gpio_is_valid(pdata->switch_pin))
- gpio_free(pdata->switch_pin);
}
/*
@@ -1978,13 +1929,6 @@ static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
struct omap_hsmmc_platform_data *pdata;
struct device_node *np = dev->of_node;
- u32 bus_width, max_freq;
- int cd_gpio, wp_gpio;
-
- cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
- if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER)
- return ERR_PTR(-EPROBE_DEFER);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -1993,34 +1937,20 @@ static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
if (of_find_property(np, "ti,dual-volt", NULL))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
- pdata->switch_pin = cd_gpio;
- pdata->gpio_wp = wp_gpio;
+ pdata->switch_pin = -EINVAL;
+ pdata->gpio_wp = -EINVAL;
if (of_find_property(np, "ti,non-removable", NULL)) {
pdata->nonremovable = true;
pdata->no_regulator_off_init = true;
}
- of_property_read_u32(np, "bus-width", &bus_width);
- if (bus_width == 4)
- pdata->caps |= MMC_CAP_4_BIT_DATA;
- else if (bus_width == 8)
- pdata->caps |= MMC_CAP_8_BIT_DATA;
if (of_find_property(np, "ti,needs-special-reset", NULL))
pdata->features |= HSMMC_HAS_UPDATED_RESET;
- if (!of_property_read_u32(np, "max-frequency", &max_freq))
- pdata->max_freq = max_freq;
-
if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
- if (of_find_property(np, "keep-power-in-suspend", NULL))
- pdata->pm_caps |= MMC_PM_KEEP_POWER;
-
- if (of_find_property(np, "enable-sdio-wakeup", NULL))
- pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
-
return pdata;
}
#else
@@ -2078,6 +2008,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err;
}
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err1;
+
host = mmc_priv(mmc);
host->mmc = mmc;
host->pdata = pdata;
@@ -2091,7 +2025,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->next_data.cookie = 1;
host->pbias_enabled = 0;
- ret = omap_hsmmc_gpio_init(host, pdata);
+ ret = omap_hsmmc_gpio_init(mmc, host, pdata);
if (ret)
goto err_gpio;
@@ -2106,7 +2040,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (pdata->max_freq > 0)
mmc->f_max = pdata->max_freq;
- else
+ else if (mmc->f_max == 0)
mmc->f_max = OMAP_MMC_MAX_CLOCK;
spin_lock_init(&host->irq_lock);
@@ -2160,7 +2094,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (mmc_pdata(host)->nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
- mmc->pm_caps = mmc_pdata(host)->pm_caps;
+ mmc->pm_caps |= mmc_pdata(host)->pm_caps;
omap_hsmmc_conf_bus_power(host);
@@ -2222,22 +2156,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
- /* Request IRQ for card detect */
- if (host->card_detect_irq) {
- ret = devm_request_threaded_irq(&pdev->dev,
- host->card_detect_irq,
- NULL, omap_hsmmc_detect,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- mmc_hostname(mmc), host);
- if (ret) {
- dev_err(mmc_dev(host->mmc),
- "Unable to grab MMC CD IRQ\n");
- goto err_irq_cd;
- }
- host->suspend = omap_hsmmc_suspend_cdirq;
- host->resume = omap_hsmmc_resume_cdirq;
- }
-
omap_hsmmc_disable_irq(host);
/*
@@ -2276,7 +2194,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
err_slot_name:
mmc_remove_host(mmc);
-err_irq_cd:
if (host->use_reg)
omap_hsmmc_reg_put(host);
err_irq:
@@ -2289,7 +2206,6 @@ err_irq:
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
err1:
- omap_hsmmc_gpio_free(host, pdata);
err_gpio:
mmc_free_host(mmc);
err:
@@ -2315,32 +2231,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
- omap_hsmmc_gpio_free(host, host->pdata);
mmc_free_host(host->mmc);
return 0;
}
#ifdef CONFIG_PM
-static int omap_hsmmc_prepare(struct device *dev)
-{
- struct omap_hsmmc_host *host = dev_get_drvdata(dev);
-
- if (host->suspend)
- return host->suspend(dev);
-
- return 0;
-}
-
-static void omap_hsmmc_complete(struct device *dev)
-{
- struct omap_hsmmc_host *host = dev_get_drvdata(dev);
-
- if (host->resume)
- host->resume(dev);
-
-}
-
static int omap_hsmmc_suspend(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
@@ -2398,8 +2294,6 @@ static int omap_hsmmc_resume(struct device *dev)
}
#else
-#define omap_hsmmc_prepare NULL
-#define omap_hsmmc_complete NULL
#define omap_hsmmc_suspend NULL
#define omap_hsmmc_resume NULL
#endif
@@ -2484,8 +2378,6 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
.suspend = omap_hsmmc_suspend,
.resume = omap_hsmmc_resume,
- .prepare = omap_hsmmc_prepare,
- .complete = omap_hsmmc_complete,
.runtime_suspend = omap_hsmmc_runtime_suspend,
.runtime_resume = omap_hsmmc_runtime_resume,
};
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index c70b602f8f1e..1d3d6c4bfdc6 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -28,6 +28,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
#include <linux/mfd/rtsx_pci.h>
#include <asm/unaligned.h>
@@ -53,9 +54,9 @@ struct realtek_pci_sdmmc {
#define SDMMC_POWER_ON 1
#define SDMMC_POWER_OFF 0
- unsigned int sg_count;
+ int sg_count;
s32 cookie;
- unsigned int cookie_sg_count;
+ int cookie_sg_count;
bool using_cookie;
};
@@ -71,30 +72,83 @@ static inline void sd_clear_error(struct realtek_pci_sdmmc *host)
}
#ifdef DEBUG
-static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
+static void dump_reg_range(struct realtek_pci_sdmmc *host, u16 start, u16 end)
{
- struct rtsx_pcr *pcr = host->pcr;
- u16 i;
- u8 *ptr;
+ u16 len = end - start + 1;
+ int i;
+ u8 data[8];
+
+ for (i = 0; i < len; i += 8) {
+ int j;
+ int n = min(8, len - i);
+
+ memset(&data, 0, sizeof(data));
+ for (j = 0; j < n; j++)
+ rtsx_pci_read_register(host->pcr, start + i + j,
+ data + j);
+ dev_dbg(sdmmc_dev(host), "0x%04X(%d): %8ph\n",
+ start + i, n, data);
+ }
+}
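The chunked register dump above reads the range eight bytes at a time. A minimal user-space sketch of the same pattern, illustrative only and not part of the patch (read_reg() is a made-up stand-in for rtsx_pci_read_register()):

#include <stdio.h>

static unsigned char read_reg(unsigned int addr)
{
	return (unsigned char)(addr & 0xff);	/* fake register contents */
}

static void dump_range(unsigned int start, unsigned int end)
{
	unsigned int len = end - start + 1;
	unsigned int i, j;

	for (i = 0; i < len; i += 8) {
		unsigned int n = (len - i < 8) ? len - i : 8;

		printf("0x%04X(%u):", start + i, n);
		for (j = 0; j < n; j++)
			printf(" %02x", read_reg(start + i + j));
		printf("\n");
	}
}

int main(void)
{
	dump_range(0xFDA0, 0xFDB3);	/* same range the driver dumps */
	return 0;
}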
- /* Print SD host internal registers */
- rtsx_pci_init_cmd(pcr);
- for (i = 0xFDA0; i <= 0xFDAE; i++)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
- for (i = 0xFD52; i <= 0xFD69; i++)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
- rtsx_pci_send_cmd(pcr, 100);
-
- ptr = rtsx_pci_get_cmd_data(pcr);
- for (i = 0xFDA0; i <= 0xFDAE; i++)
- dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
- for (i = 0xFD52; i <= 0xFD69; i++)
- dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
+{
+ dump_reg_range(host, 0xFDA0, 0xFDB3);
+ dump_reg_range(host, 0xFD52, 0xFD69);
}
#else
#define sd_print_debug_regs(host)
#endif /* DEBUG */
+static inline int sd_get_cd_int(struct realtek_pci_sdmmc *host)
+{
+ return rtsx_pci_readl(host->pcr, RTSX_BIPR) & SD_EXIST;
+}
+
+static void sd_cmd_set_sd_cmd(struct rtsx_pcr *pcr, struct mmc_command *cmd)
+{
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF,
+ SD_CMD_START | cmd->opcode);
+ rtsx_pci_write_be32(pcr, SD_CMD1, cmd->arg);
+}
+
+static void sd_cmd_set_data_len(struct rtsx_pcr *pcr, u16 blocks, u16 blksz)
+{
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, blocks);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, blocks >> 8);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, blksz);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, blksz >> 8);
+}
+
+static int sd_response_type(struct mmc_command *cmd)
+{
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ return SD_RSP_TYPE_R0;
+ case MMC_RSP_R1:
+ return SD_RSP_TYPE_R1;
+ case MMC_RSP_R1 & ~MMC_RSP_CRC:
+ return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
+ case MMC_RSP_R1B:
+ return SD_RSP_TYPE_R1b;
+ case MMC_RSP_R2:
+ return SD_RSP_TYPE_R2;
+ case MMC_RSP_R3:
+ return SD_RSP_TYPE_R3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sd_status_index(int resp_type)
+{
+ if (resp_type == SD_RSP_TYPE_R0)
+ return 0;
+ else if (resp_type == SD_RSP_TYPE_R2)
+ return 16;
+
+ return 5;
+}
/*
* sd_pre_dma_transfer - do dma_map_sg() or using cookie
*
@@ -166,123 +220,6 @@ static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
data->host_cookie = 0;
}
-static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
- u8 *buf, int buf_len, int timeout)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err, i;
- u8 trans_mode;
-
- dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, cmd[0] - 0x40);
-
- if (!buf)
- buf_len = 0;
-
- if ((cmd[0] & 0x3F) == MMC_SEND_TUNING_BLOCK)
- trans_mode = SD_TM_AUTO_TUNING;
- else
- trans_mode = SD_TM_NORMAL_READ;
-
- rtsx_pci_init_cmd(pcr);
-
- for (i = 0; i < 5; i++)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0 + i, 0xFF, cmd[i]);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H,
- 0xFF, (u8)(byte_cnt >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
- SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
- if (trans_mode != SD_TM_AUTO_TUNING)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER,
- 0xFF, trans_mode | SD_TRANSFER_START);
- rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
-
- err = rtsx_pci_send_cmd(pcr, timeout);
- if (err < 0) {
- sd_print_debug_regs(host);
- dev_dbg(sdmmc_dev(host),
- "rtsx_pci_send_cmd fail (err = %d)\n", err);
- return err;
- }
-
- if (buf && buf_len) {
- err = rtsx_pci_read_ppbuf(pcr, buf, buf_len);
- if (err < 0) {
- dev_dbg(sdmmc_dev(host),
- "rtsx_pci_read_ppbuf fail (err = %d)\n", err);
- return err;
- }
- }
-
- return 0;
-}
-
-static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
- u8 *buf, int buf_len, int timeout)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err, i;
- u8 trans_mode;
-
- if (!buf)
- buf_len = 0;
-
- if (buf && buf_len) {
- err = rtsx_pci_write_ppbuf(pcr, buf, buf_len);
- if (err < 0) {
- dev_dbg(sdmmc_dev(host),
- "rtsx_pci_write_ppbuf fail (err = %d)\n", err);
- return err;
- }
- }
-
- trans_mode = cmd ? SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3;
- rtsx_pci_init_cmd(pcr);
-
- if (cmd) {
- dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d\n", __func__,
- cmd[0] - 0x40);
-
- for (i = 0; i < 5; i++)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_CMD0 + i, 0xFF, cmd[i]);
- }
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H,
- 0xFF, (u8)(byte_cnt >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
- SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
- trans_mode | SD_TRANSFER_START);
- rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
-
- err = rtsx_pci_send_cmd(pcr, timeout);
- if (err < 0) {
- sd_print_debug_regs(host);
- dev_dbg(sdmmc_dev(host),
- "rtsx_pci_send_cmd fail (err = %d)\n", err);
- return err;
- }
-
- return 0;
-}
-
static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
struct mmc_command *cmd)
{
@@ -293,47 +230,18 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
int timeout = 100;
int i;
u8 *ptr;
- int stat_idx = 0;
- u8 rsp_type;
- int rsp_len = 5;
+ int rsp_type;
+ int stat_idx;
bool clock_toggled = false;
dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
__func__, cmd_idx, arg);
- /* Response type:
- * R0
- * R1, R5, R6, R7
- * R1b
- * R2
- * R3, R4
- */
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_NONE:
- rsp_type = SD_RSP_TYPE_R0;
- rsp_len = 0;
- break;
- case MMC_RSP_R1:
- rsp_type = SD_RSP_TYPE_R1;
- break;
- case MMC_RSP_R1 & ~MMC_RSP_CRC:
- rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
- break;
- case MMC_RSP_R1B:
- rsp_type = SD_RSP_TYPE_R1b;
- break;
- case MMC_RSP_R2:
- rsp_type = SD_RSP_TYPE_R2;
- rsp_len = 16;
- break;
- case MMC_RSP_R3:
- rsp_type = SD_RSP_TYPE_R3;
- break;
- default:
- dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n");
- err = -EINVAL;
+ rsp_type = sd_response_type(cmd);
+ if (rsp_type < 0)
goto out;
- }
+
+ stat_idx = sd_status_index(rsp_type);
if (rsp_type == SD_RSP_TYPE_R1b)
timeout = 3000;
@@ -348,13 +256,7 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
}
rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg);
-
+ sd_cmd_set_sd_cmd(pcr, cmd);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
0x01, PINGPONG_BUFFER);
@@ -368,12 +270,10 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
/* Read data from ping-pong buffer */
for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
- stat_idx = 16;
} else if (rsp_type != SD_RSP_TYPE_R0) {
/* Read data from SD_CMDx registers */
for (i = SD_CMD0; i <= SD_CMD4; i++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
- stat_idx = 5;
}
rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
@@ -438,71 +338,213 @@ out:
SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
}
-static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
+static int sd_read_data(struct realtek_pci_sdmmc *host, struct mmc_command *cmd,
+ u16 byte_cnt, u8 *buf, int buf_len, int timeout)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ u8 trans_mode;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (cmd->opcode == MMC_SEND_TUNING_BLOCK)
+ trans_mode = SD_TM_AUTO_TUNING;
+ else
+ trans_mode = SD_TM_NORMAL_READ;
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_sd_cmd(pcr, cmd);
+ sd_cmd_set_data_len(pcr, 1, byte_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, trans_mode | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd fail (err = %d)\n", err);
+ return err;
+ }
+
+ if (buf && buf_len) {
+ err = rtsx_pci_read_ppbuf(pcr, buf, buf_len);
+ if (err < 0) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_read_ppbuf fail (err = %d)\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int sd_write_data(struct realtek_pci_sdmmc *host,
+ struct mmc_command *cmd, u16 byte_cnt, u8 *buf, int buf_len,
+ int timeout)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ if (!buf)
+ buf_len = 0;
+
+ sd_send_cmd_get_rsp(host, cmd);
+ if (cmd->error)
+ return cmd->error;
+
+ if (buf && buf_len) {
+ err = rtsx_pci_write_ppbuf(pcr, buf, buf_len);
+ if (err < 0) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_write_ppbuf fail (err = %d)\n", err);
+ return err;
+ }
+ }
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_data_len(pcr, 1, byte_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TRANSFER_START | SD_TM_AUTO_WRITE_3);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd fail (err = %d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sd_read_long_data(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
{
struct rtsx_pcr *pcr = host->pcr;
struct mmc_host *mmc = host->mmc;
struct mmc_card *card = mmc->card;
+ struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
int uhs = mmc_card_uhs(card);
- int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
- u8 cfg2, trans_mode;
+ u8 cfg2 = 0;
int err;
+ int resp_type;
size_t data_len = data->blksz * data->blocks;
- if (read) {
- cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
- SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
- trans_mode = SD_TM_AUTO_READ_3;
- } else {
- cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
- SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
- trans_mode = SD_TM_AUTO_WRITE_3;
- }
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ resp_type = sd_response_type(cmd);
+ if (resp_type < 0)
+ return resp_type;
if (!uhs)
cfg2 |= SD_NO_CHECK_WAIT_CRC_TO;
rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L,
- 0xFF, (u8)data->blocks);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H,
- 0xFF, (u8)(data->blocks >> 8));
-
+ sd_cmd_set_sd_cmd(pcr, cmd);
+ sd_cmd_set_data_len(pcr, data->blocks, data->blksz);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
DMA_DONE_INT, DMA_DONE_INT);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3,
- 0xFF, (u8)(data_len >> 24));
+ 0xFF, (u8)(data_len >> 24));
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2,
- 0xFF, (u8)(data_len >> 16));
+ 0xFF, (u8)(data_len >> 16));
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1,
- 0xFF, (u8)(data_len >> 8));
+ 0xFF, (u8)(data_len >> 8));
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len);
- if (read) {
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK,
- DMA_DIR_FROM_CARD | DMA_EN | DMA_512);
- } else {
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK,
- DMA_DIR_TO_CARD | DMA_EN | DMA_512);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | DMA_512);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2 | resp_type);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TRANSFER_START | SD_TM_AUTO_READ_2);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+ rtsx_pci_send_cmd_no_wait(pcr);
+
+ err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 1, 10000);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ sd_clear_error(host);
+ return err;
}
+ return 0;
+}
+
+static int sd_write_long_data(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_card *card = mmc->card;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ int uhs = mmc_card_uhs(card);
+ u8 cfg2;
+ int err;
+ size_t data_len = data->blksz * data->blocks;
+
+ sd_send_cmd_get_rsp(host, cmd);
+ if (cmd->error)
+ return cmd->error;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
+
+ if (!uhs)
+ cfg2 |= SD_NO_CHECK_WAIT_CRC_TO;
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_data_len(pcr, data->blocks, data->blksz);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ DMA_DONE_INT, DMA_DONE_INT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3,
+ 0xFF, (u8)(data_len >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2,
+ 0xFF, (u8)(data_len >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1,
+ 0xFF, (u8)(data_len >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | DMA_512);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
0x01, RING_BUFFER);
-
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
- trans_mode | SD_TRANSFER_START);
+ SD_TRANSFER_START | SD_TM_AUTO_WRITE_3);
rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
SD_TRANSFER_END, SD_TRANSFER_END);
-
rtsx_pci_send_cmd_no_wait(pcr);
-
- err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read, 10000);
+ err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 0, 10000);
if (err < 0) {
sd_clear_error(host);
return err;
@@ -511,6 +553,23 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
return 0;
}
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (host->sg_count < 0) {
+ data->error = host->sg_count;
+ dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n",
+ __func__, host->sg_count);
+ return data->error;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ return sd_read_long_data(host, mrq);
+
+ return sd_write_long_data(host, mrq);
+}
+
static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
{
rtsx_pci_write_register(host->pcr, SD_CFG1,
@@ -528,10 +587,7 @@ static void sd_normal_rw(struct realtek_pci_sdmmc *host,
{
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
- u8 _cmd[5], *buf;
-
- _cmd[0] = 0x40 | (u8)cmd->opcode;
- put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1]));
+ u8 *buf;
buf = kzalloc(data->blksz, GFP_NOIO);
if (!buf) {
@@ -543,7 +599,7 @@ static void sd_normal_rw(struct realtek_pci_sdmmc *host,
if (host->initial_mode)
sd_disable_initial_mode(host);
- cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf,
+ cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf,
data->blksz, 200);
if (host->initial_mode)
@@ -553,7 +609,7 @@ static void sd_normal_rw(struct realtek_pci_sdmmc *host,
} else {
sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz);
- cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf,
+ cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf,
data->blksz, 200);
}
@@ -653,14 +709,14 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
u8 opcode, u8 sample_point)
{
int err;
- u8 cmd[5] = {0};
+ struct mmc_command cmd = {0};
err = sd_change_phase(host, sample_point, true);
if (err < 0)
return err;
- cmd[0] = 0x40 | opcode;
- err = sd_read_data(host, cmd, 0x40, NULL, 0, 100);
+ cmd.opcode = opcode;
+ err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
if (err < 0) {
/* Wait till SD DATA IDLE */
sd_wait_data_idle(host);
@@ -727,6 +783,12 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
return 0;
}
+static inline int sdio_extblock_cmd(struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ return (cmd->opcode == SD_IO_RW_EXTENDED) && (data->blksz == 512);
+}
+
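A standalone sketch, not part of the patch, of the dispatch rule that sd_request() applies further below: commands without data go through the register path, block reads/writes and 512-byte SDIO CMD53 transfers take the DMA path, and other data commands use the ping-pong buffer. The opcode numbers are the standard SD/MMC ones; the helper names are illustrative.

#include <stdio.h>
#include <stdbool.h>

#define MMC_READ_SINGLE_BLOCK		17
#define MMC_READ_MULTIPLE_BLOCK		18
#define MMC_WRITE_BLOCK			24
#define MMC_WRITE_MULTIPLE_BLOCK	25
#define SD_IO_RW_EXTENDED		53	/* SDIO CMD53 */

static bool is_multi(unsigned int op)
{
	return op == MMC_READ_MULTIPLE_BLOCK || op == MMC_WRITE_MULTIPLE_BLOCK;
}

static const char *pick_path(unsigned int opcode, unsigned int blksz,
			     unsigned int blocks)
{
	if (blksz * blocks == 0)
		return "command only (no data)";
	if (is_multi(opcode) ||
	    opcode == MMC_READ_SINGLE_BLOCK || opcode == MMC_WRITE_BLOCK ||
	    (opcode == SD_IO_RW_EXTENDED && blksz == 512))
		return "DMA (ring buffer)";
	return "ping-pong buffer";
}

int main(void)
{
	printf("CMD18, 512x8 -> %s\n", pick_path(18, 512, 8));
	printf("CMD53, 512x4 -> %s\n", pick_path(53, 512, 4));
	printf("CMD53,  64x1 -> %s\n", pick_path(53, 64, 1));
	printf("CMD6,   64x1 -> %s\n", pick_path(6, 64, 1));
	return 0;
}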
static inline int sd_rw_cmd(struct mmc_command *cmd)
{
return mmc_op_multi(cmd->opcode) ||
@@ -748,7 +810,7 @@ static void sd_request(struct work_struct *work)
unsigned int data_size = 0;
int err;
- if (host->eject) {
+ if (host->eject || !sd_get_cd_int(host)) {
cmd->error = -ENOMEDIUM;
goto finish;
}
@@ -776,17 +838,15 @@ static void sd_request(struct work_struct *work)
if (mrq->data)
data_size = data->blocks * data->blksz;
- if (!data_size || sd_rw_cmd(cmd)) {
+ if (!data_size) {
sd_send_cmd_get_rsp(host, cmd);
+ } else if (sd_rw_cmd(cmd) || sdio_extblock_cmd(cmd, data)) {
+ cmd->error = sd_rw_multi(host, mrq);
+ if (!host->using_cookie)
+ sdmmc_post_req(host->mmc, host->mrq, 0);
- if (!cmd->error && data_size) {
- sd_rw_multi(host, mrq);
- if (!host->using_cookie)
- sdmmc_post_req(host->mmc, host->mrq, 0);
-
- if (mmc_op_multi(cmd->opcode) && mrq->stop)
- sd_send_cmd_get_rsp(host, mrq->stop);
- }
+ if (mmc_op_multi(cmd->opcode) && mrq->stop)
+ sd_send_cmd_get_rsp(host, mrq->stop);
} else {
sd_normal_rw(host, mrq);
}
@@ -801,8 +861,10 @@ static void sd_request(struct work_struct *work)
mutex_unlock(&pcr->pcr_mutex);
finish:
- if (cmd->error)
- dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+ if (cmd->error) {
+ dev_dbg(sdmmc_dev(host), "CMD %d 0x%08x error(%d)\n",
+ cmd->opcode, cmd->arg, cmd->error);
+ }
mutex_lock(&host->host_mutex);
host->mrq = NULL;
@@ -820,7 +882,7 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
mutex_unlock(&host->host_mutex);
- if (sd_rw_cmd(mrq->cmd))
+ if (sd_rw_cmd(mrq->cmd) || sdio_extblock_cmd(mrq->cmd, data))
host->using_cookie = sd_pre_dma_transfer(host, data, false);
queue_work(host->workq, &host->work);
@@ -1066,7 +1128,7 @@ static int sdmmc_get_cd(struct mmc_host *mmc)
u32 val;
if (host->eject)
- return -ENOMEDIUM;
+ return cd;
mutex_lock(&pcr->pcr_mutex);
@@ -1317,6 +1379,7 @@ static void rtsx_pci_sdmmc_card_event(struct platform_device *pdev)
{
struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+ host->cookie = -1;
mmc_detect_change(host->mmc, 0);
}
@@ -1349,6 +1412,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
host->pcr = pcr;
host->mmc = mmc;
host->pdev = pdev;
+ host->cookie = -1;
host->power_state = SDMMC_POWER_OFF;
INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index e3e56d35f0ee..a45ed39d062c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -158,7 +158,7 @@ static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
host = c->host;
- /* Platform specific code during emmc proble slot goes here */
+ /* Platform specific code during emmc probe slot goes here */
if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
@@ -179,7 +179,7 @@ static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
host = c->host;
- /* Platform specific code during emmc proble slot goes here */
+ /* Platform specific code during sdio probe slot goes here */
return 0;
}
@@ -195,7 +195,7 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
host = c->host;
- /* Platform specific code during emmc proble slot goes here */
+ /* Platform specific code during sd probe slot goes here */
return 0;
}
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
{ },
};
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "INT33BB" },
{ "INT33C6" },
{ "INT3436" },
+ { "INT344D" },
{ "PNP0D40" },
{ },
};
@@ -446,18 +448,13 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
return sdhci_runtime_resume_host(c->host);
}
-static int sdhci_acpi_runtime_idle(struct device *dev)
-{
- return 0;
-}
-
#endif
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
.suspend = sdhci_acpi_suspend,
.resume = sdhci_acpi_resume,
SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
- sdhci_acpi_runtime_resume, sdhci_acpi_runtime_idle)
+ sdhci_acpi_runtime_resume, NULL)
};
static struct platform_driver sdhci_acpi_driver = {
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index e7e4fbdcbfe0..34bb8f92586e 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -254,7 +254,9 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
kona_dev = sdhci_pltfm_priv(pltfm_priv);
mutex_init(&kona_dev->write_lock);
- mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_pltfm_free;
if (!host->mmc->f_max) {
dev_err(&pdev->dev, "Missing max-freq for SDHCI cfg\n");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index af1f7c0f9545..10ef8244a239 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1080,10 +1080,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_clk;
pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);
+ pm_runtime_enable(&pdev->dev);
return 0;
@@ -1103,16 +1103,15 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
- sdhci_remove_host(host, dead);
-
- pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
- if (!IS_ENABLED(CONFIG_PM)) {
- clk_disable_unprepare(imx_data->clk_per);
- clk_disable_unprepare(imx_data->clk_ipg);
- clk_disable_unprepare(imx_data->clk_ahb);
- }
+ sdhci_remove_host(host, dead);
+
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 8872c85c63d4..17fe02ed6672 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -276,6 +276,14 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
}
+static void esdhc_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
static const struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl,
.read_w = esdhc_readw,
@@ -290,7 +298,7 @@ static const struct sdhci_ops sdhci_esdhc_ops = {
.platform_init = esdhc_of_platform_init,
.adma_workaround = esdhci_of_adma_workaround,
.set_bus_width = esdhc_pltfm_set_bus_width,
- .reset = sdhci_reset,
+ .reset = esdhc_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -362,13 +370,19 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
}
/* call to generic mmc_of_parse to support additional capabilities */
- mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
mmc_of_parse_voltage(np, &host->ocr_mask);
ret = sdhci_add_host(host);
if (ret)
- sdhci_pltfm_free(pdev);
+ goto err;
+ return 0;
+ err:
+ sdhci_pltfm_free(pdev);
return ret;
}
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 03427755b902..29eaff78238e 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
},
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
@@ -1342,11 +1367,6 @@ static int sdhci_pci_runtime_resume(struct device *dev)
return 0;
}
-static int sdhci_pci_runtime_idle(struct device *dev)
-{
- return 0;
-}
-
#else /* CONFIG_PM */
#define sdhci_pci_suspend NULL
@@ -1358,7 +1378,7 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
.suspend = sdhci_pci_suspend,
.resume = sdhci_pci_resume,
SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
- sdhci_pci_runtime_resume, sdhci_pci_runtime_idle)
+ sdhci_pci_runtime_resume, NULL)
};
/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d57c3d169914..1ec684d06d54 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -21,6 +21,9 @@
#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
/*
* PCI registers
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 45238871192d..b5103a247bc1 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -62,6 +62,7 @@ struct sdhci_pxa {
struct clk *clk_core;
struct clk *clk_io;
u8 power_mode;
+ void __iomem *sdio3_conf_reg;
};
/*
@@ -72,6 +73,14 @@ struct sdhci_pxa {
#define SDHCI_WINDOW_BASE(i) (0x84 + ((i) << 3))
#define SDHCI_MAX_WIN_NUM 8
+/*
+ * Fields below belong to SDIO3 Configuration Register (third register
+ * region for the Armada 38x flavor)
+ */
+
+#define SDIO3_CONF_CLK_INV BIT(0)
+#define SDIO3_CONF_SD_FB_CLK BIT(2)
+
static int mv_conf_mbus_windows(struct platform_device *pdev,
const struct mbus_dram_target_info *dram)
{
@@ -118,6 +127,51 @@ static int mv_conf_mbus_windows(struct platform_device *pdev,
return 0;
}
+static int armada_38x_quirks(struct platform_device *pdev,
+ struct sdhci_host *host)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct resource *res;
+
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "conf-sdio3");
+ if (res) {
+ pxa->sdio3_conf_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pxa->sdio3_conf_reg))
+ return PTR_ERR(pxa->sdio3_conf_reg);
+ } else {
+ /*
+ * According to erratum 'FE-2946959', both SDR50 and DDR50
+ * modes require specific clock adjustments in the SDIO3
+ * Configuration register; if the adjustment is not done,
+ * remove them from the capabilities.
+ */
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+
+ dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
+ }
+
+ /*
+ * According to erratum 'ERR-7878951', the Armada 38x SDHCI
+ * controller has capabilities that differ from the ones shown
+ * in its registers.
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (of_property_read_bool(np, "no-1-8-v")) {
+ host->caps &= ~SDHCI_CAN_VDD_180;
+ host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+ } else {
+ host->caps &= ~SDHCI_CAN_VDD_330;
+ }
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_USE_SDR50_TUNING);
+
+ return 0;
+}
+
static void pxav3_reset(struct sdhci_host *host, u8 mask)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
@@ -194,6 +248,8 @@ static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
u16 ctrl_2;
/*
@@ -223,6 +279,24 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
break;
}
+ /*
+ * Update SDIO3 Configuration register according to erratum
+ * FE-2946959
+ */
+ if (pxa->sdio3_conf_reg) {
+ u8 reg_val = readb(pxa->sdio3_conf_reg);
+
+ if (uhs == MMC_TIMING_UHS_SDR50 ||
+ uhs == MMC_TIMING_UHS_DDR50) {
+ reg_val &= ~SDIO3_CONF_CLK_INV;
+ reg_val |= SDIO3_CONF_SD_FB_CLK;
+ } else {
+ reg_val |= SDIO3_CONF_CLK_INV;
+ reg_val &= ~SDIO3_CONF_SD_FB_CLK;
+ }
+ writeb(reg_val, pxa->sdio3_conf_reg);
+ }
+
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
dev_dbg(mmc_dev(host->mmc),
"%s uhs = %d, ctrl_2 = %04X\n",
@@ -268,8 +342,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
if (!pdata)
return NULL;
- of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
- if (clk_delay_cycles > 0)
+ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
+ &clk_delay_cycles))
pdata->clk_delay_cycles = clk_delay_cycles;
return pdata;
@@ -300,13 +374,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (IS_ERR(host))
return PTR_ERR(host);
- if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
- ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
- if (ret < 0)
- goto err_mbus_win;
- }
-
-
pltfm_host = sdhci_priv(host);
pltfm_host->priv = pxa;
@@ -328,6 +395,15 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+ ret = armada_38x_quirks(pdev, host);
+ if (ret < 0)
+ goto err_clk_get;
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+ }
+
match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
if (match) {
ret = mmc_of_parse(host->mmc);
@@ -366,10 +442,11 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
}
}
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);
ret = sdhci_add_host(host);
@@ -392,15 +469,14 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
return 0;
err_add_host:
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
err_of_parse:
err_cd_req:
+err_mbus_win:
clk_disable_unprepare(pxa->clk_io);
- if (!IS_ERR(pxa->clk_core))
- clk_disable_unprepare(pxa->clk_core);
+ clk_disable_unprepare(pxa->clk_core);
err_clk_get:
-err_mbus_win:
sdhci_pltfm_free(pdev);
return ret;
}
@@ -412,12 +488,13 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
struct sdhci_pxa *pxa = pltfm_host->priv;
pm_runtime_get_sync(&pdev->dev);
- sdhci_remove_host(host, 1);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ sdhci_remove_host(host, 1);
clk_disable_unprepare(pxa->clk_io);
- if (!IS_ERR(pxa->clk_core))
- clk_disable_unprepare(pxa->clk_core);
+ clk_disable_unprepare(pxa->clk_core);
sdhci_pltfm_free(pdev);
@@ -458,11 +535,11 @@ static int sdhci_pxav3_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = pltfm_host->priv;
- unsigned long flags;
+ int ret;
- spin_lock_irqsave(&host->lock, flags);
- host->runtime_suspended = true;
- spin_unlock_irqrestore(&host->lock, flags);
+ ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
clk_disable_unprepare(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
@@ -476,17 +553,12 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = pltfm_host->priv;
- unsigned long flags;
clk_prepare_enable(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
- spin_lock_irqsave(&host->lock, flags);
- host->runtime_suspended = false;
- spin_unlock_irqrestore(&host->lock, flags);
-
- return 0;
+ return sdhci_runtime_resume_host(host);
}
#endif
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index c45b8932d843..c6d2dd7317c1 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -312,7 +313,14 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_s3c_set_clock(host, clock);
+ /* Reset SD Clock Enable */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ spin_unlock_irq(&host->lock);
ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+ spin_lock_irq(&host->lock);
if (ret != 0) {
dev_err(dev, "%s: failed to set clock rate %uHz\n",
mmc_hostname(host->mmc), clock);
@@ -607,7 +615,9 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);
- mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_req_regs;
ret = sdhci_add_host(host);
if (ret) {
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index dd29d47c07aa..f6f82ec3618d 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -15,7 +15,9 @@
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
+#define SDHCI_CLK_DELAY_SETTING 0x4C
#define SDHCI_SIRF_8BITBUS BIT(3)
+#define SIRF_TUNING_COUNT 128
struct sdhci_sirf_priv {
struct clk *clk;
@@ -49,7 +51,76 @@ static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int tuning_seq_cnt = 3;
+ u8 phase, tuned_phases[SIRF_TUNING_COUNT];
+ u8 tuned_phase_cnt = 0;
+ int rc, longest_range = 0;
+ int start = -1, end = 0, tuning_value = -1, range = 0;
+ u16 clock_setting;
+ struct mmc_host *mmc = host->mmc;
+
+ clock_setting = sdhci_readw(host, SDHCI_CLK_DELAY_SETTING);
+ clock_setting &= ~0x3fff;
+
+retry:
+ phase = 0;
+ do {
+ sdhci_writel(host,
+ clock_setting | phase | (phase << 7) | (phase << 16),
+ SDHCI_CLK_DELAY_SETTING);
+
+ if (!mmc_send_tuning(mmc)) {
+ /* Tuning is successful at this tuning point */
+ tuned_phases[tuned_phase_cnt++] = phase;
+ dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
+ mmc_hostname(mmc), phase);
+ if (start == -1)
+ start = phase;
+ end = phase;
+ range++;
+ if (phase == (SIRF_TUNING_COUNT - 1)
+ && range > longest_range)
+ tuning_value = (start + end) / 2;
+ } else {
+ dev_dbg(mmc_dev(mmc), "%s: Found bad phase = %d\n",
+ mmc_hostname(mmc), phase);
+ if (range > longest_range) {
+ tuning_value = (start + end) / 2;
+ longest_range = range;
+ }
+ start = -1;
+ end = range = 0;
+ }
+ } while (++phase < ARRAY_SIZE(tuned_phases));
+
+ if (tuned_phase_cnt && tuning_value > 0) {
+ /*
+ * Finally set the selected phase in the delay
+ * line hw block.
+ */
+ phase = tuning_value;
+ sdhci_writel(host,
+ clock_setting | phase | (phase << 7) | (phase << 16),
+ SDHCI_CLK_DELAY_SETTING);
+
+ dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
+ mmc_hostname(mmc), phase);
+ } else {
+ if (--tuning_seq_cnt)
+ goto retry;
+ /* Tuning failed */
+ dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
+ mmc_hostname(mmc));
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
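The new sdhci_sirf_execute_tuning() selects the midpoint of the longest run of passing delay phases. A user-space sketch of just that selection rule, with hypothetical pass/fail data (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdbool.h>

#define NUM_PHASES 128	/* SIRF_TUNING_COUNT in the driver */

static int pick_tuning_phase(const bool pass[NUM_PHASES])
{
	int start = -1, end = 0, range = 0;
	int longest_range = 0, tuning_value = -1;
	int phase;

	for (phase = 0; phase < NUM_PHASES; phase++) {
		if (pass[phase]) {
			if (start == -1)
				start = phase;
			end = phase;
			range++;
			/* the last phase also closes the current window */
			if (phase == NUM_PHASES - 1 && range > longest_range)
				tuning_value = (start + end) / 2;
		} else {
			if (range > longest_range) {
				tuning_value = (start + end) / 2;
				longest_range = range;
			}
			start = -1;
			end = range = 0;
		}
	}
	return tuning_value;	/* -1 means no usable window was found */
}

int main(void)
{
	bool pass[NUM_PHASES] = { false };
	int i;

	for (i = 40; i < 90; i++)	/* pretend phases 40..89 pass */
		pass[i] = true;

	printf("selected phase: %d\n", pick_tuning_phase(pass));
	return 0;
}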
static struct sdhci_ops sdhci_sirf_ops = {
+ .platform_execute_tuning = sdhci_sirf_execute_tuning,
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_sirf_get_max_clk,
.set_bus_width = sdhci_sirf_set_bus_width,
@@ -138,9 +209,6 @@ static int sdhci_sirf_remove(struct platform_device *pdev)
sdhci_pltfm_unregister(pdev);
- if (gpio_is_valid(priv->gpio_cd))
- mmc_gpio_free_cd(host->mmc);
-
clk_disable_unprepare(priv->clk);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 328f348c7243..882b07e9667e 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -78,10 +78,9 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
ret = mmc_of_parse(host->mmc);
-
if (ret) {
dev_err(&pdev->dev, "Failed mmc_of_parse\n");
- return ret;
+ goto err_of;
}
clk_prepare_enable(clk);
@@ -108,6 +107,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
err_out:
clk_disable_unprepare(clk);
+err_of:
sdhci_pltfm_free(pdev);
return ret;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 59797106af93..f3778d58d1cd 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -41,6 +41,7 @@
#define NVQUIRK_DISABLE_SDR50 BIT(3)
#define NVQUIRK_DISABLE_SDR104 BIT(4)
#define NVQUIRK_DISABLE_DDR50 BIT(5)
+#define NVQUIRK_SHADOW_XFER_MODE_REG BIT(6)
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
@@ -67,6 +68,31 @@ static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
return readw(host->ioaddr + reg);
}
+static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+ * Postpone this write; it must be issued together with the
+ * command write handled below.
+ */
+ pltfm_host->xfer_mode_shadow = val;
+ return;
+ case SDHCI_COMMAND:
+ writel((val << 16) | pltfm_host->xfer_mode_shadow,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ return;
+ }
+ }
+
+ writew(val, host->ioaddr + reg);
+}
+
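The shadowing above works because SDHCI_TRANSFER_MODE (offset 0x0C) and SDHCI_COMMAND (offset 0x0E) occupy the low and high halves of the same 32-bit word, so the deferred transfer-mode value and the command can be issued in one 32-bit store. A standalone illustration (not kernel code; the register values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regs[4] = { 0 };		/* fake 16-byte register window */
	uint16_t xfer_mode_shadow = 0x0023;	/* value postponed earlier */
	uint16_t command = 0x123a;		/* value written to SDHCI_COMMAND */

	/* one 32-bit store at offset 0x0C updates both halves together */
	regs[0x0c / 4] = ((uint32_t)command << 16) | xfer_mode_shadow;

	printf("TRANSFER_MODE = 0x%04x, COMMAND = 0x%04x\n",
	       (unsigned int)(regs[3] & 0xffff),
	       (unsigned int)(regs[3] >> 16));
	return 0;
}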
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -147,6 +173,7 @@ static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
static const struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
+ .write_w = tegra_sdhci_writew,
.write_l = tegra_sdhci_writel,
.set_clock = sdhci_set_clock,
.set_bus_width = tegra_sdhci_set_bus_width,
@@ -201,7 +228,8 @@ static struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
.nvquirks = NVQUIRK_DISABLE_SDR50 |
NVQUIRK_DISABLE_DDR50 |
- NVQUIRK_DISABLE_SDR104,
+ NVQUIRK_DISABLE_SDR104 |
+ NVQUIRK_SHADOW_XFER_MODE_REG,
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cbb245b58538..0ad412a4876f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -53,6 +53,9 @@ static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+ struct mmc_data *data,
+ struct sdhci_host_next *next);
#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -259,8 +262,6 @@ static void sdhci_reinit(struct sdhci_host *host)
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
- host->mmc->max_blk_count =
- (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
}
sdhci_enable_card_detection(host);
}
@@ -507,9 +508,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
goto fail;
BUG_ON(host->align_addr & host->align_mask);
- host->sg_count = dma_map_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len, direction);
- if (host->sg_count == 0)
+ host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
+ if (host->sg_count < 0)
goto unmap_align;
desc = host->adma_table;
@@ -533,8 +533,6 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
- WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
- (PAGE_SIZE - offset));
memcpy(align, buffer, offset);
sdhci_kunmap_atomic(buffer, &flags);
}
@@ -641,8 +639,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
(sg_dma_address(sg) & host->align_mask);
buffer = sdhci_kmap_atomic(sg, &flags);
- WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
- (PAGE_SIZE - size));
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);
@@ -651,8 +647,9 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
}
}
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, direction);
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -848,11 +845,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
} else {
int sg_cnt;
- sg_cnt = dma_map_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len,
- (data->flags & MMC_DATA_READ) ?
- DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
+ sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
if (sg_cnt == 0) {
/*
* This only happens when someone fed
@@ -911,7 +904,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
static void sdhci_set_transfer_mode(struct sdhci_host *host,
struct mmc_command *cmd)
{
- u16 mode;
+ u16 mode = 0;
struct mmc_data *data = cmd->data;
if (data == NULL) {
@@ -929,9 +922,11 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
WARN_ON(!host->data);
- mode = SDHCI_TRNS_BLK_CNT_EN;
+ if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
+ mode = SDHCI_TRNS_BLK_CNT_EN;
+
if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
- mode |= SDHCI_TRNS_MULTI;
+ mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
/*
* If we are sending CMD23, CMD12 never gets sent
* on successful completion (so no Auto-CMD12).
@@ -965,8 +960,10 @@ static void sdhci_finish_data(struct sdhci_host *host)
if (host->flags & SDHCI_USE_ADMA)
sdhci_adma_table_post(host, data);
else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, (data->flags & MMC_DATA_READ) ?
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
@@ -1273,6 +1270,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
spin_unlock_irq(&host->lock);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
return;
}
@@ -1353,6 +1356,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_runtime_pm_get(host);
+ present = mmc_gpio_get_cd(host->mmc);
+
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->mrq != NULL);
@@ -1381,7 +1386,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
* zero: cd-gpio is used, and card is removed
* one: cd-gpio is used, and card is present
*/
- present = mmc_gpio_get_cd(host->mmc);
if (present < 0) {
/* If polling, assume that the card is always present. */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1625,7 +1629,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
* signalling timeout and CRC errors even on CMD0. Resetting
* it on each ios seems to solve the problem.
*/
- if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+ if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
mmiowb();
@@ -1827,6 +1831,10 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
ctrl |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ /* Some controllers need to do more when switching */
+ if (host->ops->voltage_switch)
+ host->ops->voltage_switch(host);
+
/* 1.8V regulator output should be stable within 5 ms */
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (ctrl & SDHCI_CTRL_VDD_180)
@@ -1880,6 +1888,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
return !(present_state & SDHCI_DATA_LVL_MASK);
}
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= SDHCI_HS400_TUNING;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+
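sdhci_prepare_hs400_tuning() and sdhci_execute_tuning() below hand over the "this pass is for HS400" state through a host flag: prepare sets it, execute reads and clears it and, because HS400 is not expected to need periodic re-tuning, drops the re-tuning period to zero for that pass. A tiny standalone model of the handshake (names and values illustrative, not part of the patch):

#include <stdio.h>

#define FLAG_HS400_TUNING	(1 << 0)

struct model_host {
	unsigned int flags;
	unsigned int tuning_count;	/* re-tuning period in seconds */
};

static void prepare_hs400_tuning(struct model_host *host)
{
	host->flags |= FLAG_HS400_TUNING;
}

static void execute_tuning(struct model_host *host)
{
	int hs400 = host->flags & FLAG_HS400_TUNING;
	unsigned int tuning_count = host->tuning_count;

	host->flags &= ~FLAG_HS400_TUNING;	/* one-shot flag */
	if (hs400)
		tuning_count = 0;		/* no periodic re-tuning */

	printf("tuning run: hs400=%d, re-tune every %u s%s\n",
	       hs400 ? 1 : 0, tuning_count,
	       tuning_count ? "" : " (timer not armed)");
}

int main(void)
{
	struct model_host host = { 0, 10 };

	execute_tuning(&host);		/* ordinary HS200/SDR104 tuning */
	prepare_hs400_tuning(&host);
	execute_tuning(&host);		/* HS400 pass: timer stays off */
	return 0;
}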
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1907,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
int tuning_loop_counter = MAX_TUNING_LOOP;
int err = 0;
unsigned long flags;
+ unsigned int tuning_count = 0;
+ bool hs400_tuning;
sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
+ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+ host->flags &= ~SDHCI_HS400_TUNING;
+
+ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+ tuning_count = host->tuning_count;
+
/*
* The Host Controller needs tuning only in case of SDR104 mode
* and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1927,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
* tuning function has to be executed.
*/
switch (host->timing) {
+ /* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
+ err = -EINVAL;
+ goto out_unlock;
+
case MMC_TIMING_MMC_HS200:
+ /*
+ * Periodic re-tuning for HS400 is not expected to be needed, so
+ * disable it here.
+ */
+ if (hs400_tuning)
+ tuning_count = 0;
+ break;
+
case MMC_TIMING_UHS_SDR104:
break;
@@ -1911,9 +1951,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* FALLTHROUGH */
default:
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_runtime_pm_put(host);
- return 0;
+ goto out_unlock;
}
if (host->ops->platform_execute_tuning) {
@@ -1925,6 +1963,8 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+ ctrl |= SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/*
@@ -2037,24 +2077,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
out:
- /*
- * If this is the very first time we are here, we start the retuning
- * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
- * flag won't be set, we check this condition before actually starting
- * the timer.
- */
- if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+ if (tuning_count) {
host->flags |= SDHCI_USING_RETUNING_TIMER;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
- /* Tuning mode 1 limits the maximum data length to 4MB */
- mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
- } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- /* Reload the new initial value for timer */
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
+ mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
}
/*
@@ -2070,6 +2097,7 @@ out:
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
@@ -2106,19 +2134,93 @@ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
}
}
+static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mrq->data->host_cookie = 0;
+ }
+}
+
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+ struct mmc_data *data,
+ struct sdhci_host_next *next)
+{
+ int sg_count;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next ||
+ (!next && data->host_cookie != host->next_data.cookie)) {
+ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ } else {
+ sg_count = host->next_data.sg_count;
+ host->next_data.sg_count = 0;
+ }
+
+
+ if (sg_count == 0)
+ return -EINVAL;
+
+ if (next) {
+ next->sg_count = sg_count;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->sg_count = sg_count;
+
+ return sg_count;
+}
+
+static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (mrq->data->host_cookie) {
+ mrq->data->host_cookie = 0;
+ return;
+ }
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ if (sdhci_pre_dma_transfer(host,
+ mrq->data,
+ &host->next_data) < 0)
+ mrq->data->host_cookie = 0;
+}
+
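The new pre_req/post_req callbacks let the MMC core map the next request's scatterlist while the current transfer is still running; the cookie records that the mapping was done ahead of time so sdhci_prepare_data() can skip dma_map_sg(), and post_req unmaps and clears it afterwards. A standalone model of that handshake, not kernel code and with made-up names:

#include <stdio.h>

struct fake_data {
	int host_cookie;	/* 0 means "not pre-mapped" */
	int sg_count;
};

struct fake_host {
	int next_cookie;	/* last cookie handed out by pre_req */
	int next_sg_count;	/* mapping prepared by pre_req */
};

static int fake_map(struct fake_data *d)
{
	(void)d;		/* a real driver would walk d's scatterlist */
	printf("dma_map_sg() called\n");
	return 4;		/* pretend four segments were mapped */
}

static void pre_req(struct fake_host *h, struct fake_data *d)
{
	d->sg_count = fake_map(d);
	h->next_sg_count = d->sg_count;
	d->host_cookie = ++h->next_cookie;
}

static void request(struct fake_host *h, struct fake_data *d)
{
	if (d->host_cookie && d->host_cookie == h->next_cookie)
		d->sg_count = h->next_sg_count;	/* reuse the mapping */
	else
		d->sg_count = fake_map(d);	/* slow path: map now */
	printf("issue transfer with %d segments\n", d->sg_count);
}

static void post_req(struct fake_data *d)
{
	if (d->host_cookie)
		printf("dma_unmap_sg() called\n");
	d->host_cookie = 0;
}

int main(void)
{
	struct fake_host h = { 0, 0 };
	struct fake_data d = { 0, 0 };

	pre_req(&h, &d);	/* prepared while a previous request runs */
	request(&h, &d);	/* no second dma_map_sg() here */
	post_req(&d);
	return 0;
}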
static void sdhci_card_event(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ int present;
/* First check if client has provided their own card event */
if (host->ops->card_event)
host->ops->card_event(host);
+ present = sdhci_do_get_cd(host);
+
spin_lock_irqsave(&host->lock, flags);
/* Check host->mrq first in case we are runtime suspended */
- if (host->mrq && !sdhci_do_get_cd(host)) {
+ if (host->mrq && !present) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
@@ -2136,12 +2238,15 @@ static void sdhci_card_event(struct mmc_host *mmc)
static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
+ .post_req = sdhci_post_req,
+ .pre_req = sdhci_pre_req,
.set_ios = sdhci_set_ios,
.get_cd = sdhci_get_cd,
.get_ro = sdhci_get_ro,
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
@@ -2766,9 +2871,9 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
+ sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
sdhci_do_set_ios(host, &host->mmc->ios);
- sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
if ((host_flags & SDHCI_PV_ENABLED) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
spin_lock_irqsave(&host->lock, flags);
@@ -2992,6 +3097,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk = host->ops->get_max_clock(host);
}
+ host->next_data.cookie = 1;
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
@@ -3260,8 +3366,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_segs = SDHCI_MAX_SEGS;
/*
- * Maximum number of sectors in one transfer. Limited by DMA boundary
- * size (512KiB).
+ * Maximum number of sectors in one transfer. Limited by SDMA boundary
+ * size (512KiB). Note some tuning modes impose a 4MiB limit, but the
+ * 512KiB limit is smaller anyway.
*/
mmc->max_req_size = 524288;
@@ -3310,9 +3417,9 @@ int sdhci_add_host(struct sdhci_host *host)
setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
- if (host->version >= SDHCI_SPEC_300) {
- init_waitqueue_head(&host->buf_ready_int);
+ init_waitqueue_head(&host->buf_ready_int);
+ if (host->version >= SDHCI_SPEC_300) {
/* Initialize re-tuning timer */
init_timer(&host->tuning_timer);
host->tuning_timer.data = (unsigned long)host;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 41a2c34299ed..0315e1844330 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -339,6 +339,7 @@ struct sdhci_ops {
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
void (*platform_init)(struct sdhci_host *host);
void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
new file mode 100644
index 000000000000..2fe8b91481b3
--- /dev/null
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -0,0 +1,237 @@
+/*
+ * linux/drivers/mmc/host/sdhci_f_sdh30.c
+ *
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+
+#include "sdhci-pltfm.h"
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED 0x00000040
+#define F_SDH30_BUSLOCK_DMA 0x00000020
+#define F_SDH30_BUSLOCK_EN 0x00000010
+#define F_SDH30_SIN 0x00000008
+#define F_SDH30_AHB_INCR_16 0x00000004
+#define F_SDH30_AHB_INCR_8 0x00000002
+#define F_SDH30_AHB_INCR_4 0x00000001
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS 0x00010000
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN 0x00080000
+#define F_SDH30_MSEL_O_1_8 0x00040000
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST 0x00000002
+#define F_SDH30_EMMC_HS200 0x01000000
+
+#define F_SDH30_CMD_DAT_DELAY 0x200
+
+#define F_SDH30_MIN_CLOCK 400000
+
+struct f_sdhost_priv {
+ struct clk *clk_iface;
+ struct clk *clk;
+ u32 vendor_hs200;
+ struct device *dev;
+};
+
+void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctrl = 0;
+
+ usleep_range(2500, 3000);
+ ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+
+ ctrl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ usleep_range(2500, 3000);
+
+ if (priv->vendor_hs200) {
+ dev_info(priv->dev, "%s: setting hs200\n", __func__);
+ ctrl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctrl |= priv->vendor_hs200;
+ sdhci_writel(host, ctrl, F_SDH30_ESD_CONTROL);
+ }
+
+ ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
+ ctrl |= F_SDH30_CMD_CHK_DIS;
+ sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
+}
+
+unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
+{
+ return F_SDH30_MIN_CLOCK;
+}
+
+void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
+{
+ if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
+ sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
+
+ sdhci_reset(host, mask);
+}
+
+static const struct sdhci_ops sdhci_f_sdh30_ops = {
+ .voltage_switch = sdhci_f_sdh30_soft_voltage_switch,
+ .get_min_clock = sdhci_f_sdh30_get_min_clock,
+ .reset = sdhci_f_sdh30_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ctrl = 0, ret = 0;
+ struct f_sdhost_priv *priv;
+ u32 reg = 0;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "%s: no irq specified\n", __func__);
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_host) +
+ sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ priv = sdhci_priv(host);
+ priv->dev = dev;
+
+ host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+ host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+ SDHCI_QUIRK2_TUNING_WORK_AROUND;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ sdhci_get_of_property(pdev);
+ host->hw_name = "f_sdh30";
+ host->ops = &sdhci_f_sdh30_ops;
+ host->irq = irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err;
+ }
+
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
+
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+
+ /* init vendor specific regs */
+ ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
+ ctrl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
+ F_SDH30_AHB_INCR_4;
+ ctrl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
+ sdhci_writew(host, ctrl, F_SDH30_AHB_CONFIG);
+
+ reg = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ sdhci_writel(host, reg & ~F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL);
+ msleep(20);
+ sdhci_writel(host, reg | F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL);
+
+ reg = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (reg & SDHCI_CAN_DO_8BIT)
+ priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(priv->clk);
+err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+err:
+ sdhci_free_host(host);
+ return ret;
+}
+
+static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+
+ sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ clk_disable_unprepare(priv->clk_iface);
+ clk_disable_unprepare(priv->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id f_sdh30_dt_ids[] = {
+ { .compatible = "fujitsu,mb86s70-sdhci-3.0" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+
+static struct platform_driver sdhci_f_sdh30_driver = {
+ .driver = {
+ .name = "f_sdh30",
+ .of_match_table = f_sdh30_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_f_sdh30_probe,
+ .remove = sdhci_f_sdh30_remove,
+};
+
+module_platform_driver(sdhci_f_sdh30_driver);
+
+MODULE_DESCRIPTION("F_SDH30 SD Card Controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("FUJITSU SEMICONDUCTOR LTD.");
+MODULE_ALIAS("platform:f_sdh30");
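
One detail of the probe above worth spelling out: sdhci_alloc_host(dev, n) reserves n extra bytes directly behind struct sdhci_host in the same allocation, and sdhci_priv(host) returns a pointer to that area, which is how priv is obtained. A minimal sketch of the idea (illustration only; example_to_priv() is a hypothetical helper, not part of the patch):

static inline struct f_sdhost_priv *example_to_priv(struct sdhci_host *host)
{
	/* layout: [ struct mmc_host | struct sdhci_host | private area ] */
	return sdhci_priv(host);
}
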
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 00c8ebdf8ec7..6906a905cd54 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -35,10 +35,13 @@
#define EXT_ACC 0xe4
+#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
+
struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
unsigned long capabilities;
unsigned long capabilities2;
+ enum dma_slave_buswidth dma_buswidth;
dma_addr_t dma_rx_offset;
};
@@ -58,6 +61,7 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
TMIO_MMC_CLK_ACTUAL,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+ .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
};
@@ -84,16 +88,43 @@ struct sh_mobile_sdhi {
struct tmio_mmc_dma dma_priv;
};
+static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
+{
+ u32 val;
+
+ /*
+ * see also
+ * sh_mobile_sdhi_of_data :: dma_buswidth
+ */
+ switch (sd_ctrl_read16(host, CTL_VERSION)) {
+ case 0x490C:
+ val = (width == 32) ? 0x0001 : 0x0000;
+ break;
+ case 0xCB0D:
+ val = (width == 32) ? 0x0000 : 0x0001;
+ break;
+ default:
+ /* nothing to do */
+ return;
+ }
+
+ sd_ctrl_write16(host, EXT_ACC, val);
+}
+
static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
int ret = clk_prepare_enable(priv->clk);
if (ret < 0)
return ret;
*f = clk_get_rate(priv->clk);
+
+	/* enable 16bit data access on SDBUF by default */
+ sh_mobile_sdhi_sdbuf_width(host, 16);
+
return 0;
}
@@ -101,7 +132,7 @@ static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
clk_disable_unprepare(priv->clk);
}
@@ -113,7 +144,7 @@ static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
udelay(1);
if (!timeout) {
- dev_warn(host->pdata->dev, "timeout waiting for SD bus idle\n");
+ dev_warn(&host->pdev->dev, "timeout waiting for SD bus idle\n");
return -EBUSY;
}
@@ -156,14 +187,13 @@ static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static void sh_mobile_sdhi_cd_wakeup(const struct platform_device *pdev)
+static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
{
- mmc_detect_change(platform_get_drvdata(pdev), msecs_to_jiffies(100));
-}
+ sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
-static const struct sh_mobile_sdhi_ops sdhi_ops = {
- .cd_wakeup = sh_mobile_sdhi_cd_wakeup,
-};
+	/* enable 32bit access in DMA mode if possible */
+ sh_mobile_sdhi_sdbuf_width(host, enable ? 32 : 16);
+}
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
@@ -177,7 +207,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
int irq, ret, i = 0;
bool multiplexed_isr = true;
struct tmio_mmc_dma *dma_priv;
- u16 ver;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -192,26 +221,31 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
- if (p) {
- if (p->init) {
- ret = p->init(pdev, &sdhi_ops);
- if (ret)
- return ret;
- }
- }
-
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- goto eclkget;
+ goto eprobe;
}
- mmc_data->clk_enable = sh_mobile_sdhi_clk_enable;
- mmc_data->clk_disable = sh_mobile_sdhi_clk_disable;
+ host = tmio_mmc_host_alloc(pdev);
+ if (!host) {
+ ret = -ENOMEM;
+ goto eprobe;
+ }
+
+ host->dma = dma_priv;
+ host->write16_hook = sh_mobile_sdhi_write16_hook;
+ host->clk_enable = sh_mobile_sdhi_clk_enable;
+ host->clk_disable = sh_mobile_sdhi_clk_disable;
+ host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
+ /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
+ if (resource_size(res) > 0x100)
+ host->bus_shift = 1;
+ else
+ host->bus_shift = 0;
+
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
- mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
- mmc_data->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
if (p) {
mmc_data->flags = p->tmio_flags;
mmc_data->ocr_mask = p->tmio_ocr_mask;
@@ -231,11 +265,10 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
dma_priv->slave_id_rx = p->dma_slave_rx;
}
}
-
- dma_priv->alignment_shift = 1; /* 2-byte alignment */
dma_priv->filter = shdma_chan_filter;
+ dma_priv->enable = sh_mobile_sdhi_enable_dma;
- mmc_data->dma = dma_priv;
+ mmc_data->alignment_shift = 1; /* 2-byte alignment */
/*
* All SDHI blocks support 2-byte and larger block sizes in 4-bit
@@ -258,33 +291,18 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
- /*
- * All SDHI have DMA control register
- */
- mmc_data->flags |= TMIO_MMC_HAVE_CTL_DMA_REG;
-
if (of_id && of_id->data) {
const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
mmc_data->flags |= of_data->tmio_flags;
mmc_data->capabilities |= of_data->capabilities;
mmc_data->capabilities2 |= of_data->capabilities2;
- dma_priv->dma_rx_offset = of_data->dma_rx_offset;
+ mmc_data->dma_rx_offset = of_data->dma_rx_offset;
+ dma_priv->dma_buswidth = of_data->dma_buswidth;
}
- /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
- mmc_data->bus_shift = resource_size(res) >> 9;
-
- ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
+ ret = tmio_mmc_host_probe(host, mmc_data);
if (ret < 0)
- goto eprobe;
-
- /*
- * FIXME:
- * this Workaround can be more clever method
- */
- ver = sd_ctrl_read16(host, CTL_VERSION);
- if (ver == 0xCB0D)
- sd_ctrl_write16(host, EXT_ACC, 1);
+ goto efree;
/*
* Allow one or more specific (named) ISRs or
@@ -351,10 +369,9 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
eirq:
tmio_mmc_host_remove(host);
+efree:
+ tmio_mmc_host_free(host);
eprobe:
-eclkget:
- if (p && p->cleanup)
- p->cleanup(pdev);
return ret;
}
@@ -362,13 +379,9 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
tmio_mmc_host_remove(host);
- if (p && p->cleanup)
- p->cleanup(pdev);
-
return 0;
}
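
Taken together with the tmio_mmc.h and tmio_mmc_pio.c hunks further down, the conversion above settles into a common pattern for tmio glue drivers: allocate the host, hang the per-host hooks off it, probe, and free the host again on failure. A condensed, hypothetical sketch of that flow (the example_* names are placeholders, not functions from this patch):

static int example_glue_probe(struct platform_device *pdev)
{
	struct tmio_mmc_data *pdata = dev_get_platdata(&pdev->dev);
	struct tmio_mmc_host *host;
	int ret;

	host = tmio_mmc_host_alloc(pdev);
	if (!host)
		return -ENOMEM;

	/* per-host hooks now live on the host, not in platform data */
	host->bus_shift = 0;
	host->clk_enable = example_clk_enable;
	host->clk_disable = example_clk_disable;

	ret = tmio_mmc_host_probe(host, pdata);
	if (ret)
		tmio_mmc_host_free(host);	/* undo the allocation on failure */

	return ret;
}
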
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 15cb8b7ffc34..6af0a28ba37d 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -252,7 +252,7 @@ static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
unsigned long expire = jiffies + msecs_to_jiffies(250);
u32 rval;
- mmc_writel(host, REG_CMDR, SDXC_HARDWARE_RESET);
+ mmc_writel(host, REG_GCTRL, SDXC_HARDWARE_RESET);
do {
rval = mmc_readl(host, REG_GCTRL);
} while (time_before(jiffies, expire) && (rval & SDXC_HARDWARE_RESET));
@@ -310,7 +310,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
}
pdes[0].config |= SDXC_IDMAC_DES0_FD;
- pdes[i - 1].config = SDXC_IDMAC_DES0_OWN | SDXC_IDMAC_DES0_LD;
+ pdes[i - 1].config |= SDXC_IDMAC_DES0_LD | SDXC_IDMAC_DES0_ER;
+ pdes[i - 1].config &= ~SDXC_IDMAC_DES0_DIC;
+ pdes[i - 1].buf_addr_ptr2 = 0;
/*
* Avoid the io-store starting the idmac hitting io-mem before the
@@ -570,6 +572,15 @@ static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id)
}
dev_err(mmc_dev(host->mmc), "data error, sending stop command\n");
+
+ /*
+	 * We will never have more than one outstanding request,
+	 * and we do not complete the request until after
+	 * we've cleared host->manual_stop_mrq, so we do not need
+	 * to take the spinlock in this function.
+	 * Additionally, this function contains wait states, so
+	 * holding the lock here would be a very bad idea.
+ */
sunxi_mmc_send_manual_stop(host, mrq);
spin_lock_irqsave(&host->lock, iflags);
@@ -616,7 +627,7 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
struct mmc_ios *ios)
{
- u32 rate, oclk_dly, rval, sclk_dly, src_clk;
+ u32 rate, oclk_dly, rval, sclk_dly;
int ret;
rate = clk_round_rate(host->clk_mmc, ios->clock);
@@ -661,14 +672,6 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
sclk_dly = 4;
}
- src_clk = clk_get_rate(clk_get_parent(host->clk_mmc));
- if (src_clk >= 300000000 && src_clk <= 400000000) {
- if (oclk_dly)
- oclk_dly--;
- if (sclk_dly)
- sclk_dly--;
- }
-
clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly);
return sunxi_mmc_oclk_onoff(host, 1);
@@ -766,6 +769,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
unsigned long iflags;
u32 imask = SDXC_INTERRUPT_ERROR_BIT;
u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f);
+ bool wait_dma = host->wait_dma;
int ret;
/* Check for set_ios errors (should never happen) */
@@ -816,7 +820,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (cmd->data->flags & MMC_DATA_WRITE)
cmd_val |= SDXC_WRITE;
else
- host->wait_dma = true;
+ wait_dma = true;
} else {
imask |= SDXC_COMMAND_DONE;
}
@@ -850,6 +854,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
host->mrq = mrq;
+ host->wait_dma = wait_dma;
mmc_writel(host, REG_IMASK, host->sdio_imask | imask);
mmc_writel(host, REG_CARG, cmd->arg);
mmc_writel(host, REG_CMDR, cmd_val);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 2ca0afaab792..f746df493892 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -88,14 +88,19 @@ static int tmio_mmc_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
- pdata->bus_shift = resource_size(res) >> 10;
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
- ret = tmio_mmc_host_probe(&host, pdev, pdata);
- if (ret)
+ host = tmio_mmc_host_alloc(pdev);
+ if (!host)
goto cell_disable;
+ /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
+ host->bus_shift = resource_size(res) >> 10;
+
+ ret = tmio_mmc_host_probe(host, pdata);
+ if (ret)
+ goto host_free;
+
ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING,
dev_name(&pdev->dev), host);
if (ret)
@@ -108,6 +113,8 @@ static int tmio_mmc_probe(struct platform_device *pdev)
host_remove:
tmio_mmc_host_remove(host);
+host_free:
+ tmio_mmc_host_free(host);
cell_disable:
if (cell->disable)
cell->disable(pdev);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index a34ecbe1c1ad..fc3805ed69d1 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -16,6 +16,7 @@
#ifndef TMIO_MMC_H
#define TMIO_MMC_H
+#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/mmc/tmio.h>
#include <linux/mutex.h>
@@ -39,6 +40,17 @@
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
struct tmio_mmc_data;
+struct tmio_mmc_host;
+
+struct tmio_mmc_dma {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
+ int slave_id_tx;
+ int slave_id_rx;
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+};
struct tmio_mmc_host {
void __iomem *ctl;
@@ -56,9 +68,11 @@ struct tmio_mmc_host {
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
+ unsigned long bus_shift;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
+ struct tmio_mmc_dma *dma;
/* DMA support */
bool force_pio;
@@ -83,10 +97,17 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
+
+ int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
+ void (*clk_disable)(struct platform_device *pdev);
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
-int tmio_mmc_host_probe(struct tmio_mmc_host **host,
- struct platform_device *pdev,
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+void tmio_mmc_host_free(struct tmio_mmc_host *host);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -151,19 +172,19 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->pdata->bus_shift));
+ return readw(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->pdata->bus_shift), buf, count);
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->pdata->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->pdata->bus_shift)) << 16;
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
@@ -171,21 +192,21 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val
/* If there is a hook and it returns non-zero then there
* is an error and the write should be skipped
*/
- if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr))
+ if (host->write16_hook && host->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->pdata->bus_shift));
+ writew(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->pdata->bus_shift), buf, count);
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
- writew(val, host->ctl + (addr << host->pdata->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->pdata->bus_shift));
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 7d077388b9eb..331bb618e398 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -28,8 +28,8 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
if (!host->chan_tx || !host->chan_rx)
return;
- if (host->pdata->flags & TMIO_MMC_HAVE_CTL_DMA_REG)
- sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
+ if (host->dma->enable)
+ host->dma->enable(host, enable);
}
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
@@ -49,11 +49,10 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
- struct tmio_mmc_data *pdata = host->pdata;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
- unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+ unsigned int align = (1 << host->pdata->alignment_shift) - 1;
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
@@ -126,11 +125,10 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
- struct tmio_mmc_data *pdata = host->pdata;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
- unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+ unsigned int align = (1 << host->pdata->alignment_shift) - 1;
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
@@ -262,8 +260,8 @@ out:
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (!pdata->dma || (!host->pdev->dev.of_node &&
- (!pdata->dma->chan_priv_tx || !pdata->dma->chan_priv_rx)))
+ if (!host->dma || (!host->pdev->dev.of_node &&
+ (!host->dma->chan_priv_tx || !host->dma->chan_priv_rx)))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -280,7 +278,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- pdata->dma->filter, pdata->dma->chan_priv_tx,
+ host->dma->filter, host->dma->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -288,18 +286,20 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->chan_tx)
return;
- if (pdata->dma->chan_priv_tx)
- cfg.slave_id = pdata->dma->slave_id_tx;
+ if (host->dma->chan_priv_tx)
+ cfg.slave_id = host->dma->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
- cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift);
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
+ cfg.dst_addr_width = host->dma->dma_buswidth;
+ if (!cfg.dst_addr_width)
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
ret = dmaengine_slave_config(host->chan_tx, &cfg);
if (ret < 0)
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- pdata->dma->filter, pdata->dma->chan_priv_rx,
+ host->dma->filter, host->dma->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -307,11 +307,13 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->chan_rx)
goto ereqrx;
- if (pdata->dma->chan_priv_rx)
- cfg.slave_id = pdata->dma->slave_id_rx;
+ if (host->dma->chan_priv_rx)
+ cfg.slave_id = host->dma->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
- cfg.src_addr = cfg.dst_addr + pdata->dma->dma_rx_offset;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
+ cfg.src_addr_width = host->dma->dma_buswidth;
+ if (!cfg.src_addr_width)
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
ret = dmaengine_slave_config(host->chan_rx, &cfg);
if (ret < 0)
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 250bf8c9f998..a31c3573d386 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -835,13 +835,12 @@ fail:
static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
- struct tmio_mmc_data *pdata = host->pdata;
int ret;
- if (!pdata->clk_enable)
+ if (!host->clk_enable)
return -ENOTSUPP;
- ret = pdata->clk_enable(host->pdev, &mmc->f_max);
+ ret = host->clk_enable(host->pdev, &mmc->f_max);
if (!ret)
mmc->f_min = mmc->f_max / 512;
@@ -1005,10 +1004,9 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
unsigned int direction, int blk_size)
{
struct tmio_mmc_host *host = mmc_priv(card->host);
- struct tmio_mmc_data *pdata = host->pdata;
- if (pdata->multi_io_quirk)
- return pdata->multi_io_quirk(card, direction, blk_size);
+ if (host->multi_io_quirk)
+ return host->multi_io_quirk(card, direction, blk_size);
return blk_size;
}
@@ -1054,12 +1052,37 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
-int tmio_mmc_host_probe(struct tmio_mmc_host **host,
- struct platform_device *pdev,
- struct tmio_mmc_data *pdata)
+struct tmio_mmc_host*
+tmio_mmc_host_alloc(struct platform_device *pdev)
{
- struct tmio_mmc_host *_host;
+ struct tmio_mmc_host *host;
struct mmc_host *mmc;
+
+ mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
+ if (!mmc)
+ return NULL;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->pdev = pdev;
+
+ return host;
+}
+EXPORT_SYMBOL(tmio_mmc_host_alloc);
+
+void tmio_mmc_host_free(struct tmio_mmc_host *host)
+{
+ mmc_free_host(host->mmc);
+
+ host->mmc = NULL;
+}
+EXPORT_SYMBOL(tmio_mmc_host_free);
+
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
+ struct tmio_mmc_data *pdata)
+{
+ struct platform_device *pdev = _host->pdev;
+ struct mmc_host *mmc = _host->mmc;
struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
@@ -1067,25 +1090,17 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
tmio_mmc_of_parse(pdev, pdata);
if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
- pdata->write16_hook = NULL;
+ _host->write16_hook = NULL;
res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_ctl)
return -EINVAL;
- mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
- if (!mmc)
- return -ENOMEM;
-
ret = mmc_of_parse(mmc);
if (ret < 0)
goto host_free;
- pdata->dev = &pdev->dev;
- _host = mmc_priv(mmc);
_host->pdata = pdata;
- _host->mmc = mmc;
- _host->pdev = pdev;
platform_set_drvdata(pdev, mmc);
_host->set_pwr = pdata->set_pwr;
@@ -1192,12 +1207,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
mmc_gpiod_request_cd_irq(mmc);
}
- *host = _host;
-
return 0;
host_free:
- mmc_free_host(mmc);
return ret;
}
@@ -1222,7 +1234,6 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_disable(&pdev->dev);
iounmap(host->ctl);
- mmc_free_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
@@ -1237,8 +1248,8 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
if (host->clk_cache)
tmio_mmc_clk_stop(host);
- if (host->pdata->clk_disable)
- host->pdata->clk_disable(host->pdev);
+ if (host->clk_disable)
+ host->clk_disable(host->pdev);
return 0;
}
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index 4666262edaca..e2cdd5fb1423 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -176,7 +176,8 @@ static irqreturn_t toshsd_thread_irq(int irq, void *dev_id)
spin_lock_irqsave(&host->lock, flags);
if (!sg_miter_next(sg_miter))
- return IRQ_HANDLED;
+ goto done;
+
buf = sg_miter->addr;
/* Ensure we don't read more than one block. The chip will interrupt us
@@ -198,6 +199,7 @@ static irqreturn_t toshsd_thread_irq(int irq, void *dev_id)
sg_miter->consumed = count;
sg_miter_stop(sg_miter);
+done:
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_HANDLED;
@@ -699,18 +701,7 @@ static struct pci_driver toshsd_driver = {
.driver.pm = &toshsd_pm_ops,
};
-static int __init toshsd_drv_init(void)
-{
- return pci_register_driver(&toshsd_driver);
-}
-
-static void __exit toshsd_drv_exit(void)
-{
- pci_unregister_driver(&toshsd_driver);
-}
-
-module_init(toshsd_drv_init);
-module_exit(toshsd_drv_exit);
+module_pci_driver(toshsd_driver);
MODULE_AUTHOR("Ondrej Zary, Richard Betts");
MODULE_DESCRIPTION("Toshiba PCI Secure Digital Host Controller Interface driver");
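
module_pci_driver() replaces the hand-rolled init/exit pair removed above; it is the stock helper from <linux/pci.h> built on module_driver(). Roughly, the single line expands to the same registration boilerplate with generated names (a slightly simplified sketch, not the literal preprocessor output):

static int __init toshsd_driver_init(void)
{
	return pci_register_driver(&toshsd_driver);
}
module_init(toshsd_driver_init);

static void __exit toshsd_driver_exit(void)
{
	pci_unregister_driver(&toshsd_driver);
}
module_exit(toshsd_driver_exit);
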
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 4262296c12fa..fbabbb82b354 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -659,7 +659,7 @@ static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
static void __do_poll(struct vub300_mmc_host *vub300)
{
/* cmd_mutex is held by vub300_pollwork_thread */
- long commretval;
+ unsigned long commretval;
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
init_completion(&vub300->irqpoll_complete);
send_irqpoll(vub300);
@@ -671,8 +671,6 @@ static void __do_poll(struct vub300_mmc_host *vub300)
vub300->usb_timed_out = 1;
usb_kill_urb(vub300->command_out_urb);
usb_kill_urb(vub300->command_res_urb);
- } else if (commretval < 0) {
- vub300_queue_poll_work(vub300, 1);
} else { /* commretval > 0 */
__vub300_irqpoll_response(vub300);
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 53563955931b..55fa27ecf4e1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -49,7 +49,6 @@ static DEFINE_MUTEX(mtd_mutex);
*/
struct mtd_file_info {
struct mtd_info *mtd;
- struct inode *ino;
enum mtd_file_modes mode;
};
@@ -59,10 +58,6 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}
-static int count;
-static struct vfsmount *mnt;
-static struct file_system_type mtd_inodefs_type;
-
static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
@@ -70,7 +65,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
int ret = 0;
struct mtd_info *mtd;
struct mtd_file_info *mfi;
- struct inode *mtd_ino;
pr_debug("MTD_open\n");
@@ -78,10 +72,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
- ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
- if (ret)
- return ret;
-
mutex_lock(&mtd_mutex);
mtd = get_mtd_device(NULL, devnum);
@@ -95,43 +85,26 @@ static int mtdchar_open(struct inode *inode, struct file *file)
goto out1;
}
- mtd_ino = iget_locked(mnt->mnt_sb, devnum);
- if (!mtd_ino) {
- ret = -ENOMEM;
- goto out1;
- }
- if (mtd_ino->i_state & I_NEW) {
- mtd_ino->i_private = mtd;
- mtd_ino->i_mode = S_IFCHR;
- mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
- unlock_new_inode(mtd_ino);
- }
- file->f_mapping = mtd_ino->i_mapping;
-
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
ret = -EACCES;
- goto out2;
+ goto out1;
}
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
ret = -ENOMEM;
- goto out2;
+ goto out1;
}
- mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
mutex_unlock(&mtd_mutex);
return 0;
-out2:
- iput(mtd_ino);
out1:
put_mtd_device(mtd);
out:
mutex_unlock(&mtd_mutex);
- simple_release_fs(&mnt, &count);
return ret;
} /* mtdchar_open */
@@ -148,12 +121,9 @@ static int mtdchar_close(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE))
mtd_sync(mtd);
- iput(mfi->ino);
-
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
- simple_release_fs(&mnt, &count);
return 0;
} /* mtdchar_close */
@@ -1117,6 +1087,13 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
ret = mtd_get_unmapped_area(mtd, len, offset, flags);
return ret == -EOPNOTSUPP ? -ENODEV : ret;
}
+
+static unsigned mtdchar_mmap_capabilities(struct file *file)
+{
+ struct mtd_file_info *mfi = file->private_data;
+
+ return mtd_mmap_capabilities(mfi->mtd);
+}
#endif
/*
@@ -1160,27 +1137,10 @@ static const struct file_operations mtd_fops = {
.mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
.get_unmapped_area = mtdchar_get_unmapped_area,
+ .mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};
-static const struct super_operations mtd_ops = {
- .drop_inode = generic_delete_inode,
- .statfs = simple_statfs,
-};
-
-static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
-}
-
-static struct file_system_type mtd_inodefs_type = {
- .name = "mtd_inodefs",
- .mount = mtd_inodefs_mount,
- .kill_sb = kill_anon_super,
-};
-MODULE_ALIAS_FS("mtd_inodefs");
-
int __init init_mtdchar(void)
{
int ret;
@@ -1193,23 +1153,11 @@ int __init init_mtdchar(void)
return ret;
}
- ret = register_filesystem(&mtd_inodefs_type);
- if (ret) {
- pr_err("Can't register mtd_inodefs filesystem, error %d\n",
- ret);
- goto err_unregister_chdev;
- }
-
- return ret;
-
-err_unregister_chdev:
- __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
return ret;
}
void __exit cleanup_mtdchar(void)
{
- unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index b9000563b9f4..eacc3aac7327 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -732,8 +732,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
- concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
-
concat->subdev[0] = subdev[0];
for (i = 1; i < num_devs; i++) {
@@ -761,14 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
subdev[i]->flags & MTD_WRITEABLE;
}
- /* only permit direct mapping if the BDIs are all the same
- * - copy-mapping is still permitted
- */
- if (concat->mtd.backing_dev_info !=
- subdev[i]->backing_dev_info)
- concat->mtd.backing_dev_info =
- &default_backing_dev_info;
-
concat->mtd.size += subdev[i]->size;
concat->mtd.ecc_stats.badblocks +=
subdev[i]->ecc_stats.badblocks;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 4c611871d7e6..0ec4d6ea1e4b 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -43,33 +43,7 @@
#include "mtdcore.h"
-/*
- * backing device capabilities for non-mappable devices (such as NAND flash)
- * - permits private mappings, copies are taken of the data
- */
-static struct backing_dev_info mtd_bdi_unmappable = {
- .capabilities = BDI_CAP_MAP_COPY,
-};
-
-/*
- * backing device capabilities for R/O mappable devices (such as ROM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-static struct backing_dev_info mtd_bdi_ro_mappable = {
- .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
- BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
-};
-
-/*
- * backing device capabilities for writable mappable devices (such as RAM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-static struct backing_dev_info mtd_bdi_rw_mappable = {
- .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
- BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
- BDI_CAP_WRITE_MAP),
+static struct backing_dev_info mtd_bdi = {
};
static int mtd_cls_suspend(struct device *dev, pm_message_t state);
@@ -365,6 +339,23 @@ static struct device_type mtd_devtype = {
.release = mtd_release,
};
+#ifndef CONFIG_MMU
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
+{
+ switch (mtd->type) {
+ case MTD_RAM:
+ return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+ NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+ case MTD_ROM:
+ return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+ NOMMU_MAP_READ;
+ default:
+ return NOMMU_MAP_COPY;
+ }
+}
+EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
+#endif
+
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
@@ -380,19 +371,7 @@ int add_mtd_device(struct mtd_info *mtd)
struct mtd_notifier *not;
int i, error;
- if (!mtd->backing_dev_info) {
- switch (mtd->type) {
- case MTD_RAM:
- mtd->backing_dev_info = &mtd_bdi_rw_mappable;
- break;
- case MTD_ROM:
- mtd->backing_dev_info = &mtd_bdi_ro_mappable;
- break;
- default:
- mtd->backing_dev_info = &mtd_bdi_unmappable;
- break;
- }
- }
+ mtd->backing_dev_info = &mtd_bdi;
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
@@ -1237,17 +1216,9 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
- ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
- if (ret)
- goto err_bdi1;
-
- ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
- if (ret)
- goto err_bdi2;
-
- ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
+ ret = mtd_bdi_init(&mtd_bdi, "mtd");
if (ret)
- goto err_bdi3;
+ goto err_bdi;
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
@@ -1260,11 +1231,7 @@ static int __init init_mtd(void)
out_procfs:
if (proc_mtd)
remove_proc_entry("mtd", NULL);
-err_bdi3:
- bdi_destroy(&mtd_bdi_ro_mappable);
-err_bdi2:
- bdi_destroy(&mtd_bdi_unmappable);
-err_bdi1:
+err_bdi:
class_unregister(&mtd_class);
err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -1277,9 +1244,7 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
- bdi_destroy(&mtd_bdi_unmappable);
- bdi_destroy(&mtd_bdi_ro_mappable);
- bdi_destroy(&mtd_bdi_rw_mappable);
+ bdi_destroy(&mtd_bdi);
}
module_init(init_mtd);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3e3a7d074d5..e779de315ade 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -378,7 +378,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.name = name;
slave->mtd.owner = master->owner;
- slave->mtd.backing_dev_info = master->backing_dev_info;
/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
* to have the same data be in two different partitions.
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 6b6bce28bd63..db2c05b6fe7f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -42,11 +42,12 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/hdreg.h>
+#include <linux/scatterlist.h>
#include <asm/div64.h>
#include "ubi-media.h"
@@ -67,6 +68,11 @@ struct ubiblock_param {
char name[UBIBLOCK_PARAM_LEN+1];
};
+struct ubiblock_pdu {
+ struct work_struct work;
+ struct ubi_sgl usgl;
+};
+
/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;
@@ -84,11 +90,10 @@ struct ubiblock {
struct request_queue *rq;
struct workqueue_struct *wq;
- struct work_struct work;
struct mutex dev_mutex;
- spinlock_t queue_lock;
struct list_head list;
+ struct blk_mq_tag_set tag_set;
};
/* Linked list of all ubiblock instances */
@@ -181,31 +186,20 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
return NULL;
}
-static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
- int leb, int offset, int len)
+static int ubiblock_read(struct ubiblock_pdu *pdu)
{
- int ret;
-
- ret = ubi_read(dev->desc, leb, buffer, offset, len);
- if (ret) {
- dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)",
- ret, leb, offset, len);
- return ret;
- }
- return 0;
-}
+ int ret, leb, offset, bytes_left, to_read;
+ u64 pos;
+ struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct ubiblock *dev = req->q->queuedata;
-static int ubiblock_read(struct ubiblock *dev, char *buffer,
- sector_t sec, int len)
-{
- int ret, leb, offset;
- int bytes_left = len;
- int to_read = len;
- u64 pos = sec << 9;
+ to_read = blk_rq_bytes(req);
+ pos = blk_rq_pos(req) << 9;
/* Get LEB:offset address to read from */
offset = do_div(pos, dev->leb_size);
leb = pos;
+ bytes_left = to_read;
while (bytes_left) {
/*
@@ -215,11 +209,10 @@ static int ubiblock_read(struct ubiblock *dev, char *buffer,
if (offset + to_read > dev->leb_size)
to_read = dev->leb_size - offset;
- ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
- if (ret)
+ ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
+ if (ret < 0)
return ret;
- buffer += to_read;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
@@ -228,79 +221,6 @@ static int ubiblock_read(struct ubiblock *dev, char *buffer,
return 0;
}
-static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
-{
- int len, ret;
- sector_t sec;
-
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
- get_capacity(req->rq_disk))
- return -EIO;
-
- if (rq_data_dir(req) != READ)
- return -ENOSYS; /* Write not implemented */
-
- sec = blk_rq_pos(req);
- len = blk_rq_cur_bytes(req);
-
- /*
- * Let's prevent the device from being removed while we're doing I/O
- * work. Notice that this means we serialize all the I/O operations,
- * but it's probably of no impact given the NAND core serializes
- * flash access anyway.
- */
- mutex_lock(&dev->dev_mutex);
- ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
- mutex_unlock(&dev->dev_mutex);
-
- return ret;
-}
-
-static void ubiblock_do_work(struct work_struct *work)
-{
- struct ubiblock *dev =
- container_of(work, struct ubiblock, work);
- struct request_queue *rq = dev->rq;
- struct request *req;
- int res;
-
- spin_lock_irq(rq->queue_lock);
-
- req = blk_fetch_request(rq);
- while (req) {
-
- spin_unlock_irq(rq->queue_lock);
- res = do_ubiblock_request(dev, req);
- spin_lock_irq(rq->queue_lock);
-
- /*
- * If we're done with this request,
- * we need to fetch a new one
- */
- if (!__blk_end_request_cur(req, res))
- req = blk_fetch_request(rq);
- }
-
- spin_unlock_irq(rq->queue_lock);
-}
-
-static void ubiblock_request(struct request_queue *rq)
-{
- struct ubiblock *dev;
- struct request *req;
-
- dev = rq->queuedata;
-
- if (!dev)
- while ((req = blk_fetch_request(rq)) != NULL)
- __blk_end_request_all(req, -ENODEV);
- else
- queue_work(dev->wq, &dev->work);
-}
-
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
struct ubiblock *dev = bdev->bd_disk->private_data;
@@ -374,6 +294,63 @@ static const struct block_device_operations ubiblock_ops = {
.getgeo = ubiblock_getgeo,
};
+static void ubiblock_do_work(struct work_struct *work)
+{
+ int ret;
+ struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
+ struct request *req = blk_mq_rq_from_pdu(pdu);
+
+ blk_mq_start_request(req);
+
+ /*
+ * It is safe to ignore the return value of blk_rq_map_sg() because
+ * the number of sg entries is limited to UBI_MAX_SG_COUNT
+ * and ubi_read_sg() will check that limit.
+ */
+ blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+
+ ret = ubiblock_read(pdu);
+ blk_mq_end_request(req, ret);
+}
+
+static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct ubiblock *dev = hctx->queue->queuedata;
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+ if (req->cmd_type != REQ_TYPE_FS)
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
+ if (rq_data_dir(req) != READ)
+ return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
+
+ ubi_sgl_init(&pdu->usgl);
+ queue_work(dev->wq, &pdu->work);
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static int ubiblock_init_request(void *data, struct request *req,
+ unsigned int hctx_idx,
+ unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+ sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
+ INIT_WORK(&pdu->work, ubiblock_do_work);
+
+ return 0;
+}
+
+static struct blk_mq_ops ubiblock_mq_ops = {
+ .queue_rq = ubiblock_queue_rq,
+ .init_request = ubiblock_init_request,
+ .map_queue = blk_mq_map_queue,
+};
+
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -417,14 +394,28 @@ int ubiblock_create(struct ubi_volume_info *vi)
set_capacity(gd, disk_capacity);
dev->gd = gd;
- spin_lock_init(&dev->queue_lock);
- dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
- if (!dev->rq) {
- dev_err(disk_to_dev(gd), "blk_init_queue failed");
- ret = -ENODEV;
+ dev->tag_set.ops = &ubiblock_mq_ops;
+ dev->tag_set.queue_depth = 64;
+ dev->tag_set.numa_node = NUMA_NO_NODE;
+ dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
+ dev->tag_set.driver_data = dev;
+ dev->tag_set.nr_hw_queues = 1;
+
+ ret = blk_mq_alloc_tag_set(&dev->tag_set);
+ if (ret) {
+ dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
goto out_put_disk;
}
+ dev->rq = blk_mq_init_queue(&dev->tag_set);
+ if (IS_ERR(dev->rq)) {
+ dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
+ ret = PTR_ERR(dev->rq);
+ goto out_free_tags;
+ }
+ blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
+
dev->rq->queuedata = dev;
dev->gd->queue = dev->rq;
@@ -437,7 +428,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = -ENOMEM;
goto out_free_queue;
}
- INIT_WORK(&dev->work, ubiblock_do_work);
mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
@@ -451,6 +441,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
out_free_queue:
blk_cleanup_queue(dev->rq);
+out_free_tags:
+ blk_mq_free_tag_set(&dev->tag_set);
out_put_disk:
put_disk(dev->gd);
out_free_dev:
@@ -461,8 +453,13 @@ out_free_dev:
static void ubiblock_cleanup(struct ubiblock *dev)
{
+	/* Stop new requests from arriving */
del_gendisk(dev->gd);
+ /* Flush pending work */
+ destroy_workqueue(dev->wq);
+ /* Finally destroy the blk queue */
blk_cleanup_queue(dev->rq);
+ blk_mq_free_tag_set(&dev->tag_set);
dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);
}
@@ -490,9 +487,6 @@ int ubiblock_remove(struct ubi_volume_info *vi)
list_del(&dev->list);
mutex_unlock(&devices_mutex);
- /* Flush pending work and stop this workqueue */
- destroy_workqueue(dev->wq);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
kfree(dev);
@@ -583,22 +577,28 @@ open_volume_desc(const char *name, int ubi_num, int vol_id)
return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}
-static int __init ubiblock_create_from_param(void)
+static void __init ubiblock_create_from_param(void)
{
- int i, ret;
+ int i, ret = 0;
struct ubiblock_param *p;
struct ubi_volume_desc *desc;
struct ubi_volume_info vi;
+ /*
+ * If there is an error creating one of the ubiblocks, continue on to
+ * create the following ubiblocks. This helps in a circumstance where
+ * the kernel command-line specifies multiple block devices and some
+ * may be broken, but we still want the working ones to come up.
+ */
for (i = 0; i < ubiblock_devs; i++) {
p = &ubiblock_param[i];
desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
if (IS_ERR(desc)) {
- pr_err("UBI: block: can't open volume, err=%ld\n",
- PTR_ERR(desc));
- ret = PTR_ERR(desc);
- break;
+ pr_err(
+ "UBI: block: can't open volume on ubi%d_%d, err=%ld",
+ p->ubi_num, p->vol_id, PTR_ERR(desc));
+ continue;
}
ubi_get_volume_info(desc, &vi);
@@ -606,12 +606,12 @@ static int __init ubiblock_create_from_param(void)
ret = ubiblock_create(&vi);
if (ret) {
- pr_err("UBI: block: can't add '%s' volume, err=%d\n",
- vi.name, ret);
- break;
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d",
+ vi.name, p->ubi_num, p->vol_id, ret);
+ continue;
}
}
- return ret;
}
static void ubiblock_remove_all(void)
@@ -620,8 +620,6 @@ static void ubiblock_remove_all(void)
struct ubiblock *dev;
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
- /* Flush pending work and stop workqueue */
- destroy_workqueue(dev->wq);
/* The module is being forcefully removed */
WARN_ON(dev->desc);
/* Remove from device list */
@@ -639,10 +637,12 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /* Attach block devices from 'block=' module param */
- ret = ubiblock_create_from_param();
- if (ret)
- goto err_remove;
+ /*
+ * Attach block devices from 'block=' module param.
+ * Even if one block device in the param list fails to come up,
+ * still allow the module to load and leave any others up.
+ */
+ ubiblock_create_from_param();
/*
* Block devices are only created upon user requests, so we ignore
@@ -655,7 +655,6 @@ int __init ubiblock_init(void)
err_unreg:
unregister_blkdev(ubiblock_major, "ubiblock");
-err_remove:
ubiblock_remove_all();
return ret;
}
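
A note on the blk-mq conversion above: because tag_set.cmd_size is sizeof(struct ubiblock_pdu), blk-mq allocates the pdu in the same chunk as each struct request, immediately behind it, so the two conversion helpers used in the new code are plain pointer arithmetic. A small sketch of that relationship (the helpers are the real blk-mq API; the layout described in the comments is the assumption being illustrated, and the example_* wrappers are hypothetical):

static struct ubiblock_pdu *example_req_to_pdu(struct request *req)
{
	/* the pdu area starts right after struct request */
	return blk_mq_rq_to_pdu(req);
}

static struct request *example_pdu_to_req(struct ubiblock_pdu *pdu)
{
	/* steps back over struct request to recover the owning request */
	return blk_mq_rq_from_pdu(pdu);
}
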
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 3405be46ebe9..ba01a8d22d28 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -923,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- ubi_err(ubi, "ubi%d already exists", ubi_num);
+ ubi_err(ubi, "already exists");
return -EEXIST;
}
}
@@ -973,7 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
mutex_init(&ubi->fm_mutex);
init_rwsem(&ubi->fm_sem);
- ubi_msg(ubi, "attaching mtd%d to ubi%d", mtd->index, ubi_num);
+ ubi_msg(ubi, "attaching mtd%d", mtd->index);
err = io_init(ubi, max_beb_per1024);
if (err)
@@ -1428,7 +1428,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
}
if (len == 0) {
- pr_err("UBI warning: empty 'mtd=' parameter - ignored\n");
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 3410ea8109f8..d647e504f9b1 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -48,26 +48,25 @@
/**
* get_exclusive - get exclusive access to an UBI volume.
- * @ubi: UBI device description object
* @desc: volume descriptor
*
* This function changes UBI volume open mode to "exclusive". Returns previous
* mode value (positive integer) in case of success and a negative error code
* in case of failure.
*/
-static int get_exclusive(struct ubi_device *ubi, struct ubi_volume_desc *desc)
+static int get_exclusive(struct ubi_volume_desc *desc)
{
int users, err;
struct ubi_volume *vol = desc->vol;
spin_lock(&vol->ubi->volumes_lock);
- users = vol->readers + vol->writers + vol->exclusive;
+ users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
ubi_assert(users > 0);
if (users > 1) {
- ubi_err(ubi, "%d users for volume %d", users, vol->vol_id);
+ ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
- vol->readers = vol->writers = 0;
+ vol->readers = vol->writers = vol->metaonly = 0;
vol->exclusive = 1;
err = desc->mode;
desc->mode = UBI_EXCLUSIVE;
@@ -87,13 +86,15 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
struct ubi_volume *vol = desc->vol;
spin_lock(&vol->ubi->volumes_lock);
- ubi_assert(vol->readers == 0 && vol->writers == 0);
+ ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
vol->exclusive = 0;
if (mode == UBI_READONLY)
vol->readers = 1;
else if (mode == UBI_READWRITE)
vol->writers = 1;
+ else if (mode == UBI_METAONLY)
+ vol->metaonly = 1;
else
vol->exclusive = 1;
spin_unlock(&vol->ubi->volumes_lock);
@@ -421,7 +422,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- err = get_exclusive(ubi, desc);
+ err = get_exclusive(desc);
if (err < 0)
break;
@@ -457,7 +458,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
- err = get_exclusive(ubi, desc);
+ err = get_exclusive(desc);
if (err < 0)
break;
@@ -734,7 +735,7 @@ static int rename_volumes(struct ubi_device *ubi,
goto out_free;
}
- re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE);
+ re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
ubi_err(ubi, "cannot open volume %d, error %d",
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index a40020cf0923..da4c79259f67 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -426,6 +426,7 @@ retry:
pnum, vol_id, lnum);
err = -EBADMSG;
} else
+ err = -EINVAL;
ubi_ro_mode(ubi);
}
goto out_free;
@@ -480,6 +481,61 @@ out_unlock:
}
/**
+ * ubi_eba_read_leb_sg - read data into a scatter gather list.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @sgl: UBI scatter gather list to store the read data
+ * @lnum: logical eraseblock number
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * This function works exactly like ubi_eba_read_leb(), but instead of
+ * storing the read data in a buffer it writes it to a UBI scatter
+ * gather list.
+ */
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check)
+{
+ int to_read;
+ int ret;
+ struct scatterlist *sg;
+
+ for (;;) {
+ ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
+ sg = &sgl->sg[sgl->list_pos];
+ if (len < sg->length - sgl->page_pos)
+ to_read = len;
+ else
+ to_read = sg->length - sgl->page_pos;
+
+ ret = ubi_eba_read_leb(ubi, vol, lnum,
+ sg_virt(sg) + sgl->page_pos, offset,
+ to_read, check);
+ if (ret < 0)
+ return ret;
+
+ offset += to_read;
+ len -= to_read;
+ if (!len) {
+ sgl->page_pos += to_read;
+ if (sgl->page_pos == sg->length) {
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ break;
+ }
+
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ return ret;
+}
+
+/**
* recover_peb - recover from write failure.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to recover
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b56672bf3294..db3defdfc3c0 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1196,6 +1196,19 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
+ list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
fmh->used_peb_count = cpu_to_be32(used_peb_count);
for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 396aaa543362..ed0bcb35472f 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1419,8 +1419,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
fail:
ubi_err(ubi, "self-check failed for PEB %d", pnum);
- ubi_msg(ubi, "hex dump of the %d-%d region",
- offset, offset + len);
+ ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index f3bab669f6bb..478e00cf2d9e 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -137,7 +137,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
return ERR_PTR(-EINVAL);
if (mode != UBI_READONLY && mode != UBI_READWRITE &&
- mode != UBI_EXCLUSIVE)
+ mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
return ERR_PTR(-EINVAL);
/*
@@ -182,10 +182,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
break;
case UBI_EXCLUSIVE:
- if (vol->exclusive || vol->writers || vol->readers)
+ if (vol->exclusive || vol->writers || vol->readers ||
+ vol->metaonly)
goto out_unlock;
vol->exclusive = 1;
break;
+
+ case UBI_METAONLY:
+ if (vol->metaonly || vol->exclusive)
+ goto out_unlock;
+ vol->metaonly = 1;
+ break;
}
get_device(&vol->dev);
vol->ref_count += 1;
@@ -343,6 +350,10 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
break;
case UBI_EXCLUSIVE:
vol->exclusive = 0;
+ break;
+ case UBI_METAONLY:
+ vol->metaonly = 0;
+ break;
}
vol->ref_count -= 1;
spin_unlock(&ubi->volumes_lock);
@@ -355,6 +366,43 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
EXPORT_SYMBOL_GPL(ubi_close_volume);
/**
+ * leb_read_sanity_check - does sanity checks on read requests.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ *
+ * This function is used by ubi_leb_read() and ubi_leb_read_sg()
+ * to perform sanity checks.
+ */
+static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+ lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size)
+ return -EINVAL;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return 0;
+}
+
+/**
* ubi_leb_read - read data.
* @desc: volume descriptor
* @lnum: logical eraseblock number to read from
@@ -390,22 +438,10 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
- if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
- lnum >= vol->used_ebs || offset < 0 || len < 0 ||
- offset + len > vol->usable_leb_size)
- return -EINVAL;
-
- if (vol->vol_type == UBI_STATIC_VOLUME) {
- if (vol->used_ebs == 0)
- /* Empty static UBI volume */
- return 0;
- if (lnum == vol->used_ebs - 1 &&
- offset + len > vol->last_eb_bytes)
- return -EINVAL;
- }
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
- if (vol->upd_marker)
- return -EBADF;
if (len == 0)
return 0;
@@ -419,6 +455,46 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
}
EXPORT_SYMBOL_GPL(ubi_leb_read);
+
+/**
+ * ubi_leb_read_sg - read data into a scatter gather list.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @sgl: UBI scatter gather list to store the read data in
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function works exactly like ubi_leb_read(), but instead of
+ * storing the read data into a buffer it writes it to a UBI scatter gather
+ * list.
+ */
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
+
/**
* ubi_leb_write - write data.
* @desc: volume descriptor
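leb_read_sanity_check() above centralises the bounds checks that ubi_leb_read() used to do inline, so the new ubi_leb_read_sg() can share them. A small standalone sketch of the same validation, with the limits and error codes mirroring the hunk (the volume struct here is a simplified stand-in, not the kernel's struct ubi_volume):

#include <errno.h>
#include <stdio.h>

struct fake_vol {
	int used_ebs;		/* number of logical eraseblocks in use */
	int usable_leb_size;	/* payload bytes per logical eraseblock */
	int last_eb_bytes;	/* bytes used in the last LEB of a static volume */
	int is_static;
	int upd_marker;		/* set while a volume update is in flight */
};

static int read_sanity_check(const struct fake_vol *vol, int lnum,
			     int offset, int len)
{
	if (lnum < 0 || lnum >= vol->used_ebs || offset < 0 || len < 0 ||
	    offset + len > vol->usable_leb_size)
		return -EINVAL;

	if (vol->is_static) {
		if (vol->used_ebs == 0)
			return 0;		/* empty static volume */
		if (lnum == vol->used_ebs - 1 &&
		    offset + len > vol->last_eb_bytes)
			return -EINVAL;		/* read past the stored data */
	}

	if (vol->upd_marker)
		return -EBADF;			/* interrupted volume update */

	return 0;
}

int main(void)
{
	struct fake_vol v = { 4, 126976, 1000, 1, 0 };

	printf("%d\n", read_sanity_check(&v, 3, 0, 4096));	/* fails: past last_eb_bytes */
	printf("%d\n", read_sanity_check(&v, 1, 0, 4096));	/* ok */
	return 0;
}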
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index dbda77e556cb..2a45ac210b16 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -74,6 +74,8 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
for (i = 0; i < vol->used_ebs; i++) {
int size;
+ cond_resched();
+
if (i == vol->used_ebs - 1)
size = vol->last_eb_bytes;
else
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f80ffaba9058..c5be82d9d345 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -50,13 +50,13 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(ubi, fmt, ...) pr_notice("UBI-%d: %s:" fmt "\n", \
- ubi->ubi_num, __func__, ##__VA_ARGS__)
+#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
+ ubi->ubi_num, ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(ubi, fmt, ...) pr_warn("UBI-%d warning: %s: " fmt "\n", \
+#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
ubi->ubi_num, __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(ubi, fmt, ...) pr_err("UBI-%d error: %s: " fmt "\n", \
+#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
ubi->ubi_num, __func__, ##__VA_ARGS__)
/* Background thread name pattern */
@@ -261,6 +261,7 @@ struct ubi_fm_pool {
* @readers: number of users holding this volume in read-only mode
* @writers: number of users holding this volume in read-write mode
* @exclusive: whether somebody holds this volume in exclusive mode
+ * @metaonly: whether somebody is altering only the metadata of this volume
*
* @reserved_pebs: how many physical eraseblocks are reserved for this volume
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -309,6 +310,7 @@ struct ubi_volume {
int readers;
int writers;
int exclusive;
+ int metaonly;
int reserved_pebs;
int vol_type;
@@ -339,7 +341,8 @@ struct ubi_volume {
/**
* struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
* @vol: reference to the corresponding volume description object
- * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
+ * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE,
+ * or %UBI_METAONLY)
*/
struct ubi_volume_desc {
struct ubi_volume *vol;
@@ -390,7 +393,8 @@ struct ubi_debug_info {
* @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
* @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
* @vol->readers, @vol->writers, @vol->exclusive,
- * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
+ * @vol->metaonly, @vol->ref_count, @vol->mapping and
+ * @vol->eba_tbl.
* @ref_count: count of references on the UBI device
* @image_seq: image sequence number recorded on EC headers
*
@@ -791,6 +795,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum);
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len);
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
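The reworked ubi_msg()/ubi_warn()/ubi_err() macros above prefix every message with the UBI device number, and keep the calling function only in the warning and error variants. A standalone sketch of that macro pattern over printf, assuming a simplified device struct rather than the kernel's pr_* helpers:

#include <stdio.h>

#define DEV_NAME_STR "ubi"

struct dev { int num; };

/* Plain notice: device name and number only. */
#define dev_msg(d, fmt, ...) \
	printf(DEV_NAME_STR "%d: " fmt "\n", (d)->num, ##__VA_ARGS__)

/* Errors additionally record the reporting function. */
#define dev_err(d, fmt, ...) \
	fprintf(stderr, DEV_NAME_STR "%d error: %s: " fmt "\n", \
		(d)->num, __func__, ##__VA_ARGS__)

int main(void)
{
	struct dev d = { 0 };

	dev_msg(&d, "attached mtd%d", 3);
	dev_err(&d, "bad attaching information, error %d", -22);
	return 0;
}

(##__VA_ARGS__ is a GNU extension, as in the kernel macros themselves.)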
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f8fc3081bbb4..68c9c5ea676f 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -655,14 +655,13 @@ static int init_volumes(struct ubi_device *ubi,
/**
* check_av - check volume attaching information.
- * @ubi: UBI device description object
* @vol: UBI volume description object
* @av: volume attaching information
*
* This function returns zero if the volume attaching information is consistent
* to the data read from the volume table, and %-EINVAL if not.
*/
-static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol,
+static int check_av(const struct ubi_volume *vol,
const struct ubi_ainf_volume *av)
{
int err;
@@ -690,7 +689,7 @@ static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol,
return 0;
bad:
- ubi_err(ubi, "bad attaching information, error %d", err);
+ ubi_err(vol->ubi, "bad attaching information, error %d", err);
ubi_dump_av(av);
ubi_dump_vol_info(vol);
return -EINVAL;
@@ -753,7 +752,7 @@ static int check_attaching_info(const struct ubi_device *ubi,
ubi_msg(ubi, "finish volume %d removal", av->vol_id);
ubi_remove_av(ai, av);
} else if (av) {
- err = check_av(ubi, vol, av);
+ err = check_av(vol, av);
if (err)
return err;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 834f6fe1f5fa..8f7bde6a85d6 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -470,11 +470,8 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) {
- ubi_warn(ubi, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d",
- anchor, ubi->free_count, ubi->beb_rsvd_pebs);
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
goto out;
- }
if (anchor)
e = find_anchor_wl_entry(&ubi->free);
@@ -1806,11 +1803,8 @@ int ubi_thread(void *u)
for (;;) {
int err;
- if (kthread_should_stop()) {
- ubi_msg(ubi, "background thread \"%s\" should stop, PID %d",
- ubi->bgt_name, task_pid_nr(current));
+ if (kthread_should_stop())
break;
- }
if (try_to_freeze())
continue;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d6607ee9c855..84673ebcf428 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
config NETPOLL
def_bool NETCONSOLE
+ select SRCU
config NET_POLL_CONTROLLER
def_bool NETPOLL
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6c99ff0b0bdd..945f532078e9 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -78,6 +78,9 @@ static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
priv = devm_kzalloc(&pdev->dev, sizeof(struct com20020_priv),
GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
ci = (struct com20020_pci_card_info *)id->driver_data;
priv->ci = ci;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 8baa87df1738..cfc4a9c1000a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -467,11 +467,14 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
/* set the partner sync to on if the partner is in sync,
* and the port is matched
*/
- if ((port->sm_vars & AD_PORT_MATCHED)
- && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
+ if ((port->sm_vars & AD_PORT_MATCHED) &&
+ (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
partner->port_state |= AD_STATE_SYNCHRONIZATION;
- else
+ pr_debug("%s partner sync=1\n", port->slave->dev->name);
+ } else {
partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
+ pr_debug("%s partner sync=0\n", port->slave->dev->name);
+ }
}
}
@@ -726,6 +729,8 @@ static inline void __update_lacpdu_from_port(struct port *port)
lacpdu->actor_port_priority = htons(port->actor_port_priority);
lacpdu->actor_port = htons(port->actor_port_number);
lacpdu->actor_state = port->actor_oper_port_state;
+ pr_debug("update lacpdu: %s, actor port state %x\n",
+ port->slave->dev->name, port->actor_oper_port_state);
/* lacpdu->reserved_3_1 initialized
* lacpdu->tlv_type_partner_info initialized
@@ -898,7 +903,9 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
- port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;
+ if (port->aggregator->is_active)
+ port->sm_mux_state =
+ AD_MUX_COLLECTING_DISTRIBUTING;
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@@ -910,12 +917,16 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
port->sm_mux_state = AD_MUX_DETACHED;
+ } else if (port->aggregator->is_active) {
+ port->actor_oper_port_state |=
+ AD_STATE_SYNCHRONIZATION;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
- !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) {
+ !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
port->sm_mux_state = AD_MUX_ATTACHED;
} else {
/* if port state hasn't changed make
@@ -937,8 +948,10 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
/* check if the state machine was changed */
if (port->sm_mux_state != last_state) {
- pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
- port->actor_port_number, last_state,
+ pr_debug("Mux Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
+ port->actor_port_number,
+ port->slave->dev->name,
+ last_state,
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
@@ -953,7 +966,12 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
break;
case AD_MUX_ATTACHED:
- port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+ if (port->aggregator->is_active)
+ port->actor_oper_port_state |=
+ AD_STATE_SYNCHRONIZATION;
+ else
+ port->actor_oper_port_state &=
+ ~AD_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
ad_disable_collecting_distributing(port,
@@ -963,6 +981,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
case AD_MUX_COLLECTING_DISTRIBUTING:
port->actor_oper_port_state |= AD_STATE_COLLECTING;
port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
ad_enable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
@@ -1044,8 +1063,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* check if the State machine was changed or new lacpdu arrived */
if ((port->sm_rx_state != last_state) || (lacpdu)) {
- pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
- port->actor_port_number, last_state,
+ pr_debug("Rx Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
+ port->actor_port_number,
+ port->slave->dev->name,
+ last_state,
port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
@@ -1394,6 +1415,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator, update_slave_arr);
+
+ if (!port->aggregator->is_active)
+ port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
}
/* Decide if "agg" is a better choice for the new active aggregator that
@@ -2195,8 +2219,10 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
ret = RX_HANDLER_CONSUMED;
- netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
- port->actor_port_number);
+ netdev_dbg(slave->bond->dev,
+ "Received LACPDU on port %d slave %s\n",
+ port->actor_port_number,
+ slave->dev->name);
/* Protect against concurrent state machines */
spin_lock(&slave->bond->mode_lock);
ad_rx_machine(lacpdu, port);
@@ -2288,7 +2314,10 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
- netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
+ netdev_dbg(slave->bond->dev, "Port %d slave %s changed duplex\n",
+ port->actor_port_number, slave->dev->name);
+ if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
+ port->sm_vars |= AD_PORT_LACP_ENABLED;
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
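The bond_3ad.c changes above keep the actor's SYNCHRONIZATION bit tied to whether the port's aggregator is active, setting or clearing it as the mux machine moves between states. A minimal sketch of that kind of conditional flag update (the bit names are stand-ins modelled on AD_STATE_*, not the bonding API):

#include <stdio.h>

#define STATE_SYNCHRONIZATION	(1 << 3)
#define STATE_COLLECTING	(1 << 4)
#define STATE_DISTRIBUTING	(1 << 5)

struct fake_port {
	unsigned char oper_state;
	int aggregator_active;
};

/* Rough analogue of the AD_MUX_ATTACHED handling: only advertise sync
 * when the selected aggregator is the active one, and never collect or
 * distribute while merely attached. */
static void attached_update(struct fake_port *p)
{
	if (p->aggregator_active)
		p->oper_state |= STATE_SYNCHRONIZATION;
	else
		p->oper_state &= ~STATE_SYNCHRONIZATION;

	p->oper_state &= ~(STATE_COLLECTING | STATE_DISTRIBUTING);
}

int main(void)
{
	struct fake_port p = { 0, 1 };

	attached_update(&p);
	printf("oper_state=0x%02x\n", p.oper_state);
	p.aggregator_active = 0;
	attached_update(&p);
	printf("oper_state=0x%02x\n", p.oper_state);
	return 0;
}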
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0dceba1a2ba1..b979c265fc51 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,6 +77,7 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
+#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
@@ -334,7 +335,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
*
* Returns zero if carrier state does not change, nonzero if it does.
*/
-static int bond_set_carrier(struct bonding *bond)
+int bond_set_carrier(struct bonding *bond)
{
struct list_head *iter;
struct slave *slave;
@@ -789,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
new_active->delay = 0;
- new_active->link = BOND_LINK_UP;
+ bond_set_slave_link_state(new_active, BOND_LINK_UP);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -979,7 +980,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
- mask = features;
+ /* If any slave has the offload feature flag set,
+ * set the offload flag on the bond.
+ */
+ mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@@ -998,7 +1003,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
+ NETIF_F_TSO)
static void bond_compute_features(struct bonding *bond)
{
@@ -1034,7 +1039,7 @@ static void bond_compute_features(struct bonding *bond)
done:
bond_dev->vlan_features = vlan_features;
- bond_dev->hw_enc_features = enc_features;
+ bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
bond_dev->hard_header_len = max_hard_header_len;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1176,6 +1181,56 @@ static void bond_free_slave(struct slave *slave)
kfree(slave);
}
+static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
+{
+ info->bond_mode = BOND_MODE(bond);
+ info->miimon = bond->params.miimon;
+ info->num_slaves = bond->slave_cnt;
+}
+
+static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
+{
+ strcpy(info->slave_name, slave->dev->name);
+ info->link = slave->link;
+ info->state = bond_slave_state(slave);
+ info->link_failure_count = slave->link_failure_count;
+}
+
+static void bond_netdev_notify(struct net_device *dev,
+ struct netdev_bonding_info *info)
+{
+ rtnl_lock();
+ netdev_bonding_info_change(dev, info);
+ rtnl_unlock();
+}
+
+static void bond_netdev_notify_work(struct work_struct *_work)
+{
+ struct netdev_notify_work *w =
+ container_of(_work, struct netdev_notify_work, work.work);
+
+ bond_netdev_notify(w->dev, &w->bonding_info);
+ dev_put(w->dev);
+ kfree(w);
+}
+
+void bond_queue_slave_event(struct slave *slave)
+{
+ struct bonding *bond = slave->bond;
+ struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
+
+ if (!nnw)
+ return;
+
+ dev_hold(slave->dev);
+ nnw->dev = slave->dev;
+ bond_fill_ifslave(slave, &nnw->bonding_info.slave);
+ bond_fill_ifbond(bond, &nnw->bonding_info.master);
+ INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
+
+ queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -1439,19 +1494,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
- new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_link_state(new_slave,
+ BOND_LINK_BACK);
new_slave->delay = bond->params.updelay;
} else {
- new_slave->link = BOND_LINK_UP;
+ bond_set_slave_link_state(new_slave,
+ BOND_LINK_UP);
}
} else {
- new_slave->link = BOND_LINK_DOWN;
+ bond_set_slave_link_state(new_slave, BOND_LINK_DOWN);
}
} else if (bond->params.arp_interval) {
- new_slave->link = (netif_carrier_ok(slave_dev) ?
- BOND_LINK_UP : BOND_LINK_DOWN);
+ bond_set_slave_link_state(new_slave,
+ (netif_carrier_ok(slave_dev) ?
+ BOND_LINK_UP : BOND_LINK_DOWN));
} else {
- new_slave->link = BOND_LINK_UP;
+ bond_set_slave_link_state(new_slave, BOND_LINK_UP);
}
if (new_slave->link != BOND_LINK_DOWN)
@@ -1567,6 +1625,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
+ bond_queue_slave_event(new_slave);
return 0;
/* Undo stages on error */
@@ -1816,11 +1875,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
-
- info->bond_mode = BOND_MODE(bond);
- info->miimon = bond->params.miimon;
- info->num_slaves = bond->slave_cnt;
-
+ bond_fill_ifbond(bond, info);
return 0;
}
@@ -1834,10 +1889,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
- strcpy(info->slave_name, slave->dev->name);
- info->link = slave->link;
- info->state = bond_slave_state(slave);
- info->link_failure_count = slave->link_failure_count;
+ bond_fill_ifslave(slave, info);
break;
}
}
@@ -1867,7 +1919,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (link_state)
continue;
- slave->link = BOND_LINK_FAIL;
+ bond_set_slave_link_state(slave, BOND_LINK_FAIL);
slave->delay = bond->params.downdelay;
if (slave->delay) {
netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -1882,7 +1934,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_FAIL:
if (link_state) {
/* recovered before downdelay expired */
- slave->link = BOND_LINK_UP;
+ bond_set_slave_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
(bond->params.downdelay - slave->delay) *
@@ -1904,7 +1956,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (!link_state)
continue;
- slave->link = BOND_LINK_BACK;
+ bond_set_slave_link_state(slave, BOND_LINK_BACK);
slave->delay = bond->params.updelay;
if (slave->delay) {
@@ -1917,7 +1969,8 @@ static int bond_miimon_inspect(struct bonding *bond)
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
- slave->link = BOND_LINK_DOWN;
+ bond_set_slave_link_state(slave,
+ BOND_LINK_DOWN);
netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
(bond->params.updelay - slave->delay) *
bond->params.miimon,
@@ -1955,7 +2008,7 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- slave->link = BOND_LINK_UP;
+ bond_set_slave_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
primary = rtnl_dereference(bond->primary_slave);
@@ -1995,7 +2048,7 @@ static void bond_miimon_commit(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- slave->link = BOND_LINK_DOWN;
+ bond_set_slave_link_state(slave, BOND_LINK_DOWN);
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2578,7 +2631,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
- slave->link = BOND_LINK_UP;
+ bond_set_slave_link_state(slave, BOND_LINK_UP);
if (current_arp_slave) {
bond_set_slave_inactive_flags(
current_arp_slave,
@@ -2601,7 +2654,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- slave->link = BOND_LINK_DOWN;
+ bond_set_slave_link_state(slave, BOND_LINK_DOWN);
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
@@ -2680,7 +2733,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
* up when it is actually down
*/
if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
- slave->link = BOND_LINK_DOWN;
+ bond_set_slave_link_state(slave, BOND_LINK_DOWN);
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2700,7 +2753,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
if (!new_slave)
goto check_state;
- new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_link_state(new_slave, BOND_LINK_BACK);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
new_slave->last_link_up = jiffies;
@@ -3066,7 +3119,7 @@ static int bond_open(struct net_device *bond_dev)
slave != rcu_access_pointer(bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
- } else {
+ } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
bond_set_slave_active_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
}
@@ -3734,7 +3787,7 @@ out:
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
*/
-int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
+static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
@@ -3952,6 +4005,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
+ .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
+ .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
};
static const struct device_type bond_type = {
@@ -4010,7 +4065,7 @@ void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
- bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
}
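bond_queue_slave_event() above copies the slave and bond state into a heap-allocated work item and queues it, so the netdev notification runs later under rtnl_lock instead of in the caller's context. A standalone sketch of that snapshot-then-defer pattern using a simple linked list in place of the kernel workqueue (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct slave_info { char name[16]; int link_up; };

struct notify_work {
	struct notify_work *next;
	struct slave_info snapshot;	/* copied now, still valid when processed later */
};

static struct notify_work *pending;	/* stand-in for the bond workqueue */

static void queue_slave_event(const char *name, int link_up)
{
	struct notify_work *w = calloc(1, sizeof(*w));

	if (!w)
		return;			/* best effort, like the GFP_ATOMIC allocation */
	snprintf(w->snapshot.name, sizeof(w->snapshot.name), "%s", name);
	w->snapshot.link_up = link_up;
	w->next = pending;
	pending = w;
}

static void run_pending(void)	/* deferred delivery point */
{
	while (pending) {
		struct notify_work *w = pending;

		pending = w->next;
		printf("notify: %s link %s\n", w->snapshot.name,
		       w->snapshot.link_up ? "up" : "down");
		free(w);
	}
}

int main(void)
{
	queue_slave_event("eth0", 1);
	queue_slave_event("eth1", 0);
	run_pending();
	return 0;
}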
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1a61cc9b3402..4df28943d222 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -186,7 +186,7 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
{ NULL, -1, 0}
};
-static const struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
.name = "mode",
@@ -379,8 +379,7 @@ static const struct bond_option bond_opts[] = {
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
- },
- { }
+ }
};
/* Searches for an option by name */
@@ -1182,6 +1181,7 @@ static int bond_option_min_links_set(struct bonding *bond,
netdev_info(bond->dev, "Setting min links value to %llu\n",
newval->value);
bond->params.min_links = newval->value;
+ bond_set_carrier(bond);
return 0;
}
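The bond_opts[] change above sizes the table as bond_opts[BOND_OPT_LAST] and drops the empty sentinel entry, so lookups can index directly by option id and rely on designated initializers for ordering. A small standalone illustration of that table style (the enum and fields are simplified stand-ins):

#include <stdio.h>

enum opt_id { OPT_MODE, OPT_MIIMON, OPT_LAST };

struct option_desc {
	enum opt_id id;
	const char *name;
};

/* Sized by the enum: slots without an initializer are zero-filled, and
 * entries may be listed in any order without a terminating sentinel. */
static const struct option_desc opts[OPT_LAST] = {
	[OPT_MODE]   = { .id = OPT_MODE,   .name = "mode"   },
	[OPT_MIIMON] = { .id = OPT_MIIMON, .name = "miimon" },
};

static const struct option_desc *get_opt(enum opt_id id)
{
	if (id >= OPT_LAST || !opts[id].name)
		return NULL;		/* out of range or an unpopulated slot */
	return &opts[id];
}

int main(void)
{
	const struct option_desc *o = get_opt(OPT_MIIMON);

	printf("%s\n", o ? o->name : "unknown");
	return 0;
}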
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 5e40a8b68cbe..b3b922adc0e4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
cfhsi = netdev_priv(dev);
cfhsi_netlink_parms(data, cfhsi);
- dev_net_set(cfhsi->ndev, src_net);
get_ops = symbol_get(cfhsi_get_ops);
if (!get_ops) {
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index d0c2463b573f..eeb4b8b6b335 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -138,7 +138,6 @@ struct at91_devtype_data {
struct at91_priv {
struct can_priv can; /* must be the first member! */
- struct net_device *dev;
struct napi_struct napi;
void __iomem *reg_base;
@@ -1350,7 +1349,6 @@ static int at91_can_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LISTENONLY;
- priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 417d50998e31..e7a6363e736b 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -352,6 +352,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
netdev_dbg(dev, "bus-off mode interrupt\n");
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.can_stats.bus_off++;
can_bus_off(dev);
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f94a9fa60488..041525d2595c 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
c_can_irq_control(priv, false);
+ /* put ctrl to init on stop to end ongoing transmission */
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
/* deactivate pins */
pinctrl_pm_select_sleep_state(dev->dev.parent);
priv->can.state = CAN_STATE_STOPPED;
@@ -866,7 +869,7 @@ static int c_can_handle_state_change(struct net_device *dev,
case C_CAN_BUS_OFF:
/* bus-off state */
priv->can.state = CAN_STATE_BUS_OFF;
- can_bus_off(dev);
+ priv->can.can_stats.bus_off++;
break;
default:
break;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f363972cd77d..e36d10520e24 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
regmap_read(raminit->syscon, raminit->reg, &ctrl);
- /* We clear the done and start bit first. The start bit is
+ /* We clear the start bit first. The start bit is
* looking at the 0 -> 1 transition, but is not self clearing;
- * And we clear the init done bit as well.
* NOTE: DONE must be written with 1 to clear it.
+ * We can't clear the DONE bit here using regmap_update_bits()
+ * as it will bypass the write if the initial condition is START:0 DONE:1,
+ * e.g. on DRA7 which needs a START pulse.
*/
- ctrl &= ~(1 << raminit->bits.start);
- ctrl |= 1 << raminit->bits.done;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ ctrl &= ~mask; /* START = 0, DONE = 0 */
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
- ctrl &= ~(1 << raminit->bits.done);
- c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
+ /* check if START bit is 0. Ignore DONE bit for now
+ * as it can be either 0 or 1.
+ */
+ c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl);
if (enable) {
- /* Set start bit and wait for the done bit. */
+ /* Clear DONE bit & set START bit. */
ctrl |= 1 << raminit->bits.start;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
-
+ /* DONE must be written with 1 to clear it */
+ ctrl |= 1 << raminit->bits.done;
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
+ /* prevent further clearing of DONE bit */
+ ctrl &= ~(1 << raminit->bits.done);
/* clear START bit if start pulse is needed */
if (raminit->needs_pulse) {
ctrl &= ~(1 << raminit->bits.start);
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ regmap_update_bits(raminit->syscon, raminit->reg,
+ mask, ctrl);
}
ctrl |= 1 << raminit->bits.done;
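The raminit rework above has to cope with a write-1-to-clear DONE bit: a regmap_update_bits()-style helper skips the hardware write when the merged value looks unchanged, which is exactly the START:0 DONE:1 case the comment describes. A standalone model of that interaction (hw, hw_write() and update_bits() simulate the semantics assumed here; they are not the regmap API):

#include <stdio.h>

#define START	(1u << 0)
#define DONE	(1u << 1)	/* cleared only by writing 1 to it */

static unsigned int hw;		/* simulated syscon register */

static unsigned int hw_read(void) { return hw; }

static void hw_write(unsigned int val)
{
	unsigned int done = hw & DONE;

	if (val & DONE)
		done = 0;			/* write-1-to-clear */
	hw = done | (val & START);		/* START is a plain read/write bit */
}

/* Read, merge the masked bits, and skip the write when nothing appears to
 * change - the "bypass" that prevents clearing DONE this way. */
static void update_bits(unsigned int mask, unsigned int val)
{
	unsigned int old = hw_read();
	unsigned int merged = (old & ~mask) | (val & mask);

	if (merged != old)
		hw_write(merged);
}

int main(void)
{
	hw = DONE;				/* START:0 DONE:1, the DRA7 case */
	update_bits(START | DONE, DONE);	/* looks like a no-op, write skipped */
	printf("after update_bits: 0x%x\n", hw_read());
	hw_write(DONE);				/* an explicit write does clear it */
	printf("after hw_write:    0x%x\n", hw_read());
	return 0;
}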
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c486fe510f37..c11d44984036 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -535,6 +535,7 @@ static int cc770_err(struct net_device *dev, u8 status)
cc770_write_reg(priv, control, CTRL_INI);
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.state = CAN_STATE_BUS_OFF;
+ priv->can.can_stats.bus_off++;
can_bus_off(dev);
} else if (status & STAT_WARN) {
cf->can_id |= CAN_ERR_CRTL;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3ec8f6f25e5f..3c82e02e3dae 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -289,9 +289,11 @@ static void can_update_state_error_stats(struct net_device *dev,
priv->can_stats.error_passive++;
break;
case CAN_STATE_BUS_OFF:
+ priv->can_stats.bus_off++;
+ break;
default:
break;
- };
+ }
}
static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
@@ -544,7 +546,6 @@ void can_bus_off(struct net_device *dev)
netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
- priv->can_stats.bus_off++;
if (priv->restart_ms)
mod_timer(&priv->restart_timer,
@@ -807,10 +808,14 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- if (cm->flags & ~priv->ctrlmode_supported)
+
+ /* check whether changed bits are allowed to be modified */
+ if (cm->mask & ~priv->ctrlmode_supported)
return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= cm->flags;
+ priv->ctrlmode |= (cm->flags & cm->mask);
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
if (priv->ctrlmode & CAN_CTRLMODE_FD)
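The can_changelink() fix above validates cm->mask instead of cm->flags and merges only the masked bits, so userspace changes exactly the bits it names and a request touching unsupported bits is rejected even when it asks to clear them. A small standalone sketch of that mask/flags merge (the constants and request struct are simplified stand-ins for the netlink types):

#include <errno.h>
#include <stdio.h>

#define CTRLMODE_LOOPBACK	(1u << 0)
#define CTRLMODE_LISTENONLY	(1u << 1)
#define CTRLMODE_FD		(1u << 5)

struct ctrlmode_req { unsigned int mask, flags; };

static int apply_ctrlmode(unsigned int *ctrlmode, unsigned int supported,
			  const struct ctrlmode_req *cm)
{
	/* Reject if any bit the request wants to modify is unsupported. */
	if (cm->mask & ~supported)
		return -EOPNOTSUPP;

	/* Clear the bits being modified, then copy in their new values. */
	*ctrlmode &= ~cm->mask;
	*ctrlmode |= cm->flags & cm->mask;
	return 0;
}

int main(void)
{
	unsigned int supported = CTRLMODE_LOOPBACK | CTRLMODE_LISTENONLY;
	unsigned int mode = CTRLMODE_LISTENONLY;
	struct ctrlmode_req req = { .mask = CTRLMODE_LOOPBACK, .flags = CTRLMODE_LOOPBACK };

	if (!apply_ctrlmode(&mode, supported, &req))
		printf("ctrlmode=0x%x\n", mode);	/* listen-only kept, loopback added */

	req.mask = CTRLMODE_FD;				/* not supported here */
	req.flags = 0;
	printf("%d\n", apply_ctrlmode(&mode, supported, &req));
	return 0;
}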
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index b1d583ba9674..80c46ad4cee4 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -247,7 +247,6 @@ struct flexcan_devtype_data {
struct flexcan_priv {
struct can_priv can;
- struct net_device *dev;
struct napi_struct napi;
void __iomem *base;
@@ -1220,7 +1219,6 @@ static int flexcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_BERR_REPORTING;
priv->base = base;
- priv->dev = dev;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
priv->pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 1b118394907f..4dd183a3643a 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1008,6 +1008,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
if (status & SR_BS) {
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
+ mod->can.can_stats.bus_off++;
can_bus_off(dev);
} else if (status & SR_ES) {
if (rxerr >= 128 || txerr >= 128)
@@ -1678,8 +1679,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
- if (ret == 0) {
+ if (!wait_for_completion_timeout(&mod->buserror_comp, HZ)) {
netdev_info(mod->ndev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1704,8 +1704,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
- if (ret == 0) {
+ if (!wait_for_completion_timeout(&mod->termination_comp, HZ)) {
netdev_info(mod->ndev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d7bc462aafdc..2e04b3aeeb37 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -589,6 +589,7 @@ static int m_can_handle_state_change(struct net_device *dev,
/* bus-off state */
priv->can.state = CAN_STATE_BUS_OFF;
m_can_disable_all_interrupts(priv);
+ priv->can.can_stats.bus_off++;
can_bus_off(dev);
break;
default:
@@ -955,6 +956,11 @@ static struct net_device *alloc_m_can_dev(void)
priv->can.data_bittiming_const = &m_can_data_bittiming_const;
priv->can.do_set_mode = m_can_set_mode;
priv->can.do_get_berr_counter = m_can_get_berr_counter;
+
+ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+ priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+ /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index a67eb01f3028..e187ca783da0 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -505,6 +505,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
pch_can_set_rx_all(priv, 0);
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.can_stats.bus_off++;
can_bus_off(ndev);
}
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 91cd48ca0efc..7deb80dcbe8c 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -331,6 +331,7 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.state = CAN_STATE_BUS_OFF;
/* Clear interrupt condition */
writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ priv->can.can_stats.bus_off++;
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 2bf98d862302..7621f91a8a20 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -261,6 +261,7 @@ static int softing_handle_1(struct softing *card)
++priv->can.can_stats.error_passive;
else if (can_state == CAN_STATE_BUS_OFF) {
/* this calls can_close_cleanup() */
+ ++priv->can.can_stats.bus_off;
can_bus_off(netdev);
netif_stop_queue(netdev);
}
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c66d699640a9..bf63fee4e743 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -905,6 +905,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (priv->can.state == CAN_STATE_BUS_OFF) {
if (priv->can.restart_ms == 0) {
priv->force_quit = 1;
+ priv->can.can_stats.bus_off++;
can_bus_off(net);
mcp251x_hw_sleep(spi);
break;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 9a07eafe554b..e95a9e1a889f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -715,6 +715,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
/* Disable all interrupts in bus-off to avoid int hog */
hecc_write(priv, HECC_CANGIM, 0);
+ ++priv->can.can_stats.bus_off;
can_bus_off(ndev);
}
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index a77db919363c..bcb272f6c68a 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -25,7 +25,7 @@ config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
This driver adds support for Kvaser CAN/USB devices like Kvaser
- Leaf Light.
+ Leaf Light and Kvaser USBcan II.
The driver provides support for the following devices:
- Kvaser Leaf Light
@@ -46,6 +46,12 @@ config CAN_KVASER_USB
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
+ - Kvaser USBcan II HS/HS
+ - Kvaser USBcan II HS/LS
+ - Kvaser USBcan Rugged ("USBcan Rev B")
+ - Kvaser Memorator HS/HS
+ - Kvaser Memorator HS/LS
+ - Scania VCI2 (if you have the Kvaser logo on top)
If unsure, say N.
@@ -53,10 +59,18 @@ config CAN_KVASER_USB
module will be called kvaser_usb.
config CAN_PEAK_USB
- tristate "PEAK PCAN-USB/USB Pro interfaces"
+ tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
---help---
- This driver supports the PCAN-USB and PCAN-USB Pro adapters
- from PEAK-System Technik (http://www.peak-system.com).
+ This driver supports the PEAK-System Technik USB adapters that enable
+ access to the CAN bus, with respect to the CAN 2.0b and/or CAN-FD
+ standards, that is:
+
+ PCAN-USB single CAN 2.0b channel USB adapter
+ PCAN-USB Pro dual CAN 2.0b channels USB adapter
+ PCAN-USB FD single CAN-FD channel USB adapter
+ PCAN-USB Pro FD dual CAN-FD channels USB adapter
+
+ (see also http://www.peak-system.com).
config CAN_8DEV_USB
tristate "8 devices USB2CAN interface"
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 29d3f0938eb8..9376f5e5b94e 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -347,6 +347,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
dev->can.state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
+ dev->can.can_stats.bus_off++;
can_bus_off(dev->netdev);
} else if (state & SJA1000_SR_ES) {
dev->can.state = CAN_STATE_ERROR_WARNING;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index c063a54ab8dd..bacca0bd89c1 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -250,6 +250,7 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
case ESD_BUSSTATE_BUSOFF:
priv->can.state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.can_stats.bus_off++;
can_bus_off(priv->netdev);
break;
case ESD_BUSSTATE_WARN:
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 541fb7a05625..2928f7003041 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -6,10 +6,12 @@
* Parts of this driver are based on the following:
* - Kvaser linux leaf driver (version 4.78)
* - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
*
* Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
* Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
* Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
*/
#include <linux/completion.h>
@@ -30,8 +32,9 @@
#define RX_BUFFER_SIZE 3072
#define CAN_USB_CLOCK 8000000
#define MAX_NET_DEVICES 3
+#define MAX_USBCAN_NET_DEVICES 2
-/* Kvaser USB devices */
+/* Kvaser Leaf USB devices */
#define KVASER_VENDOR_ID 0x0bfd
#define USB_LEAF_DEVEL_PRODUCT_ID 10
#define USB_LEAF_LITE_PRODUCT_ID 11
@@ -56,6 +59,24 @@
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+static inline bool kvaser_is_leaf(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
+ id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+}
+
+/* Kvaser USBCan-II devices */
+#define USB_USBCAN_REVB_PRODUCT_ID 2
+#define USB_VCI2_PRODUCT_ID 3
+#define USB_USBCAN2_PRODUCT_ID 4
+#define USB_MEMORATOR_PRODUCT_ID 5
+
+static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
+ id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
+}
+
/* USB devices features */
#define KVASER_HAS_SILENT_MODE BIT(0)
#define KVASER_HAS_TXRX_ERRORS BIT(1)
@@ -73,7 +94,7 @@
#define MSG_FLAG_TX_ACK BIT(6)
#define MSG_FLAG_TX_REQUEST BIT(7)
-/* Can states */
+/* Can states (M16C CxSTRH register) */
#define M16C_STATE_BUS_RESET BIT(0)
#define M16C_STATE_BUS_ERROR BIT(4)
#define M16C_STATE_BUS_PASSIVE BIT(5)
@@ -98,7 +119,11 @@
#define CMD_START_CHIP_REPLY 27
#define CMD_STOP_CHIP 28
#define CMD_STOP_CHIP_REPLY 29
-#define CMD_GET_CARD_INFO2 32
+
+#define CMD_LEAF_GET_CARD_INFO2 32
+#define CMD_USBCAN_RESET_CLOCK 32
+#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
+
#define CMD_GET_CARD_INFO 34
#define CMD_GET_CARD_INFO_REPLY 35
#define CMD_GET_SOFTWARE_INFO 38
@@ -108,8 +133,9 @@
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
-#define CMD_USB_THROTTLE 77
-#define CMD_LOG_MESSAGE 106
+
+#define CMD_LEAF_USB_THROTTLE 77
+#define CMD_LEAF_LOG_MESSAGE 106
/* error factors */
#define M16C_EF_ACKE BIT(0)
@@ -121,6 +147,14 @@
#define M16C_EF_RCVE BIT(6)
#define M16C_EF_TRE BIT(7)
+/* Only Leaf-based devices can report M16C error factors,
+ * thus define our own error status flags for USBCANII
+ */
+#define USBCAN_ERROR_STATE_NONE 0
+#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
+#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
+#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
+
/* bittiming parameters */
#define KVASER_USB_TSEG1_MIN 1
#define KVASER_USB_TSEG1_MAX 16
@@ -137,9 +171,18 @@
#define KVASER_CTRL_MODE_SELFRECEPTION 3
#define KVASER_CTRL_MODE_OFF 4
-/* log message */
+/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
+/* Kvaser USB CAN dongles are divided into two major families:
+ * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
+ * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
+ */
+enum kvaser_usb_family {
+ KVASER_LEAF,
+ KVASER_USBCAN,
+};
+
struct kvaser_msg_simple {
u8 tid;
u8 channel;
@@ -148,30 +191,55 @@ struct kvaser_msg_simple {
struct kvaser_msg_cardinfo {
u8 tid;
u8 nchannels;
- __le32 serial_number;
- __le32 padding;
+ union {
+ struct {
+ __le32 serial_number;
+ __le32 padding;
+ } __packed leaf0;
+ struct {
+ __le32 serial_number_low;
+ __le32 serial_number_high;
+ } __packed usbcan0;
+ } __packed;
__le32 clock_resolution;
__le32 mfgdate;
u8 ean[8];
u8 hw_revision;
- u8 usb_hs_mode;
- __le16 padding2;
+ union {
+ struct {
+ u8 usb_hs_mode;
+ } __packed leaf1;
+ struct {
+ u8 padding;
+ } __packed usbcan1;
+ } __packed;
+ __le16 padding;
} __packed;
struct kvaser_msg_cardinfo2 {
u8 tid;
- u8 channel;
+ u8 reserved;
u8 pcb_id[24];
__le32 oem_unlock_code;
} __packed;
-struct kvaser_msg_softinfo {
+struct leaf_msg_softinfo {
u8 tid;
- u8 channel;
+ u8 padding0;
__le32 sw_options;
__le32 fw_version;
__le16 max_outstanding_tx;
- __le16 padding[9];
+ __le16 padding1[9];
+} __packed;
+
+struct usbcan_msg_softinfo {
+ u8 tid;
+ u8 fw_name[5];
+ __le16 max_outstanding_tx;
+ u8 padding[6];
+ __le32 fw_version;
+ __le16 checksum;
+ __le16 sw_options;
} __packed;
struct kvaser_msg_busparams {
@@ -188,36 +256,86 @@ struct kvaser_msg_tx_can {
u8 channel;
u8 tid;
u8 msg[14];
- u8 padding;
- u8 flags;
+ union {
+ struct {
+ u8 padding;
+ u8 flags;
+ } __packed leaf;
+ struct {
+ u8 flags;
+ u8 padding;
+ } __packed usbcan;
+ } __packed;
} __packed;
-struct kvaser_msg_rx_can {
+struct kvaser_msg_rx_can_header {
u8 channel;
u8 flag;
+} __packed;
+
+struct leaf_msg_rx_can {
+ u8 channel;
+ u8 flag;
+
__le16 time[3];
u8 msg[14];
} __packed;
-struct kvaser_msg_chip_state_event {
+struct usbcan_msg_rx_can {
+ u8 channel;
+ u8 flag;
+
+ u8 msg[14];
+ __le16 time;
+} __packed;
+
+struct leaf_msg_chip_state_event {
u8 tid;
u8 channel;
+
__le16 time[3];
u8 tx_errors_count;
u8 rx_errors_count;
+
u8 status;
u8 padding[3];
} __packed;
-struct kvaser_msg_tx_acknowledge {
+struct usbcan_msg_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ __le16 time;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_msg_tx_acknowledge_header {
u8 channel;
u8 tid;
+} __packed;
+
+struct leaf_msg_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+
__le16 time[3];
u8 flags;
u8 time_offset;
} __packed;
-struct kvaser_msg_error_event {
+struct usbcan_msg_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+
+ __le16 time;
+ __le16 padding;
+} __packed;
+
+struct leaf_msg_error_event {
u8 tid;
u8 flags;
__le16 time[3];
@@ -229,6 +347,18 @@ struct kvaser_msg_error_event {
u8 error_factor;
} __packed;
+struct usbcan_msg_error_event {
+ u8 tid;
+ u8 padding;
+ u8 tx_errors_count_ch0;
+ u8 rx_errors_count_ch0;
+ u8 tx_errors_count_ch1;
+ u8 rx_errors_count_ch1;
+ u8 status_ch0;
+ u8 status_ch1;
+ __le16 time;
+} __packed;
+
struct kvaser_msg_ctrl_mode {
u8 tid;
u8 channel;
@@ -243,7 +373,7 @@ struct kvaser_msg_flush_queue {
u8 padding[3];
} __packed;
-struct kvaser_msg_log_message {
+struct leaf_msg_log_message {
u8 channel;
u8 flags;
__le16 time[3];
@@ -260,19 +390,57 @@ struct kvaser_msg {
struct kvaser_msg_simple simple;
struct kvaser_msg_cardinfo cardinfo;
struct kvaser_msg_cardinfo2 cardinfo2;
- struct kvaser_msg_softinfo softinfo;
struct kvaser_msg_busparams busparams;
+
+ struct kvaser_msg_rx_can_header rx_can_header;
+ struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
+
+ union {
+ struct leaf_msg_softinfo softinfo;
+ struct leaf_msg_rx_can rx_can;
+ struct leaf_msg_chip_state_event chip_state_event;
+ struct leaf_msg_tx_acknowledge tx_acknowledge;
+ struct leaf_msg_error_event error_event;
+ struct leaf_msg_log_message log_message;
+ } __packed leaf;
+
+ union {
+ struct usbcan_msg_softinfo softinfo;
+ struct usbcan_msg_rx_can rx_can;
+ struct usbcan_msg_chip_state_event chip_state_event;
+ struct usbcan_msg_tx_acknowledge tx_acknowledge;
+ struct usbcan_msg_error_event error_event;
+ } __packed usbcan;
+
struct kvaser_msg_tx_can tx_can;
- struct kvaser_msg_rx_can rx_can;
- struct kvaser_msg_chip_state_event chip_state_event;
- struct kvaser_msg_tx_acknowledge tx_acknowledge;
- struct kvaser_msg_error_event error_event;
struct kvaser_msg_ctrl_mode ctrl_mode;
struct kvaser_msg_flush_queue flush_queue;
- struct kvaser_msg_log_message log_message;
} u;
} __packed;
+/* Summary of a kvaser error event, for unified Leaf/Usbcan error
+ * handling. Some discrepancies between the two families exist:
+ *
+ * - USBCAN firmware does not report M16C "error factors"
+ * - USBCAN controllers have difficulties reporting if the raised error
+ * event is for ch0 or ch1. They leave such arbitration to the OS
+ * driver by letting it compare error counters with previous values
+ * and decide the error event's channel. Thus for USBCAN, the channel
+ * field is only advisory.
+ */
+struct kvaser_usb_error_summary {
+ u8 channel, status, txerr, rxerr;
+ union {
+ struct {
+ u8 error_factor;
+ } leaf;
+ struct {
+ u8 other_ch_status;
+ u8 error_state;
+ } usbcan;
+ };
+};
+
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
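struct kvaser_usb_error_summary above carries the fields both firmware families report, plus a per-family union for the Leaf error factor and the USBCAN error-state flags. A standalone sketch of the same shape, together with the standard CAN counter thresholds (error warning at 96, error passive at 128) that the unified state handling relies on; the enum and helper here are illustrative, not the driver's API:

#include <stdio.h>

enum bus_state { STATE_ACTIVE, STATE_WARNING, STATE_PASSIVE };

struct error_summary {
	unsigned char channel, status, txerr, rxerr;
	union {
		struct { unsigned char error_factor; } leaf;
		struct { unsigned char other_ch_status, error_state; } usbcan;
	};
};

/* Derive a coarse bus state from the error counters alone. */
static enum bus_state counters_to_state(const struct error_summary *es)
{
	if (es->txerr >= 128 || es->rxerr >= 128)
		return STATE_PASSIVE;
	if (es->txerr >= 96 || es->rxerr >= 96)
		return STATE_WARNING;
	return STATE_ACTIVE;
}

int main(void)
{
	struct error_summary es = { .channel = 0, .txerr = 97, .rxerr = 12 };

	es.leaf.error_factor = 0x01;
	printf("state=%d leaf factor=0x%02x\n",
	       counters_to_state(&es), es.leaf.error_factor);
	return 0;
}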
@@ -288,6 +456,7 @@ struct kvaser_usb {
u32 fw_version;
unsigned int nchannels;
+ enum kvaser_usb_family family;
bool rxinitdone;
void *rxbuf[MAX_RX_URBS];
@@ -311,6 +480,7 @@ struct kvaser_usb_net_priv {
};
static const struct usb_device_id kvaser_usb_table[] = {
+ /* Leaf family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
@@ -360,6 +530,17 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+
+ /* USBCANII family IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -463,7 +644,14 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
if (err)
return err;
- dev->fw_version = le32_to_cpu(msg.u.softinfo.fw_version);
+ switch (dev->family) {
+ case KVASER_LEAF:
+ dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+ break;
+ case KVASER_USBCAN:
+ dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+ break;
+ }
return 0;
}
@@ -482,7 +670,9 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
return err;
dev->nchannels = msg.u.cardinfo.nchannels;
- if (dev->nchannels > MAX_NET_DEVICES)
+ if ((dev->nchannels > MAX_NET_DEVICES) ||
+ (dev->family == KVASER_USBCAN &&
+ dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
return 0;
@@ -496,8 +686,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
struct sk_buff *skb;
struct can_frame *cf;
- u8 channel = msg->u.tx_acknowledge.channel;
- u8 tid = msg->u.tx_acknowledge.tid;
+ u8 channel, tid;
+
+ channel = msg->u.tx_acknowledge_header.channel;
+ tid = msg->u.tx_acknowledge_header.tid;
if (channel >= dev->nchannels) {
dev_err(dev->udev->dev.parent,
@@ -520,10 +712,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -587,7 +779,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
buf, msg->len,
- kvaser_usb_simple_msg_callback, priv);
+ kvaser_usb_simple_msg_callback, netdev);
usb_anchor_urb(urb, &priv->tx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -615,165 +807,280 @@ static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
}
-static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
+static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_error_summary *es,
+ struct can_frame *cf)
{
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats;
- struct kvaser_usb_net_priv *priv;
- unsigned int new_state;
- u8 channel, status, txerr, rxerr, error_factor;
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &priv->netdev->stats;
+ enum can_state cur_state, new_state, tx_state, rx_state;
- switch (msg->id) {
- case CMD_CAN_ERROR_EVENT:
- channel = msg->u.error_event.channel;
- status = msg->u.error_event.status;
- txerr = msg->u.error_event.tx_errors_count;
- rxerr = msg->u.error_event.rx_errors_count;
- error_factor = msg->u.error_event.error_factor;
- break;
- case CMD_LOG_MESSAGE:
- channel = msg->u.log_message.channel;
- status = msg->u.log_message.data[0];
- txerr = msg->u.log_message.data[2];
- rxerr = msg->u.log_message.data[3];
- error_factor = msg->u.log_message.data[1];
+ netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
+
+ new_state = cur_state = priv->can.state;
+
+ if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
+ new_state = CAN_STATE_BUS_OFF;
+ else if (es->status & M16C_STATE_BUS_PASSIVE)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (es->status & M16C_STATE_BUS_ERROR) {
+ /* Guard against spurious error events after a busoff */
+ if (cur_state < CAN_STATE_BUS_OFF) {
+ if ((es->txerr >= 128) || (es->rxerr >= 128))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if ((es->txerr >= 96) || (es->rxerr >= 96))
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ if (!es->status)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state != cur_state) {
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+ rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
+
+ can_change_state(priv->netdev, cf, tx_state, rx_state);
+ }
+
+ if (priv->can.restart_ms &&
+ (cur_state >= CAN_STATE_BUS_OFF) &&
+ (new_state < CAN_STATE_BUS_OFF)) {
+ priv->can.can_stats.restarts++;
+ }
+
+ switch (dev->family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ }
break;
- case CMD_CHIP_STATE_EVENT:
- channel = msg->u.chip_state_event.channel;
- status = msg->u.chip_state_event.status;
- txerr = msg->u.chip_state_event.tx_errors_count;
- rxerr = msg->u.chip_state_event.rx_errors_count;
- error_factor = 0;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
+ stats->tx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
+ stats->rx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
+ priv->can.can_stats.bus_error++;
+ }
break;
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- return;
}
- if (channel >= dev->nchannels) {
+ priv->bec.txerr = es->txerr;
+ priv->bec.rxerr = es->rxerr;
+}
+
+static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_usb_error_summary *es)
+{
+ struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
+ enum can_state old_state, new_state;
+
+ if (es->channel >= dev->nchannels) {
dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
+ "Invalid channel number (%d)\n", es->channel);
return;
}
- priv = dev->nets[channel];
+ priv = dev->nets[es->channel];
stats = &priv->netdev->stats;
- if (status & M16C_STATE_BUS_RESET) {
- kvaser_usb_unlink_tx_urbs(priv);
- return;
- }
+ /* Update all of the can interface's state and error counters before
+ * trying any memory allocation that can actually fail with -ENOMEM.
+ *
+ * We send a temporary stack-allocated error can frame to
+ * can_change_state() for the very same reason.
+ *
+ * TODO: Split can_change_state() responsibility between updating the
+ * can interface's state and counters, and the setting up of can error
+ * frame ID and data to userspace. Remove stack allocation afterwards.
+ */
+ old_state = priv->can.state;
+ kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
+ new_state = priv->can.state;
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
return;
}
-
- new_state = priv->can.state;
-
- netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
-
- if (status & M16C_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_BUSOFF;
-
- priv->can.can_stats.bus_off++;
- if (!priv->can.restart_ms)
- kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
-
- netif_carrier_off(priv->netdev);
-
- new_state = CAN_STATE_BUS_OFF;
- } else if (status & M16C_STATE_BUS_PASSIVE) {
- if (priv->can.state != CAN_STATE_ERROR_PASSIVE) {
- cf->can_id |= CAN_ERR_CRTL;
-
- if (txerr || rxerr)
- cf->data[1] = (txerr > rxerr)
- ? CAN_ERR_CRTL_TX_PASSIVE
- : CAN_ERR_CRTL_RX_PASSIVE;
- else
- cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
- CAN_ERR_CRTL_RX_PASSIVE;
-
- priv->can.can_stats.error_passive++;
+ memcpy(cf, &tmp_cf, sizeof(*cf));
+
+ if (new_state != old_state) {
+ if (es->status &
+ (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
+ netif_carrier_off(priv->netdev);
}
- new_state = CAN_STATE_ERROR_PASSIVE;
+ if (priv->can.restart_ms &&
+ (old_state >= CAN_STATE_BUS_OFF) &&
+ (new_state < CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+ }
}
- if (status == M16C_STATE_BUS_ERROR) {
- if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
- ((txerr >= 96) || (rxerr >= 96))) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (txerr > rxerr)
- ? CAN_ERR_CRTL_TX_WARNING
- : CAN_ERR_CRTL_RX_WARNING;
-
- priv->can.can_stats.error_warning++;
- new_state = CAN_STATE_ERROR_WARNING;
- } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_ACTIVE;
-
- new_state = CAN_STATE_ERROR_ACTIVE;
+ switch (dev->family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+ if (es->leaf.error_factor & M16C_EF_ACKE)
+ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+ if (es->leaf.error_factor & M16C_EF_CRCE)
+ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ if (es->leaf.error_factor & M16C_EF_FORME)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (es->leaf.error_factor & M16C_EF_STFE)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (es->leaf.error_factor & M16C_EF_BITE0)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (es->leaf.error_factor & M16C_EF_BITE1)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (es->leaf.error_factor & M16C_EF_TRE)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
+ cf->can_id |= CAN_ERR_BUSERROR;
}
+ break;
}
- if (!status) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
- if (priv->can.restart_ms &&
- (priv->can.state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_RESTARTED;
- netif_carrier_on(priv->netdev);
+/* For USBCAN, report an error to userspace iff the channel's error counters
+ * have changed, or we're the only channel seeing a bus error state.
+ */
+static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
+ struct kvaser_usb_error_summary *es)
+{
+ struct kvaser_usb_net_priv *priv;
+ int channel;
+ bool report_error;
- priv->can.can_stats.restarts++;
+ channel = es->channel;
+ if (channel >= dev->nchannels) {
+ dev_err(dev->udev->dev.parent,
+ "Invalid channel number (%d)\n", channel);
+ return;
}
- if (error_factor) {
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
+ priv = dev->nets[channel];
+ report_error = false;
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-
- if (error_factor & M16C_EF_ACKE)
- cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
- if (error_factor & M16C_EF_CRCE)
- cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL);
- if (error_factor & M16C_EF_FORME)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- if (error_factor & M16C_EF_STFE)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- if (error_factor & M16C_EF_BITE0)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
- if (error_factor & M16C_EF_BITE1)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
- if (error_factor & M16C_EF_TRE)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if (es->txerr != priv->bec.txerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
+ report_error = true;
+ }
+ if (es->rxerr != priv->bec.rxerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
+ report_error = true;
+ }
+ if ((es->status & M16C_STATE_BUS_ERROR) &&
+ !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
+ report_error = true;
}
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (report_error)
+ kvaser_usb_rx_error(dev, es);
+}
- priv->bec.txerr = txerr;
- priv->bec.rxerr = rxerr;
+static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct kvaser_usb_error_summary es = { };
- priv->can.state = new_state;
+ switch (msg->id) {
+ /* Sometimes errors are sent as unsolicited chip state events */
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = msg->u.usbcan.chip_state_event.channel;
+ es.status = msg->u.usbcan.chip_state_event.status;
+ es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
+ es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
+ kvaser_usbcan_conditionally_rx_error(dev, &es);
+ break;
- netif_rx(skb);
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = 0;
+ es.status = msg->u.usbcan.error_event.status_ch0;
+ es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
+ es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
+ es.usbcan.other_ch_status =
+ msg->u.usbcan.error_event.status_ch1;
+ kvaser_usbcan_conditionally_rx_error(dev, &es);
+
+ /* The USBCAN firmware supports up to 2 channels.
+ * Now that ch0 was checked, check if ch1 has any errors.
+ */
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ es.channel = 1;
+ es.status = msg->u.usbcan.error_event.status_ch1;
+ es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
+ es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
+ es.usbcan.other_ch_status =
+ msg->u.usbcan.error_event.status_ch0;
+ kvaser_usbcan_conditionally_rx_error(dev, &es);
+ }
+ break;
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ default:
+ dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+ msg->id);
+ }
+}
+
+static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct kvaser_usb_error_summary es = { };
+
+ switch (msg->id) {
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = msg->u.leaf.error_event.channel;
+ es.status = msg->u.leaf.error_event.status;
+ es.txerr = msg->u.leaf.error_event.tx_errors_count;
+ es.rxerr = msg->u.leaf.error_event.rx_errors_count;
+ es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
+ break;
+ case CMD_LEAF_LOG_MESSAGE:
+ es.channel = msg->u.leaf.log_message.channel;
+ es.status = msg->u.leaf.log_message.data[0];
+ es.txerr = msg->u.leaf.log_message.data[2];
+ es.rxerr = msg->u.leaf.log_message.data[3];
+ es.leaf.error_factor = msg->u.leaf.log_message.data[1];
+ break;
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = msg->u.leaf.chip_state_event.channel;
+ es.status = msg->u.leaf.chip_state_event.status;
+ es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
+ es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
+ es.leaf.error_factor = 0;
+ break;
+ default:
+ dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+ msg->id);
+ return;
+ }
+
+ kvaser_usb_rx_error(dev, &es);
}
static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -783,16 +1090,19 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
struct sk_buff *skb;
struct net_device_stats *stats = &priv->netdev->stats;
- if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+ if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
MSG_FLAG_NERR)) {
netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
- msg->u.rx_can.flag);
+ msg->u.rx_can_header.flag);
stats->rx_errors++;
return;
}
- if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) {
+ if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -802,13 +1112,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -819,7 +1125,8 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
struct can_frame *cf;
struct sk_buff *skb;
struct net_device_stats *stats;
- u8 channel = msg->u.rx_can.channel;
+ u8 channel = msg->u.rx_can_header.channel;
+ const u8 *rx_msg = NULL; /* GCC */
if (channel >= dev->nchannels) {
dev_err(dev->udev->dev.parent,
@@ -830,67 +1137,74 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
priv = dev->nets[channel];
stats = &priv->netdev->stats;
- if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
- (msg->id == CMD_LOG_MESSAGE)) {
- kvaser_usb_rx_error(dev, msg);
+ if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
+ (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
+ kvaser_leaf_rx_error(dev, msg);
return;
- } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR |
- MSG_FLAG_OVERRUN)) {
+ } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
kvaser_usb_rx_can_err(priv, msg);
return;
- } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
+ } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
netdev_warn(priv->netdev,
"Unhandled frame (flags: 0x%02x)",
- msg->u.rx_can.flag);
+ msg->u.rx_can_header.flag);
return;
}
+ switch (dev->family) {
+ case KVASER_LEAF:
+ rx_msg = msg->u.leaf.rx_can.msg;
+ break;
+ case KVASER_USBCAN:
+ rx_msg = msg->u.usbcan.rx_can.msg;
+ break;
+ }
+
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
stats->tx_dropped++;
return;
}
- if (msg->id == CMD_LOG_MESSAGE) {
- cf->can_id = le32_to_cpu(msg->u.log_message.id);
+ if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
+ cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
else
cf->can_id &= CAN_SFF_MASK;
- cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
+ cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);
- if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+ if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
- memcpy(cf->data, &msg->u.log_message.data,
+ memcpy(cf->data, &msg->u.leaf.log_message.data,
cf->can_dlc);
} else {
- cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
- (msg->u.rx_can.msg[1] & 0x3f);
+ cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);
if (msg->id == CMD_RX_EXT_MESSAGE) {
cf->can_id <<= 18;
- cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
- ((msg->u.rx_can.msg[3] & 0xff) << 6) |
- (msg->u.rx_can.msg[4] & 0x3f);
+ cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
+ ((rx_msg[3] & 0xff) << 6) |
+ (rx_msg[4] & 0x3f);
cf->can_id |= CAN_EFF_FLAG;
}
- cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+ cf->can_dlc = get_can_dlc(rx_msg[5]);
- if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+ if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
- memcpy(cf->data, &msg->u.rx_can.msg[6],
+ memcpy(cf->data, &rx_msg[6],
cf->can_dlc);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
@@ -947,21 +1261,35 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
case CMD_RX_STD_MESSAGE:
case CMD_RX_EXT_MESSAGE:
- case CMD_LOG_MESSAGE:
+ kvaser_usb_rx_can_msg(dev, msg);
+ break;
+
+ case CMD_LEAF_LOG_MESSAGE:
+ if (dev->family != KVASER_LEAF)
+ goto warn;
kvaser_usb_rx_can_msg(dev, msg);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- kvaser_usb_rx_error(dev, msg);
+ if (dev->family == KVASER_LEAF)
+ kvaser_leaf_rx_error(dev, msg);
+ else
+ kvaser_usbcan_rx_error(dev, msg);
break;
case CMD_TX_ACKNOWLEDGE:
kvaser_usb_tx_acknowledge(dev, msg);
break;
+ /* Ignored messages */
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ if (dev->family != KVASER_USBCAN)
+ goto warn;
+ break;
+
default:
- dev_warn(dev->udev->dev.parent,
+warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
break;
}
@@ -1181,7 +1509,7 @@ static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
dev->rxbuf[i],
dev->rxbuf_dma[i]);
- for (i = 0; i < MAX_NET_DEVICES; i++) {
+ for (i = 0; i < dev->nchannels; i++) {
struct kvaser_usb_net_priv *priv = dev->nets[i];
if (priv)
@@ -1246,6 +1574,9 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
priv->can.state = CAN_STATE_STOPPED;
close_candev(priv->netdev);
@@ -1286,6 +1617,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
struct kvaser_msg *msg;
int i, err;
int ret = NETDEV_TX_OK;
+ u8 *msg_tx_can_flags = NULL; /* GCC */
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -1294,20 +1626,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (!urb) {
netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
- goto nourbmem;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
stats->tx_dropped++;
+ dev_kfree_skb(skb);
goto nobufmem;
}
msg = buf;
msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
- msg->u.tx_can.flags = 0;
msg->u.tx_can.channel = priv->channel;
+ switch (dev->family) {
+ case KVASER_LEAF:
+ msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
+ break;
+ case KVASER_USBCAN:
+ msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
+ break;
+ }
+
+ *msg_tx_can_flags = 0;
+
if (cf->can_id & CAN_EFF_FLAG) {
msg->id = CMD_TX_EXT_MESSAGE;
msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
@@ -1325,7 +1669,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
if (cf->can_id & CAN_RTR_FLAG)
- msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME;
+ *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
@@ -1334,6 +1678,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
}
}
+ /* This should never happen; it implies a flow control bug */
if (!context) {
netdev_warn(netdev, "cannot find free context\n");
ret = NETDEV_TX_BUSY;
@@ -1364,9 +1709,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (unlikely(err)) {
can_free_echo_skb(netdev, context->echo_index);
- skb = NULL; /* set to NULL to avoid double free in
- * dev_kfree_skb(skb) */
-
atomic_dec(&priv->active_tx_urbs);
usb_unanchor_urb(urb);
@@ -1388,8 +1730,6 @@ releasebuf:
kfree(buf);
nobufmem:
usb_free_urb(urb);
-nourbmem:
- dev_kfree_skb(skb);
return ret;
}
@@ -1502,6 +1842,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
struct kvaser_usb_net_priv *priv;
int i, err;
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
+ if (err)
+ return err;
+
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1588,12 +1932,23 @@ static int kvaser_usb_probe(struct usb_interface *intf,
{
struct kvaser_usb *dev;
int err = -ENOMEM;
- int i;
+ int i, retry = 3;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
+ if (kvaser_is_leaf(id)) {
+ dev->family = KVASER_LEAF;
+ } else if (kvaser_is_usbcan(id)) {
+ dev->family = KVASER_USBCAN;
+ } else {
+ dev_err(&intf->dev,
+ "Product ID (%d) does not belong to any known Kvaser USB family",
+ id->idProduct);
+ return -ENODEV;
+ }
+
err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
@@ -1606,10 +1961,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
- for (i = 0; i < MAX_NET_DEVICES; i++)
- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_get_software_info(dev);
+ } while (--retry && err == -ETIMEDOUT);
- err = kvaser_usb_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software infos, error %d\n", err);
diff --git a/drivers/net/can/usb/peak_usb/Makefile b/drivers/net/can/usb/peak_usb/Makefile
index 1aefbc88d643..1839e9ca62e7 100644
--- a/drivers/net/can/usb/peak_usb/Makefile
+++ b/drivers/net/can/usb/peak_usb/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb.o
-peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o
+peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o pcan_usb_fd.o
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
new file mode 100644
index 000000000000..1ba7c25002e1
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -0,0 +1,222 @@
+/*
+ * CAN driver for PEAK System micro-CAN based adapters
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef PUCAN_H
+#define PUCAN_H
+
+/* uCAN commands opcodes list (low-order 10 bits) */
+#define PUCAN_CMD_NOP 0x000
+#define PUCAN_CMD_RESET_MODE 0x001
+#define PUCAN_CMD_NORMAL_MODE 0x002
+#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003
+#define PUCAN_CMD_TIMING_SLOW 0x004
+#define PUCAN_CMD_TIMING_FAST 0x005
+#define PUCAN_CMD_FILTER_STD 0x008
+#define PUCAN_CMD_TX_ABORT 0x009
+#define PUCAN_CMD_WR_ERR_CNT 0x00a
+#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b
+#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c
+#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
+
+/* uCAN received messages list */
+#define PUCAN_MSG_CAN_RX 0x0001
+#define PUCAN_MSG_ERROR 0x0002
+#define PUCAN_MSG_STATUS 0x0003
+#define PUCAN_MSG_BUSLOAD 0x0004
+#define PUCAN_MSG_CAN_TX 0x1000
+
+/* uCAN command common header */
+struct __packed pucan_command {
+ __le16 opcode_channel;
+ u16 args[3];
+};
+
+/* uCAN TIMING_SLOW command fields */
+#define PUCAN_TSLOW_SJW_T(s, t) (((s) & 0xf) | ((!!(t)) << 7))
+#define PUCAN_TSLOW_TSEG2(t) ((t) & 0xf)
+#define PUCAN_TSLOW_TSEG1(t) ((t) & 0x3f)
+#define PUCAN_TSLOW_BRP(b) ((b) & 0x3ff)
+
+struct __packed pucan_timing_slow {
+ __le16 opcode_channel;
+
+ u8 ewl; /* Error Warning limit */
+ u8 sjw_t; /* Sync Jump Width + Triple sampling */
+ u8 tseg2; /* Timing SEGment 2 */
+ u8 tseg1; /* Timing SEGment 1 */
+
+ __le16 brp; /* BaudRate Prescaler */
+};
+
+/* uCAN TIMING_FAST command fields */
+#define PUCAN_TFAST_SJW(s) ((s) & 0x3)
+#define PUCAN_TFAST_TSEG2(t) ((t) & 0x7)
+#define PUCAN_TFAST_TSEG1(t) ((t) & 0xf)
+#define PUCAN_TFAST_BRP(b) ((b) & 0x3ff)
+
+struct __packed pucan_timing_fast {
+ __le16 opcode_channel;
+
+ u8 unused;
+ u8 sjw; /* Sync Jump Width */
+ u8 tseg2; /* Timing SEGment 2 */
+ u8 tseg1; /* Timing SEGment 1 */
+
+ __le16 brp; /* BaudRate Prescaler */
+};
+
+/* uCAN FILTER_STD command fields */
+#define PUCAN_FLTSTD_ROW_IDX_BITS 6
+
+struct __packed pucan_filter_std {
+ __le16 opcode_channel;
+
+ __le16 idx;
+ __le32 mask; /* CAN-ID bitmask in idx range */
+};
+
+/* uCAN WR_ERR_CNT command fields */
+#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */
+#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */
+
+struct __packed pucan_wr_err_cnt {
+ __le16 opcode_channel;
+
+ __le16 sel_mask;
+ u8 tx_counter; /* Tx error counter new value */
+ u8 rx_counter; /* Rx error counter new value */
+
+ u16 unused;
+};
+
+/* uCAN RX_FRAME_ENABLE command fields */
+#define PUCAN_FLTEXT_ERROR 0x0001
+#define PUCAN_FLTEXT_BUSLOAD 0x0002
+
+struct __packed pucan_filter_ext {
+ __le16 opcode_channel;
+
+ __le16 ext_mask;
+ u32 unused;
+};
+
+/* uCAN received messages global format */
+struct __packed pucan_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+};
+
+/* uCAN flags for CAN/CANFD messages */
+#define PUCAN_MSG_SELF_RECEIVE 0x80
+#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */
+#define PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */
+#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */
+#define PUCAN_MSG_SINGLE_SHOT 0x08
+#define PUCAN_MSG_LOOPED_BACK 0x04
+#define PUCAN_MSG_EXT_ID 0x02
+#define PUCAN_MSG_RTR 0x01
+
+struct __packed pucan_rx_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ __le32 tag_low;
+ __le32 tag_high;
+ u8 channel_dlc;
+ u8 client;
+ __le16 flags;
+ __le32 can_id;
+ u8 d[0];
+};
+
+/* uCAN error types */
+#define PUCAN_ERMSG_BIT_ERROR 0
+#define PUCAN_ERMSG_FORM_ERROR 1
+#define PUCAN_ERMSG_STUFF_ERROR 2
+#define PUCAN_ERMSG_OTHER_ERROR 3
+#define PUCAN_ERMSG_ERR_CNT_DEC 4
+
+struct __packed pucan_error_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel_type_d;
+ u8 code_g;
+ u8 tx_err_cnt;
+ u8 rx_err_cnt;
+};
+
+#define PUCAN_BUS_PASSIVE 0x20
+#define PUCAN_BUS_WARNING 0x40
+#define PUCAN_BUS_BUSOFF 0x80
+
+struct __packed pucan_status_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel_p_w_b;
+ u8 unused[3];
+};
+
+/* uCAN transmitted message format */
+#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4))
+
+struct __packed pucan_tx_msg {
+ __le16 size;
+ __le16 type;
+ __le32 tag_low;
+ __le32 tag_high;
+ u8 channel_dlc;
+ u8 client;
+ __le16 flags;
+ __le32 can_id;
+ u8 d[0];
+};
+
+/* build the cmd opcode_channel field with respect to the correct endianness */
+static inline __le16 pucan_cmd_opcode_channel(struct peak_usb_device *dev,
+ int opcode)
+{
+ return cpu_to_le16(((dev->ctrl_idx) << 12) | ((opcode) & 0x3ff));
+}
+
+/* return the channel number part from any received message channel_dlc field */
+static inline int pucan_msg_get_channel(struct pucan_rx_msg *rm)
+{
+ return rm->channel_dlc & 0xf;
+}
+
+/* return the dlc value from any received message channel_dlc field */
+static inline int pucan_msg_get_dlc(struct pucan_rx_msg *rm)
+{
+ return rm->channel_dlc >> 4;
+}
+
+static inline int pucan_ermsg_get_channel(struct pucan_error_msg *em)
+{
+ return em->channel_type_d & 0x0f;
+}
+
+static inline int pucan_stmsg_get_channel(struct pucan_status_msg *sm)
+{
+ return sm->channel_p_w_b & 0x0f;
+}
+
+#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 4e1659d07979..72427f21edff 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -488,6 +488,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
switch (new_state) {
case CAN_STATE_BUS_OFF:
cf->can_id |= CAN_ERR_BUSOFF;
+ mc->pdev->dev.can.can_stats.bus_off++;
can_bus_off(mc->netdev);
break;
@@ -854,10 +855,11 @@ static int pcan_usb_probe(struct usb_interface *intf)
/*
* describe the PCAN-USB adapter
*/
-struct peak_usb_adapter pcan_usb = {
+const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
+ .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c62f48a1161d..7921cff93a63 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -37,16 +37,19 @@ MODULE_LICENSE("GPL v2");
static struct usb_device_id peak_usb_table[] = {
{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)},
{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
+ {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
+ {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, peak_usb_table);
/* List of supported PCAN-USB adapters (NULL terminated list) */
-static struct peak_usb_adapter *peak_usb_adapters_list[] = {
+static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
&pcan_usb,
&pcan_usb_pro,
- NULL,
+ &pcan_usb_fd,
+ &pcan_usb_pro_fd,
};
/*
@@ -65,7 +68,7 @@ void pcan_dump_mem(char *prompt, void *p, int l)
* initialize a time_ref object with usb adapter own settings
*/
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
- struct peak_usb_adapter *adapter)
+ const struct peak_usb_adapter *adapter)
{
if (time_ref) {
memset(time_ref, 0, sizeof(struct peak_time_ref));
@@ -165,6 +168,21 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
}
/*
+ * post received skb after having set any hw timestamp
+ */
+int peak_usb_netif_rx(struct sk_buff *skb,
+ struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
+{
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+ struct timeval tv;
+
+ peak_usb_get_ts_tv(time_ref, ts_low, &tv);
+ hwts->hwtstamp = timeval_to_ktime(tv);
+
+ return netif_rx(skb);
+}
+
+/*
* callback for bulk Rx urb
*/
static void peak_usb_read_bulk_callback(struct urb *urb)
@@ -253,7 +271,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
case 0:
/* transmission complete */
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
+ netdev->stats.tx_bytes += context->data_len;
/* prevent tx timeout */
netdev->trans_start = jiffies;
@@ -289,7 +307,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
struct peak_usb_device *dev = netdev_priv(netdev);
struct peak_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct urb *urb;
u8 *obuf;
int i, err;
@@ -322,7 +340,9 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
}
context->echo_index = i;
- context->dlc = cf->can_dlc;
+
+ /* Note: this works with CANFD frames too */
+ context->data_len = cfd->len;
usb_anchor_urb(urb, &dev->tx_submitted);
@@ -679,19 +699,43 @@ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
}
/*
- * candev callback used to set device bitrate.
+ * candev callback used to set device nominal/arbitration bitrate.
*/
static int peak_usb_set_bittiming(struct net_device *netdev)
{
struct peak_usb_device *dev = netdev_priv(netdev);
- struct can_bittiming *bt = &dev->can.bittiming;
+ const struct peak_usb_adapter *pa = dev->adapter;
- if (dev->adapter->dev_set_bittiming) {
- int err = dev->adapter->dev_set_bittiming(dev, bt);
+ if (pa->dev_set_bittiming) {
+ struct can_bittiming *bt = &dev->can.bittiming;
+ int err = pa->dev_set_bittiming(dev, bt);
if (err)
netdev_info(netdev, "couldn't set bitrate (err %d)\n",
- err);
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * candev callback used to set device data bitrate.
+ */
+static int peak_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ const struct peak_usb_adapter *pa = dev->adapter;
+
+ if (pa->dev_set_data_bittiming) {
+ struct can_bittiming *bt = &dev->can.data_bittiming;
+ int err = pa->dev_set_data_bittiming(dev, bt);
+
+ if (err)
+ netdev_info(netdev,
+ "couldn't set data bitrate (err %d)\n",
+ err);
+
return err;
}
@@ -709,7 +753,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
* create one device which is attached to CAN controller #ctrl_idx of the
* usb adapter.
*/
-static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
struct usb_interface *intf, int ctrl_idx)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
@@ -750,9 +794,11 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
+ dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+ dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
- dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
- CAN_CTRLMODE_LISTENONLY;
+ dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
+ dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
netdev->netdev_ops = &peak_usb_netdev_ops;
@@ -857,17 +903,18 @@ static int peak_usb_probe(struct usb_interface *intf,
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct);
- struct peak_usb_adapter *peak_usb_adapter, **pp;
+ const struct peak_usb_adapter *peak_usb_adapter = NULL;
int i, err = -ENOMEM;
usb_dev = interface_to_usbdev(intf);
/* get corresponding PCAN-USB adapter */
- for (pp = peak_usb_adapters_list; *pp; pp++)
- if ((*pp)->device_id == usb_id_product)
+ for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
+ if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
+ peak_usb_adapter = peak_usb_adapters_list[i];
break;
+ }
- peak_usb_adapter = *pp;
if (!peak_usb_adapter) {
/* should never come except device_id bad usage in this file */
pr_err("%s: didn't find device id. 0x%x in devices list\n",
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 073b47ff8eee..9e624f05ad4d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -25,6 +25,8 @@
/* supported device ids. */
#define PCAN_USB_PRODUCT_ID 0x000c
#define PCAN_USBPRO_PRODUCT_ID 0x000d
+#define PCAN_USBPROFD_PRODUCT_ID 0x0011
+#define PCAN_USBFD_PRODUCT_ID 0x0012
#define PCAN_USB_DRIVER_NAME "peak_usb"
@@ -44,8 +46,10 @@ struct peak_usb_device;
struct peak_usb_adapter {
char *name;
u32 device_id;
+ u32 ctrlmode_supported;
struct can_clock clock;
const struct can_bittiming_const bittiming_const;
+ const struct can_bittiming_const data_bittiming_const;
unsigned int ctrl_count;
int (*intf_probe)(struct usb_interface *intf);
@@ -57,6 +61,8 @@ struct peak_usb_adapter {
int (*dev_close)(struct peak_usb_device *dev);
int (*dev_set_bittiming)(struct peak_usb_device *dev,
struct can_bittiming *bt);
+ int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
+ struct can_bittiming *bt);
int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
@@ -66,6 +72,8 @@ struct peak_usb_adapter {
int (*dev_stop)(struct peak_usb_device *dev);
int (*dev_restart_async)(struct peak_usb_device *dev, struct urb *urb,
u8 *buf);
+ int (*do_get_berr_counter)(const struct net_device *netdev,
+ struct can_berr_counter *bec);
u8 ep_msg_in;
u8 ep_msg_out[PCAN_USB_MAX_CHANNEL];
u8 ts_used_bits;
@@ -78,21 +86,23 @@ struct peak_usb_adapter {
int sizeof_dev_private;
};
-extern struct peak_usb_adapter pcan_usb;
-extern struct peak_usb_adapter pcan_usb_pro;
+extern const struct peak_usb_adapter pcan_usb;
+extern const struct peak_usb_adapter pcan_usb_pro;
+extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_pro_fd;
struct peak_time_ref {
struct timeval tv_host_0, tv_host;
u32 ts_dev_1, ts_dev_2;
u64 ts_total;
u32 tick_count;
- struct peak_usb_adapter *adapter;
+ const struct peak_usb_adapter *adapter;
};
struct peak_tx_urb_context {
struct peak_usb_device *dev;
u32 echo_index;
- u8 dlc;
+ u8 data_len;
struct urb *urb;
};
@@ -102,7 +112,7 @@ struct peak_tx_urb_context {
/* PEAK-System USB device */
struct peak_usb_device {
struct can_priv can;
- struct peak_usb_adapter *adapter;
+ const struct peak_usb_adapter *adapter;
unsigned int ctrl_idx;
u32 state;
@@ -134,12 +144,14 @@ void pcan_dump_mem(char *prompt, void *p, int l);
/* common timestamp management */
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
- struct peak_usb_adapter *adapter);
+ const struct peak_usb_adapter *adapter);
void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
struct timeval *tv);
-
+int peak_usb_netif_rx(struct sk_buff *skb,
+ struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
+
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
new file mode 100644
index 000000000000..962c3f027383
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -0,0 +1,1095 @@
+/*
+ * CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
+ *
+ * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "pcan_usb_core.h"
+#include "pcan_usb_pro.h"
+#include "pcan_ucan.h"
+
+MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter");
+MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
+
+#define PCAN_USBPROFD_CHANNEL_COUNT 2
+#define PCAN_USBFD_CHANNEL_COUNT 1
+
+/* PCAN-USB Pro FD adapter internal clock (Hz) */
+#define PCAN_UFD_CRYSTAL_HZ 80000000
+
+#define PCAN_UFD_CMD_BUFFER_SIZE 512
+#define PCAN_UFD_LOSPD_PKT_SIZE 64
+
+/* PCAN-USB Pro FD command timeout (ms.) */
+#define PCAN_UFD_CMD_TIMEOUT_MS 1000
+
+/* PCAN-USB Pro FD rx/tx buffers size */
+#define PCAN_UFD_RX_BUFFER_SIZE 2048
+#define PCAN_UFD_TX_BUFFER_SIZE 512
+
+/* read some version info from the hw device */
+struct __packed pcan_ufd_fw_info {
+ __le16 size_of; /* sizeof this */
+ __le16 type; /* type of this structure */
+ u8 hw_type; /* Type of hardware (HW_TYPE_xxx) */
+ u8 bl_version[3]; /* Bootloader version */
+ u8 hw_version; /* Hardware version (PCB) */
+ u8 fw_version[3]; /* Firmware version */
+ __le32 dev_id[2]; /* "device id" per CAN */
+ __le32 ser_no; /* S/N */
+ __le32 flags; /* special functions */
+};
+
+/* handle device specific info used by the netdevices */
+struct pcan_usb_fd_if {
+ struct peak_usb_device *dev[PCAN_USB_MAX_CHANNEL];
+ struct pcan_ufd_fw_info fw_info;
+ struct peak_time_ref time_ref;
+ int cm_ignore_count;
+ int dev_opened_count;
+};
+
+/* device information */
+struct pcan_usb_fd_device {
+ struct peak_usb_device dev;
+ struct can_berr_counter bec;
+ struct pcan_usb_fd_if *usb_if;
+ u8 *cmd_buffer_addr;
+};
+
+/* Extended USB commands (non uCAN commands) */
+
+/* Clock Modes command */
+#define PCAN_UFD_CMD_CLK_SET 0x80
+
+#define PCAN_UFD_CLK_80MHZ 0x0
+#define PCAN_UFD_CLK_60MHZ 0x1
+#define PCAN_UFD_CLK_40MHZ 0x2
+#define PCAN_UFD_CLK_30MHZ 0x3
+#define PCAN_UFD_CLK_24MHZ 0x4
+#define PCAN_UFD_CLK_20MHZ 0x5
+#define PCAN_UFD_CLK_DEF PCAN_UFD_CLK_80MHZ
+
+struct __packed pcan_ufd_clock {
+ __le16 opcode_channel;
+
+ u8 mode;
+ u8 unused[5];
+};
+
+/* LED control command */
+#define PCAN_UFD_CMD_LED_SET 0x86
+
+#define PCAN_UFD_LED_DEV 0x00
+#define PCAN_UFD_LED_FAST 0x01
+#define PCAN_UFD_LED_SLOW 0x02
+#define PCAN_UFD_LED_ON 0x03
+#define PCAN_UFD_LED_OFF 0x04
+#define PCAN_UFD_LED_DEF PCAN_UFD_LED_DEV
+
+struct __packed pcan_ufd_led {
+ __le16 opcode_channel;
+
+ u8 mode;
+ u8 unused[5];
+};
+
+/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
+
+struct __packed pcan_ufd_filter_ext {
+ __le16 opcode_channel;
+
+ __le16 ext_mask;
+ u16 unused;
+ __le16 usb_mask;
+};
+
+/* Extended usage of uCAN messages for PCAN-USB Pro FD */
+#define PCAN_UFD_MSG_CALIBRATION 0x100
+
+struct __packed pcan_ufd_ts_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ __le16 usb_frame_index;
+ u16 unused;
+};
+
+#define PCAN_UFD_MSG_OVERRUN 0x101
+
+#define PCAN_UFD_OVMSG_CHANNEL(o) ((o)->channel & 0xf)
+
+struct __packed pcan_ufd_ovr_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel;
+ u8 unused[3];
+};
+
+static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
+{
+ return om->channel & 0xf;
+}
+
+/* Clock mode frequency values */
+static const u32 pcan_usb_fd_clk_freq[6] = {
+ [PCAN_UFD_CLK_80MHZ] = 80000000,
+ [PCAN_UFD_CLK_60MHZ] = 60000000,
+ [PCAN_UFD_CLK_40MHZ] = 40000000,
+ [PCAN_UFD_CLK_30MHZ] = 30000000,
+ [PCAN_UFD_CLK_24MHZ] = 24000000,
+ [PCAN_UFD_CLK_20MHZ] = 20000000
+};
+
+/* return a device USB interface */
+static inline
+struct pcan_usb_fd_if *pcan_usb_fd_dev_if(struct peak_usb_device *dev)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ return pdev->usb_if;
+}
+
+/* return a device USB commands buffer */
+static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ return pdev->cmd_buffer_addr;
+}
+
+/* send PCAN-USB Pro FD commands synchronously */
+static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+{
+ void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
+ int err;
+ u8 *packet_ptr;
+ int i, n = 1, packet_len;
+ ptrdiff_t cmd_len;
+
+ /* usb device unregistered? */
+ if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ return 0;
+
+ /* if a packet is not filled completely by commands, the command list
+ * is terminated with an "end of collection" record.
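+	 * (the all-0xff filler written below is read back by the firmware as
+	 * PUCAN_CMD_END_OF_COLLECTION)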
+ */
+ cmd_len = cmd_tail - cmd_head;
+ if (cmd_len <= (PCAN_UFD_CMD_BUFFER_SIZE - sizeof(u64))) {
+ memset(cmd_tail, 0xff, sizeof(u64));
+ cmd_len += sizeof(u64);
+ }
+
+ packet_ptr = cmd_head;
+
+	/* the firmware cannot re-assemble a 512-byte buffer at USB full speed */
+ if ((dev->udev->speed != USB_SPEED_HIGH) &&
+ (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
+ packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
+ n += cmd_len / packet_len;
+ } else {
+ packet_len = cmd_len;
+ }
+
+ for (i = 0; i < n; i++) {
+ err = usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ PCAN_USBPRO_EP_CMDOUT),
+ packet_ptr, packet_len,
+ NULL, PCAN_UFD_CMD_TIMEOUT_MS);
+ if (err) {
+ netdev_err(dev->netdev,
+ "sending command failure: %d\n", err);
+ break;
+ }
+
+ packet_ptr += packet_len;
+ }
+
+ return err;
+}
+
+/* build the commands list in the given buffer, to enter operational mode */
+static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
+{
+ struct pucan_wr_err_cnt *prc;
+ struct pucan_command *cmd;
+ u8 *pc = buf;
+
+ /* 1st, reset error counters: */
+ prc = (struct pucan_wr_err_cnt *)pc;
+ prc->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_WR_ERR_CNT);
+
+ /* select both counters */
+ prc->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE|PUCAN_WRERRCNT_RE);
+
+ /* and reset their values */
+ prc->tx_counter = 0;
+ prc->rx_counter = 0;
+
+ /* moves the pointer forward */
+ pc += sizeof(struct pucan_wr_err_cnt);
+
+ /* next, go back to operational mode */
+ cmd = (struct pucan_command *)pc;
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ (dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ?
+ PUCAN_CMD_LISTEN_ONLY_MODE :
+ PUCAN_CMD_NORMAL_MODE);
+ pc += sizeof(struct pucan_command);
+
+ return pc - buf;
+}
+
+/* set CAN bus on/off */
+static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff)
+{
+ u8 *pc = pcan_usb_fd_cmd_buffer(dev);
+ int l;
+
+ if (onoff) {
+ /* build the cmds list to enter operational mode */
+ l = pcan_usb_fd_build_restart_cmd(dev, pc);
+ } else {
+ struct pucan_command *cmd = (struct pucan_command *)pc;
+
+ /* build cmd to go back to reset mode */
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_RESET_MODE);
+ l = sizeof(struct pucan_command);
+ }
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, pc + l);
+}
+
+/* set filtering masks:
+ *
+ * idx in range [0..63] selects a row #idx, all rows otherwise
+ * mask in range [0..0xffffffff] defines up to 32 CANIDs in the row(s)
+ *
+ * Each bit of this 64 x 32 bits array defines a CANID value:
+ *
+ * bit[i,j] = 1 implies that CANID=(i x 32)+j will be received, while
+ * bit[i,j] = 0 implies that CANID=(i x 32)+j will be discarded.
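+ *
+ * For example, with this mapping, idx=1 and mask=0x00000005 set bits [1,0]
+ * and [1,2], i.e. standard CAN IDs 32 and 34 of that row are received.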
+ */
+static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
+ u32 mask)
+{
+ struct pucan_filter_std *cmd = pcan_usb_fd_cmd_buffer(dev);
+ int i, n;
+
+ /* select all rows when idx is out of range [0..63] */
+ if ((idx < 0) || (idx >= (1 << PUCAN_FLTSTD_ROW_IDX_BITS))) {
+ n = 1 << PUCAN_FLTSTD_ROW_IDX_BITS;
+ idx = 0;
+
+ /* select the row (and only the row) otherwise */
+ } else {
+ n = idx + 1;
+ }
+
+ for (i = idx; i < n; i++, cmd++) {
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_FILTER_STD);
+ cmd->idx = cpu_to_le16(i);
+ cmd->mask = cpu_to_le32(mask);
+ }
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, cmd);
+}
+
+/* set/unset notifications filter:
+ *
+ * onoff sets(1)/unset(0) notifications
+ * mask each bit defines a kind of notification to set/unset
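+ *
+ * e.g. onoff=1 with ext_mask=PUCAN_FLTEXT_ERROR turns on error notifications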
+ */
+static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
+ bool onoff, u16 ext_mask, u16 usb_mask)
+{
+ struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
+ PUCAN_CMD_RX_FRAME_DISABLE);
+
+ cmd->ext_mask = cpu_to_le16(ext_mask);
+ cmd->usb_mask = cpu_to_le16(usb_mask);
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* setup LED control */
+static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode)
+{
+ struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PCAN_UFD_CMD_LED_SET);
+ cmd->mode = led_mode;
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* set CAN clock domain */
+static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev,
+ u8 clk_mode)
+{
+ struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PCAN_UFD_CMD_CLK_SET);
+ cmd->mode = clk_mode;
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* set bittiming for CAN and CAN-FD header */
+static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev,
+ struct can_bittiming *bt)
+{
+ struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_TIMING_SLOW);
+ cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1,
+ dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+
+ cmd->tseg2 = PUCAN_TSLOW_TSEG2(bt->phase_seg2 - 1);
+ cmd->tseg1 = PUCAN_TSLOW_TSEG1(bt->prop_seg + bt->phase_seg1 - 1);
+ cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(bt->brp - 1));
+
+ cmd->ewl = 96; /* default */
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* set CAN-FD bittiming for data */
+static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
+ struct can_bittiming *bt)
+{
+ struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_TIMING_FAST);
+ cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1);
+ cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1);
+ cmd->tseg1 = PUCAN_TFAST_TSEG1(bt->prop_seg + bt->phase_seg1 - 1);
+ cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(bt->brp - 1));
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* handle restart, but in an asynchronous way
+ * (uses PCAN-USB Pro code to complete asynchronous request)
+ */
+static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
+ struct urb *urb, u8 *buf)
+{
+ u8 *pc = buf;
+
+ /* build the entire cmds list in the provided buffer, to go back into
+ * operational mode.
+ */
+ pc += pcan_usb_fd_build_restart_cmd(dev, pc);
+
+ /* add EOC */
+ memset(pc, 0xff, sizeof(struct pucan_command));
+ pc += sizeof(struct pucan_command);
+
+ /* complete the URB */
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ buf, pc - buf,
+ pcan_usb_pro_restart_complete, dev);
+
+ /* and submit it. */
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int pcan_usb_fd_drv_loaded(struct peak_usb_device *dev, bool loaded)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+
+ pdev->cmd_buffer_addr[0] = 0;
+ pdev->cmd_buffer_addr[1] = !!loaded;
+
+ return pcan_usb_pro_send_req(dev,
+ PCAN_USBPRO_REQ_FCT,
+ PCAN_USBPRO_FCT_DRVLD,
+ pdev->cmd_buffer_addr,
+ PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
+}
+
+static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
+ struct pucan_msg *rx_msg)
+{
+ struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
+ struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
+ struct net_device *netdev = dev->netdev;
+ struct canfd_frame *cfd;
+ struct sk_buff *skb;
+ const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+
+ if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+ /* CANFD frame case */
+ skb = alloc_canfd_skb(netdev, &cfd);
+ if (!skb)
+ return -ENOMEM;
+
+ if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
+ cfd->flags |= CANFD_BRS;
+
+ if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
+ cfd->flags |= CANFD_ESI;
+
+ cfd->len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(rm)));
+ } else {
+ /* CAN 2.0 frame case */
+ skb = alloc_can_skb(netdev, (struct can_frame **)&cfd);
+ if (!skb)
+ return -ENOMEM;
+
+ cfd->len = get_can_dlc(pucan_msg_get_dlc(rm));
+ }
+
+ cfd->can_id = le32_to_cpu(rm->can_id);
+
+ if (rx_msg_flags & PUCAN_MSG_EXT_ID)
+ cfd->can_id |= CAN_EFF_FLAG;
+
+ if (rx_msg_flags & PUCAN_MSG_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cfd->data, rm->d, cfd->len);
+
+ peak_usb_netif_rx(skb, &usb_if->time_ref,
+ le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high));
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += cfd->len;
+
+ return 0;
+}
+
+/* handle uCAN status message */
+static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
+ struct pucan_msg *rx_msg)
+{
+ struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
+ struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+ enum can_state rx_state, tx_state;
+ struct net_device *netdev = dev->netdev;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* nothing should be sent while in BUS_OFF state */
+ if (dev->can.state == CAN_STATE_BUS_OFF)
+ return 0;
+
+ if (sm->channel_p_w_b & PUCAN_BUS_BUSOFF) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (sm->channel_p_w_b & PUCAN_BUS_PASSIVE) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ /* no error bit (so, no error skb, back to active state) */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ pdev->bec.txerr = 0;
+ pdev->bec.rxerr = 0;
+ return 0;
+ }
+
+ /* state hasn't changed */
+ if (new_state == dev->can.state)
+ return 0;
+
+ /* handle bus state change */
+ tx_state = (pdev->bec.txerr >= pdev->bec.rxerr) ? new_state : 0;
+ rx_state = (pdev->bec.txerr <= pdev->bec.rxerr) ? new_state : 0;
+
+ /* allocate an skb to store the error frame */
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (skb)
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+	/* can_bus_off() must still be called even if the skb allocation failed */
+ if (new_state == CAN_STATE_BUS_OFF)
+ can_bus_off(netdev);
+
+ if (!skb)
+ return -ENOMEM;
+
+ peak_usb_netif_rx(skb, &usb_if->time_ref,
+ le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high));
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+/* handle uCAN error message */
+static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
+ struct pucan_msg *rx_msg)
+{
+ struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
+ struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+
+ /* keep a trace of tx and rx error counters for later use */
+ pdev->bec.txerr = er->tx_err_cnt;
+ pdev->bec.rxerr = er->rx_err_cnt;
+
+ return 0;
+}
+
+/* handle uCAN overrun message */
+static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
+ struct pucan_msg *rx_msg)
+{
+ struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
+ struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+ struct net_device *netdev = dev->netdev;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* allocate an skb to store the error frame */
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+
+ peak_usb_netif_rx(skb, &usb_if->time_ref,
+ le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high));
+
+ netdev->stats.rx_over_errors++;
+ netdev->stats.rx_errors++;
+
+ return 0;
+}
+
+/* handle USB calibration message */
+static void pcan_usb_fd_decode_ts(struct pcan_usb_fd_if *usb_if,
+ struct pucan_msg *rx_msg)
+{
+ struct pcan_ufd_ts_msg *ts = (struct pcan_ufd_ts_msg *)rx_msg;
+
+ /* should wait until clock is stabilized */
+ if (usb_if->cm_ignore_count > 0)
+ usb_if->cm_ignore_count--;
+ else
+ peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts_low));
+}
+
+/* callback for bulk IN urb */
+static int pcan_usb_fd_decode_buf(struct peak_usb_device *dev, struct urb *urb)
+{
+ struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+ struct net_device *netdev = dev->netdev;
+ struct pucan_msg *rx_msg;
+ u8 *msg_ptr, *msg_end;
+ int err = 0;
+
+ /* loop reading all the records from the incoming message */
+ msg_ptr = urb->transfer_buffer;
+ msg_end = urb->transfer_buffer + urb->actual_length;
+ for (; msg_ptr < msg_end;) {
+ u16 rx_msg_type, rx_msg_size;
+
+ rx_msg = (struct pucan_msg *)msg_ptr;
+ if (!rx_msg->size) {
+ /* null packet found: end of list */
+ break;
+ }
+
+ rx_msg_size = le16_to_cpu(rx_msg->size);
+ rx_msg_type = le16_to_cpu(rx_msg->type);
+
+ /* check if the record goes out of current packet */
+ if (msg_ptr + rx_msg_size > msg_end) {
+ netdev_err(netdev,
+ "got frag rec: should inc usb rx buf sze\n");
+ err = -EBADMSG;
+ break;
+ }
+
+ switch (rx_msg_type) {
+ case PUCAN_MSG_CAN_RX:
+ err = pcan_usb_fd_decode_canmsg(usb_if, rx_msg);
+ if (err < 0)
+ goto fail;
+ break;
+
+ case PCAN_UFD_MSG_CALIBRATION:
+ pcan_usb_fd_decode_ts(usb_if, rx_msg);
+ break;
+
+ case PUCAN_MSG_ERROR:
+ err = pcan_usb_fd_decode_error(usb_if, rx_msg);
+ if (err < 0)
+ goto fail;
+ break;
+
+ case PUCAN_MSG_STATUS:
+ err = pcan_usb_fd_decode_status(usb_if, rx_msg);
+ if (err < 0)
+ goto fail;
+ break;
+
+ case PCAN_UFD_MSG_OVERRUN:
+ err = pcan_usb_fd_decode_overrun(usb_if, rx_msg);
+ if (err < 0)
+ goto fail;
+ break;
+
+ default:
+ netdev_err(netdev,
+ "unhandled msg type 0x%02x (%d): ignored\n",
+ rx_msg_type, rx_msg_type);
+ break;
+ }
+
+ msg_ptr += rx_msg_size;
+ }
+
+fail:
+ if (err)
+ pcan_dump_mem("received msg",
+ urb->transfer_buffer, urb->actual_length);
+ return err;
+}
+
+/* CAN/CANFD frames encoding callback */
+static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
+ struct sk_buff *skb, u8 *obuf, size_t *size)
+{
+ struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ u16 tx_msg_size, tx_msg_flags;
+ u8 can_dlc;
+
+ tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
+ tx_msg->size = cpu_to_le16(tx_msg_size);
+ tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+
+ tx_msg_flags = 0;
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ tx_msg_flags |= PUCAN_MSG_EXT_ID;
+ tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK);
+ } else {
+ tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK);
+ }
+
+ if (can_is_canfd_skb(skb)) {
+ /* considering a CANFD frame */
+ can_dlc = can_len2dlc(cfd->len);
+
+ tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
+
+ if (cfd->flags & CANFD_BRS)
+ tx_msg_flags |= PUCAN_MSG_BITRATE_SWITCH;
+
+ if (cfd->flags & CANFD_ESI)
+ tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
+ } else {
+		/* CAN 2.0 frames */
+ can_dlc = cfd->len;
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ tx_msg_flags |= PUCAN_MSG_RTR;
+ }
+
+ tx_msg->flags = cpu_to_le16(tx_msg_flags);
+ tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc);
+ memcpy(tx_msg->d, cfd->data, cfd->len);
+
+	/* add a null-size message to tag the end (messages are 32-bit aligned)
+ */
+ tx_msg = (struct pucan_tx_msg *)(obuf + tx_msg_size);
+
+ tx_msg->size = 0;
+
+ /* set the whole size of the USB packet to send */
+ *size = tx_msg_size + sizeof(u32);
+
+ return 0;
+}
+
+/* start the interface (last chance before set bus on) */
+static int pcan_usb_fd_start(struct peak_usb_device *dev)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ int err;
+
+ /* set filter mode: all acceptance */
+ err = pcan_usb_fd_set_filter_std(dev, -1, 0xffffffff);
+ if (err)
+ return err;
+
+ /* opening first device: */
+ if (pdev->usb_if->dev_opened_count == 0) {
+ /* reset time_ref */
+ peak_usb_init_time_ref(&pdev->usb_if->time_ref,
+ &pcan_usb_pro_fd);
+
+ /* enable USB calibration messages */
+ err = pcan_usb_fd_set_filter_ext(dev, 1,
+ PUCAN_FLTEXT_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
+ }
+
+ pdev->usb_if->dev_opened_count++;
+
+ /* reset cached error counters */
+ pdev->bec.txerr = 0;
+ pdev->bec.rxerr = 0;
+
+ return err;
+}
+
+/* socket callback used to copy berr counter values received through USB */
+static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+
+ *bec = pdev->bec;
+
+ /* must return 0 */
+ return 0;
+}
+
+/* stop interface (last chance before set bus off) */
+static int pcan_usb_fd_stop(struct peak_usb_device *dev)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+
+ /* turn off special msgs for that interface if no other dev opened */
+ if (pdev->usb_if->dev_opened_count == 1)
+ pcan_usb_fd_set_filter_ext(dev, 0,
+ PUCAN_FLTEXT_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
+ pdev->usb_if->dev_opened_count--;
+
+ return 0;
+}
+
+/* called when probing, to initialize a device object */
+static int pcan_usb_fd_init(struct peak_usb_device *dev)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ int i, err = -ENOMEM;
+
+ /* do this for 1st channel only */
+ if (!dev->prev_siblings) {
+ /* allocate netdevices common structure attached to first one */
+ pdev->usb_if = kzalloc(sizeof(*pdev->usb_if), GFP_KERNEL);
+ if (!pdev->usb_if)
+ goto err_out;
+
+		/* allocate the command buffer once for the whole interface */
+ pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!pdev->cmd_buffer_addr)
+ goto err_out_1;
+
+ /* number of ts msgs to ignore before taking one into account */
+ pdev->usb_if->cm_ignore_count = 5;
+
+ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ &pdev->usb_if->fw_info,
+ sizeof(pdev->usb_if->fw_info));
+ if (err) {
+ dev_err(dev->netdev->dev.parent,
+ "unable to read %s firmware info (err %d)\n",
+ dev->adapter->name, err);
+ goto err_out_2;
+ }
+
+ /* explicit use of dev_xxx() instead of netdev_xxx() here:
+		 * the information displayed relates to the device itself, not
+		 * to the canx (channel) device.
+ */
+ dev_info(dev->netdev->dev.parent,
+ "PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
+ dev->adapter->name, pdev->usb_if->fw_info.hw_version,
+ pdev->usb_if->fw_info.fw_version[0],
+ pdev->usb_if->fw_info.fw_version[1],
+ pdev->usb_if->fw_info.fw_version[2],
+ dev->adapter->ctrl_count);
+
+ /* the currently supported hw is non-ISO */
+ dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+ /* tell the hardware the can driver is running */
+ err = pcan_usb_fd_drv_loaded(dev, 1);
+ if (err) {
+ dev_err(dev->netdev->dev.parent,
+ "unable to tell %s driver is loaded (err %d)\n",
+ dev->adapter->name, err);
+ goto err_out_2;
+ }
+ } else {
+ /* otherwise, simply copy previous sibling's values */
+ struct pcan_usb_fd_device *ppdev =
+ container_of(dev->prev_siblings,
+ struct pcan_usb_fd_device, dev);
+
+ pdev->usb_if = ppdev->usb_if;
+ pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
+ }
+
+ pdev->usb_if->dev[dev->ctrl_idx] = dev;
+ dev->device_number =
+ le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+
+ /* set clock domain */
+ for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
+ if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
+ break;
+
+ if (i >= ARRAY_SIZE(pcan_usb_fd_clk_freq)) {
+ dev_warn(dev->netdev->dev.parent,
+ "incompatible clock frequencies\n");
+ err = -EINVAL;
+ goto err_out_2;
+ }
+
+ pcan_usb_fd_set_clock_domain(dev, i);
+
+ /* set LED in default state (end of init phase) */
+ pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF);
+
+ return 0;
+
+err_out_2:
+ kfree(pdev->cmd_buffer_addr);
+err_out_1:
+ kfree(pdev->usb_if);
+err_out:
+ return err;
+}
+
+/* called when driver module is being unloaded */
+static void pcan_usb_fd_exit(struct peak_usb_device *dev)
+{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+
+ /* if rmmod is called before unplug and the interface is down, reset
+ * things before leaving
+ */
+ if (dev->can.state != CAN_STATE_STOPPED) {
+ /* set bus off on the corresponding channel */
+ pcan_usb_fd_set_bus(dev, 0);
+ }
+
+ /* switch off corresponding CAN LEDs */
+ pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_OFF);
+
+ /* if channel #0 (only) */
+ if (dev->ctrl_idx == 0) {
+ /* turn off calibration messages if any devices were opened */
+ if (pdev->usb_if->dev_opened_count > 0)
+ pcan_usb_fd_set_filter_ext(dev, 0,
+ PUCAN_FLTEXT_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
+
+ /* tell USB adapter that the driver is being unloaded */
+ pcan_usb_fd_drv_loaded(dev, 0);
+ }
+}
+
+/* called when the USB adapter is unplugged */
+static void pcan_usb_fd_free(struct peak_usb_device *dev)
+{
+ /* last device: can free shared objects now */
+ if (!dev->prev_siblings && !dev->next_siblings) {
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+
+ /* free commands buffer */
+ kfree(pdev->cmd_buffer_addr);
+
+ /* free usb interface object */
+ kfree(pdev->usb_if);
+ }
+}
+
+/* describes the PCAN-USB FD adapter */
+const struct peak_usb_adapter pcan_usb_fd = {
+ .name = "PCAN-USB FD",
+ .device_id = PCAN_USBFD_PRODUCT_ID,
+ .ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+ .ctrlmode_supported = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .clock = {
+ .freq = PCAN_UFD_CRYSTAL_HZ,
+ },
+ .bittiming_const = {
+ .name = "pcan_usb_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+ },
+ .data_bittiming_const = {
+ .name = "pcan_usb_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+ },
+
+ /* size of device private data */
+ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+ /* timestamps usage */
+ .ts_used_bits = 32,
+ .ts_period = 1000000, /* calibration period in ts. */
+ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+ .us_per_ts_shift = 0,
+
+ /* message IN/OUT endpoints */
+ .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+ .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+ /* size of rx/tx usb buffers */
+ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+ .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+ /* device callbacks */
+ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .dev_init = pcan_usb_fd_init,
+
+ .dev_exit = pcan_usb_fd_exit,
+ .dev_free = pcan_usb_fd_free,
+ .dev_set_bus = pcan_usb_fd_set_bus,
+ .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+ .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_decode_buf = pcan_usb_fd_decode_buf,
+ .dev_start = pcan_usb_fd_start,
+ .dev_stop = pcan_usb_fd_stop,
+ .dev_restart_async = pcan_usb_fd_restart_async,
+ .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+ .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
+/* describes the PCAN-USB Pro FD adapter */
+const struct peak_usb_adapter pcan_usb_pro_fd = {
+ .name = "PCAN-USB Pro FD",
+ .device_id = PCAN_USBPROFD_PRODUCT_ID,
+ .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
+ .ctrlmode_supported = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .clock = {
+ .freq = PCAN_UFD_CRYSTAL_HZ,
+ },
+ .bittiming_const = {
+ .name = "pcan_usb_pro_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+ },
+ .data_bittiming_const = {
+ .name = "pcan_usb_pro_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+ },
+
+ /* size of device private data */
+ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+ /* timestamps usage */
+ .ts_used_bits = 32,
+ .ts_period = 1000000, /* calibration period in ts. */
+ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+ .us_per_ts_shift = 0,
+
+ /* message IN/OUT endpoints */
+ .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+ .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
+
+ /* size of rx/tx usb buffers */
+ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+ .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+ /* device callbacks */
+ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .dev_init = pcan_usb_fd_init,
+
+ .dev_exit = pcan_usb_fd_exit,
+ .dev_free = pcan_usb_fd_free,
+ .dev_set_bus = pcan_usb_fd_set_bus,
+ .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+ .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_decode_buf = pcan_usb_fd_decode_buf,
+ .dev_start = pcan_usb_fd_start,
+ .dev_stop = pcan_usb_fd_stop,
+ .dev_restart_async = pcan_usb_fd_restart_async,
+ .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+ .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 4cfa3b8605b1..dec51717635e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -27,14 +27,6 @@
MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter");
-/* PCAN-USB Pro Endpoints */
-#define PCAN_USBPRO_EP_CMDOUT 1
-#define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
-#define PCAN_USBPRO_EP_MSGOUT_0 2
-#define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
-#define PCAN_USBPRO_EP_MSGOUT_1 3
-#define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
-
#define PCAN_USBPRO_CHANNEL_COUNT 2
/* PCAN-USB Pro adapter internal clock (MHz) */
@@ -322,8 +314,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err;
}
-static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
- int req_value, void *req_addr, int req_size)
+int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+ int req_value, void *req_addr, int req_size)
{
int err;
u8 req_type;
@@ -475,7 +467,7 @@ static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
return pcan_usb_pro_set_bitrate(dev, ccbt);
}
-static void pcan_usb_pro_restart_complete(struct urb *urb)
+void pcan_usb_pro_restart_complete(struct urb *urb)
{
/* can delete usb resources */
peak_usb_async_complete(urb);
@@ -634,6 +626,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
switch (new_state) {
case CAN_STATE_BUS_OFF:
can_frame->can_id |= CAN_ERR_BUSOFF;
+ dev->can.can_stats.bus_off++;
can_bus_off(netdev);
break;
@@ -977,7 +970,7 @@ static void pcan_usb_pro_free(struct peak_usb_device *dev)
/*
* probe function for new PCAN-USB Pro usb interface
*/
-static int pcan_usb_pro_probe(struct usb_interface *intf)
+int pcan_usb_pro_probe(struct usb_interface *intf)
{
struct usb_host_interface *if_desc;
int i;
@@ -1011,10 +1004,11 @@ static int pcan_usb_pro_probe(struct usb_interface *intf)
/*
* describe the PCAN-USB Pro adapter
*/
-struct peak_usb_adapter pcan_usb_pro = {
+const struct peak_usb_adapter pcan_usb_pro = {
.name = "PCAN-USB Pro",
.device_id = PCAN_USBPRO_PRODUCT_ID,
.ctrl_count = PCAN_USBPRO_CHANNEL_COUNT,
+ .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
.clock = {
.freq = PCAN_USBPRO_CRYSTAL_HZ,
},
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 837cee267132..a62f7ab8980f 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -27,6 +27,14 @@
#define PCAN_USBPRO_INFO_BL 0
#define PCAN_USBPRO_INFO_FW 1
+/* PCAN-USB Pro (FD) Endpoints */
+#define PCAN_USBPRO_EP_CMDOUT 1
+#define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
+#define PCAN_USBPRO_EP_MSGOUT_0 2
+#define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
+#define PCAN_USBPRO_EP_MSGOUT_1 3
+#define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
+
/* Vendor Request value for XXX_FCT */
#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */
#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16
@@ -176,4 +184,9 @@ union pcan_usb_pro_rec {
struct pcan_usb_pro_txmsg tx_msg;
};
+int pcan_usb_pro_probe(struct usb_interface *intf);
+int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+ int req_value, void *req_addr, int req_size);
+void pcan_usb_pro_restart_complete(struct urb *urb);
+
#endif
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index ef674ecb82f8..dd52c7a4c80d 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -377,6 +377,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
case USB_8DEV_STATUSMSG_BUSOFF:
priv->can.state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.can_stats.bus_off++;
can_bus_off(priv->netdev);
break;
case USB_8DEV_STATUSMSG_OVERRUN:
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index feb29c4526f7..4daffb284931 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -233,6 +233,35 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
core_writel(priv, reg, CORE_EEE_EN_CTRL);
}
+static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ u32 reg;
+
+ reg = reg_readl(priv, REG_SPHY_CNTRL);
+ if (enable) {
+ reg |= PHY_RESET;
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+ reg_writel(priv, reg, REG_SPHY_CNTRL);
+ udelay(21);
+ reg = reg_readl(priv, REG_SPHY_CNTRL);
+ reg &= ~PHY_RESET;
+ } else {
+ reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
+ reg_writel(priv, reg, REG_SPHY_CNTRL);
+ mdelay(1);
+ reg |= CK25_DIS;
+ }
+ reg_writel(priv, reg, REG_SPHY_CNTRL);
+
+ /* Use PHY-driven LED signaling */
+ if (!enable) {
+ reg = reg_readl(priv, REG_LED_CNTRL(0));
+ reg |= SPDLNK_SRC_SEL;
+ reg_writel(priv, reg, REG_LED_CNTRL(0));
+ }
+}
+
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
@@ -248,6 +277,24 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
/* Clear the Rx and Tx disable bits and set to no spanning tree */
core_writel(priv, 0, CORE_G_PCTL_PORT(port));
+ /* Re-enable the GPHY and re-apply workarounds */
+ if (port == 0 && priv->hw_params.num_gphy == 1) {
+ bcm_sf2_gphy_enable_set(ds, true);
+ if (phy) {
+ /* if phy_stop() has been called before, the phy
+ * will be in the halted state, and phy_start()
+ * will call resume.
+ *
+ * The resume path does not restore the autoneg
+ * settings, and since we hard reset the phy
+ * manually here, we also need to reset the
+ * state machine.
+ */
+ phy->state = PHY_READY;
+ phy_init_hw(phy);
+ }
+ }
+
/* Enable port 7 interrupts to get notified */
if (port == 7)
intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
@@ -281,6 +328,9 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
}
+ if (port == 0 && priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(ds, false);
+
if (dsa_is_cpu_port(ds, port))
off = CORE_IMP_CTL;
else
@@ -400,6 +450,16 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
return 0;
}
+static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
+{
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+}
+
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -440,12 +500,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
}
/* Disable all interrupts and request them */
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ bcm_sf2_intr_disable(priv);
ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
"switch_0", priv);
@@ -747,12 +802,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = ds_to_priv(ds);
unsigned int port;
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ bcm_sf2_intr_disable(priv);
/* Disable all ports physically present including the IMP
* port, the other ones have already been disabled during
@@ -771,7 +821,6 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
unsigned int port;
- u32 reg;
int ret;
ret = bcm_sf2_sw_rst(priv);
@@ -780,17 +829,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
return ret;
}
- /* Reinitialize the single GPHY */
- if (priv->hw_params.num_gphy == 1) {
- reg = reg_readl(priv, REG_SPHY_CNTRL);
- reg |= PHY_RESET;
- reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS);
- reg_writel(priv, reg, REG_SPHY_CNTRL);
- udelay(21);
- reg = reg_readl(priv, REG_SPHY_CNTRL);
- reg &= ~PHY_RESET;
- reg_writel(priv, reg, REG_SPHY_CNTRL);
- }
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(ds, true);
for (port = 0; port < DSA_MAX_PORTS; port++) {
if ((1 << port) & ds->phys_port_mask)
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 1bb49cb699ab..cabdfa5e217a 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -61,6 +61,10 @@
#define LPI_COUNT_SHIFT 9
#define LPI_COUNT_MASK 0x3F
+#define REG_LED_CNTRL_BASE 0x90
+#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4)
+#define SPDLNK_SRC_SEL (1 << 24)
+
/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
#define INTRL2_CPU_STATUS 0x00
#define INTRL2_CPU_SET 0x04
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 1230f52aa70e..2540ef0142af 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -139,7 +139,8 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
int nexthop;
nexthop = 0x1f;
- if (i != ds->index && i < ds->dst->pd->nr_chips)
+ if (ds->pd->rtable &&
+ i != ds->index && i < ds->dst->pd->nr_chips)
nexthop = ds->pd->rtable[i] & 0x1f;
REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 258d9ef5ef25..e13adc7b3dda 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -22,17 +22,14 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
+static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
unsigned long timeout = jiffies + HZ / 10;
while (time_before(jiffies, timeout)) {
int ret;
- ret = REG_READ(REG_GLOBAL2, reg);
- if (ret < 0)
- return ret;
-
+ ret = REG_READ(reg, offset);
if (!(ret & mask))
return 0;
@@ -43,17 +40,17 @@ static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
{
- return mv88e6352_wait(ds, 0x18, 0x8000);
+ return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
}
static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
{
- return mv88e6352_wait(ds, 0x14, 0x0800);
+ return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
}
static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
{
- return mv88e6352_wait(ds, 0x14, 0x8000);
+ return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
}
static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cd6807c6b4ed..3e7e31a6abb7 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -85,6 +85,12 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
mutex_unlock(&ps->smi_mutex);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, ret);
+
return ret;
}
@@ -128,6 +134,9 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
if (bus == NULL)
return -EINVAL;
+ dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, val);
+
mutex_lock(&ps->smi_mutex);
ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
mutex_unlock(&ps->smi_mutex);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index dede43f4ce09..8f8418d2ac4a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
}
- if(vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
- cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+ cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b68074803de3..b90a26b13fdf 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2429,9 +2429,9 @@ restart:
flagsize = (skb->len << 16) | (BD_FLG_END);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
}
desc = ap->tx_ring + idx;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2450,9 +2450,9 @@ restart:
flagsize = (skb_headlen(skb) << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
}
ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 7a5e4aa5415e..c638c85f3954 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -45,7 +45,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !ARM
---help---
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !ARM
---help---
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on OF_NET && HAS_IOMEM
+ depends on (OF_NET || ACPI) && HAS_IOMEM
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 841e6558db68..4c2ae2221780 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
lp->tx_ring[tx_index].tx_flags = 0;
#if AMD8111E_VLAN_TAG_USED
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
lp->tx_ring[tx_index].tag_ctrl_cmd |=
cpu_to_le16(TCC_VLAN_INSERT);
lp->tx_ring[tx_index].tag_ctrl_info =
- cpu_to_le16(vlan_tx_tag_get(skb));
+ cpu_to_le16(skb_vlan_tag_get(skb));
}
#endif
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index e07ce5ff2d48..b10964e8cb54 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -553,8 +553,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
if (lp->cardtype == PAM_CARD ||
memaddr == (unsigned short *)0xffe00000) {
/* PAMs card and Riebl on ST use level 5 autovector */
- if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
- "PAM,Riebl-ST Ethernet", dev)) {
+ if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
+ "PAM,Riebl-ST Ethernet", dev)) {
printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
return 0;
}
@@ -567,8 +567,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
printk( "Lance: request for VME interrupt failed\n" );
return 0;
}
- if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
- "Riebl-VME Ethernet", dev)) {
+ if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
+ dev)) {
printk( "Lance: request for irq %u failed\n", irq );
return 0;
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 5b22764ba88d..27245efe9f50 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
do {
/* WARNING: MACE_IR is a READ/CLEAR port! */
status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+ if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+ return IRQ_NONE;
pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2e3aaf501a2..11d6e6561df1 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2806,7 +2806,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
/*
* Check for loss of link and link establishment.
- * Can not use mii_check_media because it does nothing if mode is forced.
+ * Could possibly be changed to use mii_check_media instead.
*/
static void pcnet32_watchdog(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
#define MTL_Q_RQDR 0x4c
+#define MTL_Q_RQFCR 0x50
#define MTL_Q_IER 0x70
#define MTL_Q_ISR 0x74
/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
#define MTL_Q_RQOMR_EHFC_INDEX 7
#define MTL_Q_RQOMR_EHFC_WIDTH 1
-#define MTL_Q_RQOMR_RFA_INDEX 8
-#define MTL_Q_RQOMR_RFA_WIDTH 3
-#define MTL_Q_RQOMR_RFD_INDEX 13
-#define MTL_Q_RQOMR_RFD_WIDTH 3
#define MTL_Q_RQOMR_RQS_INDEX 16
#define MTL_Q_RQOMR_RQS_WIDTH 9
#define MTL_Q_RQOMR_RSF_INDEX 5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 76479d04b903..2c063b60db4b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -328,7 +328,7 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
- if (pdata->xgbe_debugfs == NULL) {
+ if (!pdata->xgbe_debugfs) {
netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
return;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index a50891f52197..d81fc6bd4759 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -422,7 +422,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
- memset(&ring->rx, 0, sizeof(ring->rx));
hw_if->rx_desc_init(channel);
}
@@ -621,35 +620,6 @@ err_out:
return 0;
}
-static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
-{
- struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_ring *ring = channel->rx_ring;
- struct xgbe_ring_data *rdata;
- int i;
-
- DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
- ring->rx.realloc_index);
-
- for (i = 0; i < ring->dirty; i++) {
- rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
-
- /* Reset rdata values */
- xgbe_unmap_rdata(pdata, rdata);
-
- if (xgbe_map_rx_buffer(pdata, ring, rdata))
- break;
-
- hw_if->rx_desc_reset(rdata);
-
- ring->rx.realloc_index++;
- }
- ring->dirty = 0;
-
- DBGPR("<--xgbe_realloc_rx_buffer\n");
-}
-
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
DBGPR("-->xgbe_init_function_ptrs_desc\n");
@@ -657,7 +627,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
desc_if->free_ring_resources = xgbe_free_ring_resources;
desc_if->map_tx_skb = xgbe_map_tx_skb;
- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+ desc_if->map_rx_buffer = xgbe_map_rx_buffer;
desc_if->unmap_rdata = xgbe_unmap_rdata;
desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..400757b49872 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -115,6 +115,7 @@
*/
#include <linux/phy.h>
+#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
@@ -130,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
DBGPR("-->xgbe_usec_to_riwt\n");
- rate = clk_get_rate(pdata->sysclk);
+ rate = pdata->sysclk_rate;
/*
* Convert the input usec value to the watchdog timer value. Each
@@ -153,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
DBGPR("-->xgbe_riwt_to_usec\n");
- rate = clk_get_rate(pdata->sysclk);
+ rate = pdata->sysclk_rate;
/*
* Convert the input watchdog timer value to the usec value. Each
@@ -673,6 +674,9 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
+ return 0;
+
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
return 0;
@@ -680,6 +684,9 @@ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
+ return 0;
+
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
return 0;
@@ -687,6 +694,9 @@ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
+ return 0;
+
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
return 0;
@@ -881,6 +891,23 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ /* If the PCS is changing modes, match the MAC speed to it */
+ if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
+ ((mmd_address & 0xffff) == MDIO_CTRL2)) {
+ struct phy_device *phydev = pdata->phydev;
+
+ if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
+ /* KX mode */
+ if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ xgbe_set_gmii_speed(pdata);
+ else
+ xgbe_set_gmii_2500_speed(pdata);
+ } else {
+ /* KR mode */
+ xgbe_set_xgmii_speed(pdata);
+ }
+ }
+
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1359,6 +1386,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
unsigned int tso_context, vlan_context;
unsigned int tx_set_ic;
int start_index = ring->cur;
+ int cur_index = ring->cur;
int i;
DBGPR("-->xgbe_dev_xmit\n");
@@ -1401,7 +1429,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
else
tx_set_ic = 0;
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
rdesc = rdata->rdesc;
/* Create a context descriptor if this is a TSO packet */
@@ -1444,8 +1472,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
ring->tx.cur_vlan_ctag = packet->vlan_ctag;
}
- ring->cur++;
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
rdesc = rdata->rdesc;
}
@@ -1473,7 +1501,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
/* Set OWN bit if not the first descriptor */
- if (ring->cur != start_index)
+ if (cur_index != start_index)
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
if (tso) {
@@ -1497,9 +1525,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
packet->length);
}
- for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
- ring->cur++;
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
rdesc = rdata->rdesc;
/* Update buffer address */
@@ -1551,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
/* Make sure ownership is written to the descriptor */
wmb();
- ring->cur++;
+ ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
@@ -2079,10 +2107,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++) {
/* Activate flow control when less than 4k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
/* De-activate flow control when more than 6k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
}
}
@@ -2107,6 +2135,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}
+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->phy_speed) {
+ case SPEED_10000:
+ xgbe_set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ xgbe_set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ xgbe_set_gmii_speed(pdata);
+ break;
+ }
+}
+
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
if (pdata->netdev->features & NETIF_F_RXCSUM)
@@ -2757,6 +2802,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_mac_address(pdata);
xgbe_config_jumbo_enable(pdata);
xgbe_config_flow_control(pdata);
+ xgbe_config_mac_speed(pdata);
xgbe_config_checksum_offload(pdata);
xgbe_config_vlan_support(pdata);
xgbe_config_mmc(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7bb5f07dbeef..b93d4404d975 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -225,6 +225,11 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
return (ring->rdesc_count - (ring->cur - ring->dirty));
}
+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
+
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
struct xgbe_ring *ring, unsigned int count)
{
@@ -337,12 +342,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
- /* If we get a TI or RI interrupt that means per channel DMA
- * interrupts are not enabled, so we use the private data napi
- * structure, not the per channel napi structure
+ /* The TI or RI interrupt bits may still be set even if using
+ * per channel DMA interrupts. Check to be sure those are not
+ * enabled before using the private data napi structure.
*/
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
+ if (!pdata->per_channel_irq &&
+ (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
if (napi_schedule_prep(&pdata->napi)) {
/* Disable Tx and Rx interrupts */
xgbe_disable_rx_tx_ints(pdata);
@@ -410,17 +416,13 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
struct xgbe_channel *channel = container_of(timer,
struct xgbe_channel,
tx_timer);
- struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
- unsigned long flags;
DBGPR("-->xgbe_tx_timer\n");
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
- spin_lock_irqsave(&ring->lock, flags);
-
if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
if (pdata->per_channel_irq)
@@ -434,8 +436,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
channel->tx_timer_active = 0;
- spin_unlock_irqrestore(&ring->lock, flags);
-
DBGPR("<--xgbe_tx_timer\n");
return HRTIMER_NORESTART;
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
HASHTBLSZ);
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
break;
}
- /* The Queue and Channel counts are zero based so increment them
+ /* The Queue, Channel and TC counts are zero based so increment them
* to get the actual number
*/
hw_feat->rx_q_cnt++;
hw_feat->tx_q_cnt++;
hw_feat->rx_ch_cnt++;
hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
DBGPR("<--xgbe_get_all_hw_features\n");
}
@@ -692,7 +694,7 @@ static void xgbe_adjust_link(struct net_device *netdev)
struct phy_device *phydev = pdata->phydev;
int new_state = 0;
- if (phydev == NULL)
+ if (!phydev)
return;
if (phydev->link) {
@@ -927,7 +929,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_stop\n");
}
-static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -950,9 +952,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
- /* Issue software reset to device if requested */
- if (reset)
- hw_if->exit(pdata);
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
xgbe_start(pdata);
@@ -967,7 +968,7 @@ static void xgbe_restart(struct work_struct *work)
rtnl_lock();
- xgbe_restart_dev(pdata, 1);
+ xgbe_restart_dev(pdata);
rtnl_unlock();
}
@@ -1165,8 +1166,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
- if (vlan_tx_tag_present(skb))
- packet->vlan_ctag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb))
+ packet->vlan_ctag = skb_vlan_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1247,9 +1248,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE, 1);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/* VLAN requires an extra descriptor if tag is different */
- if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
/* We can share with the TSO context descriptor */
if (!context_desc) {
context_desc = 1;
@@ -1446,7 +1447,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
struct netdev_queue *txq;
- unsigned long flags;
int ret;
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
@@ -1458,8 +1458,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
- spin_lock_irqsave(&ring->lock, flags);
-
if (skb->len == 0) {
netdev_err(netdev, "empty skb received from stack\n");
dev_kfree_skb_any(skb);
@@ -1506,10 +1504,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
tx_netdev_return:
- spin_unlock_irqrestore(&ring->lock, flags);
-
- DBGPR("<--xgbe_xmit\n");
-
return ret;
}
@@ -1587,7 +1581,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
pdata->rx_buf_size = ret;
netdev->mtu = mtu;
- xgbe_restart_dev(pdata, 0);
+ xgbe_restart_dev(pdata);
DBGPR("<--xgbe_change_mtu\n");
@@ -1776,15 +1770,28 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- desc_if->realloc_rx_buffer(channel);
+ while (ring->dirty != ring->cur) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset rdata values */
+ desc_if->unmap_rdata(pdata, rdata);
+
+ if (desc_if->map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ hw_if->rx_desc_reset(rdata);
+
+ ring->dirty++;
+ }
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
- rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
lower_32_bits(rdata->rdesc_dma));
}
@@ -1824,7 +1831,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
struct xgbe_ring_desc *rdesc;
struct net_device *netdev = pdata->netdev;
struct netdev_queue *txq;
- unsigned long flags;
int processed = 0;
unsigned int tx_packets = 0, tx_bytes = 0;
@@ -1836,8 +1842,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
txq = netdev_get_tx_queue(netdev, channel->queue_index);
- spin_lock_irqsave(&ring->lock, flags);
-
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
(ring->dirty != ring->cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
@@ -1868,7 +1872,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
}
if (!processed)
- goto unlock;
+ return 0;
netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
@@ -1880,9 +1884,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
-unlock:
- spin_unlock_irqrestore(&ring->lock, flags);
-
return processed;
}
@@ -1934,7 +1935,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
read_again:
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+ if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
xgbe_rx_refresh(channel);
if (hw_if->dev_read(channel))
@@ -1942,7 +1943,6 @@ read_again:
received++;
ring->cur++;
- ring->dirty++;
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index dbd3850b8b0a..32dd65137051 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -123,7 +123,10 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
#include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -148,6 +151,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
pdata->pause_autoneg = 1;
pdata->tx_pause = 1;
pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_UNKNOWN;
pdata->power_down = 0;
pdata->default_autoneg = AUTONEG_ENABLE;
pdata->default_speed = SPEED_10000;
@@ -161,6 +165,96 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ struct acpi_device *adev = pdata->adev;
+ struct device *dev = pdata->dev;
+ u32 property;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long data;
+ int cca;
+ int ret;
+
+ /* Obtain the system clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_DMA_FREQ);
+ return ret;
+ }
+ pdata->sysclk_rate = property;
+
+ /* Obtain the PTP clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_PTP_FREQ);
+ return ret;
+ }
+ pdata->ptpclk_rate = property;
+
+ /* Retrieve the device cache coherency value */
+ handle = adev->handle;
+ do {
+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+ if (!ACPI_FAILURE(status)) {
+ cca = data;
+ break;
+ }
+
+ status = acpi_get_parent(handle, &handle);
+ } while (!ACPI_FAILURE(status));
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "error obtaining acpi coherency value\n");
+ return -EINVAL;
+ }
+ pdata->coherent = !!cca;
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+
+ /* Obtain the system clock setting */
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ return PTR_ERR(pdata->sysclk);
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ return PTR_ERR(pdata->ptpclk);
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ /* Retrieve the device cache coherency value */
+ pdata->coherent = of_dma_is_coherent(dev->of_node);
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
static int xgbe_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
@@ -169,7 +263,7 @@ static int xgbe_probe(struct platform_device *pdev)
struct net_device *netdev;
struct device *dev = &pdev->dev;
struct resource *res;
- const u8 *mac_addr;
+ const char *phy_mode;
unsigned int i;
int ret;
@@ -186,6 +280,7 @@ static int xgbe_probe(struct platform_device *pdev)
pdata = netdev_priv(netdev);
pdata->netdev = netdev;
pdata->pdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
pdata->dev = dev;
platform_set_drvdata(pdev, netdev);
@@ -194,6 +289,9 @@ static int xgbe_probe(struct platform_device *pdev)
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
+ /* Check if we should use ACPI or DT */
+ pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -212,22 +310,6 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
}
- /* Obtain the system clock setting */
- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
- if (IS_ERR(pdata->sysclk)) {
- dev_err(dev, "dma devm_clk_get failed\n");
- ret = PTR_ERR(pdata->sysclk);
- goto err_io;
- }
-
- /* Obtain the PTP clock setting */
- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
- if (IS_ERR(pdata->ptpclk)) {
- dev_err(dev, "ptp devm_clk_get failed\n");
- ret = PTR_ERR(pdata->ptpclk);
- goto err_io;
- }
-
/* Obtain the mmio areas for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->xgmac_regs = devm_ioremap_resource(dev, res);
@@ -247,16 +329,42 @@ static int xgbe_probe(struct platform_device *pdev)
}
DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
- /* Set the DMA mask */
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ /* Retrieve the MAC address */
+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+ pdata->mac_addr,
+ sizeof(pdata->mac_addr));
+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
goto err_io;
}
- if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+ &phy_mode);
+ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+ /* Check for per channel interrupt support */
+ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+ pdata->per_channel_irq = 1;
+
+ /* Obtain device settings unique to ACPI/OF */
+ if (pdata->use_acpi)
+ ret = xgbe_acpi_support(pdata);
+ else
+ ret = xgbe_of_support(pdata);
+ if (ret)
+ goto err_io;
+
+ /* Set the DMA coherency values */
+ if (pdata->coherent) {
pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
pdata->arcache = XGBE_DMA_OS_ARCACHE;
pdata->awcache = XGBE_DMA_OS_AWCACHE;
@@ -266,10 +374,16 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
- /* Check for per channel interrupt support */
- if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
- pdata->per_channel_irq = 1;
+ /* Set the DMA mask */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ goto err_io;
+ }
+ /* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "platform_get_irq 0 failed\n");
@@ -279,6 +393,7 @@ static int xgbe_probe(struct platform_device *pdev)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
@@ -291,23 +406,6 @@ static int xgbe_probe(struct platform_device *pdev)
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
- /* Retrieve the MAC address */
- mac_addr = of_get_mac_address(dev->of_node);
- if (!mac_addr) {
- dev_err(dev, "invalid mac address for this device\n");
- ret = -EINVAL;
- goto err_io;
- }
- memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
-
- /* Retrieve the PHY mode - it must be "xgmii" */
- pdata->phy_mode = of_get_phy_mode(dev->of_node);
- if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
- dev_err(dev, "invalid phy-mode specified for this device\n");
- ret = -EINVAL;
- goto err_io;
- }
-
/* Set default configuration data */
xgbe_default_config(pdata);
@@ -491,18 +589,35 @@ static int xgbe_resume(struct device *dev)
}
#endif /* CONFIG_PM */
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+ { "AMDI8001", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a", },
{},
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
+#endif
+
static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
static struct platform_driver xgbe_driver = {
.driver = {
.name = "amd-xgbe",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
+#endif
.pm = &xgbe_pm_ops,
},
.probe = xgbe_probe,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 363b210560f3..59e267f3f1b7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -205,25 +205,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
int xgbe_mdio_register(struct xgbe_prv_data *pdata)
{
- struct device_node *phy_node;
struct mii_bus *mii;
struct phy_device *phydev;
int ret = 0;
DBGPR("-->xgbe_mdio_register\n");
- /* Retrieve the phy-handle */
- phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
- if (!phy_node) {
- dev_err(pdata->dev, "unable to parse phy-handle\n");
- return -EINVAL;
- }
-
mii = mdiobus_alloc();
- if (mii == NULL) {
+ if (!mii) {
dev_err(pdata->dev, "mdiobus_alloc failed\n");
- ret = -ENOMEM;
- goto err_node_get;
+ return -ENOMEM;
}
/* Register on the MDIO bus (don't probe any PHYs) */
@@ -252,18 +243,19 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
- of_node_get(phy_node);
- phydev->dev.of_node = phy_node;
ret = phy_device_register(phydev);
if (ret) {
dev_err(pdata->dev, "phy_device_register failed\n");
- of_node_put(phy_node);
+ goto err_phy_device;
+ }
+ if (!phydev->dev.driver) {
+ dev_err(pdata->dev, "phy driver probe failed\n");
+ ret = -EIO;
goto err_phy_device;
}
/* Add a reference to the PHY driver so it can't be unloaded */
- pdata->phy_module = phydev->dev.driver ?
- phydev->dev.driver->owner : NULL;
+ pdata->phy_module = phydev->dev.driver->owner;
if (!try_module_get(pdata->phy_module)) {
dev_err(pdata->dev, "try_module_get failed\n");
ret = -EIO;
@@ -283,8 +275,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
pdata->phydev = phydev;
- of_node_put(phy_node);
-
DBGPHY_REGS(pdata);
DBGPR("<--xgbe_mdio_register\n");
@@ -300,9 +290,6 @@ err_mdiobus_register:
err_mdiobus_alloc:
mdiobus_free(mii);
-err_node_get:
- of_node_put(phy_node);
-
return ret;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a1bf9d1cdae1..f326178ef376 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -171,15 +171,9 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
- nsec += delta;
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ timecounter_adjtime(&pdata->tstamp_tc, delta);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -239,7 +233,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
info->owner = THIS_MODULE;
- info->max_adj = clk_get_rate(pdata->ptpclk);
+ info->max_adj = pdata->ptpclk_rate;
info->adjfreq = xgbe_adjfreq;
info->adjtime = xgbe_adjtime;
info->gettime = xgbe_gettime;
@@ -260,7 +254,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
*/
dividend = 50000000;
dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
/* Setup the timecounter */
cc->read = xgbe_cc_read;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9ec762ac3f0..13e8f95c077c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -124,7 +124,7 @@
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <net/dcbnl.h>
@@ -182,10 +182,18 @@
#define XGBE_PHY_NAME "amd_xgbe_phy"
#define XGBE_PRTAD 0
+/* Common property names */
+#define XGBE_MAC_ADDR_PROPERTY "mac-address"
+#define XGBE_PHY_MODE_PROPERTY "phy-mode"
+#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
#define XGBE_PTP_CLOCK "ptp_clk"
-#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
+
+/* ACPI property names */
+#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
@@ -361,8 +369,7 @@ struct xgbe_ring {
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
* dirty - Tx: index of descriptor to check for transfer complete
- * Rx: count of descriptors in which a packet has been received
- * (used with skb_realloc_index to refresh the ring)
+ * Rx: index of descriptor to check for buffer reallocation
*/
unsigned int cur;
unsigned int dirty;
@@ -377,11 +384,6 @@ struct xgbe_ring {
unsigned short cur_mss;
unsigned short cur_vlan_ctag;
} tx;
-
- struct {
- unsigned int realloc_index;
- unsigned int realloc_threshold;
- } rx;
};
} ____cacheline_aligned;
@@ -596,7 +598,8 @@ struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
- void (*realloc_rx_buffer)(struct xgbe_channel *);
+ int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
+ struct xgbe_ring_data *);
void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
@@ -650,8 +653,12 @@ struct xgbe_hw_features {
struct xgbe_prv_data {
struct net_device *netdev;
struct platform_device *pdev;
+ struct acpi_device *adev;
struct device *dev;
+ /* ACPI or DT flag */
+ unsigned int use_acpi;
+
/* XGMAC/XPCS related mmio registers */
void __iomem *xgmac_regs; /* XGMAC CSRs */
void __iomem *xpcs_regs; /* XPCS MMD registers */
@@ -672,6 +679,7 @@ struct xgbe_prv_data {
struct xgbe_desc_if desc_if;
/* AXI DMA settings */
+ unsigned int coherent;
unsigned int axdomain;
unsigned int arcache;
unsigned int awcache;
@@ -739,6 +747,7 @@ struct xgbe_prv_data {
unsigned int phy_rx_pause;
/* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
netdev_features_t netdev_features;
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
@@ -748,7 +757,9 @@ struct xgbe_prv_data {
/* Device clocks */
struct clk *sysclk;
+ unsigned long sysclk_rate;
struct clk *ptpclk;
+ unsigned long ptpclk_rate;
/* Timestamp support */
spinlock_t tstamp_lock;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 7ba83ffb08ac..869d97fcf781 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,10 +593,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- clk_prepare_enable(pdata->clk);
- clk_disable_unprepare(pdata->clk);
- clk_prepare_enable(pdata->clk);
- xgene_enet_ecc_init(pdata);
+ if (!efi_enabled(EFI_BOOT)) {
+ clk_prepare_enable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
+ clk_prepare_enable(pdata->clk);
+ xgene_enet_ecc_init(pdata);
+ }
xgene_enet_config_ring_if_assoc(pdata);
/* Enable auto-incr for scanning */
@@ -663,15 +665,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
struct phy_device *phy_dev;
struct device *dev = &pdata->pdev->dev;
- phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (!phy_np) {
- netdev_dbg(ndev, "No phy-handle found\n");
- return -ENODEV;
+ if (dev->of_node) {
+ phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!phy_np) {
+ netdev_dbg(ndev, "No phy-handle found in DT\n");
+ return -ENODEV;
+ }
+ pdata->phy_dev = of_phy_find_device(phy_np);
}
- phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
- 0, pdata->phy_mode);
- if (!phy_dev) {
+ phy_dev = pdata->phy_dev;
+
+ if (!phy_dev ||
+ phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+ pdata->phy_mode)) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
@@ -681,32 +688,71 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
~SUPPORTED_100baseT_Half &
~SUPPORTED_1000baseT_Half;
phy_dev->advertising = phy_dev->supported;
- pdata->phy_dev = phy_dev;
return 0;
}
-int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
+ struct mii_bus *mdio)
{
- struct net_device *ndev = pdata->ndev;
struct device *dev = &pdata->pdev->dev;
+ struct net_device *ndev = pdata->ndev;
+ struct phy_device *phy;
struct device_node *child_np;
struct device_node *mdio_np = NULL;
- struct mii_bus *mdio_bus;
int ret;
+ u32 phy_id;
+
+ if (dev->of_node) {
+ for_each_child_of_node(dev->of_node, child_np) {
+ if (of_device_is_compatible(child_np,
+ "apm,xgene-mdio")) {
+ mdio_np = child_np;
+ break;
+ }
+ }
- for_each_child_of_node(dev->of_node, child_np) {
- if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
- mdio_np = child_np;
- break;
+ if (!mdio_np) {
+ netdev_dbg(ndev, "No mdio node in the dts\n");
+ return -ENXIO;
}
- }
- if (!mdio_np) {
- netdev_dbg(ndev, "No mdio node in the dts\n");
- return -ENXIO;
+ return of_mdiobus_register(mdio, mdio_np);
}
+ /* Mask out all PHYs from auto probing. */
+ mdio->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(mdio);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+ if (ret)
+ ret = device_property_read_u32(dev, "phy-addr", &phy_id);
+ if (ret)
+ return -EINVAL;
+
+ phy = get_phy_device(mdio, phy_id, true);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ ret = phy_device_register(phy);
+ if (ret)
+ phy_device_free(phy);
+ else
+ pdata->phy_dev = phy;
+
+ return ret;
+}
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+{
+ struct net_device *ndev = pdata->ndev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
mdio_bus = mdiobus_alloc();
if (!mdio_bus)
return -ENOMEM;
@@ -720,7 +766,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
mdio_bus->priv = pdata;
mdio_bus->parent = &ndev->dev;
- ret = of_mdiobus_register(mdio_bus, mdio_np);
+ ret = xgene_mdiobus_register(pdata, mdio_bus);
if (ret) {
netdev_err(ndev, "Failed to register MDIO bus\n");
mdiobus_free(mdio_bus);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 83a50280bb70..44b15373d6b3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -24,6 +24,10 @@
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
+#define RES_ENET_CSR 0
+#define RES_RING_CSR 1
+#define RES_RING_CMD 2
+
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -369,6 +373,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
+ /* read fpqnum field after dataaddr field */
+ dma_rmb();
if (is_rx_desc(raw_desc))
ret = xgene_enet_rx_frame(ring, raw_desc);
else
@@ -746,6 +752,41 @@ static const struct net_device_ops xgene_ndev_ops = {
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
+static int xgene_get_mac_address(struct device *dev,
+ unsigned char *addr)
+{
+ int ret;
+
+ ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
+ if (ret)
+ ret = device_property_read_u8_array(dev, "mac-address",
+ addr, 6);
+ if (ret)
+ return -ENODEV;
+
+ return ETH_ALEN;
+}
+
+static int xgene_get_phy_mode(struct device *dev)
+{
+ int i, ret;
+ char *modestr;
+
+ ret = device_property_read_string(dev, "phy-connection-type",
+ (const char **)&modestr);
+ if (ret)
+ ret = device_property_read_string(dev, "phy-mode",
+ (const char **)&modestr);
+ if (ret)
+ return -ENODEV;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+ if (!strcasecmp(modestr, phy_modes(i)))
+ return i;
+ }
+ return -ENODEV;
+}
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -753,32 +794,45 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
- const char *mac;
int ret;
pdev = pdata->pdev;
dev = &pdev->dev;
ndev = pdata->ndev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
- pdata->base_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->base_addr)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
+ if (!res) {
+ dev_err(dev, "Resource enet_csr not defined\n");
+ return -ENODEV;
+ }
+ pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pdata->base_addr) {
dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
- return PTR_ERR(pdata->base_addr);
+ return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
- pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->ring_csr_addr)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
+ if (!res) {
+ dev_err(dev, "Resource ring_csr not defined\n");
+ return -ENODEV;
+ }
+ pdata->ring_csr_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->ring_csr_addr) {
dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
- return PTR_ERR(pdata->ring_csr_addr);
+ return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
- pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->ring_cmd_addr)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
+ if (!res) {
+ dev_err(dev, "Resource ring_cmd not defined\n");
+ return -ENODEV;
+ }
+ pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->ring_cmd_addr) {
dev_err(dev, "Unable to retrieve ENET Ring command region\n");
- return PTR_ERR(pdata->ring_cmd_addr);
+ return -ENOMEM;
}
ret = platform_get_irq(pdev, 0);
@@ -789,14 +843,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
pdata->rx_irq = ret;
- mac = of_get_mac_address(dev->of_node);
- if (mac)
- memcpy(ndev->dev_addr, mac, ndev->addr_len);
- else
+ if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
eth_hw_addr_random(ndev);
+
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
- pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ pdata->phy_mode = xgene_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
@@ -809,11 +861,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
pdata->clk = devm_clk_get(&pdev->dev, NULL);
- ret = IS_ERR(pdata->clk);
if (IS_ERR(pdata->clk)) {
- dev_err(&pdev->dev, "can't get clock\n");
- ret = PTR_ERR(pdata->clk);
- return ret;
+ /* Firmware may have set up the clock already. */
+ pdata->clk = NULL;
}
base_addr = pdata->base_addr;
@@ -924,7 +974,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
netdev_err(ndev, "No usable DMA configuration\n");
goto err;
@@ -972,17 +1022,26 @@ static int xgene_enet_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id xgene_enet_match[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+ { "APMC0D05", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet",},
{},
};
-MODULE_DEVICE_TABLE(of, xgene_enet_match);
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
- .of_match_table = xgene_enet_match,
+ .of_match_table = of_match_ptr(xgene_enet_of_match),
+ .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
.remove = xgene_enet_remove,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index f9958fae6ffd..c2d465c3db66 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -22,7 +22,10 @@
#ifndef __XGENE_ENET_MAIN_H__
#define __XGENE_ENET_MAIN_H__
+#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e398eda07298..c8af3ce3ea38 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
schedule_work(&alx->reset_wk);
}
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
struct alx_rx_queue *rxq = &alx->rxq;
struct alx_rrd *rrd;
struct alx_buffer *rxb;
struct sk_buff *skb;
u16 length, rfd_cleaned = 0;
+ int work = 0;
- while (budget > 0) {
+ while (work < budget) {
rrd = &rxq->rrd[rxq->rrd_read_idx];
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
ALX_GET_FIELD(le32_to_cpu(rrd->word0),
RRD_NOR) != 1) {
alx_schedule_reset(alx);
- return 0;
+ return work;
}
rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
}
napi_gro_receive(&alx->napi, skb);
- budget--;
+ work++;
next_pkt:
if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
if (rfd_cleaned)
alx_refill_rx_ring(alx, GFP_ATOMIC);
- return budget > 0;
+ return work;
}
static int alx_poll(struct napi_struct *napi, int budget)
{
struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
struct alx_hw *hw = &alx->hw;
- bool complete = true;
unsigned long flags;
+ bool tx_complete;
+ int work;
- complete = alx_clean_tx_irq(alx) &&
- alx_clean_rx_irq(alx, budget);
+ tx_complete = alx_clean_tx_irq(alx);
+ work = alx_clean_rx_irq(alx, budget);
- if (!complete)
- return 1;
+ if (!tx_complete || work == budget)
+ return budget;
napi_complete(&alx->napi);
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
alx_post_write(hw);
- return 0;
+ return work;
}
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c9946c6c119e..587f63e87588 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (unlikely(vlan_tx_tag_present(skb))) {
- u16 vlan = vlan_tx_tag_get(skb);
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ u16 vlan = skb_vlan_tag_get(skb);
__le16 tag;
vlan = cpu_to_le16(vlan);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2326579f9454..59a03a193e83 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
tpd = atl1e_get_tpd(adapter);
- if (vlan_tx_tag_present(skb)) {
- u16 vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag = skb_vlan_tag_get(skb);
u16 atl1e_vlan_tag;
tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
@@ -2373,9 +2373,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
- init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = atl1e_phy_config;
- adapter->phy_config_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
+ (unsigned long)adapter);
/* get user settings */
atl1e_check_options(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 2c8f398aeda9..eca1d113fee1 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
(u16) atomic_read(&tpd_ring->next_to_use));
memset(ptpd, 0, sizeof(struct tx_packet_desc));
- if (vlan_tx_tag_present(skb)) {
- vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag = skb_vlan_tag_get(skb);
vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
((vlan_tag >> 9) & 0x8);
ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 84a09e8ddd9c..46a535318c7a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
offset = ((u32)(skb->len-copy_len + 3) & ~3);
}
#ifdef NETIF_F_HW_VLAN_CTAG_TX
- if (vlan_tx_tag_present(skb)) {
- u16 vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag = skb_vlan_tag_get(skb);
vlan_tag = (vlan_tag << 4) |
(vlan_tag >> 13) |
((vlan_tag >> 9) & 0x8);
@@ -1436,13 +1436,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_check_options(adapter);
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = atl2_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->watchdog_timer, atl2_watchdog,
+ (unsigned long)adapter);
- init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = atl2_phy_config;
- adapter->phy_config_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->phy_config_timer, atl2_phy_config,
+ (unsigned long)adapter);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 05c6af6c418f..3007d95fbb9f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
bgmac->int_status = 0;
}
- if (handled < weight)
+ if (handled < weight) {
napi_complete(napi);
-
- bgmac_chip_intrs_on(bgmac);
+ bgmac_chip_intrs_on(bgmac);
+ }
return handled;
}
@@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
@@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core)
netif_carrier_off(net_dev);
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
-
return 0;
err_mii_unregister:
@@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core)
{
struct bgmac *bgmac = bcma_get_drvdata(core);
- netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
bgmac_mii_unregister(bgmac);
+ netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 823d01c5684c..02bf0b86995b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
vlan_tag_flags |=
- (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+ (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
}
if ((mss = skb_shinfo(skb)->gso_size)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c3a6072134f5..756053c028be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
/* compilation time flags */
@@ -1138,12 +1138,8 @@ struct bnx2x_port {
u32 link_config[LINK_CONFIG_SIZE];
u32 supported[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define SUPPORTED_2500baseX_Full (1 << 15)
u32 advertising[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define ADVERTISED_2500baseX_Full (1 << 15)
u32 phy_addr;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..0a9faa134a9a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
if (!bnx2x_fp_lock_napi(fp))
- return work_done;
+ return budget;
for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
@@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
"sending pkt %u @%p next_idx %u bd %u @%p\n",
pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(vlan_tx_tag_get(skb));
+ cpu_to_le16(skb_vlan_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
} else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 72eef9fc883e..7155e1d2c208 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9169,7 +9169,7 @@ static void bnx2x_disable_ptp(struct bnx2x *bp)
}
/* Called during unload, to stop PTP-related stuff */
-void bnx2x_stop_ptp(struct bnx2x *bp)
+static void bnx2x_stop_ptp(struct bnx2x *bp)
{
/* Cancel PTP work queue. Should be done after the Tx queues are
* drained to prevent additional scheduling.
@@ -13267,14 +13267,10 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
- u64 now;
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
- now = timecounter_read(&bp->timecounter);
- now += delta;
- /* Re-init the timecounter */
- timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+ timecounter_adjtime(&bp->timecounter, delta);
return 0;
}
@@ -13322,7 +13318,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
return -ENOTSUPP;
}
-void bnx2x_register_phc(struct bnx2x *bp)
+static void bnx2x_register_phc(struct bnx2x *bp)
{
/* Fill the ptp_clock_info struct and register PTP clock*/
bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14614,7 +14610,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
- bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
+ bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
bp->cyclecounter.shift = 1;
bp->cyclecounter.mult = 1;
}
@@ -14639,7 +14635,7 @@ static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
return bnx2x_func_state_change(bp, &func_params);
}
-int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
struct bnx2x_queue_state_params q_params;
int rc, i;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 553dcd8a9df2..615a6dbde047 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
}
static void tg3_irq_quiesce(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
int i;
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
tp->irq_sync = 1;
smp_mb();
+ spin_unlock_bh(&tp->lock);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+
+ spin_lock_bh(&tp->lock);
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -8002,9 +8008,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
!mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
base_flags |= TXD_FLAG_VLAN;
- vlan = vlan_tx_tag_get(skb);
+ vlan = skb_vlan_tag_get(skb);
}
if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
u32 val;
void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
}
smp_mb();
+ tg3_full_unlock(tp);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+ tg3_full_lock(tp, 0);
+
if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
{
struct tg3 *tp = (struct tg3 *) __opaque;
- if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
- goto restart_timer;
-
spin_lock(&tp->lock);
+ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+ spin_unlock(&tp->lock);
+ goto restart_timer;
+ }
+
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
struct tg3 *tp = container_of(work, struct tg3, reset_task);
int err;
+ rtnl_lock();
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
+ rtnl_unlock();
return;
}
@@ -11138,6 +11154,7 @@ out:
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ rtnl_unlock();
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
@@ -11556,11 +11573,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
tg3_flag_set(tp, INIT_COMPLETE);
tg3_enable_ints(tp);
- if (init)
- tg3_ptp_init(tp);
- else
- tg3_ptp_resume(tp);
-
+ tg3_ptp_resume(tp);
tg3_full_unlock(tp);
@@ -11681,13 +11694,6 @@ static int tg3_open(struct net_device *dev)
pci_set_power_state(tp->pdev, PCI_D3hot);
}
- if (tg3_flag(tp, PTP_CAPABLE)) {
- tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
- &tp->pdev->dev);
- if (IS_ERR(tp->ptp_clock))
- tp->ptp_clock = NULL;
- }
-
return err;
}
@@ -11701,8 +11707,6 @@ static int tg3_close(struct net_device *dev)
return -EAGAIN;
}
- tg3_ptp_fini(tp);
-
tg3_stop(tp);
/* Clear stats across close / open calls */
@@ -17880,6 +17884,14 @@ static int tg3_init_one(struct pci_dev *pdev,
goto err_out_apeunmap;
}
+ if (tg3_flag(tp, PTP_CAPABLE)) {
+ tg3_ptp_init(tp);
+ tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
+ &tp->pdev->dev);
+ if (IS_ERR(tp->ptp_clock))
+ tp->ptp_clock = NULL;
+ }
+
netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
tp->board_part_number,
tg3_chip_rev_id(tp),
@@ -17955,6 +17967,8 @@ static void tg3_remove_one(struct pci_dev *pdev)
if (dev) {
struct tg3 *tp = netdev_priv(dev);
+ tg3_ptp_fini(tp);
+
release_firmware(tp->fw);
tg3_reset_task_cancel(tp);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 323721838cf9..7714d7790089 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
u32 gso_size;
u16 vlan_tag = 0;
- if (vlan_tx_tag_present(skb)) {
- vlan_tag = (u16)vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag = (u16)skb_vlan_tag_get(skb);
flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
}
if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 55eb7f2af2b4..7ef55f5fa664 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
res = PTR_ERR(lp->pclk);
goto err_free_dev;
}
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
lp->hclk = ERR_PTR(-ENOENT);
lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
err_disable_clock:
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
err_free_dev:
free_netdev(dev);
return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
free_netdev(dev);
return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
}
return 0;
}
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3767271c7667..ad76b8e35a00 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1691,7 +1691,7 @@ static int hash_get_index(__u8 *addr)
for (j = 0; j < 6; j++) {
for (i = 0, bitval = 0; i < 8; i++)
- bitval ^= hash_bit_value(i*6 + j, addr);
+ bitval ^= hash_bit_value(i * 6 + j, addr);
hash_index |= (bitval << j);
}
@@ -1827,12 +1827,23 @@ static int macb_close(struct net_device *dev)
static void gem_update_stats(struct macb *bp)
{
- u32 __iomem *reg = bp->regs + GEM_OTX;
+ int i;
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
- u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
- for (; p < end; p++, reg++)
- *p += __raw_readl(reg);
+ for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
+ u32 offset = gem_statistics[i].offset;
+ u64 val = __raw_readl(bp->regs + offset);
+
+ bp->ethtool_stats[i] += val;
+ *p += val;
+
+ if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
+ /* Add GEM_OCTTXH, GEM_OCTRXH */
+ val = __raw_readl(bp->regs + offset + 4);
+ bp->ethtool_stats[i] += ((u64)val) << 32;
+ *(++p) += val;
+ }
+ }
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -1873,6 +1884,39 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
return nstat;
}
+static void gem_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct macb *bp;
+
+ bp = netdev_priv(dev);
+ gem_update_stats(bp);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+}
+
+static int gem_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return GEM_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
+{
+ int i;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
+ memcpy(p, gem_statistics[i].stat_string,
+ ETH_GSTRING_LEN);
+ break;
+ }
+}
+
struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -1991,6 +2035,18 @@ const struct ethtool_ops macb_ethtool_ops = {
};
EXPORT_SYMBOL_GPL(macb_ethtool_ops);
+static const struct ethtool_ops gem_ethtool_ops = {
+ .get_settings = macb_get_settings,
+ .set_settings = macb_set_settings,
+ .get_regs_len = macb_get_regs_len,
+ .get_regs = macb_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_ethtool_stats = gem_get_ethtool_stats,
+ .get_strings = gem_get_ethtool_strings,
+ .get_sset_count = gem_get_sset_count,
+};
+
int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct macb *bp = netdev_priv(dev);
@@ -2148,7 +2204,7 @@ static void macb_probe_queues(void __iomem *mem,
(*num_queues)++;
}
-static int __init macb_probe(struct platform_device *pdev)
+static int macb_probe(struct platform_device *pdev)
{
struct macb_platform_data *pdata;
struct resource *regs;
@@ -2278,7 +2334,6 @@ static int __init macb_probe(struct platform_device *pdev)
dev->netdev_ops = &macb_netdev_ops;
netif_napi_add(dev, &bp->napi, macb_poll, 64);
- dev->ethtool_ops = &macb_ethtool_ops;
dev->base_addr = regs->start;
@@ -2292,12 +2347,14 @@ static int __init macb_probe(struct platform_device *pdev)
bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
bp->macbgem_ops.mog_init_rings = gem_init_rings;
bp->macbgem_ops.mog_rx = gem_rx;
+ dev->ethtool_ops = &gem_ethtool_ops;
} else {
bp->max_tx_length = MACB_MAX_TX_LEN;
bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
bp->macbgem_ops.mog_init_rings = macb_init_rings;
bp->macbgem_ops.mog_rx = macb_rx;
+ dev->ethtool_ops = &macb_ethtool_ops;
}
/* Set features */
@@ -2386,7 +2443,7 @@ err_out:
return err;
}
-static int __exit macb_remove(struct platform_device *pdev)
+static int macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
@@ -2411,8 +2468,7 @@ static int __exit macb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int macb_suspend(struct device *dev)
+static int __maybe_unused macb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2429,7 +2485,7 @@ static int macb_suspend(struct device *dev)
return 0;
}
-static int macb_resume(struct device *dev)
+static int __maybe_unused macb_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2444,12 +2500,12 @@ static int macb_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
static struct platform_driver macb_driver = {
- .remove = __exit_p(macb_remove),
+ .probe = macb_probe,
+ .remove = macb_remove,
.driver = {
.name = "macb",
.of_match_table = of_match_ptr(macb_dt_ids),
@@ -2457,7 +2513,7 @@ static struct platform_driver macb_driver = {
},
};
-module_platform_driver_probe(macb_driver, macb_probe);
+module_platform_driver(macb_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 084191b6fad2..31dc080f2437 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -15,263 +15,309 @@
#define MACB_MAX_QUEUES 8
/* MACB register offsets */
-#define MACB_NCR 0x0000
-#define MACB_NCFGR 0x0004
-#define MACB_NSR 0x0008
-#define MACB_TAR 0x000c /* AT91RM9200 only */
-#define MACB_TCR 0x0010 /* AT91RM9200 only */
-#define MACB_TSR 0x0014
-#define MACB_RBQP 0x0018
-#define MACB_TBQP 0x001c
-#define MACB_RSR 0x0020
-#define MACB_ISR 0x0024
-#define MACB_IER 0x0028
-#define MACB_IDR 0x002c
-#define MACB_IMR 0x0030
-#define MACB_MAN 0x0034
-#define MACB_PTR 0x0038
-#define MACB_PFR 0x003c
-#define MACB_FTO 0x0040
-#define MACB_SCF 0x0044
-#define MACB_MCF 0x0048
-#define MACB_FRO 0x004c
-#define MACB_FCSE 0x0050
-#define MACB_ALE 0x0054
-#define MACB_DTF 0x0058
-#define MACB_LCOL 0x005c
-#define MACB_EXCOL 0x0060
-#define MACB_TUND 0x0064
-#define MACB_CSE 0x0068
-#define MACB_RRE 0x006c
-#define MACB_ROVR 0x0070
-#define MACB_RSE 0x0074
-#define MACB_ELE 0x0078
-#define MACB_RJA 0x007c
-#define MACB_USF 0x0080
-#define MACB_STE 0x0084
-#define MACB_RLE 0x0088
-#define MACB_TPF 0x008c
-#define MACB_HRB 0x0090
-#define MACB_HRT 0x0094
-#define MACB_SA1B 0x0098
-#define MACB_SA1T 0x009c
-#define MACB_SA2B 0x00a0
-#define MACB_SA2T 0x00a4
-#define MACB_SA3B 0x00a8
-#define MACB_SA3T 0x00ac
-#define MACB_SA4B 0x00b0
-#define MACB_SA4T 0x00b4
-#define MACB_TID 0x00b8
-#define MACB_TPQ 0x00bc
-#define MACB_USRIO 0x00c0
-#define MACB_WOL 0x00c4
-#define MACB_MID 0x00fc
+#define MACB_NCR 0x0000 /* Network Control */
+#define MACB_NCFGR 0x0004 /* Network Config */
+#define MACB_NSR 0x0008 /* Network Status */
+#define MACB_TAR 0x000c /* AT91RM9200 only */
+#define MACB_TCR 0x0010 /* AT91RM9200 only */
+#define MACB_TSR 0x0014 /* Transmit Status */
+#define MACB_RBQP 0x0018 /* RX Q Base Address */
+#define MACB_TBQP 0x001c /* TX Q Base Address */
+#define MACB_RSR 0x0020 /* Receive Status */
+#define MACB_ISR 0x0024 /* Interrupt Status */
+#define MACB_IER 0x0028 /* Interrupt Enable */
+#define MACB_IDR 0x002c /* Interrupt Disable */
+#define MACB_IMR 0x0030 /* Interrupt Mask */
+#define MACB_MAN 0x0034 /* PHY Maintenance */
+#define MACB_PTR 0x0038
+#define MACB_PFR 0x003c
+#define MACB_FTO 0x0040
+#define MACB_SCF 0x0044
+#define MACB_MCF 0x0048
+#define MACB_FRO 0x004c
+#define MACB_FCSE 0x0050
+#define MACB_ALE 0x0054
+#define MACB_DTF 0x0058
+#define MACB_LCOL 0x005c
+#define MACB_EXCOL 0x0060
+#define MACB_TUND 0x0064
+#define MACB_CSE 0x0068
+#define MACB_RRE 0x006c
+#define MACB_ROVR 0x0070
+#define MACB_RSE 0x0074
+#define MACB_ELE 0x0078
+#define MACB_RJA 0x007c
+#define MACB_USF 0x0080
+#define MACB_STE 0x0084
+#define MACB_RLE 0x0088
+#define MACB_TPF 0x008c
+#define MACB_HRB 0x0090
+#define MACB_HRT 0x0094
+#define MACB_SA1B 0x0098
+#define MACB_SA1T 0x009c
+#define MACB_SA2B 0x00a0
+#define MACB_SA2T 0x00a4
+#define MACB_SA3B 0x00a8
+#define MACB_SA3T 0x00ac
+#define MACB_SA4B 0x00b0
+#define MACB_SA4T 0x00b4
+#define MACB_TID 0x00b8
+#define MACB_TPQ 0x00bc
+#define MACB_USRIO 0x00c0
+#define MACB_WOL 0x00c4
+#define MACB_MID 0x00fc
/* GEM register offsets. */
-#define GEM_NCFGR 0x0004
-#define GEM_USRIO 0x000c
-#define GEM_DMACFG 0x0010
-#define GEM_HRB 0x0080
-#define GEM_HRT 0x0084
-#define GEM_SA1B 0x0088
-#define GEM_SA1T 0x008C
-#define GEM_SA2B 0x0090
-#define GEM_SA2T 0x0094
-#define GEM_SA3B 0x0098
-#define GEM_SA3T 0x009C
-#define GEM_SA4B 0x00A0
-#define GEM_SA4T 0x00A4
-#define GEM_OTX 0x0100
-#define GEM_DCFG1 0x0280
-#define GEM_DCFG2 0x0284
-#define GEM_DCFG3 0x0288
-#define GEM_DCFG4 0x028c
-#define GEM_DCFG5 0x0290
-#define GEM_DCFG6 0x0294
-#define GEM_DCFG7 0x0298
-
-#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
-#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
-#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
-#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
-#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+#define GEM_NCFGR 0x0004 /* Network Config */
+#define GEM_USRIO 0x000c /* User IO */
+#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_HRB 0x0080 /* Hash Bottom */
+#define GEM_HRT 0x0084 /* Hash Top */
+#define GEM_SA1B 0x0088 /* Specific1 Bottom */
+#define GEM_SA1T 0x008C /* Specific1 Top */
+#define GEM_SA2B 0x0090 /* Specific2 Bottom */
+#define GEM_SA2T 0x0094 /* Specific2 Top */
+#define GEM_SA3B 0x0098 /* Specific3 Bottom */
+#define GEM_SA3T 0x009C /* Specific3 Top */
+#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
+#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_OTX 0x0100 /* Octets transmitted */
+#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */
+#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */
+#define GEM_TXCNT 0x0108 /* Frames Transmitted counter */
+#define GEM_TXBCCNT 0x010c /* Broadcast Frames counter */
+#define GEM_TXMCCNT 0x0110 /* Multicast Frames counter */
+#define GEM_TXPAUSECNT 0x0114 /* Pause Frames Transmitted Counter */
+#define GEM_TX64CNT 0x0118 /* 64 byte Frames TX counter */
+#define GEM_TX65CNT 0x011c /* 65-127 byte Frames TX counter */
+#define GEM_TX128CNT 0x0120 /* 128-255 byte Frames TX counter */
+#define GEM_TX256CNT 0x0124 /* 256-511 byte Frames TX counter */
+#define GEM_TX512CNT 0x0128 /* 512-1023 byte Frames TX counter */
+#define GEM_TX1024CNT 0x012c /* 1024-1518 byte Frames TX counter */
+#define GEM_TX1519CNT 0x0130 /* 1519+ byte Frames TX counter */
+#define GEM_TXURUNCNT 0x0134 /* TX under run error counter */
+#define GEM_SNGLCOLLCNT 0x0138 /* Single Collision Frame Counter */
+#define GEM_MULTICOLLCNT 0x013c /* Multiple Collision Frame Counter */
+#define GEM_EXCESSCOLLCNT 0x0140 /* Excessive Collision Frame Counter */
+#define GEM_LATECOLLCNT 0x0144 /* Late Collision Frame Counter */
+#define GEM_TXDEFERCNT 0x0148 /* Deferred Transmission Frame Counter */
+#define GEM_TXCSENSECNT 0x014c /* Carrier Sense Error Counter */
+#define GEM_ORX 0x0150 /* Octets received */
+#define GEM_OCTRXL 0x0150 /* Octets received [31:0] */
+#define GEM_OCTRXH 0x0154 /* Octets received [47:32] */
+#define GEM_RXCNT 0x0158 /* Frames Received Counter */
+#define GEM_RXBROADCNT 0x015c /* Broadcast Frames Received Counter */
+#define GEM_RXMULTICNT 0x0160 /* Multicast Frames Received Counter */
+#define GEM_RXPAUSECNT 0x0164 /* Pause Frames Received Counter */
+#define GEM_RX64CNT 0x0168 /* 64 byte Frames RX Counter */
+#define GEM_RX65CNT 0x016c /* 65-127 byte Frames RX Counter */
+#define GEM_RX128CNT 0x0170 /* 128-255 byte Frames RX Counter */
+#define GEM_RX256CNT 0x0174 /* 256-511 byte Frames RX Counter */
+#define GEM_RX512CNT 0x0178 /* 512-1023 byte Frames RX Counter */
+#define GEM_RX1024CNT 0x017c /* 1024-1518 byte Frames RX Counter */
+#define GEM_RX1519CNT 0x0180 /* 1519+ byte Frames RX Counter */
+#define GEM_RXUNDRCNT 0x0184 /* Undersize Frames Received Counter */
+#define GEM_RXOVRCNT 0x0188 /* Oversize Frames Received Counter */
+#define GEM_RXJABCNT 0x018c /* Jabbers Received Counter */
+#define GEM_RXFCSCNT 0x0190 /* Frame Check Sequence Error Counter */
+#define GEM_RXLENGTHCNT 0x0194 /* Length Field Error Counter */
+#define GEM_RXSYMBCNT 0x0198 /* Symbol Error Counter */
+#define GEM_RXALIGNCNT 0x019c /* Alignment Error Counter */
+#define GEM_RXRESERRCNT 0x01a0 /* Receive Resource Error Counter */
+#define GEM_RXORCNT 0x01a4 /* Receive Overrun Counter */
+#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */
+#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */
+#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */
+#define GEM_DCFG1 0x0280 /* Design Config 1 */
+#define GEM_DCFG2 0x0284 /* Design Config 2 */
+#define GEM_DCFG3 0x0288 /* Design Config 3 */
+#define GEM_DCFG4 0x028c /* Design Config 4 */
+#define GEM_DCFG5 0x0290 /* Design Config 5 */
+#define GEM_DCFG6 0x0294 /* Design Config 6 */
+#define GEM_DCFG7 0x0298 /* Design Config 7 */
+
+#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
+#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
+#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
+#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
+#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
/* Bitfields in NCR */
-#define MACB_LB_OFFSET 0
-#define MACB_LB_SIZE 1
-#define MACB_LLB_OFFSET 1
-#define MACB_LLB_SIZE 1
-#define MACB_RE_OFFSET 2
-#define MACB_RE_SIZE 1
-#define MACB_TE_OFFSET 3
-#define MACB_TE_SIZE 1
-#define MACB_MPE_OFFSET 4
-#define MACB_MPE_SIZE 1
-#define MACB_CLRSTAT_OFFSET 5
-#define MACB_CLRSTAT_SIZE 1
-#define MACB_INCSTAT_OFFSET 6
-#define MACB_INCSTAT_SIZE 1
-#define MACB_WESTAT_OFFSET 7
-#define MACB_WESTAT_SIZE 1
-#define MACB_BP_OFFSET 8
-#define MACB_BP_SIZE 1
-#define MACB_TSTART_OFFSET 9
-#define MACB_TSTART_SIZE 1
-#define MACB_THALT_OFFSET 10
-#define MACB_THALT_SIZE 1
-#define MACB_NCR_TPF_OFFSET 11
-#define MACB_NCR_TPF_SIZE 1
-#define MACB_TZQ_OFFSET 12
-#define MACB_TZQ_SIZE 1
+#define MACB_LB_OFFSET 0 /* reserved */
+#define MACB_LB_SIZE 1
+#define MACB_LLB_OFFSET 1 /* Loop back local */
+#define MACB_LLB_SIZE 1
+#define MACB_RE_OFFSET 2 /* Receive enable */
+#define MACB_RE_SIZE 1
+#define MACB_TE_OFFSET 3 /* Transmit enable */
+#define MACB_TE_SIZE 1
+#define MACB_MPE_OFFSET 4 /* Management port enable */
+#define MACB_MPE_SIZE 1
+#define MACB_CLRSTAT_OFFSET 5 /* Clear stats regs */
+#define MACB_CLRSTAT_SIZE 1
+#define MACB_INCSTAT_OFFSET 6 /* Incremental stats regs */
+#define MACB_INCSTAT_SIZE 1
+#define MACB_WESTAT_OFFSET 7 /* Write enable stats regs */
+#define MACB_WESTAT_SIZE 1
+#define MACB_BP_OFFSET 8 /* Back pressure */
+#define MACB_BP_SIZE 1
+#define MACB_TSTART_OFFSET 9 /* Start transmission */
+#define MACB_TSTART_SIZE 1
+#define MACB_THALT_OFFSET 10 /* Transmit halt */
+#define MACB_THALT_SIZE 1
+#define MACB_NCR_TPF_OFFSET 11 /* Transmit pause frame */
+#define MACB_NCR_TPF_SIZE 1
+#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
+#define MACB_TZQ_SIZE 1
/* Bitfields in NCFGR */
-#define MACB_SPD_OFFSET 0
-#define MACB_SPD_SIZE 1
-#define MACB_FD_OFFSET 1
-#define MACB_FD_SIZE 1
-#define MACB_BIT_RATE_OFFSET 2
-#define MACB_BIT_RATE_SIZE 1
-#define MACB_JFRAME_OFFSET 3
-#define MACB_JFRAME_SIZE 1
-#define MACB_CAF_OFFSET 4
-#define MACB_CAF_SIZE 1
-#define MACB_NBC_OFFSET 5
-#define MACB_NBC_SIZE 1
-#define MACB_NCFGR_MTI_OFFSET 6
-#define MACB_NCFGR_MTI_SIZE 1
-#define MACB_UNI_OFFSET 7
-#define MACB_UNI_SIZE 1
-#define MACB_BIG_OFFSET 8
-#define MACB_BIG_SIZE 1
-#define MACB_EAE_OFFSET 9
-#define MACB_EAE_SIZE 1
-#define MACB_CLK_OFFSET 10
-#define MACB_CLK_SIZE 2
-#define MACB_RTY_OFFSET 12
-#define MACB_RTY_SIZE 1
-#define MACB_PAE_OFFSET 13
-#define MACB_PAE_SIZE 1
-#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
-#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
-#define MACB_RBOF_OFFSET 14
-#define MACB_RBOF_SIZE 2
-#define MACB_RLCE_OFFSET 16
-#define MACB_RLCE_SIZE 1
-#define MACB_DRFCS_OFFSET 17
-#define MACB_DRFCS_SIZE 1
-#define MACB_EFRHD_OFFSET 18
-#define MACB_EFRHD_SIZE 1
-#define MACB_IRXFCS_OFFSET 19
-#define MACB_IRXFCS_SIZE 1
+#define MACB_SPD_OFFSET 0 /* Speed */
+#define MACB_SPD_SIZE 1
+#define MACB_FD_OFFSET 1 /* Full duplex */
+#define MACB_FD_SIZE 1
+#define MACB_BIT_RATE_OFFSET 2 /* Discard non-VLAN frames */
+#define MACB_BIT_RATE_SIZE 1
+#define MACB_JFRAME_OFFSET 3 /* reserved */
+#define MACB_JFRAME_SIZE 1
+#define MACB_CAF_OFFSET 4 /* Copy all frames */
+#define MACB_CAF_SIZE 1
+#define MACB_NBC_OFFSET 5 /* No broadcast */
+#define MACB_NBC_SIZE 1
+#define MACB_NCFGR_MTI_OFFSET 6 /* Multicast hash enable */
+#define MACB_NCFGR_MTI_SIZE 1
+#define MACB_UNI_OFFSET 7 /* Unicast hash enable */
+#define MACB_UNI_SIZE 1
+#define MACB_BIG_OFFSET 8 /* Receive 1536 byte frames */
+#define MACB_BIG_SIZE 1
+#define MACB_EAE_OFFSET 9 /* External address match enable */
+#define MACB_EAE_SIZE 1
+#define MACB_CLK_OFFSET 10
+#define MACB_CLK_SIZE 2
+#define MACB_RTY_OFFSET 12 /* Retry test */
+#define MACB_RTY_SIZE 1
+#define MACB_PAE_OFFSET 13 /* Pause enable */
+#define MACB_PAE_SIZE 1
+#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
+#define MACB_RBOF_OFFSET 14 /* Receive buffer offset */
+#define MACB_RBOF_SIZE 2
+#define MACB_RLCE_OFFSET 16 /* Length field error frame discard */
+#define MACB_RLCE_SIZE 1
+#define MACB_DRFCS_OFFSET 17 /* FCS remove */
+#define MACB_DRFCS_SIZE 1
+#define MACB_EFRHD_OFFSET 18
+#define MACB_EFRHD_SIZE 1
+#define MACB_IRXFCS_OFFSET 19
+#define MACB_IRXFCS_SIZE 1
/* GEM specific NCFGR bitfields. */
-#define GEM_GBE_OFFSET 10
-#define GEM_GBE_SIZE 1
-#define GEM_CLK_OFFSET 18
-#define GEM_CLK_SIZE 3
-#define GEM_DBW_OFFSET 21
-#define GEM_DBW_SIZE 2
-#define GEM_RXCOEN_OFFSET 24
-#define GEM_RXCOEN_SIZE 1
+#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
+#define GEM_GBE_SIZE 1
+#define GEM_CLK_OFFSET 18 /* MDC clock division */
+#define GEM_CLK_SIZE 3
+#define GEM_DBW_OFFSET 21 /* Data bus width */
+#define GEM_DBW_SIZE 2
+#define GEM_RXCOEN_OFFSET 24
+#define GEM_RXCOEN_SIZE 1
/* Constants for data bus width. */
-#define GEM_DBW32 0
-#define GEM_DBW64 1
-#define GEM_DBW128 2
+#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */
+#define GEM_DBW64 1 /* 64 bit AMBA AHB data bus width */
+#define GEM_DBW128 2 /* 128 bit AMBA AHB data bus width */
/* Bitfields in DMACFG. */
-#define GEM_FBLDO_OFFSET 0
-#define GEM_FBLDO_SIZE 5
-#define GEM_ENDIA_OFFSET 7
-#define GEM_ENDIA_SIZE 1
-#define GEM_RXBMS_OFFSET 8
-#define GEM_RXBMS_SIZE 2
-#define GEM_TXPBMS_OFFSET 10
-#define GEM_TXPBMS_SIZE 1
-#define GEM_TXCOEN_OFFSET 11
-#define GEM_TXCOEN_SIZE 1
-#define GEM_RXBS_OFFSET 16
-#define GEM_RXBS_SIZE 8
-#define GEM_DDRP_OFFSET 24
-#define GEM_DDRP_SIZE 1
+#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */
+#define GEM_FBLDO_SIZE 5
+#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */
+#define GEM_ENDIA_SIZE 1
+#define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */
+#define GEM_RXBMS_SIZE 2
+#define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */
+#define GEM_TXPBMS_SIZE 1
+#define GEM_TXCOEN_OFFSET 11 /* TX IP/TCP/UDP checksum gen offload */
+#define GEM_TXCOEN_SIZE 1
+#define GEM_RXBS_OFFSET 16 /* DMA receive buffer size */
+#define GEM_RXBS_SIZE 8
+#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
+#define GEM_DDRP_SIZE 1
/* Bitfields in NSR */
-#define MACB_NSR_LINK_OFFSET 0
-#define MACB_NSR_LINK_SIZE 1
-#define MACB_MDIO_OFFSET 1
-#define MACB_MDIO_SIZE 1
-#define MACB_IDLE_OFFSET 2
-#define MACB_IDLE_SIZE 1
+#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
+#define MACB_NSR_LINK_SIZE 1
+#define MACB_MDIO_OFFSET 1 /* status of the mdio_in pin */
+#define MACB_MDIO_SIZE 1
+#define MACB_IDLE_OFFSET 2 /* The PHY management logic is idle */
+#define MACB_IDLE_SIZE 1
/* Bitfields in TSR */
-#define MACB_UBR_OFFSET 0
-#define MACB_UBR_SIZE 1
-#define MACB_COL_OFFSET 1
-#define MACB_COL_SIZE 1
-#define MACB_TSR_RLE_OFFSET 2
-#define MACB_TSR_RLE_SIZE 1
-#define MACB_TGO_OFFSET 3
-#define MACB_TGO_SIZE 1
-#define MACB_BEX_OFFSET 4
-#define MACB_BEX_SIZE 1
-#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
-#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
-#define MACB_COMP_OFFSET 5
-#define MACB_COMP_SIZE 1
-#define MACB_UND_OFFSET 6
-#define MACB_UND_SIZE 1
+#define MACB_UBR_OFFSET 0 /* Used bit read */
+#define MACB_UBR_SIZE 1
+#define MACB_COL_OFFSET 1 /* Collision occurred */
+#define MACB_COL_SIZE 1
+#define MACB_TSR_RLE_OFFSET 2 /* Retry limit exceeded */
+#define MACB_TSR_RLE_SIZE 1
+#define MACB_TGO_OFFSET 3 /* Transmit go */
+#define MACB_TGO_SIZE 1
+#define MACB_BEX_OFFSET 4 /* TX frame corruption due to AHB error */
+#define MACB_BEX_SIZE 1
+#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
+#define MACB_COMP_OFFSET	5 /* Transmit complete */
+#define MACB_COMP_SIZE 1
+#define MACB_UND_OFFSET		6 /* Transmit under run */
+#define MACB_UND_SIZE 1
/* Bitfields in RSR */
-#define MACB_BNA_OFFSET 0
-#define MACB_BNA_SIZE 1
-#define MACB_REC_OFFSET 1
-#define MACB_REC_SIZE 1
-#define MACB_OVR_OFFSET 2
-#define MACB_OVR_SIZE 1
+#define MACB_BNA_OFFSET 0 /* Buffer not available */
+#define MACB_BNA_SIZE 1
+#define MACB_REC_OFFSET 1 /* Frame received */
+#define MACB_REC_SIZE 1
+#define MACB_OVR_OFFSET 2 /* Receive overrun */
+#define MACB_OVR_SIZE 1
/* Bitfields in ISR/IER/IDR/IMR */
-#define MACB_MFD_OFFSET 0
-#define MACB_MFD_SIZE 1
-#define MACB_RCOMP_OFFSET 1
-#define MACB_RCOMP_SIZE 1
-#define MACB_RXUBR_OFFSET 2
-#define MACB_RXUBR_SIZE 1
-#define MACB_TXUBR_OFFSET 3
-#define MACB_TXUBR_SIZE 1
-#define MACB_ISR_TUND_OFFSET 4
-#define MACB_ISR_TUND_SIZE 1
-#define MACB_ISR_RLE_OFFSET 5
-#define MACB_ISR_RLE_SIZE 1
-#define MACB_TXERR_OFFSET 6
-#define MACB_TXERR_SIZE 1
-#define MACB_TCOMP_OFFSET 7
-#define MACB_TCOMP_SIZE 1
-#define MACB_ISR_LINK_OFFSET 9
-#define MACB_ISR_LINK_SIZE 1
-#define MACB_ISR_ROVR_OFFSET 10
-#define MACB_ISR_ROVR_SIZE 1
-#define MACB_HRESP_OFFSET 11
-#define MACB_HRESP_SIZE 1
-#define MACB_PFR_OFFSET 12
-#define MACB_PFR_SIZE 1
-#define MACB_PTZ_OFFSET 13
-#define MACB_PTZ_SIZE 1
+#define MACB_MFD_OFFSET 0 /* Management frame sent */
+#define MACB_MFD_SIZE 1
+#define MACB_RCOMP_OFFSET 1 /* Receive complete */
+#define MACB_RCOMP_SIZE 1
+#define MACB_RXUBR_OFFSET 2 /* RX used bit read */
+#define MACB_RXUBR_SIZE 1
+#define MACB_TXUBR_OFFSET 3 /* TX used bit read */
+#define MACB_TXUBR_SIZE 1
+#define MACB_ISR_TUND_OFFSET 4 /* Enable TX buffer under run interrupt */
+#define MACB_ISR_TUND_SIZE 1
+#define MACB_ISR_RLE_OFFSET 5 /* EN retry exceeded/late coll interrupt */
+#define MACB_ISR_RLE_SIZE 1
+#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */
+#define MACB_TXERR_SIZE 1
+#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */
+#define MACB_TCOMP_SIZE 1
+#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */
+#define MACB_ISR_LINK_SIZE 1
+#define MACB_ISR_ROVR_OFFSET 10 /* Enable receive overrun interrupt */
+#define MACB_ISR_ROVR_SIZE 1
+#define MACB_HRESP_OFFSET	11 /* Enable hresp not OK interrupt */
+#define MACB_HRESP_SIZE 1
+#define MACB_PFR_OFFSET 12 /* Enable pause frame w/ quantum interrupt */
+#define MACB_PFR_SIZE 1
+#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
+#define MACB_PTZ_SIZE 1
/* Bitfields in MAN */
-#define MACB_DATA_OFFSET 0
-#define MACB_DATA_SIZE 16
-#define MACB_CODE_OFFSET 16
-#define MACB_CODE_SIZE 2
-#define MACB_REGA_OFFSET 18
-#define MACB_REGA_SIZE 5
-#define MACB_PHYA_OFFSET 23
-#define MACB_PHYA_SIZE 5
-#define MACB_RW_OFFSET 28
-#define MACB_RW_SIZE 2
-#define MACB_SOF_OFFSET 30
-#define MACB_SOF_SIZE 2
+#define MACB_DATA_OFFSET 0 /* data */
+#define MACB_DATA_SIZE 16
+#define MACB_CODE_OFFSET 16 /* Must be written to 10 */
+#define MACB_CODE_SIZE 2
+#define MACB_REGA_OFFSET 18 /* Register address */
+#define MACB_REGA_SIZE 5
+#define MACB_PHYA_OFFSET 23 /* PHY address */
+#define MACB_PHYA_SIZE 5
+#define MACB_RW_OFFSET 28 /* Operation. 10 is read. 01 is write. */
+#define MACB_RW_SIZE 2
+#define MACB_SOF_OFFSET 30 /* Must be written to 1 for Clause 22 */
+#define MACB_SOF_SIZE 2
/* Bitfields in USRIO (AVR32) */
#define MACB_MII_OFFSET 0
@@ -286,7 +332,7 @@
/* Bitfields in USRIO (AT91) */
#define MACB_RMII_OFFSET 0
#define MACB_RMII_SIZE 1
-#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
+#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
#define GEM_RGMII_SIZE 1
#define MACB_CLKEN_OFFSET 1
#define MACB_CLKEN_SIZE 1
@@ -389,8 +435,7 @@
#define queue_writel(queue, reg, value) \
__raw_writel((value), (queue)->bp->regs + (queue)->reg)
-/*
- * Conditional GEM/MACB macros. These perform the operation to the correct
+/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
* to avoid the cost of the conditional.
@@ -413,8 +458,7 @@
__v; \
})
-/**
- * struct macb_dma_desc - Hardware DMA descriptor
+/* struct macb_dma_desc - Hardware DMA descriptor
* @addr: DMA address of data buffer
* @ctrl: Control and status bits
*/
@@ -503,8 +547,7 @@ struct macb_dma_desc {
/* limit RX checksum offload to TCP and UDP packets */
#define GEM_RX_CSUM_CHECKED_MASK 2
-/**
- * struct macb_tx_skb - data about an skb which is being transmitted
+/* struct macb_tx_skb - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
* @mapping: DMA address of the skb's fragment buffer
@@ -519,8 +562,7 @@ struct macb_tx_skb {
bool mapped_as_page;
};
-/*
- * Hardware-collected statistics. Used when updating the network
+/* Hardware-collected statistics. Used when updating the network
* device stats by a periodic timer.
*/
struct macb_stats {
@@ -595,6 +637,107 @@ struct gem_stats {
u32 rx_udp_checksum_errors;
};
+/* Describes the name and offset of an individual statistic register, as
+ * returned by `ethtool -S`. Also describes which net_device_stats statistics
+ * this register should contribute to.
+ */
+struct gem_statistic {
+ char stat_string[ETH_GSTRING_LEN];
+ int offset;
+ u32 stat_bits;
+};
+
+/* Bitfield defs for net_device_stat statistics */
+#define GEM_NDS_RXERR_OFFSET 0
+#define GEM_NDS_RXLENERR_OFFSET 1
+#define GEM_NDS_RXOVERERR_OFFSET 2
+#define GEM_NDS_RXCRCERR_OFFSET 3
+#define GEM_NDS_RXFRAMEERR_OFFSET 4
+#define GEM_NDS_RXFIFOERR_OFFSET 5
+#define GEM_NDS_TXERR_OFFSET 6
+#define GEM_NDS_TXABORTEDERR_OFFSET 7
+#define GEM_NDS_TXCARRIERERR_OFFSET 8
+#define GEM_NDS_TXFIFOERR_OFFSET 9
+#define GEM_NDS_COLLISIONS_OFFSET 10
+
+#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0)
+#define GEM_STAT_TITLE_BITS(name, title, bits) { \
+ .stat_string = title, \
+ .offset = GEM_##name, \
+ .stat_bits = bits \
+}
+
+/* list of gem statistic registers. The names MUST match the
+ * corresponding GEM_* definitions.
+ */
+static const struct gem_statistic gem_statistics[] = {
+ GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */
+ GEM_STAT_TITLE(TXCNT, "tx_frames"),
+ GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"),
+ GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"),
+ GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"),
+ GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"),
+ GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"),
+ GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"),
+ GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"),
+ GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"),
+ GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"),
+ GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"),
+ GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)),
+ GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions",
+ GEM_BIT(NDS_TXERR)|
+ GEM_BIT(NDS_TXABORTEDERR)|
+ GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"),
+ GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */
+ GEM_STAT_TITLE(RXCNT, "rx_frames"),
+ GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"),
+ GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"),
+ GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"),
+ GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"),
+ GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"),
+ GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"),
+ GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"),
+ GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"),
+ GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"),
+ GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"),
+ GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+ GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+ GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+ GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)),
+ GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors",
+ GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)),
+ GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+ GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+ GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)),
+ GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors",
+ GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors",
+ GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
+ GEM_BIT(NDS_RXERR)),
+};
+
+#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+
struct macb;
struct macb_or_gem_ops {
@@ -673,6 +816,8 @@ struct macb {
dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
int skb_length; /* saved skb length for pci_unmap_single */
unsigned int max_tx_length;
+
+ u64 ethtool_stats[GEM_STATS_LEN];
};
extern const struct ethtool_ops macb_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index babe2a915b00..526ea74e82d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
cpl->iff = dev->if_port;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
cpl->vlan_valid = 1;
- cpl->vlan = htons(vlan_tx_tag_get(skb));
+ cpl->vlan = htons(skb_vlan_tag_get(skb));
st->vlan_insert++;
} else
cpl->vlan_valid = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
index e13b7fe9d082..338301b11518 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/mc5.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -97,14 +97,6 @@ static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
-static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
- u32 v3)
-{
- t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
- t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
- t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
-}
-
static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
@@ -113,14 +105,6 @@ static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
-static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
- u32 *v3)
-{
- *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
- *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
- *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
-}
-
/*
* Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
* command cmd. The data to be written must have been set up by the caller.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 3dfcf600fcc6..d6aa602f168d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->len = htonl(skb->len);
cntrl = V_TXPKT_INTF(pi->port_id);
- if (vlan_tx_tag_present(skb))
- cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+ if (skb_vlan_tag_present(skb))
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
if (tso_info) {
@@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
qs->port_stats[SGE_PSTAT_VLANINS]++;
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index c74a898fcd4f..184a8d545ac4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -727,9 +727,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
}
- for (i = 0; i < 6; i++)
- p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
- hex_to_bin(vpd.na_data[2 * i + 1]);
+ ret = hex2bin(p->eth_base, vpd.na_data, 6);
+ if (ret < 0)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index b85280775997..ae50cd72358c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,6 +4,6 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
new file mode 100644
index 000000000000..9062a8434246
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -0,0 +1,317 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
+ *
+ * Written by Deepak (deepak.s@chelsio.com)
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/jhash.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include "cxgb4.h"
+#include "clip_tbl.h"
+
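+/* The CLIP table is split in half: IPv4 addresses hash into the lower
+ * buckets, while IPv6 addresses, folded down to 32 bits by XOR, hash into
+ * the upper buckets.
+ */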
+static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
+{
+ unsigned int clipt_size_half = c->clipt_size / 2;
+
+ return jhash_1word(*key, 0) % clipt_size_half;
+}
+
+static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
+{
+ unsigned int clipt_size_half = d->clipt_size / 2;
+ u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+ return clipt_size_half +
+ (jhash_1word(xor, 0) % clipt_size_half);
+}
+
+static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
+ int addr_len)
+{
+ return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
+ ipv6_clip_hash(ctbl, addr);
+}
+
+static int clip6_get_mbox(const struct net_device *dev,
+ const struct in6_addr *lip)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct fw_clip_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
+ *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+ *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+static int clip6_release_mbox(const struct net_device *dev,
+ const struct in6_addr *lip)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct fw_clip_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
+ *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+ *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
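+/* Look up an address in the CLIP table and take a reference on it,
+ * allocating a free entry (and, for IPv6, installing it in the firmware via
+ * FW_CLIP_CMD) if it is not already present.
+ */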
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce, *cte;
+ u32 *addr = (u32 *)lip;
+ int hash;
+ int addr_len;
+ int ret = 0;
+
+ if (!ctbl)
+ return 0;
+
+ if (v6)
+ addr_len = 16;
+ else
+ addr_len = 4;
+
+ hash = clip_addr_hash(ctbl, addr, addr_len);
+
+ read_lock_bh(&ctbl->lock);
+ list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
+ if (addr_len == cte->addr_len &&
+ memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ ce = cte;
+ read_unlock_bh(&ctbl->lock);
+ goto found;
+ }
+ }
+ read_unlock_bh(&ctbl->lock);
+
+ write_lock_bh(&ctbl->lock);
+ if (!list_empty(&ctbl->ce_free_head)) {
+ ce = list_first_entry(&ctbl->ce_free_head,
+ struct clip_entry, list);
+ list_del(&ce->list);
+ INIT_LIST_HEAD(&ce->list);
+ spin_lock_init(&ce->lock);
+ atomic_set(&ce->refcnt, 0);
+ atomic_dec(&ctbl->nfree);
+ ce->addr_len = addr_len;
+ memcpy(ce->addr, lip, addr_len);
+ list_add_tail(&ce->list, &ctbl->hash_list[hash]);
+ if (v6) {
+ ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
+ if (ret) {
+ write_unlock_bh(&ctbl->lock);
+ return ret;
+ }
+ }
+ } else {
+ write_unlock_bh(&ctbl->lock);
+ return -ENOMEM;
+ }
+ write_unlock_bh(&ctbl->lock);
+found:
+ atomic_inc(&ce->refcnt);
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_clip_get);
+
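+/* Drop a reference on a CLIP table entry; when the last reference goes away
+ * the entry is returned to the free list and, for IPv6, released in the
+ * firmware.
+ */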
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce, *cte;
+ u32 *addr = (u32 *)lip;
+ int hash;
+ int addr_len;
+
+ if (v6)
+ addr_len = 16;
+ else
+ addr_len = 4;
+
+ hash = clip_addr_hash(ctbl, addr, addr_len);
+
+ read_lock_bh(&ctbl->lock);
+ list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
+ if (addr_len == cte->addr_len &&
+ memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ ce = cte;
+ read_unlock_bh(&ctbl->lock);
+ goto found;
+ }
+ }
+ read_unlock_bh(&ctbl->lock);
+
+ return;
+found:
+ write_lock_bh(&ctbl->lock);
+ spin_lock_bh(&ce->lock);
+ if (atomic_dec_and_test(&ce->refcnt)) {
+ list_del(&ce->list);
+ INIT_LIST_HEAD(&ce->list);
+ list_add_tail(&ce->list, &ctbl->ce_free_head);
+ atomic_inc(&ctbl->nfree);
+ if (v6)
+ clip6_release_mbox(dev, (const struct in6_addr *)lip);
+ }
+ spin_unlock_bh(&ce->lock);
+ write_unlock_bh(&ctbl->lock);
+}
+EXPORT_SYMBOL(cxgb4_clip_release);
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int cxgb4_update_dev_clip(struct net_device *root_dev,
+ struct net_device *dev)
+{
+ struct inet6_dev *idev = NULL;
+ struct inet6_ifaddr *ifa;
+ int ret = 0;
+
+ idev = __in6_dev_get(root_dev);
+ if (!idev)
+ return ret;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
+ if (ret < 0)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ return ret;
+}
+
+int cxgb4_update_root_dev_clip(struct net_device *dev)
+{
+ struct net_device *root_dev = NULL;
+ int i, ret = 0;
+
+ /* First populate the real net device's IPv6 addresses */
+ ret = cxgb4_update_dev_clip(dev, dev);
+ if (ret)
+ return ret;
+
+ /* Parse all bond and vlan devices layered on top of the physical dev */
+ root_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (root_dev) {
+ ret = cxgb4_update_dev_clip(root_dev, dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < VLAN_N_VID; i++) {
+ root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+ if (!root_dev)
+ continue;
+
+ ret = cxgb4_update_dev_clip(root_dev, dev);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cxgb4_update_root_dev_clip);
+
+int clip_tbl_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct clip_tbl *ctbl = adapter->clipt;
+ struct clip_entry *ce;
+ char ip[60];
+ int i;
+
+ read_lock_bh(&ctbl->lock);
+
+ seq_puts(seq, "IP Address Users\n");
+ for (i = 0; i < ctbl->clipt_size; ++i) {
+ list_for_each_entry(ce, &ctbl->hash_list[i], list) {
+ ip[0] = '\0';
+ if (ce->addr_len == 16)
+ sprintf(ip, "%pI6c", ce->addr);
+ else
+ sprintf(ip, "%pI4c", ce->addr);
+ seq_printf(seq, "%-25s %u\n", ip,
+ atomic_read(&ce->refcnt));
+ }
+ }
+ seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
+
+ read_unlock_bh(&ctbl->lock);
+
+ return 0;
+}
+
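+/* Allocate and initialize a CLIP table covering hardware indices
+ * [clipt_start, clipt_end], with one hash bucket and one free entry per
+ * index.
+ */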
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end)
+{
+ struct clip_entry *cl_list;
+ struct clip_tbl *ctbl;
+ unsigned int clipt_size;
+ int i;
+
+ if (clipt_start >= clipt_end)
+ return NULL;
+ clipt_size = clipt_end - clipt_start + 1;
+ if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
+ return NULL;
+
+ ctbl = t4_alloc_mem(sizeof(*ctbl) +
+ clipt_size*sizeof(struct list_head));
+ if (!ctbl)
+ return NULL;
+
+ ctbl->clipt_start = clipt_start;
+ ctbl->clipt_size = clipt_size;
+ INIT_LIST_HEAD(&ctbl->ce_free_head);
+
+ atomic_set(&ctbl->nfree, clipt_size);
+ rwlock_init(&ctbl->lock);
+
+ for (i = 0; i < ctbl->clipt_size; ++i)
+ INIT_LIST_HEAD(&ctbl->hash_list[i]);
+
+ cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+ if (!cl_list) {
+ t4_free_mem(ctbl);
+ return NULL;
+ }
+ ctbl->cl_list = (void *)cl_list;
+
+ for (i = 0; i < clipt_size; i++) {
+ INIT_LIST_HEAD(&cl_list[i].list);
+ list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
+ }
+
+ return ctbl;
+}
+
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+ struct clip_tbl *ctbl = adap->clipt;
+
+ if (ctbl) {
+ if (ctbl->cl_list)
+ t4_free_mem(ctbl->cl_list);
+ t4_free_mem(ctbl);
+ }
+}
+EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
new file mode 100644
index 000000000000..2eaba0161cf8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -0,0 +1,41 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
+ *
+ * Written by Deepak (deepak.s@chelsio.com)
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+struct clip_entry {
+ spinlock_t lock; /* Hold while modifying clip reference */
+ atomic_t refcnt;
+ struct list_head list;
+ u32 addr[4];
+ int addr_len;
+};
+
+struct clip_tbl {
+ unsigned int clipt_start;
+ unsigned int clipt_size;
+ rwlock_t lock;
+ atomic_t nfree;
+ struct list_head ce_free_head;
+ void *cl_list;
+ struct list_head hash_list[0];
+};
+
+enum {
+ CLIPT_MIN_HASH_BUCKETS = 2,
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end);
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6);
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6);
+int clip_tbl_show(struct seq_file *seq, void *v);
+int cxgb4_update_root_dev_clip(struct net_device *dev);
+void t4_cleanup_clip_tbl(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5ab5c3133acd..d6cda17efe6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,16 +49,6 @@
#include <asm/io.h>
#include "cxgb4_uld.h"
-#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
-#define T4FW_VERSION_BUILD 0x00
-
-#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
-#define T5FW_VERSION_BUILD 0x00
-
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
@@ -231,6 +221,7 @@ struct sge_params {
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int la_mask; /* what events are recorded by TP LA */
unsigned short tx_modq_map; /* TX modulation scheduler queue to */
/* channel map */
@@ -290,11 +281,21 @@ enum chip_type {
T5_LAST_REV = T5_A1,
};
+struct devlog_params {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
+ struct devlog_params devlog;
+ enum pcie_memwin drv_memwin;
+
+ unsigned int cim_la_size;
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
@@ -476,6 +477,22 @@ struct sge_rspq { /* state for an SGE response queue */
struct adapter *adap;
struct net_device *netdev; /* associated net device */
rspq_handler_t handler;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define CXGB_POLL_STATE_IDLE 0
+#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
+#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */
+#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */
+#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */
+#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \
+ CXGB_POLL_STATE_POLL_YIELD)
+#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \
+ CXGB_POLL_STATE_POLL)
+#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \
+ CXGB_POLL_STATE_POLL_YIELD)
+ unsigned int bpoll_state;
+ spinlock_t bpoll_lock; /* lock for busy poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
};
struct sge_eth_stats { /* Ethernet queue statistics */
@@ -658,6 +675,9 @@ struct adapter {
unsigned int l2t_start;
unsigned int l2t_end;
struct l2t_data *l2t;
+ unsigned int clipt_start;
+ unsigned int clipt_end;
+ struct clip_tbl *clipt;
void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node;
struct list_head rcu_node;
@@ -877,6 +897,102 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
return netdev2pinfo(dev)->adapter;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+ spin_lock_init(&q->bpoll_lock);
+ q->bpoll_state = CXGB_POLL_STATE_IDLE;
+}
+
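+/* Claim the queue for NAPI processing. If the busy-poll path already owns
+ * it, record that NAPI yielded and return false so the caller backs off.
+ */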
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+ bool rc = true;
+
+ spin_lock(&q->bpoll_lock);
+ if (q->bpoll_state & CXGB_POLL_LOCKED) {
+ q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
+ rc = false;
+ } else {
+ q->bpoll_state = CXGB_POLL_STATE_NAPI;
+ }
+ spin_unlock(&q->bpoll_lock);
+ return rc;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+ bool rc = false;
+
+ spin_lock(&q->bpoll_lock);
+ if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+ rc = true;
+ q->bpoll_state = CXGB_POLL_STATE_IDLE;
+ spin_unlock(&q->bpoll_lock);
+ return rc;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+ bool rc = true;
+
+ spin_lock_bh(&q->bpoll_lock);
+ if (q->bpoll_state & CXGB_POLL_LOCKED) {
+ q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
+ rc = false;
+ } else {
+ q->bpoll_state |= CXGB_POLL_STATE_POLL;
+ }
+ spin_unlock_bh(&q->bpoll_lock);
+ return rc;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+ bool rc = false;
+
+ spin_lock_bh(&q->bpoll_lock);
+ if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+ rc = true;
+ q->bpoll_state = CXGB_POLL_STATE_IDLE;
+ spin_unlock_bh(&q->bpoll_lock);
+ return rc;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+ return q->bpoll_state & CXGB_POLL_USER_PEND;
+}
+#else
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+ return true;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+ return false;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+ return false;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+ return false;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+ return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
@@ -905,6 +1021,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
+int cxgb_busy_poll(struct napi_struct *napi);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -995,12 +1112,16 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
@@ -1013,6 +1134,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
+unsigned int qtimer_val(const struct adapter *adap,
+ const struct sge_rspq *q);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
@@ -1022,20 +1145,46 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+void t4_read_rss_key(struct adapter *adapter, u32 *key);
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+ u32 *valp);
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+ u32 *vfl, u32 *vfh);
+u32 t4_read_rss_pf_map(struct adapter *adapter);
+u32 t4_read_rss_pf_mask(struct adapter *adapter);
+
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
+ size_t n);
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
+ size_t n);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index a35d1ec6950e..6074680bc985 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -22,7 +22,7 @@
/* DCBx version control
*/
-char *dcb_ver_array[] = {
+static const char * const dcb_ver_array[] = {
"Unknown",
"DCBx-CIN",
"DCBx-CEE 1.01",
@@ -428,7 +428,10 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
}
*pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
- INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+ if (local)
+ INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+ else
+ INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -900,6 +903,88 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}
+static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
+ int local)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct port_dcb_info *dcb = &pi->dcb;
+ struct adapter *adap = pi->adapter;
+ uint32_t tc_info;
+ struct fw_port_cmd pcmd;
+ int i, bwg, err;
+
+ if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
+ return 0;
+
+ ets->ets_cap = dcb->pg_num_tcs_supported;
+
+ if (local) {
+ ets->willing = 1;
+ INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+ } else {
+ INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+ }
+
+ pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+ err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+ if (err != FW_PORT_DCB_CFG_SUCCESS) {
+ dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+ return err;
+ }
+
+ tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+
+ if (local)
+ INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+ else
+ INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+ pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+ err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+ if (err != FW_PORT_DCB_CFG_SUCCESS) {
+ dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+ -err);
+ return err;
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
+ ets->prio_tc[i] = bwg;
+ ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+ ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
+ ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
+ }
+
+ return 0;
+}
+
+static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
+{
+ return cxgb4_ieee_read_ets(dev, ets, 1);
+}
+
+/* We reuse this for peer PFC as well, since PFC can't be enabled in only one direction */
+static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct port_dcb_info *dcb = &pi->dcb;
+
+ memset(pfc, 0, sizeof(struct ieee_pfc));
+
+ if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
+ return 0;
+
+ pfc->pfc_cap = dcb->pfc_num_tcs_supported;
+ pfc->pfc_en = bitswap_1(dcb->pfcen);
+
+ return 0;
+}
+
+static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
+{
+ return cxgb4_ieee_read_ets(dev, ets, 0);
+}
+
/* Fill in the Application User Priority Map associated with the
* specified Application.
* Priority for IEEE dcb_app is an integer, with 0 being a valid value
@@ -1106,14 +1191,23 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
struct port_info *pi = netdev2pinfo(dev);
cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
- pfc->pfc_en = pi->dcb.pfcen;
+
+ /* The firmware sends this to us in a format that is a bit-flipped
+ * version of the spec, so correct it before passing it to the host.
+ * Other uses of pfcen handle this with bit shifting.
+ */
+ pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
return 0;
}
const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
+ .ieee_getets = cxgb4_ieee_get_ets,
+ .ieee_getpfc = cxgb4_ieee_get_pfc,
.ieee_getapp = cxgb4_ieee_getapp,
.ieee_setapp = cxgb4_ieee_setapp,
+ .ieee_peer_getets = cxgb4_ieee_peer_ets,
+ .ieee_peer_getpfc = cxgb4_ieee_get_pfc,
/* CEE std */
.getstate = cxgb4_getstate,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 31ce425616c9..ccf24d3dc982 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -136,6 +136,17 @@ void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
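+/* Reverse the bit order within a byte (e.g. 0x01 <-> 0x80). The firmware
+ * reports priority bitmaps such as pfcen in bit-flipped order relative to
+ * the spec, so flip them before handing them to the stack.
+ */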
+static inline __u8 bitswap_1(unsigned char val)
+{
+ return ((val & 0x80) >> 7) |
+ ((val & 0x40) >> 5) |
+ ((val & 0x20) >> 3) |
+ ((val & 0x10) >> 1) |
+ ((val & 0x08) << 1) |
+ ((val & 0x04) << 3) |
+ ((val & 0x02) << 5) |
+ ((val & 0x01) << 7);
+}
#define CXGB4_DCB_ENABLED true
#else /* !CONFIG_CHELSIO_T4_DCB */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c98a350d857e..d221f6b28fcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -36,13 +36,1867 @@
#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include <linux/sort.h>
+#include <linux/ctype.h>
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4fw_api.h"
#include "cxgb4_debugfs.h"
+#include "clip_tbl.h"
#include "l2t.h"
+/* generic seq_file support for showing a table of size rows x width. */
+static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
+{
+ pos -= tb->skip_first;
+ return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
+}
+
+static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
+{
+ struct seq_tab *tb = seq->private;
+
+ if (tb->skip_first && *pos == 0)
+ return SEQ_START_TOKEN;
+
+ return seq_tab_get_idx(tb, *pos);
+}
+
+static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ v = seq_tab_get_idx(seq->private, *pos + 1);
+ if (v)
+ ++*pos;
+ return v;
+}
+
+static void seq_tab_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int seq_tab_show(struct seq_file *seq, void *v)
+{
+ const struct seq_tab *tb = seq->private;
+
+ return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
+}
+
+static const struct seq_operations seq_tab_ops = {
+ .start = seq_tab_start,
+ .next = seq_tab_next,
+ .stop = seq_tab_stop,
+ .show = seq_tab_show
+};
+
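+/* Open a seq_file backed by a private table of 'rows' entries, each 'width'
+ * bytes wide; 'have_header' reserves a SEQ_START_TOKEN header line and
+ * 'show' renders one row at a time.
+ */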
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+ unsigned int width, unsigned int have_header,
+ int (*show)(struct seq_file *seq, void *v, int i))
+{
+ struct seq_tab *p;
+
+ p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
+ if (p) {
+ p->show = show;
+ p->rows = rows;
+ p->width = width;
+ p->skip_first = have_header != 0;
+ }
+ return p;
+}
+
+/* Trim the size of a seq_tab to the supplied number of rows. The operation is
+ * irreversible.
+ */
+static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
+{
+ if (new_rows > p->rows)
+ return -EINVAL;
+ p->rows = new_rows;
+ return 0;
+}
+
+static int cim_la_show(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Status Data PC LS0Stat LS0Addr "
+ " LS0Data\n");
+ else {
+ const u32 *p = v;
+
+ seq_printf(seq,
+ " %02x %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
+ (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
+ p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
+ p[6], p[7]);
+ }
+ return 0;
+}
+
+static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Data PC\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %08x %08x\n", p[5] & 0xff, p[6],
+ p[7]);
+ seq_printf(seq, " %02x %02x%06x %02x%06x\n",
+ (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
+ p[4] & 0xff, p[5] >> 8);
+ seq_printf(seq, " %02x %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
+ p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
+ }
+ return 0;
+}
+
+static int cim_la_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ unsigned int cfg;
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+ if (ret)
+ return ret;
+
+ p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ?
+ cim_la_show_3in1 : cim_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
+ if (ret)
+ seq_release_private(inode, file);
+ return ret;
+}
+
+static const struct file_operations cim_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int cim_qcfg_show(struct seq_file *seq, void *v)
+{
+ static const char * const qname[] = {
+ "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
+ "SGE0-RX", "SGE1-RX"
+ };
+
+ int i;
+ struct adapter *adap = seq->private;
+ u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
+ u16 thres[CIM_NUM_IBQ];
+ u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
+ u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
+ u32 *p = stat;
+ int cim_num_obq = is_t4(adap->params.chip) ?
+ CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+ i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
+ UP_IBQ_0_SHADOW_RDADDR_A,
+ ARRAY_SIZE(stat), stat);
+ if (!i) {
+ if (is_t4(adap->params.chip)) {
+ i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
+ ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
+ wr = obq_wr_t4;
+ } else {
+ i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
+ ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
+ wr = obq_wr_t5;
+ }
+ }
+ if (i)
+ return i;
+
+ t4_read_cimq_cfg(adap, base, size, thres);
+
+ seq_printf(seq,
+ " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail\n");
+ for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
+ seq_printf(seq, "%7s %5x %5u %5u %6x %4x %4u %4u %5u\n",
+ qname[i], base[i], size[i], thres[i],
+ IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
+ QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+ QUEREMFLITS_G(p[2]) * 16);
+ for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
+ seq_printf(seq, "%7s %5x %5u %12x %4x %4u %4u %5u\n",
+ qname[i], base[i], size[i],
+ QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
+ QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+ QUEREMFLITS_G(p[2]) * 16);
+ return 0;
+}
+
+static int cim_qcfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cim_qcfg_show, inode->i_private);
+}
+
+static const struct file_operations cim_qcfg_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_qcfg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int cimq_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
+ p[2], p[3]);
+ return 0;
+}
+
+static int cim_ibq_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct seq_tab *p;
+ unsigned int qid = (uintptr_t)inode->i_private & 7;
+ struct adapter *adap = inode->i_private - qid;
+
+ p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
+ if (!p)
+ return -ENOMEM;
+
+ ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
+ if (ret < 0)
+ seq_release_private(inode, file);
+ else
+ ret = 0;
+ return ret;
+}
+
+static const struct file_operations cim_ibq_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_ibq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int cim_obq_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct seq_tab *p;
+ unsigned int qid = (uintptr_t)inode->i_private & 7;
+ struct adapter *adap = inode->i_private - qid;
+
+ p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
+ if (!p)
+ return -ENOMEM;
+
+ ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
+ if (ret < 0) {
+ seq_release_private(inode, file);
+ } else {
+ seq_tab_trim(p, ret / 4);
+ ret = 0;
+ }
+ return ret;
+}
+
+static const struct file_operations cim_obq_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_obq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
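+/* Describes one bit-field within a logic-analyzer word: its name, starting
+ * bit position and width in bits.
+ */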
+struct field_desc {
+ const char *name;
+ unsigned int start;
+ unsigned int width;
+};
+
+static void field_desc_show(struct seq_file *seq, u64 v,
+ const struct field_desc *p)
+{
+ char buf[32];
+ int line_size = 0;
+
+ while (p->name) {
+ u64 mask = (1ULL << p->width) - 1;
+ int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
+ ((unsigned long long)v >> p->start) & mask);
+
+ if (line_size + len >= 79) {
+ line_size = 8;
+ seq_puts(seq, "\n ");
+ }
+ seq_printf(seq, "%s ", buf);
+ line_size += len + 1;
+ p++;
+ }
+ seq_putc(seq, '\n');
+}
+
+static struct field_desc tp_la0[] = {
+ { "RcfOpCodeOut", 60, 4 },
+ { "State", 56, 4 },
+ { "WcfState", 52, 4 },
+ { "RcfOpcSrcOut", 50, 2 },
+ { "CRxError", 49, 1 },
+ { "ERxError", 48, 1 },
+ { "SanityFailed", 47, 1 },
+ { "SpuriousMsg", 46, 1 },
+ { "FlushInputMsg", 45, 1 },
+ { "FlushInputCpl", 44, 1 },
+ { "RssUpBit", 43, 1 },
+ { "RssFilterHit", 42, 1 },
+ { "Tid", 32, 10 },
+ { "InitTcb", 31, 1 },
+ { "LineNumber", 24, 7 },
+ { "Emsg", 23, 1 },
+ { "EdataOut", 22, 1 },
+ { "Cmsg", 21, 1 },
+ { "CdataOut", 20, 1 },
+ { "EreadPdu", 19, 1 },
+ { "CreadPdu", 18, 1 },
+ { "TunnelPkt", 17, 1 },
+ { "RcfPeerFin", 16, 1 },
+ { "RcfReasonOut", 12, 4 },
+ { "TxCchannel", 10, 2 },
+ { "RcfTxChannel", 8, 2 },
+ { "RxEchannel", 6, 2 },
+ { "RcfRxChannel", 5, 1 },
+ { "RcfDataOutSrdy", 4, 1 },
+ { "RxDvld", 3, 1 },
+ { "RxOoDvld", 2, 1 },
+ { "RxCongestion", 1, 1 },
+ { "TxCongestion", 0, 1 },
+ { NULL }
+};
+
+static int tp_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u64 *p = v;
+
+ field_desc_show(seq, *p, tp_la0);
+ return 0;
+}
+
+static int tp_la_show2(struct seq_file *seq, void *v, int idx)
+{
+ const u64 *p = v;
+
+ if (idx)
+ seq_putc(seq, '\n');
+ field_desc_show(seq, p[0], tp_la0);
+ if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+ field_desc_show(seq, p[1], tp_la0);
+ return 0;
+}
+
+static int tp_la_show3(struct seq_file *seq, void *v, int idx)
+{
+ static struct field_desc tp_la1[] = {
+ { "CplCmdIn", 56, 8 },
+ { "CplCmdOut", 48, 8 },
+ { "ESynOut", 47, 1 },
+ { "EAckOut", 46, 1 },
+ { "EFinOut", 45, 1 },
+ { "ERstOut", 44, 1 },
+ { "SynIn", 43, 1 },
+ { "AckIn", 42, 1 },
+ { "FinIn", 41, 1 },
+ { "RstIn", 40, 1 },
+ { "DataIn", 39, 1 },
+ { "DataInVld", 38, 1 },
+ { "PadIn", 37, 1 },
+ { "RxBufEmpty", 36, 1 },
+ { "RxDdp", 35, 1 },
+ { "RxFbCongestion", 34, 1 },
+ { "TxFbCongestion", 33, 1 },
+ { "TxPktSumSrdy", 32, 1 },
+ { "RcfUlpType", 28, 4 },
+ { "Eread", 27, 1 },
+ { "Ebypass", 26, 1 },
+ { "Esave", 25, 1 },
+ { "Static0", 24, 1 },
+ { "Cread", 23, 1 },
+ { "Cbypass", 22, 1 },
+ { "Csave", 21, 1 },
+ { "CPktOut", 20, 1 },
+ { "RxPagePoolFull", 18, 2 },
+ { "RxLpbkPkt", 17, 1 },
+ { "TxLpbkPkt", 16, 1 },
+ { "RxVfValid", 15, 1 },
+ { "SynLearned", 14, 1 },
+ { "SetDelEntry", 13, 1 },
+ { "SetInvEntry", 12, 1 },
+ { "CpcmdDvld", 11, 1 },
+ { "CpcmdSave", 10, 1 },
+ { "RxPstructsFull", 8, 2 },
+ { "EpcmdDvld", 7, 1 },
+ { "EpcmdFlush", 6, 1 },
+ { "EpcmdTrimPrefix", 5, 1 },
+ { "EpcmdTrimPostfix", 4, 1 },
+ { "ERssIp4Pkt", 3, 1 },
+ { "ERssIp6Pkt", 2, 1 },
+ { "ERssTcpUdpPkt", 1, 1 },
+ { "ERssFceFipPkt", 0, 1 },
+ { NULL }
+ };
+ static struct field_desc tp_la2[] = {
+ { "CplCmdIn", 56, 8 },
+ { "MpsVfVld", 55, 1 },
+ { "MpsPf", 52, 3 },
+ { "MpsVf", 44, 8 },
+ { "SynIn", 43, 1 },
+ { "AckIn", 42, 1 },
+ { "FinIn", 41, 1 },
+ { "RstIn", 40, 1 },
+ { "DataIn", 39, 1 },
+ { "DataInVld", 38, 1 },
+ { "PadIn", 37, 1 },
+ { "RxBufEmpty", 36, 1 },
+ { "RxDdp", 35, 1 },
+ { "RxFbCongestion", 34, 1 },
+ { "TxFbCongestion", 33, 1 },
+ { "TxPktSumSrdy", 32, 1 },
+ { "RcfUlpType", 28, 4 },
+ { "Eread", 27, 1 },
+ { "Ebypass", 26, 1 },
+ { "Esave", 25, 1 },
+ { "Static0", 24, 1 },
+ { "Cread", 23, 1 },
+ { "Cbypass", 22, 1 },
+ { "Csave", 21, 1 },
+ { "CPktOut", 20, 1 },
+ { "RxPagePoolFull", 18, 2 },
+ { "RxLpbkPkt", 17, 1 },
+ { "TxLpbkPkt", 16, 1 },
+ { "RxVfValid", 15, 1 },
+ { "SynLearned", 14, 1 },
+ { "SetDelEntry", 13, 1 },
+ { "SetInvEntry", 12, 1 },
+ { "CpcmdDvld", 11, 1 },
+ { "CpcmdSave", 10, 1 },
+ { "RxPstructsFull", 8, 2 },
+ { "EpcmdDvld", 7, 1 },
+ { "EpcmdFlush", 6, 1 },
+ { "EpcmdTrimPrefix", 5, 1 },
+ { "EpcmdTrimPostfix", 4, 1 },
+ { "ERssIp4Pkt", 3, 1 },
+ { "ERssIp6Pkt", 2, 1 },
+ { "ERssTcpUdpPkt", 1, 1 },
+ { "ERssFceFipPkt", 0, 1 },
+ { NULL }
+ };
+ const u64 *p = v;
+
+ if (idx)
+ seq_putc(seq, '\n');
+ field_desc_show(seq, p[0], tp_la0);
+ if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+ field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
+ return 0;
+}
+
+static int tp_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) {
+ case 2:
+ p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
+ tp_la_show2);
+ break;
+ case 3:
+ p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
+ tp_la_show3);
+ break;
+ default:
+ p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
+ }
+ if (!p)
+ return -ENOMEM;
+
+ t4_tp_read_la(adap, (u64 *)p->data, NULL);
+ return 0;
+}
+
+static ssize_t tp_la_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int err;
+ char s[32];
+ unsigned long val;
+ size_t size = min(sizeof(s) - 1, count);
+ struct adapter *adap = FILE_DATA(file)->i_private;
+
+ if (copy_from_user(s, buf, size))
+ return -EFAULT;
+ s[size] = '\0';
+ err = kstrtoul(s, 0, &val);
+ if (err)
+ return err;
+ if (val > 0xffff)
+ return -EINVAL;
+ adap->params.tp.la_mask = val << 16;
+ t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U,
+ adap->params.tp.la_mask);
+ return count;
+}
+
+static const struct file_operations tp_la_fops = {
+ .owner = THIS_MODULE,
+ .open = tp_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+ .write = tp_la_write
+};
+
+static int ulprx_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, " Pcmd Type Message"
+ " Data\n");
+ else
+ seq_printf(seq, "%08x%08x %4x %08x %08x%08x%08x%08x\n",
+ p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
+ return 0;
+}
+
+static int ulprx_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
+ ulprx_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_ulprx_read_la(adap, (u32 *)p->data);
+ return 0;
+}
+
+static const struct file_operations ulprx_la_fops = {
+ .owner = THIS_MODULE,
+ .open = ulprx_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+/* Show the PM memory stats. These stats include:
+ *
+ * TX:
+ * Read: memory read operation
+ * Write Bypass: cut-through
+ * Bypass + mem: cut-through and save copy
+ *
+ * RX:
+ * Read: memory read
+ * Write Bypass: cut-through
+ * Flush: payload trim or drop
+ */
+static int pm_stats_show(struct seq_file *seq, void *v)
+{
+ static const char * const tx_pm_stats[] = {
+ "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
+ };
+ static const char * const rx_pm_stats[] = {
+ "Read:", "Write bypass:", "Write mem:", "Flush:"
+ };
+
+ int i;
+ u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
+ u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
+ struct adapter *adap = seq->private;
+
+ t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
+ t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
+
+ seq_printf(seq, "%13s %10s %20s\n", " ", "Tx pcmds", "Tx bytes");
+ for (i = 0; i < PM_NSTATS - 1; i++)
+ seq_printf(seq, "%-13s %10u %20llu\n",
+ tx_pm_stats[i], tx_cnt[i], tx_cyc[i]);
+
+ seq_printf(seq, "%13s %10s %20s\n", " ", "Rx pcmds", "Rx bytes");
+ for (i = 0; i < PM_NSTATS - 1; i++)
+ seq_printf(seq, "%-13s %10u %20llu\n",
+ rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
+ return 0;
+}
+
+static int pm_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pm_stats_show, inode->i_private);
+}
+
+static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adapter *adap = FILE_DATA(file)->i_private;
+
+ t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
+ t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
+ return count;
+}
+
+static const struct file_operations pm_stats_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = pm_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pm_stats_clear
+};
+
+static int cctrl_tbl_show(struct seq_file *seq, void *v)
+{
+ static const char * const dec_fac[] = {
+ "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
+ "0.9375" };
+
+ int i;
+ u16 incr[NMTUS][NCCTRL_WIN];
+ struct adapter *adap = seq->private;
+
+ t4_read_cong_tbl(adap, incr);
+
+ for (i = 0; i < NCCTRL_WIN; ++i) {
+ seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
+ incr[0][i], incr[1][i], incr[2][i], incr[3][i],
+ incr[4][i], incr[5][i], incr[6][i], incr[7][i]);
+ seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
+ incr[8][i], incr[9][i], incr[10][i], incr[11][i],
+ incr[12][i], incr[13][i], incr[14][i], incr[15][i],
+ adap->params.a_wnd[i],
+ dec_fac[adap->params.b_wnd[i]]);
+ }
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
+
+/* Format a value in a unit that differs from the value's native unit by the
+ * given factor.
+ */
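+/* For example, with factor 1000, a value of 1250 is formatted as "1.25". */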
+static char *unit_conv(char *buf, size_t len, unsigned int val,
+ unsigned int factor)
+{
+ unsigned int rem = val % factor;
+
+ if (rem == 0) {
+ snprintf(buf, len, "%u", val / factor);
+ } else {
+ while (rem % 10 == 0)
+ rem /= 10;
+ snprintf(buf, len, "%u.%u", val / factor, rem);
+ }
+ return buf;
+}
+
+static int clk_show(struct seq_file *seq, void *v)
+{
+ char buf[32];
+ struct adapter *adap = seq->private;
+ unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk; /* in ps */
+ u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+ unsigned int tre = TIMERRESOLUTION_G(res);
+ unsigned int dack_re = DELAYEDACKRESOLUTION_G(res);
+ unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */
+
+ seq_printf(seq, "Core clock period: %s ns\n",
+ unit_conv(buf, sizeof(buf), cclk_ps, 1000));
+ seq_printf(seq, "TP timer tick: %s us\n",
+ unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000));
+ seq_printf(seq, "TCP timestamp tick: %s us\n",
+ unit_conv(buf, sizeof(buf),
+ (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000));
+ seq_printf(seq, "DACK tick: %s us\n",
+ unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000));
+ seq_printf(seq, "DACK timer: %u us\n",
+ ((cclk_ps << dack_re) / 1000000) *
+ t4_read_reg(adap, TP_DACK_TIMER_A));
+ seq_printf(seq, "Retransmit min: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A));
+ seq_printf(seq, "Retransmit max: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A));
+ seq_printf(seq, "Persist timer min: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A));
+ seq_printf(seq, "Persist timer max: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A));
+ seq_printf(seq, "Keepalive idle timer: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A));
+ seq_printf(seq, "Keepalive interval: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A));
+ seq_printf(seq, "Initial SRTT: %llu us\n",
+ tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A)));
+ seq_printf(seq, "FINWAIT2 timer: %llu us\n",
+ tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A));
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(clk);
+
+/* Firmware Device Log dump. */
+static const char * const devlog_level_strings[] = {
+ [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
+ [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
+ [FW_DEVLOG_LEVEL_ERR] = "ERR",
+ [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
+ [FW_DEVLOG_LEVEL_INFO] = "INFO",
+ [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
+};
+
+static const char * const devlog_facility_strings[] = {
+ [FW_DEVLOG_FACILITY_CORE] = "CORE",
+ [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
+ [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
+ [FW_DEVLOG_FACILITY_RES] = "RES",
+ [FW_DEVLOG_FACILITY_HW] = "HW",
+ [FW_DEVLOG_FACILITY_FLR] = "FLR",
+ [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
+ [FW_DEVLOG_FACILITY_PHY] = "PHY",
+ [FW_DEVLOG_FACILITY_MAC] = "MAC",
+ [FW_DEVLOG_FACILITY_PORT] = "PORT",
+ [FW_DEVLOG_FACILITY_VI] = "VI",
+ [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
+ [FW_DEVLOG_FACILITY_ACL] = "ACL",
+ [FW_DEVLOG_FACILITY_TM] = "TM",
+ [FW_DEVLOG_FACILITY_QFC] = "QFC",
+ [FW_DEVLOG_FACILITY_DCB] = "DCB",
+ [FW_DEVLOG_FACILITY_ETH] = "ETH",
+ [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
+ [FW_DEVLOG_FACILITY_RI] = "RI",
+ [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
+ [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
+ [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
+ [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
+};
+
+/* Information gathered by the Device Log Open routine for the display routine.
+ */
+struct devlog_info {
+ unsigned int nentries; /* number of entries in log[] */
+ unsigned int first; /* first [temporal] entry in log[] */
+ struct fw_devlog_e log[0]; /* Firmware Device Log */
+};
+
+/* Dump a Firmware Device Log entry.
+ */
+static int devlog_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "%10s %15s %8s %8s %s\n",
+ "Seq#", "Tstamp", "Level", "Facility", "Message");
+ else {
+ struct devlog_info *dinfo = seq->private;
+ int fidx = (uintptr_t)v - 2;
+ unsigned long index;
+ struct fw_devlog_e *e;
+
+ /* Get a pointer to the log entry to display. Skip unused log
+ * entries.
+ */
+ index = dinfo->first + fidx;
+ if (index >= dinfo->nentries)
+ index -= dinfo->nentries;
+ e = &dinfo->log[index];
+ if (e->timestamp == 0)
+ return 0;
+
+ /* Print the message. This depends on the firmware using
+ * exactly the same formatting strings as the kernel, so we may
+ * eventually have to put a format interpreter in here ...
+ */
+ seq_printf(seq, "%10d %15llu %8s %8s ",
+ e->seqno, e->timestamp,
+ (e->level < ARRAY_SIZE(devlog_level_strings)
+ ? devlog_level_strings[e->level]
+ : "UNKNOWN"),
+ (e->facility < ARRAY_SIZE(devlog_facility_strings)
+ ? devlog_facility_strings[e->facility]
+ : "UNKNOWN"));
+ seq_printf(seq, e->fmt, e->params[0], e->params[1],
+ e->params[2], e->params[3], e->params[4],
+ e->params[5], e->params[6], e->params[7]);
+ }
+ return 0;
+}
+
+/* Sequential File Operations for Device Log.
+ */
+static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
+{
+ if (pos > dinfo->nentries)
+ return NULL;
+
+ return (void *)(uintptr_t)(pos + 1);
+}
+
+static void *devlog_start(struct seq_file *seq, loff_t *pos)
+{
+ struct devlog_info *dinfo = seq->private;
+
+ return (*pos
+ ? devlog_get_idx(dinfo, *pos)
+ : SEQ_START_TOKEN);
+}
+
+static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct devlog_info *dinfo = seq->private;
+
+ (*pos)++;
+ return devlog_get_idx(dinfo, *pos);
+}
+
+static void devlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations devlog_seq_ops = {
+ .start = devlog_start,
+ .next = devlog_next,
+ .stop = devlog_stop,
+ .show = devlog_show
+};
+
+/* Set up for reading the firmware's device log. We read the entire log here
+ * and then display it incrementally in devlog_show().
+ */
+static int devlog_open(struct inode *inode, struct file *file)
+{
+ struct adapter *adap = inode->i_private;
+ struct devlog_params *dparams = &adap->params.devlog;
+ struct devlog_info *dinfo;
+ unsigned int index;
+ u32 fseqno;
+ int ret;
+
+ /* If we don't know where the log is we can't do anything.
+ */
+ if (dparams->start == 0)
+ return -ENXIO;
+
+ /* Allocate the space to read in the firmware's device log and set up
+ * for the iterated call to our display function.
+ */
+ dinfo = __seq_open_private(file, &devlog_seq_ops,
+ sizeof(*dinfo) + dparams->size);
+ if (!dinfo)
+ return -ENOMEM;
+
+ /* Record the basic log buffer information and read in the raw log.
+ */
+ dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
+ dinfo->first = 0;
+ spin_lock(&adap->win0_lock);
+ ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
+ dparams->start, dparams->size, (__be32 *)dinfo->log,
+ T4_MEMORY_READ);
+ spin_unlock(&adap->win0_lock);
+ if (ret) {
+ seq_release_private(inode, file);
+ return ret;
+ }
+
+ /* Translate log multi-byte integral elements into host native format
+ * and determine where the first entry in the log is.
+ */
+ for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
+ struct fw_devlog_e *e = &dinfo->log[index];
+ int i;
+ __u32 seqno;
+
+ if (e->timestamp == 0)
+ continue;
+
+ e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
+ seqno = be32_to_cpu(e->seqno);
+ for (i = 0; i < 8; i++)
+ e->params[i] =
+ (__force __be32)be32_to_cpu(e->params[i]);
+
+ if (seqno < fseqno) {
+ fseqno = seqno;
+ dinfo->first = index;
+ }
+ }
+ return 0;
+}
+
+static const struct file_operations devlog_fops = {
+ .owner = THIS_MODULE,
+ .open = devlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int mbox_show(struct seq_file *seq, void *v)
+{
+ static const char * const owner[] = { "none", "FW", "driver",
+ "unknown" };
+
+ int i;
+ unsigned int mbox = (uintptr_t)seq->private & 7;
+ struct adapter *adap = seq->private - mbox;
+ void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+ unsigned int ctrl_reg = (is_t4(adap->params.chip)
+ ? CIM_PF_MAILBOX_CTRL_A
+ : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
+ void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+ i = MBOWNER_G(readl(ctrl));
+ seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
+
+ for (i = 0; i < MBOX_LEN; i += 8)
+ seq_printf(seq, "%016llx\n",
+ (unsigned long long)readq(addr + i));
+ return 0;
+}
+
+static int mbox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mbox_show, inode->i_private);
+}
+
+static ssize_t mbox_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int i;
+ char c = '\n', s[256];
+ unsigned long long data[8];
+ const struct inode *ino;
+ unsigned int mbox;
+ struct adapter *adap;
+ void __iomem *addr;
+ void __iomem *ctrl;
+
+ if (count > sizeof(s) - 1 || !count)
+ return -EINVAL;
+ if (copy_from_user(s, buf, count))
+ return -EFAULT;
+ s[count] = '\0';
+
+ if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0],
+ &data[1], &data[2], &data[3], &data[4], &data[5], &data[6],
+ &data[7], &c) < 8 || c != '\n')
+ return -EINVAL;
+
+ ino = FILE_DATA(file);
+ mbox = (uintptr_t)ino->i_private & 7;
+ adap = ino->i_private - mbox;
+ addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+ ctrl = addr + MBOX_LEN;
+
+ if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL)
+ return -EBUSY;
+
+ for (i = 0; i < 8; i++)
+ writeq(data[i], addr + 8 * i);
+
+ writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl);
+ return count;
+}
+
+static const struct file_operations mbox_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mbox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mbox_write
+};
+
+static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = FILE_DATA(file)->i_size;
+ struct adapter *adap = file->private_data;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ u8 data[256];
+
+ ofst = pos & 3;
+ len = min(count + ofst, sizeof(data));
+ ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4,
+ (u32 *)data, 1);
+ if (ret)
+ return ret;
+
+ len -= ofst;
+ if (copy_to_user(buf, data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations flash_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mem_open,
+ .read = flash_read,
+};
+
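+/* Convert a raw MPS TCAM X/Y register pair into the Ethernet address and
+ * mask shown to the user: the address bytes come from Y and the mask is the
+ * union of the X and Y bits.
+ */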
+static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+ *mask = x | y;
+ y = (__force u64)cpu_to_be64(y);
+ memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
+static int mps_tcam_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
+ " VF Replication "
+ "P0 P1 P2 P3 ML\n");
+ else {
+ u64 mask;
+ u8 addr[ETH_ALEN];
+ struct adapter *adap = seq->private;
+ unsigned int idx = (uintptr_t)v - 2;
+ u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+ u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+ u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+ u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
+ u32 rplc[4] = {0, 0, 0, 0};
+
+ if (tcamx & tcamy) {
+ seq_printf(seq, "%3u -\n", idx);
+ goto out;
+ }
+
+ if (cls_lo & REPLICATE_F) {
+ struct fw_ldst_cmd ldst_cmd;
+ int ret;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(
+ FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.fid_ctl =
+ htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+ FW_LDST_CMD_CTL_V(idx));
+ ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Can't read MPS "
+ "replication map for idx %d: %d\n",
+ idx, -ret);
+ else {
+ rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
+ rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
+ rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
+ rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+ }
+ }
+
+ tcamxy2valmask(tcamx, tcamy, addr, &mask);
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
+ "%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], (unsigned long long)mask,
+ (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
+ PF_G(cls_lo),
+ (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+ if (cls_lo & REPLICATE_F)
+ seq_printf(seq, " %08x %08x %08x %08x",
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ else
+ seq_printf(seq, "%36c", ' ');
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+ SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> MULTILISTEN0_S) & 0xf);
+ }
+out: return 0;
+}
+
+static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adap = seq->private;
+ int max_mac_addr = is_t4(adap->params.chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mps_tcam_get_idx(seq, *pos);
+}
+
+static void mps_tcam_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mps_tcam_seq_ops = {
+ .start = mps_tcam_start,
+ .next = mps_tcam_next,
+ .stop = mps_tcam_stop,
+ .show = mps_tcam_show
+};
+
+static int mps_tcam_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mps_tcam_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mps_tcam_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mps_tcam_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/* Display various sensor information.
+ */
+static int sensors_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+ u32 param[7], val[7];
+ int ret;
+
+ /* Note that if the sensors haven't been initialized and turned on
+ * we'll get values of 0, so treat those as "<unknown>" ...
+ */
+ param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
+ param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ param, val);
+
+ if (ret < 0 || val[0] == 0)
+ seq_puts(seq, "Temperature: <unknown>\n");
+ else
+ seq_printf(seq, "Temperature: %dC\n", val[0]);
+
+ if (ret < 0 || val[1] == 0)
+ seq_puts(seq, "Core VDD: <unknown>\n");
+ else
+ seq_printf(seq, "Core VDD: %dmV\n", val[1]);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int clip_tbl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clip_tbl_show, inode->i_private);
+}
+
+static const struct file_operations clip_tbl_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = clip_tbl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+#endif
+
+/* RSS Table.
+ */
+
+static int rss_show(struct seq_file *seq, void *v, int idx)
+{
+ u16 *entry = v;
+
+ seq_printf(seq, "%4d: %4u %4u %4u %4u %4u %4u %4u %4u\n",
+ idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4],
+ entry[5], entry[6], entry[7]);
+ return 0;
+}
+
+static int rss_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+ if (!p)
+ return -ENOMEM;
+
+ ret = t4_read_rss(adap, (u16 *)p->data);
+ if (ret)
+ seq_release_private(inode, file);
+
+ return ret;
+}
+
+static const struct file_operations rss_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rss_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+/* RSS Configuration.
+ */
+
+/* Small utility function that returns the string "yes" if the supplied
+ * argument is non-zero and "no" otherwise.
+ */
+static const char *yesno(int x)
+{
+ static const char *yes = "yes";
+ static const char *no = "no";
+
+ return x ? yes : no;
+}
+
+static int rss_config_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ static const char * const keymode[] = {
+ "global",
+ "global and per-VF scramble",
+ "per-PF and per-VF scramble",
+ "per-VF and per-VF scramble",
+ };
+ u32 rssconf;
+
+ rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A);
+ seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf);
+ seq_printf(seq, " Tnl4TupEnIpv6: %3s\n", yesno(rssconf &
+ TNL4TUPENIPV6_F));
+ seq_printf(seq, " Tnl2TupEnIpv6: %3s\n", yesno(rssconf &
+ TNL2TUPENIPV6_F));
+ seq_printf(seq, " Tnl4TupEnIpv4: %3s\n", yesno(rssconf &
+ TNL4TUPENIPV4_F));
+ seq_printf(seq, " Tnl2TupEnIpv4: %3s\n", yesno(rssconf &
+ TNL2TUPENIPV4_F));
+ seq_printf(seq, " TnlTcpSel: %3s\n", yesno(rssconf & TNLTCPSEL_F));
+ seq_printf(seq, " TnlIp6Sel: %3s\n", yesno(rssconf & TNLIP6SEL_F));
+ seq_printf(seq, " TnlVrtSel: %3s\n", yesno(rssconf & TNLVRTSEL_F));
+ seq_printf(seq, " TnlMapEn: %3s\n", yesno(rssconf & TNLMAPEN_F));
+ seq_printf(seq, " OfdHashSave: %3s\n", yesno(rssconf &
+ OFDHASHSAVE_F));
+ seq_printf(seq, " OfdVrtSel: %3s\n", yesno(rssconf & OFDVRTSEL_F));
+ seq_printf(seq, " OfdMapEn: %3s\n", yesno(rssconf & OFDMAPEN_F));
+ seq_printf(seq, " OfdLkpEn: %3s\n", yesno(rssconf & OFDLKPEN_F));
+ seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf &
+ SYN4TUPENIPV6_F));
+ seq_printf(seq, " Syn2TupEnIpv6: %3s\n", yesno(rssconf &
+ SYN2TUPENIPV6_F));
+ seq_printf(seq, " Syn4TupEnIpv4: %3s\n", yesno(rssconf &
+ SYN4TUPENIPV4_F));
+ seq_printf(seq, " Syn2TupEnIpv4: %3s\n", yesno(rssconf &
+ SYN2TUPENIPV4_F));
+ seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf &
+ SYN4TUPENIPV6_F));
+ seq_printf(seq, " SynIp6Sel: %3s\n", yesno(rssconf & SYNIP6SEL_F));
+ seq_printf(seq, " SynVrt6Sel: %3s\n", yesno(rssconf & SYNVRTSEL_F));
+ seq_printf(seq, " SynMapEn: %3s\n", yesno(rssconf & SYNMAPEN_F));
+ seq_printf(seq, " SynLkpEn: %3s\n", yesno(rssconf & SYNLKPEN_F));
+ seq_printf(seq, " ChnEn: %3s\n", yesno(rssconf &
+ CHANNELENABLE_F));
+ seq_printf(seq, " PrtEn: %3s\n", yesno(rssconf &
+ PORTENABLE_F));
+ seq_printf(seq, " TnlAllLkp: %3s\n", yesno(rssconf &
+ TNLALLLOOKUP_F));
+ seq_printf(seq, " VrtEn: %3s\n", yesno(rssconf &
+ VIRTENABLE_F));
+ seq_printf(seq, " CngEn: %3s\n", yesno(rssconf &
+ CONGESTIONENABLE_F));
+ seq_printf(seq, " HashToeplitz: %3s\n", yesno(rssconf &
+ HASHTOEPLITZ_F));
+ seq_printf(seq, " Udp4En: %3s\n", yesno(rssconf & UDPENABLE_F));
+ seq_printf(seq, " Disable: %3s\n", yesno(rssconf & DISABLE_F));
+
+ seq_puts(seq, "\n");
+
+ rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A);
+ seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf);
+ seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
+ seq_printf(seq, " MaskFilter: %3d\n", MASKFILTER_G(rssconf));
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
+ seq_printf(seq, " HashAll: %3s\n",
+ yesno(rssconf & HASHALL_F));
+ seq_printf(seq, " HashEth: %3s\n",
+ yesno(rssconf & HASHETH_F));
+ }
+ seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F));
+
+ seq_puts(seq, "\n");
+
+ rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A);
+ seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf);
+ seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
+ seq_printf(seq, " RRCplMapEn: %3s\n", yesno(rssconf &
+ RRCPLMAPEN_F));
+ seq_printf(seq, " RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf));
+
+ seq_puts(seq, "\n");
+
+ rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A);
+ seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf);
+ seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
+ seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F));
+
+ seq_puts(seq, "\n");
+
+ rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
+ seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
+ seq_printf(seq, " KeyWrAddrX: %3d\n",
+ KEYWRADDRX_G(rssconf));
+ seq_printf(seq, " KeyExtend: %3s\n",
+ yesno(rssconf & KEYEXTEND_F));
+ }
+ seq_printf(seq, " VfRdRg: %3s\n", yesno(rssconf & VFRDRG_F));
+ seq_printf(seq, " VfRdEn: %3s\n", yesno(rssconf & VFRDEN_F));
+ seq_printf(seq, " VfPerrEn: %3s\n", yesno(rssconf & VFPERREN_F));
+ seq_printf(seq, " KeyPerrEn: %3s\n", yesno(rssconf & KEYPERREN_F));
+ seq_printf(seq, " DisVfVlan: %3s\n", yesno(rssconf &
+ DISABLEVLAN_F));
+ seq_printf(seq, " EnUpSwt: %3s\n", yesno(rssconf & ENABLEUP0_F));
+ seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
+ seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
+ seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
+ seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
+ seq_printf(seq, " KeyWrAddr: %3d\n", KEYWRADDR_G(rssconf));
+
+ seq_puts(seq, "\n");
+
+ rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A);
+ seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf);
+ seq_printf(seq, " ChnCount3: %3s\n", yesno(rssconf & CHNCOUNT3_F));
+ seq_printf(seq, " ChnCount2: %3s\n", yesno(rssconf & CHNCOUNT2_F));
+ seq_printf(seq, " ChnCount1: %3s\n", yesno(rssconf & CHNCOUNT1_F));
+ seq_printf(seq, " ChnCount0: %3s\n", yesno(rssconf & CHNCOUNT0_F));
+ seq_printf(seq, " ChnUndFlow3: %3s\n", yesno(rssconf &
+ CHNUNDFLOW3_F));
+ seq_printf(seq, " ChnUndFlow2: %3s\n", yesno(rssconf &
+ CHNUNDFLOW2_F));
+ seq_printf(seq, " ChnUndFlow1: %3s\n", yesno(rssconf &
+ CHNUNDFLOW1_F));
+ seq_printf(seq, " ChnUndFlow0: %3s\n", yesno(rssconf &
+ CHNUNDFLOW0_F));
+ seq_printf(seq, " RstChn3: %3s\n", yesno(rssconf & RSTCHN3_F));
+ seq_printf(seq, " RstChn2: %3s\n", yesno(rssconf & RSTCHN2_F));
+ seq_printf(seq, " RstChn1: %3s\n", yesno(rssconf & RSTCHN1_F));
+ seq_printf(seq, " RstChn0: %3s\n", yesno(rssconf & RSTCHN0_F));
+ seq_printf(seq, " UpdVld: %3s\n", yesno(rssconf & UPDVLD_F));
+ seq_printf(seq, " Xoff: %3s\n", yesno(rssconf & XOFF_F));
+ seq_printf(seq, " UpdChn3: %3s\n", yesno(rssconf & UPDCHN3_F));
+ seq_printf(seq, " UpdChn2: %3s\n", yesno(rssconf & UPDCHN2_F));
+ seq_printf(seq, " UpdChn1: %3s\n", yesno(rssconf & UPDCHN1_F));
+ seq_printf(seq, " UpdChn0: %3s\n", yesno(rssconf & UPDCHN0_F));
+ seq_printf(seq, " Queue: %3d\n", QUEUE_G(rssconf));
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
+
+/* RSS Secret Key.
+ */
+
+static int rss_key_show(struct seq_file *seq, void *v)
+{
+ u32 key[10];
+
+ t4_read_rss_key(seq->private, key);
+ seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ key[9], key[8], key[7], key[6], key[5], key[4], key[3],
+ key[2], key[1], key[0]);
+ return 0;
+}
+
+static int rss_key_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rss_key_show, inode->i_private);
+}
+
+static ssize_t rss_key_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int i, j;
+ u32 key[10];
+ char s[100], *p;
+ struct adapter *adap = FILE_DATA(file)->i_private;
+
+ if (count > sizeof(s) - 1)
+ return -EINVAL;
+ if (copy_from_user(s, buf, count))
+ return -EFAULT;
+ for (i = count; i > 0 && isspace(s[i - 1]); i--)
+ ;
+ s[i] = '\0';
+
+ for (p = s, i = 9; i >= 0; i--) {
+ key[i] = 0;
+ for (j = 0; j < 8; j++, p++) {
+ if (!isxdigit(*p))
+ return -EINVAL;
+ key[i] = (key[i] << 4) | hex2val(*p);
+ }
+ }
+
+ t4_write_rss_key(adap, key, -1);
+ return count;
+}
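/* A userspace sketch of the key parsing in rss_key_write() above: the user
 * writes 80 hex digits (a 320-bit key), most-significant word first, and
 * they are packed into u32 key[9]..key[0] exactly as the debugfs handler
 * does. hexval() mirrors the driver's hex2val(); the sample key is made up.
 */
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int hexval(char c)
{
	return isdigit((unsigned char)c) ? c - '0'
					 : tolower((unsigned char)c) - 'a' + 10;
}

static int parse_rss_key(const char *s, uint32_t key[10])
{
	for (int i = 9; i >= 0; i--) {		/* first 8 chars -> key[9] */
		key[i] = 0;
		for (int j = 0; j < 8; j++, s++) {
			if (!isxdigit((unsigned char)*s))
				return -1;
			key[i] = (key[i] << 4) | hexval(*s);
		}
	}
	return 0;
}

int main(void)
{
	uint32_t key[10];

	if (!parse_rss_key("000102030405060708090a0b0c0d0e0f"
			   "101112131415161718191a1b1c1d1e1f"
			   "2021222324252627", key))
		printf("key[9]=%08x key[0]=%08x\n", key[9], key[0]);
	return 0;
}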
+
+static const struct file_operations rss_key_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rss_key_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = rss_key_write
+};
+
+/* PF RSS Configuration.
+ */
+
+struct rss_pf_conf {
+ u32 rss_pf_map;
+ u32 rss_pf_mask;
+ u32 rss_pf_config;
+};
+
+static int rss_pf_config_show(struct seq_file *seq, void *v, int idx)
+{
+ struct rss_pf_conf *pfconf;
+
+ if (v == SEQ_START_TOKEN) {
+ /* use the 0th entry to dump the PF Map Index Size */
+ pfconf = seq->private + offsetof(struct seq_tab, data);
+ seq_printf(seq, "PF Map Index Size = %d\n\n",
+ LKPIDXSIZE_G(pfconf->rss_pf_map));
+
+ seq_puts(seq, " RSS PF VF Hash Tuple Enable Default\n");
+ seq_puts(seq, " Enable IPF Mask Mask IPv6 IPv4 UDP Queue\n");
+ seq_puts(seq, " PF Map Chn Prt Map Size Size Four Two Four Two Four Ch1 Ch0\n");
+ } else {
+ #define G_PFnLKPIDX(map, n) \
+ (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
+ #define G_PFnMSKSIZE(mask, n) \
+ (((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M)
+
+ pfconf = v;
+ seq_printf(seq, "%3d %3s %3s %3s %3d %3d %3d %3s %3s %3s %3s %3s %3d %3d\n",
+ idx,
+ yesno(pfconf->rss_pf_config & MAPENABLE_F),
+ yesno(pfconf->rss_pf_config & CHNENABLE_F),
+ yesno(pfconf->rss_pf_config & PRTENABLE_F),
+ G_PFnLKPIDX(pfconf->rss_pf_map, idx),
+ G_PFnMSKSIZE(pfconf->rss_pf_mask, idx),
+ IVFWIDTH_G(pfconf->rss_pf_config),
+ yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F),
+ yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F),
+ yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F),
+ yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F),
+ yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F),
+ CH1DEFAULTQUEUE_G(pfconf->rss_pf_config),
+ CH0DEFAULTQUEUE_G(pfconf->rss_pf_config));
+
+ #undef G_PFnLKPIDX
+ #undef G_PFnMSKSIZE
+ }
+ return 0;
+}
+
+static int rss_pf_config_open(struct inode *inode, struct file *file)
+{
+ struct adapter *adapter = inode->i_private;
+ struct seq_tab *p;
+ u32 rss_pf_map, rss_pf_mask;
+ struct rss_pf_conf *pfconf;
+ int pf;
+
+ p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
+ if (!p)
+ return -ENOMEM;
+
+ pfconf = (struct rss_pf_conf *)p->data;
+ rss_pf_map = t4_read_rss_pf_map(adapter);
+ rss_pf_mask = t4_read_rss_pf_mask(adapter);
+ for (pf = 0; pf < 8; pf++) {
+ pfconf[pf].rss_pf_map = rss_pf_map;
+ pfconf[pf].rss_pf_mask = rss_pf_mask;
+ t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
+ }
+ return 0;
+}
+
+static const struct file_operations rss_pf_config_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rss_pf_config_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+/* VF RSS Configuration.
+ */
+
+struct rss_vf_conf {
+ u32 rss_vf_vfl;
+ u32 rss_vf_vfh;
+};
+
+static int rss_vf_config_show(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, " RSS Hash Tuple Enable\n");
+ seq_puts(seq, " Enable IVF Dis Enb IPv6 IPv4 UDP Def Secret Key\n");
+ seq_puts(seq, " VF Chn Prt Map VLAN uP Four Two Four Two Four Que Idx Hash\n");
+ } else {
+ struct rss_vf_conf *vfconf = v;
+
+ seq_printf(seq, "%3d %3s %3s %3d %3s %3s %3s %3s %3s %3s %3s %4d %3d %#10x\n",
+ idx,
+ yesno(vfconf->rss_vf_vfh & VFCHNEN_F),
+ yesno(vfconf->rss_vf_vfh & VFPRTEN_F),
+ VFLKPIDX_G(vfconf->rss_vf_vfh),
+ yesno(vfconf->rss_vf_vfh & VFVLNEX_F),
+ yesno(vfconf->rss_vf_vfh & VFUPEN_F),
+ yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
+ yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F),
+ yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
+ yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F),
+ yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F),
+ DEFAULTQUEUE_G(vfconf->rss_vf_vfh),
+ KEYINDEX_G(vfconf->rss_vf_vfh),
+ vfconf->rss_vf_vfl);
+ }
+ return 0;
+}
+
+static int rss_vf_config_open(struct inode *inode, struct file *file)
+{
+ struct adapter *adapter = inode->i_private;
+ struct seq_tab *p;
+ struct rss_vf_conf *vfconf;
+ int vf;
+
+ p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+ if (!p)
+ return -ENOMEM;
+
+ vfconf = (struct rss_vf_conf *)p->data;
+ for (vf = 0; vf < 128; vf++) {
+ t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
+ &vfconf[vf].rss_vf_vfh);
+ }
+ return 0;
+}
+
+static const struct file_operations rss_vf_config_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rss_vf_config_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+/**
+ * ethqset2pinfo - return port_info of an Ethernet Queue Set
+ * @adap: the adapter
+ * @qset: Ethernet Queue Set
+ */
+static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
+{
+ int pidx;
+
+ for_each_port(adap, pidx) {
+ struct port_info *pi = adap2pinfo(adap, pidx);
+
+ if (qset >= pi->first_qset &&
+ qset < pi->first_qset + pi->nqsets)
+ return pi;
+ }
+
+ /* should never happen! */
+ BUG_ON(1);
+ return NULL;
+}
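/* A sketch of the range lookup ethqset2pinfo() performs: each port owns the
 * contiguous block [first_qset, first_qset + nqsets) of Ethernet queue sets,
 * so finding the owning port is a linear scan over the ports. The two-port
 * layout below is illustrative only.
 */
#include <stdio.h>

struct port {
	int first_qset;
	int nqsets;
};

static int qset_to_port(const struct port *ports, int nports, int qset)
{
	for (int i = 0; i < nports; i++)
		if (qset >= ports[i].first_qset &&
		    qset < ports[i].first_qset + ports[i].nqsets)
			return i;
	return -1;			/* the driver treats this as a bug */
}

int main(void)
{
	struct port ports[] = { { 0, 8 }, { 8, 8 } };

	printf("qset 10 -> port %d\n", qset_to_port(ports, 2, 10));
	return 0;
}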
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+ int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+ int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+ int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
+ int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+ int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+ int i, r = (uintptr_t)v - 1;
+ int toe_idx = r - eth_entries;
+ int rdma_idx = toe_idx - toe_entries;
+ int ciq_idx = rdma_idx - rdma_entries;
+ int ctrl_idx = ciq_idx - ciq_entries;
+ int fq_idx = ctrl_idx - ctrl_entries;
+
+ if (r)
+ seq_putc(seq, '\n');
+
+#define S3(fmt_spec, s, v) \
+do { \
+ seq_printf(seq, "%-12s", s); \
+ for (i = 0; i < n; ++i) \
+ seq_printf(seq, " %16" fmt_spec, v); \
+ seq_putc(seq, '\n'); \
+} while (0)
+#define S(s, v) S3("s", s, v)
+#define T(s, v) S3("u", s, tx[i].v)
+#define R(s, v) S3("u", s, rx[i].v)
+
+ if (r < eth_entries) {
+ int base_qset = r * 4;
+ const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
+ const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
+ int n = min(4, adap->sge.ethqsets - 4 * r);
+
+ S("QType:", "Ethernet");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ T("DCB Prio:", dcb_prio);
+ S3("u", "DCB PGID:",
+ (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >>
+ 4*(7-tx[i].dcb_prio)) & 0xf);
+ S3("u", "DCB PFC:",
+ (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >>
+ 1*(7-tx[i].dcb_prio)) & 0x1);
+#endif
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
+ adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ } else if (toe_idx < toe_entries) {
+ const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
+ const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+
+ S("QType:", "TOE");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
+ adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ } else if (rdma_idx < rdma_entries) {
+ const struct sge_ofld_rxq *rx =
+ &adap->sge.rdmarxq[rdma_idx * 4];
+ int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
+
+ S("QType:", "RDMA-CPL");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
+ adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ } else if (ciq_idx < ciq_entries) {
+ const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
+ int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
+
+ S("QType:", "RDMA-CIQ");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
+ adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ } else if (ctrl_idx < ctrl_entries) {
+ const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
+ int n = min(4, adap->params.nports - 4 * ctrl_idx);
+
+ S("QType:", "Control");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ } else if (fq_idx == 0) {
+ const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+ seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+ seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+ seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size);
+ seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len);
+ seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+ seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+ qtimer_val(adap, evtq));
+ seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+ adap->sge.counter_val[evtq->pktcnt_idx]);
+ }
+#undef R
+#undef T
+#undef S
+#undef S3
+ return 0;
+}
+
+static int sge_queue_entries(const struct adapter *adap)
+{
+ return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+ DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
+ DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+ DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
+}
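/* A sketch of how sge_qinfo lays out its rows: queues are printed four per
 * row, so each queue class contributes DIV_ROUND_UP(n, 4) rows, and a row
 * index is classified by subtracting the preceding classes' row counts,
 * mirroring the toe_idx/rdma_idx/ciq_idx/ctrl_idx chain in sge_qinfo_show().
 * The queue counts below are made up for illustration.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int ethqsets = 10, ofldqsets = 6, rdmaqs = 4, rdmaciqs = 4, ctrlqs = 4;
	int eth = DIV_ROUND_UP(ethqsets, 4);
	int toe = DIV_ROUND_UP(ofldqsets, 4);
	int rdma = DIV_ROUND_UP(rdmaqs, 4);
	int ciq = DIV_ROUND_UP(rdmaciqs, 4);
	int ctrl = DIV_ROUND_UP(ctrlqs, 4);
	int entries = eth + toe + rdma + ciq + ctrl + 1;  /* +1: FW event queue */

	for (int r = 0; r < entries; r++) {
		if (r < eth)
			printf("row %d: Ethernet qsets from %d\n", r, r * 4);
		else if (r - eth < toe)
			printf("row %d: TOE qsets from %d\n", r, (r - eth) * 4);
		else if (r - eth - toe < rdma)
			printf("row %d: RDMA-CPL\n", r);
		else if (r - eth - toe - rdma < ciq)
			printf("row %d: RDMA-CIQ\n", r);
		else if (r - eth - toe - rdma - ciq < ctrl)
			printf("row %d: Control\n", r);
		else
			printf("row %d: FW event queue\n", r);
	}
	return 0;
}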
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+ int entries = sge_queue_entries(seq->private);
+
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int entries = sge_queue_entries(seq->private);
+
+ ++*pos;
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+ .start = sge_queue_start,
+ .next = sge_queue_next,
+ .stop = sge_queue_stop,
+ .show = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &sge_qinfo_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sge_qinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+int mem_open(struct inode *inode, struct file *file)
+{
+ unsigned int mem;
+ struct adapter *adap;
+
+ file->private_data = inode->i_private;
+
+ mem = (uintptr_t)file->private_data & 0x3;
+ adap = file->private_data - mem;
+
+ (void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
+
+ return 0;
+}
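/* A userspace sketch of the pointer tagging mem_open() relies on: the memory
 * region index is packed into the low two bits of the (at least 4-byte
 * aligned) adapter pointer when the debugfs node is created, and recovered by
 * masking. The struct and index value below are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

struct adapter { int dummy; };

int main(void)
{
	static struct adapter adap;		/* at least 4-byte aligned */
	unsigned int idx = 2;			/* e.g. a memory region id */
	void *cookie = (char *)&adap + idx;	/* what i_private would hold */

	unsigned int mem = (uintptr_t)cookie & 0x3;
	struct adapter *a = (struct adapter *)((char *)cookie - mem);

	printf("mem=%u adap=%p (orig %p)\n", mem, (void *)a, (void *)&adap);
	return 0;
}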
+
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -80,7 +1934,6 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
*ppos = pos + count;
return count;
}
-
static const struct file_operations mem_debugfs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -88,6 +1941,12 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
+static void set_debugfs_file_size(struct dentry *de, loff_t size)
+{
+ if (!IS_ERR(de) && de->d_inode)
+ de->d_inode->i_size = size;
+}
+
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
@@ -119,14 +1978,65 @@ int t4_setup_debugfs(struct adapter *adap)
{
int i;
u32 size;
+ struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
+ { "cim_la", &cim_la_fops, S_IRUSR, 0 },
+ { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
+ { "clk", &clk_debugfs_fops, S_IRUSR, 0 },
+ { "devlog", &devlog_fops, S_IRUSR, 0 },
+ { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+ { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+ { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+ { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
+ { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 },
+ { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
+ { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
+ { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
+ { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
+ { "rss", &rss_debugfs_fops, S_IRUSR, 0 },
+ { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 },
+ { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 },
+ { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 },
+ { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 },
+ { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 },
+ { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 },
+ { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 },
+ { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 },
+ { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 },
+ { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 },
+ { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 },
+ { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 },
+ { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 },
+ { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 },
+ { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 },
+ { "obq_sge", &cim_obq_fops, S_IRUSR, 4 },
+ { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 },
+ { "tp_la", &tp_la_fops, S_IRUSR, 0 },
+ { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
+ { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
+ { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+ { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
+#if IS_ENABLED(CONFIG_IPV6)
+ { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
+#endif
+ };
+
+ /* Debug FS nodes common to all T5 and later adapters.
+ */
+ static struct t4_debugfs_entry t5_debugfs_files[] = {
+ { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 },
+ { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 },
};
add_debugfs_files(adap,
t4_debugfs_files,
ARRAY_SIZE(t4_debugfs_files));
+ if (!is_t4(adap->params.chip))
+ add_debugfs_files(adap,
+ t5_debugfs_files,
+ ARRAY_SIZE(t5_debugfs_files));
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
if (i & EDRAM0_ENABLE_F) {
@@ -154,5 +2064,10 @@ int t4_setup_debugfs(struct adapter *adap)
EXT_MEM1_SIZE_G(size));
}
}
+
+ de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap,
+ &flash_debugfs_fops);
+ set_debugfs_file_size(de, adap->params.sf_size);
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index a3d8867efd3d..b63cfee2d963 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,6 +37,21 @@
#include <linux/export.h>
+#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode)
+
+#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, name##_show, inode->i_private); \
+} \
+static const struct file_operations name##_debugfs_fops = { \
+ .owner = THIS_MODULE, \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release \
+}
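/* For reference, DEFINE_SIMPLE_DEBUGFS_FILE(foo) -- for a hypothetical
 * foo_show() routine -- expands to roughly the following:
 *
 *	static int foo_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, foo_show, inode->i_private);
 *	}
 *	static const struct file_operations foo_debugfs_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = foo_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release
 *	};
 */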
+
struct t4_debugfs_entry {
const char *name;
const struct file_operations *ops;
@@ -44,9 +59,27 @@ struct t4_debugfs_entry {
unsigned char data;
};
+struct seq_tab {
+ int (*show)(struct seq_file *seq, void *v, int idx);
+ unsigned int rows; /* # of entries */
+ unsigned char width; /* size in bytes of each entry */
+ unsigned char skip_first; /* whether the first line is a header */
+ char data[0]; /* the table data */
+};
+
+static inline unsigned int hex2val(char c)
+{
+ return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+ unsigned int width, unsigned int have_header,
+ int (*show)(struct seq_file *seq, void *v, int i));
+
int t4_setup_debugfs(struct adapter *adap);
void add_debugfs_files(struct adapter *adap,
struct t4_debugfs_entry *files,
unsigned int nfiles);
+int mem_open(struct inode *inode, struct file *file);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index ccf3436024bc..a22cf932ca35 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -62,14 +62,18 @@
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
+#include <net/addrconf.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
+#include "clip_tbl.h"
#include "l2t.h"
#ifdef DRV_VERSION
@@ -78,99 +82,6 @@
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
-/*
- * Max interrupt hold-off timer value in us. Queues fall back to this value
- * under extreme memory pressure so it's largish to give the system time to
- * recover.
- */
-#define MAX_SGE_TIMERVAL 200U
-
-enum {
- /*
- * Physical Function provisioning constants.
- */
- PFRES_NVI = 4, /* # of Virtual Interfaces */
- PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
- PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
- */
- PFRES_NEQ = 256, /* # of egress queues */
- PFRES_NIQ = 0, /* # of ingress queues */
- PFRES_TC = 0, /* PCI-E traffic class */
- PFRES_NEXACTF = 128, /* # of exact MPS filters */
-
- PFRES_R_CAPS = FW_CMD_CAP_PF,
- PFRES_WX_CAPS = FW_CMD_CAP_PF,
-
-#ifdef CONFIG_PCI_IOV
- /*
- * Virtual Function provisioning constants. We need two extra Ingress
- * Queues with Interrupt capability to serve as the VF's Firmware
- * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
- * neither will have Free Lists associated with them). For each
- * Ethernet/Control Egress Queue and for each Free List, we need an
- * Egress Context.
- */
- VFRES_NPORTS = 1, /* # of "ports" per VF */
- VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
-
- VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
- VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
- VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
- VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
- VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
- VFRES_TC = 0, /* PCI-E traffic class */
- VFRES_NEXACTF = 16, /* # of exact MPS filters */
-
- VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
- VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
-#endif
-};
-
-/*
- * Provide a Port Access Rights Mask for the specified PF/VF. This is very
- * static and likely not to be useful in the long run. We really need to
- * implement some form of persistent configuration which the firmware
- * controls.
- */
-static unsigned int pfvfres_pmask(struct adapter *adapter,
- unsigned int pf, unsigned int vf)
-{
- unsigned int portn, portvec;
-
- /*
- * Give PF's access to all of the ports.
- */
- if (vf == 0)
- return FW_PFVF_CMD_PMASK_M;
-
- /*
- * For VFs, we'll assign them access to the ports based purely on the
- * PF. We assign active ports in order, wrapping around if there are
- * fewer active ports than PFs: e.g. active port[pf % nports].
- * Unfortunately the adapter's port_info structs haven't been
- * initialized yet so we have to compute this.
- */
- if (adapter->params.nports == 0)
- return 0;
-
- portn = pf % adapter->params.nports;
- portvec = adapter->params.portvec;
- for (;;) {
- /*
- * Isolate the lowest set bit in the port vector. If we're at
- * the port number that we want, return that as the pmask.
- * otherwise mask that bit out of the port vector and
- * decrement our port number ...
- */
- unsigned int pmask = portvec ^ (portvec & (portvec-1));
- if (portn == 0)
- return pmask;
- portn--;
- portvec &= ~pmask;
- }
- /*NOTREACHED*/
-}
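/* A userspace sketch of the port-selection loop in the pfvfres_pmask()
 * function removed above: portvec ^ (portvec & (portvec - 1)) isolates the
 * lowest set bit, so the loop walks the active ports in ascending order until
 * it reaches port number (pf % nports). It assumes portvec has at least
 * portn + 1 bits set, as the driver guarantees; the example vector is made up.
 */
#include <stdio.h>

static unsigned int pick_port_mask(unsigned int portvec, unsigned int portn)
{
	for (;;) {
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));

		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
}

int main(void)
{
	/* ports 0, 2 and 3 active: 0b1101; second active port is port 2 */
	printf("%#x\n", pick_port_mask(0xd, 1));	/* prints 0x4 */
	return 0;
}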
-
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -263,7 +174,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter")
static uint force_old_init;
module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
+ " parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -292,13 +204,14 @@ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
- "0..4 in microseconds");
+ "0..4 in microseconds, deprecated parameter");
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
- "thresholds 1..3 for queue interrupt packet counters");
+ "thresholds 1..3 for queue interrupt packet counters, "
+ "deprecated parameter");
/*
* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
@@ -318,7 +231,8 @@ static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
+ "deprecated parameter");
/* Configure the number of PCI-E Virtual Function which are to be instantiated
* on SR-IOV Capable Physical Functions.
@@ -340,32 +254,11 @@ module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
"Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
-/*
- * The filter TCAM has a fixed portion and a variable portion. The fixed
- * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
- * ports. The variable portion is 36 bits which can include things like Exact
- * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
- * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
- * far exceed the 36-bit budget for this "compressed" header portion of the
- * filter. Thus, we have a scarce resource which must be carefully managed.
- *
- * By default we set this up to mostly match the set of filter matching
- * capabilities of T3 but with accommodations for some of T4's more
- * interesting features:
- *
- * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
- * [Inner] VLAN (17), Port (3), FCoE (1) }
- */
-enum {
- TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
- TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
- TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
-};
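As a cross-check on the budget described in the comment removed above, the
default selection { IP Fragment, MPS Match Type, IP Protocol, [Inner] VLAN,
Port, FCoE } consumes

	1 + 3 + 8 + 17 + 3 + 1 = 33 bits

of the 36-bit "compressed" (variable) portion of the filter TCAM, leaving
three bits spare.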
-
-static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
+ "deprecated parameter");
static struct dentry *cxgb4_debugfs_root;
@@ -671,7 +564,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
if (idx >= adap->tids.ftid_base && nidx <
(adap->tids.nftids + adap->tids.nsftids)) {
idx = nidx;
- ret = GET_TCB_COOKIE(rpl->cookie);
+ ret = TCB_COOKIE_G(rpl->cookie);
f = &adap->tids.ftid_tab[idx];
if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -723,7 +616,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
- unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+ unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
struct sge_txq *txq;
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -833,11 +726,11 @@ static void disable_msi(struct adapter *adapter)
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
struct adapter *adap = cookie;
+ u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
- u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
- if (v & PFSW) {
+ if (v & PFSW_F) {
adap->swintr = 1;
- t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+ t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
}
t4_slow_intr_handler(adap);
return IRQ_HANDLED;
@@ -1030,8 +923,14 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler)
+ if (q && q->handler) {
napi_disable(&q->napi);
+ local_bh_disable();
+ while (!cxgb_poll_lock_napi(q))
+ mdelay(1);
+ local_bh_enable();
+ }
+
}
}
@@ -1047,12 +946,14 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
- if (q->handler)
+ if (q->handler) {
+ cxgb_busy_poll_init_lock(q);
napi_enable(&q->napi);
+ }
/* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
- SEINTARM(q->intr_params) |
- INGRESSQID(q->cntxt_id));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
}
}
@@ -1176,10 +1077,10 @@ freeout: t4_free_sge_resources(adap);
}
t4_write_reg(adap, is_t4(adap->params.chip) ?
- MPS_TRC_RSS_CONTROL :
- MPS_T5_TRC_RSS_CONTROL,
- RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
- QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+ MPS_TRC_RSS_CONTROL_A :
+ MPS_T5_TRC_RSS_CONTROL_A,
+ RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
+ QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
}
@@ -1518,6 +1419,7 @@ static int get_eeprom_len(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
+ u32 exprom_vers;
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
@@ -1535,6 +1437,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+ if (!t4_get_exprom_version(adapter, &exprom_vers))
+ snprintf(info->erom_version, sizeof(info->erom_version),
+ "%u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_G(exprom_vers),
+ FW_HDR_FW_VER_MINOR_G(exprom_vers),
+ FW_HDR_FW_VER_MICRO_G(exprom_vers),
+ FW_HDR_FW_VER_BUILD_G(exprom_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1589,9 +1499,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+ t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
*data = val1 - val2;
data++;
*data = val2;
@@ -2608,8 +2518,8 @@ static int closest_thres(const struct sge *s, int thres)
/*
* Return a queue's interrupt hold-off time in us. 0 means no timer.
*/
-static unsigned int qtimer_val(const struct adapter *adap,
- const struct sge_rspq *q)
+unsigned int qtimer_val(const struct adapter *adap,
+ const struct sge_rspq *q)
{
unsigned int idx = q->intr_params >> 1;
@@ -3346,40 +3256,6 @@ static int tid_init(struct tid_info *t)
return 0;
}
-int cxgb4_clip_get(const struct net_device *dev,
- const struct in6_addr *lip)
-{
- struct adapter *adap;
- struct fw_clip_cmd c;
-
- adap = netdev2adap(dev);
- memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
- c.ip_hi = *(__be64 *)(lip->s6_addr);
- c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
- return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
-}
-EXPORT_SYMBOL(cxgb4_clip_get);
-
-int cxgb4_clip_release(const struct net_device *dev,
- const struct in6_addr *lip)
-{
- struct adapter *adap;
- struct fw_clip_cmd c;
-
- adap = netdev2adap(dev);
- memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
- c.ip_hi = *(__be64 *)(lip->s6_addr);
- c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
- return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
-}
-EXPORT_SYMBOL(cxgb4_clip_release);
-
/**
* cxgb4_create_server - create an IP server
* @dev: the device
@@ -3415,8 +3291,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->peer_ip = htonl(0);
chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
- SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+ SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
ret = t4_mgmt_tx(adap, skb);
return net_xmit_eval(ret);
}
@@ -3458,8 +3334,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
req->peer_ip_lo = cpu_to_be64(0);
chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
- SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+ SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
ret = t4_mgmt_tx(adap, skb);
return net_xmit_eval(ret);
}
@@ -3482,8 +3358,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
- req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
- LISTSVR_IPV6(0)) | QUEUENO(queue));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+ LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
ret = t4_mgmt_tx(adap, skb);
return net_xmit_eval(ret);
}
@@ -3600,14 +3476,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
struct adapter *adap = netdev2adap(dev);
u32 v1, v2, lp_count, hp_count;
- v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
if (is_t4(adap->params.chip)) {
- lp_count = G_LP_COUNT(v1);
- hp_count = G_HP_COUNT(v1);
+ lp_count = LP_COUNT_G(v1);
+ hp_count = HP_COUNT_G(v1);
} else {
- lp_count = G_LP_COUNT_T5(v1);
- hp_count = G_HP_COUNT_T5(v2);
+ lp_count = LP_COUNT_T5_G(v1);
+ hp_count = HP_COUNT_T5_G(v2);
}
return lpfifo ? lp_count : hp_count;
}
@@ -3653,10 +3529,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
{
struct adapter *adap = netdev2adap(dev);
- t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
- t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
- HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
- HPZ3(pgsz_order[3]));
+ t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
+ t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
+ HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
+ HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
@@ -3666,14 +3542,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
int ret;
ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+ 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
- u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+ u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
__be64 indices;
int ret;
@@ -3702,14 +3578,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
if (pidx != hw_pidx) {
u16 delta;
+ u32 val;
if (pidx >= hw_pidx)
delta = pidx - hw_pidx;
else
delta = size - hw_pidx + pidx;
+
+ if (is_t4(adap->params.chip))
+ val = PIDX_V(delta);
+ else
+ val = PIDX_T5_V(delta);
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(qid) | PIDX(delta));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(qid) | val);
}
out:
return ret;
@@ -3721,8 +3603,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
struct adapter *adap;
adap = netdev2adap(dev);
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
- F_NOCOALESCE);
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
+ NOCOALESCE_F);
}
EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
@@ -3731,7 +3613,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
struct adapter *adap;
adap = netdev2adap(dev);
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
}
EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
@@ -3809,8 +3691,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
struct adapter *adap;
adap = netdev2adap(dev);
- lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
- hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+ lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
+ hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
return ((u64)hi << 32) | (u64)lo;
}
@@ -3870,14 +3752,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
u32 v1, v2, lp_count, hp_count;
do {
- v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
if (is_t4(adap->params.chip)) {
- lp_count = G_LP_COUNT(v1);
- hp_count = G_HP_COUNT(v1);
+ lp_count = LP_COUNT_G(v1);
+ hp_count = HP_COUNT_G(v1);
} else {
- lp_count = G_LP_COUNT_T5(v1);
- hp_count = G_HP_COUNT_T5(v2);
+ lp_count = LP_COUNT_T5_G(v1);
+ hp_count = HP_COUNT_T5_G(v2);
}
if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3786,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
* are committed before we tell HW about them.
*/
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
q->db_pidx_inc = 0;
}
q->db_disabled = 0;
@@ -3952,9 +3834,9 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT,
- DBFIFO_HP_INT | DBFIFO_LP_INT);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3850,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
goto out;
if (q->db_pidx != hw_pidx) {
u16 delta;
+ u32 val;
if (q->db_pidx >= hw_pidx)
delta = q->db_pidx - hw_pidx;
else
delta = q->size - hw_pidx + q->db_pidx;
+
+ if (is_t4(adap->params.chip))
+ val = PIDX_V(delta);
+ else
+ val = PIDX_T5_V(delta);
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(delta));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(q->cntxt_id) | val);
}
out:
q->db_disabled = 0;
@@ -4024,14 +3912,14 @@ static void process_db_drop(struct work_struct *work)
dev_err(adap->pdev_dev, "doorbell drop recovery: "
"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
else
- writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+ writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
/* Re-enable BAR2 WC */
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
@@ -4039,8 +3927,8 @@ void t4_db_full(struct adapter *adap)
if (is_t4(adap->params.chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
queue_work(adap->workq, &adap->db_full_task);
}
}
@@ -4081,7 +3969,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+ lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
lli.udb_density = 1 << adap->params.sge.eq_qpp;
lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +3977,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+ lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+ lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4220,148 +4108,61 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
-/* Check if the netdev on which the event occurred belongs to us or not. Return
- * success (true) if it belongs, otherwise failure (false).
- * Called with rcu_read_lock() held.
- */
#if IS_ENABLED(CONFIG_IPV6)
-static bool cxgb4_netdev(const struct net_device *netdev)
+static int cxgb4_inet6addr_handler(struct notifier_block *this,
+ unsigned long event, void *data)
{
+ struct inet6_ifaddr *ifa = data;
+ struct net_device *event_dev = ifa->idev->dev;
+ const struct device *parent = NULL;
+#if IS_ENABLED(CONFIG_BONDING)
struct adapter *adap;
- int i;
-
- list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
- for (i = 0; i < MAX_NPORTS; i++)
- if (adap->port[i] == netdev)
- return true;
- return false;
-}
+#endif
+ if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ event_dev = vlan_dev_real_dev(event_dev);
+#if IS_ENABLED(CONFIG_BONDING)
+ if (event_dev->flags & IFF_MASTER) {
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ switch (event) {
+ case NETDEV_UP:
+ cxgb4_clip_get(adap->port[0],
+ (const u32 *)ifa, 1);
+ break;
+ case NETDEV_DOWN:
+ cxgb4_clip_release(adap->port[0],
+ (const u32 *)ifa, 1);
+ break;
+ default:
+ break;
+ }
+ }
+ return NOTIFY_OK;
+ }
+#endif
-static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
- unsigned long event)
-{
- int ret = NOTIFY_DONE;
+ if (event_dev)
+ parent = event_dev->dev.parent;
- rcu_read_lock();
- if (cxgb4_netdev(event_dev)) {
+ if (parent && parent->driver == &cxgb4_driver.driver) {
switch (event) {
case NETDEV_UP:
- ret = cxgb4_clip_get(event_dev, &ifa->addr);
- if (ret < 0) {
- rcu_read_unlock();
- return ret;
- }
- ret = NOTIFY_OK;
+ cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
break;
case NETDEV_DOWN:
- cxgb4_clip_release(event_dev, &ifa->addr);
- ret = NOTIFY_OK;
+ cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
break;
default:
break;
}
}
- rcu_read_unlock();
- return ret;
-}
-
-static int cxgb4_inet6addr_handler(struct notifier_block *this,
- unsigned long event, void *data)
-{
- struct inet6_ifaddr *ifa = data;
- struct net_device *event_dev;
- int ret = NOTIFY_DONE;
- struct bonding *bond = netdev_priv(ifa->idev->dev);
- struct list_head *iter;
- struct slave *slave;
- struct pci_dev *first_pdev = NULL;
-
- if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
- event_dev = vlan_dev_real_dev(ifa->idev->dev);
- ret = clip_add(event_dev, ifa, event);
- } else if (ifa->idev->dev->flags & IFF_MASTER) {
- /* It is possible that two different adapters are bonded in one
- * bond. We need to find such different adapters and add clip
- * in all of them only once.
- */
- bond_for_each_slave(bond, slave, iter) {
- if (!first_pdev) {
- ret = clip_add(slave->dev, ifa, event);
- /* Only initialize first_pdev if clip_add succeeds, since
- * that means the device is ours
- */
- if (ret == NOTIFY_OK)
- first_pdev = to_pci_dev(
- slave->dev->dev.parent);
- } else if (first_pdev !=
- to_pci_dev(slave->dev->dev.parent))
- ret = clip_add(slave->dev, ifa, event);
- }
- } else
- ret = clip_add(ifa->idev->dev, ifa, event);
-
- return ret;
+ return NOTIFY_OK;
}
+static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
.notifier_call = cxgb4_inet6addr_handler
};
-/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
- * a physical device.
- * The physical device reference is needed to send the actual CLIP command.
- */
-static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
-{
- struct inet6_dev *idev = NULL;
- struct inet6_ifaddr *ifa;
- int ret = 0;
-
- idev = __in6_dev_get(root_dev);
- if (!idev)
- return ret;
-
- read_lock_bh(&idev->lock);
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
- ret = cxgb4_clip_get(dev, &ifa->addr);
- if (ret < 0)
- break;
- }
- read_unlock_bh(&idev->lock);
-
- return ret;
-}
-
-static int update_root_dev_clip(struct net_device *dev)
-{
- struct net_device *root_dev = NULL;
- int i, ret = 0;
-
- /* First populate the real net device's IPv6 addresses */
- ret = update_dev_clip(dev, dev);
- if (ret)
- return ret;
-
- /* Parse all bond and vlan devices layered on top of the physical dev */
- root_dev = netdev_master_upper_dev_get_rcu(dev);
- if (root_dev) {
- ret = update_dev_clip(root_dev, dev);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < VLAN_N_VID; i++) {
- root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
- if (!root_dev)
- continue;
-
- ret = update_dev_clip(root_dev, dev);
- if (ret)
- break;
- }
- return ret;
-}
-
static void update_clip(const struct adapter *adap)
{
int i;
@@ -4375,7 +4176,7 @@ static void update_clip(const struct adapter *adap)
ret = 0;
if (dev)
- ret = update_root_dev_clip(dev);
+ ret = cxgb4_update_root_dev_clip(dev);
if (ret < 0)
break;
@@ -4567,13 +4368,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
- if (adap->params.tp.vlan_pri_map & F_PORT) {
+ if (adap->params.tp.vlan_pri_map & PORT_F) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
- if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+ if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
f->fs.val.proto = IPPROTO_TCP;
f->fs.mask.proto = ~0;
}
@@ -4779,11 +4580,15 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = cxgb_busy_poll,
+#endif
+
};
void t4_fatal_err(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+ t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
t4_intr_disable(adap);
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
@@ -4858,16 +4663,16 @@ static void setup_memwin(struct adapter *adap)
mem_win2_base = MEMWIN2_BASE_T5;
mem_win2_aperture = MEMWIN2_APERTURE_T5;
}
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- mem_win0_base | BIR(0) |
- WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- mem_win1_base | BIR(0) |
- WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- mem_win2_base | BIR(0) |
- WINDOW(ilog2(mem_win2_aperture) - 10));
- t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
+ mem_win0_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
+ mem_win1_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
+ mem_win2_base | BIR_V(0) |
+ WINDOW_V(ilog2(mem_win2_aperture) - 10));
+ t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
}
static void setup_memwin_rdma(struct adapter *adap)
@@ -4881,13 +4686,13 @@ static void setup_memwin_rdma(struct adapter *adap)
start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
- start | BIR(1) | WINDOW(ilog2(sz_kb)));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
+ start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
adap->vres.ocq.start);
t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
}
}
@@ -4936,38 +4741,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
t4_sge_init(adap);
/* tweak some settings */
- t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
- t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
- t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
- v = t4_read_reg(adap, TP_PIO_DATA);
- t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
+ v = t4_read_reg(adap, TP_PIO_DATA_A);
+ t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
/* first 4 Tx modulation queues point to consecutive Tx channels */
adap->params.tp.tx_modq_map = 0xE4;
- t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
- V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+ t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
+ TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
/* associate each Tx modulation queue with consecutive Tx channels */
v = 0x84218421;
- t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, A_TP_TX_SCHED_HDR);
- t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, A_TP_TX_SCHED_FIFO);
- t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, A_TP_TX_SCHED_PCMD);
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &v, 1, TP_TX_SCHED_HDR_A);
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &v, 1, TP_TX_SCHED_FIFO_A);
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &v, 1, TP_TX_SCHED_PCMD_A);
#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
if (is_offload(adap)) {
- t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
- V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
- t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
- V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
+ TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
+ TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
}
/* get basic stuff going */
@@ -5013,16 +4818,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
rx_dma_offset);
rx_dma_offset = 2;
}
- t4_set_reg_field(adapter, SGE_CONTROL,
- PKTSHIFT_MASK,
- PKTSHIFT(rx_dma_offset));
+ t4_set_reg_field(adapter, SGE_CONTROL_A,
+ PKTSHIFT_V(PKTSHIFT_M),
+ PKTSHIFT_V(rx_dma_offset));
/*
* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
* adds the pseudo header itself.
*/
- t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
- CSUM_HAS_PSEUDO_HDR, 0);
+ t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
+ CSUM_HAS_PSEUDO_HDR_F, 0);
return 0;
}
@@ -5046,7 +4851,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
*/
if (reset) {
ret = t4_fw_reset(adapter, adapter->mbox,
- PIORSTMODE | PIORST);
+ PIORSTMODE_F | PIORST_F);
if (ret < 0)
goto bye;
}
@@ -5212,12 +5017,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
- /*
- * Return successfully and note that we're operating with parameters
- * not supplied by the driver, rather than from hard-wired
- * initialization constants burried in the driver.
+ /* Emit Firmware Configuration File information and return
+ * successfully.
*/
- adapter->flags |= USING_SOFT_PARAMS;
dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
"Configuration File \"%s\", version %#x, computed checksum %#x\n",
config_name, finiver, cfcsum);
@@ -5235,249 +5037,6 @@ bye:
return ret;
}
-/*
- * Attempt to initialize the adapter via hard-coded, driver supplied
- * parameters ...
- */
-static int adap_init0_no_config(struct adapter *adapter, int reset)
-{
- struct sge *s = &adapter->sge;
- struct fw_caps_config_cmd caps_cmd;
- u32 v;
- int i, ret;
-
- /*
- * Reset device if necessary
- */
- if (reset) {
- ret = t4_fw_reset(adapter, adapter->mbox,
- PIORSTMODE | PIORST);
- if (ret < 0)
- goto bye;
- }
-
- /*
- * Get device capabilities and select which we'll be using.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
- &caps_cmd);
- if (ret < 0)
- goto bye;
-
- if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
- goto bye;
- }
- caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
- NULL);
- if (ret < 0)
- goto bye;
-
- /*
- * Tweak configuration based on system architecture, module
- * parameters, etc.
- */
- ret = adap_init0_tweaks(adapter);
- if (ret < 0)
- goto bye;
-
- /*
- * Select RSS Global Mode we want to use. We use "Basic Virtual"
- * mode which maps each Virtual Interface to its own section of
- * the RSS Table and we turn on all map and hash enables ...
- */
- adapter->flags |= RSS_TNLALLLOOKUP;
- ret = t4_config_glbl_rss(adapter, adapter->mbox,
- FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
- FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
- ((adapter->flags & RSS_TNLALLLOOKUP) ?
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
- if (ret < 0)
- goto bye;
-
- /*
- * Set up our own fundamental resource provisioning ...
- */
- ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
- PFRES_NEQ, PFRES_NETHCTRL,
- PFRES_NIQFLINT, PFRES_NIQ,
- PFRES_TC, PFRES_NVI,
- FW_PFVF_CMD_CMASK_M,
- pfvfres_pmask(adapter, adapter->fn, 0),
- PFRES_NEXACTF,
- PFRES_R_CAPS, PFRES_WX_CAPS);
- if (ret < 0)
- goto bye;
-
- /*
- * Perform low level SGE initialization. We need to do this before we
- * send the firmware the INITIALIZE command because that will cause
- * any other PF Drivers which are waiting for the Master
- * Initialization to proceed forward.
- */
- for (i = 0; i < SGE_NTIMERS - 1; i++)
- s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
- s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
- s->counter_val[0] = 1;
- for (i = 1; i < SGE_NCOUNTERS; i++)
- s->counter_val[i] = min(intr_cnt[i - 1],
- THRESHOLD_0_GET(THRESHOLD_0_MASK));
- t4_sge_init(adapter);
-
-#ifdef CONFIG_PCI_IOV
- /*
- * Provision resource limits for Virtual Functions. We currently
- * grant them all the same static resource limits except for the Port
- * Access Rights Mask which we're assigning based on the PF. All of
- * the static provisioning stuff for both the PF and VF really needs
- * to be managed in a persistent manner for each device which the
- * firmware controls.
- */
- {
- int pf, vf;
-
- for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
- if (num_vf[pf] <= 0)
- continue;
-
- /* VF numbering starts at 1! */
- for (vf = 1; vf <= num_vf[pf]; vf++) {
- ret = t4_cfg_pfvf(adapter, adapter->mbox,
- pf, vf,
- VFRES_NEQ, VFRES_NETHCTRL,
- VFRES_NIQFLINT, VFRES_NIQ,
- VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_M,
- pfvfres_pmask(
- adapter, pf, vf),
- VFRES_NEXACTF,
- VFRES_R_CAPS, VFRES_WX_CAPS);
- if (ret < 0)
- dev_warn(adapter->pdev_dev,
- "failed to "\
- "provision pf/vf=%d/%d; "
- "err=%d\n", pf, vf, ret);
- }
- }
- }
-#endif
-
- /*
- * Set up the default filter mode. Later we'll want to implement this
- * via a firmware command, etc. ... This needs to be done before the
- * firmare initialization command ... If the selected set of fields
- * isn't equal to the default value, we'll need to make sure that the
- * field selections will fit in the 36-bit budget.
- */
- if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
- int j, bits = 0;
-
- for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
- switch (tp_vlan_pri_map & (1 << j)) {
- case 0:
- /* compressed filter field not enabled */
- break;
- case FCOE_MASK:
- bits += 1;
- break;
- case PORT_MASK:
- bits += 3;
- break;
- case VNIC_ID_MASK:
- bits += 17;
- break;
- case VLAN_MASK:
- bits += 17;
- break;
- case TOS_MASK:
- bits += 8;
- break;
- case PROTOCOL_MASK:
- bits += 8;
- break;
- case ETHERTYPE_MASK:
- bits += 16;
- break;
- case MACMATCH_MASK:
- bits += 9;
- break;
- case MPSHITTYPE_MASK:
- bits += 3;
- break;
- case FRAGMENTATION_MASK:
- bits += 1;
- break;
- }
-
- if (bits > 36) {
- dev_err(adapter->pdev_dev,
- "tp_vlan_pri_map=%#x needs %d bits > 36;"\
- " using %#x\n", tp_vlan_pri_map, bits,
- TP_VLAN_PRI_MAP_DEFAULT);
- tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
- }
- }
- v = tp_vlan_pri_map;
- t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, TP_VLAN_PRI_MAP);
-
- /*
- * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
- * to support any of the compressed filter fields above. Newer
- * versions of the firmware do this automatically but it doesn't hurt
- * to set it here. Meanwhile, we do _not_ need to set Lookup Every
- * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
- * since the firmware automatically turns this on and off when we have
- * a non-zero number of filters active (since it does have a
- * performance impact).
- */
- if (tp_vlan_pri_map)
- t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
- FIVETUPLELOOKUP_MASK,
- FIVETUPLELOOKUP_MASK);
-
- /*
- * Tweak some settings.
- */
- t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
- RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
- PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
- KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
-
- /*
- * Get basic stuff going by issuing the Firmware Initialize command.
- * Note that this _must_ be after all PFVF commands ...
- */
- ret = t4_fw_initialize(adapter, adapter->mbox);
- if (ret < 0)
- goto bye;
-
- /*
- * Return successfully!
- */
- dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
- "driver parameters\n");
- return 0;
-
- /*
- * Something bad happened. Return the error ...
- */
-bye:
- return ret;
-}
-
static struct fw_info fw_info_array[] = {
{
.chip = CHELSIO_T4,
@@ -5529,6 +5088,8 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
+ struct fw_devlog_cmd devlog_cmd;
+ u32 devlog_meminfo;
int reset = 1;
/* Contact FW, advertising Master capability */
@@ -5590,8 +5151,7 @@ static int adap_init0(struct adapter *adap)
state, &reset);
/* Cleaning up */
- if (fw != NULL)
- release_firmware(fw);
+ release_firmware(fw);
t4_free_mem(card_fw);
if (ret < 0)
@@ -5609,6 +5169,30 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
+ /* Read firmware device log parameters. We really need to find a way
+ * to get these parameters initialized with some default values (which
+ * are likely to be correct) for the case where we either don't
+ * attach to the firmware or it's crashed when we probe the adapter.
+ * That way we'll still be able to perform early firmware startup
+ * debugging ... If the request to get the Firmware's Device Log
+ * parameters fails, we can live with that, so we don't make it a fatal error.
+ */
+ memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+ devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+ &devlog_cmd);
+ if (ret == 0) {
+ devlog_meminfo =
+ ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ adap->params.devlog.memtype =
+ FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+ adap->params.devlog.start =
+ FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+ adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
+ }
+
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -5624,88 +5208,58 @@ static int adap_init0(struct adapter *adap)
adap->params.nports = hweight32(port_vec);
adap->params.portvec = port_vec;
- /*
- * If the firmware is initialized already (and we're not forcing a
- * master initialization), note that we're living with existing
- * adapter parameters. Otherwise, it's time to try initializing the
- * adapter ...
+ /* If the firmware is initialized already, emit a simple note to that
+ * effect. Otherwise, it's time to try initializing the adapter.
*/
if (state == DEV_STATE_INIT) {
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
- adap->flags |= USING_SOFT_PARAMS;
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
- /*
- * If the firmware doesn't support Configuration
- * Files warn user and exit,
+
+ /* Find out whether we're dealing with a version of the
+ * firmware which has configuration file support.
*/
- if (ret < 0)
- dev_warn(adap->pdev_dev, "Firmware doesn't support "
- "configuration file.\n");
- if (force_old_init)
- ret = adap_init0_no_config(adap, reset);
- else {
- /*
- * Find out whether we're dealing with a version of
- * the firmware which has configuration file support.
- */
- params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(
- FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
- params, val);
-
- /*
- * If the firmware doesn't support Configuration
- * Files, use the old Driver-based, hard-wired
- * initialization. Otherwise, try using the
- * Configuration File support and fall back to the
- * Driver-based initialization if there's no
- * Configuration File found.
- */
- if (ret < 0)
- ret = adap_init0_no_config(adap, reset);
- else {
- /*
- * The firmware provides us with a memory
- * buffer where we can load a Configuration
- * File from the host if we want to override
- * the Configuration File in flash.
- */
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ params, val);
- ret = adap_init0_config(adap, reset);
- if (ret == -ENOENT) {
- dev_info(adap->pdev_dev,
- "No Configuration File present "
- "on adapter. Using hard-wired "
- "configuration parameters.\n");
- ret = adap_init0_no_config(adap, reset);
- }
- }
+ /* If the firmware doesn't support Configuration Files,
+ * return an error.
+ */
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "firmware doesn't support "
+ "Firmware Configuration Files\n");
+ goto bye;
+ }
+
+ /* The firmware provides us with a memory buffer where we can
+ * load a Configuration File from the host if we want to
+ * override the Configuration File in flash.
+ */
+ ret = adap_init0_config(adap, reset);
+ if (ret == -ENOENT) {
+ dev_err(adap->pdev_dev, "no Configuration File "
+ "present on adapter.\n");
+ goto bye;
}
if (ret < 0) {
- dev_err(adap->pdev_dev,
- "could not initialize adapter, error %d\n",
- -ret);
+ dev_err(adap->pdev_dev, "could not initialize "
+ "adapter, error %d\n", -ret);
goto bye;
}
}
- /*
- * If we're living with non-hard-coded parameters (either from a
- * Firmware Configuration File or values programmed by a different PF
- * Driver), give the SGE code a chance to pull in anything that it
- * needs ... Note that this must be called after we retrieve our VPD
- * parameters in order to know how to convert core ticks to seconds.
+ /* Give the SGE code a chance to pull in anything that it needs ...
+ * Note that this must be called after we retrieve our VPD parameters
+ * in order to know how to convert core ticks to seconds, etc.
*/
- if (adap->flags & USING_SOFT_PARAMS) {
- ret = t4_sge_init(adap);
- if (ret < 0)
- goto bye;
- }
+ ret = t4_sge_init(adap);
+ if (ret < 0)
+ goto bye;
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
@@ -5739,6 +5293,14 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
+ params[0] = FW_PARAM_PFVF(CLIP_START);
+ params[1] = FW_PARAM_PFVF(CLIP_END);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->clipt_start = val[0];
+ adap->clipt_end = val[1];
+
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
@@ -6401,7 +5963,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unmap_bar0;
/* We control everything through one PF */
- func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+ func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
if (func != ent->driver_data) {
iounmap(regs);
pci_disable_device(pdev);
@@ -6467,9 +6029,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) {
- s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
- qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
- SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
+ adapter->fn);
+ qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
/* Each segment size is 128B. Write coalescing is enabled only
@@ -6557,6 +6121,18 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up
+ * some functionality
+ */
+ dev_warn(&pdev->dev,
+ "could not allocate Clip table, continuing\n");
+ adapter->params.offload = 0;
+ }
+#endif
if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
@@ -6682,6 +6258,9 @@ static void remove_one(struct pci_dev *pdev)
cxgb_down(adapter);
free_some_resources(adapter);
+#if IS_ENABLED(CONFIG_IPV6)
+ t4_cleanup_clip_tbl(adapter);
+#endif
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -6720,7 +6299,10 @@ static int __init cxgb4_init_module(void)
debugfs_remove(cxgb4_debugfs_root);
#if IS_ENABLED(CONFIG_IPV6)
- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ if (!inet6addr_registered) {
+ register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ inet6addr_registered = true;
+ }
#endif
return ret;
@@ -6729,7 +6311,10 @@ static int __init cxgb4_init_module(void)
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
- unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ if (inet6addr_registered) {
+ unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ inet6addr_registered = false;
+ }
#endif
pci_unregister_driver(&cxgb4_driver);
debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 152b4c4c7809..78ab4d406ce2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -173,9 +173,6 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
unsigned char port, unsigned char mask);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
-int cxgb4_clip_get(const struct net_device *dev, const struct in6_addr *lip);
-int cxgb4_clip_release(const struct net_device *dev,
- const struct in6_addr *lip);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a047baa9fd04..252efc29321f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -46,6 +46,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
+#include "t4_values.h"
#define VLAN_NONE 0xfff
@@ -150,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
e->idx | (sync ? F_SYNC_WR : 0) |
- TID_QID(adap->sge.fw_evtq.abs_id)));
- req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+ TID_QID_V(adap->sge.fw_evtq.abs_id)));
+ req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
req->l2t_idx = htons(e->idx);
req->vlan = htons(e->vlan);
if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
- ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+ ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
u32 pf = FW_VIID_PFN_G(viid);
u32 vld = FW_VIID_VIVLD_G(viid);
- ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
- V_FT_VNID_ID_PF(pf) |
- V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+ ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
+ FT_VNID_ID_PF_V(pf) |
+ FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
}
return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ebf935a1e352..b4b9f6048fe7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -43,8 +43,12 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
@@ -521,10 +525,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
u32 val;
if (q->pend_cred >= 8) {
- val = PIDX(q->pend_cred / 8);
- if (!is_t4(adap->params.chip))
- val |= DBTYPE(1);
- val |= DBPRIO(1);
+ if (is_t4(adap->params.chip))
+ val = PIDX_V(q->pend_cred / 8);
+ else
+ val = PIDX_T5_V(q->pend_cred / 8) |
+ DBTYPE_F;
+ val |= DBPRIO_F;
wmb();
/* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +538,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- val | QID(q->cntxt_id));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ val | QID_V(q->cntxt_id));
} else {
- writel(val | QID(q->bar2_qid),
+ writel(val | QID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to
@@ -818,7 +824,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
sgl->addr0 = cpu_to_be64(addr[1]);
}
- sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
if (likely(--nfrags == 0))
return;
/*
@@ -884,7 +891,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- u32 val = PIDX(n);
+ u32 val = PIDX_V(n);
unsigned long flags;
/* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +899,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
*/
spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled)
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | val);
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(q->cntxt_id) | val);
else
q->db_pidx_inc += n;
q->db_pidx = q->pidx;
spin_unlock_irqrestore(&q->db_lock, flags);
} else {
- u32 val = PIDX_T5(n);
+ u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +914,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large.
*/
- WARN_ON(val & DBPRIO(1));
+ WARN_ON(val & DBPRIO_F);
/* If we're only writing a single TX Descriptor and we can use
* Inferred QID registers, we can use the Write Combining
@@ -923,7 +930,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
(q->bar2_addr + SGE_UDB_WCDOORBELL),
wr);
} else {
- writel(val | QID(q->bar2_qid),
+ writel(val | QID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_KDOORBELL);
}
@@ -1150,9 +1157,9 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1716,6 +1723,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ skb_mark_napi_id(skb, &rxq->rspq.napi);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
PKT_HASH_TYPE_L3);
@@ -1758,7 +1766,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
- if ((pkt->l2info & htonl(RXF_TCP)) &&
+ if ((pkt->l2info & htonl(RXF_TCP_F)) &&
+ !(cxgb_poll_busy_polling(q)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
return 0;
@@ -1780,11 +1789,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
- } else if (pkt->l2info & htonl(RXF_IP)) {
+ } else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
skb->ip_summed = CHECKSUM_COMPLETE;
@@ -1797,6 +1806,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
}
+ skb_mark_napi_id(skb, &q->napi);
netif_receive_skb(skb);
return 0;
}
@@ -1959,6 +1969,38 @@ static int process_responses(struct sge_rspq *q, int budget)
return budget - budget_left;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int cxgb_busy_poll(struct napi_struct *napi)
+{
+ struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+ unsigned int params, work_done;
+ u32 val;
+
+ if (!cxgb_poll_lock_poll(q))
+ return LL_FLUSH_BUSY;
+
+ work_done = process_responses(q, 4);
+ params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+ q->next_intr_params = params;
+ val = CIDXINC_V(work_done) | SEINTARM_V(params);
+
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!q->bar2_addr))
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+ val | INGRESSQID_V((u32)q->cntxt_id));
+ else {
+ writel(val | INGRESSQID_V(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_GTS);
+ wmb();
+ }
+
+ cxgb_poll_unlock_poll(q);
+ return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* napi_rx_handler - the NAPI handler for Rx processing
* @napi: the napi instance
@@ -1974,9 +2016,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
{
unsigned int params;
struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
- int work_done = process_responses(q, budget);
+ int work_done;
u32 val;
+ if (!cxgb_poll_lock_napi(q))
+ return budget;
+
+ work_done = process_responses(q, budget);
if (likely(work_done < budget)) {
int timer_index;
@@ -2001,19 +2047,20 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
} else
params = QINTR_TIMER_IDX(7);
- val = CIDXINC(work_done) | SEINTARM(params);
+ val = CIDXINC_V(work_done) | SEINTARM_V(params);
/* If we don't have access to the new User GTS (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
- val | INGRESSQID((u32)q->cntxt_id));
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+ val | INGRESSQID_V((u32)q->cntxt_id));
} else {
- writel(val | INGRESSQID(q->bar2_qid),
+ writel(val | INGRESSQID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_GTS);
wmb();
}
+ cxgb_poll_unlock_napi(q);
return work_done;
}
@@ -2056,16 +2103,16 @@ static unsigned int process_intrq(struct adapter *adap)
rspq_next(q);
}
- val = CIDXINC(credits) | SEINTARM(q->intr_params);
+ val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
/* If we don't have access to the new User GTS (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
- val | INGRESSQID(q->cntxt_id));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ val | INGRESSQID_V(q->cntxt_id));
} else {
- writel(val | INGRESSQID(q->bar2_qid),
+ writel(val | INGRESSQID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -2095,7 +2142,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
struct adapter *adap = cookie;
- t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+ t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
if (t4_slow_intr_handler(adap) | process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
@@ -2142,9 +2189,9 @@ static void sge_rx_timer_cb(unsigned long data)
}
}
- t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
- idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
- idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
+ idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
+ idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
for (i = 0; i < 2; i++) {
u32 debug0, debug11;
@@ -2188,12 +2235,12 @@ static void sge_rx_timer_cb(unsigned long data)
/* Read and save the SGE IDMA State and Queue ID information.
* We do this every time in case it changes across time ...
*/
- t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
- debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
+ debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
- t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
- debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
+ debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
@@ -2337,6 +2384,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto err;
netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ napi_hash_add(&iq->napi);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
@@ -2594,6 +2642,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
+ napi_hash_del(&rq->napi);
netif_napi_del(&rq->napi);
rq->netdev = NULL;
rq->cntxt_id = rq->abs_id = 0;
@@ -2738,24 +2787,11 @@ void t4_sge_stop(struct adapter *adap)
}
/**
- * t4_sge_init - initialize SGE
+ * t4_sge_init_soft - grab core SGE values needed by SGE code
* @adap: the adapter
*
- * Performs SGE initialization needed every time after a chip reset.
- * We do not initialize any of the queues here, instead the driver
- * top-level must request them individually.
- *
- * Called in two different modes:
- *
- * 1. Perform actual hardware initialization and record hard-coded
- * parameters which were used. This gets used when we're the
- * Master PF and the Firmware Configuration File support didn't
- * work for some reason.
- *
- * 2. We're not the Master PF or initialization was performed with
- * a Firmware Configuration File. In this case we need to grab
- * any of the SGE operating parameters that we need to have in
- * order to do our job and make sure we can live with them ...
+ * Grab the SGE operating parameters that we need in order to do our
+ * job and make sure we can live with them.
*/
static int t4_sge_init_soft(struct adapter *adap)
@@ -2770,8 +2806,8 @@ static int t4_sge_init_soft(struct adapter *adap)
* process_responses() and that only packet data is going to the
* Free Lists.
*/
- if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
- RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
+ RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2785,7 +2821,7 @@ static int t4_sge_init_soft(struct adapter *adap)
* XXX meet our needs!
*/
#define READ_FL_BUF(x) \
- t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+ t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,99 +2859,38 @@ static int t4_sge_init_soft(struct adapter *adap)
* Retrieve our RX interrupt holdoff timer values and counter
* threshold values from the SGE parameters.
*/
- timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
- timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
- timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+ timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
+ timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
+ timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
s->timer_val[0] = core_ticks_to_us(adap,
- TIMERVALUE0_GET(timer_value_0_and_1));
+ TIMERVALUE0_G(timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adap,
- TIMERVALUE1_GET(timer_value_0_and_1));
+ TIMERVALUE1_G(timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adap,
- TIMERVALUE2_GET(timer_value_2_and_3));
+ TIMERVALUE2_G(timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adap,
- TIMERVALUE3_GET(timer_value_2_and_3));
+ TIMERVALUE3_G(timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adap,
- TIMERVALUE4_GET(timer_value_4_and_5));
+ TIMERVALUE4_G(timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adap,
- TIMERVALUE5_GET(timer_value_4_and_5));
+ TIMERVALUE5_G(timer_value_4_and_5));
- ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
- s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
- s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
- s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
- s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
-
- return 0;
-}
-
-static int t4_sge_init_hard(struct adapter *adap)
-{
- struct sge *s = &adap->sge;
-
- /*
- * Set up our basic SGE mode to deliver CPL messages to our Ingress
- * Queue and Packet Date to the Free List.
- */
- t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
- RXPKTCPLMODE_MASK);
-
- /*
- * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
- * and generate an interrupt when this occurs so we can recover.
- */
- if (is_t4(adap->params.chip)) {
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_HP_INT_THRESH(M_HP_INT_THRESH) |
- V_LP_INT_THRESH(M_LP_INT_THRESH),
- V_HP_INT_THRESH(dbfifo_int_thresh) |
- V_LP_INT_THRESH(dbfifo_int_thresh));
- } else {
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
- V_LP_INT_THRESH_T5(dbfifo_int_thresh));
- t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
- V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
- V_HP_INT_THRESH_T5(dbfifo_int_thresh));
- }
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
- F_ENABLE_DROP);
-
- /*
- * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
- * t4_fixup_host_params().
- */
- s->fl_pg_order = FL_PG_ORDER;
- if (s->fl_pg_order)
- t4_write_reg(adap,
- SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
- PAGE_SIZE << FL_PG_ORDER);
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
- FL_MTU_SMALL_BUFSIZE(adap));
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
- FL_MTU_LARGE_BUFSIZE(adap));
-
- /*
- * Note that the SGE Ingress Packet Count Interrupt Threshold and
- * Timer Holdoff values must be supplied by our caller.
- */
- t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
- THRESHOLD_0(s->counter_val[0]) |
- THRESHOLD_1(s->counter_val[1]) |
- THRESHOLD_2(s->counter_val[2]) |
- THRESHOLD_3(s->counter_val[3]));
- t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
- t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
- TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
- TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
- t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
- TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
- TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+ ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
+ s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+ s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+ s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+ s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
return 0;
}
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Perform low-level SGE code initialization needed every time after a
+ * chip reset.
+ */
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
@@ -2927,9 +2902,9 @@ int t4_sge_init(struct adapter *adap)
* Ingress Padding Boundary and Egress Status Page Size are set up by
* t4_fixup_host_params().
*/
- sge_control = t4_read_reg(adap, SGE_CONTROL);
- s->pktshift = PKTSHIFT_GET(sge_control);
- s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+ sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+ s->pktshift = PKTSHIFT_G(sge_control);
+ s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
@@ -2937,8 +2912,8 @@ int t4_sge_init(struct adapter *adap)
* within Packed Buffer Mode is the maximum of these two
* specifications.
*/
- ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
+ INGPADBOUNDARY_SHIFT_X);
if (is_t4(adap->params.chip)) {
s->fl_align = ingpadboundary;
} else {
@@ -2956,10 +2931,7 @@ int t4_sge_init(struct adapter *adap)
s->fl_align = max(ingpadboundary, ingpackboundary);
}
- if (adap->flags & USING_SOFT_PARAMS)
- ret = t4_sge_init_soft(adap);
- else
- ret = t4_sge_init_hard(adap);
+ ret = t4_sge_init_soft(adap);
if (ret < 0)
return ret;
@@ -2975,11 +2947,11 @@ int t4_sge_init(struct adapter *adap)
* buffers and a new field which only applies to Packed Mode Free List
* buffers.
*/
- sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
if (is_t4(adap->params.chip))
- egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+ egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
else
- egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+ egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c132d9030729..4d643b65265e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4fw_api.h"
/**
@@ -149,20 +150,20 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
- u32 req = ENABLE | FUNCTION(adap->fn) | reg;
+ u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
if (is_t4(adap->params.chip))
- req |= F_LOCALCFG;
+ req |= LOCALCFG_F;
- t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
- *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
+ t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
+ *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
* Configuration Space read. (None of the other fields matter when
* ENABLE is 0 so a simple register write is easier than a
* read-modify-write via t4_set_reg_field().)
*/
- t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
+ t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}
/*
@@ -187,8 +188,8 @@ static void t4_report_fw_error(struct adapter *adap)
};
u32 pcie_fw;
- pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (pcie_fw & PCIE_FW_ERR)
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (pcie_fw & PCIE_FW_ERR_F)
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
}
@@ -264,8 +265,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
u64 res;
int i, ms, delay_idx;
const __be64 *p = cmd;
- u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
- u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+ u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+ u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
@@ -277,9 +278,9 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
- v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+ v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
- v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+ v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV)
return v ? -EBUSY : -ETIMEDOUT;
@@ -287,7 +288,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
- t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
t4_read_reg(adap, ctl_reg); /* flush write */
delay_idx = 0;
@@ -303,8 +304,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
mdelay(ms);
v = t4_read_reg(adap, ctl_reg);
- if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
- if (!(v & MBMSGVALID)) {
+ if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
+ if (!(v & MBMSGVALID_F)) {
t4_write_reg(adap, ctl_reg, 0);
continue;
}
@@ -350,27 +351,27 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 mc_bist_status_rdata, mc_bist_data_pattern;
if (is_t4(adap->params.chip)) {
- mc_bist_cmd = MC_BIST_CMD;
- mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
- mc_bist_cmd_len = MC_BIST_CMD_LEN;
- mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
- mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+ mc_bist_cmd = MC_BIST_CMD_A;
+ mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
+ mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
+ mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
+ mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
} else {
- mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
- mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
- mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
- mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
- mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
+ mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+ mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+ mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+ mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
}
- if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
+ if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
return -EBUSY;
t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
t4_write_reg(adap, mc_bist_cmd_len, 64);
t4_write_reg(adap, mc_bist_data_pattern, 0xc);
- t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
- BIST_CMD_GAP(1));
- i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
+ t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
+ BIST_CMD_GAP_V(1));
+ i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
if (i)
return i;
@@ -403,31 +404,31 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
if (is_t4(adap->params.chip)) {
- edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
- edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
- edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
- idx);
- edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+ edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
+ edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
+ edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
idx);
+ edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
+ idx);
} else {
- edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
- edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+ edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
edc_bist_cmd_data_pattern =
- EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
edc_bist_status_rdata =
- EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
}
- if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
+ if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
return -EBUSY;
t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
t4_write_reg(adap, edc_bist_cmd_len, 64);
t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
t4_write_reg(adap, edc_bist_cmd,
- BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
- i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
+ BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
+ i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
if (i)
return i;
@@ -505,13 +506,13 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
win));
- mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
- mem_base = GET_PCIEOFST(mem_reg) << 10;
+ mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
+ mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
- win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
@@ -524,10 +525,10 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* attempt to use the new value.)
*/
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
pos | win_pf);
t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
@@ -552,11 +553,11 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
pos += mem_aperture;
offset = 0;
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
- win), pos | win_pf);
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+ win), pos | win_pf);
t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
- win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+ win));
}
}
@@ -760,14 +761,13 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+ if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
- t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
- ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+ t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+ SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
if (!ret)
- *valp = t4_read_reg(adapter, SF_DATA);
+ *valp = t4_read_reg(adapter, SF_DATA_A);
return ret;
}
@@ -788,14 +788,12 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+ if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
- t4_write_reg(adapter, SF_DATA, val);
- t4_write_reg(adapter, SF_OP, lock |
- cont | BYTECNT(byte_cnt - 1) | OP_WR);
- return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+ t4_write_reg(adapter, SF_DATA_A, val);
+ t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+ SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
+ return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}
/**
@@ -837,8 +835,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* (i.e., big-endian), otherwise as 32-bit words in the platform's
* natural endianness.
*/
-static int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented)
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
@@ -854,7 +852,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
for ( ; nwords; nwords--, data++) {
ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
if (nwords == 1)
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
if (byte_oriented)
@@ -902,7 +900,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
if (ret)
goto unlock;
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -918,7 +916,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
return 0;
unlock:
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
return ret;
}
@@ -950,6 +948,43 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
1, vers, 0);
}
+/**
+ * t4_get_exprom_version - return the Expansion ROM version (if any)
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the Expansion ROM header from FLASH and returns the version
+ * number (if present) through the @vers return value pointer. We return
+ * this in the Firmware Version Format since it's convenient. Return
+ * 0 on success, -ENOENT if no Expansion ROM is present.
+ */
+int t4_get_exprom_version(struct adapter *adap, u32 *vers)
+{
+ struct exprom_header {
+ unsigned char hdr_arr[16]; /* must start with 0x55aa */
+ unsigned char hdr_ver[4]; /* Expansion ROM version */
+ } *hdr;
+ u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+ sizeof(u32))];
+ int ret;
+
+ ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
+ ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
+ 0);
+ if (ret)
+ return ret;
+
+ hdr = (struct exprom_header *)exprom_header_buf;
+ if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+ return -ENOENT;
+
+ *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
+ FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
+ FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
+ FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
+ return 0;
+}
+
/* Is the given firmware API compatible with the one the driver was compiled
* with?
*/
@@ -1113,7 +1148,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
}
start++;
}
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
return ret;
}
@@ -1241,6 +1276,45 @@ out:
return ret;
}
+/**
+ * t4_fwcache - firmware cache operation
+ * @adap: the adapter
+ * @op : the operation (flush or flush and invalidate)
+ */
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
+{
+ struct fw_params_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn =
+ cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_PARAMS_CMD_PFN_V(adap->fn) |
+ FW_PARAMS_CMD_VFN_V(0));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.param[0].mnem =
+ cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
+ c.param[0].val = (__force __be32)op;
+
+ return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
+}
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < 8; i++) {
+ u32 *p = la_buf + i;
+
+ t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
+ j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
+ t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
+ for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
+ *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
+ }
+}
+
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
FW_PORT_CAP_ANEG)
@@ -1365,95 +1439,97 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
static void pcie_intr_handler(struct adapter *adapter)
{
static const struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
+ { RNPP_F, "RXNP array parity error", -1, 1 },
+ { RPCP_F, "RXPC array parity error", -1, 1 },
+ { RCIP_F, "RXCIF array parity error", -1, 1 },
+ { RCCP_F, "Rx completions control array parity error", -1, 1 },
+ { RFTP_F, "RXFT array parity error", -1, 1 },
{ 0 }
};
static const struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { TPCP_F, "TXPC array parity error", -1, 1 },
+ { TNPP_F, "TXNP array parity error", -1, 1 },
+ { TFTP_F, "TXFT array parity error", -1, 1 },
+ { TCAP_F, "TXCA array parity error", -1, 1 },
+ { TCIP_F, "TXCIF array parity error", -1, 1 },
+ { RCAP_F, "RXCA array parity error", -1, 1 },
+ { OTDD_F, "outbound request TLP discarded", -1, 1 },
+ { RDPE_F, "Rx data parity error", -1, 1 },
+ { TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0 }
};
static const struct intr_info pcie_intr_info[] = {
- { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- { MSIDATAPERR, "MSI data parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- { MATAGPERR, "PCI MA tag parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- { RXWRPERR, "PCI Rx write parity error", -1, 1 },
- { RPLPERR, "PCI replay buffer parity error", -1, 1 },
- { PCIESINT, "PCI core secondary fault", -1, 1 },
- { PCIEPINT, "PCI core primary fault", -1, 1 },
- { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+ { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR_F, "PCI unexpected split completion error",
+ -1, 0 },
{ 0 }
};
static struct intr_info t5_pcie_intr_info[] = {
- { MSTGRPPERR, "Master Response Read Queue parity error",
+ { MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
- { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
- { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
-1, 1 },
- { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
-1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DREQWRPERR, "PCI DMA channel write request parity error",
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR_F, "PCI DMA channel write request parity error",
-1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
- { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
-1, 1 },
- { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
- { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
- { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
- { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
- { READRSPERR, "Outbound read error", -1, 0 },
+ { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
+ -1, 1 },
+ { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR_F, "Outbound read error", -1, 0 },
{ 0 }
};
@@ -1461,15 +1537,15 @@ static void pcie_intr_handler(struct adapter *adapter)
if (is_t4(adapter->params.chip))
fat = t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
+ sysbus_intr_info) +
t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
pcie_intr_info);
else
- fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
t5_pcie_intr_info);
if (fat)
@@ -1483,11 +1559,11 @@ static void tp_intr_handler(struct adapter *adapter)
{
static const struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
- { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+ if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
t4_fatal_err(adapter);
}
@@ -1499,102 +1575,107 @@ static void sge_intr_handler(struct adapter *adapter)
u64 v;
static const struct intr_info sge_intr_info[] = {
- { ERR_CPL_EXCEED_IQE_SIZE,
+ { ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 },
- { ERR_INVALID_CIDX_INC,
+ { ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 },
- { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
- { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
- { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
- { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
- { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+ { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
+ { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+ { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+ { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
- { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 },
- { ERR_ING_CTXT_PRIO,
+ { ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO,
+ { ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
- { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
- { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
- v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
- ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+ v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
+ ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
(unsigned long long)v);
- t4_write_reg(adapter, SGE_INT_CAUSE1, v);
- t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+ t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
+ t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
- if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+ if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0)
t4_fatal_err(adapter);
}
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+ OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+ IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
/*
* CIM interrupt handler.
*/
static void cim_intr_handler(struct adapter *adapter)
{
static const struct intr_info cim_intr_info[] = {
- { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
- { OBQPARERR, "CIM OBQ parity error", -1, 1 },
- { IBQPARERR, "CIM IBQ parity error", -1, 1 },
- { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
- { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
- { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
- { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
{ 0 }
};
static const struct intr_info cim_upintr_info[] = {
- { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
- { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
- { ILLWRINT, "CIM illegal write", -1, 1 },
- { ILLRDINT, "CIM illegal read", -1, 1 },
- { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
- { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
- { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
- { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
- { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
- { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
- { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
- { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
- { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
- { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
- { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
- { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
- { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
- { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
- { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
- { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
- { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
- { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
- { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
- { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
- { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
- { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
- { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
- { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT_F, "CIM illegal write", -1, 1 },
+ { ILLRDINT_F, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
{ 0 }
};
int fat;
- if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
+ if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
t4_report_fw_error(adapter);
- fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+ fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
cim_intr_info) +
- t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+ t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
cim_upintr_info);
if (fat)
t4_fatal_err(adapter);
@@ -1611,7 +1692,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
{ 0 }
};
- if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
t4_fatal_err(adapter);
}
@@ -1621,19 +1702,19 @@ static void ulprx_intr_handler(struct adapter *adapter)
static void ulptx_intr_handler(struct adapter *adapter)
{
static const struct intr_info ulptx_intr_info[] = {
- { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
0 },
{ 0xfffffff, "ULPTX parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
t4_fatal_err(adapter);
}
@@ -1643,19 +1724,20 @@ static void ulptx_intr_handler(struct adapter *adapter)
static void pmtx_intr_handler(struct adapter *adapter)
{
static const struct intr_info pmtx_intr_info[] = {
- { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
- { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
- { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
- { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
- { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
- { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
+ { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
+ -1, 1 },
+ { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+ { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
{ 0 }
};
- if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+ if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
t4_fatal_err(adapter);
}
@@ -1665,16 +1747,17 @@ static void pmtx_intr_handler(struct adapter *adapter)
static void pmrx_intr_handler(struct adapter *adapter)
{
static const struct intr_info pmrx_intr_info[] = {
- { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
- { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
- { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
- { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
- { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
+ { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
+ -1, 1 },
+ { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+ { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
{ 0 }
};
- if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+ if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
t4_fatal_err(adapter);
}
@@ -1684,16 +1767,16 @@ static void pmrx_intr_handler(struct adapter *adapter)
static void cplsw_intr_handler(struct adapter *adapter)
{
static const struct intr_info cplsw_intr_info[] = {
- { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
- { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
- { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
- { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
- { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
- { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+ if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
t4_fatal_err(adapter);
}
@@ -1703,15 +1786,15 @@ static void cplsw_intr_handler(struct adapter *adapter)
static void le_intr_handler(struct adapter *adap)
{
static const struct intr_info le_intr_info[] = {
- { LIPMISS, "LE LIP miss", -1, 0 },
- { LIP0, "LE 0 LIP error", -1, 0 },
- { PARITYERR, "LE parity error", -1, 1 },
- { UNKNOWNCMD, "LE unknown command", -1, 1 },
- { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { LIPMISS_F, "LE LIP miss", -1, 0 },
+ { LIP0_F, "LE 0 LIP error", -1, 0 },
+ { PARITYERR_F, "LE parity error", -1, 1 },
+ { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { REQQPARERR_F, "LE request queue parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
t4_fatal_err(adap);
}
@@ -1725,19 +1808,22 @@ static void mps_intr_handler(struct adapter *adapter)
{ 0 }
};
static const struct intr_info mps_tx_intr_info[] = {
- { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
- { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
- { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
- { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
- { BUBBLE, "MPS Tx underflow", -1, 1 },
- { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
- { FRMERR, "MPS Tx framing error", -1, 1 },
+ { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+ -1, 1 },
+ { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+ -1, 1 },
+ { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+ { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR_F, "MPS Tx framing error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_trc_intr_info[] = {
- { FILTMEM, "MPS TRC filter parity error", -1, 1 },
- { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
- { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+ -1, 1 },
+ { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_stat_sram_intr_info[] = {
@@ -1753,37 +1839,37 @@ static void mps_intr_handler(struct adapter *adapter)
{ 0 }
};
static const struct intr_info mps_cls_intr_info[] = {
- { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
- { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
- { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
{ 0 }
};
int fat;
- fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+ fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
mps_rx_intr_info) +
- t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+ t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
mps_tx_intr_info) +
- t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+ t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
mps_trc_intr_info) +
- t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
mps_stat_sram_intr_info) +
- t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
mps_stat_tx_intr_info) +
- t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
mps_stat_rx_intr_info) +
- t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+ t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
mps_cls_intr_info);
- t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
- RXINT | TXINT | STATINT);
- t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
+ t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
+ t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
if (fat)
t4_fatal_err(adapter);
}
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+ ECC_UE_INT_CAUSE_F)
/*
* EDC/MC interrupt handler.
@@ -1795,40 +1881,40 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
unsigned int addr, cnt_addr, v;
if (idx <= MEM_EDC1) {
- addr = EDC_REG(EDC_INT_CAUSE, idx);
- cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
} else if (idx == MEM_MC) {
if (is_t4(adapter->params.chip)) {
- addr = MC_INT_CAUSE;
- cnt_addr = MC_ECC_STATUS;
+ addr = MC_INT_CAUSE_A;
+ cnt_addr = MC_ECC_STATUS_A;
} else {
- addr = MC_P_INT_CAUSE;
- cnt_addr = MC_P_ECC_STATUS;
+ addr = MC_P_INT_CAUSE_A;
+ cnt_addr = MC_P_ECC_STATUS_A;
}
} else {
- addr = MC_REG(MC_P_INT_CAUSE, 1);
- cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
+ addr = MC_REG(MC_P_INT_CAUSE_A, 1);
+ cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
}
v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
- if (v & PERR_INT_CAUSE)
+ if (v & PERR_INT_CAUSE_F)
dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
name[idx]);
- if (v & ECC_CE_INT_CAUSE) {
- u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+ if (v & ECC_CE_INT_CAUSE_F) {
+ u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
- t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+ t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
if (printk_ratelimit())
dev_warn(adapter->pdev_dev,
"%u %s correctable ECC data error%s\n",
cnt, name[idx], cnt > 1 ? "s" : "");
}
- if (v & ECC_UE_INT_CAUSE)
+ if (v & ECC_UE_INT_CAUSE_F)
dev_alert(adapter->pdev_dev,
"%s uncorrectable ECC data error\n", name[idx]);
t4_write_reg(adapter, addr, v);
- if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
t4_fatal_err(adapter);
}
@@ -1837,26 +1923,26 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
*/
static void ma_intr_handler(struct adapter *adap)
{
- u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+ u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
- if (status & MEM_PERR_INT_CAUSE) {
+ if (status & MEM_PERR_INT_CAUSE_F) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
- t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
if (is_t5(adap->params.chip))
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap,
- MA_PARITY_ERROR_STATUS2));
+ MA_PARITY_ERROR_STATUS2_A));
}
- if (status & MEM_WRAP_INT_CAUSE) {
- v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+ if (status & MEM_WRAP_INT_CAUSE_F) {
+ v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
"client %u to address %#x\n",
- MEM_WRAP_CLIENT_NUM_GET(v),
- MEM_WRAP_ADDRESS_GET(v) << 4);
+ MEM_WRAP_CLIENT_NUM_G(v),
+ MEM_WRAP_ADDRESS_G(v) << 4);
}
- t4_write_reg(adap, MA_INT_CAUSE, status);
+ t4_write_reg(adap, MA_INT_CAUSE_A, status);
t4_fatal_err(adap);
}
@@ -1866,13 +1952,13 @@ static void ma_intr_handler(struct adapter *adap)
static void smb_intr_handler(struct adapter *adap)
{
static const struct intr_info smb_intr_info[] = {
- { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
- { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
- { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+ if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
t4_fatal_err(adap);
}
@@ -1882,14 +1968,14 @@ static void smb_intr_handler(struct adapter *adap)
static void ncsi_intr_handler(struct adapter *adap)
{
static const struct intr_info ncsi_intr_info[] = {
- { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
- { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
- { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
- { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+ if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
t4_fatal_err(adap);
}
@@ -1901,23 +1987,23 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
u32 v, int_cause_reg;
if (is_t4(adap->params.chip))
- int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+ int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
else
- int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+ int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
v = t4_read_reg(adap, int_cause_reg);
- v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
if (!v)
return;
- if (v & TXFIFO_PRTY_ERR)
+ if (v & TXFIFO_PRTY_ERR_F)
dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
port);
- if (v & RXFIFO_PRTY_ERR)
+ if (v & RXFIFO_PRTY_ERR_F)
dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
port);
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
t4_fatal_err(adap);
}
@@ -1927,19 +2013,19 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
static void pl_intr_handler(struct adapter *adap)
{
static const struct intr_info pl_intr_info[] = {
- { FATALPERR, "T4 fatal parity error", -1, 1 },
- { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+ { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+ if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
t4_fatal_err(adap);
}
-#define PF_INTR_MASK (PFSW)
-#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
- EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
- CPL_SWITCH | SGE | ULP_TX)
+#define PF_INTR_MASK (PFSW_F)
+#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
+ EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
+ CPL_SWITCH_F | SGE_F | ULP_TX_F)
/**
* t4_slow_intr_handler - control path interrupt handler
@@ -1951,60 +2037,60 @@ static void pl_intr_handler(struct adapter *adap)
*/
int t4_slow_intr_handler(struct adapter *adapter)
{
- u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+ u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
if (!(cause & GLBL_INTR_MASK))
return 0;
- if (cause & CIM)
+ if (cause & CIM_F)
cim_intr_handler(adapter);
- if (cause & MPS)
+ if (cause & MPS_F)
mps_intr_handler(adapter);
- if (cause & NCSI)
+ if (cause & NCSI_F)
ncsi_intr_handler(adapter);
- if (cause & PL)
+ if (cause & PL_F)
pl_intr_handler(adapter);
- if (cause & SMB)
+ if (cause & SMB_F)
smb_intr_handler(adapter);
- if (cause & XGMAC0)
+ if (cause & XGMAC0_F)
xgmac_intr_handler(adapter, 0);
- if (cause & XGMAC1)
+ if (cause & XGMAC1_F)
xgmac_intr_handler(adapter, 1);
- if (cause & XGMAC_KR0)
+ if (cause & XGMAC_KR0_F)
xgmac_intr_handler(adapter, 2);
- if (cause & XGMAC_KR1)
+ if (cause & XGMAC_KR1_F)
xgmac_intr_handler(adapter, 3);
- if (cause & PCIE)
+ if (cause & PCIE_F)
pcie_intr_handler(adapter);
- if (cause & MC)
+ if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
- if (!is_t4(adapter->params.chip) && (cause & MC1))
+ if (!is_t4(adapter->params.chip) && (cause & MC1_S))
mem_intr_handler(adapter, MEM_MC1);
- if (cause & EDC0)
+ if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
- if (cause & EDC1)
+ if (cause & EDC1_F)
mem_intr_handler(adapter, MEM_EDC1);
- if (cause & LE)
+ if (cause & LE_F)
le_intr_handler(adapter);
- if (cause & TP)
+ if (cause & TP_F)
tp_intr_handler(adapter);
- if (cause & MA)
+ if (cause & MA_F)
ma_intr_handler(adapter);
- if (cause & PM_TX)
+ if (cause & PM_TX_F)
pmtx_intr_handler(adapter);
- if (cause & PM_RX)
+ if (cause & PM_RX_F)
pmrx_intr_handler(adapter);
- if (cause & ULP_RX)
+ if (cause & ULP_RX_F)
ulprx_intr_handler(adapter);
- if (cause & CPL_SWITCH)
+ if (cause & CPL_SWITCH_F)
cplsw_intr_handler(adapter);
- if (cause & SGE)
+ if (cause & SGE_F)
sge_intr_handler(adapter);
- if (cause & ULP_TX)
+ if (cause & ULP_TX_F)
ulptx_intr_handler(adapter);
/* Clear the interrupts just processed for which we are the master. */
- t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
- (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+ t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+ (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
return 1;
}
@@ -2023,19 +2109,19 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
-
- t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
- ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
- ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
- ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
- ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
- ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
- ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
- DBFIFO_HP_INT | DBFIFO_LP_INT |
- EGRESS_SIZE_ERR);
- t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
- t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+ u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+
+ t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
+ ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
+ ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+ ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+ ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+ ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
+ EGRESS_SIZE_ERR_F);
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
+ t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
/**
@@ -2048,10 +2134,10 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+ u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
- t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
- t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
+ t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
/**
@@ -2166,6 +2252,147 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
+/* Read an RSS table row */
+static int rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
+ t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
+ return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
+ 5, 0, val);
+}
+
+/**
+ * t4_read_rss - read the contents of the RSS mapping table
+ * @adapter: the adapter
+ * @map: holds the contents of the RSS mapping table
+ *
+ * Reads the contents of the RSS hash->queue mapping table.
+ */
+int t4_read_rss(struct adapter *adapter, u16 *map)
+{
+ u32 val;
+ int i, ret;
+
+ for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ ret = rd_rss_row(adapter, i, &val);
+ if (ret)
+ return ret;
+ *map++ = LKPTBLQUEUE0_G(val);
+ *map++ = LKPTBLQUEUE1_G(val);
+ }
+ return 0;
+}
+
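A hedged usage sketch for t4_read_rss() (hypothetical helper, not in the patch): the caller supplies an array of RSS_NENTRIES u16 queue IDs, which the function fills two entries per table row:

static void example_show_rss_map(struct adapter *adap)
{
	u16 *map;
	int i, ret;

	map = kcalloc(RSS_NENTRIES, sizeof(u16), GFP_KERNEL);
	if (!map)
		return;

	ret = t4_read_rss(adap, map);
	if (!ret)
		for (i = 0; i < RSS_NENTRIES; i++)
			dev_info(adap->pdev_dev, "RSS entry %4d -> queue %u\n",
				 i, map[i]);
	kfree(map);
}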
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+{
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
+ if (idx >= 0 && idx < 16)
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDR_V(idx) | KEYWREN_F);
+}
+
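A small sketch tying the two key helpers together (assumed usage only): read back the 320-bit global key and reprogram it through the global path by passing an index outside 0..15:

static void example_refresh_rss_key(struct adapter *adap)
{
	u32 key[10];			/* 10 x 32 bits = 320-bit key */

	t4_read_rss_key(adap, key);
	t4_write_rss_key(adap, key, -1);	/* idx < 0: global key only */
}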
+/**
+ * t4_read_rss_pf_config - read PF RSS Configuration Table
+ * @adapter: the adapter
+ * @index: the entry in the PF RSS table to read
+ * @valp: where to store the returned value
+ *
+ * Reads the PF RSS Configuration Table at the specified index and returns
+ * the value found there.
+ */
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+ u32 *valp)
+{
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ valp, 1, TP_RSS_PF0_CONFIG_A + index);
+}
+
+/**
+ * t4_read_rss_vf_config - read VF RSS Configuration Table
+ * @adapter: the adapter
+ * @index: the entry in the VF RSS table to read
+ * @vfl: where to store the returned VFL
+ * @vfh: where to store the returned VFH
+ *
+ * Reads the VF RSS Configuration Table at the specified index and returns
+ * the (VFL, VFH) values found there.
+ */
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+ u32 *vfl, u32 *vfh)
+{
+ u32 vrt, mask, data;
+
+ mask = VFWRADDR_V(VFWRADDR_M);
+ data = VFWRADDR_V(index);
+
+ /* Request that the index'th VF Table values be read into VFL/VFH.
+ */
+ vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
+ vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
+ vrt |= data | VFRDEN_F;
+ t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
+
+ /* Grab the VFL/VFH values ...
+ */
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfl, 1, TP_RSS_VFL_CONFIG_A);
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfh, 1, TP_RSS_VFH_CONFIG_A);
+}
+
+/**
+ * t4_read_rss_pf_map - read PF RSS Map
+ * @adapter: the adapter
+ *
+ * Reads the PF RSS Map register and returns its value.
+ */
+u32 t4_read_rss_pf_map(struct adapter *adapter)
+{
+ u32 pfmap;
+
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmap, 1, TP_RSS_PF_MAP_A);
+ return pfmap;
+}
+
+/**
+ * t4_read_rss_pf_mask - read PF RSS Mask
+ * @adapter: the adapter
+ *
+ * Reads the PF RSS Mask register and returns its value.
+ */
+u32 t4_read_rss_pf_mask(struct adapter *adapter)
+{
+ u32 pfmask;
+
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmask, 1, TP_RSS_PF_MSK_A);
+ return pfmask;
+}
+
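For completeness, a sketch of how the PF map/mask readers above might be consumed together (illustrative only, helper name assumed):

static void example_show_rss_pf_map_mask(struct adapter *adap)
{
	u32 pf_map = t4_read_rss_pf_map(adap);
	u32 pf_mask = t4_read_rss_pf_mask(adap);

	dev_info(adap->pdev_dev, "RSS PF map %#x, PF mask %#x\n",
		 pf_map, pf_mask);
}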
/**
* t4_tp_get_tcp_stats - read TP's TCP MIB counters
* @adap: the adapter
@@ -2178,23 +2405,23 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
{
- u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+ u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
-#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
if (v4) {
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
- ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
v4->tcpOutRsts = STAT(OUT_RST);
v4->tcpInSegs = STAT64(IN_SEG);
v4->tcpOutSegs = STAT64(OUT_SEG);
v4->tcpRetransSegs = STAT64(RXT_SEG);
}
if (v6) {
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
- ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
v6->tcpOutRsts = STAT(OUT_RST);
v6->tcpInSegs = STAT64(IN_SEG);
v6->tcpOutSegs = STAT64(OUT_SEG);
@@ -2219,16 +2446,37 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
int i;
for (i = 0; i < NMTUS; ++i) {
- t4_write_reg(adap, TP_MTU_TABLE,
- MTUINDEX(0xff) | MTUVALUE(i));
- v = t4_read_reg(adap, TP_MTU_TABLE);
- mtus[i] = MTUVALUE_GET(v);
+ t4_write_reg(adap, TP_MTU_TABLE_A,
+ MTUINDEX_V(0xff) | MTUVALUE_V(i));
+ v = t4_read_reg(adap, TP_MTU_TABLE_A);
+ mtus[i] = MTUVALUE_G(v);
if (mtu_log)
- mtu_log[i] = MTUWIDTH_GET(v);
+ mtu_log[i] = MTUWIDTH_G(v);
}
}
/**
+ * t4_read_cong_tbl - reads the congestion control table
+ * @adap: the adapter
+ * @incr: where to store the alpha values
+ *
+ * Reads the additive increments programmed into the HW congestion
+ * control table.
+ */
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
+{
+ unsigned int mtu, w;
+
+ for (mtu = 0; mtu < NMTUS; ++mtu)
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ t4_write_reg(adap, TP_CCTRL_TABLE_A,
+ ROWINDEX_V(0xffff) | (mtu << 5) | w);
+ incr[mtu][w] = (u16)t4_read_reg(adap,
+ TP_CCTRL_TABLE_A) & 0x1fff;
+ }
+}
+
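A hypothetical caller sketch for t4_read_cong_tbl(), assuming the NMTUS x NCCTRL_WIN dimensions used above; the table is heap-allocated since NMTUS * NCCTRL_WIN u16 entries can be sizeable for an on-stack array:

static void example_show_cong_window(struct adapter *adap, unsigned int win)
{
	u16 (*incr)[NCCTRL_WIN];
	unsigned int mtu;

	incr = kcalloc(NMTUS, sizeof(*incr), GFP_KERNEL);
	if (!incr || win >= NCCTRL_WIN)
		goto out;

	t4_read_cong_tbl(adap, incr);
	for (mtu = 0; mtu < NMTUS; mtu++)
		dev_info(adap->pdev_dev, "MTU idx %u, win %u: incr %u\n",
			 mtu, win, incr[mtu][win]);
out:
	kfree(incr);
}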
+/**
* t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
* @adap: the adapter
* @addr: the indirect TP register address
@@ -2240,9 +2488,9 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val)
{
- t4_write_reg(adap, TP_PIO_ADDR, addr);
- val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
- t4_write_reg(adap, TP_PIO_DATA, val);
+ t4_write_reg(adap, TP_PIO_ADDR_A, addr);
+ val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
+ t4_write_reg(adap, TP_PIO_DATA_A, val);
}
/**
@@ -2321,8 +2569,8 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
if (!(mtu & ((1 << log2) >> 2))) /* round */
log2--;
- t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
- MTUWIDTH(log2) | MTUVALUE(mtu));
+ t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
+ MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
for (w = 0; w < NCCTRL_WIN; ++w) {
unsigned int inc;
@@ -2330,13 +2578,67 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
CC_MIN_INCR);
- t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+ t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
(w << 16) | (beta[w] << 13) | inc);
}
}
}
/**
+ * t4_pmtx_get_stats - returns the HW stats from PMTX
+ * @adap: the adapter
+ * @cnt: where to store the count statistics
+ * @cycles: where to store the cycle statistics
+ *
+ * Returns performance statistics from PMTX.
+ */
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+ int i;
+ u32 data[2];
+
+ for (i = 0; i < PM_NSTATS; i++) {
+ t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
+ cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
+ if (is_t4(adap->params.chip)) {
+ cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
+ } else {
+ t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
+ PM_TX_DBG_DATA_A, data, 2,
+ PM_TX_DBG_STAT_MSB_A);
+ cycles[i] = (((u64)data[0] << 32) | data[1]);
+ }
+ }
+}
+
+/**
+ * t4_pmrx_get_stats - returns the HW stats from PMRX
+ * @adap: the adapter
+ * @cnt: where to store the count statistics
+ * @cycles: where to store the cycle statistics
+ *
+ * Returns performance statistics from PMRX.
+ */
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+ int i;
+ u32 data[2];
+
+ for (i = 0; i < PM_NSTATS; i++) {
+ t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
+ cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
+ if (is_t4(adap->params.chip)) {
+ cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
+ } else {
+ t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
+ PM_RX_DBG_DATA_A, data, 2,
+ PM_RX_DBG_STAT_MSB_A);
+ cycles[i] = (((u64)data[0] << 32) | data[1]);
+ }
+ }
+}
+
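A sketch showing the expected buffer sizing for the two PM stat readers above (assumed usage; PM_NSTATS is the per-direction stat count used in their loops):

static void example_collect_pm_stats(struct adapter *adap)
{
	u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
	u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
	int i;

	t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
	t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
	for (i = 0; i < PM_NSTATS; i++)
		dev_info(adap->pdev_dev,
			 "PM stat %d: tx %u/%llu, rx %u/%llu\n", i,
			 tx_cnt[i], (unsigned long long)tx_cyc[i],
			 rx_cnt[i], (unsigned long long)rx_cyc[i]);
}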
+/**
* get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
@@ -2347,7 +2649,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
*/
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
- u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+ u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
if (n == 0)
return idx == 0 ? 0xf : 0;
@@ -2485,11 +2787,11 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
if (is_t4(adap->params.chip)) {
mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
} else {
mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
}
if (addr) {
@@ -2499,8 +2801,8 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
- addr ? MAGICEN : 0);
+ t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
+ addr ? MAGICEN_F : 0);
}
/**
@@ -2525,20 +2827,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
u32 port_cfg_reg;
if (is_t4(adap->params.chip))
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
else
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
if (!enable) {
- t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
#define EPIO_REG(name) \
- (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
- T5_PORT_REG(port, MAC_PORT_EPIO_##name))
+ (is_t4(adap->params.chip) ? \
+ PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
+ T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2550,21 +2853,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
/* write byte masks */
t4_write_reg(adap, EPIO_REG(DATA0), mask0);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
return -ETIMEDOUT;
/* write CRC */
t4_write_reg(adap, EPIO_REG(DATA0), crc);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
return -ETIMEDOUT;
}
#undef EPIO_REG
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
return 0;
}
@@ -2749,9 +3052,9 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
};
static const u32 sge_regs[] = {
- SGE_DEBUG_DATA_LOW_INDEX_2,
- SGE_DEBUG_DATA_LOW_INDEX_3,
- SGE_DEBUG_DATA_HIGH_INDEX_10,
+ SGE_DEBUG_DATA_LOW_INDEX_2_A,
+ SGE_DEBUG_DATA_LOW_INDEX_3_A,
+ SGE_DEBUG_DATA_HIGH_INDEX_10_A,
};
const char **sge_idma_decode;
int sge_idma_decode_nstates;
@@ -2818,7 +3121,7 @@ retry:
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
- if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
+ if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
t4_report_fw_error(adap);
return ret;
}
@@ -2868,8 +3171,8 @@ retry:
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
- pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
@@ -2884,9 +3187,9 @@ retry:
* report errors preferentially.
*/
if (state) {
- if (pcie_fw & PCIE_FW_ERR)
+ if (pcie_fw & PCIE_FW_ERR_F)
*state = DEV_STATE_ERR;
- else if (pcie_fw & PCIE_FW_INIT)
+ else if (pcie_fw & PCIE_FW_INIT_F)
*state = DEV_STATE_INIT;
}
@@ -2896,7 +3199,7 @@ retry:
* for our caller.
*/
if (master_mbox == PCIE_FW_MASTER_M &&
- (pcie_fw & PCIE_FW_MASTER_VLD))
+ (pcie_fw & PCIE_FW_MASTER_VLD_F))
master_mbox = PCIE_FW_MASTER_G(pcie_fw);
break;
}
@@ -2985,7 +3288,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(PIORST | PIORSTMODE);
+ c.val = htonl(PIORST_F | PIORSTMODE_F);
c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3004,8 +3307,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* rather than a RESET ... if it's new enough to understand that ...
*/
if (ret == 0 || force) {
- t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
- t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
+ t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+ t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
PCIE_FW_HALT_F);
}
@@ -3045,7 +3348,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
- t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
+ t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
@@ -3055,21 +3358,21 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* hitting the chip with a hammer.
*/
if (mbox <= PCIE_FW_MASTER_M) {
- t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+ t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
msleep(100);
if (t4_fw_reset(adap, mbox,
- PIORST | PIORSTMODE) == 0)
+ PIORST_F | PIORSTMODE_F) == 0)
return 0;
}
- t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+ t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
msleep(2000);
} else {
int ms;
- t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+ t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
- if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
+ if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
@@ -3148,22 +3451,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
- HOSTPAGESIZEPF0(sge_hps) |
- HOSTPAGESIZEPF1(sge_hps) |
- HOSTPAGESIZEPF2(sge_hps) |
- HOSTPAGESIZEPF3(sge_hps) |
- HOSTPAGESIZEPF4(sge_hps) |
- HOSTPAGESIZEPF5(sge_hps) |
- HOSTPAGESIZEPF6(sge_hps) |
- HOSTPAGESIZEPF7(sge_hps));
+ t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
+ HOSTPAGESIZEPF0_V(sge_hps) |
+ HOSTPAGESIZEPF1_V(sge_hps) |
+ HOSTPAGESIZEPF2_V(sge_hps) |
+ HOSTPAGESIZEPF3_V(sge_hps) |
+ HOSTPAGESIZEPF4_V(sge_hps) |
+ HOSTPAGESIZEPF5_V(sge_hps) |
+ HOSTPAGESIZEPF6_V(sge_hps) |
+ HOSTPAGESIZEPF7_V(sge_hps));
if (is_t4(adap->params.chip)) {
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(fl_align_log - 5) |
- EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(fl_align_log -
+ INGPADBOUNDARY_SHIFT_X) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
} else {
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
@@ -3193,15 +3497,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
fl_align = 64;
fl_align_log = 6;
}
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
- EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
INGPACKBOUNDARY_V(fl_align_log -
- INGPACKBOUNDARY_SHIFT_X));
+ INGPACKBOUNDARY_SHIFT_X));
}
/*
* Adjust various SGE Free List Host Buffer Sizes.
@@ -3224,15 +3528,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* Default Firmware Configuration File but we need to adjust it for
* this host's cache line size.
*/
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
- (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
+ (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
& ~(fl_align-1));
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
- (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
+ (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
& ~(fl_align-1));
- t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
return 0;
}
@@ -3917,12 +4221,12 @@ int t4_wait_dev_ready(void __iomem *regs)
{
u32 whoami;
- whoami = readl(regs + PL_WHOAMI);
+ whoami = readl(regs + PL_WHOAMI_A);
if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
return 0;
msleep(500);
- whoami = readl(regs + PL_WHOAMI);
+ whoami = readl(regs + PL_WHOAMI_A);
return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}
@@ -3946,7 +4250,7 @@ static int get_flash_params(struct adapter *adap)
ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
if (!ret)
ret = sf1_read(adap, 3, 0, 1, &info);
- t4_write_reg(adap, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
@@ -3969,7 +4273,7 @@ static int get_flash_params(struct adapter *adap)
return -EINVAL;
adap->params.sf_size = 1 << info;
adap->params.sf_fw_start =
- t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+ t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
if (adap->params.sf_size < FLASH_MIN_SIZE)
dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
@@ -3993,7 +4297,7 @@ int t4_prep_adapter(struct adapter *adapter)
u32 pl_rev;
get_pci_mode(adapter, &adapter->params.pci);
- pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
+ pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
ret = get_flash_params(adapter);
if (ret < 0) {
@@ -4019,6 +4323,7 @@ int t4_prep_adapter(struct adapter *adapter)
return -EINVAL;
}
+ adapter->params.cim_la_size = CIMLA_SIZE;
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
@@ -4133,7 +4438,7 @@ int t4_init_sge_params(struct adapter *adapter)
/* Extract the SGE Page Size for our PF.
*/
- hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
+ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
@@ -4142,10 +4447,10 @@ int t4_init_sge_params(struct adapter *adapter)
*/
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
- qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
- sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
- qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
- sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+ qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+ qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
return 0;
}
@@ -4161,9 +4466,9 @@ int t4_init_tp_params(struct adapter *adap)
int chan;
u32 v;
- v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
- adap->params.tp.tre = TIMERRESOLUTION_GET(v);
- adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+ adap->params.tp.tre = TIMERRESOLUTION_G(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
for (chan = 0; chan < NCHAN; chan++)
@@ -4172,27 +4477,27 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP);
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG);
+ TP_INGRESS_CONFIG_A);
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
- F_PROTOCOL);
+ PROTOCOL_F);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
* represents the presence of an Outer VLAN instead of a VNIC ID.
*/
- if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ if ((adap->params.tp.ingress_config & VNIC_F) == 0)
adap->params.tp.vnic_shift = -1;
return 0;
@@ -4218,35 +4523,35 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
- case F_FCOE:
- field_shift += W_FT_FCOE;
+ case FCOE_F:
+ field_shift += FT_FCOE_W;
break;
- case F_PORT:
- field_shift += W_FT_PORT;
+ case PORT_F:
+ field_shift += FT_PORT_W;
break;
- case F_VNIC_ID:
- field_shift += W_FT_VNIC_ID;
+ case VNIC_ID_F:
+ field_shift += FT_VNIC_ID_W;
break;
- case F_VLAN:
- field_shift += W_FT_VLAN;
+ case VLAN_F:
+ field_shift += FT_VLAN_W;
break;
- case F_TOS:
- field_shift += W_FT_TOS;
+ case TOS_F:
+ field_shift += FT_TOS_W;
break;
- case F_PROTOCOL:
- field_shift += W_FT_PROTOCOL;
+ case PROTOCOL_F:
+ field_shift += FT_PROTOCOL_W;
break;
- case F_ETHERTYPE:
- field_shift += W_FT_ETHERTYPE;
+ case ETHERTYPE_F:
+ field_shift += FT_ETHERTYPE_W;
break;
- case F_MACMATCH:
- field_shift += W_FT_MACMATCH;
+ case MACMATCH_F:
+ field_shift += FT_MACMATCH_W;
break;
- case F_MPSHITTYPE:
- field_shift += W_FT_MPSHITTYPE;
+ case MPSHITTYPE_F:
+ field_shift += FT_MPSHITTYPE_W;
break;
- case F_FRAGMENTATION:
- field_shift += W_FT_FRAGMENTATION;
+ case FRAGMENTATION_F:
+ field_shift += FT_FRAGMENTATION_W;
break;
}
}
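
For reference, t4_filter_field_shift() computes a field's offset by summing the widths of every lower-order field enabled in the Compressed Filter Mode, which is what the switch above accumulates. The standalone sketch below shows the same accumulation with hypothetical field selectors and widths standing in for the *_F / FT_*_W constants.

/* Sketch of the width accumulation in t4_filter_field_shift(): walk the
 * selector bits below the requested field and add the width of each one
 * that is enabled.  Field bits and widths here are invented.
 */
#include <stdio.h>

#define FIELD_A_F	0x1
#define FIELD_B_F	0x2
#define FIELD_C_F	0x4

#define FIELD_A_W	1	/* hypothetical widths, in bits */
#define FIELD_B_W	3
#define FIELD_C_W	9

static int filter_field_shift(unsigned int filter_mode, int filter_sel)
{
	int sel, field_shift = 0;

	if (!(filter_mode & filter_sel))
		return -1;			/* field not part of the mode */

	for (sel = 1; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case FIELD_A_F:
			field_shift += FIELD_A_W;
			break;
		case FIELD_B_F:
			field_shift += FIELD_B_W;
			break;
		case FIELD_C_F:
			field_shift += FIELD_C_W;
			break;
		}
	}
	return field_shift;
}

int main(void)
{
	/* with A and C enabled, C sits just above A's single bit */
	printf("shift of C = %d\n",
	       filter_field_shift(FIELD_A_F | FIELD_C_F, FIELD_C_F));
	return 0;
}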
@@ -4311,3 +4616,289 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
}
return 0;
}
+
+/**
+ * t4_read_cimq_cfg - read CIM queue configuration
+ * @adap: the adapter
+ * @base: holds the queue base addresses in bytes
+ * @size: holds the queue sizes in bytes
+ * @thres: holds the queue full thresholds in bytes
+ *
+ * Returns the current configuration of the CIM queues, starting with
+ * the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int i, v;
+ int cim_num_obq = is_t4(adap->params.chip) ?
+ CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+ for (i = 0; i < CIM_NUM_IBQ; i++) {
+ t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
+ QUENUMSELECT_V(i));
+ v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+ /* value is in 256-byte units */
+ *base++ = CIMQBASE_G(v) * 256;
+ *size++ = CIMQSIZE_G(v) * 256;
+ *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
+ }
+ for (i = 0; i < cim_num_obq; i++) {
+ t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+ QUENUMSELECT_V(i));
+ v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+ /* value is in 256-byte units */
+ *base++ = CIMQBASE_G(v) * 256;
+ *size++ = CIMQSIZE_G(v) * 256;
+ }
+}
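
t4_read_cimq_cfg() above decodes each queue's base and size with the new _G() extract macros and scales them by the hardware's 256-byte units. The standalone sketch below illustrates that decode-and-scale pattern under the same _S/_M/_G naming convention; the bit positions are hypothetical, not the real CIMQBASE/CIMQSIZE layout from t4_regs.h.

/* Sketch of the _S/_M/_G field-macro convention used above.  DEMO_* bit
 * positions are invented purely to show the decode-and-scale pattern.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_QBASE_S	16
#define DEMO_QBASE_M	0xffffU
#define DEMO_QBASE_G(x)	(((x) >> DEMO_QBASE_S) & DEMO_QBASE_M)

#define DEMO_QSIZE_S	0
#define DEMO_QSIZE_M	0xffffU
#define DEMO_QSIZE_G(x)	(((x) >> DEMO_QSIZE_S) & DEMO_QSIZE_M)

int main(void)
{
	uint32_t v = 0x00400010;	/* pretend queue config register value */

	/* values are in 256-byte units, as in t4_read_cimq_cfg() */
	printf("base=%u bytes size=%u bytes\n",
	       DEMO_QBASE_G(v) * 256, DEMO_QSIZE_G(v) * 256);
	return 0;
}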
+
+/**
+ * t4_read_cim_ibq - read the contents of a CIM inbound queue
+ * @adap: the adapter
+ * @qid: the queue index
+ * @data: where to store the queue contents
+ * @n: capacity of @data in 32-bit words
+ *
+ * Reads the contents of the selected CIM queue starting at address 0 up
+ * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
+ * error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+ int i, err, attempts;
+ unsigned int addr;
+ const unsigned int nwords = CIM_IBQ_SIZE * 4;
+
+ if (qid > 5 || (n & 3))
+ return -EINVAL;
+
+ addr = qid * nwords;
+ if (n > nwords)
+ n = nwords;
+
+ /* It might take 3-10ms before the IBQ debug read access is allowed.
+ * Wait for up to 1 second with a delay of 1 usec between polls.
+ */
+ attempts = 1000000;
+
+ for (i = 0; i < n; i++, addr++) {
+ t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
+ IBQDBGEN_F);
+ err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
+ attempts, 1);
+ if (err)
+ return err;
+ *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
+ }
+ t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
+ return i;
+}
+
+/**
+ * t4_read_cim_obq - read the contents of a CIM outbound queue
+ * @adap: the adapter
+ * @qid: the queue index
+ * @data: where to store the queue contents
+ * @n: capacity of @data in 32-bit words
+ *
+ * Reads the contents of the selected CIM queue starting at address 0 up
+ * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
+ * error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+ int i, err;
+ unsigned int addr, v, nwords;
+ int cim_num_obq = is_t4(adap->params.chip) ?
+ CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+ if ((qid > (cim_num_obq - 1)) || (n & 3))
+ return -EINVAL;
+
+ t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+ QUENUMSELECT_V(qid));
+ v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+
+ addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */
+ nwords = CIMQSIZE_G(v) * 64; /* same */
+ if (n > nwords)
+ n = nwords;
+
+ for (i = 0; i < n; i++, addr++) {
+ t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
+ OBQDBGEN_F);
+ err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
+ 2, 1);
+ if (err)
+ return err;
+ *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
+ }
+ t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
+ return i;
+}
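
Both queue readers above program a debug address and then spin in t4_wait_op_done() until the BUSY flag deasserts before pulling out a data word. The snippet below is a minimal user-space sketch of that poll-until-done pattern against a faked register; the register address, BUSY bit and delay handling are stand-ins, not the driver's actual helper.

/* Sketch of the poll-until-done loop used by the IBQ/OBQ debug reads.
 * read_reg() fakes hardware that reports BUSY for the first two polls.
 */
#include <stdio.h>
#include <stdint.h>

static int polls;

static uint32_t read_reg(uint32_t addr)
{
	(void)addr;
	return (++polls < 3) ? 0x1 : 0x0;	/* bit 0 = pretend BUSY flag */
}

static int wait_op_done(uint32_t reg, uint32_t mask, int polarity,
			int attempts, int delay)
{
	while (attempts--) {
		uint32_t val = read_reg(reg) & mask;

		if (!!val == polarity)
			return 0;		/* reached the wanted state */
		(void)delay;			/* a real helper would sleep here */
	}
	return -1;				/* timed out */
}

int main(void)
{
	/* wait for the pretend BUSY bit (bit 0) to deassert */
	int ret = wait_op_done(0x0, 0x1, 0, 10, 1);

	printf("wait_op_done: %d after %d polls\n", ret, polls);
	return 0;
}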
+
+/**
+ * t4_cim_read - read a block from CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
+ ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
+ }
+ return ret;
+}
+
+/**
+ * t4_cim_write - write a block into CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to write
+ * @valp: set of values to write
+ *
+ * Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
+ t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
+ ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+ 0, 5, 2);
+ }
+ return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+ unsigned int val)
+{
+ return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ * t4_cim_read_la - read CIM LA capture buffer
+ * @adap: the adapter
+ * @la_buf: where to store the LA data
+ * @wrptr: the HW write pointer within the capture buffer
+ *
+ * Reads the contents of the CIM LA buffer with the most recent entry at
+ * the end of the returned data and with the entry at @wrptr first.
+ * We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+ int i, ret;
+ unsigned int cfg, val, idx;
+
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+ if (ret)
+ return ret;
+
+ if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
+ ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+ if (ret)
+ goto restart;
+
+ idx = UPDBGLAWRPTR_G(val);
+ if (wrptr)
+ *wrptr = idx;
+
+ for (i = 0; i < adap->params.cim_la_size; i++) {
+ ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+ UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
+ if (ret)
+ break;
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+ if (ret)
+ break;
+ if (val & UPDBGLARDEN_F) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
+ if (ret)
+ break;
+ idx = (idx + 1) & UPDBGLARDPTR_M;
+ }
+restart:
+ if (cfg & UPDBGLAEN_F) {
+ int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+ cfg & ~UPDBGLARDEN_F);
+ if (!ret)
+ ret = r;
+ }
+ return ret;
+}
+
+/**
+ * t4_tp_read_la - read TP LA capture buffer
+ * @adap: the adapter
+ * @la_buf: where to store the LA data
+ * @wrptr: the HW write pointer within the capture buffer
+ *
+ * Reads the contents of the TP LA buffer with the most recent entry at
+ * the end of the returned data and with the entry at @wrptr first.
+ * We leave the LA in the running state we find it in.
+ */
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
+{
+ bool last_incomplete;
+ unsigned int i, cfg, val, idx;
+
+ cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
+ if (cfg & DBGLAENABLE_F) /* freeze LA */
+ t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
+ adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
+
+ val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
+ idx = DBGLAWPTR_G(val);
+ last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
+ if (last_incomplete)
+ idx = (idx + 1) & DBGLARPTR_M;
+ if (wrptr)
+ *wrptr = idx;
+
+ val &= 0xffff;
+ val &= ~DBGLARPTR_V(DBGLARPTR_M);
+ val |= adap->params.tp.la_mask;
+
+ for (i = 0; i < TPLA_SIZE; i++) {
+ t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
+ la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
+ idx = (idx + 1) & DBGLARPTR_M;
+ }
+
+ /* Wipe out last entry if it isn't valid */
+ if (last_incomplete)
+ la_buf[TPLA_SIZE - 1] = ~0ULL;
+
+ if (cfg & DBGLAENABLE_F) /* restore running state */
+ t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
+ cfg | adap->params.tp.la_mask);
+}
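
Both LA readers above walk a circular capture buffer starting at the hardware write pointer, so the oldest entry comes out first and the most recent one lands at the end. The standalone sketch below shows that wrap-around copy; LA_SIZE and LA_PTR_M are hypothetical stand-ins for TPLA_SIZE and a DBGLARPTR_M-style read-pointer mask.

/* Sketch of the wrap-around copy performed by t4_cim_read_la()/t4_tp_read_la():
 * start at the write pointer and mask the index so it wraps.
 */
#include <stdio.h>

#define LA_SIZE		8
#define LA_PTR_M	(LA_SIZE - 1)	/* valid because LA_SIZE is a power of 2 */

int main(void)
{
	unsigned long long capture[LA_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned long long out[LA_SIZE];
	unsigned int wrptr = 3;		/* pretend HW write pointer */
	unsigned int idx = wrptr, i;

	for (i = 0; i < LA_SIZE; i++) {
		out[i] = capture[idx];		/* oldest entry first */
		idx = (idx + 1) & LA_PTR_M;	/* wrap like DBGLARPTR_M */
	}

	for (i = 0; i < LA_SIZE; i++)
		printf("%llu ", out[i]);
	printf("\n");
	return 0;
}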
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c19a90e7f7d1..380b15c0417a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -48,6 +48,7 @@ enum {
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
L2T_SIZE = 4096, /* # of L2T entries */
+ PM_NSTATS = 5, /* # of PM stats */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
@@ -56,6 +57,17 @@ enum {
};
enum {
+ CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_OBQ = 6, /* # of CIM OBQs */
+ CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
+ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+ CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
+ CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */
+ TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
+ ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
+};
+
+enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
};
@@ -110,6 +122,18 @@ enum {
SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
};
+/* PCI-e memory window access */
+enum pcie_memwin {
+ MEMWIN_NIC = 0,
+ MEMWIN_RSVD1 = 1,
+ MEMWIN_RSVD2 = 2,
+ MEMWIN_RDMA = 3,
+ MEMWIN_RSVD4 = 4,
+ MEMWIN_FOISCSI = 5,
+ MEMWIN_CSIOSTOR = 6,
+ MEMWIN_RSVD7 = 7,
+};
+
struct sge_qstat { /* data written to SGE queue status entries */
__be32 qid;
__be16 cidx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 0f89f68948ab..0fb975e258b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -124,6 +124,13 @@ enum CPL_error {
};
enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_FILTER = 2,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
ULP_MODE_NONE = 0,
ULP_MODE_ISCSI = 2,
ULP_MODE_RDMA = 4,
@@ -160,16 +167,28 @@ union opcode_tid {
u8 opcode;
};
-#define CPL_OPCODE(x) ((x) << 24)
-#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
-#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define CPL_OPCODE_S 24
+#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
+#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
+#define TID_G(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24 bits */
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
+
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
-#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
/* partitioning of TID fields that also carry a queue id */
-#define GET_TID_TID(x) ((x) & 0x3fff)
-#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
-#define TID_QID(x) ((x) << 14)
+#define TID_TID_S 0
+#define TID_TID_M 0x3fff
+#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
+
+#define TID_QID_S 14
+#define TID_QID_M 0x3ff
+#define TID_QID_V(x) ((x) << TID_QID_S)
+#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
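
The hunk above replaces the ad-hoc CPL_OPCODE()/GET_TID() macros with the _S/_V/_G naming used throughout this patch. As a quick illustration, the standalone sketch below packs and unpacks an opcode/tid word with the same shifts and masks defined here; the be32_to_cpu() byte-order conversion is deliberately omitted.

/* Sketch of the CPL_OPCODE_V()/TID_G() pack-and-unpack pair: opcode in the
 * top byte, 24-bit tid below it, exactly as defined in the hunk above.
 */
#include <stdio.h>
#include <stdint.h>

#define CPL_OPCODE_S	24
#define CPL_OPCODE_V(x)	((x) << CPL_OPCODE_S)
#define CPL_OPCODE_G(x)	(((x) >> CPL_OPCODE_S) & 0xFF)
#define TID_G(x)	((x) & 0xFFFFFF)

#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))

int main(void)
{
	uint32_t word = MK_OPCODE_TID(0x2Bu, 0x001234u);

	printf("opcode=0x%x tid=0x%x\n", CPL_OPCODE_G(word), TID_G(word));
	return 0;
}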
struct rss_header {
u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
};
/* wr_hi fields */
-#define S_WR_OP 24
-#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define WR_OP_S 24
+#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
#define WR_HDR struct work_request_hdr wr
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
__be32 local_ip;
__be32 peer_ip;
__be64 opt0;
-#define NO_CONG(x) ((x) << 4)
-#define DELACK(x) ((x) << 5)
-#define DSCP(x) ((x) << 22)
-#define TCAM_BYPASS(x) ((u64)(x) << 48)
-#define NAGLE(x) ((u64)(x) << 49)
__be64 opt1;
-#define SYN_RSS_ENABLE (1 << 0)
-#define SYN_RSS_QUEUE(x) ((x) << 2)
-#define CONN_POLICY_ASK (1 << 22)
};
+/* option 0 fields */
+#define NO_CONG_S 4
+#define NO_CONG_V(x) ((x) << NO_CONG_S)
+#define NO_CONG_F NO_CONG_V(1U)
+
+#define DELACK_S 5
+#define DELACK_V(x) ((x) << DELACK_S)
+#define DELACK_F DELACK_V(1U)
+
+#define DSCP_S 22
+#define DSCP_M 0x3F
+#define DSCP_V(x) ((x) << DSCP_S)
+#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
+
+#define TCAM_BYPASS_S 48
+#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
+#define TCAM_BYPASS_F TCAM_BYPASS_V(1ULL)
+
+#define NAGLE_S 49
+#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
+#define NAGLE_F NAGLE_V(1ULL)
+
+/* option 1 fields */
+#define SYN_RSS_ENABLE_S 0
+#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
+#define SYN_RSS_ENABLE_F SYN_RSS_ENABLE_V(1U)
+
+#define SYN_RSS_QUEUE_S 2
+#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
+
+#define CONN_POLICY_S 22
+#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
+
struct cpl_pass_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
-#define RX_COALESCE_VALID(x) ((x) << 11)
-#define RX_COALESCE(x) ((x) << 12)
-#define PACE(x) ((x) << 16)
-#define TX_QUEUE(x) ((x) << 23)
-#define CCTRL_ECN(x) ((x) << 27)
-#define TSTAMPS_EN(x) ((x) << 29)
-#define SACK_EN(x) ((x) << 30)
__be64 opt0;
};
+/* option 2 fields */
+#define RX_COALESCE_VALID_S 11
+#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
+#define RX_COALESCE_VALID_F RX_COALESCE_VALID_V(1U)
+
+#define RX_COALESCE_S 12
+#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
+
+#define PACE_S 16
+#define PACE_V(x) ((x) << PACE_S)
+
+#define TX_QUEUE_S 23
+#define TX_QUEUE_M 0x7
+#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
+#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
+
+#define CCTRL_ECN_S 27
+#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
+#define CCTRL_ECN_F CCTRL_ECN_V(1U)
+
+#define TSTAMPS_EN_S 29
+#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
+#define TSTAMPS_EN_F TSTAMPS_EN_V(1U)
+
+#define SACK_EN_S 30
+#define SACK_EN_V(x) ((x) << SACK_EN_S)
+#define SACK_EN_F SACK_EN_V(1U)
+
struct cpl_t5_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
struct cpl_act_open_rpl {
union opcode_tid ot;
__be32 atid_status;
-#define GET_AOPEN_STATUS(x) ((x) & 0xff)
-#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
};
+/* cpl_act_open_rpl.atid_status fields */
+#define AOPEN_STATUS_S 0
+#define AOPEN_STATUS_M 0xFF
+#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
+
+#define AOPEN_ATID_S 8
+#define AOPEN_ATID_M 0xFFFFFF
+#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
+
struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
-#define PASS_OPEN_TID(x) ((x) << 0)
-#define PASS_OPEN_TOS(x) ((x) << 24)
-#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
-#define GET_POPEN_TID(x) ((x) & 0xffffff)
-#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
__be16 tcp_opt;
-#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
-#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
-#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
-#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
-#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
__be32 snd_isn;
__be32 rcv_isn;
};
+/* cpl_pass_establish.tos_stid fields */
+#define PASS_OPEN_TID_S 0
+#define PASS_OPEN_TID_M 0xFFFFFF
+#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
+#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
+
+#define PASS_OPEN_TOS_S 24
+#define PASS_OPEN_TOS_M 0xFF
+#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
+#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define TCPOPT_WSCALE_OK_S 5
+#define TCPOPT_WSCALE_OK_M 0x1
+#define TCPOPT_WSCALE_OK_G(x) \
+ (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
+
+#define TCPOPT_SACK_S 6
+#define TCPOPT_SACK_M 0x1
+#define TCPOPT_SACK_G(x) (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
+
+#define TCPOPT_TSTAMP_S 7
+#define TCPOPT_TSTAMP_M 0x1
+#define TCPOPT_TSTAMP_G(x) (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
+
+#define TCPOPT_SND_WSCALE_S 8
+#define TCPOPT_SND_WSCALE_M 0xF
+#define TCPOPT_SND_WSCALE_G(x) \
+ (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
+
+#define TCPOPT_MSS_S 12
+#define TCPOPT_MSS_M 0xF
+#define TCPOPT_MSS_G(x) (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
+
struct cpl_act_establish {
union opcode_tid ot;
__be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
-#define QUEUENO(x) ((x) << 0)
-#define REPLY_CHAN(x) ((x) << 14)
-#define NO_REPLY(x) ((x) << 15)
__be16 cookie;
};
+/* cpl_get_tcb.reply_ctrl fields */
+#define QUEUENO_S 0
+#define QUEUENO_V(x) ((x) << QUEUENO_S)
+
+#define REPLY_CHAN_S 14
+#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
+#define REPLY_CHAN_F REPLY_CHAN_V(1U)
+
+#define NO_REPLY_S 15
+#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+#define NO_REPLY_F NO_REPLY_V(1U)
+
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
__be16 word_cookie;
-#define TCB_WORD(x) ((x) << 0)
-#define TCB_COOKIE(x) ((x) << 5)
-#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
+/* cpl_set_tcb_field.word_cookie fields */
+#define TCB_WORD_S 0
+#define TCB_WORD(x) ((x) << TCB_WORD_S)
+
+#define TCB_COOKIE_S 5
+#define TCB_COOKIE_M 0x7
+#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
+#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
+
struct cpl_set_tcb_rpl {
union opcode_tid ot;
__be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
-#define LISTSVR_IPV6(x) ((x) << 14)
__be16 rsvd;
};
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define LISTSVR_IPV6_S 14
+#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
+#define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U)
+
struct cpl_close_listsvr_rpl {
union opcode_tid ot;
u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
};
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define LSO_TCPHDR_LEN_S 0
+#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
+
+#define LSO_IPHDR_LEN_S 4
+#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
+
+#define LSO_ETHHDR_LEN_S 16
+#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
+
+#define LSO_IPV6_S 20
+#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
+#define LSO_IPV6_F LSO_IPV6_V(1U)
+
+#define LSO_LAST_SLICE_S 22
+#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
+#define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U)
+
+#define LSO_FIRST_SLICE_S 23
+#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
+#define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U)
+
+#define LSO_OPCODE_S 24
+#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
+
+#define LSO_T5_XFER_SIZE_S 0
+#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
+
struct cpl_tx_pkt_lso {
WR_HDR;
struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
struct cpl_iscsi_hdr {
union opcode_tid ot;
__be16 pdu_len_ddp;
-#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
-#define ISCSI_DDP (1 << 15)
__be16 len;
__be32 seq;
__be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
u8 status;
};
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define ISCSI_PDU_LEN_S 0
+#define ISCSI_PDU_LEN_M 0x7FFF
+#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
+#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
+
+#define ISCSI_DDP_S 15
+#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
+#define ISCSI_DDP_F ISCSI_DDP_V(1U)
+
struct cpl_rx_data {
union opcode_tid ot;
__be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
__be16 vlan;
__be16 len;
__be32 l2info;
-#define RXF_UDP (1 << 22)
-#define RXF_TCP (1 << 23)
-#define RXF_IP (1 << 24)
-#define RXF_IP6 (1 << 25)
__be16 hdr_len;
__be16 err_vec;
};
+#define RXF_UDP_S 22
+#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
+#define RXF_UDP_F RXF_UDP_V(1U)
+
+#define RXF_TCP_S 23
+#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
+#define RXF_TCP_F RXF_TCP_V(1U)
+
+#define RXF_IP_S 24
+#define RXF_IP_V(x) ((x) << RXF_IP_S)
+#define RXF_IP_F RXF_IP_V(1U)
+
+#define RXF_IP6_S 25
+#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
+#define RXF_IP6_F RXF_IP6_V(1U)
+
/* rx_pkt.l2info fields */
-#define S_RX_ETHHDR_LEN 0
-#define M_RX_ETHHDR_LEN 0x1F
-#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
-#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
-
-#define S_RX_T5_ETHHDR_LEN 0
-#define M_RX_T5_ETHHDR_LEN 0x3F
-#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
-#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
-
-#define S_RX_MACIDX 8
-#define M_RX_MACIDX 0x1FF
-#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
-#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
-
-#define S_RXF_SYN 21
-#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
-#define F_RXF_SYN V_RXF_SYN(1U)
-
-#define S_RX_CHAN 28
-#define M_RX_CHAN 0xF
-#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
-#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+#define RX_ETHHDR_LEN_S 0
+#define RX_ETHHDR_LEN_M 0x1F
+#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
+#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
+
+#define RX_T5_ETHHDR_LEN_S 0
+#define RX_T5_ETHHDR_LEN_M 0x3F
+#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
+#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
+
+#define RX_MACIDX_S 8
+#define RX_MACIDX_M 0x1FF
+#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
+#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
+
+#define RXF_SYN_S 21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F RXF_SYN_V(1U)
+
+#define RX_CHAN_S 28
+#define RX_CHAN_M 0xF
+#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
+#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
/* rx_pkt.hdr_len fields */
-#define S_RX_TCPHDR_LEN 0
-#define M_RX_TCPHDR_LEN 0x3F
-#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
-#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+#define RX_TCPHDR_LEN_S 0
+#define RX_TCPHDR_LEN_M 0x3F
+#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
+#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
-#define S_RX_IPHDR_LEN 6
-#define M_RX_IPHDR_LEN 0x3FF
-#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
-#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+#define RX_IPHDR_LEN_S 6
+#define RX_IPHDR_LEN_M 0x3FF
+#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
+#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
struct cpl_trace_pkt {
u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
__be16 params;
-#define L2T_W_INFO(x) ((x) << 2)
-#define L2T_W_PORT(x) ((x) << 8)
-#define L2T_W_NOREPLY(x) ((x) << 15)
__be16 l2t_idx;
__be16 vlan;
u8 dst_mac[6];
};
+/* cpl_l2t_write_req.params fields */
+#define L2T_W_INFO_S 2
+#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
+
+#define L2T_W_PORT_S 8
+#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
+
+#define L2T_W_NOREPLY_S 15
+#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
+#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U)
+
struct cpl_l2t_write_rpl {
union opcode_tid ot;
u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
struct cpl_sge_egr_update {
__be32 opcode_qid;
-#define EGR_QID(x) ((x) & 0x1FFFF)
__be16 cidx;
__be16 pidx;
};
+/* cpl_sge_egr_update.opcode_qid fields */
+#define EGR_QID_S 0
+#define EGR_QID_M 0x1FFFF
+#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
+
/* cpl_fw*.type values */
enum {
FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
struct ulptx_sgl {
__be32 cmd_nsge;
-#define ULPTX_NSGE(x) ((x) << 0)
-#define ULPTX_MORE (1U << 23)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
};
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+
+#define ULPTX_MORE_S 23
+#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
+#define ULPTX_MORE_F ULPTX_MORE_V(1U)
+
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
__be32 len16; /* command length */
__be32 dlen; /* data length in 32-byte units */
__be32 lock_addr;
-#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+#define ULP_MEMIO_LOCK_S 31
+#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
+#define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U)
+
/* additional ulp_mem_io.cmd fields */
#define ULP_MEMIO_ORDER_S 23
#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
-#define S_T5_ULP_MEMIO_IMM 23
-#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
-#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
-
-#define S_T5_ULP_MEMIO_ORDER 22
-#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
-#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+#define T5_ULP_MEMIO_ORDER_S 22
+#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
+#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
/* ulp_mem_io.lock_addr fields */
#define ULP_MEMIO_ADDR_S 0
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 9e4f95a91fb4..ddfb5b846045 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index d7bd34ee65bd..231a725f6d5d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,460 +63,779 @@
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
-#define SGE_PF_KDOORBELL 0x0
-#define QID_MASK 0xffff8000U
-#define QID_SHIFT 15
-#define QID(x) ((x) << QID_SHIFT)
-#define DBPRIO(x) ((x) << 14)
-#define DBTYPE(x) ((x) << 13)
-#define PIDX_MASK 0x00003fffU
-#define PIDX_SHIFT 0
-#define PIDX(x) ((x) << PIDX_SHIFT)
-#define PIDX_SHIFT_T5 0
-#define PIDX_T5(x) ((x) << PIDX_SHIFT_T5)
-
-
-#define SGE_TIMERREGS 6
-#define SGE_PF_GTS 0x4
-#define INGRESSQID_MASK 0xffff0000U
-#define INGRESSQID_SHIFT 16
-#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
-#define TIMERREG_MASK 0x0000e000U
-#define TIMERREG_SHIFT 13
-#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
-#define SEINTARM_MASK 0x00001000U
-#define SEINTARM_SHIFT 12
-#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
-#define CIDXINC_MASK 0x00000fffU
-#define CIDXINC_SHIFT 0
-#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
-
-#define X_RXPKTCPLMODE_SPLIT 1
-#define X_INGPADBOUNDARY_SHIFT 5
-
-#define SGE_CONTROL 0x1008
-#define SGE_CONTROL2_A 0x1124
-#define DCASYSTYPE 0x00080000U
-#define RXPKTCPLMODE_MASK 0x00040000U
-#define RXPKTCPLMODE_SHIFT 18
-#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT)
-#define EGRSTATUSPAGESIZE_MASK 0x00020000U
-#define EGRSTATUSPAGESIZE_SHIFT 17
-#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT)
-#define PKTSHIFT_MASK 0x00001c00U
-#define PKTSHIFT_SHIFT 10
-#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
-#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
-#define INGPCIEBOUNDARY_32B_X 0
-#define INGPCIEBOUNDARY_MASK 0x00000380U
-#define INGPCIEBOUNDARY_SHIFT 7
-#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
-#define INGPADBOUNDARY_MASK 0x00000070U
-#define INGPADBOUNDARY_SHIFT 4
-#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
-#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
- >> INGPADBOUNDARY_SHIFT)
-#define INGPACKBOUNDARY_16B_X 0
-#define INGPACKBOUNDARY_SHIFT_X 5
+#define SGE_PF_KDOORBELL_A 0x0
+
+#define QID_S 15
+#define QID_V(x) ((x) << QID_S)
+
+#define DBPRIO_S 14
+#define DBPRIO_V(x) ((x) << DBPRIO_S)
+#define DBPRIO_F DBPRIO_V(1U)
+
+#define PIDX_S 0
+#define PIDX_V(x) ((x) << PIDX_S)
+
+#define SGE_VF_KDOORBELL_A 0x0
+
+#define DBTYPE_S 13
+#define DBTYPE_V(x) ((x) << DBTYPE_S)
+#define DBTYPE_F DBTYPE_V(1U)
+
+#define PIDX_T5_S 0
+#define PIDX_T5_M 0x1fffU
+#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
+#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
+
+#define SGE_PF_GTS_A 0x4
+
+#define INGRESSQID_S 16
+#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
+
+#define TIMERREG_S 13
+#define TIMERREG_V(x) ((x) << TIMERREG_S)
+
+#define SEINTARM_S 12
+#define SEINTARM_V(x) ((x) << SEINTARM_S)
+
+#define CIDXINC_S 0
+#define CIDXINC_M 0xfffU
+#define CIDXINC_V(x) ((x) << CIDXINC_S)
+
+#define SGE_CONTROL_A 0x1008
+#define SGE_CONTROL2_A 0x1124
+
+#define RXPKTCPLMODE_S 18
+#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
+#define RXPKTCPLMODE_F RXPKTCPLMODE_V(1U)
+
+#define EGRSTATUSPAGESIZE_S 17
+#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
+#define EGRSTATUSPAGESIZE_F EGRSTATUSPAGESIZE_V(1U)
+
+#define PKTSHIFT_S 10
+#define PKTSHIFT_M 0x7U
+#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
+#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
+
+#define INGPCIEBOUNDARY_S 7
+#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
+
+#define INGPADBOUNDARY_S 4
+#define INGPADBOUNDARY_M 0x7U
+#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
+#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
+
+#define EGRPCIEBOUNDARY_S 1
+#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
#define INGPACKBOUNDARY_S 16
#define INGPACKBOUNDARY_M 0x7U
#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
& INGPACKBOUNDARY_M)
-#define EGRPCIEBOUNDARY_MASK 0x0000000eU
-#define EGRPCIEBOUNDARY_SHIFT 1
-#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
-#define GLOBALENABLE 0x00000001U
-#define SGE_HOST_PAGE_SIZE 0x100c
+#define GLOBALENABLE_S 0
+#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
+#define GLOBALENABLE_F GLOBALENABLE_V(1U)
+
+#define SGE_HOST_PAGE_SIZE_A 0x100c
+
+#define HOSTPAGESIZEPF7_S 28
+#define HOSTPAGESIZEPF7_M 0xfU
+#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
+#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
+
+#define HOSTPAGESIZEPF6_S 24
+#define HOSTPAGESIZEPF6_M 0xfU
+#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
+#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
+
+#define HOSTPAGESIZEPF5_S 20
+#define HOSTPAGESIZEPF5_M 0xfU
+#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
+#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
+
+#define HOSTPAGESIZEPF4_S 16
+#define HOSTPAGESIZEPF4_M 0xfU
+#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
+#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
+
+#define HOSTPAGESIZEPF3_S 12
+#define HOSTPAGESIZEPF3_M 0xfU
+#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
+#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
+
+#define HOSTPAGESIZEPF2_S 8
+#define HOSTPAGESIZEPF2_M 0xfU
+#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
+#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
+
+#define HOSTPAGESIZEPF1_S 4
+#define HOSTPAGESIZEPF1_M 0xfU
+#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
+#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
+
+#define HOSTPAGESIZEPF0_S 0
+#define HOSTPAGESIZEPF0_M 0xfU
+#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
-#define HOSTPAGESIZEPF7_MASK 0x0000000fU
-#define HOSTPAGESIZEPF7_SHIFT 28
-#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
+#define QUEUESPERPAGEPF1_S 4
-#define HOSTPAGESIZEPF6_MASK 0x0000000fU
-#define HOSTPAGESIZEPF6_SHIFT 24
-#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT)
+#define QUEUESPERPAGEPF0_S 0
+#define QUEUESPERPAGEPF0_M 0xfU
+#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
+#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
-#define HOSTPAGESIZEPF5_MASK 0x0000000fU
-#define HOSTPAGESIZEPF5_SHIFT 20
-#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT)
+#define SGE_INT_CAUSE1_A 0x1024
+#define SGE_INT_CAUSE2_A 0x1030
+#define SGE_INT_CAUSE3_A 0x103c
+
+#define ERR_FLM_DBP_S 31
+#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
+#define ERR_FLM_DBP_F ERR_FLM_DBP_V(1U)
+
+#define ERR_FLM_IDMA1_S 30
+#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
+#define ERR_FLM_IDMA1_F ERR_FLM_IDMA1_V(1U)
+
+#define ERR_FLM_IDMA0_S 29
+#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
+#define ERR_FLM_IDMA0_F ERR_FLM_IDMA0_V(1U)
+
+#define ERR_FLM_HINT_S 28
+#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
+#define ERR_FLM_HINT_F ERR_FLM_HINT_V(1U)
+
+#define ERR_PCIE_ERROR3_S 27
+#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
+#define ERR_PCIE_ERROR3_F ERR_PCIE_ERROR3_V(1U)
+
+#define ERR_PCIE_ERROR2_S 26
+#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
+#define ERR_PCIE_ERROR2_F ERR_PCIE_ERROR2_V(1U)
+
+#define ERR_PCIE_ERROR1_S 25
+#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
+#define ERR_PCIE_ERROR1_F ERR_PCIE_ERROR1_V(1U)
+
+#define ERR_PCIE_ERROR0_S 24
+#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
+#define ERR_PCIE_ERROR0_F ERR_PCIE_ERROR0_V(1U)
+
+#define ERR_CPL_EXCEED_IQE_SIZE_S 22
+#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
+#define ERR_CPL_EXCEED_IQE_SIZE_F ERR_CPL_EXCEED_IQE_SIZE_V(1U)
+
+#define ERR_INVALID_CIDX_INC_S 21
+#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
+#define ERR_INVALID_CIDX_INC_F ERR_INVALID_CIDX_INC_V(1U)
+
+#define ERR_CPL_OPCODE_0_S 19
+#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
+#define ERR_CPL_OPCODE_0_F ERR_CPL_OPCODE_0_V(1U)
+
+#define ERR_DROPPED_DB_S 18
+#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
+#define ERR_DROPPED_DB_F ERR_DROPPED_DB_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID1_S 17
+#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
+#define ERR_DATA_CPL_ON_HIGH_QID1_F ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID0_S 16
+#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
+#define ERR_DATA_CPL_ON_HIGH_QID0_F ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
+
+#define ERR_BAD_DB_PIDX3_S 15
+#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
+#define ERR_BAD_DB_PIDX3_F ERR_BAD_DB_PIDX3_V(1U)
+
+#define ERR_BAD_DB_PIDX2_S 14
+#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
+#define ERR_BAD_DB_PIDX2_F ERR_BAD_DB_PIDX2_V(1U)
+
+#define ERR_BAD_DB_PIDX1_S 13
+#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
+#define ERR_BAD_DB_PIDX1_F ERR_BAD_DB_PIDX1_V(1U)
+
+#define ERR_BAD_DB_PIDX0_S 12
+#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
+#define ERR_BAD_DB_PIDX0_F ERR_BAD_DB_PIDX0_V(1U)
+
+#define ERR_ING_CTXT_PRIO_S 10
+#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
+#define ERR_ING_CTXT_PRIO_F ERR_ING_CTXT_PRIO_V(1U)
+
+#define ERR_EGR_CTXT_PRIO_S 9
+#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
+#define ERR_EGR_CTXT_PRIO_F ERR_EGR_CTXT_PRIO_V(1U)
+
+#define DBFIFO_HP_INT_S 8
+#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
+#define DBFIFO_HP_INT_F DBFIFO_HP_INT_V(1U)
+
+#define DBFIFO_LP_INT_S 7
+#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
+#define DBFIFO_LP_INT_F DBFIFO_LP_INT_V(1U)
+
+#define INGRESS_SIZE_ERR_S 5
+#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
+#define INGRESS_SIZE_ERR_F INGRESS_SIZE_ERR_V(1U)
+
+#define EGRESS_SIZE_ERR_S 4
+#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
+#define EGRESS_SIZE_ERR_F EGRESS_SIZE_ERR_V(1U)
+
+#define SGE_INT_ENABLE3_A 0x1040
+#define SGE_FL_BUFFER_SIZE0_A 0x1044
+#define SGE_FL_BUFFER_SIZE1_A 0x1048
+#define SGE_FL_BUFFER_SIZE2_A 0x104c
+#define SGE_FL_BUFFER_SIZE3_A 0x1050
+#define SGE_FL_BUFFER_SIZE4_A 0x1054
+#define SGE_FL_BUFFER_SIZE5_A 0x1058
+#define SGE_FL_BUFFER_SIZE6_A 0x105c
+#define SGE_FL_BUFFER_SIZE7_A 0x1060
+#define SGE_FL_BUFFER_SIZE8_A 0x1064
+
+#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
+
+#define THRESHOLD_0_S 24
+#define THRESHOLD_0_M 0x3fU
+#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
+#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
+
+#define THRESHOLD_1_S 16
+#define THRESHOLD_1_M 0x3fU
+#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
+#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
+
+#define THRESHOLD_2_S 8
+#define THRESHOLD_2_M 0x3fU
+#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
+#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
+
+#define THRESHOLD_3_S 0
+#define THRESHOLD_3_M 0x3fU
+#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
+#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
+
+#define SGE_CONM_CTRL_A 0x1094
+
+#define EGRTHRESHOLD_S 8
+#define EGRTHRESHOLD_M 0x3fU
+#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
+#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
+
+#define EGRTHRESHOLDPACKING_S 14
+#define EGRTHRESHOLDPACKING_M 0x3fU
+#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
+#define EGRTHRESHOLDPACKING_G(x) \
+ (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+
+#define SGE_TIMESTAMP_LO_A 0x1098
+#define SGE_TIMESTAMP_HI_A 0x109c
+
+#define TSOP_S 28
+#define TSOP_M 0x3U
+#define TSOP_V(x) ((x) << TSOP_S)
+#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
+
+#define TSVAL_S 0
+#define TSVAL_M 0xfffffffU
+#define TSVAL_V(x) ((x) << TSVAL_S)
+#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
+
+#define SGE_DBFIFO_STATUS_A 0x10a4
+
+#define HP_INT_THRESH_S 28
+#define HP_INT_THRESH_M 0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
+
+#define LP_INT_THRESH_S 12
+#define LP_INT_THRESH_M 0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define NOCOALESCE_S 26
+#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
+#define NOCOALESCE_F NOCOALESCE_V(1U)
+
+#define ENABLE_DROP_S 13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F ENABLE_DROP_V(1U)
+
+#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
+
+#define TIMERVALUE0_S 16
+#define TIMERVALUE0_M 0xffffU
+#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
+#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
+
+#define TIMERVALUE1_S 0
+#define TIMERVALUE1_M 0xffffU
+#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
+#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
+
+#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
+
+#define TIMERVALUE2_S 16
+#define TIMERVALUE2_M 0xffffU
+#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
+#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
+
+#define TIMERVALUE3_S 0
+#define TIMERVALUE3_M 0xffffU
+#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
+#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
+
+#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
+
+#define TIMERVALUE4_S 16
+#define TIMERVALUE4_M 0xffffU
+#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
+#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
-#define HOSTPAGESIZEPF4_MASK 0x0000000fU
-#define HOSTPAGESIZEPF4_SHIFT 16
-#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT)
+#define TIMERVALUE5_S 0
+#define TIMERVALUE5_M 0xffffU
+#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
+#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
-#define HOSTPAGESIZEPF3_MASK 0x0000000fU
-#define HOSTPAGESIZEPF3_SHIFT 12
-#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT)
+#define SGE_DEBUG_INDEX_A 0x10cc
+#define SGE_DEBUG_DATA_HIGH_A 0x10d0
+#define SGE_DEBUG_DATA_LOW_A 0x10d4
-#define HOSTPAGESIZEPF2_MASK 0x0000000fU
-#define HOSTPAGESIZEPF2_SHIFT 8
-#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
+#define SGE_DEBUG_DATA_LOW_INDEX_2_A 0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3_A 0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8
-#define HOSTPAGESIZEPF1_M 0x0000000fU
-#define HOSTPAGESIZEPF1_S 4
-#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S)
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
-#define HOSTPAGESIZEPF0_M 0x0000000fU
-#define HOSTPAGESIZEPF0_S 0
-#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HP_INT_THRESH_S 28
+#define HP_INT_THRESH_M 0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
-#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
-#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+#define HP_COUNT_S 16
+#define HP_COUNT_M 0x7ffU
+#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
-#define QUEUESPERPAGEPF1_S 4
+#define LP_INT_THRESH_S 12
+#define LP_INT_THRESH_M 0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
-#define QUEUESPERPAGEPF0_S 0
-#define QUEUESPERPAGEPF0_MASK 0x0000000fU
-#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define LP_COUNT_S 0
+#define LP_COUNT_M 0x7ffU
+#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
-#define QUEUESPERPAGEPF0 0
-#define QUEUESPERPAGEPF1 4
+#define LP_INT_THRESH_T5_S 18
+#define LP_INT_THRESH_T5_M 0xfffU
+#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
-/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
- * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
- * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
- * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues,
- * we have a Going To Sleep register at offsets 8x+4.
- *
- * As noted above, we have many instances of the Simple Doorbell and Going To
- * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
- * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
- * avoid buffering of the writes to the Simple Doorbell and we want to use a
- * non-contiguous offset for the Going To Sleep writes in order to avoid
- * possible combining between them.
- */
-#define SGE_UDB_SIZE 128
-#define SGE_UDB_KDOORBELL 8
-#define SGE_UDB_GTS 20
-#define SGE_UDB_WCDOORBELL 64
-
-#define SGE_INT_CAUSE1 0x1024
-#define SGE_INT_CAUSE2 0x1030
-#define SGE_INT_CAUSE3 0x103c
-#define ERR_FLM_DBP 0x80000000U
-#define ERR_FLM_IDMA1 0x40000000U
-#define ERR_FLM_IDMA0 0x20000000U
-#define ERR_FLM_HINT 0x10000000U
-#define ERR_PCIE_ERROR3 0x08000000U
-#define ERR_PCIE_ERROR2 0x04000000U
-#define ERR_PCIE_ERROR1 0x02000000U
-#define ERR_PCIE_ERROR0 0x01000000U
-#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
-#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
-#define ERR_INVALID_CIDX_INC 0x00200000U
-#define ERR_ITP_TIME_PAUSED 0x00100000U
-#define ERR_CPL_OPCODE_0 0x00080000U
-#define ERR_DROPPED_DB 0x00040000U
-#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
-#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
-#define ERR_BAD_DB_PIDX3 0x00008000U
-#define ERR_BAD_DB_PIDX2 0x00004000U
-#define ERR_BAD_DB_PIDX1 0x00002000U
-#define ERR_BAD_DB_PIDX0 0x00001000U
-#define ERR_ING_PCIE_CHAN 0x00000800U
-#define ERR_ING_CTXT_PRIO 0x00000400U
-#define ERR_EGR_CTXT_PRIO 0x00000200U
-#define DBFIFO_HP_INT 0x00000100U
-#define DBFIFO_LP_INT 0x00000080U
-#define REG_ADDRESS_ERR 0x00000040U
-#define INGRESS_SIZE_ERR 0x00000020U
-#define EGRESS_SIZE_ERR 0x00000010U
-#define ERR_INV_CTXT3 0x00000008U
-#define ERR_INV_CTXT2 0x00000004U
-#define ERR_INV_CTXT1 0x00000002U
-#define ERR_INV_CTXT0 0x00000001U
-
-#define SGE_INT_ENABLE3 0x1040
-#define SGE_FL_BUFFER_SIZE0 0x1044
-#define SGE_FL_BUFFER_SIZE1 0x1048
-#define SGE_FL_BUFFER_SIZE2 0x104c
-#define SGE_FL_BUFFER_SIZE3 0x1050
-#define SGE_FL_BUFFER_SIZE4 0x1054
-#define SGE_FL_BUFFER_SIZE5 0x1058
-#define SGE_FL_BUFFER_SIZE6 0x105c
-#define SGE_FL_BUFFER_SIZE7 0x1060
-#define SGE_FL_BUFFER_SIZE8 0x1064
-
-#define SGE_INGRESS_RX_THRESHOLD 0x10a0
-#define THRESHOLD_0_MASK 0x3f000000U
-#define THRESHOLD_0_SHIFT 24
-#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
-#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
-#define THRESHOLD_1_MASK 0x003f0000U
-#define THRESHOLD_1_SHIFT 16
-#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
-#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
-#define THRESHOLD_2_MASK 0x00003f00U
-#define THRESHOLD_2_SHIFT 8
-#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
-#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
-#define THRESHOLD_3_MASK 0x0000003fU
-#define THRESHOLD_3_SHIFT 0
-#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
-#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
-
-#define SGE_CONM_CTRL 0x1094
-#define EGRTHRESHOLD_MASK 0x00003f00U
-#define EGRTHRESHOLDshift 8
-#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
-#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
-
-#define EGRTHRESHOLDPACKING_MASK 0x3fU
-#define EGRTHRESHOLDPACKING_SHIFT 14
-#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
-#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
- EGRTHRESHOLDPACKING_MASK)
-
-#define SGE_DBFIFO_STATUS 0x10a4
-#define HP_INT_THRESH_SHIFT 28
-#define HP_INT_THRESH_MASK 0xfU
-#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT)
-#define LP_INT_THRESH_SHIFT 12
-#define LP_INT_THRESH_MASK 0xfU
-#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT)
-
-#define SGE_DOORBELL_CONTROL 0x10a8
-#define ENABLE_DROP (1 << 13)
-
-#define S_NOCOALESCE 26
-#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
-#define F_NOCOALESCE V_NOCOALESCE(1U)
-
-#define SGE_TIMESTAMP_LO 0x1098
-#define SGE_TIMESTAMP_HI 0x109c
-#define S_TSVAL 0
-#define M_TSVAL 0xfffffffU
-#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
-
-#define SGE_TIMER_VALUE_0_AND_1 0x10b8
-#define TIMERVALUE0_MASK 0xffff0000U
-#define TIMERVALUE0_SHIFT 16
-#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
-#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
-#define TIMERVALUE1_MASK 0x0000ffffU
-#define TIMERVALUE1_SHIFT 0
-#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
-#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
-
-#define SGE_TIMER_VALUE_2_AND_3 0x10bc
-#define TIMERVALUE2_MASK 0xffff0000U
-#define TIMERVALUE2_SHIFT 16
-#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT)
-#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
-#define TIMERVALUE3_MASK 0x0000ffffU
-#define TIMERVALUE3_SHIFT 0
-#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT)
-#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
-
-#define SGE_TIMER_VALUE_4_AND_5 0x10c0
-#define TIMERVALUE4_MASK 0xffff0000U
-#define TIMERVALUE4_SHIFT 16
-#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT)
-#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
-#define TIMERVALUE5_MASK 0x0000ffffU
-#define TIMERVALUE5_SHIFT 0
-#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT)
-#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
-
-#define SGE_DEBUG_INDEX 0x10cc
-#define SGE_DEBUG_DATA_HIGH 0x10d0
-#define SGE_DEBUG_DATA_LOW 0x10d4
-#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
-#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
-#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
-#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
-#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define LP_COUNT_T5_S 0
+#define LP_COUNT_T5_M 0x3ffffU
+#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define SGE_STAT_TOTAL_A 0x10e4
+#define SGE_STAT_MATCH_A 0x10e8
+#define SGE_STAT_CFG_A 0x10ec
+
+#define STATSOURCE_T5_S 9
+#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+
+#define SGE_DBFIFO_STATUS2_A 0x1118
+
+#define HP_INT_THRESH_T5_S 10
+#define HP_INT_THRESH_T5_M 0xfU
+#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
+
+#define HP_COUNT_T5_S 0
+#define HP_COUNT_T5_M 0x3ffU
+#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
+
+#define ENABLE_DROP_S 13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F ENABLE_DROP_V(1U)
+
+#define DROPPED_DB_S 0
+#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
+#define DROPPED_DB_F DROPPED_DB_V(1U)
+
+#define SGE_CTXT_CMD_A 0x11fc
+#define SGE_DBQ_CTXT_BADDR_A 0x1084
+
+/* registers for module PCIE */
+#define PCIE_PF_CFG_A 0x40
+
+#define AIVEC_S 4
+#define AIVEC_M 0x3ffU
+#define AIVEC_V(x) ((x) << AIVEC_S)
+
+#define PCIE_PF_CLI_A 0x44
+#define PCIE_INT_CAUSE_A 0x3004
+
+#define UNXSPLCPLERR_S 29
+#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
+#define UNXSPLCPLERR_F UNXSPLCPLERR_V(1U)
+
+#define PCIEPINT_S 28
+#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
+#define PCIEPINT_F PCIEPINT_V(1U)
+
+#define PCIESINT_S 27
+#define PCIESINT_V(x) ((x) << PCIESINT_S)
+#define PCIESINT_F PCIESINT_V(1U)
+
+#define RPLPERR_S 26
+#define RPLPERR_V(x) ((x) << RPLPERR_S)
+#define RPLPERR_F RPLPERR_V(1U)
+
+#define RXWRPERR_S 25
+#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
+#define RXWRPERR_F RXWRPERR_V(1U)
+
+#define RXCPLPERR_S 24
+#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
+#define RXCPLPERR_F RXCPLPERR_V(1U)
+
+#define PIOTAGPERR_S 23
+#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
+#define PIOTAGPERR_F PIOTAGPERR_V(1U)
+
+#define MATAGPERR_S 22
+#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
+#define MATAGPERR_F MATAGPERR_V(1U)
+
+#define INTXCLRPERR_S 21
+#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
+#define INTXCLRPERR_F INTXCLRPERR_V(1U)
+
+#define FIDPERR_S 20
+#define FIDPERR_V(x) ((x) << FIDPERR_S)
+#define FIDPERR_F FIDPERR_V(1U)
+
+#define CFGSNPPERR_S 19
+#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
+#define CFGSNPPERR_F CFGSNPPERR_V(1U)
+
+#define HRSPPERR_S 18
+#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
+#define HRSPPERR_F HRSPPERR_V(1U)
+
+#define HREQPERR_S 17
+#define HREQPERR_V(x) ((x) << HREQPERR_S)
+#define HREQPERR_F HREQPERR_V(1U)
+
+#define HCNTPERR_S 16
+#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
+#define HCNTPERR_F HCNTPERR_V(1U)
+
+#define DRSPPERR_S 15
+#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
+#define DRSPPERR_F DRSPPERR_V(1U)
+
+#define DREQPERR_S 14
+#define DREQPERR_V(x) ((x) << DREQPERR_S)
+#define DREQPERR_F DREQPERR_V(1U)
+
+#define DCNTPERR_S 13
+#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
+#define DCNTPERR_F DCNTPERR_V(1U)
+
+#define CRSPPERR_S 12
+#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
+#define CRSPPERR_F CRSPPERR_V(1U)
+
+#define CREQPERR_S 11
+#define CREQPERR_V(x) ((x) << CREQPERR_S)
+#define CREQPERR_F CREQPERR_V(1U)
+
+#define CCNTPERR_S 10
+#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
+#define CCNTPERR_F CCNTPERR_V(1U)
+
+#define TARTAGPERR_S 9
+#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
+#define TARTAGPERR_F TARTAGPERR_V(1U)
+
+#define PIOREQPERR_S 8
+#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
+#define PIOREQPERR_F PIOREQPERR_V(1U)
+
+#define PIOCPLPERR_S 7
+#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
+#define PIOCPLPERR_F PIOCPLPERR_V(1U)
+
+#define MSIXDIPERR_S 6
+#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
+#define MSIXDIPERR_F MSIXDIPERR_V(1U)
+
+#define MSIXDATAPERR_S 5
+#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
+#define MSIXDATAPERR_F MSIXDATAPERR_V(1U)
+
+#define MSIXADDRHPERR_S 4
+#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
+#define MSIXADDRHPERR_F MSIXADDRHPERR_V(1U)
+
+#define MSIXADDRLPERR_S 3
+#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
+#define MSIXADDRLPERR_F MSIXADDRLPERR_V(1U)
+
+#define MSIDATAPERR_S 2
+#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
+#define MSIDATAPERR_F MSIDATAPERR_V(1U)
+
+#define MSIADDRHPERR_S 1
+#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
+#define MSIADDRHPERR_F MSIADDRHPERR_V(1U)
+
+#define MSIADDRLPERR_S 0
+#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
+#define MSIADDRLPERR_F MSIADDRLPERR_V(1U)
+
+#define READRSPERR_S 29
+#define READRSPERR_V(x) ((x) << READRSPERR_S)
+#define READRSPERR_F READRSPERR_V(1U)
+
+#define TRGT1GRPPERR_S 28
+#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
+#define TRGT1GRPPERR_F TRGT1GRPPERR_V(1U)
+
+#define IPSOTPERR_S 27
+#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
+#define IPSOTPERR_F IPSOTPERR_V(1U)
+
+#define IPRETRYPERR_S 26
+#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
+#define IPRETRYPERR_F IPRETRYPERR_V(1U)
+
+#define IPRXDATAGRPPERR_S 25
+#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
+#define IPRXDATAGRPPERR_F IPRXDATAGRPPERR_V(1U)
+
+#define IPRXHDRGRPPERR_S 24
+#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
+#define IPRXHDRGRPPERR_F IPRXHDRGRPPERR_V(1U)
+
+#define MAGRPPERR_S 22
+#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
+#define MAGRPPERR_F MAGRPPERR_V(1U)
+
+#define VFIDPERR_S 21
+#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
+#define VFIDPERR_F VFIDPERR_V(1U)
+
+#define HREQWRPERR_S 16
+#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
+#define HREQWRPERR_F HREQWRPERR_V(1U)
+
+#define DREQWRPERR_S 13
+#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
+#define DREQWRPERR_F DREQWRPERR_V(1U)
+
+#define CREQRDPERR_S 11
+#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
+#define CREQRDPERR_F CREQRDPERR_V(1U)
+
+#define MSTTAGQPERR_S 10
+#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
+#define MSTTAGQPERR_F MSTTAGQPERR_V(1U)
+
+#define PIOREQGRPPERR_S 8
+#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
+#define PIOREQGRPPERR_F PIOREQGRPPERR_V(1U)
+
+#define PIOCPLGRPPERR_S 7
+#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
+#define PIOCPLGRPPERR_F PIOCPLGRPPERR_V(1U)
+
+#define MSIXSTIPERR_S 2
+#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
+#define MSIXSTIPERR_F MSIXSTIPERR_V(1U)
+
+#define MSTTIMEOUTPERR_S 1
+#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
+#define MSTTIMEOUTPERR_F MSTTIMEOUTPERR_V(1U)
+
+#define MSTGRPPERR_S 0
+#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
+#define MSTGRPPERR_F MSTGRPPERR_V(1U)
+
+#define PCIE_NONFAT_ERR_A 0x3010
+#define PCIE_CFG_SPACE_REQ_A 0x3060
+#define PCIE_CFG_SPACE_DATA_A 0x3064
+#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
+
+#define PCIEOFST_S 10
+#define PCIEOFST_M 0x3fffffU
+#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
+
+#define BIR_S 8
+#define BIR_M 0x3U
+#define BIR_V(x) ((x) << BIR_S)
+#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
+
+#define WINDOW_S 0
+#define WINDOW_M 0xffU
+#define WINDOW_V(x) ((x) << WINDOW_S)
+#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
+
+#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
+
+#define ENABLE_S 30
+#define ENABLE_V(x) ((x) << ENABLE_S)
+#define ENABLE_F ENABLE_V(1U)
+
+#define LOCALCFG_S 28
+#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
+#define LOCALCFG_F LOCALCFG_V(1U)
+
+#define FUNCTION_S 12
+#define FUNCTION_V(x) ((x) << FUNCTION_S)
+
+#define REGISTER_S 0
+#define REGISTER_V(x) ((x) << REGISTER_S)
+
+#define PFNUM_S 0
+#define PFNUM_V(x) ((x) << PFNUM_S)
+
+#define PCIE_FW_A 0x30b8
+
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
+
+#define RNPP_S 31
+#define RNPP_V(x) ((x) << RNPP_S)
+#define RNPP_F RNPP_V(1U)
+
+#define RPCP_S 29
+#define RPCP_V(x) ((x) << RPCP_S)
+#define RPCP_F RPCP_V(1U)
+
+#define RCIP_S 27
+#define RCIP_V(x) ((x) << RCIP_S)
+#define RCIP_F RCIP_V(1U)
+
+#define RCCP_S 26
+#define RCCP_V(x) ((x) << RCCP_S)
+#define RCCP_F RCCP_V(1U)
+
+#define RFTP_S 23
+#define RFTP_V(x) ((x) << RFTP_S)
+#define RFTP_F RFTP_V(1U)
+
+#define PTRP_S 20
+#define PTRP_V(x) ((x) << PTRP_S)
+#define PTRP_F PTRP_V(1U)
-#define S_HP_INT_THRESH 28
-#define M_HP_INT_THRESH 0xfU
-#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
-#define S_LP_INT_THRESH_T5 18
-#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
-#define M_LP_COUNT_T5 0x3ffffU
-#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-#define S_LP_INT_THRESH 12
-#define M_LP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH_T5 0xfffU
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-#define A_SGE_DBFIFO_STATUS 0x10a4
-
-#define SGE_STAT_TOTAL 0x10e4
-#define SGE_STAT_MATCH 0x10e8
-
-#define SGE_STAT_CFG 0x10ec
-#define S_STATSOURCE_T5 9
-#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
-
-#define SGE_DBFIFO_STATUS2 0x1118
-#define M_HP_COUNT_T5 0x3ffU
-#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
-#define S_HP_INT_THRESH_T5 10
-#define M_HP_INT_THRESH_T5 0xfU
-#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
-
-#define S_ENABLE_DROP 13
-#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
-#define F_ENABLE_DROP V_ENABLE_DROP(1U)
-#define S_DROPPED_DB 0
-#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
-#define F_DROPPED_DB V_DROPPED_DB(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define PCIE_PF_CFG 0x40
-#define AIVEC(x) ((x) << 4)
-#define AIVEC_MASK 0x3ffU
-
-#define PCIE_PF_CLI 0x44
-#define PCIE_INT_CAUSE 0x3004
-#define UNXSPLCPLERR 0x20000000U
-#define PCIEPINT 0x10000000U
-#define PCIESINT 0x08000000U
-#define RPLPERR 0x04000000U
-#define RXWRPERR 0x02000000U
-#define RXCPLPERR 0x01000000U
-#define PIOTAGPERR 0x00800000U
-#define MATAGPERR 0x00400000U
-#define INTXCLRPERR 0x00200000U
-#define FIDPERR 0x00100000U
-#define CFGSNPPERR 0x00080000U
-#define HRSPPERR 0x00040000U
-#define HREQPERR 0x00020000U
-#define HCNTPERR 0x00010000U
-#define DRSPPERR 0x00008000U
-#define DREQPERR 0x00004000U
-#define DCNTPERR 0x00002000U
-#define CRSPPERR 0x00001000U
-#define CREQPERR 0x00000800U
-#define CCNTPERR 0x00000400U
-#define TARTAGPERR 0x00000200U
-#define PIOREQPERR 0x00000100U
-#define PIOCPLPERR 0x00000080U
-#define MSIXDIPERR 0x00000040U
-#define MSIXDATAPERR 0x00000020U
-#define MSIXADDRHPERR 0x00000010U
-#define MSIXADDRLPERR 0x00000008U
-#define MSIDATAPERR 0x00000004U
-#define MSIADDRHPERR 0x00000002U
-#define MSIADDRLPERR 0x00000001U
-
-#define READRSPERR 0x20000000U
-#define TRGT1GRPPERR 0x10000000U
-#define IPSOTPERR 0x08000000U
-#define IPRXDATAGRPPERR 0x02000000U
-#define IPRXHDRGRPPERR 0x01000000U
-#define MAGRPPERR 0x00400000U
-#define VFIDPERR 0x00200000U
-#define HREQWRPERR 0x00010000U
-#define DREQWRPERR 0x00002000U
-#define MSTTAGQPERR 0x00000400U
-#define PIOREQGRPPERR 0x00000100U
-#define PIOCPLGRPPERR 0x00000080U
-#define MSIXSTIPERR 0x00000004U
-#define MSTTIMEOUTPERR 0x00000002U
-#define MSTGRPPERR 0x00000001U
-
-#define PCIE_NONFAT_ERR 0x3010
-#define PCIE_CFG_SPACE_REQ 0x3060
-#define PCIE_CFG_SPACE_DATA 0x3064
-#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
-#define S_PCIEOFST 10
-#define M_PCIEOFST 0x3fffffU
-#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
-#define PCIEOFST_MASK 0xfffffc00U
-#define BIR_MASK 0x00000300U
-#define BIR_SHIFT 8
-#define BIR(x) ((x) << BIR_SHIFT)
-#define WINDOW_MASK 0x000000ffU
-#define WINDOW_SHIFT 0
-#define WINDOW(x) ((x) << WINDOW_SHIFT)
-#define GET_WINDOW(x) (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
-#define PCIE_MEM_ACCESS_OFFSET 0x306c
-#define ENABLE (1U << 30)
-#define FUNCTION(x) ((x) << 12)
-#define F_LOCALCFG (1U << 28)
-
-#define S_PFNUM 0
-#define V_PFNUM(x) ((x) << S_PFNUM)
-
-#define PCIE_FW 0x30b8
-#define PCIE_FW_ERR 0x80000000U
-#define PCIE_FW_INIT 0x40000000U
-#define PCIE_FW_HALT 0x20000000U
-#define PCIE_FW_MASTER_VLD 0x00008000U
-#define PCIE_FW_MASTER(x) ((x) << 12)
-#define PCIE_FW_MASTER_MASK 0x7
-#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
-
-#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
-#define RNPP 0x80000000U
-#define RPCP 0x20000000U
-#define RCIP 0x08000000U
-#define RCCP 0x04000000U
-#define RFTP 0x00800000U
-#define PTRP 0x00100000U
-
-#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
-#define TPCP 0x40000000U
-#define TNPP 0x20000000U
-#define TFTP 0x10000000U
-#define TCAP 0x08000000U
-#define TCIP 0x04000000U
-#define RCAP 0x02000000U
-#define PLUP 0x00800000U
-#define PLDN 0x00400000U
-#define OTDD 0x00200000U
-#define GTRP 0x00100000U
-#define RDPE 0x00040000U
-#define TDCE 0x00020000U
-#define TDUE 0x00010000U
-
-#define MC_INT_CAUSE 0x7518
-#define MC_P_INT_CAUSE 0x41318
-#define ECC_UE_INT_CAUSE 0x00000004U
-#define ECC_CE_INT_CAUSE 0x00000002U
-#define PERR_INT_CAUSE 0x00000001U
-
-#define MC_ECC_STATUS 0x751c
-#define MC_P_ECC_STATUS 0x4131c
-#define ECC_CECNT_MASK 0xffff0000U
-#define ECC_CECNT_SHIFT 16
-#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
-#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
-#define ECC_UECNT_MASK 0x0000ffffU
-#define ECC_UECNT_SHIFT 0
-#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
-#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
-
-#define MC_BIST_CMD 0x7600
-#define START_BIST 0x80000000U
-#define BIST_CMD_GAP_MASK 0x0000ff00U
-#define BIST_CMD_GAP_SHIFT 8
-#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
-#define BIST_OPCODE_MASK 0x00000003U
-#define BIST_OPCODE_SHIFT 0
-#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
-
-#define MC_BIST_CMD_ADDR 0x7604
-#define MC_BIST_CMD_LEN 0x7608
-#define MC_BIST_DATA_PATTERN 0x760c
-#define BIST_DATA_TYPE_MASK 0x0000000fU
-#define BIST_DATA_TYPE_SHIFT 0
-#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
-
-#define MC_BIST_STATUS_RDATA 0x7688
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
+#define TPCP_S 30
+#define TPCP_V(x) ((x) << TPCP_S)
+#define TPCP_F TPCP_V(1U)
+
+#define TNPP_S 29
+#define TNPP_V(x) ((x) << TNPP_S)
+#define TNPP_F TNPP_V(1U)
+
+#define TFTP_S 28
+#define TFTP_V(x) ((x) << TFTP_S)
+#define TFTP_F TFTP_V(1U)
+
+#define TCAP_S 27
+#define TCAP_V(x) ((x) << TCAP_S)
+#define TCAP_F TCAP_V(1U)
+
+#define TCIP_S 26
+#define TCIP_V(x) ((x) << TCIP_S)
+#define TCIP_F TCIP_V(1U)
+
+#define RCAP_S 25
+#define RCAP_V(x) ((x) << RCAP_S)
+#define RCAP_F RCAP_V(1U)
+
+#define PLUP_S 23
+#define PLUP_V(x) ((x) << PLUP_S)
+#define PLUP_F PLUP_V(1U)
+
+#define PLDN_S 22
+#define PLDN_V(x) ((x) << PLDN_S)
+#define PLDN_F PLDN_V(1U)
+
+#define OTDD_S 21
+#define OTDD_V(x) ((x) << OTDD_S)
+#define OTDD_F OTDD_V(1U)
+
+#define GTRP_S 20
+#define GTRP_V(x) ((x) << GTRP_S)
+#define GTRP_F GTRP_V(1U)
+
+#define RDPE_S 18
+#define RDPE_V(x) ((x) << RDPE_S)
+#define RDPE_F RDPE_V(1U)
+
+#define TDCE_S 17
+#define TDCE_V(x) ((x) << TDCE_S)
+#define TDCE_F TDCE_V(1U)
+
+#define TDUE_S 16
+#define TDUE_V(x) ((x) << TDUE_S)
+#define TDUE_F TDUE_V(1U)
+
+/* registers for module MC */
+#define MC_INT_CAUSE_A 0x7518
+#define MC_P_INT_CAUSE_A 0x41318
+
+#define ECC_UE_INT_CAUSE_S 2
+#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
+#define ECC_UE_INT_CAUSE_F ECC_UE_INT_CAUSE_V(1U)
+
+#define ECC_CE_INT_CAUSE_S 1
+#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
+#define ECC_CE_INT_CAUSE_F ECC_CE_INT_CAUSE_V(1U)
+
+#define PERR_INT_CAUSE_S 0
+#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
+#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
+
+#define MC_ECC_STATUS_A 0x751c
+#define MC_P_ECC_STATUS_A 0x4131c
+
+#define ECC_CECNT_S 16
+#define ECC_CECNT_M 0xffffU
+#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
+#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
+
+#define ECC_UECNT_S 0
+#define ECC_UECNT_M 0xffffU
+#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
+#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
+
+#define MC_BIST_CMD_A 0x7600
+
+#define START_BIST_S 31
+#define START_BIST_V(x) ((x) << START_BIST_S)
+#define START_BIST_F START_BIST_V(1U)
+
+#define BIST_CMD_GAP_S 8
+#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
+
+#define BIST_OPCODE_S 0
+#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
+
+#define MC_BIST_CMD_ADDR_A 0x7604
+#define MC_BIST_CMD_LEN_A 0x7608
+#define MC_BIST_DATA_PATTERN_A 0x760c
+
+#define MC_BIST_STATUS_RDATA_A 0x7688
+
+/* registers for module MA */
#define MA_EDRAM0_BAR_A 0x77c0
#define EDRAM0_SIZE_S 0
@@ -574,263 +893,608 @@
#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U)
-#define MA_INT_CAUSE 0x77e0
-#define MEM_PERR_INT_CAUSE 0x00000002U
-#define MEM_WRAP_INT_CAUSE 0x00000001U
-
-#define MA_INT_WRAP_STATUS 0x77e4
-#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
-#define MEM_WRAP_ADDRESS_SHIFT 4
-#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
-#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
-#define MEM_WRAP_CLIENT_NUM_SHIFT 0
-#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-#define MA_PCIE_FW 0x30b8
-#define MA_PARITY_ERROR_STATUS 0x77f4
-#define MA_PARITY_ERROR_STATUS2 0x7804
-
-#define EDC_0_BASE_ADDR 0x7900
-
-#define EDC_BIST_CMD 0x7904
-#define EDC_BIST_CMD_ADDR 0x7908
-#define EDC_BIST_CMD_LEN 0x790c
-#define EDC_BIST_DATA_PATTERN 0x7910
-#define EDC_BIST_STATUS_RDATA 0x7928
-#define EDC_INT_CAUSE 0x7978
-#define ECC_UE_PAR 0x00000020U
-#define ECC_CE_PAR 0x00000010U
-#define PERR_PAR_CAUSE 0x00000008U
-
-#define EDC_ECC_STATUS 0x797c
-
-#define EDC_1_BASE_ADDR 0x7980
-
-#define CIM_BOOT_CFG 0x7b00
-#define BOOTADDR_MASK 0xffffff00U
-#define UPCRST 0x1U
-
-#define CIM_PF_MAILBOX_DATA 0x240
-#define CIM_PF_MAILBOX_CTRL 0x280
-#define MBMSGVALID 0x00000008U
-#define MBINTREQ 0x00000004U
-#define MBOWNER_MASK 0x00000003U
-#define MBOWNER_SHIFT 0
-#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
-#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
-
-#define CIM_PF_HOST_INT_ENABLE 0x288
-#define MBMSGRDYINTEN(x) ((x) << 19)
-
-#define CIM_PF_HOST_INT_CAUSE 0x28c
-#define MBMSGRDYINT 0x00080000U
-
-#define CIM_HOST_INT_CAUSE 0x7b2c
-#define TIEQOUTPARERRINT 0x00100000U
-#define TIEQINPARERRINT 0x00080000U
-#define MBHOSTPARERR 0x00040000U
-#define MBUPPARERR 0x00020000U
-#define IBQPARERR 0x0001f800U
-#define IBQTP0PARERR 0x00010000U
-#define IBQTP1PARERR 0x00008000U
-#define IBQULPPARERR 0x00004000U
-#define IBQSGELOPARERR 0x00002000U
-#define IBQSGEHIPARERR 0x00001000U
-#define IBQNCSIPARERR 0x00000800U
-#define OBQPARERR 0x000007e0U
-#define OBQULP0PARERR 0x00000400U
-#define OBQULP1PARERR 0x00000200U
-#define OBQULP2PARERR 0x00000100U
-#define OBQULP3PARERR 0x00000080U
-#define OBQSGEPARERR 0x00000040U
-#define OBQNCSIPARERR 0x00000020U
-#define PREFDROPINT 0x00000002U
-#define UPACCNONZERO 0x00000001U
-
-#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
-#define EEPROMWRINT 0x40000000U
-#define TIMEOUTMAINT 0x20000000U
-#define TIMEOUTINT 0x10000000U
-#define RSPOVRLOOKUPINT 0x08000000U
-#define REQOVRLOOKUPINT 0x04000000U
-#define BLKWRPLINT 0x02000000U
-#define BLKRDPLINT 0x01000000U
-#define SGLWRPLINT 0x00800000U
-#define SGLRDPLINT 0x00400000U
-#define BLKWRCTLINT 0x00200000U
-#define BLKRDCTLINT 0x00100000U
-#define SGLWRCTLINT 0x00080000U
-#define SGLRDCTLINT 0x00040000U
-#define BLKWREEPROMINT 0x00020000U
-#define BLKRDEEPROMINT 0x00010000U
-#define SGLWREEPROMINT 0x00008000U
-#define SGLRDEEPROMINT 0x00004000U
-#define BLKWRFLASHINT 0x00002000U
-#define BLKRDFLASHINT 0x00001000U
-#define SGLWRFLASHINT 0x00000800U
-#define SGLRDFLASHINT 0x00000400U
-#define BLKWRBOOTINT 0x00000200U
-#define BLKRDBOOTINT 0x00000100U
-#define SGLWRBOOTINT 0x00000080U
-#define SGLRDBOOTINT 0x00000040U
-#define ILLWRBEINT 0x00000020U
-#define ILLRDBEINT 0x00000010U
-#define ILLRDINT 0x00000008U
-#define ILLWRINT 0x00000004U
-#define ILLTRANSINT 0x00000002U
-#define RSVDSPACEINT 0x00000001U
-
-#define TP_OUT_CONFIG 0x7d04
-#define VLANEXTENABLE_MASK 0x0000f000U
-#define VLANEXTENABLE_SHIFT 12
-
-#define TP_GLOBAL_CONFIG 0x7d08
-#define FIVETUPLELOOKUP_SHIFT 17
-#define FIVETUPLELOOKUP_MASK 0x00060000U
-#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT)
-#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
- FIVETUPLELOOKUP_SHIFT)
-
-#define TP_PARA_REG2 0x7d68
-#define MAXRXDATA_MASK 0xffff0000U
-#define MAXRXDATA_SHIFT 16
-#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
-
-#define TP_TIMER_RESOLUTION 0x7d90
-#define TIMERRESOLUTION_MASK 0x00ff0000U
-#define TIMERRESOLUTION_SHIFT 16
-#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
-#define DELAYEDACKRESOLUTION_MASK 0x000000ffU
-#define DELAYEDACKRESOLUTION_SHIFT 0
-#define DELAYEDACKRESOLUTION_GET(x) \
- (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
-
-#define TP_SHIFT_CNT 0x7dc0
-#define SYNSHIFTMAX_SHIFT 24
-#define SYNSHIFTMAX_MASK 0xff000000U
-#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT)
-#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \
- SYNSHIFTMAX_SHIFT)
-#define RXTSHIFTMAXR1_SHIFT 20
-#define RXTSHIFTMAXR1_MASK 0x00f00000U
-#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT)
-#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \
- RXTSHIFTMAXR1_SHIFT)
-#define RXTSHIFTMAXR2_SHIFT 16
-#define RXTSHIFTMAXR2_MASK 0x000f0000U
-#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT)
-#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \
- RXTSHIFTMAXR2_SHIFT)
-#define PERSHIFTBACKOFFMAX_SHIFT 12
-#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U
-#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT)
-#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
- PERSHIFTBACKOFFMAX_SHIFT)
-#define PERSHIFTMAX_SHIFT 8
-#define PERSHIFTMAX_MASK 0x00000f00U
-#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT)
-#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \
- PERSHIFTMAX_SHIFT)
-#define KEEPALIVEMAXR1_SHIFT 4
-#define KEEPALIVEMAXR1_MASK 0x000000f0U
-#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT)
-#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \
- KEEPALIVEMAXR1_SHIFT)
-#define KEEPALIVEMAXR2_SHIFT 0
-#define KEEPALIVEMAXR2_MASK 0x0000000fU
-#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT)
-#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \
- KEEPALIVEMAXR2_SHIFT)
-
-#define TP_CCTRL_TABLE 0x7ddc
-#define TP_MTU_TABLE 0x7de4
-#define MTUINDEX_MASK 0xff000000U
-#define MTUINDEX_SHIFT 24
-#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
-#define MTUWIDTH_MASK 0x000f0000U
-#define MTUWIDTH_SHIFT 16
-#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
-#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
-#define MTUVALUE_MASK 0x00003fffU
-#define MTUVALUE_SHIFT 0
-#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
-#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
-
-#define TP_RSS_LKP_TABLE 0x7dec
-#define LKPTBLROWVLD 0x80000000U
-#define LKPTBLQUEUE1_MASK 0x000ffc00U
-#define LKPTBLQUEUE1_SHIFT 10
-#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
-#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
-#define LKPTBLQUEUE0_MASK 0x000003ffU
-#define LKPTBLQUEUE0_SHIFT 0
-#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
-#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
-
-#define TP_PIO_ADDR 0x7e40
-#define TP_PIO_DATA 0x7e44
-#define TP_MIB_INDEX 0x7e50
-#define TP_MIB_DATA 0x7e54
-#define TP_INT_CAUSE 0x7e74
-#define FLMTXFLSTEMPTY 0x40000000U
-
-#define TP_VLAN_PRI_MAP 0x140
-#define FRAGMENTATION_SHIFT 9
-#define FRAGMENTATION_MASK 0x00000200U
-#define MPSHITTYPE_MASK 0x00000100U
-#define MACMATCH_MASK 0x00000080U
-#define ETHERTYPE_MASK 0x00000040U
-#define PROTOCOL_MASK 0x00000020U
-#define TOS_MASK 0x00000010U
-#define VLAN_MASK 0x00000008U
-#define VNIC_ID_MASK 0x00000004U
-#define PORT_MASK 0x00000002U
-#define FCOE_SHIFT 0
-#define FCOE_MASK 0x00000001U
-
-#define TP_INGRESS_CONFIG 0x141
-#define VNIC 0x00000800U
-#define CSUM_HAS_PSEUDO_HDR 0x00000400U
-#define RM_OVLAN 0x00000200U
-#define LOOKUPEVERYPKT 0x00000100U
-
-#define TP_MIB_MAC_IN_ERR_0 0x0
-#define TP_MIB_TCP_OUT_RST 0xc
-#define TP_MIB_TCP_IN_SEG_HI 0x10
-#define TP_MIB_TCP_IN_SEG_LO 0x11
-#define TP_MIB_TCP_OUT_SEG_HI 0x12
-#define TP_MIB_TCP_OUT_SEG_LO 0x13
-#define TP_MIB_TCP_RXT_SEG_HI 0x14
-#define TP_MIB_TCP_RXT_SEG_LO 0x15
-#define TP_MIB_TNL_CNG_DROP_0 0x18
-#define TP_MIB_TCP_V6IN_ERR_0 0x28
-#define TP_MIB_TCP_V6OUT_RST 0x2c
-#define TP_MIB_OFD_ARP_DROP 0x36
-#define TP_MIB_TNL_DROP_0 0x44
-#define TP_MIB_OFD_VLN_DROP_0 0x58
-
-#define ULP_TX_INT_CAUSE 0x8dcc
-#define PBL_BOUND_ERR_CH3 0x80000000U
-#define PBL_BOUND_ERR_CH2 0x40000000U
-#define PBL_BOUND_ERR_CH1 0x20000000U
-#define PBL_BOUND_ERR_CH0 0x10000000U
-
-#define PM_RX_INT_CAUSE 0x8fdc
-#define ZERO_E_CMD_ERROR 0x00400000U
-#define PMRX_FRAMING_ERROR 0x003ffff0U
-#define OCSPI_PAR_ERROR 0x00000008U
-#define DB_OPTIONS_PAR_ERROR 0x00000004U
-#define IESPI_PAR_ERROR 0x00000002U
-#define E_PCMD_PAR_ERROR 0x00000001U
-
-#define PM_TX_INT_CAUSE 0x8ffc
-#define PCMD_LEN_OVFL0 0x80000000U
-#define PCMD_LEN_OVFL1 0x40000000U
-#define PCMD_LEN_OVFL2 0x20000000U
-#define ZERO_C_CMD_ERROR 0x10000000U
-#define PMTX_FRAMING_ERROR 0x0ffffff0U
-#define OESPI_PAR_ERROR 0x00000008U
-#define ICSPI_PAR_ERROR 0x00000002U
-#define C_PCMD_PAR_ERROR 0x00000001U
+#define MA_INT_CAUSE_A 0x77e0
+
+#define MEM_PERR_INT_CAUSE_S 1
+#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
+#define MEM_PERR_INT_CAUSE_F MEM_PERR_INT_CAUSE_V(1U)
+
+#define MEM_WRAP_INT_CAUSE_S 0
+#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
+#define MEM_WRAP_INT_CAUSE_F MEM_WRAP_INT_CAUSE_V(1U)
+
+#define MA_INT_WRAP_STATUS_A 0x77e4
+
+#define MEM_WRAP_ADDRESS_S 4
+#define MEM_WRAP_ADDRESS_M 0xfffffffU
+#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
+
+#define MEM_WRAP_CLIENT_NUM_S 0
+#define MEM_WRAP_CLIENT_NUM_M 0xfU
+#define MEM_WRAP_CLIENT_NUM_G(x) \
+ (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
+
+#define MA_PARITY_ERROR_STATUS_A 0x77f4
+#define MA_PARITY_ERROR_STATUS1_A 0x77f4
+#define MA_PARITY_ERROR_STATUS2_A 0x7804
+
+/* registers for module EDC_0 */
+#define EDC_0_BASE_ADDR 0x7900
+
+#define EDC_BIST_CMD_A 0x7904
+#define EDC_BIST_CMD_ADDR_A 0x7908
+#define EDC_BIST_CMD_LEN_A 0x790c
+#define EDC_BIST_DATA_PATTERN_A 0x7910
+#define EDC_BIST_STATUS_RDATA_A 0x7928
+#define EDC_INT_CAUSE_A 0x7978
+
+#define ECC_UE_PAR_S 5
+#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
+#define ECC_UE_PAR_F ECC_UE_PAR_V(1U)
+
+#define ECC_CE_PAR_S 4
+#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
+#define ECC_CE_PAR_F ECC_CE_PAR_V(1U)
+
+#define PERR_PAR_CAUSE_S 3
+#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
+#define PERR_PAR_CAUSE_F PERR_PAR_CAUSE_V(1U)
+
+#define EDC_ECC_STATUS_A 0x797c
+
+/* registers for module EDC_1 */
+#define EDC_1_BASE_ADDR 0x7980
+
+/* registers for module CIM */
+#define CIM_BOOT_CFG_A 0x7b00
+#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
+
+#define BOOTADDR_M 0xffffff00U
+
+#define UPCRST_S 0
+#define UPCRST_V(x) ((x) << UPCRST_S)
+#define UPCRST_F UPCRST_V(1U)
+
+#define CIM_PF_MAILBOX_DATA_A 0x240
+#define CIM_PF_MAILBOX_CTRL_A 0x280
+
+#define MBMSGVALID_S 3
+#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
+#define MBMSGVALID_F MBMSGVALID_V(1U)
+
+#define MBINTREQ_S 2
+#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
+#define MBINTREQ_F MBINTREQ_V(1U)
+
+#define MBOWNER_S 0
+#define MBOWNER_M 0x3U
+#define MBOWNER_V(x) ((x) << MBOWNER_S)
+#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
+
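The mailbox-control field macros above compose with bitwise OR in the usual way; a hedged standalone sketch follows (the owner code is arbitrary for the example and this says nothing about the driver's actual mailbox handshake):

#include <stdio.h>
#include <stdint.h>

#define MBMSGVALID_S	3
#define MBMSGVALID_V(x)	((x) << MBMSGVALID_S)
#define MBMSGVALID_F	MBMSGVALID_V(1U)

#define MBOWNER_S	0
#define MBOWNER_M	0x3U
#define MBOWNER_V(x)	((x) << MBOWNER_S)
#define MBOWNER_G(x)	(((x) >> MBOWNER_S) & MBOWNER_M)

int main(void)
{
	/* compose a control word: message-valid flag plus a 2-bit owner code */
	uint32_t ctrl = MBMSGVALID_F | MBOWNER_V(2U);

	/* _G reverses _V for the multi-bit field; the _F flag is tested with '&' */
	printf("owner=%u valid=%s\n",
	       MBOWNER_G(ctrl), (ctrl & MBMSGVALID_F) ? "yes" : "no");
	return 0;	/* prints: owner=2 valid=yes */
}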
+#define CIM_PF_HOST_INT_ENABLE_A 0x288
+
+#define MBMSGRDYINTEN_S 19
+#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
+#define MBMSGRDYINTEN_F MBMSGRDYINTEN_V(1U)
+
+#define CIM_PF_HOST_INT_CAUSE_A 0x28c
+
+#define MBMSGRDYINT_S 19
+#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
+#define MBMSGRDYINT_F MBMSGRDYINT_V(1U)
+
+#define CIM_HOST_INT_CAUSE_A 0x7b2c
+
+#define TIEQOUTPARERRINT_S 20
+#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
+#define TIEQOUTPARERRINT_F TIEQOUTPARERRINT_V(1U)
+
+#define TIEQINPARERRINT_S 19
+#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
+#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U)
+
+#define PREFDROPINT_S 1
+#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
+#define PREFDROPINT_F PREFDROPINT_V(1U)
+
+#define UPACCNONZERO_S 0
+#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
+#define UPACCNONZERO_F UPACCNONZERO_V(1U)
+
+#define MBHOSTPARERR_S 18
+#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
+#define MBHOSTPARERR_F MBHOSTPARERR_V(1U)
+
+#define MBUPPARERR_S 17
+#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
+#define MBUPPARERR_F MBUPPARERR_V(1U)
+
+#define IBQTP0PARERR_S 16
+#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
+#define IBQTP0PARERR_F IBQTP0PARERR_V(1U)
+
+#define IBQTP1PARERR_S 15
+#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
+#define IBQTP1PARERR_F IBQTP1PARERR_V(1U)
+
+#define IBQULPPARERR_S 14
+#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
+#define IBQULPPARERR_F IBQULPPARERR_V(1U)
+
+#define IBQSGELOPARERR_S 13
+#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
+#define IBQSGELOPARERR_F IBQSGELOPARERR_V(1U)
+
+#define IBQSGEHIPARERR_S 12
+#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
+#define IBQSGEHIPARERR_F IBQSGEHIPARERR_V(1U)
+
+#define IBQNCSIPARERR_S 11
+#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
+#define IBQNCSIPARERR_F IBQNCSIPARERR_V(1U)
+
+#define OBQULP0PARERR_S 10
+#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
+#define OBQULP0PARERR_F OBQULP0PARERR_V(1U)
+
+#define OBQULP1PARERR_S 9
+#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
+#define OBQULP1PARERR_F OBQULP1PARERR_V(1U)
+
+#define OBQULP2PARERR_S 8
+#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
+#define OBQULP2PARERR_F OBQULP2PARERR_V(1U)
+
+#define OBQULP3PARERR_S 7
+#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
+#define OBQULP3PARERR_F OBQULP3PARERR_V(1U)
+
+#define OBQSGEPARERR_S 6
+#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
+#define OBQSGEPARERR_F OBQSGEPARERR_V(1U)
+
+#define OBQNCSIPARERR_S 5
+#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
+#define OBQNCSIPARERR_F OBQNCSIPARERR_V(1U)
+
+#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
+
+#define EEPROMWRINT_S 30
+#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
+#define EEPROMWRINT_F EEPROMWRINT_V(1U)
+
+#define TIMEOUTMAINT_S 29
+#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
+#define TIMEOUTMAINT_F TIMEOUTMAINT_V(1U)
+
+#define TIMEOUTINT_S 28
+#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
+#define TIMEOUTINT_F TIMEOUTINT_V(1U)
+
+#define RSPOVRLOOKUPINT_S 27
+#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
+#define RSPOVRLOOKUPINT_F RSPOVRLOOKUPINT_V(1U)
+
+#define REQOVRLOOKUPINT_S 26
+#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
+#define REQOVRLOOKUPINT_F REQOVRLOOKUPINT_V(1U)
+
+#define BLKWRPLINT_S 25
+#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
+#define BLKWRPLINT_F BLKWRPLINT_V(1U)
+
+#define BLKRDPLINT_S 24
+#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
+#define BLKRDPLINT_F BLKRDPLINT_V(1U)
+
+#define SGLWRPLINT_S 23
+#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
+#define SGLWRPLINT_F SGLWRPLINT_V(1U)
+
+#define SGLRDPLINT_S 22
+#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
+#define SGLRDPLINT_F SGLRDPLINT_V(1U)
+
+#define BLKWRCTLINT_S 21
+#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
+#define BLKWRCTLINT_F BLKWRCTLINT_V(1U)
+
+#define BLKRDCTLINT_S 20
+#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
+#define BLKRDCTLINT_F BLKRDCTLINT_V(1U)
+
+#define SGLWRCTLINT_S 19
+#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
+#define SGLWRCTLINT_F SGLWRCTLINT_V(1U)
+
+#define SGLRDCTLINT_S 18
+#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
+#define SGLRDCTLINT_F SGLRDCTLINT_V(1U)
+
+#define BLKWREEPROMINT_S 17
+#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
+#define BLKWREEPROMINT_F BLKWREEPROMINT_V(1U)
+
+#define BLKRDEEPROMINT_S 16
+#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
+#define BLKRDEEPROMINT_F BLKRDEEPROMINT_V(1U)
+
+#define SGLWREEPROMINT_S 15
+#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
+#define SGLWREEPROMINT_F SGLWREEPROMINT_V(1U)
+
+#define SGLRDEEPROMINT_S 14
+#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
+#define SGLRDEEPROMINT_F SGLRDEEPROMINT_V(1U)
+
+#define BLKWRFLASHINT_S 13
+#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
+#define BLKWRFLASHINT_F BLKWRFLASHINT_V(1U)
+
+#define BLKRDFLASHINT_S 12
+#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
+#define BLKRDFLASHINT_F BLKRDFLASHINT_V(1U)
+
+#define SGLWRFLASHINT_S 11
+#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
+#define SGLWRFLASHINT_F SGLWRFLASHINT_V(1U)
+
+#define SGLRDFLASHINT_S 10
+#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
+#define SGLRDFLASHINT_F SGLRDFLASHINT_V(1U)
+
+#define BLKWRBOOTINT_S 9
+#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
+#define BLKWRBOOTINT_F BLKWRBOOTINT_V(1U)
+
+#define BLKRDBOOTINT_S 8
+#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
+#define BLKRDBOOTINT_F BLKRDBOOTINT_V(1U)
+
+#define SGLWRBOOTINT_S 7
+#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
+#define SGLWRBOOTINT_F SGLWRBOOTINT_V(1U)
+
+#define SGLRDBOOTINT_S 6
+#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
+#define SGLRDBOOTINT_F SGLRDBOOTINT_V(1U)
+
+#define ILLWRBEINT_S 5
+#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
+#define ILLWRBEINT_F ILLWRBEINT_V(1U)
+
+#define ILLRDBEINT_S 4
+#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
+#define ILLRDBEINT_F ILLRDBEINT_V(1U)
+
+#define ILLRDINT_S 3
+#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
+#define ILLRDINT_F ILLRDINT_V(1U)
+
+#define ILLWRINT_S 2
+#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
+#define ILLWRINT_F ILLWRINT_V(1U)
+
+#define ILLTRANSINT_S 1
+#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
+#define ILLTRANSINT_F ILLTRANSINT_V(1U)
+
+#define RSVDSPACEINT_S 0
+#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
+#define RSVDSPACEINT_F RSVDSPACEINT_V(1U)
+
+/* registers for module TP */
+#define DBGLAWHLF_S 23
+#define DBGLAWHLF_V(x) ((x) << DBGLAWHLF_S)
+#define DBGLAWHLF_F DBGLAWHLF_V(1U)
+
+#define DBGLAWPTR_S 16
+#define DBGLAWPTR_M 0x7fU
+#define DBGLAWPTR_G(x) (((x) >> DBGLAWPTR_S) & DBGLAWPTR_M)
+
+#define DBGLAENABLE_S 12
+#define DBGLAENABLE_V(x) ((x) << DBGLAENABLE_S)
+#define DBGLAENABLE_F DBGLAENABLE_V(1U)
+
+#define DBGLARPTR_S 0
+#define DBGLARPTR_M 0x7fU
+#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
+
+#define TP_DBG_LA_DATAL_A 0x7ed8
+#define TP_DBG_LA_CONFIG_A 0x7ed4
+#define TP_OUT_CONFIG_A 0x7d04
+#define TP_GLOBAL_CONFIG_A 0x7d08
+
+#define DBGLAMODE_S 14
+#define DBGLAMODE_M 0x3U
+#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
+
+#define FIVETUPLELOOKUP_S 17
+#define FIVETUPLELOOKUP_M 0x3U
+#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
+#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
+
+#define TP_PARA_REG2_A 0x7d68
+
+#define MAXRXDATA_S 16
+#define MAXRXDATA_M 0xffffU
+#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
+
+#define TP_TIMER_RESOLUTION_A 0x7d90
+
+#define TIMERRESOLUTION_S 16
+#define TIMERRESOLUTION_M 0xffU
+#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
+
+#define TIMESTAMPRESOLUTION_S 8
+#define TIMESTAMPRESOLUTION_M 0xffU
+#define TIMESTAMPRESOLUTION_G(x) \
+ (((x) >> TIMESTAMPRESOLUTION_S) & TIMESTAMPRESOLUTION_M)
+
+#define DELAYEDACKRESOLUTION_S 0
+#define DELAYEDACKRESOLUTION_M 0xffU
+#define DELAYEDACKRESOLUTION_G(x) \
+ (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
+
+#define TP_SHIFT_CNT_A 0x7dc0
+#define TP_RXT_MIN_A 0x7d98
+#define TP_RXT_MAX_A 0x7d9c
+#define TP_PERS_MIN_A 0x7da0
+#define TP_PERS_MAX_A 0x7da4
+#define TP_KEEP_IDLE_A 0x7da8
+#define TP_KEEP_INTVL_A 0x7dac
+#define TP_INIT_SRTT_A 0x7db0
+#define TP_DACK_TIMER_A 0x7db4
+#define TP_FINWAIT2_TIMER_A 0x7db8
+
+#define INITSRTT_S 0
+#define INITSRTT_M 0xffffU
+#define INITSRTT_G(x) (((x) >> INITSRTT_S) & INITSRTT_M)
+
+#define PERSMAX_S 0
+#define PERSMAX_M 0x3fffffffU
+#define PERSMAX_V(x) ((x) << PERSMAX_S)
+#define PERSMAX_G(x) (((x) >> PERSMAX_S) & PERSMAX_M)
+
+#define SYNSHIFTMAX_S 24
+#define SYNSHIFTMAX_M 0xffU
+#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
+#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
+
+#define RXTSHIFTMAXR1_S 20
+#define RXTSHIFTMAXR1_M 0xfU
+#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
+#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
+
+#define RXTSHIFTMAXR2_S 16
+#define RXTSHIFTMAXR2_M 0xfU
+#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
+#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
+
+#define PERSHIFTBACKOFFMAX_S 12
+#define PERSHIFTBACKOFFMAX_M 0xfU
+#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
+#define PERSHIFTBACKOFFMAX_G(x) \
+ (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
+
+#define PERSHIFTMAX_S 8
+#define PERSHIFTMAX_M 0xfU
+#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
+#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
+
+#define KEEPALIVEMAXR1_S 4
+#define KEEPALIVEMAXR1_M 0xfU
+#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
+#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
+
+#define KEEPALIVEMAXR2_S 0
+#define KEEPALIVEMAXR2_M 0xfU
+#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
+#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
+
+#define ROWINDEX_S 16
+#define ROWINDEX_V(x) ((x) << ROWINDEX_S)
+
+#define TP_CCTRL_TABLE_A 0x7ddc
+#define TP_MTU_TABLE_A 0x7de4
+
+#define MTUINDEX_S 24
+#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
+
+#define MTUWIDTH_S 16
+#define MTUWIDTH_M 0xfU
+#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
+#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
+
+#define MTUVALUE_S 0
+#define MTUVALUE_M 0x3fffU
+#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
+#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
+
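The MTU-table helpers above are the usual _V compose / _G extract pair. A standalone sketch that builds and re-parses one hypothetical table word (the slot, width code and MTU value are made up, and no claim is made about the real table-programming sequence):

#include <stdio.h>
#include <stdint.h>

#define MTUINDEX_S	24
#define MTUINDEX_V(x)	((x) << MTUINDEX_S)

#define MTUWIDTH_S	16
#define MTUWIDTH_M	0xfU
#define MTUWIDTH_V(x)	((x) << MTUWIDTH_S)
#define MTUWIDTH_G(x)	(((x) >> MTUWIDTH_S) & MTUWIDTH_M)

#define MTUVALUE_S	0
#define MTUVALUE_M	0x3fffU
#define MTUVALUE_V(x)	((x) << MTUVALUE_S)
#define MTUVALUE_G(x)	(((x) >> MTUVALUE_S) & MTUVALUE_M)

int main(void)
{
	/* hypothetical entry: slot 5, width code 8, MTU value 1500 */
	uint32_t word = MTUINDEX_V(5U) | MTUWIDTH_V(8U) | MTUVALUE_V(1500U);

	printf("word=%#x width=%u mtu=%u\n",
	       word, MTUWIDTH_G(word), MTUVALUE_G(word));
	return 0;	/* prints: word=0x50805dc width=8 mtu=1500 */
}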
+#define TP_RSS_LKP_TABLE_A 0x7dec
+
+#define LKPTBLROWVLD_S 31
+#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
+#define LKPTBLROWVLD_F LKPTBLROWVLD_V(1U)
+
+#define LKPTBLQUEUE1_S 10
+#define LKPTBLQUEUE1_M 0x3ffU
+#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
+
+#define LKPTBLQUEUE0_S 0
+#define LKPTBLQUEUE0_M 0x3ffU
+#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
+
+#define TP_PIO_ADDR_A 0x7e40
+#define TP_PIO_DATA_A 0x7e44
+#define TP_MIB_INDEX_A 0x7e50
+#define TP_MIB_DATA_A 0x7e54
+#define TP_INT_CAUSE_A 0x7e74
+
+#define FLMTXFLSTEMPTY_S 30
+#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
+#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
+
+#define TP_VLAN_PRI_MAP_A 0x140
+
+#define FRAGMENTATION_S 9
+#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
+#define FRAGMENTATION_F FRAGMENTATION_V(1U)
+
+#define MPSHITTYPE_S 8
+#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
+#define MPSHITTYPE_F MPSHITTYPE_V(1U)
+
+#define MACMATCH_S 7
+#define MACMATCH_V(x) ((x) << MACMATCH_S)
+#define MACMATCH_F MACMATCH_V(1U)
+
+#define ETHERTYPE_S 6
+#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
+#define ETHERTYPE_F ETHERTYPE_V(1U)
+
+#define PROTOCOL_S 5
+#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
+#define PROTOCOL_F PROTOCOL_V(1U)
+
+#define TOS_S 4
+#define TOS_V(x) ((x) << TOS_S)
+#define TOS_F TOS_V(1U)
+
+#define VLAN_S 3
+#define VLAN_V(x) ((x) << VLAN_S)
+#define VLAN_F VLAN_V(1U)
+
+#define VNIC_ID_S 2
+#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
+#define VNIC_ID_F VNIC_ID_V(1U)
+
+#define PORT_S 1
+#define PORT_V(x) ((x) << PORT_S)
+#define PORT_F PORT_V(1U)
+
+#define FCOE_S 0
+#define FCOE_V(x) ((x) << FCOE_S)
+#define FCOE_F FCOE_V(1U)
+
+#define FILTERMODE_S 15
+#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
+#define FILTERMODE_F FILTERMODE_V(1U)
+
+#define FCOEMASK_S 14
+#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
+#define FCOEMASK_F FCOEMASK_V(1U)
+
+#define TP_INGRESS_CONFIG_A 0x141
+
+#define VNIC_S 11
+#define VNIC_V(x) ((x) << VNIC_S)
+#define VNIC_F VNIC_V(1U)
+
+#define CSUM_HAS_PSEUDO_HDR_S 10
+#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
+#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
+
+#define TP_MIB_MAC_IN_ERR_0_A 0x0
+#define TP_MIB_TCP_OUT_RST_A 0xc
+#define TP_MIB_TCP_IN_SEG_HI_A 0x10
+#define TP_MIB_TCP_IN_SEG_LO_A 0x11
+#define TP_MIB_TCP_OUT_SEG_HI_A 0x12
+#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
+#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
+#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
+#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
+#define TP_MIB_TCP_V6OUT_RST_A 0x2c
+#define TP_MIB_OFD_ARP_DROP_A 0x36
+#define TP_MIB_TNL_DROP_0_A 0x44
+#define TP_MIB_OFD_VLN_DROP_0_A 0x58
+
+#define ULP_TX_INT_CAUSE_A 0x8dcc
+
+#define PBL_BOUND_ERR_CH3_S 31
+#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
+#define PBL_BOUND_ERR_CH3_F PBL_BOUND_ERR_CH3_V(1U)
+
+#define PBL_BOUND_ERR_CH2_S 30
+#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
+#define PBL_BOUND_ERR_CH2_F PBL_BOUND_ERR_CH2_V(1U)
+
+#define PBL_BOUND_ERR_CH1_S 29
+#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
+#define PBL_BOUND_ERR_CH1_F PBL_BOUND_ERR_CH1_V(1U)
+
+#define PBL_BOUND_ERR_CH0_S 28
+#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
+#define PBL_BOUND_ERR_CH0_F PBL_BOUND_ERR_CH0_V(1U)
+
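Single-bit _F flags such as the PBL_BOUND_ERR_CH* bits above are normally tested with a plain bitwise AND against an interrupt-cause readout. A small illustrative example (the cause value is faked, so only the masking pattern is being shown):

#include <stdio.h>
#include <stdint.h>

#define PBL_BOUND_ERR_CH3_S	31
#define PBL_BOUND_ERR_CH3_V(x)	((x) << PBL_BOUND_ERR_CH3_S)
#define PBL_BOUND_ERR_CH3_F	PBL_BOUND_ERR_CH3_V(1U)

#define PBL_BOUND_ERR_CH0_S	28
#define PBL_BOUND_ERR_CH0_V(x)	((x) << PBL_BOUND_ERR_CH0_S)
#define PBL_BOUND_ERR_CH0_F	PBL_BOUND_ERR_CH0_V(1U)

int main(void)
{
	uint32_t cause = PBL_BOUND_ERR_CH0_F;	/* pretend readout with only CH0 set */

	if (cause & PBL_BOUND_ERR_CH0_F)
		printf("channel 0 PBL bound error\n");
	if (cause & PBL_BOUND_ERR_CH3_F)
		printf("channel 3 PBL bound error\n");
	return 0;	/* prints only the channel 0 line */
}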
+#define PM_RX_INT_CAUSE_A 0x8fdc
+#define PM_RX_STAT_CONFIG_A 0x8fc8
+#define PM_RX_STAT_COUNT_A 0x8fcc
+#define PM_RX_STAT_LSB_A 0x8fd0
+#define PM_RX_DBG_CTRL_A 0x8fd0
+#define PM_RX_DBG_DATA_A 0x8fd4
+#define PM_RX_DBG_STAT_MSB_A 0x10013
+
+#define PMRX_FRAMING_ERROR_F 0x003ffff0U
+
+#define ZERO_E_CMD_ERROR_S 22
+#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
+#define ZERO_E_CMD_ERROR_F ZERO_E_CMD_ERROR_V(1U)
+
+#define OCSPI_PAR_ERROR_S 3
+#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
+#define OCSPI_PAR_ERROR_F OCSPI_PAR_ERROR_V(1U)
+
+#define DB_OPTIONS_PAR_ERROR_S 2
+#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
+#define DB_OPTIONS_PAR_ERROR_F DB_OPTIONS_PAR_ERROR_V(1U)
+
+#define IESPI_PAR_ERROR_S 1
+#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
+#define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U)
+
+#define PMRX_E_PCMD_PAR_ERROR_S 0
+#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
+#define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U)
+
+#define PM_TX_INT_CAUSE_A 0x8ffc
+#define PM_TX_STAT_CONFIG_A 0x8fe8
+#define PM_TX_STAT_COUNT_A 0x8fec
+#define PM_TX_STAT_LSB_A 0x8ff0
+#define PM_TX_DBG_CTRL_A 0x8ff0
+#define PM_TX_DBG_DATA_A 0x8ff4
+#define PM_TX_DBG_STAT_MSB_A 0x1001a
+
+#define PCMD_LEN_OVFL0_S 31
+#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
+#define PCMD_LEN_OVFL0_F PCMD_LEN_OVFL0_V(1U)
+
+#define PCMD_LEN_OVFL1_S 30
+#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
+#define PCMD_LEN_OVFL1_F PCMD_LEN_OVFL1_V(1U)
+
+#define PCMD_LEN_OVFL2_S 29
+#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
+#define PCMD_LEN_OVFL2_F PCMD_LEN_OVFL2_V(1U)
+
+#define ZERO_C_CMD_ERROR_S 28
+#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
+#define ZERO_C_CMD_ERROR_F ZERO_C_CMD_ERROR_V(1U)
+
+#define PMTX_FRAMING_ERROR_F 0x0ffffff0U
+
+#define OESPI_PAR_ERROR_S 3
+#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
+#define OESPI_PAR_ERROR_F OESPI_PAR_ERROR_V(1U)
+
+#define ICSPI_PAR_ERROR_S 1
+#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
+#define ICSPI_PAR_ERROR_F ICSPI_PAR_ERROR_V(1U)
+
+#define PMTX_C_PCMD_PAR_ERROR_S 0
+#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
+#define PMTX_C_PCMD_PAR_ERROR_F PMTX_C_PCMD_PAR_ERROR_V(1U)
#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
@@ -959,41 +1623,57 @@
#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
-#define MAC_PORT_CFG2 0x818
#define MAC_PORT_MAGIC_MACID_LO 0x824
#define MAC_PORT_MAGIC_MACID_HI 0x828
-#define MAC_PORT_EPIO_DATA0 0x8c0
-#define MAC_PORT_EPIO_DATA1 0x8c4
-#define MAC_PORT_EPIO_DATA2 0x8c8
-#define MAC_PORT_EPIO_DATA3 0x8cc
-#define MAC_PORT_EPIO_OP 0x8d0
-
-#define MPS_CMN_CTL 0x9000
-#define NUMPORTS_MASK 0x00000003U
-#define NUMPORTS_SHIFT 0
-#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
-
-#define MPS_INT_CAUSE 0x9008
-#define STATINT 0x00000020U
-#define TXINT 0x00000010U
-#define RXINT 0x00000008U
-#define TRCINT 0x00000004U
-#define CLSINT 0x00000002U
-#define PLINT 0x00000001U
-
-#define MPS_TX_INT_CAUSE 0x9408
-#define PORTERR 0x00010000U
-#define FRMERR 0x00008000U
-#define SECNTERR 0x00004000U
-#define BUBBLE 0x00002000U
-#define TXDESCFIFO 0x00001e00U
-#define TXDATAFIFO 0x000001e0U
-#define NCSIFIFO 0x00000010U
-#define TPFIFO 0x0000000fU
-
-#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
-#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
-#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MAC_PORT_EPIO_DATA0_A 0x8c0
+#define MAC_PORT_EPIO_DATA1_A 0x8c4
+#define MAC_PORT_EPIO_DATA2_A 0x8c8
+#define MAC_PORT_EPIO_DATA3_A 0x8cc
+#define MAC_PORT_EPIO_OP_A 0x8d0
+
+#define MAC_PORT_CFG2_A 0x818
+
+#define MPS_CMN_CTL_A 0x9000
+
+#define NUMPORTS_S 0
+#define NUMPORTS_M 0x3U
+#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
+
+#define MPS_INT_CAUSE_A 0x9008
+#define MPS_TX_INT_CAUSE_A 0x9408
+
+#define FRMERR_S 15
+#define FRMERR_V(x) ((x) << FRMERR_S)
+#define FRMERR_F FRMERR_V(1U)
+
+#define SECNTERR_S 14
+#define SECNTERR_V(x) ((x) << SECNTERR_S)
+#define SECNTERR_F SECNTERR_V(1U)
+
+#define BUBBLE_S 13
+#define BUBBLE_V(x) ((x) << BUBBLE_S)
+#define BUBBLE_F BUBBLE_V(1U)
+
+#define TXDESCFIFO_S 9
+#define TXDESCFIFO_M 0xfU
+#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
+
+#define TXDATAFIFO_S 5
+#define TXDATAFIFO_M 0xfU
+#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
+
+#define NCSIFIFO_S 4
+#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
+#define NCSIFIFO_F NCSIFIFO_V(1U)
+
+#define TPFIFO_S 0
+#define TPFIFO_M 0xfU
+#define TPFIFO_V(x) ((x) << TPFIFO_S)
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM_A 0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A 0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A 0x962c
#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
@@ -1027,294 +1707,851 @@
#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
-#define MPS_TRC_CFG 0x9800
-#define TRCFIFOEMPTY 0x00000010U
-#define TRCIGNOREDROPINPUT 0x00000008U
-#define TRCKEEPDUPLICATES 0x00000004U
-#define TRCEN 0x00000002U
-#define TRCMULTIFILTER 0x00000001U
-
-#define MPS_TRC_RSS_CONTROL 0x9808
-#define MPS_T5_TRC_RSS_CONTROL 0xa00c
-#define RSSCONTROL_MASK 0x00ff0000U
-#define RSSCONTROL_SHIFT 16
-#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
-#define QUEUENUMBER_MASK 0x0000ffffU
-#define QUEUENUMBER_SHIFT 0
-#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
-#define TFINVERTMATCH 0x01000000U
-#define TFPKTTOOLARGE 0x00800000U
-#define TFEN 0x00400000U
-#define TFPORT_MASK 0x003c0000U
-#define TFPORT_SHIFT 18
-#define TFPORT(x) ((x) << TFPORT_SHIFT)
-#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
-#define TFDROP 0x00020000U
-#define TFSOPEOPERR 0x00010000U
-#define TFLENGTH_MASK 0x00001f00U
-#define TFLENGTH_SHIFT 8
-#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
-#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
-#define TFOFFSET_MASK 0x0000001fU
-#define TFOFFSET_SHIFT 0
-#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
-#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
-#define TFMINPKTSIZE_MASK 0x01ff0000U
-#define TFMINPKTSIZE_SHIFT 16
-#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
-#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
-#define TFCAPTUREMAX_MASK 0x00003fffU
-#define TFCAPTUREMAX_SHIFT 0
-#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
-#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
-
-#define MPS_TRC_INT_CAUSE 0x985c
-#define MISCPERR 0x00000100U
-#define PKTFIFO 0x000000f0U
-#define FILTMEM 0x0000000fU
-
-#define MPS_TRC_FILTER0_MATCH 0x9c00
-#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
-#define MPS_TRC_FILTER1_MATCH 0x9d00
-#define MPS_CLS_INT_CAUSE 0xd028
-#define PLERRENB 0x00000008U
-#define HASHSRAM 0x00000004U
-#define MATCHTCAM 0x00000002U
-#define MATCHSRAM 0x00000001U
-
-#define MPS_RX_PERR_INT_CAUSE 0x11074
-
-#define CPL_INTR_CAUSE 0x19054
-#define CIM_OP_MAP_PERR 0x00000020U
-#define CIM_OVFL_ERROR 0x00000010U
-#define TP_FRAMING_ERROR 0x00000008U
-#define SGE_FRAMING_ERROR 0x00000004U
-#define CIM_FRAMING_ERROR 0x00000002U
-#define ZERO_SWITCH_ERROR 0x00000001U
-
-#define SMB_INT_CAUSE 0x19090
-#define MSTTXFIFOPARINT 0x00200000U
-#define MSTRXFIFOPARINT 0x00100000U
-#define SLVFIFOPARINT 0x00080000U
-
-#define ULP_RX_INT_CAUSE 0x19158
-#define ULP_RX_ISCSI_TAGMASK 0x19164
-#define ULP_RX_ISCSI_PSZ 0x19168
-#define HPZ3_MASK 0x0f000000U
-#define HPZ3_SHIFT 24
-#define HPZ3(x) ((x) << HPZ3_SHIFT)
-#define HPZ2_MASK 0x000f0000U
-#define HPZ2_SHIFT 16
-#define HPZ2(x) ((x) << HPZ2_SHIFT)
-#define HPZ1_MASK 0x00000f00U
-#define HPZ1_SHIFT 8
-#define HPZ1(x) ((x) << HPZ1_SHIFT)
-#define HPZ0_MASK 0x0000000fU
-#define HPZ0_SHIFT 0
-#define HPZ0(x) ((x) << HPZ0_SHIFT)
-
-#define ULP_RX_TDDP_PSZ 0x19178
-
-#define SF_DATA 0x193f8
-#define SF_OP 0x193fc
-#define SF_BUSY 0x80000000U
-#define SF_LOCK 0x00000010U
-#define SF_CONT 0x00000008U
-#define BYTECNT_MASK 0x00000006U
-#define BYTECNT_SHIFT 1
-#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
-#define OP_WR 0x00000001U
-
-#define PL_PF_INT_CAUSE 0x3c0
-#define PFSW 0x00000008U
-#define PFSGE 0x00000004U
-#define PFCIM 0x00000002U
-#define PFMPS 0x00000001U
-
-#define PL_PF_INT_ENABLE 0x3c4
-#define PL_PF_CTL 0x3c8
-#define SWINT 0x00000001U
-
-#define PL_WHOAMI 0x19400
-#define SOURCEPF_MASK 0x00000700U
-#define SOURCEPF_SHIFT 8
-#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
-#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
-#define ISVF 0x00000080U
-#define VFID_MASK 0x0000007fU
-#define VFID_SHIFT 0
-#define VFID(x) ((x) << VFID_SHIFT)
-#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
-
-#define PL_INT_CAUSE 0x1940c
-#define ULP_TX 0x08000000U
-#define SGE 0x04000000U
-#define HMA 0x02000000U
-#define CPL_SWITCH 0x01000000U
-#define ULP_RX 0x00800000U
-#define PM_RX 0x00400000U
-#define PM_TX 0x00200000U
-#define MA 0x00100000U
-#define TP 0x00080000U
-#define LE 0x00040000U
-#define EDC1 0x00020000U
-#define EDC0 0x00010000U
-#define MC 0x00008000U
-#define PCIE 0x00004000U
-#define PMU 0x00002000U
-#define XGMAC_KR1 0x00001000U
-#define XGMAC_KR0 0x00000800U
-#define XGMAC1 0x00000400U
-#define XGMAC0 0x00000200U
-#define SMB 0x00000100U
-#define SF 0x00000080U
-#define PL 0x00000040U
-#define NCSI 0x00000020U
-#define MPS 0x00000010U
-#define MI 0x00000008U
-#define DBG 0x00000004U
-#define I2CM 0x00000002U
-#define CIM 0x00000001U
-
-#define MC1 0x31
-#define PL_INT_ENABLE 0x19410
-#define PL_INT_MAP0 0x19414
-#define PL_RST 0x19428
-#define PIORST 0x00000002U
-#define PIORSTMODE 0x00000001U
-
-#define PL_PL_INT_CAUSE 0x19430
-#define FATALPERR 0x00000010U
-#define PERRVFID 0x00000001U
-
-#define PL_REV 0x1943c
-
-#define S_REV 0
-#define M_REV 0xfU
-#define V_REV(x) ((x) << S_REV)
-#define G_REV(x) (((x) >> S_REV) & M_REV)
-
-#define LE_DB_CONFIG 0x19c04
-#define HASHEN 0x00100000U
-
-#define LE_DB_SERVER_INDEX 0x19c18
-#define LE_DB_ACT_CNT_IPV4 0x19c20
-#define LE_DB_ACT_CNT_IPV6 0x19c24
-
-#define LE_DB_INT_CAUSE 0x19c3c
-#define REQQPARERR 0x00010000U
-#define UNKNOWNCMD 0x00008000U
-#define PARITYERR 0x00000040U
-#define LIPMISS 0x00000020U
-#define LIP0 0x00000010U
-
-#define LE_DB_TID_HASHBASE 0x19df8
-
-#define NCSI_INT_CAUSE 0x1a0d8
-#define CIM_DM_PRTY_ERR 0x00000100U
-#define MPS_DM_PRTY_ERR 0x00000080U
-#define TXFIFO_PRTY_ERR 0x00000002U
-#define RXFIFO_PRTY_ERR 0x00000001U
-
-#define XGMAC_PORT_CFG2 0x1018
-#define PATEN 0x00040000U
-#define MAGICEN 0x00020000U
-#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
-#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+#define MPS_TRC_CFG_A 0x9800
+
+#define TRCFIFOEMPTY_S 4
+#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
+#define TRCFIFOEMPTY_F TRCFIFOEMPTY_V(1U)
+
+#define TRCIGNOREDROPINPUT_S 3
+#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
+#define TRCIGNOREDROPINPUT_F TRCIGNOREDROPINPUT_V(1U)
+
+#define TRCKEEPDUPLICATES_S 2
+#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
+#define TRCKEEPDUPLICATES_F TRCKEEPDUPLICATES_V(1U)
+
+#define TRCEN_S 1
+#define TRCEN_V(x) ((x) << TRCEN_S)
+#define TRCEN_F TRCEN_V(1U)
+
+#define TRCMULTIFILTER_S 0
+#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
+#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U)
+
+#define MPS_TRC_RSS_CONTROL_A 0x9808
+#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c
+
+#define RSSCONTROL_S 16
+#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
+
+#define QUEUENUMBER_S 0
+#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+
+#define TP_RSS_CONFIG_A 0x7df0
+
+#define TNL4TUPENIPV6_S 31
+#define TNL4TUPENIPV6_V(x) ((x) << TNL4TUPENIPV6_S)
+#define TNL4TUPENIPV6_F TNL4TUPENIPV6_V(1U)
+
+#define TNL2TUPENIPV6_S 30
+#define TNL2TUPENIPV6_V(x) ((x) << TNL2TUPENIPV6_S)
+#define TNL2TUPENIPV6_F TNL2TUPENIPV6_V(1U)
+
+#define TNL4TUPENIPV4_S 29
+#define TNL4TUPENIPV4_V(x) ((x) << TNL4TUPENIPV4_S)
+#define TNL4TUPENIPV4_F TNL4TUPENIPV4_V(1U)
+
+#define TNL2TUPENIPV4_S 28
+#define TNL2TUPENIPV4_V(x) ((x) << TNL2TUPENIPV4_S)
+#define TNL2TUPENIPV4_F TNL2TUPENIPV4_V(1U)
+
+#define TNLTCPSEL_S 27
+#define TNLTCPSEL_V(x) ((x) << TNLTCPSEL_S)
+#define TNLTCPSEL_F TNLTCPSEL_V(1U)
+
+#define TNLIP6SEL_S 26
+#define TNLIP6SEL_V(x) ((x) << TNLIP6SEL_S)
+#define TNLIP6SEL_F TNLIP6SEL_V(1U)
+
+#define TNLVRTSEL_S 25
+#define TNLVRTSEL_V(x) ((x) << TNLVRTSEL_S)
+#define TNLVRTSEL_F TNLVRTSEL_V(1U)
+
+#define TNLMAPEN_S 24
+#define TNLMAPEN_V(x) ((x) << TNLMAPEN_S)
+#define TNLMAPEN_F TNLMAPEN_V(1U)
+
+#define OFDHASHSAVE_S 19
+#define OFDHASHSAVE_V(x) ((x) << OFDHASHSAVE_S)
+#define OFDHASHSAVE_F OFDHASHSAVE_V(1U)
+
+#define OFDVRTSEL_S 18
+#define OFDVRTSEL_V(x) ((x) << OFDVRTSEL_S)
+#define OFDVRTSEL_F OFDVRTSEL_V(1U)
+
+#define OFDMAPEN_S 17
+#define OFDMAPEN_V(x) ((x) << OFDMAPEN_S)
+#define OFDMAPEN_F OFDMAPEN_V(1U)
+
+#define OFDLKPEN_S 16
+#define OFDLKPEN_V(x) ((x) << OFDLKPEN_S)
+#define OFDLKPEN_F OFDLKPEN_V(1U)
+
+#define SYN4TUPENIPV6_S 15
+#define SYN4TUPENIPV6_V(x) ((x) << SYN4TUPENIPV6_S)
+#define SYN4TUPENIPV6_F SYN4TUPENIPV6_V(1U)
+
+#define SYN2TUPENIPV6_S 14
+#define SYN2TUPENIPV6_V(x) ((x) << SYN2TUPENIPV6_S)
+#define SYN2TUPENIPV6_F SYN2TUPENIPV6_V(1U)
+
+#define SYN4TUPENIPV4_S 13
+#define SYN4TUPENIPV4_V(x) ((x) << SYN4TUPENIPV4_S)
+#define SYN4TUPENIPV4_F SYN4TUPENIPV4_V(1U)
+
+#define SYN2TUPENIPV4_S 12
+#define SYN2TUPENIPV4_V(x) ((x) << SYN2TUPENIPV4_S)
+#define SYN2TUPENIPV4_F SYN2TUPENIPV4_V(1U)
+
+#define SYNIP6SEL_S 11
+#define SYNIP6SEL_V(x) ((x) << SYNIP6SEL_S)
+#define SYNIP6SEL_F SYNIP6SEL_V(1U)
+
+#define SYNVRTSEL_S 10
+#define SYNVRTSEL_V(x) ((x) << SYNVRTSEL_S)
+#define SYNVRTSEL_F SYNVRTSEL_V(1U)
+
+#define SYNMAPEN_S 9
+#define SYNMAPEN_V(x) ((x) << SYNMAPEN_S)
+#define SYNMAPEN_F SYNMAPEN_V(1U)
+
+#define SYNLKPEN_S 8
+#define SYNLKPEN_V(x) ((x) << SYNLKPEN_S)
+#define SYNLKPEN_F SYNLKPEN_V(1U)
+
+#define CHANNELENABLE_S 7
+#define CHANNELENABLE_V(x) ((x) << CHANNELENABLE_S)
+#define CHANNELENABLE_F CHANNELENABLE_V(1U)
+
+#define PORTENABLE_S 6
+#define PORTENABLE_V(x) ((x) << PORTENABLE_S)
+#define PORTENABLE_F PORTENABLE_V(1U)
+
+#define TNLALLLOOKUP_S 5
+#define TNLALLLOOKUP_V(x) ((x) << TNLALLLOOKUP_S)
+#define TNLALLLOOKUP_F TNLALLLOOKUP_V(1U)
+
+#define VIRTENABLE_S 4
+#define VIRTENABLE_V(x) ((x) << VIRTENABLE_S)
+#define VIRTENABLE_F VIRTENABLE_V(1U)
+
+#define CONGESTIONENABLE_S 3
+#define CONGESTIONENABLE_V(x) ((x) << CONGESTIONENABLE_S)
+#define CONGESTIONENABLE_F CONGESTIONENABLE_V(1U)
+
+#define HASHTOEPLITZ_S 2
+#define HASHTOEPLITZ_V(x) ((x) << HASHTOEPLITZ_S)
+#define HASHTOEPLITZ_F HASHTOEPLITZ_V(1U)
+
+#define UDPENABLE_S 1
+#define UDPENABLE_V(x) ((x) << UDPENABLE_S)
+#define UDPENABLE_F UDPENABLE_V(1U)
+
+#define DISABLE_S 0
+#define DISABLE_V(x) ((x) << DISABLE_S)
+#define DISABLE_F DISABLE_V(1U)
+
+#define TP_RSS_CONFIG_TNL_A 0x7df4
+
+#define MASKSIZE_S 28
+#define MASKSIZE_M 0xfU
+#define MASKSIZE_V(x) ((x) << MASKSIZE_S)
+#define MASKSIZE_G(x) (((x) >> MASKSIZE_S) & MASKSIZE_M)
+
+#define MASKFILTER_S 16
+#define MASKFILTER_M 0x7ffU
+#define MASKFILTER_V(x) ((x) << MASKFILTER_S)
+#define MASKFILTER_G(x) (((x) >> MASKFILTER_S) & MASKFILTER_M)
+
+#define USEWIRECH_S 0
+#define USEWIRECH_V(x) ((x) << USEWIRECH_S)
+#define USEWIRECH_F USEWIRECH_V(1U)
+
+#define HASHALL_S 2
+#define HASHALL_V(x) ((x) << HASHALL_S)
+#define HASHALL_F HASHALL_V(1U)
+
+#define HASHETH_S 1
+#define HASHETH_V(x) ((x) << HASHETH_S)
+#define HASHETH_F HASHETH_V(1U)
+
+#define TP_RSS_CONFIG_OFD_A 0x7df8
+
+#define RRCPLMAPEN_S 20
+#define RRCPLMAPEN_V(x) ((x) << RRCPLMAPEN_S)
+#define RRCPLMAPEN_F RRCPLMAPEN_V(1U)
+
+#define RRCPLQUEWIDTH_S 16
+#define RRCPLQUEWIDTH_M 0xfU
+#define RRCPLQUEWIDTH_V(x) ((x) << RRCPLQUEWIDTH_S)
+#define RRCPLQUEWIDTH_G(x) (((x) >> RRCPLQUEWIDTH_S) & RRCPLQUEWIDTH_M)
+
+#define TP_RSS_CONFIG_SYN_A 0x7dfc
+#define TP_RSS_CONFIG_VRT_A 0x7e00
+
+#define VFRDRG_S 25
+#define VFRDRG_V(x) ((x) << VFRDRG_S)
+#define VFRDRG_F VFRDRG_V(1U)
+
+#define VFRDEN_S 24
+#define VFRDEN_V(x) ((x) << VFRDEN_S)
+#define VFRDEN_F VFRDEN_V(1U)
+
+#define VFPERREN_S 23
+#define VFPERREN_V(x) ((x) << VFPERREN_S)
+#define VFPERREN_F VFPERREN_V(1U)
+
+#define KEYPERREN_S 22
+#define KEYPERREN_V(x) ((x) << KEYPERREN_S)
+#define KEYPERREN_F KEYPERREN_V(1U)
+
+#define DISABLEVLAN_S 21
+#define DISABLEVLAN_V(x) ((x) << DISABLEVLAN_S)
+#define DISABLEVLAN_F DISABLEVLAN_V(1U)
+
+#define ENABLEUP0_S 20
+#define ENABLEUP0_V(x) ((x) << ENABLEUP0_S)
+#define ENABLEUP0_F ENABLEUP0_V(1U)
+
+#define HASHDELAY_S 16
+#define HASHDELAY_M 0xfU
+#define HASHDELAY_V(x) ((x) << HASHDELAY_S)
+#define HASHDELAY_G(x) (((x) >> HASHDELAY_S) & HASHDELAY_M)
+
+#define VFWRADDR_S 8
+#define VFWRADDR_M 0x7fU
+#define VFWRADDR_V(x) ((x) << VFWRADDR_S)
+#define VFWRADDR_G(x) (((x) >> VFWRADDR_S) & VFWRADDR_M)
+
+#define KEYMODE_S 6
+#define KEYMODE_M 0x3U
+#define KEYMODE_V(x) ((x) << KEYMODE_S)
+#define KEYMODE_G(x) (((x) >> KEYMODE_S) & KEYMODE_M)
+
+#define VFWREN_S 5
+#define VFWREN_V(x) ((x) << VFWREN_S)
+#define VFWREN_F VFWREN_V(1U)
+
+#define KEYWREN_S 4
+#define KEYWREN_V(x) ((x) << KEYWREN_S)
+#define KEYWREN_F KEYWREN_V(1U)
+
+#define KEYWRADDR_S 0
+#define KEYWRADDR_M 0xfU
+#define KEYWRADDR_V(x) ((x) << KEYWRADDR_S)
+#define KEYWRADDR_G(x) (((x) >> KEYWRADDR_S) & KEYWRADDR_M)
+
+#define KEYWRADDRX_S 30
+#define KEYWRADDRX_M 0x3U
+#define KEYWRADDRX_V(x) ((x) << KEYWRADDRX_S)
+#define KEYWRADDRX_G(x) (((x) >> KEYWRADDRX_S) & KEYWRADDRX_M)
+
+#define KEYEXTEND_S 26
+#define KEYEXTEND_V(x) ((x) << KEYEXTEND_S)
+#define KEYEXTEND_F KEYEXTEND_V(1U)
+
+#define LKPIDXSIZE_S 24
+#define LKPIDXSIZE_M 0x3U
+#define LKPIDXSIZE_V(x) ((x) << LKPIDXSIZE_S)
+#define LKPIDXSIZE_G(x) (((x) >> LKPIDXSIZE_S) & LKPIDXSIZE_M)
+
+#define TP_RSS_VFL_CONFIG_A 0x3a
+#define TP_RSS_VFH_CONFIG_A 0x3b
+
+#define ENABLEUDPHASH_S 31
+#define ENABLEUDPHASH_V(x) ((x) << ENABLEUDPHASH_S)
+#define ENABLEUDPHASH_F ENABLEUDPHASH_V(1U)
+
+#define VFUPEN_S 30
+#define VFUPEN_V(x) ((x) << VFUPEN_S)
+#define VFUPEN_F VFUPEN_V(1U)
+
+#define VFVLNEX_S 28
+#define VFVLNEX_V(x) ((x) << VFVLNEX_S)
+#define VFVLNEX_F VFVLNEX_V(1U)
+
+#define VFPRTEN_S 27
+#define VFPRTEN_V(x) ((x) << VFPRTEN_S)
+#define VFPRTEN_F VFPRTEN_V(1U)
+
+#define VFCHNEN_S 26
+#define VFCHNEN_V(x) ((x) << VFCHNEN_S)
+#define VFCHNEN_F VFCHNEN_V(1U)
+
+#define DEFAULTQUEUE_S 16
+#define DEFAULTQUEUE_M 0x3ffU
+#define DEFAULTQUEUE_G(x) (((x) >> DEFAULTQUEUE_S) & DEFAULTQUEUE_M)
+
+#define VFIP6TWOTUPEN_S 6
+#define VFIP6TWOTUPEN_V(x) ((x) << VFIP6TWOTUPEN_S)
+#define VFIP6TWOTUPEN_F VFIP6TWOTUPEN_V(1U)
+
+#define VFIP4FOURTUPEN_S 5
+#define VFIP4FOURTUPEN_V(x) ((x) << VFIP4FOURTUPEN_S)
+#define VFIP4FOURTUPEN_F VFIP4FOURTUPEN_V(1U)
+
+#define VFIP4TWOTUPEN_S 4
+#define VFIP4TWOTUPEN_V(x) ((x) << VFIP4TWOTUPEN_S)
+#define VFIP4TWOTUPEN_F VFIP4TWOTUPEN_V(1U)
+
+#define KEYINDEX_S 0
+#define KEYINDEX_M 0xfU
+#define KEYINDEX_G(x) (((x) >> KEYINDEX_S) & KEYINDEX_M)
+
+#define MAPENABLE_S 31
+#define MAPENABLE_V(x) ((x) << MAPENABLE_S)
+#define MAPENABLE_F MAPENABLE_V(1U)
+
+#define CHNENABLE_S 30
+#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
+#define CHNENABLE_F CHNENABLE_V(1U)
+
+#define PRTENABLE_S 29
+#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
+#define PRTENABLE_F PRTENABLE_V(1U)
+
+#define UDPFOURTUPEN_S 28
+#define UDPFOURTUPEN_V(x) ((x) << UDPFOURTUPEN_S)
+#define UDPFOURTUPEN_F UDPFOURTUPEN_V(1U)
+
+#define IP6FOURTUPEN_S 27
+#define IP6FOURTUPEN_V(x) ((x) << IP6FOURTUPEN_S)
+#define IP6FOURTUPEN_F IP6FOURTUPEN_V(1U)
-#define XGMAC_PORT_EPIO_DATA0 0x10c0
-#define XGMAC_PORT_EPIO_DATA1 0x10c4
-#define XGMAC_PORT_EPIO_DATA2 0x10c8
-#define XGMAC_PORT_EPIO_DATA3 0x10cc
-#define XGMAC_PORT_EPIO_OP 0x10d0
-#define EPIOWR 0x00000100U
-#define ADDRESS_MASK 0x000000ffU
-#define ADDRESS_SHIFT 0
-#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+#define IP6TWOTUPEN_S 26
+#define IP6TWOTUPEN_V(x) ((x) << IP6TWOTUPEN_S)
+#define IP6TWOTUPEN_F IP6TWOTUPEN_V(1U)
-#define MAC_PORT_INT_CAUSE 0x8dc
-#define XGMAC_PORT_INT_CAUSE 0x10dc
+#define IP4FOURTUPEN_S 25
+#define IP4FOURTUPEN_V(x) ((x) << IP4FOURTUPEN_S)
+#define IP4FOURTUPEN_F IP4FOURTUPEN_V(1U)
-#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+#define IP4TWOTUPEN_S 24
+#define IP4TWOTUPEN_V(x) ((x) << IP4TWOTUPEN_S)
+#define IP4TWOTUPEN_F IP4TWOTUPEN_V(1U)
-#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+#define IVFWIDTH_S 20
+#define IVFWIDTH_M 0xfU
+#define IVFWIDTH_V(x) ((x) << IVFWIDTH_S)
+#define IVFWIDTH_G(x) (((x) >> IVFWIDTH_S) & IVFWIDTH_M)
-#define S_TX_MOD_QUEUE_REQ_MAP 0
-#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
-#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define CH1DEFAULTQUEUE_S 10
+#define CH1DEFAULTQUEUE_M 0x3ffU
+#define CH1DEFAULTQUEUE_V(x) ((x) << CH1DEFAULTQUEUE_S)
+#define CH1DEFAULTQUEUE_G(x) (((x) >> CH1DEFAULTQUEUE_S) & CH1DEFAULTQUEUE_M)
-#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+#define CH0DEFAULTQUEUE_S 0
+#define CH0DEFAULTQUEUE_M 0x3ffU
+#define CH0DEFAULTQUEUE_V(x) ((x) << CH0DEFAULTQUEUE_S)
+#define CH0DEFAULTQUEUE_G(x) (((x) >> CH0DEFAULTQUEUE_S) & CH0DEFAULTQUEUE_M)
-#define S_TX_MODQ_WEIGHT3 24
-#define M_TX_MODQ_WEIGHT3 0xffU
-#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+#define VFLKPIDX_S 8
+#define VFLKPIDX_M 0xffU
+#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
-#define S_TX_MODQ_WEIGHT2 16
-#define M_TX_MODQ_WEIGHT2 0xffU
-#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+#define TP_RSS_CONFIG_CNG_A 0x7e04
+#define TP_RSS_SECRET_KEY0_A 0x40
+#define TP_RSS_PF0_CONFIG_A 0x30
+#define TP_RSS_PF_MAP_A 0x38
+#define TP_RSS_PF_MSK_A 0x39
-#define S_TX_MODQ_WEIGHT1 8
-#define M_TX_MODQ_WEIGHT1 0xffU
-#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+#define PF1LKPIDX_S 3
-#define S_TX_MODQ_WEIGHT0 0
-#define M_TX_MODQ_WEIGHT0 0xffU
-#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+#define PF0LKPIDX_M 0x7U
-#define A_TP_TX_SCHED_HDR 0x23
+#define PF1MSKSIZE_S 4
+#define PF1MSKSIZE_M 0xfU
-#define A_TP_TX_SCHED_FIFO 0x24
+#define CHNCOUNT3_S 31
+#define CHNCOUNT3_V(x) ((x) << CHNCOUNT3_S)
+#define CHNCOUNT3_F CHNCOUNT3_V(1U)
-#define A_TP_TX_SCHED_PCMD 0x25
+#define CHNCOUNT2_S 30
+#define CHNCOUNT2_V(x) ((x) << CHNCOUNT2_S)
+#define CHNCOUNT2_F CHNCOUNT2_V(1U)
-#define S_VNIC 11
-#define V_VNIC(x) ((x) << S_VNIC)
-#define F_VNIC V_VNIC(1U)
+#define CHNCOUNT1_S 29
+#define CHNCOUNT1_V(x) ((x) << CHNCOUNT1_S)
+#define CHNCOUNT1_F CHNCOUNT1_V(1U)
-#define S_FRAGMENTATION 9
-#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
-#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+#define CHNCOUNT0_S 28
+#define CHNCOUNT0_V(x) ((x) << CHNCOUNT0_S)
+#define CHNCOUNT0_F CHNCOUNT0_V(1U)
-#define S_MPSHITTYPE 8
-#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
-#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+#define CHNUNDFLOW3_S 27
+#define CHNUNDFLOW3_V(x) ((x) << CHNUNDFLOW3_S)
+#define CHNUNDFLOW3_F CHNUNDFLOW3_V(1U)
-#define S_MACMATCH 7
-#define V_MACMATCH(x) ((x) << S_MACMATCH)
-#define F_MACMATCH V_MACMATCH(1U)
+#define CHNUNDFLOW2_S 26
+#define CHNUNDFLOW2_V(x) ((x) << CHNUNDFLOW2_S)
+#define CHNUNDFLOW2_F CHNUNDFLOW2_V(1U)
-#define S_ETHERTYPE 6
-#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
-#define F_ETHERTYPE V_ETHERTYPE(1U)
+#define CHNUNDFLOW1_S 25
+#define CHNUNDFLOW1_V(x) ((x) << CHNUNDFLOW1_S)
+#define CHNUNDFLOW1_F CHNUNDFLOW1_V(1U)
-#define S_PROTOCOL 5
-#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
-#define F_PROTOCOL V_PROTOCOL(1U)
+#define CHNUNDFLOW0_S 24
+#define CHNUNDFLOW0_V(x) ((x) << CHNUNDFLOW0_S)
+#define CHNUNDFLOW0_F CHNUNDFLOW0_V(1U)
-#define S_TOS 4
-#define V_TOS(x) ((x) << S_TOS)
-#define F_TOS V_TOS(1U)
+#define RSTCHN3_S 19
+#define RSTCHN3_V(x) ((x) << RSTCHN3_S)
+#define RSTCHN3_F RSTCHN3_V(1U)
-#define S_VLAN 3
-#define V_VLAN(x) ((x) << S_VLAN)
-#define F_VLAN V_VLAN(1U)
+#define RSTCHN2_S 18
+#define RSTCHN2_V(x) ((x) << RSTCHN2_S)
+#define RSTCHN2_F RSTCHN2_V(1U)
-#define S_VNIC_ID 2
-#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
-#define F_VNIC_ID V_VNIC_ID(1U)
+#define RSTCHN1_S 17
+#define RSTCHN1_V(x) ((x) << RSTCHN1_S)
+#define RSTCHN1_F RSTCHN1_V(1U)
-#define S_PORT 1
-#define V_PORT(x) ((x) << S_PORT)
-#define F_PORT V_PORT(1U)
+#define RSTCHN0_S 16
+#define RSTCHN0_V(x) ((x) << RSTCHN0_S)
+#define RSTCHN0_F RSTCHN0_V(1U)
-#define S_FCOE 0
-#define V_FCOE(x) ((x) << S_FCOE)
-#define F_FCOE V_FCOE(1U)
+#define UPDVLD_S 15
+#define UPDVLD_V(x) ((x) << UPDVLD_S)
+#define UPDVLD_F UPDVLD_V(1U)
+
+#define XOFF_S 14
+#define XOFF_V(x) ((x) << XOFF_S)
+#define XOFF_F XOFF_V(1U)
+
+#define UPDCHN3_S 13
+#define UPDCHN3_V(x) ((x) << UPDCHN3_S)
+#define UPDCHN3_F UPDCHN3_V(1U)
+
+#define UPDCHN2_S 12
+#define UPDCHN2_V(x) ((x) << UPDCHN2_S)
+#define UPDCHN2_F UPDCHN2_V(1U)
+
+#define UPDCHN1_S 11
+#define UPDCHN1_V(x) ((x) << UPDCHN1_S)
+#define UPDCHN1_F UPDCHN1_V(1U)
+
+#define UPDCHN0_S 10
+#define UPDCHN0_V(x) ((x) << UPDCHN0_S)
+#define UPDCHN0_F UPDCHN0_V(1U)
+
+#define QUEUE_S 0
+#define QUEUE_M 0x3ffU
+#define QUEUE_V(x) ((x) << QUEUE_S)
+#define QUEUE_G(x) (((x) >> QUEUE_S) & QUEUE_M)
+
+#define MPS_TRC_INT_CAUSE_A 0x985c
+
+#define MISCPERR_S 8
+#define MISCPERR_V(x) ((x) << MISCPERR_S)
+#define MISCPERR_F MISCPERR_V(1U)
+
+#define PKTFIFO_S 4
+#define PKTFIFO_M 0xfU
+#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
+
+#define FILTMEM_S 0
+#define FILTMEM_M 0xfU
+#define FILTMEM_V(x) ((x) << FILTMEM_S)
+
+#define MPS_CLS_INT_CAUSE_A 0xd028
+
+#define HASHSRAM_S 2
+#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
+#define HASHSRAM_F HASHSRAM_V(1U)
+
+#define MATCHTCAM_S 1
+#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
+#define MATCHTCAM_F MATCHTCAM_V(1U)
+
+#define MATCHSRAM_S 0
+#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
+#define MATCHSRAM_F MATCHSRAM_V(1U)
+
+#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+
+#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_X_L_A 0xf008
+
+#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define MPS_CLS_SRAM_L_A 0xe000
+#define MPS_CLS_SRAM_H_A 0xe004
+
+#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define MULTILISTEN0_S 25
+
+#define REPLICATE_S 11
+#define REPLICATE_V(x) ((x) << REPLICATE_S)
+#define REPLICATE_F REPLICATE_V(1U)
+
+#define PF_S 8
+#define PF_M 0x7U
+#define PF_G(x) (((x) >> PF_S) & PF_M)
+
+#define VF_VALID_S 7
+#define VF_VALID_V(x) ((x) << VF_VALID_S)
+#define VF_VALID_F VF_VALID_V(1U)
+
+#define VF_S 0
+#define VF_M 0x7fU
+#define VF_G(x) (((x) >> VF_S) & VF_M)
+
+#define SRAM_PRIO3_S 22
+#define SRAM_PRIO3_M 0x7U
+#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
+
+#define SRAM_PRIO2_S 19
+#define SRAM_PRIO2_M 0x7U
+#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
+
+#define SRAM_PRIO1_S 16
+#define SRAM_PRIO1_M 0x7U
+#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
+
+#define SRAM_PRIO0_S 13
+#define SRAM_PRIO0_M 0x7U
+#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
+
+#define SRAM_VLD_S 12
+#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
+#define SRAM_VLD_F SRAM_VLD_V(1U)
+
+#define PORTMAP_S 0
+#define PORTMAP_M 0xfU
+#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
+
+#define CPL_INTR_CAUSE_A 0x19054
+
+#define CIM_OP_MAP_PERR_S 5
+#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
+#define CIM_OP_MAP_PERR_F CIM_OP_MAP_PERR_V(1U)
+
+#define CIM_OVFL_ERROR_S 4
+#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
+#define CIM_OVFL_ERROR_F CIM_OVFL_ERROR_V(1U)
+
+#define TP_FRAMING_ERROR_S 3
+#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
+#define TP_FRAMING_ERROR_F TP_FRAMING_ERROR_V(1U)
+
+#define SGE_FRAMING_ERROR_S 2
+#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
+#define SGE_FRAMING_ERROR_F SGE_FRAMING_ERROR_V(1U)
+
+#define CIM_FRAMING_ERROR_S 1
+#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
+#define CIM_FRAMING_ERROR_F CIM_FRAMING_ERROR_V(1U)
+
+#define ZERO_SWITCH_ERROR_S 0
+#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
+#define ZERO_SWITCH_ERROR_F ZERO_SWITCH_ERROR_V(1U)
+
+#define SMB_INT_CAUSE_A 0x19090
+
+#define MSTTXFIFOPARINT_S 21
+#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
+#define MSTTXFIFOPARINT_F MSTTXFIFOPARINT_V(1U)
+
+#define MSTRXFIFOPARINT_S 20
+#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
+#define MSTRXFIFOPARINT_F MSTRXFIFOPARINT_V(1U)
+
+#define SLVFIFOPARINT_S 19
+#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
+#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
+
+#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_TAGMASK_A 0x19164
+#define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_LA_CTL_A 0x1923c
+#define ULP_RX_LA_RDPTR_A 0x19240
+#define ULP_RX_LA_RDDATA_A 0x19244
+#define ULP_RX_LA_WRPTR_A 0x19248
+
+#define HPZ3_S 24
+#define HPZ3_V(x) ((x) << HPZ3_S)
+
+#define HPZ2_S 16
+#define HPZ2_V(x) ((x) << HPZ2_S)
+
+#define HPZ1_S 8
+#define HPZ1_V(x) ((x) << HPZ1_S)
+
+#define HPZ0_S 0
+#define HPZ0_V(x) ((x) << HPZ0_S)
+
+#define ULP_RX_TDDP_PSZ_A 0x19178
+
+/* registers for module SF */
+#define SF_DATA_A 0x193f8
+#define SF_OP_A 0x193fc
+
+#define SF_BUSY_S 31
+#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
+#define SF_BUSY_F SF_BUSY_V(1U)
+
+#define SF_LOCK_S 4
+#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
+#define SF_LOCK_F SF_LOCK_V(1U)
+
+#define SF_CONT_S 3
+#define SF_CONT_V(x) ((x) << SF_CONT_S)
+#define SF_CONT_F SF_CONT_V(1U)
+
+#define BYTECNT_S 1
+#define BYTECNT_V(x) ((x) << BYTECNT_S)
+
+#define OP_S 0
+#define OP_V(x) ((x) << OP_S)
+#define OP_F OP_V(1U)
+
+#define PL_PF_INT_CAUSE_A 0x3c0
+
+#define PFSW_S 3
+#define PFSW_V(x) ((x) << PFSW_S)
+#define PFSW_F PFSW_V(1U)
+
+#define PFCIM_S 1
+#define PFCIM_V(x) ((x) << PFCIM_S)
+#define PFCIM_F PFCIM_V(1U)
+
+#define PL_PF_INT_ENABLE_A 0x3c4
+#define PL_PF_CTL_A 0x3c8
+
+#define PL_WHOAMI_A 0x19400
+
+#define SOURCEPF_S 8
+#define SOURCEPF_M 0x7U
+#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+
+#define PL_INT_CAUSE_A 0x1940c
+
+#define ULP_TX_S 27
+#define ULP_TX_V(x) ((x) << ULP_TX_S)
+#define ULP_TX_F ULP_TX_V(1U)
+
+#define SGE_S 26
+#define SGE_V(x) ((x) << SGE_S)
+#define SGE_F SGE_V(1U)
+
+#define CPL_SWITCH_S 24
+#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
+#define CPL_SWITCH_F CPL_SWITCH_V(1U)
+
+#define ULP_RX_S 23
+#define ULP_RX_V(x) ((x) << ULP_RX_S)
+#define ULP_RX_F ULP_RX_V(1U)
+
+#define PM_RX_S 22
+#define PM_RX_V(x) ((x) << PM_RX_S)
+#define PM_RX_F PM_RX_V(1U)
+
+#define PM_TX_S 21
+#define PM_TX_V(x) ((x) << PM_TX_S)
+#define PM_TX_F PM_TX_V(1U)
+
+#define MA_S 20
+#define MA_V(x) ((x) << MA_S)
+#define MA_F MA_V(1U)
+
+#define TP_S 19
+#define TP_V(x) ((x) << TP_S)
+#define TP_F TP_V(1U)
+
+#define LE_S 18
+#define LE_V(x) ((x) << LE_S)
+#define LE_F LE_V(1U)
+
+#define EDC1_S 17
+#define EDC1_V(x) ((x) << EDC1_S)
+#define EDC1_F EDC1_V(1U)
+
+#define EDC0_S 16
+#define EDC0_V(x) ((x) << EDC0_S)
+#define EDC0_F EDC0_V(1U)
+
+#define MC_S 15
+#define MC_V(x) ((x) << MC_S)
+#define MC_F MC_V(1U)
+
+#define PCIE_S 14
+#define PCIE_V(x) ((x) << PCIE_S)
+#define PCIE_F PCIE_V(1U)
+
+#define XGMAC_KR1_S 12
+#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
+#define XGMAC_KR1_F XGMAC_KR1_V(1U)
+
+#define XGMAC_KR0_S 11
+#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
+#define XGMAC_KR0_F XGMAC_KR0_V(1U)
+
+#define XGMAC1_S 10
+#define XGMAC1_V(x) ((x) << XGMAC1_S)
+#define XGMAC1_F XGMAC1_V(1U)
+
+#define XGMAC0_S 9
+#define XGMAC0_V(x) ((x) << XGMAC0_S)
+#define XGMAC0_F XGMAC0_V(1U)
+
+#define SMB_S 8
+#define SMB_V(x) ((x) << SMB_S)
+#define SMB_F SMB_V(1U)
+
+#define SF_S 7
+#define SF_V(x) ((x) << SF_S)
+#define SF_F SF_V(1U)
+
+#define PL_S 6
+#define PL_V(x) ((x) << PL_S)
+#define PL_F PL_V(1U)
+
+#define NCSI_S 5
+#define NCSI_V(x) ((x) << NCSI_S)
+#define NCSI_F NCSI_V(1U)
+
+#define MPS_S 4
+#define MPS_V(x) ((x) << MPS_S)
+#define MPS_F MPS_V(1U)
+
+#define CIM_S 0
+#define CIM_V(x) ((x) << CIM_S)
+#define CIM_F CIM_V(1U)
+
+#define MC1_S 31
+
+#define PL_INT_ENABLE_A 0x19410
+#define PL_INT_MAP0_A 0x19414
+#define PL_RST_A 0x19428
+
+#define PIORST_S 1
+#define PIORST_V(x) ((x) << PIORST_S)
+#define PIORST_F PIORST_V(1U)
+
+#define PIORSTMODE_S 0
+#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
+#define PIORSTMODE_F PIORSTMODE_V(1U)
+
+#define PL_PL_INT_CAUSE_A 0x19430
+
+#define FATALPERR_S 4
+#define FATALPERR_V(x) ((x) << FATALPERR_S)
+#define FATALPERR_F FATALPERR_V(1U)
+
+#define PERRVFID_S 0
+#define PERRVFID_V(x) ((x) << PERRVFID_S)
+#define PERRVFID_F PERRVFID_V(1U)
+
+#define PL_REV_A 0x1943c
+
+#define REV_S 0
+#define REV_M 0xfU
+#define REV_V(x) ((x) << REV_S)
+#define REV_G(x) (((x) >> REV_S) & REV_M)
+
+#define LE_DB_INT_CAUSE_A 0x19c3c
+
+#define REQQPARERR_S 16
+#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
+#define REQQPARERR_F REQQPARERR_V(1U)
+
+#define UNKNOWNCMD_S 15
+#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
+#define UNKNOWNCMD_F UNKNOWNCMD_V(1U)
+
+#define PARITYERR_S 6
+#define PARITYERR_V(x) ((x) << PARITYERR_S)
+#define PARITYERR_F PARITYERR_V(1U)
+
+#define LIPMISS_S 5
+#define LIPMISS_V(x) ((x) << LIPMISS_S)
+#define LIPMISS_F LIPMISS_V(1U)
+
+#define LIP0_S 4
+#define LIP0_V(x) ((x) << LIP0_S)
+#define LIP0_F LIP0_V(1U)
+
+#define NCSI_INT_CAUSE_A 0x1a0d8
+
+#define CIM_DM_PRTY_ERR_S 8
+#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
+#define CIM_DM_PRTY_ERR_F CIM_DM_PRTY_ERR_V(1U)
+
+#define MPS_DM_PRTY_ERR_S 7
+#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
+#define MPS_DM_PRTY_ERR_F MPS_DM_PRTY_ERR_V(1U)
+
+#define TXFIFO_PRTY_ERR_S 1
+#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
+#define TXFIFO_PRTY_ERR_F TXFIFO_PRTY_ERR_V(1U)
+
+#define RXFIFO_PRTY_ERR_S 0
+#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
+#define RXFIFO_PRTY_ERR_F RXFIFO_PRTY_ERR_V(1U)
+
+#define XGMAC_PORT_CFG2_A 0x1018
+
+#define PATEN_S 18
+#define PATEN_V(x) ((x) << PATEN_S)
+#define PATEN_F PATEN_V(1U)
+
+#define MAGICEN_S 17
+#define MAGICEN_V(x) ((x) << MAGICEN_S)
+#define MAGICEN_F MAGICEN_V(1U)
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
+#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
+#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
+#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
+#define XGMAC_PORT_EPIO_OP_A 0x10d0
+
+#define EPIOWR_S 8
+#define EPIOWR_V(x) ((x) << EPIOWR_S)
+#define EPIOWR_F EPIOWR_V(1U)
+
+#define ADDRESS_S 0
+#define ADDRESS_V(x) ((x) << ADDRESS_S)
+
+#define MAC_PORT_INT_CAUSE_A 0x8dc
+#define XGMAC_PORT_INT_CAUSE_A 0x10dc
+
+#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
+
+#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
+#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
+
+#define TX_MOD_QUEUE_REQ_MAP_S 0
+#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
+
+#define TX_MODQ_WEIGHT3_S 24
+#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
+
+#define TX_MODQ_WEIGHT2_S 16
+#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
+
+#define TX_MODQ_WEIGHT1_S 8
+#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
+
+#define TX_MODQ_WEIGHT0_S 0
+#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
+
+#define TP_TX_SCHED_HDR_A 0x23
+#define TP_TX_SCHED_FIFO_A 0x24
+#define TP_TX_SCHED_PCMD_A 0x25
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1329,62 +2566,149 @@
#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
-#define MC_P_BIST_CMD 0x41400
-#define MC_P_BIST_CMD_ADDR 0x41404
-#define MC_P_BIST_CMD_LEN 0x41408
-#define MC_P_BIST_DATA_PATTERN 0x4140c
-#define MC_P_BIST_STATUS_RDATA 0x41488
-#define EDC_T50_BASE_ADDR 0x50000
-#define EDC_H_BIST_CMD 0x50004
-#define EDC_H_BIST_CMD_ADDR 0x50008
-#define EDC_H_BIST_CMD_LEN 0x5000c
-#define EDC_H_BIST_DATA_PATTERN 0x50010
-#define EDC_H_BIST_STATUS_RDATA 0x50028
-
-#define EDC_T51_BASE_ADDR 0x50800
+#define MC_P_BIST_CMD_A 0x41400
+#define MC_P_BIST_CMD_ADDR_A 0x41404
+#define MC_P_BIST_CMD_LEN_A 0x41408
+#define MC_P_BIST_DATA_PATTERN_A 0x4140c
+#define MC_P_BIST_STATUS_RDATA_A 0x41488
+
+#define EDC_T50_BASE_ADDR 0x50000
+
+#define EDC_H_BIST_CMD_A 0x50004
+#define EDC_H_BIST_CMD_ADDR_A 0x50008
+#define EDC_H_BIST_CMD_LEN_A 0x5000c
+#define EDC_H_BIST_DATA_PATTERN_A 0x50010
+#define EDC_H_BIST_STATUS_RDATA_A 0x50028
+
+#define EDC_T51_BASE_ADDR 0x50800
+
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
-#define A_PL_VF_REV 0x4
-#define A_PL_VF_WHOAMI 0x0
-#define A_PL_VF_REVISION 0x8
+#define PL_VF_REV_A 0x4
+#define PL_VF_WHOAMI_A 0x0
+#define PL_VF_REVISION_A 0x8
-#define S_CHIPID 4
-#define M_CHIPID 0xfU
-#define V_CHIPID(x) ((x) << S_CHIPID)
-#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+/* registers for module CIM */
+#define CIM_HOST_ACC_CTRL_A 0x7b50
+#define CIM_HOST_ACC_DATA_A 0x7b54
+#define UP_UP_DBG_LA_CFG_A 0x140
+#define UP_UP_DBG_LA_DATA_A 0x144
-/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
- * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
- * selects for a particular field being present. These fields, when present
- * in the Compressed Filter Tuple, have the following widths in bits.
- */
-#define W_FT_FCOE 1
-#define W_FT_PORT 3
-#define W_FT_VNIC_ID 17
-#define W_FT_VLAN 17
-#define W_FT_TOS 8
-#define W_FT_PROTOCOL 8
-#define W_FT_ETHERTYPE 16
-#define W_FT_MACMATCH 9
-#define W_FT_MPSHITTYPE 3
-#define W_FT_FRAGMENTATION 1
-
-/* Some of the Compressed Filter Tuple fields have internal structure. These
- * bit shifts/masks describe those structures. All shifts are relative to the
- * base position of the fields within the Compressed Filter Tuple
- */
-#define S_FT_VLAN_VLD 16
-#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
-#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+#define HOSTBUSY_S 17
+#define HOSTBUSY_V(x) ((x) << HOSTBUSY_S)
+#define HOSTBUSY_F HOSTBUSY_V(1U)
+
+#define HOSTWRITE_S 16
+#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S)
+#define HOSTWRITE_F HOSTWRITE_V(1U)
+
+#define CIM_IBQ_DBG_CFG_A 0x7b60
+
+#define IBQDBGADDR_S 16
+#define IBQDBGADDR_M 0xfffU
+#define IBQDBGADDR_V(x) ((x) << IBQDBGADDR_S)
+#define IBQDBGADDR_G(x) (((x) >> IBQDBGADDR_S) & IBQDBGADDR_M)
+
+#define IBQDBGBUSY_S 1
+#define IBQDBGBUSY_V(x) ((x) << IBQDBGBUSY_S)
+#define IBQDBGBUSY_F IBQDBGBUSY_V(1U)
+
+#define IBQDBGEN_S 0
+#define IBQDBGEN_V(x) ((x) << IBQDBGEN_S)
+#define IBQDBGEN_F IBQDBGEN_V(1U)
+
+#define CIM_OBQ_DBG_CFG_A 0x7b64
+
+#define OBQDBGADDR_S 16
+#define OBQDBGADDR_M 0xfffU
+#define OBQDBGADDR_V(x) ((x) << OBQDBGADDR_S)
+#define OBQDBGADDR_G(x) (((x) >> OBQDBGADDR_S) & OBQDBGADDR_M)
+
+#define OBQDBGBUSY_S 1
+#define OBQDBGBUSY_V(x) ((x) << OBQDBGBUSY_S)
+#define OBQDBGBUSY_F OBQDBGBUSY_V(1U)
+
+#define OBQDBGEN_S 0
+#define OBQDBGEN_V(x) ((x) << OBQDBGEN_S)
+#define OBQDBGEN_F OBQDBGEN_V(1U)
+
+#define CIM_IBQ_DBG_DATA_A 0x7b68
+#define CIM_OBQ_DBG_DATA_A 0x7b6c
+
+#define UPDBGLARDEN_S 1
+#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
+#define UPDBGLARDEN_F UPDBGLARDEN_V(1U)
+
+#define UPDBGLAEN_S 0
+#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S)
+#define UPDBGLAEN_F UPDBGLAEN_V(1U)
+
+#define UPDBGLARDPTR_S 2
+#define UPDBGLARDPTR_M 0xfffU
+#define UPDBGLARDPTR_V(x) ((x) << UPDBGLARDPTR_S)
+
+#define UPDBGLAWRPTR_S 16
+#define UPDBGLAWRPTR_M 0xfffU
+#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
+
+#define UPDBGLACAPTPCONLY_S 30
+#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S)
+#define UPDBGLACAPTPCONLY_F UPDBGLACAPTPCONLY_V(1U)
+
+#define CIM_QUEUE_CONFIG_REF_A 0x7b48
+#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
+
+#define CIMQSIZE_S 24
+#define CIMQSIZE_M 0x3fU
+#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
+
+#define CIMQBASE_S 16
+#define CIMQBASE_M 0x3fU
+#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
+
+#define QUEFULLTHRSH_S 0
+#define QUEFULLTHRSH_M 0x1ffU
+#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
+
+#define UP_IBQ_0_RDADDR_A 0x10
+#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
+#define UP_OBQ_0_REALADDR_A 0x104
+#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
+
+#define IBQRDADDR_S 0
+#define IBQRDADDR_M 0x1fffU
+#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
+
+#define IBQWRADDR_S 0
+#define IBQWRADDR_M 0x1fffU
+#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
+
+#define QUERDADDR_S 0
+#define QUERDADDR_M 0x7fffU
+#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
+
+#define QUEREMFLITS_S 0
+#define QUEREMFLITS_M 0x7ffU
+#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
+
+#define QUEEOPCNT_S 16
+#define QUEEOPCNT_M 0xfffU
+#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
+
+#define QUESOPCNT_S 0
+#define QUESOPCNT_M 0xfffU
+#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
-#define S_FT_VNID_ID_VF 0
-#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
+#define OBQSELECT_S 4
+#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
+#define OBQSELECT_F OBQSELECT_V(1U)
-#define S_FT_VNID_ID_PF 7
-#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
+#define IBQSELECT_S 3
+#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
+#define IBQSELECT_F IBQSELECT_V(1U)
-#define S_FT_VNID_ID_VLD 16
-#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
+#define QUENUMSELECT_S 0
+#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644
index 000000000000..19b2dcf6acde
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -0,0 +1,124 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_VALUES_H__
+#define __T4_VALUES_H__
+
+/* This file contains definitions for the various hardware constants used as
+ * T4 register field values. The values encoded here are predominantly those
+ * for register fields which control "modal" behavior. For the most part, we
+ * do not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/* SGE register field values.
+ */
+
+/* CONTROL1 register */
+#define RXPKTCPLMODE_SPLIT_X 1
+
+#define INGPCIEBOUNDARY_SHIFT_X 5
+#define INGPCIEBOUNDARY_32B_X 0
+
+#define INGPADBOUNDARY_SHIFT_X 5
+
+/* CONTROL2 register */
+#define INGPACKBOUNDARY_SHIFT_X 5
+#define INGPACKBOUNDARY_16B_X 0
+
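/* Decoding sketch, not part of this patch: the boundary fields paired with
 * the *_SHIFT_X constants above encode a power-of-two byte boundary, as the
 * cxgb4vf sge.c hunk later in this patch does with INGPADBOUNDARY_G() (a
 * t4_regs.h helper).
 */
static inline unsigned int example_ingpadboundary_bytes(u32 sge_control)
{
	/* e.g. field value 0 -> 1 << (0 + 5) = 32-byte padding boundary */
	return 1U << (INGPADBOUNDARY_G(sge_control) + INGPADBOUNDARY_SHIFT_X);
}
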
+/* GTS register */
+#define SGE_TIMERREGS 6
+#define TIMERREG_COUNTER0_X 0
+
+/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE 128
+#define SGE_UDB_KDOORBELL 8
+#define SGE_UDB_GTS 20
+#define SGE_UDB_WCDOORBELL 64
+
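/* Usage sketch, not part of this patch: how the SGE_UDB_* offsets above are
 * combined with a queue's mapped BAR2 doorbell region, mirroring the sge.c
 * hunks further down. 'bar2_addr', 'bar2_qid' and 'val' (the PIDX/CIDXINC
 * payload) are assumed to come from the caller; QID_V() is the t4_regs.h
 * helper.
 */
static inline void example_ring_udb(void __iomem *bar2_addr,
				    unsigned int bar2_qid, u32 val)
{
	/* the Kernel (Simple) Doorbell sits at offset 8 of the 128-byte UDB */
	writel(val | QID_V(bar2_qid), bar2_addr + SGE_UDB_KDOORBELL);
}
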
+/* CIM register field values.
+ */
+#define X_MBOWNER_FW 1
+#define X_MBOWNER_PL 2
+
+/* PCI-E definitions */
+#define WINDOW_SHIFT_X 10
+#define PCIEOFST_SHIFT_X 10
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define FT_FCOE_W 1
+#define FT_PORT_W 3
+#define FT_VNIC_ID_W 17
+#define FT_VLAN_W 17
+#define FT_TOS_W 8
+#define FT_PROTOCOL_W 8
+#define FT_ETHERTYPE_W 16
+#define FT_MACMATCH_W 9
+#define FT_MPSHITTYPE_W 3
+#define FT_FRAGMENTATION_W 1
+
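/* Worked example, illustration only: the Compressed Filter Tuple width is
 * the sum of the widths of whichever fields TP_VLAN_PRI_MAP selects. A
 * hypothetical map enabling PORT, VLAN and PROTOCOL gives 3 + 17 + 8 bits.
 */
static inline unsigned int example_filter_tuple_width(void)
{
	return FT_PORT_W + FT_VLAN_W + FT_PROTOCOL_W;	/* 28 bits */
}
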
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define FT_VLAN_VLD_S 16
+#define FT_VLAN_VLD_V(x) ((x) << FT_VLAN_VLD_S)
+#define FT_VLAN_VLD_F FT_VLAN_VLD_V(1U)
+
+#define FT_VNID_ID_VF_S 0
+#define FT_VNID_ID_VF_V(x) ((x) << FT_VNID_ID_VF_S)
+
+#define FT_VNID_ID_PF_S 7
+#define FT_VNID_ID_PF_V(x) ((x) << FT_VNID_ID_PF_S)
+
+#define FT_VNID_ID_VLD_S 16
+#define FT_VNID_ID_VLD_V(x) ((x) << FT_VNID_ID_VLD_S)
+
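/* Hypothetical helper, not part of this patch: composing the VNIC_ID field
 * of a filter tuple from a PF number and a VF number with the shift helpers
 * above, marking the value valid.
 */
static inline u32 example_ft_vnid(unsigned int pf, unsigned int vf)
{
	return FT_VNID_ID_VLD_V(1U) | FT_VNID_ID_PF_V(pf) | FT_VNID_ID_VF_V(vf);
}
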
+#endif /* __T4_VALUES_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7c0aec85137a..9b353a88cbda 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -673,6 +673,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_DEVLOG_CMD = 0x25,
FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
@@ -1058,9 +1059,11 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_DIAG = 0x11,
FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+ FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
};
/*
@@ -1120,6 +1123,16 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
};
+enum fw_params_param_dev_diag {
+ FW_PARAM_DEV_DIAG_TMP = 0x00,
+ FW_PARAM_DEV_DIAG_VDD = 0x01,
+};
+
+enum fw_params_param_dev_fwcache {
+ FW_PARAM_DEV_FWCACHE_FLUSH = 0x00,
+ FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01,
+};
+
#define FW_PARAMS_MNEM_S 24
#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S)
@@ -3005,21 +3018,29 @@ enum fw_hdr_chip {
#define FW_HDR_FW_VER_MAJOR_S 24
#define FW_HDR_FW_VER_MAJOR_M 0xff
+#define FW_HDR_FW_VER_MAJOR_V(x) \
+ ((x) << FW_HDR_FW_VER_MAJOR_S)
#define FW_HDR_FW_VER_MAJOR_G(x) \
(((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M)
#define FW_HDR_FW_VER_MINOR_S 16
#define FW_HDR_FW_VER_MINOR_M 0xff
+#define FW_HDR_FW_VER_MINOR_V(x) \
+ ((x) << FW_HDR_FW_VER_MINOR_S)
#define FW_HDR_FW_VER_MINOR_G(x) \
(((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M)
#define FW_HDR_FW_VER_MICRO_S 8
#define FW_HDR_FW_VER_MICRO_M 0xff
+#define FW_HDR_FW_VER_MICRO_V(x) \
+ ((x) << FW_HDR_FW_VER_MICRO_S)
#define FW_HDR_FW_VER_MICRO_G(x) \
(((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M)
#define FW_HDR_FW_VER_BUILD_S 0
#define FW_HDR_FW_VER_BUILD_M 0xff
+#define FW_HDR_FW_VER_BUILD_V(x) \
+ ((x) << FW_HDR_FW_VER_BUILD_S)
#define FW_HDR_FW_VER_BUILD_G(x) \
(((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M)
@@ -3038,4 +3059,84 @@ enum fw_hdr_flags {
FW_HDR_FLAGS_RESET_HALT = 0x00000001,
};
+/* length of the formatting string */
+#define FW_DEVLOG_FMT_LEN 192
+
+/* maximum number of the formatting string parameters */
+#define FW_DEVLOG_FMT_PARAMS_NUM 8
+
+/* priority levels */
+enum fw_devlog_level {
+ FW_DEVLOG_LEVEL_EMERG = 0x0,
+ FW_DEVLOG_LEVEL_CRIT = 0x1,
+ FW_DEVLOG_LEVEL_ERR = 0x2,
+ FW_DEVLOG_LEVEL_NOTICE = 0x3,
+ FW_DEVLOG_LEVEL_INFO = 0x4,
+ FW_DEVLOG_LEVEL_DEBUG = 0x5,
+ FW_DEVLOG_LEVEL_MAX = 0x5,
+};
+
+/* facilities that may send a log message */
+enum fw_devlog_facility {
+ FW_DEVLOG_FACILITY_CORE = 0x00,
+ FW_DEVLOG_FACILITY_CF = 0x01,
+ FW_DEVLOG_FACILITY_SCHED = 0x02,
+ FW_DEVLOG_FACILITY_TIMER = 0x04,
+ FW_DEVLOG_FACILITY_RES = 0x06,
+ FW_DEVLOG_FACILITY_HW = 0x08,
+ FW_DEVLOG_FACILITY_FLR = 0x10,
+ FW_DEVLOG_FACILITY_DMAQ = 0x12,
+ FW_DEVLOG_FACILITY_PHY = 0x14,
+ FW_DEVLOG_FACILITY_MAC = 0x16,
+ FW_DEVLOG_FACILITY_PORT = 0x18,
+ FW_DEVLOG_FACILITY_VI = 0x1A,
+ FW_DEVLOG_FACILITY_FILTER = 0x1C,
+ FW_DEVLOG_FACILITY_ACL = 0x1E,
+ FW_DEVLOG_FACILITY_TM = 0x20,
+ FW_DEVLOG_FACILITY_QFC = 0x22,
+ FW_DEVLOG_FACILITY_DCB = 0x24,
+ FW_DEVLOG_FACILITY_ETH = 0x26,
+ FW_DEVLOG_FACILITY_OFLD = 0x28,
+ FW_DEVLOG_FACILITY_RI = 0x2A,
+ FW_DEVLOG_FACILITY_ISCSI = 0x2C,
+ FW_DEVLOG_FACILITY_FCOE = 0x2E,
+ FW_DEVLOG_FACILITY_FOISCSI = 0x30,
+ FW_DEVLOG_FACILITY_FOFCOE = 0x32,
+ FW_DEVLOG_FACILITY_MAX = 0x32,
+};
+
+/* log message format */
+struct fw_devlog_e {
+ __be64 timestamp;
+ __be32 seqno;
+ __be16 reserved1;
+ __u8 level;
+ __u8 facility;
+ __u8 fmt[FW_DEVLOG_FMT_LEN];
+ __be32 params[FW_DEVLOG_FMT_PARAMS_NUM];
+ __be32 reserved3[4];
+};
+
+struct fw_devlog_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __u8 level;
+ __u8 r2[7];
+ __be32 memtype_devlog_memaddr16_devlog;
+ __be32 memsize_devlog;
+ __be32 r3[2];
+};
+
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S 28
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M 0xf
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x) \
+ (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
+ FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
+
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S 0
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M 0xfffffff
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x) \
+ (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
+ FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+
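/* Decoding sketch (an assumption about intended use, not taken from this
 * patch): unpacking a FW_DEVLOG_CMD reply. The MEMADDR16 field is assumed to
 * be in 16-byte units, hence the shift back to a byte address.
 */
static inline void example_decode_devlog(const struct fw_devlog_cmd *rpl,
					 unsigned int *memtype,
					 u32 *start, u32 *size)
{
	u32 v = be32_to_cpu(rpl->memtype_devlog_memaddr16_devlog);

	*memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(v);
	*start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(v) << 4;
	*size = be32_to_cpu(rpl->memsize_devlog);
}
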
#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
new file mode 100644
index 000000000000..e2bd3f747858
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4FW_VERSION_H__
+#define __T4FW_VERSION_H__
+
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x0C
+#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_BUILD 0x00
+
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x0C
+#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_BUILD 0x00
+
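/* Composition sketch (assumption, not part of this patch): the _V() helpers
 * added to t4fw_api.h above can pack these components into a single 32-bit
 * firmware version word.
 */
static inline u32 example_t4fw_version(void)
{
	return FW_HDR_FW_VER_MAJOR_V(T4FW_VERSION_MAJOR) |
	       FW_HDR_FW_VER_MINOR_V(T4FW_VERSION_MINOR) |
	       FW_HDR_FW_VER_MICRO_V(T4FW_VERSION_MICRO) |
	       FW_HDR_FW_VER_BUILD_V(T4FW_VERSION_BUILD);
}
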
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2215d432a059..122e2964e63b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
* enable interrupts.
*/
t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(0) |
- SEINTARM(rspq->intr_params) |
- INGRESSQID(rspq->cntxt_id));
+ CIDXINC_V(0) |
+ SEINTARM_V(rspq->intr_params) |
+ INGRESSQID_V(rspq->cntxt_id));
}
/*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
*/
if (adapter->flags & USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(0) |
- SEINTARM(s->intrq.intr_params) |
- INGRESSQID(s->intrq.cntxt_id));
+ CIDXINC_V(0) |
+ SEINTARM_V(s->intrq.intr_params) |
+ INGRESSQID_V(s->intrq.cntxt_id));
}
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
*/
const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
- opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+ opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
if (opcode != CPL_SGE_EGR_UPDATE) {
dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
, opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
* free TX Queue Descriptors ...
*/
const struct cpl_sge_egr_update *p = cpl;
- unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+ unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
struct sge_eth_txq *txq;
@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
reg_block_dump(adapter, regbuf,
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
- ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
+ ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
reg_block_dump(adapter, regbuf,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
* threshold values from the SGE parameters.
*/
s->timer_val[0] = core_ticks_to_us(adapter,
- TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+ TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adapter,
- TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+ TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adapter,
- TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+ TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adapter,
- TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+ TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adapter,
- TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+ TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adapter,
- TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+ TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
- s->counter_val[0] =
- THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
- s->counter_val[1] =
- THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
- s->counter_val[2] =
- THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
- s->counter_val[3] =
- THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
/*
* Grab our Virtual Interface resource allocation, extract the
@@ -2430,7 +2426,7 @@ static void cfg_queues(struct adapter *adapter)
*/
n10g = 0;
for_each_port(adapter, pidx)
- n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f7fd1317d996..0545f0de1c52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -47,6 +47,7 @@
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"
@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
- val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
- val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
- DBTYPE(1);
- val |= DBPRIO(1);
+ val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
+ DBTYPE_F;
+ val |= DBPRIO_F;
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (unlikely(fl->bar2_addr == NULL)) {
t4_write_reg(adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(fl->cntxt_id) | val);
+ QID_V(fl->cntxt_id) | val);
} else {
- writel(val | QID(fl->bar2_qid),
+ writel(val | QID_V(fl->bar2_qid),
fl->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to
@@ -925,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
}
sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_NSGE(nfrags));
+ ULPTX_NSGE_V(nfrags));
if (likely(--nfrags == 0))
return;
/*
@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(tq->bar2_addr == NULL)) {
- u32 val = PIDX(n);
+ u32 val = PIDX_V(n);
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(tq->cntxt_id) | val);
+ QID_V(tq->cntxt_id) | val);
} else {
- u32 val = PIDX_T5(n);
+ u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to
@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large.
*/
- WARN_ON(val & DBPRIO(1));
+ WARN_ON(val & DBPRIO_F);
/* If we're only writing a single Egress Unit and the BAR2
* Queue ID is 0, we can use the Write Combining Doorbell
@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
count--;
}
} else
- writel(val | QID(tq->bar2_qid),
+ writel(val | QID_V(tq->bar2_qid),
tq->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write Memory Barrier will force the write to the User
@@ -1325,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* If there's a VLAN tag present, add that to the list of things to
* do in this Work Request.
*/
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
}
/*
@@ -1603,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
* If this is a good TCP packet and we have Generic Receive Offload
* enabled, handle the packet in the GRO path.
*/
- if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+ if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
(rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
!pkt->ip_frag) {
do_gro(rxq, gl, pkt);
@@ -1625,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
rxq->stats.pkts++;
if (csum_ok && !pkt->err_vec &&
- (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
- val = CIDXINC(work_done) | SEINTARM(intr_params);
+ val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
if (is_t4(rspq->adapter->params.chip)) {
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- val | INGRESSQID((u32)rspq->cntxt_id));
+ val | INGRESSQID_V((u32)rspq->cntxt_id));
} else {
- writel(val | INGRESSQID(rspq->bar2_qid),
+ writel(val | INGRESSQID_V(rspq->bar2_qid),
rspq->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
rspq_next(intrq);
}
- val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+ val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
if (is_t4(adapter->params.chip))
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- val | INGRESSQID(intrq->cntxt_id));
+ val | INGRESSQID_V(intrq->cntxt_id));
else {
- writel(val | INGRESSQID(intrq->bar2_qid),
+ writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1);
return -EINVAL;
}
- if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
+ if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
*/
if (fl1)
s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
- s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+ s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
- s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+ s->pktshift = PKTSHIFT_G(sge_params->sge_control);
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
*/
- ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
+ INGPADBOUNDARY_SHIFT_X);
if (is_t4(adapter->params.chip)) {
s->fl_align = ingpadboundary;
} else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
* Congestion Threshold is in units of 2 Free List pointers.)
*/
s->fl_starve_thres
- = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
+ = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
/*
* Set up tasklet timers.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index c7b127d93767..b516b12b1884 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -64,8 +64,8 @@
* Mailbox Data in the fixed CIM PF map and the programmable VF map must
* match. However, it's a useful convention ...
*/
-#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
-#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
#endif
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 21dc9a20308c..1b5506df35b1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -39,6 +39,7 @@
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
/*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
*/
- v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
- v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
if (v != MBOX_OWNER_DRV)
return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
t4_read_reg(adapter, mbox_data); /* flush write */
t4_write_reg(adapter, mbox_ctl,
- MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */
/*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* If we're the owner, see if this is the reply we wanted.
*/
v = t4_read_reg(adapter, mbox_ctl);
- if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+ if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
/*
* If the Message Valid bit isn't on, revoke ownership
* of the mailbox and continue waiting for our reply.
*/
- if ((v & MBMSGVALID) == 0) {
+ if ((v & MBMSGVALID_F) == 0) {
t4_write_reg(adapter, mbox_ctl,
- MBOWNER(MBOX_OWNER_NONE));
+ MBOWNER_V(MBOX_OWNER_NONE));
continue;
}
@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
- MBOWNER(MBOX_OWNER_NONE));
+ MBOWNER_V(MBOX_OWNER_NONE));
return -FW_CMD_RETVAL_G(v);
}
}
@@ -323,6 +324,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
return v;
v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(v) : -1;
pi->port_type = FW_PORT_CMD_PTYPE_G(v);
pi->mod_type = FW_PORT_MOD_TYPE_NA;
@@ -528,19 +531,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
int v;
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+ FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
v = t4vf_query_params(adapter, 7, params, vals);
if (v)
return v;
@@ -576,9 +579,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
}
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+ FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
@@ -615,8 +618,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
* the driver can just use it.
*/
whoami = t4_read_reg(adapter,
- T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
- pf = SOURCEPF_GET(whoami);
+ T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+ pf = SOURCEPF_G(whoami);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -628,10 +631,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
sge_params->sge_vf_eq_qpp =
((sge_params->sge_egress_queues_per_page >> s_qpp)
- & QUEUESPERPAGEPF0_MASK);
+ & QUEUESPERPAGEPF0_M);
sge_params->sge_vf_iq_qpp =
((sge_params->sge_ingress_queues_per_page >> s_qpp)
- & QUEUESPERPAGEPF0_MASK);
+ & QUEUESPERPAGEPF0_M);
}
return 0;
@@ -1590,7 +1593,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
break;
case CHELSIO_T5:
- chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
break;
}
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 7403dff8f14a..905ac5f5d9a6 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -32,7 +32,8 @@ config CS89x0
will be called cs89x0.
config CS89x0_PLATFORM
- bool "CS89x0 platform driver support"
+ bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
+ default !HAS_IOPORT_MAP
depends on CS89x0
help
Say Y to compile the cs89x0 driver as a platform driver. This
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 3a12c096ea1c..de9f7c97d916 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -475,8 +475,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
if (d)
dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
- if (ep->rx_buf[i] != NULL)
- kfree(ep->rx_buf[i]);
+ kfree(ep->rx_buf[i]);
}
for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
@@ -486,8 +485,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
if (d)
dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
- if (ep->tx_buf[i] != NULL)
- kfree(ep->tx_buf[i]);
+ kfree(ep->tx_buf[i]);
}
dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 25c4d88853d8..84b6a2b46aec 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.67"
+#define DRV_VERSION "2.1.1.83"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -188,6 +188,7 @@ struct enic {
struct enic_rfs_flw_tbl rfs_h;
u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
+ struct vnic_gen_stats gen_stats;
};
static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
return enic->rq_count + enic->wq_count + 1;
}
+static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
+{
+ if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+ net_warn_ratelimited("%s: PCI dma mapping failed!\n",
+ enic->netdev->name);
+ enic->gen_stats.dma_map_error++;
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
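/* Usage sketch, mirroring the enic_main.c hunks later in this patch: map a
 * buffer, then bail out early if the mapping failed, which also bumps the
 * new dma_map_error counter exposed through ethtool.
 */
static inline int example_map_head(struct enic *enic, struct sk_buff *skb,
				   dma_addr_t *dma_addr)
{
	*dma_addr = pci_map_single(enic->pdev, skb->data, skb_headlen(skb),
				   PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, *dma_addr)))
		return -ENOMEM;

	return 0;
}
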
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 87ddc44b590e..f8d2a6a34282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -177,40 +177,6 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
return err;
}
-int enic_vnic_dev_deinit(struct enic *enic)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_deinit(enic->vdev);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_init_prov2(enic->vdev,
- (u8 *)vp, vic_provinfo_size(vp));
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_deinit_done(struct enic *enic, int *status)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_deinit_done(enic->vdev, status);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
/* rtnl lock is held */
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
@@ -237,28 +203,6 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return err;
}
-int enic_dev_enable2(struct enic *enic, int active)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_enable2(enic->vdev, active);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_enable2_done(struct enic *enic, int *status)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_enable2_done(enic->vdev, status);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
int enic_dev_status_to_errno(int devcmd_status)
{
switch (devcmd_status) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 10bb970b2f35..f5bb058b3f96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -55,11 +55,6 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
int enic_dev_enable(struct enic *enic);
int enic_dev_disable(struct enic *enic);
int enic_dev_intr_coal_timer_info(struct enic *enic);
-int enic_vnic_dev_deinit(struct enic *enic);
-int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
-int enic_dev_deinit_done(struct enic *enic, int *status);
-int enic_dev_enable2(struct enic *enic, int arg);
-int enic_dev_enable2_done(struct enic *enic, int *status);
int enic_dev_status_to_errno(int devcmd_status);
#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index eba1eb846d34..28d9ca675a27 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -24,6 +24,7 @@
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
+#include "vnic_stats.h"
struct enic_stat {
char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}
+#define ENIC_GEN_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
+}
+
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,10 +84,15 @@ static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_to_max),
};
+static const struct enic_stat enic_gen_stats[] = {
+ ENIC_GEN_STAT(dma_map_error),
+};
+
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
-void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
int i;
int intr;
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < enic_n_gen_stats; i++) {
+ memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
}
}
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats;
+ return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
default:
return -EOPNOTSUPP;
}
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
for (i = 0; i < enic_n_rx_stats; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+ for (i = 0; i < enic_n_gen_stats; i++)
+ *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}
static u32 enic_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b29e027c476e..9cbe038a388e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -45,6 +45,7 @@
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
+#include <linux/crash_dump.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(pci, enic_id_table);
* coalescing timer values
* {rx_rate in Mbps, mapping percentage of the range}
*/
-struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
{4000, 0},
{4400, 10},
{5060, 20},
@@ -105,7 +106,7 @@ struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
/* This table helps the driver to pick different ranges for rx coalescing
* timer depending on the link speed.
*/
-struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
{3, 6}, /* 10 - 40 Gbps */
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
return IRQ_HANDLED;
}
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb,
- unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, unsigned int len_left,
+ int loopback)
{
const skb_frag_t *frag;
+ dma_addr_t dma_addr;
/* Queue additional data fragments */
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
len_left -= skb_frag_size(frag);
- enic_queue_wq_desc_cont(wq, skb,
- skb_frag_dma_map(&enic->pdev->dev,
- frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE),
- skb_frag_size(frag),
- (len_left == 0), /* EOP? */
- loopback);
+ dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+ enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+ (len_left == 0), /* EOP? */
+ loopback);
}
+
+ return 0;
}
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb,
- int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, int vlan_tag_insert,
+ unsigned int vlan_tag, int loopback)
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
int eop = (len_left == 0);
+ dma_addr_t dma_addr;
+ int err = 0;
+
+ dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
/* Queue the main skb fragment. The fragments are no larger
* than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
* than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
* per fragment is queued.
*/
- enic_queue_wq_desc(wq, skb,
- pci_map_single(enic->pdev, skb->data,
- head_len, PCI_DMA_TODEVICE),
- head_len,
- vlan_tag_insert, vlan_tag,
- eop, loopback);
+ enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+ vlan_tag, eop, loopback);
if (!eop)
- enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+ return err;
}
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb,
- int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, int vlan_tag_insert,
+ unsigned int vlan_tag, int loopback)
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
unsigned int hdr_len = skb_checksum_start_offset(skb);
unsigned int csum_offset = hdr_len + skb->csum_offset;
int eop = (len_left == 0);
+ dma_addr_t dma_addr;
+ int err = 0;
+
+ dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
/* Queue the main skb fragment. The fragments are no larger
* than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
* than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
* per fragment is queued.
*/
- enic_queue_wq_desc_csum_l4(wq, skb,
- pci_map_single(enic->pdev, skb->data,
- head_len, PCI_DMA_TODEVICE),
- head_len,
- csum_offset,
- hdr_len,
- vlan_tag_insert, vlan_tag,
- eop, loopback);
+ enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+ hdr_len, vlan_tag_insert, vlan_tag, eop,
+ loopback);
if (!eop)
- enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+ return err;
}
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
- int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, unsigned int mss,
+ int vlan_tag_insert, unsigned int vlan_tag,
+ int loopback)
{
unsigned int frag_len_left = skb_headlen(skb);
unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
*/
while (frag_len_left) {
len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_single(enic->pdev, skb->data + offset,
- len, PCI_DMA_TODEVICE);
- enic_queue_wq_desc_tso(wq, skb,
- dma_addr,
- len,
- mss, hdr_len,
- vlan_tag_insert, vlan_tag,
- eop && (len == frag_len_left), loopback);
+ dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+ enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+ vlan_tag_insert, vlan_tag,
+ eop && (len == frag_len_left), loopback);
frag_len_left -= len;
offset += len;
}
if (eop)
- return;
+ return 0;
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
offset, len,
DMA_TO_DEVICE);
- enic_queue_wq_desc_cont(wq, skb,
- dma_addr,
- len,
- (len_left == 0) &&
- (len == frag_len_left), /* EOP? */
- loopback);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+ enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
+ (len_left == 0) &&
+ (len == frag_len_left),/*EOP*/
+ loopback);
frag_len_left -= len;
offset += len;
}
}
+
+ return 0;
}
static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,25 +518,42 @@ static inline void enic_queue_wq_skb(struct enic *enic,
unsigned int vlan_tag = 0;
int vlan_tag_insert = 0;
int loopback = 0;
+ int err;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
}
if (mss)
- enic_queue_wq_skb_tso(enic, wq, skb, mss,
- vlan_tag_insert, vlan_tag, loopback);
+ err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+ vlan_tag_insert, vlan_tag,
+ loopback);
else if (skb->ip_summed == CHECKSUM_PARTIAL)
- enic_queue_wq_skb_csum_l4(enic, wq, skb,
- vlan_tag_insert, vlan_tag, loopback);
+ err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+ vlan_tag, loopback);
else
- enic_queue_wq_skb_vlan(enic, wq, skb,
- vlan_tag_insert, vlan_tag, loopback);
+ err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+ vlan_tag, loopback);
+ if (unlikely(err)) {
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_use->prev;
+ /* while not EOP of previous pkt && queue not empty.
+ * For all non EOP bufs, os_buf is NULL.
+ */
+ while (!buf->os_buf && (buf->next != wq->to_clean)) {
+ enic_free_wq_buf(wq, buf);
+ wq->ring.desc_avail++;
+ buf = buf->prev;
+ }
+ wq->to_use = buf->next;
+ dev_kfree_skb(skb);
+ }
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
if (!skb)
return -ENOMEM;
- dma_addr = pci_map_single(enic->pdev, skb->data,
- len, PCI_DMA_FROMDEVICE);
+ dma_addr = pci_map_single(enic->pdev, skb->data, len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
enic_queue_rq_desc(rq, skb, os_buf_index,
dma_addr, len);
@@ -1266,7 +1303,7 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
#endif /* CONFIG_RFS_ACCEL */
#ifdef CONFIG_NET_RX_BUSY_POLL
-int enic_busy_poll(struct napi_struct *napi)
+static int enic_busy_poll(struct napi_struct *napi)
{
struct net_device *netdev = napi->dev;
struct enic *enic = netdev_priv(netdev);
@@ -1335,7 +1372,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
int err;
if (!enic_poll_lock_napi(&enic->rq[rq]))
- return work_done;
+ return budget;
/* Service RQ
*/
@@ -2231,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
enic_clear_intr_mode(enic);
}
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+ if (is_kdump_kernel()) {
+ dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+ enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+ enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+ }
+}
+
static int enic_dev_init(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
@@ -2260,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
+ /* modify resource count if we are in kdump_kernel
+ */
+ enic_kdump_kernel_config(enic);
+
/* Set interrupt mode based on resource counts and system
* capabilities
*/
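Every pci_map_single()/skb_frag_dma_map() call in the transmit and receive paths above is now followed by enic_dma_map_check(); on failure the caller returns -ENOMEM and unwinds any descriptors it already queued. The helper itself is defined elsewhere in the driver; a plausible minimal form, assuming it records failures in the dma_map_error counter added in the vnic_stats.h hunk below, is:

        #include <linux/errno.h>
        #include <linux/pci.h>
        #include "enic.h"               /* assumed: provides struct enic */

        /* Sketch, not the driver's actual helper: flag a failed mapping and
         * count it so the new ethtool "dma_map_error" statistic moves.
         */
        static inline int sketch_dma_map_check(struct enic *enic,
                                               dma_addr_t dma_addr)
        {
                if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
                        enic->gen_stats.dma_map_error++;
                        return -ENOMEM;
                }

                return 0;
        }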
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 77750ec93954..74c81ed6fdab 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
u64 rsvd[16];
};
+/* Generic statistics */
+struct vnic_gen_stats {
+ u64 dma_map_error;
+};
+
struct vnic_stats {
struct vnic_tx_stats tx;
struct vnic_rx_stats rx;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 3e6b8d54dafc..b5a1c937fad2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
+ buf->next->prev = buf;
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
buf->next = wq->bufs[i + 1];
+ buf->next->prev = buf;
} else {
buf->next = buf + 1;
+ buf->next->prev = buf;
buf++;
}
}
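The prev link wired up above exists so the transmit error path in enic_queue_wq_skb() (earlier in this patch) can walk the work-queue ring backwards and release a partially queued packet. Restated as a standalone sketch using the driver's vnic_wq fields:

        /* Sketch of the rollback the prev pointers enable: starting from the
         * buffer before wq->to_use, free every non-EOP buffer of the failed
         * packet (os_buf is NULL on non-EOP buffers) and rewind to_use.
         */
        static void sketch_wq_rollback(struct vnic_wq *wq)
        {
                struct vnic_wq_buf *buf = wq->to_use->prev;

                while (!buf->os_buf && buf->next != wq->to_clean) {
                        enic_free_wq_buf(wq, buf);
                        wq->ring.desc_avail++;
                        buf = buf->prev;
                }
                wq->to_use = buf->next;
        }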
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 816f1ad6072f..296154351823 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
uint8_t cq_entry; /* Gets completion event from hw */
uint8_t desc_skip_cnt; /* Num descs to occupy */
uint8_t compressed_send; /* Both hdr and payload in one desc */
+ struct vnic_wq_buf *prev;
};
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index ef0bb58750e6..c0a7813603c3 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -36,6 +36,9 @@
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -1426,11 +1429,48 @@ dm9000_probe(struct platform_device *pdev)
struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct board_info *db; /* Point a board information structure */
struct net_device *ndev;
+ struct device *dev = &pdev->dev;
const unsigned char *mac_src;
int ret = 0;
int iosize;
int i;
u32 id_val;
+ int reset_gpios;
+ enum of_gpio_flags flags;
+ struct regulator *power;
+
+ power = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(power)) {
+ if (PTR_ERR(power) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "no regulator provided\n");
+ } else {
+ ret = regulator_enable(power);
+ if (ret != 0) {
+ dev_err(dev,
+ "Failed to enable power regulator: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(dev, "regulator enabled\n");
+ }
+
+ reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
+ &flags);
+ if (gpio_is_valid(reset_gpios)) {
+ ret = devm_gpio_request_one(dev, reset_gpios, flags,
+ "dm9000_reset");
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio %d: %d\n",
+ reset_gpios, ret);
+ return -ENODEV;
+ }
+
+ /* According to manual PWRST# Low Period Min 1ms */
+ msleep(2);
+ gpio_set_value(reset_gpios, 1);
+ /* Needs 3ms to read eeprom when PWRST is deasserted */
+ msleep(4);
+ }
if (!pdata) {
pdata = dm9000_parse_dt(&pdev->dev);
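dm9000_probe() now powers the chip through an optional "vcc" regulator (deferring the probe if the supply is not ready yet) and pulses an optional reset GPIO before touching the registers. A condensed sketch of that ordering follows; the function name, the explicit GPIOF_OUT_INIT_LOW request and the exact delays are illustrative, whereas the driver itself reuses the flags returned by of_get_named_gpio_flags():

        #include <linux/delay.h>
        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/errno.h>
        #include <linux/gpio.h>
        #include <linux/of_gpio.h>
        #include <linux/regulator/consumer.h>

        static int sketch_power_and_reset(struct device *dev)
        {
                struct regulator *power;
                int reset_gpio, ret;

                power = devm_regulator_get(dev, "vcc");
                if (IS_ERR(power)) {
                        if (PTR_ERR(power) == -EPROBE_DEFER)
                                return -EPROBE_DEFER;   /* supply not ready yet */
                        /* no usable regulator described; carry on without it */
                } else {
                        ret = regulator_enable(power);
                        if (ret)
                                return ret;
                }

                reset_gpio = of_get_named_gpio(dev->of_node, "reset-gpios", 0);
                if (gpio_is_valid(reset_gpio)) {
                        /* request the line driven low, i.e. chip held in reset */
                        ret = devm_gpio_request_one(dev, reset_gpio,
                                                    GPIOF_OUT_INIT_LOW,
                                                    "sketch_reset");
                        if (ret)
                                return ret;

                        msleep(2);                      /* PWRST# low period, >= 1 ms */
                        gpio_set_value(reset_gpio, 1);  /* release reset */
                        msleep(4);                      /* allow EEPROM readout */
                }

                return 0;
        }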
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 6aa887e0e1cb..9beb3d34d4ba 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -904,7 +904,7 @@ static void init_registers(struct net_device *dev)
}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
i |= 0xE000;
-#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
+#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
i |= 0x4800;
#else
#warning Processor architecture undefined
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index a379c3e4b57f..13d00a38a5bd 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
* break out of while loop if there are no more
* packets waiting
*/
- if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
- napi_complete(napi);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
- }
+ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+ break;
cmd_word = dnet_readl(bp, RX_LEN_FIFO);
pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
"size %u.\n", dev->name, pkt_len);
}
- budget -= npackets;
-
if (npackets < budget) {
/* We processed all packets available. Tell NAPI it can
- * stop polling then re-enable rx interrupts */
+ * stop polling then re-enable rx interrupts.
+ */
napi_complete(napi);
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
}
- /* There are still packets waiting */
- return 1;
+ return npackets;
}
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
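The dnet_poll() rework above follows the standard NAPI contract: report the number of packets actually processed, and only call napi_complete() plus re-enable receive interrupts when that number is below the budget. A minimal skeleton of that convention, with the device-specific helpers left as undefined placeholders:

        #include <linux/netdevice.h>

        /* Placeholders for device-specific operations; not defined here. */
        bool sketch_rx_pending(struct net_device *dev);
        void sketch_rx_one(struct net_device *dev);
        void sketch_enable_rx_irq(struct net_device *dev);

        static int sketch_poll(struct napi_struct *napi, int budget)
        {
                int npackets = 0;

                while (npackets < budget) {
                        if (!sketch_rx_pending(napi->dev))
                                break;
                        sketch_rx_one(napi->dev);
                        npackets++;
                }

                /* Budget not exhausted: stop polling and re-arm RX interrupts. */
                if (npackets < budget) {
                        napi_complete(napi);
                        sketch_enable_rx_irq(napi->dev);
                }

                return npackets;
        }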
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 712e7f8e1df7..27de37aa90af 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -59,26 +59,6 @@
#define OC_SUBSYS_DEVICE_ID3 0xE612
#define OC_SUBSYS_DEVICE_ID4 0xE652
-static inline char *nic_name(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case OC_DEVICE_ID1:
- return OC_NAME;
- case OC_DEVICE_ID2:
- return OC_NAME_BE;
- case OC_DEVICE_ID3:
- case OC_DEVICE_ID4:
- return OC_NAME_LANCER;
- case BE_DEVICE_ID2:
- return BE3_NAME;
- case OC_DEVICE_ID5:
- case OC_DEVICE_ID6:
- return OC_NAME_SH;
- default:
- return BE_NAME;
- }
-}
-
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN ((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
@@ -243,7 +223,6 @@ struct be_tx_stats {
u64 tx_bytes;
u64 tx_pkts;
u64 tx_reqs;
- u64 tx_wrbs;
u64 tx_compl;
ulong tx_jiffies;
u32 tx_stops;
@@ -266,6 +245,9 @@ struct be_tx_obj {
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
+ u16 pend_wrb_cnt; /* Number of WRBs yet to be given to HW */
+ u16 last_req_wrb_cnt; /* wrb cnt of the last req in the Q */
+ u16 last_req_hdr; /* index of the last req's hdr-wrb */
} ____cacheline_aligned_in_smp;
/* Struct to remember the pages posted for rx frags */
@@ -379,15 +361,14 @@ enum vf_state {
ASSIGNED = 1
};
-#define BE_FLAGS_LINK_STATUS_INIT 1
-#define BE_FLAGS_SRIOV_ENABLED (1 << 2)
-#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
-#define BE_FLAGS_VLAN_PROMISC (1 << 4)
-#define BE_FLAGS_MCAST_PROMISC (1 << 5)
-#define BE_FLAGS_NAPI_ENABLED (1 << 9)
-#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
-#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
-#define BE_FLAGS_SETUP_DONE (1 << 13)
+#define BE_FLAGS_LINK_STATUS_INIT BIT(1)
+#define BE_FLAGS_SRIOV_ENABLED BIT(2)
+#define BE_FLAGS_WORKER_SCHEDULED BIT(3)
+#define BE_FLAGS_NAPI_ENABLED BIT(6)
+#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7)
+#define BE_FLAGS_VXLAN_OFFLOADS BIT(8)
+#define BE_FLAGS_SETUP_DONE BIT(9)
+#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
@@ -397,6 +378,8 @@ enum vf_state {
#define LANCER_DELETE_FW_DUMP 0x2
struct phy_info {
+/* From SFF-8472 spec */
+#define SFP_VENDOR_NAME_LEN 17
u8 transceiver;
u8 autoneg;
u8 fc_autoneg;
@@ -410,6 +393,8 @@ struct phy_info {
u32 advertising;
u32 supported;
u8 cable_type;
+ u8 vendor_name[SFP_VENDOR_NAME_LEN];
+ u8 vendor_pn[SFP_VENDOR_NAME_LEN];
};
struct be_resources {
@@ -467,8 +452,6 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
- u16 vlans_added;
- unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -484,8 +467,15 @@ struct be_adapter {
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
char fw_on_flash[FW_VER_LEN];
+
+ /* IFACE filtering fields */
int if_handle; /* Used to configure filtering */
+ u32 if_flags; /* Interface filtering flags */
u32 *pmac_id; /* MAC addr handle used by BE card */
+ u32 uc_macs; /* Count of secondary UC MAC programmed */
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 vlans_added;
+
u32 beacon_state; /* for set_phys_id */
bool eeh_error;
@@ -493,7 +483,7 @@ struct be_adapter {
bool hw_error;
u32 port_num;
- bool promiscuous;
+ char port_name;
u8 mc_type;
u32 function_mode;
u32 function_caps;
@@ -526,7 +516,6 @@ struct be_adapter {
struct phy_info phy;
u8 wol_cap;
bool wol_en;
- u32 uc_macs; /* Count of secondary UC MAC programmed */
u16 asic_rev;
u16 qnq_vid;
u32 msg_enable;
@@ -732,19 +721,6 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
-static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
-{
- u32 addr;
-
- addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
-
- mac[5] = (u8)(addr & 0xFF);
- mac[4] = (u8)((addr >> 8) & 0xFF);
- mac[3] = (u8)((addr >> 16) & 0xFF);
- /* Use the OUI from the current MAC address */
- memcpy(mac, adapter->netdev->dev_addr, 3);
-}
-
static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
return adapter->num_rx_qs > 1;
@@ -767,129 +743,6 @@ static inline void be_clear_all_error(struct be_adapter *adapter)
adapter->fw_timeout = false;
}
-static inline bool be_is_wol_excluded(struct be_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
-
- if (!be_physfn(adapter))
- return true;
-
- switch (pdev->subsystem_device) {
- case OC_SUBSYS_DEVICE_ID1:
- case OC_SUBSYS_DEVICE_ID2:
- case OC_SUBSYS_DEVICE_ID3:
- case OC_SUBSYS_DEVICE_ID4:
- return true;
- default:
- return false;
- }
-}
-
-static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
-{
- return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
-}
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
- bool status = true;
-
- spin_lock(&eqo->lock); /* BH is already disabled */
- if (eqo->state & BE_EQ_LOCKED) {
- WARN_ON(eqo->state & BE_EQ_NAPI);
- eqo->state |= BE_EQ_NAPI_YIELD;
- status = false;
- } else {
- eqo->state = BE_EQ_NAPI;
- }
- spin_unlock(&eqo->lock);
- return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
- spin_lock(&eqo->lock); /* BH is already disabled */
-
- WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
- eqo->state = BE_EQ_IDLE;
-
- spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
- bool status = true;
-
- spin_lock_bh(&eqo->lock);
- if (eqo->state & BE_EQ_LOCKED) {
- eqo->state |= BE_EQ_POLL_YIELD;
- status = false;
- } else {
- eqo->state |= BE_EQ_POLL;
- }
- spin_unlock_bh(&eqo->lock);
- return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
- spin_lock_bh(&eqo->lock);
-
- WARN_ON(eqo->state & (BE_EQ_NAPI));
- eqo->state = BE_EQ_IDLE;
-
- spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
- spin_lock_init(&eqo->lock);
- eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
- local_bh_disable();
-
- /* It's enough to just acquire napi lock on the eqo to stop
- * be_busy_poll() from processing any queueus.
- */
- while (!be_lock_napi(eqo))
- mdelay(1);
-
- local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
- return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
- return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
@@ -898,16 +751,6 @@ int be_load_fw(struct be_adapter *adapter, u8 *func);
bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);
-
-static inline int fw_major_num(const char *fw_ver)
-{
- int fw_major = 0;
-
- sscanf(fw_ver, "%d.", &fw_major);
-
- return fw_major;
-}
-
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
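The adapter flag definitions above are converted from open-coded shifts to BIT(), which expands to (1UL << (n)) and keeps the bit positions explicit while the values themselves are renumbered. A trivial usage sketch with an assumed flag name:

        #include <linux/bitops.h>
        #include <linux/types.h>

        /* Assumed flag for illustration; matches the BIT()-style definitions above. */
        #define SKETCH_FLAG_NAPI_ENABLED        BIT(6)

        static inline bool sketch_napi_enabled(unsigned long flags)
        {
                return flags & SKETCH_FLAG_NAPI_ENABLED;
        }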
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fead5c65a4f0..36916cfa70f9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,22 @@
#include "be.h"
#include "be_cmds.h"
+static char *be_port_misconfig_evt_desc[] = {
+ "A valid SFP module detected",
+ "Optics faulted/ incorrectly installed/ not installed.",
+ "Optics of two types installed.",
+ "Incompatible optics.",
+ "Unknown port SFP status"
+};
+
+static char *be_port_misconfig_remedy_desc[] = {
+ "",
+ "Reseat optics. If issue not resolved, replace",
+ "Remove one optic or install matching pair of optics",
+ "Replace with compatible optics for card to function",
+ ""
+};
+
static struct be_cmd_priv_map cmd_priv_map[] = {
{
OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
@@ -249,6 +265,29 @@ static void be_async_link_state_process(struct be_adapter *adapter,
evt->port_link_status & LINK_STATUS_MASK);
}
+static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_event_misconfig_port *evt =
+ (struct be_async_event_misconfig_port *)compl;
+ u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
+ struct device *dev = &adapter->pdev->dev;
+ u8 port_misconfig_evt;
+
+ port_misconfig_evt =
+ ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
+
+ /* Log an error message that would allow a user to determine
+ * whether the SFPs have an issue
+ */
+ dev_info(dev, "Port %c: %s %s", adapter->port_name,
+ be_port_misconfig_evt_desc[port_misconfig_evt],
+ be_port_misconfig_remedy_desc[port_misconfig_evt]);
+
+ if (port_misconfig_evt == INCOMPATIBLE_SFP)
+ adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
struct be_mcc_compl *compl)
@@ -334,6 +373,16 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
}
}
+static void be_async_sliport_evt_process(struct be_adapter *adapter,
+ struct be_mcc_compl *cmp)
+{
+ u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
+
+ if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
+ be_async_port_misconfig_event_process(adapter, cmp);
+}
+
static inline bool is_link_state_evt(u32 flags)
{
return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
@@ -352,6 +401,12 @@ static inline bool is_dbg_evt(u32 flags)
ASYNC_EVENT_CODE_QNQ;
}
+static inline bool is_sliport_evt(u32 flags)
+{
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_SLIPORT;
+}
+
static void be_mcc_event_process(struct be_adapter *adapter,
struct be_mcc_compl *compl)
{
@@ -361,6 +416,8 @@ static void be_mcc_event_process(struct be_adapter *adapter,
be_async_grp5_evt_process(adapter, compl);
else if (is_dbg_evt(compl->flags))
be_async_dbg_evt_process(adapter, compl);
+ else if (is_sliport_evt(compl->flags))
+ be_async_sliport_evt_process(adapter, compl);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -573,7 +630,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
- int status = 0, i;
+ int i;
for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -584,9 +641,9 @@ static int lancer_wait_ready(struct be_adapter *adapter)
}
if (i == SLIPORT_READY_TIMEOUT)
- status = -1;
+ return sliport_status ? : -1;
- return status;
+ return 0;
}
static bool lancer_provisioning_error(struct be_adapter *adapter)
@@ -624,7 +681,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
iowrite32(SLI_PORT_CONTROL_IP_MASK,
adapter->db + SLIPORT_CONTROL_OFFSET);
- /* check adapter has corrected the error */
+ /* check if adapter has corrected the error */
status = lancer_wait_ready(adapter);
sliport_status = ioread32(adapter->db +
SLIPORT_STATUS_OFFSET);
@@ -655,7 +712,11 @@ int be_fw_wait_ready(struct be_adapter *adapter)
if (lancer_chip(adapter)) {
status = lancer_wait_ready(adapter);
- return status;
+ if (status) {
+ stage = status;
+ goto err;
+ }
+ return 0;
}
do {
@@ -671,7 +732,8 @@ int be_fw_wait_ready(struct be_adapter *adapter)
timeout += 2;
} while (timeout < 60);
- dev_err(dev, "POST timeout; stage=0x%x\n", stage);
+err:
+ dev_err(dev, "POST timeout; stage=%#x\n", stage);
return -1;
}
@@ -1166,9 +1228,15 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
ctxt, 1);
}
- /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
- req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
- req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
+ /* Subscribe to Link State, Sliport Event and Group 5 Events
+ * (bits 1, 5 and 17 set)
+ */
+ req->async_event_bitmap[0] =
+ cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
+ BIT(ASYNC_EVENT_CODE_GRP_5) |
+ BIT(ASYNC_EVENT_CODE_QNQ) |
+ BIT(ASYNC_EVENT_CODE_SLIPORT));
+
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1881,7 +1949,7 @@ err:
return status;
}
-int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
struct be_mcc_wrb *wrb;
struct be_dma_mem *mem = &adapter->rx_filter;
@@ -1901,31 +1969,13 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
wrb, mem);
req->if_id = cpu_to_le32(adapter->if_handle);
- if (flags & IFF_PROMISC) {
- req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
- if (value == ON)
- req->if_flags =
- cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
- } else if (flags & IFF_ALLMULTI) {
- req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
- } else if (flags & BE_FLAGS_VLAN_PROMISC) {
- req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
-
- if (value == ON)
- req->if_flags =
- cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
- } else {
+ req->if_flags_mask = cpu_to_le32(flags);
+ req->if_flags = (value == ON) ? req->if_flags_mask : 0;
+
+ if (flags & BE_IF_FLAGS_MULTICAST) {
struct netdev_hw_addr *ha;
int i = 0;
- req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
-
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
*/
@@ -1937,24 +1987,26 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
- if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
- req->if_flags_mask) {
- dev_warn(&adapter->pdev->dev,
- "Cannot set rx filter flags 0x%x\n",
- req->if_flags_mask);
- dev_warn(&adapter->pdev->dev,
- "Interface is capable of 0x%x flags only\n",
- be_if_cap_flags(adapter));
- }
- req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
-
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if ((flags & be_if_cap_flags(adapter)) != flags) {
+ dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
+ dev_warn(dev, "Interface is capable of 0x%x flags only\n",
+ be_if_cap_flags(adapter));
+ }
+ flags &= be_if_cap_flags(adapter);
+
+ return __be_cmd_rx_filter(adapter, flags, value);
+}
+
/* Uses synchrounous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
@@ -2355,6 +2407,24 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
return status;
}
+int be_cmd_query_sfp_info(struct be_adapter *adapter)
+{
+ u8 page_data[PAGE_DATA_LEN];
+ int status;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ page_data);
+ if (!status) {
+ strlcpy(adapter->phy.vendor_name, page_data +
+ SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
+ strlcpy(adapter->phy.vendor_pn,
+ page_data + SFP_VENDOR_PN_OFFSET,
+ SFP_VENDOR_NAME_LEN - 1);
+ }
+
+ return status;
+}
+
int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
{
struct lancer_cmd_req_delete_object *req;
@@ -2431,7 +2501,8 @@ err_unlock:
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
+ u32 flash_type, u32 flash_opcode, u32 img_offset,
+ u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2452,6 +2523,9 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
cmd);
req->params.op_type = cpu_to_le32(flash_type);
+ if (flash_type == OPTYPE_OFFSET_SPECIFIED)
+ req->params.offset = cpu_to_le32(img_offset);
+
req->params.op_code = cpu_to_le32(flash_opcode);
req->params.data_buf_size = cpu_to_le32(buf_size);
@@ -2472,10 +2546,10 @@ err_unlock:
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- u16 optype, int offset)
+ u16 img_optype, u32 img_offset, u32 crc_offset)
{
- struct be_mcc_wrb *wrb;
struct be_cmd_read_flash_crc *req;
+ struct be_mcc_wrb *wrb;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2491,9 +2565,13 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
wrb, NULL);
- req->params.op_type = cpu_to_le32(optype);
+ req->params.op_type = cpu_to_le32(img_optype);
+ if (img_optype == OPTYPE_OFFSET_SPECIFIED)
+ req->params.offset = cpu_to_le32(img_offset + crc_offset);
+ else
+ req->params.offset = cpu_to_le32(crc_offset);
+
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
- req->params.offset = cpu_to_le32(offset);
req->params.data_buf_size = cpu_to_le32(0x4);
status = be_mcc_notify_wait(adapter);
@@ -2742,7 +2820,7 @@ err:
return status;
}
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_qos *req;
@@ -3236,6 +3314,24 @@ err:
return status;
}
+static bool be_is_wol_excluded(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (!be_physfn(adapter))
+ return true;
+
+ switch (pdev->subsystem_device) {
+ case OC_SUBSYS_DEVICE_ID1:
+ case OC_SUBSYS_DEVICE_ID2:
+ case OC_SUBSYS_DEVICE_ID3:
+ case OC_SUBSYS_DEVICE_ID4:
+ return true;
+ default:
+ return false;
+ }
+}
+
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
@@ -3422,42 +3518,34 @@ err:
return status;
}
-int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+int be_cmd_query_port_name(struct be_adapter *adapter)
{
- struct be_mcc_wrb *wrb;
struct be_cmd_req_get_port_name *req;
+ struct be_mcc_wrb *wrb;
int status;
- if (!lancer_chip(adapter)) {
- *port_name = adapter->hba_port_num + '0';
- return 0;
- }
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+ wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
NULL);
- req->hdr.version = 1;
+ if (!BEx_chip(adapter))
+ req->hdr.version = 1;
- status = be_mcc_notify_wait(adapter);
+ status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
- *port_name = resp->port_name[adapter->hba_port_num];
+ adapter->port_name = resp->port_name[adapter->hba_port_num];
} else {
- *port_name = adapter->hba_port_num + '0';
+ adapter->port_name = adapter->hba_port_num + '0';
}
-err:
- spin_unlock_bh(&adapter->mcc_lock);
+
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -3751,6 +3839,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
be_reset_nic_desc(&nic_desc);
nic_desc.pf_num = adapter->pf_number;
nic_desc.vf_num = domain;
+ nic_desc.bw_min = 0;
if (lancer_chip(adapter)) {
nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
@@ -4092,7 +4181,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
int status;
if (BEx_chip(adapter) || lancer_chip(adapter))
- return 0;
+ return -EOPNOTSUPP;
spin_lock_bh(&adapter->mcc_lock);
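be_async_port_misconfig_event_process() above packs one status byte per port into event_data_word1 and uses it to index the description and remedy tables. The unpacking step, isolated as a sketch that takes the already byte-swapped (CPU-order) word:

        #include <linux/types.h>

        /* Port N's SFP status occupies bits (8*N)..(8*N+7) of the event word. */
        static inline u8 sketch_port_sfp_status(u32 event_data_word1, u8 port_num)
        {
                return (event_data_word1 >> (port_num * 8)) & 0xff;
        }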
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index eb5085d6794f..db761e8e42a3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -44,10 +44,10 @@ struct be_mcc_wrb {
} payload;
};
-#define CQE_FLAGS_VALID_MASK (1 << 31)
-#define CQE_FLAGS_ASYNC_MASK (1 << 30)
-#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
-#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
+#define CQE_FLAGS_VALID_MASK BIT(31)
+#define CQE_FLAGS_ASYNC_MASK BIT(30)
+#define CQE_FLAGS_COMPLETED_MASK BIT(28)
+#define CQE_FLAGS_CONSUMED_MASK BIT(27)
/* Completion Status */
enum mcc_base_status {
@@ -102,6 +102,8 @@ struct be_mcc_compl {
#define ASYNC_EVENT_PVID_STATE 0x3
#define ASYNC_EVENT_CODE_QNQ 0x6
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
+#define ASYNC_EVENT_CODE_SLIPORT 0x11
+#define ASYNC_EVENT_PORT_MISCONFIG 0x9
enum {
LINK_DOWN = 0x0,
@@ -169,6 +171,15 @@ struct be_async_event_qnq {
u32 flags;
} __packed;
+#define INCOMPATIBLE_SFP 0x3
+/* async event indicating misconfigured port */
+struct be_async_event_misconfig_port {
+ u32 event_data_word1;
+ u32 event_data_word2;
+ u32 rsvd0;
+ u32 flags;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -586,6 +597,10 @@ enum be_if_flags {
BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
BE_IF_FLAGS_UNTAGGED)
+#define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_MCAST_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
@@ -1024,6 +1039,8 @@ enum {
#define SFP_PLUS_SFF_8472_COMP 0x5E
#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8
#define SFP_PLUS_COPPER_CABLE 0x4
+#define SFP_VENDOR_NAME_OFFSET 0x14
+#define SFP_VENDOR_PN_OFFSET 0x28
#define PAGE_DATA_LEN 256
struct be_cmd_resp_port_type {
@@ -1091,6 +1108,10 @@ struct be_cmd_req_query_fw_cfg {
u32 rsvd[31];
};
+/* ASIC revisions */
+#define ASIC_REV_B0 0x10
+#define ASIC_REV_P2 0x11
+
struct be_cmd_resp_query_fw_cfg {
struct be_cmd_resp_hdr hdr;
u32 be_config_number;
@@ -1161,7 +1182,173 @@ struct be_cmd_resp_get_beacon_state {
u8 rsvd0[3];
} __packed;
+/* Flashrom related descriptors */
+#define MAX_FLASH_COMP 32
+
+#define OPTYPE_ISCSI_ACTIVE 0
+#define OPTYPE_REDBOOT 1
+#define OPTYPE_BIOS 2
+#define OPTYPE_PXE_BIOS 3
+#define OPTYPE_OFFSET_SPECIFIED 7
+#define OPTYPE_FCOE_BIOS 8
+#define OPTYPE_ISCSI_BACKUP 9
+#define OPTYPE_FCOE_FW_ACTIVE 10
+#define OPTYPE_FCOE_FW_BACKUP 11
+#define OPTYPE_NCSI_FW 13
+#define OPTYPE_REDBOOT_DIR 18
+#define OPTYPE_REDBOOT_CONFIG 19
+#define OPTYPE_SH_PHY_FW 21
+#define OPTYPE_FLASHISM_JUMPVECTOR 22
+#define OPTYPE_UFI_DIR 23
+#define OPTYPE_PHY_FW 99
+
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 262144 /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 262144 /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g2 1310720 /* Max firmware image size */
+
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 262144
+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 524288 /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 1048576 /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 2097152 /* Max firmware image size */
+
+/* Offsets for components on Flash. */
+#define FLASH_REDBOOT_START_g2 0
+#define FLASH_FCoE_BIOS_START_g2 524288
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 1048576
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 2359296
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 3670016
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 4980736
+#define FLASH_iSCSI_BIOS_START_g2 7340032
+#define FLASH_PXE_BIOS_START_g2 7864320
+
+#define FLASH_REDBOOT_START_g3 262144
+#define FLASH_PHY_FW_START_g3 1310720
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 2097152
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 4194304
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 6291456
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 8388608
+#define FLASH_iSCSI_BIOS_START_g3 12582912
+#define FLASH_PXE_BIOS_START_g3 13107200
+#define FLASH_FCoE_BIOS_START_g3 13631488
+#define FLASH_NCSI_START_g3 15990784
+
+#define IMAGE_NCSI 16
+#define IMAGE_OPTION_ROM_PXE 32
+#define IMAGE_OPTION_ROM_FCoE 33
+#define IMAGE_OPTION_ROM_ISCSI 34
+#define IMAGE_FLASHISM_JUMPVECTOR 48
+#define IMAGE_FIRMWARE_iSCSI 160
+#define IMAGE_FIRMWARE_FCoE 162
+#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
+#define IMAGE_FIRMWARE_BACKUP_FCoE 178
+#define IMAGE_FIRMWARE_PHY 192
+#define IMAGE_REDBOOT_DIR 208
+#define IMAGE_REDBOOT_CONFIG 209
+#define IMAGE_UFI_DIR 210
+#define IMAGE_BOOT_CODE 224
+
+struct controller_id {
+ u32 vendor;
+ u32 device;
+ u32 subvendor;
+ u32 subdevice;
+};
+
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+ int img_type;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+
+struct flash_file_hdr_g2 {
+ u8 sign[32];
+ u32 cksum;
+ u32 antidote;
+ struct controller_id cont_id;
+ u32 file_len;
+ u32 chunk_num;
+ u32 total_chunks;
+ u32 num_imgs;
+ u8 build[24];
+};
+
+/* First letter of the build version of the image */
+#define BLD_STR_UFI_TYPE_BE2 '2'
+#define BLD_STR_UFI_TYPE_BE3 '3'
+#define BLD_STR_UFI_TYPE_SH '4'
+
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 asic_type_rev;
+ u8 rsvd[31];
+};
+
+struct flash_section_hdr {
+ u32 format_rev;
+ u32 cksum;
+ u32 antidote;
+ u32 num_images;
+ u8 id_string[128];
+ u32 rsvd[4];
+} __packed;
+
+struct flash_section_hdr_g2 {
+ u32 format_rev;
+ u32 cksum;
+ u32 antidote;
+ u32 build_num;
+ u8 id_string[128];
+ u32 rsvd[8];
+} __packed;
+
+struct flash_section_entry {
+ u32 type;
+ u32 offset;
+ u32 pad_size;
+ u32 image_size;
+ u32 cksum;
+ u32 entry_point;
+ u16 optype;
+ u16 rsvd0;
+ u32 rsvd1;
+ u8 ver_data[32];
+} __packed;
+
+struct flash_section_info {
+ u8 cookie[32];
+ struct flash_section_hdr fsec_hdr;
+ struct flash_section_entry fsec_entry[32];
+} __packed;
+
+struct flash_section_info_g2 {
+ u8 cookie[32];
+ struct flash_section_hdr_g2 fsec_hdr;
+ struct flash_section_entry fsec_entry[32];
+} __packed;
+
/****************** Firmware Flash ******************/
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+#define FLASHROM_OPER_PHY_FLASH 9
+#define FLASHROM_OPER_PHY_SAVE 10
+
struct flashrom_params {
u32 op_code;
u32 op_type;
@@ -1366,6 +1553,7 @@ enum {
PHY_TYPE_QSFP,
PHY_TYPE_KR4_40GB,
PHY_TYPE_KR2_20GB,
+ PHY_TYPE_TN_8022,
PHY_TYPE_DISABLED = 255
};
@@ -1429,6 +1617,20 @@ struct be_cmd_req_set_qos {
};
/*********************** Controller Attributes ***********************/
+struct mgmt_hba_attribs {
+ u32 rsvd0[24];
+ u8 controller_model_number[32];
+ u32 rsvd1[79];
+ u8 rsvd2[3];
+ u8 phy_port;
+ u32 rsvd3[13];
+} __packed;
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u32 rsvd0[10];
+} __packed;
+
struct be_cmd_req_cntl_attribs {
struct be_cmd_req_hdr hdr;
};
@@ -2070,8 +2272,10 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
u8 page_num, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
+int be_cmd_query_sfp_info(struct be_adapter *adapter);
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_oper, u32 flash_opcode, u32 buf_size);
+ u32 flash_oper, u32 flash_opcode, u32 img_offset,
+ u32 buf_size);
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset,
const char *obj_name, u32 *data_written,
@@ -2081,7 +2285,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 *data_read, u32 *eof, u8 *addn_status);
int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- u16 optype, int offset);
+ u16 img_optype, u32 img_offset, u32 crc_offset);
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2136,7 +2340,7 @@ int lancer_initiate_dump(struct be_adapter *adapter);
int lancer_delete_dump(struct be_adapter *adapter);
bool dump_present(struct be_adapter *adapter);
int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 73a500ccbf69..4d2de4700769 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_pkts)},
/* Number of skbs queued for trasmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
- /* Number of TX work request blocks DMAed to HW */
- {DRVSTAT_TX_INFO(tx_wrbs)},
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
*/
@@ -707,15 +705,17 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
if (ecmd->autoneg != adapter->phy.fc_autoneg)
return -EINVAL;
- adapter->tx_fc = ecmd->tx_pause;
- adapter->rx_fc = ecmd->rx_pause;
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
+ status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
+ ecmd->rx_pause);
+ if (status) {
dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
+ return be_cmd_status(status);
+ }
- return be_cmd_status(status);
+ adapter->tx_fc = ecmd->tx_pause;
+ adapter->rx_fc = ecmd->rx_pause;
+ return 0;
}
static int be_set_phys_id(struct net_device *netdev,
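be_set_pauseparam() above now issues the flow-control command first and updates the cached tx_fc/rx_fc only when it succeeds, so a failed "ethtool -A" no longer leaves the driver's view of the settings out of sync with the hardware. The ordering, reduced to a generic sketch with a placeholder hardware call:

        #include <linux/types.h>

        /* Placeholder for the device-specific flow-control command. */
        int hw_set_flow_control(void *hw, bool tx_fc, bool rx_fc);

        struct sketch_fc_state {
                void *hw;
                bool tx_fc;
                bool rx_fc;
        };

        static int sketch_set_pauseparam(struct sketch_fc_state *st,
                                         bool tx_pause, bool rx_pause)
        {
                int err = hw_set_flow_control(st->hw, tx_pause, rx_pause);

                if (err)
                        return err;     /* cached state left untouched */

                st->tx_fc = tx_pause;   /* commit only after success */
                st->rx_fc = rx_pause;
                return 0;
        }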
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 295ee0835ba0..48840889db62 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -75,7 +75,7 @@
* atomically without having to arbitrate for the PCI Interrupt Disable bit
* with the OS.
*/
-#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK BIT(29) /* bit 29 */
/********* PCI Function Capability *********/
#define BE_FUNCTION_CAPS_RSS 0x2
@@ -171,94 +171,6 @@
#define RETRIEVE_FAT 0
#define QUERY_FAT 1
-/* Flashrom related descriptors */
-#define MAX_FLASH_COMP 32
-#define IMAGE_TYPE_FIRMWARE 160
-#define IMAGE_TYPE_BOOTCODE 224
-#define IMAGE_TYPE_OPTIONROM 32
-
-#define NUM_FLASHDIR_ENTRIES 32
-
-#define OPTYPE_ISCSI_ACTIVE 0
-#define OPTYPE_REDBOOT 1
-#define OPTYPE_BIOS 2
-#define OPTYPE_PXE_BIOS 3
-#define OPTYPE_FCOE_BIOS 8
-#define OPTYPE_ISCSI_BACKUP 9
-#define OPTYPE_FCOE_FW_ACTIVE 10
-#define OPTYPE_FCOE_FW_BACKUP 11
-#define OPTYPE_NCSI_FW 13
-#define OPTYPE_REDBOOT_DIR 18
-#define OPTYPE_REDBOOT_CONFIG 19
-#define OPTYPE_SH_PHY_FW 21
-#define OPTYPE_FLASHISM_JUMPVECTOR 22
-#define OPTYPE_UFI_DIR 23
-#define OPTYPE_PHY_FW 99
-#define TN_8022 13
-
-#define FLASHROM_OPER_PHY_FLASH 9
-#define FLASHROM_OPER_PHY_SAVE 10
-#define FLASHROM_OPER_FLASH 1
-#define FLASHROM_OPER_SAVE 2
-#define FLASHROM_OPER_REPORT 4
-
-#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
-#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
-#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
-#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
-
-#define FLASH_NCSI_MAGIC (0x16032009)
-#define FLASH_NCSI_DISABLED (0)
-#define FLASH_NCSI_ENABLED (1)
-
-#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
-#define FLASH_iSCSI_BIOS_START_g2 (7340032)
-#define FLASH_PXE_BIOS_START_g2 (7864320)
-#define FLASH_FCoE_BIOS_START_g2 (524288)
-#define FLASH_REDBOOT_START_g2 (0)
-
-#define FLASH_NCSI_START_g3 (15990784)
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
-#define FLASH_iSCSI_BIOS_START_g3 (12582912)
-#define FLASH_PXE_BIOS_START_g3 (13107200)
-#define FLASH_FCoE_BIOS_START_g3 (13631488)
-#define FLASH_REDBOOT_START_g3 (262144)
-#define FLASH_PHY_FW_START_g3 1310720
-
-#define IMAGE_NCSI 16
-#define IMAGE_OPTION_ROM_PXE 32
-#define IMAGE_OPTION_ROM_FCoE 33
-#define IMAGE_OPTION_ROM_ISCSI 34
-#define IMAGE_FLASHISM_JUMPVECTOR 48
-#define IMAGE_FLASH_ISM 49
-#define IMAGE_JUMP_VECTOR 50
-#define IMAGE_FIRMWARE_iSCSI 160
-#define IMAGE_FIRMWARE_COMP_iSCSI 161
-#define IMAGE_FIRMWARE_FCoE 162
-#define IMAGE_FIRMWARE_COMP_FCoE 163
-#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
-#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
-#define IMAGE_FIRMWARE_BACKUP_FCoE 178
-#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
-#define IMAGE_FIRMWARE_PHY 192
-#define IMAGE_REDBOOT_DIR 208
-#define IMAGE_REDBOOT_CONFIG 209
-#define IMAGE_UFI_DIR 210
-#define IMAGE_BOOT_CODE 224
-
/************* Rx Packet Type Encoding **************/
#define BE_UNICAST_PACKET 0
#define BE_MULTICAST_PACKET 1
@@ -281,10 +193,10 @@ struct be_eq_entry {
/* TX Queue Descriptor */
#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
struct be_eth_wrb {
- u32 frag_pa_hi; /* dword 0 */
- u32 frag_pa_lo; /* dword 1 */
- u32 rsvd0; /* dword 2 */
- u32 frag_len; /* dword 3: bits 0 - 15 */
+ __le32 frag_pa_hi; /* dword 0 */
+ __le32 frag_pa_lo; /* dword 1 */
+ u32 rsvd0; /* dword 2 */
+ __le32 frag_len; /* dword 3: bits 0 - 15 */
} __packed;
/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
@@ -311,8 +223,13 @@ struct amap_eth_hdr_wrb {
u8 vlan_tag[16];
} __packed;
+#define TX_HDR_WRB_COMPL 1 /* word 2 */
+#define TX_HDR_WRB_EVT BIT(1) /* word 2 */
+#define TX_HDR_WRB_NUM_SHIFT 13 /* word 2: bits 13:17 */
+#define TX_HDR_WRB_NUM_MASK 0x1F /* word 2: bits 13:17 */
+
struct be_eth_hdr_wrb {
- u32 dw[4];
+ __le32 dw[4];
};
/********* Tx Compl Status Encoding *********/
@@ -435,138 +352,3 @@ struct amap_eth_rx_compl_v1 {
struct be_eth_rx_compl {
u32 dw[4];
};
-
-struct mgmt_hba_attribs {
- u8 flashrom_version_string[32];
- u8 manufacturer_name[32];
- u32 supported_modes;
- u32 rsvd0[3];
- u8 ncsi_ver_string[12];
- u32 default_extended_timeout;
- u8 controller_model_number[32];
- u8 controller_description[64];
- u8 controller_serial_number[32];
- u8 ip_version_string[32];
- u8 firmware_version_string[32];
- u8 bios_version_string[32];
- u8 redboot_version_string[32];
- u8 driver_version_string[32];
- u8 fw_on_flash_version_string[32];
- u32 functionalities_supported;
- u16 max_cdblength;
- u8 asic_revision;
- u8 generational_guid[16];
- u8 hba_port_count;
- u16 default_link_down_timeout;
- u8 iscsi_ver_min_max;
- u8 multifunction_device;
- u8 cache_valid;
- u8 hba_status;
- u8 max_domains_supported;
- u8 phy_port;
- u32 firmware_post_status;
- u32 hba_mtu[8];
- u32 rsvd1[4];
-};
-
-struct mgmt_controller_attrib {
- struct mgmt_hba_attribs hba_attribs;
- u16 pci_vendor_id;
- u16 pci_device_id;
- u16 pci_sub_vendor_id;
- u16 pci_sub_system_id;
- u8 pci_bus_number;
- u8 pci_device_number;
- u8 pci_function_number;
- u8 interface_type;
- u64 unique_identifier;
- u32 rsvd0[5];
-};
-
-struct controller_id {
- u32 vendor;
- u32 device;
- u32 subvendor;
- u32 subdevice;
-};
-
-struct flash_comp {
- unsigned long offset;
- int optype;
- int size;
- int img_type;
-};
-
-struct image_hdr {
- u32 imageid;
- u32 imageoffset;
- u32 imagelength;
- u32 image_checksum;
- u8 image_version[32];
-};
-struct flash_file_hdr_g2 {
- u8 sign[32];
- u32 cksum;
- u32 antidote;
- struct controller_id cont_id;
- u32 file_len;
- u32 chunk_num;
- u32 total_chunks;
- u32 num_imgs;
- u8 build[24];
-};
-
-struct flash_file_hdr_g3 {
- u8 sign[52];
- u8 ufi_version[4];
- u32 file_len;
- u32 cksum;
- u32 antidote;
- u32 num_imgs;
- u8 build[24];
- u8 asic_type_rev;
- u8 rsvd[31];
-};
-
-struct flash_section_hdr {
- u32 format_rev;
- u32 cksum;
- u32 antidote;
- u32 num_images;
- u8 id_string[128];
- u32 rsvd[4];
-} __packed;
-
-struct flash_section_hdr_g2 {
- u32 format_rev;
- u32 cksum;
- u32 antidote;
- u32 build_num;
- u8 id_string[128];
- u32 rsvd[8];
-} __packed;
-
-struct flash_section_entry {
- u32 type;
- u32 offset;
- u32 pad_size;
- u32 image_size;
- u32 cksum;
- u32 entry_point;
- u16 optype;
- u16 rsvd0;
- u32 rsvd1;
- u8 ver_data[32];
-} __packed;
-
-struct flash_section_info {
- u8 cookie[32];
- struct flash_section_hdr fsec_hdr;
- struct flash_section_entry fsec_entry[32];
-} __packed;
-
-struct flash_section_info_g2 {
- u8 cookie[32];
- struct flash_section_hdr_g2 fsec_hdr;
- struct flash_section_entry fsec_entry[32];
-} __packed;
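Annotating the be_eth_wrb fields as __le32 above lets sparse (make C=1) catch any assignment that skips cpu_to_le32()/le32_to_cpu(), which is why the be_main.c hunks that follow convert wrb_fill() and unmap_tx_frag() to explicit byte-swapping. A minimal illustration with an assumed descriptor:

        #include <linux/types.h>
        #include <asm/byteorder.h>

        struct sketch_desc {
                __le32 addr_lo;         /* little-endian as seen by the device */
        };

        static inline void sketch_fill(struct sketch_desc *d, u32 addr_lo)
        {
                d->addr_lo = cpu_to_le32(addr_lo);      /* correct */
                /* d->addr_lo = addr_lo; would draw a sparse warning */
        }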
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 41a0a5498da7..0a816859aca5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -26,7 +26,6 @@
#include <net/vxlan.h>
MODULE_VERSION(DRV_VER);
-MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
@@ -662,48 +661,40 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
netif_carrier_off(netdev);
}
-static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs,
- bool stopped)
+static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
struct be_tx_stats *stats = tx_stats(txo);
u64_stats_update_begin(&stats->sync);
stats->tx_reqs++;
- stats->tx_wrbs += wrb_cnt;
- stats->tx_bytes += copied;
- stats->tx_pkts += (gso_segs ? gso_segs : 1);
- if (stopped)
- stats->tx_stops++;
+ stats->tx_bytes += skb->len;
+ stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
u64_stats_update_end(&stats->sync);
}
-/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+/* Returns number of WRBs needed for the skb */
+static u32 skb_wrb_cnt(struct sk_buff *skb)
{
- int cnt = (skb->len > skb->data_len);
-
- cnt += skb_shinfo(skb)->nr_frags;
-
- /* to account for hdr wrb */
- cnt++;
- if (lancer_chip(adapter) || !(cnt & 1)) {
- *dummy = false;
- } else {
- /* add a dummy to make it an even num */
- cnt++;
- *dummy = true;
- }
- BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
- return cnt;
+ /* +1 for the header wrb */
+ return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
- wrb->frag_pa_hi = upper_32_bits(addr);
- wrb->frag_pa_lo = addr & 0xFFFFFFFF;
- wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+ wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
+ wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
+ wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
+ wrb->rsvd0 = 0;
+}
+
+/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
+ * to avoid the swap and shift/mask operations in wrb_fill().
+ */
+static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
+{
+ wrb->frag_pa_hi = 0;
+ wrb->frag_pa_lo = 0;
+ wrb->frag_len = 0;
wrb->rsvd0 = 0;
}
@@ -713,7 +704,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
u8 vlan_prio;
u16 vlan_tag;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
/* If vlan priority provided by OS is NOT in available bmap */
if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
@@ -764,52 +755,57 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
}
- /* To skip HW VLAN tagging: evt = 1, compl = 0 */
- SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
- SET_TX_WRB_HDR_BITS(event, hdr, 1);
SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
SET_TX_WRB_HDR_BITS(len, hdr, len);
+
+ /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
+ * When this hack is not needed, the evt bit is set while ringing DB
+ */
+ if (skip_hw_vlan)
+ SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
bool unmap_single)
{
dma_addr_t dma;
+ u32 frag_len = le32_to_cpu(wrb->frag_len);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
- if (wrb->frag_len) {
+ dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
+ (u64)le32_to_cpu(wrb->frag_pa_lo);
+ if (frag_len) {
if (unmap_single)
- dma_unmap_single(dev, dma, wrb->frag_len,
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
else
- dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
+ dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
}
}
-static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+/* Returns the number of WRBs used up by the skb */
+static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
+ struct sk_buff *skb, bool skip_hw_vlan)
{
- dma_addr_t busaddr;
- int i, copied = 0;
+ u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
struct device *dev = &adapter->pdev->dev;
- struct sk_buff *first_skb = skb;
- struct be_eth_wrb *wrb;
+ struct be_queue_info *txq = &txo->q;
struct be_eth_hdr_wrb *hdr;
bool map_single = false;
- u16 map_head;
+ struct be_eth_wrb *wrb;
+ dma_addr_t busaddr;
+ u16 head = txq->head;
hdr = queue_head_node(txq);
+ wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
+ be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
queue_head_inc(txq);
- map_head = txq->head;
if (skb->len > skb->data_len) {
int len = skb_headlen(skb);
@@ -820,7 +816,6 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
map_single = true;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, len);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
queue_head_inc(txq);
copied += len;
}
@@ -834,35 +829,44 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, skb_frag_size(frag));
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
queue_head_inc(txq);
copied += skb_frag_size(frag);
}
- if (dummy_wrb) {
- wrb = queue_head_node(txq);
- wrb_fill(wrb, 0, 0);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- }
+ BUG_ON(txo->sent_skb_list[head]);
+ txo->sent_skb_list[head] = skb;
+ txo->last_req_hdr = head;
+ atomic_add(wrb_cnt, &txq->used);
+ txo->last_req_wrb_cnt = wrb_cnt;
+ txo->pend_wrb_cnt += wrb_cnt;
- wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
- be_dws_cpu_to_le(hdr, sizeof(*hdr));
+ be_tx_stats_update(txo, skb);
+ return wrb_cnt;
- return copied;
dma_err:
- txq->head = map_head;
+ /* Bring the queue back to the state it was in before this
+ * routine was invoked.
+ */
+ txq->head = head;
+ /* skip the first wrb (hdr); it's not mapped */
+ queue_head_inc(txq);
while (copied) {
wrb = queue_head_node(txq);
unmap_tx_frag(dev, wrb, map_single);
map_single = false;
- copied -= wrb->frag_len;
+ copied -= le32_to_cpu(wrb->frag_len);
adapter->drv_stats.dma_map_errors++;
queue_head_inc(txq);
}
+ txq->head = head;
return 0;
}
+static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
+{
+ return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+}
+
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct sk_buff *skb,
bool *skip_hw_vlan)
@@ -873,7 +877,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (unlikely(!skb))
return skb;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
@@ -932,7 +936,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
- return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+ return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
@@ -955,7 +959,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
if (skb->len <= 60 &&
- (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
+ (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
@@ -973,7 +977,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
* Manually insert VLAN in pkt.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL &&
- vlan_tx_tag_present(skb)) {
+ skb_vlan_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
goto err;
@@ -1030,52 +1034,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
return skb;
}
+static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
+
+ /* Mark the last request eventable if it hasn't been marked already */
+ if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
+ hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
+
+ /* compose a dummy wrb if there is an odd number of wrbs to notify */
+ if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
+ wrb_fill_dummy(queue_head_node(txq));
+ queue_head_inc(txq);
+ atomic_inc(&txq->used);
+ txo->pend_wrb_cnt++;
+ hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
+ TX_HDR_WRB_NUM_SHIFT);
+ hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
+ TX_HDR_WRB_NUM_SHIFT);
+ }
+ be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
+ txo->pend_wrb_cnt = 0;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
+ bool skip_hw_vlan = false, flush = !skb->xmit_more;
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+ u16 q_idx = skb_get_queue_mapping(skb);
+ struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
struct be_queue_info *txq = &txo->q;
- bool dummy_wrb, stopped = false;
- u32 wrb_cnt = 0, copied = 0;
- bool skip_hw_vlan = false;
- u32 start = txq->head;
+ u16 wrb_cnt;
skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
- if (!skb) {
- tx_stats(txo)->tx_drv_drops++;
- return NETDEV_TX_OK;
- }
-
- wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+ if (unlikely(!skb))
+ goto drop;
- copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
- skip_hw_vlan);
- if (copied) {
- int gso_segs = skb_shinfo(skb)->gso_segs;
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+ if (unlikely(!wrb_cnt)) {
+ dev_kfree_skb_any(skb);
+ goto drop;
+ }
- /* record the sent skb in the sent_skb table */
- BUG_ON(txo->sent_skb_list[start]);
- txo->sent_skb_list[start] = skb;
+ if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+ netif_stop_subqueue(netdev, q_idx);
+ tx_stats(txo)->tx_stops++;
+ }
- /* Ensure txq has space for the next skb; Else stop the queue
- * *BEFORE* ringing the tx doorbell, so that we serialze the
- * tx compls of the current transmit which'll wake up the queue
- */
- atomic_add(wrb_cnt, &txq->used);
- if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
- txq->len) {
- netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
- stopped = true;
- }
+ if (flush || __netif_subqueue_stopped(netdev, q_idx))
+ be_xmit_flush(adapter, txo);
- be_txq_notify(adapter, txo, wrb_cnt);
+ return NETDEV_TX_OK;
+drop:
+ tx_stats(txo)->tx_drv_drops++;
+ /* Flush the already enqueued tx requests */
+ if (flush && txo->pend_wrb_cnt)
+ be_xmit_flush(adapter, txo);
- be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
- } else {
- txq->head = start;
- tx_stats(txo)->tx_drv_drops++;
- dev_kfree_skb_any(skb);
- }
return NETDEV_TX_OK;
}
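The reworked be_xmit() above only rings the TX doorbell, via be_xmit_flush(), when skb->xmit_more is clear or the sub-queue had to be stopped; in between, WRBs accumulate in txo->pend_wrb_cnt so that back-to-back packets share one notification. A minimal stand-alone sketch of that batching idea (the toy_* names are invented for illustration and are not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of doorbell batching: descriptors are queued per packet but the
 * (imaginary) doorbell is written once per batch rather than once per packet.
 */
struct toy_txq {
	unsigned int pending;		/* queued but not yet notified */
};

static void toy_ring_doorbell(struct toy_txq *q)
{
	if (!q->pending)
		return;
	printf("doorbell: notifying %u descriptors\n", q->pending);
	q->pending = 0;
}

/* more_coming plays the role of skb->xmit_more: the stack promises more. */
static void toy_xmit(struct toy_txq *q, unsigned int descs, bool more_coming)
{
	q->pending += descs;
	if (!more_coming)
		toy_ring_doorbell(q);
}

int main(void)
{
	struct toy_txq q = { 0 };

	toy_xmit(&q, 2, true);		/* batched */
	toy_xmit(&q, 3, true);		/* batched */
	toy_xmit(&q, 1, false);		/* flush: one doorbell for 6 WRBs */
	return 0;
}

On top of this, be_xmit_flush() pads the batch with one dummy WRB on non-Lancer chips whenever the pending count is odd, and bumps the WRB count recorded in the last header WRB accordingly, before notifying the hardware.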
@@ -1096,6 +1112,43 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+static inline bool be_in_all_promisc(struct be_adapter *adapter)
+{
+ return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
+ BE_IF_FLAGS_ALL_PROMISCUOUS;
+}
+
+static int be_set_vlan_promisc(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
+ return 0;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
+ if (!status) {
+ dev_info(dev, "Enabled VLAN promiscuous mode\n");
+ adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ } else {
+ dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
+ }
+ return status;
+}
+
+static int be_clear_vlan_promisc(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
+ if (!status) {
+ dev_info(dev, "Disabling VLAN promiscuous mode\n");
+ adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+ return status;
+}
+
/*
* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
* If the user configures more, place BE in vlan promiscuous mode.
@@ -1108,11 +1161,11 @@ static int be_vid_config(struct be_adapter *adapter)
int status = 0;
/* No need to further configure vids if in promiscuous mode */
- if (adapter->promiscuous)
+ if (be_in_all_promisc(adapter))
return 0;
if (adapter->vlans_added > be_max_vlans(adapter))
- goto set_vlan_promisc;
+ return be_set_vlan_promisc(adapter);
/* Construct VLAN Table to give to HW */
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
@@ -1120,36 +1173,14 @@ static int be_vid_config(struct be_adapter *adapter)
status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
if (status) {
+ dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
- goto set_vlan_promisc;
- dev_err(dev, "Setting HW VLAN filtering failed\n");
- } else {
- if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
- /* hw VLAN filtering re-enabled. */
- status = be_cmd_rx_filter(adapter,
- BE_FLAGS_VLAN_PROMISC, OFF);
- if (!status) {
- dev_info(dev,
- "Disabling VLAN Promiscuous mode\n");
- adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
- }
- }
+ return be_set_vlan_promisc(adapter);
+ } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ status = be_clear_vlan_promisc(adapter);
}
-
- return status;
-
-set_vlan_promisc:
- if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
- return 0;
-
- status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
- if (!status) {
- dev_info(dev, "Enable VLAN Promiscuous mode\n");
- adapter->flags |= BE_FLAGS_VLAN_PROMISC;
- } else
- dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
return status;
}
@@ -1191,79 +1222,99 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
return be_vid_config(adapter);
}
-static void be_clear_promisc(struct be_adapter *adapter)
+static void be_clear_all_promisc(struct be_adapter *adapter)
{
- adapter->promiscuous = false;
- adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
+ be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
+ adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+}
- be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+static void be_set_all_promisc(struct be_adapter *adapter)
+{
+ be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
+ adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
-static void be_set_rx_mode(struct net_device *netdev)
+static void be_set_mc_promisc(struct be_adapter *adapter)
{
- struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (netdev->flags & IFF_PROMISC) {
- be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
- adapter->promiscuous = true;
- goto done;
- }
+ if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
+ return;
- /* BE was previously in promiscuous mode; disable it */
- if (adapter->promiscuous) {
- be_clear_promisc(adapter);
- if (adapter->vlans_added)
- be_vid_config(adapter);
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
+ if (!status)
+ adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
+}
+
+static void be_set_mc_list(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
+ else
+ be_set_mc_promisc(adapter);
+}
+
+static void be_set_uc_list(struct be_adapter *adapter)
+{
+ struct netdev_hw_addr *ha;
+ int i = 1; /* First slot is claimed by the Primary MAC */
+
+ for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+
+ if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
+ be_set_all_promisc(adapter);
+ return;
}
- /* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > be_max_mc(adapter))
- goto set_mcast_promisc;
+ netdev_for_each_uc_addr(ha, adapter->netdev) {
+ adapter->uc_macs++; /* First slot is for Primary MAC */
+ be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
+ &adapter->pmac_id[adapter->uc_macs], 0);
+ }
+}
- if (netdev_uc_count(netdev) != adapter->uc_macs) {
- struct netdev_hw_addr *ha;
- int i = 1; /* First slot is claimed by the Primary MAC */
+static void be_clear_uc_list(struct be_adapter *adapter)
+{
+ int i;
- for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
- }
+ for (i = 1; i < (adapter->uc_macs + 1); i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
+}
- if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
- be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
- adapter->promiscuous = true;
- goto done;
- }
+static void be_set_rx_mode(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
- netdev_for_each_uc_addr(ha, adapter->netdev) {
- adapter->uc_macs++; /* First slot is for Primary MAC */
- be_cmd_pmac_add(adapter, (u8 *)ha->addr,
- adapter->if_handle,
- &adapter->pmac_id[adapter->uc_macs], 0);
- }
+ if (netdev->flags & IFF_PROMISC) {
+ be_set_all_promisc(adapter);
+ return;
}
- status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
- if (!status) {
- if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
- adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
- goto done;
+ /* Interface was previously in promiscuous mode; disable it */
+ if (be_in_all_promisc(adapter)) {
+ be_clear_all_promisc(adapter);
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
}
-set_mcast_promisc:
- if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ /* Enable multicast promisc if num configured exceeds what we support */
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
+ be_set_mc_promisc(adapter);
return;
+ }
- /* Set to MCAST promisc mode if setting MULTICAST address fails
- * or if num configured exceeds what we support
- */
- status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
- if (!status)
- adapter->flags |= BE_FLAGS_MCAST_PROMISC;
-done:
- return;
+ if (netdev_uc_count(netdev) != adapter->uc_macs)
+ be_set_uc_list(adapter);
+
+ be_set_mc_list(adapter);
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1959,32 +2010,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
static u16 be_tx_compl_process(struct be_adapter *adapter,
struct be_tx_obj *txo, u16 last_index)
{
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
struct be_queue_info *txq = &txo->q;
+ u16 frag_index, num_wrbs = 0;
+ struct sk_buff *skb = NULL;
+ bool unmap_skb_hdr = false;
struct be_eth_wrb *wrb;
- struct sk_buff **sent_skbs = txo->sent_skb_list;
- struct sk_buff *sent_skb;
- u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
- bool unmap_skb_hdr = true;
-
- sent_skb = sent_skbs[txq->tail];
- BUG_ON(!sent_skb);
- sent_skbs[txq->tail] = NULL;
-
- /* skip header wrb */
- queue_tail_inc(txq);
do {
- cur_index = txq->tail;
+ if (sent_skbs[txq->tail]) {
+ /* Free skb from prev req */
+ if (skb)
+ dev_consume_skb_any(skb);
+ skb = sent_skbs[txq->tail];
+ sent_skbs[txq->tail] = NULL;
+ queue_tail_inc(txq); /* skip hdr wrb */
+ num_wrbs++;
+ unmap_skb_hdr = true;
+ }
wrb = queue_tail_node(txq);
+ frag_index = txq->tail;
unmap_tx_frag(&adapter->pdev->dev, wrb,
- (unmap_skb_hdr && skb_headlen(sent_skb)));
+ (unmap_skb_hdr && skb_headlen(skb)));
unmap_skb_hdr = false;
-
- num_wrbs++;
queue_tail_inc(txq);
- } while (cur_index != last_index);
+ num_wrbs++;
+ } while (frag_index != last_index);
+ dev_consume_skb_any(skb);
- dev_consume_skb_any(sent_skb);
return num_wrbs;
}
@@ -2068,12 +2121,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
static void be_tx_compl_clean(struct be_adapter *adapter)
{
+ u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+ struct device *dev = &adapter->pdev->dev;
struct be_tx_obj *txo;
struct be_queue_info *txq;
struct be_eth_tx_compl *txcp;
- u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
- struct sk_buff *sent_skb;
- bool dummy_wrb;
int i, pending_txqs;
/* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2147,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
atomic_sub(num_wrbs, &txq->used);
timeo = 0;
}
- if (atomic_read(&txq->used) == 0)
+ if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
pending_txqs--;
}
@@ -2105,21 +2157,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
mdelay(1);
} while (true);
+ /* Free enqueued TX that was never notified to HW */
for_all_tx_queues(adapter, txo, i) {
txq = &txo->q;
- if (atomic_read(&txq->used))
- dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
- atomic_read(&txq->used));
- /* free posted tx for which compls will never arrive */
- while (atomic_read(&txq->used)) {
- sent_skb = txo->sent_skb_list[txq->tail];
+ if (atomic_read(&txq->used)) {
+ dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
+ i, atomic_read(&txq->used));
+ notified_idx = txq->tail;
end_idx = txq->tail;
- num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
- &dummy_wrb);
- index_adv(&end_idx, num_wrbs - 1, txq->len);
+ index_adv(&end_idx, atomic_read(&txq->used) - 1,
+ txq->len);
+ /* Use the tx-compl process logic to handle requests
+ * that were not sent to the HW.
+ */
num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
atomic_sub(num_wrbs, &txq->used);
+ BUG_ON(atomic_read(&txq->used));
+ txo->pend_wrb_cnt = 0;
+ /* Since hw was never notified of these requests,
+ * reset TXQ indices
+ */
+ txq->head = notified_idx;
+ txq->tail = notified_idx;
}
}
}
@@ -2514,6 +2574,106 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
}
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock(&eqo->lock); /* BH is already disabled */
+ if (eqo->state & BE_EQ_LOCKED) {
+ WARN_ON(eqo->state & BE_EQ_NAPI);
+ eqo->state |= BE_EQ_NAPI_YIELD;
+ status = false;
+ } else {
+ eqo->state = BE_EQ_NAPI;
+ }
+ spin_unlock(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+ spin_lock(&eqo->lock); /* BH is already disabled */
+
+ WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock(&eqo->lock);
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock_bh(&eqo->lock);
+ if (eqo->state & BE_EQ_LOCKED) {
+ eqo->state |= BE_EQ_POLL_YIELD;
+ status = false;
+ } else {
+ eqo->state |= BE_EQ_POLL;
+ }
+ spin_unlock_bh(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_bh(&eqo->lock);
+
+ WARN_ON(eqo->state & (BE_EQ_NAPI));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock_bh(&eqo->lock);
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_init(&eqo->lock);
+ eqo->state = BE_EQ_IDLE;
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+ local_bh_disable();
+
+ /* It's enough to just acquire napi lock on the eqo to stop
+ * be_busy_poll() from processing any queues.
+ */
+ while (!be_lock_napi(eqo))
+ mdelay(1);
+
+ local_bh_enable();
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ return true;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ return false;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
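The CONFIG_NET_RX_BUSY_POLL helpers above are a small try-lock protocol: whichever of NAPI or busy-poll finds the EQ already owned sets a *_YIELD flag and backs off instead of spinning. A rough user-space model of the same idea, with a pthread mutex standing in for eqo->lock and toy_* names invented for the sketch:

#include <pthread.h>
#include <stdbool.h>

enum {
	TOY_EQ_IDLE       = 0,
	TOY_EQ_NAPI       = 1 << 0,
	TOY_EQ_POLL       = 1 << 1,
	TOY_EQ_NAPI_YIELD = 1 << 2,
	TOY_EQ_POLL_YIELD = 1 << 3,
	TOY_EQ_LOCKED     = TOY_EQ_NAPI | TOY_EQ_POLL,
};

struct toy_eq {
	pthread_mutex_t lock;	/* stands in for the driver's spinlock */
	unsigned int state;
};

/* Fails (and records a yield) if busy-poll currently owns the EQ. */
static bool toy_lock_napi(struct toy_eq *eq)
{
	bool locked = true;

	pthread_mutex_lock(&eq->lock);
	if (eq->state & TOY_EQ_LOCKED) {
		eq->state |= TOY_EQ_NAPI_YIELD;
		locked = false;
	} else {
		eq->state = TOY_EQ_NAPI;
	}
	pthread_mutex_unlock(&eq->lock);
	return locked;
}

static void toy_unlock_napi(struct toy_eq *eq)
{
	pthread_mutex_lock(&eq->lock);
	eq->state = TOY_EQ_IDLE;
	pthread_mutex_unlock(&eq->lock);
}

int main(void)
{
	struct toy_eq eq = { PTHREAD_MUTEX_INITIALIZER, TOY_EQ_IDLE };

	eq.state = TOY_EQ_POLL;		/* pretend busy-poll owns the EQ */
	if (!toy_lock_napi(&eq))	/* NAPI yields, sets the yield flag */
		eq.state = TOY_EQ_IDLE;	/* busy-poll side would clear it */
	toy_lock_napi(&eq);		/* now succeeds */
	toy_unlock_napi(&eq);
	return 0;
}

toy_lock_busy_poll()/toy_unlock_busy_poll() would mirror this pair with TOY_EQ_POLL and TOY_EQ_POLL_YIELD, much as the driver's helpers do.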
int be_poll(struct napi_struct *napi, int budget)
{
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
@@ -2833,11 +2993,7 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
-
- for (i = 1; i < (adapter->uc_macs + 1); i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
- adapter->uc_macs = 0;
+ be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3008,6 +3164,19 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
+static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+ u32 addr;
+
+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+ mac[5] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[3] = (u8)((addr >> 16) & 0xFF);
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
+}
+
/*
* Generate a seed MAC address from the PF MAC Address using jhash.
* MAC Address for VFs are assigned incrementally starting from the seed.
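be_vf_eth_addr_generate() keeps the OUI (the first three bytes) of the PF MAC and fills the low three bytes from a jhash of the whole address; the comment above notes that VF MACs are then assigned incrementally from that seed. A stand-alone illustration of the byte layout, using an arbitrary stand-in hash since jhash is kernel-internal (toy_* names invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Stand-in for jhash(); any 32-bit mix would do for the illustration. */
static uint32_t toy_hash(const uint8_t *p, size_t len)
{
	uint32_t h = 0x9e3779b9;
	size_t i;

	for (i = 0; i < len; i++)
		h = (h ^ p[i]) * 0x01000193;
	return h;
}

static void toy_seed_mac(const uint8_t *pf_mac, uint8_t *mac)
{
	uint32_t addr = toy_hash(pf_mac, ETH_ALEN);

	mac[5] = (uint8_t)(addr & 0xFF);
	mac[4] = (uint8_t)((addr >> 8) & 0xFF);
	mac[3] = (uint8_t)((addr >> 16) & 0xFF);
	/* keep the OUI of the PF MAC, as the driver does */
	memcpy(mac, pf_mac, 3);
}

int main(void)
{
	uint8_t pf[ETH_ALEN] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	uint8_t vf[ETH_ALEN];
	int i;

	toy_seed_mac(pf, vf);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", (unsigned)vf[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}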
@@ -3108,14 +3277,9 @@ static void be_cancel_worker(struct be_adapter *adapter)
static void be_mac_clear(struct be_adapter *adapter)
{
- int i;
-
if (adapter->pmac_id) {
- for (i = 0; i < (adapter->uc_macs + 1); i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
- adapter->uc_macs = 0;
-
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
}
@@ -3171,13 +3335,32 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
+static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
+ u32 cap_flags, u32 vf)
+{
+ u32 en_flags;
+ int status;
+
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
+ BE_IF_FLAGS_RSS;
+
+ en_flags &= cap_flags;
+
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ if_handle, vf);
+
+ return status;
+}
+
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, en_flags, vf;
- int status = 0;
+ u32 cap_flags, vf;
+ int status;
+ /* If a FW profile exists, then cap_flags are updated */
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
@@ -3189,18 +3372,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
cap_flags = res.if_cap_flags;
}
- /* If a FW profile exists, then cap_flags are updated */
- en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST);
- status =
- be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ status = be_if_create(adapter, &vf_cfg->if_handle,
+ cap_flags, vf + 1);
if (status)
- goto err;
+ return status;
}
-err:
- return status;
+
+ return 0;
}
static int be_vf_setup_init(struct be_adapter *adapter)
@@ -3385,7 +3563,7 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->phy.link_speed = -1;
adapter->if_handle = -1;
adapter->be3_native = false;
- adapter->promiscuous = false;
+ adapter->if_flags = 0;
if (be_physfn(adapter))
adapter->cmd_privileges = MAX_PRIVILEGES;
else
@@ -3512,7 +3690,9 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- if (be_physfn(adapter)) {
+ be_cmd_query_port_name(adapter);
+
+ if (be_physfn(adapter)) {
status = be_cmd_get_active_profile(adapter, &profile_id);
if (!status)
dev_info(&adapter->pdev->dev,
@@ -3638,10 +3818,20 @@ int be_update_queues(struct be_adapter *adapter)
return status;
}
+static inline int fw_major_num(const char *fw_ver)
+{
+ int fw_major = 0, i;
+
+ i = sscanf(fw_ver, "%d.", &fw_major);
+ if (i != 1)
+ return 0;
+
+ return fw_major;
+}
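As a worked example of the helper above: for a hypothetical version string such as "10.6.0.3", sscanf() matches "%d." and fw_major_num() returns 10; for a string that does not begin with digits the match fails and the helper returns 0.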
+
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 tx_fc, rx_fc, en_flags;
int status;
be_setup_init(adapter);
@@ -3657,13 +3847,8 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- en_flags |= BE_IF_FLAGS_RSS;
- en_flags = en_flags & be_if_cap_flags(adapter);
- status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
- &adapter->if_handle, 0);
+ status = be_if_create(adapter, &adapter->if_handle,
+ be_if_cap_flags(adapter), 0);
if (status)
goto err;
@@ -3696,11 +3881,14 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_acpi_wol_cap(adapter);
- be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
+ status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
+ adapter->rx_fc);
+ if (status)
+ be_cmd_get_flow_control(adapter, &adapter->tx_fc,
+ &adapter->rx_fc);
- if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
- be_cmd_set_flow_control(adapter, adapter->tx_fc,
- adapter->rx_fc);
+ dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
+ adapter->tx_fc, adapter->rx_fc);
if (be_physfn(adapter))
be_cmd_set_logical_link_config(adapter,
@@ -3739,7 +3927,7 @@ static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool phy_flashing_required(struct be_adapter *adapter)
{
- return (adapter->phy.phy_type == TN_8022 &&
+ return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
@@ -3790,7 +3978,8 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
int status;
u8 crc[4];
- status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
+ status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
+ img_size - 4);
if (status)
return status;
@@ -3806,13 +3995,13 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
}
static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size)
+ struct be_dma_mem *flash_cmd, int optype, int img_size,
+ u32 img_offset)
{
+ u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
struct be_cmd_write_flashrom *req = flash_cmd->va;
- u32 total_bytes, flash_op, num_bytes;
int status;
- total_bytes = img_size;
while (total_bytes) {
num_bytes = min_t(u32, 32*1024, total_bytes);
@@ -3833,12 +4022,15 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
memcpy(req->data_buf, img, num_bytes);
img += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, num_bytes);
+ flash_op, img_offset +
+ bytes_sent, num_bytes);
if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
optype == OPTYPE_PHY_FW)
break;
else if (status)
return status;
+
+ bytes_sent += num_bytes;
}
return 0;
}
@@ -3906,6 +4098,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
pflashcomp = gen2_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g2);
num_comp = ARRAY_SIZE(gen2_flash_types);
+ img_hdrs_size = 0;
}
/* Get flash section info*/
@@ -3950,7 +4143,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size);
+ pflashcomp[i].size, 0);
if (status) {
dev_err(dev, "Flashing section type 0x%x failed\n",
pflashcomp[i].img_type);
@@ -4017,12 +4210,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
struct be_dma_mem *flash_cmd, int num_of_images)
{
int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+ bool crc_match, old_fw_img, flash_offset_support = true;
struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
u32 img_offset, img_size, img_type;
+ u16 img_optype, flash_optype;
int status, i, filehdr_size;
- bool crc_match, old_fw_img;
- u16 img_optype;
const u8 *p;
filehdr_size = sizeof(struct flash_file_hdr_g3);
@@ -4032,6 +4225,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
return -EINVAL;
}
+retry_flash:
for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
@@ -4041,6 +4235,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
if (img_optype == 0xFFFF)
continue;
+
+ if (flash_offset_support)
+ flash_optype = OPTYPE_OFFSET_SPECIFIED;
+ else
+ flash_optype = img_optype;
+
/* Don't bother verifying CRC if an old FW image is being
* flashed
*/
@@ -4049,16 +4249,26 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
status = be_check_flash_crc(adapter, fw->data, img_offset,
img_size, filehdr_size +
- img_hdrs_size, img_optype,
+ img_hdrs_size, flash_optype,
&crc_match);
- /* The current FW image on the card does not recognize the new
- * FLASH op_type. The FW download is partially complete.
- * Reboot the server now to enable FW image to recognize the
- * new FLASH op_type. To complete the remaining process,
- * download the same FW again after the reboot.
- */
if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+ /* The current FW image on the card does not support
+ * OFFSET based flashing. Retry using older mechanism
+ * of OPTYPE based flashing
+ */
+ if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+ flash_offset_support = false;
+ goto retry_flash;
+ }
+
+ /* The current FW image on the card does not recognize
+ * the new FLASH op_type. The FW download is partially
+ * complete. Reboot the server now to enable FW image
+ * to recognize the new FLASH op_type. To complete the
+ * remaining process, download the same FW again after
+ * the reboot.
+ */
dev_err(dev, "Flash incomplete. Reset the server\n");
dev_err(dev, "Download FW image again after reset\n");
return -EAGAIN;
@@ -4076,7 +4286,19 @@ flash:
if (p + img_size > fw->data + fw->size)
return -1;
- status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
+ status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
+ img_offset);
+
+ /* The current FW image on the card does not support OFFSET
+ * based flashing. Retry using older mechanism of OPTYPE based
+ * flashing
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
+ flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+ flash_offset_support = false;
+ goto retry_flash;
+ }
+
/* For old FW images ignore ILLEGAL_FIELD error or errors on
* UFI_DIR region
*/
@@ -4179,98 +4401,105 @@ static int lancer_fw_download(struct be_adapter *adapter,
return 0;
}
-#define UFI_TYPE2 2
-#define UFI_TYPE3 3
-#define UFI_TYPE3R 10
-#define UFI_TYPE4 4
+#define BE2_UFI 2
+#define BE3_UFI 3
+#define BE3R_UFI 10
+#define SH_UFI 4
+#define SH_P2_UFI 11
+
static int be_get_ufi_type(struct be_adapter *adapter,
struct flash_file_hdr_g3 *fhdr)
{
- if (!fhdr)
- goto be_get_ufi_exit;
+ if (!fhdr) {
+ dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
+ return -1;
+ }
- if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
- return UFI_TYPE4;
- else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
- if (fhdr->asic_type_rev == 0x10)
- return UFI_TYPE3R;
- else
- return UFI_TYPE3;
- } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
- return UFI_TYPE2;
+ /* First letter of the build version is used to identify
+ * which chip this image file is meant for.
+ */
+ switch (fhdr->build[0]) {
+ case BLD_STR_UFI_TYPE_SH:
+ return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
+ SH_UFI;
+ case BLD_STR_UFI_TYPE_BE3:
+ return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
+ BE3_UFI;
+ case BLD_STR_UFI_TYPE_BE2:
+ return BE2_UFI;
+ default:
+ return -1;
+ }
+}
-be_get_ufi_exit:
- dev_err(&adapter->pdev->dev,
- "UFI and Interface are not compatible for flashing\n");
- return -1;
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
+ * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+ struct flash_file_hdr_g3 *fhdr)
+{
+ int ufi_type = be_get_ufi_type(adapter, fhdr);
+
+ switch (ufi_type) {
+ case SH_P2_UFI:
+ return skyhawk_chip(adapter);
+ case SH_UFI:
+ return (skyhawk_chip(adapter) &&
+ adapter->asic_rev < ASIC_REV_P2);
+ case BE3R_UFI:
+ return BE3_chip(adapter);
+ case BE3_UFI:
+ return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
+ case BE2_UFI:
+ return BE2_chip(adapter);
+ default:
+ return false;
+ }
}
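To take one case through these rules: an image whose build string carries the BE3 identifier with asic_type_rev equal to ASIC_REV_B0 is typed as BE3R_UFI and accepted only on BE3 adapters, while a plain BE3_UFI image is refused on B0 (BE3-R) silicon because the check requires asic_rev < ASIC_REV_B0; the Skyhawk SH_UFI / SH_P2_UFI pair follows the same pattern around ASIC_REV_P2.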
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
+ struct device *dev = &adapter->pdev->dev;
struct flash_file_hdr_g3 *fhdr3;
- struct image_hdr *img_hdr_ptr = NULL;
+ struct image_hdr *img_hdr_ptr;
+ int status = 0, i, num_imgs;
struct be_dma_mem flash_cmd;
- const u8 *p;
- int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
- flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
- if (!flash_cmd.va) {
- status = -ENOMEM;
- goto be_fw_exit;
+ fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+ if (!be_check_ufi_compatibility(adapter, fhdr3)) {
+ dev_err(dev, "Flash image is not compatible with adapter\n");
+ return -EINVAL;
}
- p = fw->data;
- fhdr3 = (struct flash_file_hdr_g3 *)p;
-
- ufi_type = be_get_ufi_type(adapter, fhdr3);
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
num_imgs = le32_to_cpu(fhdr3->num_imgs);
for (i = 0; i < num_imgs; i++) {
img_hdr_ptr = (struct image_hdr *)(fw->data +
(sizeof(struct flash_file_hdr_g3) +
i * sizeof(struct image_hdr)));
- if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
- switch (ufi_type) {
- case UFI_TYPE4:
- status = be_flash_skyhawk(adapter, fw,
- &flash_cmd, num_imgs);
- break;
- case UFI_TYPE3R:
- status = be_flash_BEx(adapter, fw, &flash_cmd,
- num_imgs);
- break;
- case UFI_TYPE3:
- /* Do not flash this ufi on BE3-R cards */
- if (adapter->asic_rev < 0x10)
- status = be_flash_BEx(adapter, fw,
- &flash_cmd,
- num_imgs);
- else {
- status = -EINVAL;
- dev_err(&adapter->pdev->dev,
- "Can't load BE3 UFI on BE3R\n");
- }
- }
- }
- }
-
- if (ufi_type == UFI_TYPE2)
- status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
- else if (ufi_type == -1)
- status = -EINVAL;
+ if (!BE2_chip(adapter) &&
+ le32_to_cpu(img_hdr_ptr->imageid) != 1)
+ continue;
- dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
- if (status) {
- dev_err(&adapter->pdev->dev, "Firmware load error\n");
- goto be_fw_exit;
+ if (skyhawk_chip(adapter))
+ status = be_flash_skyhawk(adapter, fw, &flash_cmd,
+ num_imgs);
+ else
+ status = be_flash_BEx(adapter, fw, &flash_cmd,
+ num_imgs);
}
- dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+ dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
+ if (!status)
+ dev_info(dev, "Firmware flashed successfully\n");
-be_fw_exit:
return status;
}
@@ -4304,7 +4533,8 @@ fw_exit:
return status;
}
-static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -4383,8 +4613,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
* is expected to work across all types of IP tunnels once exported. Skyhawk
* supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. Note this only
- * ensures that other tunnels work fine while VxLAN offloads are not enabled.
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
*
* Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
* adds more than one port, disable offloads and don't re-enable them again
@@ -4463,7 +4694,41 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- return vxlan_features_check(skb, features);
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 l4_hdr = 0;
+
+ /* The code below restricts offload features for some tunneled packets.
+ * Offload features for normal (non tunnel) packets are unchanged.
+ */
+ if (!skb->encapsulation ||
+ !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
+ return features;
+
+ /* It's an encapsulated packet and VxLAN offloads are enabled. We
+ * should disable tunnel offload features if it's not a VxLAN packet,
+ * as tunnel offloads have been enabled only for VxLAN. This is done to
+ * allow other tunneled traffic like GRE to work fine while VxLAN
+ * offloads are configured in Skyhawk-R.
+ */
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if (l4_hdr != IPPROTO_UDP ||
+ skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
}
#endif
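The length test in be_features_check() amounts to: the inner Ethernet header must begin exactly one UDP header plus one VxLAN header, i.e. 8 + 8 = 16 bytes, past the outer transport header. A small stand-alone sketch of that arithmetic (the toy_* structs are minimal stand-ins, not the kernel headers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for struct udphdr / struct vxlanhdr, just to show where
 * the 16-byte offset in the driver's check comes from.
 */
struct toy_udphdr {
	uint16_t source, dest, len, check;	/* 8 bytes */
};

struct toy_vxlanhdr {
	uint32_t vx_flags, vx_vni;		/* 8 bytes */
};

/* offset of inner MAC header minus offset of outer transport header */
static bool toy_looks_like_vxlan(size_t inner_mac_off, size_t transport_off)
{
	return inner_mac_off - transport_off ==
	       sizeof(struct toy_udphdr) + sizeof(struct toy_vxlanhdr);
}

int main(void)
{
	/* outer L4 at byte 34 (Ethernet 14 + IPv4 20), inner MAC at 34 + 16 */
	printf("%d\n", toy_looks_like_vxlan(50, 34));	/* 1: VxLAN-shaped */
	printf("%d\n", toy_looks_like_vxlan(46, 34));	/* 0: something else */
	return 0;
}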
@@ -4797,6 +5062,20 @@ static void be_func_recovery_task(struct work_struct *work)
msecs_to_jiffies(1000));
}
+static void be_log_sfp_info(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_query_sfp_info(adapter);
+ if (!status) {
+ dev_err(&adapter->pdev->dev,
+ "Unqualified SFP+ detected on %c from %s part no: %s",
+ adapter->port_name, adapter->phy.vendor_name,
+ adapter->phy.vendor_pn);
+ }
+ adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
static void be_worker(struct work_struct *work)
{
struct be_adapter *adapter =
@@ -4835,6 +5114,9 @@ static void be_worker(struct work_struct *work)
be_eqd_update(adapter);
+ if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
+ be_log_sfp_info(adapter);
+
reschedule:
adapter->work_counter++;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -4881,12 +5163,31 @@ static inline char *func_name(struct be_adapter *adapter)
return be_physfn(adapter) ? "PF" : "VF";
}
+static inline char *nic_name(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case OC_DEVICE_ID1:
+ return OC_NAME;
+ case OC_DEVICE_ID2:
+ return OC_NAME_BE;
+ case OC_DEVICE_ID3:
+ case OC_DEVICE_ID4:
+ return OC_NAME_LANCER;
+ case BE_DEVICE_ID2:
+ return BE3_NAME;
+ case OC_DEVICE_ID5:
+ case OC_DEVICE_ID6:
+ return OC_NAME_SH;
+ default:
+ return BE_NAME;
+ }
+}
+
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
- int status = 0;
struct be_adapter *adapter;
struct net_device *netdev;
- char port_name;
+ int status = 0;
dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
@@ -4980,10 +5281,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
schedule_delayed_work(&adapter->func_recovery_work,
msecs_to_jiffies(1000));
- be_cmd_query_port_name(adapter, &port_name);
-
dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
- func_name(adapter), mc_name(adapter), port_name);
+ func_name(adapter), mc_name(adapter), adapter->port_name);
return 0;
@@ -5048,6 +5347,10 @@ static int be_resume(struct pci_dev *pdev)
if (status)
return status;
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ return status;
+
be_intr_set(adapter, true);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 270308315d43..ba84c4a9ce32 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -69,7 +69,8 @@ config FSL_XGMAC_MDIO
select PHYLIB
select OF_MDIO
---help---
- This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+ This driver supports the MDIO bus on the FMan 10G Ethernet MACs, and
+ on the FMan mEMAC (which supports both Clause 22 and Clause 45).
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 469691ad4a1e..a86af8a7485d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,6 +16,7 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -356,6 +357,7 @@ struct bufdesc_ex {
#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
@@ -424,6 +426,8 @@ struct bufdesc_ex {
* (40ns * 6).
*/
#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
struct fec_enet_priv_tx_q {
int index;
@@ -511,6 +515,7 @@ struct fec_enet_private {
int irq[FEC_IRQ_NUM];
bool bufdesc_ex;
int pause_flag;
+ int wol_flag;
u32 quirks;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5ebdf8dc8a31..9bb6220663b2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -187,6 +188,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
+/* FEC ECR bits definition */
+#define FEC_ECR_MAGICEN (1 << 2)
+#define FEC_ECR_SLEEP (1 << 3)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -195,6 +199,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
+#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
+#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
#define COPYBREAK_DEFAULT 256
@@ -1089,7 +1096,9 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1103,17 +1112,28 @@ fec_stop(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
- writel(0, fep->hwp + FEC_ECNTRL);
+ if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
} else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
+ writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+
+ if (pdata && pdata->sleep_mode_enable)
+ pdata->sleep_mode_enable(true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+ !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
writel(2, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
@@ -1169,12 +1189,13 @@ static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
struct fec_enet_private *fep;
- struct bufdesc *bdp;
+ struct bufdesc *bdp, *bdp_t;
unsigned short status;
struct sk_buff *skb;
struct fec_enet_priv_tx_q *txq;
struct netdev_queue *nq;
int index = 0;
+ int i, bdnum;
int entries_free;
fep = netdev_priv(ndev);
@@ -1195,18 +1216,29 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
if (bdp == txq->cur_tx)
break;
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
-
+ bdp_t = bdp;
+ bdnum = 1;
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
skb = txq->tx_skbuff[index];
- txq->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
- bdp->cbd_bufaddr = 0;
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
- continue;
+ while (!skb) {
+ bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
+ skb = txq->tx_skbuff[index];
+ bdnum++;
}
+ if (skb_shinfo(skb)->nr_frags &&
+ (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
+ break;
+
+ for (i = 0; i < bdnum; i++) {
+ if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
+ if (i < bdnum - 1)
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ }
+ txq->tx_skbuff[index] = NULL;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1937,7 +1969,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO, i;
/*
- * The dual fec interfaces are not equivalent with enet-mac.
+ * The i.MX28 dual fec interfaces are not equivalent.
* Here are the differences:
*
* - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1984,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+ if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2047,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++;
/* save fec0 mii_bus */
- if (fep->quirks & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
fec0_mii_bus = fep->mii_bus;
return 0;
@@ -2427,6 +2459,44 @@ static int fec_enet_set_tunable(struct net_device *netdev,
return ret;
}
+static void
+fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int
+fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+ if (device_may_wakeup(&ndev->dev)) {
+ fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
+ if (fep->irq[0] > 0)
+ enable_irq_wake(fep->irq[0]);
+ } else {
+ fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
+ if (fep->irq[0] > 0)
+ disable_irq_wake(fep->irq[0]);
+ }
+
+ return 0;
+}
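With these handlers in place (and the fsl,magic-packet property handled later in this patch), magic-packet wake could presumably be toggled from user space in the usual way, e.g. "ethtool -s eth0 wol g", and read back with "ethtool eth0"; the interface name here is only an example.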
+
static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
@@ -2445,6 +2515,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_ts_info = fec_enet_get_ts_info,
.get_tunable = fec_enet_get_tunable,
.set_tunable = fec_enet_set_tunable,
+ .get_wol = fec_enet_get_wol,
+ .set_wol = fec_enet_set_wol,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2524,12 +2596,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
}
for (i = 0; i < fep->num_rx_queues; i++)
- if (fep->rx_queue[i])
- kfree(fep->rx_queue[i]);
-
+ kfree(fep->rx_queue[i]);
for (i = 0; i < fep->num_tx_queues; i++)
- if (fep->tx_queue[i])
- kfree(fep->tx_queue[i]);
+ kfree(fep->tx_queue[i]);
}
static int fec_enet_alloc_queue(struct net_device *ndev)
@@ -2705,6 +2774,9 @@ fec_enet_open(struct net_device *ndev)
phy_start(fep->phy_dev);
netif_tx_start_all_queues(ndev);
+ device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+ FEC_WOL_FLAG_ENABLE);
+
return 0;
err_enet_mii_probe:
@@ -3129,6 +3201,7 @@ fec_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data;
fep->quirks = pdev->id_entry->driver_data;
+ fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
@@ -3153,6 +3226,9 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ if (of_get_property(np, "fsl,magic-packet", NULL))
+ fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3247,6 +3323,8 @@ fec_probe(struct platform_device *pdev)
0, pdev->name, ndev);
if (ret)
goto failed_irq;
+
+ fep->irq[i] = irq;
}
init_completion(&fep->mdio_done);
@@ -3263,6 +3341,9 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
+ device_init_wakeup(&ndev->dev, fep->wol_flag &
+ FEC_WOL_HAS_MAGIC_PACKET);
+
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
@@ -3316,6 +3397,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+ fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
phy_stop(fep->phy_dev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
@@ -3323,11 +3406,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
fec_enet_clk_enable(ndev, false);
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
}
rtnl_unlock();
- if (fep->reg_phy)
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
regulator_disable(fep->reg_phy);
/* SOC supply clock to phy, when clock is disabled, phy link down
@@ -3343,9 +3427,11 @@ static int __maybe_unused fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
+ int val;
- if (fep->reg_phy) {
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
ret = regulator_enable(fep->reg_phy);
if (ret)
return ret;
@@ -3353,12 +3439,21 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
- pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
goto failed_clk;
}
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+ if (pdata && pdata->sleep_mode_enable)
+ pdata->sleep_mode_enable(false);
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
+ } else {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ }
fec_restart(ndev);
netif_tx_lock_bh(ndev);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 992c8c3db553..1f9cf2345266 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -374,23 +374,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
unsigned long flags;
- u64 now;
- u32 counter;
spin_lock_irqsave(&fep->tmreg_lock, flags);
-
- now = timecounter_read(&fep->tc);
- now += delta;
-
- /* Get the timer value based on adjusted timestamp.
- * Update the counter with the masked value.
- */
- counter = now & fep->cc.mask;
- writel(counter, fep->hwp + FEC_ATIME);
-
- /* reset the timecounter */
- timecounter_init(&fep->tc, &fep->cc, now);
-
+ timecounter_adjtime(&fep->tc, delta);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9e2bcb807923..a17628769a1f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
fep->stats.collisions++;
/* unmap */
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- skb->len, DMA_TO_DEVICE);
+ if (fep->mapped_as_page[dirtyidx])
+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+ else
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
/*
* Free the sk buffer associated with this last transmit.
*/
- dev_kfree_skb(skb);
- fep->tx_skbuff[dirtyidx] = NULL;
+ if (skb) {
+ dev_kfree_skb(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+ }
/*
* Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
* Since we have freed up a buffer, the ring is no longer
* full.
*/
- if (!fep->tx_free++)
+ if (++fep->tx_free >= MAX_SKB_FRAGS)
do_wake = 1;
has_tx_work = 1;
}
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
cbd_t __iomem *bdp;
int curidx;
u16 sc;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *frag;
+ int len;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
bdp = fep->cur_tx;
- if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+ if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
netif_stop_queue(dev);
spin_unlock(&fep->tx_lock);
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
curidx = bdp - fep->tx_bd_base;
- /*
- * Clear all of the status flags.
- */
- CBDC_SC(bdp, BD_ENET_TX_STATS);
-
- /*
- * Save skb pointer.
- */
- fep->tx_skbuff[curidx] = skb;
-
- fep->stats.tx_bytes += skb->len;
+ len = skb->len;
+ fep->stats.tx_bytes += len;
+ if (nr_frags)
+ len -= skb->data_len;
+ fep->tx_free -= nr_frags + 1;
/*
* Push the data cache so the CPM does not get stale memory data.
*/
CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
- skb->data, skb->len, DMA_TO_DEVICE));
- CBDW_DATLEN(bdp, skb->len);
+ skb->data, len, DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, len);
+
+ fep->mapped_as_page[curidx] = 0;
+ frag = skb_shinfo(skb)->frags;
+ while (nr_frags) {
+ CBDC_SC(bdp,
+ BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ CBDS_SC(bdp, BD_ENET_TX_READY);
+
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ bdp++, curidx++;
+ else
+ bdp = fep->tx_bd_base, curidx = 0;
- /*
- * If this was the last BD in the ring, start at the beginning again.
- */
- if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
- fep->cur_tx++;
- else
- fep->cur_tx = fep->tx_bd_base;
+ len = skb_frag_size(frag);
+ CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
+ DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, len);
- if (!--fep->tx_free)
- netif_stop_queue(dev);
+ fep->tx_skbuff[curidx] = NULL;
+ fep->mapped_as_page[curidx] = 1;
+
+ frag++;
+ nr_frags--;
+ }
/* Trigger transmission start */
sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* yay for hw reuse :) */
if (skb->len <= 60)
sc |= BD_ENET_TX_PAD;
+ CBDC_SC(bdp, BD_ENET_TX_STATS);
CBDS_SC(bdp, sc);
+ /* Save skb pointer. */
+ fep->tx_skbuff[curidx] = skb;
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
+
+ if (fep->tx_free < MAX_SKB_FRAGS)
+ netif_stop_queue(dev);
+
skb_tx_timestamp(skb);
(*fep->ops->tx_kickstart)(dev);
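Note the descriptor bookkeeping this hunk introduces: a fragmented skb now consumes nr_frags + 1 descriptors, so the xmit path stops the queue when fewer than nr_frags + 1 entries are free, stops it again once fewer than MAX_SKB_FRAGS entries remain after queuing, and the completion path earlier in this diff only requests a wake-up once tx_free climbs back to at least MAX_SKB_FRAGS.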
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
}
fpi->rx_ring = 32;
- fpi->tx_ring = 32;
+ fpi->tx_ring = 64;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
privsize = sizeof(*fep) +
sizeof(struct sk_buff **) *
- (fpi->rx_ring + fpi->tx_ring);
+ (fpi->rx_ring + fpi->tx_ring) +
+ sizeof(char) * fpi->tx_ring;
ndev = alloc_etherdev(privsize);
if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
fep->rx_skbuff = (struct sk_buff **)&fep[1];
fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+ fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
+ fpi->tx_ring);
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
netif_carrier_off(ndev);
+ ndev->features |= NETIF_F_SG;
+
ret = register_netdev(ndev);
if (ret)
goto out_free_bd;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 3a4b49e0e717..f184d8f952e2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -134,6 +134,7 @@ struct fs_enet_private {
void __iomem *ring_base;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
+ char *mapped_as_page;
cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
cbd_t __iomem *tx_bd_base;
cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5645342f5b28..43df78882e48 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -116,7 +116,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr);
+static struct sk_buff *gfar_new_skb(struct net_device *dev,
+ dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -176,7 +177,7 @@ static int gfar_init_bds(struct net_device *ndev)
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
- u32 *rfbptr;
+ u32 __iomem *rfbptr;
int i, j;
dma_addr_t bufaddr;
@@ -554,7 +555,7 @@ static void gfar_ints_enable(struct gfar_private *priv)
}
}
-void lock_tx_qs(struct gfar_private *priv)
+static void lock_tx_qs(struct gfar_private *priv)
{
int i;
@@ -562,7 +563,7 @@ void lock_tx_qs(struct gfar_private *priv)
spin_lock(&priv->tx_queue[i]->txlock);
}
-void unlock_tx_qs(struct gfar_private *priv)
+static void unlock_tx_qs(struct gfar_private *priv)
{
int i;
@@ -763,7 +764,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
u32 *tx_queues, *rx_queues;
unsigned short mode, poll_mode;
- if (!np || !of_device_is_available(np))
+ if (!np)
return -ENODEV;
if (of_device_is_compatible(np, "fsl,etsec2")) {
@@ -2169,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
fcb->flags |= TXFCB_VLN;
- fcb->vlctl = vlan_tx_tag_get(skb);
+ fcb->vlctl = skb_vlan_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2229,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
regs = tx_queue->grp->regs;
do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
- do_vlan = vlan_tx_tag_present(skb);
+ do_vlan = skb_vlan_tag_present(skb);
do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en;
@@ -2671,7 +2672,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index b581b8823a2a..9e1802400c23 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1039,7 +1039,7 @@ struct gfar_priv_rx_q {
/* RX Coalescing values */
unsigned char rxcoalescing;
unsigned long rxic;
- u32 *rfbptr;
+ u32 __iomem *rfbptr;
};
enum gfar_irqinfo_id {
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3e1a9c1a67a9..fda12fb32ec7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 6e7db66069aa..3a83bc2c613c 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -32,18 +32,19 @@ struct tgec_mdio_controller {
__be32 mdio_addr; /* MDIO address */
} __packed;
+#define MDIO_STAT_ENC BIT(6)
#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
-#define MDIO_STAT_BSY (1 << 0)
-#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_STAT_BSY BIT(0)
+#define MDIO_STAT_RD_ER BIT(1)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
-#define MDIO_CTL_PRE_DIS (1 << 10)
-#define MDIO_CTL_SCAN_EN (1 << 11)
-#define MDIO_CTL_POST_INC (1 << 14)
-#define MDIO_CTL_READ (1 << 15)
+#define MDIO_CTL_PRE_DIS BIT(10)
+#define MDIO_CTL_SCAN_EN BIT(11)
+#define MDIO_CTL_POST_INC BIT(14)
+#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) (x & 0xffff)
-#define MDIO_DATA_BSY (1 << 31)
+#define MDIO_DATA_BSY BIT(31)
/*
* Wait until the MDIO bus is free
@@ -51,12 +52,16 @@ struct tgec_mdio_controller {
static int xgmac_wait_until_free(struct device *dev,
struct tgec_mdio_controller __iomem *regs)
{
- uint32_t status;
+ unsigned int timeout;
/* Wait till the bus is free */
- status = spin_event_timeout(
- !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
- if (!status) {
+ timeout = TIMEOUT;
+ while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout) {
dev_err(dev, "timeout waiting for bus to be free\n");
return -ETIMEDOUT;
}
@@ -70,12 +75,16 @@ static int xgmac_wait_until_free(struct device *dev,
static int xgmac_wait_until_done(struct device *dev,
struct tgec_mdio_controller __iomem *regs)
{
- uint32_t status;
+ unsigned int timeout;
/* Wait till the MDIO write is complete */
- status = spin_event_timeout(
- !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
- if (!status) {
+ timeout = TIMEOUT;
+ while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout) {
dev_err(dev, "timeout waiting for operation to complete\n");
return -ETIMEDOUT;
}
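Both wait helpers above replace spin_event_timeout() with an open-coded bounded poll: re-read the status register, cpu_relax() between reads, and report -ETIMEDOUT when the counter runs out. A hedged sketch of the same pattern for an arbitrary busy bit (register and spin count are illustrative, not the driver's):

/* Illustrative bounded poll: spin until busy_bit clears or spins expire. */
static int poll_until_idle(void __iomem *stat_reg, u32 busy_bit,
                           unsigned int spins)
{
        while (ioread32be(stat_reg) & busy_bit) {
                if (!spins--)
                        return -ETIMEDOUT;
                cpu_relax();
        }
        return 0;
}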
@@ -91,29 +100,42 @@ static int xgmac_wait_until_done(struct device *dev,
static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
- uint16_t dev_addr = regnum >> 16;
+ uint16_t dev_addr;
+ u32 mdio_ctl, mdio_stat;
int ret;
- /* Setup the MII Mgmt clock speed */
- out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+ mdio_stat = ioread32be(&regs->mdio_stat);
+ if (regnum & MII_ADDR_C45) {
+ /* Clause 45 (ie 10G) */
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ /* Clause 22 (ie 1G) */
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ iowrite32be(mdio_stat, &regs->mdio_stat);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Set the port and dev addr */
- out_be32(&regs->mdio_ctl,
- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ iowrite32be(mdio_ctl, &regs->mdio_ctl);
/* Set the register address */
- out_be32(&regs->mdio_addr, regnum & 0xffff);
+ if (regnum & MII_ADDR_C45) {
+ iowrite32be(regnum & 0xffff, &regs->mdio_addr);
- ret = xgmac_wait_until_free(&bus->dev, regs);
- if (ret)
- return ret;
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+ }
/* Write the value to the register */
- out_be32(&regs->mdio_data, MDIO_DATA(value));
+ iowrite32be(MDIO_DATA(value), &regs->mdio_data);
ret = xgmac_wait_until_done(&bus->dev, regs);
if (ret)
@@ -130,13 +152,22 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
- uint16_t dev_addr = regnum >> 16;
+ uint16_t dev_addr;
+ uint32_t mdio_stat;
uint32_t mdio_ctl;
uint16_t value;
int ret;
- /* Setup the MII Mgmt clock speed */
- out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+ mdio_stat = ioread32be(&regs->mdio_stat);
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ iowrite32be(mdio_stat, &regs->mdio_stat);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
@@ -144,54 +175,38 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* Set the Port and Device Addrs */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- out_be32(&regs->mdio_ctl, mdio_ctl);
+ iowrite32be(mdio_ctl, &regs->mdio_ctl);
/* Set the register address */
- out_be32(&regs->mdio_addr, regnum & 0xffff);
+ if (regnum & MII_ADDR_C45) {
+ iowrite32be(regnum & 0xffff, &regs->mdio_addr);
- ret = xgmac_wait_until_free(&bus->dev, regs);
- if (ret)
- return ret;
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+ }
/* Initiate the read */
- out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+ iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl);
ret = xgmac_wait_until_done(&bus->dev, regs);
if (ret)
return ret;
/* Return all Fs if nothing was there */
- if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+ if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = in_be32(&regs->mdio_data) & 0xffff;
+ value = ioread32be(&regs->mdio_data) & 0xffff;
dev_dbg(&bus->dev, "read %04x\n", value);
return value;
}
-/* Reset the MIIM registers, and wait for the bus to free */
-static int xgmac_mdio_reset(struct mii_bus *bus)
-{
- struct tgec_mdio_controller __iomem *regs = bus->priv;
- int ret;
-
- mutex_lock(&bus->mdio_lock);
-
- /* Setup the MII Mgmt clock speed */
- out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
-
- ret = xgmac_wait_until_free(&bus->dev, regs);
-
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
static int xgmac_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -205,15 +220,13 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return ret;
}
- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+ bus = mdiobus_alloc();
if (!bus)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
bus->read = xgmac_mdio_read;
bus->write = xgmac_mdio_write;
- bus->reset = xgmac_mdio_reset;
- bus->irq = bus->priv;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
@@ -258,6 +271,9 @@ static struct of_device_id xgmac_mdio_match[] = {
{
.compatible = "fsl,fman-xmdio",
},
+ {
+ .compatible = "fsl,fman-memac-mdio",
+ },
{},
};
MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
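The Clause 45 handling added above keys off the MII_ADDR_C45 flag in regnum: for C45 accesses the MMD (device) address sits in bits 16-20 and the 16-bit register address is written in a separate cycle, while Clause 22 simply uses the low five bits. As a hedged illustration of how a caller typically composes such a regnum with the usual phylib/mdio headers (the helper name here is made up):

#include <linux/mdio.h>   /* MII_ADDR_C45, MDIO_MMD_*, MDIO_STAT1 */
#include <linux/phy.h>    /* mdiobus_read() */

/* Illustrative helper: the C45 flag, the MMD address in bits 16-20 and
 * the register address in the low 16 bits -- the layout the MDIO core
 * and this driver expect.
 */
static inline int c45_regnum(int devad, int reg)
{
        return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (reg & 0xffff);
}

/* e.g. mdiobus_read(bus, phy_addr, c45_regnum(MDIO_MMD_PMAPMD, MDIO_STAT1)); */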
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index e9421731b05e..a54d89791311 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -24,4 +24,13 @@ config HIX5HD2_GMAC
help
This selects the hix5hd2 mac family network device.
+config HIP04_ETH
+ tristate "HISILICON P04 Ethernet support"
+ select PHYLIB
+ select MARVELL_PHY
+ select MFD_SYSCON
+ ---help---
+ If you wish to compile a kernel for hardware with a HiSilicon P04 SoC and
+ want to use the internal Ethernet, then you should answer Y here.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 9175e84622d4..6c14540a4dc5 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
+obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 000000000000..b72d238695d7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,971 @@
+
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define PPE_CFG_RX_ADDR 0x100
+#define PPE_CFG_POOL_GRP 0x300
+#define PPE_CFG_RX_BUF_SIZE 0x400
+#define PPE_CFG_RX_FIFO_SIZE 0x500
+#define PPE_CURR_BUF_CNT 0xa200
+
+#define GE_DUPLEX_TYPE 0x08
+#define GE_MAX_FRM_SIZE_REG 0x3c
+#define GE_PORT_MODE 0x40
+#define GE_PORT_EN 0x44
+#define GE_SHORT_RUNTS_THR_REG 0x50
+#define GE_TX_LOCAL_PAGE_REG 0x5c
+#define GE_TRANSMIT_CONTROL_REG 0x60
+#define GE_CF_CRC_STRIP_REG 0x1b0
+#define GE_MODE_CHANGE_REG 0x1b4
+#define GE_RECV_CONTROL_REG 0x1e0
+#define GE_STATION_MAC_ADDRESS 0x210
+#define PPE_CFG_CPU_ADD_ADDR 0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
+#define PPE_CFG_BUS_CTRL_REG 0x424
+#define PPE_CFG_RX_CTRL_REG 0x428
+#define PPE_CFG_RX_PKT_MODE_REG 0x438
+#define PPE_CFG_QOS_VMID_GEN 0x500
+#define PPE_CFG_RX_PKT_INT 0x538
+#define PPE_INTEN 0x600
+#define PPE_INTSTS 0x608
+#define PPE_RINT 0x604
+#define PPE_CFG_STS_MODE 0x700
+#define PPE_HIS_RX_PKT_CNT 0x804
+
+/* REG_INTERRUPT */
+#define RCV_INT BIT(10)
+#define RCV_NOBUF BIT(8)
+#define RCV_DROP BIT(7)
+#define TX_DROP BIT(6)
+#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
+#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
+
+/* TX descriptor config */
+#define TX_FREE_MEM BIT(0)
+#define TX_READ_ALLOC_L3 BIT(1)
+#define TX_FINISH_CACHE_INV BIT(2)
+#define TX_CLEAR_WB BIT(4)
+#define TX_L3_CHECKSUM BIT(5)
+#define TX_LOOP_BACK BIT(11)
+
+/* RX error */
+#define RX_PKT_DROP BIT(0)
+#define RX_L2_ERR BIT(1)
+#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
+
+#define SGMII_SPEED_1000 0x08
+#define SGMII_SPEED_100 0x07
+#define SGMII_SPEED_10 0x06
+#define MII_SPEED_100 0x01
+#define MII_SPEED_10 0x00
+
+#define GE_DUPLEX_FULL BIT(0)
+#define GE_DUPLEX_HALF 0x00
+#define GE_MODE_CHANGE_EN BIT(0)
+
+#define GE_TX_AUTO_NEG BIT(5)
+#define GE_TX_ADD_CRC BIT(6)
+#define GE_TX_SHORT_PAD_THROUGH BIT(7)
+
+#define GE_RX_STRIP_CRC BIT(0)
+#define GE_RX_STRIP_PAD BIT(3)
+#define GE_RX_PAD_EN BIT(4)
+
+#define GE_AUTO_NEG_CTL BIT(0)
+
+#define GE_RX_INT_THRESHOLD BIT(6)
+#define GE_RX_TIMEOUT 0x04
+
+#define GE_RX_PORT_EN BIT(1)
+#define GE_TX_PORT_EN BIT(2)
+
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
+
+#define PPE_CFG_RX_PKT_ALIGN BIT(18)
+#define PPE_CFG_QOS_VMID_MODE BIT(14)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
+
+#define PPE_CFG_RX_FIFO_FSFU BIT(11)
+#define PPE_CFG_RX_DEPTH_SHIFT 16
+#define PPE_CFG_RX_START_SHIFT 0
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
+
+#define PPE_CFG_BUS_LOCAL_REL BIT(14)
+#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
+
+#define RX_DESC_NUM 128
+#define TX_DESC_NUM 256
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
+
+#define GMAC_PPE_RX_PKT_MAX_LEN 379
+#define GMAC_MAX_PKT_LEN 1516
+#define GMAC_MIN_PKT_LEN 31
+#define RX_BUF_SIZE 1600
+#define RESET_TIMEOUT 1000
+#define TX_TIMEOUT (6 * HZ)
+
+#define DRV_NAME "hip04-ether"
+#define DRV_VERSION "v1.0"
+
+#define HIP04_MAX_TX_COALESCE_USECS 200
+#define HIP04_MIN_TX_COALESCE_USECS 100
+#define HIP04_MAX_TX_COALESCE_FRAMES 200
+#define HIP04_MIN_TX_COALESCE_FRAMES 100
+
+struct tx_desc {
+ u32 send_addr;
+ u32 send_size;
+ u32 next_addr;
+ u32 cfg;
+ u32 wb_addr;
+} __aligned(64);
+
+struct rx_desc {
+ u16 reserved_16;
+ u16 pkt_len;
+ u32 reserve1[3];
+ u32 pkt_err;
+ u32 reserve2[4];
+};
+
+struct hip04_priv {
+ void __iomem *base;
+ int phy_mode;
+ int chan;
+ unsigned int port;
+ unsigned int speed;
+ unsigned int duplex;
+ unsigned int reg_inten;
+
+ struct napi_struct napi;
+ struct net_device *ndev;
+
+ struct tx_desc *tx_desc;
+ dma_addr_t tx_desc_dma;
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ dma_addr_t tx_phys[TX_DESC_NUM];
+ unsigned int tx_head;
+
+ int tx_coalesce_frames;
+ int tx_coalesce_usecs;
+ struct hrtimer tx_coalesce_timer;
+
+ unsigned char *rx_buf[RX_DESC_NUM];
+ dma_addr_t rx_phys[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ struct regmap *map;
+ struct work_struct tx_timeout_task;
+
+ /* written only by tx cleanup */
+ unsigned int tx_tail ____cacheline_aligned_in_smp;
+};
+
+static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+{
+ return (head - tail) % (TX_DESC_NUM - 1);
+}
+
+static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ if (speed == SPEED_1000)
+ val = SGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = SGMII_SPEED_100;
+ else
+ val = SGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(ndev, "unsupported phy mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+ writel_relaxed(val, priv->base + GE_PORT_MODE);
+
+ val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
+ writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
+
+ val = GE_MODE_CHANGE_EN;
+ writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
+}
+
+static void hip04_reset_ppe(struct hip04_priv *priv)
+{
+ u32 val, tmp, timeout = 0;
+
+ do {
+ regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
+ regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
+ if (timeout++ > RESET_TIMEOUT)
+ break;
+ } while (val & 0xfff);
+}
+
+static void hip04_config_fifo(struct hip04_priv *priv)
+{
+ u32 val;
+
+ val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
+ val |= PPE_CFG_STS_RX_PKT_CNT_RC;
+ writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
+
+ val = BIT(priv->port);
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
+
+ val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
+ val |= PPE_CFG_QOS_VMID_MODE;
+ writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
+
+ val = RX_BUF_SIZE;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
+
+ val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
+ val |= PPE_CFG_RX_FIFO_FSFU;
+ val |= priv->chan << PPE_CFG_RX_START_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
+
+ val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
+
+ val = PPE_CFG_RX_PKT_ALIGN;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
+
+ val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
+ writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
+
+ val = GMAC_PPE_RX_PKT_MAX_LEN;
+ writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
+
+ val = GMAC_MAX_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
+
+ val = GMAC_MIN_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
+
+ val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
+ val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
+ writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
+
+ val = GE_RX_STRIP_CRC;
+ writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
+
+ val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
+ val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
+ writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+
+ val = GE_AUTO_NEG_CTL;
+ writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+}
+
+static void hip04_mac_enable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* enable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+
+ /* clear rx int */
+ val = RCV_INT;
+ writel_relaxed(val, priv->base + PPE_RINT);
+
+ /* config recv int */
+ val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
+
+ /* enable interrupt */
+ priv->reg_inten = DEF_INT_MASK;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+}
+
+static void hip04_mac_disable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* disable int */
+ priv->reg_inten &= ~(DEF_INT_MASK);
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+
+ /* disable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+}
+
+static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
+}
+
+static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
+}
+
+static u32 hip04_recv_cnt(struct hip04_priv *priv)
+{
+ return readl(priv->base + PPE_HIS_RX_PKT_CNT);
+}
+
+static void hip04_update_mac_address(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
+ priv->base + GE_STATION_MAC_ADDRESS);
+ writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
+ priv->base + GE_STATION_MAC_ADDRESS + 4);
+}
+
+static int hip04_set_mac_address(struct net_device *ndev, void *addr)
+{
+ eth_mac_addr(ndev, addr);
+ hip04_update_mac_address(ndev);
+ return 0;
+}
+
+static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ unsigned tx_tail = priv->tx_tail;
+ struct tx_desc *desc;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int count;
+
+ smp_rmb();
+ count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ if (count == 0)
+ goto out;
+
+ while (count) {
+ desc = &priv->tx_desc[tx_tail];
+ if (desc->send_addr != 0) {
+ if (force)
+ desc->send_addr = 0;
+ else
+ break;
+ }
+
+ if (priv->tx_phys[tx_tail]) {
+ dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+ priv->tx_skb[tx_tail]->len,
+ DMA_TO_DEVICE);
+ priv->tx_phys[tx_tail] = 0;
+ }
+ pkts_compl++;
+ bytes_compl += priv->tx_skb[tx_tail]->len;
+ dev_kfree_skb(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+ tx_tail = TX_NEXT(tx_tail);
+ count--;
+ }
+
+ priv->tx_tail = tx_tail;
+ smp_wmb(); /* Ensure tx_tail visible to xmit */
+
+out:
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
+ netif_wake_queue(ndev);
+
+ return count;
+}
+
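hip04_tx_reclaim() and hip04_mac_start_xmit() share the descriptor ring without a lock: xmit is the only writer of tx_head, reclaim the only writer of tx_tail, and the smp_wmb()/smp_rmb() pairs make each side's descriptor updates visible before its index update. A minimal single-producer/single-consumer sketch of that barrier pairing, with generic names rather than the driver's (the full-queue check the driver does with tx_count() is omitted):

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

#define RING_SIZE 256                           /* power of two, illustrative */

struct entry { void *data; };

struct spsc_ring {
        struct entry slot[RING_SIZE];
        unsigned int head;                      /* written only by the producer */
        unsigned int tail;                      /* written only by the consumer */
};

static void spsc_produce(struct spsc_ring *r, struct entry e)
{
        r->slot[r->head & (RING_SIZE - 1)] = e; /* 1. fill the slot         */
        smp_wmb();                              /* 2. slot before index     */
        ACCESS_ONCE(r->head) = r->head + 1;     /* 3. publish the new head  */
}

static bool spsc_consume(struct spsc_ring *r, struct entry *e)
{
        if (ACCESS_ONCE(r->head) == r->tail)    /* empty?                   */
                return false;
        smp_rmb();                              /* index before slot        */
        *e = r->slot[r->tail & (RING_SIZE - 1)];
        ACCESS_ONCE(r->tail) = r->tail + 1;
        return true;
}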
+static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int tx_head = priv->tx_head, count;
+ struct tx_desc *desc = &priv->tx_desc[tx_head];
+ dma_addr_t phys;
+
+ smp_rmb();
+ count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ if (count == (TX_DESC_NUM - 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, phys)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx_skb[tx_head] = skb;
+ priv->tx_phys[tx_head] = phys;
+ desc->send_addr = cpu_to_be32(phys);
+ desc->send_size = cpu_to_be32(skb->len);
+ desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+ phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
+ desc->wb_addr = cpu_to_be32(phys);
+ skb_tx_timestamp(skb);
+
+ hip04_set_xmit_desc(priv, phys);
+ priv->tx_head = TX_NEXT(tx_head);
+ count++;
+ netdev_sent_queue(ndev, skb->len);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+
+ /* Ensure tx_head update visible to tx reclaim */
+ smp_wmb();
+
+ /* queue is getting full, better start cleaning up now */
+ if (count >= priv->tx_coalesce_frames) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt and timer */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT,
+ priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+ } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
+ /* cleanup not pending yet, start a new timer */
+ hrtimer_start_expires(&priv->tx_coalesce_timer,
+ HRTIMER_MODE_REL);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int hip04_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int cnt = hip04_recv_cnt(priv);
+ struct rx_desc *desc;
+ struct sk_buff *skb;
+ unsigned char *buf;
+ bool last = false;
+ dma_addr_t phys;
+ int rx = 0;
+ int tx_remaining;
+ u16 len;
+ u32 err;
+
+ while (cnt && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+ if (unlikely(!skb))
+ net_dbg_ratelimited("build_skb failed\n");
+
+ dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[priv->rx_head] = 0;
+
+ desc = (struct rx_desc *)skb->data;
+ len = be16_to_cpu(desc->pkt_len);
+ err = be32_to_cpu(desc->pkt_err);
+
+ if (0 == len) {
+ dev_kfree_skb_any(skb);
+ last = true;
+ } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
+ dev_kfree_skb_any(skb);
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ } else {
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ rx++;
+ }
+
+ buf = netdev_alloc_frag(priv->rx_buf_size);
+ if (!buf)
+ goto done;
+ phys = dma_map_single(&ndev->dev, buf,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, phys))
+ goto done;
+ priv->rx_buf[priv->rx_head] = buf;
+ priv->rx_phys[priv->rx_head] = phys;
+ hip04_set_recv_desc(priv, phys);
+
+ priv->rx_head = RX_NEXT(priv->rx_head);
+ if (rx >= budget)
+ goto done;
+
+ if (--cnt == 0)
+ cnt = hip04_recv_cnt(priv);
+ }
+
+ if (!(priv->reg_inten & RCV_INT)) {
+ /* enable rx interrupt */
+ priv->reg_inten |= RCV_INT;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+ }
+ napi_complete(napi);
+done:
+ /* clean up tx descriptors and start a new timer if necessary */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+ if (rx < budget && tx_remaining)
+ hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+
+ return rx;
+}
+
+static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
+
+ if (!ists)
+ return IRQ_NONE;
+
+ writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
+
+ if (unlikely(ists & DEF_INT_ERR)) {
+ if (ists & (RCV_NOBUF | RCV_DROP)) {
+ stats->rx_errors++;
+ stats->rx_dropped++;
+ netdev_err(ndev, "rx drop\n");
+ }
+ if (ists & TX_DROP) {
+ stats->tx_dropped++;
+ netdev_err(ndev, "tx drop\n");
+ }
+ }
+
+ if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ __napi_schedule(&priv->napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void hip04_adjust_link(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hip04_config_port(ndev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static int hip04_mac_open(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->rx_head = 0;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ hip04_reset_ppe(priv);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, phys))
+ return -EIO;
+
+ priv->rx_phys[i] = phys;
+ hip04_set_recv_desc(priv, phys);
+ }
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ netdev_reset_queue(ndev);
+ netif_start_queue(ndev);
+ hip04_mac_enable(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int hip04_mac_stop(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ napi_disable(&priv->napi);
+ netif_stop_queue(ndev);
+ hip04_mac_disable(ndev);
+ hip04_tx_reclaim(ndev, true);
+ hip04_reset_ppe(priv);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ if (priv->rx_phys[i]) {
+ dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+static void hip04_timeout(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static void hip04_tx_timeout_task(struct work_struct *work)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(work, struct hip04_priv, tx_timeout_task);
+ hip04_mac_stop(priv->ndev);
+ hip04_mac_open(priv->ndev);
+}
+
+static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
+{
+ return &ndev->stats;
+}
+
+static int hip04_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
+
+ return 0;
+}
+
+static int hip04_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ /* Check not supported parameters */
+ if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+ (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+ (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
+ ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
+ (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
+ ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
+ return -EINVAL;
+
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static void hip04_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+}
+
+static struct ethtool_ops hip04_ethtool_ops = {
+ .get_coalesce = hip04_get_coalesce,
+ .set_coalesce = hip04_set_coalesce,
+ .get_drvinfo = hip04_get_drvinfo,
+};
+
+static struct net_device_ops hip04_netdev_ops = {
+ .ndo_open = hip04_mac_open,
+ .ndo_stop = hip04_mac_stop,
+ .ndo_get_stats = hip04_get_stats,
+ .ndo_start_xmit = hip04_mac_start_xmit,
+ .ndo_set_mac_address = hip04_set_mac_address,
+ .ndo_tx_timeout = hip04_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->tx_desc = dma_alloc_coherent(d,
+ TX_DESC_NUM * sizeof(struct tx_desc),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ if (!priv->tx_desc)
+ return -ENOMEM;
+
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
+ if (!priv->rx_buf[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hip04_free_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ if (priv->rx_buf[i])
+ put_page(virt_to_head_page(priv->rx_buf[i]));
+
+ for (i = 0; i < TX_DESC_NUM; i++)
+ if (priv->tx_skb[i])
+ dev_kfree_skb_any(priv->tx_skb[i]);
+
+ dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
+ priv->tx_desc, priv->tx_desc_dma);
+}
+
+static int hip04_mac_probe(struct platform_device *pdev)
+{
+ struct device *d = &pdev->dev;
+ struct device_node *node = d->of_node;
+ struct of_phandle_args arg;
+ struct net_device *ndev;
+ struct hip04_priv *priv;
+ struct resource *res;
+ unsigned int irq;
+ ktime_t txtime;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hip04_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ platform_set_drvdata(pdev, ndev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(d, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto init_fail;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
+ if (ret < 0) {
+ dev_warn(d, "no port-handle\n");
+ goto init_fail;
+ }
+
+ priv->port = arg.args[0];
+ priv->chan = arg.args[1] * RX_DESC_NUM;
+
+ hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ /* BQL will try to keep the TX queue as short as possible, but it can't
+ * be faster than tx_coalesce_usecs, so we need a fast timeout here,
+ * but also long enough to gather up enough frames to ensure we don't
+ * get more interrupts than necessary.
+ * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
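+ * (a 1500-byte frame occupies the wire for ~12us at 1Gbit/s, ~12.3us with
+ * preamble and inter-frame gap, so 16 frames come to roughly 200us)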
+ */
+ priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
+ priv->tx_coalesce_usecs = 200;
+ /* allow timer to fire after half the time at the earliest */
+ txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
+ hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
+ priv->tx_coalesce_timer.function = tx_done;
+
+ priv->map = syscon_node_to_regmap(arg.np);
+ if (IS_ERR(priv->map)) {
+ dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+ ret = PTR_ERR(priv->map);
+ goto init_fail;
+ }
+
+ priv->phy_mode = of_get_phy_mode(node);
+ if (priv->phy_mode < 0) {
+ dev_warn(d, "could not find phy-mode\n");
+ ret = -EINVAL;
+ goto init_fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ ret = -EINVAL;
+ goto init_fail;
+ }
+
+ ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (priv->phy_node) {
+ priv->phy = of_phy_connect(ndev, priv->phy_node,
+ &hip04_adjust_link,
+ 0, priv->phy_mode);
+ if (!priv->phy) {
+ ret = -EPROBE_DEFER;
+ goto init_fail;
+ }
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
+
+ ether_setup(ndev);
+ ndev->netdev_ops = &hip04_netdev_ops;
+ ndev->ethtool_ops = &hip04_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ hip04_reset_ppe(priv);
+ if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
+ hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
+
+ hip04_config_fifo(priv);
+ random_ether_addr(ndev->dev_addr);
+ hip04_update_mac_address(ndev);
+
+ ret = hip04_alloc_ring(ndev, d);
+ if (ret) {
+ netdev_err(ndev, "failed to alloc ring\n");
+ goto alloc_fail;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ free_netdev(ndev);
+ goto alloc_fail;
+ }
+
+ return 0;
+
+alloc_fail:
+ hip04_free_ring(ndev, d);
+init_fail:
+ of_node_put(priv->phy_node);
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hip04_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct device *d = &pdev->dev;
+
+ if (priv->phy)
+ phy_disconnect(priv->phy);
+
+ hip04_free_ring(ndev, d);
+ unregister_netdev(ndev);
+ free_irq(ndev->irq, ndev);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hip04_mac_match[] = {
+ { .compatible = "hisilicon,hip04-mac" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, hip04_mac_match);
+
+static struct platform_driver hip04_mac_driver = {
+ .probe = hip04_mac_probe,
+ .remove = hip04_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = hip04_mac_match,
+ },
+};
+module_platform_driver(hip04_mac_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
new file mode 100644
index 000000000000..b3bac25db99c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -0,0 +1,186 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+
+#define MDIO_CMD_REG 0x0
+#define MDIO_ADDR_REG 0x4
+#define MDIO_WDATA_REG 0x8
+#define MDIO_RDATA_REG 0xc
+#define MDIO_STA_REG 0x10
+
+#define MDIO_START BIT(14)
+#define MDIO_R_VALID BIT(1)
+#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START)
+#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START)
+
+struct hip04_mdio_priv {
+ void __iomem *base;
+};
+
+#define WAIT_TIMEOUT 10
+static int hip04_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hip04_mdio_priv *priv = bus->priv;
+ int i;
+
+ for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
+ if (i == WAIT_TIMEOUT)
+ return -ETIMEDOUT;
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct hip04_mdio_priv *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ ret = hip04_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = regnum | (mii_id << 5) | MDIO_READ;
+ writel_relaxed(val, priv->base + MDIO_CMD_REG);
+
+ ret = hip04_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(priv->base + MDIO_STA_REG);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_RDATA_REG);
+ ret = val & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct hip04_mdio_priv *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ ret = hip04_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(value, priv->base + MDIO_WDATA_REG);
+ val = regnum | (mii_id << 5) | MDIO_WRITE;
+ writel_relaxed(val, priv->base + MDIO_CMD_REG);
+out:
+ return ret;
+}
+
+static int hip04_mdio_reset(struct mii_bus *bus)
+{
+ int temp, i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ hip04_mdio_write(bus, i, 22, 0);
+ temp = hip04_mdio_read(bus, i, MII_BMCR);
+ if (temp < 0)
+ continue;
+
+ temp |= BMCR_RESET;
+ if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
+ continue;
+ }
+
+ mdelay(500);
+ return 0;
+}
+
+static int hip04_mdio_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct mii_bus *bus;
+ struct hip04_mdio_priv *priv;
+ int ret;
+
+ bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
+ if (!bus) {
+ dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ bus->name = "hip04_mdio_bus";
+ bus->read = hip04_mdio_read;
+ bus->write = hip04_mdio_write;
+ bus->reset = hip04_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+ priv = bus->priv;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_mdio;
+ }
+
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ goto out_mdio;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+out_mdio:
+ mdiobus_free(bus);
+ return ret;
+}
+
+static int hip04_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id hip04_mdio_match[] = {
+ { .compatible = "hisilicon,hip04-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hip04_mdio_match);
+
+static struct platform_driver hip04_mdio_driver = {
+ .probe = hip04_mdio_probe,
+ .remove = hip04_mdio_remove,
+ .driver = {
+ .name = "hip04-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = hip04_mdio_match,
+ },
+};
+
+module_platform_driver(hip04_mdio_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 566b17db135a..e8a1adb7a962 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
+ swqe->vlan_tag = skb_vlan_tag_get(skb);
}
pr->tx_packets++;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9388a83818f2..162762d1a12c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2367,7 +2367,7 @@ static int emac_wait_deps(struct emac_instance *dev)
err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
for (i = 0; i < EMAC_DEP_COUNT; i++) {
of_node_put(deps[i].node);
- if (err && deps[i].ofdev)
+ if (err)
of_dev_put(deps[i].ofdev);
}
if (err == 0) {
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5b8300a32bf5..f4ff465584a0 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -192,6 +192,17 @@ config IXGBE
To compile this driver as a module, choose M here. The module
will be called ixgbe.
+config IXGBE_VXLAN
+ bool "Virtual eXtensible Local Area Network Support"
+ default n
+ depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
+ ---help---
+ This allows one to create VXLAN virtual interfaces that provide
+ Layer 2 networks over Layer 3 networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to use Virtual eXtensible Local Area Network
+ (VXLAN) in the driver.
+
config IXGBE_HWMON
bool "Intel(R) 10GbE PCI Express adapters HWMON support"
default y
@@ -281,6 +292,17 @@ config I40E_DCB
If unsure, say N.
+config I40E_FCOE
+ bool "Fibre Channel over Ethernet (FCoE)"
+ default n
+ depends on I40E && DCB && FCOE
+ ---help---
+ Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+ in the driver. This will create a new netdev for exclusive FCoE
+ use with XL710 FCoE offloads enabled.
+
+ If unsure, say N.
+
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index b691eb4f6376..4270ad2d4ddf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -24,6 +24,7 @@
/* ethtool support for e1000 */
#include "e1000.h"
+#include <linux/jiffies.h>
#include <linux/uaccess.h>
enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
- if (jiffies >= (time + 2)) {
+ if (time_after_eq(jiffies, time + 2)) {
ret_val = 14; /* error code for time out error */
break;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 83140cbb5f01..7f997d36948f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring, int tx_flags,
int count)
{
- struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_desc *tx_desc = NULL;
struct e1000_tx_buffer *buffer_info;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
- writel(i, hw->hw_addr + tx_ring->tdt);
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
/* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3226,9 +3220,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ E1000_TX_FLAGS_VLAN_SHIFT);
}
first = tx_ring->next_to_use;
@@ -3263,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+ writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
+ /* we need this if more than one processor can write to
+ * our tail at a time, it synchronizes IO on IA64/Altix
+ * systems
+ */
+ mmiowb();
+ }
} else {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
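The e1000 hunks above (together with the matching e1000e change further down) move the TDT tail write out of e1000_tx_queue() and into the xmit path, ringing the doorbell only when skb->xmit_more says the stack has nothing further queued or the queue has been stopped. A hedged sketch of that batching pattern for a generic single-queue NIC (names are illustrative; the real e1000e variant also keeps its PCIM2PCI workaround path):

/* Illustrative only: defer the tail (doorbell) MMIO write while more
 * frames are known to be on the way, to batch several sends per write.
 */
static void maybe_kick_hw(struct net_device *netdev, struct sk_buff *skb,
                          void __iomem *tail_reg, u32 next_to_use)
{
        if (!skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
                writel(next_to_use, tail_reg);
                /* keep the MMIO write ordered on platforms that need it */
                mmiowb();
        }
}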
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 7785240a0da1..9416e5a7e0c8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -34,7 +34,7 @@
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e14fd85f64eb..1e8c40fd5c3d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4189,7 +4189,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
/* Setup hardware time stamping cyclecounter */
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
adapter->cc.read = e1000e_cyclecounter_read;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
/* cc.shift set in e1000e_get_base_timinca() */
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
wmb();
tx_ring->next_to_use = i;
-
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_tdt_wa(tx_ring, i);
- else
- writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
#define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5463,8 +5453,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u16 length, offset;
- if (vlan_tx_tag_present(skb) &&
- !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ if (skb_vlan_tag_present(skb) &&
+ !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
(adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
return 0;
@@ -5603,9 +5593,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (e1000_maybe_stop_tx(tx_ring, count + 2))
return NETDEV_TX_BUSY;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ E1000_TX_FLAGS_VLAN_SHIFT);
}
first = tx_ring->next_to_use;
@@ -5635,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
nr_frags);
if (count) {
- if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- !adapter->tx_hwtstamp_skb)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
+ !adapter->tx_hwtstamp_skb) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5653,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
(MAX_SKB_FRAGS *
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
+
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(tx_ring,
+ tx_ring->next_to_use);
+ else
+ writel(tx_ring->next_to_use, tx_ring->tail);
+
+ /* we need this if more than one processor can write
+ * to our tail at a time, it synchronizes IO on
+ * IA64/Altix systems
+ */
+ mmiowb();
+ }
} else {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index fb1a914a3ad4..978ef9c4a043 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -90,12 +90,9 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- s64 now;
spin_lock_irqsave(&adapter->systim_lock, flags);
- now = timecounter_read(&adapter->tc);
- now += delta;
- timecounter_init(&adapter->tc, &adapter->cc, now);
+ timecounter_adjtime(&adapter->tc, delta);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index eb088b129bc7..84ab9eea2768 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page);
- bi->page = NULL;
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->q.hdr_addr = 0;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->d.staterr = 0;
cleaned_count--;
} while (cleaned_count);
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static inline bool fm10k_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
struct page *page,
unsigned int truesize)
{
/* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_mem_id()))
+ if (unlikely(fm10k_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
return false;
-
- /* bump ref count on page before it is given to the stack */
- get_page(page);
#endif
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_inc(&page->_count);
+
return true;
}
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* we can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(page) == numa_mem_id()))
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!fm10k_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
- put_page(page);
+ __free_page(page);
return false;
}
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
struct page *page;
rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
-
page = rx_buffer->page;
prefetchw(page);
@@ -727,6 +727,12 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
struct ethhdr *eth_hdr;
u8 l4_hdr = 0;
+/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
+#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164
+ if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
+ FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
+ return 0;
+
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
l4_hdr = ip_hdr(skb)->protocol;
@@ -965,8 +971,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
tx_desc = FM10K_TX_DESC(tx_ring, i);
/* add HW VLAN tag */
- if (vlan_tx_tag_present(skb))
- tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ if (skb_vlan_tag_present(skb))
+ tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
else
tx_desc->vlan = 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 14a4ea795c01..9f5457c9e627 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1194,12 +1194,11 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
{
const enum fm10k_mbx_state state = mbx->state;
const u32 *hdr = &mbx->mbx_hdr;
- u16 head, tail;
+ u16 head;
s32 err;
- /* we will need to pull all of the fields for verification */
+ /* we will need to pull the header field for verification */
head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
- tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
/* We should not be receiving disconnect if Rx is incomplete */
if (mbx->pushed)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 8811364b91cb..cfde8bac1aeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
int err;
if ((skb->protocol == htons(ETH_P_8021Q)) &&
- !vlan_tx_tag_present(skb)) {
+ !skb_vlan_tag_present(skb)) {
/* FM10K only supports hardware tagging, any tags in frame
* are considered 2nd level or "outer" tags
*/
@@ -1414,13 +1414,12 @@ struct net_device *fm10k_alloc_netdev(void)
dev->vlan_features |= dev->features;
/* configure tunnel offloads */
- dev->hw_enc_features = NETIF_F_IP_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
+ dev->hw_enc_features |= NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_TSO_ECN |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_IPV6_CSUM;
/* we want to leave these both on as we cannot disable VLAN tag
* insertion or stripping on the hardware since it is contained
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 275423d4f777..7e4711958e46 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -330,13 +330,10 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
struct fm10k_mac_update mac_update;
u32 msg[5];
- /* if glort is not valid return error */
- if (!fm10k_glort_valid_pf(hw, glort))
+ /* if glort or vlan are not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
return FM10K_ERR_PARAM;
- /* drop upper 4 bits of VLAN ID */
- vid = (vid << 4) >> 4;
-
/* record fields */
mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
((u32)mac[3] << 16) |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 7822809436a3..d966044e017a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -57,7 +57,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
struct sk_buff_head *list = &interface->ts_tx_skb_queue;
struct sk_buff *clone;
unsigned long flags;
- __le16 dglort;
/* create clone for us to return on the Tx path */
clone = skb_clone_sk(skb);
@@ -65,8 +64,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
return;
FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
- dglort = FM10K_CB(clone)->fi.w.dglort;
-
spin_lock_irqsave(&list->lock, flags);
/* attempt to locate any buffers with the same dglort,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 280296f29154..7c6d9d5a8ae5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -354,7 +354,7 @@ struct fm10k_hw;
/* Define timeouts for resets and disables */
#define FM10K_QUEUE_DISABLE_TIMEOUT 100
-#define FM10K_RESET_TIMEOUT 100
+#define FM10K_RESET_TIMEOUT 150
/* VF registers */
#define FM10K_VFCTRL 0x00000
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 4b94ddb29c24..c40581999121 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index fc50f6461b13..2b65cdcad6ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -87,11 +87,12 @@
#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
-#define I40E_AQ_LEN 128
-#define I40E_AQ_WORK_LIMIT 16
+#define I40E_AQ_LEN 256
+#define I40E_AQ_WORK_LIMIT 32
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -147,6 +148,7 @@ enum i40e_state_t {
__I40E_FD_FLUSH_REQUESTED,
__I40E_RESET_FAILED,
__I40E_PORT_TX_SUSPENDED,
+ __I40E_VF_DISABLE,
};
enum i40e_interrupt_policy {
@@ -268,7 +270,7 @@ struct i40e_pf {
u16 rx_itr_default;
u16 tx_itr_default;
u16 msg_enable;
- char misc_int_name[IFNAMSIZ + 9];
+ char int_name[I40E_INT_NAME_STR_LEN];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
@@ -524,7 +526,7 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct rcu_head rcu; /* to avoid race with update stats on free */
- char name[IFNAMSIZ + 9];
+ char name[I40E_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
/* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 564d0b0192f7..de17b6fbcc4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8835aeeff23e..929e3d72a01e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -256,6 +256,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_lldp_stop = 0x0A05,
i40e_aqc_opc_lldp_start = 0x0A06,
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+ i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -268,6 +270,8 @@ enum i40e_admin_queue_opc {
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +280,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -410,6 +413,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_VSI 0x0017
#define I40E_AQ_CAP_ID_DCB 0x0018
#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
#define I40E_AQ_CAP_ID_RSS 0x0040
#define I40E_AQ_CAP_ID_RXQ 0x0041
#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +458,11 @@ struct i40e_aqc_arp_proxy_data {
__le32 pfpm_proxyfc;
__le32 ip_addr;
u8 mac_addr[6];
+ u8 reserved[2];
};
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
__le16 table_idx_mac_addr_0;
@@ -481,6 +488,8 @@ struct i40e_aqc_ns_proxy_data {
u8 ipv6_addr_1[16];
};
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
@@ -491,6 +500,8 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
@@ -562,6 +573,8 @@ struct i40e_aqc_get_switch_config_header_resp {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +600,8 @@ struct i40e_aqc_switch_config_element_resp {
__le16 element_info;
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
/* Get Switch Configuration (indirect 0x0200)
* an array of elements are returned in the response buffer
* the first in the array is the header, remainder are elements
@@ -596,6 +611,8 @@ struct i40e_aqc_get_switch_config_resp {
struct i40e_aqc_switch_config_element_resp element[1];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
@@ -661,6 +678,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -1092,6 +1111,8 @@ struct i40e_aqc_remove_tag {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
/* Add multicast E-Tag (direct 0x0257)
* del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
* and no external data
@@ -1207,7 +1228,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1261,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
@@ -1359,6 +1380,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
@@ -1370,6 +1393,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
__le16 qs_handles[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
u8 tc_valid_bits;
@@ -1383,6 +1408,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
u8 reserved3[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
u8 tc_valid_bits;
@@ -1394,6 +1421,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
__le16 seid;
@@ -1421,6 +1450,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved2[96];
};
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 tc_valid_bits;
@@ -1432,6 +1463,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
@@ -1443,6 +1477,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
u8 reserved1[20];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 tc_valid_bits;
@@ -1453,6 +1489,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 reserved2[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
u8 reserved[4];
@@ -1468,6 +1506,8 @@ struct i40e_aqc_query_port_ets_config_resp {
u8 reserved3[32];
};
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
@@ -1482,6 +1522,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
/* Suspend/resume port TX traffic
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
@@ -1495,6 +1537,8 @@ struct i40e_aqc_configure_partition_bw_data {
u8 max_bw[16]; /* bandwidth limit */
};
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1577,6 +1621,8 @@ struct i40e_aqc_module_desc {
u8 reserved2[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1651,8 @@ struct i40e_aq_get_phy_abilities_resp {
struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
@@ -1788,12 +1836,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
__le16 cmd_flags;
-#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define ANVM_READ_SINGLE_FEATURE 0
-#define ANVM_READ_MULTIPLE_FEATURES 1
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
};
@@ -1811,21 +1859,32 @@ struct i40e_aqc_nvm_config_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
- __le16 instance_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
__le16 feature_options;
__le16 feature_selection;
};
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
struct i40e_aqc_nvm_config_data_immediate_field {
-#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
- __le16 field_id;
- __le16 instance_id;
+ __le32 field_id;
+ __le32 field_value;
__le16 field_options;
- __le16 field_value;
+ __le16 reserved;
};
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -2026,12 +2085,54 @@ struct i40e_aqc_get_cee_dcb_cfg_resp {
u8 oper_tc_bw[8];
u8 oper_pfc_en;
__le16 oper_app_prio;
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
__le32 tlv_status;
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
u8 reserved[12];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
@@ -2106,7 +2207,8 @@ struct i40e_aqc_oem_param_change {
#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
- u8 param_value2[8];
+ __le16 param_value2;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2120,6 +2222,28 @@ struct i40e_aqc_oem_state_change {
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
/* debug commands */
/* get device id (0xFF00) uses the generic structure */
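
The I40E_CHECK_STRUCT_LEN()/I40E_CHECK_CMD_LENGTH() annotations added throughout this header pin admin-queue wire formats to their documented sizes at build time. A minimal standalone sketch of the same idea using C11 _Static_assert; the driver's own macros may be implemented differently, and the struct below is hypothetical:

#include <stdio.h>
#include <stdint.h>

struct example_aq_cmd {			/* hypothetical 16-byte direct command */
	uint16_t flags;
	uint16_t opcode;
	uint8_t  reserved[4];
	uint32_t addr_high;
	uint32_t addr_low;
};

/* Build fails if the wire format ever drifts from its documented size. */
_Static_assert(sizeof(struct example_aq_cmd) == 16,
	       "example_aq_cmd must stay 16 bytes");

int main(void)
{
	printf("sizeof(example_aq_cmd) = %zu\n", sizeof(struct example_aq_cmd));
	return 0;
}
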
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 3d741ee99a2c..11a9ffebf8d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -742,6 +742,65 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
#endif
/**
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ i40e_status status = 0;
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if (status || (pba_word != 0xFAFA)) {
+ hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block pointer.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ hw_dbg(hw, "Buffer to small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
+
+/**
* i40e_get_media_type - Gets media type
* @hw: pointer to the hardware structure
**/
@@ -1083,8 +1142,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (mode == I40E_LINK_ACTIVITY)
blink = false;
- gpio_val |= (blink ? 1 : 0) <<
- I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+ if (blink)
+ gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ else
+ gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@@ -2035,6 +2096,43 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
}
/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ i40e_status status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = cpu_to_le32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ *reg_val = ((u64)cmd_resp->value_high << 32) |
+ (u64)cmd_resp->value_low;
+ *reg_val = le64_to_cpu(*reg_val);
+ }
+
+ return status;
+}
+
+/**
* i40e_aq_debug_write_register
* @hw: pointer to the hw struct
* @reg_addr: register address
@@ -2264,6 +2362,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_VSI 0x17
#define I40E_DEV_FUNC_CAP_DCB 0x18
#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_ISCSI 0x22
#define I40E_DEV_FUNC_CAP_RSS 0x40
#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
@@ -2292,6 +2391,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
enum i40e_admin_queue_opc list_type_opc)
{
struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u32 i = 0;
@@ -2362,6 +2462,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number == 1)
p->fcoe = true;
break;
+ case I40E_DEV_FUNC_CAP_ISCSI:
+ if (number == 1)
+ p->iscsi = true;
+ break;
case I40E_DEV_FUNC_CAP_RSS:
p->rss = true;
p->rss_table_size = number;
@@ -2427,6 +2531,34 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (p->npar_enable || p->mfp_mode_1)
p->fcoe = false;
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
+ */
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+
/* additional HW specific goodies that might
* someday be HW version specific
*/
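
The capability-parsing hunk above counts the enabled ports, pops the set bits out of the valid-function bitmap, and derives a 1-based partition id on the assumption that functions are spread evenly across ports. A standalone sketch (not driver code) of that arithmetic with example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t valid_functions = 0x0f;	/* example: 4 PFs reported by HW */
	unsigned int num_ports = 2;		/* example: 2 enabled ports */
	unsigned int pf_id = 3;			/* example: this PF's id */
	unsigned int num_functions = 0;

	while (valid_functions) {		/* popcount, as in the hunk above */
		num_functions += valid_functions & 1;
		valid_functions >>= 1;
	}

	printf("partition_id=%u num_partitions=%u\n",
	       (pf_id / num_ports) + 1, num_functions / num_ports);
	return 0;
}
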
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index cb0de455683e..61236f983971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1890,7 +1890,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, " dump desc aq\n");
- dev_info(&pf->pdev->dev, " dump stats\n");
dev_info(&pf->pdev->dev, " dump reset stats\n");
dev_info(&pf->pdev->dev, " msg_enable [level]\n");
dev_info(&pf->pdev->dev, " read <reg>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 951e8767fc50..b8230dc205ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -219,6 +219,16 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
/**
+ * i40e_partition_setting_complaint - generic complaint for MFP restriction
+ * @pf: the PF struct
+ **/
+static void i40e_partition_setting_complaint(struct i40e_pf *pf)
+{
+ dev_info(&pf->pdev->dev,
+ "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
+}
+
+/**
* i40e_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
* @ecmd: ethtool command
@@ -485,6 +495,14 @@ static int i40e_set_settings(struct net_device *netdev,
u8 autoneg;
u32 advertise;
+ /* Changing port settings is not supported if this isn't the
+ * port's controlling PF
+ */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
@@ -687,6 +705,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
u8 aq_failures;
int err = 0;
+ /* Changing the port's flow control is not supported if this isn't the
+ * port's controlling PF
+ */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
@@ -1503,7 +1529,7 @@ static void i40e_get_wol(struct net_device *netdev,
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if ((1 << hw->port) & wol_nvm_bits) {
+ if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1512,13 +1538,28 @@ static void i40e_get_wol(struct net_device *netdev,
}
}
+/**
+ * i40e_set_wol - set the WakeOnLAN configuration
+ * @netdev: the netdev in question
+ * @wol: the ethtool WoL setting data
+ **/
static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
u16 wol_nvm_bits;
+ /* WoL not supported if this isn't the controlling PF on the port */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return -EOPNOTSUPP;
+
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
if (((1 << hw->port) & wol_nvm_bits))
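
The ethtool hunks above gate port-wide settings (link, pause, WoL) on hw->partition_id == 1, so only the controlling partition of an MFP port may change them. A standalone sketch (not driver code) of that gate; the message text is shortened, and -EOPNOTSUPP is the value the driver returns:

#include <stdio.h>
#include <errno.h>

static int set_port_setting(unsigned int partition_id)
{
	if (partition_id != 1) {
		printf("settings may only be changed from the first partition\n");
		return -EOPNOTSUPP;
	}
	return 0;			/* proceed with the change */
}

int main(void)
{
	printf("partition 1 -> %d\n", set_port_setting(1));
	printf("partition 2 -> %d\n", set_port_setting(2));
	return 0;
}
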
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index a8b8bd95108d..27c206e62da7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -39,15 +39,6 @@
#include "i40e_fcoe.h"
/**
- * i40e_rx_is_fip - returns true if the rx packet type is FIP
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fip(u16 ptype)
-{
- return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
-}
-
-/**
* i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
* @ptype: the packet type field from rx descriptor write-back
**/
@@ -404,6 +395,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
+ info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
enabled_tc = i40e_get_fcoe_tc_map(pf);
i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
@@ -1511,12 +1503,16 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
netdev->mtu = FCOE_MTU;
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ /* set different dev_port value 1 for FCoE netdev than the default
+ * zero dev_port value for PF netdev, this helps biosdevname user
+ * tool to differentiate them correctly while both attached to the
+ * same PCI function.
+ */
+ netdev->dev_port = 1;
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a5f2660d552d..cbe281be1c9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2819,8 +2819,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
* i40e_enable_misc_int_causes - enable the non-queue interrupts
* @hw: ptr to the hardware info
**/
-static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
+static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
+ struct i40e_hw *hw = &pf->hw;
u32 val;
/* clear things first */
@@ -2832,11 +2833,13 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
- I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ if (pf->flags & I40E_FLAG_PTP)
+ val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
@@ -2866,7 +2869,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
- i40e_enable_misc_int_causes(hw);
+ i40e_enable_misc_int_causes(pf);
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
@@ -2937,7 +2940,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
/**
* i40e_irq_dynamic_disable - Disable default interrupt generation settings
* @vsi: pointer to a vsi
- * @vector: enable a particular Hw Interrupt vector
+ * @vector: disable a particular Hw Interrupt vector
**/
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
{
@@ -3402,10 +3405,10 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
err = i40e_vsi_request_irq_msix(vsi, basename);
else if (pf->flags & I40E_FLAG_MSI_ENABLED)
err = request_irq(pf->pdev->irq, i40e_intr, 0,
- pf->misc_int_name, pf);
+ pf->int_name, pf);
else
err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
- pf->misc_int_name, pf);
+ pf->int_name, pf);
if (err)
dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
@@ -3999,6 +4002,35 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
#endif
/**
+ * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
+ * @pf: pointer to pf
+ *
+ * Get TC map for ISCSI PF type that will include iSCSI TC
+ * and LAN TC.
+ **/
+static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
+{
+ struct i40e_dcb_app_priority_table app;
+ struct i40e_hw *hw = &pf->hw;
+ u8 enabled_tc = 1; /* TC0 is always enabled */
+ u8 tc, i;
+ /* Get the iSCSI APP TLV */
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ app = dcbcfg->app[i];
+ if (app.selector == I40E_APP_SEL_TCPIP &&
+ app.protocolid == I40E_APP_PROTOID_ISCSI) {
+ tc = dcbcfg->etscfg.prioritytable[app.priority];
+ enabled_tc |= (1 << tc);
+ break;
+ }
+ }
+
+ return enabled_tc;
+}
+
+/**
* i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
* @dcbcfg: the corresponding DCBx configuration structure
*
@@ -4061,18 +4093,23 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return 1;
+ /* SFP mode will be enabled for all TCs on port */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ return i40e_dcb_get_num_tc(dcbcfg);
+
/* MFP mode return count of enabled TCs for this PF */
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (pf->hw.func_caps.iscsi)
+ enabled_tc = i40e_get_iscsi_tc_map(pf);
+ else
enabled_tc = pf->hw.func_caps.enabled_tcmap;
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
- num_tc++;
- }
- return num_tc;
- }
- /* SFP mode will be enabled for all TCs on port */
- return i40e_dcb_get_num_tc(dcbcfg);
+ /* At least have TC0 */
+ enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ num_tc++;
+ }
+ return num_tc;
}
/**
@@ -4110,12 +4147,15 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return i40e_pf_get_default_tc(pf);
- /* MFP mode will have enabled TCs set by FW */
- if (pf->flags & I40E_FLAG_MFP_ENABLED)
- return pf->hw.func_caps.enabled_tcmap;
-
/* SFP mode we want PF to be enabled for all TCs */
- return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+
+ /* MPF enabled and iSCSI PF type */
+ if (pf->hw.func_caps.iscsi)
+ return i40e_get_iscsi_tc_map(pf);
+ else
+ return pf->hw.func_caps.enabled_tcmap;
}
/**
@@ -4505,9 +4545,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
- if (pf->hw.func_caps.npar_enable)
- goto out;
-
/* Get the initial DCB configuration */
err = i40e_init_dcb(hw);
if (!err) {
@@ -4533,7 +4570,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
"DCBX offload is supported for this PF.\n");
}
} else {
- dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ dev_info(&pf->pdev->dev,
+ "AQ Querying DCB configuration failed: aq_err %d\n",
pf->hw.aq.asq_last_status);
}
@@ -4557,6 +4595,15 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
return;
}
+ /* Warn user if link speed on NPAR enabled partition is not at
+ * least 10GB
+ */
+ if (vsi->back->hw.func_caps.npar_enable &&
+ (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+ vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+ netdev_warn(vsi->netdev,
+ "The partition detected link speed that is less than 10Gbps\n");
+
switch (vsi->back->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB:
strlcpy(speed, "40 Gbps", SPEED_SIZE);
@@ -4836,7 +4883,7 @@ static int i40e_open(struct net_device *netdev)
int i40e_vsi_open(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- char int_name[IFNAMSIZ];
+ char int_name[I40E_INT_NAME_STR_LEN];
int err;
/* allocate descriptors */
@@ -4870,7 +4917,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_set_queues;
} else if (vsi->type == I40E_VSI_FDIR) {
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir",
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
dev_driver_string(&pf->pdev->dev),
dev_name(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
@@ -5494,14 +5541,18 @@ static void i40e_link_event(struct i40e_pf *pf)
{
bool new_link, old_link;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 new_link_speed, old_link_speed;
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
new_link = i40e_get_link_status(&pf->hw);
+ old_link_speed = pf->hw.phy.link_info_old.link_speed;
+ new_link_speed = pf->hw.phy.link_info.link_speed;
if (new_link == old_link &&
+ new_link_speed == old_link_speed &&
(test_bit(__I40E_DOWN, &vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
return;
@@ -6175,8 +6226,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
#ifdef CONFIG_I40E_DCB
ret = i40e_init_pf_dcb(pf);
if (ret) {
- dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
- goto end_core_reset;
+ dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
@@ -6881,17 +6933,17 @@ static int i40e_init_msix(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
other_vecs++;
+ /* Scale down if necessary, and the rings will share vectors */
+ pf->num_lan_msix = min_t(int, pf->num_lan_msix,
+ (hw->func_caps.num_msix_vectors - other_vecs));
+ v_budget = pf->num_lan_msix + other_vecs;
+
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_fcoe_msix = pf->num_fcoe_qps;
v_budget += pf->num_fcoe_msix;
}
-
#endif
- /* Scale down if necessary, and the rings will share vectors */
- pf->num_lan_msix = min_t(int, pf->num_lan_msix,
- (hw->func_caps.num_msix_vectors - other_vecs));
- v_budget = pf->num_lan_msix + other_vecs;
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
@@ -7113,16 +7165,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
*/
if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
err = request_irq(pf->msix_entries[0].vector,
- i40e_intr, 0, pf->misc_int_name, pf);
+ i40e_intr, 0, pf->int_name, pf);
if (err) {
dev_info(&pf->pdev->dev,
"request_irq for %s failed: %d\n",
- pf->misc_int_name, err);
+ pf->int_name, err);
return -EFAULT;
}
}
- i40e_enable_misc_int_causes(hw);
+ i40e_enable_misc_int_causes(pf);
/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
@@ -7306,7 +7358,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
- if (pf->hw.func_caps.num_vfs) {
+ if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
pf->flags |= I40E_FLAG_SRIOV_ENABLED;
pf->num_req_vfs = min_t(int,
@@ -7766,7 +7818,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
enabled_tc = i40e_pf_get_tc_map(pf);
/* MFP mode setup queue map and update VSI */
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+ !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
@@ -7787,6 +7840,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
/* Default/Main VSI is only enabled for TC0
* reconfigure it to enable all TCs that are
* available on the port in SFP mode.
+ * For MFP case the iSCSI PF would use this
+ * flow to enable LAN+iSCSI TC.
*/
ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) {
@@ -9164,7 +9219,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
- snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
+ snprintf(pf->int_name, sizeof(pf->int_name) - 1,
"%s-%s:misc",
dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
@@ -9227,6 +9282,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_configure_lan_hmc;
}
+ /* Disable LLDP for NICs that have firmware versions lower than v4.3.
+ * Ignore error return codes because if it was already disabled via
+ * hardware settings this will fail
+ */
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
+ i40e_aq_stop_lldp(hw, true, NULL);
+ }
+
i40e_get_mac_addr(hw, hw->mac.addr);
if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
@@ -9256,7 +9321,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {
- dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+ dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
/* Continue without DCB enabled */
}
@@ -9671,6 +9736,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
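
i40e_get_iscsi_tc_map() above always enables TC0 and then ORs in the traffic class that the DCB APP table maps to the iSCSI priority; the callers count the set bits to get the number of TCs. A standalone sketch (not driver code) with an example priority table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t prio_table[8] = { 0, 0, 1, 1, 2, 2, 3, 3 }; /* example ETS prio->TC map */
	uint8_t iscsi_app_priority = 4;			    /* example APP TLV priority */
	uint8_t enabled_tc = 1;				    /* TC0 is always enabled */
	unsigned int num_tc = 0;

	enabled_tc |= 1 << prio_table[iscsi_app_priority];

	for (int i = 0; i < 8; i++)			    /* count enabled TCs */
		if (enabled_tc & (1 << i))
			num_tc++;

	printf("enabled_tc=0x%02x num_tc=%u\n", enabled_tc, num_tc);
	return 0;
}
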
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 045b5c4b98b3..ad802dd0f67a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,7 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
#define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2fb4306597e8..68e852a96680 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -71,6 +71,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -245,6 +248,8 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
#ifdef I40E_FCOE
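
The newly exported i40e_read_pba_string() (see the i40e_common.c hunk above) unpacks each NVM word into two ASCII characters, high byte first, and NUL-terminates the result. A standalone sketch (not driver code) of that decode using made-up NVM words:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t pba_words[] = { 0x4131, 0x3242, 0x3343 };	/* made-up NVM data */
	unsigned int pba_size = 3;
	char pba_num[2 * 3 + 1];

	for (unsigned int i = 0; i < pba_size; i++) {
		pba_num[i * 2]     = (pba_words[i] >> 8) & 0xff; /* high byte first */
		pba_num[i * 2 + 1] = pba_words[i] & 0xff;
	}
	pba_num[pba_size * 2] = '\0';

	printf("PBA: %s\n", pba_num);			/* prints "A12B3C" */
	return 0;
}
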
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 6d1ec926aa37..fabcfa1b45b2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -247,7 +247,12 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
u32 prttsyn_stat;
int n;
- if (!(pf->flags & I40E_FLAG_PTP))
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * configured for Tx timestamping, we check if Rx timestamping is
+ * configured. We don't want to spuriously warn about Rx timestamp
+ * hangs if we don't care about the timestamps.
+ */
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
@@ -305,6 +310,13 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
u32 hi, lo;
u64 ns;
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ return;
+
+ /* don't attempt to timestamp if we don't have an skb */
+ if (!pf->ptp_tx_skb)
+ return;
+
lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
@@ -338,7 +350,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
/* Since we cannot turn off the Rx timestamp logic if the device is
* doing Tx timestamping, check if Rx timestamping is configured.
*/
- if (!pf->ptp_rx)
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return;
hw = &pf->hw;
@@ -467,7 +479,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
pf->ptp_rx = false;
- tsyntype = 0;
+ /* We set the type to V1, but do not enable UDP packet
+ * recognition. In this way, we should be as close to
+ * disabling PTP Rx timestamps as possible since V1 packets
+ * are always UDP, since L2 packets are a V2 feature.
+ */
+ tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
@@ -521,17 +538,18 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, regval);
- /* There is no simple on/off switch for Rx. To "disable" Rx support,
- * ignore any received timestamps, rather than turn off the clock.
+ /* Although there is no simple on/off switch for Rx, we "disable" Rx
+ * timestamps by setting to V1 only mode and clear the UDP
+ * recognition. This ought to disable all PTP Rx timestamps as V1
+ * packets are always over UDP. Note that software is configured to
+ * ignore Rx timestamps via the pf->ptp_rx flag.
*/
- if (pf->ptp_rx) {
- regval = rd32(hw, I40E_PRTTSYN_CTL1);
- /* clear everything but the enable bit */
- regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
- /* now enable bits for desired Rx timestamps */
- regval |= tsyntype;
- wr32(hw, I40E_PRTTSYN_CTL1, regval);
- }
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ /* clear everything but the enable bit */
+ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ /* now enable bits for desired Rx timestamps */
+ regval |= tsyntype;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 04b441460bbd..2206d2d36f0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+#define WB_STRIDE 0x3
+
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ /* check to see if there are any non-cache aligned descriptors
+ * waiting to be written back, and kick the hardware to force
+ * them to be written back in case of napi polling
+ */
+ if (budget &&
+ !((i & WB_STRIDE) == WB_STRIDE) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ else
+ tx_ring->arm_wb = false;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
dev_info(tx_ring->dev,
- "tx hang detected on queue %d, resetting adapter\n",
+ "tx hang detected on queue %d, reset requested\n",
tx_ring->queue_index);
- tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+ /* do not fire the reset immediately, wait for the stack to
+ * decide we are truly stuck, also prevents every queue from
+ * simultaneously requesting a reset
+ */
- /* the adapter is about to reset, no point in enabling stuff */
- return true;
+ /* the adapter is about to reset, no point in enabling polling */
+ budget = 1;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
}
- return budget > 0;
+ return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ val);
}
/**
@@ -1063,6 +1098,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1290,9 +1327,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* bet set if the *inner* header is UDP
*/
- if (ipv4_tunnel &&
- (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
- !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ if (ipv4_tunnel) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1337,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->protocol == htons(ETH_P_8021AD))
? VLAN_HLEN : 0;
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(
- iph->saddr, iph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ (udp_hdr(skb)->check != 0)) {
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
+ } /* else it's GRE and so no outer UDP header */
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1620,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true;
+ bool arm_wb = false;
int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1631,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx)
+ i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ arm_wb |= ring->arm_wb;
+ }
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1645,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ if (arm_wb)
+ i40e_force_wb(vsi, q_vector);
return budget;
+ }
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
@@ -1772,8 +1817,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
u32 tx_flags = 0;
/* if we have a HW VLAN tag being added, default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
@@ -1840,17 +1885,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1897,6 +1941,9 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* we are not already transmitting a packet to be timestamped
*/
pf = i40e_netdev_to_pf(tx_ring->netdev);
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return 0;
+
if (pf->ptp_tx &&
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -1946,13 +1993,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1962,7 +2005,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2244,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Place RS bit on last descriptor of any packet that spans across the
* 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
*/
-#define WB_STRIDE 0x3
if (((i & WB_STRIDE) != WB_STRIDE) &&
(first <= &tx_ring->tx_bi[i]) &&
(first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e60d3accb2e2..18b00231d2f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -241,6 +241,7 @@ struct i40e_ring {
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c1f2eb963357..e9901ef06a63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
bool evb_802_1_qbh; /* Bridge Port Extension */
bool dcb;
bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
bool mfp_mode_1;
bool mgmt_cem;
bool ieee_1588;
@@ -431,7 +432,7 @@ struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
- /* function pointer structs */
+ /* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -458,6 +459,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
/* Closest numa node to the device */
u16 numa_node;
@@ -1135,6 +1141,8 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_FLAGS 0x15
+#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_NVM_IMAGE_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 5bae89550657..40f042af4131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -647,6 +647,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
int i;
u32 reg;
+ if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
+ return;
+
/* warn the VF */
clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -668,13 +671,13 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
*/
- for (i = 0; i < 100; i++) {
- /* vf reset requires driver to first reset the
- * vf and then poll the status register to make sure
- * that the requested op was completed
- * successfully
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully. Due to internal HW FIFO flushes,
+ * we must wait 10ms before the register will be valid.
*/
- usleep_range(10, 20);
+ usleep_range(10000, 20000);
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
rsd = true;
@@ -706,6 +709,7 @@ complete_reset:
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
+ clear_bit(__I40E_VF_DISABLE, &pf->state);
}
/**
@@ -790,11 +794,18 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
+ while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
+ usleep_range(1000, 2000);
- /* Disable interrupt 0 so we don't try to handle the VFLR. */
- i40e_irq_dynamic_disable_icr0(pf);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+
+ msleep(20); /* let any messages in transit get finished up */
- mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
@@ -813,7 +824,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
* before this function ever gets called.
*/
if (!pci_vfs_assigned(pf->pdev)) {
- pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
*/
@@ -827,9 +837,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
}
-
- /* Re-enable interrupt 0. */
- i40e_irq_dynamic_enable_icr0(pf);
+ clear_bit(__I40E_VF_DISABLE, &pf->state);
}
#ifdef CONFIG_PCI_IOV
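
The VF-reset hunk above switches to ten polls of VPGEN_VFRSTAT with a ~10 ms sleep between reads, since internal FIFO flushes mean the register is not valid sooner. A standalone sketch (not driver code) of that bounded-poll pattern; read_reset_done() is a stand-in for the register read:

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static bool read_reset_done(int attempt)
{
	return attempt >= 3;		/* pretend the HW completes on the 4th read */
}

int main(void)
{
	bool done = false;

	for (int i = 0; i < 10; i++) {
		usleep(10000);		/* ~10 ms, matching the driver's wait */
		if (read_reset_done(i)) {
			done = true;
			break;
		}
	}

	printf("%s\n", done ? "reset complete" : "reset never completed");
	return 0;
}
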
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 6c31bf22c2c3..60f04e96a80e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index ff1b16370da9..e715bccfb5d2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -268,6 +268,8 @@ enum i40e_admin_queue_opc {
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +278,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -410,6 +411,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_VSI 0x0017
#define I40E_AQ_CAP_ID_DCB 0x0018
#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
#define I40E_AQ_CAP_ID_RSS 0x0040
#define I40E_AQ_CAP_ID_RXQ 0x0041
#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +456,11 @@ struct i40e_aqc_arp_proxy_data {
__le32 pfpm_proxyfc;
__le32 ip_addr;
u8 mac_addr[6];
+ u8 reserved[2];
};
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
__le16 table_idx_mac_addr_0;
@@ -481,6 +486,8 @@ struct i40e_aqc_ns_proxy_data {
u8 ipv6_addr_1[16];
};
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
@@ -491,6 +498,8 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
@@ -562,6 +571,8 @@ struct i40e_aqc_get_switch_config_header_resp {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +598,8 @@ struct i40e_aqc_switch_config_element_resp {
__le16 element_info;
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
/* Get Switch Configuration (indirect 0x0200)
* an array of elements are returned in the response buffer
* the first in the array is the header, remainder are elements
@@ -596,6 +609,8 @@ struct i40e_aqc_get_switch_config_resp {
struct i40e_aqc_switch_config_element_resp element[1];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
@@ -661,6 +676,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -1092,6 +1109,8 @@ struct i40e_aqc_remove_tag {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
/* Add multicast E-Tag (direct 0x0257)
* del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
* and no external data
@@ -1207,7 +1226,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1259,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
@@ -1359,6 +1378,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
@@ -1370,6 +1391,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
__le16 qs_handles[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
u8 tc_valid_bits;
@@ -1383,6 +1406,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
u8 reserved3[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
u8 tc_valid_bits;
@@ -1394,6 +1419,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
__le16 seid;
@@ -1421,6 +1448,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved2[96];
};
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 tc_valid_bits;
@@ -1432,6 +1461,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
@@ -1443,6 +1475,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
u8 reserved1[20];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 tc_valid_bits;
@@ -1453,6 +1487,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 reserved2[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
u8 reserved[4];
@@ -1468,6 +1504,8 @@ struct i40e_aqc_query_port_ets_config_resp {
u8 reserved3[32];
};
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
@@ -1482,6 +1520,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
/* Suspend/resume port TX traffic
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
@@ -1495,6 +1535,8 @@ struct i40e_aqc_configure_partition_bw_data {
u8 max_bw[16]; /* bandwidth limit */
};
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1577,6 +1619,8 @@ struct i40e_aqc_module_desc {
u8 reserved2[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1649,8 @@ struct i40e_aq_get_phy_abilities_resp {
struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
@@ -1788,12 +1834,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
__le16 cmd_flags;
-#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define ANVM_READ_SINGLE_FEATURE 0
-#define ANVM_READ_MULTIPLE_FEATURES 1
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
};
@@ -1811,21 +1857,32 @@ struct i40e_aqc_nvm_config_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
- __le16 instance_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
__le16 feature_options;
__le16 feature_selection;
};
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
struct i40e_aqc_nvm_config_data_immediate_field {
-#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
- __le16 field_id;
- __le16 instance_id;
+ __le32 field_id;
+ __le32 field_value;
__le16 field_options;
- __le16 field_value;
+ __le16 reserved;
};
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -2082,7 +2139,8 @@ struct i40e_aqc_oem_param_change {
#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
- u8 param_value2[8];
+ __le16 param_value2;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2096,6 +2154,28 @@ struct i40e_aqc_oem_state_change {
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
/* debug commands */
/* get device id (0xFF00) uses the generic structure */
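The I40E_CHECK_STRUCT_LEN()/I40E_CHECK_CMD_LENGTH() lines added throughout this header are compile-time guards that the admin-queue wire structures keep exactly the byte size the firmware expects. A comparable guard can be written with C11 _Static_assert; the macro and struct below are hypothetical stand-ins rather than the driver's definitions, and the 16-byte target only reflects this example's layout.

#include <stdint.h>

/* Hypothetical struct-length guard: fail the build if a wire-format
 * struct drifts from the size the other side expects.
 */
#define CHECK_STRUCT_LEN(n, X) \
	_Static_assert(sizeof(struct X) == (n), #X " has the wrong size")

/* Example layout only; it does not mirror any real admin-queue command. */
struct example_aq_cmd {
	uint16_t flags;
	uint16_t opcode;
	uint32_t param0;
	uint32_t addr_high;
	uint32_t addr_low;
} __attribute__((packed));

CHECK_STRUCT_LEN(16, example_aq_cmd);	/* 16-byte payload in this example */

int main(void)
{
	return 0;
}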
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 04c7c1557a0c..29004382f462 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -192,6 +192,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+#define WB_STRIDE 0x3
+
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
@@ -293,6 +295,14 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ if (budget &&
+ !((i & WB_STRIDE) == WB_STRIDE) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ else
+ tx_ring->arm_wb = false;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -344,6 +354,24 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
/**
+ * i40e_force_wb - Arm hardware to do a write back on non-cache-aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+ val);
+}
+
+/**
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
@@ -568,6 +596,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1065,6 +1095,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true;
+ bool arm_wb = false;
int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1075,8 +1106,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx)
+ i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ arm_wb |= ring->arm_wb;
+ }
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
@@ -1087,8 +1120,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ if (arm_wb)
+ i40e_force_wb(vsi, q_vector);
return budget;
+ }
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
@@ -1122,8 +1158,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
u32 tx_flags = 0;
/* if we have a HW VLAN tag being added, default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
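With WB_STRIDE defined as 0x3, the new arm_wb test above requests a forced descriptor write-back only when the cleaned count does not have both low bits set, and only in combination with the budget and queue-state checks. The mask expression itself is easy to verify in isolation; the short program below just evaluates the same test for a few counter values.

#include <stdio.h>

#define WB_STRIDE 0x3	/* same mask value the driver uses */

int main(void)
{
	for (unsigned int i = 0; i < 8; i++) {
		/* Mirrors !((i & WB_STRIDE) == WB_STRIDE) from the Tx clean
		 * path: true except when the two low bits of i are both set.
		 */
		int would_arm = !((i & WB_STRIDE) == WB_STRIDE);

		printf("i=%u -> arm_wb candidate: %s\n",
		       i, would_arm ? "yes" : "no");
	}
	return 0;
}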
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c7f29626eada..4e15903b2b6d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -238,6 +238,7 @@ struct i40e_ring {
u8 atr_count;
bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 68aec11f6523..3d0fdaab5cc8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
bool evb_802_1_qbh; /* Bridge Port Extension */
bool dcb;
bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
bool mfp_mode_1;
bool mgmt_cem;
bool ieee_1588;
@@ -425,7 +426,7 @@ struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
- /* function pointer structs */
+ /* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -452,6 +453,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
/* Closest numa node to the device */
u16 numa_node;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index cabaf599f562..8d8c201c63c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.0.6"
+#define DRV_VERSION "1.2.0"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
- /* re-enable interrupt causes */
- wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
- wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
-
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -947,30 +943,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
}
/**
- * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_clean_rx_ring(adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_clean_tx_ring(adapter->tx_rings[i]);
-}
-
-/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
**/
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state == __I40EVF_DOWN)
return;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ i40evf_irq_disable(adapter);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
}
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* cancel any current operation */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->aq_pending = 0;
+ /* Schedule operations to close down the HW. Don't wait
+ * here for this to complete. The watchdog is still running
+ * and it will take care of this.
+ */
+ adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- /* disable receives */
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
}
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
- i40evf_irq_disable(adapter);
-
i40evf_napi_disable_all(adapter);
- netif_carrier_off(netdev);
+ msleep(20);
- i40evf_clean_all_tx_rings(adapter);
- i40evf_clean_all_rx_rings(adapter);
+ netif_carrier_off(netdev);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
/* Process admin queue tasks. After init, everything gets done
* here so we don't race on the admin queue.
*/
- if (adapter->aq_pending)
+ if (adapter->aq_pending) {
+ if (!i40evf_asq_done(hw)) {
+ dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+ i40evf_send_api_ver(adapter);
+ }
goto watchdog_done;
+ }
if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
-
- i40evf_irq_enable(adapter, true);
- i40evf_fire_sw_int(adapter, 0xFF);
-
watchdog_done:
+ if (adapter->state == __I40EVF_RUNNING) {
+ i40evf_irq_enable_queues(adapter, ~0);
+ i40evf_fire_sw_int(adapter, 0xFF);
+ } else {
+ i40evf_fire_sw_int(adapter, 0x1);
+ }
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1633,17 +1621,17 @@ static void i40evf_adminq_task(struct work_struct *work)
u16 pending;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
- return;
+ goto out;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
- return;
+ goto out;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
- if (ret)
+ if (ret || !v_msg->v_opcode)
break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
+ kfree(event.msg_buf);
+out:
/* re-enable Admin queue interrupt cause */
i40evf_misc_irq_enable(adapter);
-
- kfree(event.msg_buf);
}
/**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
- err);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- dev_info(&pdev->dev, "Resending request\n");
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
err = i40evf_send_api_ver(adapter);
- }
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- dev_info(&pdev->dev, "Resending VF config request\n");
err = i40evf_send_vf_config_msg(adapter);
goto err;
}
@@ -2230,12 +2213,18 @@ err:
static void i40evf_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (netif_running(netdev))
i40evf_close(netdev);
+ /* Prevent the watchdog from running. */
+ adapter->state = __I40EVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->aq_pending = 0;
+
#ifdef CONFIG_PM
pci_save_state(pdev);
@@ -2448,7 +2437,18 @@ static void i40evf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+
+ /* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->aq_pending = 0;
+ i40evf_request_reset(adapter);
+ msleep(20);
+ /* If the FW isn't responding, kick it once, but only once. */
+ if (!i40evf_asq_done(hw)) {
+ i40evf_request_reset(adapter);
+ msleep(20);
+ }
if (adapter->msix_entries) {
i40evf_misc_irq_disable(adapter);
@@ -2477,6 +2477,10 @@ static void i40evf_remove(struct pci_dev *pdev)
list_del(&f->list);
kfree(f);
}
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
free_netdev(netdev);
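i40evf_down() now serializes against the watchdog and reset tasks by looping on test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, ...) with a short sleep and releasing the bit with clear_bit() once the teardown work is queued. Below is a minimal user-space sketch of that acquire/sleep/release shape, using a C11 atomic flag purely as an illustration; the kernel primitives are not these.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_flag crit_section = ATOMIC_FLAG_INIT;

/* Spin until the flag is acquired, sleeping ~1 ms between attempts,
 * mirroring the test_and_set_bit()/usleep_range() loop in the driver.
 */
static void crit_enter(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000L };

	while (atomic_flag_test_and_set(&crit_section))
		nanosleep(&ts, NULL);
}

static void crit_exit(void)
{
	atomic_flag_clear(&crit_section);
}

int main(void)
{
	crit_enter();
	printf("holding the critical section, queueing teardown work\n");
	crit_exit();
	return 0;
}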
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 5fde5a7f4591..3f0c85ecbca6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
return;
}
- if (v_opcode != adapter->current_op)
- dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
- adapter->current_op, v_opcode);
if (v_retval) {
dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ /* no action, but also not an error */
+ break;
case I40E_VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 82d891e183b1..c2bd4f98a837 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -29,7 +29,7 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
@@ -343,6 +343,9 @@ struct hwmon_buff {
};
#endif
+#define IGB_N_EXTTS 2
+#define IGB_N_PEROUT 2
+#define IGB_N_SDP 4
#define IGB_RETA_SIZE 128
/* board specific private data structure */
@@ -439,6 +442,12 @@ struct igb_adapter {
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+ struct ptp_pin_desc sdp_config[IGB_N_SDP];
+ struct {
+ struct timespec start;
+ struct timespec period;
+ } perout[IGB_N_PEROUT];
+
char fw_version[32];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ff59897a9463..f366b3b96d03 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
}
/* record initial flags and protocol */
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
}
}
+static void igb_tsync_interrupt(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct ptp_clock_event event;
+ struct timespec ts;
+ u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & TSINTR_SYS_WRAP) {
+ event.type = PTP_CLOCK_PPS;
+ if (adapter->ptp_caps.pps)
+ ptp_clock_event(adapter->ptp_clock, &event);
+ else
+ dev_err(&adapter->pdev->dev, "unexpected SYS WRAP\n");
+ ack |= TSINTR_SYS_WRAP;
+ }
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ ack |= E1000_TSICR_TXTS;
+ }
+
+ if (tsicr & TSINTR_TT0) {
+ spin_lock(&adapter->tmreg_lock);
+ ts = timespec_add(adapter->perout[0].start,
+ adapter->perout[0].period);
+ wr32(E1000_TRGTTIML0, ts.tv_nsec);
+ wr32(E1000_TRGTTIMH0, ts.tv_sec);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsauxc |= TSAUXC_EN_TT0;
+ wr32(E1000_TSAUXC, tsauxc);
+ adapter->perout[0].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+ ack |= TSINTR_TT0;
+ }
+
+ if (tsicr & TSINTR_TT1) {
+ spin_lock(&adapter->tmreg_lock);
+ ts = timespec_add(adapter->perout[1].start,
+ adapter->perout[1].period);
+ wr32(E1000_TRGTTIML1, ts.tv_nsec);
+ wr32(E1000_TRGTTIMH1, ts.tv_sec);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsauxc |= TSAUXC_EN_TT1;
+ wr32(E1000_TSAUXC, tsauxc);
+ adapter->perout[1].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+ ack |= TSINTR_TT1;
+ }
+
+ if (tsicr & TSINTR_AUTT0) {
+ nsec = rd32(E1000_AUXSTMPL0);
+ sec = rd32(E1000_AUXSTMPH0);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = sec * 1000000000ULL + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+ ack |= TSINTR_AUTT0;
+ }
+
+ if (tsicr & TSINTR_AUTT1) {
+ nsec = rd32(E1000_AUXSTMPL1);
+ sec = rd32(E1000_AUXSTMPH1);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 1;
+ event.timestamp = sec * 1000000000ULL + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+ ack |= TSINTR_AUTT1;
+ }
+
+ /* acknowledge the interrupts */
+ wr32(E1000_TSICR, ack);
+}
+
static irqreturn_t igb_msix_other(int irq, void *data)
{
struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (icr & E1000_ICR_TS) {
- u32 tsicr = rd32(E1000_TSICR);
-
- if (tsicr & E1000_TSICR_TXTS) {
- /* acknowledge the interrupt */
- wr32(E1000_TSICR, E1000_TSICR_TXTS);
- /* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
- }
- }
+ if (icr & E1000_ICR_TS)
+ igb_tsync_interrupt(adapter);
wr32(E1000_EIMS, adapter->eims_other);
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
/* reply to reset with ack and vf mac address */
- msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, ETH_ALEN);
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
+ }
igb_write_mbx(hw, msgbuf, 3, vf);
}
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (icr & E1000_ICR_TS) {
- u32 tsicr = rd32(E1000_TSICR);
-
- if (tsicr & E1000_TSICR_TXTS) {
- /* acknowledge the interrupt */
- wr32(E1000_TSICR, E1000_TSICR_TXTS);
- /* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
- }
- }
+ if (icr & E1000_ICR_TS)
+ igb_tsync_interrupt(adapter);
napi_schedule(&q_vector->napi);
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (icr & E1000_ICR_TS) {
- u32 tsicr = rd32(E1000_TSICR);
-
- if (tsicr & E1000_TSICR_TXTS) {
- /* acknowledge the interrupt */
- wr32(E1000_TSICR, E1000_TSICR_TXTS);
- /* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
- }
- }
+ if (icr & E1000_ICR_TS)
+ igb_tsync_interrupt(adapter);
napi_schedule(&q_vector->napi);
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static inline bool igb_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
struct page *page,
unsigned int truesize)
{
/* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
-
- if (unlikely(page->pfmemalloc))
+ if (unlikely(igb_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
return false;
-
- /* bump ref count on page before it is given to the stack */
- get_page(page);
#endif
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_inc(&page->_count);
+
return true;
}
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* we can reuse buffer as-is, just make sure it is local */
- if (likely((page_to_nid(page) == numa_node_id()) &&
- !page->pfmemalloc))
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!igb_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
- put_page(page);
+ __free_page(page);
return false;
}
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
struct page *page;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
page = rx_buffer->page;
prefetchw(page);
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
cleaned_count--;
} while (cleaned_count);
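igb_tsync_interrupt() above follows the usual write-1-to-clear flow: read TSICR once, handle each asserted cause, accumulate the handled bits in ack, and write ack back at the end. That bookkeeping can be modelled compactly; the cause bits and register below are made up for illustration and are not E1000 definitions.

#include <stdint.h>
#include <stdio.h>

/* Made-up cause bits for illustration only. */
#define CAUSE_PPS	0x1u
#define CAUSE_TXTS	0x2u
#define CAUSE_TARGET0	0x4u

static uint32_t fake_icr = CAUSE_PPS | CAUSE_TARGET0;	/* pretend HW latched these */

static void handle_causes(void)
{
	uint32_t cause = fake_icr;	/* single read of the cause register */
	uint32_t ack = 0;

	if (cause & CAUSE_PPS) {
		printf("pulse-per-second event\n");
		ack |= CAUSE_PPS;
	}
	if (cause & CAUSE_TXTS) {
		printf("tx timestamp ready\n");
		ack |= CAUSE_TXTS;
	}
	if (cause & CAUSE_TARGET0) {
		printf("target time 0 fired\n");
		ack |= CAUSE_TARGET0;
	}

	fake_icr &= ~ack;	/* write-1-to-clear: acknowledge only what was handled */
}

int main(void)
{
	handle_causes();
	return 0;
}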
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 794c139f0cc0..d20fc8ed11f1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -256,14 +256,9 @@ static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
- s64 now;
spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- now = timecounter_read(&igb->tc);
- now += delta;
- timecounter_init(&igb->tc, &igb->cc, now);
-
+ timecounter_adjtime(&igb->tc, delta);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
return 0;
@@ -360,12 +355,239 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
return 0;
}
+static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
+{
+ u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
+ u32 mask[IGB_N_SDP] = {
+ E1000_CTRL_SDP0_DIR,
+ E1000_CTRL_SDP1_DIR,
+ E1000_CTRL_EXT_SDP2_DIR,
+ E1000_CTRL_EXT_SDP3_DIR,
+ };
+
+ if (input)
+ *ptr &= ~mask[pin];
+ else
+ *ptr |= mask[pin];
+}
+
+static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
+{
+ struct e1000_hw *hw = &igb->hw;
+ u32 aux0_sel_sdp[IGB_N_SDP] = {
+ AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
+ };
+ u32 aux1_sel_sdp[IGB_N_SDP] = {
+ AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
+ };
+ u32 ts_sdp_en[IGB_N_SDP] = {
+ TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
+ };
+ u32 ctrl, ctrl_ext, tssdp = 0;
+
+ ctrl = rd32(E1000_CTRL);
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ tssdp = rd32(E1000_TSSDP);
+
+ igb_pin_direction(pin, 1, &ctrl, &ctrl_ext);
+
+ /* Make sure this pin is not enabled as an output. */
+ tssdp &= ~ts_sdp_en[pin];
+
+ if (chan == 1) {
+ tssdp &= ~AUX1_SEL_SDP3;
+ tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN;
+ } else {
+ tssdp &= ~AUX0_SEL_SDP3;
+ tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN;
+ }
+
+ wr32(E1000_TSSDP, tssdp);
+ wr32(E1000_CTRL, ctrl);
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+{
+ struct e1000_hw *hw = &igb->hw;
+ u32 aux0_sel_sdp[IGB_N_SDP] = {
+ AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
+ };
+ u32 aux1_sel_sdp[IGB_N_SDP] = {
+ AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
+ };
+ u32 ts_sdp_en[IGB_N_SDP] = {
+ TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
+ };
+ u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
+ TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
+ TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
+ };
+ u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
+ TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
+ TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
+ };
+ u32 ts_sdp_sel_clr[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+ TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+ };
+ u32 ctrl, ctrl_ext, tssdp = 0;
+
+ ctrl = rd32(E1000_CTRL);
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ tssdp = rd32(E1000_TSSDP);
+
+ igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);
+
+ /* Make sure this pin is not enabled as an input. */
+ if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
+ tssdp &= ~AUX0_TS_SDP_EN;
+
+ if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
+ tssdp &= ~AUX1_TS_SDP_EN;
+
+ tssdp &= ~ts_sdp_sel_clr[pin];
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_tt1[pin];
+ else
+ tssdp |= ts_sdp_sel_tt0[pin];
+
+ tssdp |= ts_sdp_en[pin];
+
+ wr32(E1000_TSSDP, tssdp);
+ wr32(E1000_CTRL, ctrl);
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct igb_adapter *igb =
+ container_of(ptp, struct igb_adapter, ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+ unsigned long flags;
+ struct timespec ts;
+ int pin;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ if (rq->extts.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TS1;
+ tsim_mask = TSINTR_AUTT1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TS0;
+ tsim_mask = TSINTR_AUTT0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (on) {
+ igb_pin_extts(igb, rq->extts.index, pin);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ } else {
+ tsauxc &= ~tsauxc_mask;
+ tsim &= ~tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec_to_ns(&ts);
+ ns = ns >> 1;
+ if (on && ns < 500000LL) {
+ /* 2k interrupts per second is an awful lot. */
+ return -EINVAL;
+ }
+ ts = ns_to_timespec(ns);
+ if (rq->perout.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ trgttiml = E1000_TRGTTIML1;
+ trgttimh = E1000_TRGTTIMH1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ trgttiml = E1000_TRGTTIML0;
+ trgttimh = E1000_TRGTTIMH0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (on) {
+ int i = rq->perout.index;
+
+ igb_pin_perout(igb, i, pin);
+ igb->perout[i].start.tv_sec = rq->perout.start.sec;
+ igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
+ igb->perout[i].period.tv_sec = ts.tv_sec;
+ igb->perout[i].period.tv_nsec = ts.tv_nsec;
+ wr32(trgttiml, rq->perout.start.sec);
+ wr32(trgttimh, rq->perout.start.nsec);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ } else {
+ tsauxc &= ~tsauxc_mask;
+ tsim &= ~tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsim = rd32(E1000_TSIM);
+ if (on)
+ tsim |= TSINTR_SYS_WRAP;
+ else
+ tsim &= ~TSINTR_SYS_WRAP;
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
+static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
/**
* igb_ptp_tx_work
* @work: pointer to work struct
@@ -756,6 +978,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ int i;
switch (hw->mac.type) {
case e1000_82576:
@@ -770,7 +993,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
/* Dial the nominal frequency. */
@@ -790,7 +1013,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
- adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+ adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
adapter->cc.shift = 0;
/* Enable the timer functions by clearing bit 31. */
@@ -798,16 +1021,27 @@ void igb_ptp_init(struct igb_adapter *adapter)
break;
case e1000_i210:
case e1000_i211:
+ for (i = 0; i < IGB_N_SDP; i++) {
+ struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
- adapter->ptp_caps.n_ext_ts = 0;
- adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
+ adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
+ adapter->ptp_caps.n_pins = IGB_N_SDP;
+ adapter->ptp_caps.pps = 1;
+ adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
adapter->ptp_caps.settime = igb_ptp_settime_i210;
- adapter->ptp_caps.enable = igb_ptp_feature_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
+ adapter->ptp_caps.verify = igb_ptp_verify_pin;
/* Enable the timer functions by clearing bit 31. */
wr32(E1000_TSAUXC, 0x0);
break;
@@ -905,6 +1139,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
void igb_ptp_reset(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
if (!(adapter->flags & IGB_FLAG_PTP))
return;
@@ -912,6 +1147,8 @@ void igb_ptp_reset(struct igb_adapter *adapter)
/* reset the tstamp_config */
igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
switch (adapter->hw.mac.type) {
case e1000_82576:
/* Dial the nominal frequency. */
@@ -922,23 +1159,25 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i350:
case e1000_i210:
case e1000_i211:
- /* Enable the timer functions and interrupts. */
wr32(E1000_TSAUXC, 0x0);
+ wr32(E1000_TSSDP, 0x0);
wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
/* No work to do. */
- return;
+ goto out;
}
/* Re-initialize the timer. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
struct timespec ts = ktime_to_timespec(ktime_get_real());
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ igb_ptp_write_i210(adapter, &ts);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
}
+out:
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
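Because the i210/i211 clock now advertises pins, periodic outputs and PPS, user space can exercise the new SDP features through the standard PTP character device, much as the kernel's testptp tool does. Here is a minimal sketch that routes periodic-output channel 0 onto SDP0 and asks for a 1 Hz output; the /dev/ptp0 path, the pin index and the two-second start offset are assumptions about the target system.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

/* Dynamic POSIX clock id for an open /dev/ptpN descriptor
 * (same encoding the kernel's testptp tool uses).
 */
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	const char *dev = "/dev/ptp0";	/* assumed to be the igb PHC */
	struct ptp_pin_desc pin;
	struct ptp_perout_request req;
	struct timespec ts;
	int fd = open(dev, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Route periodic-output channel 0 onto pin index 0 (SDP0). */
	memset(&pin, 0, sizeof(pin));
	pin.index = 0;
	pin.func = PTP_PF_PEROUT;
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin)) {
		perror("PTP_PIN_SETFUNC");
		return 1;
	}

	/* Start a 1 Hz output two seconds from the PHC's current time. */
	clock_gettime(FD_TO_CLOCKID(fd), &ts);
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.start.sec = ts.tv_sec + 2;
	req.start.nsec = 0;
	req.period.sec = 1;
	req.period.nsec = 0;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req)) {
		perror("PTP_PEROUT_REQUEST");
		return 1;
	}

	close(fd);
	return 0;
}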
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 63c807c9b21c..ebf9d4a42fdd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
static int igbvf_tso(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
+ __be16 protocol)
{
struct e1000_adv_tx_context_desc *context_desc;
struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct e1000_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
+ switch (protocol) {
case htons(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
u8 hdr_len = 0;
int count = 0;
int tso = 0;
+ __be16 protocol = vlan_get_protocol(skb);
if (test_bit(__IGBVF_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -2234,18 +2237,19 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IGBVF_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ IGBVF_TX_FLAGS_VLAN_SHIFT);
}
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IGBVF_TX_FLAGS_IPV4;
first = tx_ring->next_to_use;
tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+ igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
if (unlikely(tso < 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2253,7 +2257,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (tso)
tx_flags |= IGBVF_TX_FLAGS_TSO;
- else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IGBVF_TX_FLAGS_CSUM;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index aa87605b144a..11a1bdbe3fd9 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
DESC_NEEDED)))
return NETDEV_TX_BUSY;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
- vlan_id = vlan_tx_tag_get(skb);
+ vlan_id = skb_vlan_tag_get(skb);
}
first = adapter->tx_ring.next_to_use;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6137be43920..7dcbbec09a70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -38,7 +38,7 @@
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -76,6 +76,8 @@
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
+#define IXGBE_ETH_P_LLDP 0x88CC
+
/* flow control */
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
@@ -753,6 +755,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
+ u16 vxlan_port;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2ed2c7de2304..70cc4c5c0a01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,6 +50,7 @@
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
+#include <net/vxlan.h>
#ifdef CONFIG_OF
#include <linux/of_net.h>
@@ -1396,12 +1397,23 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+ bool encap_pkt = false;
+
skb_checksum_none_assert(skb);
/* Rx csum disabled */
if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
+ if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
+ (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
+ encap_pkt = true;
+ skb->encapsulation = 1;
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
/* if IP and error */
if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
@@ -1413,8 +1425,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
return;
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
- __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-
/*
* 82599 errata, UDP frames with a 0 checksum can be marked as
* checksum errors.
@@ -1429,6 +1439,17 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (encap_pkt) {
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
+ return;
+
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
+ ring->rx_stats.csum_err++;
+ return;
+ }
+ /* If we checked the outer header let the stack know */
+ skb->csum_level = 1;
+ }
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -3564,10 +3585,24 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
adapter->num_vfs);
+
+ /* Ensure LLDP is set for Ethertype Antispoofing if we will be
+ * calling set_ethertype_anti_spoofing for each VF in the loop below
+ */
+ if (hw->mac.ops.set_ethertype_anti_spoofing)
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+ (IXGBE_ETQF_FILTER_EN | /* enable filter */
+ IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
+ IXGBE_ETH_P_LLDP)); /* LLDP eth type */
+
/* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
if (!adapter->vfinfo[i].spoofchk_enabled)
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
+
+ /* enable ethertype anti spoofing if hw supports it */
+ if (hw->mac.ops.set_ethertype_anti_spoofing)
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}
}
@@ -5627,6 +5662,10 @@ static int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
+#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+ vxlan_get_rx_port(netdev);
+
+#endif
return 0;
err_set_queues:
@@ -7217,8 +7256,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* if we have a HW VLAN tag being added default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
@@ -7227,11 +7266,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (!vhdr)
goto out_drop;
- protocol = vhdr->h_vlan_encapsulated_proto;
tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
+ protocol = vlan_get_protocol(skb);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock &&
@@ -7771,6 +7810,64 @@ static int ixgbe_set_features(struct net_device *netdev,
return 0;
}
+/**
+ * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @dev: The port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 new_port = ntohs(port);
+
+ if (sa_family == AF_INET6)
+ return;
+
+ if (adapter->vxlan_port == new_port) {
+ netdev_info(dev, "Port %d already offloaded\n", new_port);
+ return;
+ }
+
+ if (adapter->vxlan_port) {
+ netdev_info(dev,
+ "Hit Max num of UDP ports, not adding port %d\n",
+ new_port);
+ return;
+ }
+
+ adapter->vxlan_port = new_port;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+}
+
+/**
+ * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @dev: The port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 new_port = ntohs(port);
+
+ if (sa_family == AF_INET6)
+ return;
+
+ if (adapter->vxlan_port != new_port) {
+ netdev_info(dev, "Port %d was not found, not deleting\n",
+ new_port);
+ return;
+ }
+
+ adapter->vxlan_port = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+}
+
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
@@ -7786,7 +7883,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh, u16 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -7982,6 +8079,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
+ .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
+ .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
};
/**
@@ -8339,6 +8438,15 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ break;
+ default:
+ break;
+ }
+
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
#endif
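The two new ndo callbacks above keep track of the single UDP port the MAC can offload: IPv6 notifications are ignored, a second port is refused, and VXLANCTRL is programmed or cleared. The bookkeeping, minus the register access, is small enough to model directly; the struct and function names below are stand-ins and not driver symbols.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the adapter field that remembers the offloaded port. */
struct vxlan_offload {
	uint16_t port;	/* 0 means "nothing offloaded" */
};

static void add_port(struct vxlan_offload *ofl, int is_ipv6, uint16_t be_port)
{
	uint16_t port = ntohs(be_port);

	if (is_ipv6)
		return;			/* only IPv4 notifications are handled */
	if (ofl->port == port) {
		printf("port %u already offloaded\n", port);
		return;
	}
	if (ofl->port) {
		printf("only one UDP port fits, not adding %u\n", port);
		return;
	}
	ofl->port = port;		/* the driver also writes VXLANCTRL here */
	printf("offloading port %u\n", port);
}

static void del_port(struct vxlan_offload *ofl, int is_ipv6, uint16_t be_port)
{
	uint16_t port = ntohs(be_port);

	if (is_ipv6 || ofl->port != port)
		return;
	ofl->port = 0;			/* and clears VXLANCTRL */
	printf("stopped offloading port %u\n", port);
}

int main(void)
{
	struct vxlan_offload ofl = { 0 };

	add_port(&ofl, 0, htons(4789));	/* IANA VXLAN port */
	add_port(&ofl, 0, htons(8472));	/* second port is refused */
	del_port(&ofl, 0, htons(4789));
	return 0;
}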
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5fd4b5271f9a..79c00f57d3e7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -261,18 +261,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
unsigned long flags;
- u64 now;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
- now = timecounter_read(&adapter->tc);
- now += delta;
-
- /* reset the timecounter */
- timecounter_init(&adapter->tc,
- &adapter->cc,
- now);
-
+ timecounter_adjtime(&adapter->tc, delta);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ixgbe_ptp_setup_sdp(adapter);
@@ -802,7 +793,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
memset(&adapter->cc, 0, sizeof(adapter->cc));
adapter->cc.read = ixgbe_ptp_read;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.shift = shift;
adapter->cc.mult = 1;
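Both adjtime callbacks above replace the open-coded read, add and re-init sequence with timecounter_adjtime(), which moves the timecounter's nanosecond base by delta in place; for these drivers the result is the same. The toy counter below only mimics that arithmetic and is not the kernel's timecounter.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in holding only the running nanosecond total. */
struct toy_timecounter {
	uint64_t nsec;
};

/* Old style: read the current time, add delta, re-seed the counter. */
static void adjtime_reinit(struct toy_timecounter *tc, int64_t delta)
{
	uint64_t now = tc->nsec;	/* "timecounter_read()" */

	now += delta;
	tc->nsec = now;			/* "timecounter_init(..., now)" */
}

/* New style: adjust the base in place, as timecounter_adjtime() does. */
static void adjtime_in_place(struct toy_timecounter *tc, int64_t delta)
{
	tc->nsec += delta;
}

int main(void)
{
	struct toy_timecounter a = { .nsec = 1000000000ull };
	struct toy_timecounter b = a;

	adjtime_reinit(&a, -250);
	adjtime_in_place(&b, -250);
	printf("reinit=%llu in-place=%llu\n",
	       (unsigned long long)a.nsec, (unsigned long long)b.nsec);
	return 0;
}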
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c76ba90ecc6e..7f37fe7269a7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -101,9 +101,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
}
- /* We do not support RSS w/ SR-IOV */
- adapter->ring_feature[RING_F_RSS].limit = 1;
-
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
@@ -1097,14 +1094,12 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
u16 vlan, u8 qos)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
+ int err;
- if (adapter->vfinfo[vf].pf_vlan)
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan,
- vf);
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
+
ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
if (adapter->vfinfo[vf].spoofchk_enabled)
@@ -1143,6 +1138,11 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
+
+ /* disable hide VLAN on X550 */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
+
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index d101b25dc4b6..fc5ecee56ca8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -378,6 +378,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_SPOOF_MACAS_MASK 0xFF
#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
#define IXGBE_PFVFSPOOF_REG_COUNT 8
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
@@ -399,6 +401,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
* Filter Table */
@@ -1540,6 +1543,7 @@ enum {
#define IXGBE_MAX_ETQF_FILTERS 8
#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
@@ -1565,6 +1569,9 @@ enum {
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
#define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_LLDP 5
+#define IXGBE_ETQF_FILTER_LACP 6
+
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -2122,6 +2129,7 @@ enum {
#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
@@ -2139,6 +2147,7 @@ enum {
#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
@@ -2227,6 +2236,8 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
@@ -3056,6 +3067,7 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
s32 (*dmac_config)(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index ba54ff07b438..49395420c9b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -55,9 +55,6 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- /* Call PHY identify routine to get the phy type */
- ixgbe_identify_phy_generic(hw);
-
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index ffdd1231f419..50bf81908dd6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -80,7 +80,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -110,8 +110,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
* @device_type: 3 bit device type
* @phy_data: Pointer to read data from the register
**/
-s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data)
+static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
{
u32 i, command, error;
@@ -158,7 +158,8 @@ s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
{
s32 status;
struct ixgbe_hic_read_shadow_ram buffer;
@@ -193,8 +194,8 @@ s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data)
+static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
{
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
@@ -331,7 +332,8 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+ u32 buffer_size)
{
u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
u16 *local_buffer;
@@ -407,7 +409,7 @@ s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
return ixgbe_calc_checksum_X550(hw, NULL, 0);
}
@@ -419,7 +421,7 @@ s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
s32 status = 0;
@@ -440,7 +442,8 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+ u16 *checksum_val)
{
s32 status;
u16 checksum;
@@ -489,7 +492,8 @@ s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
{
s32 status;
struct ixgbe_hic_write_shadow_ram buffer;
@@ -517,7 +521,7 @@ s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
{
s32 status = 0;
@@ -537,7 +541,7 @@ s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
**/
-s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
{
s32 status = 0;
union ixgbe_hic_hdr2 buffer;
@@ -560,7 +564,7 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
s32 status;
u16 checksum = 0;
@@ -600,8 +604,9 @@ s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Write a 16 bit word(s) to the EEPROM using the hostif.
**/
-s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data)
+static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words,
+ u16 *data)
{
s32 status = 0;
u32 i = 0;
@@ -630,7 +635,7 @@ s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
-void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -647,7 +652,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
* @hw: pointer to hardware structure
*/
-s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
bool setup_linear;
u16 reg_slice, edc_mode;
@@ -703,9 +708,9 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
**/
-s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -740,8 +745,8 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
+static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
{
u32 i, command, error;
@@ -904,7 +909,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
*
* Configures the integrated KX4 PHY.
**/
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
{
s32 status;
u32 reg_val;
@@ -942,7 +947,7 @@ s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
*
* Configures the integrated KR PHY.
**/
-s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
s32 status;
u32 reg_val;
@@ -987,7 +992,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
* A return of a non-zero value indicates an error, and the base driver should
* not report link up.
**/
-s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
{
u32 status;
u16 lasi, autoneg_status, speed;
@@ -1049,7 +1054,7 @@ s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
* set during init_shared_code because the PHY/SFP type was
* not known. Perform the SFP init if necessary.
**/
-s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val;
@@ -1102,7 +1107,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
* Returns the media type (fiber, copper, backplane)
*
*/
-enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
{
enum ixgbe_media_type media_type;
@@ -1129,7 +1134,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
-s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
u32 status;
u16 reg;
@@ -1202,7 +1207,7 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
** reset.
**/
-s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
@@ -1295,6 +1300,28 @@ mac_reset_top:
return status;
}
+/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+ * anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
+ int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+ u32 pfvfspoof;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
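
A standalone sketch of the VF-to-register/bit mapping used by ixgbe_set_ethertype_anti_spoofing_X550() above: eight VFs share one PFVFSPOOF register, and the Ethertype anti-spoof bits start at IXGBE_SPOOF_ETHERTYPEAS_SHIFT. The register file is modelled with a plain array instead of IXGBE_READ_REG/IXGBE_WRITE_REG:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SPOOF_ETHERTYPEAS_SHIFT	16	/* value from the ixgbe_type.h hunk above */
#define PFVFSPOOF_REG_COUNT	8

static uint32_t pfvfspoof[PFVFSPOOF_REG_COUNT];	/* stand-in for the HW registers */

static void set_ethertype_anti_spoofing(int vf, bool enable)
{
	int reg = vf >> 3;				/* 8 VFs per register  */
	int shift = vf % 8 + SPOOF_ETHERTYPEAS_SHIFT;	/* bit within register */

	if (enable)
		pfvfspoof[reg] |= 1u << shift;
	else
		pfvfspoof[reg] &= ~(1u << shift);
}

int main(void)
{
	set_ethertype_anti_spoofing(13, true);
	/* VF 13 -> register 1, bit (13 % 8) + 16 = 21 */
	printf("PFVFSPOOF[1] = 0x%08x\n", pfvfspoof[1]);
	return 0;
}
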
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -1329,6 +1356,8 @@ mac_reset_top:
.init_uta_tables = &ixgbe_init_uta_tables_generic, \
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
+ .set_ethertype_anti_spoofing = \
+ &ixgbe_set_ethertype_anti_spoofing_X550, \
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
.release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
@@ -1345,7 +1374,6 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.setup_link = &ixgbe_setup_mac_link_X540,
- .set_rxpba = &ixgbe_set_rxpba_generic,
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.setup_sfp = NULL,
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8c44ab25f3fa..3a9b356dff01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -43,6 +43,13 @@
#define BP_EXTENDED_STATS
#endif
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
@@ -85,6 +92,18 @@ struct ixgbevf_rx_queue_stats {
u64 csum_err;
};
+enum ixgbevf_ring_state_t {
+ __IXGBEVF_TX_DETECT_HANG,
+ __IXGBEVF_HANG_CHECK_ARMED,
+};
+
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
@@ -101,7 +120,7 @@ struct ixgbevf_ring {
struct ixgbevf_tx_buffer *tx_buffer_info;
struct ixgbevf_rx_buffer *rx_buffer_info;
};
-
+ unsigned long state;
struct ixgbevf_stats stats;
struct u64_stats_sync syncp;
union {
@@ -124,6 +143,7 @@ struct ixgbevf_ring {
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
+#define IXGBEVF_MAX_RSS_QUEUES 2
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -347,8 +367,6 @@ struct ixgbevf_adapter {
/* this field must be first, see ixgbevf_process_skb_fields */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct timer_list watchdog_timer;
- struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
/* Interrupt Throttle Rate */
@@ -378,8 +396,7 @@ struct ixgbevf_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
-
+#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
struct msix_entry *msix_entries;
@@ -415,9 +432,11 @@ struct ixgbevf_adapter {
u32 link_speed;
bool link_up;
- spinlock_t mbx_lock;
+ struct timer_list service_timer;
+ struct work_struct service_task;
- struct work_struct watchdog_task;
+ spinlock_t mbx_lock;
+ unsigned long last_reset;
};
enum ixbgevf_state_t {
@@ -426,7 +445,8 @@ enum ixbgevf_state_t {
__IXGBEVF_DOWN,
__IXGBEVF_DISABLED,
__IXGBEVF_REMOVING,
- __IXGBEVF_WORK_INIT,
+ __IXGBEVF_SERVICE_SCHED,
+ __IXGBEVF_SERVICE_INITED,
};
enum ixgbevf_boards {
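
A worked example for the TXD_USE_COUNT()/DESC_NEEDED macros that this diff moves into ixgbevf.h: each Tx descriptor carries at most 2^IXGBE_MAX_TXD_PWR = 16384 bytes, so a buffer of S bytes needs DIV_ROUND_UP(S, 16384) descriptors. MAX_SKB_FRAGS is fixed at 17 here purely for illustration; the real value comes from linux/skbuff.h:

#include <stdio.h>

#define MAX_TXD_PWR		14
#define MAX_DATA_PER_TXD	(1u << MAX_TXD_PWR)	/* 16384 bytes */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), MAX_DATA_PER_TXD)

#define MAX_SKB_FRAGS		17			/* illustrative value */
#define DESC_NEEDED		(MAX_SKB_FRAGS + 4)

int main(void)
{
	printf("9000 B  -> %u descriptor(s)\n", TXD_USE_COUNT(9000u));
	printf("40000 B -> %u descriptor(s)\n", TXD_USE_COUNT(40000u));
	printf("keep %d free slots before stopping the queue\n", DESC_NEEDED);
	return 0;
}
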
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 62a0d8e0f17d..4186981e562d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -98,6 +98,23 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
+{
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
+ !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
+ schedule_work(&adapter->service_task);
+}
+
+static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
+{
+ BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
+}
+
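
The two helpers above gate the new service task: __IXGBEVF_SERVICE_SCHED ensures at most one task is queued at a time, and the completion path clears it once the subtasks have run. A userspace sketch of that guard, with C11 atomic_flag standing in for test_and_set_bit()/clear_bit() (the kernel side additionally needs smp_mb__before_atomic() and the DOWN/REMOVING checks shown above):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag service_sched = ATOMIC_FLAG_INIT;
static atomic_bool adapter_down;

static bool service_event_schedule(void)
{
	if (atomic_load(&adapter_down))
		return false;			/* don't queue while down */
	if (atomic_flag_test_and_set(&service_sched))
		return false;			/* a task is already queued */
	/* here the kernel calls schedule_work(&adapter->service_task) */
	return true;
}

static void service_event_complete(void)
{
	atomic_flag_clear(&service_sched);	/* allow the next schedule */
}

int main(void)
{
	printf("first schedule:  %d\n", service_event_schedule());	/* 1 */
	printf("second schedule: %d\n", service_event_schedule());	/* 0 */
	service_event_complete();
	printf("after complete:  %d\n", service_event_schedule());	/* 1 */
	return 0;
}
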
/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
@@ -111,8 +128,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
return;
hw->hw_addr = NULL;
dev_err(&adapter->pdev->dev, "Adapter removed\n");
- if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
- schedule_work(&adapter->watchdog_task);
+ if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
+ ixgbevf_service_event_schedule(adapter);
}
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -199,14 +216,72 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
/* tx_buffer must be completely set up in the transmit path */
}
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
+{
+ return ring->stats.packets;
+}
+
+static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
+{
+ u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+ u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang.
+ */
+ if ((tx_done_old == tx_done) && tx_pending) {
+ /* make sure it is true for two checks in a row */
+ return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ }
+ /* reset the countdown */
+ clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
+
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+
+ return false;
+}
+
+static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
+{
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ ixgbevf_service_event_schedule(adapter);
+ }
+}
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-static void ixgbevf_tx_timeout(struct net_device *netdev);
+ ixgbevf_tx_timeout_reset(adapter);
+}
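
ixgbevf_check_tx_hang() above reports a hang only when two consecutive checks see no completion progress while packets are still pending; the HANG_CHECK_ARMED bit records the first such check. A standalone model of that two-pass logic, with plain fields standing in for the ring state bits:

#include <stdbool.h>
#include <stdio.h>

struct ring_model {
	unsigned int tx_done;		/* packets completed so far     */
	unsigned int tx_done_old;	/* snapshot from previous check */
	unsigned int tx_pending;	/* descriptors still in flight  */
	bool hang_check_armed;
};

static bool check_tx_hang(struct ring_model *r)
{
	if (r->tx_done_old == r->tx_done && r->tx_pending) {
		/* no progress: arm on the first pass, report on the second */
		bool was_armed = r->hang_check_armed;

		r->hang_check_armed = true;
		return was_armed;
	}

	/* progress was made: disarm and take a fresh snapshot */
	r->hang_check_armed = false;
	r->tx_done_old = r->tx_done;
	return false;
}

int main(void)
{
	struct ring_model r = { .tx_done = 10, .tx_done_old = 10, .tx_pending = 4 };

	printf("check 1: %d (arms only)\n", check_tx_hang(&r));
	printf("check 2: %d (hang reported)\n", check_tx_hang(&r));
	return 0;
}
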
/**
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
@@ -311,6 +386,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *eop_desc;
+
+ eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
+
+ pr_err("Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " next_to_watch <%p>\n"
+ " eop_desc->wb.status <%x>\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, i,
+ eop_desc, (eop_desc ? eop_desc->wb.status : 0),
+ tx_ring->tx_buffer_info[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ /* schedule immediate reset if we believe we hung */
+ ixgbevf_tx_timeout_reset(adapter);
+
+ return true;
+ }
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -1158,9 +1264,7 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
hw->mac.get_link_status = 1;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
- !test_bit(__IXGBEVF_REMOVING, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies);
+ ixgbevf_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1479,6 +1583,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (1 << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
+ clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
+
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
/* poll to verify queue is enabled */
@@ -1584,6 +1690,39 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
reg_idx);
}
+static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vfmrqc = 0, vfreta = 0;
+ u32 rss_key[10];
+ u16 rss_i = adapter->num_rx_queues;
+ int i, j;
+
+ /* Fill out hash function seeds */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+
+ /* Fill out redirection table */
+ for (i = 0, j = 0; i < 64; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+ vfreta = (vfreta << 8) | (j * 0x1);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+ }
+
+ /* Perform hash on these packet types */
+ vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
+ IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_VFMRQC_RSS_FIELD_IPV6 |
+ IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
+
+ vfmrqc |= IXGBE_VFMRQC_RSSEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
+}
+
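
A standalone sketch of the redirection-table fill in ixgbevf_setup_vfmrqc() above: 64 one-byte queue indices are packed four per 32-bit VFRETA register, cycling through rss_i queues. An array stands in for the IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta) calls:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vfreta_regs[16] = { 0 };
	uint32_t vfreta = 0;
	unsigned int rss_i = 2;		/* IXGBEVF_MAX_RSS_QUEUES */
	unsigned int i, j;

	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == rss_i)
			j = 0;			/* wrap back to queue 0    */
		vfreta = (vfreta << 8) | j;	/* pack one entry per byte */
		if ((i & 3) == 3)		/* every 4th entry: flush  */
			vfreta_regs[i >> 2] = vfreta;
	}

	/* with rss_i == 2 each register alternates queues 0 and 1 */
	printf("VFRETA[0] = 0x%08x\n", vfreta_regs[0]);
	return 0;
}
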
static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring)
{
@@ -1640,6 +1779,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
ixgbevf_setup_psrtype(adapter);
+ if (hw->mac.type >= ixgbe_mac_X550_vf)
+ ixgbevf_setup_vfmrqc(adapter);
/* notify the PF of our intent to use this size of frame */
ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1794,7 +1935,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
unsigned int def_q = 0;
unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
+ unsigned int num_rx_queues = adapter->num_rx_queues;
+ unsigned int num_tx_queues = adapter->num_tx_queues;
int err;
spin_lock_bh(&adapter->mbx_lock);
@@ -1808,6 +1950,9 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
return err;
if (num_tcs > 1) {
+ /* we need only one Tx queue */
+ num_tx_queues = 1;
+
/* update default Tx ring register index */
adapter->tx_ring[0]->reg_idx = def_q;
@@ -1816,7 +1961,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
}
/* if we have a bad config abort request queue reset */
- if (adapter->num_rx_queues != num_rx_queues) {
+ if ((adapter->num_rx_queues != num_rx_queues) ||
+ (adapter->num_tx_queues != num_tx_queues)) {
/* force mailbox timeout to prevent further messages */
hw->mbx.timeout = 0;
@@ -1917,6 +2063,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ ixgbevf_irq_enable(adapter);
+
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -1924,21 +2074,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_init_last_counter_stats(adapter);
hw->mac.get_link_status = 1;
- mod_timer(&adapter->watchdog_timer, jiffies);
+ mod_timer(&adapter->service_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
-
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
-
- /* clear any pending interrupts, may auto mask */
- IXGBE_READ_REG(hw, IXGBE_VTEICR);
-
- ixgbevf_irq_enable(adapter);
}
/**
@@ -2045,22 +2188,19 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
- netif_tx_disable(netdev);
-
- msleep(10);
+ usleep_range(10000, 20000);
netif_tx_stop_all_queues(netdev);
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
ixgbevf_irq_disable(adapter);
ixgbevf_napi_disable_all(adapter);
- del_timer_sync(&adapter->watchdog_timer);
- /* can't call flush scheduled work here because it can deadlock
- * if linkwatch_event tries to acquire the rtnl_lock which we are
- * holding */
- while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
- msleep(1);
+ del_timer_sync(&adapter->service_timer);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2070,8 +2210,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
IXGBE_TXDCTL_SWFLSH);
}
- netif_carrier_off(netdev);
-
if (!pci_channel_offline(adapter->pdev))
ixgbevf_reset(adapter);
@@ -2110,6 +2248,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
memcpy(netdev->perm_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
+
+ adapter->last_reset = jiffies;
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
@@ -2181,8 +2321,19 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
return;
/* we need as many queues as traffic classes */
- if (num_tcs > 1)
+ if (num_tcs > 1) {
adapter->num_rx_queues = num_tcs;
+ } else {
+ u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ adapter->num_rx_queues = rss;
+ adapter->num_tx_queues = rss;
+ default:
+ break;
+ }
+ }
}
/**
@@ -2552,7 +2703,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i;
- if (!adapter->link_up)
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
@@ -2576,79 +2728,176 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
}
/**
- * ixgbevf_watchdog - Timer Call-back
+ * ixgbevf_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void ixgbevf_watchdog(unsigned long data)
+static void ixgbevf_service_timer(unsigned long data)
{
struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+
+ /* Reset the timer */
+ mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
+
+ ixgbevf_service_event_schedule(adapter);
+}
+
+static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+ if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+ return;
+
+ adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter - pointer to the device adapter structure
+ *
+ * This function serves two purposes. First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring. Secondly it sets the
+ * bits needed to check for TX hangs. As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
+{
struct ixgbe_hw *hw = &adapter->hw;
u32 eics = 0;
int i;
- /*
- * Do the watchdog outside of interrupt context due to the lovely
- * delays that some of the newer hardware requires
- */
+ /* If we're down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
- if (test_bit(__IXGBEVF_DOWN, &adapter->state))
- goto watchdog_short_circuit;
+ /* Force detection of hung controller */
+ if (netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_check_for_tx_hang(adapter->tx_ring[i]);
+ }
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+
if (qv->rx.ring || qv->tx.ring)
eics |= 1 << i;
}
+ /* Cause software interrupt to ensure rings are cleaned */
IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
+}
-watchdog_short_circuit:
- schedule_work(&adapter->watchdog_task);
+/**
+ * ixgbevf_watchdog_update_link - update the link status
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+ s32 err;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ /* if check for link returns error we will need to reset */
+ if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
+ adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ link_up = false;
+ }
+
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
}
/**
- * ixgbevf_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
+ * ixgbevf_watchdog_link_is_up - update netif_carrier status and
+ * print link up message
+ * @adapter - pointer to the device adapter structure
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct net_device *netdev = adapter->netdev;
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
+ /* only continue if link was previously down */
+ if (netif_carrier_ok(netdev))
+ return;
+
+ dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
+ (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ "10 Gbps" :
+ (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
+ "1 Gbps" :
+ (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
+ "100 Mbps" :
+ "unknown speed");
+
+ netif_carrier_on(netdev);
}
-static void ixgbevf_reset_task(struct work_struct *work)
+/**
+ * ixgbevf_watchdog_link_is_down - update netif_carrier status and
+ * print link down message
+ * @adapter - pointer to the adapter structure
+ **/
+static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
- struct ixgbevf_adapter *adapter;
- adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+ struct net_device *netdev = adapter->netdev;
- /* If we're already down or resetting, just bail */
+ adapter->link_speed = 0;
+
+ /* only continue if link was up previously */
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+
+ netif_carrier_off(netdev);
+}
+
+/**
+ * ixgbevf_watchdog_subtask - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
+{
+ /* if interface is down do nothing */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
- test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
- adapter->tx_timeout_count++;
+ ixgbevf_watchdog_update_link(adapter);
- ixgbevf_reinit_locked(adapter);
+ if (adapter->link_up)
+ ixgbevf_watchdog_link_is_up(adapter);
+ else
+ ixgbevf_watchdog_link_is_down(adapter);
+
+ ixgbevf_update_stats(adapter);
}
/**
- * ixgbevf_watchdog_task - worker thread to bring link up
+ * ixgbevf_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
**/
-static void ixgbevf_watchdog_task(struct work_struct *work)
+static void ixgbevf_service_task(struct work_struct *work)
{
struct ixgbevf_adapter *adapter = container_of(work,
struct ixgbevf_adapter,
- watchdog_task);
- struct net_device *netdev = adapter->netdev;
+ service_task);
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
- s32 need_reset;
if (IXGBE_REMOVED(hw->hw_addr)) {
if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
@@ -2658,73 +2907,13 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
}
return;
}
- ixgbevf_queue_reset_subtask(adapter);
-
- adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
-
- /*
- * Always check the link on the watchdog because we have
- * no LSC interrupt
- */
- spin_lock_bh(&adapter->mbx_lock);
-
- need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (need_reset) {
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- schedule_work(&adapter->reset_task);
- goto pf_has_reset;
- }
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
-
- if (link_up) {
- if (!netif_carrier_ok(netdev)) {
- char *link_speed_string;
- switch (link_speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- link_speed_string = "10 Gbps";
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- link_speed_string = "1 Gbps";
- break;
- case IXGBE_LINK_SPEED_100_FULL:
- link_speed_string = "100 Mbps";
- break;
- default:
- link_speed_string = "unknown speed";
- break;
- }
- dev_info(&adapter->pdev->dev,
- "NIC Link is Up, %s\n", link_speed_string);
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- }
- } else {
- adapter->link_up = false;
- adapter->link_speed = 0;
- if (netif_carrier_ok(netdev)) {
- dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
- }
- ixgbevf_update_stats(adapter);
-
-pf_has_reset:
- /* Reset the timer */
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
- !test_bit(__IXGBEVF_REMOVING, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + (2 * HZ)));
+ ixgbevf_queue_reset_subtask(adapter);
+ ixgbevf_reset_subtask(adapter);
+ ixgbevf_watchdog_subtask(adapter);
+ ixgbevf_check_hang_subtask(adapter);
- adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ ixgbevf_service_event_complete(adapter);
}
/**
@@ -2944,10 +3133,6 @@ static int ixgbevf_open(struct net_device *netdev)
if (!adapter->num_msix_vectors)
return -ENOMEM;
- /* disallow open during test */
- if (test_bit(__IXGBEVF_TESTING, &adapter->state))
- return -EBUSY;
-
if (hw->adapter_stopped) {
ixgbevf_reset(adapter);
/* if adapter is still stopped then PF isn't up and
@@ -2960,6 +3145,12 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2979,15 +3170,11 @@ static int ixgbevf_open(struct net_device *netdev)
*/
ixgbevf_map_rings_to_vectors(adapter);
- ixgbevf_up_complete(adapter);
-
- /* clear any pending interrupts, may auto mask */
- IXGBE_READ_REG(hw, IXGBE_VTEICR);
err = ixgbevf_request_irq(adapter);
if (err)
goto err_req_irq;
- ixgbevf_irq_enable(adapter);
+ ixgbevf_up_complete(adapter);
return 0;
@@ -3099,7 +3286,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -3156,7 +3343,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
- switch (skb->protocol) {
+ switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3452,8 +3639,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
first->bytecount = skb->len;
first->gso_segs = 1;
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
@@ -3822,28 +4009,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
netdev->priv_flags |= IFF_UNICAST_FLT;
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = ixgbevf_watchdog;
- adapter->watchdog_timer.data = (unsigned long)adapter;
-
if (IXGBE_REMOVED(hw->hw_addr)) {
err = -EIO;
goto err_sw_init;
}
- INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
- INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
- set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
+
+ setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
+ (unsigned long)adapter);
+
+ INIT_WORK(&adapter->service_task, ixgbevf_service_task);
+ set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
+ clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
err = ixgbevf_init_interrupt_scheme(adapter);
if (err)
@@ -3917,11 +4104,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
adapter = netdev_priv(netdev);
set_bit(__IXGBEVF_REMOVING, &adapter->state);
-
- del_timer_sync(&adapter->watchdog_timer);
-
- cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->service_task);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
@@ -3955,7 +4138,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+ if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
return PCI_ERS_RESULT_DISCONNECT;
rtnl_lock();
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 09dd8f698bea..3e712fd6e695 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -69,6 +69,16 @@
#define IXGBE_VFGOTC_LSB 0x02020
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFMRQC 0x3000
+#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
+
+/* VFMRQC bits */
+#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 44ce7d88f554..6e9a792097d3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
static inline void
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
*flags |= TXFLAG_TAGON;
- *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_dma) && \
(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
/*
* RX/TX descriptors.
*/
@@ -362,6 +366,7 @@ struct tx_queue {
dma_addr_t tso_hdrs_dma;
struct tx_desc *tx_desc_area;
+ char *tx_desc_mapping; /* array to track the type of the dma mapping */
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->l4i_chk = 0;
desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
- void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
- addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
/*
* The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0, desc->byte_cnt,
+ DMA_TO_DEVICE);
}
}
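
The hunks above tag every Tx descriptor with how its buffer was mapped, and the txq_reclaim() hunk further down uses that tag to choose between dma_unmap_page() and dma_unmap_single(). A minimal sketch of the bookkeeping, with printouts in place of the real unmap calls:

#include <stdio.h>

#define DESC_DMA_MAP_SINGLE	0
#define DESC_DMA_MAP_PAGE	1
#define RING_SIZE		8

static char tx_desc_mapping[RING_SIZE];	/* one tag per descriptor */

static void record_mapping(int tx_index, char type)
{
	tx_desc_mapping[tx_index] = type;
}

static void reclaim_one(int tx_index)
{
	if (tx_desc_mapping[tx_index] == DESC_DMA_MAP_PAGE)
		printf("slot %d: dma_unmap_page()\n", tx_index);
	else
		printf("slot %d: dma_unmap_single()\n", tx_index);
}

int main(void)
{
	record_mapping(0, DESC_DMA_MAP_SINGLE);	/* linear skb data   */
	record_mapping(1, DESC_DMA_MAP_PAGE);	/* skb page fragment */
	reclaim_one(0);
	reclaim_one(1);
	return 0;
}
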
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
if (nr_frags) {
txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
+ char desc_dma_map;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
+ desc_dma_map = txq->tx_desc_mapping[tx_index];
+
cmd_sts = desc->cmd_sts;
if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+ if (desc_dma_map == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ }
if (cmd_sts & TX_ENABLE_INTERRUPT) {
struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
struct tx_queue *txq = mp->txq + index;
struct tx_desc *tx_desc;
int size;
+ int ret;
int i;
txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+ GFP_KERNEL);
+ if (!txq->tx_desc_mapping) {
+ ret = -ENOMEM;
+ goto err_free_desc_area;
+ }
+
/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma, GFP_KERNEL);
if (txq->tso_hdrs == NULL) {
- dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
- txq->tx_desc_area, txq->tx_desc_dma);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_desc_mapping;
}
skb_queue_head_init(&txq->tx_skb);
return 0;
+
+err_free_desc_mapping:
+ kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+ if (index == 0 && size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return ret;
}
static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ kfree(txq->tx_desc_mapping);
+
if (txq->tso_hdrs)
dma_free_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 867a6a3ef81f..d9f4498832a1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
ctrl = 0;
/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
if (!le) {
le = get_tx_le(sky2, &slot);
le->addr = 0;
le->opcode = OP_VLAN|HW_OWNER;
} else
le->opcode |= OP_VLAN;
- le->length = cpu_to_be16(vlan_tx_tag_get(skb));
+ le->length = cpu_to_be16(skb_vlan_tag_get(skb));
ctrl |= INS_VLAN;
}
@@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
- if (vlan_tx_tag_present(re->skb))
+ if (skb_vlan_tag_present(re->skb))
count -= VLAN_HLEN; /* Account for vlan tag */
/* This chip has hardware problems that generates bogus status.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 963dd7e6d547..0c51c69f802f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->nbufs = 1;
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+ buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
size, &t, gfp);
if (!buf->direct.buf)
return -ENOMEM;
@@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE,
&t, gfp);
if (!buf->page_list[i].buf)
goto err_free;
@@ -657,15 +658,17 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
int i;
if (buf->nbufs == 1)
- dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+ dma_free_coherent(&dev->persist->pdev->dev, size,
+ buf->direct.buf,
buf->direct.map);
else {
- if (BITS_PER_LONG == 64 && buf->direct.buf)
+ if (BITS_PER_LONG == 64)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
@@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
- pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
+ pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
set_bit(i, db->u.pgdir->bits[o]);
if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
kfree(db->u.pgdir);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 9c656fe4983d..715de8affcc9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -40,16 +40,177 @@ enum {
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
};
-static DEFINE_SPINLOCK(catas_lock);
-static LIST_HEAD(catas_list);
-static struct work_struct catas_work;
-static int internal_err_reset = 1;
-module_param(internal_err_reset, int, 0644);
+int mlx4_internal_err_reset = 1;
+module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
- "Reset device on internal errors if non-zero"
- " (default 1, in SRIOV mode default is 0)");
+ "Reset device on internal errors if non-zero (default 1)");
+
+static int read_vendor_id(struct mlx4_dev *dev)
+{
+ u16 vendor_id = 0;
+ int ret;
+
+ ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id);
+ if (ret) {
+ mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret);
+ return ret;
+ }
+
+ if (vendor_id == 0xffff) {
+ mlx4_err(dev, "PCI can't be accessed to read vendor id\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx4_reset_master(struct mlx4_dev *dev)
+{
+ int err = 0;
+
+ if (mlx4_is_master(dev))
+ mlx4_report_internal_err_comm_event(dev);
+
+ if (!pci_channel_offline(dev->persist->pdev)) {
+ err = read_vendor_id(dev);
+ /* If PCI can't be accessed to read vendor ID we assume that its
+ * link was disabled and chip was already reset.
+ */
+ if (err)
+ return 0;
+
+ err = mlx4_reset(dev);
+ if (err)
+ mlx4_err(dev, "Fail to reset HCA\n");
+ }
+
+ return err;
+}
+
+static int mlx4_reset_slave(struct mlx4_dev *dev)
+{
+#define COM_CHAN_RST_REQ_OFFSET 0x10
+#define COM_CHAN_RST_ACK_OFFSET 0x08
+
+ u32 comm_flags;
+ u32 rst_req;
+ u32 rst_ack;
+ unsigned long end;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (pci_channel_offline(dev->persist->pdev))
+ return 0;
+
+ comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
+ MLX4_COMM_CHAN_FLAGS));
+ if (comm_flags == 0xffffffff) {
+ mlx4_err(dev, "VF reset is not needed\n");
+ return 0;
+ }
+
+ if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) {
+ mlx4_err(dev, "VF reset is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
+ COM_CHAN_RST_REQ_OFFSET;
+ rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
+ COM_CHAN_RST_ACK_OFFSET;
+ if (rst_req != rst_ack) {
+ mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n");
+ return -EIO;
+ }
+
+ rst_req ^= 1;
+ mlx4_warn(dev, "VF is sending reset request to Firmware\n");
+ comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
+ __raw_writel((__force u32)cpu_to_be32(comm_flags),
+ (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
+ /* Make sure that our comm channel write doesn't
+ * get mixed in with writes from another CPU.
+ */
+ mmiowb();
+
+ end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
+ while (time_before(jiffies, end)) {
+ comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
+ MLX4_COMM_CHAN_FLAGS));
+ rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
+ COM_CHAN_RST_ACK_OFFSET;
+
+ /* Reading rst_req again since the communication channel can
+ * be reset at any time by the PF and all its bits will be
+ * set to zero.
+ */
+ rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
+ COM_CHAN_RST_REQ_OFFSET;
+
+ if (rst_ack == rst_req) {
+ mlx4_warn(dev, "VF Reset succeed\n");
+ return 0;
+ }
+ cond_resched();
+ }
+ mlx4_err(dev, "Fail to send reset over the communication channel\n");
+ return -ETIMEDOUT;
+}
+
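
mlx4_reset_slave() above drives a toggle handshake over the communication channel: the VF flips the RST_REQ bit and then polls until firmware has flipped RST_ACK to the same value. A userspace model of that protocol, with a plain variable standing in for MLX4_COMM_CHAN_FLAGS and the firmware acknowledge simulated inside the poll loop:

#include <stdio.h>
#include <stdint.h>

#define COM_CHAN_RST_REQ_OFFSET	0x10
#define COM_CHAN_RST_ACK_OFFSET	0x08

static uint32_t comm_flags;	/* stand-in for the comm channel register */

int main(void)
{
	uint32_t rst_req = (comm_flags >> COM_CHAN_RST_REQ_OFFSET) & 1;
	uint32_t rst_ack = (comm_flags >> COM_CHAN_RST_ACK_OFFSET) & 1;
	int polls;

	if (rst_req != rst_ack) {
		printf("channel out of sync, cannot request reset\n");
		return 1;
	}

	rst_req ^= 1;					/* flip the request bit */
	comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;

	for (polls = 0; polls < 10; polls++) {
		if (polls == 3)				/* simulated firmware ack */
			comm_flags |= rst_req << COM_CHAN_RST_ACK_OFFSET;
		rst_ack = (comm_flags >> COM_CHAN_RST_ACK_OFFSET) & 1;
		if (rst_ack == rst_req) {
			printf("reset acknowledged after %d polls\n", polls + 1);
			return 0;
		}
	}
	printf("reset request timed out\n");
	return 1;
}
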
+static int mlx4_comm_internal_err(u32 slave_read)
+{
+ return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
+ (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
+}
+
+void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
+{
+ int err;
+ struct mlx4_dev *dev;
+
+ if (!mlx4_internal_err_reset)
+ return;
+
+ mutex_lock(&persist->device_state_mutex);
+ if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ goto out;
+
+ dev = persist->dev;
+ mlx4_err(dev, "device is going to be reset\n");
+ if (mlx4_is_slave(dev))
+ err = mlx4_reset_slave(dev);
+ else
+ err = mlx4_reset_master(dev);
+ BUG_ON(err != 0);
+
+ dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
+ mlx4_err(dev, "device was reset successfully\n");
+ mutex_unlock(&persist->device_state_mutex);
+
+ /* At that step HW was already reset, now notify clients */
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+ mlx4_cmd_wake_completions(dev);
+ return;
+
+out:
+ mutex_unlock(&persist->device_state_mutex);
+}
+
+static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
+{
+ int err = 0;
+
+ mlx4_enter_error_state(persist);
+ mutex_lock(&persist->interface_state_mutex);
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
+ !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
+ err = mlx4_restart_one(persist->pdev);
+ mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
+ err);
+ }
+ mutex_unlock(&persist->interface_state_mutex);
+}
static void dump_err_buf(struct mlx4_dev *dev)
{
@@ -67,58 +228,40 @@ static void poll_catas(unsigned long dev_ptr)
{
struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 slave_read;
- if (readl(priv->catas_err.map)) {
- /* If the device is off-line, we cannot try to recover it */
- if (pci_channel_offline(dev->pdev))
- mod_timer(&priv->catas_err.timer,
- round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
- else {
- dump_err_buf(dev);
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
-
- if (internal_err_reset) {
- spin_lock(&catas_lock);
- list_add(&priv->catas_err.list, &catas_list);
- spin_unlock(&catas_lock);
-
- queue_work(mlx4_wq, &catas_work);
- }
+ if (mlx4_is_slave(dev)) {
+ slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+ if (mlx4_comm_internal_err(slave_read)) {
+ mlx4_warn(dev, "Internal error detected on the communication channel\n");
+ goto internal_err;
}
- } else
- mod_timer(&priv->catas_err.timer,
- round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+ } else if (readl(priv->catas_err.map)) {
+ dump_err_buf(dev);
+ goto internal_err;
+ }
+
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx4_warn(dev, "Internal error mark was detected on device\n");
+ goto internal_err;
+ }
+
+ mod_timer(&priv->catas_err.timer,
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+ return;
+
+internal_err:
+ if (mlx4_internal_err_reset)
+ queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
}
static void catas_reset(struct work_struct *work)
{
- struct mlx4_priv *priv, *tmppriv;
- struct mlx4_dev *dev;
+ struct mlx4_dev_persistent *persist =
+ container_of(work, struct mlx4_dev_persistent,
+ catas_work);
- LIST_HEAD(tlist);
- int ret;
-
- spin_lock_irq(&catas_lock);
- list_splice_init(&catas_list, &tlist);
- spin_unlock_irq(&catas_lock);
-
- list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
- struct pci_dev *pdev = priv->dev.pdev;
-
- /* If the device is off-line, we cannot reset it */
- if (pci_channel_offline(pdev))
- continue;
-
- ret = mlx4_restart_one(priv->dev.pdev);
- /* 'priv' now is not valid */
- if (ret)
- pr_err("mlx4 %s: Reset failed (%d)\n",
- pci_name(pdev), ret);
- else {
- dev = pci_get_drvdata(pdev);
- mlx4_dbg(dev, "Reset succeeded\n");
- }
- }
+ mlx4_handle_error_state(persist);
}
void mlx4_start_catas_poll(struct mlx4_dev *dev)
@@ -126,22 +269,21 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
- /*If we are in SRIOV the default of the module param must be 0*/
- if (mlx4_is_mfunc(dev))
- internal_err_reset = 0;
-
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
- addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
- priv->fw.catas_offset;
+ if (!mlx4_is_slave(dev)) {
+ addr = pci_resource_start(dev->persist->pdev,
+ priv->fw.catas_bar) +
+ priv->fw.catas_offset;
- priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
- if (!priv->catas_err.map) {
- mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
- (unsigned long long) addr);
- return;
+ priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
+ if (!priv->catas_err.map) {
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+ (unsigned long long)addr);
+ return;
+ }
}
priv->catas_err.timer.data = (unsigned long) dev;
@@ -157,15 +299,29 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
del_timer_sync(&priv->catas_err.timer);
- if (priv->catas_err.map)
+ if (priv->catas_err.map) {
iounmap(priv->catas_err.map);
+ priv->catas_err.map = NULL;
+ }
- spin_lock_irq(&catas_lock);
- list_del(&priv->catas_err.list);
- spin_unlock_irq(&catas_lock);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION)
+ flush_workqueue(dev->persist->catas_wq);
}
-void __init mlx4_catas_init(void)
+int mlx4_catas_init(struct mlx4_dev *dev)
{
- INIT_WORK(&catas_work, catas_reset);
+ INIT_WORK(&dev->persist->catas_work, catas_reset);
+ dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health");
+ if (!dev->persist->catas_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_catas_end(struct mlx4_dev *dev)
+{
+ if (dev->persist->catas_wq) {
+ destroy_workqueue(dev->persist->catas_wq);
+ dev->persist->catas_wq = NULL;
+ }
}
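
A minimal standalone sketch of the polling decision in the reworked poll_catas() above, assuming simplified stand-in types (struct model_dev, the MODEL_COMM_INTERNAL_ERR bit position and the helpers below are illustrative, not the driver's definitions): a slave samples the comm-channel word, the master samples the catastrophic-error buffer, and both paths share the internal-error handling that queues recovery work instead of re-arming the poll timer.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's state; not the real structures. */
struct model_dev {
	bool     is_slave;
	uint32_t comm_slave_read;    /* models readl(&comm->slave_read)         */
	uint32_t catas_buf;          /* models readl(priv->catas_err.map)       */
	bool     internal_error;     /* models MLX4_DEVICE_STATE_INTERNAL_ERROR */
	bool     internal_err_reset; /* models the mlx4_internal_err_reset knob */
};

/* Hypothetical bit position for the internal-error comm event. */
#define MODEL_COMM_INTERNAL_ERR (1u << 17)

static void queue_recovery(struct model_dev *dev)
{
	(void)dev;
	printf("queue catas work (reset flow)\n");
}

static void rearm_timer(struct model_dev *dev)
{
	(void)dev;
	printf("re-arm poll timer\n");
}

static void poll_catas_model(struct model_dev *dev)
{
	if (dev->is_slave) {
		/* slave: the master signals fatal errors over the comm channel */
		if (dev->comm_slave_read & MODEL_COMM_INTERNAL_ERR)
			goto internal_err;
	} else if (dev->catas_buf) {
		/* master: a non-zero catastrophic error buffer is fatal */
		goto internal_err;
	}

	if (dev->internal_error)
		goto internal_err;	/* error already marked elsewhere */

	rearm_timer(dev);
	return;

internal_err:
	if (dev->internal_err_reset)
		queue_recovery(dev);
}

int main(void)
{
	struct model_dev dev = {
		.is_slave = false,
		.catas_buf = 0x1,
		.internal_err_reset = true,
	};

	poll_catas_model(&dev);	/* prints "queue catas work (reset flow)" */
	return 0;
}

The shared label keeps the timer re-arm as the only exit that does not schedule recovery, mirroring the structure of the hunk above.
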
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 5c93d1451c44..a681d7c0bb9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -42,6 +42,7 @@
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
+#include <linux/delay.h>
#include <asm/io.h>
@@ -182,6 +183,72 @@ static u8 mlx4_errno_to_status(int errno)
}
}
+static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
+ u8 op_modifier)
+{
+ switch (op) {
+ case MLX4_CMD_UNMAP_ICM:
+ case MLX4_CMD_UNMAP_ICM_AUX:
+ case MLX4_CMD_UNMAP_FA:
+ case MLX4_CMD_2RST_QP:
+ case MLX4_CMD_HW2SW_EQ:
+ case MLX4_CMD_HW2SW_CQ:
+ case MLX4_CMD_HW2SW_SRQ:
+ case MLX4_CMD_HW2SW_MPT:
+ case MLX4_CMD_CLOSE_HCA:
+ case MLX4_QP_FLOW_STEERING_DETACH:
+ case MLX4_CMD_FREE_RES:
+ case MLX4_CMD_CLOSE_PORT:
+ return CMD_STAT_OK;
+
+ case MLX4_CMD_QP_ATTACH:
+ /* In the detach case, return success */
+ if (op_modifier == 0)
+ return CMD_STAT_OK;
+ return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+
+ default:
+ return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+ }
+}
+
+static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
+{
+ /* Any error during the closing commands below is considered fatal */
+ if (op == MLX4_CMD_CLOSE_HCA ||
+ op == MLX4_CMD_HW2SW_EQ ||
+ op == MLX4_CMD_HW2SW_CQ ||
+ op == MLX4_CMD_2RST_QP ||
+ op == MLX4_CMD_HW2SW_SRQ ||
+ op == MLX4_CMD_SYNC_TPT ||
+ op == MLX4_CMD_UNMAP_ICM ||
+ op == MLX4_CMD_UNMAP_ICM_AUX ||
+ op == MLX4_CMD_UNMAP_FA)
+ return 1;
+ /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
+ * CMD_STAT_REG_BOUND.
+ * This status indicates that the memory region has memory windows bound to it,
+ * which may result from invalid user space usage and is not fatal.
+ */
+ if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
+ return 1;
+ return 0;
+}
+
+static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
+ int err)
+{
+ /* Only if the reset flow is really active is the return code based on
+ * the command; otherwise the current error code is returned.
+ */
+ if (mlx4_internal_err_reset) {
+ mlx4_enter_error_state(dev->persist);
+ err = mlx4_internal_err_ret_value(dev, op, op_modifier);
+ }
+
+ return err;
+}
+
static int comm_pending(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -190,16 +257,30 @@ static int comm_pending(struct mlx4_dev *dev)
return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
-static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u32 val;
+ /* To avoid writing to unknown addresses after the device state was
+ * changed to internal error and the function was reset,
+ * check the INTERNAL_ERROR flag which is updated under
+ * device_state_mutex lock.
+ */
+ mutex_lock(&dev->persist->device_state_mutex);
+
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ mutex_unlock(&dev->persist->device_state_mutex);
+ return -EIO;
+ }
+
priv->cmd.comm_toggle ^= 1;
val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
__raw_writel((__force u32) cpu_to_be32(val),
&priv->mfunc.comm->slave_write);
mmiowb();
+ mutex_unlock(&dev->persist->device_state_mutex);
+ return 0;
}
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
@@ -219,7 +300,13 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* Write command */
down(&priv->cmd.poll_sem);
- mlx4_comm_cmd_post(dev, cmd, param);
+ if (mlx4_comm_cmd_post(dev, cmd, param)) {
+ /* mlx4_comm_cmd_post returns with an error only when
+ * the device state is INTERNAL_ERROR
+ */
+ err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+ goto out;
+ }
end = msecs_to_jiffies(timeout) + jiffies;
while (comm_pending(dev) && time_before(jiffies, end))
@@ -231,18 +318,23 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
* is MLX4_DELAY_RESET_SLAVE*/
if ((MLX4_COMM_CMD_RESET == cmd)) {
err = MLX4_DELAY_RESET_SLAVE;
+ goto out;
} else {
- mlx4_warn(dev, "Communication channel timed out\n");
- err = -ETIMEDOUT;
+ mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
+ cmd);
+ err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
}
}
+ if (err)
+ mlx4_enter_error_state(dev->persist);
+out:
up(&priv->cmd.poll_sem);
return err;
}
-static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
- u16 param, unsigned long timeout)
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
+ u16 param, u16 op, unsigned long timeout)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
@@ -258,34 +350,49 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
- init_completion(&context->done);
+ reinit_completion(&context->done);
- mlx4_comm_cmd_post(dev, op, param);
+ if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
+ /* mlx4_comm_cmd_post returns with an error only when
+ * the device state is INTERNAL_ERROR
+ */
+ err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+ goto out;
+ }
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
- mlx4_warn(dev, "communication channel command 0x%x timed out\n",
- op);
- err = -EBUSY;
- goto out;
+ mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
+ vhcr_cmd, op);
+ goto out_reset;
}
err = context->result;
if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
- op, context->fw_status);
- goto out;
+ vhcr_cmd, context->fw_status);
+ if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
+ goto out_reset;
}
-out:
/* wait for comm channel ready
* this is necessary for prevention the race
* when switching between event to polling mode
+ * Skipping this section in case the device is in FATAL_ERROR state.
+ * In this state, no commands are sent via the comm channel until
+ * the device has returned from reset.
*/
- end = msecs_to_jiffies(timeout) + jiffies;
- while (comm_pending(dev) && time_before(jiffies, end))
- cond_resched();
+ if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+ }
+ goto out;
+out_reset:
+ err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+ mlx4_enter_error_state(dev->persist);
+out:
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
cmd->free_head = context - cmd->context;
@@ -296,10 +403,13 @@ out:
}
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
- unsigned long timeout)
+ u16 op, unsigned long timeout)
{
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+ return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
@@ -307,7 +417,7 @@ static int cmd_pending(struct mlx4_dev *dev)
{
u32 status;
- if (pci_channel_offline(dev->pdev))
+ if (pci_channel_offline(dev->persist->pdev))
return -EIO;
status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -323,17 +433,21 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
u32 __iomem *hcr = cmd->hcr;
- int ret = -EAGAIN;
+ int ret = -EIO;
unsigned long end;
- mutex_lock(&cmd->hcr_mutex);
-
- if (pci_channel_offline(dev->pdev)) {
+ mutex_lock(&dev->persist->device_state_mutex);
+ /* To avoid writing to unknown addresses after the device state was
+ * changed to internal error and the chip was reset,
+ * check the INTERNAL_ERROR flag which is updated under
+ * device_state_mutex lock.
+ */
+ if (pci_channel_offline(dev->persist->pdev) ||
+ (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
- ret = -EIO;
goto out;
}
@@ -342,12 +456,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
- if (pci_channel_offline(dev->pdev)) {
+ if (pci_channel_offline(dev->persist->pdev)) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
- ret = -EIO;
goto out;
}
@@ -391,7 +504,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
ret = 0;
out:
- mutex_unlock(&cmd->hcr_mutex);
+ if (ret)
+ mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
+ op, ret, in_param, in_modifier, op_modifier);
+ mutex_unlock(&dev->persist->device_state_mutex);
+
return ret;
}
@@ -428,8 +545,11 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
}
ret = mlx4_status_to_errno(vhcr->status);
}
+ if (ret &&
+ dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
} else {
- ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+ ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
MLX4_COMM_TIME + timeout);
if (!ret) {
if (out_is_imm) {
@@ -443,9 +563,14 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
}
}
ret = mlx4_status_to_errno(vhcr->status);
- } else
- mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
- op);
+ } else {
+ if (dev->persist->state &
+ MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ ret = mlx4_internal_err_ret_value(dev, op,
+ op_modifier);
+ else
+ mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
+ }
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -464,12 +589,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
down(&priv->cmd.poll_sem);
- if (pci_channel_offline(dev->pdev)) {
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
- err = -EIO;
+ err = mlx4_internal_err_ret_value(dev, op, op_modifier);
goto out;
}
@@ -483,16 +608,21 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
- goto out;
+ goto out_reset;
end = msecs_to_jiffies(timeout) + jiffies;
while (cmd_pending(dev) && time_before(jiffies, end)) {
- if (pci_channel_offline(dev->pdev)) {
+ if (pci_channel_offline(dev->persist->pdev)) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
err = -EIO;
+ goto out_reset;
+ }
+
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = mlx4_internal_err_ret_value(dev, op, op_modifier);
goto out;
}
@@ -502,8 +632,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
if (cmd_pending(dev)) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
- err = -ETIMEDOUT;
- goto out;
+ err = -EIO;
+ goto out_reset;
}
if (out_is_imm)
@@ -515,10 +645,17 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
stat = be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
err = mlx4_status_to_errno(stat);
- if (err)
+ if (err) {
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, stat);
+ if (mlx4_closing_cmd_fatal_error(op, stat))
+ goto out_reset;
+ goto out;
+ }
+out_reset:
+ if (err)
+ err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
up(&priv->cmd.poll_sem);
return err;
@@ -565,17 +702,19 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
goto out;
}
- init_completion(&context->done);
+ reinit_completion(&context->done);
- mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
- in_modifier, op_modifier, op, context->token, 1);
+ err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+ in_modifier, op_modifier, op, context->token, 1);
+ if (err)
+ goto out_reset;
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
- err = -EBUSY;
- goto out;
+ err = -EIO;
+ goto out_reset;
}
err = context->result;
@@ -592,12 +731,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
else
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ err = mlx4_internal_err_ret_value(dev, op, op_modifier);
+ else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
+ goto out_reset;
+
goto out;
}
if (out_is_imm)
*out_param = context->out_param;
+out_reset:
+ if (err)
+ err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
@@ -612,10 +759,13 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout, int native)
{
- if (pci_channel_offline(dev->pdev))
- return -EIO;
+ if (pci_channel_offline(dev->persist->pdev))
+ return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx4_internal_err_ret_value(dev, op,
+ op_modifier);
if (mlx4_priv(dev)->cmd.use_events)
return mlx4_cmd_wait(dev, in_param, out_param,
out_is_imm, in_modifier,
@@ -631,7 +781,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
EXPORT_SYMBOL_GPL(__mlx4_cmd);
-static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
@@ -751,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
index = be32_to_cpu(smp->attr_mod);
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
- table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+ table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
+ sizeof(*table) * 32, GFP_KERNEL);
+
if (!table)
return -ENOMEM;
/* need to get the full pkey table because the paravirtualized
@@ -1071,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
{
.opcode = MLX4_CMD_HW2SW_EQ,
.has_inbox = false,
- .has_outbox = true,
+ .has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
@@ -1431,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper
},
+ {
+ .opcode = MLX4_CMD_VIRT_PORT_MAP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CMD_EPERM_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
@@ -1460,8 +1621,10 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) {
- mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
- __func__, ret);
+ if (!(dev->persist->state &
+ MLX4_DEVICE_STATE_INTERNAL_ERROR))
+ mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+ __func__, ret);
kfree(vhcr);
return ret;
}
@@ -1500,11 +1663,14 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
goto out_status;
}
- if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
- vhcr->in_param,
- MLX4_MAILBOX_SIZE, 1)) {
- mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
- __func__, cmd->opcode);
+ ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+ vhcr->in_param,
+ MLX4_MAILBOX_SIZE, 1);
+ if (ret) {
+ if (!(dev->persist->state &
+ MLX4_DEVICE_STATE_INTERNAL_ERROR))
+ mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+ __func__, cmd->opcode);
vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
goto out_status;
}
@@ -1552,8 +1718,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
- mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
- vhcr->op, slave, vhcr->errno, err);
+ if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
}
@@ -1568,7 +1735,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* If we failed to write back the outbox after the
*command was successfully executed, we must fail this
* slave, as it is now in undefined state */
- mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+ if (!(dev->persist->state &
+ MLX4_DEVICE_STATE_INTERNAL_ERROR))
+ mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
goto out;
}
}
@@ -1847,8 +2016,11 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
break;
case MLX4_COMM_CMD_VHCR_POST:
if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
- (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+ (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
+ mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
+ slave, cmd, slave_state[slave].last_cmd);
goto reset_slave;
+ }
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
@@ -1882,7 +2054,18 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
reset_slave:
/* cleanup any slave resources */
- mlx4_delete_all_resources_for_slave(dev, slave);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_delete_all_resources_for_slave(dev, slave);
+
+ if (cmd != MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
+ slave, cmd);
+ /* Turn on internal error, letting the slave reset itself immediately;
+ * otherwise it might take until the timeout on the command has passed
+ */
+ reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
+ }
+
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
@@ -1958,17 +2141,28 @@ void mlx4_master_comm_channel(struct work_struct *work)
static int sync_toggles(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int wr_toggle;
- int rd_toggle;
+ u32 wr_toggle;
+ u32 rd_toggle;
unsigned long end;
- wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
- end = jiffies + msecs_to_jiffies(5000);
+ wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
+ if (wr_toggle == 0xffffffff)
+ end = jiffies + msecs_to_jiffies(30000);
+ else
+ end = jiffies + msecs_to_jiffies(5000);
while (time_before(jiffies, end)) {
- rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
- if (rd_toggle == wr_toggle) {
- priv->cmd.comm_toggle = rd_toggle;
+ rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
+ if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
+ /* PCI might be offline */
+ msleep(100);
+ wr_toggle = swab32(readl(&priv->mfunc.comm->
+ slave_write));
+ continue;
+ }
+
+ if (rd_toggle >> 31 == wr_toggle >> 31) {
+ priv->cmd.comm_toggle = rd_toggle >> 31;
return 0;
}
@@ -1997,11 +2191,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
if (mlx4_is_master(dev))
priv->mfunc.comm =
- ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+ ioremap(pci_resource_start(dev->persist->pdev,
+ priv->fw.comm_bar) +
priv->fw.comm_base, MLX4_COMM_PAGESIZE);
else
priv->mfunc.comm =
- ioremap(pci_resource_start(dev->pdev, 2) +
+ ioremap(pci_resource_start(dev->persist->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) {
mlx4_err(dev, "Couldn't map communication vector\n");
@@ -2073,13 +2268,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
if (mlx4_init_resource_tracker(dev))
goto err_thread;
- err = mlx4_ARM_COMM_CHANNEL(dev);
- if (err) {
- mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
- err);
- goto err_resource;
- }
-
} else {
err = sync_toggles(dev);
if (err) {
@@ -2089,8 +2277,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
}
return 0;
-err_resource:
- mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
err_thread:
flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
@@ -2107,9 +2293,9 @@ err_comm_admin:
err_comm:
iounmap(priv->mfunc.comm);
err_vhcr:
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
- priv->mfunc.vhcr,
- priv->mfunc.vhcr_dma);
+ dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
priv->mfunc.vhcr = NULL;
return -ENOMEM;
}
@@ -2120,7 +2306,6 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
int flags = 0;
if (!priv->cmd.initialized) {
- mutex_init(&priv->cmd.hcr_mutex);
mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
@@ -2130,8 +2315,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
}
if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
- priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
- MLX4_HCR_BASE, MLX4_HCR_SIZE);
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
+ 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
mlx4_err(dev, "Couldn't map command register\n");
goto err;
@@ -2140,7 +2325,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
}
if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
- priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
if (!priv->mfunc.vhcr)
@@ -2150,7 +2336,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
}
if (!priv->cmd.pool) {
- priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+ priv->cmd.pool = pci_pool_create("mlx4_cmd",
+ dev->persist->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
if (!priv->cmd.pool)
@@ -2166,6 +2353,27 @@ err:
return -ENOMEM;
}
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int slave;
+ u32 slave_read;
+
+ /* Report an internal error event to all
+ * communication channels.
+ */
+ for (slave = 0; slave < dev->num_slaves; slave++) {
+ slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
+ slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
+ __raw_writel((__force u32)cpu_to_be32(slave_read),
+ &priv->mfunc.comm[slave].slave_read);
+ /* Make sure that our comm channel write doesn't
+ * get mixed in with writes from another CPU.
+ */
+ mmiowb();
+ }
+}
+
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2181,6 +2389,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
kfree(priv->mfunc.master.slave_state);
kfree(priv->mfunc.master.vf_admin);
kfree(priv->mfunc.master.vf_oper);
+ dev->num_slaves = 0;
}
iounmap(priv->mfunc.comm);
@@ -2202,7 +2411,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
}
if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
(cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
priv->mfunc.vhcr = NULL;
}
@@ -2229,6 +2438,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
+ /* To support fatal error flow, initialize all
+ * cmd contexts to allow simulating completions
+ * with complete() at any time.
+ */
+ init_completion(&priv->cmd.context[i].done);
}
priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
@@ -2306,8 +2520,9 @@ u32 mlx4_comm_get_version(void)
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
- if ((vf < 0) || (vf >= dev->num_vfs)) {
- mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
+ if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
+ mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
+ vf, dev->persist->num_vfs);
return -EINVAL;
}
@@ -2316,7 +2531,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
- if (slave < 1 || slave > dev->num_vfs) {
+ if (slave < 1 || slave > dev->persist->num_vfs) {
mlx4_err(dev,
"Bad slave number:%d (number of activated slaves: %lu)\n",
slave, dev->num_slaves);
@@ -2325,6 +2540,25 @@ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
return slave - 1;
}
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_context *context;
+ int i;
+
+ spin_lock(&priv->cmd.context_lock);
+ if (priv->cmd.context) {
+ for (i = 0; i < priv->cmd.max_cmds; ++i) {
+ context = &priv->cmd.context[i];
+ context->fw_status = CMD_STAT_INTERNAL_ERR;
+ context->result =
+ mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+ complete(&context->done);
+ }
+ }
+ spin_unlock(&priv->cmd.context_lock);
+}
+
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
struct mlx4_active_ports actv_ports;
@@ -2388,7 +2622,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
if (port <= 0 || port > dev->caps.num_ports)
return slaves_pport;
- for (i = 0; i < dev->num_vfs + 1; i++) {
+ for (i = 0; i < dev->persist->num_vfs + 1; i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, i);
if (test_bit(port - 1, actv_ports.ports))
@@ -2408,7 +2642,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
- for (i = 0; i < dev->num_vfs + 1; i++) {
+ for (i = 0; i < dev->persist->num_vfs + 1; i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, i);
if (bitmap_equal(crit_ports->ports, actv_ports.ports,
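
The comm-channel changes above gate every post on the persistent INTERNAL_ERROR flag and rely on a toggle bit (bit 31) that the other side echoes back when it has consumed the command. A hedged standalone model of that handshake, with hypothetical field names and without the mutex/readl/writel/byte-swap plumbing of the real driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the comm-channel word: bit 31 is the toggle,
 * bits 30..16 the command, bits 15..0 the parameter.
 */
struct comm_model {
	uint32_t slave_write;	/* word the slave posts                    */
	uint32_t slave_read;	/* word the master echoes when done        */
	unsigned int toggle;	/* expected value of bit 31 (0 or 1)       */
	bool internal_error;	/* models MLX4_DEVICE_STATE_INTERNAL_ERROR */
};

static int comm_post(struct comm_model *c, uint8_t cmd, uint16_t param)
{
	if (c->internal_error)
		return -1;	/* never touch the channel after a fatal error */

	c->toggle ^= 1;
	c->slave_write = (uint32_t)param | ((uint32_t)cmd << 16) |
			 ((uint32_t)c->toggle << 31);
	return 0;
}

static bool comm_pending(const struct comm_model *c)
{
	/* pending until the master echoes the new toggle back */
	return (c->slave_read >> 31) != c->toggle;
}

int main(void)
{
	struct comm_model c = { 0 };

	comm_post(&c, 0x5, 0x1234);
	printf("pending after post: %d\n", comm_pending(&c));	/* 1 */
	c.slave_read = c.slave_write;				/* master acks */
	printf("pending after ack:  %d\n", comm_pending(&c));	/* 0 */
	return 0;
}
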
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 999014413b1a..90b5309cdb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -32,6 +32,7 @@
*/
#include <linux/mlx4/device.h>
+#include <linux/clocksource.h>
#include "mlx4_en.h"
@@ -147,12 +148,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
unsigned long flags;
- s64 now;
write_lock_irqsave(&mdev->clock_lock, flags);
- now = timecounter_read(&mdev->clock);
- now += delta;
- timecounter_init(&mdev->clock, &mdev->cycles, now);
+ timecounter_adjtime(&mdev->clock, delta);
write_unlock_irqrestore(&mdev->clock_lock, flags);
return 0;
@@ -243,7 +241,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
- u64 ns;
+ u64 ns, zero = 0;
rwlock_init(&mdev->clock_lock);
@@ -268,7 +266,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
*/
- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
+ ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
mdev->overflow_period = ns;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 82322b1c8411..22da4d0d0f05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
*/
- set_dev_node(&mdev->dev->pdev->dev, node);
+ set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
- set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+ set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 90e0f045a6bc..a7b58ba8492b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+ strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
- proto_admin = cpu_to_be32(ptys_adv);
- if (speed >= 0 && speed != priv->port_state.link_speed)
- /* If speed was set then speed decides :-) */
- proto_admin = speed_set_ptys_admin(priv, speed,
- ptys_reg.eth_proto_cap);
+ proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
+ cpu_to_be32(ptys_adv) :
+ speed_set_ptys_admin(priv, speed,
+ ptys_reg.eth_proto_cap);
proto_admin &= ptys_reg.eth_proto_cap;
-
- if (proto_admin == ptys_reg.eth_proto_admin)
- return 0; /* Nothing to change */
-
if (!proto_admin) {
en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
return -EINVAL; /* nothing to change due to bad input */
}
+ if (proto_admin == ptys_reg.eth_proto_admin)
+ return 0; /* Nothing to change */
+
en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
be32_to_cpu(proto_admin));
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
- en_warn(priv, "Port link mode changed, restarting port...\n");
mutex_lock(&priv->mdev->state_lock);
if (priv->port_up) {
+ en_warn(priv, "Port link mode changed, restarting port...\n");
mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 9f16f754137b..58d5a07d0ff4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
+ if (mdev->nb.notifier_call)
+ unregister_netdevice_notifier(&mdev->nb);
kfree(mdev);
}
@@ -241,8 +243,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
spin_lock_init(&mdev->uar_lock);
mdev->dev = dev;
- mdev->dma_device = &(dev->pdev->dev);
- mdev->pdev = dev->pdev;
+ mdev->dma_device = &dev->persist->pdev->dev;
+ mdev->pdev = dev->persist->pdev;
mdev->device_up = false;
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
@@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
}
+ /* register notifier */
+ mdev->nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->nb)) {
+ mdev->nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create notifier\n");
+ }
return mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d0d6dc1b8e46..2a210c4efb89 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
{
int err;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
@@ -2061,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
/* Detach the netdev so tasks would not attempt to access it */
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;
+ mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
mlx4_en_free_resources(priv);
@@ -2200,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
return ret;
}
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
+ en_info(priv, "Turn %s TX vlan strip offload\n",
+ (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+
if (features & NETIF_F_LOOPBACK)
priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
else
@@ -2440,6 +2446,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#endif
};
+struct mlx4_en_bond {
+ struct work_struct work;
+ struct mlx4_en_priv *priv;
+ int is_bonded;
+ struct mlx4_port_map port_map;
+};
+
+static void mlx4_en_bond_work(struct work_struct *work)
+{
+ struct mlx4_en_bond *bond = container_of(work,
+ struct mlx4_en_bond,
+ work);
+ int err = 0;
+ struct mlx4_dev *dev = bond->priv->mdev->dev;
+
+ if (bond->is_bonded) {
+ if (!mlx4_is_bonded(dev)) {
+ err = mlx4_bond(dev);
+ if (err)
+ en_err(bond->priv, "Fail to bond device\n");
+ }
+ if (!err) {
+ err = mlx4_port_map_set(dev, &bond->port_map);
+ if (err)
+ en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
+ bond->port_map.port1,
+ bond->port_map.port2,
+ err);
+ }
+ } else if (mlx4_is_bonded(dev)) {
+ err = mlx4_unbond(dev);
+ if (err)
+ en_err(bond->priv, "Fail to unbond device\n");
+ }
+ dev_put(bond->priv->dev);
+ kfree(bond);
+}
+
+static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
+ u8 v2p_p1, u8 v2p_p2)
+{
+ struct mlx4_en_bond *bond = NULL;
+
+ bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
+ if (!bond)
+ return -ENOMEM;
+
+ INIT_WORK(&bond->work, mlx4_en_bond_work);
+ bond->priv = priv;
+ bond->is_bonded = is_bonded;
+ bond->port_map.port1 = v2p_p1;
+ bond->port_map.port2 = v2p_p2;
+ dev_hold(priv->dev);
+ queue_work(priv->mdev->workqueue, &bond->work);
+ return 0;
+}
+
+int mlx4_en_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ u8 port = 0;
+ struct mlx4_en_dev *mdev;
+ struct mlx4_dev *dev;
+ int i, num_eth_ports = 0;
+ bool do_bond = true;
+ struct mlx4_en_priv *priv;
+ u8 v2p_port1 = 0;
+ u8 v2p_port2 = 0;
+
+ if (!net_eq(dev_net(ndev), &init_net))
+ return NOTIFY_DONE;
+
+ mdev = container_of(this, struct mlx4_en_dev, nb);
+ dev = mdev->dev;
+
+ /* Go into this mode only when two network devices set on two ports
+ * of the same mlx4 device are slaves of the same bonding master
+ */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ ++num_eth_ports;
+ if (!port && (mdev->pndev[i] == ndev))
+ port = i;
+ mdev->upper[i] = mdev->pndev[i] ?
+ netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
+ /* condition not met: network device is a slave */
+ if (!mdev->upper[i])
+ do_bond = false;
+ if (num_eth_ports < 2)
+ continue;
+ /* condition not met: same master */
+ if (mdev->upper[i] != mdev->upper[i-1])
+ do_bond = false;
+ }
+ /* condition not met: 2 slaves */
+ do_bond = (num_eth_ports == 2) ? do_bond : false;
+
+ /* handle only events that come with enough info */
+ if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
+ return NOTIFY_DONE;
+
+ priv = netdev_priv(ndev);
+ if (do_bond) {
+ struct netdev_notifier_bonding_info *notifier_info = ptr;
+ struct netdev_bonding_info *bonding_info =
+ &notifier_info->bonding_info;
+
+ /* required mode 1, 2 or 4 */
+ if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
+ (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
+ (bonding_info->master.bond_mode != BOND_MODE_8023AD))
+ do_bond = false;
+
+ /* require exactly 2 slaves */
+ if (bonding_info->master.num_slaves != 2)
+ do_bond = false;
+
+ /* calc v2p */
+ if (do_bond) {
+ if (bonding_info->master.bond_mode ==
+ BOND_MODE_ACTIVEBACKUP) {
+ /* in active-backup mode virtual ports are
+ * mapped to the physical port of the active
+ * slave */
+ if (bonding_info->slave.state ==
+ BOND_STATE_BACKUP) {
+ if (port == 1) {
+ v2p_port1 = 2;
+ v2p_port2 = 2;
+ } else {
+ v2p_port1 = 1;
+ v2p_port2 = 1;
+ }
+ } else { /* BOND_STATE_ACTIVE */
+ if (port == 1) {
+ v2p_port1 = 1;
+ v2p_port2 = 1;
+ } else {
+ v2p_port1 = 2;
+ v2p_port2 = 2;
+ }
+ }
+ } else { /* Active-Active */
+ /* in active-active mode a virtual port is
+ * mapped to the native physical port if and only
+ * if the physical port is up */
+ __s8 link = bonding_info->slave.link;
+
+ if (port == 1)
+ v2p_port2 = 2;
+ else
+ v2p_port1 = 1;
+ if ((link == BOND_LINK_UP) ||
+ (link == BOND_LINK_FAIL)) {
+ if (port == 1)
+ v2p_port1 = 1;
+ else
+ v2p_port2 = 2;
+ } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
+ if (port == 1)
+ v2p_port1 = 2;
+ else
+ v2p_port2 = 1;
+ }
+ }
+ }
+ }
+
+ mlx4_en_queue_bond_work(priv, do_bond,
+ v2p_port1, v2p_port2);
+
+ return NOTIFY_DONE;
+}
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
@@ -2457,7 +2637,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
- SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+ SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
dev->dev_port = port - 1;
/*
@@ -2622,6 +2802,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
mdev->pndev[port] = dev;
+ mdev->upper[port] = NULL;
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
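
The notifier added above reduces the bonding information to a virtual-to-physical port map: in active-backup mode both virtual ports follow the active slave's physical port, while in the XOR/802.3ad case a virtual port keeps its native physical port only while that port's link is usable. A standalone sketch of that decision, with hypothetical enum names standing in for the bonding constants:

#include <stdio.h>

/* Hypothetical stand-ins for the bonding constants used above. */
enum bond_mode   { MODE_ACTIVEBACKUP, MODE_XOR, MODE_8023AD };
enum slave_state { STATE_ACTIVE, STATE_BACKUP };
enum link_state  { LINK_UP, LINK_FAIL, LINK_DOWN, LINK_BACK };

struct port_map { int port1, port2; };

/* port is the mlx4 port (1 or 2) whose slave generated the event */
static struct port_map calc_v2p(int port, enum bond_mode mode,
				enum slave_state state, enum link_state link)
{
	struct port_map map = { .port1 = 1, .port2 = 2 };	/* native mapping */

	if (mode == MODE_ACTIVEBACKUP) {
		/* both virtual ports follow the active slave's physical port */
		int active = (state == STATE_BACKUP) ? (port == 1 ? 2 : 1) : port;

		map.port1 = active;
		map.port2 = active;
	} else {
		/* XOR / 802.3ad: a virtual port keeps its native physical port
		 * only while that port's link is up (or failing over); otherwise
		 * it is remapped to the other physical port
		 */
		int usable = (link == LINK_UP || link == LINK_FAIL);

		if (port == 1)
			map.port1 = usable ? 1 : 2;
		else
			map.port2 = usable ? 2 : 1;
	}
	return map;
}

int main(void)
{
	struct port_map m = calc_v2p(1, MODE_ACTIVEBACKUP, STATE_BACKUP, LINK_UP);

	printf("v2p: port1->%d port2->%d\n", m.port1, m.port2);	/* both -> 2 */
	return 0;
}
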
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index f1a5500ff72d..34f2fdf4fe5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
- if (is_tx)
+ if (is_tx) {
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
- else
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
+ context->params2 |= MLX4_QP_BIT_FPP;
+
+ } else {
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
+ }
context->usr_page = cpu_to_be32(mdev->priv_uar.index);
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a0474eb94aa3..698d60de1255 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
frag_info, GFP_KERNEL | __GFP_COLD))
goto out;
+
+ en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
+ i, ring->page_alloc[i].page_size,
+ atomic_read(&ring->page_alloc[i].page->_count));
}
return 0;
@@ -387,10 +391,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info, tmp);
/* Allocate HW buffers on provided NUMA node */
- set_dev_node(&mdev->dev->pdev->dev, node);
+ set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
- set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+ set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
@@ -1059,8 +1063,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
- priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
- SMP_CACHE_BYTES);
+ priv->frag_info[i].frag_stride =
+ ALIGN(priv->frag_info[i].frag_size,
+ SMP_CACHE_BYTES);
buf_size += priv->frag_info[i].frag_size;
i++;
}
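
The en_rx change above aligns each fragment's stride from the size actually used for that fragment rather than from the nominal frag_sizes[] entry, so a truncated last fragment no longer reserves a full nominal stride. A simplified standalone model of that sizing loop, with illustrative sizes and MTU:

#include <stdio.h>

#define MODEL_CACHE_BYTES 64
#define MODEL_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define MODEL_NUM_FRAGS   4

int main(void)
{
	const int frag_sizes[MODEL_NUM_FRAGS] = { 1536, 4096, 4096, 4096 };
	int eff_mtu = 9000 + 14 + 4;	/* hypothetical jumbo MTU + header + FCS */
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu && i < MODEL_NUM_FRAGS) {
		/* each fragment takes min(nominal size, remaining MTU) */
		int size = (eff_mtu > buf_size + frag_sizes[i]) ?
			   frag_sizes[i] : eff_mtu - buf_size;
		/* stride aligned from the size actually used (the fix above) */
		int stride = MODEL_ALIGN(size, MODEL_CACHE_BYTES);

		printf("frag %d: prefix=%d size=%d stride=%d\n",
		       i, buf_size, size, stride);
		buf_size += size;
		i++;
	}
	return 0;
}
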
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e3357bf523df..55f9f5c5344e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
/* Allocate HW buffers on provided NUMA node */
- set_dev_node(&mdev->dev->pdev->dev, node);
+ set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
2 * PAGE_SIZE);
- set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+ set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
@@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
if (dev->num_tc)
return skb_tx_hash(dev, skb);
- if (vlan_tx_tag_present(skb))
- up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+ if (skb_vlan_tag_present(skb))
+ up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
return fallback(dev, skb) % rings_p_up + up * rings_p_up;
}
@@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (vlan_tx_tag_present(skb))
- vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb))
+ vlan_tag = skb_vlan_tag_get(skb);
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = (real_size / 16) & 0x3f;
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
- !vlan_tx_tag_present(skb) && send_doorbell) {
+ !skb_vlan_tag_present(skb) && send_doorbell) {
tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
cpu_to_be32(real_size);
@@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!vlan_tx_tag_present(skb);
+ !!skb_vlan_tag_present(skb);
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3d275fbaf0eb..264bc15c1ff2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+ async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
return async_ev_mask;
}
@@ -237,7 +239,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
struct mlx4_eqe eqe;
/*don't send if we don't have the that slave */
- if (dev->num_vfs < slave)
+ if (dev->persist->num_vfs < slave)
return 0;
memset(&eqe, 0, sizeof eqe);
@@ -255,7 +257,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
struct mlx4_eqe eqe;
/*don't send if we don't have the that slave */
- if (dev->num_vfs < slave)
+ if (dev->persist->num_vfs < slave)
return 0;
memset(&eqe, 0, sizeof eqe);
@@ -310,7 +312,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
port);
- for (i = 0; i < dev->num_vfs + 1; i++)
+ for (i = 0; i < dev->persist->num_vfs + 1; i++)
if (test_bit(i, slaves_pport.slaves))
set_and_calc_slave_port_state(dev, i, port,
event, &gen_event);
@@ -429,8 +431,14 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
i);
-
- mlx4_delete_all_resources_for_slave(dev, i);
+ /* In case of 'Reset flow' FLR can be generated for
+ * a slave before mlx4_load_one is done.
+ * Make sure the interface is up before trying to delete
+ * slave resources which weren't allocated yet.
+ */
+ if (dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_UP)
+ mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
@@ -560,7 +568,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
if (!mlx4_is_master(dev))
break;
- for (i = 0; i < dev->num_vfs + 1; i++) {
+ for (i = 0; i < dev->persist->num_vfs + 1;
+ i++) {
if (!test_bit(i, slaves_port.slaves))
continue;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
@@ -596,7 +605,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (!mlx4_is_master(dev))
break;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- for (i = 0; i < dev->num_vfs + 1; i++) {
+ for (i = 0;
+ i < dev->persist->num_vfs + 1;
+ i++) {
if (!test_bit(i, slaves_port.slaves))
continue;
if (i == mlx4_master_func_num(dev))
@@ -727,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
(unsigned long) eqe);
break;
+ case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
+ switch (eqe->subtype) {
+ case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
+ mlx4_warn(dev, "Bad cable detected on port %u\n",
+ eqe->event.bad_cable.port);
+ break;
+ case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
+ mlx4_warn(dev, "Unsupported cable detected\n");
+ break;
+ default:
+ mlx4_dbg(dev,
+ "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
+ eqe->type, eqe->subtype, eq->eqn,
+ eq->cons_index, eqe->owner, eq->nent,
+ !!(eqe->owner & 0x80) ^
+ !!(eq->cons_index & eq->nent) ? "HW" : "SW");
+ break;
+ }
+ break;
+
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
@@ -837,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
MLX4_CMD_WRAPPED);
}
-static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
- int eq_num)
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
- 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -865,7 +894,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (!priv->eq_table.uar_map[index]) {
priv->eq_table.uar_map[index] =
- ioremap(pci_resource_start(dev->pdev, 2) +
+ ioremap(pci_resource_start(dev->persist->pdev, 2) +
((eq->eqn / 4) << PAGE_SHIFT),
PAGE_SIZE);
if (!priv->eq_table.uar_map[index]) {
@@ -928,8 +957,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
eq_context = mailbox->buf;
for (i = 0; i < npages; ++i) {
- eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
- PAGE_SIZE, &t, GFP_KERNEL);
+ eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
+ pdev->dev,
+ PAGE_SIZE, &t,
+ GFP_KERNEL);
if (!eq->page_list[i].buf)
goto err_out_free_pages;
@@ -995,7 +1026,7 @@ err_out_free_eq:
err_out_free_pages:
for (i = 0; i < npages; ++i)
if (eq->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
eq->page_list[i].map);
@@ -1013,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_cmd_mailbox *mailbox;
int err;
int i;
/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1021,36 +1051,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
*/
int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return;
-
- err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+ err = mlx4_HW2SW_EQ(dev, eq->eqn);
if (err)
mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
- if (0) {
- mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
- for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
- if (i % 4 == 0)
- pr_cont("[%02x] ", i * 4);
- pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
- if ((i + 1) % 4 == 0)
- pr_cont("\n");
- }
- }
synchronize_irq(eq->irq);
tasklet_disable(&eq->tasklet_ctx.task);
mlx4_mtt_cleanup(dev, &eq->mtt);
for (i = 0; i < npages; ++i)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- eq->page_list[i].buf,
- eq->page_list[i].map);
+ dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ eq->page_list[i].map);
kfree(eq->page_list);
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
- mlx4_free_cmd_mailbox(dev, mailbox);
}
static void mlx4_free_irqs(struct mlx4_dev *dev)
@@ -1060,7 +1075,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
int i, vec;
if (eq_table->have_irq)
- free_irq(dev->pdev->irq, dev);
+ free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
@@ -1089,7 +1104,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
+ priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
+ priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
@@ -1212,13 +1228,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
i * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE,
"mlx4-comp-%d@pci:%s", i,
- pci_name(dev->pdev));
+ pci_name(dev->persist->pdev));
} else {
snprintf(priv->eq_table.irq_names +
i * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE,
"mlx4-async@pci:%s",
- pci_name(dev->pdev));
+ pci_name(dev->persist->pdev));
}
eq_name = priv->eq_table.irq_names +
@@ -1235,8 +1251,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
DRV_NAME "@pci:%s",
- pci_name(dev->pdev));
- err = request_irq(dev->pdev->irq, mlx4_interrupt,
+ pci_name(dev->persist->pdev));
+ err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
goto err_out_async;
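
The default case of the recoverable-error branch added above reports whether the unexpected EQE is currently hardware- or software-owned; the test XORs the EQE owner bit with the current pass of the consumer index through the power-of-two sized queue. A small standalone sketch of that ownership test, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

static const char *eqe_ownership(uint8_t owner, uint32_t cons_index, uint32_t nent)
{
	/* owner bit (0x80) vs. the "pass" bit of the consumer index */
	int hw_owned = (!!(owner & 0x80)) ^ (!!(cons_index & nent));

	return hw_owned ? "HW" : "SW";
}

int main(void)
{
	/* 256-entry EQ: bit 8 of cons_index flips on every full pass */
	printf("%s\n", eqe_ownership(0x80, 0, 256));	/* HW */
	printf("%s\n", eqe_ownership(0x80, 256, 256));	/* SW */
	return 0;
}
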
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 982861d1df44..5a21e5dc94cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[ 1] = "UC transport",
[ 2] = "UD transport",
[ 3] = "XRC transport",
- [ 4] = "reliable multicast",
- [ 5] = "FCoIB support",
[ 6] = "SRQ support",
[ 7] = "IPoIB checksum offload",
[ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter",
- [10] = "VMM",
[12] = "Dual Port Different Protocol (DPDP) support",
[15] = "Big LSO headers",
[16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[19] = "Raw multicast support",
[20] = "Address vector port checking support",
[21] = "UD multicast support",
- [24] = "Demand paging support",
- [25] = "Router support",
[30] = "IBoE support",
[32] = "Unicast loopback support",
[34] = "FCS header control",
- [38] = "Wake On LAN support",
+ [37] = "Wake On LAN (port1) support",
+ [38] = "Wake On LAN (port2) support",
[40] = "UDP RSS support",
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
@@ -145,7 +141,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[16] = "CONFIG DEV support",
[17] = "Asymmetric EQs support",
[18] = "More than 80 VFs support",
- [19] = "Performance optimized for limited rule configuration flow steering support"
+ [19] = "Performance optimized for limited rule configuration flow steering support",
+ [20] = "Recoverable error events support",
+ [21] = "Port Remap support"
};
int i;
@@ -259,6 +257,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
+#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
@@ -273,6 +272,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
+#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
@@ -344,9 +344,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
- /* enable rdma and ethernet interfaces, and new quota locations */
+ /* enable rdma and ethernet interfaces, new quota locations,
+ * and reserved lkey
+ */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
- QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
+ QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
+ QUERY_FUNC_CAP_FLAG_RESD_LKEY);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = min(
@@ -411,6 +414,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
+
+ size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
} else
err = -EINVAL;
@@ -503,6 +509,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
+ if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+ func_cap->reserved_lkey = size;
+ } else {
+ func_cap->reserved_lkey = 0;
+ }
+
func_cap->extra_flags = 0;
/* Mailbox data from 0x6c and onward should only be treated if
@@ -851,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
if (field & 0x20)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
@@ -859,6 +874,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
+ if (field32 & (1 << 7))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1106,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
- /* For guests, disable mw type 2 */
+ /* For guests, disable mw type 2 and port remap */
MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+ bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
/* turn off device-managed steering capability if not enabled */
@@ -1562,6 +1580,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_VXLAN_OFFSET 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
+#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1687,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
}
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+ *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1752,8 +1774,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
}
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
- MLX4_CMD_NATIVE);
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
+ MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1901,36 @@ out:
return err;
}
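+/* Refresh the cached HCA core clock frequency (dev->caps.hca_core_clock)
+ * by re-issuing QUERY_HCA.
+ */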
+static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
+ return PTR_ERR(mailbox);
+ }
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_QUERY_HCA,
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
+ if (err) {
+ mlx4_warn(dev, "hca_core_clock update failed\n");
+ goto out;
+ }
+
+ MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
* and real QP0 are active, so that the paravirtualized QP0 is ready
* to operate */
@@ -1983,6 +2035,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ mlx4_hca_core_clock_update(dev);
+
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2062,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
if (priv->mfunc.master.init_port_ref[port] == 1) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
- 1000, MLX4_CMD_NATIVE);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
}
@@ -2018,7 +2073,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
if (!priv->mfunc.master.qp0_state[port].qp0_active &&
priv->mfunc.master.qp0_state[port].port_active) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
- 1000, MLX4_CMD_NATIVE);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2088,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
- MLX4_CMD_WRAPPED);
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
- MLX4_CMD_NATIVE);
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
+ MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
@@ -2049,13 +2104,16 @@ struct mlx4_config_dev {
__be32 rsvd1[3];
__be16 vxlan_udp_dport;
__be16 rsvd2;
- __be32 rsvd3[27];
- __be16 rsvd4;
- u8 rsvd5;
+ __be32 rsvd3;
+ __be32 roce_flags;
+ __be32 rsvd4[25];
+ __be16 rsvd5;
+ u8 rsvd6;
u8 rx_checksum_val;
};
#define MLX4_VXLAN_UDP_DPORT (1 << 0)
+#define MLX4_DISABLE_RX_PORT BIT(18)
static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
@@ -2111,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = {
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
struct mlx4_config_dev_params *params)
{
- struct mlx4_config_dev config_dev;
+ struct mlx4_config_dev config_dev = {0};
int err;
u8 csum_mask;
@@ -2158,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
+#define CONFIG_DISABLE_RX_PORT BIT(15)
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
+{
+ struct mlx4_config_dev config_dev;
+
+ memset(&config_dev, 0, sizeof(config_dev));
+ config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
+ if (dis)
+ config_dev.roce_flags =
+ cpu_to_be32(CONFIG_DISABLE_RX_PORT);
+
+ return mlx4_CONFIG_DEV_set(dev, &config_dev);
+}
+
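+/* Program the virtual-to-physical port mapping via the VIRT_PORT_MAP command */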
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct {
+ __be32 v_port1;
+ __be32 v_port2;
+ } *v2p;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return -ENOMEM;
+
+ v2p = mailbox->buf;
+ v2p->v_port1 = cpu_to_be32(port1);
+ v2p->v_port2 = cpu_to_be32(port2);
+
+ err = mlx4_cmd(dev, mailbox->dma, 0,
+ MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
@@ -2180,7 +2277,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 62562b60fa87..f44f7f6017ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
u32 qp0_proxy_qpn;
u32 qp1_tunnel_qpn;
u32 qp1_proxy_qpn;
+ u32 reserved_lkey;
u8 physical_port;
u8 port_flags;
u8 flags1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 97c9b1db1d27..2a9dd460a95f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
int i;
if (chunk->nsg > 0)
- pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+ pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
@@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
int i;
for (i = 0; i < chunk->npages; ++i)
- dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+ dma_free_coherent(&dev->persist->pdev->dev,
+ chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
}
@@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
--cur_order;
if (coherent)
- ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+ ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
&chunk->mem[chunk->npages],
cur_order, gfp_mask);
else
@@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
@@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
}
if (!coherent && chunk) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 116895ac8b35..6fce58718837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -33,11 +33,13 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/errno.h>
#include "mlx4.h"
struct mlx4_device_context {
struct list_head list;
+ struct list_head bond_list;
struct mlx4_interface *intf;
void *context;
};
@@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
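+/* Switch bonded mode on or off: toggle MLX4_FLAG_BONDED and restart
+ * every registered interface that opted in via MLX4_INTFF_BONDING.
+ */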
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
+ unsigned long flags;
+ int ret;
+ LIST_HEAD(bond_list);
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+ return -ENOTSUPP;
+
+ ret = mlx4_disable_rx_port_check(dev, enable);
+ if (ret) {
+ mlx4_err(dev, "Fail to %s rx port check\n",
+ enable ? "enable" : "disable");
+ return ret;
+ }
+ if (enable) {
+ dev->flags |= MLX4_FLAG_BONDED;
+ } else {
+ ret = mlx4_virt2phy_port_map(dev, 1, 2);
+ if (ret) {
+ mlx4_err(dev, "Fail to reset port map\n");
+ return ret;
+ }
+ dev->flags &= ~MLX4_FLAG_BONDED;
+ }
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+ list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
+ if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
+ list_add_tail(&dev_ctx->bond_list, &bond_list);
+ list_del(&dev_ctx->list);
+ }
+ }
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &bond_list, bond_list) {
+ dev_ctx->intf->remove(dev, dev_ctx->context);
+ dev_ctx->context = dev_ctx->intf->add(dev);
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
+ dev_ctx->intf->protocol, enable ?
+ "enabled" : "disabled");
+ }
+ return 0;
+}
+
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
unsigned long param)
{
@@ -138,13 +192,13 @@ int mlx4_register_device(struct mlx4_dev *dev)
mutex_lock(&intf_mutex);
+ dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
list_add_tail(&priv->dev_list, &dev_list);
list_for_each_entry(intf, &intf_list, list)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
- if (!mlx4_is_slave(dev))
- mlx4_start_catas_poll(dev);
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -154,14 +208,14 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
- if (!mlx4_is_slave(dev))
- mlx4_stop_catas_poll(dev);
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
mlx4_remove_device(intf, priv);
list_del(&priv->dev_list);
+ dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
mutex_unlock(&intf_mutex);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 03e9eb0dc761..7e487223489a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -108,6 +108,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
MLX4_FUNC_CAP_DMFS_A0_STATIC)
+#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
+
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -249,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
if (mlx4_is_master(dev))
dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
} else {
- mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
+ if (cache_line_size() != 32 && cache_line_size() != 64)
+ mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
}
@@ -318,10 +321,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return -ENODEV;
}
- if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
+ if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev_cap->uar_size,
- (unsigned long long) pci_resource_len(dev->pdev, 2));
+ (unsigned long long)
+ pci_resource_len(dev->persist->pdev, 2));
return -ENODEV;
}
@@ -541,8 +545,10 @@ static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
- err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
- err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+ err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
+ &lnkcap1);
+ err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
+ &lnkcap2);
if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*speed = PCIE_SPEED_8_0GT;
@@ -587,7 +593,7 @@ static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
return;
}
- err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+ err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
if (err || speed == PCI_SPEED_UNKNOWN ||
width == PCIE_LNK_WIDTH_UNKNOWN) {
mlx4_warn(dev,
@@ -792,6 +798,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
dev->caps.num_eqs = func_cap.max_eq;
dev->caps.reserved_eqs = func_cap.reserved_eq;
+ dev->caps.reserved_lkey = func_cap.reserved_lkey;
dev->caps.num_pds = MLX4_NUM_PDS;
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
@@ -837,10 +844,12 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
- pci_resource_len(dev->pdev, 2)) {
+ pci_resource_len(dev->persist->pdev,
+ 2)) {
mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev->caps.uar_page_size * dev->caps.num_uars,
- (unsigned long long) pci_resource_len(dev->pdev, 2));
+ (unsigned long long)
+ pci_resource_len(dev->persist->pdev, 2));
goto err_mem;
}
@@ -1152,6 +1161,91 @@ err_set_port:
return err ? err : count;
}
+int mlx4_bond(struct mlx4_dev *dev)
+{
+ int ret = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->bond_mutex);
+
+ if (!mlx4_is_bonded(dev))
+ ret = mlx4_do_bond(dev, true);
+ else
+ ret = 0;
+
+ mutex_unlock(&priv->bond_mutex);
+ if (ret)
+ mlx4_err(dev, "Failed to bond device: %d\n", ret);
+ else
+ mlx4_dbg(dev, "Device is bonded\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_bond);
+
+int mlx4_unbond(struct mlx4_dev *dev)
+{
+ int ret = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->bond_mutex);
+
+ if (mlx4_is_bonded(dev))
+ ret = mlx4_do_bond(dev, false);
+
+ mutex_unlock(&priv->bond_mutex);
+ if (ret)
+ mlx4_err(dev, "Failed to unbond device: %d\n", ret);
+ else
+ mlx4_dbg(dev, "Device is unbonded\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_unbond);
+
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
+{
+ u8 port1 = v2p->port1;
+ u8 port2 = v2p->port2;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+ return -ENOTSUPP;
+
+ mutex_lock(&priv->bond_mutex);
+
+ /* zero means keep current mapping for this port */
+ if (port1 == 0)
+ port1 = priv->v2p.port1;
+ if (port2 == 0)
+ port2 = priv->v2p.port2;
+
+ if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
+ (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
+ (port1 == 2 && port2 == 1)) {
+ /* besides the boundary checks, cross mapping makes
+ * no sense and is therefore not allowed */
+ err = -EINVAL;
+ } else if ((port1 == priv->v2p.port1) &&
+ (port2 == priv->v2p.port2)) {
+ err = 0;
+ } else {
+ err = mlx4_virt2phy_port_map(dev, port1, port2);
+ if (!err) {
+ mlx4_dbg(dev, "port map changed: [%d][%d]\n",
+ port1, port2);
+ priv->v2p.port1 = port1;
+ priv->v2p.port2 = port2;
+ } else {
+ mlx4_err(dev, "Failed to change port mape: %d\n", err);
+ }
+ }
+
+ mutex_unlock(&priv->bond_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_port_map_set);
+
static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1477,7 +1571,8 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->cmd.slave_cmd_mutex);
- if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
+ MLX4_COMM_TIME))
mlx4_warn(dev, "Failed to close slave function\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
@@ -1492,9 +1587,9 @@ static int map_bf_area(struct mlx4_dev *dev)
if (!dev->caps.bf_reg_size)
return -ENXIO;
- bf_start = pci_resource_start(dev->pdev, 2) +
+ bf_start = pci_resource_start(dev->persist->pdev, 2) +
(dev->caps.num_uars << PAGE_SHIFT);
- bf_len = pci_resource_len(dev->pdev, 2) -
+ bf_len = pci_resource_len(dev->persist->pdev, 2) -
(dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
@@ -1536,7 +1631,8 @@ static int map_internal_clock(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
priv->clock_mapping =
- ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
+ ioremap(pci_resource_start(dev->persist->pdev,
+ priv->fw.clock_bar) +
priv->fw.clock_offset, MLX4_CLOCK_SIZE);
if (!priv->clock_mapping)
@@ -1573,6 +1669,50 @@ static void mlx4_close_fw(struct mlx4_dev *dev)
}
}
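+/* Poll the comm channel flags until the PF clears the offline bit
+ * or the timeout expires.
+ */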
+static int mlx4_comm_check_offline(struct mlx4_dev *dev)
+{
+#define COMM_CHAN_OFFLINE_OFFSET 0x09
+
+ u32 comm_flags;
+ u32 offline_bit;
+ unsigned long end;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
+ while (time_before(jiffies, end)) {
+ comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
+ MLX4_COMM_CHAN_FLAGS));
+ offline_bit = (comm_flags &
+ (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
+ if (!offline_bit)
+ return 0;
+ /* In some AER/Reset flows the PF needs around 100 msec to
+ * load. Sleep for 100 msec so that other tasks can make use
+ * of the CPU during this interval.
+ */
+ msleep(100);
+ }
+ mlx4_err(dev, "Communication channel is offline.\n");
+ return -EIO;
+}
+
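+/* Record whether the PF advertises VF reset support on the comm channel */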
+static void mlx4_reset_vf_support(struct mlx4_dev *dev)
+{
+#define COMM_CHAN_RST_OFFSET 0x1e
+
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 comm_rst;
+ u32 comm_caps;
+
+ comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
+ MLX4_COMM_CHAN_CAPS));
+ comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
+
+ if (comm_rst)
+ dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
+}
+
static int mlx4_init_slave(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1588,9 +1728,15 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
+ if (mlx4_comm_check_offline(dev)) {
+ mlx4_err(dev, "PF is not responsive, skipping initialization\n");
+ goto err_offline;
+ }
+
+ mlx4_reset_vf_support(dev);
mlx4_warn(dev, "Sending reset\n");
ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
- MLX4_COMM_TIME);
+ MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
/* if we are in the middle of flr the slave will try
* NUM_OF_RESET_RETRIES times before leaving.*/
if (ret_from_reset) {
@@ -1615,22 +1761,24 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
mlx4_warn(dev, "Sending vhcr0\n");
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
- MLX4_COMM_TIME))
+ MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
- MLX4_COMM_TIME))
+ MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
- MLX4_COMM_TIME))
+ MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
- if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
+ MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return 0;
err:
- mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
+err_offline:
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EIO;
}
@@ -1705,7 +1853,8 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (mlx4_log_num_mgm_entry_size <= 0 &&
dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
(!mlx4_is_mfunc(dev) ||
- (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
+ (dev_cap->fs_max_num_qp_per_entry >=
+ (dev->persist->num_vfs + 1))) &&
choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
dev->oper_log_mgm_entry_size =
@@ -1744,8 +1893,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
- dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
- dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
else
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -2288,7 +2436,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
for (i = 0; i < nreq; ++i)
entries[i].entry = i;
- nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+ nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
+ nreq);
if (nreq < 0) {
kfree(entries);
@@ -2316,7 +2465,7 @@ no_msi:
dev->caps.comp_pool = 0;
for (i = 0; i < 2; ++i)
- priv->eq_table.eq[i].irq = dev->pdev->irq;
+ priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2344,7 +2493,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->port_attr.show = show_port_type;
sysfs_attr_init(&info->port_attr.attr);
- err = device_create_file(&dev->pdev->dev, &info->port_attr);
+ err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
info->port = -1;
@@ -2361,10 +2510,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->port_mtu_attr.show = show_port_ib_mtu;
sysfs_attr_init(&info->port_mtu_attr.attr);
- err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
+ err = device_create_file(&dev->persist->pdev->dev,
+ &info->port_mtu_attr);
if (err) {
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
- device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+ device_remove_file(&info->dev->persist->pdev->dev,
+ &info->port_attr);
info->port = -1;
}
@@ -2376,8 +2527,9 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
if (info->port < 0)
return;
- device_remove_file(&info->dev->pdev->dev, &info->port_attr);
- device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
+ device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
+ device_remove_file(&info->dev->persist->pdev->dev,
+ &info->port_mtu_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2444,10 +2596,11 @@ static int mlx4_get_ownership(struct mlx4_dev *dev)
void __iomem *owner;
u32 ret;
- if (pci_channel_offline(dev->pdev))
+ if (pci_channel_offline(dev->persist->pdev))
return -EIO;
- owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+ MLX4_OWNER_BASE,
MLX4_OWNER_SIZE);
if (!owner) {
mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2463,10 +2616,11 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
{
void __iomem *owner;
- if (pci_channel_offline(dev->pdev))
+ if (pci_channel_offline(dev->persist->pdev))
return;
- owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+ MLX4_OWNER_BASE,
MLX4_OWNER_SIZE);
if (!owner) {
mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2481,11 +2635,19 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
!!((flags) & MLX4_FLAG_MASTER))
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
- u8 total_vfs, int existing_vfs)
+ u8 total_vfs, int existing_vfs, int reset_flow)
{
u64 dev_flags = dev->flags;
int err = 0;
+ if (reset_flow) {
+ dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
+ if (!dev->dev_vfs)
+ goto free_mem;
+ return dev_flags;
+ }
+
atomic_inc(&pf_loading);
if (dev->flags & MLX4_FLAG_SRIOV) {
if (existing_vfs != total_vfs) {
@@ -2514,13 +2676,14 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
dev_flags |= MLX4_FLAG_SRIOV |
MLX4_FLAG_MASTER;
dev_flags &= ~MLX4_FLAG_SLAVE;
- dev->num_vfs = total_vfs;
+ dev->persist->num_vfs = total_vfs;
}
return dev_flags;
disable_sriov:
atomic_dec(&pf_loading);
- dev->num_vfs = 0;
+free_mem:
+ dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
return dev_flags & ~MLX4_FLAG_MASTER;
}
@@ -2544,7 +2707,8 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
}
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
- int total_vfs, int *nvfs, struct mlx4_priv *priv)
+ int total_vfs, int *nvfs, struct mlx4_priv *priv,
+ int reset_flow)
{
struct mlx4_dev *dev;
unsigned sum = 0;
@@ -2560,6 +2724,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
spin_lock_init(&priv->ctx_lock);
mutex_init(&priv->port_mutex);
+ mutex_init(&priv->bond_mutex);
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
@@ -2607,10 +2772,15 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
existing_vfs = pci_num_vf(pdev);
if (existing_vfs)
dev->flags |= MLX4_FLAG_SRIOV;
- dev->num_vfs = total_vfs;
+ dev->persist->num_vfs = total_vfs;
}
}
+ /* on load, clear any previous indication of internal error;
+ * the device is up.
+ */
+ dev->persist->state = MLX4_DEVICE_STATE_UP;
+
slave_start:
err = mlx4_cmd_init(dev);
if (err) {
@@ -2661,8 +2831,10 @@ slave_start:
goto err_fw;
if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
- u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
- existing_vfs);
+ u64 dev_flags = mlx4_enable_sriov(dev, pdev,
+ total_vfs,
+ existing_vfs,
+ reset_flow);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
@@ -2704,7 +2876,7 @@ slave_start:
if (dev->flags & MLX4_FLAG_SRIOV) {
if (!existing_vfs)
pci_disable_sriov(pdev);
- if (mlx4_is_master(dev))
+ if (mlx4_is_master(dev) && !reset_flow)
atomic_dec(&pf_loading);
dev->flags &= ~MLX4_FLAG_SRIOV;
}
@@ -2718,7 +2890,8 @@ slave_start:
}
if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
- u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);
+ u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
+ existing_vfs, reset_flow);
if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
@@ -2771,12 +2944,14 @@ slave_start:
dev->caps.num_ports);
goto err_close;
}
- memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));
+ memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
- for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
+ for (i = 0;
+ i < sizeof(dev->persist->nvfs)/
+ sizeof(dev->persist->nvfs[0]); i++) {
unsigned j;
- for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
+ for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
dev->caps.num_ports;
@@ -2828,6 +3003,17 @@ slave_start:
goto err_steer;
mlx4_init_quotas(dev);
+ /* Once the PF resources are ready, arm its comm channel so it
+ * can start receiving commands
+ */
+ if (mlx4_is_master(dev)) {
+ err = mlx4_ARM_COMM_CHANNEL(dev);
+ if (err) {
+ mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+ err);
+ goto err_steer;
+ }
+ }
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
@@ -2835,6 +3021,9 @@ slave_start:
goto err_port;
}
+ priv->v2p.port1 = 1;
+ priv->v2p.port2 = 2;
+
err = mlx4_register_device(dev);
if (err)
goto err_port;
@@ -2846,7 +3035,7 @@ slave_start:
priv->removed = 0;
- if (mlx4_is_master(dev) && dev->num_vfs)
+ if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
atomic_dec(&pf_loading);
kfree(dev_cap);
@@ -2880,8 +3069,10 @@ err_free_eq:
mlx4_free_eq_table(dev);
err_master_mfunc:
- if (mlx4_is_master(dev))
+ if (mlx4_is_master(dev)) {
+ mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
mlx4_multi_func_cleanup(dev);
+ }
if (mlx4_is_slave(dev)) {
kfree(dev->caps.qp0_qkey);
@@ -2905,10 +3096,12 @@ err_cmd:
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
err_sriov:
- if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
+ if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
pci_disable_sriov(pdev);
+ dev->flags &= ~MLX4_FLAG_SRIOV;
+ }
- if (mlx4_is_master(dev) && dev->num_vfs)
+ if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
atomic_dec(&pf_loading);
kfree(priv->dev.dev_vfs);
@@ -3049,11 +3242,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+ err = mlx4_catas_init(&priv->dev);
if (err)
goto err_release_regions;
+
+ err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
+ if (err)
+ goto err_catas;
+
return 0;
+err_catas:
+ mlx4_catas_end(&priv->dev);
+
err_release_regions:
pci_release_regions(pdev);
@@ -3076,38 +3277,60 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
dev = &priv->dev;
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
+ dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
+ if (!dev->persist) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ dev->persist->pdev = pdev;
+ dev->persist->dev = dev;
+ pci_set_drvdata(pdev, dev->persist);
priv->pci_dev_data = id->driver_data;
+ mutex_init(&dev->persist->device_state_mutex);
+ mutex_init(&dev->persist->interface_state_mutex);
ret = __mlx4_init_one(pdev, id->driver_data, priv);
- if (ret)
+ if (ret) {
+ kfree(dev->persist);
kfree(priv);
+ } else {
+ pci_save_state(pdev);
+ }
return ret;
}
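+/* Zero the driver-private area while keeping the persistent struct and
+ * the device flags covered by RESET_PERSIST_MASK_FLAGS.
+ */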
+static void mlx4_clean_dev(struct mlx4_dev *dev)
+{
+ struct mlx4_dev_persistent *persist = dev->persist;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
+
+ memset(priv, 0, sizeof(*priv));
+ priv->dev.persist = persist;
+ priv->dev.flags = flags;
+}
+
static void mlx4_unload_one(struct pci_dev *pdev)
{
- struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
- int p;
- int active_vfs = 0;
+ int p, i;
if (priv->removed)
return;
+ /* save the current port types for later use */
+ for (i = 0; i < dev->caps.num_ports; i++) {
+ dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
+ dev->persist->curr_port_poss_type[i] = dev->caps.
+ possible_type[i + 1];
+ }
+
pci_dev_data = priv->pci_dev_data;
- /* Disabling SR-IOV is not allowed while there are active vf's */
- if (mlx4_is_master(dev)) {
- active_vfs = mlx4_how_many_lives_vf(dev);
- if (active_vfs) {
- pr_warn("Removing PF when there are active VF's !!\n");
- pr_warn("Will not disable SR-IOV.\n");
- }
- }
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -3151,12 +3374,6 @@ static void mlx4_unload_one(struct pci_dev *pdev)
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
- if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
- mlx4_warn(dev, "Disabling SR-IOV\n");
- pci_disable_sriov(pdev);
- dev->flags &= ~MLX4_FLAG_SRIOV;
- dev->num_vfs = 0;
- }
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
@@ -3168,42 +3385,96 @@ static void mlx4_unload_one(struct pci_dev *pdev)
kfree(dev->caps.qp1_proxy);
kfree(dev->dev_vfs);
- memset(priv, 0, sizeof(*priv));
+ mlx4_clean_dev(dev);
priv->pci_dev_data = pci_dev_data;
priv->removed = 1;
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
- struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
+ int active_vfs = 0;
+
+ mutex_lock(&persist->interface_state_mutex);
+ persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
+ mutex_unlock(&persist->interface_state_mutex);
+
+ /* Disabling SR-IOV is not allowed while there are active vf's */
+ if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
+ active_vfs = mlx4_how_many_lives_vf(dev);
+ if (active_vfs) {
+ pr_warn("Removing PF when there are active VF's !!\n");
+ pr_warn("Will not disable SR-IOV.\n");
+ }
+ }
+
+ /* the device is marked for deletion; continue without the lock
+ * so that other tasks can terminate
+ */
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_unload_one(pdev);
+ else
+ mlx4_info(dev, "%s: interface is down\n", __func__);
+ mlx4_catas_end(dev);
+ if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
+ mlx4_warn(dev, "Disabling SR-IOV\n");
+ pci_disable_sriov(pdev);
+ }
- mlx4_unload_one(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ kfree(dev->persist);
kfree(priv);
pci_set_drvdata(pdev, NULL);
}
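+/* Re-apply the port types saved before unload, then restart port sensing */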
+static int restore_current_port_types(struct mlx4_dev *dev,
+ enum mlx4_port_type *types,
+ enum mlx4_port_type *poss_types)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err, i;
+
+ mlx4_stop_sense(dev);
+
+ mutex_lock(&priv->port_mutex);
+ for (i = 0; i < dev->caps.num_ports; i++)
+ dev->caps.possible_type[i + 1] = poss_types[i];
+ err = mlx4_change_port_types(dev, types);
+ mlx4_start_sense(dev);
+ mutex_unlock(&priv->port_mutex);
+
+ return err;
+}
+
int mlx4_restart_one(struct pci_dev *pdev)
{
- struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
int pci_dev_data, err, total_vfs;
pci_dev_data = priv->pci_dev_data;
- total_vfs = dev->num_vfs;
- memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));
+ total_vfs = dev->persist->num_vfs;
+ memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mlx4_unload_one(pdev);
- err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+ err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
if (err) {
mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
__func__, pci_name(pdev), err);
return err;
}
+ err = restore_current_port_types(dev, dev->persist->curr_port_type,
+ dev->persist->curr_port_poss_type);
+ if (err)
+ mlx4_err(dev, "could not restore original port types (%d)\n",
+ err);
+
return err;
}
@@ -3258,23 +3529,79 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- mlx4_unload_one(pdev);
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+
+ mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
+ mlx4_enter_error_state(persist);
- return state == pci_channel_io_perm_failure ?
- PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+ mutex_lock(&persist->interface_state_mutex);
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_unload_one(pdev);
+
+ mutex_unlock(&persist->interface_state_mutex);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int ret;
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
- ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);
+ mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ total_vfs = dev->persist->num_vfs;
+ memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+
+ mutex_lock(&persist->interface_state_mutex);
+ if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
+ ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+ priv, 1);
+ if (ret) {
+ mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = restore_current_port_types(dev, dev->persist->
+ curr_port_type, dev->persist->
+ curr_port_poss_type);
+ if (ret)
+ mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+ }
+end:
+ mutex_unlock(&persist->interface_state_mutex);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
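+/* PCI shutdown hook: unload the device only if the interface is still marked up */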
+static void mlx4_shutdown(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+
+ mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ mutex_lock(&persist->interface_state_mutex);
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_unload_one(pdev);
+ mutex_unlock(&persist->interface_state_mutex);
+}
+
static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
@@ -3284,7 +3611,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
- .shutdown = mlx4_unload_one,
+ .shutdown = mlx4_shutdown,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
@@ -3336,7 +3663,6 @@ static int __init mlx4_init(void)
if (mlx4_verify_params())
return -EINVAL;
- mlx4_catas_init();
mlx4_wq = create_singlethread_workqueue("mlx4");
if (!mlx4_wq)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index a3867e7ef885..bd9ea0d01aae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1318,6 +1318,9 @@ out:
mutex_unlock(&priv->mcg_table.mutex);
mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ /* If the device is in an internal error state, report success for this closing command */
+ err = 0;
return err;
}
@@ -1347,6 +1350,9 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err && !attach &&
+ dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+ err = 0;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index bdd4eea2247c..1409d0cd6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -85,7 +85,9 @@ enum {
MLX4_CLR_INT_SIZE = 0x00008,
MLX4_SLAVE_COMM_BASE = 0x0,
MLX4_COMM_PAGESIZE = 0x1000,
- MLX4_CLOCK_SIZE = 0x00008
+ MLX4_CLOCK_SIZE = 0x00008,
+ MLX4_COMM_CHAN_CAPS = 0x8,
+ MLX4_COMM_CHAN_FLAGS = 0xc
};
enum {
@@ -120,6 +122,10 @@ enum mlx4_mpt_state {
};
#define MLX4_COMM_TIME 10000
+#define MLX4_COMM_OFFLINE_TIME_OUT 30000
+#define MLX4_COMM_CMD_NA_OP 0x0
+
+
enum {
MLX4_COMM_CMD_RESET,
MLX4_COMM_CMD_VHCR0,
@@ -190,6 +196,7 @@ struct mlx4_vhcr {
struct mlx4_vhcr_cmd {
__be64 in_param;
__be32 in_modifier;
+ u32 reserved1;
__be64 out_param;
__be16 token;
u16 reserved;
@@ -221,21 +228,24 @@ extern int mlx4_debug_level;
#define mlx4_dbg(mdev, format, ...) \
do { \
if (mlx4_debug_level) \
- dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
+ dev_printk(KERN_DEBUG, \
+ &(mdev)->persist->pdev->dev, format, \
##__VA_ARGS__); \
} while (0)
#define mlx4_err(mdev, format, ...) \
- dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+ dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...) \
- dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+ dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...) \
- dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+ dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
+extern int mlx4_internal_err_reset;
-#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
+ MLX4_MFUNC_MAX))
#define ALL_SLAVES 0xff
struct mlx4_bitmap {
@@ -606,7 +616,6 @@ struct mlx4_mgm {
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
- struct mutex hcr_mutex;
struct mutex slave_cmd_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
@@ -877,6 +886,8 @@ struct mlx4_priv {
int reserved_mtts;
int fs_hash_mode;
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+ struct mlx4_port_map v2p; /* cached port mapping configuration */
+ struct mutex bond_mutex; /* for bond mode */
__be64 slave_node_guids[MLX4_MFUNC_MAX];
atomic_t opreq_count;
@@ -994,7 +1005,8 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
-void mlx4_catas_init(void);
+int mlx4_catas_init(struct mlx4_dev *dev);
+void mlx4_catas_end(struct mlx4_dev *dev);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -1160,13 +1172,14 @@ enum {
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
int mlx4_multi_func_init(struct mlx4_dev *dev);
+int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
- unsigned long timeout);
+ u16 op, unsigned long timeout);
void mlx4_cq_tasklet_cb(unsigned long data);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
@@ -1176,7 +1189,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
-void mlx4_handle_catas_err(struct mlx4_dev *dev);
+void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
@@ -1354,6 +1367,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
enum mlx4_zone_flags {
MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 944a112dff37..2a8268e6be15 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -390,6 +390,7 @@ struct mlx4_en_dev {
struct pci_dev *pdev;
struct mutex state_lock;
struct net_device *pndev[MLX4_MAX_PORTS + 1];
+ struct net_device *upper[MLX4_MAX_PORTS + 1];
u32 port_cnt;
bool device_up;
struct mlx4_en_profile profile;
@@ -410,6 +411,7 @@ struct mlx4_en_dev {
unsigned long overflow_period;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+ struct notifier_block nb;
};
@@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev,
struct hwtstamp_config ts_config,
netdev_features_t new_features);
+int mlx4_en_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr);
+
/*
* Functions for time stamping
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7094a9c70fd5..78f51e103880 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -598,14 +598,11 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
if (err)
return err;
- mpt_entry->start = cpu_to_be64(mr->iova);
- mpt_entry->length = cpu_to_be64(mr->size);
- mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
-
- mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
- MLX4_MPT_PD_FLAG_EN_INV);
- mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
- MLX4_MPT_FLAG_SW_OWNS);
+ mpt_entry->start = cpu_to_be64(iova);
+ mpt_entry->length = cpu_to_be64(size);
+ mpt_entry->entity_size = cpu_to_be32(page_shift);
+ mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
+ MLX4_MPT_FLAG_SW_OWNS));
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_addr = 0;
@@ -708,13 +705,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
if (!mtts)
return -ENOMEM;
- dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
npages * sizeof (u64), DMA_TO_DEVICE);
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
npages * sizeof (u64), DMA_TO_DEVICE);
return 0;
@@ -1020,13 +1017,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
/* Make sure MPT status is visible before writing MTT entries */
wmb();
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+ dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
npages * sizeof(u64), DMA_TO_DEVICE);
for (i = 0; i < npages; ++i)
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+ dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
npages * sizeof(u64), DMA_TO_DEVICE);
fmr->mpt->key = cpu_to_be32(key);
@@ -1155,7 +1152,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
- MLX4_CMD_NATIVE);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 74216071201f..609c59dc854e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
return -ENOMEM;
if (mlx4_is_slave(dev))
- offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+ offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
+ 2) /
dev->caps.uar_page_size);
else
offset = uar->index;
- uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
+ uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
+ + offset;
uar->map = NULL;
return 0;
}
@@ -212,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
list_add(&uar->bf_list, &priv->bf_list);
}
- bf->uar = uar;
idx = ffz(uar->free_bf_bmap);
uar->free_bf_bmap |= 1 << idx;
bf->uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 30eb1ead0fe6..9f268f05290a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
- dev->num_vfs + 1);
+ dev->persist->num_vfs + 1);
}
- vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+ vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
@@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
- dev->num_vfs + 1);
+ dev->persist->num_vfs + 1);
}
gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
- vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+ vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
if (slave_gid <= gids % vfs)
return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
@@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
int num_eth_ports, err;
int i;
- if (slave < 0 || slave > dev->num_vfs)
+ if (slave < 0 || slave > dev->persist->num_vfs)
return;
actv_ports = mlx4_get_active_ports(dev, slave);
@@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
return -EINVAL;
slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
- num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+ num_vfs = bitmap_weight(slaves_pport.slaves,
+ dev->persist->num_vfs + 1) - 1;
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
@@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
dev, &exclusive_ports);
num_vfs_before += bitmap_weight(
slaves_pport_actv.slaves,
- dev->num_vfs + 1);
+ dev->persist->num_vfs + 1);
}
/* candidate_slave_gid isn't necessarily the correct slave, but
@@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
dev, &exclusive_ports);
slave_gid += bitmap_weight(
slaves_pport_actv.slaves,
- dev->num_vfs + 1);
+ dev->persist->num_vfs + 1);
}
}
*slave_id = slave_gid;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 1586ecce13c7..2bb8553bd905 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
+ if (states[i + 1] != MLX4_QP_STATE_RTR)
+ context->params2 &= ~MLX4_QP_BIT_FPP;
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index ea1c6d092145..0076d88587ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev)
goto out;
}
- pcie_cap = pci_pcie_cap(dev->pdev);
+ pcie_cap = pci_pcie_cap(dev->persist->pdev);
for (i = 0; i < 64; ++i) {
if (i == 22 || i == 23)
continue;
- if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
+ if (pci_read_config_dword(dev->persist->pdev, i * 4,
+ hca_header + i)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
goto out;
}
}
- reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
+ reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+ MLX4_RESET_BASE,
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
@@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev)
end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
do {
- if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
- vendor != 0xffff)
+ if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
+ &vendor) && vendor != 0xffff)
break;
msleep(1);
@@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev)
/* Now restore the PCI headers */
if (pcie_cap) {
devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
- if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
+ if (pcie_capability_write_word(dev->persist->pdev,
+ PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
- if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
+ if (pcie_capability_write_word(dev->persist->pdev,
+ PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
@@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev)
if (i * 4 == PCI_COMMAND)
continue;
- if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
+ if (pci_write_config_dword(dev->persist->pdev, i * 4,
+ hca_header[i])) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
i);
@@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev)
}
}
- if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
+ if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4efbd1eca611..486e3d26cd4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
int allocated, free, reserved, guaranteed, from_free;
int from_rsvd;
- if (slave > dev->num_vfs)
+ if (slave > dev->persist->num_vfs)
return -EINVAL;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
- res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
free = (port > 0) ? res_alloc->res_port_free[port - 1] :
res_alloc->res_free;
@@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
if (!err) {
/* grant the request */
if (port > 0) {
- res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
} else {
@@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int allocated, guaranteed, from_rsvd;
- if (slave > dev->num_vfs)
+ if (slave > dev->persist->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
- res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
guaranteed = res_alloc->guaranteed[slave];
@@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
}
if (port > 0) {
- res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+ res_alloc->allocated[(port - 1) *
+ (dev->persist->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
res_alloc->res_port_rsvd[port - 1] += from_rsvd;
} else {
@@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev,
enum mlx4_resource res_type,
int vf, int num_instances)
{
- res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+ res_alloc->guaranteed[vf] = num_instances /
+ (2 * (dev->persist->num_vfs + 1));
res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
if (vf == mlx4_master_func_num(dev)) {
res_alloc->res_free = num_instances;
@@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
- res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
- res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
+ sizeof(int), GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
+ sizeof(int), GFP_KERNEL);
if (i == RES_MAC || i == RES_VLAN)
res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
- (dev->num_vfs + 1) * sizeof(int),
- GFP_KERNEL);
+ (dev->persist->num_vfs
+ + 1) *
+ sizeof(int), GFP_KERNEL);
else
- res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ res_alloc->allocated = kzalloc((dev->persist->
+ num_vfs + 1) *
+ sizeof(int), GFP_KERNEL);
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
goto no_mem_err;
spin_lock_init(&res_alloc->alloc_lock);
- for (t = 0; t < dev->num_vfs + 1; t++) {
+ for (t = 0; t < dev->persist->num_vfs + 1; t++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, t);
switch (i) {
@@ -2531,7 +2541,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
/* Make sure that the PD bits related to the slave id are zeros. */
pd = mr_get_pd(inbox->buf);
pd_slave = (pd >> 17) & 0x7f;
- if (pd_slave != 0 && pd_slave != slave) {
+ if (pd_slave != 0 && --pd_slave != slave) {
err = -EPERM;
goto ex_abort;
}
@@ -2934,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
+ if (slave != mlx4_master_func_num(dev))
+ qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+
switch (qp_type) {
case MLX4_QP_ST_RC:
case MLX4_QP_ST_XRC:
@@ -4667,7 +4680,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
int state;
LIST_HEAD(tlist);
int eqn;
- struct mlx4_cmd_mailbox *mailbox;
err = move_all_busy(dev, slave, RES_EQ);
if (err)
@@ -4693,20 +4705,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
break;
case RES_EQ_HW:
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- cond_resched();
- continue;
- }
- err = mlx4_cmd_box(dev, slave, 0,
- eqn & 0xff, 0,
- MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_NATIVE);
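+ /* HW2SW_EQ is issued without an output mailbox */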
+ err = mlx4_cmd(dev, slave, eqn & 0xff,
+ 1, MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
slave, eqn);
- mlx4_free_cmd_mailbox(dev, mailbox);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 56779c1c7811..201ca6d76ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -121,7 +121,7 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
buf->direct.map);
else {
- if (BITS_PER_LONG == 64 && buf->direct.buf)
+ if (BITS_PER_LONG == 64)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10e1f1a18255..4878025e231c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -300,11 +300,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
param = qp->pid;
break;
case QP_STATE:
- param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
*is_str = 1;
break;
case QP_XPORT:
- param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
*is_str = 1;
break;
case QP_MTU:
@@ -464,7 +464,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
if (is_str)
- ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+ ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
else
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f4525619a07..d6651937d899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -903,12 +903,12 @@ static void remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id mlx5_core_pci_table[] = {
- { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
- { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
- { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
- { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
- { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
- { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
+ { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
+ { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
+ { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
+ { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
+ { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2fa6ae026e4f..10988fbf47eb 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4342,9 +4342,7 @@ static void ksz_init_timer(struct ksz_timer_info *info, int period,
{
info->max = 0;
info->period = period;
- init_timer(&info->timer);
- info->timer.function = function;
- info->timer.data = (unsigned long) data;
+ setup_timer(&info->timer, function, (unsigned long)data);
}
static void ksz_update_timer(struct ksz_timer_info *info)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 71af98bb72cb..1412f5af05ec 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4226,8 +4226,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
myri10ge_free_slices(mgp);
- if (mgp->msix_vectors != NULL)
- kfree(mgp->msix_vectors);
+ kfree(mgp->msix_vectors);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2552e550a78c..eb807b0dc72a 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1122,12 +1122,12 @@ again:
}
#ifdef NS83820_VLAN_ACCEL_SUPPORT
- if(vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/* fetch the vlan tag info out of the
* ancillary data if the vlan code
* is using hw vlan acceleration
*/
- short tag = vlan_tx_tag_get(skb);
+ short tag = skb_vlan_tag_get(skb);
extsts |= (EXTSTS_VPKT | htons(tag));
}
#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f5e4b820128b..a4cdf2f8041a 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
}
queue = 0;
- if (vlan_tx_tag_present(skb))
- vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb))
+ vlan_tag = skb_vlan_tag_get(skb);
if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip;
@@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
if (sp->s2io_entries[i].in_use == MSIX_FLG) {
if (sp->s2io_entries[i].type ==
MSIX_RING_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-RX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_ring_handle,
@@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
sp->s2io_entries[i].arg);
} else if (sp->s2io_entries[i].type ==
MSIX_ALARM_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-TX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_fifo_handle,
@@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: UDP Fragmentation Offload(UFO) enabled\n",
dev->name);
/* Initialize device name */
- sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
+ sp->product_name);
if (vlan_tag_strip)
sp->vlan_strip_flag = 1;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 2bbd01fcb9b0..6223930a8155 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -4637,7 +4637,7 @@ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
vpath->ringh = NULL;
vpath->fifoh = NULL;
memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
- vpath->stats_block = 0;
+ vpath->stats_block = NULL;
vpath->hw_stats = NULL;
vpath->hw_stats_sav = NULL;
vpath->sw_stats = NULL;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cc0485e3c621..50d5604833ed 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, __func__, __LINE__,
fifo_hw, dtr, dtr_priv);
- if (vlan_tx_tag_present(skb)) {
- u16 vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag = skb_vlan_tag_get(skb);
vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f39cae620f61..a41bb5e6b954 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
/* vlan tag */
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
else
start_tx->txvlan = 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..e0c31e3947d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -176,9 +176,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
static void
netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
{
- if (recv_ctx->sds_rings != NULL)
- kfree(recv_ctx->sds_rings);
-
+ kfree(recv_ctx->sds_rings);
recv_ctx->sds_rings = NULL;
}
@@ -1893,9 +1891,9 @@ netxen_tso_check(struct net_device *netdev,
protocol = vh->h_vlan_encapsulated_proto;
flags = FLAGS_VLAN_TAGGED;
- } else if (vlan_tx_tag_present(skb)) {
+ } else if (skb_vlan_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
- vid = vlan_tx_tag_get(skb);
+ vid = skb_vlan_tag_get(skb);
netxen_set_tx_vlan_tci(first_desc, vid);
vlan_oob = 1;
}
@@ -2388,7 +2386,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
work_done = netxen_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
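+ /* An incomplete TX ring means more work is pending: claim the full budget so NAPI polls again */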
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 18e5de72e9b4..d4b5085a21fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
+#include <linux/printk.h>
#include "qlcnic.h"
@@ -320,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
vlan_id = ntohs(vh->h_vlan_TCI);
- } else if (vlan_tx_tag_present(skb)) {
- vlan_id = vlan_tx_tag_get(skb);
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_id = skb_vlan_tag_get(skb);
}
}
@@ -472,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
- } else if (vlan_tx_tag_present(skb)) {
+ } else if (skb_vlan_tag_present(skb)) {
flags = QLCNIC_FLAGS_VLAN_OOB;
- vlan_tci = vlan_tx_tag_get(skb);
+ vlan_tci = skb_vlan_tag_get(skb);
}
if (unlikely(adapter->tx_pvid)) {
if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
@@ -967,7 +968,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +998,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
+ work_done = budget;
}
return work_done;
@@ -1465,14 +1474,14 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
- int i;
- unsigned char *data = skb->data;
+ if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+ char prefix[30];
+
+ scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+ dev_name(&adapter->pdev->dev), __func__);
- pr_info(KERN_INFO "\n");
- for (i = 0; i < skb->len; i++) {
- QLCDB(adapter, DRV, "%02x ", data[i]);
- if ((i & 0x0f) == 8)
- pr_info(KERN_INFO "\n");
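+ /* Hex-dump the whole frame, 16 bytes per line, under a device/function prefix */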
+ print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+ skb->data, skb->len, true);
}
}
@@ -1950,7 +1959,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1973,7 +1987,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1995,6 +2014,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* need a repoll */
+ work_done = budget;
}
return work_done;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2528c3fb6b90..a430a34a4434 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -294,9 +294,7 @@ int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
- if (recv_ctx->sds_rings != NULL)
- kfree(recv_ctx->sds_rings);
-
+ kfree(recv_ctx->sds_rings);
recv_ctx->sds_rings = NULL;
}
@@ -1257,8 +1255,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
if (fw_dump->tmpl_hdr == NULL ||
adapter->fw_version > prev_fw_version) {
- if (fw_dump->tmpl_hdr)
- vfree(fw_dump->tmpl_hdr);
+ vfree(fw_dump->tmpl_hdr);
if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
dev_info(&pdev->dev,
"Supports FW dump capability\n");
@@ -2374,13 +2371,12 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
+ if (tx_ring) {
vfree(tx_ring->cmd_buf_arr);
tx_ring->cmd_buf_arr = NULL;
}
}
- if (adapter->tx_ring != NULL)
- kfree(adapter->tx_ring);
+ kfree(adapter->tx_ring);
}
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
@@ -2758,13 +2754,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
}
qlcnic_dcb_free(adapter->dcb);
-
qlcnic_detach(adapter);
-
- if (adapter->npars != NULL)
- kfree(adapter->npars);
- if (adapter->eswitch != NULL)
- kfree(adapter->eswitch);
+ kfree(adapter->npars);
+ kfree(adapter->eswitch);
if (qlcnic_82xx_check(adapter))
qlcnic_clr_all_drv_state(adapter, 0);
@@ -2932,13 +2924,13 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
{
- if (adapter->fhash.fmax && adapter->fhash.fhead)
+ if (adapter->fhash.fmax)
kfree(adapter->fhash.fhead);
adapter->fhash.fhead = NULL;
adapter->fhash.fmax = 0;
- if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
+ if (adapter->rx_fhash.fmax)
kfree(adapter->rx_fhash.fhead);
adapter->rx_fhash.fmax = 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index c9f57fb84b9e..332bb8a3f430 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1407,8 +1407,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
current_version = qlcnic_83xx_get_fw_version(adapter);
if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
- if (fw_dump->tmpl_hdr)
- vfree(fw_dump->tmpl_hdr);
+ vfree(fw_dump->tmpl_hdr);
if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
dev_info(&pdev->dev, "Supports FW dump capability\n");
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 6c904a6cad2a..8011ef3e7707 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status = 0;
+ bool need_restart = netif_running(ndev);
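+ /* Only bounce the adapter around the feature change if the interface is currently running */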
- status = ql_adapter_down(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring down the adapter\n");
- return status;
+ if (need_restart) {
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
}
/* update the features with the recent change */
ndev->features = features;
- status = ql_adapter_up(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring up the adapter\n");
- return status;
+ if (need_restart) {
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
}
+
return status;
}
@@ -2660,11 +2666,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
+ "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
- mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+ mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
}
tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (tso < 0) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 9c31e46d1eee..d79e33b3c191 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp)
static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
- return vlan_tx_tag_present(skb) ?
- TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+ return skb_vlan_tag_present(skb) ?
+ TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 14a1c5cec3a5..ad0020af2193 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev,
static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
- return (vlan_tx_tag_present(skb)) ?
- TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+ return (skb_vlan_tag_present(skb)) ?
+ TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
@@ -4915,7 +4915,7 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
- rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4948,7 +4948,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, 0x3f);
RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
RTL_W8(Config4, RTL_R8(Config4) | 0x01);
- rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4964,7 +4964,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
rtl_tx_performance_tweak(tp->pci_dev,
- (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -7049,6 +7049,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 status, len;
u32 opts[2];
int frags;
+ bool stop_queue;
if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7105,11 +7106,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- RTL_W8(TxPoll, NPQ);
+ stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
- mmiowb();
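+ /* Defer the TX doorbell while xmit_more promises further frames, unless the queue is (being) stopped */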
+ if (!skb->xmit_more || stop_queue ||
+ netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
+ RTL_W8(TxPoll, NPQ);
+
+ mmiowb();
+ }
- if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+ if (stop_queue) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c29ba80ae02b..4da8bd263997 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -473,6 +476,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
.apr = 1,
.mpr = 1,
@@ -495,6 +499,9 @@ static struct sh_eth_cpu_data r8a779x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
+
+ .trscer_err_mask = DESC_I_RINT8,
.apr = 1,
.mpr = 1,
@@ -590,7 +597,7 @@ static struct sh_eth_cpu_data sh7757_data = {
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
int i;
- unsigned long mahr[2], malr[2];
+ u32 mahr[2], malr[2];
/* save MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -856,6 +863,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->eesr_err_check)
cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->trscer_err_mask)
+ cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
@@ -981,7 +991,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
}
}
-static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
+static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
return EDTRR_TRNS_GETHER;
@@ -1113,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1126,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
sh_eth_set_receive_align(skb);
@@ -1135,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 16 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
- dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
- DMA_FROM_DEVICE);
- rxdesc->addr = virt_to_phys(skb->data);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[i] = skb;
+ rxdesc->addr = dma_addr;
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1294,7 +1310,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Frame recv control (enable multiple-packets per rx irq) */
sh_eth_write(ndev, RMCR_RNC, RMCR);
- sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+ sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
@@ -1309,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- if (start)
+ if (start) {
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ }
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1349,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
return ret;
}
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+
+ /* Deactivate all TX descriptors, so DMA should stop at next
+ * packet boundary if it's currently running
+ */
+ for (i = 0; i < mdp->num_tx_ring; i++)
+ mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+ /* Disable TX FIFO egress to MAC */
+ sh_eth_rcv_snd_disable(ndev);
+
+ /* Stop RX DMA at next packet boundary */
+ sh_eth_write(ndev, 0, EDRRR);
+
+ /* Aside from TX DMA, we can't tell when the hardware is
+ * really stopped, so we need to reset to make sure.
+ * Before doing that, wait for long enough to *probably*
+ * finish transmitting the last packet and poll stats.
+ */
+ msleep(2); /* max frame time at 10 Mbps < 1250 us */
+ sh_eth_get_stats(ndev);
+ sh_eth_reset(ndev);
+}
+
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
@@ -1393,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
@@ -1440,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, rxdesc->addr,
+ ALIGN(mdp->rx_buf_sz, 16),
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
@@ -1462,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = virt_to_phys(skb->data);
+ rxdesc->addr = dma_addr;
}
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
@@ -1514,7 +1565,7 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev)
}
/* error control function */
-static void sh_eth_error(struct net_device *ndev, int intr_status)
+static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 felic_stat;
@@ -1566,7 +1617,6 @@ ignore_link:
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Abort\n");
}
}
@@ -1585,13 +1635,11 @@ ignore_link:
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
- netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1630,7 +1678,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- unsigned long intr_status, intr_enable;
+ u32 intr_status, intr_enable;
spin_lock(&mdp->lock);
@@ -1646,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
ret = IRQ_HANDLED;
else
- goto other_irq;
+ goto out;
+
+ if (!likely(mdp->irq_enabled)) {
+ sh_eth_write(ndev, 0, EESIPR);
+ goto out;
+ }
if (intr_status & EESR_RX_CHECK) {
if (napi_schedule_prep(&mdp->napi)) {
@@ -1656,7 +1709,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
__napi_schedule(&mdp->napi);
} else {
netdev_warn(ndev,
- "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
+ "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
intr_status, intr_enable);
}
}
@@ -1677,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
sh_eth_error(ndev, intr_status);
}
-other_irq:
+out:
spin_unlock(&mdp->lock);
return ret;
@@ -1689,7 +1742,7 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
napi);
struct net_device *ndev = napi->dev;
int quota = budget;
- unsigned long intr_status;
+ u32 intr_status;
for (;;) {
intr_status = sh_eth_read(ndev, EESR);
@@ -1705,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* Reenable Rx interrupts */
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (mdp->irq_enabled)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
return budget - quota;
}
@@ -1820,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_ethtool_gset(mdp->phydev, ecmd);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1834,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
/* disable tx and rx */
@@ -1868,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_start_aneg(mdp->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1952,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return -EINVAL;
if (netif_running(ndev)) {
+ netif_device_detach(ndev);
netif_tx_disable(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
- sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+
+ /* Serialise with the interrupt handler and NAPI, then
+ * disable interrupts. We have to clear the
+ * irq_enabled flag first to ensure that interrupts
+ * won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
synchronize_irq(ndev->irq);
- }
+ napi_synchronize(&mdp->napi);
+ sh_eth_write(ndev, 0x0000, EESIPR);
- /* Free all the skbuffs in the Rx queue. */
- sh_eth_ring_free(ndev);
- /* Free DMA buffer */
- sh_eth_free_dma_buffer(mdp);
+ sh_eth_dev_exit(ndev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+ }
/* Set new parameters */
mdp->num_rx_ring = ring->rx_pending;
mdp->num_tx_ring = ring->tx_pending;
- ret = sh_eth_ring_init(ndev);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
- return ret;
- }
- ret = sh_eth_dev_init(ndev, false);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
- return ret;
- }
-
if (netif_running(ndev)) {
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+ __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+ __func__);
+ return ret;
+ }
+
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* Setting the Rx mode will start the Rx process. */
sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_wake_queue(ndev);
+ netif_device_attach(ndev);
}
return 0;
@@ -2060,7 +2133,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_err(mdp, timer, ndev,
"transmit timed out, status %8.8x, resetting...\n",
- (int)sh_eth_read(ndev, EESR));
+ sh_eth_read(ndev, EESR));
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2101,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
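+ /* Pad short frames to the minimum Ethernet length before they are DMA-mapped */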
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
@@ -2110,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETH_ZLEN)
- txdesc->buffer_length = ETH_ZLEN;
- else
- txdesc->buffer_length = skb->len;
+ if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txdesc->buffer_length = skb->len;
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2165,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev)
netif_stop_queue(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
+ /* Serialise with the interrupt handler and NAPI, then disable
+ * interrupts. We have to clear the irq_enabled flag first to
+ * ensure that interrupts won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
+ synchronize_irq(ndev->irq);
+ napi_disable(&mdp->napi);
sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+ sh_eth_dev_exit(ndev);
- sh_eth_get_stats(ndev);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
phy_disconnect(mdp->phydev);
+ mdp->phydev = NULL;
}
free_irq(ndev->irq, ndev);
- napi_disable(&mdp->napi);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2410,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int i, ret;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return 0;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2433,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2443,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
}
}
-/* Multicast reception directions set */
-static void sh_eth_set_multicast_list(struct net_device *ndev)
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 ecmr_bits;
@@ -2455,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
/* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
- ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+ ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
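+ /* Request hardware multicast filtering (MCT) only on chips that have a TSU */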
+ if (mdp->cd->tsu)
+ ecmr_bits |= ECMR_MCT;
if (!(ndev->flags & IFF_MULTICAST)) {
sh_eth_tsu_purge_mcast(ndev);
@@ -2484,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
}
}
}
- } else {
- /* Normal, unicast/broadcast-only mode. */
- ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
}
/* update the ethernet mode */
@@ -2694,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -2706,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
- .ndo_set_rx_mode = sh_eth_set_multicast_list,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
@@ -2940,6 +3019,36 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int sh_eth_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = sh_eth_close(ndev);
+ }
+
+ return ret;
+}
+
+static int sh_eth_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (netif_running(ndev)) {
+ ret = sh_eth_open(ndev);
+ if (ret < 0)
+ return ret;
+ netif_device_attach(ndev);
+ }
+
+ return ret;
+}
+#endif
+
static int sh_eth_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
@@ -2953,8 +3062,8 @@ static int sh_eth_runtime_nop(struct device *dev)
}
static const struct dev_pm_ops sh_eth_dev_pm_ops = {
- .runtime_suspend = sh_eth_runtime_nop,
- .runtime_resume = sh_eth_runtime_nop,
+ SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
+ SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 22301bf9c21d..259d03f353e1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
DESC_I_RINT1 = 0x0001,
};
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
/* RPADIR */
enum RPADIR_BIT {
RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -457,18 +459,21 @@ struct sh_eth_cpu_data {
/* mandatory initialize value */
int register_type;
- unsigned long eesipr_value;
+ u32 eesipr_value;
/* optional initialize value */
- unsigned long ecsr_value;
- unsigned long ecsipr_value;
- unsigned long fdr_value;
- unsigned long fcftr_value;
- unsigned long rpadir_value;
+ u32 ecsr_value;
+ u32 ecsipr_value;
+ u32 fdr_value;
+ u32 fcftr_value;
+ u32 rpadir_value;
/* interrupt checking mask */
- unsigned long tx_check;
- unsigned long eesr_err_check;
+ u32 tx_check;
+ u32 eesr_err_check;
+
+ /* Error mask */
+ u32 trscer_err_mask;
/* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */
@@ -508,6 +513,7 @@ struct sh_eth_private {
u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
+ bool irq_enabled;
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
@@ -537,7 +543,7 @@ static inline void sh_eth_soft_swap(char *src, int len)
#endif
}
-static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
+static inline void sh_eth_write(struct net_device *ndev, u32 data,
int enum_index)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -545,8 +551,7 @@ static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
}
-static inline unsigned long sh_eth_read(struct net_device *ndev,
- int enum_index)
+static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -559,14 +564,13 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
return mdp->tsu_addr + mdp->reg_offset[enum_index];
}
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
- unsigned long data, int enum_index)
+static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+ int enum_index)
{
iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
}
-static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
- int enum_index)
+static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2f398fa4b9e6..34389b6aa67c 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -806,13 +806,13 @@ static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
- return (void *) desc_info->desc->cookie;
+ return (void *)(uintptr_t)desc_info->desc->cookie;
}
static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
void *ptr)
{
- desc_info->desc->cookie = (long) ptr;
+ desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
@@ -3026,11 +3026,17 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
container_of(work, struct rocker_fdb_learn_work, work);
bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
+ struct netdev_switch_notifier_fdb_info info;
+
+ info.addr = lw->addr;
+ info.vid = lw->vid;
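+ /* Report the learned/removed FDB entry to switchdev notifier listeners */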
if (learned && removing)
- br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid);
+ call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
+ lw->dev, &info.info);
else if (learned && !removing)
- br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid);
+ call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
+ lw->dev, &info.info);
kfree(work);
}
@@ -3565,6 +3571,8 @@ nest_cancel:
rocker_tlv_nest_cancel(desc_info, frags);
out:
dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+
return NETDEV_TX_OK;
}
@@ -3668,7 +3676,8 @@ static int rocker_fdb_fill_info(struct sk_buff *skb,
if (vid && nla_put_u16(skb, NDA_VLAN, vid))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -3713,7 +3722,7 @@ skip:
}
static int rocker_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh, u16 flags)
{
struct rocker_port *rocker_port = netdev_priv(dev);
struct nlattr *protinfo;
@@ -3824,11 +3833,145 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
+static struct rocker_port_stats {
+ char str[ETH_GSTRING_LEN];
+ int type;
+} rocker_port_stats[] = {
+ { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
+ { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
+ { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
+ { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
+
+ { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
+ { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
+ { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
+ { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
+};
+
+#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
+
+static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
+ memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int
+rocker_cmd_get_port_stats_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_stats;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
+ return -EMSGSIZE;
+
+ cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_stats)
+ return -EMSGSIZE;
+
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+
+ rocker_tlv_nest_end(desc_info, cmd_stats);
+
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+ struct rocker_tlv *pattr;
+ u32 lport;
+ u64 *data = priv;
+ int i;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+
+ if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
+ return -EIO;
+
+ lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
+ if (lport != rocker_port->lport)
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
+ pattr = stats_attrs[rocker_port_stats[i].type];
+ if (!pattr)
+ continue;
+
+ data[i] = rocker_tlv_get_u64(pattr);
+ }
+
+ return 0;
+}
+
+static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
+ void *priv)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_get_port_stats_prep, NULL,
+ rocker_cmd_get_port_stats_ethtool_proc,
+ priv, false);
+}
+
+static void rocker_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
+ data[i] = 0;
+ }
+
+ return;
+}
+
+static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ROCKER_PORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops rocker_port_ethtool_ops = {
.get_settings = rocker_port_get_settings,
.set_settings = rocker_port_set_settings,
.get_drvinfo = rocker_port_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_strings = rocker_port_get_strings,
+ .get_ethtool_stats = rocker_port_get_stats,
+ .get_sset_count = rocker_port_get_sset_count,
};
/*****************
@@ -3850,12 +3993,22 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
/* Cleanup tx descriptors */
while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
+ struct sk_buff *skb;
+
err = rocker_desc_err(desc_info);
if (err && net_ratelimit())
netdev_err(rocker_port->dev, "tx desc received with err %d\n",
err);
rocker_tx_desc_frags_unmap(rocker_port, desc_info);
- dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info));
+
+ skb = rocker_desc_cookie_ptr_get(desc_info);
+ if (err == 0) {
+ rocker_port->dev->stats.tx_packets++;
+ rocker_port->dev->stats.tx_bytes += skb->len;
+ } else
+ rocker_port->dev->stats.tx_errors++;
+
+ dev_kfree_skb_any(skb);
credits++;
}
@@ -3888,6 +4041,10 @@ static int rocker_port_rx_proc(struct rocker *rocker,
rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
skb_put(skb, rx_len);
skb->protocol = eth_type_trans(skb, rocker_port->dev);
+
+ rocker_port->dev->stats.rx_packets++;
+ rocker_port->dev->stats.rx_bytes += skb->len;
+
netif_receive_skb(skb);
return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
@@ -3921,6 +4078,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
err);
}
+ if (err)
+ rocker_port->dev->stats.rx_errors++;
+
rocker_desc_gen_clear(desc_info);
rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
credits++;
@@ -4004,7 +4164,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_SWITCH_OFFLOAD;
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 8d2865ba634c..a5bc432feada 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -127,6 +127,9 @@ enum {
ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
+ ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_STATS,
+
__ROCKER_TLV_CMD_TYPE_MAX,
ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
};
@@ -146,6 +149,24 @@ enum {
__ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
};
+enum {
+ ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
+ ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */
+
+ ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */
+
+ ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */
+
+ __ROCKER_TLV_CMD_PORT_STATS_MAX,
+ ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1,
+};
+
enum rocker_port_mode {
ROCKER_PORT_MODE_OF_DPA,
};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 698494481d18..c8a01ee4d25e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -133,9 +133,8 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
return false;
priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
+ setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
+ (unsigned long)priv);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
@@ -365,6 +364,26 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
return 0;
}
+
+/**
+ * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
+ * @dev: net device structure
+ * @p: RX descriptor whose buffer is being freed
+ * @i: index of the buffer within the ring
+ * @dma_buf_sz: size of the mapped DMA buffer
+ * @rx_ring: RX queue that owns the buffer
+ * Description: this function frees the skb and DMA mapping set up by
+ * sxgbe_init_rx_buffers()
+ */
+static void sxgbe_free_rx_buffers(struct net_device *dev,
+ struct sxgbe_rx_norm_desc *p, int i,
+ unsigned int dma_buf_sz,
+ struct sxgbe_rx_queue *rx_ring)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ kfree_skb(rx_ring->rx_skbuff[i]);
+ dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
+ dma_buf_sz, DMA_FROM_DEVICE);
+}
+
/**
* init_tx_ring - init the TX descriptor ring
* @dev: net device structure
@@ -457,7 +476,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
/* RX ring is not allocated */
if (rx_ring == NULL) {
netdev_err(dev, "No memory for RX queue\n");
- goto error;
+ return -ENOMEM;
}
/* assign queue number */
@@ -469,18 +488,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
&rx_ring->dma_rx_phy, GFP_KERNEL);
if (rx_ring->dma_rx == NULL)
- goto error;
+ return -ENOMEM;
/* allocate memory for RX skbuff array */
rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
sizeof(dma_addr_t), GFP_KERNEL);
- if (rx_ring->rx_skbuff_dma == NULL)
- goto dmamem_err;
+ if (!rx_ring->rx_skbuff_dma) {
+ ret = -ENOMEM;
+ goto err_free_dma_rx;
+ }
rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
sizeof(struct sk_buff *), GFP_KERNEL);
- if (rx_ring->rx_skbuff == NULL)
- goto rxbuff_err;
+ if (!rx_ring->rx_skbuff) {
+ ret = -ENOMEM;
+ goto err_free_skbuff_dma;
+ }
/* initialise the buffers */
for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
@@ -489,7 +512,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
ret = sxgbe_init_rx_buffers(dev, p, desc_index,
bfsize, rx_ring);
if (ret)
- goto err_init_rx_buffers;
+ goto err_free_rx_buffers;
}
/* initialise counters */
@@ -499,18 +522,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
return 0;
-err_init_rx_buffers:
- while (--desc_index >= 0)
- free_rx_ring(priv->device, rx_ring, desc_index);
+err_free_rx_buffers:
+ while (--desc_index >= 0) {
+ struct sxgbe_rx_norm_desc *p;
+
+ p = rx_ring->dma_rx + desc_index;
+ sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
+ }
kfree(rx_ring->rx_skbuff);
-rxbuff_err:
+err_free_skbuff_dma:
kfree(rx_ring->rx_skbuff_dma);
-dmamem_err:
+err_free_dma_rx:
dma_free_coherent(priv->device,
rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
-error:
- return -ENOMEM;
+
+ return ret;
}
/**
* free_tx_ring - free the TX descriptor ring
@@ -1009,10 +1036,9 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
struct sxgbe_tx_queue *p = priv->txq[queue_num];
p->tx_coal_frames = SXGBE_TX_FRAMES;
p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
- init_timer(&p->txtimer);
+ setup_timer(&p->txtimer, sxgbe_tx_timer,
+ (unsigned long)&priv->txq[queue_num]);
p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
- p->txtimer.data = (unsigned long)&priv->txq[queue_num];
- p->txtimer.function = sxgbe_tx_timer;
add_timer(&p->txtimer);
}
}
@@ -1274,7 +1300,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
ctxt_desc_req = 1;
- if (unlikely(vlan_tx_tag_present(skb) ||
+ if (unlikely(skb_vlan_tag_present(skb) ||
((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
tqueue->hwts_tx_en)))
ctxt_desc_req = 1;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 866560ea9e18..b02eed12bfc5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
}
- /* Get MAC address if available (DT) */
- if (mac)
- ether_addr_copy(priv->dev->dev_addr, mac);
-
priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed\n", __func__);
@@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
goto err_drv_remove;
}
+ /* Get MAC address if available (DT) */
+ if (mac)
+ ether_addr_copy(priv->dev->dev_addr, mac);
+
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 9468e64e6007..3e97a8b43147 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -5,8 +5,9 @@
config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
- depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
- BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA
+ depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
+ ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
+ PCMCIA || SUPERH || XTENSA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -38,8 +39,9 @@ config SMC91X
tristate "SMC 91C9x/91C1xxx support"
select CRC32
select MII
- depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB)
+ depends on !OF || GPIOLIB
+ depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
+ M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 2a38dacbbd27..be67baf5f677 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -216,6 +216,27 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#include <unit/smc91111.h>
+#elif defined(CONFIG_ATARI)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define RPC_LSA_DEFAULT RPC_LED_100_10
+#define RPC_LSB_DEFAULT RPC_LED_TX_RX
+
#elif defined(CONFIG_ARCH_MSM)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ac4d5629d905..73c2715a27f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
- dwmac-sti.o dwmac-socfpga.o
+ dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
new file mode 100644
index 000000000000..6249a4ec08f0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -0,0 +1,437 @@
+/**
+ * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
+ *
+ * Copyright (C) 2014 Chen-Zhi (Roger Chen)
+ *
+ * Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+struct rk_priv_data {
+ struct platform_device *pdev;
+ int phy_iface;
+ struct regulator *regulator;
+
+ bool clk_enabled;
+ bool clock_input;
+
+ struct clk *clk_mac;
+ struct clk *clk_mac_pll;
+ struct clk *gmac_clkin;
+ struct clk *mac_clk_rx;
+ struct clk *mac_clk_tx;
+ struct clk *clk_mac_ref;
+ struct clk *clk_mac_refout;
+ struct clk *aclk_mac;
+ struct clk *pclk_mac;
+
+ int tx_delay;
+ int rx_delay;
+
+ struct regmap *grf;
+};
+
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
+
+#define RK3288_GRF_SOC_CON1 0x0248
+#define RK3288_GRF_SOC_CON3 0x0250
+#define RK3288_GRF_GPIO3D_E 0x01ec
+#define RK3288_GRF_GPIO4A_E 0x01f0
+#define RK3288_GRF_GPIO4B_E 0x01f4
+
+/*RK3288_GRF_SOC_CON1*/
+#define GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define GMAC_FLOW_CTRL GRF_BIT(9)
+#define GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define GMAC_SPEED_100M GRF_BIT(10)
+#define GMAC_RMII_CLK_25M GRF_BIT(11)
+#define GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define GMAC_RMII_MODE GRF_BIT(14)
+#define GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+
+/*RK3288_GRF_SOC_CON3*/
+#define GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
+ GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
+ GMAC_CLK_RX_DL_CFG(rx_delay) |
+ GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
+}
+
+static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static int gmac_clk_init(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ bsp_priv->clk_enabled = false;
+
+ bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
+ if (IS_ERR(bsp_priv->mac_clk_rx))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "mac_clk_rx");
+
+ bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
+ if (IS_ERR(bsp_priv->mac_clk_tx))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "mac_clk_tx");
+
+ bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
+ if (IS_ERR(bsp_priv->aclk_mac))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "aclk_mac");
+
+ bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
+ if (IS_ERR(bsp_priv->pclk_mac))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "pclk_mac");
+
+ bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(bsp_priv->clk_mac))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "stmmaceth");
+
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+ bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
+ if (IS_ERR(bsp_priv->clk_mac_ref))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "clk_mac_ref");
+
+ if (!bsp_priv->clock_input) {
+ bsp_priv->clk_mac_refout =
+ devm_clk_get(dev, "clk_mac_refout");
+ if (IS_ERR(bsp_priv->clk_mac_refout))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "clk_mac_refout");
+ }
+ }
+
+ if (bsp_priv->clock_input) {
+ dev_info(dev, "%s: clock input from PHY\n", __func__);
+ } else {
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
+ }
+
+ return 0;
+}
+
+static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
+{
+ int phy_iface = bsp_priv->phy_iface;
+
+ if (enable) {
+ if (!bsp_priv->clk_enabled) {
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ if (!IS_ERR(bsp_priv->mac_clk_rx))
+ clk_prepare_enable(
+ bsp_priv->mac_clk_rx);
+
+ if (!IS_ERR(bsp_priv->clk_mac_ref))
+ clk_prepare_enable(
+ bsp_priv->clk_mac_ref);
+
+ if (!IS_ERR(bsp_priv->clk_mac_refout))
+ clk_prepare_enable(
+ bsp_priv->clk_mac_refout);
+ }
+
+ if (!IS_ERR(bsp_priv->aclk_mac))
+ clk_prepare_enable(bsp_priv->aclk_mac);
+
+ if (!IS_ERR(bsp_priv->pclk_mac))
+ clk_prepare_enable(bsp_priv->pclk_mac);
+
+ if (!IS_ERR(bsp_priv->mac_clk_tx))
+ clk_prepare_enable(bsp_priv->mac_clk_tx);
+
+ /**
+ * if (!IS_ERR(bsp_priv->clk_mac))
+ * clk_prepare_enable(bsp_priv->clk_mac);
+ */
+ mdelay(5);
+ bsp_priv->clk_enabled = true;
+ }
+ } else {
+ if (bsp_priv->clk_enabled) {
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ if (!IS_ERR(bsp_priv->mac_clk_rx))
+ clk_disable_unprepare(
+ bsp_priv->mac_clk_rx);
+
+ if (!IS_ERR(bsp_priv->clk_mac_ref))
+ clk_disable_unprepare(
+ bsp_priv->clk_mac_ref);
+
+ if (!IS_ERR(bsp_priv->clk_mac_refout))
+ clk_disable_unprepare(
+ bsp_priv->clk_mac_refout);
+ }
+
+ if (!IS_ERR(bsp_priv->aclk_mac))
+ clk_disable_unprepare(bsp_priv->aclk_mac);
+
+ if (!IS_ERR(bsp_priv->pclk_mac))
+ clk_disable_unprepare(bsp_priv->pclk_mac);
+
+ if (!IS_ERR(bsp_priv->mac_clk_tx))
+ clk_disable_unprepare(bsp_priv->mac_clk_tx);
+ /**
+ * if (!IS_ERR(bsp_priv->clk_mac))
+ * clk_disable_unprepare(bsp_priv->clk_mac);
+ */
+ bsp_priv->clk_enabled = false;
+ }
+ }
+
+ return 0;
+}
+
+static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+{
+ struct regulator *ldo = bsp_priv->regulator;
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (!ldo) {
+ dev_err(dev, "%s: no regulator found\n", __func__);
+ return -1;
+ }
+
+ if (enable) {
+ ret = regulator_enable(ldo);
+ if (ret)
+ dev_err(dev, "%s: fail to enable phy-supply\n",
+ __func__);
+ } else {
+ ret = regulator_disable(ldo);
+ if (ret)
+ dev_err(dev, "%s: fail to disable phy-supply\n",
+ __func__);
+ }
+
+ return 0;
+}
+
+static void *rk_gmac_setup(struct platform_device *pdev)
+{
+ struct rk_priv_data *bsp_priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+ const char *strings = NULL;
+ int value;
+
+ bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL);
+ if (!bsp_priv)
+ return ERR_PTR(-ENOMEM);
+
+ bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+
+ bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(bsp_priv->regulator)) {
+ if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
+ dev_err(dev, "phy regulator is not available yet, deferred probing\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ dev_err(dev, "no regulator found\n");
+ bsp_priv->regulator = NULL;
+ }
+
+ ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
+ if (ret) {
+ dev_err(dev, "%s: Can not read property: clock_in_out.\n",
+ __func__);
+ bsp_priv->clock_input = true;
+ } else {
+ dev_info(dev, "%s: clock input or output? (%s).\n",
+ __func__, strings);
+ if (!strcmp(strings, "input"))
+ bsp_priv->clock_input = true;
+ else
+ bsp_priv->clock_input = false;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
+ if (ret) {
+ bsp_priv->tx_delay = 0x30;
+ dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
+ dev_err(dev, "%s: set tx_delay to 0x%x\n",
+ __func__, bsp_priv->tx_delay);
+ } else {
+ dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
+ bsp_priv->tx_delay = value;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
+ if (ret) {
+ bsp_priv->rx_delay = 0x10;
+ dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
+ dev_err(dev, "%s: set rx_delay to 0x%x\n",
+ __func__, bsp_priv->rx_delay);
+ } else {
+ dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
+ bsp_priv->rx_delay = value;
+ }
+
+ bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ bsp_priv->pdev = pdev;
+
+ /*rmii or rgmii*/
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+ dev_info(dev, "%s: init for RGMII\n", __func__);
+ set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
+ } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+ dev_info(dev, "%s: init for RMII\n", __func__);
+ set_to_rmii(bsp_priv);
+ } else {
+ dev_err(dev, "%s: NO interface defined!\n", __func__);
+ }
+
+ gmac_clk_init(bsp_priv);
+
+ return bsp_priv;
+}
+
+static int rk_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+ int ret;
+
+ ret = phy_power_on(bsp_priv, true);
+ if (ret)
+ return ret;
+
+ ret = gmac_clk_enable(bsp_priv, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *gmac = priv;
+
+ phy_power_on(gmac, false);
+ gmac_clk_enable(gmac, false);
+}
+
+static void rk_fix_speed(void *priv, unsigned int speed)
+{
+ struct rk_priv_data *bsp_priv = priv;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
+ set_rgmii_speed(bsp_priv, speed);
+ else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ set_rmii_speed(bsp_priv, speed);
+ else
+ dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+}
+
+const struct stmmac_of_data rk3288_gmac_data = {
+ .has_gmac = 1,
+ .fix_mac_speed = rk_fix_speed,
+ .setup = rk_gmac_setup,
+ .init = rk_gmac_init,
+ .exit = rk_gmac_exit,
+};
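
The GRF writes in the new dwmac-rk glue above rely on Rockchip's high-word write-enable convention: bits 31..16 of the value written select which of bits 15..0 actually change, which is what the HIWORD_UPDATE, GRF_BIT and GRF_CLR_BIT macros encode. A minimal stand-alone sketch, not part of the patch, of how those macro values behave when applied to a shadow register (grf_apply and the sample delay values are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define BIT(nr)                 (1U << (nr))
#define GRF_BIT(nr)             (BIT(nr) | BIT((nr) + 16))
#define GRF_CLR_BIT(nr)         (BIT((nr) + 16))
#define HIWORD_UPDATE(val, mask, shift) \
        ((uint32_t)(val) << (shift) | (uint32_t)(mask) << ((shift) + 16))

/* Apply one hiword-masked write to a shadow copy of a GRF register. */
static uint32_t grf_apply(uint32_t reg, uint32_t write_val)
{
        uint32_t mask = write_val >> 16;     /* which low bits may change */
        uint32_t val  = write_val & 0xffff;  /* their new values */

        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t soc_con3 = 0;
        /* same shape as GMAC_CLK_RX_DL_CFG(0x10) | GMAC_CLK_TX_DL_CFG(0x30) */
        uint32_t w = HIWORD_UPDATE(0x10, 0x7F, 7) | HIWORD_UPDATE(0x30, 0x7F, 0);

        soc_con3 = grf_apply(soc_con3, w);
        printf("write 0x%08x -> register 0x%08x\n", w, soc_con3);

        /* setting then clearing bit 14 only ever touches bit 14 */
        soc_con3 = grf_apply(soc_con3, GRF_BIT(14));
        soc_con3 = grf_apply(soc_con3, GRF_CLR_BIT(14));
        printf("after set+clear of bit 14: 0x%08x\n", soc_con3);
        return 0;
}

Because every write carries its own enable mask, the driver never needs a read-modify-write cycle on the shared GRF registers.
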
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 056b358b4a72..bb6e2dc61bec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -122,7 +122,7 @@ struct sti_dwmac {
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK Retiming*/
struct clk *clk; /* PHY clock */
- int ctrl_reg; /* GMAC glue-logic control register */
+ u32 ctrl_reg; /* GMAC glue-logic control register */
int clk_sel_reg; /* GMAC ext clk selection register */
struct device *dev;
struct regmap *regmap;
@@ -285,11 +285,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (!np)
return -EINVAL;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
- if (!res)
- return -ENODATA;
- dwmac->ctrl_reg = res->start;
-
/* clk selection from extra syscfg register */
dwmac->clk_sel_reg = -ENXIO;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -300,6 +295,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
+ if (err) {
+ dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
+ return err;
+ }
+
dwmac->dev = dev;
dwmac->interface = of_get_phy_mode(np);
dwmac->regmap = regmap;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c6b7c1651e5..55e89b3838f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1097,6 +1097,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ netdev_reset_queue(priv->dev);
stmmac_clear_descriptors(priv);
@@ -1287,7 +1288,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* that needs to not insert csum in the TDES.
*/
priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
- tc = SF_DMA_MODE;
+ priv->xstats.threshold = SF_DMA_MODE;
} else
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
@@ -1300,6 +1301,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
spin_lock(&priv->tx_lock);
@@ -1356,6 +1358,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
+ pkts_compl++;
+ bytes_compl += skb->len;
dev_consume_skb_any(skb);
priv->tx_skbuff[entry] = NULL;
}
@@ -1364,6 +1368,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->dirty_tx++;
}
+
+ netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+
if (unlikely(netif_queue_stopped(priv->dev) &&
stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
netif_tx_lock(priv->dev);
@@ -1418,6 +1425,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
(i == txsize - 1));
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ netdev_reset_queue(priv->dev);
priv->hw->dma->start_tx(priv->ioaddr);
priv->dev->stats.tx_errors++;
@@ -1444,9 +1452,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
}
if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+ (tc <= 256)) {
tc += 64;
- priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+ if (priv->plat->force_thresh_dma_mode)
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+ else
+ priv->hw->dma->dma_mode(priv->ioaddr, tc,
+ SF_DMA_MODE);
priv->xstats.threshold = tc;
}
} else if (unlikely(status == tx_hard_error))
@@ -2050,6 +2063,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (!priv->hwts_tx_en)
skb_tx_timestamp(skb);
+ netdev_sent_queue(dev, skb->len);
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
spin_unlock(&priv->tx_lock);
@@ -2742,7 +2756,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
- priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ /* TXCOE doesn't work in thresh DMA mode */
+ if (priv->plat->force_thresh_dma_mode)
+ priv->plat->tx_coe = 0;
+ else
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
if (priv->dma_cap.rx_coe_type2)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
@@ -2778,6 +2796,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * on success the new private structure is returned, otherwise the error
+ * pointer.
*/
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2810,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
SET_NETDEV_DEV(ndev, device);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 054520d67de4..3bca908716e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -24,8 +24,50 @@
*******************************************************************************/
#include <linux/pci.h>
+#include <linux/dmi.h>
+
#include "stmmac.h"
+/*
+ * This struct associates the PCI function of the MAC controller on a board,
+ * discovered via DMI, with the address of the PHY connected to that MAC. A
+ * negative address means the MAC controller is not connected to a PHY.
+ */
+struct stmmac_pci_dmi_data {
+ const char *name;
+ unsigned int func;
+ int phy_addr;
+};
+
+struct stmmac_pci_info {
+ struct pci_dev *pdev;
+ int (*setup)(struct plat_stmmacenet_data *plat,
+ struct stmmac_pci_info *info);
+ struct stmmac_pci_dmi_data *dmi;
+};
+
+static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
+{
+ const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+ unsigned int func = PCI_FUNC(info->pdev->devfn);
+ struct stmmac_pci_dmi_data *dmi;
+
+ /*
+ * Galileo boards with old firmware don't support DMI. We always return
+ * 1 here, so at least the first MAC controller found will be probed.
+ */
+ if (!name)
+ return 1;
+
+ for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
+ if (!strcmp(dmi->name, name) && dmi->func == func)
+ return dmi->phy_addr;
+ }
+
+ return -ENODEV;
+}
+
static void stmmac_default_data(struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
@@ -48,6 +90,62 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->unicast_filter_entries = 1;
}
+static int quark_default_data(struct plat_stmmacenet_data *plat,
+ struct stmmac_pci_info *info)
+{
+ struct pci_dev *pdev = info->pdev;
+ int ret;
+
+ /*
+ * Refuse to load the driver and register net device if MAC controller
+ * does not connect to any PHY interface.
+ */
+ ret = stmmac_pci_find_phy_addr(info);
+ if (ret < 0)
+ return ret;
+
+ plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat->phy_addr = ret;
+ plat->interface = PHY_INTERFACE_MODE_RMII;
+ plat->clk_csr = 2;
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->phy_reset = NULL;
+ plat->mdio_bus_data->phy_mask = 0;
+
+ plat->dma_cfg->pbl = 16;
+ plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
+ plat->dma_cfg->fixed_burst = 1;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ return 0;
+}
+
+static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
+ {
+ .name = "Galileo",
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .name = "GalileoGen2",
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {}
+};
+
+static struct stmmac_pci_info quark_pci_info = {
+ .setup = quark_default_data,
+ .dmi = quark_pci_dmi_data,
+};
+
/**
* stmmac_pci_probe
*
@@ -63,6 +161,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
static int stmmac_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
struct stmmac_priv *priv;
int i;
@@ -103,7 +202,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- stmmac_default_data(plat);
+ if (info) {
+ info->pdev = pdev;
+ if (info->setup) {
+ ret = info->setup(plat, info);
+ if (ret)
+ return ret;
+ }
+ } else
+ stmmac_default_data(plat);
+
+ pci_enable_msi(pdev);
priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
if (IS_ERR(priv)) {
@@ -155,11 +264,13 @@ static int stmmac_pci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_QUARK_ID 0x0937
#define STMMAC_DEVICE_ID 0x1108
static const struct pci_device_id stmmac_id_table[] = {
{PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
+ {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
{}
};
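
The Quark support above keys the PHY address off the DMI board name plus the PCI function of the MAC, scanning a sentinel-terminated table and falling back to address 1 when DMI is absent. A small user-space sketch of that lookup, not taken from the patch (find_phy_addr and quark_dmi are illustrative names; the table contents mirror the entries above):

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct dmi_entry {
        const char *name;       /* DMI board name */
        unsigned int func;      /* PCI function of the MAC */
        int phy_addr;           /* PHY address wired to that MAC */
};

static const struct dmi_entry quark_dmi[] = {
        { "Galileo",     6, 1 },
        { "GalileoGen2", 6, 1 },
        { }                     /* sentinel: NULL name ends the scan */
};

static int find_phy_addr(const char *board, unsigned int func)
{
        const struct dmi_entry *e;

        if (!board)             /* old firmware without DMI: probe anyway */
                return 1;

        for (e = quark_dmi; e->name && *e->name; e++)
                if (!strcmp(e->name, board) && e->func == func)
                        return e->phy_addr;

        return -ENODEV;
}

int main(void)
{
        printf("GalileoGen2/func6 -> %d\n", find_phy_addr("GalileoGen2", 6));
        printf("Unknown/func6     -> %d\n", find_phy_addr("Unknown", 6));
        return 0;
}

A negative return makes the probe bail out, so MAC functions with no attached PHY are never registered as net devices.
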
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3039de2465ba..fb846ebba1d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,7 @@
static const struct of_device_id stmmac_dt_ids[] = {
/* SoC specific glue layers should come before generic bindings */
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
@@ -234,6 +235,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
of_property_read_bool(np, "snps,fixed-burst");
dma_cfg->mixed_burst =
of_property_read_bool(np, "snps,mixed-burst");
+ of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
+ if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
+ dma_cfg->burst_len = 0;
}
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
if (plat->force_thresh_dma_mode) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 25dd1f7ace02..093eb99e5ffd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -24,5 +24,6 @@ extern const struct stmmac_of_data sun7i_gmac_data;
extern const struct stmmac_of_data stih4xx_dwmac_data;
extern const struct stmmac_of_data stid127_dwmac_data;
extern const struct stmmac_of_data socfpga_gmac_data;
+extern const struct stmmac_of_data rk3288_gmac_data;
#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c6416213837..4b51f903fb73 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3341,8 +3341,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
niu_hash_page(rp, page, addr);
if (rp->rbr_blocks_per_page > 1)
- atomic_add(rp->rbr_blocks_per_page - 1,
- &compound_head(page)->_count);
+ atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
for (i = 0; i < rp->rbr_blocks_per_page; i++) {
__le32 *rbr = &rp->rbr[start_index + i];
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d2835bf7b4fb..2b10b85d8a08 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -50,6 +50,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define VNET_MAX_RETRIES 10
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+static void vnet_port_reset(struct vnet_port *port);
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
@@ -351,10 +352,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
+ int maxlen;
int err;
err = -EMSGSIZE;
- if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
+ if (port->tso && port->tsolen > port->rmtu)
+ maxlen = port->tsolen;
+ else
+ maxlen = port->rmtu;
+ if (unlikely(len < ETH_ZLEN || len > maxlen)) {
dev->stats.rx_length_errors++;
goto out_dropped;
}
@@ -731,9 +737,7 @@ ldc_ctrl:
vio_link_state_change(vio, event);
if (event == LDC_EVENT_RESET) {
- port->rmtu = 0;
- port->tso = true;
- port->tsolen = 0;
+ vnet_port_reset(port);
vio_port_up(vio);
}
port->rx_event = 0;
@@ -929,36 +933,36 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
*pending = 0;
- txi = dr->prod-1;
- if (txi < 0)
- txi = VNET_TX_RING_SIZE-1;
-
+ txi = dr->prod;
for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
struct vio_net_desc *d;
- d = vio_dring_entry(dr, txi);
-
- if (d->hdr.state == VIO_DESC_DONE) {
- if (port->tx_bufs[txi].skb) {
- BUG_ON(port->tx_bufs[txi].skb->next);
+ --txi;
+ if (txi < 0)
+ txi = VNET_TX_RING_SIZE-1;
- port->tx_bufs[txi].skb->next = skb;
- skb = port->tx_bufs[txi].skb;
- port->tx_bufs[txi].skb = NULL;
+ d = vio_dring_entry(dr, txi);
- ldc_unmap(port->vio.lp,
- port->tx_bufs[txi].cookies,
- port->tx_bufs[txi].ncookies);
- }
- d->hdr.state = VIO_DESC_FREE;
- } else if (d->hdr.state == VIO_DESC_READY) {
+ if (d->hdr.state == VIO_DESC_READY) {
(*pending)++;
- } else if (d->hdr.state == VIO_DESC_FREE) {
- break;
+ continue;
}
- --txi;
- if (txi < 0)
- txi = VNET_TX_RING_SIZE-1;
+ if (port->tx_bufs[txi].skb) {
+ if (d->hdr.state != VIO_DESC_DONE)
+ pr_notice("invalid ring buffer state %d\n",
+ d->hdr.state);
+ BUG_ON(port->tx_bufs[txi].skb->next);
+
+ port->tx_bufs[txi].skb->next = skb;
+ skb = port->tx_bufs[txi].skb;
+ port->tx_bufs[txi].skb = NULL;
+
+ ldc_unmap(port->vio.lp,
+ port->tx_bufs[txi].cookies,
+ port->tx_bufs[txi].ncookies);
+ } else if (d->hdr.state == VIO_DESC_FREE)
+ break;
+ d->hdr.state = VIO_DESC_FREE;
}
return skb;
}
@@ -1119,6 +1123,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
}
+ nskb->queue_mapping = skb->queue_mapping;
dev_kfree_skb(skb);
skb = nskb;
}
@@ -1632,16 +1637,9 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
int i;
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (dr->base) {
- ldc_free_exp_dring(port->vio.lp, dr->base,
- (dr->entry_size * dr->num_entries),
- dr->cookies, dr->ncookies);
- dr->base = NULL;
- dr->entry_size = 0;
- dr->num_entries = 0;
- dr->pending = 0;
- dr->ncookies = 0;
- }
+
+ if (dr->base == NULL)
+ return;
for (i = 0; i < VNET_TX_RING_SIZE; i++) {
struct vio_net_desc *d;
@@ -1651,8 +1649,6 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
continue;
d = vio_dring_entry(dr, i);
- if (d->hdr.state == VIO_DESC_READY)
- pr_warn("active transmit buffers freed\n");
ldc_unmap(port->vio.lp,
port->tx_bufs[i].cookies,
@@ -1661,6 +1657,23 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
port->tx_bufs[i].skb = NULL;
d->hdr.state = VIO_DESC_FREE;
}
+ ldc_free_exp_dring(port->vio.lp, dr->base,
+ (dr->entry_size * dr->num_entries),
+ dr->cookies, dr->ncookies);
+ dr->base = NULL;
+ dr->entry_size = 0;
+ dr->num_entries = 0;
+ dr->pending = 0;
+ dr->ncookies = 0;
+}
+
+static void vnet_port_reset(struct vnet_port *port)
+{
+ del_timer(&port->clean_timer);
+ vnet_port_free_tx_bufs(port);
+ port->rmtu = 0;
+ port->tso = true;
+ port->tsolen = 0;
}
static int vnet_port_alloc_tx_ring(struct vnet_port *port)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6ab36d9ff2ab..a9cac8413e49 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
txd_mss);
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/*Cut VLAN ID to 12 bits */
- txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
+ txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
txd_vtag = 1;
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 605dd909bcc3..3bc992cd70b7 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -56,12 +56,18 @@ config TI_CPSW_PHY_SEL
This driver supports configuring of the phy mode connected to
the CPSW.
+config TI_CPSW_ALE
+ tristate "TI CPSW ALE Support"
+ ---help---
+ This driver supports TI's CPSW ALE module.
+
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
select TI_CPSW_PHY_SEL
+ select TI_CPSW_ALE
select MFD_SYSCON
select REGMAP
---help---
@@ -79,6 +85,25 @@ config TI_CPTS
the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
and Layer 2 packets, and the driver offers a PTP Hardware Clock.
+config TI_KEYSTONE_NETCP
+ tristate "TI Keystone NETCP Core Support"
+ select TI_CPSW_ALE
+ depends on OF
+ depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+ ---help---
+ This driver supports TI's Keystone NETCP Core.
+
+ To compile this driver as a module, choose M here: the module
+ will be called keystone_netcp.
+
+config TI_KEYSTONE_NETCP_ETHSS
+ depends on TI_KEYSTONE_NETCP
+ tristate "TI Keystone NETCP Ethernet subsystem Support"
+ ---help---
+ This driver supports TI's Keystone NETCP Ethernet subsystem.
+
+ To compile this driver as a module, choose M here: the module
+ will be called keystone_netcp_ethss.
+
config TLAN
tristate "TI ThunderLAN support"
depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 9cfaab8152be..d420d9413e4a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -2,11 +2,20 @@
# Makefile for the TI network device drivers.
#
+obj-$(CONFIG_TI_CPSW) += cpsw-common.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+
obj-$(CONFIG_TLAN) += tlan.o
obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
+obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
+ti_cpsw-y := cpsw.o cpts.o
+
+obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
+keystone_netcp-y := netcp_core.o
+obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
new file mode 100644
index 000000000000..f59509486113
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "cpsw.h"
+
+#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+
+int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+ u8 *mac_addr)
+{
+ u32 macid_lo;
+ u32 macid_hi;
+ struct regmap *syscon;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
+ &macid_lo);
+ regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
+ &macid_hi);
+
+ mac_addr[5] = (macid_lo >> 8) & 0xff;
+ mac_addr[4] = macid_lo & 0xff;
+ mac_addr[3] = (macid_hi >> 24) & 0xff;
+ mac_addr[2] = (macid_hi >> 16) & 0xff;
+ mac_addr[1] = (macid_hi >> 8) & 0xff;
+ mac_addr[0] = macid_hi & 0xff;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
+
+MODULE_LICENSE("GPL");
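
cpsw_am33xx_cm_get_macid() above unpacks the two control-module MAC ID words into a six-byte Ethernet address, with the low word supplying the last two octets. A stand-alone sketch of that byte order, not from the patch, using made-up register values purely for illustration:

#include <stdio.h>
#include <stdint.h>

static void unpack_macid(uint32_t macid_lo, uint32_t macid_hi, uint8_t mac[6])
{
        /* same ordering as the driver: hi word gives octets 0..3 */
        mac[5] = (macid_lo >> 8) & 0xff;
        mac[4] = macid_lo & 0xff;
        mac[3] = (macid_hi >> 24) & 0xff;
        mac[2] = (macid_hi >> 16) & 0xff;
        mac[1] = (macid_hi >> 8) & 0xff;
        mac[0] = macid_hi & 0xff;
}

int main(void)
{
        uint8_t mac[6];

        /* example register contents, chosen only for illustration */
        unpack_macid(0x0000f61c, 0x25ba3cd0, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}

Moving this helper into cpsw-common.c lets both cpsw and davinci_emac reuse it, with the register offset passed in by the caller.
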
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e61ee8351272..7d8dd0d2182e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -33,8 +33,6 @@
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
#include <linux/pinctrl/consumer.h>
@@ -610,7 +608,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port);
+ priv->host_port, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +632,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int vid;
+
+ if (priv->data.dual_emac)
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ else
+ vid = priv->data.default_vlan;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -649,7 +653,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+ vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -754,17 +759,25 @@ requeue:
dev_kfree_skb_any(new_skb);
}
-static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
- int value = irq - priv->irqs_table[0];
- /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
- * is to make sure we will always write the correct value to the EOI
- * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
- * for TX Interrupt and 3 for MISC Interrupt.
- */
- cpdma_ctlr_eoi(priv->dma, value);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ cpdma_chan_process(priv->txch, 128);
+
+ priv = cpsw_get_slave_priv(priv, 1);
+ if (priv)
+ cpdma_chan_process(priv->txch, 128);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_priv *priv = dev_id;
+
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
@@ -1617,7 +1630,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_intr_disable(priv);
cpdma_ctlr_int_ctrl(priv->dma, false);
- cpsw_interrupt(ndev->irq, priv);
+ cpsw_rx_interrupt(priv->irqs_table[0], priv);
+ cpsw_tx_interrupt(priv->irqs_table[1], priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
@@ -1627,16 +1641,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
int ret;
- int unreg_mcast_mask;
+ int unreg_mcast_mask = 0;
+ u32 port_mask;
- if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = ALE_ALL_PORTS;
- else
- unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ if (priv->data.dual_emac) {
+ port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = ALE_ALL_PORTS;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ }
- ret = cpsw_ale_add_vlan(priv->ale, vid,
- ALE_ALL_PORTS << priv->host_port,
- 0, ALE_ALL_PORTS << priv->host_port,
+ ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
unreg_mcast_mask << priv->host_port);
if (ret != 0)
return ret;
@@ -1647,8 +1669,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- ALE_ALL_PORTS << priv->host_port,
- ALE_VLAN, vid, 0);
+ port_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
@@ -1669,6 +1690,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ /* In dual EMAC, reserved VLAN id should not be used for
+ * creating VLAN interfaces as this can break the dual
+ * EMAC port separation
+ */
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
return cpsw_add_vlan_ale_entry(priv, vid);
}
@@ -1682,6 +1716,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
if (ret != 0)
@@ -1891,36 +1934,6 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->port_vlan = data->dual_emac_res_vlan;
}
-#define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id)
-#define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4)
-
-static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave,
- u8 *mac_addr)
-{
- u32 macid_lo;
- u32 macid_hi;
- struct regmap *syscon;
-
- syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
- if (IS_ERR(syscon)) {
- if (PTR_ERR(syscon) == -ENODEV)
- return 0;
- return PTR_ERR(syscon);
- }
-
- regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo);
- regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi);
-
- mac_addr[5] = (macid_lo >> 8) & 0xff;
- mac_addr[4] = macid_lo & 0xff;
- mac_addr[3] = (macid_hi >> 24) & 0xff;
- mac_addr[2] = (macid_hi >> 16) & 0xff;
- mac_addr[1] = (macid_hi >> 8) & 0xff;
- mac_addr[0] = macid_hi & 0xff;
-
- return 0;
-}
-
static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct platform_device *pdev)
{
@@ -2045,7 +2058,8 @@ no_phy_slave:
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
} else {
if (of_machine_is_compatible("ti,am33xx")) {
- ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i,
+ ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
+ 0x630, i,
slave_data->mac_addr);
if (ret)
return ret;
@@ -2156,7 +2170,8 @@ static int cpsw_probe(struct platform_device *pdev)
void __iomem *ss_regs;
struct resource *res, *ss_res;
u32 slave_offset, sliver_offset, slave_size;
- int ret = 0, i, k = 0;
+ int ret = 0, i;
+ int irq;
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
@@ -2338,31 +2353,47 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
- ndev->irq = platform_get_irq(pdev, 0);
+ ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
ret = -ENOENT;
goto clean_ale_ret;
}
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
- if (k >= ARRAY_SIZE(priv->irqs_table)) {
- ret = -EINVAL;
- goto clean_ale_ret;
- }
+ /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+ * MISC IRQs which are always kept disabled with this driver so
+ * we will not request them.
+ *
+ * If anyone wants to implement support for those, make sure to
+ * first request and append them to irqs_table array.
+ */
- ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt,
- 0, dev_name(&pdev->dev), priv);
- if (ret < 0) {
- dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
- }
+ /* RX IRQ */
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ goto clean_ale_ret;
- priv->irqs_table[k] = res->start;
- k++;
+ priv->irqs_table[0] = irq;
+ ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
+ 0, dev_name(&pdev->dev), priv);
+ if (ret < 0) {
+ dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+ goto clean_ale_ret;
}
- priv->num_irqs = k;
+ /* TX IRQ */
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ goto clean_ale_ret;
+
+ priv->irqs_table[1] = irq;
+ ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
+ 0, dev_name(&pdev->dev), priv);
+ if (ret < 0) {
+ dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+ goto clean_ale_ret;
+ }
+ priv->num_irqs = 2;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 1b710674630c..ca90efafd156 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -41,5 +41,7 @@ struct cpsw_platform_data {
};
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+ u8 *mac_addr);
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 097ebe7077ac..6e927b4583aa 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -146,7 +147,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -167,7 +168,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
return -ENOENT;
}
-int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -234,7 +235,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int ret, idx;
@@ -245,6 +246,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
continue;
+ /* If the vid passed is -1, remove all multicast entries from
+ * the table irrespective of the VLAN id. If a valid VLAN id is
+ * passed, remove only multicast entries added to that VLAN id;
+ * if the VLAN id doesn't match, move on to the next entry.
+ */
+ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
+
if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6];
@@ -257,6 +266,7 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
}
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
int port_mask)
@@ -289,6 +299,7 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
}
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_flush);
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
@@ -326,6 +337,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid)
@@ -341,6 +353,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state)
@@ -372,6 +385,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
int flags, u16 vid)
@@ -393,6 +407,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast)
@@ -422,6 +437,7 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
@@ -442,6 +458,7 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
{
@@ -471,6 +488,7 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
cpsw_ale_write(ale, idx, ale_entry);
}
}
+EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
struct ale_control_info {
const char *name;
@@ -696,6 +714,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
{
@@ -719,6 +738,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
return tmp & BITMASK(info->bits);
}
+EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
static void cpsw_ale_timer(unsigned long arg)
{
@@ -742,6 +762,7 @@ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
}
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
void cpsw_ale_start(struct cpsw_ale *ale)
{
@@ -761,11 +782,13 @@ void cpsw_ale_start(struct cpsw_ale *ale)
add_timer(&ale->timer);
}
}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
@@ -780,6 +803,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
return ale;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_create);
int cpsw_ale_destroy(struct cpsw_ale *ale)
{
@@ -789,6 +813,7 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
kfree(ale);
return 0;
}
+EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
{
@@ -799,3 +824,8 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
data += ALE_ENTRY_WORDS;
}
}
+EXPORT_SYMBOL_GPL(cpsw_ale_dump);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPSW ALE driver");
+MODULE_AUTHOR("Texas Instruments");
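
The new vid argument to cpsw_ale_flush_multicast() above narrows the flush: -1 removes every multicast entry, any other value removes only the entries tagged with that VLAN. A stand-alone sketch of that filtering rule against a mock table (struct ale_entry and flush_multicast are illustrative, not the driver's types):

#include <stdio.h>
#include <stdbool.h>

struct ale_entry {
        bool mcast;     /* multicast address entry */
        int vid;        /* VLAN id of the entry */
        bool free;      /* entry slot released */
};

static void flush_multicast(struct ale_entry *tbl, int n, int vid)
{
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].free || !tbl[i].mcast)
                        continue;
                /* vid == -1 flushes everything; otherwise match the VLAN */
                if (vid != -1 && tbl[i].vid != vid)
                        continue;
                tbl[i].free = true;
        }
}

int main(void)
{
        struct ale_entry tbl[] = {
                { true, 100, false },
                { true, 200, false },
                { false, 100, false },
        };
        int i;

        flush_multicast(tbl, 3, 100);   /* only the vid 100 mcast entry goes */
        for (i = 0; i < 3; i++)
                printf("entry %d free=%d\n", i, tbl[i].free);
        return 0;
}

This is what lets cpsw_ndo_set_rx_mode() above clear the multicast entries of one port's VLAN without disturbing the other port in dual-EMAC mode.
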
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index c0d4127aa549..af1e7ecd87c6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 4a4388b813ac..fbe42cb107ec 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -157,14 +157,11 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- s64 now;
unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
spin_lock_irqsave(&cpts->lock, flags);
- now = timecounter_read(&cpts->tc);
- now += delta;
- timecounter_init(&cpts->tc, &cpts->cc, now);
+ timecounter_adjtime(&cpts->tc, delta);
spin_unlock_irqrestore(&cpts->lock, flags);
return 0;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 1a581ef7eee8..69a46b92c7d6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
+#include <linux/timecounter.h>
struct cpsw_cpts {
u32 idver; /* Identification and version */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ea712512c7d1..aeebc0a7bf47 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -52,6 +52,7 @@
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/semaphore.h>
#include <linux/phy.h>
#include <linux/bitops.h>
@@ -62,12 +63,15 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
#include <asm/irq.h>
#include <asm/page.h>
+#include "cpsw.h"
#include "davinci_cpdma.h"
static int debug_level;
@@ -343,9 +347,7 @@ struct emac_priv {
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
const char *phy_id;
-#ifdef CONFIG_OF
struct device_node *phy_node;
-#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -922,6 +924,16 @@ static void emac_int_disable(struct emac_priv *priv)
if (priv->int_disable)
priv->int_disable();
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen- only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
local_irq_restore(flags);
} else {
@@ -951,15 +963,6 @@ static void emac_int_enable(struct emac_priv *priv)
* register */
/* NOTE: Rx Threshold and Misc interrupts are not enabled */
-
- /* ack rxen only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_RXEN);
-
- /* ack txen- only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_TXEN);
-
} else {
/* Set DM644x control registers for interrupt control */
emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
@@ -1537,7 +1540,13 @@ static int emac_dev_open(struct net_device *ndev)
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
- pm_runtime_get(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, ret);
+ return ret;
+ }
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
@@ -1596,8 +1605,20 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
priv->phydev = NULL;
+
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_node->full_name);
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phy_id) {
+ if (!priv->phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1606,7 +1627,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (priv->phy_id && *priv->phy_id) {
+ if (!priv->phydev && priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link,
PHY_INTERFACE_MODE_MII);
@@ -1627,7 +1648,9 @@ static int emac_dev_open(struct net_device *ndev)
"(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else {
+ }
+
+ if (!priv->phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1724,6 +1747,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
struct emac_priv *priv = netdev_priv(ndev);
u32 mac_control;
u32 stats_clear_mask;
+ int err;
+
+ err = pm_runtime_get_sync(&priv->pdev->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return &ndev->stats;
+ }
/* update emac hardware stats and reset the registers*/
@@ -1766,6 +1798,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+ pm_runtime_put(&priv->pdev->dev);
+
return &ndev->stats;
}
@@ -1807,7 +1841,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
if (!is_valid_ether_addr(pdata->mac_addr)) {
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(pdata->mac_addr, mac_addr);
}
of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
@@ -1848,6 +1882,53 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
return pdata;
}
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+ int slave, u8 *mac_addr)
+{
+ u32 macid_lsb;
+ u32 macid_msb;
+ struct regmap *syscon;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ regmap_read(syscon, offset, &macid_lsb);
+ regmap_read(syscon, offset + 4, &macid_msb);
+
+ mac_addr[0] = (macid_msb >> 16) & 0xff;
+ mac_addr[1] = (macid_msb >> 8) & 0xff;
+ mac_addr[2] = macid_msb & 0xff;
+ mac_addr[3] = (macid_lsb >> 16) & 0xff;
+ mac_addr[4] = (macid_lsb >> 8) & 0xff;
+ mac_addr[5] = macid_lsb & 0xff;
+
+ return 0;
+}
+
+static int davinci_emac_try_get_mac(struct platform_device *pdev,
+ int instance, u8 *mac_addr)
+{
+ int error = -EINVAL;
+
+ if (!pdev->dev.of_node)
+ return error;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
+ error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
+ 0, mac_addr);
+ else if (of_device_is_compatible(pdev->dev.of_node,
+ "ti,dm816-emac"))
+ error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
+ instance,
+ mac_addr);
+
+ return error;
+}
+
/**
* davinci_emac_probe - EMAC device probe
* @pdev: The DaVinci EMAC device that we are removing
@@ -1859,7 +1940,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
static int davinci_emac_probe(struct platform_device *pdev)
{
int rc = 0;
- struct resource *res;
+ struct resource *res, *res_ctrl;
struct net_device *ndev;
struct emac_priv *priv;
unsigned long hw_ram_addr;
@@ -1876,6 +1957,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ devm_clk_put(&pdev->dev, emac_clk);
/* TODO: Probe PHY here if possible */
@@ -1917,11 +1999,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = PTR_ERR(priv->remap_addr);
goto no_pdata;
}
+
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_ctrl) {
+ priv->ctrl_base =
+ devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(priv->ctrl_base)) {
+ rc = PTR_ERR(priv->ctrl_base);
+ goto no_pdata;
+ }
+ } else {
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+ }
+
priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
ndev->base_addr = (unsigned long)priv->remap_addr;
- priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
-
hw_ram_addr = pdata->hw_ram_addr;
if (!hw_ram_addr)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
@@ -1968,6 +2059,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
ndev->irq = res->start;
+ rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
+ if (!rc)
+ ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if none passed */
eth_hw_addr_random(ndev);
@@ -1980,12 +2075,22 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_get_sync(&pdev->dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, rc);
+ goto no_cpdma_chan;
+ }
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
+ pm_runtime_put(&pdev->dev);
goto no_cpdma_chan;
}
@@ -1995,9 +2100,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
return 0;
@@ -2071,9 +2174,14 @@ static const struct emac_platform_data am3517_emac_data = {
.hw_ram_addr = 0x01e20000,
};
+static const struct emac_platform_data dm816_emac_data = {
+ .version = EMAC_VERSION_2,
+};
+
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+ {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
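
Illustrative sketch (not part of the patch): the error handling added to emac_dev_open(), emac_dev_getnetstats() and davinci_emac_probe() above follows the usual runtime-PM idiom. pm_runtime_get_sync() raises the device usage count even when it fails, so a failed get must be balanced with pm_runtime_put_noidle() before bailing out. A minimal sketch of the pattern, with a hypothetical helper name:

/* assumes <linux/pm_runtime.h>; example_touch_hw() is hypothetical */
static int example_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access device registers here ... */

	pm_runtime_put(dev);	/* drop the reference once done */
	return 0;
}
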
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644
index 000000000000..906e9bc412f5
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -0,0 +1,229 @@
+/*
+ * NetCP driver local header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ * Murali Karicheri <m-karicheri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __NETCP_H__
+#define __NETCP_H__
+
+#include <linux/netdevice.h>
+#include <linux/soc/ti/knav_dma.h>
+
+/* Maximum Ethernet frame size supported by Keystone switch */
+#define NETCP_MAX_FRAME_SIZE 9504
+
+#define SGMII_LINK_MAC_MAC_AUTONEG 0
+#define SGMII_LINK_MAC_PHY 1
+#define SGMII_LINK_MAC_MAC_FORCED 2
+#define SGMII_LINK_MAC_FIBER 3
+#define SGMII_LINK_MAC_PHY_NO_MDIO 4
+#define XGMII_LINK_MAC_PHY 10
+#define XGMII_LINK_MAC_MAC_FORCED 11
+
+struct netcp_device;
+
+struct netcp_tx_pipe {
+ struct netcp_device *netcp_device;
+ void *dma_queue;
+ unsigned int dma_queue_id;
+ u8 dma_psflags;
+ void *dma_channel;
+ const char *dma_chan_name;
+};
+
+#define ADDR_NEW BIT(0)
+#define ADDR_VALID BIT(1)
+
+enum netcp_addr_type {
+ ADDR_ANY,
+ ADDR_DEV,
+ ADDR_UCAST,
+ ADDR_MCAST,
+ ADDR_BCAST
+};
+
+struct netcp_addr {
+ struct netcp_intf *netcp;
+ unsigned char addr[ETH_ALEN];
+ enum netcp_addr_type type;
+ unsigned int flags;
+ struct list_head node;
+};
+
+struct netcp_intf {
+ struct device *dev;
+ struct device *ndev_dev;
+ struct net_device *ndev;
+ bool big_endian;
+ unsigned int tx_compl_qid;
+ void *tx_pool;
+ struct list_head txhook_list_head;
+ unsigned int tx_pause_threshold;
+ void *tx_compl_q;
+
+ unsigned int tx_resume_threshold;
+ void *rx_queue;
+ void *rx_pool;
+ struct list_head rxhook_list_head;
+ unsigned int rx_queue_id;
+ void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
+ u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
+ struct napi_struct rx_napi;
+ struct napi_struct tx_napi;
+
+ void *rx_channel;
+ const char *dma_chan_name;
+ u32 rx_pool_size;
+ u32 rx_pool_region_id;
+ u32 tx_pool_size;
+ u32 tx_pool_region_id;
+ struct list_head module_head;
+ struct list_head interface_list;
+ struct list_head addr_list;
+ bool netdev_registered;
+ bool primary_module_attached;
+
+ /* Lock used for protecting Rx/Tx hook list management */
+ spinlock_t lock;
+ struct netcp_device *netcp_device;
+ struct device_node *node_interface;
+
+ /* DMA configuration data */
+ u32 msg_enable;
+ u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
+};
+
+#define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS
+struct netcp_packet {
+ struct sk_buff *skb;
+ u32 *epib;
+ u32 *psdata;
+ unsigned int psdata_len;
+ struct netcp_intf *netcp;
+ struct netcp_tx_pipe *tx_pipe;
+ bool rxtstamp_complete;
+ void *ts_context;
+
+ int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt);
+};
+
+static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
+ unsigned int bytes)
+{
+ u32 *buf;
+ unsigned int words;
+
+ if ((bytes & 0x03) != 0)
+ return NULL;
+ words = bytes >> 2;
+
+ if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
+ return NULL;
+
+ p_info->psdata_len += words;
+ buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
+ return buf;
+}
+
+static inline int netcp_align_psdata(struct netcp_packet *p_info,
+ unsigned int byte_align)
+{
+ int padding;
+
+ switch (byte_align) {
+ case 0:
+ padding = -EINVAL;
+ break;
+ case 1:
+ case 2:
+ case 4:
+ padding = 0;
+ break;
+ case 8:
+ padding = (p_info->psdata_len << 2) % 8;
+ break;
+ case 16:
+ padding = (p_info->psdata_len << 2) % 16;
+ break;
+ default:
+ padding = (p_info->psdata_len << 2) % byte_align;
+ break;
+ }
+ return padding;
+}
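
Illustrative sketch (not part of the patch): a module's TX hook would typically use the two helpers above to reserve protocol-specific words in the descriptor's psdata area. The hook signature and the tx_pipe claim follow netcp_hook_rtn and struct netcp_packet declared in this header; the word contents and the use of hook_data as the TX pipe are hypothetical.

static int example_tx_hook(int order, void *hook_data,
			   struct netcp_packet *p_info)
{
	u32 *psdata;
	int padding;

	/* pad psdata to an 8-byte boundary before adding two words */
	padding = netcp_align_psdata(p_info, 8);
	if (padding > 0 && !netcp_push_psdata(p_info, padding))
		return -ENOMEM;

	psdata = netcp_push_psdata(p_info, 2 * sizeof(u32));
	if (!psdata)
		return -ENOMEM;

	psdata[0] = 0;			/* module-specific word 0 */
	psdata[1] = 0;			/* module-specific word 1 */

	/* claim the packet for this module's TX pipe */
	p_info->tx_pipe = hook_data;	/* assumes hook_data is the pipe */
	return 0;
}
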
+
+struct netcp_module {
+ const char *name;
+ struct module *owner;
+ bool primary;
+
+ /* probe/remove: called once per NETCP instance */
+ int (*probe)(struct netcp_device *netcp_device,
+ struct device *device, struct device_node *node,
+ void **inst_priv);
+ int (*remove)(struct netcp_device *netcp_device, void *inst_priv);
+
+ /* attach/release: called once per network interface */
+ int (*attach)(void *inst_priv, struct net_device *ndev,
+ struct device_node *node, void **intf_priv);
+ int (*release)(void *intf_priv);
+ int (*open)(void *intf_priv, struct net_device *ndev);
+ int (*close)(void *intf_priv, struct net_device *ndev);
+ int (*add_addr)(void *intf_priv, struct netcp_addr *naddr);
+ int (*del_addr)(void *intf_priv, struct netcp_addr *naddr);
+ int (*add_vid)(void *intf_priv, int vid);
+ int (*del_vid)(void *intf_priv, int vid);
+ int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
+
+ /* used internally */
+ struct list_head module_list;
+ struct list_head interface_list;
+};
+
+int netcp_register_module(struct netcp_module *module);
+void netcp_unregister_module(struct netcp_module *module);
+void *netcp_module_get_intf_data(struct netcp_module *module,
+ struct netcp_intf *intf);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+ struct netcp_device *netcp_device,
+ const char *dma_chan_name, unsigned int dma_queue_id);
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
+
+typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+void *netcp_device_find_module(struct netcp_device *netcp_device,
+ const char *name);
+
+/* SGMII functions */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
+
+/* XGBE SERDES init functions */
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
+
+#endif /* __NETCP_H__ */
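
Illustrative sketch (not part of the patch): a minimal, hypothetical client of the interface declared above. A module fills in struct netcp_module and registers it; the core then calls probe() once per NetCP instance and attach() once per network interface that references the module.

/* assumes <linux/module.h> and "netcp.h"; all names below are hypothetical */
static int example_probe(struct netcp_device *netcp_device,
			 struct device *dev, struct device_node *node,
			 void **inst_priv)
{
	*inst_priv = NULL;	/* per-instance state would be allocated here */
	return 0;
}

static int example_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	return 0;
}

static int example_attach(void *inst_priv, struct net_device *ndev,
			  struct device_node *node, void **intf_priv)
{
	*intf_priv = NULL;	/* per-interface state would be allocated here */
	return 0;
}

static int example_release(void *intf_priv)
{
	return 0;
}

static struct netcp_module example_module = {
	.name		= "example-module",	/* matched against a netcp-devices child node */
	.owner		= THIS_MODULE,
	.probe		= example_probe,
	.remove		= example_remove,
	.attach		= example_attach,
	.release	= example_release,
};

static int __init example_module_init(void)
{
	return netcp_register_module(&example_module);
}
module_init(example_module_init);

static void __exit example_module_exit(void)
{
	netcp_unregister_module(&example_module);
}
module_exit(example_module_exit);
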
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644
index 000000000000..a31a8c3c8e7c
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -0,0 +1,2149 @@
+/*
+ * Keystone NetCP Core driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_qmss.h>
+#include <linux/soc/ti/knav_dma.h>
+
+#include "netcp.h"
+
+#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
+#define NETCP_NAPI_WEIGHT 64
+#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
+#define NETCP_MAX_MCAST_ADDR 16
+
+#define NETCP_EFUSE_REG_INDEX 0
+
+#define NETCP_MOD_PROBE_SKIPPED 1
+#define NETCP_MOD_PROBE_FAILED 2
+
+#define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
+ NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
+ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_RX_STATUS)
+
+#define knav_queue_get_id(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_GET_ID, (unsigned long)NULL)
+
+#define knav_queue_enable_notify(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_ENABLE_NOTIFY, \
+ (unsigned long)NULL)
+
+#define knav_queue_disable_notify(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_DISABLE_NOTIFY, \
+ (unsigned long)NULL)
+
+#define knav_queue_get_count(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
+
+#define for_each_netcp_module(module) \
+ list_for_each_entry(module, &netcp_modules, module_list)
+
+#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
+ list_for_each_entry(inst_modpriv, \
+ &((netcp_device)->modpriv_head), inst_list)
+
+#define for_each_module(netcp, intf_modpriv) \
+ list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
+
+/* Module management structures */
+struct netcp_device {
+ struct list_head device_list;
+ struct list_head interface_head;
+ struct list_head modpriv_head;
+ struct device *device;
+};
+
+struct netcp_inst_modpriv {
+ struct netcp_device *netcp_device;
+ struct netcp_module *netcp_module;
+ struct list_head inst_list;
+ void *module_priv;
+};
+
+struct netcp_intf_modpriv {
+ struct netcp_intf *netcp_priv;
+ struct netcp_module *netcp_module;
+ struct list_head intf_list;
+ void *module_priv;
+};
+
+static LIST_HEAD(netcp_devices);
+static LIST_HEAD(netcp_modules);
+static DEFINE_MUTEX(netcp_modules_lock);
+
+static int netcp_debug_level = -1;
+module_param(netcp_debug_level, int, 0);
+MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
+
+/* Helper functions - Get/Set */
+static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
+ struct knav_dma_desc *desc)
+{
+ *buff_len = desc->buff_len;
+ *buff = desc->buff;
+ *ndesc = desc->next_desc;
+}
+
+static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
+{
+ *pad0 = desc->pad[0];
+ *pad1 = desc->pad[1];
+}
+
+static void get_org_pkt_info(u32 *buff, u32 *buff_len,
+ struct knav_dma_desc *desc)
+{
+ *buff = desc->orig_buff;
+ *buff_len = desc->orig_len;
+}
+
+static void get_words(u32 *words, int num_words, u32 *desc)
+{
+ int i;
+
+ for (i = 0; i < num_words; i++)
+ words[i] = desc[i];
+}
+
+static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
+ struct knav_dma_desc *desc)
+{
+ desc->buff_len = buff_len;
+ desc->buff = buff;
+ desc->next_desc = ndesc;
+}
+
+static void set_desc_info(u32 desc_info, u32 pkt_info,
+ struct knav_dma_desc *desc)
+{
+ desc->desc_info = desc_info;
+ desc->packet_info = pkt_info;
+}
+
+static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
+{
+ desc->pad[0] = pad0;
+ desc->pad[1] = pad1;
+}
+
+static void set_org_pkt_info(u32 buff, u32 buff_len,
+ struct knav_dma_desc *desc)
+{
+ desc->orig_buff = buff;
+ desc->orig_len = buff_len;
+}
+
+static void set_words(u32 *words, int num_words, u32 *desc)
+{
+ int i;
+
+ for (i = 0; i < num_words; i++)
+ desc[i] = words[i];
+}
+
+/* Read the e-fuse value as 32 bit values to be endian independent */
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+{
+ unsigned int addr0, addr1;
+
+ addr1 = readl(efuse_mac + 4);
+ addr0 = readl(efuse_mac);
+
+ x[0] = (addr1 & 0x0000ff00) >> 8;
+ x[1] = addr1 & 0x000000ff;
+ x[2] = (addr0 & 0xff000000) >> 24;
+ x[3] = (addr0 & 0x00ff0000) >> 16;
+ x[4] = (addr0 & 0x0000ff00) >> 8;
+ x[5] = addr0 & 0x000000ff;
+
+ return 0;
+}
+
+static const char *netcp_node_name(struct device_node *node)
+{
+ const char *name;
+
+ if (of_property_read_string(node, "label", &name) < 0)
+ name = node->name;
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+/* Module management routines */
+static int netcp_register_interface(struct netcp_intf *netcp)
+{
+ int ret;
+
+ ret = register_netdev(netcp->ndev);
+ if (!ret)
+ netcp->netdev_registered = true;
+ return ret;
+}
+
+static int netcp_module_probe(struct netcp_device *netcp_device,
+ struct netcp_module *module)
+{
+ struct device *dev = netcp_device->device;
+ struct device_node *devices, *interface, *node = dev->of_node;
+ struct device_node *child;
+ struct netcp_inst_modpriv *inst_modpriv;
+ struct netcp_intf *netcp_intf;
+ struct netcp_module *tmp;
+ bool primary_module_registered = false;
+ int ret;
+
+ /* Find this module in the sub-tree for this device */
+ devices = of_get_child_by_name(node, "netcp-devices");
+ if (!devices) {
+ dev_err(dev, "could not find netcp-devices node\n");
+ return NETCP_MOD_PROBE_SKIPPED;
+ }
+
+ for_each_available_child_of_node(devices, child) {
+ const char *name = netcp_node_name(child);
+
+ if (!strcasecmp(module->name, name))
+ break;
+ }
+
+ of_node_put(devices);
+ /* If module not used for this device, skip it */
+ if (!child) {
+ dev_warn(dev, "module(%s) not used for device\n", module->name);
+ return NETCP_MOD_PROBE_SKIPPED;
+ }
+
+ inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
+ if (!inst_modpriv) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ inst_modpriv->netcp_device = netcp_device;
+ inst_modpriv->netcp_module = module;
+ list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
+
+ ret = module->probe(netcp_device, dev, child,
+ &inst_modpriv->module_priv);
+ of_node_put(child);
+ if (ret) {
+ dev_err(dev, "Probe of module(%s) failed with %d\n",
+ module->name, ret);
+ list_del(&inst_modpriv->inst_list);
+ devm_kfree(dev, inst_modpriv);
+ return NETCP_MOD_PROBE_FAILED;
+ }
+
+ /* Attach modules only if the primary module is probed */
+ for_each_netcp_module(tmp) {
+ if (tmp->primary)
+ primary_module_registered = true;
+ }
+
+ if (!primary_module_registered)
+ return 0;
+
+ /* Attach module to interfaces */
+ list_for_each_entry(netcp_intf, &netcp_device->interface_head,
+ interface_list) {
+ struct netcp_intf_modpriv *intf_modpriv;
+
+ /* If interface not registered then register now */
+ if (!netcp_intf->netdev_registered)
+ ret = netcp_register_interface(netcp_intf);
+
+ if (ret)
+ return -ENODEV;
+
+ intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
+ GFP_KERNEL);
+ if (!intf_modpriv)
+ return -ENOMEM;
+
+ interface = of_parse_phandle(netcp_intf->node_interface,
+ module->name, 0);
+
+ intf_modpriv->netcp_priv = netcp_intf;
+ intf_modpriv->netcp_module = module;
+ list_add_tail(&intf_modpriv->intf_list,
+ &netcp_intf->module_head);
+
+ ret = module->attach(inst_modpriv->module_priv,
+ netcp_intf->ndev, interface,
+ &intf_modpriv->module_priv);
+ of_node_put(interface);
+ if (ret) {
+ dev_dbg(dev, "Attach of module %s declined with %d\n",
+ module->name, ret);
+ list_del(&intf_modpriv->intf_list);
+ devm_kfree(dev, intf_modpriv);
+ continue;
+ }
+ }
+ return 0;
+}
+
+int netcp_register_module(struct netcp_module *module)
+{
+ struct netcp_device *netcp_device;
+ struct netcp_module *tmp;
+ int ret;
+
+ if (!module->name) {
+ WARN(1, "error registering netcp module: no name\n");
+ return -EINVAL;
+ }
+
+ if (!module->probe) {
+ WARN(1, "error registering netcp module: no probe\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&netcp_modules_lock);
+
+ for_each_netcp_module(tmp) {
+ if (!strcasecmp(tmp->name, module->name)) {
+ mutex_unlock(&netcp_modules_lock);
+ return -EEXIST;
+ }
+ }
+ list_add_tail(&module->module_list, &netcp_modules);
+
+ list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+ ret = netcp_module_probe(netcp_device, module);
+ if (ret < 0)
+ goto fail;
+ }
+
+ mutex_unlock(&netcp_modules_lock);
+ return 0;
+
+fail:
+ mutex_unlock(&netcp_modules_lock);
+ netcp_unregister_module(module);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_register_module);
+
+static void netcp_release_module(struct netcp_device *netcp_device,
+ struct netcp_module *module)
+{
+ struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
+ struct netcp_intf *netcp_intf, *netcp_tmp;
+ struct device *dev = netcp_device->device;
+
+ /* Release the module from each interface */
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
+
+ list_for_each_entry_safe(intf_modpriv, intf_tmp,
+ &netcp_intf->module_head,
+ intf_list) {
+ if (intf_modpriv->netcp_module == module) {
+ module->release(intf_modpriv->module_priv);
+ list_del(&intf_modpriv->intf_list);
+ devm_kfree(dev, intf_modpriv);
+ break;
+ }
+ }
+ }
+
+ /* Remove the module from each instance */
+ list_for_each_entry_safe(inst_modpriv, inst_tmp,
+ &netcp_device->modpriv_head, inst_list) {
+ if (inst_modpriv->netcp_module == module) {
+ module->remove(netcp_device,
+ inst_modpriv->module_priv);
+ list_del(&inst_modpriv->inst_list);
+ devm_kfree(dev, inst_modpriv);
+ break;
+ }
+ }
+}
+
+void netcp_unregister_module(struct netcp_module *module)
+{
+ struct netcp_device *netcp_device;
+ struct netcp_module *module_tmp;
+
+ mutex_lock(&netcp_modules_lock);
+
+ list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+ netcp_release_module(netcp_device, module);
+ }
+
+ /* Remove the module from the module list */
+ for_each_netcp_module(module_tmp) {
+ if (module == module_tmp) {
+ list_del(&module->module_list);
+ break;
+ }
+ }
+
+ mutex_unlock(&netcp_modules_lock);
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_module);
+
+void *netcp_module_get_intf_data(struct netcp_module *module,
+ struct netcp_intf *intf)
+{
+ struct netcp_intf_modpriv *intf_modpriv;
+
+ list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
+ if (intf_modpriv->netcp_module == module)
+ return intf_modpriv->module_priv;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
+
+/* Module TX and RX Hook management */
+struct netcp_hook_list {
+ struct list_head list;
+ netcp_hook_rtn *hook_rtn;
+ void *hook_data;
+ int order;
+};
+
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *entry;
+ struct netcp_hook_list *next;
+ unsigned long flags;
+
+ entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->hook_rtn = hook_rtn;
+ entry->hook_data = hook_data;
+ entry->order = order;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
+ if (next->order > order)
+ break;
+ }
+ __list_add(&entry->list, next->list.prev, &next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_register_txhook);
+
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *next, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
+ if ((next->order == order) &&
+ (next->hook_rtn == hook_rtn) &&
+ (next->hook_data == hook_data)) {
+ list_del(&next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+ devm_kfree(netcp_priv->dev, next);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
+
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *entry;
+ struct netcp_hook_list *next;
+ unsigned long flags;
+
+ entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->hook_rtn = hook_rtn;
+ entry->hook_data = hook_data;
+ entry->order = order;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
+ if (next->order > order)
+ break;
+ }
+ __list_add(&entry->list, next->list.prev, &next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+ return 0;
+}
+
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *next, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
+ if ((next->order == order) &&
+ (next->hook_rtn == hook_rtn) &&
+ (next->hook_data == hook_data)) {
+ list_del(&next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+ devm_kfree(netcp_priv->dev, next);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+ return -ENOENT;
+}
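
Illustrative sketch (not part of the patch): registering an RX hook with the API above. Hooks are kept sorted by 'order', so lower numbers see the packet first, and a non-zero return from a hook makes the core drop the packet and count it as an RX error. The callback and priority below are hypothetical.

static int example_rx_hook(int order, void *hook_data,
			   struct netcp_packet *p_info)
{
	/* inspect p_info->skb, p_info->epib or p_info->psdata here */
	return 0;	/* non-zero would drop the packet */
}

static int example_enable_rx_inspection(struct netcp_intf *netcp, void *priv)
{
	/* order 10 is arbitrary; lower orders run earlier in the list */
	return netcp_register_rxhook(netcp, 10, example_rx_hook, priv);
}
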
+
+static void netcp_frag_free(bool is_frag, void *ptr)
+{
+ if (is_frag)
+ put_page(virt_to_head_page(ptr));
+ else
+ kfree(ptr);
+}
+
+static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
+ struct knav_dma_desc *desc)
+{
+ struct knav_dma_desc *ndesc;
+ dma_addr_t dma_desc, dma_buf;
+ unsigned int buf_len, dma_sz = sizeof(*ndesc);
+ void *buf_ptr;
+ u32 tmp;
+
+ get_words(&dma_desc, 1, &desc->next_desc);
+
+ while (dma_desc) {
+ ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+ if (unlikely(!ndesc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ break;
+ }
+ get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
+ get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
+ dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(buf_ptr);
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ }
+
+ get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
+ if (buf_ptr)
+ netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
+ knav_pool_desc_put(netcp->rx_pool, desc);
+}
+
+static void netcp_empty_rx_queue(struct netcp_intf *netcp)
+{
+ struct knav_dma_desc *desc;
+ unsigned int dma_sz;
+ dma_addr_t dma;
+
+ for (; ;) {
+ dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
+ if (!dma)
+ break;
+
+ desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
+ __func__);
+ netcp->ndev->stats.rx_errors++;
+ continue;
+ }
+ netcp_free_rx_desc_chain(netcp, desc);
+ netcp->ndev->stats.rx_dropped++;
+ }
+}
+
+static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
+{
+ unsigned int dma_sz, buf_len, org_buf_len;
+ struct knav_dma_desc *desc, *ndesc;
+ unsigned int pkt_sz = 0, accum_sz;
+ struct netcp_hook_list *rx_hook;
+ dma_addr_t dma_desc, dma_buff;
+ struct netcp_packet p_info;
+ struct sk_buff *skb;
+ void *org_buf_ptr;
+ u32 tmp;
+
+ dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
+ if (!dma_desc)
+ return -1;
+
+ desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ return 0;
+ }
+
+ get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
+ get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
+
+ if (unlikely(!org_buf_ptr)) {
+ dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+ goto free_desc;
+ }
+
+ pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
+ accum_sz = buf_len;
+ dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
+
+ /* Build a new sk_buff for the primary buffer */
+ skb = build_skb(org_buf_ptr, org_buf_len);
+ if (unlikely(!skb)) {
+ dev_err(netcp->ndev_dev, "build_skb() failed\n");
+ goto free_desc;
+ }
+
+ /* update data, tail and len */
+ skb_reserve(skb, NETCP_SOP_OFFSET);
+ __skb_put(skb, buf_len);
+
+ /* Fill in the page fragment list */
+ while (dma_desc) {
+ struct page *page;
+
+ ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+ if (unlikely(!ndesc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ goto free_desc;
+ }
+
+ get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
+ get_pad_info((u32 *)&page, &tmp, ndesc);
+
+ if (likely(dma_buff && buf_len && page)) {
+ dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ } else {
+ dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
+ (void *)dma_buff, buf_len, page);
+ goto free_desc;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset_in_page(dma_buff), buf_len, PAGE_SIZE);
+ accum_sz += buf_len;
+
+ /* Free the descriptor */
+ knav_pool_desc_put(netcp->rx_pool, ndesc);
+ }
+
+ /* Free the primary descriptor */
+ knav_pool_desc_put(netcp->rx_pool, desc);
+
+ /* check for packet len and warn */
+ if (unlikely(pkt_sz != accum_sz))
+ dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
+ pkt_sz, accum_sz);
+
+ /* Remove ethernet FCS from the packet */
+ __pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ /* Call each of the RX hooks */
+ p_info.skb = skb;
+ p_info.rxtstamp_complete = false;
+ list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
+ int ret;
+
+ ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
+ &p_info);
+ if (unlikely(ret)) {
+ dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
+ rx_hook->order, ret);
+ netcp->ndev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ netcp->ndev->last_rx = jiffies;
+ netcp->ndev->stats.rx_packets++;
+ netcp->ndev->stats.rx_bytes += skb->len;
+
+ /* push skb up the stack */
+ skb->protocol = eth_type_trans(skb, netcp->ndev);
+ netif_receive_skb(skb);
+ return 0;
+
+free_desc:
+ netcp_free_rx_desc_chain(netcp, desc);
+ netcp->ndev->stats.rx_errors++;
+ return 0;
+}
+
+static int netcp_process_rx_packets(struct netcp_intf *netcp,
+ unsigned int budget)
+{
+ int i;
+
+ for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
+ ;
+ return i;
+}
+
+/* Release descriptors and attached buffers from Rx FDQ */
+static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+ struct knav_dma_desc *desc;
+ unsigned int buf_len, dma_sz;
+ dma_addr_t dma;
+ void *buf_ptr;
+ u32 tmp;
+
+ /* Allocate descriptor */
+ while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
+ desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ continue;
+ }
+
+ get_org_pkt_info(&dma, &buf_len, desc);
+ get_pad_info((u32 *)&buf_ptr, &tmp, desc);
+
+ if (unlikely(!dma)) {
+ dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ continue;
+ }
+
+ if (unlikely(!buf_ptr)) {
+ dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ continue;
+ }
+
+ if (fdq == 0) {
+ dma_unmap_single(netcp->dev, dma, buf_len,
+ DMA_FROM_DEVICE);
+ netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
+ } else {
+ dma_unmap_page(netcp->dev, dma, buf_len,
+ DMA_FROM_DEVICE);
+ __free_page(buf_ptr);
+ }
+
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ }
+}
+
+static void netcp_rxpool_free(struct netcp_intf *netcp)
+{
+ int i;
+
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+ !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
+ netcp_free_rx_buf(netcp, i);
+
+ if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
+ dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
+ netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
+
+ knav_pool_destroy(netcp->rx_pool);
+ netcp->rx_pool = NULL;
+}
+
+static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+ struct knav_dma_desc *hwdesc;
+ unsigned int buf_len, dma_sz;
+ u32 desc_info, pkt_info;
+ struct page *page;
+ dma_addr_t dma;
+ void *bufptr;
+ u32 pad[2];
+
+ /* Allocate descriptor */
+ hwdesc = knav_pool_desc_get(netcp->rx_pool);
+ if (IS_ERR_OR_NULL(hwdesc)) {
+ dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
+ return;
+ }
+
+ if (likely(fdq == 0)) {
+ unsigned int primary_buf_len;
+ /* Allocate a primary receive queue entry */
+ buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ primary_buf_len = SKB_DATA_ALIGN(buf_len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (primary_buf_len <= PAGE_SIZE) {
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
+ } else {
+ bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
+ GFP_DMA32 | __GFP_COLD);
+ pad[1] = 0;
+ }
+
+ if (unlikely(!bufptr)) {
+ dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ goto fail;
+ }
+ dma = dma_map_single(netcp->dev, bufptr, buf_len,
+ DMA_TO_DEVICE);
+ pad[0] = (u32)bufptr;
+
+ } else {
+ /* Allocate a secondary receive queue entry */
+ page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ if (unlikely(!page)) {
+ dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
+ goto fail;
+ }
+ buf_len = PAGE_SIZE;
+ dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
+ pad[0] = (u32)page;
+ pad[1] = 0;
+ }
+
+ desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
+ desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
+ pkt_info = KNAV_DMA_DESC_HAS_EPIB;
+ pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
+ pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
+ KNAV_DMA_DESC_RETQ_SHIFT;
+ set_org_pkt_info(dma, buf_len, hwdesc);
+ set_pad_info(pad[0], pad[1], hwdesc);
+ set_desc_info(desc_info, pkt_info, hwdesc);
+
+ /* Push to FDQs */
+ knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
+ &dma_sz);
+ knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
+ return;
+
+fail:
+ knav_pool_desc_put(netcp->rx_pool, hwdesc);
+}
+
+/* Refill Rx FDQ with descriptors & attached buffers */
+static void netcp_rxpool_refill(struct netcp_intf *netcp)
+{
+ u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
+ int i;
+
+ /* Calculate the FDQ deficit and refill */
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
+ fdq_deficit[i] = netcp->rx_queue_depths[i] -
+ knav_queue_get_count(netcp->rx_fdq[i]);
+
+ while (fdq_deficit[i]--)
+ netcp_allocate_rx_buf(netcp, i);
+ } /* end for fdqs */
+}
+
+/* NAPI poll */
+static int netcp_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+ rx_napi);
+ unsigned int packets;
+
+ packets = netcp_process_rx_packets(netcp, budget);
+
+ if (packets < budget) {
+ napi_complete(&netcp->rx_napi);
+ knav_queue_enable_notify(netcp->rx_queue);
+ }
+
+ netcp_rxpool_refill(netcp);
+ return packets;
+}
+
+static void netcp_rx_notify(void *arg)
+{
+ struct netcp_intf *netcp = arg;
+
+ knav_queue_disable_notify(netcp->rx_queue);
+ napi_schedule(&netcp->rx_napi);
+}
+
+static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
+ struct knav_dma_desc *desc,
+ unsigned int desc_sz)
+{
+ struct knav_dma_desc *ndesc = desc;
+ dma_addr_t dma_desc, dma_buf;
+ unsigned int buf_len;
+
+ while (ndesc) {
+ get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
+
+ if (dma_buf && buf_len)
+ dma_unmap_single(netcp->dev, dma_buf, buf_len,
+ DMA_TO_DEVICE);
+ else
+ dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
+ (void *)dma_buf, buf_len);
+
+ knav_pool_desc_put(netcp->tx_pool, ndesc);
+ ndesc = NULL;
+ if (dma_desc) {
+ ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
+ desc_sz);
+ if (!ndesc)
+ dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+ }
+ }
+}
+
+static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
+ unsigned int budget)
+{
+ struct knav_dma_desc *desc;
+ struct sk_buff *skb;
+ unsigned int dma_sz;
+ dma_addr_t dma;
+ int pkts = 0;
+ u32 tmp;
+
+ while (budget--) {
+ dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+ netcp->ndev->stats.tx_errors++;
+ continue;
+ }
+
+ get_pad_info((u32 *)&skb, &tmp, desc);
+ netcp_free_tx_desc_chain(netcp, desc, dma_sz);
+ if (!skb) {
+ dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
+ netcp->ndev->stats.tx_errors++;
+ continue;
+ }
+
+ if (netif_subqueue_stopped(netcp->ndev, skb) &&
+ netif_running(netcp->ndev) &&
+ (knav_pool_count(netcp->tx_pool) >
+ netcp->tx_resume_threshold)) {
+ u16 subqueue = skb_get_queue_mapping(skb);
+
+ netif_wake_subqueue(netcp->ndev, subqueue);
+ }
+
+ netcp->ndev->stats.tx_packets++;
+ netcp->ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+ pkts++;
+ }
+ return pkts;
+}
+
+static int netcp_tx_poll(struct napi_struct *napi, int budget)
+{
+ int packets;
+ struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+ tx_napi);
+
+ packets = netcp_process_tx_compl_packets(netcp, budget);
+ if (packets < budget) {
+ napi_complete(&netcp->tx_napi);
+ knav_queue_enable_notify(netcp->tx_compl_q);
+ }
+
+ return packets;
+}
+
+static void netcp_tx_notify(void *arg)
+{
+ struct netcp_intf *netcp = arg;
+
+ knav_queue_disable_notify(netcp->tx_compl_q);
+ napi_schedule(&netcp->tx_napi);
+}
+
+static struct knav_dma_desc*
+netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
+{
+ struct knav_dma_desc *desc, *ndesc, *pdesc;
+ unsigned int pkt_len = skb_headlen(skb);
+ struct device *dev = netcp->dev;
+ dma_addr_t dma_addr;
+ unsigned int dma_sz;
+ int i;
+
+ /* Map the linear buffer */
+ dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (unlikely(!dma_addr)) {
+ dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
+ return NULL;
+ }
+
+ desc = knav_pool_desc_get(netcp->tx_pool);
+ if (unlikely(IS_ERR_OR_NULL(desc))) {
+ dev_err(netcp->ndev_dev, "out of TX desc\n");
+ dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
+ return NULL;
+ }
+
+ set_pkt_info(dma_addr, pkt_len, 0, desc);
+ if (skb_is_nonlinear(skb)) {
+ prefetchw(skb_shinfo(skb));
+ } else {
+ desc->next_desc = 0;
+ goto upd_pkt_len;
+ }
+
+ pdesc = desc;
+
+ /* Handle the case where skb is fragmented in pages */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = skb_frag_page(frag);
+ u32 page_offset = frag->page_offset;
+ u32 buf_len = skb_frag_size(frag);
+ dma_addr_t desc_dma;
+ u32 pkt_info;
+
+ dma_addr = dma_map_page(dev, page, page_offset, buf_len,
+ DMA_TO_DEVICE);
+ if (unlikely(!dma_addr)) {
+ dev_err(netcp->ndev_dev, "Failed to map skb page\n");
+ goto free_descs;
+ }
+
+ ndesc = knav_pool_desc_get(netcp->tx_pool);
+ if (unlikely(IS_ERR_OR_NULL(ndesc))) {
+ dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
+ dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
+ goto free_descs;
+ }
+
+ desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
+ (void *)ndesc);
+ pkt_info =
+ (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
+ KNAV_DMA_DESC_RETQ_SHIFT;
+ set_pkt_info(dma_addr, buf_len, 0, ndesc);
+ set_words(&desc_dma, 1, &pdesc->next_desc);
+ pkt_len += buf_len;
+ if (pdesc != desc)
+ knav_pool_desc_map(netcp->tx_pool, pdesc,
+ sizeof(*pdesc), &desc_dma, &dma_sz);
+ pdesc = ndesc;
+ }
+ if (pdesc != desc)
+ knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
+ &dma_addr, &dma_sz);
+
+ /* frag list based linkage is not supported for now. */
+ if (skb_shinfo(skb)->frag_list) {
+ dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
+ goto free_descs;
+ }
+
+upd_pkt_len:
+ WARN_ON(pkt_len != skb->len);
+
+ pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
+ set_words(&pkt_len, 1, &desc->desc_info);
+ return desc;
+
+free_descs:
+ netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+ return NULL;
+}
+
+static int netcp_tx_submit_skb(struct netcp_intf *netcp,
+ struct sk_buff *skb,
+ struct knav_dma_desc *desc)
+{
+ struct netcp_tx_pipe *tx_pipe = NULL;
+ struct netcp_hook_list *tx_hook;
+ struct netcp_packet p_info;
+ u32 packet_info = 0;
+ unsigned int dma_sz;
+ dma_addr_t dma;
+ int ret = 0;
+
+ p_info.netcp = netcp;
+ p_info.skb = skb;
+ p_info.tx_pipe = NULL;
+ p_info.psdata_len = 0;
+ p_info.ts_context = NULL;
+ p_info.txtstamp_complete = NULL;
+ p_info.epib = desc->epib;
+ p_info.psdata = desc->psdata;
+ memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));
+
+ /* Find out where to inject the packet for transmission */
+ list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
+ ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
+ &p_info);
+ if (unlikely(ret != 0)) {
+ dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
+ tx_hook->order, ret);
+ ret = (ret < 0) ? ret : NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* Make sure some TX hook claimed the packet */
+ tx_pipe = p_info.tx_pipe;
+ if (!tx_pipe) {
+ dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* update descriptor */
+ if (p_info.psdata_len) {
+ u32 *psdata = p_info.psdata;
+
+ memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
+ p_info.psdata_len);
+ set_words(psdata, p_info.psdata_len, psdata);
+ packet_info |=
+ (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
+ KNAV_DMA_DESC_PSLEN_SHIFT;
+ }
+
+ packet_info |= KNAV_DMA_DESC_HAS_EPIB |
+ ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
+ KNAV_DMA_DESC_RETQ_SHIFT) |
+ ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
+ KNAV_DMA_DESC_PSFLAG_SHIFT);
+
+ set_words(&packet_info, 1, &desc->packet_info);
+ set_words((u32 *)&skb, 1, &desc->pad[0]);
+
+ /* submit packet descriptor */
+ ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
+ &dma_sz);
+ if (unlikely(ret)) {
+ dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+ skb_tx_timestamp(skb);
+ knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
+
+out:
+ return ret;
+}
+
+/* Submit the packet */
+static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ int subqueue = skb_get_queue_mapping(skb);
+ struct knav_dma_desc *desc;
+ int desc_count, ret = 0;
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
+ ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
+ if (ret < 0) {
+ /* If we get here, the skb has already been dropped */
+ dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
+ ret);
+ ndev->stats.tx_dropped++;
+ return ret;
+ }
+ skb->len = NETCP_MIN_PACKET_SIZE;
+ }
+
+ desc = netcp_tx_map_skb(skb, netcp);
+ if (unlikely(!desc)) {
+ netif_stop_subqueue(ndev, subqueue);
+ ret = -ENOBUFS;
+ goto drop;
+ }
+
+ ret = netcp_tx_submit_skb(netcp, skb, desc);
+ if (ret)
+ goto drop;
+
+ ndev->trans_start = jiffies;
+
+ /* Check Tx pool count & stop subqueue if needed */
+ desc_count = knav_pool_count(netcp->tx_pool);
+ if (desc_count < netcp->tx_pause_threshold) {
+ dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
+ netif_stop_subqueue(ndev, subqueue);
+ }
+ return NETDEV_TX_OK;
+
+drop:
+ ndev->stats.tx_dropped++;
+ if (desc)
+ netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
+{
+ if (tx_pipe->dma_channel) {
+ knav_dma_close_channel(tx_pipe->dma_channel);
+ tx_pipe->dma_channel = NULL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_close);
+
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
+{
+ struct device *dev = tx_pipe->netcp_device->device;
+ struct knav_dma_cfg config;
+ int ret = 0;
+ u8 name[16];
+
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_MEM_TO_DEV;
+ config.u.tx.filt_einfo = false;
+ config.u.tx.filt_pswords = false;
+ config.u.tx.priority = DMA_PRIO_MED_L;
+
+ tx_pipe->dma_channel = knav_dma_open_channel(dev,
+ tx_pipe->dma_chan_name, &config);
+ if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
+ dev_err(dev, "failed opening tx chan(%s)\n",
+ tx_pipe->dma_chan_name);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
+ tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
+ KNAV_QUEUE_SHARED);
+ if (IS_ERR(tx_pipe->dma_queue)) {
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
+ name, ret);
+ ret = PTR_ERR(tx_pipe->dma_queue);
+ goto err;
+ }
+
+ dev_dbg(dev, "opened tx pipe %s\n", name);
+ return 0;
+
+err:
+ if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+ knav_dma_close_channel(tx_pipe->dma_channel);
+ tx_pipe->dma_channel = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_open);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+ struct netcp_device *netcp_device,
+ const char *dma_chan_name, unsigned int dma_queue_id)
+{
+ memset(tx_pipe, 0, sizeof(*tx_pipe));
+ tx_pipe->netcp_device = netcp_device;
+ tx_pipe->dma_chan_name = dma_chan_name;
+ tx_pipe->dma_queue_id = dma_queue_id;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_init);
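
Illustrative sketch (not part of the patch): the TX-pipe lifecycle as a module would typically drive it with the three helpers above. The channel name and queue id are placeholders; in a real module they would come from the module's configuration.

static int example_setup_tx_pipe(struct netcp_tx_pipe *tx_pipe,
				 struct netcp_device *netcp_device)
{
	int ret;

	/* record the channel name and transmit queue id; no resources yet */
	netcp_txpipe_init(tx_pipe, netcp_device, "nettx0", 648);

	/* open the DMA channel and transmit queue (module open() time) */
	ret = netcp_txpipe_open(tx_pipe);
	if (ret)
		return ret;

	/* ... packets are now pushed to tx_pipe->dma_queue by the core ... */

	/* tear everything down again (module close() time) */
	return netcp_txpipe_close(tx_pipe);
}
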
+
+static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
+ const u8 *addr,
+ enum netcp_addr_type type)
+{
+ struct netcp_addr *naddr;
+
+ list_for_each_entry(naddr, &netcp->addr_list, node) {
+ if (naddr->type != type)
+ continue;
+ if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
+ continue;
+ return naddr;
+ }
+
+ return NULL;
+}
+
+static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
+ const u8 *addr,
+ enum netcp_addr_type type)
+{
+ struct netcp_addr *naddr;
+
+ naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
+ if (!naddr)
+ return NULL;
+
+ naddr->type = type;
+ naddr->flags = 0;
+ naddr->netcp = netcp;
+ if (addr)
+ ether_addr_copy(naddr->addr, addr);
+ else
+ memset(naddr->addr, 0, ETH_ALEN);
+ list_add_tail(&naddr->node, &netcp->addr_list);
+
+ return naddr;
+}
+
+static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
+{
+ list_del(&naddr->node);
+ devm_kfree(netcp->dev, naddr);
+}
+
+static void netcp_addr_clear_mark(struct netcp_intf *netcp)
+{
+ struct netcp_addr *naddr;
+
+ list_for_each_entry(naddr, &netcp->addr_list, node)
+ naddr->flags = 0;
+}
+
+static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
+ enum netcp_addr_type type)
+{
+ struct netcp_addr *naddr;
+
+ naddr = netcp_addr_find(netcp, addr, type);
+ if (naddr) {
+ naddr->flags |= ADDR_VALID;
+ return;
+ }
+
+ naddr = netcp_addr_add(netcp, addr, type);
+ if (!WARN_ON(!naddr))
+ naddr->flags |= ADDR_NEW;
+}
+
+static void netcp_addr_sweep_del(struct netcp_intf *netcp)
+{
+ struct netcp_addr *naddr, *tmp;
+ struct netcp_intf_modpriv *priv;
+ struct netcp_module *module;
+ int error;
+
+ list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+ if (naddr->flags & (ADDR_VALID | ADDR_NEW))
+ continue;
+ dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
+ naddr->addr, naddr->type);
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, priv) {
+ module = priv->netcp_module;
+ if (!module->del_addr)
+ continue;
+ error = module->del_addr(priv->module_priv,
+ naddr);
+ WARN_ON(error);
+ }
+ mutex_unlock(&netcp_modules_lock);
+ netcp_addr_del(netcp, naddr);
+ }
+}
+
+static void netcp_addr_sweep_add(struct netcp_intf *netcp)
+{
+ struct netcp_addr *naddr, *tmp;
+ struct netcp_intf_modpriv *priv;
+ struct netcp_module *module;
+ int error;
+
+ list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+ if (!(naddr->flags & ADDR_NEW))
+ continue;
+ dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
+ naddr->addr, naddr->type);
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, priv) {
+ module = priv->netcp_module;
+ if (!module->add_addr)
+ continue;
+ error = module->add_addr(priv->module_priv, naddr);
+ WARN_ON(error);
+ }
+ mutex_unlock(&netcp_modules_lock);
+ }
+}
+
+static void netcp_set_rx_mode(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netdev_hw_addr *ndev_addr;
+ bool promisc;
+
+ promisc = (ndev->flags & IFF_PROMISC ||
+ ndev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
+
+ /* first clear all marks */
+ netcp_addr_clear_mark(netcp);
+
+ /* next add new entries, mark existing ones */
+ netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
+ for_each_dev_addr(ndev, ndev_addr)
+ netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
+ netdev_for_each_uc_addr(ndev_addr, ndev)
+ netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
+ netdev_for_each_mc_addr(ndev_addr, ndev)
+ netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
+
+ if (promisc)
+ netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
+
+ /* finally sweep and callout into modules */
+ netcp_addr_sweep_del(netcp);
+ netcp_addr_sweep_add(netcp);
+}
+
+static void netcp_free_navigator_resources(struct netcp_intf *netcp)
+{
+ int i;
+
+ if (netcp->rx_channel) {
+ knav_dma_close_channel(netcp->rx_channel);
+ netcp->rx_channel = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(netcp->rx_pool))
+ netcp_rxpool_free(netcp);
+
+ if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
+ knav_queue_close(netcp->rx_queue);
+ netcp->rx_queue = NULL;
+ }
+
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+ !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
+ knav_queue_close(netcp->rx_fdq[i]);
+ netcp->rx_fdq[i] = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+ knav_queue_close(netcp->tx_compl_q);
+ netcp->tx_compl_q = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
+ knav_pool_destroy(netcp->tx_pool);
+ netcp->tx_pool = NULL;
+ }
+}
+
+static int netcp_setup_navigator_resources(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct knav_queue_notify_config notify_cfg;
+ struct knav_dma_cfg config;
+ u32 last_fdq = 0;
+ u8 name[16];
+ int ret;
+ int i;
+
+ /* Create Rx/Tx descriptor pools */
+ snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
+ netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
+ netcp->rx_pool_region_id);
+ if (IS_ERR_OR_NULL(netcp->rx_pool)) {
+ dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
+ ret = PTR_ERR(netcp->rx_pool);
+ goto fail;
+ }
+
+ snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
+ netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
+ netcp->tx_pool_region_id);
+ if (IS_ERR_OR_NULL(netcp->tx_pool)) {
+ dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
+ ret = PTR_ERR(netcp->tx_pool);
+ goto fail;
+ }
+
+ /* open Tx completion queue */
+ snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
+ netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
+ if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+ ret = PTR_ERR(netcp->tx_compl_q);
+ goto fail;
+ }
+ netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
+
+ /* Set notification for Tx completion */
+ notify_cfg.fn = netcp_tx_notify;
+ notify_cfg.fn_arg = netcp;
+ ret = knav_queue_device_control(netcp->tx_compl_q,
+ KNAV_QUEUE_SET_NOTIFIER,
+ (unsigned long)&notify_cfg);
+ if (ret)
+ goto fail;
+
+ knav_queue_disable_notify(netcp->tx_compl_q);
+
+ /* open Rx completion queue */
+ snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
+ netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
+ if (IS_ERR_OR_NULL(netcp->rx_queue)) {
+ ret = PTR_ERR(netcp->rx_queue);
+ goto fail;
+ }
+ netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
+
+ /* Set notification for Rx completion */
+ notify_cfg.fn = netcp_rx_notify;
+ notify_cfg.fn_arg = netcp;
+ ret = knav_queue_device_control(netcp->rx_queue,
+ KNAV_QUEUE_SET_NOTIFIER,
+ (unsigned long)&notify_cfg);
+ if (ret)
+ goto fail;
+
+ knav_queue_disable_notify(netcp->rx_queue);
+
+ /* open Rx FDQs */
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+ netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
+ netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
+ ret = PTR_ERR(netcp->rx_fdq[i]);
+ goto fail;
+ }
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_DEV_TO_MEM;
+ config.u.rx.einfo_present = true;
+ config.u.rx.psinfo_present = true;
+ config.u.rx.err_mode = DMA_DROP;
+ config.u.rx.desc_type = DMA_DESC_HOST;
+ config.u.rx.psinfo_at_sop = false;
+ config.u.rx.sop_offset = NETCP_SOP_OFFSET;
+ config.u.rx.dst_q = netcp->rx_queue_id;
+ config.u.rx.thresh = DMA_THRESH_NONE;
+
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
+ if (netcp->rx_fdq[i])
+ last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
+ config.u.rx.fdq[i] = last_fdq;
+ }
+
+ netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
+ netcp->dma_chan_name, &config);
+ if (IS_ERR_OR_NULL(netcp->rx_channel)) {
+ dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
+ netcp->dma_chan_name);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
+ return 0;
+
+fail:
+ netcp_free_navigator_resources(netcp);
+ return ret;
+}
+
+/* Open the device */
+static int netcp_ndo_open(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int ret;
+
+ netif_carrier_off(ndev);
+ ret = netcp_setup_navigator_resources(ndev);
+ if (ret) {
+ dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
+ goto fail;
+ }
+
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->open) {
+ ret = module->open(intf_modpriv->module_priv, ndev);
+ if (ret != 0) {
+ dev_err(netcp->ndev_dev, "module open failed\n");
+ goto fail_open;
+ }
+ }
+ }
+ mutex_unlock(&netcp_modules_lock);
+
+ netcp_rxpool_refill(netcp);
+ napi_enable(&netcp->rx_napi);
+ napi_enable(&netcp->tx_napi);
+ knav_queue_enable_notify(netcp->tx_compl_q);
+ knav_queue_enable_notify(netcp->rx_queue);
+ netif_tx_wake_all_queues(ndev);
+ dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
+ return 0;
+
+fail_open:
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->close)
+ module->close(intf_modpriv->module_priv, ndev);
+ }
+ mutex_unlock(&netcp_modules_lock);
+
+fail:
+ netcp_free_navigator_resources(netcp);
+ return ret;
+}
+
+/* Close the device */
+static int netcp_ndo_stop(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int err = 0;
+
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ netcp_addr_clear_mark(netcp);
+ netcp_addr_sweep_del(netcp);
+ knav_queue_disable_notify(netcp->rx_queue);
+ knav_queue_disable_notify(netcp->tx_compl_q);
+ napi_disable(&netcp->rx_napi);
+ napi_disable(&netcp->tx_napi);
+
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->close) {
+ err = module->close(intf_modpriv->module_priv, ndev);
+ if (err != 0)
+ dev_err(netcp->ndev_dev, "Close failed\n");
+ }
+ }
+ mutex_unlock(&netcp_modules_lock);
+
+ /* Recycle Rx descriptors from completion queue */
+ netcp_empty_rx_queue(netcp);
+
+ /* Recycle Tx descriptors from completion queue */
+ netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+
+ if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
+ dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
+ netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
+
+ netcp_free_navigator_resources(netcp);
+ dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
+ return 0;
+}
+
+static int netcp_ndo_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int ret = -1, err = -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (!module->ioctl)
+ continue;
+
+ err = module->ioctl(intf_modpriv->module_priv, req, cmd);
+ if ((err < 0) && (err != -EOPNOTSUPP)) {
+ ret = err;
+ goto out;
+ }
+ if (err == 0)
+ ret = err;
+ }
+
+out:
+ mutex_unlock(&netcp_modules_lock);
+ return (ret == 0) ? 0 : err;
+}
+
+static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+
+ /* MTU < 68 is an error for IPv4 traffic */
+ if ((new_mtu < 68) ||
+ (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
+ dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
+ return -EINVAL;
+ }
+
+ ndev->mtu = new_mtu;
+ return 0;
+}
+
+static void netcp_ndo_tx_timeout(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ unsigned int descs = knav_pool_count(netcp->tx_pool);
+
+ dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
+ netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+ ndev->trans_start = jiffies;
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int err = 0;
+
+ dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
+
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if ((module->add_vid) && (vid != 0)) {
+ err = module->add_vid(intf_modpriv->module_priv, vid);
+ if (err != 0) {
+ dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
+ vid);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&netcp_modules_lock);
+ return err;
+}
+
+static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int err = 0;
+
+ dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
+
+ mutex_lock(&netcp_modules_lock);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->del_vid) {
+ err = module->del_vid(intf_modpriv->module_priv, vid);
+ if (err != 0) {
+ dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
+ vid);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&netcp_modules_lock);
+ return err;
+}
+
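+/* Only a single hardware transmit queue is used, so always select queue 0 */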
+static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv,
+ select_queue_fallback_t fallback)
+{
+ return 0;
+}
+
+static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
+{
+ int i;
+
+ /* setup tc must be called under rtnl lock */
+ ASSERT_RTNL();
+
+ /* Sanity-check the number of traffic classes requested */
+ if ((dev->real_num_tx_queues <= 1) ||
+ (dev->real_num_tx_queues < num_tc))
+ return -EINVAL;
+
+ /* Configure traffic class to queue mappings */
+ if (num_tc) {
+ netdev_set_num_tc(dev, num_tc);
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(dev, i, 1, i);
+ } else {
+ netdev_reset_tc(dev);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops netcp_netdev_ops = {
+ .ndo_open = netcp_ndo_open,
+ .ndo_stop = netcp_ndo_stop,
+ .ndo_start_xmit = netcp_ndo_start_xmit,
+ .ndo_set_rx_mode = netcp_set_rx_mode,
+ .ndo_do_ioctl = netcp_ndo_ioctl,
+ .ndo_change_mtu = netcp_ndo_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = netcp_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
+ .ndo_tx_timeout = netcp_ndo_tx_timeout,
+ .ndo_select_queue = netcp_select_queue,
+ .ndo_setup_tc = netcp_setup_tc,
+};
+
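+/* Allocate and initialize a net_device for one "netcp-interfaces" child
+ * node: MAC address (from eFuse or the DT node), RX channel, queue and
+ * pool configuration, NAPI contexts and netdev ops. The new interface is
+ * added to the NETCP instance's interface list.
+ */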
+static int netcp_create_interface(struct netcp_device *netcp_device,
+ struct device_node *node_interface)
+{
+ struct device *dev = netcp_device->device;
+ struct device_node *node = dev->of_node;
+ struct netcp_intf *netcp;
+ struct net_device *ndev;
+ resource_size_t size;
+ struct resource res;
+ void __iomem *efuse = NULL;
+ u32 efuse_mac = 0;
+ const void *mac_addr;
+ u8 efuse_mac_addr[6];
+ u32 temp[2];
+ int ret = 0;
+
+ ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
+ if (!ndev) {
+ dev_err(dev, "Error allocating netdev\n");
+ return -ENOMEM;
+ }
+
+ ndev->features |= NETIF_F_SG;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->hw_features = ndev->features;
+ ndev->vlan_features |= NETIF_F_SG;
+
+ netcp = netdev_priv(ndev);
+ spin_lock_init(&netcp->lock);
+ INIT_LIST_HEAD(&netcp->module_head);
+ INIT_LIST_HEAD(&netcp->txhook_list_head);
+ INIT_LIST_HEAD(&netcp->rxhook_list_head);
+ INIT_LIST_HEAD(&netcp->addr_list);
+ netcp->netcp_device = netcp_device;
+ netcp->dev = netcp_device->device;
+ netcp->ndev = ndev;
+ netcp->ndev_dev = &ndev->dev;
+ netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
+ netcp->tx_pause_threshold = MAX_SKB_FRAGS;
+ netcp->tx_resume_threshold = netcp->tx_pause_threshold;
+ netcp->node_interface = node_interface;
+
+ ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
+ if (efuse_mac) {
+ if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
+ dev_err(dev, "could not find efuse-mac reg resource\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+ size = resource_size(&res);
+
+ if (!devm_request_mem_region(dev, res.start, size,
+ dev_name(dev))) {
+ dev_err(dev, "could not reserve resource\n");
+ ret = -ENOMEM;
+ goto quit;
+ }
+
+ efuse = devm_ioremap_nocache(dev, res.start, size);
+ if (!efuse) {
+ dev_err(dev, "could not map resource\n");
+ devm_release_mem_region(dev, res.start, size);
+ ret = -ENOMEM;
+ goto quit;
+ }
+
+ emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+ if (is_valid_ether_addr(efuse_mac_addr))
+ ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
+ else
+ random_ether_addr(ndev->dev_addr);
+
+ devm_iounmap(dev, efuse);
+ devm_release_mem_region(dev, res.start, size);
+ } else {
+ mac_addr = of_get_mac_address(node_interface);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ else
+ random_ether_addr(ndev->dev_addr);
+ }
+
+ ret = of_property_read_string(node_interface, "rx-channel",
+ &netcp->dma_chan_name);
+ if (ret < 0) {
+ dev_err(dev, "missing \"rx-channel\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ ret = of_property_read_u32(node_interface, "rx-queue",
+ &netcp->rx_queue_id);
+ if (ret < 0) {
+ dev_warn(dev, "missing \"rx-queue\" parameter\n");
+ netcp->rx_queue_id = KNAV_QUEUE_QPEND;
+ }
+
+ ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
+ netcp->rx_queue_depths,
+ KNAV_DMA_FDQ_PER_CHAN);
+ if (ret < 0) {
+ dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
+ netcp->rx_queue_depths[0] = 128;
+ }
+
+ ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
+ netcp->rx_buffer_sizes,
+ KNAV_DMA_FDQ_PER_CHAN);
+ if (ret) {
+ dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
+ netcp->rx_buffer_sizes[0] = 1536;
+ }
+
+ ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
+ if (ret < 0) {
+ dev_err(dev, "missing \"rx-pool\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+ netcp->rx_pool_size = temp[0];
+ netcp->rx_pool_region_id = temp[1];
+
+ ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
+ if (ret < 0) {
+ dev_err(dev, "missing \"tx-pool\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+ netcp->tx_pool_size = temp[0];
+ netcp->tx_pool_region_id = temp[1];
+
+ if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
+ dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+ MAX_SKB_FRAGS);
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ ret = of_property_read_u32(node_interface, "tx-completion-queue",
+ &netcp->tx_compl_qid);
+ if (ret < 0) {
+ dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
+ netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
+ }
+
+ /* NAPI register */
+ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
+ netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
+
+ /* Register the network device */
+ ndev->dev_id = 0;
+ ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
+ ndev->netdev_ops = &netcp_netdev_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
+ return 0;
+
+quit:
+ free_netdev(ndev);
+ return ret;
+}
+
+static void netcp_delete_interface(struct netcp_device *netcp_device,
+ struct net_device *ndev)
+{
+ struct netcp_intf_modpriv *intf_modpriv, *tmp;
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_module *module;
+
+ dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
+ ndev->name);
+
+ /* Notify each of the modules that the interface is going away */
+ list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
+ intf_list) {
+ module = intf_modpriv->netcp_module;
+ dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
+ module->name);
+ if (module->release)
+ module->release(intf_modpriv->module_priv);
+ list_del(&intf_modpriv->intf_list);
+ kfree(intf_modpriv);
+ }
+ WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
+ ndev->name);
+
+ list_del(&netcp->interface_list);
+
+ of_node_put(netcp->node_interface);
+ unregister_netdev(ndev);
+ netif_napi_del(&netcp->rx_napi);
+ free_netdev(ndev);
+}
+
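+/* Platform probe: enable the NETCP power domain, create an interface for
+ * each child of the "netcp-interfaces" node and attach any NETCP modules
+ * that have already registered.
+ */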
+static int netcp_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct netcp_intf *netcp_intf, *netcp_tmp;
+ struct device_node *child, *interfaces;
+ struct netcp_device *netcp_device;
+ struct device *dev = &pdev->dev;
+ struct netcp_module *module;
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "could not find device info\n");
+ return -ENODEV;
+ }
+
+ /* Allocate a new NETCP device instance */
+ netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
+ if (!netcp_device)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable NETCP power-domain\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ /* Initialize the NETCP device instance */
+ INIT_LIST_HEAD(&netcp_device->interface_head);
+ INIT_LIST_HEAD(&netcp_device->modpriv_head);
+ netcp_device->device = dev;
+ platform_set_drvdata(pdev, netcp_device);
+
+ /* create interfaces */
+ interfaces = of_get_child_by_name(node, "netcp-interfaces");
+ if (!interfaces) {
+ dev_err(dev, "could not find netcp-interfaces node\n");
+ ret = -ENODEV;
+ goto probe_quit;
+ }
+
+ for_each_available_child_of_node(interfaces, child) {
+ ret = netcp_create_interface(netcp_device, child);
+ if (ret) {
+ dev_err(dev, "could not create interface(%s)\n",
+ child->name);
+ goto probe_quit_interface;
+ }
+ }
+
+ /* Add the device instance to the list */
+ list_add_tail(&netcp_device->device_list, &netcp_devices);
+
+ /* Probe & attach any modules already registered */
+ mutex_lock(&netcp_modules_lock);
+ for_each_netcp_module(module) {
+ ret = netcp_module_probe(netcp_device, module);
+ if (ret < 0)
+ dev_err(dev, "module(%s) probe failed\n", module->name);
+ }
+ mutex_unlock(&netcp_modules_lock);
+ return 0;
+
+probe_quit_interface:
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ netcp_delete_interface(netcp_device, netcp_intf->ndev);
+ }
+
+probe_quit:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int netcp_remove(struct platform_device *pdev)
+{
+ struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+ struct netcp_inst_modpriv *inst_modpriv, *tmp;
+ struct netcp_module *module;
+
+ list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
+ inst_list) {
+ module = inst_modpriv->netcp_module;
+ dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
+ module->remove(netcp_device, inst_modpriv->module_priv);
+ list_del(&inst_modpriv->inst_list);
+ kfree(inst_modpriv);
+ }
+ WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
+ pdev->name);
+
+ devm_kfree(&pdev->dev, netcp_device);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,netcp-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver netcp_driver = {
+ .driver = {
+ .name = "netcp-1.0",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match,
+ },
+ .probe = netcp_probe,
+ .remove = netcp_remove,
+};
+module_platform_driver(netcp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP driver for Keystone SoCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
new file mode 100644
index 000000000000..84f5ce525750
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -0,0 +1,2159 @@
+/*
+ * Keystone GBE and XGBE subsystem code
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+
+#include "cpsw_ale.h"
+#include "netcp.h"
+
+#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
+#define NETCP_DRIVER_VERSION "v1.0"
+
+#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
+#define GBE_MAJOR_VERSION(reg) ((reg >> 8) & 0x7)
+#define GBE_MINOR_VERSION(reg) (reg & 0xff)
+#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+
+/* 1G Ethernet SS defines */
+#define GBE_MODULE_NAME "netcp-gbe"
+#define GBE_SS_VERSION_14 0x4ed21104
+
+#define GBE13_SGMII_MODULE_OFFSET 0x100
+#define GBE13_SGMII34_MODULE_OFFSET 0x400
+#define GBE13_SWITCH_MODULE_OFFSET 0x800
+#define GBE13_HOST_PORT_OFFSET 0x834
+#define GBE13_SLAVE_PORT_OFFSET 0x860
+#define GBE13_EMAC_OFFSET 0x900
+#define GBE13_SLAVE_PORT2_OFFSET 0xa00
+#define GBE13_HW_STATS_OFFSET 0xb00
+#define GBE13_ALE_OFFSET 0xe00
+#define GBE13_HOST_PORT_NUM 0
+#define GBE13_NUM_SLAVES 4
+#define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1)
+#define GBE13_NUM_ALE_ENTRIES 1024
+
+/* 10G Ethernet SS defines */
+#define XGBE_MODULE_NAME "netcp-xgbe"
+#define XGBE_SS_VERSION_10 0x4ee42100
+
+#define XGBE_SERDES_REG_INDEX 1
+#define XGBE10_SGMII_MODULE_OFFSET 0x100
+#define XGBE10_SWITCH_MODULE_OFFSET 0x1000
+#define XGBE10_HOST_PORT_OFFSET 0x1034
+#define XGBE10_SLAVE_PORT_OFFSET 0x1064
+#define XGBE10_EMAC_OFFSET 0x1400
+#define XGBE10_ALE_OFFSET 0x1700
+#define XGBE10_HW_STATS_OFFSET 0x1800
+#define XGBE10_HOST_PORT_NUM 0
+#define XGBE10_NUM_SLAVES 2
+#define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1)
+#define XGBE10_NUM_ALE_ENTRIES 1024
+
+#define GBE_TIMER_INTERVAL (HZ / 2)
+
+/* Soft reset register values */
+#define SOFT_RESET_MASK BIT(0)
+#define SOFT_RESET BIT(0)
+#define DEVICE_EMACSL_RESET_POLL_COUNT 100
+#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
+
+#define MACSL_RX_ENABLE_CSF BIT(23)
+#define MACSL_ENABLE_EXT_CTL BIT(18)
+#define MACSL_XGMII_ENABLE BIT(13)
+#define MACSL_XGIG_MODE BIT(8)
+#define MACSL_GIG_MODE BIT(7)
+#define MACSL_GMII_ENABLE BIT(5)
+#define MACSL_FULLDUPLEX BIT(0)
+
+#define GBE_CTL_P0_ENABLE BIT(2)
+#define GBE_REG_VAL_STAT_ENABLE_ALL 0xff
+#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
+#define GBE_STATS_CD_SEL BIT(28)
+
+#define GBE_PORT_MASK(x) (BIT(x) - 1)
+#define GBE_MASK_NO_PORTS 0
+
+#define GBE_DEF_1G_MAC_CONTROL \
+ (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
+ MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
+
+#define GBE_DEF_10G_MAC_CONTROL \
+ (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
+ MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
+
+#define GBE_STATSA_MODULE 0
+#define GBE_STATSB_MODULE 1
+#define GBE_STATSC_MODULE 2
+#define GBE_STATSD_MODULE 3
+
+#define XGBE_STATS0_MODULE 0
+#define XGBE_STATS1_MODULE 1
+#define XGBE_STATS2_MODULE 2
+
+#define MAX_SLAVES GBE13_NUM_SLAVES
+/* s: 0-based slave_port */
+#define SGMII_BASE(s) \
+ (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
+
+#define GBE_TX_QUEUE 648
+#define GBE_TXHOOK_ORDER 0
+#define GBE_DEFAULT_ALE_AGEOUT 30
+#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
+#define NETCP_LINK_STATE_INVALID -1
+
+#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+ offsetof(struct gbe##_##rb, rn)
+#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+ offsetof(struct xgbe##_##rb, rn)
+#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
+
+struct xgbe_ss_regs {
+ u32 id_ver;
+ u32 synce_count;
+ u32 synce_mux;
+ u32 control;
+};
+
+struct xgbe_switch_regs {
+ u32 id_ver;
+ u32 control;
+ u32 emcontrol;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 cppi_thresh;
+};
+
+struct xgbe_port_regs {
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 sa_lo;
+ u32 sa_hi;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 ts_ctl_ltype2;
+ u32 ts_ctl2;
+ u32 control;
+};
+
+struct xgbe_host_port_regs {
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 src_id;
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+};
+
+struct xgbe_emac_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 em_control;
+ u32 __reserved_1;
+ u32 tx_gap;
+ u32 rsvd[4];
+};
+
+struct xgbe_host_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 __rsvd_0[3];
+ u32 rx_oversized_frames;
+ u32 __rsvd_1;
+ u32 rx_undersized_frames;
+ u32 __rsvd_2;
+ u32 overrun_type4;
+ u32 overrun_type5;
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 __rsvd_3[9];
+ u32 tx_bytes;
+ u32 tx_64byte_frames;
+ u32 tx_65_to_127byte_frames;
+ u32 tx_128_to_255byte_frames;
+ u32 tx_256_to_511byte_frames;
+ u32 tx_512_to_1023byte_frames;
+ u32 tx_1024byte_frames;
+ u32 net_bytes;
+ u32 rx_sof_overruns;
+ u32 rx_mof_overruns;
+ u32 rx_dma_overruns;
+};
+
+struct xgbe_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors;
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames;
+ u32 rx_undersized_frames;
+ u32 rx_fragments;
+ u32 overrun_type4;
+ u32 overrun_type5;
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_deferred_frames;
+ u32 tx_collision_frames;
+ u32 tx_single_coll_frames;
+ u32 tx_mult_coll_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_underrun;
+ u32 tx_carrier_sense_errors;
+ u32 tx_bytes;
+ u32 tx_64byte_frames;
+ u32 tx_65_to_127byte_frames;
+ u32 tx_128_to_255byte_frames;
+ u32 tx_256_to_511byte_frames;
+ u32 tx_512_to_1023byte_frames;
+ u32 tx_1024byte_frames;
+ u32 net_bytes;
+ u32 rx_sof_overruns;
+ u32 rx_mof_overruns;
+ u32 rx_dma_overruns;
+};
+
+#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
+
+struct gbe_ss_regs {
+ u32 id_ver;
+ u32 synce_count;
+ u32 synce_mux;
+};
+
+struct gbe_ss_regs_ofs {
+ u16 id_ver;
+ u16 control;
+};
+
+struct gbe_switch_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+};
+
+struct gbe_switch_regs_ofs {
+ u16 id_ver;
+ u16 control;
+ u16 soft_reset;
+ u16 emcontrol;
+ u16 stat_port_en;
+ u16 ptype;
+ u16 flow_control;
+};
+
+struct gbe_port_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 sa_lo;
+ u32 sa_hi;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 ts_ctl_ltype2;
+ u32 ts_ctl2;
+};
+
+struct gbe_port_regs_ofs {
+ u16 port_vlan;
+ u16 tx_pri_map;
+ u16 sa_lo;
+ u16 sa_hi;
+ u16 ts_ctl;
+ u16 ts_seq_ltype;
+ u16 ts_vlan;
+ u16 ts_ctl_ltype2;
+ u16 ts_ctl2;
+};
+
+struct gbe_host_port_regs {
+ u32 src_id;
+ u32 port_vlan;
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+};
+
+struct gbe_host_port_regs_ofs {
+ u16 port_vlan;
+ u16 tx_pri_map;
+ u16 rx_maxlen;
+};
+
+struct gbe_emac_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 __reserved_1;
+ u32 rx_pri_map;
+ u32 rsvd[6];
+};
+
+struct gbe_emac_regs_ofs {
+ u16 mac_control;
+ u16 soft_reset;
+ u16 rx_maxlen;
+};
+
+struct gbe_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors;
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames;
+ u32 rx_undersized_frames;
+ u32 rx_fragments;
+ u32 __pad_0[2];
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_deferred_frames;
+ u32 tx_collision_frames;
+ u32 tx_single_coll_frames;
+ u32 tx_mult_coll_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_underrun;
+ u32 tx_carrier_sense_errors;
+ u32 tx_bytes;
+ u32 tx_64byte_frames;
+ u32 tx_65_to_127byte_frames;
+ u32 tx_128_to_255byte_frames;
+ u32 tx_256_to_511byte_frames;
+ u32 tx_512_to_1023byte_frames;
+ u32 tx_1024byte_frames;
+ u32 net_bytes;
+ u32 rx_sof_overruns;
+ u32 rx_mof_overruns;
+ u32 rx_dma_overruns;
+};
+
+#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
+#define GBE13_NUM_HW_STATS_MOD 2
+#define XGBE10_NUM_HW_STATS_MOD 3
+#define GBE_MAX_HW_STAT_MODS 3
+#define GBE_HW_STATS_REG_MAP_SZ 0x100
+
+struct gbe_slave {
+ void __iomem *port_regs;
+ void __iomem *emac_regs;
+ struct gbe_port_regs_ofs port_regs_ofs;
+ struct gbe_emac_regs_ofs emac_regs_ofs;
+ int slave_num; /* 0 based logical number */
+ int port_num; /* actual port number */
+ atomic_t link_state;
+ bool open;
+ struct phy_device *phy;
+ u32 link_interface;
+ u32 mac_control;
+ u8 phy_port_t;
+ struct device_node *phy_node;
+ struct list_head slave_list;
+};
+
+struct gbe_priv {
+ struct device *dev;
+ struct netcp_device *netcp_device;
+ struct timer_list timer;
+ u32 num_slaves;
+ u32 ale_entries;
+ u32 ale_ports;
+ bool enable_ale;
+ struct netcp_tx_pipe tx_pipe;
+
+ int host_port;
+ u32 rx_packet_max;
+ u32 ss_version;
+
+ void __iomem *ss_regs;
+ void __iomem *switch_regs;
+ void __iomem *host_port_regs;
+ void __iomem *ale_reg;
+ void __iomem *sgmii_port_regs;
+ void __iomem *sgmii_port34_regs;
+ void __iomem *xgbe_serdes_regs;
+ void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
+
+ struct gbe_ss_regs_ofs ss_regs_ofs;
+ struct gbe_switch_regs_ofs switch_regs_ofs;
+ struct gbe_host_port_regs_ofs host_port_regs_ofs;
+
+ struct cpsw_ale *ale;
+ unsigned int tx_queue_id;
+ const char *dma_chan_name;
+
+ struct list_head gbe_intf_head;
+ struct list_head secondary_slaves;
+ struct net_device *dummy_ndev;
+
+ u64 *hw_stats;
+ const struct netcp_ethtool_stat *et_stats;
+ int num_et_stats;
+ /* Lock for updating the hwstats */
+ spinlock_t hw_stats_lock;
+};
+
+struct gbe_intf {
+ struct net_device *ndev;
+ struct device *dev;
+ struct gbe_priv *gbe_dev;
+ struct netcp_tx_pipe tx_pipe;
+ struct gbe_slave *slave;
+ struct list_head gbe_intf_list;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+};
+
+static struct netcp_module gbe_module;
+static struct netcp_module xgbe_module;
+
+/* Statistic management */
+struct netcp_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int type;
+ u32 size;
+ int offset;
+};
+
+#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field)
+
+#define GBE_STATSB_INFO(field) "GBE_B:"#field, GBE_STATSB_MODULE,\
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field)
+
+#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field)
+
+#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field)
+
+static const struct netcp_ethtool_stat gbe13_et_stats[] = {
+ /* GBE module A */
+ {GBE_STATSA_INFO(rx_good_frames)},
+ {GBE_STATSA_INFO(rx_broadcast_frames)},
+ {GBE_STATSA_INFO(rx_multicast_frames)},
+ {GBE_STATSA_INFO(rx_pause_frames)},
+ {GBE_STATSA_INFO(rx_crc_errors)},
+ {GBE_STATSA_INFO(rx_align_code_errors)},
+ {GBE_STATSA_INFO(rx_oversized_frames)},
+ {GBE_STATSA_INFO(rx_jabber_frames)},
+ {GBE_STATSA_INFO(rx_undersized_frames)},
+ {GBE_STATSA_INFO(rx_fragments)},
+ {GBE_STATSA_INFO(rx_bytes)},
+ {GBE_STATSA_INFO(tx_good_frames)},
+ {GBE_STATSA_INFO(tx_broadcast_frames)},
+ {GBE_STATSA_INFO(tx_multicast_frames)},
+ {GBE_STATSA_INFO(tx_pause_frames)},
+ {GBE_STATSA_INFO(tx_deferred_frames)},
+ {GBE_STATSA_INFO(tx_collision_frames)},
+ {GBE_STATSA_INFO(tx_single_coll_frames)},
+ {GBE_STATSA_INFO(tx_mult_coll_frames)},
+ {GBE_STATSA_INFO(tx_excessive_collisions)},
+ {GBE_STATSA_INFO(tx_late_collisions)},
+ {GBE_STATSA_INFO(tx_underrun)},
+ {GBE_STATSA_INFO(tx_carrier_sense_errors)},
+ {GBE_STATSA_INFO(tx_bytes)},
+ {GBE_STATSA_INFO(tx_64byte_frames)},
+ {GBE_STATSA_INFO(tx_65_to_127byte_frames)},
+ {GBE_STATSA_INFO(tx_128_to_255byte_frames)},
+ {GBE_STATSA_INFO(tx_256_to_511byte_frames)},
+ {GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
+ {GBE_STATSA_INFO(tx_1024byte_frames)},
+ {GBE_STATSA_INFO(net_bytes)},
+ {GBE_STATSA_INFO(rx_sof_overruns)},
+ {GBE_STATSA_INFO(rx_mof_overruns)},
+ {GBE_STATSA_INFO(rx_dma_overruns)},
+ /* GBE module B */
+ {GBE_STATSB_INFO(rx_good_frames)},
+ {GBE_STATSB_INFO(rx_broadcast_frames)},
+ {GBE_STATSB_INFO(rx_multicast_frames)},
+ {GBE_STATSB_INFO(rx_pause_frames)},
+ {GBE_STATSB_INFO(rx_crc_errors)},
+ {GBE_STATSB_INFO(rx_align_code_errors)},
+ {GBE_STATSB_INFO(rx_oversized_frames)},
+ {GBE_STATSB_INFO(rx_jabber_frames)},
+ {GBE_STATSB_INFO(rx_undersized_frames)},
+ {GBE_STATSB_INFO(rx_fragments)},
+ {GBE_STATSB_INFO(rx_bytes)},
+ {GBE_STATSB_INFO(tx_good_frames)},
+ {GBE_STATSB_INFO(tx_broadcast_frames)},
+ {GBE_STATSB_INFO(tx_multicast_frames)},
+ {GBE_STATSB_INFO(tx_pause_frames)},
+ {GBE_STATSB_INFO(tx_deferred_frames)},
+ {GBE_STATSB_INFO(tx_collision_frames)},
+ {GBE_STATSB_INFO(tx_single_coll_frames)},
+ {GBE_STATSB_INFO(tx_mult_coll_frames)},
+ {GBE_STATSB_INFO(tx_excessive_collisions)},
+ {GBE_STATSB_INFO(tx_late_collisions)},
+ {GBE_STATSB_INFO(tx_underrun)},
+ {GBE_STATSB_INFO(tx_carrier_sense_errors)},
+ {GBE_STATSB_INFO(tx_bytes)},
+ {GBE_STATSB_INFO(tx_64byte_frames)},
+ {GBE_STATSB_INFO(tx_65_to_127byte_frames)},
+ {GBE_STATSB_INFO(tx_128_to_255byte_frames)},
+ {GBE_STATSB_INFO(tx_256_to_511byte_frames)},
+ {GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
+ {GBE_STATSB_INFO(tx_1024byte_frames)},
+ {GBE_STATSB_INFO(net_bytes)},
+ {GBE_STATSB_INFO(rx_sof_overruns)},
+ {GBE_STATSB_INFO(rx_mof_overruns)},
+ {GBE_STATSB_INFO(rx_dma_overruns)},
+ /* GBE module C */
+ {GBE_STATSC_INFO(rx_good_frames)},
+ {GBE_STATSC_INFO(rx_broadcast_frames)},
+ {GBE_STATSC_INFO(rx_multicast_frames)},
+ {GBE_STATSC_INFO(rx_pause_frames)},
+ {GBE_STATSC_INFO(rx_crc_errors)},
+ {GBE_STATSC_INFO(rx_align_code_errors)},
+ {GBE_STATSC_INFO(rx_oversized_frames)},
+ {GBE_STATSC_INFO(rx_jabber_frames)},
+ {GBE_STATSC_INFO(rx_undersized_frames)},
+ {GBE_STATSC_INFO(rx_fragments)},
+ {GBE_STATSC_INFO(rx_bytes)},
+ {GBE_STATSC_INFO(tx_good_frames)},
+ {GBE_STATSC_INFO(tx_broadcast_frames)},
+ {GBE_STATSC_INFO(tx_multicast_frames)},
+ {GBE_STATSC_INFO(tx_pause_frames)},
+ {GBE_STATSC_INFO(tx_deferred_frames)},
+ {GBE_STATSC_INFO(tx_collision_frames)},
+ {GBE_STATSC_INFO(tx_single_coll_frames)},
+ {GBE_STATSC_INFO(tx_mult_coll_frames)},
+ {GBE_STATSC_INFO(tx_excessive_collisions)},
+ {GBE_STATSC_INFO(tx_late_collisions)},
+ {GBE_STATSC_INFO(tx_underrun)},
+ {GBE_STATSC_INFO(tx_carrier_sense_errors)},
+ {GBE_STATSC_INFO(tx_bytes)},
+ {GBE_STATSC_INFO(tx_64byte_frames)},
+ {GBE_STATSC_INFO(tx_65_to_127byte_frames)},
+ {GBE_STATSC_INFO(tx_128_to_255byte_frames)},
+ {GBE_STATSC_INFO(tx_256_to_511byte_frames)},
+ {GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
+ {GBE_STATSC_INFO(tx_1024byte_frames)},
+ {GBE_STATSC_INFO(net_bytes)},
+ {GBE_STATSC_INFO(rx_sof_overruns)},
+ {GBE_STATSC_INFO(rx_mof_overruns)},
+ {GBE_STATSC_INFO(rx_dma_overruns)},
+ /* GBE module D */
+ {GBE_STATSD_INFO(rx_good_frames)},
+ {GBE_STATSD_INFO(rx_broadcast_frames)},
+ {GBE_STATSD_INFO(rx_multicast_frames)},
+ {GBE_STATSD_INFO(rx_pause_frames)},
+ {GBE_STATSD_INFO(rx_crc_errors)},
+ {GBE_STATSD_INFO(rx_align_code_errors)},
+ {GBE_STATSD_INFO(rx_oversized_frames)},
+ {GBE_STATSD_INFO(rx_jabber_frames)},
+ {GBE_STATSD_INFO(rx_undersized_frames)},
+ {GBE_STATSD_INFO(rx_fragments)},
+ {GBE_STATSD_INFO(rx_bytes)},
+ {GBE_STATSD_INFO(tx_good_frames)},
+ {GBE_STATSD_INFO(tx_broadcast_frames)},
+ {GBE_STATSD_INFO(tx_multicast_frames)},
+ {GBE_STATSD_INFO(tx_pause_frames)},
+ {GBE_STATSD_INFO(tx_deferred_frames)},
+ {GBE_STATSD_INFO(tx_collision_frames)},
+ {GBE_STATSD_INFO(tx_single_coll_frames)},
+ {GBE_STATSD_INFO(tx_mult_coll_frames)},
+ {GBE_STATSD_INFO(tx_excessive_collisions)},
+ {GBE_STATSD_INFO(tx_late_collisions)},
+ {GBE_STATSD_INFO(tx_underrun)},
+ {GBE_STATSD_INFO(tx_carrier_sense_errors)},
+ {GBE_STATSD_INFO(tx_bytes)},
+ {GBE_STATSD_INFO(tx_64byte_frames)},
+ {GBE_STATSD_INFO(tx_65_to_127byte_frames)},
+ {GBE_STATSD_INFO(tx_128_to_255byte_frames)},
+ {GBE_STATSD_INFO(tx_256_to_511byte_frames)},
+ {GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
+ {GBE_STATSD_INFO(tx_1024byte_frames)},
+ {GBE_STATSD_INFO(net_bytes)},
+ {GBE_STATSD_INFO(rx_sof_overruns)},
+ {GBE_STATSD_INFO(rx_mof_overruns)},
+ {GBE_STATSD_INFO(rx_dma_overruns)},
+};
+
+#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \
+ FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field)
+
+#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \
+ FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field)
+
+#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \
+ FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field)
+
+static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
+ /* GBE module 0 */
+ {XGBE_STATS0_INFO(rx_good_frames)},
+ {XGBE_STATS0_INFO(rx_broadcast_frames)},
+ {XGBE_STATS0_INFO(rx_multicast_frames)},
+ {XGBE_STATS0_INFO(rx_oversized_frames)},
+ {XGBE_STATS0_INFO(rx_undersized_frames)},
+ {XGBE_STATS0_INFO(overrun_type4)},
+ {XGBE_STATS0_INFO(overrun_type5)},
+ {XGBE_STATS0_INFO(rx_bytes)},
+ {XGBE_STATS0_INFO(tx_good_frames)},
+ {XGBE_STATS0_INFO(tx_broadcast_frames)},
+ {XGBE_STATS0_INFO(tx_multicast_frames)},
+ {XGBE_STATS0_INFO(tx_bytes)},
+ {XGBE_STATS0_INFO(tx_64byte_frames)},
+ {XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
+ {XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
+ {XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
+ {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
+ {XGBE_STATS0_INFO(tx_1024byte_frames)},
+ {XGBE_STATS0_INFO(net_bytes)},
+ {XGBE_STATS0_INFO(rx_sof_overruns)},
+ {XGBE_STATS0_INFO(rx_mof_overruns)},
+ {XGBE_STATS0_INFO(rx_dma_overruns)},
+ /* XGBE module 1 */
+ {XGBE_STATS1_INFO(rx_good_frames)},
+ {XGBE_STATS1_INFO(rx_broadcast_frames)},
+ {XGBE_STATS1_INFO(rx_multicast_frames)},
+ {XGBE_STATS1_INFO(rx_pause_frames)},
+ {XGBE_STATS1_INFO(rx_crc_errors)},
+ {XGBE_STATS1_INFO(rx_align_code_errors)},
+ {XGBE_STATS1_INFO(rx_oversized_frames)},
+ {XGBE_STATS1_INFO(rx_jabber_frames)},
+ {XGBE_STATS1_INFO(rx_undersized_frames)},
+ {XGBE_STATS1_INFO(rx_fragments)},
+ {XGBE_STATS1_INFO(overrun_type4)},
+ {XGBE_STATS1_INFO(overrun_type5)},
+ {XGBE_STATS1_INFO(rx_bytes)},
+ {XGBE_STATS1_INFO(tx_good_frames)},
+ {XGBE_STATS1_INFO(tx_broadcast_frames)},
+ {XGBE_STATS1_INFO(tx_multicast_frames)},
+ {XGBE_STATS1_INFO(tx_pause_frames)},
+ {XGBE_STATS1_INFO(tx_deferred_frames)},
+ {XGBE_STATS1_INFO(tx_collision_frames)},
+ {XGBE_STATS1_INFO(tx_single_coll_frames)},
+ {XGBE_STATS1_INFO(tx_mult_coll_frames)},
+ {XGBE_STATS1_INFO(tx_excessive_collisions)},
+ {XGBE_STATS1_INFO(tx_late_collisions)},
+ {XGBE_STATS1_INFO(tx_underrun)},
+ {XGBE_STATS1_INFO(tx_carrier_sense_errors)},
+ {XGBE_STATS1_INFO(tx_bytes)},
+ {XGBE_STATS1_INFO(tx_64byte_frames)},
+ {XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
+ {XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
+ {XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
+ {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
+ {XGBE_STATS1_INFO(tx_1024byte_frames)},
+ {XGBE_STATS1_INFO(net_bytes)},
+ {XGBE_STATS1_INFO(rx_sof_overruns)},
+ {XGBE_STATS1_INFO(rx_mof_overruns)},
+ {XGBE_STATS1_INFO(rx_dma_overruns)},
+ /* XGBE module 2 */
+ {XGBE_STATS2_INFO(rx_good_frames)},
+ {XGBE_STATS2_INFO(rx_broadcast_frames)},
+ {XGBE_STATS2_INFO(rx_multicast_frames)},
+ {XGBE_STATS2_INFO(rx_pause_frames)},
+ {XGBE_STATS2_INFO(rx_crc_errors)},
+ {XGBE_STATS2_INFO(rx_align_code_errors)},
+ {XGBE_STATS2_INFO(rx_oversized_frames)},
+ {XGBE_STATS2_INFO(rx_jabber_frames)},
+ {XGBE_STATS2_INFO(rx_undersized_frames)},
+ {XGBE_STATS2_INFO(rx_fragments)},
+ {XGBE_STATS2_INFO(overrun_type4)},
+ {XGBE_STATS2_INFO(overrun_type5)},
+ {XGBE_STATS2_INFO(rx_bytes)},
+ {XGBE_STATS2_INFO(tx_good_frames)},
+ {XGBE_STATS2_INFO(tx_broadcast_frames)},
+ {XGBE_STATS2_INFO(tx_multicast_frames)},
+ {XGBE_STATS2_INFO(tx_pause_frames)},
+ {XGBE_STATS2_INFO(tx_deferred_frames)},
+ {XGBE_STATS2_INFO(tx_collision_frames)},
+ {XGBE_STATS2_INFO(tx_single_coll_frames)},
+ {XGBE_STATS2_INFO(tx_mult_coll_frames)},
+ {XGBE_STATS2_INFO(tx_excessive_collisions)},
+ {XGBE_STATS2_INFO(tx_late_collisions)},
+ {XGBE_STATS2_INFO(tx_underrun)},
+ {XGBE_STATS2_INFO(tx_carrier_sense_errors)},
+ {XGBE_STATS2_INFO(tx_bytes)},
+ {XGBE_STATS2_INFO(tx_64byte_frames)},
+ {XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
+ {XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
+ {XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
+ {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
+ {XGBE_STATS2_INFO(tx_1024byte_frames)},
+ {XGBE_STATS2_INFO(net_bytes)},
+ {XGBE_STATS2_INFO(rx_sof_overruns)},
+ {XGBE_STATS2_INFO(rx_mof_overruns)},
+ {XGBE_STATS2_INFO(rx_dma_overruns)},
+};
+
+#define for_each_intf(i, priv) \
+ list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
+
+#define for_each_sec_slave(slave, priv) \
+ list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
+
+#define first_sec_slave(priv) \
+ list_first_entry(&priv->secondary_slaves, \
+ struct gbe_slave, slave_list)
+
+static void keystone_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
+}
+
+static u32 keystone_get_msglevel(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+
+ return netcp->msg_enable;
+}
+
+static void keystone_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+
+ netcp->msg_enable = value;
+}
+
+static void keystone_get_stat_strings(struct net_device *ndev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+ struct gbe_priv *gbe_dev;
+ int i;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return;
+ gbe_dev = gbe_intf->gbe_dev;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ memcpy(data, gbe_dev->et_stats[i].desc,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_TEST:
+ break;
+ }
+}
+
+static int keystone_get_sset_count(struct net_device *ndev, int stringset)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+ struct gbe_priv *gbe_dev;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return -EINVAL;
+ gbe_dev = gbe_intf->gbe_dev;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return 0;
+ case ETH_SS_STATS:
+ return gbe_dev->num_et_stats;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ void __iomem *base = NULL;
+ u32 __iomem *p;
+ u32 tmp = 0;
+ int i;
+
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
+ p = base + gbe_dev->et_stats[i].offset;
+ tmp = readl(p);
+ gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+ if (data)
+ data[i] = gbe_dev->hw_stats[i];
+ /* write-to-decrement:
+ * new register value = old register value - write value
+ */
+ writel(tmp, p);
+ }
+}
+
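+/* On version 1.4 hardware only two statistics windows are visible at a
+ * time; GBE_STATS_CD_SEL in stat_port_en switches between the A/B and
+ * C/D module pairs.
+ */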
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
+ void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
+ u64 *hw_stats = &gbe_dev->hw_stats[0];
+ void __iomem *base = NULL;
+ u32 __iomem *p;
+ u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
+ int i, j, pair;
+
+ for (pair = 0; pair < 2; pair++) {
+ val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+ if (pair == 0)
+ val &= ~GBE_STATS_CD_SEL;
+ else
+ val |= GBE_STATS_CD_SEL;
+
+ /* make the stat modules visible */
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+ for (i = 0; i < pair_size; i++) {
+ j = pair * pair_size + i;
+ switch (gbe_dev->et_stats[j].type) {
+ case GBE_STATSA_MODULE:
+ case GBE_STATSC_MODULE:
+ base = gbe_statsa;
+ break;
+ case GBE_STATSB_MODULE:
+ case GBE_STATSD_MODULE:
+ base = gbe_statsb;
+ break;
+ }
+
+ p = base + gbe_dev->et_stats[j].offset;
+ tmp = readl(p);
+ hw_stats[j] += tmp;
+ if (data)
+ data[j] = hw_stats[j];
+ /* write-to-decrement:
+ * new register value = old register value - write value
+ */
+ writel(tmp, p);
+ }
+ }
+}
+
+static void keystone_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+ struct gbe_priv *gbe_dev;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return;
+
+ gbe_dev = gbe_intf->gbe_dev;
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ gbe_update_stats_ver14(gbe_dev, data);
+ else
+ gbe_update_stats(gbe_dev, data);
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+}
+
+static int keystone_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct phy_device *phy = ndev->phydev;
+ struct gbe_intf *gbe_intf;
+ int ret;
+
+ if (!phy)
+ return -EINVAL;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return -EINVAL;
+
+ if (!gbe_intf->slave)
+ return -EINVAL;
+
+ ret = phy_ethtool_gset(phy, cmd);
+ if (!ret)
+ cmd->port = gbe_intf->slave->phy_port_t;
+
+ return ret;
+}
+
+static int keystone_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct phy_device *phy = ndev->phydev;
+ struct gbe_intf *gbe_intf;
+ u32 features = cmd->advertising & cmd->supported;
+
+ if (!phy)
+ return -EINVAL;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return -EINVAL;
+
+ if (!gbe_intf->slave)
+ return -EINVAL;
+
+ if (cmd->port != gbe_intf->slave->phy_port_t) {
+ if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
+ return -EINVAL;
+
+ if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
+ return -EINVAL;
+
+ if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
+ return -EINVAL;
+
+ if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
+ return -EINVAL;
+
+ if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
+ return -EINVAL;
+ }
+
+ gbe_intf->slave->phy_port_t = cmd->port;
+ return phy_ethtool_sset(phy, cmd);
+}
+
+static const struct ethtool_ops keystone_ethtool_ops = {
+ .get_drvinfo = keystone_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = keystone_get_msglevel,
+ .set_msglevel = keystone_set_msglevel,
+ .get_strings = keystone_get_stat_strings,
+ .get_sset_count = keystone_get_sset_count,
+ .get_ethtool_stats = keystone_get_ethtool_stats,
+ .get_settings = keystone_get_settings,
+ .set_settings = keystone_set_settings,
+};
+
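+/* Pack a MAC address into the switch sa_hi/sa_lo register layout:
+ * bytes 0-3 in sa_hi, bytes 4-5 in sa_lo.
+ */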
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+static void gbe_set_slave_mac(struct gbe_slave *slave,
+ struct gbe_intf *gbe_intf)
+{
+ struct net_device *ndev = gbe_intf->ndev;
+
+ writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
+ writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
+}
+
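+/* Slave ports are numbered after the host port, so with host port 0 the
+ * Nth slave maps to switch port N + 1.
+ */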
+static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+
+ return slave_num;
+}
+
+static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
+ struct net_device *ndev,
+ struct gbe_slave *slave,
+ int up)
+{
+ struct phy_device *phy = slave->phy;
+ u32 mac_control = 0;
+
+ if (up) {
+ mac_control = slave->mac_control;
+ if (phy && (phy->speed == SPEED_1000)) {
+ mac_control |= MACSL_GIG_MODE;
+ mac_control &= ~MACSL_XGIG_MODE;
+ } else if (phy && (phy->speed == SPEED_10000)) {
+ mac_control |= MACSL_XGIG_MODE;
+ mac_control &= ~MACSL_GIG_MODE;
+ }
+
+ writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+ mac_control));
+
+ cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+ ALE_PORT_STATE,
+ ALE_PORT_STATE_FORWARD);
+
+ if (ndev && slave->open)
+ netif_carrier_on(ndev);
+ } else {
+ writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+ mac_control));
+ cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+ ALE_PORT_STATE,
+ ALE_PORT_STATE_DISABLE);
+ if (ndev)
+ netif_carrier_off(ndev);
+ }
+
+ if (phy)
+ phy_print_status(phy);
+}
+
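+/* A slave without an attached PHY is treated as always having link */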
+static bool gbe_phy_link_status(struct gbe_slave *slave)
+{
+ return !slave->phy || slave->phy->link;
+}
+
+static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
+ struct gbe_slave *slave,
+ struct net_device *ndev)
+{
+ int sp = slave->slave_num;
+ int phy_link_state, sgmii_link_state = 1, link_state;
+
+ if (!slave->open)
+ return;
+
+ if (!SLAVE_LINK_IS_XGMII(slave))
+ sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
+ sp);
+ phy_link_state = gbe_phy_link_status(slave);
+ link_state = phy_link_state & sgmii_link_state;
+
+ if (atomic_xchg(&slave->link_state, link_state) != link_state)
+ netcp_ethss_link_state_action(gbe_dev, ndev, slave,
+ link_state);
+}
+
+static void xgbe_adjust_link(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+ if (!gbe_intf)
+ return;
+
+ netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+ ndev);
+}
+
+static void gbe_adjust_link(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return;
+
+ netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+ ndev);
+}
+
+static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
+{
+ struct gbe_priv *gbe_dev = netdev_priv(ndev);
+ struct gbe_slave *slave;
+
+ for_each_sec_slave(slave, gbe_dev)
+ netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+}
+
+/* Reset EMAC
+ * Soft reset is set and polled until clear, or until a timeout occurs
+ */
+static int gbe_port_reset(struct gbe_slave *slave)
+{
+ u32 i, v;
+
+ /* Set the soft reset bit */
+ writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
+
+ /* Wait for the bit to clear */
+ for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+ v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
+ if ((v & SOFT_RESET_MASK) != SOFT_RESET)
+ return 0;
+ }
+
+ /* Timeout on the reset */
+ return GMACSL_RET_WARN_RESET_INCOMPLETE;
+}
+
+/* Configure EMAC */
+static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+ int max_rx_len)
+{
+ u32 xgmii_mode;
+
+ if (max_rx_len > NETCP_MAX_FRAME_SIZE)
+ max_rx_len = NETCP_MAX_FRAME_SIZE;
+
+ /* Enable correct MII mode at SS level */
+ if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
+ (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
+ xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
+ xgmii_mode |= (1 << slave->slave_num);
+ writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
+ }
+
+ writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
+ writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
+}
+
+static void gbe_slave_stop(struct gbe_intf *intf)
+{
+ struct gbe_priv *gbe_dev = intf->gbe_dev;
+ struct gbe_slave *slave = intf->slave;
+
+ gbe_port_reset(slave);
+ /* Disable forwarding */
+ cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
+ 1 << slave->port_num, 0, 0);
+
+ if (!slave->phy)
+ return;
+
+ phy_stop(slave->phy);
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+}
+
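+/* Reset and configure the SGMII block for non-XGMII slaves; on version
+ * 1.4 hardware slaves 2 and 3 live in the second SGMII register block.
+ */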
+static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
+{
+ void __iomem *sgmii_port_regs;
+
+ sgmii_port_regs = priv->sgmii_port_regs;
+ if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+ sgmii_port_regs = priv->sgmii_port34_regs;
+
+ if (!SLAVE_LINK_IS_XGMII(slave)) {
+ netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
+ netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
+ slave->link_interface);
+ }
+}
+
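+/* Bring up a slave port: configure SGMII, reset and configure the MAC,
+ * program its address, enable ALE forwarding and, for MAC-PHY links,
+ * connect and start the PHY.
+ */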
+static int gbe_slave_open(struct gbe_intf *gbe_intf)
+{
+ struct gbe_priv *priv = gbe_intf->gbe_dev;
+ struct gbe_slave *slave = gbe_intf->slave;
+ phy_interface_t phy_mode;
+ bool has_phy = false;
+
+ void (*hndlr)(struct net_device *) = gbe_adjust_link;
+
+ gbe_sgmii_config(priv, slave);
+ gbe_port_reset(slave);
+ gbe_port_config(priv, slave, priv->rx_packet_max);
+ gbe_set_slave_mac(slave, gbe_intf);
+ /* enable forwarding */
+ cpsw_ale_control_set(priv->ale, slave->port_num,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
+ 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
+
+ if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+ has_phy = true;
+ phy_mode = PHY_INTERFACE_MODE_SGMII;
+ slave->phy_port_t = PORT_MII;
+ } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
+ has_phy = true;
+ phy_mode = PHY_INTERFACE_MODE_NA;
+ slave->phy_port_t = PORT_FIBRE;
+ }
+
+ if (has_phy) {
+ if (priv->ss_version == XGBE_SS_VERSION_10)
+ hndlr = xgbe_adjust_link;
+
+ slave->phy = of_phy_connect(gbe_intf->ndev,
+ slave->phy_node,
+ hndlr, 0,
+ phy_mode);
+ if (!slave->phy) {
+ dev_err(priv->dev, "phy not found on slave %d\n",
+ slave->slave_num);
+ return -ENODEV;
+ }
+ dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
+ dev_name(&slave->phy->dev));
+ phy_start(slave->phy);
+ phy_read_status(slave->phy);
+ }
+ return 0;
+}
+
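+/* Bring up the host (CPU-facing) port: set the maximum RX frame length,
+ * start the ALE and program its default port state, flood and
+ * untagged-egress masks.
+ */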
+static void gbe_init_host_port(struct gbe_priv *priv)
+{
+ int bypass_en = 1;
+ /* Max length register */
+ writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
+ rx_maxlen));
+
+ cpsw_ale_start(priv->ale);
+
+ if (priv->enable_ale)
+ bypass_en = 0;
+
+ cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
+
+ cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
+
+ cpsw_ale_control_set(priv->ale, priv->host_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNKNOWN_VLAN_MEMBER,
+ GBE_PORT_MASK(priv->ale_ports));
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNKNOWN_MCAST_FLOOD,
+ GBE_PORT_MASK(priv->ale_ports - 1));
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+ GBE_PORT_MASK(priv->ale_ports));
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNTAGGED_EGRESS,
+ GBE_PORT_MASK(priv->ale_ports));
+}
+
+static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_add_mcast(gbe_dev->ale, addr,
+ GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
+ ALE_MCAST_FWD_2);
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+ cpsw_ale_add_mcast(gbe_dev->ale, addr,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
+ }
+}
+
+static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
+ cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+ ALE_VLAN, vlan_id);
+}
+
+static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
+
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+ cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
+ }
+}
+
+static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+ cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+ ALE_VLAN, vlan_id);
+ }
+}
+
+static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
+ naddr->addr, naddr->type);
+
+ switch (naddr->type) {
+ case ADDR_MCAST:
+ case ADDR_BCAST:
+ gbe_add_mcast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_UCAST:
+ case ADDR_DEV:
+ gbe_add_ucast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_ANY:
+ /* nothing to do for promiscuous */
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
+ naddr->addr, naddr->type);
+
+ switch (naddr->type) {
+ case ADDR_MCAST:
+ case ADDR_BCAST:
+ gbe_del_mcast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_UCAST:
+ case ADDR_DEV:
+ gbe_del_ucast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_ANY:
+ /* nothing to do for promiscuous */
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gbe_add_vid(void *intf_priv, int vid)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ set_bit(vid, gbe_intf->active_vlans);
+
+ cpsw_ale_add_vlan(gbe_dev->ale, vid,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ GBE_MASK_NO_PORTS,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ GBE_PORT_MASK(gbe_dev->ale_ports - 1));
+
+ return 0;
+}
+
+static int gbe_del_vid(void *intf_priv, int vid)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
+ clear_bit(vid, gbe_intf->active_vlans);
+ return 0;
+}
+
+static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct phy_device *phy = gbe_intf->slave->phy;
+ int ret = -EOPNOTSUPP;
+
+ if (phy)
+ ret = phy_mii_ioctl(phy, req, cmd);
+
+ return ret;
+}
+
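+/* Periodic poll: refresh the link state of all interfaces and secondary
+ * slaves, accumulate hardware statistics and re-arm the timer.
+ */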
+static void netcp_ethss_timer(unsigned long arg)
+{
+ struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+ struct gbe_intf *gbe_intf;
+ struct gbe_slave *slave;
+
+ /* Check & update SGMII link state of interfaces */
+ for_each_intf(gbe_intf, gbe_dev) {
+ if (!gbe_intf->slave->open)
+ continue;
+ netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
+ gbe_intf->ndev);
+ }
+
+ /* Check & update SGMII link state of secondary ports */
+ for_each_sec_slave(slave, gbe_dev) {
+ netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+ }
+
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ gbe_update_stats_ver14(gbe_dev, NULL);
+ else
+ gbe_update_stats(gbe_dev, NULL);
+
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
+ gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
+ add_timer(&gbe_dev->timer);
+}
+
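+/* TX hook registered with the NETCP core: steer outgoing packets onto
+ * this interface's switch TX pipe.
+ */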
+static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
+{
+ struct gbe_intf *gbe_intf = data;
+
+ p_info->tx_pipe = &gbe_intf->tx_pipe;
+ return 0;
+}
+
+static int gbe_open(void *intf_priv, struct net_device *ndev)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_slave *slave = gbe_intf->slave;
+ int port_num = slave->port_num;
+ u32 reg;
+ int ret;
+
+ reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
+ dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
+ GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
+ GBE_RTL_VERSION(reg), GBE_IDENT(reg));
+
+ if (gbe_dev->enable_ale)
+ gbe_intf->tx_pipe.dma_psflags = 0;
+ else
+ gbe_intf->tx_pipe.dma_psflags = port_num;
+
+ dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
+ gbe_intf->tx_pipe.dma_chan_name,
+ gbe_intf->tx_pipe.dma_channel,
+ gbe_intf->tx_pipe.dma_psflags);
+
+ gbe_slave_stop(gbe_intf);
+
+ /* disable priority elevation and enable statistics on all ports */
+ writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
+
+ /* Control register */
+ writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
+
+ /* All statistics enabled and STAT AB visible by default */
+ writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
+ stat_port_en));
+
+ ret = gbe_slave_open(gbe_intf);
+ if (ret)
+ goto fail;
+
+ netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
+ gbe_intf);
+
+ slave->open = true;
+ netcp_ethss_update_link_state(gbe_dev, slave, ndev);
+ return 0;
+
+fail:
+ gbe_slave_stop(gbe_intf);
+ return ret;
+}
+
+static int gbe_close(void *intf_priv, struct net_device *ndev)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct netcp_intf *netcp = netdev_priv(ndev);
+
+ gbe_slave_stop(gbe_intf);
+ netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
+ gbe_intf);
+
+ gbe_intf->slave->open = false;
+ atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
+ return 0;
+}
+
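+/* Parse a slave port DT node and compute its port and EMAC register
+ * bases along with the per-version register offsets.
+ */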
+static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+ struct device_node *node)
+{
+ int port_reg_num;
+ u32 port_reg_ofs, emac_reg_ofs;
+
+ if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
+ dev_err(gbe_dev->dev, "missing slave-port parameter\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "link-interface",
+ &slave->link_interface)) {
+ dev_warn(gbe_dev->dev,
+ "missing link-interface value defaulting to 1G mac-phy link\n");
+ slave->link_interface = SGMII_LINK_MAC_PHY;
+ }
+
+ slave->open = false;
+ slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
+
+ if (slave->link_interface >= XGMII_LINK_MAC_PHY)
+ slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
+ else
+ slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
+
+	/* EMAC register maps are contiguous, but port register maps are not */
+ port_reg_num = slave->slave_num;
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
+ if (slave->slave_num > 1) {
+ port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
+ port_reg_num -= 2;
+ } else {
+ port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
+ }
+ } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
+ port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
+ } else {
+ dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
+ gbe_dev->ss_version);
+ return -EINVAL;
+ }
+
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ emac_reg_ofs = GBE13_EMAC_OFFSET;
+ else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
+ emac_reg_ofs = XGBE10_EMAC_OFFSET;
+
+ slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
+ (0x30 * port_reg_num);
+ slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
+ (0x40 * slave->slave_num);
+
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
+ /* Initialize slave port register offsets */
+ GBE_SET_REG_OFS(slave, port_regs, port_vlan);
+ GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ GBE_SET_REG_OFS(slave, port_regs, sa_lo);
+ GBE_SET_REG_OFS(slave, port_regs, sa_hi);
+ GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+ GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+ GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+ GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+ GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+ /* Initialize EMAC register offsets */
+ GBE_SET_REG_OFS(slave, emac_regs, mac_control);
+ GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+ GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+
+ } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
+ /* Initialize slave port register offsets */
+ XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
+ XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
+ XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+ /* Initialize EMAC register offsets */
+ XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
+ XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+ XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+ }
+
+ atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
+ return 0;
+}
+
+static void init_secondary_ports(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct device *dev = gbe_dev->dev;
+ phy_interface_t phy_mode;
+ struct gbe_priv **priv;
+ struct device_node *port;
+ struct gbe_slave *slave;
+ bool mac_phy_link = false;
+
+ for_each_child_of_node(node, port) {
+ slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ dev_err(dev,
+ "memomry alloc failed for secondary port(%s), skipping...\n",
+ port->name);
+ continue;
+ }
+
+ if (init_slave(gbe_dev, slave, port)) {
+ dev_err(dev,
+ "Failed to initialize secondary port(%s), skipping...\n",
+ port->name);
+ devm_kfree(dev, slave);
+ continue;
+ }
+
+ gbe_sgmii_config(gbe_dev, slave);
+ gbe_port_reset(slave);
+ gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
+ list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
+ gbe_dev->num_slaves++;
+ if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+ (slave->link_interface == XGMII_LINK_MAC_PHY))
+ mac_phy_link = true;
+
+ slave->open = true;
+ }
+
+ /* of_phy_connect() is needed only for MAC-PHY interface */
+ if (!mac_phy_link)
+ return;
+
+ /* Allocate dummy netdev device for attaching to phy device */
+ gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
+ NET_NAME_UNKNOWN, ether_setup);
+ if (!gbe_dev->dummy_ndev) {
+ dev_err(dev,
+ "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
+ return;
+ }
+ priv = netdev_priv(gbe_dev->dummy_ndev);
+ *priv = gbe_dev;
+
+ for_each_sec_slave(slave, gbe_dev) {
+ if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != XGMII_LINK_MAC_PHY))
+ continue;
+
+ /* pick the PHY interface mode for this slave */
+ if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+ phy_mode = PHY_INTERFACE_MODE_SGMII;
+ slave->phy_port_t = PORT_MII;
+ } else {
+ phy_mode = PHY_INTERFACE_MODE_NA;
+ slave->phy_port_t = PORT_FIBRE;
+ }
+
+ slave->phy =
+ of_phy_connect(gbe_dev->dummy_ndev,
+ slave->phy_node,
+ gbe_adjust_link_sec_slaves,
+ 0, phy_mode);
+ if (!slave->phy) {
+ dev_err(dev, "phy not found for slave %d\n",
+ slave->slave_num);
+ slave->phy = NULL;
+ } else {
+ dev_dbg(dev, "phy found: id is: 0x%s\n",
+ dev_name(&slave->phy->dev));
+ phy_start(slave->phy);
+ phy_read_status(slave->phy);
+ }
+ }
+}
+
+static void free_secondary_ports(struct gbe_priv *gbe_dev)
+{
+ struct gbe_slave *slave;
+
+ for (;;) {
+ slave = first_sec_slave(gbe_dev);
+ if (!slave)
+ break;
+ if (slave->phy)
+ phy_disconnect(slave->phy);
+ list_del(&slave->slave_list);
+ }
+ if (gbe_dev->dummy_ndev)
+ free_netdev(gbe_dev->dummy_ndev);
+}
+
+static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret, i;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
+ node->name);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->ss_regs = regs;
+
+ ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
+ node->name);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->xgbe_serdes_regs = regs;
+
+ gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
+ XGBE10_NUM_STAT_ENTRIES *
+ (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats) {
+ dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ gbe_dev->ss_version = XGBE_SS_VERSION_10;
+ gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
+ XGBE10_SGMII_MODULE_OFFSET;
+ gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
+ gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
+
+ for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
+ gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
+ XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
+
+ gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
+ gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
+ gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
+ gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
+ gbe_dev->et_stats = xgbe10_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
+ /* Subsystem registers */
+ XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+ XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
+
+ /* Switch module registers */
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+ /* Host port registers */
+ XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+ XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+ XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+ return 0;
+}
+
+static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
+ node->name);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->ss_regs = regs;
+ gbe_dev->ss_version = readl(gbe_dev->ss_regs);
+ return 0;
+}
+
+static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ void __iomem *regs;
+ int i;
+
+ gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
+ GBE13_NUM_HW_STAT_ENTRIES *
+ GBE13_NUM_SLAVES * sizeof(u64),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats) {
+ dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ regs = gbe_dev->ss_regs;
+ gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
+ gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
+ gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
+ gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;
+
+ for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
+ gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
+ (GBE_HW_STATS_REG_MAP_SZ * i);
+
+ gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
+ gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
+ gbe_dev->host_port = GBE13_HOST_PORT_NUM;
+ gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
+ gbe_dev->et_stats = gbe13_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
+ /* Subsystem registers */
+ GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+
+ /* Switch module registers */
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+ /* Host port registers */
+ GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+ GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+ return 0;
+}
+
+static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
+ struct device_node *node, void **inst_priv)
+{
+ struct device_node *interfaces, *interface;
+ struct device_node *secondary_ports;
+ struct cpsw_ale_params ale_params;
+ struct gbe_priv *gbe_dev;
+ u32 slave_num;
+ int ret = 0;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
+ if (!gbe_dev)
+ return -ENOMEM;
+
+ gbe_dev->dev = dev;
+ gbe_dev->netcp_device = netcp_device;
+ gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
+
+ /* init the hw stats lock */
+ spin_lock_init(&gbe_dev->hw_stats_lock);
+
+ if (of_find_property(node, "enable-ale", NULL)) {
+ gbe_dev->enable_ale = true;
+ dev_info(dev, "ALE enabled\n");
+ } else {
+ gbe_dev->enable_ale = false;
+ dev_dbg(dev, "ALE bypass enabled*\n");
+ }
+
+ ret = of_property_read_u32(node, "tx-queue",
+ &gbe_dev->tx_queue_id);
+ if (ret < 0) {
+ dev_err(dev, "missing tx_queue parameter\n");
+ gbe_dev->tx_queue_id = GBE_TX_QUEUE;
+ }
+
+ ret = of_property_read_string(node, "tx-channel",
+ &gbe_dev->dma_chan_name);
+ if (ret < 0) {
+ dev_err(dev, "missing \"tx-channel\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ if (!strcmp(node->name, "gbe")) {
+ ret = get_gbe_resource_version(gbe_dev, node);
+ if (ret)
+ goto quit;
+
+ ret = set_gbe_ethss14_priv(gbe_dev, node);
+ if (ret)
+ goto quit;
+ } else if (!strcmp(node->name, "xgbe")) {
+ ret = set_xgbe_ethss10_priv(gbe_dev, node);
+ if (ret)
+ goto quit;
+ ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
+ gbe_dev->ss_regs);
+ if (ret)
+ goto quit;
+ } else {
+ dev_err(dev, "unknown GBE node(%s)\n", node->name);
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ interfaces = of_get_child_by_name(node, "interfaces");
+ if (!interfaces)
+ dev_err(dev, "could not find interfaces\n");
+
+ ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
+ gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
+ if (ret)
+ goto quit;
+
+ ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
+ if (ret)
+ goto quit;
+
+ /* Create network interfaces */
+ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
+ for_each_child_of_node(interfaces, interface) {
+ ret = of_property_read_u32(interface, "slave-port", &slave_num);
+ if (ret) {
+ dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
+ interface->name);
+ continue;
+ }
+ gbe_dev->num_slaves++;
+ }
+
+ if (!gbe_dev->num_slaves)
+ dev_warn(dev, "No network interface configured\n");
+
+ /* Initialize Secondary slave ports */
+ secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
+ INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
+ if (secondary_ports)
+ init_secondary_ports(gbe_dev, secondary_ports);
+ of_node_put(secondary_ports);
+
+ if (!gbe_dev->num_slaves) {
+ dev_err(dev, "No network interface or secondary ports configured\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ memset(&ale_params, 0, sizeof(ale_params));
+ ale_params.dev = gbe_dev->dev;
+ ale_params.ale_regs = gbe_dev->ale_reg;
+ ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
+ ale_params.ale_entries = gbe_dev->ale_entries;
+ ale_params.ale_ports = gbe_dev->ale_ports;
+
+ gbe_dev->ale = cpsw_ale_create(&ale_params);
+ if (!gbe_dev->ale) {
+ dev_err(gbe_dev->dev, "error initializing ale engine\n");
+ ret = -ENODEV;
+ goto quit;
+ } else {
+ dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
+ }
+
+ /* initialize host port */
+ gbe_init_host_port(gbe_dev);
+
+ init_timer(&gbe_dev->timer);
+ gbe_dev->timer.data = (unsigned long)gbe_dev;
+ gbe_dev->timer.function = netcp_ethss_timer;
+ gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
+ add_timer(&gbe_dev->timer);
+ *inst_priv = gbe_dev;
+ return 0;
+
+quit:
+ if (gbe_dev->hw_stats)
+ devm_kfree(dev, gbe_dev->hw_stats);
+ cpsw_ale_destroy(gbe_dev->ale);
+ if (gbe_dev->ss_regs)
+ devm_iounmap(dev, gbe_dev->ss_regs);
+ of_node_put(interfaces);
+ devm_kfree(dev, gbe_dev);
+ return ret;
+}
+
+static int gbe_attach(void *inst_priv, struct net_device *ndev,
+ struct device_node *node, void **intf_priv)
+{
+ struct gbe_priv *gbe_dev = inst_priv;
+ struct gbe_intf *gbe_intf;
+ int ret;
+
+ if (!node) {
+ dev_err(gbe_dev->dev, "interface node not available\n");
+ return -ENODEV;
+ }
+
+ gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
+ if (!gbe_intf)
+ return -ENOMEM;
+
+ gbe_intf->ndev = ndev;
+ gbe_intf->dev = gbe_dev->dev;
+ gbe_intf->gbe_dev = gbe_dev;
+
+ gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
+ sizeof(*gbe_intf->slave),
+ GFP_KERNEL);
+ if (!gbe_intf->slave) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (init_slave(gbe_dev, gbe_intf->slave, node)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ gbe_intf->tx_pipe = gbe_dev->tx_pipe;
+ ndev->ethtool_ops = &keystone_ethtool_ops;
+ list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
+ *intf_priv = gbe_intf;
+ return 0;
+
+fail:
+ if (gbe_intf->slave)
+ devm_kfree(gbe_dev->dev, gbe_intf->slave);
+ if (gbe_intf)
+ devm_kfree(gbe_dev->dev, gbe_intf);
+ return ret;
+}
+
+static int gbe_release(void *intf_priv)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+
+ gbe_intf->ndev->ethtool_ops = NULL;
+ list_del(&gbe_intf->gbe_intf_list);
+ devm_kfree(gbe_intf->dev, gbe_intf->slave);
+ devm_kfree(gbe_intf->dev, gbe_intf);
+ return 0;
+}
+
+static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
+{
+ struct gbe_priv *gbe_dev = inst_priv;
+
+ del_timer_sync(&gbe_dev->timer);
+ cpsw_ale_stop(gbe_dev->ale);
+ cpsw_ale_destroy(gbe_dev->ale);
+ netcp_txpipe_close(&gbe_dev->tx_pipe);
+ free_secondary_ports(gbe_dev);
+
+ if (!list_empty(&gbe_dev->gbe_intf_head))
+ dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+
+ devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
+ devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
+ memset(gbe_dev, 0x00, sizeof(*gbe_dev));
+ devm_kfree(gbe_dev->dev, gbe_dev);
+ return 0;
+}
+
+static struct netcp_module gbe_module = {
+ .name = GBE_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .primary = true,
+ .probe = gbe_probe,
+ .open = gbe_open,
+ .close = gbe_close,
+ .remove = gbe_remove,
+ .attach = gbe_attach,
+ .release = gbe_release,
+ .add_addr = gbe_add_addr,
+ .del_addr = gbe_del_addr,
+ .add_vid = gbe_add_vid,
+ .del_vid = gbe_del_vid,
+ .ioctl = gbe_ioctl,
+};
+
+static struct netcp_module xgbe_module = {
+ .name = XGBE_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .primary = true,
+ .probe = gbe_probe,
+ .open = gbe_open,
+ .close = gbe_close,
+ .remove = gbe_remove,
+ .attach = gbe_attach,
+ .release = gbe_release,
+ .add_addr = gbe_add_addr,
+ .del_addr = gbe_del_addr,
+ .add_vid = gbe_add_vid,
+ .del_vid = gbe_del_vid,
+ .ioctl = gbe_ioctl,
+};
+
+static int __init keystone_gbe_init(void)
+{
+ int ret;
+
+ ret = netcp_register_module(&gbe_module);
+ if (ret)
+ return ret;
+
+ ret = netcp_register_module(&xgbe_module);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+module_init(keystone_gbe_init);
+
+static void __exit keystone_gbe_exit(void)
+{
+ netcp_unregister_module(&gbe_module);
+ netcp_unregister_module(&xgbe_module);
+}
+module_exit(keystone_gbe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
new file mode 100644
index 000000000000..dbeb14266e2f
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -0,0 +1,131 @@
+/*
+ * SGMII module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "netcp.h"
+
+#define SGMII_REG_STATUS_LOCK BIT(4)
+#define SGMII_REG_STATUS_LINK BIT(0)
+#define SGMII_REG_STATUS_AUTONEG BIT(2)
+#define SGMII_REG_CONTROL_AUTONEG BIT(0)
+
+#define SGMII23_OFFSET(x) ((x - 2) * 0x100)
+#define SGMII_OFFSET(x) ((x <= 1) ? (x * 0x100) : (SGMII23_OFFSET(x)))
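+
+/* Worked example of the port offset math (ports are 0 based):
+ * SGMII_OFFSET(0) = 0x000, SGMII_OFFSET(1) = 0x100,
+ * SGMII_OFFSET(2) = 0x000, SGMII_OFFSET(3) = 0x100.
+ * Ports 2/3 start over at 0x000, presumably because they are reached
+ * through a second SGMII module base (sgmii_port34_regs in the ethss
+ * code); the caller passes the matching base in sgmii_ofs.
+ */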
+
+/* SGMII registers */
+#define SGMII_SRESET_REG(x) (SGMII_OFFSET(x) + 0x004)
+#define SGMII_CTL_REG(x) (SGMII_OFFSET(x) + 0x010)
+#define SGMII_STATUS_REG(x) (SGMII_OFFSET(x) + 0x014)
+#define SGMII_MRADV_REG(x) (SGMII_OFFSET(x) + 0x018)
+
+static void sgmii_write_reg(void __iomem *base, int reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static u32 sgmii_read_reg(void __iomem *base, int reg)
+{
+ return readl(base + reg);
+}
+
+static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
+{
+ writel((readl(base + reg) | val), base + reg);
+}
+
+/* port is 0 based */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
+{
+ /* Soft reset */
+ sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
+ while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+ ;
+ return 0;
+}
+
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
+{
+ u32 status = 0, link = 0;
+
+ status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+ if ((status & SGMII_REG_STATUS_LINK) != 0)
+ link = 1;
+ return link;
+}
+
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
+{
+ unsigned int i, status, mask;
+ u32 mr_adv_ability;
+ u32 control;
+
+ switch (interface) {
+ case SGMII_LINK_MAC_MAC_AUTONEG:
+ mr_adv_ability = 0x9801;
+ control = 0x21;
+ break;
+
+ case SGMII_LINK_MAC_PHY:
+ case SGMII_LINK_MAC_PHY_NO_MDIO:
+ mr_adv_ability = 1;
+ control = 1;
+ break;
+
+ case SGMII_LINK_MAC_MAC_FORCED:
+ mr_adv_ability = 0x9801;
+ control = 0x20;
+ break;
+
+ case SGMII_LINK_MAC_FIBER:
+ mr_adv_ability = 0x20;
+ control = 0x1;
+ break;
+
+ default:
+ WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
+ return -EINVAL;
+ }
+
+ sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);
+
+ /* Wait for the SerDes pll to lock */
+ for (i = 0; i < 1000; i++) {
+ usleep_range(1000, 2000);
+ status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+ if ((status & SGMII_REG_STATUS_LOCK) != 0)
+ break;
+ }
+
+ if ((status & SGMII_REG_STATUS_LOCK) == 0)
+ pr_err("serdes PLL not locked\n");
+
+ sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
+ sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);
+
+ mask = SGMII_REG_STATUS_LINK;
+ if (control & SGMII_REG_CONTROL_AUTONEG)
+ mask |= SGMII_REG_STATUS_AUTONEG;
+
+ for (i = 0; i < 1000; i++) {
+ usleep_range(200, 500);
+ status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+ if ((status & mask) == mask)
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
new file mode 100644
index 000000000000..33571acc52b6
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -0,0 +1,501 @@
+/*
+ * XGE PCSR module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * WingMan Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "netcp.h"
+
+/* XGBE registers */
+#define XGBE_CTRL_OFFSET 0x0c
+#define XGBE_SGMII_1_OFFSET 0x0114
+#define XGBE_SGMII_2_OFFSET 0x0214
+
+/* PCS-R registers */
+#define PCSR_CPU_CTRL_OFFSET 0x1fd0
+#define POR_EN BIT(29)
+
+#define reg_rmw(addr, value, mask) \
+ writel(((readl(addr) & (~(mask))) | \
+ (value & (mask))), (addr))
+
+/* bit mask of width w at offset s */
+#define MASK_WID_SH(w, s) (((1 << w) - 1) << s)
+
+/* shift value v to offset s */
+#define VAL_SH(v, s) (v << s)
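+
+/* For illustration, the read-modify-write pattern used throughout this
+ * file, e.g. writing value 2 into the 2-bit field at bit offset 1 of a
+ * signal-detect register:
+ *
+ * reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
+ *
+ * expands to
+ *
+ * writel((readl(sig_detect_reg) & ~0x6) | 0x4, sig_detect_reg);
+ */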
+
+#define PHY_A(serdes) 0
+
+struct serdes_cfg {
+ u32 ofs;
+ u32 val;
+ u32 mask;
+};
+
+static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
+ {0x0000, 0x00800002, 0x00ff00ff},
+ {0x0014, 0x00003838, 0x0000ffff},
+ {0x0060, 0x1c44e438, 0xffffffff},
+ {0x0064, 0x00c18400, 0x00ffffff},
+ {0x0068, 0x17078200, 0xffffff00},
+ {0x006c, 0x00000014, 0x000000ff},
+ {0x0078, 0x0000c000, 0x0000ff00},
+ {0x0000, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
+ {0x0c00, 0x00030002, 0x00ff00ff},
+ {0x0c14, 0x00005252, 0x0000ffff},
+ {0x0c28, 0x80000000, 0xff000000},
+ {0x0c2c, 0x000000f6, 0x000000ff},
+ {0x0c3c, 0x04000405, 0xff00ffff},
+ {0x0c40, 0xc0800000, 0xffff0000},
+ {0x0c44, 0x5a202062, 0xffffffff},
+ {0x0c48, 0x40040424, 0xffffffff},
+ {0x0c4c, 0x00004002, 0x0000ffff},
+ {0x0c50, 0x19001c00, 0xff00ff00},
+ {0x0c54, 0x00002100, 0x0000ff00},
+ {0x0c58, 0x00000060, 0x000000ff},
+ {0x0c60, 0x80131e7c, 0xffffffff},
+ {0x0c64, 0x8400cb02, 0xff00ffff},
+ {0x0c68, 0x17078200, 0xffffff00},
+ {0x0c6c, 0x00000016, 0x000000ff},
+ {0x0c74, 0x00000400, 0x0000ff00},
+ {0x0c78, 0x0000c000, 0x0000ff00},
+ {0x0c00, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
+ {0x0204, 0x00000080, 0x000000ff},
+ {0x0208, 0x0000920d, 0x0000ffff},
+ {0x0204, 0xfc000000, 0xff000000},
+ {0x0208, 0x00009104, 0x0000ffff},
+ {0x0210, 0x1a000000, 0xff000000},
+ {0x0214, 0x00006b58, 0x00ffffff},
+ {0x0218, 0x75800084, 0xffff00ff},
+ {0x022c, 0x00300000, 0x00ff0000},
+ {0x0230, 0x00003800, 0x0000ff00},
+ {0x024c, 0x008f0000, 0x00ff0000},
+ {0x0250, 0x30000000, 0xff000000},
+ {0x0260, 0x00000002, 0x000000ff},
+ {0x0264, 0x00000057, 0x000000ff},
+ {0x0268, 0x00575700, 0x00ffff00},
+ {0x0278, 0xff000000, 0xff000000},
+ {0x0280, 0x00500050, 0x00ff00ff},
+ {0x0284, 0x00001f15, 0x0000ffff},
+ {0x028c, 0x00006f00, 0x0000ff00},
+ {0x0294, 0x00000000, 0xffffff00},
+ {0x0298, 0x00002640, 0xff00ffff},
+ {0x029c, 0x00000003, 0x000000ff},
+ {0x02a4, 0x00000f13, 0x0000ffff},
+ {0x02a8, 0x0001b600, 0x00ffff00},
+ {0x0380, 0x00000030, 0x000000ff},
+ {0x03c0, 0x00000200, 0x0000ff00},
+ {0x03cc, 0x00000018, 0x000000ff},
+ {0x03cc, 0x00000000, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
+ {0x0a00, 0x00000800, 0x0000ff00},
+ {0x0a84, 0x00000000, 0x000000ff},
+ {0x0a8c, 0x00130000, 0x00ff0000},
+ {0x0a90, 0x77a00000, 0xffff0000},
+ {0x0a94, 0x00007777, 0x0000ffff},
+ {0x0b08, 0x000f0000, 0xffff0000},
+ {0x0b0c, 0x000f0000, 0x00ffffff},
+ {0x0b10, 0xbe000000, 0xff000000},
+ {0x0b14, 0x000000ff, 0x000000ff},
+ {0x0b18, 0x00000014, 0x000000ff},
+ {0x0b5c, 0x981b0000, 0xffff0000},
+ {0x0b64, 0x00001100, 0x0000ff00},
+ {0x0b78, 0x00000c00, 0x0000ff00},
+ {0x0abc, 0xff000000, 0xff000000},
+ {0x0ac0, 0x0000008b, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_cm_c1_c2[] = {
+ {0x0208, 0x00000000, 0x00000f00},
+ {0x0208, 0x00000000, 0x0000001f},
+ {0x0204, 0x00000000, 0x00040000},
+ {0x0208, 0x000000a0, 0x000000e0},
+};
+
+static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs)
+{
+ int i;
+
+ /* cmu0 setup */
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) {
+ reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs,
+ cfg_phyb_1p25g_156p25mhz_cmu0[i].val,
+ cfg_phyb_1p25g_156p25mhz_cmu0[i].mask);
+ }
+
+ /* cmu1 setup */
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) {
+ reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs,
+ cfg_phyb_10p3125g_156p25mhz_cmu1[i].val,
+ cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask);
+ }
+}
+
+/* lane is 0 based */
+static void netcp_xgbe_serdes_lane_config(
+ void __iomem *serdes_regs, int lane)
+{
+ int i;
+
+ /* lane setup */
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) {
+ reg_rmw(serdes_regs +
+ cfg_phyb_10p3125g_16bit_lane[i].ofs +
+ (0x200 * lane),
+ cfg_phyb_10p3125g_16bit_lane[i].val,
+ cfg_phyb_10p3125g_16bit_lane[i].mask);
+ }
+
+ /* disable auto negotiation*/
+ reg_rmw(serdes_regs + (0x200 * lane) + 0x0380,
+ 0x00000000, 0x00000010);
+
+ /* disable link training */
+ reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0,
+ 0x00000000, 0x00000200);
+}
+
+static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) {
+ reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs,
+ cfg_phyb_10p3125g_comlane[i].val,
+ cfg_phyb_10p3125g_comlane[i].mask);
+ }
+}
+
+static void netcp_xgbe_serdes_lane_enable(
+ void __iomem *serdes_regs, int lane)
+{
+ /* Set Lane Control Rate */
+ writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
+}
+
+static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
+{
+ reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
+}
+
+static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
+{
+ writel(0x88000000, serdes_regs + 0x1ff4);
+}
+
+static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
+{
+ netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
+ writel(0xee000000, serdes_regs + 0x1ff4);
+}
+
+static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs)
+{
+ unsigned long timeout;
+ int ret = 0;
+ u32 val_1, val_0;
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4));
+ val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4));
+
+ if (val_1 && val_0)
+ return 0;
+
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ cpu_relax();
+ } while (true);
+
+ pr_err("XGBE serdes not locked: time out.\n");
+ return ret;
+}
+
+static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
+{
+ writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
+}
+
+static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs)
+{
+ u32 tmp;
+
+ if (PHY_A(serdes_regs)) {
+ tmp = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff;
+ tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00);
+ } else {
+ tmp = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff;
+ }
+
+ return tmp;
+}
+
+static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs,
+ int select, int ofs)
+{
+ if (PHY_A(serdes_regs)) {
+ reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24,
+ ~0x00ffffff);
+ return;
+ }
+
+ /* For 2 lane Phy-B, lane0 is actually lane1 */
+ switch (select) {
+ case 1:
+ select = 2;
+ break;
+ case 2:
+ select = 3;
+ break;
+ default:
+ return;
+ }
+
+ reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff);
+}
+
+static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
+ int select, int ofs)
+{
+ /* Set tbus address */
+ netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
+ /* Get TBUS Value */
+ return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
+}
+
+static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs,
+ void __iomem *sig_detect_reg, int lane)
+{
+ u32 tmp, dlpf, tbus;
+
+ /*Get the DLPF values */
+ tmp = netcp_xgbe_serdes_read_select_tbus(
+ serdes_regs, lane + 1, 5);
+
+ dlpf = tmp >> 2;
+
+ if (dlpf < 400 || dlpf > 700) {
+ reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
+ mdelay(1);
+ reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1));
+ } else {
+ tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane +
+ 1, 0xe);
+
+ pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n",
+ tmp >> 2, tmp & 3, (tbus >> 2) & 3);
+ }
+}
+
+/* Call every 100 ms */
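+/* Per-lane state machine used below:
+ * 0 - waiting for signal detect and PCSR block lock; the CDR is reset
+ *     while the block lock is missing
+ * 1 - linked; losing the block lock marks the lane down and moves to 2
+ * 2 - block lock lost; regaining it returns to 1, otherwise the CDR is
+ *     reset and the lane goes back to 0
+ */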
+static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
+ void __iomem *sw_regs, u32 lanes,
+ u32 *current_state, u32 *lane_down)
+{
+ void __iomem *pcsr_base = sw_regs + 0x0600;
+ void __iomem *sig_detect_reg;
+ u32 pcsr_rx_stat, blk_lock, blk_errs;
+ int loss, i, status = 1;
+
+ for (i = 0; i < lanes; i++) {
+ /* Get the Loss bit */
+ loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;
+
+ /* Get Block Errors and Block Lock bits */
+ pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
+ blk_lock = (pcsr_rx_stat >> 30) & 0x1;
+ blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;
+
+ /* Get Signal Detect Overlay Address */
+ sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;
+
+ /* If Block errors maxed out, attempt recovery! */
+ if (blk_errs == 0x0ff)
+ blk_lock = 0;
+
+ switch (current_state[i]) {
+ case 0:
+ /* if the link is good, lock the signal detect ON! */
+ if (!loss && blk_lock) {
+ pr_debug("XGBE PCSR Linked Lane: %d\n", i);
+ reg_rmw(sig_detect_reg, VAL_SH(3, 1),
+ MASK_WID_SH(2, 1));
+ current_state[i] = 1;
+ } else if (!blk_lock) {
+ /* if no lock, then reset CDR */
+ pr_debug("XGBE PCSR Recover Lane: %d\n", i);
+ netcp_xgbe_serdes_reset_cdr(serdes_regs,
+ sig_detect_reg, i);
+ }
+ break;
+
+ case 1:
+ if (!blk_lock) {
+ /* Link Lost? */
+ lane_down[i] = 1;
+ current_state[i] = 2;
+ }
+ break;
+
+ case 2:
+ if (blk_lock)
+ /* Nope just noise */
+ current_state[i] = 1;
+ else {
+ /* Lost the block lock, reset CDR if it is
+ * not centered and go back to sync state
+ */
+ netcp_xgbe_serdes_reset_cdr(serdes_regs,
+ sig_detect_reg, i);
+ current_state[i] = 0;
+ }
+ break;
+
+ default:
+ pr_err("XGBE: unknown current_state[%d] %d\n",
+ i, current_state[i]);
+ break;
+ }
+
+ if (blk_errs > 0) {
+ /* Reset the Error counts! */
+ reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
+ MASK_WID_SH(8, 0));
+
+ reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
+ MASK_WID_SH(8, 0));
+ }
+
+ status &= (current_state[i] == 1);
+ }
+
+ return status;
+}
+
+static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs,
+ void __iomem *sw_regs)
+{
+ u32 current_state[2] = {0, 0};
+ int retries = 0, link_up;
+ u32 lane_down[2];
+
+ do {
+ lane_down[0] = 0;
+ lane_down[1] = 0;
+
+ link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2,
+ current_state,
+ lane_down);
+
+ /* if we did not get link up then wait 100ms before calling
+ * it again
+ */
+ if (link_up)
+ break;
+
+ if (lane_down[0])
+ pr_debug("XGBE: detected link down on lane 0\n");
+
+ if (lane_down[1])
+ pr_debug("XGBE: detected link down on lane 1\n");
+
+ if (++retries > 1) {
+ pr_debug("XGBE: timeout waiting for serdes link up\n");
+ return -ETIMEDOUT;
+ }
+ mdelay(100);
+ } while (!link_up);
+
+ pr_debug("XGBE: PCSR link is up\n");
+ return 0;
+}
+
+static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
+ int lane, int cm, int c1, int c2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
+ reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
+ cfg_cm_c1_c2[i].val,
+ cfg_cm_c1_c2[i].mask);
+ }
+}
+
+static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
+{
+ /* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
+ /* enable POR_EN bit */
+ reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
+ usleep_range(10, 100);
+
+ /* disable POR_EN bit */
+ reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
+ usleep_range(10, 100);
+}
+
+static int netcp_xgbe_serdes_config(void __iomem *serdes_regs,
+ void __iomem *sw_regs)
+{
+ int ret, i;
+
+ netcp_xgbe_serdes_pll_disable(serdes_regs);
+ netcp_xgbe_serdes_cmu_init(serdes_regs);
+
+ for (i = 0; i < 2; i++)
+ netcp_xgbe_serdes_lane_config(serdes_regs, i);
+
+ netcp_xgbe_serdes_com_enable(serdes_regs);
+ /* This is EVM + RTM-BOC specific */
+ for (i = 0; i < 2; i++)
+ netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5);
+
+ netcp_xgbe_serdes_pll_enable(serdes_regs);
+ for (i = 0; i < 2; i++)
+ netcp_xgbe_serdes_lane_enable(serdes_regs, i);
+
+ /* SB PLL Status Poll */
+ ret = netcp_xgbe_wait_pll_locked(sw_regs);
+ if (ret)
+ return ret;
+
+ netcp_xgbe_serdes_enable_xgmii_port(sw_regs);
+ netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs);
+ return ret;
+}
+
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs)
+{
+ u32 val;
+
+ /* read COMLANE bits 4:0 */
+ val = readl(serdes_regs + 0xa00);
+ if (val & 0x1f) {
+ pr_debug("XGBE: serdes already in operation - reset\n");
+ netcp_xgbe_reset_serdes(serdes_regs);
+ }
+ return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs);
+}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index f2ff0074aac9..691ec936e88d 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2540,7 +2540,7 @@ static void tlan_phy_power_down(struct net_device *dev)
 * This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
- tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
+ tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
}
@@ -2561,7 +2561,7 @@ static void tlan_phy_power_up(struct net_device *dev)
* transceiver. The TLAN docs say both 50 ms and
* 500 ms, so do the longer, just in case.
*/
- tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
+ tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
}
@@ -2593,7 +2593,7 @@ static void tlan_phy_reset(struct net_device *dev)
* I don't remember why I wait this long.
* I've changed this to 50ms, as it seems long enough.
*/
- tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
+ tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
}
@@ -2658,7 +2658,7 @@ static void tlan_phy_start_link(struct net_device *dev)
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
| TLAN_NET_CFG_PHY_EN;
tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
- tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+ tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
return;
} else if (priv->phy_num == 0) {
control = 0;
@@ -2725,7 +2725,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
(priv->phy_num != 0)) {
priv->phy_num = 0;
- tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+ tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
return;
}
@@ -2744,7 +2744,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
/* Wait for 100 ms. No reason in particular.
*/
- tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
+ tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
}
@@ -2796,7 +2796,7 @@ static void tlan_phy_monitor(unsigned long data)
/* set to external PHY */
priv->phy_num = 1;
/* restart autonegotiation */
- tlan_set_timer(dev, 4 * HZ / 10,
+ tlan_set_timer(dev, msecs_to_jiffies(400),
TLAN_TIMER_PHY_PDOWN);
return;
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 049747f558c9..bea8cd2bb56c 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -292,7 +292,6 @@ static inline int mpipe_instance(struct net_device *dev)
*/
static bool network_cpus_init(void)
{
- char buf[1024];
int rc;
if (network_cpus_string == NULL)
@@ -314,8 +313,8 @@ static bool network_cpus_init(void)
return false;
}
- cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
- pr_info("Linux network CPUs: %s\n", buf);
+ pr_info("Linux network CPUs: %*pbl\n",
+ cpumask_pr_args(&network_cpus_map));
return true;
}
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index fb12d31cfcf6..3d8f60d9643e 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -2410,9 +2410,8 @@ static int __init network_cpus_setup(char *str)
if (cpumask_empty(&network_cpus_map)) {
pr_warn("Ignoring network_cpus='%s'\n", str);
} else {
- char buf[1024];
- cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
- pr_info("Linux network CPUs: %s\n", buf);
+ pr_info("Linux network CPUs: %*pbl\n",
+ cpumask_pr_args(&network_cpus_map));
network_cpus_used = true;
}
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a191afc23b56..17e276651601 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1326,7 +1326,8 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
+ if (!rp->mii_if.force_media)
+ mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
if (rp->mii_if.full_duplex)
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1781,8 +1782,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_ring[entry].desc_length =
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
- if (unlikely(vlan_tx_tag_present(skb))) {
- u16 vid_pcp = vlan_tx_tag_get(skb);
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ u16 vid_pcp = skb_vlan_tag_get(skb);
/* drop CFI/DEI bit, register needs VID and PCP */
vid_pcp = (vid_pcp & VLAN_VID_MASK) |
@@ -1803,7 +1804,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
/* Non-x86 Todo: explicitly flush cache lines here. */
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 282f83a63b67..c20206f83cc1 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
- if (vlan_tx_tag_present(skb)) {
- td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ if (skb_vlan_tag_present(skb)) {
+ td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 9edada85ed02..cd78b7cacc75 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -1736,18 +1736,6 @@ char *addr_to_string(struct fddi_addr *addr)
}
#endif
-#ifdef AM29K
-int smt_ifconfig(int argc, char *argv[])
-{
- if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
- !strcmp(argv[1],"yes")) {
- smc->mib.fddiSMTBypassPresent = 1 ;
- return 0;
- }
- return amdfddi_config(0, argc, argv);
-}
-#endif
-
/*
* return static mac index
*/
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9f49c0129a78..208eb05446ba 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -217,7 +217,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
- int t;
+ unsigned long t;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
@@ -409,7 +409,8 @@ static int negotiate_nvsp_ver(struct hv_device *device,
struct nvsp_message *init_packet,
u32 nvsp_ver)
{
- int ret, t;
+ int ret;
+ unsigned long t;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -684,9 +685,9 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
return ret_val;
}
-u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
- unsigned int section_index,
- struct hv_netvsc_packet *packet)
+static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ struct hv_netvsc_packet *packet)
{
char *start = net_device->send_buf;
char *dest = (start + (section_index * net_device->send_section_size));
@@ -716,7 +717,7 @@ int netvsc_send(struct hv_device *device,
u64 req_id;
unsigned int section_index = NETVSC_INVALID_INDEX;
u32 msg_size = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
u16 q_idx = packet->q_idx;
@@ -743,8 +744,6 @@ int netvsc_send(struct hv_device *device,
packet);
skb = (struct sk_buff *)
(unsigned long)packet->send_completion_tid;
- if (skb)
- dev_kfree_skb_any(skb);
packet->page_buf_cnt = 0;
}
}
@@ -810,6 +809,13 @@ int netvsc_send(struct hv_device *device,
packet, ret);
}
+ if (ret != 0) {
+ if (section_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, section_index);
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
+ }
+
return ret;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index ec0c40a8f653..7816d98bdddc 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -470,7 +470,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
int ret = 0;
- int t;
+ unsigned long t;
if (!result)
return -EINVAL;
@@ -560,7 +560,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
char macstr[2*ETH_ALEN+1];
u32 extlen = sizeof(struct rndis_config_parameter_info) +
2*NWADR_STRLEN + 4*ETH_ALEN;
- int ret, t;
+ int ret;
+ unsigned long t;
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -623,7 +624,8 @@ cleanup:
return ret;
}
-int rndis_filter_set_offload_params(struct hv_device *hdev,
+static int
+rndis_filter_set_offload_params(struct hv_device *hdev,
struct ndis_offload_params *req_offloads)
{
struct netvsc_device *nvdev = hv_get_drvdata(hdev);
@@ -634,7 +636,8 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
struct ndis_offload_params *offload_params;
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_offload_params);
- int ret, t;
+ int ret;
+ unsigned long t;
u32 vsp_version = nvdev->nvsp_version;
if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -697,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = {
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
-int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
{
struct net_device *ndev = rdev->net_dev->ndev;
struct rndis_request *request;
@@ -708,7 +711,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
struct ndis_recv_scale_param *rssp;
u32 *itab;
u8 *keyp;
- int i, t, ret;
+ int i, ret;
+ unsigned long t;
request = get_rndis_request(
rdev, RNDIS_MSG_SET,
@@ -792,7 +796,8 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
u32 status;
- int ret, t;
+ int ret;
+ unsigned long t;
struct net_device *ndev;
ndev = dev->net_dev->ndev;
@@ -848,7 +853,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
struct rndis_initialize_request *init;
struct rndis_initialize_complete *init_complete;
u32 status;
- int ret, t;
+ int ret;
+ unsigned long t;
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -998,7 +1004,7 @@ int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
- int t;
+ unsigned long t;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 1c0135620c62..7b051eacb7f1 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -427,7 +427,7 @@ at86rf230_reg_precious(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config at86rf230_regmap_spi_config = {
+static const struct regmap_config at86rf230_regmap_spi_config = {
.reg_bits = 8,
.val_bits = 8,
.write_flag_mask = CMD_REG | CMD_WRITE,
@@ -450,7 +450,7 @@ at86rf230_async_error_recover(void *context)
ieee802154_wake_queue(lp->hw);
}
-static void
+static inline void
at86rf230_async_error(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx, int rc)
{
@@ -524,7 +524,6 @@ at86rf230_async_state_assert(void *context)
}
}
-
dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n",
ctx->from_state, ctx->to_state, trx_state);
}
@@ -655,7 +654,7 @@ at86rf230_async_state_change_start(void *context)
if (ctx->irq_enable)
enable_irq(lp->spi->irq);
- at86rf230_async_error(lp, &lp->state, rc);
+ at86rf230_async_error(lp, ctx, rc);
}
}
@@ -715,10 +714,7 @@ at86rf230_tx_complete(void *context)
enable_irq(lp->spi->irq);
- if (lp->max_frame_retries <= 0)
- ieee802154_xmit_complete(lp->hw, skb, true);
- else
- ieee802154_xmit_complete(lp->hw, skb, false);
+ ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
}
static void
@@ -753,16 +749,13 @@ at86rf230_tx_trac_check(void *context)
* to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
* state to TX_ON.
*/
- if (trac) {
+ if (trac)
at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
at86rf230_tx_trac_error, true);
- return;
- }
-
- at86rf230_tx_on(context);
+ else
+ at86rf230_tx_on(context);
}
-
static void
at86rf230_tx_trac_status(void *context)
{
@@ -1082,7 +1075,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for saddr\n");
+ "at86rf230_set_hw_addr_filt called for saddr\n");
__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -1091,7 +1084,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for pan id\n");
+ "at86rf230_set_hw_addr_filt called for pan id\n");
__at86rf230_write(lp, RG_PAN_ID_0, pan);
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
@@ -1101,14 +1094,14 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
memcpy(addr, &filt->ieee_addr, 8);
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ "at86rf230_set_hw_addr_filt called for IEEE addr\n");
for (i = 0; i < 8; i++)
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for panc change\n");
+ "at86rf230_set_hw_addr_filt called for panc change\n");
if (filt->pan_coord)
at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
else
@@ -1146,11 +1139,37 @@ at86rf230_set_lbt(struct ieee802154_hw *hw, bool on)
}
static int
-at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca)
{
struct at86rf230_local *lp = hw->priv;
+ u8 val;
+
+ /* mapping 802.15.4 to driver spec */
+ switch (cca->mode) {
+ case NL802154_CCA_ENERGY:
+ val = 1;
+ break;
+ case NL802154_CCA_CARRIER:
+ val = 2;
+ break;
+ case NL802154_CCA_ENERGY_CARRIER:
+ switch (cca->opt) {
+ case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+ val = 3;
+ break;
+ case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
- return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
static int
@@ -1400,7 +1419,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
if (rc)
return rc;
- rc = __at86rf230_read(lp, RG_PART_NUM, &version);
+ rc = __at86rf230_read(lp, RG_VERSION_NUM, &version);
if (rc)
return rc;
@@ -1410,11 +1429,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
return -EINVAL;
}
- lp->hw->extra_tx_headroom = 0;
lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+ lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
+
switch (part) {
case 2:
chip = "at86rf230";
@@ -1429,16 +1449,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
break;
case 7:
chip = "at86rf212";
- if (version == 1) {
- lp->data = &at86rf212_data;
- lp->hw->flags |= IEEE802154_HW_LBT;
- lp->hw->phy->channels_supported[0] = 0x00007FF;
- lp->hw->phy->channels_supported[2] = 0x00007FF;
- lp->hw->phy->current_channel = 5;
- lp->hw->phy->symbol_duration = 25;
- } else {
- rc = -ENOTSUPP;
- }
+ lp->data = &at86rf212_data;
+ lp->hw->flags |= IEEE802154_HW_LBT;
+ lp->hw->phy->channels_supported[0] = 0x00007FF;
+ lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->current_channel = 5;
+ lp->hw->phy->symbol_duration = 25;
break;
case 11:
chip = "at86rf233";
@@ -1448,7 +1464,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->symbol_duration = 16;
break;
default:
- chip = "unkown";
+ chip = "unknown";
rc = -ENOTSUPP;
break;
}
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f9df9fa86d5f..181b349b060e 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -19,7 +19,6 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
@@ -45,9 +44,9 @@
#define CC2520_FREG_MASK 0x3F
/* status byte values */
-#define CC2520_STATUS_XOSC32M_STABLE (1 << 7)
-#define CC2520_STATUS_RSSI_VALID (1 << 6)
-#define CC2520_STATUS_TX_UNDERFLOW (1 << 3)
+#define CC2520_STATUS_XOSC32M_STABLE BIT(7)
+#define CC2520_STATUS_RSSI_VALID BIT(6)
+#define CC2520_STATUS_TX_UNDERFLOW BIT(3)
/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
#define CC2520_MINCHANNEL 11
@@ -513,7 +512,6 @@ err_tx:
return rc;
}
-
static int cc2520_rx(struct cc2520_private *priv)
{
u8 len = 0, lqi = 0, bytes = 1;
@@ -551,14 +549,14 @@ cc2520_ed(struct ieee802154_hw *hw, u8 *level)
u8 rssi;
int ret;
- ret = cc2520_read_register(priv , CC2520_RSSISTAT, &status);
+ ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status);
if (ret)
return ret;
if (status != RSSI_VALID)
return -EINVAL;
- ret = cc2520_read_register(priv , CC2520_RSSI, &rssi);
+ ret = cc2520_read_register(priv, CC2520_RSSI, &rssi);
if (ret)
return ret;
@@ -652,6 +650,7 @@ static int cc2520_register(struct cc2520_private *priv)
priv->hw->parent = &priv->spi->dev;
priv->hw->extra_tx_headroom = 0;
priv->hw->vif_data_size = sizeof(*priv);
+ ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
/* We do support only 2.4 Ghz */
priv->hw->phy->channels_supported[0] = 0x7FFF800;
@@ -842,24 +841,15 @@ done:
static int cc2520_probe(struct spi_device *spi)
{
struct cc2520_private *priv;
- struct pinctrl *pinctrl;
struct cc2520_platform_data *pdata;
int ret;
- priv = devm_kzalloc(&spi->dev,
- sizeof(struct cc2520_private), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_ret;
- }
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
spi_set_drvdata(spi, priv);
- pinctrl = devm_pinctrl_get_select_default(&spi->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&spi->dev,
- "pinctrl pins are not configured\n");
-
pdata = cc2520_get_platform_data(spi);
if (!pdata) {
dev_err(&spi->dev, "no platform data\n");
@@ -870,10 +860,8 @@ static int cc2520_probe(struct spi_device *spi)
priv->buf = devm_kzalloc(&spi->dev,
SPI_COMMAND_BUFFER, GFP_KERNEL);
- if (!priv->buf) {
- ret = -ENOMEM;
- goto err_ret;
- }
+ if (!priv->buf)
+ return -ENOMEM;
mutex_init(&priv->buffer_mutex);
INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork);
@@ -947,7 +935,6 @@ static int cc2520_probe(struct spi_device *spi)
if (ret)
goto err_hw_init;
-
gpio_set_value(pdata->vreg, HIGH);
usleep_range(100, 150);
@@ -991,8 +978,6 @@ static int cc2520_probe(struct spi_device *spi)
err_hw_init:
mutex_destroy(&priv->buffer_mutex);
flush_work(&priv->fifop_irqwork);
-
-err_ret:
return ret;
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index a200fa16beae..fba2dfd910f7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -289,7 +289,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
goto out;
/* Range check the RX FIFO length, accounting for the one-byte
- * length field at the begining. */
+ * length field at the beginning. */
if (rx_len > RX_FIFO_SIZE-1) {
dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
rx_len = RX_FIFO_SIZE-1;
@@ -323,7 +323,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
#ifdef DEBUG
print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
- DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
+ DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
lqi_rssi[0], lqi_rssi[1]);
#endif
@@ -521,7 +521,7 @@ static int mrf24j40_filter(struct ieee802154_hw *hw,
*/
dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
- filt->pan_coord ? "on" : "off");
+ filt->pan_coord ? "on" : "off");
}
return 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index a14d87783245..2a175006028b 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -9,7 +9,7 @@
#include "ipvlan.h"
-static u32 ipvlan_jhash_secret;
+static u32 ipvlan_jhash_secret __read_mostly;
void ipvlan_init_secret(void)
{
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
};
dst = ip6_route_output(dev_net(dev), NULL, &fl6);
- if (IS_ERR(dst))
+ if (dst->error) {
+ ret = dst->error;
+ dst_release(dst);
goto err;
-
+ }
skb_dst_drop(skb);
skb_dst_set(skb, dst);
err = ip6_local_out(skb);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58f98f4de773..58ae11a14bb6 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1462,17 +1462,12 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
if (mtt)
{
/* Check how much time we have used already */
- do_gettimeofday(&self->now);
-
- diff = self->now.tv_usec - self->stamp.tv_usec;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
/* self->stamp is set from ali_ircc_dma_receive_complete() */
pr_debug("%s(), ******* diff = %d *******\n",
__func__, diff);
-
- if (diff < 0)
- diff += 1000000;
-
+
/* Check if the mtt is larger than the time we have
* already used by all the protocol processing
*/
@@ -1884,7 +1879,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
* reduce the min turn time a bit since we will know
* how much time we have used for protocol processing
*/
- do_gettimeofday(&self->stamp);
+ self->stamp = ktime_get();
skb = dev_alloc_skb(len+1);
if (skb == NULL)
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
index 0c8edb41bd0a..c2d9747a5108 100644
--- a/drivers/net/irda/ali-ircc.h
+++ b/drivers/net/irda/ali-ircc.h
@@ -22,7 +22,7 @@
#ifndef ALI_IRCC_H
#define ALI_IRCC_H
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
@@ -209,8 +209,7 @@ struct ali_ircc_cb {
unsigned char rcvFramesOverflow;
- struct timeval stamp;
- struct timeval now;
+ ktime_t stamp;
spinlock_t lock; /* For serializing operations */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index e151205281e2..44e4f386a5dc 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/time.h>
#include <linux/types.h>
#include <linux/ioport.h>
@@ -163,8 +162,6 @@ struct au1k_private {
iobuff_t rx_buff;
struct net_device *netdev;
- struct timeval stamp;
- struct timeval now;
struct qos_info qos;
struct irlap_cb *irlap;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 48b2f9a321b7..f6c916312577 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -495,18 +495,12 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
if (mtt) {
int diff;
- do_gettimeofday(&self->now);
- diff = self->now.tv_usec - self->stamp.tv_usec;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
#ifdef IU_USB_MIN_RTT
/* Factor in USB delays -> Get rid of udelay() that
* would be lost in the noise - Jean II */
diff += IU_USB_MIN_RTT;
#endif /* IU_USB_MIN_RTT */
- /* If the usec counter did wraparound, the diff will
- * go negative (tv_usec is a long), so we need to
- * correct it by one second. Jean II */
- if (diff < 0)
- diff += 1000000;
/* Check if the mtt is larger than the time we have
* already used by all the protocol processing
@@ -869,7 +863,7 @@ static void irda_usb_receive(struct urb *urb)
* reduce the min turn time a bit since we will know
* how much time we have used for protocol processing
*/
- do_gettimeofday(&self->stamp);
+ self->stamp = ktime_get();
/* Check if we need to copy the data to a new skb or not.
* For most frames, we use ZeroCopy and pass the already
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index 58ddb5214916..8ac389fa9348 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -29,7 +29,7 @@
*
*****************************************************************************/
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h> /* struct irlap_cb */
@@ -157,8 +157,7 @@ struct irda_usb_cb {
char *speed_buff; /* Buffer for speed changes */
char *tx_buff;
- struct timeval stamp;
- struct timeval now;
+ ktime_t stamp;
spinlock_t lock; /* For serializing Tx operations */
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index e638893e98a9..fb5d162ec7d2 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -114,7 +114,6 @@ struct kingsun_cb {
(usually 8) */
iobuff_t rx_buff; /* receive unwrap state machine */
- struct timeval rx_time;
spinlock_t lock;
int receiving;
@@ -235,7 +234,6 @@ static void kingsun_rcv_irq(struct urb *urb)
&kingsun->netdev->stats,
&kingsun->rx_buff, bytes[i]);
}
- do_gettimeofday(&kingsun->rx_time);
kingsun->receiving =
(kingsun->rx_buff.state != OUTSIDE_FRAME)
? 1 : 0;
@@ -273,7 +271,6 @@ static int kingsun_net_open(struct net_device *netdev)
skb_reserve(kingsun->rx_buff.skb, 1);
kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
- do_gettimeofday(&kingsun->rx_time);
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->rx_urb)
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index e6b3804edacd..8e6e0edf2440 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -187,7 +187,6 @@ struct ks959_cb {
__u8 *rx_buf;
__u8 rx_variable_xormask;
iobuff_t rx_unwrap_buff;
- struct timeval rx_time;
struct usb_ctrlrequest *speed_setuprequest;
struct urb *speed_urb;
@@ -476,7 +475,6 @@ static void ks959_rcv_irq(struct urb *urb)
bytes[i]);
}
}
- do_gettimeofday(&kingsun->rx_time);
kingsun->receiving =
(kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
}
@@ -514,7 +512,6 @@ static int ks959_net_open(struct net_device *netdev)
skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
- do_gettimeofday(&kingsun->rx_time);
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->rx_urb)
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index e4d678fbeb2f..bca6a1e72d1d 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -722,7 +722,6 @@ static int mcs_net_open(struct net_device *netdev)
skb_reserve(mcs->rx_buff.skb, 1);
mcs->rx_buff.head = mcs->rx_buff.skb->data;
- do_gettimeofday(&mcs->rx_time);
/*
* Now that everything should be initialized properly,
@@ -799,7 +798,6 @@ static void mcs_receive_irq(struct urb *urb)
mcs_unwrap_fir(mcs, urb->transfer_buffer,
urb->actual_length);
}
- do_gettimeofday(&mcs->rx_time);
}
ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/irda/mcs7780.h b/drivers/net/irda/mcs7780.h
index b10689b2887c..a6e8f7dbafc9 100644
--- a/drivers/net/irda/mcs7780.h
+++ b/drivers/net/irda/mcs7780.h
@@ -116,7 +116,6 @@ struct mcs_cb {
__u8 *fifo_status;
iobuff_t rx_buff; /* receive unwrap state machine */
- struct timeval rx_time;
spinlock_t lock;
int receiving;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index e7317b104bfb..dc0dbd8dd0b5 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1501,10 +1501,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
if (mtt) {
/* Check how much time we have used already */
- do_gettimeofday(&self->now);
- diff = self->now.tv_usec - self->stamp.tv_usec;
- if (diff < 0)
- diff += 1000000;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
/* Check if the mtt is larger than the time we have
* already used by all the protocol processing
@@ -1867,7 +1864,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
* reduce the min turn time a bit since we will know
* how much time we have used for protocol processing
*/
- do_gettimeofday(&self->stamp);
+ self->stamp = ktime_get();
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 32fa58211fad..7be5acb56532 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -28,7 +28,7 @@
#ifndef NSC_IRCC_H
#define NSC_IRCC_H
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
@@ -263,8 +263,7 @@ struct nsc_ircc_cb {
__u8 ier; /* Interrupt enable register */
- struct timeval stamp;
- struct timeval now;
+ ktime_t stamp;
spinlock_t lock; /* For serializing operations */
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 7b17fa2114e1..b6e44ff4e373 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -38,7 +38,7 @@
#include <net/irda/irda_device.h>
#include <mach/hardware.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
static int power_level = 3;
static int tx_lpm;
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index dd1bd1060ec9..83cc48a01802 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -40,6 +40,7 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/skbuff.h>
@@ -174,7 +175,7 @@ struct stir_cb {
__u8 *fifo_status;
iobuff_t rx_buff; /* receive unwrap state machine */
- struct timeval rx_time;
+ ktime_t rx_time;
int receiving;
struct urb *rx_urb;
};
@@ -650,15 +651,12 @@ static int fifo_txwait(struct stir_cb *stir, int space)
static void turnaround_delay(const struct stir_cb *stir, long us)
{
long ticks;
- struct timeval now;
if (us <= 0)
return;
- do_gettimeofday(&now);
- if (now.tv_sec - stir->rx_time.tv_sec > 0)
- us -= USEC_PER_SEC;
- us -= now.tv_usec - stir->rx_time.tv_usec;
+ us -= ktime_us_delta(ktime_get(), stir->rx_time);
+
if (us < 10)
return;
@@ -823,8 +821,8 @@ static void stir_rcv_irq(struct urb *urb)
pr_debug("receive %d\n", urb->actual_length);
unwrap_chars(stir, urb->transfer_buffer,
urb->actual_length);
-
- do_gettimeofday(&stir->rx_time);
+
+ stir->rx_time = ktime_get();
}
/* kernel thread is stopping receiver don't resubmit */
@@ -876,7 +874,7 @@ static int stir_net_open(struct net_device *netdev)
skb_reserve(stir->rx_buff.skb, 1);
stir->rx_buff.head = stir->rx_buff.skb->data;
- do_gettimeofday(&stir->rx_time);
+ stir->rx_time = ktime_get();
stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!stir->rx_urb)
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 7ce820ecc361..ac1525573398 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -29,7 +29,6 @@ this program; if not, see <http://www.gnu.org/licenses/>.
********************************************************************/
#ifndef via_IRCC_H
#define via_IRCC_H
-#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/types.h>
@@ -106,9 +105,6 @@ struct via_ircc_cb {
__u8 ier; /* Interrupt enable register */
- struct timeval stamp;
- struct timeval now;
-
spinlock_t lock; /* For serializing operations */
__u32 flags; /* Interface flags */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ac39d9f33d5f..a0849f49bbec 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -33,6 +33,7 @@ MODULE_LICENSE("GPL");
/********************************************************/
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -40,9 +41,9 @@ MODULE_LICENSE("GPL");
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/math64.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -180,8 +181,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
vlsi_irda_dev_t *idev = netdev_priv(ndev);
u8 byte;
u16 word;
- unsigned delta1, delta2;
- struct timeval now;
+ s32 sec, usec;
unsigned iobase = ndev->base_addr;
seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
@@ -277,17 +277,9 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
seq_printf(seq, "\nsw-state:\n");
seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
- do_gettimeofday(&now);
- if (now.tv_usec >= idev->last_rx.tv_usec) {
- delta2 = now.tv_usec - idev->last_rx.tv_usec;
- delta1 = 0;
- }
- else {
- delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
- delta1 = 1;
- }
- seq_printf(seq, "last rx: %lu.%06u sec\n",
- now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
+ sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
+ USEC_PER_SEC, &usec);
+ seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
@@ -661,7 +653,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
}
}
- do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
+ idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
vlsi_fill_rx(r);
@@ -858,9 +850,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
unsigned iobase = ndev->base_addr;
u8 status;
u16 config;
- int mtt;
+ int mtt, diff;
int len, speed;
- struct timeval now, ready;
char *msg = NULL;
speed = irda_get_next_speed(skb);
@@ -940,21 +931,10 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&idev->lock, flags);
if ((mtt = irda_get_mtt(skb)) > 0) {
-
- ready.tv_usec = idev->last_rx.tv_usec + mtt;
- ready.tv_sec = idev->last_rx.tv_sec;
- if (ready.tv_usec >= 1000000) {
- ready.tv_usec -= 1000000;
- ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
- }
- for(;;) {
- do_gettimeofday(&now);
- if (now.tv_sec > ready.tv_sec ||
- (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
- break;
- udelay(100);
+ diff = ktime_us_delta(ktime_get(), idev->last_rx);
+ if (mtt > diff)
+ udelay(mtt - diff);
/* must not sleep here - called under netif_tx_lock! */
- }
}
/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
@@ -1333,7 +1313,7 @@ static int vlsi_start_hw(vlsi_irda_dev_t *idev)
vlsi_fill_rx(idev->rx_ring);
- do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
@@ -1520,7 +1500,7 @@ static int vlsi_open(struct net_device *ndev)
if (!idev->irlap)
goto errout_free_ring;
- do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
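A small sketch of the seconds/microseconds split used by the vlsi_proc_ndev() hunk above; demo_print_last_rx() is an illustrative name and the format assumes a non-negative delta:

/* Sketch: split a microsecond delta into whole seconds plus the microsecond
 * remainder for printing, as done above with div_s64_rem().
 */
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/seq_file.h>

static void demo_print_last_rx(struct seq_file *seq, ktime_t last_rx)
{
        s32 usec;
        s64 sec = div_s64_rem(ktime_us_delta(ktime_get(), last_rx),
                              USEC_PER_SEC, &usec);

        seq_printf(seq, "last rx: %lld.%06d sec\n", sec, usec);
}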
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index f9119c6d2a09..f9db2ce4c5c6 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -723,7 +723,7 @@ typedef struct vlsi_irda_dev {
void *virtaddr;
struct vlsi_ring *tx_ring, *rx_ring;
- struct timeval last_rx;
+ ktime_t last_rx;
spinlock_t lock;
struct mutex mtx;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 612e0731142d..1df38bdae2ee 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1471,11 +1471,17 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
};
EXPORT_SYMBOL_GPL(macvlan_link_register);
+static struct net *macvlan_get_link_net(const struct net_device *dev)
+{
+ return dev_net(macvlan_dev_real_dev(dev));
+}
+
static struct rtnl_link_ops macvlan_link_ops = {
.kind = "macvlan",
.setup = macvlan_setup,
.newlink = macvlan_newlink,
.dellink = macvlan_dellink,
+ .get_link_net = macvlan_get_link_net,
};
static int macvlan_device_event(struct notifier_block *unused,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7df221788cd4..e40fdfccc9c1 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include <linux/uio.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
- NETIF_F_TSO6)
+ NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
- current->comm);
gso_type = SKB_GSO_UDP;
- if (skb->protocol == htons(ETH_P_IPV6))
- ipv6_proxy_select_ident(skb);
break;
default:
return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -645,7 +642,7 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
vnet_hdr->csum_start = cpu_to_macvtap16(q,
skb_checksum_start_offset(skb) + VLAN_HLEN);
else
@@ -821,13 +818,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
total = vnet_hdr_len;
total += skb->len;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
} veth;
veth.h_vlan_proto = skb->vlan_proto;
- veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
total += VLAN_HLEN;
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
if (arg & TUN_F_TSO6)
feature_mask |= NETIF_F_TSO6;
}
+
+ if (arg & TUN_F_UFO)
+ feature_mask |= NETIF_F_UFO;
}
/* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
* When user space turns off TSO, we turn off GSO/LRO so that
* user-space will not receive TSO frames.
*/
- if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
features |= RX_OFFLOADS;
else
features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
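A hedged sketch of the TUN_F_* to NETIF_F_* mapping that set_offload() performs once UFO is accepted again; demo_offload_to_features() is an illustrative helper, not the driver function itself:

/* Sketch: translate TUNSETOFFLOAD flag bits into netdev feature bits,
 * mirroring the set_offload() hunks above (checksum is a prerequisite
 * for the segmentation offloads).
 */
#include <linux/if_tun.h>
#include <linux/netdevice.h>

static netdev_features_t demo_offload_to_features(unsigned long arg)
{
        netdev_features_t mask = 0;

        if (arg & TUN_F_CSUM) {
                mask = NETIF_F_HW_CSUM;

                if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
                        if (arg & TUN_F_TSO_ECN)
                                mask |= NETIF_F_TSO_ECN;
                        if (arg & TUN_F_TSO4)
                                mask |= NETIF_F_TSO;
                        if (arg & TUN_F_TSO6)
                                mask |= NETIF_F_TSO6;
                }

                if (arg & TUN_F_UFO)
                        mask |= NETIF_F_UFO;
        }

        return mask;
}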
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 4a99c3919037..993570b1e2ae 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -302,7 +302,7 @@ void mii_check_link (struct mii_if_info *mii)
}
/**
- * mii_check_media - check the MII interface for a duplex change
+ * mii_check_media - check the MII interface for a carrier/speed/duplex change
* @mii: the MII interface
* @ok_to_print: OK to print link up/down messages
* @init_media: OK to save duplex mode in @mii
@@ -318,10 +318,6 @@ unsigned int mii_check_media (struct mii_if_info *mii,
int advertise, lpa, media, duplex;
int lpa2 = 0;
- /* if forced media, go no further */
- if (mii->force_media)
- return 0; /* duplex did not change */
-
/* check current and old link status */
old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
new_carrier = (unsigned int) mii_link_ok(mii);
@@ -345,6 +341,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
*/
netif_carrier_on(mii->dev);
+ if (mii->force_media) {
+ if (ok_to_print)
+ netdev_info(mii->dev, "link up\n");
+ return 0; /* duplex did not change */
+ }
+
/* get MII advertise and LPA values */
if ((!init_media) && (mii->advertising))
advertise = mii->advertising;
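A sketch of the reordered carrier handling in mii_check_media() above: carrier transitions are now reported even for forced media, and only the speed/duplex negotiation is skipped. demo_check_media() is illustrative and omits the negotiated path:

/* Sketch: report carrier changes first; bail out of duplex negotiation only
 * after the "link up" message when the media is forced.
 */
#include <linux/mii.h>
#include <linux/netdevice.h>

static unsigned int demo_check_media(struct mii_if_info *mii, bool ok_to_print)
{
        unsigned int old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
        unsigned int new_carrier = (unsigned int)mii_link_ok(mii);

        if (old_carrier == new_carrier)
                return 0;                       /* no change */

        if (!new_carrier) {
                netif_carrier_off(mii->dev);
                if (ok_to_print)
                        netdev_info(mii->dev, "link down\n");
                return 0;
        }

        netif_carrier_on(mii->dev);

        if (mii->force_media) {
                if (ok_to_print)
                        netdev_info(mii->dev, "link up\n");
                return 0;                       /* duplex did not change */
        }

        /* negotiated speed/duplex handling would continue here */
        return 1;
}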
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a3c251b79f38..16adbc481772 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@ config AMD_PHY
config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
- depends on OF && HAS_IOMEM
+ depends on (OF || ACPI) && HAS_IOMEM
---help---
Currently supports the AMD 10GbE PHY
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 903dc3dc9ea7..9e3af54c9010 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -60,6 +60,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -74,6 +75,9 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
@@ -84,22 +88,43 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_MASK 0xfffffff0
#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
+#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+
+#define XGBE_PHY_SPEEDS 3
+#define XGBE_PHY_SPEED_1000 0
+#define XGBE_PHY_SPEED_2500 1
+#define XGBE_PHY_SPEED_10000 2
#define XGBE_AN_INT_CMPLT 0x01
#define XGBE_AN_INC_LINK 0x02
#define XGBE_AN_PG_RCV 0x04
+#define XGBE_AN_INT_MASK 0x07
#define XNP_MCF_NULL_MESSAGE 0x001
-#define XNP_ACK_PROCESSED (1 << 12)
-#define XNP_MP_FORMATTED (1 << 13)
-#define XNP_NP_EXCHANGE (1 << 15)
+#define XNP_ACK_PROCESSED BIT(12)
+#define XNP_MP_FORMATTED BIT(13)
+#define XNP_NP_EXCHANGE BIT(15)
#define XGBE_PHY_RATECHANGE_COUNT 500
+#define XGBE_PHY_KR_TRAINING_START 0x01
+#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
+
+#define XGBE_PHY_FEC_ENABLE 0x01
+#define XGBE_PHY_FEC_FORWARD 0x02
+#define XGBE_PHY_FEC_MASK 0x03
+
#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
#endif
+#ifndef MDIO_PMA_10GBR_FEC_ABILITY
+#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
+#endif
+
#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
#endif
@@ -108,6 +133,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define MDIO_AN_XNP 0x0016
#endif
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK 0x8001
#endif
@@ -116,18 +145,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define MDIO_AN_INT 0x8002
#endif
-#ifndef MDIO_AN_KR_CTRL
-#define MDIO_AN_KR_CTRL 0x8003
-#endif
-
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
-#ifndef MDIO_KR_CTRL_PDETECT
-#define MDIO_KR_CTRL_PDETECT 0x01
-#endif
-
/* SerDes integration register offsets */
#define SIR0_KR_RT_1 0x002c
#define SIR0_STATUS 0x0040
@@ -140,10 +161,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SIR0_STATUS_RX_READY_WIDTH 1
#define SIR0_STATUS_TX_READY_INDEX 8
#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
#define SIR1_SPEED_DATARATE_INDEX 4
#define SIR1_SPEED_DATARATE_WIDTH 2
-#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
-#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
#define SIR1_SPEED_PLLSEL_INDEX 3
#define SIR1_SPEED_PLLSEL_WIDTH 1
#define SIR1_SPEED_RATECHANGE_INDEX 6
@@ -153,20 +174,26 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SIR1_SPEED_WORDMODE_INDEX 0
#define SIR1_SPEED_WORDMODE_WIDTH 3
+#define SPEED_10000_BLWC 0
#define SPEED_10000_CDR 0x7
#define SPEED_10000_PLL 0x1
+#define SPEED_10000_PQ 0x1e
#define SPEED_10000_RATE 0x0
#define SPEED_10000_TXAMP 0xa
#define SPEED_10000_WORD 0x7
+#define SPEED_2500_BLWC 1
#define SPEED_2500_CDR 0x2
#define SPEED_2500_PLL 0x0
+#define SPEED_2500_PQ 0xa
#define SPEED_2500_RATE 0x1
#define SPEED_2500_TXAMP 0xf
#define SPEED_2500_WORD 0x1
+#define SPEED_1000_BLWC 1
#define SPEED_1000_CDR 0x2
#define SPEED_1000_PLL 0x0
+#define SPEED_1000_PQ 0xa
#define SPEED_1000_RATE 0x3
#define SPEED_1000_TXAMP 0xf
#define SPEED_1000_WORD 0x1
@@ -181,15 +208,6 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define RXTX_REG114_PQ_REG_INDEX 9
#define RXTX_REG114_PQ_REG_WIDTH 7
-#define RXTX_10000_BLWC 0
-#define RXTX_10000_PQ 0x1e
-
-#define RXTX_2500_BLWC 1
-#define RXTX_2500_PQ 0xa
-
-#define RXTX_1000_BLWC 1
-#define RXTX_1000_PQ 0xa
-
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -291,23 +309,44 @@ do { \
XRXTX_IOWRITE((_priv), _reg, reg_val); \
} while (0)
+static const u32 amd_xgbe_phy_serdes_blwc[] = {
+ SPEED_1000_BLWC,
+ SPEED_2500_BLWC,
+ SPEED_10000_BLWC,
+};
+
+static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
+ SPEED_1000_CDR,
+ SPEED_2500_CDR,
+ SPEED_10000_CDR,
+};
+
+static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
+ SPEED_1000_PQ,
+ SPEED_2500_PQ,
+ SPEED_10000_PQ,
+};
+
+static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
+ SPEED_1000_TXAMP,
+ SPEED_2500_TXAMP,
+ SPEED_10000_TXAMP,
+};
+
enum amd_xgbe_phy_an {
AMD_XGBE_AN_READY = 0,
- AMD_XGBE_AN_START,
- AMD_XGBE_AN_EVENT,
AMD_XGBE_AN_PAGE_RECEIVED,
AMD_XGBE_AN_INCOMPAT_LINK,
AMD_XGBE_AN_COMPLETE,
AMD_XGBE_AN_NO_LINK,
- AMD_XGBE_AN_EXIT,
AMD_XGBE_AN_ERROR,
};
enum amd_xgbe_phy_rx {
- AMD_XGBE_RX_READY = 0,
- AMD_XGBE_RX_BPA,
+ AMD_XGBE_RX_BPA = 0,
AMD_XGBE_RX_XNP,
AMD_XGBE_RX_COMPLETE,
+ AMD_XGBE_RX_ERROR,
};
enum amd_xgbe_phy_mode {
@@ -316,12 +355,13 @@ enum amd_xgbe_phy_mode {
};
enum amd_xgbe_phy_speedset {
- AMD_XGBE_PHY_SPEEDSET_1000_10000,
+ AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
AMD_XGBE_PHY_SPEEDSET_2500_10000,
};
struct amd_xgbe_phy_priv {
struct platform_device *pdev;
+ struct acpi_device *adev;
struct device *dev;
struct phy_device *phydev;
@@ -336,10 +376,24 @@ struct amd_xgbe_phy_priv {
void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
- /* Maintain link status for re-starting auto-negotiation */
- unsigned int link;
+ int an_irq;
+ char an_irq_name[IFNAMSIZ + 32];
+ struct work_struct an_irq_work;
+ unsigned int an_irq_allocated;
+
unsigned int speed_set;
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, the second for 2.5GbE and the third for 10GbE.
+ */
+ u32 serdes_blwc[XGBE_PHY_SPEEDS];
+ u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
+ u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
+ u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+
/* Auto-negotiation state machine support */
struct mutex an_mutex;
enum amd_xgbe_phy_an an_result;
@@ -348,7 +402,11 @@ struct amd_xgbe_phy_priv {
enum amd_xgbe_phy_rx kx_state;
struct work_struct an_work;
struct workqueue_struct *an_workqueue;
+ unsigned int an_supported;
unsigned int parallel_detect;
+ unsigned int fec_ability;
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
};
static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
@@ -359,7 +417,7 @@ static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
if (ret < 0)
return ret;
- ret |= 0x02;
+ ret |= XGBE_PHY_KR_TRAINING_ENABLE;
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
return 0;
@@ -373,7 +431,7 @@ static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
if (ret < 0)
return ret;
- ret &= ~0x02;
+ ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
return 0;
@@ -466,12 +524,16 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
+ priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+ priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+ priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -514,12 +576,16 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
+ priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+ priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+ priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -562,12 +628,16 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
+ priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+ priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+ priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -635,6 +705,38 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
return ret;
}
+static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
+ bool restart)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ ret |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ ret |= MDIO_AN_CTRL1_RESTART;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
+{
+ return amd_xgbe_phy_set_an(phydev, true, true);
+}
+
+static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
+{
+ return amd_xgbe_phy_set_an(phydev, false, false);
+}
+
static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
enum amd_xgbe_phy_rx *state)
{
@@ -645,7 +747,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
/* If we're not in KR mode then we're done */
if (!amd_xgbe_phy_in_kr_mode(phydev))
- return AMD_XGBE_AN_EVENT;
+ return AMD_XGBE_AN_PAGE_RECEIVED;
/* Enable/Disable FEC */
ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
@@ -660,10 +762,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
if (ret < 0)
return AMD_XGBE_AN_ERROR;
+ ret &= ~XGBE_PHY_FEC_MASK;
if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
- ret |= 0x01;
- else
- ret &= ~0x01;
+ ret |= priv->fec_ability;
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
@@ -672,14 +773,17 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
if (ret < 0)
return AMD_XGBE_AN_ERROR;
- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
+ if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
+ XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
- ret |= 0x01;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+ ret |= XGBE_PHY_KR_TRAINING_START;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ ret);
- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
+ XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
+ }
- return AMD_XGBE_AN_EVENT;
+ return AMD_XGBE_AN_PAGE_RECEIVED;
}
static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
@@ -696,7 +800,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
- return AMD_XGBE_AN_EVENT;
+ return AMD_XGBE_AN_PAGE_RECEIVED;
}
static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
@@ -735,11 +839,11 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
int ad_reg, lp_reg;
/* Check Extended Next Page support */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
if (ad_reg < 0)
return AMD_XGBE_AN_ERROR;
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
if (lp_reg < 0)
return AMD_XGBE_AN_ERROR;
@@ -748,226 +852,255 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
amd_xgbe_an_tx_training(phydev, state);
}
-static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
+static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_rx *state;
+ int ret;
+
+ state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
+ : &priv->kx_state;
+
+ switch (*state) {
+ case AMD_XGBE_RX_BPA:
+ ret = amd_xgbe_an_rx_bpa(phydev, state);
+ break;
+
+ case AMD_XGBE_RX_XNP:
+ ret = amd_xgbe_an_rx_xnp(phydev, state);
+ break;
+
+ default:
+ ret = AMD_XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
int ret;
/* Be sure we aren't looping trying to negotiate */
if (amd_xgbe_phy_in_kr_mode(phydev)) {
- if (priv->kr_state != AMD_XGBE_RX_READY)
+ priv->kr_state = AMD_XGBE_RX_ERROR;
+
+ if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
+ !(phydev->supported & SUPPORTED_2500baseX_Full))
+ return AMD_XGBE_AN_NO_LINK;
+
+ if (priv->kx_state != AMD_XGBE_RX_BPA)
return AMD_XGBE_AN_NO_LINK;
- priv->kr_state = AMD_XGBE_RX_BPA;
} else {
- if (priv->kx_state != AMD_XGBE_RX_READY)
+ priv->kx_state = AMD_XGBE_RX_ERROR;
+
+ if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
+ return AMD_XGBE_AN_NO_LINK;
+
+ if (priv->kr_state != AMD_XGBE_RX_BPA)
return AMD_XGBE_AN_NO_LINK;
- priv->kx_state = AMD_XGBE_RX_BPA;
}
- /* Set up Advertisement register 3 first */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (ret < 0)
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret)
return AMD_XGBE_AN_ERROR;
- if (phydev->supported & SUPPORTED_10000baseR_FEC)
- ret |= 0xc000;
- else
- ret &= ~0xc000;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
-
- /* Set up Advertisement register 2 next */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- if (ret < 0)
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (ret)
return AMD_XGBE_AN_ERROR;
- if (phydev->supported & SUPPORTED_10000baseKR_Full)
- ret |= 0x80;
- else
- ret &= ~0x80;
-
- if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
- (phydev->supported & SUPPORTED_2500baseX_Full))
- ret |= 0x20;
- else
- ret &= ~0x20;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
-
- /* Set up Advertisement register 1 last */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ret < 0)
+ ret = amd_xgbe_phy_restart_an(phydev);
+ if (ret)
return AMD_XGBE_AN_ERROR;
- if (phydev->supported & SUPPORTED_Pause)
- ret |= 0x400;
- else
- ret &= ~0x400;
+ return AMD_XGBE_AN_INCOMPAT_LINK;
+}
- if (phydev->supported & SUPPORTED_Asym_Pause)
- ret |= 0x800;
- else
- ret &= ~0x800;
+static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
+{
+ struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
- /* We don't intend to perform XNP */
- ret &= ~XNP_NP_EXCHANGE;
+ /* Interrupt reason must be read and cleared outside of IRQ context */
+ disable_irq_nosync(priv->an_irq);
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
+ queue_work(priv->an_workqueue, &priv->an_irq_work);
- /* Enable and start auto-negotiation */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ return IRQ_HANDLED;
+}
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
+static void amd_xgbe_an_irq_work(struct work_struct *work)
+{
+ struct amd_xgbe_phy_priv *priv = container_of(work,
+ struct amd_xgbe_phy_priv,
+ an_irq_work);
- ret |= MDIO_KR_CTRL_PDETECT;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
+ /* Avoid a race between enabling the IRQ and exiting the work by
+ * waiting for the work to finish and then queueing it
+ */
+ flush_work(&priv->an_work);
+ queue_work(priv->an_workqueue, &priv->an_work);
+}
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
+static void amd_xgbe_an_state_machine(struct work_struct *work)
+{
+ struct amd_xgbe_phy_priv *priv = container_of(work,
+ struct amd_xgbe_phy_priv,
+ an_work);
+ struct phy_device *phydev = priv->phydev;
+ enum amd_xgbe_phy_an cur_state = priv->an_state;
+ int int_reg, int_mask;
- ret |= MDIO_AN_CTRL1_ENABLE;
- ret |= MDIO_AN_CTRL1_RESTART;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+ mutex_lock(&priv->an_mutex);
- return AMD_XGBE_AN_EVENT;
-}
+ /* Read the interrupt */
+ int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
+ if (!int_reg)
+ goto out;
-static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
-{
- enum amd_xgbe_phy_an new_state;
- int ret;
+next_int:
+ if (int_reg < 0) {
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ int_mask = XGBE_AN_INT_MASK;
+ } else if (int_reg & XGBE_AN_PG_RCV) {
+ priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
+ int_mask = XGBE_AN_PG_RCV;
+ } else if (int_reg & XGBE_AN_INC_LINK) {
+ priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
+ int_mask = XGBE_AN_INC_LINK;
+ } else if (int_reg & XGBE_AN_INT_CMPLT) {
+ priv->an_state = AMD_XGBE_AN_COMPLETE;
+ int_mask = XGBE_AN_INT_CMPLT;
+ } else {
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ int_mask = 0;
+ }
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
+ /* Clear the interrupt to be processed */
+ int_reg &= ~int_mask;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
- new_state = AMD_XGBE_AN_EVENT;
- if (ret & XGBE_AN_PG_RCV)
- new_state = AMD_XGBE_AN_PAGE_RECEIVED;
- else if (ret & XGBE_AN_INC_LINK)
- new_state = AMD_XGBE_AN_INCOMPAT_LINK;
- else if (ret & XGBE_AN_INT_CMPLT)
- new_state = AMD_XGBE_AN_COMPLETE;
+ priv->an_result = priv->an_state;
- if (new_state != AMD_XGBE_AN_EVENT)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+again:
+ cur_state = priv->an_state;
- return new_state;
-}
+ switch (priv->an_state) {
+ case AMD_XGBE_AN_READY:
+ priv->an_supported = 0;
+ break;
-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- enum amd_xgbe_phy_rx *state;
- int ret;
+ case AMD_XGBE_AN_PAGE_RECEIVED:
+ priv->an_state = amd_xgbe_an_page_received(phydev);
+ priv->an_supported++;
+ break;
- state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
- : &priv->kx_state;
+ case AMD_XGBE_AN_INCOMPAT_LINK:
+ priv->an_supported = 0;
+ priv->parallel_detect = 0;
+ priv->an_state = amd_xgbe_an_incompat_link(phydev);
+ break;
- switch (*state) {
- case AMD_XGBE_RX_BPA:
- ret = amd_xgbe_an_rx_bpa(phydev, state);
+ case AMD_XGBE_AN_COMPLETE:
+ priv->parallel_detect = priv->an_supported ? 0 : 1;
+ netdev_dbg(phydev->attached_dev, "%s successful\n",
+ priv->an_supported ? "Auto negotiation"
+ : "Parallel detection");
break;
- case AMD_XGBE_RX_XNP:
- ret = amd_xgbe_an_rx_xnp(phydev, state);
+ case AMD_XGBE_AN_NO_LINK:
break;
default:
- ret = AMD_XGBE_AN_ERROR;
+ priv->an_state = AMD_XGBE_AN_ERROR;
}
- return ret;
-}
+ if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
+ int_reg = 0;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
+ netdev_err(phydev->attached_dev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
-{
- int ret;
+ int_reg = 0;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ }
- ret = amd_xgbe_phy_switch_mode(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
+ if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
+ priv->an_result = priv->an_state;
+ priv->an_state = AMD_XGBE_AN_READY;
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ priv->kx_state = AMD_XGBE_RX_BPA;
+ }
- return AMD_XGBE_AN_START;
-}
+ if (cur_state != priv->an_state)
+ goto again;
-static void amd_xgbe_an_state_machine(struct work_struct *work)
-{
- struct amd_xgbe_phy_priv *priv = container_of(work,
- struct amd_xgbe_phy_priv,
- an_work);
- struct phy_device *phydev = priv->phydev;
- enum amd_xgbe_phy_an cur_state;
- int sleep;
- unsigned int an_supported = 0;
+ if (int_reg)
+ goto next_int;
- /* Start in KX mode */
- if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX))
- priv->an_state = AMD_XGBE_AN_ERROR;
+out:
+ enable_irq(priv->an_irq);
- while (1) {
- mutex_lock(&priv->an_mutex);
+ mutex_unlock(&priv->an_mutex);
+}
- cur_state = priv->an_state;
+static int amd_xgbe_an_init(struct phy_device *phydev)
+{
+ int ret;
- switch (priv->an_state) {
- case AMD_XGBE_AN_START:
- an_supported = 0;
- priv->parallel_detect = 0;
- priv->an_state = amd_xgbe_an_start(phydev);
- break;
+ /* Set up Advertisement register 3 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ret < 0)
+ return ret;
- case AMD_XGBE_AN_EVENT:
- priv->an_state = amd_xgbe_an_event(phydev);
- break;
+ if (phydev->supported & SUPPORTED_10000baseR_FEC)
+ ret |= 0xc000;
+ else
+ ret &= ~0xc000;
- case AMD_XGBE_AN_PAGE_RECEIVED:
- priv->an_state = amd_xgbe_an_page_received(phydev);
- an_supported++;
- break;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
- case AMD_XGBE_AN_INCOMPAT_LINK:
- priv->an_state = amd_xgbe_an_incompat_link(phydev);
- break;
+ /* Set up Advertisement register 2 next */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (ret < 0)
+ return ret;
- case AMD_XGBE_AN_COMPLETE:
- priv->parallel_detect = an_supported ? 0 : 1;
- netdev_info(phydev->attached_dev, "%s successful\n",
- an_supported ? "Auto negotiation"
- : "Parallel detection");
- /* fall through */
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret |= 0x80;
+ else
+ ret &= ~0x80;
- case AMD_XGBE_AN_NO_LINK:
- case AMD_XGBE_AN_EXIT:
- goto exit_unlock;
+ if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+ (phydev->supported & SUPPORTED_2500baseX_Full))
+ ret |= 0x20;
+ else
+ ret &= ~0x20;
- default:
- priv->an_state = AMD_XGBE_AN_ERROR;
- }
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
- if (priv->an_state == AMD_XGBE_AN_ERROR) {
- netdev_err(phydev->attached_dev,
- "error during auto-negotiation, state=%u\n",
- cur_state);
- goto exit_unlock;
- }
+ /* Set up Advertisement register 1 last */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ret < 0)
+ return ret;
- sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
+ if (phydev->supported & SUPPORTED_Pause)
+ ret |= 0x400;
+ else
+ ret &= ~0x400;
- mutex_unlock(&priv->an_mutex);
+ if (phydev->supported & SUPPORTED_Asym_Pause)
+ ret |= 0x800;
+ else
+ ret &= ~0x800;
- if (sleep)
- usleep_range(20, 50);
- }
+ /* We don't intend to perform XNP */
+ ret &= ~XNP_NP_EXCHANGE;
-exit_unlock:
- priv->an_result = priv->an_state;
- priv->an_state = AMD_XGBE_AN_READY;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
- mutex_unlock(&priv->an_mutex);
+ return 0;
}
static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
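A hedged sketch of the interrupt-driven dispatch that replaces the old polled AMD_XGBE_AN_EVENT state: one auto-negotiation interrupt bit is mapped to a state and acknowledged before that state is handled. The DEMO_* names are illustrative; MDIO_AN_INT is the vendor register the driver defines for itself:

/* Sketch: decode one AN interrupt into a state and clear only the bit being
 * processed, as the reworked state machine above does.
 */
#include <linux/phy.h>
#include <linux/mdio.h>

#ifndef MDIO_AN_INT
#define MDIO_AN_INT             0x8002  /* vendor-specific AN interrupt register */
#endif

#define DEMO_AN_INT_CMPLT       0x01
#define DEMO_AN_INC_LINK        0x02
#define DEMO_AN_PG_RCV          0x04

enum demo_an_state {
        DEMO_AN_ERROR,
        DEMO_AN_PAGE_RECEIVED,
        DEMO_AN_INCOMPAT_LINK,
        DEMO_AN_COMPLETE,
};

static enum demo_an_state demo_decode_an_irq(struct phy_device *phydev)
{
        enum demo_an_state state;
        int reg, mask;

        reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
        if (reg < 0)
                return DEMO_AN_ERROR;

        if (reg & DEMO_AN_PG_RCV) {
                state = DEMO_AN_PAGE_RECEIVED;
                mask = DEMO_AN_PG_RCV;
        } else if (reg & DEMO_AN_INC_LINK) {
                state = DEMO_AN_INCOMPAT_LINK;
                mask = DEMO_AN_INC_LINK;
        } else if (reg & DEMO_AN_INT_CMPLT) {
                state = DEMO_AN_COMPLETE;
                mask = DEMO_AN_INT_CMPLT;
        } else {
                return DEMO_AN_ERROR;
        }

        /* acknowledge only the interrupt bit that is about to be handled */
        phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, reg & ~mask);

        return state;
}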
@@ -992,20 +1125,57 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
if (ret & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
- /* Make sure the XPCS and SerDes are in compatible states */
- return amd_xgbe_phy_xgmii_mode(phydev);
+ /* Disable auto-negotiation for now */
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear auto-negotiation interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
}
static int amd_xgbe_phy_config_init(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
+ struct net_device *netdev = phydev->attached_dev;
+ int ret;
+
+ if (!priv->an_irq_allocated) {
+ /* Allocate the auto-negotiation workqueue and interrupt */
+ snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
+ "%s-pcs", netdev_name(netdev));
+
+ priv->an_workqueue =
+ create_singlethread_workqueue(priv->an_irq_name);
+ if (!priv->an_workqueue) {
+ netdev_err(netdev, "phy workqueue creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_request_irq(priv->dev, priv->an_irq,
+ amd_xgbe_an_isr, 0, priv->an_irq_name,
+ priv);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ destroy_workqueue(priv->an_workqueue);
+ return ret;
+ }
+
+ priv->an_irq_allocated = 1;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
+ if (ret < 0)
+ return ret;
+ priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
/* Initialize supported features */
phydev->supported = SUPPORTED_Autoneg;
phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
phydev->supported |= SUPPORTED_Backplane;
- phydev->supported |= SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseR_FEC;
+ phydev->supported |= SUPPORTED_10000baseKR_Full;
switch (priv->speed_set) {
case AMD_XGBE_PHY_SPEEDSET_1000_10000:
phydev->supported |= SUPPORTED_1000baseKX_Full;
@@ -1014,11 +1184,33 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
phydev->supported |= SUPPORTED_2500baseX_Full;
break;
}
+
+ if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
+ phydev->supported |= SUPPORTED_10000baseR_FEC;
+
phydev->advertising = phydev->supported;
- /* Turn off and clear interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ /* Set initial mode - call the mode setting routines
+ * directly to ensure we are properly configured
+ */
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ else if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ else if (phydev->supported & SUPPORTED_2500baseX_Full)
+ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+ else
+ ret = -EINVAL;
+ if (ret < 0)
+ return ret;
+
+ /* Set up advertisement registers based on current settings */
+ ret = amd_xgbe_an_init(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable auto-negotiation interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
return 0;
}
@@ -1028,25 +1220,19 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
int ret;
/* Disable auto-negotiation */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ ret = amd_xgbe_phy_disable_an(phydev);
if (ret < 0)
return ret;
- ret &= ~MDIO_AN_CTRL1_ENABLE;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
-
/* Validate/Set specified speed */
switch (phydev->speed) {
case SPEED_10000:
- ret = amd_xgbe_phy_xgmii_mode(phydev);
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
break;
case SPEED_2500:
- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
- break;
-
case SPEED_1000:
- ret = amd_xgbe_phy_gmii_mode(phydev);
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
break;
default:
@@ -1066,10 +1252,11 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
return 0;
}
-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret;
if (phydev->autoneg != AUTONEG_ENABLE)
return amd_xgbe_phy_setup_forced(phydev);
@@ -1078,56 +1265,79 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
if (!(mmd_mask & MDIO_DEVS_AN))
return -EINVAL;
- /* Start/Restart the auto-negotiation state machine */
- mutex_lock(&priv->an_mutex);
+ /* Disable auto-negotiation interrupt */
+ disable_irq(priv->an_irq);
+
+ /* Start auto-negotiation in a supported mode */
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
+ else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+ (phydev->supported & SUPPORTED_2500baseX_Full))
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
+ else
+ ret = -EINVAL;
+ if (ret < 0) {
+ enable_irq(priv->an_irq);
+ return ret;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear any auto-negotiation interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
priv->an_result = AMD_XGBE_AN_READY;
- priv->an_state = AMD_XGBE_AN_START;
- priv->kr_state = AMD_XGBE_RX_READY;
- priv->kx_state = AMD_XGBE_RX_READY;
- mutex_unlock(&priv->an_mutex);
+ priv->an_state = AMD_XGBE_AN_READY;
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ priv->kx_state = AMD_XGBE_RX_BPA;
- queue_work(priv->an_workqueue, &priv->an_work);
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(priv->an_irq);
- return 0;
+ /* Set up advertisement registers based on current settings */
+ ret = amd_xgbe_an_init(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable and start auto-negotiation */
+ return amd_xgbe_phy_restart_an(phydev);
}
-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
- enum amd_xgbe_phy_an state;
+ int ret;
mutex_lock(&priv->an_mutex);
- state = priv->an_result;
+
+ ret = __amd_xgbe_phy_config_aneg(phydev);
+
mutex_unlock(&priv->an_mutex);
- return (state == AMD_XGBE_AN_COMPLETE);
+ return ret;
+}
+
+static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ return (priv->an_result == AMD_XGBE_AN_COMPLETE);
}
static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
- enum amd_xgbe_phy_an state;
- unsigned int check_again, autoneg;
int ret;
/* If we're doing auto-negotiation don't report link down */
- mutex_lock(&priv->an_mutex);
- state = priv->an_state;
- mutex_unlock(&priv->an_mutex);
-
- if (state != AMD_XGBE_AN_READY) {
+ if (priv->an_state != AMD_XGBE_AN_READY) {
phydev->link = 1;
return 0;
}
- /* Since the device can be in the wrong mode when a link is
- * (re-)established (cable connected after the interface is
- * up, etc.), the link status may report no link. If there
- * is no link, try switching modes and checking the status
- * again if auto negotiation is enabled.
- */
- check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
-again:
/* Link status is latched low, so read once to clear
* and then read again to get current state
*/
@@ -1141,25 +1351,6 @@ again:
phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
- if (!phydev->link) {
- if (check_again) {
- ret = amd_xgbe_phy_switch_mode(phydev);
- if (ret < 0)
- return ret;
- check_again = 0;
- goto again;
- }
- }
-
- autoneg = (phydev->link && !priv->link) ? 1 : 0;
- priv->link = phydev->link;
- if (autoneg) {
- /* Link is (back) up, re-start auto-negotiation */
- ret = amd_xgbe_phy_config_aneg(phydev);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -1249,6 +1440,7 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
static int amd_xgbe_phy_suspend(struct phy_device *phydev)
{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
int ret;
mutex_lock(&phydev->lock);
@@ -1257,6 +1449,8 @@ static int amd_xgbe_phy_suspend(struct phy_device *phydev)
if (ret < 0)
goto unlock;
+ priv->lpm_ctrl = ret;
+
ret |= MDIO_CTRL1_LPOWER;
phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
@@ -1270,69 +1464,106 @@ unlock:
static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
- int ret;
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
mutex_lock(&phydev->lock);
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- goto unlock;
+ priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
- ret &= ~MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+ mutex_unlock(&phydev->lock);
- ret = 0;
+ return 0;
+}
-unlock:
- mutex_unlock(&phydev->lock);
+static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
- return ret;
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ if (type == resource_type(r))
+ count++;
+ }
+
+ return count;
}
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv;
- struct platform_device *pdev;
- struct device *dev;
- char *wq_name;
- const __be32 *property;
- unsigned int speed_set;
+ struct platform_device *phy_pdev;
+ struct device *dev, *phy_dev;
+ unsigned int phy_resnum, phy_irqnum;
int ret;
- if (!phydev->dev.of_node)
+ if (!phydev->bus || !phydev->bus->parent)
return -EINVAL;
- pdev = of_find_device_by_node(phydev->dev.of_node);
- if (!pdev)
- return -EINVAL;
- dev = &pdev->dev;
-
- wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
- if (!wq_name) {
- ret = -ENOMEM;
- goto err_pdev;
- }
+ dev = phydev->bus->parent;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_name;
- }
+ if (!priv)
+ return -ENOMEM;
- priv->pdev = pdev;
+ priv->pdev = to_platform_device(dev);
+ priv->adev = ACPI_COMPANION(dev);
priv->dev = dev;
priv->phydev = phydev;
+ mutex_init(&priv->an_mutex);
+ INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
+ INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
+
+ if (!priv->adev || acpi_disabled) {
+ struct device_node *bus_node;
+ struct device_node *phy_node;
+
+ bus_node = priv->dev->of_node;
+ phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev, "unable to parse phy-handle\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ phy_resnum = 0;
+ phy_irqnum = 0;
+ } else {
+ /* In ACPI, the XGBE and PHY resources are grouped
+ * together, with the PHY resources at the end
+ */
+ phy_pdev = priv->pdev;
+ phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
+ IORESOURCE_MEM) - 3;
+ phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
+ IORESOURCE_IRQ) - 1;
+ }
+ phy_dev = &phy_pdev->dev;
/* Get the device mmio areas */
- priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
if (IS_ERR(priv->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(priv->rxtx_regs);
- goto err_priv;
+ goto err_put;
}
- priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
if (IS_ERR(priv->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
@@ -1340,7 +1571,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
goto err_rxtx;
}
- priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
if (IS_ERR(priv->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
@@ -1348,40 +1580,98 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
goto err_sir0;
}
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ goto err_sir1;
+ }
+ priv->an_irq = ret;
+
/* Get the device speed set property */
- speed_set = 0;
- property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
- NULL);
- if (property)
- speed_set = be32_to_cpu(*property);
-
- switch (speed_set) {
- case 0:
- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
- break;
- case 1:
- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
+ ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
+ &priv->speed_set);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_SPEEDSET_PROPERTY);
+ goto err_sir1;
+ }
+
+ switch (priv->speed_set) {
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
break;
default:
- dev_err(dev, "invalid amd,speed-set property\n");
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_SPEEDSET_PROPERTY);
ret = -EINVAL;
goto err_sir1;
}
- priv->link = 1;
+ if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_BLWC_PROPERTY,
+ priv->serdes_blwc,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_BLWC_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
+ sizeof(priv->serdes_blwc));
+ }
- mutex_init(&priv->an_mutex);
- INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
- priv->an_workqueue = create_singlethread_workqueue(wq_name);
- if (!priv->an_workqueue) {
- ret = -ENOMEM;
- goto err_sir1;
+ if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_CDR_RATE_PROPERTY,
+ priv->serdes_cdr_rate,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_CDR_RATE_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
+ sizeof(priv->serdes_cdr_rate));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_PQ_SKEW_PROPERTY,
+ priv->serdes_pq_skew,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_PQ_SKEW_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
+ sizeof(priv->serdes_pq_skew));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_TX_AMP_PROPERTY,
+ priv->serdes_tx_amp,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_TX_AMP_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
+ sizeof(priv->serdes_tx_amp));
}
phydev->priv = priv;
- kfree(wq_name);
- of_dev_put(pdev);
+ if (!priv->adev || acpi_disabled)
+ platform_device_put(phy_pdev);
return 0;
@@ -1400,15 +1690,13 @@ err_rxtx:
devm_release_mem_region(dev, priv->rxtx_res->start,
resource_size(priv->rxtx_res));
+err_put:
+ if (!priv->adev || acpi_disabled)
+ platform_device_put(phy_pdev);
+
err_priv:
devm_kfree(dev, priv);
-err_name:
- kfree(wq_name);
-
-err_pdev:
- of_dev_put(pdev);
-
return ret;
}
@@ -1417,13 +1705,12 @@ static void amd_xgbe_phy_remove(struct phy_device *phydev)
struct amd_xgbe_phy_priv *priv = phydev->priv;
struct device *dev = priv->dev;
- /* Stop any in process auto-negotiation */
- mutex_lock(&priv->an_mutex);
- priv->an_state = AMD_XGBE_AN_EXIT;
- mutex_unlock(&priv->an_mutex);
+ if (priv->an_irq_allocated) {
+ devm_free_irq(dev, priv->an_irq, priv);
- flush_workqueue(priv->an_workqueue);
- destroy_workqueue(priv->an_workqueue);
+ flush_workqueue(priv->an_workqueue);
+ destroy_workqueue(priv->an_workqueue);
+ }
/* Release resources */
devm_iounmap(dev, priv->sir1_regs);
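A sketch of the optional-property-with-default pattern the probe hunks above repeat for each SerDes setting; the helper name and the DEMO_PHY_SPEEDS size are illustrative:

/* Sketch: read a per-speed u32 array from a device property if present,
 * otherwise fall back to built-in defaults, as done for the amd,serdes-*
 * properties above.
 */
#include <linux/device.h>
#include <linux/property.h>
#include <linux/string.h>

#define DEMO_PHY_SPEEDS 3

static int demo_read_serdes_array(struct device *dev, const char *prop,
                                  u32 *dest, const u32 *defaults)
{
        int ret;

        if (!device_property_present(dev, prop)) {
                memcpy(dest, defaults, DEMO_PHY_SPEEDS * sizeof(*dest));
                return 0;
        }

        ret = device_property_read_u32_array(dev, prop, dest, DEMO_PHY_SPEEDS);
        if (ret)
                dev_err(dev, "invalid %s property\n", prop);

        return ret;
}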
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 3ad0e6e16c39..a08a3c78ba97 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -168,7 +168,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
- if (!link_update || !phydev || !phydev->bus)
+ if (!phydev || !phydev->bus)
return -EINVAL;
list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 50051f271b10..095ef3fe369a 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -443,9 +443,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!drv || !phydrv->suspend)
return false;
- /* PHY not attached? May suspend. */
+ /* PHY not attached? May suspend if the PHY has not already been
+ * suspended as part of a prior call to phy_disconnect() ->
+ * phy_detach() -> phy_suspend() because the parent netdev might be the
+ * MDIO bus driver and clock gated at this point.
+ */
if (!netdev)
- return true;
+ return !phydev->suspended;
/* Don't suspend PHY if the attached netdev parent may wake up.
* The parent may point to a PCI device, as in tg3 driver.
@@ -465,7 +469,6 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
static int mdio_bus_suspend(struct device *dev)
{
- struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
/* We must stop the state machine manually, otherwise it stops out of
@@ -479,19 +482,18 @@ static int mdio_bus_suspend(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
- return phydrv->suspend(phydev);
+ return phy_suspend(phydev);
}
static int mdio_bus_resume(struct device *dev)
{
- struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
int ret;
if (!mdio_bus_phy_may_suspend(phydev))
goto no_resume;
- ret = phydrv->resume(phydev);
+ ret = phy_resume(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 767cd110f496..cdcac6aa4260 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -439,6 +439,9 @@ int phy_start_aneg(struct phy_device *phydev)
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
+ /* Invalidate LP advertising flags */
+ phydev->lp_advertising = 0;
+
err = phydev->drv->config_aneg(phydev);
if (err < 0)
goto out_unlock;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3fc91e89f5a5..bdfe51fc3a65 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -699,6 +699,7 @@ int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ int ret = 0;
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
@@ -706,18 +707,31 @@ int phy_suspend(struct phy_device *phydev)
return -EBUSY;
if (phydrv->suspend)
- return phydrv->suspend(phydev);
- return 0;
+ ret = phydrv->suspend(phydev);
+
+ if (ret)
+ return ret;
+
+ phydev->suspended = true;
+
+ return ret;
}
EXPORT_SYMBOL(phy_suspend);
int phy_resume(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+ int ret = 0;
if (phydrv->resume)
- return phydrv->resume(phydev);
- return 0;
+ ret = phydrv->resume(phydev);
+
+ if (ret)
+ return ret;
+
+ phydev->suspended = false;
+
+ return ret;
}
EXPORT_SYMBOL(phy_resume);
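
A minimal sketch of why the new suspended tracking matters (illustrative only, hypothetical caller; it assumes the suspended flag added to struct phy_device by this series): a bus-level suspend path can now skip a PHY that was already suspended via phy_disconnect() -> phy_detach() -> phy_suspend(), instead of touching hardware whose parent may already be clock gated.

#include <linux/phy.h>

static int example_bus_suspend(struct phy_device *phydev)
{
	/* Already suspended on detach?  Nothing left to do. */
	if (phydev->suspended)
		return 0;

	/* phy_suspend() sets phydev->suspended on success (see above). */
	return phy_suspend(phydev);
}
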
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
/*
* See if we managed to reduce the size of the packet.
*/
- if (olen < isize) {
+ if (olen < isize && olen <= osize) {
state->stats.comp_bytes += olen;
state->stats.comp_packets++;
} else {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24..0e62274e884a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -28,6 +28,7 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
+#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>
@@ -176,7 +177,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
static int __team_option_inst_add_option(struct team *team,
struct team_option *option)
{
- struct team_port *port;
int err;
if (!option->per_port) {
@@ -184,12 +184,6 @@ static int __team_option_inst_add_option(struct team *team,
if (err)
goto inst_del_option;
}
-
- list_for_each_entry(port, &team->port_list, list) {
- err = __team_option_inst_add(team, option, port);
- if (err)
- goto inst_del_option;
- }
return 0;
inst_del_option:
@@ -629,6 +623,7 @@ static int team_change_mode(struct team *team, const char *kind)
static void team_notify_peers_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, notify_peers.dw.work);
@@ -636,9 +631,14 @@ static void team_notify_peers_work(struct work_struct *work)
schedule_delayed_work(&team->notify_peers.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ if (val)
schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval));
}
@@ -669,6 +669,7 @@ static void team_notify_peers_fini(struct team *team)
static void team_mcast_rejoin_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, mcast_rejoin.dw.work);
@@ -676,9 +677,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ if (val)
schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval));
}
@@ -1920,7 +1926,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
struct team *team = netdev_priv(dev);
netdev_features_t mask;
- mask = features;
+ mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@@ -1970,6 +1976,8 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
.ndo_change_carrier = team_change_carrier,
+ .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
+ .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
};
/***********************
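
The notify_peers and mcast_rejoin changes above both switch to atomic_dec_if_positive() so the pending counter can never be driven below zero by a concurrent reset of the counter. A condensed sketch of that pattern (illustrative only, not part of the patch):

#include <linux/atomic.h>
#include <linux/types.h>

/* Consume one pending retry.  Returns true when another retry should
 * still be scheduled.
 */
static bool example_consume_retry(atomic_t *count_pending)
{
	int val = atomic_dec_if_positive(count_pending);

	if (val < 0)
		return false;	/* counter was already zero: do no work */

	/* ... perform one notification here ... */

	return val > 0;		/* more retries remain */
}
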
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8c8dc16839a7..857dca47bf80 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,7 +65,6 @@
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
@@ -124,10 +123,9 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
- * the netdevice to be fit in one page. So we can make sure the success of
- * memory allocation. TODO: increase the limit. */
-#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
+ * to max number of VCPUs in guest. */
+#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS 4096
#define TUN_FLOW_EXPIRE (3 * HZ)
@@ -187,7 +185,7 @@ struct tun_struct {
struct net_device *dev;
netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
- NETIF_F_TSO6)
+ NETIF_F_TSO6|NETIF_F_UFO)
int vnet_hdr_sz;
int sndbuf;
@@ -258,7 +256,6 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
e->rxhash, e->queue_index);
- sock_rps_reset_flow_hash(e->rps_rxhash);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -375,10 +372,8 @@ unlock:
*/
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
- if (unlikely(e->rps_rxhash != hash)) {
- sock_rps_reset_flow_hash(e->rps_rxhash);
+ if (unlikely(e->rps_rxhash != hash))
e->rps_rxhash = hash;
- }
}
/* We try to identify a flow through its rxhash first. The reason that
@@ -1167,8 +1162,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- skb_reset_network_header(skb);
-
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1172,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- {
- static bool warned;
-
- if (!warned) {
- warned = true;
- netdev_warn(tun->dev,
- "%s: using disabled UFO feature; please fix this program\n",
- current->comm);
- }
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- if (skb->protocol == htons(ETH_P_IPV6))
- ipv6_proxy_select_ident(skb);
break;
- }
default:
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
@@ -1221,6 +1202,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
+ skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
@@ -1261,7 +1243,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
vlan_hlen = VLAN_HLEN;
if (tun->flags & IFF_VNET_HDR)
@@ -1298,6 +1280,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else {
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
@@ -1338,7 +1322,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
} veth;
veth.h_vlan_proto = skb->vlan_proto;
- veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
@@ -1380,7 +1364,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
&peeked, &off, &err);
if (!skb)
- return 0;
+ return err;
ret = tun_put_user(tun, tfile, skb, to);
if (unlikely(ret < 0))
@@ -1501,7 +1485,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
- if (ret > total_len) {
+ if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
@@ -1566,6 +1550,17 @@ static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
+static struct attribute *tun_dev_attrs[] = {
+ &dev_attr_tun_flags.attr,
+ &dev_attr_owner.attr,
+ &dev_attr_group.attr,
+ NULL
+};
+
+static const struct attribute_group tun_attr_group = {
+ .attrs = tun_dev_attrs
+};
+
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
struct tun_struct *tun;
@@ -1646,6 +1641,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
dev->ifindex = tfile->ifindex;
+ dev->sysfs_groups[0] = &tun_attr_group;
tun = netdev_priv(dev);
tun->dev = dev;
@@ -1681,11 +1677,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
-
- if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
- device_create_file(&tun->dev->dev, &dev_attr_owner) ||
- device_create_file(&tun->dev->dev, &dev_attr_group))
- pr_err("Failed to create tun sysfs files\n");
}
netif_carrier_on(tun->dev);
@@ -1746,6 +1737,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ if (arg & TUN_F_UFO) {
+ features |= NETIF_F_UFO;
+ arg &= ~TUN_F_UFO;
+ }
}
/* This gives the user a way to test for new features in future by
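
The sysfs change above replaces three device_create_file() calls, whose failures were only logged, with an attribute group that the driver core registers and removes together with the netdev. A generic sketch of that pattern (illustrative only, hypothetical names):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_dev_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static const struct attribute_group example_attr_group = {
	.attrs = example_dev_attrs,
};

/* In the setup path, before register_netdevice():
 *
 *	dev->sysfs_groups[0] = &example_attr_group;
 *
 * The group is then created and torn down with the device itself.
 */
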
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9c5aa922a9f4..6b8efcabb816 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -58,7 +58,6 @@
#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
-#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -154,6 +153,7 @@ struct hso_net {
struct hso_device *parent;
struct net_device *net;
struct rfkill *rfkill;
+ char name[24];
struct usb_endpoint_descriptor *in_endp;
struct usb_endpoint_descriptor *out_endp;
@@ -274,7 +274,6 @@ struct hso_device {
u8 usb_gone;
struct work_struct async_get_intf;
struct work_struct async_put_intf;
- struct work_struct reset_device;
struct usb_device *usb;
struct usb_interface *interface;
@@ -340,7 +339,6 @@ static void async_put_intf(struct work_struct *data);
static int hso_put_activity(struct hso_device *hso_dev);
static int hso_get_activity(struct hso_device *hso_dev);
static void tiocmget_intr_callback(struct urb *urb);
-static void reset_device(struct work_struct *data);
/*****************************************************************************/
/* Helping functions */
/*****************************************************************************/
@@ -533,6 +531,13 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
}
static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
+static struct attribute *hso_serial_dev_attrs[] = {
+ &dev_attr_hsotype.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(hso_serial_dev);
+
static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
{
int idx;
@@ -696,7 +701,7 @@ static void handle_usb_error(int status, const char *function,
case -ETIMEDOUT:
explanation = "protocol error";
if (hso_dev)
- schedule_work(&hso_dev->reset_device);
+ usb_queue_reset_device(hso_dev->interface);
break;
default:
explanation = "unknown status";
@@ -1271,7 +1276,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
goto err_out;
D1("Opening %d", serial->minor);
- kref_get(&serial->parent->ref);
/* setup */
tty->driver_data = serial;
@@ -1290,7 +1294,8 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
if (result) {
hso_stop_serial_device(serial->parent);
serial->port.count--;
- kref_put(&serial->parent->ref, hso_serial_ref_free);
+ } else {
+ kref_get(&serial->parent->ref);
}
} else {
D1("Port was already open");
@@ -1340,8 +1345,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
usb_autopm_put_interface(serial->parent->interface);
mutex_unlock(&serial->parent->mutex);
-
- kref_put(&serial->parent->ref, hso_serial_ref_free);
}
/* close the requested serial port */
@@ -1392,6 +1395,16 @@ static int hso_serial_write_room(struct tty_struct *tty)
return room;
}
+static void hso_serial_cleanup(struct tty_struct *tty)
+{
+ struct hso_serial *serial = tty->driver_data;
+
+ if (!serial)
+ return;
+
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
+}
+
/* setup the term */
static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
{
@@ -2198,8 +2211,8 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
for (i = 0; i < serial->num_rx_urbs; i++) {
if (serial->rx_urb[i]) {
- usb_kill_urb(serial->rx_urb[i]);
- serial->rx_urb_filled[i] = 0;
+ usb_kill_urb(serial->rx_urb[i]);
+ serial->rx_urb_filled[i] = 0;
}
}
serial->curr_rx_urb_idx = 0;
@@ -2228,15 +2241,15 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
return 0;
}
+static void hso_serial_tty_unregister(struct hso_serial *serial)
+{
+ tty_unregister_device(tty_drv, serial->minor);
+}
+
static void hso_serial_common_free(struct hso_serial *serial)
{
int i;
- if (serial->parent->dev)
- device_remove_file(serial->parent->dev, &dev_attr_hsotype);
-
- tty_unregister_device(tty_drv, serial->minor);
-
for (i = 0; i < serial->num_rx_urbs; i++) {
/* unlink and free RX URB */
usb_free_urb(serial->rx_urb[i]);
@@ -2246,6 +2259,7 @@ static void hso_serial_common_free(struct hso_serial *serial)
/* unlink and free TX URB */
usb_free_urb(serial->tx_urb);
+ kfree(serial->tx_buffer);
kfree(serial->tx_data);
tty_port_destroy(&serial->port);
}
@@ -2264,11 +2278,10 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
goto exit;
/* register our minor number */
- serial->parent->dev = tty_port_register_device(&serial->port, tty_drv,
- minor, &serial->parent->interface->dev);
+ serial->parent->dev = tty_port_register_device_attr(&serial->port,
+ tty_drv, minor, &serial->parent->interface->dev,
+ serial->parent, hso_serial_dev_groups);
dev = serial->parent->dev;
- dev_set_drvdata(dev, serial->parent);
- i = device_create_file(dev, &dev_attr_hsotype);
/* fill in specific data for later use */
serial->minor = minor;
@@ -2316,6 +2329,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
return 0;
exit:
+ hso_serial_tty_unregister(serial);
hso_serial_common_free(serial);
return -1;
}
@@ -2338,7 +2352,6 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
- INIT_WORK(&hso_dev->reset_device, reset_device);
return hso_dev;
}
@@ -2459,27 +2472,21 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
{
struct hso_net *hso_net = dev2net(hso_dev);
struct device *dev = &hso_net->net->dev;
- char *rfkn;
+ static u32 rfkill_counter;
- rfkn = kzalloc(20, GFP_KERNEL);
- if (!rfkn)
- dev_err(dev, "%s - Out of memory\n", __func__);
-
- snprintf(rfkn, 20, "hso-%d",
- interface->altsetting->desc.bInterfaceNumber);
+ snprintf(hso_net->name, sizeof(hso_net->name), "hso-%d",
+ rfkill_counter++);
- hso_net->rfkill = rfkill_alloc(rfkn,
+ hso_net->rfkill = rfkill_alloc(hso_net->name,
&interface_to_usbdev(interface)->dev,
RFKILL_TYPE_WWAN,
&hso_rfkill_ops, hso_dev);
if (!hso_net->rfkill) {
dev_err(dev, "%s - Out of memory\n", __func__);
- kfree(rfkn);
return;
}
if (rfkill_register(hso_net->rfkill) < 0) {
rfkill_destroy(hso_net->rfkill);
- kfree(rfkn);
hso_net->rfkill = NULL;
dev_err(dev, "%s - Failed to register rfkill\n", __func__);
return;
@@ -2594,7 +2601,6 @@ static void hso_free_serial_device(struct hso_device *hso_dev)
if (!serial)
return;
- set_serial_by_index(serial->minor, NULL);
hso_serial_common_free(serial);
@@ -2684,6 +2690,7 @@ static struct hso_device *hso_create_bulk_serial_device(
return hso_dev;
exit2:
+ hso_serial_tty_unregister(serial);
hso_serial_common_free(serial);
exit:
hso_free_tiomget(serial);
@@ -3083,26 +3090,6 @@ out:
return result;
}
-static void reset_device(struct work_struct *data)
-{
- struct hso_device *hso_dev =
- container_of(data, struct hso_device, reset_device);
- struct usb_device *usb = hso_dev->usb;
- int result;
-
- if (hso_dev->usb_gone) {
- D1("No reset during disconnect\n");
- } else {
- result = usb_lock_device_for_reset(usb, hso_dev->interface);
- if (result < 0)
- D1("unable to lock device for reset: %d\n", result);
- else {
- usb_reset_device(usb);
- usb_unlock_device(usb);
- }
- }
-}
-
static void hso_serial_ref_free(struct kref *ref)
{
struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3112,18 +3099,22 @@ static void hso_serial_ref_free(struct kref *ref)
static void hso_free_interface(struct usb_interface *interface)
{
- struct hso_serial *hso_dev;
+ struct hso_serial *serial;
int i;
for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
if (serial_table[i] &&
(serial_table[i]->interface == interface)) {
- hso_dev = dev2ser(serial_table[i]);
- tty_port_tty_hangup(&hso_dev->port, false);
- mutex_lock(&hso_dev->parent->mutex);
- hso_dev->parent->usb_gone = 1;
- mutex_unlock(&hso_dev->parent->mutex);
+ serial = dev2ser(serial_table[i]);
+ tty_port_tty_hangup(&serial->port, false);
+ mutex_lock(&serial->parent->mutex);
+ serial->parent->usb_gone = 1;
+ mutex_unlock(&serial->parent->mutex);
+ cancel_work_sync(&serial_table[i]->async_put_intf);
+ cancel_work_sync(&serial_table[i]->async_get_intf);
+ hso_serial_tty_unregister(serial);
kref_put(&serial_table[i]->ref, hso_serial_ref_free);
+ set_serial_by_index(i, NULL);
}
}
@@ -3215,6 +3206,7 @@ static const struct tty_operations hso_serial_ops = {
.close = hso_serial_close,
.write = hso_serial_write,
.write_room = hso_serial_write_room,
+ .cleanup = hso_serial_cleanup,
.ioctl = hso_serial_ioctl,
.set_termios = hso_serial_set_termios,
.chars_in_buffer = hso_serial_chars_in_buffer,
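
The hsotype attribute is now supplied through ATTRIBUTE_GROUPS() and tty_port_register_device_attr(), so it exists before the tty device is announced to userspace rather than being created (and possibly failing) afterwards. A minimal sketch of that idiom (illustrative only, hypothetical names):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/tty.h>

static ssize_t porttype_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example-port\n");
}
static DEVICE_ATTR_RO(porttype);

static struct attribute *example_tty_dev_attrs[] = {
	&dev_attr_porttype.attr,
	NULL
};
ATTRIBUTE_GROUPS(example_tty_dev);	/* defines example_tty_dev_groups */

/* Registration (hypothetical surrounding code):
 *
 *	tty_dev = tty_port_register_device_attr(&port, tty_drv, minor,
 *						 &intf->dev, drvdata,
 *						 example_tty_dev_groups);
 */
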
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index dcb6d33141e0..1e9cdca37014 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
awd.done = 0;
urb->context = &awd;
- status = usb_submit_urb(urb, GFP_NOIO);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
// something went wrong
usb_free_urb(urb);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 57ec23e8ccfa..5980ac6c48dd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
#include <linux/usb/cdc.h>
/* Version Information */
-#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
+#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -448,6 +448,7 @@ enum rtl_register_content {
#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
#define RTL8153_RMS RTL8153_MAX_PACKET
#define RTL8152_TX_TIMEOUT (5 * HZ)
+#define RTL8152_NAPI_WEIGHT 64
/* rtl8152 flags */
enum rtl8152_flags {
@@ -457,7 +458,7 @@ enum rtl8152_flags {
RTL8152_LINK_CHG,
SELECTIVE_SUSPEND,
PHY_RESET,
- SCHEDULE_TASKLET,
+ SCHEDULE_NAPI,
};
/* Define these values to match your device */
@@ -488,16 +489,16 @@ struct rx_desc {
#define RX_LEN_MASK 0x7fff
__le32 opts2;
-#define RD_UDP_CS (1 << 23)
-#define RD_TCP_CS (1 << 22)
-#define RD_IPV6_CS (1 << 20)
-#define RD_IPV4_CS (1 << 19)
+#define RD_UDP_CS BIT(23)
+#define RD_TCP_CS BIT(22)
+#define RD_IPV6_CS BIT(20)
+#define RD_IPV4_CS BIT(19)
__le32 opts3;
-#define IPF (1 << 23) /* IP checksum fail */
-#define UDPF (1 << 22) /* UDP checksum fail */
-#define TCPF (1 << 21) /* TCP checksum fail */
-#define RX_VLAN_TAG (1 << 16)
+#define IPF BIT(23) /* IP checksum fail */
+#define UDPF BIT(22) /* UDP checksum fail */
+#define TCPF BIT(21) /* TCP checksum fail */
+#define RX_VLAN_TAG BIT(16)
__le32 opts4;
__le32 opts5;
@@ -506,24 +507,24 @@ struct rx_desc {
struct tx_desc {
__le32 opts1;
-#define TX_FS (1 << 31) /* First segment of a packet */
-#define TX_LS (1 << 30) /* Final segment of a packet */
-#define GTSENDV4 (1 << 28)
-#define GTSENDV6 (1 << 27)
+#define TX_FS BIT(31) /* First segment of a packet */
+#define TX_LS BIT(30) /* Final segment of a packet */
+#define GTSENDV4 BIT(28)
+#define GTSENDV6 BIT(27)
#define GTTCPHO_SHIFT 18
#define GTTCPHO_MAX 0x7fU
#define TX_LEN_MAX 0x3ffffU
__le32 opts2;
-#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
-#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
-#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
-#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
+#define UDP_CS BIT(31) /* Calculate UDP/IP checksum */
+#define TCP_CS BIT(30) /* Calculate TCP/IP checksum */
+#define IPV4_CS BIT(29) /* Calculate IPv4 checksum */
+#define IPV6_CS BIT(28) /* Calculate IPv6 checksum */
#define MSS_SHIFT 17
#define MSS_MAX 0x7ffU
#define TCPHO_SHIFT 17
#define TCPHO_MAX 0x7ffU
-#define TX_VLAN_TAG (1 << 16)
+#define TX_VLAN_TAG BIT(16)
};
struct r8152;
@@ -549,14 +550,14 @@ struct tx_agg {
struct r8152 {
unsigned long flags;
struct usb_device *udev;
- struct tasklet_struct tl;
+ struct napi_struct napi;
struct usb_interface *intf;
struct net_device *netdev;
struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX];
struct rx_agg rx_info[RTL8152_MAX_RX];
struct list_head rx_done, tx_free;
- struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
@@ -580,7 +581,6 @@ struct r8152 {
u16 ocp_base;
u8 *intr_buff;
u8 version;
- u8 speed;
};
enum rtl_version {
@@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data)
ocp_reg_write(tp, OCP_SRAM_DATA, data);
}
-static u16 sram_read(struct r8152 *tp, u16 addr)
-{
- ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
- return ocp_reg_read(tp, OCP_SRAM_DATA);
-}
-
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1062,7 +1050,7 @@ static void read_bulk_callback(struct urb *urb)
spin_lock(&tp->rx_lock);
list_add_tail(&agg->list, &tp->rx_done);
spin_unlock(&tp->rx_lock);
- tasklet_schedule(&tp->tl);
+ napi_schedule(&tp->napi);
return;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
@@ -1126,7 +1114,7 @@ static void write_bulk_callback(struct urb *urb)
return;
if (!skb_queue_empty(&tp->tx_queue))
- tasklet_schedule(&tp->tl);
+ napi_schedule(&tp->napi);
}
static void intr_callback(struct urb *urb)
@@ -1168,12 +1156,12 @@ static void intr_callback(struct urb *urb)
d = urb->transfer_buffer;
if (INTR_LINK & __le16_to_cpu(d[0])) {
- if (!(tp->speed & LINK_STATUS)) {
+ if (!netif_carrier_ok(tp->netdev)) {
set_bit(RTL8152_LINK_CHG, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
} else {
- if (tp->speed & LINK_STATUS) {
+ if (netif_carrier_ok(tp->netdev)) {
set_bit(RTL8152_LINK_CHG, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
@@ -1245,6 +1233,7 @@ static int alloc_all_mem(struct r8152 *tp)
spin_lock_init(&tp->tx_lock);
INIT_LIST_HEAD(&tp->tx_free);
skb_queue_head_init(&tp->tx_queue);
+ skb_queue_head_init(&tp->rx_queue);
for (i = 0; i < RTL8152_MAX_RX; i++) {
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
@@ -1341,18 +1330,6 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
return agg;
}
-static inline __be16 get_protocol(struct sk_buff *skb)
-{
- __be16 protocol;
-
- if (skb->protocol == htons(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
- return protocol;
-}
-
/* r8152_csum_workaround()
* The hw limits the value of the transport offset. When the offset is out of
* range, calculate the checksum by sw.
@@ -1421,10 +1398,10 @@ static int msdn_giant_send_check(struct sk_buff *skb)
static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
{
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
u32 opts2;
- opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb));
+ opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
desc->opts2 |= cpu_to_le32(opts2);
}
}
@@ -1458,7 +1435,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
goto unavailable;
}
- switch (get_protocol(skb)) {
+ switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts1 |= GTSENDV4;
break;
@@ -1489,7 +1466,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
goto unavailable;
}
- switch (get_protocol(skb)) {
+ switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts2 |= IPV4_CS;
ip_protocol = ip_hdr(skb)->protocol;
@@ -1649,13 +1626,32 @@ return_result:
return checksum;
}
-static void rx_bottom(struct r8152 *tp)
+static int rx_bottom(struct r8152 *tp, int budget)
{
unsigned long flags;
struct list_head *cursor, *next, rx_queue;
+ int ret = 0, work_done = 0;
+
+ if (!skb_queue_empty(&tp->rx_queue)) {
+ while (work_done < budget) {
+ struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
+ struct net_device *netdev = tp->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned int pkt_len;
+
+ if (!skb)
+ break;
+
+ pkt_len = skb->len;
+ napi_gro_receive(&tp->napi, skb);
+ work_done++;
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ }
+ }
if (list_empty(&tp->rx_done))
- return;
+ goto out1;
INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
@@ -1708,9 +1704,14 @@ static void rx_bottom(struct r8152 *tp)
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
rtl_rx_vlan_tag(rx_desc, skb);
- netif_receive_skb(skb);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
+ if (work_done < budget) {
+ napi_gro_receive(&tp->napi, skb);
+ work_done++;
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ } else {
+ __skb_queue_tail(&tp->rx_queue, skb);
+ }
find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
@@ -1720,8 +1721,22 @@ find_next_rx:
}
submit:
- r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ if (!ret) {
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ } else {
+ urb->actual_length = 0;
+ list_add_tail(&agg->list, next);
+ }
+ }
+
+ if (!list_empty(&rx_queue)) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_splice_tail(&rx_queue, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
}
+
+out1:
+ return work_done;
}
static void tx_bottom(struct r8152 *tp)
@@ -1761,12 +1776,8 @@ static void tx_bottom(struct r8152 *tp)
} while (res == 0);
}
-static void bottom_half(unsigned long data)
+static void bottom_half(struct r8152 *tp)
{
- struct r8152 *tp;
-
- tp = (struct r8152 *)data;
-
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -1778,17 +1789,38 @@ static void bottom_half(unsigned long data)
if (!netif_carrier_ok(tp->netdev))
return;
- clear_bit(SCHEDULE_TASKLET, &tp->flags);
+ clear_bit(SCHEDULE_NAPI, &tp->flags);
- rx_bottom(tp);
tx_bottom(tp);
}
+static int r8152_poll(struct napi_struct *napi, int budget)
+{
+ struct r8152 *tp = container_of(napi, struct r8152, napi);
+ int work_done;
+
+ work_done = rx_bottom(tp, budget);
+ bottom_half(tp);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(napi);
+ }
+
+ return work_done;
+}
+
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
{
int ret;
+ /* The rx would be stopped, so skip submitting */
+ if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+ !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
+ return 0;
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
agg->head, agg_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
@@ -1805,7 +1837,11 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&agg->list, &tp->rx_done);
spin_unlock_irqrestore(&tp->rx_lock, flags);
- tasklet_schedule(&tp->tl);
+
+ netif_err(tp, rx_err, tp->netdev,
+ "Couldn't submit rx[%p], ret = %d\n", agg, ret);
+
+ napi_schedule(&tp->napi);
}
return ret;
@@ -1845,7 +1881,7 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- if (tp->speed & LINK_STATUS) {
+ if (netif_carrier_ok(netdev)) {
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
@@ -1924,11 +1960,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
if (!list_empty(&tp->tx_free)) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- set_bit(SCHEDULE_TASKLET, &tp->flags);
+ set_bit(SCHEDULE_NAPI, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
} else {
usb_mark_last_busy(tp->udev);
- tasklet_schedule(&tp->tl);
+ napi_schedule(&tp->napi);
}
} else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
netif_stop_queue(netdev);
@@ -2007,6 +2043,7 @@ static int rtl_start_rx(struct r8152 *tp)
{
int i, ret = 0;
+ napi_disable(&tp->napi);
INIT_LIST_HEAD(&tp->rx_done);
for (i = 0; i < RTL8152_MAX_RX; i++) {
INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2014,6 +2051,7 @@ static int rtl_start_rx(struct r8152 *tp)
if (ret)
break;
}
+ napi_enable(&tp->napi);
if (ret && ++i < RTL8152_MAX_RX) {
struct list_head rx_queue;
@@ -2044,6 +2082,9 @@ static int rtl_stop_rx(struct r8152 *tp)
for (i = 0; i < RTL8152_MAX_RX; i++)
usb_kill_urb(tp->rx_info[i].urb);
+ while (!skb_queue_empty(&tp->rx_queue))
+ dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
+
return 0;
}
@@ -2059,7 +2100,7 @@ static int rtl_enable(struct r8152 *tp)
rxdy_gated_en(tp, false);
- return rtl_start_rx(tp);
+ return 0;
}
static int rtl8152_enable(struct r8152 *tp)
@@ -2518,24 +2559,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = ocp_reg_read(tp, OCP_POWER_CFG);
data |= EN_10M_PLLOFF;
ocp_reg_write(tp, OCP_POWER_CFG, data);
- data = sram_read(tp, SRAM_IMPEDANCE);
- data &= ~RX_DRIVING_MASK;
- sram_write(tp, SRAM_IMPEDANCE, data);
+ sram_write(tp, SRAM_IMPEDANCE, 0x0b13);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= PFM_PWM_SWITCH;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
- data = sram_read(tp, SRAM_LPF_CFG);
- data |= LPF_AUTO_TUNE;
- sram_write(tp, SRAM_LPF_CFG, data);
+ /* Enable LPF corner auto tune */
+ sram_write(tp, SRAM_LPF_CFG, 0xf70f);
- data = sram_read(tp, SRAM_10M_AMP1);
- data |= GDAC_IB_UPALL;
- sram_write(tp, SRAM_10M_AMP1, data);
- data = sram_read(tp, SRAM_10M_AMP2);
- data |= AMP_DN;
- sram_write(tp, SRAM_10M_AMP2, data);
+ /* Adjust 10M Amplitude */
+ sram_write(tp, SRAM_10M_AMP1, 0x00af);
+ sram_write(tp, SRAM_10M_AMP2, 0x0208);
set_bit(PHY_RESET, &tp->flags);
}
@@ -2870,20 +2905,20 @@ static void set_carrier(struct r8152 *tp)
speed = rtl8152_get_speed(tp);
if (speed & LINK_STATUS) {
- if (!(tp->speed & LINK_STATUS)) {
+ if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_carrier_on(netdev);
+ rtl_start_rx(tp);
}
} else {
- if (tp->speed & LINK_STATUS) {
+ if (netif_carrier_ok(netdev)) {
netif_carrier_off(netdev);
- tasklet_disable(&tp->tl);
+ napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
- tasklet_enable(&tp->tl);
+ napi_enable(&tp->napi);
}
}
- tp->speed = speed;
}
static void rtl_work_func_t(struct work_struct *work)
@@ -2913,10 +2948,11 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
- (tp->speed & LINK_STATUS)) {
- clear_bit(SCHEDULE_TASKLET, &tp->flags);
- tasklet_schedule(&tp->tl);
+ /* don't schedule napi before linking */
+ if (test_bit(SCHEDULE_NAPI, &tp->flags) &&
+ netif_carrier_ok(tp->netdev)) {
+ clear_bit(SCHEDULE_NAPI, &tp->flags);
+ napi_schedule(&tp->napi);
}
if (test_bit(PHY_RESET, &tp->flags))
@@ -2937,8 +2973,7 @@ static int rtl8152_open(struct net_device *netdev)
if (res)
goto out;
- /* set speed to 0 to avoid autoresume try to submit rx */
- tp->speed = 0;
+ netif_carrier_off(netdev);
res = usb_autopm_get_interface(tp->intf);
if (res < 0) {
@@ -2955,7 +2990,7 @@ static int rtl8152_open(struct net_device *netdev)
cancel_delayed_work_sync(&tp->schedule);
/* disable the tx/rx, if the workqueue has enabled them. */
- if (tp->speed & LINK_STATUS)
+ if (netif_carrier_ok(netdev))
tp->rtl_ops.disable(tp);
}
@@ -2964,7 +2999,6 @@ static int rtl8152_open(struct net_device *netdev)
rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
- tp->speed = 0;
netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -2977,7 +3011,7 @@ static int rtl8152_open(struct net_device *netdev)
res);
free_all_mem(tp);
} else {
- tasklet_enable(&tp->tl);
+ napi_enable(&tp->napi);
}
mutex_unlock(&tp->control);
@@ -2993,15 +3027,16 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- tasklet_disable(&tp->tl);
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
- if (res < 0) {
+ if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
+ rtl_stop_rx(tp);
} else {
mutex_lock(&tp->control);
@@ -3205,10 +3240,10 @@ static void r8153_init(struct r8152 *tp)
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
ocp_data &= ~LPM_TIMER_MASK;
- if (tp->udev->speed == USB_SPEED_SUPER)
- ocp_data |= LPM_TIMER_500US;
- else
+ if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
ocp_data |= LPM_TIMER_500MS;
+ else
+ ocp_data |= LPM_TIMER_500US;
ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
@@ -3257,7 +3292,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
- tasklet_disable(&tp->tl);
+ napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
rtl_runtime_suspend_enable(tp, true);
@@ -3265,7 +3300,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
}
- tasklet_enable(&tp->tl);
+ napi_enable(&tp->napi);
}
out1:
mutex_unlock(&tp->control);
@@ -3289,7 +3324,7 @@ static int rtl8152_resume(struct usb_interface *intf)
rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
set_bit(WORK_ENABLE, &tp->flags);
- if (tp->speed & LINK_STATUS)
+ if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
} else {
tp->rtl_ops.up(tp);
@@ -3297,7 +3332,6 @@ static int rtl8152_resume(struct usb_interface *intf)
tp->mii.supports_gmii ?
SPEED_1000 : SPEED_100,
DUPLEX_FULL);
- tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
}
@@ -3849,7 +3883,6 @@ static int rtl8152_probe(struct usb_interface *intf,
if (ret)
goto out;
- tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
@@ -3863,8 +3896,7 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
@@ -3885,6 +3917,7 @@ static int rtl8152_probe(struct usb_interface *intf,
set_ethernet_addr(tp);
usb_set_intfdata(intf, tp);
+ netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT);
ret = register_netdev(netdev);
if (ret != 0) {
@@ -3898,15 +3931,13 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
- tasklet_disable(&tp->tl);
-
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
out1:
+ netif_napi_del(&tp->napi);
usb_set_intfdata(intf, NULL);
- tasklet_kill(&tp->tl);
out:
free_netdev(netdev);
return ret;
@@ -3923,7 +3954,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
if (udev->state == USB_STATE_NOTATTACHED)
set_bit(RTL8152_UNPLUG, &tp->flags);
- tasklet_kill(&tp->tl);
+ netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
free_netdev(tp->netdev);
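
For context, the shape of the NAPI conversion above -- a poll callback bounded by budget that only completes when it stayed under budget -- reduces to a sketch like this (illustrative only; the weight of 64 matches RTL8152_NAPI_WEIGHT):

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* Hand at most 'budget' received skbs to napi_gro_receive(),
	 * counting each one in work_done.
	 */

	if (work_done < budget) {
		napi_complete(napi);
		/* If more rx completions arrived meanwhile, reschedule:
		 *	napi_schedule(napi);
		 */
	}

	return work_done;
}

/* Registered once at probe time:
 *	netif_napi_add(netdev, &priv->napi, example_poll, 64);
 * and paired with napi_enable()/napi_disable() around open/close.
 */
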
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 99b69af14274..4a1e9c489f1f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
int ret;
udelay(1);
- ret = sr_read_reg(dev, EPCR, &tmp);
+ ret = sr_read_reg(dev, SR_EPCR, &tmp);
if (ret < 0)
return ret;
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
mutex_lock(&dev->phy_mutex);
- sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
- sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+ sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
ret = wait_phy_eeprom_ready(dev, phy);
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, EPCR, 0x0);
- ret = sr_read(dev, EPDR, 2, value);
+ sr_write_reg(dev, SR_EPCR, 0x0);
+ ret = sr_read(dev, SR_EPDR, 2, value);
netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
mutex_lock(&dev->phy_mutex);
- ret = sr_write(dev, EPDR, 2, &value);
+ ret = sr_write(dev, SR_EPDR, 2, &value);
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
- sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+ sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
(EPCR_WEP | EPCR_ERPRW));
ret = wait_phy_eeprom_ready(dev, phy);
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, EPCR, 0x0);
+ sr_write_reg(dev, SR_EPCR, 0x0);
out_unlock:
mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (loc == MII_BMSR) {
u8 value;
- sr_read_reg(dev, NSR, &value);
+ sr_read_reg(dev, SR_NSR, &value);
if (value & NSR_LINKST)
rc = 1;
}
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
int rc = 0;
/* Get the Link Status directly */
- sr_read_reg(dev, NSR, &value);
+ sr_read_reg(dev, SR_NSR, &value);
if (value & NSR_LINKST)
rc = 1;
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
}
}
- sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
- sr_write_reg_async(dev, RCR, rx_ctl);
+ sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
+ sr_write_reg_async(dev, SR_RCR, rx_ctl);
}
static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
}
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- sr_write_async(dev, PAR, 6, netdev->dev_addr);
+ sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
return 0;
}
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
mii->phy_id_mask = 0x1f;
mii->reg_num_mask = 0x1f;
- sr_write_reg(dev, NCR, NCR_RST);
+ sr_write_reg(dev, SR_NCR, NCR_RST);
udelay(20);
/* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
* EEPROM automatically to PAR. In case there is no EEPROM externally,
* a default MAC address is stored in PAR for making chip work properly.
*/
- if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+ if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
netdev_err(netdev, "Error reading MAC address\n");
ret = -ENODEV;
goto out;
}
/* power up and reset phy */
- sr_write_reg(dev, PRR, PRR_PHY_RST);
+ sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
/* at least 10ms, here 20ms for safe */
mdelay(20);
- sr_write_reg(dev, PRR, 0);
+ sr_write_reg(dev, SR_PRR, 0);
/* at least 1ms, here 2ms for reading right register */
udelay(2 * 1000);
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index fd687c575e74..258b030277e7 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -14,13 +14,13 @@
/* sr9700 spec. register table on Linux platform */
/* Network Control Reg */
-#define NCR 0x00
+#define SR_NCR 0x00
#define NCR_RST (1 << 0)
#define NCR_LBK (3 << 1)
#define NCR_FDX (1 << 3)
#define NCR_WAKEEN (1 << 6)
/* Network Status Reg */
-#define NSR 0x01
+#define SR_NSR 0x01
#define NSR_RXRDY (1 << 0)
#define NSR_RXOV (1 << 1)
#define NSR_TX1END (1 << 2)
@@ -30,7 +30,7 @@
#define NSR_LINKST (1 << 6)
#define NSR_SPEED (1 << 7)
/* Tx Control Reg */
-#define TCR 0x02
+#define SR_TCR 0x02
#define TCR_CRC_DIS (1 << 1)
#define TCR_PAD_DIS (1 << 2)
#define TCR_LC_CARE (1 << 3)
@@ -38,7 +38,7 @@
#define TCR_EXCECM (1 << 5)
#define TCR_LF_EN (1 << 6)
/* Tx Status Reg for Packet Index 1 */
-#define TSR1 0x03
+#define SR_TSR1 0x03
#define TSR1_EC (1 << 2)
#define TSR1_COL (1 << 3)
#define TSR1_LC (1 << 4)
@@ -46,7 +46,7 @@
#define TSR1_LOC (1 << 6)
#define TSR1_TLF (1 << 7)
/* Tx Status Reg for Packet Index 2 */
-#define TSR2 0x04
+#define SR_TSR2 0x04
#define TSR2_EC (1 << 2)
#define TSR2_COL (1 << 3)
#define TSR2_LC (1 << 4)
@@ -54,7 +54,7 @@
#define TSR2_LOC (1 << 6)
#define TSR2_TLF (1 << 7)
/* Rx Control Reg*/
-#define RCR 0x05
+#define SR_RCR 0x05
#define RCR_RXEN (1 << 0)
#define RCR_PRMSC (1 << 1)
#define RCR_RUNT (1 << 2)
@@ -62,87 +62,87 @@
#define RCR_DIS_CRC (1 << 4)
#define RCR_DIS_LONG (1 << 5)
/* Rx Status Reg */
-#define RSR 0x06
+#define SR_RSR 0x06
#define RSR_AE (1 << 2)
#define RSR_MF (1 << 6)
#define RSR_RF (1 << 7)
/* Rx Overflow Counter Reg */
-#define ROCR 0x07
+#define SR_ROCR 0x07
#define ROCR_ROC (0x7F << 0)
#define ROCR_RXFU (1 << 7)
/* Back Pressure Threshold Reg */
-#define BPTR 0x08
+#define SR_BPTR 0x08
#define BPTR_JPT (0x0F << 0)
#define BPTR_BPHW (0x0F << 4)
/* Flow Control Threshold Reg */
-#define FCTR 0x09
+#define SR_FCTR 0x09
#define FCTR_LWOT (0x0F << 0)
#define FCTR_HWOT (0x0F << 4)
/* rx/tx Flow Control Reg */
-#define FCR 0x0A
+#define SR_FCR 0x0A
#define FCR_FLCE (1 << 0)
#define FCR_BKPA (1 << 4)
#define FCR_TXPEN (1 << 5)
#define FCR_TXPF (1 << 6)
#define FCR_TXP0 (1 << 7)
/* Eeprom & Phy Control Reg */
-#define EPCR 0x0B
+#define SR_EPCR 0x0B
#define EPCR_ERRE (1 << 0)
#define EPCR_ERPRW (1 << 1)
#define EPCR_ERPRR (1 << 2)
#define EPCR_EPOS (1 << 3)
#define EPCR_WEP (1 << 4)
/* Eeprom & Phy Address Reg */
-#define EPAR 0x0C
+#define SR_EPAR 0x0C
#define EPAR_EROA (0x3F << 0)
#define EPAR_PHY_ADR_MASK (0x03 << 6)
#define EPAR_PHY_ADR (0x01 << 6)
/* Eeprom & Phy Data Reg */
-#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
+#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
/* Wakeup Control Reg */
-#define WCR 0x0F
+#define SR_WCR 0x0F
#define WCR_MAGICST (1 << 0)
#define WCR_LINKST (1 << 2)
#define WCR_MAGICEN (1 << 3)
#define WCR_LINKEN (1 << 5)
/* Physical Address Reg */
-#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
+#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
/* Multicast Address Reg */
-#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
+#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
/* 0x1e unused */
/* Phy Reset Reg */
-#define PRR 0x1F
+#define SR_PRR 0x1F
#define PRR_PHY_RST (1 << 0)
/* Tx sdram Write Pointer Address Low */
-#define TWPAL 0x20
+#define SR_TWPAL 0x20
/* Tx sdram Write Pointer Address High */
-#define TWPAH 0x21
+#define SR_TWPAH 0x21
/* Tx sdram Read Pointer Address Low */
-#define TRPAL 0x22
+#define SR_TRPAL 0x22
/* Tx sdram Read Pointer Address High */
-#define TRPAH 0x23
+#define SR_TRPAH 0x23
/* Rx sdram Write Pointer Address Low */
-#define RWPAL 0x24
+#define SR_RWPAL 0x24
/* Rx sdram Write Pointer Address High */
-#define RWPAH 0x25
+#define SR_RWPAH 0x25
/* Rx sdram Read Pointer Address Low */
-#define RRPAL 0x26
+#define SR_RRPAL 0x26
/* Rx sdram Read Pointer Address High */
-#define RRPAH 0x27
+#define SR_RRPAH 0x27
/* Vendor ID register */
-#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
+#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
/* Product ID register */
-#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
+#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
/* CHIP Revision register */
-#define CHIPR 0x2C
+#define SR_CHIPR 0x2C
/* 0x2D --> 0xEF unused */
/* USB Device Address */
-#define USBDA 0xF0
+#define SR_USBDA 0xF0
#define USBDA_USBFA (0x7F << 0)
/* RX packet Counter Reg */
-#define RXC 0xF1
+#define SR_RXC 0xF1
/* Tx packet Counter & USB Status Reg */
-#define TXC_USBS 0xF2
+#define SR_TXC_USBS 0xF2
#define TXC_USBS_TXC0 (1 << 0)
#define TXC_USBS_TXC1 (1 << 1)
#define TXC_USBS_TXC2 (1 << 2)
@@ -150,7 +150,7 @@
#define TXC_USBS_SUSFLAG (1 << 6)
#define TXC_USBS_RXFAULT (1 << 7)
/* USB Control register */
-#define USBC 0xF4
+#define SR_USBC 0xF4
#define USBC_EP3NAK (1 << 4)
#define USBC_EP3ACK (1 << 5)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3a6770a65d78..449835f4331e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -160,20 +160,19 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
- int tmp, i;
+ int tmp = -1, ret;
unsigned char buf [13];
- tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
- if (tmp != 12) {
+ ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+ if (ret == 12)
+ tmp = hex2bin(dev->net->dev_addr, buf, 6);
+ if (tmp < 0) {
dev_dbg(&dev->udev->dev,
"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
- if (tmp >= 0)
- tmp = -EINVAL;
- return tmp;
+ if (ret >= 0)
+ ret = -EINVAL;
+ return ret;
}
- for (i = tmp = 0; i < 6; i++, tmp += 2)
- dev->net->dev_addr [i] =
- (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
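
The usbnet change above swaps the open-coded hex_to_bin() loop for hex2bin(), which also rejects non-hex characters in the descriptor string. A condensed sketch (illustrative only, hypothetical helper):

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>

/* str must hold 12 hex digits (no separators), as returned by
 * usb_string() for the MAC address descriptor.
 */
static int example_parse_mac(const char *str, u8 mac[ETH_ALEN])
{
	if (hex2bin(mac, str, ETH_ALEN) < 0)
		return -EINVAL;

	return 0;
}
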
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8ad596573d17..4cca36ebc4fb 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -469,6 +469,14 @@ static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
[VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};
+static struct net *veth_get_link_net(const struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
+
+ return peer ? dev_net(peer) : dev_net(dev);
+}
+
static struct rtnl_link_ops veth_link_ops = {
.kind = DRV_NAME,
.priv_size = sizeof(struct veth_priv),
@@ -478,6 +486,7 @@ static struct rtnl_link_ops veth_link_ops = {
.dellink = veth_dellink,
.policy = veth_policy,
.maxtype = VETH_INFO_MAX,
+ .get_link_net = veth_get_link_net,
};
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5ca97713bfb3..110a2cf67244 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- {
- static bool warned;
-
- if (!warned) {
- warned = true;
- netdev_warn(dev,
- "host using disabled UFO feature; please fix it\n");
- }
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
- }
case VIRTIO_NET_HDR_GSO_TCPV6:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -925,6 +918,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
+ /* timestamp packet in software */
+ skb_tx_timestamp(skb);
+
/* Try to transmit */
err = xmit_skb(sq, skb);
@@ -1376,6 +1372,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_ringparam = virtnet_get_ringparam,
.set_channels = virtnet_set_channels,
.get_channels = virtnet_get_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
};
#define MIN_MTU 68
@@ -1748,7 +1745,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
- dev->hw_features |= NETIF_F_TSO
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
@@ -1758,9 +1755,13 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_TSO6;
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
dev->hw_features |= NETIF_F_TSO_ECN;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+ dev->hw_features |= NETIF_F_UFO;
+
+ dev->features |= NETIF_F_GSO_ROBUST;
if (gso)
- dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
/* (!csum && gso) case will be fixed by register_netdev() */
}
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1798,7 +1799,8 @@ static int virtnet_probe(struct virtio_device *vdev)
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1994,9 +1996,9 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
- VIRTIO_NET_F_GUEST_ECN,
+ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
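
The two timestamping hunks above work together: skb_tx_timestamp() stamps each packet just before it is queued, and ethtool_op_get_ts_info advertises software timestamping to ethtool -T. A generic sketch (illustrative only, not the driver's actual xmit path):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* Take the software tx timestamp (if the socket requested one)
	 * right before the packet is handed to the device.
	 */
	skb_tx_timestamp(skb);

	/* ... enqueue skb to the hardware / virtqueue here ... */

	return NETDEV_TX_OK;
}

static const struct ethtool_ops example_ethtool_ops = {
	/* Reports SOF_TIMESTAMPING_TX_SOFTWARE etc. to "ethtool -T". */
	.get_ts_info = ethtool_op_get_ts_info,
};
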
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 4d84912c99ba..3718d024f638 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -342,6 +342,7 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING2_MAX_SIZE 2048
#define VMXNET3_RC_RING_MAX_SIZE 8192
/* a list of reasons for queue stop */
@@ -392,7 +393,7 @@ struct Vmxnet3_DriverInfo {
};
-#define VMXNET3_REV1_MAGIC 0xbabefee1
+#define VMXNET3_REV1_MAGIC 3133079265u
/*
* QueueDescPA must be 128 bytes aligned. It points to an array of
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index afd295348ddb..294214c15292 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1038,9 +1038,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
le32_add_cpu(&tq->shared->txNumDeferred, 1);
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
gdesc->txd.ti = 1;
- gdesc->txd.tci = vlan_tx_tag_get(skb);
+ gdesc->txd.tci = skb_vlan_tag_get(skb);
}
/* finally flips the GEN bit of the SOP desc. */
@@ -2505,6 +2505,9 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
sz * sz);
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+ ring1_size = (ring1_size + sz - 1) / sz * sz;
+ ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
+ sz * sz);
comp_size = ring0_size + ring1_size;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -2585,7 +2588,7 @@ vmxnet3_open(struct net_device *netdev)
err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
adapter->rx_ring_size,
- VMXNET3_DEF_RX_RING_SIZE);
+ adapter->rx_ring2_size);
if (err)
goto queue_err;
@@ -2964,6 +2967,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
spin_lock_init(&adapter->cmd_lock);
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
@@ -3286,27 +3290,15 @@ skip_arp:
static int
vmxnet3_resume(struct device *device)
{
- int err, i = 0;
+ int err;
unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- struct Vmxnet3_PMConf *pmConf;
if (!netif_running(netdev))
return 0;
- /* Destroy wake-up filters. */
- pmConf = adapter->pm_conf;
- memset(pmConf, 0, sizeof(*pmConf));
-
- adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
- adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
- *pmConf));
- adapter->shared->devRead.pmConfDesc.confPA =
- cpu_to_le64(adapter->pm_conf_pa);
-
- netif_device_attach(netdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device_mem(pdev);
@@ -3315,15 +3307,31 @@ vmxnet3_resume(struct device *device)
pci_enable_wake(pdev, PCI_D0, 0);
+ vmxnet3_alloc_intr_resources(adapter);
+
+ /* During hibernate and suspend, device has to be reinitialized as the
+ * device state need not be preserved.
+ */
+
+ /* Need not check adapter state as other reset tasks cannot run during
+ * device resume.
+ */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_PMCFG);
+ VMXNET3_CMD_QUIESCE_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- vmxnet3_alloc_intr_resources(adapter);
- vmxnet3_request_irqs(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++)
- napi_enable(&adapter->rx_queue[i].napi);
- vmxnet3_enable_all_intrs(adapter);
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
+
+ vmxnet3_reset_dev(adapter);
+ err = vmxnet3_activate_dev(adapter);
+ if (err != 0) {
+ netdev_err(netdev,
+ "failed to re-activate on resume, error: %d", err);
+ vmxnet3_force_close(adapter);
+ return err;
+ }
+ netif_device_attach(netdev);
return 0;
}
@@ -3331,6 +3339,8 @@ vmxnet3_resume(struct device *device)
static const struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
+ .freeze = vmxnet3_suspend,
+ .restore = vmxnet3_resume,
};
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b7b53329d575..4c8a944d58b4 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -323,7 +323,7 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
vmxnet3_tq_driver_stats[i].offset);
}
- for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (j = 0; j < adapter->num_rx_queues; j++) {
base = (u8 *)&adapter->rqd_start[j].stats;
*buf++ = (u64) j;
for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
@@ -447,12 +447,12 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
param->rx_mini_max_pending = 0;
- param->rx_jumbo_max_pending = 0;
+ param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
param->rx_pending = adapter->rx_ring_size;
param->tx_pending = adapter->tx_ring_size;
param->rx_mini_pending = 0;
- param->rx_jumbo_pending = 0;
+ param->rx_jumbo_pending = adapter->rx_ring2_size;
}
@@ -461,7 +461,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u32 new_tx_ring_size, new_rx_ring_size;
+ u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
u32 sz;
int err = 0;
@@ -473,6 +473,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
VMXNET3_RX_RING_MAX_SIZE)
return -EINVAL;
+ if (param->rx_jumbo_pending == 0 ||
+ param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
+ return -EINVAL;
+
/* if adapter not yet initialized, do nothing */
if (adapter->rx_buf_per_pkt == 0) {
netdev_err(netdev, "adapter not completely initialized, "
@@ -500,8 +504,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
sz) != 0)
return -EINVAL;
- if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
- new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
+ /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
+ new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
+ ~VMXNET3_RING_SIZE_MASK;
+ new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
+ VMXNET3_RX_RING2_MAX_SIZE);
+
+ if (new_tx_ring_size == adapter->tx_ring_size &&
+ new_rx_ring_size == adapter->rx_ring_size &&
+ new_rx_ring2_size == adapter->rx_ring2_size) {
return 0;
}
@@ -522,7 +533,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
- new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+ new_rx_ring_size, new_rx_ring2_size);
if (err) {
/* failed, most likely because of OOM, try default
@@ -530,11 +541,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
netdev_err(netdev, "failed to apply new sizes, "
"try the default ones\n");
new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
err = vmxnet3_create_queues(adapter,
new_tx_ring_size,
new_rx_ring_size,
- VMXNET3_DEF_RX_RING_SIZE);
+ new_rx_ring2_size);
if (err) {
netdev_err(netdev, "failed to create queues "
"with default sizes. Closing it\n");
@@ -549,6 +561,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
}
adapter->tx_ring_size = new_tx_ring_size;
adapter->rx_ring_size = new_rx_ring_size;
+ adapter->rx_ring2_size = new_rx_ring2_size;
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
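A note on the arithmetic in the set_ringparam hunk above: the new rx-jumbo handling rounds the requested count up to a multiple of VMXNET3_RING_SIZE_ALIGN with the usual (x + mask) & ~mask trick and then clamps it to VMXNET3_RX_RING2_MAX_SIZE. A minimal user-space sketch of that calculation follows; the alignment value is an assumption here, the authoritative macro lives in vmxnet3_int.h.
#include <stdio.h>
#include <stdint.h>
/* Assumed for illustration; the authoritative values are in vmxnet3_int.h. */
#define VMXNET3_RING_SIZE_ALIGN   32u
#define VMXNET3_RING_SIZE_MASK    (VMXNET3_RING_SIZE_ALIGN - 1)
#define VMXNET3_RX_RING2_MAX_SIZE 2048u   /* matches the define added above */
int main(void)
{
	uint32_t requested = 1000;    /* e.g. "ethtool -G ethX rx-jumbo 1000" */
	/* Round up to the next multiple of the ring alignment ... */
	uint32_t ring2 = (requested + VMXNET3_RING_SIZE_MASK) &
			 ~VMXNET3_RING_SIZE_MASK;
	/* ... then clamp to the ring2 hardware maximum. */
	if (ring2 > VMXNET3_RX_RING2_MAX_SIZE)
		ring2 = VMXNET3_RX_RING2_MAX_SIZE;
	printf("requested %u -> ring2 size %u\n", requested, ring2);
	return 0;
}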
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5f0199f6c31e..cd71c77f78f2 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.3.4.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01020100
+#define VMXNET3_DRIVER_VERSION_NUM 0x01030400
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -352,6 +352,7 @@ struct vmxnet3_adapter {
/* Ring sizes */
u32 tx_ring_size;
u32 rx_ring_size;
+ u32 rx_ring2_size;
struct work_struct work;
@@ -384,6 +385,7 @@ struct vmxnet3_adapter {
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 256
+#define VMXNET3_DEF_RX_RING2_SIZE 128
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
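For reference, VMXNET3_DRIVER_VERSION_NUM packs one version component per byte, so the 0x01030400 added above is the numeric form of the "1.3.4.0-k" string. A trivial sketch of the unpacking:
#include <stdio.h>
#include <stdint.h>
#define VMXNET3_DRIVER_VERSION_NUM 0x01030400   /* value from the hunk above */
int main(void)
{
	uint32_t v = VMXNET3_DRIVER_VERSION_NUM;
	/* One component per byte, most significant byte first: prints 1.3.4.0 */
	printf("%u.%u.%u.%u\n",
	       (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}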
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7fbd89fbe107..0e57e862c399 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -61,12 +61,6 @@
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
-#define VXLAN_N_VID (1u << 24)
-#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
-
/* UDP port for VXLAN traffic.
* The IANA assigned port is 4789, but the Linux default is 8472
* for compatibility with early adopters.
@@ -269,15 +263,20 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
-/* Find VXLAN socket based on network namespace, address family and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net,
- sa_family_t family, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port
+ * and enabled unshareable flags.
+ */
+static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
+ __be16 port, u32 flags)
{
struct vxlan_sock *vs;
+ flags &= VXLAN_F_RCV_FLAGS;
+
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port &&
- inet_sk(vs->sock->sk)->sk.sk_family == family)
+ inet_sk(vs->sock->sk)->sk.sk_family == family &&
+ vs->flags == flags)
return vs;
}
return NULL;
@@ -297,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
- sa_family_t family, __be16 port)
+ sa_family_t family, __be16 port,
+ u32 flags)
{
struct vxlan_sock *vs;
- vs = vxlan_find_sock(net, family, port);
+ vs = vxlan_find_sock(net, family, port, flags);
if (!vs)
return NULL;
@@ -340,6 +340,11 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm->ndm_flags = fdb->flags;
ndm->ndm_type = RTN_UNICAST;
+ if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
+ nla_put_s32(skb, NDA_LINK_NETNSID,
+ peernet2id(dev_net(vxlan->dev), vxlan->net)))
+ goto nla_put_failure;
+
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
@@ -364,7 +369,8 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -379,6 +385,7 @@ static inline size_t vxlan_nlmsg_size(void)
+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
+ + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
@@ -545,15 +552,51 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
+ unsigned int off,
+ struct vxlanhdr *vh, size_t hdrlen,
+ u32 data)
+{
+ size_t start, offset, plen;
+
+ if (skb->remcsum_offload)
+ return vh;
+
+ if (!NAPI_GRO_CB(skb)->csum_valid)
+ return NULL;
+
+ start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+ offset = start + ((data & VXLAN_RCO_UDP) ?
+ offsetof(struct udphdr, check) :
+ offsetof(struct tcphdr, check));
+
+ plen = hdrlen + offset + sizeof(u16);
+
+ /* Pull checksum that will be written */
+ if (skb_gro_header_hard(skb, off + plen)) {
+ vh = skb_gro_header_slow(skb, off + plen, off);
+ if (!vh)
+ return NULL;
+ }
+
+ skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
+
+ skb->remcsum_offload = 1;
+
+ return vh;
+}
+
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
{
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
- struct ethhdr *eh, *eh2;
- unsigned int hlen, off_vx, off_eth;
- const struct packet_offload *ptype;
- __be16 type;
+ unsigned int hlen, off_vx;
int flush = 1;
+ struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
+ udp_offloads);
+ u32 flags;
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
@@ -563,15 +606,17 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
if (unlikely(!vh))
goto out;
}
+
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
- off_eth = skb_gro_offset(skb);
- hlen = off_eth + sizeof(*eh);
- eh = skb_gro_header_fast(skb, off_eth);
- if (skb_gro_header_hard(skb, hlen)) {
- eh = skb_gro_header_slow(skb, hlen, off_eth);
- if (unlikely(!eh))
+ flags = ntohl(vh->vx_flags);
+
+ if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+ vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
+ ntohl(vh->vx_vni));
+
+ if (!vh)
goto out;
}
@@ -582,54 +627,27 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
continue;
vh2 = (struct vxlanhdr *)(p->data + off_vx);
- eh2 = (struct ethhdr *)(p->data + off_eth);
- if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+ if (vh->vx_flags != vh2->vx_flags ||
+ vh->vx_vni != vh2->vx_vni) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
- type = eh->h_proto;
-
- rcu_read_lock();
- ptype = gro_find_receive_by_type(type);
- if (ptype == NULL) {
- flush = 1;
- goto out_unlock;
- }
+ pp = eth_gro_receive(head, skb);
- skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
- skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
-
-out_unlock:
- rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
+ struct udp_offload *uoff)
{
- struct ethhdr *eh;
- struct packet_offload *ptype;
- __be16 type;
- int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
- int err = -ENOSYS;
-
udp_tunnel_gro_complete(skb, nhoff);
- eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
- type = eh->h_proto;
-
- rcu_read_lock();
- ptype = gro_find_complete_by_type(type);
- if (ptype != NULL)
- err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
-
- rcu_read_unlock();
- return err;
+ return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Notify netdevs that UDP port started listening */
@@ -991,7 +1009,7 @@ static bool vxlan_snoop(struct net_device *dev,
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pIS to %pIS\n",
- src_mac, &rdst->remote_ip, &src_ip);
+ src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip;
f->updated = jiffies;
@@ -1131,33 +1149,107 @@ static void vxlan_igmp_leave(struct work_struct *work)
dev_put(vxlan->dev);
}
+static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
+ size_t hdrlen, u32 data)
+{
+ size_t start, offset, plen;
+
+ if (skb->remcsum_offload) {
+ /* Already processed in GRO path */
+ skb->remcsum_offload = 0;
+ return vh;
+ }
+
+ start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+ offset = start + ((data & VXLAN_RCO_UDP) ?
+ offsetof(struct udphdr, check) :
+ offsetof(struct tcphdr, check));
+
+ plen = hdrlen + offset + sizeof(u16);
+
+ if (!pskb_may_pull(skb, plen))
+ return NULL;
+
+ vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+
+ skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
+
+ return vh;
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
+ u32 flags, vni;
+ struct vxlan_metadata md = {0};
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto error;
- /* Return packets with reserved bits set */
vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
- if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
- (vxh->vx_vni & htonl(0xff))) {
- netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
- goto error;
+ flags = ntohl(vxh->vx_flags);
+ vni = ntohl(vxh->vx_vni);
+
+ if (flags & VXLAN_HF_VNI) {
+ flags &= ~VXLAN_HF_VNI;
+ } else {
+ /* VNI flag always required to be set */
+ goto bad_flags;
}
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
goto drop;
+ vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- vs->rcv(vs, skb, vxh->vx_vni);
+ if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+ vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
+ if (!vxh)
+ goto drop;
+
+ flags &= ~VXLAN_HF_RCO;
+ vni &= VXLAN_VID_MASK;
+ }
+
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
+ struct vxlanhdr_gbp *gbp;
+
+ gbp = (struct vxlanhdr_gbp *)vxh;
+ md.gbp = ntohs(gbp->policy_id);
+
+ if (gbp->dont_learn)
+ md.gbp |= VXLAN_GBP_DONT_LEARN;
+
+ if (gbp->policy_applied)
+ md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+
+ flags &= ~VXLAN_GBP_USED_BITS;
+ }
+
+ if (flags || (vni & ~VXLAN_VID_MASK)) {
+ /* If there are any unprocessed flags remaining treat
+ * this as a malformed packet. This behavior diverges from
+ * VXLAN RFC (RFC7348) which stipulates that bits set in
+ * reserved fields are to be ignored. The approach here
+ * maintains compatibility with previous stack code, and also
+ * is more robust and provides a little more security in
+ * adding extensions to VXLAN.
+ */
+
+ goto bad_flags;
+ }
+
+ md.vni = vxh->vx_vni;
+ vs->rcv(vs, skb, &md);
return 0;
drop:
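To make the remote checksum offload (RCO) decode above more concrete: the low byte of the VNI field tells the receiver where the inner checksum data starts (in 2-byte units) and whether the UDP or the TCP checksum field is to be patched. A stand-alone sketch of the calculation done by vxlan_remcsum() and vxlan_gro_remcsum(); the VXLAN_RCO_* values and the header offsets are assumptions here, the real macros live in include/net/vxlan.h outside this excerpt.
#include <stdio.h>
#include <stdint.h>
#define VXLAN_RCO_MASK   0x7fu  /* assumed: low 7 bits = start, in 2-byte units */
#define VXLAN_RCO_UDP    0x80u  /* assumed: set when the inner checksum is UDP */
#define VXLAN_RCO_SHIFT  1      /* assumed */
#define UDP_CHECK_OFFSET 6      /* offsetof(struct udphdr, check), assumed */
#define TCP_CHECK_OFFSET 16     /* offsetof(struct tcphdr, check), assumed */
int main(void)
{
	uint32_t data = 0x94;   /* pretend low byte of vx_vni: 0x80 | 0x14 */
	uint32_t start  = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	uint32_t offset = start + ((data & VXLAN_RCO_UDP) ?
				   UDP_CHECK_OFFSET : TCP_CHECK_OFFSET);
	printf("inner checksum data starts at byte %u, checksum field at byte %u\n",
	       start, offset);
	return 0;
}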
@@ -1165,13 +1257,17 @@ drop:
kfree_skb(skb);
return 0;
+bad_flags:
+ netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+
error:
/* Return non vxlan pkt */
return 1;
}
-static void vxlan_rcv(struct vxlan_sock *vs,
- struct sk_buff *skb, __be32 vx_vni)
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+ struct vxlan_metadata *md)
{
struct iphdr *oip = NULL;
struct ipv6hdr *oip6 = NULL;
@@ -1182,7 +1278,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
int err = 0;
union vxlan_addr *remote_ip;
- vni = ntohl(vx_vni) >> 8;
+ vni = ntohl(md->vni) >> 8;
/* Is this VNI defined? */
vxlan = vxlan_vs_find_vni(vs, vni);
if (!vxlan)
@@ -1216,6 +1312,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
goto drop;
skb_reset_network_header(skb);
+ skb->mark = md->gbp;
if (oip6)
err = IP6_ECN_decapsulate(oip6, skb);
@@ -1565,20 +1662,54 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
}
+static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
+ struct vxlan_metadata *md)
+{
+ struct vxlanhdr_gbp *gbp;
+
+ if (!md->gbp)
+ return;
+
+ gbp = (struct vxlanhdr_gbp *)vxh;
+ vxh->vx_flags |= htonl(VXLAN_HF_GBP);
+
+ if (md->gbp & VXLAN_GBP_DONT_LEARN)
+ gbp->dont_learn = 1;
+
+ if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
+ gbp->policy_applied = 1;
+
+ gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
+}
+
#if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct vxlan_sock *vs,
- struct dst_entry *dst, struct sk_buff *skb,
+static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port, __be32 vni,
- bool xnet)
+ __be16 src_port, __be16 dst_port,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
int min_headroom;
int err;
- bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
+ bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+ int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ u16 hdrlen = sizeof(struct vxlanhdr);
+
+ if ((vxflags & VXLAN_F_REMCSUM_TX) &&
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ int csum_start = skb_checksum_start_offset(skb);
+
+ if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+ !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+ (skb->csum_offset == offsetof(struct udphdr, check) ||
+ skb->csum_offset == offsetof(struct tcphdr, check))) {
+ udp_sum = false;
+ type |= SKB_GSO_TUNNEL_REMCSUM;
+ }
+ }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
if (IS_ERR(skb)) {
err = -EINVAL;
goto err;
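The vxlan_build_gbp_hdr() helper added above splits the 32-bit group-based-policy value carried in skb->mark into the on-wire policy id and the DONT_LEARN/POLICY_APPLIED flag bits; the receive path reassembles skb->mark the same way. A small stand-alone sketch of that packing, with assumed bit positions (the real VXLAN_GBP_* macros are defined in include/net/vxlan.h, outside this excerpt):
#include <stdio.h>
#include <stdint.h>
#define VXLAN_GBP_ID_MASK        0xffffu     /* assumed */
#define VXLAN_GBP_DONT_LEARN     (1u << 22)  /* assumed */
#define VXLAN_GBP_POLICY_APPLIED (1u << 19)  /* assumed */
int main(void)
{
	uint32_t mark = VXLAN_GBP_POLICY_APPLIED | 0x0123;   /* example mark */
	uint16_t policy_id      = mark & VXLAN_GBP_ID_MASK;
	int      dont_learn     = !!(mark & VXLAN_GBP_DONT_LEARN);
	int      policy_applied = !!(mark & VXLAN_GBP_POLICY_APPLIED);
	printf("policy id 0x%04x, dont_learn=%d, policy_applied=%d\n",
	       policy_id, dont_learn, policy_applied);
	return 0;
}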
@@ -1588,7 +1719,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + sizeof(struct ipv6hdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
@@ -1604,13 +1735,33 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
}
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = vni;
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+
+ if (type & SKB_GSO_TUNNEL_REMCSUM) {
+ u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+ VXLAN_RCO_SHIFT;
+
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ data |= VXLAN_RCO_UDP;
+
+ vxh->vx_vni |= htonl(data);
+ vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+ if (!skb_is_gso(skb)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->encapsulation = 0;
+ }
+ }
+
+ if (vxflags & VXLAN_F_GBP)
+ vxlan_build_gbp_hdr(vxh, vxflags, md);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
- ttl, src_port, dst_port);
+ udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
+ ttl, src_port, dst_port,
+ !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
return 0;
err:
dst_release(dst);
@@ -1618,23 +1769,38 @@ err:
}
#endif
-int vxlan_xmit_skb(struct vxlan_sock *vs,
- struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
+ __be16 src_port, __be16 dst_port,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
int min_headroom;
int err;
- bool udp_sum = !vs->sock->sk->sk_no_check_tx;
+ bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
+ int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ u16 hdrlen = sizeof(struct vxlanhdr);
+
+ if ((vxflags & VXLAN_F_REMCSUM_TX) &&
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ int csum_start = skb_checksum_start_offset(skb);
+
+ if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+ !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+ (skb->csum_offset == offsetof(struct udphdr, check) ||
+ skb->csum_offset == offsetof(struct tcphdr, check))) {
+ udp_sum = false;
+ type |= SKB_GSO_TUNNEL_REMCSUM;
+ }
+ }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
if (IS_ERR(skb))
return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
@@ -1648,13 +1814,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = vni;
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+
+ if (type & SKB_GSO_TUNNEL_REMCSUM) {
+ u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+ VXLAN_RCO_SHIFT;
+
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ data |= VXLAN_RCO_UDP;
+
+ vxh->vx_vni |= htonl(data);
+ vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+ if (!skb_is_gso(skb)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->encapsulation = 0;
+ }
+ }
+
+ if (vxflags & VXLAN_F_GBP)
+ vxlan_build_gbp_hdr(vxh, vxflags, md);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos,
- ttl, df, src_port, dst_port, xnet);
+ return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
+ ttl, df, src_port, dst_port, xnet,
+ !(vxflags & VXLAN_F_UDP_CSUM));
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
@@ -1711,6 +1897,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *old_iph;
struct flowi4 fl4;
union vxlan_addr *dst;
+ struct vxlan_metadata md;
__be16 src_port = 0, dst_port;
u32 vni;
__be16 df = 0;
@@ -1772,7 +1959,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ip_rt_put(rt);
dst_vxlan = vxlan_find_vni(vxlan->net, vni,
- dst->sa.sa_family, dst_port);
+ dst->sa.sa_family, dst_port,
+ vxlan->flags);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1781,12 +1969,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-
- err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
- fl4.saddr, dst->sin.sin_addr.s_addr,
- tos, ttl, df, src_port, dst_port,
- htonl(vni << 8),
- !net_eq(vxlan->net, dev_net(vxlan->dev)));
+ md.vni = htonl(vni << 8);
+ md.gbp = skb->mark;
+
+ err = vxlan_xmit_skb(rt, skb, fl4.saddr,
+ dst->sin.sin_addr.s_addr, tos, ttl, df,
+ src_port, dst_port, &md,
+ !net_eq(vxlan->net, dev_net(vxlan->dev)),
+ vxlan->flags);
if (err < 0) {
/* skb is already freed. */
skb = NULL;
@@ -1830,7 +2020,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_release(ndst);
dst_vxlan = vxlan_find_vni(vxlan->net, vni,
- dst->sa.sa_family, dst_port);
+ dst->sa.sa_family, dst_port,
+ vxlan->flags);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1838,11 +2029,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
+ md.vni = htonl(vni << 8);
+ md.gbp = skb->mark;
- err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
- dev, &fl6.saddr, &fl6.daddr, 0, ttl,
- src_port, dst_port, htonl(vni << 8),
- !net_eq(vxlan->net, dev_net(vxlan->dev)));
+ err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
+ 0, ttl, src_port, dst_port, &md,
+ !net_eq(vxlan->net, dev_net(vxlan->dev)),
+ vxlan->flags);
#endif
}
@@ -1998,7 +2191,7 @@ static int vxlan_init(struct net_device *dev)
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
- vxlan->dst_port);
+ vxlan->dst_port, vxlan->flags);
if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
/* If we have a socket with same port already, reuse it */
vxlan_vs_add_dev(vs, vxlan);
@@ -2242,6 +2435,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2311,15 +2507,11 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
if (ipv6) {
udp_conf.family = AF_INET6;
- udp_conf.use_udp6_tx_checksums =
- !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
udp_conf.use_udp6_rx_checksums =
!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
} else {
udp_conf.family = AF_INET;
udp_conf.local_ip.s_addr = INADDR_ANY;
- udp_conf.use_udp_checksums =
- !!(flags & VXLAN_F_UDP_CSUM);
}
udp_conf.local_udp_port = port;
@@ -2363,6 +2555,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
atomic_set(&vs->refcnt, 1);
vs->rcv = rcv;
vs->data = data;
+ vs->flags = (flags & VXLAN_F_RCV_FLAGS);
/* Initialize the vxlan udp offloads structure */
vs->udp_offloads.port = port;
@@ -2401,7 +2594,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
return vs;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
if (vs && ((vs->rcv != rcv) ||
!atomic_add_unless(&vs->refcnt, 1, 0)))
vs = ERR_PTR(-EBUSY);
@@ -2432,10 +2625,10 @@ static void vxlan_sock_work(struct work_struct *work)
dev_put(vxlan->dev);
}
-static int vxlan_newlink(struct net *net, struct net_device *dev,
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
__u32 vni;
@@ -2445,7 +2638,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
- vxlan->net = dev_net(dev);
+ vxlan->net = src_net;
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
@@ -2481,7 +2674,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_LINK] &&
(dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
struct net_device *lowerdev
- = __dev_get_by_index(net, dst->remote_ifindex);
+ = __dev_get_by_index(src_net, dst->remote_ifindex);
if (!lowerdev) {
pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2557,8 +2750,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
- if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
- vxlan->dst_port)) {
+ if (data[IFLA_VXLAN_REMCSUM_TX] &&
+ nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+ vxlan->flags |= VXLAN_F_REMCSUM_TX;
+
+ if (data[IFLA_VXLAN_REMCSUM_RX] &&
+ nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+ vxlan->flags |= VXLAN_F_REMCSUM_RX;
+
+ if (data[IFLA_VXLAN_GBP])
+ vxlan->flags |= VXLAN_F_GBP;
+
+ if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+ vxlan->dst_port, vxlan->flags)) {
pr_info("duplicate VNI %u\n", vni);
return -EEXIST;
}
@@ -2625,6 +2829,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
0;
}
@@ -2690,18 +2896,33 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
!!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
+ !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
+ !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
goto nla_put_failure;
+ if (vxlan->flags & VXLAN_F_GBP &&
+ nla_put_flag(skb, IFLA_VXLAN_GBP))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
return -EMSGSIZE;
}
+static struct net *vxlan_get_link_net(const struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ return vxlan->net;
+}
+
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
@@ -2713,6 +2934,7 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.dellink = vxlan_dellink,
.get_size = vxlan_get_size,
.fill_info = vxlan_fill_info,
+ .get_link_net = vxlan_get_link_net,
};
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 94e234975c61..a2fdd15f285a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
# There is no way to detect a comtrol sv11 - force it modular for now.
config HOSTESS_SV11
tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC
+ depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
help
Driver for Comtrol Hostess SV-11 network card which
operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
# The COSA/SRP driver has not been tested as non-modular yet.
config COSA
tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API && HDLC
+ depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
---help---
Driver for COSA and SRP synchronous serial boards.
@@ -87,7 +87,7 @@ config LANMEDIA
# There is no way to detect a Sealevel board. Force it modular
config SEALEVEL_4021
tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC
+ depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
help
This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 17fcaabb2687..f07a61899545 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1837,6 +1837,7 @@ static int adm8211_probe(struct pci_dev *pdev,
if (!priv->map) {
printk(KERN_ERR "%s (adm8211): Cannot map device memory\n",
pci_name(pdev));
+ err = -ENOMEM;
goto err_free_dev;
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index ccba4fea7269..1eebe2ea3dfb 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -64,6 +64,7 @@ enum ath_op_flags {
ATH_OP_HW_RESET,
ATH_OP_SCANNING,
ATH_OP_MULTI_CHANNEL,
+ ATH_OP_WOW_ENABLED,
};
enum ath_bus_type {
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 8b1b1adb477a..f4dbb3e93bf8 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -8,11 +8,15 @@ ath10k_core-y += mac.o \
htt_tx.o \
txrx.o \
wmi.o \
- bmi.o
+ wmi-tlv.o \
+ bmi.o \
+ hw.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a156e6e48708..e508c65b6ba8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_error_intr_disable(ar, ctrl_addr);
@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
- u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
nentries = roundup_pow_of_two(attr->src_nentries);
@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
- u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
nentries = roundup_pow_of_two(attr->dest_nentries);
@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
@@ -1093,10 +1093,12 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
ce_state->id = ce_id;
- ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 617a151e8ce4..c18647b87f71 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -394,7 +394,7 @@ struct ce_attr {
#define DST_WATERMARK_HIGH_RESET 0
#define DST_WATERMARK_ADDRESS 0x0050
-static inline u32 ath10k_ce_base_address(unsigned int ce_id)
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7762061a1944..310e12bc078a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/of.h>
#include "core.h"
#include "mac.h"
@@ -27,20 +28,18 @@
#include "debug.h"
#include "htt.h"
#include "testmode.h"
+#include "wmi-ops.h"
unsigned int ath10k_debug_mask;
static bool uart_print;
-static unsigned int ath10k_p2p;
static bool skip_otp;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param(uart_print, bool, 0644);
-module_param_named(p2p, ath10k_p2p, uint, 0644);
module_param(skip_otp, bool, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
-MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
@@ -48,11 +47,57 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
.fw = QCA988X_HW_2_0_FW_FILE,
.otp = QCA988X_HW_2_0_OTP_FILE,
.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .name = "qca6174 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .fw = QCA6174_HW_2_1_FW_FILE,
+ .otp = QCA6174_HW_2_1_OTP_FILE,
+ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_3_0_VERSION,
+ .name = "qca6174 hw3.0",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .fw = {
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .fw = QCA6174_HW_3_0_FW_FILE,
+ .otp = QCA6174_HW_3_0_OTP_FILE,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+ .name = "qca6174 hw3.2",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .fw = {
+ /* uses same binaries as hw3.0 */
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .fw = QCA6174_HW_3_0_FW_FILE,
+ .otp = QCA6174_HW_3_0_OTP_FILE,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
};
@@ -146,8 +191,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
size_t data_len)
{
- u32 board_data_size = QCA988X_BOARD_DATA_SZ;
- u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
u32 board_ext_data_addr;
int ret;
@@ -193,7 +238,7 @@ static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
static int ath10k_download_board_data(struct ath10k *ar, const void *data,
size_t data_len)
{
- u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+ u32 board_data_size = ar->hw_params.fw.board_size;
u32 address;
int ret;
@@ -249,6 +294,63 @@ static int ath10k_download_cal_file(struct ath10k *ar)
return 0;
}
+static int ath10k_download_cal_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ int data_len;
+ void *data;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ /* Device Tree is optional, don't print any warnings if
+ * there's no node for ath10k.
+ */
+ return -ENOENT;
+
+ if (!of_get_property(node, "qcom,ath10k-calibration-data",
+ &data_len)) {
+ /* The calibration data node is optional */
+ return -ENOENT;
+ }
+
+ if (data_len != QCA988X_CAL_DATA_LEN) {
+ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+ data_len);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
+ data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+out:
+ return ret;
+}
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
@@ -447,7 +549,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
int ie_id, i, index, bit, ret;
struct ath10k_fw_ie *hdr;
const u8 *data;
- __le32 *timestamp;
+ __le32 *timestamp, *version;
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
@@ -562,6 +664,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->otp_len = ie_len;
break;
+ case ATH10K_FW_IE_WMI_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ ar->wmi.op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
+ ar->wmi.op_version);
+ break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@@ -582,13 +695,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
goto err;
}
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
- !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
- ret = -EINVAL;
- goto err;
- }
-
/* now fetch the board file */
if (ar->hw_params.fw.board == NULL) {
ath10k_err(ar, "board data file not defined");
@@ -624,6 +730,13 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
+ ar->fw_api = 4;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+ if (ret == 0)
+ goto success;
+
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -662,7 +775,17 @@ static int ath10k_download_cal_data(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot did not find a calibration file, try OTP next: %d\n",
+ "boot did not find a calibration file, try DT next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_dt(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_DT;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find DT entry, try OTP next: %d\n",
ret);
ret = ath10k_download_and_run_otp(ar);
@@ -696,7 +819,7 @@ static int ath10k_init_uart(struct ath10k *ar)
if (!uart_print)
return 0;
- ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
if (ret) {
ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
return ret;
@@ -764,6 +887,7 @@ static void ath10k_core_restart(struct work_struct *work)
complete_all(&ar->offchan_tx_completed);
complete_all(&ar->install_key_done);
complete_all(&ar->vdev_setup_done);
+ complete_all(&ar->thermal.wmi_sync);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@@ -799,15 +923,63 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
-static void ath10k_core_init_max_sta_count(struct ath10k *ar)
+static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- ar->max_num_peers = TARGET_10X_NUM_PEERS;
- ar->max_num_stations = TARGET_10X_NUM_STATIONS;
- } else {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
+ return -EINVAL;
+ }
+
+ if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
+ ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+ return -EINVAL;
+ }
+
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_WMI_OP_VERSION.
+ */
+ if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
+ ar->fw_features))
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ else
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ } else {
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ }
+ }
+
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->max_num_peers = TARGET_NUM_PEERS;
ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->max_num_peers = TARGET_TLV_NUM_PEERS;
+ ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ return -EINVAL;
}
+
+ return 0;
}
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
@@ -932,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop;
}
+ /* If firmware indicates Full Rx Reorder support it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
+ status = ath10k_htt_rx_ring_refill(ar);
+ if (status) {
+ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* we don't care about HTT in UTF mode */
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_htt_setup(&ar->htt);
@@ -945,10 +1129,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (status)
goto err_hif_stop;
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1;
- else
- ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1;
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
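The free_vdev_map assignment above seeds a 64-bit bitmap with one set bit per vdev id that the selected WMI interface supports; allocating a vdev then clears its bit. A user-space sketch of that bookkeeping (16 is just an example limit, and this is not the ath10k allocator itself):
#include <stdio.h>
#include <stdint.h>
int main(void)
{
	int max_num_vdevs = 16;                       /* example value only */
	uint64_t free_vdev_map = (1ULL << max_num_vdevs) - 1;
	/* Allocate the lowest free vdev id, then mark it as used. */
	int id = __builtin_ffsll(free_vdev_map) - 1;
	free_vdev_map &= ~(1ULL << id);
	printf("allocated vdev %d, map is now 0x%llx\n",
	       id, (unsigned long long)free_vdev_map);
	return 0;
}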
@@ -1025,8 +1206,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
- ath10k_hif_power_down(ar);
- return ret;
+ goto err_power_down;
}
ar->target_version = target_info.version;
@@ -1035,28 +1215,28 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ret = ath10k_init_hw_params(ar);
if (ret) {
ath10k_err(ar, "could not get hw params (%d)\n", ret);
- ath10k_hif_power_down(ar);
- return ret;
+ goto err_power_down;
}
ret = ath10k_core_fetch_firmware_files(ar);
if (ret) {
ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
- ath10k_hif_power_down(ar);
- return ret;
+ goto err_power_down;
}
- ath10k_core_init_max_sta_count(ar);
+ ret = ath10k_core_init_firmware_features(ar);
+ if (ret) {
+ ath10k_err(ar, "fatal problem with firmware features: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath10k_err(ar, "could not init core (%d)\n", ret);
- ath10k_core_free_firmware_files(ar);
- ath10k_hif_power_down(ar);
- mutex_unlock(&ar->conf_mutex);
- return ret;
+ goto err_unlock;
}
ath10k_print_driver_info(ar);
@@ -1066,34 +1246,17 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_hif_power_down(ar);
return 0;
-}
-
-static int ath10k_core_check_chip_id(struct ath10k *ar)
-{
- u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
- ar->chip_id, hw_revision);
- /* Check that we are not using hw1.0 (some of them have same pci id
- * as hw2.0) before doing anything else as ath10k crashes horribly
- * due to missing hw1.0 workarounds. */
- switch (hw_revision) {
- case QCA988X_HW_1_0_CHIP_ID_REV:
- ath10k_err(ar, "ERROR: qca988x hw1.0 is not supported\n");
- return -EOPNOTSUPP;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
- case QCA988X_HW_2_0_CHIP_ID_REV:
- /* known hardware revision, continue normally */
- return 0;
+err_free_firmware_files:
+ ath10k_core_free_firmware_files(ar);
- default:
- ath10k_warn(ar, "Warning: hardware revision unknown (0x%x), expect problems\n",
- ar->chip_id);
- return 0;
- }
+err_power_down:
+ ath10k_hif_power_down(ar);
- return 0;
+ return ret;
}
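The probe rework above funnels every failure through a single goto-unwind ladder instead of repeating the power-down and free calls in each error branch. A generic sketch of the idiom; all names below are placeholders, not ath10k functions:
#include <stdio.h>
static int step_a(void) { return 0; }   /* 0 means success */
static int step_b(void) { return -1; }  /* pretend this step fails */
static void undo_a(void) { puts("undoing step a"); }
static int probe(void)
{
	int ret;
	ret = step_a();
	if (ret)
		goto err;
	ret = step_b();
	if (ret)
		goto err_undo_a;
	return 0;
err_undo_a:
	undo_a();
err:
	return ret;
}
int main(void)
{
	printf("probe() returned %d\n", probe());
	return 0;
}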
static void ath10k_core_register_work(struct work_struct *work)
@@ -1125,9 +1288,18 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_debug_destroy;
}
+ status = ath10k_thermal_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register thermal device: %d\n",
+ status);
+ goto err_spectral_destroy;
+ }
+
set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
return;
+err_spectral_destroy:
+ ath10k_spectral_destroy(ar);
err_debug_destroy:
ath10k_debug_destroy(ar);
err_unregister_mac:
@@ -1143,16 +1315,7 @@ err:
int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
- int status;
-
ar->chip_id = chip_id;
-
- status = ath10k_core_check_chip_id(ar);
- if (status) {
- ath10k_err(ar, "Unsupported chip id 0x%08x\n", ar->chip_id);
- return status;
- }
-
queue_work(ar->workqueue, &ar->register_work);
return 0;
@@ -1166,6 +1329,7 @@ void ath10k_core_unregister(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
return;
+ ath10k_thermal_unregister(ar);
/* Stop spectral before unregistering from mac80211 to remove the
* relayfs debugfs file cleanly. Otherwise the parent debugfs tree
* would already be free'd recursively, leading to a double free.
@@ -1187,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister);
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
@@ -1198,13 +1363,25 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->ath_common.priv = ar;
ar->ath_common.hw = ar->hw;
-
- ar->p2p = !!ath10k_p2p;
ar->dev = dev;
-
+ ar->hw_rev = hw_rev;
ar->hif.ops = hif_ops;
ar->hif.bus = bus;
+ switch (hw_rev) {
+ case ATH10K_HW_QCA988X:
+ ar->regs = &qca988x_regs;
+ break;
+ case ATH10K_HW_QCA6174:
+ ar->regs = &qca6174_regs;
+ break;
+ default:
+ ath10k_err(ar, "unsupported core hardware revision %d\n",
+ hw_rev);
+ ret = -ENOTSUPP;
+ goto err_free_mac;
+ }
+
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
@@ -1212,6 +1389,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 514c219263a5..d60e46fe6d19 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -34,6 +34,7 @@
#include "../regd.h"
#include "../dfs_pattern_detector.h"
#include "spectral.h"
+#include "thermal.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -96,6 +97,11 @@ struct ath10k_skb_cb {
} bcn;
} __packed;
+struct ath10k_skb_rxcb {
+ dma_addr_t paddr;
+ struct hlist_node hlist;
+};
+
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
@@ -103,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
+#define ATH10K_RXCB_SKB(rxcb) \
+ container_of((void *)rxcb, struct sk_buff, cb)
+
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -120,6 +135,7 @@ struct ath10k_mem_chunk {
};
struct ath10k_wmi {
+ enum ath10k_fw_wmi_op_version op_version;
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
@@ -128,6 +144,7 @@ struct ath10k_wmi {
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
+ const struct wmi_ops *ops;
u32 num_mem_chunks;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
@@ -236,10 +253,21 @@ struct ath10k_sta {
u32 smps;
struct work_struct update_wk;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+#endif
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+enum ath10k_beacon_state {
+ ATH10K_BEACON_SCHEDULED = 0,
+ ATH10K_BEACON_SENDING,
+ ATH10K_BEACON_SENT,
+};
+
struct ath10k_vif {
struct list_head list;
@@ -250,7 +278,7 @@ struct ath10k_vif {
u32 dtim_period;
struct sk_buff *beacon;
/* protected by data_lock */
- bool beacon_sent;
+ enum ath10k_beacon_state beacon_state;
void *beacon_buf;
dma_addr_t beacon_paddr;
@@ -263,10 +291,8 @@ struct ath10k_vif {
u32 aid;
u8 bssid[ETH_ALEN];
- struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
- u8 def_wep_key_idx;
- u8 def_wep_key_newidx;
+ s8 def_wep_key_idx;
u16 tx_seq_no;
@@ -293,6 +319,7 @@ struct ath10k_vif {
bool use_cts_prot;
int num_legacy_stations;
int txpower;
+ struct wmi_wmm_params_all_arg wmm_params;
};
struct ath10k_vif_iter {
@@ -323,8 +350,10 @@ struct ath10k_debug {
/* protected by conf_mutex */
u32 fw_dbglog_mask;
+ u32 fw_dbglog_level;
u32 pktlog_filter;
u32 reg_addr;
+ u32 nf_cal_period;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
@@ -369,7 +398,7 @@ enum ath10k_fw_features {
/* wmi_mgmt_rx_hdr contains extra RSSI information */
ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
- /* firmware from 10X branch */
+ /* Firmware from 10X branch. Deprecated, don't use in new code. */
ATH10K_FW_FEATURE_WMI_10X = 1,
/* firmware supports tx frame management over WMI, otherwise it's HTT */
@@ -378,8 +407,9 @@ enum ath10k_fw_features {
/* Firmware does not support P2P */
ATH10K_FW_FEATURE_NO_P2P = 3,
- /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit
- * is required to be set as well.
+ /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
+ * bit is required to be set as well. Deprecated, don't use in new
+ * code.
*/
ATH10K_FW_FEATURE_WMI_10_2 = 4,
@@ -401,6 +431,7 @@ enum ath10k_dev_flags {
enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
+ ATH10K_CAL_MODE_DT,
};
static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
@@ -410,6 +441,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "file";
case ATH10K_CAL_MODE_OTP:
return "otp";
+ case ATH10K_CAL_MODE_DT:
+ return "dt";
}
return "unknown";
@@ -444,6 +477,7 @@ struct ath10k {
struct device *dev;
u8 mac_addr[ETH_ALEN];
+ enum ath10k_hw_rev hw_rev;
u32 chip_id;
u32 target_version;
u8 fw_version_major;
@@ -459,9 +493,6 @@ struct ath10k {
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
- struct targetdef *targetdef;
- struct hostdef *hostdef;
-
bool p2p;
struct {
@@ -471,6 +502,7 @@ struct ath10k {
struct completion target_suspend;
+ const struct ath10k_hw_regs *regs;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
@@ -480,12 +512,15 @@ struct ath10k {
u32 id;
const char *name;
u32 patch_load_addr;
+ int uart_pin;
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
const char *otp;
const char *board;
+ size_t board_size;
+ size_t board_ext_size;
} fw;
} hw_params;
@@ -548,7 +583,6 @@ struct ath10k {
u8 cfg_tx_chainmask;
u8 cfg_rx_chainmask;
- struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
struct completion vdev_setup_done;
@@ -571,6 +605,7 @@ struct ath10k {
int max_num_peers;
int max_num_stations;
+ int max_num_vdevs;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -610,6 +645,7 @@ struct ath10k {
/* protected by conf_mutex */
const struct firmware *utf;
DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
+ enum ath10k_fw_wmi_op_version orig_wmi_op_version;
/* protected by data_lock */
bool utf_monitor;
@@ -622,12 +658,15 @@ struct ath10k {
u32 fw_cold_reset_counter;
} stats;
+ struct ath10k_thermal thermal;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index a716758f14b0..d2281e5c2ffe 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -23,6 +23,7 @@
#include "core.h"
#include "debug.h"
#include "hif.h"
+#include "wmi-ops.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
@@ -123,7 +124,7 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s max_sta %d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
@@ -131,10 +132,7 @@ void ath10k_print_driver_info(struct ath10k *ar)
ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor,
- ar->fw_version_major,
- ar->fw_version_minor,
- ar->fw_version_release,
- ar->fw_version_build,
+ ar->wmi.op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
@@ -373,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
1*HZ);
- if (ret <= 0)
+ if (ret == 0)
return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock);
@@ -1320,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
unsigned int len;
- char buf[32];
+ char buf[64];
- len = scnprintf(buf, sizeof(buf), "0x%08x\n",
- ar->debug.fw_dbglog_mask);
+ len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+ ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1333,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- unsigned long mask;
int ret;
+ char buf[64];
+ unsigned int log_level, mask;
- ret = kstrtoul_from_user(user_buf, count, 0, &mask);
- if (ret)
- return ret;
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
+
+ ret = sscanf(buf, "%x %u", &mask, &log_level);
+
+ if (!ret)
+ return -EINVAL;
+
+ if (ret == 1)
+ /* default if user did not specify */
+ log_level = ATH10K_DBGLOG_LEVEL_WARN;
mutex_lock(&ar->conf_mutex);
ar->debug.fw_dbglog_mask = mask;
+ ar->debug.fw_dbglog_level = log_level;
if (ar->state == ATH10K_STATE_ON) {
- ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ar->debug.fw_dbglog_level);
if (ret) {
ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
ret);
@@ -1607,6 +1618,73 @@ static const struct file_operations fops_cal_data = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_nf_cal_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n",
+ ar->debug.nf_cal_period);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_nf_cal_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long period;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &period);
+ if (ret)
+ return ret;
+
+ if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
+ return -EINVAL;
+
+ /* there's no way to switch back to the firmware default */
+ if (period == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.nf_cal_period = period;
+
+ if (ar->state != ATH10K_STATE_ON) {
+ /* firmware is not running, nothing else to do */
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret) {
+ ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_nf_cal_period = {
+ .read = ath10k_read_nf_cal_period,
+ .write = ath10k_write_nf_cal_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
@@ -1620,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar)
ret);
if (ar->debug.fw_dbglog_mask) {
- ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ATH10K_DBGLOG_LEVEL_WARN);
if (ret)
/* not serious */
ath10k_warn(ar, "failed to enable dbglog during start: %d",
@@ -1642,6 +1721,16 @@ int ath10k_debug_start(struct ath10k *ar)
ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
}
+ if (ar->debug.nf_cal_period) {
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
+ ret);
+ }
+
return ret;
}
@@ -1880,6 +1969,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
+ debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
+
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 1b87a5dbec53..a12b8323f9f1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter {
ATH10K_PKTLOG_ANY = 0x00000001f,
};
+enum ath10k_dbg_aggr_mode {
+ ATH10K_DBG_AGGR_MODE_AUTO,
+ ATH10K_DBG_AGGR_MODE_MANUAL,
+ ATH10K_DBG_AGGR_MODE_MAX,
+};
+
extern unsigned int ath10k_debug_mask;
__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data);
-
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#define ath10k_debug_get_et_stats NULL
#endif /* CONFIG_ATH10K_DEBUGFS */
+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+#endif /* CONFIG_MAC80211_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
new file mode 100644
index 000000000000..95b5c49374e0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi-ops.h"
+#include "debug.h"
+
+static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
+ (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (aggr_mode == arsta->aggr_mode)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath10k_dbg_sta_read_aggr_mode,
+ .write = ath10k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64];
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath10k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64];
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath10k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64];
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath10k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
+ &fops_aggr_mode);
+ debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index f1946a6be442..2fd9e180272b 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
/* wait for response */
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
- if (status <= 0) {
- if (status == 0)
- status = -ETIMEDOUT;
+ if (status == 0) {
ath10k_err(ar, "Service connect timeout: %d\n", status);
- return status;
+ return -ETIMEDOUT;
}
/* we controlled the buffer creation, it's aligned */
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 56cb4aceb383..4f59ab923e48 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar)
struct ath10k_htt *htt = &ar->htt;
htt->ar = ar;
- htt->max_throughput_mbps = 800;
/*
* Prefetch enough data to satisfy target
@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
- if (status <= 0) {
+ if (status == 0) {
ath10k_warn(ar, "htt version request timed out\n");
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 1bd5545af903..874bf44ff7a2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
+#include <linux/hashtable.h>
#include <net/mac80211.h>
#include "htc.h"
@@ -286,7 +287,19 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+ /* 0x13 reserved */
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+
+ /* FIXME: Do not depend on this event id. Numbering of this event id is
+ * broken across different firmware revisions and HTT version fails to
+ * indicate this.
+ */
HTT_T2H_MSG_TYPE_TEST,
+
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication {
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+struct htt_rx_pn_ind {
+ __le16 peer_id;
+ u8 tid;
+ u8 seqno_start;
+ u8 seqno_end;
+ u8 pn_ie_count;
+ u8 reserved;
+ u8 pn_ies[0];
+} __packed;
+
+struct htt_rx_offload_msdu {
+ __le16 msdu_len;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 tid;
+ u8 fw_desc;
+ u8 payload[0];
+} __packed;
+
+struct htt_rx_offload_ind {
+ u8 reserved;
+ __le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+ __le32 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+ u8 info;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 reserved;
+ __le16 msdu_count;
+ struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
+
/*
* target -> host test message definition
*
@@ -1150,6 +1210,9 @@ struct htt_resp {
struct htt_rx_test rx_test;
struct htt_pktlog_msg pktlog_msg;
struct htt_stats_conf stats_conf;
+ struct htt_rx_pn_ind rx_pn_ind;
+ struct htt_rx_offload_ind rx_offload_ind;
+ struct htt_rx_in_ord_ind rx_in_ord_ind;
};
} __packed;
@@ -1182,7 +1245,6 @@ struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
- int max_throughput_mbps;
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
@@ -1198,6 +1260,20 @@ struct ath10k_htt {
* filled.
*/
struct sk_buff **netbufs_ring;
+
+ /* This is used only with firmware supporting IN_ORD_IND.
+ *
+ * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+ * buffer ring from which buffer addresses are copied by the
+ * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+ * pointing to specific (re-ordered) buffers.
+ *
+ * FIXME: With the kernel's generic hashing functions there are a
+ * lot of hash collisions for sk_buffs.
+ */
+ bool in_ord_rx;
+ DECLARE_HASHTABLE(skb_table, 4);
+
/*
* Ring of buffer addresses -
* This ring holds the "physical" device address of the
@@ -1252,12 +1328,11 @@ struct ath10k_htt {
unsigned int prefetch_len;
- /* Protects access to %pending_tx, %used_msdu_ids */
+ /* Protects access to pending_tx, num_pending_tx */
spinlock_t tx_lock;
int max_num_pending_tx;
int num_pending_tx;
- struct sk_buff **pending_tx;
- unsigned long *used_msdu_ids; /* bitmap */
+ struct idr pending_tx;
wait_queue_head_t empty_tx_wq;
struct dma_pool *tx_pool;
@@ -1271,6 +1346,7 @@ struct ath10k_htt {
struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
+ struct sk_buff_head rx_in_ord_compl_q;
/* rx_status template */
struct ieee80211_rx_status rx_status;
@@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);
int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
@@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_amsdu);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 9c782a42665e..c1da44f65a4d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -25,8 +25,8 @@
#include <linux/log2.h>
-#define HTT_RX_RING_SIZE 1024
-#define HTT_RX_RING_FILL_LEVEL 1000
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -34,31 +34,70 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
+static struct sk_buff *
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+{
+ struct ath10k_skb_rxcb *rxcb;
+
+ hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
+ if (rxcb->paddr == paddr)
+ return ATH10K_RXCB_SKB(rxcb);
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
struct sk_buff *skb;
- struct ath10k_skb_cb *cb;
+ struct ath10k_skb_rxcb *rxcb;
+ struct hlist_node *n;
int i;
- for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
- skb = htt->rx_ring.netbufs_ring[i];
- cb = ATH10K_SKB_CB(skb);
- dma_unmap_single(htt->ar->dev, cb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ if (htt->rx_ring.in_ord_rx) {
+ hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
+ skb = ATH10K_RXCB_SKB(rxcb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ hash_del(&rxcb->hlist);
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ for (i = 0; i < htt->rx_ring.size; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ if (!skb)
+ continue;
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
}
htt->rx_ring.fill_cnt = 0;
+ hash_init(htt->rx_ring.skb_table);
+ memset(htt->rx_ring.netbufs_ring, 0,
+ htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
struct htt_rx_desc *rx_desc;
+ struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb;
dma_addr_t paddr;
int ret = 0, idx;
+ /* The Full Rx Reorder firmware has no way of implicitly telling the
+ * host when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
+ * To keep things simple make sure the ring is always half empty. This
+ * guarantees that no replenishment overruns are possible.
+ */
+ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
@@ -86,17 +125,29 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
goto fail;
}
- ATH10K_SKB_CB(skb)->paddr = paddr;
+ rxcb = ATH10K_SKB_RXCB(skb);
+ rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
htt->rx_ring.fill_cnt++;
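+ /* With in-order rx the firmware refers to buffers by their DMA
+ * address, so index them in skb_table by paddr for later lookup
+ * (see ath10k_htt_rx_find_skb_paddr()).
+ */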
+ if (htt->rx_ring.in_ord_rx) {
+ hash_add(htt->rx_ring.skb_table,
+ &ATH10K_SKB_RXCB(skb)->hlist,
+ (u32)paddr);
+ }
+
num--;
idx++;
idx &= htt->rx_ring.size_mask;
}
fail:
+ /*
+ * Make sure the rx buffer is updated before available buffer
+ * index to avoid any potential rx ring corruption.
+ */
+ mb();
*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
return ret;
}
@@ -153,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
- struct sk_buff *skb;
- int i;
+ struct ath10k_htt *htt = &ar->htt;
+ int ret;
- for (i = 0; i < htt->rx_ring.size; i++) {
- skb = htt->rx_ring.netbufs_ring[i];
- if (!skb)
- continue;
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+ htt->rx_ring.fill_cnt));
+ spin_unlock_bh(&htt->rx_ring.lock);
- dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- htt->rx_ring.netbufs_ring[i] = NULL;
- }
+ if (ret)
+ ath10k_htt_rx_ring_free(htt);
+
+ return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
@@ -179,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
+ skb_queue_purge(&htt->rx_in_ord_compl_q);
- ath10k_htt_rx_ring_clean_up(htt);
+ ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
@@ -212,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL;
+ htt->rx_ring.paddrs_ring[idx] = 0;
idx++;
idx &= htt->rx_ring.size_mask;
@@ -219,7 +270,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.fill_cnt--;
dma_unmap_single(htt->ar->dev,
- ATH10K_SKB_CB(msdu)->paddr,
+ ATH10K_SKB_RXCB(msdu)->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
@@ -379,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
+static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
+ u32 paddr)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
+ if (!msdu)
+ return NULL;
+
+ rxcb = ATH10K_SKB_RXCB(msdu);
+ hash_del(&rxcb->hlist);
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+ struct htt_rx_desc *rxd;
+ struct sk_buff *msdu;
+ int msdu_count;
+ bool is_offload;
+ u32 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
+
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = (void *)msdu->data;
+
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(*rxd));
+ skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
@@ -424,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.alloc_idx.vaddr = vaddr;
htt->rx_ring.alloc_idx.paddr = paddr;
- htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+ htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
*htt->rx_ring.alloc_idx.vaddr = 0;
/* Initialize the Rx refill retry timer */
@@ -433,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
spin_lock_init(&htt->rx_ring.lock);
htt->rx_ring.fill_cnt = 0;
- if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
- goto err_fill_ring;
+ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+ hash_init(htt->rx_ring.skb_table);
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt);
skb_queue_head_init(&htt->tx_compl_q);
skb_queue_head_init(&htt->rx_compl_q);
+ skb_queue_head_init(&htt->rx_in_ord_compl_q);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
@@ -449,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
-err_fill_ring:
- ath10k_htt_rx_ring_free(htt);
- dma_free_coherent(htt->ar->dev,
- sizeof(*htt->rx_ring.alloc_idx.vaddr),
- htt->rx_ring.alloc_idx.vaddr,
- htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
@@ -691,7 +813,7 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
*
* FIXME: Can we get/compute 64bit TSF?
*/
- status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
+ status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
@@ -1578,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
spin_unlock_bh(&ar->data_lock);
}
+static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+ struct sk_buff_head *amsdu)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+
+ if (skb_queue_empty(list))
+ return -ENOBUFS;
+
+ if (WARN_ON(!skb_queue_empty(amsdu)))
+ return -EINVAL;
+
+ while ((msdu = __skb_dequeue(list))) {
+ __skb_queue_tail(amsdu, msdu);
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ if (rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
+ break;
+ }
+
+ msdu = skb_peek_tail(amsdu);
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ if (!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
+ skb_queue_splice_init(amsdu, list);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ /* Offloaded frames are already decrypted but firmware insists they are
+ * protected in the 802.11 header. Strip the flag. Otherwise mac80211
+ * will drop the frame.
+ */
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+}
+
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct htt_rx_offload_msdu *rx;
+ struct sk_buff *msdu;
+ size_t offset;
+
+ while ((msdu = __skb_dequeue(list))) {
+ /* Offloaded frames don't have an Rx descriptor. Instead they have
+ * a short meta information header.
+ */
+
+ rx = (void *)msdu->data;
+
+ skb_put(msdu, sizeof(*rx));
+ skb_pull(msdu, sizeof(*rx));
+
+ if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
+ ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ skb_put(msdu, __le16_to_cpu(rx->msdu_len));
+
+ /* The offloaded rx header length isn't a multiple of 2 or 4, so the
+ * actual payload is unaligned. Align the frame. Otherwise
+ * mac80211 complains. This shouldn't reduce performance much
+ * because these offloaded frames are rare.
+ */
+ offset = 4 - ((unsigned long)msdu->data & 3);
+ skb_put(msdu, offset);
+ memmove(msdu->data + offset, msdu->data, msdu->len);
+ skb_pull(msdu, offset);
+
+ /* FIXME: The frame is NWifi. Re-construct QoS Control
+ * if possible later.
+ */
+
+ memset(status, 0, sizeof(*status));
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_rx_offload_prot(status, msdu);
+ ath10k_htt_rx_h_channel(ar, status);
+ ath10k_process_rx(ar, status, msdu);
+ }
+}
+
+static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (void *)skb->data;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct sk_buff_head list;
+ struct sk_buff_head amsdu;
+ u16 peer_id;
+ u16 msdu_count;
+ u8 vdev_id;
+ u8 tid;
+ bool offload;
+ bool frag;
+ int ret;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_confused)
+ return;
+
+ skb_pull(skb, sizeof(resp->hdr));
+ skb_pull(skb, sizeof(resp->rx_in_ord_ind));
+
+ peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
+ msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
+ vdev_id = resp->rx_in_ord_ind.vdev_id;
+ tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
+ offload = !!(resp->rx_in_ord_ind.info &
+ HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+ frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
+ vdev_id, peer_id, tid, offload, frag, msdu_count);
+
+ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+ ath10k_warn(ar, "dropping invalid in order rx indication\n");
+ return;
+ }
+
+ /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
+ * extracted and processed.
+ */
+ __skb_queue_head_init(&list);
+ ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+ if (ret < 0) {
+ ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
+ htt->rx_confused = true;
+ return;
+ }
+
+ /* Offloaded frames are very different and need to be handled
+ * separately.
+ */
+ if (offload)
+ ath10k_htt_rx_h_rx_offload(ar, &list);
+
+ while (!skb_queue_empty(&list)) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ switch (ret) {
+ case 0:
+ /* Note: The in-order indication may report interleaved
+ * frames from different PPDUs meaning reported rx rate
+ * to mac80211 isn't accurate/reliable. It's still
+ * better to report something than nothing though. This
+ * should still give an idea about rx rate to the user.
+ */
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+ break;
+ case -EAGAIN:
+ /* fall through */
+ default:
+ /* Should not happen. */
+ ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
+ htt->rx_confused = true;
+ __skb_queue_purge(&list);
+ return;
+ }
+ }
+
+ tasklet_schedule(&htt->rx_replenish_task);
+}
+
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1700,6 +2010,20 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
*/
break;
}
+ case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
+ spin_lock_bh(&htt->rx_ring.lock);
+ __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
+ }
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+ /* FIXME: This WMI-TLV event is overlapping with 10.2
+ * CHAN_CHANGE - both being 0xF. Neither is being used in
+ * practice so no immediate action is necessary. Nevertheless,
+ * HTT may one day need an abstraction layer like WMI has.
+ */
+ break;
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
@@ -1715,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ struct ath10k *ar = htt->ar;
struct htt_resp *resp;
struct sk_buff *skb;
@@ -1731,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
ath10k_htt_rx_handler(htt, &resp->rx_ind);
dev_kfree_skb_any(skb);
}
+
+ while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ ath10k_htt_rx_in_ord_ind(ar, skb);
+ dev_kfree_skb_any(skb);
+ }
spin_unlock_bh(&htt->rx_ring.lock);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 4bc51d8a14a3..cbd2bc9e6202 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -56,21 +56,18 @@ exit:
return ret;
}
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
- int msdu_id;
+ int ret;
lockdep_assert_held(&htt->tx_lock);
- msdu_id = find_first_zero_bit(htt->used_msdu_ids,
- htt->max_num_pending_tx);
- if (msdu_id == htt->max_num_pending_tx)
- return -ENOBUFS;
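+ /* msdu ids are handled as u16 (see ath10k_htt_tx_free_msdu_id()),
+ * so allocate ids from the 0..0xffff range only.
+ */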
+ ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
- __set_bit(msdu_id, htt->used_msdu_ids);
- return msdu_id;
+ return ret;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
@@ -79,79 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
lockdep_assert_held(&htt->tx_lock);
- if (!test_bit(msdu_id, htt->used_msdu_ids))
- ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
- msdu_id);
-
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
- __clear_bit(msdu_id, htt->used_msdu_ids);
+
+ idr_remove(&htt->pending_tx, msdu_id);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- spin_lock_init(&htt->tx_lock);
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
- htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
- else
- htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
-
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
- htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
- htt->max_num_pending_tx, GFP_KERNEL);
- if (!htt->pending_tx)
- return -ENOMEM;
-
- htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
- BITS_TO_LONGS(htt->max_num_pending_tx),
- GFP_KERNEL);
- if (!htt->used_msdu_ids) {
- kfree(htt->pending_tx);
- return -ENOMEM;
- }
+ spin_lock_init(&htt->tx_lock);
+ idr_init(&htt->pending_tx);
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
- kfree(htt->used_msdu_ids);
- kfree(htt->pending_tx);
+ idr_destroy(&htt->pending_tx);
return -ENOMEM;
}
return 0;
}
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
- struct ath10k *ar = htt->ar;
+ struct ath10k *ar = ctx;
+ struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0};
- int msdu_id;
-
- spin_lock_bh(&htt->tx_lock);
- for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
- if (!test_bit(msdu_id, htt->used_msdu_ids))
- continue;
- ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
- msdu_id);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
- tx_done.discard = 1;
- tx_done.msdu_id = msdu_id;
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
- ath10k_txrx_tx_unref(htt, &tx_done);
- }
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock);
+
+ return 0;
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
- ath10k_htt_tx_free_pending(htt);
- kfree(htt->pending_tx);
- kfree(htt->used_msdu_ids);
+ idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+ idr_destroy(&htt->pending_tx);
dma_pool_destroy(htt->tx_pool);
}
@@ -383,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
- res = ath10k_htt_tx_alloc_msdu_id(htt);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -428,7 +398,6 @@ err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
@@ -460,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err;
spin_lock_bh(&htt->tx_lock);
- res = ath10k_htt_tx_alloc_msdu_id(htt);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -480,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
- if (!skb_cb->htt.txbuf)
+ if (!skb_cb->htt.txbuf) {
+ res = -ENOMEM;
goto err_free_msdu_id;
+ }
skb_cb->htt.txbuf_paddr = paddr;
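+ /* Extend protected management frames by IEEE80211_CCMP_MIC_LEN so the
+ * mapped buffer has room for the CCMP MIC appended during encryption.
+ */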
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control))
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
@@ -539,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
- flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
- flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ }
/* Prevent firmware from sending up tx inspection requests. There's
* nothing ath10k can do with frames requested for inspection so force
@@ -598,7 +576,6 @@ err_free_txbuf:
skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
new file mode 100644
index 000000000000..839a8791fb9e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include "hw.h"
+
+const struct ath10k_hw_regs qca988x_regs = {
+ .rtc_state_cold_reset_mask = 0x00000400,
+ .rtc_soc_base_address = 0x00004000,
+ .rtc_wmac_base_address = 0x00005000,
+ .soc_core_base_address = 0x00009000,
+ .ce_wrapper_base_address = 0x00057000,
+ .ce0_base_address = 0x00057400,
+ .ce1_base_address = 0x00057800,
+ .ce2_base_address = 0x00057c00,
+ .ce3_base_address = 0x00058000,
+ .ce4_base_address = 0x00058400,
+ .ce5_base_address = 0x00058800,
+ .ce6_base_address = 0x00058c00,
+ .ce7_base_address = 0x00059000,
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00040000,
+ .soc_chip_id_address = 0x00ec,
+ .scratch_3_address = 0x0030,
+};
+
+const struct ath10k_hw_regs qca6174_regs = {
+ .rtc_state_cold_reset_mask = 0x00002000,
+ .rtc_soc_base_address = 0x00000800,
+ .rtc_wmac_base_address = 0x00001000,
+ .soc_core_base_address = 0x0003a000,
+ .ce_wrapper_base_address = 0x00034000,
+ .ce0_base_address = 0x00034400,
+ .ce1_base_address = 0x00034800,
+ .ce2_base_address = 0x00034c00,
+ .ce3_base_address = 0x00035000,
+ .ce4_base_address = 0x00035400,
+ .ce5_base_address = 0x00035800,
+ .ce6_base_address = 0x00035c00,
+ .ce7_base_address = 0x00036000,
+ .soc_reset_control_si0_rst_mask = 0x00000000,
+ .soc_reset_control_ce_rst_mask = 0x00000001,
+ .soc_chip_id_address = 0x000f0,
+ .scratch_3_address = 0x0028,
+};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index dfedfd0e0f34..460771fcfe9e 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -34,9 +34,50 @@
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA6174 target BMI version signatures */
+#define QCA6174_HW_1_0_VERSION 0x05000000
+#define QCA6174_HW_1_1_VERSION 0x05000001
+#define QCA6174_HW_1_3_VERSION 0x05000003
+#define QCA6174_HW_2_1_VERSION 0x05010000
+#define QCA6174_HW_3_0_VERSION 0x05020000
+#define QCA6174_HW_3_2_VERSION 0x05030000
+
+enum qca6174_pci_rev {
+ QCA6174_PCI_REV_1_1 = 0x11,
+ QCA6174_PCI_REV_1_3 = 0x13,
+ QCA6174_PCI_REV_2_0 = 0x20,
+ QCA6174_PCI_REV_3_0 = 0x30,
+};
+
+enum qca6174_chip_id_rev {
+ QCA6174_HW_1_0_CHIP_ID_REV = 0,
+ QCA6174_HW_1_1_CHIP_ID_REV = 1,
+ QCA6174_HW_1_3_CHIP_ID_REV = 2,
+ QCA6174_HW_2_1_CHIP_ID_REV = 4,
+ QCA6174_HW_2_2_CHIP_ID_REV = 5,
+ QCA6174_HW_3_0_CHIP_ID_REV = 8,
+ QCA6174_HW_3_1_CHIP_ID_REV = 9,
+ QCA6174_HW_3_2_CHIP_ID_REV = 10,
+};
+
+#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
+#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
+#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
+#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
+#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
+
+#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
+#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
+#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
+#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
+#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
+/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
+#define ATH10K_FW_API4_FILE "firmware-4.bin"
+
#define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */
@@ -58,8 +99,57 @@ enum ath10k_fw_ie_type {
ATH10K_FW_IE_FEATURES = 2,
ATH10K_FW_IE_FW_IMAGE = 3,
ATH10K_FW_IE_OTP_IMAGE = 4,
+
+ /* WMI "operations" interface version, 32 bit value. Supported from
+ * FW API 4 and above.
+ */
+ ATH10K_FW_IE_WMI_OP_VERSION = 5,
+};
+
+enum ath10k_fw_wmi_op_version {
+ ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
+ ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
+ ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
+ ATH10K_FW_WMI_OP_VERSION_TLV = 4,
+ ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+
+ /* keep last */
+ ATH10K_FW_WMI_OP_VERSION_MAX,
+};
+
+enum ath10k_hw_rev {
+ ATH10K_HW_QCA988X,
+ ATH10K_HW_QCA6174,
+};
+
+struct ath10k_hw_regs {
+ u32 rtc_state_cold_reset_mask;
+ u32 rtc_soc_base_address;
+ u32 rtc_wmac_base_address;
+ u32 soc_core_base_address;
+ u32 ce_wrapper_base_address;
+ u32 ce0_base_address;
+ u32 ce1_base_address;
+ u32 ce2_base_address;
+ u32 ce3_base_address;
+ u32 ce4_base_address;
+ u32 ce5_base_address;
+ u32 ce6_base_address;
+ u32 ce7_base_address;
+ u32 soc_reset_control_si0_rst_mask;
+ u32 soc_reset_control_ce_rst_mask;
+ u32 soc_chip_id_address;
+ u32 scratch_3_address;
};
+extern const struct ath10k_hw_regs qca988x_regs;
+extern const struct ath10k_hw_regs qca6174_regs;
+
+#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
@@ -162,6 +252,18 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10X_MAX_FRAG_ENTRIES 0
+/* 10.2 parameters */
+#define TARGET_10_2_DMA_BURST_SIZE 1
+
+/* Target specific defines for WMI-TLV firmware */
+#define TARGET_TLV_NUM_VDEVS 3
+#define TARGET_TLV_NUM_STATIONS 32
+#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
+ (TARGET_TLV_NUM_VDEVS) + \
+ 2)
+#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
+#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+
/* Number of Copy Engines supported */
#define CE_COUNT 8
@@ -192,7 +294,7 @@ struct ath10k_pktlog_hdr {
/* as of IP3.7.1 */
#define RTC_STATE_V_ON 3
-#define RTC_STATE_COLD_RESET_MASK 0x00000400
+#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
@@ -201,12 +303,12 @@ struct ath10k_pktlog_hdr {
#define PCIE_SOC_WAKE_RESET 0x00000000
#define SOC_GLOBAL_RESET_ADDRESS 0x0008
-#define RTC_SOC_BASE_ADDRESS 0x00004000
-#define RTC_WMAC_BASE_ADDRESS 0x00005000
+#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
+#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
#define MAC_COEX_BASE_ADDRESS 0x00006000
#define BT_COEX_BASE_ADDRESS 0x00007000
#define SOC_PCIE_BASE_ADDRESS 0x00008000
-#define SOC_CORE_BASE_ADDRESS 0x00009000
+#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
#define WLAN_UART_BASE_ADDRESS 0x0000c000
#define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
@@ -215,23 +317,23 @@ struct ath10k_pktlog_hdr {
#define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
-#define CE_WRAPPER_BASE_ADDRESS 0x00057000
-#define CE0_BASE_ADDRESS 0x00057400
-#define CE1_BASE_ADDRESS 0x00057800
-#define CE2_BASE_ADDRESS 0x00057c00
-#define CE3_BASE_ADDRESS 0x00058000
-#define CE4_BASE_ADDRESS 0x00058400
-#define CE5_BASE_ADDRESS 0x00058800
-#define CE6_BASE_ADDRESS 0x00058c00
-#define CE7_BASE_ADDRESS 0x00059000
+#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
+#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
+#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
+#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
+#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
+#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
+#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
+#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
+#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
-#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
-#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
+#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
+#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
@@ -245,7 +347,7 @@ struct ath10k_pktlog_hdr {
#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
-#define SOC_CHIP_ID_ADDRESS 0x000000ec
+#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
#define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00
@@ -301,7 +403,7 @@ struct ath10k_pktlog_hdr {
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
-#define SCRATCH_3_ADDRESS 0x0030
+#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c4005670cba2..d6d2f0f00caa 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -27,6 +27,8 @@
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
+#include "wmi.h"
+#include "wmi-ops.h"
/**********/
/* Crypto */
@@ -35,7 +37,7 @@
static int ath10k_send_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr)
+ const u8 *macaddr, bool def_idx)
{
struct ath10k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
@@ -56,10 +58,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
- else
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_TKIP:
arg.key_cipher = WMI_CIPHER_TKIP;
@@ -73,7 +72,13 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
* Otherwise pairwise key must be set */
if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
arg.key_flags = WMI_KEY_PAIRWISE;
+
+ if (def_idx)
+ arg.key_flags |= WMI_KEY_TX_USAGE;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* this one needs to be done in software */
+ return 1;
default:
ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
@@ -90,7 +95,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
static int ath10k_install_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr)
+ const u8 *macaddr, bool def_idx)
{
struct ath10k *ar = arvif->ar;
int ret;
@@ -99,7 +104,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
reinit_completion(&ar->install_key_done);
- ret = ath10k_send_key(arvif, key, cmd, macaddr);
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
if (ret)
return ret;
@@ -117,6 +122,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
struct ath10k_peer *peer;
int ret;
int i;
+ bool def_idx;
lockdep_assert_held(&ar->conf_mutex);
@@ -130,9 +136,14 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
if (arvif->wep_keys[i] == NULL)
continue;
+ /* set TX_USAGE flag for default key id */
+ if (arvif->def_wep_key_idx == i)
+ def_idx = true;
+ else
+ def_idx = false;
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
- addr);
+ addr, def_idx);
if (ret)
return ret;
@@ -166,8 +177,9 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
if (peer->keys[i] == NULL)
continue;
+ /* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
- DISABLE_KEY, addr);
+ DISABLE_KEY, addr, false);
if (ret && first_errno == 0)
first_errno = ret;
@@ -241,8 +253,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
if (i == ARRAY_SIZE(peer->keys))
break;
-
- ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
if (ret && first_errno == 0)
first_errno = ret;
@@ -267,7 +279,10 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
case IEEE80211_BAND_2GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
- phymode = MODE_11G;
+ if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
+ phymode = MODE_11B;
+ else
+ phymode = MODE_11G;
break;
case NL80211_CHAN_WIDTH_20:
phymode = MODE_11NG_HT20;
@@ -519,10 +534,14 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
+ if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
+ arvif->beacon_state != ATH10K_BEACON_SENT))
+ return;
+
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
- arvif->beacon_sent = false;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
}
static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
@@ -962,6 +981,143 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
return ret;
}
+static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return 0;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ size_t len;
+ const u8 *next;
+ const u8 *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!bcn) {
+ ath10k_warn(ar, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
+ kfree_skb(bcn);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as configured above)
+ * so remove it from the base beacon template to avoid duplicate P2P
+ * IEs in beacon frames.
+ */
+ ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+
+ ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
+ 0, NULL, 0);
+ kfree_skb(bcn);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit beacon template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct sk_buff *prb;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ prb = ieee80211_proberesp_get(hw, vif);
+ if (!prb) {
+ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
+ kfree_skb(prb);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -1046,28 +1202,85 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
arvif->vdev_id, ret);
}
-/*
- * Review this when mac80211 gains per-interface powersave support.
- */
+static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
+ else
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+
+ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
+ else
+ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+
+ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
+ int ps_timeout;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (arvif->vif->type != NL80211_IFTYPE_STATION)
return 0;
- if (conf->flags & IEEE80211_CONF_PS) {
+ if (vif->bss_conf.ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+ ps_timeout = conf->dynamic_ps_timeout;
+ if (ps_timeout == 0) {
+ /* Firmware doesn't like 0 */
+ ps_timeout = ieee80211_tu_to_usec(
+ vif->bss_conf.beacon_int) / 1000;
+ }
+
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
- conf->dynamic_ps_timeout);
+ ps_timeout);
if (ret) {
ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
@@ -1090,6 +1303,38 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
return 0;
}
+static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
+ return 0;
+
+ /* Some firmware revisions have a bug and ignore the `enabled` field.
+ * Instead use the interval to disable the keepalive.
+ */
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
+ arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
+
+ ret = ath10k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**********************/
/* Station management */
/**********************/
@@ -1358,6 +1603,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
return;
arg->peer_flags |= WMI_PEER_VHT;
+
+ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ arg->peer_flags |= WMI_PEER_VHT_2G;
+
arg->peer_vht_caps = vht_cap->cap;
ampdu_factor = (vht_cap->cap &
@@ -1409,9 +1658,22 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
if (vif->bss_conf.qos)
arg->peer_flags |= WMI_PEER_QOS;
break;
+ case WMI_VDEV_TYPE_IBSS:
+ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+ break;
default:
break;
}
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
+}
+
+static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+{
+ /* First 4 rates in ath10k_rates are CCK (11b) rates. */
+ return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
}
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
@@ -1423,13 +1685,20 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
- if (sta->ht_cap.ht_supported) {
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else {
+ } else if (ath10k_mac_sta_has_11g_rates(sta)) {
phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
}
break;
@@ -1603,7 +1872,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
ath10k_warn(ar, "faield to down vdev %i: %d\n",
arvif->vdev_id, ret);
- arvif->def_wep_key_idx = 0;
+ arvif->def_wep_key_idx = -1;
+
arvif->is_up = false;
}
@@ -1662,11 +1932,14 @@ static int ath10k_station_assoc(struct ath10k *ar,
}
}
- ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
- if (ret) {
- ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
+ /* Plumb cached keys only for static WEP */
+ if (arvif->def_wep_key_idx != -1) {
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
}
}
@@ -1931,75 +2204,13 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
* used only for CQM purposes (e.g. hostapd station keepalive ping) so
* it is safe to downgrade to NullFunc.
*/
+ hdr = (void *)skb->data;
if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
}
-static void ath10k_tx_wep_key_work(struct work_struct *work)
-{
- struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
- wep_key_work);
- struct ath10k *ar = arvif->ar;
- int ret, keyidx = arvif->def_wep_key_newidx;
-
- mutex_lock(&arvif->ar->conf_mutex);
-
- if (arvif->ar->state != ATH10K_STATE_ON)
- goto unlock;
-
- if (arvif->def_wep_key_idx == keyidx)
- goto unlock;
-
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
- arvif->vdev_id, keyidx);
-
- ret = ath10k_wmi_vdev_set_param(arvif->ar,
- arvif->vdev_id,
- arvif->ar->wmi.vdev_param->def_keyid,
- keyidx);
- if (ret) {
- ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
- arvif->vdev_id,
- ret);
- goto unlock;
- }
-
- arvif->def_wep_key_idx = keyidx;
-
-unlock:
- mutex_unlock(&arvif->ar->conf_mutex);
-}
-
-static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *key,
- struct sk_buff *skb)
-{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- struct ath10k *ar = arvif->ar;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
- if (!ieee80211_has_protected(hdr->frame_control))
- return;
-
- if (!key)
- return;
-
- if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
- key->cipher != WLAN_CIPHER_SUITE_WEP104)
- return;
-
- if (key->keyidx == arvif->def_wep_key_idx)
- return;
-
- /* FIXME: Most likely a few frames will be TXed with an old key. Simply
- * queueing frames until key index is updated is not an option because
- * sk_buff may need more processing to be done, e.g. offchannel */
- arvif->def_wep_key_newidx = key->keyidx;
- ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
-}
-
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
struct ieee80211_vif *vif,
struct sk_buff *skb)
@@ -2151,7 +2362,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
3 * HZ);
- if (ret <= 0)
+ if (ret == 0)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
@@ -2213,6 +2424,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
case ATH10K_SCAN_RUNNING:
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
+ /* fall through */
case ATH10K_SCAN_ABORTING:
if (!ar->scan.is_roc)
ieee80211_scan_completed(ar->hw,
@@ -2359,7 +2571,6 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
- struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
/* We should disable CCK RATE due to P2P */
@@ -2373,7 +2584,6 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* it makes no sense to process injected frames like that */
if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
ath10k_tx_h_nwifi(hw, skb);
- ath10k_tx_h_update_wep_key(vif, key, skb);
ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
ath10k_tx_h_seq_no(vif, skb);
}
@@ -2871,6 +3081,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int bit;
u32 vdev_param;
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
mutex_lock(&ar->conf_mutex);
memset(arvif, 0, sizeof(*arvif));
@@ -2878,7 +3090,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->ar = ar;
arvif->vif = vif;
- INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
INIT_LIST_HEAD(&arvif->list);
if (ar->free_vdev_map == 0) {
@@ -2894,10 +3105,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
- if (ar->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
-
switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -2966,15 +3178,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
- vdev_param = ar->wmi.vdev_param->def_keyid;
- ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
- arvif->def_wep_key_idx);
+ /* It makes no sense to have firmware do keepalives. mac80211 already
+ * takes care of this with idle connection polling.
+ */
+ ret = ath10k_mac_vif_disable_keepalive(arvif);
if (ret) {
- ath10k_warn(ar, "failed to set vdev %i default key id: %d\n",
+ ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+ arvif->def_wep_key_idx = -1;
+
vdev_param = ar->wmi.vdev_param->tx_encap_type;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
ATH10K_HW_TXRX_NATIVE_WIFI);
@@ -3026,22 +3241,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
- param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
- value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
- param, value);
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
if (ret) {
- ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n",
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
- param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
- value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
- param, value);
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
if (ret) {
- ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n",
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -3099,8 +3308,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
- cancel_work_sync(&arvif->wep_key_work);
-
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -3211,9 +3418,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (ret)
ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
arvif->vdev_id, ret);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update beacon template: %d\n",
+ ret);
}
- if (changed & BSS_CHANGED_BEACON_INFO) {
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
arvif->dtim_period = info->dtim_period;
ath10k_dbg(ar, ATH10K_DBG_MAC,
@@ -3314,6 +3533,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
}
+ if (changed & BSS_CHANGED_PS) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3453,6 +3679,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104;
+ bool def_idx = false;
int ret = 0;
if (key->keyidx > WMI_MAX_KEY_INDEX)
@@ -3498,7 +3725,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ath10k_clear_vdev_key(arvif, key);
}
- ret = ath10k_install_key(arvif, key, cmd, peer_addr);
+ /* Set the TX_USAGE flag for all keys in case of dot1x-WEP. For
+ * static WEP, do not set this flag for keys whose key id is
+ * greater than the default key id.
+ */
+ if (arvif->def_wep_key_idx == -1)
+ def_idx = true;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
if (ret) {
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
@@ -3523,6 +3757,39 @@ exit:
return ret;
}
+static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int keyidx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+
+ mutex_lock(&arvif->ar->conf_mutex);
+
+ if (arvif->ar->state != ATH10K_STATE_ON)
+ goto unlock;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
+ goto unlock;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+unlock:
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
static void ath10k_sta_rc_update_wk(struct work_struct *wk)
{
struct ath10k *ar;
@@ -3583,8 +3850,9 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+ changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -3757,6 +4025,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
u16 ac, bool enable)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_sta_uapsd_auto_trig_arg arg = {};
+ u32 prio = 0, acc = 0;
u32 value = 0;
int ret = 0;
@@ -3769,18 +4039,26 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
case IEEE80211_AC_VO:
value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ prio = 7;
+ acc = 3;
break;
case IEEE80211_AC_VI:
value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ prio = 5;
+ acc = 2;
break;
case IEEE80211_AC_BE:
value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ prio = 2;
+ acc = 1;
break;
case IEEE80211_AC_BK:
value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ prio = 0;
+ acc = 0;
break;
}
@@ -3808,6 +4086,43 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
if (ret)
ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
+ /* Only userspace can make an educated decision on when to send
+ * a trigger frame. The following effectively disables the UAPSD
+ * autotrigger in firmware (which is enabled by default
+ * provided the autotrigger service is available).
+ */
+
+ arg.wmm_ac = acc;
+ arg.user_priority = prio;
+ arg.service_interval = 0;
+ arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+ arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+
+ ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
+ arvif->bssid, &arg, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
+ ret);
+ return ret;
+ }
+ }
+
exit:
return ret;
}
@@ -3817,6 +4132,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params)
{
struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_wmm_params_arg *p = NULL;
int ret;
@@ -3824,16 +4140,16 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
switch (ac) {
case IEEE80211_AC_VO:
- p = &ar->wmm_params.ac_vo;
+ p = &arvif->wmm_params.ac_vo;
break;
case IEEE80211_AC_VI:
- p = &ar->wmm_params.ac_vi;
+ p = &arvif->wmm_params.ac_vi;
break;
case IEEE80211_AC_BE:
- p = &ar->wmm_params.ac_be;
+ p = &arvif->wmm_params.ac_be;
break;
case IEEE80211_AC_BK:
- p = &ar->wmm_params.ac_bk;
+ p = &arvif->wmm_params.ac_bk;
break;
}
@@ -3853,11 +4169,23 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
*/
p->txop = params->txop * 32;
- /* FIXME: FW accepts wmm params per hw, not per vif */
- ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
- if (ret) {
- ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
- goto exit;
+ if (ar->wmi.ops->gen_vdev_wmm_conf) {
+ ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+ } else {
+ /* This won't work well with multi-interface cases but it's
+ * better than nothing.
+ */
+ ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
}
ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
@@ -3989,29 +4317,6 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
-{
- struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif;
- int ret = 0;
-
- mutex_lock(&ar->conf_mutex);
- list_for_each_entry(arvif, &ar->arvifs, list) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
- arvif->vdev_id, value);
-
- ret = ath10k_mac_set_frag(arvif, value);
- if (ret) {
- ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
- arvif->vdev_id, ret);
- break;
- }
- }
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
-}
-
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -4650,12 +4955,12 @@ static const struct ieee80211_ops ath10k_ops = {
.hw_scan = ath10k_hw_scan,
.cancel_hw_scan = ath10k_cancel_hw_scan,
.set_key = ath10k_set_key,
+ .set_default_unicast_key = ath10k_set_default_unicast_key,
.sta_state = ath10k_sta_state,
.conf_tx = ath10k_conf_tx,
.remain_on_channel = ath10k_remain_on_channel,
.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
.set_rts_threshold = ath10k_set_rts_threshold,
- .set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
.set_antenna = ath10k_set_antenna,
@@ -4676,6 +4981,9 @@ static const struct ieee80211_ops ath10k_ops = {
.suspend = ath10k_suspend,
.resume = ath10k_resume,
#endif
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = ath10k_sta_add_debugfs,
+#endif
};
#define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -4746,6 +5054,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(165, 5825, 0),
};
+/* Note: Be careful if you re-order these. There is code which depends on this
+ * ordering.
+ */
static struct ieee80211_rate ath10k_rates[] = {
/* CCK */
RATETAB_ENT(10, 0x82, 0),
@@ -4799,6 +5110,10 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
.types = BIT(NL80211_IFTYPE_P2P_GO)
},
{
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
.max = 7,
.types = BIT(NL80211_IFTYPE_AP)
},
@@ -4956,6 +5271,13 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
int ath10k_mac_register(struct ath10k *ar)
{
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ };
struct ieee80211_supported_band *band;
struct ieee80211_sta_vht_cap vht_cap;
struct ieee80211_sta_ht_cap ht_cap;
@@ -4985,7 +5307,8 @@ int ath10k_mac_register(struct ath10k *ar)
band->bitrates = ath10k_g_rates;
band->ht_cap = ht_cap;
- /* vht is not supported in 2.4 GHz */
+ /* Enable VHT support at 2.4 GHz */
+ band->vht_cap = vht_cap;
ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}
@@ -5018,18 +5341,19 @@ int ath10k_mac_register(struct ath10k *ar)
if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
ar->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_AP_LINK_PS |
- IEEE80211_HW_SPECTRUM_MGMT;
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_SW_CRYPTO_CONTROL;
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
@@ -5049,6 +5373,19 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+ if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ /* Firmware delivers WPS/P2P Probe Request frames to the driver so
+ * that userspace (e.g. wpa_supplicant/hostapd) can generate correct
+ * Probe Responses. This is more of a capability advertisement.
+ */
+ ar->hw->wiphy->probe_resp_offload |=
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ }
+
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -5062,16 +5399,26 @@ int ath10k_mac_register(struct ath10k *ar)
*/
ar->hw->queues = 4;
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
- ar->hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(ath10k_10x_if_comb);
- } else {
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_if_comb);
-
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err_free;
}
ar->hw->netdev_features = NETIF_F_HW_CSUM;
@@ -5093,6 +5440,9 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 7abb8367119a..e6972b09333e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,12 +58,27 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
#define QCA988X_2_0_DEVICE_ID (0x003c)
+#define QCA6174_2_1_DEVICE_ID (0x003e)
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{0}
};
+static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
+ /* QCA988X pre-2.0 chips are not supported because they need some nasty
+ * hacks which ath10k doesn't implement; without them these devices
+ * crash horribly.
+ */
+ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+};
+
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
@@ -395,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
return -EIO;
}
- ATH10K_SKB_CB(skb)->paddr = paddr;
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
if (ret) {
@@ -864,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
&flags) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
- dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
@@ -1230,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
ce_ring->per_transfer_context[i] = NULL;
- dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1498,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->pdev->device) {
+ case QCA988X_2_0_DEVICE_ID:
+ return 1;
+ case QCA6174_2_1_DEVICE_ID:
+ switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
+ case QCA6174_HW_1_0_CHIP_ID_REV:
+ case QCA6174_HW_1_1_CHIP_ID_REV:
+ return 3;
+ case QCA6174_HW_1_3_CHIP_ID_REV:
+ return 2;
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
+ return 6;
+ case QCA6174_HW_3_0_CHIP_ID_REV:
+ case QCA6174_HW_3_1_CHIP_ID_REV:
+ case QCA6174_HW_3_2_CHIP_ID_REV:
+ return 9;
+ }
+ break;
+ }
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
static int ath10k_pci_init_config(struct ath10k *ar)
{
u32 interconnect_targ_addr;
@@ -1608,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
/* first bank is switched to IRAM */
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
HI_EARLY_ALLOC_MAGIC_MASK);
- ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+ ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
+ HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
@@ -1804,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_chip_reset(struct ath10k *ar)
+static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
int i, ret;
u32 val;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
* It is thus preferred to use warm reset which is safer but may not be
@@ -1873,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
return ret;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
+
+ /* FIXME: QCA6174 requires cold + warm reset to work. */
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
return 0;
}
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+ if (QCA_REV_988X(ar))
+ return ath10k_pci_qca988x_chip_reset(ar);
+ else if (QCA_REV_6174(ar))
+ return ath10k_pci_qca6174_chip_reset(ar);
+ else
+ return -ENOTSUPP;
+}
+
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
@@ -1902,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/
ret = ath10k_pci_chip_reset(ar);
if (ret) {
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_warn(ar, "firmware crashed during chip reset\n");
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ }
+
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_sleep;
}
@@ -2033,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data)
return;
}
+ ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
}
@@ -2102,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
return;
@@ -2344,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_EVENT_PENDING) {
ath10k_warn(ar, "device has crashed during init\n");
- ath10k_pci_fw_crashed_clear(ar);
- ath10k_pci_fw_crashed_dump(ar);
return -ECOMM;
}
@@ -2476,17 +2569,46 @@ static void ath10k_pci_release(struct ath10k *ar)
pci_disable_device(pdev);
}
+static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
+{
+ const struct ath10k_pci_supp_chip *supp_chip;
+ int i;
+ u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
+ supp_chip = &ath10k_pci_supp_chips[i];
+
+ if (supp_chip->dev_id == dev_id &&
+ supp_chip->rev_id == rev_id)
+ return true;
+ }
+
+ return false;
+}
+
static int ath10k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
+ enum ath10k_hw_rev hw_rev;
u32 chip_id;
- ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
- ATH10K_BUS_PCI,
- &ath10k_pci_hif_ops);
+ switch (pci_dev->device) {
+ case QCA988X_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA988X;
+ break;
+ case QCA6174_2_1_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA6174;
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
+ hw_rev, &ath10k_pci_hif_ops);
if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n");
return -ENOMEM;
@@ -2515,12 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_release;
}
- chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (chip_id == 0xffffffff) {
- ath10k_err(ar, "failed to get chip id\n");
- goto err_sleep;
- }
-
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2547,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ goto err_free_irq;
+ }
+
+ if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, chip_id);
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+
ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index cf36511c7f4d..bddf54320160 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -152,6 +152,11 @@ struct ath10k_pci_pipe {
struct tasklet_struct intr;
};
+struct ath10k_pci_supp_chip {
+ u32 dev_id;
+ u32 rev_id;
+};
+
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
@@ -189,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
-#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
+#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
#define BAR_NUM 0
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index e1ffdd57a18c..e9cc7787bf5f 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -850,7 +850,7 @@ struct rx_ppdu_start {
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
-struct rx_ppdu_end {
+struct rx_ppdu_end_common {
__le32 evm_p0;
__le32 evm_p1;
__le32 evm_p2;
@@ -873,10 +873,33 @@ struct rx_ppdu_end {
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
+} __packed;
+
+struct rx_ppdu_end_qca988x {
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
+#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
+#define RX_PPDU_END_RTT_UNUSED_LSB 24
+#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
+
+struct rx_ppdu_end_qca6174 {
+ __le32 rtt; /* %RX_PPDU_END_RTT_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end {
+ struct rx_ppdu_end_common common;
+ union {
+ struct rx_ppdu_end_qca988x qca988x;
+ struct rx_ppdu_end_qca6174 qca6174;
+ } __packed;
+} __packed;
+
/*
* evm_p0
* EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
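The rx_ppdu_end rework above keeps the chip-agnostic fields in rx_ppdu_end_common and moves the hardware-specific tail into a union. A minimal sketch of how a consumer could pick the right tail, assuming the QCA_REV_* helpers used elsewhere in this series (the wrapper function itself is hypothetical, not part of the patch):

/* Hypothetical helper, for illustration only: selects the per-chip tail. */
static u16 ath10k_rx_ppdu_end_bb_length(struct ath10k *ar,
					const struct rx_ppdu_end *ppdu_end)
{
	if (QCA_REV_6174(ar))
		return __le16_to_cpu(ppdu_end->qca6174.bb_length);

	return __le16_to_cpu(ppdu_end->qca988x.bb_length);
}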
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 63ce61fcdac8..d22addf6118b 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -17,6 +17,7 @@
#include <linux/relay.h>
#include "core.h"
#include "debug.h"
+#include "wmi-ops.h"
static void send_fft_sample(struct ath10k *ar,
const struct fft_sample_tlv *fft_sample_tlv)
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 9d0ae30f9ff1..a417aae52623 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -18,6 +18,8 @@
#ifndef __TARGADDRS_H__
#define __TARGADDRS_H__
+#include "hw.h"
+
/*
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
* host_interest structure. It must match the address of the _host_interest
@@ -445,4 +447,7 @@ Fw Mode/SubMode Mask
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
+#define QCA6174_BOARD_DATA_SZ 8192
+#define QCA6174_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 483db9cb8c96..b084f88da102 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -187,13 +187,14 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
memcpy(ar->testmode.orig_fw_features, ar->fw_features,
sizeof(ar->fw_features));
+ ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
/* utf.bin firmware image does not advertise firmware features. Do
* an ugly hack where we force the firmware features so that wmi.c
* will use the correct WMI interface.
*/
memset(ar->fw_features, 0, sizeof(ar->fw_features));
- __set_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features);
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
ret = ath10k_hif_power_up(ar);
if (ret) {
@@ -224,6 +225,7 @@ err_fw_features:
/* return the original firmware features */
memcpy(ar->fw_features, ar->testmode.orig_fw_features,
sizeof(ar->fw_features));
+ ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
release_firmware(ar->testmode.utf);
ar->testmode.utf = NULL;
@@ -250,6 +252,7 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
/* return the original firmware features */
memcpy(ar->fw_features, ar->testmode.orig_fw_features,
sizeof(ar->fw_features));
+ ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
release_firmware(ar->testmode.utf);
ar->testmode.utf = NULL;
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
new file mode 100644
index 000000000000..aede750809fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
+ enum wmi_vdev_type type)
+{
+ struct ath10k_vif *arvif;
+ int count = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (!arvif->is_up)
+ continue;
+
+ if (arvif->vdev_type != type)
+ continue;
+
+ count++;
+ }
+ return count;
+}
+
+static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+
+ return 0;
+}
+
+static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath10k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.duty_cycle;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
+ unsigned long duty_cycle)
+{
+ struct ath10k *ar = cdev->devdata;
+ u32 period, duration, enabled;
+ int num_bss, ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
+ ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
+ duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
+ ret = -EINVAL;
+ goto out;
+ }
+ /* TODO: Right now, thermal mitigation is handled only for single/multi
+ * vif AP mode. The quiet param is not validated in STA mode, so further
+ * work is needed to properly handle multi-STA and multi-vif (AP+STA)
+ * modes.
+ */
+ num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
+ if (!num_bss) {
+ ath10k_warn(ar, "no active AP interfaces\n");
+ ret = -ENETDOWN;
+ goto out;
+ }
+ period = max(ATH10K_QUIET_PERIOD_MIN,
+ (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
+ duration = (period * duty_cycle) / 100;
+ enabled = duration ? 1 : 0;
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ goto out;
+ }
+ ar->thermal.duty_cycle = duty_cycle;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct thermal_cooling_device_ops ath10k_thermal_ops = {
+ .get_max_state = ath10k_thermal_get_max_dutycycle,
+ .get_cur_state = ath10k_thermal_get_cur_dutycycle,
+ .set_cur_state = ath10k_thermal_set_cur_dutycycle,
+};
+
+static ssize_t ath10k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath10k_wmi_pdev_get_temperature(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegree Celsius */
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath10k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath10k_hwmon);
+
+int ath10k_thermal_register(struct ath10k *ar)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ int ret;
+
+ cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+ &ath10k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath10k_err(ar, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath10k_err(ar, "failed to create thermal symlink\n");
+ goto err_cooling_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+
+ /* Do not register hwmon device when temperature reading is not
+ * supported by firmware
+ */
+ if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
+ return 0;
+
+ /* Avoid a linking error on devm_hwmon_device_register_with_groups()
+ * when CONFIG_HWMON is disabled; linux/hwmon.h appears to be missing
+ * proper stubs. */
+ if (!config_enabled(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
+ "ath10k_hwmon", ar,
+ ath10k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath10k_err(ar, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_remove_link;
+ }
+ return 0;
+
+err_remove_link:
+ sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+err_cooling_destroy:
+ thermal_cooling_device_unregister(cdev);
+ return ret;
+}
+
+void ath10k_thermal_unregister(struct ath10k *ar)
+{
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+}
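For reference, the duty-cycle to quiet-parameter math in ath10k_thermal_set_cur_dutycycle() works out as follows; a small sketch with assumed inputs (two active AP vdevs, a requested duty cycle of 40%):

/* Assumed inputs, for illustration only. */
int num_bss = 2;               /* active AP vdevs */
unsigned long duty_cycle = 40; /* state requested by the thermal core */

u32 period = max(ATH10K_QUIET_PERIOD_MIN,
		 ATH10K_QUIET_PERIOD_DEFAULT / num_bss); /* max(25, 50) = 50 ms */
u32 duration = (period * duty_cycle) / 100;              /* 20 ms quiet per period */
u32 enabled = duration ? 1 : 0;                          /* quiet mode enabled */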
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 000000000000..bccc17ae0fde
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _THERMAL_
+#define _THERMAL_
+
+#define ATH10K_QUIET_PERIOD_DEFAULT 100
+#define ATH10K_QUIET_PERIOD_MIN 25
+#define ATH10K_QUIET_START_OFFSET 10
+#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
+#define ATH10K_HWMON_NAME_LEN 15
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+
+struct ath10k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 duty_cycle;
+ /* temperature value in degrees Celsius
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#ifdef CONFIG_THERMAL
+int ath10k_thermal_register(struct ath10k *ar);
+void ath10k_thermal_unregister(struct ath10k *ar);
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+#else
+static inline int ath10k_thermal_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_thermal_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
+ int temperature)
+{
+}
+
+#endif
+#endif /* _THERMAL_ */
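The #else stubs above let callers invoke the thermal hooks unconditionally; with CONFIG_THERMAL disabled they compile to no-ops, so no #ifdef is needed at the call site. A hedged sketch of a caller (the function below is hypothetical; the real call sites live outside this hunk):

/* Hypothetical caller, for illustration only. */
static int example_core_start(struct ath10k *ar)
{
	int ret;

	ret = ath10k_thermal_register(ar); /* stubbed out without CONFIG_THERMAL */
	if (ret)
		return ret;

	/* ... remaining start-up ... */
	return 0;
}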
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index b289378b6e3e..5407887380ab 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc,
)
);
+TRACE_EVENT(ath10k_wmi_diag_container,
+ TP_PROTO(struct ath10k *ar,
+ u8 type,
+ u32 timestamp,
+ u32 code,
+ u16 len,
+ const void *data),
+
+ TP_ARGS(ar, type, timestamp, code, len, data),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, type)
+ __field(u32, timestamp)
+ __field(u32, code)
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->type = type;
+ __entry->timestamp = timestamp;
+ __entry->code = code;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s diag container type %hhu timestamp %u code %u len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->type,
+ __entry->timestamp,
+ __entry->code,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
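Each TRACE_EVENT() above generates a trace_<name>() helper, so the new diag events would be emitted roughly as below; the call sites (presumably in the WMI diag RX path) are outside this hunk and the local variables are assumptions:

/* Assumed call sites, for illustration only. */
trace_ath10k_wmi_diag(ar, skb->data, skb->len);
trace_ath10k_wmi_diag_container(ar, type, timestamp, code, len, data);
/* where type/timestamp/code/len/data would be parsed from the diag
 * container header before tracing its payload.
 */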
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 7579de8e7a8c..3f00cec8aef5 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
return;
}
- msdu = htt->pending_tx[tx_done->msdu_id];
+ msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
+ if (!msdu) {
+ ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+ tx_done->msdu_id);
+ return;
+ }
+
skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
/* we do not own the msdu anymore */
exit:
- htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 000000000000..04dc4b9db04e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_OPS_H_
+#define _WMI_OPS_H_
+
+struct ath10k;
+struct sk_buff;
+
+struct wmi_ops {
+ void (*rx)(struct ath10k *ar, struct sk_buff *skb);
+ void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+
+ int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg);
+ int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg);
+ int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg);
+ int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg);
+ int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg);
+ int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg);
+ int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg);
+ int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg);
+ int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats);
+
+ struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
+ struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
+ struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
+ u32 value);
+ struct sk_buff *(*gen_init)(struct ath10k *ar);
+ struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg);
+ struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg);
+ struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN]);
+ struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart);
+ struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+ struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+ struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+ struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+ struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ u32 tid_bitmap);
+ struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value);
+ struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg);
+ struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode);
+ struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg);
+ struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab);
+ struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
+ enum wmi_stats_id stats_id);
+ struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms);
+ struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+ u32 log_level);
+ struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
+ struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
+ u32 period, u32 duration,
+ u32 next_offset,
+ u32 enabled);
+ struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
+ struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac);
+ struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 buf_size);
+ struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid,
+ u32 status);
+ struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 initiator,
+ u32 reason);
+ struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len);
+ struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *bcn);
+ struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
+ struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac);
+ struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+};
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+
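+/* Every wrapper below first checks that the ABI-specific op is implemented
+ * and returns -EOPNOTSUPP if not. The rx/map/pull helpers then dispatch to
+ * the op directly; the command wrappers let the op build the sk_buff and
+ * hand it to ath10k_wmi_cmd_send() with the matching per-ABI command id.
+ */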
+static inline int
+ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (WARN_ON_ONCE(!ar->wmi.ops->rx))
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->rx(ar, skb);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc(in, out, len);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_scan)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_scan(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_rx)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_ch_info)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_ch_info(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_vdev_start)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_peer_kick)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_swba)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_swba(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ if (!ar->wmi.ops->pull_fw_stats)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
+}
+
+static inline int
+ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_mgmt_tx)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret)
+ return ret;
+
+ /* FIXME: There's no ACK event for Management Tx. This probably
+ * shouldn't be called here either.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_rd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
+ dfs_reg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_suspend)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_resume)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_resume(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_init)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_init(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+}
+
+static inline int
+ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_start_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_start_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_stop_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_stop_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_start_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_restart_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_stop)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_up)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_down)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
+ u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_install_key)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_spectral_conf)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_spectral_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+ enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_sta_uapsd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
+ num_ac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_wmm_conf)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_flush)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
+ enum wmi_peer_param param_id, u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_psmode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_sta_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_ap_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_scan_chan_list)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_assoc)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+}
+
+static inline int
+ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_beacon_dma)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
+ dtim_zero, deliver_cab);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+ ar->wmi.cmd->pdev_send_bcn_cmdid);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_wmm)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_stats)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+}
+
+static inline int
+ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_force_fw_hang)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+}
+
+static inline int
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_dbglog_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_disable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_disable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
+ u32 next_offset, u32 enabled)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
+ next_offset, enabled);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_temperature)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_temperature_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_clear_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_clear_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_set_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_set_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_delba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
+ reason);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->delba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
+ struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_bcn_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
+ prb_caps, prb_erp, prb_ies,
+ prb_ies_len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_prb_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
+}
+
+static inline int
+ath10k_wmi_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_sta_keepalive)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
new file mode 100644
index 000000000000..71614ba1b145
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -0,0 +1,2696 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "core.h"
+#include "debug.h"
+#include "hw.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+#include "wmi-tlv.h"
+
+/***************/
+/* TLV helpers */
+/***************/
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TLV_TAG_ARRAY_BYTE]
+ = { .min_len = sizeof(u8) },
+ [WMI_TLV_TAG_ARRAY_UINT32]
+ = { .min_len = sizeof(u32) },
+ [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
+ [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_response_event) },
+ [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
+ = { .min_len = sizeof(struct wmi_host_swba_event) },
+ [WMI_TLV_TAG_STRUCT_TIM_INFO]
+ = { .min_len = sizeof(struct wmi_tim_info) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
+ = { .min_len = sizeof(struct wmi_p2p_noa_info) },
+ [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct hal_reg_capabilities) },
+ [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
+ = { .min_len = sizeof(struct wlan_host_mem_req) },
+ [WMI_TLV_TAG_STRUCT_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
+ [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+};
+
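+/* Walk a flat TLV stream: each element starts with a struct wmi_tlv header
+ * (16-bit tag and 16-bit length, little endian) followed by that many bytes
+ * of value. The callback is invoked with a pointer to the value of every
+ * element that passes the header and minimum-length checks.
+ */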
+static int
+ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
+ int (*iter)(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = __le16_to_cpu(tlv->tag);
+ tlv_len = __le16_to_cpu(tlv->len);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ar, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TLV_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
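+/* The parse helpers above store pointers to TLV values, so each element's
+ * header sits immediately before the stored pointer and its length can be
+ * recovered from there.
+ */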
+static u16 ath10k_wmi_tlv_len(const void *ptr)
+{
+ return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
+}
+
+/**************/
+/* TLV events */
+/**************/
+
+static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_bcn_tx_status_ev *ev;
+ u32 vdev_id, tx_status;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ tx_status = __le32_to_cpu(ev->tx_status);
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ switch (tx_status) {
+ case WMI_TLV_BCN_TX_STATUS_OK:
+ break;
+ case WMI_TLV_BCN_TX_STATUS_XRETRY:
+ case WMI_TLV_BCN_TX_STATUS_DROP:
+ case WMI_TLV_BCN_TX_STATUS_FILTERED:
+ /* FIXME: It's probably worth telling mac80211 to stop the
+ * interface as it is crippled.
+ */
+ ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
+ vdev_id, tx_status);
+ break;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_diag_data_ev *ev;
+ const struct wmi_tlv_diag_item *item;
+ const void *data;
+ int ret, num_items, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_items = __le32_to_cpu(ev->num_items);
+ len = ath10k_wmi_tlv_len(data);
+
+ while (num_items--) {
+ if (len == 0)
+ break;
+ if (len < sizeof(*item)) {
+ ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
+ break;
+ }
+
+ item = data;
+
+ if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
+ ath10k_warn(ar, "failed to parse diag data: item is too long\n");
+ break;
+ }
+
+ trace_ath10k_wmi_diag_container(ar,
+ item->type,
+ __le32_to_cpu(item->timestamp),
+ __le32_to_cpu(item->code),
+ __le16_to_cpu(item->len),
+ item->payload);
+
+ len -= sizeof(*item);
+ len -= roundup(__le16_to_cpu(item->len), 4);
+
+ data += sizeof(*item);
+ data += roundup(__le16_to_cpu(item->len), 4);
+ }
+
+ if (num_items != -1 || len != 0)
+ ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
+ num_items, len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const void *data;
+ int ret, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+ len = ath10k_wmi_tlv_len(data);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
+ trace_ath10k_wmi_diag(ar, data, len);
+
+ kfree(tb);
+ return 0;
+}
+
+/***********/
+/* TLV ops */
+/***********/
+
+static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_TLV_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_TLV_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_TLV_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_TLV_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_TLV_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_TLV_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_TLV_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_TLV_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_TLV_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_TLV_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_TLV_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_TLV_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_TLV_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_TLV_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_TLV_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_TLV_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
+ break;
+ case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
+ ath10k_wmi_tlv_event_diag_data(ar, skb);
+ break;
+ case WMI_TLV_DIAG_EVENTID:
+ ath10k_wmi_tlv_event_diag(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_rx_ev *ev;
+ const u8 *frame;
+ u32 msdu_len;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
+ frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->channel = ev->channel;
+ arg->buf_len = ev->buf_len;
+ arg->status = ev->status;
+ arg->snr = ev->snr;
+ arg->phy_mode = ev->phy_mode;
+ arg->rate = ev->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+
+ if (skb->len < (frame - skb->data) + msdu_len) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* shift the sk_buff to point to `frame` */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, msdu_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_start_response_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
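+/* A host SWBA event consists of a fixed event struct followed by two nested
+ * ARRAY_STRUCT TLVs: the first carries one TIM info entry per beaconing
+ * vdev, the second one P2P NoA info entry per vdev. The parsers below
+ * collect pointers to both arrays in their order of appearance.
+ */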
+struct wmi_tlv_swba_parse {
+ const struct wmi_host_swba_event *ev;
+ bool tim_done;
+ bool noa_done;
+ size_t n_tim;
+ size_t n_noa;
+ struct wmi_swba_ev_arg *arg;
+};
+
+static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+
+ if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
+ return -EPROTO;
+
+ if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
+ return -ENOBUFS;
+
+ swba->arg->tim_info[swba->n_tim++] = ptr;
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+
+ if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
+ return -EPROTO;
+
+ if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
+ return -ENOBUFS;
+
+ swba->arg->noa_info[swba->n_noa++] = ptr;
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
+ swba->ev = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ if (!swba->tim_done) {
+ swba->tim_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_tim_parse,
+ swba);
+ if (ret)
+ return ret;
+ } else if (!swba->noa_done) {
+ swba->noa_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_noa_parse,
+ swba);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_tlv_swba_parse swba = { .arg = arg };
+ u32 map;
+ size_t n_vdevs;
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_swba_parse, &swba);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ if (!swba.ev)
+ return -EPROTO;
+
+ arg->vdev_map = swba.ev->vdev_map;
+
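+ /* Each set bit in vdev_map corresponds to one beaconing vdev; the
+ * firmware is expected to deliver exactly one TIM and one NoA entry
+ * per such vdev.
+ */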
+ for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
+ if (map & BIT(0))
+ n_vdevs++;
+
+ if (n_vdevs != swba.n_tim ||
+ n_vdevs != swba.n_noa)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_phyerr_ev *ev;
+ const void *phyerrs;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
+ phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !phyerrs) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->num_phyerrs = ev->num_phyerrs;
+ arg->tsf_l32 = ev->tsf_l32;
+ arg->tsf_u32 = ev->tsf_u32;
+ arg->buf_len = ev->buf_len;
+ arg->phyerrs = phyerrs;
+
+ kfree(tb);
+ return 0;
+}
+
+#define WMI_TLV_ABI_VER_NS0 0x5F414351
+#define WMI_TLV_ABI_VER_NS1 0x00004C4D
+#define WMI_TLV_ABI_VER_NS2 0x00000000
+#define WMI_TLV_ABI_VER_NS3 0x00000000
+
+#define WMI_TLV_ABI_VER0_MAJOR 1
+#define WMI_TLV_ABI_VER0_MINOR 0
+#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
+ (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
+#define WMI_TLV_ABI_VER1 53
+
+static int
+ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_rdy_ev_arg *arg = data;
+ int i;
+
+ if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
+ return -EPROTO;
+
+ for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
+ if (!arg->mem_reqs[i]) {
+ arg->mem_reqs[i] = ptr;
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ const void **tb;
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
+ reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
+ svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
+ mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+
+ if (!ev || !reg || !svc_bmap || !mem_reqs) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* This is an internal ABI compatibility check for WMI TLV so check it
+ * here instead of the generic WMI code.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
+ __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
+ __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
+ __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
+ __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
+ __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
+
+ if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
+ kfree(tb);
+ return -ENOTSUPP;
+ }
+
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->abi.abi_ver0;
+ arg->sw_ver1 = ev->abi.abi_ver1;
+ arg->fw_build = ev->fw_build_vers;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = reg->eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = svc_bmap;
+ arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+
+ ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
+ ath10k_wmi_tlv_parse_mem_reqs, arg);
+ if (ret) {
+ kfree(tb);
+ ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
+ return ret;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_rdy_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->sw_version = ev->abi.abi_ver0;
+ arg->abi_version = ev->abi.abi_ver1;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_stats_event *ev;
+ const void *data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ size_t data_len;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data_len = ath10k_wmi_tlv_len(data);
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ WARN_ON(1); /* FIXME: not implemented yet */
+
+ kfree(tb);
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
+{
+ struct wmi_tlv_pdev_suspend *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->opt = __cpu_to_le32(opt);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct wmi_tlv_resume_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->reserved = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
+ u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_tlv_pdev_set_rd_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->regd = __cpu_to_le32(rd);
+ cmd->regd_2ghz = __cpu_to_le32(rd2g);
+ cmd->regd_5ghz = __cpu_to_le32(rd5g);
+ cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+ cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
+ u32 param_value)
+{
+ struct wmi_tlv_pdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ struct wmi_tlv_init_cmd *cmd;
+ struct wmi_tlv_resource_config *cfg;
+ struct wmi_host_mem_chunks *chunks;
+ size_t len, chunks_len;
+ void *ptr;
+
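+ /* The INIT command is assembled as three consecutive TLVs: the init cmd
+ * itself, the resource config, and an array-of-struct TLV carrying the
+ * host memory chunks.
+ */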
+ chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*cfg)) +
+ (sizeof(*tlv) + chunks_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
+ tlv->len = __cpu_to_le16(sizeof(*cfg));
+ cfg = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cfg);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chunks_len);
+ chunks = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += chunks_len;
+
+ cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
+ cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
+ cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
+ cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
+ cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
+ cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+ if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+ cfg->num_offload_peers = __cpu_to_le32(3);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+ } else {
+ cfg->num_offload_peers = __cpu_to_le32(0);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+ }
+
+ cfg->num_peer_keys = __cpu_to_le32(2);
+ cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+ cfg->ast_skid_limit = __cpu_to_le32(0x10);
+ cfg->tx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
+ cfg->rx_decap_mode = __cpu_to_le32(1);
+ cfg->scan_max_pending_reqs = __cpu_to_le32(4);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+ cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
+ cfg->num_mcast_groups = __cpu_to_le32(0);
+ cfg->num_mcast_table_elems = __cpu_to_le32(0);
+ cfg->mcast2ucast_mode = __cpu_to_le32(0);
+ cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
+ cfg->num_wds_entries = __cpu_to_le32(0x20);
+ cfg->dma_burst_size = __cpu_to_le32(0);
+ cfg->mac_aggr_delim = __cpu_to_le32(0);
+ cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
+ cfg->vow_config = __cpu_to_le32(0);
+ cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
+ cfg->max_frag_entries = __cpu_to_le32(2);
+ cfg->num_tdls_vdevs = __cpu_to_le32(1);
+ cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
+ cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_multicast_filter_entries = __cpu_to_le32(5);
+ cfg->num_wow_filters = __cpu_to_le32(0x16);
+ cfg->num_keep_alive_pattern = __cpu_to_le32(6);
+ cfg->keep_alive_pattern_size = __cpu_to_le32(0);
+ cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
+ cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+
+ ath10k_wmi_put_host_mem_chunks(ar, chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_tlv_start_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, chan_len, ssid_len, bssid_len, ie_len;
+ __le32 *chans;
+ struct wmi_ssid *ssids;
+ struct wmi_mac_addr *addrs;
+ void *ptr;
+ int i, ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ chan_len = arg->n_channels * sizeof(__le32);
+ ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
+ bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
+ ie_len = roundup(arg->ie_len, 4);
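+ /* The channel, ssid, bssid and ie array TLVs are emitted below even when
+ * empty, so room for their headers is always reserved.
+ */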
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + chan_len) +
+ (sizeof(*tlv) + ssid_len) +
+ (sizeof(*tlv) + bssid_len) +
+ (sizeof(*tlv) + ie_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+ cmd->num_channels = __cpu_to_le32(arg->n_channels);
+ cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
+ cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
+ cmd->ie_len = __cpu_to_le32(arg->ie_len);
+ cmd->num_probes = __cpu_to_le32(3);
+
+ /* FIXME: There are some scan flag inconsistencies across firmwares,
+ * e.g. WMI-TLV inverts the logic behind the following flag.
+ */
+ cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(chan_len);
+ chans = (void *)tlv->value;
+ for (i = 0; i < arg->n_channels; i++)
+ chans[i] = __cpu_to_le32(arg->channels[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += chan_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(ssid_len);
+ ssids = (void *)tlv->value;
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
+ memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += ssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(bssid_len);
+ addrs = (void *)tlv->value;
+ for (i = 0; i < arg->n_bssids; i++)
+ ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
+
+ ptr += sizeof(*tlv);
+ ptr += bssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ie_len);
+ memcpy(tlv->value, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*tlv);
+ ptr += ie_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_vdev_type vdev_type,
+ enum wmi_vdev_subtype vdev_subtype,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(vdev_type);
+ cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_tlv_vdev_start_cmd *cmd;
+ struct wmi_channel *ch;
+ struct wmi_p2p_noa_descriptor *noa;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*ch)) +
+ (sizeof(*tlv) + 0);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ch));
+ ch = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(ch, &arg->channel);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*ch);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = 0;
+ noa = (void *)tlv->value;
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+ ptr += 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
+ if (arg->key_data)
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(arg->key_len, sizeof(__le32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
+ return skb;
+}
+
+static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
+ const struct wmi_sta_uapsd_auto_trig_arg *arg)
+{
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
+ tlv->len = __cpu_to_le16(sizeof(*ac));
+ ac = (void *)tlv->value;
+
+ ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
+ ac->user_priority = __cpu_to_le32(arg->user_priority);
+ ac->service_interval = __cpu_to_le32(arg->service_interval);
+ ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
+ ac->delay_interval = __cpu_to_le32(arg->delay_interval);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
+ arg->wmm_ac, arg->user_priority, arg->service_interval,
+ arg->suspend_interval, arg->delay_interval);
+
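+ /* Return a pointer just past this TLV so the caller can append the
+ * next AC entry directly behind it.
+ */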
+ return ptr + sizeof(*tlv) + sizeof(*ac);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ size_t ac_tlv_len;
+ void *ptr;
+ int i;
+
+ ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + ac_tlv_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->num_ac = __cpu_to_le32(num_ac);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(ac_tlv_len);
+ ac = (void *)tlv->value;
+
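+ /* The per-AC trigger parameters follow as nested TLVs inside this
+ * ARRAY_STRUCT payload.
+ */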
+ ptr += sizeof(*tlv);
+ for (i = 0; i < num_ac; i++)
+ ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
+ return skb;
+}
+
+static void *ath10k_wmi_tlv_put_wmm(void *ptr,
+ const struct wmi_wmm_params_arg *arg)
+{
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
+ tlv->len = __cpu_to_le16(sizeof(*wmm));
+ wmm = (void *)tlv->value;
+ ath10k_wmi_set_wmm_param(wmm, arg);
+
+ return ptr + sizeof(*tlv) + sizeof(*wmm);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_vdev_set_wmm_cmd *cmd;
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (4 * (sizeof(*tlv) + sizeof(*wmm)));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_tlv_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*arp);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->enabled = __cpu_to_le32(arg->enabled);
+ cmd->method = __cpu_to_le32(arg->method);
+ cmd->interval = __cpu_to_le32(arg->interval);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
+ tlv->len = __cpu_to_le16(sizeof(*arp));
+ arp = (void *)tlv->value;
+
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_tlv_peer_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+ ether_addr_copy(cmd->peer_addr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_tlv_peer_assoc_cmd *cmd;
+ struct wmi_vht_rate_set *vht_rate;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, legacy_rate_len, ht_rate_len;
+ void *ptr;
+
+ if (arg->peer_mpdu_density > 16)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+
+ legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
+ sizeof(__le32));
+ ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
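+ /* The rate lists are raw byte arrays; their TLV payloads are padded
+ * up to a 4-byte multiple.
+ */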
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + legacy_rate_len) +
+ (sizeof(*tlv) + ht_rate_len) +
+ (sizeof(*tlv) + sizeof(*vht_rate));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
+ cmd->flags = __cpu_to_le32(arg->peer_flags);
+ cmd->caps = __cpu_to_le32(arg->peer_caps);
+ cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
+ cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ ether_addr_copy(cmd->mac_addr.addr, arg->addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(legacy_rate_len);
+ memcpy(tlv->value, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += legacy_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ht_rate_len);
+ memcpy(tlv->value, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += ht_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
+ tlv->len = __cpu_to_le16(sizeof(*vht_rate));
+ vht_rate = (void *)tlv->value;
+
+ vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*vht_rate);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 param_value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_tlv_scan_chan_list_cmd *cmd;
+ struct wmi_channel *ci;
+ struct wmi_channel_arg *ch;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t chans_len, len;
+ int i;
+ void *ptr, *chans;
+
+ chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + chans_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chans_len);
+ chans = (void *)tlv->value;
+
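+ /* Each channel is emitted as its own nested CHANNEL TLV inside the
+ * ARRAY_STRUCT payload.
+ */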
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+
+ tlv = chans;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ci));
+ ci = (void *)tlv->value;
+
+ ath10k_wmi_put_wmi_channel(ci, ch);
+
+ chans += sizeof(*tlv);
+ chans += sizeof(*ci);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += chans_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
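+ /* Only a reference to the beacon (DMA address and length) is passed
+ * down; the frame itself stays in host memory.
+ */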
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_pdev_set_wmm_cmd *cmd;
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (4 * (sizeof(*tlv) + sizeof(*wmm)));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* nothing to set here */
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
+ enum wmi_stats_id stats_id)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->stats_id = __cpu_to_le32(stats_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ u32 log_level)
+{
+ struct wmi_tlv_dbglog_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, bmap_len;
+ u32 value;
+ void *ptr;
+
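+ /* A non-zero module_enable selects verbose logging for that module;
+ * otherwise fall back to warning level for all modules.
+ */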
+ if (module_enable) {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ module_enable,
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
+ } else {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ WMI_TLV_DBGLOG_ALL_MODULES,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
+ }
+
+ bmap_len = 0;
+ len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
+ cmd->value = __cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(bmap_len);
+
+ /* nothing to do here */
+
+ ptr += sizeof(*tlv);
+ ptr += bmap_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct wmi_tlv_pktlog_enable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->filter = __cpu_to_le32(filter);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
+ filter);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct wmi_tlv_pktlog_disable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp, void *prb_ies,
+ size_t prb_ies_len)
+{
+ struct wmi_tlv_bcn_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ if (WARN_ON(prb_ies_len > 0 && !prb_ies))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) + prb_ies_len +
+ sizeof(*tlv) + roundup(bcn->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
+ cmd->buf_len = __cpu_to_le32(bcn->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
+ * but then it would be impossible to pass the original IE length.
+ * This chunk is not used yet, so if setting the probe response template
+ * causes beaconing problems or firmware crashes, look here first.
+ */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
+ info = (void *)tlv->value;
+ info->caps = __cpu_to_le32(prb_caps);
+ info->erp = __cpu_to_le32(prb_erp);
+ memcpy(info->ies, prb_ies, prb_ies_len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+ ptr += prb_ies_len;
+
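+ /* The beacon bytes are copied verbatim; only the TLV length is
+ * rounded up to a 4-byte multiple.
+ */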
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ /* FIXME: Adjust TSF? */
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *prb)
+{
+ struct wmi_tlv_prb_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) +
+ sizeof(*tlv) + roundup(prb->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->buf_len = __cpu_to_le32(prb->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info));
+ info = (void *)tlv->value;
+ info->caps = 0;
+ info->erp = 0;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(prb->len, 4));
+ memcpy(tlv->value, prb->data, prb->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct wmi_tlv_p2p_go_bcn_ie *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
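+ /* p2p_ie[1] is the IE body length; adding 2 covers the element ID and
+ * length octets.
+ */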
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
+ memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(p2p_ie[1] + 2, 4);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
+ vdev_id);
+ return skb;
+}
+
+/****************/
+/* TLV mappings */
+/****************/
+
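+/* Commands and parameters that have no TLV counterpart are mapped to the
+ * corresponding *_UNSUPPORTED value (0).
+ */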
+static struct wmi_cmd_map wmi_tlv_cmd_map = {
+ .init_cmdid = WMI_TLV_INIT_CMDID,
+ .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
+ .echo_cmdid = WMI_TLV_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
+ .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+};
+
+static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
+ .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_TLV_PDEV_PARAM_DCS,
+ .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
+ .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_TLV_VDEV_PARAM_WDS,
+ .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_TLV_VDEV_PARAM_SGI,
+ .ldpc = WMI_TLV_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_TLV_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_TLV_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+};
+
+static const struct wmi_ops wmi_tlv_ops = {
+ .rx = ath10k_wmi_tlv_op_rx,
+ .map_svc = wmi_tlv_svc_map,
+
+ .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+
+ .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_tlv_op_gen_init,
+ .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
+ .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
+ .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
+ /* .gen_mgmt_tx = not implemented; HTT is used */
+ .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
+ /* .gen_pdev_set_quiet_mode not implemented */
+ /* .gen_pdev_get_temperature not implemented */
+ /* .gen_addba_clear_resp not implemented */
+ /* .gen_addba_send not implemented */
+ /* .gen_addba_set_resp not implemented */
+ /* .gen_delba_send not implemented */
+ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
+ .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
+ .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
+ .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
+ .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+};
+
+/************/
+/* TLV init */
+/************/
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar)
+{
+ ar->wmi.cmd = &wmi_tlv_cmd_map;
+ ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.ops = &wmi_tlv_ops;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
new file mode 100644
index 000000000000..de68fe76eae6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -0,0 +1,1444 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WMI_TLV_H
+#define _WMI_TLV_H
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
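+
+/* Most command and event IDs are grouped: the first ID in each group is
+ * ((grp_id) << 12) | 0x1 and subsequent entries in the enums below simply
+ * increment from there.
+ */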
+
+enum wmi_tlv_grp_id {
+ WMI_TLV_GRP_START = 0x3,
+ WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
+ WMI_TLV_GRP_PDEV,
+ WMI_TLV_GRP_VDEV,
+ WMI_TLV_GRP_PEER,
+ WMI_TLV_GRP_MGMT,
+ WMI_TLV_GRP_BA_NEG,
+ WMI_TLV_GRP_STA_PS,
+ WMI_TLV_GRP_DFS,
+ WMI_TLV_GRP_ROAM,
+ WMI_TLV_GRP_OFL_SCAN,
+ WMI_TLV_GRP_P2P,
+ WMI_TLV_GRP_AP_PS,
+ WMI_TLV_GRP_RATECTL,
+ WMI_TLV_GRP_PROFILE,
+ WMI_TLV_GRP_SUSPEND,
+ WMI_TLV_GRP_BCN_FILTER,
+ WMI_TLV_GRP_WOW,
+ WMI_TLV_GRP_RTT,
+ WMI_TLV_GRP_SPECTRAL,
+ WMI_TLV_GRP_STATS,
+ WMI_TLV_GRP_ARP_NS_OFL,
+ WMI_TLV_GRP_NLO_OFL,
+ WMI_TLV_GRP_GTK_OFL,
+ WMI_TLV_GRP_CSA_OFL,
+ WMI_TLV_GRP_CHATTER,
+ WMI_TLV_GRP_TID_ADDBA,
+ WMI_TLV_GRP_MISC,
+ WMI_TLV_GRP_GPIO,
+ WMI_TLV_GRP_FWTEST,
+ WMI_TLV_GRP_TDLS,
+ WMI_TLV_GRP_RESMGR,
+ WMI_TLV_GRP_STA_SMPS,
+ WMI_TLV_GRP_WLAN_HB,
+ WMI_TLV_GRP_RMC,
+ WMI_TLV_GRP_MHF_OFL,
+ WMI_TLV_GRP_LOCATION_SCAN,
+ WMI_TLV_GRP_OEM,
+ WMI_TLV_GRP_NAN,
+ WMI_TLV_GRP_COEX,
+ WMI_TLV_GRP_OBSS_OFL,
+ WMI_TLV_GRP_LPI,
+ WMI_TLV_GRP_EXTSCAN,
+ WMI_TLV_GRP_DHCP_OFL,
+ WMI_TLV_GRP_IPA,
+ WMI_TLV_GRP_MDNS_OFL,
+ WMI_TLV_GRP_SAP_OFL,
+};
+
+enum wmi_tlv_cmd_id {
+ WMI_TLV_INIT_CMDID = 0x1,
+ WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
+ WMI_TLV_STOP_SCAN_CMDID,
+ WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
+ WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ WMI_TLV_PDEV_SET_PARAM_CMDID,
+ WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_TLV_PDEV_DUMP_CMDID,
+ WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_DELETE_CMDID,
+ WMI_TLV_VDEV_START_REQUEST_CMDID,
+ WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ WMI_TLV_VDEV_UP_CMDID,
+ WMI_TLV_VDEV_STOP_CMDID,
+ WMI_TLV_VDEV_DOWN_CMDID,
+ WMI_TLV_VDEV_SET_PARAM_CMDID,
+ WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_TLV_VDEV_WMM_ADDTS_CMDID,
+ WMI_TLV_VDEV_WMM_DELTS_CMDID,
+ WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_TLV_VDEV_PLMREQ_START_CMDID,
+ WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
+ WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_DELETE_CMDID,
+ WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ WMI_TLV_PEER_SET_PARAM_CMDID,
+ WMI_TLV_PEER_ASSOC_CMDID,
+ WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ WMI_TLV_PEER_INFO_REQ_CMDID,
+ WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
+ WMI_TLV_PDEV_SEND_BCN_CMDID,
+ WMI_TLV_BCN_TMPL_CMDID,
+ WMI_TLV_BCN_FILTER_RX_CMDID,
+ WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ WMI_TLV_MGMT_TX_CMDID,
+ WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_ADDBA_SEND_CMDID,
+ WMI_TLV_ADDBA_STATUS_CMDID,
+ WMI_TLV_DELBA_SEND_CMDID,
+ WMI_TLV_ADDBA_SET_RESP_CMDID,
+ WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
+ WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
+ WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
+ WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_ROAM_SCAN_PERIOD,
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_ROAM_AP_PROFILE,
+ WMI_TLV_ROAM_CHAN_LIST,
+ WMI_TLV_ROAM_SCAN_CMD,
+ WMI_TLV_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_TLV_ROAM_INVOKE_CMDID,
+ WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
+ WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_TLV_OFL_SCAN_PERIOD,
+ WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_TLV_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
+ WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
+ WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_PDEV_RESUME_CMDID,
+ WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
+ WMI_TLV_RMV_BCN_FILTER_CMDID,
+ WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
+ WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_TLV_WOW_ENABLE_CMDID,
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_TLV_EXTWOW_ENABLE_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
+ WMI_TLV_RTT_TSF_CMDID,
+ WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
+ WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
+ WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_TLV_REQUEST_STATS_EXT_CMDID,
+ WMI_TLV_REQUEST_LINK_STATS_CMDID,
+ WMI_TLV_START_LINK_STATS_CMDID,
+ WMI_TLV_CLEAR_LINK_STATS_CMDID,
+ WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
+ WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_APFIND_CMDID,
+ WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
+ WMI_TLV_PEER_TID_DELBA_CMDID,
+ WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_TLV_STA_KEEPALIVE_CMDID,
+ WMI_TLV_BA_REQ_SSN_CMDID,
+ WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_CMDID,
+ WMI_TLV_DBGLOG_CFG_CMDID,
+ WMI_TLV_PDEV_QVIT_CMDID,
+ WMI_TLV_PDEV_FTM_INTG_CMDID,
+ WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_TLV_FORCE_FW_HANG_CMDID,
+ WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_TLV_THERMAL_MGMT_CMDID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
+ WMI_TLV_GPIO_OUTPUT_CMDID,
+ WMI_TLV_TXBF_CMDID,
+ WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
+ WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_TLV_UNIT_TEST_CMDID,
+ WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
+ WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
+ WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
+ WMI_TLV_STA_SMPS_PARAM_CMDID,
+ WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
+ WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
+ WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_TLV_RMC_CONFIG_CMDID,
+ WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
+ WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
+ WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
+ WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
+ WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
+ WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
+ WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
+ WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
+ WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_START_SCAN_CMDID,
+ WMI_TLV_LPI_STOP_SCAN_CMDID,
+ WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_STOP_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
+ WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
+ WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_MDNS_SET_FQDN_CMDID,
+ WMI_TLV_MDNS_SET_RESPONSE_CMDID,
+ WMI_TLV_MDNS_GET_STATS_CMDID,
+ WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
+};
+
+enum wmi_tlv_event_id {
+ WMI_TLV_SERVICE_READY_EVENTID = 0x1,
+ WMI_TLV_READY_EVENTID,
+ WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
+ WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
+ WMI_TLV_CHAN_INFO_EVENTID,
+ WMI_TLV_PHYERR_EVENTID,
+ WMI_TLV_PDEV_DUMP_EVENTID,
+ WMI_TLV_TX_PAUSE_EVENTID,
+ WMI_TLV_DFS_RADAR_EVENTID,
+ WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
+ WMI_TLV_PDEV_TEMPERATURE_EVENTID,
+ WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_STOPPED_EVENTID,
+ WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_INFO_EVENTID,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_TLV_PEER_STATE_EVENTID,
+ WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
+ WMI_TLV_HOST_SWBA_EVENTID,
+ WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_TLV_BA_RSP_SSN_EVENTID,
+ WMI_TLV_AGGR_STATE_TRIG_EVENTID,
+ WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
+ WMI_TLV_PROFILE_MATCH,
+ WMI_TLV_ROAM_SYNCH_EVENTID,
+ WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_NOA_EVENTID,
+ WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
+ WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
+ WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_RTT_ERROR_REPORT_EVENTID,
+ WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
+ WMI_TLV_IFACE_LINK_STATS_EVENTID,
+ WMI_TLV_PEER_LINK_STATS_EVENTID,
+ WMI_TLV_RADIO_LINK_STATS_EVENTID,
+ WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_TLV_APFIND_EVENTID,
+ WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_GTK_REKEY_FAIL_EVENTID,
+ WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_EVENTID,
+ WMI_TLV_DEBUG_MESG_EVENTID,
+ WMI_TLV_UPDATE_STATS_EVENTID,
+ WMI_TLV_DEBUG_PRINT_EVENTID,
+ WMI_TLV_DCS_INTERFERENCE_EVENTID,
+ WMI_TLV_PDEV_QVIT_EVENTID,
+ WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
+ WMI_TLV_PDEV_FTM_INTG_EVENTID,
+ WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_TLV_THERMAL_MGMT_EVENTID,
+ WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_TLV_DIAG_EVENTID,
+ WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
+ WMI_TLV_UPLOADH_EVENTID,
+ WMI_TLV_CAPTUREH_EVENTID,
+ WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
+ WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
+ WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
+ WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
+ WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_OEM_ERROR_REPORT_EVENTID,
+ WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
+ WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_STATUS_EVENTID,
+ WMI_TLV_LPI_HANDOFF_EVENTID,
+ WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_OPERATION_EVENTID,
+ WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
+ WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PMF_QOS,
+ WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_DCS,
+ WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ WMI_TLV_PDEV_PARAM_PROXY_STA,
+ WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_TLV_PDEV_PARAM_BURST_DUR,
+ WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_TLV_PDEV_PARAM_L1SS_TRACK,
+ WMI_TLV_PDEV_PARAM_HYST_EN,
+ WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
+ WMI_TLV_PDEV_PARAM_LED_ENABLE,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ WMI_TLV_VDEV_PARAM_PREAMBLE,
+ WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_TLV_VDEV_PARAM_WDS,
+ WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ WMI_TLV_VDEV_PARAM_CHWIDTH,
+ WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ WMI_TLV_VDEV_PARAM_SGI,
+ WMI_TLV_VDEV_PARAM_LDPC,
+ WMI_TLV_VDEV_PARAM_TX_STBC,
+ WMI_TLV_VDEV_PARAM_RX_STBC,
+ WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ WMI_TLV_VDEV_PARAM_NSS,
+ WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_TLV_VDEV_PARAM_TXBF,
+ WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_TLV_VDEV_PARAM_ENABLE_RMC,
+ WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_TLV_VDEV_PARAM_MAX_RATE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_TLV_VDEV_PARAM_DTIM_POLICY,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+};
+
+enum wmi_tlv_tag {
+ WMI_TLV_TAG_LAST_RESERVED = 15,
+
+ WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_BYTE,
+ WMI_TLV_TAG_ARRAY_STRUCT,
+ WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
+
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
+ WMI_TLV_TAG_STRUCT_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_SCAN_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
+ WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
+ WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
+ WMI_TLV_TAG_STRUCT_ECHO_EVENT,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
+ WMI_TLV_TAG_STRUCT_CSA_EVENT,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_IGTK_INFO,
+ WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
+ WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
+ WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
+ WMI_TLV_TAG_STRUCT_TIM_INFO,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
+ WMI_TLV_TAG_STRUCT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
+ WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
+ WMI_TLV_TAG_STRUCT_INIT_CMD,
+ WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
+ WMI_TLV_TAG_STRUCT_CHANNEL,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_WMM_PARAMS,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
+ WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
+ WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
+ WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ECHO_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
+ WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
+ WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
+ WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TXBF_CMD,
+ WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_EVENT,
+ WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
+ WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
+ WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
+ WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
+ WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
+ WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
+ WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_RATE_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
+ WMI_TLV_TAG_STRUCT_RIC_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
+ WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_RIC_TSPEC,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
+ WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+
+ WMI_TLV_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_TLV_SERVICE_AP_DFS,
+ WMI_TLV_SERVICE_11AC,
+ WMI_TLV_SERVICE_BLOCKACK,
+ WMI_TLV_SERVICE_PHYERR,
+ WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_TLV_SERVICE_RTT,
+ WMI_TLV_SERVICE_WOW,
+ WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_NLO,
+ WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_TLV_SERVICE_CHATTER,
+ WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_TLV_SERVICE_GPIO,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_TLV_SERVICE_EARLY_RX,
+ WMI_TLV_SERVICE_STA_SMPS,
+ WMI_TLV_SERVICE_FWTEST,
+ WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_TLV_SERVICE_TDLS,
+ WMI_TLV_SERVICE_BURST,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_TLV_SERVICE_WLAN_HB,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_TLV_SERVICE_QPOWER,
+ WMI_TLV_SERVICE_PLMREQ,
+ WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_TLV_SERVICE_RMC,
+ WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_TLV_SERVICE_COEX_SAR,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_TLV_SERVICE_NAN,
+ WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_TLV_SERVICE_LPASS,
+ WMI_TLV_SERVICE_EXTSCAN,
+ WMI_TLV_SERVICE_D0WOW,
+ WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+};
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+ BIT((svc_id)%(sizeof(u32))))
+
+#define SVCMAP(x, y, len) \
+ do { \
+ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+ __set_bit(y, out); \
+ } while (0)
+
+static inline void
+wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_TLV_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_TLV_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_TLV_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_TLV_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_TLV_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
+ WMI_SERVICE_STA_SMPS, len);
+ SVCMAP(WMI_TLV_SERVICE_FWTEST,
+ WMI_SERVICE_FWTEST, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_SERVICE_STA_WMMAC, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS,
+ WMI_SERVICE_TDLS, len);
+ SVCMAP(WMI_TLV_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
+ SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_ADAPTIVE_OCS, len);
+ SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_BA_SSN_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
+ WMI_SERVICE_WLAN_HB, len);
+ SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_BATCH_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_QPOWER,
+ WMI_SERVICE_QPOWER, len);
+ SVCMAP(WMI_TLV_SERVICE_PLMREQ,
+ WMI_SERVICE_PLMREQ, len);
+ SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_THERMAL_MGMT, len);
+ SVCMAP(WMI_TLV_SERVICE_RMC,
+ WMI_SERVICE_RMC, len);
+ SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_MHF_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
+ WMI_SERVICE_COEX_SAR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
+ SVCMAP(WMI_TLV_SERVICE_NAN,
+ WMI_SERVICE_NAN, len);
+ SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_SERVICE_L1SS_STAT, len);
+ SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_ESTIMATE_LINKSPEED, len);
+ SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_OBSS_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_OFFCHAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_IBSS_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_LPASS,
+ WMI_SERVICE_LPASS, len);
+ SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
+ WMI_SERVICE_EXTSCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_D0WOW,
+ WMI_SERVICE_D0WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_HSOFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_RX_FULL_REORDER, len);
+ SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_DHCP_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_MDNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+}
+
+#undef SVCMAP
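/*
 * Note (not part of the patch): wmi_tlv_svc_map() above folds the firmware's
 * TLV service bitmap (indexed by enum wmi_tlv_service) into the driver's
 * generic bitmap indexed by the WMI_SERVICE_* values from wmi.h, so the rest
 * of ath10k can test firmware capabilities without knowing which WMI dialect
 * is in use. The 'len' argument bounds the firmware-side service index, as
 * checked by WMI_SERVICE_IS_ENABLED(). A minimal caller might look like the
 * sketch below; the function name, its parameters and the use of
 * WMI_SERVICE_MAX are illustrative assumptions, not taken from this patch.
 */
static void example_check_services(const __le32 *fw_bmap, size_t svc_len)
{
	unsigned long svc_map[BITS_TO_LONGS(WMI_SERVICE_MAX)] = {};

	/* Translate the TLV-specific bitmap into the generic one. */
	wmi_tlv_svc_map(fw_bmap, svc_map, svc_len);

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, svc_map))
		; /* firmware advertises beacon offload */
}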
+
+struct wmi_tlv {
+ __le16 len;
+ __le16 tag;
+ u8 value[0];
+} __packed;
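/*
 * Note (not part of the patch): struct wmi_tlv is the framing used by every
 * TLV-encoded command and event: a 16-bit little-endian length, a 16-bit tag
 * from enum wmi_tlv_tag, then the value bytes. A message body is simply a
 * sequence of such elements. The sketch below illustrates the idea only; the
 * real parser lives in wmi-tlv.c and may apply padding or alignment rules not
 * reflected here, and 'len' is assumed to count only the value bytes.
 */
static void example_walk_tlvs(const u8 *buf, size_t buf_len)
{
	const struct wmi_tlv *tlv;
	u16 tag, len;

	while (buf_len >= sizeof(*tlv)) {
		tlv = (const struct wmi_tlv *)buf;
		tag = __le16_to_cpu(tlv->tag);
		len = __le16_to_cpu(tlv->len);

		if (buf_len < sizeof(*tlv) + len)
			break; /* truncated element */

		if (tag == WMI_TLV_TAG_STRUCT_READY_EVENT)
			; /* e.g. hand tlv->value to the ready-event handler */

		buf += sizeof(*tlv) + len;
		buf_len -= sizeof(*tlv) + len;
	}
}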
+
+#define WMI_TLV_MGMT_RX_NUM_RSSI 4
+
+struct wmi_tlv_mgmt_rx_ev {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+ __le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
+} __packed;
+
+struct wmi_tlv_abi_version {
+ __le32 abi_ver0;
+ __le32 abi_ver1;
+ __le32 abi_ver_ns0;
+ __le32 abi_ver_ns1;
+ __le32 abi_ver_ns2;
+ __le32 abi_ver_ns3;
+} __packed;
+
+enum wmi_tlv_hw_bd_id {
+ WMI_TLV_HW_BD_LEGACY = 0,
+ WMI_TLV_HW_BD_QCA6174 = 1,
+ WMI_TLV_HW_BD_QCA2582 = 2,
+};
+
+struct wmi_tlv_hw_bd_info {
+ u8 rev;
+ u8 project_id;
+ u8 custom_id;
+ u8 reference_design_id;
+} __packed;
+
+struct wmi_tlv_svc_rdy_ev {
+ __le32 fw_build_vers;
+ struct wmi_tlv_abi_version abi;
+ __le32 phy_capability;
+ __le32 max_frag_entry;
+ __le32 num_rf_chains;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 vht_supp_mcs;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable;
+ __le32 max_bcn_ie_size;
+ __le32 num_mem_reqs;
+ __le32 max_num_scan_chans;
+ __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
+ struct wmi_tlv_hw_bd_info hw_bd_info[5];
+} __packed;
+
+struct wmi_tlv_rdy_ev {
+ struct wmi_tlv_abi_version abi;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_tlv_resource_config {
+ __le32 num_vdevs;
+ __le32 num_peers;
+ __le32 num_offload_peers;
+ __le32 num_offload_reorder_bufs;
+ __le32 num_peer_keys;
+ __le32 num_tids;
+ __le32 ast_skid_limit;
+ __le32 tx_chain_mask;
+ __le32 rx_chain_mask;
+ __le32 rx_timeout_pri[4];
+ __le32 rx_decap_mode;
+ __le32 scan_max_pending_reqs;
+ __le32 bmiss_offload_max_vdev;
+ __le32 roam_offload_max_vdev;
+ __le32 roam_offload_max_ap_profiles;
+ __le32 num_mcast_groups;
+ __le32 num_mcast_table_elems;
+ __le32 mcast2ucast_mode;
+ __le32 tx_dbg_log_size;
+ __le32 num_wds_entries;
+ __le32 dma_burst_size;
+ __le32 mac_aggr_delim;
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+ __le32 vow_config;
+ __le32 gtk_offload_max_vdev;
+ __le32 num_msdu_desc;
+ __le32 max_frag_entries;
+ __le32 num_tdls_vdevs;
+ __le32 num_tdls_conn_table_entries;
+ __le32 beacon_tx_offload_max_vdev;
+ __le32 num_multicast_filter_entries;
+ __le32 num_wow_filters;
+ __le32 num_keep_alive_pattern;
+ __le32 keep_alive_pattern_size;
+ __le32 max_tdls_concurrent_sleep_sta;
+ __le32 max_tdls_concurrent_buffer_sta;
+} __packed;
+
+struct wmi_tlv_init_cmd {
+ struct wmi_tlv_abi_version abi;
+ __le32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_tlv_pdev_set_param_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_tlv_pdev_set_rd_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 regd;
+ __le32 regd_2ghz;
+ __le32 regd_5ghz;
+ __le32 conform_limit_2ghz;
+ __le32 conform_limit_5ghz;
+} __packed;
+
+struct wmi_tlv_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+} __packed;
+
+struct wmi_tlv_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ __le32 num_channels;
+ __le32 num_bssids;
+ __le32 num_ssids;
+ __le32 ie_len;
+ __le32 num_probes;
+} __packed;
+
+struct wmi_tlv_vdev_start_cmd {
+ __le32 vdev_id;
+ __le32 requestor_id;
+ __le32 bcn_intval;
+ __le32 dtim_period;
+ __le32 flags;
+ struct wmi_ssid ssid;
+ __le32 bcn_tx_rate;
+ __le32 bcn_tx_power;
+ __le32 num_noa_descr;
+ __le32 disable_hw_ack;
+} __packed;
+
+enum {
+ WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
+ WMI_TLV_PEER_TYPE_BSS = 1,
+ WMI_TLV_PEER_TYPE_TDLS = 2,
+ WMI_TLV_PEER_TYPE_HOST_MAX = 127,
+ WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
+};
+
+struct wmi_tlv_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+ __le32 peer_type;
+} __packed;
+
+struct wmi_tlv_peer_assoc_cmd {
+ struct wmi_mac_addr mac_addr;
+ __le32 vdev_id;
+ __le32 new_assoc;
+ __le32 assoc_id;
+ __le32 flags;
+ __le32 caps;
+ __le32 listen_intval;
+ __le32 ht_caps;
+ __le32 max_mpdu;
+ __le32 mpdu_density;
+ __le32 rate_caps;
+ __le32 nss;
+ __le32 vht_caps;
+ __le32 phy_mode;
+ __le32 ht_info[2];
+ __le32 num_legacy_rates;
+ __le32 num_ht_rates;
+} __packed;
+
+struct wmi_tlv_pdev_suspend {
+ __le32 pdev_id; /* not used yet */
+ __le32 opt;
+} __packed;
+
+struct wmi_tlv_pdev_set_wmm_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 dg_type; /* purpose unknown */

+} __packed;
+
+struct wmi_tlv_vdev_set_wmm_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_phyerr_ev {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le32 buf_len;
+} __packed;
+
+enum wmi_tlv_dbglog_param {
+ WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
+};
+
+enum wmi_tlv_dbglog_log_level {
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
+ WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
+};
+
+#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
+#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
+ sizeof(__le32))
+#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
+#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
+ (((module_id << 16) & 0xffff0000) | \
+ ((log_level << 0) & 0x000000ff))
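/*
 * Note (not part of the patch): WMI_TLV_DBGLOG_LOG_LEVEL_VALUE() packs the
 * module id into the upper 16 bits and the log level into the low byte of the
 * 32-bit value carried in struct wmi_tlv_dbglog_cmd below. Worked example
 * (helper name is illustrative only): selecting the WARN level for all
 * modules yields (0xffff << 16) | 4 = 0xffff0004, sent with param
 * WMI_TLV_DBGLOG_PARAM_LOG_LEVEL.
 */
static u32 example_dbglog_value(void)
{
	return WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(WMI_TLV_DBGLOG_ALL_MODULES,
					      WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
}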
+
+struct wmi_tlv_dbglog_cmd {
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_tlv_resume_cmd {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_req_stats_cmd {
+ __le32 stats_id; /* wmi_stats_id */
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_tlv_vdev_stats {
+ __le32 vdev_id;
+ __le32 beacon_snr;
+ __le32 data_snr;
+ __le32 num_tx_frames[4]; /* per-AC */
+ __le32 num_rx_frames;
+ __le32 num_tx_frames_retries[4];
+ __le32 num_tx_frames_failures[4];
+ __le32 num_rts_fail;
+ __le32 num_rts_success;
+ __le32 num_rx_err;
+ __le32 num_rx_discard;
+ __le32 num_tx_not_acked;
+ __le32 tx_rate_history[10];
+ __le32 beacon_rssi_history[10];
+} __packed;
+
+struct wmi_tlv_pktlog_enable {
+ __le32 reserved;
+ __le32 filter;
+} __packed;
+
+struct wmi_tlv_pktlog_disable {
+ __le32 reserved;
+} __packed;
+
+enum wmi_tlv_bcn_tx_status {
+ WMI_TLV_BCN_TX_STATUS_OK,
+ WMI_TLV_BCN_TX_STATUS_XRETRY,
+ WMI_TLV_BCN_TX_STATUS_DROP,
+ WMI_TLV_BCN_TX_STATUS_FILTERED,
+};
+
+struct wmi_tlv_bcn_tx_status_ev {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_tlv_bcn_prb_info {
+ __le32 caps;
+ __le32 erp;
+ u8 ies[0];
+} __packed;
+
+struct wmi_tlv_bcn_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 tim_ie_offset;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_prb_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_p2p_go_bcn_ie {
+ __le32 vdev_id;
+ __le32 ie_len;
+} __packed;
+
+enum wmi_tlv_diag_item_type {
+ WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
+};
+
+struct wmi_tlv_diag_item {
+ u8 type;
+ u8 reserved;
+ __le16 len;
+ __le32 timestamp;
+ __le32 code;
+ u8 payload[0];
+} __packed;
+
+struct wmi_tlv_diag_data_ev {
+ __le32 num_items;
+} __packed;
+
+struct wmi_tlv_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+} __packed;
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c0f3e4d09263..aeea1c793943 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -22,8 +22,10 @@
#include "htc.h"
#include "debug.h"
#include "wmi.h"
+#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
+#include "wmi-ops.h"
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
@@ -143,6 +145,7 @@ static struct wmi_cmd_map wmi_cmd_map = {
.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
};
/* 10.X WMI cmd track */
@@ -265,6 +268,129 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.2.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
};
/* MAIN WMI VDEV param map */
@@ -385,6 +511,64 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
+static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
static struct wmi_pdev_param_map wmi_pdev_param_map = {
.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
@@ -434,6 +618,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -486,6 +671,60 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+};
+
+static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
};
/* firmware 10.2 specific mappings */
@@ -607,11 +846,11 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
};
-static void
-ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
- const struct wmi_channel_arg *arg)
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg)
{
u32 flags = 0;
@@ -685,8 +924,8 @@ static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
- u32 cmd_id)
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
@@ -717,23 +956,45 @@ err_pull:
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *bcn;
int ret;
- lockdep_assert_held(&arvif->ar->data_lock);
+ spin_lock_bh(&ar->data_lock);
- if (arvif->beacon == NULL)
- return;
+ bcn = arvif->beacon;
- if (arvif->beacon_sent)
- return;
+ if (!bcn)
+ goto unlock;
- ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
- if (ret)
- return;
+ cb = ATH10K_SKB_CB(bcn);
+
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENDING:
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ arvif->beacon_state = ATH10K_BEACON_SENDING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
+ arvif->vdev_id,
+ bcn->data, bcn->len,
+ cb->paddr,
+ cb->bcn.dtim_zero,
+ cb->bcn.deliver_cab);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (ret == 0)
+ arvif->beacon_state = ATH10K_BEACON_SENT;
+ else
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+ }
- /* We need to retain the arvif->beacon reference for DMA unmapping and
- * freeing the skbuff later. */
- arvif->beacon_sent = true;
+unlock:
+ spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
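The hunk above replaces the old beacon_sent flag with a small state machine and moves the locking into the sender. Below is a minimal userspace sketch (not driver code) of that pattern, assuming invented names such as beacon_send() and struct vif: the state is flipped to "sending" under the lock, the lock is dropped around the potentially slow send, and the outcome decides whether the beacon goes back to "scheduled" or on to "sent".

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum beacon_state { BEACON_SCHEDULED, BEACON_SENDING, BEACON_SENT };

struct vif {
	pthread_mutex_t lock;
	enum beacon_state state;
	bool have_beacon;
};

/* stand-in for the real send call; returns 0 on success */
static int beacon_send(struct vif *vif)
{
	(void)vif;
	return 0;
}

static void tx_beacon_nowait(struct vif *vif)
{
	int ret;

	pthread_mutex_lock(&vif->lock);

	if (!vif->have_beacon || vif->state != BEACON_SCHEDULED)
		goto unlock;

	/* mark the beacon as in-flight, then drop the lock so the send
	 * itself does not run under it; concurrent SWBA handling can
	 * still look at the state and detect an overrun */
	vif->state = BEACON_SENDING;
	pthread_mutex_unlock(&vif->lock);

	ret = beacon_send(vif);

	pthread_mutex_lock(&vif->lock);
	vif->state = ret ? BEACON_SCHEDULED : BEACON_SENT;
unlock:
	pthread_mutex_unlock(&vif->lock);
}

int main(void)
{
	struct vif vif = { PTHREAD_MUTEX_INITIALIZER, BEACON_SCHEDULED, true };

	tx_beacon_nowait(&vif);
	printf("state=%d\n", vif.state);	/* 2 == BEACON_SENT */
	return 0;
}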
@@ -746,12 +1007,10 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
- spin_lock_bh(&ar->data_lock);
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_wmi_tx_beacons_iter,
NULL);
- spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
@@ -792,24 +1051,23 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
return ret;
}
-int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
+static struct sk_buff *
+ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
- int ret = 0;
struct wmi_mgmt_tx_cmd *cmd;
struct ieee80211_hdr *hdr;
- struct sk_buff *wmi_skb;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff *skb;
int len;
- u32 buf_len = skb->len;
+ u32 buf_len = msdu->len;
u16 fc;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr *)msdu->data;
fc = le16_to_cpu(hdr->frame_control);
if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- len = sizeof(cmd->hdr) + skb->len;
+ len = sizeof(cmd->hdr) + msdu->len;
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
@@ -821,36 +1079,27 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
len = round_up(len, 4);
- wmi_skb = ath10k_wmi_alloc_skb(ar, len);
- if (!wmi_skb)
- return -ENOMEM;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
- cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
+ cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
- cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
+ cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
cmd->hdr.tx_rate = 0;
cmd->hdr.tx_power = 0;
cmd->hdr.buf_len = __cpu_to_le32(buf_len);
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
- memcpy(cmd->buf, skb->data, skb->len);
+ memcpy(cmd->buf, msdu->data, msdu->len);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
- wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
+ msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
trace_ath10k_tx_payload(ar, skb->data, skb->len);
- /* Send the management frame buffer to the target */
- ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
- if (ret)
- return ret;
-
- /* TODO: report tx status to mac80211 - temporary just ACK */
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status_irqsafe(ar->hw, skb);
-
- return ret;
+ return skb;
}
static void ath10k_wmi_event_scan_started(struct ath10k *ar)
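The mgmt_tx conversion above is one instance of the patch's "gen" split: the per-firmware op only builds the command buffer and reports failures as encoded error pointers, while a generic caller performs the send. A small userspace sketch of that calling convention, with ERR_PTR()/IS_ERR() re-implemented locally and gen_cmd()/send_cmd() as invented stand-ins:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct buf { size_t len; uint8_t data[64]; };

/* stand-in for a per-firmware "gen" callback: build the buffer or fail */
static struct buf *gen_cmd(const char *payload)
{
	struct buf *b;

	if (strlen(payload) >= sizeof(b->data))
		return ERR_PTR(-EINVAL);

	b = calloc(1, sizeof(*b));
	if (!b)
		return ERR_PTR(-ENOMEM);

	b->len = strlen(payload);
	memcpy(b->data, payload, b->len);
	return b;
}

/* stand-in for the generic send path shared by all firmware branches */
static int send_cmd(const char *payload)
{
	struct buf *b = gen_cmd(payload);

	if (IS_ERR(b))
		return (int)PTR_ERR(b);

	printf("sending %zu bytes\n", b->len);
	free(b);
	return 0;
}

int main(void)
{
	return send_cmd("mgmt frame") ? 1 : 0;
}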
@@ -977,22 +1226,48 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
}
}
-static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
{
- struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
+ struct wmi_scan_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ return 0;
+}
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_scan_ev_arg arg = {};
enum wmi_scan_event_type event_type;
enum wmi_scan_completion_reason reason;
u32 freq;
u32 req_id;
u32 scan_id;
u32 vdev_id;
+ int ret;
+
+ ret = ath10k_wmi_pull_scan(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
+ return ret;
+ }
- event_type = __le32_to_cpu(event->event_type);
- reason = __le32_to_cpu(event->reason);
- freq = __le32_to_cpu(event->channel_freq);
- req_id = __le32_to_cpu(event->scan_req_id);
- scan_id = __le32_to_cpu(event->scan_id);
- vdev_id = __le32_to_cpu(event->vdev_id);
+ event_type = __le32_to_cpu(arg.event_type);
+ reason = __le32_to_cpu(arg.reason);
+ freq = __le32_to_cpu(arg.channel_freq);
+ req_id = __le32_to_cpu(arg.scan_req_id);
+ scan_id = __le32_to_cpu(arg.scan_id);
+ vdev_id = __le32_to_cpu(arg.vdev_id);
spin_lock_bh(&ar->data_lock);
@@ -1147,11 +1422,51 @@ static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
}
}
-static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
{
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ size_t pull_len;
+ u32 msdu_len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
+ * trailer with credit update. Trim the excess garbage.
+ */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_mgmt_rx_ev_arg arg = {};
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u32 rx_status;
@@ -1161,24 +1476,20 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rate;
u32 buf_len;
u16 fc;
- int pull_len;
+ int ret;
- if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
- ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
- ev_hdr = &ev_v2->hdr.v1;
- pull_len = sizeof(*ev_v2);
- } else {
- ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
- ev_hdr = &ev_v1->hdr;
- pull_len = sizeof(*ev_v1);
+ ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
+ return ret;
}
- channel = __le32_to_cpu(ev_hdr->channel);
- buf_len = __le32_to_cpu(ev_hdr->buf_len);
- rx_status = __le32_to_cpu(ev_hdr->status);
- snr = __le32_to_cpu(ev_hdr->snr);
- phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
- rate = __le32_to_cpu(ev_hdr->rate);
+ channel = __le32_to_cpu(arg.channel);
+ buf_len = __le32_to_cpu(arg.buf_len);
+ rx_status = __le32_to_cpu(arg.status);
+ snr = __le32_to_cpu(arg.snr);
+ phy_mode = __le32_to_cpu(arg.phy_mode);
+ rate = __le32_to_cpu(arg.rate);
memset(status, 0, sizeof(*status));
@@ -1232,8 +1543,6 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
- skb_pull(skb, pull_len);
-
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -1266,12 +1575,6 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- /*
- * packets from HTC come aligned to 4byte boundaries
- * because they can originally come in along with a trailer
- */
- skb_trim(skb, buf_len);
-
ieee80211_rx(ar->hw, skb);
return 0;
}
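The scan and mgmt-rx changes above both follow the same "pull" pattern: a per-firmware helper length-checks the raw event, fills a firmware-independent arg struct (returning -EPROTO on short buffers), and the common handler consumes only that struct. A compact userspace sketch of the idea, with invented struct and field names and little-endian wire data assumed:

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct raw_ev {			/* wire format (little endian assumed) */
	uint32_t channel;
	uint32_t buf_len;
};

struct ev_arg {			/* host-side view filled by the pull helper */
	uint32_t channel;
	uint32_t buf_len;
};

static int pull_ev(const uint8_t *buf, size_t len, struct ev_arg *arg)
{
	struct raw_ev ev;

	if (len < sizeof(ev))
		return -EPROTO;	/* short event: refuse to parse */

	memcpy(&ev, buf, sizeof(ev));
	arg->channel = ev.channel;
	arg->buf_len = ev.buf_len;

	/* the payload after the header must cover the advertised length */
	if (len - sizeof(ev) < arg->buf_len)
		return -EPROTO;

	return 0;
}

static int handle_ev(const uint8_t *buf, size_t len)
{
	struct ev_arg arg = { 0 };
	int ret = pull_ev(buf, len, &arg);

	if (ret) {
		fprintf(stderr, "failed to parse event: %d\n", ret);
		return ret;
	}

	printf("channel %" PRIu32 ", %" PRIu32 " payload bytes\n",
	       arg.channel, arg.buf_len);
	return 0;
}

int main(void)
{
	uint8_t buf[16] = { 6, 0, 0, 0, 8, 0, 0, 0 };	/* channel 6, 8 bytes */

	return handle_ev(buf, sizeof(buf)) ? 1 : 0;
}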
@@ -1295,21 +1598,44 @@ exit:
return idx;
}
-static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
{
- struct wmi_chan_info_event *ev;
+ struct wmi_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ return 0;
+}
+
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_ch_info_ev_arg arg = {};
struct survey_info *survey;
u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
- int idx;
+ int idx, ret;
- ev = (struct wmi_chan_info_event *)skb->data;
+ ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+ return;
+ }
- err_code = __le32_to_cpu(ev->err_code);
- freq = __le32_to_cpu(ev->freq);
- cmd_flags = __le32_to_cpu(ev->cmd_flags);
- noise_floor = __le32_to_cpu(ev->noise_floor);
- rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
- cycle_count = __le32_to_cpu(ev->cycle_count);
+ err_code = __le32_to_cpu(arg.err_code);
+ freq = __le32_to_cpu(arg.freq);
+ cmd_flags = __le32_to_cpu(arg.cmd_flags);
+ noise_floor = __le32_to_cpu(arg.noise_floor);
+ rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
+ cycle_count = __le32_to_cpu(arg.cycle_count);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
@@ -1344,11 +1670,11 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
rx_clear_count -= ar->survey_last_rx_clear_count;
survey = &ar->survey[idx];
- survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
- survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+ survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
+ survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->noise = noise_floor;
- survey->filled = SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_RX |
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_RX |
SURVEY_INFO_NOISE_DBM;
}
@@ -1359,12 +1685,12 @@ exit:
spin_unlock_bh(&ar->data_lock);
}
-static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
-static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
skb->len);
@@ -1374,12 +1700,9 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
- struct ath10k_fw_stats_pdev *dst)
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst)
{
- const struct wal_dbg_tx_stats *tx = &src->wal.tx;
- const struct wal_dbg_rx_stats *rx = &src->wal.rx;
-
dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
@@ -1387,57 +1710,76 @@ static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
dst->cycle_count = __le32_to_cpu(src->cycle_count);
dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
- dst->comp_queued = __le32_to_cpu(tx->comp_queued);
- dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
- dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
- dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
- dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
- dst->local_enqued = __le32_to_cpu(tx->local_enqued);
- dst->local_freed = __le32_to_cpu(tx->local_freed);
- dst->hw_queued = __le32_to_cpu(tx->hw_queued);
- dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
- dst->underrun = __le32_to_cpu(tx->underrun);
- dst->tx_abort = __le32_to_cpu(tx->tx_abort);
- dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
- dst->tx_ko = __le32_to_cpu(tx->tx_ko);
- dst->data_rc = __le32_to_cpu(tx->data_rc);
- dst->self_triggers = __le32_to_cpu(tx->self_triggers);
- dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
- dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
- dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
- dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
- dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
- dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
- dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);
-
- dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
- dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
- dst->r0_frags = __le32_to_cpu(rx->r0_frags);
- dst->r1_frags = __le32_to_cpu(rx->r1_frags);
- dst->r2_frags = __le32_to_cpu(rx->r2_frags);
- dst->r3_frags = __le32_to_cpu(rx->r3_frags);
- dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
- dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
- dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
- dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
- dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
- dst->phy_errs = __le32_to_cpu(rx->phy_errs);
- dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
- dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
-}
-
-static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
- struct ath10k_fw_stats_peer *dst)
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = __le32_to_cpu(src->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(src->local_enqued);
+ dst->local_freed = __le32_to_cpu(src->local_freed);
+ dst->hw_queued = __le32_to_cpu(src->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+ dst->underrun = __le32_to_cpu(src->underrun);
+ dst->tx_abort = __le32_to_cpu(src->tx_abort);
+ dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
+ dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
+ dst->r0_frags = __le32_to_cpu(src->r0_frags);
+ dst->r1_frags = __le32_to_cpu(src->r1_frags);
+ dst->r2_frags = __le32_to_cpu(src->r2_frags);
+ dst->r3_frags = __le32_to_cpu(src->r3_frags);
+ dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
+ dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
+ dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
+ dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
+ dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
+ dst->phy_errs = __le32_to_cpu(src->phy_errs);
+ dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
+ dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
+}
+
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+ dst->rts_bad = __le32_to_cpu(src->rts_bad);
+ dst->rts_good = __le32_to_cpu(src->rts_good);
+ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+ dst->no_beacons = __le32_to_cpu(src->no_beacons);
+ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+}
+
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
{
ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
-static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
- struct sk_buff *skb,
- struct ath10k_fw_stats *stats)
+static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
{
const struct wmi_stats_event *ev = (void *)skb->data;
u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
@@ -1462,7 +1804,10 @@ static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
if (!dst)
continue;
- ath10k_wmi_pull_pdev_stats(src, dst);
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+
list_add_tail(&dst->list, &stats->pdevs);
}
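The stats rework above splits the old monolithic pdev converter into base/tx/rx/extra helpers so that every firmware generation's pull routine can reuse exactly the blocks its wire layout contains. A small sketch of that composition, with all struct names invented for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct wire_base { uint32_t tx_frames, rx_frames; };
struct wire_extra { uint32_t rts_bad, rts_good; };

struct host_pdev_stats {
	uint32_t tx_frames, rx_frames;
	uint32_t rts_bad, rts_good;
};

/* one firmware layout carries only the base block ... */
struct wire_pdev_v1 { struct wire_base base; };
/* ... another appends the extra block after it */
struct wire_pdev_v2 { struct wire_base base; struct wire_extra extra; };

static void pull_base(const struct wire_base *src, struct host_pdev_stats *dst)
{
	dst->tx_frames = src->tx_frames;
	dst->rx_frames = src->rx_frames;
}

static void pull_extra(const struct wire_extra *src, struct host_pdev_stats *dst)
{
	dst->rts_bad = src->rts_bad;
	dst->rts_good = src->rts_good;
}

int main(void)
{
	struct wire_pdev_v2 v2 = { { 10, 20 }, { 1, 2 } };
	struct host_pdev_stats out = { 0 };

	pull_base(&v2.base, &out);	/* shared with the v1 layout */
	pull_extra(&v2.extra, &out);	/* only for layouts that carry it */

	printf("tx %" PRIu32 " rx %" PRIu32 " rts_bad %" PRIu32 " rts_good %" PRIu32 "\n",
	       out.tx_frames, out.rx_frames, out.rts_bad, out.rts_good);
	return 0;
}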
@@ -1487,9 +1832,9 @@ static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
return 0;
}
-static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
- struct sk_buff *skb,
- struct ath10k_fw_stats *stats)
+static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
{
const struct wmi_stats_event *ev = (void *)skb->data;
u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
@@ -1514,14 +1859,10 @@ static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
if (!dst)
continue;
- ath10k_wmi_pull_pdev_stats(&src->old, dst);
-
- dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
- dst->rts_bad = __le32_to_cpu(src->rts_bad);
- dst->rts_good = __le32_to_cpu(src->rts_good);
- dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
- dst->no_beacons = __le32_to_cpu(src->no_beacons);
- dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
list_add_tail(&dst->list, &stats->pdevs);
}
@@ -1550,61 +1891,250 @@ static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
return 0;
}
-int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
- struct ath10k_fw_stats *stats)
+static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);
- else
- return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing it is
+ * required to parse following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing it is
+ * required to parse following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_4_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->common.old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
}
-static void ath10k_wmi_event_update_stats(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
ath10k_debug_fw_stats_process(ar, skb);
}
-static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
- struct sk_buff *skb)
+static int
+ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ struct wmi_vdev_start_response_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ return 0;
+}
+
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_vdev_start_response_event *ev;
+ struct wmi_vdev_start_ev_arg arg = {};
+ int ret;
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
- ev = (struct wmi_vdev_start_response_event *)skb->data;
+ ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
+ return;
+ }
- if (WARN_ON(__le32_to_cpu(ev->status)))
+ if (WARN_ON(__le32_to_cpu(arg.status)))
return;
complete(&ar->vdev_setup_done);
}
-static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
complete(&ar->vdev_setup_done);
}
-static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
- struct sk_buff *skb)
+static int
+ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
{
- struct wmi_peer_sta_kickout_event *ev;
+ struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ return 0;
+}
+
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_peer_kick_ev_arg arg = {};
struct ieee80211_sta *sta;
+ int ret;
- ev = (struct wmi_peer_sta_kickout_event *)skb->data;
+ ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
+ ret);
+ return;
+ }
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
- ev->peer_macaddr.addr);
+ arg.mac_addr);
rcu_read_lock();
- sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
if (!sta) {
ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
- ev->peer_macaddr.addr);
+ arg.mac_addr);
goto exit;
}
@@ -1641,7 +2171,7 @@ exit:
static void ath10k_wmi_update_tim(struct ath10k *ar,
struct ath10k_vif *arvif,
struct sk_buff *bcn,
- struct wmi_bcn_info *bcn_info)
+ const struct wmi_tim_info *tim_info)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
struct ieee80211_tim_ie *tim;
@@ -1652,14 +2182,14 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
/* if next SWBA has no tim_changed the tim_bitmap is garbage.
* we must copy the bitmap upon change and reuse it later */
- if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
+ if (__le32_to_cpu(tim_info->tim_changed)) {
int i;
BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
- sizeof(bcn_info->tim_info.tim_bitmap));
+ sizeof(tim_info->tim_bitmap));
for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
- t = bcn_info->tim_info.tim_bitmap[i / 4];
+ t = tim_info->tim_bitmap[i / 4];
v = __le32_to_cpu(t);
arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
}
@@ -1711,13 +2241,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
return;
}
- tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
+ tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
if (tim->dtim_count == 0) {
ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
- if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
+ if (__le32_to_cpu(tim_info->tim_mcast) == 1)
ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
}
@@ -1727,7 +2257,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
}
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
- struct wmi_p2p_noa_info *noa)
+ const struct wmi_p2p_noa_info *noa)
{
struct ieee80211_p2p_noa_attr *noa_attr;
u8 ctwindow_oppps = noa->ctwindow_oppps;
@@ -1769,7 +2299,7 @@ static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
*noa_attr_len = __cpu_to_le16(attr_len);
}
-static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
+static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
{
u32 len = 0;
u8 noa_descriptors = noa->num_descriptors;
@@ -1789,9 +2319,8 @@ static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
- struct wmi_bcn_info *bcn_info)
+ const struct wmi_p2p_noa_info *noa)
{
- struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
u8 *new_data, *old_data = arvif->u.ap.noa_data;
u32 new_len;
@@ -1832,22 +2361,59 @@ cleanup:
kfree(old_data);
}
-static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
{
- struct wmi_host_swba_event *ev;
+ struct wmi_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens there were some changes in firmware and
+ * ath10k should update the max size of tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+ arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
+ i++;
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_swba_ev_arg arg = {};
u32 map;
int i = -1;
- struct wmi_bcn_info *bcn_info;
+ const struct wmi_tim_info *tim_info;
+ const struct wmi_p2p_noa_info *noa_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
dma_addr_t paddr;
int ret, vdev_id = 0;
- ev = (struct wmi_host_swba_event *)skb->data;
- map = __le32_to_cpu(ev->vdev_map);
+ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
+ return;
+ }
+
+ map = __le32_to_cpu(arg.vdev_map);
ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
- ev->vdev_map);
+ map);
for (; map; map >>= 1, vdev_id++) {
if (!(map & 0x1))
@@ -1860,19 +2426,20 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
break;
}
- bcn_info = &ev->bcn_info[i];
+ tim_info = arg.tim_info[i];
+ noa_info = arg.noa_info[i];
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
i,
- __le32_to_cpu(bcn_info->tim_info.tim_len),
- __le32_to_cpu(bcn_info->tim_info.tim_mcast),
- __le32_to_cpu(bcn_info->tim_info.tim_changed),
- __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
+ __le32_to_cpu(tim_info->tim_len),
+ __le32_to_cpu(tim_info->tim_mcast),
+ __le32_to_cpu(tim_info->tim_changed),
+ __le32_to_cpu(tim_info->tim_num_ps_pending),
+ __le32_to_cpu(tim_info->tim_bitmap[3]),
+ __le32_to_cpu(tim_info->tim_bitmap[2]),
+ __le32_to_cpu(tim_info->tim_bitmap[1]),
+ __le32_to_cpu(tim_info->tim_bitmap[0]));
arvif = ath10k_get_arvif(ar, vdev_id);
if (arvif == NULL) {
@@ -1899,15 +2466,25 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_tx_h_seq_no(arvif->vif, bcn);
- ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
- ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
+ ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
+ ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
- if (!arvif->beacon_sent)
- ath10k_warn(ar, "SWBA overrun on vdev %d\n",
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
+ arvif->vdev_id);
+ break;
+ case ATH10K_BEACON_SENDING:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
arvif->vdev_id);
+ dev_kfree_skb(bcn);
+ goto skip;
+ }
ath10k_mac_vif_beacon_free(arvif);
}
@@ -1935,19 +2512,19 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
}
arvif->beacon = bcn;
- arvif->beacon_sent = false;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
- ath10k_wmi_tx_beacon_nowait(arvif);
skip:
spin_unlock_bh(&ar->data_lock);
}
+
+ ath10k_wmi_tx_beacons_nowait(ar);
}
-static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
@@ -2068,9 +2645,9 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
return 0;
}
-static void ath10k_wmi_event_dfs(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
- u64 tsf)
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
const struct phyerr_tlv *tlv;
@@ -2133,10 +2710,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
}
-static void
-ath10k_wmi_event_spectral_scan(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
- u64 tsf)
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
struct phyerr_tlv *tlv;
@@ -2188,37 +2764,53 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
}
}
-static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg)
{
- const struct wmi_phyerr_event *ev;
+ struct wmi_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->num_phyerrs = ev->num_phyerrs;
+ arg->tsf_l32 = ev->tsf_l32;
+ arg->tsf_u32 = ev->tsf_u32;
+ arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
+ arg->phyerrs = ev->phyerrs;
+
+ return 0;
+}
+
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_phyerr_ev_arg arg = {};
const struct wmi_phyerr *phyerr;
u32 count, i, buf_len, phy_err_code;
u64 tsf;
- int left_len = skb->len;
+ int left_len, ret;
ATH10K_DFS_STAT_INC(ar, phy_errors);
- /* Check if combined event available */
- if (left_len < sizeof(*ev)) {
- ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
+ ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
return;
}
- left_len -= sizeof(*ev);
+ left_len = __le32_to_cpu(arg.buf_len);
/* Check number of included events */
- ev = (const struct wmi_phyerr_event *)skb->data;
- count = __le32_to_cpu(ev->num_phyerrs);
+ count = __le32_to_cpu(arg.num_phyerrs);
- tsf = __le32_to_cpu(ev->tsf_u32);
+ tsf = __le32_to_cpu(arg.tsf_u32);
tsf <<= 32;
- tsf |= __le32_to_cpu(ev->tsf_l32);
+ tsf |= __le32_to_cpu(arg.tsf_l32);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event phyerr count %d tsf64 0x%llX\n",
count, tsf);
- phyerr = ev->phyerrs;
+ phyerr = arg.phyerrs;
for (i = 0; i < count; i++) {
/* Check if we can read event header */
if (left_len < sizeof(*phyerr)) {
@@ -2258,19 +2850,17 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
}
}
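The phyerr handler keeps reassembling the 64-bit TSF from the two little-endian 32-bit words the firmware reports (upper word shifted up, lower word OR'd in). A tiny standalone sketch of that recombination, with an illustrative function name:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t tsf_from_halves(uint32_t tsf_u32, uint32_t tsf_l32)
{
	uint64_t tsf = tsf_u32;

	tsf <<= 32;		/* upper word first ... */
	tsf |= tsf_l32;		/* ... then OR in the lower word */
	return tsf;
}

int main(void)
{
	/* upper 0x00000001, lower 0x00000010 -> 0x0000000100000010 */
	printf("tsf64 0x%016" PRIX64 "\n", tsf_from_halves(0x1, 0x10));
	return 0;
}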
-static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}
-static void ath10k_wmi_event_profile_match(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}
-static void ath10k_wmi_event_debug_print(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
{
char buf[101], c;
int i;
@@ -2303,103 +2893,90 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
}
-static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}
-static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}
-static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}
-static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}
-static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}
-static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}
-static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}
-static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}
-static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}
-static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}
-static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}
-static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}
-static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}
-static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
-static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}
-static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}
-static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
@@ -2435,8 +3012,9 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
return 0;
}
-static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
- struct wmi_svc_rdy_ev_arg *arg)
+static int
+ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
{
struct wmi_service_ready_event *ev;
size_t i, n;
@@ -2471,8 +3049,9 @@ static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
return 0;
}
-static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
- struct wmi_svc_rdy_ev_arg *arg)
+static int
+ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
{
struct wmi_10x_service_ready_event *ev;
int i, n;
@@ -2506,30 +3085,22 @@ static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
return 0;
}
-static void ath10k_wmi_event_service_ready(struct ath10k *ar,
- struct sk_buff *skb)
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;
- memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
- wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map,
- arg.service_map_len);
- } else {
- ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
- wmi_main_svc_map(arg.service_map, ar->wmi.svc_map,
- arg.service_map_len);
- }
-
+ ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
return;
}
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+ ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+
ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
@@ -2607,13 +3178,14 @@ static void ath10k_wmi_event_service_ready(struct ath10k *ar,
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
__le32_to_cpu(arg.min_tx_power),
__le32_to_cpu(arg.max_tx_power),
__le32_to_cpu(arg.ht_cap),
__le32_to_cpu(arg.vht_cap),
__le32_to_cpu(arg.sw_ver0),
__le32_to_cpu(arg.sw_ver1),
+ __le32_to_cpu(arg.fw_build),
__le32_to_cpu(arg.phy_capab),
__le32_to_cpu(arg.num_rf_chains),
__le32_to_cpu(arg.eeprom_rd),
@@ -2622,27 +3194,59 @@ static void ath10k_wmi_event_service_ready(struct ath10k *ar,
complete(&ar->wmi.service_ready);
}
-static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
{
- struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
+ struct wmi_ready_event *ev = (void *)skb->data;
- if (WARN_ON(skb->len < sizeof(*ev)))
- return -EINVAL;
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
- ether_addr_copy(ar->mac_addr, ev->mac_addr.addr);
+ skb_pull(skb, sizeof(*ev));
+ arg->sw_version = ev->sw_version;
+ arg->abi_version = ev->abi_version;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ return 0;
+}
+
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_rdy_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
+ return ret;
+ }
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
- __le32_to_cpu(ev->sw_version),
- __le32_to_cpu(ev->abi_version),
- ev->mac_addr.addr,
- __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
+ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ __le32_to_cpu(arg.sw_version),
+ __le32_to_cpu(arg.abi_version),
+ arg.mac_addr,
+ __le32_to_cpu(arg.status));
+ ether_addr_copy(ar->mac_addr, arg.mac_addr);
complete(&ar->wmi.unified_ready);
return 0;
}
-static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
+{
+ const struct wmi_pdev_temperature_event *ev;
+
+ ev = (struct wmi_pdev_temperature_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+ return 0;
+}
+
+static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
@@ -2758,7 +3362,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10x_event_id id;
@@ -2882,7 +3486,7 @@ out:
dev_kfree_skb(skb);
}
-static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_2_event_id id;
@@ -2981,6 +3585,9 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_2_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
+ case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@@ -3001,14 +3608,11 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ath10k_wmi_10_2_process_rx(ar, skb);
- else
- ath10k_wmi_10x_process_rx(ar, skb);
- } else {
- ath10k_wmi_main_process_rx(ar, skb);
- }
+ int ret;
+
+ ret = ath10k_wmi_rx(ar, skb);
+ if (ret)
+ ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
}
int ath10k_wmi_connect(struct ath10k *ar)
@@ -3039,16 +3643,17 @@ int ath10k_wmi_connect(struct ath10k *ar)
return 0;
}
-static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
- u16 rd2g, u16 rd5g, u16 ctl2g,
- u16 ctl5g)
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
{
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
cmd->reg_domain = __cpu_to_le32(rd);
@@ -3060,22 +3665,20 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g);
-
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_set_regdomain_cmdid);
+ return skb;
}
-static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
- u16 rd2g, u16 rd5g,
- u16 ctl2g, u16 ctl5g,
- enum wmi_dfs_region dfs_reg)
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
+ rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
{
struct wmi_pdev_set_regdomain_cmd_10x *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
cmd->reg_domain = __cpu_to_le32(rd);
@@ -3088,50 +3691,39 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
-
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_set_regdomain_cmdid);
-}
-
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g,
- enum wmi_dfs_region dfs_reg)
-{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
- ctl2g, ctl5g, dfs_reg);
- else
- return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
- ctl2g, ctl5g);
+ return skb;
}
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
{
struct wmi_pdev_suspend_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->suspend_opt = __cpu_to_le32(suspend_opt);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+ return skb;
}
-int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
{
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, 0);
- if (skb == NULL)
- return -ENOMEM;
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+ return skb;
}
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
@@ -3139,12 +3731,12 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
ath10k_warn(ar, "pdev param %d not supported by firmware\n",
id);
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
cmd->param_id = __cpu_to_le32(id);
@@ -3152,11 +3744,11 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
id, value);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+ return skb;
}
-static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
- struct wmi_host_mem_chunks *chunks)
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks)
{
struct host_memory_chunk *chunk;
int i;
@@ -3177,7 +3769,7 @@ static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
}
}
-static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
+static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
@@ -3240,7 +3832,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
buf = ath10k_wmi_alloc_skb(ar, len);
if (!buf)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_init_cmd *)buf->data;
@@ -3248,10 +3840,10 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+ return buf;
}
-static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
+static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
{
struct wmi_init_cmd_10x *cmd;
struct sk_buff *buf;
@@ -3306,7 +3898,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
buf = ath10k_wmi_alloc_skb(ar, len);
if (!buf)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_init_cmd_10x *)buf->data;
@@ -3314,15 +3906,15 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+ return buf;
}
-static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
+static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
{
struct wmi_init_cmd_10_2 *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
- u32 len, val;
+ u32 len, val, features;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -3356,7 +3948,7 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
- config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
@@ -3372,34 +3964,21 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
buf = ath10k_wmi_alloc_skb(ar, len);
if (!buf)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
+ features = WMI_10_2_RX_BATCH_MODE;
+ cmd->resource_config.feature_mask = __cpu_to_le32(features);
+
memcpy(&cmd->resource_config.common, &config, sizeof(config));
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+ return buf;
}
-int ath10k_wmi_cmd_init(struct ath10k *ar)
-{
- int ret;
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ret = ath10k_wmi_10_2_cmd_init(ar);
- else
- ret = ath10k_wmi_10x_cmd_init(ar);
- } else {
- ret = ath10k_wmi_main_cmd_init(ar);
- }
-
- return ret;
-}
-
-static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
if (arg->ie_len && !arg->ie)
return -EINVAL;
@@ -3450,9 +4029,8 @@ ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
return len;
}
-static void
-ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
- const struct wmi_start_scan_arg *arg)
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg)
{
u32 scan_id;
u32 scan_req_id;
@@ -3546,46 +4124,60 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
}
}
-int ath10k_wmi_start_scan(struct ath10k *ar,
- const struct wmi_start_scan_arg *arg)
+static struct sk_buff *
+ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
{
+ struct wmi_start_scan_cmd *cmd;
struct sk_buff *skb;
size_t len;
int ret;
ret = ath10k_wmi_start_scan_verify(arg);
if (ret)
- return ret;
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- len = sizeof(struct wmi_10x_start_scan_cmd) +
- ath10k_wmi_start_scan_tlvs_len(arg);
- else
- len = sizeof(struct wmi_start_scan_cmd) +
- ath10k_wmi_start_scan_tlvs_len(arg);
+ return ERR_PTR(ret);
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
- return -ENOMEM;
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- struct wmi_10x_start_scan_cmd *cmd;
+ return ERR_PTR(-ENOMEM);
- cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
- ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
- } else {
- struct wmi_start_scan_cmd *cmd;
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
- cmd = (struct wmi_start_scan_cmd *)skb->data;
- cmd->burst_duration_ms = __cpu_to_le32(0);
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
- ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
- }
+ cmd->burst_duration_ms = __cpu_to_le32(0);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_10x_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
+ return skb;
}
void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -3614,7 +4206,9 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}
-int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+static struct sk_buff *
+ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
{
struct wmi_stop_scan_cmd *cmd;
struct sk_buff *skb;
@@ -3622,13 +4216,13 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
u32 req_id;
if (arg->req_id > 0xFFF)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
scan_id = arg->u.scan_id;
scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
@@ -3645,20 +4239,21 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
arg->req_id, arg->req_type, arg->u.scan_id);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_type type,
- enum wmi_vdev_subtype subtype,
- const u8 macaddr[ETH_ALEN])
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
{
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_create_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3669,58 +4264,52 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
vdev_id, type, subtype, macaddr);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
struct wmi_vdev_delete_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_delete_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"WMI vdev delete id %d\n", vdev_id);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+ return skb;
}
-static int
-ath10k_wmi_vdev_start_restart(struct ath10k *ar,
- const struct wmi_vdev_start_request_arg *arg,
- u32 cmd_id)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
{
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
- if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
- cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
- return -EINVAL;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
- cmdname = "start";
- else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
+ if (restart)
cmdname = "restart";
else
- return -EINVAL; /* should not happen, we already check cmd_id */
+ cmdname = "start";
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (arg->hidden_ssid)
flags |= WMI_VDEV_START_HIDDEN_SSID;
@@ -3749,50 +4338,36 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
flags, arg->channel.freq, arg->channel.mode,
cmd->chan.flags, arg->channel.max_power);
- return ath10k_wmi_cmd_send(ar, skb, cmd_id);
-}
-
-int ath10k_wmi_vdev_start(struct ath10k *ar,
- const struct wmi_vdev_start_request_arg *arg)
-{
- u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
-
- return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
-}
-
-int ath10k_wmi_vdev_restart(struct ath10k *ar,
- const struct wmi_vdev_start_request_arg *arg)
-{
- u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
-
- return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
+ return skb;
}
-int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
struct wmi_vdev_stop_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_stop_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
{
struct wmi_vdev_up_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_up_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3802,30 +4377,30 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
{
struct wmi_vdev_down_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_down_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mgmt vdev down id 0x%x\n", vdev_id);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- u32 param_id, u32 param_value)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
{
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
@@ -3834,12 +4409,12 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"vdev param %d not supported by firmware\n",
param_id);
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3849,24 +4424,24 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_install_key(struct ath10k *ar,
- const struct wmi_vdev_install_key_arg *arg)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
{
struct wmi_vdev_install_key_cmd *cmd;
struct sk_buff *skb;
if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -3885,20 +4460,19 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->vdev_install_key_cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
- const struct wmi_vdev_spectral_conf_arg *arg)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
{
struct wmi_vdev_spectral_conf_cmd *cmd;
struct sk_buff *skb;
- u32 cmdid;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -3921,39 +4495,38 @@ int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
- cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
- return ath10k_wmi_cmd_send(ar, skb, cmdid);
+ return skb;
}
-int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
- u32 enable)
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
{
struct wmi_vdev_spectral_enable_cmd *cmd;
struct sk_buff *skb;
- u32 cmdid;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->trigger_cmd = __cpu_to_le32(trigger);
cmd->enable_cmd = __cpu_to_le32(enable);
- cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
- return ath10k_wmi_cmd_send(ar, skb, cmdid);
+ return skb;
}
-int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
{
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_peer_create_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3962,18 +4535,19 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+ return skb;
}
-int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
{
struct wmi_peer_delete_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_peer_delete_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3982,18 +4556,19 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+ return skb;
}
-int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
struct wmi_peer_flush_tids_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4003,19 +4578,21 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, tid_bitmap);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+ return skb;
}
-int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
- const u8 *peer_addr, enum wmi_peer_param param_id,
- u32 param_value)
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
{
struct wmi_peer_set_param_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_peer_set_param_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4026,19 +4603,19 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_value);
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+ return skb;
}
-int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
- enum wmi_sta_ps_mode psmode)
+static struct sk_buff *
+ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
{
struct wmi_sta_powersave_mode_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4047,21 +4624,20 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi set powersave id 0x%x mode %d\n",
vdev_id, psmode);
-
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->sta_powersave_mode_cmdid);
+ return skb;
}
-int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_sta_powersave_param param_id,
- u32 value)
+static struct sk_buff *
+ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value)
{
struct wmi_sta_powersave_param_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4071,22 +4647,22 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->sta_powersave_param_cmdid);
+ return skb;
}
-int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
- enum wmi_ap_ps_peer_param param_id, u32 value)
+static struct sk_buff *
+ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
{
struct wmi_ap_ps_peer_cmd *cmd;
struct sk_buff *skb;
if (!mac)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4097,13 +4673,12 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
vdev_id, param_id, value, mac);
-
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->ap_ps_peer_param_cmdid);
+ return skb;
}
-int ath10k_wmi_scan_chan_list(struct ath10k *ar,
- const struct wmi_scan_chan_list_arg *arg)
+static struct sk_buff *
+ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
{
struct wmi_scan_chan_list_cmd *cmd;
struct sk_buff *skb;
@@ -4116,7 +4691,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
@@ -4128,7 +4703,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ath10k_wmi_put_wmi_channel(ci, ch);
}
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+ return skb;
}
static void
@@ -4209,12 +4784,9 @@ ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
cmd->info0 = __cpu_to_le32(info0);
}
-int ath10k_wmi_peer_assoc(struct ath10k *ar,
- const struct wmi_peer_assoc_complete_arg *arg)
+static int
+ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
{
- struct sk_buff *skb;
- int len;
-
if (arg->peer_mpdu_density > 16)
return -EINVAL;
if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
@@ -4222,79 +4794,135 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
return -EINVAL;
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
- else
- len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
- } else {
- len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
- }
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
- else
- ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
- } else {
- ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
- }
+ ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM (%s)\n",
arg->vdev_id, arg->addr,
arg->peer_reassoc ? "reassociate" : "new");
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
+ return skb;
}
/* This function assumes the beacon is already DMA mapped */
-int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
+static struct sk_buff *
+ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
+ size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
{
struct wmi_bcn_tx_ref_cmd *cmd;
struct sk_buff *skb;
- struct sk_buff *beacon = arvif->beacon;
- struct ath10k *ar = arvif->ar;
struct ieee80211_hdr *hdr;
- int ret;
u16 fc;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- hdr = (struct ieee80211_hdr *)beacon->data;
+ hdr = (struct ieee80211_hdr *)bcn;
fc = le16_to_cpu(hdr->frame_control);
cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
- cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
- cmd->data_len = __cpu_to_le32(beacon->len);
- cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
cmd->msdu_id = 0;
cmd->frame_control = __cpu_to_le32(fc);
cmd->flags = 0;
cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
- if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
+ if (dtim_zero)
cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
- if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
+ if (deliver_cab)
cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
- ret = ath10k_wmi_cmd_send_nowait(ar, skb,
- ar->wmi.cmd->pdev_send_bcn_cmdid);
-
- if (ret)
- dev_kfree_skb(skb);
-
- return ret;
+ return skb;
}
-static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
- const struct wmi_wmm_params_arg *arg)
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg)
{
params->cwmin = __cpu_to_le32(arg->cwmin);
params->cwmax = __cpu_to_le32(arg->cwmax);
@@ -4304,52 +4932,54 @@ static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
params->no_ack = __cpu_to_le32(arg->no_ack);
}
-int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
- const struct wmi_pdev_set_wmm_params_arg *arg)
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
{
struct wmi_pdev_set_wmm_params *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+ ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+ return skb;
}
-int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+static struct sk_buff *
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
struct wmi_request_stats_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_request_stats_cmd *)skb->data;
cmd->stats_id = __cpu_to_le32(stats_id);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+ return skb;
}
-int ath10k_wmi_force_fw_hang(struct ath10k *ar,
- enum wmi_force_fw_hang_type type, u32 delay_ms)
+static struct sk_buff *
+ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
{
struct wmi_force_fw_hang_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
cmd->type = __cpu_to_le32(type);
@@ -4357,10 +4987,12 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+ return skb;
}
-int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
+static struct sk_buff *
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ u32 log_level)
{
struct wmi_dbglog_cfg_cmd *cmd;
struct sk_buff *skb;
@@ -4368,12 +5000,12 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
if (module_enable) {
- cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
+ cfg = SM(log_level,
ATH10K_DBGLOG_CFG_LOG_LVL);
} else {
/* set back defaults, all modules with WARN level */
@@ -4393,57 +5025,449 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
__le32_to_cpu(cmd->module_valid),
__le32_to_cpu(cmd->config_enable),
__le32_to_cpu(cmd->config_valid));
-
- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+ return skb;
}
-int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
struct wmi_pdev_pktlog_enable_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ev_bitmap &= ATH10K_PKTLOG_ANY;
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi enable pktlog filter:%x\n", ev_bitmap);
cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
+ ev_bitmap);
+ return skb;
}
-int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
{
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, 0);
if (!skb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+ u32 duration, u32 next_offset,
+ u32 enabled)
+{
+ struct wmi_pdev_set_quiet_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
+ cmd->period = __cpu_to_le32(period);
+ cmd->duration = __cpu_to_le32(duration);
+ cmd->next_start = __cpu_to_le32(next_offset);
+ cmd->enabled = __cpu_to_le32(enabled);
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi quiet param: period %u duration %u enabled %d\n",
+ period, duration, enabled);
+ return skb;
}
-int ath10k_wmi_attach(struct ath10k *ar)
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ar->wmi.cmd = &wmi_10_2_cmd_map;
- else
- ar->wmi.cmd = &wmi_10x_cmd_map;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->buffersize = __cpu_to_le32(buf_size);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->statuscode = __cpu_to_le32(status);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->initiator = __cpu_to_le32(initiator);
+ cmd->reasoncode = __cpu_to_le32(reason);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+ return skb;
+}
+
+static const struct wmi_ops wmi_ops = {
+ .rx = ath10k_wmi_op_rx,
+ .map_svc = wmi_main_svc_map,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ /* .gen_pdev_get_temperature not implemented */
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+};
+
+static const struct wmi_ops wmi_10_1_ops = {
+ .rx = ath10k_wmi_10_1_op_rx,
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_1_op_gen_init,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with main branch */
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+};
+
+static const struct wmi_ops wmi_10_2_4_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+};
+
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->wmi.cmd = &wmi_10_2_4_cmd_map;
+ ar->wmi.ops = &wmi_10_2_4_ops;
+ ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ ar->wmi.cmd = &wmi_10_2_cmd_map;
+ ar->wmi.ops = &wmi_10_2_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
- } else {
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.ops = &wmi_10_1_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.ops = &wmi_ops;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ath10k_wmi_tlv_attach(ar);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ ath10k_err(ar, "unsupported WMI op version: %d\n",
+ ar->wmi.op_version);
+ return -EINVAL;
}
init_completion(&ar->wmi.service_ready);
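Note that the gen_* ops wired up above only build WMI command buffers and return an ERR_PTR() value on failure; nothing in these tables sends anything. Callers are expected to dispatch through ar->wmi.ops and then hand the resulting skb to ath10k_wmi_cmd_send() with the matching per-version command id from ar->wmi.cmd, which ath10k_wmi_attach() sets up alongside the ops table. A minimal sketch of such a dispatch wrapper (the wrapper name, its static inline form and the -EOPNOTSUPP fallback are illustrative, not taken from this diff):

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	/* an op table may leave a hook unimplemented (NULL) */
	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	/* gen_* only builds the command; it returns ERR_PTR() on failure */
	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* the command id still comes from the per-version cmd map */
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

The same shape covers the rest of the gen_* hooks; op tables that leave a hook NULL (the "not implemented" entries above) simply make that command unavailable on the corresponding firmware branch.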
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 21391929d318..20ce3603e64b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -109,6 +109,45 @@ enum wmi_service {
WMI_SERVICE_BURST,
WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_EARLY_RX,
+ WMI_SERVICE_STA_SMPS,
+ WMI_SERVICE_FWTEST,
+ WMI_SERVICE_STA_WMMAC,
+ WMI_SERVICE_TDLS,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_WLAN_HB,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_QPOWER,
+ WMI_SERVICE_PLMREQ,
+ WMI_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_RMC,
+ WMI_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_COEX_SAR,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_NAN,
+ WMI_SERVICE_L1SS_STAT,
+ WMI_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_LPASS,
+ WMI_SERVICE_EXTSCAN,
+ WMI_SERVICE_D0WOW,
+ WMI_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD,
/* keep last */
WMI_SERVICE_MAX,
@@ -215,6 +254,45 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_BURST);
SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+ SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
+ SVCSTR(WMI_SERVICE_EARLY_RX);
+ SVCSTR(WMI_SERVICE_STA_SMPS);
+ SVCSTR(WMI_SERVICE_FWTEST);
+ SVCSTR(WMI_SERVICE_STA_WMMAC);
+ SVCSTR(WMI_SERVICE_TDLS);
+ SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
+ SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
+ SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
+ SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
+ SVCSTR(WMI_SERVICE_WLAN_HB);
+ SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
+ SVCSTR(WMI_SERVICE_BATCH_SCAN);
+ SVCSTR(WMI_SERVICE_QPOWER);
+ SVCSTR(WMI_SERVICE_PLMREQ);
+ SVCSTR(WMI_SERVICE_THERMAL_MGMT);
+ SVCSTR(WMI_SERVICE_RMC);
+ SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
+ SVCSTR(WMI_SERVICE_COEX_SAR);
+ SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
+ SVCSTR(WMI_SERVICE_NAN);
+ SVCSTR(WMI_SERVICE_L1SS_STAT);
+ SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
+ SVCSTR(WMI_SERVICE_OBSS_SCAN);
+ SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
+ SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
+ SVCSTR(WMI_SERVICE_LPASS);
+ SVCSTR(WMI_SERVICE_EXTSCAN);
+ SVCSTR(WMI_SERVICE_D0WOW);
+ SVCSTR(WMI_SERVICE_HSOFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
+ SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
+ SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
+ SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
default:
return NULL;
}
@@ -472,6 +550,8 @@ struct wmi_cmd_map {
u32 force_fw_hang_cmdid;
u32 gpio_config_cmdid;
u32 gpio_output_cmdid;
+ u32 pdev_get_temperature_cmdid;
+ u32 vdev_set_wmm_params_cmdid;
};
/*
@@ -1076,6 +1156,11 @@ enum wmi_10_2_cmd_id {
WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_2_PDEV_GET_INFO,
+ WMI_10_2_VDEV_GET_INFO,
+ WMI_10_2_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_2_PEER_ATF_REQUEST_CMDID,
+ WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
@@ -1117,6 +1202,8 @@ enum wmi_10_2_event_id {
WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
WMI_10_2_WDS_PEER_EVENTID,
+ WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_2_PDEV_TEMPERATURE_EVENTID,
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
@@ -1862,6 +1949,11 @@ struct wmi_resource_config_10x {
__le32 max_frag_entries;
} __packed;
+enum wmi_10_2_feature_mask {
+ WMI_10_2_RX_BATCH_MODE = BIT(0),
+ WMI_10_2_ATF_CONFIG = BIT(1),
+};
+
struct wmi_resource_config_10_2 {
struct wmi_resource_config_10x common;
__le32 max_peer_ext_stats;
@@ -1870,7 +1962,7 @@ struct wmi_resource_config_10_2 {
__le32 be_min_free;
__le32 vi_min_free;
__le32 vo_min_free;
- __le32 rx_batchmode; /* 0-disable, 1-enable */
+ __le32 feature_mask;
} __packed;
#define NUM_UNITS_IS_NUM_VDEVS 0x1
@@ -2505,6 +2597,7 @@ struct wmi_pdev_param_map {
u32 fast_channel_reset;
u32 burst_dur;
u32 burst_enable;
+ u32 cal_period;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2715,6 +2808,9 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_PDEV_PARAM_CAL_PERIOD
};
struct wmi_pdev_set_param_cmd {
@@ -2722,6 +2818,9 @@ struct wmi_pdev_set_param_cmd {
__le32 param_value;
} __packed;
+/* valid period range is 1 to 60000 ms */
+#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
+
struct wmi_pdev_get_tpc_config_cmd {
/* parameter */
__le32 param;
@@ -2841,14 +2940,14 @@ struct wmi_wmm_params_arg {
u32 no_ack;
};
-struct wmi_pdev_set_wmm_params_arg {
+struct wmi_wmm_params_all_arg {
struct wmi_wmm_params_arg ac_be;
struct wmi_wmm_params_arg ac_bk;
struct wmi_wmm_params_arg ac_vi;
struct wmi_wmm_params_arg ac_vo;
};
-struct wal_dbg_tx_stats {
+struct wmi_pdev_stats_tx {
/* Num HTT cookies queued to dispatch list */
__le32 comp_queued;
@@ -2918,7 +3017,7 @@ struct wal_dbg_tx_stats {
__le32 txop_ovf;
} __packed;
-struct wal_dbg_rx_stats {
+struct wmi_pdev_stats_rx {
/* Cnts any change in ring routing mid-ppdu */
__le32 mid_ppdu_route_change;
@@ -2952,17 +3051,11 @@ struct wal_dbg_rx_stats {
__le32 mpdu_errs;
} __packed;
-struct wal_dbg_peer_stats {
+struct wmi_pdev_stats_peer {
/* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
__le32 dummy;
} __packed;
-struct wal_dbg_stats {
- struct wal_dbg_tx_stats tx;
- struct wal_dbg_rx_stats rx;
- struct wal_dbg_peer_stats peer;
-} __packed;
-
enum wmi_stats_id {
WMI_REQUEST_PEER_STAT = 0x01,
WMI_REQUEST_AP_STAT = 0x02
@@ -3029,23 +3122,38 @@ struct wmi_stats_event {
u8 data[0];
} __packed;
+struct wmi_10_2_stats_event {
+ __le32 stats_id; /* %WMI_REQUEST_ */
+ __le32 num_pdev_stats;
+ __le32 num_pdev_ext_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ u8 data[0];
+} __packed;
+
/*
* PDEV statistics
* TODO: add all PDEV stats here
*/
+struct wmi_pdev_stats_base {
+ __le32 chan_nf;
+ __le32 tx_frame_count;
+ __le32 rx_frame_count;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 phy_err_count;
+ __le32 chan_tx_pwr;
+} __packed;
+
struct wmi_pdev_stats {
- __le32 chan_nf; /* Channel noise floor */
- __le32 tx_frame_count; /* TX frame count */
- __le32 rx_frame_count; /* RX frame count */
- __le32 rx_clear_count; /* rx clear count */
- __le32 cycle_count; /* cycle count */
- __le32 phy_err_count; /* Phy error count */
- __le32 chan_tx_pwr; /* channel tx power */
- struct wal_dbg_stats wal; /* WAL dbg stats */
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
} __packed;
-struct wmi_10x_pdev_stats {
- struct wmi_pdev_stats old;
+struct wmi_pdev_stats_extra {
__le32 ack_rx_bad;
__le32 rts_bad;
__le32 rts_good;
@@ -3054,6 +3162,30 @@ struct wmi_10x_pdev_stats {
__le32 mib_int_count;
} __packed;
+struct wmi_10x_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_pdev_stats_mem {
+ __le32 dram_free;
+ __le32 iram_free;
+} __packed;
+
+struct wmi_10_2_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ __le32 mc_drop;
+ struct wmi_pdev_stats_rx rx;
+ __le32 pdev_rx_timeout;
+ struct wmi_pdev_stats_mem mem;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
/*
* VDEV statistics
* TODO: add all VDEV stats here
@@ -3077,6 +3209,32 @@ struct wmi_10x_peer_stats {
__le32 peer_rx_rate;
} __packed;
+struct wmi_10_2_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+ __le32 current_per;
+ __le32 retries;
+ __le32 tx_rate_count;
+ __le32 max_4ms_frame_len;
+ __le32 total_sub_frames;
+ __le32 tx_bytes;
+ __le32 num_pkt_loss_overflow[4];
+ __le32 num_pkt_loss_excess_retry[4];
+} __packed;
+
+struct wmi_10_2_4_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 unknown_value; /* FIXME: what is this word? */
+} __packed;
+
+struct wmi_10_2_pdev_ext_stats {
+ __le32 rx_rssi_comb;
+ __le32 rx_rssi[4];
+ __le32 rx_mcs[10];
+ __le32 tx_mcs[10];
+ __le32 ack_rssi;
+} __packed;
+
struct wmi_vdev_create_cmd {
__le32 vdev_id;
__le32 vdev_type;
@@ -3930,6 +4088,13 @@ enum wmi_sta_ps_param_pspoll_count {
 * Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
+
+ /* When u-APSD is enabled the firmware will be very reluctant to exit
+ * STA PS. This could result in very poor Rx performance with STA doing
+ * PS-Poll for each and every buffered frame. This value is a bit
+ * arbitrary.
+ */
+ WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
};
/*
@@ -3955,6 +4120,30 @@ enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
};
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ __le32 wmm_ac;
+ __le32 user_priority;
+ __le32 service_interval;
+ __le32 suspend_interval;
+ __le32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
enum wmi_sta_powersave_param {
/*
* Controls how frames are retrievd from AP while STA is sleeping
@@ -4120,7 +4309,7 @@ struct wmi_bcn_info {
struct wmi_host_swba_event {
__le32 vdev_map;
- struct wmi_bcn_info bcn_info[1];
+ struct wmi_bcn_info bcn_info[0];
} __packed;
#define WMI_MAX_AP_VDEV 16
@@ -4325,7 +4514,7 @@ struct wmi_peer_set_q_empty_callback_cmd {
#define WMI_PEER_SPATIAL_MUX 0x00200000
#define WMI_PEER_VHT 0x02000000
#define WMI_PEER_80MHZ 0x04000000
-#define WMI_PEER_PMF 0x08000000
+#define WMI_PEER_VHT_2G 0x08000000
/*
* Peer rate capabilities.
@@ -4476,6 +4665,11 @@ enum wmi_sta_keepalive_method {
WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
};
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
+/* Firmware crashes if keepalive interval exceeds this limit */
+#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
+
/* note: ip4 addresses are in network byte order, i.e. big endian */
struct wmi_sta_keepalive_arp_resp {
__be32 src_ip4_addr;
@@ -4491,6 +4685,16 @@ struct wmi_sta_keepalive_cmd {
struct wmi_sta_keepalive_arp_resp arp_resp;
} __packed;
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
enum wmi_force_fw_hang_type {
WMI_FORCE_FW_HANG_ASSERT = 1,
WMI_FORCE_FW_HANG_NO_DETECT,
@@ -4567,6 +4771,58 @@ struct wmi_dbglog_cfg_cmd {
#define WMI_MAX_MEM_REQS 16
+struct wmi_scan_ev_arg {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+};
+
+struct wmi_mgmt_rx_ev_arg {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+};
+
+struct wmi_ch_info_ev_arg {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+};
+
+struct wmi_vdev_start_ev_arg {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+};
+
+struct wmi_peer_kick_ev_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_swba_ev_arg {
+ __le32 vdev_map;
+ const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+ const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
+};
+
+struct wmi_phyerr_ev_arg {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le32 buf_len;
+ const struct wmi_phyerr *phyerrs;
+};
+
struct wmi_svc_rdy_ev_arg {
__le32 min_tx_power;
__le32 max_tx_power;
@@ -4574,6 +4830,7 @@ struct wmi_svc_rdy_ev_arg {
__le32 vht_cap;
__le32 sw_ver0;
__le32 sw_ver1;
+ __le32 fw_build;
__le32 phy_capab;
__le32 num_rf_chains;
__le32 eeprom_rd;
@@ -4583,83 +4840,99 @@ struct wmi_svc_rdy_ev_arg {
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
};
+struct wmi_rdy_ev_arg {
+ __le32 sw_version;
+ __le32 abi_version;
+ __le32 status;
+ const u8 *mac_addr;
+};
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ __le32 temperature;
+} __packed;
+
struct ath10k;
struct ath10k_vif;
-struct ath10k_fw_stats;
+struct ath10k_fw_stats_pdev;
+struct ath10k_fw_stats_peer;
int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
int ath10k_wmi_connect(struct ath10k *ar);
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
-
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
-int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g,
- enum wmi_dfs_region dfs_reg);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
-int ath10k_wmi_cmd_init(struct ath10k *ar);
-int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
-int ath10k_wmi_stop_scan(struct ath10k *ar,
- const struct wmi_stop_scan_arg *arg);
-int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_type type,
- enum wmi_vdev_subtype subtype,
- const u8 macaddr[ETH_ALEN]);
-int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
-int ath10k_wmi_vdev_start(struct ath10k *ar,
- const struct wmi_vdev_start_request_arg *);
-int ath10k_wmi_vdev_restart(struct ath10k *ar,
- const struct wmi_vdev_start_request_arg *);
-int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
-int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
- const u8 *bssid);
-int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
-int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- u32 param_id, u32 param_value);
-int ath10k_wmi_vdev_install_key(struct ath10k *ar,
- const struct wmi_vdev_install_key_arg *arg);
-int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
- const struct wmi_vdev_spectral_conf_arg *arg);
-int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
- u32 enable);
-int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN]);
-int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN]);
-int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
-int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
- const u8 *peer_addr,
- enum wmi_peer_param param_id, u32 param_value);
-int ath10k_wmi_peer_assoc(struct ath10k *ar,
- const struct wmi_peer_assoc_complete_arg *arg);
-int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
- enum wmi_sta_ps_mode psmode);
-int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_sta_powersave_param param_id,
- u32 value);
-int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
- enum wmi_ap_ps_peer_param param_id, u32 value);
-int ath10k_wmi_scan_chan_list(struct ath10k *ar,
- const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
-int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
- const struct wmi_pdev_set_wmm_params_arg *arg);
-int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
-int ath10k_wmi_force_fw_hang(struct ath10k *ar,
- enum wmi_force_fw_hang_type type, u32 delay_ms);
-int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
-int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
-int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
- struct ath10k_fw_stats *stats);
-int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list);
-int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar);
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst);
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks);
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg);
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg);
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg);
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr, u64 tsf);
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ u64 tsf);
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8f387cf67340..2ca88b593e4c 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -227,7 +227,6 @@ static struct platform_driver ath_ahb_driver = {
.remove = ath_ahb_remove,
.driver = {
.name = "ar231x-wmac",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 19eab2a69ad5..3b4a6463d87a 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -672,10 +672,10 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
if (cc->cycles > 0) {
- ah->survey.channel_time += cc->cycles / div;
- ah->survey.channel_time_busy += cc->rx_busy / div;
- ah->survey.channel_time_rx += cc->rx_frame / div;
- ah->survey.channel_time_tx += cc->tx_frame / div;
+ ah->survey.time += cc->cycles / div;
+ ah->survey.time_busy += cc->rx_busy / div;
+ ah->survey.time_rx += cc->rx_frame / div;
+ ah->survey.time_tx += cc->tx_frame / div;
}
memset(cc, 0, sizeof(*cc));
spin_unlock_bh(&common->cc_lock);
@@ -686,10 +686,10 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
survey->noise = ah->ah_noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_IN_USE |
- SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_RX |
- SURVEY_INFO_CHANNEL_TIME_TX;
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index c60d36aa13e2..bf29da5e90da 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -912,6 +912,7 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_PWR_SV : 0);
+ /* fall through */
case NL80211_IFTYPE_MONITOR:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index a3399c4f13a9..b9b651ea9851 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = ioread32(reg);
iowrite32(regval | val, reg);
regval = ioread32(reg);
- usleep_range(100, 150);
+ udelay(100); /* NB: should be atomic */
/* Bring BB/MAC out of reset */
iowrite32(regval & ~val, reg);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7a5337877a0c..85da63a67faf 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1799,20 +1799,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= STATION_INFO_RX_BYTES64;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_packets = vif->target_stats.rx_pkt;
- sinfo->filled |= STATION_INFO_RX_PACKETS;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= STATION_INFO_TX_BYTES64;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_packets = vif->target_stats.tx_pkt;
- sinfo->filled |= STATION_INFO_TX_PACKETS;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
}
sinfo->signal = vif->target_stats.cs_rssi;
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
rate = vif->target_stats.tx_ucast_rate;
@@ -1827,6 +1827,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
}
sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ sinfo->txrate.bw = RATE_INFO_BW_20;
} else if (is_rate_ht40(rate, &mcs, &sgi)) {
if (sgi) {
sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
@@ -1835,7 +1836,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
sinfo->txrate.mcs = mcs;
}
- sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ sinfo->txrate.bw = RATE_INFO_BW_40;
sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
} else {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -1844,12 +1845,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
if (test_bit(CONNECTED, &vif->flags) &&
test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
vif->nw_type == INFRA_NETWORK) {
- sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 933aef025698..b42ba46b5030 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -488,7 +488,6 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
sinfo.assoc_req_ies = ies;
sinfo.assoc_req_ies_len = ies_len;
- sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index e000c4c27881..bd4a1a655f42 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -43,6 +43,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "qca953x_wmac",
.driver_data = AR9300_DEVID_AR953X,
},
+ {
+ .name = "qca956x_wmac",
+ .driver_data = AR9300_DEVID_QCA956X,
+ },
{},
};
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ba502a2d199b..ca01d17d130f 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -259,7 +259,8 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
entry_cck->fir_step_level);
/* Skip MRC CCK for pre AR9003 families */
- if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) ||
+ AR_SREV_9565(ah) || AR_SREV_9561(ah))
return;
if (aniState->mrcCCK != entry_cck->mrc_cck_on)
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 5829074208fa..f273427fdd29 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -22,6 +22,21 @@
/* All code below is for AR5008, AR9001, AR9002 */
+#define AR5008_OFDM_RATES 8
+#define AR5008_HT_SS_RATES 8
+#define AR5008_HT_DS_RATES 8
+
+#define AR5008_HT20_SHIFT 16
+#define AR5008_HT40_SHIFT 24
+
+#define AR5008_11NA_OFDM_SHIFT 0
+#define AR5008_11NA_HT_SS_SHIFT 8
+#define AR5008_11NA_HT_DS_SHIFT 16
+
+#define AR5008_11NG_OFDM_SHIFT 4
+#define AR5008_11NG_HT_SS_SHIFT 12
+#define AR5008_11NG_HT_DS_SHIFT 20
+
static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
@@ -1235,6 +1250,71 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
conf->radar_inband = 8;
}
+static void ar5008_hw_init_txpower_cck(struct ath_hw *ah, int16_t *rate_array)
+{
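+ /*
+ * With open-loop power control (AR9280 2.0 and later) the CCK
+ * target powers from rate_array are backed off by 2, clamped at 0.
+ */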
+#define CCK_DELTA(x) ((OLC_FOR_AR9280_20_LATER) ? max((x) - 2, 0) : (x))
+ ah->tx_power[0] = CCK_DELTA(rate_array[rate1l]);
+ ah->tx_power[1] = CCK_DELTA(min(rate_array[rate2l],
+ rate_array[rate2s]));
+ ah->tx_power[2] = CCK_DELTA(min(rate_array[rate5_5l],
+ rate_array[rate5_5s]));
+ ah->tx_power[3] = CCK_DELTA(min(rate_array[rate11l],
+ rate_array[rate11s]));
+#undef CCK_DELTA
+}
+
+static void ar5008_hw_init_txpower_ofdm(struct ath_hw *ah, int16_t *rate_array,
+ int offset)
+{
+ int i, idx = 0;
+
+ for (i = offset; i < offset + AR5008_OFDM_RATES; i++) {
+ ah->tx_power[i] = rate_array[idx];
+ idx++;
+ }
+}
+
+static void ar5008_hw_init_txpower_ht(struct ath_hw *ah, int16_t *rate_array,
+ int ss_offset, int ds_offset,
+ bool is_40, int ht40_delta)
+{
+ int i, mcs_idx = (is_40) ? AR5008_HT40_SHIFT : AR5008_HT20_SHIFT;
+
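+ /*
+ * HT target powers are stored in rate_array starting at index 16
+ * (HT20) or 24 (HT40); dual-stream rates reuse the single-stream
+ * values.
+ */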
+ for (i = ss_offset; i < ss_offset + AR5008_HT_SS_RATES; i++) {
+ ah->tx_power[i] = rate_array[mcs_idx] + ht40_delta;
+ mcs_idx++;
+ }
+ memcpy(&ah->tx_power[ds_offset], &ah->tx_power[ss_offset],
+ AR5008_HT_SS_RATES);
+}
+
+void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
+ struct ath9k_channel *chan, int ht40_delta)
+{
+ if (IS_CHAN_5GHZ(chan)) {
+ ar5008_hw_init_txpower_ofdm(ah, rate_array,
+ AR5008_11NA_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar5008_hw_init_txpower_ht(ah, rate_array,
+ AR5008_11NA_HT_SS_SHIFT,
+ AR5008_11NA_HT_DS_SHIFT,
+ IS_CHAN_HT40(chan),
+ ht40_delta);
+ }
+ } else {
+ ar5008_hw_init_txpower_cck(ah, rate_array);
+ ar5008_hw_init_txpower_ofdm(ah, rate_array,
+ AR5008_11NG_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar5008_hw_init_txpower_ht(ah, rate_array,
+ AR5008_11NG_HT_SS_SHIFT,
+ AR5008_11NG_HT_DS_SHIFT,
+ IS_CHAN_HT40(chan),
+ ht40_delta);
+ }
+ }
+}
+
int ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 06ab71db6e80..174442beb952 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1203,24 +1203,41 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
int offset[8] = {0}, total = 0, test;
- int agc_out, i;
+ int agc_out, i, peak_detect_threshold;
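+ /*
+ * AR9550/AR9531 use a non-zero peak detect threshold for the
+ * AGC DBDAC override programmed below.
+ */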
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
+ peak_detect_threshold = 8;
+ else
+ peak_detect_threshold = 0;
+
+ /*
+ * Turn off LNA/SW.
+ */
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
- if (is_2g)
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
- AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
- else
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
- AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+ if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9330_11(ah)) {
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+ }
+
+ /*
+ * Turn off RXON.
+ */
REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
AR_PHY_65NM_RXTX2_RXON, 0x0);
+ /*
+ * Turn on AGC for cal.
+ */
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
@@ -1228,16 +1245,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
- if (AR_SREV_9330_11(ah)) {
+ if (AR_SREV_9330_11(ah))
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
- } else {
+
+ if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
if (is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+ peak_detect_threshold);
else
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+ peak_detect_threshold);
}
for (i = 6; i > 0; i--) {
@@ -1266,10 +1286,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
+ /*
+ * Turn on LNA.
+ */
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
+ /*
+ * Turn off RXON.
+ */
REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
AR_PHY_65NM_RXTX2_RXON_OVR, 0);
+ /*
+ * Turn off peak detect calibration.
+ */
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}
@@ -1611,8 +1640,14 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
- if (AR_SREV_9330_11(ah))
- ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
+ if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ ar9003_hw_manual_peak_cal(ah, i,
+ IS_CHAN_2GHZ(chan));
+ }
+ }
/*
* For non-AR9550 chips, we just trigger AGC calibration
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 08225a0067c2..8b4561e8ce1a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3536,7 +3536,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl;
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9531(ah))
+ AR_SREV_9531(ah) || AR_SREV_9561(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3599,7 +3599,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
- } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9550_ALL, value);
} else
@@ -3929,9 +3929,13 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
return;
- } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah) ||
+ AR_SREV_9561(ah)) {
reg_val = le32_to_cpu(pBase->swreg);
REG_WRITE(ah, AR_PHY_PMU1, reg_val);
+
+ if (AR_SREV_9561(ah))
+ REG_WRITE(ah, AR_PHY_PMU2, 0x10200000);
} else {
/* Internal regulator is ON. Write swreg register. */
reg_val = le32_to_cpu(pBase->swreg);
@@ -4034,7 +4038,8 @@ static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
if (!AR_SREV_9300(ah) &&
!AR_SREV_9340(ah) &&
!AR_SREV_9580(ah) &&
- !AR_SREV_9531(ah))
+ !AR_SREV_9531(ah) &&
+ !AR_SREV_9561(ah))
return;
xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
@@ -4812,7 +4817,7 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
}
tempslope:
- if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 06ad2172030e..4335ccbe7d7e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -29,6 +29,7 @@
#include "ar9565_1p0_initvals.h"
#include "ar9565_1p1_initvals.h"
#include "ar953x_initvals.h"
+#include "ar956x_initvals.h"
/* General hardware code for the AR9003 hardware family */
@@ -358,6 +359,40 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
qca953x_1p0_modes_fast_clock);
+ } else if (AR_SREV_9561(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ qca956x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ qca956x_1p0_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ qca956x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ qca956x_1p0_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ qca956x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ qca956x_1p0_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ qca956x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ qca956x_1p0_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca956x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca956x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ qca956x_1p0_baseband_postamble_dfs_channel);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ qca956x_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ qca956x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -544,6 +579,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9531_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
qca953x_2p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -594,7 +632,10 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
qca953x_1p0_modes_no_xpa_tx_gain_table);
- } else if (AR_SREV_9462_21(ah))
+ } else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_2p1_modes_high_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
@@ -628,6 +669,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_low_ob_db_tx_gain_table);
else if (AR_SREV_9565_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p1_modes_low_ob_db_tx_gain_table);
@@ -699,6 +743,9 @@ static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_type5_tx_gain_table);
+ else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_green_tx_gain_table);
else if (AR_SREV_9300_22(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_type5_tx_gain_table_2p2);
@@ -770,6 +817,13 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
qca953x_1p0_common_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
qca953x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9561(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca956x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca956x_1p0_common_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ qca956x_1p0_xlna_only);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_rx_gain_table);
@@ -825,6 +879,11 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
qca953x_2p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
qca953x_2p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9561(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca956x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca956x_1p0_common_wo_xlna_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ae6cde273414..1ad66b76749b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -183,7 +183,8 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
} else {
channelSel = CHANSEL_2G(freq) >> 1;
}
- } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah)) {
if (ah->is_clk_25mhz)
div = 75;
else
@@ -198,7 +199,8 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
/* Set to 2G mode */
bMode = 1;
} else {
- if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) &&
+ if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) ||
+ AR_SREV_9531(ah) || AR_SREV_9561(ah)) &&
ah->is_clk_25mhz) {
channelSel = freq / 75;
chan_frac = ((freq % 75) * 0x20000) / 75;
@@ -265,7 +267,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
*/
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
- AR_SREV_9550(ah)) {
+ AR_SREV_9550(ah) || AR_SREV_9561(ah)) {
if (spur_fbin_ptr[0] == 0) /* No spur */
return;
max_spur_cnts = 5;
@@ -292,7 +294,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
- AR_SREV_9550(ah))
+ AR_SREV_9550(ah) || AR_SREV_9561(ah))
cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan));
else
@@ -641,8 +643,10 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
(REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
/* Enable 11n HT, 20 MHz */
- phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 |
- AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+
+ if (!AR_SREV_9561(ah))
+ phymode |= AR_PHY_GC_SINGLE_HT_LTF1;
/* Configure baseband for dynamic 20/40 operation */
if (IS_CHAN_HT40(chan)) {
@@ -745,7 +749,8 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
else
ah->enabled_cals &= ~TX_CL_CAL;
- if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
+ AR_SREV_9561(ah)) {
if (ah->is_clk_25mhz) {
REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
@@ -812,6 +817,19 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
return ret;
}
+static int ar9561_hw_get_modes_txgain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 1;
+ else
+ return 2;
+ }
+
+ return 0;
+}
+
static void ar9003_doubler_fix(struct ath_hw *ah)
{
if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) {
@@ -911,21 +929,29 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
modesIndex, regWrites);
}
+
+ if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ modesIndex, regWrites);
}
- if (AR_SREV_9550(ah))
+ if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
regWrites);
/*
* TXGAIN initvals.
*/
- if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
int modes_txgain_index = 1;
if (AR_SREV_9550(ah))
modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
+ if (AR_SREV_9561(ah))
+ modes_txgain_index =
+ ar9561_hw_get_modes_txgain_index(ah, chan);
+
if (modes_txgain_index < 0)
return -EINVAL;
@@ -1989,7 +2015,8 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->rf_set_freq = ar9003_hw_set_channel;
priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
priv_ops->compute_pll_control = ar9003_hw_compute_pll_control_soc;
else
priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fd090b1f2d0f..c311b2bfdb00 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -454,7 +454,7 @@
#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
#define AR_PHY_MODE (AR_SM_BASE + 0x8)
#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
-#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
+#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
@@ -506,7 +506,7 @@
#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
#define AR_PHY_TEST_CHAIN_SEL_S 30
-#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
+#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x160 : 0x164))
#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
@@ -525,7 +525,7 @@
#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
-#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
@@ -536,7 +536,7 @@
#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
-#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
@@ -726,21 +726,24 @@
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
-#define AR_CH0_TOP2_XPABIASLVL 0xf000
+#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
#define AR_CH0_TOP2_XPABIASLVL_S 12
#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
- ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290))
+ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
+ (AR_SREV_9561(ah) ? 0x162c0 : 0x16290)))
#define AR_CH0_XTAL_CAPINDAC 0x7f000000
#define AR_CH0_XTAL_CAPINDAC_S 24
#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
#define AR_CH0_XTAL_CAPOUTDAC_S 17
-#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40)
+#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : \
+ (AR_SREV_9561(ah) ? 0x16cc0 : 0x16c40))
#define AR_PHY_PMU1_PWD 0x1
#define AR_PHY_PMU1_PWD_S 0
-#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44)
+#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : \
+ (AR_SREV_9561(ah) ? 0x16cc4 : 0x16c44))
#define AR_PHY_PMU2_PGM 0x00200000
#define AR_PHY_PMU2_PGM_S 21
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
index 81c88dd606dc..86bfc9604dca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -17,23 +17,9 @@
#include <linux/export.h>
#include "ath9k.h"
#include "reg.h"
+#include "reg_wow.h"
#include "hw-ops.h"
-const char *ath9k_hw_wow_event_to_string(u32 wow_event)
-{
- if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
- return "Magic pattern";
- if (wow_event & AH_WOW_USER_PATTERN_EN)
- return "User pattern";
- if (wow_event & AH_WOW_LINK_CHANGE)
- return "Link change";
- if (wow_event & AH_WOW_BEACON_MISS)
- return "Beacon miss";
-
- return "unknown reason";
-}
-EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
-
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -49,6 +35,15 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
return;
}
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ if (!REG_READ(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL))
+ REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
+ } else if (AR_SREV_9485(ah)) {
+ if (!(REG_READ(ah, AR_NDP2_TIMER_MODE) &
+ AR_GEN_TIMERS2_MODE_ENABLE_MASK))
+ REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
+ }
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}
@@ -67,11 +62,15 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
/* set the transmit buffer */
ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
ctl[1] = 0;
- ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
ctl[4] = 0;
ctl[7] = (ah->txchainmask) << 2;
ctl[2] = 0xf << 16; /* tx_tries 0 */
+ if (IS_CHAN_2GHZ(ah->curchan))
+ ctl[3] = 0x1b; /* CCK_1M */
+ else
+ ctl[3] = 0xb; /* OFDM_6M */
+
for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
@@ -103,21 +102,22 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
}
-void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
- u8 *user_mask, int pattern_count,
- int pattern_len)
+int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len)
{
int i;
u32 pattern_val, mask_val;
u32 set, clr;
- /* FIXME: should check count by querying the hardware capability */
- if (pattern_count >= MAX_NUM_PATTERN)
- return;
+ if (pattern_count >= ah->wow.max_patterns)
+ return -ENOSPC;
- REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+ if (pattern_count < MAX_NUM_PATTERN_LEGACY)
+ REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+ else
+ REG_SET_BIT(ah, AR_MAC_PCU_WOW4, BIT(pattern_count - 8));
- /* set the registers for pattern */
for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
memcpy(&pattern_val, user_pattern, 4);
REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
@@ -125,49 +125,42 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
user_pattern += 4;
}
- /* set the registers for mask */
for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
memcpy(&mask_val, user_mask, 4);
REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
user_mask += 4;
}
- /* set the pattern length to be matched
- *
- * AR_WOW_LENGTH1_REG1
- * bit 31:24 pattern 0 length
- * bit 23:16 pattern 1 length
- * bit 15:8 pattern 2 length
- * bit 7:0 pattern 3 length
- *
- * AR_WOW_LENGTH1_REG2
- * bit 31:24 pattern 4 length
- * bit 23:16 pattern 5 length
- * bit 15:8 pattern 6 length
- * bit 7:0 pattern 7 length
- *
- * the below logic writes out the new
- * pattern length for the corresponding
- * pattern_count, while masking out the
- * other fields
- */
-
- ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+ if (pattern_count < MAX_NUM_PATTERN_LEGACY)
+ ah->wow.wow_event_mask |=
+ BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+ else
+ ah->wow.wow_event_mask2 |=
+ BIT((pattern_count - 8) + AR_WOW_PAT_FOUND_SHIFT);
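+ /*
+ * Each AR_WOW_LENGTHx register packs four 8-bit pattern lengths,
+ * so each group of four patterns maps to LENGTH1..LENGTH4 below.
+ */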
if (pattern_count < 4) {
- /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
set = (pattern_len & AR_WOW_LENGTH_MAX) <<
AR_WOW_LEN1_SHIFT(pattern_count);
clr = AR_WOW_LENGTH1_MASK(pattern_count);
REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
- } else {
- /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
+ } else if (pattern_count < 8) {
set = (pattern_len & AR_WOW_LENGTH_MAX) <<
AR_WOW_LEN2_SHIFT(pattern_count);
clr = AR_WOW_LENGTH2_MASK(pattern_count);
REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+ } else if (pattern_count < 12) {
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN3_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH3_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH3, set, clr);
+ } else if (pattern_count < MAX_NUM_PATTERN) {
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN4_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH4_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH4, set, clr);
}
+ return 0;
}
EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
@@ -189,7 +182,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
* register. This mask will clean it up.
*/
- val &= ah->wow_event_mask;
+ val &= ah->wow.wow_event_mask;
if (val) {
if (val & AR_WOW_MAGIC_PAT_FOUND)
@@ -233,190 +226,192 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, false);
- ah->wow_event_mask = 0;
+ ah->wow.wow_event_mask = 0;
return wow_status;
}
EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
-void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
{
- u32 wow_event_mask;
- u32 set, clr;
+ u32 wa_reg;
- /*
- * wow_event_mask is a mask to the AR_WOW_PATTERN register to
- * indicate which WoW events we have enabled. The WoW events
- * are from the 'pattern_enable' in this function and
- * 'pattern_count' of ath9k_hw_wow_apply_pattern()
- */
- wow_event_mask = ah->wow_event_mask;
+ if (!ah->is_pciexpress)
+ return;
/*
- * Untie Power-on-Reset from the PCI-E-Reset. When we are in
- * WOW sleep, we do want the Reset from the PCI-E to disturb
- * our hw state
+ * We need to untie the internal POR (power-on-reset)
+ * to the external PCI-E reset. We also need to tie
+ * the PCI-E Phy reset to the PCI-E reset.
*/
- if (ah->is_pciexpress) {
- /*
- * we need to untie the internal POR (power-on-reset)
- * to the external PCI-E reset. We also need to tie
- * the PCI-E Phy reset to the PCI-E reset.
- */
- set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
- clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
- REG_RMW(ah, AR_WA, set, clr);
- }
+ wa_reg = REG_READ(ah, AR_WA);
+ wa_reg &= ~AR_WA_UNTIE_RESET_EN;
+ wa_reg |= AR_WA_RESET_EN;
+ wa_reg |= AR_WA_POR_SHORT;
- /*
- * set the power states appropriately and enable PME
- */
- set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
- AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+ REG_WRITE(ah, AR_WA, wa_reg);
+}
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+ u32 wow_event_mask;
+ u32 keep_alive, magic_pattern, host_pm_ctrl;
+
+ wow_event_mask = ah->wow.wow_event_mask;
/*
- * set and clear WOW_PME_CLEAR registers for the chip
+ * AR_PMCTRL_HOST_PME_EN - Override PME enable in configuration
+ * space and allow MAC to generate WoW anyway.
+ *
+ * AR_PMCTRL_PWR_PM_CTRL_ENA - ???
+ *
+ * AR_PMCTRL_AUX_PWR_DET - PCI core SYS_AUX_PWR_DET signal,
+ * needs to be set for WoW in PCI mode.
+ *
+ * AR_PMCTRL_WOW_PME_CLR - WoW Clear Signal going to the MAC.
+ *
+ * Set the power states appropriately and enable PME.
+ *
+ * Set and clear WOW_PME_CLEAR for the chip
* to generate next wow signal.
*/
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
- clr = AR_PMCTRL_WOW_PME_CLR;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA |
+ AR_PMCTRL_AUX_PWR_DET |
+ AR_PMCTRL_WOW_PME_CLR);
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);
/*
- * Setup for:
- * - beacon misses
- * - magic pattern
- * - keep alive timeout
- * - pattern matching
+ * Random Backoff.
+ *
+ * 31:28 in AR_WOW_PATTERN : Indicates the number of bits used in the
+ * contention window. For value N,
+ * the random backoff will be selected between
+ * 0 and (2 ^ N) - 1.
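+ * For example, N = 4 selects a backoff in the range [0, 15].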
*/
+ REG_SET_BIT(ah, AR_WOW_PATTERN,
+ AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF));
/*
- * Program default values for pattern backoff, aifs/slot/KAL count,
- * beacon miss timeout, KAL timeout, etc.
+ * AIFS time, Slot time, Keep Alive count.
+ */
+ REG_SET_BIT(ah, AR_WOW_COUNT, AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+ AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+ AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT));
+ /*
+ * Beacon timeout.
*/
- set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
- REG_SET_BIT(ah, AR_WOW_PATTERN, set);
-
- set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
- AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
- AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
- REG_SET_BIT(ah, AR_WOW_COUNT, set);
-
if (pattern_enable & AH_WOW_BEACON_MISS)
- set = AR_WOW_BEACON_TIMO;
- /* We are not using beacon miss, program a large value */
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO);
else
- set = AR_WOW_BEACON_TIMO_MAX;
-
- REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO_MAX);
/*
- * Keep alive timo in ms except AR9280
+ * Keep alive timeout in ms.
*/
if (!pattern_enable)
- set = AR_WOW_KEEP_ALIVE_NEVER;
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, AR_WOW_KEEP_ALIVE_NEVER);
else
- set = KAL_TIMEOUT * 32;
-
- REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, KAL_TIMEOUT * 32);
/*
- * Keep alive delay in us. based on 'power on clock',
- * therefore in usec
+ * Keep alive delay in us.
*/
- set = KAL_DELAY * 1000;
- REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, KAL_DELAY * 1000);
/*
- * Create keep alive pattern to respond to beacons
+ * Create keep alive pattern to respond to beacons.
*/
ath9k_wow_create_keep_alive_pattern(ah);
/*
- * Configure MAC WoW Registers
+ * Configure keep alive register.
*/
- set = 0;
+ keep_alive = REG_READ(ah, AR_WOW_KEEP_ALIVE);
+
/* Send keep alive timeouts anyway */
- clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+ keep_alive &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;
- if (pattern_enable & AH_WOW_LINK_CHANGE)
+ if (pattern_enable & AH_WOW_LINK_CHANGE) {
+ keep_alive &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
- else
- set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ } else {
+ keep_alive |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ }
- set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
- REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE, keep_alive);
/*
- * we are relying on a bmiss failure. ensure we have
- * enough threshold to prevent false positives
+ * We are relying on a bmiss failure, ensure we have
+ * enough threshold to prevent false positives.
*/
REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
AR_WOW_BMISSTHRESHOLD);
- set = 0;
- clr = 0;
-
if (pattern_enable & AH_WOW_BEACON_MISS) {
- set = AR_WOW_BEACON_FAIL_EN;
wow_event_mask |= AR_WOW_BEACON_FAIL;
+ REG_SET_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
} else {
- clr = AR_WOW_BEACON_FAIL_EN;
+ REG_CLR_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
}
- REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
-
- set = 0;
- clr = 0;
/*
- * Enable the magic packet registers
+ * Enable the magic packet registers.
*/
+ magic_pattern = REG_READ(ah, AR_WOW_PATTERN);
+ magic_pattern |= AR_WOW_MAC_INTR_EN;
+
if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
- set = AR_WOW_MAGIC_EN;
+ magic_pattern |= AR_WOW_MAGIC_EN;
wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
} else {
- clr = AR_WOW_MAGIC_EN;
+ magic_pattern &= ~AR_WOW_MAGIC_EN;
}
- set |= AR_WOW_MAC_INTR_EN;
- REG_RMW(ah, AR_WOW_PATTERN, set, clr);
+ REG_WRITE(ah, AR_WOW_PATTERN, magic_pattern);
+
+ /*
+ * Enable pattern matching for packets which are less
+ * than 256 bytes.
+ */
REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
AR_WOW_PATTERN_SUPPORTED);
/*
- * Set the power states appropriately and enable PME
+ * Set the power states appropriately and enable PME.
*/
- clr = 0;
- set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
- AR_PMCTRL_PWR_PM_CTRL_ENA;
+ host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
+ host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
+ AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA;
+ host_pm_ctrl &= ~AR_PCIE_PM_CTRL_ENA;
- clr = AR_PCIE_PM_CTRL_ENA;
- REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+ if (AR_SREV_9462(ah)) {
+ /*
+ * This is needed to prevent the chip waking up
+ * the host within 3-4 seconds with certain
+ * platform/BIOS.
+ */
+ host_pm_ctrl &= ~AR_PMCTRL_PWR_STATE_D1D3;
+ host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ }
+
+ REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);
/*
- * this is needed to prevent the chip waking up
- * the host within 3-4 seconds with certain
- * platform/BIOS. The fix is to enable
- * D1 & D3 to match original definition and
- * also match the OTP value. Anyway this
- * is more related to SW WOW.
+ * Enable sequence number generation when asleep.
*/
- clr = AR_PMCTRL_PWR_STATE_D1D3;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
- set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ /* To bring down WOW power low margin */
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, BIT(13));
- REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+ ath9k_hw_wow_set_arwr_reg(ah);
- /* to bring down WOW power low margin */
- set = BIT(13);
- REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
/* HW WoW */
- clr = BIT(5);
- REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));
ath9k_hw_set_powermode_wow_sleep(ah);
- ah->wow_event_mask = wow_event_mask;
+ ah->wow.wow_event_mask = wow_event_mask;
}
EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 159cc6fd2362..6fc0d07e5ec6 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -358,7 +358,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -378,7 +378,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index fd6a84ccd49e..148562addd38 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -63,7 +63,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
new file mode 100644
index 000000000000..c3a47eaaf0c0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_956X_H
+#define INITVALS_956X_H
+
+#define qca956x_1p0_mac_core ar955x_1p0_mac_core
+
+#define qca956x_1p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define qca956x_1p0_soc_preamble ar955x_1p0_soc_preamble
+
+#define qca956x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define qca956x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define qca956x_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
+
+#define qca956x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds
+
+#define qca956x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds
+
+#define qca956x_1p0_modes_fast_clock ar9462_2p0_modes_fast_clock
+
+static const u32 qca956x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840cbf},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb514},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4789},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x02993b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a218, 0x00000000},
+ {0x0000a21c, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a360, 0x00000000},
+ {0x0000a36c, 0x00000000},
+ {0x0000a384, 0x00000001},
+ {0x0000a388, 0x00000444},
+ {0x0000a38c, 0x00000000},
+ {0x0000a390, 0x210d0401},
+ {0x0000a394, 0xab9a7144},
+ {0x0000a398, 0x00000201},
+ {0x0000a39c, 0x42424848},
+ {0x0000a3a0, 0x3c466478},
+ {0x0000a3a4, 0x3a363600},
+ {0x0000a3a8, 0x0000003a},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x009011fe},
+ {0x0000a3b4, 0x00000034},
+ {0x0000a3b8, 0x00b3ec0a},
+ {0x0000a3bc, 0x00000036},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d0019ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x05000000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9fee},
+ {0x0000a648, 0x0048660d},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x21200504},
+ {0x0000a678, 0x61602322},
+ {0x0000a67c, 0x65646362},
+ {0x0000a680, 0x6b6a6968},
+ {0x0000a684, 0xe2706d6c},
+ {0x0000a688, 0x000000e3},
+ {0x0000a690, 0x00000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 qca956x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac621f1, 0x5ac621f1},
+ {0x00009828, 0x06903081, 0x06903081, 0x07d43881, 0x07d43881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000de, 0x6c4000de},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x337d605e, 0x337d5d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x045c0cc4, 0x045c0cc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001a6, 0x000001a6},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001a6, 0x000001a6},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca956x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x3f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x8036db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f080a},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480000},
+ {0x000160c0, 0x006db6d8},
+ {0x000160c4, 0x24b6db6c},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6db6fb7c},
+ {0x000160d0, 0x6db6da44},
+ {0x00016100, 0x07ff8001},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x00008058},
+ {0x00016288, 0x001c6000},
+ {0x0001628c, 0x50000000},
+ {0x000162c0, 0x4b962100},
+ {0x000162c4, 0x00000480},
+ {0x000162c8, 0x04000144},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x3f80fff8},
+ {0x0001644c, 0x000f0278},
+ {0x00016450, 0x8036db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x07ff8001},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x01884080},
+ {0x00016548, 0x00008058},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x3f80fff8},
+ {0x0001684c, 0x000f0278},
+ {0x00016850, 0x8036db6c},
+ {0x00016854, 0x6db60000},
+ {0x00016900, 0x07ff8001},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x01884080},
+ {0x00016948, 0x00008058},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 qca956x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
+ {0x00016104, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016504, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016904, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
+ {0x0001690c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+};
+
+static const u32 qca956x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a38c, 0x00000000},
+ {0x0000a390, 0x6f7f0301},
+ {0x0000a394, 0xca9228ee},
+};
+
+static const u32 qca956x_1p0_modes_no_xpa_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x0000a2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000a2e0, 0xff323118, 0xff323118},
+ {0x0000a2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000a2e8, 0xffc00000, 0xffc00000},
+ {0x0000a39c, 0x42424242, 0x42424242},
+ {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
+ {0x0000a3b0, 0x00a01404, 0x00a01404},
+ {0x0000a3b4, 0x00000034, 0x00000034},
+ {0x0000a3b8, 0x00800408, 0x00800408},
+ {0x0000a3bc, 0x00000036, 0x00000036},
+ {0x0000a410, 0x000050dc, 0x000050dc},
+ {0x0000a500, 0x09000040, 0x09000040},
+ {0x0000a504, 0x0b000041, 0x0b000041},
+ {0x0000a508, 0x0d000042, 0x0d000042},
+ {0x0000a50c, 0x11000044, 0x11000044},
+ {0x0000a510, 0x15000046, 0x15000046},
+ {0x0000a514, 0x1d000440, 0x1d000440},
+ {0x0000a518, 0x1f000441, 0x1f000441},
+ {0x0000a51c, 0x23000443, 0x23000443},
+ {0x0000a520, 0x25000444, 0x25000444},
+ {0x0000a524, 0x280004e0, 0x280004e0},
+ {0x0000a528, 0x2c0004e2, 0x2c0004e2},
+ {0x0000a52c, 0x2e0004e3, 0x2e0004e3},
+ {0x0000a530, 0x300004e4, 0x300004e4},
+ {0x0000a534, 0x340004e6, 0x340004e6},
+ {0x0000a538, 0x37000ce0, 0x37000ce0},
+ {0x0000a53c, 0x3b000ce2, 0x3b000ce2},
+ {0x0000a540, 0x3d000ce3, 0x3d000ce3},
+ {0x0000a544, 0x3f000ce4, 0x3f000ce4},
+ {0x0000a548, 0x45001ee0, 0x45001ee0},
+ {0x0000a54c, 0x49001ee2, 0x49001ee2},
+ {0x0000a550, 0x4d001ee4, 0x4d001ee4},
+ {0x0000a554, 0x51001ee6, 0x51001ee6},
+ {0x0000a558, 0x55001eea, 0x55001eea},
+ {0x0000a55c, 0x59001eec, 0x59001eec},
+ {0x0000a560, 0x5d001ef0, 0x5d001ef0},
+ {0x0000a564, 0x5f001ef1, 0x5f001ef1},
+ {0x0000a568, 0x60001ef2, 0x60001ef2},
+ {0x0000a56c, 0x61001ef3, 0x61001ef3},
+ {0x0000a570, 0x62001ef4, 0x62001ef4},
+ {0x0000a574, 0x63001ef5, 0x63001ef5},
+ {0x0000a578, 0x64001ffc, 0x64001ffc},
+ {0x0000a57c, 0x64001ffc, 0x64001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000},
+ {0x0000a614, 0x00804201, 0x00804201},
+ {0x0000a618, 0x00804201, 0x00804201},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000b2e0, 0xff323118, 0xff323118},
+ {0x0000b2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000b2e8, 0xffc00000, 0xffc00000},
+ {0x0000c2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000c2e0, 0xff323118, 0xff323118},
+ {0x0000c2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000c2e8, 0xffc00000, 0xffc00000},
+ {0x00016044, 0x049242db, 0x049242db},
+ {0x00016048, 0x64925a70, 0x64925a70},
+ {0x00016148, 0x00008050, 0x00008050},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a6000, 0x453a6000},
+ {0x00016444, 0x049242db, 0x049242db},
+ {0x00016448, 0x6c925a70, 0x6c925a70},
+ {0x00016548, 0x00008050, 0x00008050},
+ {0x00016844, 0x049242db, 0x049242db},
+ {0x00016848, 0x6c925a70, 0x6c925a70},
+ {0x00016948, 0x00008050, 0x00008050},
+};
+
+static const u32 qca956x_1p0_modes_xpa_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x0000a2dc, 0xcc69ac94, 0xcc69ac94},
+ {0x0000a2e0, 0xf0b23118, 0xf0b23118},
+ {0x0000a2e4, 0xffffc000, 0xffffc000},
+ {0x0000a2e8, 0xc0000000, 0xc0000000},
+ {0x0000a410, 0x000050d2, 0x000050d2},
+ {0x0000a500, 0x0a000040, 0x0a000040},
+ {0x0000a504, 0x0c000041, 0x0c000041},
+ {0x0000a508, 0x0e000042, 0x0e000042},
+ {0x0000a50c, 0x12000044, 0x12000044},
+ {0x0000a510, 0x16000046, 0x16000046},
+ {0x0000a514, 0x1d000440, 0x1d000440},
+ {0x0000a518, 0x1f000441, 0x1f000441},
+ {0x0000a51c, 0x23000443, 0x23000443},
+ {0x0000a520, 0x25000444, 0x25000444},
+ {0x0000a524, 0x29000a40, 0x29000a40},
+ {0x0000a528, 0x2d000a42, 0x2d000a42},
+ {0x0000a52c, 0x2f000a43, 0x2f000a43},
+ {0x0000a530, 0x31000a44, 0x31000a44},
+ {0x0000a534, 0x35000a46, 0x35000a46},
+ {0x0000a538, 0x38000ce0, 0x38000ce0},
+ {0x0000a53c, 0x3c000ce2, 0x3c000ce2},
+ {0x0000a540, 0x3e000ce3, 0x3e000ce3},
+ {0x0000a544, 0x40000ce4, 0x40000ce4},
+ {0x0000a548, 0x46001ee0, 0x46001ee0},
+ {0x0000a54c, 0x4a001ee2, 0x4a001ee2},
+ {0x0000a550, 0x4e001ee4, 0x4e001ee4},
+ {0x0000a554, 0x52001ee6, 0x52001ee6},
+ {0x0000a558, 0x56001eea, 0x56001eea},
+ {0x0000a55c, 0x5a001eec, 0x5a001eec},
+ {0x0000a560, 0x5e001ef0, 0x5e001ef0},
+ {0x0000a564, 0x60001ef1, 0x60001ef1},
+ {0x0000a568, 0x61001ef2, 0x61001ef2},
+ {0x0000a56c, 0x62001ef3, 0x62001ef3},
+ {0x0000a570, 0x63001ef4, 0x63001ef4},
+ {0x0000a574, 0x64001ef5, 0x64001ef5},
+ {0x0000a578, 0x65001ffc, 0x65001ffc},
+ {0x0000a57c, 0x65001ffc, 0x65001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xcc69ac94, 0xcc69ac94},
+ {0x0000b2e0, 0xf0b23118, 0xf0b23118},
+ {0x0000b2e4, 0xffffc000, 0xffffc000},
+ {0x0000b2e8, 0xc0000000, 0xc0000000},
+ {0x0000c2dc, 0xcc69ac94, 0xcc69ac94},
+ {0x0000c2e0, 0xf0b23118, 0xf0b23118},
+ {0x0000c2e4, 0xffffc000, 0xffffc000},
+ {0x0000c2e8, 0xc0000000, 0xc0000000},
+ {0x00016044, 0x012492db, 0x012492db},
+ {0x00016048, 0x6c927a70, 0x6c927a70},
+ {0x00016050, 0x8036d36c, 0x8036d36c},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a7e00, 0x453a7e00},
+ {0x00016444, 0x012492db, 0x012492db},
+ {0x00016448, 0x6c927a70, 0x6c927a70},
+ {0x00016450, 0x8036d36c, 0x8036d36c},
+ {0x00016844, 0x012492db, 0x012492db},
+ {0x00016848, 0x6c927a70, 0x6c927a70},
+ {0x00016850, 0x8036d36c, 0x8036d36c},
+};
+
+static const u32 qca956x_1p0_modes_no_xpa_low_ob_db_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x0000a2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000a2e0, 0xff323118, 0xff323118},
+ {0x0000a2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000a2e8, 0xffc00000, 0xffc00000},
+ {0x0000a39c, 0x42424242, 0x42424242},
+ {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
+ {0x0000a3b0, 0x00a01404, 0x00a01404},
+ {0x0000a3b4, 0x00000034, 0x00000034},
+ {0x0000a3b8, 0x00800408, 0x00800408},
+ {0x0000a3bc, 0x00000036, 0x00000036},
+ {0x0000a410, 0x000050dc, 0x000050dc},
+ {0x0000a414, 0x16b739ce, 0x16b739ce},
+ {0x0000a418, 0x2d00198b, 0x2d00198b},
+ {0x0000a41c, 0x16b5adce, 0x16b5adce},
+ {0x0000a420, 0x0000014a, 0x0000014a},
+ {0x0000a424, 0x14a525cc, 0x14a525cc},
+ {0x0000a428, 0x0000012a, 0x0000012a},
+ {0x0000a42c, 0x14a5294a, 0x14a5294a},
+ {0x0000a430, 0x1294a929, 0x1294a929},
+ {0x0000a500, 0x09000040, 0x09000040},
+ {0x0000a504, 0x0b000041, 0x0b000041},
+ {0x0000a508, 0x0d000042, 0x0d000042},
+ {0x0000a50c, 0x11000044, 0x11000044},
+ {0x0000a510, 0x15000046, 0x15000046},
+ {0x0000a514, 0x1d000440, 0x1d000440},
+ {0x0000a518, 0x1f000441, 0x1f000441},
+ {0x0000a51c, 0x23000443, 0x23000443},
+ {0x0000a520, 0x25000444, 0x25000444},
+ {0x0000a524, 0x280004e0, 0x280004e0},
+ {0x0000a528, 0x2c0004e2, 0x2c0004e2},
+ {0x0000a52c, 0x2e0004e3, 0x2e0004e3},
+ {0x0000a530, 0x300004e4, 0x300004e4},
+ {0x0000a534, 0x340004e6, 0x340004e6},
+ {0x0000a538, 0x37000ce0, 0x37000ce0},
+ {0x0000a53c, 0x3b000ce2, 0x3b000ce2},
+ {0x0000a540, 0x3d000ce3, 0x3d000ce3},
+ {0x0000a544, 0x3f000ce4, 0x3f000ce4},
+ {0x0000a548, 0x45001ee0, 0x45001ee0},
+ {0x0000a54c, 0x49001ee2, 0x49001ee2},
+ {0x0000a550, 0x4d001ee4, 0x4d001ee4},
+ {0x0000a554, 0x51001ee6, 0x51001ee6},
+ {0x0000a558, 0x55001eea, 0x55001eea},
+ {0x0000a55c, 0x59001eec, 0x59001eec},
+ {0x0000a560, 0x5d001ef0, 0x5d001ef0},
+ {0x0000a564, 0x5f001ef1, 0x5f001ef1},
+ {0x0000a568, 0x60001ef2, 0x60001ef2},
+ {0x0000a56c, 0x61001ef3, 0x61001ef3},
+ {0x0000a570, 0x62001ef4, 0x62001ef4},
+ {0x0000a574, 0x63001ef5, 0x63001ef5},
+ {0x0000a578, 0x64001ffc, 0x64001ffc},
+ {0x0000a57c, 0x64001ffc, 0x64001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000},
+ {0x0000a614, 0x00804201, 0x00804201},
+ {0x0000a618, 0x00804201, 0x00804201},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000b2e0, 0xff323118, 0xff323118},
+ {0x0000b2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000b2e8, 0xffc00000, 0xffc00000},
+ {0x0000c2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000c2e0, 0xff323118, 0xff323118},
+ {0x0000c2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000c2e8, 0xffc00000, 0xffc00000},
+ {0x00016044, 0x046e42db, 0x046e42db},
+ {0x00016048, 0x64925a70, 0x64925a70},
+ {0x00016148, 0x00008050, 0x00008050},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a6000, 0x453a6000},
+ {0x00016444, 0x046e42db, 0x046e42db},
+ {0x00016448, 0x6c925a70, 0x6c925a70},
+ {0x00016548, 0x00008050, 0x00008050},
+ {0x00016844, 0x046e42db, 0x046e42db},
+ {0x00016848, 0x6c925a70, 0x6c925a70},
+ {0x00016948, 0x00008050, 0x00008050},
+};
+
+static const u32 qca956x_1p0_modes_no_xpa_green_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x000098bc, 0x00000001, 0x00000001},
+ {0x0000a2dc, 0xd3555284, 0xd3555284},
+ {0x0000a2e0, 0x1c666318, 0x1c666318},
+ {0x0000a2e4, 0xe07bbc00, 0xe07bbc00},
+ {0x0000a2e8, 0xff800000, 0xff800000},
+ {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
+ {0x0000a410, 0x000050dc, 0x000050dc},
+ {0x0000a500, 0x02000040, 0x02000040},
+ {0x0000a504, 0x04000041, 0x04000041},
+ {0x0000a508, 0x06000042, 0x06000042},
+ {0x0000a50c, 0x0a000044, 0x0a000044},
+ {0x0000a510, 0x0c000045, 0x0c000045},
+ {0x0000a514, 0x13000440, 0x13000440},
+ {0x0000a518, 0x15000441, 0x15000441},
+ {0x0000a51c, 0x19000443, 0x19000443},
+ {0x0000a520, 0x1b000444, 0x1b000444},
+ {0x0000a524, 0x1e0004e0, 0x1e0004e0},
+ {0x0000a528, 0x220004e2, 0x220004e2},
+ {0x0000a52c, 0x240004e3, 0x240004e3},
+ {0x0000a530, 0x260004e4, 0x260004e4},
+ {0x0000a534, 0x2a0004e6, 0x2a0004e6},
+ {0x0000a538, 0x32000ce0, 0x32000ce0},
+ {0x0000a53c, 0x36000ce2, 0x36000ce2},
+ {0x0000a540, 0x3a000ce4, 0x3a000ce4},
+ {0x0000a544, 0x3e000ce6, 0x3e000ce6},
+ {0x0000a548, 0x45001ee0, 0x45001ee0},
+ {0x0000a54c, 0x49001ee2, 0x49001ee2},
+ {0x0000a550, 0x4d001ee4, 0x4d001ee4},
+ {0x0000a554, 0x51001ee6, 0x51001ee6},
+ {0x0000a558, 0x55001eea, 0x55001eea},
+ {0x0000a55c, 0x59001eec, 0x59001eec},
+ {0x0000a560, 0x5d001ef0, 0x5d001ef0},
+ {0x0000a564, 0x5f001ef1, 0x5f001ef1},
+ {0x0000a568, 0x60001ef2, 0x60001ef2},
+ {0x0000a56c, 0x61001ef3, 0x61001ef3},
+ {0x0000a570, 0x62001ef4, 0x62001ef4},
+ {0x0000a574, 0x63001ff5, 0x63001ff5},
+ {0x0000a578, 0x64001ffc, 0x64001ffc},
+ {0x0000a57c, 0x64001ffc, 0x64001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000},
+ {0x0000a614, 0x00804201, 0x00804201},
+ {0x0000a618, 0x00804201, 0x00804201},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xd3555284, 0xd3555284},
+ {0x0000b2e0, 0x1c666318, 0x1c666318},
+ {0x0000b2e4, 0xe07bbc00, 0xe07bbc00},
+ {0x0000b2e8, 0xff800000, 0xff800000},
+ {0x0000c2dc, 0xd3555284, 0xd3555284},
+ {0x0000c2e0, 0x1c666318, 0x1c666318},
+ {0x0000c2e4, 0xe07bbc00, 0xe07bbc00},
+ {0x0000c2e8, 0xff800000, 0xff800000},
+ {0x00016044, 0x849242db, 0x849242db},
+ {0x00016048, 0x64925a70, 0x64925a70},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a6000, 0x453a6000},
+ {0x00016444, 0x849242db, 0x849242db},
+ {0x00016448, 0x6c925a70, 0x6c925a70},
+ {0x00016844, 0x849242db, 0x849242db},
+ {0x00016848, 0x6c925a70, 0x6c925a70},
+ {0x0000a7f0, 0x800002cc, 0x800002cc},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+};
+
+static const u32 qca956x_1p0_common_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222222},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x17171717},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 qca956x_1p0_xlna_only[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac621f1, 0x5ac621f1},
+ {0x00009828, 0x06903081, 0x06903081, 0x07d43881, 0x07d43881},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x03721720},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000de, 0x6c4000da},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec8ad2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x317a6062, 0x317a5ae2},
+ {0x00009e18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003b2, 0x000003b2},
+ {0x00009fc0, 0x813e4788, 0x813e4788, 0x813e4789, 0x813e4789},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001b2, 0x000001b2},
+ {0x0000be18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001b2, 0x000001b2},
+};
+
+#endif /* INITVALS_956X_H */
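The tables above are pure data. As a point of reference (not part of this patch), a two-column table carries one value per register for all modes, while a five-column table carries one value per channel mode; below is a minimal sketch of how such tables could be walked, assuming only the driver's REG_WRITE(ah, reg, val) accessor — the helper names write_ini_2col()/write_ini_5col() are hypothetical, and the driver itself hands these arrays to its shared INI-programming code rather than open-coding loops like this:

/* Illustrative sketch only -- not part of this patch. */
static void write_ini_2col(struct ath_hw *ah, const u32 ini[][2], int rows)
{
	int i;

	/* column 0 is the register address, column 1 the "allmodes" value */
	for (i = 0; i < rows; i++)
		REG_WRITE(ah, ini[i][0], ini[i][1]);
}

static void write_ini_5col(struct ath_hw *ah, const u32 ini[][5],
			   int rows, int column)
{
	int i;

	/* columns 1..4 map to 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 */
	for (i = 0; i < rows; i++)
		REG_WRITE(ah, ini[i][0], ini[i][column]);
}

For example, write_ini_2col(ah, qca956x_1p0_radio_core, ARRAY_SIZE(qca956x_1p0_radio_core)) would program the radio core block listed above.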
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1a9fe0983a6b..0f8e9464e4ab 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -34,7 +34,7 @@ struct ath_vif;
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
-extern int led_blink;
+extern int ath9k_led_blink;
extern bool is_ath9k_unloaded;
extern int ath9k_use_chanctx;
@@ -830,14 +830,9 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
/* Wake on Wireless LAN */
/************************/
-struct ath9k_wow_pattern {
- u8 pattern_bytes[MAX_PATTERN_SIZE];
- u8 mask_bytes[MAX_PATTERN_SIZE];
- u32 pattern_len;
-};
-
#ifdef CONFIG_ATH9K_WOW
void ath9k_init_wow(struct ieee80211_hw *hw);
+void ath9k_deinit_wow(struct ieee80211_hw *hw);
int ath9k_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath9k_resume(struct ieee80211_hw *hw);
@@ -846,6 +841,9 @@ void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled);
static inline void ath9k_init_wow(struct ieee80211_hw *hw)
{
}
+static inline void ath9k_deinit_wow(struct ieee80211_hw *hw)
+{
+}
static inline int ath9k_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
@@ -1039,9 +1037,8 @@ struct ath_softc {
s16 tx99_power;
#ifdef CONFIG_ATH9K_WOW
- atomic_t wow_got_bmiss_intr;
- atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
u32 wow_intr_before_sleep;
+ bool force_wow;
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index ec93ddf0863a..5cee231cca1f 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -582,7 +582,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 871e969409bf..50a2e0ac3b8b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -403,7 +403,8 @@ static const struct file_operations fops_antenna_diversity = {
static int read_file_dma(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
int i, qcuOffset = 0, dcuOffset = 0;
@@ -470,20 +471,6 @@ static int read_file_dma(struct seq_file *file, void *data)
return 0;
}
-static int open_file_dma(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_dma, inode->i_private);
-}
-
-static const struct file_operations fops_dma = {
- .open = open_file_dma,
- .read = seq_read,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
{
if (status)
@@ -539,7 +526,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
static int read_file_interrupt(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
#define PR_IS(a, s) \
do { \
@@ -600,22 +588,10 @@ static int read_file_interrupt(struct seq_file *file, void *data)
return 0;
}
-static int open_file_interrupt(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_interrupt, inode->i_private);
-}
-
-static const struct file_operations fops_interrupt = {
- .read = seq_read,
- .open = open_file_interrupt,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int read_file_xmit(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
seq_printf(file, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
@@ -661,7 +637,8 @@ static void print_queue(struct ath_softc *sc, struct ath_txq *txq,
static int read_file_queues(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
struct ath_txq *txq;
int i;
static const char *qname[4] = {
@@ -682,7 +659,8 @@ static int read_file_queues(struct seq_file *file, void *data)
static int read_file_misc(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_vif_iter_data iter_data;
struct ath_chanctx *ctx;
@@ -773,7 +751,8 @@ static int read_file_misc(struct seq_file *file, void *data)
static int read_file_reset(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
static const char * const reset_cause[__RESET_TYPE_MAX] = {
[RESET_TYPE_BB_HANG] = "Baseband Hang",
[RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog",
@@ -837,58 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
TX_STAT_INC(qnum, delim_underrun);
}
-static int open_file_xmit(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_xmit, inode->i_private);
-}
-
-static const struct file_operations fops_xmit = {
- .read = seq_read,
- .open = open_file_xmit,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int open_file_queues(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_queues, inode->i_private);
-}
-
-static const struct file_operations fops_queues = {
- .read = seq_read,
- .open = open_file_queues,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int open_file_misc(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_misc, inode->i_private);
-}
-
-static const struct file_operations fops_misc = {
- .read = seq_read,
- .open = open_file_misc,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int open_file_reset(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_reset, inode->i_private);
-}
-
-static const struct file_operations fops_reset = {
- .read = seq_read,
- .open = open_file_reset,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
@@ -1018,7 +945,8 @@ static const struct file_operations fops_regdump = {
static int read_file_dump_nfcal(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private;
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1115,6 +1043,133 @@ static const struct file_operations fops_ackto = {
};
#endif
+#ifdef CONFIG_ATH9K_WOW
+
+static ssize_t read_file_wow(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned int len = 0, size = 32;
+ ssize_t retval;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "WOW: %s\n",
+ sc->force_wow ? "ENABLED" : "DISABLED");
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t write_file_wow(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val != 1)
+ return -EINVAL;
+
+ if (!sc->force_wow) {
+ sc->force_wow = true;
+ ath9k_init_wow(sc->hw);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wow = {
+ .read = read_file_wow,
+ .write = write_file_wow,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#endif
+
+static ssize_t read_file_tpc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int len = 0, size = 32;
+ ssize_t retval;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "%s\n",
+ ah->tpc_enabled ? "ENABLED" : "DISABLED");
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+ bool tpc_enabled;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ tpc_enabled = !!val;
+
+ if (tpc_enabled != ah->tpc_enabled) {
+ ah->tpc_enabled = tpc_enabled;
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_tpc = {
+ .read = read_file_tpc,
+ .write = write_file_tpc,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1260,14 +1315,14 @@ int ath9k_init_debug(struct ath_hw *ah)
ath9k_tx99_init_debug(sc);
ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy);
- debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_dma);
- debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_interrupt);
- debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_xmit);
- debugfs_create_file("queues", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_queues);
+ debugfs_create_devm_seqfile(sc->dev, "dma", sc->debug.debugfs_phy,
+ read_file_dma);
+ debugfs_create_devm_seqfile(sc->dev, "interrupt", sc->debug.debugfs_phy,
+ read_file_interrupt);
+ debugfs_create_devm_seqfile(sc->dev, "xmit", sc->debug.debugfs_phy,
+ read_file_xmit);
+ debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
+ read_file_queues);
debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
&sc->tx.txq_max_pending[IEEE80211_AC_BK]);
debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -1276,10 +1331,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&sc->tx.txq_max_pending[IEEE80211_AC_VI]);
debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
&sc->tx.txq_max_pending[IEEE80211_AC_VO]);
- debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_misc);
- debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_reset);
+ debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
+ read_file_misc);
+ debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
+ read_file_reset);
ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
@@ -1301,8 +1356,9 @@ int ath9k_init_debug(struct ath_hw *ah)
&ah->config.cwm_ignore_extcca);
debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_regdump);
- debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_dump_nfcal);
+ debugfs_create_devm_seqfile(sc->dev, "dump_nfcal",
+ sc->debug.debugfs_phy,
+ read_file_dump_nfcal);
ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
@@ -1320,10 +1376,17 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_btcoex);
#endif
+#ifdef CONFIG_ATH9K_WOW
+ debugfs_create_file("wow", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_wow);
+#endif
+
#ifdef CONFIG_ATH9K_DYNACK
debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_ackto);
#endif
+ debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_tpc);
return 0;
}
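A side note on the seq_file conversion above (illustrative, not part of the patch): debugfs_create_devm_seqfile() opens the file through the supplied read callback with the struct device as the seq_file's private pointer, which is why the readers in this file recover their ath_softc via dev_get_drvdata(file->private). A minimal sketch of the same pattern, with read_file_example() as a hypothetical callback:

/* Illustrative sketch only, mirroring the hunks above. */
static int read_file_example(struct seq_file *file, void *data)
{
	struct ieee80211_hw *hw = dev_get_drvdata(file->private);
	struct ath_softc *sc = hw->priv;

	seq_printf(file, "tpc: %d\n", sc->sc_ah->tpc_enabled);
	return 0;
}

/* registration, e.g. from ath9k_init_debug():
 * debugfs_create_devm_seqfile(sc->dev, "example", sc->debug.debugfs_phy,
 *			       read_file_example);
 */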
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 07b806c56c56..e5a78d4fd66e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -748,6 +748,20 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ int ht40_delta;
+
+ ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
+ ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
+ MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
+ }
+
REGWRITE_BUFFER_FLUSH(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 5ba1385c9838..6ca33dfde1fd 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -886,6 +886,21 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ int ht40_delta;
+
+ ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
+ ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
+ MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
+ }
+
REGWRITE_BUFFER_FLUSH(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 122b846b8ec0..098059039351 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1332,6 +1332,20 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
| ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ int ht40_delta;
+
+ ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
+ ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
+ MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
+ }
+
REGWRITE_BUFFER_FLUSH(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 2fef7a480fec..da344b27326c 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -49,7 +49,7 @@ void ath_init_leds(struct ath_softc *sc)
if (AR_SREV_9100(sc->sc_ah))
return;
- if (!led_blink)
+ if (!ath9k_led_blink)
sc->led_cdev.default_trigger =
ieee80211_get_radio_led_name(sc->hw);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 9dde265d3f84..300d3671d0ef 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -44,6 +44,9 @@
extern struct ieee80211_ops ath9k_htc_ops;
extern int htc_modparam_nohwcrypt;
+#ifdef CONFIG_MAC80211_LEDS
+extern int ath9k_htc_led_blink;
+#endif
enum htc_phymode {
HTC_MODE_11NA = 0,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 8cef1edcc621..dc79afd7e151 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -291,26 +291,15 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
{
struct ath9k_htc_priv *priv = file->private_data;
char buf[512];
- unsigned int len = 0;
+ unsigned int len;
spin_lock_bh(&priv->tx.tx_lock);
-
- len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
-
- len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
- priv->tx.tx_slot, MAX_TX_BUF_NUM);
-
- len += scnprintf(buf + len, sizeof(buf) - len, "\n");
-
- len += scnprintf(buf + len, sizeof(buf) - len,
- "Used slots : %d\n",
- bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
-
+ len = scnprintf(buf, sizeof(buf),
+ "TX slot bitmap : %*pb\n"
+ "Used slots : %d\n",
+ MAX_TX_BUF_NUM, priv->tx.tx_slot,
+ bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
spin_unlock_bh(&priv->tx.tx_lock);
-
- if (len > sizeof(buf))
- len = sizeof(buf);
-
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
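The %*pb conversion above uses the kernel's vsprintf bitmap extension: the field-width argument gives the number of valid bits and the pointer argument is the bitmap itself (%*pbl would print the same bitmap as ranges). A minimal sketch outside this patch, with a demo-only function name; in a driver file the helpers come from linux/bitmap.h, linux/bitops.h and linux/kernel.h:

/* Illustrative sketch only: format a small bitmap the way read_file_slot() now does. */
static void demo_bitmap_format(char *buf, size_t size)
{
	DECLARE_BITMAP(slots, 8);

	bitmap_zero(slots, 8);
	set_bit(0, slots);
	set_bit(3, slots);

	scnprintf(buf, size, "TX slot bitmap : %*pb\n", 8, slots);
}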
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 50f74a2a4cf8..2aabcbdaba4e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -279,6 +279,10 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
else
priv->ah->led_pin = ATH_LED_PIN_DEF;
+ if (!ath9k_htc_led_blink)
+ priv->led_cdev.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+
ath9k_configure_leds(priv);
snprintf(priv->led_name, sizeof(priv->led_name),
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index e8fa9448da24..fd229409f676 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -39,6 +39,10 @@ module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
#ifdef CONFIG_MAC80211_LEDS
+int ath9k_htc_led_blink = 1;
+module_param_named(blink, ath9k_htc_led_blink, int, 0444);
+MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+
static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
{ .throughput = 0 * 1024, .blink_time = 334 },
{ .throughput = 1 * 1024, .blink_time = 260 },
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index a0ff5b637054..d2408da38c1c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -351,11 +351,7 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
return;
ret:
- /* HTC-generated packets are freed here. */
- if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
- dev_kfree_skb_any(skb);
- else
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6d4b273469b1..60aa8d71e753 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -246,6 +246,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
case AR9300_DEVID_AR953X:
ah->hw_version.macVersion = AR_SREV_VERSION_9531;
return;
+ case AR9300_DEVID_QCA956X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9561;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -422,6 +424,8 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
+ ah->tpc_enabled = true;
+
ah->ani_function = ATH9K_ANI_ALL;
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
@@ -536,6 +540,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9550:
case AR_SREV_VERSION_9565:
case AR_SREV_VERSION_9531:
+ case AR_SREV_VERSION_9561:
break;
default:
ath_err(common,
@@ -636,6 +641,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9485_DEVID_AR1111:
case AR9300_DEVID_AR9565:
case AR9300_DEVID_AR953X:
+ case AR9300_DEVID_QCA956X:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -776,7 +782,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
- } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL,
@@ -787,7 +794,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(100);
if (ah->is_clk_25mhz) {
- if (AR_SREV_9531(ah)) {
+ if (AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
pll2_divint = 0x1c;
pll2_divfrac = 0xa3d2;
refdiv = 1;
@@ -803,14 +810,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
refdiv = 5;
} else {
pll2_divint = 0x11;
- pll2_divfrac =
- AR_SREV_9531(ah) ? 0x26665 : 0x26666;
+ pll2_divfrac = (AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah)) ?
+ 0x26665 : 0x26666;
refdiv = 1;
}
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
- if (AR_SREV_9531(ah))
+ if (AR_SREV_9531(ah) || AR_SREV_9561(ah))
regval |= (0x1 << 22);
else
regval |= (0x1 << 16);
@@ -828,14 +836,16 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
(0x1 << 13) |
(0x4 << 26) |
(0x18 << 19);
- else if (AR_SREV_9531(ah))
+ else if (AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
regval = (regval & 0x01c00fff) |
(0x1 << 31) |
(0x2 << 29) |
(0xa << 25) |
- (0x1 << 19) |
- (0x6 << 12);
- else
+ (0x1 << 19);
+
+ if (AR_SREV_9531(ah))
+ regval |= (0x6 << 12);
+ } else
regval = (regval & 0x80071fff) |
(0x3 << 30) |
(0x1 << 13) |
@@ -843,7 +853,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
(0x60 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
- if (AR_SREV_9531(ah))
+ if (AR_SREV_9531(ah) || AR_SREV_9561(ah))
REG_WRITE(ah, AR_PHY_PLL_MODE,
REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff);
else
@@ -882,7 +892,8 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -1671,7 +1682,8 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
}
#ifdef __BIG_ENDIAN
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9550(ah) || AR_SREV_9531(ah))
+ AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -2459,7 +2471,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
- if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) &&
+ !AR_SREV_9561(ah) && !AR_SREV_9565(ah))
pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2476,7 +2489,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
- if (AR_SREV_9300_20_OR_LATER(ah))
+ if (AR_SREV_9561(ah))
+ ah->ent_mode = 0x3BDA000;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
@@ -2529,13 +2544,17 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
}
- if (AR_SREV_9462(ah))
- pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE;
-
if (AR_SREV_9300_20_OR_LATER(ah) &&
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
+#ifdef CONFIG_ATH9K_WOW
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565_11_OR_LATER(ah))
+ ah->wow.max_patterns = MAX_NUM_PATTERN;
+ else
+ ah->wow.max_patterns = MAX_NUM_PATTERN_LEGACY;
+#endif
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 1cbd33551513..e82e570de330 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -54,6 +54,7 @@
#define AR9485_DEVID_AR1111 0x0037
#define AR9300_DEVID_AR9565 0x0036
#define AR9300_DEVID_AR953X 0x003d
+#define AR9300_DEVID_QCA956X 0x003f
#define AR5416_AR9100_DEVID 0x000b
@@ -198,12 +199,13 @@
#define KAL_NUM_DESC_WORDS 12
#define KAL_ANTENNA_MODE 1
#define KAL_TO_DS 1
-#define KAL_DELAY 4 /*delay of 4ms between 2 KAL frames */
+#define KAL_DELAY 4 /* delay of 4ms between 2 KAL frames */
#define KAL_TIMEOUT 900
#define MAX_PATTERN_SIZE 256
#define MAX_PATTERN_MASK_SIZE 32
-#define MAX_NUM_PATTERN 8
+#define MAX_NUM_PATTERN 16
+#define MAX_NUM_PATTERN_LEGACY 8
#define MAX_NUM_USER_PATTERN 6 /* deducting the disassociate and
deauthenticate packets */
@@ -247,12 +249,10 @@ enum ath9k_hw_caps {
#ifdef CONFIG_ATH9K_PCOEM
ATH9K_HW_CAP_RTT = BIT(14),
ATH9K_HW_CAP_MCI = BIT(15),
- ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(16),
ATH9K_HW_CAP_BT_ANT_DIV = BIT(17),
#else
ATH9K_HW_CAP_RTT = 0,
ATH9K_HW_CAP_MCI = 0,
- ATH9K_HW_WOW_DEVICE_CAPABLE = 0,
ATH9K_HW_CAP_BT_ANT_DIV = 0,
#endif
ATH9K_HW_CAP_DFS = BIT(18),
@@ -271,6 +271,12 @@ enum ath9k_hw_caps {
* of those types.
*/
+struct ath9k_hw_wow {
+ u32 wow_event_mask;
+ u32 wow_event_mask2;
+ u8 max_patterns;
+};
+
struct ath9k_hw_capabilities {
u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
u16 rts_aggr_limit;
@@ -929,7 +935,7 @@ struct ath_hw {
u32 ent_mode;
#ifdef CONFIG_ATH9K_WOW
- u32 wow_event_mask;
+ struct ath9k_hw_wow wow;
#endif
bool is_clk_25mhz;
int (*get_mac_revision)(void);
@@ -1086,6 +1092,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
struct ath9k_channel *chan);
+void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
+ struct ath9k_channel *chan, int ht40_delta);
/* Hardware family op attach helpers */
int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
@@ -1145,23 +1153,19 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_WOW
-const char *ath9k_hw_wow_event_to_string(u32 wow_event);
-void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
- u8 *user_mask, int pattern_count,
- int pattern_len);
+int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len);
u32 ath9k_hw_wow_wakeup(struct ath_hw *ah);
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable);
#else
-static inline const char *ath9k_hw_wow_event_to_string(u32 wow_event)
-{
- return NULL;
-}
-static inline void ath9k_hw_wow_apply_pattern(struct ath_hw *ah,
- u8 *user_pattern,
- u8 *user_mask,
- int pattern_count,
- int pattern_len)
+static inline int ath9k_hw_wow_apply_pattern(struct ath_hw *ah,
+ u8 *user_pattern,
+ u8 *user_mask,
+ int pattern_count,
+ int pattern_len)
{
+ return 0;
}
static inline u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d1c39346b264..6c6e88495394 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -45,8 +45,8 @@ int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-int led_blink;
-module_param_named(blink, led_blink, int, 0444);
+int ath9k_led_blink;
+module_param_named(blink, ath9k_led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");
static int ath9k_btcoex_enable;
@@ -996,6 +996,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
ath9k_ps_restore(sc);
ath9k_deinit_debug(sc);
+ ath9k_deinit_wow(hw);
ieee80211_unregister_hw(hw);
ath_rx_cleanup(sc);
ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index b829263e3d0a..90631d768a60 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -516,14 +516,14 @@ int ath_update_survey_stats(struct ath_softc *sc)
ath_hw_cycle_counters_update(common);
if (cc->cycles > 0) {
- survey->filled |= SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_RX |
- SURVEY_INFO_CHANNEL_TIME_TX;
- survey->channel_time += cc->cycles / div;
- survey->channel_time_busy += cc->rx_busy / div;
- survey->channel_time_rx += cc->rx_frame / div;
- survey->channel_time_tx += cc->tx_frame / div;
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+ survey->time += cc->cycles / div;
+ survey->time_busy += cc->rx_busy / div;
+ survey->time_rx += cc->rx_frame / div;
+ survey->time_tx += cc->tx_frame / div;
}
if (cc->cycles < div)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 3e58bfa0c1fd..bba85d1a6cd1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -820,7 +820,8 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9a72640237cb..9ede991b8d76 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
__ath_cancel_work(sc);
+ disable_irq(sc->irq);
tasklet_disable(&sc->intr_tq);
tasklet_disable(&sc->bcon_tasklet);
spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
r = -EIO;
out:
+ enable_irq(sc->irq);
spin_unlock_bh(&sc->sc_pcu_lock);
tasklet_enable(&sc->bcon_tasklet);
tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
- if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
- return IRQ_NONE;
-
/* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
- if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return IRQ_HANDLED;
/*
@@ -556,15 +555,6 @@ irqreturn_t ath_isr(int irq, void *dev)
(status & ATH9K_INT_BB_WATCHDOG))
goto chip_reset;
-#ifdef CONFIG_ATH9K_WOW
- if (status & ATH9K_INT_BMISS) {
- if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
- atomic_inc(&sc->wow_got_bmiss_intr);
- atomic_dec(&sc->wow_sleep_proc_intr);
- }
- }
-#endif
-
if (status & ATH9K_INT_SWBA)
tasklet_schedule(&sc->bcon_tasklet);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f009b5b57e5e..e6fef1be9977 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -427,6 +427,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
+ 0x1842),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
0x6671),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -446,9 +451,19 @@ static const struct pci_device_id ath_pci_id_table[] = {
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_AZWAVE,
0x218A),
.driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2F8A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
/* WB335 1-ANT / Antenna Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -508,6 +523,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213C),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_HP,
0x18E3),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -555,6 +575,16 @@ static const struct pci_device_id ath_pci_id_table[] = {
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4129),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x412A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_ATHEROS,
0x3027),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -586,10 +616,25 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
+ 0x1832),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
0x0692),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x11AD, /* LITEON */
+ 0x0803),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0813),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_AZWAVE,
0x2130),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -605,6 +650,21 @@ static const struct pci_device_id ath_pci_id_table[] = {
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2F82),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
0x144F, /* ASKEY */
0x7202),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -616,10 +676,20 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x1B9A, /* XAVI */
+ 0x2813),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
0x28A2),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A4),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
0x185F, /* WNC */
0x3027),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -636,10 +706,25 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_FOXCONN,
+ 0xE08F),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
0xE081),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE091),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE099),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_LENOVO,
0x3026),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -913,9 +998,12 @@ static int ath_pci_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- if (sc->wow_enabled)
+ if (test_bit(ATH_OP_WOW_ENABLED, &common->op_flags)) {
+ dev_info(&pdev->dev, "WOW is enabled, bypassing PCI suspend\n");
return 0;
+ }
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moved to full sleep,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 7395afbc5124..6fb40ef86fd6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -425,7 +425,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
- if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
+ if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
+ AR_SREV_9561(sc->sc_ah))
rfilt |= ATH9K_RX_FILTER_4ADDRESS;
if (ath9k_is_chanctx_enabled() &&
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index fb11a9172f38..9587ec655680 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -814,6 +814,7 @@
#define AR_SREV_REVISION_9531_10 0
#define AR_SREV_REVISION_9531_11 1
#define AR_SREV_REVISION_9531_20 2
+#define AR_SREV_VERSION_9561 0x600
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -899,10 +900,13 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
#define AR_SREV_9565(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+#define AR_SREV_9003_PCOEM(_ah) \
+ (AR_SREV_9462(_ah) || AR_SREV_9485(_ah) || AR_SREV_9565(_ah))
#else
#define AR_SREV_9462(_ah) 0
#define AR_SREV_9485(_ah) 0
#define AR_SREV_9565(_ah) 0
+#define AR_SREV_9003_PCOEM(_ah) 0
#endif
#define AR_SREV_9485_11_OR_LATER(_ah) \
@@ -974,6 +978,9 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_20))
+#define AR_SREV_9561(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
+
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
(AR_SREV_9580(_ah))
@@ -1876,6 +1883,7 @@ enum {
#define AR_FIRST_NDP_TIMER 7
#define AR_NDP2_PERIOD 0x81a0
#define AR_NDP2_TIMER_MODE 0x81c0
+#define AR_GEN_TIMERS2_MODE_ENABLE_MASK 0x000000FF
#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
@@ -1971,6 +1979,7 @@ enum {
#define AR_DIRECT_CONNECT 0x83a0
#define AR_DC_AP_STA_EN 0x00000001
+#define AR_DC_TSF2_ENABLE 0x00000001
#define AR_AES_MUTE_MASK0 0x805c
#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
@@ -2003,126 +2012,6 @@ enum {
#define AR_WOW_BEACON_TIMO_MAX 0xffffffff
-/*
- * MAC WoW Registers
- */
-
-#define AR_WOW_PATTERN 0x825C
-#define AR_WOW_COUNT 0x8260
-#define AR_WOW_BCN_EN 0x8270
-#define AR_WOW_BCN_TIMO 0x8274
-#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
-#define AR_WOW_KEEP_ALIVE 0x827c
-#define AR_WOW_US_SCALAR 0x8284
-#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
-#define AR_WOW_PATTERN_MATCH 0x828c
-#define AR_WOW_PATTERN_OFF1 0x8290 /* pattern bytes 0 -> 3 */
-#define AR_WOW_PATTERN_OFF2 0x8294 /* pattern bytes 4 -> 7 */
-
-/* for AR9285 or later version of chips */
-#define AR_WOW_EXACT 0x829c
-#define AR_WOW_LENGTH1 0x8360
-#define AR_WOW_LENGTH2 0X8364
-/* register to enable match for less than 256 bytes packets */
-#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
-
-#define AR_SW_WOW_CONTROL 0x20018
-#define AR_SW_WOW_ENABLE 0x1
-#define AR_SWITCH_TO_REFCLK 0x2
-#define AR_RESET_CONTROL 0x4
-#define AR_RESET_VALUE_MASK 0x8
-#define AR_HW_WOW_DISABLE 0x10
-#define AR_CLR_MAC_INTERRUPT 0x20
-#define AR_CLR_KA_INTERRUPT 0x40
-
-/* AR_WOW_PATTERN register values */
-#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 28) /* in usecs */
-#define AR_WOW_MAC_INTR_EN 0x00040000
-#define AR_WOW_MAGIC_EN 0x00010000
-#define AR_WOW_PATTERN_EN(x) (x & 0xff)
-#define AR_WOW_PAT_FOUND_SHIFT 8
-#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
-#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
-#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
-#define AR_WOW_MAC_INTR 0x00080000
-#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
-#define AR_WOW_BEACON_FAIL 0x00200000
-
-#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
- AR_WOW_MAGIC_PAT_FOUND | \
- AR_WOW_KEEP_ALIVE_FAIL | \
- AR_WOW_BEACON_FAIL))
-#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
- AR_WOW_MAGIC_EN | \
- AR_WOW_MAC_INTR_EN | \
- AR_WOW_BEACON_FAIL | \
- AR_WOW_KEEP_ALIVE_FAIL))
-
-/* AR_WOW_COUNT register values */
-#define AR_WOW_AIFS_CNT(x) (x & 0xff)
-#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
-#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
-
-/* AR_WOW_BCN_EN register */
-#define AR_WOW_BEACON_FAIL_EN 0x00000001
-
-/* AR_WOW_BCN_TIMO rgister */
-#define AR_WOW_BEACON_TIMO 0x40000000 /* valid if BCN_EN is set */
-
-/* AR_WOW_KEEP_ALIVE_TIMO register */
-#define AR_WOW_KEEP_ALIVE_TIMO_VALUE
-#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
-
-/* AR_WOW_KEEP_ALIVE register */
-#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
-#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
-
-/* AR_WOW_KEEP_ALIVE_DELAY register */
-#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
-
-
-/*
- * keep it long for beacon workaround - ensure no false alarm
- */
-#define AR_WOW_BMISSTHRESHOLD 0x20
-
-/* AR_WOW_PATTERN_MATCH register */
-#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
-#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
-
-/*
- * default values for Wow Configuration for backoff, aifs, slot, keep-alive
- * to be programmed into various registers.
- */
-#define AR_WOW_PAT_BACKOFF 0x00000004 /* AR_WOW_PATTERN_REG */
-#define AR_WOW_CNT_AIFS_CNT 0x00000022 /* AR_WOW_COUNT_REG */
-#define AR_WOW_CNT_SLOT_CNT 0x00000009 /* AR_WOW_COUNT_REG */
-/*
- * Keepalive count applicable for AR9280 2.0 and above.
- */
-#define AR_WOW_CNT_KA_CNT 0x00000008 /* AR_WOW_COUNT register */
-
-/* WoW - Transmit buffer for keep alive frames */
-#define AR_WOW_TRANSMIT_BUFFER 0xe000 /* E000 - EFFC */
-
-#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
-
-#define AR_WOW_KA_DESC_WORD2 0xe000
-
-#define AR_WOW_KA_DATA_WORD0 0xe030
-
-/* WoW Transmit Buffer for patterns */
-#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
-#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
-
-/* Currently Pattern 0-7 are supported - so bit 0-7 are set */
-#define AR_WOW_PATTERN_SUPPORTED 0xff
-#define AR_WOW_LENGTH_MAX 0xff
-#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
-#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
-#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
-#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
-
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
diff --git a/drivers/net/wireless/ath/ath9k/reg_wow.h b/drivers/net/wireless/ath/ath9k/reg_wow.h
new file mode 100644
index 000000000000..3abfca56ca58
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_wow.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_WOW_H
+#define REG_WOW_H
+
+#define AR_WOW_PATTERN 0x825C
+#define AR_WOW_COUNT 0x8260
+#define AR_WOW_BCN_EN 0x8270
+#define AR_WOW_BCN_TIMO 0x8274
+#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
+#define AR_WOW_KEEP_ALIVE 0x827c
+#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
+#define AR_WOW_PATTERN_MATCH 0x828c
+
+/*
+ * AR_WOW_LENGTH1
+ * bit 31:24 pattern 0 length
+ * bit 23:16 pattern 1 length
+ * bit 15:8 pattern 2 length
+ * bit 7:0 pattern 3 length
+ *
+ * AR_WOW_LENGTH2
+ * bit 31:24 pattern 4 length
+ * bit 23:16 pattern 5 length
+ * bit 15:8 pattern 6 length
+ * bit 7:0 pattern 7 length
+ *
+ * AR_WOW_LENGTH3
+ * bit 31:24 pattern 8 length
+ * bit 23:16 pattern 9 length
+ * bit 15:8 pattern 10 length
+ * bit 7:0 pattern 11 length
+ *
+ * AR_WOW_LENGTH4
+ * bit 31:24 pattern 12 length
+ * bit 23:16 pattern 13 length
+ * bit 15:8 pattern 14 length
+ * bit 7:0 pattern 15 length
+ */
+#define AR_WOW_LENGTH1 0x8360
+#define AR_WOW_LENGTH2 0X8364
+#define AR_WOW_LENGTH3 0X8380
+#define AR_WOW_LENGTH4 0X8384
+
+#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
+#define AR_MAC_PCU_WOW4 0x8370
+
+#define AR_SW_WOW_CONTROL 0x20018
+#define AR_SW_WOW_ENABLE 0x1
+#define AR_SWITCH_TO_REFCLK 0x2
+#define AR_RESET_CONTROL 0x4
+#define AR_RESET_VALUE_MASK 0x8
+#define AR_HW_WOW_DISABLE 0x10
+#define AR_CLR_MAC_INTERRUPT 0x20
+#define AR_CLR_KA_INTERRUPT 0x40
+
+#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 27) /* in usecs */
+#define AR_WOW_MAC_INTR_EN 0x00040000
+#define AR_WOW_MAGIC_EN 0x00010000
+#define AR_WOW_PATTERN_EN(x) (x & 0xff)
+#define AR_WOW_PAT_FOUND_SHIFT 8
+#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
+#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
+#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
+#define AR_WOW_MAC_INTR 0x00080000
+#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
+#define AR_WOW_BEACON_FAIL 0x00200000
+
+#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
+ AR_WOW_MAGIC_PAT_FOUND | \
+ AR_WOW_KEEP_ALIVE_FAIL | \
+ AR_WOW_BEACON_FAIL))
+#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
+ AR_WOW_MAGIC_EN | \
+ AR_WOW_MAC_INTR_EN | \
+ AR_WOW_BEACON_FAIL | \
+ AR_WOW_KEEP_ALIVE_FAIL))
+
+#define AR_WOW_AIFS_CNT(x) (x & 0xff)
+#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
+#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
+
+#define AR_WOW_BEACON_FAIL_EN 0x00000001
+#define AR_WOW_BEACON_TIMO 0x40000000
+#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
+#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
+#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
+#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
+#define AR_WOW_BMISSTHRESHOLD 0x20
+#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
+#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
+#define AR_WOW_PAT_BACKOFF 0x00000004
+#define AR_WOW_CNT_AIFS_CNT 0x00000022
+#define AR_WOW_CNT_SLOT_CNT 0x00000009
+#define AR_WOW_CNT_KA_CNT 0x00000008
+
+#define AR_WOW_TRANSMIT_BUFFER 0xe000
+#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
+#define AR_WOW_KA_DESC_WORD2 0xe000
+#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
+#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
+#define AR_WOW_PATTERN_SUPPORTED_LEGACY 0xff
+#define AR_WOW_PATTERN_SUPPORTED 0xffff
+#define AR_WOW_LENGTH_MAX 0xff
+#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
+#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
+#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
+#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
+#define AR_WOW_LEN3_SHIFT(_i) ((0xb - ((_i) & 0xb)) << 0x3)
+#define AR_WOW_LENGTH3_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN3_SHIFT(_i))
+#define AR_WOW_LEN4_SHIFT(_i) ((0xf - ((_i) & 0xf)) << 0x3)
+#define AR_WOW_LENGTH4_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN4_SHIFT(_i))
+
+#endif /* REG_WOW_H */
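A minimal sketch of how the length macros in this new header fit together, based only on the layout comment and macros above: four 8-bit pattern lengths are packed per 32-bit register, with the lowest pattern index in the highest byte. The helper name below is invented and REG_READ/REG_WRITE stand for the driver's usual register accessors; this is not part of the patch.

/* Illustrative only: mirrors the AR_WOW_LEN*_SHIFT arithmetic above. */
static void wow_set_pattern_len(struct ath_hw *ah, int i, u8 len)
{
	u32 reg = (i < 4) ? AR_WOW_LENGTH1 :
		  (i < 8) ? AR_WOW_LENGTH2 :
		  (i < 12) ? AR_WOW_LENGTH3 : AR_WOW_LENGTH4;
	u32 shift = (i < 8) ? AR_WOW_LEN1_SHIFT(i) : AR_WOW_LEN3_SHIFT(i);
	u32 val = REG_READ(ah, reg);

	val &= ~((u32)AR_WOW_LENGTH_MAX << shift);
	val |= ((u32)(len & AR_WOW_LENGTH_MAX)) << shift;
	REG_WRITE(ah, reg, val);
}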
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 5f30e580d942..8d0b1730a9d5 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -16,36 +16,43 @@
#include "ath9k.h"
-static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+static const struct wiphy_wowlan_support ath9k_wowlan_support_legacy = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
.n_patterns = MAX_NUM_USER_PATTERN,
.pattern_min_len = 1,
.pattern_max_len = MAX_PATTERN_SIZE,
};
-static void ath9k_wow_map_triggers(struct ath_softc *sc,
- struct cfg80211_wowlan *wowlan,
- u32 *wow_triggers)
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = MAX_NUM_PATTERN - 2,
+ .pattern_min_len = 1,
+ .pattern_max_len = MAX_PATTERN_SIZE,
+};
+
+static u8 ath9k_wow_map_triggers(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
{
+ u8 wow_triggers = 0;
+
if (wowlan->disconnect)
- *wow_triggers |= AH_WOW_LINK_CHANGE |
- AH_WOW_BEACON_MISS;
+ wow_triggers |= AH_WOW_LINK_CHANGE |
+ AH_WOW_BEACON_MISS;
if (wowlan->magic_pkt)
- *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
+ wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
if (wowlan->n_patterns)
- *wow_triggers |= AH_WOW_USER_PATTERN_EN;
-
- sc->wow_enabled = *wow_triggers;
+ wow_triggers |= AH_WOW_USER_PATTERN_EN;
+ return wow_triggers;
}
-static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
+static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
int pattern_count = 0;
- int i, byte_cnt;
+ int ret, i, byte_cnt = 0;
u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
u8 dis_deauth_mask[MAX_PATTERN_SIZE];
@@ -80,12 +87,7 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
* | x:x:x:x:x:x -- 22 bytes
*/
- /* Create Disassociate Pattern first */
-
- byte_cnt = 0;
-
/* Fill out the mask with all FF's */
-
for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
dis_deauth_mask[i] = 0xff;
@@ -108,19 +110,17 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
byte_cnt += 6;
/* copy the bssid, its same as the source mac address */
-
memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
/* Create Disassociate pattern mask */
-
dis_deauth_mask[0] = 0xfe;
dis_deauth_mask[1] = 0x03;
dis_deauth_mask[2] = 0xc0;
- ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
-
- ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- pattern_count, byte_cnt);
+ ret = ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+ if (ret)
+ goto exit;
pattern_count++;
/*
@@ -129,59 +129,39 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
*/
dis_deauth_pattern[0] = 0xC0;
- ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- pattern_count, byte_cnt);
-
+ ret = ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+exit:
+ return ret;
}
-static void ath9k_wow_add_pattern(struct ath_softc *sc,
- struct cfg80211_wowlan *wowlan)
+static int ath9k_wow_add_pattern(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath9k_wow_pattern *wow_pattern = NULL;
struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
- int mask_len;
+ u8 wow_pattern[MAX_PATTERN_SIZE];
+ u8 wow_mask[MAX_PATTERN_SIZE];
+ int mask_len, ret = 0;
s8 i = 0;
- if (!wowlan->n_patterns)
- return;
-
- /*
- * Add the new user configured patterns
- */
for (i = 0; i < wowlan->n_patterns; i++) {
-
- wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
-
- if (!wow_pattern)
- return;
-
- /*
- * TODO: convert the generic user space pattern to
- * appropriate chip specific/802.11 pattern.
- */
-
- mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
- memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
- memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
- memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
- patterns[i].pattern_len);
- memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
- wow_pattern->pattern_len = patterns[i].pattern_len;
-
- /*
- * just need to take care of deauth and disssoc pattern,
- * make sure we don't overwrite them.
- */
-
- ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
- wow_pattern->mask_bytes,
- i + 2,
- wow_pattern->pattern_len);
- kfree(wow_pattern);
-
+ mask_len = DIV_ROUND_UP(patterns[i].pattern_len, 8);
+ memset(wow_pattern, 0, MAX_PATTERN_SIZE);
+ memset(wow_mask, 0, MAX_PATTERN_SIZE);
+ memcpy(wow_pattern, patterns[i].pattern, patterns[i].pattern_len);
+ memcpy(wow_mask, patterns[i].mask, mask_len);
+
+ ret = ath9k_hw_wow_apply_pattern(ah,
+ wow_pattern,
+ wow_mask,
+ i + 2,
+ patterns[i].pattern_len);
+ if (ret)
+ break;
}
+ return ret;
}
int ath9k_suspend(struct ieee80211_hw *hw,
@@ -190,41 +170,39 @@ int ath9k_suspend(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 wow_triggers_enabled = 0;
+ u8 triggers;
int ret = 0;
ath9k_deinit_channel_context(sc);
mutex_lock(&sc->mutex);
- ath_cancel_work(sc);
- ath_stop_ani(sc);
-
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
- ath_dbg(common, ANY, "Device not present\n");
- ret = -EINVAL;
+ ath_err(common, "Device not present\n");
+ ret = -ENODEV;
goto fail_wow;
}
if (WARN_ON(!wowlan)) {
- ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
+ ath_err(common, "None of the WoW triggers enabled\n");
ret = -EINVAL;
goto fail_wow;
}
- if (!device_can_wakeup(sc->dev)) {
- ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
+ if (sc->cur_chan->nvifs > 1) {
+ ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
ret = 1;
goto fail_wow;
}
- /*
- * none of the sta vifs are associated
- * and we are not currently handling multivif
- * cases, for instance we have to seperately
- * configure 'keep alive frame' for each
- * STA.
- */
+ if (ath9k_is_chanctx_enabled()) {
+ if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) {
+ ath_dbg(common, WOW,
+ "Multi-channel WOW is not supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+ }
if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ath_dbg(common, WOW, "None of the STA vifs are associated\n");
@@ -232,16 +210,15 @@ int ath9k_suspend(struct ieee80211_hw *hw,
goto fail_wow;
}
- if (sc->cur_chan->nvifs > 1) {
- ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+ triggers = ath9k_wow_map_triggers(sc, wowlan);
+ if (!triggers) {
+ ath_dbg(common, WOW, "No valid WoW triggers\n");
ret = 1;
goto fail_wow;
}
- ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
-
- ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
- wow_triggers_enabled);
+ ath_cancel_work(sc);
+ ath_stop_ani(sc);
ath9k_ps_wakeup(sc);
@@ -251,10 +228,21 @@ int ath9k_suspend(struct ieee80211_hw *hw,
* Enable wake up on receiving disassoc/deauth
* frame by default.
*/
- ath9k_wow_add_disassoc_deauth_pattern(sc);
+ ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
+ if (ret) {
+ ath_err(common,
+ "Unable to add disassoc/deauth pattern: %d\n", ret);
+ goto fail_wow;
+ }
- if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
- ath9k_wow_add_pattern(sc, wowlan);
+ if (triggers & AH_WOW_USER_PATTERN_EN) {
+ ret = ath9k_wow_add_pattern(sc, wowlan);
+ if (ret) {
+ ath_err(common,
+ "Unable to add user pattern: %d\n", ret);
+ goto fail_wow;
+ }
+ }
spin_lock_bh(&sc->sc_pcu_lock);
/*
@@ -278,12 +266,12 @@ int ath9k_suspend(struct ieee80211_hw *hw,
synchronize_irq(sc->irq);
tasklet_kill(&sc->intr_tq);
- ath9k_hw_wow_enable(ah, wow_triggers_enabled);
+ ath9k_hw_wow_enable(ah, triggers);
ath9k_ps_restore(sc);
- ath_dbg(common, ANY, "WoW enabled in ath9k\n");
- atomic_inc(&sc->wow_sleep_proc_intr);
+ ath_dbg(common, WOW, "Suspend with WoW triggers: 0x%x\n", triggers);
+ set_bit(ATH_OP_WOW_ENABLED, &common->op_flags);
fail_wow:
mutex_unlock(&sc->mutex);
return ret;
@@ -294,7 +282,7 @@ int ath9k_resume(struct ieee80211_hw *hw)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 wow_status;
+ u8 status;
mutex_lock(&sc->mutex);
@@ -309,29 +297,14 @@ int ath9k_resume(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
- wow_status = ath9k_hw_wow_wakeup(ah);
-
- if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
- /*
- * some devices may not pick beacon miss
- * as the reason they woke up so we add
- * that here for that shortcoming.
- */
- wow_status |= AH_WOW_BEACON_MISS;
- atomic_dec(&sc->wow_got_bmiss_intr);
- ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
- }
-
- atomic_dec(&sc->wow_sleep_proc_intr);
-
- if (wow_status) {
- ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
- ath9k_hw_wow_event_to_string(wow_status), wow_status);
- }
+ status = ath9k_hw_wow_wakeup(ah);
+ ath_dbg(common, WOW, "Resume with WoW status: 0x%x\n", status);
ath_restart_work(sc);
ath9k_start_btcoex(sc);
+ clear_bit(ATH_OP_WOW_ENABLED, &common->op_flags);
+
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
@@ -341,22 +314,35 @@ int ath9k_resume(struct ieee80211_hw *hw)
void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
- device_init_wakeup(sc->dev, 1);
device_set_wakeup_enable(sc->dev, enabled);
mutex_unlock(&sc->mutex);
+
+ ath_dbg(common, WOW, "WoW wakeup source is %s\n",
+ (enabled) ? "enabled" : "disabled");
}
void ath9k_init_wow(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if ((sc->driver_data & ATH9K_PCI_WOW) || sc->force_wow) {
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565_11_OR_LATER(ah))
+ hw->wiphy->wowlan = &ath9k_wowlan_support;
+ else
+ hw->wiphy->wowlan = &ath9k_wowlan_support_legacy;
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
- (sc->driver_data & ATH9K_PCI_WOW) &&
- device_can_wakeup(sc->dev))
- hw->wiphy->wowlan = &ath9k_wowlan_support;
+ device_init_wakeup(sc->dev, 1);
+ }
+}
+
+void ath9k_deinit_wow(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
- atomic_set(&sc->wow_sleep_proc_intr, -1);
- atomic_set(&sc->wow_got_bmiss_intr, -1);
+ if ((sc->driver_data & ATH9K_PCI_WOW) || sc->force_wow)
+ device_init_wakeup(sc->dev, 0);
}
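A short illustrative note on the slot numbering visible in the hunks above: slots 0 and 1 always carry the driver-generated disassociate/deauthenticate patterns, so user patterns are programmed from slot 2 onwards, which is also why ath9k_wowlan_support advertises MAX_NUM_PATTERN - 2 user patterns. The identifiers below are invented, not from the patch.

/* Hypothetical helper mirroring the "i + 2" indexing used above. */
#define ATH9K_WOW_DRIVER_PATTERNS	2	/* disassoc + deauth, slots 0 and 1 */

static inline int ath9k_wow_user_slot(int user_index)
{
	return ATH9K_WOW_DRIVER_PATTERNS + user_index;
}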
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index e9bd02c2e844..1b8e75c4d2c2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1097,24 +1097,65 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
}
static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
- u8 rateidx)
+ u8 rateidx, bool is_40, bool is_cck)
{
u8 max_power;
+ struct sk_buff *skb;
+ struct ath_frame_info *fi;
+ struct ieee80211_tx_info *info;
struct ath_hw *ah = sc->sc_ah;
- if (sc->tx99_state)
+ if (sc->tx99_state || !ah->tpc_enabled)
return MAX_RATE_POWER;
+ skb = bf->bf_mpdu;
+ fi = get_frame_info(skb);
+ info = IEEE80211_SKB_CB(skb);
+
if (!AR_SREV_9300_20_OR_LATER(ah)) {
- /* ar9002 is not sipported for the moment */
- return MAX_RATE_POWER;
- }
+ int txpower = fi->tx_power;
- if (!bf->bf_state.bfs_paprd) {
- struct sk_buff *skb = bf->bf_mpdu;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ath_frame_info *fi = get_frame_info(skb);
+ if (is_40) {
+ u8 power_ht40delta;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+ bool is_2ghz;
+ struct modal_eep_header *pmodal;
+ is_2ghz = info->band == IEEE80211_BAND_2GHZ;
+ pmodal = &eep->modalHeader[is_2ghz];
+ power_ht40delta = pmodal->ht40PowerIncForPdadc;
+ } else {
+ power_ht40delta = 2;
+ }
+ txpower += power_ht40delta;
+ }
+
+ if (AR_SREV_9287(ah) || AR_SREV_9285(ah) ||
+ AR_SREV_9271(ah)) {
+ txpower -= 2 * AR9287_PWR_TABLE_OFFSET_DB;
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ s8 power_offset;
+
+ power_offset = ah->eep_ops->get_eeprom(ah,
+ EEP_PWR_TABLE_OFFSET);
+ txpower -= 2 * power_offset;
+ }
+
+ if (OLC_FOR_AR9280_20_LATER && is_cck)
+ txpower -= 2;
+
+ txpower = max(txpower, 0);
+ max_power = min_t(u8, ah->tx_power[rateidx], txpower);
+
+ /* XXX: clamp minimum TX power at 1 for AR9160 since if
+ * max_power is set to 0, frames are transmitted at max
+ * TX power
+ */
+ if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
+ max_power = 1;
+ } else if (!bf->bf_state.bfs_paprd) {
if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
max_power = min(ah->tx_power_stbc[rateidx],
fi->tx_power);
@@ -1152,7 +1193,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
info->rtscts_rate = fi->rtscts_rate;
for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
- bool is_40, is_sgi, is_sp;
+ bool is_40, is_sgi, is_sp, is_cck;
int phy;
if (!rates[i].count || (rates[i].idx < 0))
@@ -1198,7 +1239,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
- info->txpower[i] = ath_get_rate_txpower(sc, bf, rix);
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
+ is_40, false);
continue;
}
@@ -1227,7 +1269,9 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
phy, rate->bitrate * 100, len, rix, is_sp);
- info->txpower[i] = ath_get_rate_txpower(sc, bf, rix);
+ is_cck = IS_CCK_RATE(info->rates[i].Rate);
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, false,
+ is_cck);
}
/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
@@ -2259,7 +2303,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
- bool queue, skip_uapsd = false;
+ bool queue, skip_uapsd = false, ps_resp;
int q, ret;
if (vif)
@@ -2268,6 +2312,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
txctl->force_channel = true;
+ ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
+
ret = ath_tx_prepare(hw, skb, txctl);
if (ret)
return ret;
@@ -2310,7 +2356,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
if (txctl->an && queue)
tid = ath_get_skb_tid(sc, txctl->an, skb);
- if (!skip_uapsd && (info->flags & IEEE80211_TX_CTL_PS_RESPONSE)) {
+ if (!skip_uapsd && ps_resp) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
@@ -2443,9 +2489,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (sc->sc_ah->caldata)
set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
- if (!(tx_flags & ATH_TX_ERROR))
- /* Frame was ACKed */
- tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ if (!(tx_flags & ATH_TX_ERROR)) {
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ }
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
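A hedged reading of the new pre-AR9003 branch in ath_get_rate_txpower() above: the per-frame and per-rate power values appear to be kept in 0.5 dB steps, which is why the dB-valued EEPROM offsets are doubled before subtraction. The standalone sketch below restates that arithmetic with invented parameter names; it is not the patch code.

/* Sketch only: same order of adjustments as the hunk above.
 * Quantities are in 0.5 dB steps except the dB-valued offsets,
 * which are scaled by 2.
 */
static u8 legacy_rate_txpower(int frame_pwr, int ht40_delta,
			      int pwr_table_offset_db, bool olc_cck,
			      u8 rate_limit)
{
	int txpower = frame_pwr;

	txpower += ht40_delta;			/* HT40 rates only */
	txpower -= 2 * pwr_table_offset_db;	/* dB -> 0.5 dB steps */
	if (olc_cck)
		txpower -= 2;			/* 1 dB CCK backoff under OLC */

	txpower = max(txpower, 0);
	return min_t(u8, rate_limit, txpower);
}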
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 39a63874b275..f2b4f537e4c1 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -188,12 +188,12 @@ int carl9170_collect_tally(struct ar9170 *ar)
if (ar->channel) {
info = &ar->survey[ar->channel->hw_value];
- info->channel_time = ar->tally.active;
- info->channel_time_busy = ar->tally.cca;
- info->channel_time_tx = ar->tally.tx_time;
- do_div(info->channel_time, 1000);
- do_div(info->channel_time_busy, 1000);
- do_div(info->channel_time_tx, 1000);
+ info->time = ar->tally.active;
+ info->time_busy = ar->tally.cca;
+ info->time_tx = ar->tally.tx_time;
+ do_div(info->time, 1000);
+ do_div(info->time_busy, 1000);
+ do_div(info->time_tx, 1000);
}
}
return 0;
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 1c0af9cd9a85..6808db433283 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -214,14 +214,10 @@ DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)
static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
- ADD(buf, *len, bufsize, "jar: [");
-
spin_lock_bh(&ar->mem_lock);
- *len += bitmap_scnprintf(&buf[*len], bufsize - *len,
- ar->mem_bitmap, ar->fw.mem_blocks);
-
- ADD(buf, *len, bufsize, "]\n");
+ ADD(buf, *len, bufsize, "jar: [%*pb]\n",
+ ar->fw.mem_blocks, ar->mem_bitmap);
ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
@@ -316,17 +312,13 @@ static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf,
cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
iter->max, iter->state, iter->counter);
- ADD(buf, *len, bufsize, "\tWindow: [");
-
- *len += bitmap_scnprintf(&buf[*len], bufsize - *len,
- iter->bitmap, CARL9170_BAW_BITS);
+ ADD(buf, *len, bufsize, "\tWindow: [%*pb,W]\n",
+ CARL9170_BAW_BITS, iter->bitmap);
#define BM_STR_OFF(offset) \
((CARL9170_BAW_BITS - (offset) - 1) / 4 + \
(CARL9170_BAW_BITS - (offset) - 1) / 32 + 1)
- ADD(buf, *len, bufsize, ",W]\n");
-
offset = BM_STR_OFF(0);
ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T");
@@ -448,12 +440,8 @@ static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf,
ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n",
ar->vifs, ar->fw.vif_num);
- ADD(buf, *len, bufsize, "VIF bitmap: [");
-
- *len += bitmap_scnprintf(&buf[*len], bufsize - *len,
- &ar->vif_bitmap, ar->fw.vif_num);
-
- ADD(buf, *len, bufsize, "]\n");
+ ADD(buf, *len, bufsize, "VIF bitmap: [%*pb]\n",
+ ar->fw.vif_num, &ar->vif_bitmap);
rcu_read_lock();
list_for_each_entry_rcu(iter, &ar->vif_list, list) {
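The three debugfs hunks above replace open-coded bitmap_scnprintf() calls with the vsprintf "%*pb" extension, where the field width carries the number of valid bits and the argument is a pointer to the bitmap. A minimal usage sketch follows; the function and bitmap names are illustrative.

#include <linux/bitmap.h>
#include <linux/kernel.h>

static ssize_t show_busy_map(char *buf, size_t size)
{
	DECLARE_BITMAP(busy, 64);	/* illustrative local bitmap */

	bitmap_zero(busy, 64);
	__set_bit(3, busy);

	/* "%*pb" prints hex words; "%*pbl" would print a range list ("3") */
	return scnprintf(buf, size, "busy: [%*pb]\n", 64, busy);
}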
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index ef5b6dc7b7f1..f1455a04cb62 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1690,9 +1690,9 @@ found:
survey->filled |= SURVEY_INFO_IN_USE;
if (ar->fw.hw_counters) {
- survey->filled |= SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_TX;
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_TX;
}
return 0;
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index cfd0554cf140..3d57f8772389 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
};
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 73f12f196f14..086549b732b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -84,6 +84,7 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
if (!cur_ctl)
goto out_fail;
+ spin_lock_init(&cur_ctl->skb_lock);
cur_ctl->ctl_blk_order = i;
if (i == 0) {
ch->head_blk_ctl = cur_ctl;
@@ -354,6 +355,8 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
* and while-do will not make any cycles.
*/
do {
+ if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
+ break;
if (ctl->skb) {
dma_unmap_single(NULL, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7dd8873f757e..0783d2ed8238 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -298,6 +298,8 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_debugfs_init(wcn);
INIT_LIST_HEAD(&wcn->vif_list);
+ spin_lock_init(&wcn->dxe_lock);
+
return 0;
out_smd_stop:
@@ -795,6 +797,7 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
vif, sta->addr);
+ spin_lock_init(&sta_priv->ampdu_lock);
vif_priv->sta = sta_priv;
sta_priv->vif = vif_priv;
/*
@@ -873,21 +876,32 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
get_sta_index(vif, sta_priv));
wcn36xx_smd_add_ba(wcn);
wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
- ieee80211_start_tx_ba_session(sta, tid, 0);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_START:
+ spin_lock_bh(&sta_priv->ampdu_lock);
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
+ spin_unlock_bh(&sta_priv->ampdu_lock);
+
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ spin_lock_bh(&sta_priv->ampdu_lock);
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL;
+ spin_unlock_bh(&sta_priv->ampdu_lock);
+
wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
case IEEE80211_AMPDU_TX_STOP_CONT:
+ spin_lock_bh(&sta_priv->ampdu_lock);
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_NONE;
+ spin_unlock_bh(&sta_priv->ampdu_lock);
+
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
default:
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 63986931829e..69ed39731902 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -21,6 +21,61 @@
#include <linux/bitops.h>
#include "smd.h"
+struct wcn36xx_cfg_val {
+ u32 cfg_id;
+ u32 value;
+};
+
+#define WCN36XX_CFG_VAL(id, val) \
+{ \
+ .cfg_id = WCN36XX_HAL_CFG_ ## id, \
+ .value = val \
+}
+
+static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
+ WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
+ WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
+ WCN36XX_CFG_VAL(CAL_PERIOD, 5),
+ WCN36XX_CFG_VAL(CAL_CONTROL, 1),
+ WCN36XX_CFG_VAL(PROXIMITY, 0),
+ WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
+ WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000),
+ WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
+ WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
+ WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 6),
+ WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 6),
+ WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
+ WCN36XX_CFG_VAL(FIXED_RATE, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
+ WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
+ WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
+ WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
+ WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
+ WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
+ WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
+ WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
+ WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
+ WCN36XX_CFG_VAL(STATS_PERIOD, 10),
+ WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
+ WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
+ WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
+ WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
+ WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
+ WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
+ WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
+ WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+ WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
+ WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
+};
+
static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
{
struct wcn36xx_hal_cfg *entry;
@@ -357,8 +412,10 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
int wcn36xx_smd_start(struct wcn36xx *wcn)
{
- struct wcn36xx_hal_mac_start_req_msg msg_body;
+ struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
int ret = 0;
+ int i;
+ size_t len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
@@ -368,10 +425,22 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+ body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf;
+ len = body->header.len;
+
+ for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) {
+ ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id,
+ wcn36xx_cfg_vals[i].value);
+ if (ret)
+ goto out;
+ }
+ body->header.len = len;
+ body->params.len = len - sizeof(*body);
+
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
msg_body.params.type);
- ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
if (ret) {
wcn36xx_err("Sending hal_start failed\n");
goto out;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 32bb26a0db2a..9bec8237231d 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -93,6 +93,7 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
bd->pdu.mpdu_header_off;
bd->pdu.mpdu_len = len;
bd->pdu.tid = tid;
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
}
static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
@@ -110,15 +111,54 @@ static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
wcn36xx_warn("vif %pM not found\n", addr);
return NULL;
}
+
+static void wcn36xx_tx_start_ampdu(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_sta *sta;
+ u8 *qc, tid;
+
+ if (!conf_is_ht(&wcn->hw->conf))
+ return;
+
+ sta = wcn36xx_priv_to_sta(sta_priv);
+
+ if (WARN_ON(!ieee80211_is_data_qos(hdr->frame_control)))
+ return;
+
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+
+ spin_lock(&sta_priv->ampdu_lock);
+ if (sta_priv->ampdu_state[tid] != WCN36XX_AMPDU_NONE)
+ goto out_unlock;
+
+ if (sta_priv->non_agg_frame_ct++ >= WCN36XX_AMPDU_START_THRESH) {
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
+ sta_priv->non_agg_frame_ct = 0;
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ }
+out_unlock:
+ spin_unlock(&sta_priv->ampdu_lock);
+}
+
static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct wcn36xx *wcn,
struct wcn36xx_vif **vif_priv,
struct wcn36xx_sta *sta_priv,
- struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
bool bcast)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *__vif_priv = NULL;
+ bool is_data_qos;
+
bd->bd_rate = WCN36XX_BD_RATE_DATA;
/*
@@ -157,14 +197,26 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
bd->ack_policy = 1;
}
*vif_priv = __vif_priv;
+
+ is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
+
+ wcn36xx_set_tx_pdu(bd,
+ is_data_qos ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, sta_priv ? sta_priv->tid : 0);
+
+ if (sta_priv && is_data_qos)
+ wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
}
static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
struct wcn36xx *wcn,
struct wcn36xx_vif **vif_priv,
- struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
bool bcast)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct wcn36xx_vif *__vif_priv =
get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
@@ -198,6 +250,12 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
} else
bd->queue_id = WCN36XX_TX_U_WQ_ID;
*vif_priv = __vif_priv;
+
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, WCN36XX_TID);
}
int wcn36xx_start_tx(struct wcn36xx *wcn,
@@ -237,7 +295,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd->dpu_rf = WCN36XX_BMU_WQ_TX;
- bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+ bd->tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS);
if (bd->tx_comp) {
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
spin_lock_irqsave(&wcn->dxe_lock, flags);
@@ -259,22 +317,11 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
}
/* Data frames served first */
- if (is_low) {
- wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
- wcn36xx_set_tx_pdu(bd,
- ieee80211_is_data_qos(hdr->frame_control) ?
- sizeof(struct ieee80211_qos_hdr) :
- sizeof(struct ieee80211_hdr_3addr),
- skb->len, sta_priv ? sta_priv->tid : 0);
- } else {
+ if (is_low)
+ wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, skb, bcast);
+ else
/* MGMT and CTRL frames are handled here */
- wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
- wcn36xx_set_tx_pdu(bd,
- ieee80211_is_data_qos(hdr->frame_control) ?
- sizeof(struct ieee80211_qos_hdr) :
- sizeof(struct ieee80211_hdr_3addr),
- skb->len, WCN36XX_TID);
- }
+ wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, skb, bcast);
buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
bd->tx_bd_sign = 0xbdbdbdbd;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
index bbfbcf808c77..032216e82b2b 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.h
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -32,6 +32,12 @@
#define WCN36XX_BD_RATE_MGMT 2
#define WCN36XX_BD_RATE_CTRL 3
+enum wcn36xx_txbd_ssn_type {
+ WCN36XX_TXBD_SSN_FILL_HOST = 0,
+ WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS = 1,
+ WCN36XX_TXBD_SSN_FILL_DPU_QOS = 2,
+};
+
struct wcn36xx_pdu {
u32 dpu_fb:8;
u32 adu_fb:8;
@@ -50,7 +56,8 @@ struct wcn36xx_pdu {
/* 0x0c*/
u32 reserved4:8;
u32 tid:4;
- u32 reserved3:4;
+ u32 bd_ssn:2;
+ u32 reserved3:2;
u32 mpdu_len:16;
};
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index f0fb81dfd17b..7b41e833e18c 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -32,6 +32,9 @@
#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
#define WCN36XX_AGGR_BUFFER_SIZE 64
+/* How many frames until we start a-mpdu TX session */
+#define WCN36XX_AMPDU_START_THRESH 20
+
extern unsigned int wcn36xx_dbg_mask;
enum wcn36xx_debug_mask {
@@ -74,6 +77,13 @@ enum wcn36xx_debug_mask {
buf, len, false); \
} while (0)
+enum wcn36xx_ampdu_state {
+ WCN36XX_AMPDU_NONE,
+ WCN36XX_AMPDU_INIT,
+ WCN36XX_AMPDU_START,
+ WCN36XX_AMPDU_OPERATIONAL,
+};
+
#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
@@ -165,6 +175,10 @@ struct wcn36xx_sta {
bool is_data_encrypted;
/* Rates */
struct wcn36xx_hal_supported_rates supported_rates;
+
+ spinlock_t ampdu_lock; /* protects next two fields */
+ enum wcn36xx_ampdu_state ampdu_state[16];
+ int non_agg_frame_ct;
};
struct wcn36xx_dxe_ch;
struct wcn36xx {
@@ -243,4 +257,10 @@ static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
}
void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+static inline
+struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
+{
+ return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
+}
+
#endif /* _WCN36XX_H_ */
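wcn36xx_priv_to_sta() above relies on mac80211 keeping the driver's per-station data in the drv_priv area at the tail of struct ieee80211_sta, so container_of() can walk back to the owning station. A small sketch of both directions; the reverse helper name is illustrative, not from the patch.

/* drv_priv sits at the end of struct ieee80211_sta, so the two
 * structures convert both ways without extra bookkeeping.
 */
static inline struct wcn36xx_sta *wcn36xx_sta_to_priv(struct ieee80211_sta *sta)
{
	return (struct wcn36xx_sta *)sta->drv_priv;
}

static inline struct ieee80211_sta *priv_to_sta(struct wcn36xx_sta *sta_priv)
{
	return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
}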
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 481680a3aa55..ce8c0381825e 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -39,12 +39,3 @@ config WIL6210_TRACING
option if you are interested in debugging the driver.
If unsure, say Y to make it easier to debug problems.
-
-config WIL6210_PLATFORM_MSM
- bool "wil6210 MSM platform specific support"
- depends on WIL6210
- depends on ARCH_MSM
- default y
- ---help---
- Say Y here to enable wil6210 driver support for MSM
- platform specific features
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 8ad4b5f97e04..caa717bf52f3 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -14,7 +14,6 @@ wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
-wil6210-$(CONFIG_WIL6210_PLATFORM_MSM) += wil_platform_msm.o
wil6210-y += ethtool.o
# for tracing framework to find trace.h
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 38332a6dfb3a..2d5ea21be47e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -142,14 +142,14 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
sinfo->generation = wil->sinfo_gen;
- sinfo->filled = STATION_INFO_RX_BYTES |
- STATION_INFO_TX_BYTES |
- STATION_INFO_RX_PACKETS |
- STATION_INFO_TX_PACKETS |
- STATION_INFO_RX_BITRATE |
- STATION_INFO_TX_BITRATE |
- STATION_INFO_RX_DROP_MISC |
- STATION_INFO_TX_FAILED;
+ sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
+ BIT(NL80211_STA_INFO_TX_BYTES) |
+ BIT(NL80211_STA_INFO_RX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_RX_BITRATE) |
+ BIT(NL80211_STA_INFO_TX_BITRATE) |
+ BIT(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT(NL80211_STA_INFO_TX_FAILED);
sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
@@ -162,8 +162,8 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
sinfo->tx_packets = stats->tx_packets;
sinfo->tx_failed = stats->tx_errors;
- if (test_bit(wil_status_fwconnected, &wil->status)) {
- sinfo->filled |= STATION_INFO_SIGNAL;
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
sinfo->signal = reply.evt.sqi;
}
@@ -282,7 +282,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
/* FW don't support scan after connection attempt */
- if (test_bit(wil_status_dontscan, &wil->status)) {
+ if (test_bit(wil_status_dontscan, wil->status)) {
wil_err(wil, "Can't scan now\n");
return -EBUSY;
}
@@ -334,6 +334,30 @@ out:
return rc;
}
+static void wil_print_crypto(struct wil6210_priv *wil,
+ struct cfg80211_crypto_settings *c)
+{
+ int i, n;
+
+ wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
+ c->wpa_versions, c->cipher_group);
+ wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise);
+ n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise));
+ for (i = 0; i < n; i++)
+ wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
+ c->ciphers_pairwise[i]);
+ wil_dbg_misc(wil, "}\n");
+ wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites);
+ n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites));
+ for (i = 0; i < n; i++)
+ wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
+ c->akm_suites[i]);
+ wil_dbg_misc(wil, "}\n");
+ wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
+ c->control_port, be16_to_cpu(c->control_port_ethertype),
+ c->control_port_no_encrypt);
+}
+
static void wil_print_connect_params(struct wil6210_priv *wil,
struct cfg80211_connect_params *sme)
{
@@ -348,6 +372,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
16, 1, sme->ssid, sme->ssid_len, true);
wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
+ wil_print_crypto(wil, &sme->crypto);
}
static int wil_cfg80211_connect(struct wiphy *wiphy,
@@ -362,8 +387,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int ch;
int rc = 0;
- if (test_bit(wil_status_fwconnecting, &wil->status) ||
- test_bit(wil_status_fwconnected, &wil->status))
+ if (test_bit(wil_status_fwconnecting, wil->status) ||
+ test_bit(wil_status_fwconnected, wil->status))
return -EALREADY;
wil_print_connect_params(wil, sme);
@@ -450,15 +475,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
memcpy(conn.bssid, bss->bssid, ETH_ALEN);
memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
- set_bit(wil_status_fwconnecting, &wil->status);
+ set_bit(wil_status_fwconnecting, wil->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
if (rc == 0) {
+ netif_carrier_on(ndev);
/* Connect can take lots of time */
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
} else {
- clear_bit(wil_status_fwconnecting, &wil->status);
+ clear_bit(wil_status_fwconnecting, wil->status);
}
out:
@@ -618,18 +644,6 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
b->assocresp_ies, b->assocresp_ies_len);
}
-static void wil_print_crypto(struct wil6210_priv *wil,
- struct cfg80211_crypto_settings *c)
-{
- wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
- c->wpa_versions, c->cipher_group);
- wil_dbg_misc(wil, "Pairwise ciphers [%d]\n", c->n_ciphers_pairwise);
- wil_dbg_misc(wil, "AKM suites [%d]\n", c->n_akm_suites);
- wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
- c->control_port, be16_to_cpu(c->control_port_ethertype),
- c->control_port_no_encrypt);
-}
-
static int wil_fix_bcon(struct wil6210_priv *wil,
struct cfg80211_beacon_data *bcon)
{
@@ -757,12 +771,12 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil->secure_pcp = info->privacy;
+ netif_carrier_on(ndev);
+
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value);
if (rc)
- goto out;
-
- netif_carrier_on(ndev);
+ netif_carrier_off(ndev);
out:
mutex_unlock(&wil->mutex);
@@ -772,23 +786,26 @@ out:
static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
struct net_device *ndev)
{
- int rc, rc1;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s()\n", __func__);
+ netif_carrier_off(ndev);
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
- rc = wmi_pcp_stop(wil);
+ wmi_pcp_stop(wil);
__wil_down(wil);
- rc1 = __wil_up(wil);
+ __wil_up(wil);
mutex_unlock(&wil->mutex);
- return min(rc, rc1);
+ /* some functions above might fail (e.g. __wil_up). Nevertheless, we
+	 * return success because the AP has stopped
+ */
+ return 0;
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
@@ -804,6 +821,96 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
return 0;
}
+/* probe_client handling */
+static void wil_probe_client_handle(struct wil6210_priv *wil,
+ struct wil_probe_client_req *req)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wil_sta_info *sta = &wil->sta[req->cid];
+ /* assume STA is alive if it is still connected,
+ * else FW will disconnect it
+ */
+ bool alive = (sta->status == wil_sta_connected);
+
+ cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL);
+}
+
+static struct list_head *next_probe_client(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->probe_client_mutex);
+
+ if (!list_empty(&wil->probe_client_pending)) {
+ ret = wil->probe_client_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->probe_client_mutex);
+
+ return ret;
+}
+
+void wil_probe_client_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ probe_client_worker);
+ struct wil_probe_client_req *req;
+ struct list_head *lh;
+
+ while ((lh = next_probe_client(wil)) != NULL) {
+ req = list_entry(lh, struct wil_probe_client_req, list);
+
+ wil_probe_client_handle(wil, req);
+ kfree(req);
+ }
+}
+
+void wil_probe_client_flush(struct wil6210_priv *wil)
+{
+ struct wil_probe_client_req *req, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->probe_client_mutex);
+
+ list_for_each_entry_safe(req, t, &wil->probe_client_pending, list) {
+ list_del(&req->list);
+ kfree(req);
+ }
+
+ mutex_unlock(&wil->probe_client_mutex);
+}
+
+static int wil_cfg80211_probe_client(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *peer, u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil_probe_client_req *req;
+ int cid = wil_find_cid(wil, peer);
+
+ wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
+
+ if (cid < 0)
+ return -ENOLINK;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->cid = cid;
+ req->cookie = cid;
+
+ mutex_lock(&wil->probe_client_mutex);
+ list_add_tail(&req->list, &wil->probe_client_pending);
+ mutex_unlock(&wil->probe_client_mutex);
+
+ *cookie = req->cookie;
+ queue_work(wil->wq_service, &wil->probe_client_worker);
+ return 0;
+}
+
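
The probe_client path added above defers the actual cfg80211_probe_status() report: requests are queued on a mutex-protected list and a dedicated worker drains them, so the handler runs in a context that may sleep. Reduced to its minimal shape (kernel-style sketch; my_* names are hypothetical, and the mutex/list/work initialisation is omitted):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_req {
	struct list_head list;
	int arg;
};

struct my_ctx {
	struct mutex lock;
	struct list_head pending;
	struct work_struct worker;
	struct workqueue_struct *wq;
};

static int my_queue_req(struct my_ctx *c, int arg)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	req->arg = arg;

	mutex_lock(&c->lock);
	list_add_tail(&req->list, &c->pending);
	mutex_unlock(&c->lock);

	queue_work(c->wq, &c->worker);
	return 0;
}

static void my_worker(struct work_struct *work)
{
	struct my_ctx *c = container_of(work, struct my_ctx, worker);
	struct my_req *req;

	for (;;) {
		/* pop one entry under the lock, handle it with the lock dropped */
		mutex_lock(&c->lock);
		req = list_first_entry_or_null(&c->pending, struct my_req, list);
		if (req)
			list_del(&req->list);
		mutex_unlock(&c->lock);

		if (!req)
			break;
		/* ... handle req->arg here, may sleep ... */
		kfree(req);
	}
}
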
static struct cfg80211_ops wil_cfg80211_ops = {
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
@@ -823,6 +930,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.start_ap = wil_cfg80211_start_ap,
.stop_ap = wil_cfg80211_stop_ap,
.del_station = wil_cfg80211_del_station,
+ .probe_client = wil_cfg80211_probe_client,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@@ -854,6 +962,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->cipher_suites = wil_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
+ wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 4e6e14501c2f..45c3558ec804 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -50,6 +50,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
char _s, char _h)
{
void __iomem *x = wmi_addr(wil, vring->hwtail);
+ u32 v;
seq_printf(s, "VRING %s = {\n", name);
seq_printf(s, " pa = %pad\n", &vring->pa);
@@ -58,10 +59,12 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_printf(s, " swtail = %d\n", vring->swtail);
seq_printf(s, " swhead = %d\n", vring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
- if (x)
- seq_printf(s, "0x%08x\n", ioread32(x));
- else
+ if (x) {
+ v = ioread32(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
seq_puts(s, "???\n");
+ }
if (vring->va && (vring->size < 1025)) {
uint i;
@@ -101,8 +104,8 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
char name[10];
/* performance monitoring */
cycles_t now = get_cycles();
- cycles_t idle = txdata->idle * 100;
- cycles_t total = now - txdata->begin;
+ uint64_t idle = txdata->idle * 100;
+ uint64_t total = now - txdata->begin;
do_div(idle, total);
txdata->begin = now;
@@ -110,9 +113,12 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
snprintf(name, sizeof(name), "tx_%2d", i);
- seq_printf(s, "\n%pM CID %d TID %d [%3d|%3d] idle %3d%%\n",
- wil->sta[cid].addr, cid, tid, used, avail,
- (int)idle);
+ seq_printf(s,
+ "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n",
+ wil->sta[cid].addr, cid, tid,
+ txdata->agg_wsize, txdata->agg_timeout,
+ txdata->agg_amsdu ? "+" : "-",
+ used, avail, (int)idle);
wil_print_vring(s, wil, name, vring, '_', 'H');
}
@@ -384,24 +390,67 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
return 0;
}
-static const struct dbg_off itr_cnt_off[] = {
+static const struct dbg_off lgc_itr_cnt_off[] = {
{"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
{"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
{"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
{},
};
+static const struct dbg_off tx_itr_cnt_off[] = {
+ {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+ doff_io32},
+ {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+ doff_io32},
+ {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+ doff_io32},
+ {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+ doff_io32},
+ {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+ doff_io32},
+ {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+ doff_io32},
+ {},
+};
+
+static const struct dbg_off rx_itr_cnt_off[] = {
+ {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+ doff_io32},
+ {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+ doff_io32},
+ {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+ doff_io32},
+ {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+ doff_io32},
+ {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+ doff_io32},
+ {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+ doff_io32},
+ {},
+};
+
static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
struct dentry *parent)
{
- struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+ struct dentry *d, *dtx, *drx;
+ d = debugfs_create_dir("ITR_CNT", parent);
if (IS_ERR_OR_NULL(d))
return -ENODEV;
+ dtx = debugfs_create_dir("TX", d);
+ drx = debugfs_create_dir("RX", d);
+ if (IS_ERR_OR_NULL(dtx) || IS_ERR_OR_NULL(drx))
+ return -ENODEV;
+
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
- itr_cnt_off);
+ lgc_itr_cnt_off);
+
+ wil6210_debugfs_init_offset(wil, dtx, (void * __force)wil->csr,
+ tx_itr_cnt_off);
+ wil6210_debugfs_init_offset(wil, drx, (void * __force)wil->csr,
+ rx_itr_cnt_off);
return 0;
}
@@ -558,6 +607,87 @@ static const struct file_operations fops_rxon = {
.open = simple_open,
};
+/* block ack control, write:
+ * - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
+ * - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
+ * - "del_rx <CID> <TID> <reason>" to trigger DELBA for Rx side
+ */
+static ssize_t wil_write_back(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+	char cmd[9]; /* room for 8 chars from "%8s" plus the terminating NUL */
+ int p1, p2, p3;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%8s %d %d %d", cmd, &p1, &p2, &p3);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+ if (rc < 2)
+ return -EINVAL;
+
+ if (0 == strcmp(cmd, "add")) {
+ if (rc < 3) {
+			wil_err(wil, "BACK: add requires at least 2 params\n");
+ return -EINVAL;
+ }
+ if (rc < 4)
+ p3 = 0;
+ wmi_addba(wil, p1, p2, p3);
+ } else if (0 == strcmp(cmd, "del_tx")) {
+ if (rc < 3)
+ p2 = WLAN_REASON_QSTA_LEAVE_QBSS;
+ wmi_delba_tx(wil, p1, p2);
+ } else if (0 == strcmp(cmd, "del_rx")) {
+ if (rc < 3) {
+ wil_err(wil,
+				"BACK: del_rx requires at least 2 params\n");
+ return -EINVAL;
+ }
+ if (rc < 4)
+ p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
+ wmi_delba_rx(wil, mk_cidxtid(p1, p2), p3);
+ } else {
+ wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static ssize_t wil_read_back(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char text[] = "block ack control, write:\n"
+ " - \"add <ringid> <agg_size> <timeout>\" to trigger ADDBA\n"
+ "If missing, <timeout> defaults to 0\n"
+ " - \"del_tx <ringid> <reason>\" to trigger DELBA for Tx side\n"
+ " - \"del_rx <CID> <TID> <reason>\" to trigger DELBA for Rx side\n"
+		"If missing, <reason> is set to \"STA_LEAVING\" (36)\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ sizeof(text));
+}
+
+static const struct file_operations fops_back = {
+ .read = wil_read_back,
+ .write = wil_write_back,
+ .open = simple_open,
+};
+
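
The new "back" debugfs file parses its commands with sscanf(), using the return value (the number of converted fields) to detect and default optional parameters. A runnable userspace sketch of the same parsing logic; the input string and the printed message are illustrative only:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *input = "add 1 16";	/* e.g. what was written to the file */
	char cmd[8];
	int p1, p2, p3;
	int rc = sscanf(input, "%7s %d %d %d", cmd, &p1, &p2, &p3);

	if (rc < 2)
		return 1;		/* need a command and one parameter */

	if (strcmp(cmd, "add") == 0) {
		if (rc < 3)
			return 1;	/* add needs <ringid> <agg_size> */
		if (rc < 4)
			p3 = 0;		/* <timeout> defaults to 0 */
		printf("ADDBA ring %d wsize %d timeout %d\n", p1, p2, p3);
	}
	return 0;
}
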
/*---tx_mgmt---*/
/* Write mgmt frame to this file to send it */
static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1116,7 +1246,8 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
- seq_printf(s, "0x%03x [", r->head_seq_num);
+ seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
+ r->head_seq_num);
for (i = 0; i < r->buf_size; i++) {
if (i == index)
seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
@@ -1127,10 +1258,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
}
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
+__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
{
struct wil6210_priv *wil = s->private;
int i, tid;
- unsigned long flags;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
struct wil_sta_info *p = &wil->sta[i];
@@ -1151,7 +1282,7 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data)
(p->data_port_open ? " data_port_open" : ""));
if (p->status == wil_sta_connected) {
- spin_lock_irqsave(&p->tid_rx_lock, flags);
+ spin_lock_bh(&p->tid_rx_lock);
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
@@ -1160,7 +1291,7 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data)
wil_print_rxtid(s, r);
}
}
- spin_unlock_irqrestore(&p->tid_rx_lock, flags);
+ spin_unlock_bh(&p->tid_rx_lock);
}
}
@@ -1217,6 +1348,7 @@ static const struct {
{"rxon", S_IWUSR, &fops_rxon},
{"tx_mgmt", S_IWUSR, &fops_txmgmt},
{"wmi_send", S_IWUSR, &fops_wmi},
+ {"back", S_IRUGO | S_IWUSR, &fops_back},
{"temp", S_IRUGO, &fops_temp},
{"freq", S_IRUGO, &fops_freq},
{"link", S_IRUGO, &fops_link},
@@ -1261,7 +1393,7 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
/* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32),
- WIL_FIELD(status, S_IRUGO | S_IWUSR, doff_ulong),
+ WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index d686638972be..4c44a82c34d7 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -45,16 +45,35 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- u32 itr_en, itr_val = 0;
+ u32 tx_itr_en, tx_itr_val = 0;
+ u32 rx_itr_en, rx_itr_val = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
- itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
- if (itr_en & BIT_DMA_ITR_CNT_CRL_EN)
- itr_val = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
-
- cp->rx_coalesce_usecs = itr_val;
+ if (test_bit(hw_capability_advanced_itr_moderation,
+ wil->hw_capabilities)) {
+ tx_itr_en = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+ if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
+ tx_itr_val =
+ ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+
+ rx_itr_en = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+ if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
+ rx_itr_val =
+ ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+ } else {
+ rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+ if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN)
+ rx_itr_val = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ }
+ cp->tx_coalesce_usecs = tx_itr_val;
+ cp->rx_coalesce_usecs = rx_itr_val;
return 0;
}
@@ -63,22 +82,25 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s(%d usec)\n", __func__, cp->rx_coalesce_usecs);
+ wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
+ cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
return -EINVAL;
}
- /* only @rx_coalesce_usecs supported, ignore
- * other parameters
+ /* only @rx_coalesce_usecs and @tx_coalesce_usecs supported,
+ * ignore other parameters
*/
- if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX)
+ if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX ||
+ cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX)
goto out_bad;
- wil->itr_trsh = cp->rx_coalesce_usecs;
- wil_set_itr_trsh(wil);
+ wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
+ wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+ wil_configure_interrupt_moderation(wil);
return 0;
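
With this change both rx_coalesce_usecs and tx_coalesce_usecs are honoured (mapped to the Rx/Tx max-burst-duration moderation registers, or to the single legacy threshold on older silicon, where only the Rx value applies), and every other ethtool_coalesce field is ignored. A userspace sketch of driving it through the standard ethtool ioctl; "wlan0" and the microsecond values are assumptions:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	ec.rx_coalesce_usecs = 30;	/* Rx max burst duration, usec */
	ec.tx_coalesce_usecs = 50;	/* Tx max burst duration, usec */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCOALESCE");
	return 0;
}

In practice one would read the current settings with ETHTOOL_GCOALESCE first and change only these two fields; the command-line equivalent is "ethtool -C wlan0 rx-usecs 30 tx-usecs 50".
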
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 4bcbd6297b3e..a6f923086f31 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -102,7 +102,7 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
- clear_bit(wil_status_irqen, &wil->status);
+ clear_bit(wil_status_irqen, wil->status);
}
void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
@@ -130,7 +130,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
- set_bit(wil_status_irqen, &wil->status);
+ set_bit(wil_status_irqen, wil->status);
iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
@@ -157,15 +157,91 @@ void wil_unmask_irq(struct wil6210_priv *wil)
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICC));
- /* interrupt moderation parameters */
- wil_set_itr_trsh(wil);
-
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
wil6210_unmask_irq_misc(wil);
}
+/* target write operation */
+#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
+
+static
+void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
+{
+ /* Disable and clear tx counter before (re)configuration */
+ W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+ wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
+ wil->tx_max_burst_duration);
+ /* Configure TX max burst duration timer to use usec units */
+ W(RGF_DMA_ITR_TX_CNT_CTL,
+ BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear tx idle counter before (re)configuration */
+ W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+ wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
+ wil->tx_interframe_timeout);
+	/* Configure TX interframe timeout (idle) timer to use usec units */
+ W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear rx counter before (re)configuration */
+ W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+ wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
+ wil->rx_max_burst_duration);
+	/* Configure RX max burst duration timer to use usec units */
+ W(RGF_DMA_ITR_RX_CNT_CTL,
+ BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear rx idle counter before (re)configuration */
+ W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+ wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
+ wil->rx_interframe_timeout);
+	/* Configure RX interframe timeout (idle) timer to use usec units */
+ W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+}
+
+static
+void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil)
+{
+ /* disable, use usec resolution */
+ W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR);
+
+ wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration);
+ W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration);
+ /* start it */
+ W(RGF_DMA_ITR_CNT_CRL,
+ BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK);
+}
+
+#undef W
+
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ /* disable interrupt moderation for monitor
+ * to get better timestamp precision
+ */
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ return;
+
+ if (test_bit(hw_capability_advanced_itr_moderation,
+ wil->hw_capabilities))
+ wil_configure_interrupt_moderation_new(wil);
+ else {
+ /* Advanced interrupt moderation is not available before
+ * Sparrow v2. Will use legacy interrupt moderation
+ */
+ wil_configure_interrupt_moderation_lgc(wil);
+ }
+}
+
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -194,18 +270,19 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
wil_dbg_irq(wil, "RX done\n");
if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
- wil_err_ratelimited(wil, "Received \"Rx buffer is in risk "
- "of overflow\" interrupt\n");
+ wil_err_ratelimited(wil,
+ "Received \"Rx buffer is in risk of overflow\" interrupt\n");
- isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH);
- if (test_bit(wil_status_reset_done, &wil->status)) {
- if (test_bit(wil_status_napi_en, &wil->status)) {
+ isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
+ BIT_DMA_EP_RX_ICR_RX_HTRSH);
+ if (test_bit(wil_status_reset_done, wil->status)) {
+ if (test_bit(wil_status_napi_en, wil->status)) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_rx);
} else {
- wil_err(wil, "Got Rx interrupt while "
- "stopping interface\n");
+ wil_err(wil,
+ "Got Rx interrupt while stopping interface\n");
}
} else {
wil_err(wil, "Got Rx interrupt while in reset\n");
@@ -248,7 +325,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
- if (test_bit(wil_status_reset_done, &wil->status)) {
+ if (test_bit(wil_status_reset_done, wil->status)) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_tx);
@@ -310,7 +387,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_ERROR) {
wil_err(wil, "Firmware error detected\n");
- clear_bit(wil_status_fwready, &wil->status);
+ clear_bit(wil_status_fwready, wil->status);
/*
* do not clear @isr here - we do 2-nd part in thread
* there, user space get notified, and it should be done
@@ -321,7 +398,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
- set_bit(wil_status_reset_done, &wil->status);
+ set_bit(wil_status_reset_done, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
@@ -394,7 +471,7 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
*/
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
- if (!test_bit(wil_status_irqen, &wil->status)) {
+ if (!test_bit(wil_status_irqen, wil->status)) {
u32 icm_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICM));
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 8ff3fe34fe05..b04e0afdcb21 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -33,15 +33,18 @@ static bool no_fw_load = true;
module_param(no_fw_load, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash.");
-static unsigned int itr_trsh = WIL6210_ITR_TRSH_DEFAULT;
-
-module_param(itr_trsh, uint, S_IRUGO);
-MODULE_PARM_DESC(itr_trsh, " Interrupt moderation threshold, usecs.");
+/* if not set via modparam, will be set to default value of 1/8 of
+ * rx ring size during init flow
+ */
+unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
+module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_ring_overflow_thrsh,
+ " RX ring overflow threshold in descriptors.");
/* We allow allocation of buffers larger than 1 page to support large packets.
 * This is suboptimal performance-wise when the MTU exceeds the page size.
*/
-unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN;
+unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
static int mtu_max_set(const char *val, const struct kernel_param *kp)
{
int ret;
@@ -53,7 +56,7 @@ static int mtu_max_set(const char *val, const struct kernel_param *kp)
if (ret)
return ret;
- if (mtu_max < 68 || mtu_max > IEEE80211_MAX_DATA_LEN_DMG)
+ if (mtu_max < 68 || mtu_max > WIL_MAX_ETH_MTU)
ret = -EINVAL;
return ret;
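
Note that the range check above appears to run after the parsed value has already been stored in mtu_max (the parse call sits just above the visible context), so a rejected value is still left in place. A kernel-style sketch of the same "module parameter with validation" idea, written to parse into a temporary first; all my_* names and the 68..7912 bounds are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static unsigned int my_mtu = 1500;

static int my_mtu_set(const char *val, const struct kernel_param *kp)
{
	unsigned int v;
	int ret = kstrtouint(val, 0, &v);

	if (ret)
		return ret;
	if (v < 68 || v > 7912)
		return -EINVAL;		/* nothing stored on failure */
	*(unsigned int *)kp->arg = v;
	return 0;
}

static const struct kernel_param_ops my_mtu_ops = {
	.set = my_mtu_set,
	.get = param_get_uint,
};

module_param_cb(my_mtu, &my_mtu_ops, &my_mtu, S_IRUGO | S_IWUSR);
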
@@ -135,12 +138,14 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
u16 reason_code, bool from_event)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
uint i;
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
struct wil_sta_info *sta = &wil->sta[cid];
+ might_sleep();
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
@@ -163,15 +168,14 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
for (i = 0; i < WIL_STA_TID_NUM; i++) {
struct wil_tid_ampdu_rx *r;
- unsigned long flags;
- spin_lock_irqsave(&sta->tid_rx_lock, flags);
+ spin_lock_bh(&sta->tid_rx_lock);
r = sta->tid_rx[i];
sta->tid_rx[i] = NULL;
wil_tid_ampdu_rx_free(wil, r);
- spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
+ spin_unlock_bh(&sta->tid_rx_lock);
}
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
if (wil->vring2cid_tid[i][0] == cid)
@@ -188,34 +192,47 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
struct wireless_dev *wdev = wil->wdev;
might_sleep();
- if (bssid) {
+ wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+ reason_code, from_event ? "+" : "-");
+
+ /* Cases are:
+ * - disconnect single STA, still connected
+ * - disconnect single STA, already disconnected
+ * - disconnect all
+ *
+ * For "disconnect all", there are 2 options:
+ * - bssid == NULL
+ * - bssid is our MAC address
+ */
+ if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) {
cid = wil_find_cid(wil, bssid);
- wil_dbg_misc(wil, "%s(%pM, CID %d)\n", __func__, bssid, cid);
- } else {
- wil_dbg_misc(wil, "%s(all)\n", __func__);
- }
-
- if (cid >= 0) /* disconnect 1 peer */
- wil_disconnect_cid(wil, cid, reason_code, from_event);
- else /* disconnect all */
+ wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
+ bssid, cid, reason_code);
+ if (cid >= 0) /* disconnect 1 peer */
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
+ } else { /* all */
+ wil_dbg_misc(wil, "Disconnect all\n");
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
wil_disconnect_cid(wil, cid, reason_code, from_event);
+ }
/* link state */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- wil_link_off(wil);
- if (test_bit(wil_status_fwconnected, &wil->status)) {
- clear_bit(wil_status_fwconnected, &wil->status);
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ clear_bit(wil_status_fwconnected, wil->status);
cfg80211_disconnected(ndev, reason_code,
NULL, 0, GFP_KERNEL);
- } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
+ } else if (test_bit(wil_status_fwconnecting, wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
}
- clear_bit(wil_status_fwconnecting, &wil->status);
+ clear_bit(wil_status_fwconnecting, wil->status);
break;
default:
break;
@@ -248,7 +265,7 @@ static void wil_scan_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;
- clear_bit(wil_status_fwready, &wil->status);
+ clear_bit(wil_status_fwready, wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
wil->recovery_state = fw_recovery_pending;
schedule_work(&wil->fw_error_worker);
@@ -352,6 +369,8 @@ static void wil_connect_worker(struct work_struct *work)
int rc;
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
connect_worker);
+ struct net_device *ndev = wil_to_ndev(wil);
+
int cid = wil->pending_connect_cid;
int ringid = wil_find_free_vring(wil);
@@ -366,7 +385,7 @@ static void wil_connect_worker(struct work_struct *work)
wil->pending_connect_cid = -1;
if (rc == 0) {
wil->sta[cid].status = wil_sta_connected;
- wil_link_on(wil);
+ netif_tx_wake_all_queues(ndev);
} else {
wil->sta[cid].status = wil_sta_unused;
}
@@ -384,6 +403,9 @@ int wil_priv_init(struct wil6210_priv *wil)
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
+ mutex_init(&wil->back_rx_mutex);
+ mutex_init(&wil->back_tx_mutex);
+ mutex_init(&wil->probe_client_mutex);
init_completion(&wil->wmi_ready);
init_completion(&wil->wmi_call);
@@ -396,25 +418,39 @@ int wil_priv_init(struct wil6210_priv *wil)
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
+ INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
+ INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
+ INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ INIT_LIST_HEAD(&wil->back_rx_pending);
+ INIT_LIST_HEAD(&wil->back_tx_pending);
+ INIT_LIST_HEAD(&wil->probe_client_pending);
spin_lock_init(&wil->wmi_ev_lock);
init_waitqueue_head(&wil->wq);
- wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
if (!wil->wmi_wq)
return -EAGAIN;
- wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
- if (!wil->wmi_wq_conn) {
- destroy_workqueue(wil->wmi_wq);
- return -EAGAIN;
- }
+ wil->wq_service = create_singlethread_workqueue(WIL_NAME "_service");
+ if (!wil->wq_service)
+ goto out_wmi_wq;
wil->last_fw_recovery = jiffies;
- wil->itr_trsh = itr_trsh;
+ wil->tx_interframe_timeout = WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT;
+ wil->rx_interframe_timeout = WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT;
+ wil->tx_max_burst_duration = WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT;
+ wil->rx_max_burst_duration = WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT;
+ if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
+ rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
return 0;
+
+out_wmi_wq:
+ destroy_workqueue(wil->wmi_wq);
+
+ return -EAGAIN;
}
/**
@@ -448,7 +484,13 @@ void wil_priv_deinit(struct wil6210_priv *wil)
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
- destroy_workqueue(wil->wmi_wq_conn);
+ wil_back_rx_flush(wil);
+ cancel_work_sync(&wil->back_rx_worker);
+ wil_back_tx_flush(wil);
+ cancel_work_sync(&wil->back_tx_worker);
+ wil_probe_client_flush(wil);
+ cancel_work_sync(&wil->probe_client_worker);
+ destroy_workqueue(wil->wq_service);
destroy_workqueue(wil->wmi_wq);
}
@@ -478,13 +520,10 @@ static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
u32 x;
- u32 rev_id;
- bool is_sparrow = (wil->board->board == WIL_BOARD_SPARROW);
+ bool is_reset_v2 = test_bit(hw_capability_reset_v2,
+ wil->hw_capabilities);
- wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->board->name);
-
- wil->hw_version = R(RGF_USER_FW_REV_ID);
- rev_id = wil->hw_version & 0xff;
+ wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
S(RGF_HP_CTRL, BIT(15));
@@ -496,7 +535,7 @@ static int wil_target_reset(struct wil6210_priv *wil)
/* Clear Fw Download notification */
C(RGF_USER_USAGE_6, BIT(0));
- if (is_sparrow) {
+ if (is_reset_v2) {
S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
/* XTAL stabilization should take about 3ms */
usleep_range(5000, 7000);
@@ -517,10 +556,11 @@ static int wil_target_reset(struct wil6210_priv *wil)
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, is_sparrow ? 0x000000f0 : 0x00000170);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3,
+ is_reset_v2 ? 0x000000f0 : 0x00000170);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
- if (is_sparrow) {
+ if (is_reset_v2) {
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
}
@@ -530,19 +570,14 @@ static int wil_target_reset(struct wil6210_priv *wil)
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- if (is_sparrow) {
+ if (is_reset_v2) {
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
/* reset A2 PCIE AHB */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
} else {
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
- if (rev_id == 1) {
- /* reset A1 BOTH PCIE AHB & PCIE RGF */
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
- } else {
- W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
- }
+ W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
}
/* TODO: check order here!!! Erez code is different */
@@ -559,8 +594,7 @@ static int wil_target_reset(struct wil6210_priv *wil)
}
} while (x != HW_MACHINE_BOOT_DONE);
- /* TODO: Erez check rev_id != 1 */
- if (!is_sparrow && (rev_id != 1))
+ if (!is_reset_v2)
W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -569,26 +603,6 @@ static int wil_target_reset(struct wil6210_priv *wil)
return 0;
}
-/**
- * wil_set_itr_trsh: - apply interrupt coalescing params
- */
-void wil_set_itr_trsh(struct wil6210_priv *wil)
-{
- /* disable, use usec resolution */
- W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_EXT_TICK);
-
- /* disable interrupt moderation for monitor
- * to get better timestamp precision
- */
- if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
- return;
-
- wil_info(wil, "set ITR_TRSH = %d usec\n", wil->itr_trsh);
- W(RGF_DMA_ITR_CNT_TRSH, wil->itr_trsh);
- W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_EN |
- BIT_DMA_ITR_CNT_CRL_EXT_TICK); /* start it */
-}
-
#undef R
#undef W
#undef S
@@ -629,13 +643,17 @@ int wil_reset(struct wil6210_priv *wil)
wil_dbg_misc(wil, "%s()\n", __func__);
+ if (wil->hw_version == HW_VER_UNKNOWN)
+ return -ENODEV;
+
WARN_ON(!mutex_is_locked(&wil->mutex));
- WARN_ON(test_bit(wil_status_napi_en, &wil->status));
+ WARN_ON(test_bit(wil_status_napi_en, wil->status));
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
- wil->status = 0; /* prevent NAPI from being scheduled */
+ /* prevent NAPI from being scheduled */
+ bitmap_zero(wil->status, wil_status_last);
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
@@ -649,7 +667,7 @@ int wil_reset(struct wil6210_priv *wil)
wmi_event_flush(wil);
- flush_workqueue(wil->wmi_wq_conn);
+ flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
rc = wil_target_reset(wil);
@@ -688,6 +706,7 @@ int wil_reset(struct wil6210_priv *wil)
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
+ wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
@@ -703,28 +722,6 @@ void wil_fw_error_recovery(struct wil6210_priv *wil)
schedule_work(&wil->fw_error_worker);
}
-void wil_link_on(struct wil6210_priv *wil)
-{
- struct net_device *ndev = wil_to_ndev(wil);
-
- wil_dbg_misc(wil, "%s()\n", __func__);
-
- netif_carrier_on(ndev);
- wil_dbg_misc(wil, "netif_tx_wake : link on\n");
- netif_tx_wake_all_queues(ndev);
-}
-
-void wil_link_off(struct wil6210_priv *wil)
-{
- struct net_device *ndev = wil_to_ndev(wil);
-
- wil_dbg_misc(wil, "%s()\n", __func__);
-
- netif_tx_stop_all_queues(ndev);
- wil_dbg_misc(wil, "netif_tx_stop : link off\n");
- netif_carrier_off(ndev);
-}
-
int __wil_up(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
@@ -774,7 +771,7 @@ int __wil_up(struct wil6210_priv *wil)
wil_dbg_misc(wil, "NAPI enable\n");
napi_enable(&wil->napi_rx);
napi_enable(&wil->napi_tx);
- set_bit(wil_status_napi_en, &wil->status);
+ set_bit(wil_status_napi_en, wil->status);
if (wil->platform_ops.bus_request)
wil->platform_ops.bus_request(wil->platform_handle,
@@ -807,7 +804,7 @@ int __wil_down(struct wil6210_priv *wil)
wil->platform_ops.bus_request(wil->platform_handle, 0);
wil_disable_irq(wil);
- if (test_and_clear_bit(wil_status_napi_en, &wil->status)) {
+ if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
napi_disable(&wil->napi_rx);
napi_disable(&wil->napi_tx);
wil_dbg_misc(wil, "NAPI disable\n");
@@ -822,15 +819,15 @@ int __wil_down(struct wil6210_priv *wil)
wil->scan_request = NULL;
}
- if (test_bit(wil_status_fwconnected, &wil->status) ||
- test_bit(wil_status_fwconnecting, &wil->status))
+ if (test_bit(wil_status_fwconnected, wil->status) ||
+ test_bit(wil_status_fwconnecting, wil->status))
wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
/* make sure wil is idle (not connected) */
mutex_unlock(&wil->mutex);
while (iter--) {
- int idle = !test_bit(wil_status_fwconnected, &wil->status) &&
- !test_bit(wil_status_fwconnecting, &wil->status);
+ int idle = !test_bit(wil_status_fwconnected, wil->status) &&
+ !test_bit(wil_status_fwconnecting, wil->status);
if (idle)
break;
msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index e81703ca7701..ace30c1b5c64 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -15,7 +15,6 @@
*/
#include <linux/etherdevice.h>
-
#include "wil6210.h"
#include "txrx.h"
@@ -122,6 +121,12 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
return min(tx_done, budget);
}
+static void wil_dev_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
+}
+
void *wil_if_alloc(struct device *dev, void __iomem *csr)
{
struct net_device *ndev;
@@ -153,7 +158,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
- ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
+ ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
if (!ndev) {
dev_err(dev, "alloc_netdev_mqs failed\n");
rc = -ENOMEM;
@@ -174,7 +179,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
- wil_link_off(wil);
+ netif_tx_stop_all_queues(ndev);
return wil;
@@ -217,8 +222,6 @@ int wil_if_add(struct wil6210_priv *wil)
return rc;
}
- wil_link_off(wil);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 66626a8ee728..3dd26709ccb2 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -31,6 +31,46 @@ static bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
+static
+void wil_set_capabilities(struct wil6210_priv *wil)
+{
+ u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+
+ bitmap_zero(wil->hw_capabilities, hw_capability_last);
+
+ switch (rev_id) {
+ case JTAG_DEV_ID_MARLON_B0:
+ wil->hw_name = "Marlon B0";
+ wil->hw_version = HW_VER_MARLON_B0;
+ break;
+ case JTAG_DEV_ID_SPARROW_A0:
+ wil->hw_name = "Sparrow A0";
+ wil->hw_version = HW_VER_SPARROW_A0;
+ break;
+ case JTAG_DEV_ID_SPARROW_A1:
+ wil->hw_name = "Sparrow A1";
+ wil->hw_version = HW_VER_SPARROW_A1;
+ break;
+ case JTAG_DEV_ID_SPARROW_B0:
+ wil->hw_name = "Sparrow B0";
+ wil->hw_version = HW_VER_SPARROW_B0;
+ break;
+ default:
+ wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id);
+ wil->hw_name = "Unknown";
+ wil->hw_version = HW_VER_UNKNOWN;
+ }
+
+ wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+
+ if (wil->hw_version >= HW_VER_SPARROW_A0)
+ set_bit(hw_capability_reset_v2, wil->hw_capabilities);
+
+ if (wil->hw_version >= HW_VER_SPARROW_B0)
+ set_bit(hw_capability_advanced_itr_moderation,
+ wil->hw_capabilities);
+}
+
void wil_disable_irq(struct wil6210_priv *wil)
{
int irq = wil->pdev->irq;
@@ -149,12 +189,11 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct wil6210_priv *wil;
struct device *dev = &pdev->dev;
void __iomem *csr;
- struct wil_board *board = (struct wil_board *)id->driver_data;
int rc;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
- " \"%s\" device found [%04x:%04x] (rev %x)\n", board->name,
+ " device found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
@@ -204,8 +243,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, wil);
wil->pdev = pdev;
- wil->board = board;
-
+ wil_set_capabilities(wil);
wil6210_clear_irq(wil);
wil->platform_handle =
@@ -266,23 +304,10 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static const struct wil_board wil_board_marlon = {
- .board = WIL_BOARD_MARLON,
- .name = "marlon",
-};
-
-static const struct wil_board wil_board_sparrow = {
- .board = WIL_BOARD_SPARROW,
- .name = "sparrow",
-};
-
static const struct pci_device_id wil6210_pcie_ids[] = {
- { PCI_DEVICE(0x1ae9, 0x0301),
- .driver_data = (kernel_ulong_t)&wil_board_marlon },
- { PCI_DEVICE(0x1ae9, 0x0310),
- .driver_data = (kernel_ulong_t)&wil_board_sparrow },
- { PCI_DEVICE(0x1ae9, 0x0302), /* same as above, firmware broken */
- .driver_data = (kernel_ulong_t)&wil_board_sparrow },
+ { PCI_DEVICE(0x1ae9, 0x0301) },
+ { PCI_DEVICE(0x1ae9, 0x0310) },
+ { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 489cb73d139b..ca10dcf0986e 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -89,7 +89,9 @@ static void wil_reorder_release(struct wil6210_priv *wil,
}
}
+/* called in NAPI context */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct net_device *ndev = wil_to_ndev(wil);
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
@@ -97,22 +99,26 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
int cid = wil_rxdesc_cid(d);
int mid = wil_rxdesc_mid(d);
u16 seq = wil_rxdesc_seq(d);
+ int mcast = wil_rxdesc_mcast(d);
struct wil_sta_info *sta = &wil->sta[cid];
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
- unsigned long flags;
- wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
- mid, cid, tid, seq);
+ wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
+ mid, cid, tid, seq, mcast);
- spin_lock_irqsave(&sta->tid_rx_lock, flags);
+ if (unlikely(mcast)) {
+ wil_netif_rx_any(skb, ndev);
+ return;
+ }
+
+ spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
if (!r) {
- spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
wil_netif_rx_any(skb, ndev);
- return;
+ goto out;
}
hseq = r->head_seq_num;
@@ -121,13 +127,24 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
	 * reported, and data Rx, a few packets may be passed up before the
	 * reorder buffer gets allocated. Catch up by pretending SSN is what we
* see in the 1-st Rx packet
+ *
+	 * Another scenario: Rx gets delayed and we get a packet from before
+	 * the BACK. Pass it to the stack and wait.
*/
if (r->first_time) {
r->first_time = false;
if (seq != r->head_seq_num) {
- wil_err(wil, "Error: 1-st frame with wrong sequence"
- " %d, should be %d. Fixing...\n", seq,
- r->head_seq_num);
+ if (seq_less(seq, r->head_seq_num)) {
+ wil_err(wil,
+ "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
+ seq, r->head_seq_num);
+ r->first_time = true;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+ wil_err(wil,
+ "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
+ seq, r->head_seq_num);
r->head_seq_num = seq;
r->ssn = seq;
}
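
The new early-sequence branch relies on seq_less(), the driver's wrap-aware comparison over the 12-bit 802.11 sequence space (consistent with the "& 0xfff" masking used elsewhere in this file); its definition is outside this diff. A runnable sketch of how such a comparison is commonly written, under that 12-bit assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

/* true if sq1 precedes sq2, treating the 0..4095 space as circular */
static bool seq_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

int main(void)
{
	printf("%d %d %d\n",
	       seq_less(5, 10),		/* 1: 5 comes before 10 */
	       seq_less(10, 5),		/* 0 */
	       seq_less(4090, 3));	/* 1: 4090 precedes 3 across the wrap */
	return 0;
}
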
@@ -179,7 +196,7 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
wil_reorder_release(wil, r);
out:
- spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
+ spin_unlock(&sta->tid_rx_lock);
}
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
@@ -219,3 +236,241 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
kfree(r->reorder_time);
kfree(r);
}
+
+/* ADDBA processing */
+static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
+{
+ u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
+ (mtu_max + WIL_MAX_MPDU_OVERHEAD));
+
+ if (!req_agg_wsize)
+ return max_agg_size;
+
+ return min(max_agg_size, req_agg_wsize);
+}
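
wil_agg_size() caps the aggregation window both by a driver-wide maximum and by how many maximum-size MPDUs fit into the largest A-MPDU, and treats a requested size of 0 as "no preference". A runnable worked example; the three constants are placeholders for WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE and WIL_MAX_MPDU_OVERHEAD, whose real values live in the driver header outside this diff:

#include <stdint.h>
#include <stdio.h>

#define MAX_AGG_WSIZE	64		/* assumed driver limit, in frames */
#define MAX_AMPDU_SIZE	(64 * 1024)	/* assumed max A-MPDU, in bytes */
#define MPDU_OVERHEAD	496		/* assumed per-MPDU overhead, in bytes */

static uint16_t agg_size(uint16_t mtu_max, uint16_t req_agg_wsize)
{
	uint16_t by_bytes = MAX_AMPDU_SIZE / (mtu_max + MPDU_OVERHEAD);
	uint16_t max_agg = by_bytes < MAX_AGG_WSIZE ? by_bytes : MAX_AGG_WSIZE;

	if (!req_agg_wsize)
		return max_agg;
	return req_agg_wsize < max_agg ? req_agg_wsize : max_agg;
}

int main(void)
{
	/* mtu_max 2304: 65536 / (2304 + 496) = 23 frames per A-MPDU */
	printf("%u\n", agg_size(2304, 0));	/* 23: capped by A-MPDU size */
	printf("%u\n", agg_size(2304, 8));	/* 8: the peer asked for less */
	return 0;
}
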
+
+/* Block Ack - Rx side (recipient) */
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl)
+{
+ struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->cidxtid = cidxtid;
+ req->dialog_token = dialog_token;
+ req->ba_param_set = le16_to_cpu(ba_param_set);
+ req->ba_timeout = le16_to_cpu(ba_timeout);
+ req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
+
+ mutex_lock(&wil->back_rx_mutex);
+ list_add_tail(&req->list, &wil->back_rx_pending);
+ mutex_unlock(&wil->back_rx_mutex);
+
+ queue_work(wil->wq_service, &wil->back_rx_worker);
+
+ return 0;
+}
+
+static void wil_back_rx_handle(struct wil6210_priv *wil,
+ struct wil_back_rx *req)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wil_sta_info *sta;
+ u8 cid, tid;
+ u16 agg_wsize = 0;
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
+ bool agg_amsdu = !!(req->ba_param_set & BIT(0));
+ int ba_policy = req->ba_param_set & BIT(1);
+ u16 agg_timeout = req->ba_timeout;
+ u16 status = WLAN_STATUS_SUCCESS;
+ u16 ssn = req->ba_seq_ctrl >> 4;
+ struct wil_tid_ampdu_rx *r;
+ int rc;
+
+ might_sleep();
+ parse_cidxtid(req->cidxtid, &cid, &tid);
+
+ /* sanity checks */
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "BACK: invalid CID %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ if (sta->status != wil_sta_connected) {
+ wil_err(wil, "BACK: CID %d not connected\n", cid);
+ return;
+ }
+
+ wil_dbg_wmi(wil,
+ "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
+ cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
+ agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+
+ /* apply policies */
+ if (ba_policy) {
+ wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
+ status = WLAN_STATUS_INVALID_QOS_PARAM;
+ }
+ if (status == WLAN_STATUS_SUCCESS)
+ agg_wsize = wil_agg_size(wil, req_agg_wsize);
+
+ rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+ agg_amsdu, agg_wsize, agg_timeout);
+ if (rc || (status != WLAN_STATUS_SUCCESS))
+ return;
+
+ /* apply */
+ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
+ spin_lock_bh(&sta->tid_rx_lock);
+ wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
+ sta->tid_rx[tid] = r;
+ spin_unlock_bh(&sta->tid_rx_lock);
+}
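
wil_back_rx_handle() unpacks the ADDBA request fields exactly as the comment above describes: A-MSDU support in bit 0, policy in bit 1, TID in bits 2..5 and buffer size in bits 6..15 of ba_param_set, plus the SSN in the upper 12 bits of ba_seq_ctrl. A runnable decode of a sample value, using explicit shifts in place of the driver's WIL_GET_BITS() macro; the sample numbers are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ba_param_set = 0x0405;	/* A-MSDU=1, policy=0, TID=1, bufsize=16 */
	uint16_t ba_seq_ctrl  = 0x07b0;	/* fragment 0, SSN 0x07b */

	int amsdu            = !!(ba_param_set & 0x0001);	/* bit 0 */
	int policy           = !!(ba_param_set & 0x0002);	/* bit 1 */
	unsigned int tid     = (ba_param_set >> 2) & 0x0f;	/* bits 2..5 */
	unsigned int bufsize = (ba_param_set >> 6) & 0x3ff;	/* bits 6..15 */
	unsigned int ssn     = ba_seq_ctrl >> 4;		/* upper 12 bits */

	printf("amsdu %d policy %d tid %u bufsize %u ssn 0x%03x\n",
	       amsdu, policy, tid, bufsize, ssn);
	return 0;
}
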
+
+void wil_back_rx_flush(struct wil6210_priv *wil)
+{
+ struct wil_back_rx *evt, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->back_rx_mutex);
+
+ list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ mutex_unlock(&wil->back_rx_mutex);
+}
+
+/* Retrieve next ADDBA request from the pending list */
+static struct list_head *next_back_rx(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->back_rx_mutex);
+
+ if (!list_empty(&wil->back_rx_pending)) {
+ ret = wil->back_rx_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->back_rx_mutex);
+
+ return ret;
+}
+
+void wil_back_rx_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ back_rx_worker);
+ struct wil_back_rx *evt;
+ struct list_head *lh;
+
+ while ((lh = next_back_rx(wil)) != NULL) {
+ evt = list_entry(lh, struct wil_back_rx, list);
+
+ wil_back_rx_handle(wil, evt);
+ kfree(evt);
+ }
+}
+
+/* BACK - Tx (originator) side */
+static void wil_back_tx_handle(struct wil6210_priv *wil,
+ struct wil_back_tx *req)
+{
+ struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
+ int rc;
+
+ if (txdata->addba_in_progress) {
+ wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
+ req->ringid);
+ return;
+ }
+ if (txdata->agg_wsize) {
+ wil_dbg_misc(wil,
+ "ADDBA for vring[%d] already established wsize %d\n",
+ req->ringid, txdata->agg_wsize);
+ return;
+ }
+ txdata->addba_in_progress = true;
+ rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
+ if (rc)
+ txdata->addba_in_progress = false;
+}
+
+static struct list_head *next_back_tx(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->back_tx_mutex);
+
+ if (!list_empty(&wil->back_tx_pending)) {
+ ret = wil->back_tx_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->back_tx_mutex);
+
+ return ret;
+}
+
+void wil_back_tx_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ back_tx_worker);
+ struct wil_back_tx *evt;
+ struct list_head *lh;
+
+ while ((lh = next_back_tx(wil)) != NULL) {
+ evt = list_entry(lh, struct wil_back_tx, list);
+
+ wil_back_tx_handle(wil, evt);
+ kfree(evt);
+ }
+}
+
+void wil_back_tx_flush(struct wil6210_priv *wil)
+{
+ struct wil_back_tx *evt, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->back_tx_mutex);
+
+ list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ mutex_unlock(&wil->back_tx_mutex);
+}
+
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
+{
+ struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->ringid = ringid;
+ req->agg_wsize = wil_agg_size(wil, wsize);
+ req->agg_timeout = 0;
+
+ mutex_lock(&wil->back_tx_mutex);
+ list_add_tail(&req->list, &wil->back_tx_pending);
+ mutex_unlock(&wil->back_tx_mutex);
+
+ queue_work(wil->wq_service, &wil->back_tx_worker);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index e3f8bdce5abc..8439f65db259 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -463,7 +463,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
* and in case of error drop the packet
* higher stack layers will handle retransmission (if required)
*/
- if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
+ if (d->dma.status & RX_DMA_STATUS_L4I) {
/* L4 protocol identified, csum calculated */
if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -581,14 +581,8 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
skb->protocol = htons(ETH_P_802_2);
wil_netif_rx_any(skb, ndev);
} else {
- struct ethhdr *eth = (void *)skb->data;
-
skb->protocol = eth_type_trans(skb, ndev);
-
- if (is_unicast_ether_addr(eth->h_dest))
- wil_rx_reorder(wil, skb);
- else
- wil_netif_rx_any(skb, ndev);
+ wil_rx_reorder(wil, skb);
}
}
wil_rx_refill(wil, v->size);
@@ -645,7 +639,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.vring_cfg = {
.tx_sw_ring = {
.max_mpdu_size =
- cpu_to_le16(mtu_max + ETH_HLEN),
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.ring_size = cpu_to_le16(size),
},
.ringid = id,
@@ -653,7 +647,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
- .agg_max_wsize = 16,
+ .agg_max_wsize = 0,
.schd_params = {
.priority = cpu_to_le16(0),
.timeslot_us = cpu_to_le16(0xfff),
@@ -677,6 +671,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
}
memset(txdata, 0, sizeof(*txdata));
+ spin_lock_init(&txdata->lock);
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
@@ -701,6 +696,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
+ wil_addba_tx_request(wil, id, agg_wsize);
return 0;
out_free:
@@ -713,6 +710,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
WARN_ON(!mutex_is_locked(&wil->mutex));
@@ -721,12 +719,15 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0; /* no Tx can be in progress or start anew */
+ spin_unlock_bh(&txdata->lock);
/* make sure NAPI won't touch this vring */
- wil->vring_tx_data[id].enabled = 0;
- if (test_bit(wil_status_napi_en, &wil->status))
+ if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
wil_vring_free(wil, vring, 1);
+ memset(txdata, 0, sizeof(*txdata));
}
static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
@@ -773,6 +774,38 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb);
+
+static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v;
+ int i;
+ u8 cid;
+
+	/* In STA mode, only 1 VRING is expected - the one
+	 * for the AP we are connected to.
+	 * Find the 1-st vring and check whether it is eligible for data
+ */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ continue;
+
+ cid = wil->vring2cid_tid[i][0];
+ if (!wil->sta[cid].data_port_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ break;
+
+ wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
+
+ return v;
+ }
+
+ wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+
+ return NULL;
+}
+
/*
* Find 1-st vring and return it; set dest address for this vring in skb
* duplicate skb and send it to other active vrings
@@ -843,9 +876,6 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
d->mac.d[1] = 0;
d->mac.d[2] = 0;
d->mac.ucode_cmd = 0;
- /* use dst index 0 */
- d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
- (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
(1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
@@ -908,8 +938,8 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
return 0;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
- struct sk_buff *skb)
+static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
struct vring_tx_desc dd, *d = &dd;
@@ -925,18 +955,21 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "%s()\n", __func__);
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
if (avail < 1 + nr_frags) {
wil_err_ratelimited(wil,
- "Tx ring full. No space for %d fragments\n",
- 1 + nr_frags);
+ "Tx ring[%2d] full. No space for %d fragments\n",
+ vring_index, 1 + nr_frags);
return -ENOMEM;
}
_d = &vring->va[i].tx;
pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
- skb->data, &pa);
+ wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
+ skb_headlen(skb), skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
@@ -947,15 +980,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
/* Process TCP/UDP checksum offloading */
if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
- wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
+ wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
vring_index);
goto dma_error;
}
vring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags);
- if (nr_frags)
- *_d = *d;
/* middle segments */
for (; f < nr_frags; f++) {
@@ -963,6 +994,10 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
&skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
+ *_d = *d;
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
i = (swhead + f + 1) % vring->size;
_d = &vring->va[i].tx;
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
@@ -976,13 +1011,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
* it will succeed here too
*/
wil_tx_desc_offload_cksum_set(wil, d, skb);
- *_d = *d;
}
/* for the last seg only */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
/* hold reference to skb
* to prevent skb release before accounting
@@ -990,15 +1027,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
*/
vring->ctx[i].skb = skb_get(skb);
- wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
- (const void *)d, sizeof(*d), false);
-
if (wil_vring_is_empty(vring)) /* performance monitoring */
txdata->idle += get_cycles() - txdata->last_idle;
/* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1);
- wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
+ vring->swhead);
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
@@ -1025,6 +1060,19 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return -EINVAL;
}
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int rc;
+
+ spin_lock(&txdata->lock);
+ rc = __wil_tx_vring(wil, vring, skb);
+ spin_unlock(&txdata->lock);
+ return rc;
+}
+
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
@@ -1034,14 +1082,14 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int rc;
wil_dbg_txrx(wil, "%s()\n", __func__);
- if (!test_bit(wil_status_fwready, &wil->status)) {
+ if (!test_bit(wil_status_fwready, wil->status)) {
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
pr_once_fw = true;
}
goto drop;
}
- if (!test_bit(wil_status_fwconnected, &wil->status)) {
+ if (!test_bit(wil_status_fwconnected, wil->status)) {
wil_err(wil, "FW not connected\n");
goto drop;
}
@@ -1052,15 +1100,19 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pr_once_fw = false;
/* find vring */
- if (is_unicast_ether_addr(eth->h_dest))
- vring = wil_find_tx_vring(wil, skb);
- else
- vring = wil_tx_bcast(wil, skb);
+ if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
+ /* in STA mode (ESS), all to same VRING */
+ vring = wil_find_tx_vring_sta(wil, skb);
+ } else { /* direct communication, find matching VRING */
+ if (is_unicast_ether_addr(eth->h_dest))
+ vring = wil_find_tx_vring(wil, skb);
+ else
+ vring = wil_tx_bcast(wil, skb);
+ }
if (!vring) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
goto drop;
}
-
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
@@ -1087,6 +1139,22 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NET_XMIT_DROP;
}
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+ if (unlikely(wil_need_txstat(skb)))
+ skb_complete_wifi_ack(skb, acked);
+ else
+ acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
/**
* Clean up transmitted skb's from the Tx VRING
*
@@ -1147,10 +1215,10 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
d->dma.error);
wil_dbg_txrx(wil,
- "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
- vring->swtail, dmalen, d->dma.status,
- d->dma.error);
- wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ ringid, vring->swtail, dmalen,
+ d->dma.status, d->dma.error);
+ wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
wil_txdesc_unmap(dev, d, ctx);
@@ -1165,8 +1233,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
ndev->stats.tx_errors++;
stats->tx_errors++;
}
-
- dev_kfree_skb_any(skb);
+ wil_consume_skb(skb, d->dma.error == 0);
}
memset(ctx, 0, sizeof(*ctx));
/* There is no need to touch HW descriptor:
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 630aeb5fa7f4..d90c8aa20c15 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -20,17 +20,15 @@
#define BUF_SW_OWNED (1)
#define BUF_HW_OWNED (0)
-/* size of max. Tx/Rx buffers, as supported by FW */
-#define TXRX_BUF_LEN_DEFAULT (2242)
+/* default size of MAC Tx/Rx buffers */
+#define TXRX_BUF_LEN_DEFAULT (2048)
/* how many bytes to reserve for rtap header? */
#define WIL6210_RTAP_SIZE (128)
/* Tx/Rx path */
-/*
- * Common representation of physical address in Vring
- */
+/* Common representation of physical address in Vring */
struct vring_dma_addr {
__le32 addr_low;
__le16 addr_high;
@@ -49,11 +47,10 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
}
-/*
- * Tx descriptor - MAC part
+/* Tx descriptor - MAC part
* [dword 0]
* bit 0.. 9 : lifetime_expiry_value:10
- * bit 10 : interrup_en:1
+ * bit 10 : interrupt_en:1
* bit 11 : status_en:1
* bit 12..13 : txss_override:2
* bit 14 : timestamp_insertion:1
@@ -61,15 +58,12 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
* bit 16..21 : reserved0:6
* bit 22..26 : mcs_index:5
* bit 27 : mcs_en:1
- * bit 28..29 : reserved1:2
- * bit 30 : reserved2:1
+ * bit 28..30 : reserved1:3
* bit 31 : sn_preserved:1
* [dword 1]
* bit 0.. 3 : pkt_mode:4
* bit 4 : pkt_mode_en:1
- * bit 5.. 7 : reserved0:3
- * bit 8..13 : reserved1:6
- * bit 14 : reserved2:1
+ * bit 5..14 : reserved0:10
* bit 15 : ack_policy_en:1
* bit 16..19 : dst_index:4
* bit 20 : dst_index_en:1
@@ -80,7 +74,7 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
* [dword 2]
* bit 0.. 7 : num_of_descriptors:8
* bit 8..17 : reserved:10
- * bit 18..19 : l2_translation_type:2
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
* bit 20 : snap_hdr_insertion_en:1
* bit 21 : vlan_removal_en:1
* bit 22..31 : reserved0:10
@@ -247,6 +241,46 @@ struct vring_tx_mac {
#define TX_DMA_STATUS_DU BIT(0)
+/* Tx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8 : cmd_eop:1 This descriptor is the last one in the packet
+ * bit 9 : reserved
+ * bit 10 : cmd_dma_it:1 immediate interrupt
+ * bit 11..12 : SBD - Segment Buffer Details
+ * 00 - Header Segment
+ * 01 - First Data Segment
+ * 10 - Medium Data Segment
+ * 11 - Last Data Segment
+ * bit 13 : TSE - TCP Segmentation Enable
+ * bit 14 : IIC - Directs the HW to Insert IPv4 Checksum
+ * bit 15 : ITC - Directs the HW to Insert TCP/UDP Checksum
+ * bit 16..20 : QID - The target QID that the packet should be stored
+ * in the MAC.
+ * bit 21 : PO - Pseudo header Offload:
+ * 0 - Use the pseudo header value from the TCP checksum field
+ * 1 - Calculate Pseudo header Checksum
+ * bit 22 : NC - No UDP Checksum
+ * bit 23..29 : reserved
+ * bit 30..31 : L4T - Layer 4 Type: 00 - UDP, 01 - TCP, 10, 11 - Reserved
+ * If L4Len equals 0, no L4 at all
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * [byte 12] error
+ * bit 0..2 : mac_status:3
+ * bit 3..7 : reserved:5
+ * [byte 13] status
+ * bit 0 : DU:1 Descriptor Used
+ * bit 1..7 : reserved:7
+ * [word 7] length
+ */
struct vring_tx_dma {
u32 d0;
struct vring_dma_addr addr;
@@ -257,45 +291,45 @@ struct vring_tx_dma {
__le16 length;
} __packed;
-/*
- * Rx descriptor - MAC part
+/* Rx descriptor - MAC part
* [dword 0]
* bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
- * bit 4.. 6 : connection_id:3 :The Source index that was found during
- * Parsing the TA. This field is used to define the source of the packet
+ * bit 4.. 6 : cid:3 The Source index that was found during parsing the TA.
+ * This field is used to define the source of the packet
* bit 7 : reserved:1
- * bit 8.. 9 : mac_id:2 : The MAC virtual Ring number (always zero)
- * bit 10..11 : frame_type:2 : The FC Control (b3-2) - MPDU Type
- * (management, data, control and extension)
- * bit 12..15 : frame_subtype:4 : The FC Control (b7-4) - Frame Subtype
+ * bit 8.. 9 : mid:2 The MAC virtual number
+ * bit 10..11 : frame_type:2 : The FC (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC (b7-4) - Frame Subtype
* bit 16..27 : seq_number:12 The received Sequence number field
* bit 28..31 : extended:4 extended subtype
* [dword 1]
* bit 0.. 3 : reserved
* bit 4.. 5 : key_id:2
* bit 6 : decrypt_bypass:1
- * bit 7 : security:1
- * bit 8.. 9 : ds_bits:2
- * bit 10 : a_msdu_present:1 from qos header
- * bit 11 : a_msdu_type:1 from qos header
+ * bit 7 : security:1 FC (b14)
+ * bit 8.. 9 : ds_bits:2 FC (b9-8)
+ * bit 10 : a_msdu_present:1 QoS (b7)
+ * bit 11 : a_msdu_type:1 QoS (b8)
* bit 12 : a_mpdu:1 part of AMPDU aggregation
* bit 13 : broadcast:1
* bit 14 : multicast:1
* bit 15 : reserved:1
- * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
- * is received from
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
* bit 21..24 : mcs:4
- * bit 25..28 : mic_icr:4
+ * bit 25..28 : mic_icr:4 this signal tells the DMA to assert an interrupt
+ * after it writes the packet
* bit 29..31 : reserved:3
* [dword 2]
* bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
- * bit 3 : fc_protocol_ver:1 The FC Control (b0) - Protocol Version
- * bit 4 : fc_order:1 The FC Control (b15) -Order
- * bit 5.. 7 : qos_ack_policy:3 The QoS (b6-5) ack policy Field
+ * bit 3.. 4 : fc_protocol_ver:2 The FC (b1-0) - Protocol Version
+ * bit 5 : fc_order:1 The FC Control (b15) - Order
+ * bit 6.. 7 : qos_ack_policy:2 The QoS (b6-5) ack policy Field
* bit 8 : esop:1 The QoS (b4) ESOP field
- * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
- * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
- * bit 15 : qos_ac_constraint:1
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1 QoS (b15)
* bit 16..31 : pn_15_0:16 low 2 bytes of PN
* [dword 3]
* bit 0..31 : pn_47_16:32 high 4 bytes of PN
@@ -308,35 +342,46 @@ struct vring_rx_mac {
u32 pn_47_16;
} __packed;
-/*
- * Rx descriptor - DMA part
+/* Rx descriptor - DMA part
* [dword 0]
- * bit 0.. 7 : l4_length:8 layer 4 length
- * bit 8.. 9 : reserved:2
- * bit 10 : cmd_dma_it:1
+ * bit 0.. 7 : l4_length:8 layer 4 length. The field is only valid if
+ * L4I bit is set
+ * bit 8 : cmd_eop:1 set to 1
+ * bit 9 : cmd_rt:1 set to 1
+ * bit 10 : cmd_dma_it:1 immediate interrupt
* bit 11..15 : reserved:5
- * bit 16..29 : phy_info_length:14
+ * bit 16..29 : phy_info_length:14 It is valid when the PII is set.
+ * When the FFM bit is set, bits 29-27 are used for
+ * Flex Filter Match - the matching index to one of the L2
+ * EtherType Flex Filters
* bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * 00 - UDP, 01 - TCP, 10, 11 - reserved
* [dword 1]
* bit 0..31 : addr_low:32 The payload buffer low address
* [dword 2]
* bit 0..15 : addr_high:16 The payload buffer high address
- * bit 16..23 : ip_length:8
+ * bit 16..23 : ip_length:8 The field is valid only if the L3I bit is set
* bit 24..30 : mac_length:7
- * bit 31 : ip_version:1
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
* [dword 3]
* [byte 12] error
+ * bit 0 : FCS:1
+ * bit 1 : MIC:1
+ * bit 2 : Key miss:1
+ * bit 3 : Replay:1
+ * bit 4 : L3:1 IPv4 checksum
+ * bit 5 : L4:1 TCP/UDP checksum
+ * bit 6..7 : reserved:2
* [byte 13] status
- * bit 0 : du:1
- * bit 1 : eop:1
+ * bit 0 : DU:1 Descriptor Used
+ * bit 1 : EOP:1 The descriptor indicates the End of Packet
* bit 2 : error:1
- * bit 3 : mi:1
- * bit 4 : l3_identified:1
- * bit 5 : l4_identified:1
- * bit 6 : phy_info_included:1
- * bit 7 : reserved:1
+ * bit 3 : MI:1 MAC Interrupt is asserted (according to parser decision)
+ * bit 4 : L3I:1 L3 identified and checksum calculated
+ * bit 5 : L4I:1 L4 identified and checksum calculated
+ * bit 6 : PII:1 PHY Info Included in the packet
+ * bit 7 : FFM:1 EtherType Flex Filter Match
* [word 7] length
- *
*/
#define RX_DMA_D0_CMD_DMA_IT BIT(10)
@@ -349,9 +394,9 @@ struct vring_rx_mac {
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_ERROR BIT(2)
-#define RX_DMA_STATUS_L3_IDENT BIT(4)
-#define RX_DMA_STATUS_L4_IDENT BIT(5)
-#define RX_DMA_STATUS_PHY_INFO BIT(6)
+#define RX_DMA_STATUS_L3I BIT(4)
+#define RX_DMA_STATUS_L4I BIT(5)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
struct vring_rx_dma {
u32 d0;
@@ -423,6 +468,11 @@ static inline int wil_rxdesc_mcs(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d1, 21, 24);
}
+static inline int wil_rxdesc_mcast(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 13, 14);
+}
+
static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->dma.d0, 16, 29);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index c6ec5b99ac7d..94611568fc9a 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -25,19 +25,14 @@
extern bool no_fw_recovery;
extern unsigned int mtu_max;
+extern unsigned short rx_ring_overflow_thrsh;
+extern int agg_wsize;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw"
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
-struct wil_board {
- int board;
-#define WIL_BOARD_MARLON (1)
-#define WIL_BOARD_SPARROW (2)
- const char * const name;
-};
-
/**
* extract bits [@b0:@b1] (inclusive) from the value @x
* it should be @b0 <= @b1, or result is incorrect
@@ -49,21 +44,50 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
-#define WIL_RX_RING_SIZE_ORDER_DEFAULT (9)
-#define WIL_TX_RING_SIZE_ORDER_DEFAULT (9)
+#define WIL_TX_Q_LEN_DEFAULT (4000)
+#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
/* limit ring size in range [32..32k] */
#define WIL_RING_SIZE_ORDER_MIN (5)
#define WIL_RING_SIZE_ORDER_MAX (15)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
+#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
+/* Hardware offload block adds the following:
+ * 26 bytes - 3-address QoS data header
+ * 8 bytes - IV + EIV (for GCMP)
+ * 8 bytes - SNAP
+ * 16 bytes - MIC (for GCMP)
+ * 4 bytes - CRC
+ */
+#define WIL_MAX_MPDU_OVERHEAD (62)
+
+/* Calculate MAC buffer size for the firmware. It includes all overhead,
+ * as it will go over the air, and needs to be 8-byte aligned
+ */
+static inline u32 wil_mtu2macbuf(u32 mtu)
+{
+ return ALIGN(mtu + WIL_MAX_MPDU_OVERHEAD, 8);
+}
+
+/* MTU for Ethernet needs to take into account the 8-byte SNAP header
+ * to be added when encapsulating an Ethernet frame into 802.11
+ */
+#define WIL_MAX_ETH_MTU (IEEE80211_MAX_DATA_LEN_DMG - 8)
/* Max supported by wil6210 value for interrupt threshold is 5sec. */
#define WIL6210_ITR_TRSH_MAX (5000000)
-#define WIL6210_ITR_TRSH_DEFAULT (300) /* usec */
+#define WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
+#define WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
+#define WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
+#define WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
-
+#define WIL6210_RX_HIGH_TRSH_INIT (0)
+#define WIL6210_RX_HIGH_TRSH_DEFAULT \
+ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
/* Hardware definitions begin */
/*
@@ -135,7 +159,7 @@ struct RGF_ICR {
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
#define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
-/* Interrupt moderation control */
+/* Legacy interrupt moderation control (before Sparrow v2) */
#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
#define RGF_DMA_ITR_CNT_DATA (0x881c60)
#define RGF_DMA_ITR_CNT_CRL (0x881c64)
@@ -145,6 +169,46 @@ struct RGF_ICR {
#define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
#define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+/* New (sparrow v2+) interrupt moderation control */
+#define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40)
+#define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34)
+#define RGF_DMA_ITR_TX_CNT_DATA (0x881d38)
+#define RGF_DMA_ITR_TX_CNT_CTL (0x881d3c)
+ #define BIT_DMA_ITR_TX_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_TX_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_TX_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_TX_CNT_CTL_REACHED_TRESH BIT(4)
+ #define BIT_DMA_ITR_TX_CNT_CTL_CROSS_EN BIT(5)
+ #define BIT_DMA_ITR_TX_CNT_CTL_FREE_RUNNIG BIT(6)
+#define RGF_DMA_ITR_TX_IDL_CNT_TRSH (0x881d60)
+#define RGF_DMA_ITR_TX_IDL_CNT_DATA (0x881d64)
+#define RGF_DMA_ITR_TX_IDL_CNT_CTL (0x881d68)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_ITR_RX_DESQ_NO_MOD (0x881d50)
+#define RGF_DMA_ITR_RX_CNT_TRSH (0x881d44)
+#define RGF_DMA_ITR_RX_CNT_DATA (0x881d48)
+#define RGF_DMA_ITR_RX_CNT_CTL (0x881d4c)
+ #define BIT_DMA_ITR_RX_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_RX_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_RX_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_RX_CNT_CTL_REACHED_TRESH BIT(4)
+ #define BIT_DMA_ITR_RX_CNT_CTL_CROSS_EN BIT(5)
+ #define BIT_DMA_ITR_RX_CNT_CTL_FREE_RUNNIG BIT(6)
+#define RGF_DMA_ITR_RX_IDL_CNT_TRSH (0x881d54)
+#define RGF_DMA_ITR_RX_IDL_CNT_DATA (0x881d58)
+#define RGF_DMA_ITR_RX_IDL_CNT_CTL (0x881d5c)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+
#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
@@ -164,6 +228,20 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
+ #define JTAG_DEV_ID_MARLON_B0 (0x0612072f)
+ #define JTAG_DEV_ID_SPARROW_A0 (0x0632072f)
+ #define JTAG_DEV_ID_SPARROW_A1 (0x1632072f)
+ #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
+
+enum {
+ HW_VER_UNKNOWN,
+ HW_VER_MARLON_B0, /* JTAG_DEV_ID_MARLON_B0 */
+ HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */
+ HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */
+ HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
+};
+
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@@ -303,6 +381,11 @@ struct vring {
struct vring_tx_data {
int enabled;
cycles_t idle, last_idle, begin;
+ u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
+ u16 agg_timeout;
+ u8 agg_amsdu;
+ bool addba_in_progress; /* if set, agg_xxx is for request in progress */
+ spinlock_t lock;
};
enum { /* for wil6210_priv.status */
@@ -313,6 +396,7 @@ enum { /* for wil6210_priv.status */
wil_status_reset_done,
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
+ wil_status_last /* keep last */
};
struct pci_dev;
@@ -397,15 +481,46 @@ enum {
fw_recovery_running = 2,
};
+enum {
+ hw_capability_reset_v2 = 0,
+ hw_capability_advanced_itr_moderation = 1,
+ hw_capability_last
+};
+
+struct wil_back_rx {
+ struct list_head list;
+ /* request params, converted to CPU byte order - what we asked for */
+ u8 cidxtid;
+ u8 dialog_token;
+ u16 ba_param_set;
+ u16 ba_timeout;
+ u16 ba_seq_ctrl;
+};
+
+struct wil_back_tx {
+ struct list_head list;
+ /* request params, converted to CPU byte order - what we asked for */
+ u8 ringid;
+ u8 agg_wsize;
+ u16 agg_timeout;
+};
+
+struct wil_probe_client_req {
+ struct list_head list;
+ u64 cookie;
+ u8 cid;
+};
+
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
struct wireless_dev *wdev;
void __iomem *csr;
- ulong status;
+ DECLARE_BITMAP(status, wil_status_last);
u32 fw_version;
u32 hw_version;
- struct wil_board *board;
+ const char *hw_name;
+ DECLARE_BITMAP(hw_capabilities, hw_capability_last);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
@@ -415,7 +530,11 @@ struct wil6210_priv {
u32 monitor_flags;
u32 secure_pcp; /* create secure PCP? */
int sinfo_gen;
- u32 itr_trsh;
+ /* interrupt moderation */
+ u32 tx_max_burst_duration;
+ u32 tx_interframe_timeout;
+ u32 rx_max_burst_duration;
+ u32 rx_interframe_timeout;
/* cached ISR registers */
u32 isr_misc;
/* mailbox related */
@@ -429,7 +548,7 @@ struct wil6210_priv {
u16 reply_size;
struct workqueue_struct *wmi_wq; /* for deferred calls */
struct work_struct wmi_event_worker;
- struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+ struct workqueue_struct *wq_service;
struct work_struct connect_worker;
struct work_struct disconnect_worker;
struct work_struct fw_error_worker; /* for FW error recovery */
@@ -445,6 +564,17 @@ struct wil6210_priv {
spinlock_t wmi_ev_lock;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
+ /* BACK */
+ struct list_head back_rx_pending;
+ struct mutex back_rx_mutex; /* protect @back_rx_pending */
+ struct work_struct back_rx_worker;
+ struct list_head back_tx_pending;
+ struct mutex back_tx_mutex; /* protect @back_tx_pending */
+ struct work_struct back_tx_worker;
+ /* keep alive */
+ struct list_head probe_client_pending;
+ struct mutex probe_client_mutex; /* protect @probe_client_pending */
+ struct work_struct probe_client_worker;
/* DMA related */
struct vring vring_rx;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
@@ -529,11 +659,8 @@ void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil);
-void wil_set_itr_trsh(struct wil6210_priv *wil);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);
-void wil_link_on(struct wil6210_priv *wil);
-void wil_link_off(struct wil6210_priv *wil);
int wil_up(struct wil6210_priv *wil);
int __wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
@@ -567,12 +694,26 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
+int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout);
+int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
+int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
+int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
+ u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl);
+void wil_back_rx_worker(struct work_struct *work);
+void wil_back_rx_flush(struct wil6210_priv *wil);
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
+void wil_back_tx_worker(struct work_struct *work);
+void wil_back_tx_flush(struct wil6210_priv *wil);
void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
@@ -592,6 +733,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
int wmi_pcp_stop(struct wil6210_priv *wil);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
+void wil_probe_client_flush(struct wil6210_priv *wil);
+void wil_probe_client_worker(struct work_struct *work);
int wil_rx_init(struct wil6210_priv *wil, u16 size);
void wil_rx_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 8f1d78f8a74d..976a071ba74e 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -17,10 +17,6 @@
#include "linux/device.h"
#include "wil_platform.h"
-#ifdef CONFIG_WIL6210_PLATFORM_MSM
-#include "wil_platform_msm.h"
-#endif
-
/**
* wil_platform_init() - wil6210 platform module init
*
@@ -37,13 +33,7 @@ void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops)
return NULL;
}
-#ifdef CONFIG_WIL6210_PLATFORM_MSM
- handle = wil_platform_msm_init(dev, ops);
- if (handle)
- return handle;
-#endif
-
- /* other platform specific init functions should be called here */
+ /* platform specific init functions should be called here */
return handle;
}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform_msm.c b/drivers/net/wireless/ath/wil6210/wil_platform_msm.c
deleted file mode 100644
index b354a743240d..000000000000
--- a/drivers/net/wireless/ath/wil6210/wil_platform_msm.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/msm-bus.h>
-
-#include "wil_platform.h"
-#include "wil_platform_msm.h"
-
-/**
- * struct wil_platform_msm - wil6210 msm platform module info
- *
- * @dev: device object
- * @msm_bus_handle: handle for using msm_bus API
- * @pdata: bus scale info retrieved from DT
- */
-struct wil_platform_msm {
- struct device *dev;
- uint32_t msm_bus_handle;
- struct msm_bus_scale_pdata *pdata;
-};
-
-#define KBTOB(a) (a * 1000ULL)
-
-/**
- * wil_platform_get_pdata() - Generate bus client data from device tree
- * provided by clients.
- *
- * dev: device object
- * of_node: Device tree node to extract information from
- *
- * The function returns a valid pointer to the allocated bus-scale-pdata
- * if the vectors were correctly read from the client's device node.
- * Any error in reading or parsing the device node will return NULL
- * to the caller.
- */
-static struct msm_bus_scale_pdata *wil_platform_get_pdata(
- struct device *dev,
- struct device_node *of_node)
-{
- struct msm_bus_scale_pdata *pdata;
- struct msm_bus_paths *usecase;
- int i, j, ret, len;
- unsigned int num_usecases, num_paths, mem_size;
- const uint32_t *vec_arr;
- struct msm_bus_vectors *vectors;
-
- /* first read num_usecases and num_paths so we can calculate
- * amount of memory to allocate
- */
- ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
- &num_usecases);
- if (ret) {
- dev_err(dev, "Error: num-usecases not found\n");
- return NULL;
- }
-
- ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
- &num_paths);
- if (ret) {
- dev_err(dev, "Error: num_paths not found\n");
- return NULL;
- }
-
- /* pdata memory layout:
- * msm_bus_scale_pdata
- * msm_bus_paths[num_usecases]
- * msm_bus_vectors[num_usecases][num_paths]
- */
- mem_size = sizeof(struct msm_bus_scale_pdata) +
- sizeof(struct msm_bus_paths) * num_usecases +
- sizeof(struct msm_bus_vectors) * num_usecases * num_paths;
-
- pdata = kzalloc(mem_size, GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- ret = of_property_read_string(of_node, "qcom,msm-bus,name",
- &pdata->name);
- if (ret) {
- dev_err(dev, "Error: Client name not found\n");
- goto err;
- }
-
- if (of_property_read_bool(of_node, "qcom,msm-bus,active-only")) {
- pdata->active_only = 1;
- } else {
- dev_info(dev, "active_only flag absent.\n");
- dev_info(dev, "Using dual context by default\n");
- }
-
- pdata->num_usecases = num_usecases;
- pdata->usecase = (struct msm_bus_paths *)(pdata + 1);
-
- vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
- if (vec_arr == NULL) {
- dev_err(dev, "Error: Vector array not found\n");
- goto err;
- }
-
- if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
- dev_err(dev, "Error: Length-error on getting vectors\n");
- goto err;
- }
-
- vectors = (struct msm_bus_vectors *)(pdata->usecase + num_usecases);
- for (i = 0; i < num_usecases; i++) {
- usecase = &pdata->usecase[i];
- usecase->num_paths = num_paths;
- usecase->vectors = &vectors[i];
-
- for (j = 0; j < num_paths; j++) {
- int index = ((i * num_paths) + j) * 4;
-
- usecase->vectors[j].src = be32_to_cpu(vec_arr[index]);
- usecase->vectors[j].dst =
- be32_to_cpu(vec_arr[index + 1]);
- usecase->vectors[j].ab = (uint64_t)
- KBTOB(be32_to_cpu(vec_arr[index + 2]));
- usecase->vectors[j].ib = (uint64_t)
- KBTOB(be32_to_cpu(vec_arr[index + 3]));
- }
- }
-
- return pdata;
-
-err:
- kfree(pdata);
-
- return NULL;
-}
-
-/* wil_platform API (callbacks) */
-
-static int wil_platform_bus_request(void *handle,
- uint32_t kbps /* KBytes/Sec */)
-{
- int rc, i;
- struct wil_platform_msm *msm = (struct wil_platform_msm *)handle;
- int vote = 0; /* vote 0 in case requested kbps cannot be satisfied */
- struct msm_bus_paths *usecase;
- uint32_t usecase_kbps;
- uint32_t min_kbps = ~0;
-
- /* find the lowest usecase that is bigger than requested kbps */
- for (i = 0; i < msm->pdata->num_usecases; i++) {
- usecase = &msm->pdata->usecase[i];
- /* assume we have single path (vectors[0]). If we ever
- * have multiple paths, need to define the behavior */
- usecase_kbps = div64_u64(usecase->vectors[0].ib, 1000);
- if (usecase_kbps >= kbps && usecase_kbps < min_kbps) {
- min_kbps = usecase_kbps;
- vote = i;
- }
- }
-
- rc = msm_bus_scale_client_update_request(msm->msm_bus_handle, vote);
- if (rc)
- dev_err(msm->dev, "Failed msm_bus voting. kbps=%d vote=%d, rc=%d\n",
- kbps, vote, rc);
- else
- /* TOOD: remove */
- dev_info(msm->dev, "msm_bus_scale_client_update_request succeeded. kbps=%d vote=%d\n",
- kbps, vote);
-
- return rc;
-}
-
-static void wil_platform_uninit(void *handle)
-{
- struct wil_platform_msm *msm = (struct wil_platform_msm *)handle;
-
- dev_info(msm->dev, "wil_platform_uninit\n");
-
- if (msm->msm_bus_handle)
- msm_bus_scale_unregister_client(msm->msm_bus_handle);
-
- kfree(msm->pdata);
- kfree(msm);
-}
-
-static int wil_platform_msm_bus_register(struct wil_platform_msm *msm,
- struct device_node *node)
-{
- msm->pdata = wil_platform_get_pdata(msm->dev, node);
- if (!msm->pdata) {
- dev_err(msm->dev, "Failed getting DT info\n");
- return -EINVAL;
- }
-
- msm->msm_bus_handle = msm_bus_scale_register_client(msm->pdata);
- if (!msm->msm_bus_handle) {
- dev_err(msm->dev, "Failed msm_bus registration\n");
- return -EINVAL;
- }
-
- dev_info(msm->dev, "msm_bus registration succeeded! handle 0x%x\n",
- msm->msm_bus_handle);
-
- return 0;
-}
-
-/**
- * wil_platform_msm_init() - wil6210 msm platform module init
- *
- * The function must be called before all other functions in this module.
- * It returns a handle which is used with the rest of the API
- *
- */
-void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops)
-{
- struct device_node *of_node;
- struct wil_platform_msm *msm;
- int rc;
-
- of_node = of_find_compatible_node(NULL, NULL, "qcom,wil6210");
- if (!of_node) {
- /* this could mean non-msm platform */
- dev_err(dev, "DT node not found\n");
- return NULL;
- }
-
- msm = kzalloc(sizeof(*msm), GFP_KERNEL);
- if (!msm)
- return NULL;
-
- msm->dev = dev;
-
- /* register with msm_bus module for scaling requests */
- rc = wil_platform_msm_bus_register(msm, of_node);
- if (rc)
- goto cleanup;
-
- memset(ops, 0, sizeof(*ops));
- ops->bus_request = wil_platform_bus_request;
- ops->uninit = wil_platform_uninit;
-
- return (void *)msm;
-
-cleanup:
- kfree(msm);
- return NULL;
-}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 63476c86cd0e..0f3e4334c8e3 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -23,10 +23,15 @@
#include "wmi.h"
#include "trace.h"
-static uint max_assoc_sta = 1;
+static uint max_assoc_sta = WIL6210_MAX_CID;
module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
+int agg_wsize; /* = 0; */
+module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
+ " 0 - use default; < 0 - don't auto-establish");
+
/**
* WMI event receiving - theory of operations
*
@@ -197,7 +202,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
might_sleep();
- if (!test_bit(wil_status_fwready, &wil->status)) {
+ if (!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
}
@@ -300,7 +305,7 @@ static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
wil_dbg_wmi(wil, "WMI: got FW ready event\n");
wil_set_recovery_state(wil, fw_recovery_idle);
- set_bit(wil_status_fwready, &wil->status);
+ set_bit(wil_status_fwready, wil->status);
/* let the reset sequence continue */
complete(&wil->wmi_ready);
}
@@ -438,7 +443,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
- if (!test_bit(wil_status_fwconnecting, &wil->status)) {
+ if (!test_bit(wil_status_fwconnecting, wil->status)) {
wil_err(wil, "Not in connecting state\n");
return;
}
@@ -457,13 +462,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if (assoc_req_ie) {
sinfo.assoc_req_ies = assoc_req_ie;
sinfo.assoc_req_ies_len = assoc_req_ielen;
- sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
}
cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
}
- clear_bit(wil_status_fwconnecting, &wil->status);
- set_bit(wil_status_fwconnected, &wil->status);
+ clear_bit(wil_status_fwconnecting, wil->status);
+ set_bit(wil_status_fwconnected, wil->status);
/* FIXME FW can transmit only ucast frames to peer */
/* FIXME real ring_id instead of hard coded 0 */
@@ -471,7 +475,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
wil->sta[evt->cid].status = wil_sta_conn_pending;
wil->pending_connect_cid = evt->cid;
- queue_work(wil->wmi_wq_conn, &wil->connect_worker);
+ queue_work(wil->wq_service, &wil->connect_worker);
}
static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
@@ -544,9 +548,24 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
}
+static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
+{
+ struct vring_tx_data *t;
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (cid != wil->vring2cid_tid[i][0])
+ continue;
+ t = &wil->vring_tx_data[i];
+ if (!t->enabled)
+ continue;
+
+ wil_addba_tx_request(wil, i, wsize);
+ }
+}
+
static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
- struct net_device *ndev = wil_to_ndev(wil);
struct wmi_data_port_open_event *evt = d;
u8 cid = evt->cid;
@@ -558,7 +577,8 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
}
wil->sta[cid].data_port_open = true;
- netif_carrier_on(ndev);
+ if (agg_wsize >= 0)
+ wil_addba_tx_cid(wil, cid, agg_wsize);
}
static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
@@ -583,55 +603,89 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
int len)
{
struct wmi_vring_ba_status_event *evt = d;
- struct wil_sta_info *sta;
- uint i, cid;
+ struct vring_tx_data *txdata;
- /* TODO: use Rx BA status, not Tx one */
-
- wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
+ wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
evt->ringid,
evt->status == WMI_BA_AGREED ? "OK" : "N/A",
- evt->agg_wsize, __le16_to_cpu(evt->ba_timeout));
+ evt->agg_wsize, __le16_to_cpu(evt->ba_timeout),
+ evt->amsdu ? "+" : "-");
if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "invalid ring id %d\n", evt->ringid);
return;
}
- mutex_lock(&wil->mutex);
-
- cid = wil->vring2cid_tid[evt->ringid][0];
- if (cid >= WIL6210_MAX_CID) {
- wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
- goto out;
+ if (evt->status != WMI_BA_AGREED) {
+ evt->ba_timeout = 0;
+ evt->agg_wsize = 0;
+ evt->amsdu = 0;
}
- sta = &wil->sta[cid];
- if (sta->status == wil_sta_unused) {
- wil_err(wil, "CID %d unused\n", cid);
- goto out;
- }
+ txdata = &wil->vring_tx_data[evt->ringid];
- wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr);
- for (i = 0; i < WIL_STA_TID_NUM; i++) {
- struct wil_tid_ampdu_rx *r;
- unsigned long flags;
+ txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
+ txdata->agg_wsize = evt->agg_wsize;
+ txdata->agg_amsdu = evt->amsdu;
+ txdata->addba_in_progress = false;
+}
- spin_lock_irqsave(&sta->tid_rx_lock, flags);
+static void wmi_evt_addba_rx_req(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ struct wmi_rcp_addba_req_event *evt = d;
- r = sta->tid_rx[i];
- sta->tid_rx[i] = NULL;
- wil_tid_ampdu_rx_free(wil, r);
+ wil_addba_rx_request(wil, evt->cidxtid, evt->dialog_token,
+ evt->ba_param_set, evt->ba_timeout,
+ evt->ba_seq_ctrl);
+}
- spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
+static void wmi_evt_delba(struct wil6210_priv *wil, int id, void *d, int len)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wmi_delba_event *evt = d;
+ u8 cid, tid;
+ u16 reason = __le16_to_cpu(evt->reason);
+ struct wil_sta_info *sta;
+ struct wil_tid_ampdu_rx *r;
- if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize)
- sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil,
- evt->agg_wsize, 0);
+ might_sleep();
+ parse_cidxtid(evt->cidxtid, &cid, &tid);
+ wil_dbg_wmi(wil, "DELBA CID %d TID %d from %s reason %d\n",
+ cid, tid,
+ evt->from_initiator ? "originator" : "recipient",
+ reason);
+ if (!evt->from_initiator) {
+ int i;
+ /* find Tx vring it belongs to */
+ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if ((wil->vring2cid_tid[i][0] == cid) &&
+ (wil->vring2cid_tid[i][1] == tid)) {
+ struct vring_tx_data *txdata =
+ &wil->vring_tx_data[i];
+
+ wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
+ txdata->agg_timeout = 0;
+ txdata->agg_wsize = 0;
+ txdata->addba_in_progress = false;
+
+ break; /* max. 1 matching ring */
+ }
+ }
+ if (i >= ARRAY_SIZE(wil->vring2cid_tid))
+ wil_err(wil, "DELBA: unable to find Tx vring\n");
+ return;
}
-out:
- mutex_unlock(&wil->mutex);
+ sta = &wil->sta[cid];
+
+ spin_lock_bh(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ sta->tid_rx[tid] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+
+ spin_unlock_bh(&sta->tid_rx_lock);
}
static const struct {
@@ -649,6 +703,8 @@ static const struct {
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
{WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
+ {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
+ {WMI_DELBA_EVENTID, wmi_evt_delba},
};
/*
@@ -668,7 +724,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
ulong flags;
unsigned n;
- if (!test_bit(wil_status_reset_done, &wil->status)) {
+ if (!test_bit(wil_status_reset_done, wil->status)) {
wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
return;
}
@@ -1025,13 +1081,14 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(mtu_max + ETH_HLEN),
+ .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},
.mid = 0, /* TODO - what is it? */
.decap_trans_type = WMI_DECAP_TYPE_802_3,
.reorder_type = WMI_RX_SW_REORDER,
+ .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
@@ -1074,12 +1131,13 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
return rc;
}
-int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
{
int rc;
struct wmi_temp_sense_cmd cmd = {
- .measure_marlon_m_en = cpu_to_le32(!!t_m),
- .measure_marlon_r_en = cpu_to_le32(!!t_r),
+ .measure_baseband_en = cpu_to_le32(!!t_bb),
+ .measure_rf_en = cpu_to_le32(!!t_rf),
+ .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
@@ -1091,10 +1149,10 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
if (rc)
return rc;
- if (t_m)
- *t_m = le32_to_cpu(reply.evt.marlon_m_t1000);
- if (t_r)
- *t_r = le32_to_cpu(reply.evt.marlon_r_t1000);
+ if (t_bb)
+ *t_bb = le32_to_cpu(reply.evt.baseband_t1000);
+ if (t_rf)
+ *t_rf = le32_to_cpu(reply.evt.rf_t1000);
return 0;
}
@@ -1111,6 +1169,87 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
}
+int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
+{
+ struct wmi_vring_ba_en_cmd cmd = {
+ .ringid = ringid,
+ .agg_max_wsize = size,
+ .ba_timeout = cpu_to_le16(timeout),
+ .amsdu = 0,
+ };
+
+ wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
+ ringid, size, timeout);
+
+ return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
+{
+ struct wmi_vring_ba_dis_cmd cmd = {
+ .ringid = ringid,
+ .reason = cpu_to_le16(reason),
+ };
+
+ wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
+ ringid, reason);
+
+ return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
+{
+ struct wmi_rcp_delba_cmd cmd = {
+ .cidxtid = cidxtid,
+ .reason = cpu_to_le16(reason),
+ };
+
+ wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
+ cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
+
+ return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
+ u16 status, bool amsdu, u16 agg_wsize, u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_cmd cmd = {
+ .cidxtid = mk_cidxtid(cid, tid),
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
+ cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
+ WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 27b97432d1c2..8a4af613e191 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -29,8 +29,10 @@
/* General */
#define WILOCITY_MAX_ASSOC_STA (8)
+#define WILOCITY_DEFAULT_ASSOC_STA (1)
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
+#define WMI_MAX_LOSS_DMG_BEACONS (32)
/* List of Commands */
enum wmi_command_id {
@@ -48,7 +50,7 @@ enum wmi_command_id {
WMI_SET_WSC_STATUS_CMDID = 0x0041,
WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
- WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300,
+/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */
WMI_MEM_READ_CMDID = 0x0800,
WMI_MEM_WR_CMDID = 0x0801,
WMI_ECHO_CMDID = 0x0803,
@@ -102,6 +104,8 @@ enum wmi_command_id {
WMI_MAINTAIN_RESUME_CMDID = 0x0851,
WMI_RS_MGMT_CMDID = 0x0852,
WMI_RF_MGMT_CMDID = 0x0853,
+ WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854,
+ WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855,
/* Performance monitoring commands */
WMI_BF_CTRL_CMDID = 0x0862,
WMI_NOTIFY_REQ_CMDID = 0x0863,
@@ -136,6 +140,7 @@ enum wmi_command_id {
WMI_EAPOL_TX_CMDID = 0xf04c,
WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
WMI_FW_VER_CMDID = 0xf04e,
+ WMI_PMC_CMDID = 0xf04f,
};
/*
@@ -283,8 +288,8 @@ enum wmi_scan_type {
WMI_LONG_SCAN = 0,
WMI_SHORT_SCAN = 1,
WMI_PBC_SCAN = 2,
- WMI_ACTIVE_SCAN = 3,
- WMI_DIRECT_SCAN = 4,
+ WMI_DIRECT_SCAN = 3,
+ WMI_ACTIVE_SCAN = 4,
};
struct wmi_start_scan_cmd {
@@ -375,6 +380,17 @@ struct wmi_rf_mgmt_cmd {
} __packed;
/*
+ * WMI_THERMAL_THROTTLING_CTRL_CMDID
+ */
+#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
+
+struct wmi_thermal_throttling_ctrl_cmd {
+ __le32 time_on_usec;
+ __le32 time_off_usec;
+ __le32 max_txop_length_usec;
+} __packed;
+
+/*
* WMI_RF_RX_TEST_CMDID
*/
struct wmi_rf_rx_test_cmd {
@@ -586,6 +602,7 @@ struct wmi_vring_ba_en_cmd {
u8 ringid;
u8 agg_max_wsize;
__le16 ba_timeout;
+ u8 amsdu;
} __packed;
/*
@@ -647,6 +664,7 @@ enum wmi_cfg_rx_chain_cmd_action {
enum wmi_cfg_rx_chain_cmd_decap_trans_type {
WMI_DECAP_TYPE_802_3 = 0,
WMI_DECAP_TYPE_NATIVE_WIFI = 1,
+ WMI_DECAP_TYPE_NONE = 2,
};
enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
@@ -784,9 +802,17 @@ struct wmi_echo_cmd {
*
* Measure MAC and radio temperatures
*/
+
+/* Possible modes for temperature measurement */
+enum wmi_temperature_measure_mode {
+ TEMPERATURE_USE_OLD_VALUE = 0x1,
+ TEMPERATURE_MEASURE_NOW = 0x2,
+};
+
struct wmi_temp_sense_cmd {
- __le32 measure_marlon_m_en;
- __le32 measure_marlon_r_en;
+ __le32 measure_baseband_en;
+ __le32 measure_rf_en;
+ __le32 measure_mode;
} __packed;
/*
@@ -842,6 +868,7 @@ enum wmi_event_id {
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
@@ -858,6 +885,7 @@ enum wmi_event_id {
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/*P2P*/
+ WMI_P2P_CFG_DONE_EVENTID = 0x1910,
WMI_PORT_ALLOCATED_EVENTID = 0x1911,
WMI_PORT_DELETED_EVENTID = 0x1912,
WMI_LISTEN_STARTED_EVENTID = 0x1914,
@@ -898,6 +926,15 @@ struct wmi_rf_mgmt_status_event {
} __packed;
/*
+ * WMI_THERMAL_THROTTLING_STATUS_EVENTID
+ */
+struct wmi_thermal_throttling_status_event {
+ __le32 time_on_usec;
+ __le32 time_off_usec;
+ __le32 max_txop_length_usec;
+} __packed;
+
+/*
* WMI_GET_STATUS_DONE_EVENTID
*/
struct wmi_get_status_done_event {
@@ -1052,14 +1089,23 @@ struct wmi_scan_complete_event {
enum wmi_vring_ba_status {
WMI_BA_AGREED = 0,
WMI_BA_NON_AGREED = 1,
+ /* BA_EN in middle of teardown flow */
+ WMI_BA_TD_WIP = 2,
+ /* BA_DIS or BA_EN in middle of BA SETUP flow */
+ WMI_BA_SETUP_WIP = 3,
+ /* BA_EN when the BA session is already active */
+ WMI_BA_SESSION_ACTIVE = 4,
+ /* BA_DIS when the BA session is not active */
+ WMI_BA_SESSION_NOT_ACTIVE = 5,
};
struct wmi_vring_ba_status_event {
- __le16 status;
+ __le16 status; /* enum wmi_vring_ba_status */
u8 reserved[2];
u8 ringid;
u8 agg_wsize;
__le16 ba_timeout;
+ u8 amsdu;
} __packed;
/*
@@ -1145,6 +1191,14 @@ struct wmi_get_pcp_channel_event {
} __packed;
/*
+ * WMI_P2P_CFG_DONE_EVENTID
+ */
+struct wmi_p2p_cfg_done_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
* WMI_PORT_ALLOCATED_EVENTID
*/
struct wmi_port_allocated_event {
@@ -1272,8 +1326,8 @@ struct wmi_echo_event {
* Measure MAC and radio temperatures
*/
struct wmi_temp_sense_done_event {
- __le32 marlon_m_t1000;
- __le32 marlon_r_t1000;
+ __le32 baseband_t1000;
+ __le32 rf_t1000;
} __packed;
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 9183f1cf89a7..55db9f03eb2a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -45,7 +45,6 @@
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/ctype.h>
#include <linux/timer.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -2699,16 +2698,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
domain[REGDOMAINSZ] = 0;
rc = -EINVAL;
for (i = 0; i < ARRAY_SIZE(channel_table); i++) {
- /* strcasecmp doesn't exist in the library */
- char *a = channel_table[i].name;
- char *b = domain;
- while (*a) {
- char c1 = *a++;
- char c2 = *b++;
- if (tolower(c1) != tolower(c2))
- break;
- }
- if (!*a && !*b) {
+ if (!strcasecmp(channel_table[i].name, domain)) {
priv->config_reg_domain = channel_table[i].reg_domain;
rc = 0;
}
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64a5b672e30a..759fb8d41fc9 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -166,6 +166,15 @@ config B43_PHY_LCN
Say N, this is BROKEN and crashes driver.
+config B43_PHY_AC
+ bool "Support for AC-PHY (802.11ac) devices (BROKEN)"
+ depends on B43 && B43_BCMA && BROKEN
+ ---help---
+ This PHY type can be found in the following chipsets:
+ PCI: BCM4352, BCM4360
+
+ Say N, this is BROKEN and crashes driver.
+
# This config option automatically enables b43 LEDS support,
# if it's possible.
config B43_LEDS
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 9f7965aae93d..c624d4d90e4f 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -13,6 +13,7 @@ b43-$(CONFIG_B43_PHY_HT) += phy_ht.o
b43-$(CONFIG_B43_PHY_HT) += tables_phy_ht.o
b43-$(CONFIG_B43_PHY_HT) += radio_2059.o
b43-$(CONFIG_B43_PHY_LCN) += phy_lcn.o tables_phy_lcn.o
+b43-$(CONFIG_B43_PHY_AC) += phy_ac.o
b43-y += sysfs.o
b43-y += xmit.o
b43-y += dma.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index bb12586cd7cd..036552439816 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -500,6 +500,8 @@ enum {
#define B43_BCMA_IOCTL_PHY_BW_10MHZ 0x00000000 /* 10 MHz bandwidth, 40 MHz PHY */
#define B43_BCMA_IOCTL_PHY_BW_20MHZ 0x00000040 /* 20 MHz bandwidth, 80 MHz PHY */
#define B43_BCMA_IOCTL_PHY_BW_40MHZ 0x00000080 /* 40 MHz bandwidth, 160 MHz PHY */
+#define B43_BCMA_IOCTL_PHY_BW_80MHZ 0x000000C0 /* 80 MHz bandwidth */
+#define B43_BCMA_IOCTL_DAC 0x00000300 /* Highspeed DAC mode control field */
#define B43_BCMA_IOCTL_GMODE 0x00002000 /* G Mode Enable */
/* BCMA 802.11 core specific IO status (BCMA_IOST) flags */
@@ -941,6 +943,7 @@ struct b43_wl {
bool beacon1_uploaded;
bool beacon_templates_virgin; /* Never wrote the templates? */
struct work_struct beacon_update_trigger;
+ spinlock_t beacon_lock;
/* The current QOS parameters for the 4 queues. */
struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM];
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 47731cb0d815..2c9088633ec6 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1262,6 +1262,23 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
flags |= B43_BCMA_IOCTL_GMODE;
b43_device_enable(dev, flags);
+ if (dev->phy.type == B43_PHYTYPE_AC) {
+ u16 tmp;
+
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~B43_BCMA_IOCTL_DAC;
+ tmp |= 0x100;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ }
+
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
b43_bcma_phy_reset(dev);
bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
@@ -1601,12 +1618,26 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
unsigned int rate;
u16 ctl;
int antenna;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+ struct sk_buff *beacon_skb;
- bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
- len = min_t(size_t, dev->wl->current_beacon->len,
- 0x200 - sizeof(struct b43_plcp_hdr6));
+ spin_lock_irqsave(&dev->wl->beacon_lock, flags);
+ info = IEEE80211_SKB_CB(dev->wl->current_beacon);
rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
+ /* Clone the beacon, so it cannot go away, while we write it to hw. */
+ beacon_skb = skb_clone(dev->wl->current_beacon, GFP_ATOMIC);
+ spin_unlock_irqrestore(&dev->wl->beacon_lock, flags);
+
+ if (!beacon_skb) {
+ b43dbg(dev->wl, "Could not upload beacon. "
+ "Failed to clone beacon skb.");
+ return;
+ }
+
+ bcn = (const struct ieee80211_mgmt *)(beacon_skb->data);
+ len = min_t(size_t, beacon_skb->len,
+ 0x200 - sizeof(struct b43_plcp_hdr6));
b43_write_template_common(dev, (const u8 *)bcn,
len, ram_offset, shm_size_offset, rate);
@@ -1674,6 +1705,8 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
B43_SHM_SH_DTIMPER, 0);
}
b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset);
+
+ dev_kfree_skb_any(beacon_skb);
}
static void b43_upload_beacon0(struct b43_wldev *dev)
@@ -1790,13 +1823,13 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
mutex_unlock(&wl->mutex);
}
-/* Asynchronously update the packet templates in template RAM.
- * Locking: Requires wl->mutex to be locked. */
+/* Asynchronously update the packet templates in template RAM. */
static void b43_update_templates(struct b43_wl *wl)
{
- struct sk_buff *beacon;
+ struct sk_buff *beacon, *old_beacon;
+ unsigned long flags;
- /* This is the top half of the ansynchronous beacon update.
+ /* This is the top half of the asynchronous beacon update.
* The bottom half is the beacon IRQ.
* Beacon update must be asynchronous to avoid sending an
* invalid beacon. This can happen for example, if the firmware
@@ -1810,12 +1843,17 @@ static void b43_update_templates(struct b43_wl *wl)
if (unlikely(!beacon))
return;
- if (wl->current_beacon)
- dev_kfree_skb_any(wl->current_beacon);
+ spin_lock_irqsave(&wl->beacon_lock, flags);
+ old_beacon = wl->current_beacon;
wl->current_beacon = beacon;
wl->beacon0_uploaded = false;
wl->beacon1_uploaded = false;
+ spin_unlock_irqrestore(&wl->beacon_lock, flags);
+
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
+
+ if (old_beacon)
+ dev_kfree_skb_any(old_beacon);
}
static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
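
b43_update_templates() is the other half of the scheme: the pointer swap happens under the same beacon_lock, and the old buffer is freed only after the lock is dropped, which keeps the critical section short. Roughly, using a hypothetical helper over the same fields:

/* Sketch only: publish the new beacon under the lock, free the old one
 * outside of it. Readers never see a freed pointer because they clone
 * the skb while holding beacon_lock (see the sketch above).
 */
static void beacon_publish(struct b43_wl *wl, struct sk_buff *new_skb)
{
	struct sk_buff *old;
	unsigned long flags;

	spin_lock_irqsave(&wl->beacon_lock, flags);
	old = wl->current_beacon;
	wl->current_beacon = new_skb;
	spin_unlock_irqrestore(&wl->beacon_lock, flags);

	if (old)
		dev_kfree_skb_any(old);
}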
@@ -4318,6 +4356,7 @@ redo:
mutex_unlock(&wl->mutex);
cancel_delayed_work_sync(&dev->periodic_work);
cancel_work_sync(&wl->tx_work);
+ b43_leds_stop(dev);
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev || b43_status(dev) < B43_STAT_STARTED) {
@@ -4505,6 +4544,12 @@ static int b43_phy_versioning(struct b43_wldev *dev)
unsupported = 1;
break;
#endif
+#ifdef CONFIG_B43_PHY_AC
+ case B43_PHYTYPE_AC:
+ if (phy_rev > 1)
+ unsupported = 1;
+ break;
+#endif
default:
unsupported = 1;
}
@@ -4601,6 +4646,10 @@ static int b43_phy_versioning(struct b43_wldev *dev)
if (radio_id != 0x2064)
unsupported = 1;
break;
+ case B43_PHYTYPE_AC:
+ if (radio_id != 0x2069)
+ unsupported = 1;
+ break;
default:
B43_WARN_ON(1);
}
@@ -5094,7 +5143,6 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
{
struct b43_wl *wl = hw_to_b43_wl(hw);
- /* FIXME: add locking */
b43_update_templates(wl);
return 0;
@@ -5584,6 +5632,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl->hw = hw;
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
+ spin_lock_init(&wl->beacon_lock);
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
diff --git a/drivers/net/wireless/b43/phy_ac.c b/drivers/net/wireless/b43/phy_ac.c
new file mode 100644
index 000000000000..e75633d67938
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_ac.c
@@ -0,0 +1,92 @@
+/*
+ * Broadcom B43 wireless driver
+ * IEEE 802.11ac AC-PHY support
+ *
+ * Copyright (c) 2015 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include "b43.h"
+#include "phy_ac.h"
+
+/**************************************************
+ * Basic PHY ops
+ **************************************************/
+
+static int b43_phy_ac_op_allocate(struct b43_wldev *dev)
+{
+ struct b43_phy_ac *phy_ac;
+
+ phy_ac = kzalloc(sizeof(*phy_ac), GFP_KERNEL);
+ if (!phy_ac)
+ return -ENOMEM;
+ dev->phy.ac = phy_ac;
+
+ return 0;
+}
+
+static void b43_phy_ac_op_free(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_ac *phy_ac = phy->ac;
+
+ kfree(phy_ac);
+ phy->ac = NULL;
+}
+
+static void b43_phy_ac_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
+ u16 set)
+{
+ b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_PHY_DATA,
+ (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
+}
+
+static u16 b43_phy_ac_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+ b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg);
+ return b43_read16(dev, B43_MMIO_RADIO24_DATA);
+}
+
+static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg,
+ u16 value)
+{
+ b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_RADIO24_DATA, value);
+}
+
+static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ return 11;
+ return 36;
+}
+
+static enum b43_txpwr_result
+b43_phy_ac_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi)
+{
+ return B43_TXPWR_RES_DONE;
+}
+
+static void b43_phy_ac_op_adjust_txpower(struct b43_wldev *dev)
+{
+}
+
+/**************************************************
+ * PHY ops struct
+ **************************************************/
+
+const struct b43_phy_operations b43_phyops_ac = {
+ .allocate = b43_phy_ac_op_allocate,
+ .free = b43_phy_ac_op_free,
+ .phy_maskset = b43_phy_ac_op_maskset,
+ .radio_read = b43_phy_ac_op_radio_read,
+ .radio_write = b43_phy_ac_op_radio_write,
+ .get_default_chan = b43_phy_ac_op_get_default_chan,
+ .recalc_txpower = b43_phy_ac_op_recalc_txpower,
+ .adjust_txpower = b43_phy_ac_op_adjust_txpower,
+};
diff --git a/drivers/net/wireless/b43/phy_ac.h b/drivers/net/wireless/b43/phy_ac.h
new file mode 100644
index 000000000000..d1ca79e0eb24
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_ac.h
@@ -0,0 +1,38 @@
+#ifndef B43_PHY_AC_H_
+#define B43_PHY_AC_H_
+
+#include "phy_common.h"
+
+#define B43_PHY_AC_BBCFG 0x001
+#define B43_PHY_AC_BBCFG_RSTCCA 0x4000 /* Reset CCA */
+#define B43_PHY_AC_BANDCTL 0x003 /* Band control */
+#define B43_PHY_AC_BANDCTL_5GHZ 0x0001
+#define B43_PHY_AC_TABLE_ID 0x00d
+#define B43_PHY_AC_TABLE_OFFSET 0x00e
+#define B43_PHY_AC_TABLE_DATA1 0x00f
+#define B43_PHY_AC_TABLE_DATA2 0x010
+#define B43_PHY_AC_TABLE_DATA3 0x011
+#define B43_PHY_AC_CLASSCTL 0x140 /* Classifier control */
+#define B43_PHY_AC_CLASSCTL_CCKEN 0x0001 /* CCK enable */
+#define B43_PHY_AC_CLASSCTL_OFDMEN 0x0002 /* OFDM enable */
+#define B43_PHY_AC_CLASSCTL_WAITEDEN 0x0004 /* Waited enable */
+#define B43_PHY_AC_BW1A 0x371
+#define B43_PHY_AC_BW2 0x372
+#define B43_PHY_AC_BW3 0x373
+#define B43_PHY_AC_BW4 0x374
+#define B43_PHY_AC_BW5 0x375
+#define B43_PHY_AC_BW6 0x376
+#define B43_PHY_AC_RFCTL_CMD 0x408
+#define B43_PHY_AC_C1_CLIP 0x6d4
+#define B43_PHY_AC_C1_CLIP_DIS 0x4000
+#define B43_PHY_AC_C2_CLIP 0x8d4
+#define B43_PHY_AC_C2_CLIP_DIS 0x4000
+#define B43_PHY_AC_C3_CLIP 0xad4
+#define B43_PHY_AC_C3_CLIP_DIS 0x4000
+
+struct b43_phy_ac {
+};
+
+extern const struct b43_phy_operations b43_phyops_ac;
+
+#endif /* B43_PHY_AC_H_ */
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index ee27b06074e1..ec2b9c577b90 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -33,6 +33,7 @@
#include "phy_lp.h"
#include "phy_ht.h"
#include "phy_lcn.h"
+#include "phy_ac.h"
#include "b43.h"
#include "main.h"
@@ -70,6 +71,11 @@ int b43_phy_allocate(struct b43_wldev *dev)
phy->ops = &b43_phyops_lcn;
#endif
break;
+ case B43_PHYTYPE_AC:
+#ifdef CONFIG_B43_PHY_AC
+ phy->ops = &b43_phyops_ac;
+#endif
+ break;
}
if (B43_WARN_ON(!phy->ops))
return -ENODEV;
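
The b43_phyops_ac table selected here is reached through the same indirection as the other PHY types; simplified, the common layer ends up doing something like the following (a sketch, not a quote of phy_common.c):

/* Sketch: register access for an AC-PHY device goes through the ops
 * pointers installed by b43_phy_allocate() above.
 */
static u16 read_ac_radio_reg(struct b43_wldev *dev, u16 reg)
{
	return dev->phy.ops->radio_read(dev, reg); /* -> b43_phy_ac_op_radio_read() */
}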
@@ -572,7 +578,8 @@ void b43_phy_force_clock(struct b43_wldev *dev, bool force)
u32 tmp;
WARN_ON(dev->phy.type != B43_PHYTYPE_N &&
- dev->phy.type != B43_PHYTYPE_HT);
+ dev->phy.type != B43_PHYTYPE_HT &&
+ dev->phy.type != B43_PHYTYPE_AC);
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 3912274f71e3..78d86526799e 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -222,6 +222,8 @@ struct b43_phy {
struct b43_phy_ht *ht;
/* LCN-PHY specific information */
struct b43_phy_lcn *lcn;
+ /* AC-PHY specific information */
+ struct b43_phy_ac *ac;
};
/* Band support flags. */
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 896177690394..9501420340a9 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1743,25 +1743,6 @@ u16 freq_r3A_value(u16 frequency)
return value;
}
-void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev)
-{
- static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
- static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
- u16 tmp = b43legacy_radio_read16(dev, 0x001E);
- int i;
- int j;
-
- for (i = 0; i < 5; i++) {
- for (j = 0; j < 5; j++) {
- if (tmp == (data_high[i] | data_low[j])) {
- b43legacy_phy_write(dev, 0x0069, (i - j) << 8 |
- 0x00C0);
- return;
- }
- }
- }
-}
-
int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev,
u8 channel,
int synthetic_pu_workaround)
diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/b43legacy/radio.h
index bccb3d7da682..dd2976d1d561 100644
--- a/drivers/net/wireless/b43legacy/radio.h
+++ b/drivers/net/wireless/b43legacy/radio.h
@@ -92,7 +92,6 @@ void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val);
void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val);
void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev);
-void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev);
u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev);
#endif /* B43legacy_RADIO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 9880dae2a569..7944224e3fc9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -97,25 +97,6 @@ static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
-static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
-{
- bool is_err = false;
-#ifdef CONFIG_PM_SLEEP
- is_err = atomic_read(&sdiodev->suspend);
-#endif
- return is_err;
-}
-
-static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq)
-{
-#ifdef CONFIG_PM_SLEEP
- int retry = 0;
- while (atomic_read(&sdiodev->suspend) && retry++ != 30)
- wait_event_timeout(*wq, false, HZ/100);
-#endif
-}
-
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
@@ -244,10 +225,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
write, fn, addr, regsz);
- brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
- if (brcmf_sdiod_pm_resume_error(sdiodev))
- return -EIO;
-
/* only allow byte access on F0 */
if (WARN_ON(regsz > 1 && !fn))
return -EINVAL;
@@ -292,6 +269,12 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
return ret;
}
+static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev)
+{
+ sdiodev->state = BRCMF_STATE_NOMEDIUM;
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+}
+
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 regsz, void *data, bool write)
{
@@ -299,7 +282,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
s32 retry = 0;
int ret;
- if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+ if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
return -ENOMEDIUM;
/*
@@ -325,7 +308,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret == -ENOMEDIUM)
- brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+ brcmf_sdiod_nomedium_state(sdiodev);
else if (ret != 0) {
/*
* SleepCSR register access can fail when
@@ -348,7 +331,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
int err = 0, i;
u8 addr[3];
- if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+ if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
return -ENOMEDIUM;
addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
@@ -462,10 +445,6 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
unsigned int req_sz;
int err;
- brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_sdiod_pm_resume_error(sdiodev))
- return -EIO;
-
/* Single skb use the standard mmc interface */
req_sz = pkt->len + 3;
req_sz &= (uint)~3;
@@ -481,7 +460,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
req_sz);
if (err == -ENOMEDIUM)
- brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+ brcmf_sdiod_nomedium_state(sdiodev);
return err;
}
@@ -516,10 +495,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (!pktlist->qlen)
return -EINVAL;
- brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_sdiod_pm_resume_error(sdiodev))
- return -EIO;
-
target_list = pktlist;
/* for host with broken sg support, prepare a page aligned list */
__skb_queue_head_init(&local_list);
@@ -620,8 +595,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret == -ENOMEDIUM) {
- brcmf_bus_change_state(sdiodev->bus_if,
- BRCMF_BUS_NOMEDIUM);
+ brcmf_sdiod_nomedium_state(sdiodev);
break;
} else if (ret != 0) {
brcmf_err("CMD53 sg block %s failed %d\n",
@@ -996,18 +970,20 @@ out:
}
#define BRCMF_SDIO_DEVICE(dev_id) \
- {SDIO_DEVICE(BRCM_SDIO_VENDOR_ID_BROADCOM, dev_id)}
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
- BRCMF_SDIO_DEVICE(BRCM_SDIO_43143_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_43241_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_4329_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_4330_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_4334_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_43362_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_4335_4339_DEVICE_ID),
- BRCMF_SDIO_DEVICE(BRCM_SDIO_4354_DEVICE_ID),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1074,9 +1050,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
bus_if->wowl_supported = true;
#endif
+ sdiodev->sleeping = false;
atomic_set(&sdiodev->suspend, false);
- init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_buffer_wait);
+ init_waitqueue_head(&sdiodev->idle_wait);
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
err = brcmf_sdiod_probe(sdiodev);
@@ -1138,12 +1114,23 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_bus *bus_if;
+ struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags;
brcmf_dbg(SDIO, "Enter\n");
+ bus_if = dev_get_drvdata(dev);
+ sdiodev = bus_if->bus_priv.sdio;
+
+ /* wait for watchdog to go idle */
+ if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping,
+ msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) {
+ brcmf_err("bus still active\n");
+ return -EBUSY;
+ }
+ /* disable watchdog */
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
atomic_set(&sdiodev->suspend, true);
if (sdiodev->wowl_enabled) {
@@ -1155,9 +1142,6 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
}
-
- brcmf_sdio_wd_timer(sdiodev->bus, 0);
-
return 0;
}
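
The suspend handler now refuses to proceed while the bus is busy: it waits up to three watchdog periods on idle_wait for sdiodev->sleeping to become true, and only then disables the watchdog and sets the PM flags. The waker lives elsewhere in the SDIO code and is not shown in this hunk; a hedged sketch of what that side presumably looks like, using only the fields added here:

/* Assumed counterpart (sketch, not the actual sdio.c change): record the
 * idle/active transition and wake anyone blocked in the suspend path.
 */
static void bus_set_sleeping(struct brcmf_sdio_dev *sdiodev, bool sleeping)
{
	sdiodev->sleeping = sleeping;
	if (sleeping)
		wake_up(&sdiodev->idle_wait);
}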
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index ef344e47218a..89e6a4dc105e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -33,11 +33,8 @@
/* The level of bus communication with the dongle */
enum brcmf_bus_state {
- BRCMF_BUS_UNKNOWN, /* Not determined yet */
- BRCMF_BUS_NOMEDIUM, /* No medium access to dongle */
BRCMF_BUS_DOWN, /* Not ready for frame transfers */
- BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
- BRCMF_BUS_DATA /* Ready for frame transfers */
+ BRCMF_BUS_UP /* Ready for frame transfers */
};
/* The level of bus communication with the dongle */
@@ -188,22 +185,6 @@ void brcmf_bus_wowl_config(struct brcmf_bus *bus, bool enabled)
bus->ops->wowl_config(bus->dev, enabled);
}
-static inline bool brcmf_bus_ready(struct brcmf_bus *bus)
-{
- return bus->state == BRCMF_BUS_LOAD || bus->state == BRCMF_BUS_DATA;
-}
-
-static inline void brcmf_bus_change_state(struct brcmf_bus *bus,
- enum brcmf_bus_state new_state)
-{
- /* NOMEDIUM is permanent */
- if (bus->state == BRCMF_BUS_NOMEDIUM)
- return;
-
- brcmf_dbg(TRACE, "%d -> %d\n", bus->state, new_state);
- bus->state = new_state;
-}
-
/*
* interface functions from common layer
*/
@@ -226,6 +207,9 @@ void brcmf_txflowblock(struct device *dev, bool state);
/* Notify the bus has transferred the tx packet to firmware */
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
+/* Configure the "global" bus state used by upper layers */
+void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
+
int brcmf_bus_start(struct device *dev);
s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 3aecc5f48719..b59b8c6c42ab 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -38,6 +38,7 @@
#include "proto.h"
#include "vendor.h"
#include "bus.h"
+#include "common.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define BRCMF_PNO_VERSION 2
@@ -452,16 +453,16 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
}
static int
-send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
+send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
{
int err;
struct brcmf_wsec_key_le key_le;
convert_key_from_CPU(key, &key_le);
- brcmf_netdev_wait_pend8021x(ndev);
+ brcmf_netdev_wait_pend8021x(ifp);
- err = brcmf_fil_bsscfg_data_set(netdev_priv(ndev), "wsec_key", &key_le,
+ err = brcmf_fil_bsscfg_data_set(ifp, "wsec_key", &key_le,
sizeof(key_le));
if (err)
@@ -1228,7 +1229,25 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
memset(prof, 0, sizeof(*prof));
}
-static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
+static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
+{
+ u16 reason;
+
+ switch (e->event_code) {
+ case BRCMF_E_DEAUTH:
+ case BRCMF_E_DEAUTH_IND:
+ case BRCMF_E_DISASSOC_IND:
+ reason = e->reason;
+ break;
+ case BRCMF_E_LINK:
+ default:
+ reason = 0;
+ break;
+ }
+ return reason;
+}
+
+static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
s32 err = 0;
@@ -1243,7 +1262,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
- cfg80211_disconnected(vif->wdev.netdev, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
+ GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1413,7 +1433,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
if (!check_vif_up(ifp->vif))
return -EIO;
- brcmf_link_down(ifp->vif);
+ brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING);
brcmf_dbg(TRACE, "Exit\n");
@@ -1670,7 +1690,7 @@ brcmf_set_sharedkey(struct net_device *ndev,
brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n",
key.len, key.index, key.algo);
brcmf_dbg(CONN, "key \"%s\"\n", key.data);
- err = send_key_to_dongle(ndev, &key);
+ err = send_key_to_dongle(netdev_priv(ndev), &key);
if (err)
return err;
@@ -2052,7 +2072,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
/* check for key index change */
if (key.len == 0) {
/* key delete */
- err = send_key_to_dongle(ndev, &key);
+ err = send_key_to_dongle(ifp, &key);
if (err)
brcmf_err("key delete error (%d)\n", err);
} else {
@@ -2108,7 +2128,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
return -EINVAL;
}
- err = send_key_to_dongle(ndev, &key);
+ err = send_key_to_dongle(ifp, &key);
if (err)
brcmf_err("wsec_key error (%d)\n", err);
}
@@ -2121,7 +2141,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct key_params *params)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_wsec_key key;
+ struct brcmf_wsec_key *key;
s32 val;
s32 wsec;
s32 err = 0;
@@ -2132,54 +2152,62 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
+ if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ brcmf_err("invalid key index (%d)\n", key_idx);
+ return -EINVAL;
+ }
+
if (mac_addr &&
(params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
(params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
- memset(&key, 0, sizeof(key));
- key.len = (u32) params->key_len;
- key.index = (u32) key_idx;
+ key = &ifp->vif->profile.key[key_idx];
+ memset(key, 0, sizeof(*key));
- if (key.len > sizeof(key.data)) {
- brcmf_err("Too long key length (%u)\n", key.len);
+ if (params->key_len > sizeof(key->data)) {
+ brcmf_err("Too long key length (%u)\n", params->key_len);
err = -EINVAL;
goto done;
}
- memcpy(key.data, params->key, key.len);
+ key->len = params->key_len;
+ key->index = key_idx;
- key.flags = BRCMF_PRIMARY_KEY;
+ memcpy(key->data, params->key, key->len);
+
+ key->flags = BRCMF_PRIMARY_KEY;
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
+ key->algo = CRYPTO_ALGO_WEP1;
val = WEP_ENABLED;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
+ key->algo = CRYPTO_ALGO_WEP128;
val = WEP_ENABLED;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
if (!brcmf_is_apmode(ifp->vif)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
- memcpy(keybuf, &key.data[24], sizeof(keybuf));
- memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
- memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ memcpy(keybuf, &key->data[24], sizeof(keybuf));
+ memcpy(&key->data[24], &key->data[16], sizeof(keybuf));
+ memcpy(&key->data[16], keybuf, sizeof(keybuf));
}
- key.algo = CRYPTO_ALGO_TKIP;
+ key->algo = CRYPTO_ALGO_TKIP;
val = TKIP_ENABLED;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
+ key->algo = CRYPTO_ALGO_AES_CCM;
val = AES_ENABLED;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
+ key->algo = CRYPTO_ALGO_AES_CCM;
val = AES_ENABLED;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
break;
@@ -2189,7 +2217,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- err = send_key_to_dongle(ndev, &key);
+ err = send_key_to_dongle(ifp, key);
if (err)
goto done;
@@ -2222,7 +2250,7 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- if (key_idx >= DOT11_MAX_DEFAULT_KEYS) {
+ if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
/* we ignore this key index in this case */
brcmf_err("invalid key index (%d)\n", key_idx);
return -EINVAL;
@@ -2237,7 +2265,7 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "key index (%d)\n", key_idx);
/* Set the new key/index */
- err = send_key_to_dongle(ndev, &key);
+ err = send_key_to_dongle(ifp, &key);
brcmf_dbg(TRACE, "Exit\n");
return err;
@@ -2305,6 +2333,39 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
+static void
+brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp)
+{
+ s32 err;
+ u8 key_idx;
+ struct brcmf_wsec_key *key;
+ s32 wsec;
+
+ for (key_idx = 0; key_idx < BRCMF_MAX_DEFAULT_KEYS; key_idx++) {
+ key = &ifp->vif->profile.key[key_idx];
+ if ((key->algo == CRYPTO_ALGO_WEP1) ||
+ (key->algo == CRYPTO_ALGO_WEP128))
+ break;
+ }
+ if (key_idx == BRCMF_MAX_DEFAULT_KEYS)
+ return;
+
+ err = send_key_to_dongle(ifp, key);
+ if (err) {
+ brcmf_err("Setting WEP key failed (%d)\n", err);
+ return;
+ }
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
+ if (err) {
+ brcmf_err("get wsec error (%d)\n", err);
+ return;
+ }
+ wsec |= WEP_ENABLED;
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
+ if (err)
+ brcmf_err("set wsec error (%d)\n", err);
+}
+
static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
@@ -2333,10 +2394,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("GET STA INFO failed, %d\n", err);
goto done;
}
- sinfo->filled = STATION_INFO_INACTIVE_TIME;
+ sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
- sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+ sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(sta_info_le.in);
}
brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
@@ -2354,7 +2415,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("Could not get rate (%d)\n", err);
goto done;
} else {
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy = rate * 5;
brcmf_dbg(CONN, "Rate %d Mbps\n", rate / 2);
}
@@ -2369,7 +2430,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
@@ -2396,7 +2457,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "DTIM peroid %d\n",
dtim_period);
}
- sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
}
} else
err = -EPERM;
@@ -2999,7 +3060,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
* disassociate from AP to save power while system is
* in suspended state
*/
- brcmf_link_down(vif);
+ brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED);
/* Make sure WPA_Supplicant receives all the event
* generated due to DISASSOC call to the fw to keep
* the state fw and WPA_Supplicant state consistent
@@ -3695,17 +3756,12 @@ static u32
brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
{
- __le32 iecount_le;
- __le32 pktflag_le;
-
strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
- iecount_le = cpu_to_le32(1);
- memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
+ put_unaligned_le32(1, &iebuf[VNDR_IE_COUNT_OFFSET]);
- pktflag_le = cpu_to_le32(pktflag);
- memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
+ put_unaligned_le32(pktflag, &iebuf[VNDR_IE_PKTFLAG_OFFSET]);
memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
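
put_unaligned_le32() (from <asm/unaligned.h>) stores a CPU-order value as little-endian at a possibly unaligned address, which is exactly what the removed cpu_to_le32()-into-a-temporary-plus-memcpy() sequence did; the two forms below write the same bytes:

#include <asm/unaligned.h>

static void store_ie_count(u8 *iebuf)
{
	__le32 tmp = cpu_to_le32(1);

	memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &tmp, sizeof(tmp)); /* old form */
	put_unaligned_le32(1, &iebuf[VNDR_IE_COUNT_OFFSET]);     /* new form */
}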
@@ -3924,6 +3980,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
const struct brcmf_tlv *ssid_ie;
+ const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
const struct brcmf_tlv *rsn_ie;
@@ -3933,6 +3990,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_fil_bss_enable_le bss_enable;
u16 chanspec;
bool mbss;
+ int is_11d;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -3941,10 +3999,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
-
dev_role = ifp->vif->wdev.iftype;
mbss = ifp->vif->mbss;
+ /* store current 11d setting */
+ brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
+ country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len,
+ WLAN_EID_COUNTRY);
+ is_11d = country_ie ? 1 : 0;
+
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
@@ -4010,6 +4074,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
+ if (is_11d != ifp->vif->is_11d) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+ is_11d);
+ if (err < 0) {
+ brcmf_err("Regulatory Set Error, %d\n", err);
+ goto exit;
+ }
+ }
if (settings->beacon_interval) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
settings->beacon_interval);
@@ -4042,6 +4114,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
+ } else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
+ /* Multiple-BSS should use same 11d configuration */
+ err = -EINVAL;
+ goto exit;
}
if (dev_role == NL80211_IFTYPE_AP) {
if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
@@ -4057,6 +4133,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("BRCMF_C_UP error (%d)\n", err);
goto exit;
}
+ /* On DOWN the firmware removes the WEP keys, reconfigure
+ * them if they were set.
+ */
+ brcmf_cfg80211_reconfigure_wep(ifp);
memset(&join_params, 0, sizeof(join_params));
/* join parameters starts with ssid */
@@ -4133,6 +4213,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("setting INFRA mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+ ifp->vif->is_11d);
+ if (err < 0)
+ brcmf_err("restoring REGULATORY setting failed %d\n",
+ err);
/* Bring device back up so it can be used again */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0)
@@ -4197,6 +4282,34 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
return err;
}
+static int
+brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev,
+ const u8 *mac, struct station_parameters *params)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter, MAC %pM, mask 0x%04x set 0x%04x\n", mac,
+ params->sta_flags_mask, params->sta_flags_set);
+
+ /* Ignore all 00 MAC */
+ if (is_zero_ether_addr(mac))
+ return 0;
+
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return 0;
+
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_AUTHORIZE,
+ (void *)mac, ETH_ALEN);
+ else
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_DEAUTHORIZE,
+ (void *)mac, ETH_ALEN);
+ if (err < 0)
+ brcmf_err("Setting SCB (de-)authorize failed, %d\n", err);
+
+ return err;
+}
static void
brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
@@ -4471,6 +4584,7 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.stop_ap = brcmf_cfg80211_stop_ap,
.change_beacon = brcmf_cfg80211_change_beacon,
.del_station = brcmf_cfg80211_del_station,
+ .change_station = brcmf_cfg80211_change_station,
.sched_scan_start = brcmf_cfg80211_sched_scan_start,
.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
.mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
@@ -4778,7 +4892,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
(reason == BRCMF_E_STATUS_SUCCESS)) {
memset(&sinfo, 0, sizeof(sinfo));
- sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
if (!data) {
brcmf_err("No IEs present in ASSOC/REASSOC_IND");
return -EINVAL;
@@ -4833,7 +4946,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
}
- brcmf_link_down(ifp->vif);
+ brcmf_link_down(ifp->vif, brcmf_map_fw_linkdown_reason(e));
brcmf_init_prof(ndev_to_prof(ndev));
if (ndev != cfg_to_ndev(cfg))
complete(&cfg->vif_disabled);
@@ -5774,7 +5887,7 @@ static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
* from AP to save power
*/
if (check_vif_up(ifp->vif)) {
- brcmf_link_down(ifp->vif);
+ brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED);
/* Make sure WPA_Supplicant receives all the event
generated due to DISASSOC call to the fw to keep
@@ -5876,6 +5989,29 @@ int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
vif_event_equals(event, action), timeout);
}
+static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *req)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_fil_country_le ccreq;
+ int i;
+
+ brcmf_dbg(TRACE, "enter: initiator=%d, alpha=%c%c\n", req->initiator,
+ req->alpha2[0], req->alpha2[1]);
+
+ /* ignore non-ISO3166 country codes */
+ for (i = 0; i < sizeof(req->alpha2); i++)
+ if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
+ brcmf_err("not a ISO3166 code\n");
+ return;
+ }
+ memset(&ccreq, 0, sizeof(ccreq));
+ ccreq.rev = cpu_to_le32(-1);
+ memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
+ brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
+}
+
static void brcmf_free_wiphy(struct wiphy *wiphy)
{
kfree(wiphy->iface_combinations);
@@ -5952,6 +6088,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto priv_out;
brcmf_dbg(INFO, "Registering custom regulatory\n");
+ wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index 9e98b8d52757..d9e6d01b2b69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -75,6 +75,8 @@
#define BRCMF_VNDR_IE_P2PAF_SHIFT 12
+#define BRCMF_MAX_DEFAULT_KEYS 4
+
/**
* enum brcmf_scan_status - scan engine status
@@ -125,11 +127,13 @@ struct brcmf_cfg80211_security {
* @ssid: ssid of associated/associating ap.
* @bssid: bssid of joined/joining ibss.
* @sec: security information.
+ * @key: key information
*/
struct brcmf_cfg80211_profile {
struct brcmf_ssid ssid;
u8 bssid[ETH_ALEN];
struct brcmf_cfg80211_security sec;
+ struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
};
/**
@@ -196,6 +200,7 @@ struct brcmf_cfg80211_vif {
struct list_head list;
u16 mgmt_rx_reg;
bool mbss;
+ int is_11d;
};
/* association inform */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ddae0b5e56ec..04d2ca0d87d6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -101,14 +101,7 @@
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE 0x18002000
#define BCM4329_RAMSIZE 0x48000
-
/* bcm43143 */
-/* SDIO device core */
-#define BCM43143_CORE_BUS_BASE 0x18002000
-/* internal memory core */
-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM43143_CORE_ARM_BASE 0x18003000
#define BCM43143_RAMSIZE 0x70000
#define CORE_SB(base, field) \
@@ -164,13 +157,6 @@ struct brcmf_core_priv {
struct brcmf_chip_priv *chip;
};
-/* ARM CR4 core specific control flag bits */
-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
-
-/* D11 core specific control flag bits */
-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
-#define D11_BCMA_IOCTL_PHYRESET 0x0008
-
struct brcmf_chip_priv {
struct brcmf_chip pub;
const struct brcmf_buscore_ops *ops;
@@ -495,6 +481,7 @@ static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
ci->pub.ramsize = 0x48000;
break;
case BRCM_CC_4334_CHIP_ID:
+ case BRCM_CC_43340_CHIP_ID:
ci->pub.ramsize = 0x80000;
break;
case BRCM_CC_4335_CHIP_ID:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/brcm80211/brcmfmac/common.c
index 1861a13e8d03..fe54844c75e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.c
@@ -25,6 +25,9 @@
#include "fwil.h"
#include "fwil_types.h"
#include "tracepoint.h"
+#include "common.h"
+
+const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#define BRCMF_DEFAULT_BCN_TIMEOUT 3
#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
@@ -38,6 +41,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_join_pref_params join_pref_params[2];
+ struct brcmf_rev_info_le revinfo;
+ struct brcmf_rev_info *ri;
char *ptr;
s32 err;
@@ -45,12 +50,37 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
sizeof(ifp->mac_addr));
if (err < 0) {
- brcmf_err("Retreiving cur_etheraddr failed, %d\n",
- err);
+ brcmf_err("Retreiving cur_etheraddr failed, %d\n", err);
goto done;
}
memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO,
+ &revinfo, sizeof(revinfo));
+ ri = &ifp->drvr->revinfo;
+ if (err < 0) {
+ brcmf_err("retrieving revision info failed, %d\n", err);
+ } else {
+ ri->vendorid = le32_to_cpu(revinfo.vendorid);
+ ri->deviceid = le32_to_cpu(revinfo.deviceid);
+ ri->radiorev = le32_to_cpu(revinfo.radiorev);
+ ri->chiprev = le32_to_cpu(revinfo.chiprev);
+ ri->corerev = le32_to_cpu(revinfo.corerev);
+ ri->boardid = le32_to_cpu(revinfo.boardid);
+ ri->boardvendor = le32_to_cpu(revinfo.boardvendor);
+ ri->boardrev = le32_to_cpu(revinfo.boardrev);
+ ri->driverrev = le32_to_cpu(revinfo.driverrev);
+ ri->ucoderev = le32_to_cpu(revinfo.ucoderev);
+ ri->bus = le32_to_cpu(revinfo.bus);
+ ri->chipnum = le32_to_cpu(revinfo.chipnum);
+ ri->phytype = le32_to_cpu(revinfo.phytype);
+ ri->phyrev = le32_to_cpu(revinfo.phyrev);
+ ri->anarev = le32_to_cpu(revinfo.anarev);
+ ri->chippkg = le32_to_cpu(revinfo.chippkg);
+ ri->nvramrev = le32_to_cpu(revinfo.nvramrev);
+ }
+ ri->result = err;
+
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
strcpy(buf, "ver");
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform_msm.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h
index 2f2229edb498..0d39d80cee28 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform_msm.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+/* Copyright (c) 2014 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -7,18 +6,15 @@
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#ifndef BRCMFMAC_COMMON_H
+#define BRCMFMAC_COMMON_H
-#ifndef __WIL_PLATFORM__MSM_H__
-#define __WIL_PLATFORM_MSM_H__
+extern const u8 ALLFFMAC[ETH_ALEN];
-#include "wil_platform.h"
-
-void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops);
-
-#endif /* __WIL_PLATFORM__MSM_H__ */
+#endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
index 002336e35764..3d404016a92e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
@@ -37,6 +37,8 @@ struct brcmf_commonring {
unsigned long flags;
bool inited;
bool was_full;
+
+ atomic_t outstanding_tx;
};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index effe6d7831d9..2d6e2cc1b12c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -197,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
/* Can the device send data? */
- if (drvr->bus_if->state != BRCMF_BUS_DATA) {
+ if (drvr->bus_if->state != BRCMF_BUS_UP) {
brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
netif_stop_queue(ndev);
dev_kfree_skb(skb);
@@ -601,9 +601,12 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
+ char drev[BRCMU_DOTREV_LEN] = "n/a";
+ if (drvr->revinfo.result == 0)
+ brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- snprintf(info->version, sizeof(info->version), "n/a");
+ strlcpy(info->version, drev, sizeof(info->version));
strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
sizeof(info->bus_info));
@@ -637,7 +640,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* If bus is not ready, can't continue */
- if (bus_if->state != BRCMF_BUS_DATA) {
+ if (bus_if->state != BRCMF_BUS_UP) {
brcmf_err("failed bus is not ready\n");
return -EAGAIN;
}
@@ -964,13 +967,20 @@ int brcmf_bus_start(struct device *dev)
p2p_ifp = NULL;
/* signal bus ready */
- brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);
+ brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
/* Bus is ready, do any initialization */
ret = brcmf_c_preinit_dcmds(ifp);
if (ret < 0)
goto fail;
+ /* assure we have chipid before feature attach */
+ if (!bus_if->chip) {
+ bus_if->chip = drvr->revinfo.chipnum;
+ bus_if->chiprev = drvr->revinfo.chiprev;
+ brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
+ bus_if->chip, bus_if->chip, bus_if->chiprev);
+ }
brcmf_feat_attach(drvr);
ret = brcmf_fws_init(drvr);
@@ -1093,9 +1103,8 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
return atomic_read(&ifp->pend_8021x_cnt);
}
-int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
+int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
int err;
err = wait_event_timeout(ifp->pend_8021x_wait,
@@ -1107,6 +1116,27 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
return !err;
}
+void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
+{
+ struct brcmf_pub *drvr = bus->drvr;
+ struct net_device *ndev;
+ int ifidx;
+
+ brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
+ bus->state = state;
+
+ if (state == BRCMF_BUS_UP) {
+ for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
+ if ((drvr->iflist[ifidx]) &&
+ (drvr->iflist[ifidx]->ndev)) {
+ ndev = drvr->iflist[ifidx]->ndev;
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
+ }
+ }
+}
+
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index 23f74b139cc8..fd74a9c6e9ac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -29,8 +29,6 @@
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
-#define DOT11_MAX_DEFAULT_KEYS 4
-
/* Small, medium and maximum buffer size for dcmd
*/
#define BRCMF_DCMD_SMLEN 256
@@ -73,6 +71,35 @@ struct brcmf_proto; /* device communication protocol info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
struct brcmf_fws_info; /* firmware signalling info */
+/*
+ * struct brcmf_rev_info
+ *
+ * The result field stores the error code of the
+ * revision info request from firmware. For the
+ * other fields see struct brcmf_rev_info_le in
+ * fwil_types.h
+ */
+struct brcmf_rev_info {
+ int result;
+ u32 vendorid;
+ u32 deviceid;
+ u32 radiorev;
+ u32 chiprev;
+ u32 corerev;
+ u32 boardid;
+ u32 boardvendor;
+ u32 boardrev;
+ u32 driverrev;
+ u32 ucoderev;
+ u32 bus;
+ u32 chipnum;
+ u32 phytype;
+ u32 phyrev;
+ u32 anarev;
+ u32 chippkg;
+ u32 nvramrev;
+};
+
/* Common structure for module and instance linkage */
struct brcmf_pub {
/* Linkage ponters */
@@ -106,6 +133,7 @@ struct brcmf_pub {
u32 feat_flags;
u32 chip_quirks;
+ struct brcmf_rev_info revinfo;
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
@@ -167,7 +195,7 @@ struct brcmf_skb_reorder_data {
u8 *reorder;
};
-int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
/* Return pointer to interface name */
char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 1ff787d1a36b..9cb99152ad17 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -103,7 +103,11 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
c = nvp->fwnv->data[nvp->pos];
if (c == '=') {
- st = VALUE;
+ /* ignore RAW1 by treating as comment */
+ if (strncmp(&nvp->fwnv->data[nvp->entry], "RAW1", 4) == 0)
+ st = COMMENT;
+ else
+ st = VALUE;
} else if (!is_nvram_char(c)) {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 44f3a84d1999..910fbb561469 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -25,6 +25,7 @@
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
+#include "common.h"
#define BRCMF_FLOWRING_HIGH 1024
@@ -34,9 +35,6 @@
#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
-static const u8 ALLZEROMAC[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
-static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
static const u8 brcmf_flowring_prio2fifo[] = {
1,
0,
@@ -137,7 +135,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
hash = flow->hash;
for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
- (memcmp(hash[hash_idx].mac, ALLZEROMAC, ETH_ALEN) == 0)) {
+ (is_zero_ether_addr(hash[hash_idx].mac))) {
found = true;
break;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 03f2c406a17b..dcfa0bb149ce 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -109,7 +109,7 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
struct brcmf_pub *drvr = ifp->drvr;
s32 err;
- if (drvr->bus_if->state != BRCMF_BUS_DATA) {
+ if (drvr->bus_if->state != BRCMF_BUS_UP) {
brcmf_err("bus is down. we have nothing to do.\n");
return -EIO;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index a30be683f4a1..5434dcf64f7d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -43,6 +43,8 @@
#define BRCMF_C_SET_RADIO 38
#define BRCMF_C_GET_PHYTYPE 39
#define BRCMF_C_SET_KEY 45
+#define BRCMF_C_GET_REGULATORY 46
+#define BRCMF_C_SET_REGULATORY 47
#define BRCMF_C_SET_PASSIVE_SCAN 49
#define BRCMF_C_SCAN 50
#define BRCMF_C_SCAN_RESULTS 51
@@ -57,9 +59,12 @@
#define BRCMF_C_SET_COUNTRY 84
#define BRCMF_C_GET_PM 85
#define BRCMF_C_SET_PM 86
+#define BRCMF_C_GET_REVINFO 98
#define BRCMF_C_GET_CURR_RATESET 114
#define BRCMF_C_GET_AP 117
#define BRCMF_C_SET_AP 118
+#define BRCMF_C_SET_SCB_AUTHORIZE 121
+#define BRCMF_C_SET_SCB_DEAUTHORIZE 122
#define BRCMF_C_GET_RSSI 127
#define BRCMF_C_GET_WSEC 133
#define BRCMF_C_SET_WSEC 134
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 50891c02c4c1..374920965108 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -112,6 +112,7 @@
#define BRCMF_WOWL_MAXPATTERNS 8
#define BRCMF_WOWL_MAXPATTERNSIZE 128
+#define BRCMF_COUNTRY_BUF_SZ 4
/* join preference types for join_pref iovar */
enum brcmf_join_pref_types {
@@ -525,4 +526,58 @@ struct brcmf_mbss_ssid_le {
unsigned char SSID[32];
};
+/**
+ * struct brcmf_fil_country_le - country configuration structure.
+ *
+ * @country_abbrev: null-terminated country code used in the country IE.
+ * @rev: revision specifier for ccode. on set, -1 indicates unspecified.
+ * @ccode: null-terminated built-in country code.
+ */
+struct brcmf_fil_country_le {
+ char country_abbrev[BRCMF_COUNTRY_BUF_SZ];
+ __le32 rev;
+ char ccode[BRCMF_COUNTRY_BUF_SZ];
+};
+
+/**
+ * struct brcmf_rev_info_le - device revision info.
+ *
+ * @vendorid: PCI vendor id.
+ * @deviceid: device id of chip.
+ * @radiorev: radio revision.
+ * @chiprev: chip revision.
+ * @corerev: core revision.
+ * @boardid: board identifier (usu. PCI sub-device id).
+ * @boardvendor: board vendor (usu. PCI sub-vendor id).
+ * @boardrev: board revision.
+ * @driverrev: driver version.
+ * @ucoderev: microcode version.
+ * @bus: bus type.
+ * @chipnum: chip number.
+ * @phytype: phy type.
+ * @phyrev: phy revision.
+ * @anarev: anacore rev.
+ * @chippkg: chip package info.
+ * @nvramrev: nvram revision number.
+ */
+struct brcmf_rev_info_le {
+ __le32 vendorid;
+ __le32 deviceid;
+ __le32 radiorev;
+ __le32 chiprev;
+ __le32 corerev;
+ __le32 boardid;
+ __le32 boardvendor;
+ __le32 boardrev;
+ __le32 driverrev;
+ __le32 ucoderev;
+ __le32 bus;
+ __le32 chipnum;
+ __le32 phytype;
+ __le32 phyrev;
+ __le32 anarev;
+ __le32 chippkg;
+ __le32 nvramrev;
+};
+
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 456944a6a2db..6262612dec45 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -73,6 +73,8 @@
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
+#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
struct msgbuf_common_hdr {
u8 msgtype;
@@ -583,7 +585,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
u32 flowid;
void *dma_buf;
u32 dma_sz;
- long long address;
+ u64 address;
int err;
flowid = work->flowid;
@@ -620,7 +622,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
BRCMF_NROF_H2D_COMMON_MSGRINGS);
memcpy(create->sa, work->sa, ETH_ALEN);
memcpy(create->da, work->da, ETH_ALEN);
- address = (long long)(long)msgbuf->flowring_dma_handle[flowid];
+ address = (u64)msgbuf->flowring_dma_handle[flowid];
create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
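
Every long long to u64 change in this file feeds the same pattern: a dma_addr_t is widened to 64 bits and split into the low/high __le32 words of the message. A compact restatement with a hypothetical helper; the field writes mirror the hunks above:

/* Sketch: hand a DMA address to the firmware as two little-endian
 * 32-bit words. Going through u64 keeps 32-bit dma_addr_t builds safe.
 */
static void msgbuf_set_addr(__le32 *high_addr, __le32 *low_addr,
			    dma_addr_t pa)
{
	u64 address = (u64)pa;

	*high_addr = cpu_to_le32(address >> 32);
	*low_addr = cpu_to_le32(address & 0xffffffff);
}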
@@ -698,7 +700,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
dma_addr_t physaddr;
u32 pktid;
struct msgbuf_tx_msghdr *tx_msghdr;
- long long address;
+ u64 address;
commonring = msgbuf->flowrings[flowid];
if (!brcmf_commonring_write_available(commonring))
@@ -742,13 +744,14 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
tx_msghdr->seg_cnt = 1;
memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
- address = (long long)(long)physaddr;
+ address = (u64)physaddr;
tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
tx_msghdr->data_buf_addr.low_addr =
cpu_to_le32(address & 0xffffffff);
tx_msghdr->metadata_buf_len = 0;
tx_msghdr->metadata_buf_addr.high_addr = 0;
tx_msghdr->metadata_buf_addr.low_addr = 0;
+ atomic_inc(&commonring->outstanding_tx);
if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
brcmf_commonring_write_complete(commonring);
count = 0;
@@ -773,10 +776,16 @@ static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
}
-static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid)
+static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
+ bool force)
{
+ struct brcmf_commonring *commonring;
+
set_bit(flowid, msgbuf->flow_map);
- queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
+ commonring = msgbuf->flowrings[flowid];
+ if ((force) || (atomic_read(&commonring->outstanding_tx) <
+ BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
+ queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
return 0;
}
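
The new force flag and the BRCMF_MSGBUF_DELAY_TXWORKER_THRS check throttle how often the txflow worker is queued while many transmits are still outstanding on the ring; the scheduling condition boils down to something like:

/* Sketch of the scheduling condition introduced above. */
static bool txworker_should_run(struct brcmf_commonring *ring, bool force)
{
	return force ||
	       atomic_read(&ring->outstanding_tx) <
	       BRCMF_MSGBUF_DELAY_TXWORKER_THRS;
}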
@@ -797,7 +806,7 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
return -ENOMEM;
}
brcmf_flowring_enqueue(flow, flowid, skb);
- brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
return 0;
}
@@ -854,6 +863,7 @@ brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
+ struct brcmf_commonring *commonring;
struct msgbuf_tx_status *tx_status;
u32 idx;
struct sk_buff *skb;
@@ -871,6 +881,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
}
set_bit(flowid, msgbuf->txstatus_done_map);
+ commonring = msgbuf->flowrings[flowid];
+ atomic_dec(&commonring->outstanding_tx);
brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
}
@@ -885,7 +897,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
u32 pktlen;
dma_addr_t physaddr;
struct msgbuf_rx_bufpost *rx_bufpost;
- long long address;
+ u64 address;
u32 pktid;
u32 i;
@@ -894,7 +906,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
count,
&alloced);
if (!ret_ptr) {
- brcmf_err("Failed to reserve space in commonring\n");
+ brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
return 0;
}
@@ -921,7 +933,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
}
if (msgbuf->rx_metadata_offset) {
- address = (long long)(long)physaddr;
+ address = (u64)physaddr;
rx_bufpost->metadata_buf_len =
cpu_to_le16(msgbuf->rx_metadata_offset);
rx_bufpost->metadata_buf_addr.high_addr =
@@ -936,7 +948,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
rx_bufpost->msg.request_id = cpu_to_le32(pktid);
- address = (long long)(long)physaddr;
+ address = (u64)physaddr;
rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
rx_bufpost->data_buf_addr.high_addr =
cpu_to_le32(address >> 32);
@@ -992,7 +1004,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
u32 pktlen;
dma_addr_t physaddr;
struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
- long long address;
+ u64 address;
u32 pktid;
u32 i;
@@ -1035,7 +1047,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
MSGBUF_TYPE_IOCTLRESP_BUF_POST;
rx_bufpost->msg.request_id = cpu_to_le32(pktid);
- address = (long long)(long)physaddr;
+ address = (u64)physaddr;
rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
rx_bufpost->host_buf_addr.high_addr =
cpu_to_le32(address >> 32);
@@ -1181,7 +1193,7 @@ brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
brcmf_flowring_open(msgbuf->flow, flowid);
- brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}
@@ -1280,8 +1292,10 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct brcmf_commonring *commonring;
void *buf;
u32 flowid;
+ int qlen;
buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
brcmf_msgbuf_process_rx(msgbuf, buf);
@@ -1293,8 +1307,12 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
for_each_set_bit(flowid, msgbuf->txstatus_done_map,
msgbuf->nrof_flowrings) {
clear_bit(flowid, msgbuf->txstatus_done_map);
- if (brcmf_flowring_qlen(msgbuf->flow, flowid))
- brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
+ commonring = msgbuf->flowrings[flowid];
+ qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
+ if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
+ ((qlen) && (atomic_read(&commonring->outstanding_tx) <
+ BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}
return 0;
@@ -1348,7 +1366,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
struct brcmf_bus_msgbuf *if_msgbuf;
struct brcmf_msgbuf *msgbuf;
- long long address;
+ u64 address;
u32 count;
if_msgbuf = drvr->bus_if->msgbuf;
@@ -1379,7 +1397,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
GFP_KERNEL);
if (!msgbuf->ioctbuf)
goto fail;
- address = (long long)(long)msgbuf->ioctbuf_handle;
+ address = (u64)msgbuf->ioctbuf_handle;
msgbuf->ioctbuf_phys_hi = address >> 32;
msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
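
The msgbuf hunks above implement a simple transmit throttle: an atomic outstanding_tx counter per commonring is incremented when a packet is put on the ring and decremented in the txstatus handler, and the txflow worker is only queued while the in-flight count stays below a threshold, unless the caller forces it (as the flowring-create response path does). A rough standalone sketch of that gating idea, with invented names and an arbitrary threshold:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DELAY_TXWORKER_THRS 64            /* illustrative threshold */

    static atomic_int outstanding_tx;          /* packets sent to HW, no txstatus yet */

    static void queue_tx_worker(void)          /* stand-in for queue_work() */
    {
            puts("tx worker scheduled");
    }

    /* Only schedule the worker while few packets are in flight, unless forced. */
    static void schedule_txdata(bool force)
    {
            if (force || atomic_load(&outstanding_tx) < DELAY_TXWORKER_THRS)
                    queue_tx_worker();
    }

    static void tx_submitted(void)   { atomic_fetch_add(&outstanding_tx, 1); }
    static void tx_status_done(void) { atomic_fetch_sub(&outstanding_tx, 1); }

    int main(void)
    {
            tx_submitted();
            schedule_txdata(false);   /* scheduled: 1 < threshold */
            tx_status_done();
            schedule_txdata(true);    /* force path, e.g. right after flowring create */
            return 0;
    }

The real driver keys this off BRCMF_MSGBUF_DELAY_TXWORKER_THRS, plus a separate trickle threshold in the rx-trigger path; the sketch only shows the gating mechanism.
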
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 905991fdb7b1..61c053a729be 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -959,14 +959,14 @@ brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
dma_addr_t *dma_handle)
{
void *ring;
- long long address;
+ u64 address;
ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
GFP_KERNEL);
if (!ring)
return NULL;
- address = (long long)(long)*dma_handle;
+ address = (u64)*dma_handle;
brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
@@ -1166,7 +1166,7 @@ brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
- long long address;
+ u64 address;
u32 addr;
devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
@@ -1180,7 +1180,7 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
- address = (long long)(long)devinfo->shared.scratch_dmahandle;
+ address = (u64)devinfo->shared.scratch_dmahandle;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
addr = devinfo->shared.tcm_base_address +
@@ -1198,7 +1198,7 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
- address = (long long)(long)devinfo->shared.ringupd_dmahandle;
+ address = (u64)devinfo->shared.ringupd_dmahandle;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
addr = devinfo->shared.tcm_base_address +
@@ -1828,7 +1828,7 @@ static int brcmf_pcie_resume(struct pci_dev *pdev)
goto cleanup;
brcmf_dbg(PCIE, "Hot resume, continue....\n");
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
- brcmf_bus_change_state(bus, BRCMF_BUS_DATA);
+ brcmf_bus_change_state(bus, BRCMF_BUS_UP);
brcmf_pcie_intr_enable(devinfo);
return 0;
}
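
The repeated `(long long)(long)physaddr` to `(u64)physaddr` conversions in msgbuf.c and pcie.c all feed the same operation: widen the DMA handle to an unsigned 64-bit value and split it into the low and high 32-bit words the hardware descriptors expect. The old double cast goes through `long`, which can truncate and sign-extend the upper half on 32-bit hosts whose dma_addr_t is 64 bits wide. A small standalone illustration of the split, with plain uint64_t standing in for dma_addr_t:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    static void split_dma_addr(uint64_t address, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)(address & 0xffffffff);  /* low_addr word  */
            *hi = (uint32_t)(address >> 32);         /* high_addr word */
    }

    int main(void)
    {
            uint32_t lo, hi;

            split_dma_addr(0x00000001deadbeefULL, &lo, &hi);
            /* prints lo=0xdeadbeef hi=0x00000001 */
            printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", lo, hi);
            return 0;
    }
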
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 0b0d51a61060..faec35c899ec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -44,7 +44,8 @@
#include "chip.h"
#include "firmware.h"
-#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
+#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
+#define CTL_DONE_TIMEOUT 2000 /* In milli second */
#ifdef DEBUG
@@ -495,9 +496,9 @@ struct brcmf_sdio {
u8 *ctrl_frame_buf;
u16 ctrl_frame_len;
bool ctrl_frame_stat;
+ int ctrl_frame_err;
spinlock_t txq_lock; /* protect bus->txq */
- struct semaphore tx_seq_lock; /* protect bus->tx_seq */
wait_queue_head_t ctrl_wait;
wait_queue_head_t dcmd_resp_wait;
@@ -514,7 +515,6 @@ struct brcmf_sdio {
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
- bool sleeping; /* SDIO bus sleeping */
u8 tx_hdrlen; /* sdio bus header length for tx packet */
bool txglom; /* host tx glomming enable flag */
@@ -608,6 +608,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
+#define BCM43340_FIRMWARE_NAME "brcm/brcmfmac43340-sdio.bin"
+#define BCM43340_NVRAM_NAME "brcm/brcmfmac43340-sdio.txt"
#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
#define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin"
@@ -629,6 +631,8 @@ MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43340_NVRAM_NAME);
MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
@@ -660,6 +664,7 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) },
{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
@@ -1008,12 +1013,12 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
- (bus->sleeping ? "SLEEP" : "WAKE"));
+ (bus->sdiodev->sleeping ? "SLEEP" : "WAKE"));
/* If SR is enabled control bus state with KSO */
if (bus->sr_enabled) {
/* Done if we're already in the requested state */
- if (sleep == bus->sleeping)
+ if (sleep == bus->sdiodev->sleeping)
goto end;
/* Going to sleep */
@@ -1045,12 +1050,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
bus->idlecount = 0;
err = brcmf_sdio_kso_control(bus, true);
}
- if (!err) {
- /* Change state */
- bus->sleeping = sleep;
- brcmf_dbg(SDIO, "new state %s\n",
- (sleep ? "SLEEP" : "WAKE"));
- } else {
+ if (err) {
brcmf_err("error while changing bus sleep state %d\n",
err);
goto done;
@@ -1065,6 +1065,11 @@ end:
} else {
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
}
+ bus->sdiodev->sleeping = sleep;
+ if (sleep)
+ wake_up(&bus->sdiodev->idle_wait);
+ brcmf_dbg(SDIO, "new state %s\n",
+ (sleep ? "SLEEP" : "WAKE"));
done:
brcmf_dbg(SDIO, "Exit: err=%d\n", err);
return err;
@@ -1904,7 +1909,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxpending = true;
for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && brcmf_bus_ready(bus->sdiodev->bus_if);
+ !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA;
rd->seq_num++, rxleft--) {
/* Handle glomming separately */
@@ -2371,8 +2376,6 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* Send frames until the limit or some other event */
for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
pkt_num = 1;
- if (down_interruptible(&bus->tx_seq_lock))
- return cnt;
if (bus->txglom)
pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
bus->sdiodev->txglomsz);
@@ -2388,13 +2391,10 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
__skb_queue_tail(&pktq, pkt);
}
spin_unlock_bh(&bus->txq_lock);
- if (i == 0) {
- up(&bus->tx_seq_lock);
+ if (i == 0)
break;
- }
ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
- up(&bus->tx_seq_lock);
cnt += i;
@@ -2415,7 +2415,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ if ((bus->sdiodev->state == BRCMF_STATE_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false;
brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2503,7 +2503,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL;
}
- if (bus_if->state == BRCMF_BUS_DOWN) {
+ if (sdiodev->state != BRCMF_STATE_NOMEDIUM) {
sdio_claim_host(sdiodev->func[1]);
/* Enable clock for device interrupts */
@@ -2538,8 +2538,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
/* Clear any held glomming stuff */
- if (bus->glomd)
- brcmu_pkt_buf_free_skb(bus->glomd);
+ brcmu_pkt_buf_free_skb(bus->glomd);
brcmf_sdio_free_glom(bus);
/* Clear rx control and wake any waiters */
@@ -2604,6 +2603,21 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
return ret;
}
+static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev)
+{
+#ifdef CONFIG_PM_SLEEP
+ int retry;
+
+ /* Wait for possible resume to complete */
+ retry = 0;
+ while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50))
+ msleep(20);
+ if (atomic_read(&sdiodev->suspend))
+ return -EIO;
+#endif
+ return 0;
+}
+
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
u32 newstatus = 0;
@@ -2614,6 +2628,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
+ if (brcmf_sdio_pm_resume_wait(bus->sdiodev))
+ return;
+
sdio_claim_host(bus->sdiodev->func[1]);
/* If waiting for HTAVAIL, check status */
@@ -2720,17 +2737,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_sdio_clrintr(bus);
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
- (down_interruptible(&bus->tx_seq_lock) == 0)) {
- if (data_ok(bus)) {
- sdio_claim_host(bus->sdiodev->func[1]);
- err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
- bus->ctrl_frame_len);
- sdio_release_host(bus->sdiodev->func[1]);
-
- bus->ctrl_frame_stat = false;
- brcmf_sdio_wait_event_wakeup(bus);
- }
- up(&bus->tx_seq_lock);
+ data_ok(bus)) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
+ bus->ctrl_frame_len);
+ sdio_release_host(bus->sdiodev->func[1]);
+ bus->ctrl_frame_err = err;
+ bus->ctrl_frame_stat = false;
+ brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
@@ -2741,7 +2755,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_sdio_sendfromq(bus, framecnt);
}
- if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
+ if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) {
brcmf_err("failed backplane access over SDIO, halting operation\n");
atomic_set(&bus->intstatus, 0);
} else if (atomic_read(&bus->intstatus) ||
@@ -2942,43 +2956,30 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- int ret = -1;
+ int ret;
brcmf_dbg(TRACE, "Enter\n");
- if (down_interruptible(&bus->tx_seq_lock))
- return -EINTR;
-
- if (!data_ok(bus)) {
- brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
- bus->tx_max, bus->tx_seq);
- up(&bus->tx_seq_lock);
- /* Send from dpc */
- bus->ctrl_frame_buf = msg;
- bus->ctrl_frame_len = msglen;
- bus->ctrl_frame_stat = true;
-
- wait_event_interruptible_timeout(bus->ctrl_wait,
- !bus->ctrl_frame_stat,
- msecs_to_jiffies(2000));
-
- if (!bus->ctrl_frame_stat) {
- brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
- ret = 0;
- } else {
- brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
- bus->ctrl_frame_stat = false;
- if (down_interruptible(&bus->tx_seq_lock))
- return -EINTR;
- ret = -1;
- }
+ /* Send from dpc */
+ bus->ctrl_frame_buf = msg;
+ bus->ctrl_frame_len = msglen;
+ bus->ctrl_frame_stat = true;
+ if (atomic_read(&bus->dpc_tskcnt) == 0) {
+ atomic_inc(&bus->dpc_tskcnt);
+ queue_work(bus->brcmf_wq, &bus->datawork);
}
- if (ret == -1) {
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
- sdio_release_host(bus->sdiodev->func[1]);
- up(&bus->tx_seq_lock);
+
+ wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
+ msecs_to_jiffies(CTL_DONE_TIMEOUT));
+
+ if (!bus->ctrl_frame_stat) {
+ brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
+ bus->ctrl_frame_err);
+ ret = bus->ctrl_frame_err;
+ } else {
+ brcmf_dbg(SDIO, "ctrl_frame timeout\n");
+ bus->ctrl_frame_stat = false;
+ ret = -ETIMEDOUT;
}
if (ret)
@@ -2986,7 +2987,7 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
else
bus->sdcnt.tx_ctlpkts++;
- return ret ? -EIO : 0;
+ return ret;
}
#ifdef DEBUG
@@ -3409,8 +3410,8 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
goto err;
}
- /* Allow HT Clock now that the ARM is running. */
- brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD);
+ /* Allow full data communication using DPC from now on. */
+ bus->sdiodev->state = BRCMF_STATE_DATA;
bcmerror = 0;
err:
@@ -3556,7 +3557,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return;
}
- if (!brcmf_bus_ready(bus->sdiodev->bus_if)) {
+ if (bus->sdiodev->state != BRCMF_STATE_DATA) {
brcmf_err("bus is down. we have nothing to do\n");
return;
}
@@ -3579,10 +3580,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
{
-#ifdef DEBUG
- struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
-#endif /* DEBUG */
-
brcmf_dbg(TIMER, "Enter\n");
/* Poll period: check device if appropriate. */
@@ -3626,7 +3623,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
}
#ifdef DEBUG
/* Poll for console output periodically */
- if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
+ if (bus->sdiodev->state == BRCMF_STATE_DATA &&
bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
@@ -3811,7 +3808,7 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
u32 val, rev;
val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- if (sdiodev->func[0]->device == BRCM_SDIO_4335_4339_DEVICE_ID &&
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
@@ -3867,11 +3864,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
}
- /* SDIO register access works so moving
- * state from UNKNOWN to DOWN.
- */
- brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
-
bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
if (IS_ERR(bus->ci)) {
brcmf_err("brcmf_chip_attach failed!\n");
@@ -4005,18 +3997,16 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
- /* try to download image and nvram to the dongle */
- if (bus_if->state == BRCMF_BUS_DOWN) {
- bus->alp_only = true;
- err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
- if (err)
- goto fail;
- bus->alp_only = false;
- }
-
if (!bus_if->drvr)
return;
+ /* try to download image and nvram to the dongle */
+ bus->alp_only = true;
+ err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
+ if (err)
+ goto fail;
+ bus->alp_only = false;
+
/* Start the watchdog timer */
bus->sdcnt.tickcnt = 0;
brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
@@ -4142,7 +4132,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
spin_lock_init(&bus->rxctl_lock);
spin_lock_init(&bus->txq_lock);
- sema_init(&bus->tx_seq_lock, 1);
init_waitqueue_head(&bus->ctrl_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);
@@ -4213,7 +4202,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->idleclock = BRCMF_IDLE_ACTIVE;
/* SR state */
- bus->sleeping = false;
bus->sr_enabled = false;
brcmf_sdio_debugfs_create(bus);
@@ -4254,7 +4242,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
destroy_workqueue(bus->brcmf_wq);
if (bus->ci) {
- if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) {
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Leave the device in state where it is
@@ -4289,7 +4277,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
}
/* don't start the wd until fw is loaded */
- if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA)
+ if (bus->sdiodev->state != BRCMF_STATE_DATA)
return;
if (wdtick) {
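
Taken together, the sdio.c hunks drop the tx_seq_lock semaphore and route control frames through the DPC: brcmf_sdio_bus_txctl() now just publishes the buffer, schedules the data work, and sleeps on ctrl_wait until the DPC clears ctrl_frame_stat (leaving the result in ctrl_frame_err) or CTL_DONE_TIMEOUT elapses. A condensed, kernel-style sketch of that handoff built only from calls visible in the hunks above; it is not compilable on its own and omits the dpc_tskcnt bookkeeping:

    /* producer side (txctl path) */
    bus->ctrl_frame_buf  = msg;
    bus->ctrl_frame_len  = msglen;
    bus->ctrl_frame_stat = true;                       /* request pending */
    queue_work(bus->brcmf_wq, &bus->datawork);         /* wake the DPC */

    wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
                                     msecs_to_jiffies(CTL_DONE_TIMEOUT));
    ret = bus->ctrl_frame_stat ? -ETIMEDOUT : bus->ctrl_frame_err;

    /* consumer side (DPC), once the clock is up and credits allow */
    err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, bus->ctrl_frame_len);
    bus->ctrl_frame_err  = err;
    bus->ctrl_frame_stat = false;
    brcmf_sdio_wait_event_wakeup(bus);                 /* wakes ctrl_wait */
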
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
index 8eb42620129c..ec2586a8425c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
@@ -155,6 +155,13 @@
/* watchdog polling interval in ms */
#define BRCMF_WD_POLL_MS 10
+/* The state of the bus */
+enum brcmf_sdio_state {
+ BRCMF_STATE_DOWN, /* Device available, still initialising */
+ BRCMF_STATE_DATA, /* Ready for data transfers, DPC enabled */
+ BRCMF_STATE_NOMEDIUM /* No medium access to dongle possible */
+};
+
struct brcmf_sdreg {
int func;
int offset;
@@ -169,8 +176,8 @@ struct brcmf_sdio_dev {
u32 sbwad; /* Save backplane window address */
struct brcmf_sdio *bus;
atomic_t suspend; /* suspend flag */
- wait_queue_head_t request_word_wait;
- wait_queue_head_t request_buffer_wait;
+ bool sleeping;
+ wait_queue_head_t idle_wait;
struct device *dev;
struct brcmf_bus *bus_if;
struct brcmfmac_sdio_platform_data *pdata;
@@ -187,6 +194,7 @@ struct brcmf_sdio_dev {
char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
bool wowl_enabled;
+ enum brcmf_sdio_state state;
};
/* sdio core registers */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 4572defc280f..5df6aa72cc2d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -421,7 +421,7 @@ fail:
brcmf_err("fail!\n");
while (!list_empty(q)) {
req = list_entry(q->next, struct brcmf_usbreq, list);
- if (req && req->urb)
+ if (req)
usb_free_urb(req->urb);
list_del(q->next);
}
@@ -576,7 +576,7 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
} else if (state == BRCMFMAC_USB_STATE_UP) {
brcmf_dbg(USB, "DBUS is up\n");
- brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA);
+ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_UP);
} else {
brcmf_dbg(USB, "DBUS current state=%d\n", state);
}
@@ -1263,6 +1263,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
ret = brcmf_usb_bus_setup(devinfo);
if (ret)
goto fail;
+ /* we are done */
+ return 0;
}
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
index c9a8b9360ab1..7a1fbb2e3a71 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -78,7 +78,7 @@ int brcms_debugfs_hardware_read(struct seq_file *s, void *data)
struct brcms_hardware *hw = drvr->wlc->hw;
struct bcma_device *core = hw->d11core;
struct bcma_bus *bus = core->bus;
- char boardrev[10];
+ char boardrev[BRCMU_BOARDREV_LEN];
seq_printf(s, "chipnum 0x%x\n"
"chiprev 0x%x\n"
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 906e89ddf319..0543607002fd 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -267,15 +267,43 @@ char *brcmu_boardrev_str(u32 brev, char *buf)
char c;
if (brev < 0x100) {
- snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ snprintf(buf, BRCMU_BOARDREV_LEN, "%d.%d",
+ (brev & 0xf0) >> 4, brev & 0xf);
} else {
c = (brev & 0xf000) == 0x1000 ? 'P' : 'A';
- snprintf(buf, 8, "%c%03x", c, brev & 0xfff);
+ snprintf(buf, BRCMU_BOARDREV_LEN, "%c%03x", c, brev & 0xfff);
}
return buf;
}
EXPORT_SYMBOL(brcmu_boardrev_str);
+char *brcmu_dotrev_str(u32 dotrev, char *buf)
+{
+ u8 dotval[4];
+
+ if (!dotrev) {
+ snprintf(buf, BRCMU_DOTREV_LEN, "unknown");
+ return buf;
+ }
+ dotval[0] = (dotrev >> 24) & 0xFF;
+ dotval[1] = (dotrev >> 16) & 0xFF;
+ dotval[2] = (dotrev >> 8) & 0xFF;
+ dotval[3] = dotrev & 0xFF;
+
+ if (dotval[3])
+ snprintf(buf, BRCMU_DOTREV_LEN, "%d.%d.%d.%d", dotval[0],
+ dotval[1], dotval[2], dotval[3]);
+ else if (dotval[2])
+ snprintf(buf, BRCMU_DOTREV_LEN, "%d.%d.%d", dotval[0],
+ dotval[1], dotval[2]);
+ else
+ snprintf(buf, BRCMU_DOTREV_LEN, "%d.%d", dotval[0],
+ dotval[1]);
+
+ return buf;
+}
+EXPORT_SYMBOL(brcmu_dotrev_str);
+
#if defined(DEBUG)
/* pretty hex print a pkt buffer chain */
void brcmu_prpkt(const char *msg, struct sk_buff *p0)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 6996fcc144cf..2124a17d0bfd 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -22,7 +22,6 @@
#define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c
#define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
-#define BRCM_SDIO_VENDOR_ID_BROADCOM SDIO_VENDOR_ID_BROADCOM
/* Chipcommon Core Chip IDs */
#define BRCM_CC_43143_CHIP_ID 43143
@@ -34,6 +33,7 @@
#define BRCM_CC_4329_CHIP_ID 0x4329
#define BRCM_CC_4330_CHIP_ID 0x4330
#define BRCM_CC_4334_CHIP_ID 0x4334
+#define BRCM_CC_43340_CHIP_ID 43340
#define BRCM_CC_43362_CHIP_ID 43362
#define BRCM_CC_4335_CHIP_ID 0x4335
#define BRCM_CC_4339_CHIP_ID 0x4339
@@ -45,16 +45,6 @@
#define BRCM_CC_43570_CHIP_ID 43570
#define BRCM_CC_43602_CHIP_ID 43602
-/* SDIO Device IDs */
-#define BRCM_SDIO_43143_DEVICE_ID BRCM_CC_43143_CHIP_ID
-#define BRCM_SDIO_43241_DEVICE_ID BRCM_CC_43241_CHIP_ID
-#define BRCM_SDIO_4329_DEVICE_ID BRCM_CC_4329_CHIP_ID
-#define BRCM_SDIO_4330_DEVICE_ID BRCM_CC_4330_CHIP_ID
-#define BRCM_SDIO_4334_DEVICE_ID BRCM_CC_4334_CHIP_ID
-#define BRCM_SDIO_43362_DEVICE_ID BRCM_CC_43362_CHIP_ID
-#define BRCM_SDIO_4335_4339_DEVICE_ID BRCM_CC_4335_CHIP_ID
-#define BRCM_SDIO_4354_DEVICE_ID BRCM_CC_4354_CHIP_ID
-
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
#define BRCM_USB_43236_DEVICE_ID 0xbd17
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index a043e29f07e2..41969527b459 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -218,6 +218,10 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
}
#endif
+#define BRCMU_BOARDREV_LEN 8
+#define BRCMU_DOTREV_LEN 16
+
char *brcmu_boardrev_str(u32 brev, char *buf);
+char *brcmu_dotrev_str(u32 dotrev, char *buf);
#endif /* _BRCMU_UTILS_H_ */
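
brcmu_boardrev_str() and the new brcmu_dotrev_str() format into caller-provided buffers, and the BRCMU_BOARDREV_LEN/BRCMU_DOTREV_LEN constants added to brcmu_utils.h exist so callers size those buffers to match the snprintf() bounds inside the helpers. A hedged usage sketch (driver context assumed; the example revision values and their decoded strings are inferred from the implementation above):

    char brev[BRCMU_BOARDREV_LEN];   /* 8 bytes  */
    char drev[BRCMU_DOTREV_LEN];     /* 16 bytes */

    /* 0x1203 -> "P203": post-0x100 board revisions use the letter/hex form */
    brcmf_dbg(INFO, "board rev %s\n", brcmu_boardrev_str(0x1203, brev));

    /* 0x06251d00 -> "6.37.29": a trailing zero component is dropped */
    brcmf_dbg(INFO, "fw version %s\n", brcmu_dotrev_str(0x06251d00, drev));
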
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 6f1b9aace8b3..30e7646d04af 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -66,25 +66,31 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
do { \
ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
if (ret < 0) \
- goto error; \
+ goto exit; \
+ } while (0)
+#define APB_WRITE2(reg, val) \
+ do { \
+ ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
+ if (ret < 0) \
+ goto free_buffer; \
} while (0)
#define APB_READ(reg, val) \
do { \
ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \
if (ret < 0) \
- goto error; \
+ goto free_buffer; \
} while (0)
#define REG_WRITE(reg, val) \
do { \
ret = cw1200_reg_write_32(priv, (reg), (val)); \
if (ret < 0) \
- goto error; \
+ goto exit; \
} while (0)
#define REG_READ(reg, val) \
do { \
ret = cw1200_reg_read_32(priv, (reg), &(val)); \
if (ret < 0) \
- goto error; \
+ goto exit; \
} while (0)
switch (priv->hw_revision) {
@@ -142,14 +148,14 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
ret = request_firmware(&firmware, fw_path, priv->pdev);
if (ret) {
pr_err("Can't load firmware file %s.\n", fw_path);
- goto error;
+ goto exit;
}
buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
if (!buf) {
pr_err("Can't allocate firmware load buffer.\n");
ret = -ENOMEM;
- goto error;
+ goto firmware_release;
}
/* Check if the bootloader is ready */
@@ -163,7 +169,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
if (val32 != DOWNLOAD_I_AM_HERE) {
pr_err("Bootloader is not ready.\n");
ret = -ETIMEDOUT;
- goto error;
+ goto free_buffer;
}
/* Calculcate number of download blocks */
@@ -171,7 +177,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
/* Updating the length in Download Ctrl Area */
val32 = firmware->size; /* Explicit cast from size_t to u32 */
- APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);
+ APB_WRITE2(DOWNLOAD_IMAGE_SIZE_REG, val32);
/* Firmware downloading loop */
for (block = 0; block < num_blocks; block++) {
@@ -183,7 +189,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
if (val32 != DOWNLOAD_PENDING) {
pr_err("Bootloader reported error %d.\n", val32);
ret = -EIO;
- goto error;
+ goto free_buffer;
}
/* loop until put - get <= 24K */
@@ -198,7 +204,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
pr_err("Timeout waiting for FIFO.\n");
ret = -ETIMEDOUT;
- goto error;
+ goto free_buffer;
}
/* calculate the block size */
@@ -220,12 +226,12 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
if (ret < 0) {
pr_err("Can't write firmware block @ %d!\n",
put & (DOWNLOAD_FIFO_SIZE - 1));
- goto error;
+ goto free_buffer;
}
/* update the put register */
put += block_size;
- APB_WRITE(DOWNLOAD_PUT_REG, put);
+ APB_WRITE2(DOWNLOAD_PUT_REG, put);
} /* End of firmware download loop */
/* Wait for the download completion */
@@ -238,19 +244,21 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
if (val32 != DOWNLOAD_SUCCESS) {
pr_err("Wait for download completion failed: 0x%.8X\n", val32);
ret = -ETIMEDOUT;
- goto error;
+ goto free_buffer;
} else {
pr_info("Firmware download completed.\n");
ret = 0;
}
-error:
+free_buffer:
kfree(buf);
- if (firmware)
- release_firmware(firmware);
+firmware_release:
+ release_firmware(firmware);
+exit:
return ret;
#undef APB_WRITE
+#undef APB_WRITE2
#undef APB_READ
#undef REG_WRITE
#undef REG_READ
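
The relabelled error paths in cw1200_load_firmware_cw1200() follow the usual layered-goto cleanup style: each acquired resource gets its own label, a failure jumps to the label that releases exactly what is held at that point, and the labels unwind in reverse acquisition order (free_buffer, then firmware_release, then exit). A self-contained sketch of the same shape with hypothetical resources:

    #include <stdio.h>
    #include <stdlib.h>

    static int load_image(const char *path)
    {
            FILE *fw;
            unsigned char *buf;
            int ret = -1;

            fw = fopen(path, "rb");
            if (!fw)
                    goto exit;                 /* nothing acquired yet */

            buf = malloc(4096);
            if (!buf)
                    goto release_fw;           /* only the file needs closing */

            if (fread(buf, 1, 4096, fw) == 0)
                    goto free_buf;             /* buffer and file both to undo */

            ret = 0;                           /* "download" succeeded */
    free_buf:
            free(buf);
    release_fw:
            fclose(fw);
    exit:
            return ret;
    }

    int main(void)
    {
            /* the hypothetical path fails to open, exercising the 'exit' label */
            return load_image("/nonexistent/fw.bin") ? 1 : 0;
    }
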
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3e78cc3ccb78..3689dbbd10bd 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -282,7 +282,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
@@ -374,9 +373,8 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
INIT_WORK(&priv->set_beacon_wakeup_period_work,
cw1200_set_beacon_wakeup_period_work);
- init_timer(&priv->mcast_timeout);
- priv->mcast_timeout.data = (unsigned long)priv;
- priv->mcast_timeout.function = cw1200_mcast_timeout;
+ setup_timer(&priv->mcast_timeout, cw1200_mcast_timeout,
+ (unsigned long)priv);
if (cw1200_queue_stats_init(&priv->tx_queue_stats,
CW1200_LINK_ID_MAX,
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
index 6907c8fd4578..d2202ae92bdd 100644
--- a/drivers/net/wireless/cw1200/pm.c
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -101,9 +101,8 @@ int cw1200_pm_init(struct cw1200_pm_state *pm,
{
spin_lock_init(&pm->lock);
- init_timer(&pm->stay_awake);
- pm->stay_awake.data = (unsigned long)pm;
- pm->stay_awake.function = cw1200_pm_stay_awake_tmo;
+ setup_timer(&pm->stay_awake, cw1200_pm_stay_awake_tmo,
+ (unsigned long)pm);
return 0;
}
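
cw1200, iwlegacy and iwlwifi/dvm all make the same mechanical change here: the three-line init_timer()/.data/.function setup becomes a single setup_timer() call with identical semantics. A kernel-style sketch of the conversion (struct and callback names are placeholders; kernel timer headers are assumed, so this is not buildable standalone):

    struct my_priv {
            struct timer_list timer;
            /* ... */
    };

    static void my_timeout(unsigned long data)
    {
            struct my_priv *priv = (struct my_priv *)data;

            /* ... use priv to handle the expiry ... */
            (void)priv;
    }

    static void my_priv_init(struct my_priv *priv)
    {
            /* old open-coded form:
             *   init_timer(&priv->timer);
             *   priv->timer.data     = (unsigned long)priv;
             *   priv->timer.function = my_timeout;
             * equivalent single call used throughout this series:
             */
            setup_timer(&priv->timer, my_timeout, (unsigned long)priv);
    }
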
diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c
index 9c3925f58d79..0ba5ef9b3e7b 100644
--- a/drivers/net/wireless/cw1200/queue.c
+++ b/drivers/net/wireless/cw1200/queue.c
@@ -179,9 +179,7 @@ int cw1200_queue_init(struct cw1200_queue *queue,
INIT_LIST_HEAD(&queue->pending);
INIT_LIST_HEAD(&queue->free_pool);
spin_lock_init(&queue->lock);
- init_timer(&queue->gc);
- queue->gc.data = (unsigned long)queue;
- queue->gc.function = cw1200_queue_gc;
+ setup_timer(&queue->gc, cw1200_queue_gc, (unsigned long)queue);
queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
GFP_KERNEL);
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index f2e276faca70..bff81b8d4164 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -39,9 +39,9 @@ static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
cancel_delayed_work_sync(&priv->clear_recent_scan_work);
atomic_set(&priv->scan.in_progress, 1);
atomic_set(&priv->recent_scan, 1);
- cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000);
+ cw1200_pm_stay_awake(&priv->pm_state, msecs_to_jiffies(tmo));
queue_delayed_work(priv->workqueue, &priv->scan.timeout,
- tmo * HZ / 1000);
+ msecs_to_jiffies(tmo));
ret = wsm_scan(priv, scan);
if (ret) {
atomic_set(&priv->scan.in_progress, 0);
@@ -386,8 +386,8 @@ void cw1200_probe_work(struct work_struct *work)
if (down_trylock(&priv->scan.lock)) {
/* Scan is already in progress. Requeue self. */
schedule();
- queue_delayed_work(priv->workqueue,
- &priv->scan.probe_work, HZ / 10);
+ queue_delayed_work(priv->workqueue, &priv->scan.probe_work,
+ msecs_to_jiffies(100));
mutex_unlock(&priv->conf_mutex);
return;
}
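
The scan changes replace open-coded `tmo * HZ / 1000` arithmetic with msecs_to_jiffies(). The helper is HZ-agnostic and rounds up, while the raw integer division truncates, which can turn a small but nonzero timeout into zero ticks. A standalone illustration of that truncation (the HZ value is chosen only for the example):

    #include <stdio.h>

    #define HZ 250                       /* example tick rate */

    int main(void)
    {
            unsigned int tmo = 3;        /* milliseconds */

            /* naive conversion: 3 * 250 / 1000 == 0, a zero-length timeout */
            printf("naive   : %u jiffies\n", tmo * HZ / 1000);

            /* rounding up, as msecs_to_jiffies() does: at least one tick */
            printf("round-up: %u jiffies\n", (tmo * HZ + 999) / 1000);
            return 0;
    }
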
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 5b84664db13b..4a47c7f8a246 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -213,6 +213,7 @@ int cw1200_add_interface(struct ieee80211_hw *dev,
/* __le32 auto_calibration_mode = __cpu_to_le32(1); */
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
mutex_lock(&priv->conf_mutex);
@@ -708,7 +709,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
if (sta)
peer_addr = sta->addr;
- key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 596525528f50..fd8d83dd4f62 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -145,7 +145,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
if (sta->aid > 0)
ap->sta_aid[sta->aid - 1] = NULL;
- if (!sta->ap && sta->u.sta.challenge)
+ if (!sta->ap)
kfree(sta->u.sta.challenge);
del_timer_sync(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
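
The hostap change works because kfree() is documented to be a no-op for a NULL pointer, so testing `sta->u.sta.challenge` before freeing it adds nothing; the earlier removal of the `if (bus->glomd)` guard around brcmu_pkt_buf_free_skb() in sdio.c presumably relies on that helper tolerating NULL in the same way. The general pattern:

    /* a NULL check guarding a NULL-tolerant free ... */
    if (ptr)
            kfree(ptr);

    /* ... can be reduced to the bare call; kfree(NULL) does nothing */
    kfree(ptr);
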
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index dc1d20cf64ee..e5665804d986 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3429,9 +3429,7 @@ il3945_setup_deferred_work(struct il_priv *il)
il3945_hw_setup_deferred_work(il);
- init_timer(&il->watchdog);
- il->watchdog.data = (unsigned long)il;
- il->watchdog.function = il_bg_watchdog;
+ setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
tasklet_init(&il->irq_tasklet,
(void (*)(unsigned long))il3945_irq_tasklet,
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 2748fde4b90c..976f65fe9c38 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6247,13 +6247,10 @@ il4965_setup_deferred_work(struct il_priv *il)
INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
- init_timer(&il->stats_periodic);
- il->stats_periodic.data = (unsigned long)il;
- il->stats_periodic.function = il4965_bg_stats_periodic;
+ setup_timer(&il->stats_periodic, il4965_bg_stats_periodic,
+ (unsigned long)il);
- init_timer(&il->watchdog);
- il->watchdog.data = (unsigned long)il;
- il->watchdog.function = il_bg_watchdog;
+ setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
tasklet_init(&il->irq_tasklet,
(void (*)(unsigned long))il4965_irq_tasklet,
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 0b7f46f0b079..c4d6dd7402d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -64,22 +64,8 @@
*
******************************************************************************/
-/*
- * module name, copyright, version, etc.
- */
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD
-
-
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
@@ -1011,13 +997,11 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
if (priv->lib->bt_params)
iwlagn_bt_setup_deferred_work(priv);
- init_timer(&priv->statistics_periodic);
- priv->statistics_periodic.data = (unsigned long)priv;
- priv->statistics_periodic.function = iwl_bg_statistics_periodic;
+ setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
+ (unsigned long)priv);
- init_timer(&priv->ucode_trace);
- priv->ucode_trace.data = (unsigned long)priv;
- priv->ucode_trace.function = iwl_bg_ucode_trace;
+ setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
+ (unsigned long)priv);
}
void iwl_cancel_deferred_work(struct iwl_priv *priv)
@@ -1244,11 +1228,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
- if (!iwlwifi_mod_params.wd_disable)
- trans_cfg.queue_watchdog_timeout =
- priv->cfg->base_params->wd_timeout;
- else
- trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
+ trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
+
trans_cfg.command_names = iwl_dvm_cmd_strings;
trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index acb981a0a0aa..c4736c8834c5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -612,15 +612,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
tt->state = IWL_TI_0;
- init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
- priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
- priv->thermal_throttle.ct_kill_exit_tm.function =
- iwl_tt_check_exit_ct_kill;
- init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
- priv->thermal_throttle.ct_kill_waiting_tm.data =
- (unsigned long)priv;
- priv->thermal_throttle.ct_kill_waiting_tm.function =
- iwl_tt_ready_for_ct_kill;
+ setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
+ iwl_tt_check_exit_ct_kill, (unsigned long)priv);
+ setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
+ iwl_tt_ready_for_ct_kill, (unsigned long)priv);
/* setup deferred ct kill work */
INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index d1ce3ce13591..1e40a12de077 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -715,7 +715,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
- buf_size, ssn);
+ buf_size, ssn, 0);
/*
* If the limit is 0, then it wasn't initialised yet,
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index d5cee1530597..4dbef7e58c2e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -267,7 +267,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
for (i = 0; i < n_queues; i++)
if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
iwl_trans_ac_txq_enable(priv->trans, i,
- queue_to_txf[i]);
+ queue_to_txf[i], 0);
priv->passive_no_rx = false;
priv->transport_queue_stop = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index e5be2d21868f..97e38d2e2983 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,8 +69,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 10
-#define IWL3160_UCODE_API_MAX 10
+#define IWL7260_UCODE_API_MAX 12
+#define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
@@ -92,6 +92,12 @@
#define IWL7265D_NVM_VERSION 0x0c11
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
+/* DCCM offsets and lengths */
+#define IWL7000_DCCM_OFFSET 0x800000
+#define IWL7260_DCCM_LEN 0x14000
+#define IWL3160_DCCM_LEN 0x10000
+#define IWL7265_DCCM_LEN 0x17A00
+
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -105,7 +111,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
@@ -138,7 +144,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
.non_shared_ant = ANT_A, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .dccm_offset = IWL7000_DCCM_OFFSET
const struct iwl_cfg iwl7260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7260",
@@ -149,6 +156,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
+ .dccm_len = IWL7260_DCCM_LEN,
};
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -161,6 +169,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.high_temp = true,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
+ .dccm_len = IWL7260_DCCM_LEN,
};
const struct iwl_cfg iwl7260_2n_cfg = {
@@ -172,6 +181,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
+ .dccm_len = IWL7260_DCCM_LEN,
};
const struct iwl_cfg iwl7260_n_cfg = {
@@ -183,6 +193,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
+ .dccm_len = IWL7260_DCCM_LEN,
};
const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -193,6 +204,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
.nvm_ver = IWL3160_NVM_VERSION,
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .dccm_len = IWL3160_DCCM_LEN,
};
const struct iwl_cfg iwl3160_2n_cfg = {
@@ -203,6 +215,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
.nvm_ver = IWL3160_NVM_VERSION,
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .dccm_len = IWL3160_DCCM_LEN,
};
const struct iwl_cfg iwl3160_n_cfg = {
@@ -213,6 +226,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
.nvm_ver = IWL3160_NVM_VERSION,
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .dccm_len = IWL3160_DCCM_LEN,
};
static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
@@ -240,6 +254,7 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
.nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
const struct iwl_cfg iwl7265_2ac_cfg = {
@@ -250,6 +265,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
const struct iwl_cfg iwl7265_2n_cfg = {
@@ -260,6 +276,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
const struct iwl_cfg iwl7265_n_cfg = {
@@ -270,6 +287,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
const struct iwl_cfg iwl7265d_2ac_cfg = {
@@ -280,6 +298,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
const struct iwl_cfg iwl7265d_2n_cfg = {
@@ -290,6 +309,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
const struct iwl_cfg iwl7265d_n_cfg = {
@@ -300,6 +320,7 @@ const struct iwl_cfg iwl7265d_n_cfg = {
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+ .dccm_len = IWL7265_DCCM_LEN,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index bf0a95cb7153..2f7fe8167dc9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 10
+#define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
@@ -81,12 +81,21 @@
#define IWL8000_NVM_VERSION 0x0a1d
#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
+/* Memory offsets and lengths */
+#define IWL8260_DCCM_OFFSET 0x800000
+#define IWL8260_DCCM_LEN 0x18000
+#define IWL8260_DCCM2_OFFSET 0x880000
+#define IWL8260_DCCM2_LEN 0x8000
+#define IWL8260_SMEM_OFFSET 0x400000
+#define IWL8260_SMEM_LEN 0x68000
+
#define IWL8000_FW_PRE "iwlwifi-8000"
#define IWL8000_MODULE_FIRMWARE(api) \
IWL8000_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
-#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin"
+#define DEFAULT_NVM_FILE_FAMILY_8000A "iwl_nvm_8000.bin"
+#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000B.bin"
/* Max SDIO RX aggregation size of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 28
@@ -124,7 +133,13 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
- .non_shared_ant = ANT_A
+ .non_shared_ant = ANT_A, \
+ .dccm_offset = IWL8260_DCCM_OFFSET, \
+ .dccm_len = IWL8260_DCCM_LEN, \
+ .dccm2_offset = IWL8260_DCCM2_OFFSET, \
+ .dccm2_len = IWL8260_DCCM2_LEN, \
+ .smem_offset = IWL8260_SMEM_OFFSET, \
+ .smem_len = IWL8260_SMEM_LEN
const struct iwl_cfg iwl8260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 8260",
@@ -145,6 +160,16 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
+const struct iwl_cfg iwl4165_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 4165",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
@@ -153,6 +178,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
+ .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -167,6 +193,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
+ .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3a4b9c7fc083..4b190d98a1ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -126,7 +126,7 @@ enum iwl_led_mode {
/* TX queue watchdog timeouts in mSecs */
#define IWL_WATCHDOG_DISABLED 0
-#define IWL_DEF_WD_TIMEOUT 2000
+#define IWL_DEF_WD_TIMEOUT 2500
#define IWL_LONG_WD_TIMEOUT 10000
#define IWL_MAX_WD_TIMEOUT 120000
@@ -261,6 +261,12 @@ struct iwl_pwr_tx_backoff {
* station can receive in HT
* @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
* station can receive in VHT
+ * @dccm_offset: offset from which DCCM begins
+ * @dccm_len: length of DCCM (including runtime stack CCM)
+ * @dccm2_offset: offset from which the second DCCM begins
+ * @dccm2_len: length of the second DCCM
+ * @smem_offset: offset from which the SMEM begins
+ * @smem_len: the length of SMEM
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -298,11 +304,18 @@ struct iwl_cfg {
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
bool no_power_up_nic_in_init;
const char *default_nvm_file;
+ const char *default_nvm_file_8000A;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;
unsigned int max_ht_ampdu_exponent;
unsigned int max_vht_ampdu_exponent;
+ const u32 dccm_offset;
+ const u32 dccm_len;
+ const u32 dccm2_offset;
+ const u32 dccm2_len;
+ const u32 smem_offset;
+ const u32 smem_len;
};
/*
@@ -369,8 +382,8 @@ extern const struct iwl_cfg iwl7265d_2n_cfg;
extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl4265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
#endif /* CONFIG_IWLMVM */
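
IWL_DEVICE_7000 and IWL_DEVICE_8000 show how iwlwifi keeps the many per-device struct iwl_cfg definitions manageable: a macro expands to the shared designated initializers (now including the new DCCM/SMEM layout fields), and each cfg appends only its device-specific values. A standalone miniature of that idiom, with an invented struct and field names:

    #include <stdio.h>

    struct chip_cfg {
            const char   *name;
            unsigned int  dccm_offset;
            unsigned int  dccm_len;
            int           led_mode;
    };

    /* shared fields kept in one place, like IWL_DEVICE_8000 */
    #define CHIP_FAMILY_X                   \
            .led_mode    = 1,               \
            .dccm_offset = 0x800000

    static const struct chip_cfg cfg_a = {
            .name = "Chip A",
            CHIP_FAMILY_X,
            .dccm_len = 0x18000,
    };

    static const struct chip_cfg cfg_b = {
            .name = "Chip B",
            CHIP_FAMILY_X,
            .dccm_len = 0x10000,
    };

    int main(void)
    {
            printf("%s dccm @0x%x len 0x%x\n", cfg_a.name, cfg_a.dccm_offset, cfg_a.dccm_len);
            printf("%s dccm @0x%x len 0x%x\n", cfg_b.name, cfg_b.dccm_offset, cfg_b.dccm_len);
            return 0;
    }
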
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index aff63c3f5bf8..faa17f2e352a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -184,6 +184,7 @@
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
@@ -306,6 +307,7 @@
enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
+ SILICON_C_STEP,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 850b85a47806..996e7f16adf9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -84,21 +84,8 @@
*
******************************************************************************/
-/*
- * module name, copyright, version, etc.
- */
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD
-
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
@@ -250,9 +237,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
/*
* Starting 8000B - FW name format has changed. This overwrites the
* previous name and uses the new format.
- *
- * TODO:
- * Once there is only one supported step for 8000 family - delete this!
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
char rev_step[2] = {
@@ -263,13 +247,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
rev_step[0] = 0;
- /*
- * If hw_rev wasn't set yet - default as B-step. If it IS A-step
- * we'll reload that FW later instead.
- */
- if (drv->trans->hw_rev == 0)
- rev_step[0] = 'B';
-
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
"%s%s-%s.ucode", name_pre, rev_step, tag);
}
@@ -926,6 +903,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_UCODE_REGULAR_USNIFFER,
tlv_len);
break;
+ case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ drv->fw.sdio_adma_addr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1082,7 +1065,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
u32 api_ver;
int i;
bool load_module = false;
- u32 hw_rev = drv->trans->hw_rev;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
@@ -1275,50 +1257,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
-
- /*
- * We may have loaded the wrong FW file in 8000 HW family if it is an
- * A-step card, and if drv->trans->hw_rev wasn't properly read when
- * the FW file had been loaded. (This might happen in SDIO.) In such a
- * case - unload and reload the correct file.
- *
- * TODO:
- * Once there is only one supported step for 8000 family - delete this!
- */
- if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP &&
- drv->trans->hw_rev != hw_rev) {
- char firmware_name[32];
-
- /* Free previous FW resources */
- if (drv->op_mode)
- _iwl_op_mode_stop(drv);
- iwl_dealloc_ucode(drv);
-
- /* Build name of correct-step FW */
- snprintf(firmware_name, sizeof(firmware_name),
- strrchr(drv->firmware_name, '-'));
- snprintf(drv->firmware_name, sizeof(drv->firmware_name),
- "%s%s", drv->cfg->fw_name_pre, firmware_name);
-
- /* Clear data before loading correct FW */
- list_del(&drv->list);
-
- /* Request correct FW file this time */
- IWL_DEBUG_INFO(drv, "attempting to load A-step FW %s\n",
- drv->firmware_name);
- err = request_firmware(&ucode_raw, drv->firmware_name,
- drv->trans->dev);
- if (err) {
- IWL_ERR(drv, "Failed swapping FW!\n");
- goto out_unbind;
- }
-
- /* Redo callback function - this time with right FW */
- iwl_req_fw_callback(ucode_raw, context);
- }
-
- kfree(pieces);
return;
try_again:
@@ -1429,7 +1367,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.restart_fw = true,
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
- .wd_disable = true,
+ .d0i3_disable = true,
#ifndef CONFIG_IWLWIFI_UAPSD
.uapsd_disable = true,
#endif /* CONFIG_IWLWIFI_UAPSD */
@@ -1492,7 +1430,7 @@ static int __init iwl_drv_init(void)
for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_DESCRIPTION "\n");
pr_info(DRV_COPYRIGHT "\n");
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1539,15 +1477,15 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (default: 0 dB)");
-module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
-MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
-
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
MODULE_PARM_DESC(nvm_file, "NVM file name");
-module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
bool, S_IRUGO);
+MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
+
+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+ bool, S_IRUGO | S_IWUSR);
#ifdef CONFIG_IWLWIFI_UAPSD
MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
#else
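
The new IWL_UCODE_TLV_SDIO_ADMA_ADDR case follows the firmware parser's defensive pattern: check tlv_len against the expected payload size before reading the little-endian word with le32_to_cpup(). A standalone sketch of the same check outside the kernel (the TLV layout here is simplified and the helper names are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* simplified TLV: 32-bit type, 32-bit payload length, then the payload */
    struct tlv {
            uint32_t type;
            uint32_t len;
            uint8_t  data[];
    };

    /* Reject the TLV unless the payload is exactly one 32-bit word. */
    static int parse_u32_tlv(const struct tlv *t, uint32_t *out)
    {
            if (t->len != sizeof(uint32_t))
                    return -1;                      /* cf. "goto invalid_tlv_len" */

            /* portable little-endian read; the kernel uses le32_to_cpup() */
            *out = (uint32_t)t->data[0] | ((uint32_t)t->data[1] << 8) |
                   ((uint32_t)t->data[2] << 16) | ((uint32_t)t->data[3] << 24);
            return 0;
    }

    int main(void)
    {
            struct tlv *t = calloc(1, sizeof(*t) + 4);
            uint32_t adma_addr;

            if (!t)
                    return 1;
            t->type = 35;                           /* IWL_UCODE_TLV_SDIO_ADMA_ADDR */
            t->len  = 4;
            memcpy(t->data, "\x00\x10\x00\x00", 4); /* 0x00001000, little endian */

            if (!parse_u32_tlv(t, &adma_addr))
                    printf("sdio adma addr: 0x%x\n", (unsigned)adma_addr);

            free(t);
            return 0;
    }
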
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index be4f8972241a..adf522c756e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -68,7 +68,6 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
-#define IWLWIFI_VERSION "in-tree:"
#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 20a8a64c9fe3..919a2548a92c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -71,7 +71,6 @@
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
- * @IWL_FW_ERROR_DUMP_SRAM:
* @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
* @IWL_FW_ERROR_DUMP_RXF:
* @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
@@ -82,9 +81,10 @@
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
+ * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
*/
enum iwl_fw_error_dump_type {
- IWL_FW_ERROR_DUMP_SRAM = 0,
+ /* 0 is deprecated */
IWL_FW_ERROR_DUMP_CSR = 1,
IWL_FW_ERROR_DUMP_RXF = 2,
IWL_FW_ERROR_DUMP_TXCMD = 3,
@@ -93,6 +93,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_PRPH = 6,
IWL_FW_ERROR_DUMP_TXF = 7,
IWL_FW_ERROR_DUMP_FH_REGS = 8,
+ IWL_FW_ERROR_DUMP_MEM = 9,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -133,6 +134,27 @@ struct iwl_fw_error_dump_txcmd {
u8 data[];
} __packed;
+/**
+ * struct iwl_fw_error_dump_fifo - RX/TX FIFO data
+ * @fifo_num: number of FIFO (starting from 0)
+ * @available_bytes: num of bytes available in FIFO (may be less than FIFO size)
+ * @wr_ptr: position of write pointer
+ * @rd_ptr: position of read pointer
+ * @fence_ptr: position of fence pointer
+ * @fence_mode: the current mode of the fence (before locking) -
+ * 0=follow RD pointer ; 1 = freeze
+ * @data: all of the FIFO's data
+ */
+struct iwl_fw_error_dump_fifo {
+ __le32 fifo_num;
+ __le32 available_bytes;
+ __le32 wr_ptr;
+ __le32 rd_ptr;
+ __le32 fence_ptr;
+ __le32 fence_mode;
+ u8 data[];
+} __packed;
+
enum iwl_fw_error_dump_family {
IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
@@ -180,6 +202,23 @@ struct iwl_fw_error_dump_prph {
__le32 data[];
};
+enum iwl_fw_error_dump_mem_type {
+ IWL_FW_ERROR_DUMP_MEM_SRAM,
+ IWL_FW_ERROR_DUMP_MEM_SMEM,
+};
+
+/**
+ * struct iwl_fw_error_dump_mem - chunk of memory
+ * @type: %enum iwl_fw_error_dump_mem_type
+ * @offset: the offset from which the memory was read
+ * @data: the content of the memory
+ */
+struct iwl_fw_error_dump_mem {
+ __le32 type;
+ __le32 offset;
+ u8 data[];
+};
+
/**
* iwl_fw_error_next_data - advance fw error dump data pointer
* @data: previous data block
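
The dump blocks added here (iwl_fw_error_dump_fifo, iwl_fw_error_dump_mem) all end in a flexible array member, so a consumer walks the dump as a sequence of self-describing records: take the header size plus the recorded payload length, advance, repeat, which is in spirit what the iwl_fw_error_next_data() helper documented above does. A hedged standalone sketch of such a walk (the header layout is simplified, not the real on-disk format):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* simplified block header: type, payload length, then the payload itself */
    struct dump_block {
            uint32_t type;
            uint32_t len;
            uint8_t  data[];
    };

    /* Step over the current block: header plus its payload bytes. */
    static const struct dump_block *next_block(const struct dump_block *b)
    {
            return (const struct dump_block *)((const uint8_t *)b + sizeof(*b) + b->len);
    }

    int main(void)
    {
            size_t total = 2 * sizeof(struct dump_block) + 4 + 8;
            uint8_t *mem = calloc(1, total);
            struct dump_block *first = (struct dump_block *)mem;
            struct dump_block *second;
            const struct dump_block *cur;

            if (!mem)
                    return 1;
            first->type = 2;        /* e.g. IWL_FW_ERROR_DUMP_RXF */
            first->len  = 4;
            second = (struct dump_block *)(mem + sizeof(*first) + first->len);
            second->type = 9;       /* e.g. IWL_FW_ERROR_DUMP_MEM */
            second->len  = 8;

            for (cur = first; (const uint8_t *)cur < mem + total; cur = next_block(cur))
                    printf("block type %u, %u payload bytes\n",
                           (unsigned)cur->type, (unsigned)cur->len);

            free(mem);
            return 0;
    }
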
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index f2a047f6bb3e..016d91384681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -132,6 +132,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
+ IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
};
@@ -234,25 +235,34 @@ enum iwl_ucode_tlv_flag {
/**
* enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
- * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
- * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
* @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
* @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ * regardless of the band or the number of the probes. FW will calculate
+ * the actual dwell time.
+ * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
+ * through the dedicated host command.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
+ * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
+ * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
*/
enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
- IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
- IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+ IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
+ IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
+ IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
+ IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
+ IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
};
/**
@@ -260,6 +270,7 @@ enum iwl_ucode_tlv_api {
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
@@ -278,6 +289,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
+ IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3),
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
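
The API and capability enums above are plain bitmasks advertised by firmware TLVs. As a hedged sketch (the driver keeps these bits in its ucode capability structure, whose exact field name is outside this hunk and therefore an assumption), a feature test is just a mask check:

/* Hedged sketch: testing a firmware API bit.  'api_flags' stands in for
 * wherever the driver stores the parsed TLV API bitmap; only the bit
 * semantics come from the enum above. */
static inline bool fw_api_supported(u32 api_flags, enum iwl_ucode_tlv_api bit)
{
	return !!(api_flags & bit);
}

/* e.g. only use the single-scan EBS optimization when advertised:
 *	if (fw_api_supported(api_flags, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS))
 *		...
 */
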
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index e6dc3b870949..ffd785cc67d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -152,6 +152,8 @@ struct iwl_fw_cscheme_list {
* @mvm_fw: indicates this is MVM firmware
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
+ * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
+ * we get the ALIVE from the uCode
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
@@ -181,6 +183,8 @@ struct iwl_fw {
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+ u32 sdio_adma_addr;
+
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
size_t dbg_conf_tlv_len[FW_DBG_MAX];
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 7a2cbf6f90db..03250a45272e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -193,11 +193,15 @@ void iwl_force_nmi(struct iwl_trans *trans)
* DEVICE_SET_NMI_8000B_REG - is used.
*/
if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
- (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP))
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
- else
+ (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) {
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV);
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_HW);
+ } else {
iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
DEVICE_SET_NMI_8000B_VAL);
+ }
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 71507cf490e6..e8eabd21ccfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -96,13 +96,13 @@ enum iwl_disable_11n {
* use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
- * @wd_disable: disable stuck queue check, default = 1
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @power_save: enable power save, default = false
* @power_level: power level, default = 1
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
+ * @d0i3_disable: disable d0i3, default = true
* @fw_monitor: allow to use firmware monitor
*/
struct iwl_mod_params {
@@ -110,7 +110,6 @@ struct iwl_mod_params {
unsigned int disable_11n;
int amsdu_size_8K;
bool restart_fw;
- int wd_disable;
bool bt_coex_active;
int led_mode;
bool power_save;
@@ -121,6 +120,7 @@ struct iwl_mod_params {
int ant_coupling;
char *nvm_file;
bool uapsd_disable;
+ bool d0i3_disable;
bool fw_monitor;
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 06e02fcd6f7b..c74f1a4edf23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -468,6 +468,8 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
+ data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg);
+ data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
}
static void iwl_set_hw_address(const struct iwl_cfg *cfg,
@@ -592,6 +594,10 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
iwl_set_radio_cfg(cfg, data, radio_cfg);
+ if (data->valid_tx_ant)
+ tx_chains &= data->valid_tx_ant;
+ if (data->valid_rx_ant)
+ rx_chains &= data->valid_rx_ant;
sku = iwl_get_sku(cfg, nvm_sw);
data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 2df51eab1348..6221e4dfc64f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -99,6 +99,7 @@
#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000)
#define APMG_RTC_INT_STT_RFKILL (0x10000000)
@@ -107,7 +108,8 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
-#define DEVICE_SET_NMI_VAL 0x1
+#define DEVICE_SET_NMI_VAL_HW BIT(0)
+#define DEVICE_SET_NMI_VAL_DRV BIT(7)
#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
#define DEVICE_SET_NMI_8000B_VAL 0x1000000
@@ -250,6 +252,7 @@
#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
/* Context Data */
#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
@@ -283,32 +286,9 @@
#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
#define SCD_AGGR_SEL (SCD_BASE + 0x248)
#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
+#define SCD_GP_CTRL (SCD_BASE + 0x1a8)
#define SCD_EN_CTRL (SCD_BASE + 0x254)
-static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
-{
- if (chnl < 20)
- return SCD_BASE + 0x18 + chnl * 4;
- WARN_ON_ONCE(chnl >= 32);
- return SCD_BASE + 0x284 + (chnl - 20) * 4;
-}
-
-static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
-{
- if (chnl < 20)
- return SCD_BASE + 0x68 + chnl * 4;
- WARN_ON_ONCE(chnl >= 32);
- return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
-}
-
-static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
-{
- if (chnl < 20)
- return SCD_BASE + 0x10c + chnl * 4;
- WARN_ON_ONCE(chnl >= 32);
- return SCD_BASE + 0x384 + (chnl - 20) * 4;
-}
-
/*********************** END TX SCHEDULER *************************************/
/* Oscillator clock */
@@ -358,18 +338,40 @@ enum secure_load_status_reg {
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
+#define RXF_RD_D_SPACE (0xa00c40)
+#define RXF_RD_WR_PTR (0xa00c50)
+#define RXF_RD_RD_PTR (0xa00c54)
+#define RXF_RD_FENCE_PTR (0xa00c4c)
+#define RXF_SET_FENCE_MODE (0xa00c14)
+#define RXF_LD_WR2FENCE (0xa00c1c)
+#define RXF_FIFO_RD_FENCE_INC (0xa00c68)
#define RXF_SIZE_BYTE_CND_POS (7)
#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
+#define RXF_DIFF_FROM_PREV (0x200)
#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
+/* Tx FIFO */
+#define TXF_FIFO_ITEM_CNT (0xa00438)
+#define TXF_WR_PTR (0xa00414)
+#define TXF_RD_PTR (0xa00410)
+#define TXF_FENCE_PTR (0xa00418)
+#define TXF_LOCK_FENCE (0xa00424)
+#define TXF_LARC_NUM (0xa0043c)
+#define TXF_READ_MODIFY_DATA (0xa00448)
+#define TXF_READ_MODIFY_ADDR (0xa0044c)
+
/* FW monitor */
+#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c3c)
#define MON_BUFF_END_ADDR (0xa03c40)
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
+#define DBGC_IN_SAMPLE (0xa03c00)
+#define DBGC_OUT_CTRL (0xa03c0c)
+
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
enum {
diff --git a/drivers/net/wireless/iwlwifi/iwl-scd.h b/drivers/net/wireless/iwlwifi/iwl-scd.h
index 6c622b21bba7..f2353ebf2666 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scd.h
+++ b/drivers/net/wireless/iwlwifi/iwl-scd.h
@@ -69,14 +69,6 @@
#include "iwl-prph.h"
-static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
- u16 txq_id)
-{
- iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans,
u16 txq_id)
{
@@ -115,4 +107,37 @@ static inline void iwl_scd_enable_set_active(struct iwl_trans *trans,
{
iwl_write_prph(trans, SCD_EN_CTRL, value);
}
+
+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+ if (chnl < 20)
+ return SCD_BASE + 0x18 + chnl * 4;
+ WARN_ON_ONCE(chnl >= 32);
+ return SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+ if (chnl < 20)
+ return SCD_BASE + 0x68 + chnl * 4;
+ WARN_ON_ONCE(chnl >= 32);
+ return SCD_BASE + 0x2B4 + chnl * 4;
+}
+
+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+ if (chnl < 20)
+ return SCD_BASE + 0x10c + chnl * 4;
+ WARN_ON_ONCE(chnl >= 32);
+ return SCD_BASE + 0x334 + chnl * 4;
+}
+
+static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
+ u16 txq_id)
+{
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
#endif
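
The SCD_QUEUE_* helpers moved into this header compute per-queue scheduler register addresses: queues below 20 sit in one contiguous register bank, the rest in a second bank. A hedged usage sketch follows; the concrete call site lives in the transport code and is not part of this hunk.

/* Hedged usage sketch: mark a TX queue inactive and point its scheduler
 * read pointer at 'ssn'.  Mirrors the kind of sequence the PCIe transport
 * performs when (de)activating queues; treat it as illustrative only. */
static inline void example_scd_quiesce_queue(struct iwl_trans *trans,
					     u16 txq_id, u16 ssn)
{
	iwl_scd_txq_set_inactive(trans, txq_id);
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
}
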
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 028408a6ecba..a96bd8db6ceb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -368,6 +368,7 @@ enum iwl_trans_status {
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
* @cmd_fifo: the fifo for host commands
+ * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
* @no_reclaim_cmds: Some devices erroneously don't set the
* SEQ_RX_FRAME bit on some notifications, this is the
* list of such notifications to filter. Max length is
@@ -378,24 +379,26 @@ enum iwl_trans_status {
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @queue_watchdog_timeout: time (in ms) after which queues
- * are considered stuck and will trigger device restart
* @command_names: array of command names, must be 256 entries
* (one for each command); for debugging only
+ * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
+ * we get the ALIVE from the uCode
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
u8 cmd_queue;
u8 cmd_fifo;
+ unsigned int cmd_q_wdg_timeout;
const u8 *no_reclaim_cmds;
unsigned int n_no_reclaim_cmds;
bool rx_buf_size_8k;
bool bc_table_dword;
bool scd_set_active;
- unsigned int queue_watchdog_timeout;
const char *const *command_names;
+
+ u32 sdio_adma_addr;
};
struct iwl_trans_dump_data {
@@ -507,7 +510,8 @@ struct iwl_trans_ops {
struct sk_buff_head *skbs);
void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg);
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
@@ -552,6 +556,21 @@ enum iwl_trans_state {
};
/**
+ * enum iwl_d0i3_mode - d0i3 mode
+ *
+ * @IWL_D0I3_MODE_OFF - d0i3 is disabled
+ * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle
+ * (e.g. no active references)
+ * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend
+ * (in case of 'any' trigger)
+ */
+enum iwl_d0i3_mode {
+ IWL_D0I3_MODE_OFF = 0,
+ IWL_D0I3_MODE_ON_IDLE,
+ IWL_D0I3_MODE_ON_SUSPEND,
+};
+
+/**
* struct iwl_trans - transport common data
*
* @ops - pointer to iwl_trans_ops
@@ -612,6 +631,8 @@ struct iwl_trans {
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
u8 dbg_dest_reg_num;
+ enum iwl_d0i3_mode d0i3_mode;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -808,19 +829,21 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
static inline void
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg)
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout)
{
might_sleep();
if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- trans->ops->txq_enable(trans, queue, ssn, cfg);
+ trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
- int frame_limit, u16 ssn)
+ int frame_limit, u16 ssn,
+ unsigned int queue_wdg_timeout)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
@@ -830,11 +853,12 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
.aggregate = sta_id >= 0,
};
- iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg);
+ iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}
-static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
- int fifo)
+static inline
+void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
+ unsigned int queue_wdg_timeout)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
@@ -844,16 +868,16 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
.aggregate = false,
};
- iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg);
+ iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
- u32 txq_bm)
+ u32 txqs)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return trans->ops->wait_tx_queue_empty(trans, txq_bm);
+ return trans->ops->wait_tx_queue_empty(trans, txqs);
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
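
The per-queue watchdog timeout now travels with every txq_enable call instead of a single queue_watchdog_timeout in the transport config, while the command queue keeps its own cmd_q_wdg_timeout. A hedged sketch of how an op-mode might fill the new fields through the existing iwl_trans_configure() hook; the numeric values are placeholders, not taken from this patch.

/* Hedged sketch: op-mode side configuration with the new fields.
 * The queue/fifo numbers and the timeout are illustrative placeholders. */
static void example_configure_trans(struct iwl_trans *trans,
				    struct iwl_op_mode *op_mode,
				    const struct iwl_fw *fw)
{
	struct iwl_trans_config cfg = {
		.op_mode = op_mode,
		.cmd_queue = 9,				/* placeholder */
		.cmd_fifo = 7,				/* placeholder */
		.cmd_q_wdg_timeout = 2000,		/* ms, placeholder */
		.sdio_adma_addr = fw->sdio_adma_addr,	/* from the new TLV */
	};

	iwl_trans_configure(trans, &cfg);
}
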
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index a3bfda45d9e6..1ec4d55155f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -342,7 +342,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 12,
.lut20 = {
- cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -363,7 +363,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 20,
.lut20 = {
- cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -384,7 +384,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 21,
.lut20 = {
- cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -405,7 +405,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 23,
.lut20 = {
- cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -426,7 +426,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 27,
.lut20 = {
- cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -447,7 +447,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 30,
.lut20 = {
- cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -468,7 +468,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 32,
.lut20 = {
- cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -489,7 +489,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 33,
.lut20 = {
- cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -989,7 +989,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
@@ -1025,7 +1025,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
{
- struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data data = {
.mvm = mvm,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index b3210cfbecc8..d530ef3da107 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -330,7 +330,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 12,
.lut20 = {
- cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -351,7 +351,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 20,
.lut20 = {
- cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -372,7 +372,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 21,
.lut20 = {
- cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -393,7 +393,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 23,
.lut20 = {
- cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -414,7 +414,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 27,
.lut20 = {
- cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -435,7 +435,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 30,
.lut20 = {
- cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -456,7 +456,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 32,
.lut20 = {
- cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -477,7 +477,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 33,
.lut20 = {
- cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -1034,7 +1034,7 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
@@ -1070,7 +1070,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
{
- struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data data = {
.mvm = mvm,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 3bd93476ec1c..beba375489f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -94,13 +94,42 @@
#define IWL_MVM_BT_COEX_MPLUT 1
#define IWL_MVM_BT_COEX_RRC 1
#define IWL_MVM_BT_COEX_TTC 1
-#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201
+#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200
#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
-#define IWL_MVM_QUOTA_THRESHOLD 8
+#define IWL_MVM_QUOTA_THRESHOLD 4
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
-#define IWL_MVM_RS_DISABLE_MIMO 0
+#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
+#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
+#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3
+#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3
+#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2
+#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2
+#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1
+#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16
+#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3
+#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1
+#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3
+#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8
+#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */
+#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */
+#define IWL_MVM_RS_MISSED_RATE_MAX 15
+#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160
+#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480
+#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160
+#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400
+#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500
+#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */
+#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */
+#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */
+#define IWL_MVM_RS_AGG_DISABLE_START 3
+#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */
+#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
+#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 744de262373e..14e8fd661889 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -793,7 +793,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta)
{
int ret;
- struct iwl_mvm_sta *mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+ struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
@@ -1137,12 +1137,43 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
return ret;
}
+static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
+{
+ struct iwl_notification_wait wait_d3;
+ static const u8 d3_notif[] = { D3_CONFIG_CMD };
+ int ret;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
+ d3_notif, ARRAY_SIZE(d3_notif),
+ NULL, NULL);
+
+ ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
+ if (ret)
+ goto remove_notif;
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
+ WARN_ON_ONCE(ret);
+ return ret;
+
+remove_notif:
+ iwl_remove_notification(&mvm->notif_wait, &wait_d3);
+ return ret;
+}
+
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_suspend(mvm->trans);
- if (iwl_mvm_is_d0i3_supported(mvm)) {
+ if (wowlan->any) {
+ /* 'any' trigger means d0i3 usage */
+ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+ int ret = iwl_mvm_enter_d0i3_sync(mvm);
+
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
@@ -1626,7 +1657,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (IS_ERR_OR_NULL(ap_sta))
goto out_free;
- mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+ mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status.qos_seq_ctr[i];
/* firmware stores last-used value, we store next value */
@@ -1876,8 +1907,20 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
iwl_trans_resume(mvm->trans);
- if (iwl_mvm_is_d0i3_supported(mvm))
+ if (mvm->hw->wiphy->wowlan_config->any) {
+ /* 'any' trigger means d0i3 usage */
+ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+ int ret = iwl_mvm_exit_d0i3(hw->priv);
+
+ if (ret)
+ return ret;
+ /*
+ * d0i3 exit will be deferred until reconfig_complete.
+			 * make sure we are out of d0i3 there.
+ */
+ }
return 0;
+ }
return __iwl_mvm_resume(mvm, false);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9aa2311a776c..5fe14591e1c4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -268,7 +268,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (!IS_ERR_OR_NULL(sta)) {
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
pos += scnprintf(buf+pos, bufsz-pos,
"ap_sta_id %d - reduced Tx power %d\n",
@@ -517,6 +517,34 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
}
+static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[20];
+ int len;
+
+ len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ bool ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? count : -EINVAL;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -531,6 +559,7 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -564,6 +593,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 33bf915cd7ea..82c09d86af8c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -654,10 +654,10 @@ out:
return ret ?: count;
}
-#define PRINT_STATS_LE32(_str, _val) \
+#define PRINT_STATS_LE32(_struct, _memb) \
pos += scnprintf(buf + pos, bufsz - pos, \
- fmt_table, _str, \
- le32_to_cpu(_val))
+ fmt_table, #_memb, \
+ le32_to_cpu(_struct->_memb))
static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
char __user *user_buf, size_t count,
@@ -692,97 +692,89 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM");
- PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
- PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
- PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
- PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
- PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
- PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
- PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
- PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
- PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
- PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
- PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
- PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
- PRINT_STATS_LE32("rxe_frame_lmt_overrun",
- ofdm->rxe_frame_limit_overrun);
- PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
- PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
- PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
- PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
- PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
- PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
- PRINT_STATS_LE32("reserved", ofdm->reserved);
+ PRINT_STATS_LE32(ofdm, ina_cnt);
+ PRINT_STATS_LE32(ofdm, fina_cnt);
+ PRINT_STATS_LE32(ofdm, plcp_err);
+ PRINT_STATS_LE32(ofdm, crc32_err);
+ PRINT_STATS_LE32(ofdm, overrun_err);
+ PRINT_STATS_LE32(ofdm, early_overrun_err);
+ PRINT_STATS_LE32(ofdm, crc32_good);
+ PRINT_STATS_LE32(ofdm, false_alarm_cnt);
+ PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
+ PRINT_STATS_LE32(ofdm, sfd_timeout);
+ PRINT_STATS_LE32(ofdm, fina_timeout);
+ PRINT_STATS_LE32(ofdm, unresponded_rts);
+ PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(ofdm, sent_ack_cnt);
+ PRINT_STATS_LE32(ofdm, sent_cts_cnt);
+ PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(ofdm, dsp_self_kill);
+ PRINT_STATS_LE32(ofdm, mh_format_err);
+ PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
+ PRINT_STATS_LE32(ofdm, reserved);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - CCK");
- PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
- PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
- PRINT_STATS_LE32("plcp_err", cck->plcp_err);
- PRINT_STATS_LE32("crc32_err", cck->crc32_err);
- PRINT_STATS_LE32("overrun_err", cck->overrun_err);
- PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
- PRINT_STATS_LE32("crc32_good", cck->crc32_good);
- PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
- PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
- PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
- PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
- PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
- PRINT_STATS_LE32("rxe_frame_lmt_overrun",
- cck->rxe_frame_limit_overrun);
- PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
- PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
- PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
- PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
- PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
- PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
- PRINT_STATS_LE32("reserved", cck->reserved);
+ PRINT_STATS_LE32(cck, ina_cnt);
+ PRINT_STATS_LE32(cck, fina_cnt);
+ PRINT_STATS_LE32(cck, plcp_err);
+ PRINT_STATS_LE32(cck, crc32_err);
+ PRINT_STATS_LE32(cck, overrun_err);
+ PRINT_STATS_LE32(cck, early_overrun_err);
+ PRINT_STATS_LE32(cck, crc32_good);
+ PRINT_STATS_LE32(cck, false_alarm_cnt);
+ PRINT_STATS_LE32(cck, fina_sync_err_cnt);
+ PRINT_STATS_LE32(cck, sfd_timeout);
+ PRINT_STATS_LE32(cck, fina_timeout);
+ PRINT_STATS_LE32(cck, unresponded_rts);
+ PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(cck, sent_ack_cnt);
+ PRINT_STATS_LE32(cck, sent_cts_cnt);
+ PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(cck, dsp_self_kill);
+ PRINT_STATS_LE32(cck, mh_format_err);
+ PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
+ PRINT_STATS_LE32(cck, reserved);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - GENERAL");
- PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
- PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
- PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
- PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
- PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
- PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
- PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
- PRINT_STATS_LE32("adc_rx_saturation_time",
- general->adc_rx_saturation_time);
- PRINT_STATS_LE32("ina_detection_search_time",
- general->ina_detection_search_time);
- PRINT_STATS_LE32("beacon_silence_rssi_a",
- general->beacon_silence_rssi_a);
- PRINT_STATS_LE32("beacon_silence_rssi_b",
- general->beacon_silence_rssi_b);
- PRINT_STATS_LE32("beacon_silence_rssi_c",
- general->beacon_silence_rssi_c);
- PRINT_STATS_LE32("interference_data_flag",
- general->interference_data_flag);
- PRINT_STATS_LE32("channel_load", general->channel_load);
- PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
- PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
- PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
- PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
- PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
- PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
- PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
- PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
- PRINT_STATS_LE32("mac_id", general->mac_id);
- PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
+ PRINT_STATS_LE32(general, bogus_cts);
+ PRINT_STATS_LE32(general, bogus_ack);
+ PRINT_STATS_LE32(general, non_bssid_frames);
+ PRINT_STATS_LE32(general, filtered_frames);
+ PRINT_STATS_LE32(general, non_channel_beacons);
+ PRINT_STATS_LE32(general, channel_beacons);
+ PRINT_STATS_LE32(general, num_missed_bcon);
+ PRINT_STATS_LE32(general, adc_rx_saturation_time);
+ PRINT_STATS_LE32(general, ina_detection_search_time);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+ PRINT_STATS_LE32(general, interference_data_flag);
+ PRINT_STATS_LE32(general, channel_load);
+ PRINT_STATS_LE32(general, dsp_false_alarms);
+ PRINT_STATS_LE32(general, beacon_rssi_a);
+ PRINT_STATS_LE32(general, beacon_rssi_b);
+ PRINT_STATS_LE32(general, beacon_rssi_c);
+ PRINT_STATS_LE32(general, beacon_energy_a);
+ PRINT_STATS_LE32(general, beacon_energy_b);
+ PRINT_STATS_LE32(general, beacon_energy_c);
+ PRINT_STATS_LE32(general, num_bt_kills);
+ PRINT_STATS_LE32(general, mac_id);
+ PRINT_STATS_LE32(general, directed_data_mpdu);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - HT");
- PRINT_STATS_LE32("plcp_err", ht->plcp_err);
- PRINT_STATS_LE32("overrun_err", ht->overrun_err);
- PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
- PRINT_STATS_LE32("crc32_good", ht->crc32_good);
- PRINT_STATS_LE32("crc32_err", ht->crc32_err);
- PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
- PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
- PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
- PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
- PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
+ PRINT_STATS_LE32(ht, plcp_err);
+ PRINT_STATS_LE32(ht, overrun_err);
+ PRINT_STATS_LE32(ht, early_overrun_err);
+ PRINT_STATS_LE32(ht, crc32_good);
+ PRINT_STATS_LE32(ht, crc32_err);
+ PRINT_STATS_LE32(ht, mh_format_err);
+ PRINT_STATS_LE32(ht, agg_crc32_good);
+ PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+ PRINT_STATS_LE32(ht, agg_cnt);
+ PRINT_STATS_LE32(ht, unsupport_mcs);
mutex_unlock(&mvm->mutex);
@@ -933,7 +925,7 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return -EINVAL;
if (scan_rx_ant > ANT_ABC)
return -EINVAL;
- if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
+ if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
return -EINVAL;
if (mvm->scan_rx_ant != scan_rx_ant) {
@@ -945,6 +937,61 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
+static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ enum iwl_fw_dbg_conf conf;
+ char buf[8];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ conf = mvm->fw_dbg_conf;
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret, conf_id;
+
+ ret = kstrtoint(buf, 0, &conf_id);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(conf_id >= FW_DBG_MAX))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+
+ if (ret)
+ return ret;
+
+ iwl_mvm_fw_dbg_collect(mvm);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
+
+ return count;
+}
+
#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1340,6 +1387,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
+ PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1439,6 +1487,26 @@ out:
return count;
}
+static ssize_t iwl_dbgfs_enable_scan_iteration_notif_write(struct iwl_mvm *mvm,
+ char *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int val;
+
+ mutex_lock(&mvm->mutex);
+
+ if (kstrtoint(buf, 10, &val)) {
+ mutex_unlock(&mvm->mutex);
+ return -EINVAL;
+ }
+
+ mvm->scan_iter_notif_enabled = val;
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@@ -1459,6 +1527,9 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(enable_scan_iteration_notif, 8);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1500,6 +1571,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(enable_scan_iteration_notif, mvm->debugfs_dir,
+ S_IWUSR);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 430020047b77..4fc0938b3fb6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -92,14 +92,32 @@ enum iwl_ltr_config_flags {
};
/**
+ * struct iwl_ltr_config_cmd_v1 - configures the LTR
+ * @flags: See %enum iwl_ltr_config_flags
+ */
+struct iwl_ltr_config_cmd_v1 {
+ __le32 flags;
+ __le32 static_long;
+ __le32 static_short;
+} __packed; /* LTR_CAPABLE_API_S_VER_1 */
+
+#define LTR_VALID_STATES_NUM 4
+
+/**
* struct iwl_ltr_config_cmd - configures the LTR
* @flags: See %enum iwl_ltr_config_flags
+ * @static_long:
+ * @static_short:
+ * @ltr_cfg_values:
+ * @ltr_short_idle_timeout:
*/
struct iwl_ltr_config_cmd {
__le32 flags;
__le32 static_long;
__le32 static_short;
-} __packed;
+ __le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
+ __le32 ltr_short_idle_timeout;
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
/* Radio LP RX Energy Threshold measured in dBm */
#define POWER_LPRX_RSSI_THRESHOLD 75
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 8bb5b94bf963..0f1ea80a55ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -308,6 +308,42 @@ enum {
#define LQ_FLAG_DYNAMIC_BW_POS 6
#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
+/* Single Stream Tx Parameters (lq_cmd->ss_params)
+ * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
+ * used for single stream Tx.
+ */
+
+/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
+ * (0) - No STBC allowed
+ * (1) - 2x1 STBC allowed (HT/VHT)
+ * (2) - 4x2 STBC allowed (HT/VHT)
+ * (3) - 3x2 STBC allowed (HT only)
+ * All our chips have at most 2 antennas, so only (1) is valid for now.
+ */
+#define LQ_SS_STBC_ALLOWED_POS 0
+#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_MSK)
+
+/* 2x1 STBC is allowed */
+#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS)
+
+/* Bit 2: Beamformer (VHT only) is allowed */
+#define LQ_SS_BFER_ALLOWED_POS 2
+#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS)
+
+/* Bit 3: Force BFER or STBC for testing.
+ * If this is set: if BFER is allowed, force the ucode to choose BFER;
+ * otherwise, if STBC is allowed, force the ucode to choose STBC over SISO.
+ */
+#define LQ_SS_FORCE_POS 3
+#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS)
+
+/* Bit 31: ss_params field is valid. Used for FW backward compatibility
+ * with other drivers which don't support the ss_params API yet
+ */
+#define LQ_SS_PARAMS_VALID_POS 31
+#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS)
+
/**
* struct iwl_lq_cmd - link quality command
* @sta_id: station to update
@@ -330,7 +366,7 @@ enum {
* 2 - 0x3f: maximal number of frames (up to 3f == 63)
* @rs_table: array of rates for each TX try, each is rate_n_flags,
* meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
- * @bf_params: beam forming params, currently not used
+ * @ss_params: single stream features. declare whether STBC or BFER are allowed.
*/
struct iwl_lq_cmd {
u8 sta_id;
@@ -348,6 +384,6 @@ struct iwl_lq_cmd {
u8 agg_frame_cnt_limit;
__le32 reserved2;
__le32 rs_table[LQ_MAX_RETRY_NUM];
- __le32 bf_params;
+ __le32 ss_params;
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
#endif /* __fw_api_rs_h__ */
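
The ss_params word replaces the unused bf_params field and is only honoured by the firmware when the VALID bit is set. A hedged sketch of composing it; whether STBC/BFER are actually allowed would come from capability checks that are not part of this hunk.

/* Hedged sketch: build lq_cmd->ss_params from the flags defined above.
 * The stbc_allowed/bfer_allowed inputs are placeholders for whatever
 * capability checks the rate-scaling code performs. */
static __le32 example_build_ss_params(bool stbc_allowed, bool bfer_allowed)
{
	u32 ss_params = LQ_SS_PARAMS_VALID;

	if (stbc_allowed)
		ss_params |= LQ_SS_STBC_1SS_ALLOWED;
	if (bfer_allowed)
		ss_params |= LQ_SS_BFER_ALLOWED;

	return cpu_to_le32(ss_params);
}
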
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1f2acf47bfb2..cfc0e65b34a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
};
/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: how often a full (non-EBS) scan iteration is performed:
+ *	1 - EBS is disabled (every scan is a full scan).
+ *	2 - every second scan will be a full scan (and so on).
*/
struct iwl_scan_channel_opt {
__le16 flags;
@@ -672,6 +675,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -681,6 +685,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+ IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
};
enum iwl_scan_priority {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
new file mode 100644
index 000000000000..928168b18346
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
@@ -0,0 +1,277 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_stats_h__
+#define __fw_api_stats_h__
+
+struct mvm_statistics_dbg {
+ __le32 burst_check;
+ __le32 burst_count;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved[3];
+} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
+
+struct mvm_statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+ __le32 rssi_ant;
+ __le32 reserved2;
+} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct mvm_statistics_rx_non_phy {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ __le32 channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ __le32 num_missed_bcon; /* number of missed beacons */
+ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
+ __le32 interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 beacon_rssi_c;
+ __le32 beacon_energy_a;
+ __le32 beacon_energy_b;
+ __le32 beacon_energy_c;
+ __le32 num_bt_kills;
+ __le32 mac_id;
+ __le32 directed_data_mpdu;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_lmt_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+ __le32 sent_ba_rsp_cnt;
+ __le32 dsp_self_kill;
+ __le32 mh_format_err;
+ __le32 re_acq_main_rssi_sum;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_rx_ht_phy {
+ __le32 plcp_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 crc32_err;
+ __le32 mh_format_err;
+ __le32 agg_crc32_good;
+ __le32 agg_mpdu_cnt;
+ __le32 agg_cnt;
+ __le32 unsupport_mcs;
+} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+struct mvm_statistics_tx_non_phy {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_next_frame_mismatch_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
+
+#define MAX_CHAINS 3
+
+struct mvm_statistics_tx_non_phy_agg {
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 frame_not_ready;
+ __le32 underrun;
+ __le32 bt_prio_kill;
+ __le32 rx_ba_rsp_cnt;
+ __s8 txpower[MAX_CHAINS];
+ __s8 reserved;
+ __le32 reserved2;
+} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct mvm_statistics_tx_channel_width {
+ __le32 ext_cca_narrow_ch20[1];
+ __le32 ext_cca_narrow_ch40[2];
+ __le32 ext_cca_narrow_ch80[3];
+ __le32 ext_cca_narrow_ch160[4];
+ __le32 last_tx_ch_width_indx;
+ __le32 rx_detected_per_ch_width[4];
+ __le32 success_per_ch_width[4];
+ __le32 fail_per_ch_width[4];
+}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct mvm_statistics_tx {
+ struct mvm_statistics_tx_non_phy general;
+ struct mvm_statistics_tx_non_phy_agg agg;
+ struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_4 */
+
+
+struct mvm_statistics_bt_activity {
+ __le32 hi_priority_tx_req_cnt;
+ __le32 hi_priority_tx_denied_cnt;
+ __le32 lo_priority_tx_req_cnt;
+ __le32 lo_priority_tx_denied_cnt;
+ __le32 hi_priority_rx_req_cnt;
+ __le32 hi_priority_rx_denied_cnt;
+ __le32 lo_priority_rx_req_cnt;
+ __le32 lo_priority_rx_denied_cnt;
+} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct mvm_statistics_general {
+ __le32 radio_temperature;
+ __le32 radio_voltage;
+ struct mvm_statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct mvm_statistics_div slow_div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+ __le32 beacon_filtered;
+ __le32 missed_beacons;
+ __s8 beacon_filter_average_energy;
+ __s8 beacon_filter_reason;
+ __s8 beacon_filter_current_energy;
+ __s8 beacon_filter_reserved;
+ __le32 beacon_filter_delta_time;
+ struct mvm_statistics_bt_activity bt_activity;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
+
+struct mvm_statistics_rx {
+ struct mvm_statistics_rx_phy ofdm;
+ struct mvm_statistics_rx_phy cck;
+ struct mvm_statistics_rx_non_phy general;
+ struct mvm_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+struct iwl_notif_statistics {
+ __le32 flag;
+ struct mvm_statistics_rx rx;
+ struct mvm_statistics_tx tx;
+ struct mvm_statistics_general general;
+} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
+
+#endif /* __fw_api_stats_h__ */
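Sketch only, not part of the patch: a minimal consumer of the STATISTICS_NOTIFICATION documented above, assuming the rxb_addr() packet helper used by the driver's RX path and a hypothetical handler name; the driver's real handler is iwl_mvm_rx_statistics(), declared later in this series.

static void example_handle_statistics(struct iwl_mvm *mvm,
				       struct iwl_rx_cmd_buffer *rxb)
{
	/* all counters in the notification are little endian */
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_notif_statistics *stats = (void *)pkt->data;
	u32 missed = le32_to_cpu(stats->general.missed_beacons);
	u32 plcp_err = le32_to_cpu(stats->rx.ofdm.plcp_err);

	IWL_DEBUG_INFO(mvm, "missed beacons %u, OFDM PLCP errors %u\n",
		       missed, plcp_err);
}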
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 5bca1f8bfebf..81c4ea3c6958 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -592,4 +592,43 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
tx_resp->frame_count) & 0xfff;
}
+/**
+ * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token: dialog token addba - unused legacy
+ * @sta_id: station id
+ * @tid: TID 0..7
+ * @scd_queue: scheduler queue to config
+ * @enable: 1 queue enable, 0 queue disable
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: %enum iwl_mvm_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ */
+struct iwl_scd_txq_cfg_cmd {
+ u8 token;
+ u8 sta_id;
+ u8 tid;
+ u8 scd_queue;
+ u8 enable;
+ u8 aggregate;
+ u8 tx_fifo;
+ u8 window;
+ __le16 ssn;
+ __le16 reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_scd_txq_cfg_rsp
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwl_scd_txq_cfg_rsp {
+ u8 token;
+ u8 sta_id;
+ u8 tid;
+ u8 scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
#endif /* __fw_api_tx_h__ */
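Sketch only, not part of the patch: one way a caller might fill and send the new queue config command via the iwl_mvm_send_cmd_pdu() helper used elsewhere in this series. The SCD_QUEUE_CFG command ID, the gating on iwl_mvm_is_scd_cfg_supported() (added later in this patch), the 64-frame BA window and the function name are illustrative assumptions.

static int example_enable_scd_queue(struct iwl_mvm *mvm, u8 queue, u8 sta_id,
				    u8 tid, u8 fifo, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.sta_id = sta_id,
		.tid = tid,
		.enable = 1,		/* enable, not disable */
		.aggregate = 0,		/* plain (non-aggregated) queue */
		.tx_fifo = fifo,
		.window = 64,		/* BA window size */
		.ssn = cpu_to_le16(ssn),
	};

	if (!iwl_mvm_is_scd_cfg_supported(mvm))
		return -EOPNOTSUPP;

	return iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
}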
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 88af6dd2ceaa..b56154fe8ec5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -74,6 +74,7 @@
#include "fw-api-d3.h"
#include "fw-api-coex.h"
#include "fw-api-scan.h"
+#include "fw-api-stats.h"
/* Tx queue numbers */
enum {
@@ -128,6 +129,9 @@ enum {
/* global key */
WEP_KEY = 0x20,
+ /* Memory */
+ SHARED_MEM_CFG = 0x25,
+
/* TDLS */
TDLS_CHANNEL_SWITCH_CMD = 0x27,
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
@@ -1381,214 +1385,6 @@ struct iwl_mvm_marker {
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
-struct mvm_statistics_dbg {
- __le32 burst_check;
- __le32 burst_count;
- __le32 wait_for_silence_timeout_cnt;
- __le32 reserved[3];
-} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
-
-struct mvm_statistics_div {
- __le32 tx_on_a;
- __le32 tx_on_b;
- __le32 exec_time;
- __le32 probe_time;
- __le32 rssi_ant;
- __le32 reserved2;
-} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
-
-struct mvm_statistics_general_common {
- __le32 temperature; /* radio temperature */
- __le32 temperature_m; /* radio voltage */
- struct mvm_statistics_dbg dbg;
- __le32 sleep_time;
- __le32 slots_out;
- __le32 slots_idle;
- __le32 ttl_timestamp;
- struct mvm_statistics_div div;
- __le32 rx_enable_counter;
- /*
- * num_of_sos_states:
- * count the number of times we have to re-tune
- * in order to get out of bad PHY status
- */
- __le32 num_of_sos_states;
-} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
-
-struct mvm_statistics_rx_non_phy {
- __le32 bogus_cts; /* CTS received when not expecting CTS */
- __le32 bogus_ack; /* ACK received when not expecting ACK */
- __le32 non_bssid_frames; /* number of frames with BSSID that
- * doesn't belong to the STA BSSID */
- __le32 filtered_frames; /* count frames that were dumped in the
- * filtering process */
- __le32 non_channel_beacons; /* beacons with our bss id but not on
- * our serving channel */
- __le32 channel_beacons; /* beacons with our bss id and in our
- * serving channel */
- __le32 num_missed_bcon; /* number of missed beacons */
- __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
- * ADC was in saturation */
- __le32 ina_detection_search_time;/* total time (in 0.8us) searched
- * for INA */
- __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
- __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
- __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
- __le32 interference_data_flag; /* flag for interference data
- * availability. 1 when data is
- * available. */
- __le32 channel_load; /* counts RX Enable time in uSec */
- __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
- * and CCK) counter */
- __le32 beacon_rssi_a;
- __le32 beacon_rssi_b;
- __le32 beacon_rssi_c;
- __le32 beacon_energy_a;
- __le32 beacon_energy_b;
- __le32 beacon_energy_c;
- __le32 num_bt_kills;
- __le32 mac_id;
- __le32 directed_data_mpdu;
-} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
-
-struct mvm_statistics_rx_phy {
- __le32 ina_cnt;
- __le32 fina_cnt;
- __le32 plcp_err;
- __le32 crc32_err;
- __le32 overrun_err;
- __le32 early_overrun_err;
- __le32 crc32_good;
- __le32 false_alarm_cnt;
- __le32 fina_sync_err_cnt;
- __le32 sfd_timeout;
- __le32 fina_timeout;
- __le32 unresponded_rts;
- __le32 rxe_frame_limit_overrun;
- __le32 sent_ack_cnt;
- __le32 sent_cts_cnt;
- __le32 sent_ba_rsp_cnt;
- __le32 dsp_self_kill;
- __le32 mh_format_err;
- __le32 re_acq_main_rssi_sum;
- __le32 reserved;
-} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
-
-struct mvm_statistics_rx_ht_phy {
- __le32 plcp_err;
- __le32 overrun_err;
- __le32 early_overrun_err;
- __le32 crc32_good;
- __le32 crc32_err;
- __le32 mh_format_err;
- __le32 agg_crc32_good;
- __le32 agg_mpdu_cnt;
- __le32 agg_cnt;
- __le32 unsupport_mcs;
-} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
-
-#define MAX_CHAINS 3
-
-struct mvm_statistics_tx_non_phy_agg {
- __le32 ba_timeout;
- __le32 ba_reschedule_frames;
- __le32 scd_query_agg_frame_cnt;
- __le32 scd_query_no_agg;
- __le32 scd_query_agg;
- __le32 scd_query_mismatch;
- __le32 frame_not_ready;
- __le32 underrun;
- __le32 bt_prio_kill;
- __le32 rx_ba_rsp_cnt;
- __s8 txpower[MAX_CHAINS];
- __s8 reserved;
- __le32 reserved2;
-} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
-
-struct mvm_statistics_tx_channel_width {
- __le32 ext_cca_narrow_ch20[1];
- __le32 ext_cca_narrow_ch40[2];
- __le32 ext_cca_narrow_ch80[3];
- __le32 ext_cca_narrow_ch160[4];
- __le32 last_tx_ch_width_indx;
- __le32 rx_detected_per_ch_width[4];
- __le32 success_per_ch_width[4];
- __le32 fail_per_ch_width[4];
-}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
-
-struct mvm_statistics_tx {
- __le32 preamble_cnt;
- __le32 rx_detected_cnt;
- __le32 bt_prio_defer_cnt;
- __le32 bt_prio_kill_cnt;
- __le32 few_bytes_cnt;
- __le32 cts_timeout;
- __le32 ack_timeout;
- __le32 expected_ack_cnt;
- __le32 actual_ack_cnt;
- __le32 dump_msdu_cnt;
- __le32 burst_abort_next_frame_mismatch_cnt;
- __le32 burst_abort_missing_next_frame_cnt;
- __le32 cts_timeout_collision;
- __le32 ack_or_ba_timeout_collision;
- struct mvm_statistics_tx_non_phy_agg agg;
- struct mvm_statistics_tx_channel_width channel_width;
-} __packed; /* STATISTICS_TX_API_S_VER_4 */
-
-
-struct mvm_statistics_bt_activity {
- __le32 hi_priority_tx_req_cnt;
- __le32 hi_priority_tx_denied_cnt;
- __le32 lo_priority_tx_req_cnt;
- __le32 lo_priority_tx_denied_cnt;
- __le32 hi_priority_rx_req_cnt;
- __le32 hi_priority_rx_denied_cnt;
- __le32 lo_priority_rx_req_cnt;
- __le32 lo_priority_rx_denied_cnt;
-} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
-
-struct mvm_statistics_general {
- struct mvm_statistics_general_common common;
- __le32 beacon_filtered;
- __le32 missed_beacons;
- __s8 beacon_filter_average_energy;
- __s8 beacon_filter_reason;
- __s8 beacon_filter_current_energy;
- __s8 beacon_filter_reserved;
- __le32 beacon_filter_delta_time;
- struct mvm_statistics_bt_activity bt_activity;
-} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
-
-struct mvm_statistics_rx {
- struct mvm_statistics_rx_phy ofdm;
- struct mvm_statistics_rx_phy cck;
- struct mvm_statistics_rx_non_phy general;
- struct mvm_statistics_rx_ht_phy ofdm_ht;
-} __packed; /* STATISTICS_RX_API_S_VER_3 */
-
-/*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
- *
- * By default, uCode issues this notification after receiving a beacon
- * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
- *
- * Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
- * 0x9c with CLEAR_STATS bit set (see above).
- *
- * uCode also issues this notification during scans. uCode clears statistics
- * appropriately so that each notification contains statistics for only the
- * one channel that has just been scanned.
- */
-
-struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
- __le32 flag;
- struct mvm_statistics_rx rx;
- struct mvm_statistics_tx tx;
- struct mvm_statistics_general general;
-} __packed;
-
/***********************************
* Smart Fifo API
***********************************/
@@ -1680,63 +1476,6 @@ struct iwl_dts_measurement_notif {
__le32 voltage;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
-/**
- * enum iwl_scd_control - scheduler config command control flags
- * @IWL_SCD_CONTROL_RM_TID: remove TID from this queue
- * @IWL_SCD_CONTROL_SET_SSN: use the SSN and program it into HW
- */
-enum iwl_scd_control {
- IWL_SCD_CONTROL_RM_TID = BIT(4),
- IWL_SCD_CONTROL_SET_SSN = BIT(5),
-};
-
-/**
- * enum iwl_scd_flags - scheduler config command flags
- * @IWL_SCD_FLAGS_SHARE_TID: multiple TIDs map to this queue
- * @IWL_SCD_FLAGS_SHARE_RA: multiple RAs map to this queue
- * @IWL_SCD_FLAGS_DQA_ENABLED: DQA is enabled
- */
-enum iwl_scd_flags {
- IWL_SCD_FLAGS_SHARE_TID = BIT(0),
- IWL_SCD_FLAGS_SHARE_RA = BIT(1),
- IWL_SCD_FLAGS_DQA_ENABLED = BIT(2),
-};
-
-#define IWL_SCDQ_INVALID_STA 0xff
-
-/**
- * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
- * @token: dialog token addba - unused legacy
- * @sta_id: station id 4-bit
- * @tid: TID 0..7
- * @scd_queue: TFD queue num 0 .. 31
- * @enable: 1 queue enable, 0 queue disable
- * @aggregate: 1 aggregated queue, 0 otherwise
- * @tx_fifo: tx fifo num 0..7
- * @window: up to 64
- * @ssn: starting seq num 12-bit
- * @control: command control flags
- * @flags: flags - see &enum iwl_scd_flags
- *
- * Note that every time the command is sent, all parameters must
- * be filled with the exception of
- * - the SSN, which is only used with @IWL_SCD_CONTROL_SET_SSN
- * - the window, which is only relevant when starting aggregation
- */
-struct iwl_scd_txq_cfg_cmd {
- u8 token;
- u8 sta_id;
- u8 tid;
- u8 scd_queue;
- u8 enable;
- u8 aggregate;
- u8 tx_fifo;
- u8 window;
- __le16 ssn;
- u8 control;
- u8 flags;
-} __packed;
-
/***********************************
* TDLS API
***********************************/
@@ -1878,4 +1617,36 @@ struct iwl_tdls_config_res {
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
+#define TX_FIFO_MAX_NUM 8
+#define RX_FIFO_MAX_NUM 2
+
+/**
+ * Shared memory configuration information from the FW
+ *
+ * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
+ * accessible)
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
+ * 0x0 as accessible only via DBGM RDAT)
+ * @sample_buff_size: internal sample buff size
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
+ * 8000 HW set to 0x0 as not accessible)
+ * @txfifo_size: size of TXF0 ... TXF7
+ * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ * when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ */
+struct iwl_shared_mem_cfg {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM];
+ __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
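Sketch only, not part of the patch: once iwl_mvm_get_shared_mem_conf() (added below in fw.c) has copied this structure into the CPU-endian mirror mvm->shared_mem_cfg, a consumer could, for example, total the advertised TX FIFO sizes; the helper name is hypothetical.

static u32 example_total_txf_size(struct iwl_mvm *mvm)
{
	struct iwl_mvm_shared_mem_cfg *cfg = &mvm->shared_mem_cfg;
	u32 total = 0;
	int i;

	/* txfifo_size[] holds one entry per TXF0..TXF7 */
	for (i = 0; i < ARRAY_SIZE(cfg->txfifo_size); i++)
		total += cfg->txfifo_size[i];

	return total;
}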
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d0fa6e9ed590..ca38e9817374 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -70,6 +70,7 @@
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
@@ -269,7 +270,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
enum iwl_ucode_type ucode_type = mvm->cur_ucode;
/* Set parameters */
- phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
+ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
phy_cfg_cmd.calib_control.event_trigger =
mvm->fw->default_calib[ucode_type].event_trigger;
phy_cfg_cmd.calib_control.flow_trigger =
@@ -346,7 +347,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
mvm->calibrating = true;
/* Send TX valid antennas before triggering calibrations */
- ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
goto error;
@@ -399,8 +400,71 @@ out:
return ret;
}
-static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm,
- enum iwl_fw_dbg_conf conf_id)
+static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_host_cmd cmd = {
+ .id = SHARED_MEM_CFG,
+ .flags = CMD_WANT_SKB,
+ .data = { NULL, },
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_shared_mem_cfg *mem_cfg;
+ u32 i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
+ pkt->hdr.flags);
+ goto exit;
+ }
+
+ mem_cfg = (void *)pkt->data;
+
+ mvm->shared_mem_cfg.shared_mem_addr =
+ le32_to_cpu(mem_cfg->shared_mem_addr);
+ mvm->shared_mem_cfg.shared_mem_size =
+ le32_to_cpu(mem_cfg->shared_mem_size);
+ mvm->shared_mem_cfg.sample_buff_addr =
+ le32_to_cpu(mem_cfg->sample_buff_addr);
+ mvm->shared_mem_cfg.sample_buff_size =
+ le32_to_cpu(mem_cfg->sample_buff_size);
+ mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
+ mvm->shared_mem_cfg.txfifo_size[i] =
+ le32_to_cpu(mem_cfg->txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+ mvm->shared_mem_cfg.rxfifo_size[i] =
+ le32_to_cpu(mem_cfg->rxfifo_size[i]);
+ mvm->shared_mem_cfg.page_buff_addr =
+ le32_to_cpu(mem_cfg->page_buff_addr);
+ mvm->shared_mem_cfg.page_buff_size =
+ le32_to_cpu(mem_cfg->page_buff_size);
+ IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
+
+exit:
+ iwl_free_resp(&cmd);
+}
+
+void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
+{
+ /* stop recording */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ } else {
+ iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
+ iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+ }
+
+ schedule_work(&mvm->fw_error_dump_wk);
+}
+
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
{
u8 *ptr;
int ret;
@@ -435,6 +499,35 @@ static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm,
return ret;
}
+static int iwl_mvm_config_ltr_v1(struct iwl_mvm *mvm)
+{
+ struct iwl_ltr_config_cmd_v1 cmd_v1 = {
+ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+ };
+
+ if (!mvm->trans->ltr_enabled)
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+ sizeof(cmd_v1), &cmd_v1);
+}
+
+static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
+{
+ struct iwl_ltr_config_cmd cmd = {
+ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+ };
+
+ if (!mvm->trans->ltr_enabled)
+ return 0;
+
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
+ return iwl_mvm_config_ltr_v1(mvm);
+
+ return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+ sizeof(cmd), &cmd);
+}
+
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@@ -482,6 +575,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
+ iwl_mvm_get_shared_mem_conf(mvm);
+
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
@@ -489,7 +585,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
- ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
goto error;
@@ -538,14 +634,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
- if (mvm->trans->ltr_enabled) {
- struct iwl_ltr_config_cmd cmd = {
- .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
- };
-
- WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
- sizeof(cmd), &cmd));
- }
+ WARN_ON(iwl_mvm_config_ltr(mvm));
ret = iwl_mvm_power_update_device(mvm);
if (ret)
@@ -584,7 +673,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index f6d86ccce6a8..7bdc6220743f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -208,8 +208,10 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- qmask |= BIT(vif->hw_queue[ac]);
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ qmask |= BIT(vif->hw_queue[ac]);
+ }
if (vif->type == NL80211_IFTYPE_AP)
qmask |= BIT(vif->cab_queue);
@@ -460,6 +462,9 @@ exit_fail:
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
u32 ac;
int ret;
@@ -472,16 +477,17 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_TX_FIFO_VO);
+ IWL_MVM_TX_FIFO_VO, wdg_timeout);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
- IWL_MVM_TX_FIFO_MCAST);
+ IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
- iwl_mvm_ac_to_tx_fifo[ac]);
+ iwl_mvm_ac_to_tx_fifo[ac],
+ wdg_timeout);
break;
}
@@ -496,14 +502,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE);
+ iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0);
break;
case NL80211_IFTYPE_AP:
- iwl_mvm_disable_txq(mvm, vif->cab_queue);
+ iwl_mvm_disable_txq(mvm, vif->cab_queue, 0);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- iwl_mvm_disable_txq(mvm, vif->hw_queue[ac]);
+ iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0);
}
}
@@ -975,7 +981,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags);
mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
+ iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
beacon_cmd.tx.rate_n_flags =
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e880f9d4717b..1ff7ec08532d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -85,6 +85,7 @@
#include "testmode.h"
#include "iwl-fw-error-dump.h"
#include "iwl-prph.h"
+#include "iwl-csr.h"
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
@@ -105,7 +106,7 @@ static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
{
- .num_different_channels = 1,
+ .num_different_channels = 2,
.max_interfaces = 3,
.limits = iwl_mvm_limits,
.n_limits = ARRAY_SIZE(iwl_mvm_limits),
@@ -326,6 +327,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
hw->rate_control_algorithm = "iwl-mvm-rs";
+ hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/*
* Enable 11w if advertised by firmware and software crypto
@@ -336,13 +339,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
- !iwlwifi_mod_params.uapsd_disable) {
- hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
- hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
- hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- }
-
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
@@ -377,6 +373,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+ /* we can compensate for an offset of up to 3 channels = 15 MHz */
+ hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -403,10 +401,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
- if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
+ if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+ }
+
hw->wiphy->hw_version = mvm->trans->hw_id;
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
@@ -459,15 +462,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
device_can_wakeup(mvm->trans->dev)) {
mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
hw->wiphy->wowlan = &mvm->wowlan;
- } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ }
+
+ if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
- mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE |
- WIPHY_WOWLAN_NET_DETECT;
+ mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT;
if (!iwlwifi_mod_params.sw_crypto)
mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -707,9 +712,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
mvmvif->uploaded = false;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
- /* does this make sense at all? */
- mvmvif->color++;
-
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
@@ -761,41 +763,215 @@ static void iwl_mvm_free_coredump(const void *data)
kfree(fw_error_dump);
}
+static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
+ struct iwl_fw_error_dump_data **dump_data)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ unsigned long flags;
+ int i, j;
+
+ if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
+ return;
+
+ /* Pull RXF data from all RXFs */
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
+ /*
+ * Keep aside the additional offset that might be needed for
+ * next RXF
+ */
+ u32 offset_diff = RXF_DIFF_FROM_PREV * i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the RXF */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_D_SPACE +
+ offset_diff));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_WR_PTR +
+ offset_diff));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_RD_PTR +
+ offset_diff));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_FENCE_PTR +
+ offset_diff));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_SET_FENCE_MODE +
+ offset_diff));
+
+ /* Lock fence */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_SET_FENCE_MODE + offset_diff, 0x1);
+ /* Set fence pointer to the same place as the WR pointer */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_LD_WR2FENCE + offset_diff, 0x1);
+ /* Set fence offset */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
+ 0x0);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] = iwl_trans_read_prph(mvm->trans,
+ RXF_FIFO_RD_FENCE_INC +
+ offset_diff);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+
+ /* Pull TXF data from all TXFs */
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the FIFO */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FIFO_ITEM_CNT));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_WR_PTR));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_RD_PTR));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FENCE_PTR));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_LOCK_FENCE));
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
+ TXF_WR_PTR);
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] = iwl_trans_read_prph(mvm->trans,
+ TXF_READ_MODIFY_DATA);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+
+ iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
+ struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_mvm_dump_ptrs *fw_error_dump;
- const struct fw_img *img;
u32 sram_len, sram_ofs;
- u32 file_len, rxf_len;
- unsigned long flags;
- int reg_val;
+ u32 file_len, fifo_data_len = 0;
+ u32 smem_len = mvm->cfg->smem_len;
+ u32 sram2_len = mvm->cfg->dccm2_len;
lockdep_assert_held(&mvm->mutex);
+ /* W/A for 8000 HW family A-step */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) {
+ if (smem_len)
+ smem_len = 0x38000;
+
+ if (sram2_len)
+ sram2_len = 0x10000;
+ }
+
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
- img = &mvm->fw->img[mvm->cur_ucode];
- sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
- sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ /* SRAM - include stack CCM if driver knows the values for it */
+ if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
+ const struct fw_img *img;
- /* reading buffer size */
- reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
- rxf_len = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+ img = &mvm->fw->img[mvm->cur_ucode];
+ sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ } else {
+ sram_ofs = mvm->cfg->dccm_offset;
+ sram_len = mvm->cfg->dccm_len;
+ }
- /* the register holds the value divided by 128 */
- rxf_len = rxf_len << 7;
+ /* reading RXF/TXF sizes */
+ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
+ struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
+ int i;
+
+ fifo_data_len = 0;
+
+ /* Count RXF size */
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
+ if (!mem_cfg->rxfifo_size[i])
+ continue;
+
+ /* Add header info */
+ fifo_data_len += mem_cfg->rxfifo_size[i] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+ if (!mem_cfg->txfifo_size[i])
+ continue;
+
+ /* Add header info */
+ fifo_data_len += mem_cfg->txfifo_size[i] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+ }
file_len = sizeof(*dump_file) +
- sizeof(*dump_data) * 3 +
- sram_len +
- rxf_len +
+ sizeof(*dump_data) * 2 +
+ sram_len + sizeof(*dump_mem) +
+ fifo_data_len +
sizeof(*dump_info);
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
@@ -814,6 +990,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
sizeof(dump_info->fw_human_readable));
strncpy(dump_info->dev_human_readable, mvm->cfg->name,
@@ -822,28 +999,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(dump_info->bus_human_readable));
dump_data = iwl_fw_error_next_data(dump_data);
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
- dump_data->len = cpu_to_le32(rxf_len);
-
- if (iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
- u32 *rxf = (void *)dump_data->data;
- int i;
+ /* We only dump the FIFOs if the FW is in error state */
+ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
+ iwl_mvm_dump_fifos(mvm, &dump_data);
+
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+ dump_mem->offset = cpu_to_le32(sram_ofs);
+ iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
+ sram_len);
- for (i = 0; i < (rxf_len / sizeof(u32)); i++) {
- iwl_trans_write_prph(mvm->trans,
- RXF_LD_FENCE_OFFSET_ADDR,
- i * sizeof(u32));
- rxf[i] = iwl_trans_read_prph(mvm->trans,
- RXF_FIFO_RD_FENCE_ADDR);
- }
- iwl_trans_release_nic_access(mvm->trans, &flags);
+ if (smem_len) {
+ dump_data = iwl_fw_error_next_data(dump_data);
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
+ dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
+ iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
+ dump_mem->data, smem_len);
}
- dump_data = iwl_fw_error_next_data(dump_data);
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
- dump_data->len = cpu_to_le32(sram_len);
- iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_data->data,
- sram_len);
+ if (sram2_len) {
+ dump_data = iwl_fw_error_next_data(dump_data);
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+ dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
+ iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
+ dump_mem->data, sram2_len);
+ }
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
fw_error_dump->op_mode_len = file_len;
@@ -864,6 +1052,11 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
iwl_mvm_fw_error_dump(mvm);
+ /* cleanup all stale references (scan, roc), but keep the
+ * ucode_down ref until reconfig is complete
+ */
+ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
iwl_trans_stop_device(mvm->trans);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -893,10 +1086,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
- /* cleanup all stale references (scan, roc), but keep the
- * ucode_down ref until reconfig is complete */
- iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
-
/* clear any stale d0i3 state */
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
@@ -933,6 +1122,19 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ /* Some hw restart cleanups must not hold the mutex */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ /*
+ * Make sure we are out of d0i3. This is needed
+ * to make sure the reference accounting is correct
+ * (and there is no stale d0i3_exit_work).
+ */
+ wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3,
+ &mvm->status),
+ HZ);
+ }
+
mutex_lock(&mvm->mutex);
ret = __iwl_mvm_mac_start(mvm);
mutex_unlock(&mvm->mutex);
@@ -982,6 +1184,13 @@ static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
_iwl_mvm_exit_d0i3(mvm);
}
+
+ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
+ if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3,
+ &mvm->status),
+ HZ))
+ WARN_ONCE(1, "D0i3 exit on resume timed out\n");
}
static void
@@ -1146,7 +1355,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
- goto out_release;
+ goto out_remove_mac;
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
@@ -2088,7 +2297,7 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
/*
* This is called before mac80211 does RCU synchronisation,
@@ -2105,6 +2314,20 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const u8 *bssid)
+{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
+ return;
+
+ if (iwlwifi_mod_params.uapsd_disable) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2164,6 +2387,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* Reset EBS status here assuming environment has been changed.
*/
mvm->last_ebs_successful = true;
+ iwl_mvm_check_uapsd(mvm, vif, sta->addr);
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
@@ -3103,7 +3327,7 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
bool set)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
if (!mvm_sta || !mvm_sta->vif) {
IWL_ERR(mvm, "Station is not associated to a vif\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d24660fb4ef2..6c69d0584f6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -119,11 +119,13 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
* We will register to mac80211 to have testmode working. The NIC must not
* be up'ed after the INIT fw asserted. This is useful to be able to use
* proprietary tools over testmode to debug the INIT fw.
+ * @tfd_q_hang_detect: enables the detection of hung transmit queues
* @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
* Save)-2(default), LP(Low Power)-3
*/
struct iwl_mvm_mod_params {
bool init_dbg;
+ bool tfd_q_hang_detect;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -276,6 +278,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_TM_CMD,
IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_PROTECT_CSA,
+ IWL_MVM_REF_FW_DBG_COLLECT,
/* update debugfs.c when changing this */
@@ -531,10 +534,23 @@ enum {
enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_IDLE = 0,
IWL_MVM_TDLS_SW_REQ_SENT,
+ IWL_MVM_TDLS_SW_RESP_RCVD,
IWL_MVM_TDLS_SW_REQ_RCVD,
IWL_MVM_TDLS_SW_ACTIVE,
};
+struct iwl_mvm_shared_mem_cfg {
+ u32 shared_mem_addr;
+ u32 shared_mem_size;
+ u32 sample_buff_addr;
+ u32 sample_buff_size;
+ u32 txfifo_addr;
+ u32 txfifo_size[TX_FIFO_MAX_NUM];
+ u32 rxfifo_size[RX_FIFO_MAX_NUM];
+ u32 page_buff_addr;
+ u32 page_buff_size;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -641,6 +657,8 @@ struct iwl_mvm {
bool disable_power_off;
bool disable_power_off_d3;
+ bool scan_iter_notif_enabled;
+
struct debugfs_blob_wrapper nvm_hw_blob;
struct debugfs_blob_wrapper nvm_sw_blob;
struct debugfs_blob_wrapper nvm_calib_blob;
@@ -782,8 +800,13 @@ struct iwl_mvm {
struct cfg80211_chan_def chandef;
struct sk_buff *skb; /* ch sw template */
u32 ch_sw_tm_ie;
+
+ /* timestamp of last ch-sw request sent (GP2 time) */
+ u32 sent_timestamp;
} peer;
} tdls_cs;
+
+ struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
};
/* Extract MVM priv from op_mode and _hw */
@@ -850,12 +873,14 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
return mvm->trans->cfg->d0i3 &&
+ mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
+ !iwlwifi_mod_params.d0i3_disable &&
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
-static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
- return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT;
+ return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@ -937,6 +962,33 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
+static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
+{
+ return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
+ mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
+ mvm->fw->valid_tx_ant;
+}
+
+static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
+{
+ return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
+ mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
+ mvm->fw->valid_rx_ant;
+}
+
+static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
+{
+ u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
+ FW_PHY_CFG_RX_CHAIN);
+ u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+ u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+ phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
+ valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+
+ return mvm->fw->phy_config & phy_config;
+}
+
int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -970,6 +1022,9 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1031,6 +1086,9 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -1091,9 +1149,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
-void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
- struct iwl_mvm_frame_stats *stats,
- u32 rate, bool agg);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
@@ -1159,6 +1215,8 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
+int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */
@@ -1260,11 +1318,13 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
/* hw scheduler queue config */
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg);
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue);
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout);
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
-static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
- u8 fifo)
+static inline
+void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
+ u8 fifo, unsigned int wdg_timeout)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
@@ -1273,12 +1333,13 @@ static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
.frame_limit = IWL_FRAME_LIMIT,
};
- iwl_mvm_enable_txq(mvm, queue, 0, &cfg);
+ iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
}
static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
int fifo, int sta_id, int tid,
- int frame_limit, u16 ssn)
+ int frame_limit, u16 ssn,
+ unsigned int wdg_timeout)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
@@ -1288,7 +1349,7 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
.aggregate = true,
};
- iwl_mvm_enable_txq(mvm, queue, ssn, &cfg);
+ iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
}
/* Assoc status */
@@ -1344,4 +1405,7 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id);
+void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm);
+
#endif /* __IWL_MVM_H__ */
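Sketch only, not part of the patch: the antenna helpers added above intersect the firmware's capability mask with the NVM mask when the latter is present. With a hypothetical FW mask of 0x7 (antennas A|B|C) and an NVM mask of 0x3 (A|B), the effective TX mask is 0x3:

static u8 example_effective_tx_ant(u8 fw_valid_tx_ant, u8 nvm_valid_tx_ant)
{
	/* mirrors iwl_mvm_get_valid_tx_ant(): NVM restricts FW when known */
	return nvm_valid_tx_ant ? (fw_valid_tx_ant & nvm_valid_tx_ant)
				: fw_valid_tx_ant;
}

iwl_mvm_get_phy_config() then splices these effective masks into the TX/RX chain fields of mvm->fw->phy_config while leaving every other bit of the PHY configuration unchanged.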
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index d55fd8e3654c..5383429d96c1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -356,7 +356,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
- else /* Family 8000 B-step */
+ else /* Family 8000 B-step or C-step */
max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;
/*
@@ -565,6 +565,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data)
return -ENODATA;
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
+ mvm->nvm_data->nvm_version);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 97dfba50c682..2dffc3600ed3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -84,15 +84,8 @@
#include "time-event.h"
#include "iwl-fw-error-dump.h"
-/*
- * module name, copyright, version, etc.
- */
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
-
-#define DRV_VERSION IWLWIFI_VERSION
-
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
@@ -100,6 +93,7 @@ static const struct iwl_op_mode_ops iwl_mvm_ops;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
+ .tfd_q_hang_detect = true
/* rest of fields are 0 by default */
};
@@ -109,6 +103,10 @@ MODULE_PARM_DESC(init_dbg,
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(tfd_q_hang_detect,
+ "TFD queues hang detection (default: true)");
/*
* module init and exit functions
@@ -146,13 +144,14 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
u32 reg_val = 0;
+ u32 phy_config = iwl_mvm_get_phy_config(mvm);
- radio_cfg_type = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_TYPE) >>
- FW_PHY_CFG_RADIO_TYPE_POS;
- radio_cfg_step = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_STEP) >>
- FW_PHY_CFG_RADIO_STEP_POS;
- radio_cfg_dash = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_DASH) >>
- FW_PHY_CFG_RADIO_DASH_POS;
+ radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
+ FW_PHY_CFG_RADIO_TYPE_POS;
+ radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
+ FW_PHY_CFG_RADIO_STEP_POS;
+ radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
+ FW_PHY_CFG_RADIO_DASH_POS;
/* SKU control */
reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
@@ -240,6 +239,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
+ RX_HANDLER(SCAN_ITERATION_COMPLETE,
+ iwl_mvm_rx_scan_offload_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
@@ -274,6 +275,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MGMT_MCAST_KEY),
CMD(TX_CMD),
CMD(TXPATH_FLUSH),
+ CMD(SHARED_MEM_CFG),
CMD(MAC_CONTEXT_CMD),
CMD(TIME_EVENT_CMD),
CMD(TIME_EVENT_NOTIFICATION),
@@ -476,17 +478,19 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
trans_cfg.bc_table_dword = true;
- if (!iwlwifi_mod_params.wd_disable)
- trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
- else
- trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
-
trans_cfg.command_names = iwl_mvm_cmd_strings;
trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
+ trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
+
+ /* Set a short watchdog for the command queue */
+ trans_cfg.cmd_q_wdg_timeout =
+ iwlmvm_mod_params.tfd_q_hang_detect ? IWL_DEF_WD_TIMEOUT :
+ IWL_WATCHDOG_DISABLED;
+
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
"%s", fw->fw_version);
@@ -517,10 +521,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
/* set the nvm_file_name according to priority */
- if (iwlwifi_mod_params.nvm_file)
+ if (iwlwifi_mod_params.nvm_file) {
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
- else
- mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+ } else {
+ if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
+ (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP))
+ mvm->nvm_file_name = mvm->cfg->default_nvm_file_8000A;
+ else
+ mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+ }
if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
"not allowing power-up and not having nvm_file\n"))
@@ -559,6 +568,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!mvm->scan_cmd)
goto out_free;
+ /* Set EBS as successful as long as not stated otherwise by the FW. */
+ mvm->last_ebs_successful = true;
+
err = iwl_mvm_mac_setup_register(mvm);
if (err)
goto out_free;
@@ -817,9 +829,20 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
struct iwl_mvm *mvm =
container_of(work, struct iwl_mvm, fw_error_dump_wk);
+ if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
+ return;
+
mutex_lock(&mvm->mutex);
iwl_mvm_fw_error_dump(mvm);
+
+ /* start recording again if the firmware has not crashed */
+ WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
+ mvm->fw->dbg_dest_tlv &&
+ iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+
mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
@@ -855,7 +878,10 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* If WoWLAN fw asserted, don't restart either, mac80211
* can't recover this since we're already half suspended.
*/
- if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (!mvm->restart_fw && fw_error) {
+ schedule_work(&mvm->fw_error_dump_wk);
+ } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
IWL_ERR(mvm,
@@ -879,16 +905,13 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
reprobe->dev = mvm->trans->dev;
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
- } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
- (!fw_error || mvm->restart_fw)) {
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (fw_error && mvm->restart_fw > 0)
mvm->restart_fw--;
ieee80211_restart_hw(mvm->hw);
- } else if (fw_error) {
- schedule_work(&mvm->fw_error_dump_wk);
}
}
@@ -1031,7 +1054,8 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
out:
rcu_read_unlock();
}
-static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+
+int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
@@ -1047,6 +1071,7 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
+ .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
};
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
@@ -1146,7 +1171,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
if (mvm->d0i3_offloading && qos_seq) {
/* update qos seq numbers if offloading was enabled */
- mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = le16_to_cpu(qos_seq[i]);
/* firmware stores last-used one, we store next one */
@@ -1245,7 +1270,7 @@ out:
return ret;
}
-static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 1c0d4a45c1a8..5b43616eeb06 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -170,13 +170,13 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2;
}
- cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
+ cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
- cmd->txchain_info = cpu_to_le32(mvm->fw->valid_tx_ant);
+ cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 30ceb67ed7a7..194bd1f939ca 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -39,28 +39,16 @@
#include "sta.h"
#include "iwl-op-mode.h"
#include "mvm.h"
+#include "debugfs.h"
#define RS_NAME "iwl-mvm-rs"
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define RS_LEGACY_RETRIES_PER_RATE 1
-#define RS_HT_VHT_RETRIES_PER_RATE 2
-#define RS_HT_VHT_RETRIES_PER_RATE_TW 1
-#define RS_INITIAL_MIMO_NUM_RATES 3
-#define RS_INITIAL_SISO_NUM_RATES 3
-#define RS_INITIAL_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
-#define RS_SECONDARY_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
-#define RS_SECONDARY_SISO_NUM_RATES 3
-#define RS_SECONDARY_SISO_RETRIES 1
-
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 3 /* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
-/* max allowed rate miss before sync LQ cmd */
-#define IWL_MISSED_RATE_MAX 15
-#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
-#define RS_IDLE_TIMEOUT (5*HZ)
+/* Calculations of the success ratio are done in fixed point where 12800 is
+ * 100%. Use this macro when dealing with threshold constants set as a
+ * percentage, e.g. RS_PERCENT(50) == 6400.
+ */
+#define RS_PERCENT(x) (128 * (x))
static u8 rs_ht_to_legacy[] = {
[IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
@@ -173,7 +161,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return false;
- if (num_of_ant(mvm->fw->valid_tx_ant) < 2)
+ if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
return false;
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -613,7 +601,8 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
-static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+static int _rs_collect_tx_data(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes,
struct iwl_rate_scale_data *window)
{
@@ -668,8 +657,8 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
fail_count = window->counter - window->success_counter;
/* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+ if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
+ (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
window->average_tpt = (window->success_ratio * tpt + 64) / 128;
else
window->average_tpt = IWL_INVALID_VALUE;
@@ -677,7 +666,8 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
-static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+static int rs_collect_tx_data(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes,
u8 reduced_txp)
@@ -698,7 +688,7 @@ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
- ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
window);
if (ret)
return ret;
@@ -707,7 +697,7 @@ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
return -EINVAL;
window = &tbl->tpc_win[reduced_txp];
- return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
window);
}
@@ -928,7 +918,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
break;
if (rate_mask & (1 << low))
break;
- IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
}
high = index;
@@ -938,7 +927,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
break;
if (rate_mask & (1 << high))
break;
- IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
}
return (high << 8) | low;
@@ -1004,7 +992,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
}
if (num_of_ant(rate->ant) > 1)
- rate->ant = first_antenna(mvm->fw->valid_tx_ant);
+ rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
/* Relevant in both switching to SISO or Legacy */
rate->sgi = false;
@@ -1125,7 +1113,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (time_after(jiffies,
- (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
+ (unsigned long)(lq_sta->last_tx +
+ (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
int t;
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
@@ -1158,7 +1147,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* ... driver.
*/
lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+ if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
IWL_DEBUG_RATE(mvm,
"Too many rates mismatch. Send sync LQ. rs_state %d\n",
@@ -1213,7 +1202,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
+ rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len,
reduced_txp);
@@ -1249,7 +1238,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
else
continue;
- rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+ rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1,
i < retries ? 0 : legacy_success,
reduced_txp);
}
@@ -1303,13 +1292,13 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
if (is_legacy) {
- lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+ lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
} else {
- lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+ lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
}
lq_sta->table_count = 0;
lq_sta->total_failed = 0;
@@ -1318,6 +1307,13 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
+static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
+{
+ if (rate_mask)
+ return find_last_bit(&rate_mask, BITS_PER_LONG);
+ return IWL_RATE_INVALID;
+}
+
static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
const struct rs_tx_column *column)
{
@@ -1420,7 +1416,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
u32 target_tpt;
int rate_idx;
- if (success_ratio > RS_SR_NO_DECREASE) {
+ if (success_ratio > IWL_MVM_RS_SR_NO_DECREASE) {
target_tpt = 100 * expected_current_tpt;
IWL_DEBUG_RATE(mvm,
"SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
@@ -1488,7 +1484,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
flush_interval_passed =
time_after(jiffies,
(unsigned long)(lq_sta->flush_timer +
- RS_STAY_IN_COLUMN_TIMEOUT));
+ (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
/*
* Check if we should allow search for new modulation mode.
@@ -1567,7 +1563,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
- u8 valid_ants = mvm->fw->valid_tx_ant;
+ u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
const u16 *expected_tpt_tbl;
u16 tpt, max_expected_tpt;
@@ -1613,8 +1609,12 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
continue;
max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
- if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID))
+ if (max_rate == IWL_RATE_INVALID) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: no rate is allowed in this column\n",
+ next_col_id);
continue;
+ }
max_expected_tpt = expected_tpt_tbl[max_rate];
if (tpt >= max_expected_tpt) {
@@ -1724,7 +1724,8 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
{
enum rs_action action = RS_ACTION_STAY;
- if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
+ if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
+ (current_tpt == 0)) {
IWL_DEBUG_RATE(mvm,
"Decrease rate because of low SR\n");
return RS_ACTION_DOWNSCALE;
@@ -1783,7 +1784,7 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
out:
if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
- if (sr >= RS_SR_NO_DECREASE) {
+ if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
IWL_DEBUG_RATE(mvm,
"SR is above NO DECREASE. Avoid downscale\n");
action = RS_ACTION_STAY;
@@ -1802,18 +1803,10 @@ out:
static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta)
{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct ieee80211_vif *vif = mvmsta->vif;
- bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
- !vif->bss_conf.ps);
-
/* Our chip supports Tx STBC and the peer is an HT/VHT STA which
* supports STBC of at least 1*SS
*/
- if (!lq_sta->stbc)
- return false;
-
- if (!mvm->ps_disabled && !sta_ps_disabled)
+ if (!lq_sta->stbc_capable)
return false;
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -1825,11 +1818,11 @@ static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
int *weaker, int *stronger)
{
- *weaker = index + TPC_TX_POWER_STEP;
+ *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
if (*weaker > TPC_MAX_REDUCTION)
*weaker = TPC_INVALID;
- *stronger = index - TPC_TX_POWER_STEP;
+ *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
if (*stronger < 0)
*stronger = TPC_INVALID;
}
@@ -1885,7 +1878,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
}
/* Too many failures, increase txp */
- if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+ if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
+ current_tpt == 0) {
IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
return TPC_ACTION_NO_RESTIRCTION;
}
@@ -1908,7 +1902,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
}
/* next, increase if needed */
- if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+ if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+ strong != TPC_INVALID) {
if (weak_tpt == IWL_INVALID_VALUE &&
strong_tpt != IWL_INVALID_VALUE &&
current_tpt < strong_tpt) {
@@ -1935,7 +1930,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *vif = mvm_sta->vif;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_band band;
@@ -2044,7 +2039,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
u16 high_low;
s32 sr;
u8 prev_agg = lq_sta->is_agg;
- struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
struct rs_rate *rate;
@@ -2106,8 +2101,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* in current association (use new rate found above).
*/
fail_count = window->counter - window->success_counter;
- if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+ if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
+ (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
IWL_DEBUG_RATE(mvm,
"(%s: %d): Test Window: succ %d total %d\n",
rs_pretty_lq_type(rate->type),
@@ -2385,7 +2380,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
int i, nentries;
s8 best_rssi = S8_MIN;
u8 best_ant = ANT_NONE;
- u8 valid_tx_ant = mvm->fw->valid_tx_ant;
+ u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
const struct rs_init_rate_info *initial_rates;
for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
@@ -2530,7 +2525,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
gfp_t gfp)
{
- struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
+ struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
@@ -2606,68 +2601,121 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
}
}
+static void rs_ht_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ /* active_siso_rate mask includes 9 MBits (bit 5),
+ * and CCK (bits 0-3), supp_rates[] does not;
+ * shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ if (mvm->cfg->ht_params->ldpc &&
+ (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
+ lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
+ lq_sta->stbc_capable = true;
+
+ lq_sta->is_vht = false;
+}
+
+static void rs_vht_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta_vht_cap *vht_cap)
+{
+ rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
+
+ if (mvm->cfg->ht_params->ldpc &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
+ lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
+ lq_sta->stbc_capable = true;
+
+ if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+ lq_sta->bfer_capable = true;
+
+ lq_sta->is_vht = true;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm,
- struct iwl_mvm_frame_stats *stats)
+static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
{
spin_lock_bh(&mvm->drv_stats_lock);
- memset(stats, 0, sizeof(*stats));
+ memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
spin_unlock_bh(&mvm->drv_stats_lock);
}
-void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
- struct iwl_mvm_frame_stats *stats,
- u32 rate, bool agg)
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
{
u8 nss = 0, mcs = 0;
spin_lock(&mvm->drv_stats_lock);
if (agg)
- stats->agg_frames++;
+ mvm->drv_rx_stats.agg_frames++;
- stats->success_frames++;
+ mvm->drv_rx_stats.success_frames++;
switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
- stats->bw_20_frames++;
+ mvm->drv_rx_stats.bw_20_frames++;
break;
case RATE_MCS_CHAN_WIDTH_40:
- stats->bw_40_frames++;
+ mvm->drv_rx_stats.bw_40_frames++;
break;
case RATE_MCS_CHAN_WIDTH_80:
- stats->bw_80_frames++;
+ mvm->drv_rx_stats.bw_80_frames++;
break;
default:
WARN_ONCE(1, "bad BW. rate 0x%x", rate);
}
if (rate & RATE_MCS_HT_MSK) {
- stats->ht_frames++;
+ mvm->drv_rx_stats.ht_frames++;
mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
} else if (rate & RATE_MCS_VHT_MSK) {
- stats->vht_frames++;
+ mvm->drv_rx_stats.vht_frames++;
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
} else {
- stats->legacy_frames++;
+ mvm->drv_rx_stats.legacy_frames++;
}
if (nss == 1)
- stats->siso_frames++;
+ mvm->drv_rx_stats.siso_frames++;
else if (nss == 2)
- stats->mimo2_frames++;
+ mvm->drv_rx_stats.mimo2_frames++;
if (rate & RATE_MCS_SGI_MSK)
- stats->sgi_frames++;
+ mvm->drv_rx_stats.sgi_frames++;
else
- stats->ngi_frames++;
+ mvm->drv_rx_stats.ngi_frames++;
- stats->last_rates[stats->last_frame_idx] = rate;
- stats->last_frame_idx = (stats->last_frame_idx + 1) %
- ARRAY_SIZE(stats->last_rates);
+ mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
+ mvm->drv_rx_stats.last_frame_idx =
+ (mvm->drv_rx_stats.last_frame_idx + 1) %
+ ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
spin_unlock(&mvm->drv_stats_lock);
}
@@ -2683,14 +2731,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct iwl_mvm_sta *sta_priv;
- struct iwl_lq_sta *lq_sta;
+ struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
struct ieee80211_supported_band *sband;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
- sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
- lq_sta = &sta_priv->lq_sta;
-
/* clear all non-persistent lq data */
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@@ -2712,7 +2757,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
- lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+ lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
lq_sta->band = sband->band;
/*
* active legacy rates as per supported rates bitmap
@@ -2723,61 +2768,28 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
/* TODO: should probably account for rx_highest for both HT/VHT */
- if (!vht_cap || !vht_cap->vht_supported) {
- /* active_siso_rate mask includes 9 MBits (bit 5),
- * and CCK (bits 0-3), supp_rates[] does not;
- * shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
-
- lq_sta->is_vht = false;
- if (mvm->cfg->ht_params->ldpc &&
- (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
- lq_sta->ldpc = true;
-
- if (mvm->cfg->ht_params->stbc &&
- (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
- (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
- lq_sta->stbc = true;
- } else {
- rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
- lq_sta->is_vht = true;
-
- if (mvm->cfg->ht_params->ldpc &&
- (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
- lq_sta->ldpc = true;
-
- if (mvm->cfg->ht_params->stbc &&
- (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
- (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
- lq_sta->stbc = true;
- }
+ if (!vht_cap || !vht_cap->vht_supported)
+ rs_ht_init(mvm, sta, lq_sta, ht_cap);
+ else
+ rs_vht_init(mvm, sta, lq_sta, vht_cap);
- if (IWL_MVM_RS_DISABLE_MIMO)
+ if (IWL_MVM_RS_DISABLE_P2P_MIMO && sta_priv->vif->p2p)
lq_sta->active_mimo2_rate = 0;
- lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
- BITS_PER_LONG);
- lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
- BITS_PER_LONG);
- lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate,
- BITS_PER_LONG);
+ lq_sta->max_legacy_rate_idx =
+ rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
+ lq_sta->max_siso_rate_idx =
+ rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
+ lq_sta->max_mimo2_rate_idx =
+ rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
IWL_DEBUG_RATE(mvm,
- "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC%d\n",
+ "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
lq_sta->active_legacy_rate,
lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate,
- lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc);
+ lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
+ lq_sta->bfer_capable);
IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
lq_sta->max_legacy_rate_idx,
lq_sta->max_siso_rate_idx,
@@ -2785,14 +2797,14 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
- first_antenna(mvm->fw->valid_tx_ant);
+ first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
lq_sta->is_agg = 0;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
+ iwl_mvm_reset_frame_stats(mvm);
#endif
rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
@@ -2861,12 +2873,13 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
int index = *rs_table_index;
for (i = 0; i < num_rates && index < end; i++) {
- ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate));
- for (j = 0; j < num_retries && index < end; j++, index++)
+ for (j = 0; j < num_retries && index < end; j++, index++) {
+ ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
+ rate));
rs_table[index] = ucode_rate;
-
- if (toggle_ant)
- rs_toggle_antenna(valid_tx_ant, rate);
+ if (toggle_ant)
+ rs_toggle_antenna(valid_tx_ant, rate);
+ }
prev_rate_idx = rate->index;
bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
@@ -2874,7 +2887,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
break;
}
- if (!bottom_reached)
+ if (!bottom_reached && !is_legacy(rate))
rate->index = prev_rate_idx;
*rs_table_index = index;
@@ -2913,18 +2926,22 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
memcpy(&rate, initial_rate, sizeof(rate));
- valid_tx_ant = mvm->fw->valid_tx_ant;
- rate.stbc = rs_stbc_allow(mvm, sta, lq_sta);
+ valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+ /* TODO: remove old API when min FW API hits 14 */
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+ rs_stbc_allow(mvm, sta, lq_sta))
+ rate.stbc = true;
if (is_siso(&rate)) {
- num_rates = RS_INITIAL_SISO_NUM_RATES;
- num_retries = RS_HT_VHT_RETRIES_PER_RATE;
+ num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
+ num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
} else if (is_mimo(&rate)) {
- num_rates = RS_INITIAL_MIMO_NUM_RATES;
- num_retries = RS_HT_VHT_RETRIES_PER_RATE;
+ num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
+ num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
} else {
- num_rates = RS_INITIAL_LEGACY_NUM_RATES;
- num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
+ num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
toggle_ant = true;
}
@@ -2935,12 +2952,12 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
rs_get_lower_rate_down_column(lq_sta, &rate);
if (is_siso(&rate)) {
- num_rates = RS_SECONDARY_SISO_NUM_RATES;
- num_retries = RS_SECONDARY_SISO_RETRIES;
+ num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
+ num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
lq_cmd->mimo_delim = index;
} else if (is_legacy(&rate)) {
- num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
- num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
} else {
WARN_ON_ONCE(1);
}
@@ -2953,8 +2970,8 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
rs_get_lower_rate_down_column(lq_sta, &rate);
- num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
- num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
num_rates, num_retries, valid_tx_ant,
@@ -2962,6 +2979,142 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
}
+struct rs_bfer_active_iter_data {
+ struct ieee80211_sta *exclude_sta;
+ struct iwl_mvm_sta *bfer_mvmsta;
+};
+
+static void rs_bfer_active_iter(void *_data,
+ struct ieee80211_sta *sta)
+{
+ struct rs_bfer_active_iter_data *data = _data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+ u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
+
+ if (sta == data->exclude_sta)
+ return;
+
+ /* The current sta has BFER allowed */
+ if (ss_params & LQ_SS_BFER_ALLOWED) {
+ WARN_ON_ONCE(data->bfer_mvmsta != NULL);
+
+ data->bfer_mvmsta = mvmsta;
+ }
+}
+
+static int rs_bfer_priority(struct iwl_mvm_sta *sta)
+{
+ int prio = -1;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
+
+ switch (viftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ prio = 3;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ prio = 2;
+ break;
+ case NL80211_IFTYPE_STATION:
+ prio = 1;
+ break;
+ default:
+ WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
+ prio = -1;
+ }
+
+ return prio;
+}
+
+/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
+static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
+ struct iwl_mvm_sta *sta2)
+{
+ int prio1 = rs_bfer_priority(sta1);
+ int prio2 = rs_bfer_priority(sta2);
+
+ if (prio1 > prio2)
+ return 1;
+ if (prio1 < prio2)
+ return -1;
+ return 0;
+}
+
+static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
+{
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct rs_bfer_active_iter_data data = {
+ .exclude_sta = sta,
+ .bfer_mvmsta = NULL,
+ };
+ struct iwl_mvm_sta *bfer_mvmsta = NULL;
+ u32 ss_params = LQ_SS_PARAMS_VALID;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ goto out;
+
+ /* Check if forcing the decision is configured.
+ * Note that SISO is forced by not allowing STBC or BFER
+ */
+ if (lq_sta->ss_force == RS_SS_FORCE_STBC)
+ ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
+ else if (lq_sta->ss_force == RS_SS_FORCE_BFER)
+ ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
+
+ if (lq_sta->ss_force != RS_SS_FORCE_NONE) {
+ IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
+ lq_sta->ss_force);
+ goto out;
+ }
+
+ if (lq_sta->stbc_capable)
+ ss_params |= LQ_SS_STBC_1SS_ALLOWED;
+
+ if (!lq_sta->bfer_capable)
+ goto out;
+
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ rs_bfer_active_iter,
+ &data);
+ bfer_mvmsta = data.bfer_mvmsta;
+
+ /* This code is safe as it doesn't run concurrently for different
+ * stations. This is guaranteed by the fact that calls to
+ * ieee80211_tx_status wouldn't run concurrently for a single HW.
+ */
+ if (!bfer_mvmsta) {
+ IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
+
+ ss_params |= LQ_SS_BFER_ALLOWED;
+ goto out;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
+ bfer_mvmsta->sta_id);
+
+ /* Disallow BFER on another STA if active and we're a higher priority */
+ if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
+ struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+ u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
+
+ bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
+ bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
+ iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+
+ ss_params |= LQ_SS_BFER_ALLOWED;
+ IWL_DEBUG_RATE(mvm,
+ "Lower priority BFER sta found (%d). Switch BFER\n",
+ bfer_mvmsta->sta_id);
+ }
+out:
+ lq_cmd->ss_params = cpu_to_le32(ss_params);
+}
+
static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
@@ -2971,9 +3124,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
- lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
lq_cmd->agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+ cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);
#ifdef CONFIG_MAC80211_DEBUGFS
if (lq_sta->pers.dbg_fixed_rate) {
@@ -2988,6 +3141,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+ rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
+
if (num_of_ant(initial_rate->ant) == 1)
lq_cmd->single_stream_ant_msk = initial_rate->ant;
@@ -3167,9 +3323,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->pers.dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(rate)) ? "legacy" :
is_vht(rate) ? "VHT" : "HT");
@@ -3361,9 +3517,73 @@ static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
.llseek = default_llseek,
};
+static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ char buf[12];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+ static const char * const ss_force_name[] = {
+ [RS_SS_FORCE_NONE] = "none",
+ [RS_SS_FORCE_STBC] = "stbc",
+ [RS_SS_FORCE_BFER] = "bfer",
+ [RS_SS_FORCE_SISO] = "siso",
+ };
+
+ pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
+ ss_force_name[lq_sta->ss_force]);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+ int ret = 0;
+
+ if (!strncmp("none", buf, 4)) {
+ lq_sta->ss_force = RS_SS_FORCE_NONE;
+ } else if (!strncmp("siso", buf, 4)) {
+ lq_sta->ss_force = RS_SS_FORCE_SISO;
+ } else if (!strncmp("stbc", buf, 4)) {
+ if (lq_sta->stbc_capable) {
+ lq_sta->ss_force = RS_SS_FORCE_STBC;
+ } else {
+ IWL_ERR(mvm,
+ "can't force STBC. peer doesn't support\n");
+ ret = -EINVAL;
+ }
+ } else if (!strncmp("bfer", buf, 4)) {
+ if (lq_sta->bfer_capable) {
+ lq_sta->ss_force = RS_SS_FORCE_BFER;
+ } else {
+ IWL_ERR(mvm,
+ "can't force BFER. peer doesn't support\n");
+ ret = -EINVAL;
+ }
+ } else {
+ IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
+ ret = -EINVAL;
+ }
+ return ret ?: count;
+}
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
+#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, lq_sta, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
+
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
+
debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
lq_sta, &rs_sta_dbgfs_scale_table_ops);
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
@@ -3374,6 +3594,11 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
&lq_sta->tx_agg_tid_en);
debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
&lq_sta->pers.dbg_fixed_txp_reduction);
+
+ MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR);
+ return;
+err:
+ IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
}
static void rs_remove_debugfs(void *mvm, void *mvm_sta)
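[Editorial aside, not part of the patch: rs.c above now derives its success-ratio thresholds from percentage constants via RS_PERCENT(), with the ratio kept in fixed point where 12800 means 100%; the constants removed from rs.h further down (e.g. 10880 for 85%) use the same scale. The following is a minimal standalone sketch of that conversion only. The 85% threshold mirrors the removed RS_SR_NO_DECREASE value, the frame counts are made up, and the scaling shown is a sketch rather than the driver's exact arithmetic.]

#include <stdio.h>

/* Success ratios are tracked in fixed point where 12800 == 100%,
 * i.e. one percentage point is worth 128 units (same definition as the patch).
 */
#define RS_PERCENT(x) (128 * x)

int main(void)
{
	/* Hypothetical threshold: "do not decrease rate above 85%". */
	int sr_no_decrease_pct = 85;

	/* Hypothetical observation: 110 ACKed frames out of 120 attempts,
	 * scaled into the same fixed-point domain (~11733).
	 */
	int success_ratio = (128 * 100 * 110) / 120;

	printf("threshold = %d, observed = %d, above threshold: %s\n",
	       RS_PERCENT(sr_no_decrease_pct),	/* 10880 */
	       success_ratio,
	       success_ratio >= RS_PERCENT(sr_no_decrease_pct) ? "yes" : "no");
	return 0;
}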
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index defd70a6d9e6..dc4ef3dfafe1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -137,42 +137,10 @@ enum {
#define IWL_INVALID_VALUE -1
-#define IWL_MIN_RSSI_VAL -100
-#define IWL_MAX_RSSI_VAL 0
-
-/* These values specify how many Tx frame attempts before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT 160
-#define IWL_LEGACY_SUCCESS_LIMIT 480
-#define IWL_LEGACY_TABLE_COUNT 160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
-#define IWL_NONE_LEGACY_TABLE_COUNT 1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO 12800 /* 100% */
-#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
-#define IWL_RATE_HIGH_TH 10880 /* 85% */
-#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define RS_SR_FORCE_DECREASE 1920 /* 15% */
-#define RS_SR_NO_DECREASE 10880 /* 85% */
-
-#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
-#define TPC_SR_NO_INCREASE 10880 /* 85% */
-#define TPC_TX_POWER_STEP 3
#define TPC_MAX_REDUCTION 15
#define TPC_NO_REDUCTION 0
#define TPC_INVALID 0xff
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
-#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
-#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
-
-#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
-#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
-#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
-
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
@@ -181,14 +149,7 @@ enum {
/* load per tid defines for A-MPDU activation */
#define IWL_AGG_TPT_THREHOLD 0
-#define IWL_AGG_LOAD_THRESHOLD 10
#define IWL_AGG_ALL_TID 0xff
-#define TID_QUEUE_CELL_SPACING 50 /*mS */
-#define TID_QUEUE_MAX_SIZE 20
-#define TID_ROUND_VALUE 5 /* mS */
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
enum iwl_table_type {
LQ_NONE,
@@ -279,6 +240,13 @@ enum rs_column {
RS_COLUMN_INVALID,
};
+enum rs_ss_force_opt {
+ RS_SS_FORCE_NONE = 0,
+ RS_SS_FORCE_STBC,
+ RS_SS_FORCE_BFER,
+ RS_SS_FORCE_SISO,
+};
+
/* Packet stats per rate */
struct rs_rate_stats {
u64 success;
@@ -332,7 +300,9 @@ struct iwl_lq_sta {
u64 last_tx;
bool is_vht;
bool ldpc; /* LDPC Rx is supported by the STA */
- bool stbc; /* Tx STBC is supported by chip and Rx by STA */
+ bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
+ bool bfer_capable; /* Remote supports beamformee and we BFer */
+
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -361,6 +331,9 @@ struct iwl_lq_sta {
/* tx power reduce for this sta */
int tpc_reduce;
+ /* force STBC/BFER/SISO for testing */
+ enum rs_ss_force_opt ss_force;
+
/* persistent fields - initialized only once - keep last! */
struct lq_sta_pers {
#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 94b6e7297a1e..f922131b4eab 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -407,7 +407,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
+ iwl_mvm_update_frame_stats(mvm, rate_n_flags,
rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif
iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
@@ -511,13 +511,17 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)&pkt->data;
- struct mvm_statistics_general_common *common = &stats->general.common;
struct iwl_mvm_stat_data data = {
.stats = stats,
.mvm = mvm,
};
- iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature));
+ /* Only handle rx statistics temperature changes if async temp
+ * notifications are not supported
+ */
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
+ iwl_mvm_tt_temp_changed(mvm,
+ le32_to_cpu(stats->general.radio_temperature));
iwl_mvm_update_rx_statistics(mvm, stats);
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index e5294d01181e..7e9aa3cb3254 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,6 +72,8 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
struct iwl_mvm_scan_params {
u32 max_out_time;
@@ -97,7 +99,7 @@ static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
return mvm->scan_rx_ant;
- return mvm->fw->valid_rx_ant;
+ return iwl_mvm_get_valid_rx_ant(mvm);
}
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
@@ -128,7 +130,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
u32 tx_ant;
mvm->scan_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
+ iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -171,15 +173,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
* already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit.
*/
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band, int n_ssids)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
return 10 + 2 * (n_ssids + 1);
}
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
@@ -282,11 +290,11 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool *global_bound = data;
+ int *global_cnt = data;
if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id < MAX_PHYS)
- *global_bound = true;
+ *global_cnt += 1;
}
static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
@@ -294,27 +302,31 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
int n_ssids, u32 flags,
struct iwl_mvm_scan_params *params)
{
- bool global_bound = false;
+ int global_cnt = 0;
enum ieee80211_band band;
u8 frag_passive_dwell = 0;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
- &global_bound);
+ &global_cnt);
- if (!global_bound)
+ if (!global_cnt)
goto not_bound;
params->suspend_time = 30;
- params->max_out_time = 170;
+ params->max_out_time = 120;
if (iwl_mvm_low_latency(mvm)) {
if (mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
params->suspend_time = 105;
- params->max_out_time = 70;
- frag_passive_dwell = 20;
+ /*
+ * If there is more than one active interface, make
+ * passive scan more fragmented.
+ */
+ frag_passive_dwell = (global_cnt < 2) ? 40 : 20;
+ params->max_out_time = frag_passive_dwell;
} else {
params->suspend_time = 120;
params->max_out_time = 120;
@@ -331,7 +343,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
u32 passive_dwell =
- iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+ iwl_mvm_get_passive_dwell(mvm,
+ IEEE80211_BAND_2GHZ);
params->max_out_time = passive_dwell;
} else {
params->passive_fragmented = true;
@@ -348,8 +361,8 @@ not_bound:
params->dwell[band].passive = frag_passive_dwell;
else
params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
- params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+ iwl_mvm_get_passive_dwell(mvm, band);
+ params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids);
}
}
@@ -530,6 +543,19 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
+ return 0;
+}
+
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
@@ -678,7 +704,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
- mvm->last_ebs_successful = !ebs_status;
+ if (ebs_status)
+ mvm->last_ebs_successful = false;
return 0;
}
@@ -1098,6 +1125,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm))
+ goto out;
+
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1134,6 +1167,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1290,18 +1324,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->iter_num = cpu_to_le32(1);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful) {
- cmd->channel_opt[0].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[1].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- }
-
if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1376,6 +1398,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0;
cmd->schedule[1].full_scan_mul = 0;
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
for (i = 1; i <= req->req.n_ssids; i++)
ssid_bitmap |= BIT(i);
@@ -1448,6 +1486,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
@@ -1458,6 +1498,11 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (req->n_ssids == 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
+
cmd->scan_flags |= cpu_to_le32(flags);
cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
@@ -1474,6 +1519,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0xff;
cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
@@ -1603,7 +1664,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
SCAN_CONFIG_N_CHANNELS(num_channels));
- scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant);
+ scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
scan_config->out_of_channel_time = cpu_to_le32(170);
@@ -1622,10 +1683,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
- scan_config->channel_array[j] = band->channels[i].center_freq;
+ scan_config->channel_array[j] = band->channels[i].hw_value;
band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
- scan_config->channel_array[j] = band->channels[i].center_freq;
+ scan_config->channel_array[j] = band->channels[i].hw_value;
cmd.data[0] = scan_config;
cmd.len[0] = cmd_size;
@@ -1802,6 +1863,13 @@ int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
cmd->general_flags = cpu_to_le32(flags);
+
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+ mvm->last_ebs_successful)
+ cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
cmd->n_channels = req->req.n_channels;
for (i = 0; i < req->req.n_ssids; i++)
@@ -1965,7 +2033,9 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
"success" : "failed");
- mvm->last_ebs_successful = !notif->ebs_status;
+ if (notif->ebs_status)
+ mvm->last_ebs_successful = false;
+
mvm->scan_uid[uid_idx] = 0;
if (!sched) {
@@ -1998,10 +2068,14 @@ static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
/*
* Clear scan uid of scans that was aborted from above and completed
- * in FW so the RX handler does nothing.
+ * in FW so the RX handler does nothing. Set last_ebs_successful here if
+ * needed.
*/
scan_done->mvm->scan_uid[uid_idx] = 0;
+ if (notif->ebs_status)
+ scan_done->mvm->last_ebs_successful = false;
+
return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d86fe432e51f..5c23cddaaae3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -99,7 +99,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd add_sta_cmd = {
.sta_id = mvm_sta->sta_id,
.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
@@ -209,6 +209,9 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
{
unsigned long used_hw_queues;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
u32 ac;
lockdep_assert_held(&mvm->mutex);
@@ -232,7 +235,7 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
/* Found a place for all queues - enable them */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
- iwl_mvm_ac_to_tx_fifo[ac]);
+ iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
}
@@ -250,8 +253,8 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
/* disable the TDLS STA-specific queues */
sta_msk = mvmsta->tfd_queue_msk;
- for_each_set_bit(i, &sta_msk, sizeof(sta_msk))
- iwl_mvm_disable_txq(mvm, i);
+ for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
+ iwl_mvm_disable_txq(mvm, i, 0);
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -259,7 +262,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int i, ret, sta_id;
lockdep_assert_held(&mvm->mutex);
@@ -464,8 +467,8 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
if (mvm->tfd_drained[sta_id]) {
unsigned long i, msk = mvm->tfd_drained[sta_id];
- for_each_set_bit(i, &msk, sizeof(msk))
- iwl_mvm_disable_txq(mvm, i);
+ for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
+ iwl_mvm_disable_txq(mvm, i, 0);
mvm->tfd_drained[sta_id] = 0;
IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -481,7 +484,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -626,13 +629,16 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
int ret;
lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
- IWL_MVM_TX_FIFO_MCAST);
+ IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -774,7 +780,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -834,7 +840,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -965,6 +971,9 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
int queue, fifo, ret;
u16 ssn;
@@ -988,7 +997,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
- buf_size, ssn);
+ buf_size, ssn, wdg_timeout);
/*
* Even though in theory the peer could have different
@@ -1058,7 +1067,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_mvm_disable_txq(mvm, txq_id);
+ iwl_mvm_disable_txq(mvm, txq_id, 0);
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1116,7 +1125,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id);
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
}
mvm->queue_to_mac80211[tid_data->txq_id] =
@@ -1144,10 +1153,10 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (sta) {
- struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
return mvm_sta->sta_id;
}
@@ -1196,6 +1205,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
+ /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
@@ -1280,7 +1290,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (sta)
return sta->addr;
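[Editorial aside, not part of the patch: the for_each_set_bit() changes in iwl_mvm_tdls_sta_deinit() and iwl_mvm_sta_drained_wk() above matter because that iterator's size argument is a count of bits, so passing bare sizeof(mask) (a byte count) walked only the first 4 or 8 bits of the queue mask; the added "* BITS_PER_BYTE" fixes that. Below is a minimal userspace sketch of the difference, using a hypothetical queue mask and a stand-in loop for the kernel iterator.]

#include <stdio.h>
#include <limits.h>	/* CHAR_BIT, the userspace equivalent of BITS_PER_BYTE */

/* Stand-in for for_each_set_bit(): print the set bits of @mask that fall
 * below @size_in_bits. Shows why the limit must be given in bits.
 */
static void walk_bits(unsigned long mask, unsigned int size_in_bits)
{
	unsigned int i;

	for (i = 0; i < size_in_bits; i++)
		if (mask & (1UL << i))
			printf(" %u", i);
	printf("\n");
}

int main(void)
{
	/* Hypothetical TFD queue mask with queues 3, 9 and 17 enabled. */
	unsigned long sta_msk = (1UL << 3) | (1UL << 9) | (1UL << 17);

	printf("limit = sizeof(sta_msk):            ");
	walk_bits(sta_msk, sizeof(sta_msk));		/* only sees bit 3 */

	printf("limit = sizeof(sta_msk) * CHAR_BIT: ");
	walk_bits(sta_msk, sizeof(sta_msk) * CHAR_BIT);	/* sees 3, 9 and 17 */

	return 0;
}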
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index c0e00bae5bd0..a87b506c8c72 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -64,6 +64,8 @@
#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
@@ -228,6 +230,8 @@ iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
return "IDLE";
case IWL_MVM_TDLS_SW_REQ_SENT:
return "REQ SENT";
+ case IWL_MVM_TDLS_SW_RESP_RCVD:
+ return "RESP RECEIVED";
case IWL_MVM_TDLS_SW_REQ_RCVD:
return "REQ RECEIVED";
case IWL_MVM_TDLS_SW_ACTIVE:
@@ -248,6 +252,11 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
iwl_mvm_tdls_cs_state_str(state));
mvm->tdls_cs.state = state;
+ /* we only send requests to our switching peer - update sent time */
+ if (state == IWL_MVM_TDLS_SW_REQ_SENT)
+ mvm->tdls_cs.peer.sent_timestamp =
+ iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+
if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}
@@ -300,7 +309,7 @@ out:
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
enum iwl_tdls_channel_switch_type type,
- const u8 *peer, bool peer_initiator)
+ const u8 *peer, bool peer_initiator, u32 timestamp)
{
bool same_peer = false;
int ret = 0;
@@ -325,17 +334,30 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
ret = -EINVAL;
break;
case IWL_MVM_TDLS_SW_REQ_SENT:
+ /* only allow requests from the same peer */
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
+ !peer_initiator)
+ /*
+ * We received a ch-switch request while an outgoing
+ * one is pending. Allow it if the peer is the link
+ * initiator.
+ */
+ ret = -EBUSY;
+ else if (type == TDLS_SEND_CHAN_SW_REQ)
+ /* wait for idle before sending another request */
+ ret = -EBUSY;
+ else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
+ /* we got a stale response - ignore it */
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_RESP_RCVD:
/*
- * We received a ch-switch request while an outgoing one is
- * pending. Allow it to proceed if the other peer is the same
- * one we sent to, and we are not the link initiator.
+ * we are waiting for the FW to give an "active" notification,
+ * so ignore requests in the meantime
*/
- if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) {
- if (!same_peer)
- ret = -EBUSY;
- else if (!peer_initiator) /* we are the initiator */
- ret = -EBUSY;
- }
+ ret = -EBUSY;
break;
case IWL_MVM_TDLS_SW_REQ_RCVD:
/* as above, allow the link initiator to proceed */
@@ -349,9 +371,12 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
}
break;
case IWL_MVM_TDLS_SW_ACTIVE:
- /* we don't allow initiations during active channel switch */
- if (type == TDLS_SEND_CHAN_SW_REQ)
- ret = -EINVAL;
+ /*
+ * the only valid request when active is a request to return
+ * to the base channel by the current off-channel peer
+ */
+ if (type != TDLS_MOVE_CH || !same_peer)
+ ret = -EBUSY;
break;
}
@@ -384,7 +409,8 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
+ ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
+ timestamp);
if (ret)
return ret;
@@ -473,6 +499,8 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
type == TDLS_SEND_CHAN_SW_REQ ?
IWL_MVM_TDLS_SW_REQ_SENT :
IWL_MVM_TDLS_SW_REQ_RCVD);
+ } else {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
}
out:
@@ -657,12 +685,15 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
enum iwl_tdls_channel_switch_type type;
unsigned int delay;
+ const char *action_str =
+ params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
+ "REQ" : "RESP";
mutex_lock(&mvm->mutex);
IWL_DEBUG_TDLS(mvm,
- "Received TDLS ch switch action %d from %pM status %d\n",
- params->action_code, params->sta->addr, params->status);
+ "Received TDLS ch switch action %s from %pM status %d\n",
+ action_str, params->sta->addr, params->status);
/*
* we got a non-zero status from a peer we were switching to - move to
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 2b1e61fac34a..ba615ad2176c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -69,6 +69,7 @@
static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
{
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
@@ -77,12 +78,15 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Enter CT Kill\n");
iwl_mvm_set_hw_ctkill_state(mvm, true);
+ tt->throttle = false;
+ tt->dynamic_smps = false;
+
/* Don't schedule an exit work if we're in test mode, since
* the temperature will not change unless we manually set it
* again (or disable testing).
*/
if (!mvm->temperature_test)
- schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+ schedule_delayed_work(&tt->ct_kill_exit,
round_jiffies_relative(duration * HZ));
}
@@ -452,6 +456,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
tt->params = &iwl7000_tt_params;
tt->throttle = false;
+ tt->dynamic_smps = false;
tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4f15d9decc81..07304e1fd64a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_probe_resp(fc))
tx_flags |= TX_CMD_FLG_TSF;
- else if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar = (void *)skb->data;
+ u16 control = le16_to_cpu(bar->control);
+
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+ tx_cmd->tid_tspec = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+ WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -108,8 +115,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
- /* tid_tspec will default to 0 = BE when QOS isn't enabled */
- ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ else
+ ac = tid_to_mac80211_ac[0];
+
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS;
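
The new BlockAckReq branch extracts the TID from the BAR control field and only indexes the TID-to-AC table when the TID is in range, falling back to the AC of TID 0 otherwise. A standalone sketch of that extraction; the mask/shift mirror the 802.11 BAR control layout, while the macro names here and the TID-to-AC table contents are placeholders.

#include <stdint.h>
#include <stdio.h>

#define BAR_CTRL_TID_INFO_MASK  0xf000  /* TID field of the BAR control word */
#define BAR_CTRL_TID_INFO_SHIFT 12
#define MAX_TID_COUNT           8       /* placeholder for IWL_MAX_TID_COUNT */

/* 0 = BK, 1 = BE, 2 = VI, 3 = VO -- placeholder TID->AC map */
static const uint8_t tid_to_ac[MAX_TID_COUNT] = { 1, 0, 0, 1, 2, 2, 3, 3 };

int main(void)
{
        uint16_t bar_control = 0x5004;  /* example frame: TID 5 plus some flags */
        uint8_t tid = (bar_control & BAR_CTRL_TID_INFO_MASK) >>
                      BAR_CTRL_TID_INFO_SHIFT;

        /* Fall back to the AC of TID 0 when the TID is out of range, as the hunk does */
        uint8_t ac = (tid < MAX_TID_COUNT) ? tid_to_ac[tid] : tid_to_ac[0];

        printf("tid=%u ac=%u\n", tid, ac);
        return 0;
}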
@@ -209,7 +220,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
+ iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
if (info->band == IEEE80211_BAND_2GHZ &&
@@ -496,7 +507,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id);
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
tid_data->state = IWL_AGG_OFF;
/*
* we can't hold the mutex - but since we are after a sequence
@@ -656,7 +667,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
/* Single frame failure in an AMPDU queue => send BAR */
if (txq_id >= mvm->first_agg_queue &&
- !(info->flags & IEEE80211_TX_STAT_ACK))
+ !(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
/* W/A FW bug: seq_ctl is wrong when the status isn't success */
@@ -919,6 +931,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
sta_id = ba_notif->sta_id;
tid = ba_notif->tid;
+ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+ tid >= IWL_MAX_TID_COUNT,
+ "sta_id %d tid %d", sta_id, tid))
+ return 0;
+
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index e56e77ef5d2e..8decf9953229 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -432,7 +432,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
- IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
desc_lookup(table.error_id));
IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
@@ -531,49 +531,50 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
}
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg)
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
{
- if (iwl_mvm_is_dqa_supported(mvm)) {
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .enable = 1,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .flags = IWL_SCD_FLAGS_DQA_ENABLED,
- .tid = cfg->tid,
- .control = IWL_SCD_CONTROL_SET_SSN,
- };
- int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(mvm,
- "Failed to configure queue %d on FIFO %d\n",
- queue, cfg->fifo);
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 1,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+
+ if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg,
+ wdg_timeout);
+ return;
}
- iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
- iwl_mvm_is_dqa_supported(mvm) ? NULL : cfg);
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
}
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue)
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
{
- iwl_trans_txq_disable(mvm->trans, queue,
- !iwl_mvm_is_dqa_supported(mvm));
-
- if (iwl_mvm_is_dqa_supported(mvm)) {
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .enable = 0,
- };
- int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, CMD_ASYNC,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
- queue, ret);
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 0,
+ };
+ int ret;
+
+ if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
+ iwl_trans_txq_disable(mvm->trans, queue, true);
+ return;
}
+
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
}
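
The rewritten enable/disable helpers now branch on whether the firmware understands the SCD_QUEUE_CFG command rather than on DQA support: full scheduler programming through the transport when it does not, minimal transport setup plus the host command when it does. A small sketch of that capability-gated shape, with all names and the printf stand-ins invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct dev { bool has_scd_cfg_cmd; };

static void hw_queue_enable(int queue, bool full_cfg)
{
        printf("hw enable q=%d full_cfg=%d\n", queue, full_cfg);
}

static int send_scd_cfg_cmd(int queue, bool enable)
{
        printf("SCD cfg cmd q=%d enable=%d\n", queue, enable);
        return 0;
}

static void enable_txq(struct dev *d, int queue)
{
        if (!d->has_scd_cfg_cmd) {
                /* older firmware: the driver programs the scheduler directly */
                hw_queue_enable(queue, true);
                return;
        }
        /* newer firmware: minimal HW setup, then let the FW configure the SCD */
        hw_queue_enable(queue, false);
        if (send_scd_cfg_cmd(queue, true))
                fprintf(stderr, "failed to configure queue %d\n", queue);
}

int main(void)
{
        struct dev legacy = { .has_scd_cfg_cmd = false };
        struct dev modern = { .has_scd_cfg_cmd = true };

        enable_txq(&legacy, 9);
        enable_txq(&modern, 9);
        return 0;
}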
/**
@@ -620,7 +621,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
/* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
- if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
+ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return;
if (vif->type == NL80211_IFTYPE_AP)
@@ -662,10 +663,10 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
+ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return false;
- if (!mvm->cfg->rx_with_siso_diversity)
+ if (mvm->cfg->rx_with_siso_diversity)
return false;
ieee80211_iterate_active_interfaces_atomic(
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2f0c4b170344..dbd6bcf52205 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -415,6 +415,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -527,8 +529,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 1aea6b66c594..cae0eb8835ce 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -216,6 +216,7 @@ struct iwl_pcie_txq_scratch_buf {
* @need_update: indicates need to update read/write index
* @active: stores if queue is active
* @ampdu: true if this queue is an ampdu queue for a specific RA/TID
+ * @wd_timeout: queue watchdog timeout (jiffies) - per queue
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -232,6 +233,7 @@ struct iwl_txq {
bool need_update;
u8 active;
bool ampdu;
+ unsigned long wd_timeout;
};
static inline dma_addr_t
@@ -259,7 +261,6 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @rx_page_order: page order for receive buffer size
- * @wd_timeout: queue watchdog timeout (jiffies)
* @reg_lock: protect hw register access
* @cmd_in_flight: true when we have a host command in flight
* @fw_mon_phys: physical address of the buffer for the firmware monitor
@@ -302,6 +303,7 @@ struct iwl_trans_pcie {
u8 cmd_queue;
u8 cmd_fifo;
+ unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
@@ -312,12 +314,14 @@ struct iwl_trans_pcie {
const char *const *command_names;
- /* queue watchdog */
- unsigned long wd_timeout;
-
/*protect hw register */
spinlock_t reg_lock;
bool cmd_in_flight;
+ bool ref_cmd_in_flight;
+
+ /* protect ref counter */
+ spinlock_t ref_lock;
+ u32 ref_count;
dma_addr_t fw_mon_phys;
struct page *fw_mon_page;
@@ -368,7 +372,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg);
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -381,6 +386,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+void iwl_trans_pcie_ref(struct iwl_trans *trans);
+void iwl_trans_pcie_unref(struct iwl_trans *trans);
+
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 523fe0c88dcb..69935aa5a1b3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,6 +75,7 @@
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
+#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
@@ -443,10 +444,25 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
return ret;
}
-static void iwl_pcie_apm_stop(struct iwl_trans *trans)
+static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+ if (op_mode_leave) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_pcie_apm_init(trans);
+
+ /* inform ME that we are leaving */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_WAKE_ME);
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE |
+ CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(5);
+ }
+
clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
/* Stop device's DMA activity */
@@ -707,6 +723,11 @@ static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
*first_ucode_section = last_read_idx;
+ if (cpu == 1)
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
+ else
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+
return 0;
}
@@ -893,8 +914,8 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
if (ret)
return ret;
- /* Notify FW loading is done */
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+ if (trans->dbg_dest_tlv)
+ iwl_pcie_apply_destination(trans);
/* wait for image verification to complete */
ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
@@ -916,6 +937,7 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
@@ -945,6 +967,9 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
return ret;
}
+ /* init ref_count to 1 (should be cleared when ucode is loaded) */
+ trans_pcie->ref_count = 1;
+
/* make sure rfkill handshake bits are cleared */
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
@@ -960,7 +985,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* Load the given image to the HW */
if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
- (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP))
+ (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP))
return iwl_pcie_load_given_ucode_8000b(trans, fw);
else
return iwl_pcie_load_given_ucode(trans, fw);
@@ -1010,7 +1035,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
- iwl_pcie_apm_stop(trans);
+ iwl_pcie_apm_stop(trans, false);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -1192,7 +1217,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
- iwl_pcie_apm_stop(trans);
+ iwl_pcie_apm_stop(trans, true);
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
@@ -1244,6 +1269,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->cmd_queue = trans_cfg->cmd_queue;
trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
+ trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -1258,9 +1284,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
else
trans_pcie->rx_page_order = get_order(4 * 1024);
- trans_pcie->wd_timeout =
- msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
-
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -1540,6 +1563,38 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
+void iwl_trans_pcie_ref(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ if (iwlwifi_mod_params.d0i3_disable)
+ return;
+
+ spin_lock_irqsave(&trans_pcie->ref_lock, flags);
+ IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
+ trans_pcie->ref_count++;
+ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+}
+
+void iwl_trans_pcie_unref(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ if (iwlwifi_mod_params.d0i3_disable)
+ return;
+
+ spin_lock_irqsave(&trans_pcie->ref_lock, flags);
+ IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
+ if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
+ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+ return;
+ }
+ trans_pcie->ref_count--;
+ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+}
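
iwl_trans_pcie_ref()/unref() add a spinlock-protected reference counter, skipped entirely when d0i3 is disabled, with a warning on underflow. A userspace sketch of the same pattern, using a pthread mutex in place of the spinlock; the struct and function names are hypothetical.

#include <pthread.h>
#include <stdio.h>

struct trans {
        pthread_mutex_t ref_lock;
        unsigned int ref_count;
        int d0i3_disable;               /* stand-in for the module parameter */
};

static void trans_ref(struct trans *t)
{
        if (t->d0i3_disable)
                return;
        pthread_mutex_lock(&t->ref_lock);
        t->ref_count++;
        pthread_mutex_unlock(&t->ref_lock);
}

static void trans_unref(struct trans *t)
{
        if (t->d0i3_disable)
                return;
        pthread_mutex_lock(&t->ref_lock);
        if (t->ref_count == 0) {        /* underflow: warn and bail out */
                fprintf(stderr, "unbalanced unref\n");
                pthread_mutex_unlock(&t->ref_lock);
                return;
        }
        t->ref_count--;
        pthread_mutex_unlock(&t->ref_lock);
}

int main(void)
{
        struct trans t = { .ref_lock = PTHREAD_MUTEX_INITIALIZER };

        trans_ref(&t);
        trans_unref(&t);
        trans_unref(&t);                /* triggers the underflow warning */
        return 0;
}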
+
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -2264,6 +2319,9 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.release_nic_access = iwl_trans_pcie_release_nic_access,
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
+ .ref = iwl_trans_pcie_ref,
+ .unref = iwl_trans_pcie_unref,
+
.dump_data = iwl_trans_pcie_dump_data,
};
@@ -2291,6 +2349,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->ref_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
err = pci_enable_device(pdev);
@@ -2404,6 +2463,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;
return trans;
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8a6c7a084aa1..af0bce736358 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -147,7 +147,6 @@ static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
struct iwl_txq *txq = (void *)data;
- struct iwl_queue *q = &txq->q;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
u32 scd_sram_addr = trans_pcie->scd_base_addr +
@@ -164,7 +163,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
spin_unlock(&txq->lock);
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
- jiffies_to_msecs(trans_pcie->wd_timeout));
+ jiffies_to_msecs(txq->wd_timeout));
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
@@ -198,11 +197,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
- for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i))
- IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
- le32_to_cpu(txq->scratchbufs[i].scratch));
-
iwl_force_nmi(trans);
}
@@ -680,7 +674,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
- trans_pcie->cmd_fifo);
+ trans_pcie->cmd_fifo,
+ trans_pcie->cmd_q_wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -722,7 +717,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
+ /*
+ * Send 0 as the scd_base_addr since the device may have been reset
+ * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
+ * contain garbage.
+ */
+ iwl_pcie_tx_start(trans, 0);
}
/*
@@ -898,6 +898,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
+ if (trans->cfg->base_params->num_of_queues > 20)
+ iwl_set_bits_prph(trans, SCD_GP_CTRL,
+ SCD_GP_CTRL_ENABLE_31_QUEUES);
+
return 0;
error:
/*Upon error, free only if we allocated something */
@@ -906,10 +910,9 @@ error:
return ret;
}
-static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq)
+static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
- if (!trans_pcie->wd_timeout)
+ if (!txq->wd_timeout)
return;
/*
@@ -919,7 +922,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
if (txq->q.read_ptr == txq->q.write_ptr)
del_timer(&txq->stuck_timer);
else
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}
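
With wd_timeout now stored per TX queue, iwl_pcie_txq_progress() stops the stuck-queue timer when the queue drains and otherwise pushes its deadline out. A compact sketch of that arm/disarm decision with the timer calls reduced to a boolean; names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct txq {
        int read_ptr, write_ptr;
        unsigned long wd_timeout;       /* 0 means "no watchdog for this queue" */
        bool timer_armed;
};

static void txq_progress(struct txq *q)
{
        if (!q->wd_timeout)
                return;

        if (q->read_ptr == q->write_ptr)
                q->timer_armed = false; /* queue drained: stop the timer */
        else
                q->timer_armed = true;  /* still busy: push the deadline out */
}

int main(void)
{
        struct txq q = { .read_ptr = 3, .write_ptr = 7, .wd_timeout = 2500 };

        txq_progress(&q);
        printf("armed=%d\n", q.timer_armed);    /* 1: frames still pending */

        q.read_ptr = q.write_ptr;
        txq_progress(&q);
        printf("armed=%d\n", q.timer_armed);    /* 0: nothing left to watch */
        return 0;
}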
/* Frees buffers until index _not_ inclusive */
@@ -981,21 +984,35 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_free_tfd(trans, txq);
}
- iwl_pcie_txq_progress(trans_pcie, txq);
+ iwl_pcie_txq_progress(txq);
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
+
+ if (q->read_ptr == q->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ iwl_trans_pcie_unref(trans);
+ }
+
out:
spin_unlock_bh(&txq->lock);
}
-static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans)
+static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
+ const struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
lockdep_assert_held(&trans_pcie->reg_lock);
+ if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+ !trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = true;
+ IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+ iwl_trans_pcie_ref(trans);
+ }
+
if (trans_pcie->cmd_in_flight)
return 0;
@@ -1036,6 +1053,12 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
lockdep_assert_held(&trans_pcie->reg_lock);
+ if (trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = false;
+ IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+ iwl_trans_pcie_unref(trans);
+ }
+
if (WARN_ON(!trans_pcie->cmd_in_flight))
return 0;
@@ -1089,7 +1112,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
- iwl_pcie_txq_progress(trans_pcie, txq);
+ iwl_pcie_txq_progress(txq);
}
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
@@ -1122,14 +1145,18 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg)
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
int fifo = -1;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+ txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
+
if (cfg) {
fifo = cfg->fifo;
@@ -1153,7 +1180,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
/* enable aggregations for the queue */
iwl_scd_txq_enable_agg(trans, txq_id);
- trans_pcie->txq[txq_id].ampdu = true;
+ txq->ampdu = true;
} else {
/*
* disable aggregations for the queue, this will also
@@ -1162,20 +1189,20 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
*/
iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = trans_pcie->txq[txq_id].q.read_ptr;
+ ssn = txq->q.read_ptr;
}
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
- trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+ txq->q.read_ptr = (ssn & 0xff);
+ txq->q.write_ptr = (ssn & 0xff);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (ssn & 0xff) | (txq_id << 8));
if (cfg) {
u8 frame_limit = cfg->frame_limit;
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- (ssn & 0xff) | (txq_id << 8));
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
@@ -1200,11 +1227,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
if (txq_id == trans_pcie->cmd_queue &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Activate queue %d on FIFO %d WrPtr: %d\n",
+ txq_id, fifo, ssn & 0xff);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Activate queue %d WrPtr: %d\n",
+ txq_id, ssn & 0xff);
}
- trans_pcie->txq[txq_id].active = true;
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
- txq_id, fifo, ssn & 0xff);
+ txq->active = true;
}
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
@@ -1469,11 +1502,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- ret = iwl_pcie_set_cmd_in_flight(trans);
+ ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
if (ret < 0) {
idx = ret;
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1819,9 +1852,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (txq->need_update && q->read_ptr == q->write_ptr &&
- trans_pcie->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ if (q->read_ptr == q->write_ptr) {
+ if (txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ iwl_trans_pcie_ref(trans);
+ }
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 34f09ef90bb3..a92985a6ea21 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1616,10 +1616,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
lbs_deb_enter(LBS_DEB_CFG80211);
- sinfo->filled |= STATION_INFO_TX_BYTES |
- STATION_INFO_TX_PACKETS |
- STATION_INFO_RX_BYTES |
- STATION_INFO_RX_PACKETS;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) |
+ BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_RX_BYTES) |
+ BIT(NL80211_STA_INFO_RX_PACKETS);
sinfo->tx_bytes = priv->dev->stats.tx_bytes;
sinfo->tx_packets = priv->dev->stats.tx_packets;
sinfo->rx_bytes = priv->dev->stats.rx_bytes;
@@ -1629,14 +1629,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
sinfo->signal = signal;
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}
/* Convert priv->cur_rate from hw_value to NL80211 value */
for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
if (priv->cur_rate == lbs_rates[i].hw_value) {
sinfo->txrate.legacy = lbs_rates[i].bitrate;
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
break;
}
}
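
The hunk above moves sinfo->filled from the old STATION_INFO_* flag constants to bit positions taken from the nl80211 enum. The general shape of that bitmap, sketched with a made-up enum standing in for nl80211's.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* stand-in for enum nl80211_sta_info -- only the positions matter */
enum sta_info { STA_INFO_TX_BYTES, STA_INFO_RX_BYTES, STA_INFO_SIGNAL };

struct station_info {
        uint32_t filled;
        unsigned long tx_bytes, rx_bytes;
        int signal;
};

int main(void)
{
        struct station_info sinfo = { 0 };

        sinfo.filled |= BIT(STA_INFO_TX_BYTES) | BIT(STA_INFO_RX_BYTES);
        sinfo.tx_bytes = 1234;
        sinfo.rx_bytes = 5678;

        if (1 /* pretend the RSSI query succeeded */) {
                sinfo.signal = -42;
                sinfo.filled |= BIT(STA_INFO_SIGNAL);
        }

        printf("filled=0x%x signal=%d\n", (unsigned)sinfo.filled, sinfo.signal);
        return 0;
}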
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ef58a8862d91..4a4c6586a8d2 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -625,22 +625,22 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
old_ps = data->ps;
data->ps = val;
+ local_bh_disable();
if (val == PS_MANUAL_POLL) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_ps_poll, data);
+ ieee80211_iterate_active_interfaces_atomic(
+ data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_ps_poll, data);
data->ps_poll_pending = true;
} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_nullfunc_ps,
- data);
+ ieee80211_iterate_active_interfaces_atomic(
+ data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_nullfunc_ps, data);
} else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_nullfunc_no_ps,
- data);
+ ieee80211_iterate_active_interfaces_atomic(
+ data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_nullfunc_no_ps, data);
}
+ local_bh_enable();
return 0;
}
@@ -2149,14 +2149,14 @@ static int append_radio_msg(struct sk_buff *skb, int id,
if (param->regd) {
int i;
- for (i = 0; hwsim_world_regdom_custom[i] != param->regd &&
- i < ARRAY_SIZE(hwsim_world_regdom_custom); i++)
- ;
+ for (i = 0; i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) {
+ if (hwsim_world_regdom_custom[i] != param->regd)
+ continue;
- if (i < ARRAY_SIZE(hwsim_world_regdom_custom)) {
ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i);
if (ret < 0)
return ret;
+ break;
}
}
@@ -2557,7 +2557,8 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
if (res < 0)
goto out_err;
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
out_err:
genlmsg_cancel(skb, hdr);
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 2668e83afbb6..3ab87a855122 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -21,6 +21,16 @@
#include "fw.h"
+void mwifiex_init_11h_params(struct mwifiex_private *priv)
+{
+ priv->state_11h.is_11h_enabled = true;
+ priv->state_11h.is_11h_active = false;
+}
+
+inline int mwifiex_is_11h_active(struct mwifiex_private *priv)
+{
+ return priv->state_11h.is_11h_active;
+}
/* This function appends 11h info to a buffer while joining an
* infrastructure BSS
*/
@@ -39,7 +49,7 @@ mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
return;
radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- sband = priv->wdev->wiphy->bands[radio_type];
+ sband = priv->wdev.wiphy->bands[radio_type];
cap = (struct mwifiex_ie_types_pwr_capability *)*buffer;
cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY);
@@ -69,10 +79,14 @@ mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
}
/* Enable or disable the 11h extensions in the firmware */
-static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
+int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
{
u32 enable = flag;
+ /* enable master mode radar detection on AP interface */
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && enable)
+ enable |= MWIFIEX_MASTER_RADAR_DET_MASK;
+
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true);
}
@@ -91,11 +105,191 @@ void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
* bit
*/
mwifiex_11h_activate(priv, true);
+ priv->state_11h.is_11h_active = true;
bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT;
mwifiex_11h_process_infra_join(priv, buffer, bss_desc);
} else {
/* Deactivate 11h functions in the firmware */
mwifiex_11h_activate(priv, false);
+ priv->state_11h.is_11h_active = false;
bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT;
}
}
+
+/* This is the DFS CAC work queue function.
+ * This delayed work emits a CAC-finished event to cfg80211 if
+ * CAC was started earlier.
+ */
+void mwifiex_dfs_cac_work_queue(struct work_struct *work)
+{
+ struct cfg80211_chan_def chandef;
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct mwifiex_private *priv =
+ container_of(delayed_work, struct mwifiex_private,
+ dfs_cac_work);
+
+ if (WARN_ON(!priv))
+ return;
+
+ chandef = priv->dfs_chandef;
+ if (priv->wdev.cac_started) {
+ dev_dbg(priv->adapter->dev,
+ "CAC timer finished; No radar detected\n");
+ cfg80211_cac_event(priv->netdev, &chandef,
+ NL80211_RADAR_CAC_FINISHED,
+ GFP_KERNEL);
+ }
+}
+
+/* This function prepares a channel report request command for the FW
+ * to start radar detection.
+ */
+int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_chan_rpt_req *cr_req = &cmd->params.chan_rpt_req;
+ struct mwifiex_radar_params *radar_params = (void *)data_buf;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REPORT_REQUEST);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_chan_rpt_req));
+
+ cr_req->chan_desc.start_freq = cpu_to_le16(MWIFIEX_A_BAND_START_FREQ);
+ cr_req->chan_desc.chan_num = radar_params->chandef->chan->hw_value;
+ cr_req->chan_desc.chan_width = radar_params->chandef->width;
+ cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms);
+
+ dev_dbg(priv->adapter->dev,
+ "11h: issuing DFS Radar check for channel=%d\n",
+ radar_params->chandef->chan->hw_value);
+
+ return 0;
+}
+
+/* This function aborts an ongoing CAC when AP operations are stopped
+ * or during unload.
+ */
+void mwifiex_abort_cac(struct mwifiex_private *priv)
+{
+ if (priv->wdev.cac_started) {
+ dev_dbg(priv->adapter->dev,
+ "Aborting delayed work for CAC.\n");
+ cancel_delayed_work_sync(&priv->dfs_cac_work);
+ cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ }
+}
+
+/* This function handles the channel report event from the FW during the
+ * CAC period. If radar is detected during CAC, the driver reports it to
+ * cfg80211 and cancels the pending delayed work.
+ */
+int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct host_cmd_ds_chan_rpt_event *rpt_event;
+ struct mwifiex_ie_types_chan_rpt_data *rpt;
+ u8 *evt_buf;
+ u16 event_len, tlv_len;
+
+ rpt_event = (void *)(skb->data + sizeof(u32));
+ event_len = skb->len - (sizeof(struct host_cmd_ds_chan_rpt_event)+
+ sizeof(u32));
+
+ if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
+ dev_err(priv->adapter->dev, "Error in channel report event\n");
+ return -1;
+ }
+
+ evt_buf = (void *)&rpt_event->tlvbuf;
+
+ while (event_len >= sizeof(struct mwifiex_ie_types_header)) {
+ rpt = (void *)&rpt_event->tlvbuf;
+ tlv_len = le16_to_cpu(rpt->header.len);
+
+ switch (le16_to_cpu(rpt->header.type)) {
+ case TLV_TYPE_CHANRPT_11H_BASIC:
+ if (rpt->map.radar) {
+ dev_notice(priv->adapter->dev,
+ "RADAR Detected on channel %d!\n",
+ priv->dfs_chandef.chan->hw_value);
+ cancel_delayed_work_sync(&priv->dfs_cac_work);
+ cfg80211_cac_event(priv->netdev,
+ &priv->dfs_chandef,
+ NL80211_RADAR_DETECTED,
+ GFP_KERNEL);
+ }
+ break;
+ default:
+ break;
+ }
+
+ evt_buf += (tlv_len + sizeof(rpt->header));
+ event_len -= (tlv_len + sizeof(rpt->header));
+ }
+
+ return 0;
+}
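
mwifiex_11h_handle_chanrpt_ready() above walks a buffer of length-prefixed TLVs looking for the basic channel-report TLV. A self-contained sketch of such a walk over a little-endian type/length header, with an extra bounds check; the struct layout and type value are illustrative, not the firmware's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
        uint16_t type;  /* little-endian on the wire; an LE host is assumed here */
        uint16_t len;
};

#define TLV_CHANRPT_BASIC 0x0008        /* placeholder type value */

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
        while (buf_len >= sizeof(struct tlv_hdr)) {
                struct tlv_hdr hdr;

                memcpy(&hdr, buf, sizeof(hdr));
                if (sizeof(hdr) + hdr.len > buf_len)
                        break;          /* malformed: length runs past the buffer */

                if (hdr.type == TLV_CHANRPT_BASIC)
                        printf("basic channel report, %u payload bytes\n", hdr.len);

                buf += sizeof(hdr) + hdr.len;   /* advance past header + payload */
                buf_len -= sizeof(hdr) + hdr.len;
        }
}

int main(void)
{
        /* one TLV: type 0x0008, len 2, payload { 0x01, 0x00 } */
        const uint8_t buf[] = { 0x08, 0x00, 0x02, 0x00, 0x01, 0x00 };

        walk_tlvs(buf, sizeof(buf));
        return 0;
}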
+
+/* Handler for the radar-detected event from the FW. */
+int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct mwifiex_radar_det_event *rdr_event;
+
+ rdr_event = (void *)(skb->data + sizeof(u32));
+
+ if (le32_to_cpu(rdr_event->passed)) {
+ dev_notice(priv->adapter->dev,
+ "radar detected; indicating kernel\n");
+ cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
+ GFP_KERNEL);
+ dev_dbg(priv->adapter->dev, "regdomain: %d\n",
+ rdr_event->reg_domain);
+ dev_dbg(priv->adapter->dev, "radar detection type: %d\n",
+ rdr_event->det_type);
+ } else {
+ dev_dbg(priv->adapter->dev, "false radar detection event!\n");
+ }
+
+ return 0;
+}
+
+/* This is the work queue function for channel switch handling.
+ * It updates the new channel definition in the bss config structure,
+ * restarts the AP and indicates channel switch completion to cfg80211.
+ */
+void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
+{
+ struct mwifiex_uap_bss_param *bss_cfg;
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct mwifiex_private *priv =
+ container_of(delayed_work, struct mwifiex_private,
+ dfs_chan_sw_work);
+
+ if (WARN_ON(!priv))
+ return;
+
+ bss_cfg = &priv->bss_cfg;
+ if (!bss_cfg->beacon_period) {
+ dev_err(priv->adapter->dev,
+ "channel switch: AP already stopped\n");
+ return;
+ }
+
+ mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef);
+
+ if (mwifiex_config_start_uap(priv, bss_cfg)) {
+ dev_dbg(priv->adapter->dev,
+ "Failed to start AP after channel switch\n");
+ return;
+ }
+
+ dev_notice(priv->adapter->dev,
+ "indicating channel switch completion to kernel\n");
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
+}
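
The new DFS code arms a delayed work item for the CAC period, reports CAC-finished when it fires, and cancels it from the radar and abort paths. A toy sketch of that interplay with plain booleans standing in for the workqueue machinery; all names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct dfs_state {
        bool cac_started;
        bool radar_seen;
};

/* What the delayed work would do when the CAC timer expires. */
static void cac_work(struct dfs_state *s)
{
        if (!s->cac_started)
                return;                 /* already aborted or radar seen */
        s->cac_started = false;
        printf("CAC finished; no radar detected\n");
}

/* What the channel-report event handler would do on a radar hit. */
static void radar_event(struct dfs_state *s)
{
        if (s->cac_started) {
                s->cac_started = false; /* cancel the pending work */
                s->radar_seen = true;
                printf("radar detected during CAC\n");
        }
}

int main(void)
{
        struct dfs_state s = { .cac_started = true };

        radar_event(&s);        /* radar arrives first: CAC is cancelled */
        cac_work(&s);           /* the timer callback then does nothing */
        return 0;
}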
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 9d4786e7ddff..543148d27b01 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -39,7 +39,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
{
uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
struct ieee80211_supported_band *sband =
- priv->wdev->wiphy->bands[radio_type];
+ priv->wdev.wiphy->bands[radio_type];
if (WARN_ON_ONCE(!sband)) {
dev_err(priv->adapter->dev, "Invalid radio type!\n");
@@ -314,7 +314,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
return ret_len;
radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- sband = priv->wdev->wiphy->bands[radio_type];
+ sband = priv->wdev.wiphy->bands[radio_type];
if (bss_desc->bcn_ht_cap) {
ht_cap = (struct mwifiex_ie_types_htcap *) *buffer;
@@ -558,10 +558,10 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
dev_warn(priv->adapter->dev,
"BA setup with unknown TDLS peer %pM!\n",
peer_mac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return -1;
}
if (sta_ptr->is_11ac_enabled)
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index f275675cdbd3..8e2e39422ad8 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -130,7 +130,9 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
{
struct mwifiex_private *priv;
u8 i;
- u32 ba_stream_num = 0;
+ u32 ba_stream_num = 0, ba_stream_max;
+
+ ba_stream_max = MWIFIEX_MAX_TX_BASTREAM_SUPPORTED;
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
@@ -139,8 +141,14 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
&priv->tx_ba_stream_tbl_ptr);
}
- return ((ba_stream_num <
- MWIFIEX_MAX_TX_BASTREAM_SUPPORTED) ? true : false);
+ if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ ba_stream_max =
+ GETSUPP_TXBASTREAMS(adapter->hw_dot_11n_dev_cap);
+ if (!ba_stream_max)
+ ba_stream_max = MWIFIEX_MAX_TX_BASTREAM_SUPPORTED;
+ }
+
+ return ((ba_stream_num < ba_stream_max) ? true : false);
}
/*
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 8720a3d3c755..9b983b5cebbd 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -101,6 +101,13 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
{
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+ unsigned int pad;
+ int headroom = (priv->adapter->iface_type ==
+ MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
+
+ pad = ((void *)skb->data - sizeof(*local_tx_pd) -
+ headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
+ skb_push(skb, pad);
skb_push(skb, sizeof(*local_tx_pd));
@@ -114,10 +121,12 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
/* Always zero as the data is followed by struct txpd */
- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
+ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
+ pad);
local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
- sizeof(*local_tx_pd));
+ sizeof(*local_tx_pd) -
+ pad);
if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
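
The AMSDU TXPD change above computes how many pad bytes put the descriptor on a MWIFIEX_DMA_ALIGN_SZ boundary once the bus interface header is accounted for, and folds that pad into tx_pkt_offset and tx_pkt_length. The arithmetic, reduced to a standalone sketch; the 64-byte alignment and the example sizes are assumptions.

#include <stdint.h>
#include <stdio.h>

#define DMA_ALIGN_SZ 64         /* assumed value of MWIFIEX_DMA_ALIGN_SZ */

int main(void)
{
        uintptr_t data = 0x1000 + 38;   /* pretend skb->data lands here */
        size_t txpd_len = 24;           /* pretend sizeof(struct txpd) */
        size_t headroom = 4;            /* bus interface header length */

        /* Pad so that (data - txpd_len - headroom - pad) sits on the boundary */
        size_t pad = (data - txpd_len - headroom) & (DMA_ALIGN_SZ - 1);

        printf("pad=%zu, aligned start=0x%lx\n",
               pad, (unsigned long)(data - txpd_len - headroom - pad));
        return 0;
}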
@@ -182,7 +191,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ra_list_flags);
return -1;
}
- skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
+ skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index d73fda312c87..a2e8817b56d8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev->iftype, 0, false);
+ priv->wdev.iftype, 0, false);
while (!skb_queue_empty(&list)) {
rx_skb = __skb_dequeue(&list);
@@ -353,9 +353,6 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
if (mwifiex_queuing_ra_based(priv)) {
- dev_dbg(priv->adapter->dev,
- "info: AP/ADHOC:last_seq=%d start_win=%d\n",
- last_seq, new_node->start_win);
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
node = mwifiex_get_sta_entry(priv, ta);
if (node)
@@ -370,6 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
}
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n",
+ last_seq, new_node->start_win);
+
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win) {
new_node->start_win = last_seq + 1;
@@ -391,10 +391,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->timer_context.priv = priv;
new_node->timer_context.timer_is_set = false;
- init_timer(&new_node->timer_context.timer);
- new_node->timer_context.timer.function = mwifiex_flush_data;
- new_node->timer_context.timer.data =
- (unsigned long) &new_node->timer_context;
+ setup_timer(&new_node->timer_context.timer, mwifiex_flush_data,
+ (unsigned long)&new_node->timer_context);
for (i = 0; i < win_size; ++i)
new_node->rx_reorder_ptr[i] = NULL;
@@ -468,10 +466,10 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
sta_ptr = mwifiex_get_sta_entry(priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
dev_warn(priv->adapter->dev,
"BA setup with unknown TDLS peer %pM!\n",
cmd_addba_req->peer_mac_addr);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return -1;
}
if (sta_ptr->is_11ac_enabled)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 9487d728ac20..fdfd9bf15ed4 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -53,3 +53,5 @@ obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
mwifiex_usb-y += usb.o
obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o
+
+ccflags-y += -D__CHECK_ENDIAN
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 4a66a6555366..41c8e25df954 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -590,77 +590,62 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv;
struct mwifiex_uap_bss_param *bss_cfg;
- int ret, bss_started, i;
-
- for (i = 0; i < adapter->priv_num; i++) {
- priv = adapter->priv[i];
-
- switch (priv->bss_role) {
- case MWIFIEX_BSS_ROLE_UAP:
- bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param),
- GFP_KERNEL);
- if (!bss_cfg)
- return -ENOMEM;
-
- mwifiex_set_sys_config_invalid_data(bss_cfg);
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD)
- bss_cfg->rts_threshold = wiphy->rts_threshold;
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
- bss_cfg->frag_threshold = wiphy->frag_threshold;
- if (changed & WIPHY_PARAM_RETRY_LONG)
- bss_cfg->retry_limit = wiphy->retry_long;
-
- bss_started = priv->bss_started;
-
- ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0,
- NULL, true);
- if (ret) {
- wiphy_err(wiphy, "Failed to stop the BSS\n");
- kfree(bss_cfg);
- return ret;
- }
+ int ret;
- ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg,
- false);
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
- kfree(bss_cfg);
+ switch (priv->bss_role) {
+ case MWIFIEX_BSS_ROLE_UAP:
+ if (priv->bss_started) {
+ dev_err(adapter->dev,
+ "cannot change wiphy params when bss started");
+ return -EINVAL;
+ }
- if (ret) {
- wiphy_err(wiphy, "Failed to set bss config\n");
- return ret;
- }
+ bss_cfg = kzalloc(sizeof(*bss_cfg), GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
- if (!bss_started)
- break;
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
- ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
- HostCmd_ACT_GEN_SET, 0,
- NULL, false);
- if (ret) {
- wiphy_err(wiphy, "Failed to start BSS\n");
- return ret;
- }
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ bss_cfg->rts_threshold = wiphy->rts_threshold;
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
+ bss_cfg->frag_threshold = wiphy->frag_threshold;
+ if (changed & WIPHY_PARAM_RETRY_LONG)
+ bss_cfg->retry_limit = wiphy->retry_long;
+
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg,
+ false);
+
+ kfree(bss_cfg);
+ if (ret) {
+ wiphy_err(wiphy, "Failed to set wiphy phy params\n");
+ return ret;
+ }
+ break;
- break;
case MWIFIEX_BSS_ROLE_STA:
- if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- ret = mwifiex_set_rts(priv,
- wiphy->rts_threshold);
- if (ret)
- return ret;
- }
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
- ret = mwifiex_set_frag(priv,
- wiphy->frag_threshold);
- if (ret)
- return ret;
- }
- break;
+ if (priv->media_connected) {
+ dev_err(adapter->dev,
+ "cannot change wiphy params when connected");
+ return -EINVAL;
}
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = mwifiex_set_rts(priv,
+ wiphy->rts_threshold);
+ if (ret)
+ return ret;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ ret = mwifiex_set_frag(priv,
+ wiphy->frag_threshold);
+ if (ret)
+ return ret;
+ }
+ break;
}
return 0;
@@ -671,9 +656,6 @@ mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
{
u16 mode = P2P_MODE_DISABLE;
- if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
- mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
-
if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
@@ -730,12 +712,249 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
- if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
- mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP);
+ return 0;
+}
+
+static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
+{
+ priv->mgmt_frame_mask = 0;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false)) {
+ dev_warn(priv->adapter->dev,
+ "could not unregister mgmt frame rx\n");
+ return -1;
+ }
+
+ mwifiex_deauthenticate(priv, NULL);
+ mwifiex_free_priv(priv);
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
+
+ return 0;
+}
+
+static int
+mwifiex_init_new_priv_params(struct mwifiex_private *priv,
+ struct net_device *dev,
+ enum nl80211_iftype type)
+{
+ mwifiex_init_priv(priv);
+
+ priv->bss_mode = type;
+ priv->wdev.iftype = type;
+
+ mwifiex_init_priv_params(priv, priv->netdev);
+ priv->bss_started = 0;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+ break;
+ case NL80211_IFTYPE_AP:
+ priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
return 0;
}
+static int
+mwifiex_change_vif_to_p2p(struct net_device *dev,
+ enum nl80211_iftype curr_iftype,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct mwifiex_private *priv;
+ struct mwifiex_adapter *adapter;
+
+ priv = mwifiex_netdev_get_priv(dev);
+
+ if (!priv)
+ return -1;
+
+ adapter = priv->adapter;
+
+ if (adapter->curr_iface_comb.p2p_intf ==
+ adapter->iface_limit.p2p_intf) {
+ dev_err(adapter->dev,
+ "cannot create multiple P2P ifaces\n");
+ return -1;
+ }
+
+ dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name);
+
+ if (mwifiex_deinit_priv_params(priv))
+ return -1;
+ if (mwifiex_init_new_priv_params(priv, dev, type))
+ return -1;
+
+ switch (type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (mwifiex_cfg80211_init_p2p_client(priv))
+ return -EFAULT;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_init_p2p_go(priv))
+ return -EFAULT;
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true))
+ return -1;
+
+ if (mwifiex_sta_init_cmd(priv, false, false))
+ return -1;
+
+ switch (curr_iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ adapter->curr_iface_comb.sta_intf--;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf--;
+ break;
+ default:
+ break;
+ }
+
+ adapter->curr_iface_comb.p2p_intf++;
+ dev->ieee80211_ptr->iftype = type;
+
+ return 0;
+}
+
+static int
+mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
+ enum nl80211_iftype curr_iftype,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct mwifiex_private *priv;
+ struct mwifiex_adapter *adapter;
+
+ priv = mwifiex_netdev_get_priv(dev);
+
+ if (!priv)
+ return -1;
+
+ adapter = priv->adapter;
+
+ if ((curr_iftype != NL80211_IFTYPE_P2P_CLIENT &&
+ curr_iftype != NL80211_IFTYPE_P2P_GO) &&
+ (adapter->curr_iface_comb.sta_intf ==
+ adapter->iface_limit.sta_intf)) {
+ dev_err(adapter->dev,
+ "cannot create multiple station/adhoc ifaces\n");
+ return -1;
+ }
+
+ if (type == NL80211_IFTYPE_STATION)
+ dev_notice(adapter->dev,
+ "%s: changing role to station\n", dev->name);
+ else
+ dev_notice(adapter->dev,
+ "%s: changing role to adhoc\n", dev->name);
+
+ if (mwifiex_deinit_priv_params(priv))
+ return -1;
+ if (mwifiex_init_new_priv_params(priv, dev, type))
+ return -1;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true))
+ return -1;
+ if (mwifiex_sta_init_cmd(priv, false, false))
+ return -1;
+
+ switch (curr_iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ adapter->curr_iface_comb.p2p_intf--;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf--;
+ break;
+ default:
+ break;
+ }
+
+ adapter->curr_iface_comb.sta_intf++;
+ dev->ieee80211_ptr->iftype = type;
+ return 0;
+}
+
+static int
+mwifiex_change_vif_to_ap(struct net_device *dev,
+ enum nl80211_iftype curr_iftype,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct mwifiex_private *priv;
+ struct mwifiex_adapter *adapter;
+
+ priv = mwifiex_netdev_get_priv(dev);
+
+ if (!priv)
+ return -1;
+
+ adapter = priv->adapter;
+
+ if (adapter->curr_iface_comb.uap_intf ==
+ adapter->iface_limit.uap_intf) {
+ dev_err(adapter->dev,
+ "cannot create multiple AP ifaces\n");
+ return -1;
+ }
+
+ dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name);
+
+ if (mwifiex_deinit_priv_params(priv))
+ return -1;
+ if (mwifiex_init_new_priv_params(priv, dev, type))
+ return -1;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true))
+ return -1;
+ if (mwifiex_sta_init_cmd(priv, false, false))
+ return -1;
+
+ switch (curr_iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ adapter->curr_iface_comb.p2p_intf--;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ adapter->curr_iface_comb.sta_intf--;
+ break;
+ default:
+ break;
+ }
+
+ adapter->curr_iface_comb.uap_intf++;
+ dev->ieee80211_ptr->iftype = type;
+ return 0;
+}
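
Each mwifiex_change_vif_to_*() helper above checks the per-type interface limit, re-initializes the private structure for the new role, and then moves one count from the old interface type to the new one. A minimal sketch of that bookkeeping; the struct, limits and example values are invented.

#include <stdio.h>

struct iface_comb { int sta, ap, p2p; };

static int change_to_ap(struct iface_comb *cur, const struct iface_comb *limit,
                        int from_sta)
{
        if (cur->ap >= limit->ap) {
                fprintf(stderr, "cannot create another AP interface\n");
                return -1;
        }
        /* ... deinit the old role, init the AP role ... */
        if (from_sta)
                cur->sta--;
        else
                cur->p2p--;
        cur->ap++;
        return 0;
}

int main(void)
{
        struct iface_comb cur = { .sta = 1 };
        struct iface_comb limit = { .sta = 2, .ap = 1, .p2p = 1 };

        printf("first: %d\n", change_to_ap(&cur, &limit, 1));   /* 0 */
        cur.sta = 1;
        printf("second: %d\n", change_to_ap(&cur, &limit, 1));  /* -1: AP limit hit */
        return 0;
}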
/*
* CFG802.11 operation handler to change interface type.
*/
@@ -745,19 +964,32 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
- switch (dev->ieee80211_ptr->iftype) {
+ switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
case NL80211_IFTYPE_STATION:
- break;
+ priv->bss_mode = type;
+ priv->sec_info.authentication_mode =
+ NL80211_AUTHTYPE_OPEN_SYSTEM;
+ dev->ieee80211_ptr->iftype = type;
+ mwifiex_deauthenticate(priv, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL,
+ true);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, flags, params);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
+ flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
return 0;
- case NL80211_IFTYPE_AP:
default:
wiphy_err(wiphy, "%s: changing to %d not supported\n",
dev->name, type);
@@ -767,22 +999,25 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_STATION:
switch (type) {
case NL80211_IFTYPE_ADHOC:
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- if (mwifiex_cfg80211_init_p2p_client(priv))
- return -EFAULT;
+ priv->bss_mode = type;
+ priv->sec_info.authentication_mode =
+ NL80211_AUTHTYPE_OPEN_SYSTEM;
dev->ieee80211_ptr->iftype = type;
- return 0;
+ mwifiex_deauthenticate(priv, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL,
+ true);
+ case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
- if (mwifiex_cfg80211_init_p2p_go(priv))
- return -EFAULT;
- dev->ieee80211_ptr->iftype = type;
- return 0;
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, flags, params);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
+ flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
case NL80211_IFTYPE_STATION: /* This shouldn't happen */
return 0;
- case NL80211_IFTYPE_AP:
default:
wiphy_err(wiphy, "%s: changing to %d not supported\n",
dev->name, type);
@@ -791,12 +1026,20 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
break;
case NL80211_IFTYPE_AP:
switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
+ type, flags,
+ params);
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
case NL80211_IFTYPE_AP: /* This shouldn't happen */
return 0;
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_STATION:
default:
wiphy_err(wiphy, "%s: changing to %d not supported\n",
dev->name, type);
@@ -807,11 +1050,30 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
switch (type) {
case NL80211_IFTYPE_STATION:
- if (mwifiex_cfg80211_deinit_p2p(priv))
+ if (mwifiex_cfg80211_init_p2p_client(priv))
return -EFAULT;
dev->ieee80211_ptr->iftype = type;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+ return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
+ type, flags,
+ params);
+ break;
+ case NL80211_IFTYPE_AP:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+ return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
+ flags, params);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
return 0;
default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -821,16 +1083,8 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- dev->ieee80211_ptr->iftype = type;
- priv->bss_mode = type;
- mwifiex_deauthenticate(priv, NULL);
-
- priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
-
- ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL, true);
- return ret;
+ return 0;
}
static void
@@ -856,16 +1110,16 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
/* HT or VHT */
switch (tx_htinfo & (BIT(3) | BIT(2))) {
case 0:
- /* This will be 20MHz */
+ rate->bw = RATE_INFO_BW_20;
break;
case (BIT(2)):
- rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ rate->bw = RATE_INFO_BW_40;
break;
case (BIT(3)):
- rate->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
+ rate->bw = RATE_INFO_BW_80;
break;
case (BIT(3) | BIT(2)):
- rate->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
+ rate->bw = RATE_INFO_BW_160;
break;
}
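/*
 * A minimal standalone sketch of the bandwidth decode in the hunk above: the
 * two bits BIT(3)|BIT(2) of tx_htinfo select 20/40/80/160 MHz, now reported
 * through cfg80211's rate_info_bw values instead of the old width flags.
 * Names prefixed with sketch_ are illustrative and not part of the driver.
 */
enum sketch_bw { SKETCH_BW_20, SKETCH_BW_40, SKETCH_BW_80, SKETCH_BW_160 };

static enum sketch_bw sketch_decode_bw(unsigned char tx_htinfo)
{
	switch (tx_htinfo & ((1u << 3) | (1u << 2))) {
	case 0:
		return SKETCH_BW_20;
	case (1u << 2):
		return SKETCH_BW_40;
	case (1u << 3):
		return SKETCH_BW_80;
	default:			/* both bits set */
		return SKETCH_BW_160;
	}
}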
@@ -885,8 +1139,9 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
rate->mcs = priv->tx_rate;
rate->flags |= RATE_INFO_FLAGS_MCS;
+ rate->bw = RATE_INFO_BW_20;
if (tx_htinfo & BIT(1))
- rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ rate->bw = RATE_INFO_BW_40;
if (tx_htinfo & BIT(2))
rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
@@ -910,10 +1165,10 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
{
u32 rate;
- sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
- STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS |
- STATION_INFO_TX_BITRATE |
- STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
+ sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) |
+ BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_BITRATE) |
+ BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG);
/* Get signal information from the firmware */
if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
@@ -944,7 +1199,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->txrate.legacy = rate * 5;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -1037,10 +1292,11 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
survey->channel = ieee80211_get_channel(wiphy,
ieee80211_channel_to_frequency(pchan_stats[idx].chan_num, band));
survey->filled = SURVEY_INFO_NOISE_DBM |
- SURVEY_INFO_CHANNEL_TIME | SURVEY_INFO_CHANNEL_TIME_BUSY;
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
survey->noise = pchan_stats[idx].noise;
- survey->channel_time = pchan_stats[idx].cca_scan_dur;
- survey->channel_time_busy = pchan_stats[idx].cca_busy_dur;
+ survey->time = pchan_stats[idx].cca_scan_dur;
+ survey->time_busy = pchan_stats[idx].cca_busy_dur;
return 0;
}
@@ -1395,10 +1651,13 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ mwifiex_abort_cac(priv);
+
if (mwifiex_del_mgmt_ies(priv))
wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
priv->ap_11n_enabled = 0;
+ memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg));
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
@@ -1420,12 +1679,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
{
struct mwifiex_uap_bss_param *bss_cfg;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- u8 config_bands = 0;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
return -1;
- if (mwifiex_set_mgmt_ies(priv, &params->beacon))
- return -1;
bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
if (!bss_cfg)
@@ -1442,6 +1698,11 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
memcpy(bss_cfg->ssid.ssid, params->ssid, params->ssid_len);
bss_cfg->ssid.ssid_len = params->ssid_len;
}
+ if (params->inactivity_timeout > 0) {
+ /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
+ bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
+ bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
+ }
switch (params->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
@@ -1457,33 +1718,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
- bss_cfg->channel = ieee80211_frequency_to_channel(
- params->chandef.chan->center_freq);
-
- /* Set appropriate bands */
- if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- bss_cfg->band_cfg = BAND_CONFIG_BG;
- config_bands = BAND_B | BAND_G;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
- config_bands |= BAND_GN;
- } else {
- bss_cfg->band_cfg = BAND_CONFIG_A;
- config_bands = BAND_A;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
- config_bands |= BAND_AN;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_40)
- config_bands |= BAND_AAC;
- }
-
- if (!((config_bands | priv->adapter->fw_bands) &
- ~priv->adapter->fw_bands))
- priv->adapter->config_bands = config_bands;
-
+ mwifiex_uap_set_channel(bss_cfg, params->chandef);
mwifiex_set_uap_rates(bss_cfg, params);
- mwifiex_send_domain_info_cmd_fw(wiphy);
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
kfree(bss_cfg);
@@ -1506,45 +1742,29 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
mwifiex_set_wmm_params(priv, bss_cfg, params);
- if (params->inactivity_timeout > 0) {
- /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
- bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
- bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
+ if (mwifiex_is_11h_active(priv) &&
+ !cfg80211_chandef_dfs_required(wiphy, &params->chandef,
+ priv->bss_mode)) {
+ dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n");
+ if (mwifiex_11h_activate(priv, false)) {
+ dev_err(priv->adapter->dev,
+ "Failed to disable 11h extensions!!");
+ return -1;
+ }
+ priv->state_11h.is_11h_active = true;
}
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- wiphy_err(wiphy, "Failed to stop the BSS\n");
+ if (mwifiex_config_start_uap(priv, bss_cfg)) {
+ wiphy_err(wiphy, "Failed to start AP\n");
kfree(bss_cfg);
return -1;
}
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg, false)) {
- wiphy_err(wiphy, "Failed to set the SSID\n");
- kfree(bss_cfg);
+ if (mwifiex_set_mgmt_ies(priv, &params->beacon))
return -1;
- }
+ memcpy(&priv->bss_cfg, bss_cfg, sizeof(priv->bss_cfg));
kfree(bss_cfg);
-
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
- HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- wiphy_err(wiphy, "Failed to start the BSS\n");
- return -1;
- }
-
- if (priv->sec_info.wep_enabled)
- priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
- else
- priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
-
- if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter, true))
- return -1;
-
return 0;
}
@@ -1603,15 +1823,15 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- chan = __ieee80211_get_channel(priv->wdev->wiphy,
+ chan = __ieee80211_get_channel(priv->wdev.wiphy,
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
- bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, chan,
CFG80211_BSS_FTYPE_UNKNOWN,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
- cfg80211_put_bss(priv->wdev->wiphy, bss);
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;
@@ -1732,12 +1952,12 @@ done:
/* Find the BSS we want using available scan results */
if (mode == NL80211_IFTYPE_ADHOC)
- bss = cfg80211_get_bss(priv->wdev->wiphy, channel,
+ bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
bssid, ssid, ssid_len,
WLAN_CAPABILITY_IBSS,
WLAN_CAPABILITY_IBSS);
else
- bss = cfg80211_get_bss(priv->wdev->wiphy, channel,
+ bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
bssid, ssid, ssid_len,
WLAN_CAPABILITY_ESS,
WLAN_CAPABILITY_ESS);
@@ -1784,6 +2004,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
@@ -1793,11 +2014,18 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
}
- if (priv->wdev && priv->wdev->current_bss) {
+ if (priv->wdev.current_bss) {
wiphy_warn(wiphy, "%s: already connected\n", dev->name);
return -EALREADY;
}
+ if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ wiphy_err(wiphy,
+ "%s: Ignore connection. Card removed or FW in bad state\n",
+ dev->name);
+ return -EFAULT;
+ }
+
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
@@ -1844,7 +2072,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
struct cfg80211_ibss_params *params)
{
- struct wiphy *wiphy = priv->wdev->wiphy;
+ struct wiphy *wiphy = priv->wdev.wiphy;
struct mwifiex_adapter *adapter = priv->adapter;
int index = 0, i;
u8 config_bands = 0;
@@ -2169,6 +2397,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
+#define MWIFIEX_MAX_WQ_LEN 30
/*
* create a new virtual interface with the given name
*/
@@ -2182,7 +2411,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
struct mwifiex_private *priv;
struct net_device *dev;
void *mdev_priv;
- struct wireless_dev *wdev;
+ char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN];
if (!adapter)
return ERR_PTR(-EFAULT);
@@ -2191,20 +2420,22 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
- if (priv->bss_mode) {
+ if (adapter->curr_iface_comb.sta_intf ==
+ adapter->iface_limit.sta_intf) {
wiphy_err(wiphy,
"cannot create multiple sta/adhoc ifaces\n");
return ERR_PTR(-EINVAL);
}
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev)
- return ERR_PTR(-ENOMEM);
+ priv = mwifiex_get_unused_priv(adapter);
+ if (!priv) {
+ wiphy_err(wiphy,
+ "could not get free private struct\n");
+ return ERR_PTR(-EFAULT);
+ }
- wdev->wiphy = wiphy;
- priv->wdev = wdev;
- wdev->iftype = NL80211_IFTYPE_STATION;
+ priv->wdev.wiphy = wiphy;
+ priv->wdev.iftype = NL80211_IFTYPE_STATION;
if (type == NL80211_IFTYPE_UNSPECIFIED)
priv->bss_mode = NL80211_IFTYPE_STATION;
@@ -2219,20 +2450,22 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
break;
case NL80211_IFTYPE_AP:
- priv = adapter->priv[MWIFIEX_BSS_TYPE_UAP];
-
- if (priv->bss_mode) {
- wiphy_err(wiphy, "Can't create multiple AP interfaces");
+ if (adapter->curr_iface_comb.uap_intf ==
+ adapter->iface_limit.uap_intf) {
+ wiphy_err(wiphy,
+ "cannot create multiple AP ifaces\n");
return ERR_PTR(-EINVAL);
}
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev)
- return ERR_PTR(-ENOMEM);
+ priv = mwifiex_get_unused_priv(adapter);
+ if (!priv) {
+ wiphy_err(wiphy,
+ "could not get free private struct\n");
+ return ERR_PTR(-EFAULT);
+ }
- priv->wdev = wdev;
- wdev->wiphy = wiphy;
- wdev->iftype = NL80211_IFTYPE_AP;
+ priv->wdev.wiphy = wiphy;
+ priv->wdev.iftype = NL80211_IFTYPE_AP;
priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
@@ -2244,24 +2477,25 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
break;
case NL80211_IFTYPE_P2P_CLIENT:
- priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P];
-
- if (priv->bss_mode) {
- wiphy_err(wiphy, "Can't create multiple P2P ifaces");
+ if (adapter->curr_iface_comb.p2p_intf ==
+ adapter->iface_limit.p2p_intf) {
+ wiphy_err(wiphy,
+ "cannot create multiple P2P ifaces\n");
return ERR_PTR(-EINVAL);
}
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev)
- return ERR_PTR(-ENOMEM);
-
- priv->wdev = wdev;
- wdev->wiphy = wiphy;
+ priv = mwifiex_get_unused_priv(adapter);
+ if (!priv) {
+ wiphy_err(wiphy,
+ "could not get free private struct\n");
+ return ERR_PTR(-EFAULT);
+ }
+ priv->wdev.wiphy = wiphy;
/* At start-up, wpa_supplicant tries to change the interface
* to NL80211_IFTYPE_STATION if it is not managed mode.
*/
- wdev->iftype = NL80211_IFTYPE_P2P_CLIENT;
+ priv->wdev.iftype = NL80211_IFTYPE_P2P_CLIENT;
priv->bss_mode = NL80211_IFTYPE_P2P_CLIENT;
/* Setting bss_type to P2P tells firmware that this interface
@@ -2277,8 +2511,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_num = 0;
if (mwifiex_cfg80211_init_p2p_client(priv)) {
- wdev = ERR_PTR(-EFAULT);
- goto done;
+ memset(&priv->wdev, 0, sizeof(priv->wdev));
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-EFAULT);
}
break;
@@ -2292,9 +2527,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
IEEE80211_NUM_ACS, 1);
if (!dev) {
wiphy_err(wiphy, "no memory available for netdevice\n");
+ memset(&priv->wdev, 0, sizeof(priv->wdev));
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- wdev = ERR_PTR(-ENOMEM);
- goto done;
+ return ERR_PTR(-ENOMEM);
}
mwifiex_init_priv_params(priv, dev);
@@ -2314,7 +2550,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
&wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
dev_net_set(dev, wiphy_net(wiphy));
- dev->ieee80211_ptr = priv->wdev;
+ dev->ieee80211_ptr = &priv->wdev;
dev->ieee80211_ptr->iftype = priv->bss_mode;
memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
@@ -2335,10 +2571,47 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
- wdev = ERR_PTR(-EFAULT);
- goto done;
+ memset(&priv->wdev, 0, sizeof(priv->wdev));
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-EFAULT);
+ }
+
+ strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC");
+ strcat(dfs_cac_str, name);
+ priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str,
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 1);
+ if (!priv->dfs_cac_workqueue) {
+ wiphy_err(wiphy, "cannot register virtual network device\n");
+ free_netdev(dev);
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->netdev = NULL;
+ memset(&priv->wdev, 0, sizeof(priv->wdev));
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-ENOMEM);
}
+ INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
+
+ strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW");
+ strcat(dfs_chsw_str, name);
+ priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str,
+ WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!priv->dfs_chan_sw_workqueue) {
+ wiphy_err(wiphy, "cannot register virtual network device\n");
+ free_netdev(dev);
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->netdev = NULL;
+ memset(&priv->wdev, 0, sizeof(priv->wdev));
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_DELAYED_WORK(&priv->dfs_chan_sw_work,
+ mwifiex_dfs_chan_sw_work_queue);
+
sema_init(&priv->async_sem, 1);
dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
@@ -2347,13 +2620,24 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_dev_debugfs_init(priv);
#endif
-done:
- if (IS_ERR(wdev)) {
- kfree(priv->wdev);
- priv->wdev = NULL;
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ adapter->curr_iface_comb.sta_intf++;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf++;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ adapter->curr_iface_comb.p2p_intf++;
+ break;
+ default:
+ wiphy_err(wiphy, "type not supported\n");
+ return ERR_PTR(-EINVAL);
}
- return wdev;
+ return &priv->wdev;
}
EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
@@ -2363,12 +2647,13 @@ EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ struct mwifiex_adapter *adapter = priv->adapter;
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_remove(priv);
#endif
- mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -2376,16 +2661,48 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
if (wdev->netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(wdev->netdev);
+ if (priv->dfs_cac_workqueue) {
+ flush_workqueue(priv->dfs_cac_workqueue);
+ destroy_workqueue(priv->dfs_cac_workqueue);
+ priv->dfs_cac_workqueue = NULL;
+ }
+
+ if (priv->dfs_chan_sw_workqueue) {
+ flush_workqueue(priv->dfs_chan_sw_workqueue);
+ destroy_workqueue(priv->dfs_chan_sw_workqueue);
+ priv->dfs_chan_sw_workqueue = NULL;
+ }
/* Clear the priv in adapter */
priv->netdev->ieee80211_ptr = NULL;
priv->netdev = NULL;
- kfree(wdev);
- priv->wdev = NULL;
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
priv->media_connected = false;
+ switch (priv->bss_mode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ adapter->curr_iface_comb.sta_intf--;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf--;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ adapter->curr_iface_comb.p2p_intf--;
+ break;
+ default:
+ dev_err(adapter->dev, "del_virtual_intf: type not supported\n");
+ break;
+ }
+
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
+ GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ kfree(priv->hist_data);
+
return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
@@ -2421,30 +2738,16 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
}
#ifdef CONFIG_PM
-static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
- struct cfg80211_wowlan *wowlan)
+static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
+ struct cfg80211_wowlan *wowlan)
{
- struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
- struct mwifiex_ds_mef_cfg mef_cfg;
- struct mwifiex_mef_entry *mef_entry;
- int i, filt_num = 0, ret;
+ int i, filt_num = 0, ret = 0;
bool first_pat = true;
u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
const u8 ipv4_mc_mac[] = {0x33, 0x33};
const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
-
- if (!wowlan) {
- dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
- return 0;
- }
-
- if (!priv->media_connected) {
- dev_warn(adapter->dev,
- "Can not configure WOWLAN in disconnected state\n");
- return 0;
- }
+ struct mwifiex_ds_mef_cfg mef_cfg;
+ struct mwifiex_mef_entry *mef_entry;
mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
if (!mef_entry)
@@ -2459,9 +2762,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < wowlan->n_patterns; i++) {
memset(byte_seq, 0, sizeof(byte_seq));
if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
- byte_seq,
- MWIFIEX_MEF_MAX_BYTESEQ)) {
- wiphy_err(wiphy, "Pattern not supported\n");
+ byte_seq,
+ MWIFIEX_MEF_MAX_BYTESEQ)) {
+ dev_err(priv->adapter->dev, "Pattern not supported\n");
kfree(mef_entry);
return -EOPNOTSUPP;
}
@@ -2485,9 +2788,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mef_entry->filter[filt_num].repeat = 1;
mef_entry->filter[filt_num].offset =
- wowlan->patterns[i].pkt_offset;
+ wowlan->patterns[i].pkt_offset;
memcpy(mef_entry->filter[filt_num].byte_seq, byte_seq,
- sizeof(byte_seq));
+ sizeof(byte_seq));
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
if (first_pat)
@@ -2502,9 +2805,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
mef_entry->filter[filt_num].repeat = 16;
memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
- ETH_ALEN);
+ ETH_ALEN);
mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
- ETH_ALEN;
+ ETH_ALEN;
mef_entry->filter[filt_num].offset = 28;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
if (filt_num)
@@ -2513,9 +2816,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
filt_num++;
mef_entry->filter[filt_num].repeat = 16;
memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
- ETH_ALEN);
+ ETH_ALEN);
mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
- ETH_ALEN;
+ ETH_ALEN;
mef_entry->filter[filt_num].offset = 56;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
mef_entry->filter[filt_num].filt_action = TYPE_OR;
@@ -2523,16 +2826,61 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
if (!mef_cfg.criteria)
mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
- MWIFIEX_CRITERIA_UNICAST |
- MWIFIEX_CRITERIA_MULTICAST;
+ MWIFIEX_CRITERIA_UNICAST |
+ MWIFIEX_CRITERIA_MULTICAST;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
- HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
+ HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
kfree(mef_entry);
return ret;
}
+static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_ds_hs_cfg hs_cfg;
+ int ret = 0;
+ struct mwifiex_private *priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ if (!wowlan) {
+ dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
+ return 0;
+ }
+
+ if (!priv->media_connected) {
+ dev_warn(adapter->dev,
+ "Can not configure WOWLAN in disconnected state\n");
+ return 0;
+ }
+
+ if (wowlan->n_patterns || wowlan->magic_pkt) {
+ ret = mwifiex_set_mef_filter(priv, wowlan);
+ if (ret) {
+ dev_err(adapter->dev, "Failed to set MEF filter\n");
+ return ret;
+ }
+ }
+
+ if (wowlan->disconnect) {
+ memset(&hs_cfg, 0, sizeof(hs_cfg));
+ hs_cfg.is_invoke_hostcmd = false;
+ hs_cfg.conditions = HS_CFG_COND_MAC_EVENT;
+ hs_cfg.gpio = HS_CFG_GPIO_DEF;
+ hs_cfg.gap = HS_CFG_GAP_DEF;
+ ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
+ MWIFIEX_SYNC_CMD, &hs_cfg);
+ if (ret) {
+ dev_err(adapter->dev, "Failed to set HS params\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
{
return 0;
@@ -2803,6 +3151,102 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
}
static int
+mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ struct ieee_types_header *chsw_ie;
+ struct ieee80211_channel_sw_ie *channel_sw;
+ int chsw_msec;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->adapter->scan_processing) {
+ dev_err(priv->adapter->dev,
+ "radar detection: scan in process...\n");
+ return -EBUSY;
+ }
+
+ if (priv->wdev.cac_started)
+ return -EBUSY;
+
+ if (cfg80211_chandef_identical(&params->chandef,
+ &priv->dfs_chandef))
+ return -EINVAL;
+
+ chsw_ie = (void *)cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
+ params->beacon_csa.tail,
+ params->beacon_csa.tail_len);
+ if (!chsw_ie) {
+ dev_err(priv->adapter->dev,
+ "Could not parse channel switch announcement IE\n");
+ return -EINVAL;
+ }
+
+ channel_sw = (void *)(chsw_ie + 1);
+ if (channel_sw->mode) {
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+ }
+
+ if (mwifiex_del_mgmt_ies(priv))
+ wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+
+ if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) {
+ wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ return -EFAULT;
+ }
+
+ memcpy(&priv->dfs_chandef, &params->chandef, sizeof(priv->dfs_chandef));
+ memcpy(&priv->beacon_after, &params->beacon_after,
+ sizeof(priv->beacon_after));
+
+ chsw_msec = max(channel_sw->count * priv->bss_cfg.beacon_period, 100);
+ queue_delayed_work(priv->dfs_chan_sw_workqueue, &priv->dfs_chan_sw_work,
+ msecs_to_jiffies(chsw_msec));
+ return 0;
+}
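/*
 * Sketch of the switch-delay computation used just above, under the
 * assumption the code makes: 'count' beacon intervals, with beacon_period
 * treated as milliseconds and a 100 ms floor before the delayed work runs.
 * For count = 5 and a 100 ms beacon period this queues the work after 500 ms.
 * Illustrative only; not part of the patch.
 */
static unsigned int sketch_csa_delay_msec(unsigned int count,
					  unsigned int beacon_period)
{
	unsigned int msec = count * beacon_period;

	return msec < 100 ? 100 : msec;
}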
+
+static int
+mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_radar_params radar_params;
+
+ if (priv->adapter->scan_processing) {
+ dev_err(priv->adapter->dev,
+ "radar detection: scan already in process...\n");
+ return -EBUSY;
+ }
+
+ if (!mwifiex_is_11h_active(priv)) {
+ dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n");
+ if (mwifiex_11h_activate(priv, true)) {
+ dev_err(priv->adapter->dev,
+ "Failed to activate 11h extensions!!");
+ return -1;
+ }
+ priv->state_11h.is_11h_active = true;
+ }
+
+ memset(&radar_params, 0, sizeof(struct mwifiex_radar_params));
+ radar_params.chandef = chandef;
+ radar_params.cac_time_ms = cac_time_ms;
+
+ memcpy(&priv->dfs_chandef, chandef, sizeof(priv->dfs_chandef));
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REPORT_REQUEST,
+ HostCmd_ACT_GEN_SET, 0, &radar_params, true))
+ return -1;
+
+ queue_delayed_work(priv->dfs_cac_workqueue, &priv->dfs_cac_work,
+ msecs_to_jiffies(cac_time_ms));
+ return 0;
+}
+
+static int
mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac,
struct station_parameters *params)
@@ -2866,11 +3310,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.tdls_oper = mwifiex_cfg80211_tdls_oper,
.add_station = mwifiex_cfg80211_add_station,
.change_station = mwifiex_cfg80211_change_station,
+ .start_radar_detection = mwifiex_cfg80211_start_radar_detection,
+ .channel_switch = mwifiex_cfg80211_channel_switch,
};
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
.n_patterns = MWIFIEX_MEF_MAX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -2964,12 +3410,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->cipher_suites = mwifiex_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
- memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
+ ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH;
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index b8242eb2be6f..e9df8826f124 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
return cfp;
if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
- sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ];
else
- sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband) {
dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
@@ -509,3 +509,21 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
return k;
}
+
+u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
+ u8 rx_rate, u8 rate_info)
+{
+ u8 rate_index = 0;
+
+ /* HT40 */
+ if ((rate_info & BIT(0)) && (rate_info & BIT(1)))
+ rate_index = MWIFIEX_RATE_INDEX_MCS0 +
+ MWIFIEX_BW20_MCS_NUM + rx_rate;
+ else if (rate_info & BIT(0)) /* HT20 */
+ rate_index = MWIFIEX_RATE_INDEX_MCS0 + rx_rate;
+ else
+ rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
+ rx_rate - 1 : rx_rate;
+
+ return rate_index;
+}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 85597200badc..c5a14ff7eb82 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -315,22 +315,19 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
return -1;
}
- if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
- == MWIFIEX_BSS_ROLE_STA) {
- if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl))
- /* Response is not needed for sleep
- confirm command */
- adapter->ps_state = PS_STATE_SLEEP;
- else
- adapter->ps_state = PS_STATE_SLEEP_CFM;
-
- if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
- (adapter->is_hs_configured &&
- !adapter->sleep_period.period)) {
- adapter->pm_wakeup_card_req = true;
- mwifiex_hs_activated_event(mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_STA), true);
- }
+
+ if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl))
+ /* Response is not needed for sleep confirm command */
+ adapter->ps_state = PS_STATE_SLEEP;
+ else
+ adapter->ps_state = PS_STATE_SLEEP_CFM;
+
+ if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
+ (adapter->is_hs_configured &&
+ !adapter->sleep_period.period)) {
+ adapter->pm_wakeup_card_req = true;
+ mwifiex_hs_activated_event(mwifiex_get_priv
+ (adapter, MWIFIEX_BSS_ROLE_ANY), true);
}
return ret;
@@ -450,6 +447,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
EVENT_GET_BSS_TYPE(eventcause));
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+
/* Clear BSS_NO_BITS from event */
eventcause &= EVENT_ID_MASK;
adapter->event_cause = eventcause;
@@ -462,12 +460,6 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
}
dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
- if (eventcause == EVENT_PS_SLEEP || eventcause == EVENT_PS_AWAKE) {
- /* Handle PS_SLEEP/AWAKE events on STA */
- priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
- if (!priv)
- priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
- }
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
ret = mwifiex_process_uap_event(priv);
@@ -1008,11 +1000,9 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->scan_pending_q, list) {
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
cmd_node->wait_q_enabled = false;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1070,12 +1060,8 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->scan_pending_q, list) {
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- scan_pending_q_flags);
cmd_node->wait_q_enabled = false;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- spin_lock_irqsave(&adapter->scan_pending_q_lock,
- scan_pending_q_flags);
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
@@ -1588,9 +1574,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
le16_to_cpu(hw_spec->hw_if_version),
le16_to_cpu(hw_spec->version));
- if (priv->curr_addr[0] == 0xff)
- memmove(priv->curr_addr, hw_spec->permanent_addr, ETH_ALEN);
-
+ ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
adapter->region_code = le16_to_cpu(hw_spec->region_code);
for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 2713f7acd35e..1fb329dc6744 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -39,111 +39,6 @@ static char *bss_modes[] = {
"P2P_DEVICE",
};
-/* size/addr for mwifiex_debug_info */
-#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
-#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
-
-/* size/addr for struct mwifiex_adapter */
-#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
-#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
-
-struct mwifiex_debug_data {
- char name[32]; /* variable/array name */
- u32 size; /* size of the variable/array */
- size_t addr; /* address of the variable/array */
- int num; /* number of variables in an array */
-};
-
-static struct mwifiex_debug_data items[] = {
- {"int_counter", item_size(int_counter),
- item_addr(int_counter), 1},
- {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
- item_addr(packets_out[WMM_AC_VO]), 1},
- {"wmm_ac_vi", item_size(packets_out[WMM_AC_VI]),
- item_addr(packets_out[WMM_AC_VI]), 1},
- {"wmm_ac_be", item_size(packets_out[WMM_AC_BE]),
- item_addr(packets_out[WMM_AC_BE]), 1},
- {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
- item_addr(packets_out[WMM_AC_BK]), 1},
- {"tx_buf_size", item_size(tx_buf_size),
- item_addr(tx_buf_size), 1},
- {"curr_tx_buf_size", item_size(curr_tx_buf_size),
- item_addr(curr_tx_buf_size), 1},
- {"ps_mode", item_size(ps_mode),
- item_addr(ps_mode), 1},
- {"ps_state", item_size(ps_state),
- item_addr(ps_state), 1},
- {"is_deep_sleep", item_size(is_deep_sleep),
- item_addr(is_deep_sleep), 1},
- {"wakeup_dev_req", item_size(pm_wakeup_card_req),
- item_addr(pm_wakeup_card_req), 1},
- {"wakeup_tries", item_size(pm_wakeup_fw_try),
- item_addr(pm_wakeup_fw_try), 1},
- {"hs_configured", item_size(is_hs_configured),
- item_addr(is_hs_configured), 1},
- {"hs_activated", item_size(hs_activated),
- item_addr(hs_activated), 1},
- {"num_tx_timeout", item_size(num_tx_timeout),
- item_addr(num_tx_timeout), 1},
- {"is_cmd_timedout", item_size(is_cmd_timedout),
- item_addr(is_cmd_timedout), 1},
- {"timeout_cmd_id", item_size(timeout_cmd_id),
- item_addr(timeout_cmd_id), 1},
- {"timeout_cmd_act", item_size(timeout_cmd_act),
- item_addr(timeout_cmd_act), 1},
- {"last_cmd_id", item_size(last_cmd_id),
- item_addr(last_cmd_id), DBG_CMD_NUM},
- {"last_cmd_act", item_size(last_cmd_act),
- item_addr(last_cmd_act), DBG_CMD_NUM},
- {"last_cmd_index", item_size(last_cmd_index),
- item_addr(last_cmd_index), 1},
- {"last_cmd_resp_id", item_size(last_cmd_resp_id),
- item_addr(last_cmd_resp_id), DBG_CMD_NUM},
- {"last_cmd_resp_index", item_size(last_cmd_resp_index),
- item_addr(last_cmd_resp_index), 1},
- {"last_event", item_size(last_event),
- item_addr(last_event), DBG_CMD_NUM},
- {"last_event_index", item_size(last_event_index),
- item_addr(last_event_index), 1},
- {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
- item_addr(num_cmd_host_to_card_failure), 1},
- {"num_cmd_sleep_cfm_fail",
- item_size(num_cmd_sleep_cfm_host_to_card_failure),
- item_addr(num_cmd_sleep_cfm_host_to_card_failure), 1},
- {"num_tx_h2c_fail", item_size(num_tx_host_to_card_failure),
- item_addr(num_tx_host_to_card_failure), 1},
- {"num_evt_deauth", item_size(num_event_deauth),
- item_addr(num_event_deauth), 1},
- {"num_evt_disassoc", item_size(num_event_disassoc),
- item_addr(num_event_disassoc), 1},
- {"num_evt_link_lost", item_size(num_event_link_lost),
- item_addr(num_event_link_lost), 1},
- {"num_cmd_deauth", item_size(num_cmd_deauth),
- item_addr(num_cmd_deauth), 1},
- {"num_cmd_assoc_ok", item_size(num_cmd_assoc_success),
- item_addr(num_cmd_assoc_success), 1},
- {"num_cmd_assoc_fail", item_size(num_cmd_assoc_failure),
- item_addr(num_cmd_assoc_failure), 1},
- {"cmd_sent", item_size(cmd_sent),
- item_addr(cmd_sent), 1},
- {"data_sent", item_size(data_sent),
- item_addr(data_sent), 1},
- {"cmd_resp_received", item_size(cmd_resp_received),
- item_addr(cmd_resp_received), 1},
- {"event_received", item_size(event_received),
- item_addr(event_received), 1},
-
- /* variables defined in struct mwifiex_adapter */
- {"cmd_pending", adapter_item_size(cmd_pending),
- adapter_item_addr(cmd_pending), 1},
- {"tx_pending", adapter_item_size(tx_pending),
- adapter_item_addr(tx_pending), 1},
- {"rx_pending", adapter_item_size(rx_pending),
- adapter_item_addr(rx_pending), 1},
-};
-
-static int num_of_items = ARRAY_SIZE(items);
-
/*
* Proc info file read handler.
*
@@ -297,6 +192,8 @@ mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
* - Number of FCS errors
* - Number of Tx frames
* - WEP ICV error counts
+ * - Number of received beacons
+ * - Number of missed beacons
*/
static ssize_t
mwifiex_getlog_read(struct file *file, char __user *ubuf,
@@ -333,7 +230,9 @@ mwifiex_getlog_read(struct file *file, char __user *ubuf,
"wepicverrcnt-1 %u\n"
"wepicverrcnt-2 %u\n"
"wepicverrcnt-3 %u\n"
- "wepicverrcnt-4 %u\n",
+ "wepicverrcnt-4 %u\n"
+ "bcn_rcv_cnt %u\n"
+ "bcn_miss_cnt %u\n",
stats.mcast_tx_frame,
stats.failed,
stats.retry,
@@ -349,7 +248,9 @@ mwifiex_getlog_read(struct file *file, char __user *ubuf,
stats.wep_icv_error[0],
stats.wep_icv_error[1],
stats.wep_icv_error[2],
- stats.wep_icv_error[3]);
+ stats.wep_icv_error[3],
+ stats.bcn_rcv_cnt,
+ stats.bcn_miss_cnt);
ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
@@ -360,6 +261,103 @@ free_and_exit:
return ret;
}
+/* debugfs histogram file read handler.
+ *
+ * This function is called when the 'histogram' file is opened for reading.
+ * It prints the following histogram information -
+ * - Number of histogram samples
+ * - Receive packet number of each rx_rate
+ * - Receive packet number of each snr
+ * - Receive packet number of each noise_flr
+ * - Receive packet number of each signal strength
+ */
+static ssize_t
+mwifiex_histogram_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv =
+ (struct mwifiex_private *)file->private_data;
+ ssize_t ret;
+ struct mwifiex_histogram_data *phist_data;
+ int i, value;
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+ char *p = (char *)page;
+
+ if (!p)
+ return -ENOMEM;
+
+ if (!priv || !priv->hist_data)
+ return -EFAULT;
+ phist_data = priv->hist_data;
+
+ p += sprintf(p, "\n"
+ "total samples = %d\n",
+ atomic_read(&phist_data->num_samples));
+
+ p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M");
+ p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n");
+ p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M");
+ p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
+
+ if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
+ p += sprintf(p, "44-53=MCS0-9(VHT:BW20)");
+ p += sprintf(p, "54-63=MCS0-9(VHT:BW40)");
+ p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n");
+ } else {
+ p += sprintf(p, "\n");
+ }
+
+ for (i = 0; i < MWIFIEX_MAX_RX_RATES; i++) {
+ value = atomic_read(&phist_data->rx_rate[i]);
+ if (value)
+ p += sprintf(p, "rx_rate[%02d] = %d\n", i, value);
+ }
+
+ if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
+ for (i = MWIFIEX_MAX_RX_RATES; i < MWIFIEX_MAX_AC_RX_RATES;
+ i++) {
+ value = atomic_read(&phist_data->rx_rate[i]);
+ if (value)
+ p += sprintf(p, "rx_rate[%02d] = %d\n",
+ i, value);
+ }
+ }
+
+ for (i = 0; i < MWIFIEX_MAX_SNR; i++) {
+ value = atomic_read(&phist_data->snr[i]);
+ if (value)
+ p += sprintf(p, "snr[%02ddB] = %d\n", i, value);
+ }
+ for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) {
+ value = atomic_read(&phist_data->noise_flr[i]);
+ if (value)
+ p += sprintf(p, "noise_flr[-%02ddBm] = %d\n",
+ (int)(i-128), value);
+ }
+ for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) {
+ value = atomic_read(&phist_data->sig_str[i]);
+ if (value)
+ p += sprintf(p, "sig_strength[-%02ddBm] = %d\n",
+ i, value);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
+ (unsigned long)p - page);
+
+ return ret;
+}
+
+static ssize_t
+mwifiex_histogram_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = (void *)file->private_data;
+
+ if (priv && priv->hist_data)
+ mwifiex_hist_data_reset(priv);
+ return 0;
+}
+
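/*
 * Illustrative decoder for the rate-index layout printed by the handler
 * above (0-11 legacy, 12-27 MCS0-15 at 20 MHz, 28-43 MCS0-15 at 40 MHz,
 * 44-73 VHT MCS0-9 at 20/40/80 MHz on 11ac firmware). A reader-side sketch,
 * not driver code.
 */
static const char *sketch_rate_bucket(int idx)
{
	if (idx < 0)
		return "invalid";
	if (idx < 12)
		return "legacy (1M-54M)";
	if (idx < 28)
		return "HT MCS, 20 MHz";
	if (idx < 44)
		return "HT MCS, 40 MHz";
	if (idx < 54)
		return "VHT MCS, 20 MHz";
	if (idx < 64)
		return "VHT MCS, 40 MHz";
	if (idx < 74)
		return "VHT MCS, 80 MHz";
	return "invalid";
}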
static struct mwifiex_debug_info info;
/*
@@ -415,13 +413,9 @@ mwifiex_debug_read(struct file *file, char __user *ubuf,
{
struct mwifiex_private *priv =
(struct mwifiex_private *) file->private_data;
- struct mwifiex_debug_data *d = &items[0];
unsigned long page = get_zeroed_page(GFP_KERNEL);
char *p = (char *) page;
ssize_t ret;
- size_t size, addr;
- long val;
- int i, j;
if (!p)
return -ENOMEM;
@@ -430,68 +424,7 @@ mwifiex_debug_read(struct file *file, char __user *ubuf,
if (ret)
goto free_and_exit;
- for (i = 0; i < num_of_items; i++) {
- p += sprintf(p, "%s=", d[i].name);
-
- size = d[i].size / d[i].num;
-
- if (i < (num_of_items - 3))
- addr = d[i].addr + (size_t) &info;
- else /* The last 3 items are struct mwifiex_adapter variables */
- addr = d[i].addr + (size_t) priv->adapter;
-
- for (j = 0; j < d[i].num; j++) {
- switch (size) {
- case 1:
- val = *((u8 *) addr);
- break;
- case 2:
- val = *((u16 *) addr);
- break;
- case 4:
- val = *((u32 *) addr);
- break;
- case 8:
- val = *((long long *) addr);
- break;
- default:
- val = -1;
- break;
- }
-
- p += sprintf(p, "%#lx ", val);
- addr += size;
- }
-
- p += sprintf(p, "\n");
- }
-
- if (info.tx_tbl_num) {
- p += sprintf(p, "Tx BA stream table:\n");
- for (i = 0; i < info.tx_tbl_num; i++)
- p += sprintf(p, "tid = %d, ra = %pM\n",
- info.tx_tbl[i].tid, info.tx_tbl[i].ra);
- }
-
- if (info.rx_tbl_num) {
- p += sprintf(p, "Rx reorder table:\n");
- for (i = 0; i < info.rx_tbl_num; i++) {
- p += sprintf(p, "tid = %d, ta = %pM, "
- "start_win = %d, "
- "win_size = %d, buffer: ",
- info.rx_tbl[i].tid,
- info.rx_tbl[i].ta,
- info.rx_tbl[i].start_win,
- info.rx_tbl[i].win_size);
-
- for (j = 0; j < info.rx_tbl[i].win_size; j++)
- p += sprintf(p, "%c ",
- info.rx_tbl[i].buffer[j] ?
- '1' : '0');
-
- p += sprintf(p, "\n");
- }
- }
+ p += mwifiex_debug_info_to_buffer(priv, p, &info);
ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
(unsigned long) p - page);
@@ -817,6 +750,7 @@ MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
MWIFIEX_DFS_FILE_OPS(regrdwr);
MWIFIEX_DFS_FILE_OPS(rdeeprom);
MWIFIEX_DFS_FILE_OPS(hscfg);
+MWIFIEX_DFS_FILE_OPS(histogram);
/*
* This function creates the debug FS directory structure and the files.
@@ -840,6 +774,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(rdeeprom);
MWIFIEX_DFS_ADD_FILE(fw_dump);
MWIFIEX_DFS_ADD_FILE(hscfg);
+ MWIFIEX_DFS_ADD_FILE(histogram);
}
/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 2269acf41ad8..88d0eade6bb1 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -32,15 +32,19 @@
#define MWIFIEX_MAX_BSS_NUM (3)
-#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
- * + 4 byte alignment
- */
+#define MWIFIEX_DMA_ALIGN_SZ 64
+#define MAX_TXPD_SZ 32
+#define INTF_HDR_ALIGN 4
+
+#define MWIFIEX_MIN_DATA_HEADER_LEN (MWIFIEX_DMA_ALIGN_SZ + INTF_HDR_ALIGN + \
+ MAX_TXPD_SZ)
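/*
 * Quick arithmetic check of the new definition above (a sketch, not part of
 * the header): 64-byte DMA alignment + 4-byte interface header alignment +
 * 32-byte TxPD gives a 100-byte minimum data header, up from the previous
 * hard-coded 36.
 */
_Static_assert(64 + 4 + 32 == 100,
	       "MWIFIEX_MIN_DATA_HEADER_LEN works out to 100 bytes");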
#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8 /* sizeof(pkt_type)
* + sizeof(tx_control)
*/
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
+#define MWIFIEX_MAX_TDLS_PEER_SUPPORTED 8
#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
@@ -92,6 +96,20 @@
#define MWIFIEX_TDLS_MAX_FAIL_COUNT 4
#define MWIFIEX_AUTO_TDLS_IDLE_TIME 10
+/* 54M rates, index from 0 to 11 */
+#define MWIFIEX_RATE_INDEX_MCS0 12
+/* 12-27=MCS0-15(BW20) */
+#define MWIFIEX_BW20_MCS_NUM 15
+
+/* Rate index for OFDM 0 */
+#define MWIFIEX_RATE_INDEX_OFDM0 4
+
+#define MWIFIEX_MAX_STA_NUM 1
+#define MWIFIEX_MAX_UAP_NUM 1
+#define MWIFIEX_MAX_P2P_NUM 1
+
+#define MWIFIEX_A_BAND_START_FREQ 5000
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -204,4 +222,35 @@ struct mwifiex_chan_stats {
u16 cca_scan_dur;
u16 cca_busy_dur;
} __packed;
+
+#define MWIFIEX_HIST_MAX_SAMPLES 1048576
+#define MWIFIEX_MAX_RX_RATES 44
+#define MWIFIEX_MAX_AC_RX_RATES 74
+#define MWIFIEX_MAX_SNR 256
+#define MWIFIEX_MAX_NOISE_FLR 256
+#define MWIFIEX_MAX_SIG_STRENGTH 256
+
+struct mwifiex_histogram_data {
+ atomic_t rx_rate[MWIFIEX_MAX_AC_RX_RATES];
+ atomic_t snr[MWIFIEX_MAX_SNR];
+ atomic_t noise_flr[MWIFIEX_MAX_NOISE_FLR];
+ atomic_t sig_str[MWIFIEX_MAX_SIG_STRENGTH];
+ atomic_t num_samples;
+};
+
+struct mwifiex_iface_comb {
+ u8 sta_intf;
+ u8 uap_intf;
+ u8 p2p_intf;
+};
+
+struct mwifiex_radar_params {
+ struct cfg80211_chan_def *chandef;
+ u32 cac_time_ms;
+} __packed;
+
+struct mwifiex_11h_intf_state {
+ bool is_11h_enabled;
+ bool is_11h_active;
+} __packed;
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
index 04e56b5fc535..65d8d6d4b6ba 100644
--- a/drivers/net/wireless/mwifiex/ethtool.c
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -76,7 +76,9 @@ mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
dump->flag = adapter->curr_mem_idx;
dump->version = 1;
- if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
+ if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
+ dump->len = adapter->drv_info_size;
+ } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
dump->len = entry->mem_size;
} else {
@@ -98,6 +100,13 @@ mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
if (!adapter->if_ops.fw_dump)
return -ENOTSUPP;
+ if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
+ if (!adapter->drv_info_dump)
+ return -EFAULT;
+ memcpy(p, adapter->drv_info_dump, adapter->drv_info_size);
+ return 0;
+ }
+
if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
dev_err(adapter->dev, "firmware dump in progress!!\n");
return -EBUSY;
@@ -125,6 +134,11 @@ static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
if (!adapter->if_ops.fw_dump)
return -ENOTSUPP;
+ if (val->flag == MWIFIEX_DRV_INFO_IDX) {
+ adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX;
+ return 0;
+ }
+
if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
dev_err(adapter->dev, "firmware dump in progress!!\n");
return -EBUSY;
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index fb5936eb82e3..df553e86a0ad 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -158,6 +158,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
+#define TLV_TYPE_CHANRPT_11H_BASIC (PROPRIETARY_TLV_BASE_ID + 91)
#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -233,6 +234,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
+#define GETSUPP_TXBASTREAMS(Dot11nDevCap) ((Dot11nDevCap >> 18) & 0xF)
/* httxcfg bitmap
* 0 reserved
@@ -335,6 +337,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
#define HostCmd_CMD_11N_DELBA 0x00d0
#define HostCmd_CMD_RECONFIGURE_TX_BUFF 0x00d9
+#define HostCmd_CMD_CHAN_REPORT_REQUEST 0x00dd
#define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df
#define HostCmd_CMD_TXPWR_CFG 0x00d1
#define HostCmd_CMD_TX_RATE_CFG 0x00d6
@@ -492,6 +495,8 @@ enum P2P_MODES {
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
#define EVENT_TDLS_GENERIC_EVENT 0x00000052
+#define EVENT_RADAR_DETECTED 0x00000053
+#define EVENT_CHANNEL_REPORT_RDY 0x00000054
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_TX_STATUS_REPORT 0x00000074
@@ -529,6 +534,8 @@ enum P2P_MODES {
#define MWIFIEX_FW_V15 15
+#define MWIFIEX_MASTER_RADAR_DET_MASK BIT(1)
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -1076,6 +1083,8 @@ struct host_cmd_ds_802_11_get_log {
__le32 tx_frame;
__le32 reserved;
__le32 wep_icv_err_cnt[4];
+ __le32 bcn_rcv_cnt;
+ __le32 bcn_miss_cnt;
};
/* Enumeration for rate format */
@@ -1213,6 +1222,24 @@ struct host_cmd_ds_tdls_oper {
u8 peer_mac[ETH_ALEN];
} __packed;
+struct mwifiex_chan_desc {
+ __le16 start_freq;
+ u8 chan_width;
+ u8 chan_num;
+} __packed;
+
+struct host_cmd_ds_chan_rpt_req {
+ struct mwifiex_chan_desc chan_desc;
+ __le32 msec_dwell_time;
+} __packed;
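/*
 * Illustrative fill of a channel report request for a CAC on channel 52.
 * The field semantics (start_freq in MHz, dwell time in ms) are inferred
 * from the names and from MWIFIEX_A_BAND_START_FREQ; the chan_width encoding
 * is assumed. A sketch only, not part of the patch.
 */
static void sketch_fill_chan_rpt_req(struct host_cmd_ds_chan_rpt_req *req,
				     u8 chan, u32 cac_time_ms)
{
	req->chan_desc.start_freq = cpu_to_le16(5000);	/* 5 GHz band start */
	req->chan_desc.chan_num = chan;			/* e.g. 52 */
	req->chan_desc.chan_width = 0;			/* assumed: 20 MHz */
	req->msec_dwell_time = cpu_to_le32(cac_time_ms); /* e.g. 60000 */
}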
+
+struct host_cmd_ds_chan_rpt_event {
+ __le32 result;
+ __le64 start_tsf;
+ __le32 duration;
+ u8 tlvbuf[0];
+} __packed;
+
struct mwifiex_fixed_bcn_param {
__le64 timestamp;
__le16 beacon_period;
@@ -1789,6 +1816,39 @@ struct mwifiex_ie_types_rssi_threshold {
u8 evt_freq;
} __packed;
+#define MWIFIEX_DFS_REC_HDR_LEN 8
+#define MWIFIEX_DFS_REC_HDR_NUM 10
+#define MWIFIEX_BIN_COUNTER_LEN 7
+
+struct mwifiex_radar_det_event {
+ __le32 detect_count;
+ u8 reg_domain; /*1=fcc, 2=etsi, 3=mic*/
+ u8 det_type; /*0=none, 1=pw(chirp), 2=pri(radar)*/
+ __le16 pw_chirp_type;
+ u8 pw_chirp_idx;
+ u8 pw_value;
+ u8 pri_radar_type;
+ u8 pri_bincnt;
+ u8 bin_counter[MWIFIEX_BIN_COUNTER_LEN];
+ u8 num_dfs_records;
+ u8 dfs_record_hdr[MWIFIEX_DFS_REC_HDR_NUM][MWIFIEX_DFS_REC_HDR_LEN];
+ __le32 passed;
+} __packed;
+
+struct meas_rpt_map {
+ u8 rssi:3;
+ u8 unmeasured:1;
+ u8 radar:1;
+ u8 unidentified_sig:1;
+ u8 ofdm_preamble:1;
+ u8 bss:1;
+} __packed;
+
+struct mwifiex_ie_types_chan_rpt_data {
+ struct mwifiex_ie_types_header header;
+ struct meas_rpt_map map;
+} __packed;
+
struct host_cmd_ds_802_11_subsc_evt {
__le16 action;
__le16 events;
@@ -1901,6 +1961,7 @@ struct host_cmd_ds_command {
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
struct host_cmd_ds_tdls_oper tdls_oper;
+ struct host_cmd_ds_chan_rpt_req chan_rpt_req;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index b933794758b7..f3b6ed249403 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -317,27 +317,27 @@ done:
return ret;
}
-/* This function parses different IEs-tail IEs, beacon IEs, probe response IEs,
- * association response IEs from cfg80211_ap_settings function and sets these IE
- * to FW.
+/* This function parses head and tail IEs, from cfg80211_beacon_data and sets
+ * these IE to FW.
*/
-int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
- struct cfg80211_beacon_data *info)
+static int mwifiex_uap_set_head_tail_ies(struct mwifiex_private *priv,
+ struct cfg80211_beacon_data *info)
{
struct mwifiex_ie *gen_ie;
- struct ieee_types_header *rsn_ie, *wpa_ie = NULL;
- u16 rsn_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
+ struct ieee_types_header *rsn_ie = NULL, *wpa_ie = NULL;
+ struct ieee_types_header *chsw_ie = NULL;
+ u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
const u8 *vendor_ie;
- if (info->tail && info->tail_len) {
- gen_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!gen_ie)
- return -ENOMEM;
- gen_ie->ie_index = cpu_to_le16(rsn_idx);
- gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
- MGMT_MASK_PROBE_RESP |
- MGMT_MASK_ASSOC_RESP);
+ gen_ie = kzalloc(sizeof(*gen_ie), GFP_KERNEL);
+ if (!gen_ie)
+ return -ENOMEM;
+ gen_ie->ie_index = cpu_to_le16(gen_idx);
+ gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
+ MGMT_MASK_PROBE_RESP |
+ MGMT_MASK_ASSOC_RESP);
+ if (info->tail && info->tail_len) {
rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
info->tail, info->tail_len);
if (rsn_ie) {
@@ -358,19 +358,41 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
gen_ie->ie_length = cpu_to_le16(ie_len);
}
- if (rsn_ie || wpa_ie) {
- if (mwifiex_update_uap_custom_ie(priv, gen_ie, &rsn_idx,
- NULL, NULL,
- NULL, NULL)) {
- kfree(gen_ie);
- return -1;
- }
- priv->rsn_idx = rsn_idx;
+ chsw_ie = (void *)cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
+ info->tail, info->tail_len);
+ if (chsw_ie) {
+ memcpy(gen_ie->ie_buffer + ie_len,
+ chsw_ie, chsw_ie->len + 2);
+ ie_len += chsw_ie->len + 2;
+ gen_ie->ie_length = cpu_to_le16(ie_len);
}
+ }
- kfree(gen_ie);
+ if (rsn_ie || wpa_ie || chsw_ie) {
+ if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL,
+ NULL, NULL, NULL)) {
+ kfree(gen_ie);
+ return -1;
+ }
+ priv->gen_idx = gen_idx;
}
+ kfree(gen_ie);
+ return 0;
+}
+
+/* This function parses different IEs-head & tail IEs, beacon IEs,
+ * probe response IEs, association response IEs from cfg80211_ap_settings
+ * function and sets these IE to FW.
+ */
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_beacon_data *info)
+{
+ int ret;
+
+ ret = mwifiex_uap_set_head_tail_ies(priv, info);
+ return ret;
+
return mwifiex_set_mgmt_beacon_data_ies(priv, info);
}
@@ -378,25 +400,25 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
{
struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
- struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
+ struct mwifiex_ie *ar_ie = NULL, *gen_ie = NULL;
int ret = 0;
- if (priv->rsn_idx != MWIFIEX_AUTO_IDX_MASK) {
- rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!rsn_ie)
+ if (priv->gen_idx != MWIFIEX_AUTO_IDX_MASK) {
+ gen_ie = kmalloc(sizeof(*gen_ie), GFP_KERNEL);
+ if (!gen_ie)
return -ENOMEM;
- rsn_ie->ie_index = cpu_to_le16(priv->rsn_idx);
- rsn_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
- rsn_ie->ie_length = 0;
- if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &priv->rsn_idx,
+ gen_ie->ie_index = cpu_to_le16(priv->gen_idx);
+ gen_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ gen_ie->ie_length = 0;
+ if (mwifiex_update_uap_custom_ie(priv, gen_ie, &priv->gen_idx,
NULL, &priv->proberesp_idx,
NULL, &priv->assocresp_idx)) {
ret = -1;
goto done;
}
- priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
}
if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
@@ -440,7 +462,6 @@ done:
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
- kfree(rsn_ie);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 520ad4a3018b..b77ba743e1c4 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -52,6 +52,18 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
return 0;
}
+static void wakeup_timer_fn(unsigned long data)
+{
+ struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
+
+ dev_err(adapter->dev, "Firmware wakeup failed\n");
+ adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
+ mwifiex_cancel_all_pending_cmd(adapter);
+
+ if (adapter->if_ops.card_reset)
+ adapter->if_ops.card_reset(adapter);
+}
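/*
 * The callback above is one half of a firmware-wakeup watchdog. A compact
 * sketch of the whole pattern, assuming the 3.19-era timer API
 * (setup_timer/mod_timer/del_timer_sync); main.c further below arms the
 * timer for three seconds before poking the firmware and cancels it once
 * traffic is seen again. Illustrative only.
 */
static inline void sketch_arm_wakeup_watchdog(struct mwifiex_adapter *adapter)
{
	/* arm before asking the firmware to wake up */
	mod_timer(&adapter->wakeup_timer, jiffies + 3 * HZ);
}

static inline void sketch_wakeup_seen(struct mwifiex_adapter *adapter)
{
	/* firmware answered: the watchdog is no longer needed */
	del_timer_sync(&adapter->wakeup_timer);
}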
+
/*
* This function initializes the private structure and sets default
* values to the members.
@@ -140,6 +152,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->check_tdls_tx = false;
memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
+ mwifiex_init_11h_params(priv);
+
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -282,9 +296,16 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
- adapter->ext_scan = true;
+ adapter->ext_scan = false;
adapter->key_api_major_ver = 0;
adapter->key_api_minor_ver = 0;
+ memset(adapter->perm_addr, 0xff, ETH_ALEN);
+ adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
+ adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
+ adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
+
+ setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
+ (unsigned long)adapter);
}
/*
@@ -391,7 +412,10 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
return;
}
+ del_timer(&adapter->wakeup_timer);
mwifiex_cancel_all_pending_cmd(adapter);
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);
+ wake_up_interruptible(&adapter->hs_activate_wait_q);
/* Free lock variables */
mwifiex_free_lock_list(adapter);
@@ -411,6 +435,11 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
entry->mem_size = 0;
}
+ if (adapter->drv_info_dump) {
+ vfree(adapter->drv_info_dump);
+ adapter->drv_info_size = 0;
+ }
+
if (adapter->sleep_cfm)
dev_kfree_skb_any(adapter->sleep_cfm);
}
@@ -528,7 +557,8 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
- ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta);
+ ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
+ true);
if (ret == -1)
return -1;
@@ -653,6 +683,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
priv = adapter->priv[i];
mwifiex_clean_auto_tdls(priv);
+ mwifiex_abort_cac(priv);
mwifiex_clean_txrx(priv);
mwifiex_delete_bss_prio_tbl(priv);
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 0847f3e07ab7..d2b05c3a96da 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -137,6 +137,8 @@ struct mwifiex_ds_get_stats {
u32 fcs_error;
u32 tx_frame;
u32 wep_icv_error[4];
+ u32 bcn_rcv_cnt;
+ u32 bcn_miss_cnt;
};
#define MWIFIEX_MAX_VER_STR_LEN 128
@@ -180,7 +182,11 @@ struct mwifiex_ds_tx_ba_stream_tbl {
u8 amsdu;
};
-#define DBG_CMD_NUM 5
+#define DBG_CMD_NUM 5
+
+struct tdls_peer_info {
+ u8 peer_addr[ETH_ALEN];
+};
struct mwifiex_debug_info {
u32 int_counter;
@@ -193,6 +199,9 @@ struct mwifiex_debug_info {
u32 rx_tbl_num;
struct mwifiex_ds_rx_reorder_tbl rx_tbl
[MWIFIEX_MAX_RX_BASTREAM_SUPPORTED];
+ u32 tdls_peer_num;
+ struct tdls_peer_info tdls_list
+ [MWIFIEX_MAX_TDLS_PEER_SUPPORTED];
u16 ps_mode;
u32 ps_state;
u8 is_deep_sleep;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index d4d2223d1f31..7e74b4fccddd 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -83,9 +83,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
}
mwifiex_init_lock_list(adapter);
- init_timer(&adapter->cmd_timer);
- adapter->cmd_timer.function = mwifiex_cmd_timeout_func;
- adapter->cmd_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->cmd_timer, mwifiex_cmd_timeout_func,
+ (unsigned long)adapter);
return 0;
@@ -237,6 +236,7 @@ process_start:
(is_command_pending(adapter) ||
!mwifiex_wmm_lists_empty(adapter))) {
adapter->pm_wakeup_fw_try = true;
+ mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
adapter->if_ops.wakeup(adapter);
continue;
}
@@ -244,6 +244,7 @@ process_start:
if (IS_CARD_RX_RCVD(adapter)) {
adapter->data_received = false;
adapter->pm_wakeup_fw_try = false;
+ del_timer_sync(&adapter->wakeup_timer);
if (adapter->ps_state == PS_STATE_SLEEP)
adapter->ps_state = PS_STATE_AWAKE;
} else {
@@ -511,8 +512,7 @@ err_dnld_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
- (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
adapter->init_wait_q_woken = false;
@@ -562,7 +562,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
static int
mwifiex_open(struct net_device *dev)
{
- netif_tx_start_all_queues(dev);
+ netif_carrier_off(dev);
+
return 0;
}
@@ -801,6 +802,114 @@ mwifiex_tx_timeout(struct net_device *dev)
}
}
+void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
+{
+ void *p;
+ char drv_version[64];
+ struct usb_card_rec *cardp;
+ struct sdio_mmc_card *sdio_card;
+ struct mwifiex_private *priv;
+ int i, idx;
+ struct netdev_queue *txq;
+ struct mwifiex_debug_info *debug_info;
+
+ if (adapter->drv_info_dump) {
+ vfree(adapter->drv_info_dump);
+ adapter->drv_info_size = 0;
+ }
+
+ dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");
+
+ adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
+
+ if (!adapter->drv_info_dump)
+ return;
+
+ p = (char *)(adapter->drv_info_dump);
+ p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
+
+ mwifiex_drv_get_driver_version(adapter, drv_version,
+ sizeof(drv_version) - 1);
+ p += sprintf(p, "driver_version = %s\n", drv_version);
+
+ if (adapter->iface_type == MWIFIEX_USB) {
+ cardp = (struct usb_card_rec *)adapter->card;
+ p += sprintf(p, "tx_cmd_urb_pending = %d\n",
+ atomic_read(&cardp->tx_cmd_urb_pending));
+ p += sprintf(p, "tx_data_urb_pending = %d\n",
+ atomic_read(&cardp->tx_data_urb_pending));
+ p += sprintf(p, "rx_cmd_urb_pending = %d\n",
+ atomic_read(&cardp->rx_cmd_urb_pending));
+ p += sprintf(p, "rx_data_urb_pending = %d\n",
+ atomic_read(&cardp->rx_data_urb_pending));
+ }
+
+ p += sprintf(p, "tx_pending = %d\n",
+ atomic_read(&adapter->tx_pending));
+ p += sprintf(p, "rx_pending = %d\n",
+ atomic_read(&adapter->rx_pending));
+
+ if (adapter->iface_type == MWIFIEX_SDIO) {
+ sdio_card = (struct sdio_mmc_card *)adapter->card;
+ p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n",
+ sdio_card->mp_rd_bitmap, sdio_card->curr_rd_port);
+ p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n",
+ sdio_card->mp_wr_bitmap, sdio_card->curr_wr_port);
+ }
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (!adapter->priv[i] || !adapter->priv[i]->netdev)
+ continue;
+ priv = adapter->priv[i];
+ p += sprintf(p, "\n[interface : \"%s\"]\n",
+ priv->netdev->name);
+ p += sprintf(p, "wmm_tx_pending[0] = %d\n",
+ atomic_read(&priv->wmm_tx_pending[0]));
+ p += sprintf(p, "wmm_tx_pending[1] = %d\n",
+ atomic_read(&priv->wmm_tx_pending[1]));
+ p += sprintf(p, "wmm_tx_pending[2] = %d\n",
+ atomic_read(&priv->wmm_tx_pending[2]));
+ p += sprintf(p, "wmm_tx_pending[3] = %d\n",
+ atomic_read(&priv->wmm_tx_pending[3]));
+ p += sprintf(p, "media_state=\"%s\"\n", !priv->media_connected ?
+ "Disconnected" : "Connected");
+ p += sprintf(p, "carrier %s\n", (netif_carrier_ok(priv->netdev)
+ ? "on" : "off"));
+ for (idx = 0; idx < priv->netdev->num_tx_queues; idx++) {
+ txq = netdev_get_tx_queue(priv->netdev, idx);
+ p += sprintf(p, "tx queue %d:%s ", idx,
+ netif_tx_queue_stopped(txq) ?
+ "stopped" : "started");
+ }
+ p += sprintf(p, "\n%s: num_tx_timeout = %d\n",
+ priv->netdev->name, priv->num_tx_timeout);
+ }
+
+ if (adapter->iface_type == MWIFIEX_SDIO) {
+ p += sprintf(p, "\n=== SDIO register DUMP===\n");
+ if (adapter->if_ops.reg_dump)
+ p += adapter->if_ops.reg_dump(adapter, p);
+ }
+
+ p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
+ debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
+ if (debug_info) {
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (!adapter->priv[i] || !adapter->priv[i]->netdev)
+ continue;
+ priv = adapter->priv[i];
+ mwifiex_get_debug_info(priv, debug_info);
+ p += mwifiex_debug_info_to_buffer(priv, p, debug_info);
+ break;
+ }
+ kfree(debug_info);
+ }
+
+ adapter->drv_info_size = p - adapter->drv_info_dump;
+ dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
+}
+EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info);
+
/*
* CFG802.11 network device handler for statistics retrieval.
*/
@@ -847,26 +956,34 @@ static const struct net_device_ops mwifiex_netdev_ops = {
* - Nick name : Set to null
* - Number of Tx timeout : Set to 0
* - Device address : Set to current address
+ * - Rx histogram statistic : Set to 0
*
* In addition, the CFG80211 work queue is also created.
*/
void mwifiex_init_priv_params(struct mwifiex_private *priv,
- struct net_device *dev)
+ struct net_device *dev)
{
dev->netdev_ops = &mwifiex_netdev_ops;
dev->destructor = free_netdev;
/* Initialize private structure */
priv->current_key_index = 0;
priv->media_connected = false;
- memset(&priv->nick_name, 0, sizeof(priv->nick_name));
memset(priv->mgmt_ie, 0,
sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX);
priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK;
priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK;
priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK;
- priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
priv->num_tx_timeout = 0;
+ ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
+ GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ priv->hist_data = kmalloc(sizeof(*priv->hist_data), GFP_KERNEL);
+ if (priv->hist_data)
+ mwifiex_hist_data_reset(priv);
+ }
}
/*
@@ -1000,8 +1117,7 @@ err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
- (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
adapter->init_wait_q_woken = false;
@@ -1052,6 +1168,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+
/* Stop data */
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
@@ -1086,16 +1204,15 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
continue;
rtnl_lock();
- if (priv->wdev && priv->netdev)
- mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+ if (priv->netdev &&
+ priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+ mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
- mwifiex_terminate_workqueue(adapter);
-
/* Unregister device */
dev_dbg(adapter->dev, "info: unregister device\n");
if (adapter->if_ops.unregister_dev)
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e66993cb5daf..f0a6af179af0 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -41,6 +41,8 @@
#include "util.h"
#include "fw.h"
#include "pcie.h"
+#include "usb.h"
+#include "sdio.h"
extern const char driver_version[];
@@ -136,6 +138,8 @@ enum {
/* Threshold for tx_timeout_cnt before we trigger a card reset */
#define TX_TIMEOUT_THRESHOLD 6
+#define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000
+
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -161,7 +165,6 @@ struct mwifiex_dbg {
enum MWIFIEX_HARDWARE_STATUS {
MWIFIEX_HW_STATUS_READY,
MWIFIEX_HW_STATUS_INITIALIZING,
- MWIFIEX_HW_STATUS_FW_READY,
MWIFIEX_HW_STATUS_INIT_DONE,
MWIFIEX_HW_STATUS_RESET,
MWIFIEX_HW_STATUS_CLOSING,
@@ -413,6 +416,7 @@ struct mwifiex_roc_cfg {
};
#define MWIFIEX_FW_DUMP_IDX 0xff
+#define MWIFIEX_DRV_INFO_IDX 20
#define FW_DUMP_MAX_NAME_LEN 8
#define FW_DUMP_HOST_READY 0xEE
#define FW_DUMP_DONE 0xFF
@@ -543,13 +547,12 @@ struct mwifiex_private {
u32 curr_bcn_size;
/* spin lock for beacon buffer */
spinlock_t curr_bcn_buf_lock;
- struct wireless_dev *wdev;
+ struct wireless_dev wdev;
struct mwifiex_chan_freq_power cfp;
char version_str[128];
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_dev_dir;
#endif
- u8 nick_name[16];
u16 current_key_index;
struct semaphore async_sem;
struct cfg80211_scan_request *scan_request;
@@ -564,7 +567,7 @@ struct mwifiex_private {
u16 beacon_idx;
u16 proberesp_idx;
u16 assocresp_idx;
- u16 rsn_idx;
+ u16 gen_idx;
u8 ap_11n_enabled;
u8 ap_11ac_enabled;
u32 mgmt_frame_mask;
@@ -574,6 +577,7 @@ struct mwifiex_private {
unsigned long csa_expire_time;
u8 del_list_idx;
bool hs2_enabled;
+ struct mwifiex_uap_bss_param bss_cfg;
struct station_parameters *sta_params;
struct sk_buff_head tdls_txq;
u8 check_tdls_tx;
@@ -582,6 +586,16 @@ struct mwifiex_private {
struct idr ack_status_frames;
/* spin lock for ack status */
spinlock_t ack_status_lock;
+ /** rx histogram data */
+ struct mwifiex_histogram_data *hist_data;
+ struct cfg80211_chan_def dfs_chandef;
+ struct workqueue_struct *dfs_cac_workqueue;
+ struct delayed_work dfs_cac_work;
+ struct timer_list dfs_chan_switch_timer;
+ struct workqueue_struct *dfs_chan_sw_workqueue;
+ struct delayed_work dfs_chan_sw_work;
+ struct cfg80211_beacon_data beacon_after;
+ struct mwifiex_11h_intf_state state_11h;
};
enum mwifiex_ba_status {
@@ -717,6 +731,7 @@ struct mwifiex_if_ops {
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
void (*fw_dump)(struct mwifiex_adapter *);
+ int (*reg_dump)(struct mwifiex_adapter *, char *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
void (*iface_work)(struct work_struct *work);
void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
@@ -724,6 +739,8 @@ struct mwifiex_if_ops {
struct mwifiex_adapter {
u8 iface_type;
+ struct mwifiex_iface_comb iface_limit;
+ struct mwifiex_iface_comb curr_iface_comb;
struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
u8 priv_num;
const struct firmware *firmware;
@@ -731,6 +748,7 @@ struct mwifiex_adapter {
int winner;
struct device *dev;
struct wiphy *wiphy;
+ u8 perm_addr[ETH_ALEN];
bool surprise_removed;
u32 fw_release_number;
u16 init_wait_q_woken;
@@ -744,6 +762,8 @@ struct mwifiex_adapter {
struct work_struct main_work;
struct workqueue_struct *rx_workqueue;
struct work_struct rx_work;
+ struct workqueue_struct *dfs_workqueue;
+ struct work_struct dfs_work;
bool rx_work_enabled;
bool rx_processing;
bool delay_main_work;
@@ -823,6 +843,7 @@ struct mwifiex_adapter {
u16 gen_null_pkt;
u16 pps_uapsd_mode;
u32 pm_wakeup_fw_try;
+ struct timer_list wakeup_timer;
u8 is_hs_configured;
struct mwifiex_hs_config_param hs_cfg;
u8 hs_activated;
@@ -865,6 +886,8 @@ struct mwifiex_adapter {
struct memory_type_mapping *mem_type_mapping_tbl;
u8 num_mem_types;
u8 curr_mem_idx;
+ void *drv_info_dump;
+ u32 drv_info_size;
bool scan_chan_gap_enabled;
struct sk_buff_head rx_data_q;
struct mwifiex_chan_stats *chan_stats;
@@ -979,7 +1002,7 @@ void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv,
const u8 *ra_addr);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
-int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
+int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta, bool init);
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
struct mwifiex_scan_cmd_config *scan_cfg);
void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
@@ -1140,6 +1163,25 @@ mwifiex_get_priv(struct mwifiex_adapter *adapter,
}
/*
+ * This function returns the first available unused private structure pointer.
+ */
+static inline struct mwifiex_private *
+mwifiex_get_unused_priv(struct mwifiex_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i]) {
+ if (adapter->priv[i]->bss_mode ==
+ NL80211_IFTYPE_UNSPECIFIED)
+ break;
+ }
+ }
+
+ return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
+}
+
+/*
* This function returns the driver private structure of a network device.
*/
static inline struct mwifiex_private *
@@ -1230,8 +1272,6 @@ int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
struct ieee80211_channel *chan,
unsigned int duration);
-int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
-
int mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log);
@@ -1291,9 +1331,17 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
+void mwifiex_uap_set_channel(struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_chan_def chandef);
+int mwifiex_config_start_uap(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg);
void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
struct mwifiex_sta_node *node);
+void mwifiex_init_11h_params(struct mwifiex_private *priv);
+int mwifiex_is_11h_active(struct mwifiex_private *priv);
+int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag);
+
void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
@@ -1324,6 +1372,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *buf, int len);
int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
+int mwifiex_get_tdls_list(struct mwifiex_private *priv,
+ struct tdls_peer_info *buf);
void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
@@ -1340,6 +1390,11 @@ void mwifiex_check_auto_tdls(unsigned long context);
void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf);
+int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
+ struct sk_buff *skb);
void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
void *event_body);
@@ -1347,6 +1402,21 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
struct sk_buff *
mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
struct sk_buff *skb, u8 flag, u64 *cookie);
+void mwifiex_dfs_cac_work_queue(struct work_struct *work);
+void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work);
+void mwifiex_abort_cac(struct mwifiex_private *priv);
+int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
+ struct sk_buff *skb);
+
+void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
+ s8 nflr);
+void mwifiex_hist_data_reset(struct mwifiex_private *priv);
+void mwifiex_hist_data_add(struct mwifiex_private *priv,
+ u8 rx_rate, s8 snr, s8 nflr);
+u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
+ u8 rx_rate, u8 ht_info);
+
+void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index c3a20f94f3c9..a5828da59365 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -204,6 +204,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
card->pcie.tx_buf_size = data->tx_buf_size;
card->pcie.supports_fw_dump = data->supports_fw_dump;
+ card->pcie.can_ext_scan = data->can_ext_scan;
}
if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -1952,8 +1953,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- dev_dbg(adapter->dev, "info:\nFW download over, size %d bytes\n",
- offset);
+ dev_notice(adapter->dev,
+ "info: FW download over, size %d bytes\n", offset);
ret = 0;
@@ -2064,6 +2065,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
* state until cookie is set */
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_fw_try = false;
+ del_timer(&adapter->wakeup_timer);
}
}
}
@@ -2562,6 +2564,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
strcpy(adapter->fw_name, card->pcie.firmware);
+ adapter->ext_scan = card->pcie.can_ext_scan;
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 200e8b0cb582..666d40e9dbc3 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -206,6 +206,7 @@ struct mwifiex_pcie_device {
u16 blksz_fw_dl;
u16 tx_buf_size;
bool supports_fw_dump;
+ bool can_ext_scan;
};
static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
@@ -214,6 +215,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.supports_fw_dump = false,
+ .can_ext_scan = true,
};
static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
@@ -222,6 +224,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
.supports_fw_dump = true,
+ .can_ext_scan = true,
};
struct mwifiex_evt_buf_desc {
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 984a7a4fa93b..0ffdb7c5afd2 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -496,10 +496,10 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
- if (!priv->wdev->wiphy->bands[band])
+ if (!priv->wdev.wiphy->bands[band])
continue;
- sband = priv->wdev->wiphy->bands[band];
+ sband = priv->wdev.wiphy->bands[band];
for (i = 0; (i < sband->n_channels) ; i++) {
ch = &sband->channels[i];
@@ -1429,6 +1429,12 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -EBUSY;
}
+ if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ dev_err(adapter->dev,
+ "Ignore scan. Card removed or firmware in bad state\n");
+ return -EFAULT;
+ }
+
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = true;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -1727,10 +1733,10 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
freq = cfp ? cfp->freq : 0;
- chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(priv->wdev.wiphy, freq);
if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
- bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy,
chan, CFG80211_BSS_FTYPE_UNKNOWN,
bssid, timestamp,
cap_info_bitmap, beacon_period,
@@ -1742,7 +1748,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
!memcmp(bssid, priv->curr_bss_params.bss_descriptor
.mac_address, ETH_ALEN))
mwifiex_update_curr_bss_params(priv, bss);
- cfg80211_put_bss(priv->wdev->wiphy, bss);
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
}
} else {
dev_dbg(adapter->dev, "missing BSS channel IE\n");
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 933dae137850..91e36cda9543 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -107,6 +107,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
card->supports_fw_dump = data->supports_fw_dump;
card->auto_tdls = data->auto_tdls;
+ card->can_ext_scan = data->can_ext_scan;
}
sdio_claim_host(func);
@@ -282,6 +283,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d)
/* Device ID for SD8887 */
#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
+/* Device ID for SD8801 */
+#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
+
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
@@ -295,6 +299,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long) &mwifiex_sdio_sd8897},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887),
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{},
};
@@ -986,8 +992,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- dev_dbg(adapter->dev, "info: FW download over, size %d bytes\n",
- offset);
+ dev_notice(adapter->dev,
+ "info: FW download over, size %d bytes\n", offset);
ret = 0;
done:
@@ -1882,6 +1888,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
}
adapter->auto_tdls = card->auto_tdls;
+ adapter->ext_scan = card->can_ext_scan;
return ret;
}
@@ -1958,8 +1965,8 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
pr_err("Resetting card...\n");
mmc_remove_host(target);
- /* 20ms delay is based on experiment with sdhci controller */
- mdelay(20);
+ /* 200ms delay is based on experiment with sdhci controller */
+ mdelay(200);
target->rescan_entered = 0; /* rescan non-removable cards */
mmc_add_host(target);
}
@@ -2023,6 +2030,8 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
u32 memory_size;
static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
+ mwifiex_dump_drv_info(adapter);
+
if (!card->supports_fw_dump)
return;
@@ -2166,6 +2175,99 @@ static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
schedule_work(&adapter->iface_work);
}
+/* Function to dump SDIO function registers and SDIO scratch registers in case
+ * of FW crash
+ */
+static int
+mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
+{
+ char *p = drv_buf;
+ struct sdio_mmc_card *cardp = adapter->card;
+ int ret = 0;
+ u8 count, func, data, index = 0, size = 0;
+ u8 reg, reg_start, reg_end;
+ char buf[256], *ptr;
+
+ if (!p)
+ return 0;
+
+ dev_info(adapter->dev, "SDIO register DUMP START\n");
+
+ mwifiex_pm_wakeup_card(adapter);
+
+ sdio_claim_host(cardp->func);
+
+ for (count = 0; count < 5; count++) {
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+
+ switch (count) {
+ case 0:
+ /* Read the registers of SDIO function0 */
+ func = count;
+ reg_start = 0;
+ reg_end = 9;
+ break;
+ case 1:
+ /* Read the registers of SDIO function1 */
+ func = count;
+ reg_start = cardp->reg->func1_dump_reg_start;
+ reg_end = cardp->reg->func1_dump_reg_end;
+ break;
+ case 2:
+ index = 0;
+ func = 1;
+ reg_start = cardp->reg->func1_spec_reg_table[index++];
+ size = cardp->reg->func1_spec_reg_num;
+ reg_end = cardp->reg->func1_spec_reg_table[size-1];
+ break;
+ default:
+ /* Read the scratch registers of SDIO function1 */
+ if (count == 4)
+ mdelay(100);
+ func = 1;
+ reg_start = cardp->reg->func1_scratch_reg;
+ reg_end = reg_start + MWIFIEX_SDIO_SCRATCH_SIZE;
+ }
+
+ if (count != 2)
+ ptr += sprintf(ptr, "SDIO Func%d (%#x-%#x): ",
+ func, reg_start, reg_end);
+ else
+ ptr += sprintf(ptr, "SDIO Func%d: ", func);
+
+ for (reg = reg_start; reg <= reg_end;) {
+ if (func == 0)
+ data = sdio_f0_readb(cardp->func, reg, &ret);
+ else
+ data = sdio_readb(cardp->func, reg, &ret);
+
+ if (count == 2)
+ ptr += sprintf(ptr, "(%#x) ", reg);
+ if (!ret) {
+ ptr += sprintf(ptr, "%02x ", data);
+ } else {
+ ptr += sprintf(ptr, "ERR");
+ break;
+ }
+
+ if (count == 2 && reg < reg_end)
+ reg = cardp->reg->func1_spec_reg_table[index++];
+ else
+ reg++;
+ }
+
+ dev_info(adapter->dev, "%s\n", buf);
+ p += sprintf(p, "%s\n", buf);
+ }
+
+ sdio_release_host(cardp->func);
+
+ dev_info(adapter->dev, "SDIO register DUMP END\n");
+
+ return p - drv_buf;
+}
+
static struct mwifiex_if_ops sdio_ops = {
.init_if = mwifiex_init_sdio,
.cleanup_if = mwifiex_cleanup_sdio,
@@ -2188,6 +2290,7 @@ static struct mwifiex_if_ops sdio_ops = {
.card_reset = mwifiex_sdio_card_reset,
.iface_work = mwifiex_sdio_work,
.fw_dump = mwifiex_sdio_fw_dump,
+ .reg_dump = mwifiex_sdio_reg_dump,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 54c07156dd78..957cca246618 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -34,6 +34,7 @@
#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
+#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
@@ -44,6 +45,9 @@
#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000
+#define MWIFIEX_MAX_FUNC2_REG_NUM 13
+#define MWIFIEX_SDIO_SCRATCH_SIZE 10
+
#define SDIO_MPA_ADDR_BASE 0x1000
#define CTRL_PORT 0
#define CTRL_PORT_MASK 0x0001
@@ -219,6 +223,11 @@ struct mwifiex_sdio_card_reg {
u8 fw_dump_ctrl;
u8 fw_dump_start;
u8 fw_dump_end;
+ u8 func1_dump_reg_start;
+ u8 func1_dump_reg_end;
+ u8 func1_scratch_reg;
+ u8 func1_spec_reg_num;
+ u8 func1_spec_reg_table[MWIFIEX_MAX_FUNC2_REG_NUM];
};
struct sdio_mmc_card {
@@ -247,6 +256,7 @@ struct sdio_mmc_card {
u8 *mp_regs;
u8 auto_tdls;
+ bool can_ext_scan;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -264,6 +274,7 @@ struct mwifiex_sdio_device {
u32 mp_tx_agg_buf_size;
u32 mp_rx_agg_buf_size;
u8 auto_tdls;
+ bool can_ext_scan;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -291,6 +302,11 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
.rd_len_p0_l = 0x08,
.rd_len_p0_u = 0x09,
.card_misc_cfg_reg = 0x6c,
+ .func1_dump_reg_start = 0x0,
+ .func1_dump_reg_end = 0x9,
+ .func1_scratch_reg = 0x60,
+ .func1_spec_reg_num = 5,
+ .func1_spec_reg_table = {0x28, 0x30, 0x34, 0x38, 0x3c},
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
@@ -335,6 +351,12 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
.fw_dump_ctrl = 0xe2,
.fw_dump_start = 0xe3,
.fw_dump_end = 0xea,
+ .func1_dump_reg_start = 0x0,
+ .func1_dump_reg_end = 0xb,
+ .func1_scratch_reg = 0xc0,
+ .func1_spec_reg_num = 8,
+ .func1_spec_reg_table = {0x4C, 0x50, 0x54, 0x55, 0x58,
+ 0x59, 0x5c, 0x5d},
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
@@ -376,6 +398,13 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
.cmd_cfg_1 = 0xc5,
.cmd_cfg_2 = 0xc6,
.cmd_cfg_3 = 0xc7,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0x90,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
@@ -390,6 +419,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
.auto_tdls = false,
+ .can_ext_scan = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -404,6 +434,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
.auto_tdls = false,
+ .can_ext_scan = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -418,6 +449,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
.auto_tdls = false,
+ .can_ext_scan = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -432,6 +464,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.supports_fw_dump = true,
.auto_tdls = false,
+ .can_ext_scan = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -446,6 +479,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.supports_fw_dump = false,
.auto_tdls = true,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+ .firmware = SD8801_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_fw_dump = false,
+ .auto_tdls = false,
+ .can_ext_scan = true,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 1c2ca291d1f5..f7d204ffd6e9 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
#include "11n.h"
#include "11ac.h"
+static bool disable_auto_ds;
+module_param(disable_auto_ds, bool, 0);
+MODULE_PARM_DESC(disable_auto_ds,
+ "deepsleep enabled=0(default), deepsleep disabled=1");
/*
* This function prepares command to set/get RSSI information.
*
@@ -1893,6 +1897,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_CHAN_REPORT_REQUEST:
+ ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
+ data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1907,6 +1915,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
*
* This is called after firmware download to bring the card to
* working state.
+ * This function is also called during reinitialization of virtual
+ * interfaces.
*
* The following commands are issued sequentially -
* - Set PCI-Express host buffer configuration (PCIE only)
@@ -1921,7 +1931,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
* - Set 11d control
* - Set MAC control (this must be the last command to initialize firmware)
*/
-int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
+int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
@@ -2031,7 +2041,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- if (first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
+ if (!disable_auto_ds &&
+ first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
@@ -2054,9 +2065,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
"11D: failed to enable 11D\n");
}
- /* set last_init_cmd before sending the command */
- priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
-
/* Send cmd to FW to configure 11n specific configuration
* (Short GI, Channel BW, Green field support etc.) for transmit
*/
@@ -2064,7 +2072,11 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
- ret = -EINPROGRESS;
+ if (init) {
+ /* set last_init_cmd before sending the command */
+ priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
+ ret = -EINPROGRESS;
+ }
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index b65e1014b0fc..5f8da5924666 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -248,6 +248,8 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
le32_to_cpu(get_log->wep_icv_err_cnt[2]);
stats->wep_icv_error[3] =
le32_to_cpu(get_log->wep_icv_err_cnt[3]);
+ stats->bcn_rcv_cnt = le32_to_cpu(get_log->bcn_rcv_cnt);
+ stats->bcn_miss_cnt = le32_to_cpu(get_log->bcn_miss_cnt);
}
return 0;
@@ -1103,6 +1105,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_UAP_SYS_CONFIG:
break;
case HostCmd_CMD_UAP_BSS_START:
+ adapter->tx_lock_flag = false;
+ adapter->pps_uapsd_mode = false;
+ adapter->delay_null_pkt = false;
priv->bss_started = 1;
break;
case HostCmd_CMD_UAP_BSS_STOP:
@@ -1117,6 +1122,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_ret_tdls_oper(priv, resp);
break;
+ case HostCmd_CMD_CHAN_REPORT_REQUEST:
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index b8c171df6223..80ffe7412496 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -90,6 +90,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->is_data_rate_auto = true;
priv->data_rate = 0;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
+ GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data)
+ mwifiex_hist_data_reset(priv);
+
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
priv->adhoc_state = ADHOC_IDLE;
priv->adhoc_is_link_sensed = false;
@@ -308,6 +312,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_card_req = false;
adapter->pm_wakeup_fw_try = false;
+ del_timer_sync(&adapter->wakeup_timer);
break;
}
if (!mwifiex_send_null_packet
@@ -322,6 +327,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_card_req = false;
adapter->pm_wakeup_fw_try = false;
+ del_timer_sync(&adapter->wakeup_timer);
break;
@@ -480,7 +486,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_REMAIN_ON_CHAN_EXPIRED:
dev_dbg(adapter->dev, "event: Remain on channel expired\n");
- cfg80211_remain_on_channel_expired(priv->wdev,
+ cfg80211_remain_on_channel_expired(&priv->wdev,
priv->roc_cfg.cookie,
&priv->roc_cfg.chan,
GFP_ATOMIC);
@@ -509,6 +515,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
+ case EVENT_CHANNEL_REPORT_RDY:
+ dev_dbg(adapter->dev, "event: Channel Report\n");
+ ret = mwifiex_11h_handle_chanrpt_ready(priv,
+ adapter->event_skb);
+ break;
+ case EVENT_RADAR_DETECTED:
+ dev_dbg(adapter->dev, "event: Radar detected\n");
+ ret = mwifiex_11h_handle_radar_detected(priv,
+ adapter->event_skb);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1626868a4b5c..0599e41e253c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -219,7 +219,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
rcu_read_unlock();
- wiphy_dbg(priv->wdev->wiphy,
+ wiphy_dbg(priv->wdev.wiphy,
"11D: skip setting domain info in FW\n");
return 0;
}
@@ -902,9 +902,12 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
if (wep_key->key_length) {
void *enc_key;
- if (encrypt_key->key_disable)
+ if (encrypt_key->key_disable) {
memset(&priv->wep_key[index], 0,
sizeof(struct mwifiex_wep_key));
+ if (wep_key->key_length)
+ goto done;
+ }
if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2)
enc_key = encrypt_key;
@@ -918,6 +921,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
return ret;
}
+done:
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
@@ -1131,36 +1135,6 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
return roc_cfg.status;
}
-int
-mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
-{
- if (GET_BSS_ROLE(priv) == bss_role) {
- dev_dbg(priv->adapter->dev,
- "info: already in the desired role.\n");
- return 0;
- }
-
- mwifiex_free_priv(priv);
- mwifiex_init_priv(priv);
-
- priv->bss_role = bss_role;
- switch (bss_role) {
- case MWIFIEX_BSS_ROLE_UAP:
- priv->bss_mode = NL80211_IFTYPE_AP;
- break;
- case MWIFIEX_BSS_ROLE_STA:
- case MWIFIEX_BSS_ROLE_ANY:
- default:
- priv->bss_mode = NL80211_IFTYPE_STATION;
- break;
- }
-
- mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL, true);
-
- return mwifiex_sta_init_cmd(priv, false);
-}
-
/*
* Sends IOCTL request to get statistics information.
*
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index c2ad3b63ae70..b8729c9394e9 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -90,6 +90,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct ethhdr *eth;
u16 rx_pkt_off, rx_pkt_len;
u8 *offset;
+ u8 adj_rx_rate = 0;
local_rx_pd = (struct rxpd *) (skb->data);
@@ -155,6 +156,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
priv->rxpd_htinfo = local_rx_pd->ht_info;
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
+ GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ adj_rx_rate = mwifiex_adjust_data_rate(priv, priv->rxpd_rate,
+ priv->rxpd_htinfo);
+ mwifiex_hist_data_add(priv, adj_rx_rate, local_rx_pd->snr,
+ local_rx_pd->nf);
+ }
+
ret = mwifiex_recv_packet(priv, skb);
if (ret == -1)
dev_err(priv->adapter->dev, "recv packet failed\n");
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index b896d7375b52..5ce2d9a4f919 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -47,8 +47,10 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
- u8 pad;
+ unsigned int pad;
u16 pkt_type, pkt_offset;
+ int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 0 :
+ INTF_HEADER_LEN;
if (!skb->len) {
dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -56,13 +58,12 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
return skb->data;
}
- pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+ BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
- /* If skb->data is not aligned; add padding */
- pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
+ pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
- BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN
- + pad));
+ pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)-
+ NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
skb_push(skb, sizeof(*local_tx_pd) + pad);
local_tx_pd = (struct txpd *) skb->data;
@@ -70,8 +71,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
- (sizeof(struct txpd)
- + pad)));
+ (sizeof(struct txpd) +
+ pad)));
local_tx_pd->priority = (u8) skb->priority;
local_tx_pd->pkt_delay_2ms =
@@ -115,7 +116,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
/* make space for INTF_HEADER_LEN */
- skb_push(skb, INTF_HEADER_LEN);
+ skb_push(skb, hroom);
if (!local_tx_pd->tx_control)
/* TxCtrl set by user or default */
@@ -182,9 +183,13 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
}
switch (ret) {
case -EBUSY:
- adapter->data_sent = true;
- /* Fall through FAILURE handling */
+ dev_kfree_skb_any(skb);
+ dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
+ __func__, ret);
+ adapter->dbg.num_tx_host_to_card_failure++;
+ break;
case -1:
+ adapter->data_sent = false;
dev_kfree_skb_any(skb);
dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
__func__, ret);
@@ -197,6 +202,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
adapter->tx_lock_flag = true;
break;
case -EINPROGRESS:
+ adapter->tx_lock_flag = true;
break;
default:
break;
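
The mwifiex_process_sta_txpd() hunk above (and the matching uap_txrx.c change further below) replaces the old 4-byte alignment of skb->data with padding that aligns the start of the pushed headers, the TxPD plus the optional interface header, to MWIFIEX_DMA_ALIGN_SZ. A small userspace sketch of the same arithmetic follows; the alignment value of 64, the header sizes and the sample address are assumptions used only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration; the driver defines the real ones. */
#define DMA_ALIGN_SZ   64          /* must be a power of two */
#define TXPD_LEN       32          /* stand-in for sizeof(struct txpd) */
#define INTF_HDR_LEN   4           /* 0 for USB, as in the patch */

/* Mirror of the new formula: how many extra bytes to push along with the
 * headers so the final header start lands on an alignment boundary. */
static unsigned int tx_header_pad(uintptr_t skb_data, unsigned int hroom)
{
	uintptr_t hdr_start = skb_data - (TXPD_LEN + hroom);

	return hdr_start & (DMA_ALIGN_SZ - 1);
}

int main(void)
{
	uintptr_t skb_data = 0x10000 + 110;   /* hypothetical skb->data */
	unsigned int pad = tx_header_pad(skb_data, INTF_HDR_LEN);
	uintptr_t final_start = skb_data - (TXPD_LEN + pad) - INTF_HDR_LEN;

	printf("pad = %u, header start %% %d = %lu\n",
	       pad, DMA_ALIGN_SZ, (unsigned long)(final_start % DMA_ALIGN_SZ));
	return 0;
}
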
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 22884b429be7..087d84762cd3 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -1123,6 +1123,36 @@ int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
return TDLS_NOT_SETUP;
}
+int mwifiex_get_tdls_list(struct mwifiex_private *priv,
+ struct tdls_peer_info *buf)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct tdls_peer_info *peer = buf;
+ int count = 0;
+ unsigned long flags;
+
+ if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return 0;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return 0;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ list_for_each_entry(sta_ptr, &priv->sta_list, list) {
+ if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+ ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
+ peer++;
+ count++;
+ if (count >= MWIFIEX_MAX_TDLS_PEER_SUPPORTED)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ return count;
+}
+
void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *sta_ptr;
@@ -1367,9 +1397,8 @@ void mwifiex_check_auto_tdls(unsigned long context)
void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv)
{
- init_timer(&priv->auto_tdls_timer);
- priv->auto_tdls_timer.function = mwifiex_check_auto_tdls;
- priv->auto_tdls_timer.data = (unsigned long)priv;
+ setup_timer(&priv->auto_tdls_timer, mwifiex_check_auto_tdls,
+ (unsigned long)priv);
priv->auto_tdls_timer_active = true;
mod_timer(&priv->auto_tdls_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 6ae133333363..ac93557cbdc9 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -227,7 +227,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
/* consumes ack_skb */
skb_complete_wifi_ack(ack_skb, !tx_status->status);
} else {
- cfg80211_mgmt_tx_status(priv->wdev, tx_info->cookie,
+ cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
ack_skb->data, ack_skb->len,
!tx_status->status, GFP_ATOMIC);
dev_kfree_skb_any(ack_skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 0f347fdefa0a..f5c2af01ba0a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -761,6 +761,11 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
return -1;
break;
+ case HostCmd_CMD_CHAN_REPORT_REQUEST:
+ if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
+ data_buf))
+ return -1;
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
@@ -769,3 +774,68 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return 0;
}
+
+void mwifiex_uap_set_channel(struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_chan_def chandef)
+{
+ u8 config_bands = 0;
+
+ bss_cfg->channel = ieee80211_frequency_to_channel(
+ chandef.chan->center_freq);
+
+ /* Set appropriate bands */
+ if (chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ bss_cfg->band_cfg = BAND_CONFIG_BG;
+ config_bands = BAND_B | BAND_G;
+
+ if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
+ config_bands |= BAND_GN;
+ } else {
+ bss_cfg->band_cfg = BAND_CONFIG_A;
+ config_bands = BAND_A;
+
+ if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
+ config_bands |= BAND_AN;
+
+ if (chandef.width > NL80211_CHAN_WIDTH_40)
+ config_bands |= BAND_AAC;
+ }
+}
+
+int mwifiex_config_start_uap(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg)
+{
+ if (mwifiex_del_mgmt_ies(priv))
+ dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n");
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true)) {
+ dev_err(priv->adapter->dev, "Failed to stop the BSS\n");
+ return -1;
+ }
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg, false)) {
+ dev_err(priv->adapter->dev, "Failed to set the SSID\n");
+ return -1;
+ }
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
+ dev_err(priv->adapter->dev, "Failed to start the BSS\n");
+ return -1;
+ }
+
+ if (priv->sec_info.wep_enabled)
+ priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
+ else
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true))
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index c54a537e31fb..f4794cdc36d2 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -68,7 +68,6 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
len = ETH_ALEN;
if (len != -1) {
- sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
sinfo.assoc_req_ies = &event->data[len];
len = (u8 *)sinfo.assoc_req_ies -
(u8 *)&event->frame_control;
@@ -132,6 +131,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
ETH_ALEN);
+ if (priv->hist_data)
+ mwifiex_hist_data_reset(priv);
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
@@ -177,6 +178,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
+ case EVENT_PS_SLEEP:
+ dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+
+ adapter->ps_state = PS_STATE_PRE_SLEEP;
+
+ mwifiex_check_ps_cond(adapter);
+ break;
+
+ case EVENT_PS_AWAKE:
+ dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+ if (!adapter->pps_uapsd_mode &&
+ priv->media_connected && adapter->sleep_period.period) {
+ adapter->pps_uapsd_mode = true;
+ dev_dbg(adapter->dev,
+ "event: PPS/UAPSD mode activated\n");
+ }
+ adapter->tx_lock_flag = false;
+ if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
+ if (mwifiex_check_last_packet_indication(priv)) {
+ if (adapter->data_sent) {
+ adapter->ps_state = PS_STATE_AWAKE;
+ adapter->pm_wakeup_card_req = false;
+ adapter->pm_wakeup_fw_try = false;
+ break;
+ }
+ if (!mwifiex_send_null_packet
+ (priv,
+ MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
+ MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
+ adapter->ps_state =
+ PS_STATE_SLEEP;
+ return 0;
+ }
+ }
+ adapter->ps_state = PS_STATE_AWAKE;
+ adapter->pm_wakeup_card_req = false;
+ adapter->pm_wakeup_fw_try = false;
+ break;
+
+ case EVENT_CHANNEL_REPORT_RDY:
+ dev_dbg(adapter->dev, "event: Channel Report\n");
+ mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
+ break;
+ case EVENT_RADAR_DETECTED:
+ dev_dbg(adapter->dev, "event: Radar detected\n");
+ mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index be3a203a529b..38ac4d74c486 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -348,8 +348,10 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct uap_txpd *txpd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
- int pad, len;
- u16 pkt_type;
+ int pad;
+ u16 pkt_type, pkt_offset;
+ int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 0 :
+ INTF_HEADER_LEN;
if (!skb->len) {
dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -357,22 +359,21 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
return skb->data;
}
- pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
-
- /* If skb->data is not aligned, add padding */
- pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
+ BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
- len = sizeof(*txpd) + pad;
+ pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
- BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN);
+ pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
+ (MWIFIEX_DMA_ALIGN_SZ - 1);
- skb_push(skb, len);
+ skb_push(skb, sizeof(*txpd) + pad);
txpd = (struct uap_txpd *)skb->data;
memset(txpd, 0, sizeof(*txpd));
txpd->bss_num = priv->bss_num;
txpd->bss_type = priv->bss_type;
- txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
+ txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
+ pad)));
txpd->priority = (u8)skb->priority;
txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
@@ -392,16 +393,17 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
/* Offset of actual data */
+ pkt_offset = sizeof(*txpd) + pad;
if (pkt_type == PKT_TYPE_MGMT) {
/* Set the packet type and add header for management frame */
txpd->tx_pkt_type = cpu_to_le16(pkt_type);
- len += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+ pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
}
- txpd->tx_pkt_offset = cpu_to_le16(len);
+ txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);
/* make space for INTF_HEADER_LEN */
- skb_push(skb, INTF_HEADER_LEN);
+ skb_push(skb, hroom);
if (!txpd->tx_control)
/* TxCtrl set by user or default */
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 1b56495ec872..223873022ffe 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -37,6 +37,11 @@ static struct usb_device_id mwifiex_usb_table[] = {
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+ /* 8801 */
+ {USB_DEVICE(USB8XXX_VID, USB8801_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
/* 8897 */
{USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
@@ -361,11 +366,13 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
switch (id_product) {
case USB8766_PID_1:
case USB8797_PID_1:
+ case USB8801_PID_1:
case USB8897_PID_1:
card->usb_boot_state = USB8XXX_FW_DNLD;
break;
case USB8766_PID_2:
case USB8797_PID_2:
+ case USB8801_PID_2:
case USB8897_PID_2:
card->usb_boot_state = USB8XXX_FW_READY;
break;
@@ -792,11 +799,19 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
case USB8897_PID_2:
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
+ adapter->ext_scan = true;
break;
case USB8766_PID_1:
case USB8766_PID_2:
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
strcpy(adapter->fw_name, USB8766_DEFAULT_FW_NAME);
+ adapter->ext_scan = true;
+ break;
+ case USB8801_PID_1:
+ case USB8801_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
+ strcpy(adapter->fw_name, USB8801_DEFAULT_FW_NAME);
+ adapter->ext_scan = false;
break;
case USB8797_PID_1:
case USB8797_PID_2:
@@ -930,7 +945,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
} while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
cleanup:
- dev_dbg(adapter->dev, "%s: %d bytes downloaded\n", __func__, tlen);
+ dev_notice(adapter->dev,
+ "info: FW download over, size %d bytes\n", tlen);
kfree(recv_buff);
kfree(fwdata);
@@ -990,6 +1006,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
/* Simulation of HS_AWAKE event */
adapter->pm_wakeup_fw_try = false;
+ del_timer_sync(&adapter->wakeup_timer);
adapter->pm_wakeup_card_req = false;
adapter->ps_state = PS_STATE_AWAKE;
@@ -1010,6 +1027,13 @@ static void mwifiex_usb_submit_rem_rx_urbs(struct mwifiex_adapter *adapter)
}
}
+/* This function is called after the card has woken up. */
+static inline int
+mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
+{
+ return 0;
+}
+
static struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
@@ -1074,4 +1098,5 @@ MODULE_VERSION(USB_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index a7cbba1355af..57e1a5736318 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -30,6 +30,9 @@
#define USB8797_PID_2 0x2044
#define USB8897_PID_1 0x2045
#define USB8897_PID_2 0x2046
+#define USB8801_PID_1 0x2049
+#define USB8801_PID_2 0x204a
+
#define USB8XXX_FW_DNLD 1
#define USB8XXX_FW_READY 2
@@ -41,6 +44,7 @@
#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
+#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
#define FW_DNLD_TX_BUF_SIZE 620
@@ -96,11 +100,4 @@ struct fw_data {
u8 data[1];
};
-/* This function is called after the card has woken up. */
-static inline int
-mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
-{
- return 0;
-}
-
#endif /*_MWIFIEX_USB_H */
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index b1768fbf98f2..308550611f22 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -25,6 +25,96 @@
#include "wmm.h"
#include "11n.h"
+static struct mwifiex_debug_data items[] = {
+ {"int_counter", item_size(int_counter),
+ item_addr(int_counter), 1},
+ {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
+ item_addr(packets_out[WMM_AC_VO]), 1},
+ {"wmm_ac_vi", item_size(packets_out[WMM_AC_VI]),
+ item_addr(packets_out[WMM_AC_VI]), 1},
+ {"wmm_ac_be", item_size(packets_out[WMM_AC_BE]),
+ item_addr(packets_out[WMM_AC_BE]), 1},
+ {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
+ item_addr(packets_out[WMM_AC_BK]), 1},
+ {"tx_buf_size", item_size(tx_buf_size),
+ item_addr(tx_buf_size), 1},
+ {"curr_tx_buf_size", item_size(curr_tx_buf_size),
+ item_addr(curr_tx_buf_size), 1},
+ {"ps_mode", item_size(ps_mode),
+ item_addr(ps_mode), 1},
+ {"ps_state", item_size(ps_state),
+ item_addr(ps_state), 1},
+ {"is_deep_sleep", item_size(is_deep_sleep),
+ item_addr(is_deep_sleep), 1},
+ {"wakeup_dev_req", item_size(pm_wakeup_card_req),
+ item_addr(pm_wakeup_card_req), 1},
+ {"wakeup_tries", item_size(pm_wakeup_fw_try),
+ item_addr(pm_wakeup_fw_try), 1},
+ {"hs_configured", item_size(is_hs_configured),
+ item_addr(is_hs_configured), 1},
+ {"hs_activated", item_size(hs_activated),
+ item_addr(hs_activated), 1},
+ {"num_tx_timeout", item_size(num_tx_timeout),
+ item_addr(num_tx_timeout), 1},
+ {"is_cmd_timedout", item_size(is_cmd_timedout),
+ item_addr(is_cmd_timedout), 1},
+ {"timeout_cmd_id", item_size(timeout_cmd_id),
+ item_addr(timeout_cmd_id), 1},
+ {"timeout_cmd_act", item_size(timeout_cmd_act),
+ item_addr(timeout_cmd_act), 1},
+ {"last_cmd_id", item_size(last_cmd_id),
+ item_addr(last_cmd_id), DBG_CMD_NUM},
+ {"last_cmd_act", item_size(last_cmd_act),
+ item_addr(last_cmd_act), DBG_CMD_NUM},
+ {"last_cmd_index", item_size(last_cmd_index),
+ item_addr(last_cmd_index), 1},
+ {"last_cmd_resp_id", item_size(last_cmd_resp_id),
+ item_addr(last_cmd_resp_id), DBG_CMD_NUM},
+ {"last_cmd_resp_index", item_size(last_cmd_resp_index),
+ item_addr(last_cmd_resp_index), 1},
+ {"last_event", item_size(last_event),
+ item_addr(last_event), DBG_CMD_NUM},
+ {"last_event_index", item_size(last_event_index),
+ item_addr(last_event_index), 1},
+ {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
+ item_addr(num_cmd_host_to_card_failure), 1},
+ {"num_cmd_sleep_cfm_fail",
+ item_size(num_cmd_sleep_cfm_host_to_card_failure),
+ item_addr(num_cmd_sleep_cfm_host_to_card_failure), 1},
+ {"num_tx_h2c_fail", item_size(num_tx_host_to_card_failure),
+ item_addr(num_tx_host_to_card_failure), 1},
+ {"num_evt_deauth", item_size(num_event_deauth),
+ item_addr(num_event_deauth), 1},
+ {"num_evt_disassoc", item_size(num_event_disassoc),
+ item_addr(num_event_disassoc), 1},
+ {"num_evt_link_lost", item_size(num_event_link_lost),
+ item_addr(num_event_link_lost), 1},
+ {"num_cmd_deauth", item_size(num_cmd_deauth),
+ item_addr(num_cmd_deauth), 1},
+ {"num_cmd_assoc_ok", item_size(num_cmd_assoc_success),
+ item_addr(num_cmd_assoc_success), 1},
+ {"num_cmd_assoc_fail", item_size(num_cmd_assoc_failure),
+ item_addr(num_cmd_assoc_failure), 1},
+ {"cmd_sent", item_size(cmd_sent),
+ item_addr(cmd_sent), 1},
+ {"data_sent", item_size(data_sent),
+ item_addr(data_sent), 1},
+ {"cmd_resp_received", item_size(cmd_resp_received),
+ item_addr(cmd_resp_received), 1},
+ {"event_received", item_size(event_received),
+ item_addr(event_received), 1},
+
+ /* variables defined in struct mwifiex_adapter */
+ {"cmd_pending", adapter_item_size(cmd_pending),
+ adapter_item_addr(cmd_pending), 1},
+ {"tx_pending", adapter_item_size(tx_pending),
+ adapter_item_addr(tx_pending), 1},
+ {"rx_pending", adapter_item_size(rx_pending),
+ adapter_item_addr(rx_pending), 1},
+};
+
+static int num_of_items = ARRAY_SIZE(items);
+
/*
* Firmware initialization complete callback handler.
*
@@ -97,6 +187,8 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->rx_tbl);
info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(priv,
info->tx_tbl);
+ info->tdls_peer_num = mwifiex_get_tdls_list(priv,
+ info->tdls_list);
info->ps_mode = adapter->ps_mode;
info->ps_state = adapter->ps_state;
info->is_deep_sleep = adapter->is_deep_sleep;
@@ -141,6 +233,93 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
return 0;
}
+int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
+ struct mwifiex_debug_info *info)
+{
+ char *p = buf;
+ struct mwifiex_debug_data *d = &items[0];
+ size_t size, addr;
+ long val;
+ int i, j;
+
+ if (!info)
+ return 0;
+
+ for (i = 0; i < num_of_items; i++) {
+ p += sprintf(p, "%s=", d[i].name);
+
+ size = d[i].size / d[i].num;
+
+ if (i < (num_of_items - 3))
+ addr = d[i].addr + (size_t)info;
+ else /* The last 3 items are struct mwifiex_adapter variables */
+ addr = d[i].addr + (size_t)priv->adapter;
+
+ for (j = 0; j < d[i].num; j++) {
+ switch (size) {
+ case 1:
+ val = *((u8 *)addr);
+ break;
+ case 2:
+ val = *((u16 *)addr);
+ break;
+ case 4:
+ val = *((u32 *)addr);
+ break;
+ case 8:
+ val = *((long long *)addr);
+ break;
+ default:
+ val = -1;
+ break;
+ }
+
+ p += sprintf(p, "%#lx ", val);
+ addr += size;
+ }
+
+ p += sprintf(p, "\n");
+ }
+
+ if (info->tx_tbl_num) {
+ p += sprintf(p, "Tx BA stream table:\n");
+ for (i = 0; i < info->tx_tbl_num; i++)
+ p += sprintf(p, "tid = %d, ra = %pM\n",
+ info->tx_tbl[i].tid, info->tx_tbl[i].ra);
+ }
+
+ if (info->rx_tbl_num) {
+ p += sprintf(p, "Rx reorder table:\n");
+ for (i = 0; i < info->rx_tbl_num; i++) {
+ p += sprintf(p, "tid = %d, ta = %pM, ",
+ info->rx_tbl[i].tid,
+ info->rx_tbl[i].ta);
+ p += sprintf(p, "start_win = %d, ",
+ info->rx_tbl[i].start_win);
+ p += sprintf(p, "win_size = %d, buffer: ",
+ info->rx_tbl[i].win_size);
+
+ for (j = 0; j < info->rx_tbl[i].win_size; j++)
+ p += sprintf(p, "%c ",
+ info->rx_tbl[i].buffer[j] ?
+ '1' : '0');
+
+ p += sprintf(p, "\n");
+ }
+ }
+
+ if (info->tdls_peer_num) {
+ p += sprintf(p, "TDLS peer table:\n");
+ for (i = 0; i < info->tdls_peer_num; i++) {
+ p += sprintf(p, "peer = %pM",
+ info->tdls_list[i].peer_addr);
+ p += sprintf(p, "\n");
+ }
+ }
+
+ return p - buf;
+}
+
static int
mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
struct rxpd *rx_pd)
@@ -208,7 +387,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
pkt_len -= ETH_ALEN + sizeof(pkt_len);
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
- cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
+ cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
0);
@@ -404,3 +583,44 @@ void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return;
}
+
+/* This function adds histogram data to histogram array*/
+void mwifiex_hist_data_add(struct mwifiex_private *priv,
+ u8 rx_rate, s8 snr, s8 nflr)
+{
+ struct mwifiex_histogram_data *phist_data = priv->hist_data;
+
+ if (atomic_read(&phist_data->num_samples) > MWIFIEX_HIST_MAX_SAMPLES)
+ mwifiex_hist_data_reset(priv);
+ mwifiex_hist_data_set(priv, rx_rate, snr, nflr);
+}
+
+/* function to add histogram record */
+void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
+ s8 nflr)
+{
+ struct mwifiex_histogram_data *phist_data = priv->hist_data;
+
+ atomic_inc(&phist_data->num_samples);
+ atomic_inc(&phist_data->rx_rate[rx_rate]);
+ atomic_inc(&phist_data->snr[snr]);
+ atomic_inc(&phist_data->noise_flr[128 + nflr]);
+ atomic_inc(&phist_data->sig_str[nflr - snr]);
+}
+
+/* function to reset histogram data during init/reset */
+void mwifiex_hist_data_reset(struct mwifiex_private *priv)
+{
+ int ix;
+ struct mwifiex_histogram_data *phist_data = priv->hist_data;
+
+ atomic_set(&phist_data->num_samples, 0);
+ for (ix = 0; ix < MWIFIEX_MAX_AC_RX_RATES; ix++)
+ atomic_set(&phist_data->rx_rate[ix], 0);
+ for (ix = 0; ix < MWIFIEX_MAX_SNR; ix++)
+ atomic_set(&phist_data->snr[ix], 0);
+ for (ix = 0; ix < MWIFIEX_MAX_NOISE_FLR; ix++)
+ atomic_set(&phist_data->noise_flr[ix], 0);
+ for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++)
+ atomic_set(&phist_data->sig_str[ix], 0);
+}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index 40296cb4a3f1..b541d66c01eb 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -20,6 +20,8 @@
#ifndef _MWIFIEX_UTIL_H_
#define _MWIFIEX_UTIL_H_
+struct mwifiex_private;
+
struct mwifiex_dma_mapping {
dma_addr_t addr;
size_t len;
@@ -33,6 +35,21 @@ struct mwifiex_cb {
};
};
+/* size/addr for mwifiex_debug_info */
+#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
+#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
+
+/* size/addr for struct mwifiex_adapter */
+#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
+#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
+
+struct mwifiex_debug_data {
+ char name[32]; /* variable/array name */
+ u32 size; /* size of the variable/array */
+ size_t addr; /* address of the variable/array */
+ int num; /* number of variables in an array */
+};
+
static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
{
struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
@@ -73,4 +90,7 @@ static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
return mapping.addr;
}
+int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
+ struct mwifiex_debug_info *info);
+
#endif /* !_MWIFIEX_UTIL_H_ */
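
Note on the technique introduced by the util.c/util.h hunks above: each debug counter is registered as a {name, size, offset, count} row built with FIELD_SIZEOF()/offsetof(), so a single generic routine (mwifiex_debug_info_to_buffer) can walk struct mwifiex_debug_info without per-field code. A minimal standalone sketch of the same offset-table idea follows; the struct, field names, and ITEM macro here are hypothetical stand-ins, not the driver's definitions.

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stats structure standing in for mwifiex_debug_info. */
struct demo_stats {
	uint32_t tx_timeouts;
	uint16_t last_cmd_id[4];
	uint8_t  ps_state;
};

/* One row per exported field: name, element size, offset, element count. */
struct debug_item {
	const char *name;
	size_t size;
	size_t offset;
	int num;
};

#define ITEM(field, count) \
	{ #field, sizeof(((struct demo_stats *)0)->field) / (count), \
	  offsetof(struct demo_stats, field), (count) }

static const struct debug_item items[] = {
	ITEM(tx_timeouts, 1),
	ITEM(last_cmd_id, 4),
	ITEM(ps_state, 1),
};

/* Walk the table and print "name=val ..." lines, in the spirit of what
 * mwifiex_debug_info_to_buffer() writes into its debugfs buffer. */
static int dump_stats(const struct demo_stats *s, char *buf)
{
	char *p = buf;

	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		const uint8_t *addr = (const uint8_t *)s + items[i].offset;

		p += sprintf(p, "%s=", items[i].name);
		for (int j = 0; j < items[i].num; j++, addr += items[i].size) {
			unsigned long val = 0;

			switch (items[i].size) {
			case 1: val = *(const uint8_t *)addr; break;
			case 2: val = *(const uint16_t *)addr; break;
			case 4: val = *(const uint32_t *)addr; break;
			}
			p += sprintf(p, "%#lx ", val);
		}
		p += sprintf(p, "\n");
	}
	return (int)(p - buf);
}

int main(void)
{
	struct demo_stats s = {
		.tx_timeouts = 3,
		.last_cmd_id = { 0x10, 0x24 },
		.ps_state = 1,
	};
	char buf[256];

	dump_stats(&s, buf);
	fputs(buf, stdout);
	return 0;
}
```

The driver's version additionally switches the base pointer for the last three rows (adapter-level counters) and divides the stored size by the element count at dump time; the sketch keeps one base pointer to stay short.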
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index ffffd2c5a76e..ef717acec8b7 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1228,6 +1228,9 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
case -EINPROGRESS:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
+ break;
+ case 0:
+ mwifiex_write_data_complete(adapter, skb, 0, ret);
default:
break;
}
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b8d1e04aa9b9..f9b1218c761a 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -3098,14 +3098,14 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv,
cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG);
cca_cnt /= 1000; /* uSecs to mSecs */
- survey->channel_time_busy = (u64) cca_cnt;
+ survey->time_busy = (u64) cca_cnt;
rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG);
rx_rdy /= 1000; /* uSecs to mSecs */
- survey->channel_time_rx = (u64) rx_rdy;
+ survey->time_rx = (u64) rx_rdy;
priv->channel_time = jiffies - priv->channel_time;
- survey->channel_time = jiffies_to_msecs(priv->channel_time);
+ survey->time = jiffies_to_msecs(priv->channel_time);
survey->channel = channel;
@@ -3115,9 +3115,9 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv,
survey->noise = nf * -1;
survey->filled = SURVEY_INFO_NOISE_DBM |
- SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_RX;
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX;
}
/*
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 60698b020851..6d831d4d1b5f 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -1,7 +1,8 @@
config HERMES
tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
depends on (PPC_PMAC || PCI || PCMCIA)
- depends on CFG80211 && CFG80211_WEXT
+ depends on CFG80211
+ select CFG80211_WEXT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 38ec8d19ac29..c410180479e6 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2342,7 +2342,7 @@ void free_orinocodev(struct orinoco_private *priv)
list_for_each_entry_safe(sd, sdtemp, &priv->scan_list, list) {
list_del(&sd->list);
- if ((sd->len > 0) && sd->buf)
+ if (sd->len > 0)
kfree(sd->buf);
kfree(sd);
}
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index b6bdad632842..74219d59d7e1 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -94,7 +94,7 @@ static int orinoco_pci_cor_reset(struct orinoco_private *priv)
mdelay(HERMES_PCI_COR_OFFT);
/* The card is ready when it's no longer busy */
- timeout = jiffies + (HERMES_PCI_COR_BUSYT * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(HERMES_PCI_COR_BUSYT);
reg = hermes_read_regn(hw, CMD);
while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
mdelay(1);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index b8f6e5c431ae..8b045236b6e0 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -121,7 +121,7 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv)
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
- timeout = jiffies + (PLX_RESET_TIME * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(PLX_RESET_TIME);
reg = hermes_read_regn(hw, CMD);
while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
mdelay(1);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 79d0e33b625e..20ce569b8a43 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -71,7 +71,7 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
- timeout = jiffies + (TMD_RESET_TIME * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(TMD_RESET_TIME);
reg = hermes_read_regn(hw, CMD);
while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
mdelay(1);
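
The three orinoco reset changes above replace the open-coded `X * HZ / 1000` conversion with `msecs_to_jiffies()`, which rounds up instead of truncating (important when HZ is low). A hedged kernel-style sketch of the resulting poll loop; the helper name, the `ready()` predicate, and the 500 ms budget are illustrative assumptions, not driver code.

```c
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define RESET_TIMEOUT_MS 500	/* hypothetical budget, in milliseconds */

/* Poll a hypothetical readiness predicate until it succeeds or the
 * millisecond budget expires; 0 on success, -ETIMEDOUT otherwise. */
static int wait_until_ready(bool (*ready)(void))
{
	unsigned long timeout = jiffies + msecs_to_jiffies(RESET_TIMEOUT_MS);

	while (time_before(jiffies, timeout)) {
		if (ready())
			return 0;
		mdelay(1);
	}
	return ready() ? 0 : -ETIMEDOUT;
}
```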
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 995846422dc0..91f05442de28 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -364,9 +364,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
atomic_set(&ctx->refcount, 1);
init_completion(&ctx->done);
- init_timer(&ctx->timer);
- ctx->timer.function = ezusb_request_timerfn;
- ctx->timer.data = (u_long) ctx;
+ setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
return ctx;
}
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 0fe67d2da208..2fe713eda7ad 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -196,9 +196,9 @@ static int p54_generate_band(struct ieee80211_hw *dev,
dest->max_power = chan->max_power;
priv->survey[*chan_num].channel = &tmp->channels[j];
priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
- SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_TX;
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_TX;
dest->hw_value = (*chan_num);
j++;
(*chan_num)++;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index bc065e8e348b..5367d510b22d 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -220,6 +220,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
struct sk_buff *skb;
size_t eeprom_hdr_size;
int ret = 0;
+ long timeout;
if (priv->fw_var >= 0x509)
eeprom_hdr_size = sizeof(*eeprom_hdr);
@@ -249,9 +250,11 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
p54_tx(priv, skb);
- if (!wait_for_completion_interruptible_timeout(
- &priv->eeprom_comp, HZ)) {
- wiphy_err(priv->hw->wiphy, "device does not respond!\n");
+ timeout = wait_for_completion_interruptible_timeout(
+ &priv->eeprom_comp, HZ);
+ if (timeout <= 0) {
+ wiphy_err(priv->hw->wiphy,
+ "device does not respond or signal received!\n");
ret = -EBUSY;
}
priv->eeprom = NULL;
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 97aeff0edb84..b9250d75d253 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -305,9 +305,9 @@ static void p54_reset_stats(struct p54_common *priv)
struct survey_info *info = &priv->survey[chan->hw_value];
/* only reset channel statistics, don't touch .filled, etc. */
- info->channel_time = 0;
- info->channel_time_busy = 0;
- info->channel_time_tx = 0;
+ info->time = 0;
+ info->time_busy = 0;
+ info->time_tx = 0;
}
priv->update_stats = true;
@@ -575,6 +575,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
key->hw_key_idx = 0xff;
goto out_unlock;
}
+
+ key->flags |= IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
} else {
slot = key->hw_key_idx;
@@ -634,7 +636,7 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
if (in_use) {
/* test if the reported statistics are valid. */
- if (survey->channel_time != 0) {
+ if (survey->time != 0) {
survey->filled |= SURVEY_INFO_IN_USE;
} else {
/*
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index d4aee64fb5ea..27a49068d32d 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -431,6 +431,7 @@ static int p54p_open(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
int err;
+ long timeout;
init_completion(&priv->boot_comp);
err = request_irq(priv->pdev->irq, p54p_interrupt,
@@ -468,10 +469,12 @@ static int p54p_open(struct ieee80211_hw *dev)
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
P54P_READ(dev_int);
- if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
+ timeout = wait_for_completion_interruptible_timeout(
+ &priv->boot_comp, HZ);
+ if (timeout <= 0) {
wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
p54p_stop(dev);
- return -ETIMEDOUT;
+ return timeout ? -ERESTARTSYS : -ETIMEDOUT;
}
P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
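
Both p54 hunks above stop treating any falsy return of `wait_for_completion_interruptible_timeout()` as a plain timeout: the call returns the remaining jiffies on success, 0 on timeout, and a negative value (-ERESTARTSYS) when a signal interrupted the wait. A hedged kernel-style sketch of the pattern; the helper and the completion it waits on are hypothetical.

```c
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Wait up to one second for a hypothetical boot_done completion and
 * report timeouts and signals as distinct errors. */
static int wait_for_boot(struct completion *boot_done)
{
	long timeout;

	timeout = wait_for_completion_interruptible_timeout(boot_done, HZ);
	if (timeout < 0)	/* interrupted by a signal */
		return timeout;	/* typically -ERESTARTSYS  */
	if (timeout == 0)	/* the full HZ elapsed     */
		return -ETIMEDOUT;
	return 0;		/* completed with jiffies to spare */
}
```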
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 153c61539ec8..24e5ff9a9272 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -587,13 +587,13 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
survey->noise = clamp(priv->noise, -128, 127);
- survey->channel_time = priv->survey_raw.active;
- survey->channel_time_tx = priv->survey_raw.tx;
- survey->channel_time_busy = priv->survey_raw.tx +
+ survey->time = priv->survey_raw.active;
+ survey->time_tx = priv->survey_raw.tx;
+ survey->time_busy = priv->survey_raw.tx +
priv->survey_raw.cca;
- do_div(survey->channel_time, 1024);
- do_div(survey->channel_time_tx, 1024);
- do_div(survey->channel_time_busy, 1024);
+ do_div(survey->time, 1024);
+ do_div(survey->time_tx, 1024);
+ do_div(survey->time_busy, 1024);
}
tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1a4facd1fbf3..60d44ce9c017 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2478,7 +2478,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
if (ret == 0) {
sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
}
len = sizeof(rssi);
@@ -2486,7 +2486,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
&rssi, &len);
if (ret == 0) {
sinfo->signal = level_to_qual(le32_to_cpu(rssi));
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 4834a9abc171..b6cc9ff47fc2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,7 +172,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
u32 len;
u32 num_blocks;
- const u8 *fw;
const struct firmware *fw_entry = NULL;
u32 block_size = dev->tx_blk_size;
int status = 0;
@@ -201,7 +200,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
- fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
len = fw_entry->size;
if (len % 4)
@@ -212,7 +210,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
- status = rsi_copy_to_card(common, fw, len, num_blocks);
+ status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 81ee481487cf..be2d54f257b1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -8020,13 +8020,13 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext);
if (idle || busy) {
- survey->filled = SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_EXT_BUSY;
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_EXT_BUSY;
- survey->channel_time = (idle + busy) / 1000;
- survey->channel_time_busy = busy / 1000;
- survey->channel_time_ext_busy = busy_ext / 1000;
+ survey->time = (idle + busy) / 1000;
+ survey->time_busy = busy / 1000;
+ survey->time_ext_busy = busy_ext / 1000;
}
if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
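
The mwl8k, p54, and rt2800 hunks in this section all follow the cfg80211 rename of the `struct survey_info` channel-time fields (`channel_time*` to `time*`) and of the matching `SURVEY_INFO_CHANNEL_TIME*` flags. A hedged sketch of filling the renamed fields in a get_survey-style callback; the helper and its millisecond counter arguments are assumptions, only the field and flag names come from the series above.

```c
#include <net/cfg80211.h>

/* Fill one survey entry from hypothetical per-channel counters kept in
 * milliseconds; flag only the fields that were actually measured. */
static void fill_survey(struct survey_info *survey,
			struct ieee80211_channel *chan,
			u64 active_ms, u64 busy_ms, u64 rx_ms)
{
	survey->channel = chan;
	survey->time = active_ms;
	survey->time_busy = busy_ms;
	survey->time_rx = rx_ms;
	survey->filled = SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_BUSY |
			 SURVEY_INFO_TIME_RX;
}
```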
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 1122dc44c9fd..48a2cad29477 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -240,7 +240,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
rt2x00dev->rf_channel = libconf.rf.channel;
}
- if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_PS_AUTOWAKE) &&
(ieee80211_flags & IEEE80211_CONF_CHANGE_PS))
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
@@ -257,7 +257,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
rt2x00link_reset_tuner(rt2x00dev, false);
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
+ rt2x00_has_cap_flag(rt2x00dev, REQUIRE_PS_AUTOWAKE) &&
(ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
(conf->flags & IEEE80211_CONF_PS)) {
beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9967a1d9f0ec..5639ed816813 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -351,7 +351,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* Remove L2 padding which was added during
*/
- if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
rt2x00queue_remove_l2pad(entry->skb, header_length);
/*
@@ -460,7 +460,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* send the status report back.
*/
if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
- if (test_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
ieee80211_tx_status(rt2x00dev->hw, entry->skb);
else
ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
@@ -1056,9 +1056,9 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Take TX headroom required for alignment into account.
*/
- if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
- else if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
+ else if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA))
rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
@@ -1069,7 +1069,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Allocate tx status FIFO for driver use.
*/
- if (test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO)) {
/*
* Allocate the txstatus fifo. In the worst case the tx
* status fifo has to hold the tx status of all entries
@@ -1131,7 +1131,7 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
/*
* Stop rfkill polling.
*/
- if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
rt2x00rfkill_unregister(rt2x00dev);
/*
@@ -1173,7 +1173,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
/*
* Start rfkill polling.
*/
- if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
rt2x00rfkill_register(rt2x00dev);
return 0;
@@ -1389,7 +1389,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
/*
* Start rfkill polling.
*/
- if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
rt2x00rfkill_register(rt2x00dev);
return 0;
@@ -1408,7 +1408,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
/*
* Stop rfkill polling.
*/
- if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
rt2x00rfkill_unregister(rt2x00dev);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index fbae2799e3ee..5813300f68a2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -96,7 +96,7 @@ int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (!test_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_FIRMWARE))
return 0;
if (!rt2x00dev->fw) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index cb40245a0695..300876df056f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -119,7 +119,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
* Use the ATIM queue if appropriate and present.
*/
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
- test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE))
qid = QID_ATIM;
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 66ff36447b94..68b620b2462f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -85,7 +85,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = entry;
- if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
dma_addr_t skb_dma;
skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
@@ -198,7 +198,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
/*
* rt2800 has a H/W (or F/W) bug, device incorrectly increase
* seqno on retransmited data (non-QOS) frames. To workaround
@@ -484,7 +484,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
- if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
sta, hwrate);
else
@@ -526,7 +526,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
/*
* Map the skb to DMA.
*/
- if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
rt2x00queue_map_txskb(entry))
return -ENOMEM;
@@ -646,7 +646,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
*/
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
- if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
rt2x00crypto_tx_copy_iv(skb, &txdesc);
else
rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -660,9 +660,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* PCI and USB devices, while header alignment only is valid
* for PCI devices.
*/
- if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
rt2x00queue_insert_l2pad(skb, txdesc.header_length);
- else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+ else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
rt2x00queue_align_frame(skb);
/*
@@ -1178,7 +1178,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
if (status)
goto exit;
- if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
status = rt2x00queue_alloc_entries(rt2x00dev->atim);
if (status)
goto exit;
@@ -1234,7 +1234,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
enum data_queue_qid qid;
unsigned int req_atim =
- !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
+ rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
/*
* We need the following queues:
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 892270dd3e7b..7627af6098eb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -274,7 +274,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* Schedule the delayed work for reading the TX status
* from the device.
*/
- if (!test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags) ||
+ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) ||
!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
@@ -456,7 +456,7 @@ static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
* Kill guardian urb (if required by driver).
*/
if ((entry->queue->qid == QID_BEACON) &&
- (test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)))
+ (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)))
usb_kill_urb(bcn_priv->guardian_urb);
return false;
@@ -655,7 +655,7 @@ static int rt2x00usb_alloc_entries(struct data_queue *queue)
* then we are done.
*/
if (queue->qid != QID_BEACON ||
- !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
+ !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
return 0;
for (i = 0; i < queue->limit; i++) {
@@ -690,7 +690,7 @@ static void rt2x00usb_free_entries(struct data_queue *queue)
* then we are done.
*/
if (queue->qid != QID_BEACON ||
- !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
+ !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
return;
for (i = 0; i < queue->limit; i++) {
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 40b6d1d006d7..1d4677460711 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -867,63 +867,135 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
*
* B/G rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92_RATE1M-->DESC92_RATE54M ==> idx is 0-->11,
+ * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
*
* N rate:
* (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
*
* 5G band:rx_status->band == IEEE80211_BAND_5GHZ
* A rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92_RATE6M-->DESC92_RATE54M ==> idx is 0-->7,
+ * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
*
* N rate:
* (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ *
+ * VHT rates:
+ * DESC_RATEVHT1SS_MCS0-->DESC_RATEVHT1SS_MCS9 ==> idx is 0-->9
+ * DESC_RATEVHT2SS_MCS0-->DESC_RATEVHT2SS_MCS9 ==> idx is 0-->9
*/
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate, bool first_ampdu)
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
+ u8 desc_rate)
{
int rate_idx;
+ if (isvht) {
+ switch (desc_rate) {
+ case DESC_RATEVHT1SS_MCS0:
+ rate_idx = 0;
+ break;
+ case DESC_RATEVHT1SS_MCS1:
+ rate_idx = 1;
+ break;
+ case DESC_RATEVHT1SS_MCS2:
+ rate_idx = 2;
+ break;
+ case DESC_RATEVHT1SS_MCS3:
+ rate_idx = 3;
+ break;
+ case DESC_RATEVHT1SS_MCS4:
+ rate_idx = 4;
+ break;
+ case DESC_RATEVHT1SS_MCS5:
+ rate_idx = 5;
+ break;
+ case DESC_RATEVHT1SS_MCS6:
+ rate_idx = 6;
+ break;
+ case DESC_RATEVHT1SS_MCS7:
+ rate_idx = 7;
+ break;
+ case DESC_RATEVHT1SS_MCS8:
+ rate_idx = 8;
+ break;
+ case DESC_RATEVHT1SS_MCS9:
+ rate_idx = 9;
+ break;
+ case DESC_RATEVHT2SS_MCS0:
+ rate_idx = 0;
+ break;
+ case DESC_RATEVHT2SS_MCS1:
+ rate_idx = 1;
+ break;
+ case DESC_RATEVHT2SS_MCS2:
+ rate_idx = 2;
+ break;
+ case DESC_RATEVHT2SS_MCS3:
+ rate_idx = 3;
+ break;
+ case DESC_RATEVHT2SS_MCS4:
+ rate_idx = 4;
+ break;
+ case DESC_RATEVHT2SS_MCS5:
+ rate_idx = 5;
+ break;
+ case DESC_RATEVHT2SS_MCS6:
+ rate_idx = 6;
+ break;
+ case DESC_RATEVHT2SS_MCS7:
+ rate_idx = 7;
+ break;
+ case DESC_RATEVHT2SS_MCS8:
+ rate_idx = 8;
+ break;
+ case DESC_RATEVHT2SS_MCS9:
+ rate_idx = 9;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ return rate_idx;
+ }
if (false == isht) {
if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
switch (desc_rate) {
- case DESC92_RATE1M:
+ case DESC_RATE1M:
rate_idx = 0;
break;
- case DESC92_RATE2M:
+ case DESC_RATE2M:
rate_idx = 1;
break;
- case DESC92_RATE5_5M:
+ case DESC_RATE5_5M:
rate_idx = 2;
break;
- case DESC92_RATE11M:
+ case DESC_RATE11M:
rate_idx = 3;
break;
- case DESC92_RATE6M:
+ case DESC_RATE6M:
rate_idx = 4;
break;
- case DESC92_RATE9M:
+ case DESC_RATE9M:
rate_idx = 5;
break;
- case DESC92_RATE12M:
+ case DESC_RATE12M:
rate_idx = 6;
break;
- case DESC92_RATE18M:
+ case DESC_RATE18M:
rate_idx = 7;
break;
- case DESC92_RATE24M:
+ case DESC_RATE24M:
rate_idx = 8;
break;
- case DESC92_RATE36M:
+ case DESC_RATE36M:
rate_idx = 9;
break;
- case DESC92_RATE48M:
+ case DESC_RATE48M:
rate_idx = 10;
break;
- case DESC92_RATE54M:
+ case DESC_RATE54M:
rate_idx = 11;
break;
default:
@@ -932,28 +1004,28 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
}
} else {
switch (desc_rate) {
- case DESC92_RATE6M:
+ case DESC_RATE6M:
rate_idx = 0;
break;
- case DESC92_RATE9M:
+ case DESC_RATE9M:
rate_idx = 1;
break;
- case DESC92_RATE12M:
+ case DESC_RATE12M:
rate_idx = 2;
break;
- case DESC92_RATE18M:
+ case DESC_RATE18M:
rate_idx = 3;
break;
- case DESC92_RATE24M:
+ case DESC_RATE24M:
rate_idx = 4;
break;
- case DESC92_RATE36M:
+ case DESC_RATE36M:
rate_idx = 5;
break;
- case DESC92_RATE48M:
+ case DESC_RATE48M:
rate_idx = 6;
break;
- case DESC92_RATE54M:
+ case DESC_RATE54M:
rate_idx = 7;
break;
default:
@@ -963,52 +1035,52 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
}
} else {
switch (desc_rate) {
- case DESC92_RATEMCS0:
+ case DESC_RATEMCS0:
rate_idx = 0;
break;
- case DESC92_RATEMCS1:
+ case DESC_RATEMCS1:
rate_idx = 1;
break;
- case DESC92_RATEMCS2:
+ case DESC_RATEMCS2:
rate_idx = 2;
break;
- case DESC92_RATEMCS3:
+ case DESC_RATEMCS3:
rate_idx = 3;
break;
- case DESC92_RATEMCS4:
+ case DESC_RATEMCS4:
rate_idx = 4;
break;
- case DESC92_RATEMCS5:
+ case DESC_RATEMCS5:
rate_idx = 5;
break;
- case DESC92_RATEMCS6:
+ case DESC_RATEMCS6:
rate_idx = 6;
break;
- case DESC92_RATEMCS7:
+ case DESC_RATEMCS7:
rate_idx = 7;
break;
- case DESC92_RATEMCS8:
+ case DESC_RATEMCS8:
rate_idx = 8;
break;
- case DESC92_RATEMCS9:
+ case DESC_RATEMCS9:
rate_idx = 9;
break;
- case DESC92_RATEMCS10:
+ case DESC_RATEMCS10:
rate_idx = 10;
break;
- case DESC92_RATEMCS11:
+ case DESC_RATEMCS11:
rate_idx = 11;
break;
- case DESC92_RATEMCS12:
+ case DESC_RATEMCS12:
rate_idx = 12;
break;
- case DESC92_RATEMCS13:
+ case DESC_RATEMCS13:
rate_idx = 13;
break;
- case DESC92_RATEMCS14:
+ case DESC_RATEMCS14:
rate_idx = 14;
break;
- case DESC92_RATEMCS15:
+ case DESC_RATEMCS15:
rate_idx = 15;
break;
default:
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 982f2450feea..c6cb49c3ee32 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -123,8 +123,8 @@ void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate, bool first_ampdu);
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
+ bool isvht, u8 desc_rate);
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
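
The comment block at the top of `rtlwifi_rate_mapping()` above documents a purely positional mapping: legacy rates map to mac80211 indices 0-11 (2.4 GHz) or 0-7 (5 GHz), HT MCS 0-15 map to 0-15, and each VHT stream's MCS 0-9 map to 0-9. When the descriptor-rate constants are contiguous, the same mapping can be expressed arithmetically instead of with a long switch; a standalone sketch under that assumption follows. The enum values here are illustrative only, not the driver's DESC_RATE* layout, and the sketch omits the switch's default-to-0 clamping.

```c
#include <stdio.h>

/* Illustrative, contiguous descriptor-rate codes; the real DESC_RATE*
 * constants live in the rtlwifi headers and may be laid out differently. */
enum demo_desc_rate {
	RATE_1M, RATE_2M, RATE_5_5M, RATE_11M,		/* CCK  -> idx 0..3  */
	RATE_6M, RATE_9M, RATE_12M, RATE_18M,		/* OFDM -> idx 4..11 */
	RATE_24M, RATE_36M, RATE_48M, RATE_54M,
	RATE_MCS0,					/* HT   -> idx 0..15 */
	RATE_MCS15 = RATE_MCS0 + 15,
	RATE_VHT1SS_MCS0,				/* VHT  -> idx 0..9  */
	RATE_VHT1SS_MCS9 = RATE_VHT1SS_MCS0 + 9,
	RATE_VHT2SS_MCS0,
	RATE_VHT2SS_MCS9 = RATE_VHT2SS_MCS0 + 9,
};

static int rate_to_idx(int desc_rate, int is_ht, int is_vht, int band_5ghz)
{
	if (is_vht) {		/* per-stream MCS index, 0..9 */
		if (desc_rate >= RATE_VHT2SS_MCS0)
			return desc_rate - RATE_VHT2SS_MCS0;
		return desc_rate - RATE_VHT1SS_MCS0;
	}
	if (is_ht)		/* HT MCS index, 0..15 */
		return desc_rate - RATE_MCS0;
	if (band_5ghz)		/* 5 GHz legacy: 6M..54M -> 0..7 */
		return desc_rate - RATE_6M;
	return desc_rate - RATE_1M;	/* 2.4 GHz legacy: 1M..54M -> 0..11 */
}

int main(void)
{
	printf("%d\n", rate_to_idx(RATE_54M, 0, 0, 0));			/* 11 */
	printf("%d\n", rate_to_idx(RATE_MCS0 + 7, 1, 0, 0));		/* 7  */
	printf("%d\n", rate_to_idx(RATE_VHT2SS_MCS0 + 9, 0, 1, 0));	/* 9  */
	return 0;
}
```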
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 5fc6f52641bd..a31a12775f1a 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -95,7 +95,8 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
}
EXPORT_SYMBOL(rtl_bb_delay);
-void rtl_fw_cb(const struct firmware *firmware, void *context)
+static void rtl_fw_do_work(const struct firmware *firmware, void *context,
+ bool is_wow)
{
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -125,12 +126,31 @@ found_alt:
release_firmware(firmware);
return;
}
- memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ if (!is_wow) {
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
+ firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ } else {
+ memcpy(rtlpriv->rtlhal.wowlan_firmware, firmware->data,
+ firmware->size);
+ rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
+ }
rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}
+
+void rtl_fw_cb(const struct firmware *firmware, void *context)
+{
+ rtl_fw_do_work(firmware, context, false);
+}
EXPORT_SYMBOL(rtl_fw_cb);
+void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context)
+{
+ rtl_fw_do_work(firmware, context, true);
+}
+EXPORT_SYMBOL(rtl_wowlan_fw_cb);
+
/*mutex for start & stop is must here. */
static int rtl_op_start(struct ieee80211_hw *hw)
{
@@ -990,6 +1010,16 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
return 0;
}
+static void send_beacon_frame(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (skb)
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+}
+
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1020,6 +1050,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->linked_set_reg)
rtlpriv->cfg->ops->linked_set_reg(hw);
+ send_beacon_frame(hw, vif);
}
}
if ((changed & BSS_CHANGED_BEACON_ENABLED &&
@@ -1851,3 +1882,40 @@ bool rtl_btc_status_false(void)
return false;
}
EXPORT_SYMBOL_GPL(rtl_btc_status_false);
+
+void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+ dm_digtable->dig_enable_flag = true;
+ dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ dm_digtable->cur_igvalue = cur_igvalue;
+ dm_digtable->pre_igvalue = 0;
+ dm_digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
+ dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
+ dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
+ dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable->rx_gain_max = DM_DIG_MAX;
+ dm_digtable->rx_gain_min = DM_DIG_MIN;
+ dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable->pre_cck_cca_thres = 0xff;
+ dm_digtable->cur_cck_cca_thres = 0x83;
+ dm_digtable->forbidden_igi = DM_DIG_MIN;
+ dm_digtable->large_fa_hit = 0;
+ dm_digtable->recover_cnt = 0;
+ dm_digtable->dig_min_0 = 0x25;
+ dm_digtable->dig_min_1 = 0x25;
+ dm_digtable->media_connect_0 = false;
+ dm_digtable->media_connect_1 = false;
+ rtlpriv->dm.dm_initialgain_enable = true;
+ dm_digtable->bt30_cur_igi = 0x32;
+ dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
+ dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+}
+EXPORT_SYMBOL(rtl_dm_diginit);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 624e1dc16d31..7b64e34f421e 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -35,13 +35,55 @@
#define RTL_SUPPORTED_CTRL_FILTER 0xFF
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+enum cck_packet_detection_threshold {
+ CCK_PD_STAGE_LOWRSSI = 0,
+ CCK_PD_STAGE_HIGHRSSI = 1,
+ CCK_FA_STAGE_LOW = 2,
+ CCK_FA_STAGE_HIGH = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT,
+ DIG_STA_CONNECT,
+ DIG_STA_BEFORE_CONNECT,
+ DIG_MULTISTA_DISCONNECT,
+ DIG_MULTISTA_CONNECT,
+ DIG_AP_DISCONNECT,
+ DIG_AP_CONNECT,
+ DIG_AP_ADD_STATION,
+ DIG_CONNECT_MAX
+};
+
extern const struct ieee80211_ops rtl_ops;
void rtl_fw_cb(const struct firmware *firmware, void *context);
+void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
void rtl_addr_delay(u32 addr);
void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
u32 mask, u32 data);
void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
bool rtl_btc_status_false(void);
+void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 846a2e6e34d8..ec456f0d972e 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -578,6 +578,13 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
else
entry = (u8 *)(&ring->desc[ring->idx]);
+ if (rtlpriv->cfg->ops->get_available_desc &&
+ rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
+ "no available desc!\n");
+ return;
+ }
+
if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
return;
ring->idx = (ring->idx + 1) % ring->entries;
@@ -641,10 +648,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
ieee80211_tx_status_irqsafe(hw, skb);
- if ((ring->entries - skb_queue_len(&ring->queue))
- == 2) {
+ if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
"more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
prio, ring->idx,
skb_queue_len(&ring->queue));
@@ -666,7 +672,8 @@ tx_status_ok:
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
- u8 *entry, int rxring_idx, int desc_idx)
+ struct sk_buff *new_skb, u8 *entry,
+ int rxring_idx, int desc_idx)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +681,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 tmp_one = 1;
struct sk_buff *skb;
+ if (likely(new_skb)) {
+ skb = new_skb;
+ goto remap;
+ }
skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (!skb)
return 0;
- rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +697,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
bufferaddress = *((dma_addr_t *)skb->cb);
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return 0;
+ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RX_PREPARE,
@@ -781,12 +793,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
rtlpci->rx_ring[rxring_idx].idx];
+ struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) {
rx_remained_cnt =
rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
hw_queue);
- if (rx_remained_cnt < 1)
+ if (rx_remained_cnt == 0)
return;
} else { /* rx descriptor */
@@ -807,6 +820,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ /* get a new skb - if fail, old one will be reused */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ pr_err("Allocation of new skb failed in %s\n",
+ __func__);
+ goto no_new;
+ }
if (rtlpriv->use_new_trx_flow) {
buffer_desc =
&rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -834,18 +854,18 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
else
skb_reserve(skb, stats.rx_drvinfo_size +
stats.rx_bufshift);
-
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"skb->end - skb->tail = %d, len is %d\n",
skb->end - skb->tail, len);
- break;
+ dev_kfree_skb_any(skb);
+ goto new_trx_end;
}
/* handle command packet here */
if (rtlpriv->cfg->ops->rx_command_packet &&
rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
dev_kfree_skb_any(skb);
- goto end;
+ goto new_trx_end;
}
/*
@@ -895,6 +915,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
} else {
dev_kfree_skb_any(skb);
}
+new_trx_end:
if (rtlpriv->use_new_trx_flow) {
rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
rtlpci->rx_ring[hw_queue].next_rx_rp %=
@@ -910,15 +931,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
-end:
+ skb = new_skb;
+no_new:
if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ rtlpci->rx_ring[rxring_idx].idx);
} else {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+ rxring_idx,
rtlpci->rx_ring[rxring_idx].idx);
-
if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1329,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1332,7 +1354,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1672,6 +1694,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
}
}
+ if (rtlpriv->cfg->ops->get_available_desc &&
+ rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "get_available_desc fail\n");
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ return skb->len;
+ }
+
if (ieee80211_is_data_qos(fc)) {
tid = rtl_get_tid(skb);
if (sta) {
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 5e832306dba9..d4567d12e07e 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -325,4 +325,11 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
+static inline u16 calc_fifo_space(u16 rp, u16 wp)
+{
+ if (rp <= wp)
+ return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
+ return rp - wp - 1;
+}
+
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index 2aa34d9055f0..d930c1f78721 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -341,38 +342,6 @@ static void dm_tx_pwr_track_set_pwr(struct ieee80211_hw *hw,
}
}
-static void rtl88e_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_dig = &rtlpriv->dm_digtable;
-
- dm_dig->dig_enable_flag = true;
- dm_dig->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
- dm_dig->pre_igvalue = 0;
- dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT;
- dm_dig->presta_cstate = DIG_STA_DISCONNECT;
- dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
- dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_dig->rx_gain_max = DM_DIG_MAX;
- dm_dig->rx_gain_min = DM_DIG_MIN;
- dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_dig->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_dig->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_dig->pre_cck_cca_thres = 0xff;
- dm_dig->cur_cck_cca_thres = 0x83;
- dm_dig->forbidden_igi = DM_DIG_MIN;
- dm_dig->large_fa_hit = 0;
- dm_dig->recover_cnt = 0;
- dm_dig->dig_min_0 = 0x25;
- dm_dig->dig_min_1 = 0x25;
- dm_dig->media_connect_0 = false;
- dm_dig->media_connect_1 = false;
- rtlpriv->dm.dm_initialgain_enable = true;
-}
-
static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1796,9 +1765,10 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
void rtl88e_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtl88e_dm_diginit(hw);
+ rtl_dm_diginit(hw, cur_igvalue);
rtl88e_dm_init_dynamic_txpower(hw);
rtl88e_dm_init_edca_turbo(hw);
rtl88e_dm_init_rate_adaptive_mask(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
index 64f1f3ea9807..071ccee69eae 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
@@ -186,28 +186,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
-#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_MIN_AP 0x20
-
#define DM_DIG_FA_UPPER 0x3e
#define DM_DIG_FA_LOWER 0x1e
#define DM_DIG_FA_TH0 0x200
#define DM_DIG_FA_TH1 0x300
#define DM_DIG_FA_TH2 0x400
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_W 30
#define RXPATHSELECTION_DIFF_TH 18
@@ -262,14 +246,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
DIG_OP_TYPE_MAX
};
-enum tag_cck_packet_detection_threshold_type_definition {
- CCK_PD_STAGE_LOWRSSI = 0,
- CCK_PD_STAGE_HIGHRSSI = 1,
- CCK_FA_STAGE_LOW = 2,
- CCK_FA_STAGE_HIGH = 3,
- CCK_PD_STAGE_MAX = 4,
-};
-
enum dm_1r_cca_e {
CCA_1R = 0,
CCA_2R = 1,
@@ -288,23 +264,6 @@ enum dm_sw_ant_switch_e {
ANS_ANTENNA_MAX = 3,
};
-enum dm_dig_ext_port_alg_e {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect_e {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_MULTISTA_DISCONNECT = 3,
- DIG_MULTISTA_CONNECT = 4,
- DIG_CONNECT_MAX
-};
-
enum pwr_track_control_method {
BBSWING,
TXAGC
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index df549c96adef..791efbe6b18c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl88ee_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_88e *p_drvinfo,
@@ -630,8 +472,8 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
* are use (RX_FLAG_HT)
* Notice: this is diff with windows define
*/
- rx_status->rate_idx = _rtl88ee_rate_mapping(hw,
- status->is_ht, status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus == true) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f6cb5aedfdd1..f5ee67cda73a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -32,6 +32,7 @@
#include "phy_common.h"
#include "../pci.h"
#include "../base.h"
+#include "../core.h"
#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
@@ -194,36 +195,6 @@ void dm_savepowerindex(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(dm_savepowerindex);
-static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
-
- dm_digtable->dig_enable_flag = true;
- dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- dm_digtable->cur_igvalue = 0x20;
- dm_digtable->pre_igvalue = 0x0;
- dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
- dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
- dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
- dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_digtable->rx_gain_max = DM_DIG_MAX;
- dm_digtable->rx_gain_min = DM_DIG_MIN;
- dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
- dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi;
-
- dm_digtable->forbidden_igi = DM_DIG_MIN;
- dm_digtable->large_fa_hit = 0;
- dm_digtable->recover_cnt = 0;
- dm_digtable->dig_dynamic_min = 0x25;
-}
-
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -507,27 +478,27 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
if (dm_digtable->rssi_val_min > 100)
dm_digtable->rssi_val_min = 100;
- if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (dm_digtable->rssi_val_min <= 25)
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LowRssi;
+ CCK_PD_STAGE_LOWRSSI;
else
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HighRssi;
+ CCK_PD_STAGE_HIGHRSSI;
} else {
if (dm_digtable->rssi_val_min <= 20)
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LowRssi;
+ CCK_PD_STAGE_LOWRSSI;
else
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HighRssi;
+ CCK_PD_STAGE_HIGHRSSI;
}
} else {
dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
}
if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
- if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) ||
+ if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) ||
(dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX))
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
else
@@ -1374,7 +1345,7 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
rtlpriv->dm.undec_sm_pwdb = -1;
rtlpriv->dm.undec_sm_cck = -1;
rtlpriv->dm.dm_initialgain_enable = true;
- rtl92c_dm_diginit(hw);
+ rtl_dm_diginit(hw, 0x20);
rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE;
rtl92c_dm_init_dynamic_txpower(hw);
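The rtl92c_dm_diginit() body removed above (and its rtl92d/rtl92ee twins removed later in this series) is folded into a single rtl_dm_diginit(hw, cur_igvalue) helper in the shared rtlwifi code; only the initial IGI differs per driver, so it becomes a parameter. The helper's body is not shown in this diff; a minimal sketch, assuming it simply keeps the common defaults the removed copies duplicated (field and constant names taken from the removed functions, presumably now defined in a shared header):

void rtl_dm_diginit_sketch(struct ieee80211_hw *hw, u32 cur_igvalue)
{
	struct dig_t *dig = &rtl_priv(hw)->dm_digtable;

	dig->dig_enable_flag = true;
	dig->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
	dig->cur_igvalue = cur_igvalue;
	dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
	dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
	dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
	dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
	dig->rx_gain_max = DM_DIG_MAX;
	dig->rx_gain_min = DM_DIG_MIN;
	dig->back_val = DM_DIG_BACKOFF_DEFAULT;
	dig->back_range_max = DM_DIG_BACKOFF_MAX;
	dig->back_range_min = DM_DIG_BACKOFF_MIN;
	dig->pre_cck_pd_state = CCK_PD_STAGE_MAX;
	dig->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
	dig->large_fa_hit = 0;
	dig->recover_cnt = 0;
	dig->forbidden_igi = DM_DIG_MIN;
}

Callers that need different limits adjust individual fields after the call, as the rtl8192de hunk later in this diff does for rx_gain_max/rx_gain_min.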
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 4f232a063636..4422e31fedd9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -47,25 +47,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
#define DM_DIG_FA_UPPER 0x32
#define DM_DIG_FA_LOWER 0x20
#define DM_DIG_FA_TH0 0x20
#define DM_DIG_FA_TH1 0x100
#define DM_DIG_FA_TH2 0x200
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_lOW 30
#define RXPATHSELECTION_DIFF_TH 18
@@ -123,14 +110,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
DIG_OP_TYPE_MAX
};
-enum tag_cck_packet_detection_threshold_type_definition {
- CCK_PD_STAGE_LowRssi = 0,
- CCK_PD_STAGE_HighRssi = 1,
- CCK_FA_STAGE_Low = 2,
- CCK_FA_STAGE_High = 3,
- CCK_PD_STAGE_MAX = 4,
-};
-
enum dm_1r_cca_e {
CCA_1R = 0,
CCA_2R = 1,
@@ -149,23 +128,6 @@ enum dm_sw_ant_switch_e {
ANS_ANTENNA_MAX = 3,
};
-enum dm_dig_ext_port_alg_e {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect_e {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_MULTISTA_DISCONNECT = 3,
- DIG_MULTISTA_CONNECT = 4,
- DIG_CONNECT_MAX
-};
-
void rtl92c_dm_init(struct ieee80211_hw *hw);
void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index b64ae45dc674..e9f4281f5067 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -37,6 +37,7 @@
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 100
#define NORMAL_CHIP BIT(4)
+#define H2C_92C_KEEP_ALIVE_CTRL 48
#define IS_FW_HEADER_EXIST(_pfwhdr) \
((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 74f9c083b80d..09898cf2e07a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 9c5311c299fd..38ba707015f5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -42,25 +42,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
#define DM_DIG_FA_UPPER 0x32
#define DM_DIG_FA_LOWER 0x20
#define DM_DIG_FA_TH0 0x20
#define DM_DIG_FA_TH1 0x100
#define DM_DIG_FA_TH2 0x200
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_lOW 30
#define RXPATHSELECTION_DIFF_TH 18
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c646d5f7bb8..303b299376c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -544,8 +544,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
}
break; }
- case HW_VAR_KEEP_ALIVE:
- break;
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+
+ array[0] = 0xff;
+ array[1] = *((u8 *)val);
+ rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array);
+ break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case %d not processed\n", variable);
@@ -1156,47 +1161,35 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
- bt_msr &= 0xfc;
+ u8 mode = MSR_NOLINK;
- if (type == NL80211_IFTYPE_UNSPECIFIED ||
- type == NL80211_IFTYPE_STATION) {
- _rtl92ce_stop_tx_beacon(hw);
- _rtl92ce_enable_bcn_sub_func(hw);
- } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP ||
- type == NL80211_IFTYPE_MESH_POINT) {
- _rtl92ce_resume_tx_beacon(hw);
- _rtl92ce_disable_bcn_sub_func(hw);
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
- type);
- }
+ bt_msr &= 0xfc;
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
- bt_msr |= MSR_NOLINK;
- ledaction = LED_CTL_LINK;
+ mode = MSR_NOLINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
- bt_msr |= MSR_ADHOC;
+ mode = MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
- bt_msr |= MSR_INFRA;
+ mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
- bt_msr |= MSR_AP;
+ mode = MSR_AP;
+ ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to AP!\n");
break;
case NL80211_IFTYPE_MESH_POINT:
- bt_msr |= MSR_ADHOC;
+ mode = MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Mesh Point!\n");
break;
@@ -1207,9 +1200,32 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ /* MSR_INFRA == Link in infrastructure network;
+ * MSR_ADHOC == Link in ad hoc network;
+ * Therefore, checking the link state is necessary.
+ *
+ * MSR_AP == AP mode; link state does not matter here.
+ */
+ if (mode != MSR_AP &&
+ rtlpriv->mac80211.link_state < MAC80211_LINKED) {
+ mode = MSR_NOLINK;
+ ledaction = LED_CTL_NO_LINK;
+ }
+ if (mode == MSR_NOLINK || mode == MSR_INFRA) {
+ _rtl92ce_stop_tx_beacon(hw);
+ _rtl92ce_enable_bcn_sub_func(hw);
+ } else if (mode == MSR_ADHOC || mode == MSR_AP) {
+ _rtl92ce_resume_tx_beacon(hw);
+ _rtl92ce_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
+ }
+ rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
+
rtlpriv->cfg->ops->led_control(hw, ledaction);
- if ((bt_msr & MSR_MASK) == MSR_AP)
+ if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
else
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
@@ -1833,7 +1849,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
u32 ratr_value;
u8 ratr_index = 0;
u8 nmode = mac->ht_enable;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
@@ -1842,6 +1857,7 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
+ u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
ratr_value = sta->supp_rates[1] << 4;
@@ -1865,19 +1881,13 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
nmode = 1;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- ratr_value &= 0x0007F005;
- } else {
- u32 ratr_mask;
-
- if (get_rf_type(rtlphy) == RF_1T2R ||
- get_rf_type(rtlphy) == RF_1T1R)
- ratr_mask = 0x000ff005;
- else
- ratr_mask = 0x0f0ff005;
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
- ratr_value &= ratr_mask;
- }
+ ratr_value &= ratr_mask;
break;
default:
if (rtlphy->rf_type == RF_1T2R)
@@ -1930,17 +1940,16 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = curtxbw_40mhz &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- 1 : 0;
+ u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
u8 rate_mask[5];
u8 macid = 0;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
wirelessmode = sta_entry->wireless_mode;
@@ -1985,47 +1994,38 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
case WIRELESS_MODE_N_5G:
ratr_index = RATR_INX_WIRELESS_NGB;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x00070000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0007f000;
- else
- ratr_bitmap &= 0x0007f005;
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
} else {
- if (rtlphy->rf_type == RF_1T2R ||
- rtlphy->rf_type == RF_1T1R) {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff005;
- }
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
} else {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff005;
- }
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
}
}
@@ -2058,9 +2058,6 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
"Rate_index:%x, ratr_val:%x, %5phC\n",
ratr_index, ratr_bitmap, rate_mask);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
-
- if (macid != 0)
- sta_entry->ratr_index = ratr_index;
}
void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index bc5ca989b915..1ee5a6ae9960 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -518,11 +518,12 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- return false;
+ break;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
- if (skb_queue_len(&ring->queue) == 0) {
+ if (queue_id == BEACON_QUEUE ||
+ skb_queue_len(&ring->queue) == 0) {
queue_id++;
continue;
} else {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index dd5aa089126a..de6cb6c3a48c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -334,21 +334,21 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
static const struct pci_device_id rtl92ce_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index e88dcd0e0af1..84ddd4d07a1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -257,8 +257,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->recvsignalpower = rx_pwr_all;
/* (3)EVM of HT rate */
- if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
- pstats->rate <= DESC92_RATEMCS15)
+ if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
+ pstats->rate <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -400,9 +400,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
* are used (RX_FLAG_HT)
* Notice: this differs from the Windows define
*/
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- stats->is_ht, stats->rate,
- stats->isfirst_ampdu);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = stats->timestamp_low;
if (phystatus) {
@@ -501,7 +500,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc,
- ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
+ ((tcb_desc->rts_rate <= DESC_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -624,7 +623,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 551321728ae0..fe4b699a12f5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
local_save_flags(flags);
local_irq_enable();
+ rtlhal->fw_ready = false;
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
err = _rtl92cu_init_mac(hw);
if (err) {
@@ -1013,6 +1014,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
err = 1;
goto exit;
}
+
+ rtlhal->fw_ready = true;
rtlhal->last_hmeboxnum = 0; /* h2c */
_rtl92cu_phy_param_tab_init(hw);
rtl92cu_phy_mac_config(hw);
@@ -1509,6 +1512,7 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
/* TODO: Modify later (Find the right parameters)
* NOTE: Fix test chip's bug (about contention windows's randomness) */
if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
+ (mac->opmode == NL80211_IFTYPE_MESH_POINT) ||
(mac->opmode == NL80211_IFTYPE_AP)) {
rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index c2d8ec6afcda..133e395b7401 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -880,8 +880,8 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
if (GET_RX_DESC_RX_MCS(pdesc) &&
- GET_RX_DESC_RX_MCS(pdesc) >= DESC92_RATEMCS8 &&
- GET_RX_DESC_RX_MCS(pdesc) <= DESC92_RATEMCS15)
+ GET_RX_DESC_RX_MCS(pdesc) >= DESC_RATEMCS8 &&
+ GET_RX_DESC_RX_MCS(pdesc) <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index e06bafee37f9..90a714c189a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -257,20 +257,20 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
#define USB_VENDER_ID_REALTEK 0x0bda
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index f383d5f1fed5..cbead007171f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -325,6 +325,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
if (GET_RX_DESC_CRC32(pdesc))
@@ -338,10 +339,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- (bool)GET_RX_DESC_RX_HT(pdesc),
- (u8)GET_RX_DESC_RX_MCS(pdesc),
- (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
@@ -393,6 +392,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
&& (GET_RX_DESC_FAGGR(rxdesc) == 1));
stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+ stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc);
/* TODO: is center_freq changed when doing scan? */
/* TODO: Shall we add protection or just skip those two step? */
rx_status->freq = hw->conf.chandef.chan->center_freq;
@@ -406,10 +406,8 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
if (GET_RX_DESC_RX_HT(rxdesc))
rx_status->flag |= RX_FLAG_HT;
/* Data rate */
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- (bool)GET_RX_DESC_RX_HT(rxdesc),
- (u8)GET_RX_DESC_RX_MCS(rxdesc),
- (bool)GET_RX_DESC_PAGGR(rxdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht,
+ false, stats.rate);
/* There is a phy status after this rx descriptor. */
if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
@@ -545,7 +543,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BW(txdesc, 0);
SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(txdesc,
- ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
+ ((tcb_desc->rts_rate <= DESC_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
@@ -644,7 +642,7 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
}
SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
SET_TX_DESC_OWN(pDesc, 1);
- SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pDesc, DESC_RATE1M);
_rtl_tx_desc_checksum(pDesc);
}
@@ -660,7 +658,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
SET_TX_DESC_LINIP(pdesc, 0);
SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 304c443b89b2..a1be5a68edfb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -29,6 +29,7 @@
#include "../wifi.h"
#include "../base.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -155,34 +156,6 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
};
-static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
-
- de_digtable->dig_enable_flag = true;
- de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- de_digtable->cur_igvalue = 0x20;
- de_digtable->pre_igvalue = 0x0;
- de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
- de_digtable->presta_cstate = DIG_STA_DISCONNECT;
- de_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
- de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
- de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
- de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- de_digtable->rx_gain_max = DM_DIG_FA_UPPER;
- de_digtable->rx_gain_min = DM_DIG_FA_LOWER;
- de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
- de_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
- de_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
- de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
- de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
- de_digtable->large_fa_hit = 0;
- de_digtable->recover_cnt = 0;
- de_digtable->forbidden_igi = DM_DIG_FA_LOWER;
-}
-
static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
@@ -1305,7 +1278,9 @@ void rtl92d_dm_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtl92d_dm_diginit(hw);
+ rtl_dm_diginit(hw, 0x20);
+ rtlpriv->dm_digtable.rx_gain_max = DM_DIG_FA_UPPER;
+ rtlpriv->dm_digtable.rx_gain_min = DM_DIG_FA_LOWER;
rtl92d_dm_init_dynamic_txpower(hw);
rtl92d_dm_init_edca_turbo(hw);
rtl92d_dm_init_rate_adaptive_mask(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
index 3fea0c11c24a..f2d318ceeb28 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -42,25 +42,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1c
-
#define DM_DIG_FA_UPPER 0x32
#define DM_DIG_FA_LOWER 0x20
#define DM_DIG_FA_TH0 0x100
#define DM_DIG_FA_TH1 0x400
#define DM_DIG_FA_TH2 0x600
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_lOW 30
#define RXPATHSELECTION_DIFF_TH 18
@@ -108,14 +95,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
DIG_OP_TYPE_MAX
};
-enum tag_cck_packet_detection_threshold_type_definition {
- CCK_PD_STAGE_LOWRSSI = 0,
- CCK_PD_STAGE_HIGHRSSI = 1,
- CCK_FA_STAGE_LOW = 2,
- CCK_FA_STAGE_HIGH = 3,
- CCK_PD_STAGE_MAX = 4,
-};
-
enum dm_1r_cca {
CCA_1R = 0,
CCA_2R = 1,
@@ -134,23 +113,6 @@ enum dm_sw_ant_switch {
ANS_ANTENNA_MAX = 3,
};
-enum dm_dig_ext_port_alg {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_MULTISTA_DISCONNECT = 3,
- DIG_MULTISTA_CONNECT = 4,
- DIG_CONNECT_MAX
-};
-
void rtl92d_dm_init(struct ieee80211_hw *hw);
void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index 23177076b97f..62ef8209718f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -540,23 +540,6 @@ void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
return;
}
-void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1_h2c_set_pwrmode[3] = { 0 };
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
- SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode",
- u1_h2c_set_pwrmode, 3);
- rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-}
-
static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index a55a803a0b4d..1646e7c3d0f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -136,7 +136,6 @@ int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
-void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 280c3da42993..01bcc2d218dc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -546,7 +546,7 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
txpktbuf_bndy = 246;
value8 = 0;
value32 = 0x80bf0d29;
- } else if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
+ } else {
maxPage = 127;
txpktbuf_bndy = 123;
value8 = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index a0aba088259a..b19d0398215f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -337,21 +337,21 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
static struct pci_device_id rtl92de_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 8efbcc7af250..1feaa629dd4f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -235,8 +235,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 &&
- pdesc->rxmcs <= DESC92_RATEMCS15)
+ if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
+ pdesc->rxmcs <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -499,6 +499,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
if (GET_RX_DESC_CRC32(pdesc))
@@ -512,10 +513,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- (bool)GET_RX_DESC_RXHT(pdesc),
- (u8)GET_RX_DESC_RXMCS(pdesc),
- (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
@@ -612,14 +611,14 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
}
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
- if (ptcb_desc->hw_rate < DESC92_RATE6M)
- ptcb_desc->hw_rate = DESC92_RATE6M;
+ if (ptcb_desc->hw_rate < DESC_RATE6M)
+ ptcb_desc->hw_rate = DESC_RATE6M;
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
- ptcb_desc->hw_rate == DESC92_RATEMCS7)
+ ptcb_desc->hw_rate == DESC_RATEMCS7)
SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -635,13 +634,13 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
- if (ptcb_desc->rts_rate < DESC92_RATE6M)
- ptcb_desc->rts_rate = DESC92_RATE6M;
+ if (ptcb_desc->rts_rate < DESC_RATE6M)
+ ptcb_desc->rts_rate = DESC_RATE6M;
SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
- DESC92_RATE54M) ?
+ DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
@@ -756,9 +755,9 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
* The braces are needed no matter what checkpatch says
*/
if (rtlhal->current_bandtype == BAND_ON_5G) {
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE6M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
} else {
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
}
SET_TX_DESC_SEQ(pdesc, 0);
SET_TX_DESC_LINIP(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
index 77deedf79d1d..459f3d0efa2f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -151,35 +152,6 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
};
-static void rtl92ee_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_dig = &rtlpriv->dm_digtable;
-
- dm_dig->cur_igvalue = rtl_get_bbreg(hw, DM_REG_IGI_A_11N,
- DM_BIT_IGI_11N);
- dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_dig->rx_gain_max = DM_DIG_MAX;
- dm_dig->rx_gain_min = DM_DIG_MIN;
- dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_dig->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_dig->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_dig->pre_cck_cca_thres = 0xff;
- dm_dig->cur_cck_cca_thres = 0x83;
- dm_dig->forbidden_igi = DM_DIG_MIN;
- dm_dig->large_fa_hit = 0;
- dm_dig->recover_cnt = 0;
- dm_dig->dig_dynamic_min = DM_DIG_MIN;
- dm_dig->dig_dynamic_min_1 = DM_DIG_MIN;
- dm_dig->media_connect_0 = false;
- dm_dig->media_connect_1 = false;
- rtlpriv->dm.dm_initialgain_enable = true;
- dm_dig->bt30_cur_igi = 0x32;
-}
-
static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
@@ -298,7 +270,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct dig_t *dm_dig = &rtlpriv->dm_digtable;
- u8 dig_dynamic_min , dig_maxofmin;
+ u8 dig_min_0, dig_maxofmin;
bool bfirstconnect , bfirstdisconnect;
u8 dm_dig_max, dm_dig_min;
u8 current_igi = dm_dig->cur_igvalue;
@@ -308,7 +280,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
if (mac->act_scanning)
return;
- dig_dynamic_min = dm_dig->dig_dynamic_min;
+ dig_min_0 = dm_dig->dig_min_0;
bfirstconnect = (mac->link_state >= MAC80211_LINKED) &&
!dm_dig->media_connect_0;
bfirstdisconnect = (mac->link_state < MAC80211_LINKED) &&
@@ -329,19 +301,19 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
if (rtlpriv->dm.one_entry_only) {
offset = 0;
if (dm_dig->rssi_val_min - offset < dm_dig_min)
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
else if (dm_dig->rssi_val_min - offset >
dig_maxofmin)
- dig_dynamic_min = dig_maxofmin;
+ dig_min_0 = dig_maxofmin;
else
- dig_dynamic_min = dm_dig->rssi_val_min - offset;
+ dig_min_0 = dm_dig->rssi_val_min - offset;
} else {
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
}
} else {
dm_dig->rx_gain_max = dm_dig_max;
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
@@ -368,10 +340,10 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
} else {
if (dm_dig->large_fa_hit < 3) {
if ((dm_dig->forbidden_igi - 1) <
- dig_dynamic_min) {
- dm_dig->forbidden_igi = dig_dynamic_min;
+ dig_min_0) {
+ dm_dig->forbidden_igi = dig_min_0;
dm_dig->rx_gain_min =
- dig_dynamic_min;
+ dig_min_0;
} else {
dm_dig->forbidden_igi--;
dm_dig->rx_gain_min =
@@ -430,7 +402,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
rtl92ee_dm_write_dig(hw , current_igi);
dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ?
true : false);
- dm_dig->dig_dynamic_min = dig_dynamic_min;
+ dm_dig->dig_min_0 = dig_min_0;
}
void rtl92ee_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 cur_thres)
@@ -1088,10 +1060,11 @@ static void rtl92ee_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
void rtl92ee_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 cur_igvalue = rtl_get_bbreg(hw, DM_REG_IGI_A_11N, DM_BIT_IGI_11N);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtl92ee_dm_diginit(hw);
+ rtl_dm_diginit(hw, cur_igvalue);
rtl92ee_dm_init_rate_adaptive_mask(hw);
rtl92ee_dm_init_primary_cca_check(hw);
rtl92ee_dm_init_edca_turbo(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
index 881db7d6fef7..107d5a488fa8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
@@ -189,28 +189,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
-#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_MIN_AP 0x20
-
#define DM_DIG_FA_UPPER 0x3e
#define DM_DIG_FA_LOWER 0x1e
#define DM_DIG_FA_TH0 0x200
#define DM_DIG_FA_TH1 0x300
#define DM_DIG_FA_TH2 0x400
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index 45c128b91f7f..c5d4b8013cde 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -666,7 +666,6 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
struct sk_buff *skb = NULL;
u32 totalpacketlen;
- bool rtstatus;
u8 u1rsvdpageloc[5] = { 0 };
bool b_dlok = false;
@@ -728,10 +727,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
memcpy((u8 *)skb_put(skb, totalpacketlen),
&reserved_page_packet, totalpacketlen);
- rtstatus = rtl_cmd_send_packet(hw, skb);
-
- if (rtstatus)
- b_dlok = true;
+ b_dlok = true;
if (b_dlok) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index 1a87edca2c3f..b461b3128da5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -85,29 +85,6 @@ static void _rtl92ee_enable_bcn_sub_func(struct ieee80211_hw *hw)
_rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(1));
}
-static void _rtl92ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
- unsigned long flags;
-
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
- while (skb_queue_len(&ring->queue)) {
- struct rtl_tx_buffer_desc *entry =
- &ring->buffer_desc[ring->idx];
- struct sk_buff *skb = __skb_dequeue(&ring->queue);
-
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
- ring->idx = (ring->idx + 1) % ring->entries;
- }
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-}
-
static void _rtl92ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
{
_rtl92ee_set_bcn_ctrl_reg(hw, BIT(1), 0);
@@ -403,9 +380,6 @@ static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2,
bcnvalid_reg | BIT(0));
- /* Return Beacon TCB */
- _rtl92ee_return_beacon_queue_skb(hw);
-
/* download rsvd page */
rtl92ee_set_fw_rsvdpagepkt(hw, false);
@@ -1163,6 +1137,139 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
+static bool _rtl8192ee_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
+{
+ u8 tmp;
+
+ /* write reg 0x350 Bit[26]=1. Enable debug port. */
+ tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
+ if (!(tmp & BIT(2))) {
+ rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3,
+ tmp | BIT(2));
+ mdelay(100); /* Suggested by DD Justin_tsai. */
+ }
+
+ /* read reg 0x350 Bit[25] if 1 : RX hang
+ * read reg 0x350 Bit[24] if 1 : TX hang
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
+ if ((tmp & BIT(0)) || (tmp & BIT(1))) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8192EE(): true!!\n");
+ return true;
+ }
+ return false;
+}
+
+static void _rtl8192ee_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
+ bool mac_power_on)
+{
+ u8 tmp;
+ bool release_mac_rx_pause;
+ u8 backup_pcie_dma_pause;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ResetPcieInterfaceDMA8192EE()\n");
+
+ /* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
+ * released by SD1 Alan.
+ */
+
+ /* 1. disable register write lock
+ * write 0x1C bit[1:0] = 2'h0
+ * write 0xCC bit[2] = 1'b1
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
+ tmp &= ~(BIT(1) | BIT(0));
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
+ tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
+ tmp |= BIT(2);
+ rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
+
+ /* 2. Check and pause TRX DMA
+ * write 0x284 bit[18] = 1'b1
+ * write 0x301 = 0xFF
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ if (tmp & BIT(2)) {
+ /* RX DMA was already paused before this function, for another reason. */
+ release_mac_rx_pause = false;
+ } else {
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2)));
+ release_mac_rx_pause = true;
+ }
+
+ backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 1);
+ if (backup_pcie_dma_pause != 0xFF)
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFF);
+
+ if (mac_power_on) {
+ /* 3. reset TRX function
+ * write 0x100 = 0x00
+ */
+ rtl_write_byte(rtlpriv, REG_CR, 0);
+ }
+
+ /* 4. Reset PCIe DMA
+ * write 0x003 bit[0] = 0
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ tmp &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
+
+ /* 5. Enable PCIe DMA
+ * write 0x003 bit[0] = 1
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ tmp |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
+
+ if (mac_power_on) {
+ /* 6. enable TRX function
+ * write 0x100 = 0xFF
+ */
+ rtl_write_byte(rtlpriv, REG_CR, 0xFF);
+
+ /* We should init LLT & RQPN and
+ * prepare Tx/Rx descriptor address later
+ * because MAC function is reset.
+ */
+ }
+
+ /* 7. Restore PCIe autoload down bit
+ * write 0xF8 bit[17] = 1'b1
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2);
+ tmp |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp);
+
+ /* In the MAC power-on state, BB and RF may still be on;
+ * releasing TRx DMA here would let packets start to Tx/Rx
+ * immediately, so we release Tx/Rx DMA later.
+ */
+ if (!mac_power_on) {
+ /* 8. release TRX DMA
+ * write 0x284 bit[18] = 1'b0
+ * write 0x301 = 0x00
+ */
+ if (release_mac_rx_pause) {
+ tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL,
+ (tmp & (~BIT(2))));
+ }
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1,
+ backup_pcie_dma_pause);
+ }
+
+ /* 9. lock system register
+ * write 0xCC bit[2] = 1'b0
+ */
+ tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
+ tmp &= ~(BIT(2));
+ rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
+}
+
int rtl92ee_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1188,6 +1295,13 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E;
}
+ if (_rtl8192ee_check_pcie_dma_hang(rtlpriv)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
+ _rtl8192ee_reset_pcie_interface_dma(rtlpriv,
+ rtlhal->mac_func_enable);
+ rtlhal->mac_func_enable = false;
+ }
+
rtstatus = _rtl92ee_init_mac(hw);
rtl_write_byte(rtlpriv, 0x577, 0x03);
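The _rtl8192ee_check_pcie_dma_hang() helper added above reads the byte at REG_BACKDOOR_DBI_DATA + 3, i.e. (assuming little-endian byte addressing of the 32-bit register, which is what the in-function comments imply) bits 31:24 of register 0x350. That reconciles the bit numbers named in the comments with the bit tests on the fetched byte:

/* Assumed mapping between the byte read at REG_BACKDOOR_DBI_DATA + 3
 * and the register 0x350 bits named in the comments above:
 *   tmp & BIT(0)  <->  0x350 bit 24  (TX DMA hang)
 *   tmp & BIT(1)  <->  0x350 bit 25  (RX DMA hang)
 *   tmp & BIT(2)  <->  0x350 bit 26  (debug port enable)
 */

When either hang bit is set, rtl92ee_hw_init() now runs the reset sequence and clears mac_func_enable, presumably so that LLT/RQPN and the descriptor rings are re-initialized from scratch, as the comment inside the reset helper notes.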
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
index 3f2a9596e7cd..1eaa1fab550d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
@@ -77,9 +77,11 @@
#define REG_HIMRE 0x00B8
#define REG_HISRE 0x00BC
+#define REG_PMC_DBG_CTRL2 0x00CC
#define REG_EFUSE_ACCESS 0x00CF
#define REG_HPON_FSM 0x00EC
#define REG_SYS_CFG1 0x00F0
+#define REG_MAC_PHY_CTRL_NORMAL 0x00F8
#define REG_SYS_CFG2 0x00FC
#define REG_CR 0x0100
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
index 9b5a7d5be121..c31c6bfb536d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
@@ -113,8 +113,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
RCR_HTC_LOC_CTRL |
RCR_AMF |
RCR_ACF |
- RCR_ADF |
- RCR_AICV |
RCR_ACRC32 |
RCR_AB |
RCR_AM |
@@ -241,6 +239,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
.set_desc = rtl92ee_set_desc,
.get_desc = rtl92ee_get_desc,
.is_tx_desc_closed = rtl92ee_is_tx_desc_closed,
+ .get_available_desc = rtl92ee_get_available_desc,
.tx_polling = rtl92ee_tx_polling,
.enable_hw_sec = rtl92ee_enable_hw_security_config,
.set_key = rtl92ee_set_key,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
index 2fcbef1d029f..d39ee67f6113 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl92ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl92ee_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo *p_drvinfo,
@@ -345,8 +187,8 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->recvsignalpower = rx_pwr_all;
/* (3)EVM of HT rate */
- if (pstatus->rate >= DESC92C_RATEMCS8 &&
- pstatus->rate <= DESC92C_RATEMCS15)
+ if (pstatus->rate >= DESC_RATEMCS8 &&
+ pstatus->rate <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -512,6 +354,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
+ status->packet_report_type = NORMAL_RX;
+ else
+ status->packet_report_type = C2H_PACKET;
status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
@@ -576,9 +422,8 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
* are used (RX_FLAG_HT)
* Notice: this differs from the Windows define
*/
- rx_status->rate_idx = _rtl92ee_rate_mapping(hw,
- status->is_ht,
- status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus) {
@@ -654,14 +499,7 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
if (!start_rx)
return 0;
- if ((last_read_point > (RX_DESC_NUM_92E / 2)) &&
- (read_point <= (RX_DESC_NUM_92E / 2))) {
- remind_cnt = RX_DESC_NUM_92E - write_point;
- } else {
- remind_cnt = (read_point >= write_point) ?
- (read_point - write_point) :
- (RX_DESC_NUM_92E - write_point + read_point);
- }
+ remind_cnt = calc_fifo_space(read_point, write_point);
if (remind_cnt == 0)
return 0;
@@ -710,7 +548,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
return desc_address;
}
-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
+u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -723,12 +561,11 @@ void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
current_tx_read_point = (u16)((tmp_4byte >> 16) & 0x0fff);
current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
- point_diff = ((current_tx_read_point > current_tx_write_point) ?
- (current_tx_read_point - current_tx_write_point) :
- (TX_DESC_NUM_92E - current_tx_write_point +
- current_tx_read_point));
+ point_diff = calc_fifo_space(current_tx_read_point,
+ current_tx_write_point);
rtlpci->tx_ring[q_idx].avl_desc = point_diff;
+ return point_diff;
}
void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
@@ -901,13 +738,13 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
} else {
if (rtlpriv->ra.is_special_data) {
ptcb_desc->use_driver_rate = true;
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE11M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE11M);
} else {
ptcb_desc->use_driver_rate = false;
}
}
- if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
+ if (ptcb_desc->hw_rate > DESC_RATEMCS0)
short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
else
short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
@@ -927,7 +764,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc,
- ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
+ ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -1038,7 +875,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, txdesc_len);
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
@@ -1207,8 +1044,7 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
static u8 stop_report_cnt;
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
- /*checking Read/Write Point each interrupt wastes CPU */
- if (stop_report_cnt > 15 || !rtlpriv->link_info.busytraffic) {
+ {
u16 point_diff = 0;
u16 cur_tx_rp, cur_tx_wp;
u32 tmpu32 = 0;
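Both open-coded ring computations removed above (remind_cnt in the RX path and point_diff in rtl92ee_get_available_desc()) collapse into calc_fifo_space(read_point, write_point). That helper is defined elsewhere in rtlwifi and is not shown here; since it takes only the two pointers, it presumably fixes the ring size internally. A minimal sketch of the formula the TX-side code it replaces was computing, with the size made explicit:

static u16 calc_fifo_space_sketch(u16 read_point, u16 write_point, u16 size)
{
	/* distance from the write pointer forward to the read pointer,
	 * walking around the ring */
	if (read_point >= write_point)
		return read_point - write_point;
	return size - write_point + read_point;
}

Note that the removed RX-side code also special-cased the wrap across the half-ring boundary (last_read_point above RX_DESC_NUM_92E / 2 while read_point is below it); the shared helper apparently drops that special case.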
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
index 6f9be1c7515c..8f78ac9e6040 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
@@ -542,6 +542,8 @@
LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
#define GET_RX_DESC_RX_IS_QOS(__pdesc) \
LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
#define GET_RX_DESC_RXMCS(__pdesc) \
LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
@@ -591,10 +593,10 @@ do { \
} while (0)
#define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\
- (rxmcs == DESC92C_RATE1M ||\
- rxmcs == DESC92C_RATE2M ||\
- rxmcs == DESC92C_RATE5_5M ||\
- rxmcs == DESC92C_RATE11M)
+ (rxmcs == DESC_RATE1M ||\
+ rxmcs == DESC_RATE2M ||\
+ rxmcs == DESC_RATE5_5M ||\
+ rxmcs == DESC_RATE11M)
#define IS_LITTLE_ENDIAN 1
@@ -829,7 +831,7 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
u8 queue_index);
u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
u8 queue_index);
-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
+u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
u8 *tx_bd_desc, u8 *desc, u8 queue_index,
struct sk_buff *skb, dma_addr_t addr);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 6e7a70b43949..ef87c09b77d0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -450,10 +450,10 @@
SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE2M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE5_5M ||\
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE11M)
+ (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M || \
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M || \
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index b3a2d5ec59e6..575980b88658 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -29,6 +29,7 @@
#include "../wifi.h"
#include "../base.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -469,7 +470,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
if (digtable->backoff_enable_flag)
rtl92s_backoff_enable_flag(hw);
else
- digtable->back_val = DM_DIG_BACKOFF;
+ digtable->back_val = DM_DIG_BACKOFF_MAX;
if ((digtable->rssi_val + 10 - digtable->back_val) >
digtable->rx_gain_max)
@@ -503,7 +504,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
- digtable->back_val = DM_DIG_BACKOFF;
+ digtable->back_val = DM_DIG_BACKOFF_MAX;
digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
digtable->pre_igvalue = 0;
return;
@@ -691,7 +692,7 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
/* for dig debug rssi value */
digtable->rssi_val = 50;
- digtable->back_val = DM_DIG_BACKOFF;
+ digtable->back_val = DM_DIG_BACKOFF_MAX;
digtable->rx_gain_max = DM_DIG_MAX;
digtable->rx_gain_min = DM_DIG_MIN;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
index 2e9052c8fe4b..de6ac796c74d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -54,24 +54,6 @@ enum dm_dig_sta {
DM_STA_DIG_MAX
};
-enum dm_dig_connect {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_AP_DISCONNECT = 3,
- DIG_AP_CONNECT = 4,
- DIG_AP_ADD_STATION = 5,
- DIG_CONNECT_MAX
-};
-
-enum dm_dig_ext_port_alg {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
enum dm_ratr_sta {
DM_RATR_STA_HIGH = 0,
DM_RATR_STA_MIDDLEHIGH = 1,
@@ -99,22 +81,12 @@ enum dm_ratr_sta {
#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-#define DM_FALSEALARM_THRESH_LOW 40
-#define DM_FALSEALARM_THRESH_HIGH 1000
#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
#define DM_DIG_HIGH_PWR_THRESH_LOW 70
-#define DM_DIG_BACKOFF 12
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1c
#define DM_DIG_MIN_Netcore 0x12
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
void rtl92s_dm_init(struct ieee80211_hw *hw);
void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw);
#endif
-
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index fb003868bdef..e1fd27c888bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -383,21 +383,21 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
static struct pci_device_id rtl92se_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 672fd3b02835..125b29bd2f93 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -191,8 +191,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
- pstats->rate <= DESC92_RATEMCS15)
+ if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
+ pstats->rate <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -264,7 +264,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct rx_fwinfo *p_drvinfo;
u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
struct ieee80211_hdr *hdr;
- bool first_ampdu = false;
stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
@@ -319,8 +318,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->flag |= RX_FLAG_DECRYPTED;
}
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- stats->is_ht, stats->rate, first_ampdu);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = stats->timestamp_low;
if (phystatus) {
@@ -394,14 +393,14 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
- DESC92_RATEMCS0) ? 1 : 0));
+ DESC_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
- if (ptcb_desc->hw_rate == DESC92_RATE1M ||
- ptcb_desc->hw_rate == DESC92_RATE2M ||
- ptcb_desc->hw_rate == DESC92_RATE5_5M ||
- ptcb_desc->hw_rate == DESC92_RATE11M) {
- ptcb_desc->hw_rate = DESC92_RATE12M;
+ if (ptcb_desc->hw_rate == DESC_RATE1M ||
+ ptcb_desc->hw_rate == DESC_RATE2M ||
+ ptcb_desc->hw_rate == DESC_RATE5_5M ||
+ ptcb_desc->hw_rate == DESC_RATE11M) {
+ ptcb_desc->hw_rate = DESC_RATE12M;
}
}
@@ -430,7 +429,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
- DESC92_RATE54M) ?
+ DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
index a0e86922780a..4c1c96c96a5a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -146,31 +147,6 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};
-static void rtl8723e_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
-
- dm_digtable->dig_enable_flag = true;
- dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- dm_digtable->cur_igvalue = 0x20;
- dm_digtable->pre_igvalue = 0x0;
- dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
- dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
- dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
- dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_digtable->rx_gain_max = DM_DIG_MAX;
- dm_digtable->rx_gain_min = DM_DIG_MIN;
- dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
- dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
-}
-
static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -395,30 +371,30 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
dm_digtable->rssi_val_min = rtl8723e_dm_initial_gain_min_pwdb(hw);
- if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (dm_digtable->rssi_val_min <= 25)
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LowRssi;
+ CCK_PD_STAGE_LOWRSSI;
else
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HighRssi;
+ CCK_PD_STAGE_HIGHRSSI;
} else {
if (dm_digtable->rssi_val_min <= 20)
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LowRssi;
+ CCK_PD_STAGE_LOWRSSI;
else
dm_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HighRssi;
+ CCK_PD_STAGE_HIGHRSSI;
}
} else {
dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
}
if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
- if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
dm_digtable->cur_cck_fa_state =
- CCK_FA_STAGE_High;
+ CCK_FA_STAGE_HIGH;
else
dm_digtable->cur_cck_fa_state =
CCK_FA_STAGE_LOW;
@@ -818,7 +794,7 @@ void rtl8723e_dm_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtl8723e_dm_diginit(hw);
+ rtl_dm_diginit(hw, 0x20);
rtl8723_dm_init_dynamic_txpower(hw);
rtl8723_dm_init_edca_turbo(hw);
rtl8723e_dm_init_rate_adaptive_mask(hw);
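
Editor's note: the per-chip rtl8723e_dm_diginit() removed above, and its rtl8723be/rtl8821ae twins later in this patch, collapse into a single rtl_dm_diginit(hw, cur_igvalue) provided by the rtlwifi core, hence the new "../core.h" include. A hedged sketch of such a shared initializer, with defaults copied from the removed per-chip bodies; the exact field set of the common dig_t (for example dig_min_0/dig_min_1 after the rename seen below) is assumed and may differ from the real core.c:

/* Hedged sketch of the shared DIG initializer; defaults mirror the
 * removed per-chip copies, field names beyond those visible in this
 * patch are assumptions.
 */
void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_dig = &rtlpriv->dm_digtable;

	dm_dig->dig_enable_flag = true;
	dm_dig->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
	dm_dig->cur_igvalue = cur_igvalue;
	dm_dig->pre_igvalue = 0;
	dm_dig->cursta_cstate = DIG_STA_DISCONNECT;
	dm_dig->presta_cstate = DIG_STA_DISCONNECT;
	dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
	dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
	dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
	dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
	dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
	dm_dig->rx_gain_max = DM_DIG_MAX;
	dm_dig->rx_gain_min = DM_DIG_MIN;
	dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT;
	dm_dig->back_range_max = DM_DIG_BACKOFF_MAX;
	dm_dig->back_range_min = DM_DIG_BACKOFF_MIN;
	dm_dig->pre_cck_cca_thres = 0xff;
	dm_dig->cur_cck_cca_thres = 0x83;
	dm_dig->forbidden_igi = DM_DIG_MIN;
	dm_dig->dig_min_0 = DM_DIG_MIN;		/* assumed renamed field */
	dm_dig->dig_min_1 = DM_DIG_MIN;		/* assumed renamed field */
	dm_dig->media_connect_0 = false;
	dm_dig->media_connect_1 = false;
	rtlpriv->dm.dm_initialgain_enable = true;
	dm_dig->bt30_cur_igi = 0x32;
}
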
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
index 6fa0feb05f6d..57111052e86b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -42,25 +42,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
#define DM_DIG_FA_UPPER 0x32
#define DM_DIG_FA_LOWER 0x20
#define DM_DIG_FA_TH0 0x20
#define DM_DIG_FA_TH1 0x100
#define DM_DIG_FA_TH2 0x200
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
@@ -108,14 +95,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
DIG_OP_TYPE_MAX
};
-enum tag_cck_packet_detection_threshold_type_definition {
- CCK_PD_STAGE_LowRssi = 0,
- CCK_PD_STAGE_HighRssi = 1,
- CCK_FA_STAGE_LOW = 2,
- CCK_FA_STAGE_High = 3,
- CCK_PD_STAGE_MAX = 4,
-};
-
enum dm_1r_cca_e {
CCA_1R = 0,
CCA_2R = 1,
@@ -134,23 +113,6 @@ enum dm_sw_ant_switch_e {
ANS_ANTENNA_MAX = 3,
};
-enum dm_dig_ext_port_alg_e {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect_e {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_MULTISTA_DISCONNECT = 3,
- DIG_MULTISTA_CONNECT = 4,
- DIG_CONNECT_MAX
-};
-
#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index d372ccaf3465..2f7c144d7980 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -45,164 +45,6 @@ static u8 _rtl8723e_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8723e_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_8723e *p_drvinfo,
@@ -503,8 +345,8 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
* are used (RX_FLAG_HT)
* Notice: this is diff with windows define
*/
- rx_status->rate_idx = _rtl8723e_rate_mapping(hw,
- status->is_ht, status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus == true) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
index dd7eb4371f49..2367e8f47a5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -211,35 +212,6 @@ void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
(pwr_val << 16) | (pwr_val << 24);
}
-static void rtl8723be_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
-
- dm_digtable->dig_enable_flag = true;
- dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
- dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_digtable->rx_gain_max = DM_DIG_MAX;
- dm_digtable->rx_gain_min = DM_DIG_MIN;
- dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_digtable->pre_cck_cca_thres = 0xff;
- dm_digtable->cur_cck_cca_thres = 0x83;
- dm_digtable->forbidden_igi = DM_DIG_MIN;
- dm_digtable->large_fa_hit = 0;
- dm_digtable->recover_cnt = 0;
- dm_digtable->dig_dynamic_min = DM_DIG_MIN;
- dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN;
- dm_digtable->media_connect_0 = false;
- dm_digtable->media_connect_1 = false;
- rtlpriv->dm.dm_initialgain_enable = true;
- dm_digtable->bt30_cur_igi = 0x32;
-}
-
void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -293,9 +265,10 @@ static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
void rtl8723be_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtl8723be_dm_diginit(hw);
+ rtl_dm_diginit(hw, cur_igvalue);
rtl8723be_dm_init_rate_adaptive_mask(hw);
rtl8723_dm_init_edca_turbo(hw);
rtl8723_dm_init_dynamic_bb_powersaving(hw);
@@ -424,7 +397,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u8 dig_dynamic_min, dig_maxofmin;
+ u8 dig_min_0, dig_maxofmin;
bool bfirstconnect, bfirstdisconnect;
u8 dm_dig_max, dm_dig_min;
u8 current_igi = dm_digtable->cur_igvalue;
@@ -434,7 +407,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
if (mac->act_scanning)
return;
- dig_dynamic_min = dm_digtable->dig_dynamic_min;
+ dig_min_0 = dm_digtable->dig_min_0;
bfirstconnect = (mac->link_state >= MAC80211_LINKED) &&
!dm_digtable->media_connect_0;
bfirstdisconnect = (mac->link_state < MAC80211_LINKED) &&
@@ -456,20 +429,20 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
if (rtlpriv->dm.one_entry_only) {
offset = 12;
if (dm_digtable->rssi_val_min - offset < dm_dig_min)
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
else if (dm_digtable->rssi_val_min - offset >
dig_maxofmin)
- dig_dynamic_min = dig_maxofmin;
+ dig_min_0 = dig_maxofmin;
else
- dig_dynamic_min =
+ dig_min_0 =
dm_digtable->rssi_val_min - offset;
} else {
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
}
} else {
dm_digtable->rx_gain_max = dm_dig_max;
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
@@ -497,11 +470,11 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
} else {
if (dm_digtable->large_fa_hit < 3) {
if ((dm_digtable->forbidden_igi - 1) <
- dig_dynamic_min) {
+ dig_min_0) {
dm_digtable->forbidden_igi =
- dig_dynamic_min;
+ dig_min_0;
dm_digtable->rx_gain_min =
- dig_dynamic_min;
+ dig_min_0;
} else {
dm_digtable->forbidden_igi--;
dm_digtable->rx_gain_min =
@@ -552,7 +525,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
rtl8723be_dm_write_dig(hw, current_igi);
dm_digtable->media_connect_0 =
((mac->link_state >= MAC80211_LINKED) ? true : false);
- dm_digtable->dig_dynamic_min = dig_dynamic_min;
+ dm_digtable->dig_min_0 = dig_min_0;
}
static void rtl8723be_dm_false_alarm_counter_statistics(
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
index e4c0e8ae6f47..f752a2cad63d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
@@ -180,28 +180,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
-#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_MIN_AP 0x20
-
#define DM_DIG_FA_UPPER 0x3e
#define DM_DIG_FA_LOWER 0x1e
#define DM_DIG_FA_TH0 0x200
#define DM_DIG_FA_TH1 0x300
#define DM_DIG_FA_TH2 0x400
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
@@ -252,23 +236,6 @@ enum dm_sw_ant_switch_e {
ANS_ANTENNA_MAX = 3,
};
-enum dm_dig_ext_port_alg_e {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect_e {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_MULTISTA_DISCONNECT = 3,
- DIG_MULTISTA_CONNECT = 4,
- DIG_CONNECT_MAX
-};
-
enum pwr_track_control_method {
BBSWING,
TXAGC
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
index 20dcc25c506c..b7b73cbe346d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
@@ -874,31 +874,6 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
-void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 txpwr_level;
- long txpwr_dbm;
-
- txpwr_level = rtlphy->cur_cck_txpwridx;
- txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
- txpwr_dbm)
- txpwr_dbm =
- rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level) > txpwr_dbm)
- txpwr_dbm =
- rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level);
- *powerlevel = txpwr_dbm;
-}
-
static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
u8 rate)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
index 6339738a0e33..9021d4745ab7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
@@ -114,8 +114,6 @@ bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
u8 channel);
void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 223eb42992bd..1017f02d7bf7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -387,12 +387,14 @@ module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
bool, 0444);
-MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
-MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
-MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
-MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
+MODULE_PARM_DESC(disable_watchdog,
+ "Set to 1 to disable the watchdog (default 0)\n");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index d6a1c70cb657..338ec9a9d09b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_8723be *p_drvinfo,
@@ -558,8 +400,8 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
* supported rates or MCS index if HT rates
* are used (RX_FLAG_HT)
*/
- rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
- status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
index a730985ae81d..ee7c208bd070 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
@@ -373,60 +373,6 @@ enum rtl_desc_qsel {
QSLT_CMD = 0x13,
};
-enum rtl_desc8821ae_rate {
- DESC_RATE1M = 0x00,
- DESC_RATE2M = 0x01,
- DESC_RATE5_5M = 0x02,
- DESC_RATE11M = 0x03,
-
- DESC_RATE6M = 0x04,
- DESC_RATE9M = 0x05,
- DESC_RATE12M = 0x06,
- DESC_RATE18M = 0x07,
- DESC_RATE24M = 0x08,
- DESC_RATE36M = 0x09,
- DESC_RATE48M = 0x0a,
- DESC_RATE54M = 0x0b,
-
- DESC_RATEMCS0 = 0x0c,
- DESC_RATEMCS1 = 0x0d,
- DESC_RATEMCS2 = 0x0e,
- DESC_RATEMCS3 = 0x0f,
- DESC_RATEMCS4 = 0x10,
- DESC_RATEMCS5 = 0x11,
- DESC_RATEMCS6 = 0x12,
- DESC_RATEMCS7 = 0x13,
- DESC_RATEMCS8 = 0x14,
- DESC_RATEMCS9 = 0x15,
- DESC_RATEMCS10 = 0x16,
- DESC_RATEMCS11 = 0x17,
- DESC_RATEMCS12 = 0x18,
- DESC_RATEMCS13 = 0x19,
- DESC_RATEMCS14 = 0x1a,
- DESC_RATEMCS15 = 0x1b,
-
- DESC_RATEVHT1SS_MCS0 = 0x2c,
- DESC_RATEVHT1SS_MCS1 = 0x2d,
- DESC_RATEVHT1SS_MCS2 = 0x2e,
- DESC_RATEVHT1SS_MCS3 = 0x2f,
- DESC_RATEVHT1SS_MCS4 = 0x30,
- DESC_RATEVHT1SS_MCS5 = 0x31,
- DESC_RATEVHT1SS_MCS6 = 0x32,
- DESC_RATEVHT1SS_MCS7 = 0x33,
- DESC_RATEVHT1SS_MCS8 = 0x34,
- DESC_RATEVHT1SS_MCS9 = 0x35,
- DESC_RATEVHT2SS_MCS0 = 0x36,
- DESC_RATEVHT2SS_MCS1 = 0x37,
- DESC_RATEVHT2SS_MCS2 = 0x38,
- DESC_RATEVHT2SS_MCS3 = 0x39,
- DESC_RATEVHT2SS_MCS4 = 0x3a,
- DESC_RATEVHT2SS_MCS5 = 0x3b,
- DESC_RATEVHT2SS_MCS6 = 0x3c,
- DESC_RATEVHT2SS_MCS7 = 0x3d,
- DESC_RATEVHT2SS_MCS8 = 0x3e,
- DESC_RATEVHT2SS_MCS9 = 0x3f,
-};
-
enum rx_packet_type {
NORMAL_RX,
TX_REPORT1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
index ba30b0d250fd..0b2082dc48f1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -519,34 +520,6 @@ void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(
}
}
-static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
-
- dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
- dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
- dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
- dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
- dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
- dm_digtable->rx_gain_max = DM_DIG_MAX;
- dm_digtable->rx_gain_min = DM_DIG_MIN;
- dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
- dm_digtable->pre_cck_cca_thres = 0xff;
- dm_digtable->cur_cck_cca_thres = 0x83;
- dm_digtable->forbidden_igi = DM_DIG_MIN;
- dm_digtable->large_fa_hit = 0;
- dm_digtable->recover_cnt = 0;
- dm_digtable->dig_dynamic_min = DM_DIG_MIN;
- dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN;
- dm_digtable->media_connect_0 = false;
- dm_digtable->media_connect_1 = false;
- rtlpriv->dm.dm_initialgain_enable = true;
- dm_digtable->bt30_cur_igi = 0x32;
-}
-
void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -606,6 +579,7 @@ void rtl8821ae_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
spin_lock(&rtlpriv->locks.iqk_lock);
rtlphy->lck_inprogress = false;
@@ -613,7 +587,7 @@ void rtl8821ae_dm_init(struct ieee80211_hw *hw)
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl8821ae_dm_common_info_self_init(hw);
- rtl8821ae_dm_diginit(hw);
+ rtl_dm_diginit(hw, cur_igvalue);
rtl8821ae_dm_init_rate_adaptive_mask(hw);
rtl8821ae_dm_init_edca_turbo(hw);
rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw);
@@ -822,7 +796,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 dig_dynamic_min;
+ u8 dig_min_0;
u8 dig_max_of_min;
bool first_connect, first_disconnect;
u8 dm_dig_max, dm_dig_min, offset;
@@ -837,7 +811,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
}
/*add by Neil Chen to avoid PSD is processing*/
- dig_dynamic_min = dm_digtable->dig_dynamic_min;
+ dig_min_0 = dm_digtable->dig_min_0;
first_connect = (mac->link_state >= MAC80211_LINKED) &&
(!dm_digtable->media_connect_0);
first_disconnect = (mac->link_state < MAC80211_LINKED) &&
@@ -876,23 +850,23 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
offset = 0;
if (dm_digtable->rssi_val_min - offset < dm_dig_min)
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
else if (dm_digtable->rssi_val_min -
offset > dig_max_of_min)
- dig_dynamic_min = dig_max_of_min;
+ dig_min_0 = dig_max_of_min;
else
- dig_dynamic_min =
+ dig_min_0 =
dm_digtable->rssi_val_min - offset;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n",
- dig_dynamic_min);
+ "bOneEntryOnly=TRUE, dig_min_0=0x%x\n",
+ dig_min_0);
} else {
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
}
} else {
dm_digtable->rx_gain_max = dm_dig_max;
- dig_dynamic_min = dm_dig_min;
+ dig_min_0 = dm_dig_min;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"No Link\n");
}
@@ -925,11 +899,11 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
} else {
if (dm_digtable->large_fa_hit < 3) {
if ((dm_digtable->forbidden_igi - 1) <
- dig_dynamic_min) {
+ dig_min_0) {
dm_digtable->forbidden_igi =
- dig_dynamic_min;
+ dig_min_0;
dm_digtable->rx_gain_min =
- dig_dynamic_min;
+ dig_min_0;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Normal Case: At Lower Bound\n");
} else {
@@ -1024,7 +998,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
rtl8821ae_dm_write_dig(hw, current_igi);
dm_digtable->media_connect_0 =
((mac->link_state >= MAC80211_LINKED) ? true : false);
- dm_digtable->dig_dynamic_min = dig_dynamic_min;
+ dm_digtable->dig_min_0 = dig_min_0;
}
static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
index 9dd40dd316c1..625a6bbb21fc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
@@ -187,28 +187,12 @@
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX 0x3e
-#define DM_DIG_MIN 0x1e
-
-#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_MIN_AP 0x20
-
#define DM_DIG_FA_UPPER 0x3e
#define DM_DIG_FA_LOWER 0x1e
#define DM_DIG_FA_TH0 200
#define DM_DIG_FA_TH1 0x300
#define DM_DIG_FA_TH2 0x400
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
@@ -262,14 +246,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
DIG_OP_TYPE_MAX
};
-enum tag_cck_packet_detection_threshold_type_definition {
- CCK_PD_STAGE_LOWRSSI = 0,
- CCK_PD_STAGE_HIGHRSSI = 1,
- CCK_FA_STAGE_LOW = 2,
- CCK_FA_STAGE_HIGH = 3,
- CCK_PD_STAGE_MAX = 4,
-};
-
enum dm_1r_cca_e {
CCA_1R = 0,
CCA_2R = 1,
@@ -288,23 +264,6 @@ enum dm_sw_ant_switch_e {
ANS_ANTENNA_MAX = 3,
};
-enum dm_dig_ext_port_alg_e {
- DIG_EXT_PORT_STAGE_0 = 0,
- DIG_EXT_PORT_STAGE_1 = 1,
- DIG_EXT_PORT_STAGE_2 = 2,
- DIG_EXT_PORT_STAGE_3 = 3,
- DIG_EXT_PORT_STAGE_MAX = 4,
-};
-
-enum dm_dig_connect_e {
- DIG_STA_DISCONNECT = 0,
- DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_MULTISTA_DISCONNECT = 3,
- DIG_MULTISTA_CONNECT = 4,
- DIG_CONNECT_MAX
-};
-
enum pwr_track_control_method {
BBSWING,
TXAGC,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
index bf0b0ce9519c..36b3e91d996e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
@@ -93,9 +93,9 @@
#define RTL8812_TRANS_CARDEMU_TO_SUS \
{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xcc}, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xc0}, \
{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xEC}, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xE0}, \
{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \
/* gpio11 input mode, gpio10~8 output mode */}, \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
index fc92dd6a0d07..a4988121e1ab 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
@@ -85,52 +85,6 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = 1;
}
-static void load_wowlan_fw(struct rtl_priv *rtlpriv)
-{
- /* callback routine to load wowlan firmware after main fw has
- * been loaded
- */
- const struct firmware *wowlan_firmware;
- char *fw_name = NULL;
- int err;
-
- /* for wowlan firmware buf */
- rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
- if (!rtlpriv->rtlhal.wowlan_firmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for wowlan fw.\n");
- return;
- }
-
- if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE)
- fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
- else
- fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
- err = request_firmware(&wowlan_firmware, fw_name, rtlpriv->io.dev);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request wowlan firmware!\n");
- goto error;
- }
-
- if (wowlan_firmware->size > 0x8000) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Wowlan Firmware is too big!\n");
- goto error;
- }
-
- memcpy(rtlpriv->rtlhal.wowlan_firmware, wowlan_firmware->data,
- wowlan_firmware->size);
- rtlpriv->rtlhal.wowlan_fwsize = wowlan_firmware->size;
- release_firmware(wowlan_firmware);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "WOWLAN FirmwareDownload OK\n");
- return;
-error:
- release_firmware(wowlan_firmware);
- vfree(rtlpriv->rtlhal.wowlan_firmware);
-}
-
/*InitializeVariables8812E*/
int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
{
@@ -231,7 +185,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
else if (rtlpriv->psc.reg_fwctrl_lps == 3)
rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
- rtlpriv->rtl_fw_second_cb = load_wowlan_fw;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
@@ -239,20 +192,41 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
"Can't alloc buffer for fw.\n");
return 1;
}
+ rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
+ if (!rtlpriv->rtlhal.wowlan_firmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Can't alloc buffer for wowlan fw.\n");
+ return 1;
+ }
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- else
+ rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ } else {
rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
+ rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ }
rtlpriv->max_fw_size = 0x8000;
+ /*load normal firmware*/
pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ "Failed to request normal firmware!\n");
+ return 1;
+ }
+ /*load wowlan firmware*/
+ pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1,
+ rtlpriv->cfg->wowlan_fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_wowlan_fw_cb);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Failed to request wowlan firmware!\n");
return 1;
}
return 0;
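
Editor's note: the wowlan image is now requested asynchronously next to the normal firmware, which is why the rtl_fw_second_cb hook disappears from struct rtl_priv in the wifi.h hunk later in this patch. The rtl_wowlan_fw_cb completion callback lives in the rtlwifi core and is not shown here; a hedged sketch of what it presumably does, modelled on the removed synchronous load_wowlan_fw() above:

/* Hedged sketch of the new completion callback (the real one is in the
 * rtlwifi core, not in this patch); behaviour modelled on the removed
 * load_wowlan_fw().
 */
static void rtl_wowlan_fw_cb_sketch(const struct firmware *firmware,
				    void *context)
{
	struct ieee80211_hw *hw = context;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (!firmware)
		return;
	if (firmware->size > rtlpriv->max_fw_size) {
		release_firmware(firmware);
		return;
	}
	memcpy(rtlpriv->rtlhal.wowlan_firmware, firmware->data,
	       firmware->size);
	rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
	release_firmware(firmware);
}
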
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
index 383b86b05cba..72af4b9ee32b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
@@ -48,232 +48,6 @@ static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
- bool isht, bool isvht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC_RATE1M:
- rate_idx = 0;
- break;
- case DESC_RATE2M:
- rate_idx = 1;
- break;
- case DESC_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC_RATE11M:
- rate_idx = 3;
- break;
- case DESC_RATE6M:
- rate_idx = 4;
- break;
- case DESC_RATE9M:
- rate_idx = 5;
- break;
- case DESC_RATE12M:
- rate_idx = 6;
- break;
- case DESC_RATE18M:
- rate_idx = 7;
- break;
- case DESC_RATE24M:
- rate_idx = 8;
- break;
- case DESC_RATE36M:
- rate_idx = 9;
- break;
- case DESC_RATE48M:
- rate_idx = 10;
- break;
- case DESC_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC_RATE6M:
- rate_idx = 0;
- break;
- case DESC_RATE9M:
- rate_idx = 1;
- break;
- case DESC_RATE12M:
- rate_idx = 2;
- break;
- case DESC_RATE18M:
- rate_idx = 3;
- break;
- case DESC_RATE24M:
- rate_idx = 4;
- break;
- case DESC_RATE36M:
- rate_idx = 5;
- break;
- case DESC_RATE48M:
- rate_idx = 6;
- break;
- case DESC_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
-
- if (isvht) {
- switch (desc_rate) {
- case DESC_RATEVHT1SS_MCS0:
- rate_idx = 0;
- break;
- case DESC_RATEVHT1SS_MCS1:
- rate_idx = 1;
- break;
- case DESC_RATEVHT1SS_MCS2:
- rate_idx = 2;
- break;
- case DESC_RATEVHT1SS_MCS3:
- rate_idx = 3;
- break;
- case DESC_RATEVHT1SS_MCS4:
- rate_idx = 4;
- break;
- case DESC_RATEVHT1SS_MCS5:
- rate_idx = 5;
- break;
- case DESC_RATEVHT1SS_MCS6:
- rate_idx = 6;
- break;
- case DESC_RATEVHT1SS_MCS7:
- rate_idx = 7;
- break;
- case DESC_RATEVHT1SS_MCS8:
- rate_idx = 8;
- break;
- case DESC_RATEVHT1SS_MCS9:
- rate_idx = 9;
- break;
- case DESC_RATEVHT2SS_MCS0:
- rate_idx = 0;
- break;
- case DESC_RATEVHT2SS_MCS1:
- rate_idx = 1;
- break;
- case DESC_RATEVHT2SS_MCS2:
- rate_idx = 2;
- break;
- case DESC_RATEVHT2SS_MCS3:
- rate_idx = 3;
- break;
- case DESC_RATEVHT2SS_MCS4:
- rate_idx = 4;
- break;
- case DESC_RATEVHT2SS_MCS5:
- rate_idx = 5;
- break;
- case DESC_RATEVHT2SS_MCS6:
- rate_idx = 6;
- break;
- case DESC_RATEVHT2SS_MCS7:
- rate_idx = 7;
- break;
- case DESC_RATEVHT2SS_MCS8:
- rate_idx = 8;
- break;
- case DESC_RATEVHT2SS_MCS9:
- rate_idx = 9;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static u16 odm_cfo(char value)
{
int ret_val;
@@ -766,9 +540,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
* supported rates or MCS index if HT rates
* are use (RX_FLAG_HT)
*/
- rx_status->rate_idx =
- _rtl8821ae_rate_mapping(hw, status->is_ht,
- status->is_vht, status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ status->is_vht,
+ status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus) {
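
Editor's note: with the three per-driver _rtlXXXX_rate_mapping() switch tables deleted (rtl8723ae, rtl8723be and this rtl8821ae copy), every rx path now calls the shared rtlwifi_rate_mapping(hw, is_ht, is_vht, rate). The removed tables reduce to fixed offsets from the DESC_RATE* bases added to wifi.h below; a hedged sketch of that arithmetic, not the core helper's actual code:

/*
 * Hedged sketch: equivalent of the removed switch tables, expressed as
 * offsets from the DESC_RATE* enum bases.  Out-of-range values fall back
 * to index 0, matching the removed default cases.
 */
static int rate_desc_to_idx(struct ieee80211_hw *hw, bool isht, bool isvht,
			    u8 desc_rate)
{
	if (isvht && desc_rate >= DESC_RATEVHT1SS_MCS0)
		return (desc_rate < DESC_RATEVHT2SS_MCS0) ?
		       desc_rate - DESC_RATEVHT1SS_MCS0 :
		       desc_rate - DESC_RATEVHT2SS_MCS0;
	if (isht)
		return (desc_rate >= DESC_RATEMCS0 &&
			desc_rate <= DESC_RATEMCS15) ?
		       desc_rate - DESC_RATEMCS0 : 0;
	if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
		return (desc_rate <= DESC_RATE54M) ?
		       desc_rate - DESC_RATE1M : 0;
	return (desc_rate >= DESC_RATE6M && desc_rate <= DESC_RATE54M) ?
	       desc_rate - DESC_RATE6M : 0;
}
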
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6866dcf24340..51572912c53d 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -331,10 +331,10 @@ enum hardware_type {
(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
#define RX_HAL_IS_CCK_RATE(rxmcs) \
- ((rxmcs) == DESC92_RATE1M || \
- (rxmcs) == DESC92_RATE2M || \
- (rxmcs) == DESC92_RATE5_5M || \
- (rxmcs) == DESC92_RATE11M)
+ ((rxmcs) == DESC_RATE1M || \
+ (rxmcs) == DESC_RATE2M || \
+ (rxmcs) == DESC_RATE5_5M || \
+ (rxmcs) == DESC_RATE11M)
enum scan_operation_backup_opt {
SCAN_OPT_BACKUP = 0,
@@ -579,38 +579,59 @@ enum rtl_hal_state {
};
enum rtl_desc92_rate {
- DESC92_RATE1M = 0x00,
- DESC92_RATE2M = 0x01,
- DESC92_RATE5_5M = 0x02,
- DESC92_RATE11M = 0x03,
-
- DESC92_RATE6M = 0x04,
- DESC92_RATE9M = 0x05,
- DESC92_RATE12M = 0x06,
- DESC92_RATE18M = 0x07,
- DESC92_RATE24M = 0x08,
- DESC92_RATE36M = 0x09,
- DESC92_RATE48M = 0x0a,
- DESC92_RATE54M = 0x0b,
-
- DESC92_RATEMCS0 = 0x0c,
- DESC92_RATEMCS1 = 0x0d,
- DESC92_RATEMCS2 = 0x0e,
- DESC92_RATEMCS3 = 0x0f,
- DESC92_RATEMCS4 = 0x10,
- DESC92_RATEMCS5 = 0x11,
- DESC92_RATEMCS6 = 0x12,
- DESC92_RATEMCS7 = 0x13,
- DESC92_RATEMCS8 = 0x14,
- DESC92_RATEMCS9 = 0x15,
- DESC92_RATEMCS10 = 0x16,
- DESC92_RATEMCS11 = 0x17,
- DESC92_RATEMCS12 = 0x18,
- DESC92_RATEMCS13 = 0x19,
- DESC92_RATEMCS14 = 0x1a,
- DESC92_RATEMCS15 = 0x1b,
- DESC92_RATEMCS15_SG = 0x1c,
- DESC92_RATEMCS32 = 0x20,
+ DESC_RATE1M = 0x00,
+ DESC_RATE2M = 0x01,
+ DESC_RATE5_5M = 0x02,
+ DESC_RATE11M = 0x03,
+
+ DESC_RATE6M = 0x04,
+ DESC_RATE9M = 0x05,
+ DESC_RATE12M = 0x06,
+ DESC_RATE18M = 0x07,
+ DESC_RATE24M = 0x08,
+ DESC_RATE36M = 0x09,
+ DESC_RATE48M = 0x0a,
+ DESC_RATE54M = 0x0b,
+
+ DESC_RATEMCS0 = 0x0c,
+ DESC_RATEMCS1 = 0x0d,
+ DESC_RATEMCS2 = 0x0e,
+ DESC_RATEMCS3 = 0x0f,
+ DESC_RATEMCS4 = 0x10,
+ DESC_RATEMCS5 = 0x11,
+ DESC_RATEMCS6 = 0x12,
+ DESC_RATEMCS7 = 0x13,
+ DESC_RATEMCS8 = 0x14,
+ DESC_RATEMCS9 = 0x15,
+ DESC_RATEMCS10 = 0x16,
+ DESC_RATEMCS11 = 0x17,
+ DESC_RATEMCS12 = 0x18,
+ DESC_RATEMCS13 = 0x19,
+ DESC_RATEMCS14 = 0x1a,
+ DESC_RATEMCS15 = 0x1b,
+ DESC_RATEMCS15_SG = 0x1c,
+ DESC_RATEMCS32 = 0x20,
+
+ DESC_RATEVHT1SS_MCS0 = 0x2c,
+ DESC_RATEVHT1SS_MCS1 = 0x2d,
+ DESC_RATEVHT1SS_MCS2 = 0x2e,
+ DESC_RATEVHT1SS_MCS3 = 0x2f,
+ DESC_RATEVHT1SS_MCS4 = 0x30,
+ DESC_RATEVHT1SS_MCS5 = 0x31,
+ DESC_RATEVHT1SS_MCS6 = 0x32,
+ DESC_RATEVHT1SS_MCS7 = 0x33,
+ DESC_RATEVHT1SS_MCS8 = 0x34,
+ DESC_RATEVHT1SS_MCS9 = 0x35,
+ DESC_RATEVHT2SS_MCS0 = 0x36,
+ DESC_RATEVHT2SS_MCS1 = 0x37,
+ DESC_RATEVHT2SS_MCS2 = 0x38,
+ DESC_RATEVHT2SS_MCS3 = 0x39,
+ DESC_RATEVHT2SS_MCS4 = 0x3a,
+ DESC_RATEVHT2SS_MCS5 = 0x3b,
+ DESC_RATEVHT2SS_MCS6 = 0x3c,
+ DESC_RATEVHT2SS_MCS7 = 0x3d,
+ DESC_RATEVHT2SS_MCS8 = 0x3e,
+ DESC_RATEVHT2SS_MCS9 = 0x3f,
};
enum rtl_var_map {
@@ -2161,6 +2182,7 @@ struct rtl_hal_ops {
void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
struct rtl_wow_pattern *rtl_pattern,
u8 index);
+ u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
};
struct rtl_intf_ops {
@@ -2242,6 +2264,7 @@ struct rtl_hal_cfg {
char *name;
char *fw_name;
char *alt_fw_name;
+ char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -2390,8 +2413,6 @@ struct dig_t {
u8 pre_ccastate;
u8 cur_ccasate;
u8 large_fa_hit;
- u8 dig_dynamic_min;
- u8 dig_dynamic_min_1;
u8 forbidden_igi;
u8 dig_state;
u8 dig_highpwrstate;
@@ -2518,8 +2539,6 @@ struct proxim {
struct rtl_priv {
struct ieee80211_hw *hw;
- /* Used to load a second firmware */
- void (*rtl_fw_second_cb)(struct rtl_priv *rtlpriv);
struct completion firmware_loading_complete;
struct list_head list;
struct rtl_priv *buddy_priv;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 0b30a7b4d663..d4ba009ac9aa 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -500,6 +500,7 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
int ret = 0;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
@@ -1480,9 +1481,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_UAPSD;
+ wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d6d0d6d9c7a8..144d1f8ba473 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -250,6 +250,7 @@ static struct wlcore_conf wl12xx_conf = {
.keep_alive_interval = 55000,
.max_listen_interval = 20,
.sta_sleep_auth = WL1271_PSM_ILLEGAL,
+ .suspend_rx_ba_activity = 0,
},
.itrim = {
.enable = false,
@@ -1728,6 +1729,9 @@ static struct wlcore_ops wl12xx_ops = {
.convert_hwaddr = wl12xx_convert_hwaddr,
.lnk_high_prio = wl12xx_lnk_high_prio,
.lnk_low_prio = wl12xx_lnk_low_prio,
+ .interrupt_notify = NULL,
+ .rx_ba_filter = NULL,
+ .ap_sleep = NULL,
};
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index a169bb5a5dbf..67f2a0eec854 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -24,6 +24,7 @@
#include "../wlcore/acx.h"
#include "acx.h"
+#include "wl18xx.h"
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
@@ -194,3 +195,90 @@ out:
kfree(acx);
return ret;
}
+
+/*
+ * When the host is suspended, we don't want to get any fast-link/PSM
+ * notifications
+ */
+int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl,
+ bool action)
+{
+ struct wl18xx_acx_interrupt_notify *acx;
+ int ret = 0;
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enable = action;
+ ret = wl1271_cmd_configure(wl, ACX_INTERRUPT_NOTIFY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx interrupt notify setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/*
+ * When the host is suspended, we can configure the FW to disable RX BA
+ * notifications.
+ */
+int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action)
+{
+ struct wl18xx_acx_rx_ba_filter *acx;
+ int ret = 0;
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enable = (u32)action;
+ ret = wl1271_cmd_configure(wl, ACX_RX_BA_FILTER, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rx ba activity filter setting failed: %d",
+ ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl18xx_acx_ap_sleep(struct wl1271 *wl)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ struct acx_ap_sleep_cfg *acx;
+ struct conf_ap_sleep_settings *conf = &priv->conf.ap_sleep;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx config ap sleep");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->idle_duty_cycle = conf->idle_duty_cycle;
+ acx->connected_duty_cycle = conf->connected_duty_cycle;
+ acx->max_stations_thresh = conf->max_stations_thresh;
+ acx->idle_conn_thresh = conf->idle_conn_thresh;
+
+ ret = wl1271_cmd_configure(wl, ACX_AP_SLEEP_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx config ap-sleep failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
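
Editor's note: wl18xx_acx_interrupt_notify_config(), wl18xx_acx_rx_ba_filter() and wl18xx_acx_ap_sleep() are meant to be driven through the new optional per-chip hooks that the wl12xx ops table above stubs out as NULL (.interrupt_notify, .rx_ba_filter, .ap_sleep). A hedged sketch of the kind of NULL-tolerant wrapper the wlcore side would use; the wrapper names are illustrative:

/* Hedged sketch: optional-hook wrappers on the wlcore side.  wl12xx
 * leaves these ops NULL; wl18xx is expected to point them at the ACX
 * helpers added in this file.
 */
static inline int wlcore_hw_interrupt_notify(struct wl1271 *wl, bool action)
{
	if (!wl->ops->interrupt_notify)
		return 0;
	return wl->ops->interrupt_notify(wl, action);
}

static inline int wlcore_hw_rx_ba_filter(struct wl1271 *wl, bool action)
{
	if (!wl->ops->rx_ba_filter)
		return 0;
	return wl->ops->rx_ba_filter(wl, action);
}
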
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 0e636def1217..4afccd4b9467 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -32,7 +32,10 @@ enum {
ACX_SIM_CONFIG = 0x0053,
ACX_CLEAR_STATISTICS = 0x0054,
ACX_AUTO_RX_STREAMING = 0x0055,
- ACX_PEER_CAP = 0x0056
+ ACX_PEER_CAP = 0x0056,
+ ACX_INTERRUPT_NOTIFY = 0x0057,
+ ACX_RX_BA_FILTER = 0x0058,
+ ACX_AP_SLEEP_CFG = 0x0059
};
/* numbers of bits the length field takes (add 1 for the actual number) */
@@ -326,6 +329,44 @@ struct wlcore_acx_peer_cap {
u8 padding;
} __packed;
+/*
+ * ACX_INTERRUPT_NOTIFY
+ * enable/disable fast-link/PSM notification from FW
+ */
+struct wl18xx_acx_interrupt_notify {
+ struct acx_header header;
+ u32 enable;
+};
+
+/*
+ * ACX_RX_BA_FILTER
+ * enable/disable RX BA filtering in FW
+ */
+struct wl18xx_acx_rx_ba_filter {
+ struct acx_header header;
+ u32 enable;
+};
+
+struct acx_ap_sleep_cfg {
+ struct acx_header header;
+ /* Duty Cycle (20-80% of staying Awake) for IDLE AP
+ * (0: disable)
+ */
+ u8 idle_duty_cycle;
+ /* Duty Cycle (20-80% of staying Awake) for Connected AP
+ * (0: disable)
+ */
+ u8 connected_duty_cycle;
+ /* Maximum stations that are allowed to be connected to AP
+ * (255: no limit)
+ */
+ u8 max_stations_thresh;
+ /* Timeout till enabling the Sleep Mechanism after data stops
+ * [unit: 100 msec]
+ */
+ u8 idle_conn_thresh;
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
@@ -336,5 +377,8 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
u32 rate_set, u8 hlid);
+int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
+int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
+int wl18xx_acx_ap_sleep(struct wl1271 *wl);
#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index 44f0b205b065..a8d176ddc73c 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -33,7 +33,8 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
u32 supported_rates;
int ret;
- wl1271_debug(DEBUG_ACX, "cmd channel switch");
+ wl1271_debug(DEBUG_ACX, "cmd channel switch (count=%d)",
+ ch_switch->count);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -60,8 +61,12 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
goto out_free;
}
- supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
- wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES;
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ supported_rates |= wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+ else
+ supported_rates |=
+ wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
if (wlvif->p2p)
supported_rates &= ~CONF_TX_CCK_RATES;
cmd->local_supported_rates = cpu_to_le32(supported_rates);
@@ -167,3 +172,85 @@ out_free:
out:
return ret;
}
+
+int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
+{
+ struct wlcore_cmd_cac_start *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd cac (channel %d) %s",
+ wlvif->channel, start ? "start" : "stop");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->role_id = wlvif->role_id;
+ cmd->channel = wlvif->channel;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
+ cmd->band = WLCORE_BAND_5GHZ;
+ cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
+
+ ret = wl1271_cmd_send(wl,
+ start ? CMD_CAC_START : CMD_CAC_STOP,
+ cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send cac command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+ return ret;
+}
+
+int wl18xx_cmd_radar_detection_debug(struct wl1271 *wl, u8 channel)
+{
+ struct wl18xx_cmd_dfs_radar_debug *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd radar detection debug (chan %d)",
+ channel);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->channel = channel;
+
+ ret = wl1271_cmd_send(wl, CMD_DFS_RADAR_DETECTION_DEBUG,
+ cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send radar detection debug command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+ return ret;
+}
+
+int wl18xx_cmd_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl18xx_cmd_dfs_master_restart *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd dfs master restart (role %d)",
+ wlvif->role_id);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->role_id = wlvif->role_id;
+
+ ret = wl1271_cmd_send(wl, CMD_DFS_MASTER_RESTART,
+ cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send dfs master restart command");
+ goto out_free;
+ }
+out_free:
+ kfree(cmd);
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h
index 92499e2dfa83..7f9440a2bff8 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.h
+++ b/drivers/net/wireless/ti/wl18xx/cmd.h
@@ -59,6 +59,30 @@ struct wl18xx_cmd_smart_config_set_group_key {
u8 key[16];
} __packed;
+struct wl18xx_cmd_dfs_radar_debug {
+ struct wl1271_cmd_header header;
+
+ u8 channel;
+ u8 padding[3];
+} __packed;
+
+struct wl18xx_cmd_dfs_master_restart {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 padding[3];
+} __packed;
+
+/* cac_start and cac_stop share the same params */
+struct wlcore_cmd_cac_start {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 channel;
+ u8 band;
+ u8 bandwidth;
+} __packed;
+
int wl18xx_cmd_channel_switch(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch);
@@ -66,4 +90,7 @@ int wl18xx_cmd_smart_config_start(struct wl1271 *wl, u32 group_bitmap);
int wl18xx_cmd_smart_config_stop(struct wl1271 *wl);
int wl18xx_cmd_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
u8 key_len, u8 *key);
+int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start);
+int wl18xx_cmd_radar_detection_debug(struct wl1271 *wl, u8 channel);
+int wl18xx_cmd_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif);
#endif
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
index e34302e3b51d..71f1ec448ba5 100644
--- a/drivers/net/wireless/ti/wl18xx/conf.h
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -23,7 +23,7 @@
#define __WL18XX_CONF_H__
#define WL18XX_CONF_MAGIC 0x10e100ca
-#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0006)
+#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0007)
#define WL18XX_CONF_MASK 0x0000ffff
#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
sizeof(struct wl18xx_priv_conf))
@@ -110,12 +110,33 @@ struct wl18xx_ht_settings {
u8 mode;
} __packed;
+struct conf_ap_sleep_settings {
+ /* Duty Cycle (20-80% of staying Awake) for IDLE AP
+ * (0: disable)
+ */
+ u8 idle_duty_cycle;
+ /* Duty Cycle (20-80% of staying Awake) for Connected AP
+ * (0: disable)
+ */
+ u8 connected_duty_cycle;
+ /* Maximum stations that are allowed to be connected to AP
+ * (255: no limit)
+ */
+ u8 max_stations_thresh;
+ /* Timeout till enabling the Sleep Mechanism after data stops
+ * [unit: 100 msec]
+ */
+ u8 idle_conn_thresh;
+} __packed;
+
struct wl18xx_priv_conf {
/* Module params structures */
struct wl18xx_ht_settings ht;
/* this structure is copied wholesale to FW */
struct wl18xx_mac_and_phy_params phy;
+
+ struct conf_ap_sleep_settings ap_sleep;
} __packed;
#endif /* __WL18XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 7f1669cdea09..c93fae95baac 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -22,9 +22,12 @@
#include "../wlcore/debugfs.h"
#include "../wlcore/wlcore.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/ps.h"
#include "wl18xx.h"
#include "acx.h"
+#include "cmd.h"
#include "debugfs.h"
#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
@@ -239,6 +242,45 @@ static const struct file_operations clear_fw_stats_ops = {
.llseek = default_llseek,
};
+static ssize_t radar_detection_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ int ret;
+ u8 channel;
+
+ ret = kstrtou8_from_user(user_buf, count, 10, &channel);
+ if (ret < 0) {
+ wl1271_warning("illegal channel");
+ return -EINVAL;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_cmd_radar_detection_debug(wl, channel);
+ if (ret < 0)
+ count = ret;
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations radar_detection_ops = {
+ .write = radar_detection_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
@@ -390,6 +432,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
DEBUGFS_ADD(conf, moddir);
+ DEBUGFS_ADD(radar_detection, moddir);
return 0;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index eb1848e08424..c28f06854195 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -47,6 +47,19 @@ int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
}
+static const char *wl18xx_radar_type_decode(u8 radar_type)
+{
+ switch (radar_type) {
+ case RADAR_TYPE_REGULAR:
+ return "REGULAR";
+ case RADAR_TYPE_CHIRP:
+ return "CHIRP";
+ case RADAR_TYPE_NONE:
+ default:
+ return "N/A";
+ }
+}
+
static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
u8 sync_band)
{
@@ -115,6 +128,14 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
wl18xx_scan_completed(wl, wl->scan_wlvif);
}
+ if (vector & RADAR_DETECTED_EVENT_ID) {
+ wl1271_info("radar event: channel %d type %s",
+ mbox->radar_channel,
+ wl18xx_radar_type_decode(mbox->radar_type));
+
+ ieee80211_radar_detected(wl->hw);
+ }
+
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT,
"PERIODIC_SCAN_REPORT_EVENT (results %d)",
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 0680312d4943..266ee87834e4 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -42,6 +42,12 @@ enum {
SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
};
+enum wl18xx_radar_types {
+ RADAR_TYPE_NONE,
+ RADAR_TYPE_REGULAR,
+ RADAR_TYPE_CHIRP
+};
+
struct wl18xx_event_mailbox {
__le32 events_vector;
@@ -83,13 +89,19 @@ struct wl18xx_event_mailbox {
u8 sc_token_len;
u8 padding1;
u8 sc_ssid[32];
- u8 sc_pwd[32];
+ u8 sc_pwd[64];
u8 sc_token[32];
/* smart config sync channel */
u8 sc_sync_channel;
u8 sc_sync_band;
u8 padding2[2];
+
+ /* radar detect */
+ u8 radar_channel;
+ u8 radar_type;
+
+ u8 padding3[2];
} __packed;
int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 8e562610bf16..717c4f5a02c2 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -378,6 +378,7 @@ static struct wlcore_conf wl18xx_conf = {
.keep_alive_interval = 55000,
.max_listen_interval = 20,
.sta_sleep_auth = WL1271_PSM_ILLEGAL,
+ .suspend_rx_ba_activity = 0,
},
.itrim = {
.enable = false,
@@ -567,6 +568,12 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.high_power_val_2nd = 0xff,
.tx_rf_margin = 1,
},
+ .ap_sleep = { /* disabled by default */
+ .idle_duty_cycle = 0,
+ .connected_duty_cycle = 0,
+ .max_stations_thresh = 0,
+ .idle_conn_thresh = 0,
+ },
};
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
@@ -648,7 +655,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-4.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -983,6 +990,7 @@ static int wl18xx_boot(struct wl1271 *wl)
wl->event_mask = BSS_LOSS_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
+ RADAR_DETECTED_EVENT_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
PERIODIC_SCAN_COMPLETE_EVENT_ID |
PERIODIC_SCAN_REPORT_EVENT_ID |
@@ -1559,26 +1567,19 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
}
static void wl18xx_sta_rc_update(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct ieee80211_sta *sta,
- u32 changed)
+ struct wl12xx_vif *wlvif)
{
- bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bool wide = wlvif->rc_update_bw >= IEEE80211_STA_RX_BW_40;
wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide);
- if (!(changed & IEEE80211_RC_BW_CHANGED))
- return;
-
- mutex_lock(&wl->mutex);
-
/* sanity */
if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
- goto out;
+ return;
/* ignore the change before association */
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- goto out;
+ return;
/*
* If we started out as wide, we can change the operation mode. If we
@@ -1589,9 +1590,6 @@ static void wl18xx_sta_rc_update(struct wl1271 *wl,
wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide);
else
ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif));
-
-out:
- mutex_unlock(&wl->mutex);
}
static int wl18xx_set_peer_cap(struct wl1271 *wl,
@@ -1703,6 +1701,11 @@ static struct wlcore_ops wl18xx_ops = {
.smart_config_start = wl18xx_cmd_smart_config_start,
.smart_config_stop = wl18xx_cmd_smart_config_stop,
.smart_config_set_group_key = wl18xx_cmd_smart_config_set_group_key,
+ .interrupt_notify = wl18xx_acx_interrupt_notify_config,
+ .rx_ba_filter = wl18xx_acx_rx_ba_filter,
+ .ap_sleep = wl18xx_acx_ap_sleep,
+ .set_cac = wl18xx_cmd_set_cac,
+ .dfs_master_restart = wl18xx_cmd_dfs_master_restart,
};
/* HT cap appropriate for wide channels in 2Ghz */
@@ -1796,6 +1799,10 @@ wl18xx_iface_combinations[] = {
.limits = wl18xx_iface_ap_limits,
.n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
.num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) |
+ BIT(NL80211_CHAN_HT20) |
+ BIT(NL80211_CHAN_HT40MINUS) |
+ BIT(NL80211_CHAN_HT40PLUS),
}
};
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 6a2b88030c1d..71e9e382ce80 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 8
+#define WL18XX_IFTYPE_VER 9
#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER 13
+#define WL18XX_MINOR_VER 11
#define WL18XX_CMD_MAX_SIZE 740
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index b924ceadc02c..f28fa3b5029d 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1725,7 +1725,7 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl)
acx->decrease_delta = conf->decrease_delta;
acx->quiet_time = conf->quiet_time;
acx->increase_time = conf->increase_time;
- acx->window_size = acx->window_size;
+ acx->window_size = conf->window_size;
ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx,
sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index b82661962d33..c26fc2106e5b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -403,7 +403,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
WARN_ON_ONCE(wl->active_link_count < 0);
}
-static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
+u8 wlcore_get_native_channel_type(u8 nl_channel_type)
{
switch (nl_channel_type) {
case NL80211_CHAN_NO_HT:
@@ -419,6 +419,7 @@ static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
return WLCORE_CHAN_NO_HT;
}
}
+EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
@@ -1686,9 +1687,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
{
struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
int ret = 0, i, b, ch_bit_idx;
- struct ieee80211_channel *channel;
u32 tmp_ch_bitmap[2];
- u16 ch;
struct wiphy *wiphy = wl->hw->wiphy;
struct ieee80211_supported_band *band;
bool timeout = false;
@@ -1703,12 +1702,16 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
band = wiphy->bands[b];
for (i = 0; i < band->n_channels; i++) {
- channel = &band->channels[i];
- ch = channel->hw_value;
+ struct ieee80211_channel *channel = &band->channels[i];
+ u16 ch = channel->hw_value;
+ u32 flags = channel->flags;
- if (channel->flags & (IEEE80211_CHAN_DISABLED |
- IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IR))
+ if (flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_IR))
+ continue;
+
+ if ((flags & IEEE80211_CHAN_RADAR) &&
+ channel->dfs_state != NL80211_DFS_AVAILABLE)
continue;
ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
@@ -1733,6 +1736,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);
+ cmd->dfs_region = wl->dfs_region;
wl1271_debug(DEBUG_CMD,
"cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x",
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 453684a71d30..e14cd407a6ae 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -105,6 +105,7 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
u32 mask, bool *timeout);
+u8 wlcore_get_native_channel_type(u8 nl_channel_type);
enum wl1271_commands {
CMD_INTERROGATE = 1, /* use this to read information elements */
@@ -172,6 +173,11 @@ enum wl1271_commands {
CMD_SMART_CONFIG_STOP = 62,
CMD_SMART_CONFIG_SET_GROUP_KEY = 63,
+ CMD_CAC_START = 64,
+ CMD_CAC_STOP = 65,
+ CMD_DFS_MASTER_RESTART = 66,
+ CMD_DFS_RADAR_DETECTION_DEBUG = 67,
+
MAX_COMMAND_ID = 0xFFFF,
};
@@ -642,6 +648,8 @@ struct wl12xx_cmd_regdomain_dfs_config {
__le32 ch_bit_map1;
__le32 ch_bit_map2;
+ u8 dfs_region;
+ u8 padding[3];
} __packed;
struct wl12xx_cmd_config_fwlog {
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 40995c42bef8..166add00b50f 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -997,6 +997,11 @@ struct conf_conn_settings {
* whether we can go to ELP.
*/
u8 sta_sleep_auth;
+
+ /*
+ * Default RX BA Activity filter configuration
+ */
+ u8 suspend_rx_ba_activity;
} __packed;
enum {
@@ -1347,7 +1352,7 @@ struct conf_recovery_settings {
* version, the two LSB are the lower driver's private conf
* version.
*/
-#define WLCORE_CONF_VERSION (0x0005 << 16)
+#define WLCORE_CONF_VERSION (0x0006 << 16)
#define WLCORE_CONF_MASK 0xffff0000
#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
sizeof(struct wlcore_conf))
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 0be21f62fcb0..68f3bf229b5a 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -929,17 +929,10 @@ static ssize_t beacon_filtering_write(struct file *file,
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
- char buf[10];
- size_t len;
unsigned long value;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = kstrtoul(buf, 0, &value);
+ ret = kstrtoul_from_user(user_buf, count, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value for beacon_filtering!");
return -EINVAL;
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 5153640f4532..c42e78955e7b 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -139,7 +139,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
__func__, roles_bitmap, success);
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl12xx_for_each_wlvif(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id , &roles_bitmap))
continue;
@@ -150,8 +150,13 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_chswitch_done(vif, success);
- cancel_delayed_work(&wlvif->channel_switch_work);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+ ieee80211_chswitch_done(vif, success);
+ cancel_delayed_work(&wlvif->channel_switch_work);
+ } else {
+ set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
+ ieee80211_csa_finish(vif);
+ }
}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index aa9f82c72296..eec56935b1b6 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -211,11 +211,35 @@ wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
}
static inline void
-wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct ieee80211_sta *sta, u32 changed)
+wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
if (wl->ops->sta_rc_update)
- wl->ops->sta_rc_update(wl, wlvif, sta, changed);
+ wl->ops->sta_rc_update(wl, wlvif);
+}
+
+static inline int
+wlcore_hw_interrupt_notify(struct wl1271 *wl, bool action)
+{
+ if (wl->ops->interrupt_notify)
+ return wl->ops->interrupt_notify(wl, action);
+ return 0;
+}
+
+static inline int
+wlcore_hw_rx_ba_filter(struct wl1271 *wl, bool action)
+{
+ if (wl->ops->rx_ba_filter)
+ return wl->ops->rx_ba_filter(wl, action);
+ return 0;
+}
+
+static inline int
+wlcore_hw_ap_sleep(struct wl1271 *wl)
+{
+ if (wl->ops->ap_sleep)
+ return wl->ops->ap_sleep(wl);
+
+ return 0;
}
static inline int
@@ -287,4 +311,22 @@ wlcore_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
return wl->ops->smart_config_set_group_key(wl, group_id, key_len, key);
}
+
+static inline int
+wlcore_hw_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
+{
+ if (!wl->ops->set_cac)
+ return -EINVAL;
+
+ return wl->ops->set_cac(wl, wlvif, start);
+}
+
+static inline int
+wlcore_hw_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ if (!wl->ops->dfs_master_restart)
+ return -EINVAL;
+
+ return wl->ops->dfs_master_restart(wl, wlvif);
+}
#endif
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 199e94120864..5ca1fb161a50 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -392,6 +392,11 @@ static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (ret < 0)
return ret;
+ /* configure AP sleep, if enabled */
+ ret = wlcore_hw_ap_sleep(wl);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -567,8 +572,7 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
/* consider all existing roles before configuring psm. */
if (wl->ap_count == 0 && is_ap) { /* first AP */
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 6ad3fcedab9b..1e136993580f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -79,22 +79,12 @@ static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
static void wl1271_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
{
- struct ieee80211_supported_band *band;
- struct ieee80211_channel *ch;
- int i;
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wl1271 *wl = hw->priv;
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- for (i = 0; i < band->n_channels; i++) {
- ch = &band->channels[i];
- if (ch->flags & IEEE80211_CHAN_DISABLED)
- continue;
-
- if (ch->flags & IEEE80211_CHAN_RADAR)
- ch->flags |= IEEE80211_CHAN_NO_IR;
-
- }
+ /* copy the current dfs region */
+ if (request)
+ wl->dfs_region = request->dfs_region;
wlcore_regdomain_config(wl);
}
@@ -226,6 +216,29 @@ void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}
+static void wlcore_rc_update_work(struct work_struct *work)
+{
+ int ret;
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rc_update_work);
+ struct wl1271 *wl = wlvif->wl;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wlcore_hw_sta_rc_update(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
struct delayed_work *dwork;
@@ -1662,19 +1675,15 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
ret = wl1271_configure_wowlan(wl, wow);
if (ret < 0)
- goto out_sleep;
+ goto out;
if ((wl->conf.conn.suspend_wake_up_event ==
wl->conf.conn.wake_up_event) &&
(wl->conf.conn.suspend_listen_interval ==
wl->conf.conn.listen_interval))
- goto out_sleep;
+ goto out;
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
@@ -1682,29 +1691,28 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
wl1271_error("suspend: set wake up conditions failed: %d", ret);
-
-out_sleep:
- wl1271_ps_elp_sleep(wl);
out:
return ret;
}
static int wl1271_configure_suspend_ap(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_wowlan *wow)
{
int ret = 0;
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
goto out;
- ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ ret = wl1271_configure_wowlan(wl, wow);
+ if (ret < 0)
+ goto out;
- wl1271_ps_elp_sleep(wl);
out:
return ret;
@@ -1717,7 +1725,7 @@ static int wl1271_configure_suspend(struct wl1271 *wl,
if (wlvif->bss_type == BSS_TYPE_STA_BSS)
return wl1271_configure_suspend_sta(wl, wlvif, wow);
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
- return wl1271_configure_suspend_ap(wl, wlvif);
+ return wl1271_configure_suspend_ap(wl, wlvif, wow);
return 0;
}
@@ -1730,21 +1738,18 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if ((!is_ap) && (!is_sta))
return;
- if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+ (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
return;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- return;
+ wl1271_configure_wowlan(wl, NULL);
if (is_sta) {
- wl1271_configure_wowlan(wl, NULL);
-
if ((wl->conf.conn.suspend_wake_up_event ==
wl->conf.conn.wake_up_event) &&
(wl->conf.conn.suspend_listen_interval ==
wl->conf.conn.listen_interval))
- goto out_sleep;
+ return;
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.wake_up_event,
@@ -1757,9 +1762,6 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
} else if (is_ap) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
-
-out_sleep:
- wl1271_ps_elp_sleep(wl);
}
static int wl1271_op_suspend(struct ieee80211_hw *hw,
@@ -1781,6 +1783,13 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0) {
+ mutex_unlock(&wl->mutex);
+ return ret;
+ }
+
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_configure_suspend(wl, wlvif, wow);
@@ -1790,7 +1799,27 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
return ret;
}
}
+
+ /* disable fast link flow control notifications from FW */
+ ret = wlcore_hw_interrupt_notify(wl, false);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* if filtering is enabled, configure the FW to drop all RX BA frames */
+ ret = wlcore_hw_rx_ba_filter(wl,
+ !!wl->conf.conn.suspend_rx_ba_activity);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("couldn't prepare device to suspend");
+ return ret;
+ }
+
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1864,13 +1893,29 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
if (pending_recovery) {
wl1271_warning("queuing forgotten recovery on resume");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
- goto out;
+ goto out_sleep;
}
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
wl12xx_for_each_wlvif(wl, wlvif) {
wl1271_configure_resume(wl, wlvif);
}
+ ret = wlcore_hw_interrupt_notify(wl, true);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* restore normal RX BA handling: stop dropping RX BA frames in FW */
+ ret = wlcore_hw_rx_ba_filter(wl, false);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
out:
wl->wow_enabled = false;
@@ -2279,6 +2324,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl1271_rx_streaming_enable_work);
INIT_WORK(&wlvif->rx_streaming_disable_work,
wl1271_rx_streaming_disable_work);
+ INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
INIT_DELAYED_WORK(&wlvif->channel_switch_work,
wlcore_channel_switch_work);
INIT_DELAYED_WORK(&wlvif->connection_loss_work,
@@ -2508,6 +2554,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
}
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
@@ -2723,6 +2770,7 @@ unlock:
del_timer_sync(&wlvif->rx_streaming_timer);
cancel_work_sync(&wlvif->rx_streaming_enable_work);
cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_work_sync(&wlvif->rc_update_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
cancel_delayed_work_sync(&wlvif->channel_switch_work);
cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
@@ -4072,8 +4120,14 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
ret = wlcore_set_beacon_template(wl, vif, is_ap);
if (ret < 0)
goto out;
- }
+ if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
+ &wlvif->flags)) {
+ ret = wlcore_hw_dfs_master_restart(wl, wlvif);
+ if (ret < 0)
+ goto out;
+ }
+ }
out:
if (ret != 0)
wl1271_error("beacon info change failed: %d", ret);
@@ -4574,10 +4628,46 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+ int ret;
+ int channel = ieee80211_frequency_to_channel(
+ ctx->def.chan->center_freq);
+
wl1271_debug(DEBUG_MAC80211,
"mac80211 change chanctx %d (type %d) changed 0x%x",
- ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
- cfg80211_get_chandef_type(&ctx->def), changed);
+ channel, cfg80211_get_chandef_type(&ctx->def), changed);
+
+ mutex_lock(&wl->mutex);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+ rcu_read_lock();
+ if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
+ rcu_read_unlock();
+ continue;
+ }
+ rcu_read_unlock();
+
+ /* start radar if needed */
+ if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
+ wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ ctx->radar_enabled && !wlvif->radar_enabled &&
+ ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
+ wl1271_debug(DEBUG_MAC80211, "Start radar detection");
+ wlcore_hw_set_cac(wl, wlvif, true);
+ wlvif->radar_enabled = true;
+ }
+ }
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
}
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -4588,13 +4678,26 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int channel = ieee80211_frequency_to_channel(
ctx->def.chan->center_freq);
+ int ret = -EINVAL;
wl1271_debug(DEBUG_MAC80211,
- "mac80211 assign chanctx (role %d) %d (type %d)",
- wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
+ "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
+ wlvif->role_id, channel,
+ cfg80211_get_chandef_type(&ctx->def),
+ ctx->radar_enabled, ctx->def.chan->dfs_state);
mutex_lock(&wl->mutex);
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
@@ -4602,6 +4705,15 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* update default rates according to the band */
wl1271_set_band_rate(wl, wlvif);
+ if (ctx->radar_enabled &&
+ ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
+ wl1271_debug(DEBUG_MAC80211, "Start radar detection");
+ wlcore_hw_set_cac(wl, wlvif, true);
+ wlvif->radar_enabled = true;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+out:
mutex_unlock(&wl->mutex);
return 0;
@@ -4613,6 +4725,7 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret;
wl1271_debug(DEBUG_MAC80211,
"mac80211 unassign chanctx (role %d) %d (type %d)",
@@ -4621,6 +4734,99 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
cfg80211_get_chandef_type(&ctx->def));
wl1271_tx_flush(wl);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ if (wlvif->radar_enabled) {
+ wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
+ wlcore_hw_set_cac(wl, wlvif, false);
+ wlvif->radar_enabled = false;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int __wlcore_switch_vif_chan(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_chanctx_conf *new_ctx)
+{
+ int channel = ieee80211_frequency_to_channel(
+ new_ctx->def.chan->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211,
+ "switch vif (role %d) %d -> %d chan_type: %d",
+ wlvif->role_id, wlvif->channel, channel,
+ cfg80211_get_chandef_type(&new_ctx->def));
+
+ if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
+ return 0;
+
+ WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
+
+ if (wlvif->radar_enabled) {
+ wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
+ wlcore_hw_set_cac(wl, wlvif, false);
+ wlvif->radar_enabled = false;
+ }
+
+ wlvif->band = new_ctx->def.chan->band;
+ wlvif->channel = channel;
+ wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
+
+ /* start radar if needed */
+ if (new_ctx->radar_enabled) {
+ wl1271_debug(DEBUG_MAC80211, "Start radar detection");
+ wlcore_hw_set_cac(wl, wlvif, true);
+ wlvif->radar_enabled = true;
+ }
+
+ return 0;
+}
+
+static int
+wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct wl1271 *wl = hw->priv;
+ int i, ret;
+
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 switch chanctx n_vifs %d mode %d",
+ n_vifs, mode);
+
+ mutex_lock(&wl->mutex);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < n_vifs; i++) {
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
+
+ ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
+ if (ret)
+ goto out_sleep;
+ }
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return 0;
}
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
@@ -5228,6 +5434,83 @@ out:
mutex_unlock(&wl->mutex);
}
+static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 eid)
+{
+ int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ struct sk_buff *beacon =
+ ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
+
+ if (!beacon)
+ return NULL;
+
+ return cfg80211_find_ie(eid,
+ beacon->data + ieoffset,
+ beacon->len - ieoffset);
+}
+
+static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 *csa_count)
+{
+ const u8 *ie;
+ const struct ieee80211_channel_sw_ie *ie_csa;
+
+ ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
+ if (!ie)
+ return -EINVAL;
+
+ ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
+ *csa_count = ie_csa->count;
+
+ return 0;
+}
+
+static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_channel_switch ch_switch = {
+ .block_tx = true,
+ .chandef = *chandef,
+ };
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 channel switch beacon (role %d)",
+ wlvif->role_id);
+
+ ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
+ if (ret < 0) {
+ wl1271_error("error getting beacon (for CSA counter)");
+ return;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
+ if (ret)
+ goto out_sleep;
+
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -5370,19 +5653,26 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- struct wl1271 *wl = hw->priv;
- wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
+
+ if (!(changed & IEEE80211_RC_BW_CHANGED))
+ return;
+
+ /* this callback is atomic, so schedule a new work */
+ wlvif->rc_update_bw = sta->bandwidth;
+ ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
-static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- s8 *rssi_dbm)
+static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- int ret = 0;
+ s8 rssi_dbm;
+ int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
@@ -5395,17 +5685,18 @@ static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
+ ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
goto out_sleep;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi_dbm;
+
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
-
- return ret;
}
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
@@ -5596,6 +5887,7 @@ static const struct ieee80211_ops wl1271_ops = {
.set_bitrate_mask = wl12xx_set_bitrate_mask,
.set_default_unicast_key = wl1271_op_set_default_key_idx,
.channel_switch = wl12xx_op_channel_switch,
+ .channel_switch_beacon = wlcore_op_channel_switch_beacon,
.flush = wlcore_op_flush,
.remain_on_channel = wlcore_op_remain_on_channel,
.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
@@ -5604,8 +5896,9 @@ static const struct ieee80211_ops wl1271_ops = {
.change_chanctx = wlcore_op_change_chanctx,
.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
.sta_rc_update = wlcore_op_sta_rc_update,
- .get_rssi = wlcore_op_get_rssi,
+ .sta_statistics = wlcore_op_sta_statistics,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -5776,7 +6069,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
@@ -5811,7 +6103,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH;
/* make sure all our channels fit in the scanned_ch bitmask */
BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index b52516eed7b2..4cd316e61466 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -56,9 +56,6 @@ void wl1271_elp_work(struct work_struct *work)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
- goto out;
-
if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
goto out;
@@ -95,9 +92,6 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
return;
wl12xx_for_each_wlvif(wl, wlvif) {
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
- return;
-
if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return;
@@ -108,6 +102,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(timeout));
}
+EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
@@ -175,6 +170,7 @@ err:
out:
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode)
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index ad86a48dcfcb..fd4e9ba176c9 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -21,7 +21,7 @@ static const
struct nla_policy wlcore_vendor_attr_policy[NUM_WLCORE_VENDOR_ATTR] = {
[WLCORE_VENDOR_ATTR_FREQ] = { .type = NLA_U32 },
[WLCORE_VENDOR_ATTR_GROUP_ID] = { .type = NLA_U32 },
- [WLCORE_VENDOR_ATTR_GROUP_KEY] = { .type = NLA_U32,
+ [WLCORE_VENDOR_ATTR_GROUP_KEY] = { .type = NLA_BINARY,
.len = WLAN_MAX_KEY_LEN },
};
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index df78cf12ef15..d599c869e6e8 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -106,8 +106,7 @@ struct wlcore_ops {
struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch);
u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
- void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct ieee80211_sta *sta, u32 changed);
+ void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int (*set_peer_cap)(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
@@ -117,10 +116,16 @@ struct wlcore_ops {
struct wl1271_link *lnk);
bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk);
+ int (*interrupt_notify)(struct wl1271 *wl, bool action);
+ int (*rx_ba_filter)(struct wl1271 *wl, bool action);
+ int (*ap_sleep)(struct wl1271 *wl);
int (*smart_config_start)(struct wl1271 *wl, u32 group_bitmap);
int (*smart_config_stop)(struct wl1271 *wl);
int (*smart_config_set_group_key)(struct wl1271 *wl, u16 group_id,
u8 key_len, u8 *key);
+ int (*set_cac)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool start);
+ int (*dfs_master_restart)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
};
enum wlcore_partitions {
@@ -460,6 +465,9 @@ struct wl1271 {
/* HW HT (11n) capabilities */
struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
+ /* the current dfs region */
+ enum nl80211_dfs_regions dfs_region;
+
/* size of the private FW status data */
size_t fw_status_len;
size_t fw_status_priv_len;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 0e52556044d9..3396ce5a934d 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -251,6 +251,7 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_AP_PROBE_RESP_SET,
WLVIF_FLAG_IN_USE,
WLVIF_FLAG_ACTIVE,
+ WLVIF_FLAG_BEACON_DISABLED,
};
struct wl12xx_vif;
@@ -434,6 +435,8 @@ struct wl12xx_vif {
bool wmm_enabled;
+ bool radar_enabled;
+
/* Rx Streaming */
struct work_struct rx_streaming_enable_work;
struct work_struct rx_streaming_disable_work;
@@ -463,6 +466,10 @@ struct wl12xx_vif {
/* work for canceling ROC after pending auth reply */
struct delayed_work pending_auth_complete_work;
+ /* update rate control */
+ enum ieee80211_sta_rx_bandwidth rc_update_bw;
+ struct work_struct rc_update_work;
+
/*
* total freed FW packets on the link.
* For STA this holds the PN of the link to the AP.
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5f1fda44882b..589fa256256b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -251,7 +251,6 @@ struct xenvif {
struct xenvif_rx_cb {
unsigned long expires;
int meta_slots_used;
- bool full_coalesce;
};
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a732e8a4..f38227afe099 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -80,7 +80,7 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int xenvif_poll(struct napi_struct *napi, int budget)
+static int xenvif_poll(struct napi_struct *napi, int budget)
{
struct xenvif_queue *queue =
container_of(napi, struct xenvif_queue, napi);
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
* better enable it. The long term solution would be to use just a
* bunch of valid page descriptors, without dependency on ballooning
*/
- err = alloc_xenballooned_pages(MAX_PENDING_REQS,
- queue->mmap_pages,
- false);
+ err = gnttab_alloc_pages(MAX_PENDING_REQS,
+ queue->mmap_pages);
if (err) {
netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
return -ENOMEM;
@@ -578,6 +577,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
goto err_rx_unbind;
}
queue->task = task;
+ get_task_struct(task);
task = kthread_create(xenvif_dealloc_kthread,
(void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +634,7 @@ void xenvif_disconnect(struct xenvif *vif)
if (queue->task) {
kthread_stop(queue->task);
+ put_task_struct(queue->task);
queue->task = NULL;
}
@@ -662,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
*/
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
- free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+ gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}
void xenvif_free(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 908e65e9b821..f7a31d2cb3f1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -233,51 +233,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
}
}
-/*
- * Returns true if we should start a new receive buffer instead of
- * adding 'size' bytes to a buffer which currently contains 'offset'
- * bytes.
- */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head,
- bool full_coalesce)
-{
- /* simple case: we have completely filled the current buffer. */
- if (offset == MAX_BUFFER_OFFSET)
- return true;
-
- /*
- * complex case: start a fresh buffer if the current frag
- * would overflow the current buffer but only if:
- * (i) this frag would fit completely in the next buffer
- * and (ii) there is already some data in the current buffer
- * and (iii) this is not the head buffer.
- * and (iv) there is no need to fully utilize the buffers
- *
- * Where:
- * - (i) stops us splitting a frag into two copies
- * unless the frag is too large for a single buffer.
- * - (ii) stops us from leaving a buffer pointlessly empty.
- * - (iii) stops us leaving the first buffer
- * empty. Strictly speaking this is already covered
- * by (ii) but is explicitly checked because
- * netfront relies on the first buffer being
- * non-empty and can crash otherwise.
- * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
- * slot
- *
- * This means we will effectively linearise small
- * frags but do not needlessly split large buffers
- * into multiple copies tend to give large frags their
- * own buffers as before.
- */
- BUG_ON(size > MAX_BUFFER_OFFSET);
- if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
- !full_coalesce)
- return true;
-
- return false;
-}
-
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
@@ -314,9 +269,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
- unsigned long offset, int *head,
- struct xenvif_queue *foreign_queue,
- grant_ref_t foreign_gref)
+ unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
@@ -333,27 +286,18 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
offset &= ~PAGE_MASK;
while (size > 0) {
+ struct xen_page_foreign *foreign;
+
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
- bytes = PAGE_SIZE - offset;
+ if (npo->copy_off == MAX_BUFFER_OFFSET)
+ meta = get_next_rx_buffer(queue, npo);
+ bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
- if (start_new_rx_buffer(npo->copy_off,
- bytes,
- *head,
- XENVIF_RX_CB(skb)->full_coalesce)) {
- /*
- * Netfront requires there to be some data in the head
- * buffer.
- */
- BUG_ON(*head);
-
- meta = get_next_rx_buffer(queue, npo);
- }
-
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
@@ -361,9 +305,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;
- if (foreign_queue) {
- copy_gop->source.domid = foreign_queue->vif->domid;
- copy_gop->source.u.ref = foreign_gref;
+ foreign = xen_page_foreign(page);
+ if (foreign) {
+ copy_gop->source.domid = foreign->domid;
+ copy_gop->source.u.ref = foreign->gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
copy_gop->source.domid = DOMID_SELF;
@@ -406,35 +351,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
}
/*
- * Find the grant ref for a given frag in a chain of struct ubuf_info's
- * skb: the skb itself
- * i: the frag's number
- * ubuf: a pointer to an element in the chain. It should not be NULL
- *
- * Returns a pointer to the element in the chain where the page were found. If
- * not found, returns NULL.
- * See the definition of callback_struct in common.h for more details about
- * the chain.
- */
-static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
- const int i,
- const struct ubuf_info *ubuf)
-{
- struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
-
- do {
- u16 pending_idx = ubuf->desc;
-
- if (skb_shinfo(skb)->frags[i].page.p ==
- foreign_queue->mmap_pages[pending_idx])
- break;
- ubuf = (struct ubuf_info *) ubuf->ctx;
- } while (ubuf);
-
- return ubuf;
-}
-
-/*
* Prepare an SKB to be transmitted to the frontend.
*
* This function is responsible for allocating grant operations, meta
@@ -459,8 +375,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
- const struct ubuf_info *const head_ubuf = ubuf;
old_meta_prod = npo->meta_prod;
@@ -507,68 +421,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
len = skb_tail_pointer(skb) - data;
xenvif_gop_frag_copy(queue, skb, npo,
- virt_to_page(data), len, offset, &head,
- NULL,
- 0);
+ virt_to_page(data), len, offset, &head);
data += len;
}
for (i = 0; i < nr_frags; i++) {
- /* This variable also signals whether foreign_gref has a real
- * value or not.
- */
- struct xenvif_queue *foreign_queue = NULL;
- grant_ref_t foreign_gref;
-
- if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
- (ubuf->callback == &xenvif_zerocopy_callback)) {
- const struct ubuf_info *const startpoint = ubuf;
-
- /* Ideally ubuf points to the chain element which
- * belongs to this frag. Or if frags were removed from
- * the beginning, then shortly before it.
- */
- ubuf = xenvif_find_gref(skb, i, ubuf);
-
- /* Try again from the beginning of the list, if we
- * haven't tried from there. This only makes sense in
- * the unlikely event of reordering the original frags.
- * For injected local pages it's an unnecessary second
- * run.
- */
- if (unlikely(!ubuf) && startpoint != head_ubuf)
- ubuf = xenvif_find_gref(skb, i, head_ubuf);
-
- if (likely(ubuf)) {
- u16 pending_idx = ubuf->desc;
-
- foreign_queue = ubuf_to_queue(ubuf);
- foreign_gref =
- foreign_queue->pending_tx_info[pending_idx].req.gref;
- /* Just a safety measure. If this was the last
- * element on the list, the for loop will
- * iterate again if a local page were added to
- * the end. Using head_ubuf here prevents the
- * second search on the chain. Or the original
- * frags changed order, but that's less likely.
- * In any way, ubuf shouldn't be NULL.
- */
- ubuf = ubuf->ctx ?
- (struct ubuf_info *) ubuf->ctx :
- head_ubuf;
- } else
- /* This frag was a local page, added to the
- * array after the skb left netback.
- */
- ubuf = head_ubuf;
- }
xenvif_gop_frag_copy(queue, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
- &head,
- foreign_queue,
- foreign_queue ? foreign_gref : UINT_MAX);
+ &head);
}
return npo->meta_prod - old_meta_prod;
@@ -652,60 +514,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
- RING_IDX max_slots_needed;
RING_IDX old_req_cons;
RING_IDX ring_slots_used;
- int i;
queue->last_rx_time = jiffies;
- /* We need a cheap worse case estimate for the number of
- * slots we'll use.
- */
-
- max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
- skb_headlen(skb),
- PAGE_SIZE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned int size;
- unsigned int offset;
-
- size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- offset = skb_shinfo(skb)->frags[i].page_offset;
-
- /* For a worse-case estimate we need to factor in
- * the fragment page offset as this will affect the
- * number of times xenvif_gop_frag_copy() will
- * call start_new_rx_buffer().
- */
- max_slots_needed += DIV_ROUND_UP(offset + size,
- PAGE_SIZE);
- }
-
- /* To avoid the estimate becoming too pessimal for some
- * frontends that limit posted rx requests, cap the estimate
- * at MAX_SKB_FRAGS. In this case netback will fully coalesce
- * the skb into the provided slots.
- */
- if (max_slots_needed > MAX_SKB_FRAGS) {
- max_slots_needed = MAX_SKB_FRAGS;
- XENVIF_RX_CB(skb)->full_coalesce = true;
- } else {
- XENVIF_RX_CB(skb)->full_coalesce = false;
- }
-
- /* We may need one more slot for GSO metadata */
- if (skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
- max_slots_needed++;
-
old_req_cons = queue->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
ring_slots_used = queue->rx.req_cons - old_req_cons;
- BUG_ON(ring_slots_used > max_slots_needed);
-
__skb_queue_tail(&rxq, skb);
}
@@ -1241,12 +1058,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
/* Take an extra reference to offset network stack's put_page */
get_page(queue->mmap_pages[pending_idx]);
}
- /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
- * overlaps with "index", and "mapping" is not set. I think mapping
- * should be set. If delivered to local stack, it would drop this
- * skb in sk_filter unless the socket has the right to use it.
- */
- skb->pfmemalloc = false;
}
static int xenvif_get_extras(struct xenvif_queue *queue,
@@ -2109,8 +1920,7 @@ int xenvif_kthread_guest_rx(void *data)
*/
if (unlikely(vif->disabled && queue->id == 0)) {
xenvif_carrier_off(vif);
- xenvif_rx_queue_purge(queue);
- continue;
+ break;
}
if (!skb_queue_empty(&queue->rx_queue))
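In the netback changes above, the removed ubuf-chain walk that located a frag's foreign domain and grant reference by hand is replaced by a per-page lookup through xen_page_foreign(), which becomes possible once the queue switches to gnttab_alloc_pages()/gnttab_free_pages() in interface.c. A minimal sketch of the data shape the new code relies on, assuming the helper defined in include/xen/grant_table.h (not part of this patch); xen_page_foreign() is expected to return NULL for local pages, which is why the DOMID_SELF branch is kept:

/* pages obtained via gnttab_alloc_pages() remember their grant origin */
struct xen_page_foreign {
	domid_t domid;      /* domain that granted the page */
	grant_ref_t gref;   /* grant reference within that domain */
};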
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 22bcb4e12e2a..e9b960f0ff32 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -88,10 +88,8 @@ struct netfront_cb {
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct netfront_stats {
- u64 rx_packets;
- u64 tx_packets;
- u64 rx_bytes;
- u64 tx_bytes;
+ u64 packets;
+ u64 bytes;
struct u64_stats_sync syncp;
};
@@ -144,10 +142,6 @@ struct netfront_queue {
struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
grant_ref_t gref_rx_head;
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
-
- unsigned long rx_pfn_array[NET_RX_RING_SIZE];
- struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
- struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};
struct netfront_info {
@@ -160,7 +154,8 @@ struct netfront_info {
struct netfront_queue *queues;
/* Statistics */
- struct netfront_stats __percpu *stats;
+ struct netfront_stats __percpu *rx_stats;
+ struct netfront_stats __percpu *tx_stats;
atomic_t rx_gso_checksum_fixup;
};
@@ -224,11 +219,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
}
#ifdef CONFIG_SYSFS
-static int xennet_sysfs_addif(struct net_device *netdev);
-static void xennet_sysfs_delif(struct net_device *netdev);
-#else /* !CONFIG_SYSFS */
-#define xennet_sysfs_addif(dev) (0)
-#define xennet_sysfs_delif(dev) do { } while (0)
+static const struct attribute_group xennet_dev_group;
#endif
static bool xennet_can_sg(struct net_device *dev)
@@ -425,109 +416,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
xennet_maybe_wake_tx(queue);
}
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
- struct xen_netif_tx_request *tx)
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+ struct netfront_queue *queue, struct sk_buff *skb,
+ struct page *page, unsigned int offset, unsigned int len)
{
- char *data = skb->data;
- unsigned long mfn;
- RING_IDX prod = queue->tx.req_prod_pvt;
- int frags = skb_shinfo(skb)->nr_frags;
- unsigned int offset = offset_in_page(data);
- unsigned int len = skb_headlen(skb);
unsigned int id;
+ struct xen_netif_tx_request *tx;
grant_ref_t ref;
- int i;
- /* While the header overlaps a page boundary (including being
- larger than a page), split it it into page-sized chunks. */
- while (len > PAGE_SIZE - offset) {
- tx->size = PAGE_SIZE - offset;
- tx->flags |= XEN_NETTXF_more_data;
- len -= tx->size;
- data += tx->size;
- offset = 0;
+ len = min_t(unsigned int, PAGE_SIZE - offset, len);
- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
- queue->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&queue->tx, prod++);
- tx->id = id;
- ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
- mfn = virt_to_mfn(data);
- gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
+ gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+ page_to_mfn(page), GNTMAP_readonly);
- queue->grant_tx_page[id] = virt_to_page(data);
- tx->gref = queue->grant_tx_ref[id] = ref;
- tx->offset = offset;
- tx->size = len;
- tx->flags = 0;
- }
+ queue->tx_skbs[id].skb = skb;
+ queue->grant_tx_page[id] = page;
+ queue->grant_tx_ref[id] = ref;
- /* Grant backend access to each skb fragment page. */
- for (i = 0; i < frags; i++) {
- skb_frag_t *frag = skb_shinfo(skb)->frags + i;
- struct page *page = skb_frag_page(frag);
+ tx->id = id;
+ tx->gref = ref;
+ tx->offset = offset;
+ tx->size = len;
+ tx->flags = 0;
- len = skb_frag_size(frag);
- offset = frag->page_offset;
+ return tx;
+}
- /* Skip unused frames from start of page */
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+ struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+ struct sk_buff *skb, struct page *page,
+ unsigned int offset, unsigned int len)
+{
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
- while (len > 0) {
- unsigned long bytes;
-
- bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
-
- tx->flags |= XEN_NETTXF_more_data;
-
- id = get_id_from_freelist(&queue->tx_skb_freelist,
- queue->tx_skbs);
- queue->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&queue->tx, prod++);
- tx->id = id;
- ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
-
- mfn = pfn_to_mfn(page_to_pfn(page));
- gnttab_grant_foreign_access_ref(ref,
- queue->info->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
-
- queue->grant_tx_page[id] = page;
- tx->gref = queue->grant_tx_ref[id] = ref;
- tx->offset = offset;
- tx->size = bytes;
- tx->flags = 0;
-
- offset += bytes;
- len -= bytes;
-
- /* Next frame */
- if (offset == PAGE_SIZE && len) {
- BUG_ON(!PageCompound(page));
- page++;
- offset = 0;
- }
- }
+ while (len) {
+ tx->flags |= XEN_NETTXF_more_data;
+ tx = xennet_make_one_txreq(queue, skb_get(skb),
+ page, offset, len);
+ page++;
+ offset = 0;
+ len -= tx->size;
}
- queue->tx.req_prod_pvt = prod;
+ return tx;
}
/*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
*/
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
{
int i, frags = skb_shinfo(skb)->nr_frags;
- int pages = 0;
+ int pages;
+
+ pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -563,18 +513,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned short id;
struct netfront_info *np = netdev_priv(dev);
- struct netfront_stats *stats = this_cpu_ptr(np->stats);
- struct xen_netif_tx_request *tx;
- char *data = skb->data;
- RING_IDX i;
- grant_ref_t ref;
- unsigned long mfn;
+ struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+ struct xen_netif_tx_request *tx, *first_tx;
+ unsigned int i;
int notify;
int slots;
- unsigned int offset = offset_in_page(data);
- unsigned int len = skb_headlen(skb);
+ struct page *page;
+ unsigned int offset;
+ unsigned int len;
unsigned long flags;
struct netfront_queue *queue = NULL;
unsigned int num_queues = dev->real_num_tx_queues;
@@ -597,18 +544,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
- xennet_count_skb_frag_slots(skb);
+ slots = xennet_count_skb_slots(skb);
if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
slots, skb->len);
if (skb_linearize(skb))
goto drop;
- data = skb->data;
- offset = offset_in_page(data);
- len = skb_headlen(skb);
}
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ len = skb_headlen(skb);
+
spin_lock_irqsave(&queue->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
@@ -618,25 +565,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- i = queue->tx.req_prod_pvt;
-
- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
- queue->tx_skbs[id].skb = skb;
-
- tx = RING_GET_REQUEST(&queue->tx, i);
-
- tx->id = id;
- ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
- mfn = virt_to_mfn(data);
- gnttab_grant_foreign_access_ref(
- ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
- queue->grant_tx_page[id] = virt_to_page(data);
- tx->gref = queue->grant_tx_ref[id] = ref;
- tx->offset = offset;
- tx->size = len;
+ /* First request for the linear area. */
+ first_tx = tx = xennet_make_one_txreq(queue, skb,
+ page, offset, len);
+ page++;
+ offset = 0;
+ len -= tx->size;
- tx->flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -644,11 +579,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* remote but checksummed. */
tx->flags |= XEN_NETTXF_data_validated;
+ /* Optional extra info after the first request. */
if (skb_shinfo(skb)->gso_size) {
struct xen_netif_extra_info *gso;
gso = (struct xen_netif_extra_info *)
- RING_GET_REQUEST(&queue->tx, ++i);
+ RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
tx->flags |= XEN_NETTXF_extra_info;
@@ -663,19 +599,28 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso->flags = 0;
}
- queue->tx.req_prod_pvt = i + 1;
+ /* Requests for the rest of the linear area. */
+ tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+ /* Requests for all the frags. */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ tx = xennet_make_txreqs(queue, tx, skb,
+ skb_frag_page(frag), frag->page_offset,
+ skb_frag_size(frag));
+ }
- xennet_make_frags(skb, queue, tx);
- tx->size = skb->len;
+ /* First request has the packet length. */
+ first_tx->size = skb->len;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->bytes += skb->len;
+ tx_stats->packets++;
+ u64_stats_update_end(&tx_stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
@@ -931,7 +876,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq)
{
- struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+ struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -952,10 +897,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
continue;
}
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += skb->len;
+ u64_stats_update_end(&rx_stats->syncp);
/* Pass it up. */
napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1024,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
int cpu;
for_each_possible_cpu(cpu) {
- struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+ struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+ struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ tx_packets = tx_stats->packets;
+ tx_bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ rx_packets = rx_stats->packets;
+ rx_bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1275,6 +1224,15 @@ static const struct net_device_ops xennet_netdev_ops = {
#endif
};
+static void xennet_free_netdev(struct net_device *netdev)
+{
+ struct netfront_info *np = netdev_priv(netdev);
+
+ free_percpu(np->rx_stats);
+ free_percpu(np->tx_stats);
+ free_netdev(netdev);
+}
+
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
int err;
@@ -1295,8 +1253,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->queues = NULL;
err = -ENOMEM;
- np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
- if (np->stats == NULL)
+ np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->rx_stats == NULL)
+ goto exit;
+ np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->tx_stats == NULL)
goto exit;
netdev->netdev_ops = &xennet_netdev_ops;
@@ -1327,7 +1288,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev;
exit:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
return ERR_PTR(err);
}
@@ -1352,24 +1313,19 @@ static int netfront_probe(struct xenbus_device *dev,
info = netdev_priv(netdev);
dev_set_drvdata(&dev->dev, info);
-
+#ifdef CONFIG_SYSFS
+ info->netdev->sysfs_groups[0] = &xennet_dev_group;
+#endif
err = register_netdev(info->netdev);
if (err) {
pr_warn("%s: register_netdev err=%d\n", __func__, err);
goto fail;
}
- err = xennet_sysfs_addif(info->netdev);
- if (err) {
- unregister_netdev(info->netdev);
- pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
- goto fail;
- }
-
return 0;
fail:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return err;
}
@@ -2129,39 +2085,20 @@ static ssize_t store_rxbuf(struct device *dev,
return len;
}
-static struct device_attribute xennet_attrs[] = {
- __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
- __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
- __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
-};
+static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
-static int xennet_sysfs_addif(struct net_device *netdev)
-{
- int i;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
- err = device_create_file(&netdev->dev,
- &xennet_attrs[i]);
- if (err)
- goto fail;
- }
- return 0;
-
- fail:
- while (--i >= 0)
- device_remove_file(&netdev->dev, &xennet_attrs[i]);
- return err;
-}
-
-static void xennet_sysfs_delif(struct net_device *netdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
- device_remove_file(&netdev->dev, &xennet_attrs[i]);
-}
+static struct attribute *xennet_dev_attrs[] = {
+ &dev_attr_rxbuf_min.attr,
+ &dev_attr_rxbuf_max.attr,
+ &dev_attr_rxbuf_cur.attr,
+ NULL
+};
+static const struct attribute_group xennet_dev_group = {
+ .attrs = xennet_dev_attrs
+};
#endif /* CONFIG_SYSFS */
static int xennet_remove(struct xenbus_device *dev)
@@ -2175,8 +2112,6 @@ static int xennet_remove(struct xenbus_device *dev)
xennet_disconnect_backend(info);
- xennet_sysfs_delif(info->netdev);
-
unregister_netdev(info->netdev);
for (i = 0; i < num_queues; ++i) {
@@ -2189,9 +2124,7 @@ static int xennet_remove(struct xenbus_device *dev)
info->queues = NULL;
}
- free_percpu(info->stats);
-
- free_netdev(info->netdev);
+ xennet_free_netdev(info->netdev);
return 0;
}
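The netfront changes above replace a single per-CPU rx/tx statistics block with one block per direction, so the transmit and receive hot paths never touch the same u64_stats_sync sequence. A self-contained sketch of that pattern follows; the names dir_stats, count_tx and sum_stats are illustrative, and the per-CPU area is assumed to come from netdev_alloc_pcpu_stats() as in the driver, which also initialises the syncp.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

/* One counter pair per direction, per CPU, as in the netfront diff. */
struct dir_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Hot path: only touches this CPU's slot, no locks taken. */
static void count_tx(struct dir_stats __percpu *tx_stats, unsigned int len)
{
	struct dir_stats *s = this_cpu_ptr(tx_stats);

	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader: per-CPU retry loop, then sum across all possible CPUs. */
static void sum_stats(struct dir_stats __percpu *stats,
		      u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		struct dir_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			p = s->packets;
			b = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		*packets += p;
		*bytes += b;
	}
}

The writer stays lock-free on its own CPU; the reader only retries if it races with an in-flight update on that CPU.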
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index 963a4a5dc88e..f454dc68cc03 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -557,10 +557,11 @@ exit:
pr_err("Failed to handle discovered target err=%d\n", r);
}
-static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
+static int microread_event_received(struct nfc_hci_dev *hdev, u8 pipe,
u8 event, struct sk_buff *skb)
{
int r;
+ u8 gate = hdev->pipes[pipe].gate;
u8 mode;
pr_info("Microread received event 0x%x to gate 0x%x\n", event, gate);
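Both microread above and pn544 below switch their event_received callback from receiving a gate to receiving a pipe, recovering the gate from the HCI device's pipe table. A condensed sketch of the new callback shape; handle_event_for_gate() is a placeholder for the driver-specific dispatch, not a real helper.

#include <net/nfc/hci.h>

static int example_event_received(struct nfc_hci_dev *hdev, u8 pipe,
				  u8 event, struct sk_buff *skb)
{
	/* The core now passes the pipe; the gate comes from hdev's table. */
	u8 gate = hdev->pipes[pipe].gate;

	return handle_event_for_gate(hdev, gate, event, skb);
}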
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index fc02e8d6a193..cdde745b96bd 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -24,11 +24,13 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
+#include <linux/acpi.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/nfc.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_data/pn544.h>
#include <asm/unaligned.h>
@@ -41,6 +43,11 @@
#define PN544_I2C_FRAME_HEADROOM 1
#define PN544_I2C_FRAME_TAILROOM 2
+/* GPIO names */
+#define PN544_GPIO_NAME_IRQ "pn544_irq"
+#define PN544_GPIO_NAME_FW "pn544_fw"
+#define PN544_GPIO_NAME_EN "pn544_en"
+
/* framing in HCI mode */
#define PN544_HCI_I2C_LLC_LEN 1
#define PN544_HCI_I2C_LLC_CRC 2
@@ -58,6 +65,13 @@ static struct i2c_device_id pn544_hci_i2c_id_table[] = {
MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
+static const struct acpi_device_id pn544_hci_i2c_acpi_match[] = {
+ {"NXP5440", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, pn544_hci_i2c_acpi_match);
+
#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
/*
@@ -195,18 +209,19 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
/* Disable fw download */
- gpio_set_value(phy->gpio_fw, 0);
+ gpio_set_value_cansleep(phy->gpio_fw, 0);
for (polarity = 0; polarity < 2; polarity++) {
phy->en_polarity = polarity;
retry = 3;
while (retry--) {
/* power off */
- gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_en,
+ !phy->en_polarity);
usleep_range(10000, 15000);
/* power on */
- gpio_set_value(phy->gpio_en, phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity);
usleep_range(10000, 15000);
/* send reset */
@@ -225,13 +240,14 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
"Could not detect nfc_en polarity, fallback to active high\n");
out:
- gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
}
static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
{
- gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0);
- gpio_set_value(phy->gpio_en, phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_fw,
+ run_mode == PN544_FW_MODE ? 1 : 0);
+ gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity);
usleep_range(10000, 15000);
phy->run_mode = run_mode;
@@ -254,14 +270,14 @@ static void pn544_hci_i2c_disable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- gpio_set_value(phy->gpio_fw, 0);
- gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_fw, 0);
+ gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
usleep_range(10000, 15000);
- gpio_set_value(phy->gpio_en, phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity);
usleep_range(10000, 15000);
- gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
usleep_range(10000, 15000);
phy->powered = 0;
@@ -859,6 +875,90 @@ exit_state_wait_secure_write_answer:
}
}
+static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client)
+{
+ struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
+ const struct acpi_device_id *id;
+ struct gpio_desc *gpiod_en, *gpiod_irq, *gpiod_fw;
+ struct device *dev;
+ int ret;
+
+ if (!client)
+ return -EINVAL;
+
+ dev = &client->dev;
+
+ /* Match the struct device against a given list of ACPI IDs */
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+
+ if (!id)
+ return -ENODEV;
+
+ /* Get EN GPIO from ACPI */
+ gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1);
+ if (IS_ERR(gpiod_en)) {
+ nfc_err(dev,
+ "Unable to get EN GPIO\n");
+ return -ENODEV;
+ }
+
+ phy->gpio_en = desc_to_gpio(gpiod_en);
+
+ /* Configuration EN GPIO */
+ ret = gpiod_direction_output(gpiod_en, 0);
+ if (ret) {
+ nfc_err(dev, "Fail EN pin direction\n");
+ return ret;
+ }
+
+ /* Get FW GPIO from ACPI */
+ gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2);
+ if (IS_ERR(gpiod_fw)) {
+ nfc_err(dev,
+ "Unable to get FW GPIO\n");
+ return -ENODEV;
+ }
+
+ phy->gpio_fw = desc_to_gpio(gpiod_fw);
+
+ /* Configuration FW GPIO */
+ ret = gpiod_direction_output(gpiod_fw, 0);
+ if (ret) {
+ nfc_err(dev, "Fail FW pin direction\n");
+ return ret;
+ }
+
+ /* Get IRQ GPIO */
+ gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0);
+ if (IS_ERR(gpiod_irq)) {
+ nfc_err(dev,
+ "Unable to get IRQ GPIO\n");
+ return -ENODEV;
+ }
+
+ phy->gpio_irq = desc_to_gpio(gpiod_irq);
+
+ /* Configure IRQ GPIO */
+ ret = gpiod_direction_input(gpiod_irq);
+ if (ret) {
+ nfc_err(dev, "Fail IRQ pin direction\n");
+ return ret;
+ }
+
+ /* Map the pin to an IRQ */
+ ret = gpiod_to_irq(gpiod_irq);
+ if (ret < 0) {
+ nfc_err(dev, "Fail pin IRQ mapping\n");
+ return ret;
+ }
+
+ nfc_info(dev, "GPIO resource, no:%d irq:%d\n",
+ desc_to_gpio(gpiod_irq), ret);
+ client->irq = ret;
+
+ return 0;
+}
+
#ifdef CONFIG_OF
static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
@@ -884,7 +984,7 @@ static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
phy->gpio_en = ret;
/* Configuration of EN GPIO */
- ret = gpio_request(phy->gpio_en, "pn544_en");
+ ret = gpio_request(phy->gpio_en, PN544_GPIO_NAME_EN);
if (ret) {
nfc_err(&client->dev, "Fail EN pin\n");
goto err_dt;
@@ -906,7 +1006,7 @@ static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
phy->gpio_fw = ret;
/* Configuration of FW GPIO */
- ret = gpio_request(phy->gpio_fw, "pn544_fw");
+ ret = gpio_request(phy->gpio_fw, PN544_GPIO_NAME_FW);
if (ret) {
nfc_err(&client->dev, "Fail FW pin\n");
goto err_gpio_en;
@@ -1001,6 +1101,14 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+ /* Using ACPI */
+ } else if (ACPI_HANDLE(&client->dev)) {
+ r = pn544_hci_i2c_acpi_request_resources(client);
+ if (r) {
+ nfc_err(&client->dev,
+ "Cannot get ACPI data\n");
+ return r;
+ }
} else {
nfc_err(&client->dev, "No platform data\n");
return -EINVAL;
@@ -1080,6 +1188,7 @@ static struct i2c_driver pn544_hci_i2c_driver = {
.name = PN544_HCI_I2C_DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pn544_i2c_match),
+ .acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match),
},
.probe = pn544_hci_i2c_probe,
.id_table = pn544_hci_i2c_id_table,
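The ACPI path added to the pn544 I2C driver above follows one sequence per line: match the ACPI ID, fetch the GPIO by index through the devm gpiod API, configure its direction, then translate the IRQ GPIO into an interrupt number for the I2C client. A trimmed sketch of that sequence for a single input line; the connection ID "assumed_gpio" and index 0 are placeholders, and devm_gpiod_get_index() is called with the same three-argument form the driver uses.

#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>

static int example_acpi_get_irq_gpio(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct gpio_desc *gpiod;
	int irq;

	/* Only proceed if this device was enumerated from our ACPI IDs. */
	if (!acpi_match_device(dev->driver->acpi_match_table, dev))
		return -ENODEV;

	gpiod = devm_gpiod_get_index(dev, "assumed_gpio", 0);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	if (gpiod_direction_input(gpiod))
		return -EIO;

	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;

	client->irq = irq;
	return 0;
}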
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 9c8051d20cea..12e819ddf17a 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -724,10 +724,11 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
* <= 0: driver handled the event, skb consumed
* 1: driver does not handle the event, please do standard processing
*/
-static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
struct sk_buff *skb)
{
struct sk_buff *rgb_skb = NULL;
+ u8 gate = hdev->pipes[pipe].gate;
int r;
pr_debug("hci event %d\n", event);
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
index 7d688f97aa27..97edab4bbdf8 100644
--- a/drivers/nfc/st21nfca/Makefile
+++ b/drivers/nfc/st21nfca/Makefile
@@ -2,7 +2,7 @@
# Makefile for ST21NFCA HCI based NFC driver
#
-st21nfca_hci-objs = st21nfca.o st21nfca_dep.o
+st21nfca_hci-objs = st21nfca.o st21nfca_dep.o st21nfca_se.o
obj-$(CONFIG_NFC_ST21NFCA) += st21nfca_hci.o
st21nfca_i2c-objs = i2c.o
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 05722085a59f..a32143951616 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -74,6 +74,8 @@ struct st21nfca_i2c_phy {
unsigned int gpio_ena;
unsigned int irq_polarity;
+ struct st21nfca_se_status se_status;
+
struct sk_buff *pending_skb;
int current_read_len;
/*
@@ -537,6 +539,11 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
phy->irq_polarity = irq_get_trigger_type(client->irq);
+ phy->se_status.is_ese_present =
+ of_property_read_bool(pp, "ese-present");
+ phy->se_status.is_uicc_present =
+ of_property_read_bool(pp, "uicc-present");
+
return 0;
}
#else
@@ -571,6 +578,9 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
}
}
+ phy->se_status.is_ese_present = pdata->is_ese_present;
+ phy->se_status.is_uicc_present = pdata->is_uicc_present;
+
return 0;
}
@@ -591,11 +601,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
phy = devm_kzalloc(&client->dev, sizeof(struct st21nfca_i2c_phy),
GFP_KERNEL);
- if (!phy) {
- nfc_err(&client->dev,
- "Cannot allocate memory for st21nfca i2c phy.\n");
+ if (!phy)
return -ENOMEM;
- }
phy->i2c_dev = client;
phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
@@ -641,8 +648,11 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
}
return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
- ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM,
- ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev);
+ ST21NFCA_FRAME_HEADROOM,
+ ST21NFCA_FRAME_TAILROOM,
+ ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+ &phy->hdev,
+ &phy->se_status);
}
static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -661,6 +671,7 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_OF
static const struct of_device_id of_st21nfca_i2c_match[] = {
+ { .compatible = "st,st21nfca-i2c", },
{ .compatible = "st,st21nfca_i2c", },
{}
};
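The two of_property_read_bool() calls added above are the whole device-tree contract for secure-element presence: a bare "ese-present" or "uicc-present" property on the node turns the corresponding flag on. A minimal sketch, assuming pp is the client's of_node and st21nfca_se_status is the struct introduced in st21nfca.h further below.

#include <linux/of.h>

static void example_fill_se_status(struct device_node *pp,
				   struct st21nfca_se_status *se_status)
{
	/* Boolean DT properties: present means true, absent means false. */
	se_status->is_ese_present = of_property_read_bool(pp, "ese-present");
	se_status->is_uicc_present = of_property_read_bool(pp, "uicc-present");
}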
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index f2596c8d68b0..24d3d240d5f4 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -23,6 +23,7 @@
#include "st21nfca.h"
#include "st21nfca_dep.h"
+#include "st21nfca_se.h"
#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
@@ -62,7 +63,6 @@
#define ST21NFCA_RF_CARD_F_DATARATE 0x08
#define ST21NFCA_RF_CARD_F_DATARATE_212_424 0x01
-#define ST21NFCA_DEVICE_MGNT_GATE 0x01
#define ST21NFCA_DEVICE_MGNT_PIPE 0x02
#define ST21NFCA_DM_GETINFO 0x13
@@ -78,6 +78,11 @@
#define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/
+#define ST21NFCA_EVT_HOT_PLUG 0x03
+#define ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80)
+
+#define ST21NFCA_SE_TO_PIPES 2000
+
static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
static struct nfc_hci_gate st21nfca_gates[] = {
@@ -92,6 +97,10 @@ static struct nfc_hci_gate st21nfca_gates[] = {
{ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
{ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
{ST21NFCA_RF_CARD_F_GATE, NFC_HCI_INVALID_PIPE},
+
+ /* Secure element pipes are created by secure element host */
+ {ST21NFCA_CONNECTIVITY_GATE, NFC_HCI_DO_NOT_CREATE_PIPE},
+ {ST21NFCA_APDU_READER_GATE, NFC_HCI_DO_NOT_CREATE_PIPE},
};
struct st21nfca_pipe_info {
@@ -118,18 +127,6 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
NFC_HCI_TERMINAL_HOST_ID, 0
};
- skb_pipe_list = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
- if (!skb_pipe_list) {
- r = -ENOMEM;
- goto free_list;
- }
-
- skb_pipe_info = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
- if (!skb_pipe_info) {
- r = -ENOMEM;
- goto free_info;
- }
-
/* On ST21NFCA device pipes number are dynamics
* A maximum of 16 pipes can be created at the same time
* If pipes are already created, hci_dev_up will fail.
@@ -148,7 +145,8 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
* Pipe can be closed and need to be open.
*/
r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
- ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE);
+ ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_DEVICE_MGNT_PIPE);
if (r < 0)
goto free_info;
@@ -179,17 +177,28 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
* - destination gid (1byte)
*/
info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
+ if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE &&
+ info->src_host_id != ST21NFCA_ESE_HOST_ID) {
+ pr_err("Unexpected apdu_reader pipe on host %x\n",
+ info->src_host_id);
+ continue;
+ }
+
for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) &&
- (st21nfca_gates[j].gate != info->dst_gate_id);
- j++)
+ (st21nfca_gates[j].gate != info->dst_gate_id) ; j++)
;
if (j < ARRAY_SIZE(st21nfca_gates) &&
st21nfca_gates[j].gate == info->dst_gate_id &&
ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) {
st21nfca_gates[j].pipe = pipe_info[2];
+
hdev->gate2pipe[st21nfca_gates[j].gate] =
- st21nfca_gates[j].pipe;
+ st21nfca_gates[j].pipe;
+ hdev->pipes[st21nfca_gates[j].pipe].gate =
+ st21nfca_gates[j].gate;
+ hdev->pipes[st21nfca_gates[j].pipe].dest_host =
+ info->src_host_id;
}
}
@@ -199,7 +208,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
*/
if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) {
for (i = skb_pipe_list->len + 3;
- i < ARRAY_SIZE(st21nfca_gates); i++) {
+ i < ARRAY_SIZE(st21nfca_gates) - 2; i++) {
r = nfc_hci_connect_gate(hdev,
NFC_HCI_HOST_CONTROLLER_ID,
st21nfca_gates[i].gate,
@@ -212,7 +221,6 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
free_info:
kfree_skb(skb_pipe_info);
-free_list:
kfree_skb(skb_pipe_list);
return r;
}
@@ -257,16 +265,33 @@ out:
static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
struct sk_buff *skb;
u8 param;
+ u8 white_list[2];
+ int wl_size = 0;
int r;
- param = NFC_HCI_UICC_HOST_ID;
- r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
- NFC_HCI_ADMIN_WHITELIST, &param, 1);
- if (r < 0)
- return r;
+ if (info->se_status->is_ese_present &&
+ info->se_status->is_uicc_present) {
+ white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
+ white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
+ } else if (!info->se_status->is_ese_present &&
+ info->se_status->is_uicc_present) {
+ white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
+ } else if (info->se_status->is_ese_present &&
+ !info->se_status->is_uicc_present) {
+ white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
+ }
+
+ if (wl_size) {
+ r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_WHITELIST,
+ (u8 *) &white_list, wl_size);
+ if (r < 0)
+ return r;
+ }
/* Set NFC_MODE in device management gate to enable */
r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
@@ -274,8 +299,9 @@ static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
if (r < 0)
return r;
- if (skb->data[0] == 0) {
- kfree_skb(skb);
+ param = skb->data[0];
+ kfree_skb(skb);
+ if (param == 0) {
param = 1;
r = nfc_hci_set_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
@@ -417,9 +443,12 @@ static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
ST21NFCA_RF_CARD_F_DATARATE,
param, 1);
- if (r < 0)
+ if (r < 0) {
+ kfree_skb(datarate_skb);
return r;
+ }
}
+ kfree_skb(datarate_skb);
/*
* Configure sens_res
@@ -673,15 +702,15 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
int r;
- struct sk_buff *nfcid2_skb = NULL, *nfcid1_skb;
+ struct sk_buff *nfcid_skb = NULL;
if (gate == ST21NFCA_RF_READER_F_GATE) {
r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
- ST21NFCA_RF_READER_F_NFCID2, &nfcid2_skb);
+ ST21NFCA_RF_READER_F_NFCID2, &nfcid_skb);
if (r < 0)
goto exit;
- if (nfcid2_skb->len > NFC_SENSF_RES_MAXSIZE) {
+ if (nfcid_skb->len > NFC_SENSF_RES_MAXSIZE) {
r = -EPROTO;
goto exit;
}
@@ -693,11 +722,11 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
* - After the reception of SEL_RES with NFCIP-1 compliant bit
* set for type A frame NFCID1 will be updated
*/
- if (nfcid2_skb->len > 0) {
+ if (nfcid_skb->len > 0) {
/* P2P in type F */
- memcpy(target->sensf_res, nfcid2_skb->data,
- nfcid2_skb->len);
- target->sensf_res_len = nfcid2_skb->len;
+ memcpy(target->sensf_res, nfcid_skb->data,
+ nfcid_skb->len);
+ target->sensf_res_len = nfcid_skb->len;
/* NFC Forum Digital Protocol Table 44 */
if (target->sensf_res[0] == 0x01 &&
target->sensf_res[1] == 0xfe)
@@ -707,27 +736,28 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
target->supported_protocols =
NFC_PROTO_FELICA_MASK;
} else {
+ kfree_skb(nfcid_skb);
/* P2P in type A */
r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
ST21NFCA_RF_READER_F_NFCID1,
- &nfcid1_skb);
+ &nfcid_skb);
if (r < 0)
goto exit;
- if (nfcid1_skb->len > NFC_NFCID1_MAXSIZE) {
+ if (nfcid_skb->len > NFC_NFCID1_MAXSIZE) {
r = -EPROTO;
goto exit;
}
- memcpy(target->sensf_res, nfcid1_skb->data,
- nfcid1_skb->len);
- target->sensf_res_len = nfcid1_skb->len;
+ memcpy(target->sensf_res, nfcid_skb->data,
+ nfcid_skb->len);
+ target->sensf_res_len = nfcid_skb->len;
target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
}
target->hci_reader_gate = ST21NFCA_RF_READER_F_GATE;
}
r = 1;
exit:
- kfree_skb(nfcid2_skb);
+ kfree_skb(nfcid_skb);
return r;
}
@@ -829,24 +859,82 @@ static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
}
}
+static void st21nfca_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+ u8 gate = hdev->pipes[pipe].gate;
+
+ pr_debug("cmd: %x\n", cmd);
+
+ switch (cmd) {
+ case NFC_HCI_ANY_OPEN_PIPE:
+ if (gate != ST21NFCA_APDU_READER_GATE &&
+ hdev->pipes[pipe].dest_host != NFC_HCI_UICC_HOST_ID)
+ info->se_info.count_pipes++;
+
+ if (info->se_info.count_pipes == info->se_info.expected_pipes) {
+ del_timer_sync(&info->se_info.se_active_timer);
+ info->se_info.se_active = false;
+ info->se_info.count_pipes = 0;
+ complete(&info->se_info.req_completion);
+ }
+ break;
+ }
+}
+
+static int st21nfca_admin_event_received(struct nfc_hci_dev *hdev, u8 event,
+ struct sk_buff *skb)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("admin event: %x\n", event);
+
+ switch (event) {
+ case ST21NFCA_EVT_HOT_PLUG:
+ if (info->se_info.se_active) {
+ if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
+ del_timer_sync(&info->se_info.se_active_timer);
+ info->se_info.se_active = false;
+ complete(&info->se_info.req_completion);
+ } else {
+ mod_timer(&info->se_info.se_active_timer,
+ jiffies +
+ msecs_to_jiffies(ST21NFCA_SE_TO_PIPES));
+ }
+ }
+ break;
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
/*
* Returns:
* <= 0: driver handled the event, skb consumed
* 1: driver does not handle the event, please do standard processing
*/
-static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
+static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe,
u8 event, struct sk_buff *skb)
{
+ u8 gate = hdev->pipes[pipe].gate;
+ u8 host = hdev->pipes[pipe].dest_host;
+
pr_debug("hci event: %d gate: %x\n", event, gate);
switch (gate) {
+ case NFC_HCI_ADMIN_GATE:
+ return st21nfca_admin_event_received(hdev, event, skb);
case ST21NFCA_RF_CARD_F_GATE:
return st21nfca_dep_event_received(hdev, event, skb);
+ case ST21NFCA_CONNECTIVITY_GATE:
+ return st21nfca_connectivity_event_received(hdev, host,
+ event, skb);
+ case ST21NFCA_APDU_READER_GATE:
+ return st21nfca_apdu_reader_event_received(hdev, event, skb);
default:
return 1;
}
- kfree_skb(skb);
- return 0;
}
static struct nfc_hci_ops st21nfca_hci_ops = {
@@ -865,11 +953,17 @@ static struct nfc_hci_ops st21nfca_hci_ops = {
.tm_send = st21nfca_hci_tm_send,
.check_presence = st21nfca_hci_check_presence,
.event_received = st21nfca_hci_event_received,
+ .cmd_received = st21nfca_hci_cmd_received,
+ .discover_se = st21nfca_hci_discover_se,
+ .enable_se = st21nfca_hci_enable_se,
+ .disable_se = st21nfca_hci_disable_se,
+ .se_io = st21nfca_hci_se_io,
};
int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
char *llc_name, int phy_headroom, int phy_tailroom,
- int phy_payload, struct nfc_hci_dev **hdev)
+ int phy_payload, struct nfc_hci_dev **hdev,
+ struct st21nfca_se_status *se_status)
{
struct st21nfca_hci_info *info;
int r = 0;
@@ -929,6 +1023,8 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
goto err_alloc_hdev;
}
+ info->se_status = se_status;
+
nfc_hci_set_clientdata(info->hdev, info);
r = nfc_hci_register_device(info->hdev);
@@ -937,6 +1033,7 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
*hdev = info->hdev;
st21nfca_dep_init(info->hdev);
+ st21nfca_se_init(info->hdev);
return 0;
@@ -955,6 +1052,7 @@ void st21nfca_hci_remove(struct nfc_hci_dev *hdev)
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
st21nfca_dep_deinit(hdev);
+ st21nfca_se_deinit(hdev);
nfc_hci_unregister_device(hdev);
nfc_hci_free_device(hdev);
kfree(info);
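st21nfca_hci_ready() above now builds the ADMIN whitelist from the secure elements that are actually present instead of hard-coding the UICC host. The three-way if/else chain reduces to the following equivalent sketch; the helper name is illustrative, the constants and nfc_hci_set_param() call are the ones used in the hunk.

static int example_set_whitelist(struct nfc_hci_dev *hdev,
				 struct st21nfca_se_status *se_status)
{
	u8 white_list[2];
	int wl_size = 0;

	/* Each physically present SE contributes one host ID. */
	if (se_status->is_uicc_present)
		white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
	if (se_status->is_ese_present)
		white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;

	/* Only write the parameter when the list is non-empty. */
	if (!wl_size)
		return 0;

	return nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
				 NFC_HCI_ADMIN_WHITELIST,
				 white_list, wl_size);
}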
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index 7c2a85292230..15a78d330a9f 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -20,6 +20,7 @@
#include <net/nfc/hci.h>
#include "st21nfca_dep.h"
+#include "st21nfca_se.h"
#define HCI_MODE 0
@@ -51,9 +52,15 @@
#define ST21NFCA_NUM_DEVICES 256
+struct st21nfca_se_status {
+ bool is_ese_present;
+ bool is_uicc_present;
+};
+
int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
char *llc_name, int phy_headroom, int phy_tailroom,
- int phy_payload, struct nfc_hci_dev **hdev);
+ int phy_payload, struct nfc_hci_dev **hdev,
+ struct st21nfca_se_status *se_status);
void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
enum st21nfca_state {
@@ -66,6 +73,7 @@ struct st21nfca_hci_info {
void *phy_id;
struct nfc_hci_dev *hdev;
+ struct st21nfca_se_status *se_status;
enum st21nfca_state state;
@@ -76,13 +84,16 @@ struct st21nfca_hci_info {
void *async_cb_context;
struct st21nfca_dep_info dep_info;
+ struct st21nfca_se_info se_info;
};
/* Reader RF commands */
-#define ST21NFCA_WR_XCHG_DATA 0x10
-
-#define ST21NFCA_RF_READER_F_GATE 0x14
+#define ST21NFCA_WR_XCHG_DATA 0x10
-#define ST21NFCA_RF_CARD_F_GATE 0x24
+#define ST21NFCA_DEVICE_MGNT_GATE 0x01
+#define ST21NFCA_RF_READER_F_GATE 0x14
+#define ST21NFCA_RF_CARD_F_GATE 0x24
+#define ST21NFCA_APDU_READER_GATE 0xf0
+#define ST21NFCA_CONNECTIVITY_GATE 0x41
#endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/st21nfca/st21nfca_se.c b/drivers/nfc/st21nfca/st21nfca_se.c
new file mode 100644
index 000000000000..bd13cac9c66a
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca_se.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/nfc/hci.h>
+
+#include "st21nfca.h"
+#include "st21nfca_se.h"
+
+#define ST21NFCA_EVT_UICC_ACTIVATE 0x10
+#define ST21NFCA_EVT_UICC_DEACTIVATE 0x13
+#define ST21NFCA_EVT_SE_HARD_RESET 0x20
+#define ST21NFCA_EVT_SE_SOFT_RESET 0x11
+#define ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER 0x21
+#define ST21NFCA_EVT_SE_ACTIVATE 0x22
+#define ST21NFCA_EVT_SE_DEACTIVATE 0x23
+
+#define ST21NFCA_EVT_TRANSMIT_DATA 0x10
+#define ST21NFCA_EVT_WTX_REQUEST 0x11
+
+#define ST21NFCA_EVT_CONNECTIVITY 0x10
+#define ST21NFCA_EVT_TRANSACTION 0x12
+
+#define ST21NFCA_ESE_HOST_ID 0xc0
+
+#define ST21NFCA_SE_TO_HOT_PLUG 1000
+/* Connectivity pipe only */
+#define ST21NFCA_SE_COUNT_PIPE_UICC 0x01
+/* Connectivity + APDU Reader pipe */
+#define ST21NFCA_SE_COUNT_PIPE_EMBEDDED 0x02
+
+#define ST21NFCA_SE_MODE_OFF 0x00
+#define ST21NFCA_SE_MODE_ON 0x01
+
+#define ST21NFCA_PARAM_ATR 0x01
+#define ST21NFCA_ATR_DEFAULT_BWI 0x04
+
+/*
+ * WT = 2^BWI/10 [s]; convert to msecs and add some safety
+ * margin by doubling this timeout
+ */
+#define ST21NFCA_BWI_TO_TIMEOUT(x) ((1 << x) * 200)
+#define ST21NFCA_ATR_GET_Y_FROM_TD(x) (x >> 4)
+
+/* If TA is present bit 0 is set */
+#define ST21NFCA_ATR_TA_PRESENT(x) (x & 0x01)
+/* If TB is present bit 1 is set */
+#define ST21NFCA_ATR_TB_PRESENT(x) (x & 0x02)
+
+static u8 st21nfca_se_get_bwi(struct nfc_hci_dev *hdev)
+{
+ int i;
+ u8 td;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
+ for (i = 1; i < ST21NFCA_ESE_MAX_LENGTH; i++) {
+ td = ST21NFCA_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
+ if (ST21NFCA_ATR_TA_PRESENT(td))
+ i++;
+ if (ST21NFCA_ATR_TB_PRESENT(td)) {
+ i++;
+ return info->se_info.atr[i] >> 4;
+ }
+ }
+ return ST21NFCA_ATR_DEFAULT_BWI;
+}
+
+static void st21nfca_se_get_atr(struct nfc_hci_dev *hdev)
+{
+ int r;
+ struct sk_buff *skb;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ r = nfc_hci_get_param(hdev, ST21NFCA_APDU_READER_GATE,
+ ST21NFCA_PARAM_ATR, &skb);
+ if (r < 0)
+ return;
+
+ if (skb->len <= ST21NFCA_ESE_MAX_LENGTH) {
+ memcpy(info->se_info.atr, skb->data, skb->len);
+ info->se_info.wt_timeout =
+ ST21NFCA_BWI_TO_TIMEOUT(st21nfca_se_get_bwi(hdev));
+ }
+ kfree_skb(skb);
+}
+
+static int st21nfca_hci_control_se(struct nfc_hci_dev *hdev, u32 se_idx,
+ u8 state)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+ int r;
+ struct sk_buff *sk_host_list;
+ u8 se_event, host_id;
+
+ switch (se_idx) {
+ case NFC_HCI_UICC_HOST_ID:
+ se_event = (state == ST21NFCA_SE_MODE_ON ?
+ ST21NFCA_EVT_UICC_ACTIVATE :
+ ST21NFCA_EVT_UICC_DEACTIVATE);
+
+ info->se_info.count_pipes = 0;
+ info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_UICC;
+ break;
+ case ST21NFCA_ESE_HOST_ID:
+ se_event = (state == ST21NFCA_SE_MODE_ON ?
+ ST21NFCA_EVT_SE_ACTIVATE :
+ ST21NFCA_EVT_SE_DEACTIVATE);
+
+ info->se_info.count_pipes = 0;
+ info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_EMBEDDED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Wait for an EVT_HOT_PLUG in order to
+ * retrieve a relevant host list.
+ */
+ reinit_completion(&info->se_info.req_completion);
+ r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, se_event,
+ NULL, 0);
+ if (r < 0)
+ return r;
+
+ mod_timer(&info->se_info.se_active_timer, jiffies +
+ msecs_to_jiffies(ST21NFCA_SE_TO_HOT_PLUG));
+ info->se_info.se_active = true;
+
+ /* Ignore return value and check in any case the host_list */
+ wait_for_completion_interruptible(&info->se_info.req_completion);
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_HOST_LIST,
+ &sk_host_list);
+ if (r < 0)
+ return r;
+
+ host_id = sk_host_list->data[sk_host_list->len - 1];
+ kfree_skb(sk_host_list);
+
+ if (state == ST21NFCA_SE_MODE_ON && host_id == se_idx)
+ return se_idx;
+ else if (state == ST21NFCA_SE_MODE_OFF && host_id != se_idx)
+ return se_idx;
+
+ return -1;
+}
+
+int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+ int se_count = 0;
+
+ if (info->se_status->is_uicc_present) {
+ nfc_add_se(hdev->ndev, NFC_HCI_UICC_HOST_ID, NFC_SE_UICC);
+ se_count++;
+ }
+
+ if (info->se_status->is_ese_present) {
+ nfc_add_se(hdev->ndev, ST21NFCA_ESE_HOST_ID, NFC_SE_EMBEDDED);
+ se_count++;
+ }
+
+ return !se_count;
+}
+EXPORT_SYMBOL(st21nfca_hci_discover_se);
+
+int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+ int r;
+
+ /*
+ * According to upper layer, se_idx == NFC_SE_UICC when
+ * info->se_status->is_uicc_enable is true should never happen.
+ * Same for eSE.
+ */
+ r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_ON);
+
+ if (r == ST21NFCA_ESE_HOST_ID) {
+ st21nfca_se_get_atr(hdev);
+ r = nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE,
+ ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0);
+ if (r < 0)
+ return r;
+ } else if (r < 0) {
+ /*
+ * The activation tentative failed, the secure element
+ * is not connected. Remove from the list.
+ */
+ nfc_remove_se(hdev->ndev, se_idx);
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(st21nfca_hci_enable_se);
+
+int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+ int r;
+
+ /*
+ * According to upper layer, se_idx == NFC_SE_UICC when
+ * info->se_status->is_uicc_enable is true should never happen
+ * Same for eSE.
+ */
+ r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_OFF);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+EXPORT_SYMBOL(st21nfca_hci_disable_se);
+
+int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("se_io %x\n", se_idx);
+
+ switch (se_idx) {
+ case ST21NFCA_ESE_HOST_ID:
+ info->se_info.cb = cb;
+ info->se_info.cb_context = cb_context;
+ mod_timer(&info->se_info.bwi_timer, jiffies +
+ msecs_to_jiffies(info->se_info.wt_timeout));
+ info->se_info.bwi_active = true;
+ return nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE,
+ ST21NFCA_EVT_TRANSMIT_DATA,
+ apdu, apdu_length);
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(st21nfca_hci_se_io);
+
+static void st21nfca_se_wt_timeout(unsigned long data)
+{
+ /*
+ * No answer from the secure element
+ * within the defined timeout.
+ * Let's send a reset request as recovery procedure.
+ * According to the situation, we first try to send a software reset
+ * to the secure element. If the next command is still not
+ * answering in time, we send to the CLF a secure element hardware
+ * reset request.
+ */
+ /* hardware reset managed through VCC_UICC_OUT power supply */
+ u8 param = 0x01;
+ struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+
+ pr_debug("\n");
+
+ info->se_info.bwi_active = false;
+
+ if (!info->se_info.xch_error) {
+ info->se_info.xch_error = true;
+ nfc_hci_send_event(info->hdev, ST21NFCA_APDU_READER_GATE,
+ ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0);
+ } else {
+ info->se_info.xch_error = false;
+ nfc_hci_send_event(info->hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_EVT_SE_HARD_RESET, &param, 1);
+ }
+ info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
+}
+
+static void st21nfca_se_activation_timeout(unsigned long data)
+{
+ struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+
+ pr_debug("\n");
+
+ info->se_info.se_active = false;
+
+ complete(&info->se_info.req_completion);
+}
+
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ * 1: driver does not handle the event, please do standard processing
+ */
+int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+ u8 event, struct sk_buff *skb)
+{
+ int r = 0;
+ struct device *dev = &hdev->ndev->dev;
+ struct nfc_evt_transaction *transaction;
+
+ pr_debug("connectivity gate event: %x\n", event);
+
+ switch (event) {
+ case ST21NFCA_EVT_CONNECTIVITY:
+ break;
+ case ST21NFCA_EVT_TRANSACTION:
+ if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
+ skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+ return -EPROTO;
+
+ transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+ skb->len - 2, GFP_KERNEL);
+
+ transaction->aid_len = skb->data[1];
+ memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+
+ if (skb->data[transaction->aid_len + 2] !=
+ NFC_EVT_TRANSACTION_PARAMS_TAG)
+ return -EPROTO;
+
+ transaction->params_len = skb->data[transaction->aid_len + 3];
+ memcpy(transaction->params, skb->data +
+ transaction->aid_len + 4, transaction->params_len);
+
+ r = nfc_se_transaction(hdev->ndev, host, transaction);
+ break;
+ default:
+ return 1;
+ }
+ kfree_skb(skb);
+ return r;
+}
+EXPORT_SYMBOL(st21nfca_connectivity_event_received);
+
+int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
+ u8 event, struct sk_buff *skb)
+{
+ int r = 0;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("apdu reader gate event: %x\n", event);
+
+ switch (event) {
+ case ST21NFCA_EVT_TRANSMIT_DATA:
+ del_timer_sync(&info->se_info.bwi_timer);
+ info->se_info.bwi_active = false;
+ r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
+ if (r < 0)
+ goto exit;
+
+ info->se_info.cb(info->se_info.cb_context,
+ skb->data, skb->len, 0);
+ break;
+ case ST21NFCA_EVT_WTX_REQUEST:
+ mod_timer(&info->se_info.bwi_timer, jiffies +
+ msecs_to_jiffies(info->se_info.wt_timeout));
+ break;
+ }
+
+exit:
+ kfree_skb(skb);
+ return r;
+}
+EXPORT_SYMBOL(st21nfca_apdu_reader_event_received);
+
+void st21nfca_se_init(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ init_completion(&info->se_info.req_completion);
+ /* initialize timers */
+ init_timer(&info->se_info.bwi_timer);
+ info->se_info.bwi_timer.data = (unsigned long)info;
+ info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+ info->se_info.bwi_active = false;
+
+ init_timer(&info->se_info.se_active_timer);
+ info->se_info.se_active_timer.data = (unsigned long)info;
+ info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+ info->se_info.se_active = false;
+
+ info->se_info.count_pipes = 0;
+ info->se_info.expected_pipes = 0;
+
+ info->se_info.xch_error = false;
+
+ info->se_info.wt_timeout =
+ ST21NFCA_BWI_TO_TIMEOUT(ST21NFCA_ATR_DEFAULT_BWI);
+}
+EXPORT_SYMBOL(st21nfca_se_init);
+
+void st21nfca_se_deinit(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ if (info->se_info.bwi_active)
+ del_timer_sync(&info->se_info.bwi_timer);
+ if (info->se_info.se_active)
+ del_timer_sync(&info->se_info.se_active_timer);
+
+ info->se_info.bwi_active = false;
+ info->se_info.se_active = false;
+}
+EXPORT_SYMBOL(st21nfca_se_deinit);
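For reference, ST21NFCA_BWI_TO_TIMEOUT() above encodes the ISO 7816-3 block waiting time BWT = 2^BWI / 10 seconds, converted to milliseconds and doubled for margin, i.e. 2^BWI * 200 ms. A quick stand-alone check of the arithmetic (plain userspace C, not driver code):

#include <stdio.h>

/* Same formula as the driver macro: 2^BWI * 100 ms, doubled for margin. */
#define BWI_TO_TIMEOUT_MS(x)	((1 << (x)) * 200)

int main(void)
{
	/* Default BWI of 4: 2^4 / 10 s = 1.6 s, doubled -> 3200 ms. */
	printf("BWI=4 -> %d ms\n", BWI_TO_TIMEOUT_MS(4));
	/* BWI of 9, the largest value the TB nibble encodes -> 102400 ms. */
	printf("BWI=9 -> %d ms\n", BWI_TO_TIMEOUT_MS(9));
	return 0;
}

With the default BWI of 4 the driver therefore waits 3.2 s for an APDU answer before starting its soft/hard reset recovery in st21nfca_se_wt_timeout().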
diff --git a/drivers/nfc/st21nfca/st21nfca_se.h b/drivers/nfc/st21nfca/st21nfca_se.h
new file mode 100644
index 000000000000..b172cfcaeb90
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca_se.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ST21NFCA_SE_H
+#define __ST21NFCA_SE_H
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+
+/*
+ * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
+ * sequence of at most 32 characters.
+ */
+#define ST21NFCA_ESE_MAX_LENGTH 33
+#define ST21NFCA_ESE_HOST_ID 0xc0
+
+struct st21nfca_se_info {
+ u8 atr[ST21NFCA_ESE_MAX_LENGTH];
+ struct completion req_completion;
+
+ struct timer_list bwi_timer;
+ int wt_timeout; /* in msecs */
+ bool bwi_active;
+
+ struct timer_list se_active_timer;
+ bool se_active;
+ int expected_pipes;
+ int count_pipes;
+
+ bool xch_error;
+
+ se_io_cb_t cb;
+ void *cb_context;
+};
+
+int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+ u8 event, struct sk_buff *skb);
+int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
+ u8 event, struct sk_buff *skb);
+
+int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev);
+int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx);
+int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx);
+int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+
+void st21nfca_se_init(struct nfc_hci_dev *hdev);
+void st21nfca_se_deinit(struct nfc_hci_dev *hdev);
+#endif /* __ST21NFCA_SE_H */
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile
index f4d835dd15f2..ce659a9e5a1a 100644
--- a/drivers/nfc/st21nfcb/Makefile
+++ b/drivers/nfc/st21nfcb/Makefile
@@ -2,7 +2,7 @@
# Makefile for ST21NFCB NCI based NFC driver
#
-st21nfcb_nci-objs = ndlc.o st21nfcb.o
+st21nfcb_nci-objs = ndlc.o st21nfcb.o st21nfcb_se.o
obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb_nci.o
st21nfcb_i2c-objs = i2c.o
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
index 01ba865863ee..eb886932d972 100644
--- a/drivers/nfc/st21nfcb/i2c.c
+++ b/drivers/nfc/st21nfcb/i2c.c
@@ -199,7 +199,7 @@ static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
struct sk_buff *skb = NULL;
int r;
- if (!phy || irq != phy->i2c_dev->irq) {
+ if (!phy || !phy->ndlc || irq != phy->i2c_dev->irq) {
WARN_ON_ONCE(1);
return IRQ_NONE;
}
@@ -343,18 +343,22 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
return -ENODEV;
}
+ r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
+ ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+ &phy->ndlc);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to register ndlc layer\n");
+ return r;
+ }
+
r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
st21nfcb_nci_irq_thread_fn,
phy->irq_polarity | IRQF_ONESHOT,
ST21NFCB_NCI_DRIVER_NAME, phy);
- if (r < 0) {
+ if (r < 0)
nfc_err(&client->dev, "Unable to register IRQ handler\n");
- return r;
- }
- return ndlc_probe(phy, &i2c_phy_ops, &client->dev,
- ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
- &phy->ndlc);
+ return r;
}
static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
@@ -373,6 +377,7 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_OF
static const struct of_device_id of_st21nfcb_i2c_match[] = {
+ { .compatible = "st,st21nfcb-i2c", },
{ .compatible = "st,st21nfcb_i2c", },
{}
};
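The st21nfcb I2C probe above is reordered so that the ndlc layer exists before the threaded IRQ is requested; together with the new !phy->ndlc check in the IRQ thread, this closes the window where an early interrupt could dereference an unset ndlc pointer. A sketch of the resulting shape, with error handling trimmed; the st21nfcb_i2c_phy type name is assumed from the driver, and the remaining symbols are the ones used in the hunk.

#include <linux/i2c.h>
#include <linux/interrupt.h>

static int example_probe_order(struct i2c_client *client,
			       struct st21nfcb_i2c_phy *phy)
{
	int r;

	/* Create the ndlc layer first so the IRQ thread always finds it. */
	r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
		       ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
		       &phy->ndlc);
	if (r < 0)
		return r;

	/* Only then arm the interrupt handler. */
	return devm_request_threaded_irq(&client->dev, client->irq, NULL,
					 st21nfcb_nci_irq_thread_fn,
					 phy->irq_polarity | IRQF_ONESHOT,
					 ST21NFCB_NCI_DRIVER_NAME, phy);
}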
diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st21nfcb/ndlc.c
index bac50e805f1d..5fbf59d2138c 100644
--- a/drivers/nfc/st21nfcb/ndlc.c
+++ b/drivers/nfc/st21nfcb/ndlc.c
@@ -138,7 +138,7 @@ static void llt_ndlc_requeue_data_pending(struct llt_ndlc *ndlc)
default:
pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
kfree_skb(skb);
- break;
+ continue;
}
skb_queue_head(&ndlc->send_q, skb);
}
@@ -297,6 +297,5 @@ void ndlc_remove(struct llt_ndlc *ndlc)
skb_queue_purge(&ndlc->send_q);
st21nfcb_nci_remove(ndlc->ndev);
- kfree(ndlc);
}
EXPORT_SYMBOL(ndlc_remove);
diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c
index ea63d5877831..ca9871ab3fb3 100644
--- a/drivers/nfc/st21nfcb/st21nfcb.c
+++ b/drivers/nfc/st21nfcb/st21nfcb.c
@@ -22,6 +22,7 @@
#include <net/nfc/nci_core.h>
#include "st21nfcb.h"
+#include "st21nfcb_se.h"
#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
@@ -78,6 +79,13 @@ static struct nci_ops st21nfcb_nci_ops = {
.close = st21nfcb_nci_close,
.send = st21nfcb_nci_send,
.get_rfprotocol = st21nfcb_nci_get_rfprotocol,
+ .discover_se = st21nfcb_nci_discover_se,
+ .enable_se = st21nfcb_nci_enable_se,
+ .disable_se = st21nfcb_nci_disable_se,
+ .se_io = st21nfcb_nci_se_io,
+ .hci_load_session = st21nfcb_hci_load_session,
+ .hci_event_received = st21nfcb_hci_event_received,
+ .hci_cmd_received = st21nfcb_hci_cmd_received,
};
int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
@@ -114,9 +122,10 @@ int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
if (r) {
pr_err("Cannot register nfc device to nci core\n");
nci_free_device(ndlc->ndev);
+ return r;
}
- return r;
+ return st21nfcb_se_init(ndlc->ndev);
}
EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
diff --git a/drivers/nfc/st21nfcb/st21nfcb.h b/drivers/nfc/st21nfcb/st21nfcb.h
index ea58a56ad794..5ef8a58c9839 100644
--- a/drivers/nfc/st21nfcb/st21nfcb.h
+++ b/drivers/nfc/st21nfcb/st21nfcb.h
@@ -19,6 +19,7 @@
#ifndef __LOCAL_ST21NFCB_H_
#define __LOCAL_ST21NFCB_H_
+#include "st21nfcb_se.h"
#include "ndlc.h"
/* Define private flags: */
@@ -27,6 +28,7 @@
struct st21nfcb_nci_info {
struct llt_ndlc *ndlc;
unsigned long flags;
+ struct st21nfcb_se_info se_info;
};
void st21nfcb_nci_remove(struct nci_dev *ndev);
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.c b/drivers/nfc/st21nfcb/st21nfcb_se.c
new file mode 100644
index 000000000000..7c82e9d87a65
--- /dev/null
+++ b/drivers/nfc/st21nfcb/st21nfcb_se.c
@@ -0,0 +1,707 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/delay.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+#include "st21nfcb.h"
+#include "st21nfcb_se.h"
+
+struct st21nfcb_pipe_info {
+ u8 pipe_state;
+ u8 src_host_id;
+ u8 src_gate_id;
+ u8 dst_host_id;
+ u8 dst_gate_id;
+} __packed;
+
+/* Hosts */
+#define ST21NFCB_HOST_CONTROLLER_ID 0x00
+#define ST21NFCB_TERMINAL_HOST_ID 0x01
+#define ST21NFCB_UICC_HOST_ID 0x02
+#define ST21NFCB_ESE_HOST_ID 0xc0
+
+/* Gates */
+#define ST21NFCB_DEVICE_MGNT_GATE 0x01
+#define ST21NFCB_APDU_READER_GATE 0xf0
+#define ST21NFCB_CONNECTIVITY_GATE 0x41
+
+/* Pipes */
+#define ST21NFCB_DEVICE_MGNT_PIPE 0x02
+
+/* Connectivity pipe only */
+#define ST21NFCB_SE_COUNT_PIPE_UICC 0x01
+/* Connectivity + APDU Reader pipe */
+#define ST21NFCB_SE_COUNT_PIPE_EMBEDDED 0x02
+
+#define ST21NFCB_SE_TO_HOT_PLUG 1000 /* msecs */
+#define ST21NFCB_SE_TO_PIPES 2000
+
+#define ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80)
+
+#define NCI_HCI_APDU_PARAM_ATR 0x01
+#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01
+#define NCI_HCI_ADMIN_PARAM_WHITELIST 0x03
+#define NCI_HCI_ADMIN_PARAM_HOST_LIST 0x04
+
+#define ST21NFCB_EVT_SE_HARD_RESET 0x20
+#define ST21NFCB_EVT_TRANSMIT_DATA 0x10
+#define ST21NFCB_EVT_WTX_REQUEST 0x11
+#define ST21NFCB_EVT_SE_SOFT_RESET 0x11
+#define ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER 0x21
+#define ST21NFCB_EVT_HOT_PLUG 0x03
+
+#define ST21NFCB_SE_MODE_OFF 0x00
+#define ST21NFCB_SE_MODE_ON 0x01
+
+#define ST21NFCB_EVT_CONNECTIVITY 0x10
+#define ST21NFCB_EVT_TRANSACTION 0x12
+
+#define ST21NFCB_DM_GETINFO 0x13
+#define ST21NFCB_DM_GETINFO_PIPE_LIST 0x02
+#define ST21NFCB_DM_GETINFO_PIPE_INFO 0x01
+#define ST21NFCB_DM_PIPE_CREATED 0x02
+#define ST21NFCB_DM_PIPE_OPEN 0x04
+#define ST21NFCB_DM_RF_ACTIVE 0x80
+#define ST21NFCB_DM_DISCONNECT 0x30
+
+#define ST21NFCB_DM_IS_PIPE_OPEN(p) \
+ ((p & 0x0f) == (ST21NFCB_DM_PIPE_CREATED | ST21NFCB_DM_PIPE_OPEN))
+
+#define ST21NFCB_ATR_DEFAULT_BWI 0x04
+
+/*
+ * WT = 2^BWI/10 [s]; convert to msecs and add some safety
+ * margin by doubling this timeout
+ */
+#define ST21NFCB_BWI_TO_TIMEOUT(x) ((1 << x) * 200)
+#define ST21NFCB_ATR_GET_Y_FROM_TD(x) (x >> 4)
+
+/* If TA is present bit 0 is set */
+#define ST21NFCB_ATR_TA_PRESENT(x) (x & 0x01)
+/* If TB is present bit 1 is set */
+#define ST21NFCB_ATR_TB_PRESENT(x) (x & 0x02)
+
+#define ST21NFCB_NUM_DEVICES 256
+
+static DECLARE_BITMAP(dev_mask, ST21NFCB_NUM_DEVICES);
+
+/* Here are the mandatory pipe for st21nfcb */
+static struct nci_hci_gate st21nfcb_gates[] = {
+ {NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE,
+ ST21NFCB_HOST_CONTROLLER_ID},
+ {NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE,
+ ST21NFCB_HOST_CONTROLLER_ID},
+ {ST21NFCB_DEVICE_MGNT_GATE, ST21NFCB_DEVICE_MGNT_PIPE,
+ ST21NFCB_HOST_CONTROLLER_ID},
+
+ /* Secure element pipes are created by secure element host */
+ {ST21NFCB_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+ ST21NFCB_HOST_CONTROLLER_ID},
+ {ST21NFCB_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+ ST21NFCB_HOST_CONTROLLER_ID},
+};
+
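+/*
+ * Walk the ATR interface bytes (TA/TB presence is encoded in the Y
+ * nibble of each TD) until the first TB is found and return the BWI
+ * from its upper nibble.
+ */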
+static u8 st21nfcb_se_get_bwi(struct nci_dev *ndev)
+{
+ int i;
+ u8 td;
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
+ for (i = 1; i < ST21NFCB_ESE_MAX_LENGTH; i++) {
+ td = ST21NFCB_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
+ if (ST21NFCB_ATR_TA_PRESENT(td))
+ i++;
+ if (ST21NFCB_ATR_TB_PRESENT(td)) {
+ i++;
+ return info->se_info.atr[i] >> 4;
+ }
+ }
+ return ST21NFCB_ATR_DEFAULT_BWI;
+}
+
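+/*
+ * Read the eSE ATR through the APDU reader gate and derive the
+ * waiting-time (WT) timeout from its BWI.
+ */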
+static void st21nfcb_se_get_atr(struct nci_dev *ndev)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ int r;
+ struct sk_buff *skb;
+
+ r = nci_hci_get_param(ndev, ST21NFCB_APDU_READER_GATE,
+ NCI_HCI_APDU_PARAM_ATR, &skb);
+ if (r < 0)
+ return;
+
+ if (skb->len <= ST21NFCB_ESE_MAX_LENGTH) {
+ memcpy(info->se_info.atr, skb->data, skb->len);
+
+ info->se_info.wt_timeout =
+ ST21NFCB_BWI_TO_TIMEOUT(st21nfcb_se_get_bwi(ndev));
+ }
+ kfree_skb(skb);
+}
+
+int st21nfcb_hci_load_session(struct nci_dev *ndev)
+{
+ int i, j, r;
+ struct sk_buff *skb_pipe_list, *skb_pipe_info;
+ struct st21nfcb_pipe_info *dm_pipe_info;
+ u8 pipe_list[] = { ST21NFCB_DM_GETINFO_PIPE_LIST,
+ ST21NFCB_TERMINAL_HOST_ID};
+ u8 pipe_info[] = { ST21NFCB_DM_GETINFO_PIPE_INFO,
+ ST21NFCB_TERMINAL_HOST_ID, 0};
+
+ /* On the ST21NFCB device, pipe numbers are dynamic.
+ * If the pipes are already created, hci_dev_up will fail.
+ * Clearing all pipes is a bad idea because:
+ * - it does useless EEPROM cycling
+ * - it might break secure element support
+ * (such as removing the connectivity or APDU reader pipe)
+ * A better approach on the ST21NFCB is to:
+ * - get the pipe list for each host
+ * (eg: ST21NFCB_HOST_CONTROLLER_ID for now.
+ * TODO: later on the UICC host and eSE host)
+ * - get the pipe information
+ * - match the retrieved pipe list against st21nfcb_gates
+ * ST21NFCB_DEVICE_MGNT_GATE is a proprietary gate
+ * with ST21NFCB_DEVICE_MGNT_PIPE.
+ * A pipe may be closed and then needs to be reopened.
+ */
+ r = nci_hci_connect_gate(ndev, ST21NFCB_HOST_CONTROLLER_ID,
+ ST21NFCB_DEVICE_MGNT_GATE,
+ ST21NFCB_DEVICE_MGNT_PIPE);
+ if (r < 0)
+ goto free_info;
+
+ /* Get pipe list */
+ r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
+ ST21NFCB_DM_GETINFO, pipe_list, sizeof(pipe_list),
+ &skb_pipe_list);
+ if (r < 0)
+ goto free_info;
+
+ /* Complete the existing gate_pipe table */
+ for (i = 0; i < skb_pipe_list->len; i++) {
+ pipe_info[2] = skb_pipe_list->data[i];
+ r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
+ ST21NFCB_DM_GETINFO, pipe_info,
+ sizeof(pipe_info), &skb_pipe_info);
+
+ if (r)
+ continue;
+
+ /*
+ * Match pipe ID and gate ID
+ * Output format from ST21NFCB_DM_GETINFO is:
+ * - pipe state (1byte)
+ * - source hid (1byte)
+ * - source gid (1byte)
+ * - destination hid (1byte)
+ * - destination gid (1byte)
+ */
+ dm_pipe_info = (struct st21nfcb_pipe_info *)skb_pipe_info->data;
+ if (dm_pipe_info->dst_gate_id == ST21NFCB_APDU_READER_GATE &&
+ dm_pipe_info->src_host_id != ST21NFCB_ESE_HOST_ID) {
+ pr_err("Unexpected apdu_reader pipe on host %x\n",
+ dm_pipe_info->src_host_id);
+ continue;
+ }
+
+ for (j = 0; (j < ARRAY_SIZE(st21nfcb_gates)) &&
+ (st21nfcb_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
+ ;
+
+ if (j < ARRAY_SIZE(st21nfcb_gates) &&
+ st21nfcb_gates[j].gate == dm_pipe_info->dst_gate_id &&
+ ST21NFCB_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
+ st21nfcb_gates[j].pipe = pipe_info[2];
+
+ ndev->hci_dev->gate2pipe[st21nfcb_gates[j].gate] =
+ st21nfcb_gates[j].pipe;
+ ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].gate =
+ st21nfcb_gates[j].gate;
+ ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].host =
+ dm_pipe_info->src_host_id;
+ }
+ }
+
+ memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
+ sizeof(st21nfcb_gates));
+
+free_info:
+ kfree_skb(skb_pipe_info);
+ kfree_skb(skb_pipe_list);
+ return r;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_hci_load_session);
+
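+/*
+ * ADMIN gate events: on EVT_HOT_PLUG, if a secure element activation is
+ * pending and the hot plug is not inhibited, complete the request;
+ * otherwise re-arm the activation timer to wait for pipe creation.
+ */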
+static void st21nfcb_hci_admin_event_received(struct nci_dev *ndev,
+ u8 event, struct sk_buff *skb)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ switch (event) {
+ case ST21NFCB_EVT_HOT_PLUG:
+ if (info->se_info.se_active) {
+ if (!ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
+ del_timer_sync(&info->se_info.se_active_timer);
+ info->se_info.se_active = false;
+ complete(&info->se_info.req_completion);
+ } else {
+ mod_timer(&info->se_info.se_active_timer,
+ jiffies +
+ msecs_to_jiffies(ST21NFCB_SE_TO_PIPES));
+ }
+ }
+ break;
+ }
+}
+
+static int st21nfcb_hci_apdu_reader_event_received(struct nci_dev *ndev,
+ u8 event,
+ struct sk_buff *skb)
+{
+ int r = 0;
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ pr_debug("apdu reader gate event: %x\n", event);
+
+ switch (event) {
+ case ST21NFCB_EVT_TRANSMIT_DATA:
+ del_timer_sync(&info->se_info.bwi_timer);
+ info->se_info.bwi_active = false;
+ info->se_info.cb(info->se_info.cb_context,
+ skb->data, skb->len, 0);
+ break;
+ case ST21NFCB_EVT_WTX_REQUEST:
+ mod_timer(&info->se_info.bwi_timer, jiffies +
+ msecs_to_jiffies(info->se_info.wt_timeout));
+ break;
+ }
+
+ kfree_skb(skb);
+ return r;
+}
+
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ * 1: driver does not handle the event, please do standard processing
+ */
+static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
+ u8 host, u8 event,
+ struct sk_buff *skb)
+{
+ int r = 0;
+ struct device *dev = &ndev->nfc_dev->dev;
+ struct nfc_evt_transaction *transaction;
+
+ pr_debug("connectivity gate event: %x\n", event);
+
+ switch (event) {
+ case ST21NFCB_EVT_CONNECTIVITY:
+
+ break;
+ case ST21NFCB_EVT_TRANSACTION:
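+ /*
+ * Expected payload: AID tag (1 byte), AID length (1 byte), AID,
+ * params tag (1 byte), params length (1 byte), params.
+ */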
+ if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
+ skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+ return -EPROTO;
+
+ transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+ skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
+
+ transaction->aid_len = skb->data[1];
+ memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+
+ if (skb->data[transaction->aid_len + 2] !=
+ NFC_EVT_TRANSACTION_PARAMS_TAG)
+ return -EPROTO;
+
+ transaction->params_len = skb->data[transaction->aid_len + 3];
+ memcpy(transaction->params, skb->data +
+ transaction->aid_len + 4, transaction->params_len);
+
+ r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
+ break;
+ default:
+ return 1;
+ }
+ kfree_skb(skb);
+ return r;
+}
+
+void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
+ u8 event, struct sk_buff *skb)
+{
+ u8 gate = ndev->hci_dev->pipes[pipe].gate;
+ u8 host = ndev->hci_dev->pipes[pipe].host;
+
+ switch (gate) {
+ case NCI_HCI_ADMIN_GATE:
+ st21nfcb_hci_admin_event_received(ndev, event, skb);
+ break;
+ case ST21NFCB_APDU_READER_GATE:
+ st21nfcb_hci_apdu_reader_event_received(ndev, event, skb);
+ break;
+ case ST21NFCB_CONNECTIVITY_GATE:
+ st21nfcb_hci_connectivity_event_received(ndev, host, event,
+ skb);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(st21nfcb_hci_event_received);
+
+
+void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+ struct sk_buff *skb)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ u8 gate = ndev->hci_dev->pipes[pipe].gate;
+
+ pr_debug("cmd: %x\n", cmd);
+
+ switch (cmd) {
+ case NCI_HCI_ANY_OPEN_PIPE:
+ if (gate != ST21NFCB_APDU_READER_GATE &&
+ ndev->hci_dev->pipes[pipe].host != ST21NFCB_UICC_HOST_ID)
+ ndev->hci_dev->count_pipes++;
+
+ if (ndev->hci_dev->count_pipes ==
+ ndev->hci_dev->expected_pipes) {
+ del_timer_sync(&info->se_info.se_active_timer);
+ info->se_info.se_active = false;
+ ndev->hci_dev->count_pipes = 0;
+ complete(&info->se_info.req_completion);
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(st21nfcb_hci_cmd_received);
+
+/*
+ * Remarks: On some early st21nfcb firmware, nci_nfcee_mode_set(0)
+ * is rejected
+ */
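+/*
+ * Enable the NFCEE for the requested secure element, wait for
+ * EVT_HOT_PLUG, then read the ADMIN host list to check that the
+ * requested state was reached. Returns se_idx on success.
+ */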
+static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx,
+ u8 state)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ int r;
+ struct sk_buff *sk_host_list;
+ u8 host_id;
+
+ switch (se_idx) {
+ case ST21NFCB_UICC_HOST_ID:
+ ndev->hci_dev->count_pipes = 0;
+ ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_UICC;
+ break;
+ case ST21NFCB_ESE_HOST_ID:
+ ndev->hci_dev->count_pipes = 0;
+ ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_EMBEDDED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Wait for an EVT_HOT_PLUG in order to
+ * retrieve a relevant host list.
+ */
+ reinit_completion(&info->se_info.req_completion);
+ r = nci_nfcee_mode_set(ndev, se_idx, NCI_NFCEE_ENABLE);
+ if (r != NCI_STATUS_OK)
+ return r;
+
+ mod_timer(&info->se_info.se_active_timer, jiffies +
+ msecs_to_jiffies(ST21NFCB_SE_TO_HOT_PLUG));
+ info->se_info.se_active = true;
+
+ /* Ignore the return value and check the host_list in any case */
+ wait_for_completion_interruptible(&info->se_info.req_completion);
+
+ /* There might be some "collision" after receiving a HOT_PLUG event.
+ * This may cause the CLF not to answer the next HCI command.
+ * There is no possible synchronization to prevent this.
+ * Adding a small delay is the only way to work around the issue.
+ */
+ usleep_range(3000, 5000);
+
+ r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
+ NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list);
+ if (r != NCI_HCI_ANY_OK)
+ return r;
+
+ host_id = sk_host_list->data[sk_host_list->len - 1];
+ kfree_skb(sk_host_list);
+ if (state == ST21NFCB_SE_MODE_ON && host_id == se_idx)
+ return se_idx;
+ else if (state == ST21NFCB_SE_MODE_OFF && host_id != se_idx)
+ return se_idx;
+
+ return -1;
+}
+
+int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
+{
+ int r;
+
+ pr_debug("st21nfcb_nci_disable_se\n");
+
+ if (se_idx == NFC_SE_EMBEDDED) {
+ r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
+ ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_disable_se);
+
+int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
+{
+ int r;
+
+ pr_debug("st21nfcb_nci_enable_se\n");
+
+ if (se_idx == ST21NFCB_HCI_HOST_ID_ESE) {
+ r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
+ ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_enable_se);
+
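+/*
+ * Create the NCI logical connection to the HCI network NFCEE,
+ * register the gate table and a per-device session id, then
+ * initialize the HCI session and enable the NFCEE.
+ */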
+static int st21nfcb_hci_network_init(struct nci_dev *ndev)
+{
+ struct core_conn_create_dest_spec_params *dest_params;
+ struct dest_spec_params spec_params;
+ struct nci_conn_info *conn_info;
+ int r, dev_num;
+
+ dest_params =
+ kzalloc(sizeof(struct core_conn_create_dest_spec_params) +
+ sizeof(struct dest_spec_params), GFP_KERNEL);
+ if (dest_params == NULL) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE;
+ dest_params->length = sizeof(struct dest_spec_params);
+ spec_params.id = ndev->hci_dev->nfcee_id;
+ spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS;
+ memcpy(dest_params->value, &spec_params, sizeof(struct dest_spec_params));
+ r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1,
+ sizeof(struct core_conn_create_dest_spec_params) +
+ sizeof(struct dest_spec_params),
+ dest_params);
+ if (r != NCI_STATUS_OK)
+ goto free_dest_params;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ goto free_dest_params;
+
+ memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
+ sizeof(st21nfcb_gates));
+
+ /*
+ * The session id must include the driver name + the i2c bus address
+ * as persistent info to discriminate between two identical chips.
+ */
+ dev_num = find_first_zero_bit(dev_mask, ST21NFCB_NUM_DEVICES);
+ if (dev_num >= ST21NFCB_NUM_DEVICES) {
+ r = -ENODEV;
+ goto free_dest_params;
+ }
+
+ scnprintf(ndev->hci_dev->init_data.session_id,
+ sizeof(ndev->hci_dev->init_data.session_id),
+ "%s%2x", "ST21BH", dev_num);
+
+ r = nci_hci_dev_session_init(ndev);
+ if (r != NCI_HCI_ANY_OK)
+ goto exit;
+
+ r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+ NCI_NFCEE_ENABLE);
+ if (r != NCI_STATUS_OK)
+ goto exit;
+
+ return 0;
+
+free_dest_params:
+ kfree(dest_params);
+
+exit:
+ return r;
+}
+
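+/*
+ * Bring up the HCI network, whitelist the UICC and eSE hosts and probe
+ * each of them. Returns 0 when at least one secure element was found.
+ */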
+int st21nfcb_nci_discover_se(struct nci_dev *ndev)
+{
+ u8 param[2];
+ int r;
+ int se_count = 0;
+
+ pr_debug("st21nfcb_nci_discover_se\n");
+
+ r = st21nfcb_hci_network_init(ndev);
+ if (r != 0)
+ return r;
+
+ param[0] = ST21NFCB_UICC_HOST_ID;
+ param[1] = ST21NFCB_HCI_HOST_ID_ESE;
+ r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
+ NCI_HCI_ADMIN_PARAM_WHITELIST,
+ param, sizeof(param));
+ if (r != NCI_HCI_ANY_OK)
+ return r;
+
+ r = st21nfcb_nci_control_se(ndev, ST21NFCB_UICC_HOST_ID,
+ ST21NFCB_SE_MODE_ON);
+ if (r == ST21NFCB_UICC_HOST_ID) {
+ nfc_add_se(ndev->nfc_dev, ST21NFCB_UICC_HOST_ID, NFC_SE_UICC);
+ se_count++;
+ }
+
+ /* Try to enable eSE in order to check availability */
+ r = st21nfcb_nci_control_se(ndev, ST21NFCB_HCI_HOST_ID_ESE,
+ ST21NFCB_SE_MODE_ON);
+ if (r == ST21NFCB_HCI_HOST_ID_ESE) {
+ nfc_add_se(ndev->nfc_dev, ST21NFCB_HCI_HOST_ID_ESE,
+ NFC_SE_EMBEDDED);
+ se_count++;
+ st21nfcb_se_get_atr(ndev);
+ }
+
+ return !se_count;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_discover_se);
+
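+/*
+ * Send an APDU to the eSE over the APDU reader gate and arm the BWI
+ * timer; the answer is delivered asynchronously through the se_io
+ * callback.
+ */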
+int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ pr_debug("\n");
+
+ switch (se_idx) {
+ case ST21NFCB_HCI_HOST_ID_ESE:
+ info->se_info.cb = cb;
+ info->se_info.cb_context = cb_context;
+ mod_timer(&info->se_info.bwi_timer, jiffies +
+ msecs_to_jiffies(info->se_info.wt_timeout));
+ info->se_info.bwi_active = true;
+ return nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
+ ST21NFCB_EVT_TRANSMIT_DATA, apdu,
+ apdu_length);
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(st21nfcb_nci_se_io);
+
+static void st21nfcb_se_wt_timeout(unsigned long data)
+{
+ /*
+ * No answer from the secure element within the defined timeout:
+ * send a reset request as a recovery procedure.
+ * Depending on the situation, we first try a software reset of
+ * the secure element. If the next command still gets no answer
+ * in time, we ask the CLF for a secure element hardware reset.
+ */
+ /* hardware reset managed through VCC_UICC_OUT power supply */
+ u8 param = 0x01;
+ struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
+
+ pr_debug("\n");
+
+ info->se_info.bwi_active = false;
+
+ if (!info->se_info.xch_error) {
+ info->se_info.xch_error = true;
+ nci_hci_send_event(info->ndlc->ndev, ST21NFCB_APDU_READER_GATE,
+ ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
+ } else {
+ info->se_info.xch_error = false;
+ nci_hci_send_event(info->ndlc->ndev, ST21NFCB_DEVICE_MGNT_GATE,
+ ST21NFCB_EVT_SE_HARD_RESET, &param, 1);
+ }
+ info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
+}
+
+static void st21nfcb_se_activation_timeout(unsigned long data)
+{
+ struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
+
+ pr_debug("\n");
+
+ info->se_info.se_active = false;
+
+ complete(&info->se_info.req_completion);
+}
+
+int st21nfcb_se_init(struct nci_dev *ndev)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ init_completion(&info->se_info.req_completion);
+ /* initialize timers */
+ init_timer(&info->se_info.bwi_timer);
+ info->se_info.bwi_timer.data = (unsigned long)info;
+ info->se_info.bwi_timer.function = st21nfcb_se_wt_timeout;
+ info->se_info.bwi_active = false;
+
+ init_timer(&info->se_info.se_active_timer);
+ info->se_info.se_active_timer.data = (unsigned long)info;
+ info->se_info.se_active_timer.function =
+ st21nfcb_se_activation_timeout;
+ info->se_info.se_active = false;
+
+ info->se_info.xch_error = false;
+
+ info->se_info.wt_timeout =
+ ST21NFCB_BWI_TO_TIMEOUT(ST21NFCB_ATR_DEFAULT_BWI);
+
+ return 0;
+}
+EXPORT_SYMBOL(st21nfcb_se_init);
+
+void st21nfcb_se_deinit(struct nci_dev *ndev)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ if (info->se_info.bwi_active)
+ del_timer_sync(&info->se_info.bwi_timer);
+ if (info->se_info.se_active)
+ del_timer_sync(&info->se_info.se_active_timer);
+
+ info->se_info.se_active = false;
+ info->se_info.bwi_active = false;
+}
+EXPORT_SYMBOL(st21nfcb_se_deinit);
+
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.h b/drivers/nfc/st21nfcb/st21nfcb_se.h
new file mode 100644
index 000000000000..52a323872bea
--- /dev/null
+++ b/drivers/nfc/st21nfcb/st21nfcb_se.h
@@ -0,0 +1,61 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LOCAL_ST21NFCB_SE_H_
+#define __LOCAL_ST21NFCB_SE_H_
+
+/*
+ * Ref. ISO 7816-3, chapter 8.1: the initial character TS is followed
+ * by a sequence of at most 32 characters.
+ */
+#define ST21NFCB_ESE_MAX_LENGTH 33
+#define ST21NFCB_HCI_HOST_ID_ESE 0xc0
+
+struct st21nfcb_se_info {
+ u8 atr[ST21NFCB_ESE_MAX_LENGTH];
+ struct completion req_completion;
+
+ struct timer_list bwi_timer;
+ int wt_timeout; /* in msecs */
+ bool bwi_active;
+
+ struct timer_list se_active_timer;
+ bool se_active;
+
+ bool xch_error;
+
+ se_io_cb_t cb;
+ void *cb_context;
+};
+
+int st21nfcb_se_init(struct nci_dev *ndev);
+void st21nfcb_se_deinit(struct nci_dev *ndev);
+
+int st21nfcb_nci_discover_se(struct nci_dev *ndev);
+int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
+int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
+int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+int st21nfcb_hci_load_session(struct nci_dev *ndev);
+void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
+ u8 event, struct sk_buff *skb);
+void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+
+
+#endif /* __LOCAL_ST21NFCB_SE_H_ */
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index b5e0c873d4e1..38d1c51f58b1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -10,7 +10,6 @@ menu "Device Tree and Open Firmware support"
config OF_UNITTEST
bool "Device Tree runtime unit tests"
depends on OF_IRQ && OF_EARLY_FLATTREE
- select OF_DYNAMIC
select OF_RESOLVE
help
This option builds in test cases for the device tree infrastructure
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 36536b6a8834..0a8aeb8523fe 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1303,6 +1303,7 @@ int of_property_read_u64_array(const struct device_node *np,
}
return 0;
}
+EXPORT_SYMBOL_GPL(of_property_read_u64_array);
/**
* of_property_read_string - Find and read a string from a property
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 510074226d57..3a896c9aeb74 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -762,7 +762,7 @@ static inline void early_init_dt_check_for_initrd(unsigned long node)
#ifdef CONFIG_SERIAL_EARLYCON
extern struct of_device_id __earlycon_of_table[];
-int __init early_init_dt_scan_chosen_serial(void)
+static int __init early_init_dt_scan_chosen_serial(void)
{
int offset;
const char *p;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 88471d3d98cd..110fece2ff53 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base)
{
+ struct resource_entry *window;
struct resource *res;
struct resource *bus_range;
struct of_pci_range range;
@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
conversion_failed:
kfree(res);
parse_failed:
+ resource_list_for_each_entry(window, resources)
+ kfree(window->res);
pci_free_resource_list(resources);
+ kfree(bus_range);
return err;
}
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index dc566b38645f..726ebe792813 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -265,6 +265,7 @@ int of_reserved_mem_device_init(struct device *dev)
return ret;
}
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_init);
/**
* of_reserved_mem_device_release() - release reserved memory device structures
@@ -289,3 +290,4 @@ void of_reserved_mem_device_release(struct device *dev)
rmem->ops->device_release(rmem, dev);
}
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index ea63fbd228ed..352b4f28f82c 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
ret = of_overlay_apply_one(ov, tchild, child);
if (ret)
return ret;
-
- /* The properties are already copied, now do the child nodes */
- for_each_child_of_node(child, grandchild) {
- ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
- if (ret) {
- pr_err("%s: Failed to apply single node @%s/%s\n",
- __func__, tchild->full_name,
- grandchild->name);
- return ret;
- }
- }
}
return ret;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b33c6a21807..b0d50d70a8a1 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
size = dev->coherent_dma_mask;
} else {
offset = PFN_DOWN(paddr - dma_addr);
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
dev->dma_pfn_offset = offset;
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
+ /* already populated? (driver using of_populate manually) */
+ if (of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
break;
case OF_RECONFIG_CHANGE_REMOVE:
+
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
pdev = of_find_device_by_node(rd->dn);
if (pdev == NULL)
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 75976da22b2e..244226cbb5a3 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -68,6 +68,48 @@
status = "disabled";
reg = <8>;
};
+
+ i2c-test-bus {
+ compatible = "selftest-i2c-bus";
+ status = "okay";
+ reg = <50>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest12 {
+ reg = <8>;
+ compatible = "selftest-i2c-dev";
+ status = "disabled";
+ };
+
+ test-selftest13 {
+ reg = <9>;
+ compatible = "selftest-i2c-dev";
+ status = "okay";
+ };
+
+ test-selftest14 {
+ reg = <10>;
+ compatible = "selftest-i2c-mux";
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ test-mux-dev {
+ reg = <32>;
+ compatible = "selftest-i2c-dev";
+ status = "okay";
+ };
+ };
+ };
+ };
};
};
@@ -176,5 +218,112 @@
};
};
+ overlay10 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest10 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <10>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest101 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
+
+ overlay11 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest11 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <11>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest111 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
+
+ /* test enable using absolute target path (i2c) */
+ overlay12 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-selftest12";
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+
+ /* test disable using absolute target path (i2c) */
+ overlay13 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-selftest13";
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+
+ /* test mux overlay */
+ overlay15 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus";
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ test-selftest15 {
+ reg = <11>;
+ compatible = "selftest-i2c-mux";
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ test-mux-dev {
+ reg = <32>;
+ compatible = "selftest-i2c-dev";
+ status = "okay";
+ };
+ };
+ };
+ };
+ };
+ };
+
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 844838e11ef1..0cf9a236d438 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -20,6 +20,9 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+
#include "of_private.h"
static struct selftest_results {
@@ -27,11 +30,6 @@ static struct selftest_results {
int failed;
} selftest_results;
-#define NO_OF_NODES 3
-static struct device_node *nodes[NO_OF_NODES];
-static int last_node_index;
-static bool selftest_live_tree;
-
#define selftest(result, fmt, ...) ({ \
bool failed = !(result); \
if (failed) { \
@@ -822,6 +820,7 @@ static void update_node_properties(struct device_node *np,
static int attach_node_and_children(struct device_node *np)
{
struct device_node *next, *dup, *child;
+ unsigned long flags;
dup = of_find_node_by_path(np->full_name);
if (dup) {
@@ -829,17 +828,19 @@ static int attach_node_and_children(struct device_node *np)
return 0;
}
- /* Children of the root need to be remembered for removal */
- if (np->parent == of_root) {
- if (WARN_ON(last_node_index >= NO_OF_NODES))
- return -EINVAL;
- nodes[last_node_index++] = np;
- }
-
child = np->child;
np->child = NULL;
- np->sibling = NULL;
- of_attach_node(np);
+
+ mutex_lock(&of_mutex);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np->sibling = np->parent->child;
+ np->parent->child = np;
+ of_node_clear_flag(np, OF_DETACHED);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ __of_attach_node_sysfs(np);
+ mutex_unlock(&of_mutex);
+
while (child) {
next = child->sibling;
attach_node_and_children(child);
@@ -889,10 +890,7 @@ static int __init selftest_data_add(void)
}
if (!of_root) {
- /* enabling flag for removing nodes */
- selftest_live_tree = true;
of_root = selftest_data_node;
-
for_each_of_allnodes(np)
__of_attach_node_sysfs(np);
of_aliases = of_find_node_by_path("/aliases");
@@ -911,59 +909,6 @@ static int __init selftest_data_add(void)
return 0;
}
-/**
- * detach_node_and_children - detaches node
- * and its children from live tree
- *
- * @np: Node to detach from live tree
- */
-static void detach_node_and_children(struct device_node *np)
-{
- while (np->child)
- detach_node_and_children(np->child);
- of_detach_node(np);
-}
-
-/**
- * selftest_data_remove - removes the selftest data
- * nodes from the live tree
- */
-static void selftest_data_remove(void)
-{
- struct device_node *np;
- struct property *prop;
-
- if (selftest_live_tree) {
- of_node_put(of_aliases);
- of_node_put(of_chosen);
- of_aliases = NULL;
- of_chosen = NULL;
- for_each_child_of_node(of_root, np)
- detach_node_and_children(np);
- __of_detach_node_sysfs(of_root);
- of_root = NULL;
- return;
- }
-
- while (last_node_index-- > 0) {
- if (nodes[last_node_index]) {
- np = of_find_node_by_path(nodes[last_node_index]->full_name);
- if (np == nodes[last_node_index]) {
- if (of_aliases == np) {
- of_node_put(of_aliases);
- of_aliases = NULL;
- }
- detach_node_and_children(np);
- } else {
- for_each_property_of_node(np, prop) {
- if (strcmp(prop->name, "testcase-alias") == 0)
- of_remove_property(np, prop);
- }
- }
- }
- }
-}
-
#ifdef CONFIG_OF_OVERLAY
static int selftest_probe(struct platform_device *pdev)
@@ -978,6 +923,9 @@ static int selftest_probe(struct platform_device *pdev)
}
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
return 0;
}
@@ -1031,17 +979,94 @@ static int of_path_platform_device_exists(const char *path)
return pdev != NULL;
}
-static const char *selftest_path(int nr)
+#if IS_ENABLED(CONFIG_I2C)
+
+/* get the i2c client device instantiated at the path */
+static struct i2c_client *of_path_to_i2c_client(const char *path)
+{
+ struct device_node *np;
+ struct i2c_client *client;
+
+ np = of_find_node_by_path(path);
+ if (np == NULL)
+ return NULL;
+
+ client = of_find_i2c_device_by_node(np);
+ of_node_put(np);
+
+ return client;
+}
+
+/* find out if an i2c client device exists at that path */
+static int of_path_i2c_client_exists(const char *path)
+{
+ struct i2c_client *client;
+
+ client = of_path_to_i2c_client(path);
+ if (client)
+ put_device(&client->dev);
+ return client != NULL;
+}
+#else
+static int of_path_i2c_client_exists(const char *path)
+{
+ return 0;
+}
+#endif
+
+enum overlay_type {
+ PDEV_OVERLAY,
+ I2C_OVERLAY
+};
+
+static int of_path_device_type_exists(const char *path,
+ enum overlay_type ovtype)
{
+ switch (ovtype) {
+ case PDEV_OVERLAY:
+ return of_path_platform_device_exists(path);
+ case I2C_OVERLAY:
+ return of_path_i2c_client_exists(path);
+ }
+ return 0;
+}
+
+static const char *selftest_path(int nr, enum overlay_type ovtype)
+{
+ const char *base;
static char buf[256];
- snprintf(buf, sizeof(buf) - 1,
- "/testcase-data/overlay-node/test-bus/test-selftest%d", nr);
+ switch (ovtype) {
+ case PDEV_OVERLAY:
+ base = "/testcase-data/overlay-node/test-bus";
+ break;
+ case I2C_OVERLAY:
+ base = "/testcase-data/overlay-node/test-bus/i2c-test-bus";
+ break;
+ default:
+ buf[0] = '\0';
+ return buf;
+ }
+ snprintf(buf, sizeof(buf) - 1, "%s/test-selftest%d", base, nr);
buf[sizeof(buf) - 1] = '\0';
-
return buf;
}
+static int of_selftest_device_exists(int selftest_nr, enum overlay_type ovtype)
+{
+ const char *path;
+
+ path = selftest_path(selftest_nr, ovtype);
+
+ switch (ovtype) {
+ case PDEV_OVERLAY:
+ return of_path_platform_device_exists(path);
+ case I2C_OVERLAY:
+ return of_path_i2c_client_exists(path);
+ }
+ return 0;
+}
+
static const char *overlay_path(int nr)
{
static char buf[256];
@@ -1090,16 +1115,15 @@ out:
/* apply an overlay while checking before and after states */
static int of_selftest_apply_overlay_check(int overlay_nr, int selftest_nr,
- int before, int after)
+ int before, int after, enum overlay_type ovtype)
{
int ret;
/* selftest device must not be in before state */
- if (of_path_platform_device_exists(selftest_path(selftest_nr))
- != before) {
+ if (of_selftest_device_exists(selftest_nr, ovtype) != before) {
selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr),
+ selftest_path(selftest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1111,11 +1135,10 @@ static int of_selftest_apply_overlay_check(int overlay_nr, int selftest_nr,
}
/* selftest device must be to set to after state */
- if (of_path_platform_device_exists(selftest_path(selftest_nr))
- != after) {
+ if (of_selftest_device_exists(selftest_nr, ovtype) != after) {
selftest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr),
+ selftest_path(selftest_nr, ovtype),
!after ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1125,16 +1148,16 @@ static int of_selftest_apply_overlay_check(int overlay_nr, int selftest_nr,
/* apply an overlay and then revert it while checking before, after states */
static int of_selftest_apply_revert_overlay_check(int overlay_nr,
- int selftest_nr, int before, int after)
+ int selftest_nr, int before, int after,
+ enum overlay_type ovtype)
{
int ret, ov_id;
/* selftest device must be in before state */
- if (of_path_platform_device_exists(selftest_path(selftest_nr))
- != before) {
+ if (of_selftest_device_exists(selftest_nr, ovtype) != before) {
selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr),
+ selftest_path(selftest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1147,11 +1170,10 @@ static int of_selftest_apply_revert_overlay_check(int overlay_nr,
}
/* selftest device must be in after state */
- if (of_path_platform_device_exists(selftest_path(selftest_nr))
- != after) {
+ if (of_selftest_device_exists(selftest_nr, ovtype) != after) {
selftest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr),
+ selftest_path(selftest_nr, ovtype),
!after ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1160,16 +1182,15 @@ static int of_selftest_apply_revert_overlay_check(int overlay_nr,
if (ret != 0) {
selftest(0, "overlay @\"%s\" failed to be destroyed @\"%s\"\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr));
+ selftest_path(selftest_nr, ovtype));
return ret;
}
/* selftest device must be again in before state */
- if (of_path_platform_device_exists(selftest_path(selftest_nr))
- != before) {
+ if (of_selftest_device_exists(selftest_nr, ovtype) != before) {
selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr),
+ selftest_path(selftest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1183,7 +1204,7 @@ static void of_selftest_overlay_0(void)
int ret;
/* device should enable */
- ret = of_selftest_apply_overlay_check(0, 0, 0, 1);
+ ret = of_selftest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
@@ -1196,7 +1217,7 @@ static void of_selftest_overlay_1(void)
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(1, 1, 1, 0);
+ ret = of_selftest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY);
if (ret != 0)
return;
@@ -1209,7 +1230,7 @@ static void of_selftest_overlay_2(void)
int ret;
/* device should enable */
- ret = of_selftest_apply_overlay_check(2, 2, 0, 1);
+ ret = of_selftest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
@@ -1222,7 +1243,7 @@ static void of_selftest_overlay_3(void)
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(3, 3, 1, 0);
+ ret = of_selftest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY);
if (ret != 0)
return;
@@ -1235,7 +1256,7 @@ static void of_selftest_overlay_4(void)
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(4, 4, 0, 1);
+ ret = of_selftest_apply_overlay_check(4, 4, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
@@ -1248,7 +1269,7 @@ static void of_selftest_overlay_5(void)
int ret;
/* device should disable */
- ret = of_selftest_apply_revert_overlay_check(5, 5, 0, 1);
+ ret = of_selftest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
@@ -1265,12 +1286,12 @@ static void of_selftest_overlay_6(void)
/* selftest device must be in before state */
for (i = 0; i < 2; i++) {
- if (of_path_platform_device_exists(
- selftest_path(selftest_nr + i))
+ if (of_selftest_device_exists(selftest_nr + i, PDEV_OVERLAY)
!= before) {
selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i),
+ selftest_path(selftest_nr + i,
+ PDEV_OVERLAY),
!before ? "enabled" : "disabled");
return;
}
@@ -1297,12 +1318,12 @@ static void of_selftest_overlay_6(void)
for (i = 0; i < 2; i++) {
/* selftest device must be in after state */
- if (of_path_platform_device_exists(
- selftest_path(selftest_nr + i))
+ if (of_selftest_device_exists(selftest_nr + i, PDEV_OVERLAY)
!= after) {
selftest(0, "overlay @\"%s\" failed @\"%s\" %s\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i),
+ selftest_path(selftest_nr + i,
+ PDEV_OVERLAY),
!after ? "enabled" : "disabled");
return;
}
@@ -1313,19 +1334,20 @@ static void of_selftest_overlay_6(void)
if (ret != 0) {
selftest(0, "overlay @\"%s\" failed destroy @\"%s\"\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i));
+ selftest_path(selftest_nr + i,
+ PDEV_OVERLAY));
return;
}
}
for (i = 0; i < 2; i++) {
/* selftest device must be again in before state */
- if (of_path_platform_device_exists(
- selftest_path(selftest_nr + i))
+ if (of_selftest_device_exists(selftest_nr + i, PDEV_OVERLAY)
!= before) {
selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i),
+ selftest_path(selftest_nr + i,
+ PDEV_OVERLAY),
!before ? "enabled" : "disabled");
return;
}
@@ -1367,7 +1389,8 @@ static void of_selftest_overlay_8(void)
if (ret == 0) {
selftest(0, "overlay @\"%s\" was destroyed @\"%s\"\n",
overlay_path(overlay_nr + 0),
- selftest_path(selftest_nr));
+ selftest_path(selftest_nr,
+ PDEV_OVERLAY));
return;
}
@@ -1377,7 +1400,8 @@ static void of_selftest_overlay_8(void)
if (ret != 0) {
selftest(0, "overlay @\"%s\" not destroyed @\"%s\"\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr));
+ selftest_path(selftest_nr,
+ PDEV_OVERLAY));
return;
}
}
@@ -1385,6 +1409,360 @@ static void of_selftest_overlay_8(void)
selftest(1, "overlay test %d passed\n", 8);
}
+/* test insertion of a bus with parent devices */
+static void of_selftest_overlay_10(void)
+{
+ int ret;
+ char *child_path;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY);
+ if (selftest(ret == 0,
+ "overlay test %d failed; overlay application\n", 10))
+ return;
+
+ child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
+ selftest_path(10, PDEV_OVERLAY));
+ if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+ return;
+
+ ret = of_path_device_type_exists(child_path, PDEV_OVERLAY);
+ kfree(child_path);
+ if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+ return;
+}
+
+/* test insertion of a bus with parent devices (and revert) */
+static void of_selftest_overlay_11(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1,
+ PDEV_OVERLAY);
+ if (selftest(ret == 0,
+ "overlay test %d failed; overlay application\n", 11))
+ return;
+}
+
+#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
+
+struct selftest_i2c_bus_data {
+ struct platform_device *pdev;
+ struct i2c_adapter adap;
+};
+
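+/*
+ * Dummy transfer that reports success for every message; the overlay
+ * selftests only need the i2c devices to be instantiated.
+ */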
+static int selftest_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct selftest_i2c_bus_data *std = i2c_get_adapdata(adap);
+
+ (void)std;
+
+ return num;
+}
+
+static u32 selftest_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm selftest_i2c_algo = {
+ .master_xfer = selftest_i2c_master_xfer,
+ .functionality = selftest_i2c_functionality,
+};
+
+static int selftest_i2c_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct selftest_i2c_bus_data *std;
+ struct i2c_adapter *adap;
+ int ret;
+
+ if (np == NULL) {
+ dev_err(dev, "No OF data for device\n");
+ return -EINVAL;
+
+ }
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ std = devm_kzalloc(dev, sizeof(*std), GFP_KERNEL);
+ if (!std) {
+ dev_err(dev, "Failed to allocate selftest i2c data\n");
+ return -ENOMEM;
+ }
+
+ /* link them together */
+ std->pdev = pdev;
+ platform_set_drvdata(pdev, std);
+
+ adap = &std->adap;
+ i2c_set_adapdata(adap, std);
+ adap->nr = -1;
+ strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ adap->class = I2C_CLASS_DEPRECATED;
+ adap->algo = &selftest_i2c_algo;
+ adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
+ adap->timeout = 5 * HZ;
+ adap->retries = 3;
+
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to add I2C adapter\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int selftest_i2c_bus_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct selftest_i2c_bus_data *std = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+ i2c_del_adapter(&std->adap);
+
+ return 0;
+}
+
+static struct of_device_id selftest_i2c_bus_match[] = {
+ { .compatible = "selftest-i2c-bus", },
+ {},
+};
+
+static struct platform_driver selftest_i2c_bus_driver = {
+ .probe = selftest_i2c_bus_probe,
+ .remove = selftest_i2c_bus_remove,
+ .driver = {
+ .name = "selftest-i2c-bus",
+ .of_match_table = of_match_ptr(selftest_i2c_bus_match),
+ },
+};
+
+static int selftest_i2c_dev_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = client->dev.of_node;
+
+ if (!np) {
+ dev_err(dev, "No OF node\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ return 0;
+};
+
+static int selftest_i2c_dev_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = client->dev.of_node;
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+ return 0;
+}
+
+static const struct i2c_device_id selftest_i2c_dev_id[] = {
+ { .name = "selftest-i2c-dev" },
+ { }
+};
+
+static struct i2c_driver selftest_i2c_dev_driver = {
+ .driver = {
+ .name = "selftest-i2c-dev",
+ .owner = THIS_MODULE,
+ },
+ .probe = selftest_i2c_dev_probe,
+ .remove = selftest_i2c_dev_remove,
+ .id_table = selftest_i2c_dev_id,
+};
+
+#if IS_ENABLED(CONFIG_I2C_MUX)
+
+struct selftest_i2c_mux_data {
+ int nchans;
+ struct i2c_adapter *adap[];
+};
+
+static int selftest_i2c_mux_select_chan(struct i2c_adapter *adap,
+ void *client, u32 chan)
+{
+ return 0;
+}
+
+static int selftest_i2c_mux_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret, i, nchans, size;
+ struct device *dev = &client->dev;
+ struct i2c_adapter *adap = to_i2c_adapter(dev->parent);
+ struct device_node *np = client->dev.of_node, *child;
+ struct selftest_i2c_mux_data *stm;
+ u32 reg, max_reg;
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ if (!np) {
+ dev_err(dev, "No OF node\n");
+ return -EINVAL;
+ }
+
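+ /* The channel count is the highest child "reg" value plus one */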
+ max_reg = (u32)-1;
+ for_each_child_of_node(np, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (max_reg == (u32)-1 || reg > max_reg)
+ max_reg = reg;
+ }
+ nchans = max_reg == (u32)-1 ? 0 : max_reg + 1;
+ if (nchans == 0) {
+ dev_err(dev, "No channels\n");
+ return -EINVAL;
+ }
+
+ size = offsetof(struct selftest_i2c_mux_data, adap[nchans]);
+ stm = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!stm) {
+ dev_err(dev, "Out of memory\n");
+ return -ENOMEM;
+ }
+ stm->nchans = nchans;
+ for (i = 0; i < nchans; i++) {
+ stm->adap[i] = i2c_add_mux_adapter(adap, dev, client,
+ 0, i, 0, selftest_i2c_mux_select_chan, NULL);
+ if (!stm->adap[i]) {
+ dev_err(dev, "Failed to register mux #%d\n", i);
+ for (i--; i >= 0; i--)
+ i2c_del_mux_adapter(stm->adap[i]);
+ return -ENODEV;
+ }
+ }
+
+ i2c_set_clientdata(client, stm);
+
+ return 0;
+};
+
+static int selftest_i2c_mux_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = client->dev.of_node;
+ struct selftest_i2c_mux_data *stm = i2c_get_clientdata(client);
+ int i;
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+ for (i = stm->nchans - 1; i >= 0; i--)
+ i2c_del_mux_adapter(stm->adap[i]);
+ return 0;
+}
+
+static const struct i2c_device_id selftest_i2c_mux_id[] = {
+ { .name = "selftest-i2c-mux" },
+ { }
+};
+
+static struct i2c_driver selftest_i2c_mux_driver = {
+ .driver = {
+ .name = "selftest-i2c-mux",
+ .owner = THIS_MODULE,
+ },
+ .probe = selftest_i2c_mux_probe,
+ .remove = selftest_i2c_mux_remove,
+ .id_table = selftest_i2c_mux_id,
+};
+
+#endif
+
+static int of_selftest_overlay_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&selftest_i2c_dev_driver);
+ if (selftest(ret == 0,
+ "could not register selftest i2c device driver\n"))
+ return ret;
+
+ ret = platform_driver_register(&selftest_i2c_bus_driver);
+ if (selftest(ret == 0,
+ "could not register selftest i2c bus driver\n"))
+ return ret;
+
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ ret = i2c_add_driver(&selftest_i2c_mux_driver);
+ if (selftest(ret == 0,
+ "could not register selftest i2c mux driver\n"))
+ return ret;
+#endif
+
+ return 0;
+}
+
+static void of_selftest_overlay_i2c_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ i2c_del_driver(&selftest_i2c_mux_driver);
+#endif
+ platform_driver_unregister(&selftest_i2c_bus_driver);
+ i2c_del_driver(&selftest_i2c_dev_driver);
+}
+
+static void of_selftest_overlay_i2c_12(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 12);
+}
+
+/* test deactivation of device */
+static void of_selftest_overlay_i2c_13(void)
+{
+ int ret;
+
+ /* device should disable */
+ ret = of_selftest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 13);
+}
+
+/* just check for i2c mux existence */
+static void of_selftest_overlay_i2c_14(void)
+{
+}
+
+static void of_selftest_overlay_i2c_15(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(16, 15, 0, 1, I2C_OVERLAY);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 15);
+}
+
+#else
+
+static inline void of_selftest_overlay_i2c_14(void) { }
+static inline void of_selftest_overlay_i2c_15(void) { }
+
+#endif
+
static void __init of_selftest_overlay(void)
{
struct device_node *bus_np = NULL;
@@ -1409,15 +1787,15 @@ static void __init of_selftest_overlay(void)
goto out;
}
- if (!of_path_platform_device_exists(selftest_path(100))) {
+ if (!of_selftest_device_exists(100, PDEV_OVERLAY)) {
selftest(0, "could not find selftest0 @ \"%s\"\n",
- selftest_path(100));
+ selftest_path(100, PDEV_OVERLAY));
goto out;
}
- if (of_path_platform_device_exists(selftest_path(101))) {
+ if (of_selftest_device_exists(101, PDEV_OVERLAY)) {
selftest(0, "selftest1 @ \"%s\" should not exist\n",
- selftest_path(101));
+ selftest_path(101, PDEV_OVERLAY));
goto out;
}
@@ -1433,6 +1811,21 @@ static void __init of_selftest_overlay(void)
of_selftest_overlay_6();
of_selftest_overlay_8();
+ of_selftest_overlay_10();
+ of_selftest_overlay_11();
+
+#if IS_ENABLED(CONFIG_I2C)
+ if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
+ goto out;
+
+ of_selftest_overlay_i2c_12();
+ of_selftest_overlay_i2c_13();
+ of_selftest_overlay_i2c_14();
+ of_selftest_overlay_i2c_15();
+
+ of_selftest_overlay_i2c_cleanup();
+#endif
+
out:
of_node_put(bus_np);
}
@@ -1475,9 +1868,6 @@ static int __init of_selftest(void)
of_selftest_platform_populate();
of_selftest_overlay();
- /* removing selftest data from live tree */
- selftest_data_remove();
-
/* Double check linkage after removing testcase data */
of_selftest_check_tree_linkage();
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 37e71ff6408d..dceb9ddfd99a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
int i;
/* PCI-PCI Bridge */
pci_read_bridge_bases(bus);
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
- pci_claim_resource(bus->self, i);
- }
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
+ pci_claim_bridge_resource(bus->self, i);
} else {
/* Host-PCI Bridge */
int err;
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index 7ad59ac68cf6..a81cd2a2747f 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -192,8 +192,8 @@ static int __init parport_atari_init(void)
&parport_atari_ops);
if (!p)
return -ENODEV;
- if (request_irq(IRQ_MFP_BUSY, parport_irq_handler,
- IRQ_TYPE_SLOW, p->name, p)) {
+ if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
+ p)) {
parport_put_port (p);
return -ENODEV;
}
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 49dd766852ba..d9b64a175990 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = bus->ops->map_bus(bus, devfn, where);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (size == 1)
+ *val = readb(addr);
+ else if (size == 2)
+ *val = readw(addr);
+ else
+ *val = readl(addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_read);
+
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+
+ addr = bus->ops->map_bus(bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ writeb(val, addr);
+ else if (size == 2)
+ writew(val, addr);
+ else
+ writel(val, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_write);
+
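+/*
+ * Read a full dword and extract the requested bytes, for buses that
+ * only support 32-bit configuration accesses.
+ */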
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *val = readl(addr);
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_read32);
+
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+ u32 mask, tmp;
+
+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
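+ /*
+ * Sub-dword writes are emulated with a 32-bit read-modify-write, so
+ * they are not atomic with respect to other accesses to the same
+ * dword.
+ */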
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ } else {
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+ }
+
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_write32);
+
/**
* pci_bus_set_ops - Set raw operations of pci bus
* @bus: pci bus struct
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 73aef51a28f0..90fa3a78fb7c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -20,17 +20,16 @@
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset)
{
- struct pci_host_bridge_window *window;
+ struct resource_entry *entry;
- window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL);
- if (!window) {
+ entry = resource_list_create_entry(res, 0);
+ if (!entry) {
printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
return;
}
- window->res = res;
- window->offset = offset;
- list_add_tail(&window->list, resources);
+ entry->offset = offset;
+ resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);
@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource);
void pci_free_resource_list(struct list_head *resources)
{
- struct pci_host_bridge_window *window, *tmp;
-
- list_for_each_entry_safe(window, tmp, resources, list) {
- list_del(&window->list);
- kfree(window);
- }
+ resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);
@@ -228,6 +222,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window. If this
+ * resource fits inside a window of an upstream bridge, do nothing. If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+ struct pci_bus *bus = dev->bus;
+ struct resource *res = &dev->resource[idx];
+ struct resource orig_res = *res;
+ struct resource *r;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t start, end;
+
+ if (!r)
+ continue;
+
+ if (resource_type(res) != resource_type(r))
+ continue;
+
+ start = max(r->start, res->start);
+ end = min(r->end, res->end);
+
+ if (start > end)
+ continue; /* no overlap */
+
+ if (res->start == start && res->end == end)
+ return false; /* no change */
+
+ res->start = start;
+ res->end = end;
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ &orig_res, res);
+
+ return true;
+ }
+
+ return false;
+}
+
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
/**
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 0e5f3c95af5b..39b2dbe585aa 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -35,10 +35,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
struct resource *res)
{
struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
- struct pci_host_bridge_window *window;
+ struct resource_entry *window;
resource_size_t offset = 0;
- list_for_each_entry(window, &bridge->windows, list) {
+ resource_list_for_each_entry(window, &bridge->windows) {
if (resource_contains(window->res, res)) {
offset = window->offset;
break;
@@ -60,10 +60,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
struct pci_bus_region *region)
{
struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
- struct pci_host_bridge_window *window;
+ struct resource_entry *window;
resource_size_t offset = 0;
- list_for_each_entry(window, &bridge->windows, list) {
+ resource_list_for_each_entry(window, &bridge->windows) {
struct pci_bus_region bus_region;
if (resource_type(res) != resource_type(window->res))
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index c4b6568e486d..7b892a9cc4fc 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -102,4 +102,8 @@ config PCI_LAYERSCAPE
help
Say Y here if you want PCIe controller support on Layerscape SoCs.
+config PCI_VERSATILE
+ bool "ARM Versatile PB PCI controller"
+ depends on ARCH_VERSATILE
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 44c26998027f..e61d91c92bf1 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 6eb1aa75bd37..ba46e581db99 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -76,55 +76,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
.map_bus = gen_pci_map_cfg_bus_ecam,
};
-static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *val)
-{
- void __iomem *addr;
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
-
- addr = pci->cfg.ops->map_bus(bus, devfn, where);
-
- switch (size) {
- case 1:
- *val = readb(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- default:
- *val = readl(addr);
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
-{
- void __iomem *addr;
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
-
- addr = pci->cfg.ops->map_bus(bus, devfn, where);
-
- switch (size) {
- case 1:
- writeb(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- default:
- writel(val, addr);
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
static struct pci_ops gen_pci_ops = {
- .read = gen_pci_config_read,
- .write = gen_pci_config_write,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
static const struct of_device_id gen_pci_of_match[] = {
@@ -149,14 +103,14 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
struct device *dev = pci->host.dev.parent;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct pci_host_bridge_window *win;
+ struct resource_entry *win;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
&iobase);
if (err)
return err;
- list_for_each_entry(win, &pci->resources, list) {
+ resource_list_for_each_entry(win, &pci->resources) {
struct resource *parent, *res = win->res;
switch (resource_type(res)) {
@@ -287,6 +241,7 @@ static int gen_pci_probe(struct platform_device *pdev)
of_id = of_match_node(gen_pci_of_match, np);
pci->cfg.ops = of_id->data;
+ gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
pci->host.dev.parent = dev;
INIT_LIST_HEAD(&pci->host.windows);
INIT_LIST_HEAD(&pci->resources);
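The open-coded accessors removed here are superseded by the shared pci_generic_config_read()/pci_generic_config_write() helpers, which call the driver's ->map_bus() hook and treat a NULL return as "device not found". A rough sketch of the read side (not the exact mainline body, which lives in drivers/pci/access.c):

	#include <linux/pci.h>

	/* Rough sketch: resolve the config-space address via ->map_bus() and
	 * fall back to all-ones data when the device cannot be reached. */
	int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
	{
		void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

		if (!addr) {
			*val = ~0;
			return PCIBIOS_DEVICE_NOT_FOUND;
		}

		if (size == 1)
			*val = readb(addr);
		else if (size == 2)
			*val = readw(addr);
		else
			*val = readl(addr);

		return PCIBIOS_SUCCESSFUL;
	}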
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 78f79e31ac5c..75333b0c4f0a 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -119,7 +119,7 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
struct pcie_port *pp = &ks_pcie->pp;
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
+ dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
/*
* The chained irq handler installation would have replaced normal
@@ -197,7 +197,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
*/
for (temp = 0; temp < max_host_irqs; temp++) {
host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
- if (host_irqs[temp] < 0)
+ if (!host_irqs[temp])
break;
}
if (temp) {
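The check fixed above (and again in pcie-rcar.c later in this patch) concerns irq_of_parse_and_map(), which returns an unsigned virq and signals failure with 0, never with a negative value, so a "< 0" test can never fire. A minimal caller, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/of_irq.h>

	/* Hypothetical helper showing the corrected failure check. */
	static int example_get_irq(struct device_node *np, int index)
	{
		unsigned int virq = irq_of_parse_and_map(np, index);

		if (!virq)
			return -ENOENT;	/* 0 means no mapping was created */

		return virq;
	}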
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 6697b1a4d4fa..68c9e5e9b0a8 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -167,7 +167,6 @@ MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
static struct platform_driver ls_pcie_driver = {
.driver = {
.name = "layerscape-pcie",
- .owner = THIS_MODULE,
.of_match_table = ls_pcie_of_match,
},
};
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 1dd759596b0a..1309cfbaa719 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -101,9 +101,7 @@ struct mvebu_pcie {
struct mvebu_pcie_port *ports;
struct msi_controller *msi;
struct resource io;
- char io_name[30];
struct resource realio;
- char mem_name[30];
struct resource mem;
struct resource busn;
int nports;
@@ -723,18 +721,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
- int domain = 0;
-#ifdef CONFIG_PCI_DOMAINS
- domain = sys->domain;
-#endif
-
- snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
- domain);
- pcie->mem.name = pcie->mem_name;
-
- snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
- pcie->realio.name = pcie->io_name;
+ pcie->mem.name = "PCI MEM";
+ pcie->realio.name = "PCI I/O";
if (request_resource(&iomem_resource, &pcie->mem))
return 0;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index d9c042febb1a..dd6b84e6206c 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -131,52 +131,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
return priv->reg + (slot >> 1) * 0x100 + where;
}
-static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *val)
-{
- void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
-
- if (!reg)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- switch (size) {
- case 1:
- *val = ioread8(reg);
- break;
- case 2:
- *val = ioread16(reg);
- break;
- default:
- *val = ioread32(reg);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
-{
- void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
-
- if (!reg)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- switch (size) {
- case 1:
- iowrite8(val, reg);
- break;
- case 2:
- iowrite16(val, reg);
- break;
- default:
- iowrite32(val, reg);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
/* PCI interrupt mapping */
static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
@@ -325,8 +279,9 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
}
static struct pci_ops rcar_pci_ops = {
- .read = rcar_pci_read_config,
- .write = rcar_pci_write_config,
+ .map_bus = rcar_pci_cfg_base,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
static int rcar_pci_probe(struct platform_device *pdev)
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index a800ae916394..00e92720d7f7 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -480,59 +480,10 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
return addr;
}
-static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *value)
-{
- void __iomem *addr;
-
- addr = tegra_pcie_conf_address(bus, devfn, where);
- if (!addr) {
- *value = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- *value = readl(addr);
-
- if (size == 1)
- *value = (*value >> (8 * (where & 3))) & 0xff;
- else if (size == 2)
- *value = (*value >> (8 * (where & 3))) & 0xffff;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- void __iomem *addr;
- u32 mask, tmp;
-
- addr = tegra_pcie_conf_address(bus, devfn, where);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (size == 4) {
- writel(value, addr);
- return PCIBIOS_SUCCESSFUL;
- }
-
- if (size == 2)
- mask = ~(0xffff << ((where & 0x3) * 8));
- else if (size == 1)
- mask = ~(0xff << ((where & 0x3) * 8));
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- tmp = readl(addr) & mask;
- tmp |= value << ((where & 0x3) * 8);
- writel(tmp, addr);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
static struct pci_ops tegra_pcie_ops = {
- .read = tegra_pcie_read_conf,
- .write = tegra_pcie_write_conf,
+ .map_bus = tegra_pcie_conf_address,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -625,19 +576,6 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port)
devm_kfree(pcie->dev, port);
}
-static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
-{
- u16 reg;
-
- if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
- pci_read_config_word(dev, PCI_COMMAND, &reg);
- reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
- pci_write_config_word(dev, PCI_COMMAND, reg);
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
-
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
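Tegra (like X-Gene further down) switches to the 32-bit-only generic accessors, which keep the constraint the removed code enforced: every transaction is a naturally aligned 32-bit access, and byte or word writes are emulated with a read-modify-write. A hedged sketch of that emulation, with hypothetical names; note that read-modify-write can clobber adjacent write-1-to-clear bits, an inherent limitation of this access scheme:

	#include <linux/io.h>
	#include <linux/types.h>

	/* "addr" is assumed to be the 32-bit aligned register returned by the
	 * driver's ->map_bus() callback for the word containing "where". */
	static void example_config_write32(void __iomem *addr, int where,
					   int size, u32 val)
	{
		int shift = 8 * (where & 3);
		u32 mask, tmp;

		if (size == 4) {
			writel(val, addr);
			return;
		}

		mask = ((size == 1) ? 0xffU : 0xffffU) << shift;
		tmp = readl(addr) & ~mask;
		tmp |= (val << shift) & mask;
		writel(tmp, addr);
	}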
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
new file mode 100644
index 000000000000..1ec694a52379
--- /dev/null
+++ b/drivers/pci/host/pci-versatile.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2004 Koninklijke Philips Electronics NV
+ *
+ * Conversion to platform driver and DT:
+ * Copyright 2014 Linaro Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * 14/04/2005 Initial version, colin.king@philips.com
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+static void __iomem *versatile_pci_base;
+static void __iomem *versatile_cfg_base[2];
+
+#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4))
+#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4))
+#define PCI_SELFID (versatile_pci_base + 0xc)
+
+#define VP_PCI_DEVICE_ID 0x030010ee
+#define VP_PCI_CLASS_ID 0x0b400000
+
+static u32 pci_slot_ignore;
+
+static int __init versatile_pci_slot_ignore(char *str)
+{
+ int retval;
+ int slot;
+
+ while ((retval = get_option(&str, &slot))) {
+ if ((slot < 0) || (slot > 31))
+ pr_err("Illegal slot value: %d\n", slot);
+ else
+ pci_slot_ignore |= (1 << slot);
+ }
+ return 1;
+}
+__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
+
+
+static void __iomem *versatile_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ unsigned int busnr = bus->number;
+
+ if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
+ return NULL;
+
+ return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
+}
+
+static struct pci_ops pci_versatile_ops = {
+ .map_bus = versatile_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write,
+};
+
+static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *res)
+{
+ int err, mem = 1, res_valid = 0;
+ struct device_node *np = dev->of_node;
+ resource_size_t iobase;
+ struct resource_entry *win;
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+ if (err)
+ return err;
+
+ resource_list_for_each_entry(win, res) {
+ struct resource *parent, *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ err = pci_remap_iospace(res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+
+ writel(res->start >> 28, PCI_IMAP(mem));
+ writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
+ mem++;
+
+ break;
+ case IORESOURCE_BUS:
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ goto out_release_res;
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ pci_free_resource_list(res);
+ return err;
+}
+
+/* Unused, temporary to satisfy ARM arch code */
+struct pci_sys_data sys;
+
+static int versatile_pci_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret, i, myslot = -1;
+ u32 val;
+ void __iomem *local_pci_cfg_base;
+ struct pci_bus *bus;
+ LIST_HEAD(pci_res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -ENODEV;
+ versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res)
+ return -ENODEV;
+ versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
+
+ ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
+ if (ret)
+ return ret;
+
+ /*
+ * We need to discover the PCI core first so it can configure itself
+ * before the main PCI probing is performed
+ */
+ for (i = 0; i < 32; i++) {
+ if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
+ (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
+ myslot = i;
+ break;
+ }
+ }
+ if (myslot == -1) {
+ dev_err(&pdev->dev, "Cannot find PCI core!\n");
+ return -EIO;
+ }
+ /*
+ * Do not map the Versatile FPGA PCI device into memory space
+ */
+ pci_slot_ignore |= (1 << myslot);
+
+ dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
+
+ writel(myslot, PCI_SELFID);
+ local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
+
+ val = readl(local_pci_cfg_base + PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+ writel(val, local_pci_cfg_base + PCI_COMMAND);
+
+ /*
+ * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+ */
+ writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+ writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+ writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+ /*
+ * For many years the kernel and QEMU were symbiotically buggy
+ * in that they both assumed the same broken IRQ mapping.
+ * QEMU therefore attempts to auto-detect old broken kernels
+ * so that they still work on newer QEMU as they did on old
+ * QEMU. Since we now use the correct (ie matching-hardware)
+ * IRQ mapping we write a definitely different value to a
+ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+ * real hardware behaviour and it need not be backwards
+ * compatible for us. This write is harmless on real hardware.
+ */
+ writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
+
+ pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
+
+ bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res);
+ if (!bus)
+ return -ENOMEM;
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_assign_unassigned_bus_resources(bus);
+
+ return 0;
+}
+
+static const struct of_device_id versatile_pci_of_match[] = {
+ { .compatible = "arm,versatile-pci", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
+
+static struct platform_driver versatile_pci_driver = {
+ .driver = {
+ .name = "versatile-pci",
+ .of_match_table = versatile_pci_of_match,
+ },
+ .probe = versatile_pci_probe,
+};
+module_platform_driver(versatile_pci_driver);
+
+MODULE_DESCRIPTION("Versatile PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index b1d0596457c5..aab55474dd0d 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -16,7 +16,7 @@
* GNU General Public License for more details.
*
*/
-#include <linux/clk-private.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
@@ -74,92 +74,6 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
-/* PCIe Configuration Out/In */
-static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
-{
- writel(val, addr + offset);
-}
-
-static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
-{
- u32 val32 = readl(addr + (offset & ~0x3));
-
- switch (offset & 0x3) {
- case 2:
- val32 &= ~0xFFFF0000;
- val32 |= (u32)val << 16;
- break;
- case 0:
- default:
- val32 &= ~0xFFFF;
- val32 |= val;
- break;
- }
- writel(val32, addr + (offset & ~0x3));
-}
-
-static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
-{
- u32 val32 = readl(addr + (offset & ~0x3));
-
- switch (offset & 0x3) {
- case 0:
- val32 &= ~0xFF;
- val32 |= val;
- break;
- case 1:
- val32 &= ~0xFF00;
- val32 |= (u32)val << 8;
- break;
- case 2:
- val32 &= ~0xFF0000;
- val32 |= (u32)val << 16;
- break;
- case 3:
- default:
- val32 &= ~0xFF000000;
- val32 |= (u32)val << 24;
- break;
- }
- writel(val32, addr + (offset & ~0x3));
-}
-
-static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
-{
- *val = readl(addr + offset);
-}
-
-static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
-{
- *val = readl(addr + (offset & ~0x3));
-
- switch (offset & 0x3) {
- case 2:
- *val >>= 16;
- break;
- }
-
- *val &= 0xFFFF;
-}
-
-static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
-{
- *val = readl(addr + (offset & ~0x3));
-
- switch (offset & 0x3) {
- case 3:
- *val = *val >> 24;
- break;
- case 2:
- *val = *val >> 16;
- break;
- case 1:
- *val = *val >> 8;
- break;
- }
- *val &= 0xFF;
-}
-
/*
* When the address bit [17:16] is 2'b01, the Configuration access will be
* treated as Type 1 and it will be forwarded to external PCIe device.
@@ -213,69 +127,23 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
return false;
}
-static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
-{
- struct xgene_pcie_port *port = bus->sysdata;
- void __iomem *addr;
-
- if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (xgene_pcie_hide_rc_bars(bus, offset)) {
- *val = 0;
- return PCIBIOS_SUCCESSFUL;
- }
-
- xgene_pcie_set_rtdid_reg(bus, devfn);
- addr = xgene_pcie_get_cfg_base(bus);
- switch (len) {
- case 1:
- xgene_pcie_cfg_in8(addr, offset, val);
- break;
- case 2:
- xgene_pcie_cfg_in16(addr, offset, val);
- break;
- default:
- xgene_pcie_cfg_in32(addr, offset, val);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 val)
+static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int offset)
{
struct xgene_pcie_port *port = bus->sysdata;
- void __iomem *addr;
- if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (xgene_pcie_hide_rc_bars(bus, offset))
- return PCIBIOS_SUCCESSFUL;
+ if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
+ xgene_pcie_hide_rc_bars(bus, offset))
+ return NULL;
xgene_pcie_set_rtdid_reg(bus, devfn);
- addr = xgene_pcie_get_cfg_base(bus);
- switch (len) {
- case 1:
- xgene_pcie_cfg_out8(addr, offset, (u8)val);
- break;
- case 2:
- xgene_pcie_cfg_out16(addr, offset, (u16)val);
- break;
- default:
- xgene_pcie_cfg_out32(addr, offset, val);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
+ return xgene_pcie_get_cfg_base(bus);
}
static struct pci_ops xgene_pcie_ops = {
- .read = xgene_pcie_read_config,
- .write = xgene_pcie_write_config
+ .map_bus = xgene_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
@@ -401,11 +269,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
- struct pci_host_bridge_window *window;
+ struct resource_entry *window;
struct device *dev = port->dev;
int ret;
- list_for_each_entry(window, res, list) {
+ resource_list_for_each_entry(window, res) {
struct resource *res = window->res;
u64 restype = resource_type(res);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index df781cdf13c1..1f4ea6f2d910 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
struct msi_msg msg;
struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+ if (desc->msi_attrib.is_msix)
+ return -EINVAL;
+
irq = assign_irq(1, desc, &pos);
if (irq < 0)
return irq;
@@ -508,9 +511,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
dw_pci.private_data = (void **)&pp;
pci_common_init_dev(pp->dev, &dw_pci);
-#ifdef CONFIG_PCI_DOMAINS
- dw_pci.domain++;
-#endif
return 0;
}
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 748786c402fc..c57bd0ac39a0 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -397,9 +397,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie)
#endif
pci_common_init_dev(&pdev->dev, &rcar_pci);
-#ifdef CONFIG_PCI_DOMAINS
- rcar_pci.domain++;
-#endif
}
static int phy_wait_for_ack(struct rcar_pcie *pcie)
@@ -757,7 +754,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
goto err_map_reg;
i = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (i < 0) {
+ if (!i) {
dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
@@ -765,7 +762,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
pcie->msi.irq1 = i;
i = irq_of_parse_and_map(pdev->dev.of_node, 1);
- if (i < 0) {
+ if (!i) {
dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index ef3ebaf9a738..f1a06a091ccb 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -148,10 +148,10 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
- u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+ unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
- dev_dbg(port->dev, "Requester ID %d\n",
+ dev_dbg(port->dev, "Requester ID %lu\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
@@ -189,7 +189,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
}
/**
- * xilinx_pcie_config_base - Get configuration base
+ * xilinx_pcie_map_bus - Get configuration base
* @bus: PCI Bus structure
* @devfn: Device/function
* @where: Offset from base
@@ -197,96 +197,26 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
* Return: Base address of the configuration space needed to be
* accessed.
*/
-static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
- unsigned int devfn, int where)
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
int relbus;
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return NULL;
+
relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
(devfn << ECAM_DEV_NUM_SHIFT);
return port->reg_base + relbus + where;
}
-/**
- * xilinx_pcie_read_config - Read configuration space
- * @bus: PCI Bus structure
- * @devfn: Device/function
- * @where: Offset from base
- * @size: Byte/word/dword
- * @val: Value to be read
- *
- * Return: PCIBIOS_SUCCESSFUL on success
- * PCIBIOS_DEVICE_NOT_FOUND on failure
- */
-static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *val)
-{
- void __iomem *addr;
-
- if (!xilinx_pcie_valid_device(bus, devfn)) {
- *val = 0xFFFFFFFF;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- addr = xilinx_pcie_config_base(bus, devfn, where);
-
- switch (size) {
- case 1:
- *val = readb(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- default:
- *val = readl(addr);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-/**
- * xilinx_pcie_write_config - Write configuration space
- * @bus: PCI Bus structure
- * @devfn: Device/function
- * @where: Offset from base
- * @size: Byte/word/dword
- * @val: Value to be written to device
- *
- * Return: PCIBIOS_SUCCESSFUL on success
- * PCIBIOS_DEVICE_NOT_FOUND on failure
- */
-static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
-{
- void __iomem *addr;
-
- if (!xilinx_pcie_valid_device(bus, devfn))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- addr = xilinx_pcie_config_base(bus, devfn, where);
-
- switch (size) {
- case 1:
- writeb(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- default:
- writel(val, addr);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
/* PCIe operations */
static struct pci_ops xilinx_pcie_ops = {
- .read = xilinx_pcie_read_config,
- .write = xilinx_pcie_write_config,
+ .map_bus = xilinx_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
/* MSI functions */
@@ -737,7 +667,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
resource_size_t offset;
struct of_pci_range_parser parser;
struct of_pci_range range;
- struct pci_host_bridge_window *win;
+ struct resource_entry *win;
int err = 0, mem_resno = 0;
/* Get the ranges */
@@ -807,7 +737,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
free_resources:
release_child_resources(&iomem_resource);
- list_for_each_entry(win, &port->resources, list)
+ resource_list_for_each_entry(win, &port->resources)
devm_kfree(dev, win->res);
pci_free_resource_list(&port->resources);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index a5a7fd8332ac..46db29395a62 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -214,8 +214,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
- if (slot->dev)
- pci_dev_put(slot->dev);
+ pci_dev_put(slot->dev);
kfree(slot);
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ff32e85e1de6..f052e951b23e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -532,8 +532,6 @@ static void interrupt_event_handler(struct work_struct *work)
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- if (!HP_SUPR_RM(ctrl))
- break;
ctrl_dbg(ctrl, "Surprise Insertion\n");
handle_surprise_event(p_slot);
break;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index bada20999870..c32fb786d48e 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -475,7 +475,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
struct slot *slot = bss_hotplug_slot->private;
struct pci_dev *dev, *temp;
int rc;
- acpi_owner_id ssdt_id = 0;
+ acpi_handle ssdt_hdl = NULL;
/* Acquire update access to the bus */
mutex_lock(&sn_hotplug_mutex);
@@ -522,7 +522,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
/* retain the owner id */
- acpi_get_id(chandle, &ssdt_id);
+ ssdt_hdl = chandle;
ret = acpi_bus_get_device(chandle,
&device);
@@ -547,12 +547,13 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
pci_unlock_rescan_remove();
/* Remove the SSDT for the slot from the ACPI namespace */
- if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
+ if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
acpi_status ret;
- ret = acpi_unload_table_id(ssdt_id);
+ ret = acpi_unload_parent_table(ssdt_hdl);
if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n",
- __func__, ret, ssdt_id);
+ acpi_handle_err(ssdt_hdl,
+ "%s: acpi_unload_parent_table failed (0x%x)\n",
+ __func__, ret);
/* try to continue on */
}
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index fd60806d3fd0..c3e7dfcf9ff5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -694,11 +694,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
resource_size_t phys_addr;
u32 table_offset;
+ unsigned long flags;
u8 bir;
pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+ flags = pci_resource_flags(dev, bir);
+ if (!flags || (flags & IORESOURCE_UNSET))
+ return NULL;
+
table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3542150fc8a3..489063987325 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -501,12 +501,29 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
return 0;
}
+static bool acpi_pci_need_resume(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (!adev || !acpi_device_power_manageable(adev))
+ return false;
+
+ if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
+ return true;
+
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ return false;
+
+ return !!adev->power.flags.dsw_present;
+}
+
static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.choose_state = acpi_pci_choose_state,
.sleep_wake = acpi_pci_sleep_wake,
.run_wake = acpi_pci_run_wake,
+ .need_resume = acpi_pci_need_resume,
};
void acpi_pci_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 887e6bd95af7..3cb2210de553 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -653,7 +653,6 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
- int error = 0;
/*
* Devices having power.ignore_children set may still be necessary for
@@ -662,10 +661,12 @@ static int pci_pm_prepare(struct device *dev)
if (dev->power.ignore_children)
pm_runtime_resume(dev);
- if (drv && drv->pm && drv->pm->prepare)
- error = drv->pm->prepare(dev);
-
- return error;
+ if (drv && drv->pm && drv->pm->prepare) {
+ int error = drv->pm->prepare(dev);
+ if (error)
+ return error;
+ }
+ return pci_dev_keep_suspended(to_pci_dev(dev));
}
@@ -1383,7 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
return -ENOMEM;
- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
+ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device,
(u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cab05f31223f..81f06e8dcc04 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -10,6 +10,8 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -521,6 +523,11 @@ static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
+static inline bool platform_pci_need_resume(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
+}
+
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@@ -1999,6 +2006,27 @@ bool pci_dev_run_wake(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+/**
+ * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
+ * @pci_dev: Device to check.
+ *
+ * Return 'true' if the device is runtime-suspended, does not have to be
+ * reconfigured due to wakeup setting differences between system and runtime
+ * suspend, and its current power state is suitable for the upcoming
+ * (system) transition.
+ */
+bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
+ if (!pm_runtime_suspended(dev)
+ || (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ || platform_pci_need_resume(pci_dev))
+ return false;
+
+ return pci_target_state(pci_dev) == pci_dev->current_state;
+}
+
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -3197,7 +3225,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
{
u16 csr;
- if (!dev->pm_cap)
+ if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
return -ENOTTY;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
@@ -3271,7 +3299,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3334,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (dev->subordinate || !dev->slot)
+ if (dev->subordinate || !dev->slot ||
+ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3587,20 @@ int pci_try_reset_function(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
@@ -3607,6 +3651,22 @@ unlock:
return 0;
}
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
@@ -3728,7 +3788,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
{
int rc;
- if (!slot)
+ if (!slot || !pci_slot_resetable(slot))
return -ENOTTY;
if (!probe)
@@ -3820,7 +3880,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
- if (!bus->self)
+ if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
if (probe)
@@ -4439,6 +4499,53 @@ int pci_get_new_domain_nr(void)
{
return atomic_inc_return(&__domain_nr);
}
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+ static int use_dt_domains = -1;
+ int domain = of_get_pci_domain_nr(parent->of_node);
+
+ /*
+ * Check DT domain and use_dt_domains values.
+ *
+ * If the DT domain property is valid (domain >= 0) and
+ * use_dt_domains != 0, the DT assignment is valid since this means
+ * we have not previously allocated a domain number by using
+ * pci_get_new_domain_nr(); we should also update use_dt_domains to
+ * 1, to indicate that we have just assigned a domain number from
+ * DT.
+ *
+ * If the DT domain property value is not valid (i.e. domain < 0) and we
+ * have not previously assigned a domain number from DT
+ * (use_dt_domains != 1), we should assign a domain number by using
+ * pci_get_new_domain_nr() and update the use_dt_domains value to keep
+ * track of the method we are using to assign domain numbers
+ * (use_dt_domains = 0).
+ *
+ * All other combinations imply we have a platform that is trying
+ * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
+ * which is a recipe for domain mishandling; this is prevented by
+ * invalidating the domain value (domain = -1) and printing a
+ * corresponding error.
+ */
+ if (domain >= 0 && use_dt_domains) {
+ use_dt_domains = 1;
+ } else if (domain < 0 && use_dt_domains != 1) {
+ use_dt_domains = 0;
+ domain = pci_get_new_domain_nr();
+ } else {
+ dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
+ parent->of_node->full_name);
+ domain = -1;
+ }
+
+ bus->domain_nr = domain;
+}
+#endif
#endif
/**
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8aff29a804ff..4091f82239cd 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -50,6 +50,10 @@ int pci_probe_reset_function(struct pci_dev *dev);
* for given device (the device's wake-up capability has to be
* enabled by @sleep_wake for this feature to work)
*
+ * @need_resume: returns 'true' if the given device (which is currently
+ * suspended) needs to be resumed to be configured for system
+ * wakeup.
+ *
* If given platform is generally capable of power managing PCI devices, all of
* these callbacks are mandatory.
*/
@@ -59,6 +63,7 @@ struct pci_platform_pm_ops {
pci_power_t (*choose_state)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
int (*run_wake)(struct pci_dev *dev, bool enable);
+ bool (*need_resume)(struct pci_dev *dev);
};
int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
@@ -67,6 +72,7 @@ void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+bool pci_dev_keep_suspended(struct pci_dev *dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
@@ -208,6 +214,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
/**
* pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e1e7026b838d..820740a22e94 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -859,7 +859,10 @@ static ssize_t link_state_store(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link, *root = pdev->link_state->root;
- u32 val = buf[0] - '0', state = 0;
+ u32 val, state = 0;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
if (aspm_disabled)
return -EPERM;
@@ -900,15 +903,14 @@ static ssize_t clk_ctl_store(struct device *dev,
size_t n)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int state;
+ bool state;
- if (n < 1)
+ if (strtobool(buf, &state))
return -EINVAL;
- state = buf[0]-'0';
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clkpm_nocheck(pdev->link_state, !!state);
+ pcie_set_clkpm_nocheck(pdev->link_state, state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
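Both store handlers above switch from decoding buf[0] to kstrtouint()/strtobool(), so malformed input now fails with -EINVAL instead of being silently misread. The general shape of the idiom, with illustrative names only:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/string.h>

	/* Illustrative sysfs-style store helper: anything strtobool() cannot
	 * parse ("y"/"n"/"1"/"0"/...) is rejected rather than guessed at. */
	static ssize_t example_bool_store(const char *buf, size_t count, bool *out)
	{
		bool val;

		if (strtobool(buf, &val))
			return -EINVAL;

		*out = val;
		return count;
	}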
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 23212f8ae09b..8d2f400e96cb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1895,7 +1895,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
int error;
struct pci_host_bridge *bridge;
struct pci_bus *b, *b2;
- struct pci_host_bridge_window *window, *n;
+ struct resource_entry *window, *n;
struct resource *res;
resource_size_t offset;
char bus_addr[64];
@@ -1959,8 +1959,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
/* Add initial resources to the bus */
- list_for_each_entry_safe(window, n, resources, list) {
- list_move_tail(&window->list, &bridge->windows);
+ resource_list_for_each_entry_safe(window, n, resources) {
+ list_move_tail(&window->node, &bridge->windows);
res = window->res;
offset = window->offset;
if (res->flags & IORESOURCE_BUS)
@@ -2060,12 +2060,12 @@ void pci_bus_release_busn_res(struct pci_bus *b)
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
- struct pci_host_bridge_window *window;
+ struct resource_entry *window;
bool found = false;
struct pci_bus *b;
int max;
- list_for_each_entry(window, resources, list)
+ resource_list_for_each_entry(window, resources)
if (window->res->flags & IORESOURCE_BUS) {
found = true;
break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ed6f89b6efe5..85f247e28a80 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+ const char *name)
+{
+ u32 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + pos;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+ if (!region)
+ return;
+
+ res->name = pci_name(dev);
+ res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+ res->flags |=
+ (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+ region &= ~(size - 1);
+
+ /* Convert from PCI bus to resource space */
+ bus_region.start = region;
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+ dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+ name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
/*
* Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
* ver. 1.33 20070103) don't set the correct ISA PCI region header info.
* BAR0 should be 8 bytes; instead, it may be set to something like 8k
* (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
*/
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
+ static char *name = "CS5536 ISA bridge";
+
if (pci_resource_len(dev, 0) != 8) {
- struct resource *res = &dev->resource[0];
- res->end = res->start + 8 - 1;
- dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
+ quirk_io(dev, 0, 8, name); /* SMB */
+ quirk_io(dev, 1, 256, name); /* GPIO */
+ quirk_io(dev, 2, 64, name); /* MFGPT */
+ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+ name);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
@@ -3028,6 +3062,41 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
quirk_broken_intx_masking);
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave correctly after a bus reset. The
+ * device will throw a Link Down error on AER-capable systems and,
+ * regardless of AER, the device's config space is never accessible again;
+ * attempting to access it typically causes the system to hang or reset.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
+static void quirk_no_pm_reset(struct pci_dev *dev)
+{
+ /*
+ * We can't do a bus reset on root bus devices, but an ineffective
+ * PM reset may be better than nothing.
+ */
+ if (!pci_is_root_bus(dev->bus))
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
+}
+
+/*
+ * Some AMD/ATI GPUs (HD8570 - Oland) report that a D3hot->D0 transition
+ * causes a reset (i.e., they advertise NoSoftRst-). This transition seems
+ * to have no effect on the device: it retains the framebuffer contents and
+ * monitor sync. Advertising this support makes other layers, like VFIO,
+ * assume pci_reset_function() is viable for this device. Mark it as
+ * unavailable to skip it when testing reset methods.
+ */
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3528,6 +3597,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
quirk_dma_func1_alias);
/*
+ * Some devices DMA with the wrong devfn, not just the wrong function.
+ * quirk_fixed_dma_alias() uses this table to create fixed aliases, where
+ * the alias is "fixed" and independent of the device devfn.
+ *
+ * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
+ * processor. To software, this appears as a PCIe-to-PCI/X bridge with a
+ * single device on the secondary bus. In reality, the single exposed
+ * device at 0e.0 is the Address Translation Unit (ATU) of the controller
+ * that provides a bridge to the internal bus of the I/O processor. The
+ * controller supports private devices, which can be hidden from PCI config
+ * space. In the case of the Adaptec 3405, a private device at 01.0
+ * appears to be the DMA engine, which therefore needs to become a DMA
+ * alias for the device.
+ */
+static const struct pci_device_id fixed_dma_alias_tbl[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
+ PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
+ .driver_data = PCI_DEVFN(1, 0) },
+ { 0 }
+};
+
+static void quirk_fixed_dma_alias(struct pci_dev *dev)
+{
+ const struct pci_device_id *id;
+
+ id = pci_match_id(fixed_dma_alias_tbl, dev);
+ if (id) {
+ dev->dma_alias_devfn = id->driver_data;
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+ PCI_SLOT(dev->dma_alias_devfn),
+ PCI_FUNC(dev->dma_alias_devfn));
+ }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
+
+/*
* A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
* using the wrong DMA alias for the device. Some of these devices can be
* used as either forward or reverse bridges, so we need to test whether the
@@ -3630,6 +3737,9 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
/* Patsburg (X79) PCH */
0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
+ /* Wellsburg (X99) PCH */
+ 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
+ 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
};
static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
@@ -3713,6 +3823,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+ { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
+ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
{ 0 }
};
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index f955edb9bea7..eb0ad530dc43 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
void __iomem *image;
int last_image;
+ unsigned length;
image = rom;
do {
@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
if (readb(pds + 3) != 'R')
break;
last_image = readb(pds + 21) & 0x80;
- /* this length is reliable */
- image += readw(pds + 16) * 512;
- } while (!last_image);
+ length = readw(pds + 16);
+ image += length * 512;
+ } while (length && !last_image);
/* never return a size larger than the PCI resource window */
/* there are known ROMs that get the size wrong */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0482235eee92..e3e17f3c0f0f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
config space writes, so it's quite possible that an I/O window of
the bridge will have some undesirable address (e.g. 0) after the
first write. Ditto 64-bit prefetchable MMIO. */
-static void pci_setup_bridge_io(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus. */
- res = bus->resource[0];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
-static void pci_setup_bridge_mmio(struct pci_bus *bus)
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus. */
- res = bus->resource[1];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
/* Set up PREF base/limit. */
bu = lu = 0;
- res = bus->resource[2];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
&bus->busn_res);
if (type & IORESOURCE_IO)
- pci_setup_bridge_io(bus);
+ pci_setup_bridge_io(bridge);
if (type & IORESOURCE_MEM)
- pci_setup_bridge_mmio(bus);
+ pci_setup_bridge_mmio(bridge);
if (type & IORESOURCE_PREFETCH)
- pci_setup_bridge_mmio_pref(bus);
+ pci_setup_bridge_mmio_pref(bridge);
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
__pci_setup_bridge(bus, type);
}
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+ if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+ return 0;
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed the window */
+
+ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return 0;
+
+ if (!pci_bus_clip_resource(bridge, i))
+ return -EINVAL; /* clipping didn't change anything */
+
+ switch (i - PCI_BRIDGE_RESOURCES) {
+ case 0:
+ pci_setup_bridge_io(bridge);
+ break;
+ case 1:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case 2:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed a smaller window */
+
+ return -EINVAL;
+}
+
/* Check whether the bridge supports optional I/O and
prefetchable memory ranges. If not, the respective
base/limit registers must be read-only and read as 0. */
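pci_claim_bridge_resource(), added above, is meant for arch code that claims bridge windows and wants them clipped and re-programmed when they do not fit inside the upstream resource. A hypothetical caller, not part of this patch, might iterate the bridge windows like this:

	#include <linux/pci.h>

	/* Hypothetical example: claim every populated bridge window and let
	 * pci_claim_bridge_resource() clip windows that do not fit. */
	static void example_claim_bridge_windows(struct pci_dev *bridge)
	{
		int i;

		for (i = PCI_BRIDGE_RESOURCES; i <= PCI_BRIDGE_RESOURCE_END; i++) {
			if (!bridge->resource[i].flags)
				continue;

			if (pci_claim_bridge_resource(bridge, i))
				dev_warn(&bridge->dev, "could not claim window %pR\n",
					 &bridge->resource[i]);
		}
	}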
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 910e90bf16c6..8843a678f200 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,7 +69,8 @@ config YENTA
tristate "CardBus yenta-compatible bridge support"
depends on PCI
select CARDBUS if !EXPERT
- select PCCARD_NONSTATIC if PCMCIA != n
+ select PCCARD_NONSTATIC if PCMCIA != n && ISA
+ select PCCARD_PCI if PCMCIA != n && !ISA
---help---
This option enables support for CardBus host bridges. Virtually
all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -109,7 +110,8 @@ config YENTA_TOSHIBA
config PD6729
tristate "Cirrus PD6729 compatible bridge support"
depends on PCMCIA && PCI
- select PCCARD_NONSTATIC
+ select PCCARD_NONSTATIC if PCMCIA != n && ISA
+ select PCCARD_PCI if PCMCIA != n && !ISA
help
This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
device, found in some older laptops and PCMCIA card readers.
@@ -117,7 +119,8 @@ config PD6729
config I82092
tristate "i82092 compatible bridge support"
depends on PCMCIA && PCI
- select PCCARD_NONSTATIC
+ select PCCARD_NONSTATIC if PCMCIA != n && ISA
+ select PCCARD_PCI if PCMCIA != n && !ISA
help
This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
found in some older laptops and more commonly in evaluation boards for the
@@ -287,6 +290,9 @@ config ELECTRA_CF
Say Y here to support the CompactFlash controller on the
PA Semi Electra eval board.
+config PCCARD_PCI
+ bool
+
config PCCARD_NONSTATIC
bool
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 27e94b30cf96..f1a7ca04d89e 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
pcmcia_rsrc-y += rsrc_mgr.o
pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
+pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o
obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 884a984216fe..64d0515b76bd 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -168,9 +168,12 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
} else {
u_int inc = 1, card_offset, flags;
- if (addr > CISTPL_MAX_CIS_SIZE)
+ if (addr > CISTPL_MAX_CIS_SIZE) {
dev_dbg(&s->dev,
"attempt to read CIS mem at addr %#x", addr);
+ memset(ptr, 0xff, len);
+ return -1;
+ }
flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
if (attr) {
@@ -1383,7 +1386,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
if (!s)
return -EINVAL;
- if (s->functions) {
+ if (s->functions || !(s->state & SOCKET_PRESENT)) {
WARN_ON(1);
return -EINVAL;
}
@@ -1448,10 +1451,26 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
done:
/* invalidate CIS cache on failure */
if (!dev_ok || !ident_ok || !count) {
- mutex_lock(&s->ops_mutex);
- destroy_cis_cache(s);
- mutex_unlock(&s->ops_mutex);
- ret = -EIO;
+#if defined(CONFIG_MTD_PCMCIA_ANONYMOUS)
+ /*
+ * Set up as an anonymous card. If we don't have anonymous memory
+ * support then just error the card out, as there is no point in
+ * trying to second-guess it.
+ *
+ * Note: some cards have just a device entry; it may be worth
+ * extending support to cover these in future.
+ */
+ if (!dev_ok || !ident_ok) {
+ dev_info(&s->dev, "no CIS, assuming an anonymous memory card.\n");
+ pcmcia_replace_cis(s, "\xFF", 1);
+ count = 1;
+ ret = 0;
+ } else
+#endif
+ {
+ mutex_lock(&s->ops_mutex);
+ destroy_cis_cache(s);
+ mutex_unlock(&s->ops_mutex);
+ ret = -EIO;
+ }
}
if (info)
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 7f1953f78b12..e86cd6b31773 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -80,9 +80,9 @@ struct pccard_resource_ops {
* Stuff internal to module "pcmcia_rsrc":
*/
extern int static_init(struct pcmcia_socket *s);
-extern struct resource *pcmcia_make_resource(unsigned long start,
- unsigned long end,
- int flags, const char *name);
+extern struct resource *pcmcia_make_resource(resource_size_t start,
+ resource_size_t end,
+ unsigned long flags, const char *name);
/*
* Stuff internal to module "pcmcia_core":
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 757119b87146..d3baf0bfca9f 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -667,6 +667,9 @@ static void pcmcia_requery(struct pcmcia_socket *s)
{
int has_pfc;
+ if (!(s->state & SOCKET_PRESENT))
+ return;
+
if (s->functions == 0) {
pcmcia_card_add(s);
return;
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index aa628ed0e9f4..df2cb70aef5b 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -30,8 +30,9 @@ int static_init(struct pcmcia_socket *s)
return 0;
}
-struct resource *pcmcia_make_resource(unsigned long start, unsigned long end,
- int flags, const char *name)
+struct resource *pcmcia_make_resource(resource_size_t start,
+ resource_size_t end,
+ unsigned long flags, const char *name)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c
new file mode 100644
index 000000000000..1f67b3ba70fb
--- /dev/null
+++ b/drivers/pcmcia/rsrc_pci.c
@@ -0,0 +1,173 @@
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include <pcmcia/ss.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+
+struct pcmcia_align_data {
+ unsigned long mask;
+ unsigned long offset;
+};
+
+static resource_size_t pcmcia_align(void *align_data,
+ const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pcmcia_align_data *data = align_data;
+ resource_size_t start;
+
+ start = (res->start & ~data->mask) + data->offset;
+ if (start < res->start)
+ start += data->mask + 1;
+ return start;
+}
+
+static struct resource *find_io_region(struct pcmcia_socket *s,
+ unsigned long base, int num,
+ unsigned long align)
+{
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
+ dev_name(&s->dev));
+ struct pcmcia_align_data data;
+ int ret;
+
+ data.mask = align - 1;
+ data.offset = base & data.mask;
+
+ ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
+ base, 0, pcmcia_align, &data);
+ if (ret != 0) {
+ kfree(res);
+ res = NULL;
+ }
+ return res;
+}
+
+static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align, struct resource **parent)
+{
+ int i, ret = 0;
+
+ /* Check for an already-allocated window that must conflict with
+ * what was asked for. It is a hack because it does not catch all
+ * potential conflicts, just the most obvious ones.
+ */
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+
+ if (!*base)
+ continue;
+
+ if ((s->io[i].res->start & (align-1)) == *base)
+ return -EBUSY;
+ }
+
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ struct resource *res = s->io[i].res;
+ unsigned int try;
+
+ if (res && (res->flags & IORESOURCE_BITS) !=
+ (attr & IORESOURCE_BITS))
+ continue;
+
+ if (!res) {
+ if (align == 0)
+ align = 0x10000;
+
+ res = s->io[i].res = find_io_region(s, *base, num,
+ align);
+ if (!res)
+ return -EINVAL;
+
+ *base = res->start;
+ s->io[i].res->flags =
+ ((res->flags & ~IORESOURCE_BITS) |
+ (attr & IORESOURCE_BITS));
+ s->io[i].InUse = num;
+ *parent = res;
+ return 0;
+ }
+
+ /* Try to extend top of window */
+ try = res->end + 1;
+ if ((*base == 0) || (*base == try)) {
+ ret = adjust_resource(s->io[i].res, res->start,
+ resource_size(res) + num);
+ if (ret)
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ *parent = res;
+ return 0;
+ }
+
+ /* Try to extend bottom of window */
+ try = res->start - num;
+ if ((*base == 0) || (*base == try)) {
+ ret = adjust_resource(s->io[i].res,
+ res->start - num,
+ resource_size(res) + num);
+ if (ret)
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ *parent = res;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static struct resource *res_pci_find_mem(u_long base, u_long num,
+ u_long align, int low, struct pcmcia_socket *s)
+{
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
+ dev_name(&s->dev));
+ struct pcmcia_align_data data;
+ unsigned long min;
+ int ret;
+
+ if (align < 0x20000)
+ align = 0x20000;
+ data.mask = align - 1;
+ data.offset = base & data.mask;
+
+ min = 0;
+ if (!low)
+ min = 0x100000UL;
+
+ ret = pci_bus_alloc_resource(s->cb_dev->bus,
+ res, num, 1, min, 0,
+ pcmcia_align, &data);
+
+ if (ret != 0) {
+ kfree(res);
+ res = NULL;
+ }
+ return res;
+}
+
+
+static int res_pci_init(struct pcmcia_socket *s)
+{
+ if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) {
+ dev_err(&s->dev, "not supported by res_pci\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+struct pccard_resource_ops pccard_nonstatic_ops = {
+ .validate_mem = NULL,
+ .find_io = res_pci_find_io,
+ .find_mem = res_pci_find_mem,
+ .init = res_pci_init,
+ .exit = NULL,
+};
+EXPORT_SYMBOL(pccard_nonstatic_ops);
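A minimal stand-alone sketch of the alignment arithmetic used by pcmcia_align()
above, assuming align is a power of two; the helper and values are hypothetical
and only illustrate how mask/offset yield the next suitably aligned start:

#include <stdio.h>

/* mirrors the data.mask / data.offset handling in pcmcia_align() */
static unsigned long align_start(unsigned long res_start,
				 unsigned long base, unsigned long align)
{
	unsigned long mask = align - 1;
	unsigned long offset = base & mask;
	unsigned long start = (res_start & ~mask) + offset;

	if (start < res_start)		/* keep the result inside the region */
		start += mask + 1;
	return start;
}

int main(void)
{
	/* e.g. candidate region at 0x1000, requested base 0x3e8, align 0x100 */
	printf("0x%lx\n", align_start(0x1000, 0x3e8, 0x100)); /* prints 0x10e8 */
	return 0;
}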
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index ccad8809ecb1..2962de205ba7 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -239,6 +239,13 @@ config PHY_QCOM_IPQ806X_SATA
depends on OF
select GENERIC_PHY
+config PHY_ROCKCHIP_USB
+ tristate "Rockchip USB2 PHY Driver"
+ depends on ARCH_ROCKCHIP && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip USB 2.0 PHY.
+
config PHY_ST_SPEAR1310_MIPHY
tristate "ST SPEAR1310-MIPHY driver"
select GENERIC_PHY
@@ -277,4 +284,11 @@ config PHY_STIH41X_USB
Enable this to support the USB transceiver that is part of
STMicroelectronics STiH41x SoC series.
+config PHY_QCOM_UFS
+ tristate "Qualcomm UFS PHY driver"
+ depends on OF && ARCH_MSM
+ select GENERIC_PHY
+ help
+ Support for UFS PHY on QCOM chipsets.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index aa74f961e44e..f080e1bb2a74 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -28,9 +28,13 @@ phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o
obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
+obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o
obj-$(CONFIG_PHY_STIH41X_USB) += phy-stih41x-usb.o
+obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
+obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index ac7d99d01cb3..7c99ca256f05 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -118,8 +118,8 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
usb_cluster_base = devm_ioremap_resource(&pdev->dev, res);
- if (!usb_cluster_base)
- return -ENOMEM;
+ if (IS_ERR(usb_cluster_base))
+ return PTR_ERR(usb_cluster_base);
phy = devm_phy_create(dev, NULL, &armada375_usb_phy_ops);
if (IS_ERR(phy)) {
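A minimal sketch (names illustrative, not part of this patch) of the
error-handling pattern the Armada hunk above switches to:
devm_ioremap_resource() signals failure with an ERR_PTR()-encoded pointer,
never NULL, so IS_ERR()/PTR_ERR() is the correct check:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))			/* e.g. -EBUSY or -ENOMEM */
		return PTR_ERR(base);

	/* ... use base ... */
	return 0;
}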
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 943e0f88a120..f017b2f2a54e 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -12,19 +12,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon/exynos4-pmu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
-/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
+/* MIPI_PHYn_CONTROL register offset (from the ioremapped base address): n = 0..1 */
#define EXYNOS_MIPI_PHY_CONTROL(n) ((n) * 4)
-#define EXYNOS_MIPI_PHY_ENABLE (1 << 0)
-#define EXYNOS_MIPI_PHY_SRESETN (1 << 1)
-#define EXYNOS_MIPI_PHY_MRESETN (1 << 2)
-#define EXYNOS_MIPI_PHY_RESET_MASK (3 << 1)
enum exynos_mipi_phy_id {
EXYNOS_MIPI_PHY_ID_CSIS0,
@@ -38,43 +37,62 @@ enum exynos_mipi_phy_id {
((id) == EXYNOS_MIPI_PHY_ID_DSIM0 || (id) == EXYNOS_MIPI_PHY_ID_DSIM1)
struct exynos_mipi_video_phy {
- spinlock_t slock;
struct video_phy_desc {
struct phy *phy;
unsigned int index;
} phys[EXYNOS_MIPI_PHYS_NUM];
+ spinlock_t slock;
void __iomem *regs;
+ struct mutex mutex;
+ struct regmap *regmap;
};
static int __set_phy_state(struct exynos_mipi_video_phy *state,
enum exynos_mipi_phy_id id, unsigned int on)
{
+ const unsigned int offset = EXYNOS4_MIPI_PHY_CONTROL(id / 2);
void __iomem *addr;
- u32 reg, reset;
-
- addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
+ u32 val, reset;
if (is_mipi_dsim_phy_id(id))
- reset = EXYNOS_MIPI_PHY_MRESETN;
- else
- reset = EXYNOS_MIPI_PHY_SRESETN;
-
- spin_lock(&state->slock);
- reg = readl(addr);
- if (on)
- reg |= reset;
+ reset = EXYNOS4_MIPI_PHY_MRESETN;
else
- reg &= ~reset;
- writel(reg, addr);
-
- /* Clear ENABLE bit only if MRESETN, SRESETN bits are not set. */
- if (on)
- reg |= EXYNOS_MIPI_PHY_ENABLE;
- else if (!(reg & EXYNOS_MIPI_PHY_RESET_MASK))
- reg &= ~EXYNOS_MIPI_PHY_ENABLE;
+ reset = EXYNOS4_MIPI_PHY_SRESETN;
+
+ if (state->regmap) {
+ mutex_lock(&state->mutex);
+ regmap_read(state->regmap, offset, &val);
+ if (on)
+ val |= reset;
+ else
+ val &= ~reset;
+ regmap_write(state->regmap, offset, val);
+ if (on)
+ val |= EXYNOS4_MIPI_PHY_ENABLE;
+ else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
+ val &= ~EXYNOS4_MIPI_PHY_ENABLE;
+ regmap_write(state->regmap, offset, val);
+ mutex_unlock(&state->mutex);
+ } else {
+ addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
+
+ spin_lock(&state->slock);
+ val = readl(addr);
+ if (on)
+ val |= reset;
+ else
+ val &= ~reset;
+ writel(val, addr);
+ /* Clear ENABLE bit only if MRESETN, SRESETN bits are not set */
+ if (on)
+ val |= EXYNOS4_MIPI_PHY_ENABLE;
+ else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
+ val &= ~EXYNOS4_MIPI_PHY_ENABLE;
+
+ writel(val, addr);
+ spin_unlock(&state->slock);
+ }
- writel(reg, addr);
- spin_unlock(&state->slock);
return 0;
}
@@ -118,7 +136,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
{
struct exynos_mipi_video_phy *state;
struct device *dev = &pdev->dev;
- struct resource *res;
struct phy_provider *phy_provider;
unsigned int i;
@@ -126,14 +143,22 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
if (!state)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ state->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(state->regmap)) {
+ struct resource *res;
- state->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(state->regs))
- return PTR_ERR(state->regs);
+ dev_info(dev, "regmap lookup failed: %ld\n",
+ PTR_ERR(state->regmap));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ state->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+ }
dev_set_drvdata(dev, state);
spin_lock_init(&state->slock);
+ mutex_init(&state->mutex);
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev, NULL,
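A sketch (not the driver's code; names illustrative) of the lookup-with-fallback
pattern introduced in the probe hunk above: prefer a "syscon" regmap phandle,
and fall back to a directly ioremapped register window when it is absent:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int example_map_regs(struct platform_device *pdev,
			    struct regmap **regmap, void __iomem **regs)
{
	struct resource *res;

	*regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "syscon");
	if (!IS_ERR(*regmap))
		return 0;		/* access via regmap_read()/regmap_write() */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*regs = devm_ioremap_resource(&pdev->dev, res);
	return PTR_ERR_OR_ZERO(*regs);	/* access via readl()/writel() */
}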
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index e34da13885e8..9b2848e6115d 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -194,6 +194,14 @@
#define MIPHY_SATA_BANK_NB 3
#define MIPHY_PCIE_BANK_NB 2
+enum {
+ SYSCFG_CTRL,
+ SYSCFG_STATUS,
+ SYSCFG_PCI,
+ SYSCFG_SATA,
+ SYSCFG_REG_MAX,
+};
+
struct miphy28lp_phy {
struct phy *phy;
struct miphy28lp_dev *phydev;
@@ -211,10 +219,7 @@ struct miphy28lp_phy {
u32 sata_gen;
/* Sysconfig registers offsets needed to configure the device */
- u32 syscfg_miphy_ctrl;
- u32 syscfg_miphy_status;
- u32 syscfg_pci;
- u32 syscfg_sata;
+ u32 syscfg_reg[SYSCFG_REG_MAX];
u8 type;
};
@@ -834,12 +839,12 @@ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
if (!miphy_phy->osc_rdy)
return 0;
- if (!miphy_phy->syscfg_miphy_status)
+ if (!miphy_phy->syscfg_reg[SYSCFG_STATUS])
return -EINVAL;
do {
- regmap_read(miphy_dev->regmap, miphy_phy->syscfg_miphy_status,
- &val);
+ regmap_read(miphy_dev->regmap,
+ miphy_phy->syscfg_reg[SYSCFG_STATUS], &val);
if ((val & MIPHY_OSC_RDY) != MIPHY_OSC_RDY)
cpu_relax();
@@ -888,7 +893,7 @@ static int miphy28lp_setup(struct miphy28lp_phy *miphy_phy, u32 miphy_val)
int err;
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
- if (!miphy_phy->syscfg_miphy_ctrl)
+ if (!miphy_phy->syscfg_reg[SYSCFG_CTRL])
return -EINVAL;
err = reset_control_assert(miphy_phy->miphy_rst);
@@ -900,7 +905,8 @@ static int miphy28lp_setup(struct miphy28lp_phy *miphy_phy, u32 miphy_val)
if (miphy_phy->osc_force_ext)
miphy_val |= MIPHY_OSC_FORCE_EXT;
- regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_miphy_ctrl,
+ regmap_update_bits(miphy_dev->regmap,
+ miphy_phy->syscfg_reg[SYSCFG_CTRL],
MIPHY_CTRL_MASK, miphy_val);
err = reset_control_deassert(miphy_phy->miphy_rst);
@@ -917,8 +923,9 @@ static int miphy28lp_init_sata(struct miphy28lp_phy *miphy_phy)
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err, sata_conf = SATA_CTRL_SELECT_SATA;
- if ((!miphy_phy->syscfg_sata) || (!miphy_phy->syscfg_pci)
- || (!miphy_phy->base))
+ if ((!miphy_phy->syscfg_reg[SYSCFG_SATA]) ||
+ (!miphy_phy->syscfg_reg[SYSCFG_PCI]) ||
+ (!miphy_phy->base))
return -EINVAL;
dev_info(miphy_dev->dev, "sata-up mode, addr 0x%p\n", miphy_phy->base);
@@ -926,10 +933,11 @@ static int miphy28lp_init_sata(struct miphy28lp_phy *miphy_phy)
/* Configure the glue-logic */
sata_conf |= ((miphy_phy->sata_gen - SATA_GEN1) << SATA_SPDMODE);
- regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_sata,
+ regmap_update_bits(miphy_dev->regmap,
+ miphy_phy->syscfg_reg[SYSCFG_SATA],
SATA_CTRL_MASK, sata_conf);
- regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_pci,
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_reg[SYSCFG_PCI],
PCIE_CTRL_MASK, SATA_CTRL_SELECT_PCIE);
/* MiPHY path and clocking init */
@@ -951,17 +959,19 @@ static int miphy28lp_init_pcie(struct miphy28lp_phy *miphy_phy)
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
- if ((!miphy_phy->syscfg_sata) || (!miphy_phy->syscfg_pci)
+ if ((!miphy_phy->syscfg_reg[SYSCFG_SATA]) ||
+ (!miphy_phy->syscfg_reg[SYSCFG_PCI])
|| (!miphy_phy->base) || (!miphy_phy->pipebase))
return -EINVAL;
dev_info(miphy_dev->dev, "pcie-up mode, addr 0x%p\n", miphy_phy->base);
/* Configure the glue-logic */
- regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_sata,
+ regmap_update_bits(miphy_dev->regmap,
+ miphy_phy->syscfg_reg[SYSCFG_SATA],
SATA_CTRL_MASK, SATA_CTRL_SELECT_PCIE);
- regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_pci,
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_reg[SYSCFG_PCI],
PCIE_CTRL_MASK, SYSCFG_PCIE_PCIE_VAL);
/* MiPHY path and clocking init */
@@ -1050,7 +1060,8 @@ static int miphy28lp_init(struct phy *phy)
ret = miphy28lp_init_usb3(miphy_phy);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
mutex_unlock(&miphy_dev->miphy_mutex);
@@ -1155,7 +1166,8 @@ static int miphy28lp_probe_resets(struct device_node *node,
static int miphy28lp_of_probe(struct device_node *np,
struct miphy28lp_phy *miphy_phy)
{
- struct resource res;
+ int i;
+ u32 ctrlreg;
miphy_phy->osc_force_ext =
of_property_read_bool(np, "st,osc-force-ext");
@@ -1174,18 +1186,10 @@ static int miphy28lp_of_probe(struct device_node *np,
if (!miphy_phy->sata_gen)
miphy_phy->sata_gen = SATA_GEN1;
- if (!miphy28lp_get_resource_byname(np, "miphy-ctrl-glue", &res))
- miphy_phy->syscfg_miphy_ctrl = res.start;
-
- if (!miphy28lp_get_resource_byname(np, "miphy-status-glue", &res))
- miphy_phy->syscfg_miphy_status = res.start;
-
- if (!miphy28lp_get_resource_byname(np, "pcie-glue", &res))
- miphy_phy->syscfg_pci = res.start;
-
- if (!miphy28lp_get_resource_byname(np, "sata-glue", &res))
- miphy_phy->syscfg_sata = res.start;
-
+ for (i = 0; i < SYSCFG_REG_MAX; i++) {
+ if (!of_property_read_u32_index(np, "st,syscfg", i, &ctrlreg))
+ miphy_phy->syscfg_reg[i] = ctrlreg;
+ }
return 0;
}
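A short sketch of the indexed property read used in the loop above, assuming a
hypothetical node carrying something like st,syscfg = <0x108 0x148 0x504 0x500>;
of_property_read_u32_index() returns 0 only when entry i exists, so missing
entries leave the cached offset untouched:

#include <linux/of.h>
#include <linux/types.h>

static void example_read_syscfg(struct device_node *np, u32 *offsets, int n)
{
	u32 val;
	int i;

	for (i = 0; i < n; i++)
		if (!of_property_read_u32_index(np, "st,syscfg", i, &val))
			offsets[i] = val;
}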
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 6ab43a814ad2..6c80154e8bff 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -141,7 +141,7 @@ struct miphy365x_phy {
bool pcie_tx_pol_inv;
bool sata_tx_pol_inv;
u32 sata_gen;
- u64 ctrlreg;
+ u32 ctrlreg;
u8 type;
};
@@ -179,7 +179,7 @@ static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
bool sata = (miphy_phy->type == MIPHY_TYPE_SATA);
return regmap_update_bits(miphy_dev->regmap,
- (unsigned int)miphy_phy->ctrlreg,
+ miphy_phy->ctrlreg,
SYSCFG_SELECT_SATA_MASK,
sata << SYSCFG_SELECT_SATA_POS);
}
@@ -445,7 +445,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
{
struct device_node *phynode = miphy_phy->phy->dev.of_node;
const char *name;
- const __be32 *taddr;
int type = miphy_phy->type;
int ret;
@@ -455,22 +454,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
return ret;
}
- if (!strncmp(name, "syscfg", 6)) {
- taddr = of_get_address(phynode, index, NULL, NULL);
- if (!taddr) {
- dev_err(dev, "failed to fetch syscfg address\n");
- return -EINVAL;
- }
-
- miphy_phy->ctrlreg = of_translate_address(phynode, taddr);
- if (miphy_phy->ctrlreg == OF_BAD_ADDR) {
- dev_err(dev, "failed to translate syscfg address\n");
- return -EINVAL;
- }
-
- return 0;
- }
-
if (!((!strncmp(name, "sata", 4) && type == MIPHY_TYPE_SATA) ||
(!strncmp(name, "pcie", 4) && type == MIPHY_TYPE_PCIE)))
return 0;
@@ -606,7 +589,15 @@ static int miphy365x_probe(struct platform_device *pdev)
return ret;
phy_set_drvdata(phy, miphy_dev->phys[port]);
+
port++;
+ /* sysconfig offsets are indexed from 1 */
+ ret = of_property_read_u32_index(np, "st,syscfg", port,
+ &miphy_phy->ctrlreg);
+ if (ret) {
+ dev_err(&pdev->dev, "No sysconfig offset found\n");
+ return ret;
+ }
}
provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index c96e8183a8ff..efe724f97e02 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -29,10 +29,9 @@
/**
* omap_control_pcie_pcs - set the PCS delay count
* @dev: the control module device
- * @id: index of the pcie PHY (should be 1 or 2)
* @delay: 8 bit delay value
*/
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+void omap_control_pcie_pcs(struct device *dev, u8 delay)
{
u32 val;
struct omap_control_phy *control_phy;
@@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
val = readl(control_phy->pcie_pcs);
val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
- (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT));
- val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+ OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+ val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
writel(val, control_phy->pcie_pcs);
}
EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
new file mode 100644
index 000000000000..591a39175e8a
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_I_H_
+#define UFS_QCOM_PHY_I_H_
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/phy/phy-qcom-ufs.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(timeout_us); \
+ for (;;) { \
+ (val) = readl(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = readl(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+#define UFS_QCOM_PHY_CAL_ENTRY(reg, val) \
+ { \
+ .reg_offset = reg, \
+ .cfg_value = val, \
+ }
+
+#define UFS_QCOM_PHY_NAME_LEN 30
+
+enum {
+ MASK_SERDES_START = 0x1,
+ MASK_PCS_READY = 0x1,
+};
+
+enum {
+ OFFSET_SERDES_START = 0x0,
+};
+
+struct ufs_qcom_phy_stored_attributes {
+ u32 att;
+ u32 value;
+};
+
+
+struct ufs_qcom_phy_calibration {
+ u32 reg_offset;
+ u32 cfg_value;
+};
+
+struct ufs_qcom_phy_vreg {
+ const char *name;
+ struct regulator *reg;
+ int max_uA;
+ int min_uV;
+ int max_uV;
+ bool enabled;
+ bool is_always_on;
+};
+
+struct ufs_qcom_phy {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *mmio;
+ void __iomem *dev_ref_clk_ctrl_mmio;
+ struct clk *tx_iface_clk;
+ struct clk *rx_iface_clk;
+ bool is_iface_clk_enabled;
+ struct clk *ref_clk_src;
+ struct clk *ref_clk_parent;
+ struct clk *ref_clk;
+ bool is_ref_clk_enabled;
+ bool is_dev_ref_clk_enabled;
+ struct ufs_qcom_phy_vreg vdda_pll;
+ struct ufs_qcom_phy_vreg vdda_phy;
+ struct ufs_qcom_phy_vreg vddp_ref_clk;
+ unsigned int quirks;
+
+ /*
+ * If the UFS link is put into Hibern8 and the UFS PHY analog hardware is
+ * power collapsed (by clearing UFS_PHY_POWER_DOWN_CONTROL), Hibern8
+ * exit might fail even after powering the UFS PHY analog hardware back on.
+ * Enabling this quirk works around the issue by applying custom PHY
+ * settings just before the PHY analog power collapse.
+ */
+ #define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE BIT(0)
+
+ u8 host_ctrl_rev_major;
+ u16 host_ctrl_rev_minor;
+ u16 host_ctrl_rev_step;
+
+ char name[UFS_QCOM_PHY_NAME_LEN];
+ struct ufs_qcom_phy_calibration *cached_regs;
+ int cached_regs_table_size;
+ bool is_powered_on;
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+};
+
+/**
+ * struct ufs_qcom_phy_specific_ops - set of pointers to functions with a
+ * specific implementation per PHY. Each UFS PHY should implement
+ * these functions according to its spec and requirements.
+ * @calibrate_phy: pointer to a function that calibrates the PHY
+ * @start_serdes: pointer to a function that starts the serdes
+ * @is_physical_coding_sublayer_ready: pointer to a function that
+ * checks PCS readiness; returns 0 on success and non-zero on error
+ * @set_tx_lane_enable: pointer to a function that enables TX lanes
+ * @power_control: pointer to a function that controls the analog rail of
+ * the PHY and writes to the QSERDES_RX_SIGDET_CNTRL attribute
+ */
+struct ufs_qcom_phy_specific_ops {
+ int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
+ void (*start_serdes)(struct ufs_qcom_phy *phy);
+ int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
+ void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
+ void (*power_control)(struct ufs_qcom_phy *phy, bool val);
+};
+
+struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
+int ufs_qcom_phy_power_on(struct phy *generic_phy);
+int ufs_qcom_phy_power_off(struct phy *generic_phy);
+int ufs_qcom_phy_exit(struct phy *generic_phy);
+int ufs_qcom_phy_init_clks(struct phy *generic_phy,
+ struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
+ struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_remove(struct phy *generic_phy,
+ struct ufs_qcom_phy *ufs_qcom_phy);
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+ struct ufs_qcom_phy *common_cfg,
+ struct phy_ops *ufs_qcom_phy_gen_ops,
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops);
+int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+ struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
+ struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
+ bool is_rate_B);
+#endif
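A hedged usage sketch for the readl_poll_timeout() helper defined in this
header: poll a status register from sleepable context until a ready bit is set,
sleeping between reads, and return 0 or -ETIMEDOUT after one second. The
register pointer and bit are hypothetical; the QMP drivers below use the macro
the same way against UFS_PHY_PCS_READY_STATUS:

static int example_wait_ready(void __iomem *status_reg)
{
	u32 val;

	/* read every ~10 us, give up after 1 s */
	return readl_poll_timeout(status_reg, val, (val & 0x1), 10, 1000000);
}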
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
new file mode 100644
index 000000000000..f5fc50a9fce7
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-14nm.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_14nm"
+#define UFS_PHY_VDDA_PHY_UV (925000)
+
+static
+int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+ bool is_rate_B)
+{
+ int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
+ int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+ int err;
+
+ err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A,
+ tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B);
+
+ if (err)
+ dev_err(ufs_qcom_phy->dev,
+ "%s: ufs_qcom_phy_calibrate() failed %d\n",
+ __func__, err);
+ return err;
+}
+
+static
+void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
+{
+ phy_common->quirks =
+ UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+}
+
+static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
+ struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+ int err;
+
+ err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+ phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
+
+ ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+
+out:
+ return err;
+}
+
+static
+void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
+{
+ writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+ /*
+ * Before any transactions involving the PHY, ensure the PHY knows
+ * that its analog rail is powered ON (or OFF).
+ */
+ mb();
+}
+
+static inline
+void ufs_qcom_phy_qmp_14nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
+{
+ /*
+ * The 14nm PHY does not have a TX_LANE_ENABLE register, so this is
+ * a no-op; it exists only so that callers do not see an error.
+ */
+}
+
+static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+ tmp &= ~MASK_SERDES_START;
+ tmp |= (1 << OFFSET_SERDES_START);
+ writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+ /* Ensure register value is committed */
+ mb();
+}
+
+static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+ int err = 0;
+ u32 val;
+
+ err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+ val, (val & MASK_PCS_READY), 10, 1000000);
+ if (err)
+ dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+ __func__, err);
+ return err;
+}
+
+static struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
+ .init = ufs_qcom_phy_qmp_14nm_init,
+ .exit = ufs_qcom_phy_exit,
+ .power_on = ufs_qcom_phy_power_on,
+ .power_off = ufs_qcom_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
+ .calibrate_phy = ufs_qcom_phy_qmp_14nm_phy_calibrate,
+ .start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
+ .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
+ .set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
+ .power_control = ufs_qcom_phy_qmp_14nm_power_control,
+};
+
+static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct ufs_qcom_phy_qmp_14nm *phy;
+ int err = 0;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ dev_err(dev, "%s: failed to allocate phy\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ &ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
+
+ if (!generic_phy) {
+ dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+ __func__);
+ err = -EIO;
+ goto out;
+ }
+
+ phy_set_drvdata(generic_phy, phy);
+
+ strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+ sizeof(phy->common_cfg.name));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = to_phy(dev);
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+ int err = 0;
+
+ err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+ if (err)
+ dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+ __func__, err);
+
+ return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
+ {.compatible = "qcom,ufs-phy-qmp-14nm"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
+ .probe = ufs_qcom_phy_qmp_14nm_probe,
+ .remove = ufs_qcom_phy_qmp_14nm_remove,
+ .driver = {
+ .of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
+ .name = "ufs_qcom_phy_qmp_14nm",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_14nm_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 14nm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
new file mode 100644
index 000000000000..3aefdbacbcd0
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_14NM_H_
+#define UFS_QCOM_PHY_QMP_14NM_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_OFF(x) (0x000 + x)
+#define PHY_OFF(x) (0xC00 + x)
+#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
+#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_BG_TIMER COM_OFF(0x0C)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x34)
+#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x3C)
+#define QSERDES_COM_LOCK_CMP1_MODE0 COM_OFF(0x4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0 COM_OFF(0x50)
+#define QSERDES_COM_LOCK_CMP3_MODE0 COM_OFF(0x54)
+#define QSERDES_COM_LOCK_CMP1_MODE1 COM_OFF(0x58)
+#define QSERDES_COM_LOCK_CMP2_MODE1 COM_OFF(0x5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1 COM_OFF(0x60)
+#define QSERDES_COM_CP_CTRL_MODE0 COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1 COM_OFF(0x7C)
+#define QSERDES_COM_PLL_RCTRL_MODE0 COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1 COM_OFF(0x88)
+#define QSERDES_COM_PLL_CCTRL_MODE0 COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1 COM_OFF(0x94)
+#define QSERDES_COM_SYSCLK_EN_SEL COM_OFF(0xAC)
+#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0xB4)
+#define QSERDES_COM_LOCK_CMP_EN COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0 COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1 COM_OFF(0xD4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 COM_OFF(0xF0)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_CTRL COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0 COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0 COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1 COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1 COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_TIMER1 COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2 COM_OFF(0x148)
+#define QSERDES_COM_CLK_SELECT COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL COM_OFF(0x178)
+#define QSERDES_COM_CORECLK_DIV COM_OFF(0x184)
+#define QSERDES_COM_CORE_CLK_EN COM_OFF(0x18C)
+#define QSERDES_COM_CMN_CONFIG COM_OFF(0x194)
+#define QSERDES_COM_SVS_MODE_CLK_SEL COM_OFF(0x19C)
+#define QSERDES_COM_CORECLK_DIV_MODE1 COM_OFF(0x1BC)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x04)
+#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN TX_OFF(0, 0x68)
+#define QSERDES_TX_LANE_MODE TX_OFF(0, 0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN RX_OFF(0, 0x40)
+#define QSERDES_RX_RX_TERM_BW RX_OFF(0, 0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB RX_OFF(0, 0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB RX_OFF(0, 0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB RX_OFF(0, 0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB RX_OFF(0, 0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(0, 0xD8)
+#define QSERDES_RX_SIGDET_CNTRL RX_OFF(0, 0x114)
+#define QSERDES_RX_SIGDET_LVL RX_OFF(0, 0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE RX_OFF(0, 0x12C)
+
+/*
+ * This structure represents the 14nm specific PHY.
+ * common_cfg MUST remain the first field in this structure, even if
+ * extra fields are added later. This way get_ufs_qcom_phy() can
+ * extract the common PHY structure (struct ufs_qcom_phy) from the
+ * generic phy, regardless of which specific PHY is in use.
+ */
+struct ufs_qcom_phy_qmp_14nm {
+ struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),
+
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
+};
+
+#endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
new file mode 100644
index 000000000000..8332f96b2c4a
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-20nm.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_20nm"
+
+static
+int ufs_qcom_phy_qmp_20nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+ bool is_rate_B)
+{
+ struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+ int tbl_size_A, tbl_size_B;
+ u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+ u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+ u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+ int err;
+
+ if ((major == 0x1) && (minor == 0x002) && (step == 0x0000)) {
+ tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0);
+ tbl_A = phy_cal_table_rate_A_1_2_0;
+ } else if ((major == 0x1) && (minor == 0x003) && (step == 0x0000)) {
+ tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0);
+ tbl_A = phy_cal_table_rate_A_1_3_0;
+ } else {
+ dev_err(ufs_qcom_phy->dev, "%s: Unknown UFS-PHY version, no calibration values\n",
+ __func__);
+ err = -ENODEV;
+ goto out;
+ }
+
+ tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+ tbl_B = phy_cal_table_rate_B;
+
+ err = ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A,
+ tbl_B, tbl_size_B, is_rate_B);
+
+ if (err)
+ dev_err(ufs_qcom_phy->dev, "%s: ufs_qcom_phy_calibrate() failed %d\n",
+ __func__, err);
+
+out:
+ return err;
+}
+
+static
+void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
+{
+ phy_common->quirks =
+ UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+}
+
+static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
+ struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+ int err = 0;
+
+ err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+
+out:
+ return err;
+}
+
+static
+void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
+{
+ bool hibern8_exit_after_pwr_collapse = phy->quirks &
+ UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+
+ if (val) {
+ writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+ /*
+ * Before any transactions involving the PHY, ensure the PHY knows
+ * that its analog rail is powered ON.
+ */
+ mb();
+
+ if (hibern8_exit_after_pwr_collapse) {
+ /*
+ * Give at least a 1 us delay after restoring PHY analog
+ * power.
+ */
+ usleep_range(1, 2);
+ writel_relaxed(0x0A, phy->mmio +
+ QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+ writel_relaxed(0x08, phy->mmio +
+ QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+ /*
+ * Make sure workaround is deactivated before proceeding
+ * with normal PHY operations.
+ */
+ mb();
+ }
+ } else {
+ if (hibern8_exit_after_pwr_collapse) {
+ writel_relaxed(0x0A, phy->mmio +
+ QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+ writel_relaxed(0x02, phy->mmio +
+ QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+ /*
+ * Make sure that the above workaround is activated before
+ * the PHY analog power collapse.
+ */
+ mb();
+ }
+
+ writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+ /*
+ * Ensure the PHY knows that its analog rail is about
+ * to be powered down.
+ */
+ mb();
+ }
+}
+
+static
+void ufs_qcom_phy_qmp_20nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
+{
+ writel_relaxed(val & UFS_PHY_TX_LANE_ENABLE_MASK,
+ phy->mmio + UFS_PHY_TX_LANE_ENABLE);
+ mb();
+}
+
+static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+ tmp &= ~MASK_SERDES_START;
+ tmp |= (1 << OFFSET_SERDES_START);
+ writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+ mb();
+}
+
+static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+ int err = 0;
+ u32 val;
+
+ err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+ val, (val & MASK_PCS_READY), 10, 1000000);
+ if (err)
+ dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+ __func__, err);
+ return err;
+}
+
+static struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
+ .init = ufs_qcom_phy_qmp_20nm_init,
+ .exit = ufs_qcom_phy_exit,
+ .power_on = ufs_qcom_phy_power_on,
+ .power_off = ufs_qcom_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
+ .calibrate_phy = ufs_qcom_phy_qmp_20nm_phy_calibrate,
+ .start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
+ .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
+ .set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
+ .power_control = ufs_qcom_phy_qmp_20nm_power_control,
+};
+
+static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct ufs_qcom_phy_qmp_20nm *phy;
+ int err = 0;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ dev_err(dev, "%s: failed to allocate phy\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ &ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
+
+ if (!generic_phy) {
+ dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+ __func__);
+ err = -EIO;
+ goto out;
+ }
+
+ phy_set_drvdata(generic_phy, phy);
+
+ strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+ sizeof(phy->common_cfg.name));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = to_phy(dev);
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+ int err = 0;
+
+ err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+ if (err)
+ dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+ __func__, err);
+
+ return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = {
+ {.compatible = "qcom,ufs-phy-qmp-20nm"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
+ .probe = ufs_qcom_phy_qmp_20nm_probe,
+ .remove = ufs_qcom_phy_qmp_20nm_remove,
+ .driver = {
+ .of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
+ .name = "ufs_qcom_phy_qmp_20nm",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_20nm_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 20nm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/phy-qcom-ufs-qmp-20nm.h
new file mode 100644
index 000000000000..4f3076bb3d71
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_20NM_H_
+#define UFS_QCOM_PHY_QMP_20NM_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+
+#define COM_OFF(x) (0x000 + x)
+#define PHY_OFF(x) (0xC00 + x)
+#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
+#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
+
+/* UFS PHY PLL block registers */
+#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x0)
+#define QSERDES_COM_PLL_VCOTAIL_EN COM_OFF(0x04)
+#define QSERDES_COM_PLL_CNTRL COM_OFF(0x14)
+#define QSERDES_COM_PLL_IP_SETI COM_OFF(0x24)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL COM_OFF(0x28)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x30)
+#define QSERDES_COM_PLL_CP_SETI COM_OFF(0x34)
+#define QSERDES_COM_PLL_IP_SETP COM_OFF(0x38)
+#define QSERDES_COM_PLL_CP_SETP COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND COM_OFF(0x48)
+#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0x4C)
+#define QSERDES_COM_RESETSM_CNTRL2 COM_OFF(0x50)
+#define QSERDES_COM_PLLLOCK_CMP1 COM_OFF(0x90)
+#define QSERDES_COM_PLLLOCK_CMP2 COM_OFF(0x94)
+#define QSERDES_COM_PLLLOCK_CMP3 COM_OFF(0x98)
+#define QSERDES_COM_PLLLOCK_CMP_EN COM_OFF(0x9C)
+#define QSERDES_COM_BGTC COM_OFF(0xA0)
+#define QSERDES_COM_DEC_START1 COM_OFF(0xAC)
+#define QSERDES_COM_PLL_AMP_OS COM_OFF(0xB0)
+#define QSERDES_COM_RES_CODE_UP_OFFSET COM_OFF(0xD8)
+#define QSERDES_COM_RES_CODE_DN_OFFSET COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START1 COM_OFF(0x100)
+#define QSERDES_COM_DIV_FRAC_START2 COM_OFF(0x104)
+#define QSERDES_COM_DIV_FRAC_START3 COM_OFF(0x108)
+#define QSERDES_COM_DEC_START2 COM_OFF(0x10C)
+#define QSERDES_COM_PLL_RXTXEPCLK_EN COM_OFF(0x110)
+#define QSERDES_COM_PLL_CRCTRL COM_OFF(0x114)
+#define QSERDES_COM_PLL_CLKEPDIV COM_OFF(0x118)
+
+/* TX LANE n (0, 1) registers */
+#define QSERDES_TX_EMP_POST1_LVL(n) TX_OFF(n, 0x08)
+#define QSERDES_TX_DRV_LVL(n) TX_OFF(n, 0x0C)
+#define QSERDES_TX_LANE_MODE(n) TX_OFF(n, 0x54)
+
+/* RX LANE n (0, 1) registers */
+#define QSERDES_RX_CDR_CONTROL1(n) RX_OFF(n, 0x0)
+#define QSERDES_RX_CDR_CONTROL_HALF(n) RX_OFF(n, 0x8)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB(n) RX_OFF(n, 0xA8)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB(n) RX_OFF(n, 0xAC)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB(n) RX_OFF(n, 0xB0)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB(n) RX_OFF(n, 0xB4)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n) RX_OFF(n, 0xBC)
+#define QSERDES_RX_CDR_CONTROL_QUARTER(n) RX_OFF(n, 0xC)
+#define QSERDES_RX_SIGDET_CNTRL(n) RX_OFF(n, 0x100)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x4)
+#define UFS_PHY_TX_LANE_ENABLE PHY_OFF(0x44)
+#define UFS_PHY_PWM_G1_CLK_DIVIDER PHY_OFF(0x08)
+#define UFS_PHY_PWM_G2_CLK_DIVIDER PHY_OFF(0x0C)
+#define UFS_PHY_PWM_G3_CLK_DIVIDER PHY_OFF(0x10)
+#define UFS_PHY_PWM_G4_CLK_DIVIDER PHY_OFF(0x14)
+#define UFS_PHY_CORECLK_PWM_G1_CLK_DIVIDER PHY_OFF(0x34)
+#define UFS_PHY_CORECLK_PWM_G2_CLK_DIVIDER PHY_OFF(0x38)
+#define UFS_PHY_CORECLK_PWM_G3_CLK_DIVIDER PHY_OFF(0x3C)
+#define UFS_PHY_CORECLK_PWM_G4_CLK_DIVIDER PHY_OFF(0x40)
+#define UFS_PHY_OMC_STATUS_RDVAL PHY_OFF(0x68)
+#define UFS_PHY_LINE_RESET_TIME PHY_OFF(0x28)
+#define UFS_PHY_LINE_RESET_GRANULARITY PHY_OFF(0x2C)
+#define UFS_PHY_TSYNC_RSYNC_CNTL PHY_OFF(0x48)
+#define UFS_PHY_PLL_CNTL PHY_OFF(0x50)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL PHY_OFF(0x54)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x5C)
+#define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL PHY_OFF(0x58)
+#define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL PHY_OFF(0x60)
+#define UFS_PHY_CFG_CHANGE_CNT_VAL PHY_OFF(0x64)
+#define UFS_PHY_RX_SYNC_WAIT_TIME PHY_OFF(0x6C)
+#define UFS_PHY_TX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB4)
+#define UFS_PHY_RX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE0)
+#define UFS_PHY_TX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB8)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE4)
+#define UFS_PHY_TX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xBC)
+#define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xE8)
+#define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY PHY_OFF(0xFC)
+#define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY PHY_OFF(0x100)
+#define UFS_PHY_RX_SIGDET_CTRL3 PHY_OFF(0x14c)
+#define UFS_PHY_RMMI_ATTR_CTRL PHY_OFF(0x160)
+#define UFS_PHY_RMMI_RX_CFGUPDT_L1 (1 << 7)
+#define UFS_PHY_RMMI_TX_CFGUPDT_L1 (1 << 6)
+#define UFS_PHY_RMMI_CFGWR_L1 (1 << 5)
+#define UFS_PHY_RMMI_CFGRD_L1 (1 << 4)
+#define UFS_PHY_RMMI_RX_CFGUPDT_L0 (1 << 3)
+#define UFS_PHY_RMMI_TX_CFGUPDT_L0 (1 << 2)
+#define UFS_PHY_RMMI_CFGWR_L0 (1 << 1)
+#define UFS_PHY_RMMI_CFGRD_L0 (1 << 0)
+#define UFS_PHY_RMMI_ATTRID PHY_OFF(0x164)
+#define UFS_PHY_RMMI_ATTRWRVAL PHY_OFF(0x168)
+#define UFS_PHY_RMMI_ATTRRDVAL_L0_STATUS PHY_OFF(0x16C)
+#define UFS_PHY_RMMI_ATTRRDVAL_L1_STATUS PHY_OFF(0x170)
+#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x174)
+
+#define UFS_PHY_TX_LANE_ENABLE_MASK 0x3
+
+/*
+ * This structure represents the 20nm specific PHY.
+ * common_cfg MUST remain the first field in this structure, even if
+ * extra fields are added later. This way get_ufs_qcom_phy() can
+ * extract the common PHY structure (struct ufs_qcom_phy) from the
+ * generic phy, regardless of which specific PHY is in use.
+ */
+struct ufs_qcom_phy_qmp_20nm {
+ struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_2_0[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3f),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x1b),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x0f),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(0), 0x2F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(0), 0x20),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(1), 0x2F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(1), 0x20),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_3_0[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x2b),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x38),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x3c),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_UP_OFFSET, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_DN_OFFSET, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1e),
+};
+
+#endif
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
new file mode 100644
index 000000000000..44ee983d57fe
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-i.h"
+
+#define MAX_PROP_NAME 32
+#define VDDA_PHY_MIN_UV 1000000
+#define VDDA_PHY_MAX_UV 1000000
+#define VDDA_PLL_MIN_UV 1800000
+#define VDDA_PLL_MAX_UV 1800000
+#define VDDP_REF_CLK_MIN_UV 1200000
+#define VDDP_REF_CLK_MAX_UV 1200000
+
+static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
+ const char *, bool);
+static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
+ const char *);
+static int ufs_qcom_phy_base_init(struct platform_device *pdev,
+ struct ufs_qcom_phy *phy_common);
+
+int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+ struct ufs_qcom_phy_calibration *tbl_A,
+ int tbl_size_A,
+ struct ufs_qcom_phy_calibration *tbl_B,
+ int tbl_size_B, bool is_rate_B)
+{
+ int i;
+ int ret = 0;
+
+ if (!tbl_A) {
+ dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < tbl_size_A; i++)
+ writel_relaxed(tbl_A[i].cfg_value,
+ ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
+
+ /*
+ * In case we would like to work in rate B, we need to override
+ * the registers that were configured from the rate A table with
+ * the values from the rate B table.
+ */
+ if (is_rate_B) {
+ if (!tbl_B) {
+ dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
+ __func__);
+ ret = EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < tbl_size_B; i++)
+ writel_relaxed(tbl_B[i].cfg_value,
+ ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
+ }
+
+ /* flush buffered writes */
+ mb();
+
+out:
+ return ret;
+}
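+
+/*
+ * Illustrative sketch (not part of this patch): a SoC-specific
+ * ->calibrate_phy() hook is expected to feed its rate A/B calibration
+ * tables (e.g. phy_cal_table_rate_A/_B from the SoC header above) into
+ * this helper. Hypothetical example, names are placeholders:
+ *
+ *	static int ufs_qcom_phy_qmp_xyz_calibrate(struct ufs_qcom_phy *phy,
+ *						   bool is_rate_B)
+ *	{
+ *		return ufs_qcom_phy_calibrate(phy,
+ *				phy_cal_table_rate_A,
+ *				ARRAY_SIZE(phy_cal_table_rate_A),
+ *				phy_cal_table_rate_B,
+ *				ARRAY_SIZE(phy_cal_table_rate_B),
+ *				is_rate_B);
+ *	}
+ */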
+
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+ struct ufs_qcom_phy *common_cfg,
+ struct phy_ops *ufs_qcom_phy_gen_ops,
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = NULL;
+ struct phy_provider *phy_provider;
+
+ err = ufs_qcom_phy_base_init(pdev, common_cfg);
+ if (err) {
+ dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+ goto out;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+ goto out;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ err = PTR_ERR(generic_phy);
+ dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+ goto out;
+ }
+
+ common_cfg->phy_spec_ops = phy_spec_ops;
+ common_cfg->dev = dev;
+
+out:
+ return generic_phy;
+}
+
+/*
+ * This assumes the phy structure stored as drvdata inside generic_phy
+ * starts with a struct ufs_qcom_phy. In order to function properly it's
+ * crucial to keep the embedded "struct ufs_qcom_phy common_cfg" as the
+ * first member of the SoC-specific PHY structure.
+ */
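+/*
+ * For example (hypothetical SoC-specific wrapper, for illustration only):
+ *
+ *	struct ufs_qcom_phy_qmp_xyz {
+ *		struct ufs_qcom_phy common_cfg;	<- must stay first
+ *		void __iomem *soc_specific_mmio;
+ *	};
+ */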
+struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
+{
+ return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
+}
+
+static
+int ufs_qcom_phy_base_init(struct platform_device *pdev,
+ struct ufs_qcom_phy *phy_common)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int err = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
+ if (!res) {
+ dev_err(dev, "%s: phy_mem resource not found\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ phy_common->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR((void const *)phy_common->mmio)) {
+ err = PTR_ERR((void const *)phy_common->mmio);
+ phy_common->mmio = NULL;
+ dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* "dev_ref_clk_ctrl_mem" is optional resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dev_ref_clk_ctrl_mem");
+ if (!res) {
+ dev_dbg(dev, "%s: dev_ref_clk_ctrl_mem resource not found\n",
+ __func__);
+ goto out;
+ }
+
+ phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio)) {
+ err = PTR_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio);
+ phy_common->dev_ref_clk_ctrl_mmio = NULL;
+ dev_err(dev, "%s: ioremap for dev_ref_clk_ctrl_mem resource failed %d\n",
+ __func__, err);
+ }
+
+out:
+ return err;
+}
+
+static int __ufs_qcom_phy_clk_get(struct phy *phy,
+ const char *name, struct clk **clk_out, bool err_print)
+{
+ struct clk *clk;
+ int err = 0;
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+ struct device *dev = ufs_qcom_phy->dev;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ if (err_print)
+ dev_err(dev, "failed to get %s err %d", name, err);
+ } else {
+ *clk_out = clk;
+ }
+
+ return err;
+}
+
+static
+int ufs_qcom_phy_clk_get(struct phy *phy,
+ const char *name, struct clk **clk_out)
+{
+ return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+}
+
+int
+ufs_qcom_phy_init_clks(struct phy *generic_phy,
+ struct ufs_qcom_phy *phy_common)
+{
+ int err;
+
+ err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+ &phy_common->tx_iface_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+ &phy_common->rx_iface_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+ &phy_common->ref_clk_src);
+ if (err)
+ goto out;
+
+ /*
+ * "ref_clk_parent" is optional, hence don't abort init if it's
+ * not found.
+ */
+ __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+ &phy_common->ref_clk_parent, false);
+
+ err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+ &phy_common->ref_clk);
+
+out:
+ return err;
+}
+
+int
+ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
+ struct ufs_qcom_phy *phy_common)
+{
+ int err;
+
+ err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
+ "vdda-pll");
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
+ "vdda-phy");
+
+ if (err)
+ goto out;
+
+ /* vddp-ref-clk-* properties are optional */
+ __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
+ "vddp-ref-clk", true);
+out:
+ return err;
+}
+
+static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+ struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
+{
+ int err = 0;
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+ struct device *dev = ufs_qcom_phy->dev;
+
+ char prop_name[MAX_PROP_NAME];
+
+ vreg->name = kstrdup(name, GFP_KERNEL);
+ if (!vreg->name) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ vreg->reg = devm_regulator_get(dev, name);
+ if (IS_ERR(vreg->reg)) {
+ err = PTR_ERR(vreg->reg);
+ vreg->reg = NULL;
+ if (!optional)
+ dev_err(dev, "failed to get %s, %d\n", name, err);
+ goto out;
+ }
+
+ if (dev->of_node) {
+ snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
+ err = of_property_read_u32(dev->of_node,
+ prop_name, &vreg->max_uA);
+ if (err && err != -EINVAL) {
+ dev_err(dev, "%s: failed to read %s\n",
+ __func__, prop_name);
+ goto out;
+ } else if (err == -EINVAL || !vreg->max_uA) {
+ if (regulator_count_voltages(vreg->reg) > 0) {
+ dev_err(dev, "%s: %s is mandatory\n",
+ __func__, prop_name);
+ err = -EINVAL;
+ goto out;
+ }
+ err = 0;
+ }
+ snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
+ if (of_get_property(dev->of_node, prop_name, NULL))
+ vreg->is_always_on = true;
+ else
+ vreg->is_always_on = false;
+ }
+
+ if (!strcmp(name, "vdda-pll")) {
+ vreg->max_uV = VDDA_PLL_MAX_UV;
+ vreg->min_uV = VDDA_PLL_MIN_UV;
+ } else if (!strcmp(name, "vdda-phy")) {
+ vreg->max_uV = VDDA_PHY_MAX_UV;
+ vreg->min_uV = VDDA_PHY_MIN_UV;
+ } else if (!strcmp(name, "vddp-ref-clk")) {
+ vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+ vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+ }
+
+out:
+ if (err)
+ kfree(vreg->name);
+ return err;
+}
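+
+/*
+ * Example DT fragment (hypothetical regulator phandles and values)
+ * consumed by the function above, via the standard "<name>-supply"
+ * regulator binding plus the optional "<name>-max-microamp" and
+ * "<name>-always-on" properties:
+ *
+ *	vdda-phy-supply = <&pm_ldo_a>;
+ *	vdda-phy-max-microamp = <18380>;
+ *	vdda-pll-supply = <&pm_ldo_b>;
+ *	vdda-pll-always-on;
+ */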
+
+static int ufs_qcom_phy_init_vreg(struct phy *phy,
+ struct ufs_qcom_phy_vreg *vreg, const char *name)
+{
+ return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+}
+
+static
+int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+ struct ufs_qcom_phy_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg;
+ const char *name;
+ int min_uV;
+ int uA_load;
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+ struct device *dev = ufs_qcom_phy->dev;
+
+ /* check vreg before it is dereferenced below */
+ BUG_ON(!vreg);
+
+ reg = vreg->reg;
+ name = vreg->name;
+
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+ goto out;
+ }
+ uA_load = on ? vreg->max_uA : 0;
+ ret = regulator_set_optimum_mode(reg, uA_load);
+ if (ret >= 0) {
+ /*
+ * regulator_set_optimum_mode() returns new regulator
+ * mode upon success.
+ */
+ ret = 0;
+ } else {
+ dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
+ __func__, name, uA_load, ret);
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+static
+int ufs_qcom_phy_enable_vreg(struct phy *phy,
+ struct ufs_qcom_phy_vreg *vreg)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+ struct device *dev = ufs_qcom_phy->dev;
+ int ret = 0;
+
+ if (!vreg || vreg->enabled)
+ goto out;
+
+ ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+ if (ret) {
+ dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = regulator_enable(vreg->reg);
+ if (ret) {
+ dev_err(dev, "%s: enable failed, err=%d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ vreg->enabled = true;
+out:
+ return ret;
+}
+
+int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+{
+ int ret = 0;
+ struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+ if (phy->is_ref_clk_enabled)
+ goto out;
+
+ /*
+ * The reference clock is propagated in a daisy-chained manner from
+ * the source to the PHY, so ungate the clocks at each stage.
+ */
+ ret = clk_prepare_enable(phy->ref_clk_src);
+ if (ret) {
+ dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * "ref_clk_parent" is an optional clock, hence make sure that the
+ * clock reference is available before trying to enable it.
+ */
+ if (phy->ref_clk_parent) {
+ ret = clk_prepare_enable(phy->ref_clk_parent);
+ if (ret) {
+ dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
+ __func__, ret);
+ goto out_disable_src;
+ }
+ }
+
+ ret = clk_prepare_enable(phy->ref_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
+ __func__, ret);
+ goto out_disable_parent;
+ }
+
+ phy->is_ref_clk_enabled = true;
+ goto out;
+
+out_disable_parent:
+ if (phy->ref_clk_parent)
+ clk_disable_unprepare(phy->ref_clk_parent);
+out_disable_src:
+ clk_disable_unprepare(phy->ref_clk_src);
+out:
+ return ret;
+}
+
+static
+int ufs_qcom_phy_disable_vreg(struct phy *phy,
+ struct ufs_qcom_phy_vreg *vreg)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+ struct device *dev = ufs_qcom_phy->dev;
+ int ret = 0;
+
+ if (!vreg || !vreg->enabled || vreg->is_always_on)
+ goto out;
+
+ ret = regulator_disable(vreg->reg);
+
+ if (!ret) {
+ /* ignore errors on applying disable config */
+ ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+ vreg->enabled = false;
+ } else {
+ dev_err(dev, "%s: %s disable failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+ if (phy->is_ref_clk_enabled) {
+ clk_disable_unprepare(phy->ref_clk);
+ /*
+ * "ref_clk_parent" is an optional clock, hence make sure that
+ * the clock reference is available before trying to disable it.
+ */
+ if (phy->ref_clk_parent)
+ clk_disable_unprepare(phy->ref_clk_parent);
+ clk_disable_unprepare(phy->ref_clk_src);
+ phy->is_ref_clk_enabled = false;
+ }
+}
+
+#define UFS_REF_CLK_EN (1 << 5)
+
+static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
+{
+ struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+ if (phy->dev_ref_clk_ctrl_mmio &&
+ (enable ^ phy->is_dev_ref_clk_enabled)) {
+ u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
+
+ if (enable)
+ temp |= UFS_REF_CLK_EN;
+ else
+ temp &= ~UFS_REF_CLK_EN;
+
+ /*
+ * If we are here to disable this clock immediately after
+ * entering hibern8, we need to make sure that the device
+ * ref_clk stays active for at least 1us after hibern8 entry.
+ */
+ if (!enable)
+ udelay(1);
+
+ writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
+ /* ensure that ref_clk is enabled/disabled before we return */
+ wmb();
+ /*
+ * If we call hibern8 exit after this, we need to make sure that
+ * the device ref_clk is stable for at least 1us before the
+ * hibern8 exit command.
+ */
+ if (enable)
+ udelay(1);
+
+ phy->is_dev_ref_clk_enabled = enable;
+ }
+}
+
+void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
+{
+ ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
+}
+
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
+{
+ ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
+}
+
+/* Turn ON M-PHY RMMI interface clocks */
+int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+ int ret = 0;
+
+ if (phy->is_iface_clk_enabled)
+ goto out;
+
+ ret = clk_prepare_enable(phy->tx_iface_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
+ __func__, ret);
+ goto out;
+ }
+ ret = clk_prepare_enable(phy->rx_iface_clk);
+ if (ret) {
+ clk_disable_unprepare(phy->tx_iface_clk);
+ dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
+ __func__, ret);
+ goto out;
+ }
+ phy->is_iface_clk_enabled = true;
+
+out:
+ return ret;
+}
+
+/* Turn OFF M-PHY RMMI interface clocks */
+void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+ if (phy->is_iface_clk_enabled) {
+ clk_disable_unprepare(phy->tx_iface_clk);
+ clk_disable_unprepare(phy->rx_iface_clk);
+ phy->is_iface_clk_enabled = false;
+ }
+}
+
+int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+ int ret = 0;
+
+ if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
+ dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
+ __func__);
+ ret = -ENOTSUPP;
+ } else {
+ ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
+ }
+
+ return ret;
+}
+
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+ int ret = 0;
+
+ if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
+ dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
+ __func__);
+ ret = -ENOTSUPP;
+ } else {
+ ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
+ tx_lanes);
+ }
+
+ return ret;
+}
+
+void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
+ u8 major, u16 minor, u16 step)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+ ufs_qcom_phy->host_ctrl_rev_major = major;
+ ufs_qcom_phy->host_ctrl_rev_minor = minor;
+ ufs_qcom_phy->host_ctrl_rev_step = step;
+}
+
+int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+ int ret = 0;
+
+ if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
+ dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
+ __func__);
+ ret = -ENOTSUPP;
+ } else {
+ ret = ufs_qcom_phy->phy_spec_ops->
+ calibrate_phy(ufs_qcom_phy, is_rate_B);
+ if (ret)
+ dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int ufs_qcom_phy_remove(struct phy *generic_phy,
+ struct ufs_qcom_phy *ufs_qcom_phy)
+{
+ phy_power_off(generic_phy);
+
+ kfree(ufs_qcom_phy->vdda_pll.name);
+ kfree(ufs_qcom_phy->vdda_phy.name);
+
+ return 0;
+}
+
+int ufs_qcom_phy_exit(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+ if (ufs_qcom_phy->is_powered_on)
+ phy_power_off(generic_phy);
+
+ return 0;
+}
+
+int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+ if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
+ dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
+ __func__);
+ return -ENOTSUPP;
+ }
+
+ return ufs_qcom_phy->phy_spec_ops->
+ is_physical_coding_sublayer_ready(ufs_qcom_phy);
+}
+
+int ufs_qcom_phy_power_on(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ struct device *dev = phy_common->dev;
+ int err;
+
+ err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+ if (err) {
+ dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+
+ phy_common->phy_spec_ops->power_control(phy_common, true);
+
+ /* vdda_pll also enables ref clock LDOs so enable it first */
+ err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+ if (err) {
+ dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
+ __func__, err);
+ goto out_disable_phy;
+ }
+
+ err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+ if (err) {
+ dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ goto out_disable_pll;
+ }
+
+ /* enable device PHY ref_clk pad rail */
+ if (phy_common->vddp_ref_clk.reg) {
+ err = ufs_qcom_phy_enable_vreg(generic_phy,
+ &phy_common->vddp_ref_clk);
+ if (err) {
+ dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
+ __func__, err);
+ goto out_disable_ref_clk;
+ }
+ }
+
+ phy_common->is_powered_on = true;
+ goto out;
+
+out_disable_ref_clk:
+ ufs_qcom_phy_disable_ref_clk(generic_phy);
+out_disable_pll:
+ ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+out_disable_phy:
+ ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+out:
+ return err;
+}
+
+int ufs_qcom_phy_power_off(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->phy_spec_ops->power_control(phy_common, false);
+
+ if (phy_common->vddp_ref_clk.reg)
+ ufs_qcom_phy_disable_vreg(generic_phy,
+ &phy_common->vddp_ref_clk);
+ ufs_qcom_phy_disable_ref_clk(generic_phy);
+
+ ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ phy_common->is_powered_on = false;
+
+ return 0;
+}
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
new file mode 100644
index 000000000000..22011c3b6a4b
--- /dev/null
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -0,0 +1,158 @@
+/*
+ * Rockchip usb PHY driver
+ *
+ * Copyright (C) 2014 Yunzhi Li <lyz@rock-chips.com>
+ * Copyright (C) 2014 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+/*
+ * The upper 16 bits of this register are used for write protection:
+ * BIT(13) can only be written when BIT(13 + 16) is set to 1 in the
+ * same write.
+ */
+#define SIDDQ_WRITE_ENA BIT(29)
+#define SIDDQ_ON BIT(13)
+#define SIDDQ_OFF (0 << 13)
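+
+/*
+ * Example: regmap_write(grf, offset, SIDDQ_WRITE_ENA | SIDDQ_ON) powers
+ * the PHY analog blocks down in a single write, because the write-enable
+ * bit BIT(13 + 16) is set together with the value bit BIT(13).
+ */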
+
+struct rockchip_usb_phy {
+ unsigned int reg_offset;
+ struct regmap *reg_base;
+ struct clk *clk;
+ struct phy *phy;
+};
+
+static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
+ bool siddq)
+{
+ return regmap_write(phy->reg_base, phy->reg_offset,
+ SIDDQ_WRITE_ENA | (siddq ? SIDDQ_ON : SIDDQ_OFF));
+}
+
+static int rockchip_usb_phy_power_off(struct phy *_phy)
+{
+ struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
+ int ret = 0;
+
+ /* Power down the USB PHY analog blocks by setting siddq to 1 */
+ ret = rockchip_usb_phy_power(phy, 1);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(phy->clk);
+
+ return 0;
+}
+
+static int rockchip_usb_phy_power_on(struct phy *_phy)
+{
+ struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
+ int ret = 0;
+
+ ret = clk_prepare_enable(phy->clk);
+ if (ret)
+ return ret;
+
+ /* Power up the USB PHY analog blocks by setting siddq to 0 */
+ ret = rockchip_usb_phy_power(phy, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct phy_ops ops = {
+ .power_on = rockchip_usb_phy_power_on,
+ .power_off = rockchip_usb_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int rockchip_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_usb_phy *rk_phy;
+ struct phy_provider *phy_provider;
+ struct device_node *child;
+ struct regmap *grf;
+ unsigned int reg_offset;
+
+ grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(grf)) {
+ dev_err(&pdev->dev, "Missing rockchip,grf property\n");
+ return PTR_ERR(grf);
+ }
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
+ if (!rk_phy)
+ return -ENOMEM;
+
+ if (of_property_read_u32(child, "reg", &reg_offset)) {
+ dev_err(dev, "missing reg property in node %s\n",
+ child->name);
+ return -EINVAL;
+ }
+
+ rk_phy->reg_offset = reg_offset;
+ rk_phy->reg_base = grf;
+
+ rk_phy->clk = of_clk_get_by_name(child, "phyclk");
+ if (IS_ERR(rk_phy->clk))
+ rk_phy->clk = NULL;
+
+ rk_phy->phy = devm_phy_create(dev, child, &ops);
+ if (IS_ERR(rk_phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(rk_phy->phy);
+ }
+ phy_set_drvdata(rk_phy->phy, rk_phy);
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
+ { .compatible = "rockchip,rk3288-usb-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_usb_phy_dt_ids);
+
+static struct platform_driver rockchip_usb_driver = {
+ .probe = rockchip_usb_phy_probe,
+ .driver = {
+ .name = "rockchip-usb-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = rockchip_usb_phy_dt_ids,
+ },
+};
+
+module_platform_driver(rockchip_usb_driver);
+
+MODULE_AUTHOR("Yunzhi Li <lyz@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip USB 2.0 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c
index 74f0fab3cd8a..1d5ae5f8ef69 100644
--- a/drivers/phy/phy-stih407-usb.c
+++ b/drivers/phy/phy-stih407-usb.c
@@ -22,6 +22,9 @@
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
+#define PHYPARAM_REG 1
+#define PHYCTRL_REG 2
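+
+/*
+ * Word indices into the "st,syscfg" DT property; index 0 is the syscon
+ * phandle itself. A hypothetical node entry (placeholder offsets, for
+ * illustration only) would look like:
+ *
+ *	st,syscfg = <&syscfg_core 0xf8 0xf4>;
+ *
+ * where index 1 (0xf8) is read as the phyparam offset and index 2 (0xf4)
+ * as the phyctrl offset.
+ */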
+
/* Default PHY_SEL and REFCLKSEL configuration */
#define STIH407_USB_PICOPHY_CTRL_PORT_CONF 0x6
#define STIH407_USB_PICOPHY_CTRL_PORT_MASK 0x1f
@@ -93,7 +96,7 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
struct phy *phy;
- struct resource *res;
+ int ret;
phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
if (!phy_dev)
@@ -123,19 +126,19 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
return PTR_ERR(phy_dev->regmap);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
- if (!res) {
- dev_err(dev, "No ctrl reg found\n");
- return -ENXIO;
+ ret = of_property_read_u32_index(np, "st,syscfg", PHYPARAM_REG,
+ &phy_dev->param);
+ if (ret) {
+ dev_err(dev, "can't get phyparam offset (%d)\n", ret);
+ return ret;
}
- phy_dev->ctrl = res->start;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "param");
- if (!res) {
- dev_err(dev, "No param reg found\n");
- return -ENXIO;
+ ret = of_property_read_u32_index(np, "st,syscfg", PHYCTRL_REG,
+ &phy_dev->ctrl);
+ if (ret) {
+ dev_err(dev, "can't get phyctrl offset (%d)\n", ret);
+ return ret;
}
- phy_dev->param = res->start;
phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
if (IS_ERR(phy)) {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fb02a67c9181..a2b08f3ccb03 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
else
data->num_phys = 3;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
data->disc_thresh = 3;
else
data->disc_thresh = 2;
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 1387b4d4afe3..95c88f929f27 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
+#include <linux/spinlock.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
@@ -82,7 +83,10 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
- u8 id;
+ bool enabled;
+ spinlock_t lock; /* serialize clock enable/disable */
+ /* the below flag is needed specifically for SATA */
+ bool refclk_enabled;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -217,8 +221,13 @@ static int ti_pipe3_init(struct phy *x)
u32 val;
int ret = 0;
+ /*
+ * Set pcie_pcs register to 0x96 for proper functioning of phy
+ * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
+ * 18-1804.
+ */
if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
- omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1);
+ omap_control_pcie_pcs(phy->control_dev, 0x96);
return 0;
}
@@ -303,6 +312,7 @@ static int ti_pipe3_probe(struct platform_device *pdev)
return -ENOMEM;
phy->dev = &pdev->dev;
+ spin_lock_init(&phy->lock);
if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
match = of_match_device(of_match_ptr(ti_pipe3_id_table),
@@ -329,26 +339,27 @@ static int ti_pipe3_probe(struct platform_device *pdev)
}
}
+ phy->refclk = devm_clk_get(phy->dev, "refclk");
+ if (IS_ERR(phy->refclk)) {
+ dev_err(&pdev->dev, "unable to get refclk\n");
+ /*
+ * Older DTBs may be missing the refclk for the SATA PHY,
+ * so don't bail out in that case.
+ */
+ if (!of_device_is_compatible(node, "ti,phy-pipe3-sata"))
+ return PTR_ERR(phy->refclk);
+ }
+
if (!of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get wkupclk\n");
return PTR_ERR(phy->wkupclk);
}
-
- phy->refclk = devm_clk_get(phy->dev, "refclk");
- if (IS_ERR(phy->refclk)) {
- dev_err(&pdev->dev, "unable to get refclk\n");
- return PTR_ERR(phy->refclk);
- }
} else {
phy->wkupclk = ERR_PTR(-ENODEV);
- phy->refclk = ERR_PTR(-ENODEV);
}
if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
- if (of_property_read_u8(node, "id", &phy->id) < 0)
- phy->id = 1;
clk = devm_clk_get(phy->dev, "dpll_ref");
if (IS_ERR(clk)) {
@@ -424,33 +435,42 @@ static int ti_pipe3_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-
-static int ti_pipe3_runtime_suspend(struct device *dev)
+static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
+ if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
+ int ret;
- if (!IS_ERR(phy->wkupclk))
- clk_disable_unprepare(phy->wkupclk);
+ ret = clk_prepare_enable(phy->refclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
+ return ret;
+ }
+ phy->refclk_enabled = true;
+ }
+
+ return 0;
+}
+
+static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
+{
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- if (!IS_ERR(phy->div_clk))
- clk_disable_unprepare(phy->div_clk);
- return 0;
+ phy->refclk_enabled = false;
}
-static int ti_pipe3_runtime_resume(struct device *dev)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
{
- u32 ret = 0;
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
+ int ret = 0;
+ unsigned long flags;
- if (!IS_ERR(phy->refclk)) {
- ret = clk_prepare_enable(phy->refclk);
- if (ret) {
- dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
- goto err1;
- }
- }
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->enabled)
+ goto err1;
+
+ ret = ti_pipe3_enable_refclk(phy);
+ if (ret)
+ goto err1;
if (!IS_ERR(phy->wkupclk)) {
ret = clk_prepare_enable(phy->wkupclk);
@@ -467,6 +487,9 @@ static int ti_pipe3_runtime_resume(struct device *dev)
goto err3;
}
}
+
+ phy->enabled = true;
+ spin_unlock_irqrestore(&phy->lock, flags);
return 0;
err3:
@@ -477,20 +500,80 @@ err2:
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
+ ti_pipe3_disable_refclk(phy);
err1:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return ret;
+}
+
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!phy->enabled) {
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return;
+ }
+
+ if (!IS_ERR(phy->wkupclk))
+ clk_disable_unprepare(phy->wkupclk);
+ /* Don't disable refclk for SATA PHY due to Errata i783 */
+ if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+ ti_pipe3_disable_refclk(phy);
+ if (!IS_ERR(phy->div_clk))
+ clk_disable_unprepare(phy->div_clk);
+ phy->enabled = false;
+ spin_unlock_irqrestore(&phy->lock, flags);
+}
+
+static int ti_pipe3_runtime_suspend(struct device *dev)
+{
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+
+ ti_pipe3_disable_clocks(phy);
+ return 0;
+}
+
+static int ti_pipe3_runtime_resume(struct device *dev)
+{
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ti_pipe3_enable_clocks(phy);
return ret;
}
+static int ti_pipe3_suspend(struct device *dev)
+{
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+
+ ti_pipe3_disable_clocks(phy);
+ return 0;
+}
+
+static int ti_pipe3_resume(struct device *dev)
+{
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ti_pipe3_enable_clocks(phy);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops ti_pipe3_pm_ops = {
SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
ti_pipe3_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
};
-#define DEV_PM_OPS (&ti_pipe3_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
#ifdef CONFIG_OF
static const struct of_device_id ti_pipe3_id_table[] = {
{
@@ -518,7 +601,7 @@ static struct platform_driver ti_pipe3_driver = {
.remove = ti_pipe3_remove,
.driver = {
.name = "ti-pipe3",
- .pm = DEV_PM_OPS,
+ .pm = &ti_pipe3_pm_ops,
.of_match_table = of_match_ptr(ti_pipe3_id_table),
},
};
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index d014f22f387a..ee9f44ad7f02 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -96,6 +96,14 @@ config PINCTRL_FALCON
depends on SOC_FALCON
depends on PINCTRL_LANTIQ
+config PINCTRL_MESON
+ bool
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select OF_GPIO
+ select REGMAP_MMIO
+
config PINCTRL_ROCKCHIP
bool
select PINMUX
@@ -113,7 +121,7 @@ config PINCTRL_SINGLE
This selects the device tree based generic pinctrl driver.
config PINCTRL_SIRF
- bool "CSR SiRFprimaII/SiRFmarco pin controller driver"
+ bool "CSR SiRFprimaII pin controller driver"
depends on ARCH_SIRF
select PINMUX
select GPIOLIB_IRQCHIP
@@ -191,6 +199,14 @@ config PINCTRL_PALMAS
open drain configuration for the Palmas series devices like
TPS65913, TPS80036 etc.
+config PINCTRL_ZYNQ
+ bool "Pinctrl driver for Xilinx Zynq"
+ depends on ARCH_ZYNQ
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Xilinx Zynq.
+
source "drivers/pinctrl/berlin/Kconfig"
source "drivers/pinctrl/freescale/Kconfig"
source "drivers/pinctrl/intel/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index c030b3db8034..0475206dd600 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -1,6 +1,6 @@
# generic pinmux support
-ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
+subdir-ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
obj-$(CONFIG_PINCTRL) += core.o pinctrl-utils.o
obj-$(CONFIG_PINMUX) += pinmux.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
+obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
+obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-y += freescale/
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e4f65510c87e..89dca77ca038 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
if (pctldev == NULL)
return;
- mutex_lock(&pinctrldev_list_mutex);
mutex_lock(&pctldev->mutex);
-
pinctrl_remove_device_debugfs(pctldev);
+ mutex_unlock(&pctldev->mutex);
if (!IS_ERR(pctldev->p))
pinctrl_put(pctldev->p);
+ mutex_lock(&pinctrldev_list_mutex);
+ mutex_lock(&pctldev->mutex);
/* TODO: check that no pinmuxes are still active? */
list_del(&pctldev->node);
/* Destroy descriptor tree */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 52f2b9404fe0..448f10986c28 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -437,7 +437,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
unsigned long config;
- if (!pin_reg || !pin_reg->conf_reg) {
+ if (!pin_reg || pin_reg->conf_reg == -1) {
seq_printf(s, "N/A");
return;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index 8d1013a040c9..faf635654312 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -27,150 +27,148 @@
enum imx25_pads {
MX25_PAD_RESERVE0 = 1,
- MX25_PAD_RESERVE1 = 2,
- MX25_PAD_A10 = 3,
- MX25_PAD_A13 = 4,
- MX25_PAD_A14 = 5,
- MX25_PAD_A15 = 6,
- MX25_PAD_A16 = 7,
- MX25_PAD_A17 = 8,
- MX25_PAD_A18 = 9,
- MX25_PAD_A19 = 10,
- MX25_PAD_A20 = 11,
- MX25_PAD_A21 = 12,
- MX25_PAD_A22 = 13,
- MX25_PAD_A23 = 14,
- MX25_PAD_A24 = 15,
- MX25_PAD_A25 = 16,
- MX25_PAD_EB0 = 17,
- MX25_PAD_EB1 = 18,
- MX25_PAD_OE = 19,
- MX25_PAD_CS0 = 20,
- MX25_PAD_CS1 = 21,
- MX25_PAD_CS4 = 22,
- MX25_PAD_CS5 = 23,
- MX25_PAD_NF_CE0 = 24,
- MX25_PAD_ECB = 25,
- MX25_PAD_LBA = 26,
- MX25_PAD_BCLK = 27,
- MX25_PAD_RW = 28,
- MX25_PAD_NFWE_B = 29,
- MX25_PAD_NFRE_B = 30,
- MX25_PAD_NFALE = 31,
- MX25_PAD_NFCLE = 32,
- MX25_PAD_NFWP_B = 33,
- MX25_PAD_NFRB = 34,
- MX25_PAD_D15 = 35,
- MX25_PAD_D14 = 36,
- MX25_PAD_D13 = 37,
- MX25_PAD_D12 = 38,
- MX25_PAD_D11 = 39,
- MX25_PAD_D10 = 40,
- MX25_PAD_D9 = 41,
- MX25_PAD_D8 = 42,
- MX25_PAD_D7 = 43,
- MX25_PAD_D6 = 44,
- MX25_PAD_D5 = 45,
- MX25_PAD_D4 = 46,
- MX25_PAD_D3 = 47,
- MX25_PAD_D2 = 48,
- MX25_PAD_D1 = 49,
- MX25_PAD_D0 = 50,
- MX25_PAD_LD0 = 51,
- MX25_PAD_LD1 = 52,
- MX25_PAD_LD2 = 53,
- MX25_PAD_LD3 = 54,
- MX25_PAD_LD4 = 55,
- MX25_PAD_LD5 = 56,
- MX25_PAD_LD6 = 57,
- MX25_PAD_LD7 = 58,
- MX25_PAD_LD8 = 59,
- MX25_PAD_LD9 = 60,
- MX25_PAD_LD10 = 61,
- MX25_PAD_LD11 = 62,
- MX25_PAD_LD12 = 63,
- MX25_PAD_LD13 = 64,
- MX25_PAD_LD14 = 65,
- MX25_PAD_LD15 = 66,
- MX25_PAD_HSYNC = 67,
- MX25_PAD_VSYNC = 68,
- MX25_PAD_LSCLK = 69,
- MX25_PAD_OE_ACD = 70,
- MX25_PAD_CONTRAST = 71,
- MX25_PAD_PWM = 72,
- MX25_PAD_CSI_D2 = 73,
- MX25_PAD_CSI_D3 = 74,
- MX25_PAD_CSI_D4 = 75,
- MX25_PAD_CSI_D5 = 76,
- MX25_PAD_CSI_D6 = 77,
- MX25_PAD_CSI_D7 = 78,
- MX25_PAD_CSI_D8 = 79,
- MX25_PAD_CSI_D9 = 80,
- MX25_PAD_CSI_MCLK = 81,
- MX25_PAD_CSI_VSYNC = 82,
- MX25_PAD_CSI_HSYNC = 83,
- MX25_PAD_CSI_PIXCLK = 84,
- MX25_PAD_I2C1_CLK = 85,
- MX25_PAD_I2C1_DAT = 86,
- MX25_PAD_CSPI1_MOSI = 87,
- MX25_PAD_CSPI1_MISO = 88,
- MX25_PAD_CSPI1_SS0 = 89,
- MX25_PAD_CSPI1_SS1 = 90,
- MX25_PAD_CSPI1_SCLK = 91,
- MX25_PAD_CSPI1_RDY = 92,
- MX25_PAD_UART1_RXD = 93,
- MX25_PAD_UART1_TXD = 94,
- MX25_PAD_UART1_RTS = 95,
- MX25_PAD_UART1_CTS = 96,
- MX25_PAD_UART2_RXD = 97,
- MX25_PAD_UART2_TXD = 98,
- MX25_PAD_UART2_RTS = 99,
- MX25_PAD_UART2_CTS = 100,
- MX25_PAD_SD1_CMD = 101,
- MX25_PAD_SD1_CLK = 102,
- MX25_PAD_SD1_DATA0 = 103,
- MX25_PAD_SD1_DATA1 = 104,
- MX25_PAD_SD1_DATA2 = 105,
- MX25_PAD_SD1_DATA3 = 106,
- MX25_PAD_KPP_ROW0 = 107,
- MX25_PAD_KPP_ROW1 = 108,
- MX25_PAD_KPP_ROW2 = 109,
- MX25_PAD_KPP_ROW3 = 110,
- MX25_PAD_KPP_COL0 = 111,
- MX25_PAD_KPP_COL1 = 112,
- MX25_PAD_KPP_COL2 = 113,
- MX25_PAD_KPP_COL3 = 114,
- MX25_PAD_FEC_MDC = 115,
- MX25_PAD_FEC_MDIO = 116,
- MX25_PAD_FEC_TDATA0 = 117,
- MX25_PAD_FEC_TDATA1 = 118,
- MX25_PAD_FEC_TX_EN = 119,
- MX25_PAD_FEC_RDATA0 = 120,
- MX25_PAD_FEC_RDATA1 = 121,
- MX25_PAD_FEC_RX_DV = 122,
- MX25_PAD_FEC_TX_CLK = 123,
- MX25_PAD_RTCK = 124,
- MX25_PAD_DE_B = 125,
- MX25_PAD_GPIO_A = 126,
- MX25_PAD_GPIO_B = 127,
- MX25_PAD_GPIO_C = 128,
- MX25_PAD_GPIO_D = 129,
- MX25_PAD_GPIO_E = 130,
- MX25_PAD_GPIO_F = 131,
- MX25_PAD_EXT_ARMCLK = 132,
- MX25_PAD_UPLL_BYPCLK = 133,
- MX25_PAD_VSTBY_REQ = 134,
- MX25_PAD_VSTBY_ACK = 135,
- MX25_PAD_POWER_FAIL = 136,
- MX25_PAD_CLKO = 137,
- MX25_PAD_BOOT_MODE0 = 138,
- MX25_PAD_BOOT_MODE1 = 139,
+ MX25_PAD_A10 = 2,
+ MX25_PAD_A13 = 3,
+ MX25_PAD_A14 = 4,
+ MX25_PAD_A15 = 5,
+ MX25_PAD_A16 = 6,
+ MX25_PAD_A17 = 7,
+ MX25_PAD_A18 = 8,
+ MX25_PAD_A19 = 9,
+ MX25_PAD_A20 = 10,
+ MX25_PAD_A21 = 11,
+ MX25_PAD_A22 = 12,
+ MX25_PAD_A23 = 13,
+ MX25_PAD_A24 = 14,
+ MX25_PAD_A25 = 15,
+ MX25_PAD_EB0 = 16,
+ MX25_PAD_EB1 = 17,
+ MX25_PAD_OE = 18,
+ MX25_PAD_CS0 = 19,
+ MX25_PAD_CS1 = 20,
+ MX25_PAD_CS4 = 21,
+ MX25_PAD_CS5 = 22,
+ MX25_PAD_NF_CE0 = 23,
+ MX25_PAD_ECB = 24,
+ MX25_PAD_LBA = 25,
+ MX25_PAD_BCLK = 26,
+ MX25_PAD_RW = 27,
+ MX25_PAD_NFWE_B = 28,
+ MX25_PAD_NFRE_B = 29,
+ MX25_PAD_NFALE = 30,
+ MX25_PAD_NFCLE = 31,
+ MX25_PAD_NFWP_B = 32,
+ MX25_PAD_NFRB = 33,
+ MX25_PAD_D15 = 34,
+ MX25_PAD_D14 = 35,
+ MX25_PAD_D13 = 36,
+ MX25_PAD_D12 = 37,
+ MX25_PAD_D11 = 38,
+ MX25_PAD_D10 = 39,
+ MX25_PAD_D9 = 40,
+ MX25_PAD_D8 = 41,
+ MX25_PAD_D7 = 42,
+ MX25_PAD_D6 = 43,
+ MX25_PAD_D5 = 44,
+ MX25_PAD_D4 = 45,
+ MX25_PAD_D3 = 46,
+ MX25_PAD_D2 = 47,
+ MX25_PAD_D1 = 48,
+ MX25_PAD_D0 = 49,
+ MX25_PAD_LD0 = 50,
+ MX25_PAD_LD1 = 51,
+ MX25_PAD_LD2 = 52,
+ MX25_PAD_LD3 = 53,
+ MX25_PAD_LD4 = 54,
+ MX25_PAD_LD5 = 55,
+ MX25_PAD_LD6 = 56,
+ MX25_PAD_LD7 = 57,
+ MX25_PAD_LD8 = 58,
+ MX25_PAD_LD9 = 59,
+ MX25_PAD_LD10 = 60,
+ MX25_PAD_LD11 = 61,
+ MX25_PAD_LD12 = 62,
+ MX25_PAD_LD13 = 63,
+ MX25_PAD_LD14 = 64,
+ MX25_PAD_LD15 = 65,
+ MX25_PAD_HSYNC = 66,
+ MX25_PAD_VSYNC = 67,
+ MX25_PAD_LSCLK = 68,
+ MX25_PAD_OE_ACD = 69,
+ MX25_PAD_CONTRAST = 70,
+ MX25_PAD_PWM = 71,
+ MX25_PAD_CSI_D2 = 72,
+ MX25_PAD_CSI_D3 = 73,
+ MX25_PAD_CSI_D4 = 74,
+ MX25_PAD_CSI_D5 = 75,
+ MX25_PAD_CSI_D6 = 76,
+ MX25_PAD_CSI_D7 = 77,
+ MX25_PAD_CSI_D8 = 78,
+ MX25_PAD_CSI_D9 = 79,
+ MX25_PAD_CSI_MCLK = 80,
+ MX25_PAD_CSI_VSYNC = 81,
+ MX25_PAD_CSI_HSYNC = 82,
+ MX25_PAD_CSI_PIXCLK = 83,
+ MX25_PAD_I2C1_CLK = 84,
+ MX25_PAD_I2C1_DAT = 85,
+ MX25_PAD_CSPI1_MOSI = 86,
+ MX25_PAD_CSPI1_MISO = 87,
+ MX25_PAD_CSPI1_SS0 = 88,
+ MX25_PAD_CSPI1_SS1 = 89,
+ MX25_PAD_CSPI1_SCLK = 90,
+ MX25_PAD_CSPI1_RDY = 91,
+ MX25_PAD_UART1_RXD = 92,
+ MX25_PAD_UART1_TXD = 93,
+ MX25_PAD_UART1_RTS = 94,
+ MX25_PAD_UART1_CTS = 95,
+ MX25_PAD_UART2_RXD = 96,
+ MX25_PAD_UART2_TXD = 97,
+ MX25_PAD_UART2_RTS = 98,
+ MX25_PAD_UART2_CTS = 99,
+ MX25_PAD_SD1_CMD = 100,
+ MX25_PAD_SD1_CLK = 101,
+ MX25_PAD_SD1_DATA0 = 102,
+ MX25_PAD_SD1_DATA1 = 103,
+ MX25_PAD_SD1_DATA2 = 104,
+ MX25_PAD_SD1_DATA3 = 105,
+ MX25_PAD_KPP_ROW0 = 106,
+ MX25_PAD_KPP_ROW1 = 107,
+ MX25_PAD_KPP_ROW2 = 108,
+ MX25_PAD_KPP_ROW3 = 109,
+ MX25_PAD_KPP_COL0 = 110,
+ MX25_PAD_KPP_COL1 = 111,
+ MX25_PAD_KPP_COL2 = 112,
+ MX25_PAD_KPP_COL3 = 113,
+ MX25_PAD_FEC_MDC = 114,
+ MX25_PAD_FEC_MDIO = 115,
+ MX25_PAD_FEC_TDATA0 = 116,
+ MX25_PAD_FEC_TDATA1 = 117,
+ MX25_PAD_FEC_TX_EN = 118,
+ MX25_PAD_FEC_RDATA0 = 119,
+ MX25_PAD_FEC_RDATA1 = 120,
+ MX25_PAD_FEC_RX_DV = 121,
+ MX25_PAD_FEC_TX_CLK = 122,
+ MX25_PAD_RTCK = 123,
+ MX25_PAD_DE_B = 124,
+ MX25_PAD_GPIO_A = 125,
+ MX25_PAD_GPIO_B = 126,
+ MX25_PAD_GPIO_C = 127,
+ MX25_PAD_GPIO_D = 128,
+ MX25_PAD_GPIO_E = 129,
+ MX25_PAD_GPIO_F = 130,
+ MX25_PAD_EXT_ARMCLK = 131,
+ MX25_PAD_UPLL_BYPCLK = 132,
+ MX25_PAD_VSTBY_REQ = 133,
+ MX25_PAD_VSTBY_ACK = 134,
+ MX25_PAD_POWER_FAIL = 135,
+ MX25_PAD_CLKO = 136,
+ MX25_PAD_BOOT_MODE0 = 137,
+ MX25_PAD_BOOT_MODE1 = 138,
};
/* Pad names for the pinmux subsystem */
static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
- IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
IMX_PINCTRL_PIN(MX25_PAD_A10),
IMX_PINCTRL_PIN(MX25_PAD_A13),
IMX_PINCTRL_PIN(MX25_PAD_A14),
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index e9f8b39d1a9f..3034fd03bced 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -148,6 +148,11 @@ struct chv_community {
size_t ngpios;
};
+struct chv_pin_context {
+ u32 padctrl0;
+ u32 padctrl1;
+};
+
/**
* struct chv_pinctrl - CHV pinctrl private structure
* @dev: Pointer to the parent device
@@ -172,6 +177,8 @@ struct chv_pinctrl {
spinlock_t lock;
unsigned intr_lines[16];
const struct chv_community *community;
+ u32 saved_intmask;
+ struct chv_pin_context *saved_pin_context;
};
#define gpiochip_to_pinctrl(c) container_of(c, struct chv_pinctrl, chip)
@@ -873,9 +880,22 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
value &= ~CHV_PADCTRL1_INVRXTX_MASK;
chv_writel(value, reg);
- /* Switch to a GPIO mode */
reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
- value = readl(reg) | CHV_PADCTRL0_GPIOEN;
+ value = readl(reg);
+
+ /*
+ * If the pin is in HiZ mode (both TX and RX buffers are
+ * disabled), switch it to input mode now.
+ */
+ if ((value & CHV_PADCTRL0_GPIOCFG_MASK) ==
+ (CHV_PADCTRL0_GPIOCFG_HIZ << CHV_PADCTRL0_GPIOCFG_SHIFT)) {
+ value &= ~CHV_PADCTRL0_GPIOCFG_MASK;
+ value |= CHV_PADCTRL0_GPIOCFG_GPI <<
+ CHV_PADCTRL0_GPIOCFG_SHIFT;
+ }
+
+ /* Switch to a GPIO mode */
+ value |= CHV_PADCTRL0_GPIOEN;
chv_writel(value, reg);
}
@@ -1443,6 +1463,14 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
spin_lock_init(&pctrl->lock);
pctrl->dev = &pdev->dev;
+#ifdef CONFIG_PM_SLEEP
+ pctrl->saved_pin_context = devm_kcalloc(pctrl->dev,
+ pctrl->community->npins, sizeof(*pctrl->saved_pin_context),
+ GFP_KERNEL);
+ if (!pctrl->saved_pin_context)
+ return -ENOMEM;
+#endif
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pctrl->regs))
@@ -1486,6 +1514,94 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int chv_pinctrl_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ int i;
+
+ pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
+
+ for (i = 0; i < pctrl->community->npins; i++) {
+ const struct pinctrl_pin_desc *desc;
+ struct chv_pin_context *ctx;
+ void __iomem *reg;
+
+ desc = &pctrl->community->pins[i];
+ if (chv_pad_locked(pctrl, desc->number))
+ continue;
+
+ ctx = &pctrl->saved_pin_context[i];
+
+ reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL0);
+ ctx->padctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIORXSTATE;
+
+ reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL1);
+ ctx->padctrl1 = readl(reg);
+ }
+
+ return 0;
+}
+
+static int chv_pinctrl_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ int i;
+
+ /*
+ * Mask all interrupts before restoring per-pin configuration
+ * registers because we don't know in which state BIOS left them
+ * upon exiting suspend.
+ */
+ chv_writel(0, pctrl->regs + CHV_INTMASK);
+
+ for (i = 0; i < pctrl->community->npins; i++) {
+ const struct pinctrl_pin_desc *desc;
+ const struct chv_pin_context *ctx;
+ void __iomem *reg;
+ u32 val;
+
+ desc = &pctrl->community->pins[i];
+ if (chv_pad_locked(pctrl, desc->number))
+ continue;
+
+ ctx = &pctrl->saved_pin_context[i];
+
+ /* Only restore if our saved state differs from the current */
+ reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL0);
+ val = readl(reg) & ~CHV_PADCTRL0_GPIORXSTATE;
+ if (ctx->padctrl0 != val) {
+ chv_writel(ctx->padctrl0, reg);
+ dev_dbg(pctrl->dev, "restored pin %2u ctrl0 0x%08x\n",
+ desc->number, readl(reg));
+ }
+
+ reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL1);
+ val = readl(reg);
+ if (ctx->padctrl1 != val) {
+ chv_writel(ctx->padctrl1, reg);
+ dev_dbg(pctrl->dev, "restored pin %2u ctrl1 0x%08x\n",
+ desc->number, readl(reg));
+ }
+ }
+
+ /*
+ * Now that all pins are restored to known state, we can restore
+ * the interrupt mask register as well.
+ */
+ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
+ chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops chv_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume)
+};
+
static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
{ "INT33FF" },
{ }
@@ -1497,7 +1613,7 @@ static struct platform_driver chv_pinctrl_driver = {
.remove = chv_pinctrl_remove,
.driver = {
.name = "cherryview-pinctrl",
- .owner = THIS_MODULE,
+ .pm = &chv_pinctrl_pm_ops,
.acpi_match_table = chv_pinctrl_acpi_match,
},
};
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
new file mode 100644
index 000000000000..eafc216067a4
--- /dev/null
+++ b/drivers/pinctrl/meson/Makefile
@@ -0,0 +1,2 @@
+obj-y += pinctrl-meson8.o
+obj-y += pinctrl-meson.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
new file mode 100644
index 000000000000..a2bf49ce16e7
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -0,0 +1,761 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * The available pins are organized in banks (A,B,C,D,E,X,Y,Z,AO,
+ * BOOT,CARD for meson6 and X,Y,DV,H,Z,AO,BOOT,CARD for meson8) and
+ * each bank has a variable number of pins.
+ *
+ * The AO bank is special because it belongs to the Always-On power
+ * domain which can't be powered off; the bank also uses a set of
+ * registers different from the other banks.
+ *
+ * For each of the two power domains (regular and always-on) there are
+ * 4 different register ranges that control the following properties
+ * of the pins:
+ * 1) pin muxing
+ * 2) pull enable/disable
+ * 3) pull up/down
+ * 4) GPIO direction, output value, input value
+ *
+ * In some cases the register ranges for pull enable and pull
+ * direction are the same and thus there are only 3 register ranges.
+ *
+ * Every pinmux group can be enabled by a specific bit in the first
+ * register range of the domain; when all groups for a given pin are
+ * disabled the pin acts as a GPIO.
+ *
+ * For the pull and GPIO configuration every bank uses a contiguous
+ * set of bits in the register sets described above; the same register
+ * can be shared by more banks with different offsets.
+ *
+ * In addition to this there are some registers shared between all
+ * banks that control the IRQ functionality. This feature is not
+ * supported at the moment by the driver.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-meson.h"
+
+/**
+ * meson_get_bank() - find the bank containing a given pin
+ *
+ * @domain: the domain containing the pin
+ * @pin: the pin number
+ * @bank: the found bank
+ *
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_get_bank(struct meson_domain *domain, unsigned int pin,
+ struct meson_bank **bank)
+{
+ int i;
+
+ for (i = 0; i < domain->data->num_banks; i++) {
+ if (pin >= domain->data->banks[i].first &&
+ pin <= domain->data->banks[i].last) {
+ *bank = &domain->data->banks[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * meson_get_domain_and_bank() - find domain and bank containing a given pin
+ *
+ * @pc: Meson pin controller device
+ * @pin: the pin number
+ * @domain: the found domain
+ * @bank: the found bank
+ *
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_get_domain_and_bank(struct meson_pinctrl *pc, unsigned int pin,
+ struct meson_domain **domain,
+ struct meson_bank **bank)
+{
+ struct meson_domain *d;
+ int i;
+
+ for (i = 0; i < pc->data->num_domains; i++) {
+ d = &pc->domains[i];
+ if (pin >= d->data->pin_base &&
+ pin < d->data->pin_base + d->data->num_pins) {
+ *domain = d;
+ return meson_get_bank(d, pin, bank);
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * meson_calc_reg_and_bit() - calculate register and bit for a pin
+ *
+ * @bank: the bank containing the pin
+ * @pin: the pin number
+ * @reg_type: the type of register needed (pull-enable, pull, etc...)
+ * @reg: the computed register offset
+ * @bit: the computed bit
+ */
+static void meson_calc_reg_and_bit(struct meson_bank *bank, unsigned int pin,
+ enum meson_reg_type reg_type,
+ unsigned int *reg, unsigned int *bit)
+{
+ struct meson_reg_desc *desc = &bank->regs[reg_type];
+
+ *reg = desc->reg * 4;
+ *bit = desc->bit + pin - bank->first;
+}
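+
+/*
+ * Worked example (hypothetical numbers): for a bank whose first pin is
+ * 20 and whose REG_PULL descriptor is { .reg = 3, .bit = 4 }, pin 22
+ * resolves to register byte offset 3 * 4 = 12 and bit 4 + (22 - 20) = 6.
+ */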
+
+static int meson_get_groups_count(struct pinctrl_dev *pcdev)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ return pc->data->num_groups;
+}
+
+static const char *meson_get_group_name(struct pinctrl_dev *pcdev,
+ unsigned selector)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ return pc->data->groups[selector].name;
+}
+
+static int meson_get_group_pins(struct pinctrl_dev *pcdev, unsigned selector,
+ const unsigned **pins, unsigned *num_pins)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ *pins = pc->data->groups[selector].pins;
+ *num_pins = pc->data->groups[selector].num_pins;
+
+ return 0;
+}
+
+static void meson_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " %s", dev_name(pcdev->dev));
+}
+
+static const struct pinctrl_ops meson_pctrl_ops = {
+ .get_groups_count = meson_get_groups_count,
+ .get_group_name = meson_get_group_name,
+ .get_group_pins = meson_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+ .pin_dbg_show = meson_pin_dbg_show,
+};
+
+/**
+ * meson_pmx_disable_other_groups() - disable other groups using a given pin
+ *
+ * @pc: meson pin controller device
+ * @pin: number of the pin
+ * @sel_group: index of the selected group, or -1 if none
+ *
+ * The function disables all pinmux groups using a pin except the
+ * selected one. If @sel_group is -1 all groups are disabled, leaving
+ * the pin in GPIO mode.
+ */
+static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
+ unsigned int pin, int sel_group)
+{
+ struct meson_pmx_group *group;
+ struct meson_domain *domain;
+ int i, j;
+
+ for (i = 0; i < pc->data->num_groups; i++) {
+ group = &pc->data->groups[i];
+ if (group->is_gpio || i == sel_group)
+ continue;
+
+ for (j = 0; j < group->num_pins; j++) {
+ if (group->pins[j] == pin) {
+ /* We have found a group using the pin */
+ domain = &pc->domains[group->domain];
+ regmap_update_bits(domain->reg_mux,
+ group->reg * 4,
+ BIT(group->bit), 0);
+ }
+ }
+ }
+}
+
+static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
+ unsigned group_num)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ struct meson_pmx_group *group = &pc->data->groups[group_num];
+ struct meson_domain *domain = &pc->domains[group->domain];
+ int i, ret = 0;
+
+ dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
+ group->name);
+
+ /*
+ * Disable groups using the same pin.
+ * The selected group is not disabled to avoid glitches.
+ */
+ for (i = 0; i < group->num_pins; i++)
+ meson_pmx_disable_other_groups(pc, group->pins[i], group_num);
+
+ /* Function 0 (GPIO) doesn't need any additional setting */
+ if (func_num)
+ ret = regmap_update_bits(domain->reg_mux, group->reg * 4,
+ BIT(group->bit), BIT(group->bit));
+
+ return ret;
+}
+
+static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
+
+ return 0;
+}
+
+static int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ return pc->data->num_funcs;
+}
+
+static const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ return pc->data->funcs[selector].name;
+}
+
+static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ *groups = pc->data->funcs[selector].groups;
+ *num_groups = pc->data->funcs[selector].num_groups;
+
+ return 0;
+}
+
+static const struct pinmux_ops meson_pmx_ops = {
+ .set_mux = meson_pmx_set_mux,
+ .get_functions_count = meson_pmx_get_funcs_count,
+ .get_function_name = meson_pmx_get_func_name,
+ .get_function_groups = meson_pmx_get_groups,
+ .gpio_request_enable = meson_pmx_request_gpio,
+};
+
+static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ struct meson_domain *domain;
+ struct meson_bank *bank;
+ enum pin_config_param param;
+ unsigned int reg, bit;
+ int i, ret;
+ u16 arg;
+
+ ret = meson_get_domain_and_bank(pc, pin, &domain, &bank);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+ ret = regmap_update_bits(domain->reg_pull, reg,
+ BIT(bit), 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(pc->dev, "pin %u: enable pull-up\n", pin);
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULLEN,
+ &reg, &bit);
+ ret = regmap_update_bits(domain->reg_pullen, reg,
+ BIT(bit), BIT(bit));
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+ ret = regmap_update_bits(domain->reg_pull, reg,
+ BIT(bit), BIT(bit));
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ dev_dbg(pc->dev, "pin %u: enable pull-down\n", pin);
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULLEN,
+ &reg, &bit);
+ ret = regmap_update_bits(domain->reg_pullen, reg,
+ BIT(bit), BIT(bit));
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+ ret = regmap_update_bits(domain->reg_pull, reg,
+ BIT(bit), 0);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int meson_pinconf_get_pull(struct meson_pinctrl *pc, unsigned int pin)
+{
+ struct meson_domain *domain;
+ struct meson_bank *bank;
+ unsigned int reg, bit, val;
+ int ret, conf;
+
+ ret = meson_get_domain_and_bank(pc, pin, &domain, &bank);
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg, &bit);
+
+ ret = regmap_read(domain->reg_pullen, reg, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & BIT(bit))) {
+ conf = PIN_CONFIG_BIAS_DISABLE;
+ } else {
+ meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+
+ ret = regmap_read(domain->reg_pull, reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & BIT(bit))
+ conf = PIN_CONFIG_BIAS_PULL_UP;
+ else
+ conf = PIN_CONFIG_BIAS_PULL_DOWN;
+ }
+
+ return conf;
+}
+
+static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
+ unsigned long *config)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u16 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (meson_pinconf_get_pull(pc, pin) == param)
+ arg = 1;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ dev_dbg(pc->dev, "pinconf for pin %u is %lu\n", pin, *config);
+
+ return 0;
+}
+
+static int meson_pinconf_group_set(struct pinctrl_dev *pcdev,
+ unsigned int num_group,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ struct meson_pmx_group *group = &pc->data->groups[num_group];
+ int i;
+
+ dev_dbg(pc->dev, "set pinconf for group %s\n", group->name);
+
+ for (i = 0; i < group->num_pins; i++) {
+ meson_pinconf_set(pcdev, group->pins[i], configs,
+ num_configs);
+ }
+
+ return 0;
+}
+
+static int meson_pinconf_group_get(struct pinctrl_dev *pcdev,
+ unsigned int group, unsigned long *config)
+{
+ return -ENOSYS;
+}
+
+static const struct pinconf_ops meson_pinconf_ops = {
+ .pin_config_get = meson_pinconf_get,
+ .pin_config_set = meson_pinconf_set,
+ .pin_config_group_get = meson_pinconf_group_get,
+ .pin_config_group_set = meson_pinconf_group_set,
+ .is_generic = true,
+};
+
+static inline struct meson_domain *to_meson_domain(struct gpio_chip *chip)
+{
+ return container_of(chip, struct meson_domain, chip);
+}
+
+static int meson_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ return pinctrl_request_gpio(chip->base + gpio);
+}
+
+static void meson_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct meson_domain *domain = to_meson_domain(chip);
+
+ pinctrl_free_gpio(domain->data->pin_base + gpio);
+}
+
+static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct meson_domain *domain = to_meson_domain(chip);
+ unsigned int reg, bit, pin;
+ struct meson_bank *bank;
+ int ret;
+
+ pin = domain->data->pin_base + gpio;
+ ret = meson_get_bank(domain, pin, &bank);
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+
+ return regmap_update_bits(domain->reg_gpio, reg, BIT(bit), BIT(bit));
+}
+
+static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct meson_domain *domain = to_meson_domain(chip);
+ unsigned int reg, bit, pin;
+ struct meson_bank *bank;
+ int ret;
+
+ pin = domain->data->pin_base + gpio;
+ ret = meson_get_bank(domain, pin, &bank);
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ ret = regmap_update_bits(domain->reg_gpio, reg, BIT(bit), 0);
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ return regmap_update_bits(domain->reg_gpio, reg, BIT(bit),
+ value ? BIT(bit) : 0);
+}
+
+static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ struct meson_domain *domain = to_meson_domain(chip);
+ unsigned int reg, bit, pin;
+ struct meson_bank *bank;
+ int ret;
+
+ pin = domain->data->pin_base + gpio;
+ ret = meson_get_bank(domain, pin, &bank);
+ if (ret)
+ return;
+
+ meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ regmap_update_bits(domain->reg_gpio, reg, BIT(bit),
+ value ? BIT(bit) : 0);
+}
+
+static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ struct meson_domain *domain = to_meson_domain(chip);
+ unsigned int reg, bit, val, pin;
+ struct meson_bank *bank;
+ int ret;
+
+ pin = domain->data->pin_base + gpio;
+ ret = meson_get_bank(domain, pin, &bank);
+ if (ret)
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_IN, &reg, &bit);
+ regmap_read(domain->reg_gpio, reg, &val);
+
+ return !!(val & BIT(bit));
+}
+
+static const struct of_device_id meson_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8-pinctrl",
+ .data = &meson8_pinctrl_data,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, meson_pinctrl_dt_match);
+
+static int meson_gpiolib_register(struct meson_pinctrl *pc)
+{
+ struct meson_domain *domain;
+ int i, ret;
+
+ for (i = 0; i < pc->data->num_domains; i++) {
+ domain = &pc->domains[i];
+
+ domain->chip.label = domain->data->name;
+ domain->chip.dev = pc->dev;
+ domain->chip.request = meson_gpio_request;
+ domain->chip.free = meson_gpio_free;
+ domain->chip.direction_input = meson_gpio_direction_input;
+ domain->chip.direction_output = meson_gpio_direction_output;
+ domain->chip.get = meson_gpio_get;
+ domain->chip.set = meson_gpio_set;
+ domain->chip.base = -1;
+ domain->chip.ngpio = domain->data->num_pins;
+ domain->chip.can_sleep = false;
+ domain->chip.of_node = domain->of_node;
+ domain->chip.of_gpio_n_cells = 2;
+
+ ret = gpiochip_add(&domain->chip);
+ if (ret) {
+ dev_err(pc->dev, "can't add gpio chip %s\n",
+ domain->data->name);
+ goto fail;
+ }
+
+ ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev),
+ 0, domain->data->pin_base,
+ domain->chip.ngpio);
+ if (ret) {
+ dev_err(pc->dev, "can't add pin range\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ for (i--; i >= 0; i--)
+ gpiochip_remove(&pc->domains[i].chip);
+
+ return ret;
+}
+
+static struct meson_domain_data *meson_get_domain_data(struct meson_pinctrl *pc,
+ struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < pc->data->num_domains; i++) {
+ if (!strcmp(np->name, pc->data->domain_data[i].name))
+ return &pc->data->domain_data[i];
+ }
+
+ return NULL;
+}
+
+static struct regmap_config meson_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
+ struct device_node *node, char *name)
+{
+ struct resource res;
+ void __iomem *base;
+ int i;
+
+ i = of_property_match_string(node, "reg-names", name);
+ if (of_address_to_resource(node, i, &res))
+ return ERR_PTR(-ENOENT);
+
+ base = devm_ioremap_resource(pc->dev, &res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ meson_regmap_config.max_register = resource_size(&res) - 4;
+ meson_regmap_config.name = devm_kasprintf(pc->dev, GFP_KERNEL,
+ "%s-%s", node->name,
+ name);
+ if (!meson_regmap_config.name)
+ return ERR_PTR(-ENOMEM);
+
+ return devm_regmap_init_mmio(pc->dev, base, &meson_regmap_config);
+}
+
+static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
+ struct device_node *node)
+{
+ struct device_node *np;
+ struct meson_domain *domain;
+ int i = 0, num_domains = 0;
+
+ for_each_child_of_node(node, np) {
+ if (!of_find_property(np, "gpio-controller", NULL))
+ continue;
+ num_domains++;
+ }
+
+ if (num_domains != pc->data->num_domains) {
+ dev_err(pc->dev, "wrong number of subnodes\n");
+ return -EINVAL;
+ }
+
+ pc->domains = devm_kzalloc(pc->dev, num_domains *
+ sizeof(struct meson_domain), GFP_KERNEL);
+ if (!pc->domains)
+ return -ENOMEM;
+
+ for_each_child_of_node(node, np) {
+ if (!of_find_property(np, "gpio-controller", NULL))
+ continue;
+
+ domain = &pc->domains[i];
+
+ domain->data = meson_get_domain_data(pc, np);
+ if (!domain->data) {
+ dev_err(pc->dev, "domain data not found for node %s\n",
+ np->name);
+ return -ENODEV;
+ }
+
+ domain->of_node = np;
+
+ domain->reg_mux = meson_map_resource(pc, np, "mux");
+ if (IS_ERR(domain->reg_mux)) {
+ dev_err(pc->dev, "mux registers not found\n");
+ return PTR_ERR(domain->reg_mux);
+ }
+
+ domain->reg_pull = meson_map_resource(pc, np, "pull");
+ if (IS_ERR(domain->reg_pull)) {
+ dev_err(pc->dev, "pull registers not found\n");
+ return PTR_ERR(domain->reg_pull);
+ }
+
+ domain->reg_pullen = meson_map_resource(pc, np, "pull-enable");
+ /* Use pull region if pull-enable one is not present */
+ if (IS_ERR(domain->reg_pullen))
+ domain->reg_pullen = domain->reg_pull;
+
+ domain->reg_gpio = meson_map_resource(pc, np, "gpio");
+ if (IS_ERR(domain->reg_gpio)) {
+ dev_err(pc->dev, "gpio registers not found\n");
+ return PTR_ERR(domain->reg_gpio);
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int meson_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct meson_pinctrl *pc;
+ int ret;
+
+ pc = devm_kzalloc(dev, sizeof(struct meson_pinctrl), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ pc->dev = dev;
+ match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
+ pc->data = (struct meson_pinctrl_data *)match->data;
+
+ ret = meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
+ if (ret)
+ return ret;
+
+ pc->desc.name = "pinctrl-meson";
+ pc->desc.owner = THIS_MODULE;
+ pc->desc.pctlops = &meson_pctrl_ops;
+ pc->desc.pmxops = &meson_pmx_ops;
+ pc->desc.confops = &meson_pinconf_ops;
+ pc->desc.pins = pc->data->pins;
+ pc->desc.npins = pc->data->num_pins;
+
+ pc->pcdev = pinctrl_register(&pc->desc, pc->dev, pc);
+ if (!pc->pcdev) {
+ dev_err(pc->dev, "can't register pinctrl device");
+ return -EINVAL;
+ }
+
+ ret = meson_gpiolib_register(pc);
+ if (ret) {
+ pinctrl_unregister(pc->pcdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver meson_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-pinctrl",
+ .of_match_table = meson_pinctrl_dt_match,
+ },
+};
+module_platform_driver(meson_pinctrl_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson pinctrl driver");
+MODULE_LICENSE("GPL v2");
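
The pinconf callbacks above map the three generic bias settings onto two register bits per pin: REG_PULLEN selects whether the pull resistor is connected at all, and REG_PULL selects its direction. A minimal sketch of that decoding, mirroring meson_pinconf_get_pull(); the standalone helper below is illustrative only and not part of the patch.

#include <linux/pinctrl/pinconf-generic.h>

/* Illustrative only: decode the (pull-enable, pull-direction) bit pair the
 * same way meson_pinconf_get_pull() does above. */
static enum pin_config_param meson_bias_from_bits(bool pullen, bool pull)
{
	if (!pullen)
		return PIN_CONFIG_BIAS_DISABLE;		/* resistor disconnected */

	return pull ? PIN_CONFIG_BIAS_PULL_UP		/* enabled, pulling high */
		    : PIN_CONFIG_BIAS_PULL_DOWN;	/* enabled, pulling low */
}
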
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
new file mode 100644
index 000000000000..bfea8adc7953
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -0,0 +1,209 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+/**
+ * struct meson_pmx_group - a pinmux group
+ *
+ * @name: group name
+ * @pins: pins in the group
+ * @num_pins: number of pins in the group
+ * @is_gpio: whether the group is a single GPIO group
+ * @reg: register offset for the group in the domain mux registers
+ * @bit: bit index enabling the group
+ * @domain: index of the domain this group belongs to
+ */
+struct meson_pmx_group {
+ const char *name;
+ const unsigned int *pins;
+ unsigned int num_pins;
+ bool is_gpio;
+ unsigned int reg;
+ unsigned int bit;
+ unsigned int domain;
+};
+
+/**
+ * struct meson_pmx_func - a pinmux function
+ *
+ * @name: function name
+ * @groups: groups in the function
+ * @num_groups: number of groups in the function
+ */
+struct meson_pmx_func {
+ const char *name;
+ const char * const *groups;
+ unsigned int num_groups;
+};
+
+/**
+ * struct meson_reg_desc - a register descriptor
+ *
+ * @reg: register offset in the regmap
+ * @bit: bit index in register
+ *
+ * The structure describes the information needed to control pull,
+ * pull-enable, direction, etc. for a single pin
+ */
+struct meson_reg_desc {
+ unsigned int reg;
+ unsigned int bit;
+};
+
+/**
+ * enum meson_reg_type - type of registers encoded in struct meson_reg_desc
+ */
+enum meson_reg_type {
+ REG_PULLEN,
+ REG_PULL,
+ REG_DIR,
+ REG_OUT,
+ REG_IN,
+ NUM_REG,
+};
+
+/**
+ * struct meson_bank
+ *
+ * @name: bank name
+ * @first: first pin of the bank
+ * @last: last pin of the bank
+ * @regs: array of register descriptors
+ *
+ * A bank represents a set of pins controlled by a contiguous set of
+ * bits in the domain registers. The structure specifies which bits in
+ * the regmap control the different functionalities. Each member of
+ * the @regs array refers to the first pin of the bank.
+ */
+struct meson_bank {
+ const char *name;
+ unsigned int first;
+ unsigned int last;
+ struct meson_reg_desc regs[NUM_REG];
+};
+
+/**
+ * struct meson_domain_data - domain platform data
+ *
+ * @name: name of the domain
+ * @banks: set of banks belonging to the domain
+ * @num_banks: number of banks in the domain
+ * @pin_base: global number of the first pin in the domain
+ * @num_pins: number of pins in the domain
+ */
+struct meson_domain_data {
+ const char *name;
+ struct meson_bank *banks;
+ unsigned int num_banks;
+ unsigned int pin_base;
+ unsigned int num_pins;
+};
+
+/**
+ * struct meson_domain
+ *
+ * @reg_mux: registers for mux settings
+ * @reg_pullen: registers for pull-enable settings
+ * @reg_pull: registers for pull settings
+ * @reg_gpio: registers for gpio settings
+ * @chip: gpio chip associated with the domain
+ * @data: platform data for the domain
+ * @of_node: device tree node for the domain
+ *
+ * A domain represents a set of banks controlled by the same set of
+ * registers.
+ */
+struct meson_domain {
+ struct regmap *reg_mux;
+ struct regmap *reg_pullen;
+ struct regmap *reg_pull;
+ struct regmap *reg_gpio;
+
+ struct gpio_chip chip;
+ struct meson_domain_data *data;
+ struct device_node *of_node;
+};
+
+struct meson_pinctrl_data {
+ const struct pinctrl_pin_desc *pins;
+ struct meson_pmx_group *groups;
+ struct meson_pmx_func *funcs;
+ struct meson_domain_data *domain_data;
+ unsigned int num_pins;
+ unsigned int num_groups;
+ unsigned int num_funcs;
+ unsigned int num_domains;
+};
+
+struct meson_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pcdev;
+ struct pinctrl_desc desc;
+ struct meson_pinctrl_data *data;
+ struct meson_domain *domains;
+};
+
+#define GROUP(grp, r, b) \
+ { \
+ .name = #grp, \
+ .pins = grp ## _pins, \
+ .num_pins = ARRAY_SIZE(grp ## _pins), \
+ .reg = r, \
+ .bit = b, \
+ .domain = 0, \
+ }
+
+#define GPIO_GROUP(gpio) \
+ { \
+ .name = #gpio, \
+ .pins = (const unsigned int[]){ PIN_ ## gpio }, \
+ .num_pins = 1, \
+ .is_gpio = true, \
+ }
+
+#define GROUP_AO(grp, r, b) \
+ { \
+ .name = #grp, \
+ .pins = grp ## _pins, \
+ .num_pins = ARRAY_SIZE(grp ## _pins), \
+ .reg = r, \
+ .bit = b, \
+ .domain = 1, \
+ }
+
+#define FUNCTION(fn) \
+ { \
+ .name = #fn, \
+ .groups = fn ## _groups, \
+ .num_groups = ARRAY_SIZE(fn ## _groups), \
+ }
+
+#define BANK(n, f, l, per, peb, pr, pb, dr, db, or, ob, ir, ib) \
+ { \
+ .name = n, \
+ .first = f, \
+ .last = l, \
+ .regs = { \
+ [REG_PULLEN] = { per, peb }, \
+ [REG_PULL] = { pr, pb }, \
+ [REG_DIR] = { dr, db }, \
+ [REG_OUT] = { or, ob }, \
+ [REG_IN] = { ir, ib }, \
+ }, \
+ }
+
+#define MESON_PIN(x) PINCTRL_PIN(PIN_ ## x, #x)
+
+extern struct meson_pinctrl_data meson8_pinctrl_data;
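
The GROUP(), GPIO_GROUP() and FUNCTION() macros above stringify their argument and splice in the matching *_pins / *_groups arrays, so a per-SoC file only has to list names plus the mux register/bit pair. As an illustration (not additional driver code), GROUP(sd_d0_a, 8, 5) from the Meson8 tables in the next file expands to roughly:

	{
		.name      = "sd_d0_a",
		.pins      = sd_d0_a_pins,             /* { PIN_GPIOX_0 } */
		.num_pins  = ARRAY_SIZE(sd_d0_a_pins),
		.reg       = 8,                        /* mux register index */
		.bit       = 5,                        /* bit enabling the group */
		.domain    = 0,                        /* non-AO register block */
	}
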
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
new file mode 100644
index 000000000000..f8aa3a281767
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -0,0 +1,1089 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson8.
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/gpio/meson8-gpio.h>
+#include "pinctrl-meson.h"
+
+#define AO_OFFSET 120
+
+#define PIN_GPIOX_0 GPIOX_0
+#define PIN_GPIOX_1 GPIOX_1
+#define PIN_GPIOX_2 GPIOX_2
+#define PIN_GPIOX_3 GPIOX_3
+#define PIN_GPIOX_4 GPIOX_4
+#define PIN_GPIOX_5 GPIOX_5
+#define PIN_GPIOX_6 GPIOX_6
+#define PIN_GPIOX_7 GPIOX_7
+#define PIN_GPIOX_8 GPIOX_8
+#define PIN_GPIOX_9 GPIOX_9
+#define PIN_GPIOX_10 GPIOX_10
+#define PIN_GPIOX_11 GPIOX_11
+#define PIN_GPIOX_12 GPIOX_12
+#define PIN_GPIOX_13 GPIOX_13
+#define PIN_GPIOX_14 GPIOX_14
+#define PIN_GPIOX_15 GPIOX_15
+#define PIN_GPIOX_16 GPIOX_16
+#define PIN_GPIOX_17 GPIOX_17
+#define PIN_GPIOX_18 GPIOX_18
+#define PIN_GPIOX_19 GPIOX_19
+#define PIN_GPIOX_20 GPIOX_20
+#define PIN_GPIOX_21 GPIOX_21
+#define PIN_GPIOY_0 GPIOY_0
+#define PIN_GPIOY_1 GPIOY_1
+#define PIN_GPIOY_2 GPIOY_2
+#define PIN_GPIOY_3 GPIOY_3
+#define PIN_GPIOY_4 GPIOY_4
+#define PIN_GPIOY_5 GPIOY_5
+#define PIN_GPIOY_6 GPIOY_6
+#define PIN_GPIOY_7 GPIOY_7
+#define PIN_GPIOY_8 GPIOY_8
+#define PIN_GPIOY_9 GPIOY_9
+#define PIN_GPIOY_10 GPIOY_10
+#define PIN_GPIOY_11 GPIOY_11
+#define PIN_GPIOY_12 GPIOY_12
+#define PIN_GPIOY_13 GPIOY_13
+#define PIN_GPIOY_14 GPIOY_14
+#define PIN_GPIOY_15 GPIOY_15
+#define PIN_GPIOY_16 GPIOY_16
+#define PIN_GPIODV_0 GPIODV_0
+#define PIN_GPIODV_1 GPIODV_1
+#define PIN_GPIODV_2 GPIODV_2
+#define PIN_GPIODV_3 GPIODV_3
+#define PIN_GPIODV_4 GPIODV_4
+#define PIN_GPIODV_5 GPIODV_5
+#define PIN_GPIODV_6 GPIODV_6
+#define PIN_GPIODV_7 GPIODV_7
+#define PIN_GPIODV_8 GPIODV_8
+#define PIN_GPIODV_9 GPIODV_9
+#define PIN_GPIODV_10 GPIODV_10
+#define PIN_GPIODV_11 GPIODV_11
+#define PIN_GPIODV_12 GPIODV_12
+#define PIN_GPIODV_13 GPIODV_13
+#define PIN_GPIODV_14 GPIODV_14
+#define PIN_GPIODV_15 GPIODV_15
+#define PIN_GPIODV_16 GPIODV_16
+#define PIN_GPIODV_17 GPIODV_17
+#define PIN_GPIODV_18 GPIODV_18
+#define PIN_GPIODV_19 GPIODV_19
+#define PIN_GPIODV_20 GPIODV_20
+#define PIN_GPIODV_21 GPIODV_21
+#define PIN_GPIODV_22 GPIODV_22
+#define PIN_GPIODV_23 GPIODV_23
+#define PIN_GPIODV_24 GPIODV_24
+#define PIN_GPIODV_25 GPIODV_25
+#define PIN_GPIODV_26 GPIODV_26
+#define PIN_GPIODV_27 GPIODV_27
+#define PIN_GPIODV_28 GPIODV_28
+#define PIN_GPIODV_29 GPIODV_29
+#define PIN_GPIOH_0 GPIOH_0
+#define PIN_GPIOH_1 GPIOH_1
+#define PIN_GPIOH_2 GPIOH_2
+#define PIN_GPIOH_3 GPIOH_3
+#define PIN_GPIOH_4 GPIOH_4
+#define PIN_GPIOH_5 GPIOH_5
+#define PIN_GPIOH_6 GPIOH_6
+#define PIN_GPIOH_7 GPIOH_7
+#define PIN_GPIOH_8 GPIOH_8
+#define PIN_GPIOH_9 GPIOH_9
+#define PIN_GPIOZ_0 GPIOZ_0
+#define PIN_GPIOZ_1 GPIOZ_1
+#define PIN_GPIOZ_2 GPIOZ_2
+#define PIN_GPIOZ_3 GPIOZ_3
+#define PIN_GPIOZ_4 GPIOZ_4
+#define PIN_GPIOZ_5 GPIOZ_5
+#define PIN_GPIOZ_6 GPIOZ_6
+#define PIN_GPIOZ_7 GPIOZ_7
+#define PIN_GPIOZ_8 GPIOZ_8
+#define PIN_GPIOZ_9 GPIOZ_9
+#define PIN_GPIOZ_10 GPIOZ_10
+#define PIN_GPIOZ_11 GPIOZ_11
+#define PIN_GPIOZ_12 GPIOZ_12
+#define PIN_GPIOZ_13 GPIOZ_13
+#define PIN_GPIOZ_14 GPIOZ_14
+#define PIN_CARD_0 CARD_0
+#define PIN_CARD_1 CARD_1
+#define PIN_CARD_2 CARD_2
+#define PIN_CARD_3 CARD_3
+#define PIN_CARD_4 CARD_4
+#define PIN_CARD_5 CARD_5
+#define PIN_CARD_6 CARD_6
+#define PIN_BOOT_0 BOOT_0
+#define PIN_BOOT_1 BOOT_1
+#define PIN_BOOT_2 BOOT_2
+#define PIN_BOOT_3 BOOT_3
+#define PIN_BOOT_4 BOOT_4
+#define PIN_BOOT_5 BOOT_5
+#define PIN_BOOT_6 BOOT_6
+#define PIN_BOOT_7 BOOT_7
+#define PIN_BOOT_8 BOOT_8
+#define PIN_BOOT_9 BOOT_9
+#define PIN_BOOT_10 BOOT_10
+#define PIN_BOOT_11 BOOT_11
+#define PIN_BOOT_12 BOOT_12
+#define PIN_BOOT_13 BOOT_13
+#define PIN_BOOT_14 BOOT_14
+#define PIN_BOOT_15 BOOT_15
+#define PIN_BOOT_16 BOOT_16
+#define PIN_BOOT_17 BOOT_17
+#define PIN_BOOT_18 BOOT_18
+
+#define PIN_GPIOAO_0 (AO_OFFSET + GPIOAO_0)
+#define PIN_GPIOAO_1 (AO_OFFSET + GPIOAO_1)
+#define PIN_GPIOAO_2 (AO_OFFSET + GPIOAO_2)
+#define PIN_GPIOAO_3 (AO_OFFSET + GPIOAO_3)
+#define PIN_GPIOAO_4 (AO_OFFSET + GPIOAO_4)
+#define PIN_GPIOAO_5 (AO_OFFSET + GPIOAO_5)
+#define PIN_GPIOAO_6 (AO_OFFSET + GPIOAO_6)
+#define PIN_GPIOAO_7 (AO_OFFSET + GPIOAO_7)
+#define PIN_GPIOAO_8 (AO_OFFSET + GPIOAO_8)
+#define PIN_GPIOAO_9 (AO_OFFSET + GPIOAO_9)
+#define PIN_GPIOAO_10 (AO_OFFSET + GPIOAO_10)
+#define PIN_GPIOAO_11 (AO_OFFSET + GPIOAO_11)
+#define PIN_GPIOAO_12 (AO_OFFSET + GPIOAO_12)
+#define PIN_GPIOAO_13 (AO_OFFSET + GPIOAO_13)
+#define PIN_GPIO_BSD_EN (AO_OFFSET + GPIO_BSD_EN)
+#define PIN_GPIO_TEST_N (AO_OFFSET + GPIO_TEST_N)
+
+static const struct pinctrl_pin_desc meson8_pins[] = {
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
+};
+
+/* bank X */
+static const unsigned int sd_d0_a_pins[] = { PIN_GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { PIN_GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { PIN_GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { PIN_GPIOX_3 };
+static const unsigned int sd_clk_a_pins[] = { PIN_GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { PIN_GPIOX_9 };
+
+static const unsigned int sdxc_d0_a_pins[] = { PIN_GPIOX_0 };
+static const unsigned int sdxc_d13_a_pins[] = { PIN_GPIOX_1, PIN_GPIOX_2,
+ PIN_GPIOX_3 };
+static const unsigned int sdxc_d47_a_pins[] = { PIN_GPIOX_4, PIN_GPIOX_5,
+ PIN_GPIOX_6, PIN_GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { PIN_GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { PIN_GPIOX_9 };
+
+static const unsigned int pcm_out_a_pins[] = { PIN_GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { PIN_GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { PIN_GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { PIN_GPIOX_7 };
+
+static const unsigned int uart_tx_a0_pins[] = { PIN_GPIOX_4 };
+static const unsigned int uart_rx_a0_pins[] = { PIN_GPIOX_5 };
+static const unsigned int uart_cts_a0_pins[] = { PIN_GPIOX_6 };
+static const unsigned int uart_rts_a0_pins[] = { PIN_GPIOX_7 };
+
+static const unsigned int uart_tx_a1_pins[] = { PIN_GPIOX_12 };
+static const unsigned int uart_rx_a1_pins[] = { PIN_GPIOX_13 };
+static const unsigned int uart_cts_a1_pins[] = { PIN_GPIOX_14 };
+static const unsigned int uart_rts_a1_pins[] = { PIN_GPIOX_15 };
+
+static const unsigned int uart_tx_b0_pins[] = { PIN_GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { PIN_GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { PIN_GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { PIN_GPIOX_19 };
+
+static const unsigned int iso7816_det_pins[] = { PIN_GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { PIN_GPIOX_17 };
+static const unsigned int iso7816_clk_pins[] = { PIN_GPIOX_18 };
+static const unsigned int iso7816_data_pins[] = { PIN_GPIOX_19 };
+
+static const unsigned int i2c_sda_d0_pins[] = { PIN_GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { PIN_GPIOX_17 };
+
+static const unsigned int xtal_32k_out_pins[] = { PIN_GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { PIN_GPIOX_11 };
+
+/* bank Y */
+static const unsigned int uart_tx_c_pins[] = { PIN_GPIOY_0 };
+static const unsigned int uart_rx_c_pins[] = { PIN_GPIOY_1 };
+static const unsigned int uart_cts_c_pins[] = { PIN_GPIOY_2 };
+static const unsigned int uart_rts_c_pins[] = { PIN_GPIOY_3 };
+
+static const unsigned int pcm_out_b_pins[] = { PIN_GPIOY_4 };
+static const unsigned int pcm_in_b_pins[] = { PIN_GPIOY_5 };
+static const unsigned int pcm_fs_b_pins[] = { PIN_GPIOY_6 };
+static const unsigned int pcm_clk_b_pins[] = { PIN_GPIOY_7 };
+
+static const unsigned int i2c_sda_c0_pins[] = { PIN_GPIOY_0 };
+static const unsigned int i2c_sck_c0_pins[] = { PIN_GPIOY_1 };
+
+/* bank DV */
+static const unsigned int dvin_rgb_pins[] = { PIN_GPIODV_0, PIN_GPIODV_1,
+ PIN_GPIODV_2, PIN_GPIODV_3,
+ PIN_GPIODV_4, PIN_GPIODV_5,
+ PIN_GPIODV_6, PIN_GPIODV_7,
+ PIN_GPIODV_8, PIN_GPIODV_9,
+ PIN_GPIODV_10, PIN_GPIODV_11,
+ PIN_GPIODV_12, PIN_GPIODV_13,
+ PIN_GPIODV_14, PIN_GPIODV_15,
+ PIN_GPIODV_16, PIN_GPIODV_17,
+ PIN_GPIODV_18, PIN_GPIODV_19,
+ PIN_GPIODV_20, PIN_GPIODV_21,
+ PIN_GPIODV_22, PIN_GPIODV_23 };
+static const unsigned int dvin_vs_pins[] = { PIN_GPIODV_24 };
+static const unsigned int dvin_hs_pins[] = { PIN_GPIODV_25 };
+static const unsigned int dvin_clk_pins[] = { PIN_GPIODV_26 };
+static const unsigned int dvin_de_pins[] = { PIN_GPIODV_27 };
+
+static const unsigned int enc_0_pins[] = { PIN_GPIODV_0 };
+static const unsigned int enc_1_pins[] = { PIN_GPIODV_1 };
+static const unsigned int enc_2_pins[] = { PIN_GPIODV_2 };
+static const unsigned int enc_3_pins[] = { PIN_GPIODV_3 };
+static const unsigned int enc_4_pins[] = { PIN_GPIODV_4 };
+static const unsigned int enc_5_pins[] = { PIN_GPIODV_5 };
+static const unsigned int enc_6_pins[] = { PIN_GPIODV_6 };
+static const unsigned int enc_7_pins[] = { PIN_GPIODV_7 };
+static const unsigned int enc_8_pins[] = { PIN_GPIODV_8 };
+static const unsigned int enc_9_pins[] = { PIN_GPIODV_9 };
+static const unsigned int enc_10_pins[] = { PIN_GPIODV_10 };
+static const unsigned int enc_11_pins[] = { PIN_GPIODV_11 };
+static const unsigned int enc_12_pins[] = { PIN_GPIODV_12 };
+static const unsigned int enc_13_pins[] = { PIN_GPIODV_13 };
+static const unsigned int enc_14_pins[] = { PIN_GPIODV_14 };
+static const unsigned int enc_15_pins[] = { PIN_GPIODV_15 };
+static const unsigned int enc_16_pins[] = { PIN_GPIODV_16 };
+static const unsigned int enc_17_pins[] = { PIN_GPIODV_17 };
+
+static const unsigned int uart_tx_b1_pins[] = { PIN_GPIODV_24 };
+static const unsigned int uart_rx_b1_pins[] = { PIN_GPIODV_25 };
+static const unsigned int uart_cts_b1_pins[] = { PIN_GPIODV_26 };
+static const unsigned int uart_rts_b1_pins[] = { PIN_GPIODV_27 };
+
+static const unsigned int vga_vs_pins[] = { PIN_GPIODV_24 };
+static const unsigned int vga_hs_pins[] = { PIN_GPIODV_25 };
+
+/* bank H */
+static const unsigned int hdmi_hpd_pins[] = { PIN_GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { PIN_GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { PIN_GPIOH_2 };
+static const unsigned int hdmi_cec_pins[] = { PIN_GPIOH_3 };
+
+static const unsigned int spi_ss0_0_pins[] = { PIN_GPIOH_3 };
+static const unsigned int spi_miso_0_pins[] = { PIN_GPIOH_4 };
+static const unsigned int spi_mosi_0_pins[] = { PIN_GPIOH_5 };
+static const unsigned int spi_sclk_0_pins[] = { PIN_GPIOH_6 };
+
+static const unsigned int i2c_sda_d1_pins[] = { PIN_GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { PIN_GPIOH_8 };
+
+/* bank Z */
+static const unsigned int spi_ss0_1_pins[] = { PIN_GPIOZ_9 };
+static const unsigned int spi_ss1_1_pins[] = { PIN_GPIOZ_10 };
+static const unsigned int spi_sclk_1_pins[] = { PIN_GPIOZ_11 };
+static const unsigned int spi_mosi_1_pins[] = { PIN_GPIOZ_12 };
+static const unsigned int spi_miso_1_pins[] = { PIN_GPIOZ_13 };
+static const unsigned int spi_ss2_1_pins[] = { PIN_GPIOZ_14 };
+
+static const unsigned int eth_tx_clk_50m_pins[] = { PIN_GPIOZ_4 };
+static const unsigned int eth_tx_en_pins[] = { PIN_GPIOZ_5 };
+static const unsigned int eth_txd1_pins[] = { PIN_GPIOZ_6 };
+static const unsigned int eth_txd0_pins[] = { PIN_GPIOZ_7 };
+static const unsigned int eth_rx_clk_in_pins[] = { PIN_GPIOZ_8 };
+static const unsigned int eth_rx_dv_pins[] = { PIN_GPIOZ_9 };
+static const unsigned int eth_rxd1_pins[] = { PIN_GPIOZ_10 };
+static const unsigned int eth_rxd0_pins[] = { PIN_GPIOZ_11 };
+static const unsigned int eth_mdio_pins[] = { PIN_GPIOZ_12 };
+static const unsigned int eth_mdc_pins[] = { PIN_GPIOZ_13 };
+
+static const unsigned int i2c_sda_a0_pins[] = { PIN_GPIOZ_0 };
+static const unsigned int i2c_sck_a0_pins[] = { PIN_GPIOZ_1 };
+
+static const unsigned int i2c_sda_b_pins[] = { PIN_GPIOZ_2 };
+static const unsigned int i2c_sck_b_pins[] = { PIN_GPIOZ_3 };
+
+static const unsigned int i2c_sda_c1_pins[] = { PIN_GPIOZ_4 };
+static const unsigned int i2c_sck_c1_pins[] = { PIN_GPIOZ_5 };
+
+static const unsigned int i2c_sda_a1_pins[] = { PIN_GPIOZ_0 };
+static const unsigned int i2c_sck_a1_pins[] = { PIN_GPIOZ_1 };
+
+static const unsigned int i2c_sda_a2_pins[] = { PIN_GPIOZ_0 };
+static const unsigned int i2c_sck_a2_pins[] = { PIN_GPIOZ_1 };
+
+/* bank BOOT */
+static const unsigned int sd_d0_c_pins[] = { PIN_BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { PIN_BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { PIN_BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { PIN_BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { PIN_BOOT_16 };
+static const unsigned int sd_clk_c_pins[] = { PIN_BOOT_17 };
+
+static const unsigned int sdxc_d0_c_pins[] = { PIN_BOOT_0 };
+static const unsigned int sdxc_d13_c_pins[] = { PIN_BOOT_1, PIN_BOOT_2,
+ PIN_BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { PIN_BOOT_4, PIN_BOOT_5,
+ PIN_BOOT_6, PIN_BOOT_7 };
+static const unsigned int sdxc_cmd_c_pins[] = { PIN_BOOT_16 };
+static const unsigned int sdxc_clk_c_pins[] = { PIN_BOOT_17 };
+
+static const unsigned int nand_io_pins[] = { PIN_BOOT_0, PIN_BOOT_1,
+ PIN_BOOT_2, PIN_BOOT_3,
+ PIN_BOOT_4, PIN_BOOT_5,
+ PIN_BOOT_6, PIN_BOOT_7 };
+static const unsigned int nand_io_ce0_pins[] = { PIN_BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { PIN_BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { PIN_BOOT_10 };
+static const unsigned int nand_ale_pins[] = { PIN_BOOT_11 };
+static const unsigned int nand_cle_pins[] = { PIN_BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { PIN_BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { PIN_BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { PIN_BOOT_15 };
+static const unsigned int nand_ce2_pins[] = { PIN_BOOT_16 };
+static const unsigned int nand_ce3_pins[] = { PIN_BOOT_17 };
+
+static const unsigned int nor_d_pins[] = { PIN_BOOT_11 };
+static const unsigned int nor_q_pins[] = { PIN_BOOT_12 };
+static const unsigned int nor_c_pins[] = { PIN_BOOT_13 };
+static const unsigned int nor_cs_pins[] = { PIN_BOOT_18 };
+
+/* bank CARD */
+static const unsigned int sd_d1_b_pins[] = { PIN_CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { PIN_CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { PIN_CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { PIN_CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { PIN_CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { PIN_CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { PIN_CARD_0, PIN_CARD_4,
+ PIN_CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { PIN_CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { PIN_CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { PIN_CARD_3 };
+
+/* bank AO */
+static const unsigned int uart_tx_ao_a_pins[] = { PIN_GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { PIN_GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { PIN_GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { PIN_GPIOAO_3 };
+
+static const unsigned int remote_input_pins[] = { PIN_GPIOAO_7 };
+
+static const unsigned int i2c_slave_sck_ao_pins[] = { PIN_GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = { PIN_GPIOAO_5 };
+
+static const unsigned int uart_tx_ao_b0_pins[] = { PIN_GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { PIN_GPIOAO_1 };
+
+static const unsigned int uart_tx_ao_b1_pins[] = { PIN_GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { PIN_GPIOAO_5 };
+
+static const unsigned int i2c_mst_sck_ao_pins[] = { PIN_GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { PIN_GPIOAO_5 };
+
+static struct meson_pmx_group meson8_groups[] = {
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_18),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
+
+ /* bank X */
+ GROUP(sd_d0_a, 8, 5),
+ GROUP(sd_d1_a, 8, 4),
+ GROUP(sd_d2_a, 8, 3),
+ GROUP(sd_d3_a, 8, 2),
+ GROUP(sd_clk_a, 8, 1),
+ GROUP(sd_cmd_a, 8, 0),
+
+ GROUP(sdxc_d0_a, 5, 14),
+ GROUP(sdxc_d13_a, 5, 13),
+ GROUP(sdxc_d47_a, 5, 12),
+ GROUP(sdxc_clk_a, 5, 11),
+ GROUP(sdxc_cmd_a, 5, 10),
+
+ GROUP(pcm_out_a, 3, 30),
+ GROUP(pcm_in_a, 3, 29),
+ GROUP(pcm_fs_a, 3, 28),
+ GROUP(pcm_clk_a, 3, 27),
+
+ GROUP(uart_tx_a0, 4, 17),
+ GROUP(uart_rx_a0, 4, 16),
+ GROUP(uart_cts_a0, 4, 15),
+ GROUP(uart_rts_a0, 4, 14),
+
+ GROUP(uart_tx_a1, 4, 13),
+ GROUP(uart_rx_a1, 4, 12),
+ GROUP(uart_cts_a1, 4, 11),
+ GROUP(uart_rts_a1, 4, 10),
+
+ GROUP(uart_tx_b0, 4, 9),
+ GROUP(uart_rx_b0, 4, 8),
+ GROUP(uart_cts_b0, 4, 7),
+ GROUP(uart_rts_b0, 4, 6),
+
+ GROUP(iso7816_det, 4, 21),
+ GROUP(iso7816_reset, 4, 20),
+ GROUP(iso7816_clk, 4, 19),
+ GROUP(iso7816_data, 4, 18),
+
+ GROUP(i2c_sda_d0, 4, 5),
+ GROUP(i2c_sck_d0, 4, 4),
+
+ GROUP(xtal_32k_out, 3, 22),
+ GROUP(xtal_24m_out, 3, 23),
+
+ /* bank Y */
+ GROUP(uart_tx_c, 1, 19),
+ GROUP(uart_rx_c, 1, 18),
+ GROUP(uart_cts_c, 1, 17),
+ GROUP(uart_rts_c, 1, 16),
+
+ GROUP(pcm_out_b, 4, 25),
+ GROUP(pcm_in_b, 4, 24),
+ GROUP(pcm_fs_b, 4, 23),
+ GROUP(pcm_clk_b, 4, 22),
+
+ GROUP(i2c_sda_c0, 1, 15),
+ GROUP(i2c_sck_c0, 1, 14),
+
+ /* bank DV */
+ GROUP(dvin_rgb, 0, 6),
+ GROUP(dvin_vs, 0, 9),
+ GROUP(dvin_hs, 0, 8),
+ GROUP(dvin_clk, 0, 7),
+ GROUP(dvin_de, 0, 10),
+
+ GROUP(enc_0, 7, 0),
+ GROUP(enc_1, 7, 1),
+ GROUP(enc_2, 7, 2),
+ GROUP(enc_3, 7, 3),
+ GROUP(enc_4, 7, 4),
+ GROUP(enc_5, 7, 5),
+ GROUP(enc_6, 7, 6),
+ GROUP(enc_7, 7, 7),
+ GROUP(enc_8, 7, 8),
+ GROUP(enc_9, 7, 9),
+ GROUP(enc_10, 7, 10),
+ GROUP(enc_11, 7, 11),
+ GROUP(enc_12, 7, 12),
+ GROUP(enc_13, 7, 13),
+ GROUP(enc_14, 7, 14),
+ GROUP(enc_15, 7, 15),
+ GROUP(enc_16, 7, 16),
+ GROUP(enc_17, 7, 17),
+
+ GROUP(uart_tx_b1, 6, 23),
+ GROUP(uart_rx_b1, 6, 22),
+ GROUP(uart_cts_b1, 6, 21),
+ GROUP(uart_rts_b1, 6, 20),
+
+ GROUP(vga_vs, 0, 21),
+ GROUP(vga_hs, 0, 20),
+
+ /* bank H */
+ GROUP(hdmi_hpd, 1, 26),
+ GROUP(hdmi_sda, 1, 25),
+ GROUP(hdmi_scl, 1, 24),
+ GROUP(hdmi_cec, 1, 23),
+
+ GROUP(spi_ss0_0, 9, 13),
+ GROUP(spi_miso_0, 9, 12),
+ GROUP(spi_mosi_0, 9, 11),
+ GROUP(spi_sclk_0, 9, 10),
+
+ GROUP(i2c_sda_d1, 4, 3),
+ GROUP(i2c_sck_d1, 4, 2),
+
+ /* bank Z */
+ GROUP(spi_ss0_1, 8, 16),
+ GROUP(spi_ss1_1, 8, 12),
+ GROUP(spi_sclk_1, 8, 15),
+ GROUP(spi_mosi_1, 8, 14),
+ GROUP(spi_miso_1, 8, 13),
+ GROUP(spi_ss2_1, 8, 17),
+
+ GROUP(eth_tx_clk_50m, 6, 15),
+ GROUP(eth_tx_en, 6, 14),
+ GROUP(eth_txd1, 6, 13),
+ GROUP(eth_txd0, 6, 12),
+ GROUP(eth_rx_clk_in, 6, 10),
+ GROUP(eth_rx_dv, 6, 11),
+ GROUP(eth_rxd1, 6, 8),
+ GROUP(eth_rxd0, 6, 7),
+ GROUP(eth_mdio, 6, 6),
+ GROUP(eth_mdc, 6, 5),
+
+ GROUP(i2c_sda_a0, 5, 31),
+ GROUP(i2c_sck_a0, 5, 30),
+
+ GROUP(i2c_sda_b, 5, 27),
+ GROUP(i2c_sck_b, 5, 26),
+
+ GROUP(i2c_sda_c1, 5, 25),
+ GROUP(i2c_sck_c1, 5, 24),
+
+ GROUP(i2c_sda_a1, 5, 9),
+ GROUP(i2c_sck_a1, 5, 8),
+
+ GROUP(i2c_sda_a2, 5, 7),
+ GROUP(i2c_sck_a2, 5, 6),
+
+ /* bank BOOT */
+ GROUP(sd_d0_c, 6, 29),
+ GROUP(sd_d1_c, 6, 28),
+ GROUP(sd_d2_c, 6, 27),
+ GROUP(sd_d3_c, 6, 26),
+ GROUP(sd_cmd_c, 6, 25),
+ GROUP(sd_clk_c, 6, 24),
+
+ GROUP(sdxc_d0_c, 4, 30),
+ GROUP(sdxc_d13_c, 4, 29),
+ GROUP(sdxc_d47_c, 4, 28),
+ GROUP(sdxc_cmd_c, 4, 27),
+ GROUP(sdxc_clk_c, 4, 26),
+
+ GROUP(nand_io, 2, 26),
+ GROUP(nand_io_ce0, 2, 25),
+ GROUP(nand_io_ce1, 2, 24),
+ GROUP(nand_io_rb0, 2, 17),
+ GROUP(nand_ale, 2, 21),
+ GROUP(nand_cle, 2, 20),
+ GROUP(nand_wen_clk, 2, 19),
+ GROUP(nand_ren_clk, 2, 18),
+ GROUP(nand_dqs, 2, 27),
+ GROUP(nand_ce2, 2, 23),
+ GROUP(nand_ce3, 2, 22),
+
+ GROUP(nor_d, 5, 1),
+ GROUP(nor_q, 5, 3),
+ GROUP(nor_c, 5, 2),
+ GROUP(nor_cs, 5, 0),
+
+ /* bank CARD */
+ GROUP(sd_d1_b, 2, 14),
+ GROUP(sd_d0_b, 2, 15),
+ GROUP(sd_clk_b, 2, 11),
+ GROUP(sd_cmd_b, 2, 10),
+ GROUP(sd_d3_b, 2, 12),
+ GROUP(sd_d2_b, 2, 13),
+
+ GROUP(sdxc_d13_b, 2, 6),
+ GROUP(sdxc_d0_b, 2, 7),
+ GROUP(sdxc_clk_b, 2, 5),
+ GROUP(sdxc_cmd_b, 2, 4),
+
+ /* bank AO */
+ GROUP_AO(uart_tx_ao_a, 0, 12),
+ GROUP_AO(uart_rx_ao_a, 0, 11),
+ GROUP_AO(uart_cts_ao_a, 0, 10),
+ GROUP_AO(uart_rts_ao_a, 0, 9),
+
+ GROUP_AO(remote_input, 0, 0),
+
+ GROUP_AO(i2c_slave_sck_ao, 0, 2),
+ GROUP_AO(i2c_slave_sda_ao, 0, 1),
+
+ GROUP_AO(uart_tx_ao_b0, 0, 26),
+ GROUP_AO(uart_rx_ao_b0, 0, 25),
+
+ GROUP_AO(uart_tx_ao_b1, 0, 24),
+ GROUP_AO(uart_rx_ao_b1, 0, 23),
+
+ GROUP_AO(i2c_mst_sck_ao, 0, 6),
+ GROUP_AO(i2c_mst_sda_ao, 0, 5),
+};
+
+static const char * const gpio_groups[] = {
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
+ "GPIOX_20", "GPIOX_21",
+
+ "GPIOY_0", "GPIOY_1", "GPIOY_2", "GPIOY_3", "GPIOY_4",
+ "GPIOY_5", "GPIOY_6", "GPIOY_7", "GPIOY_8", "GPIOY_9",
+ "GPIOY_10", "GPIOY_11", "GPIOY_12", "GPIOY_13", "GPIOY_14",
+ "GPIOY_15", "GPIOY_16",
+
+ "GPIODV_0", "GPIODV_1", "GPIODV_2", "GPIODV_3", "GPIODV_4",
+ "GPIODV_5", "GPIODV_6", "GPIODV_7", "GPIODV_8", "GPIODV_9",
+ "GPIODV_10", "GPIODV_11", "GPIODV_12", "GPIODV_13", "GPIODV_14",
+ "GPIODV_15", "GPIODV_16", "GPIODV_17", "GPIODV_18", "GPIODV_19",
+ "GPIODV_20", "GPIODV_21", "GPIODV_22", "GPIODV_23", "GPIODV_24",
+ "GPIODV_25", "GPIODV_26", "GPIODV_27", "GPIODV_28", "GPIODV_29",
+
+ "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4",
+ "GPIOH_5", "GPIOH_6", "GPIOH_7", "GPIOH_8", "GPIOH_9",
+
+ "GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4",
+ "GPIOZ_5", "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9",
+ "GPIOZ_10", "GPIOZ_11", "GPIOZ_12", "GPIOZ_13", "GPIOZ_14",
+
+ "CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4",
+ "CARD_5", "CARD_6",
+
+ "BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
+ "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
+
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
+ "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
+ "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
+ "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N"
+};
+
+static const char * const sd_a_groups[] = {
+ "sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a", "sd_cmd_a"
+};
+
+static const char * const sdxc_a_groups[] = {
+ "sdxc_d0_a", "sdxc_d13_a", "sdxc_d47_a", "sdxc_clk_a", "sdxc_cmd_a"
+};
+
+static const char * const pcm_a_groups[] = {
+ "pcm_out_a", "pcm_in_a", "pcm_fs_a", "pcm_clk_a"
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_tx_a0", "uart_rx_a0", "uart_cts_a0", "uart_rts_a0",
+ "uart_tx_a1", "uart_rx_a1", "uart_cts_a1", "uart_rts_a1"
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_tx_b0", "uart_rx_b0", "uart_cts_b0", "uart_rts_b0",
+ "uart_tx_b1", "uart_rx_b1", "uart_cts_b1", "uart_rts_b1"
+};
+
+static const char * const iso7816_groups[] = {
+ "iso7816_det", "iso7816_reset", "iso7816_clk", "iso7816_data"
+};
+
+static const char * const i2c_d_groups[] = {
+ "i2c_sda_d0", "i2c_sck_d0", "i2c_sda_d1", "i2c_sck_d1"
+};
+
+static const char * const xtal_groups[] = {
+ "xtal_32k_out", "xtal_24m_out"
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_tx_c", "uart_rx_c", "uart_cts_c", "uart_rts_c"
+};
+
+static const char * const pcm_b_groups[] = {
+ "pcm_out_b", "pcm_in_b", "pcm_fs_b", "pcm_clk_b"
+};
+
+static const char * const i2c_c_groups[] = {
+ "i2c_sda_c0", "i2c_sck_c0", "i2c_sda_c1", "i2c_sck_c1"
+};
+
+static const char * const dvin_groups[] = {
+ "dvin_rgb", "dvin_vs", "dvin_hs", "dvin_clk", "dvin_de"
+};
+
+static const char * const enc_groups[] = {
+ "enc_0", "enc_1", "enc_2", "enc_3", "enc_4", "enc_5",
+ "enc_6", "enc_7", "enc_8", "enc_9", "enc_10", "enc_11",
+ "enc_12", "enc_13", "enc_14", "enc_15", "enc_16", "enc_17"
+};
+
+static const char * const vga_groups[] = {
+ "vga_vs", "vga_hs"
+};
+
+static const char * const hdmi_groups[] = {
+ "hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec"
+};
+
+static const char * const spi_groups[] = {
+ "spi_ss0_0", "spi_miso_0", "spi_mosi_0", "spi_sclk_0",
+ "spi_ss0_1", "spi_ss1_1", "spi_sclk_1", "spi_mosi_1",
+ "spi_miso_1", "spi_ss2_1"
+};
+
+static const char * const ethernet_groups[] = {
+ "eth_tx_clk_50m", "eth_tx_en", "eth_txd1",
+ "eth_txd0", "eth_rx_clk_in", "eth_rx_dv",
+ "eth_rxd1", "eth_rxd0", "eth_mdio", "eth_mdc"
+};
+
+static const char * const i2c_a_groups[] = {
+ "i2c_sda_a0", "i2c_sck_a0", "i2c_sda_a1", "i2c_sck_a1",
+ "i2c_sda_a2", "i2c_sck_a2"
+};
+
+static const char * const i2c_b_groups[] = {
+ "i2c_sda_b", "i2c_sck_b"
+};
+
+static const char * const sd_c_groups[] = {
+ "sd_d0_c", "sd_d1_c", "sd_d2_c", "sd_d3_c",
+ "sd_cmd_c", "sd_clk_c"
+};
+
+static const char * const sdxc_c_groups[] = {
+ "sdxc_d0_c", "sdxc_d13_c", "sdxc_d47_c", "sdxc_cmd_c",
+ "sdxc_clk_c"
+};
+
+static const char * const nand_groups[] = {
+ "nand_io", "nand_io_ce0", "nand_io_ce1",
+ "nand_io_rb0", "nand_ale", "nand_cle",
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs",
+ "nand_ce2", "nand_ce3"
+};
+
+static const char * const nor_groups[] = {
+ "nor_d", "nor_q", "nor_c", "nor_cs"
+};
+
+static const char * const sd_b_groups[] = {
+ "sd_d1_b", "sd_d0_b", "sd_clk_b", "sd_cmd_b",
+ "sd_d3_b", "sd_d2_b"
+};
+
+static const char * const sdxc_b_groups[] = {
+ "sdxc_d13_b", "sdxc_d0_b", "sdxc_clk_b", "sdxc_cmd_b"
+};
+
+static const char * const uart_ao_groups[] = {
+ "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
+};
+
+static const char * const remote_groups[] = {
+ "remote_input"
+};
+
+static const char * const i2c_slave_ao_groups[] = {
+ "i2c_slave_sck_ao", "i2c_slave_sda_ao"
+};
+
+static const char * const uart_ao_b_groups[] = {
+ "uart_tx_ao_b0", "uart_rx_ao_b0", "uart_tx_ao_b1", "uart_rx_ao_b1"
+};
+
+static const char * const i2c_mst_ao_groups[] = {
+ "i2c_mst_sck_ao", "i2c_mst_sda_ao"
+};
+
+static struct meson_pmx_func meson8_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(sd_a),
+ FUNCTION(sdxc_a),
+ FUNCTION(pcm_a),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(iso7816),
+ FUNCTION(i2c_d),
+ FUNCTION(xtal),
+ FUNCTION(uart_c),
+ FUNCTION(pcm_b),
+ FUNCTION(i2c_c),
+ FUNCTION(dvin),
+ FUNCTION(enc),
+ FUNCTION(vga),
+ FUNCTION(hdmi),
+ FUNCTION(spi),
+ FUNCTION(ethernet),
+ FUNCTION(i2c_a),
+ FUNCTION(i2c_b),
+ FUNCTION(sd_c),
+ FUNCTION(sdxc_c),
+ FUNCTION(nand),
+ FUNCTION(nor),
+ FUNCTION(sd_b),
+ FUNCTION(sdxc_b),
+ FUNCTION(uart_ao),
+ FUNCTION(remote),
+ FUNCTION(i2c_slave_ao),
+ FUNCTION(uart_ao_b),
+ FUNCTION(i2c_mst_ao),
+};
+
+static struct meson_bank meson8_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("X", PIN_GPIOX_0, PIN_GPIOX_21, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", PIN_GPIOY_0, PIN_GPIOY_16, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN_GPIODV_0, PIN_GPIODV_29, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", PIN_GPIOH_0, PIN_GPIOH_9, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("Z", PIN_GPIOZ_0, PIN_GPIOZ_14, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
+ BANK("CARD", PIN_CARD_0, PIN_CARD_6, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", PIN_BOOT_0, PIN_BOOT_18, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+};
+
+static struct meson_bank meson8_ao_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("AO", PIN_GPIOAO_0, PIN_GPIO_TEST_N, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+};
+
+static struct meson_domain_data meson8_domain_data[] = {
+ {
+ .name = "banks",
+ .banks = meson8_banks,
+ .num_banks = ARRAY_SIZE(meson8_banks),
+ .pin_base = 0,
+ .num_pins = 120,
+ },
+ {
+ .name = "ao-bank",
+ .banks = meson8_ao_banks,
+ .num_banks = ARRAY_SIZE(meson8_ao_banks),
+ .pin_base = 120,
+ .num_pins = 16,
+ },
+};
+
+struct meson_pinctrl_data meson8_pinctrl_data = {
+ .pins = meson8_pins,
+ .groups = meson8_groups,
+ .funcs = meson8_functions,
+ .domain_data = meson8_domain_data,
+ .num_pins = ARRAY_SIZE(meson8_pins),
+ .num_groups = ARRAY_SIZE(meson8_groups),
+ .num_funcs = ARRAY_SIZE(meson8_functions),
+ .num_domains = ARRAY_SIZE(meson8_domain_data),
+};
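
Each BANK() row above packs five register/bit pairs (pull-enable, pull, direction, output, input); each pair gives the register index and the bit used for the bank's first pin, with later pins presumably occupying consecutive bits. The lookup helper, meson_calc_reg_and_bit(), lives earlier in pinctrl-meson.c and is not shown in this excerpt, so the arithmetic below is a hedged worked example rather than its actual implementation:

/*
 * Worked example for BANK("X", PIN_GPIOX_0, PIN_GPIOX_21,
 *                         4, 0,  4, 0,  0, 0,  1, 0,  2, 0),
 * assuming bit = regs[type].bit + (pin - bank->first):
 *
 *   GPIOX_9, REG_PULLEN -> register index 4, bit 0 + (GPIOX_9 - GPIOX_0) = 9
 *   GPIOX_9, REG_DIR    -> register index 0, bit 9
 *   GPIOX_9, REG_IN     -> register index 2, bit 9
 */
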
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 224c6cff6aa2..7302f66f4f19 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -145,14 +145,16 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
MPP_VAR_FUNCTION(2, "ptp", "event_req", V_88F6810_PLUS),
MPP_VAR_FUNCTION(3, "pcie0", "clkreq", V_88F6810_PLUS),
MPP_VAR_FUNCTION(4, "sata1", "prsnt", V_88F6810_PLUS),
- MPP_VAR_FUNCTION(5, "ua0", "cts", V_88F6810_PLUS)),
+ MPP_VAR_FUNCTION(5, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "rxd", V_88F6810_PLUS)),
MPP_MODE(20,
MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
MPP_VAR_FUNCTION(1, "ge0", "txclk", V_88F6810_PLUS),
MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
MPP_VAR_FUNCTION(4, "sata0", "prsnt", V_88F6810_PLUS),
- MPP_VAR_FUNCTION(5, "ua0", "rts", V_88F6810_PLUS)),
+ MPP_VAR_FUNCTION(5, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "txd", V_88F6810_PLUS)),
MPP_MODE(21,
MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
MPP_VAR_FUNCTION(1, "spi0", "cs1", V_88F6810_PLUS),
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 89a52e15f0ae..95bfd0653e8f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -751,12 +751,12 @@ static struct mvebu_pinctrl_soc_info dove_pinctrl_info = {
static struct clk *clk;
-static struct of_device_id dove_pinctrl_of_match[] = {
+static const struct of_device_id dove_pinctrl_of_match[] = {
{ .compatible = "marvell,dove-pinctrl", .data = &dove_pinctrl_info },
{ }
};
-static struct regmap_config gc_regmap_config = {
+static const struct regmap_config gc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 3d6d97228523..1806b24faa14 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -914,7 +914,7 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
}
}
- ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs);
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs, &nconfigs);
if (nconfigs) {
const char *gpio_name;
const char *pin;
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index f78b416d7984..4db92f64b4de 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -27,17 +27,6 @@
#include "pinctrl-utils.h"
#ifdef CONFIG_DEBUG_FS
-
-struct pin_config_item {
- const enum pin_config_param param;
- const char * const display;
- const char * const format;
- bool has_arg;
-};
-
-#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
- .has_arg = d }
-
static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
@@ -60,22 +49,25 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
};
-void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
- struct seq_file *s, unsigned pin)
+static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
+ struct seq_file *s, const char *gname,
+ unsigned pin,
+ const struct pin_config_item *items,
+ int nitems)
{
- const struct pinconf_ops *ops = pctldev->desc->confops;
int i;
- if (!ops->is_generic)
- return;
-
- for (i = 0; i < ARRAY_SIZE(conf_items); i++) {
+ for (i = 0; i < nitems; i++) {
unsigned long config;
int ret;
/* We want to check out this parameter */
- config = pinconf_to_config_packed(conf_items[i].param, 0);
- ret = pin_config_get_for_pin(pctldev, pin, &config);
+ config = pinconf_to_config_packed(items[i].param, 0);
+ if (gname)
+ ret = pin_config_group_get(dev_name(pctldev->dev),
+ gname, &config);
+ else
+ ret = pin_config_get_for_pin(pctldev, pin, &config);
/* These are legal errors */
if (ret == -EINVAL || ret == -ENOTSUPP)
continue;
@@ -85,56 +77,47 @@ void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
}
/* Space between multiple configs */
seq_puts(s, " ");
- seq_puts(s, conf_items[i].display);
+ seq_puts(s, items[i].display);
/* Print unit if available */
- if (conf_items[i].has_arg) {
+ if (items[i].has_arg) {
seq_printf(s, " (%u",
pinconf_to_config_argument(config));
- if (conf_items[i].format)
- seq_printf(s, " %s)", conf_items[i].format);
+ if (items[i].format)
+ seq_printf(s, " %s)", items[i].format);
else
seq_puts(s, ")");
}
}
}
-void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
- struct seq_file *s, const char *gname)
+/**
+ * pinconf_generic_dump_pins - Print information about pin or group of pins
+ * @pctldev: Pin controller device
+ * @s: File to print to
+ * @gname: Group name specifying pins
+ * @pin: Pin number specifying pin
+ *
+ * Print the pinconf configuration for the requested pin(s) to @s. Pins can be
+ * specified either by pin using @pin or by group using @gname. Only one needs
+ * to be specified; the other can be NULL/0.
+ */
+void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev, struct seq_file *s,
+ const char *gname, unsigned pin)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
- int i;
if (!ops->is_generic)
return;
- for (i = 0; i < ARRAY_SIZE(conf_items); i++) {
- unsigned long config;
- int ret;
-
- /* We want to check out this parameter */
- config = pinconf_to_config_packed(conf_items[i].param, 0);
- ret = pin_config_group_get(dev_name(pctldev->dev), gname,
- &config);
- /* These are legal errors */
- if (ret == -EINVAL || ret == -ENOTSUPP)
- continue;
- if (ret) {
- seq_printf(s, "ERROR READING CONFIG SETTING %d ", i);
- continue;
- }
- /* Space between multiple configs */
- seq_puts(s, " ");
- seq_puts(s, conf_items[i].display);
- /* Print unit if available */
- if (conf_items[i].has_arg) {
- seq_printf(s, " (%u",
- pinconf_to_config_argument(config));
- if (conf_items[i].format)
- seq_printf(s, " %s)", conf_items[i].format);
- else
- seq_puts(s, ")");
- }
- }
+ /* generic parameters */
+ pinconf_generic_dump_one(pctldev, s, gname, pin, conf_items,
+ ARRAY_SIZE(conf_items));
+ /* driver-specific parameters */
+ if (pctldev->desc->num_custom_params &&
+ pctldev->desc->custom_conf_items)
+ pinconf_generic_dump_one(pctldev, s, gname, pin,
+ pctldev->desc->custom_conf_items,
+ pctldev->desc->num_custom_params);
}
void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
@@ -148,18 +131,25 @@ void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
seq_printf(s, "%s: 0x%x", conf_items[i].display,
pinconf_to_config_argument(config));
}
+
+ if (!pctldev->desc->num_custom_params ||
+ !pctldev->desc->custom_conf_items)
+ return;
+
+ for (i = 0; i < pctldev->desc->num_custom_params; i++) {
+ if (pinconf_to_config_param(config) !=
+ pctldev->desc->custom_conf_items[i].param)
+ continue;
+ seq_printf(s, "%s: 0x%x",
+ pctldev->desc->custom_conf_items[i].display,
+ pinconf_to_config_argument(config));
+ }
}
EXPORT_SYMBOL_GPL(pinconf_generic_dump_config);
#endif
#ifdef CONFIG_OF
-struct pinconf_generic_dt_params {
- const char * const property;
- enum pin_config_param param;
- u32 default_value;
-};
-
-static const struct pinconf_generic_dt_params dt_params[] = {
+static const struct pinconf_generic_params dt_params[] = {
{ "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 },
{ "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 },
{ "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
@@ -184,6 +174,47 @@ static const struct pinconf_generic_dt_params dt_params[] = {
};
/**
+ * parse_dt_cfg() - Parse DT pinconf parameters
+ * @np: DT node
+ * @params: Array describing generic parameters
+ * @count: Number of entries in @params
+ * @cfg: Array of parsed config options
+ * @ncfg: Number of entries in @cfg
+ *
+ * Parse the config options described in @params from @np and put the result
+ * in @cfg. @cfg does not need to be empty; entries are added beginning at
+ * @ncfg. @ncfg is updated to reflect the number of entries after parsing. @cfg
+ * needs to have enough memory allocated to hold all possible entries.
+ */
+static void parse_dt_cfg(struct device_node *np,
+ const struct pinconf_generic_params *params,
+ unsigned int count, unsigned long *cfg,
+ unsigned int *ncfg)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u32 val;
+ int ret;
+ const struct pinconf_generic_params *par = &params[i];
+
+ ret = of_property_read_u32(np, par->property, &val);
+
+ /* property not found */
+ if (ret == -EINVAL)
+ continue;
+
+ /* use default value, when no value is specified */
+ if (ret)
+ val = par->default_value;
+
+ pr_debug("found %s with value %u\n", par->property, val);
+ cfg[*ncfg] = pinconf_to_config_packed(par->param, val);
+ (*ncfg)++;
+ }
+}
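
[Editor's note] The kernel-doc above describes how parse_dt_cfg() turns each known DT property into one packed (parameter, argument) word appended at @ncfg. Below is a minimal, self-contained sketch of that pattern; the pack() helper, its 16-bit split, and prop_read_u32() are illustrative assumptions standing in for pinconf_to_config_packed() and of_property_read_u32(), and the two-entry table stands in for dt_params.

    /*
     * Illustrative sketch only: models the parse_dt_cfg() pattern above with
     * stand-in helpers. The 16-bit pack() split and prop_read_u32() are
     * assumptions, not the kernel's pinconf/OF APIs.
     */
    #include <stdio.h>
    #include <string.h>

    enum { P_BIAS_DISABLE = 1, P_BIAS_PULL_UP = 2 };

    struct param_desc { const char *property; int param; unsigned def; };

    static const struct param_desc params[] = {
        { "bias-disable", P_BIAS_DISABLE, 0 },
        { "bias-pull-up", P_BIAS_PULL_UP, 1 },
    };

    /* pack the parameter into the low 16 bits, the argument above it */
    static unsigned long pack(int param, unsigned arg)
    {
        return ((unsigned long)arg << 16) | (param & 0xffff);
    }

    /* stand-in for of_property_read_u32(): -1 when the property is absent,
     * -2 when it is present without a value (a boolean DT property) */
    static int prop_read_u32(const char *const *node, int n, const char *name,
                             unsigned *val)
    {
        for (int i = 0; i < n; i++)
            if (!strcmp(node[i], name))
                return -2;
        (void)val;
        return -1;
    }

    int main(void)
    {
        const char *const node[] = { "bias-pull-up" };  /* one boolean property */
        unsigned long cfg[2];
        unsigned ncfg = 0;

        for (unsigned i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
            unsigned val;
            int ret = prop_read_u32(node, 1, params[i].property, &val);

            if (ret == -1)
                continue;              /* property not found: skip it */
            if (ret)
                val = params[i].def;   /* present but valueless: use default */
            cfg[ncfg++] = pack(params[i].param, val);
        }
        printf("parsed %u config(s), first = 0x%lx\n", ncfg, ncfg ? cfg[0] : 0UL);
        return 0;
    }
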
+
+/**
* pinconf_generic_parse_dt_config()
* parse the config properties into generic pinconfig values.
* @np: node containing the pinconfig properties
@@ -191,39 +222,30 @@ static const struct pinconf_generic_dt_params dt_params[] = {
 * @nconfigs: number of configurations
*/
int pinconf_generic_parse_dt_config(struct device_node *np,
+ struct pinctrl_dev *pctldev,
unsigned long **configs,
unsigned int *nconfigs)
{
unsigned long *cfg;
- unsigned int ncfg = 0;
+ unsigned int max_cfg, ncfg = 0;
int ret;
- int i;
- u32 val;
if (!np)
return -EINVAL;
/* allocate a temporary array big enough to hold one of each option */
- cfg = kzalloc(sizeof(*cfg) * ARRAY_SIZE(dt_params), GFP_KERNEL);
+ max_cfg = ARRAY_SIZE(dt_params);
+ if (pctldev)
+ max_cfg += pctldev->desc->num_custom_params;
+ cfg = kcalloc(max_cfg, sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
- const struct pinconf_generic_dt_params *par = &dt_params[i];
- ret = of_property_read_u32(np, par->property, &val);
-
- /* property not found */
- if (ret == -EINVAL)
- continue;
-
- /* use default value, when no value is specified */
- if (ret)
- val = par->default_value;
-
- pr_debug("found %s with value %u\n", par->property, val);
- cfg[ncfg] = pinconf_to_config_packed(par->param, val);
- ncfg++;
- }
+ parse_dt_cfg(np, dt_params, ARRAY_SIZE(dt_params), cfg, &ncfg);
+ if (pctldev && pctldev->desc->num_custom_params &&
+ pctldev->desc->custom_params)
+ parse_dt_cfg(np, pctldev->desc->custom_params,
+ pctldev->desc->num_custom_params, cfg, &ncfg);
ret = 0;
@@ -264,6 +286,7 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned reserve;
struct property *prop;
const char *group;
+ const char *subnode_target_type = "pins";
ret = of_property_read_string(np, "function", &function);
if (ret < 0) {
@@ -273,7 +296,8 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
function = NULL;
}
- ret = pinconf_generic_parse_dt_config(np, &configs, &num_configs);
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+ &num_configs);
if (ret < 0) {
dev_err(dev, "could not parse node property\n");
return ret;
@@ -284,10 +308,20 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
reserve++;
if (num_configs)
reserve++;
+
ret = of_property_count_strings(np, "pins");
if (ret < 0) {
- dev_err(dev, "could not parse property pins\n");
- goto exit;
+ ret = of_property_count_strings(np, "groups");
+ if (ret < 0) {
+ dev_err(dev, "could not parse property pins/groups\n");
+ goto exit;
+ }
+ if (type == PIN_MAP_TYPE_INVALID)
+ type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ subnode_target_type = "groups";
+ } else {
+ if (type == PIN_MAP_TYPE_INVALID)
+ type = PIN_MAP_TYPE_CONFIGS_PIN;
}
reserve *= ret;
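
[Editor's note] The hunk above lets a config subnode name its targets with either a "pins" or a "groups" string list, and fills in the map type only when the caller asked for PIN_MAP_TYPE_INVALID. The sketch below models just that selection step; count_strings() and the enum are stand-ins, not the OF or pinctrl APIs.

    /*
     * Sketch of the "pins"/"groups" fallback above. count_strings() stands in
     * for of_property_count_strings() and the enum for the pinctrl map types.
     */
    #include <stdio.h>
    #include <string.h>

    enum map_type { MAP_INVALID, MAP_CONFIGS_PIN, MAP_CONFIGS_GROUP };

    /* pretend the node carries exactly one string-list property called @have */
    static int count_strings(const char *have, const char *name)
    {
        return have && !strcmp(have, name) ? 2 : -1;
    }

    static int pick_target(const char *have, enum map_type *type,
                           const char **target)
    {
        int n = count_strings(have, "pins");

        if (n < 0) {
            n = count_strings(have, "groups");
            if (n < 0)
                return -1;                     /* neither property present */
            if (*type == MAP_INVALID)
                *type = MAP_CONFIGS_GROUP;
            *target = "groups";
        } else if (*type == MAP_INVALID) {
            *type = MAP_CONFIGS_PIN;
        }
        return n;
    }

    int main(void)
    {
        enum map_type type = MAP_INVALID;
        const char *target = "pins";
        int n = pick_target("groups", &type, &target);

        printf("%d entries via \"%s\", map type %d\n", n, target, type);
        return 0;
    }
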
@@ -296,7 +330,7 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- of_property_for_each_string(np, "pins", prop, group) {
+ of_property_for_each_string(np, subnode_target_type, prop, group) {
if (function) {
ret = pinctrl_utils_add_map_mux(pctldev, map,
reserved_maps, num_maps, group,
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 8bfa0643e5dc..1fc09dc20199 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -288,7 +288,7 @@ static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
const struct pinconf_ops *ops = pctldev->desc->confops;
/* no-op when not using generic pin config */
- pinconf_generic_dump_pin(pctldev, s, pin);
+ pinconf_generic_dump_pins(pctldev, s, NULL, pin);
if (ops && ops->pin_config_dbg_show)
ops->pin_config_dbg_show(pctldev, s, pin);
}
@@ -333,7 +333,7 @@ static void pinconf_dump_group(struct pinctrl_dev *pctldev,
const struct pinconf_ops *ops = pctldev->desc->confops;
/* no-op when not using generic pin config */
- pinconf_generic_dump_group(pctldev, s, gname);
+ pinconf_generic_dump_pins(pctldev, s, gname, 0);
if (ops && ops->pin_config_group_dbg_show)
ops->pin_config_group_dbg_show(pctldev, s, selector);
}
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index a4a5417e1413..55c75780b3b2 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -92,26 +92,17 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS)
-void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
- struct seq_file *s, unsigned pin);
-
-void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
- struct seq_file *s, const char *gname);
+void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev,
+ struct seq_file *s, const char *gname,
+ unsigned pin);
void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned long config);
#else
-static inline void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
- struct seq_file *s,
- unsigned pin)
-{
- return;
-}
-
-static inline void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
- struct seq_file *s,
- const char *gname)
+static inline void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ const char *gname, unsigned pin)
{
return;
}
@@ -126,6 +117,7 @@ static inline void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_OF)
int pinconf_generic_parse_dt_config(struct device_node *np,
+ struct pinctrl_dev *pctldev,
unsigned long **configs,
unsigned int *nconfigs);
#endif
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
- int nbanks;
+ int nactive_banks;
uint32_t *mux_mask;
int nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
int mux;
/* check if it's a valid config */
- if (pin->bank >= info->nbanks) {
+ if (pin->bank >= gpio_banks) {
dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
- name, index, pin->bank, info->nbanks);
+ name, index, pin->bank, gpio_banks);
return -EINVAL;
}
+ if (!gpio_chips[pin->bank]) {
+ dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+ name, index, pin->bank);
+ return -ENXIO;
+ }
+
if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, gpio_compat)) {
- info->nbanks++;
+ if (of_device_is_available(child))
+ info->nactive_banks++;
} else {
info->nfunctions++;
info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
}
size /= sizeof(*list);
- if (!size || size % info->nbanks) {
- dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+ if (!size || size % gpio_banks) {
+ dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
return -EINVAL;
}
- info->nmux = size / info->nbanks;
+ info->nmux = size / gpio_banks;
info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
at91_pinctrl_child_count(info, np);
- if (info->nbanks < 1) {
+ if (gpio_banks < 1) {
dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
return -EINVAL;
}
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
dev_dbg(&pdev->dev, "mux-mask\n");
tmp = info->mux_mask;
- for (i = 0; i < info->nbanks; i++) {
+ for (i = 0; i < gpio_banks; i++) {
for (j = 0; j < info->nmux; j++, tmp++) {
dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
}
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->groups)
return -ENOMEM;
- dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
- int ret, i, j, k;
+ int ret, i, j, k, ngpio_chips_enabled = 0;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
* to obtain references to the struct gpio_chip * for them, and we
* need this to proceed.
*/
- for (i = 0; i < info->nbanks; i++) {
- if (!gpio_chips[i]) {
- dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, info);
- return -EPROBE_DEFER;
- }
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ ngpio_chips_enabled++;
+
+ if (ngpio_chips_enabled < info->nactive_banks) {
+ dev_warn(&pdev->dev,
+ "All GPIO chips are not registered yet (%d/%d)\n",
+ ngpio_chips_enabled, info->nactive_banks);
+ devm_kfree(&pdev->dev, info);
+ return -EPROBE_DEFER;
}
at91_pinctrl_desc.name = dev_name(&pdev->dev);
- at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+ at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
at91_pinctrl_desc.pins = pdesc =
devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
if (!at91_pinctrl_desc.pins)
return -ENOMEM;
- for (i = 0 , k = 0; i < info->nbanks; i++) {
+ for (i = 0, k = 0; i < gpio_banks; i++) {
for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
pdesc->number = k;
pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- for (i = 0; i < info->nbanks; i++)
- pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *at91_gpio)
{
+ struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
- int ret;
+ int ret, i;
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
return ret;
}
- /* Setup chained handler */
- if (at91_gpio->pioc_idx)
- prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
/* The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
- if (prev && prev->next == at91_gpio)
+ gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+ if (!gpiochip_prev) {
+ /* Then register the chain on the parent IRQ */
+ gpiochip_set_chained_irqchip(&at91_gpio->chip,
+ &gpio_irqchip,
+ at91_gpio->pioc_virq,
+ gpio_irq_handler);
return 0;
+ }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
- return 0;
+ /* we can only have 2 banks before */
+ for (i = 0; i < 2; i++) {
+ if (prev->next) {
+ prev = prev->next;
+ } else {
+ prev->next = at91_gpio;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
/* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static void at91_gpio_probe_fixup(void)
-{
- unsigned i;
- struct at91_gpio_chip *at91_gpio, *last = NULL;
-
- for (i = 0; i < gpio_banks; i++) {
- at91_gpio = gpio_chips[i];
-
- /*
- * GPIO controller are grouped on some SoC:
- * PIOC, PIOD and PIOE can share the same IRQ line
- */
- if (last && last->pioc_virq == at91_gpio->pioc_virq)
- last->next = at91_gpio;
- last = at91_gpio;
- }
-}
-
static struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- at91_gpio_probe_fixup();
-
ret = at91_gpio_of_irq_setup(pdev, at91_chip);
if (ret)
goto irq_setup_err;
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
index fa2a00f22ff1..b88cfe5ed55a 100644
--- a/drivers/pinctrl/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -976,7 +976,7 @@ static inline void bcm281xx_pin_update(u32 *reg_val, u32 *reg_mask,
*reg_mask |= param_mask;
}
-static struct regmap_config bcm281xx_pinctrl_regmap_config = {
+static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -1435,7 +1435,7 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id bcm281xx_pinctrl_of_match[] = {
+static const struct of_device_id bcm281xx_pinctrl_of_match[] = {
{ .compatible = "brcm,bcm11351-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 1d21dc226920..0b0fc2eb48e0 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -101,6 +101,7 @@ static void lantiq_load_pin_desc(struct pinctrl_pin_desc *d, int bank, int len)
for (i = 0; i < len; i++) {
/* strlen("ioXYZ") + 1 = 6 */
char *name = kzalloc(6, GFP_KERNEL);
+
snprintf(name, 6, "io%d", base + i);
d[i].number = base + i;
d[i].name = name;
@@ -463,7 +464,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
&res);
if (IS_ERR(falcon_info.membase[*bank]))
return PTR_ERR(falcon_info.membase[*bank]);
-
+
avail = pad_r32(falcon_info.membase[*bank],
LTQ_PADC_AVAIL);
pins = fls(avail);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3c22dbebc80f..dee7d5f06c60 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -89,7 +89,7 @@ struct rockchip_iomux {
* @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
- * @saved_enables: Saved content of GPIO_INTEN at suspend time.
+ * @saved_masks: Saved content of GPIO_INTEN at suspend time.
* @pin_base: first pin number
* @nr_pins: number of pins in this bank
* @name: name of the bank
@@ -108,7 +108,7 @@ struct rockchip_pin_bank {
struct regmap *regmap_pull;
struct clk *clk;
int irq;
- u32 saved_enables;
+ u32 saved_masks;
u32 pin_base;
u8 nr_pins;
char *name;
@@ -1142,7 +1142,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
return -EINVAL;
np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
- ret = pinconf_generic_parse_dt_config(np_config,
+ ret = pinconf_generic_parse_dt_config(np_config, NULL,
&grp->data[j].configs, &grp->data[j].nconfigs);
if (ret)
return ret;
@@ -1398,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
- u32 polarity = 0, data = 0;
u32 pend;
- bool edge_changed = false;
- unsigned long flags;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1409,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
- if (bank->toggle_edge_mode) {
- polarity = readl_relaxed(bank->reg_base +
- GPIO_INT_POLARITY);
- data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
- }
-
while (pend) {
unsigned int virq;
@@ -1434,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
* needs manual intervention.
*/
if (bank->toggle_edge_mode & BIT(irq)) {
- if (data & BIT(irq))
- polarity &= ~BIT(irq);
- else
- polarity |= BIT(irq);
+ u32 data, data_old, polarity;
+ unsigned long flags;
- edge_changed = true;
- }
+ data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
+ do {
+ spin_lock_irqsave(&bank->slock, flags);
- generic_handle_irq(virq);
- }
+ polarity = readl_relaxed(bank->reg_base +
+ GPIO_INT_POLARITY);
+ if (data & BIT(irq))
+ polarity &= ~BIT(irq);
+ else
+ polarity |= BIT(irq);
+ writel(polarity,
+ bank->reg_base + GPIO_INT_POLARITY);
- if (bank->toggle_edge_mode && edge_changed) {
- /* Interrupt params should only be set with ints disabled */
- spin_lock_irqsave(&bank->slock, flags);
+ spin_unlock_irqrestore(&bank->slock, flags);
- data = readl_relaxed(bank->reg_base + GPIO_INTEN);
- writel_relaxed(0, bank->reg_base + GPIO_INTEN);
- writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
- writel(data, bank->reg_base + GPIO_INTEN);
+ data_old = data;
+ data = readl_relaxed(bank->reg_base +
+ GPIO_EXT_PORT);
+ } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+ }
- spin_unlock_irqrestore(&bank->slock, flags);
+ generic_handle_irq(virq);
}
chained_irq_exit(chip, desc);
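
[Editor's note] For emulated both-edge interrupts, the rework above flips GPIO_INT_POLARITY under the bank lock and then re-reads the pin level, repeating until the level stops changing, so a toggle that races the write cannot leave the next edge armed the wrong way. A stand-alone sketch of that settle loop; sample_level() is a stand-in for reading GPIO_EXT_PORT:

    /*
     * Sketch of the "flip polarity until the level settles" loop above.
     * polarity is the emulated trigger edge for one pin
     * (1 = rising next, 0 = falling next).
     */
    #include <stdio.h>

    static const int trace[] = { 1, 0, 0, 0 };  /* levels seen on successive reads */
    static int pos;

    static int sample_level(void)
    {
        return trace[pos < 3 ? pos++ : 3];
    }

    int main(void)
    {
        int polarity, data, data_old;

        data = sample_level();
        do {
            /* arm for the edge opposite to the level just observed */
            polarity = data ? 0 : 1;
            data_old = data;
            data = sample_level();   /* re-check: did the pin move meanwhile? */
        } while (data != data_old);

        printf("settled at level %d, next edge %s\n",
               data, polarity ? "rising" : "falling");
        return 0;
    }
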
@@ -1550,8 +1545,8 @@ static void rockchip_irq_suspend(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct rockchip_pin_bank *bank = gc->private;
- bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
- irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+ bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
+ irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
}
static void rockchip_irq_resume(struct irq_data *d)
@@ -1559,35 +1554,7 @@ static void rockchip_irq_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct rockchip_pin_bank *bank = gc->private;
- irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
-}
-
-static void rockchip_irq_disable(struct irq_data *d)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 val;
-
- irq_gc_lock(gc);
-
- val = irq_reg_readl(gc, GPIO_INTEN);
- val &= ~d->mask;
- irq_reg_writel(gc, val, GPIO_INTEN);
-
- irq_gc_unlock(gc);
-}
-
-static void rockchip_irq_enable(struct irq_data *d)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 val;
-
- irq_gc_lock(gc);
-
- val = irq_reg_readl(gc, GPIO_INTEN);
- val |= d->mask;
- irq_reg_writel(gc, val, GPIO_INTEN);
-
- irq_gc_unlock(gc);
+ irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
}
static int rockchip_interrupts_register(struct platform_device *pdev,
@@ -1625,6 +1592,14 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
continue;
}
+ /*
+ * Linux assumes that all interrupts start out disabled/masked.
+ * Our driver only uses the concept of masked and always keeps
+ * things enabled, so for us that's all masked and all enabled.
+ */
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
+
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
@@ -1633,8 +1608,6 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
- gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
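
[Editor's note] With GPIO_INTEN now left permanently set, suspend filtering is done entirely through GPIO_INTMASK: everything that is not a wake source is masked, and the saved mask is restored on resume. A small sketch of that save/mask/restore sequence, with plain variables standing in for the registers:

    /*
     * Sketch of the mask-only suspend/resume scheme above; intmask and
     * wake_active are variables standing in for GPIO_INTMASK and
     * gc->wake_active (GPIO_INTEN stays fully enabled the whole time).
     */
    #include <stdio.h>
    #include <inttypes.h>

    static uint32_t intmask;                          /* 1 = interrupt masked */
    static uint32_t saved_mask;
    static const uint32_t wake_active = 0x00000004;   /* only pin 2 may wake */

    static void suspend(void) { saved_mask = intmask; intmask = ~wake_active; }
    static void resume(void)  { intmask = saved_mask; }

    int main(void)
    {
        intmask = 0xfffffff0;   /* pins 0-3 currently unmasked */
        suspend();
        printf("suspended: 0x%08" PRIx32 "\n", intmask);  /* only the wake pin unmasked */
        resume();
        printf("resumed:   0x%08" PRIx32 "\n", intmask);
        return 0;
    }
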
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index 146e48a9b839..fab6aafa6a9f 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -415,7 +415,7 @@ static int tz1090_pdc_pinctrl_dt_subnode_to_map(struct device *dev,
function = NULL;
}
- ret = pinconf_generic_parse_dt_config(np, &configs, &num_configs);
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
if (ret)
return ret;
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index df8cb1e5b7b4..8bd73075f9dd 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1131,7 +1131,7 @@ static int tz1090_pinctrl_dt_subnode_to_map(struct device *dev,
function = NULL;
}
- ret = pinconf_generic_parse_dt_config(np, &configs, &num_configs);
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
if (ret)
return ret;
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index c5cef59f5965..779950c62e53 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* load the gpio chip */
xway_chip.dev = &pdev->dev;
- of_gpiochip_add(&xway_chip);
ret = gpiochip_add(&xway_chip);
if (ret) {
- of_gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register gpio chip\n");
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
new file mode 100644
index 000000000000..22280bddb9e2
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -0,0 +1,1180 @@
+/*
+ * Zynq pin controller
+ *
+ * Copyright (C) 2014 Xilinx
+ *
+ * Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/regmap.h>
+#include "pinctrl-utils.h"
+#include "core.h"
+
+#define ZYNQ_NUM_MIOS 54
+
+#define ZYNQ_PCTRL_MIO_MST_TRI0 0x10c
+#define ZYNQ_PCTRL_MIO_MST_TRI1 0x110
+
+#define ZYNQ_PINMUX_MUX_SHIFT 1
+#define ZYNQ_PINMUX_MUX_MASK (0x7f << ZYNQ_PINMUX_MUX_SHIFT)
+
+/**
+ * struct zynq_pinctrl - driver data
+ * @pctrl: Pinctrl device
+ * @syscon: Syscon regmap
+ * @pctrl_offset: Offset for pinctrl into the @syscon space
+ * @groups: Pingroups
+ * @ngroups: Number of @groups
+ * @funcs: Pinmux functions
+ * @nfuncs: Number of @funcs
+ */
+struct zynq_pinctrl {
+ struct pinctrl_dev *pctrl;
+ struct regmap *syscon;
+ u32 pctrl_offset;
+ const struct zynq_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct zynq_pinmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+struct zynq_pctrl_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned npins;
+};
+
+/**
+ * struct zynq_pinmux_function - a pinmux function
+ * @name: Name of the pinmux function.
+ * @groups: List of pingroups for this function.
+ * @ngroups: Number of entries in @groups.
+ * @mux_val: Selector for this function
+ * @mux: Offset of function specific mux
+ * @mux_mask: Mask for function specific selector
+ * @mux_shift: Shift for function specific selector
+ */
+struct zynq_pinmux_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+ unsigned int mux_val;
+ u32 mux;
+ u32 mux_mask;
+ u8 mux_shift;
+};
+
+enum zynq_pinmux_functions {
+ ZYNQ_PMUX_can0,
+ ZYNQ_PMUX_can1,
+ ZYNQ_PMUX_ethernet0,
+ ZYNQ_PMUX_ethernet1,
+ ZYNQ_PMUX_gpio0,
+ ZYNQ_PMUX_i2c0,
+ ZYNQ_PMUX_i2c1,
+ ZYNQ_PMUX_mdio0,
+ ZYNQ_PMUX_mdio1,
+ ZYNQ_PMUX_qspi0,
+ ZYNQ_PMUX_qspi1,
+ ZYNQ_PMUX_qspi_fbclk,
+ ZYNQ_PMUX_qspi_cs1,
+ ZYNQ_PMUX_spi0,
+ ZYNQ_PMUX_spi1,
+ ZYNQ_PMUX_sdio0,
+ ZYNQ_PMUX_sdio0_pc,
+ ZYNQ_PMUX_sdio0_cd,
+ ZYNQ_PMUX_sdio0_wp,
+ ZYNQ_PMUX_sdio1,
+ ZYNQ_PMUX_sdio1_pc,
+ ZYNQ_PMUX_sdio1_cd,
+ ZYNQ_PMUX_sdio1_wp,
+ ZYNQ_PMUX_smc0_nor,
+ ZYNQ_PMUX_smc0_nor_cs1,
+ ZYNQ_PMUX_smc0_nor_addr25,
+ ZYNQ_PMUX_smc0_nand,
+ ZYNQ_PMUX_ttc0,
+ ZYNQ_PMUX_ttc1,
+ ZYNQ_PMUX_uart0,
+ ZYNQ_PMUX_uart1,
+ ZYNQ_PMUX_usb0,
+ ZYNQ_PMUX_usb1,
+ ZYNQ_PMUX_swdt0,
+ ZYNQ_PMUX_MAX_FUNC
+};
+
+const struct pinctrl_pin_desc zynq_pins[] = {
+ PINCTRL_PIN(0, "MIO0"),
+ PINCTRL_PIN(1, "MIO1"),
+ PINCTRL_PIN(2, "MIO2"),
+ PINCTRL_PIN(3, "MIO3"),
+ PINCTRL_PIN(4, "MIO4"),
+ PINCTRL_PIN(5, "MIO5"),
+ PINCTRL_PIN(6, "MIO6"),
+ PINCTRL_PIN(7, "MIO7"),
+ PINCTRL_PIN(8, "MIO8"),
+ PINCTRL_PIN(9, "MIO9"),
+ PINCTRL_PIN(10, "MIO10"),
+ PINCTRL_PIN(11, "MIO11"),
+ PINCTRL_PIN(12, "MIO12"),
+ PINCTRL_PIN(13, "MIO13"),
+ PINCTRL_PIN(14, "MIO14"),
+ PINCTRL_PIN(15, "MIO15"),
+ PINCTRL_PIN(16, "MIO16"),
+ PINCTRL_PIN(17, "MIO17"),
+ PINCTRL_PIN(18, "MIO18"),
+ PINCTRL_PIN(19, "MIO19"),
+ PINCTRL_PIN(20, "MIO20"),
+ PINCTRL_PIN(21, "MIO21"),
+ PINCTRL_PIN(22, "MIO22"),
+ PINCTRL_PIN(23, "MIO23"),
+ PINCTRL_PIN(24, "MIO24"),
+ PINCTRL_PIN(25, "MIO25"),
+ PINCTRL_PIN(26, "MIO26"),
+ PINCTRL_PIN(27, "MIO27"),
+ PINCTRL_PIN(28, "MIO28"),
+ PINCTRL_PIN(29, "MIO29"),
+ PINCTRL_PIN(30, "MIO30"),
+ PINCTRL_PIN(31, "MIO31"),
+ PINCTRL_PIN(32, "MIO32"),
+ PINCTRL_PIN(33, "MIO33"),
+ PINCTRL_PIN(34, "MIO34"),
+ PINCTRL_PIN(35, "MIO35"),
+ PINCTRL_PIN(36, "MIO36"),
+ PINCTRL_PIN(37, "MIO37"),
+ PINCTRL_PIN(38, "MIO38"),
+ PINCTRL_PIN(39, "MIO39"),
+ PINCTRL_PIN(40, "MIO40"),
+ PINCTRL_PIN(41, "MIO41"),
+ PINCTRL_PIN(42, "MIO42"),
+ PINCTRL_PIN(43, "MIO43"),
+ PINCTRL_PIN(44, "MIO44"),
+ PINCTRL_PIN(45, "MIO45"),
+ PINCTRL_PIN(46, "MIO46"),
+ PINCTRL_PIN(47, "MIO47"),
+ PINCTRL_PIN(48, "MIO48"),
+ PINCTRL_PIN(49, "MIO49"),
+ PINCTRL_PIN(50, "MIO50"),
+ PINCTRL_PIN(51, "MIO51"),
+ PINCTRL_PIN(52, "MIO52"),
+ PINCTRL_PIN(53, "MIO53"),
+ PINCTRL_PIN(54, "EMIO_SD0_WP"),
+ PINCTRL_PIN(55, "EMIO_SD0_CD"),
+ PINCTRL_PIN(56, "EMIO_SD1_WP"),
+ PINCTRL_PIN(57, "EMIO_SD1_CD"),
+};
+
+/* pin groups */
+static const unsigned int ethernet0_0_pins[] = {16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27};
+static const unsigned int ethernet1_0_pins[] = {28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39};
+static const unsigned int mdio0_0_pins[] = {52, 53};
+static const unsigned int mdio1_0_pins[] = {52, 53};
+static const unsigned int qspi0_0_pins[] = {1, 2, 3, 4, 5, 6};
+
+static const unsigned int qspi1_0_pins[] = {9, 10, 11, 12, 13};
+static const unsigned int qspi_cs1_pins[] = {0};
+static const unsigned int qspi_fbclk_pins[] = {8};
+static const unsigned int spi0_0_pins[] = {16, 17, 18, 19, 20, 21};
+static const unsigned int spi0_1_pins[] = {28, 29, 30, 31, 32, 33};
+static const unsigned int spi0_2_pins[] = {40, 41, 42, 43, 44, 45};
+static const unsigned int spi1_0_pins[] = {10, 11, 12, 13, 14, 15};
+static const unsigned int spi1_1_pins[] = {22, 23, 24, 25, 26, 27};
+static const unsigned int spi1_2_pins[] = {34, 35, 36, 37, 38, 39};
+static const unsigned int spi1_3_pins[] = {46, 47, 48, 49, 50, 51};
+static const unsigned int sdio0_0_pins[] = {16, 17, 18, 19, 20, 21};
+static const unsigned int sdio0_1_pins[] = {28, 29, 30, 31, 32, 33};
+static const unsigned int sdio0_2_pins[] = {40, 41, 42, 43, 44, 45};
+static const unsigned int sdio1_0_pins[] = {10, 11, 12, 13, 14, 15};
+static const unsigned int sdio1_1_pins[] = {22, 23, 24, 25, 26, 27};
+static const unsigned int sdio1_2_pins[] = {34, 35, 36, 37, 38, 39};
+static const unsigned int sdio1_3_pins[] = {46, 47, 48, 49, 50, 51};
+static const unsigned int sdio0_emio_wp_pins[] = {54};
+static const unsigned int sdio0_emio_cd_pins[] = {55};
+static const unsigned int sdio1_emio_wp_pins[] = {56};
+static const unsigned int sdio1_emio_cd_pins[] = {57};
+static const unsigned int smc0_nor_pins[] = {0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39};
+static const unsigned int smc0_nor_cs1_pins[] = {1};
+static const unsigned int smc0_nor_addr25_pins[] = {1};
+static const unsigned int smc0_nand_pins[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 16, 17, 18, 19, 20,
+ 21, 22, 23};
+/* Note: CAN MIO clock inputs are modeled in the clock framework */
+static const unsigned int can0_0_pins[] = {10, 11};
+static const unsigned int can0_1_pins[] = {14, 15};
+static const unsigned int can0_2_pins[] = {18, 19};
+static const unsigned int can0_3_pins[] = {22, 23};
+static const unsigned int can0_4_pins[] = {26, 27};
+static const unsigned int can0_5_pins[] = {30, 31};
+static const unsigned int can0_6_pins[] = {34, 35};
+static const unsigned int can0_7_pins[] = {38, 39};
+static const unsigned int can0_8_pins[] = {42, 43};
+static const unsigned int can0_9_pins[] = {46, 47};
+static const unsigned int can0_10_pins[] = {50, 51};
+static const unsigned int can1_0_pins[] = {8, 9};
+static const unsigned int can1_1_pins[] = {12, 13};
+static const unsigned int can1_2_pins[] = {16, 17};
+static const unsigned int can1_3_pins[] = {20, 21};
+static const unsigned int can1_4_pins[] = {24, 25};
+static const unsigned int can1_5_pins[] = {28, 29};
+static const unsigned int can1_6_pins[] = {32, 33};
+static const unsigned int can1_7_pins[] = {36, 37};
+static const unsigned int can1_8_pins[] = {40, 41};
+static const unsigned int can1_9_pins[] = {44, 45};
+static const unsigned int can1_10_pins[] = {48, 49};
+static const unsigned int can1_11_pins[] = {52, 53};
+static const unsigned int uart0_0_pins[] = {10, 11};
+static const unsigned int uart0_1_pins[] = {14, 15};
+static const unsigned int uart0_2_pins[] = {18, 19};
+static const unsigned int uart0_3_pins[] = {22, 23};
+static const unsigned int uart0_4_pins[] = {26, 27};
+static const unsigned int uart0_5_pins[] = {30, 31};
+static const unsigned int uart0_6_pins[] = {34, 35};
+static const unsigned int uart0_7_pins[] = {38, 39};
+static const unsigned int uart0_8_pins[] = {42, 43};
+static const unsigned int uart0_9_pins[] = {46, 47};
+static const unsigned int uart0_10_pins[] = {50, 51};
+static const unsigned int uart1_0_pins[] = {8, 9};
+static const unsigned int uart1_1_pins[] = {12, 13};
+static const unsigned int uart1_2_pins[] = {16, 17};
+static const unsigned int uart1_3_pins[] = {20, 21};
+static const unsigned int uart1_4_pins[] = {24, 25};
+static const unsigned int uart1_5_pins[] = {28, 29};
+static const unsigned int uart1_6_pins[] = {32, 33};
+static const unsigned int uart1_7_pins[] = {36, 37};
+static const unsigned int uart1_8_pins[] = {40, 41};
+static const unsigned int uart1_9_pins[] = {44, 45};
+static const unsigned int uart1_10_pins[] = {48, 49};
+static const unsigned int uart1_11_pins[] = {52, 53};
+static const unsigned int i2c0_0_pins[] = {10, 11};
+static const unsigned int i2c0_1_pins[] = {14, 15};
+static const unsigned int i2c0_2_pins[] = {18, 19};
+static const unsigned int i2c0_3_pins[] = {22, 23};
+static const unsigned int i2c0_4_pins[] = {26, 27};
+static const unsigned int i2c0_5_pins[] = {30, 31};
+static const unsigned int i2c0_6_pins[] = {34, 35};
+static const unsigned int i2c0_7_pins[] = {38, 39};
+static const unsigned int i2c0_8_pins[] = {42, 43};
+static const unsigned int i2c0_9_pins[] = {46, 47};
+static const unsigned int i2c0_10_pins[] = {50, 51};
+static const unsigned int i2c1_0_pins[] = {12, 13};
+static const unsigned int i2c1_1_pins[] = {16, 17};
+static const unsigned int i2c1_2_pins[] = {20, 21};
+static const unsigned int i2c1_3_pins[] = {24, 25};
+static const unsigned int i2c1_4_pins[] = {28, 29};
+static const unsigned int i2c1_5_pins[] = {32, 33};
+static const unsigned int i2c1_6_pins[] = {36, 37};
+static const unsigned int i2c1_7_pins[] = {40, 41};
+static const unsigned int i2c1_8_pins[] = {44, 45};
+static const unsigned int i2c1_9_pins[] = {48, 49};
+static const unsigned int i2c1_10_pins[] = {52, 53};
+static const unsigned int ttc0_0_pins[] = {18, 19};
+static const unsigned int ttc0_1_pins[] = {30, 31};
+static const unsigned int ttc0_2_pins[] = {42, 43};
+static const unsigned int ttc1_0_pins[] = {16, 17};
+static const unsigned int ttc1_1_pins[] = {28, 29};
+static const unsigned int ttc1_2_pins[] = {40, 41};
+static const unsigned int swdt0_0_pins[] = {14, 15};
+static const unsigned int swdt0_1_pins[] = {26, 27};
+static const unsigned int swdt0_2_pins[] = {38, 39};
+static const unsigned int swdt0_3_pins[] = {50, 51};
+static const unsigned int swdt0_4_pins[] = {52, 53};
+static const unsigned int gpio0_0_pins[] = {0};
+static const unsigned int gpio0_1_pins[] = {1};
+static const unsigned int gpio0_2_pins[] = {2};
+static const unsigned int gpio0_3_pins[] = {3};
+static const unsigned int gpio0_4_pins[] = {4};
+static const unsigned int gpio0_5_pins[] = {5};
+static const unsigned int gpio0_6_pins[] = {6};
+static const unsigned int gpio0_7_pins[] = {7};
+static const unsigned int gpio0_8_pins[] = {8};
+static const unsigned int gpio0_9_pins[] = {9};
+static const unsigned int gpio0_10_pins[] = {10};
+static const unsigned int gpio0_11_pins[] = {11};
+static const unsigned int gpio0_12_pins[] = {12};
+static const unsigned int gpio0_13_pins[] = {13};
+static const unsigned int gpio0_14_pins[] = {14};
+static const unsigned int gpio0_15_pins[] = {15};
+static const unsigned int gpio0_16_pins[] = {16};
+static const unsigned int gpio0_17_pins[] = {17};
+static const unsigned int gpio0_18_pins[] = {18};
+static const unsigned int gpio0_19_pins[] = {19};
+static const unsigned int gpio0_20_pins[] = {20};
+static const unsigned int gpio0_21_pins[] = {21};
+static const unsigned int gpio0_22_pins[] = {22};
+static const unsigned int gpio0_23_pins[] = {23};
+static const unsigned int gpio0_24_pins[] = {24};
+static const unsigned int gpio0_25_pins[] = {25};
+static const unsigned int gpio0_26_pins[] = {26};
+static const unsigned int gpio0_27_pins[] = {27};
+static const unsigned int gpio0_28_pins[] = {28};
+static const unsigned int gpio0_29_pins[] = {29};
+static const unsigned int gpio0_30_pins[] = {30};
+static const unsigned int gpio0_31_pins[] = {31};
+static const unsigned int gpio0_32_pins[] = {32};
+static const unsigned int gpio0_33_pins[] = {33};
+static const unsigned int gpio0_34_pins[] = {34};
+static const unsigned int gpio0_35_pins[] = {35};
+static const unsigned int gpio0_36_pins[] = {36};
+static const unsigned int gpio0_37_pins[] = {37};
+static const unsigned int gpio0_38_pins[] = {38};
+static const unsigned int gpio0_39_pins[] = {39};
+static const unsigned int gpio0_40_pins[] = {40};
+static const unsigned int gpio0_41_pins[] = {41};
+static const unsigned int gpio0_42_pins[] = {42};
+static const unsigned int gpio0_43_pins[] = {43};
+static const unsigned int gpio0_44_pins[] = {44};
+static const unsigned int gpio0_45_pins[] = {45};
+static const unsigned int gpio0_46_pins[] = {46};
+static const unsigned int gpio0_47_pins[] = {47};
+static const unsigned int gpio0_48_pins[] = {48};
+static const unsigned int gpio0_49_pins[] = {49};
+static const unsigned int gpio0_50_pins[] = {50};
+static const unsigned int gpio0_51_pins[] = {51};
+static const unsigned int gpio0_52_pins[] = {52};
+static const unsigned int gpio0_53_pins[] = {53};
+static const unsigned int usb0_0_pins[] = {28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39};
+static const unsigned int usb1_0_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51};
+
+#define DEFINE_ZYNQ_PINCTRL_GRP(nm) \
+ { \
+ .name = #nm "_grp", \
+ .pins = nm ## _pins, \
+ .npins = ARRAY_SIZE(nm ## _pins), \
+ }
+
+struct zynq_pctrl_group zynq_pctrl_groups[] = {
+ DEFINE_ZYNQ_PINCTRL_GRP(ethernet0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(ethernet1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(mdio0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(mdio1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(qspi0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(qspi1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(qspi_fbclk),
+ DEFINE_ZYNQ_PINCTRL_GRP(qspi_cs1),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi1_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi1_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(spi1_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio1_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio1_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio1_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio0_emio_wp),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio0_emio_cd),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio1_emio_wp),
+ DEFINE_ZYNQ_PINCTRL_GRP(sdio1_emio_cd),
+ DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor),
+ DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_cs1),
+ DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_addr25),
+ DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(can0_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(can1_11),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart0_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(uart1_11),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c0_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(i2c1_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(ttc0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(ttc0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(ttc0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(ttc1_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(ttc1_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(ttc1_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(swdt0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(swdt0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(swdt0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(swdt0_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(swdt0_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_1),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_2),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_3),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_4),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_5),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_6),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_7),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_8),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_9),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_10),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_11),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_12),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_13),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_14),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_15),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_16),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_17),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_18),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_19),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_20),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_21),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_22),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_23),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_24),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_25),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_26),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_27),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_28),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_29),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_30),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_31),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_32),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_33),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_34),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_35),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_36),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_37),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_38),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_39),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_40),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_41),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_42),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_43),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_44),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_45),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_46),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_47),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_48),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_49),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_50),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_51),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_52),
+ DEFINE_ZYNQ_PINCTRL_GRP(gpio0_53),
+ DEFINE_ZYNQ_PINCTRL_GRP(usb0_0),
+ DEFINE_ZYNQ_PINCTRL_GRP(usb1_0),
+};
+
+/* function groups */
+static const char * const ethernet0_groups[] = {"ethernet0_0_grp"};
+static const char * const ethernet1_groups[] = {"ethernet1_0_grp"};
+static const char * const usb0_groups[] = {"usb0_0_grp"};
+static const char * const usb1_groups[] = {"usb1_0_grp"};
+static const char * const mdio0_groups[] = {"mdio0_0_grp"};
+static const char * const mdio1_groups[] = {"mdio1_0_grp"};
+static const char * const qspi0_groups[] = {"qspi0_0_grp"};
+static const char * const qspi1_groups[] = {"qspi1_0_grp"};
+static const char * const qspi_fbclk_groups[] = {"qspi_fbclk_grp"};
+static const char * const qspi_cs1_groups[] = {"qspi_cs1_grp"};
+static const char * const spi0_groups[] = {"spi0_0_grp", "spi0_1_grp",
+ "spi0_2_grp"};
+static const char * const spi1_groups[] = {"spi1_0_grp", "spi1_1_grp",
+ "spi1_2_grp", "spi1_3_grp"};
+static const char * const sdio0_groups[] = {"sdio0_0_grp", "sdio0_1_grp",
+ "sdio0_2_grp"};
+static const char * const sdio1_groups[] = {"sdio1_0_grp", "sdio1_1_grp",
+ "sdio1_2_grp", "sdio1_3_grp"};
+static const char * const sdio0_pc_groups[] = {"gpio0_0_grp",
+ "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp",
+ "gpio0_8_grp", "gpio0_10_grp", "gpio0_12_grp",
+ "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp",
+ "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp",
+ "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp",
+ "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp",
+ "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp",
+ "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp",
+ "gpio0_50_grp", "gpio0_52_grp"};
+static const char * const sdio1_pc_groups[] = {"gpio0_1_grp",
+ "gpio0_3_grp", "gpio0_5_grp", "gpio0_7_grp",
+ "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp",
+ "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp",
+ "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp",
+ "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp",
+ "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp",
+ "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
+ "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
+ "gpio0_51_grp", "gpio0_53_grp"};
+static const char * const sdio0_cd_groups[] = {"gpio0_0_grp",
+ "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp",
+ "gpio0_10_grp", "gpio0_12_grp",
+ "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp",
+ "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp",
+ "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp",
+ "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp",
+ "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp",
+ "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp",
+ "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp",
+ "gpio0_3_grp", "gpio0_5_grp",
+ "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp",
+ "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp",
+ "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp",
+ "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp",
+ "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp",
+ "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
+ "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
+ "gpio0_51_grp", "gpio0_53_grp", "sdio0_emio_cd_grp"};
+static const char * const sdio0_wp_groups[] = {"gpio0_0_grp",
+ "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp",
+ "gpio0_10_grp", "gpio0_12_grp",
+ "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp",
+ "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp",
+ "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp",
+ "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp",
+ "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp",
+ "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp",
+ "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp",
+ "gpio0_3_grp", "gpio0_5_grp",
+ "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp",
+ "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp",
+ "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp",
+ "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp",
+ "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp",
+ "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
+ "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
+ "gpio0_51_grp", "gpio0_53_grp", "sdio0_emio_wp_grp"};
+static const char * const sdio1_cd_groups[] = {"gpio0_0_grp",
+ "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp",
+ "gpio0_10_grp", "gpio0_12_grp",
+ "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp",
+ "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp",
+ "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp",
+ "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp",
+ "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp",
+ "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp",
+ "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp",
+ "gpio0_3_grp", "gpio0_5_grp",
+ "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp",
+ "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp",
+ "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp",
+ "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp",
+ "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp",
+ "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
+ "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
+ "gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_cd_grp"};
+static const char * const sdio1_wp_groups[] = {"gpio0_0_grp",
+ "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp",
+ "gpio0_10_grp", "gpio0_12_grp",
+ "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp",
+ "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp",
+ "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp",
+ "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp",
+ "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp",
+ "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp",
+ "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp",
+ "gpio0_3_grp", "gpio0_5_grp",
+ "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp",
+ "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp",
+ "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp",
+ "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp",
+ "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp",
+ "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
+ "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
+ "gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_wp_grp"};
+static const char * const smc0_nor_groups[] = {"smc0_nor_grp"};
+static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"};
+static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"};
+static const char * const smc0_nand_groups[] = {"smc0_nand_grp"};
+static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp",
+ "can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp",
+ "can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp",
+ "can0_10_grp"};
+static const char * const can1_groups[] = {"can1_0_grp", "can1_1_grp",
+ "can1_2_grp", "can1_3_grp", "can1_4_grp", "can1_5_grp",
+ "can1_6_grp", "can1_7_grp", "can1_8_grp", "can1_9_grp",
+ "can1_10_grp", "can1_11_grp"};
+static const char * const uart0_groups[] = {"uart0_0_grp", "uart0_1_grp",
+ "uart0_2_grp", "uart0_3_grp", "uart0_4_grp", "uart0_5_grp",
+ "uart0_6_grp", "uart0_7_grp", "uart0_8_grp", "uart0_9_grp",
+ "uart0_10_grp"};
+static const char * const uart1_groups[] = {"uart1_0_grp", "uart1_1_grp",
+ "uart1_2_grp", "uart1_3_grp", "uart1_4_grp", "uart1_5_grp",
+ "uart1_6_grp", "uart1_7_grp", "uart1_8_grp", "uart1_9_grp",
+ "uart1_10_grp", "uart1_11_grp"};
+static const char * const i2c0_groups[] = {"i2c0_0_grp", "i2c0_1_grp",
+ "i2c0_2_grp", "i2c0_3_grp", "i2c0_4_grp", "i2c0_5_grp",
+ "i2c0_6_grp", "i2c0_7_grp", "i2c0_8_grp", "i2c0_9_grp",
+ "i2c0_10_grp"};
+static const char * const i2c1_groups[] = {"i2c1_0_grp", "i2c1_1_grp",
+ "i2c1_2_grp", "i2c1_3_grp", "i2c1_4_grp", "i2c1_5_grp",
+ "i2c1_6_grp", "i2c1_7_grp", "i2c1_8_grp", "i2c1_9_grp",
+ "i2c1_10_grp"};
+static const char * const ttc0_groups[] = {"ttc0_0_grp", "ttc0_1_grp",
+ "ttc0_2_grp"};
+static const char * const ttc1_groups[] = {"ttc1_0_grp", "ttc1_1_grp",
+ "ttc1_2_grp"};
+static const char * const swdt0_groups[] = {"swdt0_0_grp", "swdt0_1_grp",
+ "swdt0_2_grp", "swdt0_3_grp", "swdt0_4_grp"};
+static const char * const gpio0_groups[] = {"gpio0_0_grp",
+ "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp",
+ "gpio0_8_grp", "gpio0_10_grp", "gpio0_12_grp",
+ "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp",
+ "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp",
+ "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp",
+ "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp",
+ "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp",
+ "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp",
+ "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp",
+ "gpio0_3_grp", "gpio0_5_grp", "gpio0_7_grp",
+ "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp",
+ "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp",
+ "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp",
+ "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp",
+ "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp",
+ "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
+ "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
+ "gpio0_51_grp", "gpio0_53_grp"};
+
+#define DEFINE_ZYNQ_PINMUX_FUNCTION(fname, mval) \
+ [ZYNQ_PMUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ .mux_val = mval, \
+ }
+
+#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, mux, mask, shift) \
+ [ZYNQ_PMUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ .mux_val = mval, \
+ .mux_mask = mask, \
+ .mux_shift = shift, \
+ }
+
+#define ZYNQ_SDIO_WP_SHIFT 0
+#define ZYNQ_SDIO_WP_MASK (0x3f << ZYNQ_SDIO_WP_SHIFT)
+#define ZYNQ_SDIO_CD_SHIFT 16
+#define ZYNQ_SDIO_CD_MASK (0x3f << ZYNQ_SDIO_CD_SHIFT)
+
+static const struct zynq_pinmux_function zynq_pmux_functions[] = {
+ DEFINE_ZYNQ_PINMUX_FUNCTION(ethernet0, 1),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(ethernet1, 1),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(usb0, 2),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(usb1, 2),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(mdio0, 0x40),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(mdio1, 0x50),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(qspi0, 1),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(qspi1, 1),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(qspi_fbclk, 1),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(qspi_cs1, 1),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(spi0, 0x50),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(spi1, 0x50),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0, 0x40),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0_pc, 0xc),
+ DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 130, ZYNQ_SDIO_WP_MASK,
+ ZYNQ_SDIO_WP_SHIFT),
+ DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 130, ZYNQ_SDIO_CD_MASK,
+ ZYNQ_SDIO_CD_SHIFT),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1, 0x40),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1_pc, 0xc),
+ DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 134, ZYNQ_SDIO_WP_MASK,
+ ZYNQ_SDIO_WP_SHIFT),
+ DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 134, ZYNQ_SDIO_CD_MASK,
+ ZYNQ_SDIO_CD_SHIFT),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor, 4),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_cs1, 8),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_addr25, 4),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nand, 8),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(can0, 0x10),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(can1, 0x10),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(uart0, 0x70),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(uart1, 0x70),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(i2c0, 0x20),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(i2c1, 0x20),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(ttc0, 0x60),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(ttc1, 0x60),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(swdt0, 0x30),
+ DEFINE_ZYNQ_PINMUX_FUNCTION(gpio0, 0),
+};
+
+
+/* pinctrl */
+static int zynq_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+static const char *zynq_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+static int zynq_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *num_pins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops zynq_pctrl_ops = {
+ .get_groups_count = zynq_pctrl_get_groups_count,
+ .get_group_name = zynq_pctrl_get_group_name,
+ .get_group_pins = zynq_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+/* pinmux */
+static int zynq_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+static const char *zynq_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+static int zynq_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+ return 0;
+}
+
+static int zynq_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ int i, ret;
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynq_pctrl_group *pgrp = &pctrl->groups[group];
+ const struct zynq_pinmux_function *func = &pctrl->funcs[function];
+
+ /*
+ * SD WP & CD are special. They have dedicated registers
+ * to mux them in
+ */
+ if (function == ZYNQ_PMUX_sdio0_cd || function == ZYNQ_PMUX_sdio0_wp ||
+ function == ZYNQ_PMUX_sdio1_cd ||
+ function == ZYNQ_PMUX_sdio1_wp) {
+ u32 reg;
+
+ ret = regmap_read(pctrl->syscon,
+ pctrl->pctrl_offset + func->mux, &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~func->mux_mask;
+ reg |= pgrp->pins[0] << func->mux_shift;
+ ret = regmap_write(pctrl->syscon,
+ pctrl->pctrl_offset + func->mux, reg);
+ if (ret)
+ return ret;
+ } else {
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+ u32 reg, addr = pctrl->pctrl_offset + (4 * pin);
+
+ ret = regmap_read(pctrl->syscon, addr, &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~ZYNQ_PINMUX_MUX_MASK;
+ reg |= func->mux_val << ZYNQ_PINMUX_MUX_SHIFT;
+ ret = regmap_write(pctrl->syscon, addr, reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
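
[Editor's note] Both branches of zynq_pinmux_set_mux() end in the same read-modify-write of a mux field: clear the field with its mask, then OR in the new selector at the field's shift (ZYNQ_PINMUX_MUX_MASK/SHIFT for ordinary pins, the ZYNQ_SDIO_*_MASK/SHIFT fields for the dedicated WP/CD selectors). A tiny sketch of that step with the register access reduced to a local variable:

    /*
     * Sketch of the field update shared by both branches above. The
     * mask/shift values match the ZYNQ_PINMUX_MUX_* macros; the register
     * itself is just a local variable here.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define MUX_SHIFT 1
    #define MUX_MASK  (0x7f << MUX_SHIFT)

    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                              uint32_t val)
    {
        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint32_t reg = 0x00001601;   /* arbitrary starting register contents */

        /* select mux value 0x20 (the i2c selector in the table above) */
        reg = set_field(reg, MUX_MASK, MUX_SHIFT, 0x20);
        printf("reg = 0x%08x\n", (unsigned int)reg);
        return 0;
    }
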
+
+static const struct pinmux_ops zynq_pinmux_ops = {
+ .get_functions_count = zynq_pmux_get_functions_count,
+ .get_function_name = zynq_pmux_get_function_name,
+ .get_function_groups = zynq_pmux_get_function_groups,
+ .set_mux = zynq_pinmux_set_mux,
+};
+
+/* pinconfig */
+#define ZYNQ_PINCONF_TRISTATE BIT(0)
+#define ZYNQ_PINCONF_SPEED BIT(8)
+#define ZYNQ_PINCONF_PULLUP BIT(12)
+#define ZYNQ_PINCONF_DISABLE_RECVR BIT(13)
+
+#define ZYNQ_PINCONF_IOTYPE_SHIFT 9
+#define ZYNQ_PINCONF_IOTYPE_MASK (7 << ZYNQ_PINCONF_IOTYPE_SHIFT)
+
+enum zynq_io_standards {
+ zynq_iostd_min,
+ zynq_iostd_lvcmos18,
+ zynq_iostd_lvcmos25,
+ zynq_iostd_lvcmos33,
+ zynq_iostd_hstl,
+ zynq_iostd_max
+};
+
+/**
+ * enum zynq_pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
+ * this parameter (in a custom format) tells the driver which alternative
+ * IO standard to use.
+ */
+enum zynq_pin_config_param {
+ PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params zynq_dt_params[] = {
+ {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item zynq_conf_items[ARRAY_SIZE(zynq_dt_params)] = {
+ PCONFDUMP(PIN_CONFIG_IOSTANDARD, "IO-standard", NULL, true),
+};
+#endif
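+
+/*
+ * Illustrative device tree usage of the custom "io-standard" parameter
+ * (node and pin names here are hypothetical, not taken from the binding
+ * document; argument values follow enum zynq_io_standards above):
+ *
+ * conf-mio {
+ * pins = "MIO40";
+ * io-standard = <3>; selects zynq_iostd_lvcmos33
+ * };
+ */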
+
+static unsigned int zynq_pinconf_iostd_get(u32 reg)
+{
+ return (reg & ZYNQ_PINCONF_IOTYPE_MASK) >> ZYNQ_PINCONF_IOTYPE_SHIFT;
+}
+
+static int zynq_pinconf_cfg_get(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config)
+{
+ u32 reg;
+ int ret;
+ unsigned int arg = 0;
+ unsigned int param = pinconf_to_config_param(*config);
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (pin >= ZYNQ_NUM_MIOS)
+ return -ENOTSUPP;
+
+ ret = regmap_read(pctrl->syscon, pctrl->pctrl_offset + (4 * pin), &reg);
+ if (ret)
+ return -EIO;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!(reg & ZYNQ_PINCONF_PULLUP))
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ if (!(reg & ZYNQ_PINCONF_TRISTATE))
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (reg & ZYNQ_PINCONF_PULLUP || reg & ZYNQ_PINCONF_TRISTATE)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ arg = !!(reg & ZYNQ_PINCONF_SPEED);
+ break;
+ case PIN_CONFIG_LOW_POWER_MODE:
+ {
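+ /* "low power" here maps to disabling the HSTL receiver on this pin */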
+ enum zynq_io_standards iostd = zynq_pinconf_iostd_get(reg);
+
+ if (iostd != zynq_iostd_hstl)
+ return -EINVAL;
+ if (!(reg & ZYNQ_PINCONF_DISABLE_RECVR))
+ return -EINVAL;
+ arg = !!(reg & ZYNQ_PINCONF_DISABLE_RECVR);
+ break;
+ }
+ case PIN_CONFIG_IOSTANDARD:
+ arg = zynq_pinconf_iostd_get(reg);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int zynq_pinconf_cfg_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ int i, ret;
+ u32 reg;
+ u32 pullup = 0;
+ u32 tristate = 0;
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (pin >= ZYNQ_NUM_MIOS)
+ return -ENOTSUPP;
+
+ ret = regmap_read(pctrl->syscon, pctrl->pctrl_offset + (4 * pin), &reg);
+ if (ret)
+ return -EIO;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pullup = ZYNQ_PINCONF_PULLUP;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ tristate = ZYNQ_PINCONF_TRISTATE;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ reg &= ~(ZYNQ_PINCONF_PULLUP | ZYNQ_PINCONF_TRISTATE);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (arg)
+ reg |= ZYNQ_PINCONF_SPEED;
+ else
+ reg &= ~ZYNQ_PINCONF_SPEED;
+
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ if (arg <= zynq_iostd_min || arg >= zynq_iostd_max) {
+ dev_warn(pctldev->dev,
+ "unsupported IO standard '%u'\n",
+ arg);
+ break;
+ }
+ reg &= ~ZYNQ_PINCONF_IOTYPE_MASK;
+ reg |= arg << ZYNQ_PINCONF_IOTYPE_SHIFT;
+ break;
+ case PIN_CONFIG_LOW_POWER_MODE:
+ if (arg)
+ reg |= ZYNQ_PINCONF_DISABLE_RECVR;
+ else
+ reg &= ~ZYNQ_PINCONF_DISABLE_RECVR;
+
+ break;
+ default:
+ dev_warn(pctldev->dev,
+ "unsupported configuration parameter '%u'\n",
+ param);
+ continue;
+ }
+ }
+
+ if (tristate || pullup) {
+ reg &= ~(ZYNQ_PINCONF_PULLUP | ZYNQ_PINCONF_TRISTATE);
+ reg |= tristate | pullup;
+ }
+
+ ret = regmap_write(pctrl->syscon, pctrl->pctrl_offset + (4 * pin), reg);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int zynq_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ int i, ret;
+ struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynq_pctrl_group *pgrp = &pctrl->groups[selector];
+
+ for (i = 0; i < pgrp->npins; i++) {
+ ret = zynq_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops zynq_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zynq_pinconf_cfg_get,
+ .pin_config_set = zynq_pinconf_cfg_set,
+ .pin_config_group_set = zynq_pinconf_group_set,
+};
+
+static struct pinctrl_desc zynq_desc = {
+ .name = "zynq_pinctrl",
+ .pins = zynq_pins,
+ .npins = ARRAY_SIZE(zynq_pins),
+ .pctlops = &zynq_pctrl_ops,
+ .pmxops = &zynq_pinmux_ops,
+ .confops = &zynq_pinconf_ops,
+ .num_custom_params = ARRAY_SIZE(zynq_dt_params),
+ .custom_params = zynq_dt_params,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = zynq_conf_items,
+#endif
+ .owner = THIS_MODULE,
+};
+
+static int zynq_pinctrl_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct zynq_pinctrl *pctrl;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(pctrl->syscon)) {
+ dev_err(&pdev->dev, "unable to get syscon\n");
+ return PTR_ERR(pctrl->syscon);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing IO resource\n");
+ return -ENODEV;
+ }
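+ /*
+ * The MEM resource is not ioremapped; its start address is only used
+ * as an offset into the parent syscon regmap for all register accesses.
+ */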
+ pctrl->pctrl_offset = res->start;
+
+ pctrl->groups = zynq_pctrl_groups;
+ pctrl->ngroups = ARRAY_SIZE(zynq_pctrl_groups);
+ pctrl->funcs = zynq_pmux_functions;
+ pctrl->nfuncs = ARRAY_SIZE(zynq_pmux_functions);
+
+ pctrl->pctrl = pinctrl_register(&zynq_desc, &pdev->dev, pctrl);
+ if (!pctrl->pctrl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_info(&pdev->dev, "zynq pinctrl initialized\n");
+
+ return 0;
+}
+
+static int zynq_pinctrl_remove(struct platform_device *pdev)
+{
+ struct zynq_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_pinctrl_of_match[] = {
+ { .compatible = "xlnx,pinctrl-zynq" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, zynq_pinctrl_of_match);
+
+static struct platform_driver zynq_pinctrl_driver = {
+ .driver = {
+ .name = "zynq-pinctrl",
+ .of_match_table = zynq_pinctrl_of_match,
+ },
+ .probe = zynq_pinctrl_probe,
+ .remove = zynq_pinctrl_remove,
+};
+
+module_platform_driver(zynq_pinctrl_driver);
+
+MODULE_AUTHOR("Sören Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Zynq pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3cd243c26b7d..ea575f60f001 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -47,6 +47,14 @@ config PINCTRL_MSM8X74
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
+config PINCTRL_MSM8916
+ tristate "Qualcomm 8916 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm 8916 platform.
+
config PINCTRL_QCOM_SPMI_PMIC
tristate "Qualcomm SPMI PMIC pin controller driver"
depends on GPIOLIB && OF && SPMI
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index bfd79af5f982..68958702917d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
+obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e730935fa457..a535f9c23678 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -204,21 +204,6 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
return 0;
}
-static int msm_config_get(struct pinctrl_dev *pctldev,
- unsigned int pin,
- unsigned long *config)
-{
- dev_err(pctldev->dev, "pin_config_set op not supported\n");
- return -ENOTSUPP;
-}
-
-static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs, unsigned num_configs)
-{
- dev_err(pctldev->dev, "pin_config_set op not supported\n");
- return -ENOTSUPP;
-}
-
#define MSM_NO_PULL 0
#define MSM_PULL_DOWN 1
#define MSM_KEEPER 2
@@ -372,8 +357,6 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
}
static const struct pinconf_ops msm_pinconf_ops = {
- .pin_config_get = msm_config_get,
- .pin_config_set = msm_config_set,
.pin_config_group_get = msm_config_group_get,
.pin_config_group_set = msm_config_group_set,
};
@@ -865,10 +848,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
{
- int i = 0;
+ int i;
const struct msm_function *func = pctrl->soc->functions;
- for (; i <= pctrl->soc->nfunctions; i++)
+ for (i = 0; i < pctrl->soc->nfunctions; i++)
if (!strcmp(func[i].name, "ps_hold")) {
pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
pctrl->restart_nb.priority = 128;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index b952c4b4a8e9..54fdd04ce9d5 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -70,11 +70,11 @@ struct msm_pingroup {
unsigned *funcs;
unsigned nfuncs;
- s16 ctl_reg;
- s16 io_reg;
- s16 intr_cfg_reg;
- s16 intr_status_reg;
- s16 intr_target_reg;
+ u32 ctl_reg;
+ u32 io_reg;
+ u32 intr_cfg_reg;
+ u32 intr_status_reg;
+ u32 intr_target_reg;
unsigned mux_bit:5;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
new file mode 100644
index 000000000000..20ebf244e80d
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8916_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "SDC1_CLK"),
+ PINCTRL_PIN(123, "SDC1_CMD"),
+ PINCTRL_PIN(124, "SDC1_DATA"),
+ PINCTRL_PIN(125, "SDC2_CLK"),
+ PINCTRL_PIN(126, "SDC2_CMD"),
+ PINCTRL_PIN(127, "SDC2_DATA"),
+ PINCTRL_PIN(128, "QDSD_CLK"),
+ PINCTRL_PIN(129, "QDSD_CMD"),
+ PINCTRL_PIN(130, "QDSD_DATA0"),
+ PINCTRL_PIN(131, "QDSD_DATA1"),
+ PINCTRL_PIN(132, "QDSD_DATA2"),
+ PINCTRL_PIN(133, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+
+static const unsigned int sdc1_clk_pins[] = { 122 };
+static const unsigned int sdc1_cmd_pins[] = { 123 };
+static const unsigned int sdc1_data_pins[] = { 124 };
+static const unsigned int sdc2_clk_pins[] = { 125 };
+static const unsigned int sdc2_cmd_pins[] = { 126 };
+static const unsigned int sdc2_data_pins[] = { 127 };
+static const unsigned int qdsd_clk_pins[] = { 128 };
+static const unsigned int qdsd_cmd_pins[] = { 129 };
+static const unsigned int qdsd_data0_pins[] = { 130 };
+static const unsigned int qdsd_data1_pins[] = { 131 };
+static const unsigned int qdsd_data2_pins[] = { 132 };
+static const unsigned int qdsd_data3_pins[] = { 133 };
+
+#define FUNCTION(fname) \
+ [MSM_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
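+/*
+ * Each MSM8916 TLMM GPIO owns a 0x1000-byte register window: pin control
+ * at offset 0x0, I/O at 0x4, interrupt configuration/target at 0x8 and
+ * interrupt status at 0xc, as encoded below.
+ */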
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ MSM_MUX_gpio, \
+ MSM_MUX_##f1, \
+ MSM_MUX_##f2, \
+ MSM_MUX_##f3, \
+ MSM_MUX_##f4, \
+ MSM_MUX_##f5, \
+ MSM_MUX_##f6, \
+ MSM_MUX_##f7, \
+ MSM_MUX_##f8, \
+ MSM_MUX_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
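+/*
+ * SDC and QDSD pad groups have no per-pin mux or interrupt control; only
+ * pull and drive strength are configurable, at the bit positions passed in.
+ */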
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+enum msm8916_functions {
+ MSM_MUX_adsp_ext,
+ MSM_MUX_alsp_int,
+ MSM_MUX_atest_bbrx0,
+ MSM_MUX_atest_bbrx1,
+ MSM_MUX_atest_char,
+ MSM_MUX_atest_char0,
+ MSM_MUX_atest_char1,
+ MSM_MUX_atest_char2,
+ MSM_MUX_atest_char3,
+ MSM_MUX_atest_combodac,
+ MSM_MUX_atest_gpsadc0,
+ MSM_MUX_atest_gpsadc1,
+ MSM_MUX_atest_tsens,
+ MSM_MUX_atest_wlan0,
+ MSM_MUX_atest_wlan1,
+ MSM_MUX_backlight_en,
+ MSM_MUX_bimc_dte0,
+ MSM_MUX_bimc_dte1,
+ MSM_MUX_blsp_i2c1,
+ MSM_MUX_blsp_i2c2,
+ MSM_MUX_blsp_i2c3,
+ MSM_MUX_blsp_i2c4,
+ MSM_MUX_blsp_i2c5,
+ MSM_MUX_blsp_i2c6,
+ MSM_MUX_blsp_spi1,
+ MSM_MUX_blsp_spi1_cs1,
+ MSM_MUX_blsp_spi1_cs2,
+ MSM_MUX_blsp_spi1_cs3,
+ MSM_MUX_blsp_spi2,
+ MSM_MUX_blsp_spi2_cs1,
+ MSM_MUX_blsp_spi2_cs2,
+ MSM_MUX_blsp_spi2_cs3,
+ MSM_MUX_blsp_spi3,
+ MSM_MUX_blsp_spi3_cs1,
+ MSM_MUX_blsp_spi3_cs2,
+ MSM_MUX_blsp_spi3_cs3,
+ MSM_MUX_blsp_spi4,
+ MSM_MUX_blsp_spi5,
+ MSM_MUX_blsp_spi6,
+ MSM_MUX_blsp_uart1,
+ MSM_MUX_blsp_uart2,
+ MSM_MUX_blsp_uim1,
+ MSM_MUX_blsp_uim2,
+ MSM_MUX_cam1_rst,
+ MSM_MUX_cam1_standby,
+ MSM_MUX_cam_mclk0,
+ MSM_MUX_cam_mclk1,
+ MSM_MUX_cci_async,
+ MSM_MUX_cci_i2c,
+ MSM_MUX_cci_timer0,
+ MSM_MUX_cci_timer1,
+ MSM_MUX_cci_timer2,
+ MSM_MUX_cdc_pdm0,
+ MSM_MUX_codec_mad,
+ MSM_MUX_dbg_out,
+ MSM_MUX_display_5v,
+ MSM_MUX_dmic0_clk,
+ MSM_MUX_dmic0_data,
+ MSM_MUX_dsi_rst,
+ MSM_MUX_ebi0_wrcdc,
+ MSM_MUX_euro_us,
+ MSM_MUX_ext_lpass,
+ MSM_MUX_flash_strobe,
+ MSM_MUX_gcc_gp1_clk_a,
+ MSM_MUX_gcc_gp1_clk_b,
+ MSM_MUX_gcc_gp2_clk_a,
+ MSM_MUX_gcc_gp2_clk_b,
+ MSM_MUX_gcc_gp3_clk_a,
+ MSM_MUX_gcc_gp3_clk_b,
+ MSM_MUX_gpio,
+ MSM_MUX_gsm0_tx0,
+ MSM_MUX_gsm0_tx1,
+ MSM_MUX_gsm1_tx0,
+ MSM_MUX_gsm1_tx1,
+ MSM_MUX_gyro_accl,
+ MSM_MUX_kpsns0,
+ MSM_MUX_kpsns1,
+ MSM_MUX_kpsns2,
+ MSM_MUX_ldo_en,
+ MSM_MUX_ldo_update,
+ MSM_MUX_mag_int,
+ MSM_MUX_mdp_vsync,
+ MSM_MUX_modem_tsync,
+ MSM_MUX_m_voc,
+ MSM_MUX_nav_pps,
+ MSM_MUX_nav_tsync,
+ MSM_MUX_pa_indicator,
+ MSM_MUX_pbs0,
+ MSM_MUX_pbs1,
+ MSM_MUX_pbs2,
+ MSM_MUX_pri_mi2s,
+ MSM_MUX_pri_mi2s_ws,
+ MSM_MUX_prng_rosc,
+ MSM_MUX_pwr_crypto_enabled_a,
+ MSM_MUX_pwr_crypto_enabled_b,
+ MSM_MUX_pwr_modem_enabled_a,
+ MSM_MUX_pwr_modem_enabled_b,
+ MSM_MUX_pwr_nav_enabled_a,
+ MSM_MUX_pwr_nav_enabled_b,
+ MSM_MUX_qdss_ctitrig_in_a0,
+ MSM_MUX_qdss_ctitrig_in_a1,
+ MSM_MUX_qdss_ctitrig_in_b0,
+ MSM_MUX_qdss_ctitrig_in_b1,
+ MSM_MUX_qdss_ctitrig_out_a0,
+ MSM_MUX_qdss_ctitrig_out_a1,
+ MSM_MUX_qdss_ctitrig_out_b0,
+ MSM_MUX_qdss_ctitrig_out_b1,
+ MSM_MUX_qdss_traceclk_a,
+ MSM_MUX_qdss_traceclk_b,
+ MSM_MUX_qdss_tracectl_a,
+ MSM_MUX_qdss_tracectl_b,
+ MSM_MUX_qdss_tracedata_a,
+ MSM_MUX_qdss_tracedata_b,
+ MSM_MUX_reset_n,
+ MSM_MUX_sd_card,
+ MSM_MUX_sd_write,
+ MSM_MUX_sec_mi2s,
+ MSM_MUX_smb_int,
+ MSM_MUX_ssbi_wtr0,
+ MSM_MUX_ssbi_wtr1,
+ MSM_MUX_uim1,
+ MSM_MUX_uim2,
+ MSM_MUX_uim3,
+ MSM_MUX_uim_batt,
+ MSM_MUX_wcss_bt,
+ MSM_MUX_wcss_fm,
+ MSM_MUX_wcss_wlan,
+ MSM_MUX_webcam1_rst,
+ MSM_MUX_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121"
+};
+static const char * const adsp_ext_groups[] = { "gpio38" };
+static const char * const alsp_int_groups[] = { "gpio113" };
+static const char * const atest_bbrx0_groups[] = { "gpio17" };
+static const char * const atest_bbrx1_groups[] = { "gpio16" };
+static const char * const atest_char_groups[] = { "gpio62" };
+static const char * const atest_char0_groups[] = { "gpio60" };
+static const char * const atest_char1_groups[] = { "gpio59" };
+static const char * const atest_char2_groups[] = { "gpio58" };
+static const char * const atest_char3_groups[] = { "gpio57" };
+static const char * const atest_combodac_groups[] = {
+ "gpio4", "gpio12", "gpio13", "gpio20", "gpio21", "gpio28", "gpio29",
+ "gpio30", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio44",
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio69", "gpio107"
+};
+static const char * const atest_gpsadc0_groups[] = { "gpio7" };
+static const char * const atest_gpsadc1_groups[] = { "gpio18" };
+static const char * const atest_tsens_groups[] = { "gpio112" };
+static const char * const atest_wlan0_groups[] = { "gpio22" };
+static const char * const atest_wlan1_groups[] = { "gpio23" };
+static const char * const backlight_en_groups[] = { "gpio98" };
+static const char * const bimc_dte0_groups[] = { "gpio63", "gpio65" };
+static const char * const bimc_dte1_groups[] = { "gpio64", "gpio66" };
+static const char * const blsp_i2c1_groups[] = { "gpio2", "gpio3" };
+static const char * const blsp_i2c2_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_i2c3_groups[] = { "gpio10", "gpio11" };
+static const char * const blsp_i2c4_groups[] = { "gpio14", "gpio15" };
+static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" };
+static const char * const blsp_i2c6_groups[] = { "gpio22", "gpio23" };
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_spi1_cs1_groups[] = { "gpio110" };
+static const char * const blsp_spi1_cs2_groups[] = { "gpio16" };
+static const char * const blsp_spi1_cs3_groups[] = { "gpio4" };
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_spi2_cs1_groups[] = { "gpio121" };
+static const char * const blsp_spi2_cs2_groups[] = { "gpio17" };
+static const char * const blsp_spi2_cs3_groups[] = { "gpio5" };
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_spi3_cs1_groups[] = { "gpio120" };
+static const char * const blsp_spi3_cs2_groups[] = { "gpio37" };
+static const char * const blsp_spi3_cs3_groups[] = { "gpio69" };
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uim1_groups[] = { "gpio0", "gpio1" };
+static const char * const blsp_uim2_groups[] = { "gpio4", "gpio5" };
+static const char * const cam1_rst_groups[] = { "gpio35" };
+static const char * const cam1_standby_groups[] = { "gpio34" };
+static const char * const cam_mclk0_groups[] = { "gpio26" };
+static const char * const cam_mclk1_groups[] = { "gpio27" };
+static const char * const cci_async_groups[] = { "gpio33" };
+static const char * const cci_i2c_groups[] = { "gpio29", "gpio30" };
+static const char * const cci_timer0_groups[] = { "gpio31" };
+static const char * const cci_timer1_groups[] = { "gpio32" };
+static const char * const cci_timer2_groups[] = { "gpio38" };
+static const char * const cdc_pdm0_groups[] = {
+ "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"
+};
+static const char * const codec_mad_groups[] = { "gpio16" };
+static const char * const dbg_out_groups[] = { "gpio47" };
+static const char * const display_5v_groups[] = { "gpio97" };
+static const char * const dmic0_clk_groups[] = { "gpio0" };
+static const char * const dmic0_data_groups[] = { "gpio1" };
+static const char * const dsi_rst_groups[] = { "gpio25" };
+static const char * const ebi0_wrcdc_groups[] = { "gpio67" };
+static const char * const euro_us_groups[] = { "gpio120" };
+static const char * const ext_lpass_groups[] = { "gpio45" };
+static const char * const flash_strobe_groups[] = { "gpio31", "gpio32" };
+static const char * const gcc_gp1_clk_a_groups[] = { "gpio49" };
+static const char * const gcc_gp1_clk_b_groups[] = { "gpio97" };
+static const char * const gcc_gp2_clk_a_groups[] = { "gpio50" };
+static const char * const gcc_gp2_clk_b_groups[] = { "gpio12" };
+static const char * const gcc_gp3_clk_a_groups[] = { "gpio51" };
+static const char * const gcc_gp3_clk_b_groups[] = { "gpio13" };
+static const char * const gsm0_tx0_groups[] = { "gpio99" };
+static const char * const gsm0_tx1_groups[] = { "gpio100" };
+static const char * const gsm1_tx0_groups[] = { "gpio101" };
+static const char * const gsm1_tx1_groups[] = { "gpio102" };
+static const char * const gyro_accl_groups[] = { "gpio115" };
+static const char * const kpsns0_groups[] = { "gpio107" };
+static const char * const kpsns1_groups[] = { "gpio108" };
+static const char * const kpsns2_groups[] = { "gpio109" };
+static const char * const ldo_en_groups[] = { "gpio121" };
+static const char * const ldo_update_groups[] = { "gpio120" };
+static const char * const mag_int_groups[] = { "gpio69" };
+static const char * const mdp_vsync_groups[] = { "gpio24", "gpio25" };
+static const char * const modem_tsync_groups[] = { "gpio95" };
+static const char * const m_voc_groups[] = { "gpio8", "gpio119" };
+static const char * const nav_pps_groups[] = { "gpio95" };
+static const char * const nav_tsync_groups[] = { "gpio95" };
+static const char * const pa_indicator_groups[] = { "gpio86" };
+static const char * const pbs0_groups[] = { "gpio107" };
+static const char * const pbs1_groups[] = { "gpio108" };
+static const char * const pbs2_groups[] = { "gpio109" };
+static const char * const pri_mi2s_groups[] = {
+ "gpio113", "gpio114", "gpio115", "gpio116"
+};
+static const char * const pri_mi2s_ws_groups[] = { "gpio110" };
+static const char * const prng_rosc_groups[] = { "gpio43" };
+static const char * const pwr_crypto_enabled_a_groups[] = { "gpio35" };
+static const char * const pwr_crypto_enabled_b_groups[] = { "gpio115" };
+static const char * const pwr_modem_enabled_a_groups[] = { "gpio28" };
+static const char * const pwr_modem_enabled_b_groups[] = { "gpio113" };
+static const char * const pwr_nav_enabled_a_groups[] = { "gpio34" };
+static const char * const pwr_nav_enabled_b_groups[] = { "gpio114" };
+static const char * const qdss_ctitrig_in_a0_groups[] = { "gpio20" };
+static const char * const qdss_ctitrig_in_a1_groups[] = { "gpio49" };
+static const char * const qdss_ctitrig_in_b0_groups[] = { "gpio21" };
+static const char * const qdss_ctitrig_in_b1_groups[] = { "gpio50" };
+static const char * const qdss_ctitrig_out_a0_groups[] = { "gpio23" };
+static const char * const qdss_ctitrig_out_a1_groups[] = { "gpio52" };
+static const char * const qdss_ctitrig_out_b0_groups[] = { "gpio22" };
+static const char * const qdss_ctitrig_out_b1_groups[] = { "gpio51" };
+static const char * const qdss_traceclk_a_groups[] = { "gpio46" };
+static const char * const qdss_traceclk_b_groups[] = { "gpio5" };
+static const char * const qdss_tracectl_a_groups[] = { "gpio45" };
+static const char * const qdss_tracectl_b_groups[] = { "gpio4" };
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio47", "gpio48", "gpio62", "gpio69", "gpio112", "gpio113",
+ "gpio114", "gpio115"
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio110", "gpio111",
+ "gpio120", "gpio121"
+};
+static const char * const reset_n_groups[] = { "gpio36" };
+static const char * const sd_card_groups[] = { "gpio38" };
+static const char * const sd_write_groups[] = { "gpio121" };
+static const char * const sec_mi2s_groups[] = {
+ "gpio112", "gpio117", "gpio118", "gpio119"
+};
+static const char * const smb_int_groups[] = { "gpio62" };
+static const char * const ssbi_wtr0_groups[] = { "gpio103", "gpio104" };
+static const char * const ssbi_wtr1_groups[] = { "gpio105", "gpio106" };
+static const char * const uim1_groups[] = {
+ "gpio57", "gpio58", "gpio59", "gpio60"
+};
+static const char * const uim2_groups[] = {
+ "gpio53", "gpio54", "gpio55", "gpio56"
+};
+static const char * const uim3_groups[] = {
+ "gpio49", "gpio50", "gpio51", "gpio52"
+};
+static const char * const uim_batt_groups[] = { "gpio61" };
+static const char * const wcss_bt_groups[] = { "gpio39", "gpio47", "gpio48" };
+static const char * const wcss_fm_groups[] = { "gpio45", "gpio46" };
+static const char * const wcss_wlan_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
+};
+static const char * const webcam1_rst_groups[] = { "gpio28" };
+
+static const struct msm_function msm8916_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(alsp_int),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_combodac),
+ FUNCTION(atest_gpsadc0),
+ FUNCTION(atest_gpsadc1),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(backlight_en),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi2_cs1),
+ FUNCTION(blsp_spi2_cs2),
+ FUNCTION(blsp_spi2_cs3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi3_cs3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(cam1_rst),
+ FUNCTION(cam1_standby),
+ FUNCTION(cam_mclk0),
+ FUNCTION(cam_mclk1),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(codec_mad),
+ FUNCTION(dbg_out),
+ FUNCTION(display_5v),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(dsi_rst),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(euro_us),
+ FUNCTION(ext_lpass),
+ FUNCTION(flash_strobe),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx0),
+ FUNCTION(gsm0_tx1),
+ FUNCTION(gsm1_tx0),
+ FUNCTION(gsm1_tx1),
+ FUNCTION(gyro_accl),
+ FUNCTION(kpsns0),
+ FUNCTION(kpsns1),
+ FUNCTION(kpsns2),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(mag_int),
+ FUNCTION(mdp_vsync),
+ FUNCTION(modem_tsync),
+ FUNCTION(m_voc),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_tsync),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pri_mi2s),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_ctitrig_in_a0),
+ FUNCTION(qdss_ctitrig_in_a1),
+ FUNCTION(qdss_ctitrig_in_b0),
+ FUNCTION(qdss_ctitrig_in_b1),
+ FUNCTION(qdss_ctitrig_out_a0),
+ FUNCTION(qdss_ctitrig_out_a1),
+ FUNCTION(qdss_ctitrig_out_b0),
+ FUNCTION(qdss_ctitrig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(reset_n),
+ FUNCTION(sd_card),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(smb_int),
+ FUNCTION(ssbi_wtr0),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(uim1),
+ FUNCTION(uim2),
+ FUNCTION(uim3),
+ FUNCTION(uim_batt),
+ FUNCTION(wcss_bt),
+ FUNCTION(wcss_fm),
+ FUNCTION(wcss_wlan),
+ FUNCTION(webcam1_rst)
+};
+
+static const struct msm_pingroup msm8916_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, dmic0_clk, NA, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, dmic0_data, NA, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, blsp_spi1_cs3, qdss_tracectl_b, NA, atest_combodac, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, blsp_spi2_cs3, qdss_traceclk_b, NA, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, m_voc, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, blsp_i2c3, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, blsp_i2c3, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, blsp_spi4, gcc_gp2_clk_b, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi4, gcc_gp3_clk_b, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi4, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi4, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi5, blsp_spi1_cs2, NA, atest_bbrx1, NA, NA, NA, NA, NA),
+ PINGROUP(17, blsp_spi5, blsp_spi2_cs2, NA, atest_bbrx0, NA, NA, NA, NA, NA),
+ PINGROUP(18, blsp_spi5, blsp_i2c5, NA, atest_gpsadc1, NA, NA, NA, NA, NA),
+ PINGROUP(19, blsp_spi5, blsp_i2c5, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, blsp_spi6, NA, NA, NA, NA, NA, NA, qdss_ctitrig_in_a0, NA),
+ PINGROUP(21, blsp_spi6, NA, NA, NA, NA, NA, NA, qdss_ctitrig_in_b0, NA),
+ PINGROUP(22, blsp_spi6, blsp_i2c6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, blsp_spi6, blsp_i2c6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, cam_mclk0, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(27, cam_mclk1, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
+ PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
+ PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(37, blsp_spi3_cs2, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(38, cci_timer2, adsp_ext, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, wcss_bt, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(40, wcss_wlan, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(41, wcss_wlan, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(42, wcss_wlan, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(43, wcss_wlan, prng_rosc, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA),
+ PINGROUP(44, wcss_wlan, NA, atest_combodac, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, wcss_fm, ext_lpass, qdss_tracectl_a, NA, atest_combodac, NA, NA, NA, NA),
+ PINGROUP(46, wcss_fm, qdss_traceclk_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(47, wcss_bt, dbg_out, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA),
+ PINGROUP(48, wcss_bt, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(49, uim3, gcc_gp1_clk_a, qdss_ctitrig_in_a1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, uim3, gcc_gp2_clk_a, qdss_ctitrig_in_b1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, uim3, gcc_gp3_clk_a, qdss_ctitrig_out_b1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, uim3, NA, qdss_ctitrig_out_a1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, uim2, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, uim2, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, uim2, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, uim2, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, uim1, atest_char3, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, uim1, atest_char2, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, uim1, atest_char1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, uim1, atest_char0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, atest_char, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, cdc_pdm0, bimc_dte0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, cdc_pdm0, bimc_dte1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, cdc_pdm0, bimc_dte0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, cdc_pdm0, bimc_dte1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, cdc_pdm0, ebi0_wrcdc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, blsp_spi3_cs3, qdss_tracedata_a, NA, atest_combodac, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, pa_indicator, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, modem_tsync, nav_tsync, nav_pps, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, gsm0_tx0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, gsm0_tx1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, gsm1_tx0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, gsm1_tx1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, ssbi_wtr0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, ssbi_wtr0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, pbs0, NA, atest_combodac, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, pbs1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, pbs2, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, blsp_spi1_cs1, pri_mi2s_ws, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(111, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, sec_mi2s, NA, NA, NA, qdss_tracedata_a, NA, atest_tsens, NA, NA),
+ PINGROUP(113, pri_mi2s, NA, pwr_modem_enabled_b, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(114, pri_mi2s, pwr_nav_enabled_b, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(115, pri_mi2s, pwr_crypto_enabled_b, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(116, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(117, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, sec_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, blsp_spi3_cs1, ldo_update, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(121, sd_write, blsp_spi2_cs1, ldo_en, NA, NA, NA, NA, NA, NA),
+ SDC_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+#define NUM_GPIO_PINGROUPS 122
+
+static const struct msm_pinctrl_soc_data msm8916_pinctrl = {
+ .pins = msm8916_pins,
+ .npins = ARRAY_SIZE(msm8916_pins),
+ .functions = msm8916_functions,
+ .nfunctions = ARRAY_SIZE(msm8916_functions),
+ .groups = msm8916_groups,
+ .ngroups = ARRAY_SIZE(msm8916_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int msm8916_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8916_pinctrl);
+}
+
+static const struct of_device_id msm8916_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8916-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8916_pinctrl_driver = {
+ .driver = {
+ .name = "msm8916-pinctrl",
+ .of_match_table = msm8916_pinctrl_of_match,
+ },
+ .probe = msm8916_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8916_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8916_pinctrl_driver);
+}
+arch_initcall(msm8916_pinctrl_init);
+
+static void __exit msm8916_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8916_pinctrl_driver);
+}
+module_exit(msm8916_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8916 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8916_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index b863b5080890..0f11a26d932b 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -131,15 +131,17 @@ struct pmic_gpio_state {
struct gpio_chip chip;
};
-struct pmic_gpio_bindings {
- const char *property;
- unsigned param;
+static const struct pinconf_generic_params pmic_gpio_bindings[] = {
+ {"qcom,pull-up-strength", PMIC_GPIO_CONF_PULL_UP, 0},
+ {"qcom,drive-strength", PMIC_GPIO_CONF_STRENGTH, 0},
};
-static struct pmic_gpio_bindings pmic_gpio_bindings[] = {
- {"qcom,pull-up-strength", PMIC_GPIO_CONF_PULL_UP},
- {"qcom,drive-strength", PMIC_GPIO_CONF_STRENGTH},
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pmic_conf_items[ARRAY_SIZE(pmic_gpio_bindings)] = {
+ PCONFDUMP(PMIC_GPIO_CONF_PULL_UP, "pull up strength", NULL, true),
+ PCONFDUMP(PMIC_GPIO_CONF_STRENGTH, "drive-strength", NULL, true),
};
+#endif
static const char *const pmic_gpio_groups[] = {
"gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
@@ -209,118 +211,11 @@ static int pmic_gpio_get_group_pins(struct pinctrl_dev *pctldev, unsigned pin,
return 0;
}
-static int pmic_gpio_parse_dt_config(struct device_node *np,
- struct pinctrl_dev *pctldev,
- unsigned long **configs,
- unsigned int *nconfs)
-{
- struct pmic_gpio_bindings *par;
- unsigned long cfg;
- int ret, i;
- u32 val;
-
- for (i = 0; i < ARRAY_SIZE(pmic_gpio_bindings); i++) {
- par = &pmic_gpio_bindings[i];
- ret = of_property_read_u32(np, par->property, &val);
-
- /* property not found */
- if (ret == -EINVAL)
- continue;
-
- /* use zero as default value */
- if (ret)
- val = 0;
-
- dev_dbg(pctldev->dev, "found %s with value %u\n",
- par->property, val);
-
- cfg = pinconf_to_config_packed(par->param, val);
-
- ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int pmic_gpio_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned *reserv, unsigned *nmaps,
- enum pinctrl_map_type type)
-{
- unsigned long *configs = NULL;
- unsigned nconfs = 0;
- struct property *prop;
- const char *group;
- int ret;
-
- ret = pmic_gpio_parse_dt_config(np, pctldev, &configs, &nconfs);
- if (ret < 0)
- return ret;
-
- if (!nconfs)
- return 0;
-
- ret = of_property_count_strings(np, "pins");
- if (ret < 0)
- goto exit;
-
- ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
- if (ret < 0)
- goto exit;
-
- of_property_for_each_string(np, "pins", prop, group) {
- ret = pinctrl_utils_add_map_configs(pctldev, map,
- reserv, nmaps, group,
- configs, nconfs, type);
- if (ret < 0)
- break;
- }
-exit:
- kfree(configs);
- return ret;
-}
-
-static int pmic_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map, unsigned *nmaps)
-{
- enum pinctrl_map_type type;
- struct device_node *np;
- unsigned reserv;
- int ret;
-
- ret = 0;
- *map = NULL;
- *nmaps = 0;
- reserv = 0;
- type = PIN_MAP_TYPE_CONFIGS_GROUP;
-
- for_each_child_of_node(np_config, np) {
- ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
- &reserv, nmaps, type);
- if (ret)
- break;
-
- ret = pmic_gpio_dt_subnode_to_map(pctldev, np, map, &reserv,
- nmaps, type);
- if (ret)
- break;
- }
-
- if (ret < 0)
- pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
-
- return ret;
-}
-
static const struct pinctrl_ops pmic_gpio_pinctrl_ops = {
.get_groups_count = pmic_gpio_get_groups_count,
.get_group_name = pmic_gpio_get_group_name,
.get_group_pins = pmic_gpio_get_group_pins,
- .dt_node_to_map = pmic_gpio_dt_node_to_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinctrl_utils_dt_free_map,
};
@@ -590,6 +485,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
}
static const struct pinconf_ops pmic_gpio_pinconf_ops = {
+ .is_generic = true,
.pin_config_group_get = pmic_gpio_config_get,
.pin_config_group_set = pmic_gpio_config_set,
.pin_config_group_dbg_show = pmic_gpio_config_dbg_show,
@@ -848,6 +744,11 @@ static int pmic_gpio_probe(struct platform_device *pdev)
pctrldesc->name = dev_name(dev);
pctrldesc->pins = pindesc;
pctrldesc->npins = npins;
+ pctrldesc->num_custom_params = ARRAY_SIZE(pmic_gpio_bindings);
+ pctrldesc->custom_params = pmic_gpio_bindings;
+#ifdef CONFIG_DEBUG_FS
+ pctrldesc->custom_conf_items = pmic_conf_items;
+#endif
for (i = 0; i < npins; i++, pindesc++) {
pad = &pads[i];
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index becb3792977b..c8f83f96546c 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1300,6 +1300,25 @@ static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr3", 0x0c),
};
+/* pin banks of exynos7 pin-controller - BUS1 */
+static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpf0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpf1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpf3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpf4", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpf5", 0x14),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpg1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpg2", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x120, "gph1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(3, 0x140, "gpv6", 0x24),
+};
+
+static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 Alive data */
@@ -1342,5 +1361,15 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
.pin_banks = exynos7_pin_banks7,
.nr_banks = ARRAY_SIZE(exynos7_pin_banks7),
.eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 8 BUS1 data */
+ .pin_banks = exynos7_pin_banks8,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks8),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 9 AUD data */
+ .pin_banks = exynos7_pin_banks9,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks9),
+ .eint_gpio_init = exynos_eint_gpio_init,
},
};
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 26187aa5cf5b..8c4b3d391823 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -20,6 +20,11 @@ config GPIO_SH_PFC
This enables support for GPIOs within the SoC's pin function
controller.
+config PINCTRL_PFC_EMEV2
+ def_bool y
+ depends on ARCH_EMEV2
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A73A4
def_bool y
depends on ARCH_R8A73A4
@@ -68,11 +73,6 @@ config PINCTRL_PFC_SH7269
depends on GPIOLIB
select PINCTRL_SH_PFC
-config PINCTRL_PFC_SH7372
- def_bool y
- depends on ARCH_SH7372
- select PINCTRL_SH_PFC
-
config PINCTRL_PFC_SH73A0
def_bool y
depends on ARCH_SH73A0
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index ad8f4cf9faaa..f4074e166bcf 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -3,6 +3,7 @@ ifeq ($(CONFIG_GPIO_SH_PFC),y)
sh-pfc-objs += gpio.o
endif
obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
+obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
@@ -12,7 +13,6 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
-obj-$(CONFIG_PINCTRL_PFC_SH7372) += pfc-sh7372.o
obj-$(CONFIG_PINCTRL_PFC_SH73A0) += pfc-sh73a0.o
obj-$(CONFIG_PINCTRL_PFC_SH7720) += pfc-sh7720.o
obj-$(CONFIG_PINCTRL_PFC_SH7722) += pfc-sh7722.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 66dc62d2156c..a56280814a3f 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -439,6 +439,12 @@ static int sh_pfc_init_ranges(struct sh_pfc *pfc)
#ifdef CONFIG_OF
static const struct of_device_id sh_pfc_of_table[] = {
+#ifdef CONFIG_PINCTRL_PFC_EMEV2
+ {
+ .compatible = "renesas,pfc-emev2",
+ .data = &emev2_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A73A4
{
.compatible = "renesas,pfc-r8a73a4",
@@ -475,12 +481,6 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7791_pinmux_info,
},
#endif
-#ifdef CONFIG_PINCTRL_PFC_SH7372
- {
- .compatible = "renesas,pfc-sh7372",
- .data = &sh7372_pinmux_info,
- },
-#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
@@ -579,6 +579,9 @@ static int sh_pfc_remove(struct platform_device *pdev)
}
static const struct platform_device_id sh_pfc_id_table[] = {
+#ifdef CONFIG_PINCTRL_PFC_EMEV2
+ { "pfc-emev2", (kernel_ulong_t)&emev2_pinmux_info },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A73A4
{ "pfc-r8a73a4", (kernel_ulong_t)&r8a73a4_pinmux_info },
#endif
@@ -606,9 +609,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_SH7269
{ "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
#endif
-#ifdef CONFIG_PINCTRL_PFC_SH7372
- { "pfc-sh7372", (kernel_ulong_t)&sh7372_pinmux_info },
-#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{ "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
#endif
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 3daaa5241c47..6b59d63b9c01 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -65,6 +65,7 @@ void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned long reg_width,
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
+extern const struct sh_pfc_soc_info emev2_pinmux_info;
extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
@@ -74,7 +75,6 @@ extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
-extern const struct sh_pfc_soc_info sh7372_pinmux_info;
extern const struct sh_pfc_soc_info sh73a0_pinmux_info;
extern const struct sh_pfc_soc_info sh7720_pinmux_info;
extern const struct sh_pfc_soc_info sh7722_pinmux_info;
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
new file mode 100644
index 000000000000..849c6943ed30
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
@@ -0,0 +1,1711 @@
+/*
+ * Pin Function Controller Support
+ *
+ * Copyright (C) 2015 Niklas Söderlund
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(0, fn, pfx, sfx), PORT_90(0, fn, pfx, sfx), \
+ PORT_10(100, fn, pfx##10, sfx), PORT_10(110, fn, pfx##11, sfx), \
+ PORT_10(120, fn, pfx##12, sfx), PORT_10(130, fn, pfx##13, sfx), \
+ PORT_10(140, fn, pfx##14, sfx), PORT_1(150, fn, pfx##150, sfx), \
+ PORT_1(151, fn, pfx##151, sfx), PORT_1(152, fn, pfx##152, sfx), \
+ PORT_1(153, fn, pfx##153, sfx), PORT_1(154, fn, pfx##154, sfx), \
+ PORT_1(155, fn, pfx##155, sfx), PORT_1(156, fn, pfx##156, sfx), \
+ PORT_1(157, fn, pfx##157, sfx), PORT_1(158, fn, pfx##158, sfx)
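+/* Taken together these cover GPIO ports 0 through 158; ports 150-158 do not
+ * form a full block of ten, so they are listed one by one with PORT_1(). */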
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA),
+ PINMUX_DATA_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN),
+
+ /* GPSR0 */
+ FN_LCD3_1_0_PORT18, FN_LCD3_1_0_PORT20, FN_LCD3_1_0_PORT21,
+ FN_LCD3_1_0_PORT22, FN_LCD3_1_0_PORT23,
+ FN_JT_SEL, FN_ERR_RST_REQB, FN_REF_CLKO, FN_EXT_CLKI, FN_LCD3_PXCLKB,
+
+ /* GPSR1 */
+ FN_LCD3_9_8_PORT38, FN_LCD3_9_8_PORT39, FN_LCD3_11_10_PORT40,
+ FN_LCD3_11_10_PORT41, FN_LCD3_11_10_PORT42, FN_LCD3_11_10_PORT43,
+ FN_IIC_1_0_PORT46, FN_IIC_1_0_PORT47,
+ FN_LCD3_R0, FN_LCD3_R1, FN_LCD3_R2, FN_LCD3_R3, FN_LCD3_R4, FN_LCD3_R5,
+ FN_IIC0_SCL, FN_IIC0_SDA, FN_SD_CKI, FN_SDI0_CKO, FN_SDI0_CKI,
+ FN_SDI0_CMD, FN_SDI0_DATA0, FN_SDI0_DATA1, FN_SDI0_DATA2,
+ FN_SDI0_DATA3, FN_SDI0_DATA4, FN_SDI0_DATA5, FN_SDI0_DATA6,
+ FN_SDI0_DATA7, FN_SDI1_CKO, FN_SDI1_CKI, FN_SDI1_CMD,
+
+ /* GPSR2 */
+ FN_AB_1_0_PORT71, FN_AB_1_0_PORT72, FN_AB_1_0_PORT73,
+ FN_AB_1_0_PORT74, FN_AB_1_0_PORT75, FN_AB_1_0_PORT76,
+ FN_AB_1_0_PORT77, FN_AB_1_0_PORT78, FN_AB_1_0_PORT79,
+ FN_AB_1_0_PORT80, FN_AB_1_0_PORT81, FN_AB_1_0_PORT82,
+ FN_AB_1_0_PORT83, FN_AB_1_0_PORT84, FN_AB_3_2_PORT85,
+ FN_AB_3_2_PORT86, FN_AB_3_2_PORT87, FN_AB_3_2_PORT88,
+ FN_AB_5_4_PORT89, FN_AB_5_4_PORT90, FN_AB_7_6_PORT91,
+ FN_AB_7_6_PORT92, FN_AB_1_0_PORT93, FN_AB_1_0_PORT94,
+ FN_AB_1_0_PORT95,
+ FN_SDI1_DATA0, FN_SDI1_DATA1, FN_SDI1_DATA2, FN_SDI1_DATA3,
+ FN_AB_CLK, FN_AB_CSB0, FN_AB_CSB1,
+
+ /* GPSR3 */
+ FN_AB_13_12_PORT104, FN_AB_13_12_PORT103, FN_AB_11_10_PORT102,
+ FN_AB_11_10_PORT101, FN_AB_11_10_PORT100, FN_AB_9_8_PORT99,
+ FN_AB_9_8_PORT98, FN_AB_9_8_PORT97,
+ FN_USI_1_0_PORT109, FN_USI_1_0_PORT110, FN_USI_1_0_PORT111,
+ FN_USI_1_0_PORT112, FN_USI_3_2_PORT113, FN_USI_3_2_PORT114,
+ FN_USI_5_4_PORT115, FN_USI_5_4_PORT116, FN_USI_5_4_PORT117,
+ FN_USI_5_4_PORT118, FN_USI_7_6_PORT119, FN_USI_9_8_PORT120,
+ FN_USI_9_8_PORT121,
+ FN_AB_A20, FN_USI0_CS1, FN_USI0_CS2, FN_USI1_DI,
+ FN_USI1_DO,
+ FN_NTSC_CLK, FN_NTSC_DATA0, FN_NTSC_DATA1, FN_NTSC_DATA2,
+ FN_NTSC_DATA3, FN_NTSC_DATA4,
+
+ /* GPSR4 */
+ FN_HSI_1_0_PORT143, FN_HSI_1_0_PORT144, FN_HSI_1_0_PORT145,
+ FN_HSI_1_0_PORT146, FN_HSI_1_0_PORT147, FN_HSI_1_0_PORT148,
+ FN_HSI_1_0_PORT149, FN_HSI_1_0_PORT150,
+ FN_UART_1_0_PORT157, FN_UART_1_0_PORT158,
+ FN_NTSC_DATA5, FN_NTSC_DATA6, FN_NTSC_DATA7, FN_CAM_CLKO,
+ FN_CAM_CLKI, FN_CAM_VS, FN_CAM_HS, FN_CAM_YUV0,
+ FN_CAM_YUV1, FN_CAM_YUV2, FN_CAM_YUV3, FN_CAM_YUV4,
+ FN_CAM_YUV5, FN_CAM_YUV6, FN_CAM_YUV7,
+ FN_JT_TDO, FN_JT_TDOEN, FN_LOWPWR, FN_USB_VBUS, FN_UART1_RX,
+ FN_UART1_TX,
+
+ /* CHG_PINSEL_LCD3 */
+ FN_SEL_LCD3_1_0_00, FN_SEL_LCD3_1_0_01,
+ FN_SEL_LCD3_9_8_00, FN_SEL_LCD3_9_8_10,
+ FN_SEL_LCD3_11_10_00, FN_SEL_LCD3_11_10_01, FN_SEL_LCD3_11_10_10,
+
+ /* CHG_PINSEL_IIC */
+ FN_SEL_IIC_1_0_00, FN_SEL_IIC_1_0_01,
+
+ /* CHG_PINSEL_AB */
+ FN_SEL_AB_1_0_00, FN_SEL_AB_1_0_10, FN_SEL_AB_3_2_00,
+ FN_SEL_AB_3_2_01, FN_SEL_AB_3_2_10, FN_SEL_AB_3_2_11,
+ FN_SEL_AB_5_4_00, FN_SEL_AB_5_4_01, FN_SEL_AB_5_4_10,
+ FN_SEL_AB_5_4_11, FN_SEL_AB_7_6_00, FN_SEL_AB_7_6_01,
+ FN_SEL_AB_7_6_10,
+ FN_SEL_AB_9_8_00, FN_SEL_AB_9_8_01, FN_SEL_AB_9_8_10,
+ FN_SEL_AB_11_10_00, FN_SEL_AB_11_10_10,
+ FN_SEL_AB_13_12_00, FN_SEL_AB_13_12_10,
+
+ /* CHG_PINSEL_USI */
+ FN_SEL_USI_1_0_00, FN_SEL_USI_1_0_01,
+ FN_SEL_USI_3_2_00, FN_SEL_USI_3_2_01,
+ FN_SEL_USI_5_4_00, FN_SEL_USI_5_4_01,
+ FN_SEL_USI_7_6_00, FN_SEL_USI_7_6_01,
+ FN_SEL_USI_9_8_00, FN_SEL_USI_9_8_01,
+
+ /* CHG_PINSEL_HSI */
+ FN_SEL_HSI_1_0_00, FN_SEL_HSI_1_0_01,
+
+ /* CHG_PINSEL_UART */
+ FN_SEL_UART_1_0_00, FN_SEL_UART_1_0_01,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ /* GPSR0 */
+ JT_SEL_MARK, ERR_RST_REQB_MARK, REF_CLKO_MARK, EXT_CLKI_MARK,
+ LCD3_PXCLKB_MARK, SD_CKI_MARK,
+
+ /* GPSR1 */
+ LCD3_R0_MARK, LCD3_R1_MARK, LCD3_R2_MARK, LCD3_R3_MARK, LCD3_R4_MARK,
+ LCD3_R5_MARK, IIC0_SCL_MARK, IIC0_SDA_MARK, SDI0_CKO_MARK,
+ SDI0_CKI_MARK, SDI0_CMD_MARK, SDI0_DATA0_MARK, SDI0_DATA1_MARK,
+ SDI0_DATA2_MARK, SDI0_DATA3_MARK, SDI0_DATA4_MARK, SDI0_DATA5_MARK,
+ SDI0_DATA6_MARK, SDI0_DATA7_MARK, SDI1_CKO_MARK, SDI1_CKI_MARK,
+ SDI1_CMD_MARK,
+
+ /* GPSR2 */
+ SDI1_DATA0_MARK, SDI1_DATA1_MARK, SDI1_DATA2_MARK, SDI1_DATA3_MARK,
+ AB_CLK_MARK, AB_CSB0_MARK, AB_CSB1_MARK,
+
+ /* GPSR3 */
+ AB_A20_MARK, USI0_CS1_MARK, USI0_CS2_MARK, USI1_DI_MARK,
+ USI1_DO_MARK,
+ NTSC_CLK_MARK, NTSC_DATA0_MARK, NTSC_DATA1_MARK, NTSC_DATA2_MARK,
+ NTSC_DATA3_MARK, NTSC_DATA4_MARK,
+
+ /* GPSR4 */
+ NTSC_DATA5_MARK, NTSC_DATA6_MARK, NTSC_DATA7_MARK, CAM_CLKO_MARK,
+ CAM_CLKI_MARK, CAM_VS_MARK, CAM_HS_MARK, CAM_YUV0_MARK,
+ CAM_YUV1_MARK, CAM_YUV2_MARK, CAM_YUV3_MARK, CAM_YUV4_MARK,
+ CAM_YUV5_MARK, CAM_YUV6_MARK, CAM_YUV7_MARK,
+ JT_TDO_MARK, JT_TDOEN_MARK, USB_VBUS_MARK, LOWPWR_MARK,
+ UART1_RX_MARK, UART1_TX_MARK,
+
+ /* CHG_PINSEL_LCD3 */
+ LCD3_PXCLK_MARK, LCD3_CLK_I_MARK, LCD3_HS_MARK, LCD3_VS_MARK,
+ LCD3_DE_MARK, LCD3_R6_MARK, LCD3_R7_MARK, LCD3_G0_MARK, LCD3_G1_MARK,
+ LCD3_G2_MARK, LCD3_G3_MARK, LCD3_G4_MARK, LCD3_G5_MARK, LCD3_G6_MARK,
+ LCD3_G7_MARK, LCD3_B0_MARK, LCD3_B1_MARK, LCD3_B2_MARK, LCD3_B3_MARK,
+ LCD3_B4_MARK, LCD3_B5_MARK, LCD3_B6_MARK, LCD3_B7_MARK,
+ YUV3_CLK_O_MARK, YUV3_CLK_I_MARK, YUV3_HS_MARK, YUV3_VS_MARK,
+ YUV3_DE_MARK, YUV3_D0_MARK, YUV3_D1_MARK, YUV3_D2_MARK, YUV3_D3_MARK,
+ YUV3_D4_MARK, YUV3_D5_MARK, YUV3_D6_MARK, YUV3_D7_MARK, YUV3_D8_MARK,
+ YUV3_D9_MARK, YUV3_D10_MARK, YUV3_D11_MARK, YUV3_D12_MARK,
+ YUV3_D13_MARK, YUV3_D14_MARK, YUV3_D15_MARK,
+ TP33_CLK_MARK, TP33_CTRL_MARK, TP33_DATA0_MARK, TP33_DATA1_MARK,
+ TP33_DATA2_MARK, TP33_DATA3_MARK, TP33_DATA4_MARK, TP33_DATA5_MARK,
+ TP33_DATA6_MARK, TP33_DATA7_MARK, TP33_DATA8_MARK, TP33_DATA9_MARK,
+ TP33_DATA10_MARK, TP33_DATA11_MARK, TP33_DATA12_MARK, TP33_DATA13_MARK,
+ TP33_DATA14_MARK, TP33_DATA15_MARK,
+
+ /* CHG_PINSEL_IIC */
+ IIC1_SCL_MARK, IIC1_SDA_MARK, UART3_RX_MARK, UART3_TX_MARK,
+
+ /* CHG_PINSEL_AB */
+ AB_CSB2_MARK, AB_CSB3_MARK, AB_RDB_MARK, AB_WRB_MARK,
+ AB_WAIT_MARK, AB_ADV_MARK, AB_AD0_MARK, AB_AD1_MARK,
+ AB_AD2_MARK, AB_AD3_MARK, AB_AD4_MARK, AB_AD5_MARK,
+ AB_AD6_MARK, AB_AD7_MARK, AB_AD8_MARK, AB_AD9_MARK,
+ AB_AD10_MARK, AB_AD11_MARK, AB_AD12_MARK, AB_AD13_MARK,
+ AB_AD14_MARK, AB_AD15_MARK, AB_A17_MARK, AB_A18_MARK,
+ AB_A19_MARK, AB_A21_MARK, AB_A22_MARK, AB_A23_MARK,
+ AB_A24_MARK, AB_A25_MARK, AB_A26_MARK, AB_A27_MARK,
+ AB_A28_MARK, AB_BEN0_MARK, AB_BEN1_MARK,
+ DTV_BCLK_A_MARK, DTV_PSYNC_A_MARK, DTV_VALID_A_MARK,
+ DTV_DATA_A_MARK,
+ SDI2_CKO_MARK, SDI2_CKI_MARK, SDI2_CMD_MARK,
+ SDI2_DATA0_MARK, SDI2_DATA1_MARK, SDI2_DATA2_MARK,
+ SDI2_DATA3_MARK,
+ CF_CSB0_MARK, CF_CSB1_MARK, CF_IORDB_MARK,
+ CF_IOWRB_MARK, CF_IORDY_MARK, CF_RESET_MARK,
+ CF_D00_MARK, CF_D01_MARK, CF_D02_MARK, CF_D03_MARK,
+ CF_D04_MARK, CF_D05_MARK, CF_D06_MARK, CF_D07_MARK,
+ CF_D08_MARK, CF_D09_MARK, CF_D10_MARK, CF_D11_MARK,
+ CF_D12_MARK, CF_D13_MARK, CF_D14_MARK, CF_D15_MARK,
+ CF_A00_MARK, CF_A01_MARK, CF_A02_MARK,
+ CF_INTRQ_MARK, CF_INPACKB_MARK, CF_CDB1_MARK, CF_CDB2_MARK,
+ USI5_CLK_A_MARK, USI5_DI_A_MARK, USI5_DO_A_MARK,
+ USI5_CS0_A_MARK, USI5_CS1_A_MARK, USI5_CS2_A_MARK,
+
+ /* CHG_PINSEL_USI */
+ USI0_CS3_MARK, USI0_CS4_MARK, USI0_CS5_MARK,
+ USI0_CS6_MARK,
+ USI2_CLK_MARK, USI2_DI_MARK, USI2_DO_MARK,
+ USI2_CS0_MARK, USI2_CS1_MARK, USI2_CS2_MARK,
+ USI3_CLK_MARK, USI3_DI_MARK, USI3_DO_MARK,
+ USI3_CS0_MARK,
+ USI4_CLK_MARK, USI4_DI_MARK, USI4_DO_MARK,
+ USI4_CS0_MARK, USI4_CS1_MARK,
+ PWM0_MARK, PWM1_MARK,
+ DTV_BCLK_B_MARK, DTV_PSYNC_B_MARK, DTV_VALID_B_MARK,
+ DTV_DATA_B_MARK,
+
+ /* CHG_PINSEL_HSI */
+ USI5_CLK_B_MARK, USI5_DO_B_MARK, USI5_CS0_B_MARK, USI5_CS1_B_MARK,
+ USI5_CS2_B_MARK, USI5_CS3_B_MARK, USI5_CS4_B_MARK, USI5_DI_B_MARK,
+
+ /* CHG_PINSEL_UART */
+ UART1_CTSB_MARK, UART1_RTSB_MARK,
+ UART2_RX_MARK, UART2_TX_MARK,
+
+ PINMUX_MARK_END,
+};
+
+/* Pin numbers for pins without a corresponding GPIO port number are computed
+ * from the row and column numbers with a 1000 offset to avoid collisions with
+ * GPIO port numbers. */
+#define PIN_NUMBER(row, col) (1000+((row)-1)*23+(col)-1)
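+/* For example, the named pin at ball B14 (row 2, column 14) gets
+ * PIN_NUMBER(2, 14) = 1000 + (2 - 1) * 23 + (14 - 1) = 1036. */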
+
+/* Expand to a list of sh_pfc_pin entries (named PORT#).
+ * NOTE: No configs are recorded since the driver does not handle pinconf. */
+#define __PIN_CFG(pn, pfx, sfx) SH_PFC_PIN_CFG(pfx, 0)
+#define PINMUX_EMEV_GPIO_ALL() CPU_ALL_PORT(__PIN_CFG, , unused)
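+/* e.g. __PIN_CFG(0, 0, unused) expands to SH_PFC_PIN_CFG(0, 0): the PORT0
+ * entry, with 0 for the config flags as per the note above. */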
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_EMEV_GPIO_ALL(),
+
+ /* Pins not associated with a GPIO port */
+ SH_PFC_PIN_NAMED(2, 14, B14),
+ SH_PFC_PIN_NAMED(2, 15, B15),
+ SH_PFC_PIN_NAMED(2, 16, B16),
+ SH_PFC_PIN_NAMED(2, 17, B17),
+ SH_PFC_PIN_NAMED(3, 14, C14),
+ SH_PFC_PIN_NAMED(3, 15, C15),
+ SH_PFC_PIN_NAMED(3, 16, C16),
+ SH_PFC_PIN_NAMED(3, 17, C17),
+ SH_PFC_PIN_NAMED(4, 14, D14),
+ SH_PFC_PIN_NAMED(4, 15, D15),
+ SH_PFC_PIN_NAMED(4, 16, D16),
+ SH_PFC_PIN_NAMED(4, 17, D17),
+};
+
+/* Expand to a list of name_DATA, name_FN marks */
+#define __PORT_DATA(pn, pfx, sfx) PINMUX_DATA(PORT##pfx##_DATA, PORT##pfx##_FN)
+#define PINMUX_EMEV_DATA_ALL() CPU_ALL_PORT(__PORT_DATA, , unused)
+
+static const u16 pinmux_data[] = {
+ PINMUX_EMEV_DATA_ALL(), /* PINMUX_DATA(PORTN_DATA, PORTN_FN), */
+
+ /* GPSR0 */
+ /* V9 */
+ PINMUX_DATA(JT_SEL_MARK, FN_JT_SEL),
+ /* U9 */
+ PINMUX_DATA(ERR_RST_REQB_MARK, FN_ERR_RST_REQB),
+ /* V8 */
+ PINMUX_DATA(REF_CLKO_MARK, FN_REF_CLKO),
+ /* U8 */
+ PINMUX_DATA(EXT_CLKI_MARK, FN_EXT_CLKI),
+ /* B22 */
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT18, LCD3_PXCLK, SEL_LCD3_1_0_00),
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT18, YUV3_CLK_O, SEL_LCD3_1_0_01),
+ /* C21 */
+ PINMUX_DATA(LCD3_PXCLKB_MARK, FN_LCD3_PXCLKB),
+ /* A21 */
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT20, LCD3_CLK_I, SEL_LCD3_1_0_00),
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT20, YUV3_CLK_I, SEL_LCD3_1_0_01),
+ /* B21 */
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT21, LCD3_HS, SEL_LCD3_1_0_00),
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT21, YUV3_HS, SEL_LCD3_1_0_01),
+ /* C20 */
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT22, LCD3_VS, SEL_LCD3_1_0_00),
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT22, YUV3_VS, SEL_LCD3_1_0_01),
+ /* D19 */
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT23, LCD3_DE, SEL_LCD3_1_0_00),
+ PINMUX_IPSR_NOFN(LCD3_1_0_PORT23, YUV3_DE, SEL_LCD3_1_0_01),
+
+ /* GPSR1 */
+ /* A20 */
+ PINMUX_DATA(LCD3_R0_MARK, FN_LCD3_R0),
+ /* B20 */
+ PINMUX_DATA(LCD3_R1_MARK, FN_LCD3_R1),
+ /* A19 */
+ PINMUX_DATA(LCD3_R2_MARK, FN_LCD3_R2),
+ /* B19 */
+ PINMUX_DATA(LCD3_R3_MARK, FN_LCD3_R3),
+ /* C19 */
+ PINMUX_DATA(LCD3_R4_MARK, FN_LCD3_R4),
+ /* B18 */
+ PINMUX_DATA(LCD3_R5_MARK, FN_LCD3_R5),
+ /* C18 */
+ PINMUX_IPSR_NOFN(LCD3_9_8_PORT38, LCD3_R6, SEL_LCD3_9_8_00),
+ PINMUX_IPSR_NOFN(LCD3_9_8_PORT38, TP33_CLK, SEL_LCD3_9_8_10),
+ /* D18 */
+ PINMUX_IPSR_NOFN(LCD3_9_8_PORT39, LCD3_R7, SEL_LCD3_9_8_00),
+ PINMUX_IPSR_NOFN(LCD3_9_8_PORT39, TP33_CTRL, SEL_LCD3_9_8_10),
+ /* A18 */
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT40, LCD3_G0, SEL_LCD3_11_10_00),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT40, YUV3_D0, SEL_LCD3_11_10_01),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT40, TP33_DATA0, SEL_LCD3_11_10_10),
+ /* A17 */
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT41, LCD3_G1, SEL_LCD3_11_10_00),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT41, YUV3_D1, SEL_LCD3_11_10_01),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT41, TP33_DATA1, SEL_LCD3_11_10_10),
+ /* B17 */
+ PINMUX_DATA(LCD3_G2_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D2_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA2_MARK, FN_SEL_LCD3_11_10_10),
+ /* C17 */
+ PINMUX_DATA(LCD3_G3_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D3_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA3_MARK, FN_SEL_LCD3_11_10_10),
+ /* D17 */
+ PINMUX_DATA(LCD3_G4_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D4_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA4_MARK, FN_SEL_LCD3_11_10_10),
+ /* B16 */
+ PINMUX_DATA(LCD3_G5_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D5_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA5_MARK, FN_SEL_LCD3_11_10_10),
+ /* C16 */
+ PINMUX_DATA(LCD3_G6_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D6_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA6_MARK, FN_SEL_LCD3_11_10_10),
+ /* D16 */
+ PINMUX_DATA(LCD3_G7_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D7_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA7_MARK, FN_SEL_LCD3_11_10_10),
+ /* A16 */
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT42, LCD3_B0, SEL_LCD3_11_10_00),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT42, YUV3_D8, SEL_LCD3_11_10_01),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT42, TP33_DATA8, SEL_LCD3_11_10_10),
+ /* A15 */
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT43, LCD3_B1, SEL_LCD3_11_10_00),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT43, YUV3_D9, SEL_LCD3_11_10_01),
+ PINMUX_IPSR_NOFN(LCD3_11_10_PORT43, TP33_DATA9, SEL_LCD3_11_10_10),
+ /* B15 */
+ PINMUX_DATA(LCD3_B2_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D10_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA10_MARK, FN_SEL_LCD3_11_10_10),
+ /* C15 */
+ PINMUX_DATA(LCD3_B3_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D11_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA11_MARK, FN_SEL_LCD3_11_10_10),
+ /* D15 */
+ PINMUX_DATA(LCD3_B4_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D12_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA12_MARK, FN_SEL_LCD3_11_10_10),
+ /* B14 */
+ PINMUX_DATA(LCD3_B5_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D13_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA13_MARK, FN_SEL_LCD3_11_10_10),
+ /* C14 */
+ PINMUX_DATA(LCD3_B6_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D14_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA14_MARK, FN_SEL_LCD3_11_10_10),
+ /* D14 */
+ PINMUX_DATA(LCD3_B7_MARK, FN_SEL_LCD3_11_10_00),
+ PINMUX_DATA(YUV3_D15_MARK, FN_SEL_LCD3_11_10_01),
+ PINMUX_DATA(TP33_DATA15_MARK, FN_SEL_LCD3_11_10_10),
+ /* AA9 */
+ PINMUX_DATA(IIC0_SCL_MARK, FN_IIC0_SCL),
+ /* AA8 */
+ PINMUX_DATA(IIC0_SDA_MARK, FN_IIC0_SDA),
+ /* Y9 */
+ PINMUX_IPSR_NOFN(IIC_1_0_PORT46, IIC1_SCL, SEL_IIC_1_0_00),
+ PINMUX_IPSR_NOFN(IIC_1_0_PORT46, UART3_RX, SEL_IIC_1_0_01),
+ /* Y8 */
+ PINMUX_IPSR_NOFN(IIC_1_0_PORT47, IIC1_SDA, SEL_IIC_1_0_00),
+ PINMUX_IPSR_NOFN(IIC_1_0_PORT47, UART3_TX, SEL_IIC_1_0_01),
+ /* AC19 */
+ PINMUX_DATA(SD_CKI_MARK, FN_SD_CKI),
+ /* AB18 */
+ PINMUX_DATA(SDI0_CKO_MARK, FN_SDI0_CKO),
+ /* AC18 */
+ PINMUX_DATA(SDI0_CKI_MARK, FN_SDI0_CKI),
+ /* Y12 */
+ PINMUX_DATA(SDI0_CMD_MARK, FN_SDI0_CMD),
+ /* AA13 */
+ PINMUX_DATA(SDI0_DATA0_MARK, FN_SDI0_DATA0),
+ /* Y13 */
+ PINMUX_DATA(SDI0_DATA1_MARK, FN_SDI0_DATA1),
+ /* AA14 */
+ PINMUX_DATA(SDI0_DATA2_MARK, FN_SDI0_DATA2),
+ /* Y14 */
+ PINMUX_DATA(SDI0_DATA3_MARK, FN_SDI0_DATA3),
+ /* AA15 */
+ PINMUX_DATA(SDI0_DATA4_MARK, FN_SDI0_DATA4),
+ /* Y15 */
+ PINMUX_DATA(SDI0_DATA5_MARK, FN_SDI0_DATA5),
+ /* AA16 */
+ PINMUX_DATA(SDI0_DATA6_MARK, FN_SDI0_DATA6),
+ /* Y16 */
+ PINMUX_DATA(SDI0_DATA7_MARK, FN_SDI0_DATA7),
+ /* AB22 */
+ PINMUX_DATA(SDI1_CKO_MARK, FN_SDI1_CKO),
+ /* AA23 */
+ PINMUX_DATA(SDI1_CKI_MARK, FN_SDI1_CKI),
+ /* AC21 */
+ PINMUX_DATA(SDI1_CMD_MARK, FN_SDI1_CMD),
+
+ /* GPSR2 */
+ /* AB21 */
+ PINMUX_DATA(SDI1_DATA0_MARK, FN_SDI1_DATA0),
+ /* AB20 */
+ PINMUX_DATA(SDI1_DATA1_MARK, FN_SDI1_DATA1),
+ /* AB19 */
+ PINMUX_DATA(SDI1_DATA2_MARK, FN_SDI1_DATA2),
+ /* AA19 */
+ PINMUX_DATA(SDI1_DATA3_MARK, FN_SDI1_DATA3),
+ /* J23 */
+ PINMUX_DATA(AB_CLK_MARK, FN_AB_CLK),
+ /* D21 */
+ PINMUX_DATA(AB_CSB0_MARK, FN_AB_CSB0),
+ /* E21 */
+ PINMUX_DATA(AB_CSB1_MARK, FN_AB_CSB1),
+ /* F20 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT71, AB_CSB2, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT71, CF_CSB0, SEL_AB_1_0_10),
+ /* G20 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT72, AB_CSB3, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT72, CF_CSB1, SEL_AB_1_0_10),
+ /* J20 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT73, AB_RDB, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT73, CF_IORDB, SEL_AB_1_0_10),
+ /* H20 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT74, AB_WRB, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT74, CF_IOWRB, SEL_AB_1_0_10),
+ /* L20 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT75, AB_WAIT, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT75, CF_IORDY, SEL_AB_1_0_10),
+ /* K20 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT76, AB_ADV, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT76, CF_RESET, SEL_AB_1_0_10),
+ /* C23 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT77, AB_AD0, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT77, CF_D00, SEL_AB_1_0_10),
+ /* C22 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT78, AB_AD1, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT78, CF_D01, SEL_AB_1_0_10),
+ /* D23 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT79, AB_AD2, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT79, CF_D02, SEL_AB_1_0_10),
+ /* D22 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT80, AB_AD3, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT80, CF_D03, SEL_AB_1_0_10),
+ /* E23 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT81, AB_AD4, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT81, CF_D04, SEL_AB_1_0_10),
+ /* E22 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT82, AB_AD5, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT82, CF_D05, SEL_AB_1_0_10),
+ /* F23 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT83, AB_AD6, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT83, CF_D06, SEL_AB_1_0_10),
+ /* F22 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT84, AB_AD7, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT84, CF_D07, SEL_AB_1_0_10),
+ /* F21 */
+ PINMUX_IPSR_NOFN(AB_3_2_PORT85, AB_AD8, SEL_AB_3_2_00),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT85, DTV_BCLK_A, SEL_AB_3_2_01),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT85, CF_D08, SEL_AB_3_2_10),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT85, USI5_CLK_A, SEL_AB_3_2_11),
+ /* G23 */
+ PINMUX_IPSR_NOFN(AB_3_2_PORT86, AB_AD9, SEL_AB_3_2_00),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT86, DTV_PSYNC_A, SEL_AB_3_2_01),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT86, CF_D09, SEL_AB_3_2_10),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT86, USI5_DI_A, SEL_AB_3_2_11),
+ /* G22 */
+ PINMUX_IPSR_NOFN(AB_3_2_PORT87, AB_AD10, SEL_AB_3_2_00),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT87, DTV_VALID_A, SEL_AB_3_2_01),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT87, CF_D10, SEL_AB_3_2_10),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT87, USI5_DO_A, SEL_AB_3_2_11),
+ /* G21 */
+ PINMUX_IPSR_NOFN(AB_3_2_PORT88, AB_AD11, SEL_AB_3_2_00),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT88, DTV_DATA_A, SEL_AB_3_2_01),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT88, CF_D11, SEL_AB_3_2_10),
+ PINMUX_IPSR_NOFN(AB_3_2_PORT88, USI5_CS0_A, SEL_AB_3_2_11),
+ /* H23 */
+ PINMUX_IPSR_NOFN(AB_5_4_PORT89, AB_AD12, SEL_AB_5_4_00),
+ PINMUX_IPSR_NOFN(AB_5_4_PORT89, SDI2_DATA0, SEL_AB_5_4_01),
+ PINMUX_IPSR_NOFN(AB_5_4_PORT89, CF_D12, SEL_AB_5_4_10),
+ PINMUX_IPSR_NOFN(AB_5_4_PORT89, USI5_CS1_A, SEL_AB_5_4_11),
+ /* H22 */
+ PINMUX_IPSR_NOFN(AB_5_4_PORT90, AB_AD13, SEL_AB_5_4_00),
+ PINMUX_IPSR_NOFN(AB_5_4_PORT90, SDI2_DATA1, SEL_AB_5_4_01),
+ PINMUX_IPSR_NOFN(AB_5_4_PORT90, CF_D13, SEL_AB_5_4_10),
+ PINMUX_IPSR_NOFN(AB_5_4_PORT90, USI5_CS2_A, SEL_AB_5_4_11),
+ /* H21 */
+ PINMUX_IPSR_NOFN(AB_7_6_PORT91, AB_AD14, SEL_AB_7_6_00),
+ PINMUX_IPSR_NOFN(AB_7_6_PORT91, SDI2_DATA2, SEL_AB_7_6_01),
+ PINMUX_IPSR_NOFN(AB_7_6_PORT91, CF_D14, SEL_AB_7_6_10),
+ /* J22 */
+ PINMUX_IPSR_NOFN(AB_7_6_PORT92, AB_AD15, SEL_AB_7_6_00),
+ PINMUX_IPSR_NOFN(AB_7_6_PORT92, SDI2_DATA3, SEL_AB_7_6_01),
+ PINMUX_IPSR_NOFN(AB_7_6_PORT92, CF_D15, SEL_AB_7_6_10),
+ /* J21 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT93, AB_A17, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT93, CF_A00, SEL_AB_1_0_10),
+ /* K21 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT94, AB_A18, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT94, CF_A01, SEL_AB_1_0_10),
+ /* L21 */
+ PINMUX_IPSR_NOFN(AB_1_0_PORT95, AB_A19, SEL_AB_1_0_00),
+ PINMUX_IPSR_NOFN(AB_1_0_PORT95, CF_A02, SEL_AB_1_0_10),
+
+ /* GPSR3 */
+ /* M21 */
+ PINMUX_DATA(AB_A20_MARK, FN_AB_A20),
+ /* N21 */
+ PINMUX_IPSR_NOFN(AB_9_8_PORT97, AB_A21, SEL_AB_9_8_00),
+ PINMUX_IPSR_NOFN(AB_9_8_PORT97, SDI2_CKO, SEL_AB_9_8_01),
+ PINMUX_IPSR_NOFN(AB_9_8_PORT97, CF_INTRQ, SEL_AB_9_8_10),
+ /* M20 */
+ PINMUX_IPSR_NOFN(AB_9_8_PORT98, AB_A22, SEL_AB_9_8_00),
+ PINMUX_IPSR_NOFN(AB_9_8_PORT98, SDI2_CKI, SEL_AB_9_8_01),
+ /* N20 */
+ PINMUX_IPSR_NOFN(AB_9_8_PORT99, AB_A23, SEL_AB_9_8_00),
+ PINMUX_IPSR_NOFN(AB_9_8_PORT99, SDI2_CMD, SEL_AB_9_8_01),
+ /* L18 */
+ PINMUX_IPSR_NOFN(AB_11_10_PORT100, AB_A24, SEL_AB_11_10_00),
+ PINMUX_IPSR_NOFN(AB_11_10_PORT100, CF_INPACKB, SEL_AB_11_10_10),
+ /* M18 */
+ PINMUX_IPSR_NOFN(AB_11_10_PORT101, AB_A25, SEL_AB_11_10_00),
+ PINMUX_IPSR_NOFN(AB_11_10_PORT101, CF_CDB1, SEL_AB_11_10_10),
+ /* N18 */
+ PINMUX_IPSR_NOFN(AB_11_10_PORT102, AB_A26, SEL_AB_11_10_00),
+ PINMUX_IPSR_NOFN(AB_11_10_PORT102, CF_CDB2, SEL_AB_11_10_10),
+ /* L17 */
+ PINMUX_IPSR_NOFN(AB_13_12_PORT103, AB_A27, SEL_AB_13_12_00),
+ PINMUX_IPSR_NOFN(AB_13_12_PORT103, AB_BEN0, SEL_AB_13_12_10),
+ /* M17 */
+ PINMUX_IPSR_NOFN(AB_13_12_PORT104, AB_A28, SEL_AB_13_12_00),
+ PINMUX_IPSR_NOFN(AB_13_12_PORT104, AB_BEN1, SEL_AB_13_12_10),
+ /* B8 */
+ PINMUX_DATA(USI0_CS1_MARK, FN_USI0_CS1),
+ /* B9 */
+ PINMUX_DATA(USI0_CS2_MARK, FN_USI0_CS2),
+ /* C10 */
+ PINMUX_DATA(USI1_DI_MARK, FN_USI1_DI),
+ /* D10 */
+ PINMUX_DATA(USI1_DO_MARK, FN_USI1_DO),
+ /* AB5 */
+ PINMUX_IPSR_NOFN(USI_1_0_PORT109, USI2_CLK, SEL_USI_1_0_00),
+ PINMUX_IPSR_NOFN(USI_1_0_PORT109, DTV_BCLK_B, SEL_USI_1_0_01),
+ /* AA6 */
+ PINMUX_IPSR_NOFN(USI_1_0_PORT110, USI2_DI, SEL_USI_1_0_00),
+ PINMUX_IPSR_NOFN(USI_1_0_PORT110, DTV_PSYNC_B, SEL_USI_1_0_01),
+ /* AA5 */
+ PINMUX_IPSR_NOFN(USI_1_0_PORT111, USI2_DO, SEL_USI_1_0_00),
+ PINMUX_IPSR_NOFN(USI_1_0_PORT111, DTV_VALID_B, SEL_USI_1_0_01),
+ /* Y7 */
+ PINMUX_IPSR_NOFN(USI_1_0_PORT112, USI2_CS0, SEL_USI_1_0_00),
+ PINMUX_IPSR_NOFN(USI_1_0_PORT112, DTV_DATA_B, SEL_USI_1_0_01),
+ /* AA7 */
+ PINMUX_IPSR_NOFN(USI_3_2_PORT113, USI2_CS1, SEL_USI_3_2_00),
+ PINMUX_IPSR_NOFN(USI_3_2_PORT113, USI4_CS0, SEL_USI_3_2_01),
+ /* Y6 */
+ PINMUX_IPSR_NOFN(USI_3_2_PORT114, USI2_CS2, SEL_USI_3_2_00),
+ PINMUX_IPSR_NOFN(USI_3_2_PORT114, USI4_CS1, SEL_USI_3_2_01),
+ /* AC5 */
+ PINMUX_IPSR_NOFN(USI_5_4_PORT115, USI3_CLK, SEL_USI_5_4_00),
+ PINMUX_IPSR_NOFN(USI_5_4_PORT115, USI0_CS3, SEL_USI_5_4_01),
+ /* AC4 */
+ PINMUX_IPSR_NOFN(USI_5_4_PORT116, USI3_DI, SEL_USI_5_4_00),
+ PINMUX_IPSR_NOFN(USI_5_4_PORT116, USI0_CS4, SEL_USI_5_4_01),
+ /* AC3 */
+ PINMUX_IPSR_NOFN(USI_5_4_PORT117, USI3_DO, SEL_USI_5_4_00),
+ PINMUX_IPSR_NOFN(USI_5_4_PORT117, USI0_CS5, SEL_USI_5_4_01),
+ /* AB4 */
+ PINMUX_IPSR_NOFN(USI_5_4_PORT118, USI3_CS0, SEL_USI_5_4_00),
+ PINMUX_IPSR_NOFN(USI_5_4_PORT118, USI0_CS6, SEL_USI_5_4_01),
+ /* AB3 */
+ PINMUX_IPSR_NOFN(USI_7_6_PORT119, USI4_CLK, SEL_USI_7_6_01),
+ /* AA4 */
+ PINMUX_IPSR_NOFN(USI_9_8_PORT120, PWM0, SEL_USI_9_8_00),
+ PINMUX_IPSR_NOFN(USI_9_8_PORT120, USI4_DI, SEL_USI_9_8_01),
+ /* Y5 */
+ PINMUX_IPSR_NOFN(USI_9_8_PORT121, PWM1, SEL_USI_9_8_00),
+ PINMUX_IPSR_NOFN(USI_9_8_PORT121, USI4_DO, SEL_USI_9_8_01),
+ /* V20 */
+ PINMUX_DATA(NTSC_CLK_MARK, FN_NTSC_CLK),
+ /* P20 */
+ PINMUX_DATA(NTSC_DATA0_MARK, FN_NTSC_DATA0),
+ /* P18 */
+ PINMUX_DATA(NTSC_DATA1_MARK, FN_NTSC_DATA1),
+ /* R20 */
+ PINMUX_DATA(NTSC_DATA2_MARK, FN_NTSC_DATA2),
+ /* R18 */
+ PINMUX_DATA(NTSC_DATA3_MARK, FN_NTSC_DATA3),
+ /* T20 */
+ PINMUX_DATA(NTSC_DATA4_MARK, FN_NTSC_DATA4),
+
+ /* GPSR4 */
+ /* T18 */
+ PINMUX_DATA(NTSC_DATA5_MARK, FN_NTSC_DATA5),
+ /* U20 */
+ PINMUX_DATA(NTSC_DATA6_MARK, FN_NTSC_DATA6),
+ /* U18 */
+ PINMUX_DATA(NTSC_DATA7_MARK, FN_NTSC_DATA7),
+ /* W23 */
+ PINMUX_DATA(CAM_CLKO_MARK, FN_CAM_CLKO),
+ /* Y23 */
+ PINMUX_DATA(CAM_CLKI_MARK, FN_CAM_CLKI),
+ /* W22 */
+ PINMUX_DATA(CAM_VS_MARK, FN_CAM_VS),
+ /* V21 */
+ PINMUX_DATA(CAM_HS_MARK, FN_CAM_HS),
+ /* T21 */
+ PINMUX_DATA(CAM_YUV0_MARK, FN_CAM_YUV0),
+ /* T22 */
+ PINMUX_DATA(CAM_YUV1_MARK, FN_CAM_YUV1),
+ /* T23 */
+ PINMUX_DATA(CAM_YUV2_MARK, FN_CAM_YUV2),
+ /* U21 */
+ PINMUX_DATA(CAM_YUV3_MARK, FN_CAM_YUV3),
+ /* U22 */
+ PINMUX_DATA(CAM_YUV4_MARK, FN_CAM_YUV4),
+ /* U23 */
+ PINMUX_DATA(CAM_YUV5_MARK, FN_CAM_YUV5),
+ /* V22 */
+ PINMUX_DATA(CAM_YUV6_MARK, FN_CAM_YUV6),
+ /* V23 */
+ PINMUX_DATA(CAM_YUV7_MARK, FN_CAM_YUV7),
+ /* K22 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT143, USI5_CLK_B, SEL_HSI_1_0_01),
+ /* K23 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT144, USI5_DO_B, SEL_HSI_1_0_01),
+ /* L23 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT145, USI5_CS0_B, SEL_HSI_1_0_01),
+ /* L22 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT146, USI5_CS1_B, SEL_HSI_1_0_01),
+ /* N22 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT147, USI5_CS2_B, SEL_HSI_1_0_01),
+ /* N23 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT148, USI5_CS3_B, SEL_HSI_1_0_01),
+ /* M23 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT149, USI5_CS4_B, SEL_HSI_1_0_01),
+ /* M22 */
+ PINMUX_IPSR_NOFN(HSI_1_0_PORT150, USI5_DI_B, SEL_HSI_1_0_01),
+ /* D13 */
+ PINMUX_DATA(JT_TDO_MARK, FN_JT_TDO),
+ /* F13 */
+ PINMUX_DATA(JT_TDOEN_MARK, FN_JT_TDOEN),
+ /* AA12 */
+ PINMUX_DATA(USB_VBUS_MARK, FN_USB_VBUS),
+ /* A12 */
+ PINMUX_DATA(LOWPWR_MARK, FN_LOWPWR),
+ /* Y11 */
+ PINMUX_DATA(UART1_RX_MARK, FN_UART1_RX),
+ /* Y10 */
+ PINMUX_DATA(UART1_TX_MARK, FN_UART1_TX),
+ /* AA10 */
+ PINMUX_IPSR_NOFN(UART_1_0_PORT157, UART1_CTSB, SEL_UART_1_0_00),
+ PINMUX_IPSR_NOFN(UART_1_0_PORT157, UART2_RX, SEL_UART_1_0_01),
+ /* AB10 */
+ PINMUX_IPSR_NOFN(UART_1_0_PORT158, UART1_RTSB, SEL_UART_1_0_00),
+ PINMUX_IPSR_NOFN(UART_1_0_PORT158, UART2_TX, SEL_UART_1_0_01),
+};
+
+#define EMEV_MUX_PIN(name, pin, mark) \
+ static const unsigned int name##_pins[] = { pin }; \
+ static const unsigned int name##_mux[] = { mark##_MARK }
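+
+/* For instance, EMEV_MUX_PIN(lowpwr, 154, LOWPWR) below expands to
+ * lowpwr_pins[] = { 154 } and lowpwr_mux[] = { LOWPWR_MARK }. */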
+
+/* = [ System ] =========== */
+EMEV_MUX_PIN(err_rst_reqb, 3, ERR_RST_REQB);
+EMEV_MUX_PIN(ref_clko, 4, REF_CLKO);
+EMEV_MUX_PIN(ext_clki, 5, EXT_CLKI);
+EMEV_MUX_PIN(lowpwr, 154, LOWPWR);
+
+/* = [ External Memory ] === */
+static const unsigned int ab_main_pins[] = {
+ /* AB_RDB, AB_WRB */
+ 73, 74,
+ /* AB_AD[0:15] */
+ 77, 78, 79, 80,
+ 81, 82, 83, 84,
+ 85, 86, 87, 88,
+ 89, 90, 91, 92,
+};
+static const unsigned int ab_main_mux[] = {
+ AB_RDB_MARK, AB_WRB_MARK,
+ AB_AD0_MARK, AB_AD1_MARK, AB_AD2_MARK, AB_AD3_MARK,
+ AB_AD4_MARK, AB_AD5_MARK, AB_AD6_MARK, AB_AD7_MARK,
+ AB_AD8_MARK, AB_AD9_MARK, AB_AD10_MARK, AB_AD11_MARK,
+ AB_AD12_MARK, AB_AD13_MARK, AB_AD14_MARK, AB_AD15_MARK,
+};
+
+EMEV_MUX_PIN(ab_clk, 68, AB_CLK);
+EMEV_MUX_PIN(ab_csb0, 69, AB_CSB0);
+EMEV_MUX_PIN(ab_csb1, 70, AB_CSB1);
+EMEV_MUX_PIN(ab_csb2, 71, AB_CSB2);
+EMEV_MUX_PIN(ab_csb3, 72, AB_CSB3);
+EMEV_MUX_PIN(ab_wait, 75, AB_WAIT);
+EMEV_MUX_PIN(ab_adv, 76, AB_ADV);
+EMEV_MUX_PIN(ab_a17, 93, AB_A17);
+EMEV_MUX_PIN(ab_a18, 94, AB_A18);
+EMEV_MUX_PIN(ab_a19, 95, AB_A19);
+EMEV_MUX_PIN(ab_a20, 96, AB_A20);
+EMEV_MUX_PIN(ab_a21, 97, AB_A21);
+EMEV_MUX_PIN(ab_a22, 98, AB_A22);
+EMEV_MUX_PIN(ab_a23, 99, AB_A23);
+EMEV_MUX_PIN(ab_a24, 100, AB_A24);
+EMEV_MUX_PIN(ab_a25, 101, AB_A25);
+EMEV_MUX_PIN(ab_a26, 102, AB_A26);
+EMEV_MUX_PIN(ab_a27, 103, AB_A27);
+EMEV_MUX_PIN(ab_a28, 104, AB_A28);
+EMEV_MUX_PIN(ab_ben0, 103, AB_BEN0);
+EMEV_MUX_PIN(ab_ben1, 104, AB_BEN1);
+
+/* = [ CAM ] ============== */
+EMEV_MUX_PIN(cam_clko, 131, CAM_CLKO);
+static const unsigned int cam_pins[] = {
+ /* CLKI, VS, HS */
+ 132, 133, 134,
+ /* CAM_YUV[0:7] */
+ 135, 136, 137, 138,
+ 139, 140, 141, 142,
+};
+static const unsigned int cam_mux[] = {
+ CAM_CLKI_MARK, CAM_VS_MARK, CAM_HS_MARK,
+ CAM_YUV0_MARK, CAM_YUV1_MARK, CAM_YUV2_MARK, CAM_YUV3_MARK,
+ CAM_YUV4_MARK, CAM_YUV5_MARK, CAM_YUV6_MARK, CAM_YUV7_MARK,
+};
+
+/* = [ CF ] ============== */
+static const unsigned int cf_ctrl_pins[] = {
+ /* CSB0, CSB1, IORDB, IOWRB, IORDY, RESET,
+ * A00, A01, A02, INTRQ, INPACKB, CDB1, CDB2 */
+ 71, 72, 73, 74,
+ 75, 76, 93, 94,
+ 95, 97, 100, 101,
+ 102,
+};
+static const unsigned int cf_ctrl_mux[] = {
+ CF_CSB0_MARK, CF_CSB1_MARK, CF_IORDB_MARK, CF_IOWRB_MARK,
+ CF_IORDY_MARK, CF_RESET_MARK, CF_A00_MARK, CF_A01_MARK,
+ CF_A02_MARK, CF_INTRQ_MARK, CF_INPACKB_MARK, CF_CDB1_MARK,
+ CF_CDB2_MARK,
+};
+
+static const unsigned int cf_data8_pins[] = {
+ /* CF_D[0:7] */
+ 77, 78, 79, 80,
+ 81, 82, 83, 84,
+};
+static const unsigned int cf_data8_mux[] = {
+ CF_D00_MARK, CF_D01_MARK, CF_D02_MARK, CF_D03_MARK,
+ CF_D04_MARK, CF_D05_MARK, CF_D06_MARK, CF_D07_MARK,
+};
+static const unsigned int cf_data16_pins[] = {
+ /* CF_D[0:15] */
+ 77, 78, 79, 80,
+ 81, 82, 83, 84,
+ 85, 86, 87, 88,
+ 89, 90, 91, 92,
+};
+static const unsigned int cf_data16_mux[] = {
+ CF_D00_MARK, CF_D01_MARK, CF_D02_MARK, CF_D03_MARK,
+ CF_D04_MARK, CF_D05_MARK, CF_D06_MARK, CF_D07_MARK,
+ CF_D08_MARK, CF_D09_MARK, CF_D10_MARK, CF_D11_MARK,
+ CF_D12_MARK, CF_D13_MARK, CF_D14_MARK, CF_D15_MARK,
+};
+
+/* = [ DTV ] ============== */
+static const unsigned int dtv_a_pins[] = {
+ /* BCLK, PSYNC, VALID, DATA */
+ 85, 86, 87, 88,
+};
+static const unsigned int dtv_a_mux[] = {
+ DTV_BCLK_A_MARK, DTV_PSYNC_A_MARK, DTV_VALID_A_MARK, DTV_DATA_A_MARK,
+};
+
+static const unsigned int dtv_b_pins[] = {
+ /* BCLK, PSYNC, VALID, DATA */
+ 109, 110, 111, 112,
+};
+static const unsigned int dtv_b_mux[] = {
+ DTV_BCLK_B_MARK, DTV_PSYNC_B_MARK, DTV_VALID_B_MARK, DTV_DATA_B_MARK,
+};
+
+/* = [ IIC0 ] ============= */
+static const unsigned int iic0_pins[] = {
+ /* SCL, SDA */
+ 44, 45,
+};
+static const unsigned int iic0_mux[] = {
+ IIC0_SCL_MARK, IIC0_SDA_MARK,
+};
+
+/* = [ IIC1 ] ============= */
+static const unsigned int iic1_pins[] = {
+ /* SCL, SDA */
+ 46, 47,
+};
+static const unsigned int iic1_mux[] = {
+ IIC1_SCL_MARK, IIC1_SDA_MARK,
+};
+
+/* = [ JTAG ] ============= */
+static const unsigned int jtag_pins[] = {
+ /* SEL, TDO, TDOEN */
+ 2, 151, 152,
+};
+static const unsigned int jtag_mux[] = {
+ JT_SEL_MARK, JT_TDO_MARK, JT_TDOEN_MARK,
+};
+
+/* = [ LCD/YUV ] ========== */
+EMEV_MUX_PIN(lcd3_pxclk, 18, LCD3_PXCLK);
+EMEV_MUX_PIN(lcd3_pxclkb, 19, LCD3_PXCLKB);
+EMEV_MUX_PIN(lcd3_clk_i, 20, LCD3_CLK_I);
+
+static const unsigned int lcd3_sync_pins[] = {
+ /* HS, VS, DE */
+ 21, 22, 23,
+};
+static const unsigned int lcd3_sync_mux[] = {
+ LCD3_HS_MARK, LCD3_VS_MARK, LCD3_DE_MARK,
+};
+
+static const unsigned int lcd3_rgb888_pins[] = {
+ /* R[0:7], G[0:7], B[0:7] */
+ 32, 33, 34, 35,
+ 36, 37, 38, 39,
+ 40, 41, PIN_NUMBER(2, 17), PIN_NUMBER(3, 17),
+ PIN_NUMBER(4, 17), PIN_NUMBER(2, 16), PIN_NUMBER(3, 16),
+ PIN_NUMBER(4, 16),
+ 42, 43, PIN_NUMBER(2, 15), PIN_NUMBER(3, 15),
+ PIN_NUMBER(4, 15), PIN_NUMBER(2, 14), PIN_NUMBER(3, 14),
+ PIN_NUMBER(4, 14)
+};
+static const unsigned int lcd3_rgb888_mux[] = {
+ LCD3_R0_MARK, LCD3_R1_MARK, LCD3_R2_MARK, LCD3_R3_MARK,
+ LCD3_R4_MARK, LCD3_R5_MARK, LCD3_R6_MARK, LCD3_R7_MARK,
+ LCD3_G0_MARK, LCD3_G1_MARK, LCD3_G2_MARK, LCD3_G3_MARK,
+ LCD3_G4_MARK, LCD3_G5_MARK, LCD3_G6_MARK, LCD3_G7_MARK,
+ LCD3_B0_MARK, LCD3_B1_MARK, LCD3_B2_MARK, LCD3_B3_MARK,
+ LCD3_B4_MARK, LCD3_B5_MARK, LCD3_B6_MARK, LCD3_B7_MARK,
+};
+
+EMEV_MUX_PIN(yuv3_clk_i, 20, YUV3_CLK_I);
+static const unsigned int yuv3_pins[] = {
+ /* CLK_O, HS, VS, DE */
+ 18, 21, 22, 23,
+ /* YUV3_D[0:15] */
+ 40, 41, PIN_NUMBER(2, 17), PIN_NUMBER(3, 17),
+ PIN_NUMBER(4, 17), PIN_NUMBER(2, 16), PIN_NUMBER(3, 16),
+ PIN_NUMBER(4, 16),
+ 42, 43, PIN_NUMBER(2, 15), PIN_NUMBER(3, 15),
+ PIN_NUMBER(4, 15), PIN_NUMBER(2, 14), PIN_NUMBER(3, 14),
+ PIN_NUMBER(4, 14),
+};
+static const unsigned int yuv3_mux[] = {
+ YUV3_CLK_O_MARK, YUV3_HS_MARK, YUV3_VS_MARK, YUV3_DE_MARK,
+ YUV3_D0_MARK, YUV3_D1_MARK, YUV3_D2_MARK, YUV3_D3_MARK,
+ YUV3_D4_MARK, YUV3_D5_MARK, YUV3_D6_MARK, YUV3_D7_MARK,
+ YUV3_D8_MARK, YUV3_D9_MARK, YUV3_D10_MARK, YUV3_D11_MARK,
+ YUV3_D12_MARK, YUV3_D13_MARK, YUV3_D14_MARK, YUV3_D15_MARK,
+};
+
+/* = [ NTSC ] ============= */
+EMEV_MUX_PIN(ntsc_clk, 122, NTSC_CLK);
+static const unsigned int ntsc_data_pins[] = {
+ /* NTSC_DATA[0:7] */
+ 123, 124, 125, 126,
+ 127, 128, 129, 130,
+};
+static const unsigned int ntsc_data_mux[] = {
+ NTSC_DATA0_MARK, NTSC_DATA1_MARK, NTSC_DATA2_MARK, NTSC_DATA3_MARK,
+ NTSC_DATA4_MARK, NTSC_DATA5_MARK, NTSC_DATA6_MARK, NTSC_DATA7_MARK,
+};
+
+/* = [ PWM0 ] ============= */
+EMEV_MUX_PIN(pwm0, 120, PWM0);
+
+/* = [ PWM1 ] ============= */
+EMEV_MUX_PIN(pwm1, 121, PWM1);
+
+/* = [ SD ] =============== */
+EMEV_MUX_PIN(sd_cki, 48, SD_CKI);
+
+/* = [ SDIO0 ] ============ */
+static const unsigned int sdi0_ctrl_pins[] = {
+ /* CKO, CKI, CMD */
+ 50, 51, 52,
+};
+static const unsigned int sdi0_ctrl_mux[] = {
+ SDI0_CKO_MARK, SDI0_CKI_MARK, SDI0_CMD_MARK,
+};
+
+static const unsigned int sdi0_data1_pins[] = {
+ /* SDI0_DATA[0] */
+ 53,
+};
+static const unsigned int sdi0_data1_mux[] = {
+ SDI0_DATA0_MARK,
+};
+static const unsigned int sdi0_data4_pins[] = {
+ /* SDI0_DATA[0:3] */
+ 53, 54, 55, 56,
+};
+static const unsigned int sdi0_data4_mux[] = {
+ SDI0_DATA0_MARK, SDI0_DATA1_MARK, SDI0_DATA2_MARK, SDI0_DATA3_MARK,
+};
+static const unsigned int sdi0_data8_pins[] = {
+ /* SDI0_DATA[0:7] */
+ 53, 54, 55, 56,
+ 57, 58, 59, 60
+};
+static const unsigned int sdi0_data8_mux[] = {
+ SDI0_DATA0_MARK, SDI0_DATA1_MARK, SDI0_DATA2_MARK, SDI0_DATA3_MARK,
+ SDI0_DATA4_MARK, SDI0_DATA5_MARK, SDI0_DATA6_MARK, SDI0_DATA7_MARK,
+};
+
+/* = [ SDIO1 ] ============ */
+static const unsigned int sdi1_ctrl_pins[] = {
+ /* CKO, CKI, CMD */
+ 61, 62, 63,
+};
+static const unsigned int sdi1_ctrl_mux[] = {
+ SDI1_CKO_MARK, SDI1_CKI_MARK, SDI1_CMD_MARK,
+};
+
+static const unsigned int sdi1_data1_pins[] = {
+ /* SDI1_DATA[0] */
+ 64,
+};
+static const unsigned int sdi1_data1_mux[] = {
+ SDI1_DATA0_MARK,
+};
+static const unsigned int sdi1_data4_pins[] = {
+ /* SDI1_DATA[0:3] */
+ 64, 65, 66, 67,
+};
+static const unsigned int sdi1_data4_mux[] = {
+ SDI1_DATA0_MARK, SDI1_DATA1_MARK, SDI1_DATA2_MARK, SDI1_DATA3_MARK,
+};
+
+/* = [ SDIO2 ] ============ */
+static const unsigned int sdi2_ctrl_pins[] = {
+ /* CKO, CKI, CMD */
+ 97, 98, 99,
+};
+static const unsigned int sdi2_ctrl_mux[] = {
+ SDI2_CKO_MARK, SDI2_CKI_MARK, SDI2_CMD_MARK,
+};
+
+static const unsigned int sdi2_data1_pins[] = {
+ /* SDI2_DATA[0] */
+ 89,
+};
+static const unsigned int sdi2_data1_mux[] = {
+ SDI2_DATA0_MARK,
+};
+static const unsigned int sdi2_data4_pins[] = {
+ /* SDI2_DATA[0:3] */
+ 89, 90, 91, 92,
+};
+static const unsigned int sdi2_data4_mux[] = {
+ SDI2_DATA0_MARK, SDI2_DATA1_MARK, SDI2_DATA2_MARK, SDI2_DATA3_MARK,
+};
+
+/* = [ TP33 ] ============= */
+static const unsigned int tp33_pins[] = {
+ /* CLK, CTRL */
+ 38, 39,
+ /* TP33_DATA[0:15] */
+ 40, 41, PIN_NUMBER(2, 17), PIN_NUMBER(3, 17),
+ PIN_NUMBER(4, 17), PIN_NUMBER(2, 16), PIN_NUMBER(3, 16),
+ PIN_NUMBER(4, 16),
+ 42, 43, PIN_NUMBER(2, 15), PIN_NUMBER(3, 15),
+ PIN_NUMBER(4, 15), PIN_NUMBER(2, 14), PIN_NUMBER(3, 14),
+ PIN_NUMBER(4, 14),
+};
+static const unsigned int tp33_mux[] = {
+ TP33_CLK_MARK, TP33_CTRL_MARK,
+ TP33_DATA0_MARK, TP33_DATA1_MARK, TP33_DATA2_MARK, TP33_DATA3_MARK,
+ TP33_DATA4_MARK, TP33_DATA5_MARK, TP33_DATA6_MARK, TP33_DATA7_MARK,
+ TP33_DATA8_MARK, TP33_DATA9_MARK, TP33_DATA10_MARK, TP33_DATA11_MARK,
+ TP33_DATA12_MARK, TP33_DATA13_MARK, TP33_DATA14_MARK, TP33_DATA15_MARK,
+};
+
+/* = [ UART1 ] ============ */
+static const unsigned int uart1_data_pins[] = {
+ /* RX, TX */
+ 155, 156,
+};
+static const unsigned int uart1_data_mux[] = {
+ UART1_RX_MARK, UART1_TX_MARK,
+};
+
+static const unsigned int uart1_ctrl_pins[] = {
+ /* CTSB, RTSB */
+ 157, 158,
+};
+static const unsigned int uart1_ctrl_mux[] = {
+ UART1_CTSB_MARK, UART1_RTSB_MARK,
+};
+
+/* = [ UART2 ] ============ */
+static const unsigned int uart2_data_pins[] = {
+ /* RX, TX */
+ 157, 158,
+};
+static const unsigned int uart2_data_mux[] = {
+ UART2_RX_MARK, UART2_TX_MARK,
+};
+
+/* = [ UART3 ] ============ */
+static const unsigned int uart3_data_pins[] = {
+ /* RX, TX */
+ 46, 47,
+};
+static const unsigned int uart3_data_mux[] = {
+ UART3_RX_MARK, UART3_TX_MARK,
+};
+
+/* = [ USB ] ============== */
+EMEV_MUX_PIN(usb_vbus, 153, USB_VBUS);
+
+/* = [ USI0 ] ============== */
+EMEV_MUX_PIN(usi0_cs1, 105, USI0_CS1);
+EMEV_MUX_PIN(usi0_cs2, 106, USI0_CS2);
+EMEV_MUX_PIN(usi0_cs3, 115, USI0_CS3);
+EMEV_MUX_PIN(usi0_cs4, 116, USI0_CS4);
+EMEV_MUX_PIN(usi0_cs5, 117, USI0_CS5);
+EMEV_MUX_PIN(usi0_cs6, 118, USI0_CS6);
+
+/* = [ USI1 ] ============== */
+static const unsigned int usi1_pins[] = {
+ /* DI, DO */
+ 107, 108,
+};
+static const unsigned int usi1_mux[] = {
+ USI1_DI_MARK, USI1_DO_MARK,
+};
+
+/* = [ USI2 ] ============== */
+static const unsigned int usi2_pins[] = {
+ /* CLK, DI, DO */
+ 109, 110, 111,
+};
+static const unsigned int usi2_mux[] = {
+ USI2_CLK_MARK, USI2_DI_MARK, USI2_DO_MARK,
+};
+EMEV_MUX_PIN(usi2_cs0, 112, USI2_CS0);
+EMEV_MUX_PIN(usi2_cs1, 113, USI2_CS1);
+EMEV_MUX_PIN(usi2_cs2, 114, USI2_CS2);
+
+/* = [ USI3 ] ============== */
+static const unsigned int usi3_pins[] = {
+ /* CLK, DI, DO */
+ 115, 116, 117,
+};
+static const unsigned int usi3_mux[] = {
+ USI3_CLK_MARK, USI3_DI_MARK, USI3_DO_MARK,
+};
+EMEV_MUX_PIN(usi3_cs0, 118, USI3_CS0);
+
+/* = [ USI4 ] ============== */
+static const unsigned int usi4_pins[] = {
+ /* CLK, DI, DO */
+ 119, 120, 121,
+};
+static const unsigned int usi4_mux[] = {
+ USI4_CLK_MARK, USI4_DI_MARK, USI4_DO_MARK,
+};
+EMEV_MUX_PIN(usi4_cs0, 113, USI4_CS0);
+EMEV_MUX_PIN(usi4_cs1, 114, USI4_CS1);
+
+/* = [ USI5 ] ============== */
+static const unsigned int usi5_a_pins[] = {
+ /* CLK, DI, DO */
+ 85, 86, 87,
+};
+static const unsigned int usi5_a_mux[] = {
+ USI5_CLK_A_MARK, USI5_DI_A_MARK, USI5_DO_A_MARK,
+};
+EMEV_MUX_PIN(usi5_cs0_a, 88, USI5_CS0_A);
+EMEV_MUX_PIN(usi5_cs1_a, 89, USI5_CS1_A);
+EMEV_MUX_PIN(usi5_cs2_a, 90, USI5_CS2_A);
+
+static const unsigned int usi5_b_pins[] = {
+ /* CLK, DI, DO */
+ 143, 144, 150,
+};
+static const unsigned int usi5_b_mux[] = {
+ USI5_CLK_B_MARK, USI5_DI_B_MARK, USI5_DO_B_MARK,
+};
+EMEV_MUX_PIN(usi5_cs0_b, 145, USI5_CS0_B);
+EMEV_MUX_PIN(usi5_cs1_b, 146, USI5_CS1_B);
+EMEV_MUX_PIN(usi5_cs2_b, 147, USI5_CS2_B);
+EMEV_MUX_PIN(usi5_cs3_b, 148, USI5_CS3_B);
+EMEV_MUX_PIN(usi5_cs4_b, 149, USI5_CS4_B);
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(err_rst_reqb),
+ SH_PFC_PIN_GROUP(ref_clko),
+ SH_PFC_PIN_GROUP(ext_clki),
+ SH_PFC_PIN_GROUP(lowpwr),
+
+ SH_PFC_PIN_GROUP(ab_main),
+ SH_PFC_PIN_GROUP(ab_clk),
+ SH_PFC_PIN_GROUP(ab_csb0),
+ SH_PFC_PIN_GROUP(ab_csb1),
+ SH_PFC_PIN_GROUP(ab_csb2),
+ SH_PFC_PIN_GROUP(ab_csb3),
+ SH_PFC_PIN_GROUP(ab_wait),
+ SH_PFC_PIN_GROUP(ab_adv),
+ SH_PFC_PIN_GROUP(ab_a17),
+ SH_PFC_PIN_GROUP(ab_a18),
+ SH_PFC_PIN_GROUP(ab_a19),
+ SH_PFC_PIN_GROUP(ab_a20),
+ SH_PFC_PIN_GROUP(ab_a21),
+ SH_PFC_PIN_GROUP(ab_a22),
+ SH_PFC_PIN_GROUP(ab_a23),
+ SH_PFC_PIN_GROUP(ab_a24),
+ SH_PFC_PIN_GROUP(ab_a25),
+ SH_PFC_PIN_GROUP(ab_a26),
+ SH_PFC_PIN_GROUP(ab_a27),
+ SH_PFC_PIN_GROUP(ab_a28),
+ SH_PFC_PIN_GROUP(ab_ben0),
+ SH_PFC_PIN_GROUP(ab_ben1),
+
+ SH_PFC_PIN_GROUP(cam_clko),
+ SH_PFC_PIN_GROUP(cam),
+
+ SH_PFC_PIN_GROUP(cf_ctrl),
+ SH_PFC_PIN_GROUP(cf_data8),
+ SH_PFC_PIN_GROUP(cf_data16),
+
+ SH_PFC_PIN_GROUP(dtv_a),
+ SH_PFC_PIN_GROUP(dtv_b),
+
+ SH_PFC_PIN_GROUP(iic0),
+
+ SH_PFC_PIN_GROUP(iic1),
+
+ SH_PFC_PIN_GROUP(jtag),
+
+ SH_PFC_PIN_GROUP(lcd3_pxclk),
+ SH_PFC_PIN_GROUP(lcd3_pxclkb),
+ SH_PFC_PIN_GROUP(lcd3_clk_i),
+ SH_PFC_PIN_GROUP(lcd3_sync),
+ SH_PFC_PIN_GROUP(lcd3_rgb888),
+ SH_PFC_PIN_GROUP(yuv3_clk_i),
+ SH_PFC_PIN_GROUP(yuv3),
+
+ SH_PFC_PIN_GROUP(ntsc_clk),
+ SH_PFC_PIN_GROUP(ntsc_data),
+
+ SH_PFC_PIN_GROUP(pwm0),
+
+ SH_PFC_PIN_GROUP(pwm1),
+
+ SH_PFC_PIN_GROUP(sd_cki),
+
+ SH_PFC_PIN_GROUP(sdi0_ctrl),
+ SH_PFC_PIN_GROUP(sdi0_data1),
+ SH_PFC_PIN_GROUP(sdi0_data4),
+ SH_PFC_PIN_GROUP(sdi0_data8),
+
+ SH_PFC_PIN_GROUP(sdi1_ctrl),
+ SH_PFC_PIN_GROUP(sdi1_data1),
+ SH_PFC_PIN_GROUP(sdi1_data4),
+
+ SH_PFC_PIN_GROUP(sdi2_ctrl),
+ SH_PFC_PIN_GROUP(sdi2_data1),
+ SH_PFC_PIN_GROUP(sdi2_data4),
+
+ SH_PFC_PIN_GROUP(tp33),
+
+ SH_PFC_PIN_GROUP(uart1_data),
+ SH_PFC_PIN_GROUP(uart1_ctrl),
+
+ SH_PFC_PIN_GROUP(uart2_data),
+
+ SH_PFC_PIN_GROUP(uart3_data),
+
+ SH_PFC_PIN_GROUP(usb_vbus),
+
+ SH_PFC_PIN_GROUP(usi0_cs1),
+ SH_PFC_PIN_GROUP(usi0_cs2),
+ SH_PFC_PIN_GROUP(usi0_cs3),
+ SH_PFC_PIN_GROUP(usi0_cs4),
+ SH_PFC_PIN_GROUP(usi0_cs5),
+ SH_PFC_PIN_GROUP(usi0_cs6),
+
+ SH_PFC_PIN_GROUP(usi1),
+
+ SH_PFC_PIN_GROUP(usi2),
+ SH_PFC_PIN_GROUP(usi2_cs0),
+ SH_PFC_PIN_GROUP(usi2_cs1),
+ SH_PFC_PIN_GROUP(usi2_cs2),
+
+ SH_PFC_PIN_GROUP(usi3),
+ SH_PFC_PIN_GROUP(usi3_cs0),
+
+ SH_PFC_PIN_GROUP(usi4),
+ SH_PFC_PIN_GROUP(usi4_cs0),
+ SH_PFC_PIN_GROUP(usi4_cs1),
+
+ SH_PFC_PIN_GROUP(usi5_a),
+ SH_PFC_PIN_GROUP(usi5_cs0_a),
+ SH_PFC_PIN_GROUP(usi5_cs1_a),
+ SH_PFC_PIN_GROUP(usi5_cs2_a),
+ SH_PFC_PIN_GROUP(usi5_b),
+ SH_PFC_PIN_GROUP(usi5_cs0_b),
+ SH_PFC_PIN_GROUP(usi5_cs1_b),
+ SH_PFC_PIN_GROUP(usi5_cs2_b),
+ SH_PFC_PIN_GROUP(usi5_cs3_b),
+ SH_PFC_PIN_GROUP(usi5_cs4_b),
+};
+
+static const char * const ab_groups[] = {
+ "ab_main",
+ "ab_clk",
+ "ab_csb0",
+ "ab_csb1",
+ "ab_csb2",
+ "ab_csb3",
+ "ab_wait",
+ "ab_adv",
+ "ab_a17",
+ "ab_a18",
+ "ab_a19",
+ "ab_a20",
+ "ab_a21",
+ "ab_a22",
+ "ab_a23",
+ "ab_a24",
+ "ab_a25",
+ "ab_a26",
+ "ab_a27",
+ "ab_a28",
+ "ab_ben0",
+ "ab_ben1",
+};
+
+static const char * const cam_groups[] = {
+ "cam_clko",
+ "cam",
+};
+
+static const char * const cf_groups[] = {
+ "cf_ctrl",
+ "cf_data8",
+ "cf_data16",
+};
+
+static const char * const dtv_groups[] = {
+ "dtv_a",
+ "dtv_b",
+};
+
+static const char * const iic0_groups[] = {
+ "iic0",
+};
+
+static const char * const iic1_groups[] = {
+ "iic1",
+};
+
+static const char * const jtag_groups[] = {
+ "jtag",
+};
+
+static const char * const lcd_groups[] = {
+ "lcd3_pxclk",
+ "lcd3_pxclkb",
+ "lcd3_clk_i",
+ "lcd3_sync",
+ "lcd3_rgb888",
+ "yuv3_clk_i",
+ "yuv3",
+};
+
+static const char * const ntsc_groups[] = {
+ "ntsc_clk",
+ "ntsc_data",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1",
+};
+
+static const char * const sd_groups[] = {
+ "sd_cki",
+};
+
+static const char * const sdi0_groups[] = {
+ "sdi0_ctrl",
+ "sdi0_data1",
+ "sdi0_data4",
+ "sdi0_data8",
+};
+
+static const char * const sdi1_groups[] = {
+ "sdi1_ctrl",
+ "sdi1_data1",
+ "sdi1_data4",
+};
+
+static const char * const sdi2_groups[] = {
+ "sdi2_ctrl",
+ "sdi2_data1",
+ "sdi2_data4",
+};
+
+static const char * const tp33_groups[] = {
+ "tp33",
+};
+
+static const char * const uart1_groups[] = {
+ "uart1_data",
+ "uart1_ctrl",
+};
+
+static const char * const uart2_groups[] = {
+ "uart2_data",
+};
+
+static const char * const uart3_groups[] = {
+ "uart3_data",
+};
+
+static const char * const usb_groups[] = {
+ "usb_vbus",
+};
+
+static const char * const usi0_groups[] = {
+ "usi0_cs1",
+ "usi0_cs2",
+ "usi0_cs3",
+ "usi0_cs4",
+ "usi0_cs5",
+ "usi0_cs6",
+};
+
+static const char * const usi1_groups[] = {
+ "usi1",
+};
+
+static const char * const usi2_groups[] = {
+ "usi2",
+ "usi2_cs0",
+ "usi2_cs1",
+ "usi2_cs2",
+};
+
+static const char * const usi3_groups[] = {
+ "usi3",
+ "usi3_cs0",
+};
+
+static const char * const usi4_groups[] = {
+ "usi4",
+ "usi4_cs0",
+ "usi4_cs1",
+};
+
+static const char * const usi5_groups[] = {
+ "usi5_a",
+ "usi5_cs0_a",
+ "usi5_cs1_a",
+ "usi5_cs2_a",
+ "usi5_b",
+ "usi5_cs0_b",
+ "usi5_cs1_b",
+ "usi5_cs2_b",
+ "usi5_cs3_b",
+ "usi5_cs4_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(ab),
+ SH_PFC_FUNCTION(cam),
+ SH_PFC_FUNCTION(cf),
+ SH_PFC_FUNCTION(dtv),
+ SH_PFC_FUNCTION(iic0),
+ SH_PFC_FUNCTION(iic1),
+ SH_PFC_FUNCTION(jtag),
+ SH_PFC_FUNCTION(lcd),
+ SH_PFC_FUNCTION(ntsc),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(sd),
+ SH_PFC_FUNCTION(sdi0),
+ SH_PFC_FUNCTION(sdi1),
+ SH_PFC_FUNCTION(sdi2),
+ SH_PFC_FUNCTION(tp33),
+ SH_PFC_FUNCTION(uart1),
+ SH_PFC_FUNCTION(uart2),
+ SH_PFC_FUNCTION(uart3),
+ SH_PFC_FUNCTION(usb),
+ SH_PFC_FUNCTION(usi0),
+ SH_PFC_FUNCTION(usi1),
+ SH_PFC_FUNCTION(usi2),
+ SH_PFC_FUNCTION(usi3),
+ SH_PFC_FUNCTION(usi4),
+ SH_PFC_FUNCTION(usi5),
+};
+
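+/* Note: in the descriptors below the fields appear in order from bit 31 down
+ * to bit 0 (matching the per-pin ball comments and the bit-range comments);
+ * a plain 0 marks a reserved or unused value. */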
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xe0140200, 32, 1) {
+ 0, PORT31_FN, /* PIN: J18 */
+ 0, PORT30_FN, /* PIN: H18 */
+ 0, PORT29_FN, /* PIN: G18 */
+ 0, PORT28_FN, /* PIN: F18 */
+ 0, PORT27_FN, /* PIN: F17 */
+ 0, PORT26_FN, /* PIN: F16 */
+ 0, PORT25_FN, /* PIN: E20 */
+ 0, PORT24_FN, /* PIN: D20 */
+ FN_LCD3_1_0_PORT23, PORT23_FN, /* PIN: D19 */
+ FN_LCD3_1_0_PORT22, PORT22_FN, /* PIN: C20 */
+ FN_LCD3_1_0_PORT21, PORT21_FN, /* PIN: B21 */
+ FN_LCD3_1_0_PORT20, PORT20_FN, /* PIN: A21 */
+ FN_LCD3_PXCLKB, PORT19_FN, /* PIN: C21 */
+ FN_LCD3_1_0_PORT18, PORT18_FN, /* PIN: B22 */
+ 0, PORT17_FN, /* PIN: W20 */
+ 0, PORT16_FN, /* PIN: W21 */
+ 0, PORT15_FN, /* PIN: Y19 */
+ 0, PORT14_FN, /* PIN: Y20 */
+ 0, PORT13_FN, /* PIN: Y21 */
+ 0, PORT12_FN, /* PIN: AA20 */
+ 0, PORT11_FN, /* PIN: AA21 */
+ 0, PORT10_FN, /* PIN: AA22 */
+ 0, PORT9_FN, /* PIN: V15 */
+ 0, PORT8_FN, /* PIN: V16 */
+ 0, PORT7_FN, /* PIN: V17 */
+ 0, PORT6_FN, /* PIN: V18 */
+ FN_EXT_CLKI, PORT5_FN, /* PIN: U8 */
+ FN_REF_CLKO, PORT4_FN, /* PIN: V8 */
+ FN_ERR_RST_REQB, PORT3_FN, /* PIN: U9 */
+ FN_JT_SEL, PORT2_FN, /* PIN: V9 */
+ 0, PORT1_FN, /* PIN: U10 */
+ 0, PORT0_FN, /* PIN: V10 */
+ }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xe0140204, 32, 1) {
+ FN_SDI1_CMD, PORT63_FN, /* PIN: AC21 */
+ FN_SDI1_CKI, PORT62_FN, /* PIN: AA23 */
+ FN_SDI1_CKO, PORT61_FN, /* PIN: AB22 */
+ FN_SDI0_DATA7, PORT60_FN, /* PIN: Y16 */
+ FN_SDI0_DATA6, PORT59_FN, /* PIN: AA16 */
+ FN_SDI0_DATA5, PORT58_FN, /* PIN: Y15 */
+ FN_SDI0_DATA4, PORT57_FN, /* PIN: AA15 */
+ FN_SDI0_DATA3, PORT56_FN, /* PIN: Y14 */
+ FN_SDI0_DATA2, PORT55_FN, /* PIN: AA14 */
+ FN_SDI0_DATA1, PORT54_FN, /* PIN: Y13 */
+ FN_SDI0_DATA0, PORT53_FN, /* PIN: AA13 */
+ FN_SDI0_CMD, PORT52_FN, /* PIN: Y12 */
+ FN_SDI0_CKI, PORT51_FN, /* PIN: AC18 */
+ FN_SDI0_CKO, PORT50_FN, /* PIN: AB18 */
+ 0, PORT49_FN, /* PIN: AB16 */
+ FN_SD_CKI, PORT48_FN, /* PIN: AC19 */
+ FN_IIC_1_0_PORT47, PORT47_FN, /* PIN: Y8 */
+ FN_IIC_1_0_PORT46, PORT46_FN, /* PIN: Y9 */
+ FN_IIC0_SDA, PORT45_FN, /* PIN: AA8 */
+ FN_IIC0_SCL, PORT44_FN, /* PIN: AA9 */
+ FN_LCD3_11_10_PORT43, PORT43_FN, /* PIN: A15 */
+ FN_LCD3_11_10_PORT42, PORT42_FN, /* PIN: A16 */
+ FN_LCD3_11_10_PORT41, PORT41_FN, /* PIN: A17 */
+ FN_LCD3_11_10_PORT40, PORT40_FN, /* PIN: A18 */
+ FN_LCD3_9_8_PORT39, PORT39_FN, /* PIN: D18 */
+ FN_LCD3_9_8_PORT38, PORT38_FN, /* PIN: C18 */
+ FN_LCD3_R5, PORT37_FN, /* PIN: B18 */
+ FN_LCD3_R4, PORT36_FN, /* PIN: C19 */
+ FN_LCD3_R3, PORT35_FN, /* PIN: B19 */
+ FN_LCD3_R2, PORT34_FN, /* PIN: A19 */
+ FN_LCD3_R1, PORT33_FN, /* PIN: B20 */
+ FN_LCD3_R0, PORT32_FN, /* PIN: A20 */
+ }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xe0140208, 32, 1) {
+ FN_AB_1_0_PORT95, PORT95_FN, /* PIN: L21 */
+ FN_AB_1_0_PORT94, PORT94_FN, /* PIN: K21 */
+ FN_AB_1_0_PORT93, PORT93_FN, /* PIN: J21 */
+ FN_AB_7_6_PORT92, PORT92_FN, /* PIN: J22 */
+ FN_AB_7_6_PORT91, PORT91_FN, /* PIN: H21 */
+ FN_AB_5_4_PORT90, PORT90_FN, /* PIN: H22 */
+ FN_AB_5_4_PORT89, PORT89_FN, /* PIN: H23 */
+ FN_AB_3_2_PORT88, PORT88_FN, /* PIN: G21 */
+ FN_AB_3_2_PORT87, PORT87_FN, /* PIN: G22 */
+ FN_AB_3_2_PORT86, PORT86_FN, /* PIN: G23 */
+ FN_AB_3_2_PORT85, PORT85_FN, /* PIN: F21 */
+ FN_AB_1_0_PORT84, PORT84_FN, /* PIN: F22 */
+ FN_AB_1_0_PORT83, PORT83_FN, /* PIN: F23 */
+ FN_AB_1_0_PORT82, PORT82_FN, /* PIN: E22 */
+ FN_AB_1_0_PORT81, PORT81_FN, /* PIN: E23 */
+ FN_AB_1_0_PORT80, PORT80_FN, /* PIN: D22 */
+ FN_AB_1_0_PORT79, PORT79_FN, /* PIN: D23 */
+ FN_AB_1_0_PORT78, PORT78_FN, /* PIN: C22 */
+ FN_AB_1_0_PORT77, PORT77_FN, /* PIN: C23 */
+ FN_AB_1_0_PORT76, PORT76_FN, /* PIN: K20 */
+ FN_AB_1_0_PORT75, PORT75_FN, /* PIN: L20 */
+ FN_AB_1_0_PORT74, PORT74_FN, /* PIN: H20 */
+ FN_AB_1_0_PORT73, PORT73_FN, /* PIN: J20 */
+ FN_AB_1_0_PORT72, PORT72_FN, /* PIN: G20 */
+ FN_AB_1_0_PORT71, PORT71_FN, /* PIN: F20 */
+ FN_AB_CSB1, PORT70_FN, /* PIN: E21 */
+ FN_AB_CSB0, PORT69_FN, /* PIN: D21 */
+ FN_AB_CLK, PORT68_FN, /* PIN: J23 */
+ FN_SDI1_DATA3, PORT67_FN, /* PIN: AA19 */
+ FN_SDI1_DATA2, PORT66_FN, /* PIN: AB19 */
+ FN_SDI1_DATA1, PORT65_FN, /* PIN: AB20 */
+ FN_SDI1_DATA0, PORT64_FN, /* PIN: AB21 */
+ }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xe014020c, 32, 1) {
+ FN_NTSC_DATA4, PORT127_FN, /* PIN: T20 */
+ FN_NTSC_DATA3, PORT126_FN, /* PIN: R18 */
+ FN_NTSC_DATA2, PORT125_FN, /* PIN: R20 */
+ FN_NTSC_DATA1, PORT124_FN, /* PIN: P18 */
+ FN_NTSC_DATA0, PORT123_FN, /* PIN: P20 */
+ FN_NTSC_CLK, PORT122_FN, /* PIN: V20 */
+ FN_USI_9_8_PORT121, PORT121_FN, /* PIN: Y5 */
+ FN_USI_9_8_PORT120, PORT120_FN, /* PIN: AA4 */
+ FN_USI_7_6_PORT119, PORT119_FN, /* PIN: AB3 */
+ FN_USI_5_4_PORT118, PORT118_FN, /* PIN: AB4 */
+ FN_USI_5_4_PORT117, PORT117_FN, /* PIN: AC3 */
+ FN_USI_5_4_PORT116, PORT116_FN, /* PIN: AC4 */
+ FN_USI_5_4_PORT115, PORT115_FN, /* PIN: AC5 */
+ FN_USI_3_2_PORT114, PORT114_FN, /* PIN: Y6 */
+ FN_USI_3_2_PORT113, PORT113_FN, /* PIN: AA7 */
+ FN_USI_1_0_PORT112, PORT112_FN, /* PIN: Y7 */
+ FN_USI_1_0_PORT111, PORT111_FN, /* PIN: AA5 */
+ FN_USI_1_0_PORT110, PORT110_FN, /* PIN: AA6 */
+ FN_USI_1_0_PORT109, PORT109_FN, /* PIN: AB5 */
+ FN_USI1_DO, PORT108_FN, /* PIN: D10 */
+ FN_USI1_DI, PORT107_FN, /* PIN: C10 */
+ FN_USI0_CS2, PORT106_FN, /* PIN: B9 */
+ FN_USI0_CS1, PORT105_FN, /* PIN: B8 */
+ FN_AB_13_12_PORT104, PORT104_FN, /* PIN: M17 */
+ FN_AB_13_12_PORT103, PORT103_FN, /* PIN: L17 */
+ FN_AB_11_10_PORT102, PORT102_FN, /* PIN: N18 */
+ FN_AB_11_10_PORT101, PORT101_FN, /* PIN: M18 */
+ FN_AB_11_10_PORT100, PORT100_FN, /* PIN: L18 */
+ FN_AB_9_8_PORT99, PORT99_FN, /* PIN: N20 */
+ FN_AB_9_8_PORT98, PORT98_FN, /* PIN: M20 */
+ FN_AB_9_8_PORT97, PORT97_FN, /* PIN: N21 */
+ FN_AB_A20, PORT96_FN, /* PIN: M21 */
+ }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xe0140210, 32, 1) {
+ 0, 0,
+ FN_UART_1_0_PORT158, PORT158_FN, /* PIN: AB10 */
+ FN_UART_1_0_PORT157, PORT157_FN, /* PIN: AA10 */
+ FN_UART1_TX, PORT156_FN, /* PIN: Y10 */
+ FN_UART1_RX, PORT155_FN, /* PIN: Y11 */
+ FN_LOWPWR, PORT154_FN, /* PIN: A12 */
+ FN_USB_VBUS, PORT153_FN, /* PIN: AA12 */
+ FN_JT_TDOEN, PORT152_FN, /* PIN: F13 */
+ FN_JT_TDO, PORT151_FN, /* PIN: D13 */
+ FN_HSI_1_0_PORT150, PORT150_FN, /* PIN: M22 */
+ FN_HSI_1_0_PORT149, PORT149_FN, /* PIN: M23 */
+ FN_HSI_1_0_PORT148, PORT148_FN, /* PIN: N23 */
+ FN_HSI_1_0_PORT147, PORT147_FN, /* PIN: N22 */
+ FN_HSI_1_0_PORT146, PORT146_FN, /* PIN: L22 */
+ FN_HSI_1_0_PORT145, PORT145_FN, /* PIN: L23 */
+ FN_HSI_1_0_PORT144, PORT144_FN, /* PIN: K23 */
+ FN_HSI_1_0_PORT143, PORT143_FN, /* PIN: K22 */
+ FN_CAM_YUV7, PORT142_FN, /* PIN: V23 */
+ FN_CAM_YUV6, PORT141_FN, /* PIN: V22 */
+ FN_CAM_YUV5, PORT140_FN, /* PIN: U23 */
+ FN_CAM_YUV4, PORT139_FN, /* PIN: U22 */
+ FN_CAM_YUV3, PORT138_FN, /* PIN: U21 */
+ FN_CAM_YUV2, PORT137_FN, /* PIN: T23 */
+ FN_CAM_YUV1, PORT136_FN, /* PIN: T22 */
+ FN_CAM_YUV0, PORT135_FN, /* PIN: T21 */
+ FN_CAM_HS, PORT134_FN, /* PIN: V21 */
+ FN_CAM_VS, PORT133_FN, /* PIN: W22 */
+ FN_CAM_CLKI, PORT132_FN, /* PIN: Y23 */
+ FN_CAM_CLKO, PORT131_FN, /* PIN: W23 */
+ FN_NTSC_DATA7, PORT130_FN, /* PIN: U18 */
+ FN_NTSC_DATA6, PORT129_FN, /* PIN: U20 */
+ FN_NTSC_DATA5, PORT128_FN, /* PIN: T18 */
+ }
+ },
+ { PINMUX_CFG_REG_VAR("CHG_PINSEL_LCD3", 0xe0140284, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2) {
+ /* 31 - 12 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 11 - 10 */
+ FN_SEL_LCD3_11_10_00, FN_SEL_LCD3_11_10_01,
+ FN_SEL_LCD3_11_10_10, 0,
+ /* 9 - 8 */
+ FN_SEL_LCD3_9_8_00, 0, FN_SEL_LCD3_9_8_10, 0,
+ /* 7 - 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 1 - 0 */
+ FN_SEL_LCD3_1_0_00, FN_SEL_LCD3_1_0_01, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG_VAR("CHG_PINSEL_UART", 0xe0140288, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2) {
+ /* 31 - 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 1 - 0 */
+ FN_SEL_UART_1_0_00, FN_SEL_UART_1_0_01, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG_VAR("CHG_PINSEL_IIC", 0xe014028c, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2) {
+ /* 31 - 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 1 - 0 */
+ FN_SEL_IIC_1_0_00, FN_SEL_IIC_1_0_01, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG_VAR("CHG_PINSEL_AB", 0xe0140294, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* 31 - 14 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* 13 - 12 */
+ FN_SEL_AB_13_12_00, 0, FN_SEL_AB_13_12_10, 0,
+ /* 11 - 10 */
+ FN_SEL_AB_11_10_00, 0, FN_SEL_AB_11_10_10, 0,
+ /* 9 - 8 */
+ FN_SEL_AB_9_8_00, FN_SEL_AB_9_8_01, FN_SEL_AB_9_8_10, 0,
+ /* 7 - 6 */
+ FN_SEL_AB_7_6_00, FN_SEL_AB_7_6_01, FN_SEL_AB_7_6_10, 0,
+ /* 5 - 4 */
+ FN_SEL_AB_5_4_00, FN_SEL_AB_5_4_01,
+ FN_SEL_AB_5_4_10, FN_SEL_AB_5_4_11,
+ /* 3 - 2 */
+ FN_SEL_AB_3_2_00, FN_SEL_AB_3_2_01,
+ FN_SEL_AB_3_2_10, FN_SEL_AB_3_2_11,
+ /* 1 - 0 */
+ FN_SEL_AB_1_0_00, 0, FN_SEL_AB_1_0_10, 0,
+ }
+ },
+ { PINMUX_CFG_REG_VAR("CHG_PINSEL_USI", 0xe0140298, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2) {
+ /* 31 - 10 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 9 - 8 */
+ FN_SEL_USI_9_8_00, FN_SEL_USI_9_8_01, 0, 0,
+ /* 7 - 6 */
+ FN_SEL_USI_7_6_00, FN_SEL_USI_7_6_01, 0, 0,
+ /* 5 - 4 */
+ FN_SEL_USI_5_4_00, FN_SEL_USI_5_4_01, 0, 0,
+ /* 3 - 2 */
+ FN_SEL_USI_3_2_00, FN_SEL_USI_3_2_01, 0, 0,
+ /* 1 - 0 */
+ FN_SEL_USI_1_0_00, FN_SEL_USI_1_0_01, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG_VAR("CHG_PINSEL_HSI", 0xe01402a8, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2) {
+ /* 31 - 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 1 - 0 */
+ FN_SEL_HSI_1_0_00, FN_SEL_HSI_1_0_01, 0, 0,
+ }
+ },
+ { },
+};
+
+const struct sh_pfc_soc_info emev2_pinmux_info = {
+ .name = "emev2_pfc",
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 9a179c94b4dc..80c1843bb6ad 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -2241,6 +2241,13 @@ static const unsigned int intc_irq3_pins[] = {
static const unsigned int intc_irq3_mux[] = {
IRQ3_MARK,
};
+/* - MLB+ ------------------------------------------------------------------- */
+static const unsigned int mlb_3pin_pins[] = {
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int mlb_3pin_mux[] = {
+ MLB_CLK_MARK, MLB_SIG_MARK, MLB_DAT_MARK,
+};
/* - MMCIF0 ----------------------------------------------------------------- */
static const unsigned int mmc0_data1_pins[] = {
/* D[0] */
@@ -3873,6 +3880,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(intc_irq1),
SH_PFC_PIN_GROUP(intc_irq2),
SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(mlb_3pin),
SH_PFC_PIN_GROUP(mmc0_data1),
SH_PFC_PIN_GROUP(mmc0_data4),
SH_PFC_PIN_GROUP(mmc0_data8),
@@ -4198,6 +4206,10 @@ static const char * const intc_groups[] = {
"intc_irq3",
};
+static const char * const mlb_groups[] = {
+ "mlb_3pin",
+};
+
static const char * const mmc0_groups[] = {
"mmc0_data1",
"mmc0_data4",
@@ -4511,6 +4523,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(iic2),
SH_PFC_FUNCTION(iic3),
SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(mlb),
SH_PFC_FUNCTION(mmc0),
SH_PFC_FUNCTION(mmc1),
SH_PFC_FUNCTION(msiof0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index c6e5deba238e..fdd2c8729791 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -378,7 +378,7 @@ enum {
/* IPSR16 */
FN_HRX1, FN_SCIFB1_RXD, FN_VI1_R0_B, FN_GLO_SDATA_C, FN_VI1_DATA6_C,
FN_HTX1, FN_SCIFB1_TXD, FN_VI1_R1_B, FN_GLO_SS_C, FN_VI1_DATA7_C,
- FN_HSCK1, FN_SCIFB1_SCK, FN_MLB_CK, FN_GLO_RFON_C,
+ FN_HSCK1, FN_SCIFB1_SCK, FN_MLB_CLK, FN_GLO_RFON_C,
FN_HCTS1_N, FN_SCIFB1_CTS_N, FN_MLB_SIG, FN_CAN1_TX_B,
FN_HRTS1_N, FN_SCIFB1_RTS_N, FN_MLB_DAT, FN_CAN1_RX_B,
@@ -764,7 +764,7 @@ enum {
GLO_SDATA_C_MARK, VI1_DATA6_C_MARK,
HTX1_MARK, SCIFB1_TXD_MARK, VI1_R1_B_MARK,
GLO_SS_C_MARK, VI1_DATA7_C_MARK,
- HSCK1_MARK, SCIFB1_SCK_MARK, MLB_CK_MARK, GLO_RFON_C_MARK,
+ HSCK1_MARK, SCIFB1_SCK_MARK, MLB_CLK_MARK, GLO_RFON_C_MARK,
HCTS1_N_MARK, SCIFB1_CTS_N_MARK, MLB_SIG_MARK, CAN1_TX_B_MARK,
HRTS1_N_MARK, SCIFB1_RTS_N_MARK, MLB_DAT_MARK, CAN1_RX_B_MARK,
PINMUX_MARK_END,
@@ -1664,7 +1664,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
PINMUX_IPSR_MODSEL_DATA(IP16_7_6, HSCK1, SEL_HSCIF1_0),
PINMUX_IPSR_MODSEL_DATA(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
- PINMUX_IPSR_DATA(IP16_7_6, MLB_CK),
+ PINMUX_IPSR_DATA(IP16_7_6, MLB_CLK),
PINMUX_IPSR_MODSEL_DATA(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
PINMUX_IPSR_MODSEL_DATA(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N),
@@ -2391,6 +2391,13 @@ static const unsigned int intc_irq3_pins[] = {
static const unsigned int intc_irq3_mux[] = {
IRQ3_MARK,
};
+/* - MLB+ ------------------------------------------------------------------- */
+static const unsigned int mlb_3pin_pins[] = {
+ RCAR_GP_PIN(7, 7), RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9),
+};
+static const unsigned int mlb_3pin_mux[] = {
+ MLB_CLK_MARK, MLB_SIG_MARK, MLB_DAT_MARK,
+};
/* - MMCIF ------------------------------------------------------------------ */
static const unsigned int mmc_data1_pins[] = {
/* D[0] */
@@ -4267,6 +4274,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(intc_irq1),
SH_PFC_PIN_GROUP(intc_irq2),
SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(mlb_3pin),
SH_PFC_PIN_GROUP(mmc_data1),
SH_PFC_PIN_GROUP(mmc_data4),
SH_PFC_PIN_GROUP(mmc_data8),
@@ -4648,6 +4656,10 @@ static const char * const intc_groups[] = {
"intc_irq3",
};
+static const char * const mlb_groups[] = {
+ "mlb_3pin",
+};
+
static const char * const mmc_groups[] = {
"mmc_data1",
"mmc_data4",
@@ -4972,6 +4984,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c7),
SH_PFC_FUNCTION(i2c8),
SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(mlb),
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
@@ -5974,7 +5987,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP16_9_8 [2] */
FN_HCTS1_N, FN_SCIFB1_CTS_N, FN_MLB_SIG, FN_CAN1_TX_B,
/* IP16_7_6 [2] */
- FN_HSCK1, FN_SCIFB1_SCK, FN_MLB_CK, FN_GLO_RFON_C,
+ FN_HSCK1, FN_SCIFB1_SCK, FN_MLB_CLK, FN_GLO_RFON_C,
/* IP16_5_3 [3] */
FN_HTX1, FN_SCIFB1_TXD, FN_VI1_R1_B,
FN_GLO_SS_C, FN_VI1_DATA7_C,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
deleted file mode 100644
index 8211f66a2f68..000000000000
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ /dev/null
@@ -1,2645 +0,0 @@
-/*
- * sh7372 processor support - PFC hardware block
- *
- * Copyright (C) 2010 Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on
- * sh7367 processor support - PFC hardware block
- * Copyright (C) 2010 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/sh_intc.h>
-
-#include "core.h"
-#include "sh_pfc.h"
-
-#define CPU_ALL_PORT(fn, pfx, sfx) \
- PORT_10(0, fn, pfx, sfx), PORT_90(0, fn, pfx, sfx), \
- PORT_10(100, fn, pfx##10, sfx), PORT_10(110, fn, pfx##11, sfx), \
- PORT_10(120, fn, pfx##12, sfx), PORT_10(130, fn, pfx##13, sfx), \
- PORT_10(140, fn, pfx##14, sfx), PORT_10(150, fn, pfx##15, sfx), \
- PORT_10(160, fn, pfx##16, sfx), PORT_10(170, fn, pfx##17, sfx), \
- PORT_10(180, fn, pfx##18, sfx), PORT_1(190, fn, pfx##190, sfx)
-
-#define IRQC_PIN_MUX(irq, pin) \
-static const unsigned int intc_irq##irq##_pins[] = { \
- pin, \
-}; \
-static const unsigned int intc_irq##irq##_mux[] = { \
- IRQ##irq##_MARK, \
-}
-
-#define IRQC_PINS_MUX(irq, pin0, pin1) \
-static const unsigned int intc_irq##irq##_0_pins[] = { \
- pin0, \
-}; \
-static const unsigned int intc_irq##irq##_0_mux[] = { \
- IRQ##irq##_##pin0##_MARK, \
-}; \
-static const unsigned int intc_irq##irq##_1_pins[] = { \
- pin1, \
-}; \
-static const unsigned int intc_irq##irq##_1_mux[] = { \
- IRQ##irq##_##pin1##_MARK, \
-}
-
-enum {
- PINMUX_RESERVED = 0,
-
- /* PORT0_DATA -> PORT190_DATA */
- PINMUX_DATA_BEGIN,
- PORT_ALL(DATA),
- PINMUX_DATA_END,
-
- /* PORT0_IN -> PORT190_IN */
- PINMUX_INPUT_BEGIN,
- PORT_ALL(IN),
- PINMUX_INPUT_END,
-
- /* PORT0_OUT -> PORT190_OUT */
- PINMUX_OUTPUT_BEGIN,
- PORT_ALL(OUT),
- PINMUX_OUTPUT_END,
-
- PINMUX_FUNCTION_BEGIN,
- PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT190_FN_IN */
- PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT190_FN_OUT */
- PORT_ALL(FN0), /* PORT0_FN0 -> PORT190_FN0 */
- PORT_ALL(FN1), /* PORT0_FN1 -> PORT190_FN1 */
- PORT_ALL(FN2), /* PORT0_FN2 -> PORT190_FN2 */
- PORT_ALL(FN3), /* PORT0_FN3 -> PORT190_FN3 */
- PORT_ALL(FN4), /* PORT0_FN4 -> PORT190_FN4 */
- PORT_ALL(FN5), /* PORT0_FN5 -> PORT190_FN5 */
- PORT_ALL(FN6), /* PORT0_FN6 -> PORT190_FN6 */
- PORT_ALL(FN7), /* PORT0_FN7 -> PORT190_FN7 */
-
- MSEL1CR_31_0, MSEL1CR_31_1,
- MSEL1CR_30_0, MSEL1CR_30_1,
- MSEL1CR_29_0, MSEL1CR_29_1,
- MSEL1CR_28_0, MSEL1CR_28_1,
- MSEL1CR_27_0, MSEL1CR_27_1,
- MSEL1CR_26_0, MSEL1CR_26_1,
- MSEL1CR_16_0, MSEL1CR_16_1,
- MSEL1CR_15_0, MSEL1CR_15_1,
- MSEL1CR_14_0, MSEL1CR_14_1,
- MSEL1CR_13_0, MSEL1CR_13_1,
- MSEL1CR_12_0, MSEL1CR_12_1,
- MSEL1CR_9_0, MSEL1CR_9_1,
- MSEL1CR_8_0, MSEL1CR_8_1,
- MSEL1CR_7_0, MSEL1CR_7_1,
- MSEL1CR_6_0, MSEL1CR_6_1,
- MSEL1CR_4_0, MSEL1CR_4_1,
- MSEL1CR_3_0, MSEL1CR_3_1,
- MSEL1CR_2_0, MSEL1CR_2_1,
- MSEL1CR_0_0, MSEL1CR_0_1,
-
- MSEL3CR_27_0, MSEL3CR_27_1,
- MSEL3CR_26_0, MSEL3CR_26_1,
- MSEL3CR_21_0, MSEL3CR_21_1,
- MSEL3CR_20_0, MSEL3CR_20_1,
- MSEL3CR_15_0, MSEL3CR_15_1,
- MSEL3CR_9_0, MSEL3CR_9_1,
- MSEL3CR_6_0, MSEL3CR_6_1,
-
- MSEL4CR_19_0, MSEL4CR_19_1,
- MSEL4CR_18_0, MSEL4CR_18_1,
- MSEL4CR_17_0, MSEL4CR_17_1,
- MSEL4CR_16_0, MSEL4CR_16_1,
- MSEL4CR_15_0, MSEL4CR_15_1,
- MSEL4CR_14_0, MSEL4CR_14_1,
- MSEL4CR_10_0, MSEL4CR_10_1,
- MSEL4CR_6_0, MSEL4CR_6_1,
- MSEL4CR_4_0, MSEL4CR_4_1,
- MSEL4CR_1_0, MSEL4CR_1_1,
- PINMUX_FUNCTION_END,
-
- PINMUX_MARK_BEGIN,
-
- /* IRQ */
- IRQ0_6_MARK, IRQ0_162_MARK, IRQ1_MARK, IRQ2_4_MARK,
- IRQ2_5_MARK, IRQ3_8_MARK, IRQ3_16_MARK, IRQ4_17_MARK,
- IRQ4_163_MARK, IRQ5_MARK, IRQ6_39_MARK, IRQ6_164_MARK,
- IRQ7_40_MARK, IRQ7_167_MARK, IRQ8_41_MARK, IRQ8_168_MARK,
- IRQ9_42_MARK, IRQ9_169_MARK, IRQ10_MARK, IRQ11_MARK,
- IRQ12_80_MARK, IRQ12_137_MARK, IRQ13_81_MARK, IRQ13_145_MARK,
- IRQ14_82_MARK, IRQ14_146_MARK, IRQ15_83_MARK, IRQ15_147_MARK,
- IRQ16_84_MARK, IRQ16_170_MARK, IRQ17_MARK, IRQ18_MARK,
- IRQ19_MARK, IRQ20_MARK, IRQ21_MARK, IRQ22_MARK,
- IRQ23_MARK, IRQ24_MARK, IRQ25_MARK, IRQ26_121_MARK,
- IRQ26_172_MARK, IRQ27_122_MARK, IRQ27_180_MARK, IRQ28_123_MARK,
- IRQ28_181_MARK, IRQ29_129_MARK, IRQ29_182_MARK, IRQ30_130_MARK,
- IRQ30_183_MARK, IRQ31_138_MARK, IRQ31_184_MARK,
-
- /* MSIOF0 */
- MSIOF0_TSYNC_MARK, MSIOF0_TSCK_MARK, MSIOF0_RXD_MARK,
- MSIOF0_RSCK_MARK, MSIOF0_RSYNC_MARK, MSIOF0_MCK0_MARK,
- MSIOF0_MCK1_MARK, MSIOF0_SS1_MARK, MSIOF0_SS2_MARK,
- MSIOF0_TXD_MARK,
-
- /* MSIOF1 */
- MSIOF1_TSCK_39_MARK, MSIOF1_TSYNC_40_MARK,
- MSIOF1_TSCK_88_MARK, MSIOF1_TSYNC_89_MARK,
- MSIOF1_TXD_41_MARK, MSIOF1_RXD_42_MARK,
- MSIOF1_TXD_90_MARK, MSIOF1_RXD_91_MARK,
- MSIOF1_SS1_43_MARK, MSIOF1_SS2_44_MARK,
- MSIOF1_SS1_92_MARK, MSIOF1_SS2_93_MARK,
- MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK,
- MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK,
-
- /* MSIOF2 */
- MSIOF2_RSCK_MARK, MSIOF2_RSYNC_MARK, MSIOF2_MCK0_MARK,
- MSIOF2_MCK1_MARK, MSIOF2_SS1_MARK, MSIOF2_SS2_MARK,
- MSIOF2_TSYNC_MARK, MSIOF2_TSCK_MARK, MSIOF2_RXD_MARK,
- MSIOF2_TXD_MARK,
-
- /* BBIF1 */
- BBIF1_RXD_MARK, BBIF1_TSYNC_MARK, BBIF1_TSCK_MARK,
- BBIF1_TXD_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
- BBIF1_FLOW_MARK, BB_RX_FLOW_N_MARK,
-
- /* BBIF2 */
- BBIF2_TSCK1_MARK, BBIF2_TSYNC1_MARK,
- BBIF2_TXD1_MARK, BBIF2_RXD_MARK,
-
- /* FSI */
- FSIACK_MARK, FSIBCK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
- FSIAISLD_MARK, FSIAOMC_MARK, FSIAOLR_MARK, FSIAOBT_MARK,
- FSIAOSLD_MARK, FSIASPDIF_11_MARK, FSIASPDIF_15_MARK,
-
- /* FMSI */
- FMSOCK_MARK, FMSOOLR_MARK, FMSIOLR_MARK, FMSOOBT_MARK,
- FMSIOBT_MARK, FMSOSLD_MARK, FMSOILR_MARK, FMSIILR_MARK,
- FMSOIBT_MARK, FMSIIBT_MARK, FMSISLD_MARK, FMSICK_MARK,
-
- /* SCIFA0 */
- SCIFA0_TXD_MARK, SCIFA0_RXD_MARK, SCIFA0_SCK_MARK,
- SCIFA0_RTS_MARK, SCIFA0_CTS_MARK,
-
- /* SCIFA1 */
- SCIFA1_TXD_MARK, SCIFA1_RXD_MARK, SCIFA1_SCK_MARK,
- SCIFA1_RTS_MARK, SCIFA1_CTS_MARK,
-
- /* SCIFA2 */
- SCIFA2_CTS1_MARK, SCIFA2_RTS1_MARK, SCIFA2_TXD1_MARK,
- SCIFA2_RXD1_MARK, SCIFA2_SCK1_MARK,
-
- /* SCIFA3 */
- SCIFA3_CTS_43_MARK, SCIFA3_CTS_140_MARK, SCIFA3_RTS_44_MARK,
- SCIFA3_RTS_141_MARK, SCIFA3_SCK_MARK, SCIFA3_TXD_MARK,
- SCIFA3_RXD_MARK,
-
- /* SCIFA4 */
- SCIFA4_RXD_MARK, SCIFA4_TXD_MARK,
-
- /* SCIFA5 */
- SCIFA5_RXD_MARK, SCIFA5_TXD_MARK,
-
- /* SCIFB */
- SCIFB_SCK_MARK, SCIFB_RTS_MARK, SCIFB_CTS_MARK,
- SCIFB_TXD_MARK, SCIFB_RXD_MARK,
-
- /* CEU */
- VIO_HD_MARK, VIO_CKO1_MARK, VIO_CKO2_MARK, VIO_VD_MARK,
- VIO_CLK_MARK, VIO_FIELD_MARK, VIO_CKO_MARK,
- VIO_D0_MARK, VIO_D1_MARK, VIO_D2_MARK, VIO_D3_MARK,
- VIO_D4_MARK, VIO_D5_MARK, VIO_D6_MARK, VIO_D7_MARK,
- VIO_D8_MARK, VIO_D9_MARK, VIO_D10_MARK, VIO_D11_MARK,
- VIO_D12_MARK, VIO_D13_MARK, VIO_D14_MARK, VIO_D15_MARK,
-
- /* USB0 */
- IDIN_0_MARK, EXTLP_0_MARK, OVCN2_0_MARK, PWEN_0_MARK,
- OVCN_0_MARK, VBUS0_0_MARK,
-
- /* USB1 */
- IDIN_1_18_MARK, IDIN_1_113_MARK,
- PWEN_1_115_MARK, PWEN_1_138_MARK,
- OVCN_1_114_MARK, OVCN_1_162_MARK,
- EXTLP_1_MARK, OVCN2_1_MARK,
- VBUS0_1_MARK,
-
- /* GPIO */
- GPI0_MARK, GPI1_MARK, GPO0_MARK, GPO1_MARK,
-
- /* BSC */
- BS_MARK, WE1_MARK,
- CKO_MARK, WAIT_MARK, RDWR_MARK,
-
- A0_MARK, A1_MARK, A2_MARK, A3_MARK,
- A6_MARK, A7_MARK, A8_MARK, A9_MARK,
- A10_MARK, A11_MARK, A12_MARK, A13_MARK,
- A14_MARK, A15_MARK, A16_MARK, A17_MARK,
- A18_MARK, A19_MARK, A20_MARK, A21_MARK,
- A22_MARK, A23_MARK, A24_MARK, A25_MARK,
- A26_MARK,
-
- CS0_MARK, CS2_MARK, CS4_MARK,
- CS5A_MARK, CS5B_MARK, CS6A_MARK,
-
- /* BSC/FLCTL */
- RD_FSC_MARK, WE0_FWE_MARK, A4_FOE_MARK, A5_FCDE_MARK,
- D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
- D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
- D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK,
- D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK,
-
- /* MMCIF(1) */
- MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
- MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
- MMCCMD0_MARK, MMCCLK0_MARK,
-
- /* MMCIF(2) */
- MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
- MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
- MMCCLK1_MARK, MMCCMD1_MARK,
-
- /* SPU2 */
- VINT_I_MARK,
-
- /* FLCTL */
- FCE1_MARK, FCE0_MARK, FRB_MARK,
-
- /* HSI */
- GP_RX_FLAG_MARK, GP_RX_DATA_MARK, GP_TX_READY_MARK,
- GP_RX_WAKE_MARK, MP_TX_FLAG_MARK, MP_TX_DATA_MARK,
- MP_RX_READY_MARK, MP_TX_WAKE_MARK,
-
- /* MFI */
- MFIv6_MARK,
- MFIv4_MARK,
-
- MEMC_CS0_MARK, MEMC_BUSCLK_MEMC_A0_MARK,
- MEMC_CS1_MEMC_A1_MARK, MEMC_ADV_MEMC_DREQ0_MARK,
- MEMC_WAIT_MEMC_DREQ1_MARK, MEMC_NOE_MARK,
- MEMC_NWE_MARK, MEMC_INT_MARK,
-
- MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK,
- MEMC_AD3_MARK, MEMC_AD4_MARK, MEMC_AD5_MARK,
- MEMC_AD6_MARK, MEMC_AD7_MARK, MEMC_AD8_MARK,
- MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK,
- MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK,
- MEMC_AD15_MARK,
-
- /* SIM */
- SIM_RST_MARK, SIM_CLK_MARK, SIM_D_MARK,
-
- /* TPU */
- TPU0TO0_MARK, TPU0TO1_MARK,
- TPU0TO2_93_MARK, TPU0TO2_99_MARK,
- TPU0TO3_MARK,
-
- /* I2C2 */
- I2C_SCL2_MARK, I2C_SDA2_MARK,
-
- /* I2C3(1) */
- I2C_SCL3_MARK, I2C_SDA3_MARK,
-
- /* I2C3(2) */
- I2C_SCL3S_MARK, I2C_SDA3S_MARK,
-
- /* I2C4(2) */
- I2C_SCL4_MARK, I2C_SDA4_MARK,
-
- /* I2C4(2) */
- I2C_SCL4S_MARK, I2C_SDA4S_MARK,
-
- /* KEYSC */
- KEYOUT0_MARK, KEYIN0_121_MARK, KEYIN0_136_MARK,
- KEYOUT1_MARK, KEYIN1_122_MARK, KEYIN1_135_MARK,
- KEYOUT2_MARK, KEYIN2_123_MARK, KEYIN2_134_MARK,
- KEYOUT3_MARK, KEYIN3_124_MARK, KEYIN3_133_MARK,
- KEYOUT4_MARK, KEYIN4_MARK,
- KEYOUT5_MARK, KEYIN5_MARK,
- KEYOUT6_MARK, KEYIN6_MARK,
- KEYOUT7_MARK, KEYIN7_MARK,
-
- /* LCDC */
- LCDC0_SELECT_MARK,
- LCDC1_SELECT_MARK,
- LCDHSYN_MARK, LCDCS_MARK, LCDVSYN_MARK, LCDDCK_MARK,
- LCDWR_MARK, LCDRD_MARK, LCDDISP_MARK, LCDRS_MARK,
- LCDLCLK_MARK, LCDDON_MARK,
-
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
- LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
- LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
- LCDD16_MARK, LCDD17_MARK, LCDD18_MARK, LCDD19_MARK,
- LCDD20_MARK, LCDD21_MARK, LCDD22_MARK, LCDD23_MARK,
-
- /* IRDA */
- IRDA_OUT_MARK, IRDA_IN_MARK, IRDA_FIRSEL_MARK,
- IROUT_139_MARK, IROUT_140_MARK,
-
- /* TSIF1 */
- TS0_1SELECT_MARK,
- TS0_2SELECT_MARK,
- TS1_1SELECT_MARK,
- TS1_2SELECT_MARK,
-
- TS_SPSYNC1_MARK, TS_SDAT1_MARK,
- TS_SDEN1_MARK, TS_SCK1_MARK,
-
- /* TSIF2 */
- TS_SPSYNC2_MARK, TS_SDAT2_MARK,
- TS_SDEN2_MARK, TS_SCK2_MARK,
-
- /* HDMI */
- HDMI_HPD_MARK, HDMI_CEC_MARK,
-
- /* SDHI0 */
- SDHICLK0_MARK, SDHICD0_MARK,
- SDHICMD0_MARK, SDHIWP0_MARK,
- SDHID0_0_MARK, SDHID0_1_MARK,
- SDHID0_2_MARK, SDHID0_3_MARK,
-
- /* SDHI1 */
- SDHICLK1_MARK, SDHICMD1_MARK, SDHID1_0_MARK,
- SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
-
- /* SDHI2 */
- SDHICLK2_MARK, SDHICMD2_MARK, SDHID2_0_MARK,
- SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
-
- /* SDENC */
- SDENC_CPG_MARK,
- SDENC_DV_CLKI_MARK,
-
- PINMUX_MARK_END,
-};
-
-static const u16 pinmux_data[] = {
- PINMUX_DATA_ALL(),
-
- /* IRQ */
- PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0),
- PINMUX_DATA(IRQ0_162_MARK, PORT162_FN0, MSEL1CR_0_1),
- PINMUX_DATA(IRQ1_MARK, PORT12_FN0),
- PINMUX_DATA(IRQ2_4_MARK, PORT4_FN0, MSEL1CR_2_0),
- PINMUX_DATA(IRQ2_5_MARK, PORT5_FN0, MSEL1CR_2_1),
- PINMUX_DATA(IRQ3_8_MARK, PORT8_FN0, MSEL1CR_3_0),
- PINMUX_DATA(IRQ3_16_MARK, PORT16_FN0, MSEL1CR_3_1),
- PINMUX_DATA(IRQ4_17_MARK, PORT17_FN0, MSEL1CR_4_0),
- PINMUX_DATA(IRQ4_163_MARK, PORT163_FN0, MSEL1CR_4_1),
- PINMUX_DATA(IRQ5_MARK, PORT18_FN0),
- PINMUX_DATA(IRQ6_39_MARK, PORT39_FN0, MSEL1CR_6_0),
- PINMUX_DATA(IRQ6_164_MARK, PORT164_FN0, MSEL1CR_6_1),
- PINMUX_DATA(IRQ7_40_MARK, PORT40_FN0, MSEL1CR_7_1),
- PINMUX_DATA(IRQ7_167_MARK, PORT167_FN0, MSEL1CR_7_0),
- PINMUX_DATA(IRQ8_41_MARK, PORT41_FN0, MSEL1CR_8_1),
- PINMUX_DATA(IRQ8_168_MARK, PORT168_FN0, MSEL1CR_8_0),
- PINMUX_DATA(IRQ9_42_MARK, PORT42_FN0, MSEL1CR_9_0),
- PINMUX_DATA(IRQ9_169_MARK, PORT169_FN0, MSEL1CR_9_1),
- PINMUX_DATA(IRQ10_MARK, PORT65_FN0, MSEL1CR_9_1),
- PINMUX_DATA(IRQ11_MARK, PORT67_FN0),
- PINMUX_DATA(IRQ12_80_MARK, PORT80_FN0, MSEL1CR_12_0),
- PINMUX_DATA(IRQ12_137_MARK, PORT137_FN0, MSEL1CR_12_1),
- PINMUX_DATA(IRQ13_81_MARK, PORT81_FN0, MSEL1CR_13_0),
- PINMUX_DATA(IRQ13_145_MARK, PORT145_FN0, MSEL1CR_13_1),
- PINMUX_DATA(IRQ14_82_MARK, PORT82_FN0, MSEL1CR_14_0),
- PINMUX_DATA(IRQ14_146_MARK, PORT146_FN0, MSEL1CR_14_1),
- PINMUX_DATA(IRQ15_83_MARK, PORT83_FN0, MSEL1CR_15_0),
- PINMUX_DATA(IRQ15_147_MARK, PORT147_FN0, MSEL1CR_15_1),
- PINMUX_DATA(IRQ16_84_MARK, PORT84_FN0, MSEL1CR_16_0),
- PINMUX_DATA(IRQ16_170_MARK, PORT170_FN0, MSEL1CR_16_1),
- PINMUX_DATA(IRQ17_MARK, PORT85_FN0),
- PINMUX_DATA(IRQ18_MARK, PORT86_FN0),
- PINMUX_DATA(IRQ19_MARK, PORT87_FN0),
- PINMUX_DATA(IRQ20_MARK, PORT92_FN0),
- PINMUX_DATA(IRQ21_MARK, PORT93_FN0),
- PINMUX_DATA(IRQ22_MARK, PORT94_FN0),
- PINMUX_DATA(IRQ23_MARK, PORT95_FN0),
- PINMUX_DATA(IRQ24_MARK, PORT112_FN0),
- PINMUX_DATA(IRQ25_MARK, PORT119_FN0),
- PINMUX_DATA(IRQ26_121_MARK, PORT121_FN0, MSEL1CR_26_1),
- PINMUX_DATA(IRQ26_172_MARK, PORT172_FN0, MSEL1CR_26_0),
- PINMUX_DATA(IRQ27_122_MARK, PORT122_FN0, MSEL1CR_27_1),
- PINMUX_DATA(IRQ27_180_MARK, PORT180_FN0, MSEL1CR_27_0),
- PINMUX_DATA(IRQ28_123_MARK, PORT123_FN0, MSEL1CR_28_1),
- PINMUX_DATA(IRQ28_181_MARK, PORT181_FN0, MSEL1CR_28_0),
- PINMUX_DATA(IRQ29_129_MARK, PORT129_FN0, MSEL1CR_29_1),
- PINMUX_DATA(IRQ29_182_MARK, PORT182_FN0, MSEL1CR_29_0),
- PINMUX_DATA(IRQ30_130_MARK, PORT130_FN0, MSEL1CR_30_1),
- PINMUX_DATA(IRQ30_183_MARK, PORT183_FN0, MSEL1CR_30_0),
- PINMUX_DATA(IRQ31_138_MARK, PORT138_FN0, MSEL1CR_31_1),
- PINMUX_DATA(IRQ31_184_MARK, PORT184_FN0, MSEL1CR_31_0),
-
- /* Function 1 */
- PINMUX_DATA(BBIF2_TSCK1_MARK, PORT0_FN1),
- PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT1_FN1),
- PINMUX_DATA(BBIF2_TXD1_MARK, PORT2_FN1),
- PINMUX_DATA(BBIF2_RXD_MARK, PORT3_FN1),
- PINMUX_DATA(FSIACK_MARK, PORT4_FN1),
- PINMUX_DATA(FSIAILR_MARK, PORT5_FN1),
- PINMUX_DATA(FSIAIBT_MARK, PORT6_FN1),
- PINMUX_DATA(FSIAISLD_MARK, PORT7_FN1),
- PINMUX_DATA(FSIAOMC_MARK, PORT8_FN1),
- PINMUX_DATA(FSIAOLR_MARK, PORT9_FN1),
- PINMUX_DATA(FSIAOBT_MARK, PORT10_FN1),
- PINMUX_DATA(FSIAOSLD_MARK, PORT11_FN1),
- PINMUX_DATA(FMSOCK_MARK, PORT12_FN1),
- PINMUX_DATA(FMSOOLR_MARK, PORT13_FN1),
- PINMUX_DATA(FMSOOBT_MARK, PORT14_FN1),
- PINMUX_DATA(FMSOSLD_MARK, PORT15_FN1),
- PINMUX_DATA(FMSOILR_MARK, PORT16_FN1),
- PINMUX_DATA(FMSOIBT_MARK, PORT17_FN1),
- PINMUX_DATA(FMSISLD_MARK, PORT18_FN1),
- PINMUX_DATA(A0_MARK, PORT19_FN1),
- PINMUX_DATA(A1_MARK, PORT20_FN1),
- PINMUX_DATA(A2_MARK, PORT21_FN1),
- PINMUX_DATA(A3_MARK, PORT22_FN1),
- PINMUX_DATA(A4_FOE_MARK, PORT23_FN1),
- PINMUX_DATA(A5_FCDE_MARK, PORT24_FN1),
- PINMUX_DATA(A6_MARK, PORT25_FN1),
- PINMUX_DATA(A7_MARK, PORT26_FN1),
- PINMUX_DATA(A8_MARK, PORT27_FN1),
- PINMUX_DATA(A9_MARK, PORT28_FN1),
- PINMUX_DATA(A10_MARK, PORT29_FN1),
- PINMUX_DATA(A11_MARK, PORT30_FN1),
- PINMUX_DATA(A12_MARK, PORT31_FN1),
- PINMUX_DATA(A13_MARK, PORT32_FN1),
- PINMUX_DATA(A14_MARK, PORT33_FN1),
- PINMUX_DATA(A15_MARK, PORT34_FN1),
- PINMUX_DATA(A16_MARK, PORT35_FN1),
- PINMUX_DATA(A17_MARK, PORT36_FN1),
- PINMUX_DATA(A18_MARK, PORT37_FN1),
- PINMUX_DATA(A19_MARK, PORT38_FN1),
- PINMUX_DATA(A20_MARK, PORT39_FN1),
- PINMUX_DATA(A21_MARK, PORT40_FN1),
- PINMUX_DATA(A22_MARK, PORT41_FN1),
- PINMUX_DATA(A23_MARK, PORT42_FN1),
- PINMUX_DATA(A24_MARK, PORT43_FN1),
- PINMUX_DATA(A25_MARK, PORT44_FN1),
- PINMUX_DATA(A26_MARK, PORT45_FN1),
- PINMUX_DATA(D0_NAF0_MARK, PORT46_FN1),
- PINMUX_DATA(D1_NAF1_MARK, PORT47_FN1),
- PINMUX_DATA(D2_NAF2_MARK, PORT48_FN1),
- PINMUX_DATA(D3_NAF3_MARK, PORT49_FN1),
- PINMUX_DATA(D4_NAF4_MARK, PORT50_FN1),
- PINMUX_DATA(D5_NAF5_MARK, PORT51_FN1),
- PINMUX_DATA(D6_NAF6_MARK, PORT52_FN1),
- PINMUX_DATA(D7_NAF7_MARK, PORT53_FN1),
- PINMUX_DATA(D8_NAF8_MARK, PORT54_FN1),
- PINMUX_DATA(D9_NAF9_MARK, PORT55_FN1),
- PINMUX_DATA(D10_NAF10_MARK, PORT56_FN1),
- PINMUX_DATA(D11_NAF11_MARK, PORT57_FN1),
- PINMUX_DATA(D12_NAF12_MARK, PORT58_FN1),
- PINMUX_DATA(D13_NAF13_MARK, PORT59_FN1),
- PINMUX_DATA(D14_NAF14_MARK, PORT60_FN1),
- PINMUX_DATA(D15_NAF15_MARK, PORT61_FN1),
- PINMUX_DATA(CS0_MARK, PORT62_FN1),
- PINMUX_DATA(CS2_MARK, PORT63_FN1),
- PINMUX_DATA(CS4_MARK, PORT64_FN1),
- PINMUX_DATA(CS5A_MARK, PORT65_FN1),
- PINMUX_DATA(CS5B_MARK, PORT66_FN1),
- PINMUX_DATA(CS6A_MARK, PORT67_FN1),
- PINMUX_DATA(FCE0_MARK, PORT68_FN1),
- PINMUX_DATA(RD_FSC_MARK, PORT69_FN1),
- PINMUX_DATA(WE0_FWE_MARK, PORT70_FN1),
- PINMUX_DATA(WE1_MARK, PORT71_FN1),
- PINMUX_DATA(CKO_MARK, PORT72_FN1),
- PINMUX_DATA(FRB_MARK, PORT73_FN1),
- PINMUX_DATA(WAIT_MARK, PORT74_FN1),
- PINMUX_DATA(RDWR_MARK, PORT75_FN1),
- PINMUX_DATA(MEMC_AD0_MARK, PORT76_FN1),
- PINMUX_DATA(MEMC_AD1_MARK, PORT77_FN1),
- PINMUX_DATA(MEMC_AD2_MARK, PORT78_FN1),
- PINMUX_DATA(MEMC_AD3_MARK, PORT79_FN1),
- PINMUX_DATA(MEMC_AD4_MARK, PORT80_FN1),
- PINMUX_DATA(MEMC_AD5_MARK, PORT81_FN1),
- PINMUX_DATA(MEMC_AD6_MARK, PORT82_FN1),
- PINMUX_DATA(MEMC_AD7_MARK, PORT83_FN1),
- PINMUX_DATA(MEMC_AD8_MARK, PORT84_FN1),
- PINMUX_DATA(MEMC_AD9_MARK, PORT85_FN1),
- PINMUX_DATA(MEMC_AD10_MARK, PORT86_FN1),
- PINMUX_DATA(MEMC_AD11_MARK, PORT87_FN1),
- PINMUX_DATA(MEMC_AD12_MARK, PORT88_FN1),
- PINMUX_DATA(MEMC_AD13_MARK, PORT89_FN1),
- PINMUX_DATA(MEMC_AD14_MARK, PORT90_FN1),
- PINMUX_DATA(MEMC_AD15_MARK, PORT91_FN1),
- PINMUX_DATA(MEMC_CS0_MARK, PORT92_FN1),
- PINMUX_DATA(MEMC_BUSCLK_MEMC_A0_MARK, PORT93_FN1),
- PINMUX_DATA(MEMC_CS1_MEMC_A1_MARK, PORT94_FN1),
- PINMUX_DATA(MEMC_ADV_MEMC_DREQ0_MARK, PORT95_FN1),
- PINMUX_DATA(MEMC_WAIT_MEMC_DREQ1_MARK, PORT96_FN1),
- PINMUX_DATA(MEMC_NOE_MARK, PORT97_FN1),
- PINMUX_DATA(MEMC_NWE_MARK, PORT98_FN1),
- PINMUX_DATA(MEMC_INT_MARK, PORT99_FN1),
- PINMUX_DATA(VIO_VD_MARK, PORT100_FN1),
- PINMUX_DATA(VIO_HD_MARK, PORT101_FN1),
- PINMUX_DATA(VIO_D0_MARK, PORT102_FN1),
- PINMUX_DATA(VIO_D1_MARK, PORT103_FN1),
- PINMUX_DATA(VIO_D2_MARK, PORT104_FN1),
- PINMUX_DATA(VIO_D3_MARK, PORT105_FN1),
- PINMUX_DATA(VIO_D4_MARK, PORT106_FN1),
- PINMUX_DATA(VIO_D5_MARK, PORT107_FN1),
- PINMUX_DATA(VIO_D6_MARK, PORT108_FN1),
- PINMUX_DATA(VIO_D7_MARK, PORT109_FN1),
- PINMUX_DATA(VIO_D8_MARK, PORT110_FN1),
- PINMUX_DATA(VIO_D9_MARK, PORT111_FN1),
- PINMUX_DATA(VIO_D10_MARK, PORT112_FN1),
- PINMUX_DATA(VIO_D11_MARK, PORT113_FN1),
- PINMUX_DATA(VIO_D12_MARK, PORT114_FN1),
- PINMUX_DATA(VIO_D13_MARK, PORT115_FN1),
- PINMUX_DATA(VIO_D14_MARK, PORT116_FN1),
- PINMUX_DATA(VIO_D15_MARK, PORT117_FN1),
- PINMUX_DATA(VIO_CLK_MARK, PORT118_FN1),
- PINMUX_DATA(VIO_FIELD_MARK, PORT119_FN1),
- PINMUX_DATA(VIO_CKO_MARK, PORT120_FN1),
- PINMUX_DATA(LCDD0_MARK, PORT121_FN1),
- PINMUX_DATA(LCDD1_MARK, PORT122_FN1),
- PINMUX_DATA(LCDD2_MARK, PORT123_FN1),
- PINMUX_DATA(LCDD3_MARK, PORT124_FN1),
- PINMUX_DATA(LCDD4_MARK, PORT125_FN1),
- PINMUX_DATA(LCDD5_MARK, PORT126_FN1),
- PINMUX_DATA(LCDD6_MARK, PORT127_FN1),
- PINMUX_DATA(LCDD7_MARK, PORT128_FN1),
- PINMUX_DATA(LCDD8_MARK, PORT129_FN1),
- PINMUX_DATA(LCDD9_MARK, PORT130_FN1),
- PINMUX_DATA(LCDD10_MARK, PORT131_FN1),
- PINMUX_DATA(LCDD11_MARK, PORT132_FN1),
- PINMUX_DATA(LCDD12_MARK, PORT133_FN1),
- PINMUX_DATA(LCDD13_MARK, PORT134_FN1),
- PINMUX_DATA(LCDD14_MARK, PORT135_FN1),
- PINMUX_DATA(LCDD15_MARK, PORT136_FN1),
- PINMUX_DATA(LCDD16_MARK, PORT137_FN1),
- PINMUX_DATA(LCDD17_MARK, PORT138_FN1),
- PINMUX_DATA(LCDD18_MARK, PORT139_FN1),
- PINMUX_DATA(LCDD19_MARK, PORT140_FN1),
- PINMUX_DATA(LCDD20_MARK, PORT141_FN1),
- PINMUX_DATA(LCDD21_MARK, PORT142_FN1),
- PINMUX_DATA(LCDD22_MARK, PORT143_FN1),
- PINMUX_DATA(LCDD23_MARK, PORT144_FN1),
- PINMUX_DATA(LCDHSYN_MARK, PORT145_FN1),
- PINMUX_DATA(LCDVSYN_MARK, PORT146_FN1),
- PINMUX_DATA(LCDDCK_MARK, PORT147_FN1),
- PINMUX_DATA(LCDRD_MARK, PORT148_FN1),
- PINMUX_DATA(LCDDISP_MARK, PORT149_FN1),
- PINMUX_DATA(LCDLCLK_MARK, PORT150_FN1),
- PINMUX_DATA(LCDDON_MARK, PORT151_FN1),
- PINMUX_DATA(SCIFA0_TXD_MARK, PORT152_FN1),
- PINMUX_DATA(SCIFA0_RXD_MARK, PORT153_FN1),
- PINMUX_DATA(SCIFA1_TXD_MARK, PORT154_FN1),
- PINMUX_DATA(SCIFA1_RXD_MARK, PORT155_FN1),
- PINMUX_DATA(TS_SPSYNC1_MARK, PORT156_FN1),
- PINMUX_DATA(TS_SDAT1_MARK, PORT157_FN1),
- PINMUX_DATA(TS_SDEN1_MARK, PORT158_FN1),
- PINMUX_DATA(TS_SCK1_MARK, PORT159_FN1),
- PINMUX_DATA(TPU0TO0_MARK, PORT160_FN1),
- PINMUX_DATA(TPU0TO1_MARK, PORT161_FN1),
- PINMUX_DATA(SCIFB_SCK_MARK, PORT162_FN1),
- PINMUX_DATA(SCIFB_RTS_MARK, PORT163_FN1),
- PINMUX_DATA(SCIFB_CTS_MARK, PORT164_FN1),
- PINMUX_DATA(SCIFB_TXD_MARK, PORT165_FN1),
- PINMUX_DATA(SCIFB_RXD_MARK, PORT166_FN1),
- PINMUX_DATA(VBUS0_0_MARK, PORT167_FN1),
- PINMUX_DATA(VBUS0_1_MARK, PORT168_FN1),
- PINMUX_DATA(HDMI_HPD_MARK, PORT169_FN1),
- PINMUX_DATA(HDMI_CEC_MARK, PORT170_FN1),
- PINMUX_DATA(SDHICLK0_MARK, PORT171_FN1),
- PINMUX_DATA(SDHICD0_MARK, PORT172_FN1),
- PINMUX_DATA(SDHID0_0_MARK, PORT173_FN1),
- PINMUX_DATA(SDHID0_1_MARK, PORT174_FN1),
- PINMUX_DATA(SDHID0_2_MARK, PORT175_FN1),
- PINMUX_DATA(SDHID0_3_MARK, PORT176_FN1),
- PINMUX_DATA(SDHICMD0_MARK, PORT177_FN1),
- PINMUX_DATA(SDHIWP0_MARK, PORT178_FN1),
- PINMUX_DATA(SDHICLK1_MARK, PORT179_FN1),
- PINMUX_DATA(SDHID1_0_MARK, PORT180_FN1),
- PINMUX_DATA(SDHID1_1_MARK, PORT181_FN1),
- PINMUX_DATA(SDHID1_2_MARK, PORT182_FN1),
- PINMUX_DATA(SDHID1_3_MARK, PORT183_FN1),
- PINMUX_DATA(SDHICMD1_MARK, PORT184_FN1),
- PINMUX_DATA(SDHICLK2_MARK, PORT185_FN1),
- PINMUX_DATA(SDHID2_0_MARK, PORT186_FN1),
- PINMUX_DATA(SDHID2_1_MARK, PORT187_FN1),
- PINMUX_DATA(SDHID2_2_MARK, PORT188_FN1),
- PINMUX_DATA(SDHID2_3_MARK, PORT189_FN1),
- PINMUX_DATA(SDHICMD2_MARK, PORT190_FN1),
-
- /* Function 2 */
- PINMUX_DATA(FSIBCK_MARK, PORT4_FN2),
- PINMUX_DATA(SCIFA4_RXD_MARK, PORT5_FN2),
- PINMUX_DATA(SCIFA4_TXD_MARK, PORT6_FN2),
- PINMUX_DATA(SCIFA5_RXD_MARK, PORT8_FN2),
- PINMUX_DATA(FSIASPDIF_11_MARK, PORT11_FN2),
- PINMUX_DATA(SCIFA5_TXD_MARK, PORT12_FN2),
- PINMUX_DATA(FMSIOLR_MARK, PORT13_FN2),
- PINMUX_DATA(FMSIOBT_MARK, PORT14_FN2),
- PINMUX_DATA(FSIASPDIF_15_MARK, PORT15_FN2),
- PINMUX_DATA(FMSIILR_MARK, PORT16_FN2),
- PINMUX_DATA(FMSIIBT_MARK, PORT17_FN2),
- PINMUX_DATA(BS_MARK, PORT19_FN2),
- PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT36_FN2),
- PINMUX_DATA(MSIOF0_TSCK_MARK, PORT37_FN2),
- PINMUX_DATA(MSIOF0_RXD_MARK, PORT38_FN2),
- PINMUX_DATA(MSIOF0_RSCK_MARK, PORT39_FN2),
- PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT40_FN2),
- PINMUX_DATA(MSIOF0_MCK0_MARK, PORT41_FN2),
- PINMUX_DATA(MSIOF0_MCK1_MARK, PORT42_FN2),
- PINMUX_DATA(MSIOF0_SS1_MARK, PORT43_FN2),
- PINMUX_DATA(MSIOF0_SS2_MARK, PORT44_FN2),
- PINMUX_DATA(MSIOF0_TXD_MARK, PORT45_FN2),
- PINMUX_DATA(FMSICK_MARK, PORT65_FN2),
- PINMUX_DATA(FCE1_MARK, PORT66_FN2),
- PINMUX_DATA(BBIF1_RXD_MARK, PORT76_FN2),
- PINMUX_DATA(BBIF1_TSYNC_MARK, PORT77_FN2),
- PINMUX_DATA(BBIF1_TSCK_MARK, PORT78_FN2),
- PINMUX_DATA(BBIF1_TXD_MARK, PORT79_FN2),
- PINMUX_DATA(BBIF1_RSCK_MARK, PORT80_FN2),
- PINMUX_DATA(BBIF1_RSYNC_MARK, PORT81_FN2),
- PINMUX_DATA(BBIF1_FLOW_MARK, PORT82_FN2),
- PINMUX_DATA(BB_RX_FLOW_N_MARK, PORT83_FN2),
- PINMUX_DATA(MSIOF1_RSCK_MARK, PORT84_FN2),
- PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT85_FN2),
- PINMUX_DATA(MSIOF1_MCK0_MARK, PORT86_FN2),
- PINMUX_DATA(MSIOF1_MCK1_MARK, PORT87_FN2),
- PINMUX_DATA(MSIOF1_TSCK_88_MARK, PORT88_FN2, MSEL4CR_10_1),
- PINMUX_DATA(MSIOF1_TSYNC_89_MARK, PORT89_FN2, MSEL4CR_10_1),
- PINMUX_DATA(MSIOF1_TXD_90_MARK, PORT90_FN2, MSEL4CR_10_1),
- PINMUX_DATA(MSIOF1_RXD_91_MARK, PORT91_FN2, MSEL4CR_10_1),
- PINMUX_DATA(MSIOF1_SS1_92_MARK, PORT92_FN2, MSEL4CR_10_1),
- PINMUX_DATA(MSIOF1_SS2_93_MARK, PORT93_FN2, MSEL4CR_10_1),
- PINMUX_DATA(SCIFA2_CTS1_MARK, PORT94_FN2),
- PINMUX_DATA(SCIFA2_RTS1_MARK, PORT95_FN2),
- PINMUX_DATA(SCIFA2_TXD1_MARK, PORT96_FN2),
- PINMUX_DATA(SCIFA2_RXD1_MARK, PORT97_FN2),
- PINMUX_DATA(SCIFA2_SCK1_MARK, PORT98_FN2),
- PINMUX_DATA(I2C_SCL2_MARK, PORT110_FN2),
- PINMUX_DATA(I2C_SDA2_MARK, PORT111_FN2),
- PINMUX_DATA(I2C_SCL3_MARK, PORT114_FN2, MSEL4CR_16_1),
- PINMUX_DATA(I2C_SDA3_MARK, PORT115_FN2, MSEL4CR_16_1),
- PINMUX_DATA(I2C_SCL4_MARK, PORT116_FN2, MSEL4CR_17_1),
- PINMUX_DATA(I2C_SDA4_MARK, PORT117_FN2, MSEL4CR_17_1),
- PINMUX_DATA(MSIOF2_RSCK_MARK, PORT134_FN2),
- PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT135_FN2),
- PINMUX_DATA(MSIOF2_MCK0_MARK, PORT136_FN2),
- PINMUX_DATA(MSIOF2_MCK1_MARK, PORT137_FN2),
- PINMUX_DATA(MSIOF2_SS1_MARK, PORT138_FN2),
- PINMUX_DATA(MSIOF2_SS2_MARK, PORT139_FN2),
- PINMUX_DATA(SCIFA3_CTS_140_MARK, PORT140_FN2, MSEL3CR_9_1),
- PINMUX_DATA(SCIFA3_RTS_141_MARK, PORT141_FN2),
- PINMUX_DATA(SCIFA3_SCK_MARK, PORT142_FN2),
- PINMUX_DATA(SCIFA3_TXD_MARK, PORT143_FN2),
- PINMUX_DATA(SCIFA3_RXD_MARK, PORT144_FN2),
- PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT148_FN2),
- PINMUX_DATA(MSIOF2_TSCK_MARK, PORT149_FN2),
- PINMUX_DATA(MSIOF2_RXD_MARK, PORT150_FN2),
- PINMUX_DATA(MSIOF2_TXD_MARK, PORT151_FN2),
- PINMUX_DATA(SCIFA0_SCK_MARK, PORT156_FN2),
- PINMUX_DATA(SCIFA0_RTS_MARK, PORT157_FN2),
- PINMUX_DATA(SCIFA0_CTS_MARK, PORT158_FN2),
- PINMUX_DATA(SCIFA1_SCK_MARK, PORT159_FN2),
- PINMUX_DATA(SCIFA1_RTS_MARK, PORT160_FN2),
- PINMUX_DATA(SCIFA1_CTS_MARK, PORT161_FN2),
-
- /* Function 3 */
- PINMUX_DATA(VIO_CKO1_MARK, PORT16_FN3),
- PINMUX_DATA(VIO_CKO2_MARK, PORT17_FN3),
- PINMUX_DATA(IDIN_1_18_MARK, PORT18_FN3, MSEL4CR_14_1),
- PINMUX_DATA(MSIOF1_TSCK_39_MARK, PORT39_FN3, MSEL4CR_10_0),
- PINMUX_DATA(MSIOF1_TSYNC_40_MARK, PORT40_FN3, MSEL4CR_10_0),
- PINMUX_DATA(MSIOF1_TXD_41_MARK, PORT41_FN3, MSEL4CR_10_0),
- PINMUX_DATA(MSIOF1_RXD_42_MARK, PORT42_FN3, MSEL4CR_10_0),
- PINMUX_DATA(MSIOF1_SS1_43_MARK, PORT43_FN3, MSEL4CR_10_0),
- PINMUX_DATA(MSIOF1_SS2_44_MARK, PORT44_FN3, MSEL4CR_10_0),
- PINMUX_DATA(MMCD1_0_MARK, PORT54_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_1_MARK, PORT55_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_2_MARK, PORT56_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_3_MARK, PORT57_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_4_MARK, PORT58_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_5_MARK, PORT59_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_6_MARK, PORT60_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCD1_7_MARK, PORT61_FN3, MSEL4CR_15_1),
- PINMUX_DATA(VINT_I_MARK, PORT65_FN3),
- PINMUX_DATA(MMCCLK1_MARK, PORT66_FN3, MSEL4CR_15_1),
- PINMUX_DATA(MMCCMD1_MARK, PORT67_FN3, MSEL4CR_15_1),
- PINMUX_DATA(TPU0TO2_93_MARK, PORT93_FN3),
- PINMUX_DATA(TPU0TO2_99_MARK, PORT99_FN3),
- PINMUX_DATA(TPU0TO3_MARK, PORT112_FN3),
- PINMUX_DATA(IDIN_0_MARK, PORT113_FN3),
- PINMUX_DATA(EXTLP_0_MARK, PORT114_FN3),
- PINMUX_DATA(OVCN2_0_MARK, PORT115_FN3),
- PINMUX_DATA(PWEN_0_MARK, PORT116_FN3),
- PINMUX_DATA(OVCN_0_MARK, PORT117_FN3),
- PINMUX_DATA(KEYOUT7_MARK, PORT121_FN3),
- PINMUX_DATA(KEYOUT6_MARK, PORT122_FN3),
- PINMUX_DATA(KEYOUT5_MARK, PORT123_FN3),
- PINMUX_DATA(KEYOUT4_MARK, PORT124_FN3),
- PINMUX_DATA(KEYOUT3_MARK, PORT125_FN3),
- PINMUX_DATA(KEYOUT2_MARK, PORT126_FN3),
- PINMUX_DATA(KEYOUT1_MARK, PORT127_FN3),
- PINMUX_DATA(KEYOUT0_MARK, PORT128_FN3),
- PINMUX_DATA(KEYIN7_MARK, PORT129_FN3),
- PINMUX_DATA(KEYIN6_MARK, PORT130_FN3),
- PINMUX_DATA(KEYIN5_MARK, PORT131_FN3),
- PINMUX_DATA(KEYIN4_MARK, PORT132_FN3),
- PINMUX_DATA(KEYIN3_133_MARK, PORT133_FN3, MSEL4CR_18_0),
- PINMUX_DATA(KEYIN2_134_MARK, PORT134_FN3, MSEL4CR_18_0),
- PINMUX_DATA(KEYIN1_135_MARK, PORT135_FN3, MSEL4CR_18_0),
- PINMUX_DATA(KEYIN0_136_MARK, PORT136_FN3, MSEL4CR_18_0),
- PINMUX_DATA(TS_SPSYNC2_MARK, PORT137_FN3),
- PINMUX_DATA(IROUT_139_MARK, PORT139_FN3),
- PINMUX_DATA(IRDA_OUT_MARK, PORT140_FN3),
- PINMUX_DATA(IRDA_IN_MARK, PORT141_FN3),
- PINMUX_DATA(IRDA_FIRSEL_MARK, PORT142_FN3),
- PINMUX_DATA(TS_SDAT2_MARK, PORT145_FN3),
- PINMUX_DATA(TS_SDEN2_MARK, PORT146_FN3),
- PINMUX_DATA(TS_SCK2_MARK, PORT147_FN3),
-
- /* Function 4 */
- PINMUX_DATA(SCIFA3_CTS_43_MARK, PORT43_FN4, MSEL3CR_9_0),
- PINMUX_DATA(SCIFA3_RTS_44_MARK, PORT44_FN4),
- PINMUX_DATA(GP_RX_FLAG_MARK, PORT76_FN4),
- PINMUX_DATA(GP_RX_DATA_MARK, PORT77_FN4),
- PINMUX_DATA(GP_TX_READY_MARK, PORT78_FN4),
- PINMUX_DATA(GP_RX_WAKE_MARK, PORT79_FN4),
- PINMUX_DATA(MP_TX_FLAG_MARK, PORT80_FN4),
- PINMUX_DATA(MP_TX_DATA_MARK, PORT81_FN4),
- PINMUX_DATA(MP_RX_READY_MARK, PORT82_FN4),
- PINMUX_DATA(MP_TX_WAKE_MARK, PORT83_FN4),
- PINMUX_DATA(MMCD0_0_MARK, PORT84_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_1_MARK, PORT85_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_2_MARK, PORT86_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_3_MARK, PORT87_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_4_MARK, PORT88_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_5_MARK, PORT89_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_6_MARK, PORT90_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCD0_7_MARK, PORT91_FN4, MSEL4CR_15_0),
- PINMUX_DATA(MMCCMD0_MARK, PORT92_FN4, MSEL4CR_15_0),
- PINMUX_DATA(SIM_RST_MARK, PORT94_FN4),
- PINMUX_DATA(SIM_CLK_MARK, PORT95_FN4),
- PINMUX_DATA(SIM_D_MARK, PORT98_FN4),
- PINMUX_DATA(MMCCLK0_MARK, PORT99_FN4, MSEL4CR_15_0),
- PINMUX_DATA(IDIN_1_113_MARK, PORT113_FN4, MSEL4CR_14_0),
- PINMUX_DATA(OVCN_1_114_MARK, PORT114_FN4, MSEL4CR_14_0),
- PINMUX_DATA(PWEN_1_115_MARK, PORT115_FN4),
- PINMUX_DATA(EXTLP_1_MARK, PORT116_FN4),
- PINMUX_DATA(OVCN2_1_MARK, PORT117_FN4),
- PINMUX_DATA(KEYIN0_121_MARK, PORT121_FN4, MSEL4CR_18_1),
- PINMUX_DATA(KEYIN1_122_MARK, PORT122_FN4, MSEL4CR_18_1),
- PINMUX_DATA(KEYIN2_123_MARK, PORT123_FN4, MSEL4CR_18_1),
- PINMUX_DATA(KEYIN3_124_MARK, PORT124_FN4, MSEL4CR_18_1),
- PINMUX_DATA(PWEN_1_138_MARK, PORT138_FN4),
- PINMUX_DATA(IROUT_140_MARK, PORT140_FN4),
- PINMUX_DATA(LCDCS_MARK, PORT145_FN4),
- PINMUX_DATA(LCDWR_MARK, PORT147_FN4),
- PINMUX_DATA(LCDRS_MARK, PORT149_FN4),
- PINMUX_DATA(OVCN_1_162_MARK, PORT162_FN4, MSEL4CR_14_1),
-
- /* Function 5 */
- PINMUX_DATA(GPI0_MARK, PORT41_FN5),
- PINMUX_DATA(GPI1_MARK, PORT42_FN5),
- PINMUX_DATA(GPO0_MARK, PORT43_FN5),
- PINMUX_DATA(GPO1_MARK, PORT44_FN5),
- PINMUX_DATA(I2C_SCL3S_MARK, PORT137_FN5, MSEL4CR_16_0),
- PINMUX_DATA(I2C_SDA3S_MARK, PORT145_FN5, MSEL4CR_16_0),
- PINMUX_DATA(I2C_SCL4S_MARK, PORT146_FN5, MSEL4CR_17_0),
- PINMUX_DATA(I2C_SDA4S_MARK, PORT147_FN5, MSEL4CR_17_0),
-
- /* Function select */
- PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
- PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1),
-
- PINMUX_DATA(TS0_1SELECT_MARK, MSEL3CR_21_0, MSEL3CR_20_0),
- PINMUX_DATA(TS0_2SELECT_MARK, MSEL3CR_21_0, MSEL3CR_20_1),
- PINMUX_DATA(TS1_1SELECT_MARK, MSEL3CR_27_0, MSEL3CR_26_0),
- PINMUX_DATA(TS1_2SELECT_MARK, MSEL3CR_27_0, MSEL3CR_26_1),
-
- PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0),
- PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1),
-
- PINMUX_DATA(MFIv6_MARK, MSEL4CR_6_0),
- PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
-};
-
-#define __I (SH_PFC_PIN_CFG_INPUT)
-#define __O (SH_PFC_PIN_CFG_OUTPUT)
-#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
-#define __PD (SH_PFC_PIN_CFG_PULL_DOWN)
-#define __PU (SH_PFC_PIN_CFG_PULL_UP)
-#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
-
-#define SH7372_PIN_I_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PD)
-#define SH7372_PIN_I_PU(pin) SH_PFC_PIN_CFG(pin, __I | __PU)
-#define SH7372_PIN_I_PU_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PUD)
-#define SH7372_PIN_IO(pin) SH_PFC_PIN_CFG(pin, __IO)
-#define SH7372_PIN_IO_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PD)
-#define SH7372_PIN_IO_PU(pin) SH_PFC_PIN_CFG(pin, __IO | __PU)
-#define SH7372_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD)
-#define SH7372_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
-#define SH7372_PIN_O_PU_PD(pin) SH_PFC_PIN_CFG(pin, __O | __PUD)
-
-static const struct sh_pfc_pin pinmux_pins[] = {
- /* Table 57-1 (I/O and Pull U/D) */
- SH7372_PIN_IO_PD(0), SH7372_PIN_IO_PD(1),
- SH7372_PIN_O(2), SH7372_PIN_I_PD(3),
- SH7372_PIN_I_PD(4), SH7372_PIN_I_PD(5),
- SH7372_PIN_IO_PU_PD(6), SH7372_PIN_I_PD(7),
- SH7372_PIN_IO_PD(8), SH7372_PIN_O(9),
- SH7372_PIN_O(10), SH7372_PIN_O(11),
- SH7372_PIN_IO_PU_PD(12), SH7372_PIN_IO_PD(13),
- SH7372_PIN_IO_PD(14), SH7372_PIN_O(15),
- SH7372_PIN_IO_PD(16), SH7372_PIN_IO_PD(17),
- SH7372_PIN_I_PD(18), SH7372_PIN_IO(19),
- SH7372_PIN_IO(20), SH7372_PIN_IO(21),
- SH7372_PIN_IO(22), SH7372_PIN_IO(23),
- SH7372_PIN_IO(24), SH7372_PIN_IO(25),
- SH7372_PIN_IO(26), SH7372_PIN_IO(27),
- SH7372_PIN_IO(28), SH7372_PIN_IO(29),
- SH7372_PIN_IO(30), SH7372_PIN_IO(31),
- SH7372_PIN_IO(32), SH7372_PIN_IO(33),
- SH7372_PIN_IO(34), SH7372_PIN_IO(35),
- SH7372_PIN_IO(36), SH7372_PIN_IO(37),
- SH7372_PIN_IO(38), SH7372_PIN_IO(39),
- SH7372_PIN_IO(40), SH7372_PIN_IO(41),
- SH7372_PIN_IO(42), SH7372_PIN_IO(43),
- SH7372_PIN_IO(44), SH7372_PIN_IO(45),
- SH7372_PIN_IO_PU(46), SH7372_PIN_IO_PU(47),
- SH7372_PIN_IO_PU(48), SH7372_PIN_IO_PU(49),
- SH7372_PIN_IO_PU(50), SH7372_PIN_IO_PU(51),
- SH7372_PIN_IO_PU(52), SH7372_PIN_IO_PU(53),
- SH7372_PIN_IO_PU(54), SH7372_PIN_IO_PU(55),
- SH7372_PIN_IO_PU(56), SH7372_PIN_IO_PU(57),
- SH7372_PIN_IO_PU(58), SH7372_PIN_IO_PU(59),
- SH7372_PIN_IO_PU(60), SH7372_PIN_IO_PU(61),
- SH7372_PIN_IO(62), SH7372_PIN_O(63),
- SH7372_PIN_O(64), SH7372_PIN_IO_PU(65),
- SH7372_PIN_O_PU_PD(66), SH7372_PIN_IO_PU(67),
- SH7372_PIN_O(68), SH7372_PIN_IO(69),
- SH7372_PIN_IO(70), SH7372_PIN_IO(71),
- SH7372_PIN_O(72), SH7372_PIN_I_PU(73),
- SH7372_PIN_I_PU_PD(74), SH7372_PIN_IO_PU_PD(75),
- SH7372_PIN_IO_PU_PD(76), SH7372_PIN_IO_PU_PD(77),
- SH7372_PIN_IO_PU_PD(78), SH7372_PIN_IO_PU_PD(79),
- SH7372_PIN_IO_PU_PD(80), SH7372_PIN_IO_PU_PD(81),
- SH7372_PIN_IO_PU_PD(82), SH7372_PIN_IO_PU_PD(83),
- SH7372_PIN_IO_PU_PD(84), SH7372_PIN_IO_PU_PD(85),
- SH7372_PIN_IO_PU_PD(86), SH7372_PIN_IO_PU_PD(87),
- SH7372_PIN_IO_PU_PD(88), SH7372_PIN_IO_PU_PD(89),
- SH7372_PIN_IO_PU_PD(90), SH7372_PIN_IO_PU_PD(91),
- SH7372_PIN_IO_PU_PD(92), SH7372_PIN_IO_PU_PD(93),
- SH7372_PIN_IO_PU_PD(94), SH7372_PIN_IO_PU_PD(95),
- SH7372_PIN_IO_PU(96), SH7372_PIN_IO_PU_PD(97),
- SH7372_PIN_IO_PU_PD(98), SH7372_PIN_O_PU_PD(99),
- SH7372_PIN_IO_PD(100), SH7372_PIN_IO_PD(101),
- SH7372_PIN_IO_PD(102), SH7372_PIN_IO_PD(103),
- SH7372_PIN_IO_PD(104), SH7372_PIN_IO_PD(105),
- SH7372_PIN_IO_PU(106), SH7372_PIN_IO_PU(107),
- SH7372_PIN_IO_PU(108), SH7372_PIN_IO_PU(109),
- SH7372_PIN_IO_PU(110), SH7372_PIN_IO_PU(111),
- SH7372_PIN_IO_PD(112), SH7372_PIN_IO_PD(113),
- SH7372_PIN_IO_PU(114), SH7372_PIN_IO_PU(115),
- SH7372_PIN_IO_PU(116), SH7372_PIN_IO_PU(117),
- SH7372_PIN_IO_PU(118), SH7372_PIN_IO_PU(119),
- SH7372_PIN_IO_PU(120), SH7372_PIN_IO_PD(121),
- SH7372_PIN_IO_PD(122), SH7372_PIN_IO_PD(123),
- SH7372_PIN_IO_PD(124), SH7372_PIN_IO_PD(125),
- SH7372_PIN_IO_PD(126), SH7372_PIN_IO_PD(127),
- SH7372_PIN_IO_PD(128), SH7372_PIN_IO_PU_PD(129),
- SH7372_PIN_IO_PU_PD(130), SH7372_PIN_IO_PU_PD(131),
- SH7372_PIN_IO_PU_PD(132), SH7372_PIN_IO_PU_PD(133),
- SH7372_PIN_IO_PU_PD(134), SH7372_PIN_IO_PU_PD(135),
- SH7372_PIN_IO_PD(136), SH7372_PIN_IO_PD(137),
- SH7372_PIN_IO_PD(138), SH7372_PIN_IO_PD(139),
- SH7372_PIN_IO_PD(140), SH7372_PIN_IO_PD(141),
- SH7372_PIN_IO_PD(142), SH7372_PIN_IO_PU_PD(143),
- SH7372_PIN_IO_PD(144), SH7372_PIN_IO_PD(145),
- SH7372_PIN_IO_PD(146), SH7372_PIN_IO_PD(147),
- SH7372_PIN_IO_PD(148), SH7372_PIN_IO_PD(149),
- SH7372_PIN_IO_PD(150), SH7372_PIN_IO_PD(151),
- SH7372_PIN_IO_PU_PD(152), SH7372_PIN_I_PD(153),
- SH7372_PIN_IO_PU_PD(154), SH7372_PIN_I_PD(155),
- SH7372_PIN_IO_PD(156), SH7372_PIN_IO_PD(157),
- SH7372_PIN_I_PD(158), SH7372_PIN_IO_PD(159),
- SH7372_PIN_O(160), SH7372_PIN_IO_PD(161),
- SH7372_PIN_IO_PD(162), SH7372_PIN_IO_PD(163),
- SH7372_PIN_I_PD(164), SH7372_PIN_IO_PD(165),
- SH7372_PIN_I_PD(166), SH7372_PIN_I_PD(167),
- SH7372_PIN_I_PD(168), SH7372_PIN_I_PD(169),
- SH7372_PIN_I_PD(170), SH7372_PIN_O(171),
- SH7372_PIN_IO_PU_PD(172), SH7372_PIN_IO_PU_PD(173),
- SH7372_PIN_IO_PU_PD(174), SH7372_PIN_IO_PU_PD(175),
- SH7372_PIN_IO_PU_PD(176), SH7372_PIN_IO_PU_PD(177),
- SH7372_PIN_IO_PU_PD(178), SH7372_PIN_O(179),
- SH7372_PIN_IO_PU_PD(180), SH7372_PIN_IO_PU_PD(181),
- SH7372_PIN_IO_PU_PD(182), SH7372_PIN_IO_PU_PD(183),
- SH7372_PIN_IO_PU_PD(184), SH7372_PIN_O(185),
- SH7372_PIN_IO_PU_PD(186), SH7372_PIN_IO_PU_PD(187),
- SH7372_PIN_IO_PU_PD(188), SH7372_PIN_IO_PU_PD(189),
- SH7372_PIN_IO_PU_PD(190),
-};
-
-/* - BSC -------------------------------------------------------------------- */
-static const unsigned int bsc_data8_pins[] = {
- /* D[0:7] */
- 46, 47, 48, 49, 50, 51, 52, 53,
-};
-static const unsigned int bsc_data8_mux[] = {
- D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
- D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
-};
-static const unsigned int bsc_data16_pins[] = {
- /* D[0:15] */
- 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
-};
-static const unsigned int bsc_data16_mux[] = {
- D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
- D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
- D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK,
- D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK,
-};
-static const unsigned int bsc_cs0_pins[] = {
- /* CS */
- 62,
-};
-static const unsigned int bsc_cs0_mux[] = {
- CS0_MARK,
-};
-static const unsigned int bsc_cs2_pins[] = {
- /* CS */
- 63,
-};
-static const unsigned int bsc_cs2_mux[] = {
- CS2_MARK,
-};
-static const unsigned int bsc_cs4_pins[] = {
- /* CS */
- 64,
-};
-static const unsigned int bsc_cs4_mux[] = {
- CS4_MARK,
-};
-static const unsigned int bsc_cs5a_pins[] = {
- /* CS */
- 65,
-};
-static const unsigned int bsc_cs5a_mux[] = {
- CS5A_MARK,
-};
-static const unsigned int bsc_cs5b_pins[] = {
- /* CS */
- 66,
-};
-static const unsigned int bsc_cs5b_mux[] = {
- CS5B_MARK,
-};
-static const unsigned int bsc_cs6a_pins[] = {
- /* CS */
- 67,
-};
-static const unsigned int bsc_cs6a_mux[] = {
- CS6A_MARK,
-};
-static const unsigned int bsc_rd_we8_pins[] = {
- /* RD, WE[0] */
- 69, 70,
-};
-static const unsigned int bsc_rd_we8_mux[] = {
- RD_FSC_MARK, WE0_FWE_MARK,
-};
-static const unsigned int bsc_rd_we16_pins[] = {
- /* RD, WE[0:1] */
- 69, 70, 71,
-};
-static const unsigned int bsc_rd_we16_mux[] = {
- RD_FSC_MARK, WE0_FWE_MARK, WE1_MARK,
-};
-static const unsigned int bsc_bs_pins[] = {
- /* BS */
- 19,
-};
-static const unsigned int bsc_bs_mux[] = {
- BS_MARK,
-};
-static const unsigned int bsc_rdwr_pins[] = {
- /* RDWR */
- 75,
-};
-static const unsigned int bsc_rdwr_mux[] = {
- RDWR_MARK,
-};
-static const unsigned int bsc_wait_pins[] = {
- /* WAIT */
- 74,
-};
-static const unsigned int bsc_wait_mux[] = {
- WAIT_MARK,
-};
-/* - CEU -------------------------------------------------------------------- */
-static const unsigned int ceu_data_0_7_pins[] = {
- /* D[0:7] */
- 102, 103, 104, 105, 106, 107, 108, 109,
-};
-static const unsigned int ceu_data_0_7_mux[] = {
- VIO_D0_MARK, VIO_D1_MARK, VIO_D2_MARK, VIO_D3_MARK,
- VIO_D4_MARK, VIO_D5_MARK, VIO_D6_MARK, VIO_D7_MARK,
-};
-static const unsigned int ceu_data_8_15_pins[] = {
- /* D[8:15] */
- 110, 111, 112, 113, 114, 115, 116, 117,
-};
-static const unsigned int ceu_data_8_15_mux[] = {
- VIO_D8_MARK, VIO_D9_MARK, VIO_D10_MARK, VIO_D11_MARK,
- VIO_D12_MARK, VIO_D13_MARK, VIO_D14_MARK, VIO_D15_MARK,
-};
-static const unsigned int ceu_clk_0_pins[] = {
- /* CKO */
- 120,
-};
-static const unsigned int ceu_clk_0_mux[] = {
- VIO_CKO_MARK,
-};
-static const unsigned int ceu_clk_1_pins[] = {
- /* CKO */
- 16,
-};
-static const unsigned int ceu_clk_1_mux[] = {
- VIO_CKO1_MARK,
-};
-static const unsigned int ceu_clk_2_pins[] = {
- /* CKO */
- 17,
-};
-static const unsigned int ceu_clk_2_mux[] = {
- VIO_CKO2_MARK,
-};
-static const unsigned int ceu_sync_pins[] = {
- /* CLK, VD, HD */
- 118, 100, 101,
-};
-static const unsigned int ceu_sync_mux[] = {
- VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK,
-};
-static const unsigned int ceu_field_pins[] = {
- /* FIELD */
- 119,
-};
-static const unsigned int ceu_field_mux[] = {
- VIO_FIELD_MARK,
-};
-/* - FLCTL ------------------------------------------------------------------ */
-static const unsigned int flctl_data_pins[] = {
- /* NAF[0:15] */
- 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
-};
-static const unsigned int flctl_data_mux[] = {
- D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
- D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
- D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK,
- D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK,
-};
-static const unsigned int flctl_ce0_pins[] = {
- /* CE */
- 68,
-};
-static const unsigned int flctl_ce0_mux[] = {
- FCE0_MARK,
-};
-static const unsigned int flctl_ce1_pins[] = {
- /* CE */
- 66,
-};
-static const unsigned int flctl_ce1_mux[] = {
- FCE1_MARK,
-};
-static const unsigned int flctl_ctrl_pins[] = {
- /* FCDE, FOE, FSC, FWE, FRB */
- 24, 23, 69, 70, 73,
-};
-static const unsigned int flctl_ctrl_mux[] = {
- A5_FCDE_MARK, A4_FOE_MARK, RD_FSC_MARK, WE0_FWE_MARK, FRB_MARK,
-};
-/* - FSIA ------------------------------------------------------------------- */
-static const unsigned int fsia_mclk_in_pins[] = {
- /* CK */
- 4,
-};
-static const unsigned int fsia_mclk_in_mux[] = {
- FSIACK_MARK,
-};
-static const unsigned int fsia_mclk_out_pins[] = {
- /* OMC */
- 8,
-};
-static const unsigned int fsia_mclk_out_mux[] = {
- FSIAOMC_MARK,
-};
-static const unsigned int fsia_sclk_in_pins[] = {
- /* ILR, IBT */
- 5, 6,
-};
-static const unsigned int fsia_sclk_in_mux[] = {
- FSIAILR_MARK, FSIAIBT_MARK,
-};
-static const unsigned int fsia_sclk_out_pins[] = {
- /* OLR, OBT */
- 9, 10,
-};
-static const unsigned int fsia_sclk_out_mux[] = {
- FSIAOLR_MARK, FSIAOBT_MARK,
-};
-static const unsigned int fsia_data_in_pins[] = {
- /* ISLD */
- 7,
-};
-static const unsigned int fsia_data_in_mux[] = {
- FSIAISLD_MARK,
-};
-static const unsigned int fsia_data_out_pins[] = {
- /* OSLD */
- 11,
-};
-static const unsigned int fsia_data_out_mux[] = {
- FSIAOSLD_MARK,
-};
-static const unsigned int fsia_spdif_0_pins[] = {
- /* SPDIF */
- 11,
-};
-static const unsigned int fsia_spdif_0_mux[] = {
- FSIASPDIF_11_MARK,
-};
-static const unsigned int fsia_spdif_1_pins[] = {
- /* SPDIF */
- 15,
-};
-static const unsigned int fsia_spdif_1_mux[] = {
- FSIASPDIF_15_MARK,
-};
-/* - FSIB ------------------------------------------------------------------- */
-static const unsigned int fsib_mclk_in_pins[] = {
- /* CK */
- 4,
-};
-static const unsigned int fsib_mclk_in_mux[] = {
- FSIBCK_MARK,
-};
-/* - HDMI ------------------------------------------------------------------- */
-static const unsigned int hdmi_pins[] = {
- /* HPD, CEC */
- 169, 170,
-};
-static const unsigned int hdmi_mux[] = {
- HDMI_HPD_MARK, HDMI_CEC_MARK,
-};
-/* - INTC ------------------------------------------------------------------- */
-IRQC_PINS_MUX(0, 6, 162);
-IRQC_PIN_MUX(1, 12);
-IRQC_PINS_MUX(2, 4, 5);
-IRQC_PINS_MUX(3, 8, 16);
-IRQC_PINS_MUX(4, 17, 163);
-IRQC_PIN_MUX(5, 18);
-IRQC_PINS_MUX(6, 39, 164);
-IRQC_PINS_MUX(7, 40, 167);
-IRQC_PINS_MUX(8, 41, 168);
-IRQC_PINS_MUX(9, 42, 169);
-IRQC_PIN_MUX(10, 65);
-IRQC_PIN_MUX(11, 67);
-IRQC_PINS_MUX(12, 80, 137);
-IRQC_PINS_MUX(13, 81, 145);
-IRQC_PINS_MUX(14, 82, 146);
-IRQC_PINS_MUX(15, 83, 147);
-IRQC_PINS_MUX(16, 84, 170);
-IRQC_PIN_MUX(17, 85);
-IRQC_PIN_MUX(18, 86);
-IRQC_PIN_MUX(19, 87);
-IRQC_PIN_MUX(20, 92);
-IRQC_PIN_MUX(21, 93);
-IRQC_PIN_MUX(22, 94);
-IRQC_PIN_MUX(23, 95);
-IRQC_PIN_MUX(24, 112);
-IRQC_PIN_MUX(25, 119);
-IRQC_PINS_MUX(26, 121, 172);
-IRQC_PINS_MUX(27, 122, 180);
-IRQC_PINS_MUX(28, 123, 181);
-IRQC_PINS_MUX(29, 129, 182);
-IRQC_PINS_MUX(30, 130, 183);
-IRQC_PINS_MUX(31, 138, 184);
-/* - KEYSC ------------------------------------------------------------------ */
-static const unsigned int keysc_in04_0_pins[] = {
- /* KEYIN[0:4] */
- 136, 135, 134, 133, 132,
-};
-static const unsigned int keysc_in04_0_mux[] = {
- KEYIN0_136_MARK, KEYIN1_135_MARK, KEYIN2_134_MARK, KEYIN3_133_MARK,
- KEYIN4_MARK,
-};
-static const unsigned int keysc_in04_1_pins[] = {
- /* KEYIN[0:4] */
- 121, 122, 123, 124, 132,
-};
-static const unsigned int keysc_in04_1_mux[] = {
- KEYIN0_121_MARK, KEYIN1_122_MARK, KEYIN2_123_MARK, KEYIN3_124_MARK,
- KEYIN4_MARK,
-};
-static const unsigned int keysc_in5_pins[] = {
- /* KEYIN5 */
- 131,
-};
-static const unsigned int keysc_in5_mux[] = {
- KEYIN5_MARK,
-};
-static const unsigned int keysc_in6_pins[] = {
- /* KEYIN6 */
- 130,
-};
-static const unsigned int keysc_in6_mux[] = {
- KEYIN6_MARK,
-};
-static const unsigned int keysc_in7_pins[] = {
- /* KEYIN7 */
- 129,
-};
-static const unsigned int keysc_in7_mux[] = {
- KEYIN7_MARK,
-};
-static const unsigned int keysc_out4_pins[] = {
- /* KEYOUT[0:3] */
- 128, 127, 126, 125,
-};
-static const unsigned int keysc_out4_mux[] = {
- KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
-};
-static const unsigned int keysc_out5_pins[] = {
- /* KEYOUT[0:4] */
- 128, 127, 126, 125, 124,
-};
-static const unsigned int keysc_out5_mux[] = {
- KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
- KEYOUT4_MARK,
-};
-static const unsigned int keysc_out6_pins[] = {
- /* KEYOUT[0:5] */
- 128, 127, 126, 125, 124, 123,
-};
-static const unsigned int keysc_out6_mux[] = {
- KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
- KEYOUT4_MARK, KEYOUT5_MARK,
-};
-static const unsigned int keysc_out8_pins[] = {
- /* KEYOUT[0:7] */
- 128, 127, 126, 125, 124, 123, 122, 121,
-};
-static const unsigned int keysc_out8_mux[] = {
- KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
- KEYOUT4_MARK, KEYOUT5_MARK, KEYOUT6_MARK, KEYOUT7_MARK,
-};
-/* - LCD -------------------------------------------------------------------- */
-static const unsigned int lcd_data8_pins[] = {
- /* D[0:7] */
- 121, 122, 123, 124, 125, 126, 127, 128,
-};
-static const unsigned int lcd_data8_mux[] = {
- /* LCDC */
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
-};
-static const unsigned int lcd_data9_pins[] = {
- /* D[0:8] */
- 121, 122, 123, 124, 125, 126, 127, 128,
- 129,
- 137, 138, 139, 140, 141, 142, 143, 144,
-};
-static const unsigned int lcd_data9_mux[] = {
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
- LCDD8_MARK,
-};
-static const unsigned int lcd_data12_pins[] = {
- /* D[0:11] */
- 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132,
-};
-static const unsigned int lcd_data12_mux[] = {
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
- LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
-};
-static const unsigned int lcd_data16_pins[] = {
- /* D[0:15] */
- 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132, 133, 134, 135, 136,
-};
-static const unsigned int lcd_data16_mux[] = {
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
- LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
- LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
-};
-static const unsigned int lcd_data18_pins[] = {
- /* D[0:17] */
- 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132, 133, 134, 135, 136,
- 137, 138,
-};
-static const unsigned int lcd_data18_mux[] = {
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
- LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
- LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
- LCDD16_MARK, LCDD17_MARK,
-};
-static const unsigned int lcd_data24_pins[] = {
- /* D[0:23] */
- 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132, 133, 134, 135, 136,
- 137, 138, 139, 140, 141, 142, 143, 144,
-};
-static const unsigned int lcd_data24_mux[] = {
- LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
- LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
- LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
- LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
- LCDD16_MARK, LCDD17_MARK, LCDD18_MARK, LCDD19_MARK,
- LCDD20_MARK, LCDD21_MARK, LCDD22_MARK, LCDD23_MARK,
-};
-static const unsigned int lcd_display_pins[] = {
- /* DON */
- 151,
-};
-static const unsigned int lcd_display_mux[] = {
- LCDDON_MARK,
-};
-static const unsigned int lcd_lclk_pins[] = {
- /* LCLK */
- 150,
-};
-static const unsigned int lcd_lclk_mux[] = {
- LCDLCLK_MARK,
-};
-static const unsigned int lcd_sync_pins[] = {
- /* VSYN, HSYN, DCK, DISP */
- 146, 145, 147, 149,
-};
-static const unsigned int lcd_sync_mux[] = {
- LCDVSYN_MARK, LCDHSYN_MARK, LCDDCK_MARK, LCDDISP_MARK,
-};
-static const unsigned int lcd_sys_pins[] = {
- /* CS, WR, RD, RS */
- 145, 147, 148, 149,
-};
-static const unsigned int lcd_sys_mux[] = {
- LCDCS_MARK, LCDWR_MARK, LCDRD_MARK, LCDRS_MARK,
-};
-/* - MMCIF ------------------------------------------------------------------ */
-static const unsigned int mmc0_data1_0_pins[] = {
- /* D[0] */
- 84,
-};
-static const unsigned int mmc0_data1_0_mux[] = {
- MMCD0_0_MARK,
-};
-static const unsigned int mmc0_data4_0_pins[] = {
- /* D[0:3] */
- 84, 85, 86, 87,
-};
-static const unsigned int mmc0_data4_0_mux[] = {
- MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
-};
-static const unsigned int mmc0_data8_0_pins[] = {
- /* D[0:7] */
- 84, 85, 86, 87, 88, 89, 90, 91,
-};
-static const unsigned int mmc0_data8_0_mux[] = {
- MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
- MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
-};
-static const unsigned int mmc0_ctrl_0_pins[] = {
- /* CMD, CLK */
- 92, 99,
-};
-static const unsigned int mmc0_ctrl_0_mux[] = {
- MMCCMD0_MARK, MMCCLK0_MARK,
-};
-
-static const unsigned int mmc0_data1_1_pins[] = {
- /* D[0] */
- 54,
-};
-static const unsigned int mmc0_data1_1_mux[] = {
- MMCD1_0_MARK,
-};
-static const unsigned int mmc0_data4_1_pins[] = {
- /* D[0:3] */
- 54, 55, 56, 57,
-};
-static const unsigned int mmc0_data4_1_mux[] = {
- MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
-};
-static const unsigned int mmc0_data8_1_pins[] = {
- /* D[0:7] */
- 54, 55, 56, 57, 58, 59, 60, 61,
-};
-static const unsigned int mmc0_data8_1_mux[] = {
- MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
- MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
-};
-static const unsigned int mmc0_ctrl_1_pins[] = {
- /* CMD, CLK */
- 67, 66,
-};
-static const unsigned int mmc0_ctrl_1_mux[] = {
- MMCCMD1_MARK, MMCCLK1_MARK,
-};
-/* - SCIFA0 ----------------------------------------------------------------- */
-static const unsigned int scifa0_data_pins[] = {
- /* RXD, TXD */
- 153, 152,
-};
-static const unsigned int scifa0_data_mux[] = {
- SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
-};
-static const unsigned int scifa0_clk_pins[] = {
- /* SCK */
- 156,
-};
-static const unsigned int scifa0_clk_mux[] = {
- SCIFA0_SCK_MARK,
-};
-static const unsigned int scifa0_ctrl_pins[] = {
- /* RTS, CTS */
- 157, 158,
-};
-static const unsigned int scifa0_ctrl_mux[] = {
- SCIFA0_RTS_MARK, SCIFA0_CTS_MARK,
-};
-/* - SCIFA1 ----------------------------------------------------------------- */
-static const unsigned int scifa1_data_pins[] = {
- /* RXD, TXD */
- 155, 154,
-};
-static const unsigned int scifa1_data_mux[] = {
- SCIFA1_RXD_MARK, SCIFA1_TXD_MARK,
-};
-static const unsigned int scifa1_clk_pins[] = {
- /* SCK */
- 159,
-};
-static const unsigned int scifa1_clk_mux[] = {
- SCIFA1_SCK_MARK,
-};
-static const unsigned int scifa1_ctrl_pins[] = {
- /* RTS, CTS */
- 160, 161,
-};
-static const unsigned int scifa1_ctrl_mux[] = {
- SCIFA1_RTS_MARK, SCIFA1_CTS_MARK,
-};
-/* - SCIFA2 ----------------------------------------------------------------- */
-static const unsigned int scifa2_data_pins[] = {
- /* RXD, TXD */
- 97, 96,
-};
-static const unsigned int scifa2_data_mux[] = {
- SCIFA2_RXD1_MARK, SCIFA2_TXD1_MARK,
-};
-static const unsigned int scifa2_clk_pins[] = {
- /* SCK */
- 98,
-};
-static const unsigned int scifa2_clk_mux[] = {
- SCIFA2_SCK1_MARK,
-};
-static const unsigned int scifa2_ctrl_pins[] = {
- /* RTS, CTS */
- 95, 94,
-};
-static const unsigned int scifa2_ctrl_mux[] = {
- SCIFA2_RTS1_MARK, SCIFA2_CTS1_MARK,
-};
-/* - SCIFA3 ----------------------------------------------------------------- */
-static const unsigned int scifa3_data_pins[] = {
- /* RXD, TXD */
- 144, 143,
-};
-static const unsigned int scifa3_data_mux[] = {
- SCIFA3_RXD_MARK, SCIFA3_TXD_MARK,
-};
-static const unsigned int scifa3_clk_pins[] = {
- /* SCK */
- 142,
-};
-static const unsigned int scifa3_clk_mux[] = {
- SCIFA3_SCK_MARK,
-};
-static const unsigned int scifa3_ctrl_0_pins[] = {
- /* RTS, CTS */
- 44, 43,
-};
-static const unsigned int scifa3_ctrl_0_mux[] = {
- SCIFA3_RTS_44_MARK, SCIFA3_CTS_43_MARK,
-};
-static const unsigned int scifa3_ctrl_1_pins[] = {
- /* RTS, CTS */
- 141, 140,
-};
-static const unsigned int scifa3_ctrl_1_mux[] = {
- SCIFA3_RTS_141_MARK, SCIFA3_CTS_140_MARK,
-};
-/* - SCIFA4 ----------------------------------------------------------------- */
-static const unsigned int scifa4_data_pins[] = {
- /* RXD, TXD */
- 5, 6,
-};
-static const unsigned int scifa4_data_mux[] = {
- SCIFA4_RXD_MARK, SCIFA4_TXD_MARK,
-};
-/* - SCIFA5 ----------------------------------------------------------------- */
-static const unsigned int scifa5_data_pins[] = {
- /* RXD, TXD */
- 8, 12,
-};
-static const unsigned int scifa5_data_mux[] = {
- SCIFA5_RXD_MARK, SCIFA5_TXD_MARK,
-};
-/* - SCIFB ------------------------------------------------------------------ */
-static const unsigned int scifb_data_pins[] = {
- /* RXD, TXD */
- 166, 165,
-};
-static const unsigned int scifb_data_mux[] = {
- SCIFB_RXD_MARK, SCIFB_TXD_MARK,
-};
-static const unsigned int scifb_clk_pins[] = {
- /* SCK */
- 162,
-};
-static const unsigned int scifb_clk_mux[] = {
- SCIFB_SCK_MARK,
-};
-static const unsigned int scifb_ctrl_pins[] = {
- /* RTS, CTS */
- 163, 164,
-};
-static const unsigned int scifb_ctrl_mux[] = {
- SCIFB_RTS_MARK, SCIFB_CTS_MARK,
-};
-/* - SDHI0 ------------------------------------------------------------------ */
-static const unsigned int sdhi0_data1_pins[] = {
- /* D0 */
- 173,
-};
-static const unsigned int sdhi0_data1_mux[] = {
- SDHID0_0_MARK,
-};
-static const unsigned int sdhi0_data4_pins[] = {
- /* D[0:3] */
- 173, 174, 175, 176,
-};
-static const unsigned int sdhi0_data4_mux[] = {
- SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK,
-};
-static const unsigned int sdhi0_ctrl_pins[] = {
- /* CMD, CLK */
- 177, 171,
-};
-static const unsigned int sdhi0_ctrl_mux[] = {
- SDHICMD0_MARK, SDHICLK0_MARK,
-};
-static const unsigned int sdhi0_cd_pins[] = {
- /* CD */
- 172,
-};
-static const unsigned int sdhi0_cd_mux[] = {
- SDHICD0_MARK,
-};
-static const unsigned int sdhi0_wp_pins[] = {
- /* WP */
- 178,
-};
-static const unsigned int sdhi0_wp_mux[] = {
- SDHIWP0_MARK,
-};
-/* - SDHI1 ------------------------------------------------------------------ */
-static const unsigned int sdhi1_data1_pins[] = {
- /* D0 */
- 180,
-};
-static const unsigned int sdhi1_data1_mux[] = {
- SDHID1_0_MARK,
-};
-static const unsigned int sdhi1_data4_pins[] = {
- /* D[0:3] */
- 180, 181, 182, 183,
-};
-static const unsigned int sdhi1_data4_mux[] = {
- SDHID1_0_MARK, SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
-};
-static const unsigned int sdhi1_ctrl_pins[] = {
- /* CMD, CLK */
- 184, 179,
-};
-static const unsigned int sdhi1_ctrl_mux[] = {
- SDHICMD1_MARK, SDHICLK1_MARK,
-};
-
-static const unsigned int sdhi2_data1_pins[] = {
- /* D0 */
- 186,
-};
-static const unsigned int sdhi2_data1_mux[] = {
- SDHID2_0_MARK,
-};
-static const unsigned int sdhi2_data4_pins[] = {
- /* D[0:3] */
- 186, 187, 188, 189,
-};
-static const unsigned int sdhi2_data4_mux[] = {
- SDHID2_0_MARK, SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
-};
-static const unsigned int sdhi2_ctrl_pins[] = {
- /* CMD, CLK */
- 190, 185,
-};
-static const unsigned int sdhi2_ctrl_mux[] = {
- SDHICMD2_MARK, SDHICLK2_MARK,
-};
-/* - USB0 ------------------------------------------------------------------- */
-static const unsigned int usb0_vbus_pins[] = {
- /* VBUS */
- 167,
-};
-static const unsigned int usb0_vbus_mux[] = {
- VBUS0_0_MARK,
-};
-static const unsigned int usb0_otg_id_pins[] = {
- /* IDIN */
- 113,
-};
-static const unsigned int usb0_otg_id_mux[] = {
- IDIN_0_MARK,
-};
-static const unsigned int usb0_otg_ctrl_pins[] = {
- /* PWEN, EXTLP, OVCN, OVCN2 */
- 116, 114, 117, 115,
-};
-static const unsigned int usb0_otg_ctrl_mux[] = {
- PWEN_0_MARK, EXTLP_0_MARK, OVCN_0_MARK, OVCN2_0_MARK,
-};
-/* - USB1 ------------------------------------------------------------------- */
-static const unsigned int usb1_vbus_pins[] = {
- /* VBUS */
- 168,
-};
-static const unsigned int usb1_vbus_mux[] = {
- VBUS0_1_MARK,
-};
-static const unsigned int usb1_otg_id_0_pins[] = {
- /* IDIN */
- 113,
-};
-static const unsigned int usb1_otg_id_0_mux[] = {
- IDIN_1_113_MARK,
-};
-static const unsigned int usb1_otg_id_1_pins[] = {
- /* IDIN */
- 18,
-};
-static const unsigned int usb1_otg_id_1_mux[] = {
- IDIN_1_18_MARK,
-};
-static const unsigned int usb1_otg_ctrl_0_pins[] = {
- /* PWEN, EXTLP, OVCN, OVCN2 */
- 115, 116, 114, 117, 113,
-};
-static const unsigned int usb1_otg_ctrl_0_mux[] = {
- PWEN_1_115_MARK, EXTLP_1_MARK, OVCN_1_114_MARK, OVCN2_1_MARK,
-};
-static const unsigned int usb1_otg_ctrl_1_pins[] = {
- /* PWEN, EXTLP, OVCN, OVCN2 */
- 138, 116, 162, 117, 18,
-};
-static const unsigned int usb1_otg_ctrl_1_mux[] = {
- PWEN_1_138_MARK, EXTLP_1_MARK, OVCN_1_162_MARK, OVCN2_1_MARK,
-};
-
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(bsc_data8),
- SH_PFC_PIN_GROUP(bsc_data16),
- SH_PFC_PIN_GROUP(bsc_cs0),
- SH_PFC_PIN_GROUP(bsc_cs2),
- SH_PFC_PIN_GROUP(bsc_cs4),
- SH_PFC_PIN_GROUP(bsc_cs5a),
- SH_PFC_PIN_GROUP(bsc_cs5b),
- SH_PFC_PIN_GROUP(bsc_cs6a),
- SH_PFC_PIN_GROUP(bsc_rd_we8),
- SH_PFC_PIN_GROUP(bsc_rd_we16),
- SH_PFC_PIN_GROUP(bsc_bs),
- SH_PFC_PIN_GROUP(bsc_rdwr),
- SH_PFC_PIN_GROUP(ceu_data_0_7),
- SH_PFC_PIN_GROUP(ceu_data_8_15),
- SH_PFC_PIN_GROUP(ceu_clk_0),
- SH_PFC_PIN_GROUP(ceu_clk_1),
- SH_PFC_PIN_GROUP(ceu_clk_2),
- SH_PFC_PIN_GROUP(ceu_sync),
- SH_PFC_PIN_GROUP(ceu_field),
- SH_PFC_PIN_GROUP(flctl_data),
- SH_PFC_PIN_GROUP(flctl_ce0),
- SH_PFC_PIN_GROUP(flctl_ce1),
- SH_PFC_PIN_GROUP(flctl_ctrl),
- SH_PFC_PIN_GROUP(fsia_mclk_in),
- SH_PFC_PIN_GROUP(fsia_mclk_out),
- SH_PFC_PIN_GROUP(fsia_sclk_in),
- SH_PFC_PIN_GROUP(fsia_sclk_out),
- SH_PFC_PIN_GROUP(fsia_data_in),
- SH_PFC_PIN_GROUP(fsia_data_out),
- SH_PFC_PIN_GROUP(fsia_spdif_0),
- SH_PFC_PIN_GROUP(fsia_spdif_1),
- SH_PFC_PIN_GROUP(fsib_mclk_in),
- SH_PFC_PIN_GROUP(hdmi),
- SH_PFC_PIN_GROUP(intc_irq0_0),
- SH_PFC_PIN_GROUP(intc_irq0_1),
- SH_PFC_PIN_GROUP(intc_irq1),
- SH_PFC_PIN_GROUP(intc_irq2_0),
- SH_PFC_PIN_GROUP(intc_irq2_1),
- SH_PFC_PIN_GROUP(intc_irq3_0),
- SH_PFC_PIN_GROUP(intc_irq3_1),
- SH_PFC_PIN_GROUP(intc_irq4_0),
- SH_PFC_PIN_GROUP(intc_irq4_1),
- SH_PFC_PIN_GROUP(intc_irq5),
- SH_PFC_PIN_GROUP(intc_irq6_0),
- SH_PFC_PIN_GROUP(intc_irq6_1),
- SH_PFC_PIN_GROUP(intc_irq7_0),
- SH_PFC_PIN_GROUP(intc_irq7_1),
- SH_PFC_PIN_GROUP(intc_irq8_0),
- SH_PFC_PIN_GROUP(intc_irq8_1),
- SH_PFC_PIN_GROUP(intc_irq9_0),
- SH_PFC_PIN_GROUP(intc_irq9_1),
- SH_PFC_PIN_GROUP(intc_irq10),
- SH_PFC_PIN_GROUP(intc_irq11),
- SH_PFC_PIN_GROUP(intc_irq12_0),
- SH_PFC_PIN_GROUP(intc_irq12_1),
- SH_PFC_PIN_GROUP(intc_irq13_0),
- SH_PFC_PIN_GROUP(intc_irq13_1),
- SH_PFC_PIN_GROUP(intc_irq14_0),
- SH_PFC_PIN_GROUP(intc_irq14_1),
- SH_PFC_PIN_GROUP(intc_irq15_0),
- SH_PFC_PIN_GROUP(intc_irq15_1),
- SH_PFC_PIN_GROUP(intc_irq16_0),
- SH_PFC_PIN_GROUP(intc_irq16_1),
- SH_PFC_PIN_GROUP(intc_irq17),
- SH_PFC_PIN_GROUP(intc_irq18),
- SH_PFC_PIN_GROUP(intc_irq19),
- SH_PFC_PIN_GROUP(intc_irq20),
- SH_PFC_PIN_GROUP(intc_irq21),
- SH_PFC_PIN_GROUP(intc_irq22),
- SH_PFC_PIN_GROUP(intc_irq23),
- SH_PFC_PIN_GROUP(intc_irq24),
- SH_PFC_PIN_GROUP(intc_irq25),
- SH_PFC_PIN_GROUP(intc_irq26_0),
- SH_PFC_PIN_GROUP(intc_irq26_1),
- SH_PFC_PIN_GROUP(intc_irq27_0),
- SH_PFC_PIN_GROUP(intc_irq27_1),
- SH_PFC_PIN_GROUP(intc_irq28_0),
- SH_PFC_PIN_GROUP(intc_irq28_1),
- SH_PFC_PIN_GROUP(intc_irq29_0),
- SH_PFC_PIN_GROUP(intc_irq29_1),
- SH_PFC_PIN_GROUP(intc_irq30_0),
- SH_PFC_PIN_GROUP(intc_irq30_1),
- SH_PFC_PIN_GROUP(intc_irq31_0),
- SH_PFC_PIN_GROUP(intc_irq31_1),
- SH_PFC_PIN_GROUP(keysc_in04_0),
- SH_PFC_PIN_GROUP(keysc_in04_1),
- SH_PFC_PIN_GROUP(keysc_in5),
- SH_PFC_PIN_GROUP(keysc_in6),
- SH_PFC_PIN_GROUP(keysc_in7),
- SH_PFC_PIN_GROUP(keysc_out4),
- SH_PFC_PIN_GROUP(keysc_out5),
- SH_PFC_PIN_GROUP(keysc_out6),
- SH_PFC_PIN_GROUP(keysc_out8),
- SH_PFC_PIN_GROUP(lcd_data8),
- SH_PFC_PIN_GROUP(lcd_data9),
- SH_PFC_PIN_GROUP(lcd_data12),
- SH_PFC_PIN_GROUP(lcd_data16),
- SH_PFC_PIN_GROUP(lcd_data18),
- SH_PFC_PIN_GROUP(lcd_data24),
- SH_PFC_PIN_GROUP(lcd_display),
- SH_PFC_PIN_GROUP(lcd_lclk),
- SH_PFC_PIN_GROUP(lcd_sync),
- SH_PFC_PIN_GROUP(lcd_sys),
- SH_PFC_PIN_GROUP(mmc0_data1_0),
- SH_PFC_PIN_GROUP(mmc0_data4_0),
- SH_PFC_PIN_GROUP(mmc0_data8_0),
- SH_PFC_PIN_GROUP(mmc0_ctrl_0),
- SH_PFC_PIN_GROUP(mmc0_data1_1),
- SH_PFC_PIN_GROUP(mmc0_data4_1),
- SH_PFC_PIN_GROUP(mmc0_data8_1),
- SH_PFC_PIN_GROUP(mmc0_ctrl_1),
- SH_PFC_PIN_GROUP(scifa0_data),
- SH_PFC_PIN_GROUP(scifa0_clk),
- SH_PFC_PIN_GROUP(scifa0_ctrl),
- SH_PFC_PIN_GROUP(scifa1_data),
- SH_PFC_PIN_GROUP(scifa1_clk),
- SH_PFC_PIN_GROUP(scifa1_ctrl),
- SH_PFC_PIN_GROUP(scifa2_data),
- SH_PFC_PIN_GROUP(scifa2_clk),
- SH_PFC_PIN_GROUP(scifa2_ctrl),
- SH_PFC_PIN_GROUP(scifa3_data),
- SH_PFC_PIN_GROUP(scifa3_clk),
- SH_PFC_PIN_GROUP(scifa3_ctrl_0),
- SH_PFC_PIN_GROUP(scifa3_ctrl_1),
- SH_PFC_PIN_GROUP(scifa4_data),
- SH_PFC_PIN_GROUP(scifa5_data),
- SH_PFC_PIN_GROUP(scifb_data),
- SH_PFC_PIN_GROUP(scifb_clk),
- SH_PFC_PIN_GROUP(scifb_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(usb0_vbus),
- SH_PFC_PIN_GROUP(usb0_otg_id),
- SH_PFC_PIN_GROUP(usb0_otg_ctrl),
- SH_PFC_PIN_GROUP(usb1_vbus),
- SH_PFC_PIN_GROUP(usb1_otg_id_0),
- SH_PFC_PIN_GROUP(usb1_otg_id_1),
- SH_PFC_PIN_GROUP(usb1_otg_ctrl_0),
- SH_PFC_PIN_GROUP(usb1_otg_ctrl_1),
-};
-
-static const char * const bsc_groups[] = {
- "bsc_data8",
- "bsc_data16",
- "bsc_cs0",
- "bsc_cs2",
- "bsc_cs4",
- "bsc_cs5a",
- "bsc_cs5b",
- "bsc_cs6a",
- "bsc_rd_we8",
- "bsc_rd_we16",
- "bsc_bs",
- "bsc_rdwr",
-};
-
-static const char * const ceu_groups[] = {
- "ceu_data_0_7",
- "ceu_data_8_15",
- "ceu_clk_0",
- "ceu_clk_1",
- "ceu_clk_2",
- "ceu_sync",
- "ceu_field",
-};
-
-static const char * const flctl_groups[] = {
- "flctl_data",
- "flctl_ce0",
- "flctl_ce1",
- "flctl_ctrl",
-};
-
-static const char * const fsia_groups[] = {
- "fsia_mclk_in",
- "fsia_mclk_out",
- "fsia_sclk_in",
- "fsia_sclk_out",
- "fsia_data_in",
- "fsia_data_out",
- "fsia_spdif_0",
- "fsia_spdif_1",
-};
-
-static const char * const fsib_groups[] = {
- "fsib_mclk_in",
-};
-
-static const char * const hdmi_groups[] = {
- "hdmi",
-};
-
-static const char * const intc_groups[] = {
- "intc_irq0_0",
- "intc_irq0_1",
- "intc_irq1",
- "intc_irq2_0",
- "intc_irq2_1",
- "intc_irq3_0",
- "intc_irq3_1",
- "intc_irq4_0",
- "intc_irq4_1",
- "intc_irq5",
- "intc_irq6_0",
- "intc_irq6_1",
- "intc_irq7_0",
- "intc_irq7_1",
- "intc_irq8_0",
- "intc_irq8_1",
- "intc_irq9_0",
- "intc_irq9_1",
- "intc_irq10",
- "intc_irq11",
- "intc_irq12_0",
- "intc_irq12_1",
- "intc_irq13_0",
- "intc_irq13_1",
- "intc_irq14_0",
- "intc_irq14_1",
- "intc_irq15_0",
- "intc_irq15_1",
- "intc_irq16_0",
- "intc_irq16_1",
- "intc_irq17",
- "intc_irq18",
- "intc_irq19",
- "intc_irq20",
- "intc_irq21",
- "intc_irq22",
- "intc_irq23",
- "intc_irq24",
- "intc_irq25",
- "intc_irq26_0",
- "intc_irq26_1",
- "intc_irq27_0",
- "intc_irq27_1",
- "intc_irq28_0",
- "intc_irq28_1",
- "intc_irq29_0",
- "intc_irq29_1",
- "intc_irq30_0",
- "intc_irq30_1",
- "intc_irq31_0",
- "intc_irq31_1",
-};
-
-static const char * const keysc_groups[] = {
- "keysc_in04_0",
- "keysc_in04_1",
- "keysc_in5",
- "keysc_in6",
- "keysc_in7",
- "keysc_out4",
- "keysc_out5",
- "keysc_out6",
- "keysc_out8",
-};
-
-static const char * const lcd_groups[] = {
- "lcd_data8",
- "lcd_data9",
- "lcd_data12",
- "lcd_data16",
- "lcd_data18",
- "lcd_data24",
- "lcd_display",
- "lcd_lclk",
- "lcd_sync",
- "lcd_sys",
-};
-
-static const char * const mmc0_groups[] = {
- "mmc0_data1_0",
- "mmc0_data4_0",
- "mmc0_data8_0",
- "mmc0_ctrl_0",
- "mmc0_data1_1",
- "mmc0_data4_1",
- "mmc0_data8_1",
- "mmc0_ctrl_1",
-};
-
-static const char * const scifa0_groups[] = {
- "scifa0_data",
- "scifa0_clk",
- "scifa0_ctrl",
-};
-
-static const char * const scifa1_groups[] = {
- "scifa1_data",
- "scifa1_clk",
- "scifa1_ctrl",
-};
-
-static const char * const scifa2_groups[] = {
- "scifa2_data",
- "scifa2_clk",
- "scifa2_ctrl",
-};
-
-static const char * const scifa3_groups[] = {
- "scifa3_data",
- "scifa3_clk",
- "scifa3_ctrl_0",
- "scifa3_ctrl_1",
-};
-
-static const char * const scifa4_groups[] = {
- "scifa4_data",
-};
-
-static const char * const scifa5_groups[] = {
- "scifa5_data",
-};
-
-static const char * const scifb_groups[] = {
- "scifb_data",
- "scifb_clk",
- "scifb_ctrl",
-};
-
-static const char * const sdhi0_groups[] = {
- "sdhi0_data1",
- "sdhi0_data4",
- "sdhi0_ctrl",
- "sdhi0_cd",
- "sdhi0_wp",
-};
-
-static const char * const sdhi1_groups[] = {
- "sdhi1_data1",
- "sdhi1_data4",
- "sdhi1_ctrl",
-};
-
-static const char * const sdhi2_groups[] = {
- "sdhi2_data1",
- "sdhi2_data4",
- "sdhi2_ctrl",
-};
-
-static const char * const usb0_groups[] = {
- "usb0_vbus",
- "usb0_otg_id",
- "usb0_otg_ctrl",
-};
-
-static const char * const usb1_groups[] = {
- "usb1_vbus",
- "usb1_otg_id_0",
- "usb1_otg_id_1",
- "usb1_otg_ctrl_0",
- "usb1_otg_ctrl_1",
-};
-
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(bsc),
- SH_PFC_FUNCTION(ceu),
- SH_PFC_FUNCTION(flctl),
- SH_PFC_FUNCTION(fsia),
- SH_PFC_FUNCTION(fsib),
- SH_PFC_FUNCTION(hdmi),
- SH_PFC_FUNCTION(intc),
- SH_PFC_FUNCTION(keysc),
- SH_PFC_FUNCTION(lcd),
- SH_PFC_FUNCTION(mmc0),
- SH_PFC_FUNCTION(scifa0),
- SH_PFC_FUNCTION(scifa1),
- SH_PFC_FUNCTION(scifa2),
- SH_PFC_FUNCTION(scifa3),
- SH_PFC_FUNCTION(scifa4),
- SH_PFC_FUNCTION(scifa5),
- SH_PFC_FUNCTION(scifb),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
-};
-
-static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- PORTCR(0, 0xE6051000), /* PORT0CR */
- PORTCR(1, 0xE6051001), /* PORT1CR */
- PORTCR(2, 0xE6051002), /* PORT2CR */
- PORTCR(3, 0xE6051003), /* PORT3CR */
- PORTCR(4, 0xE6051004), /* PORT4CR */
- PORTCR(5, 0xE6051005), /* PORT5CR */
- PORTCR(6, 0xE6051006), /* PORT6CR */
- PORTCR(7, 0xE6051007), /* PORT7CR */
- PORTCR(8, 0xE6051008), /* PORT8CR */
- PORTCR(9, 0xE6051009), /* PORT9CR */
- PORTCR(10, 0xE605100A), /* PORT10CR */
- PORTCR(11, 0xE605100B), /* PORT11CR */
- PORTCR(12, 0xE605100C), /* PORT12CR */
- PORTCR(13, 0xE605100D), /* PORT13CR */
- PORTCR(14, 0xE605100E), /* PORT14CR */
- PORTCR(15, 0xE605100F), /* PORT15CR */
- PORTCR(16, 0xE6051010), /* PORT16CR */
- PORTCR(17, 0xE6051011), /* PORT17CR */
- PORTCR(18, 0xE6051012), /* PORT18CR */
- PORTCR(19, 0xE6051013), /* PORT19CR */
- PORTCR(20, 0xE6051014), /* PORT20CR */
- PORTCR(21, 0xE6051015), /* PORT21CR */
- PORTCR(22, 0xE6051016), /* PORT22CR */
- PORTCR(23, 0xE6051017), /* PORT23CR */
- PORTCR(24, 0xE6051018), /* PORT24CR */
- PORTCR(25, 0xE6051019), /* PORT25CR */
- PORTCR(26, 0xE605101A), /* PORT26CR */
- PORTCR(27, 0xE605101B), /* PORT27CR */
- PORTCR(28, 0xE605101C), /* PORT28CR */
- PORTCR(29, 0xE605101D), /* PORT29CR */
- PORTCR(30, 0xE605101E), /* PORT30CR */
- PORTCR(31, 0xE605101F), /* PORT31CR */
- PORTCR(32, 0xE6051020), /* PORT32CR */
- PORTCR(33, 0xE6051021), /* PORT33CR */
- PORTCR(34, 0xE6051022), /* PORT34CR */
- PORTCR(35, 0xE6051023), /* PORT35CR */
- PORTCR(36, 0xE6051024), /* PORT36CR */
- PORTCR(37, 0xE6051025), /* PORT37CR */
- PORTCR(38, 0xE6051026), /* PORT38CR */
- PORTCR(39, 0xE6051027), /* PORT39CR */
- PORTCR(40, 0xE6051028), /* PORT40CR */
- PORTCR(41, 0xE6051029), /* PORT41CR */
- PORTCR(42, 0xE605102A), /* PORT42CR */
- PORTCR(43, 0xE605102B), /* PORT43CR */
- PORTCR(44, 0xE605102C), /* PORT44CR */
- PORTCR(45, 0xE605102D), /* PORT45CR */
- PORTCR(46, 0xE605202E), /* PORT46CR */
- PORTCR(47, 0xE605202F), /* PORT47CR */
- PORTCR(48, 0xE6052030), /* PORT48CR */
- PORTCR(49, 0xE6052031), /* PORT49CR */
- PORTCR(50, 0xE6052032), /* PORT50CR */
- PORTCR(51, 0xE6052033), /* PORT51CR */
- PORTCR(52, 0xE6052034), /* PORT52CR */
- PORTCR(53, 0xE6052035), /* PORT53CR */
- PORTCR(54, 0xE6052036), /* PORT54CR */
- PORTCR(55, 0xE6052037), /* PORT55CR */
- PORTCR(56, 0xE6052038), /* PORT56CR */
- PORTCR(57, 0xE6052039), /* PORT57CR */
- PORTCR(58, 0xE605203A), /* PORT58CR */
- PORTCR(59, 0xE605203B), /* PORT59CR */
- PORTCR(60, 0xE605203C), /* PORT60CR */
- PORTCR(61, 0xE605203D), /* PORT61CR */
- PORTCR(62, 0xE605203E), /* PORT62CR */
- PORTCR(63, 0xE605203F), /* PORT63CR */
- PORTCR(64, 0xE6052040), /* PORT64CR */
- PORTCR(65, 0xE6052041), /* PORT65CR */
- PORTCR(66, 0xE6052042), /* PORT66CR */
- PORTCR(67, 0xE6052043), /* PORT67CR */
- PORTCR(68, 0xE6052044), /* PORT68CR */
- PORTCR(69, 0xE6052045), /* PORT69CR */
- PORTCR(70, 0xE6052046), /* PORT70CR */
- PORTCR(71, 0xE6052047), /* PORT71CR */
- PORTCR(72, 0xE6052048), /* PORT72CR */
- PORTCR(73, 0xE6052049), /* PORT73CR */
- PORTCR(74, 0xE605204A), /* PORT74CR */
- PORTCR(75, 0xE605204B), /* PORT75CR */
- PORTCR(76, 0xE605004C), /* PORT76CR */
- PORTCR(77, 0xE605004D), /* PORT77CR */
- PORTCR(78, 0xE605004E), /* PORT78CR */
- PORTCR(79, 0xE605004F), /* PORT79CR */
- PORTCR(80, 0xE6050050), /* PORT80CR */
- PORTCR(81, 0xE6050051), /* PORT81CR */
- PORTCR(82, 0xE6050052), /* PORT82CR */
- PORTCR(83, 0xE6050053), /* PORT83CR */
- PORTCR(84, 0xE6050054), /* PORT84CR */
- PORTCR(85, 0xE6050055), /* PORT85CR */
- PORTCR(86, 0xE6050056), /* PORT86CR */
- PORTCR(87, 0xE6050057), /* PORT87CR */
- PORTCR(88, 0xE6050058), /* PORT88CR */
- PORTCR(89, 0xE6050059), /* PORT89CR */
- PORTCR(90, 0xE605005A), /* PORT90CR */
- PORTCR(91, 0xE605005B), /* PORT91CR */
- PORTCR(92, 0xE605005C), /* PORT92CR */
- PORTCR(93, 0xE605005D), /* PORT93CR */
- PORTCR(94, 0xE605005E), /* PORT94CR */
- PORTCR(95, 0xE605005F), /* PORT95CR */
- PORTCR(96, 0xE6050060), /* PORT96CR */
- PORTCR(97, 0xE6050061), /* PORT97CR */
- PORTCR(98, 0xE6050062), /* PORT98CR */
- PORTCR(99, 0xE6050063), /* PORT99CR */
- PORTCR(100, 0xE6053064), /* PORT100CR */
- PORTCR(101, 0xE6053065), /* PORT101CR */
- PORTCR(102, 0xE6053066), /* PORT102CR */
- PORTCR(103, 0xE6053067), /* PORT103CR */
- PORTCR(104, 0xE6053068), /* PORT104CR */
- PORTCR(105, 0xE6053069), /* PORT105CR */
- PORTCR(106, 0xE605306A), /* PORT106CR */
- PORTCR(107, 0xE605306B), /* PORT107CR */
- PORTCR(108, 0xE605306C), /* PORT108CR */
- PORTCR(109, 0xE605306D), /* PORT109CR */
- PORTCR(110, 0xE605306E), /* PORT110CR */
- PORTCR(111, 0xE605306F), /* PORT111CR */
- PORTCR(112, 0xE6053070), /* PORT112CR */
- PORTCR(113, 0xE6053071), /* PORT113CR */
- PORTCR(114, 0xE6053072), /* PORT114CR */
- PORTCR(115, 0xE6053073), /* PORT115CR */
- PORTCR(116, 0xE6053074), /* PORT116CR */
- PORTCR(117, 0xE6053075), /* PORT117CR */
- PORTCR(118, 0xE6053076), /* PORT118CR */
- PORTCR(119, 0xE6053077), /* PORT119CR */
- PORTCR(120, 0xE6053078), /* PORT120CR */
- PORTCR(121, 0xE6050079), /* PORT121CR */
- PORTCR(122, 0xE605007A), /* PORT122CR */
- PORTCR(123, 0xE605007B), /* PORT123CR */
- PORTCR(124, 0xE605007C), /* PORT124CR */
- PORTCR(125, 0xE605007D), /* PORT125CR */
- PORTCR(126, 0xE605007E), /* PORT126CR */
- PORTCR(127, 0xE605007F), /* PORT127CR */
- PORTCR(128, 0xE6050080), /* PORT128CR */
- PORTCR(129, 0xE6050081), /* PORT129CR */
- PORTCR(130, 0xE6050082), /* PORT130CR */
- PORTCR(131, 0xE6050083), /* PORT131CR */
- PORTCR(132, 0xE6050084), /* PORT132CR */
- PORTCR(133, 0xE6050085), /* PORT133CR */
- PORTCR(134, 0xE6050086), /* PORT134CR */
- PORTCR(135, 0xE6050087), /* PORT135CR */
- PORTCR(136, 0xE6050088), /* PORT136CR */
- PORTCR(137, 0xE6050089), /* PORT137CR */
- PORTCR(138, 0xE605008A), /* PORT138CR */
- PORTCR(139, 0xE605008B), /* PORT139CR */
- PORTCR(140, 0xE605008C), /* PORT140CR */
- PORTCR(141, 0xE605008D), /* PORT141CR */
- PORTCR(142, 0xE605008E), /* PORT142CR */
- PORTCR(143, 0xE605008F), /* PORT143CR */
- PORTCR(144, 0xE6050090), /* PORT144CR */
- PORTCR(145, 0xE6050091), /* PORT145CR */
- PORTCR(146, 0xE6050092), /* PORT146CR */
- PORTCR(147, 0xE6050093), /* PORT147CR */
- PORTCR(148, 0xE6050094), /* PORT148CR */
- PORTCR(149, 0xE6050095), /* PORT149CR */
- PORTCR(150, 0xE6050096), /* PORT150CR */
- PORTCR(151, 0xE6050097), /* PORT151CR */
- PORTCR(152, 0xE6053098), /* PORT152CR */
- PORTCR(153, 0xE6053099), /* PORT153CR */
- PORTCR(154, 0xE605309A), /* PORT154CR */
- PORTCR(155, 0xE605309B), /* PORT155CR */
- PORTCR(156, 0xE605009C), /* PORT156CR */
- PORTCR(157, 0xE605009D), /* PORT157CR */
- PORTCR(158, 0xE605009E), /* PORT158CR */
- PORTCR(159, 0xE605009F), /* PORT159CR */
- PORTCR(160, 0xE60500A0), /* PORT160CR */
- PORTCR(161, 0xE60500A1), /* PORT161CR */
- PORTCR(162, 0xE60500A2), /* PORT162CR */
- PORTCR(163, 0xE60500A3), /* PORT163CR */
- PORTCR(164, 0xE60500A4), /* PORT164CR */
- PORTCR(165, 0xE60500A5), /* PORT165CR */
- PORTCR(166, 0xE60500A6), /* PORT166CR */
- PORTCR(167, 0xE60520A7), /* PORT167CR */
- PORTCR(168, 0xE60520A8), /* PORT168CR */
- PORTCR(169, 0xE60520A9), /* PORT169CR */
- PORTCR(170, 0xE60520AA), /* PORT170CR */
- PORTCR(171, 0xE60520AB), /* PORT171CR */
- PORTCR(172, 0xE60520AC), /* PORT172CR */
- PORTCR(173, 0xE60520AD), /* PORT173CR */
- PORTCR(174, 0xE60520AE), /* PORT174CR */
- PORTCR(175, 0xE60520AF), /* PORT175CR */
- PORTCR(176, 0xE60520B0), /* PORT176CR */
- PORTCR(177, 0xE60520B1), /* PORT177CR */
- PORTCR(178, 0xE60520B2), /* PORT178CR */
- PORTCR(179, 0xE60520B3), /* PORT179CR */
- PORTCR(180, 0xE60520B4), /* PORT180CR */
- PORTCR(181, 0xE60520B5), /* PORT181CR */
- PORTCR(182, 0xE60520B6), /* PORT182CR */
- PORTCR(183, 0xE60520B7), /* PORT183CR */
- PORTCR(184, 0xE60520B8), /* PORT184CR */
- PORTCR(185, 0xE60520B9), /* PORT185CR */
- PORTCR(186, 0xE60520BA), /* PORT186CR */
- PORTCR(187, 0xE60520BB), /* PORT187CR */
- PORTCR(188, 0xE60520BC), /* PORT188CR */
- PORTCR(189, 0xE60520BD), /* PORT189CR */
- PORTCR(190, 0xE60520BE), /* PORT190CR */
-
- { PINMUX_CFG_REG("MSEL1CR", 0xE605800C, 32, 1) {
- MSEL1CR_31_0, MSEL1CR_31_1,
- MSEL1CR_30_0, MSEL1CR_30_1,
- MSEL1CR_29_0, MSEL1CR_29_1,
- MSEL1CR_28_0, MSEL1CR_28_1,
- MSEL1CR_27_0, MSEL1CR_27_1,
- MSEL1CR_26_0, MSEL1CR_26_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- MSEL1CR_16_0, MSEL1CR_16_1,
- MSEL1CR_15_0, MSEL1CR_15_1,
- MSEL1CR_14_0, MSEL1CR_14_1,
- MSEL1CR_13_0, MSEL1CR_13_1,
- MSEL1CR_12_0, MSEL1CR_12_1,
- 0, 0, 0, 0,
- MSEL1CR_9_0, MSEL1CR_9_1,
- MSEL1CR_8_0, MSEL1CR_8_1,
- MSEL1CR_7_0, MSEL1CR_7_1,
- MSEL1CR_6_0, MSEL1CR_6_1,
- 0, 0,
- MSEL1CR_4_0, MSEL1CR_4_1,
- MSEL1CR_3_0, MSEL1CR_3_1,
- MSEL1CR_2_0, MSEL1CR_2_1,
- 0, 0,
- MSEL1CR_0_0, MSEL1CR_0_1,
- }
- },
- { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- MSEL3CR_27_0, MSEL3CR_27_1,
- MSEL3CR_26_0, MSEL3CR_26_1,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- MSEL3CR_21_0, MSEL3CR_21_1,
- MSEL3CR_20_0, MSEL3CR_20_1,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- MSEL3CR_15_0, MSEL3CR_15_1,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0,
- MSEL3CR_9_0, MSEL3CR_9_1,
- 0, 0, 0, 0,
- MSEL3CR_6_0, MSEL3CR_6_1,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- }
- },
- { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- MSEL4CR_19_0, MSEL4CR_19_1,
- MSEL4CR_18_0, MSEL4CR_18_1,
- MSEL4CR_17_0, MSEL4CR_17_1,
- MSEL4CR_16_0, MSEL4CR_16_1,
- MSEL4CR_15_0, MSEL4CR_15_1,
- MSEL4CR_14_0, MSEL4CR_14_1,
- 0, 0, 0, 0,
- 0, 0,
- MSEL4CR_10_0, MSEL4CR_10_1,
- 0, 0, 0, 0,
- 0, 0,
- MSEL4CR_6_0, MSEL4CR_6_1,
- 0, 0,
- MSEL4CR_4_0, MSEL4CR_4_1,
- 0, 0, 0, 0,
- MSEL4CR_1_0, MSEL4CR_1_1,
- 0, 0,
- }
- },
- { },
-};
-
-static const struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("PORTL095_064DR", 0xE6054008, 32) {
- PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
- PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
- PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
- PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
- PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- }
- },
- { PINMUX_DATA_REG("PORTL127_096DR", 0xE605400C, 32) {
- PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
- PORT123_DATA, PORT122_DATA, PORT121_DATA, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA,
- }
- },
- { PINMUX_DATA_REG("PORTL159_128DR", 0xE6054010, 32) {
- PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
- 0, 0, 0, 0,
- PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
- PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
- PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
- PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
- PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
- PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA,
- }
- },
- { PINMUX_DATA_REG("PORTL191_160DR", 0xE6054014, 32) {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, PORT166_DATA, PORT165_DATA, PORT164_DATA,
- PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA,
- }
- },
- { PINMUX_DATA_REG("PORTD031_000DR", 0xE6055000, 32) {
- PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
- PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
- PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
- PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
- PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
- PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
- PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
- PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA,
- }
- },
- { PINMUX_DATA_REG("PORTD063_032DR", 0xE6055004, 32) {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, PORT45_DATA, PORT44_DATA,
- PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
- PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
- PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA,
- }
- },
- { PINMUX_DATA_REG("PORTR063_032DR", 0xE6056004, 32) {
- PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
- PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
- PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
- PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
- PORT47_DATA, PORT46_DATA, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- }
- },
- { PINMUX_DATA_REG("PORTR095_064DR", 0xE6056008, 32) {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
- PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
- PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA,
- }
- },
- { PINMUX_DATA_REG("PORTR191_160DR", 0xE6056014, 32) {
- 0, PORT190_DATA, PORT189_DATA, PORT188_DATA,
- PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
- PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
- PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA,
- PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
- PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
- PORT167_DATA, 0, 0, 0,
- 0, 0, 0, 0,
- }
- },
- { PINMUX_DATA_REG("PORTU127_096DR", 0xE605700C, 32) {
- 0, 0, 0, 0,
- 0, 0, 0, PORT120_DATA,
- PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
- PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
- PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
- PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
- PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
- 0, 0, 0, 0,
- }
- },
- { PINMUX_DATA_REG("PORTU159_128DR", 0xE6057010, 32) {
- 0, 0, 0, 0,
- PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- }
- },
- { },
-};
-
-#define EXT_IRQ16L(n) evt2irq(0x200 + ((n) << 5))
-#define EXT_IRQ16H(n) evt2irq(0x3200 + (((n) - 16) << 5))
-static const struct pinmux_irq pinmux_irqs[] = {
- PINMUX_IRQ(EXT_IRQ16L(0), 6, 162),
- PINMUX_IRQ(EXT_IRQ16L(1), 12),
- PINMUX_IRQ(EXT_IRQ16L(2), 4, 5),
- PINMUX_IRQ(EXT_IRQ16L(3), 8, 16),
- PINMUX_IRQ(EXT_IRQ16L(4), 17, 163),
- PINMUX_IRQ(EXT_IRQ16L(5), 18),
- PINMUX_IRQ(EXT_IRQ16L(6), 39, 164),
- PINMUX_IRQ(EXT_IRQ16L(7), 40, 167),
- PINMUX_IRQ(EXT_IRQ16L(8), 41, 168),
- PINMUX_IRQ(EXT_IRQ16L(9), 42, 169),
- PINMUX_IRQ(EXT_IRQ16L(10), 65),
- PINMUX_IRQ(EXT_IRQ16L(11), 67),
- PINMUX_IRQ(EXT_IRQ16L(12), 80, 137),
- PINMUX_IRQ(EXT_IRQ16L(13), 81, 145),
- PINMUX_IRQ(EXT_IRQ16L(14), 82, 146),
- PINMUX_IRQ(EXT_IRQ16L(15), 83, 147),
- PINMUX_IRQ(EXT_IRQ16H(16), 84, 170),
- PINMUX_IRQ(EXT_IRQ16H(17), 85),
- PINMUX_IRQ(EXT_IRQ16H(18), 86),
- PINMUX_IRQ(EXT_IRQ16H(19), 87),
- PINMUX_IRQ(EXT_IRQ16H(20), 92),
- PINMUX_IRQ(EXT_IRQ16H(21), 93),
- PINMUX_IRQ(EXT_IRQ16H(22), 94),
- PINMUX_IRQ(EXT_IRQ16H(23), 95),
- PINMUX_IRQ(EXT_IRQ16H(24), 112),
- PINMUX_IRQ(EXT_IRQ16H(25), 119),
- PINMUX_IRQ(EXT_IRQ16H(26), 121, 172),
- PINMUX_IRQ(EXT_IRQ16H(27), 122, 180),
- PINMUX_IRQ(EXT_IRQ16H(28), 123, 181),
- PINMUX_IRQ(EXT_IRQ16H(29), 129, 182),
- PINMUX_IRQ(EXT_IRQ16H(30), 130, 183),
- PINMUX_IRQ(EXT_IRQ16H(31), 138, 184),
-};
-
-#define PORTnCR_PULMD_OFF (0 << 6)
-#define PORTnCR_PULMD_DOWN (2 << 6)
-#define PORTnCR_PULMD_UP (3 << 6)
-#define PORTnCR_PULMD_MASK (3 << 6)
-
-struct sh7372_portcr_group {
- unsigned int end_pin;
- unsigned int offset;
-};
-
-static const struct sh7372_portcr_group sh7372_portcr_offsets[] = {
- { 45, 0x1000 }, { 75, 0x2000 }, { 99, 0x0000 }, { 120, 0x3000 },
- { 151, 0x0000 }, { 155, 0x3000 }, { 166, 0x0000 }, { 190, 0x2000 },
-};
-
-static void __iomem *sh7372_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(sh7372_portcr_offsets); ++i) {
- const struct sh7372_portcr_group *group =
- &sh7372_portcr_offsets[i];
-
- if (pin <= group->end_pin)
- return pfc->windows->virt + group->offset + pin;
- }
-
- return NULL;
-}
-
-static unsigned int sh7372_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
-{
- void __iomem *addr = sh7372_pinmux_portcr(pfc, pin);
- u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
-
- switch (value) {
- case PORTnCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTnCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTnCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void sh7372_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- void __iomem *addr = sh7372_pinmux_portcr(pfc, pin);
- u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTnCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTnCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
-}
-
-static const struct sh_pfc_soc_operations sh7372_pfc_ops = {
- .get_bias = sh7372_pinmux_get_bias,
- .set_bias = sh7372_pinmux_set_bias,
-};
-
-const struct sh_pfc_soc_info sh7372_pinmux_info = {
- .name = "sh7372_pfc",
- .ops = &sh7372_pfc_ops,
-
- .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
-
- .pins = pinmux_pins,
- .nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
-
- .cfg_regs = pinmux_config_regs,
- .data_regs = pinmux_data_regs,
-
- .gpio_data = pinmux_data,
- .gpio_data_size = ARRAY_SIZE(pinmux_data),
-
- .gpio_irq = pinmux_irqs,
- .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
-};
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 910deaefa0ac..072e7c62cab7 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -122,7 +122,7 @@ static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
return ret;
}
- ret = pinconf_generic_parse_dt_config(np, &configs, &num_configs);
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
if (ret < 0)
return ret;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 5b7283182c1e..c83728626906 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -167,6 +167,8 @@ struct sh_pfc_soc_info {
PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr)
#define PINMUX_IPSR_NOGM(ispr, fn, ms) \
PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ms)
+#define PINMUX_IPSR_NOFN(ipsr, fn, ms) \
+ PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##ms)
#define PINMUX_IPSR_MSEL(ipsr, fn, ms) \
PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr, FN_##ms)
#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) \
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 4871647c7f85..2a1f07249b2f 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -38,7 +38,6 @@ struct sirfsoc_gpio_bank {
struct sirfsoc_gpio_chip {
struct of_mm_gpio_chip chip;
- bool is_marco; /* for marco, some registers are different with prima2 */
struct sirfsoc_gpio_bank sgpio_bank[SIRFSOC_GPIO_NO_OF_BANKS];
};
@@ -149,23 +148,14 @@ static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx,
for (i = 0; i < mux->muxmask_counts; i++) {
u32 muxval;
- if (!spmx->is_marco) {
- muxval = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(mask[i].group));
- if (enable)
- muxval = muxval & ~mask[i].mask;
- else
- muxval = muxval | mask[i].mask;
- writel(muxval, spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(mask[i].group));
- } else {
- if (enable)
- writel(mask[i].mask, spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN_CLR(mask[i].group));
- else
- writel(mask[i].mask, spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(mask[i].group));
- }
+ muxval = readl(spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(mask[i].group));
+ if (enable)
+ muxval = muxval & ~mask[i].mask;
+ else
+ muxval = muxval | mask[i].mask;
+ writel(muxval, spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(mask[i].group));
}
if (mux->funcmask && enable) {
@@ -223,16 +213,11 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
spmx = pinctrl_dev_get_drvdata(pmxdev);
- if (!spmx->is_marco) {
- muxval = readl(spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(group));
- muxval = muxval | (1 << (offset - range->pin_base));
- writel(muxval, spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(group));
- } else {
- writel(1 << (offset - range->pin_base), spmx->gpio_virtbase +
- SIRFSOC_GPIO_PAD_EN(group));
- }
+ muxval = readl(spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(group));
+ muxval = muxval | (1 << (offset - range->pin_base));
+ writel(muxval, spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(group));
return 0;
}
@@ -256,7 +241,6 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)
{
const struct of_device_id rsc_ids[] = {
{ .compatible = "sirf,prima2-rsc" },
- { .compatible = "sirf,marco-rsc" },
{}
};
struct device_node *np;
@@ -284,7 +268,6 @@ static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
static const struct of_device_id pinmux_ids[] = {
{ .compatible = "sirf,prima2-pinctrl", .data = &prima2_pinctrl_data, },
{ .compatible = "sirf,atlas6-pinctrl", .data = &atlas6_pinctrl_data, },
- { .compatible = "sirf,marco-pinctrl", .data = &prima2_pinctrl_data, },
{}
};
@@ -317,9 +300,6 @@ static int sirfsoc_pinmux_probe(struct platform_device *pdev)
goto out_no_rsc_remap;
}
- if (of_device_is_compatible(np, "sirf,marco-pinctrl"))
- spmx->is_marco = 1;
-
pdata = of_match_node(pinmux_ids, np)->data;
sirfsoc_pin_groups = pdata->grps;
sirfsoc_pingrp_cnt = pdata->grps_cnt;
@@ -803,7 +783,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
- bool is_marco = false;
u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
@@ -819,9 +798,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
if (!regs)
return -ENOMEM;
- if (of_device_is_compatible(np, "sirf,marco-pinctrl"))
- is_marco = 1;
-
sgpio->chip.gc.request = sirfsoc_gpio_request;
sgpio->chip.gc.free = sirfsoc_gpio_free;
sgpio->chip.gc.direction_input = sirfsoc_gpio_direction_input;
@@ -836,7 +812,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
sgpio->chip.gc.of_gpio_n_cells = 2;
sgpio->chip.gc.dev = &pdev->dev;
sgpio->chip.regs = regs;
- sgpio->is_marco = is_marco;
err = gpiochip_add(&sgpio->chip.gc);
if (err) {
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.h b/drivers/pinctrl/sirf/pinctrl-sirf.h
index d7f16b499ad9..9550335fe57a 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.h
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.h
@@ -49,7 +49,6 @@ struct sirfsoc_pmx {
u32 paden_regs[SIRFSOC_GPIO_NO_OF_BANKS];
u32 dspen_regs;
u32 rsc_regs[3];
- bool is_marco;
};
/* SIRFSOC_GPIO_PAD_EN set */
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 230a952608cb..2eb893e0ea1e 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -21,6 +21,10 @@ config PINCTRL_SUN6I_A31
def_bool MACH_SUN6I
select PINCTRL_SUNXI_COMMON
+config PINCTRL_SUN6I_A31S
+ def_bool MACH_SUN6I
+ select PINCTRL_SUNXI_COMMON
+
config PINCTRL_SUN6I_A31_R
def_bool MACH_SUN6I
depends on RESET_CONTROLLER
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index c7d92e4673b5..b796d579dce6 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PINCTRL_SUN4I_A10) += pinctrl-sun4i-a10.o
obj-$(CONFIG_PINCTRL_SUN5I_A10S) += pinctrl-sun5i-a10s.o
obj-$(CONFIG_PINCTRL_SUN5I_A13) += pinctrl-sun5i-a13.o
obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o
+obj-$(CONFIG_PINCTRL_SUN6I_A31S) += pinctrl-sun6i-a31s.o
obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index f42858eaca28..18038f0d6b52 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -134,24 +134,28 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D15 */
+ SUNXI_FUNCTION(0x4, "clk_out_a"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D16 */
+ SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D17 */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DIN */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D18 */
+ SUNXI_FUNCTION(0x4, "clk_out_b"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -207,6 +211,7 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* MDC */
SUNXI_FUNCTION(0x3, "lcd1"), /* HSYNC */
+ SUNXI_FUNCTION(0x4, "clk_out_c"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 26)), /* PA_EINT26 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 27),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
new file mode 100644
index 000000000000..9b5a91f610c7
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
@@ -0,0 +1,815 @@
+/*
+ * Allwinner A31s SoCs pinctrl driver.
+ *
+ * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on pinctrl-sun6i-a31.c, which is:
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun6i_a31s_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD1 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RING */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD4 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD5 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD6 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD7 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* CMD */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* GTXCLK */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* CLK */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D0 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D1 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D2 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D3 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */
+ SUNXI_FUNCTION(0x4, "clk_out_a"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */
+ SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */
+ SUNXI_FUNCTION(0x4, "clk_out_b"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXDV */
+ SUNXI_FUNCTION(0x4, "pwm3"), /* Positive */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXCLK */
+ SUNXI_FUNCTION(0x4, "pwm3"), /* Negative */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXERR */
+ SUNXI_FUNCTION(0x4, "spi3"), /* CS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
+ SUNXI_FUNCTION(0x4, "spi3"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 22)), /* PA_EINT22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* COL */
+ SUNXI_FUNCTION(0x4, "spi3"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 23)), /* PA_EINT23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* CRS */
+ SUNXI_FUNCTION(0x4, "spi3"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 24)), /* PA_EINT24 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* CLKIN */
+ SUNXI_FUNCTION(0x4, "spi3"), /* CS1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 25)), /* PA_EINT25 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* MDC */
+ SUNXI_FUNCTION(0x4, "clk_out_c"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 26)), /* PA_EINT26 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* MDIO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 27)), /* PA_EINT27 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PB_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PB_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PB_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PB_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO1 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PB_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO2 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PB_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO3 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PB_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PB_EINT7 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* RE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D4 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D5 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D6 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D7 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D7 */
+ /* Hole in pin numbering ! */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* RST */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* RST */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "ts"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PE_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "ts"), /* ERR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PE_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "ts"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PE_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "ts"), /* DVLD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PE_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PE_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PE_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PE_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PE_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PE_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PE_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PE_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PE_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D8 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PE_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D9 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PE_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D10 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PE_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D11 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D7 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PG_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PG_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PG_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PG_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PG_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PG_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PG_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 16)), /* PG_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 17)), /* PG_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 18)), /* PG_EINT18 */
+ /* Hole, note H starts at pin 9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
+ SUNXI_FUNCTION(0x4, "pwm1")), /* Positive */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
+ SUNXI_FUNCTION(0x4, "pwm1")), /* Negative */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
+ SUNXI_FUNCTION(0x4, "pwm2")), /* Positive */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
+ SUNXI_FUNCTION(0x4, "pwm2")), /* Negative */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 28),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+};
+
+static const struct sunxi_pinctrl_desc sun6i_a31s_pinctrl_data = {
+ .pins = sun6i_a31s_pins,
+ .npins = ARRAY_SIZE(sun6i_a31s_pins),
+ .irq_banks = 4,
+};
+
+static int sun6i_a31s_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun6i_a31s_pinctrl_data);
+}
+
+static struct of_device_id sun6i_a31s_pinctrl_match[] = {
+ { .compatible = "allwinner,sun6i-a31s-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun6i_a31s_pinctrl_match);
+
+static struct platform_driver sun6i_a31s_pinctrl_driver = {
+ .probe = sun6i_a31s_pinctrl_probe,
+ .driver = {
+ .name = "sun6i-a31s-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_a31s_pinctrl_match,
+ },
+};
+module_platform_driver(sun6i_a31s_pinctrl_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Allwinner A31s pinctrl driver");
+MODULE_LICENSE("GPL");
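For readers skimming the table above: each SUNXI_PIN() entry ties one pad to the list of mux functions it supports, and each SUNXI_FUNCTION() pairs a mux register value with a function name. The standalone C sketch below models that same pin/function lookup; the types, names and the PG6 entry are illustrative only and are not the kernel's SUNXI_* macros or data.

/* Standalone illustration of the pin -> (muxval, function) table pattern
 * used by the driver above.  Types, names and the PG6 entry are
 * illustrative; they are not the kernel's SUNXI_* macros. */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct func_desc {
	unsigned char muxval;     /* value programmed into the port controller */
	const char *name;         /* e.g. "gpio_in", "mmc1", "uart2" */
};

struct pin_desc {
	const char *pin;                  /* e.g. "PG6" */
	const struct func_desc *funcs;
	size_t nfuncs;
};

/* Modeled on the PG6 entry above: gpio_in, gpio_out, uart2 TX, irq. */
static const struct func_desc pg6_funcs[] = {
	{ 0x0, "gpio_in" },
	{ 0x1, "gpio_out" },
	{ 0x2, "uart2" },
	{ 0x6, "irq" },
};

static const struct pin_desc pins[] = {
	{ "PG6", pg6_funcs, sizeof(pg6_funcs) / sizeof(pg6_funcs[0]) },
};

/* Return the mux value for a (pin, function) pair, or -1 if unsupported. */
static int lookup_muxval(const char *pin, const char *func)
{
	size_t i, j;

	for (i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
		if (strcmp(pins[i].pin, pin))
			continue;
		for (j = 0; j < pins[i].nfuncs; j++)
			if (!strcmp(pins[i].funcs[j].name, func))
				return pins[i].funcs[j].muxval;
	}
	return -1;
}

int main(void)
{
	printf("PG6/uart2 -> mux 0x%x\n", lookup_muxval("PG6", "uart2"));
	return 0;
}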
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 9411eae39a4e..3d21efe11d7b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,11 +2,9 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
- * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package:
- * Copyright (C) 2005-2014 Dell Inc.
+ * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ * Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -34,13 +32,6 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
-
- int needs_kbd_timeouts;
- /*
- * Ordered list of timeouts expressed in seconds.
- * The list must end with -1
- */
- int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
-/*
- * These values come from a Windows utility provided by Dell. If any other value
- * is used, the BIOS silently sets the timeout to 0 without any error message.
- */
-static struct quirk_entry quirk_dell_xps13_9333 = {
- .needs_kbd_timeouts = 1,
- .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
-};
-
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
- {
- .callback = dmi_matched,
- .ident = "Dell XPS13 9333",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
- },
- .driver_data = &quirk_dell_xps13_9333,
- },
{ }
};
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_id(int tokenid)
+static int find_token_location(int tokenid)
{
int i;
-
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return i;
+ return da_tokens[i].location;
}
return -1;
}
-static int find_token_location(int tokenid)
-{
- int id;
-
- id = find_token_id(tokenid);
- if (id == -1)
- return -1;
-
- return da_tokens[id].location;
-}
-
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
-static inline int dell_smi_error(int value)
-{
- switch (value) {
- case 0: /* Completed successfully */
- return 0;
- case -1: /* Completed with error */
- return -EIO;
- case -2: /* Function not supported */
- return -ENXIO;
- default: /* Unknown error */
- return -EINVAL;
- }
-}
-
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
- out:
+out:
release_buffer();
return ret;
}
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
- out:
+out:
release_buffer();
return ret;
}
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-/*
- * Derived from information in smbios-keyboard-ctl:
- *
- * cbClass 4
- * cbSelect 11
- * Keyboard illumination
- * cbArg1 determines the function to be performed
- *
- * cbArg1 0x0 = Get Feature Information
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of user-selectable modes
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * cbRES2, byte2 Reserved for future use
- * cbRES2, byte3 Keyboard illumination type
- * 0 Reserved
- * 1 Tasklight
- * 2 Backlight
- * 3-255 Reserved for future use
- * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES3, byte1 Supported timeout unit bitmap
- * bit 0 Seconds
- * bit 1 Minutes
- * bit 2 Hours
- * bit 3 Days
- * bits 4-7 Reserved for future use
- * cbRES3, byte2 Number of keyboard light brightness levels
- * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
- * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
- * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
- * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
- *
- * cbArg1 0x1 = Get Current State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only One bit can be set
- * cbRES2, byte2 Currently active auto keyboard illumination triggers.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES2, byte3 Current Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
- * are set upon return from the [Get feature information] call.
- * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
- * cbRES3, byte1 Current ALS reading
- * cbRES3, byte2 Current keyboard light level.
- *
- * cbArg1 0x2 = Set New State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbArg2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only One bit can be set
- * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
- * keyboard to turn off automatically.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbArg2, byte3 Desired Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
- * cbArg3, byte2 Desired keyboard light level.
- */
-
-
-enum kbd_timeout_unit {
- KBD_TIMEOUT_SECONDS = 0,
- KBD_TIMEOUT_MINUTES,
- KBD_TIMEOUT_HOURS,
- KBD_TIMEOUT_DAYS,
-};
-
-enum kbd_mode_bit {
- KBD_MODE_BIT_OFF = 0,
- KBD_MODE_BIT_ON,
- KBD_MODE_BIT_ALS,
- KBD_MODE_BIT_TRIGGER_ALS,
- KBD_MODE_BIT_TRIGGER,
- KBD_MODE_BIT_TRIGGER_25,
- KBD_MODE_BIT_TRIGGER_50,
- KBD_MODE_BIT_TRIGGER_75,
- KBD_MODE_BIT_TRIGGER_100,
-};
-
-#define kbd_is_als_mode_bit(bit) \
- ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
-#define kbd_is_trigger_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-#define kbd_is_level_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-
-struct kbd_info {
- u16 modes;
- u8 type;
- u8 triggers;
- u8 levels;
- u8 seconds;
- u8 minutes;
- u8 hours;
- u8 days;
-};
-
-struct kbd_state {
- u8 mode_bit;
- u8 triggers;
- u8 timeout_value;
- u8 timeout_unit;
- u8 als_setting;
- u8 als_value;
- u8 level;
-};
-
-static const int kbd_tokens[] = {
- KBD_LED_OFF_TOKEN,
- KBD_LED_AUTO_25_TOKEN,
- KBD_LED_AUTO_50_TOKEN,
- KBD_LED_AUTO_75_TOKEN,
- KBD_LED_AUTO_100_TOKEN,
- KBD_LED_ON_TOKEN,
-};
-
-static u16 kbd_token_bits;
-
-static struct kbd_info kbd_info;
-static bool kbd_als_supported;
-static bool kbd_triggers_supported;
-
-static u8 kbd_mode_levels[16];
-static int kbd_mode_levels_count;
-
-static u8 kbd_previous_level;
-static u8 kbd_previous_mode_bit;
-
-static bool kbd_led_present;
-
-/*
- * NOTE: there are three ways to set the keyboard backlight level.
- * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
- * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
- * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
- *
- * There are laptops which support only one of these methods. If we want to
- * support as many machines as possible we need to implement all three methods.
- * The first two methods use the kbd_state structure. The third uses SMBIOS
- * tokens. If kbd_info.levels == 0, the machine does not support setting the
- * keyboard backlight level via kbd_state.level.
- */
-
-static int kbd_get_info(struct kbd_info *info)
-{
- u8 units;
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x0;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- info->modes = buffer->output[1] & 0xFFFF;
- info->type = (buffer->output[1] >> 24) & 0xFF;
- info->triggers = buffer->output[2] & 0xFF;
- units = (buffer->output[2] >> 8) & 0xFF;
- info->levels = (buffer->output[2] >> 16) & 0xFF;
-
- if (units & BIT(0))
- info->seconds = (buffer->output[3] >> 0) & 0xFF;
- if (units & BIT(1))
- info->minutes = (buffer->output[3] >> 8) & 0xFF;
- if (units & BIT(2))
- info->hours = (buffer->output[3] >> 16) & 0xFF;
- if (units & BIT(3))
- info->days = (buffer->output[3] >> 24) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static unsigned int kbd_get_max_level(void)
-{
- if (kbd_info.levels != 0)
- return kbd_info.levels;
- if (kbd_mode_levels_count > 0)
- return kbd_mode_levels_count - 1;
- return 0;
-}
-
-static int kbd_get_level(struct kbd_state *state)
-{
- int i;
-
- if (kbd_info.levels != 0)
- return state->level;
-
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < kbd_mode_levels_count; ++i)
- if (kbd_mode_levels[i] == state->mode_bit)
- return i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_set_level(struct kbd_state *state, u8 level)
-{
- if (kbd_info.levels != 0) {
- if (level != 0)
- kbd_previous_level = level;
- if (state->level == level)
- return 0;
- state->level = level;
- if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
- state->mode_bit = kbd_previous_mode_bit;
- else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit = state->mode_bit;
- state->mode_bit = KBD_MODE_BIT_OFF;
- }
- return 0;
- }
-
- if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
- if (level != 0)
- kbd_previous_level = level;
- state->mode_bit = kbd_mode_levels[level];
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_get_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x1;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
- if (state->mode_bit != 0)
- state->mode_bit--;
-
- state->triggers = (buffer->output[1] >> 16) & 0xFF;
- state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
- state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
- state->als_setting = buffer->output[2] & 0xFF;
- state->als_value = (buffer->output[2] >> 8) & 0xFF;
- state->level = (buffer->output[2] >> 16) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static int kbd_set_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
- buffer->input[0] = 0x2;
- buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
- buffer->input[1] |= (state->triggers & 0xFF) << 16;
- buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
- buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
- buffer->input[2] = state->als_setting & 0xFF;
- buffer->input[2] |= (state->level & 0xFF) << 16;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
-{
- int ret;
-
- ret = kbd_set_state(state);
- if (ret == 0)
- return 0;
-
- /*
- * When setting the new state fails, try to restore the previous one.
- * This is needed on some machines where BIOS sets a default state when
- * setting a new state fails. This default state could be all off.
- */
-
- if (kbd_set_state(old))
- pr_err("Setting old previous keyboard state failed\n");
-
- return ret;
-}
-
-static int kbd_set_token_bit(u8 bit)
-{
- int id;
- int ret;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- buffer->input[1] = da_tokens[id].value;
- dell_send_request(buffer, 1, 0);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_get_token_bit(u8 bit)
-{
- int id;
- int ret;
- int val;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- dell_send_request(buffer, 0, 0);
- ret = buffer->output[0];
- val = buffer->output[1];
- release_buffer();
-
- if (ret)
- return dell_smi_error(ret);
-
- return (val == da_tokens[id].value);
-}
-
-static int kbd_get_first_active_token_bit(void)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
- ret = kbd_get_token_bit(i);
- if (ret == 1)
- return i;
- }
-
- return ret;
-}
-
-static int kbd_get_valid_token_counts(void)
-{
- return hweight16(kbd_token_bits);
-}
-
-static inline int kbd_init_info(void)
-{
- struct kbd_state state;
- int ret;
- int i;
-
- ret = kbd_get_info(&kbd_info);
- if (ret)
- return ret;
-
- kbd_get_state(&state);
-
- /* NOTE: timeout value is stored in 6 bits so max value is 63 */
- if (kbd_info.seconds > 63)
- kbd_info.seconds = 63;
- if (kbd_info.minutes > 63)
- kbd_info.minutes = 63;
- if (kbd_info.hours > 63)
- kbd_info.hours = 63;
- if (kbd_info.days > 63)
- kbd_info.days = 63;
-
- /* NOTE: On tested machines ON mode did not work and caused
- * problems (turned backlight off) so do not use it
- */
- kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
-
- kbd_previous_level = kbd_get_level(&state);
- kbd_previous_mode_bit = state.mode_bit;
-
- if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
- kbd_previous_level = 1;
-
- if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit =
- ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
- if (kbd_previous_mode_bit != 0)
- kbd_previous_mode_bit--;
- }
-
- if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
- BIT(KBD_MODE_BIT_TRIGGER_ALS)))
- kbd_als_supported = true;
-
- if (kbd_info.modes & (
- BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
- BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
- BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
- ))
- kbd_triggers_supported = true;
-
- /* kbd_mode_levels[0] is reserved, see below */
- for (i = 0; i < 16; ++i)
- if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
- kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
-
- /*
- * Find the first supported mode and assign to kbd_mode_levels[0].
- * This should be 0 (off), but we cannot depend on the BIOS to
- * support 0.
- */
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < 16; ++i) {
- if (BIT(i) & kbd_info.modes) {
- kbd_mode_levels[0] = i;
- break;
- }
- }
- kbd_mode_levels_count++;
- }
-
- return 0;
-
-}
-
-static inline void kbd_init_tokens(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
- if (find_token_id(kbd_tokens[i]) != -1)
- kbd_token_bits |= BIT(i);
-}
-
-static void kbd_init(void)
-{
- int ret;
-
- ret = kbd_init_info();
- kbd_init_tokens();
-
- if (kbd_token_bits != 0 || ret == 0)
- kbd_led_present = true;
-}
-
-static ssize_t kbd_led_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool convert;
- int value;
- int ret;
- char ch;
- u8 unit;
- int i;
-
- ret = sscanf(buf, "%d %c", &value, &ch);
- if (ret < 1)
- return -EINVAL;
- else if (ret == 1)
- ch = 's';
-
- if (value < 0)
- return -EINVAL;
-
- convert = false;
-
- switch (ch) {
- case 's':
- if (value > kbd_info.seconds)
- convert = true;
- unit = KBD_TIMEOUT_SECONDS;
- break;
- case 'm':
- if (value > kbd_info.minutes)
- convert = true;
- unit = KBD_TIMEOUT_MINUTES;
- break;
- case 'h':
- if (value > kbd_info.hours)
- convert = true;
- unit = KBD_TIMEOUT_HOURS;
- break;
- case 'd':
- if (value > kbd_info.days)
- convert = true;
- unit = KBD_TIMEOUT_DAYS;
- break;
- default:
- return -EINVAL;
- }
-
- if (quirks && quirks->needs_kbd_timeouts)
- convert = true;
-
- if (convert) {
- /* Convert value from current units to seconds */
- switch (unit) {
- case KBD_TIMEOUT_DAYS:
- value *= 24;
- case KBD_TIMEOUT_HOURS:
- value *= 60;
- case KBD_TIMEOUT_MINUTES:
- value *= 60;
- unit = KBD_TIMEOUT_SECONDS;
- }
-
- if (quirks && quirks->needs_kbd_timeouts) {
- for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
- if (value <= quirks->kbd_timeouts[i]) {
- value = quirks->kbd_timeouts[i];
- break;
- }
- }
- }
-
- if (value <= kbd_info.seconds && kbd_info.seconds) {
- unit = KBD_TIMEOUT_SECONDS;
- } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
- value /= 60;
- unit = KBD_TIMEOUT_MINUTES;
- } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
- value /= (60 * 60);
- unit = KBD_TIMEOUT_HOURS;
- } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
- value /= (60 * 60 * 24);
- unit = KBD_TIMEOUT_DAYS;
- } else {
- return -EINVAL;
- }
- }
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.timeout_value = value;
- new_state.timeout_unit = unit;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t kbd_led_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
- int len;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = sprintf(buf, "%d", state.timeout_value);
-
- switch (state.timeout_unit) {
- case KBD_TIMEOUT_SECONDS:
- return len + sprintf(buf+len, "s\n");
- case KBD_TIMEOUT_MINUTES:
- return len + sprintf(buf+len, "m\n");
- case KBD_TIMEOUT_HOURS:
- return len + sprintf(buf+len, "h\n");
- case KBD_TIMEOUT_DAYS:
- return len + sprintf(buf+len, "d\n");
- default:
- return -EINVAL;
- }
-
- return len;
-}
-
-static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
- kbd_led_timeout_show, kbd_led_timeout_store);
-
-static const char * const kbd_led_triggers[] = {
- "keyboard",
- "touchpad",
- /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
- "mouse",
-};
-
-static ssize_t kbd_led_triggers_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool triggers_enabled = false;
- bool als_enabled = false;
- bool disable_als = false;
- bool enable_als = false;
- int trigger_bit = -1;
- char trigger[21];
- int i, ret;
-
- ret = sscanf(buf, "%20s", trigger);
- if (ret != 1)
- return -EINVAL;
-
- if (trigger[0] != '+' && trigger[0] != '-')
- return -EINVAL;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- if (kbd_als_supported)
- als_enabled = kbd_is_als_mode_bit(state.mode_bit);
-
- if (kbd_triggers_supported)
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-
- if (kbd_als_supported) {
- if (strcmp(trigger, "+als") == 0) {
- if (als_enabled)
- return count;
- enable_als = true;
- } else if (strcmp(trigger, "-als") == 0) {
- if (!als_enabled)
- return count;
- disable_als = true;
- }
- }
-
- if (enable_als || disable_als) {
- new_state = state;
- if (enable_als) {
- if (triggers_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- } else {
- if (triggers_enabled) {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- } else {
- new_state.mode_bit = KBD_MODE_BIT_ON;
- }
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- if (kbd_triggers_supported) {
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
- continue;
- if (trigger[0] == '+' &&
- triggers_enabled && (state.triggers & BIT(i)))
- return count;
- if (trigger[0] == '-' &&
- (!triggers_enabled || !(state.triggers & BIT(i))))
- return count;
- trigger_bit = i;
- break;
- }
- }
-
- if (trigger_bit != -1) {
- new_state = state;
- if (trigger[0] == '+')
- new_state.triggers |= BIT(trigger_bit);
- else {
- new_state.triggers &= ~BIT(trigger_bit);
- /* NOTE: trackstick bit (2) must be disabled when
- * disabling touchpad bit (1), otherwise touchpad
- * bit (1) will not be disabled */
- if (trigger_bit == 1)
- new_state.triggers &= ~BIT(2);
- }
- if ((kbd_info.triggers & new_state.triggers) !=
- new_state.triggers)
- return -EINVAL;
- if (new_state.triggers && !triggers_enabled) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- }
- } else if (new_state.triggers == 0) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- else
- kbd_set_level(&new_state, 0);
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- if (new_state.mode_bit != KBD_MODE_BIT_OFF)
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t kbd_led_triggers_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- bool triggers_enabled;
- int level, i, ret;
- int len = 0;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = 0;
-
- if (kbd_triggers_supported) {
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
- level = kbd_get_level(&state);
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if ((triggers_enabled || level <= 0) &&
- (state.triggers & BIT(i)))
- buf[len++] = '+';
- else
- buf[len++] = '-';
- len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
- }
- }
-
- if (kbd_als_supported) {
- if (kbd_is_als_mode_bit(state.mode_bit))
- len += sprintf(buf+len, "+als ");
- else
- len += sprintf(buf+len, "-als ");
- }
-
- if (len)
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
- kbd_led_triggers_show, kbd_led_triggers_store);
-
-static ssize_t kbd_led_als_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u8 setting;
- int ret;
-
- ret = kstrtou8(buf, 10, &setting);
- if (ret)
- return ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.als_setting = setting;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t kbd_led_als_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", state.als_setting);
-}
-
-static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
- kbd_led_als_show, kbd_led_als_store);
-
-static struct attribute *kbd_led_attrs[] = {
- &dev_attr_stop_timeout.attr,
- &dev_attr_start_triggers.attr,
- &dev_attr_als_setting.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(kbd_led);
-
-static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
-{
- int ret;
- u16 num;
- struct kbd_state state;
-
- if (kbd_get_max_level()) {
- ret = kbd_get_state(&state);
- if (ret)
- return 0;
- ret = kbd_get_level(&state);
- if (ret < 0)
- return 0;
- return ret;
- }
-
- if (kbd_get_valid_token_counts()) {
- ret = kbd_get_first_active_token_bit();
- if (ret < 0)
- return 0;
- for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return 0;
- return ffs(num) - 1;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
- return 0;
-}
-
-static void kbd_led_level_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u16 num;
-
- if (kbd_get_max_level()) {
- if (kbd_get_state(&state))
- return;
- new_state = state;
- if (kbd_set_level(&new_state, value))
- return;
- kbd_set_state_safe(&new_state, &state);
- return;
- }
-
- if (kbd_get_valid_token_counts()) {
- for (num = kbd_token_bits; num != 0 && value > 0; --value)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return;
- kbd_set_token_bit(ffs(num) - 1);
- return;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
-}
-
-static struct led_classdev kbd_led = {
- .name = "dell::kbd_backlight",
- .brightness_set = kbd_led_level_set,
- .brightness_get = kbd_led_level_get,
- .groups = kbd_led_groups,
-};
-
-static int __init kbd_led_init(struct device *dev)
-{
- kbd_init();
- if (!kbd_led_present)
- return -ENODEV;
- kbd_led.max_brightness = kbd_get_max_level();
- if (!kbd_led.max_brightness) {
- kbd_led.max_brightness = kbd_get_valid_token_counts();
- if (kbd_led.max_brightness)
- kbd_led.max_brightness--;
- }
- return led_classdev_register(dev, &kbd_led);
-}
-
-static void brightness_set_exit(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- /* Don't change backlight level on exit */
-};
-
-static void kbd_led_exit(void)
-{
- if (!kbd_led_present)
- return;
- kbd_led.brightness_set = brightness_set_exit;
- led_classdev_unregister(&kbd_led);
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
- kbd_led_init(&platform_device->dev);
-
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
- kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
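The dell-laptop hunks above remove the keyboard-backlight support; its Set New State call (cbArg1 = 0x2) packed the mode bitmap, triggers and timeout into one 32-bit SMI buffer word, as visible in the deleted kbd_set_state(). A minimal standalone sketch of just that packing, with illustrative names and no SMI call, looks like this:

/* Standalone sketch of the cbArg2 packing visible in the removed
 * kbd_set_state(); names and types here are illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct kbd_state_sketch {
	uint8_t mode_bit;       /* bit index into the mode bitmap (word0) */
	uint8_t triggers;       /* keystroke/touchpad/mouse trigger bits  */
	uint8_t timeout_value;  /* 0-63                                   */
	uint8_t timeout_unit;   /* 0=s, 1=m, 2=h, 3=d                     */
};

static uint32_t pack_kbd_arg2(const struct kbd_state_sketch *s)
{
	uint32_t arg = (1u << s->mode_bit) & 0xFFFF;      /* word0: mode bitmap */

	arg |= (uint32_t)(s->triggers & 0xFF) << 16;      /* byte2: triggers    */
	arg |= (uint32_t)(s->timeout_value & 0x3F) << 24; /* byte3, bits 5:0    */
	arg |= (uint32_t)(s->timeout_unit & 0x3) << 30;   /* byte3, bits 7:6    */
	return arg;
}

int main(void)
{
	struct kbd_state_sketch s = {
		.mode_bit = 4,      /* input-activity triggered mode */
		.triggers = 0x1,    /* any keystroke                 */
		.timeout_value = 15,
		.timeout_unit = 0,  /* seconds                       */
	};

	printf("cbArg2 = 0x%08x\n", (unsigned int)pack_kbd_arg2(&s));
	return 0;
}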
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index f748cc8cbb03..4e57d3370368 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -182,7 +182,7 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
return error;
}
- if (pnp_dev->protocol->suspend)
+ if (pnp_can_suspend(pnp_dev))
pnp_dev->protocol->suspend(pnp_dev, state);
return 0;
}
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 66977ebf13b3..ff0356fb378f 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -180,20 +180,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
struct pnp_dev *dev = data;
struct acpi_resource_dma *dma;
struct acpi_resource_vendor_typed *vendor_typed;
- struct resource r = {0};
+ struct resource_win win = {{0}, 0};
+ struct resource *r = &win.res;
int i, flags;
- if (acpi_dev_resource_address_space(res, &r)
- || acpi_dev_resource_ext_address_space(res, &r)) {
- pnp_add_resource(dev, &r);
+ if (acpi_dev_resource_address_space(res, &win)
+ || acpi_dev_resource_ext_address_space(res, &win)) {
+ pnp_add_resource(dev, &win.res);
return AE_OK;
}
- r.flags = 0;
- if (acpi_dev_resource_interrupt(res, 0, &r)) {
- pnpacpi_add_irqresource(dev, &r);
- for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++)
- pnpacpi_add_irqresource(dev, &r);
+ r->flags = 0;
+ if (acpi_dev_resource_interrupt(res, 0, r)) {
+ pnpacpi_add_irqresource(dev, r);
+ for (i = 1; acpi_dev_resource_interrupt(res, i, r); i++)
+ pnpacpi_add_irqresource(dev, r);
if (i > 1) {
/*
@@ -209,7 +210,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
}
}
return AE_OK;
- } else if (r.flags & IORESOURCE_DISABLED) {
+ } else if (r->flags & IORESOURCE_DISABLED) {
pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
return AE_OK;
}
@@ -218,13 +219,13 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_MEMORY24:
case ACPI_RESOURCE_TYPE_MEMORY32:
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
- if (acpi_dev_resource_memory(res, &r))
- pnp_add_resource(dev, &r);
+ if (acpi_dev_resource_memory(res, r))
+ pnp_add_resource(dev, r);
break;
case ACPI_RESOURCE_TYPE_IO:
case ACPI_RESOURCE_TYPE_FIXED_IO:
- if (acpi_dev_resource_io(res, &r))
- pnp_add_resource(dev, &r);
+ if (acpi_dev_resource_io(res, r))
+ pnp_add_resource(dev, r);
break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
@@ -410,12 +411,12 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
- pnp_register_mem_resource(dev, option_flags, p->minimum,
- p->minimum, 0, p->address_length,
+ pnp_register_mem_resource(dev, option_flags, p->address.minimum,
+ p->address.minimum, 0, p->address.address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
- pnp_register_port_resource(dev, option_flags, p->minimum,
- p->minimum, 0, p->address_length,
+ pnp_register_port_resource(dev, option_flags, p->address.minimum,
+ p->address.minimum, 0, p->address.address_length,
IORESOURCE_IO_FIXED);
}
@@ -429,12 +430,12 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
- pnp_register_mem_resource(dev, option_flags, p->minimum,
- p->minimum, 0, p->address_length,
+ pnp_register_mem_resource(dev, option_flags, p->address.minimum,
+ p->address.minimum, 0, p->address.address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
- pnp_register_port_resource(dev, option_flags, p->minimum,
- p->minimum, 0, p->address_length,
+ pnp_register_port_resource(dev, option_flags, p->address.minimum,
+ p->address.minimum, 0, p->address.address_length,
IORESOURCE_IO_FIXED);
}
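The rsparser change above swaps a bare struct resource for a struct resource_win that embeds it, so the address-space parsers can fill in window-specific data while every caller that only needs the plain resource keeps working through a pointer to the embedded member. A generic sketch of that embedding pattern, with made-up types rather than the kernel's, is:

/* Generic sketch of "embed the common struct, pass a pointer to the
 * member", as in the resource_win change above.  Types are made up. */
#include <stdio.h>

struct res {                  /* stands in for struct resource */
	unsigned long start, end, flags;
};

struct res_win {              /* stands in for struct resource_win */
	struct res res;       /* common part, usable on its own */
	unsigned long offset; /* extra data only some parsers need */
};

/* A consumer that only needs the common part. */
static void add_resource(const struct res *r)
{
	printf("resource [%#lx-%#lx]\n", r->start, r->end);
}

/* A parser that needs the extra field fills in the whole wrapper;
 * returns 1 on success, 0 otherwise. */
static int parse_window(struct res_win *win)
{
	win->res.start = 0x1000;
	win->res.end = 0x1fff;
	win->res.flags = 0x200;
	win->offset = 0x100;
	return 1;
}

int main(void)
{
	struct res_win win = { { 0 }, 0 };
	struct res *r = &win.res;      /* same trick as the patch above */

	if (parse_window(&win))
		add_resource(r);       /* common-part consumers still work */
	return 0;
}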
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 074569e77d22..facd43b8516c 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -181,7 +181,8 @@ static int pnp_dock_thread(void *unused)
break;
default:
pnpbios_print_status("pnp_dock_thread", status);
- continue;
+ printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n");
+ complete_and_exit(&unload_sem, 0);
}
if (d != docked) {
if (pnp_dock_event(d, &now) == 0) {
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
index 650930e4fa79..734ec4afa14d 100644
--- a/drivers/power/88pm860x_charger.c
+++ b/drivers/power/88pm860x_charger.c
@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev)
return 0;
out_irq:
+ power_supply_unregister(&info->usb);
while (--i >= 0)
free_irq(info->irq[i], info);
out:
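The one-line 88pm860x fix above is the usual goto-based unwind rule: anything registered before the failing step must be released on the error path. A generic sketch of the pattern follows; the helper names are hypothetical and not this driver's API.

/* Generic sketch of goto-based error unwinding, the pattern behind the
 * one-line fix above.  All helper names here are hypothetical. */
#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { printf("unregistered a\n"); }
static int register_b(void) { return -1; }   /* pretend this step fails */

static int probe_sketch(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto out;

	ret = register_b();
	if (ret)
		goto out_unreg_a;      /* must undo register_a() here */

	return 0;

out_unreg_a:
	unregister_a();
out:
	return ret;
}

int main(void)
{
	printf("probe_sketch() = %d\n", probe_sketch());
	return 0;
}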
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 0108c2af005b..27b751b995fb 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -315,7 +315,7 @@ config CHARGER_GPIO
config CHARGER_MANAGER
bool "Battery charger manager for multiple chargers"
- depends on REGULATOR && RTC_CLASS
+ depends on REGULATOR
select EXTCON
help
Say Y to enable charger-manager support, which allows multiple
@@ -327,11 +327,16 @@ config CHARGER_MANAGER
config CHARGER_MAX14577
tristate "Maxim MAX14577/77836 battery charger driver"
depends on MFD_MAX14577
- depends on SYSFS
help
Say Y to enable support for the battery charger control sysfs and
platform data of MAX14577/77836 MUICs.
+config CHARGER_MAX77693
+ tristate "Maxim MAX77693 battery charger driver"
+ depends on MFD_MAX77693
+ help
+ Say Y to enable support for the Maxim MAX77693 battery charger.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
@@ -383,6 +388,14 @@ config CHARGER_TPS65090
Say Y here to enable support for battery charging with TPS65090
PMIC chips.
+config BATTERY_GAUGE_LTC2941
+ tristate "LTC2941/LTC2943 Battery Gauge Driver"
+ depends on I2C
+ help
+ Say Y here to include support for LTC2941 and LTC2943 Battery
+ Gauge IC. The driver reports the charge count continuously, and
+ measures the voltage and temperature every 10 seconds.
+
config AB8500_BM
bool "AB8500 Battery Management Driver"
depends on AB8500_CORE && AB8500_GPADC
@@ -397,6 +410,14 @@ config BATTERY_GOLDFISH
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
+config BATTERY_RT5033
+ tristate "RT5033 fuel gauge support"
+ depends on MFD_RT5033
+ help
+ This adds support for battery fuel gauge in Richtek RT5033 PMIC.
+ The fuelgauge calculates and determines the battery state of charge
+ according to battery open circuit voltage.
+
source "drivers/power/reset/Kconfig"
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index dfa894273926..36f9e0d10111 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
+obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
+obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
@@ -50,6 +52,7 @@ obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
+obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 69b80bcaa9e7..c908658aa31a 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2435,20 +2435,6 @@ static void ab8500_fg_reinit_work(struct work_struct *work)
}
}
-/**
- * ab8500_fg_reinit() - forces FG algorithm to reinitialize with current values
- *
- * This function can be used to force the FG algorithm to recalculate a new
- * voltage based battery capacity.
- */
-void ab8500_fg_reinit(void)
-{
- struct ab8500_fg *di = ab8500_fg_get();
- /* User won't be notified if a null pointer returned. */
- if (di != NULL)
- queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
-}
-
/* Exposure to the sysfs interface */
struct ab8500_fg_sysfs_entry {
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index ad3ff8fbfbbb..d0e8236a6404 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger)
charger->properties = bq24190_charger_properties;
charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
charger->supplied_to = bq24190_charger_supplied_to;
- charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
+ charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
charger->get_property = bq24190_charger_get_property;
charger->set_property = bq24190_charger_set_property;
charger->property_is_writeable = bq24190_charger_property_is_writeable;
@@ -1208,7 +1208,7 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
{
struct bq24190_dev_info *bdi = data;
bool alert_userspace = false;
- u8 ss_reg, f_reg;
+ u8 ss_reg = 0, f_reg = 0;
int ret;
pm_runtime_get_sync(bdi->dev);
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index a78ac201828e..b72ba7c1bd69 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -76,7 +76,8 @@
/* bq27425 register addresses are same as bq27x00 addresses minus 4 */
#define BQ27425_REG_OFFSET 0x04
-#define BQ27425_REG_SOC 0x18 /* Register address plus offset */
+#define BQ27425_REG_SOC (0x1C + BQ27425_REG_OFFSET)
+#define BQ27425_REG_DCAP (0x3C + BQ27425_REG_OFFSET)
#define BQ27000_RS 20 /* Resistor sense */
#define BQ27x00_POWER_CONSTANT (256 * 29200 / 1000)
@@ -282,9 +283,12 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
{
int ilmd;
- if (bq27xxx_is_chip_version_higher(di))
- ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
- else
+ if (bq27xxx_is_chip_version_higher(di)) {
+ if (di->chip == BQ27425)
+ ilmd = bq27x00_read(di, BQ27425_REG_DCAP, false);
+ else
+ ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
+ } else
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
if (ilmd < 0) {
@@ -493,10 +497,11 @@ static void bq27x00_update(struct bq27x00_device_info *di)
di->charge_design_full = bq27x00_battery_read_ilmd(di);
}
- if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) {
- di->cache = cache;
+ if (di->cache.capacity != cache.capacity)
power_supply_changed(&di->bat);
- }
+
+ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+ di->cache = cache;
di->last_update = jiffies;
}
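The BQ27425_REG_SOC change above follows the convention stated in the neighbouring comment: each define stores the generic address (chip register plus BQ27425_REG_OFFSET), and the bq27425 read path is assumed to subtract that offset again, so the register actually accessed is the define minus 0x04. A tiny sketch of that translation (names are illustrative, not the driver's):

/* Sketch of the address translation implied by the comment above:
 * each define holds (chip register + BQ27425_REG_OFFSET) and the
 * bq27425 read path is assumed to subtract the offset back out. */
#include <stdio.h>

#define SK_REG_OFFSET 0x04
#define SK_REG_SOC    (0x1C + SK_REG_OFFSET)   /* new-style define: 0x20 */
#define SK_OLD_SOC    0x18                     /* old define             */

static int chip_reg(int generic_reg)
{
	return generic_reg - SK_REG_OFFSET;    /* what the read helper does */
}

int main(void)
{
	printf("new define -> chip register 0x%02x (SOC lives at 0x1C)\n",
	       chip_reg(SK_REG_SOC));
	printf("old define -> chip register 0x%02x (the wrong register)\n",
	       chip_reg(SK_OLD_SOC));
	return 0;
}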
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 649052e1f2d9..14b0d85318eb 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -69,16 +69,10 @@ static LIST_HEAD(cm_list);
static DEFINE_MUTEX(cm_list_mtx);
/* About in-suspend (suspend-again) monitoring */
-static struct rtc_device *rtc_dev;
-/*
- * Backup RTC alarm
- * Save the wakeup alarm before entering suspend-to-RAM
- */
-static struct rtc_wkalrm rtc_wkalarm_save;
-/* Backup RTC alarm time in terms of seconds since 01-01-1970 00:00:00 */
-static unsigned long rtc_wkalarm_save_time;
+static struct alarm *cm_timer;
+
static bool cm_suspended;
-static bool cm_rtc_set;
+static bool cm_timer_set;
static unsigned long cm_suspend_duration_ms;
/* About normal (not suspended) monitoring */
@@ -87,9 +81,6 @@ static unsigned long next_polling; /* Next appointed polling time */
static struct workqueue_struct *cm_wq; /* init at driver add */
static struct delayed_work cm_monitor_work; /* init at driver add */
-/* Global charger-manager description */
-static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
-
/**
* is_batt_present - See if the battery presents in place.
* @cm: the Charger Manager representing the battery.
@@ -1047,10 +1038,13 @@ static bool cm_setup_timer(void)
{
struct charger_manager *cm;
unsigned int wakeup_ms = UINT_MAX;
- bool ret = false;
+ int timer_req = 0;
- mutex_lock(&cm_list_mtx);
+ if (time_after(next_polling, jiffies))
+ CM_MIN_VALID(wakeup_ms,
+ jiffies_to_msecs(next_polling - jiffies));
+ mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
unsigned int fbchk_ms = 0;
@@ -1070,162 +1064,38 @@ static bool cm_setup_timer(void)
/* Skip if polling is not required for this CM */
if (!is_polling_required(cm) && !cm->emergency_stop)
continue;
+ timer_req++;
if (cm->desc->polling_interval_ms == 0)
continue;
CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms);
}
-
mutex_unlock(&cm_list_mtx);
- if (wakeup_ms < UINT_MAX && wakeup_ms > 0) {
- pr_info("Charger Manager wakeup timer: %u ms\n", wakeup_ms);
- if (rtc_dev) {
- struct rtc_wkalrm tmp;
- unsigned long time, now;
- unsigned long add = DIV_ROUND_UP(wakeup_ms, 1000);
-
- /*
- * Set alarm with the polling interval (wakeup_ms)
- * except when rtc_wkalarm_save comes first.
- * However, the alarm time should be NOW +
- * CM_RTC_SMALL or later.
- */
- tmp.enabled = 1;
- rtc_read_time(rtc_dev, &tmp.time);
- rtc_tm_to_time(&tmp.time, &now);
- if (add < CM_RTC_SMALL)
- add = CM_RTC_SMALL;
- time = now + add;
+ if (timer_req && cm_timer) {
+ ktime_t now, add;
- ret = true;
+ /*
+ * Set alarm with the polling interval (wakeup_ms)
+ * The alarm time should be NOW + CM_RTC_SMALL or later.
+ */
+ if (wakeup_ms == UINT_MAX ||
+ wakeup_ms < CM_RTC_SMALL * MSEC_PER_SEC)
+ wakeup_ms = 2 * CM_RTC_SMALL * MSEC_PER_SEC;
- if (rtc_wkalarm_save.enabled &&
- rtc_wkalarm_save_time &&
- rtc_wkalarm_save_time < time) {
- if (rtc_wkalarm_save_time < now + CM_RTC_SMALL)
- time = now + CM_RTC_SMALL;
- else
- time = rtc_wkalarm_save_time;
+ pr_info("Charger Manager wakeup timer: %u ms\n", wakeup_ms);
- /* The timer is not appointed by CM */
- ret = false;
- }
+ now = ktime_get_boottime();
+ add = ktime_set(wakeup_ms / MSEC_PER_SEC,
+ (wakeup_ms % MSEC_PER_SEC) * NSEC_PER_MSEC);
+ alarm_start(cm_timer, ktime_add(now, add));
- pr_info("Waking up after %lu secs\n", time - now);
+ cm_suspend_duration_ms = wakeup_ms;
- rtc_time_to_tm(time, &tmp.time);
- rtc_set_alarm(rtc_dev, &tmp);
- cm_suspend_duration_ms += wakeup_ms;
- return ret;
- }
+ return true;
}
-
- if (rtc_dev)
- rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
return false;
}
-static void _cm_fbchk_in_suspend(struct charger_manager *cm)
-{
- unsigned long jiffy_now = jiffies;
-
- if (!cm->fullbatt_vchk_jiffies_at)
- return;
-
- if (g_desc && g_desc->assume_timer_stops_in_suspend)
- jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
-
- /* Execute now if it's going to be executed not too long after */
- jiffy_now += CM_JIFFIES_SMALL;
-
- if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
- fullbatt_vchk(&cm->fullbatt_vchk_work.work);
-}
-
-/**
- * cm_suspend_again - Determine whether suspend again or not
- *
- * Returns true if the system should be suspended again
- * Returns false if the system should be woken up
- */
-bool cm_suspend_again(void)
-{
- struct charger_manager *cm;
- bool ret = false;
-
- if (!g_desc || !g_desc->rtc_only_wakeup || !g_desc->rtc_only_wakeup() ||
- !cm_rtc_set)
- return false;
-
- if (cm_monitor())
- goto out;
-
- ret = true;
- mutex_lock(&cm_list_mtx);
- list_for_each_entry(cm, &cm_list, entry) {
- _cm_fbchk_in_suspend(cm);
-
- if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
- cm->status_save_batt != is_batt_present(cm)) {
- ret = false;
- break;
- }
- }
- mutex_unlock(&cm_list_mtx);
-
- cm_rtc_set = cm_setup_timer();
-out:
- /* It's about the time when the non-CM appointed timer goes off */
- if (rtc_wkalarm_save.enabled) {
- unsigned long now;
- struct rtc_time tmp;
-
- rtc_read_time(rtc_dev, &tmp);
- rtc_tm_to_time(&tmp, &now);
-
- if (rtc_wkalarm_save_time &&
- now + CM_RTC_SMALL >= rtc_wkalarm_save_time)
- return false;
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(cm_suspend_again);
-
-/**
- * setup_charger_manager - initialize charger_global_desc data
- * @gd: pointer to instance of charger_global_desc
- */
-int setup_charger_manager(struct charger_global_desc *gd)
-{
- if (!gd)
- return -EINVAL;
-
- if (rtc_dev)
- rtc_class_close(rtc_dev);
- rtc_dev = NULL;
- g_desc = NULL;
-
- if (!gd->rtc_only_wakeup) {
- pr_err("The callback rtc_only_wakeup is not given\n");
- return -EINVAL;
- }
-
- if (gd->rtc_name) {
- rtc_dev = rtc_class_open(gd->rtc_name);
- if (IS_ERR_OR_NULL(rtc_dev)) {
- rtc_dev = NULL;
- /* Retry at probe. RTC may be not registered yet */
- }
- } else {
- pr_warn("No wakeup timer is given for charger manager. "
- "In-suspend monitoring won't work.\n");
- }
-
- g_desc = gd;
- return 0;
-}
-EXPORT_SYMBOL_GPL(setup_charger_manager);
-
/**
 * charger_extcon_work - enable/disable charger according to the state
* of charger cable
@@ -1719,6 +1589,12 @@ static inline struct charger_desc *cm_get_drv_data(struct platform_device *pdev)
return dev_get_platdata(&pdev->dev);
}
+static enum alarmtimer_restart cm_timer_func(struct alarm *alarm, ktime_t now)
+{
+ cm_timer_set = false;
+ return ALARMTIMER_NORESTART;
+}
+
static int charger_manager_probe(struct platform_device *pdev)
{
struct charger_desc *desc = cm_get_drv_data(pdev);
@@ -1728,16 +1604,6 @@ static int charger_manager_probe(struct platform_device *pdev)
union power_supply_propval val;
struct power_supply *fuel_gauge;
- if (g_desc && !rtc_dev && g_desc->rtc_name) {
- rtc_dev = rtc_class_open(g_desc->rtc_name);
- if (IS_ERR_OR_NULL(rtc_dev)) {
- rtc_dev = NULL;
- dev_err(&pdev->dev, "Cannot get RTC %s\n",
- g_desc->rtc_name);
- return -ENODEV;
- }
- }
-
if (IS_ERR(desc)) {
dev_err(&pdev->dev, "No platform data (desc) found\n");
return -ENODEV;
@@ -1752,6 +1618,12 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->dev = &pdev->dev;
cm->desc = desc;
+ /* Initialize alarm timer */
+ if (alarmtimer_get_rtcdev()) {
+ cm_timer = devm_kzalloc(cm->dev, sizeof(*cm_timer), GFP_KERNEL);
+ alarm_init(cm_timer, ALARM_BOOTTIME, cm_timer_func);
+ }
+
/*
* The following two do not need to be errors.
* Users may intentionally ignore those two features.
@@ -1993,38 +1865,41 @@ static int cm_suspend_noirq(struct device *dev)
return ret;
}
+static bool cm_need_to_awake(void)
+{
+ struct charger_manager *cm;
+
+ if (cm_timer)
+ return false;
+
+ mutex_lock(&cm_list_mtx);
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (is_charging(cm)) {
+ mutex_unlock(&cm_list_mtx);
+ return true;
+ }
+ }
+ mutex_unlock(&cm_list_mtx);
+
+ return false;
+}
+
static int cm_suspend_prepare(struct device *dev)
{
struct charger_manager *cm = dev_get_drvdata(dev);
- if (!cm_suspended) {
- if (rtc_dev) {
- struct rtc_time tmp;
- unsigned long now;
-
- rtc_read_alarm(rtc_dev, &rtc_wkalarm_save);
- rtc_read_time(rtc_dev, &tmp);
+ if (cm_need_to_awake())
+ return -EBUSY;
- if (rtc_wkalarm_save.enabled) {
- rtc_tm_to_time(&rtc_wkalarm_save.time,
- &rtc_wkalarm_save_time);
- rtc_tm_to_time(&tmp, &now);
- if (now > rtc_wkalarm_save_time)
- rtc_wkalarm_save_time = 0;
- } else {
- rtc_wkalarm_save_time = 0;
- }
- }
+ if (!cm_suspended)
cm_suspended = true;
- }
- cancel_delayed_work(&cm->fullbatt_vchk_work);
- cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
- cm->status_save_batt = is_batt_present(cm);
+ cm_timer_set = cm_setup_timer();
- if (!cm_rtc_set) {
- cm_suspend_duration_ms = 0;
- cm_rtc_set = cm_setup_timer();
+ if (cm_timer_set) {
+ cancel_work_sync(&setup_polling);
+ cancel_delayed_work_sync(&cm_monitor_work);
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
}
return 0;
@@ -2034,18 +1909,21 @@ static void cm_suspend_complete(struct device *dev)
{
struct charger_manager *cm = dev_get_drvdata(dev);
- if (cm_suspended) {
- if (rtc_dev) {
- struct rtc_wkalrm tmp;
-
- rtc_read_alarm(rtc_dev, &tmp);
- rtc_wkalarm_save.pending = tmp.pending;
- rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
- }
+ if (cm_suspended)
cm_suspended = false;
- cm_rtc_set = false;
+
+ if (cm_timer_set) {
+ ktime_t remain;
+
+ alarm_cancel(cm_timer);
+ cm_timer_set = false;
+ remain = alarm_expires_remaining(cm_timer);
+ cm_suspend_duration_ms -= ktime_to_ms(remain);
+ schedule_work(&setup_polling);
}
+ _cm_monitor(cm);
+
/* Re-enqueue delayed work (fullbatt_vchk_work) */
if (cm->fullbatt_vchk_jiffies_at) {
unsigned long delay = 0;
@@ -2060,21 +1938,18 @@ static void cm_suspend_complete(struct device *dev)
}
/*
- * Account for cm_suspend_duration_ms if
- * assume_timer_stops_in_suspend is active
+	 * Account for cm_suspend_duration_ms, assuming that the
+	 * timer stops in suspend.
*/
- if (g_desc && g_desc->assume_timer_stops_in_suspend) {
- if (delay > cm_suspend_duration_ms)
- delay -= cm_suspend_duration_ms;
- else
- delay = 0;
- }
+ if (delay > cm_suspend_duration_ms)
+ delay -= cm_suspend_duration_ms;
+ else
+ delay = 0;
queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
msecs_to_jiffies(delay));
}
device_set_wakeup_capable(cm->dev, false);
- uevent_notify(cm, NULL);
}
static const struct dev_pm_ops charger_manager_pm = {
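
cm_setup_timer() above clamps the polling interval and then splits it into the second/nanosecond pair that ktime_set() takes. A standalone sketch of that arithmetic; CM_RTC_SMALL is assumed here to be a couple of seconds, its real value is defined elsewhere in the driver and not shown in the hunk:

    #include <stdio.h>
    #include <limits.h>

    #define MSEC_PER_SEC    1000UL
    #define NSEC_PER_MSEC   1000000UL
    #define CM_RTC_SMALL    2UL     /* seconds, assumed for illustration */

    int main(void)
    {
            unsigned long wakeup_ms = 500;  /* shorter than the minimum */

            if (wakeup_ms == UINT_MAX ||
                wakeup_ms < CM_RTC_SMALL * MSEC_PER_SEC)
                    wakeup_ms = 2 * CM_RTC_SMALL * MSEC_PER_SEC;

            printf("alarm fires in %lu s + %lu ns\n",
                   wakeup_ms / MSEC_PER_SEC,
                   (wakeup_ms % MSEC_PER_SEC) * NSEC_PER_MSEC);
            return 0;
    }
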
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index d02ae02a7590..594e4dbc2d51 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -26,6 +26,7 @@
static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
static struct work_struct bat_work;
static struct ucb1x00 *ucb;
+static int wakeup_enabled;
struct collie_bat {
int status;
@@ -291,11 +292,21 @@ static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
/* flush all pending status updates */
flush_work(&bat_work);
+
+ if (device_may_wakeup(&dev->ucb->dev) &&
+ collie_bat_main.status == POWER_SUPPLY_STATUS_CHARGING)
+ wakeup_enabled = !enable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
+ else
+ wakeup_enabled = 0;
+
return 0;
}
static int collie_bat_resume(struct ucb1x00_dev *dev)
{
+ if (wakeup_enabled)
+ disable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
+
/* things may have changed while we were away */
schedule_work(&bat_work);
return 0;
@@ -334,10 +345,15 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
collie_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"main full", &collie_bat_main);
- if (!ret) {
- schedule_work(&bat_work);
- return 0;
- }
+ if (ret)
+ goto err_irq;
+
+ device_init_wakeup(&ucb->dev, 1);
+ schedule_work(&bat_work);
+
+ return 0;
+
+err_irq:
power_supply_unregister(&collie_bat_bu.psy);
err_psy_reg_bu:
power_supply_unregister(&collie_bat_main.psy);
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index aef74bdf7ab3..b7424c8501f1 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -229,7 +229,7 @@ static int gpio_charger_suspend(struct device *dev)
if (device_may_wakeup(dev))
gpio_charger->wakeup_enabled =
- enable_irq_wake(gpio_charger->irq);
+ !enable_irq_wake(gpio_charger->irq);
return 0;
}
@@ -239,7 +239,7 @@ static int gpio_charger_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
- if (gpio_charger->wakeup_enabled)
+ if (device_may_wakeup(dev) && gpio_charger->wakeup_enabled)
disable_irq_wake(gpio_charger->irq);
power_supply_changed(&gpio_charger->charger);
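
enable_irq_wake() returns 0 on success, so the fix records the negated return value: the flag then means "this IRQ was actually armed as a wake source", and resume only disarms it when both the flag and device_may_wakeup() still hold. A minimal userspace model of that convention, with a stub standing in for the kernel call:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for enable_irq_wake(): returns 0 on success, -errno on failure. */
    static int enable_irq_wake_stub(bool will_succeed)
    {
            return will_succeed ? 0 : -ENXIO;
    }

    int main(void)
    {
            bool wakeup_enabled;

            wakeup_enabled = !enable_irq_wake_stub(true);   /* armed */
            printf("armed: %d\n", wakeup_enabled);          /* 1 */

            wakeup_enabled = !enable_irq_wake_stub(false);  /* arming failed */
            printf("armed: %d\n", wakeup_enabled);          /* 0: resume skips disable_irq_wake() */
            return 0;
    }
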
diff --git a/drivers/power/ltc2941-battery-gauge.c b/drivers/power/ltc2941-battery-gauge.c
new file mode 100644
index 000000000000..e31c927a6d16
--- /dev/null
+++ b/drivers/power/ltc2941-battery-gauge.c
@@ -0,0 +1,547 @@
+/*
+ * I2C client/driver for the Linear Technology LTC2941 and LTC2943
+ * Battery Gas Gauge IC
+ *
+ * Copyright (C) 2014 Topic Embedded Systems
+ *
+ * Author: Auryn Verwegen
+ * Author: Mike Looijmans
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/swab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#define I16_MSB(x) ((x >> 8) & 0xFF)
+#define I16_LSB(x) (x & 0xFF)
+
+#define LTC294X_WORK_DELAY 10 /* Update delay in seconds */
+
+#define LTC294X_MAX_VALUE 0xFFFF
+#define LTC294X_MID_SUPPLY 0x7FFF
+
+#define LTC2941_MAX_PRESCALER_EXP 7
+#define LTC2943_MAX_PRESCALER_EXP 6
+
+enum ltc294x_reg {
+ LTC294X_REG_STATUS = 0x00,
+ LTC294X_REG_CONTROL = 0x01,
+ LTC294X_REG_ACC_CHARGE_MSB = 0x02,
+ LTC294X_REG_ACC_CHARGE_LSB = 0x03,
+ LTC294X_REG_THRESH_HIGH_MSB = 0x04,
+ LTC294X_REG_THRESH_HIGH_LSB = 0x05,
+ LTC294X_REG_THRESH_LOW_MSB = 0x06,
+ LTC294X_REG_THRESH_LOW_LSB = 0x07,
+ LTC294X_REG_VOLTAGE_MSB = 0x08,
+ LTC294X_REG_VOLTAGE_LSB = 0x09,
+ LTC294X_REG_CURRENT_MSB = 0x0E,
+ LTC294X_REG_CURRENT_LSB = 0x0F,
+ LTC294X_REG_TEMPERATURE_MSB = 0x14,
+ LTC294X_REG_TEMPERATURE_LSB = 0x15,
+};
+
+#define LTC2943_REG_CONTROL_MODE_MASK (BIT(7) | BIT(6))
+#define LTC2943_REG_CONTROL_MODE_SCAN BIT(7)
+#define LTC294X_REG_CONTROL_PRESCALER_MASK (BIT(5) | BIT(4) | BIT(3))
+#define LTC294X_REG_CONTROL_SHUTDOWN_MASK (BIT(0))
+#define LTC294X_REG_CONTROL_PRESCALER_SET(x) \
+ ((x << 3) & LTC294X_REG_CONTROL_PRESCALER_MASK)
+#define LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED 0
+
+#define LTC2941_NUM_REGS 0x08
+#define LTC2943_NUM_REGS 0x18
+
+struct ltc294x_info {
+ struct i2c_client *client; /* I2C Client pointer */
+ struct power_supply supply; /* Supply pointer */
+ struct delayed_work work; /* Work scheduler */
+ int num_regs; /* Number of registers (chip type) */
+ int id; /* Identifier of ltc294x chip */
+ int charge; /* Last charge register content */
+ int r_sense; /* mOhm */
+ int Qlsb; /* nAh */
+};
+
+static DEFINE_IDR(ltc294x_id);
+static DEFINE_MUTEX(ltc294x_lock);
+
+static inline int convert_bin_to_uAh(
+ const struct ltc294x_info *info, int Q)
+{
+ return ((Q * (info->Qlsb / 10))) / 100;
+}
+
+static inline int convert_uAh_to_bin(
+ const struct ltc294x_info *info, int uAh)
+{
+ int Q;
+
+ Q = (uAh * 100) / (info->Qlsb/10);
+ return (Q < LTC294X_MAX_VALUE) ? Q : LTC294X_MAX_VALUE;
+}
+
+static int ltc294x_read_regs(struct i2c_client *client,
+ enum ltc294x_reg reg, u8 *buf, int num_regs)
+{
+ int ret;
+ struct i2c_msg msgs[2] = { };
+ u8 reg_start = reg;
+
+ msgs[0].addr = client->addr;
+ msgs[0].len = 1;
+ msgs[0].buf = &reg_start;
+
+ msgs[1].addr = client->addr;
+ msgs[1].len = num_regs;
+ msgs[1].buf = buf;
+ msgs[1].flags = I2C_M_RD;
+
+ ret = i2c_transfer(client->adapter, &msgs[0], 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "ltc2941 read_reg failed!\n");
+ return ret;
+ }
+
+ dev_dbg(&client->dev, "%s (%#x, %d) -> %#x\n",
+ __func__, reg, num_regs, *buf);
+
+ return 0;
+}
+
+static int ltc294x_write_regs(struct i2c_client *client,
+ enum ltc294x_reg reg, const u8 *buf, int num_regs)
+{
+ int ret;
+ u8 reg_start = reg;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg_start, num_regs, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "ltc2941 write_reg failed!\n");
+ return ret;
+ }
+
+ dev_dbg(&client->dev, "%s (%#x, %d) -> %#x\n",
+ __func__, reg, num_regs, *buf);
+
+ return 0;
+}
+
+static int ltc294x_reset(const struct ltc294x_info *info, int prescaler_exp)
+{
+ int ret;
+ u8 value;
+ u8 control;
+
+ /* Read status and control registers */
+ ret = ltc294x_read_regs(info->client, LTC294X_REG_CONTROL, &value, 1);
+ if (ret < 0) {
+ dev_err(&info->client->dev,
+ "Could not read registers from device\n");
+ goto error_exit;
+ }
+
+ control = LTC294X_REG_CONTROL_PRESCALER_SET(prescaler_exp) |
+ LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED;
+ /* Put the 2943 into "monitor" mode, so it measures every 10 sec */
+ if (info->num_regs == LTC2943_NUM_REGS)
+ control |= LTC2943_REG_CONTROL_MODE_SCAN;
+
+ if (value != control) {
+ ret = ltc294x_write_regs(info->client,
+ LTC294X_REG_CONTROL, &control, 1);
+ if (ret < 0) {
+ dev_err(&info->client->dev,
+ "Could not write register\n");
+ goto error_exit;
+ }
+ }
+
+ return 0;
+
+error_exit:
+ return ret;
+}
+
+static int ltc294x_read_charge_register(const struct ltc294x_info *info)
+{
+ int ret;
+ u8 datar[2];
+
+ ret = ltc294x_read_regs(info->client,
+ LTC294X_REG_ACC_CHARGE_MSB, &datar[0], 2);
+ if (ret < 0)
+ return ret;
+ return (datar[0] << 8) + datar[1];
+}
+
+static int ltc294x_get_charge_now(const struct ltc294x_info *info, int *val)
+{
+ int value = ltc294x_read_charge_register(info);
+
+ if (value < 0)
+ return value;
+ /* When r_sense < 0, this counts up when the battery discharges */
+ if (info->Qlsb < 0)
+ value -= 0xFFFF;
+ *val = convert_bin_to_uAh(info, value);
+ return 0;
+}
+
+static int ltc294x_set_charge_now(const struct ltc294x_info *info, int val)
+{
+ int ret;
+ u8 dataw[2];
+ u8 ctrl_reg;
+ s32 value;
+
+ value = convert_uAh_to_bin(info, val);
+ /* Direction depends on how sense+/- were connected */
+ if (info->Qlsb < 0)
+ value += 0xFFFF;
+ if ((value < 0) || (value > 0xFFFF)) /* input validation */
+ return -EINVAL;
+
+ /* Read control register */
+ ret = ltc294x_read_regs(info->client,
+ LTC294X_REG_CONTROL, &ctrl_reg, 1);
+ if (ret < 0)
+ return ret;
+ /* Disable analog section */
+ ctrl_reg |= LTC294X_REG_CONTROL_SHUTDOWN_MASK;
+ ret = ltc294x_write_regs(info->client,
+ LTC294X_REG_CONTROL, &ctrl_reg, 1);
+ if (ret < 0)
+ return ret;
+ /* Set new charge value */
+ dataw[0] = I16_MSB(value);
+ dataw[1] = I16_LSB(value);
+ ret = ltc294x_write_regs(info->client,
+ LTC294X_REG_ACC_CHARGE_MSB, &dataw[0], 2);
+ if (ret < 0)
+ goto error_exit;
+ /* Enable analog section */
+error_exit:
+ ctrl_reg &= ~LTC294X_REG_CONTROL_SHUTDOWN_MASK;
+ ret = ltc294x_write_regs(info->client,
+ LTC294X_REG_CONTROL, &ctrl_reg, 1);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ltc294x_get_charge_counter(
+ const struct ltc294x_info *info, int *val)
+{
+ int value = ltc294x_read_charge_register(info);
+
+ if (value < 0)
+ return value;
+ value -= LTC294X_MID_SUPPLY;
+ *val = convert_bin_to_uAh(info, value);
+ return 0;
+}
+
+static int ltc294x_get_voltage(const struct ltc294x_info *info, int *val)
+{
+ int ret;
+ u8 datar[2];
+ u32 value;
+
+ ret = ltc294x_read_regs(info->client,
+ LTC294X_REG_VOLTAGE_MSB, &datar[0], 2);
+ value = (datar[0] << 8) | datar[1];
+ *val = ((value * 23600) / 0xFFFF) * 1000; /* in uV */
+ return ret;
+}
+
+static int ltc294x_get_current(const struct ltc294x_info *info, int *val)
+{
+ int ret;
+ u8 datar[2];
+ s32 value;
+
+ ret = ltc294x_read_regs(info->client,
+ LTC294X_REG_CURRENT_MSB, &datar[0], 2);
+ value = (datar[0] << 8) | datar[1];
+ value -= 0x7FFF;
+ /* Value is in range -32k..+32k, r_sense is usually 10..50 mOhm,
+ * the formula below keeps everything in s32 range while preserving
+ * enough digits */
+ *val = 1000 * ((60000 * value) / (info->r_sense * 0x7FFF)); /* in uA */
+ return ret;
+}
+
+static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val)
+{
+ int ret;
+ u8 datar[2];
+ u32 value;
+
+ ret = ltc294x_read_regs(info->client,
+ LTC294X_REG_TEMPERATURE_MSB, &datar[0], 2);
+ value = (datar[0] << 8) | datar[1];
+ /* Full-scale is 510 Kelvin, convert to centidegrees */
+ *val = (((51000 * value) / 0xFFFF) - 27215);
+ return ret;
+}
+
+static int ltc294x_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct ltc294x_info *info =
+ container_of(psy, struct ltc294x_info, supply);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return ltc294x_get_charge_now(info, &val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ return ltc294x_get_charge_counter(info, &val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return ltc294x_get_voltage(info, &val->intval);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ltc294x_get_current(info, &val->intval);
+ case POWER_SUPPLY_PROP_TEMP:
+ return ltc294x_get_temperature(info, &val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc294x_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct ltc294x_info *info =
+ container_of(psy, struct ltc294x_info, supply);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return ltc294x_set_charge_now(info, val->intval);
+ default:
+ return -EPERM;
+ }
+}
+
+static int ltc294x_property_is_writeable(
+ struct power_supply *psy, enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void ltc294x_update(struct ltc294x_info *info)
+{
+ int charge = ltc294x_read_charge_register(info);
+
+ if (charge != info->charge) {
+ info->charge = charge;
+ power_supply_changed(&info->supply);
+ }
+}
+
+static void ltc294x_work(struct work_struct *work)
+{
+ struct ltc294x_info *info;
+
+ info = container_of(work, struct ltc294x_info, work.work);
+ ltc294x_update(info);
+ schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
+}
+
+static enum power_supply_property ltc294x_properties[] = {
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static int ltc294x_i2c_remove(struct i2c_client *client)
+{
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+
+ cancel_delayed_work(&info->work);
+ power_supply_unregister(&info->supply);
+ kfree(info->supply.name);
+ mutex_lock(&ltc294x_lock);
+ idr_remove(&ltc294x_id, info->id);
+ mutex_unlock(&ltc294x_lock);
+ return 0;
+}
+
+static int ltc294x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ltc294x_info *info;
+ int ret;
+ int num;
+ u32 prescaler_exp;
+ s32 r_sense;
+ struct device_node *np;
+
+ mutex_lock(&ltc294x_lock);
+ ret = idr_alloc(&ltc294x_id, client, 0, 0, GFP_KERNEL);
+ mutex_unlock(&ltc294x_lock);
+ if (ret < 0)
+ goto fail_id;
+
+ num = ret;
+
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto fail_info;
+ }
+
+ i2c_set_clientdata(client, info);
+
+ info->num_regs = id->driver_data;
+ info->supply.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num);
+ if (!info->supply.name) {
+ ret = -ENOMEM;
+ goto fail_name;
+ }
+
+ np = of_node_get(client->dev.of_node);
+
+ /* r_sense can be negative, when sense+ is connected to the battery
+ * instead of the sense-. This results in reversed measurements. */
+ ret = of_property_read_u32(np, "lltc,resistor-sense", &r_sense);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Could not find lltc,resistor-sense in devicetree\n");
+ goto fail_name;
+ }
+ info->r_sense = r_sense;
+
+ ret = of_property_read_u32(np, "lltc,prescaler-exponent",
+ &prescaler_exp);
+ if (ret < 0) {
+ dev_warn(&client->dev,
+ "lltc,prescaler-exponent not in devicetree\n");
+ prescaler_exp = LTC2941_MAX_PRESCALER_EXP;
+ }
+
+ if (info->num_regs == LTC2943_NUM_REGS) {
+ if (prescaler_exp > LTC2943_MAX_PRESCALER_EXP)
+ prescaler_exp = LTC2943_MAX_PRESCALER_EXP;
+ info->Qlsb = ((340 * 50000) / r_sense) /
+ (4096 / (1 << (2*prescaler_exp)));
+ } else {
+ if (prescaler_exp > LTC2941_MAX_PRESCALER_EXP)
+ prescaler_exp = LTC2941_MAX_PRESCALER_EXP;
+ info->Qlsb = ((58 * 50000) / r_sense) /
+ (128 / (1 << prescaler_exp));
+ }
+
+ info->client = client;
+ info->id = num;
+ info->supply.type = POWER_SUPPLY_TYPE_BATTERY;
+ info->supply.properties = ltc294x_properties;
+ if (info->num_regs >= LTC294X_REG_TEMPERATURE_LSB)
+ info->supply.num_properties =
+ ARRAY_SIZE(ltc294x_properties);
+ else if (info->num_regs >= LTC294X_REG_CURRENT_LSB)
+ info->supply.num_properties =
+ ARRAY_SIZE(ltc294x_properties) - 1;
+ else if (info->num_regs >= LTC294X_REG_VOLTAGE_LSB)
+ info->supply.num_properties =
+ ARRAY_SIZE(ltc294x_properties) - 2;
+ else
+ info->supply.num_properties =
+ ARRAY_SIZE(ltc294x_properties) - 3;
+ info->supply.get_property = ltc294x_get_property;
+ info->supply.set_property = ltc294x_set_property;
+ info->supply.property_is_writeable = ltc294x_property_is_writeable;
+ info->supply.external_power_changed = NULL;
+
+ INIT_DELAYED_WORK(&info->work, ltc294x_work);
+
+ ret = ltc294x_reset(info, prescaler_exp);
+ if (ret < 0) {
+ dev_err(&client->dev, "Communication with chip failed\n");
+ goto fail_comm;
+ }
+
+ ret = power_supply_register(&client->dev, &info->supply);
+ if (ret) {
+ dev_err(&client->dev, "failed to register ltc2941\n");
+ goto fail_register;
+ } else {
+ schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
+ }
+
+ return 0;
+
+fail_register:
+ kfree(info->supply.name);
+fail_comm:
+fail_name:
+fail_info:
+ mutex_lock(&ltc294x_lock);
+ idr_remove(&ltc294x_id, num);
+ mutex_unlock(&ltc294x_lock);
+fail_id:
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int ltc294x_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+
+ cancel_delayed_work(&info->work);
+ return 0;
+}
+
+static int ltc294x_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ltc294x_pm_ops, ltc294x_suspend, ltc294x_resume);
+#define LTC294X_PM_OPS (&ltc294x_pm_ops)
+
+#else
+#define LTC294X_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+
+static const struct i2c_device_id ltc294x_i2c_id[] = {
+ {"ltc2941", LTC2941_NUM_REGS},
+ {"ltc2943", LTC2943_NUM_REGS},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ltc294x_i2c_id);
+
+static struct i2c_driver ltc294x_driver = {
+ .driver = {
+ .name = "LTC2941",
+ .pm = LTC294X_PM_OPS,
+ },
+ .probe = ltc294x_i2c_probe,
+ .remove = ltc294x_i2c_remove,
+ .id_table = ltc294x_i2c_id,
+};
+module_i2c_driver(ltc294x_driver);
+
+MODULE_AUTHOR("Auryn Verwegen, Topic Embedded Systems");
+MODULE_AUTHOR("Mike Looijmans, Topic Embedded Products");
+MODULE_DESCRIPTION("LTC2941/LTC2943 Battery Gas Gauge IC driver");
+MODULE_LICENSE("GPL");
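
The gauge's resolution comes out of the Qlsb computation in ltc294x_i2c_probe() and feeds the charge conversion in convert_bin_to_uAh(). A worked example with illustrative board values (r_sense = 50 mOhm, prescaler_exp = 4, neither taken from a real binding) and the formulas copied from the driver above:

    #include <stdio.h>

    int main(void)
    {
            int r_sense = 50;        /* mOhm, illustrative */
            int prescaler_exp = 4;   /* illustrative */
            int Qlsb, Q, uAh;

            /* LTC2943 branch of the probe function, result in nAh per count */
            Qlsb = ((340 * 50000) / r_sense) /
                    (4096 / (1 << (2 * prescaler_exp)));

            /* convert a raw accumulated-charge reading of 0x8000 counts */
            Q = 0x8000;
            uAh = ((Q * (Qlsb / 10))) / 100;

            printf("Qlsb = %d nAh/count, %d counts = %d uAh\n", Qlsb, Q, uAh);
            return 0;
    }
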
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 66da691c41cf..1da6c5fbdff5 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -658,7 +658,7 @@ max17042_get_pdata(struct device *dev)
}
#endif
-static struct regmap_config max17042_regmap_config = {
+static const struct regmap_config max17042_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
diff --git a/drivers/power/max77693_charger.c b/drivers/power/max77693_charger.c
new file mode 100644
index 000000000000..b042970fdeaf
--- /dev/null
+++ b/drivers/power/max77693_charger.c
@@ -0,0 +1,753 @@
+/*
+ * max77693_charger.c - Battery charger driver for the Maxim 77693
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+static const char *max77693_charger_name = "max77693-charger";
+static const char *max77693_charger_model = "MAX77693";
+static const char *max77693_charger_manufacturer = "Maxim Integrated";
+
+struct max77693_charger {
+ struct device *dev;
+ struct max77693_dev *max77693;
+ struct power_supply charger;
+
+ u32 constant_volt;
+ u32 min_system_volt;
+ u32 thermal_regulation_temp;
+ u32 batttery_overcurrent;
+ u32 charge_input_threshold_volt;
+};
+
+static int max77693_get_charger_state(struct regmap *regmap)
+{
+ int state;
+ unsigned int data;
+
+ if (regmap_read(regmap, MAX77693_CHG_REG_CHG_DETAILS_01, &data) < 0)
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+
+ data &= CHG_DETAILS_01_CHG_MASK;
+ data >>= CHG_DETAILS_01_CHG_SHIFT;
+
+ switch (data) {
+ case MAX77693_CHARGING_PREQUALIFICATION:
+ case MAX77693_CHARGING_FAST_CONST_CURRENT:
+ case MAX77693_CHARGING_FAST_CONST_VOLTAGE:
+ case MAX77693_CHARGING_TOP_OFF:
+ /* In high temp the charging current is reduced, but still charging */
+ case MAX77693_CHARGING_HIGH_TEMP:
+ state = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case MAX77693_CHARGING_DONE:
+ state = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case MAX77693_CHARGING_TIMER_EXPIRED:
+ case MAX77693_CHARGING_THERMISTOR_SUSPEND:
+ state = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case MAX77693_CHARGING_OFF:
+ case MAX77693_CHARGING_OVER_TEMP:
+ case MAX77693_CHARGING_WATCHDOG_EXPIRED:
+ state = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case MAX77693_CHARGING_RESERVED:
+ default:
+ state = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ return state;
+}
+
+static int max77693_get_charge_type(struct regmap *regmap)
+{
+ int state;
+ unsigned int data;
+
+ if (regmap_read(regmap, MAX77693_CHG_REG_CHG_DETAILS_01, &data) < 0)
+ return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+
+ data &= CHG_DETAILS_01_CHG_MASK;
+ data >>= CHG_DETAILS_01_CHG_SHIFT;
+
+ switch (data) {
+ case MAX77693_CHARGING_PREQUALIFICATION:
+ /*
+ * Top-off: trickle or fast? In top-off the current varies between
+ * 100 and 250 mA. It is higher than prequalification current.
+ */
+ case MAX77693_CHARGING_TOP_OFF:
+ state = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case MAX77693_CHARGING_FAST_CONST_CURRENT:
+ case MAX77693_CHARGING_FAST_CONST_VOLTAGE:
+ /* In high temp the charging current is reduced, but still charging */
+ case MAX77693_CHARGING_HIGH_TEMP:
+ state = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case MAX77693_CHARGING_DONE:
+ case MAX77693_CHARGING_TIMER_EXPIRED:
+ case MAX77693_CHARGING_THERMISTOR_SUSPEND:
+ case MAX77693_CHARGING_OFF:
+ case MAX77693_CHARGING_OVER_TEMP:
+ case MAX77693_CHARGING_WATCHDOG_EXPIRED:
+ state = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case MAX77693_CHARGING_RESERVED:
+ default:
+ state = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+
+ return state;
+}
+
+/*
+ * Supported health statuses:
+ * - POWER_SUPPLY_HEALTH_DEAD
+ * - POWER_SUPPLY_HEALTH_GOOD
+ * - POWER_SUPPLY_HEALTH_OVERVOLTAGE
+ * - POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE
+ * - POWER_SUPPLY_HEALTH_UNKNOWN
+ * - POWER_SUPPLY_HEALTH_UNSPEC_FAILURE
+ */
+static int max77693_get_battery_health(struct regmap *regmap)
+{
+ int state;
+ unsigned int data;
+
+ if (regmap_read(regmap, MAX77693_CHG_REG_CHG_DETAILS_01, &data) < 0)
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ data &= CHG_DETAILS_01_BAT_MASK;
+ data >>= CHG_DETAILS_01_BAT_SHIFT;
+
+ switch (data) {
+ case MAX77693_BATTERY_NOBAT:
+ state = POWER_SUPPLY_HEALTH_DEAD;
+ break;
+ case MAX77693_BATTERY_PREQUALIFICATION:
+ case MAX77693_BATTERY_GOOD:
+ case MAX77693_BATTERY_LOWVOLTAGE:
+ state = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case MAX77693_BATTERY_TIMER_EXPIRED:
+ /*
+ * Took longer to charge than expected, charging suspended.
+ * Damaged battery?
+ */
+ state = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ break;
+ case MAX77693_BATTERY_OVERVOLTAGE:
+ state = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ break;
+ case MAX77693_BATTERY_OVERCURRENT:
+ state = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ case MAX77693_BATTERY_RESERVED:
+ default:
+ state = POWER_SUPPLY_HEALTH_UNKNOWN;
+ break;
+ }
+
+ return state;
+}
+
+static int max77693_get_present(struct regmap *regmap)
+{
+ unsigned int data;
+
+ /*
+ * Read CHG_INT_OK register. High DETBAT bit here should be
+ * equal to value 0x0 in CHG_DETAILS_01/BAT field.
+ */
+ regmap_read(regmap, MAX77693_CHG_REG_CHG_INT_OK, &data);
+ if (data & CHG_INT_OK_DETBAT_MASK)
+ return 0;
+ return 1;
+}
+
+static int max77693_get_online(struct regmap *regmap)
+{
+ unsigned int data;
+
+ regmap_read(regmap, MAX77693_CHG_REG_CHG_INT_OK, &data);
+ if (data & CHG_INT_OK_CHGIN_MASK)
+ return 1;
+ return 0;
+}
+
+static enum power_supply_property max77693_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static int max77693_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max77693_charger *chg = container_of(psy,
+ struct max77693_charger,
+ charger);
+ struct regmap *regmap = chg->max77693->regmap;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = max77693_get_charger_state(regmap);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = max77693_get_charge_type(regmap);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = max77693_get_battery_health(regmap);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = max77693_get_present(regmap);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = max77693_get_online(regmap);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = max77693_charger_model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = max77693_charger_manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t device_attr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count,
+ int (*fn)(struct max77693_charger *, unsigned long))
+{
+ struct max77693_charger *chg = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = fn(chg, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t fast_charge_timer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct max77693_charger *chg = dev_get_drvdata(dev);
+ unsigned int data, val;
+ int ret;
+
+ ret = regmap_read(chg->max77693->regmap, MAX77693_CHG_REG_CHG_CNFG_01,
+ &data);
+ if (ret < 0)
+ return ret;
+
+ data &= CHG_CNFG_01_FCHGTIME_MASK;
+ data >>= CHG_CNFG_01_FCHGTIME_SHIFT;
+ switch (data) {
+ case 0x1 ... 0x7:
+ /* Starting from 4 hours, step by 2 hours */
+ val = 4 + (data - 1) * 2;
+ break;
+ case 0x0:
+ default:
+ val = 0;
+ break;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static int max77693_set_fast_charge_timer(struct max77693_charger *chg,
+ unsigned long hours)
+{
+ unsigned int data;
+
+ /*
+ * 0x00 - disable
+ * 0x01 - 4h
+ * 0x02 - 6h
+ * ...
+ * 0x07 - 16h
+ * Round down odd values.
+ */
+ switch (hours) {
+ case 4 ... 16:
+ data = (hours - 4) / 2 + 1;
+ break;
+ case 0:
+ /* Disable */
+ data = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ data <<= CHG_CNFG_01_FCHGTIME_SHIFT;
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_01,
+ CHG_CNFG_01_FCHGTIME_MASK, data);
+}
+
+static ssize_t fast_charge_timer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return device_attr_store(dev, attr, buf, count,
+ max77693_set_fast_charge_timer);
+}
+
+static ssize_t top_off_threshold_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct max77693_charger *chg = dev_get_drvdata(dev);
+ unsigned int data, val;
+ int ret;
+
+ ret = regmap_read(chg->max77693->regmap, MAX77693_CHG_REG_CHG_CNFG_03,
+ &data);
+ if (ret < 0)
+ return ret;
+
+ data &= CHG_CNFG_03_TOITH_MASK;
+ data >>= CHG_CNFG_03_TOITH_SHIFT;
+
+ if (data <= 0x04)
+ val = 100000 + data * 25000;
+ else
+ val = data * 50000;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static int max77693_set_top_off_threshold_current(struct max77693_charger *chg,
+ unsigned long uamp)
+{
+ unsigned int data;
+
+ if (uamp < 100000 || uamp > 350000)
+ return -EINVAL;
+
+ if (uamp <= 200000)
+ data = (uamp - 100000) / 25000;
+ else
+		/* (200000, 350000] */
+ data = uamp / 50000;
+
+ data <<= CHG_CNFG_03_TOITH_SHIFT;
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_03,
+ CHG_CNFG_03_TOITH_MASK, data);
+}
+
+static ssize_t top_off_threshold_current_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return device_attr_store(dev, attr, buf, count,
+ max77693_set_top_off_threshold_current);
+}
+
+static ssize_t top_off_timer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct max77693_charger *chg = dev_get_drvdata(dev);
+ unsigned int data, val;
+ int ret;
+
+ ret = regmap_read(chg->max77693->regmap, MAX77693_CHG_REG_CHG_CNFG_03,
+ &data);
+ if (ret < 0)
+ return ret;
+
+ data &= CHG_CNFG_03_TOTIME_MASK;
+ data >>= CHG_CNFG_03_TOTIME_SHIFT;
+
+ val = data * 10;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static int max77693_set_top_off_timer(struct max77693_charger *chg,
+ unsigned long minutes)
+{
+ unsigned int data;
+
+ if (minutes > 70)
+ return -EINVAL;
+
+ data = minutes / 10;
+ data <<= CHG_CNFG_03_TOTIME_SHIFT;
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_03,
+ CHG_CNFG_03_TOTIME_MASK, data);
+}
+
+static ssize_t top_off_timer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return device_attr_store(dev, attr, buf, count,
+ max77693_set_top_off_timer);
+}
+
+static DEVICE_ATTR_RW(fast_charge_timer);
+static DEVICE_ATTR_RW(top_off_threshold_current);
+static DEVICE_ATTR_RW(top_off_timer);
+
+static int max77693_set_constant_volt(struct max77693_charger *chg,
+ unsigned int uvolt)
+{
+ unsigned int data;
+
+ /*
+ * 0x00 - 3.650 V
+ * 0x01 - 3.675 V
+ * ...
+ * 0x1b - 4.325 V
+ * 0x1c - 4.340 V
+ * 0x1d - 4.350 V
+ * 0x1e - 4.375 V
+ * 0x1f - 4.400 V
+ */
+ if (uvolt >= 3650000 && uvolt < 4340000)
+ data = (uvolt - 3650000) / 25000;
+ else if (uvolt >= 4340000 && uvolt < 4350000)
+ data = 0x1c;
+ else if (uvolt >= 4350000 && uvolt <= 4400000)
+ data = 0x1d + (uvolt - 4350000) / 25000;
+ else {
+ dev_err(chg->dev, "Wrong value for charging constant voltage\n");
+ return -EINVAL;
+ }
+
+ data <<= CHG_CNFG_04_CHGCVPRM_SHIFT;
+
+ dev_dbg(chg->dev, "Charging constant voltage: %u (0x%x)\n", uvolt,
+ data);
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_04,
+ CHG_CNFG_04_CHGCVPRM_MASK, data);
+}
+
+static int max77693_set_min_system_volt(struct max77693_charger *chg,
+ unsigned int uvolt)
+{
+ unsigned int data;
+
+ if (uvolt < 3000000 || uvolt > 3700000) {
+ dev_err(chg->dev, "Wrong value for minimum system regulation voltage\n");
+ return -EINVAL;
+ }
+
+ data = (uvolt - 3000000) / 100000;
+
+ data <<= CHG_CNFG_04_MINVSYS_SHIFT;
+
+ dev_dbg(chg->dev, "Minimum system regulation voltage: %u (0x%x)\n",
+ uvolt, data);
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_04,
+ CHG_CNFG_04_MINVSYS_MASK, data);
+}
+
+static int max77693_set_thermal_regulation_temp(struct max77693_charger *chg,
+ unsigned int cels)
+{
+ unsigned int data;
+
+ switch (cels) {
+ case 70:
+ case 85:
+ case 100:
+ case 115:
+ data = (cels - 70) / 15;
+ break;
+ default:
+ dev_err(chg->dev, "Wrong value for thermal regulation loop temperature\n");
+ return -EINVAL;
+ }
+
+ data <<= CHG_CNFG_07_REGTEMP_SHIFT;
+
+ dev_dbg(chg->dev, "Thermal regulation loop temperature: %u (0x%x)\n",
+ cels, data);
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_07,
+ CHG_CNFG_07_REGTEMP_MASK, data);
+}
+
+static int max77693_set_batttery_overcurrent(struct max77693_charger *chg,
+ unsigned int uamp)
+{
+ unsigned int data;
+
+ if (uamp && (uamp < 2000000 || uamp > 3500000)) {
+ dev_err(chg->dev, "Wrong value for battery overcurrent\n");
+ return -EINVAL;
+ }
+
+ if (uamp)
+ data = ((uamp - 2000000) / 250000) + 1;
+ else
+ data = 0; /* disable */
+
+ data <<= CHG_CNFG_12_B2SOVRC_SHIFT;
+
+ dev_dbg(chg->dev, "Battery overcurrent: %u (0x%x)\n", uamp, data);
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_12,
+ CHG_CNFG_12_B2SOVRC_MASK, data);
+}
+
+static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg,
+ unsigned int uvolt)
+{
+ unsigned int data;
+
+ switch (uvolt) {
+ case 4300000:
+ data = 0x0;
+ break;
+ case 4700000:
+ case 4800000:
+ case 4900000:
+		data = (uvolt - 4700000) / 100000;
+		break;
+ default:
+ dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n");
+ return -EINVAL;
+ }
+
+ data <<= CHG_CNFG_12_VCHGINREG_SHIFT;
+
+ dev_dbg(chg->dev, "Charge input voltage regulation threshold: %u (0x%x)\n",
+ uvolt, data);
+
+ return regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_12,
+ CHG_CNFG_12_VCHGINREG_MASK, data);
+}
+
+/*
+ * Sets charger registers to proper and safe default values.
+ */
+static int max77693_reg_init(struct max77693_charger *chg)
+{
+ int ret;
+ unsigned int data;
+
+ /* Unlock charger register protection */
+ data = (0x3 << CHG_CNFG_06_CHGPROT_SHIFT);
+ ret = regmap_update_bits(chg->max77693->regmap,
+ MAX77693_CHG_REG_CHG_CNFG_06,
+ CHG_CNFG_06_CHGPROT_MASK, data);
+ if (ret) {
+ dev_err(chg->dev, "Error unlocking registers: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77693_set_fast_charge_timer(chg, DEFAULT_FAST_CHARGE_TIMER);
+ if (ret)
+ return ret;
+
+ ret = max77693_set_top_off_threshold_current(chg,
+ DEFAULT_TOP_OFF_THRESHOLD_CURRENT);
+ if (ret)
+ return ret;
+
+ ret = max77693_set_top_off_timer(chg, DEFAULT_TOP_OFF_TIMER);
+ if (ret)
+ return ret;
+
+ ret = max77693_set_constant_volt(chg, chg->constant_volt);
+ if (ret)
+ return ret;
+
+ ret = max77693_set_min_system_volt(chg, chg->min_system_volt);
+ if (ret)
+ return ret;
+
+ ret = max77693_set_thermal_regulation_temp(chg,
+ chg->thermal_regulation_temp);
+ if (ret)
+ return ret;
+
+ ret = max77693_set_batttery_overcurrent(chg, chg->batttery_overcurrent);
+ if (ret)
+ return ret;
+
+ return max77693_set_charge_input_threshold_volt(chg,
+ chg->charge_input_threshold_volt);
+}
+
+#ifdef CONFIG_OF
+static int max77693_dt_init(struct device *dev, struct max77693_charger *chg)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "no charger OF node\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(np, "maxim,constant-microvolt",
+ &chg->constant_volt))
+ chg->constant_volt = DEFAULT_CONSTANT_VOLT;
+
+ if (of_property_read_u32(np, "maxim,min-system-microvolt",
+ &chg->min_system_volt))
+ chg->min_system_volt = DEFAULT_MIN_SYSTEM_VOLT;
+
+ if (of_property_read_u32(np, "maxim,thermal-regulation-celsius",
+ &chg->thermal_regulation_temp))
+ chg->thermal_regulation_temp = DEFAULT_THERMAL_REGULATION_TEMP;
+
+ if (of_property_read_u32(np, "maxim,battery-overcurrent-microamp",
+ &chg->batttery_overcurrent))
+ chg->batttery_overcurrent = DEFAULT_BATTERY_OVERCURRENT;
+
+ if (of_property_read_u32(np, "maxim,charge-input-threshold-microvolt",
+ &chg->charge_input_threshold_volt))
+ chg->charge_input_threshold_volt =
+ DEFAULT_CHARGER_INPUT_THRESHOLD_VOLT;
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static int max77693_dt_init(struct device *dev, struct max77693_charger *chg)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int max77693_charger_probe(struct platform_device *pdev)
+{
+ struct max77693_charger *chg;
+ struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ chg = devm_kzalloc(&pdev->dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chg);
+ chg->dev = &pdev->dev;
+ chg->max77693 = max77693;
+
+ ret = max77693_dt_init(&pdev->dev, chg);
+ if (ret)
+ return ret;
+
+ ret = max77693_reg_init(chg);
+ if (ret)
+ return ret;
+
+ chg->charger.name = max77693_charger_name;
+ chg->charger.type = POWER_SUPPLY_TYPE_BATTERY;
+ chg->charger.properties = max77693_charger_props;
+ chg->charger.num_properties = ARRAY_SIZE(max77693_charger_props);
+ chg->charger.get_property = max77693_charger_get_property;
+
+ ret = device_create_file(&pdev->dev, &dev_attr_fast_charge_timer);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: create fast charge timer sysfs entry\n");
+ goto err;
+ }
+
+ ret = device_create_file(&pdev->dev,
+ &dev_attr_top_off_threshold_current);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: create top off current sysfs entry\n");
+ goto err;
+ }
+
+ ret = device_create_file(&pdev->dev, &dev_attr_top_off_timer);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: create top off timer sysfs entry\n");
+ goto err;
+ }
+
+ ret = power_supply_register(&pdev->dev, &chg->charger);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ device_remove_file(&pdev->dev, &dev_attr_top_off_timer);
+ device_remove_file(&pdev->dev, &dev_attr_top_off_threshold_current);
+ device_remove_file(&pdev->dev, &dev_attr_fast_charge_timer);
+
+ return ret;
+}
+
+static int max77693_charger_remove(struct platform_device *pdev)
+{
+ struct max77693_charger *chg = platform_get_drvdata(pdev);
+
+ device_remove_file(&pdev->dev, &dev_attr_top_off_timer);
+ device_remove_file(&pdev->dev, &dev_attr_top_off_threshold_current);
+ device_remove_file(&pdev->dev, &dev_attr_fast_charge_timer);
+
+ power_supply_unregister(&chg->charger);
+
+ return 0;
+}
+
+static const struct platform_device_id max77693_charger_id[] = {
+ { "max77693-charger", 0, },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77693_charger_id);
+
+static struct platform_driver max77693_charger_driver = {
+ .driver = {
+ .name = "max77693-charger",
+ },
+ .probe = max77693_charger_probe,
+ .remove = max77693_charger_remove,
+ .id_table = max77693_charger_id,
+};
+module_platform_driver(max77693_charger_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("Maxim 77693 charger driver");
+MODULE_LICENSE("GPL");
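
The sysfs handlers above encode and decode their register fields with small step functions. A worked example of the fast-charge-timer and top-off-threshold arithmetic, using sample inputs and leaving out the shift/mask step that regmap_update_bits() applies:

    #include <stdio.h>

    int main(void)
    {
            unsigned long hours = 7;        /* odd value, rounds down to 6 h */
            unsigned long uamp = 250000;    /* 250 mA top-off threshold */
            unsigned int data, decoded;

            /* fast charge timer: 0x01 = 4 h, each step adds 2 h */
            data = (hours - 4) / 2 + 1;
            decoded = 4 + (data - 1) * 2;
            printf("%lu h -> field 0x%x -> reads back as %u h\n", hours, data, decoded);

            /* top-off threshold: 25 mA steps up to 200 mA, 50 mA steps above */
            if (uamp <= 200000)
                    data = (uamp - 100000) / 25000;
            else
                    data = uamp / 50000;
            decoded = (data <= 0x04) ? 100000 + data * 25000 : data * 50000;
            printf("%lu uA -> field 0x%x -> reads back as %u uA\n", uamp, data, decoded);
            return 0;
    }
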
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 028e76504519..27f6646731b0 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -39,14 +39,13 @@ config POWER_RESET_AXXIA
Say Y if you have an Axxia family SoC.
config POWER_RESET_BRCMSTB
- bool "Broadcom STB reset driver" if COMPILE_TEST
- depends on ARM
+ bool "Broadcom STB reset driver"
+ depends on ARM || MIPS || COMPILE_TEST
default ARCH_BRCMSTB
help
- This driver provides restart support for ARM-based Broadcom STB
- boards.
+ This driver provides restart support for Broadcom STB boards.
- Say Y here if you have an ARM-based Broadcom STB board and you wish
+ Say Y here if you have a Broadcom STB board and you wish
to have restart support.
config POWER_RESET_GPIO
@@ -104,23 +103,16 @@ config POWER_RESET_QNAP
config POWER_RESET_RESTART
bool "Restart power-off driver"
- depends on ARM
help
Some boards don't actually have the ability to power off.
Instead they restart, and u-boot holds the SoC until the
user presses a key. u-boot then boots into Linux.
-config POWER_RESET_SUN6I
- bool "Allwinner A31 SoC reset driver"
- depends on ARCH_SUNXI
- help
- Reboot support for the Allwinner A31 SoCs.
-
config POWER_RESET_ST
- bool "ST restart power-off driver"
+ bool "ST restart driver"
depends on ARCH_STI
help
- Power off and reset support for STMicroelectronics boards.
+ Reset support for STMicroelectronics boards.
config POWER_RESET_VERSATILE
bool "ARM Versatile family reboot driver"
@@ -159,5 +151,11 @@ config POWER_RESET_SYSCON
help
Reboot support for generic SYSCON mapped register reset.
+config POWER_RESET_RMOBILE
+ tristate "Renesas R-Mobile reset driver"
+ depends on ARCH_RMOBILE || COMPILE_TEST
+ help
+ Reboot support for Renesas R-Mobile and SH-Mobile SoCs.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1d4804d6b323..11de15bae52e 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -11,10 +11,10 @@ obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
-obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o
obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
+obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
index 5b08bffcf1a8..b208073c887d 100644
--- a/drivers/power/reset/arm-versatile-reboot.c
+++ b/drivers/power/reset/arm-versatile-reboot.c
@@ -13,16 +13,22 @@
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <asm/system_misc.h>
+
+#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
+#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
+#define INTEGRATOR_CM_CTRL_RESET (1 << 3)
#define REALVIEW_SYS_LOCK_OFFSET 0x20
-#define REALVIEW_SYS_LOCK_VAL 0xA05F
#define REALVIEW_SYS_RESETCTL_OFFSET 0x40
+/* Magic unlocking token used on all Versatile boards */
+#define VERSATILE_LOCK_VAL 0xA05F
+
/*
* We detect the different syscon types from the compatible strings.
*/
enum versatile_reboot {
+ INTEGRATOR_REBOOT_CM,
REALVIEW_REBOOT_EB,
REALVIEW_REBOOT_PB1176,
REALVIEW_REBOOT_PB11MP,
@@ -36,6 +42,10 @@ static enum versatile_reboot versatile_reboot_type;
static const struct of_device_id versatile_reboot_of_match[] = {
{
+ .compatible = "arm,core-module-integrator",
+ .data = (void *)INTEGRATOR_REBOOT_CM
+ },
+ {
.compatible = "arm,realview-eb-syscon",
.data = (void *)REALVIEW_REBOOT_EB,
},
@@ -55,31 +65,47 @@ static const struct of_device_id versatile_reboot_of_match[] = {
.compatible = "arm,realview-pbx-syscon",
.data = (void *)REALVIEW_REBOOT_PBX,
},
+ {},
};
-static void versatile_reboot(enum reboot_mode mode, const char *cmd)
+static int versatile_reboot(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
/* Unlock the reset register */
- regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
- REALVIEW_SYS_LOCK_VAL);
/* Then hit reset on the different machines */
switch (versatile_reboot_type) {
+ case INTEGRATOR_REBOOT_CM:
+ regmap_write(syscon_regmap, INTEGRATOR_HDR_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
+ regmap_update_bits(syscon_regmap,
+ INTEGRATOR_HDR_CTRL_OFFSET,
+ INTEGRATOR_CM_CTRL_RESET,
+ INTEGRATOR_CM_CTRL_RESET);
+ break;
case REALVIEW_REBOOT_EB:
+ regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
regmap_write(syscon_regmap,
REALVIEW_SYS_RESETCTL_OFFSET, 0x0008);
break;
case REALVIEW_REBOOT_PB1176:
+ regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
regmap_write(syscon_regmap,
REALVIEW_SYS_RESETCTL_OFFSET, 0x0100);
break;
case REALVIEW_REBOOT_PB11MP:
case REALVIEW_REBOOT_PBA8:
+ regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
0x0000);
regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
0x0004);
break;
case REALVIEW_REBOOT_PBX:
+ regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
0x00f0);
regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
@@ -87,12 +113,20 @@ static void versatile_reboot(enum reboot_mode mode, const char *cmd)
break;
}
dsb();
+
+ return NOTIFY_DONE;
}
+static struct notifier_block versatile_reboot_nb = {
+ .notifier_call = versatile_reboot,
+ .priority = 192,
+};
+
static int __init versatile_reboot_probe(void)
{
const struct of_device_id *reboot_id;
struct device_node *np;
+ int err;
np = of_find_matching_node_and_match(NULL, versatile_reboot_of_match,
&reboot_id);
@@ -104,7 +138,10 @@ static int __init versatile_reboot_probe(void)
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
- arm_pm_restart = versatile_reboot;
+ err = register_restart_handler(&versatile_reboot_nb);
+ if (err)
+ return err;
+
pr_info("versatile reboot driver registered\n");
return 0;
}
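
The conversion above replaces the global arm_pm_restart hook with a restart-handler notifier, which any driver can register; priority 192 puts it ahead of the default 128 handlers. A minimal module-style sketch of that registration pattern (it only logs and does not reset anything; this is an illustration, not code from the patch):

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int demo_restart(struct notifier_block *this, unsigned long mode,
                            void *cmd)
    {
            pr_info("demo restart handler: cmd=%s\n", cmd ? (char *)cmd : "none");
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_restart_nb = {
            .notifier_call = demo_restart,
            .priority = 192,
    };

    static int __init demo_init(void)
    {
            return register_restart_handler(&demo_restart_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_restart_handler(&demo_restart_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
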
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index c61000333bb9..4b72ea51c364 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -71,10 +71,11 @@ static void at91_poweroff(void)
writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
}
-const enum wakeup_type at91_poweroff_get_wakeup_mode(struct device_node *np)
+static int at91_poweroff_get_wakeup_mode(struct device_node *np)
{
const char *pm;
- int err, i;
+ unsigned int i;
+ int err;
err = of_property_read_string(np, "atmel,wakeup-mode", &pm);
if (err < 0)
@@ -90,7 +91,7 @@ const enum wakeup_type at91_poweroff_get_wakeup_mode(struct device_node *np)
static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- enum wakeup_type wakeup_mode;
+ int wakeup_mode;
u32 mode = 0, tmp;
wakeup_mode = at91_poweroff_get_wakeup_mode(np);
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 69a75d99ae92..13584e24736a 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -17,8 +17,6 @@
#include <linux/platform_device.h>
#include <linux/reboot.h>
-#include <asm/system_misc.h>
-
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
@@ -54,7 +52,8 @@ static void __iomem *at91_ramc_base[2], *at91_rstc_base;
* reset register it can be left driving the data bus and
* killing the chance of a subsequent boot from NAND
*/
-static void at91sam9260_restart(enum reboot_mode mode, const char *cmd)
+static int at91sam9260_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
asm volatile(
/* Align to cache lines */
@@ -76,9 +75,12 @@ static void at91sam9260_restart(enum reboot_mode mode, const char *cmd)
"r" (1),
"r" (AT91_SDRAMC_LPCB_POWER_DOWN),
"r" (AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST));
+
+ return NOTIFY_DONE;
}
-static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd)
+static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
asm volatile(
/*
@@ -117,6 +119,8 @@ static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd)
"r" (AT91_DDRSDRC_LPCB_POWER_DOWN),
"r" (AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST)
: "r0");
+
+ return NOTIFY_DONE;
}
static void __init at91_reset_status(struct platform_device *pdev)
@@ -161,6 +165,10 @@ static struct of_device_id at91_reset_of_match[] = {
{ /* sentinel */ }
};
+static struct notifier_block at91_restart_nb = {
+ .priority = 192,
+};
+
static int at91_reset_of_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -183,9 +191,8 @@ static int at91_reset_of_probe(struct platform_device *pdev)
}
match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
- arm_pm_restart = match->data;
-
- return 0;
+ at91_restart_nb.notifier_call = match->data;
+ return register_restart_handler(&at91_restart_nb);
}
static int at91_reset_platform_probe(struct platform_device *pdev)
@@ -212,10 +219,11 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
}
match = platform_get_device_id(pdev);
- arm_pm_restart = (void (*)(enum reboot_mode, const char*))
- match->driver_data;
+ at91_restart_nb.notifier_call =
+ (int (*)(struct notifier_block *,
+ unsigned long, void *)) match->driver_data;
- return 0;
+ return register_restart_handler(&at91_restart_nb);
}
static int at91_reset_probe(struct platform_device *pdev)
diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
index 100606f9b3dc..884b53c483c0 100644
--- a/drivers/power/reset/brcmstb-reboot.c
+++ b/drivers/power/reset/brcmstb-reboot.c
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -34,13 +35,20 @@ static struct regmap *regmap;
static u32 rst_src_en;
static u32 sw_mstr_rst;
+struct reset_reg_mask {
+ u32 rst_src_en_mask;
+ u32 sw_mstr_rst_mask;
+};
+
+static const struct reset_reg_mask *reset_masks;
+
static int brcmstb_restart_handler(struct notifier_block *this,
unsigned long mode, void *cmd)
{
int rc;
u32 tmp;
- rc = regmap_write(regmap, rst_src_en, 1);
+ rc = regmap_write(regmap, rst_src_en, reset_masks->rst_src_en_mask);
if (rc) {
pr_err("failed to write rst_src_en (%d)\n", rc);
return NOTIFY_DONE;
@@ -52,7 +60,7 @@ static int brcmstb_restart_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
- rc = regmap_write(regmap, sw_mstr_rst, 1);
+ rc = regmap_write(regmap, sw_mstr_rst, reset_masks->sw_mstr_rst_mask);
if (rc) {
pr_err("failed to write sw_mstr_rst (%d)\n", rc);
return NOTIFY_DONE;
@@ -75,10 +83,34 @@ static struct notifier_block brcmstb_restart_nb = {
.priority = 128,
};
+static const struct reset_reg_mask reset_bits_40nm = {
+ .rst_src_en_mask = BIT(0),
+ .sw_mstr_rst_mask = BIT(0),
+};
+
+static const struct reset_reg_mask reset_bits_65nm = {
+ .rst_src_en_mask = BIT(3),
+ .sw_mstr_rst_mask = BIT(31),
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "brcm,brcmstb-reboot", .data = &reset_bits_40nm },
+ { .compatible = "brcm,bcm7038-reboot", .data = &reset_bits_65nm },
+ {},
+};
+
static int brcmstb_reboot_probe(struct platform_device *pdev)
{
int rc;
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(of_match, np);
+ if (!of_id) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+ reset_masks = of_id->data;
regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(regmap)) {
@@ -108,11 +140,6 @@ static int brcmstb_reboot_probe(struct platform_device *pdev)
return rc;
}
-static const struct of_device_id of_match[] = {
- { .compatible = "brcm,brcmstb-reboot", },
- {},
-};
-
static struct platform_driver brcmstb_reboot_driver = {
.probe = brcmstb_reboot_probe,
.driver = {
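The brcmstb change looks up per-chip register masks through the of_device_id .data pointer. A minimal sketch of that idiom, assuming an invented compatible string and mask values:

#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_reset_masks {
	u32 src_en_mask;
	u32 mstr_rst_mask;
};

static const struct example_reset_masks example_40nm_masks = {
	.src_en_mask	= BIT(0),
	.mstr_rst_mask	= BIT(0),
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-reboot", .data = &example_40nm_masks },
	{ /* sentinel */ }
};

static const struct example_reset_masks *example_masks;

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(example_of_match, pdev->dev.of_node);
	if (!of_id)
		return -EINVAL;

	/* .data points at the mask set for the matched compatible */
	example_masks = of_id->data;
	return 0;
}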
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 34f38a3dc3ff..7ef193b6f7fe 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -32,7 +32,9 @@
* - trigger (input)
 * A level change indicates the shut-down trigger. If its state reverts
* within the time-out defined by trigger_delay, the shut down is not
- * executed.
+ * executed. If no pin is assigned to this input, the driver will start the
+ * watchdog toggle immediately. The chip will only power off the system if
+ * it is requested to do so through the kill line.
*
* - watchdog (output)
* Once a shut down is triggered, the driver will toggle this signal,
@@ -63,7 +65,7 @@
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
-struct ltc2952_poweroff_data {
+struct ltc2952_poweroff {
struct hrtimer timer_trigger;
struct hrtimer timer_wde;
@@ -72,22 +74,21 @@ struct ltc2952_poweroff_data {
struct device *dev;
- unsigned int virq;
+ struct gpio_desc *gpio_trigger;
+ struct gpio_desc *gpio_watchdog;
+ struct gpio_desc *gpio_kill;
- /**
- * 0: trigger
- * 1: watchdog
- * 2: kill
- */
- struct gpio_desc *gpio[3];
+ bool kernel_panic;
+ struct notifier_block panic_notifier;
};
-static int ltc2952_poweroff_panic;
-static struct ltc2952_poweroff_data *ltc2952_data;
+#define to_ltc2952(p, m) container_of(p, struct ltc2952_poweroff, m)
-#define POWERPATH_IO_TRIGGER 0
-#define POWERPATH_IO_WATCHDOG 1
-#define POWERPATH_IO_KILL 2
+/*
+ * This global variable is only needed for pm_power_off. We should
+ * remove it entirely once we don't need the global state anymore.
+ */
+static struct ltc2952_poweroff *ltc2952_data;
/**
* ltc2952_poweroff_timer_wde - Timer callback
@@ -103,29 +104,24 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer)
ktime_t now;
int state;
unsigned long overruns;
+ struct ltc2952_poweroff *data = to_ltc2952(timer, timer_wde);
- if (ltc2952_poweroff_panic)
+ if (data->kernel_panic)
return HRTIMER_NORESTART;
- state = gpiod_get_value(ltc2952_data->gpio[POWERPATH_IO_WATCHDOG]);
- gpiod_set_value(ltc2952_data->gpio[POWERPATH_IO_WATCHDOG], !state);
+ state = gpiod_get_value(data->gpio_watchdog);
+ gpiod_set_value(data->gpio_watchdog, !state);
now = hrtimer_cb_get_time(timer);
- overruns = hrtimer_forward(timer, now, ltc2952_data->wde_interval);
+ overruns = hrtimer_forward(timer, now, data->wde_interval);
return HRTIMER_RESTART;
}
-static enum hrtimer_restart ltc2952_poweroff_timer_trigger(
- struct hrtimer *timer)
+static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data)
{
- int ret;
-
- ret = hrtimer_start(&ltc2952_data->timer_wde,
- ltc2952_data->wde_interval, HRTIMER_MODE_REL);
-
- if (ret) {
- dev_err(ltc2952_data->dev, "unable to start the timer\n");
+ if (hrtimer_start(&data->timer_wde, data->wde_interval,
+ HRTIMER_MODE_REL)) {
/*
* The device will not toggle the watchdog reset,
* thus shut down is only safe if the PowerPath controller
@@ -134,10 +130,17 @@ static enum hrtimer_restart ltc2952_poweroff_timer_trigger(
*
* Only sending a warning as the system will power-off anyway
*/
+ dev_err(data->dev, "unable to start the timer\n");
}
+}
- dev_info(ltc2952_data->dev, "executing shutdown\n");
+static enum hrtimer_restart
+ltc2952_poweroff_timer_trigger(struct hrtimer *timer)
+{
+ struct ltc2952_poweroff *data = to_ltc2952(timer, timer_trigger);
+ ltc2952_poweroff_start_wde(data);
+ dev_info(data->dev, "executing shutdown\n");
orderly_poweroff(true);
return HRTIMER_NORESTART;
@@ -154,180 +157,161 @@ static enum hrtimer_restart ltc2952_poweroff_timer_trigger(
*/
static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id)
{
- int ret;
- struct ltc2952_poweroff_data *data = dev_id;
+ struct ltc2952_poweroff *data = dev_id;
- if (ltc2952_poweroff_panic)
- goto irq_ok;
-
- if (hrtimer_active(&data->timer_wde)) {
+ if (data->kernel_panic || hrtimer_active(&data->timer_wde)) {
/* shutdown is already triggered, nothing to do any more */
- goto irq_ok;
+ return IRQ_HANDLED;
}
- if (!hrtimer_active(&data->timer_trigger)) {
- ret = hrtimer_start(&data->timer_trigger, data->trigger_delay,
- HRTIMER_MODE_REL);
-
- if (ret)
+ if (gpiod_get_value(data->gpio_trigger)) {
+ if (hrtimer_start(&data->timer_trigger, data->trigger_delay,
+ HRTIMER_MODE_REL))
dev_err(data->dev, "unable to start the wait timer\n");
} else {
- ret = hrtimer_cancel(&data->timer_trigger);
+ hrtimer_cancel(&data->timer_trigger);
/* omitting return value check, timer should have been valid */
}
-
-irq_ok:
return IRQ_HANDLED;
}
static void ltc2952_poweroff_kill(void)
{
- gpiod_set_value(ltc2952_data->gpio[POWERPATH_IO_KILL], 1);
-}
-
-static int ltc2952_poweroff_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return -ENOSYS;
-}
-
-static int ltc2952_poweroff_resume(struct platform_device *pdev)
-{
- return -ENOSYS;
+ gpiod_set_value(ltc2952_data->gpio_kill, 1);
}
-static void ltc2952_poweroff_default(struct ltc2952_poweroff_data *data)
+static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(data->gpio); i++)
- data->gpio[i] = NULL;
-
data->wde_interval = ktime_set(0, 300L*1E6L);
data->trigger_delay = ktime_set(2, 500L*1E6L);
hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_trigger.function = &ltc2952_poweroff_timer_trigger;
+ data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
hrtimer_init(&data->timer_wde, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_wde.function = &ltc2952_poweroff_timer_wde;
+ data->timer_wde.function = ltc2952_poweroff_timer_wde;
}
static int ltc2952_poweroff_init(struct platform_device *pdev)
{
- int ret, virq;
- unsigned int i;
- struct ltc2952_poweroff_data *data;
-
- static char *name[] = {
- "trigger",
- "watchdog",
- "kill",
- NULL
- };
-
- data = ltc2952_data;
- ltc2952_poweroff_default(ltc2952_data);
-
- for (i = 0; i < ARRAY_SIZE(ltc2952_data->gpio); i++) {
- ltc2952_data->gpio[i] = gpiod_get(&pdev->dev, name[i]);
+ int ret;
+ struct ltc2952_poweroff *data = platform_get_drvdata(pdev);
- if (IS_ERR(ltc2952_data->gpio[i])) {
- ret = PTR_ERR(ltc2952_data->gpio[i]);
- dev_err(&pdev->dev,
- "unable to claim the following gpio: %s\n",
- name[i]);
- goto err_io;
- }
- }
+ ltc2952_poweroff_default(data);
- ret = gpiod_direction_output(
- ltc2952_data->gpio[POWERPATH_IO_WATCHDOG], 0);
- if (ret) {
- dev_err(&pdev->dev, "unable to use watchdog-gpio as output\n");
- goto err_io;
+ data->gpio_watchdog = devm_gpiod_get(&pdev->dev, "watchdog",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpio_watchdog)) {
+ ret = PTR_ERR(data->gpio_watchdog);
+ dev_err(&pdev->dev, "unable to claim gpio \"watchdog\"\n");
+ return ret;
}
- ret = gpiod_direction_output(ltc2952_data->gpio[POWERPATH_IO_KILL], 0);
- if (ret) {
- dev_err(&pdev->dev, "unable to use kill-gpio as output\n");
- goto err_io;
+ data->gpio_kill = devm_gpiod_get(&pdev->dev, "kill", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpio_kill)) {
+ ret = PTR_ERR(data->gpio_kill);
+ dev_err(&pdev->dev, "unable to claim gpio \"kill\"\n");
+ return ret;
}
- virq = gpiod_to_irq(ltc2952_data->gpio[POWERPATH_IO_TRIGGER]);
- if (virq < 0) {
- dev_err(&pdev->dev, "cannot map GPIO as interrupt");
- goto err_io;
+ data->gpio_trigger = devm_gpiod_get(&pdev->dev, "trigger", GPIOD_IN);
+ if (IS_ERR(data->gpio_trigger)) {
+ /*
+ * It's not a problem if the trigger gpio isn't available, but
+ * it is worth a warning if its use was defined in the device
+ * tree.
+ */
+ if (PTR_ERR(data->gpio_trigger) != -ENOENT)
+ dev_err(&pdev->dev,
+ "unable to claim gpio \"trigger\"\n");
+ data->gpio_trigger = NULL;
}
- ltc2952_data->virq = virq;
- ret = request_irq(virq,
- ltc2952_poweroff_handler,
- (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING),
- "ltc2952-poweroff",
- ltc2952_data
- );
-
- if (ret) {
- dev_err(&pdev->dev, "cannot configure an interrupt handler\n");
- goto err_io;
+ if (devm_request_irq(&pdev->dev, gpiod_to_irq(data->gpio_trigger),
+ ltc2952_poweroff_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING),
+ "ltc2952-poweroff",
+ data)) {
+ /*
+	 * Several things may have gone wrong here:
+ * - No trigger input was defined
+ * - Claiming the GPIO failed
+ * - We could not map to an IRQ
+ * - We couldn't register an interrupt handler
+ *
+ * None of these really are problems, but all of them
+ * disqualify the push button from controlling the power.
+ *
+ * It is therefore important to note that if the ltc2952
+ * detects a button press for long enough, it will still start
+ * its own powerdown window and cut the power on us if we don't
+ * start the watchdog trigger.
+ */
+ if (data->gpio_trigger) {
+ dev_warn(&pdev->dev,
+ "unable to configure the trigger interrupt\n");
+ devm_gpiod_put(&pdev->dev, data->gpio_trigger);
+ data->gpio_trigger = NULL;
+ }
+ dev_info(&pdev->dev,
+ "power down trigger input will not be used\n");
+ ltc2952_poweroff_start_wde(data);
}
return 0;
+}
-err_io:
- for (i = 0; i < ARRAY_SIZE(ltc2952_data->gpio); i++)
- if (ltc2952_data->gpio[i])
- gpiod_put(ltc2952_data->gpio[i]);
+static int ltc2952_poweroff_notify_panic(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct ltc2952_poweroff *data = to_ltc2952(nb, panic_notifier);
- return ret;
+ data->kernel_panic = true;
+ return NOTIFY_DONE;
}
static int ltc2952_poweroff_probe(struct platform_device *pdev)
{
int ret;
+ struct ltc2952_poweroff *data;
if (pm_power_off) {
dev_err(&pdev->dev, "pm_power_off already registered");
return -EBUSY;
}
- ltc2952_data = kzalloc(sizeof(*ltc2952_data), GFP_KERNEL);
- if (!ltc2952_data)
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- ltc2952_data->dev = &pdev->dev;
+ data->dev = &pdev->dev;
+ platform_set_drvdata(pdev, data);
ret = ltc2952_poweroff_init(pdev);
if (ret)
- goto err;
+ return ret;
- pm_power_off = &ltc2952_poweroff_kill;
+ /* TODO: remove ltc2952_data */
+ ltc2952_data = data;
+ pm_power_off = ltc2952_poweroff_kill;
+ data->panic_notifier.notifier_call = ltc2952_poweroff_notify_panic;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &data->panic_notifier);
dev_info(&pdev->dev, "probe successful\n");
return 0;
-
-err:
- kfree(ltc2952_data);
- return ret;
}
static int ltc2952_poweroff_remove(struct platform_device *pdev)
{
- unsigned int i;
+ struct ltc2952_poweroff *data = platform_get_drvdata(pdev);
pm_power_off = NULL;
-
- if (ltc2952_data) {
- free_irq(ltc2952_data->virq, ltc2952_data);
-
- for (i = 0; i < ARRAY_SIZE(ltc2952_data->gpio); i++)
- gpiod_put(ltc2952_data->gpio[i]);
-
- kfree(ltc2952_data);
- }
-
+ hrtimer_cancel(&data->timer_trigger);
+ hrtimer_cancel(&data->timer_wde);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &data->panic_notifier);
return 0;
}
@@ -344,41 +328,9 @@ static struct platform_driver ltc2952_poweroff_driver = {
.name = "ltc2952-poweroff",
.of_match_table = of_ltc2952_poweroff_match,
},
- .suspend = ltc2952_poweroff_suspend,
- .resume = ltc2952_poweroff_resume,
-};
-
-static int ltc2952_poweroff_notify_panic(struct notifier_block *nb,
- unsigned long code, void *unused)
-{
- ltc2952_poweroff_panic = 1;
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ltc2952_poweroff_panic_nb = {
- .notifier_call = ltc2952_poweroff_notify_panic,
};
-static int __init ltc2952_poweroff_platform_init(void)
-{
- ltc2952_poweroff_panic = 0;
-
- atomic_notifier_chain_register(&panic_notifier_list,
- &ltc2952_poweroff_panic_nb);
-
- return platform_driver_register(&ltc2952_poweroff_driver);
-}
-
-static void __exit ltc2952_poweroff_platform_exit(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &ltc2952_poweroff_panic_nb);
-
- platform_driver_unregister(&ltc2952_poweroff_driver);
-}
-
-module_init(ltc2952_poweroff_platform_init);
-module_exit(ltc2952_poweroff_platform_exit);
+module_platform_driver(ltc2952_poweroff_driver);
MODULE_AUTHOR("René Moll <rene.moll@xsens.com>");
MODULE_DESCRIPTION("LTC PowerPath power-off driver");
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index f46f2c2e4648..41b22c4d5236 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -16,7 +16,6 @@
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/reboot.h>
-#include <asm/system_misc.h>
static void restart_poweroff_do_poweroff(void)
{
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
new file mode 100644
index 000000000000..e6569df76941
--- /dev/null
+++ b/drivers/power/reset/rmobile-reset.c
@@ -0,0 +1,91 @@
+/*
+ * Renesas R-Mobile Reset Driver
+ *
+ * Copyright (C) 2014 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+
+/* SYSC Register Bank 2 */
+#define RESCNT2 0x20 /* Reset Control Register 2 */
+
+/* Reset Control Register 2 */
+#define RESCNT2_PRES 0x80000000 /* Soft power-on reset */
+
+static void __iomem *sysc_base2;
+
+static int rmobile_reset_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ pr_debug("%s %lu\n", __func__, mode);
+
+ /* Let's assume we have acquired the HPB semaphore */
+ writel(RESCNT2_PRES, sysc_base2 + RESCNT2);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rmobile_reset_nb = {
+ .notifier_call = rmobile_reset_handler,
+ .priority = 192,
+};
+
+static int rmobile_reset_probe(struct platform_device *pdev)
+{
+ int error;
+
+ sysc_base2 = of_iomap(pdev->dev.of_node, 1);
+ if (!sysc_base2)
+ return -ENODEV;
+
+ error = register_restart_handler(&rmobile_reset_nb);
+ if (error) {
+ dev_err(&pdev->dev,
+ "cannot register restart handler (err=%d)\n", error);
+ goto fail_unmap;
+ }
+
+ return 0;
+
+fail_unmap:
+ iounmap(sysc_base2);
+ return error;
+}
+
+static int rmobile_reset_remove(struct platform_device *pdev)
+{
+ unregister_restart_handler(&rmobile_reset_nb);
+ iounmap(sysc_base2);
+ return 0;
+}
+
+static const struct of_device_id rmobile_reset_of_match[] = {
+ { .compatible = "renesas,sysc-rmobile", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rmobile_reset_of_match);
+
+static struct platform_driver rmobile_reset_driver = {
+ .probe = rmobile_reset_probe,
+ .remove = rmobile_reset_remove,
+ .driver = {
+ .name = "rmobile_reset",
+ .of_match_table = rmobile_reset_of_match,
+ },
+};
+
+module_platform_driver(rmobile_reset_driver);
+
+MODULE_DESCRIPTION("Renesas R-Mobile Reset Driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/st-poweroff.c b/drivers/power/reset/st-poweroff.c
index a0acf25ee2a2..27383de9caa8 100644
--- a/drivers/power/reset/st-poweroff.c
+++ b/drivers/power/reset/st-poweroff.c
@@ -15,10 +15,9 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
-#include <asm/system_misc.h>
-
struct reset_syscfg {
struct regmap *regmap;
/* syscfg used for reset */
@@ -75,7 +74,8 @@ static struct reset_syscfg stid127_reset = {
static struct reset_syscfg *st_restart_syscfg;
-static void st_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int st_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
/* reset syscfg updated */
regmap_update_bits(st_restart_syscfg->regmap,
@@ -88,8 +88,15 @@ static void st_restart(enum reboot_mode reboot_mode, const char *cmd)
st_restart_syscfg->offset_rst_msk,
st_restart_syscfg->mask_rst_msk,
0);
+
+ return NOTIFY_DONE;
}
+static struct notifier_block st_restart_nb = {
+ .notifier_call = st_restart,
+ .priority = 192,
+};
+
static struct of_device_id st_reset_of_match[] = {
{
.compatible = "st,stih415-restart",
@@ -126,9 +133,7 @@ static int st_reset_probe(struct platform_device *pdev)
return PTR_ERR(st_restart_syscfg->regmap);
}
- arm_pm_restart = st_restart;
-
- return 0;
+ return register_restart_handler(&st_restart_nb);
}
static struct platform_driver st_reset_driver = {
diff --git a/drivers/power/reset/sun6i-reboot.c b/drivers/power/reset/sun6i-reboot.c
deleted file mode 100644
index af2cd7ff2fe8..000000000000
--- a/drivers/power/reset/sun6i-reboot.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Allwinner A31 SoCs reset code
- *
- * Copyright (C) 2012-2014 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-
-#include <asm/system_misc.h>
-
-#define SUN6I_WATCHDOG1_IRQ_REG 0x00
-#define SUN6I_WATCHDOG1_CTRL_REG 0x10
-#define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0)
-#define SUN6I_WATCHDOG1_CONFIG_REG 0x14
-#define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0)
-#define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1)
-#define SUN6I_WATCHDOG1_MODE_REG 0x18
-#define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0)
-
-static void __iomem *wdt_base;
-
-static void sun6i_wdt_restart(enum reboot_mode mode, const char *cmd)
-{
- if (!wdt_base)
- return;
-
- /* Disable interrupts */
- writel(0, wdt_base + SUN6I_WATCHDOG1_IRQ_REG);
-
- /* We want to disable the IRQ and just reset the whole system */
- writel(SUN6I_WATCHDOG1_CONFIG_RESTART,
- wdt_base + SUN6I_WATCHDOG1_CONFIG_REG);
-
- /* Enable timer. The default and lowest interval value is 0.5s */
- writel(SUN6I_WATCHDOG1_MODE_ENABLE,
- wdt_base + SUN6I_WATCHDOG1_MODE_REG);
-
- /* Restart the watchdog. */
- writel(SUN6I_WATCHDOG1_CTRL_RESTART,
- wdt_base + SUN6I_WATCHDOG1_CTRL_REG);
-
- while (1) {
- mdelay(5);
- writel(SUN6I_WATCHDOG1_MODE_ENABLE,
- wdt_base + SUN6I_WATCHDOG1_MODE_REG);
- }
-}
-
-static int sun6i_reboot_probe(struct platform_device *pdev)
-{
- wdt_base = of_iomap(pdev->dev.of_node, 0);
- if (!wdt_base) {
- WARN(1, "failed to map watchdog base address");
- return -ENODEV;
- }
-
- arm_pm_restart = sun6i_wdt_restart;
-
- return 0;
-}
-
-static struct of_device_id sun6i_reboot_of_match[] = {
- { .compatible = "allwinner,sun6i-a31-wdt" },
- {}
-};
-
-static struct platform_driver sun6i_reboot_driver = {
- .probe = sun6i_reboot_probe,
- .driver = {
- .name = "sun6i-reboot",
- .of_match_table = sun6i_reboot_of_match,
- },
-};
-module_platform_driver(sun6i_reboot_driver);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 9dfc9cee3232..be12d9b92957 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -111,23 +111,20 @@ static int _vexpress_register_restart_handler(struct device *dev)
static int vexpress_reset_probe(struct platform_device *pdev)
{
- enum vexpress_reset_func func;
const struct of_device_id *match =
of_match_device(vexpress_reset_of_match, &pdev->dev);
struct regmap *regmap;
int ret = 0;
- if (match)
- func = (enum vexpress_reset_func)match->data;
- else
- func = pdev->id_entry->driver_data;
+ if (!match)
+ return -EINVAL;
regmap = devm_regmap_init_vexpress_config(&pdev->dev);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
dev_set_drvdata(&pdev->dev, regmap);
- switch (func) {
+ switch ((enum vexpress_reset_func)match->data) {
case FUNC_SHUTDOWN:
vexpress_power_off_device = &pdev->dev;
pm_power_off = vexpress_power_off;
@@ -144,20 +141,12 @@ static int vexpress_reset_probe(struct platform_device *pdev)
return ret;
}
-static const struct platform_device_id vexpress_reset_id_table[] = {
- { .name = "vexpress-reset", .driver_data = FUNC_RESET, },
- { .name = "vexpress-shutdown", .driver_data = FUNC_SHUTDOWN, },
- { .name = "vexpress-reboot", .driver_data = FUNC_REBOOT, },
- {}
-};
-
static struct platform_driver vexpress_reset_driver = {
.probe = vexpress_reset_probe,
.driver = {
.name = "vexpress-reset",
.of_match_table = vexpress_reset_of_match,
},
- .id_table = vexpress_reset_id_table,
};
static int __init vexpress_reset_init(void)
diff --git a/drivers/power/rt5033_battery.c b/drivers/power/rt5033_battery.c
new file mode 100644
index 000000000000..7b898f41c595
--- /dev/null
+++ b/drivers/power/rt5033_battery.c
@@ -0,0 +1,177 @@
+/*
+ * Fuel gauge driver for Richtek RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/rt5033-private.h>
+#include <linux/mfd/rt5033.h>
+
+static int rt5033_battery_get_capacity(struct i2c_client *client)
+{
+ struct rt5033_battery *battery = i2c_get_clientdata(client);
+ u32 msb;
+
+ regmap_read(battery->regmap, RT5033_FUEL_REG_SOC_H, &msb);
+
+ return msb;
+}
+
+static int rt5033_battery_get_present(struct i2c_client *client)
+{
+ struct rt5033_battery *battery = i2c_get_clientdata(client);
+ u32 val;
+
+ regmap_read(battery->regmap, RT5033_FUEL_REG_CONFIG_L, &val);
+
+ return (val & RT5033_FUEL_BAT_PRESENT) ? true : false;
+}
+
+static int rt5033_battery_get_watt_prop(struct i2c_client *client,
+ enum power_supply_property psp)
+{
+ struct rt5033_battery *battery = i2c_get_clientdata(client);
+ unsigned int regh, regl;
+ int ret;
+ u32 msb, lsb;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ regh = RT5033_FUEL_REG_VBAT_H;
+ regl = RT5033_FUEL_REG_VBAT_L;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ regh = RT5033_FUEL_REG_AVG_VOLT_H;
+ regl = RT5033_FUEL_REG_AVG_VOLT_L;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ regh = RT5033_FUEL_REG_OCV_H;
+ regl = RT5033_FUEL_REG_OCV_L;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_read(battery->regmap, regh, &msb);
+ regmap_read(battery->regmap, regl, &lsb);
+
+ ret = ((msb << 4) + (lsb >> 4)) * 1250 / 1000;
+
+ return ret;
+}
+
+static int rt5033_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rt5033_battery *battery = container_of(psy,
+ struct rt5033_battery, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = rt5033_battery_get_watt_prop(battery->client,
+ psp);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = rt5033_battery_get_present(battery->client);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = rt5033_battery_get_capacity(battery->client);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property rt5033_battery_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static struct regmap_config rt5033_battery_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT5033_FUEL_REG_END,
+};
+
+static int rt5033_battery_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct rt5033_battery *battery;
+	int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ battery = devm_kzalloc(&client->dev, sizeof(*battery), GFP_KERNEL);
+ if (!battery)
+		return -ENOMEM;
+
+ battery->client = client;
+ battery->regmap = devm_regmap_init_i2c(client,
+ &rt5033_battery_regmap_config);
+ if (IS_ERR(battery->regmap)) {
+ dev_err(&client->dev, "Failed to initialize regmap\n");
+ return -EINVAL;
+ }
+
+ i2c_set_clientdata(client, battery);
+
+ battery->psy.name = "rt5033-battery";
+ battery->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->psy.get_property = rt5033_battery_get_property;
+ battery->psy.properties = rt5033_battery_props;
+ battery->psy.num_properties = ARRAY_SIZE(rt5033_battery_props);
+
+ ret = power_supply_register(&client->dev, &battery->psy);
+ if (ret) {
+ dev_err(&client->dev, "Failed to register power supply\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rt5033_battery_remove(struct i2c_client *client)
+{
+ struct rt5033_battery *battery = i2c_get_clientdata(client);
+
+ power_supply_unregister(&battery->psy);
+
+ return 0;
+}
+
+static const struct i2c_device_id rt5033_battery_id[] = {
+ { "rt5033-battery", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rt5033_battery_id);
+
+static struct i2c_driver rt5033_battery_driver = {
+ .driver = {
+ .name = "rt5033-battery",
+ },
+ .probe = rt5033_battery_probe,
+ .remove = rt5033_battery_remove,
+ .id_table = rt5033_battery_id,
+};
+module_i2c_driver(rt5033_battery_driver);
+
+MODULE_DESCRIPTION("Richtek RT5033 fuel gauge driver");
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index 0152f35dca5c..f26b1fa00fe1 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -21,6 +21,13 @@
#include <linux/delay.h>
#include <linux/vermagic.h>
+enum test_power_id {
+ TEST_AC,
+ TEST_BATTERY,
+ TEST_USB,
+ TEST_POWER_NUM,
+};
+
static int ac_online = 1;
static int usb_online = 1;
static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
@@ -147,7 +154,7 @@ static char *test_power_ac_supplied_to[] = {
};
static struct power_supply test_power_supplies[] = {
- {
+ [TEST_AC] = {
.name = "test_ac",
.type = POWER_SUPPLY_TYPE_MAINS,
.supplied_to = test_power_ac_supplied_to,
@@ -155,13 +162,15 @@ static struct power_supply test_power_supplies[] = {
.properties = test_power_ac_props,
.num_properties = ARRAY_SIZE(test_power_ac_props),
.get_property = test_power_get_ac_property,
- }, {
+ },
+ [TEST_BATTERY] = {
.name = "test_battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = test_power_battery_props,
.num_properties = ARRAY_SIZE(test_power_battery_props),
.get_property = test_power_get_battery_property,
- }, {
+ },
+ [TEST_USB] = {
.name = "test_usb",
.type = POWER_SUPPLY_TYPE_USB,
.supplied_to = test_power_ac_supplied_to,
@@ -178,6 +187,8 @@ static int __init test_power_init(void)
int i;
int ret;
+ BUILD_BUG_ON(TEST_POWER_NUM != ARRAY_SIZE(test_power_supplies));
+
for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) {
ret = power_supply_register(NULL, &test_power_supplies[i]);
if (ret) {
@@ -309,7 +320,7 @@ static inline void signal_power_supply_changed(struct power_supply *psy)
static int param_set_ac_online(const char *key, const struct kernel_param *kp)
{
ac_online = map_get_value(map_ac_online, key, ac_online);
- signal_power_supply_changed(&test_power_supplies[0]);
+ signal_power_supply_changed(&test_power_supplies[TEST_AC]);
return 0;
}
@@ -322,7 +333,7 @@ static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
static int param_set_usb_online(const char *key, const struct kernel_param *kp)
{
usb_online = map_get_value(map_ac_online, key, usb_online);
- signal_power_supply_changed(&test_power_supplies[2]);
+ signal_power_supply_changed(&test_power_supplies[TEST_USB]);
return 0;
}
@@ -336,7 +347,7 @@ static int param_set_battery_status(const char *key,
const struct kernel_param *kp)
{
battery_status = map_get_value(map_status, key, battery_status);
- signal_power_supply_changed(&test_power_supplies[1]);
+ signal_power_supply_changed(&test_power_supplies[TEST_BATTERY]);
return 0;
}
@@ -350,7 +361,7 @@ static int param_set_battery_health(const char *key,
const struct kernel_param *kp)
{
battery_health = map_get_value(map_health, key, battery_health);
- signal_power_supply_changed(&test_power_supplies[1]);
+ signal_power_supply_changed(&test_power_supplies[TEST_BATTERY]);
return 0;
}
@@ -364,7 +375,7 @@ static int param_set_battery_present(const char *key,
const struct kernel_param *kp)
{
battery_present = map_get_value(map_present, key, battery_present);
- signal_power_supply_changed(&test_power_supplies[0]);
+ signal_power_supply_changed(&test_power_supplies[TEST_AC]);
return 0;
}
@@ -380,7 +391,7 @@ static int param_set_battery_technology(const char *key,
{
battery_technology = map_get_value(map_technology, key,
battery_technology);
- signal_power_supply_changed(&test_power_supplies[1]);
+ signal_power_supply_changed(&test_power_supplies[TEST_BATTERY]);
return 0;
}
@@ -401,7 +412,7 @@ static int param_set_battery_capacity(const char *key,
return -EINVAL;
battery_capacity = tmp;
- signal_power_supply_changed(&test_power_supplies[1]);
+ signal_power_supply_changed(&test_power_supplies[TEST_BATTERY]);
return 0;
}
@@ -416,7 +427,7 @@ static int param_set_battery_voltage(const char *key,
return -EINVAL;
battery_voltage = tmp;
- signal_power_supply_changed(&test_power_supplies[1]);
+ signal_power_supply_changed(&test_power_supplies[TEST_BATTERY]);
return 0;
}
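The test_power change replaces magic array indices with an enum, designated initializers, and a BUILD_BUG_ON that keeps the two in sync at compile time. A stripped-down sketch of the idiom, with invented names:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>

enum example_id {
	EXAMPLE_AC,
	EXAMPLE_BATTERY,
	EXAMPLE_NUM,		/* must stay last */
};

static const char *example_names[] = {
	[EXAMPLE_AC]		= "example_ac",
	[EXAMPLE_BATTERY]	= "example_battery",
};

static int __init example_init(void)
{
	/* fails the build if a new enum entry lacks an array slot */
	BUILD_BUG_ON(EXAMPLE_NUM != ARRAY_SIZE(example_names));
	return 0;
}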
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index bc1e5139ba29..d6db822bef84 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -151,11 +151,6 @@ static void __maybe_unused _dump_port_params(unsigned int port_number,
#endif
}
-struct vuart_triggers {
- unsigned long rx;
- unsigned long tx;
-};
-
int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
struct vuart_triggers *trig)
{
diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c
index 0e41737ea835..c429ffca1ab7 100644
--- a/drivers/ps3/sys-manager-core.c
+++ b/drivers/ps3/sys-manager-core.c
@@ -47,7 +47,7 @@ void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
-void ps3_sys_manager_power_off(void)
+void __noreturn ps3_sys_manager_power_off(void)
{
if (ps3_sys_manager_ops.power_off)
ps3_sys_manager_ops.power_off(ps3_sys_manager_ops.dev);
@@ -55,7 +55,7 @@ void ps3_sys_manager_power_off(void)
ps3_sys_manager_halt();
}
-void ps3_sys_manager_restart(void)
+void __noreturn ps3_sys_manager_restart(void)
{
if (ps3_sys_manager_ops.restart)
ps3_sys_manager_ops.restart(ps3_sys_manager_ops.dev);
@@ -63,7 +63,7 @@ void ps3_sys_manager_restart(void)
ps3_sys_manager_halt();
}
-void ps3_sys_manager_halt(void)
+void __noreturn ps3_sys_manager_halt(void)
{
pr_emerg("System Halted, OK to turn off power\n");
local_irq_disable();
diff --git a/drivers/ps3/vuart.h b/drivers/ps3/vuart.h
index eb7f6d94a890..23358b719319 100644
--- a/drivers/ps3/vuart.h
+++ b/drivers/ps3/vuart.h
@@ -82,4 +82,20 @@ void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev);
void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
unsigned int bytes);
+struct vuart_triggers {
+ unsigned long rx;
+ unsigned long tx;
+};
+
+int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
+ struct vuart_triggers *trig);
+int ps3_vuart_set_triggers(struct ps3_system_bus_device *dev, unsigned int tx,
+ unsigned int rx);
+int ps3_vuart_enable_interrupt_tx(struct ps3_system_bus_device *dev);
+int ps3_vuart_disable_interrupt_tx(struct ps3_system_bus_device *dev);
+int ps3_vuart_enable_interrupt_rx(struct ps3_system_bus_device *dev);
+int ps3_vuart_disable_interrupt_rx(struct ps3_system_bus_device *dev);
+int ps3_vuart_enable_interrupt_disconnect(struct ps3_system_bus_device *dev);
+int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev);
+
#endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 8bcfecd66281..eeca70ddbf61 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2430,7 +2430,7 @@ static int tsi721_probe(struct pci_dev *pdev,
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
PCI_EXP_DEVCTL_NOSNOOP_EN,
- 0x2 << MAX_READ_REQUEST_SZ_SHIFT);
+ PCI_EXP_DEVCTL_READRQ_512B);
/* Adjust PCIe completion timeout. */
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index a7b42680a06a..9d2502543ef6 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,8 +72,6 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
-#define MAX_READ_REQUEST_SZ_SHIFT 12
-
/*
* Event Management Registers
*/
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c3a60b57a865..a6f116aa5235 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802
Exynos5420/Exynos5800 SoCs to control various voltages.
It includes support for control of voltage and ramp speed.
+config REGULATOR_MAX77843
+ tristate "Maxim 77843 regulator"
+ depends on MFD_MAX77843
+ help
+ This driver controls a Maxim 77843 regulator.
+	  The regulator includes two 'SAFEOUT' outputs for USB (Universal Serial Bus).
+	  This is suitable for Exynos5433 SoC chips.
+
config REGULATOR_MC13XXX_CORE
tristate
@@ -433,6 +441,15 @@ config REGULATOR_MC13892
Say y here to support the regulators found on the Freescale MC13892
PMIC.
+config REGULATOR_MT6397
+ tristate "MediaTek MT6397 PMIC"
+ depends on MFD_MT6397
+ help
+	  Say y here to enable the power regulators of the MediaTek MT6397 PMIC.
+	  This driver supports control of the device's different power rails
+	  through the regulator interface.
+
config REGULATOR_PALMAS
tristate "TI Palmas PMIC Regulators"
depends on MFD_PALMAS
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1f28ebfc6f3a..2c4da15e1545 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
+obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f23d7e1f2ee7..e4331f5e5d7d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -32,11 +32,13 @@
#define AXP20X_FREQ_DCDC_MASK 0x0f
-#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \
- _emask, _enable_val, _disable_val) \
+#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
+ _ereg, _emask, _enable_val, _disable_val) \
[AXP20X_##_id] = { \
.name = #_id, \
.supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = AXP20X_##_id, \
.n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -52,11 +54,13 @@
.ops = &axp20x_ops, \
}
-#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \
- _emask) \
+#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
+ _ereg, _emask) \
[AXP20X_##_id] = { \
.name = #_id, \
.supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = AXP20X_##_id, \
.n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -70,10 +74,12 @@
.ops = &axp20x_ops, \
}
-#define AXP20X_DESC_FIXED(_id, _supply, _volt) \
+#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt) \
[AXP20X_##_id] = { \
.name = #_id, \
.supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = AXP20X_##_id, \
.n_voltages = 1, \
@@ -82,10 +88,13 @@
.ops = &axp20x_ops_fixed \
}
-#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask) \
+#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg, \
+ _emask) \
[AXP20X_##_id] = { \
.name = #_id, \
.supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = AXP20X_##_id, \
.n_voltages = ARRAY_SIZE(_table), \
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = {
};
static const struct regulator_desc axp20x_regulators[] = {
- AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f,
- AXP20X_PWR_OUT_CTRL, 0x10),
- AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f,
- AXP20X_PWR_OUT_CTRL, 0x02),
- AXP20X_DESC_FIXED(LDO1, "acin", 1300),
- AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0,
- AXP20X_PWR_OUT_CTRL, 0x04),
- AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f,
- AXP20X_PWR_OUT_CTRL, 0x40),
- AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f,
- AXP20X_PWR_OUT_CTRL, 0x08),
- AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0,
- AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED,
- AXP20X_IO_DISABLED),
-};
-
-#define AXP_MATCH(_name, _id) \
- [AXP20X_##_id] = { \
- .name = #_name, \
- .driver_data = (void *) &axp20x_regulators[AXP20X_##_id], \
- }
-
-static struct of_regulator_match axp20x_matches[] = {
- AXP_MATCH(dcdc2, DCDC2),
- AXP_MATCH(dcdc3, DCDC3),
- AXP_MATCH(ldo1, LDO1),
- AXP_MATCH(ldo2, LDO2),
- AXP_MATCH(ldo3, LDO3),
- AXP_MATCH(ldo4, LDO4),
- AXP_MATCH(ldo5, LDO5),
+ AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT,
+ 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
+ AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT,
+ 0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
+ AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300),
+ AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
+ AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
+ AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT,
+ 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
+ AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
+ AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
+ AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
+ AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
+ AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
};
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
if (!regulators) {
dev_warn(&pdev->dev, "regulators node not found\n");
} else {
- ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches,
- ARRAY_SIZE(axp20x_matches));
- if (ret < 0) {
- dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
-
dcdcfreq = 1500;
of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
{
struct regulator_dev *rdev;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct regulator_config config = { };
- struct regulator_init_data *init_data;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ .regmap = axp20x->regmap,
+ };
int ret, i;
u32 workmode;
- ret = axp20x_regulator_parse_dt(pdev);
- if (ret)
- return ret;
+ /* This only sets the dcdc freq. Ignore any errors */
+ axp20x_regulator_parse_dt(pdev);
for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
- init_data = axp20x_matches[i].init_data;
-
- config.dev = pdev->dev.parent;
- config.init_data = init_data;
- config.regmap = axp20x->regmap;
- config.of_node = axp20x_matches[i].of_node;
-
rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i],
&config);
if (IS_ERR(rdev)) {
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
return PTR_ERR(rdev);
}
- ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode",
+ ret = of_property_read_u32(rdev->dev.of_node,
+ "x-powers,dcdc-workmode",
&workmode);
if (!ret) {
if (axp20x_set_dcdc_workmode(rdev, i, workmode))
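The axp20x conversion above drops the driver's own of_regulator_match() table in favour of the core's descriptor-based DT parsing, where each regulator_desc names its node via .of_match and .regulators_node. A minimal, hypothetical descriptor showing only the fields relevant to that change (it is not a complete descriptor; a real one also needs .ops and voltage fields):

#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>

static const struct regulator_desc example_regulator = {
	.name			= "EXAMPLE_LDO",
	.of_match		= of_match_ptr("ldo1"),		/* child node name */
	.regulators_node	= of_match_ptr("regulators"),	/* parent container */
	.type			= REGULATOR_VOLTAGE,
	.id			= 0,
};

With these fields set, devm_regulator_register() finds the init data and of_node itself, which is why the patch can delete the axp20x_matches table and the per-iteration config.init_data / config.of_node assignments.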
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e225711bb8bc..b899947d839d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev,
static DEVICE_ATTR(bypass, 0444,
regulator_bypass_show, NULL);
-/*
- * These are the only attributes are present for all regulators.
- * Other attributes are a function of regulator functionality.
- */
-static struct attribute *regulator_dev_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_num_users.attr,
- &dev_attr_type.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(regulator_dev);
-
-static void regulator_dev_release(struct device *dev)
-{
- struct regulator_dev *rdev = dev_get_drvdata(dev);
- kfree(rdev);
-}
-
-static struct class regulator_class = {
- .name = "regulator",
- .dev_release = regulator_dev_release,
- .dev_groups = regulator_dev_groups,
-};
-
/* Calculate the new optimum regulator operating mode based on the new total
* consumer load. All locks held by caller */
-static void drms_uA_update(struct regulator_dev *rdev)
+static int drms_uA_update(struct regulator_dev *rdev)
{
struct regulator *sibling;
int current_uA = 0, output_uV, input_uV, err;
unsigned int mode;
+ /*
+ * first check to see if we can set modes at all, otherwise just
+ * tell the consumer everything is OK.
+ */
err = regulator_check_drms(rdev);
- if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
- (!rdev->desc->ops->get_voltage &&
- !rdev->desc->ops->get_voltage_sel) ||
- !rdev->desc->ops->set_mode)
- return;
+ if (err < 0)
+ return 0;
+
+ if (!rdev->desc->ops->get_optimum_mode)
+ return 0;
+
+ if (!rdev->desc->ops->set_mode)
+ return -EINVAL;
/* get output voltage */
output_uV = _regulator_get_voltage(rdev);
- if (output_uV <= 0)
- return;
+ if (output_uV <= 0) {
+ rdev_err(rdev, "invalid output voltage found\n");
+ return -EINVAL;
+ }
/* get input voltage */
input_uV = 0;
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev)
input_uV = regulator_get_voltage(rdev->supply);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
- if (input_uV <= 0)
- return;
+ if (input_uV <= 0) {
+ rdev_err(rdev, "invalid input voltage found\n");
+ return -EINVAL;
+ }
/* calc total requested load */
list_for_each_entry(sibling, &rdev->consumer_list, list)
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev)
/* check the new mode is allowed */
err = regulator_mode_constrain(rdev, &mode);
- if (err == 0)
- rdev->desc->ops->set_mode(rdev, mode);
+ if (err < 0) {
+ rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+ current_uA, input_uV, output_uV);
+ return err;
+ }
+
+ err = rdev->desc->ops->set_mode(rdev, mode);
+ if (err < 0)
+ rdev_err(rdev, "failed to set optimum mode %x\n", mode);
+
+ return err;
}
static int suspend_set_state(struct regulator_dev *rdev,
@@ -1488,7 +1484,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
@@ -1503,12 +1499,14 @@ static void _regulator_put(struct regulator *regulator)
/* remove any sysfs entries */
if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+ mutex_lock(&rdev->mutex);
kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
rdev->open_count--;
rdev->exclusive = 0;
+ mutex_unlock(&rdev->mutex);
module_put(rdev->owner);
}
@@ -3024,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode);
int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator *consumer;
- int ret, output_uV, input_uV = 0, total_uA_load = 0;
- unsigned int mode;
-
- if (rdev->supply)
- input_uV = regulator_get_voltage(rdev->supply);
+ int ret;
mutex_lock(&rdev->mutex);
-
- /*
- * first check to see if we can set modes at all, otherwise just
- * tell the consumer everything is OK.
- */
regulator->uA_load = uA_load;
- ret = regulator_check_drms(rdev);
- if (ret < 0) {
- ret = 0;
- goto out;
- }
-
- if (!rdev->desc->ops->get_optimum_mode)
- goto out;
-
- /*
- * we can actually do this so any errors are indicators of
- * potential real failure.
- */
- ret = -EINVAL;
-
- if (!rdev->desc->ops->set_mode)
- goto out;
-
- /* get output voltage */
- output_uV = _regulator_get_voltage(rdev);
- if (output_uV <= 0) {
- rdev_err(rdev, "invalid output voltage found\n");
- goto out;
- }
-
- /* No supply? Use constraint voltage */
- if (input_uV <= 0)
- input_uV = rdev->constraints->input_uV;
- if (input_uV <= 0) {
- rdev_err(rdev, "invalid input voltage found\n");
- goto out;
- }
-
- /* calc total requested load for this regulator */
- list_for_each_entry(consumer, &rdev->consumer_list, list)
- total_uA_load += consumer->uA_load;
-
- mode = rdev->desc->ops->get_optimum_mode(rdev,
- input_uV, output_uV,
- total_uA_load);
- ret = regulator_mode_constrain(rdev, &mode);
- if (ret < 0) {
- rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
- total_uA_load, input_uV, output_uV);
- goto out;
- }
-
- ret = rdev->desc->ops->set_mode(rdev, mode);
- if (ret < 0) {
- rdev_err(rdev, "failed to set optimum mode %x\n", mode);
- goto out;
- }
- ret = mode;
-out:
+ ret = drms_uA_update(rdev);
mutex_unlock(&rdev->mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
@@ -3434,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode)
}
EXPORT_SYMBOL_GPL(regulator_mode_to_status);
+static struct attribute *regulator_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_num_users.attr,
+ &dev_attr_type.attr,
+ &dev_attr_microvolts.attr,
+ &dev_attr_microamps.attr,
+ &dev_attr_opmode.attr,
+ &dev_attr_state.attr,
+ &dev_attr_status.attr,
+ &dev_attr_bypass.attr,
+ &dev_attr_requested_microamps.attr,
+ &dev_attr_min_microvolts.attr,
+ &dev_attr_max_microvolts.attr,
+ &dev_attr_min_microamps.attr,
+ &dev_attr_max_microamps.attr,
+ &dev_attr_suspend_standby_state.attr,
+ &dev_attr_suspend_mem_state.attr,
+ &dev_attr_suspend_disk_state.attr,
+ &dev_attr_suspend_standby_microvolts.attr,
+ &dev_attr_suspend_mem_microvolts.attr,
+ &dev_attr_suspend_disk_microvolts.attr,
+ &dev_attr_suspend_standby_mode.attr,
+ &dev_attr_suspend_mem_mode.attr,
+ &dev_attr_suspend_disk_mode.attr,
+ NULL
+};
+
/*
* To avoid cluttering sysfs (and memory) with useless state, only
* create attributes that can be meaningfully displayed.
*/
-static int add_regulator_attributes(struct regulator_dev *rdev)
+static umode_t regulator_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
{
- struct device *dev = &rdev->dev;
+ struct device *dev = kobj_to_dev(kobj);
+ struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev);
const struct regulator_ops *ops = rdev->desc->ops;
- int status = 0;
+ umode_t mode = attr->mode;
+
+ /* these three are always present */
+ if (attr == &dev_attr_name.attr ||
+ attr == &dev_attr_num_users.attr ||
+ attr == &dev_attr_type.attr)
+ return mode;
/* some attributes need specific methods to be displayed */
- if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
- (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
- (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
- (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
- status = device_create_file(dev, &dev_attr_microvolts);
- if (status < 0)
- return status;
- }
- if (ops->get_current_limit) {
- status = device_create_file(dev, &dev_attr_microamps);
- if (status < 0)
- return status;
- }
- if (ops->get_mode) {
- status = device_create_file(dev, &dev_attr_opmode);
- if (status < 0)
- return status;
- }
- if (rdev->ena_pin || ops->is_enabled) {
- status = device_create_file(dev, &dev_attr_state);
- if (status < 0)
- return status;
- }
- if (ops->get_status) {
- status = device_create_file(dev, &dev_attr_status);
- if (status < 0)
- return status;
- }
- if (ops->get_bypass) {
- status = device_create_file(dev, &dev_attr_bypass);
- if (status < 0)
- return status;
+ if (attr == &dev_attr_microvolts.attr) {
+ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+ (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
+ (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
+ (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1))
+ return mode;
+ return 0;
}
+ if (attr == &dev_attr_microamps.attr)
+ return ops->get_current_limit ? mode : 0;
+
+ if (attr == &dev_attr_opmode.attr)
+ return ops->get_mode ? mode : 0;
+
+ if (attr == &dev_attr_state.attr)
+ return (rdev->ena_pin || ops->is_enabled) ? mode : 0;
+
+ if (attr == &dev_attr_status.attr)
+ return ops->get_status ? mode : 0;
+
+ if (attr == &dev_attr_bypass.attr)
+ return ops->get_bypass ? mode : 0;
+
/* some attributes are type-specific */
- if (rdev->desc->type == REGULATOR_CURRENT) {
- status = device_create_file(dev, &dev_attr_requested_microamps);
- if (status < 0)
- return status;
- }
+ if (attr == &dev_attr_requested_microamps.attr)
+ return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
/* all the other attributes exist to support constraints;
* don't show them if there are no constraints, or if the
* relevant supporting methods are missing.
*/
if (!rdev->constraints)
- return status;
+ return 0;
/* constraints need specific supporting methods */
- if (ops->set_voltage || ops->set_voltage_sel) {
- status = device_create_file(dev, &dev_attr_min_microvolts);
- if (status < 0)
- return status;
- status = device_create_file(dev, &dev_attr_max_microvolts);
- if (status < 0)
- return status;
- }
- if (ops->set_current_limit) {
- status = device_create_file(dev, &dev_attr_min_microamps);
- if (status < 0)
- return status;
- status = device_create_file(dev, &dev_attr_max_microamps);
- if (status < 0)
- return status;
- }
-
- status = device_create_file(dev, &dev_attr_suspend_standby_state);
- if (status < 0)
- return status;
- status = device_create_file(dev, &dev_attr_suspend_mem_state);
- if (status < 0)
- return status;
- status = device_create_file(dev, &dev_attr_suspend_disk_state);
- if (status < 0)
- return status;
+ if (attr == &dev_attr_min_microvolts.attr ||
+ attr == &dev_attr_max_microvolts.attr)
+ return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0;
+
+ if (attr == &dev_attr_min_microamps.attr ||
+ attr == &dev_attr_max_microamps.attr)
+ return ops->set_current_limit ? mode : 0;
+
+ if (attr == &dev_attr_suspend_standby_state.attr ||
+ attr == &dev_attr_suspend_mem_state.attr ||
+ attr == &dev_attr_suspend_disk_state.attr)
+ return mode;
+
+ if (attr == &dev_attr_suspend_standby_microvolts.attr ||
+ attr == &dev_attr_suspend_mem_microvolts.attr ||
+ attr == &dev_attr_suspend_disk_microvolts.attr)
+ return ops->set_suspend_voltage ? mode : 0;
+
+ if (attr == &dev_attr_suspend_standby_mode.attr ||
+ attr == &dev_attr_suspend_mem_mode.attr ||
+ attr == &dev_attr_suspend_disk_mode.attr)
+ return ops->set_suspend_mode ? mode : 0;
+
+ return mode;
+}
+
+static const struct attribute_group regulator_dev_group = {
+ .attrs = regulator_dev_attrs,
+ .is_visible = regulator_attr_is_visible,
+};
+
+static const struct attribute_group *regulator_dev_groups[] = {
+ &regulator_dev_group,
+ NULL
+};
- if (ops->set_suspend_voltage) {
- status = device_create_file(dev,
- &dev_attr_suspend_standby_microvolts);
- if (status < 0)
- return status;
- status = device_create_file(dev,
- &dev_attr_suspend_mem_microvolts);
- if (status < 0)
- return status;
- status = device_create_file(dev,
- &dev_attr_suspend_disk_microvolts);
- if (status < 0)
- return status;
- }
-
- if (ops->set_suspend_mode) {
- status = device_create_file(dev,
- &dev_attr_suspend_standby_mode);
- if (status < 0)
- return status;
- status = device_create_file(dev,
- &dev_attr_suspend_mem_mode);
- if (status < 0)
- return status;
- status = device_create_file(dev,
- &dev_attr_suspend_disk_mode);
- if (status < 0)
- return status;
- }
-
- return status;
+static void regulator_dev_release(struct device *dev)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ kfree(rdev);
}
+static struct class regulator_class = {
+ .name = "regulator",
+ .dev_release = regulator_dev_release,
+ .dev_groups = regulator_dev_groups,
+};
+
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
@@ -3573,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
/**
* regulator_register - register regulator
* @regulator_desc: regulator to register
- * @config: runtime configuration for regulator
+ * @cfg: runtime configuration for regulator
*
* Called by regulator drivers to register a regulator.
* Returns a valid pointer to struct regulator_dev on success
@@ -3581,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
*/
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
- const struct regulator_config *config)
+ const struct regulator_config *cfg)
{
const struct regulation_constraints *constraints = NULL;
const struct regulator_init_data *init_data;
- static atomic_t regulator_no = ATOMIC_INIT(0);
+ struct regulator_config *config = NULL;
+ static atomic_t regulator_no = ATOMIC_INIT(-1);
struct regulator_dev *rdev;
struct device *dev;
int ret, i;
const char *supply = NULL;
- if (regulator_desc == NULL || config == NULL)
+ if (regulator_desc == NULL || cfg == NULL)
return ERR_PTR(-EINVAL);
- dev = config->dev;
+ dev = cfg->dev;
WARN_ON(!dev);
if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
@@ -3624,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (rdev == NULL)
return ERR_PTR(-ENOMEM);
- init_data = regulator_of_get_init_data(dev, regulator_desc,
+ /*
+ * Duplicate the config so the driver could override it after
+ * parsing init data.
+ */
+ config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
+ if (config == NULL) {
+ kfree(rdev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_data = regulator_of_get_init_data(dev, regulator_desc, config,
&rdev->dev.of_node);
if (!init_data) {
init_data = config->init_data;
@@ -3658,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
/* register with sysfs */
rdev->dev.class = &regulator_class;
rdev->dev.parent = dev;
- dev_set_name(&rdev->dev, "regulator.%d",
- atomic_inc_return(&regulator_no) - 1);
+ dev_set_name(&rdev->dev, "regulator.%lu",
+ (unsigned long) atomic_inc_return(&regulator_no));
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
@@ -3692,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto scrub;
- /* add attributes supported by this regulator */
- ret = add_regulator_attributes(rdev);
- if (ret < 0)
- goto scrub;
-
if (init_data && init_data->supply_regulator)
supply = init_data->supply_regulator;
else if (regulator_desc->supply_name)
@@ -3752,6 +3704,7 @@ add_dev:
rdev_init_debugfs(rdev);
out:
mutex_unlock(&regulator_list_mutex);
+ kfree(config);
return rdev;
unset_supplies:
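The core.c change above replaces a long chain of conditional device_create_file() calls with one static attribute list whose visibility is decided by an .is_visible callback when the device registers. A minimal sketch of that sysfs pattern, with an invented attribute name:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t flavour_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "vanilla\n");
}
static DEVICE_ATTR_RO(flavour);

static struct attribute *example_attrs[] = {
	&dev_attr_flavour.attr,
	NULL
};

static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	/* return 0 to hide the file, or attr->mode to expose it */
	return attr->mode;
}

static const struct attribute_group example_group = {
	.attrs		= example_attrs,
	.is_visible	= example_attr_is_visible,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL
};

/*
 * Assign example_groups to class->dev_groups (or device->groups) before
 * device_register(); the core then creates only the visible files, which is
 * what regulator_class does after this patch.
 */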
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index c78d2106d6cb..01343419555e 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -24,6 +24,7 @@
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/da9211.h>
#include "da9211-regulator.h"
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
continue;
pdata->init_data[n] = da9211_matches[i].init_data;
-
+ pdata->reg_node[n] = da9211_matches[i].of_node;
+ pdata->gpio_ren[n] =
+ of_get_named_gpio(da9211_matches[i].of_node,
+ "enable-gpios", 0);
n++;
}
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip)
config.dev = chip->dev;
config.driver_data = chip;
config.regmap = chip->regmap;
- config.of_node = chip->dev->of_node;
+ config.of_node = chip->pdata->reg_node[i];
+
+ if (gpio_is_valid(chip->pdata->gpio_ren[i])) {
+ config.ena_gpio = chip->pdata->gpio_ren[i];
+ config.ena_gpio_initialized = true;
+ } else {
+ config.ena_gpio = -EINVAL;
+ config.ena_gpio_initialized = false;
+ }
chip->rdev[i] = devm_regulator_register(chip->dev,
&da9211_regulators[i], &config);
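Illustrative sketch (not part of the patch) of the per-regulator enable-GPIO lookup added above, shown in isolation. It is a kernel-context fragment assuming <linux/of_gpio.h>, a device_node pointer "np" for the regulator's child node, and the driver's struct regulator_config "config"; the property name "enable-gpios" matches the driver change:

        int gpio = of_get_named_gpio(np, "enable-gpios", 0);

        if (gpio_is_valid(gpio)) {
                config.ena_gpio = gpio;
                config.ena_gpio_initialized = true;
        } else {
                config.ena_gpio = -EINVAL;
                config.ena_gpio_initialized = false;
        }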
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 6c43ab2d5121..3c25db89a021 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
-static int slew_rates[] = {
+static const int slew_rates[] = {
64000,
32000,
16000,
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
return PTR_ERR_OR_ZERO(di->rdev);
}
-static struct regmap_config fan53555_regmap_config = {
+static const struct regmap_config fan53555_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 80ba2a35a04b..c74ac8734023 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -38,11 +38,13 @@ struct regulator {
#ifdef CONFIG_OF
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
+ struct regulator_config *config,
struct device_node **node);
#else
static inline struct regulator_init_data *
regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
+ struct regulator_config *config,
struct device_node **node)
{
return NULL;
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 92fefd98da58..6e3a15fe00f1 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c,
#ifdef CONFIG_OF
static const struct of_device_id isl9305_dt_ids[] = {
- { .compatible = "isl,isl9305" },
- { .compatible = "isl,isl9305h" },
+ { .compatible = "isl,isl9305" }, /* for backward compat., don't use */
+ { .compatible = "isil,isl9305" },
+ { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */
+ { .compatible = "isil,isl9305h" },
{},
};
#endif
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 021d64d856bb..3de328ab41f3 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -106,7 +106,6 @@ struct lp872x {
struct device *dev;
enum lp872x_id chipid;
struct lp872x_platform_data *pdata;
- struct regulator_dev **regulators;
int num_regulators;
enum lp872x_dvs_state dvs_pin;
int dvs_gpio;
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp)
dev_err(lp->dev, "regulator register err");
return PTR_ERR(rdev);
}
-
- *(lp->regulators + i) = rdev;
}
return 0;
@@ -906,7 +903,7 @@ static struct lp872x_platform_data
static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct lp872x *lp;
- int ret, size, num_regulators;
+ int ret;
const int lp872x_num_regulators[] = {
[LP8720] = LP8720_NUM_REGULATORS,
[LP8725] = LP8725_NUM_REGULATORS,
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
if (!lp)
- goto err_mem;
-
- num_regulators = lp872x_num_regulators[id->driver_data];
- size = sizeof(struct regulator_dev *) * num_regulators;
+ return -ENOMEM;
- lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL);
- if (!lp->regulators)
- goto err_mem;
+ lp->num_regulators = lp872x_num_regulators[id->driver_data];
lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
if (IS_ERR(lp->regmap)) {
ret = PTR_ERR(lp->regmap);
dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
- goto err_dev;
+ return ret;
}
lp->dev = &cl->dev;
lp->pdata = dev_get_platdata(&cl->dev);
lp->chipid = id->driver_data;
- lp->num_regulators = num_regulators;
i2c_set_clientdata(cl, lp);
ret = lp872x_config(lp);
if (ret)
- goto err_dev;
+ return ret;
return lp872x_regulator_register(lp);
-
-err_mem:
- return -ENOMEM;
-err_dev:
- return ret;
}
static const struct of_device_id lp872x_dt_ids[] = {
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index bf9a44c5fdd2..b3678d289619 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = {
static const struct regulator_desc max14577_supported_regulators[] = {
[MAX14577_SAFEOUT] = {
.name = "SAFEOUT",
+ .of_match = of_match_ptr("SAFEOUT"),
+ .regulators_node = of_match_ptr("regulators"),
.id = MAX14577_SAFEOUT,
.ops = &max14577_safeout_ops,
.type = REGULATOR_VOLTAGE,
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = {
},
[MAX14577_CHARGER] = {
.name = "CHARGER",
+ .of_match = of_match_ptr("CHARGER"),
+ .regulators_node = of_match_ptr("regulators"),
.id = MAX14577_CHARGER,
.ops = &max14577_charger_ops,
.type = REGULATOR_CURRENT,
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = {
static const struct regulator_desc max77836_supported_regulators[] = {
[MAX14577_SAFEOUT] = {
.name = "SAFEOUT",
+ .of_match = of_match_ptr("SAFEOUT"),
+ .regulators_node = of_match_ptr("regulators"),
.id = MAX14577_SAFEOUT,
.ops = &max14577_safeout_ops,
.type = REGULATOR_VOLTAGE,
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
},
[MAX14577_CHARGER] = {
.name = "CHARGER",
+ .of_match = of_match_ptr("CHARGER"),
+ .regulators_node = of_match_ptr("regulators"),
.id = MAX14577_CHARGER,
.ops = &max14577_charger_ops,
.type = REGULATOR_CURRENT,
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
},
[MAX77836_LDO1] = {
.name = "LDO1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
.id = MAX77836_LDO1,
.ops = &max77836_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
},
[MAX77836_LDO2] = {
.name = "LDO2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
.id = MAX77836_LDO2,
.ops = &max77836_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = {
{ .name = "LDO2", },
};
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
- enum maxim_device_type dev_type)
-{
- int ret;
- struct device_node *np;
- struct of_regulator_match *regulator_matches;
- unsigned int regulator_matches_size;
-
- np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
- if (!np) {
- dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
- return -EINVAL;
- }
-
- switch (dev_type) {
- case MAXIM_DEVICE_TYPE_MAX77836:
- regulator_matches = max77836_regulator_matches;
- regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
- break;
- case MAXIM_DEVICE_TYPE_MAX14577:
- default:
- regulator_matches = max14577_regulator_matches;
- regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
- }
-
- ret = of_regulator_match(&pdev->dev, np, regulator_matches,
- regulator_matches_size);
- if (ret < 0)
- dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- else
- ret = 0;
-
- of_node_put(np);
-
- return ret;
-}
-
static inline struct regulator_init_data *match_init_data(int index,
enum maxim_device_type dev_type)
{
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index,
}
}
#else /* CONFIG_OF */
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
- enum maxim_device_type dev_type)
-{
- return 0;
-}
static inline struct regulator_init_data *match_init_data(int index,
enum maxim_device_type dev_type)
{
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev)
{
struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
- int i, ret;
+ int i, ret = 0;
struct regulator_config config = {};
const struct regulator_desc *supported_regulators;
unsigned int supported_regulators_size;
enum maxim_device_type dev_type = max14577->dev_type;
- ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
- if (ret)
- return ret;
-
switch (dev_type) {
case MAXIM_DEVICE_TYPE_MAX77836:
supported_regulators = max77836_supported_regulators;
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev)
supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
}
- config.dev = &pdev->dev;
+ config.dev = max14577->dev;
config.driver_data = max14577;
for (i = 0; i < supported_regulators_size; i++) {
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 10d206266ac2..15fb1416bfbd 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -26,6 +26,7 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -46,6 +47,11 @@
#define MAX77686_DVS_UVSTEP 12500
/*
+ * Value for configuring buck[89] and LDO{20,21,22} as GPIO control.
+ * It is the same as 'off' for other regulators.
+ */
+#define MAX77686_GPIO_CONTROL 0x0
+/*
* Values used for configuring LDOs and bucks.
* Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
*/
@@ -82,6 +88,8 @@ enum max77686_ramp_rate {
};
struct max77686_data {
+ u64 gpio_enabled:MAX77686_REGULATORS;
+
/* Array indexed by regulator id */
unsigned int opmode[MAX77686_REGULATORS];
};
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id)
}
}
+/*
+ * When a regulator is configured for GPIO control, GPIO control
+ * replaces "normal" mode. Any change from low-power mode to normal
+ * should therefore switch to GPIO control instead.
+ * Map normal mode to the proper value for such regulators.
+ */
+static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
+ int id)
+{
+ switch (id) {
+ case MAX77686_BUCK8:
+ case MAX77686_BUCK9:
+ case MAX77686_LDO20 ... MAX77686_LDO22:
+ if (max77686->gpio_enabled & (1 << id))
+ return MAX77686_GPIO_CONTROL;
+ }
+
+ return MAX77686_NORMAL;
+}
+
/* Some BUCKs and LDOs support Normal[ON/OFF] mode during suspend */
static int max77686_set_suspend_disable(struct regulator_dev *rdev)
{
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
val = MAX77686_LDO_LOWPOWER_PWRREQ;
break;
case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
- val = MAX77686_NORMAL;
+ val = max77686_map_normal_mode(max77686, id);
break;
default:
pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
{
unsigned int val;
struct max77686_data *max77686 = rdev_get_drvdata(rdev);
- int ret;
+ int ret, id = rdev_get_id(rdev);
switch (mode) {
case REGULATOR_MODE_STANDBY: /* switch off */
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
val = MAX77686_LDO_LOWPOWER_PWRREQ;
break;
case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
- val = MAX77686_NORMAL;
+ val = max77686_map_normal_mode(max77686, id);
break;
default:
pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
if (ret)
return ret;
- max77686->opmode[rdev_get_id(rdev)] = val;
+ max77686->opmode[id] = val;
return 0;
}
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev)
shift = max77686_get_opmode_shift(id);
if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
- max77686->opmode[id] = MAX77686_NORMAL;
+ max77686->opmode[id] = max77686_map_normal_mode(max77686, id);
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
MAX77686_RAMP_RATE_MASK, ramp_value << 6);
}
+static int max77686_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct max77686_data *max77686 = config->driver_data;
+
+ switch (desc->id) {
+ case MAX77686_BUCK8:
+ case MAX77686_BUCK9:
+ case MAX77686_LDO20 ... MAX77686_LDO22:
+ config->ena_gpio = of_get_named_gpio(np,
+ "maxim,ena-gpios", 0);
+ config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+ config->ena_gpio_initialized = true;
+ break;
+ default:
+ return 0;
+ }
+
+ if (gpio_is_valid(config->ena_gpio)) {
+ max77686->gpio_enabled |= (1 << desc->id);
+
+ return regmap_update_bits(config->regmap, desc->enable_reg,
+ desc->enable_mask,
+ MAX77686_GPIO_CONTROL);
+ }
+
+ return 0;
+}
+
static struct regulator_ops max77686_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.name = "LDO"#num, \
.of_match = of_match_ptr("LDO"#num), \
.regulators_node = of_match_ptr("voltage-regulators"), \
+ .of_parse_cb = max77686_of_parse_cb, \
.id = MAX77686_LDO##num, \
.ops = &max77686_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("voltage-regulators"), \
+ .of_parse_cb = max77686_of_parse_cb, \
.id = MAX77686_BUCK##num, \
.ops = &max77686_ops, \
.type = REGULATOR_VOLTAGE, \
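A plain-C stand-in (not part of the patch) for the normal-mode remapping introduced above: once a regulator has been handed to a GPIO, tracked in the gpio_enabled bitmap, "normal" resolves to the GPIO-control opmode (0x0) rather than the usual normal value. MAX77686_NORMAL's numeric value is not shown in this hunk, so 0x3 below is an illustrative assumption only.

#include <stdint.h>

#define GPIO_CONTROL    0x0     /* same as "off" for other regulators */
#define NORMAL_MODE     0x3     /* assumed value, for illustration only */

static unsigned int map_normal_mode(uint64_t gpio_enabled, int id,
                                    int gpio_capable)
{
        /* GPIO-capable regulators claimed by a GPIO stay in GPIO control */
        if (gpio_capable && (gpio_enabled & (1ULL << id)))
                return GPIO_CONTROL;
        return NORMAL_MODE;
}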
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
new file mode 100644
index 000000000000..c132ef527cdd
--- /dev/null
+++ b/drivers/regulator/max77843.c
@@ -0,0 +1,227 @@
+/*
+ * max77843.c - Regulator driver for the Maxim MAX77843
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max77843-private.h>
+#include <linux/regulator/of_regulator.h>
+
+enum max77843_regulator_type {
+ MAX77843_SAFEOUT1 = 0,
+ MAX77843_SAFEOUT2,
+ MAX77843_CHARGER,
+
+ MAX77843_NUM,
+};
+
+static const unsigned int max77843_safeout_voltage_table[] = {
+ 4850000,
+ 4900000,
+ 4950000,
+ 3300000,
+};
+
+static int max77843_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev->regmap;
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_read(regmap, rdev->desc->enable_reg, &reg);
+ if (ret) {
+ dev_err(&rdev->dev, "Fialed to read charger register\n");
+ return ret;
+ }
+
+ return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask;
+}
+
+static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev->regmap;
+ unsigned int chg_min_uA = rdev->constraints->min_uA;
+ unsigned int chg_max_uA = rdev->constraints->max_uA;
+ unsigned int val;
+ int ret;
+ unsigned int reg, sel;
+
+ ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to read charger register\n");
+ return ret;
+ }
+
+ sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
+
+ if (sel < 0x03)
+ sel = 0;
+ else
+ sel -= 2;
+
+ val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
+ if (val > chg_max_uA)
+ return -EINVAL;
+
+ return val;
+}
+
+static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct regmap *regmap = rdev->regmap;
+ unsigned int chg_min_uA = rdev->constraints->min_uA;
+ int sel = 0;
+
+ while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
+ sel++;
+
+ if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
+ return -EINVAL;
+
+ sel += 2;
+
+ return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
+}
+
+static struct regulator_ops max77843_charger_ops = {
+ .is_enabled = max77843_reg_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_current_limit = max77843_reg_get_current_limit,
+ .set_current_limit = max77843_reg_set_current_limit,
+};
+
+static struct regulator_ops max77843_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc max77843_supported_regulators[] = {
+ [MAX77843_SAFEOUT1] = {
+ .name = "SAFEOUT1",
+ .id = MAX77843_SAFEOUT1,
+ .ops = &max77843_regulator_ops,
+ .of_match = of_match_ptr("SAFEOUT1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
+ .volt_table = max77843_safeout_voltage_table,
+ .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
+ .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1,
+ .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
+ .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK,
+ },
+ [MAX77843_SAFEOUT2] = {
+ .name = "SAFEOUT2",
+ .id = MAX77843_SAFEOUT2,
+ .ops = &max77843_regulator_ops,
+ .of_match = of_match_ptr("SAFEOUT2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
+ .volt_table = max77843_safeout_voltage_table,
+ .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
+ .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2,
+ .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
+ .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK,
+ },
+ [MAX77843_CHARGER] = {
+ .name = "CHARGER",
+ .id = MAX77843_CHARGER,
+ .ops = &max77843_charger_ops,
+ .of_match = of_match_ptr("CHARGER"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
+ .enable_mask = MAX77843_CHG_MASK,
+ },
+};
+
+static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
+{
+ switch (reg_id) {
+ case MAX77843_SAFEOUT1:
+ case MAX77843_SAFEOUT2:
+ return max77843->regmap;
+ case MAX77843_CHARGER:
+ return max77843->regmap_chg;
+ default:
+ return max77843->regmap;
+ }
+}
+
+static int max77843_regulator_probe(struct platform_device *pdev)
+{
+ struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ int i;
+
+ config.dev = max77843->dev;
+ config.driver_data = max77843;
+
+ for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
+ struct regulator_dev *regulator;
+
+ config.regmap = max77843_get_regmap(max77843,
+ max77843_supported_regulators[i].id);
+
+ regulator = devm_regulator_register(&pdev->dev,
+ &max77843_supported_regulators[i], &config);
+ if (IS_ERR(regulator)) {
+ dev_err(&pdev->dev,
+ "Failed to regiser regulator-%d\n", i);
+ return PTR_ERR(regulator);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id max77843_regulator_id[] = {
+ { "max77843-regulator", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver max77843_regulator_driver = {
+ .driver = {
+ .name = "max77843-regulator",
+ },
+ .probe = max77843_regulator_probe,
+ .id_table = max77843_regulator_id,
+};
+
+static int __init max77843_regulator_init(void)
+{
+ return platform_driver_register(&max77843_regulator_driver);
+}
+subsys_initcall(max77843_regulator_init);
+
+static void __exit max77843_regulator_exit(void)
+{
+ platform_driver_unregister(&max77843_regulator_driver);
+}
+module_exit(max77843_regulator_exit);
+
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index c8bddcc8f911..81229579ece9 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
-static struct regulator_ops max8649_dcdc_ops = {
+static const struct regulator_ops max8649_dcdc_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = {
.enable_is_inverted = true,
};
-static struct regmap_config max8649_regmap_config = {
+static const struct regmap_config max8649_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
new file mode 100644
index 000000000000..a5b2f4762677
--- /dev/null
+++ b/drivers/regulator/mt6397-regulator.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6397-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * MT6397 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for querying the enable-signal status of a regulator.
+ * @vselon_reg: Register sections for the hardware control mode of bucks.
+ * @vselctrl_reg: Register for controlling the buck control mode.
+ * @vselctrl_mask: Mask for querying the buck's voltage control mode.
+ */
+struct mt6397_regulator_info {
+ struct regulator_desc desc;
+ u32 qi;
+ u32 vselon_reg;
+ u32 vselctrl_reg;
+ u32 vselctrl_mask;
+};
+
+#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
+ vosel, vosel_mask, voselon, vosel_ctrl) \
+[MT6397_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6397_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6397_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(13), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+}
+
+#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask) \
+[MT6397_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6397_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6397_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .qi = BIT(15), \
+}
+
+#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt) \
+[MT6397_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6397_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6397_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .qi = BIT(15), \
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+ REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
+};
+
+static const u32 ldo_volt_table1[] = {
+ 1500000, 1800000, 2500000, 2800000,
+};
+
+static const u32 ldo_volt_table2[] = {
+ 1800000, 3300000,
+};
+
+static const u32 ldo_volt_table3[] = {
+ 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table4[] = {
+ 1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5[] = {
+ 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5_v2[] = {
+ 1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table6[] = {
+ 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
+};
+
+static const u32 ldo_volt_table7[] = {
+ 1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static int mt6397_get_status(struct regulator_dev *rdev)
+{
+ int ret;
+ u32 regval;
+ struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static struct regulator_ops mt6397_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6397_get_status,
+};
+
+static struct regulator_ops mt6397_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6397_get_status,
+};
+
+static struct regulator_ops mt6397_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6397_get_status,
+};
+
+/* The array is indexed by id(MT6397_ID_XXX) */
+static struct mt6397_regulator_info mt6397_regulators[] = {
+ MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
+ buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
+ MT6397_VCA15_CON10, MT6397_VCA15_CON5),
+ MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
+ buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
+ MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
+ MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
+ buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
+ 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
+ MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
+ buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
+ 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
+ MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
+ buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
+ MT6397_VCORE_CON10, MT6397_VCORE_CON5),
+ MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
+ MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
+ MT6397_VGPU_CON10, MT6397_VGPU_CON5),
+ MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
+ MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
+ MT6397_VDRM_CON10, MT6397_VDRM_CON5),
+ MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
+ buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
+ MT6397_VIO18_CON10, MT6397_VIO18_CON5),
+ MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
+ MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
+ MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
+ MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0),
+ MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000),
+ MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000),
+ MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2,
+ MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10),
+ MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3,
+ MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80),
+ MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3,
+ MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10),
+ MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4,
+ MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0),
+ MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5,
+ MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0),
+ MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5,
+ MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0),
+ MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5,
+ MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0),
+ MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6,
+ MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0),
+ MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5,
+ MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0),
+ MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7,
+ MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00),
+};
+
+static int mt6397_set_buck_vosel_reg(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+ int i;
+ u32 regval;
+
+ for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
+ if (mt6397_regulators[i].vselctrl_reg) {
+ if (regmap_read(mt6397->regmap,
+ mt6397_regulators[i].vselctrl_reg,
+ &regval) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to read buck ctrl\n");
+ return -EIO;
+ }
+
+ if (regval & mt6397_regulators[i].vselctrl_mask) {
+ mt6397_regulators[i].desc.vsel_reg =
+ mt6397_regulators[i].vselon_reg;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt6397_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+ u32 reg_value, version;
+
+ /* Query buck controller to select activated voltage register part */
+ if (mt6397_set_buck_vosel_reg(pdev))
+ return -EIO;
+
+ /* Read PMIC chip revision to update constraints and voltage table */
+ if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) {
+ dev_err(&pdev->dev, "Failed to read Chip ID\n");
+ return -EIO;
+ }
+ dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+ version = (reg_value & 0xFF);
+ switch (version) {
+ case MT6397_REGULATOR_ID91:
+ mt6397_regulators[MT6397_ID_VGP2].desc.volt_table =
+ ldo_volt_table5_v2;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6397_regulators[i];
+ config.regmap = mt6397->regmap;
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6397_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6397_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver mt6397_regulator_driver = {
+ .driver = {
+ .name = "mt6397-regulator",
+ },
+ .probe = mt6397_regulator_probe,
+};
+
+module_platform_driver(mt6397_regulator_driver);
+
+MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mt6397-regulator");
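Worked example (not part of the patch) for the MT6397_BUCK() macro above: with min = 700000, max = 1493750 and step = 6250, the macro's n_voltages expression gives (1493750 - 700000) / 6250 + 1 = 128 selectors, matching the 0x00..0x7f span declared in buck_volt_range1. A compilable check:

#include <assert.h>

int main(void)
{
        unsigned int min = 700000, max = 1493750, step = 6250;
        unsigned int n_voltages = (max - min) / step + 1;

        assert(n_voltages == 128);      /* selectors 0x00 .. 0x7f */
        return 0;
}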
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 91eaaf010524..24e812c48d93 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match);
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
+ struct regulator_config *config,
struct device_node **node)
{
struct device_node *search, *child;
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
break;
}
+ if (desc->of_parse_cb) {
+ if (desc->of_parse_cb(child, desc, config)) {
+ dev_err(dev,
+ "driver callback failed to parse DT for regulator %s\n",
+ child->name);
+ init_data = NULL;
+ break;
+ }
+ }
+
of_node_get(child);
*node = child;
break;
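Sketch (not part of the patch) of the driver side of the new hook: a regulator_desc can now point of_parse_cb at a function that receives the matched child node plus the duplicated config and may veto registration by returning non-zero. max77686_of_parse_cb() earlier in this series is the in-tree user; the fragment below is purely hypothetical and omits includes and the remaining desc fields.

static int foo_of_parse_cb(struct device_node *np,
                           const struct regulator_desc *desc,
                           struct regulator_config *config)
{
        /* Inspect vendor properties of "np" and tweak "config" here;
         * returning non-zero makes regulator_of_get_init_data() fail. */
        return 0;
}

static const struct regulator_desc foo_desc = {
        .name            = "foo-ldo",
        .of_match        = of_match_ptr("foo-ldo"),
        .regulators_node = of_match_ptr("regulators"),
        .of_parse_cb     = foo_of_parse_cb,
        /* ... remaining fields as usual ... */
};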
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index c879dff597ee..8cc8d1877c44 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -56,7 +56,7 @@
#define PFUZE100_VGEN5VOL 0x70
#define PFUZE100_VGEN6VOL 0x71
-enum chips { PFUZE100, PFUZE200 };
+enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
struct pfuze_regulator {
struct regulator_desc desc;
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = {
1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
};
+static const int pfuze3000_sw2lo[] = {
+ 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
+};
+
+static const int pfuze3000_sw2hi[] = {
+ 2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
+};
+
static const struct i2c_device_id pfuze_device_id[] = {
{.name = "pfuze100", .driver_data = PFUZE100},
{.name = "pfuze200", .driver_data = PFUZE200},
+ {.name = "pfuze3000", .driver_data = PFUZE3000},
{ }
};
MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
static const struct of_device_id pfuze_dt_ids[] = {
{ .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
{ .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
+ { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
{ }
};
MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x20, \
}
+#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step) { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &pfuze100_ldo_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .min_uV = (min), \
+ .uV_step = (step), \
+ .vsel_reg = (base), \
+ .vsel_mask = 0x3, \
+ .enable_reg = (base), \
+ .enable_mask = 0x10, \
+ }, \
+ .stby_reg = (base), \
+ .stby_mask = 0x20, \
+}
+
+
+#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
+ .desc = { \
+ .name = #_name,\
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &pfuze100_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .min_uV = (min), \
+ .uV_step = (step), \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = 0x7, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = 0x7, \
+}
+
+#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
+ .desc = { \
+ .name = #_name,\
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &pfuze100_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .min_uV = (min), \
+ .uV_step = (step), \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = 0xf, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = 0xf, \
+}
+
/* PFUZE100 */
static struct pfuze_regulator pfuze100_regulators[] = {
PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = {
PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
+static struct pfuze_regulator pfuze3000_regulators[] = {
+ PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
+ PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
+ PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+ PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+ PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
+ PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
+ PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
static struct pfuze_regulator *pfuze_regulators;
#ifdef CONFIG_OF
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = {
{ .name = "vgen6", },
};
+/* PFUZE3000 */
+static struct of_regulator_match pfuze3000_matches[] = {
+
+ { .name = "sw1a", },
+ { .name = "sw1b", },
+ { .name = "sw2", },
+ { .name = "sw3", },
+ { .name = "swbst", },
+ { .name = "vsnvs", },
+ { .name = "vrefddr", },
+ { .name = "vldo1", },
+ { .name = "vldo2", },
+ { .name = "vccsd", },
+ { .name = "v33", },
+ { .name = "vldo3", },
+ { .name = "vldo4", },
+};
+
static struct of_regulator_match *pfuze_matches;
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
}
switch (chip->chip_id) {
+ case PFUZE3000:
+ pfuze_matches = pfuze3000_matches;
+ ret = of_regulator_match(dev, parent, pfuze3000_matches,
+ ARRAY_SIZE(pfuze3000_matches));
+ break;
case PFUZE200:
pfuze_matches = pfuze200_matches;
ret = of_regulator_match(dev, parent, pfuze200_matches,
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
* as ID=8 in PFUZE100
*/
dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
- } else if ((value & 0x0f) != pfuze_chip->chip_id) {
+ } else if ((value & 0x0f) != pfuze_chip->chip_id &&
+ (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
/* device id NOT match with your setting */
dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
return -ENODEV;
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
int i, ret;
const struct of_device_id *match;
u32 regulator_num;
- u32 sw_check_start, sw_check_end;
+ u32 sw_check_start, sw_check_end, sw_hi = 0x40;
pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
GFP_KERNEL);
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* use the right regulators after identify the right device */
switch (pfuze_chip->chip_id) {
+ case PFUZE3000:
+ pfuze_regulators = pfuze3000_regulators;
+ regulator_num = ARRAY_SIZE(pfuze3000_regulators);
+ sw_check_start = PFUZE3000_SW2;
+ sw_check_end = PFUZE3000_SW2;
+ sw_hi = 1 << 3;
+ break;
case PFUZE200:
pfuze_regulators = pfuze200_regulators;
regulator_num = ARRAY_SIZE(pfuze200_regulators);
sw_check_start = PFUZE200_SW2;
sw_check_end = PFUZE200_SW3B;
break;
-
case PFUZE100:
default:
pfuze_regulators = pfuze100_regulators;
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
break;
}
dev_info(&client->dev, "pfuze%s found.\n",
- (pfuze_chip->chip_id == PFUZE100) ? "100" : "200");
+ (pfuze_chip->chip_id == PFUZE100) ? "100" :
+ ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
sizeof(pfuze_chip->regulator_descs));
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* SW2~SW4 high bit check and modify the voltage value table */
if (i >= sw_check_start && i <= sw_check_end) {
regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
- if (val & 0x40) {
- desc->min_uV = 800000;
- desc->uV_step = 50000;
- desc->n_voltages = 51;
+ if (val & sw_hi) {
+ if (pfuze_chip->chip_id == PFUZE3000) {
+ desc->volt_table = pfuze3000_sw2hi;
+ desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
+ } else {
+ desc->min_uV = 800000;
+ desc->uV_step = 50000;
+ desc->n_voltages = 51;
+ }
}
}
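Editorial note on the sw_hi change above: the "high range" flag sits in bit 6 (0x40) on PFUZE100/200 but in bit 3 on PFUZE3000, and on PFUZE3000 the high range is selected from the pfuze3000_sw2hi voltage table instead of adjusting a linear range. A small stand-alone check of the bit values:

#include <assert.h>

int main(void)
{
        unsigned int sw_hi_100  = 0x40;         /* default: PFUZE100/200, bit 6 */
        unsigned int sw_hi_3000 = 1 << 3;       /* PFUZE3000, bit 3 */

        assert(sw_hi_100 == 0x40);
        assert(sw_hi_3000 == 0x08);
        return 0;
}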
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 8364ff331a81..e8647f7cf25e 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
return uV;
mutex_lock(&vreg->lock);
- vreg->uV = uV;
if (vreg->is_enabled)
- ret = rpm_reg_write(vreg, req, vreg->uV / 1000);
+ ret = rpm_reg_write(vreg, req, uV / 1000);
+
+ if (!ret)
+ vreg->uV = uV;
mutex_unlock(&vreg->lock);
return ret;
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
return uV;
mutex_lock(&vreg->lock);
- vreg->uV = uV;
if (vreg->is_enabled)
- ret = rpm_reg_write(vreg, req, vreg->uV);
+ ret = rpm_reg_write(vreg, req, uV);
+
+ if (!ret)
+ vreg->uV = uV;
mutex_unlock(&vreg->lock);
return ret;
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
vreg->desc.name = pdev->dev.of_node->name;
+ vreg->desc.supply_name = "vin";
vreg->rpm = dev_get_drvdata(pdev->dev.parent);
if (!vreg->rpm) {
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
break;
}
- if (force_mode < 0) {
+ if (force_mode == -1) {
dev_err(&pdev->dev, "invalid force mode\n");
return -EINVAL;
}
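The qcom_rpm change above fixes a caching bug: the stored voltage used to be updated before the RPM write, so a failed write left vreg->uV describing a voltage the hardware never accepted. A generic sketch of the corrected ordering (the names below are illustrative, not the driver's):

struct cached_reg {
        int uv;         /* last value the hardware actually accepted */
        int enabled;
};

static int set_voltage(struct cached_reg *r, int uv, int (*hw_write)(int uv))
{
        int ret = 0;

        if (r->enabled)
                ret = hw_write(uv);     /* try the hardware first */
        if (!ret)
                r->uv = uv;             /* cache only on success */
        return ret;
}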
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index c94a3e0f3b91..1f93b752a81c 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
RK808_RAMP_RATE_MASK, ramp_value);
}
-int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
+static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
sel);
}
-int rk808_set_suspend_enable(struct regulator_dev *rdev)
+static int rk808_set_suspend_enable(struct regulator_dev *rdev)
{
unsigned int reg;
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev)
0);
}
-int rk808_set_suspend_disable(struct regulator_dev *rdev)
+static int rk808_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int reg;
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
index 870cc49438db..96d2c18e051a 100644
--- a/drivers/regulator/rt5033-regulator.c
+++ b/drivers/regulator/rt5033-regulator.c
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = {
static const struct regulator_desc rt5033_supported_regulators[] = {
[RT5033_BUCK] = {
.name = "BUCK",
+ .of_match = of_match_ptr("BUCK"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RT5033_BUCK,
.ops = &rt5033_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
},
[RT5033_LDO] = {
.name = "LDO",
+ .of_match = of_match_ptr("LDO"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RT5033_LDO,
.ops = &rt5033_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
},
[RT5033_SAFE_LDO] = {
.name = "SAFE_LDO",
+ .of_match = of_match_ptr("SAFE_LDO"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RT5033_SAFE_LDO,
.ops = &rt5033_safe_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev)
int ret, i;
struct regulator_config config = {};
- config.dev = &pdev->dev;
+ config.dev = rt5033->dev;
config.driver_data = rt5033;
for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 2809ae0d6bcd..ff828117798f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
.enable_mask = S2MPS14_ENABLE_MASK \
}
+#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
+#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
};
static int s2mps14_regulator_enable(struct regulator_dev *rdev)
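Editorial note on the two new BUCK macros above: they share the vsel formula but differ in the enable-register formula, because BUCK7 keeps the BUCK1..BUCK6 CTRL stride while BUCK8..BUCK10 do not. Plugging the numbers in (offsets relative to S2MPS13_REG_B1CTRL, whose absolute value is not shown in this hunk):

#include <assert.h>

int main(void)
{
        /* offsets from S2MPS13_REG_B1CTRL, per the macros above */
        int buck7_enable  = (7 - 1) * 2;        /* buck7 macro  -> 12 */
        int buck8_enable  = 8 * 2 - 1;          /* buck8_10 macro -> 15 */
        int buck10_enable = 10 * 2 - 1;         /* buck8_10 macro -> 19 */

        assert(buck7_enable == 12);
        assert(buck8_enable == 15 && buck10_enable == 19);
        return 0;
}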
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 7380af8bd50d..b941e564b3f3 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
}
/* Operations permitted on VDCDCx */
-static struct regulator_ops tps65023_dcdc_ops = {
+static const struct regulator_ops tps65023_dcdc_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
};
/* Operations permitted on LDOx */
-static struct regulator_ops tps65023_ldo_ops = {
+static const struct regulator_ops tps65023_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = {
.map_voltage = regulator_map_voltage_ascend,
};
-static struct regmap_config tps65023_regmap_config = {
+static const struct regmap_config tps65023_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index eebc52cb6984..3d95c87160b3 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
goto err_alloc;
}
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = size * 32;
data->rcdev.ops = &sunxi_reset_ops;
@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = resource_size(res) * 32;
data->rcdev.ops = &sunxi_reset_ops;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f211dfb7b913..cedb41c95dae 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -153,6 +153,17 @@ config RTC_DRV_88PM80X
This driver can also be built as a module. If so, the module
will be called rtc-88pm80x.
+config RTC_DRV_ABB5ZES3
+ depends on I2C
+ select REGMAP_I2C
+ tristate "Abracon AB-RTCMC-32.768kHz-B5ZE-S3"
+ help
+ If you say yes here you get support for the Abracon
+ AB-RTCMC-32.768kHz-B5ZE-S3 I2C RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ab-b5ze-s3.
+
config RTC_DRV_AS3722
tristate "ams AS3722 RTC driver"
depends on MFD_AS3722
@@ -790,6 +801,96 @@ config RTC_DRV_DS1553
This driver can also be built as a module. If so, the module
will be called rtc-ds1553.
+config RTC_DRV_DS1685_FAMILY
+ tristate "Dallas/Maxim DS1685 Family"
+ help
+ If you say yes here you get support for the Dallas/Maxim DS1685
+ family of real time chips. This family includes the DS1685/DS1687,
+ DS1689/DS1693, DS17285/DS17287, DS17485/DS17487, and
+ DS17885/DS17887 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ds1685.
+
+choice
+ prompt "Subtype"
+ depends on RTC_DRV_DS1685_FAMILY
+ default RTC_DRV_DS1685
+
+config RTC_DRV_DS1685
+ bool "DS1685/DS1687"
+ help
+ This enables support for the Dallas/Maxim DS1685/DS1687 real time
+ clock chip.
+
+ This chip is commonly found in SGI O2 (IP32) and SGI Octane (IP30)
+ systems, as well as EPPC-405-UC modules by electronic system design
+ GmbH.
+
+config RTC_DRV_DS1689
+ bool "DS1689/DS1693"
+ help
+ This enables support for the Dallas/Maxim DS1689/DS1693 real time
+ clock chip.
+
+ This is an older RTC chip, supplanted by the DS1685/DS1687 above,
+ which supports a few minor features such as Vcc, Vbat, and Power
+ Cycle counters, plus a customer-specific, 8-byte ROM/Serial number.
+
+ It also works for the even older DS1688/DS1691 RTC chips, which are
+ virtually the same and carry the same model number. Both chips
+ have 114 bytes of user NVRAM.
+
+config RTC_DRV_DS17285
+ bool "DS17285/DS17287"
+ help
+ This enables support for the Dallas/Maxim DS17285/DS17287 real time
+ clock chip.
+
+ This chip features 2KB of extended NV-SRAM. It may be found in
+ some SGI O2 systems, though this is rare.
+
+config RTC_DRV_DS17485
+ bool "DS17485/DS17487"
+ help
+ This enables support for the Dallas/Maxim DS17485/DS17487 real time
+ clock chip.
+
+ This chip features 4KB of extended NV-SRAM.
+
+config RTC_DRV_DS17885
+ bool "DS17885/DS17887"
+ help
+ This enables support for the Dallas/Maxim DS17885/DS17887 real time
+ clock chip.
+
+ This chip features 8KB of extended NV-SRAM.
+
+endchoice
+
+config RTC_DS1685_PROC_REGS
+ bool "Display register values in /proc"
+ depends on RTC_DRV_DS1685_FAMILY && PROC_FS
+ help
+ Enable this to display a readout of all of the RTC registers in
+ /proc/driver/rtc. Keep in mind that this can potentially lead
+ to lost interrupts, as reading Control Register C will clear
+ all pending IRQ flags.
+
+ Unless you are debugging this driver, choose N.
+
+config RTC_DS1685_SYSFS_REGS
+ bool "SysFS access to RTC register bits"
+ depends on RTC_DRV_DS1685_FAMILY && SYSFS
+ help
+ Enable this to provide access to the RTC control register bits
+ in /sys. Some of the bits are read-write, others are read-only.
+
+ Keep in mind that reading Control C's bits automatically clears
+ all pending IRQ flags - this can cause lost interrupts.
+
+ If you know that you need access to these bits, choose Y, else N.
+
config RTC_DRV_DS1742
tristate "Maxim/Dallas DS1742/1743"
depends on HAS_IOMEM
@@ -1241,6 +1342,16 @@ config RTC_DRV_MV
This driver can also be built as a module. If so, the module
will be called rtc-mv.
+config RTC_DRV_ARMADA38X
+ tristate "Armada 38x Marvell SoC RTC"
+ depends on ARCH_MVEBU
+ help
+ If you say yes here you will get support for the in-chip RTC
+ that can be found in Marvell's Armada 38x SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called armada38x-rtc.
+
config RTC_DRV_PS3
tristate "PS3 RTC"
depends on PPC_PS3
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index c8ef3e1e6ccd..69c87062b098 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
+obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
+obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
@@ -52,6 +54,7 @@ obj-$(CONFIG_RTC_DRV_DS1390) += rtc-ds1390.o
obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
+obj-$(CONFIG_RTC_DRV_DS1685_FAMILY) += rtc-ds1685.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
obj-$(CONFIG_RTC_DRV_DS2404) += rtc-ds2404.o
obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index 4aa60d74004e..6c719f23520a 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -26,7 +26,7 @@ static int __init rtc_hctosys(void)
{
int err = -ENODEV;
struct rtc_time tm;
- struct timespec tv = {
+ struct timespec64 tv64 = {
.tv_nsec = NSEC_PER_SEC >> 1,
};
struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
@@ -45,25 +45,17 @@ static int __init rtc_hctosys(void)
}
- err = rtc_valid_tm(&tm);
- if (err) {
- dev_err(rtc->dev.parent,
- "hctosys: invalid date/time\n");
- goto err_invalid;
- }
-
- rtc_tm_to_time(&tm, &tv.tv_sec);
+ tv64.tv_sec = rtc_tm_to_time64(&tm);
- err = do_settimeofday(&tv);
+ err = do_settimeofday64(&tv64);
dev_info(rtc->dev.parent,
"setting system clock to "
- "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n",
+ "%d-%02d-%02d %02d:%02d:%02d UTC (%lld)\n",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec,
- (unsigned int) tv.tv_sec);
+ (long long) tv64.tv_sec);
-err_invalid:
err_read:
rtc_class_close(rtc);
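The hctosys conversion above replaces the 32-bit rtc_tm_to_time() / do_settimeofday() pair with their time64_t counterparts, which keeps the boot-time clock setting working past 03:14:07 UTC on 2038-01-19, when a signed 32-bit seconds counter overflows. A quick plain-C illustration of that boundary:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int64_t y2038 = INT32_MAX;              /* 2038-01-19 03:14:07 UTC */

        assert(y2038 + 1 > INT32_MAX);          /* one more second no longer fits in 32 bits */
        assert(y2038 + 1 == 2147483648LL);      /* but a 64-bit time64_t holds it fine */
        return 0;
}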
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 45bfc28ee3aa..37215cf983e9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -73,10 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
else if (rtc->ops->set_time)
err = rtc->ops->set_time(rtc->dev.parent, tm);
else if (rtc->ops->set_mmss) {
- unsigned long secs;
- err = rtc_tm_to_time(tm, &secs);
- if (err == 0)
- err = rtc->ops->set_mmss(rtc->dev.parent, secs);
+ time64_t secs64 = rtc_tm_to_time64(tm);
+ err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
} else
err = -EINVAL;
@@ -105,7 +103,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
err = rtc->ops->read_time(rtc->dev.parent, &old);
if (err == 0) {
- rtc_time_to_tm(secs, &new);
+ rtc_time64_to_tm(secs, &new);
/*
* avoid writing when we're going to change the day of
@@ -157,7 +155,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
int err;
struct rtc_time before, now;
int first_time = 1;
- unsigned long t_now, t_alm;
+ time64_t t_now, t_alm;
enum { none, day, month, year } missing = none;
unsigned days;
@@ -258,8 +256,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
/* with luck, no rollover is needed */
- rtc_tm_to_time(&now, &t_now);
- rtc_tm_to_time(&alarm->time, &t_alm);
+ t_now = rtc_tm_to_time64(&now);
+ t_alm = rtc_tm_to_time64(&alarm->time);
if (t_now < t_alm)
goto done;
@@ -273,7 +271,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
case day:
dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
t_alm += 24 * 60 * 60;
- rtc_time_to_tm(t_alm, &alarm->time);
+ rtc_time64_to_tm(t_alm, &alarm->time);
break;
/* Month rollover ... if it's the 31st, an alarm on the 3rd will
@@ -346,19 +344,19 @@ EXPORT_SYMBOL_GPL(rtc_read_alarm);
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
- long now, scheduled;
+ time64_t now, scheduled;
int err;
err = rtc_valid_tm(&alarm->time);
if (err)
return err;
- rtc_tm_to_time(&alarm->time, &scheduled);
+ scheduled = rtc_tm_to_time64(&alarm->time);
/* Make sure we're not setting alarms in the past */
err = __rtc_read_time(rtc, &tm);
if (err)
return err;
- rtc_tm_to_time(&tm, &now);
+ now = rtc_tm_to_time64(&tm);
if (scheduled <= now)
return -ETIME;
/*
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
new file mode 100644
index 000000000000..cfc2ef98d393
--- /dev/null
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -0,0 +1,1035 @@
+/*
+ * rtc-ab-b5ze-s3 - Driver for Abracon AB-RTCMC-32.768kHz-B5ZE-S3
+ * I2C RTC / Alarm chip
+ *
+ * Copyright (C) 2014, Arnaud EBALARD <arno@natisbad.org>
+ *
+ * Detailed datasheet of the chip is available here:
+ *
+ * http://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf
+ *
+ * This work is based on ISL12057 driver (drivers/rtc/rtc-isl12057.c).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+
+#define DRV_NAME "rtc-ab-b5ze-s3"
+
+/* Control section */
+#define ABB5ZES3_REG_CTRL1 0x00 /* Control 1 register */
+#define ABB5ZES3_REG_CTRL1_CIE BIT(0) /* Pulse interrupt enable */
+#define ABB5ZES3_REG_CTRL1_AIE BIT(1) /* Alarm interrupt enable */
+#define ABB5ZES3_REG_CTRL1_SIE BIT(2) /* Second interrupt enable */
+#define ABB5ZES3_REG_CTRL1_PM BIT(3) /* 24h/12h mode */
+#define ABB5ZES3_REG_CTRL1_SR BIT(4) /* Software reset */
+#define ABB5ZES3_REG_CTRL1_STOP BIT(5) /* RTC circuit enable */
+#define ABB5ZES3_REG_CTRL1_CAP BIT(7)
+
+#define ABB5ZES3_REG_CTRL2 0x01 /* Control 2 register */
+#define ABB5ZES3_REG_CTRL2_CTBIE BIT(0) /* Countdown timer B int. enable */
+#define ABB5ZES3_REG_CTRL2_CTAIE BIT(1) /* Countdown timer A int. enable */
+#define ABB5ZES3_REG_CTRL2_WTAIE BIT(2) /* Watchdog timer A int. enable */
+#define ABB5ZES3_REG_CTRL2_AF BIT(3) /* Alarm interrupt status */
+#define ABB5ZES3_REG_CTRL2_SF BIT(4) /* Second interrupt status */
+#define ABB5ZES3_REG_CTRL2_CTBF BIT(5) /* Countdown timer B int. status */
+#define ABB5ZES3_REG_CTRL2_CTAF BIT(6) /* Countdown timer A int. status */
+#define ABB5ZES3_REG_CTRL2_WTAF BIT(7) /* Watchdog timer A int. status */
+
+#define ABB5ZES3_REG_CTRL3 0x02 /* Control 3 register */
+#define ABB5ZES3_REG_CTRL3_PM2 BIT(7) /* Power Management bit 2 */
+#define ABB5ZES3_REG_CTRL3_PM1 BIT(6) /* Power Management bit 1 */
+#define ABB5ZES3_REG_CTRL3_PM0 BIT(5) /* Power Management bit 0 */
+#define ABB5ZES3_REG_CTRL3_BSF BIT(3) /* Battery switchover int. status */
+#define ABB5ZES3_REG_CTRL3_BLF BIT(2) /* Battery low int. status */
+#define ABB5ZES3_REG_CTRL3_BSIE BIT(1) /* Battery switchover int. enable */
+#define ABB5ZES3_REG_CTRL3_BLIE BIT(0) /* Battery low int. enable */
+
+#define ABB5ZES3_CTRL_SEC_LEN 3
+
+/* RTC section */
+#define ABB5ZES3_REG_RTC_SC 0x03 /* RTC Seconds register */
+#define ABB5ZES3_REG_RTC_SC_OSC BIT(7) /* Clock integrity status */
+#define ABB5ZES3_REG_RTC_MN 0x04 /* RTC Minutes register */
+#define ABB5ZES3_REG_RTC_HR 0x05 /* RTC Hours register */
+#define ABB5ZES3_REG_RTC_HR_PM BIT(5) /* RTC Hours PM bit */
+#define ABB5ZES3_REG_RTC_DT 0x06 /* RTC Date register */
+#define ABB5ZES3_REG_RTC_DW 0x07 /* RTC Day of the week register */
+#define ABB5ZES3_REG_RTC_MO 0x08 /* RTC Month register */
+#define ABB5ZES3_REG_RTC_YR 0x09 /* RTC Year register */
+
+#define ABB5ZES3_RTC_SEC_LEN 7
+
+/* Alarm section (enable bits are all active low) */
+#define ABB5ZES3_REG_ALRM_MN 0x0A /* Alarm - minute register */
+#define ABB5ZES3_REG_ALRM_MN_AE BIT(7) /* Minute enable */
+#define ABB5ZES3_REG_ALRM_HR 0x0B /* Alarm - hours register */
+#define ABB5ZES3_REG_ALRM_HR_AE BIT(7) /* Hour enable */
+#define ABB5ZES3_REG_ALRM_DT 0x0C /* Alarm - date register */
+#define ABB5ZES3_REG_ALRM_DT_AE BIT(7) /* Date (day of the month) enable */
+#define ABB5ZES3_REG_ALRM_DW 0x0D /* Alarm - day of the week reg. */
+#define ABB5ZES3_REG_ALRM_DW_AE BIT(7) /* Day of the week enable */
+
+#define ABB5ZES3_ALRM_SEC_LEN 4
+
+/* Frequency offset section */
+#define ABB5ZES3_REG_FREQ_OF 0x0E /* Frequency offset register */
+#define ABB5ZES3_REG_FREQ_OF_MODE 0x0E /* Offset mode: 2 hours / minute */
+
+/* CLOCKOUT section */
+#define ABB5ZES3_REG_TIM_CLK 0x0F /* Timer & Clockout register */
+#define ABB5ZES3_REG_TIM_CLK_TAM BIT(7) /* Permanent/pulsed timer A/int. 2 */
+#define ABB5ZES3_REG_TIM_CLK_TBM BIT(6) /* Permanent/pulsed timer B */
+#define ABB5ZES3_REG_TIM_CLK_COF2 BIT(5) /* Clkout Freq bit 2 */
+#define ABB5ZES3_REG_TIM_CLK_COF1 BIT(4) /* Clkout Freq bit 1 */
+#define ABB5ZES3_REG_TIM_CLK_COF0 BIT(3) /* Clkout Freq bit 0 */
+#define ABB5ZES3_REG_TIM_CLK_TAC1 BIT(2) /* Timer A: - 01 : countdown */
+#define ABB5ZES3_REG_TIM_CLK_TAC0 BIT(1) /* - 10 : timer */
+#define ABB5ZES3_REG_TIM_CLK_TBC BIT(0) /* Timer B enable */
+
+/* Timer A Section */
+#define ABB5ZES3_REG_TIMA_CLK 0x10 /* Timer A clock register */
+#define ABB5ZES3_REG_TIMA_CLK_TAQ2 BIT(2) /* Freq bit 2 */
+#define ABB5ZES3_REG_TIMA_CLK_TAQ1 BIT(1) /* Freq bit 1 */
+#define ABB5ZES3_REG_TIMA_CLK_TAQ0 BIT(0) /* Freq bit 0 */
+#define ABB5ZES3_REG_TIMA 0x11 /* Timer A register */
+
+#define ABB5ZES3_TIMA_SEC_LEN 2
+
+/* Timer B Section */
+#define ABB5ZES3_REG_TIMB_CLK 0x12 /* Timer B clock register */
+#define ABB5ZES3_REG_TIMB_CLK_TBW2 BIT(6)
+#define ABB5ZES3_REG_TIMB_CLK_TBW1 BIT(5)
+#define ABB5ZES3_REG_TIMB_CLK_TBW0 BIT(4)
+#define ABB5ZES3_REG_TIMB_CLK_TAQ2 BIT(2)
+#define ABB5ZES3_REG_TIMB_CLK_TAQ1 BIT(1)
+#define ABB5ZES3_REG_TIMB_CLK_TAQ0 BIT(0)
+#define ABB5ZES3_REG_TIMB 0x13 /* Timer B register */
+#define ABB5ZES3_TIMB_SEC_LEN 2
+
+#define ABB5ZES3_MEM_MAP_LEN 0x14
+
+struct abb5zes3_rtc_data {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ struct mutex lock;
+
+ int irq;
+
+ bool battery_low;
+ bool timer_alarm; /* current alarm is via timer A */
+};
+
+/*
+ * Try to match register bits with fixed null values to see whether we
+ * are dealing with an ABB5ZES3. Note: this function is called early
+ * during init and hence does not need mutex protection.
+ */
+static int abb5zes3_i2c_validate_chip(struct regmap *regmap)
+{
+ u8 regs[ABB5ZES3_MEM_MAP_LEN];
+ static const u8 mask[ABB5ZES3_MEM_MAP_LEN] = { 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0xc0, 0xc0, 0xf8,
+ 0xe0, 0x00, 0x00, 0x40,
+ 0x40, 0x78, 0x00, 0x00,
+ 0xf8, 0x00, 0x88, 0x00 };
+ int ret, i;
+
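+ /*
+ * Each mask byte flags the bits of the corresponding register that must
+ * read back as zero on a genuine ABB5ZES3; any set bit means this is not
+ * the expected chip.
+ */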
+ ret = regmap_bulk_read(regmap, 0, regs, ABB5ZES3_MEM_MAP_LEN);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ABB5ZES3_MEM_MAP_LEN; ++i) {
+ if (regs[i] & mask[i]) /* check if bits are cleared */
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Clear alarm status bit. */
+static int _abb5zes3_rtc_clear_alarm(struct device *dev)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ABB5ZES3_REG_CTRL2,
+ ABB5ZES3_REG_CTRL2_AF, 0);
+ if (ret)
+ dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+/* Enable or disable alarm (i.e. alarm interrupt generation) */
+static int _abb5zes3_rtc_update_alarm(struct device *dev, bool enable)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ABB5ZES3_REG_CTRL1,
+ ABB5ZES3_REG_CTRL1_AIE,
+ enable ? ABB5ZES3_REG_CTRL1_AIE : 0);
+ if (ret)
+ dev_err(dev, "%s: writing alarm INT failed (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/* Enable or disable timer (watchdog timer A interrupt generation) */
+static int _abb5zes3_rtc_update_timer(struct device *dev, bool enable)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ABB5ZES3_REG_CTRL2,
+ ABB5ZES3_REG_CTRL2_WTAIE,
+ enable ? ABB5ZES3_REG_CTRL2_WTAIE : 0);
+ if (ret)
+ dev_err(dev, "%s: writing timer INT failed (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/*
+ * Note: we only read, so the regmap's internal locking is sufficient, i.e.
+ * we do not need the driver's main lock.
+ */
+static int _abb5zes3_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ABB5ZES3_REG_RTC_SC + ABB5ZES3_RTC_SEC_LEN];
+ int ret;
+
+ /*
+ * As we need to read the CTRL1 register anyway to access the 24/12h
+ * mode bit, we do a single bulk read of both the control and RTC
+ * sections (they are consecutive). This also eases indexing
+ * of register values after the bulk read.
+ */
+ ret = regmap_bulk_read(data->regmap, ABB5ZES3_REG_CTRL1, regs,
+ sizeof(regs));
+ if (ret) {
+ dev_err(dev, "%s: reading RTC time failed (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ /* If clock integrity is not guaranteed, do not return a time value */
+ if (regs[ABB5ZES3_REG_RTC_SC] & ABB5ZES3_REG_RTC_SC_OSC) {
+ ret = -ENODATA;
+ goto err;
+ }
+
+ tm->tm_sec = bcd2bin(regs[ABB5ZES3_REG_RTC_SC] & 0x7F);
+ tm->tm_min = bcd2bin(regs[ABB5ZES3_REG_RTC_MN]);
+
+ if (regs[ABB5ZES3_REG_CTRL1] & ABB5ZES3_REG_CTRL1_PM) { /* 12hr mode */
+ tm->tm_hour = bcd2bin(regs[ABB5ZES3_REG_RTC_HR] & 0x1f);
+ if (regs[ABB5ZES3_REG_RTC_HR] & ABB5ZES3_REG_RTC_HR_PM) /* PM */
+ tm->tm_hour += 12;
+ } else { /* 24hr mode */
+ tm->tm_hour = bcd2bin(regs[ABB5ZES3_REG_RTC_HR]);
+ }
+
+ tm->tm_mday = bcd2bin(regs[ABB5ZES3_REG_RTC_DT]);
+ tm->tm_wday = bcd2bin(regs[ABB5ZES3_REG_RTC_DW]);
+ tm->tm_mon = bcd2bin(regs[ABB5ZES3_REG_RTC_MO]) - 1; /* starts at 1 */
+ tm->tm_year = bcd2bin(regs[ABB5ZES3_REG_RTC_YR]) + 100;
+
+ ret = rtc_valid_tm(tm);
+
+err:
+ return ret;
+}
+
+static int abb5zes3_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ABB5ZES3_REG_RTC_SC + ABB5ZES3_RTC_SEC_LEN];
+ int ret;
+
+ /*
+ * Year register is 8-bit wide and BCD-coded, i.e. it records values
+ * between 0 and 99. tm_year is an offset from 1900 and we are
+ * interested in the 2000-2099 range, so any value less than 100
+ * is invalid.
+ */
+ if (tm->tm_year < 100)
+ return -EINVAL;
+
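+ /*
+ * regs[] is indexed by absolute register address; only the RTC section
+ * (starting at ABB5ZES3_REG_RTC_SC) is filled in and written below, the
+ * lower (control section) entries are left unused.
+ */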
+ regs[ABB5ZES3_REG_RTC_SC] = bin2bcd(tm->tm_sec); /* MSB=0 clears OSC */
+ regs[ABB5ZES3_REG_RTC_MN] = bin2bcd(tm->tm_min);
+ regs[ABB5ZES3_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
+ regs[ABB5ZES3_REG_RTC_DT] = bin2bcd(tm->tm_mday);
+ regs[ABB5ZES3_REG_RTC_DW] = bin2bcd(tm->tm_wday);
+ regs[ABB5ZES3_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1);
+ regs[ABB5ZES3_REG_RTC_YR] = bin2bcd(tm->tm_year - 100);
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_write(data->regmap, ABB5ZES3_REG_RTC_SC,
+ regs + ABB5ZES3_REG_RTC_SC,
+ ABB5ZES3_RTC_SEC_LEN);
+ mutex_unlock(&data->lock);
+
+
+ return ret;
+}
+
+/*
+ * Set provided TAQ and Timer A registers (TIMA_CLK and TIMA) based on
+ * given number of seconds.
+ */
+static inline void sec_to_timer_a(u8 secs, u8 *taq, u8 *timer_a)
+{
+ *taq = ABB5ZES3_REG_TIMA_CLK_TAQ1; /* 1Hz */
+ *timer_a = secs;
+}
+
+/*
+ * Return the current number of seconds left in Timer A. As we only use
+ * timer A at a 1Hz frequency, the register value is directly in seconds.
+ */
+static inline int sec_from_timer_a(u8 *secs, u8 taq, u8 timer_a)
+{
+ if (taq != ABB5ZES3_REG_TIMA_CLK_TAQ1) /* 1Hz */
+ return -EINVAL;
+
+ *secs = timer_a;
+
+ return 0;
+}
+
+/*
+ * Read an alarm currently configured via the watchdog timer (timer A). This
+ * is done by reading the current RTC time and adding the remaining timer
+ * time.
+ */
+static int _abb5zes3_rtc_read_timer(struct device *dev,
+ struct rtc_wkalrm *alarm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm, *alarm_tm = &alarm->time;
+ u8 regs[ABB5ZES3_TIMA_SEC_LEN + 1];
+ unsigned long rtc_secs;
+ unsigned int reg;
+ u8 timer_secs;
+ int ret;
+
+ /*
+ * Because the registers are consecutive, we grab both the clockout
+ * register and the Timer A section in a single call. The latter is
+ * used to decide whether timer A is enabled (as a watchdog timer).
+ */
+ ret = regmap_bulk_read(data->regmap, ABB5ZES3_REG_TIM_CLK, regs,
+ ABB5ZES3_TIMA_SEC_LEN + 1);
+ if (ret) {
+ dev_err(dev, "%s: reading Timer A section failed (%d)\n",
+ __func__, ret);
+ goto err;
+ }
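+
+ /*
+ * regs[] now holds three consecutive registers: TIM_CLK (0x0F),
+ * TIMA_CLK (0x10, timer A clock selection) and TIMA (0x11, remaining
+ * timer A value), decoded below by sec_from_timer_a().
+ */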
+
+ /* get current time ... */
+ ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ goto err;
+
+ /* ... convert to seconds ... */
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err;
+
+ /* ... add remaining timer A time ... */
+ ret = sec_from_timer_a(&timer_secs, regs[1], regs[2]);
+ if (ret)
+ goto err;
+
+ /* ... and convert back. */
+ rtc_time_to_tm(rtc_secs + timer_secs, alarm_tm);
+
+ ret = regmap_read(data->regmap, ABB5ZES3_REG_CTRL2, &reg);
+ if (ret) {
+ dev_err(dev, "%s: reading ctrl reg failed (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ alarm->enabled = !!(reg & ABB5ZES3_REG_CTRL2_WTAIE);
+
+err:
+ return ret;
+}
+
+/* Read an alarm currently configured via the RTC alarm registers. */
+static int _abb5zes3_rtc_read_alarm(struct device *dev,
+ struct rtc_wkalrm *alarm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm, *alarm_tm = &alarm->time;
+ unsigned long rtc_secs, alarm_secs;
+ u8 regs[ABB5ZES3_ALRM_SEC_LEN];
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, ABB5ZES3_REG_ALRM_MN, regs,
+ ABB5ZES3_ALRM_SEC_LEN);
+ if (ret) {
+ dev_err(dev, "%s: reading alarm section failed (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ alarm_tm->tm_sec = 0;
+ alarm_tm->tm_min = bcd2bin(regs[0] & 0x7f);
+ alarm_tm->tm_hour = bcd2bin(regs[1] & 0x3f);
+ alarm_tm->tm_mday = bcd2bin(regs[2] & 0x3f);
+ alarm_tm->tm_wday = -1;
+
+ /*
+ * The alarm section does not store the year/month. We use the ones from the
+ * RTC section as a basis and increment the month (and then the year) if
+ * needed to get an alarm after the current time.
+ */
+ ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ goto err;
+
+ alarm_tm->tm_year = rtc_tm.tm_year;
+ alarm_tm->tm_mon = rtc_tm.tm_mon;
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err;
+
+ ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
+ if (ret)
+ goto err;
+
+ if (alarm_secs < rtc_secs) {
+ if (alarm_tm->tm_mon == 11) {
+ alarm_tm->tm_mon = 0;
+ alarm_tm->tm_year += 1;
+ } else {
+ alarm_tm->tm_mon += 1;
+ }
+ }
+
+ ret = regmap_read(data->regmap, ABB5ZES3_REG_CTRL1, &reg);
+ if (ret) {
+ dev_err(dev, "%s: reading ctrl reg failed (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ alarm->enabled = !!(reg & ABB5ZES3_REG_CTRL1_AIE);
+
+err:
+ return ret;
+}
+
+/*
+ * As the Alarm mechanism supported by the chip is only accurate to the
+ * minute, we use the watchdog timer mechanism provided by timer A
+ * (up to 256 seconds w/ a second accuracy) for low alarm values (below
+ * 4 minutes). Otherwise, we use the common alarm mechanism provided
+ * by the chip. In order for that to work, we keep track of the currently
+ * configured alarm type via the 'timer_alarm' flag in our private data
+ * structure.
+ */
+static int abb5zes3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ if (data->timer_alarm)
+ ret = _abb5zes3_rtc_read_timer(dev, alarm);
+ else
+ ret = _abb5zes3_rtc_read_alarm(dev, alarm);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+/*
+ * Set the alarm using the chip's alarm mechanism. It is only accurate to the
+ * minute (not the second). The function expects the alarm interrupt to
+ * be disabled.
+ */
+static int _abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ struct rtc_time *alarm_tm = &alarm->time;
+ unsigned long rtc_secs, alarm_secs;
+ u8 regs[ABB5ZES3_ALRM_SEC_LEN];
+ struct rtc_time rtc_tm;
+ int ret, enable = 1;
+
+ ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ goto err;
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err;
+
+ ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
+ if (ret)
+ goto err;
+
+ /* If alarm time is before current time, disable the alarm */
+ if (!alarm->enabled || alarm_secs <= rtc_secs) {
+ enable = 0;
+ } else {
+ /*
+ * The chip only supports alarms up to one month in the future. Return
+ * an error if we get something beyond that limit. The comparison is
+ * done by incrementing the rtc_tm month field by one and checking that
+ * the alarm value is still below.
+ */
+ if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
+ rtc_tm.tm_mon = 0;
+ rtc_tm.tm_year += 1;
+ } else {
+ rtc_tm.tm_mon += 1;
+ }
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err;
+
+ if (alarm_secs > rtc_secs) {
+ dev_err(dev, "%s: alarm maximum is one month in the "
+ "future (%d)\n", __func__, ret);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ /*
+ * Program all alarm registers but the DW one. For each register, setting
+ * the MSB to 0 enables the associated alarm.
+ */
+ regs[0] = bin2bcd(alarm_tm->tm_min) & 0x7f;
+ regs[1] = bin2bcd(alarm_tm->tm_hour) & 0x3f;
+ regs[2] = bin2bcd(alarm_tm->tm_mday) & 0x3f;
+ regs[3] = ABB5ZES3_REG_ALRM_DW_AE; /* do not match day of the week */
+
+ ret = regmap_bulk_write(data->regmap, ABB5ZES3_REG_ALRM_MN, regs,
+ ABB5ZES3_ALRM_SEC_LEN);
+ if (ret < 0) {
+ dev_err(dev, "%s: writing ALARM section failed (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ /* Record currently configured alarm is not a timer */
+ data->timer_alarm = 0;
+
+ /* Enable or disable alarm interrupt generation */
+ ret = _abb5zes3_rtc_update_alarm(dev, enable);
+
+err:
+ return ret;
+}
+
+/*
+ * Set the alarm using the watchdog timer mechanism (via timer A). The
+ * function expects the timer A interrupt to be disabled.
+ */
+static int _abb5zes3_rtc_set_timer(struct device *dev, struct rtc_wkalrm *alarm,
+ u8 secs)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ABB5ZES3_TIMA_SEC_LEN];
+ u8 mask = ABB5ZES3_REG_TIM_CLK_TAC0 | ABB5ZES3_REG_TIM_CLK_TAC1;
+ int ret = 0;
+
+ /* Program given number of seconds to Timer A registers */
+ sec_to_timer_a(secs, &regs[0], &regs[1]);
+ ret = regmap_bulk_write(data->regmap, ABB5ZES3_REG_TIMA_CLK, regs,
+ ABB5ZES3_TIMA_SEC_LEN);
+ if (ret < 0) {
+ dev_err(dev, "%s: writing timer section failed\n", __func__);
+ goto err;
+ }
+
+ /* Configure Timer A as a watchdog timer */
+ ret = regmap_update_bits(data->regmap, ABB5ZES3_REG_TIM_CLK,
+ mask, ABB5ZES3_REG_TIM_CLK_TAC1);
+ if (ret)
+ dev_err(dev, "%s: failed to update timer\n", __func__);
+
+ /* Record currently configured alarm is a timer */
+ data->timer_alarm = 1;
+
+ /* Enable or disable timer interrupt generation */
+ ret = _abb5zes3_rtc_update_timer(dev, alarm->enabled);
+
+err:
+ return ret;
+}
+
+/*
+ * The chip has an alarm which is only accurate to the minute. In order to
+ * handle alarms below that limit, we use the watchdog timer function of
+ * timer A. More precisely, the timer method is used for alarms below 240
+ * seconds.
+ */
+static int abb5zes3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ struct rtc_time *alarm_tm = &alarm->time;
+ unsigned long rtc_secs, alarm_secs;
+ struct rtc_time rtc_tm;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = _abb5zes3_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ goto err;
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err;
+
+ ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
+ if (ret)
+ goto err;
+
+ /* Let's first disable both the alarm and the timer interrupts */
+ ret = _abb5zes3_rtc_update_alarm(dev, false);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to disable alarm (%d)\n", __func__,
+ ret);
+ goto err;
+ }
+ ret = _abb5zes3_rtc_update_timer(dev, false);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to disable timer (%d)\n", __func__,
+ ret);
+ goto err;
+ }
+
+ data->timer_alarm = 0;
+
+ /*
+ * Now configure the alarm: if we are expected to ring in more than
+ * 240s, we set up an alarm. Otherwise, a timer.
+ */
+ if ((alarm_secs > rtc_secs) && ((alarm_secs - rtc_secs) <= 240))
+ ret = _abb5zes3_rtc_set_timer(dev, alarm,
+ alarm_secs - rtc_secs);
+ else
+ ret = _abb5zes3_rtc_set_alarm(dev, alarm);
+
+err:
+ mutex_unlock(&data->lock);
+
+ if (ret)
+ dev_err(dev, "%s: unable to configure alarm (%d)\n", __func__,
+ ret);
+
+ return ret;
+}
+
+/* Enable or disable battery low irq generation */
+static inline int _abb5zes3_rtc_battery_low_irq_enable(struct regmap *regmap,
+ bool enable)
+{
+ return regmap_update_bits(regmap, ABB5ZES3_REG_CTRL3,
+ ABB5ZES3_REG_CTRL3_BLIE,
+ enable ? ABB5ZES3_REG_CTRL3_BLIE : 0);
+}
+
+/*
+ * Check the current RTC status and enable/disable what needs to be. Return
+ * 0 if everything went OK and a negative value upon error. Note: this
+ * function is called early during init and hence does not need mutex
+ * protection.
+ */
+static int abb5zes3_rtc_check_setup(struct device *dev)
+{
+ struct abb5zes3_rtc_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int reg;
+ int ret;
+ u8 mask;
+
+ /*
+ * By default, the device generates a 32.768kHz signal on the IRQ#1 pin.
+ * It is disabled here to prevent polluting the interrupt line and
+ * uselessly triggering the IRQ handler we install for alarm and battery
+ * low events. Note: this is done before clearing the interrupt status
+ * below in this function.
+ * We also disable all timers and set the timer interrupt to permanent
+ * (not pulsed).
+ */
+ mask = (ABB5ZES3_REG_TIM_CLK_TBC | ABB5ZES3_REG_TIM_CLK_TAC0 |
+ ABB5ZES3_REG_TIM_CLK_TAC1 | ABB5ZES3_REG_TIM_CLK_COF0 |
+ ABB5ZES3_REG_TIM_CLK_COF1 | ABB5ZES3_REG_TIM_CLK_COF2 |
+ ABB5ZES3_REG_TIM_CLK_TBM | ABB5ZES3_REG_TIM_CLK_TAM);
+ ret = regmap_update_bits(regmap, ABB5ZES3_REG_TIM_CLK, mask,
+ ABB5ZES3_REG_TIM_CLK_COF0 | ABB5ZES3_REG_TIM_CLK_COF1 |
+ ABB5ZES3_REG_TIM_CLK_COF2);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to initialize clkout register (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Each component of the alarm (MN, HR, DT, DW) can be enabled/disabled
+ * individually by clearing/setting the MSB of the associated register. So,
+ * we set all alarm enable bits to disable the current alarm setting.
+ */
+ mask = (ABB5ZES3_REG_ALRM_MN_AE | ABB5ZES3_REG_ALRM_HR_AE |
+ ABB5ZES3_REG_ALRM_DT_AE | ABB5ZES3_REG_ALRM_DW_AE);
+ ret = regmap_update_bits(regmap, ABB5ZES3_REG_CTRL2, mask, mask);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to disable alarm setting (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Set Control 1 register (RTC enabled, 24hr mode, all int. disabled) */
+ mask = (ABB5ZES3_REG_CTRL1_CIE | ABB5ZES3_REG_CTRL1_AIE |
+ ABB5ZES3_REG_CTRL1_SIE | ABB5ZES3_REG_CTRL1_PM |
+ ABB5ZES3_REG_CTRL1_CAP | ABB5ZES3_REG_CTRL1_STOP);
+ ret = regmap_update_bits(regmap, ABB5ZES3_REG_CTRL1, mask, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to initialize CTRL1 register (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Set Control 2 register (timer int. disabled, alarm status cleared).
+ * WTAF is read-only and cleared automatically by reading the register.
+ */
+ mask = (ABB5ZES3_REG_CTRL2_CTBIE | ABB5ZES3_REG_CTRL2_CTAIE |
+ ABB5ZES3_REG_CTRL2_WTAIE | ABB5ZES3_REG_CTRL2_AF |
+ ABB5ZES3_REG_CTRL2_SF | ABB5ZES3_REG_CTRL2_CTBF |
+ ABB5ZES3_REG_CTRL2_CTAF);
+ ret = regmap_update_bits(regmap, ABB5ZES3_REG_CTRL2, mask, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to initialize CTRL2 register (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Enable the battery low detection and battery switchover functions
+ * (standard mode). Disable the associated interrupts. Clear the battery
+ * switchover flag but not the battery low flag; the latter is checked
+ * later below.
+ */
+ mask = (ABB5ZES3_REG_CTRL3_PM0 | ABB5ZES3_REG_CTRL3_PM1 |
+ ABB5ZES3_REG_CTRL3_PM2 | ABB5ZES3_REG_CTRL3_BLIE |
+ ABB5ZES3_REG_CTRL3_BSIE | ABB5ZES3_REG_CTRL3_BSF);
+ ret = regmap_update_bits(regmap, ABB5ZES3_REG_CTRL3, mask, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to initialize CTRL3 register (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Check oscillator integrity flag */
+ ret = regmap_read(regmap, ABB5ZES3_REG_RTC_SC, &reg);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to read osc. integrity flag (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (reg & ABB5ZES3_REG_RTC_SC_OSC) {
+ dev_err(dev, "clock integrity not guaranteed. Osc. has stopped "
+ "or has been interrupted.\n");
+ dev_err(dev, "change battery (if not already done) and "
+ "then set time to reset osc. failure flag.\n");
+ }
+
+ /*
+ * Check the battery low flag at startup: this allows reporting that the
+ * battery is low at startup when the IRQ line is not connected. Note: we
+ * record the current status to avoid re-enabling this interrupt later in
+ * the probe function if the battery is low.
+ */
+ ret = regmap_read(regmap, ABB5ZES3_REG_CTRL3, &reg);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to read battery low flag (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ data->battery_low = reg & ABB5ZES3_REG_CTRL3_BLF;
+ if (data->battery_low) {
+ dev_err(dev, "RTC battery is low; please, consider "
+ "changing it!\n");
+
+ ret = _abb5zes3_rtc_battery_low_irq_enable(regmap, false);
+ if (ret)
+ dev_err(dev, "%s: disabling battery low interrupt "
+ "generation failed (%d)\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+static int abb5zes3_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enable)
+{
+ struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(dev);
+ int ret = 0;
+
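+ /* Without an interrupt line there is nothing to enable or disable. */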
+ if (rtc_data->irq) {
+ mutex_lock(&rtc_data->lock);
+ if (rtc_data->timer_alarm)
+ ret = _abb5zes3_rtc_update_timer(dev, enable);
+ else
+ ret = _abb5zes3_rtc_update_alarm(dev, enable);
+ mutex_unlock(&rtc_data->lock);
+ }
+
+ return ret;
+}
+
+static irqreturn_t _abb5zes3_rtc_interrupt(int irq, void *data)
+{
+ struct i2c_client *client = data;
+ struct device *dev = &client->dev;
+ struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(dev);
+ struct rtc_device *rtc = rtc_data->rtc;
+ u8 regs[ABB5ZES3_CTRL_SEC_LEN];
+ int ret, handled = IRQ_NONE;
+
+ ret = regmap_bulk_read(rtc_data->regmap, 0, regs,
+ ABB5ZES3_CTRL_SEC_LEN);
+ if (ret) {
+ dev_err(dev, "%s: unable to read control section (%d)!\n",
+ __func__, ret);
+ return handled;
+ }
+
+ /*
+ * Check battery low detection flag and disable battery low interrupt
+ * generation if flag is set (interrupt can only be cleared when
+ * battery is replaced).
+ */
+ if (regs[ABB5ZES3_REG_CTRL3] & ABB5ZES3_REG_CTRL3_BLF) {
+ dev_err(dev, "RTC battery is low; please change it!\n");
+
+ _abb5zes3_rtc_battery_low_irq_enable(rtc_data->regmap, false);
+
+ handled = IRQ_HANDLED;
+ }
+
+ /* Check alarm flag */
+ if (regs[ABB5ZES3_REG_CTRL2] & ABB5ZES3_REG_CTRL2_AF) {
+ dev_dbg(dev, "RTC alarm!\n");
+
+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
+ /* Acknowledge and disable the alarm */
+ _abb5zes3_rtc_clear_alarm(dev);
+ _abb5zes3_rtc_update_alarm(dev, 0);
+
+ handled = IRQ_HANDLED;
+ }
+
+ /* Check watchdog Timer A flag */
+ if (regs[ABB5ZES3_REG_CTRL2] & ABB5ZES3_REG_CTRL2_WTAF) {
+ dev_dbg(dev, "RTC timer!\n");
+
+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
+ /*
+ * Acknowledge and disable the alarm. Note: the WTAF
+ * flag was already cleared when CTRL2 was read.
+ */
+ _abb5zes3_rtc_update_timer(dev, 0);
+
+ rtc_data->timer_alarm = 0;
+
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = _abb5zes3_rtc_read_time,
+ .set_time = abb5zes3_rtc_set_time,
+ .read_alarm = abb5zes3_rtc_read_alarm,
+ .set_alarm = abb5zes3_rtc_set_alarm,
+ .alarm_irq_enable = abb5zes3_rtc_alarm_irq_enable,
+};
+
+static struct regmap_config abb5zes3_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int abb5zes3_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct abb5zes3_rtc_data *data = NULL;
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &abb5zes3_rtc_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "%s: regmap allocation failed: %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = abb5zes3_i2c_validate_chip(regmap);
+ if (ret)
+ goto err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_init(&data->lock);
+ data->regmap = regmap;
+ dev_set_drvdata(dev, data);
+
+ ret = abb5zes3_rtc_check_setup(dev);
+ if (ret)
+ goto err;
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ _abb5zes3_rtc_interrupt,
+ IRQF_SHARED|IRQF_ONESHOT,
+ DRV_NAME, client);
+ if (!ret) {
+ device_init_wakeup(dev, true);
+ data->irq = client->irq;
+ dev_dbg(dev, "%s: irq %d used by RTC\n", __func__,
+ client->irq);
+ } else {
+ dev_err(dev, "%s: irq %d unavailable (%d)\n",
+ __func__, client->irq, ret);
+ goto err;
+ }
+ }
+
+ data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
+ THIS_MODULE);
+ ret = PTR_ERR_OR_ZERO(data->rtc);
+ if (ret) {
+ dev_err(dev, "%s: unable to register RTC device (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ /* Enable battery low detection interrupt if battery not already low */
+ if (!data->battery_low && data->irq) {
+ ret = _abb5zes3_rtc_battery_low_irq_enable(regmap, true);
+ if (ret) {
+ dev_err(dev, "%s: enabling battery low interrupt "
+ "generation failed (%d)\n", __func__, ret);
+ goto err;
+ }
+ }
+
+err:
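+ /* Undo the wakeup setup done at IRQ request time if anything failed. */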
+ if (ret && data && data->irq)
+ device_init_wakeup(dev, false);
+ return ret;
+}
+
+static int abb5zes3_remove(struct i2c_client *client)
+{
+ struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
+
+ if (rtc_data->irq > 0)
+ device_init_wakeup(&client->dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int abb5zes3_rtc_suspend(struct device *dev)
+{
+ struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ return enable_irq_wake(rtc_data->irq);
+
+ return 0;
+}
+
+static int abb5zes3_rtc_resume(struct device *dev)
+{
+ struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ return disable_irq_wake(rtc_data->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(abb5zes3_rtc_pm_ops, abb5zes3_rtc_suspend,
+ abb5zes3_rtc_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id abb5zes3_dt_match[] = {
+ { .compatible = "abracon,abb5zes3" },
+ { },
+};
+#endif
+
+static const struct i2c_device_id abb5zes3_id[] = {
+ { "abb5zes3", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, abb5zes3_id);
+
+static struct i2c_driver abb5zes3_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &abb5zes3_rtc_pm_ops,
+ .of_match_table = of_match_ptr(abb5zes3_dt_match),
+ },
+ .probe = abb5zes3_probe,
+ .remove = abb5zes3_remove,
+ .id_table = abb5zes3_id,
+};
+module_i2c_driver(abb5zes3_driver);
+
+MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
+MODULE_DESCRIPTION("Abracon AB-RTCMC-32.768kHz-B5ZE-S3 RTC/Alarm driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
new file mode 100644
index 000000000000..43e04af39e09
--- /dev/null
+++ b/drivers/rtc/rtc-armada38x.c
@@ -0,0 +1,320 @@
+/*
+ * RTC driver for the Armada 38x Marvell SoCs
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define RTC_STATUS 0x0
+#define RTC_STATUS_ALARM1 BIT(0)
+#define RTC_STATUS_ALARM2 BIT(1)
+#define RTC_IRQ1_CONF 0x4
+#define RTC_IRQ1_AL_EN BIT(0)
+#define RTC_IRQ1_FREQ_EN BIT(1)
+#define RTC_IRQ1_FREQ_1HZ BIT(2)
+#define RTC_TIME 0xC
+#define RTC_ALARM1 0x10
+
+#define SOC_RTC_INTERRUPT 0x8
+#define SOC_RTC_ALARM1 BIT(0)
+#define SOC_RTC_ALARM2 BIT(1)
+#define SOC_RTC_ALARM1_MASK BIT(2)
+#define SOC_RTC_ALARM2_MASK BIT(3)
+
+struct armada38x_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *regs;
+ void __iomem *regs_soc;
+ spinlock_t lock;
+ int irq;
+};
+
+/*
+ * According to the datasheet, the OS should wait 5us after every
+ * register write to the RTC hard macro so that the required update
+ * can occur without holding off the system bus.
+ */
+static void rtc_delayed_write(u32 val, struct armada38x_rtc *rtc, int offset)
+{
+ writel(val, rtc->regs + offset);
+ udelay(5);
+}
+
+static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long time, time_check, flags;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ time = readl(rtc->regs + RTC_TIME);
+ /*
+ * Workaround for failing time-set attempts. As stated in the HW
+ * errata, if more than one second is detected between two time
+ * reads, then read once again.
+ */
+ time_check = readl(rtc->regs + RTC_TIME);
+ if ((time_check - time) > 1)
+ time_check = readl(rtc->regs + RTC_TIME);
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ rtc_time_to_tm(time_check, tm);
+
+ return 0;
+}
+
+static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+ unsigned long time, flags;
+
+ ret = rtc_tm_to_time(tm, &time);
+
+ if (ret)
+ goto out;
+ /*
+ * Setting the RTC time does not always succeed. According to the
+ * errata, we need to write to the status register first and then
+ * wait for 100ms before writing to the time register, to be sure
+ * that the data will be taken into account.
+ */
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ rtc_delayed_write(0, rtc, RTC_STATUS);
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ msleep(100);
+
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ rtc_delayed_write(time, rtc, RTC_TIME);
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+out:
+ return ret;
+}
+
+static int armada38x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long time, flags;
+ u32 val;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ time = readl(rtc->regs + RTC_ALARM1);
+ val = readl(rtc->regs + RTC_IRQ1_CONF) & RTC_IRQ1_AL_EN;
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ alrm->enabled = val ? 1 : 0;
+ rtc_time_to_tm(time, &alrm->time);
+
+ return 0;
+}
+
+static int armada38x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long time, flags;
+ int ret = 0;
+ u32 val;
+
+ ret = rtc_tm_to_time(&alrm->time, &time);
+
+ if (ret)
+ goto out;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ rtc_delayed_write(time, rtc, RTC_ALARM1);
+
+ if (alrm->enabled) {
+ rtc_delayed_write(RTC_IRQ1_AL_EN, rtc, RTC_IRQ1_CONF);
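+ /*
+ * Also set SOC_RTC_ALARM1_MASK in the SoC-level interrupt register so
+ * that alarm 1 can be delivered (the mask bit presumably acts as an
+ * enable at that level).
+ */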
+ val = readl(rtc->regs_soc + SOC_RTC_INTERRUPT);
+ writel(val | SOC_RTC_ALARM1_MASK,
+ rtc->regs_soc + SOC_RTC_INTERRUPT);
+ }
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+out:
+ return ret;
+}
+
+static int armada38x_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ if (enabled)
+ rtc_delayed_write(RTC_IRQ1_AL_EN, rtc, RTC_IRQ1_CONF);
+ else
+ rtc_delayed_write(0, rtc, RTC_IRQ1_CONF);
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ return 0;
+}
+
+static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data)
+{
+ struct armada38x_rtc *rtc = data;
+ u32 val;
+ int event = RTC_IRQF | RTC_AF;
+
+ dev_dbg(&rtc->rtc_dev->dev, "%s:irq(%d)\n", __func__, irq);
+
+ spin_lock(&rtc->lock);
+
+ val = readl(rtc->regs_soc + SOC_RTC_INTERRUPT);
+
+ writel(val & ~SOC_RTC_ALARM1, rtc->regs_soc + SOC_RTC_INTERRUPT);
+ val = readl(rtc->regs + RTC_IRQ1_CONF);
+ /* disable all the interrupts for alarm 1 */
+ rtc_delayed_write(0, rtc, RTC_IRQ1_CONF);
+ /* Ack the event */
+ rtc_delayed_write(RTC_STATUS_ALARM1, rtc, RTC_STATUS);
+
+ spin_unlock(&rtc->lock);
+
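+ /*
+ * If the frequency interrupt was enabled in RTC_IRQ1_CONF, also report
+ * an update event (1Hz) or a periodic event to the RTC core.
+ */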
+ if (val & RTC_IRQ1_FREQ_EN) {
+ if (val & RTC_IRQ1_FREQ_1HZ)
+ event |= RTC_UF;
+ else
+ event |= RTC_PF;
+ }
+
+ rtc_update_irq(rtc->rtc_dev, 1, event);
+
+ return IRQ_HANDLED;
+}
+
+static struct rtc_class_ops armada38x_rtc_ops = {
+ .read_time = armada38x_rtc_read_time,
+ .set_time = armada38x_rtc_set_time,
+ .read_alarm = armada38x_rtc_read_alarm,
+ .set_alarm = armada38x_rtc_set_alarm,
+ .alarm_irq_enable = armada38x_rtc_alarm_irq_enable,
+};
+
+static __init int armada38x_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct armada38x_rtc *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(struct armada38x_rtc),
+ GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ spin_lock_init(&rtc->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
+ rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc-soc");
+ rtc->regs_soc = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->regs_soc))
+ return PTR_ERR(rtc->regs_soc);
+
+ rtc->irq = platform_get_irq(pdev, 0);
+
+ if (rtc->irq < 0) {
+ dev_err(&pdev->dev, "no irq\n");
+ return rtc->irq;
+ }
+ if (devm_request_irq(&pdev->dev, rtc->irq, armada38x_rtc_alarm_irq,
+ 0, pdev->name, rtc) < 0) {
+ dev_warn(&pdev->dev, "Interrupt not available.\n");
+ rtc->irq = -1;
+ /*
+ * If there is no interrupt available, then we can't
+ * use the alarm.
+ */
+ armada38x_rtc_ops.set_alarm = NULL;
+ armada38x_rtc_ops.alarm_irq_enable = NULL;
+ }
+ platform_set_drvdata(pdev, rtc);
+ if (rtc->irq != -1)
+ device_init_wakeup(&pdev->dev, 1);
+
+ rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &armada38x_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc_dev)) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int armada38x_rtc_suspend(struct device *dev)
+{
+ if (device_may_wakeup(dev)) {
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+
+ return enable_irq_wake(rtc->irq);
+ }
+
+ return 0;
+}
+
+static int armada38x_rtc_resume(struct device *dev)
+{
+ if (device_may_wakeup(dev)) {
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+
+ return disable_irq_wake(rtc->irq);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(armada38x_rtc_pm_ops,
+ armada38x_rtc_suspend, armada38x_rtc_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id armada38x_rtc_of_match_table[] = {
+ { .compatible = "marvell,armada-380-rtc", },
+ {}
+};
+#endif
+
+static struct platform_driver armada38x_rtc_driver = {
+ .driver = {
+ .name = "armada38x-rtc",
+ .pm = &armada38x_rtc_pm_ops,
+ .of_match_table = of_match_ptr(armada38x_rtc_of_match_table),
+ },
+};
+
+module_platform_driver_probe(armada38x_rtc_driver, armada38x_rtc_probe);
+
+MODULE_DESCRIPTION("Marvell Armada 38x RTC driver");
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 6b9aaf1afc72..2183fd2750ab 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -313,7 +313,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
-static struct regmap_config gpbr_regmap_config = {
+static const struct regmap_config gpbr_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index d04939369251..799c34bcb26f 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -304,12 +304,12 @@ static long rtc_dev_ioctl(struct file *file,
* Not supported here.
*/
{
- unsigned long now, then;
+ time64_t now, then;
err = rtc_read_time(rtc, &tm);
if (err < 0)
return err;
- rtc_tm_to_time(&tm, &now);
+ now = rtc_tm_to_time64(&tm);
alarm.time.tm_mday = tm.tm_mday;
alarm.time.tm_mon = tm.tm_mon;
@@ -317,11 +317,11 @@ static long rtc_dev_ioctl(struct file *file,
err = rtc_valid_tm(&alarm.time);
if (err < 0)
return err;
- rtc_tm_to_time(&alarm.time, &then);
+ then = rtc_tm_to_time64(&alarm.time);
/* alarm may need to wrap into tomorrow */
if (then < now) {
- rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+ rtc_time64_to_tm(now + 24 * 60 * 60, &tm);
alarm.time.tm_mday = tm.tm_mday;
alarm.time.tm_mon = tm.tm_mon;
alarm.time.tm_year = tm.tm_year;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
new file mode 100644
index 000000000000..8c3bfcb115b7
--- /dev/null
+++ b/drivers/rtc/rtc-ds1685.c
@@ -0,0 +1,2252 @@
+/*
+ * An rtc driver for the Dallas/Maxim DS1685/DS1687 and related real-time
+ * chips.
+ *
+ * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
+ *
+ * References:
+ * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10.
+ * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10.
+ * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105.
+ * Application Note 90, Using the Multiplex Bus RTC Extended Features.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/workqueue.h>
+
+#include <linux/rtc/ds1685.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif
+
+#define DRV_VERSION "0.42.0"
+
+
+/* ----------------------------------------------------------------------- */
+/* Standard read/write functions if platform does not provide overrides */
+
+/**
+ * ds1685_read - read a value from an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to read.
+ */
+static u8
+ds1685_read(struct ds1685_priv *rtc, int reg)
+{
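+ /*
+ * rtc->regstep is the spacing (in bytes) between consecutive RTC
+ * registers in the mapped region, presumably provided by the platform
+ * setup code.
+ */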
+ return readb((u8 __iomem *)rtc->regs +
+ (reg * rtc->regstep));
+}
+
+/**
+ * ds1685_write - write a value to an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to write.
+ * @value: value to write to the register.
+ */
+static void
+ds1685_write(struct ds1685_priv *rtc, int reg, u8 value)
+{
+ writeb(value, ((u8 __iomem *)rtc->regs +
+ (reg * rtc->regstep)));
+}
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* Inlined functions */
+
+/**
+ * ds1685_rtc_bcd2bin - bcd2bin wrapper in case platform doesn't support BCD.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @val: u8 time value to consider converting.
+ * @bcd_mask: u8 mask value if BCD mode is used.
+ * @bin_mask: u8 mask value if BIN mode is used.
+ *
+ * Returns the value, converted to BIN if originally in BCD and bcd_mode TRUE.
+ */
+static inline u8
+ds1685_rtc_bcd2bin(struct ds1685_priv *rtc, u8 val, u8 bcd_mask, u8 bin_mask)
+{
+ if (rtc->bcd_mode)
+ return (bcd2bin(val) & bcd_mask);
+
+ return (val & bin_mask);
+}
+
+/**
+ * ds1685_rtc_bin2bcd - bin2bcd wrapper in case platform doesn't support BCD.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @val: u8 time value to consider converting.
+ * @bin_mask: u8 mask value if BIN mode is used.
+ * @bcd_mask: u8 mask value if BCD mode is used.
+ *
+ * Returns the value, converted to BCD if originally in BIN and bcd_mode TRUE.
+ */
+static inline u8
+ds1685_rtc_bin2bcd(struct ds1685_priv *rtc, u8 val, u8 bin_mask, u8 bcd_mask)
+{
+ if (rtc->bcd_mode)
+ return (bin2bcd(val) & bcd_mask);
+
+ return (val & bin_mask);
+}
+
+/**
+ * ds1685_rtc_switch_to_bank0 - switch the rtc to bank 0.
+ * @rtc: pointer to the ds1685 rtc structure.
+ */
+static inline void
+ds1685_rtc_switch_to_bank0(struct ds1685_priv *rtc)
+{
+ rtc->write(rtc, RTC_CTRL_A,
+ (rtc->read(rtc, RTC_CTRL_A) & ~(RTC_CTRL_A_DV0)));
+}
+
+/**
+ * ds1685_rtc_switch_to_bank1 - switch the rtc to bank 1.
+ * @rtc: pointer to the ds1685 rtc structure.
+ */
+static inline void
+ds1685_rtc_switch_to_bank1(struct ds1685_priv *rtc)
+{
+ rtc->write(rtc, RTC_CTRL_A,
+ (rtc->read(rtc, RTC_CTRL_A) | RTC_CTRL_A_DV0));
+}
+
+/**
+ * ds1685_rtc_begin_data_access - prepare the rtc for data access.
+ * @rtc: pointer to the ds1685 rtc structure.
+ *
+ * This takes several steps to prepare the rtc for access to get/set time
+ * and alarm values from the rtc registers:
+ * - Sets the SET bit in Control Register B.
+ * - Reads Ext Control Register 4A and checks the INCR bit.
+ * - If INCR is active, Ext Control Register 4A is re-read in a loop
+ *   (with cpu_relax) until INCR goes inactive.
+ * - Switches the rtc to bank 1. This allows access to all relevant
+ * data for normal rtc operation, as bank 0 contains only the nvram.
+ */
+static inline void
+ds1685_rtc_begin_data_access(struct ds1685_priv *rtc)
+{
+ /* Set the SET bit in Ctrl B */
+ rtc->write(rtc, RTC_CTRL_B,
+ (rtc->read(rtc, RTC_CTRL_B) | RTC_CTRL_B_SET));
+
+ /* Read Ext Ctrl 4A and check the INCR bit to avoid a lockout. */
+ while (rtc->read(rtc, RTC_EXT_CTRL_4A) & RTC_CTRL_4A_INCR)
+ cpu_relax();
+
+ /* Switch to Bank 1 */
+ ds1685_rtc_switch_to_bank1(rtc);
+}
+
+/**
+ * ds1685_rtc_end_data_access - end data access on the rtc.
+ * @rtc: pointer to the ds1685 rtc structure.
+ *
+ * This ends what was started by ds1685_rtc_begin_data_access:
+ * - Switches the rtc back to bank 0.
+ * - Clears the SET bit in Control Register B.
+ */
+static inline void
+ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
+{
+ /* Switch back to Bank 0 */
+ ds1685_rtc_switch_to_bank1(rtc);
+
+ /* Clear the SET bit in Ctrl B */
+ rtc->write(rtc, RTC_CTRL_B,
+ (rtc->read(rtc, RTC_CTRL_B) & ~(RTC_CTRL_B_SET)));
+}
+
+/**
+ * ds1685_rtc_begin_ctrl_access - prepare the rtc for ctrl access.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @flags: irq flags variable for spin_lock_irqsave.
+ *
+ * This takes several steps to prepare the rtc for access to read just the
+ * control registers:
+ * - Sets a spinlock on the rtc IRQ.
+ * - Switches the rtc to bank 1. This allows access to the two extended
+ * control registers.
+ *
+ * Only use this where you are certain another lock will not be held.
+ */
+static inline void
+ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
+{
+ spin_lock_irqsave(&rtc->lock, flags);
+ ds1685_rtc_switch_to_bank1(rtc);
+}
+
+/**
+ * ds1685_rtc_end_ctrl_access - end ctrl access on the rtc.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @flags: irq flags variable for spin_unlock_irqrestore.
+ *
+ * This ends what was started by ds1685_rtc_begin_ctrl_access:
+ * - Switches the rtc back to bank 0.
+ * - Unsets the spinlock on the rtc IRQ.
+ */
+static inline void
+ds1685_rtc_end_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
+{
+ ds1685_rtc_switch_to_bank0(rtc);
+ spin_unlock_irqrestore(&rtc->lock, flags);
+}
+
+/**
+ * ds1685_rtc_get_ssn - retrieve the silicon serial number.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @ssn: u8 array to hold the bits of the silicon serial number.
+ *
+ * This number starts at 0x40, and is 8-bytes long, ending at 0x47. The
+ * first byte is the model number, the next six bytes are the serial number
+ * digits, and the final byte is a CRC check byte. Together, they form the
+ * silicon serial number.
+ *
+ * These values are stored in bank1, so ds1685_rtc_switch_to_bank1 must be
+ * called first before calling this function, else data will be read out of
+ * the bank0 NVRAM. Be sure to call ds1685_rtc_switch_to_bank0 when done.
+ */
+static inline void
+ds1685_rtc_get_ssn(struct ds1685_priv *rtc, u8 *ssn)
+{
+ ssn[0] = rtc->read(rtc, RTC_BANK1_SSN_MODEL);
+ ssn[1] = rtc->read(rtc, RTC_BANK1_SSN_BYTE_1);
+ ssn[2] = rtc->read(rtc, RTC_BANK1_SSN_BYTE_2);
+ ssn[3] = rtc->read(rtc, RTC_BANK1_SSN_BYTE_3);
+ ssn[4] = rtc->read(rtc, RTC_BANK1_SSN_BYTE_4);
+ ssn[5] = rtc->read(rtc, RTC_BANK1_SSN_BYTE_5);
+ ssn[6] = rtc->read(rtc, RTC_BANK1_SSN_BYTE_6);
+ ssn[7] = rtc->read(rtc, RTC_BANK1_SSN_CRC);
+}
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* Read/Set Time & Alarm functions */
+
+/**
+ * ds1685_rtc_read_time - reads the time registers.
+ * @dev: pointer to device structure.
+ * @tm: pointer to rtc_time structure.
+ */
+static int
+ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrlb, century;
+ u8 seconds, minutes, hours, wday, mday, month, years;
+
+ /* Fetch the time info from the RTC registers. */
+ ds1685_rtc_begin_data_access(rtc);
+ seconds = rtc->read(rtc, RTC_SECS);
+ minutes = rtc->read(rtc, RTC_MINS);
+ hours = rtc->read(rtc, RTC_HRS);
+ wday = rtc->read(rtc, RTC_WDAY);
+ mday = rtc->read(rtc, RTC_MDAY);
+ month = rtc->read(rtc, RTC_MONTH);
+ years = rtc->read(rtc, RTC_YEAR);
+ century = rtc->read(rtc, RTC_CENTURY);
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ ds1685_rtc_end_data_access(rtc);
+
+ /* bcd2bin if needed, perform fixups, and store to rtc_time. */
+ years = ds1685_rtc_bcd2bin(rtc, years, RTC_YEAR_BCD_MASK,
+ RTC_YEAR_BIN_MASK);
+ century = ds1685_rtc_bcd2bin(rtc, century, RTC_CENTURY_MASK,
+ RTC_CENTURY_MASK);
+ tm->tm_sec = ds1685_rtc_bcd2bin(rtc, seconds, RTC_SECS_BCD_MASK,
+ RTC_SECS_BIN_MASK);
+ tm->tm_min = ds1685_rtc_bcd2bin(rtc, minutes, RTC_MINS_BCD_MASK,
+ RTC_MINS_BIN_MASK);
+ tm->tm_hour = ds1685_rtc_bcd2bin(rtc, hours, RTC_HRS_24_BCD_MASK,
+ RTC_HRS_24_BIN_MASK);
+ tm->tm_wday = (ds1685_rtc_bcd2bin(rtc, wday, RTC_WDAY_MASK,
+ RTC_WDAY_MASK) - 1);
+ tm->tm_mday = ds1685_rtc_bcd2bin(rtc, mday, RTC_MDAY_BCD_MASK,
+ RTC_MDAY_BIN_MASK);
+ tm->tm_mon = (ds1685_rtc_bcd2bin(rtc, month, RTC_MONTH_BCD_MASK,
+ RTC_MONTH_BIN_MASK) - 1);
+ tm->tm_year = ((years + (century * 100)) - 1900);
+ tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+ tm->tm_isdst = 0; /* RTC has hardcoded timezone, so don't use. */
+
+ return rtc_valid_tm(tm);
+}
+
+/**
+ * ds1685_rtc_set_time - sets the time registers.
+ * @dev: pointer to device structure.
+ * @tm: pointer to rtc_time structure.
+ */
+static int
+ds1685_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrlb, seconds, minutes, hours, wday, mday, month, years, century;
+
+ /* Fetch the time info from rtc_time. */
+ seconds = ds1685_rtc_bin2bcd(rtc, tm->tm_sec, RTC_SECS_BIN_MASK,
+ RTC_SECS_BCD_MASK);
+ minutes = ds1685_rtc_bin2bcd(rtc, tm->tm_min, RTC_MINS_BIN_MASK,
+ RTC_MINS_BCD_MASK);
+ hours = ds1685_rtc_bin2bcd(rtc, tm->tm_hour, RTC_HRS_24_BIN_MASK,
+ RTC_HRS_24_BCD_MASK);
+ wday = ds1685_rtc_bin2bcd(rtc, (tm->tm_wday + 1), RTC_WDAY_MASK,
+ RTC_WDAY_MASK);
+ mday = ds1685_rtc_bin2bcd(rtc, tm->tm_mday, RTC_MDAY_BIN_MASK,
+ RTC_MDAY_BCD_MASK);
+ month = ds1685_rtc_bin2bcd(rtc, (tm->tm_mon + 1), RTC_MONTH_BIN_MASK,
+ RTC_MONTH_BCD_MASK);
+ years = ds1685_rtc_bin2bcd(rtc, (tm->tm_year % 100),
+ RTC_YEAR_BIN_MASK, RTC_YEAR_BCD_MASK);
+ century = ds1685_rtc_bin2bcd(rtc, ((tm->tm_year + 1900) / 100),
+ RTC_CENTURY_MASK, RTC_CENTURY_MASK);
+
+ /*
+ * Perform Sanity Checks:
+ * - Months: !> 12, Month Day != 0.
+ * - Month Day !> Max days in current month.
+ * - Hours !>= 24, Mins !>= 60, Secs !>= 60, & Weekday !> 7.
+ */
+ if ((tm->tm_mon > 11) || (mday == 0))
+ return -EDOM;
+
+ if (tm->tm_mday > rtc_month_days(tm->tm_mon, tm->tm_year))
+ return -EDOM;
+
+ if ((tm->tm_hour >= 24) || (tm->tm_min >= 60) ||
+ (tm->tm_sec >= 60) || (wday > 7))
+ return -EDOM;
+
+ /*
+ * Set the data mode to use and store the time values in the
+ * RTC registers.
+ */
+ ds1685_rtc_begin_data_access(rtc);
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ if (rtc->bcd_mode)
+ ctrlb &= ~(RTC_CTRL_B_DM);
+ else
+ ctrlb |= RTC_CTRL_B_DM;
+ rtc->write(rtc, RTC_CTRL_B, ctrlb);
+ rtc->write(rtc, RTC_SECS, seconds);
+ rtc->write(rtc, RTC_MINS, minutes);
+ rtc->write(rtc, RTC_HRS, hours);
+ rtc->write(rtc, RTC_WDAY, wday);
+ rtc->write(rtc, RTC_MDAY, mday);
+ rtc->write(rtc, RTC_MONTH, month);
+ rtc->write(rtc, RTC_YEAR, years);
+ rtc->write(rtc, RTC_CENTURY, century);
+ ds1685_rtc_end_data_access(rtc);
+
+ return 0;
+}
+
+/**
+ * ds1685_rtc_read_alarm - reads the alarm registers.
+ * @dev: pointer to device structure.
+ * @alrm: pointer to rtc_wkalrm structure.
+ *
+ * There are three primary alarm registers: seconds, minutes, and hours.
+ * A fourth alarm register for the month date is also available in bank1 for
+ * kickstart/wakeup features. The DS1685/DS1687 manual states that a
+ * "don't care" value ranging from 0xc0 to 0xff may be written into one or
+ * more of the three alarm bytes to act as a wildcard value. The fourth
+ * byte doesn't support a "don't care" value.
+ */
+static int
+ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 seconds, minutes, hours, mday, ctrlb, ctrlc;
+
+ /* Fetch the alarm info from the RTC alarm registers. */
+ ds1685_rtc_begin_data_access(rtc);
+ seconds = rtc->read(rtc, RTC_SECS_ALARM);
+ minutes = rtc->read(rtc, RTC_MINS_ALARM);
+ hours = rtc->read(rtc, RTC_HRS_ALARM);
+ mday = rtc->read(rtc, RTC_MDAY_ALARM);
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ ctrlc = rtc->read(rtc, RTC_CTRL_C);
+ ds1685_rtc_end_data_access(rtc);
+
+ /* Check month date. */
+ if (!((mday >= 1) && (mday <= 31)))
+ return -EDOM;
+
+ /*
+ * Check the three alarm bytes.
+ *
+ * The Linux RTC system doesn't support the "don't care" capability
+ * of this RTC chip. We check for it anyway in case support is
+ * added in the future.
+ */
+ if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+ alrm->time.tm_sec = -1;
+ else
+ alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
+ RTC_SECS_BCD_MASK,
+ RTC_SECS_BIN_MASK);
+
+ if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+ alrm->time.tm_min = -1;
+ else
+ alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
+ RTC_MINS_BCD_MASK,
+ RTC_MINS_BIN_MASK);
+
+ if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+ alrm->time.tm_hour = -1;
+ else
+ alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
+ RTC_HRS_24_BCD_MASK,
+ RTC_HRS_24_BIN_MASK);
+
+ /* Write the data to rtc_wkalrm. */
+ alrm->time.tm_mday = ds1685_rtc_bcd2bin(rtc, mday, RTC_MDAY_BCD_MASK,
+ RTC_MDAY_BIN_MASK);
+ alrm->time.tm_mon = -1;
+ alrm->time.tm_year = -1;
+ alrm->time.tm_wday = -1;
+ alrm->time.tm_yday = -1;
+ alrm->time.tm_isdst = -1;
+ alrm->enabled = !!(ctrlb & RTC_CTRL_B_AIE);
+ alrm->pending = !!(ctrlc & RTC_CTRL_C_AF);
+
+ return 0;
+}
+
+/**
+ * ds1685_rtc_set_alarm - sets the alarm in registers.
+ * @dev: pointer to device structure.
+ * @alrm: pointer to rtc_wkalrm structure.
+ */
+static int
+ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrlb, seconds, minutes, hours, mday;
+
+ /* Fetch the alarm info and convert to BCD. */
+ seconds = ds1685_rtc_bin2bcd(rtc, alrm->time.tm_sec,
+ RTC_SECS_BIN_MASK,
+ RTC_SECS_BCD_MASK);
+ minutes = ds1685_rtc_bin2bcd(rtc, alrm->time.tm_min,
+ RTC_MINS_BIN_MASK,
+ RTC_MINS_BCD_MASK);
+ hours = ds1685_rtc_bin2bcd(rtc, alrm->time.tm_hour,
+ RTC_HRS_24_BIN_MASK,
+ RTC_HRS_24_BCD_MASK);
+ mday = ds1685_rtc_bin2bcd(rtc, alrm->time.tm_mday,
+ RTC_MDAY_BIN_MASK,
+ RTC_MDAY_BCD_MASK);
+
+ /* Check the month date for validity. */
+ if (!((mday >= 1) && (mday <= 31)))
+ return -EDOM;
+
+ /*
+ * Check the three alarm bytes.
+ *
+ * The Linux RTC system doesn't support the "don't care" capability
+ * of this RTC chip because rtc_valid_tm tries to validate every
+ * field, and we only support four fields. We put the support
+ * here anyway for the future.
+ */
+ if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+ seconds = 0xff;
+
+ if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+ minutes = 0xff;
+
+ if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+ hours = 0xff;
+
+ alrm->time.tm_mon = -1;
+ alrm->time.tm_year = -1;
+ alrm->time.tm_wday = -1;
+ alrm->time.tm_yday = -1;
+ alrm->time.tm_isdst = -1;
+
+ /* Disable the alarm interrupt first. */
+ ds1685_rtc_begin_data_access(rtc);
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ rtc->write(rtc, RTC_CTRL_B, (ctrlb & ~(RTC_CTRL_B_AIE)));
+
+ /* Read ctrlc to clear RTC_CTRL_C_AF. */
+ rtc->read(rtc, RTC_CTRL_C);
+
+ /*
+ * Set the data mode to use and store the time values in the
+ * RTC registers.
+ */
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ if (rtc->bcd_mode)
+ ctrlb &= ~(RTC_CTRL_B_DM);
+ else
+ ctrlb |= RTC_CTRL_B_DM;
+ rtc->write(rtc, RTC_CTRL_B, ctrlb);
+ rtc->write(rtc, RTC_SECS_ALARM, seconds);
+ rtc->write(rtc, RTC_MINS_ALARM, minutes);
+ rtc->write(rtc, RTC_HRS_ALARM, hours);
+ rtc->write(rtc, RTC_MDAY_ALARM, mday);
+
+ /* Re-enable the alarm if needed. */
+ if (alrm->enabled) {
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ ctrlb |= RTC_CTRL_B_AIE;
+ rtc->write(rtc, RTC_CTRL_B, ctrlb);
+ }
+
+ /* Done! */
+ ds1685_rtc_end_data_access(rtc);
+
+ return 0;
+}
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* /dev/rtcX Interface functions */
+
+#ifdef CONFIG_RTC_INTF_DEV
+/**
+ * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
+ * @dev: pointer to device structure.
+ * @enabled: flag indicating whether to enable or disable.
+ */
+static int
+ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ unsigned long flags = 0;
+
+ /* Enable/disable the Alarm IRQ-Enable flag. */
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ /* Flip the requisite interrupt-enable bit. */
+ if (enabled)
+ rtc->write(rtc, RTC_CTRL_B, (rtc->read(rtc, RTC_CTRL_B) |
+ RTC_CTRL_B_AIE));
+ else
+ rtc->write(rtc, RTC_CTRL_B, (rtc->read(rtc, RTC_CTRL_B) &
+ ~(RTC_CTRL_B_AIE)));
+
+ /* Read Control C to clear all the flag bits. */
+ rtc->read(rtc, RTC_CTRL_C);
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ return 0;
+}
+#endif
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* IRQ handler & workqueue. */
+
+/**
+ * ds1685_rtc_irq_handler - IRQ handler.
+ * @irq: IRQ number.
+ * @dev_id: platform device pointer.
+ */
+static irqreturn_t
+ds1685_rtc_irq_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrlb, ctrlc;
+ unsigned long events = 0;
+ u8 num_irqs = 0;
+
+ /* Abort early if the device isn't ready yet (i.e., DEBUG_SHIRQ). */
+ if (unlikely(!rtc))
+ return IRQ_HANDLED;
+
+ /* Ctrlb holds the interrupt-enable bits and ctrlc the flag bits. */
+ spin_lock(&rtc->lock);
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ ctrlc = rtc->read(rtc, RTC_CTRL_C);
+
+ /* Is the IRQF bit set? */
+ if (likely(ctrlc & RTC_CTRL_C_IRQF)) {
+ /*
+ * We need to determine if it was one of the standard
+ * events: PF, AF, or UF. If so, we handle them and
+ * update the RTC core.
+ */
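+ /*
+ * PF/AF/UF in Control C occupy the same bit positions as
+ * PIE/AIE/UIE in Control B, so the Control B mask also
+ * selects the flag bits in ctrlc.
+ */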
+ if (likely(ctrlc & RTC_CTRL_B_PAU_MASK)) {
+ events = RTC_IRQF;
+
+ /* Check for a periodic interrupt. */
+ if ((ctrlb & RTC_CTRL_B_PIE) &&
+ (ctrlc & RTC_CTRL_C_PF)) {
+ events |= RTC_PF;
+ num_irqs++;
+ }
+
+ /* Check for an alarm interrupt. */
+ if ((ctrlb & RTC_CTRL_B_AIE) &&
+ (ctrlc & RTC_CTRL_C_AF)) {
+ events |= RTC_AF;
+ num_irqs++;
+ }
+
+ /* Check for an update interrupt. */
+ if ((ctrlb & RTC_CTRL_B_UIE) &&
+ (ctrlc & RTC_CTRL_C_UF)) {
+ events |= RTC_UF;
+ num_irqs++;
+ }
+
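+ /* Report the event count and event types to the RTC core. */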
+ rtc_update_irq(rtc->dev, num_irqs, events);
+ } else {
+ /*
+ * One of the "extended" interrupts was received that
+ * is not recognized by the RTC core. These need to
+ * be handled in task context as they can call other
+ * functions and the time spent in irq context needs
+ * to be minimized. Schedule them into a workqueue
+ * and inform the RTC core that the IRQs were handled.
+ */
+ spin_unlock(&rtc->lock);
+ schedule_work(&rtc->work);
+ rtc_update_irq(rtc->dev, 0, 0);
+ return IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&rtc->lock);
+
+ return events ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ds1685_rtc_work_queue - work queue handler.
+ * @work: work_struct containing data to work on in task context.
+ */
+static void
+ds1685_rtc_work_queue(struct work_struct *work)
+{
+ struct ds1685_priv *rtc = container_of(work,
+ struct ds1685_priv, work);
+ struct platform_device *pdev = to_platform_device(&rtc->dev->dev);
+ struct mutex *rtc_mutex = &rtc->dev->ops_lock;
+ u8 ctrl4a, ctrl4b;
+
+ mutex_lock(rtc_mutex);
+
+ ds1685_rtc_switch_to_bank1(rtc);
+ ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
+ ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
+
+ /*
+ * Check for a kickstart interrupt. With Vcc applied, this
+ * typically means that the power button was pressed, so we
+ * begin the shutdown sequence.
+ */
+ if ((ctrl4b & RTC_CTRL_4B_KSE) && (ctrl4a & RTC_CTRL_4A_KF)) {
+ /* Briefly disable kickstarts to debounce button presses. */
+ rtc->write(rtc, RTC_EXT_CTRL_4B,
+ (rtc->read(rtc, RTC_EXT_CTRL_4B) &
+ ~(RTC_CTRL_4B_KSE)));
+
+ /* Clear the kickstart flag. */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (ctrl4a & ~(RTC_CTRL_4A_KF)));
+
+
+ /*
+ * Sleep 500ms before re-enabling kickstarts. This allows
+ * adequate time to avoid reading signal jitter as additional
+ * button presses.
+ */
+ msleep(500);
+ rtc->write(rtc, RTC_EXT_CTRL_4B,
+ (rtc->read(rtc, RTC_EXT_CTRL_4B) |
+ RTC_CTRL_4B_KSE));
+
+ /* Call the platform pre-poweroff function. Else, shutdown. */
+ if (rtc->prepare_poweroff != NULL)
+ rtc->prepare_poweroff();
+ else
+ ds1685_rtc_poweroff(pdev);
+ }
+
+ /*
+ * Check for a wake-up interrupt. With Vcc applied, this is
+ * essentially a second alarm interrupt, except it takes into
+ * account the 'date' register in bank1 in addition to the
+ * standard three alarm registers.
+ */
+ if ((ctrl4b & RTC_CTRL_4B_WIE) && (ctrl4a & RTC_CTRL_4A_WF)) {
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (ctrl4a & ~(RTC_CTRL_4A_WF)));
+
+ /* Call the platform wake_alarm function if defined. */
+ if (rtc->wake_alarm != NULL)
+ rtc->wake_alarm();
+ else
+ dev_warn(&pdev->dev,
+ "Wake Alarm IRQ just occurred!\n");
+ }
+
+ /*
+ * Check for a ram-clear interrupt. This happens if RIE=1 and RF=0
+ * when RCE=1 in 4B. This clears all NVRAM bytes in bank0 by setting
+ * each byte to a logic 1. This has no effect on any extended
+ * NV-SRAM that might be present, nor on the time/calendar/alarm
+ * registers. After a ram-clear is completed, there is a minimum
+ * recovery time of ~150ms in which all reads/writes are locked out.
+ * NOTE: A ram-clear can still occur if RCE=1 and RIE=0. We cannot
+ * catch this scenario.
+ */
+ if ((ctrl4b & RTC_CTRL_4B_RIE) && (ctrl4a & RTC_CTRL_4A_RF)) {
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (ctrl4a & ~(RTC_CTRL_4A_RF)));
+ msleep(150);
+
+ /* Call the platform post_ram_clear function if defined. */
+ if (rtc->post_ram_clear != NULL)
+ rtc->post_ram_clear();
+ else
+ dev_warn(&pdev->dev,
+ "RAM-Clear IRQ just occurred!\n");
+ }
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ mutex_unlock(rtc_mutex);
+}
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* ProcFS interface */
+
+#ifdef CONFIG_PROC_FS
+#define NUM_REGS 6 /* Num of control registers. */
+#define NUM_BITS 8 /* Num bits per register. */
+#define NUM_SPACES 4 /* Num spaces between each bit. */
+
+/*
+ * Periodic Interrupt Rates.
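+ * Indexed by the RS3-RS0 bits of Control Register A.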
+ */
+static const char *ds1685_rtc_pirq_rate[16] = {
+ "none", "3.90625ms", "7.8125ms", "0.122070ms", "0.244141ms",
+ "0.488281ms", "0.9765625ms", "1.953125ms", "3.90625ms", "7.8125ms",
+ "15.625ms", "31.25ms", "62.5ms", "125ms", "250ms", "500ms"
+};
+
+/*
+ * Square-Wave Output Frequencies.
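+ * Indexed by the RS3-RS0 bits of Control Register A.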
+ */
+static const char *ds1685_rtc_sqw_freq[16] = {
+ "none", "256Hz", "128Hz", "8192Hz", "4096Hz", "2048Hz", "1024Hz",
+ "512Hz", "256Hz", "128Hz", "64Hz", "32Hz", "16Hz", "8Hz", "4Hz", "2Hz"
+};
+
+#ifdef CONFIG_RTC_DS1685_PROC_REGS
+/**
+ * ds1685_rtc_print_regs - helper function to print register values.
+ * @hex: hex byte to convert into binary bits.
+ * @dest: destination char array.
+ *
+ * This is basically a hex->binary function, just with extra spacing between
+ * the digits. It only works on 1-byte values (8 bits).
+ */
+static char*
+ds1685_rtc_print_regs(u8 hex, char *dest)
+{
+ u32 i, j;
+ char *tmp = dest;
+
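+ /* Walk the bits MSB-first, emitting '1' or '0' followed by padding spaces. */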
+ for (i = 0; i < NUM_BITS; i++) {
+ *tmp++ = ((hex & 0x80) != 0 ? '1' : '0');
+ for (j = 0; j < NUM_SPACES; j++)
+ *tmp++ = ' ';
+ hex <<= 1;
+ }
+ *tmp++ = '\0';
+
+ return dest;
+}
+#endif
+
+/**
+ * ds1685_rtc_proc - procfs access function.
+ * @dev: pointer to device structure.
+ * @seq: pointer to seq_file structure.
+ */
+static int
+ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrla, ctrlb, ctrlc, ctrld, ctrl4a, ctrl4b, ssn[8];
+ const char *model = NULL;
+#ifdef CONFIG_RTC_DS1685_PROC_REGS
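+ /* Each register renders as 8 bits x (1 digit + 4 spaces), plus a NUL: 41 chars. */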
+ char bits[NUM_REGS][(NUM_BITS * NUM_SPACES) + NUM_BITS + 1];
+#endif
+
+ /* Read all the relevant data from the control registers. */
+ ds1685_rtc_switch_to_bank1(rtc);
+ ds1685_rtc_get_ssn(rtc, ssn);
+ ctrla = rtc->read(rtc, RTC_CTRL_A);
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ ctrlc = rtc->read(rtc, RTC_CTRL_C);
+ ctrld = rtc->read(rtc, RTC_CTRL_D);
+ ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
+ ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ /* Determine the RTC model. */
+ switch (ssn[0]) {
+ case RTC_MODEL_DS1685:
+ model = "DS1685/DS1687";
+ break;
+ case RTC_MODEL_DS1689:
+ model = "DS1689/DS1693";
+ break;
+ case RTC_MODEL_DS17285:
+ model = "DS17285/DS17287";
+ break;
+ case RTC_MODEL_DS17485:
+ model = "DS17485/DS17487";
+ break;
+ case RTC_MODEL_DS17885:
+ model = "DS17885/DS17887";
+ break;
+ default:
+ model = "Unknown";
+ break;
+ }
+
+ /* Print out the information. */
+ seq_printf(seq,
+ "Model\t\t: %s\n"
+ "Oscillator\t: %s\n"
+ "12/24hr\t\t: %s\n"
+ "DST\t\t: %s\n"
+ "Data mode\t: %s\n"
+ "Battery\t\t: %s\n"
+ "Aux batt\t: %s\n"
+ "Update IRQ\t: %s\n"
+ "Periodic IRQ\t: %s\n"
+ "Periodic Rate\t: %s\n"
+ "SQW Freq\t: %s\n"
+#ifdef CONFIG_RTC_DS1685_PROC_REGS
+ "Serial #\t: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n"
+ "Register Status\t:\n"
+ " Ctrl A\t: UIP DV2 DV1 DV0 RS3 RS2 RS1 RS0\n"
+ "\t\t: %s\n"
+ " Ctrl B\t: SET PIE AIE UIE SQWE DM 2412 DSE\n"
+ "\t\t: %s\n"
+ " Ctrl C\t: IRQF PF AF UF --- --- --- ---\n"
+ "\t\t: %s\n"
+ " Ctrl D\t: VRT --- --- --- --- --- --- ---\n"
+ "\t\t: %s\n"
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+ " Ctrl 4A\t: VRT2 INCR BME --- PAB RF WF KF\n"
+#else
+ " Ctrl 4A\t: VRT2 INCR --- --- PAB RF WF KF\n"
+#endif
+ "\t\t: %s\n"
+ " Ctrl 4B\t: ABE E32k CS RCE PRS RIE WIE KSE\n"
+ "\t\t: %s\n",
+#else
+ "Serial #\t: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+#endif
+ model,
+ ((ctrla & RTC_CTRL_A_DV1) ? "enabled" : "disabled"),
+ ((ctrlb & RTC_CTRL_B_2412) ? "24-hour" : "12-hour"),
+ ((ctrlb & RTC_CTRL_B_DSE) ? "enabled" : "disabled"),
+ ((ctrlb & RTC_CTRL_B_DM) ? "binary" : "BCD"),
+ ((ctrld & RTC_CTRL_D_VRT) ? "ok" : "exhausted or n/a"),
+ ((ctrl4a & RTC_CTRL_4A_VRT2) ? "ok" : "exhausted or n/a"),
+ ((ctrlb & RTC_CTRL_B_UIE) ? "yes" : "no"),
+ ((ctrlb & RTC_CTRL_B_PIE) ? "yes" : "no"),
+ (!(ctrl4b & RTC_CTRL_4B_E32K) ?
+ ds1685_rtc_pirq_rate[(ctrla & RTC_CTRL_A_RS_MASK)] : "none"),
+ (!((ctrl4b & RTC_CTRL_4B_E32K)) ?
+ ds1685_rtc_sqw_freq[(ctrla & RTC_CTRL_A_RS_MASK)] : "32768Hz"),
+#ifdef CONFIG_RTC_DS1685_PROC_REGS
+ ssn[0], ssn[1], ssn[2], ssn[3], ssn[4], ssn[5], ssn[6], ssn[7],
+ ds1685_rtc_print_regs(ctrla, bits[0]),
+ ds1685_rtc_print_regs(ctrlb, bits[1]),
+ ds1685_rtc_print_regs(ctrlc, bits[2]),
+ ds1685_rtc_print_regs(ctrld, bits[3]),
+ ds1685_rtc_print_regs(ctrl4a, bits[4]),
+ ds1685_rtc_print_regs(ctrl4b, bits[5]));
+#else
+ ssn[0], ssn[1], ssn[2], ssn[3], ssn[4], ssn[5], ssn[6], ssn[7]);
+#endif
+ return 0;
+}
+#else
+#define ds1685_rtc_proc NULL
+#endif /* CONFIG_PROC_FS */
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* RTC Class operations */
+
+static const struct rtc_class_ops
+ds1685_rtc_ops = {
+ .proc = ds1685_rtc_proc,
+ .read_time = ds1685_rtc_read_time,
+ .set_time = ds1685_rtc_set_time,
+ .read_alarm = ds1685_rtc_read_alarm,
+ .set_alarm = ds1685_rtc_set_alarm,
+ .alarm_irq_enable = ds1685_rtc_alarm_irq_enable,
+};
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* SysFS interface */
+
+#ifdef CONFIG_SYSFS
+/**
+ * ds1685_rtc_sysfs_nvram_read - reads rtc nvram via sysfs.
+ * @filp: pointer to file structure.
+ * @kobj: pointer to kobject structure.
+ * @bin_attr: pointer to bin_attribute structure.
+ * @buf: pointer to char array to hold the output.
+ * @pos: current file position.
+ * @size: size of the data to read.
+ */
+static ssize_t
+ds1685_rtc_sysfs_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t size)
+{
+ struct platform_device *pdev =
+ to_platform_device(container_of(kobj, struct device, kobj));
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ ssize_t count;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ /* Read NVRAM in time and bank0 registers. */
+ for (count = 0; size > 0 && pos < NVRAM_TOTAL_SZ_BANK0;
+ count++, size--) {
+ if (count < NVRAM_SZ_TIME)
+ *buf++ = rtc->read(rtc, (NVRAM_TIME_BASE + pos++));
+ else
+ *buf++ = rtc->read(rtc, (NVRAM_BANK0_BASE + pos++));
+ }
+
+#ifndef CONFIG_RTC_DRV_DS1689
+ if (size > 0) {
+ ds1685_rtc_switch_to_bank1(rtc);
+
+#ifndef CONFIG_RTC_DRV_DS1685
+ /* Enable burst-mode on DS17x85/DS17x87 */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (rtc->read(rtc, RTC_EXT_CTRL_4A) |
+ RTC_CTRL_4A_BME));
+
+ /* We need one write to RTC_BANK1_RAM_ADDR_LSB to start
+ * reading with burst-mode */
+ rtc->write(rtc, RTC_BANK1_RAM_ADDR_LSB,
+ (pos - NVRAM_TOTAL_SZ_BANK0));
+#endif
+
+ /* Read NVRAM in bank1 registers. */
+ for (count = 0; size > 0 && pos < NVRAM_TOTAL_SZ;
+ count++, size--) {
+#ifdef CONFIG_RTC_DRV_DS1685
+ /* DS1685/DS1687 has to write to RTC_BANK1_RAM_ADDR
+ * before each read. */
+ rtc->write(rtc, RTC_BANK1_RAM_ADDR,
+ (pos - NVRAM_TOTAL_SZ_BANK0));
+#endif
+ *buf++ = rtc->read(rtc, RTC_BANK1_RAM_DATA_PORT);
+ pos++;
+ }
+
+#ifndef CONFIG_RTC_DRV_DS1685
+ /* Disable burst-mode on DS17x85/DS17x87 */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (rtc->read(rtc, RTC_EXT_CTRL_4A) &
+ ~(RTC_CTRL_4A_BME)));
+#endif
+ ds1685_rtc_switch_to_bank0(rtc);
+ }
+#endif /* !CONFIG_RTC_DRV_DS1689 */
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ /*
+ * XXX: Bug? this appears to cause the function to get executed
+ * several times in succession. But it's the only way to actually get
+ * data written out to a file.
+ */
+ return count;
+}
+
+/**
+ * ds1685_rtc_sysfs_nvram_write - writes rtc nvram via sysfs.
+ * @filp: pointer to file structure.
+ * @kobj: pointer to kobject structure.
+ * @bin_attr: pointer to bin_attribute structure.
+ * @buf: pointer to char array to hold the input.
+ * @pos: current file position.
+ * @size: size of the data to write.
+ */
+static ssize_t
+ds1685_rtc_sysfs_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t size)
+{
+ struct platform_device *pdev =
+ to_platform_device(container_of(kobj, struct device, kobj));
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ ssize_t count;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ /* Write NVRAM in time and bank0 registers. */
+ for (count = 0; size > 0 && pos < NVRAM_TOTAL_SZ_BANK0;
+ count++, size--)
+ if (count < NVRAM_SZ_TIME)
+ rtc->write(rtc, (NVRAM_TIME_BASE + pos++),
+ *buf++);
+ else
+ rtc->write(rtc, (NVRAM_BANK0_BASE + pos++), *buf++);
+
+#ifndef CONFIG_RTC_DRV_DS1689
+ if (size > 0) {
+ ds1685_rtc_switch_to_bank1(rtc);
+
+#ifndef CONFIG_RTC_DRV_DS1685
+ /* Enable burst-mode on DS17x85/DS17x87 */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (rtc->read(rtc, RTC_EXT_CTRL_4A) |
+ RTC_CTRL_4A_BME));
+
+ /* We need one write to RTC_BANK1_RAM_ADDR_LSB to start
+ * writing with burst-mode */
+ rtc->write(rtc, RTC_BANK1_RAM_ADDR_LSB,
+ (pos - NVRAM_TOTAL_SZ_BANK0));
+#endif
+
+ /* Write NVRAM in bank1 registers. */
+ for (count = 0; size > 0 && pos < NVRAM_TOTAL_SZ;
+ count++, size--) {
+#ifdef CONFIG_RTC_DRV_DS1685
+ /* DS1685/DS1687 has to write to RTC_BANK1_RAM_ADDR
+ * before each read. */
+ rtc->write(rtc, RTC_BANK1_RAM_ADDR,
+ (pos - NVRAM_TOTAL_SZ_BANK0));
+#endif
+ rtc->write(rtc, RTC_BANK1_RAM_DATA_PORT, *buf++);
+ pos++;
+ }
+
+#ifndef CONFIG_RTC_DRV_DS1685
+ /* Disable burst-mode on DS17x85/DS17x87 */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (rtc->read(rtc, RTC_EXT_CTRL_4A) &
+ ~(RTC_CTRL_4A_BME)));
+#endif
+ ds1685_rtc_switch_to_bank0(rtc);
+ }
+#endif /* !CONFIG_RTC_DRV_DS1689 */
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ return count;
+}
+
+/**
+ * struct ds1685_rtc_sysfs_nvram_attr - sysfs attributes for rtc nvram.
+ * @attr: nvram attributes.
+ * @read: nvram read function.
+ * @write: nvram write function.
+ * @size: nvram total size (bank0 + extended).
+ */
+static struct bin_attribute
+ds1685_rtc_sysfs_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .read = ds1685_rtc_sysfs_nvram_read,
+ .write = ds1685_rtc_sysfs_nvram_write,
+ .size = NVRAM_TOTAL_SZ
+};
+
+/**
+ * ds1685_rtc_sysfs_battery_show - sysfs file for main battery status.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array to hold the output.
+ */
+static ssize_t
+ds1685_rtc_sysfs_battery_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrld;
+
+ ctrld = rtc->read(rtc, RTC_CTRL_D);
+
+ return snprintf(buf, 15, "%s\n",
+ (ctrld & RTC_CTRL_D_VRT) ? "ok" : "not ok or N/A");
+}
+static DEVICE_ATTR(battery, S_IRUGO, ds1685_rtc_sysfs_battery_show, NULL);
+
+/**
+ * ds1685_rtc_sysfs_auxbatt_show - sysfs file for aux battery status.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array to hold the output.
+ */
+static ssize_t
+ds1685_rtc_sysfs_auxbatt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ctrl4a;
+
+ ds1685_rtc_switch_to_bank1(rtc);
+ ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ return snprintf(buf, 15, "%s\n",
+ (ctrl4a & RTC_CTRL_4A_VRT2) ? "ok" : "not ok or N/A");
+}
+static DEVICE_ATTR(auxbatt, S_IRUGO, ds1685_rtc_sysfs_auxbatt_show, NULL);
+
+/**
+ * ds1685_rtc_sysfs_serial_show - sysfs file for silicon serial number.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array to hold the output.
+ */
+static ssize_t
+ds1685_rtc_sysfs_serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ u8 ssn[8];
+
+ ds1685_rtc_switch_to_bank1(rtc);
+ ds1685_rtc_get_ssn(rtc, ssn);
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ return snprintf(buf, 25, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ ssn[0], ssn[1], ssn[2], ssn[3], ssn[4], ssn[5],
+ ssn[6], ssn[7]);
+}
+static DEVICE_ATTR(serial, S_IRUGO, ds1685_rtc_sysfs_serial_show, NULL);
+
+/**
+ * struct ds1685_rtc_sysfs_misc_attrs - list for misc RTC features.
+ */
+static struct attribute*
+ds1685_rtc_sysfs_misc_attrs[] = {
+ &dev_attr_battery.attr,
+ &dev_attr_auxbatt.attr,
+ &dev_attr_serial.attr,
+ NULL,
+};
+
+/**
+ * struct ds1685_rtc_sysfs_misc_grp - attr group for misc RTC features.
+ */
+static const struct attribute_group
+ds1685_rtc_sysfs_misc_grp = {
+ .name = "misc",
+ .attrs = ds1685_rtc_sysfs_misc_attrs,
+};
+
+#ifdef CONFIG_RTC_DS1685_SYSFS_REGS
+/**
+ * struct ds1685_rtc_ctrl_regs.
+ * @name: char pointer for the bit name.
+ * @reg: control register the bit is in.
+ * @bit: the bit's offset in the register.
+ */
+struct ds1685_rtc_ctrl_regs {
+ const char *name;
+ const u8 reg;
+ const u8 bit;
+};
+
+/*
+ * Ctrl register bit lookup table.
+ */
+static const struct ds1685_rtc_ctrl_regs
+ds1685_ctrl_regs_table[] = {
+ { "uip", RTC_CTRL_A, RTC_CTRL_A_UIP },
+ { "dv2", RTC_CTRL_A, RTC_CTRL_A_DV2 },
+ { "dv1", RTC_CTRL_A, RTC_CTRL_A_DV1 },
+ { "dv0", RTC_CTRL_A, RTC_CTRL_A_DV0 },
+ { "rs3", RTC_CTRL_A, RTC_CTRL_A_RS3 },
+ { "rs2", RTC_CTRL_A, RTC_CTRL_A_RS2 },
+ { "rs1", RTC_CTRL_A, RTC_CTRL_A_RS1 },
+ { "rs0", RTC_CTRL_A, RTC_CTRL_A_RS0 },
+ { "set", RTC_CTRL_B, RTC_CTRL_B_SET },
+ { "pie", RTC_CTRL_B, RTC_CTRL_B_PIE },
+ { "aie", RTC_CTRL_B, RTC_CTRL_B_AIE },
+ { "uie", RTC_CTRL_B, RTC_CTRL_B_UIE },
+ { "sqwe", RTC_CTRL_B, RTC_CTRL_B_SQWE },
+ { "dm", RTC_CTRL_B, RTC_CTRL_B_DM },
+ { "2412", RTC_CTRL_B, RTC_CTRL_B_2412 },
+ { "dse", RTC_CTRL_B, RTC_CTRL_B_DSE },
+ { "irqf", RTC_CTRL_C, RTC_CTRL_C_IRQF },
+ { "pf", RTC_CTRL_C, RTC_CTRL_C_PF },
+ { "af", RTC_CTRL_C, RTC_CTRL_C_AF },
+ { "uf", RTC_CTRL_C, RTC_CTRL_C_UF },
+ { "vrt", RTC_CTRL_D, RTC_CTRL_D_VRT },
+ { "vrt2", RTC_EXT_CTRL_4A, RTC_CTRL_4A_VRT2 },
+ { "incr", RTC_EXT_CTRL_4A, RTC_CTRL_4A_INCR },
+ { "pab", RTC_EXT_CTRL_4A, RTC_CTRL_4A_PAB },
+ { "rf", RTC_EXT_CTRL_4A, RTC_CTRL_4A_RF },
+ { "wf", RTC_EXT_CTRL_4A, RTC_CTRL_4A_WF },
+ { "kf", RTC_EXT_CTRL_4A, RTC_CTRL_4A_KF },
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+ { "bme", RTC_EXT_CTRL_4A, RTC_CTRL_4A_BME },
+#endif
+ { "abe", RTC_EXT_CTRL_4B, RTC_CTRL_4B_ABE },
+ { "e32k", RTC_EXT_CTRL_4B, RTC_CTRL_4B_E32K },
+ { "cs", RTC_EXT_CTRL_4B, RTC_CTRL_4B_CS },
+ { "rce", RTC_EXT_CTRL_4B, RTC_CTRL_4B_RCE },
+ { "prs", RTC_EXT_CTRL_4B, RTC_CTRL_4B_PRS },
+ { "rie", RTC_EXT_CTRL_4B, RTC_CTRL_4B_RIE },
+ { "wie", RTC_EXT_CTRL_4B, RTC_CTRL_4B_WIE },
+ { "kse", RTC_EXT_CTRL_4B, RTC_CTRL_4B_KSE },
+ { NULL, 0, 0 },
+};
+
+/**
+ * ds1685_rtc_sysfs_ctrl_regs_lookup - ctrl register bit lookup function.
+ * @name: ctrl register bit to look up in ds1685_ctrl_regs_table.
+ */
+static const struct ds1685_rtc_ctrl_regs*
+ds1685_rtc_sysfs_ctrl_regs_lookup(const char *name)
+{
+ const struct ds1685_rtc_ctrl_regs *p = ds1685_ctrl_regs_table;
+
+ for (; p->name != NULL; ++p)
+ if (strcmp(p->name, name) == 0)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * ds1685_rtc_sysfs_ctrl_regs_show - reads a ctrl register bit via sysfs.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array to hold the output.
+ */
+static ssize_t
+ds1685_rtc_sysfs_ctrl_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 tmp;
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ const struct ds1685_rtc_ctrl_regs *reg_info =
+ ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
+
+ /* Make sure we actually matched something. */
+ if (!reg_info)
+ return -EINVAL;
+
+ /* No spinlock during a read -- mutex is already held. */
+ ds1685_rtc_switch_to_bank1(rtc);
+ tmp = rtc->read(rtc, reg_info->reg) & reg_info->bit;
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ return snprintf(buf, 3, "%d\n", (tmp ? 1 : 0));
+}
+
+/**
+ * ds1685_rtc_sysfs_ctrl_regs_store - writes a ctrl register bit via sysfs.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array holding the input.
+ * @count: number of bytes to write.
+ */
+static ssize_t
+ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ u8 reg = 0, bit = 0, tmp;
+ unsigned long flags = 0;
+ long int val = 0;
+ const struct ds1685_rtc_ctrl_regs *reg_info =
+ ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
+
+ /* We only accept numbers. */
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ /* bits are binary, 0 or 1 only. */
+ if ((val != 0) && (val != 1))
+ return -ERANGE;
+
+ /* Make sure we actually matched something. */
+ if (!reg_info)
+ return -EINVAL;
+
+ reg = reg_info->reg;
+ bit = reg_info->bit;
+
+ /* Safe to spinlock during a write. */
+ ds1685_rtc_begin_ctrl_access(rtc, flags);
+ tmp = rtc->read(rtc, reg);
+ rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
+ ds1685_rtc_end_ctrl_access(rtc, flags);
+
+ return count;
+}
+
+/**
+ * DS1685_RTC_SYSFS_CTRL_REG_RO - device_attribute for read-only register bit.
+ * @bit: bit to read.
+ */
+#define DS1685_RTC_SYSFS_CTRL_REG_RO(bit) \
+ static DEVICE_ATTR(bit, S_IRUGO, \
+ ds1685_rtc_sysfs_ctrl_regs_show, NULL)
+
+/**
+ * DS1685_RTC_SYSFS_CTRL_REG_RW - device_attribute for read-write register bit.
+ * @bit: bit to read or write.
+ */
+#define DS1685_RTC_SYSFS_CTRL_REG_RW(bit) \
+ static DEVICE_ATTR(bit, S_IRUGO | S_IWUSR, \
+ ds1685_rtc_sysfs_ctrl_regs_show, \
+ ds1685_rtc_sysfs_ctrl_regs_store)
+
+/*
+ * Control Register A bits.
+ */
+DS1685_RTC_SYSFS_CTRL_REG_RO(uip);
+DS1685_RTC_SYSFS_CTRL_REG_RW(dv2);
+DS1685_RTC_SYSFS_CTRL_REG_RW(dv1);
+DS1685_RTC_SYSFS_CTRL_REG_RO(dv0);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rs3);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rs2);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rs1);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rs0);
+
+static struct attribute*
+ds1685_rtc_sysfs_ctrla_attrs[] = {
+ &dev_attr_uip.attr,
+ &dev_attr_dv2.attr,
+ &dev_attr_dv1.attr,
+ &dev_attr_dv0.attr,
+ &dev_attr_rs3.attr,
+ &dev_attr_rs2.attr,
+ &dev_attr_rs1.attr,
+ &dev_attr_rs0.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_ctrla_grp = {
+ .name = "ctrla",
+ .attrs = ds1685_rtc_sysfs_ctrla_attrs,
+};
+
+
+/*
+ * Control Register B bits.
+ */
+DS1685_RTC_SYSFS_CTRL_REG_RO(set);
+DS1685_RTC_SYSFS_CTRL_REG_RW(pie);
+DS1685_RTC_SYSFS_CTRL_REG_RW(aie);
+DS1685_RTC_SYSFS_CTRL_REG_RW(uie);
+DS1685_RTC_SYSFS_CTRL_REG_RW(sqwe);
+DS1685_RTC_SYSFS_CTRL_REG_RO(dm);
+DS1685_RTC_SYSFS_CTRL_REG_RO(2412);
+DS1685_RTC_SYSFS_CTRL_REG_RO(dse);
+
+static struct attribute*
+ds1685_rtc_sysfs_ctrlb_attrs[] = {
+ &dev_attr_set.attr,
+ &dev_attr_pie.attr,
+ &dev_attr_aie.attr,
+ &dev_attr_uie.attr,
+ &dev_attr_sqwe.attr,
+ &dev_attr_dm.attr,
+ &dev_attr_2412.attr,
+ &dev_attr_dse.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_ctrlb_grp = {
+ .name = "ctrlb",
+ .attrs = ds1685_rtc_sysfs_ctrlb_attrs,
+};
+
+/*
+ * Control Register C bits.
+ *
+ * Reading Control C clears these bits! Reading them individually can
+ * possibly cause an interrupt to be missed. Use the /proc interface
+ * to see all the bits in this register simultaneously.
+ */
+DS1685_RTC_SYSFS_CTRL_REG_RO(irqf);
+DS1685_RTC_SYSFS_CTRL_REG_RO(pf);
+DS1685_RTC_SYSFS_CTRL_REG_RO(af);
+DS1685_RTC_SYSFS_CTRL_REG_RO(uf);
+
+static struct attribute*
+ds1685_rtc_sysfs_ctrlc_attrs[] = {
+ &dev_attr_irqf.attr,
+ &dev_attr_pf.attr,
+ &dev_attr_af.attr,
+ &dev_attr_uf.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_ctrlc_grp = {
+ .name = "ctrlc",
+ .attrs = ds1685_rtc_sysfs_ctrlc_attrs,
+};
+
+/*
+ * Control Register D bits.
+ */
+DS1685_RTC_SYSFS_CTRL_REG_RO(vrt);
+
+static struct attribute*
+ds1685_rtc_sysfs_ctrld_attrs[] = {
+ &dev_attr_vrt.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_ctrld_grp = {
+ .name = "ctrld",
+ .attrs = ds1685_rtc_sysfs_ctrld_attrs,
+};
+
+/*
+ * Control Register 4A bits.
+ */
+DS1685_RTC_SYSFS_CTRL_REG_RO(vrt2);
+DS1685_RTC_SYSFS_CTRL_REG_RO(incr);
+DS1685_RTC_SYSFS_CTRL_REG_RW(pab);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rf);
+DS1685_RTC_SYSFS_CTRL_REG_RW(wf);
+DS1685_RTC_SYSFS_CTRL_REG_RW(kf);
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+DS1685_RTC_SYSFS_CTRL_REG_RO(bme);
+#endif
+
+static struct attribute*
+ds1685_rtc_sysfs_ctrl4a_attrs[] = {
+ &dev_attr_vrt2.attr,
+ &dev_attr_incr.attr,
+ &dev_attr_pab.attr,
+ &dev_attr_rf.attr,
+ &dev_attr_wf.attr,
+ &dev_attr_kf.attr,
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+ &dev_attr_bme.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_ctrl4a_grp = {
+ .name = "ctrl4a",
+ .attrs = ds1685_rtc_sysfs_ctrl4a_attrs,
+};
+
+/*
+ * Control Register 4B bits.
+ */
+DS1685_RTC_SYSFS_CTRL_REG_RW(abe);
+DS1685_RTC_SYSFS_CTRL_REG_RW(e32k);
+DS1685_RTC_SYSFS_CTRL_REG_RO(cs);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rce);
+DS1685_RTC_SYSFS_CTRL_REG_RW(prs);
+DS1685_RTC_SYSFS_CTRL_REG_RW(rie);
+DS1685_RTC_SYSFS_CTRL_REG_RW(wie);
+DS1685_RTC_SYSFS_CTRL_REG_RW(kse);
+
+static struct attribute*
+ds1685_rtc_sysfs_ctrl4b_attrs[] = {
+ &dev_attr_abe.attr,
+ &dev_attr_e32k.attr,
+ &dev_attr_cs.attr,
+ &dev_attr_rce.attr,
+ &dev_attr_prs.attr,
+ &dev_attr_rie.attr,
+ &dev_attr_wie.attr,
+ &dev_attr_kse.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_ctrl4b_grp = {
+ .name = "ctrl4b",
+ .attrs = ds1685_rtc_sysfs_ctrl4b_attrs,
+};
+
+
+/**
+ * struct ds1685_rtc_time_regs.
+ * @name: char pointer for the register name.
+ * @reg: time/date register the value is in.
+ * @mask: mask for the register's value.
+ * @min: minimum accepted value.
+ * @max: maximum accepted value.
+ */
+struct ds1685_rtc_time_regs {
+ const char *name;
+ const u8 reg;
+ const u8 mask;
+ const u8 min;
+ const u8 max;
+};
+
+/*
+ * Time/Date register lookup tables.
+ */
+static const struct ds1685_rtc_time_regs
+ds1685_time_regs_bcd_table[] = {
+ { "seconds", RTC_SECS, RTC_SECS_BCD_MASK, 0, 59 },
+ { "minutes", RTC_MINS, RTC_MINS_BCD_MASK, 0, 59 },
+ { "hours", RTC_HRS, RTC_HRS_24_BCD_MASK, 0, 23 },
+ { "wday", RTC_WDAY, RTC_WDAY_MASK, 1, 7 },
+ { "mday", RTC_MDAY, RTC_MDAY_BCD_MASK, 1, 31 },
+ { "month", RTC_MONTH, RTC_MONTH_BCD_MASK, 1, 12 },
+ { "year", RTC_YEAR, RTC_YEAR_BCD_MASK, 0, 99 },
+ { "century", RTC_CENTURY, RTC_CENTURY_MASK, 0, 99 },
+ { "alarm_seconds", RTC_SECS_ALARM, RTC_SECS_BCD_MASK, 0, 59 },
+ { "alarm_minutes", RTC_MINS_ALARM, RTC_MINS_BCD_MASK, 0, 59 },
+ { "alarm_hours", RTC_HRS_ALARM, RTC_HRS_24_BCD_MASK, 0, 23 },
+ { "alarm_mday", RTC_MDAY_ALARM, RTC_MDAY_ALARM_MASK, 1, 31 },
+ { NULL, 0, 0, 0, 0 },
+};
+
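+/* Binary-mode limits in hex: 0x3b = 59, 0x17 = 23, 0x1f = 31, 0x0c = 12, 0x63 = 99. */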
+static const struct ds1685_rtc_time_regs
+ds1685_time_regs_bin_table[] = {
+ { "seconds", RTC_SECS, RTC_SECS_BIN_MASK, 0x00, 0x3b },
+ { "minutes", RTC_MINS, RTC_MINS_BIN_MASK, 0x00, 0x3b },
+ { "hours", RTC_HRS, RTC_HRS_24_BIN_MASK, 0x00, 0x17 },
+ { "wday", RTC_WDAY, RTC_WDAY_MASK, 0x01, 0x07 },
+ { "mday", RTC_MDAY, RTC_MDAY_BIN_MASK, 0x01, 0x1f },
+ { "month", RTC_MONTH, RTC_MONTH_BIN_MASK, 0x01, 0x0c },
+ { "year", RTC_YEAR, RTC_YEAR_BIN_MASK, 0x00, 0x63 },
+ { "century", RTC_CENTURY, RTC_CENTURY_MASK, 0x00, 0x63 },
+ { "alarm_seconds", RTC_SECS_ALARM, RTC_SECS_BIN_MASK, 0x00, 0x3b },
+ { "alarm_minutes", RTC_MINS_ALARM, RTC_MINS_BIN_MASK, 0x00, 0x3b },
+ { "alarm_hours", RTC_HRS_ALARM, RTC_HRS_24_BIN_MASK, 0x00, 0x17 },
+ { "alarm_mday", RTC_MDAY_ALARM, RTC_MDAY_ALARM_MASK, 0x01, 0x1f },
+ { NULL, 0, 0, 0x00, 0x00 },
+};
+
+/**
+ * ds1685_rtc_sysfs_time_regs_lookup - time/date register lookup function.
+ * @name: register to look up in the BCD or BIN table.
+ * @bcd_mode: if true, use the BCD table; otherwise use the BIN table.
+ */
+static const struct ds1685_rtc_time_regs*
+ds1685_rtc_sysfs_time_regs_lookup(const char *name, bool bcd_mode)
+{
+ const struct ds1685_rtc_time_regs *p;
+
+ if (bcd_mode)
+ p = ds1685_time_regs_bcd_table;
+ else
+ p = ds1685_time_regs_bin_table;
+
+ for (; p->name != NULL; ++p)
+ if (strcmp(p->name, name) == 0)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * ds1685_rtc_sysfs_time_regs_show - reads a time/date register via sysfs.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array to hold the output.
+ */
+static ssize_t
+ds1685_rtc_sysfs_time_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 tmp;
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ const struct ds1685_rtc_time_regs *bcd_reg_info =
+ ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, true);
+ const struct ds1685_rtc_time_regs *bin_reg_info =
+ ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
+
+ /* Make sure we actually matched something. */
+ if (!bcd_reg_info && !bin_reg_info)
+ return -EINVAL;
+
+ /* bcd_reg_info->reg == bin_reg_info->reg. */
+ ds1685_rtc_begin_data_access(rtc);
+ tmp = rtc->read(rtc, bcd_reg_info->reg);
+ ds1685_rtc_end_data_access(rtc);
+
+ tmp = ds1685_rtc_bcd2bin(rtc, tmp, bcd_reg_info->mask,
+ bin_reg_info->mask);
+
+ return snprintf(buf, 5, "%d\n", tmp);
+}
+
+/**
+ * ds1685_rtc_sysfs_time_regs_store - writes a time/date register via sysfs.
+ * @dev: pointer to device structure.
+ * @attr: pointer to device_attribute structure.
+ * @buf: pointer to char array holding the input.
+ * @count: number of bytes to write.
+ */
+static ssize_t
+ds1685_rtc_sysfs_time_regs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ long int val = 0;
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ const struct ds1685_rtc_time_regs *bcd_reg_info =
+ ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, true);
+ const struct ds1685_rtc_time_regs *bin_reg_info =
+ ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
+
+ /* We only accept numbers. */
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ /* Make sure we actually matched something. */
+ if (!bcd_reg_info && !bin_reg_info)
+ return -EINVAL;
+
+ /* Check for a valid range. */
+ if (rtc->bcd_mode) {
+ if ((val < bcd_reg_info->min) || (val > bcd_reg_info->max))
+ return -ERANGE;
+ } else {
+ if ((val < bin_reg_info->min) || (val > bin_reg_info->max))
+ return -ERANGE;
+ }
+
+ val = ds1685_rtc_bin2bcd(rtc, val, bin_reg_info->mask,
+ bcd_reg_info->mask);
+
+ /* bcd_reg_info->reg == bin_reg_info->reg. */
+ ds1685_rtc_begin_data_access(rtc);
+ rtc->write(rtc, bcd_reg_info->reg, val);
+ ds1685_rtc_end_data_access(rtc);
+
+ return count;
+}
+
+/**
+ * DS1685_RTC_SYSFS_TIME_REG_RW - device_attribute for a read-write time register.
+ * @reg: time/date register to read or write.
+ */
+#define DS1685_RTC_SYSFS_TIME_REG_RW(reg) \
+ static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR, \
+ ds1685_rtc_sysfs_time_regs_show, \
+ ds1685_rtc_sysfs_time_regs_store)
+
+/*
+ * Time/Date Register bits.
+ */
+DS1685_RTC_SYSFS_TIME_REG_RW(seconds);
+DS1685_RTC_SYSFS_TIME_REG_RW(minutes);
+DS1685_RTC_SYSFS_TIME_REG_RW(hours);
+DS1685_RTC_SYSFS_TIME_REG_RW(wday);
+DS1685_RTC_SYSFS_TIME_REG_RW(mday);
+DS1685_RTC_SYSFS_TIME_REG_RW(month);
+DS1685_RTC_SYSFS_TIME_REG_RW(year);
+DS1685_RTC_SYSFS_TIME_REG_RW(century);
+DS1685_RTC_SYSFS_TIME_REG_RW(alarm_seconds);
+DS1685_RTC_SYSFS_TIME_REG_RW(alarm_minutes);
+DS1685_RTC_SYSFS_TIME_REG_RW(alarm_hours);
+DS1685_RTC_SYSFS_TIME_REG_RW(alarm_mday);
+
+static struct attribute*
+ds1685_rtc_sysfs_time_attrs[] = {
+ &dev_attr_seconds.attr,
+ &dev_attr_minutes.attr,
+ &dev_attr_hours.attr,
+ &dev_attr_wday.attr,
+ &dev_attr_mday.attr,
+ &dev_attr_month.attr,
+ &dev_attr_year.attr,
+ &dev_attr_century.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_time_grp = {
+ .name = "datetime",
+ .attrs = ds1685_rtc_sysfs_time_attrs,
+};
+
+static struct attribute*
+ds1685_rtc_sysfs_alarm_attrs[] = {
+ &dev_attr_alarm_seconds.attr,
+ &dev_attr_alarm_minutes.attr,
+ &dev_attr_alarm_hours.attr,
+ &dev_attr_alarm_mday.attr,
+ NULL,
+};
+
+static const struct attribute_group
+ds1685_rtc_sysfs_alarm_grp = {
+ .name = "alarm",
+ .attrs = ds1685_rtc_sysfs_alarm_attrs,
+};
+#endif /* CONFIG_RTC_DS1685_SYSFS_REGS */
+
+
+/**
+ * ds1685_rtc_sysfs_register - register sysfs files.
+ * @dev: pointer to device structure.
+ */
+static int
+ds1685_rtc_sysfs_register(struct device *dev)
+{
+ int ret = 0;
+
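+ /* Annotate the bin_attribute for lockdep before registering it. */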
+ sysfs_bin_attr_init(&ds1685_rtc_sysfs_nvram_attr);
+ ret = sysfs_create_bin_file(&dev->kobj, &ds1685_rtc_sysfs_nvram_attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_misc_grp);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_RTC_DS1685_SYSFS_REGS
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrla_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlb_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlc_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrld_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4a_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4b_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_time_grp);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_alarm_grp);
+ if (ret)
+ return ret;
+#endif
+ return 0;
+}
+
+/**
+ * ds1685_rtc_sysfs_unregister - unregister sysfs files.
+ * @dev: pointer to device structure.
+ */
+static int
+ds1685_rtc_sysfs_unregister(struct device *dev)
+{
+ sysfs_remove_bin_file(&dev->kobj, &ds1685_rtc_sysfs_nvram_attr);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_misc_grp);
+
+#ifdef CONFIG_RTC_DS1685_SYSFS_REGS
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrla_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlb_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlc_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrld_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4a_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4b_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_time_grp);
+ sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_alarm_grp);
+#endif
+
+ return 0;
+}
+#endif /* CONFIG_SYSFS */
+
+
+
+/* ----------------------------------------------------------------------- */
+/* Driver Probe/Removal */
+
+/**
+ * ds1685_rtc_probe - initializes rtc driver.
+ * @pdev: pointer to platform_device structure.
+ */
+static int
+ds1685_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc_dev;
+ struct resource *res;
+ struct ds1685_priv *rtc;
+ struct ds1685_rtc_platform_data *pdata;
+ u8 ctrla, ctrlb, hours;
+ unsigned char am_pm;
+ int ret = 0;
+
+ /* Get the platform data. */
+ pdata = (struct ds1685_rtc_platform_data *) pdev->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ /* Allocate memory for the rtc device. */
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ /*
+ * Allocate/setup any IORESOURCE_MEM resources, if required. Not all
+ * platforms put the RTC in an easy-access place; the SGI Octane, for
+ * example, attaches the RTC to a "ByteBus" hooked to a SuperIO chip
+ * that sits behind the IOC3 PCI metadevice.
+ */
+ if (pdata->alloc_io_resources) {
+ /* Get the platform resources. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+ rtc->size = resource_size(res);
+
+ /* Request a memory region. */
+ /* XXX: mmio-only for now. */
+ if (!devm_request_mem_region(&pdev->dev, res->start, rtc->size,
+ pdev->name))
+ return -EBUSY;
+
+ /*
+ * Set the base address for the rtc, and ioremap its
+ * registers.
+ */
+ rtc->baseaddr = res->start;
+ rtc->regs = devm_ioremap(&pdev->dev, res->start, rtc->size);
+ if (!rtc->regs)
+ return -ENOMEM;
+ }
+ rtc->alloc_io_resources = pdata->alloc_io_resources;
+
+ /* Get the register step size. */
+ if (pdata->regstep > 0)
+ rtc->regstep = pdata->regstep;
+ else
+ rtc->regstep = 1;
+
+ /* Platform read function, else the mmio default if resources were set up. */
+ if (pdata->plat_read)
+ rtc->read = pdata->plat_read;
+ else if (pdata->alloc_io_resources)
+ rtc->read = ds1685_read;
+ else
+ return -ENXIO;
+
+ /* Platform write function, else the mmio default if resources were set up. */
+ if (pdata->plat_write)
+ rtc->write = pdata->plat_write;
+ else if (pdata->alloc_io_resources)
+ rtc->write = ds1685_write;
+ else
+ return -ENXIO;
+
+ /* Platform pre-shutdown function, if defined. */
+ if (pdata->plat_prepare_poweroff)
+ rtc->prepare_poweroff = pdata->plat_prepare_poweroff;
+
+ /* Platform wake_alarm function, if defined. */
+ if (pdata->plat_wake_alarm)
+ rtc->wake_alarm = pdata->plat_wake_alarm;
+
+ /* Platform post_ram_clear function, if defined. */
+ if (pdata->plat_post_ram_clear)
+ rtc->post_ram_clear = pdata->plat_post_ram_clear;
+
+ /* Init the spinlock, workqueue, & set the driver data. */
+ spin_lock_init(&rtc->lock);
+ INIT_WORK(&rtc->work, ds1685_rtc_work_queue);
+ platform_set_drvdata(pdev, rtc);
+
+ /* Turn the oscillator on if it is not already on (DV1 = 1). */
+ ctrla = rtc->read(rtc, RTC_CTRL_A);
+ if (!(ctrla & RTC_CTRL_A_DV1))
+ ctrla |= RTC_CTRL_A_DV1;
+
+ /* Enable the countdown chain (DV2 = 0) */
+ ctrla &= ~(RTC_CTRL_A_DV2);
+
+ /* Clear RS3-RS0 in Control A. */
+ ctrla &= ~(RTC_CTRL_A_RS_MASK);
+
+ /*
+ * All done with Control A. Switch to Bank 1 for the remainder of
+ * the RTC setup so we have access to the extended functions.
+ */
+ ctrla |= RTC_CTRL_A_DV0;
+ rtc->write(rtc, RTC_CTRL_A, ctrla);
+
+ /* Default to 32768Hz output. */
+ rtc->write(rtc, RTC_EXT_CTRL_4B,
+ (rtc->read(rtc, RTC_EXT_CTRL_4B) | RTC_CTRL_4B_E32K));
+
+ /* Set the SET bit in Control B so we can do some housekeeping. */
+ rtc->write(rtc, RTC_CTRL_B,
+ (rtc->read(rtc, RTC_CTRL_B) | RTC_CTRL_B_SET));
+
+ /* Read Ext Ctrl 4A and check the INCR bit to avoid a lockout. */
+ while (rtc->read(rtc, RTC_EXT_CTRL_4A) & RTC_CTRL_4A_INCR)
+ cpu_relax();
+
+ /*
+ * If the platform supports BCD mode, then set DM=0 in Control B.
+ * Otherwise, set DM=1 for BIN mode.
+ */
+ ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ if (pdata->bcd_mode)
+ ctrlb &= ~(RTC_CTRL_B_DM);
+ else
+ ctrlb |= RTC_CTRL_B_DM;
+ rtc->bcd_mode = pdata->bcd_mode;
+
+ /*
+ * Disable Daylight Savings Time (DSE = 0).
+ * The RTC has hardcoded timezone information that is rendered
+ * obsolete. We'll let the OS deal with DST settings instead.
+ */
+ if (ctrlb & RTC_CTRL_B_DSE)
+ ctrlb &= ~(RTC_CTRL_B_DSE);
+
+ /* Force 24-hour mode (2412 = 1). */
+ if (!(ctrlb & RTC_CTRL_B_2412)) {
+ /* Reinitialize the time hours. */
+ hours = rtc->read(rtc, RTC_HRS);
+ am_pm = hours & RTC_HRS_AMPM_MASK;
+ hours = ds1685_rtc_bcd2bin(rtc, hours, RTC_HRS_12_BCD_MASK,
+ RTC_HRS_12_BIN_MASK);
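+ /* Map 12-hour time to 24-hour: 12 AM becomes 0; PM hours gain 12. */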
+ hours = ((hours == 12) ? 0 : ((am_pm) ? hours + 12 : hours));
+
+ /* Enable 24-hour mode. */
+ ctrlb |= RTC_CTRL_B_2412;
+
+ /* Write back to Control B, including DM & DSE bits. */
+ rtc->write(rtc, RTC_CTRL_B, ctrlb);
+
+ /* Write the time hours back. */
+ rtc->write(rtc, RTC_HRS,
+ ds1685_rtc_bin2bcd(rtc, hours,
+ RTC_HRS_24_BIN_MASK,
+ RTC_HRS_24_BCD_MASK));
+
+ /* Reinitialize the alarm hours. */
+ hours = rtc->read(rtc, RTC_HRS_ALARM);
+ am_pm = hours & RTC_HRS_AMPM_MASK;
+ hours = ds1685_rtc_bcd2bin(rtc, hours, RTC_HRS_12_BCD_MASK,
+ RTC_HRS_12_BIN_MASK);
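+ /* Same 12- to 24-hour mapping for the alarm hours. */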
+ hours = ((hours == 12) ? 0 : ((am_pm) ? hours + 12 : hours));
+
+ /* Write the alarm hours back. */
+ rtc->write(rtc, RTC_HRS_ALARM,
+ ds1685_rtc_bin2bcd(rtc, hours,
+ RTC_HRS_24_BIN_MASK,
+ RTC_HRS_24_BCD_MASK));
+ } else {
+ /* 24-hour mode is already set, so write Control B back. */
+ rtc->write(rtc, RTC_CTRL_B, ctrlb);
+ }
+
+ /* Unset the SET bit in Control B so the RTC can update. */
+ rtc->write(rtc, RTC_CTRL_B,
+ (rtc->read(rtc, RTC_CTRL_B) & ~(RTC_CTRL_B_SET)));
+
+ /* Check the main battery. */
+ if (!(rtc->read(rtc, RTC_CTRL_D) & RTC_CTRL_D_VRT))
+ dev_warn(&pdev->dev,
+ "Main battery is exhausted! RTC may be invalid!\n");
+
+ /* Check the auxiliary battery. It is optional. */
+ if (!(rtc->read(rtc, RTC_EXT_CTRL_4A) & RTC_CTRL_4A_VRT2))
+ dev_warn(&pdev->dev,
+ "Aux battery is exhausted or not available.\n");
+
+ /* Read Ctrl B and clear PIE/AIE/UIE. */
+ rtc->write(rtc, RTC_CTRL_B,
+ (rtc->read(rtc, RTC_CTRL_B) & ~(RTC_CTRL_B_PAU_MASK)));
+
+ /* Reading Ctrl C auto-clears PF/AF/UF. */
+ rtc->read(rtc, RTC_CTRL_C);
+
+ /* Read Ctrl 4B and clear RIE/WIE/KSE. */
+ rtc->write(rtc, RTC_EXT_CTRL_4B,
+ (rtc->read(rtc, RTC_EXT_CTRL_4B) & ~(RTC_CTRL_4B_RWK_MASK)));
+
+ /* Clear RF/WF/KF in Ctrl 4A. */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (rtc->read(rtc, RTC_EXT_CTRL_4A) & ~(RTC_CTRL_4A_RWK_MASK)));
+
+ /*
+ * Re-enable KSE to handle power button events. We do not enable
+ * WIE or RIE by default.
+ */
+ rtc->write(rtc, RTC_EXT_CTRL_4B,
+ (rtc->read(rtc, RTC_EXT_CTRL_4B) | RTC_CTRL_4B_KSE));
+
+ /*
+ * Fetch the IRQ and setup the interrupt handler.
+ *
+ * Not all platforms have the IRQF pin tied to something. If not, the
+ * RTC will still set the *IE / *F flags and raise IRQF in ctrlc, but
+ * there won't be an automatic way of notifying the kernel about it,
+ * unless ctrlc is explicitly polled.
+ */
+ if (!pdata->no_irq) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret > 0) {
+ rtc->irq_num = ret;
+
+ /* Request an IRQ. */
+ ret = devm_request_irq(&pdev->dev, rtc->irq_num,
+ ds1685_rtc_irq_handler,
+ IRQF_SHARED, pdev->name, pdev);
+
+ /* Check to see if something came back. */
+ if (unlikely(ret)) {
+ dev_warn(&pdev->dev,
+ "RTC interrupt not available\n");
+ rtc->irq_num = 0;
+ }
+ } else
+ return ret;
+ }
+ rtc->no_irq = pdata->no_irq;
+
+ /* Setup complete. */
+ ds1685_rtc_switch_to_bank0(rtc);
+
+ /* Register the device as an RTC. */
+ rtc_dev = rtc_device_register(pdev->name, &pdev->dev,
+ &ds1685_rtc_ops, THIS_MODULE);
+
+ /* Success? */
+ if (IS_ERR(rtc_dev))
+ return PTR_ERR(rtc_dev);
+
+ /* Maximum periodic rate is 8192Hz (0.122070ms). */
+ rtc_dev->max_user_freq = RTC_MAX_USER_FREQ;
+
+ /* See if the platform doesn't support UIE. */
+ if (pdata->uie_unsupported)
+ rtc_dev->uie_unsupported = 1;
+ rtc->uie_unsupported = pdata->uie_unsupported;
+
+ rtc->dev = rtc_dev;
+
+#ifdef CONFIG_SYSFS
+ ret = ds1685_rtc_sysfs_register(&pdev->dev);
+ if (ret)
+ rtc_device_unregister(rtc->dev);
+#endif
+
+ /* Done! */
+ return ret;
+}
+
+/**
+ * ds1685_rtc_remove - removes rtc driver.
+ * @pdev: pointer to platform_device structure.
+ */
+static int
+ds1685_rtc_remove(struct platform_device *pdev)
+{
+ struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_SYSFS
+ ds1685_rtc_sysfs_unregister(&pdev->dev);
+#endif
+
+ rtc_device_unregister(rtc->dev);
+
+ /* Read Ctrl B and clear PIE/AIE/UIE. */
+ rtc->write(rtc, RTC_CTRL_B,
+ (rtc->read(rtc, RTC_CTRL_B) &
+ ~(RTC_CTRL_B_PAU_MASK)));
+
+ /* Reading Ctrl C auto-clears PF/AF/UF. */
+ rtc->read(rtc, RTC_CTRL_C);
+
+ /* Read Ctrl 4B and clear RIE/WIE/KSE. */
+ rtc->write(rtc, RTC_EXT_CTRL_4B,
+ (rtc->read(rtc, RTC_EXT_CTRL_4B) &
+ ~(RTC_CTRL_4B_RWK_MASK)));
+
+ /* Manually clear RF/WF/KF in Ctrl 4A. */
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (rtc->read(rtc, RTC_EXT_CTRL_4A) &
+ ~(RTC_CTRL_4A_RWK_MASK)));
+
+ cancel_work_sync(&rtc->work);
+
+ return 0;
+}
+
+/**
+ * ds1685_rtc_driver - rtc driver properties.
+ */
+static struct platform_driver ds1685_rtc_driver = {
+ .driver = {
+ .name = "rtc-ds1685",
+ .owner = THIS_MODULE,
+ },
+ .probe = ds1685_rtc_probe,
+ .remove = ds1685_rtc_remove,
+};
+
+/**
+ * ds1685_rtc_init - rtc module init.
+ */
+static int __init
+ds1685_rtc_init(void)
+{
+ return platform_driver_register(&ds1685_rtc_driver);
+}
+
+/**
+ * ds1685_rtc_exit - rtc module exit.
+ */
+static void __exit
+ds1685_rtc_exit(void)
+{
+ platform_driver_unregister(&ds1685_rtc_driver);
+}
+
+module_init(ds1685_rtc_init);
+module_exit(ds1685_rtc_exit);
+/* ----------------------------------------------------------------------- */
+
+
+/* ----------------------------------------------------------------------- */
+/* Poweroff function */
+
+/**
+ * ds1685_rtc_poweroff - uses the RTC chip to power the system off.
+ * @pdev: pointer to platform_device structure.
+ */
+extern void __noreturn
+ds1685_rtc_poweroff(struct platform_device *pdev)
+{
+ u8 ctrla, ctrl4a, ctrl4b;
+ struct ds1685_priv *rtc;
+
+ /* Check for valid RTC data, else, spin forever. */
+ if (unlikely(!pdev)) {
+ pr_emerg("rtc-ds1685: platform device data not available, spinning forever ...\n");
+ unreachable();
+ } else {
+ /* Get the rtc data. */
+ rtc = platform_get_drvdata(pdev);
+
+ /*
+ * Disable our IRQ. We're powering down, so we're not
+ * going to worry about cleaning up. Most of that should
+ * have been taken care of by the shutdown scripts and this
+ * is the final function call.
+ */
+ if (!rtc->no_irq)
+ disable_irq_nosync(rtc->irq_num);
+
+ /* Oscillator must be on and the countdown chain enabled. */
+ ctrla = rtc->read(rtc, RTC_CTRL_A);
+ ctrla |= RTC_CTRL_A_DV1;
+ ctrla &= ~(RTC_CTRL_A_DV2);
+ rtc->write(rtc, RTC_CTRL_A, ctrla);
+
+ /*
+ * Read Control 4A and check the status of the auxiliary
+ * battery. This must be present and working (VRT2 = 1)
+ * for wakeup and kickstart functionality to be useful.
+ */
+ ds1685_rtc_switch_to_bank1(rtc);
+ ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
+ if (ctrl4a & RTC_CTRL_4A_VRT2) {
+ /* Clear all of the interrupt flags on Control 4A. */
+ ctrl4a &= ~(RTC_CTRL_4A_RWK_MASK);
+ rtc->write(rtc, RTC_EXT_CTRL_4A, ctrl4a);
+
+ /*
+ * The auxiliary battery is present and working.
+ * Enable extended functions (ABE=1), enable
+ * wake-up (WIE=1), and enable kickstart (KSE=1)
+ * in Control 4B.
+ */
+ ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
+ ctrl4b |= (RTC_CTRL_4B_ABE | RTC_CTRL_4B_WIE |
+ RTC_CTRL_4B_KSE);
+ rtc->write(rtc, RTC_EXT_CTRL_4B, ctrl4b);
+ }
+
+ /* Set PAB to 1 in Control 4A to power the system down. */
+ dev_warn(&pdev->dev, "Powerdown.\n");
+ msleep(20);
+ rtc->write(rtc, RTC_EXT_CTRL_4A,
+ (ctrl4a | RTC_CTRL_4A_PAB));
+
+ /* Spin ... we do not switch back to bank0. */
+ unreachable();
+ }
+}
+EXPORT_SYMBOL(ds1685_rtc_poweroff);
+/* ----------------------------------------------------------------------- */
+
+
+MODULE_AUTHOR("Joshua Kinard <kumba@gentoo.org>");
+MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd-electronics.com>");
+MODULE_DESCRIPTION("Dallas/Maxim DS1685/DS1687-series RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:rtc-ds1685");
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index b37b0c80bd5a..cb989cd00b14 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -218,6 +218,7 @@ static int __init efi_rtc_probe(struct platform_device *dev)
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->uie_unsupported = 1;
platform_set_drvdata(dev, rtc);
return 0;
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 42f5570f42f8..c666eab98273 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -50,22 +50,58 @@
#define DCAMR_UNSET 0xFFFFFFFF /* doomsday - 1 sec */
#define DCR 0x10 /* Control Reg */
+#define DCR_TDCHL (1 << 30) /* Tamper-detect configuration hard lock */
+#define DCR_TDCSL (1 << 29) /* Tamper-detect configuration soft lock */
+#define DCR_KSSL (1 << 27) /* Key-select soft lock */
+#define DCR_MCHL (1 << 20) /* Monotonic-counter hard lock */
+#define DCR_MCSL (1 << 19) /* Monotonic-counter soft lock */
+#define DCR_TCHL (1 << 18) /* Timer-counter hard lock */
+#define DCR_TCSL (1 << 17) /* Timer-counter soft lock */
+#define DCR_FSHL (1 << 16) /* Failure state hard lock */
#define DCR_TCE (1 << 3) /* Time Counter Enable */
+#define DCR_MCE (1 << 2) /* Monotonic Counter Enable */
#define DSR 0x14 /* Status Reg */
-#define DSR_WBF (1 << 10) /* Write Busy Flag */
-#define DSR_WNF (1 << 9) /* Write Next Flag */
-#define DSR_WCF (1 << 8) /* Write Complete Flag */
+#define DSR_WTD (1 << 23) /* Wire-mesh tamper detected */
+#define DSR_ETBD (1 << 22) /* External tamper B detected */
+#define DSR_ETAD (1 << 21) /* External tamper A detected */
+#define DSR_EBD (1 << 20) /* External boot detected */
+#define DSR_SAD (1 << 19) /* SCC alarm detected */
+#define DSR_TTD (1 << 18) /* Temperature tamper detected */
+#define DSR_CTD (1 << 17) /* Clock tamper detected */
+#define DSR_VTD (1 << 16) /* Voltage tamper detected */
+#define DSR_WBF (1 << 10) /* Write Busy Flag (synchronous) */
+#define DSR_WNF (1 << 9) /* Write Next Flag (synchronous) */
+#define DSR_WCF (1 << 8) /* Write Complete Flag (synchronous)*/
#define DSR_WEF (1 << 7) /* Write Error Flag */
#define DSR_CAF (1 << 4) /* Clock Alarm Flag */
+#define DSR_MCO (1 << 3) /* monotonic counter overflow */
+#define DSR_TCO (1 << 2) /* time counter overflow */
#define DSR_NVF (1 << 1) /* Non-Valid Flag */
#define DSR_SVF (1 << 0) /* Security Violation Flag */
-#define DIER 0x18 /* Interrupt Enable Reg */
+#define DIER 0x18 /* Interrupt Enable Reg (synchronous) */
#define DIER_WNIE (1 << 9) /* Write Next Interrupt Enable */
#define DIER_WCIE (1 << 8) /* Write Complete Interrupt Enable */
#define DIER_WEIE (1 << 7) /* Write Error Interrupt Enable */
#define DIER_CAIE (1 << 4) /* Clock Alarm Interrupt Enable */
+#define DIER_SVIE (1 << 0) /* Security-violation Interrupt Enable */
+
+#define DMCR 0x1c /* DryIce Monotonic Counter Reg */
+
+#define DTCR 0x28 /* DryIce Tamper Configuration Reg */
+#define DTCR_MOE (1 << 9) /* monotonic overflow enabled */
+#define DTCR_TOE (1 << 8) /* time overflow enabled */
+#define DTCR_WTE (1 << 7) /* wire-mesh tamper enabled */
+#define DTCR_ETBE (1 << 6) /* external B tamper enabled */
+#define DTCR_ETAE (1 << 5) /* external A tamper enabled */
+#define DTCR_EBE (1 << 4) /* external boot tamper enabled */
+#define DTCR_SAIE (1 << 3) /* SCC enabled */
+#define DTCR_TTE (1 << 2) /* temperature tamper enabled */
+#define DTCR_CTE (1 << 1) /* clock tamper enabled */
+#define DTCR_VTE (1 << 0) /* voltage tamper enabled */
+
+#define DGPR 0x3c /* DryIce General Purpose Reg */
/**
* struct imxdi_dev - private imxdi rtc data
@@ -313,7 +349,7 @@ static irqreturn_t dryice_norm_irq(int irq, void *dev_id)
dier = __raw_readl(imxdi->ioaddr + DIER);
/* handle write complete and write error cases */
- if ((dier & DIER_WCIE)) {
+ if (dier & DIER_WCIE) {
/*If the write wait queue is empty then there is no pending
operations. It means the interrupt is for DryIce -Security.
IRQ must be returned as none.*/
@@ -322,7 +358,7 @@ static irqreturn_t dryice_norm_irq(int irq, void *dev_id)
/* DSR_WCF clears itself on DSR read */
dsr = __raw_readl(imxdi->ioaddr + DSR);
- if ((dsr & (DSR_WCF | DSR_WEF))) {
+ if (dsr & (DSR_WCF | DSR_WEF)) {
/* mask the interrupt */
di_int_disable(imxdi, DIER_WCIE);
@@ -335,7 +371,7 @@ static irqreturn_t dryice_norm_irq(int irq, void *dev_id)
}
/* handle the alarm case */
- if ((dier & DIER_CAIE)) {
+ if (dier & DIER_CAIE) {
/* DSR_WCF clears itself on DSR read */
dsr = __raw_readl(imxdi->ioaddr + DSR);
if (dsr & DSR_CAF) {
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index ee3ba7e6b45e..f9b082784b90 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -275,7 +275,8 @@ static int isl12022_probe(struct i2c_client *client,
#ifdef CONFIG_OF
static const struct of_device_id isl12022_dt_match[] = {
- { .compatible = "isl,isl12022" },
+ { .compatible = "isl,isl12022" }, /* for backward compat., don't use */
+ { .compatible = "isil,isl12022" },
{ },
};
#endif
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index 6e1fcfb5d7e6..da818d3337ce 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -79,8 +79,10 @@
#define ISL12057_MEM_MAP_LEN 0x10
struct isl12057_rtc_data {
+ struct rtc_device *rtc;
struct regmap *regmap;
struct mutex lock;
+ int irq;
};
static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
@@ -160,14 +162,47 @@ static int isl12057_i2c_validate_chip(struct regmap *regmap)
return 0;
}
-static int isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int _isl12057_rtc_clear_alarm(struct device *dev)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
+ ISL12057_REG_SR_A1F, 0);
+ if (ret)
+ dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static int _isl12057_rtc_update_alarm(struct device *dev, int enable)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ISL12057_REG_INT,
+ ISL12057_REG_INT_A1IE,
+ enable ? ISL12057_REG_INT_A1IE : 0);
+ if (ret)
+ dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/*
+ * Note: as we only read from the device and do not perform any update, there
+ * is no need for an equivalent function which would try to take the driver's
+ * main lock. Here, it is safe for everyone if we just use the regmap internal
+ * lock on the device when reading.
+ */
+static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct isl12057_rtc_data *data = dev_get_drvdata(dev);
u8 regs[ISL12057_RTC_SEC_LEN];
unsigned int sr;
int ret;
- mutex_lock(&data->lock);
ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr);
if (ret) {
dev_err(dev, "%s: unable to read oscillator status flag (%d)\n",
@@ -187,8 +222,6 @@ static int isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
__func__, ret);
out:
- mutex_unlock(&data->lock);
-
if (ret)
return ret;
@@ -197,6 +230,168 @@ out:
return rtc_valid_tm(tm);
}
+static int isl12057_rtc_update_alarm(struct device *dev, int enable)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = _isl12057_rtc_update_alarm(dev, enable);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm, *alarm_tm = &alarm->time;
+ unsigned long rtc_secs, alarm_secs;
+ u8 regs[ISL12057_A1_SEC_LEN];
+ unsigned int ir;
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs,
+ ISL12057_A1_SEC_LEN);
+ if (ret) {
+ dev_err(dev, "%s: reading alarm section failed (%d)\n",
+ __func__, ret);
+ goto err_unlock;
+ }
+
+ alarm_tm->tm_sec = bcd2bin(regs[0] & 0x7f);
+ alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f);
+ alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
+ alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
+ alarm_tm->tm_wday = -1;
+
+ /*
+ * The alarm section does not store year/month. We use the ones in the rtc
+ * section as a basis and increment the month, and then the year if needed,
+ * so that the alarm ends up after the current time.
+ */
+ ret = _isl12057_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ goto err_unlock;
+
+ alarm_tm->tm_year = rtc_tm.tm_year;
+ alarm_tm->tm_mon = rtc_tm.tm_mon;
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err_unlock;
+
+ ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
+ if (ret)
+ goto err_unlock;
+
+ if (alarm_secs < rtc_secs) {
+ if (alarm_tm->tm_mon == 11) {
+ alarm_tm->tm_mon = 0;
+ alarm_tm->tm_year += 1;
+ } else {
+ alarm_tm->tm_mon += 1;
+ }
+ }
+
+ ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
+ if (ret) {
+ dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n",
+ __func__, ret);
+ goto err_unlock;
+ }
+
+ alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE);
+
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ struct rtc_time *alarm_tm = &alarm->time;
+ unsigned long rtc_secs, alarm_secs;
+ u8 regs[ISL12057_A1_SEC_LEN];
+ struct rtc_time rtc_tm;
+ int ret, enable = 1;
+
+ mutex_lock(&data->lock);
+ ret = _isl12057_rtc_read_time(dev, &rtc_tm);
+ if (ret)
+ goto err_unlock;
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err_unlock;
+
+ ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
+ if (ret)
+ goto err_unlock;
+
+ /* If alarm time is before current time, disable the alarm */
+ if (!alarm->enabled || alarm_secs <= rtc_secs) {
+ enable = 0;
+ } else {
+ /*
+ * The chip only supports alarms up to one month in the future. Let's
+ * return an error if we get something beyond that limit. The
+ * comparison is done by incrementing the rtc_tm month field by one
+ * and checking that the alarm value is still below.
+ */
+ if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
+ rtc_tm.tm_mon = 0;
+ rtc_tm.tm_year += 1;
+ } else {
+ rtc_tm.tm_mon += 1;
+ }
+
+ ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
+ if (ret)
+ goto err_unlock;
+
+ if (alarm_secs > rtc_secs) {
+ dev_err(dev, "%s: max for alarm is one month (%d)\n",
+ __func__, ret);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ }
+
+ /* Disable the alarm before modifying it */
+ ret = _isl12057_rtc_update_alarm(dev, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to disable the alarm (%d)\n",
+ __func__, ret);
+ goto err_unlock;
+ }
+
+ /* Program alarm registers */
+ regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f;
+ regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f;
+ regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f;
+ regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f;
+
+ ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs,
+ ISL12057_A1_SEC_LEN);
+ if (ret < 0) {
+ dev_err(dev, "%s: writing alarm section failed (%d)\n",
+ __func__, ret);
+ goto err_unlock;
+ }
+
+ /* Enable or disable alarm */
+ ret = _isl12057_rtc_update_alarm(dev, enable);
+
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct isl12057_rtc_data *data = dev_get_drvdata(dev);
@@ -262,12 +457,85 @@ static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
return 0;
}
+#ifdef CONFIG_OF
+/*
+ * One would expect the device to be marked as a wakeup source only
+ * when an IRQ pin of the RTC is routed to an interrupt line of the
+ * CPU. In practice, such an IRQ pin can be connected to a PMIC and
+ * this allows the device to be powered up when the RTC alarm rings. This
+ * is for instance the case on ReadyNAS 102, 104 and 2120. On those
+ * devices with no IRQ directly connected to the SoC, the RTC chip
+ * can be forced as a wakeup source by stating that explicitly in
+ * the device's .dts file using the "isil,irq2-can-wakeup-machine"
+ * boolean property. This guarantees the 'wakealarm' sysfs entry is
+ * available on the device.
+ *
+ * The function below returns true, i.e. signals that the chip is able
+ * to wake up the machine, if an IRQ is available or if the boolean
+ * property has been set in the .dts file. Otherwise, it returns false.
+ */
+
+static bool isl12057_can_wakeup_machine(struct device *dev)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+
+ return (data->irq || of_property_read_bool(dev->of_node,
+ "isil,irq2-can-wakeup-machine"));
+}
+#else
+static bool isl12057_can_wakeup_machine(struct device *dev)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+
+ return !!data->irq;
+}
+#endif
+
+static int isl12057_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enable)
+{
+ struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
+ int ret = -ENOTTY;
+
+ if (rtc_data->irq)
+ ret = isl12057_rtc_update_alarm(dev, enable);
+
+ return ret;
+}
+
+static irqreturn_t isl12057_rtc_interrupt(int irq, void *data)
+{
+ struct i2c_client *client = data;
+ struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
+ struct rtc_device *rtc = rtc_data->rtc;
+ int ret, handled = IRQ_NONE;
+ unsigned int sr;
+
+ ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr);
+ if (!ret && (sr & ISL12057_REG_SR_A1F)) {
+ dev_dbg(&client->dev, "RTC alarm!\n");
+
+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
+ /* Acknowledge and disable the alarm */
+ _isl12057_rtc_clear_alarm(&client->dev);
+ _isl12057_rtc_update_alarm(&client->dev, 0);
+
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
static const struct rtc_class_ops rtc_ops = {
- .read_time = isl12057_rtc_read_time,
+ .read_time = _isl12057_rtc_read_time,
.set_time = isl12057_rtc_set_time,
+ .read_alarm = isl12057_rtc_read_alarm,
+ .set_alarm = isl12057_rtc_set_alarm,
+ .alarm_irq_enable = isl12057_rtc_alarm_irq_enable,
};
-static struct regmap_config isl12057_rtc_regmap_config = {
+static const struct regmap_config isl12057_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
@@ -277,7 +545,6 @@ static int isl12057_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct isl12057_rtc_data *data;
- struct rtc_device *rtc;
struct regmap *regmap;
int ret;
@@ -310,13 +577,75 @@ static int isl12057_probe(struct i2c_client *client,
data->regmap = regmap;
dev_set_drvdata(dev, data);
- rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(rtc);
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ isl12057_rtc_interrupt,
+ IRQF_SHARED|IRQF_ONESHOT,
+ DRV_NAME, client);
+ if (!ret)
+ data->irq = client->irq;
+ else
+ dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__,
+ client->irq, ret);
+ }
+
+ if (isl12057_can_wakeup_machine(dev))
+ device_init_wakeup(dev, true);
+
+ data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
+ THIS_MODULE);
+ ret = PTR_ERR_OR_ZERO(data->rtc);
+ if (ret) {
+ dev_err(dev, "%s: unable to register RTC device (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
+ /* We cannot support UIE mode if we do not have an IRQ line */
+ if (!data->irq)
+ data->rtc->uie_unsupported = 1;
+
+err:
+ return ret;
+}
+
+static int isl12057_remove(struct i2c_client *client)
+{
+ if (isl12057_can_wakeup_machine(&client->dev))
+ device_init_wakeup(&client->dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int isl12057_rtc_suspend(struct device *dev)
+{
+ struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ if (rtc_data->irq && device_may_wakeup(dev))
+ return enable_irq_wake(rtc_data->irq);
+
+ return 0;
+}
+
+static int isl12057_rtc_resume(struct device *dev)
+{
+ struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ if (rtc_data->irq && device_may_wakeup(dev))
+ return disable_irq_wake(rtc_data->irq);
+
+ return 0;
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend,
+ isl12057_rtc_resume);
#ifdef CONFIG_OF
static const struct of_device_id isl12057_dt_match[] = {
- { .compatible = "isl,isl12057" },
+ { .compatible = "isl,isl12057" }, /* for backward compat., don't use */
+ { .compatible = "isil,isl12057" },
{ },
};
#endif
@@ -331,9 +660,11 @@ static struct i2c_driver isl12057_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &isl12057_rtc_pm_ops,
.of_match_table = of_match_ptr(isl12057_dt_match),
},
.probe = isl12057_probe,
+ .remove = isl12057_remove,
.id_table = isl12057_id,
};
module_i2c_driver(isl12057_driver);
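For reference, here is a minimal standalone sketch of the month/year rollover that isl12057_rtc_read_alarm() above performs when the decoded alarm (which only stores mday/hour/min/sec) falls before the current RTC time. The alarm_rollover() helper and the main() driver are illustrative only and assume rtc_time conventions (years since 1900, zero-based months).

#include <stdio.h>

static void alarm_rollover(int *year, int *mon)
{
	if (*mon == 11) {		/* December: wrap into January of next year */
		*mon = 0;
		*year += 1;
	} else {
		*mon += 1;
	}
}

int main(void)
{
	int year = 115, mon = 11;	/* rtc_time style: 2015, December */

	alarm_rollover(&year, &mon);
	printf("alarm falls in %04d-%02d\n", 1900 + year, mon + 1);	/* 2016-01 */
	return 0;
}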
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index d1953bb244c5..8a7556cbcb7f 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rtc.h>
@@ -340,10 +341,19 @@ static int pcf2123_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id pcf2123_dt_ids[] = {
+ { .compatible = "nxp,rtc-pcf2123", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pcf2123_dt_ids);
+#endif
+
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcf2123_dt_ids),
},
.probe = pcf2123_probe,
.remove = pcf2123_remove,
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index df42257668ac..91ca0bc1b484 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -67,15 +67,21 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
/* Force an update of the shadowed registers right now */
ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
BIT_RTC_CTRL_REG_RTC_GET_TIME,
- 0);
+ BIT_RTC_CTRL_REG_RTC_GET_TIME);
if (ret) {
dev_err(dev, "Failed to update bits rtc_ctrl: %d\n", ret);
return ret;
}
+ /*
+ * After we set the GET_TIME bit, the rtc time can't be read
+ * immediately, so we should wait up to 31.25 us, about one cycle of
+ * the 32 kHz clock. If we clear the GET_TIME bit here, the time of the
+ * i2c transfer is certainly more than 31.25 us: 16 * 2.5 us at a
+ * 400 kHz bus frequency.
+ */
ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
BIT_RTC_CTRL_REG_RTC_GET_TIME,
- BIT_RTC_CTRL_REG_RTC_GET_TIME);
+ 0);
if (ret) {
dev_err(dev, "Failed to update bits rtc_ctrl: %d\n", ret);
return ret;
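The comment above relies on the duration of the second i2c transfer to cover the 31.25 us latch time. As a hedged illustration of the same GET_TIME sequence written with an explicit delay instead, the sketch below uses only the regmap and delay APIs; the function name and register parameters are placeholders, not the actual rk808 driver code.

#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/types.h>

static int shadow_latch_and_read(struct regmap *map, unsigned int ctrl_reg,
				 unsigned int get_time_bit,
				 unsigned int sec_reg, u8 *buf, size_t len)
{
	int ret;

	/* Set GET_TIME: the RTC latches the counters into shadow registers. */
	ret = regmap_update_bits(map, ctrl_reg, get_time_bit, get_time_bit);
	if (ret)
		return ret;

	/* Give the latch at least one 32 kHz cycle (31.25 us) to complete. */
	usleep_range(32, 64);

	/* Clear GET_TIME again before reading the shadowed registers. */
	ret = regmap_update_bits(map, ctrl_reg, get_time_bit, 0);
	if (ret)
		return ret;

	return regmap_bulk_read(map, sec_reg, buf, len);
}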
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b5e7c4670205..89ac1d5083c6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
{ "s2mps14-rtc", S2MPS14X },
+ { },
};
static struct platform_driver s5m_rtc_driver = {
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
index bf3e242ccc5c..eb71872d0361 100644
--- a/drivers/rtc/systohc.c
+++ b/drivers/rtc/systohc.c
@@ -20,16 +20,16 @@
*
* If temporary failure is indicated the caller should try again 'soon'
*/
-int rtc_set_ntp_time(struct timespec now)
+int rtc_set_ntp_time(struct timespec64 now)
{
struct rtc_device *rtc;
struct rtc_time tm;
int err = -ENODEV;
if (now.tv_nsec < (NSEC_PER_SEC >> 1))
- rtc_time_to_tm(now.tv_sec, &tm);
+ rtc_time64_to_tm(now.tv_sec, &tm);
else
- rtc_time_to_tm(now.tv_sec + 1, &tm);
+ rtc_time64_to_tm(now.tv_sec + 1, &tm);
rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (rtc) {
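The tv_nsec test kept above simply rounds the NTP-synchronized time to the nearest whole second before it is written to the RTC. A tiny standalone illustration of that rounding follows; round_to_nearest_sec() is a made-up helper, not kernel code.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

static long long round_to_nearest_sec(long long sec, long nsec)
{
	return (nsec < NSEC_PER_SEC / 2) ? sec : sec + 1;
}

int main(void)
{
	printf("%lld\n", round_to_nearest_sec(100, 499999999L));	/* 100 */
	printf("%lld\n", round_to_nearest_sec(100, 500000000L));	/* 101 */
	return 0;
}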
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4abf11965484..be34ef41b7c7 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -674,8 +674,9 @@ EXPORT_SYMBOL(dasd_enable_device);
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
#ifdef CONFIG_DASD_PROFILE
-struct dasd_profile_info dasd_global_profile_data;
-static struct dentry *dasd_global_profile_dentry;
+struct dasd_profile dasd_global_profile = {
+ .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
+};
static struct dentry *dasd_debugfs_global_entry;
/*
@@ -696,11 +697,13 @@ static void dasd_profile_start(struct dasd_block *block,
if (++counter >= 31)
break;
- if (dasd_global_profile_level) {
- dasd_global_profile_data.dasd_io_nr_req[counter]++;
+ spin_lock(&dasd_global_profile.lock);
+ if (dasd_global_profile.data) {
+ dasd_global_profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
- dasd_global_profile_data.dasd_read_nr_req[counter]++;
+ dasd_global_profile.data->dasd_read_nr_req[counter]++;
}
+ spin_unlock(&dasd_global_profile.lock);
spin_lock(&block->profile.lock);
if (block->profile.data) {
@@ -825,8 +828,9 @@ static void dasd_profile_end(struct dasd_block *block,
dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
dasd_profile_counter(endtime, endtime_ind);
- if (dasd_global_profile_level) {
- dasd_profile_end_add_data(&dasd_global_profile_data,
+ spin_lock(&dasd_global_profile.lock);
+ if (dasd_global_profile.data) {
+ dasd_profile_end_add_data(dasd_global_profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
rq_data_dir(req) == READ,
@@ -835,6 +839,7 @@ static void dasd_profile_end(struct dasd_block *block,
irqtime_ind, irqtimeps_ind,
endtime_ind);
}
+ spin_unlock(&dasd_global_profile.lock);
spin_lock(&block->profile.lock);
if (block->profile.data)
@@ -876,12 +881,6 @@ void dasd_profile_reset(struct dasd_profile *profile)
spin_unlock_bh(&profile->lock);
}
-void dasd_global_profile_reset(void)
-{
- memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
- getnstimeofday(&dasd_global_profile_data.starttod);
-}
-
int dasd_profile_on(struct dasd_profile *profile)
{
struct dasd_profile_info *data;
@@ -949,12 +948,20 @@ static ssize_t dasd_stats_write(struct file *file,
dasd_profile_reset(prof);
} else if (strncmp(str, "on", 2) == 0) {
rc = dasd_profile_on(prof);
- if (!rc)
- rc = user_len;
+ if (rc)
+ goto out;
+ rc = user_len;
+ if (prof == &dasd_global_profile) {
+ dasd_profile_reset(prof);
+ dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
+ }
} else if (strncmp(str, "off", 3) == 0) {
+ if (prof == &dasd_global_profile)
+ dasd_global_profile_level = DASD_PROFILE_OFF;
dasd_profile_off(prof);
} else
rc = -EINVAL;
+out:
vfree(buffer);
return rc;
}
@@ -1044,57 +1051,6 @@ static const struct file_operations dasd_stats_raw_fops = {
.write = dasd_stats_write,
};
-static ssize_t dasd_stats_global_write(struct file *file,
- const char __user *user_buf,
- size_t user_len, loff_t *pos)
-{
- char *buffer, *str;
- ssize_t rc;
-
- if (user_len > 65536)
- user_len = 65536;
- buffer = dasd_get_user_string(user_buf, user_len);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
- str = skip_spaces(buffer);
- rc = user_len;
- if (strncmp(str, "reset", 5) == 0) {
- dasd_global_profile_reset();
- } else if (strncmp(str, "on", 2) == 0) {
- dasd_global_profile_reset();
- dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
- } else if (strncmp(str, "off", 3) == 0) {
- dasd_global_profile_level = DASD_PROFILE_OFF;
- } else
- rc = -EINVAL;
- vfree(buffer);
- return rc;
-}
-
-static int dasd_stats_global_show(struct seq_file *m, void *v)
-{
- if (!dasd_global_profile_level) {
- seq_puts(m, "disabled\n");
- return 0;
- }
- dasd_stats_seq_print(m, &dasd_global_profile_data);
- return 0;
-}
-
-static int dasd_stats_global_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dasd_stats_global_show, NULL);
-}
-
-static const struct file_operations dasd_stats_global_fops = {
- .owner = THIS_MODULE,
- .open = dasd_stats_global_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = dasd_stats_global_write,
-};
-
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
@@ -1123,20 +1079,16 @@ static void dasd_profile_exit(struct dasd_profile *profile)
static void dasd_statistics_removeroot(void)
{
dasd_global_profile_level = DASD_PROFILE_OFF;
- debugfs_remove(dasd_global_profile_dentry);
- dasd_global_profile_dentry = NULL;
+ dasd_profile_exit(&dasd_global_profile);
debugfs_remove(dasd_debugfs_global_entry);
debugfs_remove(dasd_debugfs_root_entry);
}
static void dasd_statistics_createroot(void)
{
- umode_t mode;
struct dentry *pde;
dasd_debugfs_root_entry = NULL;
- dasd_debugfs_global_entry = NULL;
- dasd_global_profile_dentry = NULL;
pde = debugfs_create_dir("dasd", NULL);
if (!pde || IS_ERR(pde))
goto error;
@@ -1145,13 +1097,7 @@ static void dasd_statistics_createroot(void)
if (!pde || IS_ERR(pde))
goto error;
dasd_debugfs_global_entry = pde;
-
- mode = (S_IRUSR | S_IWUSR | S_IFREG);
- pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
- NULL, &dasd_stats_global_fops);
- if (!pde || IS_ERR(pde))
- goto error;
- dasd_global_profile_dentry = pde;
+ dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
return;
error:
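The hunks above replace the dasd_global_profile_level test with "is the data block allocated", checked under the profile lock. A condensed sketch of that accounting pattern is shown below, using the field names visible in the diff; the helper name itself is illustrative and the structures are assumed to come from dasd_int.h.

#include <linux/spinlock.h>
#include <linux/types.h>
/* assumes struct dasd_profile / struct dasd_profile_info from dasd_int.h */

static void profile_account_queue_depth(struct dasd_profile *profile,
					int counter, bool is_read)
{
	spin_lock(&profile->lock);
	if (profile->data) {		/* non-NULL means profiling is switched on */
		profile->data->dasd_io_nr_req[counter]++;
		if (is_read)
			profile->data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&profile->lock);
}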
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8b5d4100abf7..227e3dea3155 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -651,7 +651,7 @@ dasd_check_blocksize(int bsize)
#define DASD_PROFILE_GLOBAL_ONLY 2
extern debug_info_t *dasd_debug_area;
-extern struct dasd_profile_info dasd_global_profile_data;
+extern struct dasd_profile dasd_global_profile;
extern unsigned int dasd_global_profile_level;
extern const struct block_device_operations dasd_device_operations;
@@ -728,7 +728,6 @@ int dasd_device_is_ro(struct dasd_device *);
void dasd_profile_reset(struct dasd_profile *);
int dasd_profile_on(struct dasd_profile *);
void dasd_profile_off(struct dasd_profile *);
-void dasd_global_profile_reset(void);
char *dasd_get_user_string(const char __user *, size_t);
/* externals in dasd_devmap.c */
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 78ac905a5b7f..aa7bb2d1da81 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -212,14 +212,15 @@ static int dasd_stats_proc_show(struct seq_file *m, void *v)
struct dasd_profile_info *prof;
int factor;
- /* check for active profiling */
- if (!dasd_global_profile_level) {
+ spin_lock_bh(&dasd_global_profile.lock);
+ prof = dasd_global_profile.data;
+ if (!prof) {
+ spin_unlock_bh(&dasd_global_profile.lock);
seq_printf(m, "Statistics are off - they might be "
"switched on using 'echo set on > "
"/proc/dasd/statistics'\n");
return 0;
}
- prof = &dasd_global_profile_data;
/* prevent counter 'overflow' on output */
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
@@ -255,6 +256,7 @@ static int dasd_stats_proc_show(struct seq_file *m, void *v)
dasd_statistics_array(m, prof->dasd_io_time3, factor);
seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
+ spin_unlock_bh(&dasd_global_profile.lock);
#else
seq_printf(m, "Statistics are not activated in this kernel\n");
#endif
@@ -291,14 +293,19 @@ static ssize_t dasd_stats_proc_write(struct file *file,
dasd_stats_all_block_off();
goto out_error;
}
- dasd_global_profile_reset();
+ rc = dasd_profile_on(&dasd_global_profile);
+ if (rc) {
+ dasd_stats_all_block_off();
+ goto out_error;
+ }
+ dasd_profile_reset(&dasd_global_profile);
dasd_global_profile_level = DASD_PROFILE_ON;
pr_info("The statistics feature has been switched "
"on\n");
} else if (strcmp(str, "off") == 0) {
- /* switch off and reset statistics profiling */
+ /* switch off statistics profiling */
dasd_global_profile_level = DASD_PROFILE_OFF;
- dasd_global_profile_reset();
+ dasd_profile_off(&dasd_global_profile);
dasd_stats_all_block_off();
pr_info("The statistics feature has been switched "
"off\n");
@@ -306,7 +313,7 @@ static ssize_t dasd_stats_proc_write(struct file *file,
goto out_parse_error;
} else if (strncmp(str, "reset", 5) == 0) {
/* reset the statistics */
- dasd_global_profile_reset();
+ dasd_profile_reset(&dasd_global_profile);
dasd_stats_all_block_reset();
pr_info("The statistics have been reset\n");
} else
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b550c8c8d010..96128cb009f3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -28,8 +28,8 @@
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
-static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void **kaddr, unsigned long *pfn);
+static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
+ void **kaddr, unsigned long *pfn, long size);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -438,7 +438,13 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
pr_info("All DCSSs that map to device %s are "
"saved\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) {
- segment_save(entry->segment_name);
+ if (entry->segment_type == SEG_TYPE_EN ||
+ entry->segment_type == SEG_TYPE_SN)
+ pr_warn("DCSS %s is of type SN or EN"
+ " and cannot be saved\n",
+ entry->segment_name);
+ else
+ segment_save(entry->segment_name);
}
} else {
// device is busy => we save it when it becomes
@@ -797,7 +803,12 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
pr_info("Device %s has become idle and is being saved "
"now\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) {
- segment_save(entry->segment_name);
+ if (entry->segment_type == SEG_TYPE_EN ||
+ entry->segment_type == SEG_TYPE_SN)
+ pr_warn("DCSS %s is of type SN or EN and cannot"
+ " be saved\n", entry->segment_name);
+ else
+ segment_save(entry->segment_name);
}
dev_info->save_pending = 0;
}
@@ -866,25 +877,22 @@ fail:
bio_io_error(bio);
}
-static int
+static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void **kaddr, unsigned long *pfn)
+ void **kaddr, unsigned long *pfn, long size)
{
struct dcssblk_dev_info *dev_info;
- unsigned long pgoff;
+ unsigned long offset, dev_sz;
dev_info = bdev->bd_disk->private_data;
if (!dev_info)
return -ENODEV;
- if (secnum % (PAGE_SIZE/512))
- return -EINVAL;
- pgoff = secnum / (PAGE_SIZE / 512);
- if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start)
- return -ERANGE;
- *kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE);
+ dev_sz = dev_info->end - dev_info->start;
+ offset = secnum * 512;
+ *kaddr = (void *) (dev_info->start + offset);
*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
- return 0;
+ return dev_sz - offset;
}
static void
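With the calling convention above, ->direct_access() now reports how many bytes are addressable at *kaddr rather than just success or failure. A hedged caller-side sketch of how that return value would be consumed follows; the wrapper is illustrative only, real callers go through the generic block layer helper.

#include <linux/blkdev.h>
#include <linux/genhd.h>

static long try_direct_access(struct block_device *bdev, sector_t sector,
			      long wanted, void **kaddr, unsigned long *pfn)
{
	long avail;

	avail = bdev->bd_disk->fops->direct_access(bdev, sector, kaddr, pfn,
						   wanted);
	if (avail < 0)
		return avail;			/* e.g. -ENODEV */

	/* Only min(avail, wanted) bytes may be touched starting at *kaddr. */
	return avail < wanted ? avail : wanted;
}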
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 4bd63322fc29..d4b61d9088fb 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -200,10 +200,9 @@ int hmcdrv_ftp_probe(void)
rc = hmcdrv_ftp_startup();
if (rc)
- return rc;
+ goto out;
rc = hmcdrv_ftp_do(&ftp);
- free_page((unsigned long) ftp.buf);
hmcdrv_ftp_shutdown();
switch (rc) {
@@ -216,7 +215,8 @@ int hmcdrv_ftp_probe(void)
rc = 0; /* clear length (success) */
break;
} /* switch */
-
+out:
+ free_page((unsigned long) ftp.buf);
return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_probe);
diff --git a/drivers/s390/char/hmcdrv_mod.c b/drivers/s390/char/hmcdrv_mod.c
index 505c6a78ee1a..251a318a9b75 100644
--- a/drivers/s390/char/hmcdrv_mod.c
+++ b/drivers/s390/char/hmcdrv_mod.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/stat.h>
#include "hmcdrv_ftp.h"
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 5bd6cb145a87..1efa4fdb7fe2 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -20,26 +20,31 @@ struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
- u8 _reserved0[16 - 11]; /* 11-15 */
+ u8 _pad_11[16 - 11]; /* 11-15 */
u16 ncpurl; /* 16-17 */
u16 cpuoff; /* 18-19 */
- u8 _reserved7[24 - 20]; /* 20-23 */
+ u8 _pad_20[24 - 20]; /* 20-23 */
u8 loadparm[8]; /* 24-31 */
- u8 _reserved1[48 - 32]; /* 32-47 */
+ u8 _pad_32[42 - 32]; /* 32-41 */
+ u8 fac42; /* 42 */
+ u8 fac43; /* 43 */
+ u8 _pad_44[48 - 44]; /* 44-47 */
u64 facilities; /* 48-55 */
- u8 _reserved2a[76 - 56]; /* 56-75 */
+ u8 _pad_56[66 - 56]; /* 56-65 */
+ u8 fac66; /* 66 */
+ u8 _pad_67[76 - 67]; /* 67-75 */
u32 ibc; /* 76-79 */
- u8 _reserved2b[84 - 80]; /* 80-83 */
+ u8 _pad80[84 - 80]; /* 80-83 */
u8 fac84; /* 84 */
u8 fac85; /* 85 */
- u8 _reserved3[91 - 86]; /* 86-90 */
+ u8 _pad_86[91 - 86]; /* 86-90 */
u8 flags; /* 91 */
- u8 _reserved4[100 - 92]; /* 92-99 */
+ u8 _pad_92[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
- u8 _reserved5[120 - 112]; /* 112-119 */
+ u8 _pad_112[120 - 112]; /* 112-119 */
u16 hcpua; /* 120-121 */
- u8 _reserved6[4096 - 122]; /* 122-4095 */
+ u8 _pad_122[4096 - 122]; /* 122-4095 */
} __packed __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
@@ -49,7 +54,12 @@ static unsigned long sclp_hsa_size;
static unsigned int sclp_max_cpu;
static struct sclp_ipl_info sclp_ipl_info;
static unsigned char sclp_siif;
+static unsigned char sclp_sigpif;
static u32 sclp_ibc;
+static unsigned int sclp_mtid;
+static unsigned int sclp_mtid_cp;
+static unsigned int sclp_mtid_max;
+static unsigned int sclp_mtid_prev;
u64 sclp_facilities;
u8 sclp_fac84;
@@ -128,9 +138,10 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
boot_cpu_address = stap();
cpue = (void *)sccb + sccb->cpuoff;
for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
- if (boot_cpu_address != cpue->address)
+ if (boot_cpu_address != cpue->core_id)
continue;
sclp_siif = cpue->siif;
+ sclp_sigpif = cpue->sigpif;
break;
}
@@ -139,6 +150,11 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
if (sccb->flags & 0x2)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
+
+ sclp_mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
+ sclp_mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
+ sclp_mtid_max = max(sclp_mtid, sclp_mtid_cp);
+ sclp_mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
}
bool __init sclp_has_linemode(void)
@@ -172,12 +188,33 @@ int sclp_has_siif(void)
}
EXPORT_SYMBOL(sclp_has_siif);
+int sclp_has_sigpif(void)
+{
+ return sclp_sigpif;
+}
+EXPORT_SYMBOL(sclp_has_sigpif);
+
unsigned int sclp_get_ibc(void)
{
return sclp_ibc;
}
EXPORT_SYMBOL(sclp_get_ibc);
+unsigned int sclp_get_mtid(u8 cpu_type)
+{
+ return cpu_type ? sclp_mtid : sclp_mtid_cp;
+}
+
+unsigned int sclp_get_mtid_max(void)
+{
+ return sclp_mtid_max;
+}
+
+unsigned int sclp_get_mtid_prev(void)
+{
+ return sclp_mtid_prev;
+}
+
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. The sclp_facilities_detect() function retrieves
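The offset comments in the reworked read_info_sccb above can be sanity-checked mechanically. The standalone stand-in below reproduces only the byte positions this patch touches; it is an illustration compiled with C11, not kernel code.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct sccb_layout {				/* trimmed stand-in, packed like the SCCB */
	uint8_t  _head[42];			/* 0-41 */
	uint8_t  fac42;				/* 42 */
	uint8_t  fac43;				/* 43 */
	uint8_t  _pad_44[48 - 44];		/* 44-47 */
	uint64_t facilities;			/* 48-55 */
	uint8_t  _pad_56[66 - 56];		/* 56-65 */
	uint8_t  fac66;				/* 66 */
	uint8_t  _pad_67[76 - 67];		/* 67-75 */
	uint32_t ibc;				/* 76-79 */
} __attribute__((packed));

int main(void)
{
	static_assert(offsetof(struct sccb_layout, fac42) == 42, "fac42 offset");
	static_assert(offsetof(struct sccb_layout, fac66) == 66, "fac66 offset");
	static_assert(offsetof(struct sccb_layout, ibc) == 76, "ibc offset");
	return 0;
}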
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 9aa79702b370..de69f0ddc321 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -773,13 +773,11 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
"occurred\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x57:
- if (device->cdev->id.driver_info == tape_3480) {
- /* Attention intercept. */
- return tape_34xx_erp_retry(request);
- } else {
- /* Global status intercept. */
- return tape_34xx_erp_retry(request);
- }
+ /*
+ * 3480: Attention intercept.
+ * 3490: Global status intercept.
+ */
+ return tape_34xx_erp_retry(request);
case 0x5a:
/*
* Tape length incompatible. The tape inserted is too long,
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 10eb738fc81a..3578105989a0 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -938,7 +938,7 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
{
struct subchannel_id uninitialized_var(schid);
- s390_reset_system(NULL, NULL);
+ s390_reset_system(NULL, NULL, NULL);
if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
do_reipl_asm(*((__u32*)&schid));
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 5a999084a229..b3e06a7b9480 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -38,11 +38,6 @@ void idset_free(struct idset *set)
vfree(set);
}
-void idset_clear(struct idset *set)
-{
- memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
-}
-
void idset_fill(struct idset *set)
{
memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
@@ -103,21 +98,6 @@ int idset_sch_contains(struct idset *set, struct subchannel_id schid)
return idset_contains(set, schid.ssid, schid.sch_no);
}
-int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
-{
- int ssid = 0;
- int id = 0;
- int rc;
-
- rc = idset_get_first(set, &ssid, &id);
- if (rc) {
- init_subchannel_id(schid);
- schid->ssid = ssid;
- schid->sch_no = id;
- }
- return rc;
-}
-
int idset_is_empty(struct idset *set)
{
return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 06d3bc01bb09..22b58104683b 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -11,7 +11,6 @@
struct idset;
void idset_free(struct idset *set);
-void idset_clear(struct idset *set);
void idset_fill(struct idset *set);
struct idset *idset_sch_new(void);
@@ -19,7 +18,6 @@ void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
-int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
int idset_is_empty(struct idset *set);
void idset_add_set(struct idset *to, struct idset *from);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91e97ec01418..3d7f19fb9a4e 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -204,6 +204,24 @@ ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
}
/**
+ * ap_query_facilities(): PQAP(TAPQ) query facilities.
+ * @qid: The AP queue number
+ *
+ * Returns content of general register 2 after the PQAP(TAPQ)
+ * instruction was called.
+ */
+static inline unsigned long ap_query_facilities(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x00800000UL;
+ register unsigned long reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ return reg2;
+}
+
+/**
* ap_reset_queue(): Reset adjunct processor queue.
* @qid: The AP queue number
*
@@ -1007,6 +1025,51 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
+ * ap_test_config(): helper function to extract the nr-th bit
+ * within the unsigned int array field.
+ */
+static inline int ap_test_config(unsigned int *field, unsigned int nr)
+{
+ if (nr > 0xFFu)
+ return 0;
+ return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
+}
+
+/*
+ * ap_test_config_card_id(): Test, whether an AP card ID is configured.
+ * @id AP card ID
+ *
+ * Returns 0 if the card is not configured
+ * 1 if the card is configured or
+ * if the configuration information is not available
+ */
+static inline int ap_test_config_card_id(unsigned int id)
+{
+ if (!ap_configuration)
+ return 1;
+ return ap_test_config(ap_configuration->apm, id);
+}
+
+/*
+ * ap_test_config_domain(): Test, whether an AP usage domain is configured.
+ * @domain AP usage domain ID
+ *
+ * Returns 0 if the usage domain is not configured
+ * 1 if the usage domain is configured or
+ * if the configuration information is not available
+ */
+static inline int ap_test_config_domain(unsigned int domain)
+{
+ if (!ap_configuration) /* QCI not supported */
+ if (domain < 16)
+ return 1; /* then domains 0...15 are configured */
+ else
+ return 0;
+ else
+ return ap_test_config(ap_configuration->aqm, domain);
+}
+
+/*
* AP bus attributes.
*/
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@@ -1121,6 +1184,42 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
+static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
+{
+ ap_qid_t qid;
+ int i, nd, max_domain_id = -1;
+ unsigned long fbits;
+
+ if (ap_configuration) {
+ if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) {
+ for (i = 0; i < AP_DEVICES; i++) {
+ if (!ap_test_config_card_id(i))
+ continue;
+ qid = AP_MKQID(i, ap_domain_index);
+ fbits = ap_query_facilities(qid);
+ if (fbits & (1UL << 57)) {
+ /* the N bit is 0, Nd field is filled */
+ nd = (int)((fbits & 0x00FF0000UL)>>16);
+ if (nd > 0)
+ max_domain_id = nd;
+ else
+ max_domain_id = 15;
+ } else {
+ /* N bit is 1, max 16 domains */
+ max_domain_id = 15;
+ }
+ break;
+ }
+ }
+ } else {
+ /* no APXA support, older machines with max 16 domains */
+ max_domain_id = 15;
+ }
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+}
+
+static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
+
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_ap_control_domain_mask,
@@ -1128,46 +1227,10 @@ static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
&bus_attr_poll_timeout,
+ &bus_attr_ap_max_domain_id,
NULL,
};
-static inline int ap_test_config(unsigned int *field, unsigned int nr)
-{
- if (nr > 0xFFu)
- return 0;
- return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
-}
-
-/*
- * ap_test_config_card_id(): Test, whether an AP card ID is configured.
- * @id AP card ID
- *
- * Returns 0 if the card is not configured
- * 1 if the card is configured or
- * if the configuration information is not available
- */
-static inline int ap_test_config_card_id(unsigned int id)
-{
- if (!ap_configuration)
- return 1;
- return ap_test_config(ap_configuration->apm, id);
-}
-
-/*
- * ap_test_config_domain(): Test, whether an AP usage domain is configured.
- * @domain AP usage domain ID
- *
- * Returns 0 if the usage domain is not configured
- * 1 if the usage domain is configured or
- * if the configuration information is not available
- */
-static inline int ap_test_config_domain(unsigned int domain)
-{
- if (!ap_configuration)
- return 1;
- return ap_test_config(ap_configuration->aqm, domain);
-}
-
/**
* ap_query_configuration(): Query AP configuration information.
*
@@ -1430,9 +1493,6 @@ static void ap_scan_bus(struct work_struct *unused)
continue;
}
break;
- case 11:
- ap_dev->device_type = 10;
- break;
default:
ap_dev->device_type = device_type;
}
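The ap_max_domain_id_show() attribute added above decodes the PQAP(TAPQ) facility word: when bit 57 (counted from the least significant bit, as in the C expression) is set, the Nd field in bits 16-23 carries the maximum usage-domain index, otherwise the classic limit of 15 applies. Below is a standalone sketch of just that decode; the helper and the sample values are made up for illustration.

#include <stdio.h>

static int max_domain_from_tapq(unsigned long long fbits)
{
	if (fbits & (1ULL << 57)) {
		int nd = (int)((fbits & 0x00FF0000ULL) >> 16);	/* Nd field */

		return nd > 0 ? nd : 15;
	}
	return 15;			/* no APXA support: domains 0..15 only */
}

int main(void)
{
	printf("%d\n", max_domain_from_tapq((1ULL << 57) | (84ULL << 16)));	/* 84 */
	printf("%d\n", max_domain_from_tapq(0));				/* 15 */
	return 0;
}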
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 055a0f956d17..2737d261a324 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -117,6 +117,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX3A 8
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
+#define AP_DEVICE_TYPE_CEX5 11
/*
* Known function facilities
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index b3d496bfaa7e..750876891931 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -75,6 +75,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX3C 7
#define ZCRYPT_CEX3A 8
#define ZCRYPT_CEX4 10
+#define ZCRYPT_CEX5 11
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 569f8b1d86c0..71e698b85772 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -26,6 +26,10 @@
#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
+#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
+#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
+#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
+#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
@@ -39,6 +43,7 @@
static struct ap_device_id zcrypt_cex4_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX5) },
{ /* end of list */ },
};
@@ -70,11 +75,18 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
switch (ap_dev->device_type) {
case AP_DEVICE_TYPE_CEX4:
+ case AP_DEVICE_TYPE_CEX5:
if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
- zdev->type_string = "CEX4A";
+ if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
+ zdev->type_string = "CEX4A";
+ zdev->speed_rating = CEX4A_SPEED_RATING;
+ } else {
+ zdev->type_string = "CEX5A";
+ zdev->speed_rating = CEX5A_SPEED_RATING;
+ }
zdev->user_space_type = ZCRYPT_CEX3A;
zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
@@ -90,33 +102,42 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
CEX4A_MAX_MOD_SIZE_2K;
}
zdev->short_crt = 1;
- zdev->speed_rating = CEX4A_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
- zdev->type_string = "CEX4C";
+ if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
+ zdev->type_string = "CEX4C";
+ zdev->speed_rating = CEX4C_SPEED_RATING;
+ } else {
+ zdev->type_string = "CEX5C";
+ zdev->speed_rating = CEX5C_SPEED_RATING;
+ }
zdev->user_space_type = ZCRYPT_CEX3C;
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
zdev->short_crt = 0;
- zdev->speed_rating = CEX4C_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
- zdev->type_string = "CEX4P";
+ if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
+ zdev->type_string = "CEX4P";
+ zdev->speed_rating = CEX4P_SPEED_RATING;
+ } else {
+ zdev->type_string = "CEX5P";
+ zdev->speed_rating = CEX5P_SPEED_RATING;
+ }
zdev->user_space_type = ZCRYPT_CEX4;
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
zdev->short_crt = 0;
- zdev->speed_rating = CEX4C_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_EP11);
}
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 213e54ee8a66..d609ca09aa94 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -109,10 +109,8 @@ static debug_info_t *claw_dbf_trace;
static void
claw_unregister_debug_facility(void)
{
- if (claw_dbf_setup)
- debug_unregister(claw_dbf_setup);
- if (claw_dbf_trace)
- debug_unregister(claw_dbf_trace);
+ debug_unregister(claw_dbf_setup);
+ debug_unregister(claw_dbf_trace);
}
static int
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index fb92524d24ef..fd5944bbe224 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -251,13 +251,11 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
int first = 1;
int i;
unsigned long duration;
- struct timespec done_stamp = current_kernel_time(); /* xtime */
+ unsigned long done_stamp = jiffies;
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
- duration =
- (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
- (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+ duration = done_stamp - ch->prof.send_stamp;
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
@@ -307,7 +305,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
spin_unlock(&ch->collect_lock);
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
- ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ ch->prof.send_stamp = jiffies;
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
(unsigned long)ch, 0xff, 0);
ch->prof.doios_multi++;
@@ -1229,14 +1227,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
int rc;
struct th_header *header;
struct pdu *p_header;
- struct timespec done_stamp = current_kernel_time(); /* xtime */
+ unsigned long done_stamp = jiffies;
CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
__func__, dev->name, smp_processor_id());
- duration =
- (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
- (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+ duration = done_stamp - ch->prof.send_stamp;
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
@@ -1361,7 +1357,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
- ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ ch->prof.send_stamp = jiffies;
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
@@ -1827,7 +1823,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
fsm_newstate(wch->fsm, CTC_STATE_TX);
spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
- wch->prof.send_stamp = current_kernel_time(); /* xtime */
+ wch->prof.send_stamp = jiffies;
rc = ccw_device_start(wch->cdev, &wch->ccw[3],
(unsigned long) wch, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e056dd4fe44d..05c37d6d4afe 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -567,7 +567,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
fsm_newstate(ch->fsm, CTC_STATE_TX);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ ch->prof.send_stamp = jiffies;
rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
(unsigned long)ch, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
@@ -831,7 +831,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
sizeof(struct ccw1) * 3);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ ch->prof.send_stamp = jiffies;
rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
(unsigned long)ch, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 477c933685f3..6f4417c80247 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -121,7 +121,7 @@ struct ctcm_profile {
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
- struct timespec send_stamp;
+ unsigned long send_stamp;
};
/*
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 47773c4d235a..ddb0aa321339 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -100,8 +100,8 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
priv->channel[WRITE]->prof.doios_multi);
p += sprintf(p, " Netto bytes written: %ld\n",
priv->channel[WRITE]->prof.txlen);
- p += sprintf(p, " Max. TX IO-time: %ld\n",
- priv->channel[WRITE]->prof.tx_time);
+ p += sprintf(p, " Max. TX IO-time: %u\n",
+ jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time));
printk(KERN_INFO "Statistics for %s:\n%s",
priv->channel[CTCM_WRITE]->netdev->name, sbuf);
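The ctcm and netiucv changes above store send_stamp in jiffies and compute the round-trip time as a plain jiffies difference, converting to microseconds only for display. A minimal sketch of that measurement pattern follows; the struct and function names are illustrative.

#include <linux/jiffies.h>

struct tx_prof {
	unsigned long send_stamp;	/* jiffies when the write was started */
	unsigned long tx_time;		/* worst-case duration, in jiffies */
};

static void tx_submit(struct tx_prof *p)
{
	p->send_stamp = jiffies;
}

static void tx_done(struct tx_prof *p)
{
	unsigned long duration = jiffies - p->send_stamp;

	if (duration > p->tx_time)
		p->tx_time = duration;
}

static unsigned int tx_time_usecs(const struct tx_prof *p)
{
	return jiffies_to_usecs(p->tx_time);	/* as printed by ctcm_print_statistics() */
}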
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 92190aa20b9f..00b7d9c9fe48 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -88,10 +88,8 @@ static debug_info_t *lcs_dbf_trace;
static void
lcs_unregister_debug_facility(void)
{
- if (lcs_dbf_setup)
- debug_unregister(lcs_dbf_setup);
- if (lcs_dbf_trace)
- debug_unregister(lcs_dbf_trace);
+ debug_unregister(lcs_dbf_setup);
+ debug_unregister(lcs_dbf_trace);
}
static int
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 0a87809c8af7..33f7040d711d 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -178,7 +178,7 @@ struct connection_profile {
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
- struct timespec send_stamp;
+ unsigned long send_stamp;
unsigned long tx_pending;
unsigned long tx_max_pending;
};
@@ -487,12 +487,9 @@ DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
static void iucv_unregister_dbf_views(void)
{
- if (iucv_dbf_setup)
- debug_unregister(iucv_dbf_setup);
- if (iucv_dbf_data)
- debug_unregister(iucv_dbf_data);
- if (iucv_dbf_trace)
- debug_unregister(iucv_dbf_trace);
+ debug_unregister(iucv_dbf_setup);
+ debug_unregister(iucv_dbf_data);
+ debug_unregister(iucv_dbf_trace);
}
static int iucv_register_dbf_views(void)
{
@@ -786,7 +783,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
header.next = 0;
memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
- conn->prof.send_stamp = current_kernel_time();
+ conn->prof.send_stamp = jiffies;
txmsg.class = 0;
txmsg.tag = 0;
rc = iucv_message_send(conn->path, &txmsg, 0, 0,
@@ -1220,7 +1217,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
fsm_newstate(conn->fsm, CONN_STATE_TX);
- conn->prof.send_stamp = current_kernel_time();
+ conn->prof.send_stamp = jiffies;
msg.tag = 1;
msg.class = 0;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a8bb9f78e76..3abac028899f 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -596,7 +596,6 @@ struct qeth_channel {
struct ccw1 ccw;
spinlock_t iob_lock;
wait_queue_head_t wait_q;
- struct tasklet_struct irq_tasklet;
struct ccw_device *ccwdev;
/*command buffer for control data*/
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f407e3763432..642c77c76b84 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxanswr");
card = CARD_FROM_CDEV(channel->ccwdev);
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxactch");
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
+/**
+ * qeth_send_control_data() - send control command to the card
+ * @card: qeth_card structure pointer
+ * @len: size of the command buffer
+ * @iob: qeth_cmd_buffer pointer
+ * @reply_cb: callback function pointer
+ * @cb_card: pointer to the qeth_card structure
+ * @cb_reply: pointer to the qeth_reply structure
+ * @cb_cmd: pointer to the original iob for non-IPA
+ * commands, or to the qeth_ipa_cmd structure
+ * for the IPA commands.
+ * @reply_param: private pointer passed to the callback
+ *
+ * Returns the value of the `return_code' field of the response
+ * block returned from the hardware, or other error indication.
+ * A value of zero indicates successful execution of the command.
+ *
+ * The callback function gets called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. The callback
+ * function must return non-zero if more reply blocks are expected,
+ * and zero if the last or only reply block is received. The callback
+ * function can get the value of the reply_param pointer from the
+ * field 'param' of the structure qeth_reply.
+ */
+
int qeth_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
+ int (*reply_cb)(struct qeth_card *cb_card,
+ struct qeth_reply *cb_reply,
+ unsigned long cb_cmd),
void *reply_param)
{
int rc;
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- iob = qeth_wait_for_buffer(&card->write);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ iob = qeth_get_buffer(&card->write);
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ } else {
+ dev_warn(&card->gdev->dev,
+ "The qeth driver ran out of channel command buffers\n");
+ QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+ dev_name(&card->gdev->dev));
+ }
return iob;
}
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "strtlan");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
QETH_PROT_IPV4);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
- cmd->data.setadapterparms.hdr.command_code = command;
- cmd->data.setadapterparms.hdr.used_total = 1;
- cmd->data.setadapterparms.hdr.seq_no = 1;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+ cmd->data.setadapterparms.hdr.command_code = command;
+ cmd->data.setadapterparms.hdr.used_total = 1;
+ cmd->data.setadapterparms.hdr.seq_no = 1;
+ }
return iob;
}
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return -ENOMEDIUM;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "qdiagass");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
QETH_DBF_TEXT(SETUP, 2, "diagtrap");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 80;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.mode = mode;
qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_set_access_ctrl));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
QETH_SNMP_SETADP_CMDLENGTH + req_len);
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
-
+out:
kfree(ureq);
kfree(qinfo.udata);
return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_query_oat));
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
oat_req = &cmd->data.setadapterparms.data.query_oat;
oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
return -EOPNOTSUPP;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
(void *)carrier_info);
}
@@ -5060,11 +5133,23 @@ retriable:
card->options.adp.supported_funcs = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
- qeth_query_ipassists(card, QETH_PROT_IPV4);
- if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
- qeth_query_setadapterparms(card);
- if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
- qeth_query_setdiagass(card);
+ rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+ if (rc == -ENOMEM)
+ goto out;
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ goto out;
+ }
+ }
+ if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ rc = qeth_query_setdiagass(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ goto out;
+ }
+ }
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
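The qeth_core_main.c hunks above all share one pattern: the command-buffer allocators (qeth_get_ipacmd_buffer()/qeth_get_adapter_cmd()) can now fail, so each caller checks for a NULL iob and returns -ENOMEM instead of dereferencing it, and qeth_core_hardsetup_card() propagates those errors. A minimal user-space sketch of that calling convention, using hypothetical names rather than the driver code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_buffer { char data[64]; };

/* Stand-in for the command-buffer allocator: may return NULL. */
static struct cmd_buffer *get_cmd_buffer(void)
{
	return malloc(sizeof(struct cmd_buffer));
}

static int send_query(void)
{
	struct cmd_buffer *iob = get_cmd_buffer();

	if (!iob)
		return -ENOMEM;	/* propagate instead of crashing */
	/* ... fill iob->data and submit the command ... */
	free(iob);
	return 0;
}

int main(void)
{
	printf("rc=%d\n", send_query());
	return 0;
}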
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 15523f0e4c03..423bec56cffa 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -231,7 +231,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
int rc = 0;
if (!card)
@@ -253,36 +252,35 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
goto out;
}
- tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "prio_queueing_prec")) {
+ if (sysfs_streq(buf, "prio_queueing_prec")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else if (!strcmp(tmp, "prio_queueing_skb")) {
+ } else if (sysfs_streq(buf, "prio_queueing_skb")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else if (!strcmp(tmp, "prio_queueing_tos")) {
+ } else if (sysfs_streq(buf, "prio_queueing_tos")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else if (!strcmp(tmp, "prio_queueing_vlan")) {
+ } else if (sysfs_streq(buf, "prio_queueing_vlan")) {
if (!card->options.layer2) {
rc = -ENOTSUPP;
goto out;
}
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else if (!strcmp(tmp, "no_prio_queueing:0")) {
+ } else if (sysfs_streq(buf, "no_prio_queueing:0")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 0;
- } else if (!strcmp(tmp, "no_prio_queueing:1")) {
+ } else if (sysfs_streq(buf, "no_prio_queueing:1")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 1;
- } else if (!strcmp(tmp, "no_prio_queueing:2")) {
+ } else if (sysfs_streq(buf, "no_prio_queueing:2")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
- } else if (!strcmp(tmp, "no_prio_queueing:3")) {
+ } else if (sysfs_streq(buf, "no_prio_queueing:3")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
- } else if (!strcmp(tmp, "no_prio_queueing")) {
+ } else if (sysfs_streq(buf, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else
@@ -497,8 +495,6 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_ipa_isolation_modes isolation;
int rc = 0;
- char *tmp, *curtoken;
- curtoken = (char *) buf;
if (!card)
return -EINVAL;
@@ -515,12 +511,11 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
}
/* parse input into isolation mode */
- tmp = strsep(&curtoken, "\n");
- if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
+ if (sysfs_streq(buf, ATTR_QETH_ISOLATION_NONE)) {
isolation = ISOLATION_MODE_NONE;
- } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
+ } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_FWD)) {
isolation = ISOLATION_MODE_FWD;
- } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
+ } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_DROP)) {
isolation = ISOLATION_MODE_DROP;
} else {
rc = -EINVAL;
@@ -531,8 +526,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
/* defer IP assist if device is offline (until discipline->set_online)*/
card->options.prev_isolation = card->options.isolation;
card->options.isolation = isolation;
- if (card->state == CARD_STATE_SOFTSETUP ||
- card->state == CARD_STATE_UP) {
+ if (qeth_card_hw_is_reachable(card)) {
int ipa_rc = qeth_set_access_ctrl_online(card, 1);
if (ipa_rc != 0)
rc = ipa_rc;
@@ -555,7 +549,7 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
if (!card)
return -EINVAL;
- if (card->state != CARD_STATE_SOFTSETUP && card->state != CARD_STATE_UP)
+ if (!qeth_card_hw_is_reachable(card))
return sprintf(buf, "n/a\n");
rc = qeth_query_switch_attributes(card, &sw_info);
@@ -598,19 +592,16 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
- char *tmp, *curtoken;
int state = 0;
- curtoken = (char *)buf;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP)
+ if (qeth_card_hw_is_reachable(card))
state = 1;
- tmp = strsep(&curtoken, "\n");
- if (!strcmp(tmp, "arm") && !card->info.hwtrap) {
+ if (sysfs_streq(buf, "arm") && !card->info.hwtrap) {
if (state) {
if (qeth_is_diagass_supported(card,
QETH_DIAGS_CMD_TRAP)) {
@@ -621,14 +612,14 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
rc = -EINVAL;
} else
card->info.hwtrap = 1;
- } else if (!strcmp(tmp, "disarm") && card->info.hwtrap) {
+ } else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) {
if (state) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
if (!rc)
card->info.hwtrap = 0;
} else
card->info.hwtrap = 0;
- } else if (!strcmp(tmp, "trap") && state && card->info.hwtrap)
+ } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap)
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
else
rc = -EINVAL;
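The qeth_core_sys.c hunks replace the strsep()/strcmp() parsing of sysfs store input with sysfs_streq(), which matches the buffer against a keyword while tolerating the trailing newline that "echo value > attribute" delivers. A runnable user-space approximation of that comparison; streq_ignore_newline() is a hypothetical stand-in for illustration, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Equal, optionally ignoring a single trailing newline on either side. */
static bool streq_ignore_newline(const char *a, const char *b)
{
	size_t la = strlen(a), lb = strlen(b);

	if (la && a[la - 1] == '\n')
		la--;
	if (lb && b[lb - 1] == '\n')
		lb--;
	return la == lb && memcmp(a, b, la) == 0;
}

int main(void)
{
	/* "echo prio_queueing_skb > ..." arrives with a trailing newline. */
	printf("%d\n", streq_ignore_newline("prio_queueing_skb\n",
					    "prio_queueing_skb"));	/* 1 */
	printf("%d\n", streq_ignore_newline("prio_queueing",
					    "prio_queueing_skb"));	/* 0 */
	return 0;
}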
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d02cd1a67943..0ea0869120cf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long));
+ enum qeth_ipa_cmds);
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -48,8 +45,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!card)
return -ENODEV;
- if ((card->state != CARD_STATE_UP) &&
- (card->state != CARD_STATE_SOFTSETUP))
+ if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -130,56 +126,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
return ndev;
}
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Sgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- /* MAC already registered, needed in couple/uncouple case */
- if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
- QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
- mac, QETH_CARD_IFNAME(card));
- cmd->hdr.return_code = 0;
+ if (retcode)
+ QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+ switch (retcode) {
+ case IPA_RC_SUCCESS:
+ rc = 0;
+ break;
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ rc = -ENOSYS;
+ break;
+ case IPA_RC_L2_ADDR_TABLE_FULL:
+ rc = -ENOSPC;
+ break;
+ case IPA_RC_L2_DUP_MAC:
+ case IPA_RC_L2_DUP_LAYER3_MAC:
+ rc = -EEXIST;
+ break;
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ rc = -EPERM;
+ break;
+ case IPA_RC_L2_MAC_NOT_FOUND:
+ rc = -ENOENT;
+ break;
+ case -ENOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EIO;
+ break;
}
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ return rc;
}
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_CARD_TEXT(card, 2, "L2Sgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
- qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Dgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETGMAC));
+ if (rc == -EEXIST)
+ QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+ mac, QETH_CARD_IFNAME(card));
+ else if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
- qeth_l2_send_delgroupmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELGMAC));
+ if (rc)
+ QETH_DBF_MESSAGE(2,
+ "Could not delete group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +208,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
mc->is_vmac = vmac;
if (vmac) {
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- NULL);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
} else {
- rc = qeth_l2_send_setgroupmac(card, mac);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setgroupmac(card, mac));
}
if (!rc)
@@ -218,7 +230,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
if (del) {
if (mc->is_vmac)
qeth_l2_send_setdelmac(card, mc->mc_addr,
- IPA_CMD_DELVMAC, NULL);
+ IPA_CMD_DELVMAC);
else
qeth_l2_send_delgroupmac(card, mc->mc_addr);
}
@@ -291,6 +303,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +327,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
+ int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
@@ -328,7 +343,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
id->vid = vid;
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ if (rc) {
+ kfree(id);
+ return rc;
+ }
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
@@ -343,6 +362,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
+ int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +383,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
}
spin_unlock_bh(&card->vlanlock);
if (tmpid) {
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
- return 0;
+ return rc;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +559,62 @@ out:
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long))
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+ return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- struct qeth_ipa_cmd *cmd;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Smaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETVMAC));
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (cmd->hdr.return_code) {
- case IPA_RC_L2_DUP_MAC:
- case IPA_RC_L2_DUP_LAYER3_MAC:
+ switch (rc) {
+ case -EEXIST:
dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n",
- cmd->data.setdelmac.mac);
+ "MAC address %pM already exists\n", mac);
break;
- case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
- case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ case -EPERM:
dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n",
- cmd->data.setdelmac.mac);
- break;
- default:
+ "MAC address %pM is not authorized\n", mac);
break;
}
- } else {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
- OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- }
- return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "L2Dmaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
- return 0;
}
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
- return 0;
+ return rc;
}
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
- qeth_l2_send_delmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELVMAC));
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +642,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
"device %s: x%x\n", CARD_BUS_ID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
return rc;
}
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +678,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+ if (!rc || (rc == -ENOENT))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
@@ -996,7 +987,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -1344,8 +1335,7 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
if (!card)
return -ENODEV;
QETH_CARD_TEXT(card, 2, "osnsdmc");
- if ((card->state != CARD_STATE_UP) &&
- (card->state != CARD_STATE_SOFTSETUP))
+ if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
iob = qeth_wait_for_buffer(&card->write);
memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
@@ -1730,6 +1720,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1797,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1811,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (rc)
return rc;
rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
- if (rc)
- return rc;
- return 0;
+ return rc;
}
EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
@@ -1873,6 +1865,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength = cmdlength;
cmd->data.sbp.hdr.command_code = setcmd;
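The new qeth_setdel_makerc() introduced in qeth_l2_main.c translates adapter (IPA) return codes into standard errnos, so callers such as qeth_l2_set_mac_address() can test for -ENOENT or -EEXIST rather than hardware-specific constants. A small user-space sketch of that translation idea, with made-up device code values rather than the real IPA ones:

#include <errno.h>
#include <stdio.h>

/* Hypothetical device return codes, not the real IPA values. */
enum { DEV_RC_OK = 0, DEV_RC_DUPLICATE = 0x2005, DEV_RC_NOT_FOUND = 0x2010 };

static int makerc(int devrc)
{
	switch (devrc) {
	case DEV_RC_OK:		return 0;
	case DEV_RC_DUPLICATE:	return -EEXIST;
	case DEV_RC_NOT_FOUND:	return -ENOENT;
	default:		return -EIO;
	}
}

int main(void)
{
	int rc = makerc(DEV_RC_NOT_FOUND);

	if (rc == -ENOENT)	/* caller-side check stays generic */
		printf("address not registered, nothing to delete\n");
	return 0;
}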
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..04e42c649134 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -432,10 +432,8 @@ void qeth_l3_set_ip_addr_list(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "sdiplist");
QETH_CARD_HEX(card, 2, &card, sizeof(void *));
- if ((card->state != CARD_STATE_UP &&
- card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) {
+ if (!qeth_card_hw_is_reachable(card) || card->options.sniffer)
return;
- }
spin_lock_irqsave(&card->ip_lock, flags);
tbd_list = card->ip_tbd_list;
@@ -549,6 +547,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +588,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (addr->proto == QETH_PROT_IPV6) {
memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +618,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1053,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setassparms.hdr.assist_no = ipa_func;
- cmd->data.setassparms.hdr.length = 8 + len;
- cmd->data.setassparms.hdr.command_code = cmd_code;
- cmd->data.setassparms.hdr.return_code = 0;
- cmd->data.setassparms.hdr.seq_no = 0;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setassparms.hdr.assist_no = ipa_func;
+ cmd->data.setassparms.hdr.length = 8 + len;
+ cmd->data.setassparms.hdr.command_code = cmd_code;
+ cmd->data.setassparms.hdr.return_code = 0;
+ cmd->data.setassparms.hdr.seq_no = 0;
+ }
return iob;
}
@@ -1090,6 +1096,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1108,6 +1116,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
length, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, length, data,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1494,6 +1504,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1537,6 +1549,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1611,6 +1625,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
QETH_DBF_TEXT(SETUP, 2, "diagtrac");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2458,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
IPA_CMD_ASS_ARP_QUERY_INFO,
sizeof(struct qeth_arp_query_data) - sizeof(char),
prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2553,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_ADD_ENTRY,
sizeof(struct qeth_arp_cache_entry),
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
@@ -2574,6 +2594,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_REMOVE_ENTRY,
12,
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_l3_default_setassparms_cb, NULL);
@@ -2626,8 +2648,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!card)
return -ENODEV;
- if ((card->state != CARD_STATE_UP) &&
- (card->state != CARD_STATE_SOFTSETUP))
+ if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
switch (cmd) {
@@ -2800,12 +2821,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
* before we're going to overwrite this location with next hop ip.
* v6 uses passthrough, v4 sets the tag in the QDIO header.
*/
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
else
hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
- hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
+ hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
@@ -2986,7 +3007,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(new_skb, ETH_HLEN);
}
- if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
+ if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
skb_push(new_skb, VLAN_HLEN);
skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
skb_copy_to_linear_data_offset(new_skb, 4,
@@ -2995,7 +3016,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
new_skb->data + 12, 4);
tag = (u16 *)(new_skb->data + 12);
*tag = __constant_htons(ETH_P_8021Q);
- *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
+ *(tag + 1) = htons(skb_vlan_tag_get(new_skb));
}
}
@@ -3262,6 +3283,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ int rc;
+
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3316,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
- qeth_l3_iqd_read_initial_mac(card);
+ rc = qeth_l3_iqd_read_initial_mac(card);
+ if (rc)
+ return rc;
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
@@ -3360,7 +3385,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -3401,7 +3426,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
@@ -3410,10 +3435,10 @@ contin:
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
netif_tx_disable(card->dev);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index adef5f5de118..386eb7b89b1e 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -57,29 +57,26 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
const char *buf, size_t count)
{
enum qeth_routing_types old_route_type = route->type;
- char *tmp;
int rc = 0;
- tmp = strsep((char **) &buf, "\n");
mutex_lock(&card->conf_mutex);
- if (!strcmp(tmp, "no_router")) {
+ if (sysfs_streq(buf, "no_router")) {
route->type = NO_ROUTER;
- } else if (!strcmp(tmp, "primary_connector")) {
+ } else if (sysfs_streq(buf, "primary_connector")) {
route->type = PRIMARY_CONNECTOR;
- } else if (!strcmp(tmp, "secondary_connector")) {
+ } else if (sysfs_streq(buf, "secondary_connector")) {
route->type = SECONDARY_CONNECTOR;
- } else if (!strcmp(tmp, "primary_router")) {
+ } else if (sysfs_streq(buf, "primary_router")) {
route->type = PRIMARY_ROUTER;
- } else if (!strcmp(tmp, "secondary_router")) {
+ } else if (sysfs_streq(buf, "secondary_router")) {
route->type = SECONDARY_ROUTER;
- } else if (!strcmp(tmp, "multicast_router")) {
+ } else if (sysfs_streq(buf, "multicast_router")) {
route->type = MULTICAST_ROUTER;
} else {
rc = -EINVAL;
goto out;
}
- if (((card->state == CARD_STATE_SOFTSETUP) ||
- (card->state == CARD_STATE_UP)) &&
+ if (qeth_card_hw_is_reachable(card) &&
(old_route_type != route->type)) {
if (prot == QETH_PROT_IPV4)
rc = qeth_l3_setrouting_v4(card);
@@ -371,7 +368,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *tmpipa, *t;
- char *tmp;
int rc = 0;
if (!card)
@@ -384,10 +380,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
goto out;
}
- tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "toggle")) {
+ if (sysfs_streq(buf, "toggle")) {
card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
- } else if (!strcmp(tmp, "1")) {
+ } else if (sysfs_streq(buf, "1")) {
card->ipato.enabled = 1;
list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
@@ -396,7 +391,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
QETH_IPA_SETIP_TAKEOVER_FLAG;
}
- } else if (!strcmp(tmp, "0")) {
+ } else if (sysfs_streq(buf, "0")) {
card->ipato.enabled = 0;
list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
if (tmpipa->set_flags &
@@ -431,21 +426,19 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "toggle")) {
+ if (sysfs_streq(buf, "toggle"))
card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
- } else if (!strcmp(tmp, "1")) {
+ else if (sysfs_streq(buf, "1"))
card->ipato.invert4 = 1;
- } else if (!strcmp(tmp, "0")) {
+ else if (sysfs_streq(buf, "0"))
card->ipato.invert4 = 0;
- } else
+ else
rc = -EINVAL;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -613,21 +606,19 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "toggle")) {
+ if (sysfs_streq(buf, "toggle"))
card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
- } else if (!strcmp(tmp, "1")) {
+ else if (sysfs_streq(buf, "1"))
card->ipato.invert6 = 1;
- } else if (!strcmp(tmp, "0")) {
+ else if (sysfs_streq(buf, "0"))
card->ipato.invert6 = 0;
- } else
+ else
rc = -EINVAL;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index cd4129ff7ae4..7600639db4c4 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -608,7 +608,8 @@ static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
}
/* Load rest of compatibility struct */
- strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
+ strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
+ sizeof(tw_dev->tw_compat_info.driver_version));
tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
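The 3w-9xxx change swaps strncpy(dst, src, strlen(src)), which bounds the copy by the source length and never NUL-terminates the destination, for strlcpy() bounded by the destination size. strlcpy() is a kernel/BSD helper; in portable user-space C the same bounded, always-terminated copy can be sketched with snprintf():

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[8];
	const char *src = "2.26.02.014";	/* longer than dst */

	/* Unsafe pattern: writes strlen(src) bytes, no terminator. */
	/* strncpy(dst, src, strlen(src)); */

	/* Bounded by the destination, always NUL-terminated. */
	snprintf(dst, sizeof(dst), "%s", src);
	printf("%s\n", dst);	/* prints "2.26.02" */
	return 0;
}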
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 8d66a6469e29..c7be7bb37209 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3485,7 +3485,7 @@ static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m, "\n\
Current Driver Queue Depth: %d\n\
Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs);
- seq_printf(m, "\n\n\
+ seq_puts(m, "\n\n\
DATA TRANSFER STATISTICS\n\
\n\
Target Tagged Queuing Queue Depth Active Attempted Completed\n\
@@ -3500,7 +3500,7 @@ Target Tagged Queuing Queue Depth Active Attempted Completed\n\
seq_printf(m,
" %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete);
}
- seq_printf(m, "\n\
+ seq_puts(m, "\n\
Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\
====== ============= ============== =================== ===================\n");
for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3517,7 +3517,7 @@ Target Read Commands Write Commands Total Bytes Read Total Bytes Written\
else
seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units);
}
- seq_printf(m, "\n\
+ seq_puts(m, "\n\
Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
====== ======= ========= ========= ========= ========= =========\n");
for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3533,7 +3533,7 @@ Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
tgt_stats[tgt].write_sz_buckets[0],
tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]);
}
- seq_printf(m, "\n\
+ seq_puts(m, "\n\
Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
====== ======= ========= ========= ========= ========= =========\n");
for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3549,7 +3549,7 @@ Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
tgt_stats[tgt].write_sz_buckets[5],
tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]);
}
- seq_printf(m, "\n\n\
+ seq_puts(m, "\n\n\
ERROR RECOVERY STATISTICS\n\
\n\
Command Aborts Bus Device Resets Host Adapter Resets\n\
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c92f415229f..b021bcb88537 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -201,12 +201,12 @@ config SCSI_ENCLOSURE
certain enclosure conditions to be reported and is not required.
config SCSI_CONSTANTS
- bool "Verbose SCSI error reporting (kernel size +=12K)"
+ bool "Verbose SCSI error reporting (kernel size +=75K)"
depends on SCSI
help
The error messages regarding your SCSI hardware will be easier to
understand if you say Y here; it will enlarge your kernel by about
- 12 KB. If in doubt, say Y.
+ 75 KB. If in doubt, say Y.
config SCSI_LOGGING
bool "SCSI logging facility"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 58158f11ed7b..dee160a4f163 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -159,15 +159,15 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
-
-scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
+scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \
scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
-scsi_mod-y += scsi_trace.o
+scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
hv_storvsc-y := storvsc_drv.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 36244d63def2..8981701802ca 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -716,8 +716,6 @@ static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
}
#endif
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ## args)
static
void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m);
static
@@ -734,19 +732,19 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
hostdata = (struct NCR5380_hostdata *) instance->hostdata;
#ifdef PSEUDO_DMA
- SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n",
+ seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n",
hostdata->spin_max_w, hostdata->spin_max_r);
#endif
spin_lock_irq(instance->host_lock);
if (!hostdata->connected)
- SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
+ seq_printf(m, "scsi%d: no currently connected command\n", instance->host_no);
else
lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
- SPRINTF("scsi%d: issue_queue\n", instance->host_no);
+ seq_printf(m, "scsi%d: issue_queue\n", instance->host_no);
for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
lprint_Scsi_Cmnd(ptr, m);
- SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
+ seq_printf(m, "scsi%d: disconnected_queue\n", instance->host_no);
for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
lprint_Scsi_Cmnd(ptr, m);
spin_unlock_irq(instance->host_lock);
@@ -755,8 +753,8 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
{
- SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
- SPRINTF(" command = ");
+ seq_printf(m, "scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
+ seq_puts(m, " command = ");
lprint_command(cmd->cmnd, m);
}
@@ -765,13 +763,13 @@ static void lprint_command(unsigned char *command, struct seq_file *m)
int i, s;
lprint_opcode(command[0], m);
for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
- SPRINTF("%02x ", command[i]);
- SPRINTF("\n");
+ seq_printf(m, "%02x ", command[i]);
+ seq_putc(m, '\n');
}
static void lprint_opcode(int opcode, struct seq_file *m)
{
- SPRINTF("%2d (0x%02x)", opcode, opcode);
+ seq_printf(m, "%2d (0x%02x)", opcode, opcode);
}
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2c5ce48c8f95..ae95e347f37d 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2880,7 +2880,7 @@ static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost)
chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
}
- seq_printf(m, "Target IDs Detected:");
+ seq_puts(m, "Target IDs Detected:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i))
seq_printf(m, " %X,", i);
@@ -2896,18 +2896,16 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
struct asc_board *boardp = shost_priv(shost);
ushort major, minor, letter;
- seq_printf(m, "\nROM BIOS Version: ");
+ seq_puts(m, "\nROM BIOS Version: ");
/*
* If the BIOS saved a valid signature, then fill in
* the BIOS code segment base address.
*/
if (boardp->bios_signature != 0x55AA) {
- seq_printf(m, "Disabled or Pre-3.1\n");
- seq_printf(m,
- "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
- seq_printf(m,
- "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
+ seq_puts(m, "Disabled or Pre-3.1\n"
+ "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"
+ "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
} else {
major = (boardp->bios_version >> 12) & 0xF;
minor = (boardp->bios_version >> 8) & 0xF;
@@ -2923,10 +2921,8 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
*/
if (major < 3 || (major <= 3 && minor < 1) ||
(major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
- seq_printf(m,
- "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
- seq_printf(m,
- "ftp://ftp.connectcom.net/pub\n");
+ seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"
+ "ftp://ftp.connectcom.net/pub\n");
}
}
}
@@ -3056,11 +3052,10 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
== ASC_TRUE)
seq_printf(m, " Serial Number: %s\n", serialstr);
else if (ep->adapter_info[5] == 0xBB)
- seq_printf(m,
- " Default Settings Used for EEPROM-less Adapter.\n");
+ seq_puts(m,
+ " Default Settings Used for EEPROM-less Adapter.\n");
else
- seq_printf(m,
- " Serial Number Signature Not Present.\n");
+ seq_puts(m, " Serial Number Signature Not Present.\n");
seq_printf(m,
" Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
@@ -3070,34 +3065,30 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
seq_printf(m,
" cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
- seq_printf(m, " Target ID: ");
+ seq_puts(m, " Target ID: ");
for (i = 0; i <= ASC_MAX_TID; i++)
seq_printf(m, " %d", i);
- seq_printf(m, "\n");
- seq_printf(m, " Disconnects: ");
+ seq_puts(m, "\n Disconnects: ");
for (i = 0; i <= ASC_MAX_TID; i++)
seq_printf(m, " %c",
(ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
- seq_printf(m, " Command Queuing: ");
+ seq_puts(m, "\n Command Queuing: ");
for (i = 0; i <= ASC_MAX_TID; i++)
seq_printf(m, " %c",
(ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
- seq_printf(m, " Start Motor: ");
+ seq_puts(m, "\n Start Motor: ");
for (i = 0; i <= ASC_MAX_TID; i++)
seq_printf(m, " %c",
(ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
- seq_printf(m, " Synchronous Transfer:");
+ seq_puts(m, "\n Synchronous Transfer:");
for (i = 0; i <= ASC_MAX_TID; i++)
seq_printf(m, " %c",
(ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
#ifdef CONFIG_ISA
if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
@@ -3151,7 +3142,7 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE)
seq_printf(m, " Serial Number: %s\n", serialstr);
else
- seq_printf(m, " Serial Number Signature Not Present.\n");
+ seq_puts(m, " Serial Number Signature Not Present.\n");
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
seq_printf(m,
@@ -3209,10 +3200,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
ep_38C1600->termination_lvd, termstr,
ep_38C1600->bios_ctrl);
- seq_printf(m, " Target ID: ");
+ seq_puts(m, " Target ID: ");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %X", i);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->disc_enable;
@@ -3221,11 +3212,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
} else {
word = ep_38C1600->disc_enable;
}
- seq_printf(m, " Disconnects: ");
+ seq_puts(m, " Disconnects: ");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %c",
(word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->tagqng_able;
@@ -3234,11 +3225,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
} else {
word = ep_38C1600->tagqng_able;
}
- seq_printf(m, " Command Queuing: ");
+ seq_puts(m, " Command Queuing: ");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %c",
(word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->start_motor;
@@ -3247,28 +3238,28 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
} else {
word = ep_38C1600->start_motor;
}
- seq_printf(m, " Start Motor: ");
+ seq_puts(m, " Start Motor: ");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %c",
(word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
- seq_printf(m, " Synchronous Transfer:");
+ seq_puts(m, " Synchronous Transfer:");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %c",
(ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
- seq_printf(m, " Ultra Transfer: ");
+ seq_puts(m, " Ultra Transfer: ");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %c",
(ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i))
? 'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
@@ -3278,16 +3269,15 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
} else {
word = ep_38C1600->wdtr_able;
}
- seq_printf(m, " Wide Transfer: ");
+ seq_puts(m, " Wide Transfer: ");
for (i = 0; i <= ADV_MAX_TID; i++)
seq_printf(m, " %c",
(word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
- seq_printf(m,
- " Synchronous Transfer Speed (Mhz):\n ");
+ seq_puts(m, " Synchronous Transfer Speed (Mhz):\n ");
for (i = 0; i <= ADV_MAX_TID; i++) {
char *speed_str;
@@ -3325,10 +3315,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
}
seq_printf(m, "%X:%s ", i, speed_str);
if (i == 7)
- seq_printf(m, "\n ");
+ seq_puts(m, "\n ");
sdtr_speed >>= 4;
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
}
@@ -3403,7 +3393,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m,
" Total Command Pending: %d\n", v->cur_total_qng);
- seq_printf(m, " Command Queuing:");
+ seq_puts(m, " Command Queuing:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3413,10 +3403,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
i,
(v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- seq_printf(m, "\n");
/* Current number of commands waiting for a device. */
- seq_printf(m, " Command Queue Pending:");
+ seq_puts(m, "\n Command Queue Pending:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3424,10 +3413,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
}
seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]);
}
- seq_printf(m, "\n");
/* Current limit on number of commands that can be sent to a device. */
- seq_printf(m, " Command Queue Limit:");
+ seq_puts(m, "\n Command Queue Limit:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3435,10 +3423,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
}
seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]);
}
- seq_printf(m, "\n");
/* Indicate whether the device has returned queue full status. */
- seq_printf(m, " Command Queue Full:");
+ seq_puts(m, "\n Command Queue Full:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3450,9 +3437,8 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
else
seq_printf(m, " %X:N", i);
}
- seq_printf(m, "\n");
- seq_printf(m, " Synchronous Transfer:");
+ seq_puts(m, "\n Synchronous Transfer:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3462,7 +3448,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
i,
(v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
for (i = 0; i <= ASC_MAX_TID; i++) {
uchar syn_period_ix;
@@ -3476,7 +3462,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m, " %X:", i);
if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
- seq_printf(m, " Asynchronous");
+ seq_puts(m, " Asynchronous");
} else {
syn_period_ix =
(boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
@@ -3494,16 +3480,15 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
}
if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
- seq_printf(m, "*\n");
+ seq_puts(m, "*\n");
renegotiate = 1;
} else {
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
}
if (renegotiate) {
- seq_printf(m,
- " * = Re-negotiation pending before next command.\n");
+ seq_puts(m, " * = Re-negotiation pending before next command.\n");
}
}
@@ -3548,7 +3533,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
c->mcode_date, c->mcode_version);
AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
- seq_printf(m, " Queuing Enabled:");
+ seq_puts(m, " Queuing Enabled:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3559,9 +3544,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
i,
(tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- seq_printf(m, "\n");
- seq_printf(m, " Queue Limit:");
+ seq_puts(m, "\n Queue Limit:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3573,9 +3557,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m, " %X:%d", i, lrambyte);
}
- seq_printf(m, "\n");
- seq_printf(m, " Command Pending:");
+ seq_puts(m, "\n Command Pending:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3587,10 +3570,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m, " %X:%d", i, lrambyte);
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
- seq_printf(m, " Wide Enabled:");
+ seq_puts(m, " Wide Enabled:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3601,10 +3584,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
i,
(wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
- seq_printf(m, " Transfer Bit Width:");
+ seq_puts(m, " Transfer Bit Width:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3620,14 +3603,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
(wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
- seq_printf(m, "*");
+ seq_putc(m, '*');
renegotiate = 1;
}
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
- seq_printf(m, " Synchronous Enabled:");
+ seq_puts(m, " Synchronous Enabled:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3638,7 +3621,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
i,
(sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
for (i = 0; i <= ADV_MAX_TID; i++) {
@@ -3657,14 +3640,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m, " %X:", i);
if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */
- seq_printf(m, " Asynchronous");
+ seq_puts(m, " Asynchronous");
} else {
- seq_printf(m, " Transfer Period Factor: ");
+ seq_puts(m, " Transfer Period Factor: ");
if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */
- seq_printf(m, "9 (80.0 Mhz),");
+ seq_puts(m, "9 (80.0 Mhz),");
} else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */
- seq_printf(m, "10 (40.0 Mhz),");
+ seq_puts(m, "10 (40.0 Mhz),");
} else { /* 20 Mhz or below. */
period = (((lramword >> 8) * 25) + 50) / 4;
@@ -3684,16 +3667,15 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
}
if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
- seq_printf(m, "*\n");
+ seq_puts(m, "*\n");
renegotiate = 1;
} else {
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
}
if (renegotiate) {
- seq_printf(m,
- " * = Re-negotiation pending before next command.\n");
+ seq_puts(m, " * = Re-negotiation pending before next command.\n");
}
}
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 2b960b326daf..e31c460a1335 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2490,299 +2490,296 @@ static void show_queues(struct Scsi_Host *shpnt)
disp_enintr(shpnt);
}
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
-
static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
{
int i;
- SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
+ seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
ptr, ptr->device->id, (u8)ptr->device->lun);
for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
- SPRINTF("0x%02x ", ptr->cmnd[i]);
+ seq_printf(m, "0x%02x ", ptr->cmnd[i]);
- SPRINTF("); resid=%d; residual=%d; buffers=%d; phase |",
+ seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
scsi_get_resid(ptr), ptr->SCp.this_residual,
ptr->SCp.buffers_residual);
if (ptr->SCp.phase & not_issued)
- SPRINTF("not issued|");
+ seq_puts(m, "not issued|");
if (ptr->SCp.phase & selecting)
- SPRINTF("selecting|");
+ seq_puts(m, "selecting|");
if (ptr->SCp.phase & disconnected)
- SPRINTF("disconnected|");
+ seq_puts(m, "disconnected|");
if (ptr->SCp.phase & aborted)
- SPRINTF("aborted|");
+ seq_puts(m, "aborted|");
if (ptr->SCp.phase & identified)
- SPRINTF("identified|");
+ seq_puts(m, "identified|");
if (ptr->SCp.phase & completed)
- SPRINTF("completed|");
+ seq_puts(m, "completed|");
if (ptr->SCp.phase & spiordy)
- SPRINTF("spiordy|");
+ seq_puts(m, "spiordy|");
if (ptr->SCp.phase & syncneg)
- SPRINTF("syncneg|");
- SPRINTF("; next=0x%p\n", SCNEXT(ptr));
+ seq_puts(m, "syncneg|");
+ seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
}
static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt)
{
int s;
- SPRINTF("\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
+ seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
s = GETPORT(SCSISEQ);
- SPRINTF("SCSISEQ( ");
+ seq_puts(m, "SCSISEQ( ");
if (s & TEMODEO)
- SPRINTF("TARGET MODE ");
+ seq_puts(m, "TARGET MODE ");
if (s & ENSELO)
- SPRINTF("SELO ");
+ seq_puts(m, "SELO ");
if (s & ENSELI)
- SPRINTF("SELI ");
+ seq_puts(m, "SELI ");
if (s & ENRESELI)
- SPRINTF("RESELI ");
+ seq_puts(m, "RESELI ");
if (s & ENAUTOATNO)
- SPRINTF("AUTOATNO ");
+ seq_puts(m, "AUTOATNO ");
if (s & ENAUTOATNI)
- SPRINTF("AUTOATNI ");
+ seq_puts(m, "AUTOATNI ");
if (s & ENAUTOATNP)
- SPRINTF("AUTOATNP ");
+ seq_puts(m, "AUTOATNP ");
if (s & SCSIRSTO)
- SPRINTF("SCSIRSTO ");
- SPRINTF(");");
+ seq_puts(m, "SCSIRSTO ");
+ seq_puts(m, ");");
- SPRINTF(" SCSISIG(");
+ seq_puts(m, " SCSISIG(");
s = GETPORT(SCSISIG);
switch (s & P_MASK) {
case P_DATAO:
- SPRINTF("DATA OUT");
+ seq_puts(m, "DATA OUT");
break;
case P_DATAI:
- SPRINTF("DATA IN");
+ seq_puts(m, "DATA IN");
break;
case P_CMD:
- SPRINTF("COMMAND");
+ seq_puts(m, "COMMAND");
break;
case P_STATUS:
- SPRINTF("STATUS");
+ seq_puts(m, "STATUS");
break;
case P_MSGO:
- SPRINTF("MESSAGE OUT");
+ seq_puts(m, "MESSAGE OUT");
break;
case P_MSGI:
- SPRINTF("MESSAGE IN");
+ seq_puts(m, "MESSAGE IN");
break;
default:
- SPRINTF("*invalid*");
+ seq_puts(m, "*invalid*");
break;
}
- SPRINTF("); ");
+ seq_puts(m, "); ");
- SPRINTF("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+ seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
- SPRINTF("SSTAT( ");
+ seq_puts(m, "SSTAT( ");
s = GETPORT(SSTAT0);
if (s & TARGET)
- SPRINTF("TARGET ");
+ seq_puts(m, "TARGET ");
if (s & SELDO)
- SPRINTF("SELDO ");
+ seq_puts(m, "SELDO ");
if (s & SELDI)
- SPRINTF("SELDI ");
+ seq_puts(m, "SELDI ");
if (s & SELINGO)
- SPRINTF("SELINGO ");
+ seq_puts(m, "SELINGO ");
if (s & SWRAP)
- SPRINTF("SWRAP ");
+ seq_puts(m, "SWRAP ");
if (s & SDONE)
- SPRINTF("SDONE ");
+ seq_puts(m, "SDONE ");
if (s & SPIORDY)
- SPRINTF("SPIORDY ");
+ seq_puts(m, "SPIORDY ");
if (s & DMADONE)
- SPRINTF("DMADONE ");
+ seq_puts(m, "DMADONE ");
s = GETPORT(SSTAT1);
if (s & SELTO)
- SPRINTF("SELTO ");
+ seq_puts(m, "SELTO ");
if (s & ATNTARG)
- SPRINTF("ATNTARG ");
+ seq_puts(m, "ATNTARG ");
if (s & SCSIRSTI)
- SPRINTF("SCSIRSTI ");
+ seq_puts(m, "SCSIRSTI ");
if (s & PHASEMIS)
- SPRINTF("PHASEMIS ");
+ seq_puts(m, "PHASEMIS ");
if (s & BUSFREE)
- SPRINTF("BUSFREE ");
+ seq_puts(m, "BUSFREE ");
if (s & SCSIPERR)
- SPRINTF("SCSIPERR ");
+ seq_puts(m, "SCSIPERR ");
if (s & PHASECHG)
- SPRINTF("PHASECHG ");
+ seq_puts(m, "PHASECHG ");
if (s & REQINIT)
- SPRINTF("REQINIT ");
- SPRINTF("); ");
+ seq_puts(m, "REQINIT ");
+ seq_puts(m, "); ");
- SPRINTF("SSTAT( ");
+ seq_puts(m, "SSTAT( ");
s = GETPORT(SSTAT0) & GETPORT(SIMODE0);
if (s & TARGET)
- SPRINTF("TARGET ");
+ seq_puts(m, "TARGET ");
if (s & SELDO)
- SPRINTF("SELDO ");
+ seq_puts(m, "SELDO ");
if (s & SELDI)
- SPRINTF("SELDI ");
+ seq_puts(m, "SELDI ");
if (s & SELINGO)
- SPRINTF("SELINGO ");
+ seq_puts(m, "SELINGO ");
if (s & SWRAP)
- SPRINTF("SWRAP ");
+ seq_puts(m, "SWRAP ");
if (s & SDONE)
- SPRINTF("SDONE ");
+ seq_puts(m, "SDONE ");
if (s & SPIORDY)
- SPRINTF("SPIORDY ");
+ seq_puts(m, "SPIORDY ");
if (s & DMADONE)
- SPRINTF("DMADONE ");
+ seq_puts(m, "DMADONE ");
s = GETPORT(SSTAT1) & GETPORT(SIMODE1);
if (s & SELTO)
- SPRINTF("SELTO ");
+ seq_puts(m, "SELTO ");
if (s & ATNTARG)
- SPRINTF("ATNTARG ");
+ seq_puts(m, "ATNTARG ");
if (s & SCSIRSTI)
- SPRINTF("SCSIRSTI ");
+ seq_puts(m, "SCSIRSTI ");
if (s & PHASEMIS)
- SPRINTF("PHASEMIS ");
+ seq_puts(m, "PHASEMIS ");
if (s & BUSFREE)
- SPRINTF("BUSFREE ");
+ seq_puts(m, "BUSFREE ");
if (s & SCSIPERR)
- SPRINTF("SCSIPERR ");
+ seq_puts(m, "SCSIPERR ");
if (s & PHASECHG)
- SPRINTF("PHASECHG ");
+ seq_puts(m, "PHASECHG ");
if (s & REQINIT)
- SPRINTF("REQINIT ");
- SPRINTF("); ");
+ seq_puts(m, "REQINIT ");
+ seq_puts(m, "); ");
- SPRINTF("SXFRCTL0( ");
+ seq_puts(m, "SXFRCTL0( ");
s = GETPORT(SXFRCTL0);
if (s & SCSIEN)
- SPRINTF("SCSIEN ");
+ seq_puts(m, "SCSIEN ");
if (s & DMAEN)
- SPRINTF("DMAEN ");
+ seq_puts(m, "DMAEN ");
if (s & CH1)
- SPRINTF("CH1 ");
+ seq_puts(m, "CH1 ");
if (s & CLRSTCNT)
- SPRINTF("CLRSTCNT ");
+ seq_puts(m, "CLRSTCNT ");
if (s & SPIOEN)
- SPRINTF("SPIOEN ");
+ seq_puts(m, "SPIOEN ");
if (s & CLRCH1)
- SPRINTF("CLRCH1 ");
- SPRINTF("); ");
+ seq_puts(m, "CLRCH1 ");
+ seq_puts(m, "); ");
- SPRINTF("SIGNAL( ");
+ seq_puts(m, "SIGNAL( ");
s = GETPORT(SCSISIG);
if (s & SIG_ATNI)
- SPRINTF("ATNI ");
+ seq_puts(m, "ATNI ");
if (s & SIG_SELI)
- SPRINTF("SELI ");
+ seq_puts(m, "SELI ");
if (s & SIG_BSYI)
- SPRINTF("BSYI ");
+ seq_puts(m, "BSYI ");
if (s & SIG_REQI)
- SPRINTF("REQI ");
+ seq_puts(m, "REQI ");
if (s & SIG_ACKI)
- SPRINTF("ACKI ");
- SPRINTF("); ");
+ seq_puts(m, "ACKI ");
+ seq_puts(m, "); ");
- SPRINTF("SELID(%02x), ", GETPORT(SELID));
+ seq_printf(m, "SELID(%02x), ", GETPORT(SELID));
- SPRINTF("STCNT(%d), ", GETSTCNT());
+ seq_printf(m, "STCNT(%d), ", GETSTCNT());
- SPRINTF("SSTAT2( ");
+ seq_puts(m, "SSTAT2( ");
s = GETPORT(SSTAT2);
if (s & SOFFSET)
- SPRINTF("SOFFSET ");
+ seq_puts(m, "SOFFSET ");
if (s & SEMPTY)
- SPRINTF("SEMPTY ");
+ seq_puts(m, "SEMPTY ");
if (s & SFULL)
- SPRINTF("SFULL ");
- SPRINTF("); SFCNT (%d); ", s & (SFULL | SFCNT));
+ seq_puts(m, "SFULL ");
+ seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT));
s = GETPORT(SSTAT3);
- SPRINTF("SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
+ seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
- SPRINTF("SSTAT4( ");
+ seq_puts(m, "SSTAT4( ");
s = GETPORT(SSTAT4);
if (s & SYNCERR)
- SPRINTF("SYNCERR ");
+ seq_puts(m, "SYNCERR ");
if (s & FWERR)
- SPRINTF("FWERR ");
+ seq_puts(m, "FWERR ");
if (s & FRERR)
- SPRINTF("FRERR ");
- SPRINTF("); ");
+ seq_puts(m, "FRERR ");
+ seq_puts(m, "); ");
- SPRINTF("DMACNTRL0( ");
+ seq_puts(m, "DMACNTRL0( ");
s = GETPORT(DMACNTRL0);
- SPRINTF("%s ", s & _8BIT ? "8BIT" : "16BIT");
- SPRINTF("%s ", s & DMA ? "DMA" : "PIO");
- SPRINTF("%s ", s & WRITE_READ ? "WRITE" : "READ");
+ seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT");
+ seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO");
+ seq_printf(m, "%s ", s & WRITE_READ ? "WRITE" : "READ");
if (s & ENDMA)
- SPRINTF("ENDMA ");
+ seq_puts(m, "ENDMA ");
if (s & INTEN)
- SPRINTF("INTEN ");
+ seq_puts(m, "INTEN ");
if (s & RSTFIFO)
- SPRINTF("RSTFIFO ");
+ seq_puts(m, "RSTFIFO ");
if (s & SWINT)
- SPRINTF("SWINT ");
- SPRINTF("); ");
+ seq_puts(m, "SWINT ");
+ seq_puts(m, "); ");
- SPRINTF("DMASTAT( ");
+ seq_puts(m, "DMASTAT( ");
s = GETPORT(DMASTAT);
if (s & ATDONE)
- SPRINTF("ATDONE ");
+ seq_puts(m, "ATDONE ");
if (s & WORDRDY)
- SPRINTF("WORDRDY ");
+ seq_puts(m, "WORDRDY ");
if (s & DFIFOFULL)
- SPRINTF("DFIFOFULL ");
+ seq_puts(m, "DFIFOFULL ");
if (s & DFIFOEMP)
- SPRINTF("DFIFOEMP ");
- SPRINTF(")\n");
+ seq_puts(m, "DFIFOEMP ");
+ seq_puts(m, ")\n");
- SPRINTF("enabled interrupts( ");
+ seq_puts(m, "enabled interrupts( ");
s = GETPORT(SIMODE0);
if (s & ENSELDO)
- SPRINTF("ENSELDO ");
+ seq_puts(m, "ENSELDO ");
if (s & ENSELDI)
- SPRINTF("ENSELDI ");
+ seq_puts(m, "ENSELDI ");
if (s & ENSELINGO)
- SPRINTF("ENSELINGO ");
+ seq_puts(m, "ENSELINGO ");
if (s & ENSWRAP)
- SPRINTF("ENSWRAP ");
+ seq_puts(m, "ENSWRAP ");
if (s & ENSDONE)
- SPRINTF("ENSDONE ");
+ seq_puts(m, "ENSDONE ");
if (s & ENSPIORDY)
- SPRINTF("ENSPIORDY ");
+ seq_puts(m, "ENSPIORDY ");
if (s & ENDMADONE)
- SPRINTF("ENDMADONE ");
+ seq_puts(m, "ENDMADONE ");
s = GETPORT(SIMODE1);
if (s & ENSELTIMO)
- SPRINTF("ENSELTIMO ");
+ seq_puts(m, "ENSELTIMO ");
if (s & ENATNTARG)
- SPRINTF("ENATNTARG ");
+ seq_puts(m, "ENATNTARG ");
if (s & ENPHASEMIS)
- SPRINTF("ENPHASEMIS ");
+ seq_puts(m, "ENPHASEMIS ");
if (s & ENBUSFREE)
- SPRINTF("ENBUSFREE ");
+ seq_puts(m, "ENBUSFREE ");
if (s & ENSCSIPERR)
- SPRINTF("ENSCSIPERR ");
+ seq_puts(m, "ENSCSIPERR ");
if (s & ENPHASECHG)
- SPRINTF("ENPHASECHG ");
+ seq_puts(m, "ENPHASECHG ");
if (s & ENREQINIT)
- SPRINTF("ENREQINIT ");
- SPRINTF(")\n");
+ seq_puts(m, "ENREQINIT ");
+ seq_puts(m, ")\n");
}
static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
@@ -2825,56 +2822,56 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
Scsi_Cmnd *ptr;
unsigned long flags;
- SPRINTF(AHA152X_REVID "\n");
+ seq_puts(m, AHA152X_REVID "\n");
- SPRINTF("ioports 0x%04lx to 0x%04lx\n",
+ seq_printf(m, "ioports 0x%04lx to 0x%04lx\n",
shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1);
- SPRINTF("interrupt 0x%02x\n", shpnt->irq);
- SPRINTF("disconnection/reconnection %s\n",
+ seq_printf(m, "interrupt 0x%02x\n", shpnt->irq);
+ seq_printf(m, "disconnection/reconnection %s\n",
RECONNECT ? "enabled" : "disabled");
- SPRINTF("parity checking %s\n",
+ seq_printf(m, "parity checking %s\n",
PARITY ? "enabled" : "disabled");
- SPRINTF("synchronous transfers %s\n",
+ seq_printf(m, "synchronous transfers %s\n",
SYNCHRONOUS ? "enabled" : "disabled");
- SPRINTF("%d commands currently queued\n", HOSTDATA(shpnt)->commands);
+ seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands);
if(SYNCHRONOUS) {
- SPRINTF("synchronously operating targets (tick=50 ns):\n");
+ seq_puts(m, "synchronously operating targets (tick=50 ns):\n");
for (i = 0; i < 8; i++)
if (HOSTDATA(shpnt)->syncrate[i] & 0x7f)
- SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n",
+ seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n",
i,
(((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2),
(((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50,
HOSTDATA(shpnt)->syncrate[i] & 0x0f);
}
- SPRINTF("\nqueue status:\n");
+ seq_puts(m, "\nqueue status:\n");
DO_LOCK(flags);
if (ISSUE_SC) {
- SPRINTF("not yet issued commands:\n");
+ seq_puts(m, "not yet issued commands:\n");
for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr))
get_command(m, ptr);
} else
- SPRINTF("no not yet issued commands\n");
+ seq_puts(m, "no not yet issued commands\n");
DO_UNLOCK(flags);
if (CURRENT_SC) {
- SPRINTF("current command:\n");
+ seq_puts(m, "current command:\n");
get_command(m, CURRENT_SC);
} else
- SPRINTF("no current command\n");
+ seq_puts(m, "no current command\n");
if (DISCONNECTED_SC) {
- SPRINTF("disconnected commands:\n");
+ seq_puts(m, "disconnected commands:\n");
for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr))
get_command(m, ptr);
} else
- SPRINTF("no disconnected commands\n");
+ seq_puts(m, "no disconnected commands\n");
get_ports(m, shpnt);
#if defined(AHA152X_STAT)
- SPRINTF("statistics:\n"
+ seq_printf(m, "statistics:\n"
"total commands: %d\n"
"disconnections: %d\n"
"busfree with check condition: %d\n"
@@ -2894,7 +2891,7 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
HOSTDATA(shpnt)->busfree_without_done_command,
HOSTDATA(shpnt)->busfree_without_any_action);
for(i=0; i<maxstate; i++) {
- SPRINTF("%-10s %-12d %-12d %-12ld\n",
+ seq_printf(m, "%-10s %-12d %-12d %-12ld\n",
states[i].name,
HOSTDATA(shpnt)->count_trans[i],
HOSTDATA(shpnt)->count[i],
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 0bcacf71aef8..97f2accd3dbb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -1298,7 +1298,7 @@ rescan_fifos:
/*
* Wait for any inprogress DMA to complete and clear DMA state
- * if this if for an SCB in the qinfifo.
+ * if this is for an SCB in the qinfifo.
*/
while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 27dbfccea774..add2da581d66 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -97,7 +97,7 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
u_int mb;
if (tinfo->period == AHD_PERIOD_UNKNOWN) {
- seq_printf(m, "Renegotiation Pending\n");
+ seq_puts(m, "Renegotiation Pending\n");
return;
}
speed = 3300;
@@ -119,40 +119,38 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
printed_options = 0;
seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000);
if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
- seq_printf(m, " RDSTRM");
+ seq_puts(m, " RDSTRM");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
- seq_printf(m, "%s", printed_options ? "|DT" : " DT");
+ seq_puts(m, printed_options ? "|DT" : " DT");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
- seq_printf(m, "%s", printed_options ? "|IU" : " IU");
+ seq_puts(m, printed_options ? "|IU" : " IU");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) {
- seq_printf(m, "%s",
- printed_options ? "|RTI" : " RTI");
+ seq_puts(m, printed_options ? "|RTI" : " RTI");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
- seq_printf(m, "%s",
- printed_options ? "|QAS" : " QAS");
+ seq_puts(m, printed_options ? "|QAS" : " QAS");
printed_options++;
}
}
if (tinfo->width > 0) {
if (freq != 0) {
- seq_printf(m, ", ");
+ seq_puts(m, ", ");
} else {
- seq_printf(m, " (");
+ seq_puts(m, " (");
}
seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
} else if (freq != 0) {
- seq_printf(m, ")");
+ seq_putc(m, ')');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
static void
@@ -167,15 +165,15 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m,
tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
target_id, &tstate);
seq_printf(m, "Target %d Negotiation Settings\n", target_id);
- seq_printf(m, "\tUser: ");
+ seq_puts(m, "\tUser: ");
ahd_format_transinfo(m, &tinfo->user);
starget = ahd->platform_data->starget[target_id];
if (starget == NULL)
return;
- seq_printf(m, "\tGoal: ");
+ seq_puts(m, "\tGoal: ");
ahd_format_transinfo(m, &tinfo->goal);
- seq_printf(m, "\tCurr: ");
+ seq_puts(m, "\tCurr: ");
ahd_format_transinfo(m, &tinfo->curr);
for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
@@ -291,19 +289,19 @@ ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
max_targ = 16;
if (ahd->seep_config == NULL)
- seq_printf(m, "No Serial EEPROM\n");
+ seq_puts(m, "No Serial EEPROM\n");
else {
- seq_printf(m, "Serial EEPROM:\n");
+ seq_puts(m, "Serial EEPROM:\n");
for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) {
if (((i % 8) == 0) && (i != 0)) {
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
seq_printf(m, "0x%.4x ",
((uint16_t*)ahd->seep_config)[i]);
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
if ((ahd->features & AHD_WIDE) == 0)
max_targ = 8;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 64eec6c07a83..18459605d991 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -119,15 +119,15 @@ ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo)
if (tinfo->width > 0) {
if (freq != 0) {
- seq_printf(m, ", ");
+ seq_puts(m, ", ");
} else {
- seq_printf(m, " (");
+ seq_puts(m, " (");
}
seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
} else if (freq != 0) {
- seq_printf(m, ")");
+ seq_putc(m, ')');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
static void
@@ -145,15 +145,15 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m,
if ((ahc->features & AHC_TWIN) != 0)
seq_printf(m, "Channel %c ", channel);
seq_printf(m, "Target %d Negotiation Settings\n", target_id);
- seq_printf(m, "\tUser: ");
+ seq_puts(m, "\tUser: ");
ahc_format_transinfo(m, &tinfo->user);
starget = ahc->platform_data->starget[target_offset];
if (!starget)
return;
- seq_printf(m, "\tGoal: ");
+ seq_puts(m, "\tGoal: ");
ahc_format_transinfo(m, &tinfo->goal);
- seq_printf(m, "\tCurr: ");
+ seq_puts(m, "\tCurr: ");
ahc_format_transinfo(m, &tinfo->curr);
for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
@@ -303,19 +303,19 @@ ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
if (ahc->seep_config == NULL)
- seq_printf(m, "No Serial EEPROM\n");
+ seq_puts(m, "No Serial EEPROM\n");
else {
- seq_printf(m, "Serial EEPROM:\n");
+ seq_puts(m, "Serial EEPROM:\n");
for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) {
if (((i % 8) == 0) && (i != 0)) {
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
seq_printf(m, "0x%.4x ",
((uint16_t*)ahc->seep_config)[i]);
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
max_targ = 16;
if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index e64c3af7c1a0..decdc71b6b86 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2990,7 +2990,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
struct fas216_device *dev;
struct scsi_device *scd;
- seq_printf(m, "Device/Lun TaggedQ Parity Sync\n");
+ seq_puts(m, "Device/Lun TaggedQ Parity Sync\n");
shost_for_each_device(scd, info->host) {
dev = &info->device[scd->id];
@@ -3000,7 +3000,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
scd->simple_tags ? "en" : "dis",
scd->current_tag);
else
- seq_printf(m, "unsupported ");
+ seq_puts(m, "unsupported ");
seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis");
@@ -3008,7 +3008,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
seq_printf(m, "offset %d, %d ns\n",
dev->sof, dev->period * 4);
else
- seq_printf(m, "async\n");
+ seq_puts(m, "async\n");
}
}
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 6daed6b386d4..a70255413e7f 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -711,12 +711,12 @@ static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
unsigned char *command;
seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
- seq_printf(m, " command = ");
+ seq_puts(m, " command = ");
command = cmd->cmnd;
seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
seq_printf(m, " %02x", command[i]);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
static int __maybe_unused NCR5380_show_info(struct seq_file *m,
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index a795d81ef875..0836433e3a2d 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3101,9 +3101,8 @@ static const char *atp870u_info(struct Scsi_Host *notused)
static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
{
- seq_printf(m, "ACARD AEC-671X Driver Version: 2.6+ac\n");
- seq_printf(m, "\n");
- seq_printf(m, "Adapter Configuration:\n");
+ seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n"
+ "Adapter Configuration:\n");
seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port);
seq_printf(m, " IRQ: %d\n", HBAptr->irq);
return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index f3193406776c..96241b20fd2c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -48,7 +48,6 @@ static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
-MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 6bac8a746ee2..0045742fab7d 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -194,16 +194,10 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
retry:
errno = 0;
- if (debug) {
- DPRINTK("command: ");
- __scsi_print_command(cmd, cmd_len);
- }
-
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
- DPRINTK("result: 0x%x\n",result);
if (driver_byte(result) & DRIVER_SENSE) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index e2068a2621c4..fa09d4be2b53 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -18,14 +18,10 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
-
-
/* Commands with service actions that change the command name */
#define THIRD_PARTY_COPY_OUT 0x83
#define THIRD_PARTY_COPY_IN 0x84
-#define VENDOR_SPECIFIC_CDB 0xc0
-
struct sa_name_list {
int opcode;
const struct value_name_pair *arr;
@@ -37,7 +33,6 @@ struct value_name_pair {
const char * name;
};
-#ifdef CONFIG_SCSI_CONSTANTS
static const char * cdb_byte0_names[] = {
/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
@@ -261,28 +256,8 @@ static struct sa_name_list sa_names_arr[] = {
{0, NULL, 0},
};
-#else /* ifndef CONFIG_SCSI_CONSTANTS */
-static const char *cdb_byte0_names[0];
-
-static struct sa_name_list sa_names_arr[] = {
- {VARIABLE_LENGTH_CMD, NULL, 0},
- {MAINTENANCE_IN, NULL, 0},
- {MAINTENANCE_OUT, NULL, 0},
- {PERSISTENT_RESERVE_IN, NULL, 0},
- {PERSISTENT_RESERVE_OUT, NULL, 0},
- {SERVICE_ACTION_IN_12, NULL, 0},
- {SERVICE_ACTION_OUT_12, NULL, 0},
- {SERVICE_ACTION_BIDIRECTIONAL, NULL, 0},
- {SERVICE_ACTION_IN_16, NULL, 0},
- {SERVICE_ACTION_OUT_16, NULL, 0},
- {THIRD_PARTY_COPY_IN, NULL, 0},
- {THIRD_PARTY_COPY_OUT, NULL, 0},
- {0, NULL, 0},
-};
-#endif /* CONFIG_SCSI_CONSTANTS */
-
-static bool scsi_opcode_sa_name(int opcode, int service_action,
- const char **cdb_name, const char **sa_name)
+bool scsi_opcode_sa_name(int opcode, int service_action,
+ const char **cdb_name, const char **sa_name)
{
struct sa_name_list *sa_name_ptr;
const struct value_name_pair *arr = NULL;
@@ -315,76 +290,6 @@ static bool scsi_opcode_sa_name(int opcode, int service_action,
return true;
}
-static void print_opcode_name(const unsigned char *cdbp, size_t cdb_len)
-{
- int sa, cdb0;
- const char *cdb_name = NULL, *sa_name = NULL;
-
- cdb0 = cdbp[0];
- if (cdb0 == VARIABLE_LENGTH_CMD) {
- if (cdb_len < 10) {
- printk("short variable length command, len=%zu",
- cdb_len);
- return;
- }
- sa = (cdbp[8] << 8) + cdbp[9];
- } else
- sa = cdbp[1] & 0x1f;
-
- if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
- if (cdb_name)
- printk("%s", cdb_name);
- else if (cdb0 >= VENDOR_SPECIFIC_CDB)
- printk("cdb[0]=0x%x (vendor)", cdb0);
- else if (cdb0 >= 0x60 && cdb0 < 0x7e)
- printk("cdb[0]=0x%x (reserved)", cdb0);
- else
- printk("cdb[0]=0x%x", cdb0);
- } else {
- if (sa_name)
- printk("%s", sa_name);
- else if (cdb_name)
- printk("%s, sa=0x%x", cdb_name, sa);
- else
- printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
- }
-}
-
-void __scsi_print_command(const unsigned char *cdb, size_t cdb_len)
-{
- int k, len;
-
- print_opcode_name(cdb, cdb_len);
- len = scsi_command_size(cdb);
- if (cdb_len < len)
- len = cdb_len;
- /* print out all bytes in cdb */
- for (k = 0; k < len; ++k)
- printk(" %02x", cdb[k]);
- printk("\n");
-}
-EXPORT_SYMBOL(__scsi_print_command);
-
-void scsi_print_command(struct scsi_cmnd *cmd)
-{
- int k;
-
- if (cmd->cmnd == NULL)
- return;
-
- scmd_printk(KERN_INFO, cmd, "CDB: ");
- print_opcode_name(cmd->cmnd, cmd->cmd_len);
-
- /* print out all bytes in cdb */
- printk(":");
- for (k = 0; k < cmd->cmd_len; ++k)
- printk(" %02x", cmd->cmnd[k]);
- printk("\n");
-}
-EXPORT_SYMBOL(scsi_print_command);
-
-#ifdef CONFIG_SCSI_CONSTANTS
-
struct error_info {
unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */
const char * text;
@@ -392,7 +297,7 @@ struct error_info {
/*
* The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20130605]
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
*/
static const struct error_info additional[] =
@@ -421,6 +326,7 @@ static const struct error_info additional[] =
{0x001E, "Conflicting SA creation request"},
{0x001F, "Logical unit transitioning to another power condition"},
{0x0020, "Extended copy information available"},
+ {0x0021, "Atomic command aborted due to ACA"},
{0x0100, "No index/sector signal"},
@@ -446,6 +352,7 @@ static const struct error_info additional[] =
{0x040C, "Logical unit not accessible, target port in unavailable "
"state"},
{0x040D, "Logical unit not ready, structure check required"},
+ {0x040E, "Logical unit not ready, security session in progress"},
{0x0410, "Logical unit not ready, auxiliary memory not accessible"},
{0x0411, "Logical unit not ready, notify (enable spinup) required"},
{0x0412, "Logical unit not ready, offline"},
@@ -462,6 +369,11 @@ static const struct error_info additional[] =
{0x041C, "Logical unit not ready, additional power use not yet "
"granted"},
{0x041D, "Logical unit not ready, configuration in progress"},
+ {0x041E, "Logical unit not ready, microcode activation required"},
+ {0x041F, "Logical unit not ready, microcode download required"},
+ {0x0420, "Logical unit not ready, logical unit reset required"},
+ {0x0421, "Logical unit not ready, hard reset required"},
+ {0x0422, "Logical unit not ready, power cycle required"},
{0x0500, "Logical unit does not respond to selection"},
@@ -480,6 +392,7 @@ static const struct error_info additional[] =
{0x0902, "Focus servo failure"},
{0x0903, "Spindle servo failure"},
{0x0904, "Head select fault"},
+ {0x0905, "Vibration induced tracking error"},
{0x0A00, "Error log overflow"},
@@ -510,6 +423,7 @@ static const struct error_info additional[] =
{0x0C0D, "Write error - not enough unsolicited data"},
{0x0C0E, "Multiple write errors"},
{0x0C0F, "Defects in error window"},
+ {0x0C10, "Incomplete multiple atomic write operations"},
{0x0D00, "Error detected by third party temporary initiator"},
{0x0D01, "Third party device failure"},
@@ -635,6 +549,10 @@ static const struct error_info additional[] =
{0x2101, "Invalid element address"},
{0x2102, "Invalid address for write"},
{0x2103, "Invalid write crossing layer jump"},
+ {0x2104, "Unaligned write command"},
+ {0x2105, "Write boundary violation"},
+ {0x2106, "Attempt to read invalid data"},
+ {0x2107, "Read boundary violation"},
{0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
@@ -691,6 +609,7 @@ static const struct error_info additional[] =
{0x2705, "Permanent write protect"},
{0x2706, "Conditional write protect"},
{0x2707, "Space allocation failed write protect"},
+ {0x2708, "Zone is read only"},
{0x2800, "Not ready to ready change, medium may have changed"},
{0x2801, "Import or export element accessed"},
@@ -743,10 +662,15 @@ static const struct error_info additional[] =
{0x2C0A, "Partition or collection contains user objects"},
{0x2C0B, "Not reserved"},
{0x2C0C, "Orwrite generation does not match"},
+ {0x2C0D, "Reset write pointer not allowed"},
+ {0x2C0E, "Zone is offline"},
{0x2D00, "Overwrite error on update in place"},
{0x2E00, "Insufficient time for operation"},
+ {0x2E01, "Command timeout before processing"},
+ {0x2E02, "Command timeout during processing"},
+ {0x2E03, "Command timeout during processing due to error recovery"},
{0x2F00, "Commands cleared by another initiator"},
{0x2F01, "Commands cleared by power loss notification"},
@@ -868,6 +792,7 @@ static const struct error_info additional[] =
{0x3F13, "iSCSI IP address removed"},
{0x3F14, "iSCSI IP address changed"},
{0x3F15, "Inspect referrals sense descriptors"},
+ {0x3F16, "Microcode has been changed without reset"},
/*
* {0x40NN, "Ram failure"},
* {0x40NN, "Diagnostic failure on component nn"},
@@ -946,6 +871,11 @@ static const struct error_info additional[] =
{0x5306, "Volume identifier missing"},
{0x5307, "Duplicate volume identifier"},
{0x5308, "Element status unknown"},
+ {0x5309, "Data transfer device error - load failed"},
+ {0x530a, "Data transfer device error - unload failed"},
+ {0x530b, "Data transfer device error - unload missing"},
+ {0x530c, "Data transfer device error - eject failed"},
+ {0x530d, "Data transfer device error - library communication failed"},
{0x5400, "Scsi to host system interface failure"},
@@ -963,6 +893,7 @@ static const struct error_info additional[] =
{0x550B, "Insufficient power for operation"},
{0x550C, "Insufficient resources to create rod"},
{0x550D, "Insufficient resources to create rod token"},
+ {0x550E, "Insufficient zone resources"},
{0x5700, "Unable to recover table-of-contents"},
@@ -1247,15 +1178,12 @@ static const char * const snstext[] = {
"Completed", /* F: command completed sense data reported,
may occur for successful command */
};
-#endif
/* Get sense key string or NULL if not available */
const char *
scsi_sense_key_string(unsigned char key) {
-#ifdef CONFIG_SCSI_CONSTANTS
if (key <= 0xE)
return snstext[key];
-#endif
return NULL;
}
EXPORT_SYMBOL(scsi_sense_key_string);
@@ -1267,7 +1195,6 @@ EXPORT_SYMBOL(scsi_sense_key_string);
const char *
scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
{
-#ifdef CONFIG_SCSI_CONSTANTS
int i;
unsigned short code = ((asc << 8) | ascq);
@@ -1283,122 +1210,10 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
return additional2[i].str;
}
}
-#else
- *fmt = NULL;
-#endif
return NULL;
}
EXPORT_SYMBOL(scsi_extd_sense_format);
-void
-scsi_show_extd_sense(const struct scsi_device *sdev, const char *name,
- unsigned char asc, unsigned char ascq)
-{
- const char *extd_sense_fmt = NULL;
- const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
- &extd_sense_fmt);
-
- if (extd_sense_str) {
- if (extd_sense_fmt)
- sdev_prefix_printk(KERN_INFO, sdev, name,
- "Add. Sense: %s (%s%x)",
- extd_sense_str, extd_sense_fmt,
- ascq);
- else
- sdev_prefix_printk(KERN_INFO, sdev, name,
- "Add. Sense: %s", extd_sense_str);
-
- } else {
- sdev_prefix_printk(KERN_INFO, sdev, name,
- "%sASC=0x%x %sASCQ=0x%x\n",
- asc >= 0x80 ? "<<vendor>> " : "", asc,
- ascq >= 0x80 ? "<<vendor>> " : "", ascq);
- }
-}
-EXPORT_SYMBOL(scsi_show_extd_sense);
-
-void
-scsi_show_sense_hdr(const struct scsi_device *sdev, const char *name,
- const struct scsi_sense_hdr *sshdr)
-{
- const char *sense_txt;
-
- sense_txt = scsi_sense_key_string(sshdr->sense_key);
- if (sense_txt)
- sdev_prefix_printk(KERN_INFO, sdev, name,
- "Sense Key : %s [%s]%s\n", sense_txt,
- scsi_sense_is_deferred(sshdr) ?
- "deferred" : "current",
- sshdr->response_code >= 0x72 ?
- " [descriptor]" : "");
- else
- sdev_prefix_printk(KERN_INFO, sdev, name,
- "Sense Key : 0x%x [%s]%s", sshdr->sense_key,
- scsi_sense_is_deferred(sshdr) ?
- "deferred" : "current",
- sshdr->response_code >= 0x72 ?
- " [descriptor]" : "");
-}
-EXPORT_SYMBOL(scsi_show_sense_hdr);
-
-/*
- * Print normalized SCSI sense header with a prefix.
- */
-void
-scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
- const struct scsi_sense_hdr *sshdr)
-{
- scsi_show_sense_hdr(sdev, name, sshdr);
- scsi_show_extd_sense(sdev, name, sshdr->asc, sshdr->ascq);
-}
-EXPORT_SYMBOL(scsi_print_sense_hdr);
-
-static void
-scsi_dump_sense_buffer(const unsigned char *sense_buffer, int sense_len)
-{
- int k, num;
-
- num = (sense_len < 32) ? sense_len : 32;
- printk("Unrecognized sense data (in hex):");
- for (k = 0; k < num; ++k) {
- if (0 == (k % 16)) {
- printk("\n");
- printk(KERN_INFO " ");
- }
- printk("%02x ", sense_buffer[k]);
- }
- printk("\n");
- return;
-}
-
-/* Normalize and print sense buffer with name prefix */
-void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
- const unsigned char *sense_buffer, int sense_len)
-{
- struct scsi_sense_hdr sshdr;
-
- if (!scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) {
- scsi_dump_sense_buffer(sense_buffer, sense_len);
- return;
- }
- scsi_show_sense_hdr(sdev, name, &sshdr);
- scsi_show_extd_sense(sdev, name, sshdr.asc, sshdr.ascq);
-}
-EXPORT_SYMBOL(__scsi_print_sense);
-
-/* Normalize and print sense buffer in SCSI command */
-void scsi_print_sense(const struct scsi_cmnd *cmd)
-{
- struct gendisk *disk = cmd->request->rq_disk;
- const char *disk_name = disk ? disk->disk_name : NULL;
-
- __scsi_print_sense(cmd->device, disk_name, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE);
-}
-EXPORT_SYMBOL(scsi_print_sense);
-
-#ifdef CONFIG_SCSI_CONSTANTS
-
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
@@ -1410,17 +1225,13 @@ static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
-#endif
-
const char *scsi_hostbyte_string(int result)
{
const char *hb_string = NULL;
-#ifdef CONFIG_SCSI_CONSTANTS
int hb = host_byte(result);
if (hb < ARRAY_SIZE(hostbyte_table))
hb_string = hostbyte_table[hb];
-#endif
return hb_string;
}
EXPORT_SYMBOL(scsi_hostbyte_string);
@@ -1428,17 +1239,14 @@ EXPORT_SYMBOL(scsi_hostbyte_string);
const char *scsi_driverbyte_string(int result)
{
const char *db_string = NULL;
-#ifdef CONFIG_SCSI_CONSTANTS
int db = driver_byte(result);
if (db < ARRAY_SIZE(driverbyte_table))
db_string = driverbyte_table[db];
-#endif
return db_string;
}
EXPORT_SYMBOL(scsi_driverbyte_string);
-#ifdef CONFIG_SCSI_CONSTANTS
#define scsi_mlreturn_name(result) { result, #result }
static const struct value_name_pair scsi_mlreturn_arr[] = {
scsi_mlreturn_name(NEEDS_RETRY),
@@ -1451,11 +1259,9 @@ static const struct value_name_pair scsi_mlreturn_arr[] = {
scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED),
scsi_mlreturn_name(FAST_IO_FAIL)
};
-#endif
const char *scsi_mlreturn_string(int result)
{
-#ifdef CONFIG_SCSI_CONSTANTS
const struct value_name_pair *arr = scsi_mlreturn_arr;
int k;
@@ -1463,29 +1269,6 @@ const char *scsi_mlreturn_string(int result)
if (result == arr->value)
return arr->name;
}
-#endif
return NULL;
}
EXPORT_SYMBOL(scsi_mlreturn_string);
-
-void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
-{
- const char *mlret_string = scsi_mlreturn_string(disposition);
- const char *hb_string = scsi_hostbyte_string(cmd->result);
- const char *db_string = scsi_driverbyte_string(cmd->result);
-
- if (hb_string || db_string)
- scmd_printk(KERN_INFO, cmd,
- "%s%s Result: hostbyte=%s driverbyte=%s",
- msg ? msg : "",
- mlret_string ? mlret_string : "UNKNOWN",
- hb_string ? hb_string : "invalid",
- db_string ? db_string : "invalid");
- else
- scmd_printk(KERN_INFO, cmd,
- "%s%s Result: hostbyte=0x%02x driverbyte=0x%02x",
- msg ? msg : "",
- mlret_string ? mlret_string : "UNKNOWN",
- host_byte(cmd->result), driver_byte(cmd->result));
-}
-EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
index 913b9a92fb06..3681a3fbd499 100644
--- a/drivers/scsi/csiostor/Makefile
+++ b/drivers/scsi/csiostor/Makefile
@@ -8,5 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
- csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \
+ csio_hw.o csio_hw_t5.o csio_isr.o \
csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 9ab997e18b20..2e66f34ebb79 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -60,37 +60,10 @@ int csio_msi = 2;
static int dev_num;
/* FCoE Adapter types & its description */
-static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
- {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
- {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
- {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
- {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
- {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
- {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
- {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
- {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
- {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
- {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
- {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
- {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
- {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
- {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
- {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
- {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
- {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
- {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
- {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
- {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
- {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
- {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
- {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
- {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
-};
-
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
- {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
+ {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
@@ -107,7 +80,9 @@ static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
- {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
+ {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
+ {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
+ {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@ -188,9 +163,9 @@ void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
unsigned int mask, unsigned int val)
{
- csio_wr_reg32(hw, addr, TP_PIO_ADDR);
- val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
- csio_wr_reg32(hw, val, TP_PIO_DATA);
+ csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
+ val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
+ csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}
void
@@ -256,7 +231,7 @@ csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
}
pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
- *data = le32_to_cpu(*data);
+ *data = le32_to_cpu(*(__le32 *)data);
return 0;
}
@@ -421,17 +396,15 @@ csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
-
- csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
- ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
- 10, NULL);
+ csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
+ BYTECNT_V(byte_cnt - 1), SF_OP_A);
+ ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+ 10, NULL);
if (!ret)
- *valp = csio_rd_reg32(hw, SF_DATA);
+ *valp = csio_rd_reg32(hw, SF_DATA_A);
return ret;
}
@@ -453,16 +426,14 @@ csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
-
- csio_wr_reg32(hw, val, SF_DATA);
- csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+ csio_wr_reg32(hw, val, SF_DATA_A);
+ csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
+ OP_V(1) | SF_LOCK_V(lock), SF_OP_A);
- return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
10, NULL);
}
@@ -533,11 +504,11 @@ csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
for ( ; nwords; nwords--, data++) {
ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
if (nwords == 1)
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
if (ret)
return ret;
if (byte_oriented)
- *data = htonl(*data);
+ *data = (__force __u32) htonl(*data);
}
return 0;
}
@@ -586,7 +557,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
if (ret)
goto unlock;
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
/* Read the page to verify the write succeeded */
ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -603,7 +574,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
return 0;
unlock:
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
return ret;
}
@@ -641,7 +612,7 @@ out:
if (ret)
csio_err(hw, "erase of flash sector %d failed, error %d\n",
start, ret);
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
return 0;
}
@@ -665,7 +636,7 @@ csio_hw_print_fw_version(struct csio_hw *hw, char *str)
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
- return csio_hw_read_flash(hw, FW_IMG_START +
+ return csio_hw_read_flash(hw, FLASH_FW_START +
offsetof(struct fw_hdr, fw_ver), 1,
vers, 0);
}
@@ -686,43 +657,6 @@ csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
}
/*
- * csio_hw_check_fw_version - check if the FW is compatible with
- * this driver
- * @hw: HW module
- *
- * Checks if an adapter's FW is compatible with the driver. Returns 0
- * if there's exact match, a negative error if the version could not be
- * read or there's a major/minor version mismatch/minor.
- */
-static int
-csio_hw_check_fw_version(struct csio_hw *hw)
-{
- int ret, major, minor, micro;
-
- ret = csio_hw_get_fw_version(hw, &hw->fwrev);
- if (!ret)
- ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
- if (ret)
- return ret;
-
- major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
- minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
- micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
-
- if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
- csio_err(hw, "card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR(hw));
- return -EINVAL;
- }
-
- if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
- return 0; /* perfect match */
-
- /* Minor/micro version mismatch */
- return -EINVAL;
-}
-
-/*
* csio_hw_fw_dload - download firmware.
* @hw: HW module
* @fw_data: firmware image to write.
@@ -762,9 +696,9 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
return -EINVAL;
}
- if (size > FW_MAX_SIZE) {
+ if (size > FLASH_FW_MAX_SIZE) {
csio_err(hw, "FW image too large, max is %u bytes\n",
- FW_MAX_SIZE);
+ FLASH_FW_MAX_SIZE);
return -EINVAL;
}
@@ -780,10 +714,10 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
- FW_START_SEC, FW_START_SEC + i - 1);
+ FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);
- ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
- FW_START_SEC + i - 1);
+ ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
+ FLASH_FW_START_SEC + i - 1);
if (ret) {
csio_err(hw, "Flash Erase failed\n");
goto out;
@@ -796,14 +730,14 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
- ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
FW_IMG_START, FW_IMG_START + size);
- addr = FW_IMG_START;
+ addr = FLASH_FW_START;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -813,7 +747,7 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
}
ret = csio_hw_write_flash(hw,
- FW_IMG_START +
+ FLASH_FW_START +
offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver),
(const uint8_t *)&hdr->fw_ver);
@@ -833,7 +767,7 @@ csio_hw_get_flash_params(struct csio_hw *hw)
ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
if (!ret)
ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
if (ret != 0)
return ret;
@@ -861,17 +795,17 @@ csio_hw_dev_ready(struct csio_hw *hw)
uint32_t reg;
int cnt = 6;
- while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
- (--cnt != 0))
+ while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
+ (--cnt != 0))
mdelay(100);
- if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
- (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+ if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
+ (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
return -EIO;
}
- hw->pfn = SOURCEPF_GET(reg);
+ hw->pfn = SOURCEPF_G(reg);
return 0;
}
@@ -959,8 +893,8 @@ retry:
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
- pcie_fw = csio_rd_reg32(hw, PCIE_FW);
- if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
+ if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
@@ -976,10 +910,10 @@ retry:
* report errors preferentially.
*/
if (state) {
- if (pcie_fw & PCIE_FW_ERR) {
+ if (pcie_fw & PCIE_FW_ERR_F) {
*state = CSIO_DEV_STATE_ERR;
rv = -ETIMEDOUT;
- } else if (pcie_fw & PCIE_FW_INIT)
+ } else if (pcie_fw & PCIE_FW_INIT_F)
*state = CSIO_DEV_STATE_INIT;
}
@@ -988,9 +922,9 @@ retry:
* there's not a valid Master PF, grab its identity
* for our caller.
*/
- if (mpfn == PCIE_FW_MASTER_MASK &&
- (pcie_fw & PCIE_FW_MASTER_VLD))
- mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+ if (mpfn == PCIE_FW_MASTER_M &&
+ (pcie_fw & PCIE_FW_MASTER_VLD_F))
+ mpfn = PCIE_FW_MASTER_G(pcie_fw);
break;
}
hw->flags &= ~CSIO_HWF_MASTER;
@@ -1078,7 +1012,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
if (!fw_rst) {
/* PIO reset */
- csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
mdelay(2000);
return 0;
}
@@ -1090,7 +1024,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
}
csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
- PIORSTMODE | PIORST, 0, NULL);
+ PIORSTMODE_F | PIORST_F, 0, NULL);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "Issue of RESET command failed.n");
@@ -1156,7 +1090,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
* If a legitimate mailbox is provided, issue a RESET command
* with a HALT indication.
*/
- if (mbox <= PCIE_FW_MASTER_MASK) {
+ if (mbox <= PCIE_FW_MASTER_M) {
struct csio_mb *mbp;
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
@@ -1166,7 +1100,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
}
csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
- PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
+ PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
NULL);
if (csio_mb_issue(hw, mbp)) {
@@ -1193,8 +1127,9 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
* rather than a RESET ... if it's new enough to understand that ...
*/
if (retval == 0 || force) {
- csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
- csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+ csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
+ PCIE_FW_HALT_F);
}
/*
@@ -1234,7 +1169,7 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
- csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+ csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
@@ -1243,21 +1178,21 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
* valid mailbox or the RESET command failed, fall back to
* hitting the chip with a hammer.
*/
- if (mbox <= PCIE_FW_MASTER_MASK) {
- csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ if (mbox <= PCIE_FW_MASTER_M) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
msleep(100);
if (csio_do_reset(hw, true) == 0)
return 0;
}
- csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
msleep(2000);
} else {
int ms;
- csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
- if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+ if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
@@ -1315,116 +1250,6 @@ csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
return csio_hw_fw_restart(hw, mbox, reset);
}
-
-/*
- * csio_hw_fw_config_file - setup an adapter via a Configuration File
- * @hw: the HW module
- * @mbox: mailbox to use for the FW command
- * @mtype: the memory type where the Configuration File is located
- * @maddr: the memory address where the Configuration File is located
- * @finiver: return value for CF [fini] version
- * @finicsum: return value for CF [fini] checksum
- * @cfcsum: return value for CF computed checksum
- *
- * Issue a command to get the firmware to process the Configuration
- * File located at the specified mtype/maddress. If the Configuration
- * File is processed successfully and return value pointers are
- * provided, the Configuration File "[fini] section version and
- * checksum values will be returned along with the computed checksum.
- * It's up to the caller to decide how it wants to respond to the
- * checksums not matching but it recommended that a prominant warning
- * be emitted in order to help people rapidly identify changed or
- * corrupted Configuration Files.
- *
- * Also note that it's possible to modify things like "niccaps",
- * "toecaps",etc. between processing the Configuration File and telling
- * the firmware to use the new configuration. Callers which want to
- * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- * Configuration Files if they want to do this.
- */
-static int
-csio_hw_fw_config_file(struct csio_hw *hw,
- unsigned int mtype, unsigned int maddr,
- uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
-{
- struct csio_mb *mbp;
- struct fw_caps_config_cmd *caps_cmd;
- int rv = -EINVAL;
- enum fw_retval ret;
-
- mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
- if (!mbp) {
- CSIO_INC_STATS(hw, n_err_nomem);
- return -ENOMEM;
- }
- /*
- * Tell the firmware to process the indicated Configuration File.
- * If there are no errors and the caller has provided return value
- * pointers for the [fini] section version, checksum and computed
- * checksum, pass those back to the caller.
- */
- caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
- CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
- caps_cmd->op_to_write =
- htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F);
- caps_cmd->cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
- FW_LEN16(*caps_cmd));
-
- if (csio_mb_issue(hw, mbp)) {
- csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
- goto out;
- }
-
- ret = csio_mb_fw_retval(mbp);
- if (ret != FW_SUCCESS) {
- csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
- goto out;
- }
-
- if (finiver)
- *finiver = ntohl(caps_cmd->finiver);
- if (finicsum)
- *finicsum = ntohl(caps_cmd->finicsum);
- if (cfcsum)
- *cfcsum = ntohl(caps_cmd->cfcsum);
-
- /* Validate device capabilities */
- if (csio_hw_validate_caps(hw, mbp)) {
- rv = -ENOENT;
- goto out;
- }
-
- /*
- * And now tell the firmware to use the configuration we just loaded.
- */
- caps_cmd->op_to_write =
- htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F);
- caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
-
- if (csio_mb_issue(hw, mbp)) {
- csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
- goto out;
- }
-
- ret = csio_mb_fw_retval(mbp);
- if (ret != FW_SUCCESS) {
- csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
- goto out;
- }
-
- rv = 0;
-out:
- mempool_free(mbp, hw->mb_mempool);
- return rv;
-}
-
/*
* csio_get_device_params - Get device parameters.
* @hw: HW module
@@ -1547,7 +1372,8 @@ csio_config_device_caps(struct csio_hw *hw)
}
/* Validate device capabilities */
- if (csio_hw_validate_caps(hw, mbp))
+ rv = csio_hw_validate_caps(hw, mbp);
+ if (rv != 0)
goto out;
/* Don't config device capabilities if already configured */
@@ -1756,9 +1582,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
uint32_t *cfg_data;
int value_to_add = 0;
- if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
+ if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
csio_err(hw, "could not find config file %s, err: %d\n",
- CSIO_CF_FNAME(hw), ret);
+ FW_CFG_NAME_T5, ret);
return -ENOENT;
}
@@ -1798,8 +1624,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
}
if (ret == 0) {
csio_info(hw, "config file upgraded to %s\n",
- CSIO_CF_FNAME(hw));
- snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
+ FW_CFG_NAME_T5);
+ snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
}
leave:
@@ -1827,11 +1653,13 @@ leave:
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
+ struct csio_mb *mbp = NULL;
+ struct fw_caps_config_cmd *caps_cmd;
unsigned int mtype, maddr;
- int rv;
+ int rv = -EINVAL;
uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
- int using_flash;
char path[64];
+ char *config_name = NULL;
/*
* Reset device if necessary
@@ -1851,51 +1679,107 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
rv = csio_hw_flash_config(hw, fw_cfg_param, path);
spin_lock_irq(&hw->lock);
if (rv != 0) {
- if (rv == -ENOENT) {
- /*
- * config file was not found. Use default
- * config file from flash.
- */
- mtype = FW_MEMTYPE_CF_FLASH;
- maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
- using_flash = 1;
- } else {
- /*
- * we revert back to the hardwired config if
- * flashing failed.
- */
- goto bye;
- }
+ /*
+ * config file was not found. Use default
+ * config file from flash.
+ */
+ config_name = "On FLASH";
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
} else {
+ config_name = path;
mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
- using_flash = 0;
}
- hw->cfg_store = (uint8_t)mtype;
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+ /*
+ * Tell the firmware to process the indicated Configuration File.
+ * If there are no errors and the caller has provided return value
+ * pointers for the [fini] section version, checksum and computed
+ * checksum, pass those back to the caller.
+ */
+ caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ caps_cmd->cfvalid_to_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
+ FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ rv = -EINVAL;
+ goto bye;
+ }
+
+ rv = csio_mb_fw_retval(mbp);
+ /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the
+ * firmware. A very few early versions of the firmware didn't
+ * have one embedded but we can ignore those.
+ */
+ if (rv == ENOENT) {
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ rv = -EINVAL;
+ goto bye;
+ }
+
+ rv = csio_mb_fw_retval(mbp);
+ config_name = "Firmware Default";
+ }
+ if (rv != FW_SUCCESS)
+ goto bye;
+
+ finiver = ntohl(caps_cmd->finiver);
+ finicsum = ntohl(caps_cmd->finicsum);
+ cfcsum = ntohl(caps_cmd->cfcsum);
/*
- * Issue a Capability Configuration command to the firmware to get it
- * to parse the Configuration File.
+ * And now tell the firmware to use the configuration we just loaded.
*/
- rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
- &finicsum, &cfcsum);
- if (rv != 0)
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ rv = -EINVAL;
goto bye;
+ }
- hw->cfg_finiver = finiver;
- hw->cfg_finicsum = finicsum;
- hw->cfg_cfcsum = cfcsum;
- hw->cfg_csum_status = true;
+ rv = csio_mb_fw_retval(mbp);
+ if (rv != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto bye;
+ }
+ mempool_free(mbp, hw->mb_mempool);
if (finicsum != cfcsum) {
csio_warn(hw,
"Config File checksum mismatch: csum=%#x, computed=%#x\n",
finicsum, cfcsum);
-
- hw->cfg_csum_status = false;
}
+ /* Validate device capabilities */
+ rv = csio_hw_validate_caps(hw, mbp);
+ if (rv != 0)
+ goto bye;
/*
* Note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -1918,56 +1802,184 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
/* Post event to notify completion of configuration */
csio_post_event(&hw->sm, CSIO_HWE_INIT);
- csio_info(hw,
- "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
- (using_flash ? "in device FLASH" : path), finiver, cfcsum);
-
+ csio_info(hw, "Successfully configure using Firmware "
+ "Configuration File %s, version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
return 0;
/*
* Something bad happened. Return the error ...
*/
bye:
+ if (mbp)
+ mempool_free(mbp, hw->mb_mempool);
hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
- csio_dbg(hw, "Configuration file error %d\n", rv);
+ csio_warn(hw, "Configuration file error %d\n", rv);
return rv;
}
-/*
- * Attempt to initialize the adapter via hard-coded, driver supplied
- * parameters ...
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
*/
-static int
-csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
- int rv;
- /*
- * Reset device if necessary
- */
- if (reset) {
- rv = csio_do_reset(hw, true);
- if (rv != 0)
- goto out;
+
+ /* short circuit if it's the exact same firmware version */
+ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+ return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+ if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+ SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+ return 1;
+#undef SAME_INTF
+
+ return 0;
+}
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
+ int k, int c)
+{
+ const char *reason;
+
+ if (!card_fw_usable) {
+ reason = "incompatible or unusable";
+ goto install;
}
- /* Get and set device capabilities */
- rv = csio_config_device_caps(hw);
- if (rv != 0)
- goto out;
+ if (k > c) {
+ reason = "older than the version supported with this driver";
+ goto install;
+ }
- /* device parameters */
- rv = csio_get_device_params(hw);
- if (rv != 0)
- goto out;
+ return 0;
- /* Configure SGE */
- csio_wr_sge_init(hw);
+install:
+ csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
+ "installing firmware %u.%u.%u.%u on card.\n",
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- /* Post event to notify completion of configuration */
- csio_post_event(&hw->sm, CSIO_HWE_INIT);
+ return 1;
+}
-out:
- return rv;
+static struct fw_info fw_info_array[] = {
+ {
+ .chip = CHELSIO_T5,
+ .fs_name = FW_CFG_NAME_T5,
+ .fw_mod_name = FW_FNAME_T5,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T5,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+ .intfver_nic = FW_INTFVER(T5, NIC),
+ .intfver_vnic = FW_INTFVER(T5, VNIC),
+ .intfver_ri = FW_INTFVER(T5, RI),
+ .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T5, FCOE),
+ },
+ }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+ if (fw_info_array[i].chip == chip)
+ return &fw_info_array[i];
+ }
+ return NULL;
+}
+
+static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum csio_dev_state state,
+ int *reset)
+{
+ int ret, card_fw_usable, fs_fw_usable;
+ const struct fw_hdr *fs_fw;
+ const struct fw_hdr *drv_fw;
+
+ drv_fw = &fw_info->fw_hdr;
+
+ /* Read the header of the firmware on the card */
+ ret = csio_hw_read_flash(hw, FLASH_FW_START,
+ sizeof(*card_fw) / sizeof(uint32_t),
+ (uint32_t *)card_fw, 1);
+ if (ret == 0) {
+ card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+ } else {
+ csio_err(hw,
+ "Unable to read card's firmware header: %d\n", ret);
+ card_fw_usable = 0;
+ }
+
+ if (fw_data != NULL) {
+ fs_fw = (const void *)fw_data;
+ fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+ } else {
+ fs_fw = NULL;
+ fs_fw_usable = 0;
+ }
+
+ if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+ (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+ /* Common case: the firmware on the card is an exact match and
+ * the filesystem one is an exact match too, or the filesystem
+ * one is absent/incompatible.
+ */
+ } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
+ csio_should_install_fs_fw(hw, card_fw_usable,
+ be32_to_cpu(fs_fw->fw_ver),
+ be32_to_cpu(card_fw->fw_ver))) {
+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
+ fw_size, 0);
+ if (ret != 0) {
+ csio_err(hw,
+ "failed to install firmware: %d\n", ret);
+ goto bye;
+ }
+
+ /* Installed successfully, update the cached header too. */
+ memcpy(card_fw, fs_fw, sizeof(*card_fw));
+ card_fw_usable = 1;
+ *reset = 0; /* already reset as part of load_fw */
+ }
+
+ if (!card_fw_usable) {
+ uint32_t d, c, k;
+
+ d = be32_to_cpu(drv_fw->fw_ver);
+ c = be32_to_cpu(card_fw->fw_ver);
+ k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+ csio_err(hw, "Cannot find a usable firmware: "
+ "chip state %d, "
+ "driver compiled with %d.%d.%d.%d, "
+ "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+ state,
+ FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+ FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+ ret = -EINVAL;
+ goto bye;
+ }
+
+ /* We're using whatever's on the card and it's known to be good. */
+ hw->fwrev = be32_to_cpu(card_fw->fw_ver);
+ hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+ return ret;
}
/*
@@ -1977,48 +1989,52 @@ out:
* latest firmware ECANCELED is returned
*/
static int
-csio_hw_flash_fw(struct csio_hw *hw)
+csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
int ret = -ECANCELED;
const struct firmware *fw;
- const struct fw_hdr *hdr;
- u32 fw_ver;
+ struct fw_info *fw_info;
+ struct fw_hdr *card_fw;
struct pci_dev *pci_dev = hw->pdev;
struct device *dev = &pci_dev->dev ;
+ const u8 *fw_data = NULL;
+ unsigned int fw_size = 0;
- if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
- csio_err(hw, "could not find firmware image %s, err: %d\n",
- CSIO_FW_FNAME(hw), ret);
+ /* This is the firmware whose headers the driver was compiled
+ * against
+ */
+ fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
+ if (fw_info == NULL) {
+ csio_err(hw,
+ "unable to get firmware info for chip %d.\n",
+ CHELSIO_CHIP_VERSION(hw->chip_id));
return -EINVAL;
}
- hdr = (const struct fw_hdr *)fw->data;
- fw_ver = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
- return -EINVAL; /* wrong major version, won't do */
+ if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
+ csio_err(hw, "could not find firmware image %s, err: %d\n",
+ FW_FNAME_T5, ret);
+ } else {
+ fw_data = fw->data;
+ fw_size = fw->size;
+ }
- /*
- * If the flash FW is unusable or we found something newer, load it.
+ /* allocate memory to read the header of the firmware on the
+ * card
*/
- if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
- fw_ver > hw->fwrev) {
- ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
- /*force=*/false);
- if (!ret)
- csio_info(hw,
- "firmware upgraded to version %pI4 from %s\n",
- &hdr->fw_ver, CSIO_FW_FNAME(hw));
- else
- csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
- } else
- ret = -EINVAL;
+ card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+ if (!card_fw) {
+ if (fw != NULL)
+ release_firmware(fw);
+ return -ENOMEM;
+ }
- release_firmware(fw);
+ /* upgrade FW logic */
+ ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+ hw->fw_state, reset);
+ /* Cleaning up */
+ if (fw != NULL)
+ release_firmware(fw);
+ kfree(card_fw);
return ret;
}
-
/*
* csio_hw_configure - Configure HW
* @hw - HW module
@@ -2039,7 +2055,7 @@ csio_hw_configure(struct csio_hw *hw)
}
/* HW version */
- hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
/* Needed for FW download */
rv = csio_hw_get_flash_params(hw);
@@ -2074,50 +2090,43 @@ csio_hw_configure(struct csio_hw *hw)
if (rv != 0)
goto out;
+ csio_hw_get_fw_version(hw, &hw->fwrev);
+ csio_hw_get_tp_version(hw, &hw->tp_vers);
if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
- rv = csio_hw_check_fw_version(hw);
- if (rv == -EINVAL) {
/* Do firmware update */
- spin_unlock_irq(&hw->lock);
- rv = csio_hw_flash_fw(hw);
- spin_lock_irq(&hw->lock);
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_fw(hw, &reset);
+ spin_lock_irq(&hw->lock);
- if (rv == 0) {
- reset = 0;
- /*
- * Note that the chip was reset as part of the
- * firmware upgrade so we don't reset it again
- * below and grab the new firmware version.
- */
- rv = csio_hw_check_fw_version(hw);
- }
- }
- /*
- * If the firmware doesn't support Configuration
- * Files, use the old Driver-based, hard-wired
- * initialization. Otherwise, try using the
- * Configuration File support and fall back to the
- * Driver-based initialization if there's no
- * Configuration File found.
+ if (rv != 0)
+ goto out;
+
+ /* If the firmware doesn't support Configuration Files,
+ * return an error.
*/
- if (csio_hw_check_fwconfig(hw, param) == 0) {
- rv = csio_hw_use_fwconfig(hw, reset, param);
- if (rv == -ENOENT)
- goto out;
- if (rv != 0) {
- csio_info(hw,
- "No Configuration File present "
- "on adapter. Using hard-wired "
- "configuration parameters.\n");
- rv = csio_hw_no_fwconfig(hw, reset);
- }
- } else {
- rv = csio_hw_no_fwconfig(hw, reset);
+ rv = csio_hw_check_fwconfig(hw, param);
+ if (rv != 0) {
+ csio_info(hw, "Firmware doesn't support "
+ "Firmware Configuration files\n");
+ goto out;
}
- if (rv != 0)
+ /* The firmware provides us with a memory buffer where we can
+ * load a Configuration File from the host if we want to
+ * override the Configuration File in flash.
+ */
+ rv = csio_hw_use_fwconfig(hw, reset, param);
+ if (rv == -ENOENT) {
+ csio_info(hw, "Could not initialize "
+ "adapter, error%d\n", rv);
+ goto out;
+ }
+ if (rv != 0) {
+ csio_info(hw, "Could not initialize "
+ "adapter, error%d\n", rv);
goto out;
+ }
} else {
if (hw->fw_state == CSIO_DEV_STATE_INIT) {
@@ -2217,7 +2226,7 @@ out:
return;
}
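
From here on, most of the hunks are mechanical renames of register and bit-field macros to the newer shared-header convention (as read from this patch; the definitions themselves live in the common t4_regs.h/t4_values.h headers): an _A suffix marks a register address, _S a field shift, _M a field mask, _V(x) a value shifted into field position, _G(x) the matching extractor, and _F the single-bit flag form. A hypothetical field FOO, 4 bits wide at bit 8, would follow this illustrative pattern (FOO is invented for illustration, not a real register field):

/* Illustrative pattern only -- not taken from the kernel headers. */
#define FOO_S    8
#define FOO_M    0xfU
#define FOO_V(x) ((x) << FOO_S)
#define FOO_G(x) (((x) >> FOO_S) & FOO_M)
#define FOO_F    FOO_V(1U)	/* shorthand used for single-bit fields */
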
-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)
/*
* csio_hw_intr_enable - Enable HW interrupts
@@ -2229,21 +2238,21 @@ static void
csio_hw_intr_enable(struct csio_hw *hw)
{
uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
- uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
- uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+ uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
/*
* Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
* by FW, so do nothing for INTX.
*/
if (hw->intr_mode == CSIO_IM_MSIX)
- csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
- AIVEC(AIVEC_MASK), vec);
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+ AIVEC_V(AIVEC_M), vec);
else if (hw->intr_mode == CSIO_IM_MSI)
- csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
- AIVEC(AIVEC_MASK), 0);
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+ AIVEC_V(AIVEC_M), 0);
- csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
/* Turn on MB interrupts - this will internally flush PIO as well */
csio_mb_intr_enable(hw);
@@ -2253,19 +2262,19 @@ csio_hw_intr_enable(struct csio_hw *hw)
/*
* Disable the Serial FLASH interrupt, if enabled!
*/
- pl &= (~SF);
- csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+ pl &= (~SF_F);
+ csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
- csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
- EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
- ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
- ERR_DATA_CPL_ON_HIGH_QID1 |
- ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
- ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
- ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
- ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
- SGE_INT_ENABLE3);
- csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
+ EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
+ ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+ ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+ ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+ ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
+ SGE_INT_ENABLE3_A);
+ csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
}
hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
@@ -2281,16 +2290,16 @@ csio_hw_intr_enable(struct csio_hw *hw)
void
csio_hw_intr_disable(struct csio_hw *hw)
{
- uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+ uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
return;
hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
- csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
if (csio_is_hw_master(hw))
- csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+ csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
/* Turn off MB interrupts */
csio_mb_intr_disable(hw);
@@ -2300,7 +2309,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
void
csio_hw_fatal_err(struct csio_hw *hw)
{
- csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+ csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
csio_hw_intr_disable(hw);
/* Do not reset HW, we may need FW state for debugging */
@@ -2594,7 +2603,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
* register directly.
*/
csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
- csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
mdelay(2000);
break;
@@ -2682,11 +2691,11 @@ static void csio_tp_intr_handler(struct csio_hw *hw)
{
static struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
- { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+ if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2698,52 +2707,52 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
uint64_t v;
static struct intr_info sge_intr_info[] = {
- { ERR_CPL_EXCEED_IQE_SIZE,
+ { ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 },
- { ERR_INVALID_CIDX_INC,
+ { ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 },
- { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
- { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
- { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
- { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 },
- { ERR_ING_CTXT_PRIO,
+ { ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO,
+ { ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
- { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
- { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0, NULL, 0, 0 }
};
- v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
- ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
if (v) {
csio_fatal(hw, "SGE parity error (%#llx)\n",
(unsigned long long)v);
csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
- SGE_INT_CAUSE1);
- csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+ SGE_INT_CAUSE1_A);
+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
}
- v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
- if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
+ if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0)
csio_hw_fatal_err(hw);
}
-#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
- OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
-#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
- IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+ OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+ IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
/*
* CIM interrupt handler.
@@ -2751,53 +2760,53 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
static void csio_cim_intr_handler(struct csio_hw *hw)
{
static struct intr_info cim_intr_info[] = {
- { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
- { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
- { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
- { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
- { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info cim_upintr_info[] = {
- { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
- { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
- { ILLWRINT, "CIM illegal write", -1, 1 },
- { ILLRDINT, "CIM illegal read", -1, 1 },
- { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
- { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
- { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
- { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
- { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
- { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
- { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
- { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
- { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
- { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
- { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
- { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
- { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
- { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
- { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
- { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
- { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
- { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
- { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
- { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
- { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
- { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
- { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
- { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT_F, "CIM illegal write", -1, 1 },
+ { ILLRDINT_F, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
{ 0, NULL, 0, 0 }
};
int fat;
- fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
- cim_intr_info) +
- csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
- cim_upintr_info);
+ fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
+ cim_intr_info) +
+ csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
+ cim_upintr_info);
if (fat)
csio_hw_fatal_err(hw);
}
@@ -2813,7 +2822,7 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2823,19 +2832,19 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
static struct intr_info ulptx_intr_info[] = {
- { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
0 },
{ 0xfffffff, "ULPTX parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2845,20 +2854,20 @@ static void csio_ulptx_intr_handler(struct csio_hw *hw)
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
static struct intr_info pmtx_intr_info[] = {
- { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
- { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
{ 0xffffff0, "PMTX framing error", -1, 1 },
- { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+ { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
1 },
- { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
- { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+ { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+ if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2868,17 +2877,17 @@ static void csio_pmtx_intr_handler(struct csio_hw *hw)
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
static struct intr_info pmrx_intr_info[] = {
- { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
{ 0x3ffff0, "PMRX framing error", -1, 1 },
- { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+ { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
1 },
- { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
- { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+ { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+ if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2888,16 +2897,16 @@ static void csio_pmrx_intr_handler(struct csio_hw *hw)
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
static struct intr_info cplsw_intr_info[] = {
- { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
- { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
- { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
- { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
- { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
- { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+ if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2907,15 +2916,15 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
static void csio_le_intr_handler(struct csio_hw *hw)
{
static struct intr_info le_intr_info[] = {
- { LIPMISS, "LE LIP miss", -1, 0 },
- { LIP0, "LE 0 LIP error", -1, 0 },
- { PARITYERR, "LE parity error", -1, 1 },
- { UNKNOWNCMD, "LE unknown command", -1, 1 },
- { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { LIPMISS_F, "LE LIP miss", -1, 0 },
+ { LIP0_F, "LE 0 LIP error", -1, 0 },
+ { PARITYERR_F, "LE parity error", -1, 1 },
+ { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { REQQPARERR_F, "LE request queue parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2929,19 +2938,22 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_tx_intr_info[] = {
- { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
- { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
- { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
- { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
- { BUBBLE, "MPS Tx underflow", -1, 1 },
- { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
- { FRMERR, "MPS Tx framing error", -1, 1 },
+ { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+ -1, 1 },
+ { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+ -1, 1 },
+ { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+ { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR_F, "MPS Tx framing error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_trc_intr_info[] = {
- { FILTMEM, "MPS TRC filter parity error", -1, 1 },
- { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
- { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+ -1, 1 },
+ { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_stat_sram_intr_info[] = {
@@ -2957,36 +2969,37 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_cls_intr_info[] = {
- { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
- { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
- { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
int fat;
- fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
- mps_rx_intr_info) +
- csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
- mps_tx_intr_info) +
- csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
- mps_trc_intr_info) +
- csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
- mps_stat_sram_intr_info) +
- csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
- mps_stat_tx_intr_info) +
- csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
- mps_stat_rx_intr_info) +
- csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
- mps_cls_intr_info);
-
- csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
- csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
+ fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
+ mps_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
+ mps_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
+ mps_trc_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+ mps_stat_sram_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+ mps_stat_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+ mps_stat_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
+ mps_cls_intr_info);
+
+ csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
+ csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
if (fat)
csio_hw_fatal_err(hw);
}
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+ ECC_UE_INT_CAUSE_F)
/*
* EDC/MC interrupt handler.
@@ -2998,28 +3011,28 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
unsigned int addr, cnt_addr, v;
if (idx <= MEM_EDC1) {
- addr = EDC_REG(EDC_INT_CAUSE, idx);
- cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
} else {
- addr = MC_INT_CAUSE;
- cnt_addr = MC_ECC_STATUS;
+ addr = MC_INT_CAUSE_A;
+ cnt_addr = MC_ECC_STATUS_A;
}
v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
- if (v & PERR_INT_CAUSE)
+ if (v & PERR_INT_CAUSE_F)
csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
- if (v & ECC_CE_INT_CAUSE) {
- uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+ if (v & ECC_CE_INT_CAUSE_F) {
+ uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
- csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+ csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
csio_warn(hw, "%u %s correctable ECC data error%s\n",
cnt, name[idx], cnt > 1 ? "s" : "");
}
- if (v & ECC_UE_INT_CAUSE)
+ if (v & ECC_UE_INT_CAUSE_F)
csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
csio_wr_reg32(hw, v, addr);
- if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
csio_hw_fatal_err(hw);
}
@@ -3028,18 +3041,18 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
*/
static void csio_ma_intr_handler(struct csio_hw *hw)
{
- uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+ uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
- if (status & MEM_PERR_INT_CAUSE)
+ if (status & MEM_PERR_INT_CAUSE_F)
csio_fatal(hw, "MA parity error, parity status %#x\n",
- csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
- if (status & MEM_WRAP_INT_CAUSE) {
- v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+ csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
+ if (status & MEM_WRAP_INT_CAUSE_F) {
+ v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
csio_fatal(hw,
"MA address wrap-around error by client %u to address %#x\n",
- MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+ MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
}
- csio_wr_reg32(hw, status, MA_INT_CAUSE);
+ csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
csio_hw_fatal_err(hw);
}
@@ -3049,13 +3062,13 @@ static void csio_ma_intr_handler(struct csio_hw *hw)
static void csio_smb_intr_handler(struct csio_hw *hw)
{
static struct intr_info smb_intr_info[] = {
- { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
- { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
- { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+ if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
csio_hw_fatal_err(hw);
}
@@ -3065,14 +3078,14 @@ static void csio_smb_intr_handler(struct csio_hw *hw)
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
static struct intr_info ncsi_intr_info[] = {
- { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
- { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
- { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
- { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+ if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
csio_hw_fatal_err(hw);
}
@@ -3081,17 +3094,17 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw)
*/
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
- uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
+ uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
- v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
if (!v)
return;
- if (v & TXFIFO_PRTY_ERR)
+ if (v & TXFIFO_PRTY_ERR_F)
csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
- if (v & RXFIFO_PRTY_ERR)
+ if (v & RXFIFO_PRTY_ERR_F)
csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
- csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
+ csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
csio_hw_fatal_err(hw);
}
@@ -3101,12 +3114,12 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
static void csio_pl_intr_handler(struct csio_hw *hw)
{
static struct intr_info pl_intr_info[] = {
- { FATALPERR, "T4 fatal parity error", -1, 1 },
- { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+ { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+ if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
csio_hw_fatal_err(hw);
}
@@ -3121,7 +3134,7 @@ static void csio_pl_intr_handler(struct csio_hw *hw)
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
- uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+ uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
if (!(cause & CSIO_GLBL_INTR_MASK)) {
CSIO_INC_STATS(hw, n_plint_unexp);
@@ -3132,75 +3145,75 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
CSIO_INC_STATS(hw, n_plint_cnt);
- if (cause & CIM)
+ if (cause & CIM_F)
csio_cim_intr_handler(hw);
- if (cause & MPS)
+ if (cause & MPS_F)
csio_mps_intr_handler(hw);
- if (cause & NCSI)
+ if (cause & NCSI_F)
csio_ncsi_intr_handler(hw);
- if (cause & PL)
+ if (cause & PL_F)
csio_pl_intr_handler(hw);
- if (cause & SMB)
+ if (cause & SMB_F)
csio_smb_intr_handler(hw);
- if (cause & XGMAC0)
+ if (cause & XGMAC0_F)
csio_xgmac_intr_handler(hw, 0);
- if (cause & XGMAC1)
+ if (cause & XGMAC1_F)
csio_xgmac_intr_handler(hw, 1);
- if (cause & XGMAC_KR0)
+ if (cause & XGMAC_KR0_F)
csio_xgmac_intr_handler(hw, 2);
- if (cause & XGMAC_KR1)
+ if (cause & XGMAC_KR1_F)
csio_xgmac_intr_handler(hw, 3);
- if (cause & PCIE)
+ if (cause & PCIE_F)
hw->chip_ops->chip_pcie_intr_handler(hw);
- if (cause & MC)
+ if (cause & MC_F)
csio_mem_intr_handler(hw, MEM_MC);
- if (cause & EDC0)
+ if (cause & EDC0_F)
csio_mem_intr_handler(hw, MEM_EDC0);
- if (cause & EDC1)
+ if (cause & EDC1_F)
csio_mem_intr_handler(hw, MEM_EDC1);
- if (cause & LE)
+ if (cause & LE_F)
csio_le_intr_handler(hw);
- if (cause & TP)
+ if (cause & TP_F)
csio_tp_intr_handler(hw);
- if (cause & MA)
+ if (cause & MA_F)
csio_ma_intr_handler(hw);
- if (cause & PM_TX)
+ if (cause & PM_TX_F)
csio_pmtx_intr_handler(hw);
- if (cause & PM_RX)
+ if (cause & PM_RX_F)
csio_pmrx_intr_handler(hw);
- if (cause & ULP_RX)
+ if (cause & ULP_RX_F)
csio_ulprx_intr_handler(hw);
- if (cause & CPL_SWITCH)
+ if (cause & CPL_SWITCH_F)
csio_cplsw_intr_handler(hw);
- if (cause & SGE)
+ if (cause & SGE_F)
csio_sge_intr_handler(hw);
- if (cause & ULP_TX)
+ if (cause & ULP_TX_F)
csio_ulptx_intr_handler(hw);
/* Clear the interrupts just processed for which we are the master. */
- csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
- csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+ csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
+ csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
return 1;
}
@@ -3840,13 +3853,7 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
- if (prot_type == CSIO_T4_FCOE_ASIC) {
- memcpy(hw->hw_ver,
- csio_t4_fcoe_adapters[adap_type].model_no, 16);
- memcpy(hw->model_desc,
- csio_t4_fcoe_adapters[adap_type].description,
- 32);
- } else if (prot_type == CSIO_T5_FCOE_ASIC) {
+ if (prot_type == CSIO_T5_FCOE_ASIC) {
memcpy(hw->hw_ver,
csio_t5_fcoe_adapters[adap_type].model_no, 16);
memcpy(hw->model_desc,
@@ -3883,8 +3890,8 @@ csio_hw_init(struct csio_hw *hw)
strcpy(hw->name, CSIO_HW_NAME);
- /* Initialize the HW chip ops with T4/T5 specific ops */
- hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
+ /* Initialize the HW chip ops with T5 specific ops */
+ hw->chip_ops = &t5_ops;
/* Set the model & its description */
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 68248da1b9af..029bef82c057 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -48,6 +48,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
+#include "t4_hw.h"
#include "csio_hw_chip.h"
#include "csio_wr.h"
#include "csio_mb.h"
@@ -117,10 +118,10 @@ extern int csio_msi;
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
-#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
- EDC1 | LE | TP | MA | PM_TX | PM_RX | \
- ULP_RX | CPL_SWITCH | SGE | \
- ULP_TX | SF)
+#define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
+ EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
+ PM_TX_F | PM_RX_F | ULP_RX_F | \
+ CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
/*
* Hard parameters used to initialize the card in the absence of a
@@ -174,16 +175,12 @@ struct csio_evt_msg {
};
enum {
- EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
SERNUM_LEN = 16, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
- TRACE_LEN = 112, /* length of trace data and mask */
};
enum {
- SF_PAGE_SIZE = 256, /* serial flash page size */
- SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
@@ -199,39 +196,8 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
-
- FW_START_SEC = 8, /* first flash sector for FW */
- FW_END_SEC = 15, /* last flash sector for FW */
- FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
- FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
-
- FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
- FLASH_CFG_OFFSET = 0x1f0000,
- FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
};
-/*
- * Flash layout.
- */
-#define FLASH_START(start) ((start) * SF_SEC_SIZE)
-#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
-
-enum {
- /*
- * Location of firmware image in FLASH.
- */
- FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 8,
- FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
- FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
-
- /* Location of Firmware Configuration File in FLASH. */
- FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
-};
-
-#undef FLASH_START
-#undef FLASH_MAX_SIZE
-
/* Management module */
enum {
CSIO_MGMT_EQ_WRSIZE = 512,
@@ -482,11 +448,6 @@ struct csio_hw {
uint32_t tp_vers;
char chip_ver;
uint16_t chip_id; /* Tells T4/T5 chip */
- uint32_t cfg_finiver;
- uint32_t cfg_finicsum;
- uint32_t cfg_cfcsum;
- uint8_t cfg_csum_status;
- uint8_t cfg_store;
enum csio_dev_state fw_state;
struct csio_vpd vpd;
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
index 4752fed476df..b56a11d817be 100644
--- a/drivers/scsi/csiostor/csio_hw_chip.h
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -37,24 +37,27 @@
#include "csio_defs.h"
/* Define MACRO values */
-#define CSIO_HW_T4 0x4000
-#define CSIO_T4_FCOE_ASIC 0x4600
#define CSIO_HW_T5 0x5000
#define CSIO_T5_FCOE_ASIC 0x5600
#define CSIO_HW_CHIP_MASK 0xF000
-#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)
-#define FW_FNAME_T4 "cxgb4/t4fw.bin"
#define FW_FNAME_T5 "cxgb4/t5fw.bin"
-#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt"
#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
-/* Define static functions */
-static inline int csio_is_t4(uint16_t chip)
-{
- return (chip == CSIO_HW_T4);
-}
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA 0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
static inline int csio_is_t5(uint16_t chip)
{
@@ -65,30 +68,22 @@ static inline int csio_is_t5(uint16_t chip)
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
-#define CSIO_HW_PIDX(hw, index) \
- (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \
- (PIDX_T5(index) | DBTYPE(1U)))
-
-#define CSIO_HW_LP_INT_THRESH(hw, val) \
- (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \
- (V_LP_INT_THRESH_T5(val)))
-
-#define CSIO_HW_M_LP_INT_THRESH(hw) \
- (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
-
-#define CSIO_MAC_INT_CAUSE_REG(hw, port) \
- (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
- (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
-
-#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
-#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
-#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
-
-#define CSIO_FW_FNAME(hw) \
- (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
-
-#define CSIO_CF_FNAME(hw) \
- (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5)
+#include "t4fw_api.h"
+#include "t4fw_version.h"
+
+#define FW_VERSION(chip) ( \
+ FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+ u8 chip;
+ char *fs_name;
+ char *fw_mod_name;
+ struct fw_hdr fw_hdr;
+};
/* Declare ENUMS */
enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
@@ -96,7 +91,6 @@ enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
enum {
MEMWIN_APERTURE = 2048,
MEMWIN_BASE = 0x1b800,
- MEMWIN_CSIOSTOR = 6, /* PCI-e Memory Window access */
};
/* Slow path handlers */
@@ -122,7 +116,6 @@ struct csio_hw_chip_ops {
void (*chip_dfs_create_ext_mem)(struct csio_hw *);
};
-extern struct csio_hw_chip_ops t4_ops;
extern struct csio_hw_chip_ops t5_ops;
#endif /* #ifndef __CSIO_HW_CHIP_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
deleted file mode 100644
index 95d831857640..000000000000
--- a/drivers/scsi/csiostor/csio_hw_t4.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is part of the Chelsio FCoE driver for Linux.
- *
- * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "csio_hw.h"
-#include "csio_init.h"
-
-/*
- * Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static uint32_t
-csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
-{
- u32 val = 0;
- struct csio_mb *mbp;
- int rv;
- struct fw_ldst_cmd *ldst_cmd;
-
- mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
- if (!mbp) {
- CSIO_INC_STATS(hw, n_err_nomem);
- pci_read_config_dword(hw->pdev, reg, &val);
- return val;
- }
-
- csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
- rv = csio_mb_issue(hw, mbp);
-
- /*
- * If the LDST Command suucceeded, exctract the returned register
- * value. Otherwise read it directly ourself.
- */
- if (rv == 0) {
- ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
- val = ntohl(ldst_cmd->u.pcie.data[0]);
- } else
- pci_read_config_dword(hw->pdev, reg, &val);
-
- mempool_free(mbp, hw->mb_mempool);
-
- return val;
-}
-
-static int
-csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
-{
- u32 bar0;
- u32 mem_win_base;
-
- /*
- * Truncation intentional: we only read the bottom 32-bits of the
- * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
- * read BAR0 instead of using pci_resource_start() because we could be
- * operating from within a Virtual Machine which is trapping our
- * accesses to our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will be
- * coming across the PCI-E link.
- */
- bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-
- mem_win_base = bar0 + MEMWIN_BASE;
-
- /*
- * Set up memory window for accessing adapter memory ranges. (Read
- * back MA register to ensure that changes propagate before we attempt
- * to use the new values.)
- */
- csio_wr_reg32(hw, mem_win_base | BIR(0) |
- WINDOW(ilog2(MEMWIN_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
- csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
- return 0;
-}
-
-/*
- * Interrupt handler for the PCIE module.
- */
-static void
-csio_t4_pcie_intr_handler(struct csio_hw *hw)
-{
- static struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
-
- static struct intr_info pcie_intr_info[] = {
- { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- { MSIDATAPERR, "MSI data parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- { MATAGPERR, "PCI MA tag parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- { RXWRPERR, "PCI Rx write parity error", -1, 1 },
- { RPLPERR, "PCI replay buffer parity error", -1, 1 },
- { PCIESINT, "PCI core secondary fault", -1, 1 },
- { PCIEPINT, "PCI core primary fault", -1, 1 },
- { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
- 0 },
- { 0, NULL, 0, 0 }
- };
-
- int fat;
- fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
- csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
- if (fat)
- csio_hw_fatal_err(hw);
-}
-
-/*
- * csio_t4_flash_cfg_addr - return the address of the flash configuration file
- * @hw: the HW module
- *
- * Return the address within the flash where the Firmware Configuration
- * File is stored.
- */
-static unsigned int
-csio_t4_flash_cfg_addr(struct csio_hw *hw)
-{
- return FLASH_CFG_OFFSET;
-}
-
-/*
- * csio_t4_mc_read - read from MC through backdoor accesses
- * @hw: the hw module
- * @idx: not used for T4 adapter
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from MC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-static int
-csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
- uint64_t *ecc)
-{
- int i;
-
- if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
- return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
- csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
- csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
- MC_BIST_CMD);
- i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
- 0, 10, 1, NULL);
- if (i)
- return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
- if (ecc)
- *ecc = csio_rd_reg64(hw, MC_DATA(16));
-#undef MC_DATA
- return 0;
-}
-
-/*
- * csio_t4_edc_read - read from EDC through backdoor accesses
- * @hw: the hw module
- * @idx: which EDC to access
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-static int
-csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
- uint64_t *ecc)
-{
- int i;
-
- idx *= EDC_STRIDE;
- if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
- return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
- csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
- csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
- csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
- EDC_BIST_CMD + idx);
- i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
- 0, 10, 1, NULL);
- if (i)
- return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
- if (ecc)
- *ecc = csio_rd_reg64(hw, EDC_DATA(16));
-#undef EDC_DATA
- return 0;
-}
-
-/*
- * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
- * @hw: the csio_hw
- * @win: PCI-E memory Window to use
- * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
- * @addr: address within indicated memory type
- * @len: amount of memory to transfer
- * @buf: host memory buffer
- * @dir: direction of transfer 1 => read, 0 => write
- *
- * Reads/writes an [almost] arbitrary memory region in the firmware: the
- * firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries. The memory is transferred as a raw byte sequence
- * from/to the firmware's memory. If this memory contains data
- * structures which contain multi-byte integers, it's the callers
- * responsibility to perform appropriate byte order conversions.
- */
-static int
-csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
- u32 len, uint32_t *buf, int dir)
-{
- u32 pos, start, offset, memoffset, bar0;
- u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base;
-
- /*
- * Argument sanity checks ...
- */
- if ((addr & 0x3) || (len & 0x3))
- return -EINVAL;
-
- /* Offset into the region of memory which is being accessed
- * MEM_EDC0 = 0
- * MEM_EDC1 = 1
- * MEM_MC = 2 -- T4
- */
- edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
- if (mtype != MEM_MC1)
- memoffset = (mtype * (edc_size * 1024 * 1024));
- else {
- mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
- MA_EXT_MEMORY_BAR_A));
- memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
- }
-
- /* Determine the PCIE_MEM_ACCESS_OFFSET */
- addr = addr + memoffset;
-
- /*
- * Each PCI-E Memory Window is programmed with a window size -- or
- * "aperture" -- which controls the granularity of its mapping onto
- * adapter memory. We need to grab that aperture in order to know
- * how to use the specified window. The window is also programmed
- * with the base address of the Memory Window in BAR0's address
- * space. For T4 this is an absolute PCI-E Bus Address. For T5
- * the address is relative to BAR0.
- */
- mem_reg = csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
- mem_aperture = 1 << (WINDOW(mem_reg) + 10);
- mem_base = GET_PCIEOFST(mem_reg) << 10;
-
- bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
- mem_base -= bar0;
-
- start = addr & ~(mem_aperture-1);
- offset = addr - start;
-
- csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
- mem_reg, mem_aperture);
- csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
- mem_base, memoffset);
- csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n",
- bar0, start, offset);
- csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
- mtype, addr, len);
-
- for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
- /*
- * Move PCI-E Memory Window to our current transfer
- * position. Read it back to ensure that changes propagate
- * before we attempt to use the new value.
- */
- csio_wr_reg32(hw, pos,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
- csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
-
- while (offset < mem_aperture && len > 0) {
- if (dir)
- *buf++ = csio_rd_reg32(hw, mem_base + offset);
- else
- csio_wr_reg32(hw, *buf++, mem_base + offset);
-
- offset += sizeof(__be32);
- len -= sizeof(__be32);
- }
- }
- return 0;
-}
-
-/*
- * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values
- * @hw: the csio_hw
- *
- * This function creates files in the debugfs with external memory region MC.
- */
-static void
-csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
-{
- u32 size;
- int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
-
- if (i & EXT_MEM_ENABLE_F) {
- size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
- csio_add_debugfs_mem(hw, "mc", MEM_MC,
- EXT_MEM_SIZE_G(size));
- }
-}
-
-/* T4 adapter specific function */
-struct csio_hw_chip_ops t4_ops = {
- .chip_set_mem_win = csio_t4_set_mem_win,
- .chip_pcie_intr_handler = csio_t4_pcie_intr_handler,
- .chip_flash_cfg_addr = csio_t4_flash_cfg_addr,
- .chip_mc_read = csio_t4_mc_read,
- .chip_edc_read = csio_t4_edc_read,
- .chip_memory_rw = csio_t4_memory_rw,
- .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem,
-};
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 66e180a58718..3267f4f627c9 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -56,11 +56,11 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
* back MA register to ensure that changes propagate before we attempt
* to use the new values.)
*/
- csio_wr_reg32(hw, mem_win_base | BIR(0) |
- WINDOW(ilog2(MEMWIN_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
return 0;
}
@@ -72,74 +72,74 @@ static void
csio_t5_pcie_intr_handler(struct csio_hw *hw)
{
static struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
+ { RNPP_F, "RXNP array parity error", -1, 1 },
+ { RPCP_F, "RXPC array parity error", -1, 1 },
+ { RCIP_F, "RXCIF array parity error", -1, 1 },
+ { RCCP_F, "Rx completions control array parity error", -1, 1 },
+ { RFTP_F, "RXFT array parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { TPCP_F, "TXPC array parity error", -1, 1 },
+ { TNPP_F, "TXNP array parity error", -1, 1 },
+ { TFTP_F, "TXFT array parity error", -1, 1 },
+ { TCAP_F, "TXCA array parity error", -1, 1 },
+ { TCIP_F, "TXCIF array parity error", -1, 1 },
+ { RCAP_F, "RXCA array parity error", -1, 1 },
+ { OTDD_F, "outbound request TLP discarded", -1, 1 },
+ { RDPE_F, "Rx data parity error", -1, 1 },
+ { TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_intr_info[] = {
- { MSTGRPPERR, "Master Response Read Queue parity error",
+ { MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
- { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
- { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
-1, 1 },
- { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
-1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DREQWRPERR, "PCI DMA channel write request parity error",
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR_F, "PCI DMA channel write request parity error",
-1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
- { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
-1, 1 },
- { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+ { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
-1, 1 },
- { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
- { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
- { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
- { READRSPERR, "Outbound read error", -1, 0 },
+ { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR_F, "Outbound read error", -1, 0 },
{ 0, NULL, 0, 0 }
};
int fat;
fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) +
csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}
@@ -177,25 +177,25 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
- mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
- mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
- mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
- mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
- mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
+ mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+ mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+ mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+ mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
- if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+ if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
mc_bist_cmd_reg);
- i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+ i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -231,27 +231,27 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
- edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
- edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
- edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
- edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
+ edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
- if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+ if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
edc_bist_cmd_reg);
- i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+ i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -320,13 +320,13 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
- mem_aperture = 1 << (WINDOW(mem_reg) + 10);
- mem_base = GET_PCIEOFST(mem_reg) << 10;
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+ mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+ mem_base = PCIEOFST_G(mem_reg) << 10;
start = addr & ~(mem_aperture-1);
offset = addr - start;
- win_pf = V_PFNUM(hw->pfn);
+ win_pf = PFNUM_V(hw->pfn);
csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
mem_reg, mem_aperture);
@@ -344,9 +344,9 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* before we attempt to use the new value.
*/
csio_wr_reg32(hw, pos | win_pf,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
while (offset < mem_aperture && len > 0) {
if (dir)
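
Most of the churn in this series is the register macro rename: _A for register addresses, _S/_M for a field's shift and mask, _V(x) to place a value, _G(x) to extract it, and _F for the single-bit flag form. A rough sketch of how such accessors are conventionally built; the field positions below are invented for illustration, not taken from the cxgb4 headers:

/* Illustrative only: the _S/_M/_V/_G/_F accessor pattern. */
#include <stdio.h>

#define PCIEOFST_S     10
#define PCIEOFST_M     0xfffffU
#define PCIEOFST_V(x)  ((x) << PCIEOFST_S)
#define PCIEOFST_G(x)  (((x) >> PCIEOFST_S) & PCIEOFST_M)

#define START_BIST_S     31
#define START_BIST_V(x)  ((unsigned)(x) << START_BIST_S)
#define START_BIST_F     START_BIST_V(1U)   /* flag form of a one-bit field */

int main(void)
{
        unsigned reg = PCIEOFST_V(0x123u) | START_BIST_F;

        printf("offset field: 0x%x, bist set: %u\n",
               PCIEOFST_G(reg), !!(reg & START_BIST_F));
        return 0;
}
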
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 34d20cc3e110..9b9794d42ffe 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1176,9 +1176,8 @@ static struct pci_error_handlers csio_err_handler = {
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static struct pci_device_id csio_pci_tbl[] = {
-/* Define for iSCSI uses PF5, FCoE uses PF6 */
-#define CH_PCI_DEVICE_ID_FUNCTION 0x5
-#define CH_PCI_DEVICE_ID_FUNCTION2 0x6
+/* Define for FCoE uses PF6 */
+#define CH_PCI_DEVICE_ID_FUNCTION 0x6
#define CH_PCI_ID_TABLE_ENTRY(devid) \
{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
@@ -1256,5 +1255,4 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
-MODULE_FIRMWARE(FW_FNAME_T4);
MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index a8c748a35f9c..2fb71c6c3b37 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id)
/* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX)
- csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+ csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
/*
* The read in the following function will flush the
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 87f9280d9b43..c00b2ff72b55 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
else {
/* Program DSGL to dma payload */
dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_MORE | ULPTX_NSGE(1));
+ ULPTX_MORE_F | ULPTX_NSGE_V(1));
dsgl.len0 = cpu_to_be32(pld_len);
dsgl.addr0 = cpu_to_be64(pld->paddr);
csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 08c265c0f353..9451787ca7f2 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -327,7 +327,8 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
}
#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\
+ FW_PORT_CAP_ANEG)
/*
* csio_mb_port- FW PORT command helper
@@ -1104,8 +1105,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
void
csio_mb_intr_enable(struct csio_hw *hw)
{
- csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
- csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}
/*
@@ -1117,8 +1118,9 @@ csio_mb_intr_enable(struct csio_hw *hw)
void
csio_mb_intr_disable(struct csio_hw *hw)
{
- csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
- csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
+ MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}
static void
@@ -1153,8 +1155,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
int i;
__be64 cmd[CSIO_MB_MAX_REGS];
- uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
- uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = sizeof(struct fw_debug_cmd);
/* Copy mailbox data */
@@ -1164,8 +1166,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
csio_mb_dump_fw_dbg(hw, cmd);
/* Notify FW of mailbox by setting owner as UP */
- csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
- ctl_reg);
+ csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+ MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
csio_rd_reg32(hw, ctl_reg);
wmb();
@@ -1187,8 +1189,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
__be64 *cmd = mbp->mb;
__be64 hdr;
struct csio_mbm *mbm = &hw->mbm;
- uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
- uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = mbp->mb_size;
int rv = -EINVAL;
struct fw_cmd_hdr *fw_hdr;
@@ -1224,12 +1226,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
}
/* Now get ownership of mailbox */
- owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
if (!csio_mb_is_host_owner(owner)) {
for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
- owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
/*
* Mailbox unavailable. In immediate mode, fail the command.
* In other modes, enqueue the request.
@@ -1271,10 +1273,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
if (mbp->mb_cbfn != NULL) {
mbm->mcurrent = mbp;
mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
- csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
- MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+ csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+ MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
} else
- csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+ csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
ctl_reg);
/* Flush posted writes */
@@ -1294,9 +1296,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Check for response */
ctl = csio_rd_reg32(hw, ctl_reg);
- if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+ if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
- if (!(ctl & MBMSGVALID)) {
+ if (!(ctl & MBMSGVALID_F)) {
csio_wr_reg32(hw, 0, ctl_reg);
continue;
}
@@ -1457,16 +1459,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
__be64 *cmd;
uint32_t ctl, cim_cause, pl_cause;
int i;
- uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
- uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size;
__be64 hdr;
struct fw_cmd_hdr *fw_hdr;
- pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
- cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+ pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
+ cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
- if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+ if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
CSIO_INC_STATS(hw, n_mbint_unexp);
return -EINVAL;
}
@@ -1477,16 +1479,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
* the upper level cause register. In other words, CIM-cause
* first followed by PL-Cause next.
*/
- csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
- csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+ csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
+ csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
ctl = csio_rd_reg32(hw, ctl_reg);
- if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+ if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
- if (!(ctl & MBMSGVALID)) {
+ if (!(ctl & MBMSGVALID_F)) {
csio_warn(hw,
"Stray mailbox interrupt recvd,"
" mailbox data not valid\n");
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 3987284e0d2a..2c4562d82dc0 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
struct csio_dma_buf *dma_buf;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
- sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
- ULPTX_NSGE(req->nsge));
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+ ULPTX_NSGE_V(req->nsge));
/* Now add the data SGLs */
if (likely(!req->dcopy)) {
scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 773da14cfa14..e8f18174f2e9 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
- csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
- sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
reg * sizeof(uint32_t));
}
@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
- return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
}
/* Ring freelist doorbell */
@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
* 8 freelist buffer pointers (since each pointer is 8 bytes).
*/
if (flq->inc_idx >= 8) {
- csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
- CSIO_HW_PIDX(hw, flq->inc_idx / 8),
- MYPF_REG(SGE_PF_KDOORBELL));
+ csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
+ PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
+ MYPF_REG(SGE_PF_KDOORBELL_A));
flq->inc_idx &= 7;
}
}
@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
- csio_wr_reg32(hw, CIDXINC(0) |
- INGRESSQID(iqid) |
- TIMERREG(X_TIMERREG_RESTART_COUNTER),
- MYPF_REG(SGE_PF_GTS));
+ csio_wr_reg32(hw, CIDXINC_V(0) |
+ INGRESSQID_V(iqid) |
+ TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
+ MYPF_REG(SGE_PF_GTS_A));
}
/*
@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
wmb();
/* Ring SGE Doorbell writing q->pidx into it */
- csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
- CSIO_HW_PIDX(hw, q->inc_idx),
- MYPF_REG(SGE_PF_KDOORBELL));
+ csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
+ PIDX_T5_V(q->inc_idx) | DBTYPE_F,
+ MYPF_REG(SGE_PF_KDOORBELL_A));
q->inc_idx = 0;
return 0;
@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
restart:
/* Now inform SGE about our incremental index value */
- csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
- INGRESSQID(q->un.iq.physiqid) |
- TIMERREG(csio_sge_timer_reg),
- MYPF_REG(SGE_PF_GTS));
+ csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) |
+ INGRESSQID_V(q->un.iq.physiqid) |
+ TIMERREG_V(csio_sge_timer_reg),
+ MYPF_REG(SGE_PF_GTS_A));
q->stats.n_tot_rsps += q->inc_idx;
q->inc_idx = 0;
@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;
- csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
- HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
- HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
- HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
- SGE_HOST_PAGE_SIZE);
+ csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
+ HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
+ HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
+ HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
+ SGE_HOST_PAGE_SIZE_A);
sge->csio_fl_align = clsz < 32 ? 32 : clsz;
ingpad = ilog2(sge->csio_fl_align) - 5;
- csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE(1),
- INGPADBOUNDARY(ingpad) |
- EGRSTATUSPAGESIZE(stat_len != 64));
+ csio_set_reg_field(hw, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(ingpad) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
/* FL BUFFER SIZE#0 is Page size, i.e. already aligned to cache line */
- csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
/*
* If using hard params, the following will get set correctly
@@ -1333,23 +1334,24 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
*/
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE2);
+ SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE3);
+ SGE_FL_BUFFER_SIZE3_A);
}
- csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+ csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
/* default value of rx_dma_offset of the NIC driver */
- csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
- PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+ csio_set_reg_field(hw, SGE_CONTROL_A,
+ PKTSHIFT_V(PKTSHIFT_M),
+ PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));
- csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
- CSUM_HAS_PSEUDO_HDR, 0);
+ csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
+ CSUM_HAS_PSEUDO_HDR_F, 0);
}
static void
@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw)
u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
u32 ingress_rx_threshold;
- sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
- ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+ ingpad = INGPADBOUNDARY_G(sge->sge_control);
switch (ingpad) {
case X_INGPCIEBOUNDARY_32B:
@@ -1410,28 +1412,28 @@ csio_wr_get_sge(struct csio_hw *hw)
for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
csio_get_flbuf_size(hw, sge, i);
- timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
- timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
- timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+ timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
+ timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
+ timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);
sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE0_GET(timer_value_0_and_1));
+ TIMERVALUE0_G(timer_value_0_and_1));
sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE1_GET(timer_value_0_and_1));
+ TIMERVALUE1_G(timer_value_0_and_1));
sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE2_GET(timer_value_2_and_3));
+ TIMERVALUE2_G(timer_value_2_and_3));
sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE3_GET(timer_value_2_and_3));
+ TIMERVALUE3_G(timer_value_2_and_3));
sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE4_GET(timer_value_4_and_5));
+ TIMERVALUE4_G(timer_value_4_and_5));
sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE5_GET(timer_value_4_and_5));
+ TIMERVALUE5_G(timer_value_4_and_5));
- ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
- sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
- sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
- sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
- sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+ ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
+ sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+ sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+ sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+ sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
csio_init_intr_coalesce_parms(hw);
}
@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Data to the Free List.
*/
- csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+ csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
- sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
@@ -1464,22 +1466,23 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
- HP_INT_THRESH(HP_INT_THRESH_MASK) |
- CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
- HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
- CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
+ LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+ LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
+ HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+ HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
- csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
- ENABLE_DROP);
+ csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
+ ENABLE_DROP_F);
/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
- & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
- & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1502,26 +1505,26 @@ csio_wr_set_sge(struct csio_hw *hw)
sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
- csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
- THRESHOLD_1(sge->counter_val[1]) |
- THRESHOLD_2(sge->counter_val[2]) |
- THRESHOLD_3(sge->counter_val[3]),
- SGE_INGRESS_RX_THRESHOLD);
+ csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
+ THRESHOLD_1_V(sge->counter_val[1]) |
+ THRESHOLD_2_V(sge->counter_val[2]) |
+ THRESHOLD_3_V(sge->counter_val[3]),
+ SGE_INGRESS_RX_THRESHOLD_A);
csio_wr_reg32(hw,
- TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
- TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
- SGE_TIMER_VALUE_0_AND_1);
+ TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+ TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+ SGE_TIMER_VALUE_0_AND_1_A);
csio_wr_reg32(hw,
- TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
- TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
- SGE_TIMER_VALUE_2_AND_3);
+ TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+ TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+ SGE_TIMER_VALUE_2_AND_3_A);
csio_wr_reg32(hw,
- TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
- TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
- SGE_TIMER_VALUE_4_AND_5);
+ TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+ TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+ SGE_TIMER_VALUE_4_AND_5_A);
csio_init_intr_coalesce_parms(hw);
}
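
The doorbell rewrites above preserve the batching rule in csio_wr_ring_fldb(): freelist pointers are posted to the SGE in groups of eight, and any remainder is carried until the next call. A small sketch of that arithmetic (the doorbell encoding is a stand-in, not the real SGE_PF_KDOORBELL layout):

/* Sketch of the freelist doorbell batching; hardware counts in units of
 * eight 8-byte buffer pointers, so ring with inc/8 and keep inc%8. */
#include <stdint.h>
#include <stdio.h>

static void ring_doorbell(uint32_t qid, uint32_t pidx_inc)
{
        printf("doorbell: qid=%u pidx+=%u\n", qid, pidx_inc);
}

static void maybe_ring(uint32_t qid, uint32_t *inc_idx)
{
        if (*inc_idx >= 8) {
                ring_doorbell(qid, *inc_idx / 8);
                *inc_idx &= 7;               /* carry the sub-batch remainder */
        }
}

int main(void)
{
        uint32_t inc = 0;

        for (int i = 0; i < 20; i++) {       /* post 20 buffers one at a time */
                inc++;
                maybe_ring(3, &inc);
        }
        printf("left over: %u\n", inc);      /* 4 */
        return 0;
}
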
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index a83d2ceded83..dd00e5fe4a5e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -28,6 +28,7 @@
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
+#include "clip_tbl.h"
static unsigned int dbg_level;
@@ -704,7 +705,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
unsigned short tcp_opt = ntohs(req->tcp_opt);
unsigned int tid = GET_TID(req);
- unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
u32 rcv_isn = be32_to_cpu(req->rcv_isn);
@@ -752,15 +753,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
- csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
- if (GET_TCPOPT_TSTAMP(tcp_opt))
+ csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+ if (TCPOPT_TSTAMP_G(tcp_opt))
csk->advmss -= 12;
if (csk->advmss < 128)
csk->advmss = 128;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, mss_idx %u, advmss %u.\n",
- csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
+ csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
@@ -856,8 +857,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
unsigned int tid = GET_TID(rpl);
unsigned int atid =
- GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
- unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+ TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+ unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
@@ -1112,7 +1113,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
hlen = ntohs(cpl->len);
dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
- plen = ISCSI_PDU_LEN(pdu_len_ddp);
+ plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
if (is_t4(lldi->adapter_type))
plen -= 40;
@@ -1322,6 +1323,9 @@ static inline void l2t_put(struct cxgbi_sock *csk)
static void release_offload_resources(struct cxgbi_sock *csk)
{
struct cxgb4_lld_info *lldi;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct net_device *ndev = csk->cdev->ports[csk->port_id];
+#endif
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n",
@@ -1334,6 +1338,12 @@ static void release_offload_resources(struct cxgbi_sock *csk)
}
l2t_put(csk);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (csk->csk_family == AF_INET6)
+ cxgb4_clip_release(ndev,
+ (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+
if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
free_atid(csk);
else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
@@ -1391,10 +1401,15 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
- goto rel_resource;
+ goto rel_resource_without_clip;
}
cxgbi_sock_get(csk);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (csk->csk_family == AF_INET6)
+ cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+
if (t4) {
size = sizeof(struct cpl_act_open_req);
size6 = sizeof(struct cpl_act_open_req6);
@@ -1451,6 +1466,12 @@ static int init_act_open(struct cxgbi_sock *csk)
return 0;
rel_resource:
+#if IS_ENABLED(CONFIG_IPV6)
+ if (csk->csk_family == AF_INET6)
+ cxgb4_clip_release(ndev,
+ (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+rel_resource_without_clip:
if (n)
neigh_release(n);
if (skb)
@@ -1619,7 +1640,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -1651,7 +1672,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
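
The cxgb4i changes above pair a CLIP (compressed local IPv6) table reference with each IPv6 offloaded connection: taken after the L2T entry is acquired in init_act_open(), and dropped both in release_offload_resources() and on the new error label. A sketch of that unwind split, with every acquisition stubbed and the real resources simplified away:

/* Hedged sketch of the rel_resource / rel_resource_without_clip split:
 * the early label must not release something that was never acquired. */
#include <stdbool.h>

static bool get_l2t(void)   { return true; }
static void put_l2t(void)   { }
static bool get_clip(void)  { return true; }
static void put_clip(void)  { }
static int  send_open(void) { return -1; }   /* pretend the WR send fails */

static int act_open(bool ipv6)
{
        if (!get_l2t())
                return -1;                       /* nothing to unwind yet */
        if (ipv6 && !get_clip())
                goto rel_resource_without_clip;  /* l2t held, clip not */
        if (send_open() < 0)
                goto rel_resource;               /* both held */
        return 0;

rel_resource:
        if (ipv6)
                put_clip();
rel_resource_without_clip:
        put_l2t();
        return -1;
}

int main(void)
{
        return act_open(true) ? 0 : 1;           /* exercises the failure path */
}
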
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 0c6be0a17f53..5ee7f44cf869 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4610,13 +4610,10 @@ static void adapter_uninit(struct AdapterCtlBlk *acb)
}
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m,##args)
-
#undef YESNO
#define YESNO(YN) \
- if (YN) SPRINTF(" Yes ");\
- else SPRINTF(" No ")
+ if (YN) seq_printf(m, " Yes ");\
+ else seq_printf(m, " No ")
static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
@@ -4626,47 +4623,45 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
unsigned long flags;
int dev;
- SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
- SPRINTF(" Driver Version " DC395X_VERSION "\n");
+ seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
+ " Driver Version " DC395X_VERSION "\n");
DC395x_LOCK_IO(acb->scsi_host, flags);
- SPRINTF("SCSI Host Nr %i, ", host->host_no);
- SPRINTF("DC395U/UW/F DC315/U %s\n",
+ seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
+ seq_printf(m, "DC395U/UW/F DC315/U %s\n",
(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
- SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
- SPRINTF("irq_level 0x%04x, ", acb->irq_level);
- SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
+ seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
+ seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
+ seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
- SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
- SPRINTF("AdapterID %i\n", host->this_id);
+ seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
+ seq_printf(m, "AdapterID %i\n", host->this_id);
- SPRINTF("tag_max_num %i", acb->tag_max_num);
- /*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
- SPRINTF(", FilterCfg 0x%02x",
+ seq_printf(m, "tag_max_num %i", acb->tag_max_num);
+ /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
+ seq_printf(m, ", FilterCfg 0x%02x",
DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
- SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);
- /*SPRINTF("\n"); */
+ seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);
+ /*seq_printf(m, "\n"); */
- SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
- SPRINTF
- ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
+ seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
acb->dcb_map[6], acb->dcb_map[7]);
- SPRINTF
- (" %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
acb->dcb_map[14], acb->dcb_map[15]);
- SPRINTF
- ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
+ seq_puts(m,
+ "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
dev = 0;
list_for_each_entry(dcb, &acb->dcb_list, list) {
int nego_period;
- SPRINTF("%02i %02i %02i ", dev, dcb->target_id,
+ seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
dcb->target_lun);
YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
YESNO(dcb->sync_offset);
@@ -4676,53 +4671,53 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
nego_period = clock_period[dcb->sync_period & 0x07] << 2;
if (dcb->sync_offset)
- SPRINTF(" %03i ns ", nego_period);
+ seq_printf(m, " %03i ns ", nego_period);
else
- SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));
+ seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));
if (dcb->sync_offset & 0x0f) {
spd = 1000 / (nego_period);
spd1 = 1000 % (nego_period);
spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
- SPRINTF(" %2i.%1i M %02i ", spd, spd1,
+ seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
(dcb->sync_offset & 0x0f));
} else
- SPRINTF(" ");
+ seq_puts(m, " ");
/* Add more info ... */
- SPRINTF(" %02i\n", dcb->max_command);
+ seq_printf(m, " %02i\n", dcb->max_command);
dev++;
}
if (timer_pending(&acb->waiting_timer))
- SPRINTF("Waiting queue timer running\n");
+ seq_puts(m, "Waiting queue timer running\n");
else
- SPRINTF("\n");
+ seq_putc(m, '\n');
list_for_each_entry(dcb, &acb->dcb_list, list) {
struct ScsiReqBlk *srb;
if (!list_empty(&dcb->srb_waiting_list))
- SPRINTF("DCB (%02i-%i): Waiting: %i:",
+ seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
dcb->target_id, dcb->target_lun,
list_size(&dcb->srb_waiting_list));
list_for_each_entry(srb, &dcb->srb_waiting_list, list)
- SPRINTF(" %p", srb->cmd);
+ seq_printf(m, " %p", srb->cmd);
if (!list_empty(&dcb->srb_going_list))
- SPRINTF("\nDCB (%02i-%i): Going : %i:",
+ seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
dcb->target_id, dcb->target_lun,
list_size(&dcb->srb_going_list));
list_for_each_entry(srb, &dcb->srb_going_list, list)
- SPRINTF(" %p", srb->cmd);
+ seq_printf(m, " %p", srb->cmd);
if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
- SPRINTF("\n");
+ seq_putc(m, '\n');
}
if (debug_enabled(DBG_1)) {
- SPRINTF("DCB list for ACB %p:\n", acb);
+ seq_printf(m, "DCB list for ACB %p:\n", acb);
list_for_each_entry(dcb, &acb->dcb_list, list) {
- SPRINTF("%p -> ", dcb);
+ seq_printf(m, "%p -> ", dcb);
}
- SPRINTF("END\n");
+ seq_puts(m, "END\n");
}
DC395x_UNLOCK_IO(acb->scsi_host, flags);
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 1dba62c5cf6a..1efebc9eedfb 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
struct scsi_device *sdev = scsi_dh_data->sdev;
+ scsi_dh->detach(sdev);
+
spin_lock_irq(sdev->request_queue->queue_lock);
sdev->scsi_dh_data = NULL;
spin_unlock_irq(sdev->request_queue->queue_lock);
- scsi_dh->detach(sdev);
sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
module_put(scsi_dh->module);
}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 0bf976936a10..2806cfbec2b9 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -568,7 +568,7 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
- seq_printf(m, "Devices:\n");
+ seq_puts(m, "Devices:\n");
for(chan = 0; chan < MAX_CHANNEL; chan++) {
for(id = 0; id < MAX_ID; id++) {
d = pHba->channel[chan].device[id];
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 8319d2b417b8..ca8003f0d8a3 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -102,7 +102,7 @@ static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no, SD(shost)->name);
seq_printf(m, "Firmware revision: v%s\n",
SD(shost)->revision);
- seq_printf(m, "IO: PIO\n");
+ seq_puts(m, "IO: PIO\n");
seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base);
seq_printf(m, "Host Bus: %s\n",
(SD(shost)->bustype == 'P')?"PCI ":
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 6776931e25d4..78ce4d61a69b 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -813,12 +813,13 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
&devcontrol);
- if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+ if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
+ PCI_EXP_DEVCTL_READRQ_512B) {
esas2r_log(ESAS2R_LOG_INFO,
"max read request size > 512B");
devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
- devcontrol |= 0x2000;
+ devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
pci_write_config_word(a->pcid,
pcie_cap_reg + PCI_EXP_DEVCTL,
devcontrol);
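
The esas2r change above replaces the bare 0x2000 with the named PCI_EXP_DEVCTL_READRQ_512B constant; the logic is a clamp of the PCIe Device Control MaxReadReq field, where an encoded value n means 128 << n bytes. A standalone sketch of that clamp, with the mask values written out here rather than taken from the kernel headers:

/* Clamp the Max_Read_Request_Size field of PCIe Device Control to 512B. */
#include <stdint.h>
#include <stdio.h>

#define DEVCTL_READRQ_MASK  0x7000u          /* bits 14:12 */
#define DEVCTL_READRQ_512B  0x2000u          /* n = 2 -> 128 << 2 = 512 bytes */

static uint16_t clamp_readrq(uint16_t devctl)
{
        if ((devctl & DEVCTL_READRQ_MASK) > DEVCTL_READRQ_512B) {
                devctl &= ~DEVCTL_READRQ_MASK;
                devctl |= DEVCTL_READRQ_512B;
        }
        return devctl;
}

int main(void)
{
        printf("0x%04x\n", clamp_readrq(0x5000)); /* 4096B request clamped to 0x2000 */
        return 0;
}
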
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 7e1c21e6736b..31f8966b2e03 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -749,7 +749,7 @@ int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
if (dev_count == 0)
seq_puts(m, "none\n");
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index ce5bd52fe692..065b25df741b 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2396,8 +2396,6 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
if (!esp->num_tags)
esp->num_tags = ESP_DEFAULT_TAGS;
- else if (esp->num_tags >= ESP_MAX_TAG)
- esp->num_tags = ESP_MAX_TAG - 1;
esp->host->transportt = esp_transport_template;
esp->host->max_lun = ESP_MAX_LUN;
esp->host->cmd_per_lun = 2;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 9fb632684863..e66e997992e3 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -173,7 +173,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
/* request is i.e. "cat /proc/scsi/gdth/0" */
/* format: %-15s\t%-10s\t%-15s\t%s */
/* driver parameters */
- seq_printf(m, "Driver Parameters:\n");
+ seq_puts(m, "Driver Parameters:\n");
if (reserve_list[0] == 0xff)
strcpy(hrec, "--");
else {
@@ -192,7 +192,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
max_ids, hdr_channel);
/* controller information */
- seq_printf(m,"\nDisk Array Controller Information:\n");
+ seq_puts(m, "\nDisk Array Controller Information:\n");
seq_printf(m,
" Number: \t%d \tName: \t%s\n",
ha->hanum, ha->binfo.type_string);
@@ -219,7 +219,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
#ifdef GDTH_DMA_STATISTICS
/* controller statistics */
- seq_printf(m,"\nController Statistics:\n");
+ seq_puts(m, "\nController Statistics:\n");
seq_printf(m,
" 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
ha->dma32_cnt, ha->dma64_cnt);
@@ -227,7 +227,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
if (ha->more_proc) {
/* more information: 2. about physical devices */
- seq_printf(m, "\nPhysical Devices:");
+ seq_puts(m, "\nPhysical Devices:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -326,10 +326,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
if (!flag)
- seq_printf(m, "\n --\n");
+ seq_puts(m, "\n --\n");
/* 3. about logical drives */
- seq_printf(m,"\nLogical Drives:");
+ seq_puts(m, "\nLogical Drives:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -411,10 +411,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
if (!flag)
- seq_printf(m, "\n --\n");
+ seq_puts(m, "\n --\n");
/* 4. about array drives */
- seq_printf(m,"\nArray Drives:");
+ seq_puts(m, "\nArray Drives:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -471,10 +471,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
if (!flag)
- seq_printf(m, "\n --\n");
+ seq_puts(m, "\n --\n");
/* 5. about host drives */
- seq_printf(m,"\nHost Drives:");
+ seq_puts(m, "\nHost Drives:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
@@ -527,11 +527,11 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
}
if (!flag)
- seq_printf(m, "\n --\n");
+ seq_puts(m, "\n --\n");
}
/* controller events */
- seq_printf(m,"\nController Events:\n");
+ seq_puts(m, "\nController Events:\n");
for (id = -1;;) {
id = gdth_read_event(ha, id, estr);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6bb4611b238a..95d581c45413 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -50,6 +50,7 @@
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
+#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
@@ -59,8 +60,11 @@
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
-/* How long to wait (in milliseconds) for board to go into simple mode */
-#define MAX_CONFIG_WAIT 30000
+/* How long to wait for CISS doorbell communication */
+#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
+#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
+#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
+#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000
/*define how many times we will try a command because of bus resets */
@@ -164,24 +168,24 @@ static struct board_type products[] = {
{0x1926103C, "Smart Array P731m", &SA5_access},
{0x1928103C, "Smart Array P230i", &SA5_access},
{0x1929103C, "Smart Array P530", &SA5_access},
- {0x21BD103C, "Smart Array", &SA5_access},
- {0x21BE103C, "Smart Array", &SA5_access},
- {0x21BF103C, "Smart Array", &SA5_access},
- {0x21C0103C, "Smart Array", &SA5_access},
- {0x21C1103C, "Smart Array", &SA5_access},
- {0x21C2103C, "Smart Array", &SA5_access},
- {0x21C3103C, "Smart Array", &SA5_access},
+ {0x21BD103C, "Smart Array P244br", &SA5_access},
+ {0x21BE103C, "Smart Array P741m", &SA5_access},
+ {0x21BF103C, "Smart HBA H240ar", &SA5_access},
+ {0x21C0103C, "Smart Array P440ar", &SA5_access},
+ {0x21C1103C, "Smart Array P840ar", &SA5_access},
+ {0x21C2103C, "Smart Array P440", &SA5_access},
+ {0x21C3103C, "Smart Array P441", &SA5_access},
{0x21C4103C, "Smart Array", &SA5_access},
- {0x21C5103C, "Smart Array", &SA5_access},
- {0x21C6103C, "Smart Array", &SA5_access},
- {0x21C7103C, "Smart Array", &SA5_access},
- {0x21C8103C, "Smart Array", &SA5_access},
+ {0x21C5103C, "Smart Array P841", &SA5_access},
+ {0x21C6103C, "Smart HBA H244br", &SA5_access},
+ {0x21C7103C, "Smart HBA H240", &SA5_access},
+ {0x21C8103C, "Smart HBA H241", &SA5_access},
{0x21C9103C, "Smart Array", &SA5_access},
- {0x21CA103C, "Smart Array", &SA5_access},
- {0x21CB103C, "Smart Array", &SA5_access},
+ {0x21CA103C, "Smart Array P246br", &SA5_access},
+ {0x21CB103C, "Smart Array P840", &SA5_access},
{0x21CC103C, "Smart Array", &SA5_access},
{0x21CD103C, "Smart Array", &SA5_access},
- {0x21CE103C, "Smart Array", &SA5_access},
+ {0x21CE103C, "Smart HBA", &SA5_access},
{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -195,8 +199,6 @@ static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
-static void lock_and_start_io(struct ctlr_info *h);
-static void start_io(struct ctlr_info *h, unsigned long *flags);
#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
@@ -204,18 +206,18 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
#endif
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
-static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
-static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type);
+static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time);
+static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
@@ -229,7 +231,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
- int nsgs, int min_blocks, int *bucket_map);
+ int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -241,14 +243,15 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
-static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
+static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
- u8 *scsi3addr);
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
+static void hpsa_command_resubmit_worker(struct work_struct *work);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -505,8 +508,8 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
return (scsi3addr[3] & 0xC0) == 0x40;
}
-static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
- "1(ADM)", "UNKNOWN"
+static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
+ "1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0 0
#define HPSA_RAID_4 1
@@ -671,7 +674,7 @@ static struct scsi_host_template hpsa_driver_template = {
.queuecommand = hpsa_scsi_queue_command,
.scan_start = hpsa_scan_start,
.scan_finished = hpsa_scan_finished,
- .change_queue_depth = scsi_change_queue_depth,
+ .change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.eh_abort_handler = hpsa_eh_abort_handler,
@@ -688,13 +691,6 @@ static struct scsi_host_template hpsa_driver_template = {
.no_write_same = 1,
};
-
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct list_head *list, struct CommandList *c)
-{
- list_add_tail(&c->list, list);
-}
-
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
u32 a;
@@ -828,31 +824,21 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
struct CommandList *c)
{
- unsigned long flags;
-
+ dial_down_lockup_detection_during_fw_flash(h, c);
+ atomic_inc(&h->commands_outstanding);
switch (c->cmd_type) {
case CMD_IOACCEL1:
set_ioaccel1_performant_mode(h, c);
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
break;
case CMD_IOACCEL2:
set_ioaccel2_performant_mode(h, c);
+ writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
break;
default:
set_performant_mode(h, c);
+ h->access.submit_command(h, c);
}
- dial_down_lockup_detection_during_fw_flash(h, c);
- spin_lock_irqsave(&h->lock, flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h, &flags);
- spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static inline void removeQ(struct CommandList *c)
-{
- if (WARN_ON(list_empty(&c->list)))
- return;
- list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
@@ -919,7 +905,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
/* If this device a non-zero lun of a multi-lun device
* byte 4 of the 8-byte LUN addr will contain the logical
- * unit no, zero otherise.
+ * unit no, zero otherwise.
*/
if (device->scsi3addr[4] == 0) {
/* This is not a non-zero lun of a multi-lun device */
@@ -984,12 +970,24 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
- /* Raid offload parameters changed. */
+ /* Raid offload parameters changed. Careful about the ordering. */
+ if (new_entry->offload_config && new_entry->offload_enabled) {
+ /*
+ * if drive is newly offload_enabled, we want to copy the
+ * raid map data first. If previously offload_enabled and
+ * offload_config were set, raid map data had better be
+ * the same as it was before. if raid map data is changed
+ * then it had better be the case that
+ * h->dev[entry]->offload_enabled is currently 0.
+ */
+ h->dev[entry]->raid_map = new_entry->raid_map;
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+ wmb(); /* ensure raid map updated prior to ->offload_enabled */
+ }
h->dev[entry]->offload_config = new_entry->offload_config;
- h->dev[entry]->offload_enabled = new_entry->offload_enabled;
- h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
- h->dev[entry]->raid_map = new_entry->raid_map;
+ h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+ h->dev[entry]->queue_depth = new_entry->queue_depth;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
@@ -1115,6 +1113,8 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
return 1;
if (dev1->offload_enabled != dev2->offload_enabled)
return 1;
+ if (dev1->queue_depth != dev2->queue_depth)
+ return 1;
return 0;
}
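
The reordered update in hpsa_scsi_update_entry() above is a publish pattern: copy the new RAID map and ioaccel handle first, issue a write barrier, and only then flip offload_enabled, so a consumer that observes the flag also observes a consistent map. A user-space sketch of the same idea, using C11 release/acquire in place of the driver's wmb() and a simplified layout instead of hpsa's structures:

/* Publish-then-enable ordering sketch; release/acquire stands in for the
 * wmb() used in the driver. */
#include <stdatomic.h>
#include <stdio.h>

struct volume {
        int raid_map[4];                     /* simplified stand-in for raid_map_data */
        _Atomic int offload_enabled;
};

static void update_volume(struct volume *v, const int *new_map)
{
        for (int i = 0; i < 4; i++)
                v->raid_map[i] = new_map[i]; /* 1: publish the data ... */
        atomic_store_explicit(&v->offload_enabled, 1,
                              memory_order_release); /* 2: ... then the flag */
}

static int use_volume(const struct volume *v)
{
        if (!atomic_load_explicit(&v->offload_enabled, memory_order_acquire))
                return -1;                   /* not (yet) enabled: take the RAID path */
        return v->raid_map[0];               /* map contents are visible by now */
}

int main(void)
{
        struct volume v = { .offload_enabled = 0 };
        int map[4] = { 7, 7, 7, 7 };

        update_volume(&v, map);
        printf("%d\n", use_volume(&v));
        return 0;
}
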
@@ -1260,6 +1260,85 @@ static void hpsa_show_volume_status(struct ctlr_info *h,
}
}
+/*
+ * Figure the list of physical drive pointers for a logical drive with
+ * raid offload configured.
+ */
+static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev[], int ndevices,
+ struct hpsa_scsi_dev_t *logical_drive)
+{
+ struct raid_map_data *map = &logical_drive->raid_map;
+ struct raid_map_disk_data *dd = &map->data[0];
+ int i, j;
+ int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+ le16_to_cpu(map->metadata_disks_per_row);
+ int nraid_map_entries = le16_to_cpu(map->row_cnt) *
+ le16_to_cpu(map->layout_map_count) *
+ total_disks_per_row;
+ int nphys_disk = le16_to_cpu(map->layout_map_count) *
+ total_disks_per_row;
+ int qdepth;
+
+ if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
+ nraid_map_entries = RAID_MAP_MAX_ENTRIES;
+
+ qdepth = 0;
+ for (i = 0; i < nraid_map_entries; i++) {
+ logical_drive->phys_disk[i] = NULL;
+ if (!logical_drive->offload_config)
+ continue;
+ for (j = 0; j < ndevices; j++) {
+ if (dev[j]->devtype != TYPE_DISK)
+ continue;
+ if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
+ continue;
+ if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
+ continue;
+
+ logical_drive->phys_disk[i] = dev[j];
+ if (i < nphys_disk)
+ qdepth = min(h->nr_cmds, qdepth +
+ logical_drive->phys_disk[i]->queue_depth);
+ break;
+ }
+
+ /*
+ * This can happen if a physical drive is removed and
+ * the logical drive is degraded. In that case, the RAID
+ * map data will refer to a physical disk which isn't actually
+ * present. And in that case offload_enabled should already
+ * be 0, but we'll turn it off here just in case
+ */
+ if (!logical_drive->phys_disk[i]) {
+ logical_drive->offload_enabled = 0;
+ logical_drive->queue_depth = h->nr_cmds;
+ }
+ }
+ if (nraid_map_entries)
+ /*
+ * This is correct for reads, too high for full stripe writes,
+ * way too high for partial stripe writes
+ */
+ logical_drive->queue_depth = qdepth;
+ else
+ logical_drive->queue_depth = h->nr_cmds;
+}
+
+static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev[], int ndevices)
+{
+ int i;
+
+ for (i = 0; i < ndevices; i++) {
+ if (dev[i]->devtype != TYPE_DISK)
+ continue;
+ if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
+ continue;
+ hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+ }
+}
+
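hpsa_figure_phys_disk_ptrs() above derives a logical drive's queue depth by summing its member physical disks' depths, clamped to h->nr_cmds, and falls back to h->nr_cmds when no usable map entries exist. A small sketch of just that arithmetic, with invented depths:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Sum the member disks' depths, never exceeding the controller's own limit;
     * with no usable members, fall back to the controller limit. */
    static int aggregate_queue_depth(const int *disk_depth, int ndisks, int nr_cmds)
    {
        int qdepth = 0;

        if (ndisks == 0)
            return nr_cmds;
        for (int i = 0; i < ndisks; i++)
            qdepth = MIN(nr_cmds, qdepth + disk_depth[i]);
        return qdepth;
    }

    int main(void)
    {
        int depths[] = { 29, 29, 29, 29 };  /* e.g. a per-drive limit of 31 minus 2 reserved */

        printf("%d\n", aggregate_queue_depth(depths, 4, 1024));  /* prints 116 */
        return 0;
    }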
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
@@ -1444,8 +1523,12 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
spin_lock_irqsave(&h->devlock, flags);
sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
sdev_id(sdev), sdev->lun);
- if (sd != NULL)
+ if (sd != NULL) {
sdev->hostdata = sd;
+ if (sd->queue_depth)
+ scsi_change_queue_depth(sdev, sd->queue_depth);
+ atomic_set(&sd->ioaccel_cmds_out, 0);
+ }
spin_unlock_irqrestore(&h->devlock, flags);
return 0;
}
@@ -1478,13 +1561,17 @@ static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
- if (!h->cmd_sg_list)
+ if (!h->cmd_sg_list) {
+ dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
return -ENOMEM;
+ }
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
- if (!h->cmd_sg_list[i])
+ if (!h->cmd_sg_list[i]) {
+ dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
goto clean;
+ }
}
return 0;
@@ -1504,7 +1591,7 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
chain_block = h->cmd_sg_list[c->cmdindex];
chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
chain_len = sizeof(*chain_sg) *
- (c->Header.SGTotal - h->max_cmd_sg_entries);
+ (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
chain_sg->Len = cpu_to_le32(chain_len);
temp64 = pci_map_single(h->pdev, chain_block, chain_len,
PCI_DMA_TODEVICE);
@@ -1635,7 +1722,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
struct hpsa_scsi_dev_t *dev)
{
struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
- int raid_retry = 0;
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
@@ -1652,26 +1738,22 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
if (is_logical_dev_addr_mode(dev->scsi3addr) &&
c2->error_data.serv_response ==
IOACCEL2_SERV_RESPONSE_FAILURE) {
- dev->offload_enabled = 0;
- h->drv_req_rescan = 1; /* schedule controller for a rescan */
- cmd->result = DID_SOFT_ERROR << 16;
- cmd_free(h, c);
- cmd->scsi_done(cmd);
- return;
- }
- raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
- /* If error found, disable Smart Path, schedule a rescan,
- * and force a retry on the standard path.
- */
- if (raid_retry) {
- dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
- "HP SSD Smart Path");
- dev->offload_enabled = 0; /* Disable Smart Path */
- h->drv_req_rescan = 1; /* schedule controller rescan */
- cmd->result = DID_SOFT_ERROR << 16;
+ if (c2->error_data.status ==
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+ dev->offload_enabled = 0;
+ goto retry_cmd;
}
+
+ if (handle_ioaccel_mode2_error(h, c, cmd, c2))
+ goto retry_cmd;
+
cmd_free(h, c);
cmd->scsi_done(cmd);
+ return;
+
+retry_cmd:
+ INIT_WORK(&c->work, hpsa_command_resubmit_worker);
+ queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
static void complete_scsi_command(struct CommandList *cp)
@@ -1687,18 +1769,21 @@ static void complete_scsi_command(struct CommandList *cp)
unsigned long sense_data_size;
ei = cp->err_info;
- cmd = (struct scsi_cmnd *) cp->scsi_cmd;
+ cmd = cp->scsi_cmd;
h = cp->h;
dev = cmd->device->hostdata;
scsi_dma_unmap(cmd); /* undo the DMA mappings */
if ((cp->cmd_type == CMD_SCSI) &&
- (cp->Header.SGTotal > h->max_cmd_sg_entries))
+ (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
hpsa_unmap_sg_chain_block(h, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
+ atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+
if (cp->cmd_type == CMD_IOACCEL2)
return process_ioaccel2_completion(h, cp, cmd, dev);
@@ -1706,6 +1791,8 @@ static void complete_scsi_command(struct CommandList *cp)
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
+ if (cp->cmd_type == CMD_IOACCEL1)
+ atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
cmd_free(h, cp);
cmd->scsi_done(cmd);
return;
@@ -1726,8 +1813,10 @@ static void complete_scsi_command(struct CommandList *cp)
*/
if (cp->cmd_type == CMD_IOACCEL1) {
struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
- cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
- cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+ cp->Header.SGList = scsi_sg_count(cmd);
+ cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
+ cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
+ IOACCEL1_IOFLAGS_CDBLEN_MASK;
cp->Header.tag = c->tag;
memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
@@ -1739,9 +1828,9 @@ static void complete_scsi_command(struct CommandList *cp)
if (is_logical_dev_addr_mode(dev->scsi3addr)) {
if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
dev->offload_enabled = 0;
- cmd->result = DID_SOFT_ERROR << 16;
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
+ INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
+ queue_work_on(raw_smp_processor_id(),
+ h->resubmit_wq, &cp->work);
return;
}
}
@@ -1798,9 +1887,8 @@ static void complete_scsi_command(struct CommandList *cp)
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
break;
case CMD_DATA_OVERRUN:
- dev_warn(&h->pdev->dev, "cp %p has"
- " completed with data overrun "
- "reported\n", cp);
+ dev_warn(&h->pdev->dev,
+ "CDB %16phN data overrun\n", cp->Request.CDB);
break;
case CMD_INVALID: {
/* print_bytes(cp, sizeof(*cp), 1, 0);
@@ -1816,34 +1904,38 @@ static void complete_scsi_command(struct CommandList *cp)
break;
case CMD_PROTOCOL_ERR:
cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev, "cp %p has "
- "protocol error\n", cp);
+ dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
+ cp->Request.CDB);
break;
case CMD_HARDWARE_ERR:
cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
+ dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
+ cp->Request.CDB);
break;
case CMD_CONNECTION_LOST:
cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
+ dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
+ cp->Request.CDB);
break;
case CMD_ABORTED:
cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
- cp, ei->ScsiStatus);
+ dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
+ cp->Request.CDB, ei->ScsiStatus);
break;
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
+ dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
+ cp->Request.CDB);
break;
case CMD_UNSOLICITED_ABORT:
cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
- dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
- "abort\n", cp);
+ dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
+ cp->Request.CDB);
break;
case CMD_TIMEOUT:
cmd->result = DID_TIME_OUT << 16;
- dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
+ dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
+ cp->Request.CDB);
break;
case CMD_UNABORTABLE:
cmd->result = DID_ERROR << 16;
@@ -2048,10 +2140,10 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
struct CommandList *c;
struct ErrorInfo *ei;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ if (c == NULL) {
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return -ENOMEM;
}
@@ -2067,7 +2159,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
rc = -1;
}
out:
- cmd_special_free(h, c);
+ cmd_free(h, c);
return rc;
}
@@ -2079,10 +2171,9 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
struct CommandList *c;
struct ErrorInfo *ei;
- c = cmd_special_alloc(h);
-
+ c = cmd_alloc(h);
if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return -ENOMEM;
}
@@ -2098,7 +2189,7 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
rc = -1;
}
out:
- cmd_special_free(h, c);
+ cmd_free(h, c);
return rc;
}
@@ -2109,10 +2200,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
struct CommandList *c;
struct ErrorInfo *ei;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return -ENOMEM;
}
@@ -2128,7 +2219,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
hpsa_scsi_interpret_error(h, c);
rc = -1;
}
- cmd_special_free(h, c);
+ cmd_free(h, c);
return rc;
}
@@ -2191,15 +2282,13 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
le16_to_cpu(map_buff->row_cnt));
dev_info(&h->pdev->dev, "layout_map_count = %u\n",
le16_to_cpu(map_buff->layout_map_count));
- dev_info(&h->pdev->dev, "flags = %u\n",
+ dev_info(&h->pdev->dev, "flags = 0x%x\n",
le16_to_cpu(map_buff->flags));
- if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
- dev_info(&h->pdev->dev, "encrypytion = ON\n");
- else
- dev_info(&h->pdev->dev, "encrypytion = OFF\n");
+ dev_info(&h->pdev->dev, "encrypytion = %s\n",
+ le16_to_cpu(map_buff->flags) &
+ RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
dev_info(&h->pdev->dev, "dekindex = %u\n",
le16_to_cpu(map_buff->dekindex));
-
map_cnt = le16_to_cpu(map_buff->layout_map_count);
for (map = 0; map < map_cnt; map++) {
dev_info(&h->pdev->dev, "Map%u:\n", map);
@@ -2238,26 +2327,26 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
struct CommandList *c;
struct ErrorInfo *ei;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (c == NULL) {
- dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return -ENOMEM;
}
if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
sizeof(this_device->raid_map), 0,
scsi3addr, TYPE_CMD)) {
dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
- cmd_special_free(h, c);
+ cmd_free(h, c);
return -ENOMEM;
}
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
- cmd_special_free(h, c);
+ cmd_free(h, c);
return -1;
}
- cmd_special_free(h, c);
+ cmd_free(h, c);
/* @todo in the future, dynamically allocate RAID map memory */
if (le32_to_cpu(this_device->raid_map.structure_size) >
@@ -2269,6 +2358,34 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
return rc;
}
+static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
+ unsigned char scsi3addr[], u16 bmic_device_index,
+ struct bmic_identify_physical_device *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
+ 0, RAID_CTLR_LUNID, TYPE_CMD);
+ if (rc)
+ goto out;
+
+ c->Request.CDB[2] = bmic_device_index & 0xff;
+ c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
+
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
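hpsa_bmic_id_physical_device() above passes the 16-bit BMIC drive number split across the CDB: low byte in CDB[2], high byte in CDB[9]. A tiny standalone illustration of that packing:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t bmic_device_index = 0x1234;
        uint8_t cdb[16] = { 0 };

        cdb[2] = bmic_device_index & 0xff;          /* low byte */
        cdb[9] = (bmic_device_index >> 8) & 0xff;   /* high byte */

        /* The receiving firmware reassembles the index from the same two bytes. */
        assert((uint16_t)((cdb[9] << 8) | cdb[2]) == bmic_device_index);
        return 0;
    }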
static int hpsa_vpd_page_supported(struct ctlr_info *h,
unsigned char scsi3addr[], u8 page)
{
@@ -2369,7 +2486,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
- struct ReportLUNdata *buf, int bufsize,
+ void *buf, int bufsize,
int extended_response)
{
int rc = IO_OK;
@@ -2377,9 +2494,9 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
unsigned char scsi3addr[8];
struct ErrorInfo *ei;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (c == NULL) { /* trouble... */
- dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return -1;
}
/* address the controller */
@@ -2398,24 +2515,26 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
hpsa_scsi_interpret_error(h, c);
rc = -1;
} else {
- if (buf->extended_response_flag != extended_response) {
+ struct ReportLUNdata *rld = buf;
+
+ if (rld->extended_response_flag != extended_response) {
dev_err(&h->pdev->dev,
"report luns requested format %u, got %u\n",
extended_response,
- buf->extended_response_flag);
+ rld->extended_response_flag);
rc = -1;
}
}
out:
- cmd_special_free(h, c);
+ cmd_free(h, c);
return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
- struct ReportLUNdata *buf,
- int bufsize, int extended_response)
+ struct ReportExtendedLUNdata *buf, int bufsize)
{
- return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
+ return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+ HPSA_REPORT_PHYS_EXTENDED);
}
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -2590,6 +2709,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
this_device->offload_config = 0;
this_device->offload_enabled = 0;
this_device->volume_offline = 0;
+ this_device->queue_depth = h->nr_cmds;
}
if (is_OBDR_device) {
@@ -2732,7 +2852,6 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
{
struct ReportExtendedLUNdata *physicals = NULL;
int responsesize = 24; /* size of physical extended response */
- int extended = 2; /* flag forces reporting 'other dev info'. */
int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
u32 nphysicals = 0; /* number of reported physical devs */
int found = 0; /* found match (1) or not (0) */
@@ -2741,8 +2860,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
struct scsi_cmnd *scmd; /* scsi command within request being aborted */
struct hpsa_scsi_dev_t *d; /* device of request being aborted */
struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
- u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
- u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+ __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+ __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
return 0; /* no match */
@@ -2761,8 +2880,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
return 0; /* no match */
it_nexus = cpu_to_le32(d->ioaccel_handle);
- scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
- find = c2a->scsi_nexus;
+ scsi_nexus = c2a->scsi_nexus;
+ find = le32_to_cpu(c2a->scsi_nexus);
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
@@ -2779,8 +2898,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
physicals = kzalloc(reportsize, GFP_KERNEL);
if (physicals == NULL)
return 0;
- if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
- reportsize, extended)) {
+ if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
dev_err(&h->pdev->dev,
"Can't lookup %s device handle: report physical LUNs failed.\n",
"HP SSD Smart Path");
@@ -2821,34 +2939,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
* Returns 0 on success, -1 otherwise.
*/
static int hpsa_gather_lun_info(struct ctlr_info *h,
- int reportphyslunsize, int reportloglunsize,
- struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
+ struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
struct ReportLUNdata *logdev, u32 *nlogicals)
{
- int physical_entry_size = 8;
-
- *physical_mode = 0;
-
- /* For I/O accelerator mode we need to read physical device handles */
- if (h->transMethod & CFGTBL_Trans_io_accel1 ||
- h->transMethod & CFGTBL_Trans_io_accel2) {
- *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
- physical_entry_size = 24;
- }
- if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
- *physical_mode)) {
+ if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
return -1;
}
- *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
- physical_entry_size;
+ *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
if (*nphysicals > HPSA_MAX_PHYS_LUN) {
- dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
- " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
- *nphysicals - HPSA_MAX_PHYS_LUN);
+ dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
+ HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
*nphysicals = HPSA_MAX_PHYS_LUN;
}
- if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
+ if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
return -1;
}
@@ -2921,6 +3025,33 @@ static int hpsa_hba_mode_enabled(struct ctlr_info *h)
return hba_mode_enabled;
}
+/* get physical drive ioaccel handle and queue depth */
+static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev,
+ u8 *lunaddrbytes,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+ struct ext_report_lun_entry *rle =
+ (struct ext_report_lun_entry *) lunaddrbytes;
+
+ dev->ioaccel_handle = rle->ioaccel_handle;
+ memset(id_phys, 0, sizeof(*id_phys));
+ rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
+ GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
+ sizeof(*id_phys));
+ if (!rc)
+ /* Reserve space for FW operations */
+#define DRIVE_CMDS_RESERVED_FOR_FW 2
+#define DRIVE_QUEUE_DEPTH 7
+ dev->queue_depth =
+ le16_to_cpu(id_phys->current_queue_depth_limit) -
+ DRIVE_CMDS_RESERVED_FOR_FW;
+ else
+ dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
+ atomic_set(&dev->ioaccel_cmds_out, 0);
+}
+
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
@@ -2935,9 +3066,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
*/
struct ReportExtendedLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
+ struct bmic_identify_physical_device *id_phys = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
- int physical_mode = 0;
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
@@ -2950,8 +3081,10 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+ id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
- if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
+ if (!currentsd || !physdev_list || !logdev_list ||
+ !tmpdevice || !id_phys) {
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
@@ -2968,10 +3101,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
h->hba_mode_enabled = rescan_hba_mode;
- if (hpsa_gather_lun_info(h,
- sizeof(*physdev_list), sizeof(*logdev_list),
- (struct ReportLUNdata *) physdev_list, &nphysicals,
- &physical_mode, logdev_list, &nlogicals))
+ if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
+ logdev_list, &nlogicals))
goto out;
/* We might see up to the maximum number of logical and physical disks
@@ -3068,10 +3199,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ncurrent++;
break;
}
- if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
- memcpy(&this_device->ioaccel_handle,
- &lunaddrbytes[20],
- sizeof(this_device->ioaccel_handle));
+ if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+ h->transMethod & CFGTBL_Trans_io_accel2) {
+ hpsa_get_ioaccel_drive_info(h, this_device,
+ lunaddrbytes, id_phys);
+ atomic_set(&this_device->ioaccel_cmds_out, 0);
ncurrent++;
}
break;
@@ -3095,6 +3227,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
if (ncurrent >= HPSA_MAX_DEVICES)
break;
}
+ hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
kfree(tmpdevice);
@@ -3103,9 +3236,22 @@ out:
kfree(currentsd);
kfree(physdev_list);
kfree(logdev_list);
+ kfree(id_phys);
}
-/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
+ struct scatterlist *sg)
+{
+ u64 addr64 = (u64) sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ desc->Addr = cpu_to_le64(addr64);
+ desc->Len = cpu_to_le32(len);
+ desc->Ext = 0;
+}
+
+/*
+ * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
* dma mapping and fills in the scatter gather entries of the
* hpsa command, cp.
*/
@@ -3113,9 +3259,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
struct CommandList *cp,
struct scsi_cmnd *cmd)
{
- unsigned int len;
struct scatterlist *sg;
- u64 addr64;
int use_sg, i, sg_index, chained;
struct SGDescriptor *curr_sg;
@@ -3138,13 +3282,11 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
curr_sg = h->cmd_sg_list[cp->cmdindex];
sg_index = 0;
}
- addr64 = (u64) sg_dma_address(sg);
- len = sg_dma_len(sg);
- curr_sg->Addr = cpu_to_le64(addr64);
- curr_sg->Len = cpu_to_le32(len);
- curr_sg->Ext = cpu_to_le32(0);
+ hpsa_set_sg_descriptor(curr_sg, sg);
curr_sg++;
}
+
+ /* Back the pointer up to the last entry and mark it as "last". */
(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
if (use_sg + chained > h->maxSG)
@@ -3163,7 +3305,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
sglist_finished:
cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */
+ cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
return 0;
}
@@ -3217,7 +3359,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
- u8 *scsi3addr)
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
struct scsi_cmnd *cmd = c->scsi_cmd;
struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
@@ -3230,13 +3372,17 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
/* TODO: implement chaining support */
- if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
+ }
BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
- if (fixup_ioaccel_cdb(cdb, &cdb_len))
+ if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
+ }
c->cmd_type = CMD_IOACCEL1;
@@ -3246,8 +3392,10 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
BUG_ON(c->busaddr & 0x0000007F);
use_sg = scsi_dma_map(cmd);
- if (use_sg < 0)
+ if (use_sg < 0) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
return use_sg;
+ }
if (use_sg) {
curr_sg = cp->SG;
@@ -3284,11 +3432,11 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
c->Header.SGList = use_sg;
/* Fill out the command structure to submit */
- cp->dev_handle = ioaccel_handle & 0xFFFF;
- cp->transfer_len = total_len;
- cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
- (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
- cp->control = control;
+ cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
+ cp->transfer_len = cpu_to_le32(total_len);
+ cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
+ (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
+ cp->control = cpu_to_le32(control);
memcpy(cp->CDB, cdb, cdb_len);
memcpy(cp->CISS_LUN, scsi3addr, 8);
/* Tag was already set at init time. */
@@ -3306,8 +3454,10 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
struct scsi_cmnd *cmd = c->scsi_cmd;
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ c->phys_disk = dev;
+
return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
- cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+ cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
/*
@@ -3321,10 +3471,8 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
struct raid_map_data *map = &dev->raid_map;
u64 first_block;
- BUG_ON(!(dev->offload_config && dev->offload_enabled));
-
/* Are we doing encryption on this device */
- if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+ if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
return;
/* Set the data encryption key index. */
cp->dekindex = map->dekindex;
@@ -3340,101 +3488,38 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
case WRITE_6:
case READ_6:
- if (map->volume_blk_size == 512) {
- cp->tweak_lower =
- (((u32) cmd->cmnd[2]) << 8) |
- cmd->cmnd[3];
- cp->tweak_upper = 0;
- } else {
- first_block =
- (((u64) cmd->cmnd[2]) << 8) |
- cmd->cmnd[3];
- first_block = (first_block * map->volume_blk_size)/512;
- cp->tweak_lower = (u32)first_block;
- cp->tweak_upper = (u32)(first_block >> 32);
- }
+ first_block = get_unaligned_be16(&cmd->cmnd[2]);
break;
case WRITE_10:
case READ_10:
- if (map->volume_blk_size == 512) {
- cp->tweak_lower =
- (((u32) cmd->cmnd[2]) << 24) |
- (((u32) cmd->cmnd[3]) << 16) |
- (((u32) cmd->cmnd[4]) << 8) |
- cmd->cmnd[5];
- cp->tweak_upper = 0;
- } else {
- first_block =
- (((u64) cmd->cmnd[2]) << 24) |
- (((u64) cmd->cmnd[3]) << 16) |
- (((u64) cmd->cmnd[4]) << 8) |
- cmd->cmnd[5];
- first_block = (first_block * map->volume_blk_size)/512;
- cp->tweak_lower = (u32)first_block;
- cp->tweak_upper = (u32)(first_block >> 32);
- }
- break;
/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
case WRITE_12:
case READ_12:
- if (map->volume_blk_size == 512) {
- cp->tweak_lower =
- (((u32) cmd->cmnd[2]) << 24) |
- (((u32) cmd->cmnd[3]) << 16) |
- (((u32) cmd->cmnd[4]) << 8) |
- cmd->cmnd[5];
- cp->tweak_upper = 0;
- } else {
- first_block =
- (((u64) cmd->cmnd[2]) << 24) |
- (((u64) cmd->cmnd[3]) << 16) |
- (((u64) cmd->cmnd[4]) << 8) |
- cmd->cmnd[5];
- first_block = (first_block * map->volume_blk_size)/512;
- cp->tweak_lower = (u32)first_block;
- cp->tweak_upper = (u32)(first_block >> 32);
- }
+ first_block = get_unaligned_be32(&cmd->cmnd[2]);
break;
case WRITE_16:
case READ_16:
- if (map->volume_blk_size == 512) {
- cp->tweak_lower =
- (((u32) cmd->cmnd[6]) << 24) |
- (((u32) cmd->cmnd[7]) << 16) |
- (((u32) cmd->cmnd[8]) << 8) |
- cmd->cmnd[9];
- cp->tweak_upper =
- (((u32) cmd->cmnd[2]) << 24) |
- (((u32) cmd->cmnd[3]) << 16) |
- (((u32) cmd->cmnd[4]) << 8) |
- cmd->cmnd[5];
- } else {
- first_block =
- (((u64) cmd->cmnd[2]) << 56) |
- (((u64) cmd->cmnd[3]) << 48) |
- (((u64) cmd->cmnd[4]) << 40) |
- (((u64) cmd->cmnd[5]) << 32) |
- (((u64) cmd->cmnd[6]) << 24) |
- (((u64) cmd->cmnd[7]) << 16) |
- (((u64) cmd->cmnd[8]) << 8) |
- cmd->cmnd[9];
- first_block = (first_block * map->volume_blk_size)/512;
- cp->tweak_lower = (u32)first_block;
- cp->tweak_upper = (u32)(first_block >> 32);
- }
+ first_block = get_unaligned_be64(&cmd->cmnd[2]);
break;
default:
dev_err(&h->pdev->dev,
- "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
- __func__);
+ "ERROR: %s: size (0x%x) not supported for encryption\n",
+ __func__, cmd->cmnd[0]);
BUG();
break;
}
+
+ if (le32_to_cpu(map->volume_blk_size) != 512)
+ first_block = first_block *
+ le32_to_cpu(map->volume_blk_size)/512;
+
+ cp->tweak_lower = cpu_to_le32(first_block);
+ cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
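The reworked set_encrypt_ioaccel2() pulls the starting LBA out of the CDB with get_unaligned_be{16,32,64}(), rescales it when the volume block size is not 512, and splits the result into the lower and upper 32-bit tweak halves. The arithmetic alone, as a standalone sketch with an assumed 4K-sector volume:

    #include <stdint.h>
    #include <stdio.h>

    /* Read a 32-bit big-endian LBA as laid out in a READ(10)/WRITE(10) CDB. */
    static uint64_t get_be32(const uint8_t *p)
    {
        return ((uint64_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    }

    int main(void)
    {
        uint8_t cdb_lba[4] = { 0x00, 0x00, 0x10, 0x00 };  /* LBA 0x1000 */
        uint32_t volume_blk_size = 4096;                  /* 4K-sector volume */
        uint64_t first_block = get_be32(cdb_lba);

        if (volume_blk_size != 512)
            first_block = first_block * volume_blk_size / 512;

        uint32_t tweak_lower = (uint32_t)first_block;
        uint32_t tweak_upper = (uint32_t)(first_block >> 32);

        printf("tweak = 0x%08x:%08x\n", tweak_upper, tweak_lower);  /* 0x00000000:00008000 */
        return 0;
    }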
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
- u8 *scsi3addr)
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
struct scsi_cmnd *cmd = c->scsi_cmd;
struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -3445,11 +3530,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
u32 len;
u32 total_len = 0;
- if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
+ }
- if (fixup_ioaccel_cdb(cdb, &cdb_len))
+ if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
+ }
+
c->cmd_type = CMD_IOACCEL2;
/* Adjust the DMA address to point to the accelerated command buffer */
c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
@@ -3460,8 +3550,10 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
cp->IU_type = IOACCEL2_IU_TYPE;
use_sg = scsi_dma_map(cmd);
- if (use_sg < 0)
+ if (use_sg < 0) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
return use_sg;
+ }
if (use_sg) {
BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
@@ -3506,9 +3598,8 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
/* Set encryption parameters, if necessary */
set_encrypt_ioaccel2(h, c, cp);
- cp->scsi_nexus = ioaccel_handle;
- cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
- DIRECT_LOOKUP_BIT;
+ cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
+ cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
memcpy(cp->cdb, cdb, sizeof(cp->cdb));
/* fill in sg elements */
@@ -3528,14 +3619,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
*/
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
- u8 *scsi3addr)
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
+ /* Try to honor the device's queue depth */
+ if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
+ phys_disk->queue_depth) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
if (h->transMethod & CFGTBL_Trans_io_accel1)
return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
- cdb, cdb_len, scsi3addr);
+ cdb, cdb_len, scsi3addr,
+ phys_disk);
else
return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
- cdb, cdb_len, scsi3addr);
+ cdb, cdb_len, scsi3addr,
+ phys_disk);
}
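hpsa_scsi_ioaccel_queue_command() above gates the accelerated path on the target disk's queue depth: atomic_inc_return() reserves a slot, and the increment is undone if the depth would be exceeded so the caller falls back to the RAID path. A userspace analog of that gate (struct and function names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    #define IO_ACCEL_INELIGIBLE 1

    struct phys_disk {
        atomic_int ioaccel_cmds_out;
        int queue_depth;
    };

    /* Reserve a slot on the disk; back the increment out if that would exceed
     * its advertised depth, so the caller falls back to the RAID path. */
    static int try_ioaccel(struct phys_disk *d)
    {
        if (atomic_fetch_add(&d->ioaccel_cmds_out, 1) + 1 > d->queue_depth) {
            atomic_fetch_sub(&d->ioaccel_cmds_out, 1);
            return IO_ACCEL_INELIGIBLE;
        }
        return 0;   /* accelerated path, slot held until completion */
    }

    int main(void)
    {
        struct phys_disk d = { .queue_depth = 2 };

        for (int i = 0; i < 3; i++)
            printf("attempt %d -> %d\n", i, try_ioaccel(&d));  /* 0, 0, then 1 */
        return 0;
    }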
static void raid_map_helper(struct raid_map_data *map,
@@ -3543,21 +3642,22 @@ static void raid_map_helper(struct raid_map_data *map,
{
if (offload_to_mirror == 0) {
/* use physical disk in the first mirrored group. */
- *map_index %= map->data_disks_per_row;
+ *map_index %= le16_to_cpu(map->data_disks_per_row);
return;
}
do {
/* determine mirror group that *map_index indicates */
- *current_group = *map_index / map->data_disks_per_row;
+ *current_group = *map_index /
+ le16_to_cpu(map->data_disks_per_row);
if (offload_to_mirror == *current_group)
continue;
- if (*current_group < (map->layout_map_count - 1)) {
+ if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
/* select map index from next group */
- *map_index += map->data_disks_per_row;
+ *map_index += le16_to_cpu(map->data_disks_per_row);
(*current_group)++;
} else {
/* select map index from first group */
- *map_index %= map->data_disks_per_row;
+ *map_index %= le16_to_cpu(map->data_disks_per_row);
*current_group = 0;
}
} while (offload_to_mirror != *current_group);
@@ -3595,13 +3695,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
u32 disk_block_cnt;
u8 cdb[16];
u8 cdb_len;
+ u16 strip_size;
#if BITS_PER_LONG == 32
u64 tmpdiv;
#endif
int offload_to_mirror;
- BUG_ON(!(dev->offload_config && dev->offload_enabled));
-
/* check for valid opcode, get LBA and block count */
switch (cmd->cmnd[0]) {
case WRITE_6:
@@ -3668,11 +3767,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
return IO_ACCEL_INELIGIBLE;
/* check for invalid block or wraparound */
- if (last_block >= map->volume_blk_cnt || last_block < first_block)
+ if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
+ last_block < first_block)
return IO_ACCEL_INELIGIBLE;
/* calculate stripe information for the request */
- blocks_per_row = map->data_disks_per_row * map->strip_size;
+ blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
+ le16_to_cpu(map->strip_size);
+ strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
tmpdiv = first_block;
(void) do_div(tmpdiv, blocks_per_row);
@@ -3683,18 +3785,18 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
tmpdiv = first_row_offset;
- (void) do_div(tmpdiv, map->strip_size);
+ (void) do_div(tmpdiv, strip_size);
first_column = tmpdiv;
tmpdiv = last_row_offset;
- (void) do_div(tmpdiv, map->strip_size);
+ (void) do_div(tmpdiv, strip_size);
last_column = tmpdiv;
#else
first_row = first_block / blocks_per_row;
last_row = last_block / blocks_per_row;
first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
- first_column = first_row_offset / map->strip_size;
- last_column = last_row_offset / map->strip_size;
+ first_column = first_row_offset / strip_size;
+ last_column = last_row_offset / strip_size;
#endif
/* if this isn't a single row/column then give to the controller */
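The stripe math above reduces a request's first and last block to a row and column inside the RAID map; only requests confined to a single row and column stay on the ioaccel path. A worked example under assumed geometry (3 data disks, 128-block strips):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t data_disks_per_row = 3, strip_size = 128;  /* assumed geometry */
        uint64_t first_block = 1000, last_block = 1015;     /* a 16-block read */
        uint64_t blocks_per_row = (uint64_t)data_disks_per_row * strip_size;   /* 384 */

        uint64_t first_row = first_block / blocks_per_row;                     /* 2 */
        uint64_t last_row = last_block / blocks_per_row;                       /* 2 */
        uint32_t first_row_offset = first_block - first_row * blocks_per_row;  /* 232 */
        uint32_t last_row_offset = last_block - last_row * blocks_per_row;     /* 247 */
        uint32_t first_column = first_row_offset / strip_size;                 /* 1 */
        uint32_t last_column = last_row_offset / strip_size;                   /* 1 */

        /* Same row and same column at both ends, so the request stays eligible. */
        printf("rows %llu..%llu, columns %u..%u\n",
               (unsigned long long)first_row, (unsigned long long)last_row,
               first_column, last_column);
        return 0;
    }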
@@ -3702,10 +3804,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
return IO_ACCEL_INELIGIBLE;
/* proceeding with driver mapping */
- total_disks_per_row = map->data_disks_per_row +
- map->metadata_disks_per_row;
+ total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+ le16_to_cpu(map->metadata_disks_per_row);
map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
- map->row_cnt;
+ le16_to_cpu(map->row_cnt);
map_index = (map_row * total_disks_per_row) + first_column;
switch (dev->raid_level) {
@@ -3716,23 +3818,24 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
* (2-drive R1 and R10 with even # of drives.)
* Appropriate for SSDs, not optimal for HDDs
*/
- BUG_ON(map->layout_map_count != 2);
+ BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
if (dev->offload_to_mirror)
- map_index += map->data_disks_per_row;
+ map_index += le16_to_cpu(map->data_disks_per_row);
dev->offload_to_mirror = !dev->offload_to_mirror;
break;
case HPSA_RAID_ADM:
/* Handles N-way mirrors (R1-ADM)
* and R10 with # of drives divisible by 3.)
*/
- BUG_ON(map->layout_map_count != 3);
+ BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
offload_to_mirror = dev->offload_to_mirror;
raid_map_helper(map, offload_to_mirror,
&map_index, &current_group);
/* set mirror group to use next time */
offload_to_mirror =
- (offload_to_mirror >= map->layout_map_count - 1)
+ (offload_to_mirror >=
+ le16_to_cpu(map->layout_map_count) - 1)
? 0 : offload_to_mirror + 1;
dev->offload_to_mirror = offload_to_mirror;
/* Avoid direct use of dev->offload_to_mirror within this
@@ -3742,14 +3845,16 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case HPSA_RAID_5:
case HPSA_RAID_6:
- if (map->layout_map_count <= 1)
+ if (le16_to_cpu(map->layout_map_count) <= 1)
break;
/* Verify first and last block are in same RAID group */
r5or6_blocks_per_row =
- map->strip_size * map->data_disks_per_row;
+ le16_to_cpu(map->strip_size) *
+ le16_to_cpu(map->data_disks_per_row);
BUG_ON(r5or6_blocks_per_row == 0);
- stripesize = r5or6_blocks_per_row * map->layout_map_count;
+ stripesize = r5or6_blocks_per_row *
+ le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
tmpdiv = first_block;
first_group = do_div(tmpdiv, stripesize);
@@ -3812,28 +3917,35 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
r5or6_blocks_per_row);
first_column = r5or6_first_column =
- r5or6_first_row_offset / map->strip_size;
+ r5or6_first_row_offset / le16_to_cpu(map->strip_size);
r5or6_last_column =
- r5or6_last_row_offset / map->strip_size;
+ r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
if (r5or6_first_column != r5or6_last_column)
return IO_ACCEL_INELIGIBLE;
/* Request is eligible */
map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
- map->row_cnt;
+ le16_to_cpu(map->row_cnt);
map_index = (first_group *
- (map->row_cnt * total_disks_per_row)) +
+ (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
(map_row * total_disks_per_row) + first_column;
break;
default:
return IO_ACCEL_INELIGIBLE;
}
+ if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
+ return IO_ACCEL_INELIGIBLE;
+
+ c->phys_disk = dev->phys_disk[map_index];
+
disk_handle = dd[map_index].ioaccel_handle;
- disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
- (first_row_offset - (first_column * map->strip_size));
+ disk_block = le64_to_cpu(map->disk_starting_blk) +
+ first_row * le16_to_cpu(map->strip_size) +
+ (first_row_offset - first_column *
+ le16_to_cpu(map->strip_size));
disk_block_cnt = block_cnt;
/* handle differing logical/physical block sizes */
@@ -3876,78 +3988,21 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
cdb_len = 10;
}
return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
- dev->scsi3addr);
+ dev->scsi3addr,
+ dev->phys_disk[map_index]);
}
-/*
- * Running in struct Scsi_Host->host_lock less mode using LLD internal
- * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection.
- */
-static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+/* Submit commands down the "normal" RAID stack path */
+static int hpsa_ciss_submit(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ unsigned char scsi3addr[])
{
- struct ctlr_info *h;
- struct hpsa_scsi_dev_t *dev;
- unsigned char scsi3addr[8];
- struct CommandList *c;
- int rc = 0;
-
- /* Get the ptr to our adapter structure out of cmd->host. */
- h = sdev_to_hba(cmd->device);
- dev = cmd->device->hostdata;
- if (!dev) {
- cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
- return 0;
- }
- memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
-
- if (unlikely(lockup_detected(h))) {
- cmd->result = DID_ERROR << 16;
- cmd->scsi_done(cmd);
- return 0;
- }
- c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- /* Fill in the command list header */
- /* save c in case we have to abort it */
cmd->host_scribble = (unsigned char *) c;
-
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
-
- /* Call alternate submit routine for I/O accelerated commands.
- * Retries always go down the normal I/O path.
- */
- if (likely(cmd->retries == 0 &&
- cmd->request->cmd_type == REQ_TYPE_FS &&
- h->acciopath_status)) {
- if (dev->offload_enabled) {
- rc = hpsa_scsi_ioaccel_raid_map(h, c);
- if (rc == 0)
- return 0; /* Sent on ioaccel path */
- if (rc < 0) { /* scsi_dma_map failed. */
- cmd_free(h, c);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- } else if (dev->ioaccel_handle) {
- rc = hpsa_scsi_ioaccel_direct_map(h, c);
- if (rc == 0)
- return 0; /* Sent on direct map path */
- if (rc < 0) { /* scsi_dma_map failed. */
- cmd_free(h, c);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- }
- }
-
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
- c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) |
- DIRECT_LOOKUP_BIT);
+ c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
/* Fill in the request block... */
@@ -4003,25 +4058,108 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return 0;
}
-static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
+static void hpsa_command_resubmit_worker(struct work_struct *work)
{
- unsigned long flags;
+ struct scsi_cmnd *cmd;
+ struct hpsa_scsi_dev_t *dev;
+ struct CommandList *c =
+ container_of(work, struct CommandList, work);
+
+ cmd = c->scsi_cmd;
+ dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return;
+ }
+ if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
+ /*
+ * If we get here, it means dma mapping failed. Try
+ * again via scsi mid layer, which will then get
+ * SCSI_MLQUEUE_HOST_BUSY.
+ */
+ cmd->result = DID_IMM_RETRY << 16;
+ cmd->scsi_done(cmd);
+ }
+}
+
+/* Running in struct Scsi_Host->host_lock less mode */
+static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+{
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+ unsigned char scsi3addr[8];
+ struct CommandList *c;
+ int rc = 0;
+
+ /* Get the ptr to our adapter structure out of cmd->host. */
+ h = sdev_to_hba(cmd->device);
+ dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
+
+ if (unlikely(lockup_detected(h))) {
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ c = cmd_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (unlikely(lockup_detected(h))) {
+ cmd->result = DID_ERROR << 16;
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
/*
- * Don't let rescans be initiated on a controller known
- * to be locked up. If the controller locks up *during*
- * a rescan, that thread is probably hosed, but at least
- * we can prevent new rescan threads from piling up on a
- * locked up controller.
+ * Call alternate submit routine for I/O accelerated commands.
+ * Retries always go down the normal I/O path.
*/
- if (unlikely(lockup_detected(h))) {
- spin_lock_irqsave(&h->scan_lock, flags);
- h->scan_finished = 1;
- wake_up_all(&h->scan_wait_queue);
- spin_unlock_irqrestore(&h->scan_lock, flags);
- return 1;
+ if (likely(cmd->retries == 0 &&
+ cmd->request->cmd_type == REQ_TYPE_FS &&
+ h->acciopath_status)) {
+
+ cmd->host_scribble = (unsigned char *) c;
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+
+ if (dev->offload_enabled) {
+ rc = hpsa_scsi_ioaccel_raid_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on ioaccel path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ } else if (dev->ioaccel_handle) {
+ rc = hpsa_scsi_ioaccel_direct_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on direct map path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ }
}
- return 0;
+ return hpsa_ciss_submit(h, c, cmd, scsi3addr);
+}
+
+static void hpsa_scan_complete(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->scan_lock, flags);
+ h->scan_finished = 1;
+ wake_up_all(&h->scan_wait_queue);
+ spin_unlock_irqrestore(&h->scan_lock, flags);
}
static void hpsa_scan_start(struct Scsi_Host *sh)
@@ -4029,8 +4167,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
- if (do_not_scan_if_controller_locked_up(h))
- return;
+ /*
+ * Don't let rescans be initiated on a controller known to be locked
+ * up. If the controller locks up *during* a rescan, that thread is
+ * probably hosed, but at least we can prevent new rescan threads from
+ * piling up on a locked up controller.
+ */
+ if (unlikely(lockup_detected(h)))
+ return hpsa_scan_complete(h);
/* wait until any scan already in progress is finished. */
while (1) {
@@ -4048,15 +4192,27 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
h->scan_finished = 0; /* mark scan as in progress */
spin_unlock_irqrestore(&h->scan_lock, flags);
- if (do_not_scan_if_controller_locked_up(h))
- return;
+ if (unlikely(lockup_detected(h)))
+ return hpsa_scan_complete(h);
hpsa_update_scsi_devices(h, h->scsi_host->host_no);
- spin_lock_irqsave(&h->scan_lock, flags);
- h->scan_finished = 1; /* mark scan as finished. */
- wake_up_all(&h->scan_wait_queue);
- spin_unlock_irqrestore(&h->scan_lock, flags);
+ hpsa_scan_complete(h);
+}
+
+static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
+
+ if (!logical_drive)
+ return -ENODEV;
+
+ if (qdepth < 1)
+ qdepth = 1;
+ else if (qdepth > logical_drive->queue_depth)
+ qdepth = logical_drive->queue_depth;
+
+ return scsi_change_queue_depth(sdev, qdepth);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
@@ -4096,11 +4252,11 @@ static int hpsa_register_scsi(struct ctlr_info *h)
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
- sh->can_queue = h->nr_cmds;
- if (h->hba_mode_enabled)
- sh->cmd_per_lun = 7;
- else
- sh->cmd_per_lun = h->nr_cmds;
+ sh->can_queue = h->nr_cmds -
+ HPSA_CMDS_RESERVED_FOR_ABORTS -
+ HPSA_CMDS_RESERVED_FOR_DRIVER -
+ HPSA_MAX_CONCURRENT_PASSTHRUS;
+ sh->cmd_per_lun = sh->can_queue;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
@@ -4131,7 +4287,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
int waittime = 1; /* seconds */
struct CommandList *c;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (!c) {
dev_warn(&h->pdev->dev, "out of memory in "
"wait_for_device_to_become_ready.\n");
@@ -4177,7 +4333,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
else
dev_warn(&h->pdev->dev, "device is ready.\n");
- cmd_special_free(h, c);
+ cmd_free(h, c);
return rc;
}
@@ -4194,6 +4350,10 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
h = sdev_to_hba(scsicmd->device);
if (h == NULL) /* paranoia */
return FAILED;
+
+ if (lockup_detected(h))
+ return FAILED;
+
dev = scsicmd->device->hostdata;
if (!dev) {
dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
@@ -4227,13 +4387,15 @@ static void swizzle_abort_tag(u8 *tag)
}
static void hpsa_get_tag(struct ctlr_info *h,
- struct CommandList *c, u32 *taglower, u32 *tagupper)
+ struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
+ u64 tag;
if (c->cmd_type == CMD_IOACCEL1) {
struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
&h->ioaccel_cmd_pool[c->cmdindex];
- *tagupper = (u32) (cm1->tag >> 32);
- *taglower = (u32) (cm1->tag & 0x0ffffffffULL);
+ tag = le64_to_cpu(cm1->tag);
+ *tagupper = cpu_to_le32(tag >> 32);
+ *taglower = cpu_to_le32(tag);
return;
}
if (c->cmd_type == CMD_IOACCEL2) {
@@ -4244,8 +4406,9 @@ static void hpsa_get_tag(struct ctlr_info *h,
*taglower = cm2->Tag;
return;
}
- *tagupper = (u32) (c->Header.tag >> 32);
- *taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
+ tag = le64_to_cpu(c->Header.tag);
+ *tagupper = cpu_to_le32(tag >> 32);
+ *taglower = cpu_to_le32(tag);
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
@@ -4254,11 +4417,11 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
- u32 tagupper, taglower;
+ __le32 tagupper, taglower;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return -ENOMEM;
}
@@ -4287,62 +4450,12 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
rc = -1;
break;
}
- cmd_special_free(h, c);
+ cmd_free(h, c);
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
__func__, tagupper, taglower);
return rc;
}
-/*
- * hpsa_find_cmd_in_queue
- *
- * Used to determine whether a command (find) is still present
- * in queue_head. Optionally excludes the last element of queue_head.
- *
- * This is used to avoid unnecessary aborts. Commands in h->reqQ have
- * not yet been submitted, and so can be aborted by the driver without
- * sending an abort to the hardware.
- *
- * Returns pointer to command if found in queue, NULL otherwise.
- */
-static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
- struct scsi_cmnd *find, struct list_head *queue_head)
-{
- unsigned long flags;
- struct CommandList *c = NULL; /* ptr into cmpQ */
-
- if (!find)
- return NULL;
- spin_lock_irqsave(&h->lock, flags);
- list_for_each_entry(c, queue_head, list) {
- if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
- continue;
- if (c->scsi_cmd == find) {
- spin_unlock_irqrestore(&h->lock, flags);
- return c;
- }
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return NULL;
-}
-
-static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
- u8 *tag, struct list_head *queue_head)
-{
- unsigned long flags;
- struct CommandList *c;
-
- spin_lock_irqsave(&h->lock, flags);
- list_for_each_entry(c, queue_head, list) {
- if (memcmp(&c->Header.tag, tag, 8) != 0)
- continue;
- spin_unlock_irqrestore(&h->lock, flags);
- return c;
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return NULL;
-}
-
/* ioaccel2 path firmware cannot handle abort task requests.
* Change abort requests to physical target reset, and send to the
* address of the physical disk used for the ioaccel 2 command.
@@ -4360,7 +4473,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
unsigned char *psa = &phys_scsi3addr[0];
/* Get a pointer to the hpsa logical device. */
- scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+ scmd = abort->scsi_cmd;
dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
if (dev == NULL) {
dev_warn(&h->pdev->dev,
@@ -4429,10 +4542,6 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
unsigned char *scsi3addr, struct CommandList *abort)
{
- u8 swizzled_tag[8];
- struct CommandList *c;
- int rc = 0, rc2 = 0;
-
/* ioccelerator mode 2 commands should be aborted via the
* accelerated path, since RAID path is unaware of these commands,
* but underlying firmware can't handle abort TMF.
@@ -4441,27 +4550,8 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
if (abort->cmd_type == CMD_IOACCEL2)
return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
- /* we do not expect to find the swizzled tag in our queue, but
- * check anyway just to be sure the assumptions which make this
- * the case haven't become wrong.
- */
- memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
- swizzle_abort_tag(swizzled_tag);
- c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
- if (c != NULL) {
- dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
- return hpsa_send_abort(h, scsi3addr, abort, 0);
- }
- rc = hpsa_send_abort(h, scsi3addr, abort, 0);
-
- /* if the command is still in our queue, we can't conclude that it was
- * aborted (it might have just completed normally) but in any case
- * we don't need to try to abort it another way.
- */
- c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
- if (c)
- rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
- return rc && rc2;
+ return hpsa_send_abort(h, scsi3addr, abort, 0) &&
+ hpsa_send_abort(h, scsi3addr, abort, 1);
}
/* Send an abort for the specified command.
@@ -4475,11 +4565,11 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
struct CommandList *abort; /* pointer to command to be aborted */
- struct CommandList *found;
struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
char msg[256]; /* For debug messaging. */
int ml = 0;
- u32 tagupper, taglower;
+ __le32 tagupper, taglower;
+ int refcount;
/* Find the controller of the command to be aborted */
h = sdev_to_hba(sc->device);
@@ -4487,6 +4577,9 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
"ABORT REQUEST FAILED, Controller lookup failed.\n"))
return FAILED;
+ if (lockup_detected(h))
+ return FAILED;
+
/* Check that controller supports some kind of task abort */
if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
@@ -4508,41 +4601,23 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
/* Get SCSI command to be aborted */
abort = (struct CommandList *) sc->host_scribble;
if (abort == NULL) {
- dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
- msg);
- return FAILED;
+ /* This can happen if the command already completed. */
+ return SUCCESS;
+ }
+ refcount = atomic_inc_return(&abort->refcount);
+ if (refcount == 1) { /* Command is done already. */
+ cmd_free(h, abort);
+ return SUCCESS;
}
hpsa_get_tag(h, abort, &taglower, &tagupper);
ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
- as = (struct scsi_cmnd *) abort->scsi_cmd;
+ as = abort->scsi_cmd;
if (as != NULL)
ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
as->cmnd[0], as->serial_number);
dev_dbg(&h->pdev->dev, "%s\n", msg);
dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
-
- /* Search reqQ to See if command is queued but not submitted,
- * if so, complete the command with aborted status and remove
- * it from the reqQ.
- */
- found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
- if (found) {
- found->err_info->CommandStatus = CMD_ABORTED;
- finish_cmd(found);
- dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
- msg);
- return SUCCESS;
- }
-
- /* not in reqQ, if also not in cmpQ, must have already completed */
- found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
- if (!found) {
- dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
- msg);
- return SUCCESS;
- }
-
/*
* Command is in flight, or possibly already completed
* by the firmware (but not to the scsi mid layer) but we can't
@@ -4554,6 +4629,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
h->scsi_host->host_no,
dev->bus, dev->target, dev->lun);
+ cmd_free(h, abort);
return FAILED;
}
dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
@@ -4565,32 +4641,38 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
*/
#define ABORT_COMPLETE_WAIT_SECS 30
for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
- found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
- if (!found)
+ refcount = atomic_read(&abort->refcount);
+ if (refcount < 2) {
+ cmd_free(h, abort);
return SUCCESS;
- msleep(100);
+ } else {
+ msleep(100);
+ }
}
dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
msg, ABORT_COMPLETE_WAIT_SECS);
+ cmd_free(h, abort);
return FAILED;
}
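With the reqQ/cmpQ searches gone, the abort handler above infers command state from the reference count alone: if taking a reference yields a count of 1, the command had already been freed and the abort reports success; otherwise it polls until the count drops below 2. A loose userspace analog of that first check (lifetimes simplified, names invented):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct cmd {
        atomic_int refcount;   /* 0 = free slot, 1 = owned by the I/O path */
    };

    /* Take a reference; a resulting count of 1 means the command had already
     * completed and been freed, so there is nothing left to abort. */
    static bool abort_finds_command_done(struct cmd *c)
    {
        if (atomic_fetch_add(&c->refcount, 1) + 1 == 1) {
            atomic_fetch_sub(&c->refcount, 1);   /* drop our reference again */
            return true;
        }
        atomic_fetch_sub(&c->refcount, 1);       /* still in flight: send the abort */
        return false;
    }

    int main(void)
    {
        struct cmd done = { 0 };       /* already freed */
        struct cmd in_flight = { 1 };  /* still owned by the I/O path */

        printf("%d %d\n", abort_finds_command_done(&done),
               abort_finds_command_done(&in_flight));   /* prints "1 0" */
        return 0;
    }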
-
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
* which ones are free or in use. Lock must be held when calling this.
* cmd_free() is the complement.
*/
+
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
struct CommandList *c;
int i;
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
- int loopcount;
+ int refcount;
+ unsigned long offset;
- /* There is some *extremely* small but non-zero chance that that
+ /*
+ * There is some *extremely* small but non-zero chance that
* multiple threads could get in here, and one thread could
* be scanning through the list of bits looking for a free
* one, but the free ones are always behind him, and other
@@ -4601,24 +4683,30 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
* infrequently as to be indistinguishable from never.
*/
- loopcount = 0;
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
- if (i == h->nr_cmds)
- i = 0;
- loopcount++;
- } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 &&
- loopcount < 10);
-
- /* Thread got starved? We do not expect this to ever happen. */
- if (loopcount >= 10)
- return NULL;
-
- c = h->cmd_pool + i;
- memset(c, 0, sizeof(*c));
- cmd_dma_handle = h->cmd_pool_dhandle
- + i * sizeof(*c);
+ offset = h->last_allocation; /* benignly racy */
+ for (;;) {
+ i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
+ if (unlikely(i == h->nr_cmds)) {
+ offset = 0;
+ continue;
+ }
+ c = h->cmd_pool + i;
+ refcount = atomic_inc_return(&c->refcount);
+ if (unlikely(refcount > 1)) {
+ cmd_free(h, c); /* already in use */
+ offset = (i + 1) % h->nr_cmds;
+ continue;
+ }
+ set_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ break; /* it's ours now. */
+ }
+ h->last_allocation = i; /* benignly racy */
+
+ /* Zero out all of commandlist except the last field, refcount */
+ memset(c, 0, offsetof(struct CommandList, refcount));
+ c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
+ cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
c->err_info = h->errinfo_pool + i;
memset(c->err_info, 0, sizeof(*c->err_info));
err_dma_handle = h->errinfo_pool_dhandle
@@ -4626,45 +4714,10 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
c->cmdindex = i;
- INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
- c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
- c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
-
- c->h = h;
- return c;
-}
-
-/* For operations that can wait for kmalloc to possibly sleep,
- * this routine can be called. Lock need not be held to call
- * cmd_special_alloc. cmd_special_free() is the complement.
- */
-static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
-{
- struct CommandList *c;
- dma_addr_t cmd_dma_handle, err_dma_handle;
-
- c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
- if (c == NULL)
- return NULL;
-
- c->cmd_type = CMD_SCSI;
- c->cmdindex = -1;
-
- c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info),
- &err_dma_handle);
-
- if (c->err_info == NULL) {
- pci_free_consistent(h->pdev,
- sizeof(*c), c, cmd_dma_handle);
- return NULL;
- }
-
- INIT_LIST_HEAD(&c->list);
- c->busaddr = (u32) cmd_dma_handle;
- c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
- c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
+ c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
+ c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
c->h = h;
return c;
@@ -4672,20 +4725,13 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
- int i;
-
- i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
-}
+ if (atomic_dec_and_test(&c->refcount)) {
+ int i;
-static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
-{
- pci_free_consistent(h->pdev, sizeof(*c->err_info),
- c->err_info,
- (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr));
- pci_free_consistent(h->pdev, sizeof(*c),
- c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
+ i = c - h->cmd_pool;
+ clear_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ }
}
#ifdef CONFIG_COMPAT
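
The cmd_alloc()/cmd_free() rework above replaces the locked reqQ/cmpQ bookkeeping with a per-command refcount plus a bitmap that is used only as a scan hint. A minimal, self-contained sketch of that pattern follows, with hypothetical names (pool, pool_bits, NR_CMDS) rather than the driver's own symbols, and with the same assumption the driver makes that a free slot eventually turns up:

    #include <linux/atomic.h>
    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    #define NR_CMDS 256                     /* hypothetical pool size */

    struct pool_cmd {
            /* ... command payload would go here ... */
            atomic_t refcount;              /* last field, preserved across alloc */
    };

    static struct pool_cmd pool[NR_CMDS];
    static unsigned long pool_bits[BITS_TO_LONGS(NR_CMDS)];
    static unsigned long last_alloc;        /* benignly racy scan hint */

    static void pool_free(struct pool_cmd *c)
    {
            if (atomic_dec_and_test(&c->refcount))
                    clear_bit(c - pool, pool_bits);
    }

    static struct pool_cmd *pool_alloc(void)
    {
            unsigned long offset = last_alloc;
            unsigned long i;

            for (;;) {
                    i = find_next_zero_bit(pool_bits, NR_CMDS, offset);
                    if (i == NR_CMDS) {
                            offset = 0;     /* wrap around and rescan */
                            continue;
                    }
                    /* a 0 -> 1 transition of the refcount means the slot is ours */
                    if (atomic_inc_return(&pool[i].refcount) == 1)
                            break;
                    pool_free(&pool[i]);    /* lost the race; keep scanning */
                    offset = (i + 1) % NR_CMDS;
            }
            set_bit(i, pool_bits);          /* hint only; refcount is authoritative */
            last_alloc = i;
            return &pool[i];
    }

As in the patch, the refcount, not the bitmap bit, decides ownership, which is why a lost race is resolved with pool_free() rather than a bare decrement.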
@@ -4866,7 +4912,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
memset(buff, 0, iocommand.buf_size);
}
}
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (c == NULL) {
rc = -ENOMEM;
goto out_kfree;
@@ -4883,8 +4929,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->Header.SGTotal = cpu_to_le16(0);
}
memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
- /* use the kernel address the cmd block for tag */
- c->Header.tag = c->busaddr;
/* Fill in Request block */
memcpy(&c->Request, &iocommand.Request,
@@ -4925,7 +4969,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
}
out:
- cmd_special_free(h, c);
+ cmd_free(h, c);
out_kfree:
kfree(buff);
return rc;
@@ -4940,7 +4984,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
u64 temp64;
BYTE sg_used = 0;
int status = 0;
- int i;
u32 left;
u32 sz;
BYTE __user *data_ptr;
@@ -5004,7 +5047,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
data_ptr += sz;
sg_used++;
}
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (c == NULL) {
status = -ENOMEM;
goto cleanup1;
@@ -5014,7 +5057,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->Header.SGList = (u8) sg_used;
c->Header.SGTotal = cpu_to_le16(sg_used);
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
- c->Header.tag = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
if (ioc->buf_size > 0) {
int i;
@@ -5047,6 +5089,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
goto cleanup0;
}
if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
+ int i;
+
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
@@ -5059,9 +5103,11 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
status = 0;
cleanup0:
- cmd_special_free(h, c);
+ cmd_free(h, c);
cleanup1:
if (buff) {
+ int i;
+
for (i = 0; i < sg_used; i++)
kfree(buff[i]);
kfree(buff);
@@ -5079,35 +5125,6 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
(void) check_for_unit_attention(h, c);
}
-static int increment_passthru_count(struct ctlr_info *h)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&h->passthru_count_lock, flags);
- if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
- spin_unlock_irqrestore(&h->passthru_count_lock, flags);
- return -1;
- }
- h->passthru_count++;
- spin_unlock_irqrestore(&h->passthru_count_lock, flags);
- return 0;
-}
-
-static void decrement_passthru_count(struct ctlr_info *h)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&h->passthru_count_lock, flags);
- if (h->passthru_count <= 0) {
- spin_unlock_irqrestore(&h->passthru_count_lock, flags);
- /* not expecting to get here. */
- dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
- return;
- }
- h->passthru_count--;
- spin_unlock_irqrestore(&h->passthru_count_lock, flags);
-}
-
/*
* ioctl
*/
@@ -5130,16 +5147,16 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
case CCISS_GETDRIVVER:
return hpsa_getdrivver_ioctl(h, argp);
case CCISS_PASSTHRU:
- if (increment_passthru_count(h))
+ if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
return -EAGAIN;
rc = hpsa_passthru_ioctl(h, argp);
- decrement_passthru_count(h);
+ atomic_inc(&h->passthru_cmds_avail);
return rc;
case CCISS_BIG_PASSTHRU:
- if (increment_passthru_count(h))
+ if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
return -EAGAIN;
rc = hpsa_big_passthru_ioctl(h, argp);
- decrement_passthru_count(h);
+ atomic_inc(&h->passthru_cmds_avail);
return rc;
default:
return -ENOTTY;
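
The passthru throttle above is effectively a counting semaphore built from a single atomic. A rough sketch of the idiom, using hypothetical names (slots_avail, MAX_CONCURRENT, do_throttled_op) rather than the driver's ioctl paths:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    #define MAX_CONCURRENT 10

    static atomic_t slots_avail = ATOMIC_INIT(MAX_CONCURRENT);

    static int do_throttled_op(void)
    {
            int rc;

            /* atomic_dec_if_positive() returns < 0 if no slot was available */
            if (atomic_dec_if_positive(&slots_avail) < 0)
                    return -EAGAIN;

            rc = 0;  /* ... perform the passthru-style operation here ... */

            atomic_inc(&slots_avail);       /* give the slot back */
            return rc;
    }

Because atomic_dec_if_positive() refuses to take the count below zero, a negative return means no slot was claimed and nothing needs to be undone.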
@@ -5173,7 +5190,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
{
int pci_dir = XFER_NONE;
struct CommandList *a; /* for commands to be aborted */
- u32 tupper, tlower;
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
@@ -5184,7 +5200,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Header.SGList = 0;
c->Header.SGTotal = cpu_to_le16(0);
}
- c->Header.tag = c->busaddr;
memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
if (cmd_type == TYPE_CMD) {
@@ -5256,6 +5271,16 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[7] = (size >> 16) & 0xFF;
c->Request.CDB[8] = (size >> 8) & 0xFF;
break;
+ case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+ c->Request.CDBLen = 10;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_READ;
+ c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ break;
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
@@ -5281,10 +5306,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
break;
case HPSA_ABORT_MSG:
a = buff; /* point to command to be aborted */
- dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx",
+ dev_dbg(&h->pdev->dev,
+ "Abort Tag:0x%016llx request Tag:0x%016llx",
a->Header.tag, c->Header.tag);
- tlower = (u32) (a->Header.tag >> 32);
- tupper = (u32) (a->Header.tag & 0x0ffffffffULL);
c->Request.CDBLen = 16;
c->Request.type_attr_dir =
TYPE_ATTR_DIR(cmd_type,
@@ -5295,14 +5319,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[2] = 0x00; /* reserved */
c->Request.CDB[3] = 0x00; /* reserved */
/* Tag to abort goes in CDB[4]-CDB[11] */
- c->Request.CDB[4] = tlower & 0xFF;
- c->Request.CDB[5] = (tlower >> 8) & 0xFF;
- c->Request.CDB[6] = (tlower >> 16) & 0xFF;
- c->Request.CDB[7] = (tlower >> 24) & 0xFF;
- c->Request.CDB[8] = tupper & 0xFF;
- c->Request.CDB[9] = (tupper >> 8) & 0xFF;
- c->Request.CDB[10] = (tupper >> 16) & 0xFF;
- c->Request.CDB[11] = (tupper >> 24) & 0xFF;
+ memcpy(&c->Request.CDB[4], &a->Header.tag,
+ sizeof(a->Header.tag));
c->Request.CDB[12] = 0x00; /* reserved */
c->Request.CDB[13] = 0x00; /* reserved */
c->Request.CDB[14] = 0x00; /* reserved */
@@ -5349,47 +5367,6 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
return page_remapped ? (page_remapped + page_offs) : NULL;
}
-/* Takes cmds off the submission queue and sends them to the hardware,
- * then puts them on the queue of cmds waiting for completion.
- * Assumes h->lock is held
- */
-static void start_io(struct ctlr_info *h, unsigned long *flags)
-{
- struct CommandList *c;
-
- while (!list_empty(&h->reqQ)) {
- c = list_entry(h->reqQ.next, struct CommandList, list);
- /* can't do anything if fifo is full */
- if ((h->access.fifo_full(h))) {
- h->fifo_recently_full = 1;
- dev_warn(&h->pdev->dev, "fifo full\n");
- break;
- }
- h->fifo_recently_full = 0;
-
- /* Get the first entry from the Request Q */
- removeQ(c);
- h->Qdepth--;
-
- /* Put job onto the completed Q */
- addQ(&h->cmpQ, c);
- atomic_inc(&h->commands_outstanding);
- spin_unlock_irqrestore(&h->lock, *flags);
- /* Tell the controller execute command */
- h->access.submit_command(h, c);
- spin_lock_irqsave(&h->lock, *flags);
- }
-}
-
-static void lock_and_start_io(struct ctlr_info *h)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- start_io(h, &flags);
- spin_unlock_irqrestore(&h->lock, flags);
-}
-
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
return h->access.command_completed(h, q);
@@ -5418,53 +5395,12 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
static inline void finish_cmd(struct CommandList *c)
{
- unsigned long flags;
- int io_may_be_stalled = 0;
- struct ctlr_info *h = c->h;
- int count;
-
- spin_lock_irqsave(&h->lock, flags);
- removeQ(c);
-
- /*
- * Check for possibly stalled i/o.
- *
- * If a fifo_full condition is encountered, requests will back up
- * in h->reqQ. This queue is only emptied out by start_io which is
- * only called when a new i/o request comes in. If no i/o's are
- * forthcoming, the i/o's in h->reqQ can get stuck. So we call
- * start_io from here if we detect such a danger.
- *
- * Normally, we shouldn't hit this case, but pounding on the
- * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
- * commands_outstanding is low. We want to avoid calling
- * start_io from in here as much as possible, and esp. don't
- * want to get in a cycle where we call start_io every time
- * through here.
- */
- count = atomic_read(&h->commands_outstanding);
- spin_unlock_irqrestore(&h->lock, flags);
- if (unlikely(h->fifo_recently_full) && count < 5)
- io_may_be_stalled = 1;
-
dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
|| c->cmd_type == CMD_IOACCEL2))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
- if (unlikely(io_may_be_stalled))
- lock_and_start_io(h);
-}
-
-static inline u32 hpsa_tag_contains_index(u32 tag)
-{
- return tag & DIRECT_LOOKUP_BIT;
-}
-
-static inline u32 hpsa_tag_to_index(u32 tag)
-{
- return tag >> DIRECT_LOOKUP_SHIFT;
}
@@ -5484,34 +5420,13 @@ static inline void process_indexed_cmd(struct ctlr_info *h,
u32 tag_index;
struct CommandList *c;
- tag_index = hpsa_tag_to_index(raw_tag);
+ tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
if (!bad_tag(h, tag_index, raw_tag)) {
c = h->cmd_pool + tag_index;
finish_cmd(c);
}
}
-/* process completion of a non-indexed command */
-static inline void process_nonindexed_cmd(struct ctlr_info *h,
- u32 raw_tag)
-{
- u32 tag;
- struct CommandList *c = NULL;
- unsigned long flags;
-
- tag = hpsa_tag_discard_error_bits(h, raw_tag);
- spin_lock_irqsave(&h->lock, flags);
- list_for_each_entry(c, &h->cmpQ, list) {
- if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
- spin_unlock_irqrestore(&h->lock, flags);
- finish_cmd(c);
- return;
- }
- }
- spin_unlock_irqrestore(&h->lock, flags);
- bad_tag(h, h->nr_cmds + 1, raw_tag);
-}
-
/* Some controllers, like p400, will give us one interrupt
* after a soft reset, even if we turned interrupts off.
* Only need to check for this in the hpsa_xxx_discard_completions
@@ -5589,10 +5504,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h, q);
while (raw_tag != FIFO_EMPTY) {
- if (likely(hpsa_tag_contains_index(raw_tag)))
- process_indexed_cmd(h, raw_tag);
- else
- process_nonindexed_cmd(h, raw_tag);
+ process_indexed_cmd(h, raw_tag);
raw_tag = next_command(h, q);
}
}
@@ -5608,10 +5520,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
h->last_intr_timestamp = get_jiffies_64();
raw_tag = get_next_completion(h, q);
while (raw_tag != FIFO_EMPTY) {
- if (likely(hpsa_tag_contains_index(raw_tag)))
- process_indexed_cmd(h, raw_tag);
- else
- process_nonindexed_cmd(h, raw_tag);
+ process_indexed_cmd(h, raw_tag);
raw_tag = next_command(h, q);
}
return IRQ_HANDLED;
@@ -5633,7 +5542,8 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
static const size_t cmd_sz = sizeof(*cmd) +
sizeof(cmd->ErrorDescriptor);
dma_addr_t paddr64;
- uint32_t paddr32, tag;
+ __le32 paddr32;
+ u32 tag;
void __iomem *vaddr;
int i, err;
@@ -5648,7 +5558,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
- return -ENOMEM;
+ return err;
}
cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
@@ -5661,12 +5571,12 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
* although there's no guarantee, we assume that the address is at
* least 4-byte aligned (most likely, it's page-aligned).
*/
- paddr32 = paddr64;
+ paddr32 = cpu_to_le32(paddr64);
cmd->CommandHeader.ReplyQueue = 0;
cmd->CommandHeader.SGList = 0;
cmd->CommandHeader.SGTotal = cpu_to_le16(0);
- cmd->CommandHeader.tag = paddr32;
+ cmd->CommandHeader.tag = cpu_to_le64(paddr64);
memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
cmd->Request.CDBLen = 16;
@@ -5677,14 +5587,14 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
cmd->Request.CDB[1] = type;
memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
cmd->ErrorDescriptor.Addr =
- cpu_to_le64((paddr32 + sizeof(*cmd)));
+ cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
- writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+ writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
+ if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
@@ -5718,8 +5628,6 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void __iomem *vaddr, u32 use_doorbell)
{
- u16 pmcsr;
- int pos;
if (use_doorbell) {
/* For everything after the P600, the PCI power state method
@@ -5745,26 +5653,21 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
* this causes a secondary PCI reset which will reset the
* controller." */
- pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pos == 0) {
- dev_err(&pdev->dev,
- "hpsa_reset_controller: "
- "PCI PM not supported\n");
- return -ENODEV;
- }
+ int rc = 0;
+
dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+
/* enter the D3hot power management state */
- pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D3hot;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ rc = pci_set_power_state(pdev, PCI_D3hot);
+ if (rc)
+ return rc;
msleep(500);
/* enter the D0 power management state */
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D0;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ rc = pci_set_power_state(pdev, PCI_D0);
+ if (rc)
+ return rc;
/*
* The P600 requires a small delay when changing states.
@@ -5858,8 +5761,12 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
*/
rc = hpsa_lookup_board_id(pdev, &board_id);
- if (rc < 0 || !ctlr_is_resettable(board_id)) {
- dev_warn(&pdev->dev, "Not resetting device.\n");
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Board ID not found\n");
+ return rc;
+ }
+ if (!ctlr_is_resettable(board_id)) {
+ dev_warn(&pdev->dev, "Controller not resettable\n");
return -ENODEV;
}
@@ -5892,7 +5799,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
}
rc = write_driver_ver_to_cfgtable(cfgtable);
if (rc)
- goto unmap_vaddr;
+ goto unmap_cfgtable;
/* If reset via doorbell register is supported, use that.
* There are two such methods. Favor the newest method.
@@ -5904,8 +5811,8 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
} else {
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
if (use_doorbell) {
- dev_warn(&pdev->dev, "Soft reset not supported. "
- "Firmware update is required.\n");
+ dev_warn(&pdev->dev,
+ "Soft reset not supported. Firmware update is required.\n");
rc = -ENOTSUPP; /* try soft reset */
goto unmap_cfgtable;
}
@@ -5925,8 +5832,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
if (rc) {
dev_warn(&pdev->dev,
- "failed waiting for board to become ready "
- "after hard reset\n");
+ "Failed waiting for board to become ready after hard reset\n");
goto unmap_cfgtable;
}
@@ -5977,7 +5883,7 @@ static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
readl(&(tb->HostWrite.CoalIntDelay)));
dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
readl(&(tb->HostWrite.CoalIntCount)));
- dev_info(dev, " Max outstanding commands = 0x%d\n",
+ dev_info(dev, " Max outstanding commands = %d\n",
readl(&(tb->CmdsOutMax)));
dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
for (i = 0; i < 16; i++)
@@ -6025,7 +5931,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
- * controllers that are capable. If not, we use IO-APIC mode.
+ * controllers that are capable. If not, we use legacy INTx mode.
*/
static void hpsa_interrupt_mode(struct ctlr_info *h)
@@ -6044,7 +5950,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
(h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- dev_info(&h->pdev->dev, "MSIX\n");
+ dev_info(&h->pdev->dev, "MSI-X capable controller\n");
h->msix_vector = MAX_REPLY_QUEUES;
if (h->msix_vector > num_online_cpus())
h->msix_vector = num_online_cpus();
@@ -6065,7 +5971,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
}
single_msi_mode:
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
- dev_info(&h->pdev->dev, "MSI\n");
+ dev_info(&h->pdev->dev, "MSI capable controller\n");
if (!pci_enable_msi(h->pdev))
h->msi_vector = 1;
else
@@ -6172,8 +6078,10 @@ static int hpsa_find_cfgtables(struct ctlr_info *h)
return rc;
h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
- if (!h->cfgtable)
+ if (!h->cfgtable) {
+ dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
return -ENOMEM;
+ }
rc = write_driver_ver_to_cfgtable(h->cfgtable);
if (rc)
return rc;
@@ -6204,6 +6112,15 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
}
}
+/* If the controller reports that the total max sg entries is greater than 512,
+ * then we know that chained SG blocks work. (Original smart arrays did not
+ * support chained SG blocks and would return zero for max sg entries.)
+ */
+static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
+{
+ return h->maxsgentries > 512;
+}
+
/* Interrogate the hardware for some limits:
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
@@ -6211,21 +6128,23 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
static void hpsa_find_board_params(struct ctlr_info *h)
{
hpsa_get_max_perf_mode_cmds(h);
- h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
+ h->nr_cmds = h->max_commands;
h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
h->fw_support = readl(&(h->cfgtable->misc_fw_support));
- /*
- * Limit in-command s/g elements to 32 save dma'able memory.
- * Howvever spec says if 0, use 31
- */
- h->max_cmd_sg_entries = 31;
- if (h->maxsgentries > 512) {
+ if (hpsa_supports_chained_sg_blocks(h)) {
+ /* Limit in-command s/g elements to 32 to save dma'able memory. */
h->max_cmd_sg_entries = 32;
h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
h->maxsgentries--; /* save one for chain pointer */
} else {
- h->chainsize = 0;
+ /*
+ * Original smart arrays supported at most 31 s/g entries
+ * embedded inline in the command (trying to use more
+ * would lock up the controller)
+ */
+ h->max_cmd_sg_entries = 31;
h->maxsgentries = 31; /* default to traditional values */
+ h->chainsize = 0;
}
/* Find out what task management functions are supported and cache */
@@ -6239,7 +6158,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
- dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
+ dev_err(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
return true;
@@ -6272,24 +6191,27 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
-static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
int i;
u32 doorbell_value;
unsigned long flags;
/* wait until the clear_event_notify bit 6 is cleared by controller. */
- for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
spin_lock_irqsave(&h->lock, flags);
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
spin_unlock_irqrestore(&h->lock, flags);
if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
- break;
+ goto done;
/* delay and try again */
- msleep(20);
+ msleep(CLEAR_EVENT_WAIT_INTERVAL);
}
+ return -ENODEV;
+done:
+ return 0;
}
-static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
+static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
u32 doorbell_value;
@@ -6299,17 +6221,21 @@ static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
- for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
spin_lock_irqsave(&h->lock, flags);
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
spin_unlock_irqrestore(&h->lock, flags);
if (!(doorbell_value & CFGTBL_ChangeReq))
- break;
+ goto done;
/* delay and try again */
- usleep_range(10000, 20000);
+ msleep(MODE_CHANGE_WAIT_INTERVAL);
}
+ return -ENODEV;
+done:
+ return 0;
}
+/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
@@ -6324,14 +6250,15 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
+ if (hpsa_wait_for_mode_change_ack(h))
+ goto error;
print_cfg_table(&h->pdev->dev, h->cfgtable);
if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
goto error;
h->transMethod = CFGTBL_Trans_Simple;
return 0;
error:
- dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+ dev_err(&h->pdev->dev, "failed to enter simple mode\n");
return -ENODEV;
}
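
Both ack helpers above reduce to the same bounded doorbell poll, now with a real error return that hpsa_enter_simple_mode() propagates. A generic sketch of the pattern, assuming a hypothetical register offset and bit rather than the SA5 definitions:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    #define MAX_ACK_WAIT            1000    /* polling iterations */
    #define ACK_WAIT_INTERVAL       10      /* ms between polls */
    #define DOORBELL_REG            0x20    /* hypothetical offset */
    #define CHANGE_REQ_BIT          0x01    /* hypothetical ack bit */

    static int wait_for_doorbell_ack(void __iomem *vaddr)
    {
            int i;

            for (i = 0; i < MAX_ACK_WAIT; i++) {
                    if (!(readl(vaddr + DOORBELL_REG) & CHANGE_REQ_BIT))
                            return 0;               /* controller acknowledged */
                    msleep(ACK_WAIT_INTERVAL);      /* delay and try again */
            }
            return -ENODEV;                         /* caller treats timeout as fatal */
    }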
@@ -6341,7 +6268,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
if (prod_index < 0)
- return -ENODEV;
+ return prod_index;
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
@@ -6422,6 +6349,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h)
static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
+ void __iomem *vaddr;
if (!reset_devices)
return 0;
@@ -6445,6 +6373,14 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
pci_set_master(pdev);
+ vaddr = pci_ioremap_bar(pdev, 0);
+ if (vaddr == NULL) {
+ rc = -ENOMEM;
+ goto out_disable;
+ }
+ writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ iounmap(vaddr);
+
/* Reset the controller with a PCI power-cycle or via doorbell */
rc = hpsa_kdump_hard_reset_controller(pdev);
@@ -6453,14 +6389,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
* "performant mode". Or, it might be 640x, which can't reset
* due to concerns about shared bbwc between 6402/6404 pair.
*/
- if (rc) {
- if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
- rc = -ENODEV;
+ if (rc)
goto out_disable;
- }
/* Now try to get the controller to respond to a no-op */
- dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
+ dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
if (hpsa_noop(pdev) == 0)
break;
@@ -6490,9 +6423,12 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
dev_err(&h->pdev->dev, "out of memory in %s", __func__);
- return -ENOMEM;
+ goto clean_up;
}
return 0;
+clean_up:
+ hpsa_free_cmd_pool(h);
+ return -ENOMEM;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
@@ -6519,16 +6455,38 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
- int i, cpu, rc;
+ int i, cpu;
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < h->msix_vector; i++) {
- rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+ irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
}
-static int hpsa_request_irq(struct ctlr_info *h,
+/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
+static void hpsa_free_irqs(struct ctlr_info *h)
+{
+ int i;
+
+ if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ /* Single reply queue, only one irq to free */
+ i = h->intr_mode;
+ irq_set_affinity_hint(h->intr[i], NULL);
+ free_irq(h->intr[i], &h->q[i]);
+ return;
+ }
+
+ for (i = 0; i < h->msix_vector; i++) {
+ irq_set_affinity_hint(h->intr[i], NULL);
+ free_irq(h->intr[i], &h->q[i]);
+ }
+ for (; i < MAX_REPLY_QUEUES; i++)
+ h->q[i] = 0;
+}
+
+/* returns 0 on success; cleans up and returns -Enn on error */
+static int hpsa_request_irqs(struct ctlr_info *h,
irqreturn_t (*msixhandler)(int, void *),
irqreturn_t (*intxhandler)(int, void *))
{
@@ -6543,10 +6501,25 @@ static int hpsa_request_irq(struct ctlr_info *h,
if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < h->msix_vector; i++)
+ for (i = 0; i < h->msix_vector; i++) {
rc = request_irq(h->intr[i], msixhandler,
0, h->devname,
&h->q[i]);
+ if (rc) {
+ int j;
+
+ dev_err(&h->pdev->dev,
+ "failed to get irq %d for %s\n",
+ h->intr[i], h->devname);
+ for (j = 0; j < i; j++) {
+ free_irq(h->intr[j], &h->q[j]);
+ h->q[j] = 0;
+ }
+ for (; j < MAX_REPLY_QUEUES; j++)
+ h->q[j] = 0;
+ return rc;
+ }
+ }
hpsa_irq_affinity_hints(h);
} else {
/* Use single reply pool */
@@ -6592,27 +6565,9 @@ static int hpsa_kdump_soft_reset(struct ctlr_info *h)
return 0;
}
-static void free_irqs(struct ctlr_info *h)
-{
- int i;
-
- if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
- /* Single reply queue, only one irq to free */
- i = h->intr_mode;
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
- return;
- }
-
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
- }
-}
-
static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
- free_irqs(h);
+ hpsa_free_irqs(h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector) {
if (h->pdev->msix_enabled)
@@ -6658,16 +6613,20 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
}
/* Called when controller lockup detected. */
-static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
+static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
- struct CommandList *c = NULL;
+ int i, refcount;
+ struct CommandList *c;
- assert_spin_locked(&h->lock);
- /* Mark all outstanding commands as failed and complete them. */
- while (!list_empty(list)) {
- c = list_entry(list->next, struct CommandList, list);
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- finish_cmd(c);
+ flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
+ for (i = 0; i < h->nr_cmds; i++) {
+ c = h->cmd_pool + i;
+ refcount = atomic_inc_return(&c->refcount);
+ if (refcount > 1) {
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ finish_cmd(c);
+ }
+ cmd_free(h, c);
}
}
@@ -6704,10 +6663,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
lockup_detected);
pci_disable_device(h->pdev);
- spin_lock_irqsave(&h->lock, flags);
- fail_all_cmds_on_list(h, &h->cmpQ);
- fail_all_cmds_on_list(h, &h->reqQ);
- spin_unlock_irqrestore(&h->lock, flags);
+ fail_all_outstanding_cmds(h);
}
static void detect_controller_lockup(struct ctlr_info *h)
@@ -6750,8 +6706,8 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
int i;
char *event_type;
- /* Clear the driver-requested rescan flag */
- h->drv_req_rescan = 0;
+ if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+ return;
/* Ask the controller to clear the events we're handling. */
if ((h->transMethod & (CFGTBL_Trans_io_accel1
@@ -6798,9 +6754,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
*/
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
- if (h->drv_req_rescan)
- return 1;
-
if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
return 0;
@@ -6834,34 +6787,60 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h)
return 0;
}
-
-static void hpsa_monitor_ctlr_worker(struct work_struct *work)
+static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
unsigned long flags;
struct ctlr_info *h = container_of(to_delayed_work(work),
- struct ctlr_info, monitor_ctlr_work);
- detect_controller_lockup(h);
- if (lockup_detected(h))
+ struct ctlr_info, rescan_ctlr_work);
+
+ if (h->remove_in_progress)
return;
if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
scsi_host_get(h->scsi_host);
- h->drv_req_rescan = 0;
hpsa_ack_ctlr_events(h);
hpsa_scan_start(h->scsi_host);
scsi_host_put(h->scsi_host);
}
-
spin_lock_irqsave(&h->lock, flags);
- if (h->remove_in_progress) {
- spin_unlock_irqrestore(&h->lock, flags);
+ if (!h->remove_in_progress)
+ queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
+ h->heartbeat_sample_interval);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static void hpsa_monitor_ctlr_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, monitor_ctlr_work);
+
+ detect_controller_lockup(h);
+ if (lockup_detected(h))
return;
- }
- schedule_delayed_work(&h->monitor_ctlr_work,
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (!h->remove_in_progress)
+ schedule_delayed_work(&h->monitor_ctlr_work,
h->heartbeat_sample_interval);
spin_unlock_irqrestore(&h->lock, flags);
}
+static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
+ char *name)
+{
+ struct workqueue_struct *wq = NULL;
+ char wq_name[20];
+
+ snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
+ wq = alloc_ordered_workqueue(wq_name, 0);
+ if (!wq)
+ dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
+
+ return wq;
+}
+
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int dac, rc;
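
hpsa_create_controller_wq() plus the self-requeueing rescan worker above form a common pattern: a private ordered workqueue driving a delayed work item that re-arms itself until teardown. A stripped-down sketch under hypothetical names (my_wq, my_worker, a 30-second period):

    #include <linux/jiffies.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;
    static struct delayed_work my_work;
    static bool shutting_down;

    static void my_worker(struct work_struct *work)
    {
            if (shutting_down)
                    return;
            /* ... do the periodic rescan-style work here ... */
            if (!shutting_down)
                    queue_delayed_work(my_wq, &my_work, 30 * HZ);   /* re-arm */
    }

    static int my_start(void)
    {
            my_wq = alloc_ordered_workqueue("my_wq", 0);
            if (!my_wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&my_work, my_worker);
            queue_delayed_work(my_wq, &my_work, 30 * HZ);
            return 0;
    }

    static void my_stop(void)
    {
            shutting_down = true;
            cancel_delayed_work_sync(&my_work);
            destroy_workqueue(my_wq);
    }

As in the patch, teardown sets the flag first, then cancels synchronously, then destroys the workqueue, so no re-armed work can outlive the queue.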
@@ -6898,13 +6877,23 @@ reinit_after_soft_reset:
h->pdev = pdev;
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
- INIT_LIST_HEAD(&h->cmpQ);
- INIT_LIST_HEAD(&h->reqQ);
INIT_LIST_HEAD(&h->offline_device_list);
spin_lock_init(&h->lock);
spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
- spin_lock_init(&h->passthru_count_lock);
+ atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
+
+ h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
+ if (!h->rescan_ctlr_wq) {
+ rc = -ENOMEM;
+ goto clean1;
+ }
+
+ h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
+ if (!h->resubmit_wq) {
+ rc = -ENOMEM;
+ goto clean1;
+ }
/* Allocate and clear per-cpu variable lockup_detected */
h->lockup_detected = alloc_percpu(u32);
@@ -6939,13 +6928,14 @@ reinit_after_soft_reset:
/* make sure the board interrupts are off */
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
+ if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
goto clean2;
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
h->intr[h->intr_mode], dac ? "" : " not");
- if (hpsa_allocate_cmd_pool(h))
- goto clean4;
+ rc = hpsa_allocate_cmd_pool(h);
+ if (rc)
+ goto clean2_and_free_irqs;
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
init_waitqueue_head(&h->scan_wait_queue);
@@ -6974,12 +6964,12 @@ reinit_after_soft_reset:
spin_lock_irqsave(&h->lock, flags);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_unlock_irqrestore(&h->lock, flags);
- free_irqs(h);
- rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+ hpsa_free_irqs(h);
+ rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
hpsa_intx_discard_completions);
if (rc) {
- dev_warn(&h->pdev->dev, "Failed to request_irq after "
- "soft reset.\n");
+ dev_warn(&h->pdev->dev,
+ "Failed to request_irq after soft reset.\n");
goto clean4;
}
@@ -7016,7 +7006,6 @@ reinit_after_soft_reset:
/* Enable Accelerated IO path at driver layer */
h->acciopath_status = 1;
- h->drv_req_rescan = 0;
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
@@ -7029,14 +7018,22 @@ reinit_after_soft_reset:
INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
schedule_delayed_work(&h->monitor_ctlr_work,
h->heartbeat_sample_interval);
+ INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
+ queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
+ h->heartbeat_sample_interval);
return 0;
clean4:
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
- free_irqs(h);
+clean2_and_free_irqs:
+ hpsa_free_irqs(h);
clean2:
clean1:
+ if (h->resubmit_wq)
+ destroy_workqueue(h->resubmit_wq);
+ if (h->rescan_ctlr_wq)
+ destroy_workqueue(h->rescan_ctlr_wq);
if (h->lockup_detected)
free_percpu(h->lockup_detected);
kfree(h);
@@ -7055,9 +7052,9 @@ static void hpsa_flush_cache(struct ctlr_info *h)
if (!flush_buf)
return;
- c = cmd_special_alloc(h);
+ c = cmd_alloc(h);
if (!c) {
- dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
goto out_of_memory;
}
if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
@@ -7069,7 +7066,7 @@ static void hpsa_flush_cache(struct ctlr_info *h)
out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
- cmd_special_free(h, c);
+ cmd_free(h, c);
out_of_memory:
kfree(flush_buf);
}
@@ -7110,9 +7107,11 @@ static void hpsa_remove_one(struct pci_dev *pdev)
/* Get rid of any controller monitoring work items */
spin_lock_irqsave(&h->lock, flags);
h->remove_in_progress = 1;
- cancel_delayed_work(&h->monitor_ctlr_work);
spin_unlock_irqrestore(&h->lock, flags);
-
+ cancel_delayed_work_sync(&h->monitor_ctlr_work);
+ cancel_delayed_work_sync(&h->rescan_ctlr_work);
+ destroy_workqueue(h->rescan_ctlr_wq);
+ destroy_workqueue(h->resubmit_wq);
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
hpsa_shutdown(pdev);
iounmap(h->vaddr);
@@ -7172,7 +7171,7 @@ static struct pci_driver hpsa_pci_driver = {
* bits of the command address.
*/
static void calc_bucket_map(int bucket[], int num_buckets,
- int nsgs, int min_blocks, int *bucket_map)
+ int nsgs, int min_blocks, u32 *bucket_map)
{
int i, j, b, size;
@@ -7193,7 +7192,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+/* return -ENODEV or other reason on error, 0 on success */
+static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
int i;
unsigned long register_value;
@@ -7285,12 +7285,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
}
}
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
+ if (hpsa_wait_for_mode_change_ack(h)) {
+ dev_err(&h->pdev->dev,
+ "performant mode problem - doorbell timeout\n");
+ return -ENODEV;
+ }
register_value = readl(&(h->cfgtable->TransportActive));
if (!(register_value & CFGTBL_Trans_Performant)) {
- dev_warn(&h->pdev->dev, "unable to get board into"
- " performant mode\n");
- return;
+ dev_err(&h->pdev->dev,
+ "performant mode problem - transport not active\n");
+ return -ENODEV;
}
/* Change the access methods to the performant access methods */
h->access = access;
@@ -7298,7 +7302,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
if (!((trans_support & CFGTBL_Trans_io_accel1) ||
(trans_support & CFGTBL_Trans_io_accel2)))
- return;
+ return 0;
if (trans_support & CFGTBL_Trans_io_accel1) {
/* Set up I/O accelerator mode */
@@ -7328,12 +7332,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
(i * sizeof(struct ErrorInfo)));
cp->err_info_len = sizeof(struct ErrorInfo);
cp->sgl_offset = IOACCEL1_SGLOFFSET;
- cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+ cp->host_context_flags =
+ cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
cp->timeout_sec = 0;
cp->ReplyQueue = 0;
cp->tag =
- cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
- DIRECT_LOOKUP_BIT);
+ cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
cp->host_addr =
cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
(i * sizeof(struct io_accel1_cmd)));
@@ -7362,7 +7366,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
}
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
+ if (hpsa_wait_for_mode_change_ack(h)) {
+ dev_err(&h->pdev->dev,
+ "performant mode problem - enabling ioaccel mode\n");
+ return -ENODEV;
+ }
+ return 0;
}
static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
@@ -7508,17 +7517,18 @@ static int is_accelerated_cmd(struct CommandList *c)
static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
struct CommandList *c = NULL;
- unsigned long flags;
- int accel_cmds_out;
+ int i, accel_cmds_out;
+ int refcount;
- do { /* wait for all outstanding commands to drain out */
+ do { /* wait for all outstanding ioaccel commands to drain out */
accel_cmds_out = 0;
- spin_lock_irqsave(&h->lock, flags);
- list_for_each_entry(c, &h->cmpQ, list)
- accel_cmds_out += is_accelerated_cmd(c);
- list_for_each_entry(c, &h->reqQ, list)
- accel_cmds_out += is_accelerated_cmd(c);
- spin_unlock_irqrestore(&h->lock, flags);
+ for (i = 0; i < h->nr_cmds; i++) {
+ c = h->cmd_pool + i;
+ refcount = atomic_inc_return(&c->refcount);
+ if (refcount > 1) /* Command is allocated */
+ accel_cmds_out += is_accelerated_cmd(c);
+ cmd_free(h, c);
+ }
if (accel_cmds_out <= 0)
break;
msleep(100);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 8e06d9e280ec..657713050349 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -32,7 +32,6 @@ struct access_method {
void (*submit_command)(struct ctlr_info *h,
struct CommandList *c);
void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
- unsigned long (*fifo_full)(struct ctlr_info *h);
bool (*intr_pending)(struct ctlr_info *h);
unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
@@ -47,6 +46,11 @@ struct hpsa_scsi_dev_t {
unsigned char model[16]; /* bytes 16-31 of inquiry data */
unsigned char raid_level; /* from inquiry page 0xC1 */
unsigned char volume_offline; /* discovered via TUR or VPD */
+ u16 queue_depth; /* max queue_depth for this device */
+ atomic_t ioaccel_cmds_out; /* Only used for physical devices;
+ * counts commands sent to the physical
+ * device via the "ioaccel" path.
+ */
u32 ioaccel_handle;
int offload_config; /* I/O accel RAID offload configured */
int offload_enabled; /* I/O accel RAID offload enabled */
@@ -55,6 +59,15 @@ struct hpsa_scsi_dev_t {
*/
struct raid_map_data raid_map; /* I/O accelerator RAID map */
+ /*
+ * Pointers from logical drive map indices to the phys drives that
+ * make up those logical drives. Note that multiple logical drives
+ * may share physical drives: for instance, 3 logical drives might
+ * each be built from the same 5 physical disks. We need these
+ * pointers for counting i/o's out to physical devices in order to
+ * honor physical device queue depth limits.
+ */
+ struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
};
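
The new queue_depth and ioaccel_cmds_out fields exist so the driver can stop feeding a physical disk once it is at its advertised limit. A rough sketch of that accounting, using hypothetical helpers rather than the driver's actual submit path:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct phys_disk {
            u16 queue_depth;                /* advertised per-device limit */
            atomic_t cmds_out;              /* ioaccel commands in flight */
    };

    static int phys_disk_start_io(struct phys_disk *d)
    {
            if (atomic_inc_return(&d->cmds_out) > d->queue_depth) {
                    atomic_dec(&d->cmds_out);       /* over the limit, back off */
                    return -EBUSY;                  /* caller falls back to the RAID path */
            }
            return 0;
    }

    static void phys_disk_end_io(struct phys_disk *d)
    {
            atomic_dec(&d->cmds_out);
    }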
struct reply_queue_buffer {
@@ -115,9 +128,12 @@ struct ctlr_info {
void __iomem *vaddr;
unsigned long paddr;
int nr_cmds; /* Number of commands allowed on this controller */
+#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
+#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
struct CfgTable __iomem *cfgtable;
int interrupts_enabled;
int max_commands;
+ int last_allocation;
atomic_t commands_outstanding;
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
@@ -131,8 +147,6 @@ struct ctlr_info {
char hba_mode_enabled;
/* queue and queue Info */
- struct list_head reqQ;
- struct list_head cmpQ;
unsigned int Qdepth;
unsigned int maxSG;
spinlock_t lock;
@@ -168,9 +182,8 @@ struct ctlr_info {
unsigned long transMethod;
/* cap concurrent passthrus at some reasonable maximum */
-#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
- spinlock_t passthru_count_lock; /* protects passthru_count */
- int passthru_count;
+#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
+ atomic_t passthru_cmds_avail;
/*
* Performant mode completion buffers
@@ -194,8 +207,8 @@ struct ctlr_info {
atomic_t firmware_flash_in_progress;
u32 __percpu *lockup_detected;
struct delayed_work monitor_ctlr_work;
+ struct delayed_work rescan_ctlr_work;
int remove_in_progress;
- u32 fifo_recently_full;
/* Address of h->q[x] is passed to intr handler to know which queue */
u8 q[MAX_REPLY_QUEUES];
u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
@@ -237,8 +250,9 @@ struct ctlr_info {
spinlock_t offline_device_lock;
struct list_head offline_device_list;
int acciopath_status;
- int drv_req_rescan; /* flag for driver to request rescan event */
int raid_offload_debug;
+ struct workqueue_struct *resubmit_wq;
+ struct workqueue_struct *rescan_ctlr_wq;
};
struct offline_device_entry {
@@ -297,6 +311,8 @@ struct offline_device_entry {
*/
#define SA5_DOORBELL 0x20
#define SA5_REQUEST_PORT_OFFSET 0x40
+#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
+#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET 0x34
#define SA5_REPLY_PORT_OFFSET 0x44
#define SA5_INTR_STATUS 0x30
@@ -353,10 +369,7 @@ static void SA5_submit_command_no_read(struct ctlr_info *h,
static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
struct CommandList *c)
{
- if (c->cmd_type == CMD_IOACCEL2)
- writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
- else
- writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
/*
@@ -398,19 +411,19 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
unsigned long register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */
- if (!(h->msi_vector || h->msix_vector)) {
+ if (unlikely(!(h->msi_vector || h->msix_vector))) {
/* flush the controller write of the reply queue by reading
* outbound doorbell status register.
*/
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ (void) readl(h->vaddr + SA5_OUTDB_STATUS);
writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
/* Do a read in order to flush the write to the controller
* (as per spec.)
*/
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ (void) readl(h->vaddr + SA5_OUTDB_STATUS);
}
- if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+ if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
register_value = rq->head[rq->current_entry];
rq->current_entry++;
atomic_dec(&h->commands_outstanding);
@@ -426,14 +439,6 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
}
/*
- * Returns true if fifo is full.
- *
- */
-static unsigned long SA5_fifo_full(struct ctlr_info *h)
-{
- return atomic_read(&h->commands_outstanding) >= h->max_commands;
-}
-/*
* returns value read from hardware.
* returns FIFO_EMPTY if there is nothing to read
*/
@@ -473,9 +478,6 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
if (!register_value)
return false;
- if (h->msi_vector || h->msix_vector)
- return true;
-
/* Read outbound doorbell to flush */
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
return register_value & SA5_OUTDB_STATUS_PERF_BIT;
@@ -525,7 +527,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
static struct access_method SA5_access = {
SA5_submit_command,
SA5_intr_mask,
- SA5_fifo_full,
SA5_intr_pending,
SA5_completed,
};
@@ -533,7 +534,6 @@ static struct access_method SA5_access = {
static struct access_method SA5_ioaccel_mode1_access = {
SA5_submit_command,
SA5_performant_intr_mask,
- SA5_fifo_full,
SA5_ioaccel_mode1_intr_pending,
SA5_ioaccel_mode1_completed,
};
@@ -541,7 +541,6 @@ static struct access_method SA5_ioaccel_mode1_access = {
static struct access_method SA5_ioaccel_mode2_access = {
SA5_submit_command_ioaccel2,
SA5_performant_intr_mask,
- SA5_fifo_full,
SA5_performant_intr_pending,
SA5_performant_completed,
};
@@ -549,7 +548,6 @@ static struct access_method SA5_ioaccel_mode2_access = {
static struct access_method SA5_performant_access = {
SA5_submit_command,
SA5_performant_intr_mask,
- SA5_fifo_full,
SA5_performant_intr_pending,
SA5_performant_completed,
};
@@ -557,7 +555,6 @@ static struct access_method SA5_performant_access = {
static struct access_method SA5_performant_access_no_read = {
SA5_submit_command_no_read,
SA5_performant_intr_mask,
- SA5_fifo_full,
SA5_performant_intr_pending,
SA5_performant_completed,
};
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index cb988c41cad9..3a621c74b76f 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -206,27 +206,27 @@ struct raid_map_disk_data {
};
struct raid_map_data {
- u32 structure_size; /* Size of entire structure in bytes */
- u32 volume_blk_size; /* bytes / block in the volume */
- u64 volume_blk_cnt; /* logical blocks on the volume */
+ __le32 structure_size; /* Size of entire structure in bytes */
+ __le32 volume_blk_size; /* bytes / block in the volume */
+ __le64 volume_blk_cnt; /* logical blocks on the volume */
u8 phys_blk_shift; /* Shift factor to convert between
* units of logical blocks and physical
* disk blocks */
u8 parity_rotation_shift; /* Shift factor to convert between units
* of logical stripes and physical
* stripes */
- u16 strip_size; /* blocks used on each disk / stripe */
- u64 disk_starting_blk; /* First disk block used in volume */
- u64 disk_blk_cnt; /* disk blocks used by volume / disk */
- u16 data_disks_per_row; /* data disk entries / row in the map */
- u16 metadata_disks_per_row; /* mirror/parity disk entries / row
+ __le16 strip_size; /* blocks used on each disk / stripe */
+ __le64 disk_starting_blk; /* First disk block used in volume */
+ __le64 disk_blk_cnt; /* disk blocks used by volume / disk */
+ __le16 data_disks_per_row; /* data disk entries / row in the map */
+ __le16 metadata_disks_per_row;/* mirror/parity disk entries / row
* in the map */
- u16 row_cnt; /* rows in each layout map */
- u16 layout_map_count; /* layout maps (1 map per mirror/parity
+ __le16 row_cnt; /* rows in each layout map */
+ __le16 layout_map_count; /* layout maps (1 map per mirror/parity
* group) */
- u16 flags; /* Bit 0 set if encryption enabled */
+ __le16 flags; /* Bit 0 set if encryption enabled */
#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
- u16 dekindex; /* Data encryption key index. */
+ __le16 dekindex; /* Data encryption key index. */
u8 reserved[16];
struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
};
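
Switching the controller-visible fields to __le16/__le32/__le64 lets sparse flag any access that skips a byte swap; host code is expected to go through cpu_to_leXX()/leXX_to_cpu(). A tiny illustration with a hypothetical on-wire structure, not one of the hpsa structures:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct wire_hdr {
            __le16 len;                     /* little-endian on the wire */
            __le32 total_blocks;
    };

    static void fill_hdr(struct wire_hdr *h, u16 len, u32 blocks)
    {
            h->len = cpu_to_le16(len);              /* host -> wire */
            h->total_blocks = cpu_to_le32(blocks);
    }

    static u32 read_blocks(const struct wire_hdr *h)
    {
            return le32_to_cpu(h->total_blocks);    /* wire -> host */
    }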
@@ -240,6 +240,10 @@ struct ReportLUNdata {
struct ext_report_lun_entry {
u8 lunid[8];
+#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F)
+#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6])
+#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \
+ GET_BMIC_LEVEL_TWO_TARGET((lunid)))
u8 wwid[8];
u8 device_type;
u8 device_flags;
@@ -268,6 +272,7 @@ struct SenseSubsystem_info {
#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
#define BMIC_FLASH_FIRMWARE 0xF7
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
/* Command List Structure */
union SCSI3Addr {
@@ -313,8 +318,8 @@ union LUNAddr {
struct CommandListHeader {
u8 ReplyQueue;
u8 SGList;
- u16 SGTotal;
- u64 tag;
+ __le16 SGTotal;
+ __le64 tag;
union LUNAddr LUN;
};
@@ -338,14 +343,14 @@ struct RequestBlock {
};
struct ErrDescriptor {
- u64 Addr;
- u32 Len;
+ __le64 Addr;
+ __le32 Len;
};
struct SGDescriptor {
- u64 Addr;
- u32 Len;
- u32 Ext;
+ __le64 Addr;
+ __le32 Len;
+ __le32 Ext;
};
union MoreErrInfo {
@@ -375,22 +380,19 @@ struct ErrorInfo {
#define CMD_IOACCEL1 0x04
#define CMD_IOACCEL2 0x05
-#define DIRECT_LOOKUP_SHIFT 5
-#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_SHIFT 4
#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
#define HPSA_ERROR_BIT 0x02
struct ctlr_info; /* defined in hpsa.h */
-/* The size of this structure needs to be divisible by 32
- * on all architectures because low 5 bits of the addresses
+/* The size of this structure needs to be divisible by 128
+ * on all architectures. The low 4 bits of the addresses
* are used as follows:
*
* bit 0: to device, used to indicate "performant mode" command
* from device, indicates error status.
* bit 1-3: to device, indicates block fetch table entry for
* reducing DMA in fetching commands from host memory.
- * bit 4: used to indicate whether tag is "direct lookup" (index),
- * or a bus address.
*/
#define COMMANDLIST_ALIGNMENT 128
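
With DIRECT_LOOKUP_BIT gone, a tag is nothing more than the command's pool index shifted left by DIRECT_LOOKUP_SHIFT, leaving the low four bits free for the performant-mode/block-fetch encoding; completion simply shifts back. A sketch of that encoding with stand-in names rather than the driver's macros:

    #include <linux/types.h>

    #define LOOKUP_SHIFT    4
    #define LOOKUP_MASK     (~((1u << LOOKUP_SHIFT) - 1))

    /* pool index -> tag as programmed into the command header */
    static inline u32 index_to_tag(u32 index)
    {
            return index << LOOKUP_SHIFT;
    }

    /* raw completion tag -> pool index (low bits carry status/fetch hints) */
    static inline u32 tag_to_index(u32 raw_tag)
    {
            return raw_tag >> LOOKUP_SHIFT;
    }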
@@ -405,9 +407,21 @@ struct CommandList {
struct ctlr_info *h;
int cmd_type;
long cmdindex;
- struct list_head list;
struct completion *waiting;
- void *scsi_cmd;
+ struct scsi_cmnd *scsi_cmd;
+ struct work_struct work;
+
+ /*
+ * For commands using either of the two "ioaccel" paths to
+ * bypass the RAID stack and go directly to the physical disk,
+ * phys_disk is a pointer to the hpsa_scsi_dev_t to which the
+ * i/o is destined. We need to store it here because the command
+ * may encounter TASK SET FULL and need to be resubmitted.
+ * For "normal" i/o's that do not use the "ioaccel" paths,
+ * phys_disk is not used.
+ */
+ struct hpsa_scsi_dev_t *phys_disk;
+ atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */
} __aligned(COMMANDLIST_ALIGNMENT);
/* Max S/G elements in I/O accelerator command */
@@ -420,7 +434,7 @@ struct CommandList {
*/
#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
struct io_accel1_cmd {
- u16 dev_handle; /* 0x00 - 0x01 */
+ __le16 dev_handle; /* 0x00 - 0x01 */
u8 reserved1; /* 0x02 */
u8 function; /* 0x03 */
u8 reserved2[8]; /* 0x04 - 0x0B */
@@ -430,20 +444,20 @@ struct io_accel1_cmd {
u8 reserved4; /* 0x13 */
u8 sgl_offset; /* 0x14 */
u8 reserved5[7]; /* 0x15 - 0x1B */
- u32 transfer_len; /* 0x1C - 0x1F */
+ __le32 transfer_len; /* 0x1C - 0x1F */
u8 reserved6[4]; /* 0x20 - 0x23 */
- u16 io_flags; /* 0x24 - 0x25 */
+ __le16 io_flags; /* 0x24 - 0x25 */
u8 reserved7[14]; /* 0x26 - 0x33 */
u8 LUN[8]; /* 0x34 - 0x3B */
- u32 control; /* 0x3C - 0x3F */
+ __le32 control; /* 0x3C - 0x3F */
u8 CDB[16]; /* 0x40 - 0x4F */
u8 reserved8[16]; /* 0x50 - 0x5F */
- u16 host_context_flags; /* 0x60 - 0x61 */
- u16 timeout_sec; /* 0x62 - 0x63 */
+ __le16 host_context_flags; /* 0x60 - 0x61 */
+ __le16 timeout_sec; /* 0x62 - 0x63 */
u8 ReplyQueue; /* 0x64 */
u8 reserved9[3]; /* 0x65 - 0x67 */
- u64 tag; /* 0x68 - 0x6F */
- u64 host_addr; /* 0x70 - 0x77 */
+ __le64 tag; /* 0x68 - 0x6F */
+ __le64 host_addr; /* 0x70 - 0x77 */
u8 CISS_LUN[8]; /* 0x78 - 0x7F */
struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
@@ -470,8 +484,8 @@ struct io_accel1_cmd {
#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060
struct ioaccel2_sg_element {
- u64 address;
- u32 length;
+ __le64 address;
+ __le32 length;
u8 reserved[3];
u8 chain_indicator;
#define IOACCEL2_CHAIN 0x80
@@ -526,20 +540,20 @@ struct io_accel2_cmd {
/* 0=off, 1=on */
u8 reply_queue; /* Reply Queue ID */
u8 reserved1; /* Reserved */
- u32 scsi_nexus; /* Device Handle */
- u32 Tag; /* cciss tag, lower 4 bytes only */
- u32 tweak_lower; /* Encryption tweak, lower 4 bytes */
+ __le32 scsi_nexus; /* Device Handle */
+ __le32 Tag; /* cciss tag, lower 4 bytes only */
+ __le32 tweak_lower; /* Encryption tweak, lower 4 bytes */
u8 cdb[16]; /* SCSI Command Descriptor Block */
u8 cciss_lun[8]; /* 8 byte SCSI address */
- u32 data_len; /* Total bytes to transfer */
+ __le32 data_len; /* Total bytes to transfer */
u8 cmd_priority_task_attr; /* priority and task attrs */
#define IOACCEL2_PRIORITY_MASK 0x78
#define IOACCEL2_ATTR_MASK 0x07
u8 sg_count; /* Number of sg elements */
- u16 dekindex; /* Data encryption key index */
- u64 err_ptr; /* Error Pointer */
- u32 err_len; /* Error Length*/
- u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
+ __le16 dekindex; /* Data encryption key index */
+ __le64 err_ptr; /* Error Pointer */
+ __le32 err_len; /* Error Length*/
+ __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */
struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
struct io_accel2_scsi_response error_data;
} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
@@ -563,18 +577,18 @@ struct hpsa_tmf_struct {
u8 reserved1; /* byte 3 Reserved */
u32 it_nexus; /* SCSI I-T Nexus */
u8 lun_id[8]; /* LUN ID for TMF request */
- u64 tag; /* cciss tag associated w/ request */
- u64 abort_tag; /* cciss tag of SCSI cmd or task to abort */
- u64 error_ptr; /* Error Pointer */
- u32 error_len; /* Error Length */
+ __le64 tag; /* cciss tag associated w/ request */
+ __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */
+ __le64 error_ptr; /* Error Pointer */
+ __le32 error_len; /* Error Length */
};
/* Configuration Table Structure */
struct HostWrite {
- u32 TransportRequest;
- u32 command_pool_addr_hi;
- u32 CoalIntDelay;
- u32 CoalIntCount;
+ __le32 TransportRequest;
+ __le32 command_pool_addr_hi;
+ __le32 CoalIntDelay;
+ __le32 CoalIntCount;
};
#define SIMPLE_MODE 0x02
@@ -585,54 +599,54 @@ struct HostWrite {
#define DRIVER_SUPPORT_UA_ENABLE 0x00000001
struct CfgTable {
- u8 Signature[4];
- u32 SpecValence;
- u32 TransportSupport;
- u32 TransportActive;
- struct HostWrite HostWrite;
- u32 CmdsOutMax;
- u32 BusTypes;
- u32 TransMethodOffset;
- u8 ServerName[16];
- u32 HeartBeat;
- u32 driver_support;
-#define ENABLE_SCSI_PREFETCH 0x100
-#define ENABLE_UNIT_ATTN 0x01
- u32 MaxScatterGatherElements;
- u32 MaxLogicalUnits;
- u32 MaxPhysicalDevices;
- u32 MaxPhysicalDrivesPerLogicalUnit;
- u32 MaxPerformantModeCommands;
- u32 MaxBlockFetch;
- u32 PowerConservationSupport;
- u32 PowerConservationEnable;
- u32 TMFSupportFlags;
+ u8 Signature[4];
+ __le32 SpecValence;
+ __le32 TransportSupport;
+ __le32 TransportActive;
+ struct HostWrite HostWrite;
+ __le32 CmdsOutMax;
+ __le32 BusTypes;
+ __le32 TransMethodOffset;
+ u8 ServerName[16];
+ __le32 HeartBeat;
+ __le32 driver_support;
+#define ENABLE_SCSI_PREFETCH 0x100
+#define ENABLE_UNIT_ATTN 0x01
+ __le32 MaxScatterGatherElements;
+ __le32 MaxLogicalUnits;
+ __le32 MaxPhysicalDevices;
+ __le32 MaxPhysicalDrivesPerLogicalUnit;
+ __le32 MaxPerformantModeCommands;
+ __le32 MaxBlockFetch;
+ __le32 PowerConservationSupport;
+ __le32 PowerConservationEnable;
+ __le32 TMFSupportFlags;
u8 TMFTagMask[8];
u8 reserved[0x78 - 0x70];
- u32 misc_fw_support; /* offset 0x78 */
-#define MISC_FW_DOORBELL_RESET (0x02)
-#define MISC_FW_DOORBELL_RESET2 (0x010)
-#define MISC_FW_RAID_OFFLOAD_BASIC (0x020)
-#define MISC_FW_EVENT_NOTIFY (0x080)
+ __le32 misc_fw_support; /* offset 0x78 */
+#define MISC_FW_DOORBELL_RESET 0x02
+#define MISC_FW_DOORBELL_RESET2 0x010
+#define MISC_FW_RAID_OFFLOAD_BASIC 0x020
+#define MISC_FW_EVENT_NOTIFY 0x080
u8 driver_version[32];
- u32 max_cached_write_size;
- u8 driver_scratchpad[16];
- u32 max_error_info_length;
- u32 io_accel_max_embedded_sg_count;
- u32 io_accel_request_size_offset;
- u32 event_notify;
-#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
-#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
- u32 clear_event_notify;
+ __le32 max_cached_write_size;
+ u8 driver_scratchpad[16];
+ __le32 max_error_info_length;
+ __le32 io_accel_max_embedded_sg_count;
+ __le32 io_accel_request_size_offset;
+ __le32 event_notify;
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+ __le32 clear_event_notify;
};
#define NUM_BLOCKFETCH_ENTRIES 8
struct TransTable_struct {
- u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
- u32 RepQSize;
- u32 RepQCount;
- u32 RepQCtrAddrLow32;
- u32 RepQCtrAddrHigh32;
+ __le32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
+ __le32 RepQSize;
+ __le32 RepQCount;
+ __le32 RepQCtrAddrLow32;
+ __le32 RepQCtrAddrHigh32;
#define MAX_REPLY_QUEUES 64
struct vals32 RepQAddr[MAX_REPLY_QUEUES];
};
@@ -644,5 +658,137 @@ struct hpsa_pci_info {
u32 board_id;
};
+struct bmic_identify_physical_device {
+ u8 scsi_bus; /* SCSI Bus number on controller */
+ u8 scsi_id; /* SCSI ID on this bus */
+ __le16 block_size; /* sector size in bytes */
+ __le32 total_blocks; /* number of sectors on drive */
+ __le32 reserved_blocks; /* controller reserved (RIS) */
+ u8 model[40]; /* Physical Drive Model */
+ u8 serial_number[40]; /* Drive Serial Number */
+ u8 firmware_revision[8]; /* drive firmware revision */
+ u8 scsi_inquiry_bits; /* inquiry byte 7 bits */
+ u8 compaq_drive_stamp; /* 0 means drive not stamped */
+ u8 last_failure_reason;
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG 0x01
+#define BMIC_LAST_FAILURE_ERROR_ERASING_RIS 0x02
+#define BMIC_LAST_FAILURE_ERROR_SAVING_RIS 0x03
+#define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND 0x04
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED 0x05
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP 0x06
+#define BMIC_LAST_FAILURE_TIMEOUT 0x07
+#define BMIC_LAST_FAILURE_AUTOSENSE_FAILED 0x08
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_1 0x09
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_2 0x0a
+#define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE 0x0b
+#define BMIC_LAST_FAILURE_NOT_READY 0x0c
+#define BMIC_LAST_FAILURE_HARDWARE_ERROR 0x0d
+#define BMIC_LAST_FAILURE_ABORTED_COMMAND 0x0e
+#define BMIC_LAST_FAILURE_WRITE_PROTECTED 0x0f
+#define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER 0x10
+#define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR 0x11
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG 0x12
+#define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED 0x13
+#define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG 0x14
+#define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED 0x15
+#define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED 0x16
+#define BMIC_LAST_FAILURE_INQUIRY_FAILED 0x17
+#define BMIC_LAST_FAILURE_NON_DISK_DEVICE 0x18
+#define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED 0x19
+#define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE 0x1a
+#define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED 0x1b
+#define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED 0x1c
+#define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP 0x1d
+#define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED 0x1e
+#define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR 0x1f
+#define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS 0x20
+#define BMIC_LAST_FAILURE_WRONG_REPLACE 0x21
+#define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED 0x22
+#define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED 0x23
+#define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE 0x24
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG 0x25
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG 0x26
+#define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED 0x27
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY 0x28
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED 0x29
+#define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY 0x2a
+#define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED 0x2b
+
+#define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED 0x37
+#define BMIC_LAST_FAILURE_PHY_RESET_FAILED 0x38
+#define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE 0x40
+#define BMIC_LAST_FAILURE_KC_VOLUME_FAILED 0x41
+#define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT 0x42
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE 0x80
+#define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL 0x81
+#define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX 0x82
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE 0x83
+
+ u8 flags;
+ u8 more_flags;
+ u8 scsi_lun; /* SCSI LUN for phys drive */
+ u8 yet_more_flags;
+ u8 even_more_flags;
+ __le32 spi_speed_rules;/* SPI Speed data:Ultra disable diagnose */
+ u8 phys_connector[2]; /* connector number on controller */
+ u8 phys_box_on_bus; /* phys enclosure this drive resides in */
+ u8 phys_bay_in_box; /* phys drv bay this drive resides in */
+ __le32 rpm; /* Drive rotational speed in rpm */
+ u8 device_type; /* type of drive */
+ u8 sata_version; /* only valid when drive_type is SATA */
+ __le64 big_total_block_count;
+ __le64 ris_starting_lba;
+ __le32 ris_size;
+ u8 wwid[20];
+ u8 controller_phy_map[32];
+ __le16 phy_count;
+ u8 phy_connected_dev_type[256];
+ u8 phy_to_drive_bay_num[256];
+ __le16 phy_to_attached_dev_index[256];
+ u8 box_index;
+ u8 reserved;
+ __le16 extra_physical_drive_flags;
+#define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \
+ (idphydrv->extra_physical_drive_flags & (1 << 10))
+ u8 negotiated_link_rate[256];
+ u8 phy_to_phy_map[256];
+ u8 redundant_path_present_map;
+ u8 redundant_path_failure_map;
+ u8 active_path_number;
+ __le16 alternate_paths_phys_connector[8];
+ u8 alternate_paths_phys_box_on_port[8];
+ u8 multi_lun_device_lun_count;
+ u8 minimum_good_fw_revision[8];
+ u8 unique_inquiry_bytes[20];
+ u8 current_temperature_degreesC;
+ u8 temperature_threshold_degreesC;
+ u8 max_temperature_degreesC;
+ u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
+ __le16 current_queue_depth_limit;
+ u8 switch_name[10];
+ __le16 switch_port;
+ u8 alternate_paths_switch_name[40];
+ u8 alternate_paths_switch_port[8];
+ __le16 power_on_hours; /* valid only if gas gauge supported */
+ __le16 percent_endurance_used; /* valid only if gas gauge supported. */
+#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
+ ((idphydrv->percent_endurance_used & 0x80) || \
+ (idphydrv->percent_endurance_used > 10000))
+ u8 drive_authentication;
+#define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \
+ (idphydrv->drive_authentication == 0x80)
+ u8 smart_carrier_authentication;
+#define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \
+ (idphydrv->smart_carrier_authentication != 0x0)
+#define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \
+ (idphydrv->smart_carrier_authentication == 0x01)
+ u8 smart_carrier_app_fw_version;
+ u8 smart_carrier_bootloader_fw_version;
+ u8 encryption_key_name[64];
+ __le32 misc_drive_flags;
+ __le16 dek_index;
+ u8 padding[112];
+};
+
#pragma pack()
#endif /* HPSA_CMD_H */
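The hpsa hunks above only swap u32/u16 for __le32/__le16 in structures that the controller reads and writes over DMA, so that sparse can warn whenever the driver touches such a field without a byte-order conversion. A minimal sketch of the access pattern this enforces follows; the struct and helper names are hypothetical, not part of the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical device-visible descriptor: fields are little-endian on the wire. */
struct example_desc {
	__le32 transfer_len;
	__le16 io_flags;
};

static void example_fill_desc(struct example_desc *d, u32 len, u16 flags)
{
	d->transfer_len = cpu_to_le32(len);	/* CPU -> device byte order */
	d->io_flags = cpu_to_le16(flags);
}

static u32 example_read_len(const struct example_desc *d)
{
	return le32_to_cpu(d->transfer_len);	/* device -> CPU byte order */
}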
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index ddf0694d87f0..3882d9f519c8 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2226,36 +2226,36 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
if (hd->proc & PR_INFO) {
seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
- seq_printf(m, "\nsync_xfer[] = ");
+ seq_puts(m, "\nsync_xfer[] = ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%02x", hd->sync_xfer[x]);
- seq_printf(m, "\nsync_stat[] = ");
+ seq_puts(m, "\nsync_stat[] = ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%02x", hd->sync_stat[x]);
}
#ifdef PROC_STATISTICS
if (hd->proc & PR_STATISTICS) {
- seq_printf(m, "\ncommands issued: ");
+ seq_puts(m, "\ncommands issued: ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
- seq_printf(m, "\ndisconnects allowed:");
+ seq_puts(m, "\ndisconnects allowed:");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
- seq_printf(m, "\ndisconnects done: ");
+ seq_puts(m, "\ndisconnects done: ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt);
}
#endif
if (hd->proc & PR_CONNECTED) {
- seq_printf(m, "\nconnected: ");
+ seq_puts(m, "\nconnected: ");
if (hd->connected) {
cmd = (Scsi_Cmnd *) hd->connected;
seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
}
}
if (hd->proc & PR_INPUTQ) {
- seq_printf(m, "\ninput_Q: ");
+ seq_puts(m, "\ninput_Q: ");
cmd = (Scsi_Cmnd *) hd->input_Q;
while (cmd) {
seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
@@ -2263,7 +2263,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
}
}
if (hd->proc & PR_DISCQ) {
- seq_printf(m, "\ndisconnected_Q:");
+ seq_puts(m, "\ndisconnected_Q:");
cmd = (Scsi_Cmnd *) hd->disconnected_Q;
while (cmd) {
seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
@@ -2273,7 +2273,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
if (hd->proc & PR_TEST) {
; /* insert your own custom function here */
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
spin_unlock_irqrestore(instance->host_lock, flags);
#endif /* PROC_INTERFACE */
return 0;
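The in2000 and ips hunks in this series apply the same mechanical rule: seq_printf() with a constant format string and no arguments becomes seq_puts(), and printing a single character becomes seq_putc(). A minimal sketch of the rule in a hypothetical ->show() handler:

#include <linux/seq_file.h>

/* Hypothetical seq_file show fragment illustrating the conversion. */
static int example_show(struct seq_file *m, void *v)
{
	int x;

	seq_puts(m, "\ncounters:");		/* constant string: seq_puts() */
	for (x = 0; x < 7; x++)
		seq_printf(m, "\t%d", x);	/* formatted output stays seq_printf() */
	seq_putc(m, '\n');			/* single character: seq_putc() */
	return 0;
}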
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df4e27cd996a..9219953ee949 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
+ ipr_cmd->eh_comp = NULL;
ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ioa_cfg: ioa config struct
+ * @device: device to match (sdev)
+ * @match: match function to use
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+ int (*match)(struct ipr_cmnd *, void *))
+{
+ struct ipr_cmnd *ipr_cmd;
+ int wait;
+ unsigned long flags;
+ struct ipr_hrr_queue *hrrq;
+ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+ DECLARE_COMPLETION_ONSTACK(comp);
+
+ ENTER;
+ do {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait)
+ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
spin_lock_irq(cmd->device->host->host_lock);
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
return rc;
}
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
rc = ipr_cancel_op(scsi_cmd);
spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
LEAVE;
return rc;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b4f3eec51bc9..ec03b42fa2b9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct completion *eh_comp;
struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
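The ipr change above hangs an optional struct completion off each command (eh_comp): the error-handling path attaches an on-stack completion to every matching pending command, and ipr_scsi_eh_done() completes it, so a device reset or abort only returns once the affected commands have actually finished. A minimal stand-alone sketch of that pattern (the per-queue walking and locking done in ipr_wait_for_ops() are omitted, and all names here are illustrative):

#include <linux/completion.h>
#include <linux/errno.h>

struct example_cmd {
	struct completion *eh_comp;	/* set by EH while it waits, else NULL */
};

/* Normal completion path: wake the error handler if it is waiting. */
static void example_cmd_done(struct example_cmd *cmd)
{
	if (cmd->eh_comp)
		complete(cmd->eh_comp);
}

/* Error-handler path: attach a completion and wait with a timeout. */
static int example_wait_for_cmd(struct example_cmd *cmd, unsigned long timeout)
{
	DECLARE_COMPLETION_ONSTACK(comp);

	cmd->eh_comp = &comp;
	if (!wait_for_completion_timeout(&comp, timeout)) {
		cmd->eh_comp = NULL;	/* timed out: detach before returning */
		return -ETIMEDOUT;
	}
	return 0;
}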
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e5c28435d768..7542f11d3fcd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2038,15 +2038,14 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m)
{
METHOD_TRACE("ips_host_info", 1);
- seq_printf(m, "\nIBM ServeRAID General Information:\n\n");
+ seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
(le16_to_cpu(ha->nvram->adapter_type) != 0))
seq_printf(m, "\tController Type : %s\n",
ips_adapter_name[ha->ad_type - 1]);
else
- seq_printf(m,
- "\tController Type : Unknown\n");
+ seq_puts(m, "\tController Type : Unknown\n");
if (ha->io_addr)
seq_printf(m,
@@ -2138,7 +2137,7 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m)
seq_printf(m, "\tCurrent Active PT Commands : %d\n",
ha->num_ioctl);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4c25485aa934..c66088d0fd2a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2225,6 +2225,15 @@ lpfc_adisc_done(struct lpfc_vport *vport)
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
+ /* The ADISCs are complete. It doesn't matter whether
+ * they succeeded or failed because the ADISC completion
+ * routine is guaranteed to call the state machine and
+ * the RPI is either unregistered (failed ADISC response)
+ * or the RPI is still valid and the node is marked
+ * mapped for a target. The exchanges should be in the
+ * correct state. This code is specific to SLI3.
+ */
+ lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
return;
}
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 2485255f3414..bc7b34c02723 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2240,7 +2240,7 @@ proc_show_battery(struct seq_file *m, void *v)
goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
- seq_printf(m, "Adapter inquiry failed.\n");
+ seq_puts(m, "Adapter inquiry failed.\n");
printk(KERN_WARNING "megaraid: inquiry failed.\n");
goto free_inquiry;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 0d44d91c2fce..14e5c7cea929 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,7 +35,7 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.805.06.01-rc1"
+#define MEGASAS_VERSION "06.806.08.00-rc1"
/*
* Device IDs
@@ -969,7 +969,20 @@ struct megasas_ctrl_info {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:25;
+ u32 reserved:12;
+ u32 discardCacheDuringLDDelete:1;
+ u32 supportSecurityonJBOD:1;
+ u32 supportCacheBypassModes:1;
+ u32 supportDisableSESMonitoring:1;
+ u32 supportForceFlash:1;
+ u32 supportNVDRAM:1;
+ u32 supportDrvActivityLEDSetting:1;
+ u32 supportAllowedOpsforDrvRemoval:1;
+ u32 supportHOQRebuild:1;
+ u32 supportForceTo512e:1;
+ u32 supportNVCacheErase:1;
+ u32 supportDebugQueue:1;
+ u32 supportSwZone:1;
u32 supportCrashDump:1;
u32 supportMaxExtLDs:1;
u32 supportT10RebuildAssist:1;
@@ -981,9 +994,22 @@ struct megasas_ctrl_info {
u32 supportThermalPollInterval:1;
u32 supportDisableImmediateIO:1;
u32 supportT10RebuildAssist:1;
- u32 supportMaxExtLDs:1;
- u32 supportCrashDump:1;
- u32 reserved:25;
+ u32 supportMaxExtLDs:1;
+ u32 supportCrashDump:1;
+ u32 supportSwZone:1;
+ u32 supportDebugQueue:1;
+ u32 supportNVCacheErase:1;
+ u32 supportForceTo512e:1;
+ u32 supportHOQRebuild:1;
+ u32 supportAllowedOpsforDrvRemoval:1;
+ u32 supportDrvActivityLEDSetting:1;
+ u32 supportNVDRAM:1;
+ u32 supportForceFlash:1;
+ u32 supportDisableSESMonitoring:1;
+ u32 supportCacheBypassModes:1;
+ u32 supportSecurityonJBOD:1;
+ u32 discardCacheDuringLDDelete:1;
+ u32 reserved:12;
#endif
} adapterOperations3;
@@ -1022,6 +1048,13 @@ enum MR_MFI_MPT_PTHR_FLAGS {
MFI_MPT_ATTACHED = 2,
};
+enum MR_SCSI_CMD_TYPE {
+ READ_WRITE_LDIO = 0,
+ NON_READ_WRITE_LDIO = 1,
+ READ_WRITE_SYSPDIO = 2,
+ NON_READ_WRITE_SYSPDIO = 3,
+};
+
/* Frame Type */
#define IO_FRAME 0
#define PTHRU_FRAME 1
@@ -1049,6 +1082,8 @@ enum MR_MFI_MPT_PTHR_FLAGS {
*/
#define MEGASAS_INT_CMDS 32
#define MEGASAS_SKINNY_INT_CMDS 5
+#define MEGASAS_FUSION_INTERNAL_CMDS 5
+#define MEGASAS_FUSION_IOCTL_CMDS 3
#define MEGASAS_MAX_MSIX_QUEUES 128
/*
@@ -1194,19 +1229,23 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:27;
+ u32 reserved:25;
+ u32 security_protocol_cmds_fw:1;
+ u32 support_core_affinity:1;
u32 support_ndrive_r1_lb:1;
u32 support_max_255lds:1;
- u32 reserved1:1;
+ u32 support_fastpath_wb:1;
u32 support_additional_msix:1;
u32 support_fp_remote_lun:1;
#else
u32 support_fp_remote_lun:1;
u32 support_additional_msix:1;
- u32 reserved1:1;
+ u32 support_fastpath_wb:1;
u32 support_max_255lds:1;
u32 support_ndrive_r1_lb:1;
- u32 reserved:27;
+ u32 support_core_affinity:1;
+ u32 security_protocol_cmds_fw:1;
+ u32 reserved:25;
#endif
} mfi_capabilities;
u32 reg;
@@ -1638,20 +1677,20 @@ struct megasas_instance {
u32 crash_dump_fw_support;
u32 crash_dump_drv_support;
u32 crash_dump_app_support;
+ u32 secure_jbod_support;
spinlock_t crashdump_lock;
struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
- u8 ld_ids[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;
u16 max_fw_cmds;
- /* For Fusion its num IOCTL cmds, for others MFI based its
- max_fw_cmds */
u16 max_mfi_cmds;
+ u16 max_scsi_cmds;
u32 max_sectors_per_req;
struct megasas_aen_event *ev;
@@ -1727,7 +1766,7 @@ struct megasas_instance {
u8 requestorId;
char PlasmaFW111;
char mpio;
- int throttlequeuedepth;
+ u16 throttlequeuedepth;
u8 mask_interrupts;
u8 is_imr;
};
@@ -1946,5 +1985,6 @@ void __megasas_return_cmd(struct megasas_instance *instance,
void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
+int megasas_cmd_type(struct scsi_cmnd *cmd);
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ff283d23788a..890637fdd61e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -78,7 +78,7 @@ static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, S_IRUGO);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
-static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
"Adapter queue depth when throttled due to I/O timeout. Default: 16");
@@ -1417,16 +1417,15 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
}
/**
- * megasas_is_ldio - Checks if the cmd is for logical drive
+ * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
+ * and whether it is RW or non-RW
* @scmd: SCSI command
*
- * Called by megasas_queue_command to find out if the command to be queued
- * is a logical drive command
*/
-inline int megasas_is_ldio(struct scsi_cmnd *cmd)
+inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
- if (!MEGASAS_IS_LOGICAL(cmd))
- return 0;
+ int ret;
+
switch (cmd->cmnd[0]) {
case READ_10:
case WRITE_10:
@@ -1436,10 +1435,14 @@ inline int megasas_is_ldio(struct scsi_cmnd *cmd)
case WRITE_6:
case READ_16:
case WRITE_16:
- return 1;
+ ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
+ break;
default:
- return 0;
+ ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
}
+ return ret;
}
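With megasas_is_ldio() replaced by megasas_cmd_type(), callers switch on four classifications instead of a boolean. A minimal sketch of how a caller can dispatch on the result, assuming the driver's headers are in scope; the example_build_* helpers are hypothetical placeholders for the driver's real build routines:

/* Hypothetical dispatch on the command classification. */
static void example_route_cmd(struct megasas_instance *instance,
			      struct scsi_cmnd *scmd)
{
	switch (megasas_cmd_type(scmd)) {
	case READ_WRITE_LDIO:		/* RW I/O to a logical drive */
		example_build_ldio(instance, scmd);
		break;
	case READ_WRITE_SYSPDIO:	/* RW I/O to a system PD */
	case NON_READ_WRITE_LDIO:	/* non-RW CDB to a logical drive */
	case NON_READ_WRITE_SYSPDIO:	/* non-RW CDB to a system PD */
	default:
		example_build_dcdb(instance, scmd);
		break;
	}
}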
/**
@@ -1471,7 +1474,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
if(!cmd->scmd)
continue;
printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
- if (megasas_is_ldio(cmd->scmd)){
+ if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
ldio = (struct megasas_io_frame *)cmd->frame;
mfi_sgl = &ldio->sgl;
sgcount = ldio->sge_count;
@@ -1531,7 +1534,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
/*
* Logical drive command
*/
- if (megasas_is_ldio(scmd))
+ if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
frame_count = megasas_build_ldio(instance, scmd, cmd);
else
frame_count = megasas_build_dcdb(instance, scmd, cmd);
@@ -1689,22 +1692,66 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
return 0;
}
+/*
+ * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
+ * the adapter has been killed
+ * @instance: Adapter soft state
+ *
+ */
+void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+{
+ int i;
+ struct megasas_cmd *cmd_mfi;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ /* Find all outstanding ioctls */
+ if (fusion) {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->sync_cmd &&
+ cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ megasas_complete_cmd(instance,
+ cmd_mfi, DID_OK);
+ }
+ }
+ } else {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd_mfi = instance->cmd_list[i];
+ if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
+ MFI_CMD_ABORT)
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ }
+ }
+}
+
+
void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
+ /* Set critical error to block I/O & ioctls in case caller didn't */
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ /* Wait 1 second so that any I/O or ioctls still being built have been posted */
+ msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
- writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
if (instance->mpio && instance->requestorId)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
- writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->inbound_doorbell);
}
+ /* Complete outstanding ioctls when adapter is killed */
+ megasas_complete_outstanding_ioctls(instance);
}
/**
@@ -1717,6 +1764,7 @@ void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
{
unsigned long flags;
+
if (instance->flag & MEGASAS_FW_BUSY
&& time_after(jiffies, instance->last_time + 5 * HZ)
&& atomic_read(&instance->fw_outstanding) <
@@ -1724,13 +1772,8 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
spin_lock_irqsave(instance->host->host_lock, flags);
instance->flag &= ~MEGASAS_FW_BUSY;
- if (instance->is_imr) {
- instance->host->can_queue =
- instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
- } else
- instance->host->can_queue =
- instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ instance->host->can_queue = instance->max_scsi_cmds;
spin_unlock_irqrestore(instance->host->host_lock, flags);
}
}
@@ -3028,10 +3071,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
"was tried multiple times during reset."
"Shutting down the HBA\n",
cmd, cmd->scmd, cmd->sync_cmd);
+ instance->instancet->disable_intr(instance);
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
megaraid_sas_kill_hba(instance);
-
- instance->adprecovery =
- MEGASAS_HW_CRITICAL_ERROR;
return;
}
}
@@ -3165,8 +3207,8 @@ process_fw_state_change_wq(struct work_struct *work)
if (megasas_transition_to_ready(instance, 1)) {
printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
megaraid_sas_kill_hba(instance);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
return ;
}
@@ -3547,7 +3589,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
int i;
u32 max_cmd;
u32 sge_sz;
- u32 sgl_sz;
u32 total_sz;
u32 frame_count;
struct megasas_cmd *cmd;
@@ -3566,24 +3607,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
}
/*
- * Calculated the number of 64byte frames required for SGL
- */
- sgl_sz = sge_sz * instance->max_num_sge;
- frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
- frame_count = 15;
-
- /*
- * We need one extra frame for the MFI command
+ * For MFI controllers:
+ * max_num_sge = 60
+ * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
+ * Total 960 bytes (15 MFI frames of 64 bytes each)
+ *
+ * Fusion adapters require only 3 extra frames:
+ * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+ * max_sge_sz = 12 bytes (sizeof megasas_sge64)
+ * Total 192 bytes (3 MFI frames of 64 bytes each)
*/
- frame_count++;
-
+ frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
total_sz = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
*/
instance->frame_dma_pool = pci_pool_create("megasas frame pool",
- instance->pdev, total_sz, 64,
- 0);
+ instance->pdev, total_sz, 256, 0);
if (!instance->frame_dma_pool) {
printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
@@ -4631,28 +4671,48 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->crash_dump_h);
instance->crash_dump_buf = NULL;
}
+
+ instance->secure_jbod_support =
+ ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ if (instance->secure_jbod_support)
+ dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n");
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
instance->max_sectors_per_req = tmp_sectors;
- /* Check for valid throttlequeuedepth module parameter */
- if (instance->is_imr) {
- if (throttlequeuedepth > (instance->max_fw_cmds -
- MEGASAS_SKINNY_INT_CMDS))
- instance->throttlequeuedepth =
- MEGASAS_THROTTLE_QUEUE_DEPTH;
- else
- instance->throttlequeuedepth = throttlequeuedepth;
+ /*
+ * 1. For fusion adapters, 3 commands for IOCTL and 5 commands
+ * for driver's internal DCMDs.
+ * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's
+ * internal DCMDs.
+ * 3. For the rest of the MFI adapters, 27 commands are reserved for
+ * IOCTLs and 5 commands for the driver's internal DCMDs.
+ */
+ if (instance->ctrl_context) {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ MEGASAS_SKINNY_INT_CMDS;
+ sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
} else {
- if (throttlequeuedepth > (instance->max_fw_cmds -
- MEGASAS_INT_CMDS))
- instance->throttlequeuedepth =
- MEGASAS_THROTTLE_QUEUE_DEPTH;
- else
- instance->throttlequeuedepth = throttlequeuedepth;
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ MEGASAS_INT_CMDS;
+ sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
}
+ /* Check for valid throttlequeuedepth module parameter */
+ if (throttlequeuedepth &&
+ throttlequeuedepth <= instance->max_scsi_cmds)
+ instance->throttlequeuedepth = throttlequeuedepth;
+ else
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+
/*
* Setup tasklet for cmd completion
*/
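As a worked example of the reserved-command accounting a few lines above (the firmware command count is assumed, not taken from the patch): a Fusion adapter reporting max_fw_cmds = 1008 reserves 5 + 3 = 8 commands, so max_scsi_cmds = 1000 and ioctl_sem is initialised to 3; a throttlequeuedepth module parameter of 0 or anything above 1000 then falls back to MEGASAS_THROTTLE_QUEUE_DEPTH, otherwise the user's value is kept.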
@@ -4947,12 +5007,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
*/
host->irq = instance->pdev->irq;
host->unique_id = instance->unique_id;
- if (instance->is_imr) {
- host->can_queue =
- instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
- } else
- host->can_queue =
- instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ host->can_queue = instance->max_scsi_cmds;
host->this_id = instance->init_id;
host->sg_tablesize = instance->max_num_sge;
@@ -5130,8 +5185,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
INIT_LIST_HEAD(&fusion->cmd_pool);
spin_lock_init(&fusion->mpt_pool_lock);
- memset(fusion->load_balance_info, 0,
- sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
}
break;
default: /* For all other supported controllers */
@@ -5215,12 +5268,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
instance->ctrl_info = NULL;
+
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
- sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
- } else
- sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
megasas_dbg_lvl = 0;
instance->flag = 0;
@@ -6215,9 +6266,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- /*
- * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
- */
if (down_interruptible(&instance->ioctl_sem)) {
error = -ERESTARTSYS;
goto out_kfree_ioc;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 460c6a3d4ade..4f72287860ee 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -172,6 +172,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
int i;
+ u16 ld_count;
struct MR_DRV_RAID_MAP_ALL *drv_map =
@@ -191,9 +192,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
fusion->ld_map[(instance->map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
+ ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
#if VD_EXT_DEBUG
- for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
+ for (i = 0; i < ld_count; i++) {
dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
"Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
instance->unique_id, i,
@@ -205,12 +207,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
memset(drv_map, 0, fusion->drv_map_sz);
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
- pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
+ pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
pDrvRaidMap->ldTgtIdToLd[i] =
(u8)pFwRaidMap->ldTgtIdToLd[i];
- for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
+ for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
+ i < MAX_LOGICAL_DRIVES_EXT; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
+ for (i = 0; i < ld_count; i++) {
pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
#if VD_EXT_DEBUG
dev_dbg(&instance->pdev->dev,
@@ -252,7 +257,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
struct LD_LOAD_BALANCE_INFO *lbInfo;
PLD_SPAN_INFO ldSpanInfo;
struct MR_LD_RAID *raid;
- int ldCount, num_lds;
+ u16 ldCount, num_lds;
u16 ld;
u32 expected_size;
@@ -356,7 +361,7 @@ static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES_EXT)
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
continue;
raid = MR_LdRaidGet(ld, map);
dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
@@ -1157,7 +1162,7 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES_EXT)
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
continue;
raid = MR_LdRaidGet(ld, map);
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 71557f64bb5e..675b5e7aba94 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -63,7 +63,6 @@ extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
extern void
megasas_complete_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, u8 alt_status);
-int megasas_is_ldio(struct scsi_cmnd *cmd);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
int seconds);
@@ -103,6 +102,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
regs = instance->reg_set;
+
+ instance->mask_interrupts = 0;
/* For Thunderbolt/Invader also clear intr on enable */
writel(~0, &regs->outbound_intr_status);
readl(&regs->outbound_intr_status);
@@ -111,7 +112,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
/* Dummy readl to force pci flush */
readl(&regs->outbound_intr_mask);
- instance->mask_interrupts = 0;
}
/**
@@ -196,6 +196,7 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
cmd->scmd = NULL;
cmd->sync_cmd_idx = (u32)ULONG_MAX;
+ memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
list_add(&cmd->list, (&fusion->cmd_pool)->next);
spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
@@ -689,6 +690,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
= 1;
init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
= 1;
+ init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw
+ = 1;
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -698,12 +701,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
cpu_to_le32(lower_32_bits(ioc_init_handle));
init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
- req_desc.Words = 0;
+ req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
+ req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
req_desc.MFAIo.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- cpu_to_le32s((u32 *)&req_desc.MFAIo);
- req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
/*
* disable the intr before firing the init frame
@@ -1017,8 +1019,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
* does not exceed max cmds that the FW can support
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
- /* Only internal cmds (DCMD) need to have MFI frames */
- instance->max_mfi_cmds = MEGASAS_INT_CMDS;
+
+ /*
+ * Only the driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
+ */
+ instance->max_mfi_cmds =
+ MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
max_cmd = instance->max_fw_cmds;
@@ -1285,6 +1291,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sgl_ptr =
(struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+ memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
}
}
@@ -1658,6 +1665,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
u16 pd_index = 0;
+ u16 os_timeout_value;
+ u16 timeout_limit;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
u8 span, physArm;
@@ -1674,52 +1683,66 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
-
- /* Check if this is a system PD I/O */
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
- io_request->Function = 0;
if (fusion->fast_path_io)
io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
- io_request->RaidContext.timeoutValue =
- local_map_ptr->raidMap.fpPdIoTimeoutSec;
- io_request->RaidContext.regLockFlags = 0;
- io_request->RaidContext.regLockRowLBA = 0;
- io_request->RaidContext.regLockLength = 0;
io_request->RaidContext.RAIDFlags =
- MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
- MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
- if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
- io_request->IoFlags |= cpu_to_le16(
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- cmd->request_desc->SCSIIO.DevHandle =
- local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
cmd->request_desc->SCSIIO.MSIxIndex =
instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
- /*
- * If the command is for the tape device, set the
- * FP timeout to the os layer timeout value.
- */
- if (scmd->device->type == TYPE_TAPE) {
- if ((scmd->request->timeout / HZ) > 0xFFFF)
- io_request->RaidContext.timeoutValue =
- 0xFFFF;
- else
- io_request->RaidContext.timeoutValue =
- scmd->request->timeout / HZ;
+ os_timeout_value = scmd->request->timeout / HZ;
+
+ if (instance->secure_jbod_support &&
+ (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) {
+ /* system pd firmware path */
+ io_request->Function =
+ MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16(os_timeout_value);
+ } else {
+ /* system pd Fast Path */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+ timeout_limit = (scmd->device->type == TYPE_DISK) ?
+ 255 : 0xFFFF;
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16((os_timeout_value > timeout_limit) ?
+ timeout_limit : os_timeout_value);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ io_request->IoFlags |=
+ cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
} else {
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
goto NonFastPath;
+ /*
+ * For older firmware, the driver should not access ldTgtIdToLd
+ * beyond index 127; for Extended VD firmware, ldTgtIdToLd
+ * should not go beyond index 255.
+ */
+
+ if ((!fusion->fast_path_io) ||
+ (device_id >= instance->fw_supported_vd_count))
+ goto NonFastPath;
+
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if ((ld >= instance->fw_supported_vd_count) ||
- (!fusion->fast_path_io))
+
+ if (ld >= instance->fw_supported_vd_count)
goto NonFastPath;
raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1811,7 +1834,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- if (megasas_is_ldio(scp))
+ if (megasas_cmd_type(scp) == READ_WRITE_LDIO)
megasas_build_ldio_fusion(instance, scp, cmd);
else
megasas_build_dcdb_fusion(instance, scp, cmd);
@@ -2612,7 +2635,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
instance->host->host_no);
megaraid_sas_kill_hba(instance);
instance->skip_heartbeat_timer_del = 1;
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
retval = FAILED;
goto out;
}
@@ -2808,8 +2830,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
__func__, __LINE__);
- instance->adprecovery =
- MEGASAS_HW_CRITICAL_ERROR;
megaraid_sas_kill_hba(instance);
retval = FAILED;
}
@@ -2858,7 +2878,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
"adapter scsi%d.\n", instance->host->host_no);
megaraid_sas_kill_hba(instance);
instance->skip_heartbeat_timer_del = 1;
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
retval = FAILED;
} else {
/* For VF: Restart HB timer if we didn't OCR */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 5ab7daee11be..56e6db2d5874 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -306,14 +306,9 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
* MPT RAID MFA IO Descriptor.
*/
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u32 MessageAddress1:24; /* bits 31:8*/
- u32 RequestFlags:8;
-#else
u32 RequestFlags:8;
- u32 MessageAddress1:24; /* bits 31:8*/
-#endif
- u32 MessageAddress2; /* bits 61:32 */
+ u32 MessageAddress1:24;
+ u32 MessageAddress2;
};
/* Default Request Descriptor */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 088eefa67da8..7fc6f23bd9dc 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.32
+ * mpi2.h Version: 02.00.35
*
* Version History
* ---------------
@@ -83,6 +83,9 @@
* 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
* 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
* 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -108,7 +111,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x20)
+#define MPI2_HEADER_VERSION_UNIT (0x23)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 510ef0dc8d7b..ee8d2d695d55 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.26
+ * mpi2_cnfg.h Version: 02.00.29
*
* Version History
* ---------------
@@ -157,6 +157,20 @@
* 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
* Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
* match the specification.
+ * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ * MPI2_CONFIG_PAGE_MAN_7.
+ * Added EnclosureLevel and ConnectorName fields to
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added EnclosureLevel field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 01-08-14 02.00.28 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
+ * more defines for the BiosOptions field.
* --------------------------------------------------------------------------
*/
@@ -706,6 +720,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
/* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008)
#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -1224,7 +1239,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
U32 BiosOptions; /* 0x04 */
U32 IOCSettings; /* 0x08 */
- U32 Reserved1; /* 0x0C */
+ U8 SSUTimeout; /* 0x0C */
+ U8 Reserved1; /* 0x0D */
+ U16 Reserved2; /* 0x0E */
U32 DeviceSettings; /* 0x10 */
U16 NumberOfDevices; /* 0x14 */
U16 UEFIVersion; /* 0x16 */
@@ -1235,9 +1252,24 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
-#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x07)
/* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300)
+#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100)
+#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300)
+
#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
@@ -2420,13 +2452,13 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
U8 PortGroups; /* 0x2C */
U8 DmaGroup; /* 0x2D */
U8 ControlGroup; /* 0x2E */
- U8 Reserved1; /* 0x2F */
- U32 Reserved2; /* 0x30 */
+ U8 EnclosureLevel; /* 0x2F */
+ U8 ConnectorName[4]; /* 0x30 */
U32 Reserved3; /* 0x34 */
} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t;
-#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
+#define MPI2_SASDEVICE0_PAGEVERSION (0x09)
/* values for SAS Device Page 0 AccessStatus field */
#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
@@ -2464,6 +2496,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
@@ -2732,7 +2765,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
U16 EnclosureHandle; /* 0x16 */
U16 NumSlots; /* 0x18 */
U16 StartSlot; /* 0x1A */
- U16 Reserved2; /* 0x1C */
+ U8 Reserved2; /* 0x1C */
+ U8 EnclosureLevel; /* 0x1D */
U16 SEPDevHandle; /* 0x1E */
U32 Reserved3; /* 0x20 */
U32 Reserved4; /* 0x24 */
@@ -2740,9 +2774,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t;
-#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
/* values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 2c3b0f28576b..b02de48be204 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.23
+ * mpi2_ioc.h Version: 02.00.24
*
* Version History
* ---------------
@@ -126,6 +126,7 @@
* Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
* Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
* Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
* --------------------------------------------------------------------------
*/
@@ -1589,6 +1590,7 @@ Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
/* values for HashImageType */
#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
/* values for HashAlgorithm */
#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 9be03ed46180..659b8ac83ceb 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.11
+ * mpi2_tool.h Version: 02.00.12
*
* Version History
* ---------------
@@ -29,7 +29,8 @@
* MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
* 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
* it uses MPI Chain SGE as well as MPI Simple SGE.
- * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
* --------------------------------------------------------------------------
*/
@@ -101,6 +102,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST
#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 58e45216d1ec..11248de92b3b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -639,6 +640,9 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
if (!ioc->hide_ir_msg)
desc = "Log Entry Added";
break;
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ desc = "Temperature Threshold";
+ break;
}
if (!desc)
@@ -1296,6 +1300,8 @@ _base_free_irq(struct MPT2SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
+ irq_set_affinity_hint(reply_q->vector, NULL);
+ free_cpumask_var(reply_q->affinity_hint);
synchronize_irq(reply_q->vector);
free_irq(reply_q->vector, reply_q);
kfree(reply_q);
@@ -1325,6 +1331,11 @@ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
reply_q->ioc = ioc;
reply_q->msix_index = index;
reply_q->vector = vector;
+
+ if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_clear(reply_q->affinity_hint);
+
atomic_set(&reply_q->busy, 0);
if (ioc->msix_enable)
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
@@ -1359,6 +1370,7 @@ static void
_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
{
unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ struct adapter_reply_queue *reply_q;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -1373,20 +1385,30 @@ _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
cpu = cpumask_first(cpu_online_mask);
- do {
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+
unsigned int i, group = nr_cpus / nr_msix;
+ if (cpu >= nr_cpus)
+ break;
+
if (index < nr_cpus % nr_msix)
group++;
for (i = 0 ; i < group ; i++) {
ioc->cpu_msix_table[cpu] = index;
+ cpumask_or(reply_q->affinity_hint,
+ reply_q->affinity_hint, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
+ if (irq_set_affinity_hint(reply_q->vector,
+ reply_q->affinity_hint))
+ dinitprintk(ioc, pr_info(MPT2SAS_FMT
+ "error setting affinity hint for irq vector %d\n",
+ ioc->name, reply_q->vector));
index++;
-
- } while (cpu < nr_cpus);
+ }
}
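The rewritten loop above hands each reply queue a contiguous block of nr_cpus / nr_msix online CPUs, gives the first nr_cpus % nr_msix queues one extra CPU, records the mapping in cpu_msix_table, and publishes each block through irq_set_affinity_hint(). For example, with 16 online CPUs and 6 MSI-X vectors the base group is 2 with a remainder of 4, so the first four vectors cover 3 CPUs each and the last two cover 2 each (4*3 + 2*2 = 16).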
/**
@@ -2338,6 +2360,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
_base_display_ioc_capabilities(ioc);
/*
@@ -2355,6 +2378,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (ioc->iounit_pg8.NumSensors)
+ ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}
/**
@@ -2486,9 +2511,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
/* command line tunables for max sgl entries */
if (max_sgl_entries != -1) {
- ioc->shost->sg_tablesize = (max_sgl_entries <
- MPT2SAS_SG_DEPTH) ? max_sgl_entries :
- MPT2SAS_SG_DEPTH;
+ ioc->shost->sg_tablesize = min_t(unsigned short,
+ max_sgl_entries, SCSI_MAX_SG_CHAIN_SEGMENTS);
+ if (ioc->shost->sg_tablesize > MPT2SAS_SG_DEPTH)
+ printk(MPT2SAS_WARN_FMT
+ "sg_tablesize(%u) is bigger than kernel defined"
+ " SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+ ioc->shost->sg_tablesize, MPT2SAS_SG_DEPTH);
} else {
ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
}
@@ -3236,7 +3265,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
u16 smid;
u32 ioc_state;
unsigned long timeleft;
- u8 issue_reset;
+ bool issue_reset = false;
int rc;
void *request;
u16 wait_state_count;
@@ -3300,7 +3329,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
_debug_dump_mf(mpi_request,
sizeof(Mpi2SasIoUnitControlRequest_t)/4);
if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
- issue_reset = 1;
+ issue_reset = true;
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
@@ -3341,7 +3370,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
u16 smid;
u32 ioc_state;
unsigned long timeleft;
- u8 issue_reset;
+ bool issue_reset = false;
int rc;
void *request;
u16 wait_state_count;
@@ -3398,7 +3427,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
_debug_dump_mf(mpi_request,
sizeof(Mpi2SepRequest_t)/4);
if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
- issue_reset = 1;
+ issue_reset = true;
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
@@ -4594,6 +4623,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+ _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
r = _base_make_ioc_operational(ioc, CAN_SLEEP);
if (r)
goto out_free_resources;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 239f169b0673..caff8d10cca4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -67,10 +68,10 @@
/* driver versioning info */
#define MPT2SAS_DRIVER_NAME "mpt2sas"
-#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT2SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "18.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 18
+#define MPT2SAS_DRIVER_VERSION "20.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 20
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -586,6 +587,7 @@ struct adapter_reply_queue {
Mpi2ReplyDescriptorsUnion_t *reply_post_free;
char name[MPT_NAME_LENGTH];
atomic_t busy;
+ cpumask_var_t affinity_hint;
struct list_head list;
};
@@ -725,6 +727,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
* @ioc_pg8: static ioc page 8
* @iounit_pg0: static iounit page 0
* @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
* @sas_hba: sas host object
* @sas_expander_list: expander object list
* @sas_node_lock:
@@ -795,6 +798,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
* @reply_post_host_index: head index in the pool where FW completes IO
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
+ * @temp_sensors_count: number of temperature sensors reported by IO unit page 8
*/
struct MPT2SAS_ADAPTER {
struct list_head list;
@@ -892,6 +896,7 @@ struct MPT2SAS_ADAPTER {
Mpi2IOCPage8_t ioc_pg8;
Mpi2IOUnitPage0_t iounit_pg0;
Mpi2IOUnitPage1_t iounit_pg1;
+ Mpi2IOUnitPage8_t iounit_pg8;
struct _boot_device req_boot_device;
struct _boot_device req_alt_boot_device;
@@ -992,6 +997,7 @@ struct MPT2SAS_ADAPTER {
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
+ u8 temp_sensors_count;
/* diag buffer support */
u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
@@ -1120,6 +1126,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page);
int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz);
int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index c72a2fff5dbb..c43815b1a485 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -719,6 +720,42 @@ mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
}
/**
+ * mpt2sas_config_get_iounit_pg8 - obtain iounit page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
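+
The new mpt2sas_config_get_iounit_pg8() follows the same two-step pattern as the other accessors in this file: a PAGE_HEADER action with a zero-length SGE first, then the same request reissued as PAGE_READ_CURRENT with the caller's buffer. A compile-only schematic of that ordering (every type and function below is a placeholder, not a real MPI or driver API):

/* Compile-only schematic of the two-step config page read used above.
 * All names here are placeholders; only the PAGE_HEADER ->
 * PAGE_READ_CURRENT ordering mirrors the driver. */
#include <stddef.h>
#include <string.h>

enum cfg_action { ACTION_PAGE_HEADER, ACTION_PAGE_READ_CURRENT };

struct cfg_request { enum cfg_action action; };

/* stub standing in for _config_request(); always "succeeds" */
static int send_cfg_request(struct cfg_request *req, void *buf, size_t len)
{
	(void)req;
	if (buf)
		memset(buf, 0, len);
	return 0;
}

int get_cfg_page(void *page, size_t page_sz)
{
	struct cfg_request req = { .action = ACTION_PAGE_HEADER };
	int rc;

	rc = send_cfg_request(&req, NULL, 0);	/* header only, zero-length SGE */
	if (rc)
		return rc;

	req.action = ACTION_PAGE_READ_CURRENT;	/* reuse the same request */
	return send_cfg_request(&req, page, page_sz);
}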
+/**
* mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ca4e563c01dd..4e509604b571 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 7f842c88abd2..46b2fc5b74af 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index cc57ef31d0fe..277120d45648 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 6a1c036a6f3f..3f26147bbc64 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -2729,9 +2730,18 @@ _scsih_host_reset(struct scsi_cmnd *scmd)
ioc->name, scmd);
scsi_print_command(scmd);
+ if (ioc->is_driver_loading) {
+ printk(MPT2SAS_INFO_FMT "Blocking the host reset\n",
+ ioc->name);
+ r = FAILED;
+ goto out;
+ }
+
retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
+
+ out:
printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n",
ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3647,6 +3657,31 @@ _scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc,
}
/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataTemperature_t *event_data)
+{
+ if (ioc->temp_sensors_count >= event_data->SensorNum) {
+ printk(MPT2SAS_ERR_FMT "Temperature Threshold flags %s%s%s%s"
+ " exceeded for Sensor: %d !!!\n", ioc->name,
+ ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
+ event_data->SensorNum);
+ printk(MPT2SAS_ERR_FMT "Current Temp In Celsius: %d\n",
+ ioc->name, event_data->CurrentTemperature);
+ }
+}
+
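+
In the handler above, bits 0-3 of the little-endian Status word indicate which of the sensor's four temperature thresholds tripped, and each set bit contributes its index ("0".."3") to the log line. A minimal standalone sketch of that decoding (the 0x5 test value is hypothetical):

/* Standalone sketch: decodes the low four Status bits the same way the
 * handler above does ("0".."3" for each exceeded threshold). */
#include <stdio.h>
#include <stdint.h>

static void print_exceeded_thresholds(uint16_t status)
{
	printf("Temperature Threshold flags %s%s%s%s exceeded\n",
	       (status & 0x1) ? "0 " : " ",
	       (status & 0x2) ? "1 " : " ",
	       (status & 0x4) ? "2 " : " ",
	       (status & 0x8) ? "3 " : " ");
}

int main(void)
{
	print_exceeded_thresholds(0x5);	/* hypothetical: thresholds 0 and 2 */
	return 0;
}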
+/**
* _scsih_flush_running_cmds - completing outstanding commands.
* @ioc: per adapter object
*
@@ -4509,6 +4544,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_TRANSPORT_DISRUPTED << 16;
goto out;
}
+ if (log_info == 0x32010081) {
+ scmd->result = DID_RESET << 16;
+ break;
+ }
scmd->result = DID_SOFT_ERROR << 16;
break;
case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
@@ -7557,6 +7596,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_IR_PHYSICAL_DISK:
break;
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ _scsih_temp_threshold_events(ioc,
+ (Mpi2EventDataTemperature_t *)
+ mpi_reply->EventData);
+ break;
+
default: /* ignore the rest */
return;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e689bf20a3ea..ff2500ab9ba4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
* Copyright (C) 2007-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 1560115079c7..14a781b6b88d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -619,6 +620,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
case MPI2_EVENT_LOG_ENTRY_ADDED:
desc = "Log Entry Added";
break;
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ desc = "Temperature Threshold";
+ break;
}
if (!desc)
@@ -1580,6 +1584,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
+ irq_set_affinity_hint(reply_q->vector, NULL);
+ free_cpumask_var(reply_q->affinity_hint);
synchronize_irq(reply_q->vector);
free_irq(reply_q->vector, reply_q);
kfree(reply_q);
@@ -1609,6 +1615,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
reply_q->ioc = ioc;
reply_q->msix_index = index;
reply_q->vector = vector;
+
+ if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_clear(reply_q->affinity_hint);
+
atomic_set(&reply_q->busy, 0);
if (ioc->msix_enable)
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
@@ -1643,6 +1654,7 @@ static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ struct adapter_reply_queue *reply_q;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -1657,20 +1669,30 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
cpu = cpumask_first(cpu_online_mask);
- do {
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+
unsigned int i, group = nr_cpus / nr_msix;
+ if (cpu >= nr_cpus)
+ break;
+
if (index < nr_cpus % nr_msix)
group++;
for (i = 0 ; i < group ; i++) {
ioc->cpu_msix_table[cpu] = index;
+ cpumask_or(reply_q->affinity_hint,
+ reply_q->affinity_hint, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
+ if (irq_set_affinity_hint(reply_q->vector,
+ reply_q->affinity_hint))
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "error setting affinity hint for irq vector %d\n",
+ ioc->name, reply_q->vector));
index++;
-
- } while (cpu < nr_cpus);
+ }
}
/**
@@ -2500,6 +2522,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
_base_display_ioc_capabilities(ioc);
/*
@@ -2516,6 +2539,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+
+ if (ioc->iounit_pg8.NumSensors)
+ ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}
/**
@@ -2659,8 +2685,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
- else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
- sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
+ else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) {
+ sg_tablesize = min_t(unsigned short, sg_tablesize,
+ SCSI_MAX_SG_CHAIN_SEGMENTS);
+ pr_warn(MPT3SAS_FMT
+ "sg_tablesize(%u) is bigger than kernel"
+ " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+ sg_tablesize, MPT3SAS_MAX_PHYS_SEGMENTS);
+ }
ioc->shost->sg_tablesize = sg_tablesize;
ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -3419,7 +3451,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
u16 smid;
u32 ioc_state;
unsigned long timeleft;
- u8 issue_reset;
+ bool issue_reset = false;
int rc;
void *request;
u16 wait_state_count;
@@ -3483,7 +3515,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
_debug_dump_mf(mpi_request,
sizeof(Mpi2SasIoUnitControlRequest_t)/4);
if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset = true;
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -3523,7 +3555,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
u16 smid;
u32 ioc_state;
unsigned long timeleft;
- u8 issue_reset;
+ bool issue_reset = false;
int rc;
void *request;
u16 wait_state_count;
@@ -3581,7 +3613,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
_debug_dump_mf(mpi_request,
sizeof(Mpi2SepRequest_t)/4);
if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset = true;
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -4720,6 +4752,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+ _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
r = _base_make_ioc_operational(ioc, CAN_SLEEP);
if (r)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 40926aa9b24d..afa881682bef 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -68,7 +69,7 @@
/* driver versioning info */
#define MPT3SAS_DRIVER_NAME "mpt3sas"
-#define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
#define MPT3SAS_DRIVER_VERSION "04.100.00.00"
#define MPT3SAS_MAJOR_VERSION 4
@@ -506,6 +507,7 @@ struct adapter_reply_queue {
Mpi2ReplyDescriptorsUnion_t *reply_post_free;
char name[MPT_NAME_LENGTH];
atomic_t busy;
+ cpumask_var_t affinity_hint;
struct list_head list;
};
@@ -659,6 +661,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ioc_pg8: static ioc page 8
* @iounit_pg0: static iounit page 0
* @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
* @sas_hba: sas host object
* @sas_expander_list: expander object list
* @sas_node_lock:
@@ -728,6 +731,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @reply_post_host_index: head index in the pool where FW completes IO
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
+ * @temp_sensors_count: number of temperature sensors reported by IO unit page 8
*/
struct MPT3SAS_ADAPTER {
struct list_head list;
@@ -834,6 +838,7 @@ struct MPT3SAS_ADAPTER {
Mpi2IOCPage8_t ioc_pg8;
Mpi2IOUnitPage0_t iounit_pg0;
Mpi2IOUnitPage1_t iounit_pg1;
+ Mpi2IOUnitPage8_t iounit_pg8;
struct _boot_device req_boot_device;
struct _boot_device req_alt_boot_device;
@@ -934,6 +939,7 @@ struct MPT3SAS_ADAPTER {
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
+ u8 temp_sensors_count;
/* diag buffer support */
u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
@@ -1082,6 +1088,8 @@ int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage8_t *config_page);
int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
u16 sz);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 4472c2af9255..e45c4613ef0c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -871,6 +872,42 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_ioc_pg8 - obtain ioc page 8
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index dca14877d5ab..080c8a76d23d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 5f3d7fd7c2f8..aee99ce67e54 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
index 4778e7dd98bd..4e8a63fdb304 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_debug.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 94261ee9e72d..5a97e3286719 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -2392,9 +2393,17 @@ _scsih_host_reset(struct scsi_cmnd *scmd)
ioc->name, scmd);
scsi_print_command(scmd);
+ if (ioc->is_driver_loading) {
+ pr_info(MPT3SAS_FMT "Blocking the host reset\n",
+ ioc->name);
+ r = FAILED;
+ goto out;
+ }
+
retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
+out:
pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3342,6 +3351,31 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataTemperature_t *event_data)
+{
+ if (ioc->temp_sensors_count >= event_data->SensorNum) {
+ pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
+ " exceeded for Sensor: %d !!!\n", ioc->name,
+ ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
+ event_data->SensorNum);
+ pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
+ ioc->name, event_data->CurrentTemperature);
+ }
+}
+
+/**
* _scsih_flush_running_cmds - completing outstanding commands.
* @ioc: per adapter object
*
@@ -7194,6 +7228,12 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_IR_PHYSICAL_DISK:
break;
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ _scsih_temp_threshold_events(ioc,
+ (Mpi2EventDataTemperature_t *)
+ mpi_reply->EventData);
+ break;
+
default: /* ignore the rest */
return 1;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 3637ae6c0171..efb98afc46e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -3,7 +3,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index 8a2dd113f401..b60fd7a3b571 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -4,7 +4,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
index f681db56c53b..6586a463bea9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -5,7 +5,8 @@
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
* Copyright (C) 2012-2014 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 90abb03c9074..c6077cefbeca 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1441,8 +1441,6 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
@@ -1458,64 +1456,63 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
- SPRINTF("NinjaSCSI-32 status\n\n");
- SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
- SPRINTF("SCSI host No.: %d\n", hostno);
- SPRINTF("IRQ: %d\n", host->irq);
- SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
- SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
- SPRINTF("sg_tablesize: %d\n", host->sg_tablesize);
- SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
+ seq_puts(m, "NinjaSCSI-32 status\n\n");
+ seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
+ seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
mode_reg = nsp32_index_read1(base, CHIP_MODE);
model = data->pci_devid->driver_data;
#ifdef CONFIG_PM
- SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
+ seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
#endif
- SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
+ seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
spin_lock_irqsave(&(data->Lock), flags);
- SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC);
+ seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
spin_unlock_irqrestore(&(data->Lock), flags);
- SPRINTF("SDTR status\n");
+ seq_puts(m, "SDTR status\n");
for (id = 0; id < ARRAY_SIZE(data->target); id++) {
- SPRINTF("id %d: ", id);
+ seq_printf(m, "id %d: ", id);
if (id == host->this_id) {
- SPRINTF("----- NinjaSCSI-32 host adapter\n");
+ seq_puts(m, "----- NinjaSCSI-32 host adapter\n");
continue;
}
if (data->target[id].sync_flag == SDTR_DONE) {
if (data->target[id].period == 0 &&
data->target[id].offset == ASYNC_OFFSET ) {
- SPRINTF("async");
+ seq_puts(m, "async");
} else {
- SPRINTF(" sync");
+ seq_puts(m, " sync");
}
} else {
- SPRINTF(" none");
+ seq_puts(m, " none");
}
if (data->target[id].period != 0) {
speed = 1000000 / (data->target[id].period * 4);
- SPRINTF(" transfer %d.%dMB/s, offset %d",
+ seq_printf(m, " transfer %d.%dMB/s, offset %d",
speed / 1000,
speed % 1000,
data->target[id].offset
);
}
- SPRINTF("\n");
+ seq_putc(m, '\n');
}
return 0;
}
-#undef SPRINTF
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 34aad32829f5..1b6c8833a304 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1364,9 +1364,6 @@ static const char *nsp_info(struct Scsi_Host *shpnt)
return data->nspinfo;
}
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
-
static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host)
{
int id;
@@ -1378,75 +1375,74 @@ static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host)
hostno = host->host_no;
data = (nsp_hw_data *)host->hostdata;
- SPRINTF("NinjaSCSI status\n\n");
- SPRINTF("Driver version: $Revision: 1.23 $\n");
- SPRINTF("SCSI host No.: %d\n", hostno);
- SPRINTF("IRQ: %d\n", host->irq);
- SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
- SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
- SPRINTF("sg_tablesize: %d\n", host->sg_tablesize);
+ seq_puts(m, "NinjaSCSI status\n\n"
+ "Driver version: $Revision: 1.23 $\n");
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
- SPRINTF("burst transfer mode: ");
+ seq_puts(m, "burst transfer mode: ");
switch (nsp_burst_mode) {
case BURST_IO8:
- SPRINTF("io8");
+ seq_puts(m, "io8");
break;
case BURST_IO32:
- SPRINTF("io32");
+ seq_puts(m, "io32");
break;
case BURST_MEM32:
- SPRINTF("mem32");
+ seq_puts(m, "mem32");
break;
default:
- SPRINTF("???");
+ seq_puts(m, "???");
break;
}
- SPRINTF("\n");
+ seq_putc(m, '\n');
spin_lock_irqsave(&(data->Lock), flags);
- SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC);
+ seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
spin_unlock_irqrestore(&(data->Lock), flags);
- SPRINTF("SDTR status\n");
+ seq_puts(m, "SDTR status\n");
for(id = 0; id < ARRAY_SIZE(data->Sync); id++) {
- SPRINTF("id %d: ", id);
+ seq_printf(m, "id %d: ", id);
if (id == host->this_id) {
- SPRINTF("----- NinjaSCSI-3 host adapter\n");
+ seq_puts(m, "----- NinjaSCSI-3 host adapter\n");
continue;
}
switch(data->Sync[id].SyncNegotiation) {
case SYNC_OK:
- SPRINTF(" sync");
+ seq_puts(m, " sync");
break;
case SYNC_NG:
- SPRINTF("async");
+ seq_puts(m, "async");
break;
case SYNC_NOT_YET:
- SPRINTF(" none");
+ seq_puts(m, " none");
break;
default:
- SPRINTF("?????");
+ seq_puts(m, "?????");
break;
}
if (data->Sync[id].SyncPeriod != 0) {
speed = 1000000 / (data->Sync[id].SyncPeriod * 4);
- SPRINTF(" transfer %d.%dMB/s, offset %d",
+ seq_printf(m, " transfer %d.%dMB/s, offset %d",
speed / 1000,
speed % 1000,
data->Sync[id].SyncOffset
);
}
- SPRINTF("\n");
+ seq_putc(m, '\n');
}
return 0;
}
-#undef SPRINTF
/*---------------------------------------------------------------*/
/* error handler */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 8c27b6a77ec4..ed31d8cc6266 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1473,13 +1473,7 @@ static int pmcraid_notify_aen(
}
/* send genetlink multicast message to notify applications */
- result = genlmsg_end(skb, msg_header);
-
- if (result < 0) {
- pmcraid_err("genlmsg_end failed\n");
- nlmsg_free(skb);
- return result;
- }
+ genlmsg_end(skb, msg_header);
result = genlmsg_multicast(&pmcraid_event_family, skb,
0, 0, GFP_ATOMIC);
@@ -4223,7 +4217,7 @@ static ssize_t pmcraid_show_adapter_id(
static struct device_attribute pmcraid_adapter_id_attr = {
.attr = {
.name = "adapter_id",
- .mode = S_IRUGO | S_IWUSR,
+ .mode = S_IRUGO,
},
.show = pmcraid_show_adapter_id,
};
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 2ca39b8e7166..15cf074ffa3c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -23,10 +23,10 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
mutex_lock(&ha->fce_mutex);
- seq_printf(s, "FCE Trace Buffer\n");
+ seq_puts(s, "FCE Trace Buffer\n");
seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
- seq_printf(s, "FCE Enable Registers\n");
+ seq_puts(s, "FCE Enable Registers\n");
seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
ha->fce_mb[5], ha->fce_mb[6]);
@@ -38,11 +38,11 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
seq_printf(s, "\n%llx: ",
(unsigned long long)((cnt * 4) + fce_start));
else
- seq_printf(s, " ");
+ seq_putc(s, ' ');
seq_printf(s, "%08x", *fce++);
}
- seq_printf(s, "\nEnd\n");
+ seq_puts(s, "\nEnd\n");
mutex_unlock(&ha->fce_mutex);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e02885451425..c9c3b579eece 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -531,7 +531,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
*
* 3: same as 2
*
- * 4: same as 3 plus dump extra junk
+ * 4: same as 3
*/
if (unlikely(scsi_logging_level)) {
level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
@@ -540,13 +540,6 @@ void scsi_log_send(struct scsi_cmnd *cmd)
scmd_printk(KERN_INFO, cmd,
"Send: scmd 0x%p\n", cmd);
scsi_print_command(cmd);
- if (level > 3) {
- printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
- " queuecommand 0x%p\n",
- scsi_sglist(cmd), scsi_bufflen(cmd),
- cmd->device->host->hostt->queuecommand);
-
- }
}
}
}
@@ -572,7 +565,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
SCSI_LOG_MLCOMPLETE_BITS);
if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
(level > 1)) {
- scsi_print_result(cmd, "Done: ", disposition);
+ scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
if (status_byte(cmd->result) & CHECK_CONDITION)
scsi_print_sense(cmd);
@@ -986,9 +979,9 @@ int scsi_device_get(struct scsi_device *sdev)
return -ENXIO;
if (!get_device(&sdev->sdev_gendev))
return -ENXIO;
- /* We can fail this if we're doing SCSI operations
+ /* We can fail try_module_get if we're doing SCSI operations
* from module exit (like cache flush) */
- try_module_get(sdev->host->hostt->module);
+ __module_get(sdev->host->hostt->module);
return 0;
}
@@ -1004,14 +997,7 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
-#ifdef CONFIG_MODULE_UNLOAD
- struct module *module = sdev->host->hostt->module;
-
- /* The module refcount will be zero if scsi_device_get()
- * was called from a module removal routine */
- if (module && module_refcount(module) != 0)
- module_put(module);
-#endif
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b8b51bc29b4..1f8e2dc9c616 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -80,6 +80,8 @@ static const char *scsi_debug_version_date = "20141022";
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
+#define TARGET_CHANGED_ASC 0x3f
+#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
@@ -91,6 +93,8 @@ static const char *scsi_debug_version_date = "20141022";
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
+#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
+#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -180,7 +184,10 @@ static const char *scsi_debug_version_date = "20141022";
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
-#define SDEBUG_NUM_UAS 4
+#define SDEBUG_UA_LUNS_CHANGED 4
+#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
+#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
+#define SDEBUG_NUM_UAS 7
/* for check_readiness() */
#define UAS_ONLY 1 /* check for UAs only */
@@ -326,6 +333,7 @@ static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
struct opcode_info_t {
u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
@@ -480,8 +488,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* WRITE_BUFFER */
{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
@@ -782,6 +791,22 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
/* return -ENOTTY; // correct return but upsets fdisk */
}
+static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
+{
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ if ((devip->sdbg_host == dp->sdbg_host) &&
+ (devip->target == dp->target))
+ clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
struct sdebug_dev_info * devip)
{
@@ -817,6 +842,36 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
if (debug)
cp = "capacity data changed";
break;
+ case SDEBUG_UA_MICROCODE_CHANGED:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
+ if (debug)
+ cp = "microcode has been changed";
+ break;
+ case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC,
+ MICROCODE_CHANGED_WO_RESET_ASCQ);
+ if (debug)
+ cp = "microcode has been changed without reset";
+ break;
+ case SDEBUG_UA_LUNS_CHANGED:
+ /*
+ * SPC-3 behavior is to report a UNIT ATTENTION with
+ * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
+ * on the target, until a REPORT LUNS command is
+ * received. SPC-4 behavior is to report it only once.
+ * NOTE: scsi_debug_scsi_level does not use the same
+ * values as struct scsi_device->scsi_level.
+ */
+ if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */
+ clear_luns_changed_on_target(devip);
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC,
+ LUNS_CHANGED_ASCQ);
+ if (debug)
+ cp = "reported luns data has changed";
+ break;
default:
pr_warn("%s: unexpected unit attention code=%d\n",
__func__, k);
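
All three unit attentions added above resolve to additional sense code 0x3f (TARGET CHANGED) and differ only in the qualifier. A tiny standalone table of the pairs, copied from the #defines introduced earlier in this patch:

/* Standalone sketch: the ASC/ASCQ pairs behind the unit attentions handled
 * above, copied from the #defines added earlier in this patch. */
#include <stdio.h>

struct ua_entry { const char *name; unsigned char asc, ascq; };

static const struct ua_entry new_uas[] = {
	{ "microcode changed",              0x3f, 0x01 },
	{ "microcode changed w/o reset",    0x3f, 0x16 },
	{ "reported LUNs data has changed", 0x3f, 0x0e },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(new_uas) / sizeof(new_uas[0]); i++)
		printf("%-32s ASC 0x%02x ASCQ 0x%02x\n",
		       new_uas[i].name, new_uas[i].asc, new_uas[i].ascq);
	return 0;
}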
@@ -1623,7 +1678,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
req_opcode = cmd[3];
req_sa = get_unaligned_be16(cmd + 4);
alloc_len = get_unaligned_be32(cmd + 6);
- if (alloc_len < 4 && alloc_len > 0xffff) {
+ if (alloc_len < 4 || alloc_len > 0xffff) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
return check_condition_result;
}
@@ -1631,7 +1686,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
a_len = 8192;
else
a_len = alloc_len;
- arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
+ arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
if (NULL == arr) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
@@ -3033,6 +3088,55 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}
+/* Note the mode field is in the same position as the (lower) service action
+ * field. For the Report supported operation codes command, SPC-4 suggests
+ * each mode of this command should be reported separately; left for a future change. */
+static int
+resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *dp;
+ u8 mode;
+
+ mode = cmd[1] & 0x1f;
+ switch (mode) {
+ case 0x4: /* download microcode (MC) and activate (ACT) */
+ /* set UAs on this device only */
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
+ break;
+ case 0x5: /* download MC, save and ACT */
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
+ break;
+ case 0x6: /* download MC with offsets and ACT */
+ /* set UAs on most devices (LUs) in this target */
+ list_for_each_entry(dp,
+ &devip->sdbg_host->dev_info_list,
+ dev_list)
+ if (dp->target == sdp->id) {
+ set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
+ if (devip != dp)
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED,
+ dp->uas_bm);
+ }
+ break;
+ case 0x7: /* download MC with offsets, save, and ACT */
+ /* set UA on all devices (LUs) in this target */
+ list_for_each_entry(dp,
+ &devip->sdbg_host->dev_info_list,
+ dev_list)
+ if (dp->target == sdp->id)
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
+ dp->uas_bm);
+ break;
+ default:
+ /* do nothing for this command for other mode values */
+ break;
+ }
+ return 0;
+}
+
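+
resp_write_buffer() keys off the mode field in CDB byte 1 (low five bits): mode 0x4 raises a bus-reset plus microcode-changed unit attention on the addressed LU, 0x5 raises the without-reset variant, and the offset forms 0x6/0x7 fan the corresponding attentions out to every LU on the same target. A minimal sketch that just names the handled modes (the CDB byte value is made up):

/* Standalone sketch: names the WRITE BUFFER modes handled above
 * (mode = CDB byte 1 & 0x1f). */
#include <stdio.h>

static const char *wb_mode_name(unsigned char mode)
{
	switch (mode) {
	case 0x4: return "download microcode and activate";
	case 0x5: return "download microcode, save and activate";
	case 0x6: return "download microcode with offsets and activate";
	case 0x7: return "download microcode with offsets, save and activate";
	default:  return "other (ignored by scsi_debug)";
	}
}

int main(void)
{
	unsigned char cdb1 = 0x07;	/* hypothetical CDB byte 1 */

	printf("mode 0x%x: %s\n", cdb1 & 0x1f, wb_mode_name(cdb1 & 0x1f));
	return 0;
}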
static int
resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
@@ -3229,6 +3333,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
unsigned char arr[SDEBUG_RLUN_ARR_SZ];
unsigned char * max_addr;
+ clear_luns_changed_on_target(devip);
alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
shortish = (alloc_len < 4);
if (shortish || (select_report > 2)) {
@@ -4369,10 +4474,27 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
{
int n;
+ bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ changed = (scsi_debug_max_luns != n);
scsi_debug_max_luns = n;
sdebug_max_tgts_luns();
+ if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list,
+ dev_list) {
+ set_bit(SDEBUG_UA_LUNS_CHANGED,
+ dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ }
return count;
}
return -EINVAL;
@@ -4536,10 +4658,10 @@ static ssize_t map_show(struct device_driver *ddp, char *buf)
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
- count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
-
+ count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ (int)map_size, map_storep);
buf[count++] = '\n';
- buf[count++] = 0;
+ buf[count] = '\0';
return count;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8afb01604d51..4cdaffca17fc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -124,41 +124,37 @@ scmd_eh_abort_handler(struct work_struct *work)
if (scsi_host_eh_past_deadline(sdev->host)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "scmd %p eh timeout, not aborting\n",
- scmd));
+ "eh timeout, not aborting\n"));
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "aborting command %p\n", scmd));
+ "aborting command\n"));
rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
if (rtn == SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
if (scsi_host_eh_past_deadline(sdev->host)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "scmd %p eh timeout, "
- "not retrying aborted "
- "command\n", scmd));
+ "eh timeout, not retrying "
+ "aborted command\n"));
} else if (!scsi_noretry_cmd(scmd) &&
(++scmd->retries <= scmd->allowed)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
- "scmd %p retry "
- "aborted command\n", scmd));
+ "retry aborted command\n"));
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
return;
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
- "scmd %p finish "
- "aborted command\n", scmd));
+ "finish aborted command\n"));
scsi_finish_command(scmd);
return;
}
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "scmd %p abort %s\n", scmd,
+ "cmd abort %s\n",
(rtn == FAST_IO_FAIL) ?
"not send" : "failed"));
}
@@ -167,8 +163,7 @@ scmd_eh_abort_handler(struct work_struct *work)
if (!scsi_eh_scmd_add(scmd, 0)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
- "scmd %p terminate "
- "aborted command\n", scmd));
+ "terminate aborted command\n"));
set_host_byte(scmd, DID_TIME_OUT);
scsi_finish_command(scmd);
}
@@ -194,7 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "scmd %p previous abort failed\n", scmd));
+ "previous abort failed\n"));
BUG_ON(delayed_work_pending(&scmd->abort_work));
return FAILED;
}
@@ -208,8 +203,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "scmd %p not aborting, host in recovery\n",
- scmd));
+ "not aborting, host in recovery\n"));
return FAILED;
}
@@ -219,8 +213,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "scmd %p abort scheduled\n", scmd));
+ scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
return SUCCESS;
}
@@ -737,8 +730,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
struct completion *eh_action;
SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
- "%s scmd: %p result: %x\n",
- __func__, scmd, scmd->result));
+ "%s result: %x\n", __func__, scmd->result));
eh_action = scmd->device->host->eh_action;
if (eh_action)
@@ -868,6 +860,7 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
/**
* scsi_try_to_abort_cmd - Ask host to abort a SCSI command
+ * @hostt: SCSI driver host template
* @scmd: SCSI cmd used to send a target reset
*
* Return value:
@@ -1052,8 +1045,8 @@ retry:
scsi_log_completion(scmd, rtn);
SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
- "%s: scmd: %p, timeleft: %ld\n",
- __func__, scmd, timeleft));
+ "%s timeleft: %ld\n",
+ __func__, timeleft));
/*
* If there is time left scsi_eh_done got called, and we will examine
@@ -1192,8 +1185,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
continue;
SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
- "sense requested for %p result %x\n",
- scmd, scmd->result));
+ "sense requested, result %x\n", scmd->result));
SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
rtn = scsi_decide_disposition(scmd);
@@ -1235,7 +1227,7 @@ retry_tur:
scmd->device->eh_timeout, 0);
SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
- "%s: scmd %p rtn %x\n", __func__, scmd, rtn));
+ "%s return: %x\n", __func__, rtn));
switch (rtn) {
case NEEDS_RETRY:
@@ -2092,8 +2084,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
(++scmd->retries <= scmd->allowed)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "%s: flush retry cmd: %p\n",
- current->comm, scmd));
+ "%s: flush retry cmd\n",
+ current->comm));
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
} else {
/*
@@ -2105,8 +2097,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
scmd->result |= (DRIVER_TIMEOUT << 24);
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
- "%s: flush finish cmd: %p\n",
- current->comm, scmd));
+ "%s: flush finish cmd\n",
+ current->comm));
scsi_finish_command(scmd);
}
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ea95dd3e260..54d7a6cbb98a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
struct scatterlist *first_chunk = NULL;
- gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
int ret;
BUG_ON(!nents);
@@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
}
ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
- first_chunk, gfp_mask, scsi_sg_alloc);
+ first_chunk, GFP_ATOMIC, scsi_sg_alloc);
if (unlikely(ret))
scsi_free_sgtable(sdb, mq);
return ret;
@@ -1144,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
int ivecs, count;
- BUG_ON(prot_sdb == NULL);
+ if (prot_sdb == NULL) {
+ /*
+ * This can happen if someone (e.g. multipath)
+ * queues a command to a device on an adapter
+ * that does not support DIX.
+ */
+ WARN_ON_ONCE(1);
+ error = BLKPREP_KILL;
+ goto err_exit;
+ }
+
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
@@ -2188,6 +2197,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
shost->tag_set.cmd_size = cmd_size;
shost->tag_set.numa_node = NUMA_NO_NODE;
shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ shost->tag_set.flags |=
+ BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
shost->tag_set.driver_data = shost;
return blk_mq_alloc_tag_set(&shost->tag_set);
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
new file mode 100644
index 000000000000..bd70339c1242
--- /dev/null
+++ b/drivers/scsi/scsi_logging.c
@@ -0,0 +1,485 @@
+/*
+ * scsi_logging.c
+ *
+ * Copyright (C) 2014 SUSE Linux Products GmbH
+ * Copyright (C) 2014 Hannes Reinecke <hare@suse.de>
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
+
+#define SCSI_LOG_SPOOLSIZE 4096
+
+#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
+#warning SCSI logging bitmask too large
+#endif
+
+struct scsi_log_buf {
+ char buffer[SCSI_LOG_SPOOLSIZE];
+ unsigned long map;
+};
+
+static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
+
+static char *scsi_log_reserve_buffer(size_t *len)
+{
+ struct scsi_log_buf *buf;
+ unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
+ unsigned long idx = 0;
+
+ preempt_disable();
+ buf = this_cpu_ptr(&scsi_format_log);
+ idx = find_first_zero_bit(&buf->map, map_bits);
+ if (likely(idx < map_bits)) {
+ while (test_and_set_bit(idx, &buf->map)) {
+ idx = find_next_zero_bit(&buf->map, map_bits, idx);
+ if (idx >= map_bits)
+ break;
+ }
+ }
+ if (WARN_ON(idx >= map_bits)) {
+ preempt_enable();
+ return NULL;
+ }
+ *len = SCSI_LOG_BUFSIZE;
+ return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+}
+
+static void scsi_log_release_buffer(char *bufptr)
+{
+ struct scsi_log_buf *buf;
+ unsigned long idx;
+ int ret;
+
+ buf = this_cpu_ptr(&scsi_format_log);
+ if (bufptr >= buf->buffer &&
+ bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
+ idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
+ ret = test_and_clear_bit(idx, &buf->map);
+ WARN_ON(!ret);
+ }
+ preempt_enable();
+}
+
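+
The two helpers above carve each CPU's SCSI_LOG_SPOOLSIZE spool into SCSI_LOG_BUFSIZE slots and track them in a single unsigned long bitmap, so a logging call that interrupts another on the same CPU still gets its own slot with nothing heavier than preempt_disable(). A rough userspace analog of that slot pool (slot sizes and the GCC __sync builtins are illustrative stand-ins, not the kernel API):

/* Userspace analog of the bitmap slot pool above (not kernel code):
 * one unsigned long of bits guards NSLOTS fixed-size line buffers. */
#include <stdio.h>

#define NSLOTS	8
#define SLOTSZ	128

static char pool[NSLOTS][SLOTSZ];
static unsigned long map;		/* bit n set => pool[n] in use */

static char *reserve_slot(void)
{
	for (unsigned int i = 0; i < NSLOTS; i++) {
		unsigned long bit = 1UL << i;

		/* test_and_set_bit() analog */
		if (!(__sync_fetch_and_or(&map, bit) & bit))
			return pool[i];
	}
	return NULL;			/* pool exhausted */
}

static void release_slot(char *buf)
{
	unsigned long i = (unsigned long)(buf - pool[0]) / SLOTSZ;

	/* test_and_clear_bit() analog, index recomputed from the pointer */
	__sync_fetch_and_and(&map, ~(1UL << i));
}

int main(void)
{
	char *a = reserve_slot();
	char *b = reserve_slot();	/* distinct slot, no clobbering */

	snprintf(a, SLOTSZ, "first message");
	snprintf(b, SLOTSZ, "second message");
	printf("%s / %s\n", a, b);
	release_slot(b);
	release_slot(a);
	return 0;
}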
+static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+{
+ return scmd->request->rq_disk ?
+ scmd->request->rq_disk->disk_name : NULL;
+}
+
+static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
+ const char *name, int tag)
+{
+ size_t off = 0;
+
+ if (name)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "[%s] ", name);
+
+ if (WARN_ON(off >= logbuf_len))
+ return off;
+
+ if (tag >= 0)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "tag#%d ", tag);
+ return off;
+}
+
+void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
+ const char *name, const char *fmt, ...)
+{
+ va_list args;
+ char *logbuf;
+ size_t off = 0, logbuf_len;
+
+ if (!sdev)
+ return;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ if (name)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "[%s] ", name);
+ if (!WARN_ON(off >= logbuf_len)) {
+ va_start(args, fmt);
+ off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
+ va_end(args);
+ }
+ dev_printk(level, &sdev->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(sdev_prefix_printk);
+
+void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
+ const char *fmt, ...)
+{
+ va_list args;
+ char *logbuf;
+ size_t off = 0, logbuf_len;
+
+ if (!scmd || !scmd->cmnd)
+ return;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+ off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
+ scmd->request->tag);
+ if (off < logbuf_len) {
+ va_start(args, fmt);
+ off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
+ va_end(args);
+ }
+ dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scmd_printk);
+
+static size_t scsi_format_opcode_name(char *buffer, size_t buf_len,
+ const unsigned char *cdbp)
+{
+ int sa, cdb0;
+ const char *cdb_name = NULL, *sa_name = NULL;
+ size_t off;
+
+ cdb0 = cdbp[0];
+ if (cdb0 == VARIABLE_LENGTH_CMD) {
+ int len = scsi_varlen_cdb_length(cdbp);
+
+ if (len < 10) {
+ off = scnprintf(buffer, buf_len,
+ "short variable length command, len=%d",
+ len);
+ return off;
+ }
+ sa = (cdbp[8] << 8) + cdbp[9];
+ } else
+ sa = cdbp[1] & 0x1f;
+
+ if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
+ if (cdb_name)
+ off = scnprintf(buffer, buf_len, "%s", cdb_name);
+ else {
+ off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0);
+ if (WARN_ON(off >= buf_len))
+ return off;
+ if (cdb0 >= VENDOR_SPECIFIC_CDB)
+ off += scnprintf(buffer + off, buf_len - off,
+ " (vendor)");
+ else if (cdb0 >= 0x60 && cdb0 < 0x7e)
+ off += scnprintf(buffer + off, buf_len - off,
+ " (reserved)");
+ }
+ } else {
+ if (sa_name)
+ off = scnprintf(buffer, buf_len, "%s", sa_name);
+ else if (cdb_name)
+ off = scnprintf(buffer, buf_len, "%s, sa=0x%x",
+ cdb_name, sa);
+ else
+ off = scnprintf(buffer, buf_len,
+ "opcode=0x%x, sa=0x%x", cdb0, sa);
+ }
+ WARN_ON(off >= buf_len);
+ return off;
+}
+
+size_t __scsi_format_command(char *logbuf, size_t logbuf_len,
+ const unsigned char *cdb, size_t cdb_len)
+{
+ int len, k;
+ size_t off;
+
+ off = scsi_format_opcode_name(logbuf, logbuf_len, cdb);
+ if (off >= logbuf_len)
+ return off;
+ len = scsi_command_size(cdb);
+ if (cdb_len < len)
+ len = cdb_len;
+ /* print out all bytes in cdb */
+ for (k = 0; k < len; ++k) {
+ if (off > logbuf_len - 3)
+ break;
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ " %02x", cdb[k]);
+ }
+ return off;
+}
+EXPORT_SYMBOL(__scsi_format_command);
+
+void scsi_print_command(struct scsi_cmnd *cmd)
+{
+ int k;
+ char *logbuf;
+ size_t off, logbuf_len;
+
+ if (!cmd->cmnd)
+ return;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ off = sdev_format_header(logbuf, logbuf_len,
+ scmd_name(cmd), cmd->request->tag);
+ if (off >= logbuf_len)
+ goto out_printk;
+ off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ off += scsi_format_opcode_name(logbuf + off, logbuf_len - off,
+ cmd->cmnd);
+ if (off >= logbuf_len)
+ goto out_printk;
+
+ /* print out all bytes in cdb */
+ if (cmd->cmd_len > 16) {
+ /* Print opcode in one line and use separate lines for CDB */
+ off += scnprintf(logbuf + off, logbuf_len - off, "\n");
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+ for (k = 0; k < cmd->cmd_len; k += 16) {
+ size_t linelen = min(cmd->cmd_len - k, 16);
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ break;
+ off = sdev_format_header(logbuf, logbuf_len,
+ scmd_name(cmd),
+ cmd->request->tag);
+ if (!WARN_ON(off > logbuf_len - 58)) {
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "CDB[%02x]: ", k);
+ hex_dump_to_buffer(&cmd->cmnd[k], linelen,
+ 16, 1, logbuf + off,
+ logbuf_len - off, false);
+ }
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s",
+ logbuf);
+ scsi_log_release_buffer(logbuf);
+ }
+ return;
+ }
+ if (!WARN_ON(off > logbuf_len - 49)) {
+ off += scnprintf(logbuf + off, logbuf_len - off, " ");
+ hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1,
+ logbuf + off, logbuf_len - off,
+ false);
+ }
+out_printk:
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scsi_print_command);
+
+static size_t
+scsi_format_extd_sense(char *buffer, size_t buf_len,
+ unsigned char asc, unsigned char ascq)
+{
+ size_t off = 0;
+ const char *extd_sense_fmt = NULL;
+ const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
+ &extd_sense_fmt);
+
+ if (extd_sense_str) {
+ off = scnprintf(buffer, buf_len, "Add. Sense: %s",
+ extd_sense_str);
+ if (extd_sense_fmt)
+ off += scnprintf(buffer + off, buf_len - off,
+ "(%s%x)", extd_sense_fmt, ascq);
+ } else {
+ if (asc >= 0x80)
+ off = scnprintf(buffer, buf_len, "<<vendor>>");
+ off += scnprintf(buffer + off, buf_len - off,
+ "ASC=0x%x ", asc);
+ if (ascq >= 0x80)
+ off += scnprintf(buffer + off, buf_len - off,
+ "<<vendor>>");
+ off += scnprintf(buffer + off, buf_len - off,
+ "ASCQ=0x%x ", ascq);
+ }
+ return off;
+}
+
+static size_t
+scsi_format_sense_hdr(char *buffer, size_t buf_len,
+ const struct scsi_sense_hdr *sshdr)
+{
+ const char *sense_txt;
+ size_t off;
+
+ off = scnprintf(buffer, buf_len, "Sense Key : ");
+ sense_txt = scsi_sense_key_string(sshdr->sense_key);
+ if (sense_txt)
+ off += scnprintf(buffer + off, buf_len - off,
+ "%s ", sense_txt);
+ else
+ off += scnprintf(buffer + off, buf_len - off,
+ "0x%x ", sshdr->sense_key);
+ off += scnprintf(buffer + off, buf_len - off,
+ scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] ");
+
+ if (sshdr->response_code >= 0x72)
+ off += scnprintf(buffer + off, buf_len - off, "[descriptor] ");
+ return off;
+}
+
+static void
+scsi_log_dump_sense(const struct scsi_device *sdev, const char *name, int tag,
+ const unsigned char *sense_buffer, int sense_len)
+{
+ char *logbuf;
+ size_t logbuf_len;
+ int i;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ for (i = 0; i < sense_len; i += 16) {
+ int len = min(sense_len - i, 16);
+ size_t off;
+
+ off = sdev_format_header(logbuf, logbuf_len,
+ name, tag);
+ hex_dump_to_buffer(&sense_buffer[i], len, 16, 1,
+ logbuf + off, logbuf_len - off,
+ false);
+ dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+ }
+ scsi_log_release_buffer(logbuf);
+}
+
+static void
+scsi_log_print_sense_hdr(const struct scsi_device *sdev, const char *name,
+ int tag, const struct scsi_sense_hdr *sshdr)
+{
+ char *logbuf;
+ size_t off, logbuf_len;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+ off = sdev_format_header(logbuf, logbuf_len, name, tag);
+ off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr);
+ dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+ off = sdev_format_header(logbuf, logbuf_len, name, tag);
+ off += scsi_format_extd_sense(logbuf + off, logbuf_len - off,
+ sshdr->asc, sshdr->ascq);
+ dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+
+static void
+scsi_log_print_sense(const struct scsi_device *sdev, const char *name, int tag,
+ const unsigned char *sense_buffer, int sense_len)
+{
+ struct scsi_sense_hdr sshdr;
+
+ if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr))
+ scsi_log_print_sense_hdr(sdev, name, tag, &sshdr);
+ else
+ scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len);
+}
+
+/*
+ * Print normalized SCSI sense header with a prefix.
+ */
+void
+scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
+ const struct scsi_sense_hdr *sshdr)
+{
+ scsi_log_print_sense_hdr(sdev, name, -1, sshdr);
+}
+EXPORT_SYMBOL(scsi_print_sense_hdr);
+
+/* Normalize and print sense buffer with name prefix */
+void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
+ const unsigned char *sense_buffer, int sense_len)
+{
+ scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len);
+}
+EXPORT_SYMBOL(__scsi_print_sense);
+
+/* Normalize and print sense buffer in SCSI command */
+void scsi_print_sense(const struct scsi_cmnd *cmd)
+{
+ scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+}
+EXPORT_SYMBOL(scsi_print_sense);
+
+void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
+ int disposition)
+{
+ char *logbuf;
+ size_t off, logbuf_len;
+ const char *mlret_string = scsi_mlreturn_string(disposition);
+ const char *hb_string = scsi_hostbyte_string(cmd->result);
+ const char *db_string = scsi_driverbyte_string(cmd->result);
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ off = sdev_format_header(logbuf, logbuf_len,
+ scmd_name(cmd), cmd->request->tag);
+
+ if (off >= logbuf_len)
+ goto out_printk;
+
+ if (msg) {
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "%s: ", msg);
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+ }
+ if (mlret_string)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "%s ", mlret_string);
+ else
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "UNKNOWN(0x%02x) ", disposition);
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ off += scnprintf(logbuf + off, logbuf_len - off, "Result: ");
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ if (hb_string)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "hostbyte=%s ", hb_string);
+ else
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "hostbyte=0x%02x ", host_byte(cmd->result));
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ if (db_string)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=%s", db_string);
+ else
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=0x%02x", driver_byte(cmd->result));
+out_printk:
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 6fcefa2da503..251598eb3547 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -189,36 +189,36 @@ static int proc_print_scsidevice(struct device *dev, void *data)
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
for (i = 0; i < 8; i++) {
if (sdev->vendor[i] >= 0x20)
- seq_printf(s, "%c", sdev->vendor[i]);
+ seq_putc(s, sdev->vendor[i]);
else
- seq_printf(s, " ");
+ seq_putc(s, ' ');
}
- seq_printf(s, " Model: ");
+ seq_puts(s, " Model: ");
for (i = 0; i < 16; i++) {
if (sdev->model[i] >= 0x20)
- seq_printf(s, "%c", sdev->model[i]);
+ seq_putc(s, sdev->model[i]);
else
- seq_printf(s, " ");
+ seq_putc(s, ' ');
}
- seq_printf(s, " Rev: ");
+ seq_puts(s, " Rev: ");
for (i = 0; i < 4; i++) {
if (sdev->rev[i] >= 0x20)
- seq_printf(s, "%c", sdev->rev[i]);
+ seq_putc(s, sdev->rev[i]);
else
- seq_printf(s, " ");
+ seq_putc(s, ' ');
}
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
seq_printf(s, " Type: %s ", scsi_device_type(sdev->type));
seq_printf(s, " ANSI SCSI revision: %02x",
sdev->scsi_level - (sdev->scsi_level > 1));
if (sdev->scsi_level == 2)
- seq_printf(s, " CCS\n");
+ seq_puts(s, " CCS\n");
else
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
out:
return 0;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 983aed10ff2f..9c0a520d933c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -34,6 +34,7 @@
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -98,20 +99,6 @@ char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
MODULE_PARM_DESC(scan, "sync, async or none");
-/*
- * max_scsi_report_luns: the maximum number of LUNS that will be
- * returned from the REPORT LUNS command. 8 times this value must
- * be allocated. In theory this could be up to an 8 byte value, but
- * in practice, the maximum number of LUNs suppored by any device
- * is about 16k.
- */
-static unsigned int max_scsi_report_luns = 511;
-
-module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(max_report_luns,
- "REPORT LUNS maximum number of LUNS received (should be"
- " between 1 and 16384)");
-
static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
@@ -290,7 +277,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
if (!shost_use_blk_mq(sdev->host) &&
(shost->bqt || shost->hostt->use_blk_tags)) {
blk_queue_init_tags(sdev->request_queue,
- sdev->host->cmd_per_lun, shost->bqt);
+ sdev->host->cmd_per_lun, shost->bqt,
+ shost->hostt->tag_alloc_policy);
}
scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
@@ -1367,7 +1355,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
unsigned int retries;
int result;
struct scsi_lun *lunp, *lun_data;
- u8 *data;
struct scsi_sense_hdr sshdr;
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1407,16 +1394,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
/*
* Allocate enough to hold the header (the same size as one scsi_lun)
- * plus the max number of luns we are requesting.
- *
- * Reallocating and trying again (with the exact amount we need)
- * would be nice, but then we need to somehow limit the size
- * allocated based on the available memory and the limits of
- * kmalloc - we don't want a kmalloc() failure of a huge value to
- * prevent us from finding any LUNs on this target.
+ * plus the number of luns we are requesting. 511 was the default
+ * value of the now removed max_report_luns parameter.
*/
- length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
- lun_data = kmalloc(length, GFP_ATOMIC |
+ length = (511 + 1) * sizeof(struct scsi_lun);
+retry:
+ lun_data = kmalloc(length, GFP_KERNEL |
(sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
if (!lun_data) {
printk(ALLOC_FAILURE_MSG, __func__);
@@ -1433,10 +1416,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
/*
* bytes 6 - 9: length of the command.
*/
- scsi_cmd[6] = (unsigned char) (length >> 24) & 0xff;
- scsi_cmd[7] = (unsigned char) (length >> 16) & 0xff;
- scsi_cmd[8] = (unsigned char) (length >> 8) & 0xff;
- scsi_cmd[9] = (unsigned char) length & 0xff;
+ put_unaligned_be32(length, &scsi_cmd[6]);
scsi_cmd[10] = 0; /* reserved */
scsi_cmd[11] = 0; /* control */
@@ -1484,19 +1464,16 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
/*
* Get the length from the first four bytes of lun_data.
*/
- data = (u8 *) lun_data->scsi_lun;
- length = ((data[0] << 24) | (data[1] << 16) |
- (data[2] << 8) | (data[3] << 0));
+ if (get_unaligned_be32(lun_data->scsi_lun) +
+ sizeof(struct scsi_lun) > length) {
+ length = get_unaligned_be32(lun_data->scsi_lun) +
+ sizeof(struct scsi_lun);
+ kfree(lun_data);
+ goto retry;
+ }
+ length = get_unaligned_be32(lun_data->scsi_lun);
num_luns = (length / sizeof(struct scsi_lun));
- if (num_luns > max_scsi_report_luns) {
- sdev_printk(KERN_WARNING, sdev,
- "Only %d (max_scsi_report_luns)"
- " of %d luns reported, try increasing"
- " max_scsi_report_luns.\n",
- max_scsi_report_luns, num_luns);
- num_luns = max_scsi_report_luns;
- }
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
"scsi scan: REPORT LUN scan\n"));
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 82af28b90294..08bb47b53bc3 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -143,7 +143,7 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
cmd = "WRITE_SAME";
break;
default:
- trace_seq_printf(p, "UNKNOWN");
+ trace_seq_puts(p, "UNKNOWN");
goto out;
}
@@ -204,7 +204,7 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
cmd = "GET_LBA_STATUS";
break;
default:
- trace_seq_printf(p, "UNKNOWN");
+ trace_seq_puts(p, "UNKNOWN");
goto out;
}
@@ -249,7 +249,7 @@ scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- trace_seq_printf(p, "-");
+ trace_seq_putc(p, '-');
trace_seq_putc(p, 0);
return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 399516925d80..6b78476d04bb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
sd_set_flush_flag(sdkp);
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- sdkp->max_xfer_blocks);
+ max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
+
+ max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+ max_xfer);
blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
@@ -3318,11 +3320,8 @@ module_exit(exit_sd);
static void sd_print_sense_hdr(struct scsi_disk *sdkp,
struct scsi_sense_hdr *sshdr)
{
- scsi_show_sense_hdr(sdkp->device,
- sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
- scsi_show_extd_sense(sdkp->device,
- sdkp->disk ? sdkp->disk->disk_name : NULL,
- sshdr->asc, sshdr->ascq);
+ scsi_print_sense_hdr(sdkp->device,
+ sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
}
static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index b7e79e7646ad..dcb0d76d7312 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -47,7 +47,6 @@ struct ses_device {
struct ses_component {
u64 addr;
- unsigned char *desc;
};
static int ses_probe(struct device *dev)
@@ -68,6 +67,20 @@ static int ses_probe(struct device *dev)
#define SES_TIMEOUT (30 * HZ)
#define SES_RETRIES 3
+static void init_device_slot_control(unsigned char *dest_desc,
+ struct enclosure_component *ecomp,
+ unsigned char *status)
+{
+ memcpy(dest_desc, status, 4);
+ dest_desc[0] = 0;
+ /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */
+ if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE)
+ dest_desc[1] = 0;
+ dest_desc[2] &= 0xde;
+ dest_desc[3] &= 0x3c;
+}
+
static int ses_recv_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
@@ -179,14 +192,22 @@ static int ses_set_fault(struct enclosure_device *edev,
struct enclosure_component *ecomp,
enum enclosure_component_setting val)
{
- unsigned char desc[4] = {0 };
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
switch (val) {
case ENCLOSURE_SETTING_DISABLED:
- /* zero is disabled */
+ desc[3] &= 0xdf;
break;
case ENCLOSURE_SETTING_ENABLED:
- desc[3] = 0x20;
+ desc[3] |= 0x20;
break;
default:
/* SES doesn't do the SGPIO blink settings */
@@ -220,14 +241,22 @@ static int ses_set_locate(struct enclosure_device *edev,
struct enclosure_component *ecomp,
enum enclosure_component_setting val)
{
- unsigned char desc[4] = {0 };
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
switch (val) {
case ENCLOSURE_SETTING_DISABLED:
- /* zero is disabled */
+ desc[2] &= 0xfd;
break;
case ENCLOSURE_SETTING_ENABLED:
- desc[2] = 0x02;
+ desc[2] |= 0x02;
break;
default:
/* SES doesn't do the SGPIO blink settings */
@@ -240,15 +269,23 @@ static int ses_set_active(struct enclosure_device *edev,
struct enclosure_component *ecomp,
enum enclosure_component_setting val)
{
- unsigned char desc[4] = {0 };
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
switch (val) {
case ENCLOSURE_SETTING_DISABLED:
- /* zero is disabled */
+ desc[2] &= 0x7f;
ecomp->active = 0;
break;
case ENCLOSURE_SETTING_ENABLED:
- desc[2] = 0x80;
+ desc[2] |= 0x80;
ecomp->active = 1;
break;
default:
@@ -258,13 +295,63 @@ static int ses_set_active(struct enclosure_device *edev,
return ses_set_page2_descriptor(edev, ecomp, desc);
}
+static int ses_show_id(struct enclosure_device *edev, char *buf)
+{
+ struct ses_device *ses_dev = edev->scratch;
+ unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4);
+
+ return sprintf(buf, "%#llx\n", id);
+}
+
+static void ses_get_power_status(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ unsigned char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ if (desc)
+ ecomp->power_status = (desc[3] & 0x10) ? 0 : 1;
+}
+
+static int ses_set_power_status(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ int val)
+{
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
+
+ switch (val) {
+ /* power = 1 is device_off = 0 and vice versa */
+ case 0:
+ desc[3] |= 0x10;
+ break;
+ case 1:
+ desc[3] &= 0xef;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ecomp->power_status = val;
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
static struct enclosure_component_callbacks ses_enclosure_callbacks = {
.get_fault = ses_get_fault,
.set_fault = ses_set_fault,
.get_status = ses_get_status,
.get_locate = ses_get_locate,
.set_locate = ses_set_locate,
+ .get_power_status = ses_get_power_status,
+ .set_power_status = ses_set_power_status,
.set_active = ses_set_active,
+ .show_id = ses_show_id,
};
struct ses_host_edev {
@@ -298,19 +385,26 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
int invalid = desc[0] & 0x80;
enum scsi_protocol proto = desc[0] & 0x0f;
u64 addr = 0;
+ int slot = -1;
struct ses_component *scomp = ecomp->scratch;
unsigned char *d;
- scomp->desc = desc;
-
if (invalid)
return;
switch (proto) {
+ case SCSI_PROTOCOL_FCP:
+ if (eip) {
+ d = desc + 4;
+ slot = d[3];
+ }
+ break;
case SCSI_PROTOCOL_SAS:
- if (eip)
+ if (eip) {
+ d = desc + 4;
+ slot = d[3];
d = desc + 8;
- else
+ } else
d = desc + 4;
/* only take the phy0 addr */
addr = (u64)d[12] << 56 |
@@ -326,6 +420,7 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
/* FIXME: Need to add more protocols than just SAS */
break;
}
+ ecomp->slot = slot;
scomp->addr = addr;
}
@@ -349,7 +444,8 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
if (scomp->addr != efd->addr)
continue;
- enclosure_add_device(edev, i, efd->dev);
+ if (enclosure_add_device(edev, i, efd->dev) == 0)
+ kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE);
return 1;
}
return 0;
@@ -423,16 +519,24 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
if (create)
- ecomp = enclosure_component_register(edev,
- components++,
- type_ptr[0],
- name);
+ ecomp = enclosure_component_alloc(
+ edev,
+ components++,
+ type_ptr[0],
+ name);
else
ecomp = &edev->component[components++];
- if (!IS_ERR(ecomp) && addl_desc_ptr)
- ses_process_descriptor(ecomp,
- addl_desc_ptr);
+ if (!IS_ERR(ecomp)) {
+ ses_get_power_status(edev, ecomp);
+ if (addl_desc_ptr)
+ ses_process_descriptor(
+ ecomp,
+ addl_desc_ptr);
+ if (create)
+ enclosure_component_register(
+ ecomp);
+ }
}
if (desc_ptr)
desc_ptr += len;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b14f64cb9724..0cbc1fb45f10 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -763,7 +763,7 @@ static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
- int k, data_dir, at_head;
+ int k, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
@@ -793,21 +793,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
return -ENODEV;
}
- switch (hp->dxfer_direction) {
- case SG_DXFER_TO_FROM_DEV:
- case SG_DXFER_FROM_DEV:
- data_dir = DMA_FROM_DEVICE;
- break;
- case SG_DXFER_TO_DEV:
- data_dir = DMA_TO_DEVICE;
- break;
- case SG_DXFER_UNKNOWN:
- data_dir = DMA_BIDIRECTIONAL;
- break;
- default:
- data_dir = DMA_NONE;
- break;
- }
hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
@@ -1734,22 +1719,19 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
}
if (iov_count) {
- int len, size = sizeof(struct sg_iovec) * iov_count;
+ int size = sizeof(struct iovec) * iov_count;
struct iovec *iov;
+ struct iov_iter i;
iov = memdup_user(hp->dxferp, size);
if (IS_ERR(iov))
return PTR_ERR(iov);
- len = iov_length(iov, iov_count);
- if (hp->dxfer_len < len) {
- iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
- len = hp->dxfer_len;
- }
+ iov_iter_init(&i, rw, iov, iov_count,
+ min_t(size_t, hp->dxfer_len,
+ iov_length(iov, iov_count)));
- res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
- iov_count,
- len, GFP_ATOMIC);
+ res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index fb929fac22ba..03054c0e7689 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -245,9 +245,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
sr_printk(KERN_INFO, cd,
"CDROM not ready. Make sure there "
"is a disc in the drive.\n");
-#ifdef DEBUG
- scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
-#endif
err = -ENOMEDIUM;
break;
case ILLEGAL_REQUEST:
@@ -256,16 +253,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
sshdr.ascq == 0x00)
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
-#ifdef DEBUG
- __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE);
- scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
-#endif
break;
default:
- sr_printk(KERN_ERR, cd,
- "CDROM (ioctl) error, command: ");
- __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE);
- scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
err = -EIO;
}
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 128d3b55bdd9..9a1c34205254 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4551,18 +4551,15 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
return -ENOMEM;
/* Try to fault in all of the necessary pages */
- down_read(&current->mm->mmap_sem);
/* rw==READ means read from drive, write into memory area */
- res = get_user_pages(
+ res = get_user_pages_unlocked(
current,
current->mm,
uaddr,
nr_pages,
rw == READ,
0, /* don't force */
- pages,
- NULL);
- up_read(&current->mm->mmap_sem);
+ pages);
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 4cff0ddc2c25..efc6e446b6c8 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
-#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -309,14 +308,6 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
-
-/*
- * We setup a mempool to allocate request structures for this driver
- * on a per-lun basis. The following define specifies the number of
- * elements in the pool.
- */
-
-#define STORVSC_MIN_BUF_NR 64
static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
@@ -346,7 +337,6 @@ static void storvsc_on_channel_callback(void *context);
#define STORVSC_IDE_MAX_CHANNELS 1
struct storvsc_cmd_request {
- struct list_head entry;
struct scsi_cmnd *cmd;
unsigned int bounce_sgl_count;
@@ -357,7 +347,6 @@ struct storvsc_cmd_request {
/* Synchronize the request/response if needed */
struct completion wait_event;
- unsigned char *sense_buffer;
struct hv_multipage_buffer data_buffer;
struct vstor_packet vstor_packet;
};
@@ -389,11 +378,6 @@ struct storvsc_device {
struct storvsc_cmd_request reset_request;
};
-struct stor_mem_pools {
- struct kmem_cache *request_pool;
- mempool_t *request_mempool;
-};
-
struct hv_host_device {
struct hv_device *dev;
unsigned int port;
@@ -426,21 +410,42 @@ done:
kfree(wrk);
}
-static void storvsc_bus_scan(struct work_struct *work)
+static void storvsc_host_scan(struct work_struct *work)
{
struct storvsc_scan_work *wrk;
- int id, order_id;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev;
+ unsigned long flags;
wrk = container_of(work, struct storvsc_scan_work, work);
- for (id = 0; id < wrk->host->max_id; ++id) {
- if (wrk->host->reverse_ordering)
- order_id = wrk->host->max_id - id - 1;
- else
- order_id = id;
-
- scsi_scan_target(&wrk->host->shost_gendev, 0,
- order_id, SCAN_WILD_CARD, 1);
+ host = wrk->host;
+
+ /*
+ * Before scanning the host, first check to see if any of the
+ * currently known devices have been hot removed. We issue a
+ * "unit ready" command against all currently known devices.
+ * This I/O will result in an error for devices that have been
+ * removed. As part of handling the I/O error, we remove the device.
+ *
+ * When a LUN is added or removed, the host sends us a signal to
+ * scan the host. This is therefore also how we discover LUNs that
+ * may have been removed.
+ */
+ mutex_lock(&host->scan_mutex);
+ spin_lock_irqsave(host->host_lock, flags);
+ list_for_each_entry(sdev, &host->__devices, siblings) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ scsi_test_unit_ready(sdev, 1, 1, NULL);
+ spin_lock_irqsave(host->host_lock, flags);
+ continue;
}
+ spin_unlock_irqrestore(host->host_lock, flags);
+ mutex_unlock(&host->scan_mutex);
+ /*
+ * Now scan the host to discover LUNs that may have been added.
+ */
+ scsi_scan_host(host);
+
kfree(wrk);
}
@@ -1070,10 +1075,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
struct scsi_cmnd *scmnd = cmd_request->cmd;
struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
- struct stor_mem_pools *memp = scmnd->device->hostdata;
struct Scsi_Host *host;
struct storvsc_device *stor_dev;
struct hv_device *dev = host_dev->dev;
@@ -1109,14 +1112,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
cmd_request->data_buffer.len -
vm_srb->data_transfer_length);
- scsi_done_fn = scmnd->scsi_done;
-
- scmnd->host_scribble = NULL;
- scmnd->scsi_done = NULL;
-
- scsi_done_fn(scmnd);
-
- mempool_free(cmd_request, memp->request_mempool);
+ scmnd->scsi_done(scmnd);
}
static void storvsc_on_io_completion(struct hv_device *device,
@@ -1160,7 +1156,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
SRB_STATUS_AUTOSENSE_VALID) {
/* autosense data available */
- memcpy(request->sense_buffer,
+ memcpy(request->cmd->sense_buffer,
vstor_packet->vm_srb.sense_data,
vstor_packet->vm_srb.sense_info_length);
@@ -1198,7 +1194,7 @@ static void storvsc_on_receive(struct hv_device *device,
if (!work)
return;
- INIT_WORK(&work->work, storvsc_bus_scan);
+ INIT_WORK(&work->work, storvsc_host_scan);
work->host = stor_device->host;
schedule_work(&work->work);
break;
@@ -1378,55 +1374,6 @@ static int storvsc_do_io(struct hv_device *device,
return ret;
}
-static int storvsc_device_alloc(struct scsi_device *sdevice)
-{
- struct stor_mem_pools *memp;
- int number = STORVSC_MIN_BUF_NR;
-
- memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
- if (!memp)
- return -ENOMEM;
-
- memp->request_pool =
- kmem_cache_create(dev_name(&sdevice->sdev_dev),
- sizeof(struct storvsc_cmd_request), 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (!memp->request_pool)
- goto err0;
-
- memp->request_mempool = mempool_create(number, mempool_alloc_slab,
- mempool_free_slab,
- memp->request_pool);
-
- if (!memp->request_mempool)
- goto err1;
-
- sdevice->hostdata = memp;
-
- return 0;
-
-err1:
- kmem_cache_destroy(memp->request_pool);
-
-err0:
- kfree(memp);
- return -ENOMEM;
-}
-
-static void storvsc_device_destroy(struct scsi_device *sdevice)
-{
- struct stor_mem_pools *memp = sdevice->hostdata;
-
- if (!memp)
- return;
-
- mempool_destroy(memp->request_mempool);
- kmem_cache_destroy(memp->request_pool);
- kfree(memp);
- sdevice->hostdata = NULL;
-}
-
static int storvsc_device_configure(struct scsi_device *sdevice)
{
scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
@@ -1447,6 +1394,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
*/
sdevice->sdev_bflags |= msft_blist_flags;
+ /*
+ * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
+ * if the device is a MSFT virtual device.
+ */
+ if (!strncmp(sdevice->vendor, "Msft", 4)) {
+ switch (vmbus_proto_version) {
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
+ sdevice->scsi_level = SCSI_SPC_3;
+ break;
+ }
+ }
+
return 0;
}
@@ -1561,13 +1521,11 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
int ret;
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
- struct storvsc_cmd_request *cmd_request;
- unsigned int request_size = 0;
+ struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
int i;
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
- struct stor_mem_pools *memp = scmnd->device->hostdata;
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
/*
@@ -1584,25 +1542,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
}
}
- request_size = sizeof(struct storvsc_cmd_request);
-
- cmd_request = mempool_alloc(memp->request_mempool,
- GFP_ATOMIC);
-
- /*
- * We might be invoked in an interrupt context; hence
- * mempool_alloc() can fail.
- */
- if (!cmd_request)
- return SCSI_MLQUEUE_DEVICE_BUSY;
-
- memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
-
/* Setup the cmd request */
cmd_request->cmd = scmnd;
- scmnd->host_scribble = (unsigned char *)cmd_request;
-
vm_srb = &cmd_request->vstor_packet.vm_srb;
vm_srb->win8_extension.time_out_value = 60;
@@ -1637,9 +1579,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
- cmd_request->sense_buffer = scmnd->sense_buffer;
-
-
cmd_request->data_buffer.len = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
@@ -1651,10 +1590,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
create_bounce_buffer(sgl, scsi_sg_count(scmnd),
scsi_bufflen(scmnd),
vm_srb->data_in);
- if (!cmd_request->bounce_sgl) {
- ret = SCSI_MLQUEUE_HOST_BUSY;
- goto queue_error;
- }
+ if (!cmd_request->bounce_sgl)
+ return SCSI_MLQUEUE_HOST_BUSY;
cmd_request->bounce_sgl_count =
ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
@@ -1692,27 +1629,21 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- ret = SCSI_MLQUEUE_DEVICE_BUSY;
- goto queue_error;
+ return SCSI_MLQUEUE_DEVICE_BUSY;
}
return 0;
-
-queue_error:
- mempool_free(cmd_request, memp->request_mempool);
- scmnd->host_scribble = NULL;
- return ret;
}
static struct scsi_host_template scsi_driver = {
.module = THIS_MODULE,
.name = "storvsc_host_t",
+ .cmd_size = sizeof(struct storvsc_cmd_request),
.bios_param = storvsc_get_chs,
.queuecommand = storvsc_queuecommand,
.eh_host_reset_handler = storvsc_host_reset_handler,
+ .proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
- .slave_alloc = storvsc_device_alloc,
- .slave_destroy = storvsc_device_destroy,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
@@ -1760,6 +1691,9 @@ static int storvsc_probe(struct hv_device *device,
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
int target = 0;
struct storvsc_device *stor_device;
+ int max_luns_per_target;
+ int max_targets;
+ int max_channels;
/*
* Based on the windows host we are running on,
@@ -1773,12 +1707,18 @@ static int storvsc_probe(struct hv_device *device,
vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
vmstor_current_major = VMSTOR_WIN7_MAJOR;
vmstor_current_minor = VMSTOR_WIN7_MINOR;
+ max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
+ max_targets = STORVSC_IDE_MAX_TARGETS;
+ max_channels = STORVSC_IDE_MAX_CHANNELS;
break;
default:
sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
vmscsi_size_delta = 0;
vmstor_current_major = VMSTOR_WIN8_MAJOR;
vmstor_current_minor = VMSTOR_WIN8_MINOR;
+ max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
+ max_targets = STORVSC_MAX_TARGETS;
+ max_channels = STORVSC_MAX_CHANNELS;
break;
}
@@ -1826,9 +1766,9 @@ static int storvsc_probe(struct hv_device *device,
break;
case SCSI_GUID:
- host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
- host->max_id = STORVSC_MAX_TARGETS;
- host->max_channel = STORVSC_MAX_CHANNELS - 1;
+ host->max_lun = max_luns_per_target;
+ host->max_id = max_targets;
+ host->max_channel = max_channels - 1;
break;
default:
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 6e07b2afddeb..8a1f4b355416 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -70,3 +70,16 @@ config SCSI_UFSHCD_PLATFORM
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config SCSI_UFS_QCOM
+ bool "QCOM specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM
+ select PHY_QCOM_UFS
+ help
+ This selects the QCOM specific additions to the UFSHCD platform driver.
+ The UFS host on QCOM needs some vendor specific configuration before
+ accessing the hardware, which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have a UFS controller on a QCOM chipset.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 1e5bd48457d6..8303bcce7a23 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,4 +1,5 @@
# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
new file mode 100644
index 000000000000..9217af9bf734
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -0,0 +1,1004 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+#include <linux/phy/phy-qcom-ufs.h>
+#include "ufshcd.h"
+#include "unipro.h"
+#include "ufs-qcom.h"
+#include "ufshci.h"
+
+static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
+
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+ const char *speed_mode);
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
+
+static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
+{
+ int err = 0;
+
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
+ if (err)
+ dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
+ __func__, err);
+
+ return err;
+}
+
+static int ufs_qcom_host_clk_get(struct device *dev,
+ const char *name, struct clk **clk_out)
+{
+ struct clk *clk;
+ int err = 0;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(dev, "%s: failed to get %s err %d",
+ __func__, name, err);
+ } else {
+ *clk_out = clk;
+ }
+
+ return err;
+}
+
+static int ufs_qcom_host_clk_enable(struct device *dev,
+ const char *name, struct clk *clk)
+{
+ int err = 0;
+
+ err = clk_prepare_enable(clk);
+ if (err)
+ dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
+
+ return err;
+}
+
+static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
+{
+ if (!host->is_lane_clks_enabled)
+ return;
+
+ clk_disable_unprepare(host->tx_l1_sync_clk);
+ clk_disable_unprepare(host->tx_l0_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l0_sync_clk);
+
+ host->is_lane_clks_enabled = false;
+}
+
+static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
+{
+ int err = 0;
+ struct device *dev = host->hba->dev;
+
+ if (host->is_lane_clks_enabled)
+ return 0;
+
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
+ host->rx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
+ host->tx_l0_sync_clk);
+ if (err)
+ goto disable_rx_l0;
+
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ host->rx_l1_sync_clk);
+ if (err)
+ goto disable_tx_l0;
+
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ host->tx_l1_sync_clk);
+ if (err)
+ goto disable_rx_l1;
+
+ host->is_lane_clks_enabled = true;
+ goto out;
+
+disable_rx_l1:
+ clk_disable_unprepare(host->rx_l1_sync_clk);
+disable_tx_l0:
+ clk_disable_unprepare(host->tx_l0_sync_clk);
+disable_rx_l0:
+ clk_disable_unprepare(host->rx_l0_sync_clk);
+out:
+ return err;
+}
+
+static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
+{
+ int err = 0;
+ struct device *dev = host->hba->dev;
+
+ err = ufs_qcom_host_clk_get(dev,
+ "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev,
+ "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+ &host->rx_l1_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+ &host->tx_l1_sync_clk);
+out:
+ return err;
+}
+
+static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ u32 tx_lanes;
+ int err = 0;
+
+ err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
+ if (err)
+ dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
+ __func__);
+
+out:
+ return err;
+}
+
+static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
+{
+ int err;
+ u32 tx_fsm_val = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+ if (err || tx_fsm_val == TX_FSM_HIBERN8)
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ /*
+ * we might have been scheduled out for a long time during polling so
+ * check the state again.
+ */
+ if (time_after(jiffies, timeout))
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val != TX_FSM_HIBERN8) {
+ err = tx_fsm_val;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
+ __func__, err);
+ }
+
+ return err;
+}
+
+static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
+ u8 major;
+ u16 minor, step;
+ bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
+ ? true : false;
+
+ /* Assert PHY reset and apply PHY calibration values */
+ ufs_qcom_assert_reset(hba);
+ /* provide 1ms delay to let the reset pulse propagate */
+ usleep_range(1000, 1100);
+
+ ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
+ ufs_qcom_phy_save_controller_version(phy, major, minor, step);
+ ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /* De-assert PHY reset and start serdes */
+ ufs_qcom_deassert_reset(hba);
+
+ /*
+ * after reset deassertion, phy will need all ref clocks,
+ * voltage, current to settle down before starting serdes.
+ */
+ usleep_range(1000, 1100);
+ ret = ufs_qcom_phy_start_serdes(phy);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = ufs_qcom_phy_is_pcs_ready(phy);
+ if (ret)
+ dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+ __func__, ret);
+
+out:
+ return ret;
+}
+
+/*
+ * The UTP controller has a number of internal clock gating cells (CGCs).
+ * Internal hardware sub-modules within the UTP controller control the CGCs.
+ * Hardware CGCs disable the clock to UTP sub-modules that are not involved
+ * in a specific operation. UTP controller CGCs are disabled by default and
+ * this function enables them (after every UFS link startup) to reduce power
+ * leakage.
+ */
+static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
+ REG_UFS_CFG2);
+
+ /* Ensure that HW clock gating is enabled before next operations */
+ mb();
+}
+
+static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_qcom_power_up_sequence(hba);
+ /*
+ * The PHY PLL output is the source of tx/rx lane symbol
+ * clocks, hence, enable the lane clocks only after PHY
+ * is initialized.
+ */
+ err = ufs_qcom_enable_lane_clks(host);
+ break;
+ case POST_CHANGE:
+ /* check if UFS PHY moved from DISABLED to HIBERN8 */
+ err = ufs_qcom_check_hibern8(hba);
+ ufs_qcom_enable_hw_clk_gating(hba);
+
+ break;
+ default:
+ dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+/**
+ * Returns the configured core_clk rate (non-zero) on success and 0
+ * in case of a failure
+ */
+static unsigned long
+ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
+{
+ struct ufs_clk_info *clki;
+ u32 core_clk_period_in_ns;
+ u32 tx_clk_cycles_per_us = 0;
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_cycles_per_us = 0;
+
+ static u32 pwm_fr_table[][2] = {
+ {UFS_PWM_G1, 0x1},
+ {UFS_PWM_G2, 0x1},
+ {UFS_PWM_G3, 0x1},
+ {UFS_PWM_G4, 0x1},
+ };
+
+ static u32 hs_fr_table_rA[][2] = {
+ {UFS_HS_G1, 0x1F},
+ {UFS_HS_G2, 0x3e},
+ };
+
+ static u32 hs_fr_table_rB[][2] = {
+ {UFS_HS_G1, 0x24},
+ {UFS_HS_G2, 0x49},
+ };
+
+ if (gear == 0) {
+ dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
+ goto out_error;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk"))
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
+
+ /* If frequency is smaller than 1MHz, set to 1MHz */
+ if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
+ core_clk_rate = DEFAULT_CLK_RATE_HZ;
+
+ core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
+
+ core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
+ core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
+ core_clk_period_in_ns &= MASK_CLK_NS_REG;
+
+ switch (hs) {
+ case FASTAUTO_MODE:
+ case FAST_MODE:
+ if (rate == PA_HS_MODE_A) {
+ if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(hs_fr_table_rA));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
+ } else if (rate == PA_HS_MODE_B) {
+ if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(hs_fr_table_rB));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
+ } else {
+ dev_err(hba->dev, "%s: invalid rate = %d\n",
+ __func__, rate);
+ goto out_error;
+ }
+ break;
+ case SLOWAUTO_MODE:
+ case SLOW_MODE:
+ if (gear > ARRAY_SIZE(pwm_fr_table)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(pwm_fr_table));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
+ break;
+ case UNCHANGED:
+ default:
+ dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
+ goto out_error;
+ }
+
+ /* these two register fields shall be written at once */
+ ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
+ REG_UFS_TX_SYMBOL_CLK_NS_US);
+ goto out;
+
+out_error:
+ core_clk_rate = 0;
+out:
+ return core_clk_rate;
+}
+
+static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
+{
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_cycles_per_100ms;
+
+ switch (status) {
+ case PRE_CHANGE:
+ core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
+ SLOWAUTO_MODE, 0);
+ if (!core_clk_rate) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ core_clk_cycles_per_100ms =
+ (core_clk_rate / MSEC_PER_SEC) * 100;
+ ufshcd_writel(hba, core_clk_cycles_per_100ms,
+ REG_UFS_PA_LINK_STARTUP_TIMER);
+ break;
+ case POST_CHANGE:
+ ufs_qcom_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
+
+ if (ufs_qcom_is_link_off(hba)) {
+ /*
+ * Disable the tx/rx lane symbol clocks before PHY is
+ * powered down as the PLL source should be disabled
+ * after downstream clocks are disabled.
+ */
+ ufs_qcom_disable_lane_clks(host);
+ phy_power_off(phy);
+
+ /* Assert PHY soft reset */
+ ufs_qcom_assert_reset(hba);
+ goto out;
+ }
+
+ /*
+ * If UniPro link is not active, PHY ref_clk, main PHY analog power
+ * rail and low noise analog power rail for PLL can be switched off.
+ */
+ if (!ufs_qcom_is_link_active(hba))
+ phy_power_off(phy);
+
+out:
+ return ret;
+}
+
+static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ int err;
+
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->is_sys_suspended = false;
+
+out:
+ return err;
+}
+
+struct ufs_qcom_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_qcom_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_qcom_max_hs = false;
+
+ if (dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (qcom_param->desired_working_mode == FAST) {
+ is_qcom_max_hs = true;
+ min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
+ qcom_param->hs_tx_gear);
+ } else {
+ min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
+ qcom_param->pwm_tx_gear);
+ }
+
+ /*
+ * device doesn't support HS but qcom_param->desired_working_mode is
+ * HS, thus device and qcom_param don't agree
+ */
+ if (!is_dev_sup_hs && is_qcom_max_hs) {
+ pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
+ __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_qcom_max_hs) {
+ /*
+ * since device supports HS, it supports FAST_MODE.
+ * since qcom_param->desired_working_mode is also HS
+ * then final decision (FAST/FASTAUTO) is done according
+ * to qcom_params as it is the restricting factor
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ qcom_param->rx_pwr_hs;
+ } else {
+ /*
+ * here qcom_param->desired_working_mode is PWM.
+ * it doesn't matter whether device supports HS or PWM,
+ * in both cases qcom_param->desired_working_mode will
+ * determine the mode
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ qcom_param->rx_pwr_pwm;
+ }
+
+ /*
+ * we would like tx to work in the minimum number of lanes
+ * between device capability and vendor preferences.
+ * the same decision will be made for rx
+ */
+ agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+ qcom_param->tx_lanes);
+ agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+ qcom_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * if device capabilities and vendor pre-defined preferences are
+ * both HS or both PWM then set the minimum gear to be the chosen
+ * working gear.
+ * if one is PWM and one is HS then the one that is PWM gets to decide
+ * what the gear is, as it is the one that also decided previously what
+ * pwr the device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_qcom_max_hs) ||
+ (!is_dev_sup_hs && !is_qcom_max_hs))
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+ min_t(u32, min_dev_gear, min_qcom_gear);
+ else if (!is_dev_sup_hs)
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
+ else
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
+
+ agreed_pwr->hs_rate = qcom_param->hs_rate;
+ return 0;
+}
+
+static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
+{
+ int vote;
+ int err = 0;
+ char mode[BUS_VECTOR_NAME_LEN];
+
+ ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
+
+ vote = ufs_qcom_get_bus_vote(host, mode);
+ if (vote >= 0)
+ err = ufs_qcom_set_bus_vote(host, vote);
+ else
+ err = vote;
+
+ if (err)
+ dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
+ else
+ host->bus_vote.saved_vote = vote;
+ return err;
+}
+
+static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ bool status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ u32 val;
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ struct ufs_qcom_dev_params ufs_qcom_cap;
+ int ret = 0;
+ int res = 0;
+
+ if (!dev_req_params) {
+ pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
+ ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
+ ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
+ ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
+ ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
+ ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
+ ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
+ ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
+ ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
+ ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
+ ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
+ ufs_qcom_cap.desired_working_mode =
+ UFS_QCOM_LIMIT_DESIRED_MODE;
+
+ ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
+ dev_max_params,
+ dev_req_params);
+ if (ret) {
+ pr_err("%s: failed to determine capabilities\n",
+ __func__);
+ goto out;
+ }
+
+ break;
+ case POST_CHANGE:
+ if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+ dev_req_params->pwr_rx,
+ dev_req_params->hs_rate)) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+ /*
+ * we return error code at the end of the routine,
+ * but continue to configure UFS_PHY_TX_LANE_ENABLE
+ * and bus voting as usual
+ */
+ ret = -EINVAL;
+ }
+
+ val = ~(MAX_U32 << dev_req_params->lane_tx);
+ res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
+ if (res) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
+ __func__, res);
+ ret = res;
+ }
+
+ /* cache the power mode parameters to use internally */
+ memcpy(&host->dev_req_params,
+ dev_req_params, sizeof(*dev_req_params));
+ ufs_qcom_update_bus_bw_vote(host);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+/**
+ * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
+ * @hba: host controller instance
+ *
+ * QCOM UFS host controller might have some non-standard behaviours (quirks)
+ * compared to what is specified by the UFSHCI specification. Advertise all
+ * such quirks to the standard UFS host controller driver so that it takes
+ * them into account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+ u8 major;
+ u16 minor, step;
+
+ ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
+
+ /*
+ * TBD
+ * here we should be advertising controller quirks according to
+ * controller version.
+ */
+}
+
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+ const char *speed_mode)
+{
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+ int err;
+ const char *key = "qcom,bus-vector-names";
+
+ if (!speed_mode) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+ err = of_property_match_string(np, key, "MAX");
+ else
+ err = of_property_match_string(np, key, speed_mode);
+
+out:
+ if (err < 0)
+ dev_err(dev, "%s: Invalid %s mode %d\n",
+ __func__, speed_mode, err);
+ return err;
+}
+
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+{
+ int err = 0;
+
+ if (vote != host->bus_vote.curr_vote)
+ host->bus_vote.curr_vote = vote;
+
+ return err;
+}
+
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
+{
+ int gear = max_t(u32, p->gear_rx, p->gear_tx);
+ int lanes = max_t(u32, p->lane_rx, p->lane_tx);
+ int pwr;
+
+ /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
+ if (!gear)
+ gear = 1;
+
+ if (!lanes)
+ lanes = 1;
+
+ if (!p->pwr_rx && !p->pwr_tx) {
+ pwr = SLOWAUTO_MODE;
+ snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
+ } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
+ p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
+ pwr = FAST_MODE;
+ snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
+ p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
+ } else {
+ pwr = SLOW_MODE;
+ snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
+ "PWM", gear, lanes);
+ }
+}
+
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ int err = 0;
+ int vote = 0;
+
+ /*
+	 * If ufs_qcom_init() has not completed yet, simply ignore this call;
+	 * ufs_qcom_setup_clocks() is invoked again from ufs_qcom_init()
+	 * once initialization is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
+ if (err) {
+ dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ ufs_qcom_phy_disable_iface_clk(host->generic_phy);
+ goto out;
+ }
+ /* enable the device ref clock */
+ ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ /* M-PHY RMMI interface clocks can be turned off */
+ ufs_qcom_phy_disable_iface_clk(host->generic_phy);
+ if (!ufs_qcom_is_link_active(hba)) {
+ /* turn off UFS local PHY ref_clk */
+ ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+ /* disable device ref_clk */
+ ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
+ }
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+out:
+ return err;
+}
+
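+/*
+ * "max_bus_bw" device attribute: writing a non-zero value (e.g. echoing 1
+ * into the attribute) sets is_max_bw_needed so that every speed mode other
+ * than "MIN" votes for the "MAX" bus bandwidth vector.
+ */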
+static ssize_t
+show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_qcom_host *host = hba->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ host->bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_qcom_host *host = hba->priv;
+ uint32_t value;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ host->bus_vote.is_max_bw_needed = !!value;
+ ufs_qcom_update_bus_bw_vote(host);
+ }
+
+ return count;
+}
+
+static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
+{
+ int err;
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+
+ err = of_property_count_strings(np, "qcom,bus-vector-names");
+	if (err < 0) {
+ dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* cache the vote index for minimum and maximum bandwidth */
+ host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
+ host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
+
+ host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
+ host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
+ sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
+ host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+ host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+ err = device_create_file(dev, &host->bus_vote.max_bus_bw);
+out:
+ return err;
+}
+
+#define ANDROID_BOOT_DEV_MAX 30
+static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
+static int get_android_boot_dev(char *str)
+{
+ strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
+ return 1;
+}
+__setup("androidboot.bootdevice=", get_android_boot_dev);
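+/*
+ * For example, booting with androidboot.bootdevice=<UFS device name> on the
+ * kernel command line makes ufs_qcom_init() accept only the controller whose
+ * dev_name() matches that string; any other controller fails with -ENODEV.
+ */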
+
+/**
+ * ufs_qcom_init - bind phy with controller
+ * @hba: host controller instance
+ *
+ * Binds the PHY with the controller and powers up the PHY, enabling its
+ * clocks and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
+ * power-up failure and zero on success.
+ */
+static int ufs_qcom_init(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct ufs_qcom_host *host;
+
+ if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ err = -ENOMEM;
+ dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+ goto out;
+ }
+
+ host->hba = hba;
+ hba->priv = (void *)host;
+
+ host->generic_phy = devm_phy_get(dev, "ufsphy");
+
+ if (IS_ERR(host->generic_phy)) {
+ err = PTR_ERR(host->generic_phy);
+ dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+ goto out;
+ }
+
+ err = ufs_qcom_bus_register(host);
+ if (err)
+ goto out_host_free;
+
+ phy_init(host->generic_phy);
+ err = phy_power_on(host->generic_phy);
+ if (err)
+ goto out_unregister_bus;
+
+ err = ufs_qcom_init_lane_clks(host);
+ if (err)
+ goto out_disable_phy;
+
+ ufs_qcom_advertise_quirks(hba);
+
+ hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
+ hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+
+ ufs_qcom_setup_clocks(hba, true);
+
+ if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
+ ufs_qcom_hosts[hba->dev->id] = host;
+
+ goto out;
+
+out_disable_phy:
+ phy_power_off(host->generic_phy);
+out_unregister_bus:
+ phy_exit(host->generic_phy);
+out_host_free:
+ devm_kfree(dev, host);
+ hba->priv = NULL;
+out:
+ return err;
+}
+
+static void ufs_qcom_exit(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ ufs_qcom_disable_lane_clks(host);
+ phy_power_off(host->generic_phy);
+}
+
+static
+void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
+
+ if (!dev_req_params)
+ return;
+
+ ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+ dev_req_params->pwr_rx,
+ dev_req_params->hs_rate);
+}
+
+/**
+ * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+ .name = "qcom",
+ .init = ufs_qcom_init,
+ .exit = ufs_qcom_exit,
+ .clk_scale_notify = ufs_qcom_clk_scale_notify,
+ .setup_clocks = ufs_qcom_setup_clocks,
+ .hce_enable_notify = ufs_qcom_hce_enable_notify,
+ .link_startup_notify = ufs_qcom_link_startup_notify,
+ .pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .suspend = ufs_qcom_suspend,
+ .resume = ufs_qcom_resume,
+};
+EXPORT_SYMBOL(ufs_hba_qcom_vops);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
new file mode 100644
index 000000000000..9a6febd007df
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -0,0 +1,170 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_H_
+#define UFS_QCOM_H_
+
+#define MAX_UFS_QCOM_HOSTS 1
+#define MAX_U32 (~(u32)0)
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+#define HBRN8_POLL_TOUT_MS 100
+#define DEFAULT_CLK_RATE_HZ 1000000
+#define BUS_VECTOR_NAME_LEN 32
+
+#define UFS_HW_VER_MAJOR_SHFT (28)
+#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
+#define UFS_HW_VER_MINOR_SHFT (16)
+#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
+#define UFS_HW_VER_STEP_SHFT (0)
+#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
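+/*
+ * REG_UFS_HW_VERSION layout: bits [31:28] major, [27:16] minor, [15:0] step,
+ * e.g. a raw value of 0x10020003 decodes to major 1, minor 2, step 3.
+ */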
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_QCOM_LIMIT_NUM_LANES_RX 2
+#define UFS_QCOM_LIMIT_NUM_LANES_TX 2
+#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G2
+#define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G2
+#define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4
+#define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4
+#define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE
+#define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE
+#define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE
+#define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE
+#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
+#define UFS_QCOM_LIMIT_DESIRED_MODE FAST
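+/*
+ * These host-side limits populate ufs_qcom_cap in ufs_qcom_pwr_change_notify()
+ * before ufs_qcom_get_pwr_dev_param() works out the parameters actually
+ * requested from the device.
+ */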
+
+/* QCOM UFS host controller vendor specific registers */
+enum {
+ REG_UFS_SYS1CLK_1US = 0xC0,
+ REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
+ REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
+ REG_UFS_PA_ERR_CODE = 0xCC,
+ REG_UFS_RETRY_TIMER_REG = 0xD0,
+ REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
+ REG_UFS_CFG1 = 0xDC,
+ REG_UFS_CFG2 = 0xE0,
+ REG_UFS_HW_VERSION = 0xE4,
+
+ UFS_DBG_RD_REG_UAWM = 0x100,
+ UFS_DBG_RD_REG_UARM = 0x200,
+ UFS_DBG_RD_REG_TXUC = 0x300,
+ UFS_DBG_RD_REG_RXUC = 0x400,
+ UFS_DBG_RD_REG_DFC = 0x500,
+ UFS_DBG_RD_REG_TRLUT = 0x600,
+ UFS_DBG_RD_REG_TMRLUT = 0x700,
+ UFS_UFS_DBG_RD_REG_OCSC = 0x800,
+
+ UFS_UFS_DBG_RD_DESC_RAM = 0x1500,
+ UFS_UFS_DBG_RD_PRDT_RAM = 0x1700,
+ UFS_UFS_DBG_RD_RESP_RAM = 0x1800,
+ UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
+};
+
+/* bit definitions for REG_UFS_CFG2 register */
+#define UAWM_HW_CGC_EN (1 << 0)
+#define UARM_HW_CGC_EN (1 << 1)
+#define TXUC_HW_CGC_EN (1 << 2)
+#define RXUC_HW_CGC_EN (1 << 3)
+#define DFC_HW_CGC_EN (1 << 4)
+#define TRLUT_HW_CGC_EN (1 << 5)
+#define TMRLUT_HW_CGC_EN (1 << 6)
+#define OCSC_HW_CGC_EN (1 << 7)
+
+#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
+ TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
+ DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
+ TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
+
+/* bit offset */
+enum {
+ OFFSET_UFS_PHY_SOFT_RESET = 1,
+ OFFSET_CLK_NS_REG = 10,
+};
+
+/* bit masks */
+enum {
+ MASK_UFS_PHY_SOFT_RESET = 0x2,
+ MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
+ MASK_CLK_NS_REG = 0xFFFC00,
+};
+
+enum ufs_qcom_phy_init_type {
+ UFS_PHY_INIT_FULL,
+ UFS_PHY_INIT_CFG_RESTORE,
+};
+
+static inline void
+ufs_qcom_get_controller_revision(struct ufs_hba *hba,
+ u8 *major, u16 *minor, u16 *step)
+{
+ u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
+
+ *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
+ *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
+ *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+}
+
+static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+ 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+ /*
+ * Make sure assertion of ufs phy reset is written to
+ * register before returning
+ */
+ mb();
+}
+
+static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+ 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+ /*
+ * Make sure de-assertion of ufs phy reset is written to
+ * register before returning
+ */
+ mb();
+}
+
+struct ufs_qcom_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ int saved_vote;
+ bool is_max_bw_needed;
+ struct device_attribute max_bus_bw;
+};
+
+struct ufs_qcom_host {
+ struct phy *generic_phy;
+ struct ufs_hba *hba;
+ struct ufs_qcom_bus_vote bus_vote;
+ struct ufs_pa_layer_attr dev_req_params;
+ struct clk *rx_l0_sync_clk;
+ struct clk *tx_l0_sync_clk;
+ struct clk *rx_l1_sync_clk;
+ struct clk *tx_l1_sync_clk;
+ bool is_lane_clks_enabled;
+};
+
+#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
+#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
+#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+
+#endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2e4614b9dddf..5d60a868830d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4714,10 +4714,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) & DRIVER_SENSE) {
- scsi_show_sense_hdr(sdp, NULL, &sshdr);
- scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq);
- }
+ if (driver_byte(ret) & DRIVER_SENSE)
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
if (!ret)
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index c0506de4f3b6..9e09da412b92 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2143,22 +2143,22 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
" dma_mode=%02x fast=%d",
hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast);
- seq_printf(m, "\nsync_xfer[] = ");
+ seq_puts(m, "\nsync_xfer[] = ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%02x", hd->sync_xfer[x]);
- seq_printf(m, "\nsync_stat[] = ");
+ seq_puts(m, "\nsync_stat[] = ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%02x", hd->sync_stat[x]);
}
#ifdef PROC_STATISTICS
if (hd->proc & PR_STATISTICS) {
- seq_printf(m, "\ncommands issued: ");
+ seq_puts(m, "\ncommands issued: ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
- seq_printf(m, "\ndisconnects allowed:");
+ seq_puts(m, "\ndisconnects allowed:");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
- seq_printf(m, "\ndisconnects done: ");
+ seq_puts(m, "\ndisconnects done: ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
seq_printf(m,
@@ -2167,7 +2167,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
}
#endif
if (hd->proc & PR_CONNECTED) {
- seq_printf(m, "\nconnected: ");
+ seq_puts(m, "\nconnected: ");
if (hd->connected) {
cmd = (struct scsi_cmnd *) hd->connected;
seq_printf(m, " %d:%llu(%02x)",
@@ -2175,7 +2175,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
}
}
if (hd->proc & PR_INPUTQ) {
- seq_printf(m, "\ninput_Q: ");
+ seq_puts(m, "\ninput_Q: ");
cmd = (struct scsi_cmnd *) hd->input_Q;
while (cmd) {
seq_printf(m, " %d:%llu(%02x)",
@@ -2184,7 +2184,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
}
}
if (hd->proc & PR_DISCQ) {
- seq_printf(m, "\ndisconnected_Q:");
+ seq_puts(m, "\ndisconnected_Q:");
cmd = (struct scsi_cmnd *) hd->disconnected_Q;
while (cmd) {
seq_printf(m, " %d:%llu(%02x)",
@@ -2192,7 +2192,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
cmd = (struct scsi_cmnd *) cmd->host_scribble;
}
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
spin_unlock_irq(&hd->lock);
#endif /* PROC_INTERFACE */
return 0;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index f94d73611ab4..0c0f17b9a3eb 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1295,9 +1295,6 @@ static void wd7000_revision(Adapter * host)
}
-#undef SPRINTF
-#define SPRINTF(args...) { seq_printf(m, ## args); }
-
static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
{
dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);
@@ -1320,43 +1317,43 @@ static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
#endif
spin_lock_irqsave(host->host_lock, flags);
- SPRINTF("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
- SPRINTF(" IO base: 0x%x\n", adapter->iobase);
- SPRINTF(" IRQ: %d\n", adapter->irq);
- SPRINTF(" DMA channel: %d\n", adapter->dma);
- SPRINTF(" Interrupts: %d\n", adapter->int_counter);
- SPRINTF(" BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
- SPRINTF(" BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
+ seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
+ seq_printf(m, " IO base: 0x%x\n", adapter->iobase);
+ seq_printf(m, " IRQ: %d\n", adapter->irq);
+ seq_printf(m, " DMA channel: %d\n", adapter->dma);
+ seq_printf(m, " Interrupts: %d\n", adapter->int_counter);
+ seq_printf(m, " BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
+ seq_printf(m, " BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
#ifdef WD7000_DEBUG
ogmbs = adapter->mb.ogmb;
icmbs = adapter->mb.icmb;
- SPRINTF("\nControl port value: 0x%x\n", adapter->control);
- SPRINTF("Incoming mailbox:\n");
- SPRINTF(" size: %d\n", ICMB_CNT);
- SPRINTF(" queued messages: ");
+ seq_printf(m, "\nControl port value: 0x%x\n", adapter->control);
+ seq_puts(m, "Incoming mailbox:\n");
+ seq_printf(m, " size: %d\n", ICMB_CNT);
+ seq_puts(m, " queued messages: ");
for (i = count = 0; i < ICMB_CNT; i++)
if (icmbs[i].status) {
count++;
- SPRINTF("0x%x ", i);
+ seq_printf(m, "0x%x ", i);
}
- SPRINTF(count ? "\n" : "none\n");
+ seq_puts(m, count ? "\n" : "none\n");
- SPRINTF("Outgoing mailbox:\n");
- SPRINTF(" size: %d\n", OGMB_CNT);
- SPRINTF(" next message: 0x%x\n", adapter->next_ogmb);
- SPRINTF(" queued messages: ");
+ seq_puts(m, "Outgoing mailbox:\n");
+ seq_printf(m, " size: %d\n", OGMB_CNT);
+ seq_printf(m, " next message: 0x%x\n", adapter->next_ogmb);
+ seq_puts(m, " queued messages: ");
for (i = count = 0; i < OGMB_CNT; i++)
if (ogmbs[i].status) {
count++;
- SPRINTF("0x%x ", i);
+ seq_printf(m, "0x%x ", i);
}
- SPRINTF(count ? "\n" : "none\n");
+ seq_puts(m, count ? "\n" : "none\n");
#endif
spin_unlock_irqrestore(host->host_lock, flags);
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 1e824fb1649b..296db7a69c27 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -161,7 +161,7 @@ static int sfi_verify_table(struct sfi_table_header *table)
* Check for common case that we can re-use mapping to SYST,
* which requires syst_pa, syst_va to be initialized.
*/
-struct sfi_table_header *sfi_map_table(u64 pa)
+static struct sfi_table_header *sfi_map_table(u64 pa)
{
struct sfi_table_header *th;
u32 length;
@@ -189,7 +189,7 @@ struct sfi_table_header *sfi_map_table(u64 pa)
* Undoes effect of sfi_map_table() by unmapping table
* if it did not completely fit on same page as SYST.
*/
-void sfi_unmap_table(struct sfi_table_header *th)
+static void sfi_unmap_table(struct sfi_table_header *th)
{
if (!TABLE_ON_PAGE(syst_va, th, th->len))
sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 99829985c1a1..95ccedabba4f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -185,6 +185,16 @@ config SPI_DAVINCI
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+config SPI_DLN2
+ tristate "Diolan DLN-2 USB SPI adapter"
+ depends on MFD_DLN2
+ help
+	  If you say yes to this option, support will be included for the
+	  Diolan DLN-2, a USB-to-SPI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-dln2.
+
config SPI_EFM32
tristate "EFM32 SPI controller"
depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -279,7 +289,7 @@ config SPI_FSL_CPM
depends on FSL_SOC
config SPI_FSL_SPI
- bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+ tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
depends on OF
select SPI_FSL_LIB
select SPI_FSL_CPM if FSL_SOC
@@ -292,7 +302,6 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
- select SPI_BITBANG
select REGMAP_MMIO
depends on SOC_VF610 || COMPILE_TEST
help
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI
mode. VF610 platform uses the controller.
config SPI_FSL_ESPI
- bool "Freescale eSPI controller"
+ tristate "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
- select S3C64XX_PL080 if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
@@ -503,6 +511,13 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_ST_SSC4
+ tristate "STMicroelectronics SPI SSC-based driver"
+ depends on ARCH_STI
+ help
+	  SPI support for STMicroelectronics SoCs. If you say yes to this
+	  option, support will be included for the SSC-driven SPI controller.
+
config SPI_SUN4I
tristate "Allwinner A10 SoCs SPI controller"
depends on ARCH_SUNXI || COMPILE_TEST
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA
16 bit words in SPI mode 0, automatically asserting CS on transfer
start and deasserting on end.
-
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6b9d2ac629cc..d8cbf654976b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
+obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 23d8f5f56579..9af7841f2e8c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
struct atmel_spi_device *asd;
int timeout;
int ret;
+ unsigned long dma_timeout;
as = spi_master_get_devdata(master);
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
/* interrupts are disabled, so free the lock for schedule */
atmel_spi_unlock(as);
- ret = wait_for_completion_timeout(&as->xfer_completion,
- SPI_DMA_TIMEOUT);
+ dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
+ SPI_DMA_TIMEOUT);
atmel_spi_lock(as);
- if (WARN_ON(ret == 0)) {
- dev_err(&spi->dev,
- "spi trasfer timeout, err %d\n", ret);
+ if (WARN_ON(dma_timeout == 0)) {
+ dev_err(&spi->dev, "spi transfer timeout\n");
as->done_status = -EIO;
- } else {
- ret = 0;
}
if (as->done_status)
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 326f47973684..f45e085c01a6 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 98aab457b24d..419a782ab6d5 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clk.h>
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index c20530982e26..e73e2b052c9c 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
*/
#include <linux/kernel.h>
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dc7d2c2d643e..5ef6638d5e8a 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index ee4f91ccd8fd..9a95862986c8 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 41b5dc4445f6..688956ff5095 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index b3707badb1e5..5e991065f5b0 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/interrupt.h>
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644
index 000000000000..3b7d91d94fea
--- /dev/null
+++ b/drivers/spi/spi-dln2.c
@@ -0,0 +1,881 @@
+/*
+ * Driver for the Diolan DLN-2 USB-SPI adapter
+ *
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dln2.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <asm/unaligned.h>
+
+#define DLN2_SPI_MODULE_ID 0x02
+#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
+
+/* SPI commands */
+#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
+#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
+#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
+#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
+#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
+#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
+#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
+#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
+#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
+#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
+#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
+#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
+#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
+#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
+#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
+#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
+#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
+#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
+#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
+#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
+#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
+#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
+#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
+#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
+#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
+#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
+#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
+#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
+#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
+#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
+#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
+#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
+#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
+#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
+#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
+#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
+#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
+#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
+#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
+#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
+#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
+#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
+#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
+#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
+
+#define DLN2_SPI_MAX_XFER_SIZE 256
+#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
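+/*
+ * The extra 16 bytes in DLN2_SPI_BUF_SIZE leave room above the payload for
+ * the request/response header fields (port, size, attr).
+ */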
+#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
+#define DLN2_TRANSFERS_WAIT_COMPLETE 1
+#define DLN2_TRANSFERS_CANCEL 0
+#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
+
+struct dln2_spi {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ u8 port;
+
+ /*
+	 * This buffer is used mainly for read/write operations. Since the
+	 * transfers can be quite large, we cannot use the stack. Protection is not
+ * needed because all SPI communication is serialized by the SPI core.
+ */
+ void *buf;
+
+ u8 bpw;
+ u32 speed;
+ u16 mode;
+ u8 cs;
+};
+
+/*
+ * Enable/Disable SPI module. The disable command will wait for transfers to
+ * complete first.
+ */
+static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
+{
+ u16 cmd;
+ struct {
+ u8 port;
+ u8 wait_for_completion;
+ } tx;
+ unsigned len = sizeof(tx);
+
+ tx.port = dln2->port;
+
+ if (enable) {
+ cmd = DLN2_SPI_ENABLE;
+ len -= sizeof(tx.wait_for_completion);
+ } else {
+ tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
+ cmd = DLN2_SPI_DISABLE;
+ }
+
+ return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
+}
+
+/*
+ * Select/unselect multiple CS lines. The selected lines will be automatically
+ * toggled LOW/HIGH by the board firmware during transfers, provided they're
+ * enabled first.
+ *
+ * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
+ * will toggle the lines LOW/HIGH automatically.
+ */
+static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
+{
+ struct {
+ u8 port;
+ u8 cs;
+ } tx;
+
+ tx.port = dln2->port;
+
+ /*
+ * According to Diolan docs, "a slave device can be selected by changing
+ * the corresponding bit value to 0". The rest must be set to 1. Hence
+ * the bitwise NOT in front.
+ */
+ tx.cs = ~cs_mask;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
+}
+
+/*
+ * Select one CS line. The other lines will be un-selected.
+ */
+static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
+{
+ return dln2_spi_cs_set(dln2, BIT(cs));
+}
+
+/*
+ * Enable/disable CS lines for usage. The module has to be disabled first.
+ */
+static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
+{
+ struct {
+ u8 port;
+ u8 cs;
+ } tx;
+ u16 cmd;
+
+ tx.port = dln2->port;
+ tx.cs = cs_mask;
+ cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
+
+ return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
+}
+
+static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
+{
+ u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
+
+ return dln2_spi_cs_enable(dln2, cs_mask, enable);
+}
+
+static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ __le16 cs_count;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
+ &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ *cs_num = le16_to_cpu(rx.cs_count);
+
+ dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
+
+ return 0;
+}
+
+static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ __le32 speed;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+
+ ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ *freq = le32_to_cpu(rx.speed);
+
+ return 0;
+}
+
+/*
+ * Get bus min/max frequencies.
+ */
+static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
+{
+ int ret;
+
+ ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
+ *fmin, *fmax);
+
+ return 0;
+}
+
+/*
+ * Set the bus speed. The module automatically rounds down to the closest
+ * available frequency and returns the value it actually set. The module has
+ * to be disabled first.
+ */
+static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le32 speed;
+ } __packed tx;
+ struct {
+ __le32 speed;
+ } rx;
+ int rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+ tx.speed = cpu_to_le32(speed);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
+ &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ return 0;
+}
+
+/*
+ * Change CPOL & CPHA. The module has to be disabled first.
+ */
+static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
+{
+ struct {
+ u8 port;
+ u8 mode;
+ } tx;
+
+ tx.port = dln2->port;
+ tx.mode = mode;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
+}
+
+/*
+ * Change frame size. The module has to be disabled first.
+ */
+static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
+{
+ struct {
+ u8 port;
+ u8 bpw;
+ } tx;
+
+ tx.port = dln2->port;
+ tx.bpw = bpw;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
+ &tx, sizeof(tx));
+}
+
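+/*
+ * Query the adapter for its supported frame sizes and turn them into a
+ * bits_per_word mask, e.g. sizes of 8 and 16 bits become
+ * BIT(7) | BIT(15) = 0x8080.
+ */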
+static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
+ u32 *bpw_mask)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ u8 count;
+ u8 frame_sizes[36];
+ } *rx = dln2->buf;
+ unsigned rx_len = sizeof(*rx);
+ int i;
+
+ tx.port = dln2->port;
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
+ &tx, sizeof(tx), rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(*rx))
+ return -EPROTO;
+ if (rx->count > ARRAY_SIZE(rx->frame_sizes))
+ return -EPROTO;
+
+ *bpw_mask = 0;
+ for (i = 0; i < rx->count; i++)
+ *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
+
+ dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
+
+ return 0;
+}
+
+/*
+ * Copy the data to the DLN2 buffer and change the byte order to LE, as
+ * required by the DLN2 module. The SPI core makes sure that the data length
+ * is a multiple of the word size.
+ */
+static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+ memcpy(dln2_buf, src, len);
+#else
+ if (bpw <= 8) {
+ memcpy(dln2_buf, src, len);
+ } else if (bpw <= 16) {
+ __le16 *d = (__le16 *)dln2_buf;
+ u16 *s = (u16 *)src;
+
+ len = len / 2;
+ while (len--)
+ *d++ = cpu_to_le16p(s++);
+ } else {
+ __le32 *d = (__le32 *)dln2_buf;
+ u32 *s = (u32 *)src;
+
+ len = len / 4;
+ while (len--)
+ *d++ = cpu_to_le32p(s++);
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Copy the data from the DLN2 buffer and convert it to CPU byte order, since
+ * the DLN2 buffer is LE ordered. The SPI core makes sure that the data length
+ * is a multiple of the word size. The RX dln2_buf is only 2-byte aligned, so
+ * for BE we have to avoid unaligned accesses in the 32-bit case.
+ */
+static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+ memcpy(dest, dln2_buf, len);
+#else
+ if (bpw <= 8) {
+ memcpy(dest, dln2_buf, len);
+ } else if (bpw <= 16) {
+ u16 *d = (u16 *)dest;
+ __le16 *s = (__le16 *)dln2_buf;
+
+ len = len / 2;
+ while (len--)
+ *d++ = le16_to_cpup(s++);
+ } else {
+ u32 *d = (u32 *)dest;
+ __le32 *s = (__le32 *)dln2_buf;
+
+ len = len / 4;
+ while (len--)
+ *d++ = get_unaligned_le32(s++);
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Perform one write operation.
+ */
+static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
+ u16 data_len, u8 attr)
+{
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *tx = dln2->buf;
+ unsigned tx_len;
+
+ BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ tx->port = dln2->port;
+ tx->size = cpu_to_le16(data_len);
+ tx->attr = attr;
+
+ dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
+
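+	/*
+	 * sizeof(*tx) counts the full 256-byte buf, so trim the request down
+	 * to the header plus data_len payload bytes.
+	 */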
+ tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
+}
+
+/*
+ * Perform one read operation.
+ */
+static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
+ u16 data_len, u8 attr)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ } __packed tx;
+ struct {
+ __le16 size;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *rx = dln2->buf;
+ unsigned rx_len = sizeof(*rx);
+
+ BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ tx.port = dln2->port;
+ tx.size = cpu_to_le16(data_len);
+ tx.attr = attr;
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
+ rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx->size) + data_len)
+ return -EPROTO;
+ if (le16_to_cpu(rx->size) != data_len)
+ return -EPROTO;
+
+ dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
+
+ return 0;
+}
+
+/*
+ * Perform one write & read operation.
+ */
+static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
+ u8 *rx_data, u16 data_len, u8 attr)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *tx;
+ struct {
+ __le16 size;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *rx;
+ unsigned tx_len, rx_len;
+
+ BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
+ sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ /*
+	 * Since this is a pseudo full-duplex communication, it is safe to use
+	 * the same buffer for both tx and rx. By the time DLN2 sends the
+	 * response back with the rx data, the tx buffer is no longer needed.
+ */
+ tx = dln2->buf;
+ rx = dln2->buf;
+
+ tx->port = dln2->port;
+ tx->size = cpu_to_le16(data_len);
+ tx->attr = attr;
+
+ dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
+
+ tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+ rx_len = sizeof(*rx);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
+ rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx->size) + data_len)
+ return -EPROTO;
+ if (le16_to_cpu(rx->size) != data_len)
+ return -EPROTO;
+
+ dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
+
+ return 0;
+}
+
+/*
+ * Read/write wrapper. It automatically splits an operation into multiple
+ * smaller ones to fit the device buffer constraints.
+ */
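+/*
+ * For example, a 600-byte transfer is issued as 256 + 256 + 88 byte chunks;
+ * DLN2_SPI_ATTR_LEAVE_SS_LOW is forced for every chunk except the last one,
+ * which uses the caller-supplied attr.
+ */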
+static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
+			 u8 *rx_data, u16 data_len, u8 attr)
+{
+ int ret;
+ u16 len;
+ u8 temp_attr;
+ u16 remaining = data_len;
+ u16 offset;
+
+ do {
+ if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
+ len = DLN2_SPI_MAX_XFER_SIZE;
+ temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+ } else {
+ len = remaining;
+ temp_attr = attr;
+ }
+
+ offset = data_len - remaining;
+
+ if (tx_data && rx_data) {
+ ret = dln2_spi_read_write_one(dln2,
+ tx_data + offset,
+ rx_data + offset,
+ len, temp_attr);
+ } else if (tx_data) {
+ ret = dln2_spi_write_one(dln2,
+ tx_data + offset,
+ len, temp_attr);
+ } else if (rx_data) {
+ ret = dln2_spi_read_one(dln2,
+ rx_data + offset,
+ len, temp_attr);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ remaining -= len;
+ } while (remaining);
+
+ return 0;
+}
+
+static int dln2_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ int ret;
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+
+ if (dln2->cs != spi->chip_select) {
+ ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
+ if (ret < 0)
+ return ret;
+
+ dln2->cs = spi->chip_select;
+ }
+
+ return 0;
+}
+
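+/*
+ * Reconfigure speed, bits-per-word and mode only when they differ from the
+ * cached values; the DLN2 module is disabled first, the changed parameters
+ * are written, and the module is enabled again at the end.
+ */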
+static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
+ u8 bpw, u8 mode)
+{
+ int ret;
+ bool bus_setup_change;
+
+ bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
+ dln2->bpw != bpw;
+
+ if (!bus_setup_change)
+ return 0;
+
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0)
+ return ret;
+
+ if (dln2->speed != speed) {
+ ret = dln2_spi_set_speed(dln2, speed);
+ if (ret < 0)
+ return ret;
+
+ dln2->speed = speed;
+ }
+
+ if (dln2->mode != mode) {
+ ret = dln2_spi_set_mode(dln2, mode & 0x3);
+ if (ret < 0)
+ return ret;
+
+ dln2->mode = mode;
+ }
+
+ if (dln2->bpw != bpw) {
+ ret = dln2_spi_set_bpw(dln2, bpw);
+ if (ret < 0)
+ return ret;
+
+ dln2->bpw = bpw;
+ }
+
+ return dln2_spi_enable(dln2, true);
+}
+
+static int dln2_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ int status;
+ u8 attr = 0;
+
+ status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
+ xfer->bits_per_word,
+ spi->mode);
+ if (status < 0) {
+ dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
+ return status;
+ }
+
+ if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
+ attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+
+ status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
+ xfer->len, attr);
+ if (status < 0)
+ dev_err(&dln2->pdev->dev, "write/read failed!\n");
+
+ return status;
+}
+
+static int dln2_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct dln2_spi *dln2;
+ struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ dln2 = spi_master_get_devdata(master);
+
+ dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
+ if (!dln2->buf) {
+ ret = -ENOMEM;
+ goto exit_free_master;
+ }
+
+ dln2->master = master;
+ dln2->pdev = pdev;
+ dln2->port = pdata->port;
+ /* cs/mode can never be 0xff, so the first transfer will set them */
+ dln2->cs = 0xff;
+ dln2->mode = 0xff;
+
+ /* disable SPI module before continuing with the setup */
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get number of CS pins\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_speed_range(dln2,
+ &master->min_speed_hz,
+ &master->max_speed_hz);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_supported_frame_sizes(dln2,
+ &master->bits_per_word_mask);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_cs_enable_all(dln2, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable CS pins\n");
+ goto exit_free_master;
+ }
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->prepare_message = dln2_spi_prepare_message;
+ master->transfer_one = dln2_spi_transfer_one;
+ master->auto_runtime_pm = true;
+
+ /* enable SPI module, we're good to go */
+ ret = dln2_spi_enable(dln2, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable SPI module\n");
+ goto exit_free_master;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ DLN2_RPM_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto exit_register;
+ }
+
+ return ret;
+
+exit_register:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ if (dln2_spi_enable(dln2, false) < 0)
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+exit_free_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int dln2_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (dln2_spi_enable(dln2, false) < 0)
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dln2_spi_suspend(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ ret = spi_master_suspend(master);
+ if (ret < 0)
+ return ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+	 * USB power may be cut off during sleep. Resetting the following
+	 * parameters forces the board to be set up again before the first
+	 * transfer.
+ */
+ dln2->cs = 0xff;
+ dln2->speed = 0;
+ dln2->bpw = 0;
+ dln2->mode = 0xff;
+
+ return 0;
+}
+
+static int dln2_spi_resume(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = dln2_spi_cs_enable_all(dln2, true);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_spi_enable(dln2, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int dln2_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ return dln2_spi_enable(dln2, false);
+}
+
+static int dln2_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ return dln2_spi_enable(dln2, true);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops dln2_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
+ SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
+ dln2_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver spi_dln2_driver = {
+ .driver = {
+ .name = "dln2-spi",
+ .pm = &dln2_spi_pm,
+ },
+ .probe = dln2_spi_probe,
+ .remove = dln2_spi_remove,
+};
+module_platform_driver(spi_dln2_driver);
+
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dln2-spi");
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 7281316a5ecb..a0197fd4e95c 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = {
/* Some specific info for SPI0 controller on Intel MID */
-/* HW info for MRST CLk Control Unit, one 32b reg */
+/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE 100000000 /* 100m */
-#define MRST_CLK_SPI0_REG 0xff11d86c
+#define MRST_CLK_SPI_REG 0xff11d86c
#define CLK_SPI_BDIV_OFFSET 0
#define CLK_SPI_BDIV_MASK 0x00000007
#define CLK_SPI_CDIV_OFFSET 9
@@ -261,17 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws)
void __iomem *clk_reg;
u32 clk_cdiv;
- clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+ clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
if (!clk_reg)
return -ENOMEM;
- /* get SPI controller operating freq info */
- clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
+ /* Get SPI controller operating freq info */
+ clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
+ clk_cdiv &= CLK_SPI_CDIV_MASK;
+ clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
- iounmap(clk_reg);
- dws->num_cs = 16;
- dws->fifo_len = 40; /* FIFO has 40 words buffer */
+ iounmap(clk_reg);
#ifdef CONFIG_SPI_DW_MID_DMA
dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ba68da12cdf0..5ba331047cbe 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -30,10 +30,20 @@ struct dw_spi_pci {
struct spi_pci_desc {
int (*setup)(struct dw_spi *);
+ u16 num_cs;
+ u16 bus_num;
};
-static struct spi_pci_desc spi_pci_mid_desc = {
+static struct spi_pci_desc spi_pci_mid_desc_1 = {
.setup = dw_spi_mid_init,
+ .num_cs = 32,
+ .bus_num = 0,
+};
+
+static struct spi_pci_desc spi_pci_mid_desc_2 = {
+ .setup = dw_spi_mid_init,
+ .num_cs = 4,
+ .bus_num = 1,
};
static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dws->regs = pcim_iomap_table(pdev)[pci_bar];
- dws->bus_num = 0;
- dws->num_cs = 4;
dws->irq = pdev->irq;
/*
* Specific handling for paltforms, like dma setup,
* clock rate, FIFO depth.
*/
- if (desc && desc->setup) {
- ret = desc->setup(dws);
- if (ret)
- return ret;
+ if (desc) {
+ dws->num_cs = desc->num_cs;
+ dws->bus_num = desc->bus_num;
+
+ if (desc->setup) {
+ ret = desc->setup(dws);
+ if (ret)
+ return ret;
+ }
+ } else {
+ return -ENODEV;
}
ret = dw_spi_add_host(&pdev->dev, dws);
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
static const struct pci_device_id pci_ids[] = {
/* Intel MID platform SPI controller 0 */
- { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc},
+ /*
+	 * Access to device 8086:0801 is disabled by HW, since it is used
+	 * exclusively by the SCU to communicate with the MSIC.
+ */
+ /* Intel MID platform SPI controller 1 */
+ { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
+ /* Intel MID platform SPI controller 2 */
+ { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
{},
};
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d0d5542efc06..5a97a62b298a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
}
/* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct dw_spi *dws)
+static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
spi_enable_chip(dws, 0);
spi_mask_intr(dws, 0xff);
@@ -621,14 +621,15 @@ static void spi_hw_init(struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
- for (fifo = 2; fifo <= 257; fifo++) {
+ for (fifo = 2; fifo <= 256; fifo++) {
dw_writew(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
break;
}
-
- dws->fifo_len = (fifo == 257) ? 0 : fifo;
dw_writew(dws, DW_SPI_TXFLTR, 0);
+
+ dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+ dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
}
@@ -668,12 +669,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->dev.of_node = dev->of_node;
/* Basic HW init */
- spi_hw_init(dws);
+ spi_hw_init(dev, dws);
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
- dev_warn(&master->dev, "DMA init failed\n");
+ dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
}
}
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
{
int ret;
- spi_hw_init(dws);
+ spi_hw_init(&dws->master->dev, dws);
ret = spi_master_resume(dws->master);
if (ret)
dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 912b9037e9cf..286b2c81fc6b 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
return 0;
}
-static int falcon_sflash_prepare_xfer(struct spi_master *master)
-{
- return 0;
-}
-
-static int falcon_sflash_unprepare_xfer(struct spi_master *master)
-{
- return 0;
-}
-
static int falcon_sflash_xfer_one(struct spi_master *master,
struct spi_message *m)
{
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
master->mode_bits = SPI_MODE_3;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = falcon_sflash_setup;
- master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
master->transfer_one_message = falcon_sflash_xfer_one;
- master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index e85ab1cb17a2..9c46a3058743 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -20,6 +20,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
}
}
}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
@@ -162,6 +164,7 @@ err_rx_dma:
dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
mspi->xfer_in_progress = NULL;
}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
else
complete(&mspi->done);
}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
static void *fsl_spi_alloc_dummy_rx(void)
{
@@ -375,6 +380,7 @@ err_pram:
fsl_spi_free_dummy_rx();
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
cpm_muram_free(cpm_muram_offset(mspi->pram));
fsl_spi_free_dummy_rx();
}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4cda994d3f40..d1a39249704a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -106,7 +106,7 @@ struct chip_data {
};
struct fsl_dspi {
- struct spi_bitbang bitbang;
+ struct spi_master *master;
struct platform_device *pdev;
struct regmap *regmap;
@@ -114,6 +114,7 @@ struct fsl_dspi {
struct clk *clk;
struct spi_transfer *cur_transfer;
+ struct spi_message *cur_msg;
struct chip_data *cur_chip;
size_t len;
void *tx;
@@ -123,6 +124,7 @@ struct fsl_dspi {
char dataflags;
u8 cs;
u16 void_write_data;
+ u32 cs_change;
wait_queue_head_t waitq;
u32 waitflags;
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
/* last transfer in the transfer */
dspi_pushr |= SPI_PUSHR_EOQ;
+ if ((dspi->cs_change) && (!dspi->len))
+ dspi_pushr &= ~SPI_PUSHR_CONT;
} else if (tx_word && (dspi->len == 1))
dspi_pushr |= SPI_PUSHR_EOQ;
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
int rx_count = 0;
int rx_word = is_double_byte_mode(dspi);
u16 d;
+
while ((dspi->rx < dspi->rx_end)
&& (rx_count < DSPI_FIFO_SIZE)) {
if (rx_word) {
@@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
return rx_count;
}
-static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int dspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *message)
{
- struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- dspi->cur_transfer = t;
- dspi->cur_chip = spi_get_ctldata(spi);
- dspi->cs = spi->chip_select;
- dspi->void_write_data = dspi->cur_chip->void_write_data;
-
- dspi->dataflags = 0;
- dspi->tx = (void *)t->tx_buf;
- dspi->tx_end = dspi->tx + t->len;
- dspi->rx = t->rx_buf;
- dspi->rx_end = dspi->rx + t->len;
- dspi->len = t->len;
-
- if (!dspi->rx)
- dspi->dataflags |= TRAN_STATE_RX_VOID;
-
- if (!dspi->tx)
- dspi->dataflags |= TRAN_STATE_TX_VOID;
-
- regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
- regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
-
- if (t->speed_hz)
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ struct spi_transfer *transfer;
+ int status = 0;
+ message->actual_length = 0;
+
+ list_for_each_entry(transfer, &message->transfers, transfer_list) {
+ dspi->cur_transfer = transfer;
+ dspi->cur_msg = message;
+ dspi->cur_chip = spi_get_ctldata(spi);
+ dspi->cs = spi->chip_select;
+ if (dspi->cur_transfer->transfer_list.next
+ == &dspi->cur_msg->transfers)
+ transfer->cs_change = 1;
+ dspi->cs_change = transfer->cs_change;
+ dspi->void_write_data = dspi->cur_chip->void_write_data;
+
+ dspi->dataflags = 0;
+ dspi->tx = (void *)transfer->tx_buf;
+ dspi->tx_end = dspi->tx + transfer->len;
+ dspi->rx = transfer->rx_buf;
+ dspi->rx_end = dspi->rx + transfer->len;
+ dspi->len = transfer->len;
+
+ if (!dspi->rx)
+ dspi->dataflags |= TRAN_STATE_RX_VOID;
+
+ if (!dspi->tx)
+ dspi->dataflags |= TRAN_STATE_TX_VOID;
+
+ regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
dspi->cur_chip->ctar_val);
+ if (transfer->speed_hz)
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ dspi->cur_chip->ctar_val);
- dspi_transfer_write(dspi);
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+ message->actual_length += dspi_transfer_write(dspi);
- if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
- dspi->waitflags = 0;
-
- return t->len - dspi->len;
-}
+ if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+ dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
+ dspi->waitflags = 0;
-static void dspi_chipselect(struct spi_device *spi, int value)
-{
- struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- unsigned int pushr;
-
- regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
-
- switch (value) {
- case BITBANG_CS_ACTIVE:
- pushr |= SPI_PUSHR_CONT;
- break;
- case BITBANG_CS_INACTIVE:
- pushr &= ~SPI_PUSHR_CONT;
- break;
+ if (transfer->delay_usecs)
+ udelay(transfer->delay_usecs);
}
- regmap_write(dspi->regmap, SPI_PUSHR, pushr);
+ message->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
}
-static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int dspi_setup(struct spi_device *spi)
{
struct chip_data *chip;
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
unsigned char br = 0, pbr = 0, fmsz = 0;
+ if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
+ fmsz = spi->bits_per_word - 1;
+ } else {
+ pr_err("Invalid wordsize\n");
+ return -ENODEV;
+ }
+
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
- chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
- GFP_KERNEL);
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
}
chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
- if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
- fmsz = spi->bits_per_word - 1;
- } else {
- pr_err("Invalid wordsize\n");
- return -ENODEV;
- }
chip->void_write_data = 0;
@@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-static int dspi_setup(struct spi_device *spi)
+static void dspi_cleanup(struct spi_device *spi)
{
- if (!spi->max_speed_hz)
- return -EINVAL;
+ struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+ dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+ spi->master->bus_num, spi->chip_select);
- return dspi_setup_transfer(spi, NULL);
+ kfree(chip);
}
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
- regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
+ struct spi_message *msg = dspi->cur_msg;
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
dspi_transfer_read(dspi);
if (!dspi->len) {
if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
dspi->waitflags = 1;
wake_up_interruptible(&dspi->waitq);
- } else {
- dspi_transfer_write(dspi);
-
- return IRQ_HANDLED;
- }
+ } else
+ msg->actual_length += dspi_transfer_write(dspi);
return IRQ_HANDLED;
}
@@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev)
dspi = spi_master_get_devdata(master);
dspi->pdev = pdev;
- dspi->bitbang.master = master;
- dspi->bitbang.chipselect = dspi_chipselect;
- dspi->bitbang.setup_transfer = dspi_setup_transfer;
- dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
- dspi->bitbang.master->setup = dspi_setup;
- dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+ dspi->master = master;
+
+ master->transfer = NULL;
+ master->setup = dspi_setup;
+ master->transfer_one_message = dspi_transfer_one_message;
+ master->dev.of_node = pdev->dev.of_node;
+ master->cleanup = dspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
@@ -525,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev)
init_waitqueue_head(&dspi->waitq);
platform_set_drvdata(pdev, master);
- ret = spi_bitbang_start(&dspi->bitbang);
+ ret = spi_register_master(master);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI master\n");
goto out_clk_put;
@@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
- spi_bitbang_stop(&dspi->bitbang);
clk_disable_unprepare(dspi->clk);
- spi_master_put(dspi->bitbang.master);
+ spi_unregister_master(dspi->master);
+ spi_master_put(dspi->master);
return 0;
}
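[Note on the spi-fsl-dspi.c hunks above] The driver moves from spi_bitbang to transfer_one_message(): it walks message->transfers with list_for_each_entry() and forces cs_change on the transfer whose transfer_list.next points back at the list head, i.e. the last transfer, so chip select is released at the end of the message. A userspace mock of that circular-list convention, assuming nothing beyond standard C (all names here are illustrative, not kernel API):

/* Mock of "transfer->transfer_list.next == &message->transfers": in a
 * kernel-style list_head ring, the last entry's next pointer is the head. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct xfer { struct list_head node; int len; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head transfers = { &transfers, &transfers }; /* like spi_message.transfers */
	struct xfer a = { .node = { 0 }, .len = 4 };
	struct xfer b = { .node = { 0 }, .len = 8 };
	struct xfer c = { .node = { 0 }, .len = 2 };

	list_add_tail(&a.node, &transfers);
	list_add_tail(&b.node, &transfers);
	list_add_tail(&c.node, &transfers);

	for (struct list_head *p = transfers.next; p != &transfers; p = p->next) {
		struct xfer *t = (struct xfer *)p;   /* node is the first member */
		int last = (p->next == &transfers);  /* where the driver sets cs_change */

		printf("len=%d last=%d\n", t->len, last);
	}
	return 0;
}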
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 446b737e1532..cb35d2f0d0e6 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_FSL_SOC
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
type *rx = mpc8xxx_spi->rx; \
*rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
mpc8xxx_spi->rx = rx; \
-}
+} \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
#define MPC8XXX_SPI_TX_BUF(type) \
u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
data = *tx++ << mpc8xxx_spi->tx_shift; \
mpc8xxx_spi->tx = tx; \
return data; \
-}
+} \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
MPC8XXX_SPI_RX_BUF(u8)
MPC8XXX_SPI_RX_BUF(u16)
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
{
return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
+EXPORT_SYMBOL_GPL(to_of_pinfo);
const char *mpc8xxx_spi_strmode(unsigned int flags)
{
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
}
return "CPU";
}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
mpc8xxx_spi->rx_shift = 0;
mpc8xxx_spi->tx_shift = 0;
- init_completion(&mpc8xxx_spi->done);
-
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
init_completion(&mpc8xxx_spi->done);
}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
int mpc8xxx_spi_remove(struct device *dev)
{
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
return 0;
}
+EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index b4ed04e8862f..1326a392adca 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,7 +28,7 @@ struct mpc8xxx_spi {
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
-#ifdef CONFIG_SPI_FSL_ESPI
+#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
int len;
#endif
@@ -68,7 +68,7 @@ struct mpc8xxx_spi {
unsigned int flags;
-#ifdef CONFIG_SPI_FSL_SPI
+#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
int type;
int native_chipselects;
u8 max_bits_per_word;
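[Note on the spi-fsl-lib.h hunks above] Replacing #ifdef CONFIG_SPI_FSL_ESPI with #if IS_ENABLED(CONFIG_SPI_FSL_ESPI) keeps the struct fields present when the driver is built as a module, because IS_ENABLED() is true for both =y and =m. A simplified, self-contained rendition of the macro trick (the real definition lives in include/linux/kconfig.h and differs in detail; CONFIG_SPI_FSL_ESPI_MODULE is defined here only to simulate an =m build):

/* Simplified stand-in for the kernel's IS_ENABLED(): evaluates to 1 when
 * either CONFIG_FOO or CONFIG_FOO_MODULE is #defined as 1, else to 0. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_SPI_FSL_ESPI_MODULE 1	/* pretend the driver is built as =m */

int main(void)
{
#ifdef CONFIG_SPI_FSL_ESPI
	puts("#ifdef sees the option");		/* not printed: only true for =y */
#endif
#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
	puts("IS_ENABLED() sees the option");	/* printed for =y and for =m */
#endif
	return 0;
}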
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index aee4e7589568..1c34c9314c8a 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -92,7 +88,7 @@ struct spi_gpio {
/*----------------------------------------------------------------------*/
-static inline struct spi_gpio * __pure
+static inline struct spi_gpio *__pure
spi_to_spi_gpio(const struct spi_device *spi)
{
const struct spi_bitbang *bang;
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
return spi_gpio;
}
-static inline struct spi_gpio_platform_data * __pure
+static inline struct spi_gpio_platform_data *__pure
spi_to_pdata(const struct spi_device *spi)
{
return &spi_to_spi_gpio(spi)->pdata;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index aad6683db81b..c01567d53581 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
unsigned int count = 0;
u32 status;
- while (count < max) {
+ while (count < max / 4) {
spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (status & SPFI_INTERRUPT_SDFUL)
break;
- spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
- count += 4;
+ spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
+ count++;
}
- return count;
+ return count * 4;
}
static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
unsigned int count = 0;
u32 status;
- while (count < max) {
+ while (count < max / 4) {
spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
SPFI_INTERRUPT_CLEAR);
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (!(status & SPFI_INTERRUPT_GDEX32BIT))
break;
- buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
- count += 4;
+ buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+ count++;
}
- return count;
+ return count * 4;
}
static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
time_before(jiffies, timeout)) {
unsigned int tx_count, rx_count;
- switch (xfer->bits_per_word) {
- case 32:
+ if (tx_bytes >= 4)
tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
- rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
- break;
- case 8:
- default:
+ else
tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+
+ if (rx_bytes >= 4)
+ rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+ else
rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
- break;
- }
tx_buf += tx_count;
rx_buf += rx_count;
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
if (xfer->rx_buf) {
rxconf.direction = DMA_DEV_TO_MEM;
- switch (xfer->bits_per_word) {
- case 32:
+ if (xfer->len % 4 == 0) {
rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
rxconf.src_addr_width = 4;
rxconf.src_maxburst = 4;
- break;
- case 8:
- default:
+ } else {
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
rxconf.src_addr_width = 1;
rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
if (xfer->tx_buf) {
txconf.direction = DMA_MEM_TO_DEV;
- switch (xfer->bits_per_word) {
- case 32:
+ if (xfer->len % 4 == 0) {
txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
txconf.dst_addr_width = 4;
txconf.dst_maxburst = 4;
- break;
- case 8:
- default:
+ } else {
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
txconf.dst_addr_width = 1;
txconf.dst_maxburst = 4;
- break;
}
dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
{
- if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE)
- return true;
- if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
+ if (xfer->len > SPFI_32BIT_FIFO_SIZE)
return true;
return false;
}
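[Note on the spi-img-spfi.c hunks above] The 32-bit PIO paths are now selected by transfer length rather than bits_per_word, and the loop counter runs in 32-bit words (max / 4 iterations) while the function still returns bytes (count * 4) so callers can keep advancing byte-based buffer pointers. A userspace sketch of the write-side bookkeeping; the function name and the omitted FIFO-full check are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Mock of the fixed spfi_pio_write32() accounting: count is in words,
 * the return value is in bytes and is always a multiple of 4. */
static unsigned int pio_write32(const uint32_t *buf, unsigned int max_bytes)
{
	unsigned int count = 0;

	while (count < max_bytes / 4) {
		/* the real driver breaks out here when the TX FIFO reports full */
		printf("write word %u: 0x%08x\n", count, buf[count]);
		count++;
	}
	return count * 4;	/* bytes consumed */
}

int main(void)
{
	uint32_t tx[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };

	printf("consumed %u bytes\n", pio_write32(tx, sizeof(tx)));
	return 0;
}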
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 961b97d43b43..6fea4af51c41 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -89,7 +89,6 @@ struct spi_imx_data {
struct completion xfer_done;
void __iomem *base;
- int irq;
struct clk *clk_per;
struct clk *clk_ipg;
unsigned long spi_clk;
@@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
struct dma_slave_config slave_config = {};
int ret;
+ /* use pio mode for i.mx6dl chip TKT238285 */
+ if (of_machine_is_compatible("fsl,imx6dl"))
+ return 0;
+
/* Prepare for TX DMA: */
master->dma_tx = dma_request_slave_channel(dev, "tx");
if (!master->dma_tx) {
@@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
{
struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
int ret;
+ unsigned long timeout;
u32 dma;
int left;
struct spi_master *master = spi_imx->bitbang.master;
@@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
dma_async_issue_pending(master->dma_tx);
dma_async_issue_pending(master->dma_rx);
/* Wait SDMA to finish the data transfer.*/
- ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+ timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
IMX_DMA_TIMEOUT);
- if (!ret) {
+ if (!timeout) {
pr_warn("%s %s: I/O Error in DMA TX\n",
dev_driver_string(&master->dev),
dev_name(&master->dev));
dmaengine_terminate_all(master->dma_tx);
} else {
- ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
- IMX_DMA_TIMEOUT);
- if (!ret) {
+ timeout = wait_for_completion_timeout(
+ &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+ if (!timeout) {
pr_warn("%s %s: I/O Error in DMA RX\n",
dev_driver_string(&master->dev),
dev_name(&master->dev));
@@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
spi_imx->dma_finished = 1;
spi_imx->devtype_data->trigger(spi_imx);
- if (!ret)
+ if (!timeout)
ret = -ETIMEDOUT;
- else if (ret > 0)
+ else
ret = transfer->len;
return ret;
@@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
- int i, ret, num_cs;
+ int i, ret, num_cs, irq;
if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_master_put;
}
- spi_imx->irq = platform_get_irq(pdev, 0);
- if (spi_imx->irq < 0) {
- ret = spi_imx->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
goto out_master_put;
}
- ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
dev_name(&pdev->dev), spi_imx);
if (ret) {
- dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
goto out_master_put;
}
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 41c5765be746..ba72347cb99d 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 1bbac0378bf7..5468fc70dbf8 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -85,7 +85,7 @@ struct meson_spifc {
struct device *dev;
};
-static struct regmap_config spifc_regmap_config = {
+static const struct regmap_config spifc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 4045a1e580e1..5b0e9a3e83f6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
dmaengine_submit(desc);
dma_async_issue_pending(ssp->dmach);
- ret = wait_for_completion_timeout(&spi->c,
- msecs_to_jiffies(SSP_TIMEOUT));
- if (!ret) {
+ if (!wait_for_completion_timeout(&spi->c,
+ msecs_to_jiffies(SSP_TIMEOUT))) {
dev_err(ssp->dev, "DMA transfer timeout\n");
ret = -ETIMEDOUT;
dmaengine_terminate_all(ssp->dmach);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 79399ae9c84c..d890d309dff9 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -16,11 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index daf1ada5cd11..3c0844457c07 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -28,10 +28,6 @@
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3bc3cbabbbc0..4df8942058de 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 3dec9e0b99b8..861664776672 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -28,7 +28,12 @@
/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
#define SPI_AUTOSUSPEND_TIMEOUT 200
-#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/
+/* Some SoCs using this driver support up to 8 chip selects.
+ * It is up to the implementer to only use the chip selects
+ * that are available.
+ */
+#define ORION_NUM_CHIPSELECTS 8
+
#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
#define ORION_SPI_IF_CTRL_REG 0x00
@@ -44,6 +49,10 @@
#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
ORION_SPI_MODE_CPHA)
+#define ORION_SPI_CS_MASK 0x1C
+#define ORION_SPI_CS_SHIFT 2
+#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
+ ORION_SPI_CS_MASK)
enum orion_spi_type {
ORION_SPI,
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
+static void orion_spi_set_cs(struct spi_device *spi, bool enable)
{
- if (enable)
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
+ ORION_SPI_CS(spi->chip_select));
+
+ /* Chip select logic is inverted from spi_set_cs */
+ if (!enable)
orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
else
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
@@ -332,64 +350,31 @@ out:
return xfer->len - count;
}
-static int orion_spi_transfer_one_message(struct spi_master *master,
- struct spi_message *m)
+static int orion_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct orion_spi *orion_spi = spi_master_get_devdata(master);
- struct spi_device *spi = m->spi;
- struct spi_transfer *t = NULL;
- int par_override = 0;
int status = 0;
- int cs_active = 0;
-
- /* Load defaults */
- status = orion_spi_setup_transfer(spi, NULL);
+ status = orion_spi_setup_transfer(spi, t);
if (status < 0)
- goto msg_done;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (par_override || t->speed_hz || t->bits_per_word) {
- par_override = 1;
- status = orion_spi_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (!t->speed_hz && !t->bits_per_word)
- par_override = 0;
- }
-
- if (!cs_active) {
- orion_spi_set_cs(orion_spi, 1);
- cs_active = 1;
- }
+ return status;
- if (t->len)
- m->actual_length += orion_spi_write_read(spi, t);
+ if (t->len)
+ orion_spi_write_read(spi, t);
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (t->cs_change) {
- orion_spi_set_cs(orion_spi, 0);
- cs_active = 0;
- }
- }
-
-msg_done:
- if (cs_active)
- orion_spi_set_cs(orion_spi, 0);
-
- m->status = status;
- spi_finalize_current_message(master);
+ return status;
+}
- return 0;
+static int orion_spi_setup(struct spi_device *spi)
+{
+ return orion_spi_setup_transfer(spi, NULL);
}
static int orion_spi_reset(struct orion_spi *orion_spi)
{
/* Verify that the CS is deasserted */
- orion_spi_set_cs(orion_spi, 0);
-
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
return 0;
}
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev)
/* we support only mode 0, and no options */
master->mode_bits = SPI_CPHA | SPI_CPOL;
-
- master->transfer_one_message = orion_spi_transfer_one_message;
+ master->set_cs = orion_spi_set_cs;
+ master->transfer_one = orion_spi_transfer_one;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->setup = orion_spi_setup;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->auto_runtime_pm = true;
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 62a9297e96ac..66a173939be8 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* by using ->dma_running.
*/
if (atomic_dec_and_test(&drv_data->dma_running)) {
- void __iomem *reg = drv_data->ioaddr;
-
/*
* If the other CPU is still handling the ROR interrupt we
* might not know about the error yet. So we re-check the
* ROR bit here before we clear the status register.
*/
if (!error) {
- u32 status = read_SSSR(reg) & drv_data->mask_sr;
+ u32 status = pxa2xx_spi_read(drv_data, SSSR)
+ & drv_data->mask_sr;
error = status & SSSR_ROR;
}
/* Clear status & disable interrupts */
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->dma_cr1);
write_SSSR_CS(drv_data, drv_data->clear_sr);
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
if (!error) {
pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
msg->state = pxa2xx_spi_next_transfer(drv_data);
} else {
/* In case we got an error we disable the SSP now */
- write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0)
+ & ~SSCR0_SSE);
msg->state = ERROR_STATE;
}
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
- status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
+ status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index e8a26f25d5c0..2e0796a0003f 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
@@ -25,6 +21,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
+#include <mach/dma.h>
#include "spi-pxa2xx.h"
#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
drv_data->dma_mapped = 0;
}
-static int wait_ssp_rx_stall(void const __iomem *ioaddr)
+static int wait_ssp_rx_stall(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
- while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
cpu_relax();
return limit;
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel)
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
const char *msg)
{
- void __iomem *reg = drv_data->ioaddr;
-
/* Stop and reset */
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
write_SSSR_CS(drv_data, drv_data->clear_sr);
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->dma_cr1);
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
struct spi_message *msg = drv_data->cur_msg;
/* Clear and disable interrupts on SSP and DMA channels*/
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->dma_cr1);
write_SSSR_CS(drv_data, drv_data->clear_sr);
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
&& (drv_data->ssp_type == PXA25x_SSP)) {
/* Wait for rx to stall */
- if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ if (wait_ssp_rx_stall(drv_data) == 0)
dev_err(&drv_data->pdev->dev,
"dma_handler: ssp rx stall failed\n");
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 irq_status;
- void __iomem *reg = drv_data->ioaddr;
- irq_status = read_SSSR(reg) & drv_data->mask_sr;
+ irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
if (irq_status & SSSR_ROR) {
pxa2xx_spi_dma_error_stop(drv_data,
"dma_transfer: fifo overrun");
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
/* Check for false positive timeout */
if ((irq_status & SSSR_TINT)
&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
- write_SSSR(SSSR_TINT, reg);
+ pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
return IRQ_HANDLED;
}
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
/* Clear and disable timeout interrupt, do the rest in
* dma_transfer_complete */
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
/* finish this transfer, start the next */
pxa2xx_spi_dma_transfer_complete(drv_data);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 05c623cfb078..6f72ad01e041 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");
-#define MAX_BUSES 3
-
#define TIMOUT_DFLT 1000
/*
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
u32 mask;
switch (drv_data->ssp_type) {
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
break;
}
- return (read_SSSR(reg) & mask) == mask;
+ return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
}
static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
unsigned offset = 0x400;
u32 value, orig;
- if (!is_lpss_ssp(drv_data))
- return;
-
/*
* Perform auto-detection of the LPSS SSP private registers. They
* can be either at 1k or 2k offset from the base address.
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
u32 value;
- if (!is_lpss_ssp(drv_data))
- return;
-
value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
if (enable)
value &= ~SPI_CS_CONTROL_CS_HIGH;
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data)
struct chip_data *chip = drv_data->cur_chip;
if (drv_data->ssp_type == CE4100_SSP) {
- write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
return;
}
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data)
return;
}
- lpss_ssp_cs_control(drv_data, true);
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_cs_control(drv_data, true);
}
static void cs_deassert(struct driver_data *drv_data)
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data)
return;
}
- lpss_ssp_cs_control(drv_data, false);
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_cs_control(drv_data, false);
}
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
- void __iomem *reg = drv_data->ioaddr;
-
do {
- while (read_SSSR(reg) & SSSR_RNE) {
- read_SSDR(reg);
- }
- } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
+ while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ pxa2xx_spi_read(drv_data, SSDR);
+ } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
write_SSSR_CS(drv_data, SSSR_ROR);
return limit;
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
static int null_writer(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
u8 n_bytes = drv_data->n_bytes;
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
- write_SSDR(0, reg);
+ pxa2xx_spi_write(drv_data, SSDR, 0);
drv_data->tx += n_bytes;
return 1;
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data)
static int null_reader(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
u8 n_bytes = drv_data->n_bytes;
- while ((read_SSSR(reg) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
- read_SSDR(reg);
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += n_bytes;
}
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data)
static int u8_writer(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
- write_SSDR(*(u8 *)(drv_data->tx), reg);
+ pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
++drv_data->tx;
return 1;
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data)
static int u8_reader(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
- while ((read_SSSR(reg) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
- *(u8 *)(drv_data->rx) = read_SSDR(reg);
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
++drv_data->rx;
}
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data)
static int u16_writer(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
- write_SSDR(*(u16 *)(drv_data->tx), reg);
+ pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
drv_data->tx += 2;
return 1;
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data)
static int u16_reader(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
- while ((read_SSSR(reg) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
- *(u16 *)(drv_data->rx) = read_SSDR(reg);
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 2;
}
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data)
static int u32_writer(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
- write_SSDR(*(u32 *)(drv_data->tx), reg);
+ pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
drv_data->tx += 4;
return 1;
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data)
static int u32_reader(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
- while ((read_SSSR(reg) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
- *(u32 *)(drv_data->rx) = read_SSDR(reg);
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 4;
}
@@ -546,33 +518,31 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
+ spi_finalize_current_message(drv_data->master);
}
static void reset_sccr1(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
struct chip_data *chip = drv_data->cur_chip;
u32 sccr1_reg;
- sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
sccr1_reg &= ~SSCR1_RFT;
sccr1_reg |= chip->threshold;
- write_SSCR1(sccr1_reg, reg);
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
- void __iomem *reg = drv_data->ioaddr;
-
/* Stop and reset SSP */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
static void int_transfer_complete(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
-
/* Stop SSP */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
/* Update total byte transferred return count actual bytes read */
drv_data->cur_msg->actual_length += drv_data->len -
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data)
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
- void __iomem *reg = drv_data->ioaddr;
+ u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
+ drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
- u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
- drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
-
- u32 irq_status = read_SSSR(reg) & irq_mask;
+ u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
if (irq_status & SSSR_ROR) {
int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
}
if (irq_status & SSSR_TINT) {
- write_SSSR(SSSR_TINT, reg);
+ pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
return IRQ_HANDLED;
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
u32 bytes_left;
u32 sccr1_reg;
- sccr1_reg = read_SSCR1(reg);
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
sccr1_reg &= ~SSCR1_TIE;
/*
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
}
- write_SSCR1(sccr1_reg, reg);
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
/* We did something */
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
- void __iomem *reg = drv_data->ioaddr;
u32 sccr1_reg;
u32 mask = drv_data->mask_sr;
u32 status;
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
* are all set to one. That means that the device is already
* powered off.
*/
- status = read_SSSR(reg);
+ status = pxa2xx_spi_read(drv_data, SSSR);
if (status == ~0)
return IRQ_NONE;
- sccr1_reg = read_SSCR1(reg);
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
/* Ignore possible writes if we don't need to write */
if (!(sccr1_reg & SSCR1_TIE))
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!drv_data->cur_msg) {
- write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
- write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0)
+ & ~SSCR0_SSE);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
write_SSSR_CS(drv_data, drv_data->clear_sr);
dev_err(&drv_data->pdev->dev,
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data)
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct chip_data *chip = NULL;
- void __iomem *reg = drv_data->ioaddr;
u32 clk_div = 0;
u8 bits = 0;
u32 speed = 0;
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data)
/* Clear status and start DMA engine */
cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
- write_SSSR(drv_data->clear_sr, reg);
+ pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
pxa2xx_spi_dma_start(drv_data);
} else {
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data)
}
if (is_lpss_ssp(drv_data)) {
- if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
- write_SSIRF(chip->lpss_rx_threshold, reg);
- if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
- write_SSITF(chip->lpss_tx_threshold, reg);
+ if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
+ != chip->lpss_rx_threshold)
+ pxa2xx_spi_write(drv_data, SSIRF,
+ chip->lpss_rx_threshold);
+ if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
+ != chip->lpss_tx_threshold)
+ pxa2xx_spi_write(drv_data, SSITF,
+ chip->lpss_tx_threshold);
}
if (is_quark_x1000_ssp(drv_data) &&
- (read_DDS_RATE(reg) != chip->dds_rate))
- write_DDS_RATE(chip->dds_rate, reg);
+ (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
+ pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
/* see if we need to reload the config registers */
- if ((read_SSCR0(reg) != cr0) ||
- (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) {
-
+ if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
+ || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
+ != (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+ pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(chip->timeout, reg);
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* first set CR1 without interrupt and service enables */
- write_SSCR1(cr1 & change_mask, reg);
+ pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
/* restart the SSP */
- write_SSCR0(cr0, reg);
+ pxa2xx_spi_write(drv_data, SSCR0, cr0);
} else {
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(chip->timeout, reg);
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
}
cs_assert(drv_data);
/* after chip select, release the data by enabling service
* requests and interrupts, without changing any mode bits */
- write_SSCR1(cr1, reg);
+ pxa2xx_spi_write(drv_data, SSCR1, cr1);
}
static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
struct driver_data *drv_data = spi_master_get_devdata(master);
/* Disable the SSP now */
- write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
- drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
return 0;
}
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct driver_data *drv_data;
struct ssp_device *ssp;
int status;
+ u32 tmp;
platform_info = dev_get_platdata(dev);
if (!platform_info) {
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->max_clk_rate = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
- write_SSCR0(0, drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
- write_SSCR1(QUARK_X1000_SSCR1_RxTresh(
- RX_THRESH_QUARK_X1000_DFLT) |
- QUARK_X1000_SSCR1_TxTresh(
- TX_THRESH_QUARK_X1000_DFLT),
- drv_data->ioaddr);
+ tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
+ | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
/* using the Motorola SPI protocol and use 8 bit frame */
- write_SSCR0(QUARK_X1000_SSCR0_Motorola
- | QUARK_X1000_SSCR0_DataSize(8),
- drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ QUARK_X1000_SSCR0_Motorola
+ | QUARK_X1000_SSCR0_DataSize(8));
break;
default:
- write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
- SSCR1_TxTresh(TX_THRESH_DFLT),
- drv_data->ioaddr);
- write_SSCR0(SSCR0_SCR(2)
- | SSCR0_Motorola
- | SSCR0_DataSize(8),
- drv_data->ioaddr);
+ tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+ SSCR1_TxTresh(TX_THRESH_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+ tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
}
if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSTO, 0);
if (!is_quark_x1000_ssp(drv_data))
- write_SSPSP(0, drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSPSP, 0);
- lpss_ssp_setup(drv_data);
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_setup(drv_data);
tasklet_init(&drv_data->pump_transfers, pump_transfers,
(unsigned long)drv_data);
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
/* Disable the SSP at the peripheral and SOC level */
- write_SSCR0(0, drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
clk_disable_unprepare(ssp->clk);
/* Release DMA */
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
status = spi_master_suspend(drv_data->master);
if (status != 0)
return status;
- write_SSCR0(0, drv_data->ioaddr);
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(ssp->clk);
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev)
clk_prepare_enable(ssp->clk);
/* Restore LPSS private register bits */
- lpss_ssp_setup(drv_data);
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_setup(drv_data);
/* Start the queue running */
status = spi_master_resume(drv_data->master);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 6bec59c90cd4..85a58c906869 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -115,23 +115,17 @@ struct chip_data {
void (*cs_control)(u32 command);
};
-#define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void const __iomem *p) \
-{ return __raw_readl(p + (off)); } \
-\
-static inline void write_##reg(u32 v, void __iomem *p) \
-{ __raw_writel(v, p + (off)); }
-
-DEFINE_SSP_REG(SSCR0, 0x00)
-DEFINE_SSP_REG(SSCR1, 0x04)
-DEFINE_SSP_REG(SSSR, 0x08)
-DEFINE_SSP_REG(SSITR, 0x0c)
-DEFINE_SSP_REG(SSDR, 0x10)
-DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */
-DEFINE_SSP_REG(SSTO, 0x28)
-DEFINE_SSP_REG(SSPSP, 0x2c)
-DEFINE_SSP_REG(SSITF, SSITF)
-DEFINE_SSP_REG(SSIRF, SSIRF)
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
+ unsigned reg)
+{
+ return __raw_readl(drv_data->ioaddr + reg);
+}
+
+static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
+ unsigned reg, u32 val)
+{
+ __raw_writel(val, drv_data->ioaddr + reg);
+}
#define START_STATE ((void *)0)
#define RUNNING_STATE ((void *)1)
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
{
- void __iomem *reg = drv_data->ioaddr;
-
if (drv_data->ssp_type == CE4100_SSP ||
drv_data->ssp_type == QUARK_X1000_SSP)
- val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
+ val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
- write_SSSR(val, reg);
+ pxa2xx_spi_write(drv_data, SSSR, val);
}
extern int pxa2xx_spi_flush(struct driver_data *drv_data);
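[Note on the spi-pxa2xx.h hunks above] The per-register read_SSCR0()/write_SSCR0()-style accessors generated by DEFINE_SSP_REG() are collapsed into two helpers keyed by register offset. A userspace mock of that shape, with an ordinary array standing in for the __iomem window; the real helpers use __raw_readl()/__raw_writel(), and the offsets below come from the removed DEFINE_SSP_REG() table:

#include <stdio.h>
#include <stdint.h>

#define SSCR0 0x00
#define SSCR1 0x04
#define SSSR  0x08

struct driver_data { volatile uint32_t *ioaddr; };	/* stand-in for the MMIO base */

static inline uint32_t pxa2xx_spi_read(const struct driver_data *d, unsigned reg)
{
	return d->ioaddr[reg / 4];
}

static inline void pxa2xx_spi_write(const struct driver_data *d, unsigned reg,
				    uint32_t val)
{
	d->ioaddr[reg / 4] = val;
}

int main(void)
{
	uint32_t regs[16] = { 0 };
	struct driver_data drv = { .ioaddr = regs };

	pxa2xx_spi_write(&drv, SSCR0, 0x87);
	pxa2xx_spi_write(&drv, SSCR1, pxa2xx_spi_read(&drv, SSCR1) | 0x1);
	printf("SSCR0=0x%02x SSCR1=0x%02x SSSR=0x%02x\n",
	       pxa2xx_spi_read(&drv, SSCR0),
	       pxa2xx_spi_read(&drv, SSCR1),
	       pxa2xx_spi_read(&drv, SSSR));
	return 0;
}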
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index e7fb5a0d2e8d..ff9cdbdb6672 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
struct spi_qup *controller = spi_master_get_devdata(spi->master);
- u32 config, iomode, mode;
+ u32 config, iomode, mode, control;
int ret, n_words, w_size;
if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+ control = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+ if (spi->mode & SPI_CPOL)
+ control |= SPI_IO_C_CLK_IDLE_HIGH;
+ else
+ control &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+ writel_relaxed(control, controller->base + SPI_IO_CONTROL);
+
config = readl_relaxed(controller->base + SPI_CONFIG);
if (spi->mode & SPI_LOOP)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index daabbabd26b0..1a777dc261d6 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
rs->state &= ~TXBUSY;
spin_unlock_irqrestore(&rs->lock, flags);
+ rxdesc = NULL;
if (rs->rx) {
rxconf.direction = rs->dma_rx.direction;
rxconf.src_addr = rs->dma_rx.addr;
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
rxdesc->callback_param = rs;
}
+ txdesc = NULL;
if (rs->tx) {
txconf.direction = rs->dma_tx.direction;
txconf.dst_addr = rs->dma_tx.addr;
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
}
/* rx must be started before tx due to spi instinct */
- if (rs->rx) {
+ if (rxdesc) {
spin_lock_irqsave(&rs->lock, flags);
rs->state |= RXBUSY;
spin_unlock_irqrestore(&rs->lock, flags);
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
dma_async_issue_pending(rs->dma_rx.ch);
}
- if (rs->tx) {
+ if (txdesc) {
spin_lock_irqsave(&rs->lock, flags);
rs->state |= TXBUSY;
spin_unlock_irqrestore(&rs->lock, flags);
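[Note on the spi-rockchip.c hunks above] Initialising rxdesc/txdesc to NULL and gating the issue path on the descriptor pointers (rather than on rs->rx / rs->tx) means a direction is only started if its descriptor was actually prepared. A userspace sketch of that control flow; prep() is merely a stand-in for dmaengine_prep_slave_sg() and every name is illustrative:

#include <stdio.h>
#include <stddef.h>

struct desc { const char *name; };

/* stand-in for a prepare call that may fail and return NULL */
static struct desc *prep(struct desc *d, const char *name, int fail)
{
	if (fail)
		return NULL;
	d->name = name;
	return d;
}

int main(void)
{
	struct desc rx_storage, tx_storage;
	struct desc *rxdesc = NULL;	/* initialised up front, as in the patch */
	struct desc *txdesc = NULL;
	int have_rx_buf = 1, have_tx_buf = 1;

	if (have_rx_buf)
		rxdesc = prep(&rx_storage, "rx", 1 /* simulate a prep failure */);
	if (have_tx_buf)
		txdesc = prep(&tx_storage, "tx", 0);

	/* issue only what was prepared; rx is started before tx */
	if (rxdesc)
		printf("issue %s\n", rxdesc->name);
	if (txdesc)
		printf("issue %s\n", txdesc->name);
	return 0;
}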
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2071f788c6fb..46ce47076e63 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 37b19836f5cb..9231c34b5a5c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 237f2e7a7179..5a56acf8a43e 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index fc29233d0650..20e800e70442 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -16,11 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/clk.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 96a5fc0878d8..e57eec0b2f46 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,7 +82,9 @@ struct sh_msiof_spi_priv {
#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT 2
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
static struct {
unsigned short div;
- unsigned short scr;
-} const sh_msiof_spi_clk_table[] = {
- { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
- { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
- { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
- { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
- { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
- { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
- { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
- { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
- { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
- { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
- { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
+ unsigned short brdv;
+} const sh_msiof_spi_div_table[] = {
+ { 1, SCR_BRDV_DIV_1 },
+ { 2, SCR_BRDV_DIV_2 },
+ { 4, SCR_BRDV_DIV_4 },
+ { 8, SCR_BRDV_DIV_8 },
+ { 16, SCR_BRDV_DIV_16 },
+ { 32, SCR_BRDV_DIV_32 },
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
unsigned long parent_rate, u32 spi_hz)
{
unsigned long div = 1024;
+ u32 brps, scr;
size_t k;
if (!WARN_ON(!spi_hz || !parent_rate))
div = DIV_ROUND_UP(parent_rate, spi_hz);
- /* TODO: make more fine grained */
-
- for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) {
- if (sh_msiof_spi_clk_table[k].div >= div)
+ for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
+ brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+ if (brps <= 32) /* max of brdv is 32 */
break;
}
- k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
+ k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
- sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
+ scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
+ sh_msiof_write(p, TSCR, scr);
if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
- sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+ sh_msiof_write(p, RSCR, scr);
+}
+
+static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
+{
+ /*
+ * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
+ * b'000 : 0
+ * b'001 : 100
+ * b'010 : 200
+ * b'011 (SYNCDL only) : 300
+ * b'101 : 50
+ * b'110 : 150
+ */
+ if (dtdl_or_syncdl % 100)
+ return dtdl_or_syncdl / 100 + 5;
+ else
+ return dtdl_or_syncdl / 100;
+}
+
+static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
+{
+ u32 val;
+
+ if (!p->info)
+ return 0;
+
+ /* check if DTDL and SYNCDL is allowed value */
+ if (p->info->dtdl > 200 || p->info->syncdl > 300) {
+ dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
+ return 0;
+ }
+
+ /* check if the sum of DTDL and SYNCDL becomes an integer value */
+ if ((p->info->dtdl + p->info->syncdl) % 100) {
+ dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
+ return 0;
+ }
+
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+
+ return val;
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- pm_runtime_put_sync(&p->pdev->dev);
+ pm_runtime_put(&p->pdev->dev);
return 0;
}
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
- ret = wait_for_completion_timeout(&p->done, HZ);
- if (!ret) {
+ if (!wait_for_completion_timeout(&p->done, HZ)) {
dev_err(&p->pdev->dev, "PIO timeout\n");
ret = -ETIMEDOUT;
goto stop_reset;
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
- ret = wait_for_completion_timeout(&p->done, HZ);
- if (!ret) {
+ if (!wait_for_completion_timeout(&p->done, HZ)) {
dev_err(&p->pdev->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
goto stop_reset;
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
&info->tx_fifo_override);
of_property_read_u32(np, "renesas,rx-fifo-size",
&info->rx_fifo_override);
+ of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
+ of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
info->num_chipselect = num_cs;
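For illustration, a board that does not use DT could pass the same delays through platform data; a minimal sketch, assuming the dtdl/syncdl fields that this series adds to struct sh_msiof_spi_info and purely hypothetical values that pass the checks above (each within range, sum a multiple of 100):

	#include <linux/spi/sh_msiof.h>

	/* Hypothetical board data: DTDL of half a cycle, SYNCDL of one and a
	 * half cycles; 50 + 150 = 200 is a multiple of 100, so both are kept. */
	static struct sh_msiof_spi_info msiof0_platform_data = {
		.num_chipselect	= 1,
		.dtdl		= 50,
		.syncdl		= 150,
	};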
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 1cfc906dd174..502501187c9e 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index d075191476f0..f5715c9f68b0 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,prima2-spi", },
- { .compatible = "sirf,marco-spi", },
{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644
index 000000000000..2faeaa7b57a8
--- /dev/null
+++ b/drivers/spi/spi-st-ssc4.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2008-2014 STMicroelectronics Limited
+ *
+ * Author: Angus Clark <Angus.Clark@st.com>
+ * Patrice Chotard <patrice.chotard@st.com>
+ * Lee Jones <lee.jones@linaro.org>
+ *
+ * SPI master mode controller driver, used in STMicroelectronics devices.
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License Version 2.0 only. See linux/COPYING for more information.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+/* SSC registers */
+#define SSC_BRG 0x000
+#define SSC_TBUF 0x004
+#define SSC_RBUF 0x008
+#define SSC_CTL 0x00C
+#define SSC_IEN 0x010
+#define SSC_I2C 0x018
+
+/* SSC Control */
+#define SSC_CTL_DATA_WIDTH_9 0x8
+#define SSC_CTL_DATA_WIDTH_MSK 0xf
+#define SSC_CTL_BM 0xf
+#define SSC_CTL_HB BIT(4)
+#define SSC_CTL_PH BIT(5)
+#define SSC_CTL_PO BIT(6)
+#define SSC_CTL_SR BIT(7)
+#define SSC_CTL_MS BIT(8)
+#define SSC_CTL_EN BIT(9)
+#define SSC_CTL_LPB BIT(10)
+#define SSC_CTL_EN_TX_FIFO BIT(11)
+#define SSC_CTL_EN_RX_FIFO BIT(12)
+#define SSC_CTL_EN_CLST_RX BIT(13)
+
+/* SSC Interrupt Enable */
+#define SSC_IEN_TEEN BIT(2)
+
+#define FIFO_SIZE 8
+
+struct spi_st {
+ /* SSC SPI Controller */
+ void __iomem *base;
+ struct clk *clk;
+ struct device *dev;
+
+ /* SSC SPI current transaction */
+ const u8 *tx_ptr;
+ u8 *rx_ptr;
+ u16 bytes_per_word;
+ unsigned int words_remaining;
+ unsigned int baud;
+ struct completion done;
+};
+
+static int spi_st_clk_enable(struct spi_st *spi_st)
+{
+ /*
+ * Current platforms use one of the core clocks for SPI and I2C.
+ * If we attempt to disable the clock, the system will hang.
+ *
+ * TODO: Remove this when platform supports power domains.
+ */
+ return 0;
+
+ return clk_prepare_enable(spi_st->clk);
+}
+
+static void spi_st_clk_disable(struct spi_st *spi_st)
+{
+ /*
+ * Current platforms use one of the core clocks for SPI and I2C.
+ * If we attempt to disable the clock, the system will hang.
+ *
+ * TODO: Remove this when platform supports power domains.
+ */
+ return;
+
+ clk_disable_unprepare(spi_st->clk);
+}
+
+/* Load the TX FIFO */
+static void ssc_write_tx_fifo(struct spi_st *spi_st)
+{
+ unsigned int count, i;
+ uint32_t word = 0;
+
+ if (spi_st->words_remaining > FIFO_SIZE)
+ count = FIFO_SIZE;
+ else
+ count = spi_st->words_remaining;
+
+ for (i = 0; i < count; i++) {
+ if (spi_st->tx_ptr) {
+ if (spi_st->bytes_per_word == 1) {
+ word = *spi_st->tx_ptr++;
+ } else {
+ word = *spi_st->tx_ptr++;
+ word = *spi_st->tx_ptr++ | (word << 8);
+ }
+ }
+ writel_relaxed(word, spi_st->base + SSC_TBUF);
+ }
+}
+
+/* Read the RX FIFO */
+static void ssc_read_rx_fifo(struct spi_st *spi_st)
+{
+ unsigned int count, i;
+ uint32_t word = 0;
+
+ if (spi_st->words_remaining > FIFO_SIZE)
+ count = FIFO_SIZE;
+ else
+ count = spi_st->words_remaining;
+
+ for (i = 0; i < count; i++) {
+ word = readl_relaxed(spi_st->base + SSC_RBUF);
+
+ if (spi_st->rx_ptr) {
+ if (spi_st->bytes_per_word == 1) {
+ *spi_st->rx_ptr++ = (uint8_t)word;
+ } else {
+ *spi_st->rx_ptr++ = (word >> 8);
+ *spi_st->rx_ptr++ = word & 0xff;
+ }
+ }
+ }
+ spi_st->words_remaining -= count;
+}
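A short illustration of the packing used by the two FIFO helpers above (not part of the driver): with bytes_per_word == 2 the first buffer byte travels in the upper half of each 16-bit word, and the read side unpacks in the same order, so wire order is preserved for the even-length 8-bit transfers folded into 16-bit words below.

	/* Example: tx buffer {0xAA, 0xBB, 0xCC, 0xDD}, bytes_per_word == 2:
	 * ssc_write_tx_fifo() writes 0xAABB then 0xCCDD to SSC_TBUF;
	 * ssc_read_rx_fifo() reading 0xAABB back stores 0xAA, then 0xBB.
	 */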
+
+static int spi_st_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+ uint32_t ctl = 0;
+
+ /* Setup transfer */
+ spi_st->tx_ptr = t->tx_buf;
+ spi_st->rx_ptr = t->rx_buf;
+
+ if (spi->bits_per_word > 8) {
+ /*
+ * Anything greater than 8 bits-per-word requires 2
+ * bytes-per-word in the RX/TX buffers
+ */
+ spi_st->bytes_per_word = 2;
+ spi_st->words_remaining = t->len / 2;
+
+ } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
+ /*
+ * If transfer is even-length, and 8 bits-per-word, then
+ * implement as half-length 16 bits-per-word transfer
+ */
+ spi_st->bytes_per_word = 2;
+ spi_st->words_remaining = t->len / 2;
+
+ /* Set SSC_CTL to 16 bits-per-word */
+ ctl = readl_relaxed(spi_st->base + SSC_CTL);
+ writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
+
+ readl_relaxed(spi_st->base + SSC_RBUF);
+
+ } else {
+ spi_st->bytes_per_word = 1;
+ spi_st->words_remaining = t->len;
+ }
+
+ reinit_completion(&spi_st->done);
+
+ /* Start transfer by writing to the TX FIFO */
+ ssc_write_tx_fifo(spi_st);
+ writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
+
+ /* Wait for transfer to complete */
+ wait_for_completion(&spi_st->done);
+
+ /* Restore SSC_CTL if necessary */
+ if (ctl)
+ writel_relaxed(ctl, spi_st->base + SSC_CTL);
+
+ spi_finalize_current_transfer(spi->master);
+
+ return t->len;
+}
+
+static void spi_st_cleanup(struct spi_device *spi)
+{
+ int cs = spi->cs_gpio;
+
+ if (gpio_is_valid(cs))
+ devm_gpio_free(&spi->dev, cs);
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
+static int spi_st_setup(struct spi_device *spi)
+{
+ struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+ u32 spi_st_clk, sscbrg, var;
+ u32 hz = spi->max_speed_hz;
+ int cs = spi->cs_gpio;
+ int ret;
+
+ if (!hz) {
+ dev_err(&spi->dev, "max_speed_hz unspecified\n");
+ return -EINVAL;
+ }
+
+ if (!gpio_is_valid(cs)) {
+ dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
+ return -EINVAL;
+ }
+
+ if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
+ dev_err(&spi->dev, "could not request gpio:%d\n", cs);
+ return -EINVAL;
+ }
+
+ ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
+ if (ret)
+ return ret;
+
+ spi_st_clk = clk_get_rate(spi_st->clk);
+
+ /* Set SSC_BRG */
+ sscbrg = spi_st_clk / (2 * hz);
+ if (sscbrg < 0x07 || sscbrg > BIT(16)) {
+ dev_err(&spi->dev,
+ "divider %u for %u Hz is outside the valid range\n", sscbrg, hz);
+ return -EINVAL;
+ }
+
+ spi_st->baud = spi_st_clk / (2 * sscbrg);
+ if (sscbrg == BIT(16)) /* 16-bit counter wraps */
+ sscbrg = 0x0;
+
+ writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
+
+ dev_dbg(&spi->dev,
+ "setting baudrate: target=%u Hz, actual=%u Hz, sscbrg=%u\n",
+ hz, spi_st->baud, sscbrg);
+
+ /* Set SSC_CTL and enable SSC */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_MS;
+
+ if (spi->mode & SPI_CPOL)
+ var |= SSC_CTL_PO;
+ else
+ var &= ~SSC_CTL_PO;
+
+ if (spi->mode & SPI_CPHA)
+ var |= SSC_CTL_PH;
+ else
+ var &= ~SSC_CTL_PH;
+
+ if ((spi->mode & SPI_LSB_FIRST) == 0)
+ var |= SSC_CTL_HB;
+ else
+ var &= ~SSC_CTL_HB;
+
+ if (spi->mode & SPI_LOOP)
+ var |= SSC_CTL_LPB;
+ else
+ var &= ~SSC_CTL_LPB;
+
+ var &= ~SSC_CTL_DATA_WIDTH_MSK;
+ var |= (spi->bits_per_word - 1);
+
+ var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+ var |= SSC_CTL_EN;
+
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ /* Clear the status register */
+ readl_relaxed(spi_st->base + SSC_RBUF);
+
+ return 0;
+}
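To make the divider arithmetic above concrete, a worked example with an assumed (hypothetical) 100 MHz SSC input clock:

	/* hz = 5000000 -> sscbrg = 100000000 / (2 * 5000000) = 10,
	 *                 actual baud = 100000000 / (2 * 10) = 5.000 MHz
	 * hz = 3000000 -> sscbrg = 100000000 / (2 * 3000000) = 16,
	 *                 actual baud = 100000000 / (2 * 16) = 3.125 MHz
	 * Integer division truncates sscbrg, so the achieved rate is never
	 * below the requested one when the request does not divide evenly.
	 */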
+
+/* Interrupt fired when TX shift register becomes empty */
+static irqreturn_t spi_st_irq(int irq, void *dev_id)
+{
+ struct spi_st *spi_st = (struct spi_st *)dev_id;
+
+ /* Read RX FIFO */
+ ssc_read_rx_fifo(spi_st);
+
+ /* Fill TX FIFO */
+ if (spi_st->words_remaining) {
+ ssc_write_tx_fifo(spi_st);
+ } else {
+ /* TX/RX complete */
+ writel_relaxed(0x0, spi_st->base + SSC_IEN);
+ /*
+ * read SSC_IEN to ensure that this bit is set
+ * before re-enabling interrupt
+ */
+ readl(spi_st->base + SSC_IEN);
+ complete(&spi_st->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int spi_st_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct resource *res;
+ struct spi_st *spi_st;
+ int irq, ret = 0;
+ u32 var;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
+ if (!master)
+ return -ENOMEM;
+
+ master->dev.of_node = np;
+ master->mode_bits = MODEBITS;
+ master->setup = spi_st_setup;
+ master->cleanup = spi_st_cleanup;
+ master->transfer_one = spi_st_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ spi_st = spi_master_get_devdata(master);
+
+ spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
+ if (IS_ERR(spi_st->clk)) {
+ dev_err(&pdev->dev, "Unable to request clock\n");
+ return PTR_ERR(spi_st->clk);
+ }
+
+ ret = spi_st_clk_enable(spi_st);
+ if (ret)
+ return ret;
+
+ init_completion(&spi_st->done);
+
+ /* Get resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi_st->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_st->base)) {
+ ret = PTR_ERR(spi_st->base);
+ goto clk_disable;
+ }
+
+ /* Disable I2C and Reset SSC */
+ writel_relaxed(0x0, spi_st->base + SSC_I2C);
+ var = readw_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_SR;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ udelay(1);
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var &= ~SSC_CTL_SR;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ /* Set SSC into slave mode before reconfiguring PIO pins */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var &= ~SSC_CTL_MS;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "IRQ missing or invalid\n");
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
+ pdev->name, spi_st);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
+ goto clk_disable;
+ }
+
+ /* by default the device is on */
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ platform_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto clk_disable;
+ }
+
+ return 0;
+
+clk_disable:
+ spi_st_clk_disable(spi_st);
+
+ return ret;
+}
+
+static int spi_st_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
+ spi_st_clk_disable(spi_st);
+
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_st_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
+ writel_relaxed(0, spi_st->base + SSC_IEN);
+ pinctrl_pm_select_sleep_state(dev);
+
+ spi_st_clk_disable(spi_st);
+
+ return 0;
+}
+
+static int spi_st_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_st_clk_enable(spi_st);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_st_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int spi_st_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops spi_st_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
+ SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
+};
+
+static struct of_device_id stm_spi_match[] = {
+ { .compatible = "st,comms-ssc4-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm_spi_match);
+
+static struct platform_driver spi_st_driver = {
+ .driver = {
+ .name = "spi-st",
+ .pm = &spi_st_pm,
+ .of_match_table = of_match_ptr(stm_spi_match),
+ },
+ .probe = spi_st_probe,
+ .remove = spi_st_remove,
+};
+module_platform_driver(spi_st_driver);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("STM SSC SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 6146c4cd6583..884a716e50cb 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
- int wlen, count, ret;
+ int wlen, count;
unsigned int cmd;
const u8 *txbuf;
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
}
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
+ if (!wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT)) {
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
- int wlen, count, ret;
+ int wlen, count;
unsigned int cmd;
u8 *rxbuf;
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
+ if (!wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index be692ad50442..93dfcee0f987 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/delay.h>
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 79bd84f43430..133f53a9c1d4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -22,6 +22,8 @@
#include <linux/spi/xilinx_spi.h>
#include <linux/io.h>
+#define XILINX_SPI_MAX_CS 32
+
#define XILINX_SPI_NAME "xilinx_spi"
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -34,7 +36,8 @@
#define XSPI_CR_MASTER_MODE 0x04
#define XSPI_CR_CPOL 0x08
#define XSPI_CR_CPHA 0x10
-#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL)
+#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
+ XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
#define XSPI_CR_TXFIFO_RESET 0x20
#define XSPI_CR_RXFIFO_RESET 0x40
#define XSPI_CR_MANUAL_SSELECT 0x80
@@ -85,12 +88,11 @@ struct xilinx_spi {
u8 *rx_ptr; /* pointer in the Rx buffer */
const u8 *tx_ptr; /* pointer in the Tx buffer */
- int remaining_bytes; /* the number of bytes left to transfer */
- u8 bits_per_word;
+ u8 bytes_per_word;
+ int buffer_size; /* buffer size in words */
+ u32 cs_inactive; /* Level of the CS pins when inactive */
unsigned int (*read_fn)(void __iomem *);
void (*write_fn)(u32, void __iomem *);
- void (*tx_fn)(struct xilinx_spi *);
- void (*rx_fn)(struct xilinx_spi *);
};
static void xspi_write32(u32 val, void __iomem *addr)
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr)
return ioread32be(addr);
}
-static void xspi_tx8(struct xilinx_spi *xspi)
+static void xilinx_spi_tx(struct xilinx_spi *xspi)
{
- xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
- xspi->tx_ptr++;
-}
-
-static void xspi_tx16(struct xilinx_spi *xspi)
-{
- xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
- xspi->tx_ptr += 2;
-}
+ u32 data = 0;
-static void xspi_tx32(struct xilinx_spi *xspi)
-{
- xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
- xspi->tx_ptr += 4;
-}
-
-static void xspi_rx8(struct xilinx_spi *xspi)
-{
- u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
- if (xspi->rx_ptr) {
- *xspi->rx_ptr = data & 0xff;
- xspi->rx_ptr++;
+ if (!xspi->tx_ptr) {
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ return;
}
-}
-static void xspi_rx16(struct xilinx_spi *xspi)
-{
- u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
- if (xspi->rx_ptr) {
- *(u16 *)(xspi->rx_ptr) = data & 0xffff;
- xspi->rx_ptr += 2;
+ switch (xspi->bytes_per_word) {
+ case 1:
+ data = *(u8 *)(xspi->tx_ptr);
+ break;
+ case 2:
+ data = *(u16 *)(xspi->tx_ptr);
+ break;
+ case 4:
+ data = *(u32 *)(xspi->tx_ptr);
+ break;
}
+
+ xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += xspi->bytes_per_word;
}
-static void xspi_rx32(struct xilinx_spi *xspi)
+static void xilinx_spi_rx(struct xilinx_spi *xspi)
{
u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
- if (xspi->rx_ptr) {
+
+ if (!xspi->rx_ptr)
+ return;
+
+ switch (xspi->bytes_per_word) {
+ case 1:
+ *(u8 *)(xspi->rx_ptr) = data;
+ break;
+ case 2:
+ *(u16 *)(xspi->rx_ptr) = data;
+ break;
+ case 4:
*(u32 *)(xspi->rx_ptr) = data;
- xspi->rx_ptr += 4;
+ break;
}
+
+ xspi->rx_ptr += xspi->bytes_per_word;
}
static void xspi_init_hw(struct xilinx_spi *xspi)
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
/* Reset the SPI device */
xspi->write_fn(XIPIF_V123B_RESET_MASK,
regs_base + XIPIF_V123B_RESETR_OFFSET);
- /* Disable all the interrupts just in case */
- xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
- /* Enable the global IPIF interrupt */
- xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
- regs_base + XIPIF_V123B_DGIER_OFFSET);
+ /* Enable the transmit empty interrupt, which we use to determine
+ * progress on the transmission.
+ */
+ xspi->write_fn(XSPI_INTR_TX_EMPTY,
+ regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
/* Deselect the slave on the SPI bus */
xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
/* Disable the transmitter, enable Manual Slave Select Assertion,
* put SPI controller into master mode, and enable it */
- xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
- XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
- XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
+ xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
+ XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
+ regs_base + XSPI_CR_OFFSET);
}
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u16 cr;
+ u32 cs;
if (is_on == BITBANG_CS_INACTIVE) {
/* Deselect the slave on the SPI bus */
- xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
- } else if (is_on == BITBANG_CS_ACTIVE) {
- /* Set the SPI clock phase and polarity */
- u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
- & ~XSPI_CR_MODE_MASK;
- if (spi->mode & SPI_CPHA)
- cr |= XSPI_CR_CPHA;
- if (spi->mode & SPI_CPOL)
- cr |= XSPI_CR_CPOL;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-
- /* We do not check spi->max_speed_hz here as the SPI clock
- * frequency is not software programmable (the IP block design
- * parameter)
- */
-
- /* Activate the chip select */
- xspi->write_fn(~(0x0001 << spi->chip_select),
- xspi->regs + XSPI_SSR_OFFSET);
+ xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
+ return;
}
+
+ /* Set the SPI clock phase and polarity */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
+ if (spi->mode & SPI_CPHA)
+ cr |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cr |= XSPI_CR_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ cr |= XSPI_CR_LSB_FIRST;
+ if (spi->mode & SPI_LOOP)
+ cr |= XSPI_CR_LOOP;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+ /* We do not check spi->max_speed_hz here as the SPI clock
+ * frequency is not software programmable (the IP block design
+ * parameter)
+ */
+
+ cs = xspi->cs_inactive;
+ cs ^= BIT(spi->chip_select);
+
+ /* Activate the chip select */
+ xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- return 0;
-}
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
-static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
-{
- u8 sr;
+ if (spi->mode & SPI_CS_HIGH)
+ xspi->cs_inactive &= ~BIT(spi->chip_select);
+ else
+ xspi->cs_inactive |= BIT(spi->chip_select);
- /* Fill the Tx FIFO with as many bytes as possible */
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
- if (xspi->tx_ptr)
- xspi->tx_fn(xspi);
- else
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- xspi->remaining_bytes -= xspi->bits_per_word / 8;
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- }
+ return 0;
}
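A worked example of the cs_inactive bookkeeping above together with the XOR in xilinx_spi_chipselect() (hypothetical slave configuration):

	/* Two slaves: chip_select 0 active-low, chip_select 1 with SPI_CS_HIGH.
	 * After setup_transfer() cs_inactive == 0xfffffffd (bit 1 cleared).
	 * Selecting slave 0 writes 0xfffffffd ^ BIT(0) == 0xfffffffc (bit 0 low),
	 * selecting slave 1 writes 0xfffffffd ^ BIT(1) == 0xffffffff (bit 1 high),
	 * and deselect restores 0xfffffffd, the inactive level of every pin.
	 */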
static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u32 ipif_ier;
+ int remaining_words; /* the number of words left to transfer */
+ bool use_irq = false;
+ u16 cr = 0;
/* We get here with transmitter inhibited */
xspi->tx_ptr = t->tx_buf;
xspi->rx_ptr = t->rx_buf;
- xspi->remaining_bytes = t->len;
+ remaining_words = t->len / xspi->bytes_per_word;
reinit_completion(&xspi->done);
+ if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+ use_irq = true;
+ xspi->write_fn(XSPI_INTR_TX_EMPTY,
+ xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ /* Enable the global IPIF interrupt */
+ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ /* Inhibit irq to avoid spurious irqs on tx_empty */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+ }
- /* Enable the transmit empty interrupt, which we use to determine
- * progress on the transmission.
- */
- ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
- xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
- xspi->regs + XIPIF_V123B_IIER_OFFSET);
+ while (remaining_words) {
+ int n_words, tx_words, rx_words;
- for (;;) {
- u16 cr;
- u8 sr;
+ n_words = min(remaining_words, xspi->buffer_size);
- xilinx_spi_fill_tx_fifo(xspi);
+ tx_words = n_words;
+ while (tx_words--)
+ xilinx_spi_tx(xspi);
/* Start the transfer by not inhibiting the transmitter any
* longer
*/
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
- ~XSPI_CR_TRANS_INHIBIT;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- wait_for_completion(&xspi->done);
+ if (use_irq) {
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ wait_for_completion(&xspi->done);
+ } else
+ while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
+ XSPI_SR_TX_EMPTY_MASK))
+ ;
/* A transmit has just completed. Process received data and
* check for more data to transmit. Always inhibit the
* transmitter while the Isr refills the transmit register/FIFO,
* or make sure it is stopped if we're done.
*/
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ if (use_irq)
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
xspi->regs + XSPI_CR_OFFSET);
/* Read out all the data from the Rx FIFO */
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
- xspi->rx_fn(xspi);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- }
-
- /* See if there is more data to send */
- if (xspi->remaining_bytes <= 0)
- break;
+ rx_words = n_words;
+ while (rx_words--)
+ xilinx_spi_rx(xspi);
+
+ remaining_words -= n_words;
}
- /* Disable the transmit empty interrupt */
- xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
+ if (use_irq)
+ xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- return t->len - xspi->remaining_bytes;
+ return t->len;
}
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+{
+ u8 sr;
+ int n_words = 0;
+
+ /*
+ * Before the buffer_size detection we reset the core
+ * to make sure we start with a clean state.
+ */
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+
+ /* Fill the Tx FIFO with as many words as possible */
+ do {
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ n_words++;
+ } while (!(sr & XSPI_SR_TX_FULL_MASK));
+
+ return n_words;
+}
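For context (hypothetical core configuration): a core synthesized with a 16-word TX FIFO lets the loop above write 16 dummy words before XSPI_SR_TX_FULL_MASK is seen, so buffer_size == 16; that value then decides how transfers are driven.

	/* With buffer_size == 16 (hypothetical), in xilinx_spi_txrx_bufs():
	 *   remaining_words <= 16              -> use_irq == false, busy-poll TX-empty
	 *   remaining_words >  16 && irq >= 0  -> use_irq == true, sleep on the
	 *                                         completion between 16-word bursts
	 */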
+
static const struct of_device_id xilinx_spi_of_match[] = {
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (num_cs > XILINX_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ return -EINVAL;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+ SPI_CS_HIGH;
xspi = spi_master_get_devdata(master);
+ xspi->cs_inactive = 0xffffffff;
xspi->bitbang.master = master;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev)
}
master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
- xspi->bits_per_word = bits_per_word;
- if (xspi->bits_per_word == 8) {
- xspi->tx_fn = xspi_tx8;
- xspi->rx_fn = xspi_rx8;
- } else if (xspi->bits_per_word == 16) {
- xspi->tx_fn = xspi_tx16;
- xspi->rx_fn = xspi_rx16;
- } else if (xspi->bits_per_word == 32) {
- xspi->tx_fn = xspi_tx32;
- xspi->rx_fn = xspi_rx32;
- } else {
- ret = -EINVAL;
- goto put_master;
- }
-
- /* SPI controller initializations */
- xspi_init_hw(xspi);
+ xspi->bytes_per_word = bits_per_word / 8;
+ xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
xspi->irq = platform_get_irq(pdev, 0);
- if (xspi->irq < 0) {
- ret = xspi->irq;
- goto put_master;
+ if (xspi->irq >= 0) {
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+ dev_name(&pdev->dev), xspi);
+ if (ret)
+ goto put_master;
}
- /* Register for SPI Interrupt */
- ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
- dev_name(&pdev->dev), xspi);
- if (ret)
- goto put_master;
+ /* SPI controller initializations */
+ xspi_init_hw(xspi);
ret = spi_bitbang_start(&xspi->bitbang);
if (ret) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 66a70e9bc743..c64a3e59fce3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- int ms = 1;
+ unsigned long ms = 1;
spi_set_cs(msg->spi, true);
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
/**
- * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * __spi_pump_messages - function which processes spi message queue
+ * @master: master to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
*
* This function checks if there is any spi message in the queue that
* needs processing and if so call out to the driver to initialize hardware
* and transfer each message.
*
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
*/
-static void spi_pump_messages(struct kthread_work *work)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
- struct spi_master *master =
- container_of(work, struct spi_master, pump_messages);
unsigned long flags;
bool was_busy = false;
int ret;
- /* Lock queue and check for queue work */
+ /* Lock queue */
spin_lock_irqsave(&master->queue_lock, flags);
+
+ /* Make sure we are not already running a message */
+ if (master->cur_msg) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* If another context is idling the device then defer */
+ if (master->idling) {
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* Check if the queue is idle */
if (list_empty(&master->queue) || !master->running) {
if (!master->busy) {
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
}
+
+ /* Only do teardown in the thread */
+ if (!in_kthread) {
+ queue_kthread_work(&master->kworker,
+ &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
master->busy = false;
+ master->idling = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
+
kfree(master->dummy_rx);
master->dummy_rx = NULL;
kfree(master->dummy_tx);
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
pm_runtime_put_autosuspend(master->dev.parent);
}
trace_spi_master_idle(master);
- return;
- }
- /* Make sure we are not already running a message */
- if (master->cur_msg) {
+ spin_lock_irqsave(&master->queue_lock, flags);
+ master->idling = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
}
+
/* Extract head of queue */
master->cur_msg =
list_first_entry(&master->queue, struct spi_message, queue);
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
}
}
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+ struct spi_master *master =
+ container_of(work, struct spi_master, pump_messages);
+
+ __spi_pump_messages(master, true);
+}
+
static int spi_init_queue(struct spi_master *master)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
- INIT_LIST_HEAD(&master->queue);
- spin_lock_init(&master->queue_lock);
-
master->running = false;
master->busy = false;
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
return 0;
}
-/**
- * spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- */
-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+static int __spi_queued_transfer(struct spi_device *spi,
+ struct spi_message *msg,
+ bool need_pump)
{
struct spi_master *master = spi->master;
unsigned long flags;
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &master->queue);
- if (!master->busy)
+ if (!master->busy && need_pump)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return 0;
}
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to handled is queued to driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ return __spi_queued_transfer(spi, msg, true);
+}
+
static int spi_master_initialize_queue(struct spi_master *master)
{
int ret;
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
dynamic = 1;
}
+ INIT_LIST_HEAD(&master->queue);
+ spin_lock_init(&master->queue_lock);
spin_lock_init(&master->bus_lock_spinlock);
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
DECLARE_COMPLETION_ONSTACK(done);
int status;
struct spi_master *master = spi->master;
+ unsigned long flags;
+
+ status = __spi_validate(spi, message);
+ if (status != 0)
+ return status;
message->complete = spi_complete;
message->context = &done;
+ message->spi = spi;
if (!bus_locked)
mutex_lock(&master->bus_lock_mutex);
- status = spi_async_locked(spi, message);
+ /* If we're not using the legacy transfer method then we will
+ * try to transfer in the calling context so special case.
+ * This code would be less tricky if we could remove the
+ * support for driver implemented message queues.
+ */
+ if (master->transfer == spi_queued_transfer) {
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+ trace_spi_message_submit(message);
+
+ status = __spi_queued_transfer(spi, message, false);
+
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+ } else {
+ status = spi_async_locked(spi, message);
+ }
if (!bus_locked)
mutex_unlock(&master->bus_lock_mutex);
if (status == 0) {
+ /* Push out the messages in the calling context if we
+ * can.
+ */
+ if (master->transfer == spi_queued_transfer)
+ __spi_pump_messages(master, false);
+
wait_for_completion(&done);
status = message->status;
}
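Callers are unaffected by this restructuring; a minimal caller sketch (hypothetical device and register, buffers assumed DMA-safe for brevity) showing the spi_sync() path that can now pump the queue from the calling context:

	#include <linux/spi/spi.h>

	static int example_read_reg(struct spi_device *spi, u8 *cmd, u8 *val)
	{
		struct spi_transfer xfers[] = {
			{ .tx_buf = cmd, .len = 1 },
			{ .rx_buf = val, .len = 1 },
		};
		struct spi_message msg;

		spi_message_init(&msg);
		spi_message_add_tail(&xfers[0], &msg);
		spi_message_add_tail(&xfers[1], &msg);

		/* Blocks until complete; with spi_queued_transfer the message is
		 * queued and pushed out via __spi_pump_messages(master, false). */
		return spi_sync(spi, &msg);
	}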
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 6941e04afb8c..4eb7a980e670 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -317,6 +313,37 @@ done:
return status;
}
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+ unsigned *n_ioc)
+{
+ struct spi_ioc_transfer *ioc;
+ u32 tmp;
+
+ /* Check type, command number and direction */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+ || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+ || _IOC_DIR(cmd) != _IOC_WRITE)
+ return ERR_PTR(-ENOTTY);
+
+ tmp = _IOC_SIZE(cmd);
+ if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+ return ERR_PTR(-EINVAL);
+ *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+ if (*n_ioc == 0)
+ return NULL;
+
+ /* copy into scratch area */
+ ioc = kmalloc(tmp, GFP_KERNEL);
+ if (!ioc)
+ return ERR_PTR(-ENOMEM);
+ if (__copy_from_user(ioc, u_ioc, tmp)) {
+ kfree(ioc);
+ return ERR_PTR(-EFAULT);
+ }
+ return ioc;
+}
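From user space, the size decoded by spidev_get_ioc_message() is the one encoded by the standard SPI_IOC_MESSAGE(N) macro; a minimal user-space sketch (hypothetical device node and dummy data):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	int example_half_duplex_pair(void)
	{
		uint8_t tx = 0x9f, rx = 0;	/* dummy command/response bytes */
		struct spi_ioc_transfer xfer[2];
		int fd, ret;

		fd = open("/dev/spidev0.0", O_RDWR);	/* hypothetical node */
		if (fd < 0)
			return -1;

		memset(xfer, 0, sizeof(xfer));
		xfer[0].tx_buf = (uintptr_t)&tx;
		xfer[0].len = 1;
		xfer[1].rx_buf = (uintptr_t)&rx;
		xfer[1].len = 1;

		/* _IOC_SIZE(SPI_IOC_MESSAGE(2)) == 2 * sizeof(struct spi_ioc_transfer),
		 * exactly what spidev_get_ioc_message() divides back into n_ioc. */
		ret = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
		close(fd);
		return ret;
	}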
+
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
default:
/* segmented and/or full-duplex I/O request */
- if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
- || _IOC_DIR(cmd) != _IOC_WRITE) {
- retval = -ENOTTY;
- break;
- }
-
- tmp = _IOC_SIZE(cmd);
- if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
- retval = -EINVAL;
- break;
- }
- n_ioc = tmp / sizeof(struct spi_ioc_transfer);
- if (n_ioc == 0)
- break;
-
- /* copy into scratch area */
- ioc = kmalloc(tmp, GFP_KERNEL);
- if (!ioc) {
- retval = -ENOMEM;
- break;
- }
- if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
- kfree(ioc);
- retval = -EFAULT;
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd,
+ (struct spi_ioc_transfer __user *)arg, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
break;
}
+ if (!ioc)
+ break; /* n_ioc is also 0 */
/* translate to spi_message, execute */
retval = spidev_message(spidev, ioc, n_ioc);
@@ -496,8 +506,67 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
static long
+spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct spi_ioc_transfer __user *u_ioc;
+ int retval = 0;
+ struct spidev_data *spidev;
+ struct spi_device *spi;
+ unsigned n_ioc, n;
+ struct spi_ioc_transfer *ioc;
+
+ u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
+ if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ spidev = filp->private_data;
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+ mutex_lock(&spidev->buf_lock);
+
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ goto done;
+ }
+ if (!ioc)
+ goto done; /* n_ioc is also 0 */
+
+ /* Convert buffer pointers */
+ for (n = 0; n < n_ioc; n++) {
+ ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
+ ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
+ }
+
+ /* translate to spi_message, execute */
+ retval = spidev_message(spidev, ioc, n_ioc);
+ kfree(ioc);
+
+done:
+ mutex_unlock(&spidev->buf_lock);
+ spi_dev_put(spi);
+ return retval;
+}
+
+static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+ && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+ && _IOC_DIR(cmd) == _IOC_WRITE)
+ return spidev_compat_ioc_message(filp, cmd, arg);
+
return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 2fead3820849..1e180c400f17 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -90,25 +90,6 @@ found:
}
#endif /* CONFIG_SSB_PCMCIAHOST */
-#ifdef CONFIG_SSB_SDIOHOST
-struct ssb_bus *ssb_sdio_func_to_bus(struct sdio_func *func)
-{
- struct ssb_bus *bus;
-
- ssb_buses_lock();
- list_for_each_entry(bus, &buses, list) {
- if (bus->bustype == SSB_BUSTYPE_SDIO &&
- bus->host_sdio == func)
- goto found;
- }
- bus = NULL;
-found:
- ssb_buses_unlock();
-
- return bus;
-}
-#endif /* CONFIG_SSB_SDIOHOST */
-
int ssb_for_each_bus_call(unsigned long data,
int (*func)(struct ssb_bus *bus, unsigned long data))
{
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 815de379a130..45baa83be7ce 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -46,8 +46,6 @@ source "drivers/staging/rtl8723au/Kconfig"
source "drivers/staging/rts5208/Kconfig"
-source "drivers/staging/line6/Kconfig"
-
source "drivers/staging/octeon/Kconfig"
source "drivers/staging/octeon-usb/Kconfig"
@@ -58,6 +56,8 @@ source "drivers/staging/vt6656/Kconfig"
source "drivers/staging/iio/Kconfig"
+source "drivers/staging/sm7xxfb/Kconfig"
+
source "drivers/staging/xgifb/Kconfig"
source "drivers/staging/emxx_udc/Kconfig"
@@ -66,8 +66,6 @@ source "drivers/staging/ft1000/Kconfig"
source "drivers/staging/speakup/Kconfig"
-source "drivers/staging/cptm1217/Kconfig"
-
source "drivers/staging/ste_rmi4/Kconfig"
source "drivers/staging/nvec/Kconfig"
@@ -106,4 +104,8 @@ source "drivers/staging/unisys/Kconfig"
source "drivers/staging/clocking-wizard/Kconfig"
+source "drivers/staging/fbtft/Kconfig"
+
+source "drivers/staging/i2o/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 33c640b49566..29160790841f 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_R8723AU) += rtl8723au/
obj-$(CONFIG_RTS5208) += rts5208/
-obj-$(CONFIG_LINE6_USB) += line6/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_OCTEON_USB) += octeon-usb/
@@ -23,11 +22,11 @@ obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IIO) += iio/
+obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SPEAKUP) += speakup/
-obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_ANDROID) += android/
@@ -45,3 +44,5 @@ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_CRYPTO_SKEIN) += skein/
obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+obj-$(CONFIG_FB_TFT) += fbtft/
+obj-$(CONFIG_I2O) += i2o/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 7e012f37792b..8feb9048e62f 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,23 +14,6 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
-config ANDROID_LOGGER
- tristate "Android log driver"
- default n
- ---help---
- This adds support for system-wide logging using four log buffers.
-
- These are:
-
- 1: main
- 2: events
- 3: radio
- 4: system
-
- Log reading and writing is performed via normal Linux reads and
- optimized writes. This optimization avoids logging having too
- much overhead in the system.
-
config ANDROID_TIMED_OUTPUT
bool "Timed output class driver"
default y
@@ -45,15 +28,6 @@ config ANDROID_LOW_MEMORY_KILLER
---help---
Registers processes to be killed when memory is low
-config ANDROID_INTF_ALARM_DEV
- tristate "Android alarm driver"
- depends on RTC_CLASS
- default n
- ---help---
- Provides non-wakeup and rtc backed wakeup alarms based on rtc or
- elapsed realtime, and a non-wakeup alarm on the monotonic clock.
- Also exports the alarm interface to user-space.
-
config SYNC
bool "Synchronization framework"
default n
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 479b2b86f8c8..c7b6c99cc5ce 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,10 +3,8 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
obj-$(CONFIG_SYNC) += sync.o sync_debug.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
deleted file mode 100644
index ff4b3e8758a7..000000000000
--- a/drivers/staging/android/alarm-dev.c
+++ /dev/null
@@ -1,446 +0,0 @@
-/* drivers/rtc/alarm-dev.c
- *
- * Copyright (C) 2007-2009 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/time.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/alarmtimer.h>
-#include "android_alarm.h"
-
-#define ANDROID_ALARM_PRINT_INFO (1U << 0)
-#define ANDROID_ALARM_PRINT_IO (1U << 1)
-#define ANDROID_ALARM_PRINT_INT (1U << 2)
-
-static int debug_mask = ANDROID_ALARM_PRINT_INFO;
-module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-#define alarm_dbg(debug_level_mask, fmt, ...) \
-do { \
- if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) \
- pr_info(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define ANDROID_ALARM_WAKEUP_MASK ( \
- ANDROID_ALARM_RTC_WAKEUP_MASK | \
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
-
-static int alarm_opened;
-static DEFINE_SPINLOCK(alarm_slock);
-static struct wakeup_source alarm_wake_lock;
-static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
-static uint32_t alarm_pending;
-static uint32_t alarm_enabled;
-static uint32_t wait_pending;
-
-struct devalarm {
- union {
- struct hrtimer hrt;
- struct alarm alrm;
- } u;
- enum android_alarm_type type;
-};
-
-static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
-
-/**
- * is_wakeup() - Checks to see if this alarm can wake the device
- * @type: The type of alarm being checked
- *
- * Return: 1 if this is a wakeup alarm, otherwise 0
- */
-static int is_wakeup(enum android_alarm_type type)
-{
- return type == ANDROID_ALARM_RTC_WAKEUP ||
- type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP;
-}
-
-static void devalarm_start(struct devalarm *alrm, ktime_t exp)
-{
- if (is_wakeup(alrm->type))
- alarm_start(&alrm->u.alrm, exp);
- else
- hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS);
-}
-
-static int devalarm_try_to_cancel(struct devalarm *alrm)
-{
- if (is_wakeup(alrm->type))
- return alarm_try_to_cancel(&alrm->u.alrm);
- return hrtimer_try_to_cancel(&alrm->u.hrt);
-}
-
-static void devalarm_cancel(struct devalarm *alrm)
-{
- if (is_wakeup(alrm->type))
- alarm_cancel(&alrm->u.alrm);
- else
- hrtimer_cancel(&alrm->u.hrt);
-}
-
-static void alarm_clear(enum android_alarm_type alarm_type)
-{
- uint32_t alarm_type_mask = 1U << alarm_type;
- unsigned long flags;
-
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_dbg(IO, "alarm %d clear\n", alarm_type);
- devalarm_try_to_cancel(&alarms[alarm_type]);
- if (alarm_pending) {
- alarm_pending &= ~alarm_type_mask;
- if (!alarm_pending && !wait_pending)
- __pm_relax(&alarm_wake_lock);
- }
- alarm_enabled &= ~alarm_type_mask;
- spin_unlock_irqrestore(&alarm_slock, flags);
-}
-
-static void alarm_set(enum android_alarm_type alarm_type,
- struct timespec *ts)
-{
- uint32_t alarm_type_mask = 1U << alarm_type;
- unsigned long flags;
-
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_dbg(IO, "alarm %d set %ld.%09ld\n",
- alarm_type, ts->tv_sec, ts->tv_nsec);
- alarm_enabled |= alarm_type_mask;
- devalarm_start(&alarms[alarm_type], timespec_to_ktime(*ts));
- spin_unlock_irqrestore(&alarm_slock, flags);
-}
-
-static int alarm_wait(void)
-{
- unsigned long flags;
- int rv = 0;
-
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_dbg(IO, "alarm wait\n");
- if (!alarm_pending && wait_pending) {
- __pm_relax(&alarm_wake_lock);
- wait_pending = 0;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
-
- rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
- if (rv)
- return rv;
-
- spin_lock_irqsave(&alarm_slock, flags);
- rv = alarm_pending;
- wait_pending = 1;
- alarm_pending = 0;
- spin_unlock_irqrestore(&alarm_slock, flags);
-
- return rv;
-}
-
-static int alarm_set_rtc(struct timespec *ts)
-{
- struct rtc_time new_rtc_tm;
- struct rtc_device *rtc_dev;
- unsigned long flags;
- int rv = 0;
-
- rtc_time_to_tm(ts->tv_sec, &new_rtc_tm);
- rtc_dev = alarmtimer_get_rtcdev();
- rv = do_settimeofday(ts);
- if (rv < 0)
- return rv;
- if (rtc_dev)
- rv = rtc_set_time(rtc_dev, &new_rtc_tm);
-
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
- wake_up(&alarm_wait_queue);
- spin_unlock_irqrestore(&alarm_slock, flags);
-
- return rv;
-}
-
-static int alarm_get_time(enum android_alarm_type alarm_type,
- struct timespec *ts)
-{
- int rv = 0;
-
- switch (alarm_type) {
- case ANDROID_ALARM_RTC_WAKEUP:
- case ANDROID_ALARM_RTC:
- getnstimeofday(ts);
- break;
- case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
- case ANDROID_ALARM_ELAPSED_REALTIME:
- get_monotonic_boottime(ts);
- break;
- case ANDROID_ALARM_SYSTEMTIME:
- ktime_get_ts(ts);
- break;
- default:
- rv = -EINVAL;
- }
- return rv;
-}
-
-static long alarm_do_ioctl(struct file *file, unsigned int cmd,
- struct timespec *ts)
-{
- int rv = 0;
- unsigned long flags;
- enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
-
- if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
- return -EINVAL;
-
- if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
- if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return -EPERM;
- if (file->private_data == NULL &&
- cmd != ANDROID_ALARM_SET_RTC) {
- spin_lock_irqsave(&alarm_slock, flags);
- if (alarm_opened) {
- spin_unlock_irqrestore(&alarm_slock, flags);
- return -EBUSY;
- }
- alarm_opened = 1;
- file->private_data = (void *)1;
- spin_unlock_irqrestore(&alarm_slock, flags);
- }
- }
-
- switch (ANDROID_ALARM_BASE_CMD(cmd)) {
- case ANDROID_ALARM_CLEAR(0):
- alarm_clear(alarm_type);
- break;
- case ANDROID_ALARM_SET(0):
- alarm_set(alarm_type, ts);
- break;
- case ANDROID_ALARM_SET_AND_WAIT(0):
- alarm_set(alarm_type, ts);
- /* fall though */
- case ANDROID_ALARM_WAIT:
- rv = alarm_wait();
- break;
- case ANDROID_ALARM_SET_RTC:
- rv = alarm_set_rtc(ts);
- break;
- case ANDROID_ALARM_GET_TIME(0):
- rv = alarm_get_time(alarm_type, ts);
- break;
-
- default:
- rv = -EINVAL;
- }
- return rv;
-}
-
-static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-
- struct timespec ts;
- int rv;
-
- switch (ANDROID_ALARM_BASE_CMD(cmd)) {
- case ANDROID_ALARM_SET_AND_WAIT(0):
- case ANDROID_ALARM_SET(0):
- case ANDROID_ALARM_SET_RTC:
- if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
- return -EFAULT;
- break;
- }
-
- rv = alarm_do_ioctl(file, cmd, &ts);
- if (rv)
- return rv;
-
- switch (ANDROID_ALARM_BASE_CMD(cmd)) {
- case ANDROID_ALARM_GET_TIME(0):
- if (copy_to_user((void __user *)arg, &ts, sizeof(ts)))
- return -EFAULT;
- break;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_COMPAT
-static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
-
- struct timespec ts;
- int rv;
-
- switch (ANDROID_ALARM_BASE_CMD(cmd)) {
- case ANDROID_ALARM_SET_AND_WAIT_COMPAT(0):
- case ANDROID_ALARM_SET_COMPAT(0):
- case ANDROID_ALARM_SET_RTC_COMPAT:
- if (compat_get_timespec(&ts, (void __user *)arg))
- return -EFAULT;
- /* fall through */
- case ANDROID_ALARM_GET_TIME_COMPAT(0):
- cmd = ANDROID_ALARM_COMPAT_TO_NORM(cmd);
- break;
- }
-
- rv = alarm_do_ioctl(file, cmd, &ts);
- if (rv)
- return rv;
-
- switch (ANDROID_ALARM_BASE_CMD(cmd)) {
- case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
- if (compat_put_timespec(&ts, (void __user *)arg))
- return -EFAULT;
- break;
- }
-
- return 0;
-}
-#endif
-
-static int alarm_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return 0;
-}
-
-static int alarm_release(struct inode *inode, struct file *file)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&alarm_slock, flags);
- if (file->private_data) {
- for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
- uint32_t alarm_type_mask = 1U << i;
-
- if (alarm_enabled & alarm_type_mask) {
- alarm_dbg(INFO,
- "%s: clear alarm, pending %d\n",
- __func__,
- !!(alarm_pending & alarm_type_mask));
- alarm_enabled &= ~alarm_type_mask;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
- devalarm_cancel(&alarms[i]);
- spin_lock_irqsave(&alarm_slock, flags);
- }
- if (alarm_pending | wait_pending) {
- if (alarm_pending)
- alarm_dbg(INFO, "%s: clear pending alarms %x\n",
- __func__, alarm_pending);
- __pm_relax(&alarm_wake_lock);
- wait_pending = 0;
- alarm_pending = 0;
- }
- alarm_opened = 0;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
- return 0;
-}
-
-static void devalarm_triggered(struct devalarm *alarm)
-{
- unsigned long flags;
- uint32_t alarm_type_mask = 1U << alarm->type;
-
- alarm_dbg(INT, "%s: type %d\n", __func__, alarm->type);
- spin_lock_irqsave(&alarm_slock, flags);
- if (alarm_enabled & alarm_type_mask) {
- __pm_wakeup_event(&alarm_wake_lock, 5000); /* 5secs */
- alarm_enabled &= ~alarm_type_mask;
- alarm_pending |= alarm_type_mask;
- wake_up(&alarm_wait_queue);
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
-}
-
-static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
-{
- struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
-
- devalarm_triggered(devalrm);
- return HRTIMER_NORESTART;
-}
-
-static enum alarmtimer_restart devalarm_alarmhandler(struct alarm *alrm,
- ktime_t now)
-{
- struct devalarm *devalrm = container_of(alrm, struct devalarm, u.alrm);
-
- devalarm_triggered(devalrm);
- return ALARMTIMER_NORESTART;
-}
-
-
-static const struct file_operations alarm_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = alarm_ioctl,
- .open = alarm_open,
- .release = alarm_release,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = alarm_compat_ioctl,
-#endif
-};
-
-static struct miscdevice alarm_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "alarm",
- .fops = &alarm_fops,
-};
-
-static int __init alarm_dev_init(void)
-{
- int err;
- int i;
-
- err = misc_register(&alarm_device);
- if (err)
- return err;
-
- alarm_init(&alarms[ANDROID_ALARM_RTC_WAKEUP].u.alrm,
- ALARM_REALTIME, devalarm_alarmhandler);
- hrtimer_init(&alarms[ANDROID_ALARM_RTC].u.hrt,
- CLOCK_REALTIME, HRTIMER_MODE_ABS);
- alarm_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].u.alrm,
- ALARM_BOOTTIME, devalarm_alarmhandler);
- hrtimer_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME].u.hrt,
- CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
- hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].u.hrt,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
- for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
- alarms[i].type = i;
- if (!is_wakeup(i))
- alarms[i].u.hrt.function = devalarm_hrthandler;
- }
-
- wakeup_source_init(&alarm_wake_lock, "alarm");
- return 0;
-}
-
-static void __exit alarm_dev_exit(void)
-{
- misc_deregister(&alarm_device);
- wakeup_source_trash(&alarm_wake_lock);
-}
-
-module_init(alarm_dev_init);
-module_exit(alarm_dev_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
deleted file mode 100644
index 495b20cf3bf6..000000000000
--- a/drivers/staging/android/android_alarm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* include/linux/android_alarm.h
- *
- * Copyright (C) 2006-2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_ANDROID_ALARM_H
-#define _LINUX_ANDROID_ALARM_H
-
-#include <linux/compat.h>
-#include <linux/ioctl.h>
-
-#include "uapi/android_alarm.h"
-
-#ifdef CONFIG_COMPAT
-#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
- struct compat_timespec)
-#define ANDROID_ALARM_SET_AND_WAIT_COMPAT(type) ALARM_IOW(3, type, \
- struct compat_timespec)
-#define ANDROID_ALARM_GET_TIME_COMPAT(type) ALARM_IOW(4, type, \
- struct compat_timespec)
-#define ANDROID_ALARM_SET_RTC_COMPAT _IOW('a', 5, \
- struct compat_timespec)
-#define ANDROID_ALARM_IOCTL_NR(cmd) (_IOC_NR(cmd) & ((1<<4)-1))
-#define ANDROID_ALARM_COMPAT_TO_NORM(cmd) \
- ALARM_IOW(ANDROID_ALARM_IOCTL_NR(cmd), \
- ANDROID_ALARM_IOCTL_TO_TYPE(cmd), \
- struct timespec)
-
-#endif
-
-#endif
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8c7852742f4b..d140b733940c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -447,8 +447,8 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
loff_t end = (range->pgend + 1) * PAGE_SIZE;
vfs_fallocate(range->asma->file,
- FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- start, end - start);
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
@@ -549,7 +549,6 @@ static int get_name(struct ashmem_area *asma, void __user *name)
mutex_lock(&ashmem_mutex);
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
-
/*
* Copying only `len', instead of ASHMEM_NAME_LEN, bytes
* prevents us from revealing one user's stack to another.
@@ -751,10 +750,10 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case ASHMEM_SET_NAME:
- ret = set_name(asma, (void __user *) arg);
+ ret = set_name(asma, (void __user *)arg);
break;
case ASHMEM_GET_NAME:
- ret = get_name(asma, (void __user *) arg);
+ ret = get_name(asma, (void __user *)arg);
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
@@ -775,7 +774,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case ASHMEM_PIN:
case ASHMEM_UNPIN:
case ASHMEM_GET_PIN_STATUS:
- ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+ ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
break;
case ASHMEM_PURGE_ALL_CACHES:
ret = -EPERM;
@@ -798,7 +797,6 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
-
switch (cmd) {
case COMPAT_ASHMEM_SET_SIZE:
cmd = ASHMEM_SET_SIZE;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 296d347660fc..b8f1c491553e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1508,6 +1508,9 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
+ spin_lock_init(&heap->free_lock);
+ heap->free_list_size = 0;
+
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index f8cabcbc39e5..f4211f1be488 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,24 +39,6 @@ struct ion_cma_buffer_info {
struct sg_table *table;
};
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replaced by dma_common_get_sgtable
- * as soon as it will avalaible.
- */
-static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
-{
- struct page *page = virt_to_page(cpu_addr);
- int ret;
-
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
-
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
-}
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
@@ -91,7 +73,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (!info->table)
goto free_mem;
- if (ion_cma_get_sgtable
+ if (dma_common_get_sgtable
(dev, info->table, info->cpu_addr, info->handle, len))
goto free_table;
/* keep this for memory release */
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 4605e04712aa..fd13d05b538a 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -253,8 +253,6 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
struct sched_param param = { .sched_priority = 0 };
INIT_LIST_HEAD(&heap->free_list);
- heap->free_list_size = 0;
- spin_lock_init(&heap->free_lock);
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
deleted file mode 100644
index a673ffa34aa3..000000000000
--- a/drivers/staging/android/logger.c
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * drivers/misc/logger.c
- *
- * A Logging Subsystem
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "logger: " fmt
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
-#include <linux/aio.h>
-#include "logger.h"
-
-#include <asm/ioctls.h>
-
-/**
- * struct logger_log - represents a specific log, such as 'main' or 'radio'
- * @buffer: The actual ring buffer
- * @misc: The "misc" device representing the log
- * @wq: The wait queue for @readers
- * @readers: This log's readers
- * @mutex: The mutex that protects the @buffer
- * @w_off: The current write head offset
- * @head: The head, or location that readers start reading at.
- * @size: The size of the log
- * @logs: The list of log channels
- *
- * This structure lives from module insertion until module removal, so it does
- * not need additional reference counting. The structure is protected by the
- * mutex 'mutex'.
- */
-struct logger_log {
- unsigned char *buffer;
- struct miscdevice misc;
- wait_queue_head_t wq;
- struct list_head readers;
- struct mutex mutex;
- size_t w_off;
- size_t head;
- size_t size;
- struct list_head logs;
-};
-
-static LIST_HEAD(log_list);
-
-
-/**
- * struct logger_reader - a logging device open for reading
- * @log: The associated log
- * @list: The associated entry in @logger_log's list
- * @r_off: The current read head offset.
- * @r_all: Reader can read all entries
- * @r_ver: Reader ABI version
- *
- * This object lives from open to release, so we don't need additional
- * reference counting. The structure is protected by log->mutex.
- */
-struct logger_reader {
- struct logger_log *log;
- struct list_head list;
- size_t r_off;
- bool r_all;
- int r_ver;
-};
-
-/* logger_offset - returns index 'n' into the log via (optimized) modulus */
-static size_t logger_offset(struct logger_log *log, size_t n)
-{
- return n & (log->size - 1);
-}
-
-
-/*
- * file_get_log - Given a file structure, return the associated log
- *
- * This isn't aesthetic. We have several goals:
- *
- * 1) Need to quickly obtain the associated log during an I/O operation
- * 2) Readers need to maintain state (logger_reader)
- * 3) Writers need to be very fast (open() should be a near no-op)
- *
- * In the reader case, we can trivially go file->logger_reader->logger_log.
- * For a writer, we don't want to maintain a logger_reader, so we just go
- * file->logger_log. Thus what file->private_data points at depends on whether
- * or not the file was opened for reading. This function hides that dirtiness.
- */
-static inline struct logger_log *file_get_log(struct file *file)
-{
- if (file->f_mode & FMODE_READ) {
- struct logger_reader *reader = file->private_data;
-
- return reader->log;
- }
- return file->private_data;
-}
-
-/*
- * get_entry_header - returns a pointer to the logger_entry header within
- * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
- * be provided. Typically the return value will be a pointer within
- * 'logger->buf'. However, a pointer to 'scratch' may be returned if
- * the log entry spans the end and beginning of the circular buffer.
- */
-static struct logger_entry *get_entry_header(struct logger_log *log,
- size_t off, struct logger_entry *scratch)
-{
- size_t len = min(sizeof(struct logger_entry), log->size - off);
-
- if (len != sizeof(struct logger_entry)) {
- memcpy(((void *) scratch), log->buffer + off, len);
- memcpy(((void *) scratch) + len, log->buffer,
- sizeof(struct logger_entry) - len);
- return scratch;
- }
-
- return (struct logger_entry *) (log->buffer + off);
-}
-
-/*
- * get_entry_msg_len - Grabs the length of the message of the entry
- * starting from from 'off'.
- *
- * An entry length is 2 bytes (16 bits) in host endian order.
- * In the log, the length does not include the size of the log entry structure.
- * This function returns the size including the log entry structure.
- *
- * Caller needs to hold log->mutex.
- */
-static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
-{
- struct logger_entry scratch;
- struct logger_entry *entry;
-
- entry = get_entry_header(log, off, &scratch);
- return entry->len;
-}
-
-static size_t get_user_hdr_len(int ver)
-{
- if (ver < 2)
- return sizeof(struct user_logger_entry_compat);
- return sizeof(struct logger_entry);
-}
-
-static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
- char __user *buf)
-{
- void *hdr;
- size_t hdr_len;
- struct user_logger_entry_compat v1;
-
- if (ver < 2) {
- v1.len = entry->len;
- v1.__pad = 0;
- v1.pid = entry->pid;
- v1.tid = entry->tid;
- v1.sec = entry->sec;
- v1.nsec = entry->nsec;
- hdr = &v1;
- hdr_len = sizeof(struct user_logger_entry_compat);
- } else {
- hdr = entry;
- hdr_len = sizeof(struct logger_entry);
- }
-
- return copy_to_user(buf, hdr, hdr_len);
-}
-
-/*
- * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
- * user-space buffer 'buf'. Returns 'count' on success.
- *
- * Caller must hold log->mutex.
- */
-static ssize_t do_read_log_to_user(struct logger_log *log,
- struct logger_reader *reader,
- char __user *buf,
- size_t count)
-{
- struct logger_entry scratch;
- struct logger_entry *entry;
- size_t len;
- size_t msg_start;
-
- /*
- * First, copy the header to userspace, using the version of
- * the header requested
- */
- entry = get_entry_header(log, reader->r_off, &scratch);
- if (copy_header_to_user(reader->r_ver, entry, buf))
- return -EFAULT;
-
- count -= get_user_hdr_len(reader->r_ver);
- buf += get_user_hdr_len(reader->r_ver);
- msg_start = logger_offset(log,
- reader->r_off + sizeof(struct logger_entry));
-
- /*
- * We read from the msg in two disjoint operations. First, we read from
- * the current msg head offset up to 'count' bytes or to the end of
- * the log, whichever comes first.
- */
- len = min(count, log->size - msg_start);
- if (copy_to_user(buf, log->buffer + msg_start, len))
- return -EFAULT;
-
- /*
- * Second, we read any remaining bytes, starting back at the head of
- * the log.
- */
- if (count != len)
- if (copy_to_user(buf + len, log->buffer, count - len))
- return -EFAULT;
-
- reader->r_off = logger_offset(log, reader->r_off +
- sizeof(struct logger_entry) + count);
-
- return count + get_user_hdr_len(reader->r_ver);
-}
-
-/*
- * get_next_entry_by_uid - Starting at 'off', returns an offset into
- * 'log->buffer' which contains the first entry readable by 'euid'
- */
-static size_t get_next_entry_by_uid(struct logger_log *log,
- size_t off, kuid_t euid)
-{
- while (off != log->w_off) {
- struct logger_entry *entry;
- struct logger_entry scratch;
- size_t next_len;
-
- entry = get_entry_header(log, off, &scratch);
-
- if (uid_eq(entry->euid, euid))
- return off;
-
- next_len = sizeof(struct logger_entry) + entry->len;
- off = logger_offset(log, off + next_len);
- }
-
- return off;
-}
-
-/*
- * logger_read - our log's read() method
- *
- * Behavior:
- *
- * - O_NONBLOCK works
- * - If there are no log entries to read, blocks until log is written to
- * - Atomically reads exactly one log entry
- *
- * Will set errno to EINVAL if read
- * buffer is insufficient to hold next entry.
- */
-static ssize_t logger_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct logger_reader *reader = file->private_data;
- struct logger_log *log = reader->log;
- ssize_t ret;
- DEFINE_WAIT(wait);
-
-start:
- while (1) {
- mutex_lock(&log->mutex);
-
- prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
-
- ret = (log->w_off == reader->r_off);
- mutex_unlock(&log->mutex);
- if (!ret)
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
-
- schedule();
- }
-
- finish_wait(&log->wq, &wait);
- if (ret)
- return ret;
-
- mutex_lock(&log->mutex);
-
- if (!reader->r_all)
- reader->r_off = get_next_entry_by_uid(log,
- reader->r_off, current_euid());
-
- /* is there still something to read or did we race? */
- if (unlikely(log->w_off == reader->r_off)) {
- mutex_unlock(&log->mutex);
- goto start;
- }
-
- /* get the size of the next entry */
- ret = get_user_hdr_len(reader->r_ver) +
- get_entry_msg_len(log, reader->r_off);
- if (count < ret) {
- ret = -EINVAL;
- goto out;
- }
-
- /* get exactly one entry from the log */
- ret = do_read_log_to_user(log, reader, buf, ret);
-
-out:
- mutex_unlock(&log->mutex);
-
- return ret;
-}
-
-/*
- * get_next_entry - return the offset of the first valid entry at least 'len'
- * bytes after 'off'.
- *
- * Caller must hold log->mutex.
- */
-static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
-{
- size_t count = 0;
-
- do {
- size_t nr = sizeof(struct logger_entry) +
- get_entry_msg_len(log, off);
- off = logger_offset(log, off + nr);
- count += nr;
- } while (count < len);
-
- return off;
-}
-
-/*
- * is_between - is a < c < b, accounting for wrapping of a, b, and c
- * positions in the buffer
- *
- * That is, if a<b, check for c between a and b
- * and if a>b, check for c outside (not between) a and b
- *
- * |------- a xxxxxxxx b --------|
- * c^
- *
- * |xxxxx b --------- a xxxxxxxxx|
- * c^
- * or c^
- */
-static inline int is_between(size_t a, size_t b, size_t c)
-{
- if (a < b) {
- /* is c between a and b? */
- if (a < c && c <= b)
- return 1;
- } else {
- /* is c outside of b through a? */
- if (c <= b || a < c)
- return 1;
- }
-
- return 0;
-}
-
-/*
- * fix_up_readers - walk the list of all readers and "fix up" any who were
- * lapped by the writer; also do the same for the default "start head".
- * We do this by "pulling forward" the readers and start head to the first
- * entry after the new write head.
- *
- * The caller needs to hold log->mutex.
- */
-static void fix_up_readers(struct logger_log *log, size_t len)
-{
- size_t old = log->w_off;
- size_t new = logger_offset(log, old + len);
- struct logger_reader *reader;
-
- if (is_between(old, new, log->head))
- log->head = get_next_entry(log, log->head, len);
-
- list_for_each_entry(reader, &log->readers, list)
- if (is_between(old, new, reader->r_off))
- reader->r_off = get_next_entry(log, reader->r_off, len);
-}
-
-/*
- * logger_write_iter - our write method, implementing support for write(),
- * writev(), and aio_write(). Writes are our fast path, and we try to optimize
- * them above all else.
- */
-static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct logger_log *log = file_get_log(iocb->ki_filp);
- struct logger_entry header;
- struct timespec now;
- size_t len, count, w_off;
-
- count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
-
- now = current_kernel_time();
-
- header.pid = current->tgid;
- header.tid = current->pid;
- header.sec = now.tv_sec;
- header.nsec = now.tv_nsec;
- header.euid = current_euid();
- header.len = count;
- header.hdr_size = sizeof(struct logger_entry);
-
- /* null writes succeed, return zero */
- if (unlikely(!header.len))
- return 0;
-
- mutex_lock(&log->mutex);
-
- /*
- * Fix up any readers, pulling them forward to the first readable
- * entry after (what will be) the new write offset. We do this now
- * because if we partially fail, we can end up with clobbered log
- * entries that encroach on readable buffer.
- */
- fix_up_readers(log, sizeof(struct logger_entry) + header.len);
-
- len = min(sizeof(header), log->size - log->w_off);
- memcpy(log->buffer + log->w_off, &header, len);
- memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);
-
- /* Work with a copy until we are ready to commit the whole entry */
- w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry));
-
- len = min(count, log->size - w_off);
-
- if (copy_from_iter(log->buffer + w_off, len, from) != len) {
- /*
- * Note that by not updating log->w_off, this abandons the
- * portion of the new entry that *was* successfully
- * copied, just above. This is intentional to avoid
- * message corruption from missing fragments.
- */
- mutex_unlock(&log->mutex);
- return -EFAULT;
- }
-
- if (copy_from_iter(log->buffer, count - len, from) != count - len) {
- mutex_unlock(&log->mutex);
- return -EFAULT;
- }
-
- log->w_off = logger_offset(log, w_off + count);
- mutex_unlock(&log->mutex);
-
- /* wake up any blocked readers */
- wake_up_interruptible(&log->wq);
-
- return len;
-}
-
-static struct logger_log *get_log_from_minor(int minor)
-{
- struct logger_log *log;
-
- list_for_each_entry(log, &log_list, logs)
- if (log->misc.minor == minor)
- return log;
- return NULL;
-}
-
-/*
- * logger_open - the log's open() file operation
- *
- * Note how near a no-op this is in the write-only case. Keep it that way!
- */
-static int logger_open(struct inode *inode, struct file *file)
-{
- struct logger_log *log;
- int ret;
-
- ret = nonseekable_open(inode, file);
- if (ret)
- return ret;
-
- log = get_log_from_minor(MINOR(inode->i_rdev));
- if (!log)
- return -ENODEV;
-
- if (file->f_mode & FMODE_READ) {
- struct logger_reader *reader;
-
- reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
- if (!reader)
- return -ENOMEM;
-
- reader->log = log;
- reader->r_ver = 1;
- reader->r_all = in_egroup_p(inode->i_gid) ||
- capable(CAP_SYSLOG);
-
- INIT_LIST_HEAD(&reader->list);
-
- mutex_lock(&log->mutex);
- reader->r_off = log->head;
- list_add_tail(&reader->list, &log->readers);
- mutex_unlock(&log->mutex);
-
- file->private_data = reader;
- } else
- file->private_data = log;
-
- return 0;
-}
-
-/*
- * logger_release - the log's release file operation
- *
- * Note this is a total no-op in the write-only case. Keep it that way!
- */
-static int logger_release(struct inode *ignored, struct file *file)
-{
- if (file->f_mode & FMODE_READ) {
- struct logger_reader *reader = file->private_data;
- struct logger_log *log = reader->log;
-
- mutex_lock(&log->mutex);
- list_del(&reader->list);
- mutex_unlock(&log->mutex);
-
- kfree(reader);
- }
-
- return 0;
-}
-
-/*
- * logger_poll - the log's poll file operation, for poll/select/epoll
- *
- * Note we always return POLLOUT, because you can always write() to the log.
- * Note also that, strictly speaking, a return value of POLLIN does not
- * guarantee that the log is readable without blocking, as there is a small
- * chance that the writer can lap the reader in the interim between poll()
- * returning and the read() request.
- */
-static unsigned int logger_poll(struct file *file, poll_table *wait)
-{
- struct logger_reader *reader;
- struct logger_log *log;
- unsigned int ret = POLLOUT | POLLWRNORM;
-
- if (!(file->f_mode & FMODE_READ))
- return ret;
-
- reader = file->private_data;
- log = reader->log;
-
- poll_wait(file, &log->wq, wait);
-
- mutex_lock(&log->mutex);
- if (!reader->r_all)
- reader->r_off = get_next_entry_by_uid(log,
- reader->r_off, current_euid());
-
- if (log->w_off != reader->r_off)
- ret |= POLLIN | POLLRDNORM;
- mutex_unlock(&log->mutex);
-
- return ret;
-}
-
-static long logger_set_version(struct logger_reader *reader, void __user *arg)
-{
- int version;
-
- if (copy_from_user(&version, arg, sizeof(int)))
- return -EFAULT;
-
- if ((version < 1) || (version > 2))
- return -EINVAL;
-
- reader->r_ver = version;
- return 0;
-}
-
-static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct logger_log *log = file_get_log(file);
- struct logger_reader *reader;
- long ret = -EINVAL;
- void __user *argp = (void __user *) arg;
-
- mutex_lock(&log->mutex);
-
- switch (cmd) {
- case LOGGER_GET_LOG_BUF_SIZE:
- ret = log->size;
- break;
- case LOGGER_GET_LOG_LEN:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
- if (log->w_off >= reader->r_off)
- ret = log->w_off - reader->r_off;
- else
- ret = (log->size - reader->r_off) + log->w_off;
- break;
- case LOGGER_GET_NEXT_ENTRY_LEN:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
-
- if (!reader->r_all)
- reader->r_off = get_next_entry_by_uid(log,
- reader->r_off, current_euid());
-
- if (log->w_off != reader->r_off)
- ret = get_user_hdr_len(reader->r_ver) +
- get_entry_msg_len(log, reader->r_off);
- else
- ret = 0;
- break;
- case LOGGER_FLUSH_LOG:
- if (!(file->f_mode & FMODE_WRITE)) {
- ret = -EBADF;
- break;
- }
- if (!(in_egroup_p(file_inode(file)->i_gid) ||
- capable(CAP_SYSLOG))) {
- ret = -EPERM;
- break;
- }
- list_for_each_entry(reader, &log->readers, list)
- reader->r_off = log->w_off;
- log->head = log->w_off;
- ret = 0;
- break;
- case LOGGER_GET_VERSION:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
- ret = reader->r_ver;
- break;
- case LOGGER_SET_VERSION:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
- ret = logger_set_version(reader, argp);
- break;
- }
-
- mutex_unlock(&log->mutex);
-
- return ret;
-}
-
-static const struct file_operations logger_fops = {
- .owner = THIS_MODULE,
- .read = logger_read,
- .write_iter = logger_write_iter,
- .poll = logger_poll,
- .unlocked_ioctl = logger_ioctl,
- .compat_ioctl = logger_ioctl,
- .open = logger_open,
- .release = logger_release,
-};
-
-/*
- * Log size must must be a power of two, and greater than
- * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
- */
-static int __init create_log(char *log_name, int size)
-{
- int ret = 0;
- struct logger_log *log;
- unsigned char *buffer;
-
- buffer = vmalloc(size);
- if (buffer == NULL)
- return -ENOMEM;
-
- log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
- if (log == NULL) {
- ret = -ENOMEM;
- goto out_free_buffer;
- }
- log->buffer = buffer;
-
- log->misc.minor = MISC_DYNAMIC_MINOR;
- log->misc.name = kstrdup(log_name, GFP_KERNEL);
- if (log->misc.name == NULL) {
- ret = -ENOMEM;
- goto out_free_log;
- }
-
- log->misc.fops = &logger_fops;
- log->misc.parent = NULL;
-
- init_waitqueue_head(&log->wq);
- INIT_LIST_HEAD(&log->readers);
- mutex_init(&log->mutex);
- log->w_off = 0;
- log->head = 0;
- log->size = size;
-
- INIT_LIST_HEAD(&log->logs);
- list_add_tail(&log->logs, &log_list);
-
- /* finally, initialize the misc device for this log */
- ret = misc_register(&log->misc);
- if (unlikely(ret)) {
- pr_err("failed to register misc device for log '%s'!\n",
- log->misc.name);
- goto out_free_misc_name;
- }
-
- pr_info("created %luK log '%s'\n",
- (unsigned long) log->size >> 10, log->misc.name);
-
- return 0;
-
-out_free_misc_name:
- kfree(log->misc.name);
-
-out_free_log:
- kfree(log);
-
-out_free_buffer:
- vfree(buffer);
- return ret;
-}
-
-static int __init logger_init(void)
-{
- int ret;
-
- ret = create_log(LOGGER_LOG_MAIN, 256*1024);
- if (unlikely(ret))
- goto out;
-
- ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
- if (unlikely(ret))
- goto out;
-
- ret = create_log(LOGGER_LOG_RADIO, 256*1024);
- if (unlikely(ret))
- goto out;
-
- ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
- if (unlikely(ret))
- goto out;
-
-out:
- return ret;
-}
-
-static void __exit logger_exit(void)
-{
- struct logger_log *current_log, *next_log;
-
- list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
- /* we have to delete all the entry inside log_list */
- misc_deregister(&current_log->misc);
- vfree(current_log->buffer);
- kfree(current_log->misc.name);
- list_del(&current_log->logs);
- kfree(current_log);
- }
-}
-
-
-device_initcall(logger_init);
-module_exit(logger_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Robert Love, <rlove@google.com>");
-MODULE_DESCRIPTION("Android Logger");
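
The removed logger relied on its buffer size being a power of two (create_log() insisted on it): logger_offset() reduces any running offset with a mask instead of a modulus, and copies that cross the end of the buffer are split in two, exactly as logger_write_iter() and do_read_log_to_user() do above. A self-contained sketch of that wrap-around indexing (illustrative only, not the driver's code):

#include <stddef.h>
#include <string.h>

/* size must be a power of two, so "& (size - 1)" behaves like "% size" */
static size_t ring_offset(size_t n, size_t size)
{
	return n & (size - 1);
}

/* Copy len bytes into the ring at w_off, splitting at the buffer end;
 * returns the new write offset. */
static size_t ring_write(unsigned char *buf, size_t size, size_t w_off,
			 const unsigned char *src, size_t len)
{
	size_t first = len < size - w_off ? len : size - w_off;

	memcpy(buf + w_off, src, first);
	memcpy(buf, src + first, len - first);	/* zero bytes if no wrap */
	return ring_offset(w_off + len, size);
}
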
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
deleted file mode 100644
index 70af7d805dff..000000000000
--- a/drivers/staging/android/logger.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* include/linux/logger.h
- *
- * Copyright (C) 2007-2008 Google, Inc.
- * Author: Robert Love <rlove@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_LOGGER_H
-#define _LINUX_LOGGER_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/**
- * struct user_logger_entry_compat - defines a single entry that is given to a logger
- * @len: The length of the payload
- * @__pad: Two bytes of padding that appear to be required
- * @pid: The generating process' process ID
- * @tid: The generating process' thread ID
- * @sec: The number of seconds that have elapsed since the Epoch
- * @nsec: The number of nanoseconds that have elapsed since @sec
- * @msg: The message that is to be logged
- *
- * The userspace structure for version 1 of the logger_entry ABI.
- * This structure is returned to userspace unless the caller requests
- * an upgrade to a newer ABI version.
- */
-struct user_logger_entry_compat {
- __u16 len;
- __u16 __pad;
- __s32 pid;
- __s32 tid;
- __s32 sec;
- __s32 nsec;
- char msg[0];
-};
-
-/**
- * struct logger_entry - defines a single entry that is given to a logger
- * @len: The length of the payload
- * @hdr_size: sizeof(struct logger_entry_v2)
- * @pid: The generating process' process ID
- * @tid: The generating process' thread ID
- * @sec: The number of seconds that have elapsed since the Epoch
- * @nsec: The number of nanoseconds that have elapsed since @sec
- * @euid: Effective UID of logger
- * @msg: The message that is to be logged
- *
- * The structure for version 2 of the logger_entry ABI.
- * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
- * is called with version >= 2
- */
-struct logger_entry {
- __u16 len;
- __u16 hdr_size;
- __s32 pid;
- __s32 tid;
- __s32 sec;
- __s32 nsec;
- kuid_t euid;
- char msg[0];
-};
-
-#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
-#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
-#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
-#define LOGGER_LOG_MAIN "log_main" /* everything else */
-
-#define LOGGER_ENTRY_MAX_PAYLOAD 4076
-
-#define __LOGGERIO 0xAE
-
-#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
-#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
-#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
-#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
-#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */
-#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */
-
-#endif /* _LINUX_LOGGER_H */
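
The header above defined the reader side of the ABI that logger.c implemented: a reader gets version-1 entries by default (logger_open() sets r_ver to 1), and each read() returns exactly one header-plus-payload entry, failing with EINVAL if the buffer is too small. A hedged userspace sketch of consuming one entry with the v1 header; the device node path is an assumption (the misc device is merely named "log_main"):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Mirrors the removed user_logger_entry_compat (v1 reader header);
 * the payload bytes follow this header in the read() buffer. */
struct log_entry_v1 {
	uint16_t len;		/* payload length, header excluded */
	uint16_t pad;
	int32_t pid;
	int32_t tid;
	int32_t sec;
	int32_t nsec;
};

int main(void)
{
	/* 4076 == LOGGER_ENTRY_MAX_PAYLOAD in the removed header */
	static union {
		struct log_entry_v1 hdr;
		unsigned char raw[sizeof(struct log_entry_v1) + 4076];
	} e;
	int fd = open("/dev/log_main", O_RDONLY);	/* path is an assumption */
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, &e, sizeof(e));	/* one whole entry per read() */
	if (n > 0)
		printf("pid %d tid %d payload %u bytes\n",
		       (int)e.hdr.pid, (int)e.hdr.tid, (unsigned)e.hdr.len);
	close(fd);
	return 0;
}
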
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index b545d3d1da3e..feafa172b155 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -160,7 +160,12 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
lowmem_deathpending_timeout = jiffies + HZ;
- set_tsk_thread_flag(selected, TIF_MEMDIE);
+ /*
+ * FIXME: lowmemorykiller shouldn't abuse global OOM killer
+ * infrastructure. There is no real reason why the selected
+ * task should have access to the memory reserves.
+ */
+ mark_tsk_oom_victim(selected);
send_sig(SIGKILL, selected, 0);
rem += selected_tasksize;
}
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 1532a86404be..91ed2c4cff45 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -96,7 +96,8 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
sync_status_str(status));
if (status <= 0) {
- struct timespec64 ts64 = ktime_to_timespec64(pt->base.timestamp);
+ struct timespec64 ts64 =
+ ktime_to_timespec64(pt->base.timestamp);
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
deleted file mode 100644
index aa013f6f5f3a..000000000000
--- a/drivers/staging/android/uapi/android_alarm.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* drivers/staging/android/uapi/android_alarm.h
- *
- * Copyright (C) 2006-2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_ANDROID_ALARM_H
-#define _UAPI_LINUX_ANDROID_ALARM_H
-
-#include <linux/ioctl.h>
-#include <linux/time.h>
-
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
-#endif
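
The removed alarm ABI above packs the alarm type into the upper nibble of the ioctl command number: ALARM_IOW() ORs (type << 4) into the nr field, ANDROID_ALARM_IOCTL_TO_TYPE() recovers it with a shift, and ANDROID_ALARM_BASE_CMD() masks it back out. A small userspace-style decoding example (illustrative only):

#include <linux/ioctl.h>
#include <stdio.h>
#include <time.h>

/* Copied from the removed uapi header */
#define ALARM_IOW(c, type, size)	_IOW('a', (c) | ((type) << 4), size)
#define ANDROID_ALARM_SET(type)		ALARM_IOW(2, type, struct timespec)
#define ANDROID_ALARM_BASE_CMD(cmd)	(cmd & ~(_IOC(0, 0, 0xf0, 0)))
#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)

int main(void)
{
	/* ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP is alarm type 2 */
	unsigned int cmd = ANDROID_ALARM_SET(2);

	/* nr is 2 | (2 << 4) == 0x22: base command 2, alarm type 2 */
	printf("nr=%#x type=%u base==SET(0)? %d\n",
	       _IOC_NR(cmd), ANDROID_ALARM_IOCTL_TO_TYPE(cmd),
	       ANDROID_ALARM_BASE_CMD(cmd) == ANDROID_ALARM_SET(0));
	return 0;
}
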
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 6050fbdfd31f..d5a6abc84519 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -11,8 +11,7 @@ static bool find_by_address(u64 base_address)
struct resource res;
while (dn) {
- if (of_can_translate_address(dn)
- && !of_address_to_resource(dn, 0, &res)) {
+ if (!of_address_to_resource(dn, 0, &res)) {
if (res.start == base_address) {
of_node_put(dn);
return true;
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 471d0877f382..5455bf3d5a91 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -91,8 +91,10 @@ static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
if (ndata->clk == clk_wzrd->clk_in1)
max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
- if (ndata->clk == clk_wzrd->axi_clk)
+ else if (ndata->clk == clk_wzrd->axi_clk)
max = WZRD_ACLK_MAX_FREQ;
+ else
+ return NOTIFY_DONE; /* should never happen */
switch (event) {
case PRE_RATE_CHANGE:
@@ -239,6 +241,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
/* register div per output */
for (i = WZRD_NUM_OUTPUTS - 1; i >= 0 ; i--) {
const char *clkout_name;
+
if (of_property_read_string_index(np, "clock-output-names", i,
&clkout_name)) {
dev_err(&pdev->dev,
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index a8201fe87512..593fcb1783b4 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -168,7 +168,7 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
- depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
---help---
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -179,7 +179,7 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
- depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
---help---
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -188,7 +188,7 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
- depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
---help---
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -281,7 +281,7 @@ config COMEDI_DAS08_ISA
config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
- depends on ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
select COMEDI_8255
---help---
Enable support for Keithley Metrabyte/ComputerBoards DAS16
@@ -309,7 +309,7 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
- depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
---help---
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
@@ -372,7 +372,7 @@ config COMEDI_DT2817
config COMEDI_DT282X
tristate "Data Translation DT2821 series and DT-EZ ISA card support"
- depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
---help---
Enable support for Data Translation DT2821 series including DT-EZ
DT2821, DT2821-F-16SE, DT2821-F-8DI, DT2821-G-16SE, DT2821-G-8DI,
@@ -462,7 +462,7 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
- depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_ISADMA if ISA_DMA_API
---help---
Enable support for National Instruments AT-A2150 cards
@@ -502,7 +502,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
select COMEDI_NI_LABPC
- select COMEDI_NI_LABPC_ISADMA if ISA_DMA_API && VIRT_TO_BUS
+ select COMEDI_NI_LABPC_ISADMA if ISA_DMA_API
---help---
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@@ -724,7 +724,6 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
- depends on VIRT_TO_BUS
---help---
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -1263,12 +1262,16 @@ config COMEDI_DAS08
tristate
select COMEDI_8255
+config COMEDI_ISADMA
+ tristate
+
config COMEDI_NI_LABPC
tristate
select COMEDI_8255
config COMEDI_NI_LABPC_ISADMA
tristate
+ select COMEDI_ISADMA
config COMEDI_NI_TIO
tristate
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 5a4c74f703b3..25848244c4b1 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -1,23 +1,23 @@
/*
- comedi/comedi_compat32.c
- 32-bit ioctl compatibility for 64-bit comedi kernel module.
-
- Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
- Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/comedi_compat32.c
+ * 32-bit ioctl compatibility for 64-bit comedi kernel module.
+ *
+ * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
+ * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/uaccess.h>
#include <linux/compat.h>
@@ -27,11 +27,15 @@
#define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct)
#define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct)
-/* N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
- * It's too late to change it now, but it only affects the command number. */
+/*
+ * N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
+ * It's too late to change it now, but it only affects the command number.
+ */
#define COMEDI32_CMD _IOR(CIO, 9, struct comedi32_cmd_struct)
-/* N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
- * It's too late to change it now, but it only affects the command number. */
+/*
+ * N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
+ * It's too late to change it now, but it only affects the command number.
+ */
#define COMEDI32_CMDTEST _IOR(CIO, 10, struct comedi32_cmd_struct)
#define COMEDI32_INSNLIST _IOR(CIO, 11, struct comedi32_insnlist_struct)
#define COMEDI32_INSN _IOR(CIO, 12, struct comedi32_insn_struct)
@@ -39,7 +43,7 @@
struct comedi32_chaninfo_struct {
unsigned int subdev;
compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */
- compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
+ compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */
unsigned int unused[4];
};
@@ -62,16 +66,16 @@ struct comedi32_cmd_struct {
unsigned int scan_end_arg;
unsigned int stop_src;
unsigned int stop_arg;
- compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
+ compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
unsigned int chanlist_len;
- compat_uptr_t data; /* 32-bit 'short *' */
+ compat_uptr_t data; /* 32-bit 'short *' */
unsigned int data_len;
};
struct comedi32_insn_struct {
unsigned int insn;
unsigned int n;
- compat_uptr_t data; /* 32-bit 'unsigned int *' */
+ compat_uptr_t data; /* 32-bit 'unsigned int *' */
unsigned int subdev;
unsigned int chanspec;
unsigned int unused[3];
@@ -79,7 +83,7 @@ struct comedi32_insn_struct {
struct comedi32_insnlist_struct {
unsigned int n_insns;
- compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
+ compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
};
/* Handle translated ioctl. */
@@ -215,10 +219,12 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
int err;
unsigned int temp;
- /* Copy back most of cmd structure. */
- /* Assume the pointer values are already valid. */
- /* (Could use ptr_to_compat() to set them, but that wasn't implemented
- * until kernel version 2.6.11.) */
+ /*
+ * Copy back most of cmd structure.
+ *
+ * Assume the pointer values are already valid.
+ * (Could use ptr_to_compat() to set them.)
+ */
if (!access_ok(VERIFY_READ, cmd, sizeof(*cmd)) ||
!access_ok(VERIFY_WRITE, cmd32, sizeof(*cmd32)))
return -EFAULT;
@@ -262,7 +268,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
{
struct comedi_cmd __user *cmd;
struct comedi32_cmd_struct __user *cmd32;
- int rc;
+ int rc, err;
cmd32 = compat_ptr(arg);
cmd = compat_alloc_user_space(sizeof(*cmd));
@@ -271,7 +277,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
if (rc)
return rc;
- return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
+ rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
+ if (rc == -EAGAIN) {
+ /* Special case: copy cmd back to user. */
+ err = put_compat_cmd(cmd32, cmd);
+ if (err)
+ rc = err;
+ }
+
+ return rc;
}
/* Handle 32-bit COMEDI_CMDTEST ioctl. */
@@ -395,10 +409,12 @@ static int compat_insn(struct file *file, unsigned long arg)
return translated_ioctl(file, COMEDI_INSN, (unsigned long)insn);
}
-/* Process untranslated ioctl. */
-/* Returns -ENOIOCTLCMD for unrecognised ioctl codes. */
-static inline int raw_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+/*
+ * compat_ioctl file operation.
+ *
+ * Returns -ENOIOCTLCMD for unrecognised ioctl codes.
+ */
+long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int rc;
@@ -445,10 +461,3 @@ static inline int raw_ioctl(struct file *file, unsigned int cmd,
}
return rc;
}
-
-/* compat_ioctl file operation. */
-/* Returns -ENOIOCTLCMD for unrecognised ioctl codes. */
-long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return raw_ioctl(file, cmd, arg);
-}
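
The compat_cmd() rework above matters because of the COMEDI_CMD contract documented later in this patch: on -EAGAIN the kernel hands back a possibly modified comedi_cmd with arguments it can actually support, so the 32-bit wrapper must copy the adjusted cmd to userspace just as the native path does. A rough sketch of how a caller might use that contract (hypothetical userspace loop; the header name is an assumption):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/comedi.h>	/* struct comedi_cmd, COMEDI_CMD (assumed path) */

static int start_command(int fd, struct comedi_cmd *cmd)
{
	int i;

	/* On -EAGAIN the kernel has adjusted *cmd (for example, rounded
	 * timing arguments); retry with the values it copied back. */
	for (i = 0; i < 4; i++) {
		if (ioctl(fd, COMEDI_CMD, cmd) == 0)
			return 0;
		if (errno != EAGAIN)
			return -1;
	}
	return -1;
}
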
diff --git a/drivers/staging/comedi/comedi_compat32.h b/drivers/staging/comedi/comedi_compat32.h
index 2d0a6fcf60f3..5ce77f3e8c22 100644
--- a/drivers/staging/comedi/comedi_compat32.h
+++ b/drivers/staging/comedi/comedi_compat32.h
@@ -1,23 +1,23 @@
/*
- comedi/comedi_compat32.h
- 32-bit ioctl compatibility for 64-bit comedi kernel module.
-
- Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
- Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/comedi_compat32.h
+ * 32-bit ioctl compatibility for 64-bit comedi kernel module.
+ *
+ * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
+ * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_COMPAT32_H
#define _COMEDI_COMPAT32_H
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index f143cb64d69e..727640e89c73 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1,20 +1,20 @@
/*
- comedi/comedi_fops.c
- comedi kernel module
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/comedi_fops.c
+ * comedi kernel module
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -113,6 +113,18 @@ static void comedi_dev_kref_release(struct kref *kref)
kfree(dev);
}
+/**
+ * comedi_dev_put - release a use of a comedi device structure
+ * @dev: comedi_device struct
+ *
+ * Must be called when a user of a comedi device is finished with it.
+ * When the last user of the comedi device calls this function, the
+ * comedi device is destroyed.
+ *
+ * Return 1 if the comedi device is destroyed by this call or dev is
+ * NULL, otherwise return 0. Callers must not assume the comedi
+ * device is still valid if this function returns 0.
+ */
int comedi_dev_put(struct comedi_device *dev)
{
if (dev)
@@ -220,6 +232,18 @@ static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
return dev;
}
+/**
+ * comedi_dev_get_from_minor - get comedi device by minor device number
+ * @minor: minor device number
+ *
+ * Finds the comedi device associated with the minor device number, if any,
+ * and increments its reference count. The comedi device is prevented from
+ * being freed until a matching call is made to comedi_dev_put().
+ *
+ * Return a pointer to the comedi device if it exists, with its usage
+ * reference incremented. Return NULL if no comedi device exists with the
+ * specified minor device number.
+ */
struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
{
if (minor < COMEDI_NUM_BOARD_MINORS)
@@ -323,8 +347,7 @@ static int resize_async_buffer(struct comedi_device *dev,
return -EBUSY;
}
- /* make sure buffer is an integral number of pages
- * (we round up) */
+ /* make sure buffer is an integral number of pages (we round up) */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
retval = comedi_buf_alloc(dev, s, new_size);
@@ -600,11 +623,18 @@ static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
return runflags;
}
+/**
+ * comedi_is_subdevice_running - check if async command running on subdevice
+ * @s: comedi_subdevice struct
+ *
+ * Return true if an asynchronous comedi command is active on the comedi
+ * subdevice, else return false.
+ */
bool comedi_is_subdevice_running(struct comedi_subdevice *s)
{
unsigned runflags = comedi_get_subdevice_runflags(s);
- return (runflags & SRF_RUNNING) ? true : false;
+ return (runflags & COMEDI_SRF_RUNNING) ? true : false;
}
EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
@@ -612,14 +642,14 @@ static bool comedi_is_subdevice_in_error(struct comedi_subdevice *s)
{
unsigned runflags = comedi_get_subdevice_runflags(s);
- return (runflags & SRF_ERROR) ? true : false;
+ return (runflags & COMEDI_SRF_ERROR) ? true : false;
}
static bool comedi_is_subdevice_idle(struct comedi_subdevice *s)
{
unsigned runflags = comedi_get_subdevice_runflags(s);
- return (runflags & (SRF_ERROR | SRF_RUNNING)) ? false : true;
+ return (runflags & COMEDI_SRF_BUSY_MASK) ? false : true;
}
/**
@@ -634,20 +664,20 @@ void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size)
{
s->private = kzalloc(size, GFP_KERNEL);
if (s->private)
- s->runflags |= SRF_FREE_SPRIV;
+ s->runflags |= COMEDI_SRF_FREE_SPRIV;
return s->private;
}
EXPORT_SYMBOL_GPL(comedi_alloc_spriv);
/*
- This function restores a subdevice to an idle state.
+ * This function restores a subdevice to an idle state.
*/
static void do_become_nonbusy(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- comedi_set_subdevice_runflags(s, SRF_RUNNING, 0);
+ comedi_set_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0);
if (async) {
comedi_buf_reset(s);
async->inttrig = NULL;
@@ -709,18 +739,18 @@ static int is_device_busy(struct comedi_device *dev)
}
/*
- COMEDI_DEVCONFIG
- device config ioctl
-
- arg:
- pointer to devconfig structure
-
- reads:
- devconfig structure at arg
-
- writes:
- none
-*/
+ * COMEDI_DEVCONFIG ioctl
+ * attaches (and configures) or detaches a legacy device
+ *
+ * arg:
+ * pointer to comedi_devconfig structure (NULL if detaching)
+ *
+ * reads:
+ * comedi_devconfig structure (if attaching)
+ *
+ * writes:
+ * nothing
+ */
static int do_devconfig_ioctl(struct comedi_device *dev,
struct comedi_devconfig __user *arg)
{
@@ -761,19 +791,18 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
}
/*
- COMEDI_BUFCONFIG
- buffer configuration ioctl
-
- arg:
- pointer to bufconfig structure
-
- reads:
- bufconfig at arg
-
- writes:
- modified bufconfig at arg
-
-*/
+ * COMEDI_BUFCONFIG ioctl
+ * buffer configuration
+ *
+ * arg:
+ * pointer to comedi_bufconfig structure
+ *
+ * reads:
+ * comedi_bufconfig structure
+ *
+ * writes:
+ * modified comedi_bufconfig structure
+ */
static int do_bufconfig_ioctl(struct comedi_device *dev,
struct comedi_bufconfig __user *arg)
{
@@ -823,19 +852,18 @@ copyback:
}
/*
- COMEDI_DEVINFO
- device info ioctl
-
- arg:
- pointer to devinfo structure
-
- reads:
- none
-
- writes:
- devinfo structure
-
-*/
+ * COMEDI_DEVINFO ioctl
+ * device info
+ *
+ * arg:
+ * pointer to comedi_devinfo structure
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * comedi_devinfo structure
+ */
static int do_devinfo_ioctl(struct comedi_device *dev,
struct comedi_devinfo __user *arg,
struct file *file)
@@ -870,19 +898,18 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
}
/*
- COMEDI_SUBDINFO
- subdevice info ioctl
-
- arg:
- pointer to array of subdevice info structures
-
- reads:
- none
-
- writes:
- array of subdevice info structures at arg
-
-*/
+ * COMEDI_SUBDINFO ioctl
+ * subdevices info
+ *
+ * arg:
+ * pointer to array of comedi_subdinfo structures
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * array of comedi_subdinfo structures
+ */
static int do_subdinfo_ioctl(struct comedi_device *dev,
struct comedi_subdinfo __user *arg, void *file)
{
@@ -944,19 +971,19 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
}
/*
- COMEDI_CHANINFO
- subdevice info ioctl
-
- arg:
- pointer to chaninfo structure
-
- reads:
- chaninfo structure at arg
-
- writes:
- arrays at elements of chaninfo structure
-
-*/
+ * COMEDI_CHANINFO ioctl
+ * subdevice channel info
+ *
+ * arg:
+ * pointer to comedi_chaninfo structure
+ *
+ * reads:
+ * comedi_chaninfo structure
+ *
+ * writes:
+ * array of maxdata values to chaninfo->maxdata_list if requested
+ * array of range table lengths to chaninfo->range_table_list if requested
+ */
static int do_chaninfo_ioctl(struct comedi_device *dev,
struct comedi_chaninfo __user *arg)
{
@@ -1004,20 +1031,19 @@ static int do_chaninfo_ioctl(struct comedi_device *dev,
return 0;
}
- /*
- COMEDI_BUFINFO
- buffer information ioctl
-
- arg:
- pointer to bufinfo structure
-
- reads:
- bufinfo at arg
-
- writes:
- modified bufinfo at arg
-
- */
+/*
+ * COMEDI_BUFINFO ioctl
+ * buffer information
+ *
+ * arg:
+ * pointer to comedi_bufinfo structure
+ *
+ * reads:
+ * comedi_bufinfo structure
+ *
+ * writes:
+ * modified comedi_bufinfo structure
+ */
static int do_bufinfo_ioctl(struct comedi_device *dev,
struct comedi_bufinfo __user *arg, void *file)
{
@@ -1135,8 +1161,10 @@ static int check_insn_config_length(struct comedi_insn *insn,
if (insn->n == 6)
return 0;
break;
- /* by default we allow the insn since we don't have checks for
- * all possible cases yet */
+ /*
+ * by default we allow the insn since we don't have checks for
+ * all possible cases yet
+ */
default:
pr_warn("No check for data length of config insn id %i is implemented\n",
data[0]);
@@ -1287,9 +1315,11 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
if (insn->n != 2) {
ret = -EINVAL;
} else {
- /* Most drivers ignore the base channel in
+ /*
+ * Most drivers ignore the base channel in
* insn->chanspec. Fix this here if
- * the subdevice has <= 32 channels. */
+ * the subdevice has <= 32 channels.
+ */
unsigned int orig_mask = data[0];
unsigned int shift = 0;
@@ -1326,19 +1356,19 @@ out:
}
/*
- * COMEDI_INSNLIST
- * synchronous instructions
+ * COMEDI_INSNLIST ioctl
+ * synchronous instruction list
*
- * arg:
- * pointer to sync cmd structure
+ * arg:
+ * pointer to comedi_insnlist structure
*
- * reads:
- * sync cmd struct at arg
- * instruction list
- * data (for writes)
+ * reads:
+ * comedi_insnlist structure
+ * array of comedi_insn structures from insnlist->insns pointer
+ * data (for writes) from insns[].data pointers
*
- * writes:
- * data (for reads)
+ * writes:
+ * data (for reads) to insns[].data pointers
*/
/* arbitrary limits */
#define MAX_SAMPLES 256
@@ -1415,18 +1445,18 @@ error:
}
/*
- * COMEDI_INSN
- * synchronous instructions
+ * COMEDI_INSN ioctl
+ * synchronous instruction
*
- * arg:
- * pointer to insn
+ * arg:
+ * pointer to comedi_insn structure
*
- * reads:
- * struct comedi_insn struct at arg
- * data (for writes)
+ * reads:
+ * comedi_insn structure
+ * data (for writes) from insn->data pointer
*
- * writes:
- * data (for reads)
+ * writes:
+ * data (for reads) to insn->data pointer
*/
static int do_insn_ioctl(struct comedi_device *dev,
struct comedi_insn __user *arg, void *file)
@@ -1558,6 +1588,20 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
return 0;
}
+/*
+ * COMEDI_CMD ioctl
+ * asynchronous acquisition command set-up
+ *
+ * arg:
+ * pointer to comedi_cmd structure
+ *
+ * reads:
+ * comedi_cmd structure
+ * channel/range list from cmd->chanlist pointer
+ *
+ * writes:
+ * possibly modified comedi_cmd structure (when -EAGAIN returned)
+ */
static int do_cmd_ioctl(struct comedi_device *dev,
struct comedi_cmd __user *arg, void *file)
{
@@ -1634,10 +1678,13 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & CMDF_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
- comedi_set_subdevice_runflags(s, SRF_ERROR | SRF_RUNNING, SRF_RUNNING);
+ comedi_set_subdevice_runflags(s, COMEDI_SRF_BUSY_MASK,
+ COMEDI_SRF_RUNNING);
- /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
- * comedi_read() or comedi_write() */
+ /*
+ * Set s->busy _after_ setting COMEDI_SRF_RUNNING flag to avoid
+ * race with comedi_read() or comedi_write().
+ */
s->busy = file;
ret = s->do_cmd(dev, s);
if (ret == 0)
@@ -1650,20 +1697,19 @@ cleanup:
}
/*
- COMEDI_CMDTEST
- command testing ioctl
-
- arg:
- pointer to cmd structure
-
- reads:
- cmd structure at arg
- channel/range list
-
- writes:
- modified cmd structure at arg
-
-*/
+ * COMEDI_CMDTEST ioctl
+ * asynchronous acquisition command testing
+ *
+ * arg:
+ * pointer to comedi_cmd structure
+ *
+ * reads:
+ * comedi_cmd structure
+ * channel/range list from cmd->chanlist pointer
+ *
+ * writes:
+ * possibly modified comedi_cmd structure
+ */
static int do_cmdtest_ioctl(struct comedi_device *dev,
struct comedi_cmd __user *arg, void *file)
{
@@ -1706,20 +1752,18 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
}
/*
- COMEDI_LOCK
- lock subdevice
-
- arg:
- subdevice number
-
- reads:
- none
-
- writes:
- none
-
-*/
-
+ * COMEDI_LOCK ioctl
+ * lock subdevice
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg,
void *file)
{
@@ -1742,21 +1786,18 @@ static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg,
}
/*
- COMEDI_UNLOCK
- unlock subdevice
-
- arg:
- subdevice number
-
- reads:
- none
-
- writes:
- none
-
- This function isn't protected by the semaphore, since
- we already own the lock.
-*/
+ * COMEDI_UNLOCK ioctl
+ * unlock subdevice
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg,
void *file)
{
@@ -1779,19 +1820,18 @@ static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg,
}
/*
- COMEDI_CANCEL
- cancel acquisition ioctl
-
- arg:
- subdevice number
-
- reads:
- nothing
-
- writes:
- nothing
-
-*/
+ * COMEDI_CANCEL ioctl
+ * cancel asynchronous acquisition
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
void *file)
{
@@ -1813,19 +1853,18 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
}
/*
- COMEDI_POLL ioctl
- instructs driver to synchronize buffers
-
- arg:
- subdevice number
-
- reads:
- nothing
-
- writes:
- nothing
-
-*/
+ * COMEDI_POLL ioctl
+ * instructs driver to synchronize buffers
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg,
void *file)
{
@@ -1941,8 +1980,10 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
mutex_lock(&dev->mutex);
- /* Device config is special, because it must work on
- * an unconfigured device. */
+ /*
+ * Device config is special, because it must work on
+ * an unconfigured device.
+ */
if (cmd == COMEDI_DEVCONFIG) {
if (minor >= COMEDI_NUM_BOARD_MINORS) {
/* Device config not appropriate on non-board minors. */
@@ -1954,8 +1995,10 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
if (rc == 0) {
if (arg == 0 &&
dev->minor >= comedi_num_legacy_minors) {
- /* Successfully unconfigured a dynamically
- * allocated device. Try and remove it. */
+ /*
+ * Successfully unconfigured a dynamically
+ * allocated device. Try and remove it.
+ */
if (comedi_clear_board_dev(dev)) {
mutex_unlock(&dev->mutex);
comedi_free_board_dev(dev);
@@ -2581,6 +2624,17 @@ static const struct file_operations comedi_fops = {
.llseek = noop_llseek,
};
+/**
+ * comedi_event - handle events for asynchronous comedi command
+ * @dev: comedi_device struct
+ * @s: comedi_subdevice struct associated with dev
+ * Context: interrupt (usually), s->spin_lock spin-lock not held
+ *
+ * If an asynchronous comedi command is active on the subdevice, process
+ * any COMEDI_CB_... event flags that have been set, usually by an
+ * interrupt handler. These may change the run state of the asynchronous
+ * command, wake a task, and/or send a SIGIO signal.
+ */
void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
@@ -2591,18 +2645,21 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
return;
if (s->async->events & COMEDI_CB_CANCEL_MASK)
- runflags_mask |= SRF_RUNNING;
+ runflags_mask |= COMEDI_SRF_RUNNING;
/*
* Remember if an error event has occurred, so an error
* can be returned the next time the user does a read().
*/
if (s->async->events & COMEDI_CB_ERROR_MASK) {
- runflags_mask |= SRF_ERROR;
- runflags |= SRF_ERROR;
+ runflags_mask |= COMEDI_SRF_ERROR;
+ runflags |= COMEDI_SRF_ERROR;
}
if (runflags_mask) {
- /*sets SRF_ERROR and SRF_RUNNING together atomically */
+ /*
+ * Sets COMEDI_SRF_ERROR and COMEDI_SRF_RUNNING together
+ * atomically.
+ */
comedi_set_subdevice_runflags(s, runflags_mask, runflags);
}
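/*
 * Illustrative sketch, not part of this patch: the typical shape of a board
 * interrupt handler that feeds comedi_event().  The handler name, the
 * hardware status checks and the <linux/interrupt.h> usage are assumptions
 * for the example; only the COMEDI_CB_* flags and comedi_event() come from
 * the comedi core.
 */
#include <linux/interrupt.h>

static irqreturn_t my_board_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;

	/* ... read the hardware status and move samples into s->async ... */

	if (0 /* hardware reported a FIFO overrun */)
		s->async->events |= COMEDI_CB_ERROR;
	if (0 /* the programmed number of scans has completed */)
		s->async->events |= COMEDI_CB_EOA;

	/* process the flags: update runflags, wake readers, send SIGIO */
	comedi_event(dev, s);

	return IRQ_HANDLED;
}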
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
index 0529bae8e5ac..7e784399a16f 100644
--- a/drivers/staging/comedi/comedi_pcmcia.c
+++ b/drivers/staging/comedi/comedi_pcmcia.c
@@ -19,10 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-#include "comedidev.h"
+#include "comedi_pcmcia.h"
/**
* comedi_to_pcmcia_dev() - comedi_device pointer to pcmcia_device pointer.
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h
new file mode 100644
index 000000000000..5d3db2b9b4a1
--- /dev/null
+++ b/drivers/staging/comedi/comedi_pcmcia.h
@@ -0,0 +1,55 @@
+/*
+ * comedi_pcmcia.h
+ * header file for Comedi PCMCIA drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMEDI_PCMCIA_H
+#define _COMEDI_PCMCIA_H
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "comedidev.h"
+
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *);
+
+int comedi_pcmcia_enable(struct comedi_device *,
+ int (*conf_check)(struct pcmcia_device *, void *));
+void comedi_pcmcia_disable(struct comedi_device *);
+
+int comedi_pcmcia_auto_config(struct pcmcia_device *, struct comedi_driver *);
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *);
+
+int comedi_pcmcia_driver_register(struct comedi_driver *,
+ struct pcmcia_driver *);
+void comedi_pcmcia_driver_unregister(struct comedi_driver *,
+ struct pcmcia_driver *);
+
+/**
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pcmcia_driver: pcmcia_driver struct
+ *
+ * Helper macro for comedi PCMCIA drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
+ module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
+ comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
+
+#endif /* _COMEDI_PCMCIA_H */
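/*
 * Illustrative sketch, not part of this patch: how a comedi PCMCIA driver
 * consumes this header.  The "my_board" names, my_auto_attach() and
 * my_pcmcia_ids[] are hypothetical; the id table and the attach body are
 * assumed to be defined elsewhere in the driver.
 */
#include "../comedi_pcmcia.h"

static int my_auto_attach(struct comedi_device *dev, unsigned long context);

static struct comedi_driver my_board_driver = {
	.driver_name	= "my_board",
	.module		= THIS_MODULE,
	.auto_attach	= my_auto_attach,
	.detach		= comedi_pcmcia_disable,
};

static int my_board_pcmcia_attach(struct pcmcia_device *link)
{
	return comedi_pcmcia_auto_config(link, &my_board_driver);
}

static struct pcmcia_driver my_board_pcmcia_driver = {
	.name		= "my_board",
	.owner		= THIS_MODULE,
	.id_table	= my_pcmcia_ids,
	.probe		= my_board_pcmcia_attach,
	.remove		= comedi_pcmcia_auto_unconfig,
};
module_comedi_pcmcia_driver(my_board_driver, my_board_pcmcia_driver);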
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
index 0b862a64c049..68b75e8feec0 100644
--- a/drivers/staging/comedi/comedi_usb.c
+++ b/drivers/staging/comedi/comedi_usb.c
@@ -17,9 +17,8 @@
*/
#include <linux/module.h>
-#include <linux/usb.h>
-#include "comedidev.h"
+#include "comedi_usb.h"
/**
* comedi_to_usb_interface() - comedi_device pointer to usb_interface pointer.
diff --git a/drivers/staging/comedi/comedi_usb.h b/drivers/staging/comedi/comedi_usb.h
new file mode 100644
index 000000000000..721128bece3c
--- /dev/null
+++ b/drivers/staging/comedi/comedi_usb.h
@@ -0,0 +1,50 @@
+/*
+ * comedi_usb.h
+ * header file for USB Comedi drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMEDI_USB_H
+#define _COMEDI_USB_H
+
+#include <linux/usb.h>
+
+#include "comedidev.h"
+
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *);
+struct usb_device *comedi_to_usb_dev(struct comedi_device *);
+
+int comedi_usb_auto_config(struct usb_interface *, struct comedi_driver *,
+ unsigned long context);
+void comedi_usb_auto_unconfig(struct usb_interface *);
+
+int comedi_usb_driver_register(struct comedi_driver *, struct usb_driver *);
+void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
+
+/**
+ * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
+ * @__comedi_driver: comedi_driver struct
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for comedi USB drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
+ module_driver(__comedi_driver, comedi_usb_driver_register, \
+ comedi_usb_driver_unregister, &(__usb_driver))
+
+#endif /* _COMEDI_USB_H */
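/*
 * Illustrative sketch, not part of this patch: the matching USB side.  A
 * comedi USB driver's probe routine hands the interface to the comedi core
 * together with a driver-defined context value (here id->driver_info), which
 * is later passed back to the comedi driver's (*auto_attach)().  The
 * my_usb_comedi_driver and my_usb_ids[] symbols are hypothetical and assumed
 * to be defined above.
 */
#include "../comedi_usb.h"

static int my_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return comedi_usb_auto_config(intf, &my_usb_comedi_driver,
				      id->driver_info);
}

static struct usb_driver my_usb_driver = {
	.name		= "my_usb_board",
	.probe		= my_usb_probe,
	.disconnect	= comedi_usb_auto_unconfig,
	.id_table	= my_usb_ids,
};
module_comedi_usb_driver(my_usb_comedi_driver, my_usb_driver);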
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 77be191988ca..e138eb0dc374 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -299,34 +299,25 @@ struct comedi_device {
void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
-/* we can expand the number of bits used to encode devices/subdevices into
- the minor number soon, after more distros support > 8 bit minor numbers
- (like after Debian Etch gets released) */
-enum comedi_minor_bits {
- COMEDI_DEVICE_MINOR_MASK = 0xf,
- COMEDI_SUBDEVICE_MINOR_MASK = 0xf0
-};
-
-static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
-static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
-
struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
int comedi_dev_put(struct comedi_device *dev);
-void init_polling(void);
-void cleanup_polling(void);
-void start_polling(struct comedi_device *);
-void stop_polling(struct comedi_device *);
-
-/* subdevice runflags */
-enum subdevice_runflags {
- SRF_RT = 0x00000002,
- /* indicates an COMEDI_CB_ERROR event has occurred since the last
- * command was started */
- SRF_ERROR = 0x00000004,
- SRF_RUNNING = 0x08000000,
- SRF_FREE_SPRIV = 0x80000000, /* free s->private on detach */
-};
+/**
+ * comedi_subdevice "runflags"
+ * @COMEDI_SRF_RT: DEPRECATED: command is running real-time
+ * @COMEDI_SRF_ERROR: indicates a COMEDI_CB_ERROR event has occurred
+ * since the last command was started
+ * @COMEDI_SRF_RUNNING: command is running
+ * @COMEDI_SRF_FREE_SPRIV: free s->private on detach
+ *
+ * @COMEDI_SRF_BUSY_MASK: runflags that indicate the subdevice is "busy"
+ */
+#define COMEDI_SRF_RT BIT(1)
+#define COMEDI_SRF_ERROR BIT(2)
+#define COMEDI_SRF_RUNNING BIT(27)
+#define COMEDI_SRF_FREE_SPRIV BIT(31)
+
+#define COMEDI_SRF_BUSY_MASK (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING)
bool comedi_is_subdevice_running(struct comedi_subdevice *s);
@@ -605,66 +596,4 @@ void comedi_pci_driver_unregister(struct comedi_driver *, struct pci_driver *);
module_driver(__comedi_driver, comedi_pci_driver_register, \
comedi_pci_driver_unregister, &(__pci_driver))
-/* comedi_pcmcia.c - comedi PCMCIA driver specific functions */
-
-struct pcmcia_driver;
-struct pcmcia_device;
-
-struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *);
-
-int comedi_pcmcia_enable(struct comedi_device *,
- int (*conf_check)(struct pcmcia_device *, void *));
-void comedi_pcmcia_disable(struct comedi_device *);
-
-int comedi_pcmcia_auto_config(struct pcmcia_device *, struct comedi_driver *);
-void comedi_pcmcia_auto_unconfig(struct pcmcia_device *);
-
-int comedi_pcmcia_driver_register(struct comedi_driver *,
- struct pcmcia_driver *);
-void comedi_pcmcia_driver_unregister(struct comedi_driver *,
- struct pcmcia_driver *);
-
-/**
- * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver
- * @__comedi_driver: comedi_driver struct
- * @__pcmcia_driver: pcmcia_driver struct
- *
- * Helper macro for comedi PCMCIA drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
- module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
- comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
-
-/* comedi_usb.c - comedi USB driver specific functions */
-
-struct usb_driver;
-struct usb_interface;
-
-struct usb_interface *comedi_to_usb_interface(struct comedi_device *);
-struct usb_device *comedi_to_usb_dev(struct comedi_device *);
-
-int comedi_usb_auto_config(struct usb_interface *, struct comedi_driver *,
- unsigned long context);
-void comedi_usb_auto_unconfig(struct usb_interface *);
-
-int comedi_usb_driver_register(struct comedi_driver *, struct usb_driver *);
-void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
-
-/**
- * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
- * @__comedi_driver: comedi_driver struct
- * @__usb_driver: usb_driver struct
- *
- * Helper macro for comedi USB drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
- module_driver(__comedi_driver, comedi_usb_driver_register, \
- comedi_usb_driver_unregister, &(__usb_driver))
-
#endif /* _COMEDIDEV_H */
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 61802d7947ae..f32e71438948 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -125,7 +125,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
if (dev->subdevices) {
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
- if (s->runflags & SRF_FREE_SPRIV)
+ if (s->runflags & COMEDI_SRF_FREE_SPRIV)
kfree(s->private);
comedi_free_subdevice_minor(s);
if (s->async) {
diff --git a/drivers/staging/comedi/drivers/8253.h b/drivers/staging/comedi/drivers/8253.h
index 9f4c1411719d..51b9c8d279c0 100644
--- a/drivers/staging/comedi/drivers/8253.h
+++ b/drivers/staging/comedi/drivers/8253.h
@@ -1,20 +1,20 @@
/*
- comedi/drivers/8253.h
- Header file for 8253
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/8253.h
+ * Header file for 8253
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _8253_H
#define _8253_H
@@ -44,9 +44,11 @@ static inline void i8253_cascade_ns_to_timer(int i8253_osc_base,
unsigned int start;
unsigned int ns_low, ns_high;
static const unsigned int max_count = 0x10000;
- /* exit early if everything is already correct (this can save time
+ /*
+ * exit early if everything is already correct (this can save time
* since this function may be called repeatedly during command tests
- * and execution) */
+ * and execution)
+ */
div1 = *d1 ? *d1 : max_count;
div2 = *d2 ? *d2 : max_count;
divider = div1 * div2;
@@ -114,13 +116,14 @@ static inline void i8253_cascade_ns_to_timer(int i8253_osc_base,
}
*nanosec = div1 * div2 * i8253_osc_base;
- /* masking is done since counter maps zero to 0x10000 */
+ /* masking is done since counter maps zero to 0x10000 */
*d1 = div1 & 0xffff;
*d2 = div2 & 0xffff;
}
#ifndef CMDTEST
-/* i8254_load programs 8254 counter chip. It should also work for the 8253.
+/*
+ * i8254_load programs 8254 counter chip. It should also work for the 8253.
* base_address is the lowest io address
* for the chip (the address of counter 0).
* counter_number is the counter you want to load (0,1 or 2)
@@ -158,12 +161,12 @@ static inline int i8254_load(unsigned long base_address, unsigned int regshift,
return -1;
byte = counter_number << 6;
- byte |= 0x30; /* load low then high byte */
- byte |= (mode << 1); /* set counter mode */
+ byte |= 0x30; /* load low then high byte */
+ byte |= (mode << 1); /* set counter mode */
outb(byte, base_address + (i8254_control_reg << regshift));
- byte = count & 0xff; /* lsb of counter value */
+ byte = count & 0xff; /* lsb of counter value */
outb(byte, base_address + (counter_number << regshift));
- byte = (count >> 8) & 0xff; /* msb of counter value */
+ byte = (count >> 8) & 0xff; /* msb of counter value */
outb(byte, base_address + (counter_number << regshift));
return 0;
@@ -187,18 +190,18 @@ static inline int i8254_mm_load(void __iomem *base_address,
return -1;
byte = counter_number << 6;
- byte |= 0x30; /* load low then high byte */
- byte |= (mode << 1); /* set counter mode */
+ byte |= 0x30; /* load low then high byte */
+ byte |= (mode << 1); /* set counter mode */
writeb(byte, base_address + (i8254_control_reg << regshift));
- byte = count & 0xff; /* lsb of counter value */
+ byte = count & 0xff; /* lsb of counter value */
writeb(byte, base_address + (counter_number << regshift));
- byte = (count >> 8) & 0xff; /* msb of counter value */
+ byte = (count >> 8) & 0xff; /* msb of counter value */
writeb(byte, base_address + (counter_number << regshift));
return 0;
}
-/* Returns 16 bit counter value, should work for 8253 also.*/
+/* Returns 16 bit counter value, should work for 8253 also. */
static inline int i8254_read(unsigned long base_address, unsigned int regshift,
unsigned int counter_number)
{
@@ -208,13 +211,13 @@ static inline int i8254_read(unsigned long base_address, unsigned int regshift,
if (counter_number > 2)
return -1;
- /* latch counter */
+ /* latch counter */
byte = counter_number << 6;
outb(byte, base_address + (i8254_control_reg << regshift));
- /* read lsb */
+ /* read lsb */
ret = inb(base_address + (counter_number << regshift));
- /* read msb */
+ /* read msb */
ret += inb(base_address + (counter_number << regshift)) << 8;
return ret;
@@ -230,13 +233,13 @@ static inline int i8254_mm_read(void __iomem *base_address,
if (counter_number > 2)
return -1;
- /* latch counter */
+ /* latch counter */
byte = counter_number << 6;
writeb(byte, base_address + (i8254_control_reg << regshift));
- /* read lsb */
+ /* read lsb */
ret = readb(base_address + (counter_number << regshift));
- /* read msb */
+ /* read msb */
ret += readb(base_address + (counter_number << regshift)) << 8;
return ret;
@@ -252,9 +255,9 @@ static inline void i8254_write(unsigned long base_address,
if (counter_number > 2)
return;
- byte = count & 0xff; /* lsb of counter value */
+ byte = count & 0xff; /* lsb of counter value */
outb(byte, base_address + (counter_number << regshift));
- byte = (count >> 8) & 0xff; /* msb of counter value */
+ byte = (count >> 8) & 0xff; /* msb of counter value */
outb(byte, base_address + (counter_number << regshift));
}
@@ -268,13 +271,14 @@ static inline void i8254_mm_write(void __iomem *base_address,
if (counter_number > 2)
return;
- byte = count & 0xff; /* lsb of counter value */
+ byte = count & 0xff; /* lsb of counter value */
writeb(byte, base_address + (counter_number << regshift));
- byte = (count >> 8) & 0xff; /* msb of counter value */
+ byte = (count >> 8) & 0xff; /* msb of counter value */
writeb(byte, base_address + (counter_number << regshift));
}
-/* Set counter mode, should work for 8253 also.
+/*
+ * Set counter mode, should work for 8253 also.
* Note: the 'mode' value is different to that for i8254_load() and comes
* from the INSN_CONFIG_8254_SET_MODE command:
* I8254_MODE0, I8254_MODE1, ..., I8254_MODE5
@@ -293,8 +297,8 @@ static inline int i8254_set_mode(unsigned long base_address,
return -1;
byte = counter_number << 6;
- byte |= 0x30; /* load low then high byte */
- byte |= mode; /* set counter mode and BCD|binary */
+ byte |= 0x30; /* load low then high byte */
+ byte |= mode; /* set counter mode and BCD|binary */
outb(byte, base_address + (i8254_control_reg << regshift));
return 0;
@@ -313,8 +317,8 @@ static inline int i8254_mm_set_mode(void __iomem *base_address,
return -1;
byte = counter_number << 6;
- byte |= 0x30; /* load low then high byte */
- byte |= mode; /* set counter mode and BCD|binary */
+ byte |= 0x30; /* load low then high byte */
+ byte |= mode; /* set counter mode and BCD|binary */
writeb(byte, base_address + (i8254_control_reg << regshift));
return 0;
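/*
 * Illustrative sketch, not part of this patch: programming counter 0 of an
 * I/O mapped 8254 with these helpers, e.g. from a board driver's (*attach)
 * routine.  MY_TIMER_BASE and the divisor are hypothetical; regshift is 0
 * for byte-spaced registers, and the I8254_MODE*/I8254_BINARY values come
 * from the INSN_CONFIG_8254_SET_MODE definitions in comedi.h.
 */
static void my_board_setup_pacer(struct comedi_device *dev)
{
	unsigned long timer_base = dev->iobase + MY_TIMER_BASE;
	unsigned int count;

	/* counter 0: mode 2 (rate generator), binary counting */
	i8254_set_mode(timer_base, 0, 0, I8254_MODE2 | I8254_BINARY);
	/* load an initial divisor of 1000 (low byte, then high byte) */
	i8254_write(timer_base, 0, 0, 1000);
	/* latch and read back the current count of counter 0 */
	count = i8254_read(timer_base, 0, 0);
	dev_dbg(dev->class_dev, "counter 0 currently at %u\n", count);
}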
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index 34d4d8b5f31e..c2f15de6a547 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -1,76 +1,51 @@
/*
- comedi/drivers/8255.c
- Driver for 8255
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: 8255
-Description: generic 8255 support
-Devices: [standard] 8255 (8255)
-Author: ds
-Status: works
-Updated: Fri, 7 Jun 2002 12:56:45 -0700
-
-The classic in digital I/O. The 8255 appears in Comedi as a single
-digital I/O subdevice with 24 channels. The channel 0 corresponds
-to the 8255's port A, bit 0; channel 23 corresponds to port C, bit
-7. Direction configuration is done in blocks, with channels 0-7,
-8-15, 16-19, and 20-23 making up the 4 blocks. The only 8255 mode
-supported is mode 0.
-
-You should enable compilation this driver if you plan to use a board
-that has an 8255 chip. For multifunction boards, the main driver will
-configure the 8255 subdevice automatically.
-
-This driver also works independently with ISA and PCI cards that
-directly map the 8255 registers to I/O ports, including cards with
-multiple 8255 chips. To configure the driver for such a card, the
-option list should be a list of the I/O port bases for each of the
-8255 chips. For example,
-
- comedi_config /dev/comedi0 8255 0x200,0x204,0x208,0x20c
-
-Note that most PCI 8255 boards do NOT work with this driver, and
-need a separate driver as a wrapper. For those that do work, the
-I/O port base address can be found in the output of 'lspci -v'.
-
-*/
+ * comedi/drivers/8255.c
+ * Driver for 8255
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- This file contains an exported subdevice for driving an 8255.
-
- To use this subdevice as part of another driver, you need to
- set up the subdevice in the attach function of the driver by
- calling:
-
- subdev_8255_init(device, subdevice, io_function, iobase)
-
- device and subdevice are pointers to the device and subdevice
- structures. io_function will be called to provide the
- low-level input/output to the device, i.e., actual register
- access. io_function will be called with the value of iobase
- as the last parameter. If the 8255 device is mapped as 4
- consecutive I/O ports, you can use NULL for io_function
- and the I/O port base for iobase, and an internal function will
- handle the register access.
-
- In addition, if the main driver handles interrupts, you can
- enable commands on the subdevice by calling subdev_8255_init_irq()
- instead. Then, when you get an interrupt that is likely to be
- from the 8255, you should call subdev_8255_interrupt(), which
- will copy the latched value to a Comedi buffer.
+ * Driver: 8255
+ * Description: generic 8255 support
+ * Devices: [standard] 8255 (8255)
+ * Author: ds
+ * Status: works
+ * Updated: Fri, 7 Jun 2002 12:56:45 -0700
+ *
+ * The classic in digital I/O. The 8255 appears in Comedi as a single
+ * digital I/O subdevice with 24 channels. Channel 0 corresponds
+ * to the 8255's port A, bit 0; channel 23 corresponds to port C, bit
+ * 7. Direction configuration is done in blocks, with channels 0-7,
+ * 8-15, 16-19, and 20-23 making up the 4 blocks. The only 8255 mode
+ * supported is mode 0.
+ *
+ * You should enable compilation of this driver if you plan to use a board
+ * that has an 8255 chip. For multifunction boards, the main driver will
+ * configure the 8255 subdevice automatically.
+ *
+ * This driver also works independently with ISA and PCI cards that
+ * directly map the 8255 registers to I/O ports, including cards with
+ * multiple 8255 chips. To configure the driver for such a card, the
+ * option list should be a list of the I/O port bases for each of the
+ * 8255 chips. For example,
+ *
+ * comedi_config /dev/comedi0 8255 0x200,0x204,0x208,0x20c
+ *
+ * Note that most PCI 8255 boards do NOT work with this driver, and
+ * need a separate driver as a wrapper. For those that do work, the
+ * I/O port base address can be found in the output of 'lspci -v'.
*/
#include <linux/module.h>
@@ -218,6 +193,33 @@ static int __subdev_8255_init(struct comedi_device *dev,
return 0;
}
+/**
+ * subdev_8255_init - initialize DIO subdevice for driving I/O mapped 8255
+ * @dev: comedi device owning subdevice
+ * @s: comedi subdevice to initialize
+ * @io: (optional) register I/O call-back function
+ * @regbase: offset of 8255 registers from dev->iobase, or call-back context
+ *
+ * Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
+ *
+ * If the optional I/O call-back function is provided, its prototype is of
+ * the following form:
+ *
+ *	int my_8255_callback(struct comedi_device *dev,
+ *			     int dir, int port, int data,
+ *			     unsigned long regbase);
+ *
+ * where 'dev' and 'regbase' match the values passed to this function,
+ * 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
+ * is the direction (0 for read, 1 for write) and 'data' is the value to be
+ * written. It should return 0 if writing or the value read if reading.
+ *
+ * If the optional I/O call-back function is not provided, an internal
+ * call-back function is used which uses consecutive I/O port addresses
+ * starting at dev->iobase + regbase.
+ *
+ * Return: -ENOMEM if failed to allocate memory, zero on success.
+ */
int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
int (*io)(struct comedi_device *,
int, int, int, unsigned long),
@@ -227,6 +229,33 @@ int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
}
EXPORT_SYMBOL_GPL(subdev_8255_init);
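/*
 * Illustrative sketch, not part of this patch: a multifunction board driver
 * supplying its own register I/O call-back because its 8255 registers are
 * spaced two bytes apart instead of being consecutive.  The my_8255_io()
 * name, the MY_8255_REG offset and the register spacing are hypothetical.
 */
static int my_8255_io(struct comedi_device *dev,
		      int dir, int port, int data, unsigned long regbase)
{
	unsigned long reg = dev->iobase + regbase + 2 * port;

	if (dir) {
		outb(data, reg);
		return 0;
	}
	return inb(reg);
}

static int my_board_init_dio(struct comedi_device *dev,
			     struct comedi_subdevice *s)
{
	/* 8255 registers start MY_8255_REG bytes into the board's window */
	return subdev_8255_init(dev, s, my_8255_io, MY_8255_REG);
}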
+/**
+ * subdev_8255_mm_init - initialize DIO subdevice for driving mmio-mapped 8255
+ * @dev: comedi device owning subdevice
+ * @s: comedi subdevice to initialize
+ * @io: (optional) register I/O call-back function
+ * @regbase: offset of 8255 registers from dev->mmio, or call-back context
+ *
+ * Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
+ *
+ * If the optional I/O call-back function is provided, its prototype is of
+ * the following form:
+ *
+ *	int my_8255_callback(struct comedi_device *dev,
+ *			     int dir, int port, int data,
+ *			     unsigned long regbase);
+ *
+ * where 'dev' and 'regbase' match the values passed to this function,
+ * 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
+ * is the direction (0 for read, 1 for write) and 'data' is the value to be
+ * written. It should return 0 if writing or the value read if reading.
+ *
+ * If the optional I/O call-back function is not provided, an internal
+ * call-back function is used which uses consecutive MMIO virtual addresses
+ * starting at dev->mmio + regbase.
+ *
+ * Return: -ENOMEM if failed to allocate memory, zero on success.
+ */
int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
int (*io)(struct comedi_device *,
int, int, int, unsigned long),
@@ -235,10 +264,9 @@ int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
return __subdev_8255_init(dev, s, io, regbase, true);
}
EXPORT_SYMBOL_GPL(subdev_8255_mm_init);
-/*
-
- Start of the 8255 standalone device
+/*
+ * Start of the 8255 standalone device
*/
static int dev_8255_attach(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/8255.h b/drivers/staging/comedi/drivers/8255.h
index 5985c8e0330f..934b940ebd3c 100644
--- a/drivers/staging/comedi/drivers/8255.h
+++ b/drivers/staging/comedi/drivers/8255.h
@@ -1,20 +1,20 @@
/*
- module/8255.h
- Header file for 8255
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * module/8255.h
+ * Header file for 8255
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _8255_H
#define _8255_H
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 8b9589828855..984764211a2d 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -22,33 +22,44 @@
*/
/*
-Driver: 8255_pci
-Description: Generic PCI based 8255 Digital I/O boards
-Devices: (ADLink) PCI-7224 [adl_pci-7224] - 24 channels
- (ADLink) PCI-7248 [adl_pci-7248] - 48 channels
- (ADLink) PCI-7296 [adl_pci-7296] - 96 channels
- (Measurement Computing) PCI-DIO24 [cb_pci-dio24] - 24 channels
- (Measurement Computing) PCI-DIO24H [cb_pci-dio24h] - 24 channels
- (Measurement Computing) PCI-DIO48H [cb_pci-dio48h] - 48 channels
- (Measurement Computing) PCI-DIO96H [cb_pci-dio96h] - 96 channels
- (National Instruments) PCI-DIO-96 [ni_pci-dio-96] - 96 channels
- (National Instruments) PCI-DIO-96B [ni_pci-dio-96b] - 96 channels
- (National Instruments) PXI-6508 [ni_pxi-6508] - 96 channels
- (National Instruments) PCI-6503 [ni_pci-6503] - 24 channels
- (National Instruments) PCI-6503B [ni_pci-6503b] - 24 channels
- (National Instruments) PCI-6503X [ni_pci-6503x] - 24 channels
- (National Instruments) PXI-6503 [ni_pxi-6503] - 24 channels
-Author: H Hartley Sweeten <hsweeten@visionengravers.com>
-Updated: Wed, 12 Sep 2012 11:52:01 -0700
-Status: untested
-
-Some of these boards also have an 8254 programmable timer/counter
-chip. This chip is not currently supported by this driver.
-
-Interrupt support for these boards is also not currently supported.
-
-Configuration Options: not applicable, uses PCI auto config
-*/
+ * Driver: 8255_pci
+ * Description: Generic PCI based 8255 Digital I/O boards
+ * Devices: [ADLink] PCI-7224 (adl_pci-7224), PCI-7248 (adl_pci-7248),
+ * PCI-7296 (adl_pci-7296),
+ * [Measurement Computing] PCI-DIO24 (cb_pci-dio24),
+ * PCI-DIO24H (cb_pci-dio24h), PCI-DIO48H (cb_pci-dio48h),
+ * PCI-DIO96H (cb_pci-dio96h),
+ * [National Instruments] PCI-DIO-96 (ni_pci-dio-96),
+ * PCI-DIO-96B (ni_pci-dio-96b), PXI-6508 (ni_pxi-6508),
+ * PCI-6503 (ni_pci-6503), PCI-6503B (ni_pci-6503b),
+ * PCI-6503X (ni_pci-6503x), PXI-6503 (ni_pxi-6503)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Wed, 12 Sep 2012 11:52:01 -0700
+ * Status: untested
+ *
+ * These boards have one or more 8255 digital I/O chips, each of which
+ * is supported as a separate 24-channel DIO subdevice.
+ *
+ * Boards with 24 DIO channels (1 DIO subdevice):
+ *
+ * PCI-7224, PCI-DIO24, PCI-DIO24H, PCI-6503, PCI-6503B, PCI-6503X,
+ * PXI-6503
+ *
+ * Boards with 48 DIO channels (2 DIO subdevices):
+ *
+ * PCI-7248, PCI-DIO48H
+ *
+ * Boards with 96 DIO channels (4 DIO subdevices):
+ *
+ * PCI-7296, PCI-DIO96H, PCI-DIO-96, PCI-DIO-96B, PXI-6508
+ *
+ * Some of these boards also have an 8254 programmable timer/counter
+ * chip. This chip is not currently supported by this driver.
+ *
+ * Interrupt support for these boards is also not currently supported.
+ *
+ * Configuration Options: not applicable, uses PCI auto config.
+ */
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 84fdf20ca986..7d1fbd53a8ab 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -3,6 +3,7 @@
ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
# Comedi "helper" modules
+obj-$(CONFIG_COMEDI_ISADMA) += comedi_isadma.o
# Comedi misc drivers
obj-$(CONFIG_COMEDI_BOND) += comedi_bond.o
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
deleted file mode 100644
index bfa9228c833f..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ /dev/null
@@ -1,2365 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- *
- */
-
-/* Card Specific information */
-#define APCI1500_ADDRESS_RANGE 4
-
-/* DIGITAL INPUT-OUTPUT DEFINE */
-
-#define APCI1500_DIGITAL_OP 2
-#define APCI1500_DIGITAL_IP 0
-#define APCI1500_AND 2
-#define APCI1500_OR 4
-#define APCI1500_OR_PRIORITY 6
-#define APCI1500_CLK_SELECT 0
-#define COUNTER1 0
-#define COUNTER2 1
-#define COUNTER3 2
-#define APCI1500_COUNTER 0x20
-#define APCI1500_TIMER 0
-#define APCI1500_WATCHDOG 0
-#define APCI1500_SINGLE 0
-#define APCI1500_CONTINUOUS 0x80
-#define APCI1500_DISABLE 0
-#define APCI1500_ENABLE 1
-#define APCI1500_SOFTWARE_TRIGGER 0x4
-#define APCI1500_HARDWARE_TRIGGER 0x10
-#define APCI1500_SOFTWARE_GATE 0
-#define APCI1500_HARDWARE_GATE 0x8
-#define START 0
-#define STOP 1
-#define TRIGGER 2
-
-/*
- * Zillog I/O enumeration
- */
-enum {
- APCI1500_Z8536_PORT_C,
- APCI1500_Z8536_PORT_B,
- APCI1500_Z8536_PORT_A,
- APCI1500_Z8536_CONTROL_REGISTER
-};
-
-/*
- * Z8536 CIO Internal Address
- */
-enum {
- APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- APCI1500_RW_PORT_A_INTERRUPT_CONTROL,
- APCI1500_RW_PORT_B_INTERRUPT_CONTROL,
- APCI1500_RW_TIMER_COUNTER_INTERRUPT_VECTOR,
- APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
- APCI1500_RW_PORT_C_DATA_DIRECTION,
- APCI1500_RW_PORT_C_SPECIAL_IO_CONTROL,
-
- APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- APCI1500_RW_CPT_TMR1_CMD_STATUS,
- APCI1500_RW_CPT_TMR2_CMD_STATUS,
- APCI1500_RW_CPT_TMR3_CMD_STATUS,
- APCI1500_RW_PORT_A_DATA,
- APCI1500_RW_PORT_B_DATA,
- APCI1500_RW_PORT_C_DATA,
-
- APCI1500_R_CPT_TMR1_VALUE_HIGH,
- APCI1500_R_CPT_TMR1_VALUE_LOW,
- APCI1500_R_CPT_TMR2_VALUE_HIGH,
- APCI1500_R_CPT_TMR2_VALUE_LOW,
- APCI1500_R_CPT_TMR3_VALUE_HIGH,
- APCI1500_R_CPT_TMR3_VALUE_LOW,
- APCI1500_RW_CPT_TMR1_TIME_CST_HIGH,
- APCI1500_RW_CPT_TMR1_TIME_CST_LOW,
- APCI1500_RW_CPT_TMR2_TIME_CST_HIGH,
- APCI1500_RW_CPT_TMR2_TIME_CST_LOW,
- APCI1500_RW_CPT_TMR3_TIME_CST_HIGH,
- APCI1500_RW_CPT_TMR3_TIME_CST_LOW,
- APCI1500_RW_CPT_TMR1_MODE_SPECIFICATION,
- APCI1500_RW_CPT_TMR2_MODE_SPECIFICATION,
- APCI1500_RW_CPT_TMR3_MODE_SPECIFICATION,
- APCI1500_R_CURRENT_VECTOR,
-
- APCI1500_RW_PORT_A_SPECIFICATION,
- APCI1500_RW_PORT_A_HANDSHAKE_SPECIFICATION,
- APCI1500_RW_PORT_A_DATA_PCITCH_POLARITY,
- APCI1500_RW_PORT_A_DATA_DIRECTION,
- APCI1500_RW_PORT_A_SPECIAL_IO_CONTROL,
- APCI1500_RW_PORT_A_PATTERN_POLARITY,
- APCI1500_RW_PORT_A_PATTERN_TRANSITION,
- APCI1500_RW_PORT_A_PATTERN_MASK,
-
- APCI1500_RW_PORT_B_SPECIFICATION,
- APCI1500_RW_PORT_B_HANDSHAKE_SPECIFICATION,
- APCI1500_RW_PORT_B_DATA_PCITCH_POLARITY,
- APCI1500_RW_PORT_B_DATA_DIRECTION,
- APCI1500_RW_PORT_B_SPECIAL_IO_CONTROL,
- APCI1500_RW_PORT_B_PATTERN_POLARITY,
- APCI1500_RW_PORT_B_PATTERN_TRANSITION,
- APCI1500_RW_PORT_B_PATTERN_MASK
-};
-
-static int i_TimerCounter1Init;
-static int i_TimerCounter2Init;
-static int i_WatchdogCounter3Init;
-static int i_Event1Status, i_Event2Status;
-static int i_TimerCounterWatchdogInterrupt;
-static int i_Logic, i_CounterLogic;
-static int i_InterruptMask;
-static int i_InputChannel;
-static int i_TimerCounter1Enabled, i_TimerCounter2Enabled,
- i_WatchdogCounter3Enabled;
-
-/*
- * An event can be generated for each port. The first event is related to the
- * first 8 channels (port 1) and the second to the following 6 channels (port 2)
- * An interrupt is generated when one or both events have occurred.
- *
- * data[0] Number of the input port on which the event will take place (1 or 2)
- * data[1] The event logic for port 1 has three possibilities:
- * APCI1500_AND This logic links the inputs with an AND logic.
- * APCI1500_OR This logic links the inputs with a OR logic.
- * APCI1500_OR_PRIORITY This logic links the inputs with a priority OR
- * logic. Input 1 has the highest priority level
- * and input 8 the smallest.
- * For the second port the user has 1 possibility:
- * APCI1500_OR This logic links the inputs with a polarity OR logic
- * data[2] These 8-character word for port1 and 6-character word for port 2
- * give the mask of the event. Each place gives the state of the input
- * channels and can have one of these six characters
- * 0 This input must be on 0
- * 1 This input must be on 1
- * 2 This input reacts to a falling edge
- * 3 This input reacts to a rising edge
- * 4 This input reacts to both edges
- * 5 This input is not used for event
- */
-static int apci1500_di_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_PatternPolarity = 0, i_PatternTransition = 0, i_PatternMask = 0;
- int i_MaxChannel = 0, i_Count = 0, i_EventMask = 0;
- int i_PatternTransitionCount = 0, i_RegValue;
- int i;
-
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Disables the main interrupt on the board */
- outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- if (data[0] == 1) {
- i_MaxChannel = 8;
- } else {
- if (data[0] == 2) {
- i_MaxChannel = 6;
- } else {
- dev_warn(dev->class_dev,
- "The specified port event does not exist\n");
- return -EINVAL;
- }
- }
- switch (data[1]) {
- case 0:
- data[1] = APCI1500_AND;
- break;
- case 1:
- data[1] = APCI1500_OR;
- break;
- case 2:
- data[1] = APCI1500_OR_PRIORITY;
- break;
- default:
- dev_warn(dev->class_dev,
- "The specified interrupt logic does not exist\n");
- return -EINVAL;
- }
-
- i_Logic = data[1];
- for (i_Count = i_MaxChannel, i = 0; i_Count > 0; i_Count--, i++) {
- i_EventMask = data[2 + i];
- switch (i_EventMask) {
- case 0:
- i_PatternMask =
- i_PatternMask | (1 << (i_MaxChannel - i_Count));
- break;
- case 1:
- i_PatternMask =
- i_PatternMask | (1 << (i_MaxChannel - i_Count));
- i_PatternPolarity =
- i_PatternPolarity | (1 << (i_MaxChannel -
- i_Count));
- break;
- case 2:
- i_PatternMask =
- i_PatternMask | (1 << (i_MaxChannel - i_Count));
- i_PatternTransition =
- i_PatternTransition | (1 << (i_MaxChannel -
- i_Count));
- break;
- case 3:
- i_PatternMask =
- i_PatternMask | (1 << (i_MaxChannel - i_Count));
- i_PatternPolarity =
- i_PatternPolarity | (1 << (i_MaxChannel -
- i_Count));
- i_PatternTransition =
- i_PatternTransition | (1 << (i_MaxChannel -
- i_Count));
- break;
- case 4:
- i_PatternTransition =
- i_PatternTransition | (1 << (i_MaxChannel -
- i_Count));
- break;
- case 5:
- break;
- default:
- dev_warn(dev->class_dev,
- "The option indicated in the event mask does not exist\n");
- return -EINVAL;
- }
- }
-
- if (data[0] == 1) {
- /* Test the interrupt logic */
-
- if (data[1] == APCI1500_AND ||
- data[1] == APCI1500_OR ||
- data[1] == APCI1500_OR_PRIORITY) {
- /* Tests if a transition was declared */
- /* for a OR PRIORITY logic */
-
- if (data[1] == APCI1500_OR_PRIORITY
- && i_PatternTransition != 0) {
- dev_warn(dev->class_dev,
- "Transition error on an OR PRIORITY logic\n");
- return -EINVAL;
- }
-
- /* Tests if more than one transition */
- /* was declared for an AND logic */
-
- if (data[1] == APCI1500_AND) {
- for (i_Count = 0; i_Count < 8; i_Count++) {
- i_PatternTransitionCount =
- i_PatternTransitionCount +
- ((i_PatternTransition >>
- i_Count) & 0x1);
-
- }
-
- if (i_PatternTransitionCount > 1) {
- dev_warn(dev->class_dev,
- "Transition error on an AND logic\n");
- return -EINVAL;
- }
- }
-
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable Port A */
- outb(0xF0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the polarity register of port 1 */
- outb(APCI1500_RW_PORT_A_PATTERN_POLARITY,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_PatternPolarity,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the pattern mask register of */
- /* port 1 */
- outb(APCI1500_RW_PORT_A_PATTERN_MASK,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_PatternMask,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the pattern transition register */
- /* of port 1 */
- outb(APCI1500_RW_PORT_A_PATTERN_TRANSITION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_PatternTransition,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification mask */
- /* register of port 1 */
- outb(APCI1500_RW_PORT_A_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification mask */
- /* register of port 1 */
- outb(APCI1500_RW_PORT_A_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Port A new mode */
-
- i_RegValue = (i_RegValue & 0xF9) | data[1] | 0x9;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- i_Event1Status = 1;
-
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
-
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Enable Port A */
- outb(0xF4,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- } else {
- dev_warn(dev->class_dev,
- "The choice for interrupt logic does not exist\n");
- return -EINVAL;
- }
- }
-
- /* Test if event setting for port 2 */
-
- if (data[0] == 2) {
- /* Test the event logic */
-
- if (data[1] == APCI1500_OR) {
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable Port B */
- outb(0x74,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the mode specification mask */
- /* register of port B */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification mask */
- /* register of port B */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = i_RegValue & 0xF9;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects error channels 1 and 2 */
-
- i_PatternMask = (i_PatternMask | 0xC0);
- i_PatternPolarity = (i_PatternPolarity | 0xC0);
- i_PatternTransition = (i_PatternTransition | 0xC0);
-
- /* Selects the polarity register of port 2 */
- outb(APCI1500_RW_PORT_B_PATTERN_POLARITY,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_PatternPolarity,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the pattern transition register */
- /* of port 2 */
- outb(APCI1500_RW_PORT_B_PATTERN_TRANSITION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_PatternTransition,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the pattern Mask register */
- /* of port 2 */
-
- outb(APCI1500_RW_PORT_B_PATTERN_MASK,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_PatternMask,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification mask */
- /* register of port 2 */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the mode specification mask */
- /* register of port 2 */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = (i_RegValue & 0xF9) | 4;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- i_Event2Status = 1;
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
-
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Enable Port B */
-
- outb(0xF4,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "The choice for interrupt logic does not exist\n");
- return -EINVAL;
- }
- }
-
- return insn->n;
-}
-
-/*
- * Allows or disallows a port event
- *
- * data[0] 0 = Start input event, 1 = Stop input event
- * data[1] Number of port (1 or 2)
- */
-static int apci1500_di_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_Event1InterruptStatus = 0, i_Event2InterruptStatus =
- 0, i_RegValue;
-
- switch (data[0]) {
- case START:
- /* Tests the port number */
-
- if (data[1] == 1 || data[1] == 2) {
- /* Test if port 1 selected */
-
- if (data[1] == 1) {
- /* Test if event initialised */
- if (i_Event1Status == 1) {
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable Port A */
- outb(0xF0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of */
- /* port 1 */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Allows the pattern interrupt */
- outb(0xC0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Enable Port A */
- outb(0xF4,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_Event1InterruptStatus = 1;
- outb(APCI1500_RW_PORT_A_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Authorizes the main interrupt on the board */
- outb(0xD0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- } else {
- dev_warn(dev->class_dev,
- "Event 1 not initialised\n");
- return -EINVAL;
- }
- }
- if (data[1] == 2) {
-
- if (i_Event2Status == 1) {
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable Port B */
- outb(0x74,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of */
- /* port 2 */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Allows the pattern interrupt */
- outb(0xC0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Enable Port B */
- outb(0xF4,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Authorizes the main interrupt on the board */
- outb(0xD0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_Event2InterruptStatus = 1;
- } else {
- dev_warn(dev->class_dev,
- "Event 2 not initialised\n");
- return -EINVAL;
- }
- }
- } else {
- dev_warn(dev->class_dev,
- "The port parameter is in error\n");
- return -EINVAL;
- }
-
- break;
-
- case STOP:
- /* Tests the port number */
-
- if (data[1] == 1 || data[1] == 2) {
- /* Test if port 1 selected */
-
- if (data[1] == 1) {
- /* Test if event initialised */
- if (i_Event1Status == 1) {
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable Port A */
- outb(0xF0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of */
- /* port 1 */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Inhibits the pattern interrupt */
- outb(0xE0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Enable Port A */
- outb(0xF4,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_Event1InterruptStatus = 0;
- } else {
- dev_warn(dev->class_dev,
- "Event 1 not initialised\n");
- return -EINVAL;
- }
- }
- if (data[1] == 2) {
- /* Test if event initialised */
- if (i_Event2Status == 1) {
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable Port B */
- outb(0x74,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of */
- /* port 2 */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Inhibits the pattern interrupt */
- outb(0xE0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Enable Port B */
- outb(0xF4,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_Event2InterruptStatus = 0;
- } else {
-
- dev_warn(dev->class_dev,
- "Event 2 not initialised\n");
- return -EINVAL;
- }
- }
-
- } else {
- dev_warn(dev->class_dev,
- "The port parameter is in error\n");
- return -EINVAL;
- }
- break;
- default:
- dev_warn(dev->class_dev,
- "The option of START/STOP logic does not exist\n");
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-/*
- * Return the status of the digital input
- */
-static int apci1500_di_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_DummyRead = 0;
-
- /* Software reset */
- i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(1, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master configuration control register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0xF4, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification register of port A */
- outb(APCI1500_RW_PORT_A_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the data path polarity register of port A */
- outb(APCI1500_RW_PORT_A_DATA_PCITCH_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* High level of port A means 1 */
- outb(0xFF, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the data direction register of port A */
- outb(APCI1500_RW_PORT_A_DATA_DIRECTION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* All bits used as inputs */
- outb(0xFF, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates the interrupt management of port A: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the handshake specification register of port A */
- outb(APCI1500_RW_PORT_A_HANDSHAKE_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the register */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification register of port B */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the data path polarity register of port B */
- outb(APCI1500_RW_PORT_B_DATA_PCITCH_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* A high level of port B means 1 */
- outb(0x7F, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the data direction register of port B */
- outb(APCI1500_RW_PORT_B_DATA_DIRECTION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* All bits used as inputs */
- outb(0xFF, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates the interrupt management of port B: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the handshake specification register of port B */
- outb(APCI1500_RW_PORT_B_HANDSHAKE_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the register */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the data path polarity register of port C */
- outb(APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* High level of port C means 1 */
- outb(0x9, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the data direction register of port C */
- outb(APCI1500_RW_PORT_C_DATA_DIRECTION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* All bits used as inputs except channel 1 */
- outb(0x0E, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the special IO register of port C */
- outb(APCI1500_RW_PORT_C_SPECIAL_IO_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes it */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates the interrupt management of timer 1 */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates Timer 2 interrupt management: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of Timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates interrupt management of timer 3: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes all interrupts */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- return insn->n;
-}
-
-static int apci1500_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
-
- data[1] = inw(devpriv->i_IobaseAddon + APCI1500_DIGITAL_IP);
-
- return insn->n;
-}
-
-/*
- * Configures the digital output memory and the digital output error interrupt
- *
- * data[1] 1 = Enable the voltage error interrupt
- * 2 = Disable the voltage error interrupt
- */
-static int apci1500_do_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
-
- devpriv->b_OutputMemoryStatus = data[0];
- return insn->n;
-}
-
-/*
- * Writes port value to the selected port
- */
-static int apci1500_do_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- static unsigned int ui_Temp;
- unsigned int ui_Temp1;
- unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */
-
- if (!devpriv->b_OutputMemoryStatus)
- ui_Temp = 0;
-
- if (data[3] == 0) {
- if (data[1] == 0) {
- data[0] = (data[0] << ui_NoOfChannel) | ui_Temp;
- outw(data[0],
- devpriv->i_IobaseAddon + APCI1500_DIGITAL_OP);
- } else {
- if (data[1] == 1) {
- switch (ui_NoOfChannel) {
-
- case 2:
- data[0] =
- (data[0] << (2 *
- data[2])) | ui_Temp;
- break;
-
- case 4:
- data[0] =
- (data[0] << (4 *
- data[2])) | ui_Temp;
- break;
-
- case 8:
- data[0] =
- (data[0] << (8 *
- data[2])) | ui_Temp;
- break;
-
- case 15:
- data[0] = data[0] | ui_Temp;
- break;
-
- default:
- dev_err(dev->class_dev,
- "chan spec wrong\n");
- return -EINVAL; /* "sorry channel spec wrong " */
-
- }
-
- outw(data[0],
- devpriv->i_IobaseAddon +
- APCI1500_DIGITAL_OP);
- } else {
- dev_warn(dev->class_dev,
- "Specified channel not supported\n");
- return -EINVAL;
- }
- }
- } else {
- if (data[3] == 1) {
- if (data[1] == 0) {
- data[0] = ~data[0] & 0x1;
- ui_Temp1 = 1;
- ui_Temp1 = ui_Temp1 << ui_NoOfChannel;
- ui_Temp = ui_Temp | ui_Temp1;
- data[0] =
- (data[0] << ui_NoOfChannel) ^
- 0xffffffff;
- data[0] = data[0] & ui_Temp;
- outw(data[0],
- devpriv->i_IobaseAddon +
- APCI1500_DIGITAL_OP);
- } else {
- if (data[1] == 1) {
- switch (ui_NoOfChannel) {
-
- case 2:
- data[0] = ~data[0] & 0x3;
- ui_Temp1 = 3;
- ui_Temp1 =
- ui_Temp1 << 2 * data[2];
- ui_Temp = ui_Temp | ui_Temp1;
- data[0] =
- ((data[0] << (2 *
- data
- [2])) ^
- 0xffffffff) & ui_Temp;
- break;
-
- case 4:
- data[0] = ~data[0] & 0xf;
- ui_Temp1 = 15;
- ui_Temp1 =
- ui_Temp1 << 4 * data[2];
- ui_Temp = ui_Temp | ui_Temp1;
- data[0] =
- ((data[0] << (4 *
- data
- [2])) ^
- 0xffffffff) & ui_Temp;
- break;
-
- case 8:
- data[0] = ~data[0] & 0xff;
- ui_Temp1 = 255;
- ui_Temp1 =
- ui_Temp1 << 8 * data[2];
- ui_Temp = ui_Temp | ui_Temp1;
- data[0] =
- ((data[0] << (8 *
- data
- [2])) ^
- 0xffffffff) & ui_Temp;
- break;
-
- case 15:
- break;
-
- default:
- dev_err(dev->class_dev,
- "chan spec wrong\n");
- return -EINVAL; /* "sorry channel spec wrong " */
-
- }
-
- outw(data[0],
- devpriv->i_IobaseAddon +
- APCI1500_DIGITAL_OP);
- } else {
- dev_warn(dev->class_dev,
- "Specified channel not supported\n");
- return -EINVAL;
- }
- }
- } else {
- dev_warn(dev->class_dev,
- "Specified functionality does not exist\n");
- return -EINVAL;
- }
- }
- ui_Temp = data[0];
- return insn->n;
-}
-
-/*
- * Configures The Watchdog
- *
- * data[0] 0 = APCI1500_115_KHZ, 1 = APCI1500_3_6_KHZ, 2 = APCI1500_1_8_KHZ
- * data[1] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
- * data[2] 0 = Counter, 1 = Timer/Watchdog
- * data[3] This parameter has two meanings. If the counter/timer is used as
- * a counter the limit value of the counter is given. If the counter/timer
- * is used as a timer, the divider factor for the output is given.
- * data[4] 0 = APCI1500_CONTINUOUS, 1 = APCI1500_SINGLE
- * data[5] 0 = Software Trigger, 1 = Hardware Trigger
- * data[6] 0 = Software gate, 1 = Hardware gate
- * data[7] 0 = Interrupt Disable, 1 = Interrupt Enable
- */
-static int apci1500_timer_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_TimerCounterMode, i_MasterConfiguration;
-
- devpriv->tsk_Current = current;
-
- /* Selection of the input clock */
- if (data[0] == 0 || data[0] == 1 || data[0] == 2) {
- outw(data[0], devpriv->i_IobaseAddon + APCI1500_CLK_SELECT);
- } else {
- if (data[0] != 3) {
- dev_warn(dev->class_dev,
- "The option for input clock selection does not exist\n");
- return -EINVAL;
- }
- }
- /* Select the counter/timer */
- switch (data[1]) {
- case COUNTER1:
- /* selecting counter or timer */
- switch (data[2]) {
- case 0:
- data[2] = APCI1500_COUNTER;
- break;
- case 1:
- data[2] = APCI1500_TIMER;
- break;
- default:
- dev_warn(dev->class_dev,
- "This choice is not a timer nor a counter\n");
- return -EINVAL;
- }
-
- /* Selecting single or continuous mode */
- switch (data[4]) {
- case 0:
- data[4] = APCI1500_CONTINUOUS;
- break;
- case 1:
- data[4] = APCI1500_SINGLE;
- break;
- default:
- dev_warn(dev->class_dev,
- "This option for single/continuous mode does not exist\n");
- return -EINVAL;
- }
-
- i_TimerCounterMode = data[2] | data[4] | 7;
- /* Test the reload value */
-
- if ((data[3] >= 0) && (data[3] <= 65535)) {
- if (data[7] == APCI1500_ENABLE
- || data[7] == APCI1500_DISABLE) {
-
- /* Selects the mode register of timer/counter 1 */
- outb(APCI1500_RW_CPT_TMR1_MODE_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Writes the new mode */
- outb(i_TimerCounterMode,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the constant register of timer/counter 1 */
-
- outb(APCI1500_RW_CPT_TMR1_TIME_CST_LOW,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the low value */
-
- outb(data[3],
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the constant register of timer/counter 1 */
-
- outb(APCI1500_RW_CPT_TMR1_TIME_CST_HIGH,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the high value */
-
- data[3] = data[3] >> 8;
- outb(data[3],
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master configuration register */
-
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Reads the register */
-
- i_MasterConfiguration =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Enables timer/counter 1 and triggers timer/counter 1 */
-
- i_MasterConfiguration =
- i_MasterConfiguration | 0x40;
-
- /* Selects the master configuration register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the new configuration */
- outb(i_MasterConfiguration,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the commands register of */
- /* timer/counter 1 */
-
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Disable timer/counter 1 */
-
- outb(0x0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the commands register of */
- /* timer/counter 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Trigger timer/counter 1 */
- outb(0x2,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Error in selection of interrupt enable or disable\n");
- return -EINVAL;
- }
- } else {
- dev_warn(dev->class_dev,
- "Error in selection of reload value\n");
- return -EINVAL;
- }
- i_TimerCounterWatchdogInterrupt = data[7];
- i_TimerCounter1Init = 1;
- break;
-
- case COUNTER2: /* selecting counter or timer */
- switch (data[2]) {
- case 0:
- data[2] = APCI1500_COUNTER;
- break;
- case 1:
- data[2] = APCI1500_TIMER;
- break;
- default:
- dev_warn(dev->class_dev,
- "This choice is not a timer nor a counter\n");
- return -EINVAL;
- }
-
- /* Selecting single or continuous mode */
- switch (data[4]) {
- case 0:
- data[4] = APCI1500_CONTINUOUS;
- break;
- case 1:
- data[4] = APCI1500_SINGLE;
- break;
- default:
- dev_warn(dev->class_dev,
- "This option for single/continuous mode does not exist\n");
- return -EINVAL;
- }
-
- /* Selecting software or hardware trigger */
- switch (data[5]) {
- case 0:
- data[5] = APCI1500_SOFTWARE_TRIGGER;
- break;
- case 1:
- data[5] = APCI1500_HARDWARE_TRIGGER;
- break;
- default:
- dev_warn(dev->class_dev,
- "This choice for software or hardware trigger does not exist\n");
- return -EINVAL;
- }
-
- /* Selecting software or hardware gate */
- switch (data[6]) {
- case 0:
- data[6] = APCI1500_SOFTWARE_GATE;
- break;
- case 1:
- data[6] = APCI1500_HARDWARE_GATE;
- break;
- default:
- dev_warn(dev->class_dev,
- "This choice for software or hardware gate does not exist\n");
- return -EINVAL;
- }
-
- i_TimerCounterMode = data[2] | data[4] | data[5] | data[6] | 7;
-
- /* Test the reload value */
-
- if ((data[3] >= 0) && (data[3] <= 65535)) {
- if (data[7] == APCI1500_ENABLE
- || data[7] == APCI1500_DISABLE) {
-
- /* Selects the mode register of timer/counter 2 */
- outb(APCI1500_RW_CPT_TMR2_MODE_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Writes the new mode */
- outb(i_TimerCounterMode,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the constant register of timer/counter 2 */
-
- outb(APCI1500_RW_CPT_TMR2_TIME_CST_LOW,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the low value */
-
- outb(data[3],
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the constant register of timer/counter 2 */
-
- outb(APCI1500_RW_CPT_TMR2_TIME_CST_HIGH,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the high value */
-
- data[3] = data[3] >> 8;
- outb(data[3],
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master configuration register */
-
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Reads the register */
-
- i_MasterConfiguration =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Enables timer/counter 2 and triggers timer/counter 2 */
-
- i_MasterConfiguration =
- i_MasterConfiguration | 0x20;
-
- /* Selects the master configuration register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the new configuration */
- outb(i_MasterConfiguration,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the commands register of */
- /* timer/counter 2 */
-
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Disable timer/counter 2 */
-
- outb(0x0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the commands register of */
- /* timer/counter 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Trigger timer/counter 1 */
- outb(0x2,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Error in selection of interrupt enable or disable\n");
- return -EINVAL;
- }
- } else {
- dev_warn(dev->class_dev,
- "Error in selection of reload value\n");
- return -EINVAL;
- }
- i_TimerCounterWatchdogInterrupt = data[7];
- i_TimerCounter2Init = 1;
- break;
-
- case COUNTER3: /* selecting counter or watchdog */
- switch (data[2]) {
- case 0:
- data[2] = APCI1500_COUNTER;
- break;
- case 1:
- data[2] = APCI1500_WATCHDOG;
- break;
- default:
- dev_warn(dev->class_dev,
- "This choice is not a watchdog nor a counter\n");
- return -EINVAL;
- }
-
- /* Selecting single or continuous mode */
- switch (data[4]) {
- case 0:
- data[4] = APCI1500_CONTINUOUS;
- break;
- case 1:
- data[4] = APCI1500_SINGLE;
- break;
- default:
- dev_warn(dev->class_dev,
- "This option for single/continuous mode does not exist\n");
- return -EINVAL;
- }
-
- /* Selecting software or hardware gate */
- switch (data[6]) {
- case 0:
- data[6] = APCI1500_SOFTWARE_GATE;
- break;
- case 1:
- data[6] = APCI1500_HARDWARE_GATE;
- break;
- default:
- dev_warn(dev->class_dev,
- "This choice for software or hardware gate does not exist\n");
- return -EINVAL;
- }
-
- /* Test if used for watchdog */
-
- if (data[2] == APCI1500_WATCHDOG) {
- /* - Enables the output line */
- /* - Enables retrigger */
- /* - Pulses output */
- i_TimerCounterMode = data[2] | data[4] | 0x54;
- } else {
- i_TimerCounterMode = data[2] | data[4] | data[6] | 7;
- }
- /* Test the reload value */
-
- if ((data[3] >= 0) && (data[3] <= 65535)) {
- if (data[7] == APCI1500_ENABLE
- || data[7] == APCI1500_DISABLE) {
-
- /* Selects the mode register of watchdog/counter 3 */
- outb(APCI1500_RW_CPT_TMR3_MODE_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Writes the new mode */
- outb(i_TimerCounterMode,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the constant register of watchdog/counter 3 */
-
- outb(APCI1500_RW_CPT_TMR3_TIME_CST_LOW,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the low value */
-
- outb(data[3],
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the constant register of watchdog/counter 3 */
-
- outb(APCI1500_RW_CPT_TMR3_TIME_CST_HIGH,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the high value */
-
- data[3] = data[3] >> 8;
- outb(data[3],
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master configuration register */
-
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Reads the register */
-
- i_MasterConfiguration =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Enables watchdog/counter 3 and triggers watchdog/counter 3 */
-
- i_MasterConfiguration =
- i_MasterConfiguration | 0x10;
-
- /* Selects the master configuration register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Writes the new configuration */
- outb(i_MasterConfiguration,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Test if COUNTER */
- if (data[2] == APCI1500_COUNTER) {
-
- /* Selects the command register of */
- /* watchdog/counter 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Disable the watchdog/counter 3 and starts it */
- outb(0x0,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the command register of */
- /* watchdog/counter 3 */
-
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Trigger the watchdog/counter 3 and starts it */
- outb(0x2,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- }
-
- } else {
-
- dev_warn(dev->class_dev,
- "Error in selection of interrupt enable or disable\n");
- return -EINVAL;
- }
- } else {
- dev_warn(dev->class_dev,
- "Error in selection of reload value\n");
- return -EINVAL;
- }
- i_TimerCounterWatchdogInterrupt = data[7];
- i_WatchdogCounter3Init = 1;
- break;
-
- default:
- dev_warn(dev->class_dev,
- "The specified counter/timer option does not exist\n");
- return -EINVAL;
- }
- i_CounterLogic = data[2];
- return insn->n;
-}
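For reference, the legacy instruction-based interface deleted in this hunk took its whole configuration through the data[] array documented above the function. A caller setting up timer 1 as a continuous timer with interrupts enabled would have passed values along these lines (illustrative only; the reload value is arbitrary, and the same data[] convention drives the start/stop and read helpers removed further down):

	/* Legacy apci1500_timer_config data[] layout -- removed by this patch */
	unsigned int cfg[8] = {
		0,	/* data[0]: APCI1500_115_KHZ input clock */
		0,	/* data[1]: Counter1/Timer1 */
		1,	/* data[2]: timer (0 would select counter mode) */
		2000,	/* data[3]: divider factor / reload value */
		0,	/* data[4]: APCI1500_CONTINUOUS */
		0,	/* data[5]: software trigger */
		0,	/* data[6]: software gate */
		1,	/* data[7]: interrupt enable */
	};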
-
-/*
- * Start / Stop or trigger the timer counter or Watchdog
- *
- * data[0] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
- * data[1] 0 = Start, 1 = Stop, 2 = Trigger
- * data[2] 0 = Counter, 1 = Timer/Watchdog
- */
-static int apci1500_timer_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_CommandAndStatusValue;
-
- switch (data[0]) {
- case COUNTER1:
- switch (data[1]) {
- case START:
- if (i_TimerCounter1Init == 1) {
- if (i_TimerCounterWatchdogInterrupt == 1)
- i_CommandAndStatusValue = 0xC4; /* Enable the interrupt */
- else
- i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
-
- /* Starts timer/counter 1 */
- i_TimerCounter1Enabled = 1;
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Counter/Timer1 not configured\n");
- return -EINVAL;
- }
- break;
-
- case STOP:
-
- /* Stop timer/counter 1 */
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x00,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_TimerCounter1Enabled = 0;
- break;
-
- case TRIGGER:
- if (i_TimerCounter1Init == 1) {
- if (i_TimerCounter1Enabled == 1) {
- /* Set Trigger and gate */
-
- i_CommandAndStatusValue = 0x6;
- } else {
- /* Set Trigger */
-
- i_CommandAndStatusValue = 0x2;
- }
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Counter/Timer1 not configured\n");
- return -EINVAL;
- }
- break;
-
- default:
- dev_warn(dev->class_dev,
- "The specified option for start/stop/trigger does not exist\n");
- return -EINVAL;
- }
- break;
-
- case COUNTER2:
- switch (data[1]) {
- case START:
- if (i_TimerCounter2Init == 1) {
- if (i_TimerCounterWatchdogInterrupt == 1)
- i_CommandAndStatusValue = 0xC4; /* Enable the interrupt */
- else
- i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
-
- /* Starts timer/counter 2 */
- i_TimerCounter2Enabled = 1;
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Counter/Timer2 not configured\n");
- return -EINVAL;
- }
- break;
-
- case STOP:
-
- /* Stop timer/counter 2 */
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x00,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_TimerCounter2Enabled = 0;
- break;
- case TRIGGER:
- if (i_TimerCounter2Init == 1) {
- if (i_TimerCounter2Enabled == 1) {
- /* Set Trigger and gate */
-
- i_CommandAndStatusValue = 0x6;
- } else {
- /* Set Trigger */
-
- i_CommandAndStatusValue = 0x2;
- }
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Counter/Timer2 not configured\n");
- return -EINVAL;
- }
- break;
- default:
- dev_warn(dev->class_dev,
- "The specified option for start/stop/trigger does not exist\n");
- return -EINVAL;
- }
- break;
- case COUNTER3:
- switch (data[1]) {
- case START:
- if (i_WatchdogCounter3Init == 1) {
-
- if (i_TimerCounterWatchdogInterrupt == 1)
- i_CommandAndStatusValue = 0xC4; /* Enable the interrupt */
- else
- i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
-
- /* Starts Watchdog/counter 3 */
- i_WatchdogCounter3Enabled = 1;
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- } else {
- dev_warn(dev->class_dev,
- "Watchdog/Counter3 not configured\n");
- return -EINVAL;
- }
- break;
-
- case STOP:
-
- /* Stop Watchdog/counter 3 */
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x00,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_WatchdogCounter3Enabled = 0;
- break;
-
- case TRIGGER:
- switch (data[2]) {
- case 0: /* triggering counter 3 */
- if (i_WatchdogCounter3Init == 1) {
- if (i_WatchdogCounter3Enabled == 1) {
- /* Set Trigger and gate */
-
- i_CommandAndStatusValue = 0x6;
- } else {
- /* Set Trigger */
-
- i_CommandAndStatusValue = 0x2;
- }
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Counter3 not configured\n");
- return -EINVAL;
- }
- break;
- case 1:
- /* triggering Watchdog 3 */
- if (i_WatchdogCounter3Init == 1) {
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x6,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Watchdog 3 not configured\n");
- return -EINVAL;
- }
- break;
- default:
- dev_warn(dev->class_dev,
- "Wrong choice of watchdog/counter3\n");
- return -EINVAL;
- }
- break;
- default:
- dev_warn(dev->class_dev,
- "The specified option for start/stop/trigger does not exist\n");
- return -EINVAL;
- }
- break;
- default:
- dev_warn(dev->class_dev,
- "The specified choice for counter/watchdog/timer does not exist\n");
- return -EINVAL;
- }
- return insn->n;
-}
-
-/*
- * Read The Watchdog
- *
- * data[0] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
- */
-static int apci1500_timer_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_CommandAndStatusValue;
-
- switch (data[0]) {
- case COUNTER1:
- /* Read counter/timer1 */
- if (i_TimerCounter1Init == 1) {
- if (i_TimerCounter1Enabled == 1) {
- /* Set RCC and gate */
-
- i_CommandAndStatusValue = 0xC;
- } else {
- /* Set RCC */
-
- i_CommandAndStatusValue = 0x8;
- }
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the counter register (high) */
- outb(APCI1500_R_CPT_TMR1_VALUE_HIGH,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] = data[0] << 8;
- data[0] = data[0] & 0xff00;
- outb(APCI1500_R_CPT_TMR1_VALUE_LOW,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] =
- data[0] | inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Timer/Counter1 not configured\n");
- return -EINVAL;
- }
- break;
- case COUNTER2:
- /* Read counter/timer2 */
- if (i_TimerCounter2Init == 1) {
- if (i_TimerCounter2Enabled == 1) {
- /* Set RCC and gate */
-
- i_CommandAndStatusValue = 0xC;
- } else {
- /* Set RCC */
-
- i_CommandAndStatusValue = 0x8;
- }
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the counter register (high) */
- outb(APCI1500_R_CPT_TMR2_VALUE_HIGH,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] = data[0] << 8;
- data[0] = data[0] & 0xff00;
- outb(APCI1500_R_CPT_TMR2_VALUE_LOW,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] =
- data[0] | inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Timer/Counter2 not configured\n");
- return -EINVAL;
- }
- break;
- case COUNTER3:
- /* Read counter/watchdog2 */
- if (i_WatchdogCounter3Init == 1) {
- if (i_WatchdogCounter3Enabled == 1) {
- /* Set RCC and gate */
-
- i_CommandAndStatusValue = 0xC;
- } else {
- /* Set RCC */
-
- i_CommandAndStatusValue = 0x8;
- }
-
- /* Selects the commands and status register */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_CommandAndStatusValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the counter register (high) */
- outb(APCI1500_R_CPT_TMR3_VALUE_HIGH,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] = data[0] << 8;
- data[0] = data[0] & 0xff00;
- outb(APCI1500_R_CPT_TMR3_VALUE_LOW,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- data[0] =
- data[0] | inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "WatchdogCounter3 not configured\n");
- return -EINVAL;
- }
- break;
- default:
- dev_warn(dev->class_dev,
- "The choice of timer/counter/watchdog does not exist\n");
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-/*
- * Read the interrupt mask
- *
- * data[0] The interrupt mask value
- * data[1] Channel Number
- */
-static int apci1500_timer_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[0] = i_InterruptMask;
- data[1] = i_InputChannel;
- i_InterruptMask = 0;
- return insn->n;
-}
-
-/*
- * Configures the interrupt registers
- */
-static int apci1500_do_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- unsigned int ui_Status;
- int i_RegValue;
- int i_Constant;
-
- devpriv->tsk_Current = current;
- outl(0x0, devpriv->i_IobaseAmcc + 0x38);
- if (data[0] == 1) {
- i_Constant = 0xC0;
- } else {
- if (data[0] == 0) {
- i_Constant = 0x00;
- } else {
- dev_warn(dev->class_dev,
- "The parameter passed to driver is in error for enabling the voltage interrupt\n");
- return -EINVAL;
- }
- }
-
- /* Selects the mode specification register of port B */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Writes the new configuration (APCI1500_OR) */
- i_RegValue = (i_RegValue & 0xF9) | APCI1500_OR;
-
- outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Authorises the interrupt on the board */
- outb(0xC0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the pattern polarity register of port B */
- outb(APCI1500_RW_PORT_B_PATTERN_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the pattern transition register of port B */
- outb(APCI1500_RW_PORT_B_PATTERN_TRANSITION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the pattern mask register of port B */
- outb(APCI1500_RW_PORT_B_PATTERN_MASK,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of port A */
-
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of port B */
-
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of timer 1 */
-
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of timer 2 */
-
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the command and status register of timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of timer 3 */
-
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Authorizes the main interrupt on the board */
- outb(0xD0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Enables the PCI interrupt */
- outl(0x3000, devpriv->i_IobaseAmcc + 0x38);
- ui_Status = inl(devpriv->i_IobaseAmcc + 0x10);
- ui_Status = inl(devpriv->i_IobaseAmcc + 0x38);
- outl(0x23000, devpriv->i_IobaseAmcc + 0x38);
-
- return insn->n;
-}
-
-static irqreturn_t apci1500_interrupt(int irq, void *d)
-{
-
- struct comedi_device *dev = d;
- struct apci1500_private *devpriv = dev->private;
- unsigned int ui_InterruptStatus = 0;
- int i_RegValue = 0;
-
- /* Clear the interrupt mask */
- i_InterruptMask = 0;
-
- /* Read the board interrupt status */
- ui_InterruptStatus = inl(devpriv->i_IobaseAmcc + 0x38);
-
- /* Test if board generated a interrupt */
- if ((ui_InterruptStatus & 0x800000) == 0x800000) {
- /* Disable all Interrupt */
- /* Selects the master interrupt control register */
- /* Disables the main interrupt on the board */
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- if ((i_RegValue & 0x60) == 0x60) {
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of port A */
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_InterruptMask = i_InterruptMask | 1;
- if (i_Logic == APCI1500_OR_PRIORITY) {
- outb(APCI1500_RW_PORT_A_SPECIFICATION,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the interrupt vector register of port A */
- outb(APCI1500_RW_PORT_A_INTERRUPT_CONTROL,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
-
- i_InputChannel = 1 + (i_RegValue >> 1);
-
- } else {
- i_InputChannel = 0;
- }
- }
-
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- if ((i_RegValue & 0x60) == 0x60) {
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of port B */
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Reads port B */
- i_RegValue =
- inb((unsigned int) devpriv->iobase +
- APCI1500_Z8536_PORT_B);
-
- i_RegValue = i_RegValue & 0xC0;
- /* Tests if this is an external error */
-
- if (i_RegValue) {
- /* Disable the interrupt */
- /* Selects the command and status register of port B */
- outl(0x0, devpriv->i_IobaseAmcc + 0x38);
-
- if (i_RegValue & 0x80) {
- i_InterruptMask =
- i_InterruptMask | 0x40;
- }
-
- if (i_RegValue & 0x40) {
- i_InterruptMask =
- i_InterruptMask | 0x80;
- }
- } else {
- i_InterruptMask = i_InterruptMask | 2;
- }
- }
-
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- if ((i_RegValue & 0x60) == 0x60) {
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of timer 1 */
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_InterruptMask = i_InterruptMask | 4;
- }
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- if ((i_RegValue & 0x60) == 0x60) {
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of timer 2 */
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- i_InterruptMask = i_InterruptMask | 8;
- }
-
- /* Selects the command and status register of timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_RegValue =
- inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- if ((i_RegValue & 0x60) == 0x60) {
- /* Selects the command and status register of timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the interrupt of timer 3 */
- i_RegValue = (i_RegValue & 0x0F) | 0x20;
- outb(i_RegValue,
- devpriv->iobase +
- APCI1500_Z8536_CONTROL_REGISTER);
- if (i_CounterLogic == APCI1500_COUNTER)
- i_InterruptMask = i_InterruptMask | 0x10;
- else
- i_InterruptMask = i_InterruptMask | 0x20;
- }
-
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- /* Enable all Interrupts */
-
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Authorizes the main interrupt on the board */
- outb(0xD0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- } else {
- dev_warn(dev->class_dev,
- "Interrupt from unknown source\n");
-
- }
-
- return IRQ_HANDLED;
-}
-
-static int apci1500_reset(struct comedi_device *dev)
-{
- struct apci1500_private *devpriv = dev->private;
- int i_DummyRead = 0;
-
- i_TimerCounter1Init = 0;
- i_TimerCounter2Init = 0;
- i_WatchdogCounter3Init = 0;
- i_Event1Status = 0;
- i_Event2Status = 0;
- i_TimerCounterWatchdogInterrupt = 0;
- i_Logic = 0;
- i_CounterLogic = 0;
- i_InterruptMask = 0;
- i_InputChannel = 0;
- i_TimerCounter1Enabled = 0;
- i_TimerCounter2Enabled = 0;
- i_WatchdogCounter3Enabled = 0;
-
- /* Software reset */
- i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(1, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the master configuration control register */
- outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0xF4, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification register of port A */
- outb(APCI1500_RW_PORT_A_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the data path polarity register of port A */
- outb(APCI1500_RW_PORT_A_DATA_PCITCH_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* High level of port A means 1 */
- outb(0xFF, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the data direction register of port A */
- outb(APCI1500_RW_PORT_A_DATA_DIRECTION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* All bits used as inputs */
- outb(0xFF, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates the interrupt management of port A: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the handshake specification register of port A */
- outb(APCI1500_RW_PORT_A_HANDSHAKE_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the register */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the mode specification register of port B */
- outb(APCI1500_RW_PORT_B_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the data path polarity register of port B */
- outb(APCI1500_RW_PORT_B_DATA_PCITCH_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* A high level of port B means 1 */
- outb(0x7F, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the data direction register of port B */
- outb(APCI1500_RW_PORT_B_DATA_DIRECTION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* All bits used as inputs */
- outb(0xFF, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates the interrupt management of port B: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the handshake specification register of port B */
- outb(APCI1500_RW_PORT_B_HANDSHAKE_SPECIFICATION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes the register */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-
- /* Selects the data path polarity register of port C */
- outb(APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* High level of port C means 1 */
- outb(0x9, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the data direction register of port C */
- outb(APCI1500_RW_PORT_C_DATA_DIRECTION,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* All bits used as inputs except channel 1 */
- outb(0x0E, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the special IO register of port C */
- outb(APCI1500_RW_PORT_C_SPECIAL_IO_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes it */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates the interrupt management of timer 1 */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates Timer 2 interrupt management: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes IP and IUS */
- outb(0x20, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of Timer 3 */
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates interrupt management of timer 3: */
- outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deletes all interrupts */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* reset all the digital outputs */
- outw(0x0, devpriv->i_IobaseAddon + APCI1500_DIGITAL_OP);
- /* Disable the board interrupt */
- /* Selects the master interrupt control register */
- outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates all interrupts */
- outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port A */
- outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates all interrupts */
- outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of port B */
- outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates all interrupts */
- outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 1 */
- outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates all interrupts */
- outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 2 */
- outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates all interrupts */
- outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Selects the command and status register of timer 3*/
- outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
- devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /* Deactivates all interrupts */
- outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- return 0;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index 339519a3d6b5..1f2f78186d58 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -93,7 +93,6 @@ static int apci3501_write_insn_timer(struct comedi_device *dev,
{
struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
- int i_Temp;
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
@@ -135,7 +134,7 @@ static int apci3501_write_insn_timer(struct comedi_device *dev,
}
}
- i_Temp = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
+ inl(dev->iobase + APCI3501_TIMER_STATUS_REG);
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index bf14165297b7..4911b627203b 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -22,6 +22,54 @@
* more details.
*/
+/*
+ * Driver: addi_apci_1032
+ * Description: ADDI-DATA APCI-1032 Digital Input Board
+ * Author: ADDI-DATA GmbH <info@addi-data.com>,
+ * H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Status: untested
+ * Devices: [ADDI-DATA] APCI-1032 (addi_apci_1032)
+ *
+ * Configuration options:
+ * None; devices are configured automatically.
+ *
+ * This driver models the APCI-1032 as a 32-channel digital input subdevice
+ * plus an additional digital input subdevice to handle change-of-state (COS)
+ * interrupts (if an interrupt handler can be set up successfully).
+ *
+ * The COS subdevice supports comedi asynchronous read commands.
+ *
+ * Change-Of-State (COS) interrupt configuration:
+ *
+ * Channels 0 to 15 are interruptible. These channels can be configured
+ * to generate interrupts based on AND/OR logic for the desired channels.
+ *
+ * OR logic:
+ * - reacts to rising or falling edges
+ * - interrupt is generated when any enabled channel meets the desired
+ * interrupt condition
+ *
+ * AND logic:
+ * - reacts to changes in level of the selected inputs
+ * - interrupt is generated when all enabled channels meet the desired
+ * interrupt condition
+ * - after an interrupt, a change in level must occur on the selected
+ * inputs to release the IRQ logic
+ *
+ * The COS subdevice must be configured before setting up a comedi
+ * asynchronous command:
+ *
+ * data[0] : INSN_CONFIG_DIGITAL_TRIG
+ * data[1] : trigger number (= 0)
+ * data[2] : configuration operation:
+ * - COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
+ * - COMEDI_DIGITAL_TRIG_ENABLE_EDGES = OR (edge) interrupts
+ * - COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = AND (level) interrupts
+ * data[3] : left-shift for data[4] and data[5]
+ * data[4] : rising-edge/high level channels
+ * data[5] : falling-edge/low level channels
+ */
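As a concrete illustration of the configuration sequence listed above, a user-space program built against comedilib could issue the instruction roughly as follows. This is a minimal sketch: the COS subdevice index and the channel masks are assumptions, not values taken from this patch.

	#include <string.h>
	#include <comedilib.h>

	/* Enable OR (edge) interrupts for a rising edge on channel 0. */
	static int configure_cos(comedi_t *dev, unsigned int cos_subdev)
	{
		comedi_insn insn;
		lsampl_t data[6];

		memset(&insn, 0, sizeof(insn));
		insn.insn = INSN_CONFIG;
		insn.n = 6;
		insn.data = data;
		insn.subdev = cos_subdev;	/* assumed COS subdevice index */
		insn.chanspec = CR_PACK(0, 0, 0);

		data[0] = INSN_CONFIG_DIGITAL_TRIG;
		data[1] = 0;			/* trigger number */
		data[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES;
		data[3] = 0;			/* no left-shift */
		data[4] = 0x0001;		/* rising edge on channel 0 */
		data[5] = 0x0000;		/* no falling-edge channels */

		return comedi_do_insn(dev, &insn);
	}

A comedi asynchronous read command on the same subdevice would then deliver samples as the enabled inputs change.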
+
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -62,36 +110,6 @@ static int apci1032_reset(struct comedi_device *dev)
return 0;
}
-/*
- * Change-Of-State (COS) interrupt configuration
- *
- * Channels 0 to 15 are interruptible. These channels can be configured
- * to generate interrupts based on AND/OR logic for the desired channels.
- *
- * OR logic
- * - reacts to rising or falling edges
- * - interrupt is generated when any enabled channel
- * meet the desired interrupt condition
- *
- * AND logic
- * - reacts to changes in level of the selected inputs
- * - interrupt is generated when all enabled channels
- * meet the desired interrupt condition
- * - after an interrupt, a change in level must occur on
- * the selected inputs to release the IRQ logic
- *
- * The COS interrupt must be configured before it can be enabled.
- *
- * data[0] : INSN_CONFIG_DIGITAL_TRIG
- * data[1] : trigger number (= 0)
- * data[2] : configuration operation:
- * COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES = OR (edge) interrupts
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = AND (level) interrupts
- * data[3] : left-shift for data[4] and data[5]
- * data[4] : rising-edge/high level channels
- * data[5] : falling-edge/low level channels
- */
static int apci1032_cos_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 30b132c3d092..f15aa1f6b476 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,22 +1,755 @@
+/*
+ * addi_apci_1500.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +49(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/sched.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
+#include "z8536.h"
+
+/*
+ * PCI Bar 0 Register map (devpriv->amcc)
+ * see amcc_s5933.h for register and bit defines
+ */
+
+/*
+ * PCI Bar 1 Register map (dev->iobase)
+ * see z8536.h for Z8536 internal registers and bit defines
+ */
+#define APCI1500_Z8536_PORTC_REG 0x00
+#define APCI1500_Z8536_PORTB_REG 0x01
+#define APCI1500_Z8536_PORTA_REG 0x02
+#define APCI1500_Z8536_CTRL_REG 0x03
+
+/*
+ * PCI Bar 2 Register map (devpriv->addon)
+ */
+#define APCI1500_CLK_SEL_REG 0x00
+#define APCI1500_DI_REG 0x00
+#define APCI1500_DO_REG 0x02
struct apci1500_private {
- int iobase;
- int i_IobaseAmcc;
- int i_IobaseAddon;
- int i_IobaseReserved;
- unsigned char b_OutputMemoryStatus;
- struct task_struct *tsk_Current;
+ unsigned long amcc;
+ unsigned long addon;
+
+ unsigned int clk_src;
+
+ /* Digital trigger configuration [0]=AND [1]=OR */
+ unsigned int pm[2]; /* Pattern Mask */
+ unsigned int pt[2]; /* Pattern Transition */
+ unsigned int pp[2]; /* Pattern Polarity */
};
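The add-on BAR holds the actual digital input and output registers. A minimal sketch of how the rest of the driver is expected to touch them, assuming the same 16-bit accesses as the code removed above (the helper names are hypothetical, not part of this patch):

	/* Hypothetical helpers illustrating the PCI Bar 2 register map */
	static unsigned int apci1500_di_get_bits(struct comedi_device *dev)
	{
		struct apci1500_private *devpriv = dev->private;

		return inw(devpriv->addon + APCI1500_DI_REG);
	}

	static void apci1500_do_put_bits(struct comedi_device *dev,
					 unsigned int bits)
	{
		struct apci1500_private *devpriv = dev->private;

		outw(bits, devpriv->addon + APCI1500_DO_REG);
	}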
-#include "addi-data/hwdrv_apci1500.c"
+static unsigned int z8536_read(struct comedi_device *dev, unsigned int reg)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&dev->spinlock, flags);
+ outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ val = inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ return val;
+}
+
+static void z8536_write(struct comedi_device *dev,
+ unsigned int val, unsigned int reg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->spinlock, flags);
+ outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ outb(val, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+}
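z8536_read() and z8536_write() wrap the chip's indirect access scheme: the internal register number is written to the control port first, then the data byte is read or written through the same port, with dev->spinlock serializing the two-step access. Read-modify-write updates are built from these two primitives, as apci1500_port_enable() and apci1500_timer_enable() below do; a generic helper would look like this (hypothetical, not part of the patch):

	/* Hypothetical helper: set bits in a Z8536 internal register */
	static void z8536_set_bits(struct comedi_device *dev,
				   unsigned int bits, unsigned int reg)
	{
		unsigned int val;

		val = z8536_read(dev, reg);
		val |= bits;
		z8536_write(dev, val, reg);
	}

Note that the spinlock only covers each individual access, so a composite read-modify-write like this is not atomic on its own.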
+
+static void z8536_reset(struct comedi_device *dev)
+{
+ unsigned long flags;
+
+ /*
+ * Even if the state of the Z8536 is not known, the following
+ * sequence will reset it and put it in State 0.
+ */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
+ outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
+ outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ outb(1, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ /* Disable all Ports and Counter/Timers */
+ z8536_write(dev, 0x00, Z8536_CFG_CTRL_REG);
+
+ /*
+ * Port A is connected to Digital Input channels 0-7.
+ * Configure the port to allow interrupt detection.
+ */
+ z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
+ Z8536_PAB_MODE_SB |
+ Z8536_PAB_MODE_PMS_DISABLE,
+ Z8536_PA_MODE_REG);
+ z8536_write(dev, 0xff, Z8536_PB_DPP_REG);
+ z8536_write(dev, 0xff, Z8536_PA_DD_REG);
+
+ /*
+ * Port B is connected to Digital Input channels 8-13.
+ * Configure the port to allow interrupt detection.
+ *
+ * NOTE: Bits 7 and 6 of Port B are connected to internal
+ * diagnostic signals and bit 7 is inverted.
+ */
+ z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
+ Z8536_PAB_MODE_SB |
+ Z8536_PAB_MODE_PMS_DISABLE,
+ Z8536_PB_MODE_REG);
+ z8536_write(dev, 0x7f, Z8536_PB_DPP_REG);
+ z8536_write(dev, 0xff, Z8536_PB_DD_REG);
+
+ /*
+ * Not sure what Port C is connected to...
+ */
+ z8536_write(dev, 0x09, Z8536_PC_DPP_REG);
+ z8536_write(dev, 0x0e, Z8536_PC_DD_REG);
+
+ /*
+ * Clear and disable all interrupt sources.
+ *
+ * This is just a precaution; the reset of the Z8536 should have
+ * already done this.
+ */
+ z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PA_CMDSTAT_REG);
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG);
+
+ z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PB_CMDSTAT_REG);
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG);
+
+ z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(0));
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(0));
+
+ z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(1));
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(1));
+
+ z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(2));
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(2));
+
+ /* Disable all interrupts */
+ z8536_write(dev, 0x00, Z8536_INT_CTRL_REG);
+}
+
+static void apci1500_port_enable(struct comedi_device *dev, bool enable)
+{
+ unsigned int cfg;
+
+ cfg = z8536_read(dev, Z8536_CFG_CTRL_REG);
+ if (enable)
+ cfg |= (Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE);
+ else
+ cfg &= ~(Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE);
+ z8536_write(dev, cfg, Z8536_CFG_CTRL_REG);
+}
+
+static void apci1500_timer_enable(struct comedi_device *dev,
+ unsigned int chan, bool enable)
+{
+ unsigned int bit;
+ unsigned int cfg;
+
+ if (chan == 0)
+ bit = Z8536_CFG_CTRL_CT1E;
+ else if (chan == 1)
+ bit = Z8536_CFG_CTRL_CT2E;
+ else
+ bit = Z8536_CFG_CTRL_PCE_CT3E;
+
+ cfg = z8536_read(dev, Z8536_CFG_CTRL_REG);
+ if (enable) {
+ cfg |= bit;
+ } else {
+ cfg &= ~bit;
+ z8536_write(dev, 0x00, Z8536_CT_CMDSTAT_REG(chan));
+ }
+ z8536_write(dev, cfg, Z8536_CFG_CTRL_REG);
+}
+
+static bool apci1500_ack_irq(struct comedi_device *dev,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ val = z8536_read(dev, reg);
+ if ((val & Z8536_STAT_IE_IP) == Z8536_STAT_IE_IP) {
+ val &= 0x0f; /* preserve any write bits */
+ val |= Z8536_CMD_CLR_IP_IUS;
+ z8536_write(dev, val, reg);
+
+ return true;
+ }
+ return false;
+}
+
+static irqreturn_t apci1500_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct apci1500_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int status = 0;
+ unsigned int val;
+
+ val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
+ if (!(val & INTCSR_INTR_ASSERTED))
+ return IRQ_NONE;
+
+ if (apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG))
+ status |= 0x01; /* port a event (inputs 0-7) */
+
+ if (apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG)) {
+ /* Tests if this is an external error */
+ val = inb(dev->iobase + APCI1500_Z8536_PORTB_REG);
+ val &= 0xc0;
+ if (val) {
+ if (val & 0x80) /* voltage error */
+ status |= 0x40;
+ if (val & 0x40) /* short circuit error */
+ status |= 0x80;
+ } else {
+ status |= 0x02; /* port b event (inputs 8-13) */
+ }
+ }
+
+ /*
+ * NOTE: The 'status' returned in the sample matches the
+ * interrupt mask information from the APCI-1500 Users Manual.
+ *
+ * Mask Meaning
+ * ---------- ------------------------------------------
+ * 0x00000001 Event 1 has occurred
+ * 0x00000010 Event 2 has occurred
+ * 0x00000100 Counter/timer 1 has run down (not implemented)
+ * 0x00001000 Counter/timer 2 has run down (not implemented)
+ * 0x00010000 Counter 3 has run down (not implemented)
+ * 0x00100000 Watchdog has run down (not implemented)
+ * 0x01000000 Voltage error
+ * 0x10000000 Short-circuit error
+ */
+ comedi_buf_write_samples(s, &status, 1);
+ comedi_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static int apci1500_di_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ /* Disables the main interrupt on the board */
+ z8536_write(dev, 0x00, Z8536_INT_CTRL_REG);
+
+ /* Disable Ports A & B */
+ apci1500_port_enable(dev, false);
+
+ /* Ack any pending interrupts */
+ apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG);
+ apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG);
+
+ /* Disable pattern interrupts */
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG);
+ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG);
+
+ /* Enable Ports A & B */
+ apci1500_port_enable(dev, true);
+
+ return 0;
+}
+
+static int apci1500_di_inttrig_start(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int trig_num)
+{
+ struct apci1500_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int pa_mode = Z8536_PAB_MODE_PMS_DISABLE;
+ unsigned int pb_mode = Z8536_PAB_MODE_PMS_DISABLE;
+ unsigned int pa_trig = trig_num & 0x01;
+ unsigned int pb_trig = (trig_num >> 1) & 0x01;
+ bool valid_trig = false;
+ unsigned int val;
+
+ if (trig_num != cmd->start_arg)
+ return -EINVAL;
+
+ /* Disable Ports A & B */
+ apci1500_port_enable(dev, false);
+
+ /* Set Port A for selected trigger pattern */
+ z8536_write(dev, devpriv->pm[pa_trig] & 0xff, Z8536_PA_PM_REG);
+ z8536_write(dev, devpriv->pt[pa_trig] & 0xff, Z8536_PA_PT_REG);
+ z8536_write(dev, devpriv->pp[pa_trig] & 0xff, Z8536_PA_PP_REG);
+
+ /* Set Port B for selected trigger pattern */
+ z8536_write(dev, (devpriv->pm[pb_trig] >> 8) & 0xff, Z8536_PB_PM_REG);
+ z8536_write(dev, (devpriv->pt[pb_trig] >> 8) & 0xff, Z8536_PB_PT_REG);
+ z8536_write(dev, (devpriv->pp[pb_trig] >> 8) & 0xff, Z8536_PB_PP_REG);
+
+ /* Set Port A trigger mode (if enabled) and enable interrupt */
+ if (devpriv->pm[pa_trig] & 0xff) {
+ pa_mode = pa_trig ? Z8536_PAB_MODE_PMS_AND
+ : Z8536_PAB_MODE_PMS_OR;
+
+ val = z8536_read(dev, Z8536_PA_MODE_REG);
+ val &= ~Z8536_PAB_MODE_PMS_MASK;
+ val |= (pa_mode | Z8536_PAB_MODE_IMO);
+ z8536_write(dev, val, Z8536_PA_MODE_REG);
+
+ z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PA_CMDSTAT_REG);
+
+ valid_trig = true;
+
+ dev_dbg(dev->class_dev,
+ "Port A configured for %s mode pattern detection\n",
+ pa_trig ? "AND" : "OR");
+ }
+
+ /* Set Port B trigger mode (if enabled) and enable interrupt */
+ if (devpriv->pm[pb_trig] & 0xff00) {
+ pb_mode = pb_trig ? Z8536_PAB_MODE_PMS_AND
+ : Z8536_PAB_MODE_PMS_OR;
+
+ val = z8536_read(dev, Z8536_PB_MODE_REG);
+ val &= ~Z8536_PAB_MODE_PMS_MASK;
+ val |= (pb_mode | Z8536_PAB_MODE_IMO);
+ z8536_write(dev, val, Z8536_PB_MODE_REG);
+
+ z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PB_CMDSTAT_REG);
+
+ valid_trig = true;
+
+ dev_dbg(dev->class_dev,
+ "Port B configured for %s mode pattern detection\n",
+ pb_trig ? "AND" : "OR");
+ }
+
+ /* Enable Ports A & B */
+ apci1500_port_enable(dev, true);
+
+ if (!valid_trig) {
+ dev_dbg(dev->class_dev,
+ "digital trigger %d is not configured\n", trig_num);
+ return -EINVAL;
+ }
+
+ /* Enables the main interrupt on the board */
+ z8536_write(dev, Z8536_INT_CTRL_MIE | Z8536_INT_CTRL_DLC,
+ Z8536_INT_CTRL_REG);
+
+ return 0;
+}
+
+static int apci1500_di_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ s->async->inttrig = apci1500_di_inttrig_start;
+
+ return 0;
+}
+
+static int apci1500_di_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_INT);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+ /* Step 2b : and mutually compatible */
+
+ /* Step 3: check if arguments are trivially valid */
+
+ /*
+ * Internal start source triggers:
+ *
+ * 0 AND mode for Port A (digital inputs 0-7)
+ * AND mode for Port B (digital inputs 8-13 and internal signals)
+ *
+ * 1 OR mode for Port A (digital inputs 0-7)
+ * AND mode for Port B (digital inputs 8-13 and internal signals)
+ *
+ * 2 AND mode for Port A (digital inputs 0-7)
+ * OR mode for Port B (digital inputs 8-13 and internal signals)
+ *
+ * 3 OR mode for Port A (digital inputs 0-7)
+ * OR mode for Port B (digital inputs 8-13 and internal signals)
+ */
+ err |= cfc_check_trigger_arg_max(&cmd->start_arg, 3);
+
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
+
+/*
+ * The pattern-recognition logic must be configured before the digital
+ * input async command is started.
+ *
+ * Digital input channels 0 to 13 can generate interrupts. Channels 14
+ * and 15 are connected to internal board status/diagnostic signals.
+ *
+ * Channel 14 - Voltage error (the external supply is < 5V)
+ * Channel 15 - Short-circuit/overtemperature error
+ *
+ * data[0] : INSN_CONFIG_DIGITAL_TRIG
+ * data[1] : trigger number
+ * 0 = AND mode
+ * 1 = OR mode
+ * data[2] : configuration operation:
+ * COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
+ * COMEDI_DIGITAL_TRIG_ENABLE_EDGES = edge interrupts
+ * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = level interrupts
+ * data[3] : left-shift for data[4] and data[5]
+ * data[4] : rising-edge/high level channels
+ * data[5] : falling-edge/low level channels
+ */
+static int apci1500_di_cfg_trig(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1500_private *devpriv = dev->private;
+ unsigned int trig = data[1];
+ unsigned int shift = data[3];
+ unsigned int hi_mask = data[4] << shift;
+ unsigned int lo_mask = data[5] << shift;
+ unsigned int chan_mask = hi_mask | lo_mask;
+ unsigned int old_mask = (1 << shift) - 1;
+ unsigned int pm = devpriv->pm[trig] & old_mask;
+ unsigned int pt = devpriv->pt[trig] & old_mask;
+ unsigned int pp = devpriv->pp[trig] & old_mask;
+
+ if (trig > 1) {
+ dev_dbg(dev->class_dev,
+ "invalid digital trigger number (0=AND, 1=OR)\n");
+ return -EINVAL;
+ }
+
+ if (chan_mask > 0xffff) {
+ dev_dbg(dev->class_dev, "invalid digital trigger channel\n");
+ return -EINVAL;
+ }
+
+ switch (data[2]) {
+ case COMEDI_DIGITAL_TRIG_DISABLE:
+ /* clear trigger configuration */
+ pm = 0;
+ pt = 0;
+ pp = 0;
+ break;
+ case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
+ pm |= chan_mask; /* enable channels */
+ pt |= chan_mask; /* enable edge detection */
+ pp |= hi_mask; /* rising-edge channels */
+ pp &= ~lo_mask; /* falling-edge channels */
+ break;
+ case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
+ pm |= chan_mask; /* enable channels */
+ pt &= ~chan_mask; /* enable level detection */
+ pp |= hi_mask; /* high level channels */
+ pp &= ~lo_mask; /* low level channels */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * The AND mode trigger can have at most one channel enabled
+ * for edge detection.
+ */
+ if (trig == 0) {
+ int ret = 0;
+ unsigned int src;
+
+ src = pt & 0xff;
+ if (src)
+ ret |= cfc_check_trigger_is_unique(src);
+
+ src = (pt >> 8) & 0xff;
+ if (src)
+ ret |= cfc_check_trigger_is_unique(src);
+
+ if (ret) {
+ dev_dbg(dev->class_dev,
+ "invalid AND trigger configuration\n");
+ return ret;
+ }
+ }
+
+ /* save the trigger configuration */
+ devpriv->pm[trig] = pm;
+ devpriv->pt[trig] = pt;
+ devpriv->pp[trig] = pp;
+
+ return insn->n;
+}
+
+static int apci1500_di_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ switch (data[0]) {
+ case INSN_CONFIG_DIGITAL_TRIG:
+ return apci1500_di_cfg_trig(dev, s, insn, data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apci1500_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1500_private *devpriv = dev->private;
+
+ data[1] = inw(devpriv->addon + APCI1500_DI_REG);
+
+ return insn->n;
+}
+
+static int apci1500_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1500_private *devpriv = dev->private;
+
+ if (comedi_dio_update_state(s, data))
+ outw(s->state, devpriv->addon + APCI1500_DO_REG);
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int apci1500_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1500_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ val = data[1] & s->maxdata;
+ z8536_write(dev, val & 0xff, Z8536_CT_RELOAD_LSB_REG(chan));
+ z8536_write(dev, (val >> 8) & 0xff,
+ Z8536_CT_RELOAD_MSB_REG(chan));
+
+ apci1500_timer_enable(dev, chan, true);
+ z8536_write(dev, Z8536_CT_CMDSTAT_GCB,
+ Z8536_CT_CMDSTAT_REG(chan));
+ break;
+ case INSN_CONFIG_DISARM:
+ apci1500_timer_enable(dev, chan, false);
+ break;
+
+ case INSN_CONFIG_GET_COUNTER_STATUS:
+ data[1] = 0;
+ val = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
+ if (val & Z8536_CT_STAT_CIP)
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ if (val & Z8536_CT_CMDSTAT_GCB)
+ data[1] |= COMEDI_COUNTER_ARMED;
+ if (val & Z8536_STAT_IP) {
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ apci1500_ack_irq(dev, Z8536_CT_CMDSTAT_REG(chan));
+ }
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+ break;
+
+ case INSN_CONFIG_SET_COUNTER_MODE:
+ /* Simulate the 8254 timer modes */
+ switch (data[1]) {
+ case I8254_MODE0:
+ /* Interrupt on Terminal Count */
+ val = Z8536_CT_MODE_ECE |
+ Z8536_CT_MODE_DCS_ONESHOT;
+ break;
+ case I8254_MODE1:
+ /* Hardware Retriggerable One-Shot */
+ val = Z8536_CT_MODE_ETE |
+ Z8536_CT_MODE_DCS_ONESHOT;
+ break;
+ case I8254_MODE2:
+ /* Rate Generator */
+ val = Z8536_CT_MODE_CSC |
+ Z8536_CT_MODE_DCS_PULSE;
+ break;
+ case I8254_MODE3:
+ /* Square Wave Mode */
+ val = Z8536_CT_MODE_CSC |
+ Z8536_CT_MODE_DCS_SQRWAVE;
+ break;
+ case I8254_MODE4:
+ /* Software Triggered Strobe */
+ val = Z8536_CT_MODE_REB |
+ Z8536_CT_MODE_DCS_PULSE;
+ break;
+ case I8254_MODE5:
+ /* Hardware Triggered Strobe (watchdog) */
+ val = Z8536_CT_MODE_EOE |
+ Z8536_CT_MODE_ETE |
+ Z8536_CT_MODE_REB |
+ Z8536_CT_MODE_DCS_PULSE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ apci1500_timer_enable(dev, chan, false);
+ z8536_write(dev, val, Z8536_CT_MODE_REG(chan));
+ break;
+
+ case INSN_CONFIG_SET_CLOCK_SRC:
+ if (data[1] > 2)
+ return -EINVAL;
+ devpriv->clk_src = data[1];
+ if (devpriv->clk_src == 2)
+ devpriv->clk_src = 3;
+ outw(devpriv->clk_src, devpriv->addon + APCI1500_CLK_SEL_REG);
+ break;
+ case INSN_CONFIG_GET_CLOCK_SRC:
+ switch (devpriv->clk_src) {
+ case 0:
+ data[1] = 0; /* 111.86 kHz / 2 */
+ data[2] = 17879; /* 17879 ns (approx) */
+ break;
+ case 1:
+ data[1] = 1; /* 3.49 kHz / 2 */
+ data[2] = 573066; /* 573066 ns (approx) */
+ break;
+ case 3:
+ data[1] = 2; /* 1.747 kHz / 2 */
+ data[2] = 1164822; /* 1164822 ns (approx) */
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case INSN_CONFIG_SET_GATE_SRC:
+ if (chan == 0)
+ return -EINVAL;
+
+ val = z8536_read(dev, Z8536_CT_MODE_REG(chan));
+ val &= Z8536_CT_MODE_EGE;
+ if (data[1] == 1)
+ val |= Z8536_CT_MODE_EGE;
+ else if (data[1] > 1)
+ return -EINVAL;
+ z8536_write(dev, val, Z8536_CT_MODE_REG(chan));
+ break;
+ case INSN_CONFIG_GET_GATE_SRC:
+ if (chan == 0)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return insn->n;
+}
+
+static int apci1500_timer_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int cmd;
+
+ cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
+ cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */
+ cmd |= Z8536_CT_CMD_TCB; /* set trigger */
+
+ /* software trigger the timer; it only makes sense to do one write */
+ if (insn->n)
+ z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan));
+
+ return insn->n;
+}
+
+static int apci1500_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int cmd;
+ unsigned int val;
+ int i;
+
+ cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
+ cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */
+ cmd |= Z8536_CT_CMD_RCC; /* set RCC */
+
+ for (i = 0; i < insn->n; i++) {
+ z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan));
+
+ val = z8536_read(dev, Z8536_CT_VAL_MSB_REG(chan)) << 8;
+ val |= z8536_read(dev, Z8536_CT_VAL_LSB_REG(chan));
+
+ data[i] = val;
+ }
+
+ return insn->n;
+}
static int apci1500_auto_attach(struct comedi_device *dev,
unsigned long context)
@@ -35,10 +768,10 @@ static int apci1500_auto_attach(struct comedi_device *dev,
return ret;
dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->iobase = dev->iobase;
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
- devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
- devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
+ devpriv->amcc = pci_resource_start(pcidev, 0);
+ devpriv->addon = pci_resource_start(pcidev, 2);
+
+ z8536_reset(dev);
if (pcidev->irq > 0) {
ret = request_irq(pcidev->irq, apci1500_interrupt, IRQF_SHARED,
@@ -51,51 +784,66 @@ static int apci1500_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- /* Allocate and Initialise DI Subdevice Structures */
+ /* Digital Input subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = apci1500_di_config;
- s->insn_read = apci1500_di_read;
- s->insn_write = apci1500_di_write;
- s->insn_bits = apci1500_di_insn_bits;
-
- /* Allocate and Initialise DO Subdevice Structures */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci1500_di_insn_bits;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->insn_config = apci1500_di_insn_config;
+ s->do_cmdtest = apci1500_di_cmdtest;
+ s->do_cmd = apci1500_di_cmd;
+ s->cancel = apci1500_di_cancel;
+ }
+
+ /* Digital Output subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = apci1500_do_config;
- s->insn_write = apci1500_do_write;
- s->insn_bits = apci1500_do_bits;
-
- /* Allocate and Initialise Timer Subdevice Structures */
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci1500_do_insn_bits;
+
+ /* reset all the digital outputs */
+ outw(0x0, devpriv->addon + APCI1500_DO_REG);
+
+ /* Counter/Timer(Watchdog) subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
- s->insn_write = apci1500_timer_write;
- s->insn_read = apci1500_timer_read;
- s->insn_config = apci1500_timer_config;
- s->insn_bits = apci1500_timer_bits;
-
- apci1500_reset(dev);
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->n_chan = 3;
+ s->maxdata = 0xffff;
+ s->range_table = &range_unknown;
+ s->insn_config = apci1500_timer_insn_config;
+ s->insn_write = apci1500_timer_insn_write;
+ s->insn_read = apci1500_timer_insn_read;
+
+ /* Enable the PCI interrupt */
+ if (dev->irq) {
+ outl(0x2000 | INTCSR_INBOX_FULL_INT,
+ devpriv->amcc + AMCC_OP_REG_INTCSR);
+ inl(devpriv->amcc + AMCC_OP_REG_IMB1);
+ inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
+ outl(INTCSR_INBOX_INTR_STATUS | 0x2000 | INTCSR_INBOX_FULL_INT,
+ devpriv->amcc + AMCC_OP_REG_INTCSR);
+ }
return 0;
}
static void apci1500_detach(struct comedi_device *dev)
{
- if (dev->iobase)
- apci1500_reset(dev);
+ struct apci1500_private *devpriv = dev->private;
+
+ if (devpriv->amcc)
+ outl(0x0, devpriv->amcc + AMCC_OP_REG_INTCSR);
comedi_pci_detach(dev);
}
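For reference, a minimal userspace sketch of exercising the digital-trigger interface added above. It assumes comedilib, that the board is /dev/comedi0, and that the digital input subdevice is subdevice 0 (all assumptions for illustration, not part of the patch). The data[] layout follows the comment above apci1500_di_cfg_trig(); the async command matches what apci1500_di_cmdtest() accepts, and comedi_internal_trigger() supplies the TRIG_INT start.

#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	unsigned int cfg[6];
	unsigned int chanlist[1] = { CR_PACK(0, 0, AREF_GROUND) };
	comedi_insn insn;
	comedi_cmd cmd;

	if (!dev)
		return 1;

	/* Store trigger 1: rising edge on DI 0, falling edge on DI 1 */
	cfg[0] = INSN_CONFIG_DIGITAL_TRIG;
	cfg[1] = 1;				/* trigger number */
	cfg[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES;
	cfg[3] = 0;				/* no left-shift of the masks */
	cfg[4] = 0x01;				/* rising-edge channels */
	cfg[5] = 0x02;				/* falling-edge channels */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = 0;			/* assumed DI subdevice */
	insn.n = 6;
	insn.data = cfg;
	if (comedi_do_insn(dev, &insn) < 0)
		return 1;

	/* Async command as accepted by apci1500_di_cmdtest() */
	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = 0;
	cmd.start_src = TRIG_INT;
	cmd.start_arg = 1;	/* selects which stored patterns each port uses */
	cmd.scan_begin_src = TRIG_EXT;
	cmd.convert_src = TRIG_FOLLOW;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 1;
	cmd.stop_src = TRIG_NONE;
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 1;
	if (comedi_command(dev, &cmd) < 0)
		return 1;

	/* apci1500_di_inttrig_start() requires trig_num == cmd.start_arg */
	return comedi_internal_trigger(dev, 0, cmd.start_arg) < 0;
}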
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index a726efcea6a5..5961f195ba0b 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -267,7 +267,6 @@ static irqreturn_t apci3501_interrupt(int irq, void *d)
struct apci3501_private *devpriv = dev->private;
unsigned int ui_Timer_AOWatchdog;
unsigned long ul_Command1;
- int i_temp;
/* Disable Interrupt */
ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
@@ -285,7 +284,7 @@ static irqreturn_t apci3501_interrupt(int irq, void *d)
ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = ((ul_Command1 & 0xFFFFF9FDul) | 1 << 1);
outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- i_temp = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
+ inl(dev->iobase + APCI3501_TIMER_STATUS_REG);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 528f15c25dae..a3ea4b7c18dd 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -19,8 +19,7 @@
/*
* Driver: adl_pci6208
* Description: ADLink PCI-6208/6216 Series Multi-channel Analog Output Cards
- * Devices: (ADLink) PCI-6208 [adl_pci6208]
- * (ADLink) PCI-6216 [adl_pci6216]
+ * Devices: [ADLink] PCI-6208 (adl_pci6208), PCI-6216 (adl_pci6216)
* Author: nsyeow <nsyeow@pd.jaring.my>
* Updated: Fri, 30 Jan 2004 14:44:27 +0800
* Status: untested
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index fb8e5f582496..618e641ffaac 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -22,27 +22,35 @@
*/
/*
-Driver: adl_pci7x3x
-Description: 32/64-Channel Isolated Digital I/O Boards
-Devices: (ADLink) PCI-7230 [adl_pci7230] - 16 input / 16 output
- (ADLink) PCI-7233 [adl_pci7233] - 32 input
- (ADLink) PCI-7234 [adl_pci7234] - 32 output
- (ADLink) PCI-7432 [adl_pci7432] - 32 input / 32 output
- (ADLink) PCI-7433 [adl_pci7433] - 64 input
- (ADLink) PCI-7434 [adl_pci7434] - 64 output
-Author: H Hartley Sweeten <hsweeten@visionengravers.com>
-Updated: Thu, 02 Aug 2012 14:27:46 -0700
-Status: untested
-
-The PCI-7230, PCI-7432 and PCI-7433 boards also support external
-interrupt signals on digital input channels 0 and 1. The PCI-7233
-has dual-interrupt sources for change-of-state (COS) on any 16
-digital input channels of LSB and for COS on any 16 digital input
-lines of MSB. Interrupts are not currently supported by this
-driver.
-
-Configuration Options: not applicable, uses comedi PCI auto config
-*/
+ * Driver: adl_pci7x3x
+ * Description: 32/64-Channel Isolated Digital I/O Boards
+ * Devices: [ADLink] PCI-7230 (adl_pci7230), PCI-7233 (adl_pci7233),
+ * PCI-7234 (adl_pci7234), PCI-7432 (adl_pci7432), PCI-7433 (adl_pci7433),
+ * PCI-7434 (adl_pci7434)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Thu, 02 Aug 2012 14:27:46 -0700
+ * Status: untested
+ *
+ * One or two subdevices are set up by this driver depending on
+ * the number of digital inputs and/or outputs provided by the
+ * board. Each subdevice has a maximum of 32 channels.
+ *
+ * PCI-7230 - 2 subdevices: 0 - 16 input, 1 - 16 output
+ * PCI-7233 - 1 subdevice: 0 - 32 input
+ * PCI-7234 - 1 subdevice: 0 - 32 output
+ * PCI-7432 - 2 subdevices: 0 - 32 input, 1 - 32 output
+ * PCI-7433 - 2 subdevices: 0 - 32 input, 1 - 32 input
+ * PCI-7434 - 2 subdevices: 0 - 32 output, 1 - 32 output
+ *
+ * The PCI-7230, PCI-7432 and PCI-7433 boards also support external
+ * interrupt signals on digital input channels 0 and 1. The PCI-7233
+ * has dual interrupt sources for change-of-state (COS) on any of
+ * the 16 LSB digital input channels and for COS on any of the 16
+ * MSB digital input lines. Interrupts are not currently supported
+ * by this driver.
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ */
#include <linux/module.h>
#include <linux/pci.h>
@@ -155,18 +163,6 @@ static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
return ret;
dev->iobase = pci_resource_start(pcidev, 2);
- /*
- * One or two subdevices are setup by this driver depending on
- * the number of digital inputs and/or outputs provided by the
- * board. Each subdevice has a maximum of 32 channels.
- *
- * PCI-7230 - 2 subdevices: 0 - 16 input, 1 - 16 output
- * PCI-7233 - 1 subdevice: 0 - 32 input
- * PCI-7234 - 1 subdevice: 0 - 32 output
- * PCI-7432 - 2 subdevices: 0 - 32 input, 1 - 32 output
- * PCI-7433 - 2 subdevices: 0 - 32 input, 1 - 32 input
- * PCI-7434 - 2 subdevices: 0 - 32 output, 1 - 32 output
- */
ret = comedi_alloc_subdevices(dev, board->nsubdevs);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index 72bccb447a74..cc6c53b800a7 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -18,7 +18,7 @@
/*
* Driver: adl_pci8164
* Description: Driver for the Adlink PCI-8164 4 Axes Motion Control board
- * Devices: (ADLink) PCI-8164 [adl_pci8164]
+ * Devices: [ADLink] PCI-8164 (adl_pci8164)
* Author: Michel Lachaine <mike@mikelachaine.ca>
* Status: experimental
* Updated: Mon, 14 Apr 2008 15:10:32 +0100
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 47f6c0e9f014..f68dc99f8e27 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -539,7 +539,7 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
dev_dbg(dev->class_dev, "fifo overflow\n");
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 26603582e71a..f61e392c2d3e 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -749,13 +749,13 @@ static irqreturn_t pci9118_interrupt(int irq, void *d)
if (intcsr & MASTER_ABORT_INT) {
dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
goto interrupt_exit;
}
if (intcsr & TARGET_ABORT_INT) {
dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
goto interrupt_exit;
}
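The hunks just above in adl_pci9111.c and adl_pci9118.c (and the matching ones in adv_pci1710.c below) drop COMEDI_CB_EOA from the error paths because an error event is already terminal on its own: the comedi core's event handler cancels the async command when an end-of-acquisition, error, or overflow event is flagged, so setting both flags is redundant. A rough sketch of that core behaviour, using placeholder names rather than the actual comedi core symbols:

/* Illustrative only; the real logic lives in comedi_handle_events(). */
#define CB_TERMINAL_EVENTS \
	(COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)

static void handle_events_sketch(struct comedi_device *dev,
				 struct comedi_subdevice *s)
{
	unsigned int events = s->async->events;

	if (!events)
		return;

	/*
	 * Any terminal event tears the acquisition down, so a driver only
	 * needs to flag COMEDI_CB_ERROR (or COMEDI_CB_EOA), not both.
	 */
	if (events & CB_TERMINAL_EVENTS)
		s->cancel(dev, s);

	comedi_event(dev, s);	/* wake up anyone waiting on the subdevice */
	s->async->events = 0;
}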
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index d02df7d0c629..9800c01e6fb9 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -51,11 +51,6 @@ Configuration options:
#include "8253.h"
#include "amcc_s5933.h"
-/* hardware types of the cards */
-#define TYPE_PCI171X 0
-#define TYPE_PCI1713 2
-#define TYPE_PCI1720 3
-
#define PCI171x_AD_DATA 0 /* R: A/D data */
#define PCI171x_SOFTTRG 0 /* W: soft trigger for A/D */
#define PCI171x_RANGE 2 /* W: A/D gain/range register */
@@ -164,7 +159,7 @@ static const struct comedi_lrange range_pci17x1 = {
static const char range_codes_pci17x1[] = { 0x00, 0x01, 0x02, 0x03, 0x04 };
-static const struct comedi_lrange range_pci1720 = {
+static const struct comedi_lrange pci1720_ao_range = {
4, {
UNI_RANGE(5),
UNI_RANGE(10),
@@ -173,7 +168,7 @@ static const struct comedi_lrange range_pci1720 = {
}
};
-static const struct comedi_lrange range_pci171x_da = {
+static const struct comedi_lrange pci171x_ao_range = {
2, {
UNI_RANGE(5),
UNI_RANGE(10)
@@ -191,112 +186,81 @@ enum pci1710_boardid {
struct boardtype {
const char *name; /* board name */
- char have_irq; /* 1=card support IRQ */
- char cardtype; /* 0=1710& co. 2=1713, ... */
int n_aichan; /* num of A/D chans */
- int n_aichand; /* num of A/D chans in diff mode */
- int n_aochan; /* num of D/A chans */
- int n_dichan; /* num of DI chans */
- int n_dochan; /* num of DO chans */
- int n_counter; /* num of counters */
- int ai_maxdata; /* resolution of A/D */
- int ao_maxdata; /* resolution of D/A */
const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */
const char *rangecode_ai; /* range codes for programming */
- const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
- unsigned int ai_ns_min; /* max sample speed of card v ns */
- unsigned int fifo_half_size; /* size of FIFO/2 */
+ unsigned int is_pci1713:1;
+ unsigned int is_pci1720:1;
+ unsigned int has_irq:1;
+ unsigned int has_large_fifo:1; /* 4K or 1K FIFO */
+ unsigned int has_diff_ai:1;
+ unsigned int has_ao:1;
+ unsigned int has_di_do:1;
+ unsigned int has_counter:1;
};
static const struct boardtype boardtypes[] = {
[BOARD_PCI1710] = {
.name = "pci1710",
- .have_irq = 1,
- .cardtype = TYPE_PCI171X,
.n_aichan = 16,
- .n_aichand = 8,
- .n_aochan = 2,
- .n_dichan = 16,
- .n_dochan = 16,
- .n_counter = 1,
- .ai_maxdata = 0x0fff,
- .ao_maxdata = 0x0fff,
.rangelist_ai = &range_pci1710_3,
.rangecode_ai = range_codes_pci1710_3,
- .rangelist_ao = &range_pci171x_da,
- .ai_ns_min = 10000,
- .fifo_half_size = 2048,
+ .has_irq = 1,
+ .has_large_fifo = 1,
+ .has_diff_ai = 1,
+ .has_ao = 1,
+ .has_di_do = 1,
+ .has_counter = 1,
},
[BOARD_PCI1710HG] = {
.name = "pci1710hg",
- .have_irq = 1,
- .cardtype = TYPE_PCI171X,
.n_aichan = 16,
- .n_aichand = 8,
- .n_aochan = 2,
- .n_dichan = 16,
- .n_dochan = 16,
- .n_counter = 1,
- .ai_maxdata = 0x0fff,
- .ao_maxdata = 0x0fff,
.rangelist_ai = &range_pci1710hg,
.rangecode_ai = range_codes_pci1710hg,
- .rangelist_ao = &range_pci171x_da,
- .ai_ns_min = 10000,
- .fifo_half_size = 2048,
+ .has_irq = 1,
+ .has_large_fifo = 1,
+ .has_diff_ai = 1,
+ .has_ao = 1,
+ .has_di_do = 1,
+ .has_counter = 1,
},
[BOARD_PCI1711] = {
.name = "pci1711",
- .have_irq = 1,
- .cardtype = TYPE_PCI171X,
.n_aichan = 16,
- .n_aochan = 2,
- .n_dichan = 16,
- .n_dochan = 16,
- .n_counter = 1,
- .ai_maxdata = 0x0fff,
- .ao_maxdata = 0x0fff,
.rangelist_ai = &range_pci17x1,
.rangecode_ai = range_codes_pci17x1,
- .rangelist_ao = &range_pci171x_da,
- .ai_ns_min = 10000,
- .fifo_half_size = 512,
+ .has_irq = 1,
+ .has_ao = 1,
+ .has_di_do = 1,
+ .has_counter = 1,
},
[BOARD_PCI1713] = {
.name = "pci1713",
- .have_irq = 1,
- .cardtype = TYPE_PCI1713,
.n_aichan = 32,
- .n_aichand = 16,
- .ai_maxdata = 0x0fff,
.rangelist_ai = &range_pci1710_3,
.rangecode_ai = range_codes_pci1710_3,
- .ai_ns_min = 10000,
- .fifo_half_size = 2048,
+ .is_pci1713 = 1,
+ .has_irq = 1,
+ .has_large_fifo = 1,
+ .has_diff_ai = 1,
},
[BOARD_PCI1720] = {
.name = "pci1720",
- .cardtype = TYPE_PCI1720,
- .n_aochan = 4,
- .ao_maxdata = 0x0fff,
- .rangelist_ao = &range_pci1720,
+ .is_pci1720 = 1,
+ .has_ao = 1,
},
[BOARD_PCI1731] = {
.name = "pci1731",
- .have_irq = 1,
- .cardtype = TYPE_PCI171X,
.n_aichan = 16,
- .n_dichan = 16,
- .n_dochan = 16,
- .ai_maxdata = 0x0fff,
.rangelist_ai = &range_pci17x1,
.rangecode_ai = range_codes_pci17x1,
- .ai_ns_min = 10000,
- .fifo_half_size = 512,
+ .has_irq = 1,
+ .has_di_do = 1,
},
};
struct pci1710_private {
+ unsigned int max_samples;
unsigned int CntrlReg; /* Control register */
unsigned char ai_et;
unsigned int ai_et_CntrlReg;
@@ -308,39 +272,10 @@ struct pci1710_private {
unsigned int act_chanlist[32]; /* list of scanned channel */
unsigned char saved_seglen; /* len of the non-repeating chanlist */
unsigned char da_ranges; /* copy of D/A outpit range register */
- unsigned short ao_data[4]; /* data output buffer */
unsigned int cnt0_write_wait; /* after a write, wait for update of the
* internal state */
};
-/* used for gain list programming */
-static const unsigned int muxonechan[] = {
- 0x0000, 0x0101, 0x0202, 0x0303, 0x0404, 0x0505, 0x0606, 0x0707,
- 0x0808, 0x0909, 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d, 0x0e0e, 0x0f0f,
- 0x1010, 0x1111, 0x1212, 0x1313, 0x1414, 0x1515, 0x1616, 0x1717,
- 0x1818, 0x1919, 0x1a1a, 0x1b1b, 0x1c1c, 0x1d1d, 0x1e1e, 0x1f1f
-};
-
-static int pci171x_ai_dropout(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int val)
-{
- const struct boardtype *board = dev->board_ptr;
- struct pci1710_private *devpriv = dev->private;
-
- if (board->cardtype != TYPE_PCI1713) {
- if ((val & 0xf000) != devpriv->act_chanlist[chan]) {
- dev_err(dev->class_dev,
- "A/D data droput: received from channel %d, expected %d\n",
- (val >> 12) & 0xf,
- (devpriv->act_chanlist[chan] >> 12) & 0xf);
- return -ENODATA;
- }
- }
- return 0;
-}
-
static int pci171x_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
@@ -407,33 +342,39 @@ static int pci171x_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan,
- unsigned int seglen)
+static void pci171x_ai_setup_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int *chanlist,
+ unsigned int n_chan,
+ unsigned int seglen)
{
- const struct boardtype *this_board = dev->board_ptr;
+ const struct boardtype *board = dev->board_ptr;
struct pci1710_private *devpriv = dev->private;
- unsigned int i, range, chanprog;
+ unsigned int first_chan = CR_CHAN(chanlist[0]);
+ unsigned int last_chan = CR_CHAN(chanlist[seglen - 1]);
+ unsigned int i;
for (i = 0; i < seglen; i++) { /* store range list to card */
- chanprog = muxonechan[CR_CHAN(chanlist[i])];
- outw(chanprog, dev->iobase + PCI171x_MUX); /* select channel */
- range = this_board->rangecode_ai[CR_RANGE(chanlist[i])];
- if (CR_AREF(chanlist[i]) == AREF_DIFF)
- range |= 0x0020;
- outw(range, dev->iobase + PCI171x_RANGE); /* select gain */
- devpriv->act_chanlist[i] =
- (CR_CHAN(chanlist[i]) << 12) & 0xf000;
- }
- for ( ; i < n_chan; i++) { /* store remainder of channel list */
- devpriv->act_chanlist[i] =
- (CR_CHAN(chanlist[i]) << 12) & 0xf000;
+ unsigned int chan = CR_CHAN(chanlist[i]);
+ unsigned int range = CR_RANGE(chanlist[i]);
+ unsigned int aref = CR_AREF(chanlist[i]);
+ unsigned int rangeval;
+
+ rangeval = board->rangecode_ai[range];
+ if (aref == AREF_DIFF)
+ rangeval |= 0x0020;
+
+ /* select channel and set range */
+ outw(chan | (chan << 8), dev->iobase + PCI171x_MUX);
+ outw(rangeval, dev->iobase + PCI171x_RANGE);
+
+ devpriv->act_chanlist[i] = chan;
}
+ for ( ; i < n_chan; i++) /* store remainder of channel list */
+ devpriv->act_chanlist[i] = CR_CHAN(chanlist[i]);
- devpriv->ai_et_MuxVal =
- CR_CHAN(chanlist[0]) | (CR_CHAN(chanlist[seglen - 1]) << 8);
/* select channel interval to scan */
+ devpriv->ai_et_MuxVal = first_chan | (last_chan << 8);
outw(devpriv->ai_et_MuxVal, dev->iobase + PCI171x_MUX);
}
@@ -450,9 +391,39 @@ static int pci171x_ai_eoc(struct comedi_device *dev,
return -EBUSY;
}
-static int pci171x_insn_read_ai(struct comedi_device *dev,
+static int pci171x_ai_read_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int cur_chan,
+ unsigned int *val)
+{
+ const struct boardtype *board = dev->board_ptr;
+ struct pci1710_private *devpriv = dev->private;
+ unsigned int sample;
+ unsigned int chan;
+
+ sample = inw(dev->iobase + PCI171x_AD_DATA);
+ if (!board->is_pci1713) {
+ /*
+ * The upper 4 bits of the 16-bit sample are the channel number
+ * that the sample was acquired from. Verify that this channel
+ * number matches the expected channel number.
+ */
+ chan = sample >> 12;
+ if (chan != devpriv->act_chanlist[cur_chan]) {
+ dev_err(dev->class_dev,
+ "A/D data droput: received from channel %d, expected %d\n",
+ chan, devpriv->act_chanlist[cur_chan]);
+ return -ENODATA;
+ }
+ }
+ *val = sample & s->maxdata;
+ return 0;
+}
+
+static int pci171x_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
@@ -465,7 +436,7 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
outb(0, dev->iobase + PCI171x_CLRFIFO);
outb(0, dev->iobase + PCI171x_CLRINT);
- setup_channel_list(dev, s, &insn->chanspec, 1, 1);
+ pci171x_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1);
for (i = 0; i < insn->n; i++) {
unsigned int val;
@@ -476,12 +447,11 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
if (ret)
break;
- val = inw(dev->iobase + PCI171x_AD_DATA);
- ret = pci171x_ai_dropout(dev, s, chan, val);
+ ret = pci171x_ai_read_sample(dev, s, chan, &val);
if (ret)
break;
- data[i] = val & s->maxdata;
+ data[i] = val;
}
outb(0, dev->iobase + PCI171x_CLRFIFO);
@@ -490,73 +460,43 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
return ret ? ret : insn->n;
}
-/*
-==============================================================================
-*/
-static int pci171x_insn_write_ao(struct comedi_device *dev,
+static int pci171x_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
- unsigned int val;
- int n, chan, range, ofs;
-
- chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
- if (chan) {
- devpriv->da_ranges &= 0xfb;
- devpriv->da_ranges |= (range << 2);
- outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
- ofs = PCI171x_DA2;
- } else {
- devpriv->da_ranges &= 0xfe;
- devpriv->da_ranges |= range;
- outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
- ofs = PCI171x_DA1;
- }
- val = devpriv->ao_data[chan];
-
- for (n = 0; n < insn->n; n++) {
- val = data[n];
- outw(val, dev->iobase + ofs);
- }
-
- devpriv->ao_data[chan] = val;
-
- return n;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int reg = chan ? PCI171x_DA2 : PCI171x_DA1;
+ unsigned int val = s->readback[chan];
+ int i;
-}
+ devpriv->da_ranges &= ~(1 << (chan << 1));
+ devpriv->da_ranges |= (range << (chan << 1));
+ outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
-/*
-==============================================================================
-*/
-static int pci171x_insn_read_ao(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci1710_private *devpriv = dev->private;
- int n, chan;
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+ outw(val, dev->iobase + reg);
+ }
- chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_data[chan];
+ s->readback[chan] = val;
- return n;
+ return insn->n;
}
-/*
-==============================================================================
-*/
-static int pci171x_insn_bits_di(struct comedi_device *dev,
+static int pci171x_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[1] = inw(dev->iobase + PCI171x_DI);
return insn->n;
}
-static int pci171x_insn_bits_do(struct comedi_device *dev,
+static int pci171x_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -584,10 +524,7 @@ static void pci171x_start_pacer(struct comedi_device *dev,
}
}
-/*
-==============================================================================
-*/
-static int pci171x_insn_counter_read(struct comedi_device *dev,
+static int pci171x_counter_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -608,10 +545,7 @@ static int pci171x_insn_counter_read(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
-*/
-static int pci171x_insn_counter_write(struct comedi_device *dev,
+static int pci171x_counter_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -638,10 +572,7 @@ static int pci171x_insn_counter_write(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
-*/
-static int pci171x_insn_counter_config(struct comedi_device *dev,
+static int pci171x_counter_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -677,57 +608,48 @@ static int pci171x_insn_counter_config(struct comedi_device *dev,
return 1;
}
-/*
-==============================================================================
-*/
-static int pci1720_insn_write_ao(struct comedi_device *dev,
+static int pci1720_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val;
- int n, rangereg, chan;
-
- chan = CR_CHAN(insn->chanspec);
- rangereg = devpriv->da_ranges & (~(0x03 << (chan << 1)));
- rangereg |= (CR_RANGE(insn->chanspec) << (chan << 1));
- if (rangereg != devpriv->da_ranges) {
- outb(rangereg, dev->iobase + PCI1720_RANGE);
- devpriv->da_ranges = rangereg;
+ int i;
+
+ val = devpriv->da_ranges & (~(0x03 << (chan << 1)));
+ val |= (range << (chan << 1));
+ if (val != devpriv->da_ranges) {
+ outb(val, dev->iobase + PCI1720_RANGE);
+ devpriv->da_ranges = val;
}
- val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++) {
- val = data[n];
+ val = s->readback[chan];
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
- outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
+ outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = val;
+ s->readback[chan] = val;
- return n;
+ return insn->n;
}
-/*
-==============================================================================
-*/
static int pci171x_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- const struct boardtype *this_board = dev->board_ptr;
struct pci1710_private *devpriv = dev->private;
- switch (this_board->cardtype) {
- default:
- devpriv->CntrlReg &= Control_CNT0;
- devpriv->CntrlReg |= Control_SW;
- /* reset any operations */
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- pci171x_start_pacer(dev, false);
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
- break;
- }
+ devpriv->CntrlReg &= Control_CNT0;
+ devpriv->CntrlReg |= Control_SW;
+ /* reset any operations */
+ outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
+ pci171x_start_pacer(dev, false);
+ outb(0, dev->iobase + PCI171x_CLRFIFO);
+ outb(0, dev->iobase + PCI171x_CLRINT);
return 0;
}
@@ -743,29 +665,25 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
status = inw(dev->iobase + PCI171x_STATUS);
if (status & Status_FE) {
dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", status);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
+ s->async->events |= COMEDI_CB_ERROR;
return;
}
if (status & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", status);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
+ s->async->events |= COMEDI_CB_ERROR;
return;
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
for (; !(inw(dev->iobase + PCI171x_STATUS) & Status_FE);) {
- val = inw(dev->iobase + PCI171x_AD_DATA);
- ret = pci171x_ai_dropout(dev, s, s->async->cur_chan, val);
+ ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val);
if (ret) {
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
break;
}
- val &= s->maxdata;
comedi_buf_write_samples(s, &val, 1);
if (cmd->stop_src == TRIG_COUNT &&
@@ -776,85 +694,53 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
-
- comedi_handle_events(dev, s);
-}
-
-/*
-==============================================================================
-*/
-static int move_block_from_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s, int n, int turn)
-{
- unsigned int val;
- int ret;
- int i;
-
- for (i = 0; i < n; i++) {
- val = inw(dev->iobase + PCI171x_AD_DATA);
-
- ret = pci171x_ai_dropout(dev, s, s->async->cur_chan, val);
- if (ret) {
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- return ret;
- }
-
- val &= s->maxdata;
- comedi_buf_write_samples(s, &val, 1);
- }
- return 0;
}
static void pci1710_handle_fifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- const struct boardtype *this_board = dev->board_ptr;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int nsamples;
- unsigned int m;
-
- m = inw(dev->iobase + PCI171x_STATUS);
- if (!(m & Status_FH)) {
- dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
+ struct pci1710_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
+ int i;
+
+ status = inw(dev->iobase + PCI171x_STATUS);
+ if (!(status & Status_FH)) {
+ dev_dbg(dev->class_dev, "A/D FIFO not half full!\n");
+ async->events |= COMEDI_CB_ERROR;
return;
}
- if (m & Status_FF) {
+ if (status & Status_FF) {
dev_dbg(dev->class_dev,
- "A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
+ "A/D FIFO Full status (Fatal Error!)\n");
+ async->events |= COMEDI_CB_ERROR;
return;
}
- nsamples = this_board->fifo_half_size;
- if (comedi_samples_to_bytes(s, nsamples) >= s->async->prealloc_bufsz) {
- m = comedi_bytes_to_samples(s, s->async->prealloc_bufsz);
- if (move_block_from_fifo(dev, s, m, 0))
- return;
- nsamples -= m;
- }
+ for (i = 0; i < devpriv->max_samples; i++) {
+ unsigned int val;
+ int ret;
- if (nsamples) {
- if (move_block_from_fifo(dev, s, nsamples, 1))
- return;
- }
+ ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val);
+ if (ret) {
+ s->async->events |= COMEDI_CB_ERROR;
+ break;
+ }
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- s->async->events |= COMEDI_CB_EOA;
- comedi_handle_events(dev, s);
- return;
+ if (!comedi_buf_write_samples(s, &val, 1))
+ break;
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ break;
+ }
}
- outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- comedi_handle_events(dev, s);
+ outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
}
-/*
-==============================================================================
-*/
static irqreturn_t interrupt_service_pci1710(int irq, void *d)
{
struct comedi_device *dev = d;
@@ -891,6 +777,8 @@ static irqreturn_t interrupt_service_pci1710(int irq, void *d)
else
pci1710_handle_fifo(dev, s);
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -901,8 +789,8 @@ static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
pci171x_start_pacer(dev, false);
- setup_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len,
- devpriv->saved_seglen);
+ pci171x_ai_setup_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len,
+ devpriv->saved_seglen);
outb(0, dev->iobase + PCI171x_CLRFIFO);
outb(0, dev->iobase + PCI171x_CLRINT);
@@ -937,14 +825,10 @@ static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-/*
-==============================================================================
-*/
static int pci171x_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- const struct boardtype *this_board = dev->board_ptr;
struct pci1710_private *devpriv = dev->private;
int err = 0;
unsigned int arg;
@@ -977,8 +861,7 @@ static int pci171x_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
if (cmd->convert_src == TRIG_TIMER)
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- this_board->ai_ns_min);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
else /* TRIG_FOLLOW */
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
@@ -1016,12 +899,9 @@ static int pci171x_ai_cmdtest(struct comedi_device *dev,
return 0;
}
-/*
-==============================================================================
-*/
static int pci171x_reset(struct comedi_device *dev)
{
- const struct boardtype *this_board = dev->board_ptr;
+ const struct boardtype *board = dev->board_ptr;
struct pci1710_private *devpriv = dev->private;
outw(0x30, dev->iobase + PCI171x_CNTCTRL);
@@ -1033,15 +913,11 @@ static int pci171x_reset(struct comedi_device *dev)
outb(0, dev->iobase + PCI171x_CLRINT); /* clear INT request */
pci171x_start_pacer(dev, false);
devpriv->da_ranges = 0;
- if (this_board->n_aochan) {
+ if (board->has_ao) {
/* set DACs to 0..5V */
outb(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
outw(0, dev->iobase + PCI171x_DA1); /* set DA outputs to 0V */
- devpriv->ao_data[0] = 0x0000;
- if (this_board->n_aochan > 1) {
- outw(0, dev->iobase + PCI171x_DA2);
- devpriv->ao_data[1] = 0x0000;
- }
+ outw(0, dev->iobase + PCI171x_DA2);
}
outw(0, dev->iobase + PCI171x_DO); /* digital outputs to 0 */
outb(0, dev->iobase + PCI171x_CLRFIFO); /* clear FIFO */
@@ -1050,9 +926,6 @@ static int pci171x_reset(struct comedi_device *dev)
return 0;
}
-/*
-==============================================================================
-*/
static int pci1720_reset(struct comedi_device *dev)
{
struct pci1710_private *devpriv = dev->private;
@@ -1066,43 +939,35 @@ static int pci1720_reset(struct comedi_device *dev)
outw(0x0800, dev->iobase + PCI1720_DA2);
outw(0x0800, dev->iobase + PCI1720_DA3);
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
- devpriv->ao_data[0] = 0x0800;
- devpriv->ao_data[1] = 0x0800;
- devpriv->ao_data[2] = 0x0800;
- devpriv->ao_data[3] = 0x0800;
+
return 0;
}
-/*
-==============================================================================
-*/
static int pci1710_reset(struct comedi_device *dev)
{
- const struct boardtype *this_board = dev->board_ptr;
+ const struct boardtype *board = dev->board_ptr;
- switch (this_board->cardtype) {
- case TYPE_PCI1720:
+ if (board->is_pci1720)
return pci1720_reset(dev);
- default:
- return pci171x_reset(dev);
- }
+
+ return pci171x_reset(dev);
}
static int pci1710_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct boardtype *this_board = NULL;
+ const struct boardtype *board = NULL;
struct pci1710_private *devpriv;
struct comedi_subdevice *s;
int ret, subdev, n_subdevices;
if (context < ARRAY_SIZE(boardtypes))
- this_board = &boardtypes[context];
- if (!this_board)
+ board = &boardtypes[context];
+ if (!board)
return -ENODEV;
- dev->board_ptr = this_board;
- dev->board_name = this_board->name;
+ dev->board_ptr = board;
+ dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -1114,15 +979,13 @@ static int pci1710_auto_attach(struct comedi_device *dev,
dev->iobase = pci_resource_start(pcidev, 2);
n_subdevices = 0;
- if (this_board->n_aichan)
- n_subdevices++;
- if (this_board->n_aochan)
- n_subdevices++;
- if (this_board->n_dichan)
+ if (board->n_aichan)
n_subdevices++;
- if (this_board->n_dochan)
+ if (board->has_ao)
n_subdevices++;
- if (this_board->n_counter)
+ if (board->has_di_do)
+ n_subdevices += 2;
+ if (board->has_counter)
n_subdevices++;
ret = comedi_alloc_subdevices(dev, n_subdevices);
@@ -1131,7 +994,7 @@ static int pci1710_auto_attach(struct comedi_device *dev,
pci1710_reset(dev);
- if (this_board->have_irq && pcidev->irq) {
+ if (board->has_irq && pcidev->irq) {
ret = request_irq(pcidev->irq, interrupt_service_pci1710,
IRQF_SHARED, dev->board_name, dev);
if (ret == 0)
@@ -1140,85 +1003,93 @@ static int pci1710_auto_attach(struct comedi_device *dev,
subdev = 0;
- if (this_board->n_aichan) {
+ if (board->n_aichan) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
- if (this_board->n_aichand)
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = this_board->n_aichan;
- s->maxdata = this_board->ai_maxdata;
- s->range_table = this_board->rangelist_ai;
- s->insn_read = pci171x_insn_read_ai;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
+ if (board->has_diff_ai)
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = board->n_aichan;
+ s->maxdata = 0x0fff;
+ s->range_table = board->rangelist_ai;
+ s->insn_read = pci171x_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = pci171x_ai_cmdtest;
- s->do_cmd = pci171x_ai_cmd;
- s->cancel = pci171x_ai_cancel;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = pci171x_ai_cmdtest;
+ s->do_cmd = pci171x_ai_cmd;
+ s->cancel = pci171x_ai_cancel;
}
subdev++;
}
- if (this_board->n_aochan) {
+ if (board->has_ao) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->n_aochan;
- s->maxdata = this_board->ao_maxdata;
- s->len_chanlist = this_board->n_aochan;
- s->range_table = this_board->rangelist_ao;
- switch (this_board->cardtype) {
- case TYPE_PCI1720:
- s->insn_write = pci1720_insn_write_ao;
- break;
- default:
- s->insn_write = pci171x_insn_write_ao;
- break;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->maxdata = 0x0fff;
+ if (board->is_pci1720) {
+ s->n_chan = 4;
+ s->range_table = &pci1720_ao_range;
+ s->insn_write = pci1720_ao_insn_write;
+ } else {
+ s->n_chan = 2;
+ s->range_table = &pci171x_ao_range;
+ s->insn_write = pci171x_ao_insn_write;
}
- s->insn_read = pci171x_insn_read_ao;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ /* initialize the readback values to match the board reset */
+ if (board->is_pci1720) {
+ int i;
+
+ for (i = 0; i < s->n_chan; i++)
+ s->readback[i] = 0x0800;
+ }
+
subdev++;
}
- if (this_board->n_dichan) {
+ if (board->has_di_do) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = this_board->n_dichan;
- s->maxdata = 1;
- s->len_chanlist = this_board->n_dichan;
- s->range_table = &range_digital;
- s->insn_bits = pci171x_insn_bits_di;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pci171x_di_insn_bits;
subdev++;
- }
- if (this_board->n_dochan) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = this_board->n_dochan;
- s->maxdata = 1;
- s->len_chanlist = this_board->n_dochan;
- s->range_table = &range_digital;
- s->insn_bits = pci171x_insn_bits_do;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pci171x_do_insn_bits;
subdev++;
}
- if (this_board->n_counter) {
+ if (board->has_counter) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = this_board->n_counter;
- s->len_chanlist = this_board->n_counter;
- s->maxdata = 0xffff;
- s->range_table = &range_unknown;
- s->insn_read = pci171x_insn_counter_read;
- s->insn_write = pci171x_insn_counter_write;
- s->insn_config = pci171x_insn_counter_config;
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 1;
+ s->maxdata = 0xffff;
+ s->range_table = &range_unknown;
+ s->insn_read = pci171x_counter_insn_read;
+ s->insn_write = pci171x_counter_insn_write;
+ s->insn_config = pci171x_counter_insn_config;
subdev++;
}
+ /* max_samples is half the FIFO size (2 bytes/sample) */
+ devpriv->max_samples = (board->has_large_fifo) ? 2048 : 512;
+
return 0;
}
@@ -1312,5 +1183,5 @@ static struct pci_driver adv_pci1710_pci_driver = {
module_comedi_pci_driver(adv_pci1710_driver, adv_pci1710_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi: Advantech PCI-1710 Series Multifunction DAS Cards");
MODULE_LICENSE("GPL");
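For reference, a standalone sketch (not part of the patch) of the two constants the new attach code hard-codes above: 0x0800 is mid-scale of the 12-bit converters (maxdata 0x0fff), which is what the PCI-1720 outputs sit at after a board reset, and max_samples is simply the FIFO byte size divided by the 2-byte sample width. The FIFO byte counts are inferred from the comment in the hunk, not taken from a datasheet.

#include <stdio.h>

int main(void)
{
	unsigned int maxdata = 0x0fff;		/* 12-bit converters, from the hunk */
	unsigned int readback_seed = 0x0800;	/* AO value assumed after board reset */
	unsigned int small_fifo = 512 * 2;	/* bytes, inferred from max_samples = 512 */
	unsigned int large_fifo = 2048 * 2;	/* bytes, inferred from max_samples = 2048 */

	printf("readback seed 0x%03x is mid-scale of 0..0x%03x\n",
	       readback_seed, maxdata);
	printf("small FIFO %u bytes -> %u samples\n", small_fifo, small_fifo / 2);
	printf("large FIFO %u bytes -> %u samples\n", large_fifo, large_fifo / 2);
	return 0;
}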
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 65f854e1eb66..f1945be89eff 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -20,7 +20,7 @@
* Driver: adv_pci1723
* Description: Advantech PCI-1723
* Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
- * Devices: (Advantech) PCI-1723 [adv_pci1723]
+ * Devices: [Advantech] PCI-1723 (adv_pci1723)
* Updated: Mon, 14 Apr 2008 15:12:56 +0100
* Status: works
*
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index a8d28403262e..a3573ea6f9c0 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -22,7 +22,7 @@
/*
* Driver: adv_pci1724
* Description: Advantech PCI-1724U
- * Devices: (Advantech) PCI-1724U [adv_pci1724]
+ * Devices: [Advantech] PCI-1724U (adv_pci1724)
* Author: Frank Mori Hess <fmh6jj@gmail.com>
* Updated: 2013-02-09
* Status: works
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 7b5ed439c164..1c7b325a373c 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -1,48 +1,155 @@
/*
+ * aio_iiro_16.c
+ * Comedi driver for Access I/O Products 104-IIRO-16 board
+ * Copyright (C) 2006 C&C Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- comedi/drivers/aio_iiro_16.c
+/*
+ * Driver: aio_iiro_16
+ * Description: Access I/O Products PC/104 Isolated Input/Relay Output Board
+ * Author: Zachary Ware <zach.ware@cctechnol.com>
+ * Devices: [Access I/O] 104-IIRO-16 (aio_iiro_16)
+ * Status: experimental
+ *
+ * Configuration Options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional)
+ *
+ * The board supports interrupts on change of state of the digital inputs.
+ * The sample data returned by the async command indicates which inputs
+ * changed state and the current state of the inputs:
+ *
+ * Bit 23 - IRQ Enable (1) / Disable (0)
+ * Bit 17 - Input 8-15 Changed State (1 = Changed, 0 = No Change)
+ * Bit 16 - Input 0-7 Changed State (1 = Changed, 0 = No Change)
+ * Bit 15 - Digital input 15
+ * ...
+ * Bit 0 - Digital input 0
+ */
- Driver for Access I/O Products PC-104 AIO-IIRO-16 Digital I/O board
- Copyright (C) 2006 C&C Technologies, Inc.
+#include <linux/module.h>
+#include <linux/interrupt.h>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+#include "../comedidev.h"
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+#include "comedi_fc.h"
-/*
+#define AIO_IIRO_16_RELAY_0_7 0x00
+#define AIO_IIRO_16_INPUT_0_7 0x01
+#define AIO_IIRO_16_IRQ 0x02
+#define AIO_IIRO_16_RELAY_8_15 0x04
+#define AIO_IIRO_16_INPUT_8_15 0x05
+#define AIO_IIRO_16_STATUS 0x07
+#define AIO_IIRO_16_STATUS_IRQE BIT(7)
+#define AIO_IIRO_16_STATUS_INPUT_8_15 BIT(1)
+#define AIO_IIRO_16_STATUS_INPUT_0_7 BIT(0)
-Driver: aio_iiro_16
-Description: Access I/O Products PC-104 IIRO16 Relay And Isolated Input Board
-Author: Zachary Ware <zach.ware@cctechnol.com>
-Devices:
- [Access I/O] PC-104 AIO12-8
-Status: experimental
+static unsigned int aio_iiro_16_read_inputs(struct comedi_device *dev)
+{
+ unsigned int val;
-Configuration Options:
- [0] - I/O port base address
+ val = inb(dev->iobase + AIO_IIRO_16_INPUT_0_7);
+ val |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8;
-*/
+ return val;
+}
-#include <linux/module.h>
-#include "../comedidev.h"
+static irqreturn_t aio_iiro_16_cos(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int status;
+ unsigned int val;
+
+ status = inb(dev->iobase + AIO_IIRO_16_STATUS);
+ if (!(status & AIO_IIRO_16_STATUS_IRQE))
+ return IRQ_NONE;
+
+ val = aio_iiro_16_read_inputs(dev);
+ val |= (status << 16);
+
+ comedi_buf_write_samples(s, &val, 1);
+ comedi_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static void aio_iiro_enable_irq(struct comedi_device *dev, bool enable)
+{
+ if (enable)
+ inb(dev->iobase + AIO_IIRO_16_IRQ);
+ else
+ outb(0, dev->iobase + AIO_IIRO_16_IRQ);
+}
+
+static int aio_iiro_16_cos_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ aio_iiro_enable_irq(dev, false);
+
+ return 0;
+}
-#define AIO_IIRO_16_RELAY_0_7 0x00
-#define AIO_IIRO_16_INPUT_0_7 0x01
-#define AIO_IIRO_16_IRQ 0x02
-#define AIO_IIRO_16_RELAY_8_15 0x04
-#define AIO_IIRO_16_INPUT_8_15 0x05
+static int aio_iiro_16_cos_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ aio_iiro_enable_irq(dev, true);
+
+ return 0;
+}
+
+static int aio_iiro_16_cos_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+ /* Step 2b : and mutually compatible */
+
+ /* Step 3: check if arguments are trivially valid */
-static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
+
+static int aio_iiro_16_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7);
@@ -55,14 +162,12 @@ static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev,
return insn->n;
}
-static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int aio_iiro_16_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- data[1] = 0;
- data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_0_7);
- data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8;
+ data[1] = aio_iiro_16_read_inputs(dev);
return insn->n;
}
@@ -77,25 +182,52 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
if (ret)
return ret;
+ aio_iiro_enable_irq(dev, false);
+
+ /*
+ * Digital input change of state interrupts are optionally supported
+ * using IRQ 2-7, 10-12, 14, or 15.
+ */
+ if ((1 << it->options[1]) & 0xdcfc) {
+ ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
+ }
+
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = aio_iiro_16_dio_insn_bits_write;
-
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = aio_iiro_16_do_insn_bits;
+
+ /* get the initial state of the relays */
+ s->state = inb(dev->iobase + AIO_IIRO_16_RELAY_0_7) |
+ (inb(dev->iobase + AIO_IIRO_16_RELAY_8_15) << 8);
+
+ /* Digital Input subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = aio_iiro_16_dio_insn_bits_read;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = aio_iiro_16_di_insn_bits;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL;
+ s->len_chanlist = 1;
+ s->do_cmdtest = aio_iiro_16_cos_cmdtest;
+ s->do_cmd = aio_iiro_16_cos_cmd;
+ s->cancel = aio_iiro_16_cos_cancel;
+ }
return 0;
}
@@ -109,5 +241,5 @@ static struct comedi_driver aio_iiro_16_driver = {
module_comedi_driver(aio_iiro_16_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Access I/O Products 104-IIRO-16 board");
MODULE_LICENSE("GPL");
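As a quick aid for anyone reading samples from this driver, here is a standalone decoder (not part of the patch) for the 32-bit sample layout described in the new driver comment. The macro names are invented for the example; the bit positions come straight from that comment and from aio_iiro_16_cos() shifting the status register into bits 16-23. Note the subdevice is flagged SDF_LSAMPL, so userspace sees full 32-bit samples.

#include <stdio.h>

#define IIRO16_SAMPLE_IRQE	(1u << 23)	/* AIO_IIRO_16_STATUS_IRQE << 16 */
#define IIRO16_SAMPLE_CHG_8_15	(1u << 17)	/* AIO_IIRO_16_STATUS_INPUT_8_15 << 16 */
#define IIRO16_SAMPLE_CHG_0_7	(1u << 16)	/* AIO_IIRO_16_STATUS_INPUT_0_7 << 16 */

static void iiro16_decode_sample(unsigned int val)
{
	printf("irq enabled    : %d\n", !!(val & IIRO16_SAMPLE_IRQE));
	printf("inputs 8-15 chg: %d\n", !!(val & IIRO16_SAMPLE_CHG_8_15));
	printf("inputs 0-7 chg : %d\n", !!(val & IIRO16_SAMPLE_CHG_0_7));
	printf("input states   : 0x%04x\n", val & 0xffff);
}

int main(void)
{
	/* example: IRQ enabled, inputs 0-7 changed, input 3 now high */
	iiro16_decode_sample(IIRO16_SAMPLE_IRQE | IIRO16_SAMPLE_CHG_0_7 | 0x0008);
	return 0;
}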
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index e7cb7032a910..1a109e30d8ff 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -22,7 +22,7 @@
* Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
* Author: Dan Block
* Status: unknown
- * Devices: (Mechatronic Systems Inc.) C6x_DIGIO DSP daughter card [c6xdigio]
+ * Devices: [Mechatronic Systems Inc.] C6x_DIGIO DSP daughter card (c6xdigio)
* Updated: Sun Nov 20 20:18:34 EST 2005
*
* Configuration Options:
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 0a48d2a961d5..1079b6c72b15 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -38,10 +38,7 @@ Status: experimental
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include "../comedidev.h"
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
+#include "../comedi_pcmcia.h"
#include "comedi_fc.h"
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 669b1703eb99..dd0c65a5b5a0 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -1355,7 +1355,7 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
outw(devpriv->adc_fifo_bits | LADFUL,
devpriv->control_status + INT_ADCFIFO);
spin_unlock_irqrestore(&dev->spinlock, flags);
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
}
comedi_handle_events(dev, s);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index eddb7ace43df..5b43e4e6d037 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -439,6 +439,29 @@ static const struct comedi_lrange ai_ranges_64xx = {
}
};
+static const uint8_t ai_range_code_64xx[8] = {
+ 0x0, 0x1, 0x2, 0x3, /* bipolar 10, 5, 2.5, 1.25 */
+ 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */
+};
+
+/* analog input ranges for 64-Mx boards */
+static const struct comedi_lrange ai_ranges_64_mx = {
+ 7, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
+};
+
+static const uint8_t ai_range_code_64_mx[7] = {
+ 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */
+ 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */
+};
+
/* analog input ranges for 60xx boards */
static const struct comedi_lrange ai_ranges_60xx = {
4, {
@@ -449,6 +472,10 @@ static const struct comedi_lrange ai_ranges_60xx = {
}
};
+static const uint8_t ai_range_code_60xx[4] = {
+ 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */
+};
+
/* analog input ranges for 6030, etc boards */
static const struct comedi_lrange ai_ranges_6030 = {
14, {
@@ -469,6 +496,11 @@ static const struct comedi_lrange ai_ranges_6030 = {
}
};
+static const uint8_t ai_range_code_6030[14] = {
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
+ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
+};
+
/* analog input ranges for 6052, etc boards */
static const struct comedi_lrange ai_ranges_6052 = {
15, {
@@ -490,6 +522,11 @@ static const struct comedi_lrange ai_ranges_6052 = {
}
};
+static const uint8_t ai_range_code_6052[15] = {
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */
+ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 0.1 */
+};
+
/* analog input ranges for 4020 board */
static const struct comedi_lrange ai_ranges_4020 = {
2, {
@@ -593,6 +630,7 @@ struct pcidas64_board {
int ai_bits; /* analog input resolution */
int ai_speed; /* fastest conversion period in ns */
const struct comedi_lrange *ai_range_table;
+ const uint8_t *ai_range_code;
int ao_nchan; /* number of analog out channels */
int ao_bits; /* analog output resolution */
int ao_scan_speed; /* analog output scan speed */
@@ -651,6 +689,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
.ai_range_table = &ai_ranges_64xx,
+ .ai_range_code = ai_range_code_64xx,
.ao_range_table = &ao_ranges_64xx,
.ao_range_code = ao_range_code_64xx,
.ai_fifo = &ai_fifo_64xx,
@@ -666,6 +705,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
.ai_range_table = &ai_ranges_64xx,
+ .ai_range_code = ai_range_code_64xx,
.ao_range_table = &ao_ranges_64xx,
.ao_range_code = ao_range_code_64xx,
.ai_fifo = &ai_fifo_64xx,
@@ -680,7 +720,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_bits = 16,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ao_range_table = &ao_ranges_64xx,
.ao_range_code = ao_range_code_64xx,
.ai_fifo = &ai_fifo_64xx,
@@ -695,7 +736,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_bits = 16,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ao_range_table = &ao_ranges_64xx,
.ao_range_code = ao_range_code_64xx,
.ai_fifo = &ai_fifo_64xx,
@@ -710,7 +752,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_bits = 16,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ao_range_table = &ao_ranges_64xx,
.ao_range_code = ao_range_code_64xx,
.ai_fifo = &ai_fifo_64xx,
@@ -725,6 +768,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_bits = 16,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ao_range_table = &range_bipolar10,
.ao_range_code = ao_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
@@ -740,6 +784,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 100000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ao_range_table = &range_bipolar10,
.ao_range_code = ao_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
@@ -754,6 +799,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 100000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ao_range_table = &range_bipolar10,
.ao_range_code = ao_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
@@ -769,6 +815,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 100000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ao_range_table = &range_bipolar10,
.ao_range_code = ao_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
@@ -784,6 +831,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 10000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6030,
+ .ai_range_code = ai_range_code_6030,
.ao_range_table = &ao_ranges_6030,
.ao_range_code = ao_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
@@ -799,6 +847,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 10000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6030,
+ .ai_range_code = ai_range_code_6030,
.ao_range_table = &ao_ranges_6030,
.ao_range_code = ao_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
@@ -812,6 +861,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 0,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6030,
+ .ai_range_code = ai_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
.has_8255 = 0,
},
@@ -823,6 +873,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 0,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6030,
+ .ai_range_code = ai_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
.has_8255 = 0,
},
@@ -835,6 +886,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 0,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
.has_8255 = 0,
},
@@ -848,6 +900,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 100000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ao_range_table = &range_bipolar10,
.ao_range_code = ao_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
@@ -863,6 +916,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 100000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_60xx,
+ .ai_range_code = ai_range_code_60xx,
.ao_range_table = &range_bipolar10,
.ao_range_code = ao_range_code_60xx,
.ai_fifo = &ai_fifo_60xx,
@@ -878,6 +932,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 1000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6052,
+ .ai_range_code = ai_range_code_6052,
.ao_range_table = &ao_ranges_6030,
.ao_range_code = ao_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
@@ -893,6 +948,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 3333,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6052,
+ .ai_range_code = ai_range_code_6052,
.ao_range_table = &ao_ranges_6030,
.ao_range_code = ao_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
@@ -908,6 +964,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 1000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6052,
+ .ai_range_code = ai_range_code_6052,
.ao_range_table = &ao_ranges_6030,
.ao_range_code = ao_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
@@ -923,6 +980,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 1000,
.layout = LAYOUT_60XX,
.ai_range_table = &ai_ranges_6052,
+ .ai_range_code = ai_range_code_6052,
.ao_range_table = &ao_ranges_6030,
.ao_range_code = ao_range_code_6030,
.ai_fifo = &ai_fifo_60xx,
@@ -957,6 +1015,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
.ai_range_table = &ai_ranges_64xx,
+ .ai_range_code = ai_range_code_64xx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -968,7 +1027,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 0,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -980,7 +1040,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 0,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -992,7 +1053,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 0,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -1004,7 +1066,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 2,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -1016,7 +1079,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 2,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -1028,7 +1092,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ao_nchan = 2,
.ao_scan_speed = 10000,
.layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
+ .ai_range_table = &ai_ranges_64_mx,
+ .ai_range_code = ai_range_code_64_mx,
.ai_fifo = ai_fifo_64xx,
.has_8255 = 1,
},
@@ -1115,45 +1180,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
unsigned int range_index)
{
const struct pcidas64_board *thisboard = dev->board_ptr;
- const struct comedi_krange *range =
- &thisboard->ai_range_table->range[range_index];
- unsigned int bits = 0;
- switch (range->max) {
- case 10000000:
- bits = 0x000;
- break;
- case 5000000:
- bits = 0x100;
- break;
- case 2000000:
- case 2500000:
- bits = 0x200;
- break;
- case 1000000:
- case 1250000:
- bits = 0x300;
- break;
- case 500000:
- bits = 0x400;
- break;
- case 200000:
- case 250000:
- bits = 0x500;
- break;
- case 100000:
- bits = 0x600;
- break;
- case 50000:
- bits = 0x700;
- break;
- default:
- dev_err(dev->class_dev, "bug! in %s\n", __func__);
- break;
- }
- if (range->min == 0)
- bits += 0x900;
- return bits;
+ return thisboard->ai_range_code[range_index] << 8;
}
static unsigned int hw_revision(const struct comedi_device *dev,
@@ -2776,7 +2804,7 @@ static void handle_ai_interrupt(struct comedi_device *dev,
/* check for fifo overrun */
if (status & ADC_OVERRUN_BIT) {
dev_err(dev->class_dev, "fifo overrun\n");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
}
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
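For reference, a standalone sketch (not part of the patch) of what the rewritten ai_range_bits_6xxx() now does: each board's range table carries a matching code table, and the hardware range selection is just that code shifted into bits 8-11 of the register. The 64xx table is copied from the hunk above; the loop is illustration only.

#include <stdio.h>
#include <stdint.h>

static const uint8_t ai_range_code_64xx[8] = {
	0x0, 0x1, 0x2, 0x3,	/* bipolar 10, 5, 2.5, 1.25 */
	0x8, 0x9, 0xa, 0xb	/* unipolar 10, 5, 2.5, 1.25 */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++) {
		unsigned int bits = ai_range_code_64xx[i] << 8;

		printf("range index %u -> register bits 0x%03x\n", i, bits);
	}
	return 0;
}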
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 01875d7c376f..2b2cfcdda5bd 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -22,12 +22,10 @@
/*
* Driver: cb_pcidda
* Description: MeasurementComputing PCI-DDA series
- * Devices: (Measurement Computing) PCI-DDA08/12 [pci-dda08/12]
- * (Measurement Computing) PCI-DDA04/12 [pci-dda04/12]
- * (Measurement Computing) PCI-DDA02/12 [pci-dda02/12]
- * (Measurement Computing) PCI-DDA08/16 [pci-dda08/16]
- * (Measurement Computing) PCI-DDA04/16 [pci-dda04/16]
- * (Measurement Computing) PCI-DDA02/16 [pci-dda02/16]
+ * Devices: [Measurement Computing] PCI-DDA08/12 (pci-dda08/12),
+ * PCI-DDA04/12 (pci-dda04/12), PCI-DDA02/12 (pci-dda02/12),
+ * PCI-DDA08/16 (pci-dda08/16), PCI-DDA04/16 (pci-dda04/16),
+ * PCI-DDA02/16 (pci-dda02/16)
* Author: Ivan Martinez <ivanmr@altavista.com>
* Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: works
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 85b2f4ab1ba4..221d3819c967 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -261,6 +261,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
{
/* Append dev:subdev to devpriv->name */
char buf[20];
+
snprintf(buf, sizeof(buf), "%u:%u ",
bdev->minor, bdev->subdev);
strlcat(devpriv->name, buf,
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
new file mode 100644
index 000000000000..dbdea71d6b95
--- /dev/null
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -0,0 +1,262 @@
+/*
+ * COMEDI ISA DMA support functions
+ * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma.h>
+
+#include "../comedidev.h"
+
+#include "comedi_isadma.h"
+
+/**
+ * comedi_isadma_program - program and enable an ISA DMA transfer
+ * @desc: the ISA DMA cookie to program and enable
+ */
+void comedi_isadma_program(struct comedi_isadma_desc *desc)
+{
+ unsigned long flags;
+
+ flags = claim_dma_lock();
+ clear_dma_ff(desc->chan);
+ set_dma_mode(desc->chan, desc->mode);
+ set_dma_addr(desc->chan, desc->hw_addr);
+ set_dma_count(desc->chan, desc->size);
+ enable_dma(desc->chan);
+ release_dma_lock(flags);
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_program);
+
+/**
+ * comedi_isadma_disable - disable the ISA DMA channel
+ * @dma_chan: the DMA channel to disable
+ *
+ * Returns the residue (remaining bytes) left in the DMA transfer.
+ */
+unsigned int comedi_isadma_disable(unsigned int dma_chan)
+{
+ unsigned long flags;
+ unsigned int residue;
+
+ flags = claim_dma_lock();
+ disable_dma(dma_chan);
+ residue = get_dma_residue(dma_chan);
+ release_dma_lock(flags);
+
+ return residue;
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_disable);
+
+/**
+ * comedi_isadma_disable_on_sample - disable the ISA DMA channel
+ * @dma_chan: the DMA channel to disable
+ * @size: the sample size (in bytes)
+ *
+ * Returns the residue (remaining bytes) left in the DMA transfer.
+ */
+unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
+ unsigned int size)
+{
+ int stalled = 0;
+ unsigned long flags;
+ unsigned int residue;
+ unsigned int new_residue;
+
+ residue = comedi_isadma_disable(dma_chan);
+ while (residue % size) {
+ /* residue is a partial sample, enable DMA to allow more data */
+ flags = claim_dma_lock();
+ enable_dma(dma_chan);
+ release_dma_lock(flags);
+
+ udelay(2);
+ new_residue = comedi_isadma_disable(dma_chan);
+
+ /* is DMA stalled? */
+ if (new_residue == residue) {
+ stalled++;
+ if (stalled > 10)
+ break;
+ } else {
+ residue = new_residue;
+ stalled = 0;
+ }
+ }
+ return residue;
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_disable_on_sample);
+
+/**
+ * comedi_isadma_poll - poll the current DMA transfer
+ * @dma: the ISA DMA to poll
+ *
+ * Returns the position (in bytes) of the current DMA transfer.
+ */
+unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
+{
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned long flags;
+ unsigned int result;
+ unsigned int result1;
+
+ flags = claim_dma_lock();
+ clear_dma_ff(desc->chan);
+ if (!isa_dma_bridge_buggy)
+ disable_dma(desc->chan);
+ result = get_dma_residue(desc->chan);
+ /*
+ * Read the counter again and choose higher value in order to
+ * avoid reading during counter lower byte roll over if the
+ * isa_dma_bridge_buggy is set.
+ */
+ result1 = get_dma_residue(desc->chan);
+ if (!isa_dma_bridge_buggy)
+ enable_dma(desc->chan);
+ release_dma_lock(flags);
+
+ if (result < result1)
+ result = result1;
+ if (result >= desc->size || result == 0)
+ return 0;
+ else
+ return desc->size - result;
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_poll);
+
+/**
+ * comedi_isadma_set_mode - set the ISA DMA transfer direction
+ * @desc: the ISA DMA cookie to set
+ * @dma_dir: the DMA direction
+ */
+void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir)
+{
+ desc->mode = (dma_dir == COMEDI_ISADMA_READ) ? DMA_MODE_READ
+ : DMA_MODE_WRITE;
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_set_mode);
+
+/**
+ * comedi_isadma_alloc - allocate and initialize the ISA DMA
+ * @dev: comedi_device struct
+ * @n_desc: the number of cookies to allocate
+ * @dma_chan1: DMA channel for the first cookie
+ * @dma_chan2: DMA channel for the second cookie
+ * @maxsize: the size of the buffer to allocate for each cookie
+ * @dma_dir: the DMA direction
+ *
+ * Returns the allocated and initialized ISA DMA or NULL if anything fails.
+ */
+struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
+ int n_desc, unsigned int dma_chan1,
+ unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir)
+{
+ struct comedi_isadma *dma = NULL;
+ struct comedi_isadma_desc *desc;
+ unsigned int dma_chans[2];
+ int i;
+
+ if (n_desc < 1 || n_desc > 2)
+ goto no_dma;
+
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ goto no_dma;
+
+ desc = kcalloc(n_desc, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ goto no_dma;
+ dma->desc = desc;
+ dma->n_desc = n_desc;
+
+ dma_chans[0] = dma_chan1;
+ if (dma_chan2 == 0 || dma_chan2 == dma_chan1)
+ dma_chans[1] = dma_chan1;
+ else
+ dma_chans[1] = dma_chan2;
+
+ if (request_dma(dma_chans[0], dev->board_name))
+ goto no_dma;
+ dma->chan = dma_chans[0];
+ if (dma_chans[1] != dma_chans[0]) {
+ if (request_dma(dma_chans[1], dev->board_name))
+ goto no_dma;
+ }
+ dma->chan2 = dma_chans[1];
+
+ for (i = 0; i < n_desc; i++) {
+ desc = &dma->desc[i];
+ desc->chan = dma_chans[i];
+ desc->maxsize = maxsize;
+ desc->virt_addr = dma_alloc_coherent(NULL, desc->maxsize,
+ &desc->hw_addr,
+ GFP_KERNEL);
+ if (!desc->virt_addr)
+ goto no_dma;
+ comedi_isadma_set_mode(desc, dma_dir);
+ }
+
+ return dma;
+
+no_dma:
+ comedi_isadma_free(dma);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_alloc);
+
+/**
+ * comedi_isadma_free - free the ISA DMA
+ * @dma: the ISA DMA to free
+ */
+void comedi_isadma_free(struct comedi_isadma *dma)
+{
+ struct comedi_isadma_desc *desc;
+ int i;
+
+ if (!dma)
+ return;
+
+ if (dma->desc) {
+ for (i = 0; i < dma->n_desc; i++) {
+ desc = &dma->desc[i];
+ if (desc->virt_addr)
+ dma_free_coherent(NULL, desc->maxsize,
+ desc->virt_addr, desc->hw_addr);
+ }
+ kfree(dma->desc);
+ }
+ if (dma->chan2 && dma->chan2 != dma->chan)
+ free_dma(dma->chan2);
+ if (dma->chan)
+ free_dma(dma->chan);
+ kfree(dma);
+}
+EXPORT_SYMBOL_GPL(comedi_isadma_free);
+
+static int __init comedi_isadma_init(void)
+{
+ return 0;
+}
+module_init(comedi_isadma_init);
+
+static void __exit comedi_isadma_exit(void)
+{
+}
+module_exit(comedi_isadma_exit);
+
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("Comedi ISA DMA support");
+MODULE_LICENSE("GPL");
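A hypothetical usage sketch of the new helpers (not from the patch): "struct example_private" and the function names are invented, and the das16 conversion later in this series follows the same shape with its real private struct. A NULL return from comedi_isadma_alloc() -- allocation failure, an invalid channel, or a build without CONFIG_ISA_DMA_API -- simply leaves the driver running without DMA-backed async commands.

#include <linux/module.h>

#include "../comedidev.h"

#include "comedi_isadma.h"

struct example_private {
	struct comedi_isadma *dma;
};

static void example_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
{
	struct example_private *devpriv = dev->private;

	/* two buffers on one ISA DMA channel, device-to-memory */
	devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
					   PAGE_SIZE, COMEDI_ISADMA_READ);
}

static void example_start_dma(struct comedi_device *dev)
{
	struct example_private *devpriv = dev->private;
	struct comedi_isadma *dma = devpriv->dma;
	struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];

	/* fill one whole buffer per transfer, then arm the channel */
	desc->size = desc->maxsize;
	comedi_isadma_program(desc);
}

static void example_stop_dma(struct comedi_device *dev)
{
	struct example_private *devpriv = dev->private;

	if (devpriv && devpriv->dma) {
		comedi_isadma_disable(devpriv->dma->chan);
		comedi_isadma_free(devpriv->dma);
	}
}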
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.h b/drivers/staging/comedi/drivers/comedi_isadma.h
new file mode 100644
index 000000000000..c7c524faf595
--- /dev/null
+++ b/drivers/staging/comedi/drivers/comedi_isadma.h
@@ -0,0 +1,116 @@
+/*
+ * COMEDI ISA DMA support functions
+ * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMEDI_ISADMA_H
+#define _COMEDI_ISADMA_H
+
+/*
+ * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
+ * defines are not available.
+ */
+#define COMEDI_ISADMA_READ 0
+#define COMEDI_ISADMA_WRITE 1
+
+/**
+ * struct comedi_isadma_desc - cookie for ISA DMA
+ * @virt_addr: virtual address of buffer
+ * @hw_addr: hardware (bus) address of buffer
+ * @chan: DMA channel
+ * @maxsize: allocated size of buffer (in bytes)
+ * @size: transfer size (in bytes)
+ * @mode: DMA_MODE_READ or DMA_MODE_WRITE
+ */
+struct comedi_isadma_desc {
+ void *virt_addr;
+ dma_addr_t hw_addr;
+ unsigned int chan;
+ unsigned int maxsize;
+ unsigned int size;
+ char mode;
+};
+
+/**
+ * struct comedi_isadma - ISA DMA data
+ * @desc: cookie for each DMA buffer
+ * @n_desc: the number of cookies
+ * @cur_dma: the current cookie in use
+ * @chan: the first DMA channel requested
+ * @chan2: the second DMA channel requested
+ */
+struct comedi_isadma {
+ struct comedi_isadma_desc *desc;
+ int n_desc;
+ int cur_dma;
+ unsigned int chan;
+ unsigned int chan2;
+};
+
+#if IS_ENABLED(CONFIG_ISA_DMA_API)
+
+void comedi_isadma_program(struct comedi_isadma_desc *);
+unsigned int comedi_isadma_disable(unsigned int dma_chan);
+unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
+ unsigned int size);
+unsigned int comedi_isadma_poll(struct comedi_isadma *);
+void comedi_isadma_set_mode(struct comedi_isadma_desc *, char dma_dir);
+
+struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *,
+ int n_desc, unsigned int dma_chan1,
+ unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir);
+void comedi_isadma_free(struct comedi_isadma *);
+
+#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
+{
+}
+
+static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
+{
+ return 0;
+}
+
+static inline unsigned int
+comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
+{
+ return 0;
+}
+
+static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
+{
+ return 0;
+}
+
+static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
+ char dma_dir)
+{
+}
+
+static inline struct comedi_isadma *
+comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
+ unsigned int dma_chan1, unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir)
+{
+ return NULL;
+}
+
+static inline void comedi_isadma_free(struct comedi_isadma *dma)
+{
+}
+
+#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+#endif /* #ifndef _COMEDI_ISADMA_H */
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index 3bac903c8627..ceef6931edbe 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -24,7 +24,7 @@
* Description: Standard PC parallel port
* Author: ds
* Status: works in immediate mode
- * Devices: (standard) parallel port [comedi_parport]
+ * Devices: [standard] parallel port (comedi_parport)
* Updated: Tue, 30 Apr 2002 21:11:45 -0700
*
* A cheap and easy way to get a few more digital I/O lines. Steal
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
index beb36c8dd00a..a6798ad8fa7f 100644
--- a/drivers/staging/comedi/drivers/dac02.c
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -24,7 +24,7 @@
/*
* Driver: dac02
* Description: Comedi driver for DAC02 compatible boards
- * Devices: (Keithley Metrabyte) DAC-02 [dac02]
+ * Devices: [Keithley Metrabyte] DAC-02 (dac02)
* Author: H Hartley Sweeten <hsweeten@visionengravers.com>
* Updated: Tue, 11 Mar 2014 11:27:19 -0700
* Status: unknown
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 20a9f0eb72b5..c78c0df9bbe3 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -1,6 +1,6 @@
/*
* comedi/drivers/das08.c
- * comedi driver for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
+ * comedi module for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
@@ -18,21 +18,6 @@
* GNU General Public License for more details.
*/
-/*
- * Driver: das08
- * Description: DAS-08 compatible boards
- * Devices: various, see das08_isa, das08_cs, and das08_pci drivers
- * Author: Warren Jasper, ds, Frank Hess
- * Updated: Fri, 31 Aug 2012 19:19:06 +0100
- * Status: works
- *
- * This driver is used by the das08_isa, das08_cs, and das08_pci
- * drivers to provide the common support for the DAS-08 hardware.
- *
- * The driver doesn't support asynchronous commands, since the
- * cheap das08 hardware doesn't really support them.
- */
-
#include <linux/module.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index f3ccc2ce6d49..93fab6890161 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -41,10 +41,7 @@ Command support does not exist, but could be added for this board.
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
+#include "../comedi_pcmcia.h"
#include "das08.h"
diff --git a/drivers/staging/comedi/drivers/das08_isa.c b/drivers/staging/comedi/drivers/das08_isa.c
index e4ba268e78ab..2d9a31dab552 100644
--- a/drivers/staging/comedi/drivers/das08_isa.c
+++ b/drivers/staging/comedi/drivers/das08_isa.c
@@ -21,18 +21,12 @@
/*
* Driver: das08_isa
* Description: DAS-08 ISA/PC-104 compatible boards
- * Devices: (Keithley Metrabyte) DAS08 [isa-das08],
- * (ComputerBoards) DAS08 [isa-das08]
- * (ComputerBoards) DAS08-PGM [das08-pgm]
- * (ComputerBoards) DAS08-PGH [das08-pgh]
- * (ComputerBoards) DAS08-PGL [das08-pgl]
- * (ComputerBoards) DAS08-AOH [das08-aoh]
- * (ComputerBoards) DAS08-AOL [das08-aol]
- * (ComputerBoards) DAS08-AOM [das08-aom]
- * (ComputerBoards) DAS08/JR-AO [das08/jr-ao]
- * (ComputerBoards) DAS08/JR-16-AO [das08jr-16-ao]
- * (ComputerBoards) PC104-DAS08 [pc104-das08]
- * (ComputerBoards) DAS08/JR/16 [das08jr/16]
+ * Devices: [Keithley Metrabyte] DAS08 (isa-das08),
+ * [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
+ * DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
+ * DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
+ * DAS08/JR-16-AO (das08jr-16-ao), PC104-DAS08 (pc104-das08),
+ * DAS08/JR/16 (das08jr/16)
* Author: Warren Jasper, ds, Frank Hess
* Updated: Fri, 31 Aug 2012 19:19:06 +0100
* Status: works
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
index 0987ce554945..b2ea10b848c3 100644
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -21,7 +21,7 @@
/*
* Driver: das08_pci
* Description: DAS-08 PCI compatible boards
- * Devices: (ComputerBoards) PCI-DAS08 [pci-das08]
+ * Devices: [ComputerBoards] PCI-DAS08 (pci-das08)
* Author: Warren Jasper, ds, Frank Hess
* Updated: Fri, 31 Aug 2012 19:19:06 +0100
* Status: works
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 2436057304a3..2c20311120f1 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -22,28 +22,17 @@
* Driver: das16
* Description: DAS16 compatible boards
* Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze
- * Devices: (Keithley Metrabyte) DAS-16 [das-16]
- * (Keithley Metrabyte) DAS-16G [das-16g]
- * (Keithley Metrabyte) DAS-16F [das-16f]
- * (Keithley Metrabyte) DAS-1201 [das-1201]
- * (Keithley Metrabyte) DAS-1202 [das-1202]
- * (Keithley Metrabyte) DAS-1401 [das-1401]
- * (Keithley Metrabyte) DAS-1402 [das-1402]
- * (Keithley Metrabyte) DAS-1601 [das-1601]
- * (Keithley Metrabyte) DAS-1602 [das-1602]
- * (ComputerBoards) PC104-DAS16/JR [pc104-das16jr]
- * (ComputerBoards) PC104-DAS16JR/16 [pc104-das16jr/16]
- * (ComputerBoards) CIO-DAS16 [cio-das16]
- * (ComputerBoards) CIO-DAS16F [cio-das16/f]
- * (ComputerBoards) CIO-DAS16/JR [cio-das16/jr]
- * (ComputerBoards) CIO-DAS16JR/16 [cio-das16jr/16]
- * (ComputerBoards) CIO-DAS1401/12 [cio-das1401/12]
- * (ComputerBoards) CIO-DAS1402/12 [cio-das1402/12]
- * (ComputerBoards) CIO-DAS1402/16 [cio-das1402/16]
- * (ComputerBoards) CIO-DAS1601/12 [cio-das1601/12]
- * (ComputerBoards) CIO-DAS1602/12 [cio-das1602/12]
- * (ComputerBoards) CIO-DAS1602/16 [cio-das1602/16]
- * (ComputerBoards) CIO-DAS16/330 [cio-das16/330]
+ * Devices: [Keithley Metrabyte] DAS-16 (das-16), DAS-16G (das-16g),
+ * DAS-16F (das-16f), DAS-1201 (das-1201), DAS-1202 (das-1202),
+ * DAS-1401 (das-1401), DAS-1402 (das-1402), DAS-1601 (das-1601),
+ * DAS-1602 (das-1602),
+ * [ComputerBoards] PC104-DAS16/JR (pc104-das16jr),
+ * PC104-DAS16JR/16 (pc104-das16jr/16), CIO-DAS16 (cio-das16),
+ * CIO-DAS16F (cio-das16/f), CIO-DAS16/JR (cio-das16/jr),
+ * CIO-DAS16JR/16 (cio-das16jr/16), CIO-DAS1401/12 (cio-das1401/12),
+ * CIO-DAS1402/12 (cio-das1402/12), CIO-DAS1402/16 (cio-das1402/16),
+ * CIO-DAS1601/12 (cio-das1601/12), CIO-DAS1602/12 (cio-das1602/12),
+ * CIO-DAS1602/16 (cio-das1602/16), CIO-DAS16/330 (cio-das16/330)
* Status: works
* Updated: 2003-10-12
*
@@ -82,17 +71,14 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <asm/dma.h>
-
#include "../comedidev.h"
+#include "comedi_isadma.h"
+#include "comedi_fc.h"
#include "8253.h"
#include "8255.h"
-#include "comedi_fc.h"
#define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */
@@ -451,86 +437,37 @@ static inline int timer_period(void)
}
struct das16_private_struct {
+ struct comedi_isadma *dma;
unsigned int clockbase;
unsigned int ctrl_reg;
- unsigned long adc_byte_count;
unsigned int divisor1;
unsigned int divisor2;
- unsigned int dma_chan;
- uint16_t *dma_buffer[2];
- dma_addr_t dma_buffer_addr[2];
- unsigned int current_buffer;
- unsigned int dma_transfer_size;
- struct comedi_lrange *user_ai_range_table;
- struct comedi_lrange *user_ao_range_table;
struct timer_list timer;
- short timer_running;
unsigned long extra_iobase;
unsigned int can_burst:1;
+ unsigned int timer_running:1;
};
-static void das16_ai_enable(struct comedi_device *dev,
- unsigned int mode, unsigned int src)
-{
- struct das16_private_struct *devpriv = dev->private;
-
- devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE |
- DAS16_CTRL_DMAE |
- DAS16_CTRL_PACING_MASK);
- devpriv->ctrl_reg |= mode;
-
- if (src == TRIG_EXT)
- devpriv->ctrl_reg |= DAS16_CTRL_EXT_PACER;
- else
- devpriv->ctrl_reg |= DAS16_CTRL_INT_PACER;
- outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
-}
-
-static void das16_ai_disable(struct comedi_device *dev)
+static void das16_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int unread_samples)
{
struct das16_private_struct *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned int max_samples = comedi_bytes_to_samples(s, desc->maxsize);
+ unsigned int nsamples;
- /* disable interrupts, dma and pacer clocked conversions */
- devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE |
- DAS16_CTRL_DMAE |
- DAS16_CTRL_PACING_MASK);
- outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
-}
-
-/* the pc104-das16jr (at least) has problems if the dma
- transfer is interrupted in the middle of transferring
- a 16 bit sample, so this function takes care to get
- an even transfer count after disabling dma
- channel.
-*/
-static int disable_dma_on_even(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
- static const int disable_limit = 100;
- static const int enable_timeout = 100;
- int residue;
- int new_residue;
- int i;
- int j;
-
- disable_dma(devpriv->dma_chan);
- residue = get_dma_residue(devpriv->dma_chan);
- for (i = 0; i < disable_limit && (residue % 2); ++i) {
- enable_dma(devpriv->dma_chan);
- for (j = 0; j < enable_timeout; ++j) {
- udelay(2);
- new_residue = get_dma_residue(devpriv->dma_chan);
- if (new_residue != residue)
- break;
- }
- disable_dma(devpriv->dma_chan);
- residue = get_dma_residue(devpriv->dma_chan);
- }
- if (i == disable_limit) {
- dev_err(dev->class_dev,
- "failed to get an even dma transfer, could be trouble\n");
+ /*
+ * Determine dma size based on the buffer size plus the number of
+ * unread samples and the number of samples remaining in the command.
+ */
+ nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
+ if (nsamples > unread_samples) {
+ nsamples -= unread_samples;
+ desc->size = comedi_samples_to_bytes(s, nsamples);
+ comedi_isadma_program(desc);
}
- return residue;
}
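A standalone model (not driver code) of the sizing decision above. comedi_nsamples_left() is approximated here by a simple min() against the samples the command still needs; in the real core it also handles the unlimited (TRIG_NONE) case.

#include <stdio.h>

static unsigned int nsamples_left(unsigned int wanted, unsigned int cmd_remaining)
{
	return wanted < cmd_remaining ? wanted : cmd_remaining;
}

int main(void)
{
	unsigned int max_samples = 0xff00 / 2;	/* DAS16_DMA_SIZE / 2-byte samples */
	unsigned int unread = 100;		/* samples still in the drained buffer */
	unsigned int cmd_remaining = 5000;	/* samples the command still wants */
	unsigned int nsamples;

	nsamples = nsamples_left(max_samples + unread, cmd_remaining);
	if (nsamples > unread) {
		nsamples -= unread;
		printf("program next block: %u samples (%u bytes)\n",
		       nsamples, nsamples * 2);
	} else {
		printf("command nearly complete, no new block programmed\n");
	}
	return 0;
}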
static void das16_interrupt(struct comedi_device *dev)
@@ -539,11 +476,12 @@ static void das16_interrupt(struct comedi_device *dev)
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
unsigned long spin_flags;
- unsigned long dma_flags;
+ unsigned int residue;
+ unsigned int nbytes;
unsigned int nsamples;
- int num_bytes, residue;
- int buffer_index;
spin_lock_irqsave(&dev->spinlock, spin_flags);
if (!(devpriv->ctrl_reg & DAS16_CTRL_DMAE)) {
@@ -551,42 +489,36 @@ static void das16_interrupt(struct comedi_device *dev)
return;
}
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma_chan);
- residue = disable_dma_on_even(dev);
+ /*
+ * The pc104-das16jr (at least) has problems if the dma
+ * transfer is interrupted in the middle of transferring
+ * a 16 bit sample.
+ */
+ residue = comedi_isadma_disable_on_sample(desc->chan,
+ comedi_bytes_per_sample(s));
- /* figure out how many points to read */
- if (residue > devpriv->dma_transfer_size) {
+ /* figure out how many samples to read */
+ if (residue > desc->size) {
dev_err(dev->class_dev, "residue > transfer size!\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- num_bytes = 0;
- } else
- num_bytes = devpriv->dma_transfer_size - residue;
-
- if (cmd->stop_src == TRIG_COUNT &&
- num_bytes >= devpriv->adc_byte_count) {
- num_bytes = devpriv->adc_byte_count;
- async->events |= COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
+ nbytes = 0;
+ } else {
+ nbytes = desc->size - residue;
}
+ nsamples = comedi_bytes_to_samples(s, nbytes);
- buffer_index = devpriv->current_buffer;
- devpriv->current_buffer = (devpriv->current_buffer + 1) % 2;
- devpriv->adc_byte_count -= num_bytes;
-
- /* re-enable dma */
- if ((async->events & COMEDI_CB_EOA) == 0) {
- set_dma_addr(devpriv->dma_chan,
- devpriv->dma_buffer_addr[devpriv->current_buffer]);
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
+ /* restart DMA if more samples are needed */
+ if (nsamples) {
+ dma->cur_dma = 1 - dma->cur_dma;
+ das16_ai_setup_dma(dev, s, nsamples);
}
- release_dma_lock(dma_flags);
spin_unlock_irqrestore(&dev->spinlock, spin_flags);
- nsamples = comedi_bytes_to_samples(s, num_bytes);
- comedi_buf_write_samples(s, devpriv->dma_buffer[buffer_index],
- nsamples);
+ comedi_buf_write_samples(s, desc->virt_addr, nsamples);
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
comedi_handle_events(dev, s);
}
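The interrupt path above alternates between the two buffers allocated by das16_alloc_dma(); a trivial standalone illustration of the cur_dma flip:

#include <stdio.h>

int main(void)
{
	int cur_dma = 0;
	int irq;

	for (irq = 0; irq < 4; irq++) {
		printf("irq %d: drain buffer %d, program buffer %d\n",
		       irq, cur_dma, 1 - cur_dma);
		cur_dma = 1 - cur_dma;
	}
	return 0;
}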
@@ -605,6 +537,29 @@ static void das16_timer_interrupt(unsigned long arg)
spin_unlock_irqrestore(&dev->spinlock, flags);
}
+static void das16_ai_set_mux_range(struct comedi_device *dev,
+ unsigned int first_chan,
+ unsigned int last_chan,
+ unsigned int range)
+{
+ const struct das16_board *board = dev->board_ptr;
+
+ /* set multiplexer */
+ outb(first_chan | (last_chan << 4), dev->iobase + DAS16_MUX_REG);
+
+ /* some boards do not have programmable gain */
+ if (board->ai_pg == das16_pg_none)
+ return;
+
+ /*
+ * Set gain (this is also burst rate register but according to
+ * computer boards manual, burst rate does nothing, even on
+ * keithley cards).
+ */
+ outb((das16_gainlists[board->ai_pg])[range],
+ dev->iobase + DAS16_GAIN_REG);
+}
+
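For reference, the DAS16_MUX_REG encoding used by the new helper: first channel of the scan in the low nibble, last channel in the high nibble (standalone illustration, not driver code).

#include <stdio.h>

int main(void)
{
	unsigned int first_chan = 2, last_chan = 5;

	printf("mux byte for channels %u..%u: 0x%02x\n",
	       first_chan, last_chan, first_chan | (last_chan << 4));
	return 0;
}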
static int das16_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
@@ -755,13 +710,15 @@ static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns,
static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct das16_board *board = dev->board_ptr;
struct das16_private_struct *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
+ unsigned int first_chan = CR_CHAN(cmd->chanlist[0]);
+ unsigned int last_chan = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
+ unsigned int range = CR_RANGE(cmd->chanlist[0]);
unsigned int byte;
unsigned long flags;
- int range;
if (cmd->flags & CMDF_PRIORITY) {
dev_err(dev->class_dev,
@@ -769,24 +726,11 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
return -1;
}
- devpriv->adc_byte_count = cmd->stop_arg * comedi_bytes_per_scan(s);
-
if (devpriv->can_burst)
outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV_REG);
- /* set scan limits */
- byte = CR_CHAN(cmd->chanlist[0]);
- byte |= CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]) << 4;
- outb(byte, dev->iobase + DAS16_MUX_REG);
-
- /* set gain (this is also burst rate register but according to
- * computer boards manual, burst rate does nothing, even on
- * keithley cards) */
- if (board->ai_pg != das16_pg_none) {
- range = CR_RANGE(cmd->chanlist[0]);
- outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN_REG);
- }
+ /* set mux and range for chanlist scan */
+ das16_ai_set_mux_range(dev, first_chan, last_chan, range);
/* set counter mode and counts */
cmd->convert_arg = das16_set_pacer(dev, cmd->convert_arg, cmd->flags);
@@ -805,19 +749,9 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
}
outb(byte, dev->iobase + DAS16_PACER_REG);
- /* set up dma transfer */
- flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
- devpriv->current_buffer = 0;
- set_dma_addr(devpriv->dma_chan,
- devpriv->dma_buffer_addr[devpriv->current_buffer]);
- devpriv->dma_transfer_size = DAS16_DMA_SIZE;
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
- release_dma_lock(flags);
+ /* set up dma transfer */
+ dma->cur_dma = 0;
+ das16_ai_setup_dma(dev, s, 0);
/* set up timer */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -825,7 +759,14 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->timer.expires = jiffies + timer_period();
add_timer(&devpriv->timer);
- das16_ai_enable(dev, DAS16_CTRL_DMAE, cmd->convert_src);
+ /* enable DMA interrupt with external or internal pacing */
+ devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE | DAS16_CTRL_PACING_MASK);
+ devpriv->ctrl_reg |= DAS16_CTRL_DMAE;
+ if (cmd->convert_src == TRIG_EXT)
+ devpriv->ctrl_reg |= DAS16_CTRL_EXT_PACER;
+ else
+ devpriv->ctrl_reg |= DAS16_CTRL_INT_PACER;
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
if (devpriv->can_burst)
outb(0, dev->iobase + DAS1600_CONV_REG);
@@ -837,12 +778,17 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct das16_private_struct *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
- das16_ai_disable(dev);
- disable_dma(devpriv->dma_chan);
+ /* disable interrupts, dma and pacer clocked conversions */
+ devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE | DAS16_CTRL_DMAE |
+ DAS16_CTRL_PACING_MASK);
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+
+ comedi_isadma_disable(dma->chan);
/* disable SW timer */
if (devpriv->timer_running) {
@@ -893,23 +839,14 @@ static int das16_ai_insn_read(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- const struct das16_board *board = dev->board_ptr;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val;
int ret;
int i;
- das16_ai_disable(dev);
-
- /* set multiplexer */
- outb(chan | (chan << 4), dev->iobase + DAS16_MUX_REG);
-
- /* set gain */
- if (board->ai_pg != das16_pg_none) {
- outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN_REG);
- }
+ /* set mux and range for single channel */
+ das16_ai_set_mux_range(dev, chan, chan, range);
for (i = 0; i < insn->n; i++) {
/* trigger conversion */
@@ -1001,14 +938,107 @@ static void das16_reset(struct comedi_device *dev)
outb(0, dev->iobase + DAS16_TIMER_BASE_REG + i8254_control_reg);
}
+static void das16_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
+{
+ struct das16_private_struct *devpriv = dev->private;
+
+ /* only DMA channels 3 and 1 are valid */
+ if (!(dma_chan == 1 || dma_chan == 3))
+ return;
+
+ /* DMA uses two buffers */
+ devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
+ DAS16_DMA_SIZE, COMEDI_ISADMA_READ);
+ if (devpriv->dma) {
+ init_timer(&devpriv->timer);
+ devpriv->timer.function = das16_timer_interrupt;
+ devpriv->timer.data = (unsigned long)dev;
+ }
+}
+
+static void das16_free_dma(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+
+ if (devpriv) {
+ if (devpriv->timer.data)
+ del_timer_sync(&devpriv->timer);
+ comedi_isadma_free(devpriv->dma);
+ }
+}
+
+static const struct comedi_lrange *das16_ai_range(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_devconfig *it,
+ unsigned int pg_type,
+ unsigned int status)
+{
+ unsigned int min = it->options[4];
+ unsigned int max = it->options[5];
+
+ /* get any user-defined input range */
+ if (pg_type == das16_pg_none && (min || max)) {
+ struct comedi_lrange *lrange;
+ struct comedi_krange *krange;
+
+ /* allocate single-range range table */
+ lrange = comedi_alloc_spriv(s,
+ sizeof(*lrange) + sizeof(*krange));
+ if (!lrange)
+ return &range_unknown;
+
+ /* initialize ai range */
+ lrange->length = 1;
+ krange = lrange->range;
+ krange->min = min;
+ krange->max = max;
+ krange->flags = UNIT_volt;
+
+ return lrange;
+ }
+
+ /* use software programmable range */
+ if (status & DAS16_STATUS_UNIPOLAR)
+ return das16_ai_uni_lranges[pg_type];
+ return das16_ai_bip_lranges[pg_type];
+}
+
+static const struct comedi_lrange *das16_ao_range(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_devconfig *it)
+{
+ unsigned int min = it->options[6];
+ unsigned int max = it->options[7];
+
+ /* get any user-defined output range */
+ if (min || max) {
+ struct comedi_lrange *lrange;
+ struct comedi_krange *krange;
+
+ /* allocate single-range range table */
+ lrange = comedi_alloc_spriv(s,
+ sizeof(*lrange) + sizeof(*krange));
+ if (!lrange)
+ return &range_unknown;
+
+ /* initialize ao range */
+ lrange->length = 1;
+ krange = lrange->range;
+ krange->min = min;
+ krange->max = max;
+ krange->flags = UNIT_volt;
+
+ return lrange;
+ }
+
+ return &range_unknown;
+}
+
static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das16_board *board = dev->board_ptr;
struct das16_private_struct *devpriv;
struct comedi_subdevice *s;
- struct comedi_lrange *lrange;
- struct comedi_krange *krange;
- unsigned int dma_chan = it->options[2];
unsigned int status;
int ret;
@@ -1063,72 +1093,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->clockbase = I8254_OSC_BASE_1MHZ;
}
- /* initialize dma */
- if (dma_chan == 1 || dma_chan == 3) {
- unsigned long flags;
- int i;
-
- if (request_dma(dma_chan, dev->board_name)) {
- dev_err(dev->class_dev,
- "failed to request dma channel %i\n",
- dma_chan);
- return -EINVAL;
- }
- devpriv->dma_chan = dma_chan;
-
- /* allocate dma buffers */
- for (i = 0; i < 2; i++) {
- void *p;
-
- p = pci_alloc_consistent(NULL, DAS16_DMA_SIZE,
- &devpriv->dma_buffer_addr[i]);
- if (!p)
- return -ENOMEM;
- devpriv->dma_buffer[i] = p;
- }
-
- flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
- release_dma_lock(flags);
-
- init_timer(&devpriv->timer);
- devpriv->timer.function = das16_timer_interrupt;
- devpriv->timer.data = (unsigned long)dev;
- }
-
- /* get any user-defined input range */
- if (board->ai_pg == das16_pg_none &&
- (it->options[4] || it->options[5])) {
- /* allocate single-range range table */
- lrange = kzalloc(sizeof(*lrange) + sizeof(*krange), GFP_KERNEL);
- if (!lrange)
- return -ENOMEM;
-
- /* initialize ai range */
- devpriv->user_ai_range_table = lrange;
- lrange->length = 1;
- krange = devpriv->user_ai_range_table->range;
- krange->min = it->options[4];
- krange->max = it->options[5];
- krange->flags = UNIT_volt;
- }
-
- /* get any user-defined output range */
- if (it->options[6] || it->options[7]) {
- /* allocate single-range range table */
- lrange = kzalloc(sizeof(*lrange) + sizeof(*krange), GFP_KERNEL);
- if (!lrange)
- return -ENOMEM;
-
- /* initialize ao range */
- devpriv->user_ao_range_table = lrange;
- lrange->length = 1;
- krange = devpriv->user_ao_range_table->range;
- krange->min = it->options[6];
- krange->max = it->options[7];
- krange->flags = UNIT_volt;
- }
+ das16_alloc_dma(dev, it->options[2]);
ret = comedi_alloc_subdevices(dev, 4 + board->has_8255);
if (ret)
@@ -1149,15 +1114,9 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
s->len_chanlist = s->n_chan;
s->maxdata = board->ai_maxdata;
- if (devpriv->user_ai_range_table) { /* user defined ai range */
- s->range_table = devpriv->user_ai_range_table;
- } else if (status & DAS16_STATUS_UNIPOLAR) {
- s->range_table = das16_ai_uni_lranges[board->ai_pg];
- } else {
- s->range_table = das16_ai_bip_lranges[board->ai_pg];
- }
+ s->range_table = das16_ai_range(dev, s, it, board->ai_pg, status);
s->insn_read = das16_ai_insn_read;
- if (devpriv->dma_chan) {
+ if (devpriv->dma) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmdtest = das16_cmd_test;
@@ -1173,7 +1132,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
- s->range_table = devpriv->user_ao_range_table;
+ s->range_table = das16_ao_range(dev, s, it);
s->insn_write = das16_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
@@ -1230,25 +1189,11 @@ static void das16_detach(struct comedi_device *dev)
{
const struct das16_board *board = dev->board_ptr;
struct das16_private_struct *devpriv = dev->private;
- int i;
if (devpriv) {
- if (devpriv->timer.data)
- del_timer_sync(&devpriv->timer);
if (dev->iobase)
das16_reset(dev);
-
- for (i = 0; i < 2; i++) {
- if (devpriv->dma_buffer[i])
- pci_free_consistent(NULL, DAS16_DMA_SIZE,
- devpriv->dma_buffer[i],
- devpriv->
- dma_buffer_addr[i]);
- }
- if (devpriv->dma_chan)
- free_dma(devpriv->dma_chan);
- kfree(devpriv->user_ai_range_table);
- kfree(devpriv->user_ao_range_table);
+ das16_free_dma(dev);
if (devpriv->extra_iobase)
release_region(devpriv->extra_iobase,
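For reference, the das16 hunks above follow the same conversion pattern used throughout this series: allocate a comedi_isadma context in the attach path, tear it down in detach, and treat a failed allocation as "no async command support" rather than a fatal error. A minimal sketch of that pairing, using only the helper calls visible in these hunks; the example_* names are placeholders and not part of any driver:

/*
 * Minimal sketch, not part of the patch: pair comedi_isadma_alloc() in
 * attach with comedi_isadma_free() in detach.  The hunks above pass a
 * possibly-NULL pointer to comedi_isadma_free(), so the helper is
 * assumed to tolerate NULL.
 */
struct example_private {
        struct comedi_isadma *dma;
};

static void example_alloc_dma(struct comedi_device *dev,
                              unsigned int dma_chan)
{
        struct example_private *devpriv = dev->private;

        /* two descriptors on one channel give a ping-pong buffer pair */
        devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
                                           PAGE_SIZE, COMEDI_ISADMA_READ);
}

static void example_free_dma(struct comedi_device *dev)
{
        struct example_private *devpriv = dev->private;

        if (devpriv)
                comedi_isadma_free(devpriv->dma);
}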
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 80f41b7e8273..3666a68979fb 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -455,7 +455,7 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
/* this probably won't catch overruns since the card doesn't generate
* overrun interrupts, but we might as well try */
if (status & OVRUN) {
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
dev_err(dev->class_dev, "fifo overflow\n");
}
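The COMEDI_CB_EOA removal here (and in the das1800, das800, dt3000, gsc_hpdi, me4000 and ni_at_a2150 hunks below) relies on the comedi core already terminating the command when an error event is reported, so flagging the error alone is enough. A hedged sketch of the resulting driver-side idiom, inferred only from how these hunks use the core calls (example_* is a placeholder):

/*
 * Sketch only: report the error and let comedi_handle_events() shut the
 * command down; COMEDI_CB_EOA no longer needs to be set alongside it.
 */
static void example_report_overrun(struct comedi_device *dev,
                                   struct comedi_subdevice *s)
{
        dev_err(dev->class_dev, "fifo overflow\n");
        s->async->events |= COMEDI_CB_ERROR;
        comedi_handle_events(dev, s);
}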
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index be825d21a185..0790a28828de 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -98,12 +98,12 @@ TODO:
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include "../comedidev.h"
-#include <asm/dma.h>
+#include "../comedidev.h"
-#include "8253.h"
+#include "comedi_isadma.h"
#include "comedi_fc.h"
+#include "8253.h"
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
@@ -421,19 +421,14 @@ static const struct das1800_board das1800_boards[] = {
};
struct das1800_private {
+ struct comedi_isadma *dma;
unsigned int divisor1; /* value to load into board's counter 1 for timed conversions */
unsigned int divisor2; /* value to load into board's counter 2 for timed conversions */
int irq_dma_bits; /* bits for control register b */
/* dma bits for control register b, stored so that dma can be
* turned on and off */
int dma_bits;
- unsigned int dma0; /* dma channels used */
- unsigned int dma1;
- unsigned int dma_current; /* dma channel currently in use */
- uint16_t *ai_buf0; /* pointers to dma buffers */
- uint16_t *ai_buf1;
- uint16_t *dma_current_buf; /* pointer to dma buffer currently being used */
- unsigned int dma_transfer_size; /* size of transfer currently used, in bytes */
+ uint16_t *fifo_buf; /* bounce buffer for analog input FIFO */
unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */
unsigned short ao_update_bits; /* remembers the last write to the
* 'update' dac */
@@ -480,9 +475,9 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
struct das1800_private *devpriv = dev->private;
unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
- insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, nsamples);
- munge_data(dev, devpriv->ai_buf0, nsamples);
- comedi_buf_write_samples(s, devpriv->ai_buf0, nsamples);
+ insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples);
+ munge_data(dev, devpriv->fifo_buf, nsamples);
+ comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples);
}
static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
@@ -508,29 +503,21 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
}
}
-/* Utility function used by das1800_flush_dma() and das1800_handle_dma().
- * Assumes dma lock is held */
+/* Utility function used by das1800_flush_dma() and das1800_handle_dma() */
static void das1800_flush_dma_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned int channel, uint16_t *buffer)
+ struct comedi_isadma_desc *desc)
{
- struct das1800_private *devpriv = dev->private;
- unsigned int nbytes;
+ unsigned int residue = comedi_isadma_disable(desc->chan);
+ unsigned int nbytes = desc->size - residue;
unsigned int nsamples;
- disable_dma(channel);
-
- /* clear flip-flop to make sure 2-byte registers
- * get set correctly */
- clear_dma_ff(channel);
-
/* figure out how many points to read */
- nbytes = devpriv->dma_transfer_size - get_dma_residue(channel);
nsamples = comedi_bytes_to_samples(s, nbytes);
nsamples = comedi_nsamples_left(s, nsamples);
- munge_data(dev, buffer, nsamples);
- comedi_buf_write_samples(s, buffer, nsamples);
+ munge_data(dev, desc->virt_addr, nsamples);
+ comedi_buf_write_samples(s, desc->virt_addr, nsamples);
}
/* flushes remaining data from board when external trigger has stopped acquisition
@@ -539,28 +526,19 @@ static void das1800_flush_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
- unsigned long flags;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL;
- flags = claim_dma_lock();
- das1800_flush_dma_channel(dev, s, devpriv->dma_current,
- devpriv->dma_current_buf);
+ das1800_flush_dma_channel(dev, s, desc);
if (dual_dma) {
/* switch to other channel and flush it */
- if (devpriv->dma_current == devpriv->dma0) {
- devpriv->dma_current = devpriv->dma1;
- devpriv->dma_current_buf = devpriv->ai_buf1;
- } else {
- devpriv->dma_current = devpriv->dma0;
- devpriv->dma_current_buf = devpriv->ai_buf0;
- }
- das1800_flush_dma_channel(dev, s, devpriv->dma_current,
- devpriv->dma_current_buf);
+ dma->cur_dma = 1 - dma->cur_dma;
+ desc = &dma->desc[dma->cur_dma];
+ das1800_flush_dma_channel(dev, s, desc);
}
- release_dma_lock(flags);
-
/* get any remaining samples in fifo */
das1800_handle_fifo_not_empty(dev, s);
}
@@ -569,47 +547,41 @@ static void das1800_handle_dma(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int status)
{
struct das1800_private *devpriv = dev->private;
- unsigned long flags;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL;
- flags = claim_dma_lock();
- das1800_flush_dma_channel(dev, s, devpriv->dma_current,
- devpriv->dma_current_buf);
- /* re-enable dma channel */
- set_dma_addr(devpriv->dma_current,
- virt_to_bus(devpriv->dma_current_buf));
- set_dma_count(devpriv->dma_current, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_current);
- release_dma_lock(flags);
+ das1800_flush_dma_channel(dev, s, desc);
+
+ /* re-enable dma channel */
+ comedi_isadma_program(desc);
if (status & DMATC) {
/* clear DMATC interrupt bit */
outb(CLEAR_INTR_MASK & ~DMATC, dev->iobase + DAS1800_STATUS);
/* switch dma channels for next time, if appropriate */
- if (dual_dma) {
- /* read data from the other channel next time */
- if (devpriv->dma_current == devpriv->dma0) {
- devpriv->dma_current = devpriv->dma1;
- devpriv->dma_current_buf = devpriv->ai_buf1;
- } else {
- devpriv->dma_current = devpriv->dma0;
- devpriv->dma_current_buf = devpriv->ai_buf0;
- }
- }
+ if (dual_dma)
+ dma->cur_dma = 1 - dma->cur_dma;
}
}
static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc;
+ int i;
outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */
outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */
outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */
- if (devpriv->dma0)
- disable_dma(devpriv->dma0);
- if (devpriv->dma1)
- disable_dma(devpriv->dma1);
+
+ for (i = 0; i < 2; i++) {
+ desc = &dma->desc[i];
+ if (desc->chan)
+ comedi_isadma_disable(desc->chan);
+ }
+
return 0;
}
@@ -639,7 +611,7 @@ static void das1800_ai_handler(struct comedi_device *dev)
/* clear OVF interrupt bit */
outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS);
dev_err(dev->class_dev, "FIFO overflow\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
return;
}
@@ -963,79 +935,64 @@ static void das1800_setup_counters(struct comedi_device *dev,
}
}
-/* utility function that suggests a dma transfer size based on the conversion period 'ns' */
-static unsigned int suggest_transfer_size(const struct comedi_cmd *cmd)
+static unsigned int das1800_ai_transfer_size(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int maxbytes,
+ unsigned int ns)
{
- unsigned int size = DMA_BUF_SIZE;
- static const int sample_size = 2; /* size in bytes of one sample from board */
- unsigned int fill_time = 300000000; /* target time in nanoseconds for filling dma buffer */
- unsigned int max_size; /* maximum size we will allow for a transfer */
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int max_samples = comedi_bytes_to_samples(s, maxbytes);
+ unsigned int samples;
+
+ samples = max_samples;
- /* make dma buffer fill in 0.3 seconds for timed modes */
+ /* for timed modes, make dma buffer fill in 'ns' time */
switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW: /* not in burst mode */
+ case TRIG_FOLLOW: /* not in burst mode */
if (cmd->convert_src == TRIG_TIMER)
- size = (fill_time / cmd->convert_arg) * sample_size;
+ samples = ns / cmd->convert_arg;
break;
case TRIG_TIMER:
- size = (fill_time / (cmd->scan_begin_arg * cmd->chanlist_len)) *
- sample_size;
- break;
- default:
- size = DMA_BUF_SIZE;
+ samples = ns / (cmd->scan_begin_arg * cmd->chanlist_len);
break;
}
- /* set a minimum and maximum size allowed */
- max_size = DMA_BUF_SIZE;
- /* if we are taking limited number of conversions, limit transfer size to that */
- if (cmd->stop_src == TRIG_COUNT &&
- cmd->stop_arg * cmd->chanlist_len * sample_size < max_size)
- max_size = cmd->stop_arg * cmd->chanlist_len * sample_size;
+ /* limit samples to what is remaining in the command */
+ samples = comedi_nsamples_left(s, samples);
- if (size > max_size)
- size = max_size;
- if (size < sample_size)
- size = sample_size;
+ if (samples > max_samples)
+ samples = max_samples;
+ if (samples < 1)
+ samples = 1;
- return size;
+ return comedi_samples_to_bytes(s, samples);
}
-/* sets up dma */
-static void setup_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
+static void das1800_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
- unsigned long lock_flags;
- const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[0];
+ unsigned int bytes;
if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
return;
- /* determine a reasonable dma transfer size */
- devpriv->dma_transfer_size = suggest_transfer_size(cmd);
- lock_flags = claim_dma_lock();
- disable_dma(devpriv->dma0);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma0);
- set_dma_addr(devpriv->dma0, virt_to_bus(devpriv->ai_buf0));
- /* set appropriate size of transfer */
- set_dma_count(devpriv->dma0, devpriv->dma_transfer_size);
- devpriv->dma_current = devpriv->dma0;
- devpriv->dma_current_buf = devpriv->ai_buf0;
- enable_dma(devpriv->dma0);
- /* set up dual dma if appropriate */
- if (dual_dma) {
- disable_dma(devpriv->dma1);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma1);
- set_dma_addr(devpriv->dma1, virt_to_bus(devpriv->ai_buf1));
- /* set appropriate size of transfer */
- set_dma_count(devpriv->dma1, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma1);
+ dma->cur_dma = 0;
+
+ /* determine a dma transfer size to fill buffer in 0.3 sec */
+ bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
+
+ desc->size = bytes;
+ comedi_isadma_program(desc);
+
+ /* set up dual dma if appropriate */
+ if (devpriv->irq_dma_bits & DMA_DUAL) {
+ desc = &dma->desc[1];
+ desc->size = bytes;
+ comedi_isadma_program(desc);
}
- release_dma_lock(lock_flags);
}
/* programs channel/gain list into card */
@@ -1097,7 +1054,7 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
/* setup card and start */
program_chanlist(dev, cmd);
das1800_setup_counters(dev, cmd);
- setup_dma(dev, cmd);
+ das1800_ai_setup_dma(dev, s);
outb(control_c, dev->iobase + DAS1800_CONTROL_C);
/* set conversion rate and length for burst mode */
if (control_c & BMDE) {
@@ -1234,79 +1191,57 @@ static int das1800_do_wbits(struct comedi_device *dev,
return insn->n;
}
-static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0,
- unsigned int dma1)
+static void das1800_init_dma(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
struct das1800_private *devpriv = dev->private;
- unsigned long flags;
+ unsigned int *dma_chan;
- /* need an irq to do dma */
- if (dev->irq && dma0) {
- /* encode dma0 and dma1 into 2 digit hexadecimal for switch */
- switch ((dma0 & 0x7) | (dma1 << 4)) {
- case 0x5: /* dma0 == 5 */
- devpriv->dma_bits |= DMA_CH5;
- break;
- case 0x6: /* dma0 == 6 */
- devpriv->dma_bits |= DMA_CH6;
- break;
- case 0x7: /* dma0 == 7 */
- devpriv->dma_bits |= DMA_CH7;
- break;
- case 0x65: /* dma0 == 5, dma1 == 6 */
- devpriv->dma_bits |= DMA_CH5_CH6;
- break;
- case 0x76: /* dma0 == 6, dma1 == 7 */
- devpriv->dma_bits |= DMA_CH6_CH7;
- break;
- case 0x57: /* dma0 == 7, dma1 == 5 */
- devpriv->dma_bits |= DMA_CH7_CH5;
- break;
- default:
- dev_err(dev->class_dev,
- "only supports dma channels 5 through 7\n");
- dev_err(dev->class_dev,
- "Dual dma only allows the following combinations:\n");
- dev_err(dev->class_dev,
- "dma 5,6 / 6,7 / or 7,5\n");
- return -EINVAL;
- }
- if (request_dma(dma0, dev->driver->driver_name)) {
- dev_err(dev->class_dev,
- "failed to allocate dma channel %i\n", dma0);
- return -EINVAL;
- }
- devpriv->dma0 = dma0;
- devpriv->dma_current = dma0;
- if (dma1) {
- if (request_dma(dma1, dev->driver->driver_name)) {
- dev_err(dev->class_dev,
- "failed to allocate dma channel %i\n",
- dma1);
- return -EINVAL;
- }
- devpriv->dma1 = dma1;
- }
- devpriv->ai_buf0 = kmalloc(DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA);
- if (devpriv->ai_buf0 == NULL)
- return -ENOMEM;
- devpriv->dma_current_buf = devpriv->ai_buf0;
- if (dma1) {
- devpriv->ai_buf1 =
- kmalloc(DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA);
- if (devpriv->ai_buf1 == NULL)
- return -ENOMEM;
- }
- flags = claim_dma_lock();
- disable_dma(devpriv->dma0);
- set_dma_mode(devpriv->dma0, DMA_MODE_READ);
- if (dma1) {
- disable_dma(devpriv->dma1);
- set_dma_mode(devpriv->dma1, DMA_MODE_READ);
- }
- release_dma_lock(flags);
+ /*
+ * it->options[2] is DMA channel 0
+ * it->options[3] is DMA channel 1
+ *
+ * Encode the DMA channels into a 2-digit hexadecimal value for the switch.
+ */
+ dma_chan = &it->options[2];
+
+ switch ((dma_chan[0] & 0x7) | (dma_chan[1] << 4)) {
+ case 0x5: /* dma0 == 5 */
+ devpriv->dma_bits = DMA_CH5;
+ break;
+ case 0x6: /* dma0 == 6 */
+ devpriv->dma_bits = DMA_CH6;
+ break;
+ case 0x7: /* dma0 == 7 */
+ devpriv->dma_bits = DMA_CH7;
+ break;
+ case 0x65: /* dma0 == 5, dma1 == 6 */
+ devpriv->dma_bits = DMA_CH5_CH6;
+ break;
+ case 0x76: /* dma0 == 6, dma1 == 7 */
+ devpriv->dma_bits = DMA_CH6_CH7;
+ break;
+ case 0x57: /* dma0 == 7, dma1 == 5 */
+ devpriv->dma_bits = DMA_CH7_CH5;
+ break;
+ default:
+ return;
}
- return 0;
+
+ /* DMA can use 1 or 2 buffers, each with a separate channel */
+ devpriv->dma = comedi_isadma_alloc(dev, dma_chan[1] ? 2 : 1,
+ dma_chan[0], dma_chan[1],
+ DMA_BUF_SIZE, COMEDI_ISADMA_READ);
+ if (!devpriv->dma)
+ devpriv->dma_bits = 0;
+}
+
+static void das1800_free_dma(struct comedi_device *dev)
+{
+ struct das1800_private *devpriv = dev->private;
+
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
}
static int das1800_probe(struct comedi_device *dev)
@@ -1374,8 +1309,6 @@ static int das1800_attach(struct comedi_device *dev,
struct das1800_private *devpriv;
struct comedi_subdevice *s;
unsigned int irq = it->options[1];
- unsigned int dma0 = it->options[2];
- unsigned int dma1 = it->options[3];
int board;
int ret;
@@ -1437,16 +1370,13 @@ static int das1800_attach(struct comedi_device *dev,
}
}
- ret = das1800_init_dma(dev, dma0, dma1);
- if (ret < 0)
- return ret;
+ /* an irq and one dma channel are required to use dma */
+ if (dev->irq && it->options[2])
+ das1800_init_dma(dev, it);
- if (devpriv->ai_buf0 == NULL) {
- devpriv->ai_buf0 =
- kmalloc(FIFO_SIZE * sizeof(uint16_t), GFP_KERNEL);
- if (devpriv->ai_buf0 == NULL)
- return -ENOMEM;
- }
+ devpriv->fifo_buf = kmalloc_array(FIFO_SIZE, sizeof(uint16_t), GFP_KERNEL);
+ if (!devpriv->fifo_buf)
+ return -ENOMEM;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
@@ -1523,13 +1453,9 @@ static void das1800_detach(struct comedi_device *dev)
{
struct das1800_private *devpriv = dev->private;
+ das1800_free_dma(dev);
if (devpriv) {
- if (devpriv->dma0)
- free_dma(devpriv->dma0);
- if (devpriv->dma1)
- free_dma(devpriv->dma1);
- kfree(devpriv->ai_buf0);
- kfree(devpriv->ai_buf1);
+ kfree(devpriv->fifo_buf);
if (devpriv->iobase2)
release_region(devpriv->iobase2, DAS1800_SIZE);
}
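Two of the new das1800 helpers above are easier to follow with concrete numbers. The figures below are illustrative only and not taken from the patch:

/*
 * das1800_ai_transfer_size(), illustrative numbers: with
 * scan_begin_src == TRIG_FOLLOW, convert_src == TRIG_TIMER and
 * convert_arg = 10000 (10 us per conversion), filling the buffer in
 * ns = 300000000 (0.3 s) gives
 *
 *      samples = 300000000 / 10000 = 30000
 *
 * which is then clamped by comedi_nsamples_left() and by the
 * descriptor's maxsize before being converted back to bytes.
 *
 * das1800_init_dma(), illustrative numbers: options[2] = 6 and
 * options[3] = 7 encode as (6 & 0x7) | (7 << 4) = 0x76, selecting the
 * DMA_CH6_CH7 case; any combination not listed in the switch simply
 * leaves DMA disabled.
 */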
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 780f4f646ea0..b8755b50a11e 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -20,8 +20,8 @@
/*
* Driver: das6402
* Description: Keithley Metrabyte DAS6402 (& compatibles)
- * Devices: (Keithley Metrabyte) DAS6402-12 (das6402-12)
- * (Keithley Metrabyte) DAS6402-16 (das6402-16)
+ * Devices: [Keithley Metrabyte] DAS6402-12 (das6402-12),
+ * DAS6402-16 (das6402-16)
* Author: H Hartley Sweeten <hsweeten@visionengravers.com>
* Updated: Fri, 14 Mar 2014 10:18:43 -0700
* Status: unknown
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index e5bdc2423445..ff7f4be3f314 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -511,7 +511,7 @@ static irqreturn_t das800_interrupt(int irq, void *d)
if (fifo_overflow) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 6df298a99cc6..1af006609fc1 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -19,7 +19,7 @@
/*
* Driver: dmm32at
* Description: Diamond Systems Diamond-MM-32-AT
- * Devices: (Diamond Systems) Diamond-MM-32-AT [dmm32at]
+ * Devices: [Diamond Systems] Diamond-MM-32-AT (dmm32at)
* Author: Perry J. Piplani <perry.j.piplani@nasa.gov>
* Updated: Fri Jun 4 09:13:24 CDT 2004
* Status: experimental
@@ -365,7 +365,7 @@ static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
/* enable the ai conversion interrupt and the clock to start scans */
outb(DMM32AT_INTCLK_ADINT |
DMM32AT_INTCLK_CLKEN | DMM32AT_INTCLK_CLKSEL,
- dev->iobase + DMM32AT_INTCLK_REG);
+ dev->iobase + DMM32AT_INTCLK_REG);
}
static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 2be98bb9a809..db21d2135856 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -20,22 +20,12 @@
* Driver: dt282x
* Description: Data Translation DT2821 series (including DT-EZ)
* Author: ds
- * Devices: (Data Translation) DT2821 [dt2821]
- * (Data Translation) DT2821-F-16SE [dt2821-f]
- * (Data Translation) DT2821-F-8DI [dt2821-f]
- * (Data Translation) DT2821-G-16SE [dt2821-g]
- * (Data Translation) DT2821-G-8DI [dt2821-g]
- * (Data Translation) DT2823 [dt2823]
- * (Data Translation) DT2824-PGH [dt2824-pgh]
- * (Data Translation) DT2824-PGL [dt2824-pgl]
- * (Data Translation) DT2825 [dt2825]
- * (Data Translation) DT2827 [dt2827]
- * (Data Translation) DT2828 [dt2828]
- * (Data Translation) DT2928 [dt2829]
- * (Data Translation) DT21-EZ [dt21-ez]
- * (Data Translation) DT23-EZ [dt23-ez]
- * (Data Translation) DT24-EZ [dt24-ez]
- * (Data Translation) DT24-EZ-PGL [dt24-ez-pgl]
+ * Devices: [Data Translation] DT2821 (dt2821), DT2821-F-16SE (dt2821-f),
+ * DT2821-F-8DI (dt2821-f), DT2821-G-16SE (dt2821-g),
+ * DT2821-G-8DI (dt2821-g), DT2823 (dt2823), DT2824-PGH (dt2824-pgh),
+ * DT2824-PGL (dt2824-pgl), DT2825 (dt2825), DT2827 (dt2827),
+ * DT2828 (dt2828), DT2928 (dt2829), DT21-EZ (dt21-ez), DT23-EZ (dt23-ez),
+ * DT24-EZ (dt24-ez), DT24-EZ-PGL (dt24-ez-pgl)
* Status: complete
* Updated: Wed, 22 Aug 2001 17:11:34 -0700
*
@@ -66,15 +56,14 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/dma.h>
+#include "../comedidev.h"
+#include "comedi_isadma.h"
#include "comedi_fc.h"
/*
@@ -311,55 +300,36 @@ static const struct dt282x_board boardtypes[] = {
};
struct dt282x_private {
+ struct comedi_isadma *dma;
unsigned int ad_2scomp:1;
-
unsigned int divisor;
-
int dacsr; /* software copies of registers */
int adcsr;
int supcsr;
-
int ntrig;
int nread;
-
- struct {
- int chan;
- unsigned short *buf; /* DMA buffer */
- int size; /* size of current transfer */
- } dma[2];
- int dma_maxsize; /* max size of DMA transfer (in bytes) */
- int current_dma_index;
int dma_dir;
};
static int dt282x_prep_ai_dma(struct comedi_device *dev, int dma_index, int n)
{
struct dt282x_private *devpriv = dev->private;
- int dma_chan;
- unsigned long dma_ptr;
- unsigned long flags;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma_index];
if (!devpriv->ntrig)
return 0;
if (n == 0)
- n = devpriv->dma_maxsize;
+ n = desc->maxsize;
if (n > devpriv->ntrig * 2)
n = devpriv->ntrig * 2;
devpriv->ntrig -= n / 2;
- devpriv->dma[dma_index].size = n;
- dma_chan = devpriv->dma[dma_index].chan;
- dma_ptr = virt_to_bus(devpriv->dma[dma_index].buf);
-
- set_dma_mode(dma_chan, DMA_MODE_READ);
- flags = claim_dma_lock();
- clear_dma_ff(dma_chan);
- set_dma_addr(dma_chan, dma_ptr);
- set_dma_count(dma_chan, n);
- release_dma_lock(flags);
+ desc->size = n;
+ comedi_isadma_set_mode(desc, devpriv->dma_dir);
- enable_dma(dma_chan);
+ comedi_isadma_program(desc);
return n;
}
@@ -367,22 +337,13 @@ static int dt282x_prep_ai_dma(struct comedi_device *dev, int dma_index, int n)
static int dt282x_prep_ao_dma(struct comedi_device *dev, int dma_index, int n)
{
struct dt282x_private *devpriv = dev->private;
- int dma_chan;
- unsigned long dma_ptr;
- unsigned long flags;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma_index];
- devpriv->dma[dma_index].size = n;
- dma_chan = devpriv->dma[dma_index].chan;
- dma_ptr = virt_to_bus(devpriv->dma[dma_index].buf);
+ desc->size = n;
+ comedi_isadma_set_mode(desc, devpriv->dma_dir);
- set_dma_mode(dma_chan, DMA_MODE_WRITE);
- flags = claim_dma_lock();
- clear_dma_ff(dma_chan);
- set_dma_addr(dma_chan, dma_ptr);
- set_dma_count(dma_chan, n);
- release_dma_lock(flags);
-
- enable_dma(dma_chan);
+ comedi_isadma_program(desc);
return n;
}
@@ -390,9 +351,14 @@ static int dt282x_prep_ao_dma(struct comedi_device *dev, int dma_index, int n)
static void dt282x_disable_dma(struct comedi_device *dev)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc;
+ int i;
- disable_dma(devpriv->dma[0].chan);
- disable_dma(devpriv->dma[1].chan);
+ for (i = 0; i < 2; i++) {
+ desc = &dma->desc[i];
+ comedi_isadma_disable(desc->chan);
+ }
}
static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
@@ -454,11 +420,12 @@ static unsigned int dt282x_ao_setup_dma(struct comedi_device *dev,
int cur_dma)
{
struct dt282x_private *devpriv = dev->private;
- void *ptr = devpriv->dma[cur_dma].buf;
- unsigned int nsamples = comedi_bytes_to_samples(s, devpriv->dma_maxsize);
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[cur_dma];
+ unsigned int nsamples = comedi_bytes_to_samples(s, desc->maxsize);
unsigned int nbytes;
- nbytes = comedi_buf_read_samples(s, ptr, nsamples);
+ nbytes = comedi_buf_read_samples(s, desc->virt_addr, nsamples);
if (nbytes)
dt282x_prep_ao_dma(dev, cur_dma, nbytes);
else
@@ -471,39 +438,37 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct dt282x_private *devpriv = dev->private;
- int cur_dma = devpriv->current_dma_index;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
dev->iobase + DT2821_SUPCSR_REG);
- disable_dma(devpriv->dma[cur_dma].chan);
-
- devpriv->current_dma_index = 1 - cur_dma;
+ comedi_isadma_disable(desc->chan);
- if (!dt282x_ao_setup_dma(dev, s, cur_dma))
+ if (!dt282x_ao_setup_dma(dev, s, dma->cur_dma))
s->async->events |= COMEDI_CB_OVERFLOW;
+
+ dma->cur_dma = 1 - dma->cur_dma;
}
static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct dt282x_private *devpriv = dev->private;
- int cur_dma = devpriv->current_dma_index;
- void *ptr = devpriv->dma[cur_dma].buf;
- int size = devpriv->dma[cur_dma].size;
- unsigned int nsamples = comedi_bytes_to_samples(s, size);
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned int nsamples = comedi_bytes_to_samples(s, desc->size);
int ret;
outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
dev->iobase + DT2821_SUPCSR_REG);
- disable_dma(devpriv->dma[cur_dma].chan);
-
- devpriv->current_dma_index = 1 - cur_dma;
+ comedi_isadma_disable(desc->chan);
- dt282x_munge(dev, s, ptr, size);
- ret = comedi_buf_write_samples(s, ptr, nsamples);
- if (ret != size)
+ dt282x_munge(dev, s, desc->virt_addr, desc->size);
+ ret = comedi_buf_write_samples(s, desc->virt_addr, nsamples);
+ if (ret != desc->size)
return;
devpriv->nread -= nsamples;
@@ -524,7 +489,9 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
}
#endif
/* restart the channel */
- dt282x_prep_ai_dma(dev, cur_dma, 0);
+ dt282x_prep_ai_dma(dev, dma->cur_dma, 0);
+
+ dma->cur_dma = 1 - dma->cur_dma;
}
static irqreturn_t dt282x_interrupt(int irq, void *d)
@@ -545,7 +512,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
dacsr = inw(dev->iobase + DT2821_DACSR_REG);
supcsr = inw(dev->iobase + DT2821_SUPCSR_REG);
if (supcsr & DT2821_SUPCSR_DMAD) {
- if (devpriv->dma_dir == DMA_MODE_READ)
+ if (devpriv->dma_dir == COMEDI_ISADMA_READ)
dt282x_ai_dma_interrupt(dev, s);
else
dt282x_ao_dma_interrupt(dev, s_ao);
@@ -718,14 +685,7 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- /* internal trigger */
- err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- } else {
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- }
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 4000);
@@ -757,6 +717,7 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_cmd *cmd = &s->async->cmd;
int ret;
@@ -778,8 +739,8 @@ static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ntrig = cmd->stop_arg * cmd->scan_end_arg;
devpriv->nread = devpriv->ntrig;
- devpriv->dma_dir = DMA_MODE_READ;
- devpriv->current_dma_index = 0;
+ devpriv->dma_dir = COMEDI_ISADMA_READ;
+ dma->cur_dma = 0;
dt282x_prep_ai_dma(dev, 0, 0);
if (devpriv->ntrig) {
dt282x_prep_ai_dma(dev, 1, 0);
@@ -942,6 +903,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
static int dt282x_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_cmd *cmd = &s->async->cmd;
dt282x_disable_dma(dev);
@@ -958,8 +920,8 @@ static int dt282x_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ntrig = cmd->stop_arg * cmd->chanlist_len;
devpriv->nread = devpriv->ntrig;
- devpriv->dma_dir = DMA_MODE_WRITE;
- devpriv->current_dma_index = 0;
+ devpriv->dma_dir = COMEDI_ISADMA_WRITE;
+ dma->cur_dma = 0;
outw(devpriv->divisor, dev->iobase + DT2821_TMRCTR_REG);
@@ -1063,46 +1025,42 @@ static const struct comedi_lrange *opt_ai_range_lkup(int ispgl, int x)
return ai_range_table[x];
}
-static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
+static void dt282x_alloc_dma(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
struct dt282x_private *devpriv = dev->private;
- int ret;
+ unsigned int irq_num = it->options[1];
+ unsigned int dma_chan[2];
- ret = request_dma(dma1, "dt282x A");
- if (ret)
- return -EBUSY;
- devpriv->dma[0].chan = dma1;
+ if (it->options[2] < it->options[3]) {
+ dma_chan[0] = it->options[2];
+ dma_chan[1] = it->options[3];
+ } else {
+ dma_chan[0] = it->options[3];
+ dma_chan[1] = it->options[2];
+ }
- ret = request_dma(dma2, "dt282x B");
- if (ret)
- return -EBUSY;
- devpriv->dma[1].chan = dma2;
+ if (!irq_num || dma_chan[0] == dma_chan[1] ||
+ dma_chan[0] < 5 || dma_chan[0] > 7 ||
+ dma_chan[1] < 5 || dma_chan[1] > 7)
+ return;
- devpriv->dma_maxsize = PAGE_SIZE;
- devpriv->dma[0].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- devpriv->dma[1].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (!devpriv->dma[0].buf || !devpriv->dma[1].buf)
- return -ENOMEM;
+ if (request_irq(irq_num, dt282x_interrupt, 0, dev->board_name, dev))
+ return;
- return 0;
+ /* DMA uses two 4K buffers with separate DMA channels */
+ devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan[0], dma_chan[1],
+ PAGE_SIZE, 0);
+ if (!devpriv->dma)
+ free_irq(irq_num, dev);
}
static void dt282x_free_dma(struct comedi_device *dev)
{
struct dt282x_private *devpriv = dev->private;
- int i;
-
- if (!devpriv)
- return;
- for (i = 0; i < 2; i++) {
- if (devpriv->dma[i].chan)
- free_dma(devpriv->dma[i].chan);
- if (devpriv->dma[i].buf)
- free_page((unsigned long)devpriv->dma[i].buf);
- devpriv->dma[i].chan = 0;
- devpriv->dma[i].buf = NULL;
- }
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
}
static int dt282x_initialize(struct comedi_device *dev)
@@ -1160,36 +1118,7 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -ENOMEM;
/* an IRQ and 2 DMA channels are required for async command support */
- if (it->options[1] && it->options[2] && it->options[3]) {
- unsigned int irq = it->options[1];
- unsigned int dma1 = it->options[2];
- unsigned int dma2 = it->options[3];
-
- if (dma2 < dma1) {
- unsigned int swap;
-
- swap = dma1;
- dma1 = dma2;
- dma2 = swap;
- }
-
- if (dma1 != dma2 &&
- dma1 >= 5 && dma1 <= 7 &&
- dma2 >= 5 && dma2 <= 7) {
- ret = request_irq(irq, dt282x_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0) {
- dev->irq = irq;
-
- ret = dt282x_grab_dma(dev, dma1, dma2);
- if (ret < 0) {
- dt282x_free_dma(dev);
- free_irq(dev->irq, dev);
- dev->irq = 0;
- }
- }
- }
- }
+ dt282x_alloc_dma(dev, it);
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
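Because dt282x now shares one pair of descriptors between analog input and output commands, each DMA (re)start sets the transfer direction explicitly before programming the channel. A minimal sketch of that idiom, using only calls visible in the hunks above (example_prep_dma is a placeholder):

/*
 * Sketch only: devpriv->dma_dir is COMEDI_ISADMA_READ for AI commands
 * and COMEDI_ISADMA_WRITE for AO commands, so the mode must be set on
 * every descriptor restart.
 */
static void example_prep_dma(struct comedi_device *dev, int index,
                             unsigned int nbytes)
{
        struct dt282x_private *devpriv = dev->private;
        struct comedi_isadma_desc *desc = &devpriv->dma->desc[index];

        desc->size = nbytes;
        comedi_isadma_set_mode(desc, devpriv->dma_dir);
        comedi_isadma_program(desc);
}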
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 1d9a7a63e06f..0aa51980e327 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -355,7 +355,7 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
dt3k_ai_empty_fifo(dev, s);
if (status & (DT3000_ADSWERR | DT3000_ADHWERR))
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
debug_n_ints++;
if (debug_n_ints >= 10)
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 06c601d8fdff..e11c216a4c85 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -42,9 +42,8 @@ for my needs.
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
-#include <linux/usb.h>
-#include "../comedidev.h"
+#include "../comedi_usb.h"
#define DT9812_DIAGS_BOARD_INFO_ADDR 0xFBFF
#define DT9812_MAX_WRITE_CMD_PIPE_SIZE 32
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index 1b6324c6eb29..6c1e442f6c81 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -14,24 +14,23 @@
*/
/*
- Driver: dyna_pci10xx
- Devices: Dynalog India PCI DAQ Cards, http://www.dynalogindia.com/
- Author: Prashant Shah <pshah.mumbai@gmail.com>
- Developed at Automation Labs, Chemical Dept., IIT Bombay, India.
- Prof. Kannan Moudgalya <kannan@iitb.ac.in>
- http://www.iitb.ac.in
- Status: Stable
- Version: 1.0
- Device Supported :
- - Dynalog PCI 1050
-
- Notes :
- - Dynalog India Pvt. Ltd. does not have a registered PCI Vendor ID and
- they are using the PLX Technlogies Vendor ID since that is the PCI Chip used
- in the card.
- - Dynalog India Pvt. Ltd. has provided the internal register specification for
- their cards in their manuals.
-*/
+ * Driver: dyna_pci10xx
+ * Description: Dynalog India PCI DAQ Cards, http://www.dynalogindia.com/
+ * Devices: [Dynalog] PCI-1050 (dyna_pci1050)
+ * Author: Prashant Shah <pshah.mumbai@gmail.com>
+ * Status: Stable
+ *
+ * Developed at Automation Labs, Chemical Dept., IIT Bombay, India.
+ * Prof. Kannan Moudgalya <kannan@iitb.ac.in>
+ * http://www.iitb.ac.in
+ *
+ * Notes:
+ * - Dynalog India Pvt. Ltd. does not have a registered PCI Vendor ID and
+ * they are using the PLX Technologies Vendor ID since that is the PCI chip
+ * used in the card.
+ * - Dynalog India Pvt. Ltd. has provided the internal register specification
+ * for their cards in their manuals.
+ */
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 0979f536ed39..deada9784b69 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -261,12 +261,12 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
if (hpdi_board_status & RX_OVERRUN_BIT) {
dev_err(dev->class_dev, "rx fifo overrun\n");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
}
if (hpdi_board_status & RX_UNDERRUN_BIT) {
dev_err(dev->class_dev, "rx fifo underrun\n");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
}
if (devpriv->dio_count == 0)
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 1085d66935fe..0768bc42a5db 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -9,7 +9,7 @@
/*
* Driver: ii_pci20kc
* Description: Intelligent Instruments PCI-20001C carrier board
- * Devices: (Intelligent Instrumentation) PCI-20001C [ii_pci20kc]
+ * Devices: [Intelligent Instrumentation] PCI-20001C (ii_pci20kc)
* Author: Markus Kempf <kempf@matsci.uni-sb.de>
* Status: works
*
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
index 20478ae8fad6..356811defaf4 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ b/drivers/staging/comedi/drivers/jr3_pci.h
@@ -261,8 +261,9 @@ struct intern_transform {
} link[8];
};
-/* JR3 force/torque sensor data definition. For more information see sensor and */
-/* hardware manuals. */
+/* JR3 force/torque sensor data definition. For more information see sensor
+ * and hardware manuals.
+ */
struct jr3_channel {
/* Raw_channels is the area used to store the raw data coming from */
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 77e94a34b51e..3c19e0f178ca 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -19,7 +19,7 @@
/*
* Driver: ke_counter
* Description: Driver for Kolter Electronic Counter Card
- * Devices: (Kolter Electronic) PCI Counter Card [ke_counter]
+ * Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
* Author: Michael Hillmann
* Updated: Mon, 14 Apr 2008 15:42:42 +0100
* Status: tested
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 915685c1c85c..d120aa244cf9 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -1068,7 +1068,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
dev_err(dev->class_dev, "FIFO overflow\n");
} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
@@ -1089,7 +1089,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
dev_err(dev->class_dev, "Undefined FIFO state\n");
}
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index b5278c11e622..92e23527f2cb 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -19,8 +19,7 @@
/*
* Driver: me_daq
* Description: Meilhaus PCI data acquisition cards
- * Devices: (Meilhaus) ME-2600i [me-2600i]
- * (Meilhaus) ME-2000i [me-2000i]
+ * Devices: [Meilhaus] ME-2600i (me-2600i), ME-2000i (me-2000i)
* Author: Michael Hillmann <hillmann@syscongroup.de>
* Status: experimental
*
@@ -175,7 +174,7 @@ struct me_private_data {
static inline void sleep(unsigned sec)
{
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sec * HZ);
}
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index af21bc180c46..db972bce2b5b 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -18,7 +18,7 @@
/*
* Driver: mf6x4
* Description: Humusoft MF634 and MF624 Data acquisition card driver
- * Devices: Humusoft MF634, Humusoft MF624
+ * Devices: [Humusoft] MF634 (mf634), MF624 (mf624)
* Author: Rostislav Lisovy <lisovy@gmail.com>
* Status: works
* Updated:
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index ffc9e61d6cdd..1e537a5cf862 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -494,9 +494,7 @@ EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
unsigned mite_dma_tcr(struct mite_channel *mite_chan)
{
struct mite_struct *mite = mite_chan->mite;
- int lkar;
- lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_tcr);
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index f99847f3999f..530f716f6586 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -19,8 +19,7 @@
/*
* Driver: ni_6527
* Description: National Instruments 6527
- * Devices: (National Instruments) PCI-6527 [pci-6527]
- * (National Instruments) PXI-6527 [pxi-6527]
+ * Devices: [National Instruments] PCI-6527 (pci-6527), PXI-6527 (pxi-6527)
* Author: David A. Schleef <ds@schleef.org>
* Updated: Sat, 25 Jan 2003 13:24:40 -0800
* Status: works
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index bcb326e31562..67cb758eb0cd 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -25,28 +25,14 @@
* Author: Jon Grierson <jd@renko.co.uk>,
* Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: testing
- * Devices: (National Instruments) PCI-6509 [ni_65xx]
- * (National Instruments) PXI-6509 [ni_65xx]
- * (National Instruments) PCI-6510 [ni_65xx]
- * (National Instruments) PCI-6511 [ni_65xx]
- * (National Instruments) PXI-6511 [ni_65xx]
- * (National Instruments) PCI-6512 [ni_65xx]
- * (National Instruments) PXI-6512 [ni_65xx]
- * (National Instruments) PCI-6513 [ni_65xx]
- * (National Instruments) PXI-6513 [ni_65xx]
- * (National Instruments) PCI-6514 [ni_65xx]
- * (National Instruments) PXI-6514 [ni_65xx]
- * (National Instruments) PCI-6515 [ni_65xx]
- * (National Instruments) PXI-6515 [ni_65xx]
- * (National Instruments) PCI-6516 [ni_65xx]
- * (National Instruments) PCI-6517 [ni_65xx]
- * (National Instruments) PCI-6518 [ni_65xx]
- * (National Instruments) PCI-6519 [ni_65xx]
- * (National Instruments) PCI-6520 [ni_65xx]
- * (National Instruments) PCI-6521 [ni_65xx]
- * (National Instruments) PXI-6521 [ni_65xx]
- * (National Instruments) PCI-6528 [ni_65xx]
- * (National Instruments) PXI-6528 [ni_65xx]
+ * Devices: [National Instruments] PCI-6509 (pci-6509), PXI-6509 (pxi-6509),
+ * PCI-6510 (pci-6510), PCI-6511 (pci-6511), PXI-6511 (pxi-6511),
+ * PCI-6512 (pci-6512), PXI-6512 (pxi-6512), PCI-6513 (pci-6513),
+ * PXI-6513 (pxi-6513), PCI-6514 (pci-6514), PXI-6514 (pxi-6514),
+ * PCI-6515 (pci-6515), PXI-6515 (pxi-6515), PCI-6516 (pci-6516),
+ * PCI-6517 (pci-6517), PCI-6518 (pci-6518), PCI-6519 (pci-6519),
+ * PCI-6520 (pci-6520), PCI-6521 (pci-6521), PXI-6521 (pxi-6521),
+ * PCI-6528 (pci-6528), PXI-6528 (pxi-6528)
* Updated: Mon, 21 Jul 2014 12:49:58 +0000
*
* Configuration Options: not applicable, uses PCI auto config
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 69e543a0bf22..a1ce0b0b8c41 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -62,14 +62,13 @@ TRIG_WAKE_EOS
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include "../comedidev.h"
-
#include <linux/io.h>
-#include <asm/dma.h>
+#include "../comedidev.h"
-#include "8253.h"
+#include "comedi_isadma.h"
#include "comedi_fc.h"
+#include "8253.h"
#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
@@ -146,11 +145,8 @@ static const struct a2150_board a2150_boards[] = {
};
struct a2150_private {
-
- volatile unsigned int count; /* number of data points left to be taken */
- unsigned int dma; /* dma channel */
- uint16_t *dma_buffer; /* dma buffer */
- unsigned int dma_transfer_size; /* size in bytes of dma transfers */
+ struct comedi_isadma *dma;
+ unsigned int count; /* number of data points left to be taken */
int irq_dma_bits; /* irq/dma register bits */
int config_bits; /* config register bits */
};
@@ -158,68 +154,54 @@ struct a2150_private {
/* interrupt service routine */
static irqreturn_t a2150_interrupt(int irq, void *d)
{
- int i;
- int status;
- unsigned long flags;
struct comedi_device *dev = d;
struct a2150_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[0];
struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned short *buf = desc->virt_addr;
unsigned int max_points, num_points, residue, leftover;
unsigned short dpnt;
+ int status;
+ int i;
- if (!dev->attached) {
- dev_err(dev->class_dev, "premature interrupt\n");
+ if (!dev->attached)
return IRQ_HANDLED;
- }
- /* initialize async here to make sure s is not NULL */
- async = s->async;
- cmd = &async->cmd;
status = inw(dev->iobase + STATUS_REG);
-
- if ((status & INTR_BIT) == 0) {
- dev_err(dev->class_dev, "spurious interrupt\n");
+ if ((status & INTR_BIT) == 0)
return IRQ_NONE;
- }
if (status & OVFL_BIT) {
- dev_err(dev->class_dev, "fifo overflow\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
}
if ((status & DMA_TC_BIT) == 0) {
- dev_err(dev->class_dev,
- "caught non-dma interrupt? Aborting.\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
- flags = claim_dma_lock();
- disable_dma(devpriv->dma);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma);
-
- /* figure out how many points to read */
- max_points = comedi_bytes_to_samples(s, devpriv->dma_transfer_size);
- /* residue is the number of points left to be done on the dma
+ /*
+ * residue is the number of bytes left to be done on the dma
* transfer. It should always be zero at this point unless
* the stop_src is set to external triggering.
*/
- residue = comedi_bytes_to_samples(s, get_dma_residue(devpriv->dma));
- num_points = max_points - residue;
+ residue = comedi_isadma_disable(desc->chan);
+
+ /* figure out how many points to read */
+ max_points = comedi_bytes_to_samples(s, desc->size);
+ num_points = max_points - comedi_bytes_to_samples(s, residue);
if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT)
num_points = devpriv->count;
/* figure out how many points will be stored next time */
leftover = 0;
if (cmd->stop_src == TRIG_NONE) {
- leftover = comedi_bytes_to_samples(s,
- devpriv->dma_transfer_size);
+ leftover = comedi_bytes_to_samples(s, desc->size);
} else if (devpriv->count > max_points) {
leftover = devpriv->count - max_points;
if (leftover > max_points)
@@ -234,7 +216,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
for (i = 0; i < num_points; i++) {
/* write data point to comedi buffer */
- dpnt = devpriv->dma_buffer[i];
+ dpnt = buf[i];
/* convert from 2's complement to unsigned coding */
dpnt ^= 0x8000;
comedi_buf_write_samples(s, &dpnt, 1);
@@ -245,14 +227,11 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
}
}
}
- /* re-enable dma */
+ /* re-enable dma */
if (leftover) {
- set_dma_addr(devpriv->dma, virt_to_bus(devpriv->dma_buffer));
- set_dma_count(devpriv->dma,
- comedi_samples_to_bytes(s, leftover));
- enable_dma(devpriv->dma);
+ desc->size = comedi_samples_to_bytes(s, leftover);
+ comedi_isadma_program(desc);
}
- release_dma_lock(flags);
comedi_handle_events(dev, s);
@@ -265,13 +244,15 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
static int a2150_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct a2150_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[0];
/* disable dma on card */
devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
/* disable computer's dma */
- disable_dma(devpriv->dma);
+ comedi_isadma_disable(desc->chan);
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
@@ -503,10 +484,11 @@ static int a2150_ai_cmdtest(struct comedi_device *dev,
static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct a2150_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[0];
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned long timer_base = dev->iobase + I8253_BASE_REG;
- unsigned long lock_flags;
unsigned int old_config_bits = devpriv->config_bits;
unsigned int trigger_bits;
@@ -542,27 +524,19 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* initialize number of samples remaining */
devpriv->count = cmd->stop_arg * cmd->chanlist_len;
- /* enable computer's dma */
- lock_flags = claim_dma_lock();
- disable_dma(devpriv->dma);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, virt_to_bus(devpriv->dma_buffer));
+ comedi_isadma_disable(desc->chan);
+
/* set size of transfer to fill in 1/3 second */
#define ONE_THIRD_SECOND 333333333
- devpriv->dma_transfer_size =
- sizeof(devpriv->dma_buffer[0]) * cmd->chanlist_len *
- ONE_THIRD_SECOND / cmd->scan_begin_arg;
- if (devpriv->dma_transfer_size > A2150_DMA_BUFFER_SIZE)
- devpriv->dma_transfer_size = A2150_DMA_BUFFER_SIZE;
- if (devpriv->dma_transfer_size < sizeof(devpriv->dma_buffer[0]))
- devpriv->dma_transfer_size = sizeof(devpriv->dma_buffer[0]);
- devpriv->dma_transfer_size -=
- devpriv->dma_transfer_size % sizeof(devpriv->dma_buffer[0]);
- set_dma_count(devpriv->dma, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma);
- release_dma_lock(lock_flags);
+ desc->size = comedi_bytes_per_sample(s) * cmd->chanlist_len *
+ ONE_THIRD_SECOND / cmd->scan_begin_arg;
+ if (desc->size > desc->maxsize)
+ desc->size = desc->maxsize;
+ if (desc->size < comedi_bytes_per_sample(s))
+ desc->size = comedi_bytes_per_sample(s);
+ desc->size -= desc->size % comedi_bytes_per_sample(s);
+
+ comedi_isadma_program(desc);
/* clear dma interrupt before enabling it, to try and get rid of that
* one spurious interrupt that has been happening */
@@ -677,6 +651,45 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
return n;
}
+static void a2150_alloc_irq_and_dma(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ struct a2150_private *devpriv = dev->private;
+ unsigned int irq_num = it->options[1];
+ unsigned int dma_chan = it->options[2];
+
+ /*
+ * Only IRQs 15, 14, 12-9, and 7-3 are valid.
+ * Only DMA channels 7-5 and 3-0 are valid.
+ */
+ if (irq_num > 15 || dma_chan > 7 ||
+ !((1 << irq_num) & 0xdef8) || !((1 << dma_chan) & 0xef))
+ return;
+
+ if (request_irq(irq_num, a2150_interrupt, 0, dev->board_name, dev))
+ return;
+
+ /* DMA uses 1 buffer */
+ devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, dma_chan,
+ A2150_DMA_BUFFER_SIZE,
+ COMEDI_ISADMA_READ);
+ if (!devpriv->dma) {
+ free_irq(irq_num, dev);
+ } else {
+ dev->irq = irq_num;
+ devpriv->irq_dma_bits = IRQ_LVL_BITS(irq_num) |
+ DMA_CHAN_BITS(dma_chan);
+ }
+}
+
+static void a2150_free_dma(struct comedi_device *dev)
+{
+ struct a2150_private *devpriv = dev->private;
+
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
+}
+
/* probes board type, returns offset */
static int a2150_probe(struct comedi_device *dev)
{
@@ -690,8 +703,6 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct a2150_board *thisboard;
struct a2150_private *devpriv;
struct comedi_subdevice *s;
- unsigned int irq = it->options[1];
- unsigned int dma = it->options[2];
static const int timeout = 2000;
int i;
int ret;
@@ -712,31 +723,8 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
thisboard = dev->board_ptr;
dev->board_name = thisboard->name;
- if ((irq >= 3 && irq <= 7) || (irq >= 9 && irq <= 12) ||
- irq == 14 || irq == 15) {
- ret = request_irq(irq, a2150_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0) {
- devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq);
- dev->irq = irq;
- }
- }
-
- if (dev->irq && dma <= 7 && dma != 4) {
- ret = request_dma(dma, dev->board_name);
- if (ret == 0) {
- devpriv->dma = dma;
- devpriv->dma_buffer = kmalloc(A2150_DMA_BUFFER_SIZE,
- GFP_KERNEL | GFP_DMA);
- if (!devpriv->dma_buffer)
- return -ENOMEM;
-
- disable_dma(dma);
- set_dma_mode(dma, DMA_MODE_READ);
-
- devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma);
- }
- }
+ /* an IRQ and DMA are required to support async commands */
+ a2150_alloc_irq_and_dma(dev, it);
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
@@ -750,7 +738,7 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xffff;
s->range_table = &range_a2150;
s->insn_read = a2150_ai_rinsn;
- if (dev->irq && devpriv->dma) {
+ if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = s->n_chan;
@@ -791,15 +779,9 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static void a2150_detach(struct comedi_device *dev)
{
- struct a2150_private *devpriv = dev->private;
-
if (dev->iobase)
outw(APD_BIT | DPD_BIT, dev->iobase + CONFIG_REG);
- if (devpriv) {
- if (devpriv->dma)
- free_dma(devpriv->dma);
- kfree(devpriv->dma_buffer);
- }
+ a2150_free_dma(dev);
comedi_legacy_detach(dev);
};
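The validity test in a2150_alloc_irq_and_dma() packs the old chain of range comparisons into two bitmaps; a short worked expansion of those constants (illustrative only):

/*
 * 0xdef8 = 1101 1110 1111 1000b -> bits 3-7, 9-12, 14 and 15 set,
 *          i.e. the valid IRQ lines.
 * 0xef   =      1110 1111b      -> bits 0-3 and 5-7 set,
 *          i.e. the valid DMA channels (channel 4 is excluded).
 *
 * So "(1 << irq_num) & 0xdef8" is non-zero exactly when irq_num is an
 * allowed IRQ, and likewise for the DMA channel mask.
 */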
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index 05370a4a74a5..9eeaf3c5a858 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -19,8 +19,7 @@
/*
* Driver: ni_at_ao
* Description: National Instruments AT-AO-6/10
- * Devices: (National Instruments) AT-AO-6 [at-ao-6]
- * (National Instruments) AT-AO-10 [at-ao-10]
+ * Devices: [National Instruments] AT-AO-6 (at-ao-6), AT-AO-10 (at-ao-10)
* Status: should work
* Author: David A. Schleef <ds@schleef.org>
* Updated: Sun Dec 26 12:26:28 EST 2004
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 0c5ff287dcef..301f154be813 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -300,7 +300,6 @@ static int ni_atmio_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
const struct ni_board_struct *boardtype;
- struct ni_private *devpriv;
struct pnp_dev *isapnp_dev;
int ret;
unsigned long iobase;
@@ -310,7 +309,6 @@ static int ni_atmio_attach(struct comedi_device *dev,
ret = ni_alloc_private(dev);
if (ret)
return ret;
- devpriv = dev->private;
iobase = it->options[0];
irq = it->options[1];
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 5e472cb7fbd7..8f6396edd21c 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -51,10 +51,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
+#include "../comedi_pcmcia.h"
/* daqcard700 registers */
#define DIO_W 0x04 /* WO 8bit */
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 8cfabdbaa30c..a208cb348437 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -32,11 +32,7 @@ the PCMCIA interface.
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
+#include "../comedi_pcmcia.h"
#include "8255.h"
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 1fbfdb4c80c0..a916047791b8 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -17,9 +17,8 @@
/*
* Driver: ni_labpc
* Description: National Instruments Lab-PC (& compatibles)
- * Devices: (National Instruments) Lab-PC-1200 [lab-pc-1200]
- * (National Instruments) Lab-PC-1200AI [lab-pc-1200ai]
- * (National Instruments) Lab-PC+ [lab-pc+]
+ * Devices: [National Instruments] Lab-PC-1200 (lab-pc-1200),
+ * Lab-PC-1200AI (lab-pc-1200ai), Lab-PC+ (lab-pc+)
* Author: Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: works
*
@@ -85,15 +84,10 @@ static const struct labpc_boardinfo labpc_boards[] = {
static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- struct labpc_private *devpriv;
unsigned int irq = it->options[1];
unsigned int dma_chan = it->options[2];
int ret;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
ret = comedi_request_region(dev, it->options[0], 0x20);
if (ret)
return ret;
@@ -110,11 +104,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static void labpc_detach(struct comedi_device *dev)
{
- struct labpc_private *devpriv = dev->private;
-
- if (devpriv)
- labpc_free_dma_chan(dev);
-
+ labpc_free_dma_chan(dev);
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index ac2c01f9dfdc..be89ae479afc 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -35,6 +35,8 @@ struct labpc_boardinfo {
};
struct labpc_private {
+ struct comedi_isadma *dma;
+
/* number of data points left to be taken */
unsigned long long count;
/* software copys of bits written to command registers */
@@ -61,11 +63,7 @@ struct labpc_private {
* conversions
*/
unsigned int divisor_b1;
- unsigned int dma_chan; /* dma channel to use */
- u16 *dma_buffer; /* buffer ai will dma into */
- phys_addr_t dma_addr;
- /* transfer size in bytes for current transfer */
- unsigned int dma_transfer_size;
+
/* we are using dma/fifo-half-full/etc. */
enum transfer_type current_transfer;
/*
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index d89d5852aeea..b88ee2614bfe 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -368,10 +368,6 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
enum scan_mode mode)
{
struct labpc_private *devpriv = dev->private;
- /* max value for 16 bit counter in mode 2 */
- const int max_counter_value = 0x10000;
- /* min value for 16 bit counter in mode 2 */
- const int min_counter_value = 2;
unsigned int base_period;
unsigned int scan_period;
unsigned int convert_period;
@@ -388,11 +384,10 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
* clock speed on convert and scan counters)
*/
devpriv->divisor_b0 = (scan_period - 1) /
- (I8254_OSC_BASE_2MHZ * max_counter_value) + 1;
- if (devpriv->divisor_b0 < min_counter_value)
- devpriv->divisor_b0 = min_counter_value;
- if (devpriv->divisor_b0 > max_counter_value)
- devpriv->divisor_b0 = max_counter_value;
+ (I8254_OSC_BASE_2MHZ * 0x10000) + 1;
+
+ cfc_check_trigger_arg_min(&devpriv->divisor_b0, 2);
+ cfc_check_trigger_arg_max(&devpriv->divisor_b0, 0x10000);
base_period = I8254_OSC_BASE_2MHZ * devpriv->divisor_b0;
@@ -400,16 +395,16 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
switch (cmd->flags & CMDF_ROUND_MASK) {
default:
case CMDF_ROUND_NEAREST:
- devpriv->divisor_a0 =
- (convert_period + (base_period / 2)) / base_period;
- devpriv->divisor_b1 =
- (scan_period + (base_period / 2)) / base_period;
+ devpriv->divisor_a0 = DIV_ROUND_CLOSEST(convert_period,
+ base_period);
+ devpriv->divisor_b1 = DIV_ROUND_CLOSEST(scan_period,
+ base_period);
break;
case CMDF_ROUND_UP:
- devpriv->divisor_a0 =
- (convert_period + (base_period - 1)) / base_period;
- devpriv->divisor_b1 =
- (scan_period + (base_period - 1)) / base_period;
+ devpriv->divisor_a0 = DIV_ROUND_UP(convert_period,
+ base_period);
+ devpriv->divisor_b1 = DIV_ROUND_UP(scan_period,
+ base_period);
break;
case CMDF_ROUND_DOWN:
devpriv->divisor_a0 = convert_period / base_period;
@@ -417,14 +412,10 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
break;
}
/* make sure a0 and b1 values are acceptable */
- if (devpriv->divisor_a0 < min_counter_value)
- devpriv->divisor_a0 = min_counter_value;
- if (devpriv->divisor_a0 > max_counter_value)
- devpriv->divisor_a0 = max_counter_value;
- if (devpriv->divisor_b1 < min_counter_value)
- devpriv->divisor_b1 = min_counter_value;
- if (devpriv->divisor_b1 > max_counter_value)
- devpriv->divisor_b1 = max_counter_value;
+ cfc_check_trigger_arg_min(&devpriv->divisor_a0, 2);
+ cfc_check_trigger_arg_max(&devpriv->divisor_a0, 0x10000);
+ cfc_check_trigger_arg_min(&devpriv->divisor_b1, 2);
+ cfc_check_trigger_arg_max(&devpriv->divisor_b1, 0x10000);
/* write corrected timings to command */
labpc_set_ai_convert_period(cmd, mode,
base_period * devpriv->divisor_a0);
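
For reference, a small self-contained program (example values are made up) showing that the DIV_ROUND_CLOSEST()/DIV_ROUND_UP() forms introduced in the two hunks above compute the same results as the open-coded rounding they replace, together with the 2..0x10000 clamp that cfc_check_trigger_arg_min()/cfc_check_trigger_arg_max() apply to the 16-bit mode-2 counters. The macros below are simplified unsigned-only versions of the kernel ones.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

static unsigned int clamp_divisor(unsigned int div)
{
	/* mirrors the min 2 / max 0x10000 checks above */
	if (div < 2)
		div = 2;
	if (div > 0x10000)
		div = 0x10000;
	return div;
}

int main(void)
{
	unsigned int base_period = 500;		/* ns, illustrative */
	unsigned int convert_period = 1740;	/* ns, illustrative */

	/* old open-coded forms and the new macros agree */
	printf("%u %u\n",
	       (convert_period + base_period / 2) / base_period,
	       DIV_ROUND_CLOSEST(convert_period, base_period));	/* 3 3 */
	printf("%u %u\n",
	       (convert_period + base_period - 1) / base_period,
	       DIV_ROUND_UP(convert_period, base_period));		/* 4 4 */
	printf("%u\n", clamp_divisor(1));	/* 2 */
	return 0;
}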
@@ -687,7 +678,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
/* figure out what method we will use to transfer data */
- if (labpc_have_dma_chan(dev) &&
+ if (devpriv->dma &&
/* dma unsafe at RT priority,
* and too much setup time for CMDF_WAKE_EOS */
(cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0)
@@ -823,7 +814,7 @@ static int labpc_drain_fifo(struct comedi_device *dev)
}
if (i == timeout) {
dev_err(dev->class_dev, "ai timeout, fifo never empties\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
return -1;
}
@@ -875,7 +866,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
if (devpriv->stat1 & STAT1_OVERRUN) {
/* clear error interrupt */
devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
dev_err(dev->class_dev, "overrun\n");
return IRQ_HANDLED;
@@ -895,7 +886,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
if (devpriv->stat1 & STAT1_OVERFLOW) {
/* clear error interrupt */
devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
dev_err(dev->class_dev, "overflow\n");
return IRQ_HANDLED;
@@ -1215,11 +1206,15 @@ int labpc_common_attach(struct comedi_device *dev,
unsigned int irq, unsigned long isr_flags)
{
const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
+ struct labpc_private *devpriv;
struct comedi_subdevice *s;
int ret;
int i;
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
if (dev->mmio) {
devpriv->read_byte = labpc_readb;
devpriv->write_byte = labpc_writeb;
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 0a8b3223f74e..746c4cd9978d 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -54,19 +54,11 @@ NI manuals:
*/
#include <linux/module.h>
-#include "../comedidev.h"
-#include <linux/delay.h>
+#include "../comedi_pcmcia.h"
-#include "8253.h"
-#include "8255.h"
-#include "comedi_fc.h"
#include "ni_labpc.h"
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
static const struct labpc_boardinfo labpc_cs_boards[] = {
{
.name = "daqcard-1200",
@@ -80,7 +72,6 @@ static int labpc_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- struct labpc_private *devpriv;
int ret;
/* The ni_labpc driver needs the board_ptr */
@@ -96,10 +87,6 @@ static int labpc_auto_attach(struct comedi_device *dev,
if (!link->irq)
return -EINVAL;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
return labpc_common_attach(dev, link->irq, IRQF_SHARED);
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
index 6d386050e59d..6b4ccd86b3d0 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
@@ -19,23 +19,25 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include "../comedidev.h"
-#include <asm/dma.h>
+#include "../comedidev.h"
+#include "comedi_isadma.h"
#include "comedi_fc.h"
#include "ni_labpc.h"
#include "ni_labpc_regs.h"
#include "ni_labpc_isadma.h"
/* size in bytes of dma buffer */
-static const int dma_buffer_size = 0xff00;
-/* 2 bytes per sample */
-static const int sample_size = 2;
+#define LABPC_ISADMA_BUFFER_SIZE 0xff00
/* utility function that suggests a dma transfer size in bytes */
-static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
+static unsigned int labpc_suggest_transfer_size(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int maxbytes)
{
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int sample_size = comedi_bytes_per_sample(s);
unsigned int size;
unsigned int freq;
@@ -49,8 +51,8 @@ static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
size = (freq / 3) * sample_size;
/* set a minimum and maximum size allowed */
- if (size > dma_buffer_size)
- size = dma_buffer_size - dma_buffer_size % sample_size;
+ if (size > maxbytes)
+ size = maxbytes;
else if (size < sample_size)
size = sample_size;
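
A stand-alone model (plain C, not driver code) of labpc_suggest_transfer_size() after this change: pick a transfer worth roughly a third of a second of data at the given rate, then clamp to [one sample, desc->maxsize]. Here "freq" stands in for the rate the elided context lines derive from the command timing, and sample_size for comedi_bytes_per_sample(s). Note the new upper clamp no longer rounds maxbytes down to a whole number of samples; 0xff00 is a multiple of 2, so this is harmless for 16-bit samples.

#include <stdio.h>

static unsigned int suggest_transfer_size(unsigned int freq,
					  unsigned int sample_size,
					  unsigned int maxbytes)
{
	unsigned int size = (freq / 3) * sample_size;

	if (size > maxbytes)
		size = maxbytes;
	else if (size < sample_size)
		size = sample_size;
	return size;
}

int main(void)
{
	/* 100 kHz sampling, 2-byte samples, 0xff00-byte buffer */
	printf("%u\n", suggest_transfer_size(100000, 2, 0xff00));	/* 65280 */
	/* very slow sampling still transfers at least one sample */
	printf("%u\n", suggest_transfer_size(1, 2, 0xff00));		/* 2 */
	return 0;
}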
@@ -60,23 +62,18 @@ static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct labpc_private *devpriv = dev->private;
+ struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long irq_flags;
-
- irq_flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
- set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
+ unsigned int sample_size = comedi_bytes_per_sample(s);
+
/* set appropriate size of transfer */
- devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
+ desc->size = labpc_suggest_transfer_size(dev, s, desc->maxsize);
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->count * sample_size < devpriv->dma_transfer_size)
- devpriv->dma_transfer_size = devpriv->count * sample_size;
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
- release_dma_lock(irq_flags);
+ devpriv->count * sample_size < desc->size)
+ desc->size = devpriv->count * sample_size;
+
+ comedi_isadma_program(desc);
+
/* set CMD3 bits for caller to enable DMA and interrupt */
devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
}
@@ -85,61 +82,55 @@ EXPORT_SYMBOL_GPL(labpc_setup_dma);
void labpc_drain_dma(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
+ struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- int status;
- unsigned long flags;
- unsigned int max_points, num_points, residue, leftover;
+ unsigned int max_samples = comedi_bytes_to_samples(s, desc->size);
+ unsigned int residue;
+ unsigned int nsamples;
+ unsigned int leftover;
- status = devpriv->stat1;
-
- flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
-
- /* figure out how many points to read */
- max_points = devpriv->dma_transfer_size / sample_size;
- /* residue is the number of points left to be done on the dma
+ /*
+ * residue is the number of bytes left to be done on the dma
* transfer. It should always be zero at this point unless
* the stop_src is set to external triggering.
*/
- residue = get_dma_residue(devpriv->dma_chan) / sample_size;
- num_points = max_points - residue;
- if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_points)
- num_points = devpriv->count;
-
- /* figure out how many points will be stored next time */
- leftover = 0;
- if (cmd->stop_src != TRIG_COUNT) {
- leftover = devpriv->dma_transfer_size / sample_size;
- } else if (devpriv->count > num_points) {
- leftover = devpriv->count - num_points;
- if (leftover > max_points)
- leftover = max_points;
- }
-
- comedi_buf_write_samples(s, devpriv->dma_buffer, num_points);
+ residue = comedi_isadma_disable(desc->chan);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count -= num_points;
+ /*
+ * Figure out how many samples to read for this transfer and
+ * how many will be stored for next time.
+ */
+ nsamples = max_samples - comedi_bytes_to_samples(s, residue);
+ if (cmd->stop_src == TRIG_COUNT) {
+ if (devpriv->count <= nsamples) {
+ nsamples = devpriv->count;
+ leftover = 0;
+ } else {
+ leftover = devpriv->count - nsamples;
+ if (leftover > max_samples)
+ leftover = max_samples;
+ }
+ devpriv->count -= nsamples;
+ } else {
+ leftover = max_samples;
+ }
+ desc->size = comedi_samples_to_bytes(s, leftover);
- /* set address and count for next transfer */
- set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
- set_dma_count(devpriv->dma_chan, leftover * sample_size);
- release_dma_lock(flags);
+ comedi_buf_write_samples(s, desc->virt_addr, nsamples);
}
EXPORT_SYMBOL_GPL(labpc_drain_dma);
static void handle_isa_dma(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
+ struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
labpc_drain_dma(dev);
- enable_dma(devpriv->dma_chan);
+ if (desc->size)
+ comedi_isadma_program(desc);
/* clear dma tc interrupt */
devpriv->write_byte(dev, 0x1, DMATC_CLEAR_REG);
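
A stand-alone model of the TRIG_COUNT bookkeeping in the labpc_drain_dma() hunk above, expressed in samples: residue is whatever the controller did not transfer (normally zero), count is how many samples the command still wants, and max_samples is the descriptor size in samples. The real code additionally subtracts nsamples from the running devpriv->count.

#include <stdio.h>

struct drain_result {
	unsigned int nsamples;	/* samples to read from this buffer */
	unsigned int leftover;	/* samples to program for the next run */
};

static struct drain_result drain(unsigned int max_samples,
				 unsigned int residue_samples,
				 int trig_count,
				 unsigned long long count)
{
	struct drain_result r;

	r.nsamples = max_samples - residue_samples;
	if (trig_count) {
		if (count <= r.nsamples) {
			r.nsamples = count;
			r.leftover = 0;		/* command is finishing */
		} else {
			r.leftover = count - r.nsamples;
			if (r.leftover > max_samples)
				r.leftover = max_samples;
		}
	} else {
		r.leftover = max_samples;	/* free running: full buffer again */
	}
	return r;
}

int main(void)
{
	/* 1000-sample buffer, no residue, 2500 samples still wanted */
	struct drain_result r = drain(1000, 0, 1, 2500);
	printf("%u %u\n", r.nsamples, r.leftover);	/* 1000 1000 */

	/* last partial buffer: only 300 samples wanted */
	r = drain(1000, 0, 1, 300);
	printf("%u %u\n", r.nsamples, r.leftover);	/* 300 0 */
	return 0;
}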
@@ -160,36 +151,18 @@ void labpc_handle_dma_status(struct comedi_device *dev)
}
EXPORT_SYMBOL_GPL(labpc_handle_dma_status);
-int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
+void labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
{
struct labpc_private *devpriv = dev->private;
- void *dma_buffer;
- unsigned long dma_flags;
- int ret;
+ /* only DMA channels 3 and 1 are valid */
if (dma_chan != 1 && dma_chan != 3)
- return -EINVAL;
-
- dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
- if (!dma_buffer)
- return -ENOMEM;
-
- ret = request_dma(dma_chan, dev->board_name);
- if (ret) {
- kfree(dma_buffer);
- return ret;
- }
-
- devpriv->dma_buffer = dma_buffer;
- devpriv->dma_chan = dma_chan;
- devpriv->dma_addr = virt_to_bus(devpriv->dma_buffer);
-
- dma_flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
- release_dma_lock(dma_flags);
+ return;
- return 0;
+ /* DMA uses 1 buffer */
+ devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, dma_chan,
+ LABPC_ISADMA_BUFFER_SIZE,
+ COMEDI_ISADMA_READ);
}
EXPORT_SYMBOL_GPL(labpc_init_dma_chan);
@@ -197,12 +170,8 @@ void labpc_free_dma_chan(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
- kfree(devpriv->dma_buffer);
- devpriv->dma_buffer = NULL;
- if (devpriv->dma_chan) {
- free_dma(devpriv->dma_chan);
- devpriv->dma_chan = 0;
- }
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
}
EXPORT_SYMBOL_GPL(labpc_free_dma_chan);
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.h b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
index 771af4bd5a76..b8a1b0ee6290 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
@@ -5,18 +5,9 @@
#ifndef _NI_LABPC_ISADMA_H
#define _NI_LABPC_ISADMA_H
-#define NI_LABPC_HAVE_ISA_DMA IS_ENABLED(CONFIG_COMEDI_NI_LABPC_ISADMA)
+#if IS_ENABLED(CONFIG_COMEDI_NI_LABPC_ISADMA)
-#if NI_LABPC_HAVE_ISA_DMA
-
-static inline bool labpc_have_dma_chan(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- return (bool)devpriv->dma_chan;
-}
-
-int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan);
+void labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan);
void labpc_free_dma_chan(struct comedi_device *dev);
void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s);
void labpc_drain_dma(struct comedi_device *dev);
@@ -24,15 +15,9 @@ void labpc_handle_dma_status(struct comedi_device *dev);
#else
-static inline bool labpc_have_dma_chan(struct comedi_device *dev)
-{
- return false;
-}
-
-static inline int labpc_init_dma_chan(struct comedi_device *dev,
- unsigned int dma_chan)
+static inline void labpc_init_dma_chan(struct comedi_device *dev,
+ unsigned int dma_chan)
{
- return -ENOTSUPP;
}
static inline void labpc_free_dma_chan(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 3fc420406564..0407ff681dfd 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -17,7 +17,7 @@
/*
* Driver: ni_labpc_pci
* Description: National Instruments Lab-PC PCI-1200
- * Devices: (National Instruments) PCI-1200 [ni_pci-1200]
+ * Devices: [National Instruments] PCI-1200 (ni_pci-1200)
* Author: Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: works
*
@@ -79,7 +79,6 @@ static int labpc_pci_auto_attach(struct comedi_device *dev,
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct labpc_boardinfo *board = NULL;
- struct labpc_private *devpriv;
int ret;
if (context < ARRAY_SIZE(labpc_pci_boards))
@@ -101,10 +100,6 @@ static int labpc_pci_auto_attach(struct comedi_device *dev,
if (!dev->mmio)
return -ENOMEM;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
return labpc_common_attach(dev, pcidev->irq, IRQF_SHARED);
}
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 11e70173712d..b6ddc015dedf 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1478,7 +1478,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
dev_err(dev->class_dev,
"unknown mite interrupt (ai_mite_status=%08x)\n",
ai_mite_status);
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
/* disable_irq(dev->irq); */
}
#endif
@@ -1491,8 +1491,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
/* we probably aren't even running a command now,
* so it's a good idea to be careful. */
if (comedi_is_subdevice_running(s)) {
- s->async->events |=
- COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
}
return;
@@ -1579,7 +1578,7 @@ static void handle_b_interrupt(struct comedi_device *dev,
dev_err(dev->class_dev,
"unknown mite interrupt (ao_mite_status=%08x)\n",
ao_mite_status);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
}
#endif
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 9b201e48233e..e3d821bf2d6a 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -37,16 +37,12 @@ See the notes in the ni_atmio.o driver.
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
#include <linux/delay.h>
+#include "../comedi_pcmcia.h"
#include "ni_stc.h"
#include "8255.h"
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
/*
* AT specific setup
*/
@@ -163,7 +159,6 @@ static int mio_cs_auto_attach(struct comedi_device *dev,
{
struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
static const struct ni_board_struct *board;
- struct ni_private *devpriv;
int ret;
board = ni_getboardtype(dev, link);
@@ -188,8 +183,6 @@ static int mio_cs_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = dev->private;
-
return ni_E_init(dev, 0, 1);
}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index db7e8aac67b5..db399fe8c301 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -418,7 +418,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
CHSR_DRQ1 | CHSR_MRDY)) {
dev_dbg(dev->class_dev,
"unknown mite interrupt, disabling IRQ\n");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
disable_irq(dev->irq);
}
}
@@ -460,7 +460,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
break;
} else if (flags & Waited) {
writeb(ClearWaited, dev->mmio + Group_1_First_Clear);
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
break;
} else if (flags & PrimaryTC) {
writeb(ClearPrimaryTC,
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 0525292c1d8b..c20c51bef3e7 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -16,29 +16,28 @@
*/
/*
-Driver: ni_tio
-Description: National Instruments general purpose counters
-Devices:
-Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
- Herman.Bruyninckx@mech.kuleuven.ac.be,
- Wim.Meeussen@mech.kuleuven.ac.be,
- Klaas.Gadeyne@mech.kuleuven.ac.be,
- Frank Mori Hess <fmhess@users.sourceforge.net>
-Updated: Thu Nov 16 09:50:32 EST 2006
-Status: works
-
-This module is not used directly by end-users. Rather, it
-is used by other drivers (for example ni_660x and ni_pcimio)
-to provide support for NI's general purpose counters. It was
-originally based on the counter code from ni_660x.c and
-ni_mio_common.c.
-
-References:
-DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
-DAQ 6601/6602 User Manual (NI 322137B-01)
-340934b.pdf DAQ-STC reference manual
+ * Module: ni_tio
+ * Description: National Instruments general purpose counters
+ * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
+ * Herman.Bruyninckx@mech.kuleuven.ac.be,
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Updated: Thu Nov 16 09:50:32 EST 2006
+ * Status: works
+ *
+ * This module is not used directly by end-users. Rather, it
+ * is used by other drivers (for example ni_660x and ni_pcimio)
+ * to provide support for NI's general purpose counters. It was
+ * originally based on the counter code from ni_660x.c and
+ * ni_mio_common.c.
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ * 340934b.pdf DAQ-STC reference manual
+ */
-*/
/*
TODO:
Support use of both banks X and Y
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 6037bec77ef1..d36c3abd3120 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -16,29 +16,28 @@
*/
/*
-Driver: ni_tiocmd
-Description: National Instruments general purpose counters command support
-Devices:
-Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
- Herman.Bruyninckx@mech.kuleuven.ac.be,
- Wim.Meeussen@mech.kuleuven.ac.be,
- Klaas.Gadeyne@mech.kuleuven.ac.be,
- Frank Mori Hess <fmhess@users.sourceforge.net>
-Updated: Fri, 11 Apr 2008 12:32:35 +0100
-Status: works
-
-This module is not used directly by end-users. Rather, it
-is used by other drivers (for example ni_660x and ni_pcimio)
-to provide command support for NI's general purpose counters.
-It was originally split out of ni_tio.c to stop the 'ni_tio'
-module depending on the 'mite' module.
-
-References:
-DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
-DAQ 6601/6602 User Manual (NI 322137B-01)
-340934b.pdf DAQ-STC reference manual
+ * Module: ni_tiocmd
+ * Description: National Instruments general purpose counters command support
+ * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
+ * Herman.Bruyninckx@mech.kuleuven.ac.be,
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Updated: Fri, 11 Apr 2008 12:32:35 +0100
+ * Status: works
+ *
+ * This module is not used directly by end-users. Rather, it
+ * is used by other drivers (for example ni_660x and ni_pcimio)
+ * to provide command support for NI's general purpose counters.
+ * It was originally split out of ni_tio.c to stop the 'ni_tio'
+ * module depending on the 'mite' module.
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ * 340934b.pdf DAQ-STC reference manual
+ */
-*/
/*
TODO:
Support use of both banks X and Y
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 3b5a1b90366d..5f649f88d55c 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -96,9 +96,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/usb.h>
-#include "../comedidev.h"
+#include "../comedi_usb.h"
#define NI6501_TIMEOUT 1000
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index 938aebc8e0ea..cb7e4c37b8b9 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -22,10 +22,8 @@
/*
* Driver: pcl711
* Description: Advantech PCL-711 and 711b, ADLink ACL-8112
- * Devices: (Advantech) PCL-711 [pcl711]
- * (Advantech) PCL-711B [pcl711b]
- * (AdLink) ACL-8112HG [acl8112hg]
- * (AdLink) ACL-8112DG [acl8112dg]
+ * Devices: [Advantech] PCL-711 (pcl711), PCL-711B (pcl711b),
+ * [ADLink] ACL-8112HG (acl8112hg), ACL-8112DG (acl8112dg)
* Author: David A. Schleef <ds@schleef.org>
* Janne Jalkanen <jalkanen@cs.hut.fi>
* Eric Bunn <ebu@cs.hut.fi>
diff --git a/drivers/staging/comedi/drivers/pcl724.c b/drivers/staging/comedi/drivers/pcl724.c
index fcc440855e66..74b07e1744c7 100644
--- a/drivers/staging/comedi/drivers/pcl724.c
+++ b/drivers/staging/comedi/drivers/pcl724.c
@@ -8,14 +8,10 @@
/*
* Driver: pcl724
* Description: Comedi driver for 8255 based ISA DIO boards
- * Devices: (Advantech) PCL-724 [pcl724]
- * (Advantech) PCL-722 [pcl722]
- * (Advantech) PCL-731 [pcl731]
- * (ADLink) ACL-7122 [acl7122]
- * (ADLink) ACL-7124 [acl7124]
- * (ADLink) PET-48DIO [pet48dio]
- * (WinSystems) PCM-IO48 [pcmio48]
- * (Diamond Systems) ONYX-MM-DIO [onyx-mm-dio]
+ * Devices: [Advantech] PCL-724 (pcl724), PCL-722 (pcl722), PCL-731 (pcl731),
+ * [ADLink] ACL-7122 (acl7122), ACL-7124 (acl7124), PET-48DIO (pet48dio),
+ * [WinSystems] PCM-IO48 (pcmio48),
+ * [Diamond Systems] ONYX-MM-DIO (onyx-mm-dio)
* Author: Michal Dobes <dobes@tesnet.cz>
* Status: untested
*
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index 86f713fdf1d0..40798150cfd8 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -21,11 +21,8 @@
* Description: Advantech PCL-726 & compatibles
* Author: David A. Schleef <ds@schleef.org>
* Status: untested
- * Devices: (Advantech) PCL-726 [pcl726]
- * (Advantech) PCL-727 [pcl727]
- * (Advantech) PCL-728 [pcl728]
- * (ADLink) ACL-6126 [acl6126]
- * (ADLink) ACL-6128 [acl6128]
+ * Devices: [Advantech] PCL-726 (pcl726), PCL-727 (pcl727), PCL-728 (pcl728),
+ * [ADLink] ACL-6126 (acl6126), ACL-6128 (acl6128)
*
* Configuration Options:
* [0] - IO Base
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index a6c5770b2808..ce958eef2a61 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -7,19 +7,12 @@
/*
* Driver: pcl730
* Description: Advantech PCL-730 (& compatibles)
- * Devices: (Advantech) PCL-730 [pcl730]
- * (ICP) ISO-730 [iso730]
- * (Adlink) ACL-7130 [acl7130]
- * (Advantech) PCM-3730 [pcm3730]
- * (Advantech) PCL-725 [pcl725]
- * (ICP) P8R8-DIO [p16r16dio]
- * (Adlink) ACL-7225b [acl7225b]
- * (ICP) P16R16-DIO [p16r16dio]
- * (Advantech) PCL-733 [pcl733]
- * (Advantech) PCL-734 [pcl734]
- * (Diamond Systems) OPMM-1616-XT [opmm-1616-xt]
- * (Diamond Systems) PEARL-MM-P [prearl-mm-p]
- * (Diamond Systems) IR104-PBF [ir104-pbf]
+ * Devices: [Advantech] PCL-730 (pcl730), PCM-3730 (pcm3730), PCL-725 (pcl725),
+ * PCL-733 (pcl733), PCL-734 (pcl734),
+ * [ADLink] ACL-7130 (acl7130), ACL-7225b (acl7225b),
+ * [ICP] ISO-730 (iso730), P8R8-DIO (p8r8dio), P16R16-DIO (p16r16dio),
+ * [Diamond Systems] OPMM-1616-XT (opmm-1616-xt), PEARL-MM-P (pearl-mm-p),
+ * IR104-PBF (ir104-pbf)
* Author: José Luis Sánchez (jsanchezv@teleline.es)
* Status: untested
*
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index ac243ca5e0f8..3ffb1ea2ecc8 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -111,12 +111,12 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
-#include "../comedidev.h"
-
#include <linux/delay.h>
#include <linux/io.h>
-#include <asm/dma.h>
+#include "../comedidev.h"
+
+#include "comedi_isadma.h"
#include "comedi_fc.h"
#include "8253.h"
@@ -507,19 +507,11 @@ static const struct pcl812_board boardtypes[] = {
};
struct pcl812_private {
- unsigned char dma; /* >0 use dma ( usedDMA channel) */
+ struct comedi_isadma *dma;
unsigned char range_correction; /* =1 we must add 1 to range number */
unsigned int last_ai_chanspec;
unsigned char mode_reg_int; /* there is stored INT number for some card */
unsigned int ai_poll_ptr; /* how many sampes transfer poll */
- unsigned int dmapages;
- unsigned int hwdmasize;
- unsigned long dmabuf[2]; /* PTR to DMA buf */
- unsigned int hwdmaptr[2]; /* HW PTR to DMA buf */
- unsigned int dmabytestomove[2]; /* how many bytes DMA transfer */
- int next_dma_buf; /* which buffer is next to use */
- unsigned int dma_runs_to_end; /* how many times we must switch DMA buffers */
- unsigned int last_dma_run; /* how many bytes to transfer on last DMA buffer */
unsigned int max_812_ai_mode0_rangewait; /* setling time for gain */
unsigned int divisor1;
unsigned int divisor2;
@@ -546,90 +538,32 @@ static void pcl812_start_pacer(struct comedi_device *dev, bool load_timers)
}
static void pcl812_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
+ struct comedi_subdevice *s,
+ unsigned int unread_samples)
{
struct pcl812_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int dma_flags;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
unsigned int bytes;
+ unsigned int max_samples;
+ unsigned int nsamples;
- /* we use EOS, so adapt DMA buffer to one scan */
- if (devpriv->ai_eos) {
- devpriv->dmabytestomove[0] = comedi_bytes_per_scan(s);
- devpriv->dmabytestomove[1] = comedi_bytes_per_scan(s);
- devpriv->dma_runs_to_end = 1;
- } else {
- devpriv->dmabytestomove[0] = devpriv->hwdmasize;
- devpriv->dmabytestomove[1] = devpriv->hwdmasize;
- if (s->async->prealloc_bufsz < devpriv->hwdmasize) {
- devpriv->dmabytestomove[0] =
- s->async->prealloc_bufsz;
- devpriv->dmabytestomove[1] =
- s->async->prealloc_bufsz;
- }
- if (cmd->stop_src == TRIG_NONE) {
- devpriv->dma_runs_to_end = 1;
- } else {
- /* how many samples we must transfer? */
- bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
-
- /* how many DMA pages we must fill */
- devpriv->dma_runs_to_end =
- bytes / devpriv->dmabytestomove[0];
-
- /* on last dma transfer must be moved */
- devpriv->last_dma_run =
- bytes % devpriv->dmabytestomove[0];
- if (devpriv->dma_runs_to_end == 0)
- devpriv->dmabytestomove[0] =
- devpriv->last_dma_run;
- devpriv->dma_runs_to_end--;
- }
- }
- if (devpriv->dmabytestomove[0] > devpriv->hwdmasize) {
- devpriv->dmabytestomove[0] = devpriv->hwdmasize;
- devpriv->ai_eos = 0;
- }
- if (devpriv->dmabytestomove[1] > devpriv->hwdmasize) {
- devpriv->dmabytestomove[1] = devpriv->hwdmasize;
- devpriv->ai_eos = 0;
- }
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
-}
+ comedi_isadma_disable(dma->chan);
-static void pcl812_ai_setup_next_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl812_private *devpriv = dev->private;
- unsigned long dma_flags;
-
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- disable_dma(devpriv->dma);
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->ai_eos) {
- set_dma_count(devpriv->dma,
- devpriv->dmabytestomove[devpriv->next_dma_buf]);
- } else {
- if (devpriv->dma_runs_to_end) {
- set_dma_count(devpriv->dma,
- devpriv->dmabytestomove[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- devpriv->dma_runs_to_end--;
+ /* if using EOS, adapt DMA buffer to one scan */
+ bytes = devpriv->ai_eos ? comedi_bytes_per_scan(s) : desc->maxsize;
+ max_samples = comedi_bytes_to_samples(s, bytes);
+
+ /*
+ * Determine dma size based on the buffer size plus the number of
+ * unread samples and the number of samples remaining in the command.
+ */
+ nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
+ if (nsamples > unread_samples) {
+ nsamples -= unread_samples;
+ desc->size = comedi_samples_to_bytes(s, nsamples);
+ comedi_isadma_program(desc);
}
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
}
static void pcl812_ai_set_chan_range(struct comedi_device *dev,
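
A stand-alone model (plain C) of the sizing logic in the new pcl812_ai_setup_dma() above. samples_left() plays the role of comedi_nsamples_left(): for a TRIG_COUNT command it limits the request to what the command still needs; unread_samples are samples already sitting in the other DMA buffer that have not yet been pushed to the comedi buffer.

#include <stdio.h>

static unsigned int samples_left(unsigned int remaining_in_cmd,
				 unsigned int requested)
{
	return requested < remaining_in_cmd ? requested : remaining_in_cmd;
}

/* returns the number of samples to program, 0 = don't restart DMA */
static unsigned int next_dma_samples(unsigned int buf_samples,
				     unsigned int unread_samples,
				     unsigned int remaining_in_cmd)
{
	unsigned int nsamples;

	nsamples = samples_left(remaining_in_cmd, buf_samples + unread_samples);
	if (nsamples > unread_samples)
		return nsamples - unread_samples;
	return 0;
}

int main(void)
{
	/* plenty of samples left: program a full buffer */
	printf("%u\n", next_dma_samples(4096, 0, 100000));	/* 4096 */
	/* near the end: only program what is still missing */
	printf("%u\n", next_dma_samples(4096, 4096, 5000));	/* 904 */
	/* everything still needed is already in the other buffer */
	printf("%u\n", next_dma_samples(4096, 5000, 5000));	/* 0 */
	return 0;
}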
@@ -786,6 +720,7 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int ctrl = 0;
unsigned int i;
@@ -794,7 +729,7 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
pcl812_ai_set_chan_range(dev, cmd->chanlist[0], 1);
- if (devpriv->dma) { /* check if we can use DMA transfer */
+ if (dma) { /* check if we can use DMA transfer */
devpriv->ai_dma = 1;
for (i = 1; i < cmd->chanlist_len; i++)
if (cmd->chanlist[0] != cmd->chanlist[i]) {
@@ -817,8 +752,11 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_dma = 0;
}
- if (devpriv->ai_dma)
- pcl812_ai_setup_dma(dev, s);
+ if (devpriv->ai_dma) {
+ /* setup and enable dma for the first buffer */
+ dma->cur_dma = 0;
+ pcl812_ai_setup_dma(dev, s, 0);
+ }
switch (cmd->convert_src) {
case TRIG_TIMER:
@@ -859,7 +797,7 @@ static void pcl812_handle_eoc(struct comedi_device *dev,
if (pcl812_ai_eoc(dev, s, NULL, 0)) {
dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
return;
}
@@ -895,19 +833,21 @@ static void pcl812_handle_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
- int len, bufptr;
- unsigned short *ptr;
-
- ptr = (unsigned short *)devpriv->dmabuf[devpriv->next_dma_buf];
- len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
- devpriv->ai_poll_ptr;
-
- pcl812_ai_setup_next_dma(dev, s);
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned int nsamples;
+ int bufptr;
+ nsamples = comedi_bytes_to_samples(s, desc->size) -
+ devpriv->ai_poll_ptr;
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
- transfer_from_dma_buf(dev, s, ptr, bufptr, len);
+ /* restart dma with the next buffer */
+ dma->cur_dma = 1 - dma->cur_dma;
+ pcl812_ai_setup_dma(dev, s, nsamples);
+
+ transfer_from_dma_buf(dev, s, desc->virt_addr, bufptr, nsamples);
}
static irqreturn_t pcl812_interrupt(int irq, void *d)
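
A stand-alone model of the ping-pong handling in pcl812_handle_dma() above: the completed buffer is drained from ai_poll_ptr onwards (samples before that were already consumed by ai_poll), cur_dma flips to the other descriptor, and DMA is restarted before the drained samples are copied out.

#include <stdio.h>

struct dma_state {
	unsigned int cur_dma;		/* 0 or 1 */
	unsigned int ai_poll_ptr;	/* samples already read by poll */
};

static unsigned int on_dma_complete(struct dma_state *st,
				    unsigned int buf_samples)
{
	unsigned int nsamples = buf_samples - st->ai_poll_ptr;

	st->ai_poll_ptr = 0;		/* next buffer starts fresh */
	st->cur_dma = 1 - st->cur_dma;	/* restart on the other buffer */
	return nsamples;		/* samples to copy from the old buffer */
}

int main(void)
{
	struct dma_state st = { .cur_dma = 0, .ai_poll_ptr = 300 };

	printf("%u %u\n", on_dma_complete(&st, 4096), st.cur_dma);	/* 3796 1 */
	return 0;
}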
@@ -935,45 +875,37 @@ static irqreturn_t pcl812_interrupt(int irq, void *d)
static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc;
unsigned long flags;
- unsigned int top1, top2, i;
+ unsigned int poll;
+ int ret;
+ /* poll is valid only for DMA transfer */
if (!devpriv->ai_dma)
- return 0; /* poll is valid only for DMA transfer */
+ return 0;
spin_lock_irqsave(&dev->spinlock, flags);
- for (i = 0; i < 10; i++) {
- /* where is now DMA */
- top1 = get_dma_residue(devpriv->ai_dma);
- top2 = get_dma_residue(devpriv->ai_dma);
- if (top1 == top2)
- break;
- }
-
- if (top1 != top2) {
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return 0;
- }
- /* where is now DMA in buffer */
- top1 = devpriv->dmabytestomove[1 - devpriv->next_dma_buf] - top1;
- top1 >>= 1; /* sample position */
- top2 = top1 - devpriv->ai_poll_ptr;
- if (top2 < 1) { /* no new samples */
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return 0;
+ poll = comedi_isadma_poll(dma);
+ poll = comedi_bytes_to_samples(s, poll);
+ if (poll > devpriv->ai_poll_ptr) {
+ desc = &dma->desc[dma->cur_dma];
+ transfer_from_dma_buf(dev, s, desc->virt_addr,
+ devpriv->ai_poll_ptr,
+ poll - devpriv->ai_poll_ptr);
+ /* new buffer position */
+ devpriv->ai_poll_ptr = poll;
+
+ ret = comedi_buf_n_bytes_ready(s);
+ } else {
+ /* no new samples */
+ ret = 0;
}
- transfer_from_dma_buf(dev, s,
- (void *)devpriv->dmabuf[1 -
- devpriv->next_dma_buf],
- devpriv->ai_poll_ptr, top2);
-
- devpriv->ai_poll_ptr = top1; /* new buffer position */
-
spin_unlock_irqrestore(&dev->spinlock, flags);
- return comedi_buf_n_bytes_ready(s);
+ return ret;
}
static int pcl812_ai_cancel(struct comedi_device *dev,
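
A stand-alone model of the new pcl812_ai_poll() bookkeeping: comedi_isadma_poll() reports how far the in-flight transfer has progressed (here poll_samples); anything beyond ai_poll_ptr is new and gets copied out, and the pointer advances so the eventual DMA-complete interrupt only drains the remainder.

#include <stdio.h>

static unsigned int poll_new_samples(unsigned int *ai_poll_ptr,
				     unsigned int poll_samples)
{
	unsigned int fresh = 0;

	if (poll_samples > *ai_poll_ptr) {
		fresh = poll_samples - *ai_poll_ptr;
		*ai_poll_ptr = poll_samples;	/* new buffer position */
	}
	return fresh;
}

int main(void)
{
	unsigned int ptr = 0;

	printf("%u\n", poll_new_samples(&ptr, 120));	/* 120 */
	printf("%u\n", poll_new_samples(&ptr, 120));	/* 0: nothing new */
	printf("%u\n", poll_new_samples(&ptr, 500));	/* 380 */
	return 0;
}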
@@ -982,7 +914,7 @@ static int pcl812_ai_cancel(struct comedi_device *dev,
struct pcl812_private *devpriv = dev->private;
if (devpriv->ai_dma)
- disable_dma(devpriv->dma);
+ comedi_isadma_disable(devpriv->dma->chan);
outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
dev->iobase + PCL812_CTRL_REG);
@@ -1192,6 +1124,27 @@ static void pcl812_set_ai_range_table(struct comedi_device *dev,
}
}
+static void pcl812_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
+{
+ struct pcl812_private *devpriv = dev->private;
+
+ /* only DMA channels 3 and 1 are valid */
+ if (!(dma_chan == 3 || dma_chan == 1))
+ return;
+
+ /* DMA uses two 8K buffers */
+ devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
+ PAGE_SIZE * 2, COMEDI_ISADMA_READ);
+}
+
+static void pcl812_free_dma(struct comedi_device *dev)
+{
+ struct pcl812_private *devpriv = dev->private;
+
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
+}
+
static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl812_board *board = dev->board_ptr;
@@ -1200,7 +1153,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int n_subdevices;
int subdev;
int ret;
- int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -1218,31 +1170,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
/* we need an IRQ to do DMA on channel 3 or 1 */
- if (dev->irq && board->has_dma &&
- (it->options[2] == 3 || it->options[2] == 1)) {
- ret = request_dma(it->options[2], dev->board_name);
- if (ret) {
- dev_err(dev->class_dev,
- "unable to request DMA channel %d\n",
- it->options[2]);
- return -EBUSY;
- }
- devpriv->dma = it->options[2];
-
- devpriv->dmapages = 1; /* we want 8KB */
- devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
-
- for (i = 0; i < 2; i++) {
- unsigned long dmabuf;
-
- dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
- if (!dmabuf)
- return -ENOMEM;
-
- devpriv->dmabuf[i] = dmabuf;
- devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
- }
- }
+ if (dev->irq && board->has_dma)
+ pcl812_alloc_dma(dev, it->options[2]);
/* differential analog inputs? */
switch (board->board_type) {
@@ -1384,16 +1313,7 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static void pcl812_detach(struct comedi_device *dev)
{
- struct pcl812_private *devpriv = dev->private;
-
- if (devpriv) {
- if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages);
- if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages);
- if (devpriv->dma)
- free_dma(devpriv->dma);
- }
+ pcl812_free_dma(dev);
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 73deb4bd5c93..da35edfccbc3 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -33,14 +33,14 @@ Configuration Options:
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <asm/dma.h>
+#include "../comedidev.h"
+
+#include "comedi_isadma.h"
#include "comedi_fc.h"
#include "8253.h"
@@ -114,14 +114,7 @@ static const struct pcl816_board boardtypes[] = {
};
struct pcl816_private {
- unsigned int dma; /* used DMA, 0=don't use DMA */
- unsigned int dmapages;
- unsigned int hwdmasize;
- unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */
- unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
- int next_dma_buf; /* which DMA buffer will be used next round */
- long dma_runs_to_end; /* how many we must permorm DMA transfer to end of record */
- unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
+ struct comedi_isadma *dma;
unsigned int ai_poll_ptr; /* how many sampes transfer poll */
unsigned int divisor1;
unsigned int divisor2;
@@ -149,63 +142,27 @@ static void pcl816_start_pacer(struct comedi_device *dev, bool load_counters)
}
static void pcl816_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl816_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int dma_flags;
- unsigned int bytes;
-
- bytes = devpriv->hwdmasize;
- if (cmd->stop_src == TRIG_COUNT) {
- /* how many */
- bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
-
- /* how many DMA pages we must fill */
- devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
-
- /* on last dma transfer must be moved */
- devpriv->last_dma_run = bytes % devpriv->hwdmasize;
- devpriv->dma_runs_to_end--;
- if (devpriv->dma_runs_to_end >= 0)
- bytes = devpriv->hwdmasize;
- } else
- devpriv->dma_runs_to_end = -1;
-
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, bytes);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
-}
-
-static void pcl816_ai_setup_next_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
+ struct comedi_subdevice *s,
+ unsigned int unread_samples)
{
struct pcl816_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long dma_flags;
-
- disable_dma(devpriv->dma);
- if (devpriv->dma_runs_to_end > -1 || cmd->stop_src == TRIG_NONE) {
- /* switch dma bufs */
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- set_dma_addr(devpriv->dma,
- devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->dma_runs_to_end)
- set_dma_count(devpriv->dma, devpriv->hwdmasize);
- else
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned int max_samples = comedi_bytes_to_samples(s, desc->maxsize);
+ unsigned int nsamples;
+
+ comedi_isadma_disable(dma->chan);
+
+ /*
+ * Determine dma size based on the buffer maxsize plus the number of
+ * unread samples and the number of samples remaining in the command.
+ */
+ nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
+ if (nsamples > unread_samples) {
+ nsamples -= unread_samples;
+ desc->size = comedi_samples_to_bytes(s, nsamples);
+ comedi_isadma_program(desc);
}
-
- devpriv->dma_runs_to_end--;
}
static void pcl816_ai_set_chan_range(struct comedi_device *dev,
@@ -318,9 +275,10 @@ static irqreturn_t pcl816_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
struct pcl816_private *devpriv = dev->private;
- unsigned short *ptr;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned int nsamples;
unsigned int bufptr;
- unsigned int len;
if (!dev->attached || !devpriv->ai_cmd_running) {
pcl816_ai_clear_eoc(dev);
@@ -333,15 +291,16 @@ static irqreturn_t pcl816_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- ptr = (unsigned short *)devpriv->dmabuf[devpriv->next_dma_buf];
-
- pcl816_ai_setup_next_dma(dev, s);
-
- len = (devpriv->hwdmasize >> 1) - devpriv->ai_poll_ptr;
+ nsamples = comedi_bytes_to_samples(s, desc->size) -
+ devpriv->ai_poll_ptr;
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
- transfer_from_dma_buf(dev, s, ptr, bufptr, len);
+ /* restart dma with the next buffer */
+ dma->cur_dma = 1 - dma->cur_dma;
+ pcl816_ai_setup_dma(dev, s, nsamples);
+
+ transfer_from_dma_buf(dev, s, desc->virt_addr, bufptr, nsamples);
pcl816_ai_clear_eoc(dev);
@@ -483,6 +442,7 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl816_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int ctrl;
unsigned int seglen;
@@ -502,7 +462,9 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_poll_ptr = 0;
devpriv->ai_cmd_canceled = 0;
- pcl816_ai_setup_dma(dev, s);
+ /* setup and enable dma for the first buffer */
+ dma->cur_dma = 0;
+ pcl816_ai_setup_dma(dev, s, 0);
pcl816_start_pacer(dev, true);
@@ -513,7 +475,8 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
ctrl |= PCL816_CTRL_EXT_TRIG;
outb(ctrl, dev->iobase + PCL816_CTRL_REG);
- outb((devpriv->dma << 4) | dev->irq, dev->iobase + PCL816_STATUS_REG);
+ outb((dma->chan << 4) | dev->irq,
+ dev->iobase + PCL816_STATUS_REG);
return 0;
}
@@ -521,42 +484,34 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl816_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc;
unsigned long flags;
- unsigned int top1, top2, i;
+ unsigned int poll;
+ int ret;
spin_lock_irqsave(&dev->spinlock, flags);
- for (i = 0; i < 20; i++) {
- top1 = get_dma_residue(devpriv->dma); /* where is now DMA */
- top2 = get_dma_residue(devpriv->dma);
- if (top1 == top2)
- break;
- }
- if (top1 != top2) {
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return 0;
- }
-
- /* where is now DMA in buffer */
- top1 = devpriv->hwdmasize - top1;
- top1 >>= 1; /* sample position */
- top2 = top1 - devpriv->ai_poll_ptr;
- if (top2 < 1) { /* no new samples */
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return 0;
- }
+ poll = comedi_isadma_poll(dma);
+ poll = comedi_bytes_to_samples(s, poll);
+ if (poll > devpriv->ai_poll_ptr) {
+ desc = &dma->desc[dma->cur_dma];
+ transfer_from_dma_buf(dev, s, desc->virt_addr,
+ devpriv->ai_poll_ptr,
+ poll - devpriv->ai_poll_ptr);
+ /* new buffer position */
+ devpriv->ai_poll_ptr = poll;
- transfer_from_dma_buf(dev, s,
- (unsigned short *)devpriv->dmabuf[devpriv->
- next_dma_buf],
- devpriv->ai_poll_ptr, top2);
+ comedi_handle_events(dev, s);
- devpriv->ai_poll_ptr = top1; /* new buffer position */
+ ret = comedi_buf_n_bytes_ready(s);
+ } else {
+ /* no new samples */
+ ret = 0;
+ }
spin_unlock_irqrestore(&dev->spinlock, flags);
- comedi_handle_events(dev, s);
-
- return comedi_buf_n_bytes_ready(s);
+ return ret;
}
static int pcl816_ai_cancel(struct comedi_device *dev,
@@ -657,13 +612,44 @@ static void pcl816_reset(struct comedi_device *dev)
outb(0, dev->iobase + PCL816_DO_DI_MSB_REG);
}
+static void pcl816_alloc_irq_and_dma(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ struct pcl816_private *devpriv = dev->private;
+ unsigned int irq_num = it->options[1];
+ unsigned int dma_chan = it->options[2];
+
+ /* only IRQs 2-7 and DMA channels 3 and 1 are valid */
+ if (!(irq_num >= 2 && irq_num <= 7) ||
+ !(dma_chan == 3 || dma_chan == 1))
+ return;
+
+ if (request_irq(irq_num, pcl816_interrupt, 0, dev->board_name, dev))
+ return;
+
+ /* DMA uses two 16K buffers */
+ devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
+ PAGE_SIZE * 4, COMEDI_ISADMA_READ);
+ if (!devpriv->dma)
+ free_irq(irq_num, dev);
+ else
+ dev->irq = irq_num;
+}
+
+static void pcl816_free_dma(struct comedi_device *dev)
+{
+ struct pcl816_private *devpriv = dev->private;
+
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
+}
+
static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl816_board *board = dev->board_ptr;
struct pcl816_private *devpriv;
struct comedi_subdevice *s;
int ret;
- int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -673,39 +659,8 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- /* we can use IRQ 2-7 for async command support */
- if (it->options[1] >= 2 && it->options[1] <= 7) {
- ret = request_irq(it->options[1], pcl816_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- /* we need an IRQ to do DMA on channel 3 or 1 */
- if (dev->irq && (it->options[2] == 3 || it->options[2] == 1)) {
- ret = request_dma(it->options[2], dev->board_name);
- if (ret) {
- dev_err(dev->class_dev,
- "unable to request DMA channel %d\n",
- it->options[2]);
- return -EBUSY;
- }
- devpriv->dma = it->options[2];
-
- devpriv->dmapages = 2; /* we need 16KB */
- devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
-
- for (i = 0; i < 2; i++) {
- unsigned long dmabuf;
-
- dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
- if (!dmabuf)
- return -ENOMEM;
-
- devpriv->dmabuf[i] = dmabuf;
- devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
- }
- }
+ /* an IRQ and DMA are required to support async commands */
+ pcl816_alloc_irq_and_dma(dev, it);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
@@ -718,7 +673,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = board->ai_maxdata;
s->range_table = &range_pcl816;
s->insn_read = pcl816_ai_insn_read;
- if (devpriv->dma) {
+ if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = board->ai_chanlist;
@@ -764,18 +719,11 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static void pcl816_detach(struct comedi_device *dev)
{
- struct pcl816_private *devpriv = dev->private;
-
if (dev->private) {
pcl816_ai_cancel(dev, dev->read_subdev);
pcl816_reset(dev);
- if (devpriv->dma)
- free_dma(devpriv->dma);
- if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages);
- if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages);
}
+ pcl816_free_dma(dev);
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 8edea35532a9..7e4cdea5fe59 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -1,112 +1,105 @@
/*
- comedi/drivers/pcl818.c
-
- Author: Michal Dobes <dobes@tesnet.cz>
-
- hardware driver for Advantech cards:
- card: PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818, PCL-718
- driver: pcl818l, pcl818h, pcl818hd, pcl818hg, pcl818, pcl718
-*/
-/*
-Driver: pcl818
-Description: Advantech PCL-818 cards, PCL-718
-Author: Michal Dobes <dobes@tesnet.cz>
-Devices: [Advantech] PCL-818L (pcl818l), PCL-818H (pcl818h),
- PCL-818HD (pcl818hd), PCL-818HG (pcl818hg), PCL-818 (pcl818),
- PCL-718 (pcl718)
-Status: works
-
-All cards have 16 SE/8 DIFF ADCs, one or two DACs, 16 DI and 16 DO.
-Differences are only at maximal sample speed, range list and FIFO
-support.
-The driver support AI mode 0, 1, 3 other subdevices (AO, DI, DO) support
-only mode 0. If DMA/FIFO/INT are disabled then AI support only mode 0.
-PCL-818HD and PCL-818HG support 1kword FIFO. Driver support this FIFO
-but this code is untested.
-A word or two about DMA. Driver support DMA operations at two ways:
-1) DMA uses two buffers and after one is filled then is generated
- INT and DMA restart with second buffer. With this mode I'm unable run
- more that 80Ksamples/secs without data dropouts on K6/233.
-2) DMA uses one buffer and run in autoinit mode and the data are
- from DMA buffer moved on the fly with 2kHz interrupts from RTC.
- This mode is used if the interrupt 8 is available for allocation.
- If not, then first DMA mode is used. With this I can run at
- full speed one card (100ksamples/secs) or two cards with
- 60ksamples/secs each (more is problem on account of ISA limitations).
- To use this mode you must have compiled kernel with disabled
- "Enhanced Real Time Clock Support".
- Maybe you can have problems if you use xntpd or similar.
- If you've data dropouts with DMA mode 2 then:
- a) disable IDE DMA
- b) switch text mode console to fb.
-
- Options for PCL-818L:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0, 10=10MHz clock for 8254
- 1= 1MHz clock for 8254
- [4] - 0, 5=A/D input -5V.. +5V
- 1, 10=A/D input -10V..+10V
- [5] - 0, 5=D/A output 0-5V (internal reference -5V)
- 1, 10=D/A output 0-10V (internal reference -10V)
- 2 =D/A output unknown (external reference)
-
- Options for PCL-818, PCL-818H:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0, 10=10MHz clock for 8254
- 1= 1MHz clock for 8254
- [4] - 0, 5=D/A output 0-5V (internal reference -5V)
- 1, 10=D/A output 0-10V (internal reference -10V)
- 2 =D/A output unknown (external reference)
-
- Options for PCL-818HD, PCL-818HG:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - DMA/FIFO (-1=use FIFO, 0=disable both FIFO and DMA,
- 1=use DMA ch 1, 3=use DMA ch 3)
- [3] - 0, 10=10MHz clock for 8254
- 1= 1MHz clock for 8254
- [4] - 0, 5=D/A output 0-5V (internal reference -5V)
- 1, 10=D/A output 0-10V (internal reference -10V)
- 2 =D/A output unknown (external reference)
-
- Options for PCL-718:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0, 10=10MHz clock for 8254
- 1= 1MHz clock for 8254
- [4] - 0=A/D Range is +/-10V
- 1= +/-5V
- 2= +/-2.5V
- 3= +/-1V
- 4= +/-0.5V
- 5= user defined bipolar
- 6= 0-10V
- 7= 0-5V
- 8= 0-2V
- 9= 0-1V
- 10= user defined unipolar
- [5] - 0, 5=D/A outputs 0-5V (internal reference -5V)
- 1, 10=D/A outputs 0-10V (internal reference -10V)
- 2=D/A outputs unknown (external reference)
- [6] - 0, 60=max 60kHz A/D sampling
- 1,100=max 100kHz A/D sampling (PCL-718 with Option 001 installed)
-
-*/
+ * comedi/drivers/pcl818.c
+ *
+ * Driver: pcl818
+ * Description: Advantech PCL-818 cards, PCL-718
+ * Author: Michal Dobes <dobes@tesnet.cz>
+ * Devices: [Advantech] PCL-818L (pcl818l), PCL-818H (pcl818h),
+ * PCL-818HD (pcl818hd), PCL-818HG (pcl818hg), PCL-818 (pcl818),
+ * PCL-718 (pcl718)
+ * Status: works
+ *
+ * All cards have 16 SE/8 DIFF ADCs, one or two DACs, 16 DI and 16 DO.
+ * Differences are only in the maximal sample speed, range list and FIFO
+ * support.
+ * The driver supports AI modes 0, 1 and 3; the other subdevices (AO, DI,
+ * DO) support only mode 0. If DMA/FIFO/INT are disabled then AI supports
+ * only mode 0.
+ * PCL-818HD and PCL-818HG have a 1 kword FIFO. The driver supports this
+ * FIFO but that code is untested.
+ * A word or two about DMA. The driver supports DMA operation in two ways:
+ * 1) DMA uses two buffers; after one is filled an INT is generated and
+ *    DMA restarts with the second buffer. In this mode I'm unable to run
+ *    more than 80 ksamples/s without data dropouts on a K6/233.
+ * 2) DMA uses one buffer running in autoinit mode and the data are moved
+ *    out of the DMA buffer on the fly by 2 kHz interrupts from the RTC.
+ *    This mode is used if interrupt 8 is available for allocation.
+ *    If not, the first DMA mode is used. With this I can run one card at
+ *    full speed (100 ksamples/s) or two cards at 60 ksamples/s each
+ *    (more is a problem because of ISA limitations).
+ *    To use this mode you must have compiled the kernel with
+ *    "Enhanced Real Time Clock Support" disabled.
+ *    You may have problems if you use xntpd or similar.
+ *    If you see data dropouts with DMA mode 2 then:
+ *      a) disable IDE DMA
+ *      b) switch the text mode console to fb.
+ *
+ * Options for PCL-818L:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
+ * [2] - DMA (0=disable, 1, 3)
+ * [3] - 0, 10=10MHz clock for 8254
+ * 1= 1MHz clock for 8254
+ * [4] - 0, 5=A/D input -5V.. +5V
+ * 1, 10=A/D input -10V..+10V
+ * [5] - 0, 5=D/A output 0-5V (internal reference -5V)
+ * 1, 10=D/A output 0-10V (internal reference -10V)
+ * 2 =D/A output unknown (external reference)
+ *
+ * Options for PCL-818, PCL-818H:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
+ * [2] - DMA (0=disable, 1, 3)
+ * [3] - 0, 10=10MHz clock for 8254
+ * 1= 1MHz clock for 8254
+ * [4] - 0, 5=D/A output 0-5V (internal reference -5V)
+ * 1, 10=D/A output 0-10V (internal reference -10V)
+ * 2 =D/A output unknown (external reference)
+ *
+ * Options for PCL-818HD, PCL-818HG:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
+ * [2] - DMA/FIFO (-1=use FIFO, 0=disable both FIFO and DMA,
+ * 1=use DMA ch 1, 3=use DMA ch 3)
+ * [3] - 0, 10=10MHz clock for 8254
+ * 1= 1MHz clock for 8254
+ * [4] - 0, 5=D/A output 0-5V (internal reference -5V)
+ * 1, 10=D/A output 0-10V (internal reference -10V)
+ * 2 =D/A output unknown (external reference)
+ *
+ * Options for PCL-718:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
+ * [2] - DMA (0=disable, 1, 3)
+ * [3] - 0, 10=10MHz clock for 8254
+ * 1= 1MHz clock for 8254
+ * [4] - 0=A/D Range is +/-10V
+ * 1= +/-5V
+ * 2= +/-2.5V
+ * 3= +/-1V
+ * 4= +/-0.5V
+ * 5= user defined bipolar
+ * 6= 0-10V
+ * 7= 0-5V
+ * 8= 0-2V
+ * 9= 0-1V
+ * 10= user defined unipolar
+ * [5] - 0, 5=D/A outputs 0-5V (internal reference -5V)
+ * 1, 10=D/A outputs 0-10V (internal reference -10V)
+ * 2=D/A outputs unknown (external reference)
+ * [6] - 0, 60=max 60kHz A/D sampling
+ * 1,100=max 100kHz A/D sampling (PCL-718 with Option 001 installed)
+ *
+ */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <asm/dma.h>
#include "../comedidev.h"
+#include "comedi_isadma.h"
#include "comedi_fc.h"
#include "8253.h"
@@ -303,20 +296,14 @@ static const struct pcl818_board boardtypes[] = {
};
struct pcl818_private {
- unsigned int dma; /* used DMA, 0=don't use DMA */
- unsigned int dmapages;
- unsigned int hwdmasize;
- unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */
- unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
- int next_dma_buf; /* which DMA buffer will be used next round */
- long dma_runs_to_end; /* how many we must permorm DMA transfer to end of record */
- unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
- unsigned int ns_min; /* manimal allowed delay between samples (in us) for actual card */
+ struct comedi_isadma *dma;
+ /* minimal allowed delay between samples (in ns) for the actual card */
+ unsigned int ns_min;
int i8253_osc_base; /* 1/frequency of on board oscilator in ns */
- unsigned int act_chanlist[16]; /* MUX setting for actual AI operations */
+ /* MUX setting for actual AI operations */
+ unsigned int act_chanlist[16];
unsigned int act_chanlist_len; /* how long is actual MUX list */
unsigned int act_chanlist_pos; /* actual position in MUX list */
- unsigned int ai_data_len; /* len of data buffer */
unsigned int divisor1;
unsigned int divisor2;
unsigned int usefifo:1;
@@ -340,58 +327,27 @@ static void pcl818_start_pacer(struct comedi_device *dev, bool load_counters)
}
static void pcl818_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int flags;
- unsigned int bytes;
-
- disable_dma(devpriv->dma); /* disable dma */
- bytes = devpriv->hwdmasize;
- if (cmd->stop_src == TRIG_COUNT) {
- bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
- devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
- devpriv->last_dma_run = bytes % devpriv->hwdmasize;
- devpriv->dma_runs_to_end--;
- if (devpriv->dma_runs_to_end >= 0)
- bytes = devpriv->hwdmasize;
- }
-
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, bytes);
- release_dma_lock(flags);
- enable_dma(devpriv->dma);
-}
-
-static void pcl818_ai_setup_next_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
+ struct comedi_subdevice *s,
+ unsigned int unread_samples)
{
struct pcl818_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long flags;
-
- disable_dma(devpriv->dma);
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- if (devpriv->dma_runs_to_end > -1 || cmd->stop_src == TRIG_NONE) {
- /* switch dma bufs */
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- flags = claim_dma_lock();
- set_dma_addr(devpriv->dma,
- devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->dma_runs_to_end || cmd->stop_src == TRIG_NONE)
- set_dma_count(devpriv->dma, devpriv->hwdmasize);
- else
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- release_dma_lock(flags);
- enable_dma(devpriv->dma);
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned int max_samples = comedi_bytes_to_samples(s, desc->maxsize);
+ unsigned int nsamples;
+
+ comedi_isadma_disable(dma->chan);
+
+ /*
+ * Determine dma size based on the buffer maxsize plus the number of
+ * unread samples and the number of samples remaining in the command.
+ */
+ nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
+ if (nsamples > unread_samples) {
+ nsamples -= unread_samples;
+ desc->size = comedi_samples_to_bytes(s, nsamples);
+ comedi_isadma_program(desc);
}
-
- devpriv->dma_runs_to_end--;
}
static void pcl818_ai_set_chan_range(struct comedi_device *dev,
@@ -493,11 +449,12 @@ static int pcl818_ai_eoc(struct comedi_device *dev,
return -EBUSY;
}
-static bool pcl818_ai_dropout(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan)
+static bool pcl818_ai_write_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan, unsigned int val)
{
struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
unsigned int expected_chan;
expected_chan = devpriv->act_chanlist[devpriv->act_chanlist_pos];
@@ -507,17 +464,11 @@ static bool pcl818_ai_dropout(struct comedi_device *dev,
(devpriv->dma) ? "DMA" :
(devpriv->usefifo) ? "FIFO" : "IRQ",
chan, expected_chan);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- return true;
+ s->async->events |= COMEDI_CB_ERROR;
+ return false;
}
- return false;
-}
-static bool pcl818_ai_next_chan(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
+ comedi_buf_write_samples(s, &val, 1);
devpriv->act_chanlist_pos++;
if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
@@ -540,47 +491,35 @@ static void pcl818_handle_eoc(struct comedi_device *dev,
if (pcl818_ai_eoc(dev, s, NULL, 0)) {
dev_err(dev->class_dev, "A/D mode1/3 IRQ without DRDY!\n");
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
return;
}
val = pcl818_ai_get_sample(dev, s, &chan);
-
- if (pcl818_ai_dropout(dev, s, chan))
- return;
-
- comedi_buf_write_samples(s, &val, 1);
-
- pcl818_ai_next_chan(dev, s);
+ pcl818_ai_write_sample(dev, s, chan, val);
}
static void pcl818_handle_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
- unsigned short *ptr;
+ struct comedi_isadma *dma = devpriv->dma;
+ struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
+ unsigned short *ptr = desc->virt_addr;
+ unsigned int nsamples = comedi_bytes_to_samples(s, desc->size);
unsigned int chan;
unsigned int val;
- int i, len, bufptr;
-
- pcl818_ai_setup_next_dma(dev, s);
-
- ptr = (unsigned short *)devpriv->dmabuf[1 - devpriv->next_dma_buf];
+ int i;
- len = devpriv->hwdmasize >> 1;
- bufptr = 0;
+ /* restart dma with the next buffer */
+ dma->cur_dma = 1 - dma->cur_dma;
+ pcl818_ai_setup_dma(dev, s, nsamples);
- for (i = 0; i < len; i++) {
- val = ptr[bufptr++];
+ for (i = 0; i < nsamples; i++) {
+ val = ptr[i];
chan = val & 0xf;
val = (val >> 4) & s->maxdata;
-
- if (pcl818_ai_dropout(dev, s, chan))
- break;
-
- comedi_buf_write_samples(s, &val, 1);
-
- if (!pcl818_ai_next_chan(dev, s))
+ if (!pcl818_ai_write_sample(dev, s, chan, val))
break;
}
}
@@ -597,14 +536,14 @@ static void pcl818_handle_fifo(struct comedi_device *dev,
if (status & 4) {
dev_err(dev->class_dev, "A/D mode1/3 FIFO overflow!\n");
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
return;
}
if (status & 1) {
dev_err(dev->class_dev,
"A/D mode1/3 FIFO interrupt without data!\n");
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
return;
}
@@ -615,13 +554,7 @@ static void pcl818_handle_fifo(struct comedi_device *dev,
for (i = 0; i < len; i++) {
val = pcl818_ai_get_fifo_sample(dev, s, &chan);
-
- if (pcl818_ai_dropout(dev, s, chan))
- break;
-
- comedi_buf_write_samples(s, &val, 1);
-
- if (!pcl818_ai_next_chan(dev, s))
+ if (!pcl818_ai_write_sample(dev, s, chan, val))
break;
}
}
@@ -687,7 +620,8 @@ static int check_channel_list(struct comedi_device *dev,
break;
nowmustbechan =
(CR_CHAN(chansegment[i - 1]) + 1) % s->n_chan;
- if (nowmustbechan != CR_CHAN(chanlist[i])) { /* channel list isn't continuous :-( */
+ if (nowmustbechan != CR_CHAN(chanlist[i])) {
+ /* channel list isn't continuous :-( */
dev_dbg(dev->class_dev,
"channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
i, CR_CHAN(chanlist[i]), nowmustbechan,
@@ -804,6 +738,7 @@ static int pcl818_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int ctrl = 0;
unsigned int seglen;
@@ -818,11 +753,9 @@ static int pcl818_ai_cmd(struct comedi_device *dev,
return -EINVAL;
pcl818_ai_setup_chanlist(dev, cmd->chanlist, seglen);
- devpriv->ai_data_len = s->async->prealloc_bufsz;
devpriv->ai_cmd_running = 1;
devpriv->ai_cmd_canceled = 0;
devpriv->act_chanlist_pos = 0;
- devpriv->dma_runs_to_end = 0;
if (cmd->convert_src == TRIG_TIMER)
ctrl |= PCL818_CTRL_PACER_TRIG;
@@ -831,8 +764,10 @@ static int pcl818_ai_cmd(struct comedi_device *dev,
outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
- if (devpriv->dma) {
- pcl818_ai_setup_dma(dev, s);
+ if (dma) {
+ /* setup and enable dma for the first buffer */
+ dma->cur_dma = 0;
+ pcl818_ai_setup_dma(dev, s, 0);
ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq) |
PCL818_CTRL_DMAE;
@@ -854,12 +789,13 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
+ struct comedi_isadma *dma = devpriv->dma;
struct comedi_cmd *cmd = &s->async->cmd;
if (!devpriv->ai_cmd_running)
return 0;
- if (devpriv->dma) {
+ if (dma) {
if (cmd->stop_src == TRIG_NONE ||
(cmd->stop_src == TRIG_COUNT &&
s->async->scans_done < cmd->stop_arg)) {
@@ -872,7 +808,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
return 0;
}
}
- disable_dma(devpriv->dma);
+ comedi_isadma_disable(dma->chan);
}
outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
@@ -1054,13 +990,33 @@ static void pcl818_set_ai_range_table(struct comedi_device *dev,
}
}
+static void pcl818_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
+{
+ struct pcl818_private *devpriv = dev->private;
+
+ /* only DMA channels 3 and 1 are valid */
+ if (!(dma_chan == 3 || dma_chan == 1))
+ return;
+
+ /* DMA uses two 16K buffers */
+ devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
+ PAGE_SIZE * 4, COMEDI_ISADMA_READ);
+}
+
+static void pcl818_free_dma(struct comedi_device *dev)
+{
+ struct pcl818_private *devpriv = dev->private;
+
+ if (devpriv)
+ comedi_isadma_free(devpriv->dma);
+}
+
static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl818_board *board = dev->board_ptr;
struct pcl818_private *devpriv;
struct comedi_subdevice *s;
int ret;
- int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -1084,31 +1040,8 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->usefifo = 1;
/* we need an IRQ to do DMA on channel 3 or 1 */
- if (dev->irq && board->has_dma &&
- (it->options[2] == 3 || it->options[2] == 1)) {
- ret = request_dma(it->options[2], dev->board_name);
- if (ret) {
- dev_err(dev->class_dev,
- "unable to request DMA channel %d\n",
- it->options[2]);
- return -EBUSY;
- }
- devpriv->dma = it->options[2];
-
- devpriv->dmapages = 2; /* we need 16KB */
- devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
-
- for (i = 0; i < 2; i++) {
- unsigned long dmabuf;
-
- dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
- if (!dmabuf)
- return -ENOMEM;
-
- devpriv->dmabuf[i] = dmabuf;
- devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
- }
- }
+ if (dev->irq && board->has_dma)
+ pcl818_alloc_dma(dev, it->options[2]);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
@@ -1194,8 +1127,10 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->ns_min = board->ns_min;
if (!board->is_818) {
- if ((it->options[6] == 1) || (it->options[6] == 100))
- devpriv->ns_min = 10000; /* extended PCL718 to 100kHz DAC */
+ if ((it->options[6] == 1) || (it->options[6] == 100)) {
+ /* extended PCL-718 (Option 001) to 100 kHz A/D sampling */
+ devpriv->ns_min = 10000;
+ }
}
pcl818_reset(dev);
@@ -1210,13 +1145,8 @@ static void pcl818_detach(struct comedi_device *dev)
if (devpriv) {
pcl818_ai_cancel(dev, dev->read_subdev);
pcl818_reset(dev);
- if (devpriv->dma)
- free_dma(devpriv->dma);
- if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages);
- if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages);
}
+ pcl818_free_dma(dev);
comedi_legacy_detach(dev);
}
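
The pcl818 conversion above replaces the hand-rolled ISA DMA bookkeeping with the comedi_isadma helper and the two-descriptor ping-pong scheme described in the driver header comment. What follows is a minimal user-space sketch of that ping-pong pattern, not the driver itself: dma_disable() and dma_program() are hypothetical stand-ins for the comedi_isadma helpers, and the "samples" printed are just zero-initialised memory.

/*
 * Double-buffer ("ping-pong") DMA sketch: the controller fills one buffer
 * while the interrupt handler drains the other, then the two are swapped
 * and the next transfer is programmed.
 */
#include <stdio.h>

#define NBUF		2
#define MAXSAMPLES	8	/* samples per buffer (tiny, for the demo) */

struct dma_desc {
	unsigned short buf[MAXSAMPLES];
	unsigned int size;	/* samples programmed for this run */
};

static struct dma_desc desc[NBUF];
static unsigned int cur_dma;

static void dma_disable(void)
{
	/* a real driver would stop the ISA DMA channel here */
}

static void dma_program(struct dma_desc *d, unsigned int nsamples)
{
	/* a real driver would also set the bus address and byte count */
	d->size = nsamples;
}

/* Called when the current buffer is full (DMA terminal-count interrupt). */
static void dma_interrupt(unsigned int samples_left_in_command)
{
	struct dma_desc *full = &desc[cur_dma];
	unsigned int i, next;

	dma_disable();
	cur_dma = 1 - cur_dma;			/* swap buffers */
	if (samples_left_in_command > full->size) {
		next = samples_left_in_command - full->size;
		if (next > MAXSAMPLES)
			next = MAXSAMPLES;
		dma_program(&desc[cur_dma], next);
	}

	/* drain the buffer that just completed */
	for (i = 0; i < full->size; i++)
		printf("sample[%u] = %hu\n", i, full->buf[i]);
}

int main(void)
{
	cur_dma = 0;
	dma_program(&desc[cur_dma], MAXSAMPLES);	/* arm the first buffer */
	dma_interrupt(20);	/* pretend 20 samples remain in the command */
	return 0;
}
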
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index e3ac8ac6190e..12f94fe82f5b 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -19,8 +19,7 @@
/*
* Driver: pcmad
* Description: Winsystems PCM-A/D12, PCM-A/D16
- * Devices: (Winsystems) PCM-A/D12 [pcmad12]
- * (Winsystems) PCM-A/D16 [pcmad16]
+ * Devices: [Winsystems] PCM-A/D12 (pcmad12), PCM-A/D16 (pcmad16)
* Author: ds
* Status: untested
*
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
index 59108c06cedc..d86c5e2cd0c7 100644
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ b/drivers/staging/comedi/drivers/pcmda12.c
@@ -19,7 +19,7 @@
/*
* Driver: pcmda12
* Description: A driver for the Winsystems PCM-D/A-12
- * Devices: (Winsystems) PCM-D/A-12 [pcmda12]
+ * Devices: [Winsystems] PCM-D/A-12 (pcmda12)
* Author: Calin Culianu <calin@ajvar.org>
* Updated: Fri, 13 Jan 2006 12:01:01 -0500
* Status: works
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index f0059e935da0..2c0e7ecbf494 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -19,7 +19,7 @@
/*
* Driver: pcmmio
* Description: A driver for the PCM-MIO multifunction board
- * Devices: (Winsystems) PCM-MIO [pcmmio]
+ * Devices: [Winsystems] PCM-MIO (pcmmio)
* Author: Calin Culianu <calin@ajvar.org>
* Updated: Wed, May 16 2007 16:21:10 -0500
* Status: works
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 0f5483b6147f..a1641d981812 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -19,8 +19,7 @@
/*
* Driver: pcmuio
* Description: Winsystems PC-104 based 48/96-channel DIO boards.
- * Devices: (Winsystems) PCM-UIO48A [pcmuio48]
- * (Winsystems) PCM-UIO96A [pcmuio96]
+ * Devices: [Winsystems] PCM-UIO48A (pcmuio48), PCM-UIO96A (pcmuio96)
* Author: Calin Culianu <calin@ajvar.org>
* Updated: Fri, 13 Jan 2006 12:01:01 -0500
* Status: works
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 96098110b0b3..8387fd0e4b7e 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -48,15 +48,10 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
*/
#include <linux/module.h>
-#include "../comedidev.h"
#include <linux/semaphore.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
#include <linux/completion.h>
+#include "../comedi_pcmcia.h"
#include "comedi_fc.h"
struct daqp_private {
@@ -210,8 +205,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
unsigned short data;
if (status & DAQP_STATUS_DATA_LOST) {
- s->async->events |=
- COMEDI_CB_EOA | COMEDI_CB_OVERFLOW;
+ s->async->events |= COMEDI_CB_OVERFLOW;
dev_warn(dev->class_dev, "data lost\n");
break;
}
@@ -239,7 +233,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
if (loop_limit <= 0) {
dev_warn(dev->class_dev,
"loop_limit reached in daqp_interrupt()\n");
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_ERROR;
}
comedi_handle_events(dev, s);
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 581aa58d9c0a..c94ad12ed446 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -19,10 +19,8 @@
/*
* Driver: rtd520
* Description: Real Time Devices PCI4520/DM7520
- * Devices: (Real Time Devices) DM7520HR-1 [DM7520]
- * (Real Time Devices) DM7520HR-8 [DM7520]
- * (Real Time Devices) PCI4520 [PCI4520]
- * (Real Time Devices) PCI4520-8 [PCI4520]
+ * Devices: [Real Time Devices] DM7520HR-1 (DM7520), DM7520HR-8,
+ * PCI4520 (PCI4520), PCI4520-8
* Author: Dan Christian
* Status: Works. Only tested on DM7520-8. Not SMP safe.
*
@@ -1014,10 +1012,8 @@ static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
readw(dev->mmio + LAS0_CLEAR);
/* TODO: allow multiple interrupt sources */
- if (devpriv->xfer_count > 0) /* transfer every N samples */
- writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT);
- else /* 1/2 FIFO transfers */
- writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT);
+ /* transfer every N samples */
+ writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT);
/* BUG: start_src is ASSUMED to be TRIG_NOW */
/* BUG? it seems like things are running before the "start" */
@@ -1031,8 +1027,6 @@ static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct rtd_private *devpriv = dev->private;
- u32 overrun;
- u16 status;
/* pacer stop source: SOFTWARE */
writel(0, dev->mmio + LAS0_PACER_STOP);
@@ -1040,8 +1034,6 @@ static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
writew(0, dev->mmio + LAS0_IT);
devpriv->ai_count = 0; /* stop and don't transfer any more */
- status = readw(dev->mmio + LAS0_IT);
- overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index 67b4b378bd01..340ac776e951 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -19,8 +19,7 @@
/*
* Driver: rti800
* Description: Analog Devices RTI-800/815
- * Devices: (Analog Devices) RTI-800 [rti800]
- * (Analog Devices) RTI-815 [rti815]
+ * Devices: [Analog Devices] RTI-800 (rti800), RTI-815 (rti815)
* Author: David A. Schleef <ds@schleef.org>
* Status: unknown
* Updated: Fri, 05 Sep 2008 14:50:44 +0100
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index 96c3974207ae..6db58fcfd496 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -20,7 +20,7 @@
* Driver: rti802
* Description: Analog Devices RTI-802
* Author: Anders Blomdell <anders.blomdell@control.lth.se>
- * Devices: (Analog Devices) RTI-802 [rti802]
+ * Devices: [Analog Devices] RTI-802 (rti802)
* Status: works
*
* Configuration Options:
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 14932c5f3798..fc497dd92021 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -118,7 +118,7 @@ static void s626_mc_enable(struct comedi_device *dev,
static void s626_mc_disable(struct comedi_device *dev,
unsigned int cmd, unsigned int reg)
{
- writel(cmd << 16 , dev->mmio + reg);
+ writel(cmd << 16, dev->mmio + reg);
mmiowb();
}
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 4737dbf8e01d..1cd7403a4e9c 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -16,10 +16,11 @@
/*
* Driver: usbdux
* Description: University of Stirling USB DAQ & INCITE Technology Limited
- * Devices: (ITL) USB-DUX [usbdux]
+ * Devices: [ITL] USB-DUX (usbdux)
* Author: Bernd Porr <mail@berndporr.me.uk>
* Updated: 10 Oct 2014
* Status: Stable
+ *
* Connection scheme for the counter at the digital port:
* 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
* The sampling rate of the counter is approximately 500Hz.
@@ -79,11 +80,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-#include "../comedidev.h"
+#include "../comedi_usb.h"
#include "comedi_fc.h"
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index ddc4cb9d5ed4..7ce27c16c2f9 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -15,7 +15,7 @@
/*
* Driver: usbduxfast
* Description: University of Stirling USB DAQ & INCITE Technology Limited
- * Devices: (ITL) USB-DUX [usbduxfast]
+ * Devices: [ITL] USB-DUX-FAST (usbduxfast)
* Author: Bernd Porr <mail@berndporr.me.uk>
* Updated: 10 Oct 2014
* Status: stable
@@ -46,11 +46,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
#include "comedi_fc.h"
-#include "../comedidev.h"
+#include "../comedi_usb.h"
/*
* timeout for the USB-transfer
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index dc19435b6520..394969b7458c 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -16,7 +16,7 @@
/*
* Driver: usbduxsigma
* Description: University of Stirling USB DAQ & INCITE Technology Limited
- * Devices: (ITL) USB-DUX [usbduxsigma]
+ * Devices: [ITL] USB-DUX-SIGMA (usbduxsigma)
* Author: Bernd Porr <mail@berndporr.me.uk>
* Updated: 10 Oct 2014
* Status: stable
@@ -45,13 +45,12 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
#include "comedi_fc.h"
-#include "../comedidev.h"
+#include "../comedi_usb.h"
/* timeout for the USB-transfer in ms*/
#define BULK_TIMEOUT 1000
@@ -215,7 +214,6 @@ static void usbduxsigma_ai_handle_urb(struct comedi_device *dev,
struct usbduxsigma_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int dio_state;
uint32_t val;
int ret;
int i;
@@ -224,9 +222,6 @@ static void usbduxsigma_ai_handle_urb(struct comedi_device *dev,
if (devpriv->ai_counter == 0) {
devpriv->ai_counter = devpriv->ai_timer;
- /* get the state of the dio pins to allow external trigger */
- dio_state = be32_to_cpu(devpriv->in_buf[0]);
-
/* get the data from the USB bus and hand it over to comedi */
for (i = 0; i < cmd->chanlist_len; i++) {
/* transfer data, note first byte is the DIO state */
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index a19a56ee0eef..e37118321a27 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -18,21 +18,22 @@
GNU General Public License for more details.
*/
/*
-Driver: vmk80xx
-Description: Velleman USB Board Low-Level Driver
-Devices: K8055/K8061 aka VM110/VM140
-Author: Manuel Gebele <forensixs@gmx.de>
-Updated: Sun, 10 May 2009 11:14:59 +0200
-Status: works
-
-Supports:
- - analog input
- - analog output
- - digital input
- - digital output
- - counter
- - pwm
-*/
+ * Driver: vmk80xx
+ * Description: Velleman USB Board Low-Level Driver
+ * Devices: [Velleman] K8055 (K8055/VM110), K8061 (K8061/VM140),
+ * VM110 (K8055/VM110), VM140 (K8061/VM140)
+ * Author: Manuel Gebele <forensixs@gmx.de>
+ * Updated: Sun, 10 May 2009 11:14:59 +0200
+ * Status: works
+ *
+ * Supports:
+ * - analog input
+ * - analog output
+ * - digital input
+ * - digital output
+ * - counter
+ * - pwm
+ */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -41,10 +42,9 @@ Supports:
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/poll.h>
-#include <linux/usb.h>
#include <linux/uaccess.h>
-#include "../comedidev.h"
+#include "../comedi_usb.h"
enum {
DEVICE_VMK8055,
@@ -550,41 +550,35 @@ static int vmk80xx_cnt_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
- unsigned int insn_cmd;
- int chan;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int cmd;
int reg;
- int n;
-
- insn_cmd = data[0];
- if (insn_cmd != INSN_CONFIG_RESET && insn_cmd != GPCT_RESET)
- return -EINVAL;
+ int ret;
down(&devpriv->limit_sem);
-
- chan = CR_CHAN(insn->chanspec);
-
- if (devpriv->model == VMK8055_MODEL) {
- if (!chan) {
- cmd = VMK8055_CMD_RST_CNT1;
- reg = VMK8055_CNT1_REG;
+ switch (data[0]) {
+ case INSN_CONFIG_RESET:
+ if (devpriv->model == VMK8055_MODEL) {
+ if (!chan) {
+ cmd = VMK8055_CMD_RST_CNT1;
+ reg = VMK8055_CNT1_REG;
+ } else {
+ cmd = VMK8055_CMD_RST_CNT2;
+ reg = VMK8055_CNT2_REG;
+ }
+ devpriv->usb_tx_buf[reg] = 0x00;
} else {
- cmd = VMK8055_CMD_RST_CNT2;
- reg = VMK8055_CNT2_REG;
+ cmd = VMK8061_CMD_RST_CNT;
}
-
- devpriv->usb_tx_buf[reg] = 0x00;
- } else {
- cmd = VMK8061_CMD_RST_CNT;
+ ret = vmk80xx_write_packet(dev, cmd);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- for (n = 0; n < insn->n; n++)
- if (vmk80xx_write_packet(dev, cmd))
- break;
-
up(&devpriv->limit_sem);
- return n;
+ return ret ? ret : insn->n;
}
static int vmk80xx_cnt_insn_write(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
new file mode 100644
index 000000000000..7be53109cc8d
--- /dev/null
+++ b/drivers/staging/comedi/drivers/z8536.h
@@ -0,0 +1,202 @@
+/*
+ * Z8536 CIO Internal registers
+ */
+
+#ifndef _Z8536_H
+#define _Z8536_H
+
+/* Master Interrupt Control register */
+#define Z8536_INT_CTRL_REG 0x00
+#define Z8536_INT_CTRL_MIE BIT(7) /* Master Interrupt Enable */
+#define Z8536_INT_CTRL_DLC BIT(6) /* Disable Lower Chain */
+#define Z8536_INT_CTRL_NV BIT(5) /* No Vector */
+#define Z8536_INT_CTRL_PA_VIS BIT(4) /* Port A Vect Inc Status */
+#define Z8536_INT_CTRL_PB_VIS BIT(3) /* Port B Vect Inc Status */
+#define Z8536_INT_CTRL_VT_VIS BIT(2) /* C/T Vect Inc Status */
+#define Z8536_INT_CTRL_RJA BIT(1) /* Right Justified Addresses */
+#define Z8536_INT_CTRL_RESET BIT(0) /* Reset */
+
+/* Master Configuration Control register */
+#define Z8536_CFG_CTRL_REG 0x01
+#define Z8536_CFG_CTRL_PBE BIT(7) /* Port B Enable */
+#define Z8536_CFG_CTRL_CT1E BIT(6) /* C/T 1 Enable */
+#define Z8536_CFG_CTRL_CT2E BIT(5) /* C/T 2 Enable */
+#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */
+#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */
+#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */
+#define Z8536_CFG_CTRL_LC_INDEP (0 << 0)/* C/Ts Independent */
+#define Z8536_CFG_CTRL_LC_GATE (1 << 0)/* C/T 1 Out Gates C/T 2 */
+#define Z8536_CFG_CTRL_LC_TRIG (2 << 0)/* C/T 1 Out Triggers C/T 2 */
+#define Z8536_CFG_CTRL_LC_CLK (3 << 0)/* C/T 1 Out Clocks C/T 2 */
+#define Z8536_CFG_CTRL_LC_MASK (3 << 0)/* C/T Link Control mask */
+
+/* Interrupt Vector registers */
+#define Z8536_PA_INT_VECT_REG 0x02
+#define Z8536_PB_INT_VECT_REG 0x03
+#define Z8536_CT_INT_VECT_REG 0x04
+#define Z8536_CURR_INT_VECT_REG 0x1f
+
+/* Port A/B & Counter/Timer 1/2/3 Command and Status registers */
+#define Z8536_PA_CMDSTAT_REG 0x08
+#define Z8536_PB_CMDSTAT_REG 0x09
+#define Z8536_CT1_CMDSTAT_REG 0x0a
+#define Z8536_CT2_CMDSTAT_REG 0x0b
+#define Z8536_CT3_CMDSTAT_REG 0x0c
+#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x))
+#define Z8536_CMD_NULL (0 << 5)/* Null Code */
+#define Z8536_CMD_CLR_IP_IUS (1 << 5)/* Clear IP & IUS */
+#define Z8536_CMD_SET_IUS (2 << 5)/* Set IUS */
+#define Z8536_CMD_CLR_IUS (3 << 5)/* Clear IUS */
+#define Z8536_CMD_SET_IP (4 << 5)/* Set IP */
+#define Z8536_CMD_CLR_IP (5 << 5)/* Clear IP */
+#define Z8536_CMD_SET_IE (6 << 5)/* Set IE */
+#define Z8536_CMD_CLR_IE (7 << 5)/* Clear IE */
+#define Z8536_CMD_MASK (7 << 5)
+
+#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */
+#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */
+#define Z8536_STAT_IP BIT(5) /* Interrupt Pending */
+#define Z8536_STAT_ERR BIT(4) /* Interrupt Error */
+#define Z8536_STAT_IE_IP (Z8536_STAT_IE | Z8536_STAT_IP)
+
+#define Z8536_PAB_STAT_ORE BIT(3) /* Output Register Empty */
+#define Z8536_PAB_STAT_IRF BIT(2) /* Input Register Full */
+#define Z8536_PAB_STAT_PMF BIT(1) /* Pattern Match Flag */
+#define Z8536_PAB_CMDSTAT_IOE BIT(0) /* Interrupt On Error */
+
+#define Z8536_CT_CMD_RCC BIT(3) /* Read Counter Control */
+#define Z8536_CT_CMDSTAT_GCB BIT(2) /* Gate Command Bit */
+#define Z8536_CT_CMD_TCB BIT(1) /* Trigger Command Bit */
+#define Z8536_CT_STAT_CIP BIT(0) /* Count In Progress */
+
+/* Port Data registers */
+#define Z8536_PA_DATA_REG 0x0d
+#define Z8536_PB_DATA_REG 0x0e
+#define Z8536_PC_DATA_REG 0x0f
+
+/* Counter/Timer 1/2/3 Current Count registers */
+#define Z8536_CT1_VAL_MSB_REG 0x10
+#define Z8536_CT1_VAL_LSB_REG 0x11
+#define Z8536_CT2_VAL_MSB_REG 0x12
+#define Z8536_CT2_VAL_LSB_REG 0x13
+#define Z8536_CT3_VAL_MSB_REG 0x14
+#define Z8536_CT3_VAL_LSB_REG 0x15
+#define Z8536_CT_VAL_MSB_REG(x) (0x10 + ((x) * 2))
+#define Z8536_CT_VAL_LSB_REG(x) (0x11 + ((x) * 2))
+
+/* Counter/Timer 1/2/3 Time Constant registers */
+#define Z8536_CT1_RELOAD_MSB_REG 0x16
+#define Z8536_CT1_RELOAD_LSB_REG 0x17
+#define Z8536_CT2_RELOAD_MSB_REG 0x18
+#define Z8536_CT2_RELOAD_LSB_REG 0x19
+#define Z8536_CT3_RELOAD_MSB_REG 0x1a
+#define Z8536_CT3_RELOAD_LSB_REG 0x1b
+#define Z8536_CT_RELOAD_MSB_REG(x) (0x16 + ((x) * 2))
+#define Z8536_CT_RELOAD_LSB_REG(x) (0x17 + ((x) * 2))
+
+/* Counter/Timer 1/2/3 Mode Specification registers */
+#define Z8536_CT1_MODE_REG 0x1c
+#define Z8536_CT2_MODE_REG 0x1d
+#define Z8536_CT3_MODE_REG 0x1e
+#define Z8536_CT_MODE_REG(x) (0x1c + (x))
+#define Z8536_CT_MODE_CSC BIT(7) /* Continuous/Single Cycle */
+#define Z8536_CT_MODE_EOE BIT(6) /* External Output Enable */
+#define Z8536_CT_MODE_ECE BIT(5) /* External Count Enable */
+#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */
+#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */
+#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */
+#define Z8536_CT_MODE_DCS_PULSE (0 << 0)/* Duty Cycle - Pulse */
+#define Z8536_CT_MODE_DCS_ONESHOT (1 << 0)/* Duty Cycle - One-Shot */
+#define Z8536_CT_MODE_DCS_SQRWAVE (2 << 0)/* Duty Cycle - Square Wave */
+#define Z8536_CT_MODE_DCS_DO_NOT_USE (3 << 0)/* Duty Cycle - Do Not Use */
+#define Z8536_CT_MODE_DCS_MASK (3 << 0)/* Duty Cycle mask */
+
+/* Port A/B Mode Specification registers */
+#define Z8536_PA_MODE_REG 0x20
+#define Z8536_PB_MODE_REG 0x28
+#define Z8536_PAB_MODE_PTS_BIT (0 << 6)/* Bit Port */
+#define Z8536_PAB_MODE_PTS_INPUT (1 << 6)/* Input Port */
+#define Z8536_PAB_MODE_PTS_OUTPUT (2 << 6)/* Output Port */
+#define Z8536_PAB_MODE_PTS_BIDIR (3 << 6)/* Bidirectional Port */
+#define Z8536_PAB_MODE_PTS_MASK (3 << 6)/* Port Type Select mask */
+#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */
+#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */
+#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */
+#define Z8536_PAB_MODE_PMS_DISABLE (0 << 1)/* Disable Pattern Match */
+#define Z8536_PAB_MODE_PMS_AND (1 << 1)/* "AND" mode */
+#define Z8536_PAB_MODE_PMS_OR (2 << 1)/* "OR" mode */
+#define Z8536_PAB_MODE_PMS_OR_PEV (3 << 1)/* "OR-Priority" mode */
+#define Z8536_PAB_MODE_PMS_MASK (3 << 1)/* Pattern Mode mask */
+#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */
+#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */
+
+/* Port A/B Handshake Specification registers */
+#define Z8536_PA_HANDSHAKE_REG 0x21
+#define Z8536_PB_HANDSHAKE_REG 0x29
+#define Z8536_PAB_HANDSHAKE_HST_INTER (0 << 6)/* Interlocked Handshake */
+#define Z8536_PAB_HANDSHAKE_HST_STROBED (1 << 6)/* Strobed Handshake */
+#define Z8536_PAB_HANDSHAKE_HST_PULSED (2 << 6)/* Pulsed Handshake */
+#define Z8536_PAB_HANDSHAKE_HST_3WIRE (3 << 6)/* Three-Wire Handshake */
+#define Z8536_PAB_HANDSHAKE_HST_MASK (3 << 6)/* Handshake Type mask */
+#define Z8536_PAB_HANDSHAKE_RWS_DISABLE (0 << 3)/* Req/Wait Disabled */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT (1 << 3)/* Output Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_INWAIT (3 << 3)/* Input Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_SPREQ (4 << 3)/* Special Request */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ (5 << 3)/* Output Request */
+#define Z8536_PAB_HANDSHAKE_RWS_INREQ (7 << 3)/* Input Request */
+#define Z8536_PAB_HANDSHAKE_RWS_MASK (7 << 3)/* Req/Wait mask */
+#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */
+#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */
+
+/*
+ * Port A/B/C Data Path Polarity registers
+ *
+ * 0 = Non-Inverting
+ * 1 = Inverting
+ */
+#define Z8536_PA_DPP_REG 0x22
+#define Z8536_PB_DPP_REG 0x2a
+#define Z8536_PC_DPP_REG 0x05
+
+/*
+ * Port A/B/C Data Direction registers
+ *
+ * 0 = Output bit
+ * 1 = Input bit
+ */
+#define Z8536_PA_DD_REG 0x23
+#define Z8536_PB_DD_REG 0x2b
+#define Z8536_PC_DD_REG 0x06
+
+/*
+ * Port A/B/C Special I/O Control registers
+ *
+ * 0 = Normal Input or Output
+ * 1 = Output with open drain or Input with 1's catcher
+ */
+#define Z8536_PA_SIO_REG 0x24
+#define Z8536_PB_SIO_REG 0x2c
+#define Z8536_PC_SIO_REG 0x07
+
+/*
+ * Port A/B Pattern Polarity/Transition/Mask registers
+ *
+ * PM PT PP Pattern Specification
+ * -- -- -- -------------------------------------
+ * 0 0 x Bit masked off
+ * 0 1 x Any transition
+ * 1 0 0 Zero (low-level)
+ * 1 0 1 One (high-level)
+ * 1 1 0 One-to-zero transition (falling-edge)
+ * 1 1 1 Zero-to-one transition (rising-edge)
+ */
+#define Z8536_PA_PP_REG 0x25
+#define Z8536_PB_PP_REG 0x2d
+
+#define Z8536_PA_PT_REG 0x26
+#define Z8536_PB_PT_REG 0x2e
+
+#define Z8536_PA_PM_REG 0x27
+#define Z8536_PB_PM_REG 0x2f
+
+#endif /* _Z8536_H */
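
The PM/PT/PP table at the end of the new header maps three pattern bits to an edge or level condition per port bit. The stand-alone snippet below only exercises the bit arithmetic implied by that table, building the Port A pattern register values for a rising edge on bit 3 and a low level on bit 0; how the values reach the chip (via its control port) is not shown here.

#include <stdio.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	unsigned int pm = 0, pt = 0, pp = 0;

	/* bit 3: PM=1 PT=1 PP=1 -> zero-to-one transition (rising edge) */
	pm |= BIT(3);
	pt |= BIT(3);
	pp |= BIT(3);

	/* bit 0: PM=1 PT=0 PP=0 -> zero (low level) */
	pm |= BIT(0);

	printf("PA_PM=0x%02x PA_PT=0x%02x PA_PP=0x%02x\n", pm, pt, pp);
	return 0;
}
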
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 8777f958c041..973f544e85e1 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -95,7 +95,7 @@ static int comedi_do_insn(struct comedi_device *dev,
if (s->type == COMEDI_SUBD_UNUSED) {
dev_err(dev->class_dev,
- "%d not useable subdevice\n", insn->subdev);
+ "%d not usable subdevice\n", insn->subdev);
ret = -EIO;
goto error;
}
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 9a1dc56f21d1..6a393b24bdd9 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -1,20 +1,20 @@
/*
- module/range.c
- comedi routines for voltage ranges
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/range.c
+ * comedi routines for voltage ranges
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/uaccess.h>
#include "comedidev.h"
@@ -42,18 +42,18 @@ const struct comedi_lrange range_unknown = { 1, {{0, 1000000, UNIT_none} } };
EXPORT_SYMBOL_GPL(range_unknown);
/*
- COMEDI_RANGEINFO
- range information ioctl
-
- arg:
- pointer to rangeinfo structure
-
- reads:
- range info structure
-
- writes:
- n struct comedi_krange structures to rangeinfo->range_ptr
-*/
+ * COMEDI_RANGEINFO ioctl
+ * range information
+ *
+ * arg:
+ * pointer to comedi_rangeinfo structure
+ *
+ * reads:
+ * comedi_rangeinfo structure
+ *
+ * writes:
+ * array of comedi_krange structures to rangeinfo->range_ptr pointer
+ */
int do_rangeinfo_ioctl(struct comedi_device *dev,
struct comedi_rangeinfo __user *arg)
{
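
The rewritten comment above documents what the COMEDI_RANGEINFO ioctl reads and writes. User space normally consumes this through comedilib rather than the raw ioctl; a minimal sketch follows, assuming a board attached on /dev/comedi0 and the standard comedilib calls (comedi_open, comedi_get_n_ranges, comedi_get_range), which are built on the same range data that the ioctl exposes.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	int n, i;

	if (!dev)
		return 1;

	n = comedi_get_n_ranges(dev, 0, 0);	/* subdevice 0, channel 0 */
	for (i = 0; i < n; i++) {
		comedi_range *r = comedi_get_range(dev, 0, 0, i);

		if (r)
			printf("range %d: %g .. %g\n", i, r->min, r->max);
	}
	comedi_close(dev);
	return 0;
}
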
diff --git a/drivers/staging/cptm1217/Kconfig b/drivers/staging/cptm1217/Kconfig
deleted file mode 100644
index 43b1cc0a50a5..000000000000
--- a/drivers/staging/cptm1217/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config TOUCHSCREEN_CLEARPAD_TM1217
- tristate "Synaptics Clearpad TM1217"
- depends on I2C
- depends on GPIOLIB
- depends on INPUT
- help
- Say Y here if you have a Synaptics Clearpad TM1217 Controller
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called clearpad_tm1217.
diff --git a/drivers/staging/cptm1217/Makefile b/drivers/staging/cptm1217/Makefile
deleted file mode 100644
index 8961fafa80e7..000000000000
--- a/drivers/staging/cptm1217/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += clearpad_tm1217.o
-
diff --git a/drivers/staging/cptm1217/TODO b/drivers/staging/cptm1217/TODO
deleted file mode 100644
index 303922465e4d..000000000000
--- a/drivers/staging/cptm1217/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-- Wait for the official upstream general clearpad drivers as promised over
- the past few months
-- Merge any device support needed from this driver into it
-- Delete this driver
-
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
deleted file mode 100644
index 7f265ce0dd13..000000000000
--- a/drivers/staging/cptm1217/clearpad_tm1217.c
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * clearpad_tm1217.c - Touch Screen driver for Synaptics Clearpad
- * TM1217 controller
- *
- * Copyright (C) 2008 Intel Corp
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; ifnot, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * Questions/Comments/Bug fixes to Ramesh Agarwal (ramesh.agarwal@intel.com)
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/timer.h>
-#include <linux/gpio.h>
-#include <linux/hrtimer.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include "cp_tm1217.h"
-
-#define CPTM1217_DEVICE_NAME "cptm1217"
-#define CPTM1217_DRIVER_NAME CPTM1217_DEVICE_NAME
-
-#define MAX_TOUCH_SUPPORTED 2
-#define TOUCH_SUPPORTED 1
-#define SAMPLING_FREQ 80 /* Frequency in HZ */
-#define DELAY_BTWIN_SAMPLE (1000 / SAMPLING_FREQ)
-#define WAIT_FOR_RESPONSE 5 /* 5msec just works */
-#define MAX_RETRIES 5 /* As above */
-#define INCREMENTAL_DELAY 5 /* As above */
-
-/* Regster Definitions */
-#define TMA1217_DEV_STATUS 0x13 /* Device Status */
-#define TMA1217_INT_STATUS 0x14 /* Interrupt Status */
-
-/* Controller can detect up to 2 possible finger touches.
- * Each finger touch provides 12 bit X Y co-ordinates, the values are split
- * across 2 registers, and an 8 bit Z value */
-#define TMA1217_FINGER_STATE 0x18 /* Finger State */
-#define TMA1217_FINGER1_X_HIGHER8 0x19 /* Higher 8 bit of X coordinate */
-#define TMA1217_FINGER1_Y_HIGHER8 0x1A /* Higher 8 bit of Y coordinate */
-#define TMA1217_FINGER1_XY_LOWER4 0x1B /* Lower 4 bits of X and Y */
-#define TMA1217_FINGER1_Z_VALUE 0x1D /* 8 bit Z value for finger 1 */
-#define TMA1217_FINGER2_X_HIGHER8 0x1E /* Higher 8 bit of X coordinate */
-#define TMA1217_FINGER2_Y_HIGHER8 0x1F /* Higher 8 bit of Y coordinate */
-#define TMA1217_FINGER2_XY_LOWER4 0x20 /* Lower 4 bits of X and Y */
-#define TMA1217_FINGER2_Z_VALUE 0x22 /* 8 bit Z value for finger 2 */
-#define TMA1217_DEVICE_CTRL 0x23 /* Device Control */
-#define TMA1217_INTERRUPT_ENABLE 0x24 /* Interrupt Enable */
-#define TMA1217_REPORT_MODE 0x2B /* Reporting Mode */
-#define TMA1217_MAX_X_LOWER8 0x31 /* Bit 0-7 for Max X */
-#define TMA1217_MAX_X_HIGHER4 0x32 /* Bit 8-11 for Max X */
-#define TMA1217_MAX_Y_LOWER8 0x33 /* Bit 0-7 for Max Y */
-#define TMA1217_MAX_Y_HIGHER4 0x34 /* Bit 8-11 for Max Y */
-#define TMA1217_DEVICE_CMD_RESET 0x67 /* Device CMD reg for reset */
-#define TMA1217_DEVICE_CMD_REZERO 0x69 /* Device CMD reg for rezero */
-
-#define TMA1217_MANUFACTURER_ID 0x73 /* Manufacturer Id */
-#define TMA1217_PRODUCT_FAMILY 0x75 /* Product Family */
-#define TMA1217_FIRMWARE_REVISION 0x76 /* Firmware Revision */
-#define TMA1217_SERIAL_NO_HIGH 0x7C /* Bit 8-15 of device serial no. */
-#define TMA1217_SERIAL_NO_LOW 0x7D /* Bit 0-7 of device serial no. */
-#define TMA1217_PRODUCT_ID_START 0x7E /* Start address for 10 byte ID */
-#define TMA1217_DEVICE_CAPABILITY 0x8B /* Reporting capability */
-
-
-/*
- * The touch position structure.
- */
-struct touch_state {
- int x;
- int y;
- bool button;
-};
-
-/* Device Specific info given by the controller */
-struct cp_dev_info {
- u16 maxX;
- u16 maxY;
-};
-
-/* Vendor related info given by the controller */
-struct cp_vendor_info {
- u8 vendor_id;
- u8 product_family;
- u8 firmware_rev;
- u16 serial_no;
-};
-
-/*
- * Private structure to store the device details
- */
-struct cp_tm1217_device {
- struct i2c_client *client;
- struct device *dev;
- struct cp_vendor_info vinfo;
- struct cp_dev_info dinfo;
- struct input_dev_info {
- char phys[32];
- char name[128];
- struct input_dev *input;
- struct touch_state touch;
- } cp_input_info[MAX_TOUCH_SUPPORTED];
-
- int thread_running;
- struct mutex thread_mutex;
-
- int gpio;
-};
-
-
-/* The following functions are used to read/write registers on the device
- * as per the RMI prorocol. Technically, a page select should be written
- * before doing read/write but since the register offsets are below 0xFF
- * we can use the default value of page which is 0x00
- */
-static int cp_tm1217_read(struct cp_tm1217_device *ts,
- u8 *req, int size)
-{
- int i, retval;
-
- /* Send the address */
- retval = i2c_master_send(ts->client, &req[0], 1);
- if (retval != 1) {
- dev_err(ts->dev, "cp_tm1217: I2C send failed\n");
- return retval;
- }
- msleep(WAIT_FOR_RESPONSE);
- for (i = 0; i < MAX_RETRIES; i++) {
- retval = i2c_master_recv(ts->client, &req[1], size);
- if (retval == size)
- break;
-
- msleep(INCREMENTAL_DELAY);
- dev_dbg(ts->dev, "cp_tm1217: Retry count is %d\n", i);
- }
- if (retval != size)
- dev_err(ts->dev, "cp_tm1217: Read from device failed\n");
-
- return retval;
-}
-
-static int cp_tm1217_write(struct cp_tm1217_device *ts,
- u8 *req, int size)
-{
- int retval;
-
- /* Send the address and the data to be written */
- retval = i2c_master_send(ts->client, &req[0], size + 1);
- if (retval != size + 1) {
- dev_err(ts->dev, "cp_tm1217: I2C write failed: %d\n", retval);
- return retval;
- }
- /* Wait for the write to complete. TBD why this is required */
- msleep(WAIT_FOR_RESPONSE);
-
- return size;
-}
-
-static int cp_tm1217_mask_interrupt(struct cp_tm1217_device *ts)
-{
- u8 req[2];
- int retval;
-
- req[0] = TMA1217_INTERRUPT_ENABLE;
- req[1] = 0x0;
- retval = cp_tm1217_write(ts, req, 1);
- if (retval != 1)
- return -EIO;
-
- return 0;
-}
-
-static int cp_tm1217_unmask_interrupt(struct cp_tm1217_device *ts)
-{
- u8 req[2];
- int retval;
-
- req[0] = TMA1217_INTERRUPT_ENABLE;
- req[1] = 0xa;
- retval = cp_tm1217_write(ts, req, 1);
- if (retval != 1)
- return -EIO;
-
- return 0;
-}
-
-static void process_touch(struct cp_tm1217_device *ts, int index)
-{
- int retval;
- struct input_dev_info *input_info =
- (struct input_dev_info *)&ts->cp_input_info[index];
- u8 xy_data[6];
-
- if (index == 0)
- xy_data[0] = TMA1217_FINGER1_X_HIGHER8;
- else
- xy_data[0] = TMA1217_FINGER2_X_HIGHER8;
-
- retval = cp_tm1217_read(ts, xy_data, 5);
- if (retval < 5) {
- dev_err(ts->dev, "cp_tm1217: XY read from device failed\n");
- return;
- }
-
- /* Note: Currently not using the Z values but may be requried in
- the future. */
- input_info->touch.x = (xy_data[1] << 4)
- | (xy_data[3] & 0x0F);
- input_info->touch.y = (xy_data[2] << 4)
- | ((xy_data[3] & 0xF0) >> 4);
- input_report_abs(input_info->input, ABS_X, input_info->touch.x);
- input_report_abs(input_info->input, ABS_Y, input_info->touch.y);
- input_sync(input_info->input);
-}
-
-static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
-{
- u8 req[2];
- int retval, i, finger_touched = 0;
-
- do {
- req[0] = TMA1217_FINGER_STATE;
- retval = cp_tm1217_read(ts, req, 1);
- if (retval != 1) {
- dev_err(ts->dev,
- "cp_tm1217: Read from device failed\n");
- continue;
- }
- finger_touched = 0;
- /* Start sampling until the pressure is below
- threshold */
- for (i = 0; i < TOUCH_SUPPORTED; i++) {
- if (req[1] & 0x3) {
- finger_touched++;
- if (ts->cp_input_info[i].touch.button == 0) {
- /* send the button touch event */
- input_report_key(
- ts->cp_input_info[i].input,
- BTN_TOUCH, 1);
- ts->cp_input_info[i].touch.button = 1;
- }
- process_touch(ts, i);
- } else {
- if (ts->cp_input_info[i].touch.button == 1) {
- /* send the button release event */
- input_report_key(
- ts->cp_input_info[i].input,
- BTN_TOUCH, 0);
- input_sync(ts->cp_input_info[i].input);
- ts->cp_input_info[i].touch.button = 0;
- }
- }
- req[1] = req[1] >> 2;
- }
- msleep(DELAY_BTWIN_SAMPLE);
- } while (finger_touched > 0);
-}
-
-static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
-{
- struct cp_tm1217_device *ts = handle;
- u8 req[2];
- int retval;
-
- /* Chedk if another thread is already running */
- mutex_lock(&ts->thread_mutex);
- if (ts->thread_running == 1) {
- mutex_unlock(&ts->thread_mutex);
- return IRQ_HANDLED;
- }
-
- ts->thread_running = 1;
- mutex_unlock(&ts->thread_mutex);
-
- /* Mask the interrupts */
- retval = cp_tm1217_mask_interrupt(ts);
-
- /* Read the Interrupt Status register to find the cause of the
- Interrupt */
- req[0] = TMA1217_INT_STATUS;
- retval = cp_tm1217_read(ts, req, 1);
- if (retval != 1)
- goto exit_thread;
-
- if (!(req[1] & 0x8))
- goto exit_thread;
-
- cp_tm1217_get_data(ts);
-
-exit_thread:
- /* Unmask the interrupts before going to sleep */
- retval = cp_tm1217_unmask_interrupt(ts);
-
- mutex_lock(&ts->thread_mutex);
- ts->thread_running = 0;
- mutex_unlock(&ts->thread_mutex);
-
- return IRQ_HANDLED;
-}
-
-static int cp_tm1217_init_data(struct cp_tm1217_device *ts)
-{
- int retval;
- u8 req[2];
-
- /* Read the vendor id/ fw revision etc. Ignoring return check as this
- is non critical info */
- req[0] = TMA1217_MANUFACTURER_ID;
- retval = cp_tm1217_read(ts, req, 1);
- ts->vinfo.vendor_id = req[1];
-
- req[0] = TMA1217_PRODUCT_FAMILY;
- retval = cp_tm1217_read(ts, req, 1);
- ts->vinfo.product_family = req[1];
-
- req[0] = TMA1217_FIRMWARE_REVISION;
- retval = cp_tm1217_read(ts, req, 1);
- ts->vinfo.firmware_rev = req[1];
-
- req[0] = TMA1217_SERIAL_NO_HIGH;
- retval = cp_tm1217_read(ts, req, 1);
- ts->vinfo.serial_no = (req[1] << 8);
-
- req[0] = TMA1217_SERIAL_NO_LOW;
- retval = cp_tm1217_read(ts, req, 1);
- ts->vinfo.serial_no = ts->vinfo.serial_no | req[1];
-
- req[0] = TMA1217_MAX_X_HIGHER4;
- retval = cp_tm1217_read(ts, req, 1);
- ts->dinfo.maxX = (req[1] & 0xF) << 8;
-
- req[0] = TMA1217_MAX_X_LOWER8;
- retval = cp_tm1217_read(ts, req, 1);
- ts->dinfo.maxX = ts->dinfo.maxX | req[1];
-
- req[0] = TMA1217_MAX_Y_HIGHER4;
- retval = cp_tm1217_read(ts, req, 1);
- ts->dinfo.maxY = (req[1] & 0xF) << 8;
-
- req[0] = TMA1217_MAX_Y_LOWER8;
- retval = cp_tm1217_read(ts, req, 1);
- ts->dinfo.maxY = ts->dinfo.maxY | req[1];
-
- return 0;
-
-}
-
-/*
- * Set up a GPIO for use as the interrupt. We can't simply do this at
- * boot time because the GPIO drivers themselves may not be around at
- * boot/firmware set up time to do the work. Instead defer it to driver
- * detection.
- */
-
-static int cp_tm1217_setup_gpio_irq(struct cp_tm1217_device *ts)
-{
- int retval;
-
- /* Hook up the irq handler */
- retval = gpio_request(ts->gpio, "cp_tm1217_touch");
- if (retval < 0) {
- dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
- retval);
- return retval;
- }
-
- retval = gpio_direction_input(ts->gpio);
- if (retval < 0) {
- dev_err(ts->dev,
- "cp_tm1217: GPIO direction configuration failed, error %d\n",
- retval);
- gpio_free(ts->gpio);
- return retval;
- }
-
- retval = gpio_to_irq(ts->gpio);
- if (retval < 0) {
- dev_err(ts->dev,
- "cp_tm1217: GPIO to IRQ failed, error %d\n", retval);
- gpio_free(ts->gpio);
- }
- dev_dbg(ts->dev,
- "cp_tm1217: Got IRQ number is %d for GPIO %d\n",
- retval, ts->gpio);
- return retval;
-}
-
-static int cp_tm1217_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct cp_tm1217_device *ts;
- struct input_dev *input_dev;
- struct input_dev_info *input_info;
- struct cp_tm1217_platform_data *pdata;
- u8 req[2];
- int i, retval;
-
- /* No pdata is fine - we then use "normal" IRQ mode */
-
- pdata = client->dev.platform_data;
-
- ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL);
- if (!ts)
- return -ENOMEM;
-
- ts->client = client;
- ts->dev = &client->dev;
- i2c_set_clientdata(client, ts);
-
- ts->thread_running = 0;
- mutex_init(&ts->thread_mutex);
-
- /* Reset the Controller */
- req[0] = TMA1217_DEVICE_CMD_RESET;
- req[1] = 0x1;
- retval = cp_tm1217_write(ts, req, 1);
- if (retval != 1) {
- dev_err(ts->dev, "cp_tm1217: Controller reset failed\n");
- kfree(ts);
- return -EIO;
- }
-
- /* Clear up the interrupt status from reset. */
- req[0] = TMA1217_INT_STATUS;
- retval = cp_tm1217_read(ts, req, 1);
-
- /* Mask all the interrupts */
- retval = cp_tm1217_mask_interrupt(ts);
-
- /* Read the controller information */
- cp_tm1217_init_data(ts);
-
- /* The following code will register multiple event devices when
- multi-pointer is enabled, the code has not been tested
- with MPX */
- for (i = 0; i < TOUCH_SUPPORTED; i++) {
- input_dev = input_allocate_device();
- if (input_dev == NULL) {
- retval = -ENOMEM;
- goto fail;
- }
- input_info = &ts->cp_input_info[i];
- snprintf(input_info->name, sizeof(input_info->name),
- "cp_tm1217_touchscreen_%d", i);
- input_dev->name = input_info->name;
- snprintf(input_info->phys, sizeof(input_info->phys),
- "%s/input%d", dev_name(&client->dev), i);
-
- input_dev->phys = input_info->phys;
- input_dev->id.bustype = BUS_I2C;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, ts->dinfo.maxX, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, ts->dinfo.maxY, 0, 0);
-
- retval = input_register_device(input_dev);
- if (retval) {
- dev_err(ts->dev,
- "Input dev registration failed for %s\n",
- input_dev->name);
- input_free_device(input_dev);
- goto fail;
- }
- input_info->input = input_dev;
- }
-
- /* Setup the reporting mode to send an interrupt only when
- finger arrives or departs. */
- req[0] = TMA1217_REPORT_MODE;
- req[1] = 0x02;
- retval = cp_tm1217_write(ts, req, 1);
-
- /* Setup the device to no sleep mode for now and make it configured */
- req[0] = TMA1217_DEVICE_CTRL;
- req[1] = 0x84;
- retval = cp_tm1217_write(ts, req, 1);
-
- /* Check for the status of the device */
- req[0] = TMA1217_DEV_STATUS;
- retval = cp_tm1217_read(ts, req, 1);
- if (req[1] != 0) {
- dev_err(ts->dev,
- "cp_tm1217: Device Status 0x%x != 0: config failed\n",
- req[1]);
-
- retval = -EIO;
- goto fail;
- }
-
- if (pdata && pdata->gpio) {
- ts->gpio = pdata->gpio;
- retval = cp_tm1217_setup_gpio_irq(ts);
- } else
- retval = client->irq;
-
- if (retval < 0) {
- dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
- retval);
- goto fail;
- }
-
- client->irq = retval;
-
-
- retval = request_threaded_irq(client->irq,
- NULL, cp_tm1217_sample_thread,
- IRQF_TRIGGER_FALLING, "cp_tm1217_touch", ts);
- if (retval < 0) {
- dev_err(ts->dev, "cp_tm1217: Request IRQ error %d\n", retval);
- goto fail_gpio;
- }
-
- /* Unmask the interrupts */
- retval = cp_tm1217_unmask_interrupt(ts);
- if (retval == 0)
- return 0;
-
- free_irq(client->irq, ts);
-fail_gpio:
- if (ts->gpio)
- gpio_free(ts->gpio);
-fail:
- /* Clean up before returning failure */
- for (i = 0; i < TOUCH_SUPPORTED; i++) {
- if (ts->cp_input_info[i].input)
- input_unregister_device(ts->cp_input_info[i].input);
- }
- kfree(ts);
- return retval;
-
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-/*
- * cp_tm1217 suspend
- *
- */
-static int cp_tm1217_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct cp_tm1217_device *ts = i2c_get_clientdata(client);
- u8 req[2];
- int retval;
-
- /* Put the controller to sleep */
- req[0] = TMA1217_DEVICE_CTRL;
- retval = cp_tm1217_read(ts, req, 1);
- req[1] = (req[1] & 0xF8) | 0x1;
- retval = cp_tm1217_write(ts, req, 1);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
-
- return 0;
-}
-
-/*
- * cp_tm1217_resume
- *
- */
-static int cp_tm1217_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct cp_tm1217_device *ts = i2c_get_clientdata(client);
- u8 req[2];
- int retval;
-
- /* Take the controller out of sleep */
- req[0] = TMA1217_DEVICE_CTRL;
- retval = cp_tm1217_read(ts, req, 1);
- req[1] = (req[1] & 0xF8) | 0x4;
- retval = cp_tm1217_write(ts, req, 1);
-
- /* Restore the register settings sinc the power to the
- could have been cut off */
-
- /* Setup the reporting mode to send an interrupt only when
- finger arrives or departs. */
- req[0] = TMA1217_REPORT_MODE;
- req[1] = 0x02;
- retval = cp_tm1217_write(ts, req, 1);
-
- /* Setup the device to no sleep mode for now and make it configured */
- req[0] = TMA1217_DEVICE_CTRL;
- req[1] = 0x84;
- retval = cp_tm1217_write(ts, req, 1);
-
- /* Setup the interrupt mask */
- retval = cp_tm1217_unmask_interrupt(ts);
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
-
- return 0;
-}
-
-#endif
-
-static SIMPLE_DEV_PM_OPS(cp_tm1217_pm_ops, cp_tm1217_suspend,
- cp_tm1217_resume);
-
-/*
- * cp_tm1217_remove
- *
- */
-static int cp_tm1217_remove(struct i2c_client *client)
-{
- struct cp_tm1217_device *ts = i2c_get_clientdata(client);
- int i;
-
- free_irq(client->irq, ts);
- if (ts->gpio)
- gpio_free(ts->gpio);
- for (i = 0; i < TOUCH_SUPPORTED; i++)
- input_unregister_device(ts->cp_input_info[i].input);
- kfree(ts);
- return 0;
-}
-
-static struct i2c_device_id cp_tm1217_idtable[] = {
- { CPTM1217_DEVICE_NAME, 0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(i2c, cp_tm1217_idtable);
-
-static struct i2c_driver cp_tm1217_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = CPTM1217_DRIVER_NAME,
- .pm = &cp_tm1217_pm_ops,
- },
- .id_table = cp_tm1217_idtable,
- .probe = cp_tm1217_probe,
- .remove = cp_tm1217_remove,
-};
-
-module_i2c_driver(cp_tm1217_driver);
-
-MODULE_AUTHOR("Ramesh Agarwal <ramesh.agarwal@intel.com>");
-MODULE_DESCRIPTION("Synaptics TM1217 TouchScreen Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/cptm1217/cp_tm1217.h b/drivers/staging/cptm1217/cp_tm1217.h
deleted file mode 100644
index 30bad357a055..000000000000
--- a/drivers/staging/cptm1217/cp_tm1217.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __LINUX_I2C_CP_TM1217_H
-#define __LINUX_I2C_CP_TM1217_H
-
-struct cp_tm1217_platform_data {
- int gpio; /* If not set uses the IRQ resource 0 */
-};
-
-#endif
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
index bdb5317e3d9d..7184747e0652 100644
--- a/drivers/staging/dgap/dgap.c
+++ b/drivers/staging/dgap/dgap.c
@@ -978,8 +978,8 @@ static int dgap_parsefile(char **in)
brd->u.board.conc1++;
conc_type = dgap_gettok(in);
- if (conc_type == 0 || conc_type != CX ||
- conc_type != EPC) {
+ if (conc_type == 0 || (conc_type != CX &&
+ conc_type != EPC)) {
pr_err("failed to set a type of concentratros");
return -1;
}
@@ -1019,8 +1019,8 @@ static int dgap_parsefile(char **in)
brd->u.board.module1++;
module_type = dgap_gettok(in);
- if (module_type == 0 || module_type != PORTS ||
- module_type != MODEM) {
+ if (module_type == 0 || (module_type != PORTS &&
+ module_type != MODEM)) {
pr_err("failed to set a type of module");
return -1;
}
@@ -1400,27 +1400,27 @@ static int dgap_remap(struct board_t *brd)
return -ENOMEM;
if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
- "dgap")) {
- release_mem_region(brd->membase, 0x200000);
- return -ENOMEM;
- }
+ "dgap"))
+ goto err_req_mem;
brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- return -ENOMEM;
- }
+ if (!brd->re_map_membase)
+ goto err_remap_mem;
brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_membase);
- return -ENOMEM;
- }
+ if (!brd->re_map_port)
+ goto err_remap_port;
return 0;
+
+err_remap_port:
+ iounmap(brd->re_map_membase);
+err_remap_mem:
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+err_req_mem:
+ release_mem_region(brd->membase, 0x200000);
+
+ return -ENOMEM;
}
static void dgap_unmap(struct board_t *brd)
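The hunk above converts dgap_remap() to the kernel's usual goto-based unwind: acquire in order, release in reverse order, one label per step that can fail. A minimal stand-alone sketch of the pattern with hypothetical grab_*/release_* helpers (nothing below is driver code):

/* Hypothetical helpers, for illustration only. */
void *grab_first(void);
void *grab_second(void);
void *grab_third(void);
void release_second(void *p);
void release_first(void *p);

int example_setup(void **out)
{
	void *a, *b, *c;

	a = grab_first();
	if (!a)
		return -1;		/* nothing acquired yet, nothing to undo */

	b = grab_second();
	if (!b)
		goto err_first;

	c = grab_third();
	if (!c)
		goto err_second;

	*out = c;
	return 0;			/* success: keep everything */

err_second:
	release_second(b);		/* undo in reverse order of acquisition */
err_first:
	release_first(a);
	return -1;
}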
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index ba98ff348112..f177d3a258c2 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -97,12 +97,12 @@ static uint dgnc_poll_stop; /* Used to tell poller to stop */
static struct timer_list dgnc_poll_timer;
-static struct pci_device_id dgnc_pci_tbl[] = {
- { DIGI_VID, PCI_DEVICE_CLASSIC_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { DIGI_VID, PCI_DEVICE_CLASSIC_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { DIGI_VID, PCI_DEVICE_CLASSIC_8_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- {0,} /* 0 terminated list. */
+static const struct pci_device_id dgnc_pci_tbl[] = {
+ {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_4_DID), .driver_data = 0},
+ {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID), .driver_data = 1},
+ {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_8_DID), .driver_data = 2},
+ {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_8_422_DID), .driver_data = 3},
+ {0,}
};
MODULE_DEVICE_TABLE(pci, dgnc_pci_tbl);
@@ -238,6 +238,7 @@ static int dgnc_start(void)
{
int rc = 0;
unsigned long flags;
+ struct device *dev;
/* make sure that the globals are init'd before we do anything else */
dgnc_init_globals();
@@ -257,9 +258,20 @@ static int dgnc_start(void)
dgnc_Major = rc;
dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
- device_create(dgnc_class, NULL,
- MKDEV(dgnc_Major, 0),
- NULL, "dgnc_mgmt");
+ if (IS_ERR(dgnc_class)) {
+ rc = PTR_ERR(dgnc_class);
+ pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
+ goto failed_class;
+ }
+
+ dev = device_create(dgnc_class, NULL,
+ MKDEV(dgnc_Major, 0),
+ NULL, "dgnc_mgmt");
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ pr_err(DRVSTR ": Can't create device (%d)\n", rc);
+ goto failed_device;
+ }
/*
* Init any global tty stuff.
@@ -268,7 +280,7 @@ static int dgnc_start(void)
if (rc < 0) {
pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
- return rc;
+ goto failed_tty;
}
/* Start the poller */
@@ -282,6 +294,14 @@ static int dgnc_start(void)
add_timer(&dgnc_poll_timer);
+ return 0;
+
+failed_tty:
+ device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+failed_device:
+ class_destroy(dgnc_class);
+failed_class:
+ unregister_chrdev(dgnc_Major, "dgnc");
return rc;
}
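The new IS_ERR()/PTR_ERR() checks follow the ERR_PTR convention: class_create() and device_create() return either a valid pointer or a pointer encoding a negative errno, never NULL on failure, so a plain NULL test would miss the error. A minimal sketch of the check in isolation (the "example" names are invented):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *example_class;

static int example_register(void)
{
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);	/* a negative errno, e.g. -ENOMEM */
	return 0;
}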
diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c
index 61efc13ec160..80b51332292c 100644
--- a/drivers/staging/dgnc/dgnc_utils.c
+++ b/drivers/staging/dgnc/dgnc_utils.c
@@ -12,7 +12,7 @@
*/
int dgnc_ms_sleep(ulong ms)
{
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((ms * HZ) / 1000);
return signal_pending(current);
}
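__set_current_state() is the right way to set the task state before schedule_timeout(); as an aside, roughly the same behaviour could likely be had from msleep_interruptible(), which handles the state setting and HZ conversion itself (a sketch, not part of the patch):

#include <linux/delay.h>
#include <linux/sched.h>

static int dgnc_ms_sleep_alt(unsigned long ms)
{
	msleep_interruptible(ms);	/* sleeps up to ms, returns early on a signal */
	return signal_pending(current);
}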
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 3181a3590465..d6e0b9f6b24a 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -38,8 +38,8 @@
#if !defined(TIOCMODG)
-#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
-#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
+#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
+#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
#ifndef TIOCM_LE
#define TIOCM_LE 0x01 /* line enable */
@@ -58,44 +58,44 @@
#endif
#if !defined(TIOCMSET)
-#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
-#define TIOCMGET ('d'<<8) | 253 /* set modem ctrl state */
+#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
+#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
#endif
#if !defined(TIOCMBIC)
-#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */
-#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */
+#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
+#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
#endif
#if !defined(TIOCSDTR)
-#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
-#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
+#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
+#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
#endif
/************************************************************************
* Ioctl command arguments for DIGI parameters.
************************************************************************/
-#define DIGI_GETA ('e'<<8) | 94 /* Read params */
+#define DIGI_GETA (('e'<<8) | 94) /* Read params */
-#define DIGI_SETA ('e'<<8) | 95 /* Set params */
-#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
-#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
+#define DIGI_SETA (('e'<<8) | 95) /* Set params */
+#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
+#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
-#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */
/* Adapter Memory */
-#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
+#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
/* control characters */
-#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
+#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
/* control characters */
-#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
+#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
/* flow control chars */
-#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
+#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
/* flow control chars */
-#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
-#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
+#define DIGI_GEDELAY (('d'<<8) | 246) /* Get edelay */
+#define DIGI_SEDELAY (('d'<<8) | 247) /* Set edelay */
struct digiflow_t {
unsigned char startc; /* flow cntl start char */
@@ -104,8 +104,8 @@ struct digiflow_t {
#ifdef FLOW_2200
-#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
-#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
+#define F2200_GETA (('e'<<8) | 104) /* Get 2x36 flow cntl flags */
+#define F2200_SETAW (('e'<<8) | 105) /* Set 2x36 flow cntl flags */
#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
@@ -241,7 +241,7 @@ struct digi_dinfo {
char dinfo_version[16]; /* driver version */
};
-#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
/************************************************************************
* Structure used with ioctl commands for per-board information
@@ -261,7 +261,7 @@ struct digi_info {
char info_reserved[7]; /* for future expansion */
};
-#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
+#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
struct digi_stat {
unsigned int info_chan; /* Channel number (0 based) */
@@ -276,7 +276,7 @@ struct digi_stat {
unsigned int info_reserved[8]; /* for future expansion */
};
-#define DIGI_GETSTAT ('d'<<8) | 244 /* get board info */
+#define DIGI_GETSTAT (('d'<<8) | 244) /* get board info */
/************************************************************************
*
* Structure used with ioctl commands for per-channel information
@@ -339,7 +339,7 @@ struct digi_getcounter {
#define INFO_CH_WLOW 0x0020
#define INFO_XXBUF_BUSY 0x0040
-#define DIGI_GETCH ('d'<<8) | 245 /* get board info */
+#define DIGI_GETCH (('d'<<8) | 245) /* get board info */
/* Board type definitions */
@@ -384,15 +384,15 @@ struct digi_getcounter {
#define BD_TRIBOOT 0x8
#define BD_BADKME 0x80
-#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
+#define DIGI_SPOLL (('d'<<8) | 254) /* change poller rate */
#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-#define DIGI_REALPORT_GETBUFFERS ('e'<<8) | 108
-#define DIGI_REALPORT_SENDIMMEDIATE ('e'<<8) | 109
-#define DIGI_REALPORT_GETCOUNTERS ('e'<<8) | 110
-#define DIGI_REALPORT_GETEVENTS ('e'<<8) | 111
+#define DIGI_REALPORT_GETBUFFERS (('e'<<8) | 108)
+#define DIGI_REALPORT_SENDIMMEDIATE (('e'<<8) | 109)
+#define DIGI_REALPORT_GETCOUNTERS (('e'<<8) | 110)
+#define DIGI_REALPORT_GETEVENTS (('e'<<8) | 111)
#define EV_OPU 0x0001 /* !<Output paused by client */
#define EV_OPS 0x0002 /* !<Output paused by reqular sw flowctrl */
diff --git a/drivers/staging/dgnc/dpacompat.h b/drivers/staging/dgnc/dpacompat.h
index b2d2dc08f869..33cb394524b8 100644
--- a/drivers/staging/dgnc/dpacompat.h
+++ b/drivers/staging/dgnc/dpacompat.h
@@ -51,7 +51,7 @@ struct ni_info {
#define RW_READ 1
#define RW_WRITE 2
-#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */
#define SUBTYPE 0007
#define T_PCXI 0000
@@ -106,10 +106,10 @@ struct ni_info {
/* Ioctls needed for dpa operation */
-#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
-#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
-#define DIGI_GET_NI_INFO ('d'<<8) | 250 /* nonintelligent state snfo */
+#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
+#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
+#define DIGI_GET_NI_INFO	(('d'<<8) | 250) /* nonintelligent state info */
/* Other special ioctls */
-#define DIGI_TIMERIRQ ('d'<<8) | 251 /* Enable/disable RS_TIMER use */
-#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
+#define DIGI_TIMERIRQ (('d'<<8) | 251) /* Enable/disable RS_TIMER use */
+#define DIGI_LOOPBACK (('d'<<8) | 252) /* Enable/disable UART internal loopback */
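The point of the added parentheses in these ioctl definitions: without them the '<<' and '|' in the macro body combine with the surrounding expression through operator precedence. A tiny illustration (constants only, not driver code):

#define GETA_OLD ('e' << 8) | 94	/* unparenthesized, as before the patch */
#define GETA_NEW (('e' << 8) | 94)	/* parenthesized, as after the patch    */

/* '==' binds tighter than '|', so the old form misparses: */
int old_cmp = GETA_OLD == 0x655e;	/* ('e' << 8) | (94 == 0x655e) -> 0x6500 */
int new_cmp = GETA_NEW == 0x655e;	/* 0x655e == 0x655e            -> 1      */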
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index eb178fcb7954..4be646ce8a12 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -1608,7 +1608,7 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
switch (recipient) {
case USB_RECIP_DEVICE:
if (udc->ctrl.wIndex == 0x0000) {
- if (udc->self_powered)
+ if (udc->gadget.is_selfpowered)
status_data |= (1 << USB_DEVICE_SELF_POWERED);
if (udc->remote_wakeup)
@@ -3117,7 +3117,7 @@ static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
int is_selfpowered)
{
- struct nbu2ss_udc *udc;
+ struct nbu2ss_udc *udc;
unsigned long flags;
/* INFO("=== %s()\n", __func__); */
@@ -3130,7 +3130,7 @@ static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
udc = container_of(pgadget, struct nbu2ss_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- udc->self_powered = (is_selfpowered != 0);
+ pgadget->is_selfpowered = (is_selfpowered != 0);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
@@ -3249,42 +3249,6 @@ static const char *gp_ep_name[NUM_ENDPOINTS] = {
};
/*-------------------------------------------------------------------------*/
-static void __init nbu2ss_drv_set_ep_info(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- const char *name)
-{
- ep->udc = udc;
- ep->desc = NULL;
-
- ep->ep.driver_data = NULL;
- ep->ep.name = name;
- ep->ep.ops = &nbu2ss_ep_ops;
-
- if (isdigit(name[2])) {
-
- long num;
- int res;
- char tempbuf[2];
-
- tempbuf[0] = name[2];
- tempbuf[1] = '\0';
- res = kstrtol(tempbuf, 16, &num);
-
- if (num == 0)
- ep->ep.maxpacket = EP0_PACKETSIZE;
- else
- ep->ep.maxpacket = EP_PACKETSIZE;
-
- } else {
- ep->ep.maxpacket = EP_PACKETSIZE;
- }
-
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- INIT_LIST_HEAD(&ep->queue);
-}
-
-/*-------------------------------------------------------------------------*/
static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
{
int i;
@@ -3292,9 +3256,21 @@ static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
INIT_LIST_HEAD(&udc->gadget.ep_list);
udc->gadget.ep0 = &udc->ep[0].ep;
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct nbu2ss_ep *ep = &udc->ep[i];
+
+ ep->udc = udc;
+ ep->desc = NULL;
+
+ ep->ep.driver_data = NULL;
+ ep->ep.name = gp_ep_name[i];
+ ep->ep.ops = &nbu2ss_ep_ops;
- for (i = 0; i < NUM_ENDPOINTS; i++)
- nbu2ss_drv_set_ep_info(udc, &udc->ep[i], gp_ep_name[i]);
+ ep->ep.maxpacket = (i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
+
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ }
list_del_init(&udc->ep[0].ep.ep_list);
}
@@ -3308,7 +3284,7 @@ static int __init nbu2ss_drv_contest_init(
spin_lock_init(&udc->lock);
udc->dev = &pdev->dev;
- udc->self_powered = 1;
+ udc->gadget.is_selfpowered = 1;
udc->devstate = USB_STATE_NOTATTACHED;
udc->pdev = pdev;
udc->mA = 0;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index ee1b80d705fa..202e2dc72bba 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -624,7 +624,6 @@ struct nbu2ss_udc {
unsigned linux_suspended:1;
unsigned linux_resume:1;
unsigned usb_suspended:1;
- unsigned self_powered:1;
unsigned remote_wakeup:1;
unsigned udc_enabled:1;
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
new file mode 100644
index 000000000000..995a9101a080
--- /dev/null
+++ b/drivers/staging/fbtft/Kconfig
@@ -0,0 +1,169 @@
+menuconfig FB_TFT
+ tristate "Support for small TFT LCD display modules"
+ depends on FB && SPI && GPIOLIB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ select FB_BACKLIGHT
+
+config FB_TFT_AGM1264K_FL
+ tristate "FB driver for the AGM1264K-FL LCD display"
+ depends on FB_TFT
+ help
+	  Framebuffer support for the AGM1264K-FL LCD display (two Samsung KS0108 compatible chips)
+
+config FB_TFT_BD663474
+ tristate "FB driver for the BD663474 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for BD663474
+
+config FB_TFT_HX8340BN
+ tristate "FB driver for the HX8340BN LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for HX8340BN
+
+config FB_TFT_HX8347D
+ tristate "FB driver for the HX8347D LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for HX8347D
+
+config FB_TFT_HX8353D
+ tristate "FB driver for the HX8353D LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for HX8353D
+
+config FB_TFT_ILI9320
+ tristate "FB driver for the ILI9320 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ILI9320
+
+config FB_TFT_ILI9325
+ tristate "FB driver for the ILI9325 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ILI9325
+
+config FB_TFT_ILI9340
+ tristate "FB driver for the ILI9340 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ILI9340
+
+config FB_TFT_ILI9341
+ tristate "FB driver for the ILI9341 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ILI9341
+
+config FB_TFT_ILI9481
+ tristate "FB driver for the ILI9481 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ILI9481
+
+config FB_TFT_ILI9486
+ tristate "FB driver for the ILI9486 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ILI9486
+
+config FB_TFT_PCD8544
+ tristate "FB driver for the PCD8544 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for PCD8544
+
+config FB_TFT_RA8875
+ tristate "FB driver for the RA8875 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for RA8875
+
+config FB_TFT_S6D02A1
+ tristate "FB driver for the S6D02A1 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for S6D02A1
+
+config FB_TFT_S6D1121
+	tristate "FB driver for the S6D1121 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for S6D1121
+
+config FB_TFT_SSD1289
+ tristate "FB driver for the SSD1289 LCD Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1289
+
+config FB_TFT_SSD1306
+ tristate "FB driver for the SSD1306 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1306
+
+config FB_TFT_SSD1331
+ tristate "FB driver for the SSD1331 LCD Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1331
+
+config FB_TFT_SSD1351
+ tristate "FB driver for the SSD1351 LCD Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1351
+
+config FB_TFT_ST7735R
+ tristate "FB driver for the ST7735R LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for ST7735R
+
+config FB_TFT_TINYLCD
+ tristate "FB driver for tinylcd.com display"
+ depends on FB_TFT
+ help
+ Custom Framebuffer support for tinylcd.com display
+
+config FB_TFT_TLS8204
+ tristate "FB driver for the TLS8204 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for TLS8204
+
+config FB_TFT_UC1701
+ tristate "FB driver for the UC1701 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for UC1701
+
+config FB_TFT_UPD161704
+ tristate "FB driver for the uPD161704 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for uPD161704
+
+config FB_TFT_WATTEROTT
+ tristate "FB driver for the WATTEROTT LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for WATTEROTT
+
+config FB_FLEX
+ tristate "Generic FB driver for TFT LCD displays"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for TFT LCD displays.
+
+config FB_TFT_FBTFT_DEVICE
+	tristate "Module for adding FBTFT devices"
+ depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
new file mode 100644
index 000000000000..e773f0fdcfe8
--- /dev/null
+++ b/drivers/staging/fbtft/Makefile
@@ -0,0 +1,34 @@
+# Core module
+obj-$(CONFIG_FB_TFT) += fbtft.o
+fbtft-y += fbtft-core.o fbtft-sysfs.o fbtft-bus.o fbtft-io.o
+
+# drivers
+obj-$(CONFIG_FB_TFT_AGM1264K_FL) += fb_agm1264k-fl.o
+obj-$(CONFIG_FB_TFT_BD663474) += fb_bd663474.o
+obj-$(CONFIG_FB_TFT_HX8340BN) += fb_hx8340bn.o
+obj-$(CONFIG_FB_TFT_HX8347D) += fb_hx8347d.o
+obj-$(CONFIG_FB_TFT_HX8353D) += fb_hx8353d.o
+obj-$(CONFIG_FB_TFT_ILI9320) += fb_ili9320.o
+obj-$(CONFIG_FB_TFT_ILI9325) += fb_ili9325.o
+obj-$(CONFIG_FB_TFT_ILI9340) += fb_ili9340.o
+obj-$(CONFIG_FB_TFT_ILI9341) += fb_ili9341.o
+obj-$(CONFIG_FB_TFT_ILI9481) += fb_ili9481.o
+obj-$(CONFIG_FB_TFT_ILI9486) += fb_ili9486.o
+obj-$(CONFIG_FB_TFT_PCD8544) += fb_pcd8544.o
+obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
+obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
+obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
+obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
+obj-$(CONFIG_FB_TFT_SSD1306) += fb_ssd1306.o
+obj-$(CONFIG_FB_TFT_SSD1331) += fb_ssd1331.o
+obj-$(CONFIG_FB_TFT_SSD1351) += fb_ssd1351.o
+obj-$(CONFIG_FB_TFT_ST7735R) += fb_st7735r.o
+obj-$(CONFIG_FB_TFT_TINYLCD) += fb_tinylcd.o
+obj-$(CONFIG_FB_TFT_TLS8204) += fb_tls8204.o
+obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
+obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
+obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
+obj-$(CONFIG_FB_FLEX) += flexfb.o
+
+# Device modules
+obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o
diff --git a/drivers/staging/fbtft/README b/drivers/staging/fbtft/README
new file mode 100644
index 000000000000..bc89b5805f7b
--- /dev/null
+++ b/drivers/staging/fbtft/README
@@ -0,0 +1,32 @@
+ FBTFT
+=========
+
+Linux Framebuffer drivers for small TFT LCD display modules.
+The module 'fbtft' makes writing drivers for some of these displays very easy.
+
+Development is done on a Raspberry Pi running the Raspbian "wheezy" distribution.
+
+INSTALLATION
+ Download kernel sources
+
+ From Linux 3.15
+ cd drivers/video/fbdev/fbtft
+ git clone https://github.com/notro/fbtft.git
+
+ Add to drivers/video/fbdev/Kconfig: source "drivers/video/fbdev/fbtft/Kconfig"
+ Add to drivers/video/fbdev/Makefile: obj-y += fbtft/
+
+ Before Linux 3.15
+ cd drivers/video
+ git clone https://github.com/notro/fbtft.git
+
+ Add to drivers/video/Kconfig: source "drivers/video/fbtft/Kconfig"
+ Add to drivers/video/Makefile: obj-y += fbtft/
+
+ Enable driver(s) in menuconfig and build the kernel
+
+
+See wiki for more information: https://github.com/notro/fbtft/wiki
+
+
+Source: https://github.com/notro/fbtft/
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
new file mode 100644
index 000000000000..9cc7d25cf0e5
--- /dev/null
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -0,0 +1,462 @@
+/*
+ * FB driver for Two KS0108 LCD controllers in AGM1264K-FL display
+ *
+ * Copyright (C) 2014 ololoshka2871
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "fbtft.h"
+
+/* Uncomment this line to use a negative image on the display */
+/*#define NEGATIVE*/
+
+#define WHITE 0xff
+#define BLACK 0
+
+#define DRVNAME "fb_agm1264k-fl"
+#define WIDTH 64
+#define HEIGHT 64
+#define TOTALWIDTH (WIDTH * 2) /* because 2 x ks0108 in one display */
+#define FPS 20
+
+#define EPIN gpio.wr
+#define RS gpio.dc
+#define RW gpio.aux[2]
+#define CS0 gpio.aux[0]
+#define CS1 gpio.aux[1]
+
+
+/* diffusing error (“Floyd-Steinberg”) */
+#define DIFFUSING_MATRIX_WIDTH 2
+#define DIFFUSING_MATRIX_HEIGHT 2
+
+static const signed char
+diffusing_matrix[DIFFUSING_MATRIX_WIDTH][DIFFUSING_MATRIX_HEIGHT] = {
+ {-1, 3},
+ {3, 2},
+};
+
+static const unsigned char gamma_correction_table[] = {
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6,
+6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13,
+13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
+22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30, 30, 31, 32,
+33, 33, 34, 35, 35, 36, 37, 38, 39, 39, 40, 41, 42, 43, 43, 44, 45,
+46, 47, 48, 49, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81,
+82, 83, 84, 85, 87, 88, 89, 90, 91, 93, 94, 95, 97, 98, 99, 100, 102,
+103, 105, 106, 107, 109, 110, 111, 113, 114, 116, 117, 119, 120, 121,
+123, 124, 126, 127, 129, 130, 132, 133, 135, 137, 138, 140, 141, 143,
+145, 146, 148, 149, 151, 153, 154, 156, 158, 159, 161, 163, 165, 166,
+168, 170, 172, 173, 175, 177, 179, 181, 182, 184, 186, 188, 190, 192,
+194, 196, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219,
+221, 223, 225, 227, 229, 231, 234, 236, 238, 240, 242, 244, 246, 248,
+251, 253, 255
+};
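The table above looks like an ordinary gamma-encode curve; the values sit close to what an exponent of about 2.2 produces, though that exponent is an assumption and the original table may have been hand-tuned. A userspace sketch that generates a very similar table (build with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double gamma = 2.2;	/* assumed exponent */
	int i;

	for (i = 0; i < 256; i++)
		printf("%d, ", (int)(255.0 * pow(i / 255.0, gamma) + 0.5));
	printf("\n");
	return 0;
}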
+
+static int init_display(struct fbtft_par *par)
+{
+ u8 i;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ for (i = 0; i < 2; ++i) {
+ write_reg(par, i, 0x3f); /* display on */
+ write_reg(par, i, 0x40); /* set x to 0 */
+ write_reg(par, i, 0xb0); /* set page to 0 */
+ write_reg(par, i, 0xc0); /* set start line to 0 */
+ }
+
+ return 0;
+}
+
+static void reset(struct fbtft_par *par)
+{
+ if (par->gpio.reset == -1)
+ return;
+
+ fbtft_dev_dbg(DEBUG_RESET, par, par->info->device, "%s()\n", __func__);
+
+ gpio_set_value(par->gpio.reset, 0);
+ udelay(20);
+ gpio_set_value(par->gpio.reset, 1);
+ mdelay(120);
+}
+
+/* Check that all necessary GPIOs are defined */
+static int verify_gpios(struct fbtft_par *par)
+{
+ int i;
+
+ fbtft_dev_dbg(DEBUG_VERIFY_GPIOS, par, par->info->device,
+ "%s()\n", __func__);
+
+ if (par->EPIN < 0) {
+ dev_err(par->info->device,
+ "Missing info about 'wr' (aka E) gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < 8; ++i) {
+ if (par->gpio.db[i] < 0) {
+ dev_err(par->info->device,
+ "Missing info about 'db[%i]' gpio. Aborting.\n",
+ i);
+ return -EINVAL;
+ }
+ }
+ if (par->CS0 < 0) {
+ dev_err(par->info->device,
+ "Missing info about 'cs0' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ if (par->CS1 < 0) {
+ dev_err(par->info->device,
+ "Missing info about 'cs1' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ if (par->RW < 0) {
+ dev_err(par->info->device,
+ "Missing info about 'rw' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long
+request_gpios_match(struct fbtft_par *par, const struct fbtft_gpio *gpio)
+{
+ fbtft_dev_dbg(DEBUG_REQUEST_GPIOS_MATCH, par, par->info->device,
+ "%s('%s')\n", __func__, gpio->name);
+
+ if (strcasecmp(gpio->name, "wr") == 0) {
+ /* left ks0108 E pin */
+ par->EPIN = gpio->gpio;
+ return GPIOF_OUT_INIT_LOW;
+ } else if (strcasecmp(gpio->name, "cs0") == 0) {
+ /* left ks0108 controller pin */
+ par->CS0 = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ } else if (strcasecmp(gpio->name, "cs1") == 0) {
+ /* right ks0108 controller pin */
+ par->CS1 = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ }
+
+ /* if write (rw = 0) e(1->0) perform write */
+ /* if read (rw = 1) e(0->1) set data on D0-7*/
+ else if (strcasecmp(gpio->name, "rw") == 0) {
+ par->RW = gpio->gpio;
+ return GPIOF_OUT_INIT_LOW;
+ }
+
+ return FBTFT_GPIO_NO_MATCH;
+}
+
+/* This function is used to send commands:
+ * first byte - destination controller 0 or 1
+ * following bytes - commands
+ */
+static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i, ret;
+ u8 *buf = (u8 *)par->buf;
+
+ if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) {
+ va_start(args, len);
+ for (i = 0; i < len; i++)
+ buf[i] = (u8)va_arg(args, unsigned int);
+
+ va_end(args);
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
+ par->info->device, u8, buf, len, "%s: ", __func__);
+ }
+
+ va_start(args, len);
+
+ *buf = (u8)va_arg(args, unsigned int);
+
+ if (*buf > 1) {
+ va_end(args);
+		dev_err(par->info->device, "%s: Incorrect chip select request (%d)\n",
+ __func__, *buf);
+ return;
+ }
+
+ /* select chip */
+ if (*buf) {
+ /* cs1 */
+ gpio_set_value(par->CS0, 1);
+ gpio_set_value(par->CS1, 0);
+ } else {
+ /* cs0 */
+ gpio_set_value(par->CS0, 0);
+ gpio_set_value(par->CS1, 1);
+ }
+
+ gpio_set_value(par->RS, 0); /* RS->0 (command mode) */
+ len--;
+
+ if (len) {
+ i = len;
+ while (i--)
+ *buf++ = (u8)va_arg(args, unsigned int);
+ ret = par->fbtftops.write(par, par->buf, len * (sizeof(u8)));
+ if (ret < 0) {
+ va_end(args);
+ dev_err(par->info->device, "%s: write() failed and returned %d\n",
+ __func__, ret);
+ return;
+ }
+ }
+
+ va_end(args);
+}
+
+static struct
+{
+ int xs, ys_page, xe, ye_page;
+} addr_win;
+
+/* save display writing zone */
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ addr_win.xs = xs;
+ addr_win.ys_page = ys / 8;
+ addr_win.xe = xe;
+ addr_win.ye_page = ye / 8;
+
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys_page=%d, xe=%d, ye_page=%d)\n", __func__,
+ addr_win.xs, addr_win.ys_page, addr_win.xe, addr_win.ye_page);
+}
+
+static void
+construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
+ int xs, int xe, int y)
+{
+ int x, i;
+
+ for (x = xs; x < xe; ++x) {
+ u8 res = 0;
+
+ for (i = 0; i < 8; i++)
+ if (src[(y * 8 + i) * par->info->var.xres + x])
+ res |= 1 << i;
+#ifdef NEGATIVE
+ *dest++ = res;
+#else
+ *dest++ = ~res;
+#endif
+ }
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_base;
+ u8 *buf = par->txbuf.buf;
+ int x, y;
+ int ret = 0;
+
+	/* buffer to convert RGB565 -> grayscale16 -> dithered image 1bpp */
+ signed short *convert_buf = kmalloc(par->info->var.xres *
+ par->info->var.yres * sizeof(signed short), GFP_NOIO);
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
+ /* converting to grayscale16 */
+ for (x = 0; x < par->info->var.xres; ++x)
+ for (y = 0; y < par->info->var.yres; ++y) {
+ u16 pixel = vmem16[y * par->info->var.xres + x];
+ u16 b = pixel & 0x1f;
+ u16 g = (pixel & (0x3f << 5)) >> 5;
+ u16 r = (pixel & (0x1f << (5 + 6))) >> (5 + 6);
+
+ pixel = (299 * r + 587 * g + 114 * b) / 200;
+ if (pixel > 255)
+ pixel = 255;
+
+ /* gamma-correction by table */
+ convert_buf[y * par->info->var.xres + x] =
+ (signed short)gamma_correction_table[pixel];
+ }
+
+ /* Image Dithering */
+ for (x = 0; x < par->info->var.xres; ++x)
+ for (y = 0; y < par->info->var.yres; ++y) {
+ signed short pixel =
+ convert_buf[y * par->info->var.xres + x];
+ signed short error_b = pixel - BLACK;
+ signed short error_w = pixel - WHITE;
+ signed short error;
+ u16 i, j;
+
+			/* which color is closer? */
+ if (abs(error_b) >= abs(error_w)) {
+ /* white */
+ error = error_w;
+ pixel = 0xff;
+ } else {
+ /* black */
+ error = error_b;
+ pixel = 0;
+ }
+
+ error /= 8;
+
+ /* diffusion matrix row */
+ for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
+ /* diffusion matrix column */
+ for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
+ signed short *write_pos;
+ signed char coeff;
+
+ /* skip pixels out of zone */
+ if (x + i < 0 ||
+ x + i >= par->info->var.xres
+ || y + j >= par->info->var.yres)
+ continue;
+ write_pos = &convert_buf[
+ (y + j) * par->info->var.xres +
+ x + i];
+ coeff = diffusing_matrix[i][j];
+ if (coeff == -1)
+ /* pixel itself */
+ *write_pos = pixel;
+ else {
+ signed short p = *write_pos +
+ error * coeff;
+
+ if (p > WHITE)
+ p = WHITE;
+ if (p < BLACK)
+ p = BLACK;
+ *write_pos = p;
+ }
+ }
+ }
+
+ /* 1 string = 2 pages */
+ for (y = addr_win.ys_page; y <= addr_win.ye_page; ++y) {
+ /* left half of display */
+ if (addr_win.xs < par->info->var.xres / 2) {
+ construct_line_bitmap(par, buf, convert_buf,
+ addr_win.xs, par->info->var.xres / 2, y);
+
+ len = par->info->var.xres / 2 - addr_win.xs;
+
+ /* select left side (sc0)
+ * set addr
+ */
+ write_reg(par, 0x00, (1 << 6) | (u8)addr_win.xs);
+ write_reg(par, 0x00, (0x17 << 3) | (u8)y);
+
+ /* write bitmap */
+ gpio_set_value(par->RS, 1); /* RS->1 (data mode) */
+ ret = par->fbtftops.write(par, buf, len);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "%s: write failed and returned: %d\n",
+ __func__, ret);
+ }
+ /* right half of display */
+ if (addr_win.xe >= par->info->var.xres / 2) {
+ construct_line_bitmap(par, buf,
+ convert_buf, par->info->var.xres / 2,
+ addr_win.xe + 1, y);
+
+ len = addr_win.xe + 1 - par->info->var.xres / 2;
+
+ /* select right side (sc1)
+ * set addr
+ */
+ write_reg(par, 0x01, (1 << 6));
+ write_reg(par, 0x01, (0x17 << 3) | (u8)y);
+
+ /* write bitmap */
+ gpio_set_value(par->RS, 1); /* RS->1 (data mode) */
+			ret = par->fbtftops.write(par, buf, len);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "%s: write failed and returned: %d\n",
+ __func__, ret);
+ }
+ }
+ kfree(convert_buf);
+
+ gpio_set_value(par->CS0, 1);
+ gpio_set_value(par->CS1, 1);
+
+ return ret;
+}
+
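The dithering pass in write_vmem() above quantizes each gray level to black or white and spreads the residual error to the not-yet-visited neighbours with the 3/3/2 weights of diffusing_matrix[][], scaled by 1/8. A stand-alone sketch of that single step (simplified to one pixel, with the neighbours passed in by the caller):

#include <stdlib.h>	/* abs() */

static unsigned char quantize_and_diffuse(signed short pixel,
					  signed short *right,
					  signed short *below,
					  signed short *diag)
{
	signed short error;
	unsigned char out;

	if (abs(pixel - 0) >= abs(pixel - 255)) {
		out = 0xff;		/* closer to white */
		error = pixel - 255;
	} else {
		out = 0x00;		/* closer to black */
		error = pixel - 0;
	}

	error /= 8;
	*right += 3 * error;	/* e.g. pixel = 200: error = -6, right gets -18 */
	*below += 3 * error;	/*                               below gets -18 */
	*diag  += 2 * error;	/*                               diag  gets -12 */

	return out;
}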
+static int write(struct fbtft_par *par, void *buf, size_t len)
+{
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ gpio_set_value(par->RW, 0); /* set write mode */
+
+
+ while (len--) {
+ u8 i, data;
+
+ data = *(u8 *) buf++;
+
+ /* set data bus */
+ for (i = 0; i < 8; ++i)
+ gpio_set_value(par->gpio.db[i], data & (1 << i));
+ /* set E */
+ gpio_set_value(par->EPIN, 1);
+ udelay(5);
+ /* unset E - write */
+ gpio_set_value(par->EPIN, 0);
+ udelay(1);
+ }
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = TOTALWIDTH,
+ .height = HEIGHT,
+ .fps = FPS,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .verify_gpios = verify_gpios,
+ .request_gpios_match = request_gpios_match,
+ .reset = reset,
+ .write = write,
+ .write_register = write_reg8_bus8,
+ .write_vmem = write_vmem,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "displaytronic,fb_agm1264k-fl", &display);
+
+MODULE_ALIAS("platform:" DRVNAME);
+
+MODULE_DESCRIPTION("Two KS0108 LCD controllers in AGM1264K-FL display");
+MODULE_AUTHOR("ololoshka2871");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
new file mode 100644
index 000000000000..7e00c609c7fe
--- /dev/null
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -0,0 +1,193 @@
+/*
+ * FB driver for the uPD161704 LCD Controller
+ *
+ * Copyright (C) 2014 Seong-Woo Kim
+ *
+ * Based on fb_ili9325.c by Noralf Tronnes
+ * Based on ili9325.c by Jeroen Domburg
+ * Init code from UTFT library by Henning Karlsen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_bd663474"
+#define WIDTH 240
+#define HEIGHT 320
+#define BPP 16
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ par->fbtftops.reset(par);
+
+ /* Initialization sequence from Lib_UTFT */
+
+ /* oscillator start */
+	write_reg(par, 0x000, 0x0001); /* oscillator 0: stop, 1: operation */
+ mdelay(10);
+
+ /* Power settings */
+	write_reg(par, 0x100, 0x0000); /* power supply setup */
+	write_reg(par, 0x101, 0x0000);
+	write_reg(par, 0x102, 0x3110);
+	write_reg(par, 0x103, 0xe200);
+	write_reg(par, 0x110, 0x009d);
+	write_reg(par, 0x111, 0x0022);
+	write_reg(par, 0x100, 0x0120);
+	mdelay(20);
+
+	write_reg(par, 0x100, 0x3120);
+	mdelay(80);
+ /* Display control */
+	write_reg(par, 0x001, 0x0100);
+	write_reg(par, 0x002, 0x0000);
+	write_reg(par, 0x003, 0x1230);
+	write_reg(par, 0x006, 0x0000);
+	write_reg(par, 0x007, 0x0101);
+	write_reg(par, 0x008, 0x0808);
+	write_reg(par, 0x009, 0x0000);
+	write_reg(par, 0x00b, 0x0000);
+	write_reg(par, 0x00c, 0x0000);
+	write_reg(par, 0x00d, 0x0018);
+	/* LTPS control settings */
+	write_reg(par, 0x012, 0x0000);
+	write_reg(par, 0x013, 0x0000);
+	write_reg(par, 0x018, 0x0000);
+	write_reg(par, 0x019, 0x0000);
+
+	write_reg(par, 0x203, 0x0000);
+	write_reg(par, 0x204, 0x0000);
+
+	write_reg(par, 0x210, 0x0000);
+	write_reg(par, 0x211, 0x00ef);
+	write_reg(par, 0x212, 0x0000);
+	write_reg(par, 0x213, 0x013f);
+	write_reg(par, 0x214, 0x0000);
+	write_reg(par, 0x215, 0x0000);
+	write_reg(par, 0x216, 0x0000);
+	write_reg(par, 0x217, 0x0000);
+
+ /* Gray scale settings */
+ write_reg(par, 0x300, 0x5343);
+ write_reg(par, 0x301, 0x1021);
+ write_reg(par, 0x302, 0x0003);
+ write_reg(par, 0x303, 0x0011);
+ write_reg(par, 0x304, 0x050a);
+ write_reg(par, 0x305, 0x4342);
+ write_reg(par, 0x306, 0x1100);
+ write_reg(par, 0x307, 0x0003);
+ write_reg(par, 0x308, 0x1201);
+ write_reg(par, 0x309, 0x050a);
+
+ /* RAM access settings */
+	write_reg(par, 0x400, 0x4027);
+	write_reg(par, 0x401, 0x0000);
+	write_reg(par, 0x402, 0x0000); /* First screen drive position (1) */
+	write_reg(par, 0x403, 0x013f); /* First screen drive position (2) */
+	write_reg(par, 0x404, 0x0000);
+
+	write_reg(par, 0x200, 0x0000);
+	write_reg(par, 0x201, 0x0000);
+	write_reg(par, 0x100, 0x7120);
+	write_reg(par, 0x007, 0x0103);
+	mdelay(10);
+	write_reg(par, 0x007, 0x0113);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+ switch (par->info->var.rotate) {
+ /* R200h = Horizontal GRAM Start Address */
+ /* R201h = Vertical GRAM Start Address */
+ case 0:
+ write_reg(par, 0x0200, xs);
+ write_reg(par, 0x0201, ys);
+ break;
+ case 180:
+ write_reg(par, 0x0200, WIDTH - 1 - xs);
+ write_reg(par, 0x0201, HEIGHT - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x0200, WIDTH - 1 - ys);
+ write_reg(par, 0x0201, xs);
+ break;
+ case 90:
+ write_reg(par, 0x0200, ys);
+ write_reg(par, 0x0201, HEIGHT - 1 - xs);
+ break;
+ }
+ write_reg(par, 0x202); /* Write Data to GRAM */
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ /* AM: GRAM update direction */
+ case 0:
+ write_reg(par, 0x003, 0x1230);
+ break;
+ case 180:
+ write_reg(par, 0x003, 0x1200);
+ break;
+ case 270:
+ write_reg(par, 0x003, 0x1228);
+ break;
+ case 90:
+ write_reg(par, 0x003, 0x1218);
+ break;
+ }
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 16,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .bpp = BPP,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "hitachi,bd663474", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:bd663474");
+MODULE_ALIAS("platform:bd663474");
+
+MODULE_DESCRIPTION("FB driver for the uPD161704 LCD Controller");
+MODULE_AUTHOR("Seong-Woo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
new file mode 100644
index 000000000000..3939502f2c81
--- /dev/null
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -0,0 +1,229 @@
+/*
+ * FB driver for the HX8340BN LCD Controller
+ *
+ * This display uses 9-bit SPI: Data/Command bit + 8 data bits
+ * For platforms that don't support 9-bit, the driver can emulate it
+ * using 8-bit transfers.
+ * This is done by transferring eight 9-bit words in 9 bytes.
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_hx8340bn"
+#define WIDTH 176
+#define HEIGHT 220
+#define TXBUFLEN (4 * PAGE_SIZE)
+#define DEFAULT_GAMMA "1 3 0E 5 0 2 09 0 6 1 7 1 0 2 2\n" \
+ "3 3 17 8 4 7 05 7 6 0 3 1 6 0 0 "
+
+
+static bool emulate;
+module_param(emulate, bool, 0);
+MODULE_PARM_DESC(emulate, "Force emulation in 9-bit mode");
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ /* BTL221722-276L startup sequence, from datasheet */
+
+ /* SETEXTCOM: Set extended command set (C1h)
+ This command is used to set extended command set access enable.
+ Enable: After command (C1h), must write: ffh,83h,40h */
+ write_reg(par, 0xC1, 0xFF, 0x83, 0x40);
+
+ /* Sleep out
+ This command turns off sleep mode.
+ In this mode the DC/DC converter is enabled, Internal oscillator
+ is started, and panel scanning is started. */
+ write_reg(par, 0x11);
+ mdelay(150);
+
+ /* Undoc'd register? */
+ write_reg(par, 0xCA, 0x70, 0x00, 0xD9);
+
+ /* SETOSC: Set Internal Oscillator (B0h)
+ This command is used to set internal oscillator related settings */
+ /* OSC_EN: Enable internal oscillator */
+ /* Internal oscillator frequency: 125% x 2.52MHz */
+ write_reg(par, 0xB0, 0x01, 0x11);
+
+ /* Drive ability setting */
+ write_reg(par, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06);
+ mdelay(20);
+
+ /* SETPWCTR5: Set Power Control 5(B5h)
+ This command is used to set VCOM Low and VCOM High Voltage */
+ /* VCOMH 0110101 : 3.925 */
+ /* VCOML 0100000 : -1.700 */
+ /* 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d */
+ write_reg(par, 0xB5, 0x35, 0x20, 0x45);
+
+ /* SETPWCTR4: Set Power Control 4(B4h)
+ VRH[4:0]: Specify the VREG1 voltage adjusting.
+ VREG1 voltage is for gamma voltage setting.
+ BT[2:0]: Switch the output factor of step-up circuit 2
+ for VGH and VGL voltage generation. */
+ write_reg(par, 0xB4, 0x33, 0x25, 0x4C);
+ mdelay(10);
+
+ /* Interface Pixel Format (3Ah)
+ This command is used to define the format of RGB picture data,
+ which is to be transfer via the system and RGB interface. */
+ /* RGB interface: 16 Bit/Pixel */
+ write_reg(par, 0x3A, 0x05);
+
+ /* Display on (29h)
+ This command is used to recover from DISPLAY OFF mode.
+ Output from the Frame Memory is enabled. */
+ write_reg(par, 0x29);
+ mdelay(10);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ write_reg(par, FBTFT_CASET, 0x00, xs, 0x00, xe);
+ write_reg(par, FBTFT_RASET, 0x00, ys, 0x00, ye);
+ write_reg(par, FBTFT_RAMWR);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* MADCTL - Memory data access control */
+ /* RGB/BGR can be set with H/W pin SRGB and MADCTL BGR bit */
+#define MY (1 << 7)
+#define MX (1 << 6)
+#define MV (1 << 5)
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x36, (par->bgr << 3));
+ break;
+ case 270:
+ write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ break;
+ case 180:
+ write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ break;
+ case 90:
+ write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Gamma Curve selection, GC (only GC0 can be customized):
+ 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
+ Gamma string format:
+ OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
+ ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ unsigned long mask[] = {
+ 0b1111, 0b1111, 0b11111, 0b1111, 0b1111, 0b1111, 0b11111,
+ 0b111, 0b111, 0b111, 0b111, 0b111, 0b111, 0b11, 0b11,
+ 0b1111, 0b1111, 0b11111, 0b1111, 0b1111, 0b1111, 0b11111,
+ 0b111, 0b111, 0b111, 0b111, 0b111, 0b111, 0b0, 0b0 };
+ int i, j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < par->gamma.num_curves; i++)
+ for (j = 0; j < par->gamma.num_values; j++)
+ CURVE(i, j) &= mask[i * par->gamma.num_values + j];
+
+ write_reg(par, 0x26, 1 << CURVE(1, 14)); /* Gamma Set (26h) */
+
+ if (CURVE(1, 14))
+ return 0; /* only GC0 can be customized */
+
+ write_reg(par, 0xC2,
+ (CURVE(0, 8) << 4) | CURVE(0, 7),
+ (CURVE(0, 10) << 4) | CURVE(0, 9),
+ (CURVE(0, 12) << 4) | CURVE(0, 11),
+ CURVE(0, 2),
+ (CURVE(0, 4) << 4) | CURVE(0, 3),
+ CURVE(0, 5),
+ CURVE(0, 6),
+ (CURVE(0, 1) << 4) | CURVE(0, 0),
+ (CURVE(0, 14) << 2) | CURVE(0, 13));
+
+ write_reg(par, 0xC3,
+ (CURVE(1, 8) << 4) | CURVE(1, 7),
+ (CURVE(1, 10) << 4) | CURVE(1, 9),
+ (CURVE(1, 12) << 4) | CURVE(1, 11),
+ CURVE(1, 2),
+ (CURVE(1, 4) << 4) | CURVE(1, 3),
+ CURVE(1, 5),
+ CURVE(1, 6),
+ (CURVE(1, 1) << 4) | CURVE(1, 0));
+
+ mdelay(10);
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = TXBUFLEN,
+ .gamma_num = 2,
+ .gamma_len = 15,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "himax,hx8340bn", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:hx8340bn");
+MODULE_ALIAS("platform:hx8340bn");
+
+MODULE_DESCRIPTION("FB driver for the HX8340BN LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
new file mode 100644
index 000000000000..8139a8f587b7
--- /dev/null
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -0,0 +1,181 @@
+/*
+ * FB driver for the HX8347D LCD Controller
+ *
+ * Copyright (C) 2013 Christian Vogelgsang
+ *
+ * Based on driver code found here: https://github.com/watterott/r61505u-Adapter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_hx8347d"
+#define WIDTH 320
+#define HEIGHT 240
+#define DEFAULT_GAMMA "0 0 0 0 0 0 0 0 0 0 0 0 0 0\n" \
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0"
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ /* driving ability */
+ write_reg(par, 0xEA, 0x00);
+ write_reg(par, 0xEB, 0x20);
+ write_reg(par, 0xEC, 0x0C);
+ write_reg(par, 0xED, 0xC4);
+ write_reg(par, 0xE8, 0x40);
+ write_reg(par, 0xE9, 0x38);
+ write_reg(par, 0xF1, 0x01);
+ write_reg(par, 0xF2, 0x10);
+ write_reg(par, 0x27, 0xA3);
+
+ /* power voltage */
+ write_reg(par, 0x1B, 0x1B);
+ write_reg(par, 0x1A, 0x01);
+ write_reg(par, 0x24, 0x2F);
+ write_reg(par, 0x25, 0x57);
+
+ /* VCOM offset */
+ write_reg(par, 0x23, 0x8D); /* for flicker adjust */
+
+ /* power on */
+ write_reg(par, 0x18, 0x36);
+ write_reg(par, 0x19, 0x01); /* start osc */
+ write_reg(par, 0x01, 0x00); /* wakeup */
+ write_reg(par, 0x1F, 0x88);
+ mdelay(5);
+ write_reg(par, 0x1F, 0x80);
+ mdelay(5);
+ write_reg(par, 0x1F, 0x90);
+ mdelay(5);
+ write_reg(par, 0x1F, 0xD0);
+ mdelay(5);
+
+ /* color selection */
+ write_reg(par, 0x17, 0x05); /* 65k */
+
+	/* panel characteristic */
+	write_reg(par, 0x36, 0x00);
+
+	/* display on */
+ write_reg(par, 0x28, 0x38);
+ mdelay(40);
+ write_reg(par, 0x28, 0x3C);
+
+ /* orientation */
+ write_reg(par, 0x16, 0x60 | (par->bgr << 3));
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ write_reg(par, 0x02, (xs >> 8) & 0xFF);
+ write_reg(par, 0x03, xs & 0xFF);
+ write_reg(par, 0x04, (xe >> 8) & 0xFF);
+ write_reg(par, 0x05, xe & 0xFF);
+ write_reg(par, 0x06, (ys >> 8) & 0xFF);
+ write_reg(par, 0x07, ys & 0xFF);
+ write_reg(par, 0x08, (ye >> 8) & 0xFF);
+ write_reg(par, 0x09, ye & 0xFF);
+ write_reg(par, 0x22);
+}
+
+/*
+ Gamma string format:
+ VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
+ VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ unsigned long mask[] = {
+ 0b111111, 0b111111, 0b111111, 0b111111, 0b111111, 0b111111,
+ 0b1111111, 0b1111111,
+ 0b11111, 0b11111, 0b11111, 0b11111, 0b11111,
+ 0b1111};
+ int i, j;
+ int acc = 0;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < par->gamma.num_curves; i++)
+ for (j = 0; j < par->gamma.num_values; j++) {
+ acc += CURVE(i, j);
+ CURVE(i, j) &= mask[j];
+ }
+
+ if (acc == 0) /* skip if all values are zero */
+ return 0;
+
+ for (i = 0; i < par->gamma.num_curves; i++) {
+ write_reg(par, 0x40 + (i * 0x10), CURVE(i, 0));
+ write_reg(par, 0x41 + (i * 0x10), CURVE(i, 1));
+ write_reg(par, 0x42 + (i * 0x10), CURVE(i, 2));
+ write_reg(par, 0x43 + (i * 0x10), CURVE(i, 3));
+ write_reg(par, 0x44 + (i * 0x10), CURVE(i, 4));
+ write_reg(par, 0x45 + (i * 0x10), CURVE(i, 5));
+ write_reg(par, 0x46 + (i * 0x10), CURVE(i, 6));
+ write_reg(par, 0x47 + (i * 0x10), CURVE(i, 7));
+ write_reg(par, 0x48 + (i * 0x10), CURVE(i, 8));
+ write_reg(par, 0x49 + (i * 0x10), CURVE(i, 9));
+ write_reg(par, 0x4A + (i * 0x10), CURVE(i, 10));
+ write_reg(par, 0x4B + (i * 0x10), CURVE(i, 11));
+ write_reg(par, 0x4C + (i * 0x10), CURVE(i, 12));
+ }
+ write_reg(par, 0x5D, (CURVE(1, 0) << 4) | CURVE(0, 0));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .gamma_num = 2,
+ .gamma_len = 14,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "himax,hx8347d", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:hx8347d");
+MODULE_ALIAS("platform:hx8347d");
+
+MODULE_DESCRIPTION("FB driver for the HX8347D LCD Controller");
+MODULE_AUTHOR("Christian Vogelgsang");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_hx8353d.c b/drivers/staging/fbtft/fb_hx8353d.c
new file mode 100644
index 000000000000..c9512dc5f4d3
--- /dev/null
+++ b/drivers/staging/fbtft/fb_hx8353d.c
@@ -0,0 +1,166 @@
+/*
+ * FB driver for the HX8353D LCD Controller
+ *
+ * Copyright (c) 2014 Petr Olivka
+ * Copyright (c) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_hx8353d"
+#define DEFAULT_GAMMA "50 77 40 08 BF 00 03 0F 00 01 73 00 72 03 B0 0F 08 00 0F"
+
+static int init_display(struct fbtft_par *par)
+{
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+ mdelay(150);
+
+ /* SETEXTC */
+ write_reg(par, 0xB9, 0xFF, 0x83, 0x53);
+
+ /* RADJ */
+ write_reg(par, 0xB0, 0x3C, 0x01);
+
+ /* VCOM */
+ write_reg(par, 0xB6, 0x94, 0x6C, 0x50);
+
+ /* PWR */
+ write_reg(par, 0xB1, 0x00, 0x01, 0x1B, 0x03, 0x01, 0x08, 0x77, 0x89);
+
+ /* COLMOD */
+ write_reg(par, 0x3A, 0x05);
+
+ /* MEM ACCESS */
+ write_reg(par, 0x36, 0xC0);
+
+ /* SLPOUT - Sleep out & booster on */
+ write_reg(par, 0x11);
+ mdelay(150);
+
+ /* DISPON - Display On */
+ write_reg(par, 0x29);
+
+ /* RGBSET */
+ write_reg(par, 0x2D,
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* column address */
+ write_reg(par, 0x2a, xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+
+ /* row address */
+ write_reg(par, 0x2b, ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
+
+ /* memory write */
+ write_reg(par, 0x2c);
+}
+
+#define my (1 << 7)
+#define mx (1 << 6)
+#define mv (1 << 5)
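+/*
+ my/mx/mv are the standard MADCTL bits: MY flips the row (page) address
+ order, MX flips the column address order and MV swaps rows and columns;
+ set_var() below combines them for each rotation angle.
+*/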
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* madctl - memory data access control
+ rgb/bgr:
+ 1. mode selection pin srgb
+ rgb h/w pin for color filter setting: 0=rgb, 1=bgr
+ 2. madctl rgb bit
+ rgb-bgr order color filter panel: 0=rgb, 1=bgr */
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x36, mx | my | (par->bgr << 3));
+ break;
+ case 270:
+ write_reg(par, 0x36, my | mv | (par->bgr << 3));
+ break;
+ case 180:
+ write_reg(par, 0x36, (par->bgr << 3));
+ break;
+ case 90:
+ write_reg(par, 0x36, mx | mv | (par->bgr << 3));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ gamma string format:
+ 19 parameters, passed straight through to the SETGAMMA (E0h) command
+*/
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ write_reg(par, 0xE0,
+ curves[0], curves[1], curves[2], curves[3],
+ curves[4], curves[5], curves[6], curves[7],
+ curves[8], curves[9], curves[10], curves[11],
+ curves[12], curves[13], curves[14], curves[15],
+ curves[16], curves[17], curves[18]);
+
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = 128,
+ .height = 160,
+ .gamma_num = 1,
+ .gamma_len = 19,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "himax,hx8353d", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:hx8353d");
+MODULE_ALIAS("platform:hx8353d");
+
+MODULE_DESCRIPTION("FB driver for the HX8353D LCD Controller");
+MODULE_AUTHOR("Petr Olivka");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
new file mode 100644
index 000000000000..b26d89368da7
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -0,0 +1,234 @@
+/*
+ * FB driver for the ILI9320 LCD Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ili9320"
+#define WIDTH 240
+#define HEIGHT 320
+#define DEFAULT_GAMMA "07 07 6 0 0 0 5 5 4 0\n" \
+ "07 08 4 7 5 1 2 0 7 7"
+
+
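+/*
+ Read the controller ID from register 00h. The 16-bit device code
+ (0x9320 on a genuine ILI9320) arrives in the last two bytes of the
+ 4-byte SPI read.
+*/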
+static unsigned read_devicecode(struct fbtft_par *par)
+{
+ int ret;
+ u8 rxbuf[8] = {0, };
+
+ write_reg(par, 0x0000);
+ ret = par->fbtftops.read(par, rxbuf, 4);
+ return (rxbuf[2] << 8) | rxbuf[3];
+}
+
+static int init_display(struct fbtft_par *par)
+{
+ unsigned devcode;
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ devcode = read_devicecode(par);
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
+ devcode);
+ if ((devcode != 0x0000) && (devcode != 0x9320))
+ dev_warn(par->info->device,
+ "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
+ devcode);
+
+ /* Initialization sequence from ILI9320 Application Notes */
+
+ /* *********** Start Initial Sequence ********* */
+ write_reg(par, 0x00E5, 0x8000); /* Set the Vcore voltage; this setting is mandatory */
+ write_reg(par, 0x0000, 0x0001); /* Start internal OSC. */
+ write_reg(par, 0x0001, 0x0100); /* set SS and SM bit */
+ write_reg(par, 0x0002, 0x0700); /* set 1 line inversion */
+ write_reg(par, 0x0004, 0x0000); /* Resize register */
+ write_reg(par, 0x0008, 0x0202); /* set the back and front porch */
+ write_reg(par, 0x0009, 0x0000); /* set non-display area refresh cycle */
+ write_reg(par, 0x000A, 0x0000); /* FMARK function */
+ write_reg(par, 0x000C, 0x0000); /* RGB interface setting */
+ write_reg(par, 0x000D, 0x0000); /* Frame marker Position */
+ write_reg(par, 0x000F, 0x0000); /* RGB interface polarity */
+
+ /* ***********Power On sequence *************** */
+ write_reg(par, 0x0010, 0x0000); /* SAP, BT[3:0], AP, DSTB, SLP, STB */
+ write_reg(par, 0x0011, 0x0007); /* DC1[2:0], DC0[2:0], VC[2:0] */
+ write_reg(par, 0x0012, 0x0000); /* VREG1OUT voltage */
+ write_reg(par, 0x0013, 0x0000); /* VDV[4:0] for VCOM amplitude */
+ mdelay(200); /* Dis-charge capacitor power voltage */
+ write_reg(par, 0x0010, 0x17B0); /* SAP, BT[3:0], AP, DSTB, SLP, STB */
+ write_reg(par, 0x0011, 0x0031); /* R11h=0x0031 at VCI=3.3V DC1[2:0], DC0[2:0], VC[2:0] */
+ mdelay(50);
+ write_reg(par, 0x0012, 0x0138); /* R12h=0x0138 at VCI=3.3V VREG1OUT voltage */
+ mdelay(50);
+ write_reg(par, 0x0013, 0x1800); /* R13h=0x1800 at VCI=3.3V VDV[4:0] for VCOM amplitude */
+ write_reg(par, 0x0029, 0x0008); /* R29h=0x0008 at VCI=3.3V VCM[4:0] for VCOMH */
+ mdelay(50);
+ write_reg(par, 0x0020, 0x0000); /* GRAM horizontal Address */
+ write_reg(par, 0x0021, 0x0000); /* GRAM Vertical Address */
+
+ /* ------------------ Set GRAM area --------------- */
+ write_reg(par, 0x0050, 0x0000); /* Horizontal GRAM Start Address */
+ write_reg(par, 0x0051, 0x00EF); /* Horizontal GRAM End Address */
+ write_reg(par, 0x0052, 0x0000); /* Vertical GRAM Start Address */
+ write_reg(par, 0x0053, 0x013F); /* Vertical GRAM End Address */
+ write_reg(par, 0x0060, 0x2700); /* Gate Scan Line */
+ write_reg(par, 0x0061, 0x0001); /* NDL,VLE, REV */
+ write_reg(par, 0x006A, 0x0000); /* set scrolling line */
+
+ /* -------------- Partial Display Control --------- */
+ write_reg(par, 0x0080, 0x0000);
+ write_reg(par, 0x0081, 0x0000);
+ write_reg(par, 0x0082, 0x0000);
+ write_reg(par, 0x0083, 0x0000);
+ write_reg(par, 0x0084, 0x0000);
+ write_reg(par, 0x0085, 0x0000);
+
+ /* -------------- Panel Control ------------------- */
+ write_reg(par, 0x0090, 0x0010);
+ write_reg(par, 0x0092, 0x0000);
+ write_reg(par, 0x0093, 0x0003);
+ write_reg(par, 0x0095, 0x0110);
+ write_reg(par, 0x0097, 0x0000);
+ write_reg(par, 0x0098, 0x0000);
+ write_reg(par, 0x0007, 0x0173); /* 262K color and display ON */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ switch (par->info->var.rotate) {
+ /* R20h = Horizontal GRAM Start Address */
+ /* R21h = Vertical GRAM Start Address */
+ case 0:
+ write_reg(par, 0x0020, xs);
+ write_reg(par, 0x0021, ys);
+ break;
+ case 180:
+ write_reg(par, 0x0020, WIDTH - 1 - xs);
+ write_reg(par, 0x0021, HEIGHT - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x0020, WIDTH - 1 - ys);
+ write_reg(par, 0x0021, xs);
+ break;
+ case 90:
+ write_reg(par, 0x0020, ys);
+ write_reg(par, 0x0021, HEIGHT - 1 - xs);
+ break;
+ }
+ write_reg(par, 0x0022); /* Write Data to GRAM */
+}
+
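+/*
+ R03h (Entry Mode): the I/D[1:0] and AM bits select how the GRAM address
+ counter advances for each rotation; bit 12 (BGR) swaps the red/blue
+ order to match the panel's colour filter.
+*/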
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x3, (par->bgr << 12) | 0x30);
+ break;
+ case 270:
+ write_reg(par, 0x3, (par->bgr << 12) | 0x28);
+ break;
+ case 180:
+ write_reg(par, 0x3, (par->bgr << 12) | 0x00);
+ break;
+ case 90:
+ write_reg(par, 0x3, (par->bgr << 12) | 0x18);
+ break;
+ }
+ return 0;
+}
+
+/*
+ Gamma string format:
+ VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+*/
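+/*
+ The fbtft core passes one flat array of gamma_num * gamma_len values:
+ entries 0-9 hold the positive curve, entries 10-19 the negative one.
+ CURVE(num, idx) below indexes into this flat array.
+*/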
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ unsigned long mask[] = {
+ 0b11111, 0b11111, 0b111, 0b111, 0b111,
+ 0b111, 0b111, 0b111, 0b111, 0b111,
+ 0b11111, 0b11111, 0b111, 0b111, 0b111,
+ 0b111, 0b111, 0b111, 0b111, 0b111 };
+ int i, j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 10; j++)
+ CURVE(i, j) &= mask[i*par->gamma.num_values + j];
+
+ write_reg(par, 0x0030, CURVE(0, 5) << 8 | CURVE(0, 4));
+ write_reg(par, 0x0031, CURVE(0, 7) << 8 | CURVE(0, 6));
+ write_reg(par, 0x0032, CURVE(0, 9) << 8 | CURVE(0, 8));
+ write_reg(par, 0x0035, CURVE(0, 3) << 8 | CURVE(0, 2));
+ write_reg(par, 0x0036, CURVE(0, 1) << 8 | CURVE(0, 0));
+
+ write_reg(par, 0x0037, CURVE(1, 5) << 8 | CURVE(1, 4));
+ write_reg(par, 0x0038, CURVE(1, 7) << 8 | CURVE(1, 6));
+ write_reg(par, 0x0039, CURVE(1, 9) << 8 | CURVE(1, 8));
+ write_reg(par, 0x003C, CURVE(1, 3) << 8 | CURVE(1, 2));
+ write_reg(par, 0x003D, CURVE(1, 1) << 8 | CURVE(1, 0));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 16,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .gamma_num = 2,
+ .gamma_len = 10,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9320", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ili9320");
+MODULE_ALIAS("platform:ili9320");
+
+MODULE_DESCRIPTION("FB driver for the ILI9320 LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
new file mode 100644
index 000000000000..5f88145fac9b
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -0,0 +1,291 @@
+/*
+ * FB driver for the ILI9325 LCD Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * Based on ili9325.c by Jeroen Domburg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ili9325"
+#define WIDTH 240
+#define HEIGHT 320
+#define BPP 16
+#define FPS 20
+#define DEFAULT_GAMMA "0F 00 7 2 0 0 6 5 4 1\n" \
+ "04 16 2 7 6 3 2 1 7 7"
+
+
+static unsigned bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
+module_param(bt, uint, 0);
+MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
+
+static unsigned vc = 0b011; /* Vci1=Vci*0.80 */
+module_param(vc, uint, 0);
+MODULE_PARM_DESC(vc,
+"Sets the ratio factor of Vci to generate the reference voltages Vci1");
+
+static unsigned vrh = 0b1101; /* VREG1OUT=Vci*1.85 */
+module_param(vrh, uint, 0);
+MODULE_PARM_DESC(vrh,
+"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
+
+static unsigned vdv = 0b10010; /* VCOMH amplitude=VREG1OUT*0.98 */
+module_param(vdv, uint, 0);
+MODULE_PARM_DESC(vdv,
+"Select the factor of VREG1OUT to set the amplitude of Vcom");
+
+static unsigned vcm = 0b001010; /* VCOMH=VREG1OUT*0.735 */
+module_param(vcm, uint, 0);
+MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
+
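+/*
+ The five parameters above are masked to their field widths in
+ init_display() and merged into the power-control registers:
+ bt -> R10h BT[2:0], vc -> R11h VC[2:0], vrh -> R12h VRH[3:0],
+ vdv -> R13h VDV[4:0], vcm -> R29h VCM[5:0].
+*/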
+
+/*
+Verify that this configuration is within the Voltage limits
+
+Display module configuration: Vcc = IOVcc = Vci = 3.3V
+
+ Voltages
+----------
+Vci = 3.3
+Vci1 = Vci * 0.80 = 2.64
+DDVDH = Vci1 * 2 = 5.28
+VCL = -Vci1 = -2.64
+VREG1OUT = Vci * 1.85 = 4.88
+VCOMH = VREG1OUT * 0.735 = 3.59
+VCOM amplitude = VREG1OUT * 0.98 = 4.79
+VGH = Vci * 4 = 13.2
+VGL = -Vci * 4 = -13.2
+
+ Limits
+--------
+Power supplies
+1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30
+2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30
+2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30
+
+Source/VCOM power supply voltage
+ 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0
+-3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0
+VCI - VCL < 6.0 => 5.94 < 6.0
+
+Gate driver output voltage
+ 10 < VGH < 20 => 10 < 13.2 < 20
+-15 < VGL < -5 => -15 < -13.2 < -5
+VGH - VGL < 32 => 26.4 < 32
+
+VCOM driver output voltage
+VCOMH - VCOML < 6.0 => 4.79 < 6.0
+*/
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ bt &= 0b111;
+ vc &= 0b111;
+ vrh &= 0b1111;
+ vdv &= 0b11111;
+ vcm &= 0b111111;
+
+ /* Initialization sequence from ILI9325 Application Notes */
+
+ /* ----------- Start Initial Sequence ----------- */
+ write_reg(par, 0x00E3, 0x3008); /* Set internal timing */
+ write_reg(par, 0x00E7, 0x0012); /* Set internal timing */
+ write_reg(par, 0x00EF, 0x1231); /* Set internal timing */
+ write_reg(par, 0x0001, 0x0100); /* set SS and SM bit */
+ write_reg(par, 0x0002, 0x0700); /* set 1 line inversion */
+ write_reg(par, 0x0004, 0x0000); /* Resize register */
+ write_reg(par, 0x0008, 0x0207); /* set the back porch and front porch */
+ write_reg(par, 0x0009, 0x0000); /* set non-display area refresh cycle */
+ write_reg(par, 0x000A, 0x0000); /* FMARK function */
+ write_reg(par, 0x000C, 0x0000); /* RGB interface setting */
+ write_reg(par, 0x000D, 0x0000); /* Frame marker Position */
+ write_reg(par, 0x000F, 0x0000); /* RGB interface polarity */
+
+ /* ----------- Power On sequence ----------- */
+ write_reg(par, 0x0010, 0x0000); /* SAP, BT[3:0], AP, DSTB, SLP, STB */
+ write_reg(par, 0x0011, 0x0007); /* DC1[2:0], DC0[2:0], VC[2:0] */
+ write_reg(par, 0x0012, 0x0000); /* VREG1OUT voltage */
+ write_reg(par, 0x0013, 0x0000); /* VDV[4:0] for VCOM amplitude */
+ mdelay(200); /* Dis-charge capacitor power voltage */
+ write_reg(par, 0x0010, /* SAP, BT[3:0], AP, DSTB, SLP, STB */
+ (1 << 12) | (bt << 8) | (1 << 7) | (0b001 << 4));
+ write_reg(par, 0x0011, 0x220 | vc); /* DC1[2:0], DC0[2:0], VC[2:0] */
+ mdelay(50); /* Delay 50ms */
+ write_reg(par, 0x0012, vrh); /* Internal reference voltage= Vci; */
+ mdelay(50); /* Delay 50ms */
+ write_reg(par, 0x0013, vdv << 8); /* Set VDV[4:0] for VCOM amplitude */
+ write_reg(par, 0x0029, vcm); /* Set VCM[5:0] for VCOMH */
+ write_reg(par, 0x002B, 0x000C); /* Set Frame Rate */
+ mdelay(50); /* Delay 50ms */
+ write_reg(par, 0x0020, 0x0000); /* GRAM horizontal Address */
+ write_reg(par, 0x0021, 0x0000); /* GRAM Vertical Address */
+
+ /*------------------ Set GRAM area --------------- */
+ write_reg(par, 0x0050, 0x0000); /* Horizontal GRAM Start Address */
+ write_reg(par, 0x0051, 0x00EF); /* Horizontal GRAM End Address */
+ write_reg(par, 0x0052, 0x0000); /* Vertical GRAM Start Address */
+ write_reg(par, 0x0053, 0x013F); /* Vertical GRAM End Address */
+ write_reg(par, 0x0060, 0xA700); /* Gate Scan Line */
+ write_reg(par, 0x0061, 0x0001); /* NDL,VLE, REV */
+ write_reg(par, 0x006A, 0x0000); /* set scrolling line */
+
+ /*-------------- Partial Display Control --------- */
+ write_reg(par, 0x0080, 0x0000);
+ write_reg(par, 0x0081, 0x0000);
+ write_reg(par, 0x0082, 0x0000);
+ write_reg(par, 0x0083, 0x0000);
+ write_reg(par, 0x0084, 0x0000);
+ write_reg(par, 0x0085, 0x0000);
+
+ /*-------------- Panel Control ------------------- */
+ write_reg(par, 0x0090, 0x0010);
+ write_reg(par, 0x0092, 0x0600);
+ write_reg(par, 0x0007, 0x0133); /* 262K color and display ON */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+ switch (par->info->var.rotate) {
+ /* R20h = Horizontal GRAM Start Address */
+ /* R21h = Vertical GRAM Start Address */
+ case 0:
+ write_reg(par, 0x0020, xs);
+ write_reg(par, 0x0021, ys);
+ break;
+ case 180:
+ write_reg(par, 0x0020, WIDTH - 1 - xs);
+ write_reg(par, 0x0021, HEIGHT - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x0020, WIDTH - 1 - ys);
+ write_reg(par, 0x0021, xs);
+ break;
+ case 90:
+ write_reg(par, 0x0020, ys);
+ write_reg(par, 0x0021, HEIGHT - 1 - xs);
+ break;
+ }
+ write_reg(par, 0x0022); /* Write Data to GRAM */
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ /* AM: GRAM update direction */
+ case 0:
+ write_reg(par, 0x03, 0x0030 | (par->bgr << 12));
+ break;
+ case 180:
+ write_reg(par, 0x03, 0x0000 | (par->bgr << 12));
+ break;
+ case 270:
+ write_reg(par, 0x03, 0x0028 | (par->bgr << 12));
+ break;
+ case 90:
+ write_reg(par, 0x03, 0x0018 | (par->bgr << 12));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Gamma string format:
+ VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ unsigned long mask[] = {
+ 0b11111, 0b11111, 0b111, 0b111, 0b111,
+ 0b111, 0b111, 0b111, 0b111, 0b111,
+ 0b11111, 0b11111, 0b111, 0b111, 0b111,
+ 0b111, 0b111, 0b111, 0b111, 0b111 };
+ int i, j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 10; j++)
+ CURVE(i, j) &= mask[i*par->gamma.num_values + j];
+
+ write_reg(par, 0x0030, CURVE(0, 5) << 8 | CURVE(0, 4));
+ write_reg(par, 0x0031, CURVE(0, 7) << 8 | CURVE(0, 6));
+ write_reg(par, 0x0032, CURVE(0, 9) << 8 | CURVE(0, 8));
+ write_reg(par, 0x0035, CURVE(0, 3) << 8 | CURVE(0, 2));
+ write_reg(par, 0x0036, CURVE(0, 1) << 8 | CURVE(0, 0));
+
+ write_reg(par, 0x0037, CURVE(1, 5) << 8 | CURVE(1, 4));
+ write_reg(par, 0x0038, CURVE(1, 7) << 8 | CURVE(1, 6));
+ write_reg(par, 0x0039, CURVE(1, 9) << 8 | CURVE(1, 8));
+ write_reg(par, 0x003C, CURVE(1, 3) << 8 | CURVE(1, 2));
+ write_reg(par, 0x003D, CURVE(1, 1) << 8 | CURVE(1, 0));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 16,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .bpp = BPP,
+ .fps = FPS,
+ .gamma_num = 2,
+ .gamma_len = 10,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9325", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ili9325");
+MODULE_ALIAS("platform:ili9325");
+
+MODULE_DESCRIPTION("FB driver for the ILI9325 LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
new file mode 100644
index 000000000000..985687d94ec2
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -0,0 +1,163 @@
+/*
+ * FB driver for the ILI9340 LCD Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ili9340"
+#define WIDTH 240
+#define HEIGHT 320
+
+
+/* Init sequence taken from: Arduino Library for the Adafruit 2.2" display */
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ write_reg(par, 0xEF, 0x03, 0x80, 0x02);
+ write_reg(par, 0xCF, 0x00, 0xC1, 0x30);
+ write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81);
+ write_reg(par, 0xE8, 0x85, 0x00, 0x78);
+ write_reg(par, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02);
+ write_reg(par, 0xF7, 0x20);
+ write_reg(par, 0xEA, 0x00, 0x00);
+
+ /* Power Control 1 */
+ write_reg(par, 0xC0, 0x23);
+
+ /* Power Control 2 */
+ write_reg(par, 0xC1, 0x10);
+
+ /* VCOM Control 1 */
+ write_reg(par, 0xC5, 0x3e, 0x28);
+
+ /* VCOM Control 2 */
+ write_reg(par, 0xC7, 0x86);
+
+ /* COLMOD: Pixel Format Set */
+ /* 16 bits/pixel */
+ write_reg(par, 0x3A, 0x55);
+
+ /* Frame Rate Control */
+ /* Division ratio = fosc, Frame Rate = 79Hz */
+ write_reg(par, 0xB1, 0x00, 0x18);
+
+ /* Display Function Control */
+ write_reg(par, 0xB6, 0x08, 0x82, 0x27);
+
+ /* Gamma Function Disable */
+ write_reg(par, 0xF2, 0x00);
+
+ /* Gamma curve selected */
+ write_reg(par, 0x26, 0x01);
+
+ /* Positive Gamma Correction */
+ write_reg(par, 0xE0,
+ 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
+ 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
+
+ /* Negative Gamma Correction */
+ write_reg(par, 0xE1,
+ 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
+ 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
+
+ /* Sleep OUT */
+ write_reg(par, 0x11);
+
+ mdelay(120);
+
+ /* Display ON */
+ write_reg(par, 0x29);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address */
+ write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+
+ /* Row address */
+ write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+#define ILI9340_MADCTL_MV 0x20
+#define ILI9340_MADCTL_MX 0x40
+#define ILI9340_MADCTL_MY 0x80
+static int set_var(struct fbtft_par *par)
+{
+ u8 val;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ case 270:
+ val = ILI9340_MADCTL_MV;
+ break;
+ case 180:
+ val = ILI9340_MADCTL_MY;
+ break;
+ case 90:
+ val = ILI9340_MADCTL_MV | ILI9340_MADCTL_MY | ILI9340_MADCTL_MX;
+ break;
+ default:
+ val = ILI9340_MADCTL_MX;
+ break;
+ }
+ /* Memory Access Control */
+ write_reg(par, 0x36, val | (par->bgr << 3));
+
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9340", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ili9340");
+MODULE_ALIAS("platform:ili9340");
+
+MODULE_DESCRIPTION("FB driver for the ILI9340 LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
new file mode 100644
index 000000000000..225b2d84371f
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ili9341.c
@@ -0,0 +1,179 @@
+/*
+ * FB driver for the ILI9341 LCD display controller
+ *
+ * This display uses 9-bit SPI: Data/Command bit + 8 data bits
+ * For platforms that don't support 9-bit, the driver can emulate it
+ * using 8-bit transfers.
+ * This is done by transferring eight 9-bit words in 9 bytes.
+ *
+ * Copyright (C) 2013 Christian Vogelgsang
+ * Based on adafruit22fb.c by Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ili9341"
+#define WIDTH 240
+#define HEIGHT 320
+#define TXBUFLEN (4 * PAGE_SIZE)
+#define DEFAULT_GAMMA "1F 1A 18 0A 0F 06 45 87 32 0A 07 02 07 05 00\n" \
+ "00 25 27 05 10 09 3A 78 4D 05 18 0D 38 3A 1F"
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ /* startup sequence for MI0283QT-9A */
+ write_reg(par, 0x01); /* software reset */
+ mdelay(5);
+ write_reg(par, 0x28); /* display off */
+ /* --------------------------------------------------------- */
+ write_reg(par, 0xCF, 0x00, 0x83, 0x30);
+ write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81);
+ write_reg(par, 0xE8, 0x85, 0x01, 0x79);
+ write_reg(par, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02);
+ write_reg(par, 0xF7, 0x20);
+ write_reg(par, 0xEA, 0x00, 0x00);
+ /* ------------power control-------------------------------- */
+ write_reg(par, 0xC0, 0x26);
+ write_reg(par, 0xC1, 0x11);
+ /* ------------VCOM --------- */
+ write_reg(par, 0xC5, 0x35, 0x3E);
+ write_reg(par, 0xC7, 0xBE);
+ /* ------------memory access control------------------------ */
+ write_reg(par, 0x3A, 0x55); /* 16bit pixel */
+ /* ------------frame rate----------------------------------- */
+ write_reg(par, 0xB1, 0x00, 0x1B);
+ /* ------------Gamma---------------------------------------- */
+ /* write_reg(par, 0xF2, 0x08); */ /* Gamma Function Disable */
+ write_reg(par, 0x26, 0x01);
+ /* ------------display-------------------------------------- */
+ write_reg(par, 0xB7, 0x07); /* entry mode set */
+ write_reg(par, 0xB6, 0x0A, 0x82, 0x27, 0x00);
+ write_reg(par, 0x11); /* sleep out */
+ mdelay(100);
+ write_reg(par, 0x29); /* display on */
+ mdelay(20);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address set */
+ write_reg(par, 0x2A,
+ (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+
+ /* Row address set */
+ write_reg(par, 0x2B,
+ (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+#define MEM_Y (7) /* MY row address order */
+#define MEM_X (6) /* MX column address order */
+#define MEM_V (5) /* MV row / column exchange */
+#define MEM_L (4) /* ML vertical refresh order */
+#define MEM_H (2) /* MH horizontal refresh order */
+#define MEM_BGR (3) /* RGB-BGR Order */
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x36, (1 << MEM_X) | (par->bgr << MEM_BGR));
+ break;
+ case 270:
+ write_reg(par, 0x36,
+ (1<<MEM_V) | (1 << MEM_L) | (par->bgr << MEM_BGR));
+ break;
+ case 180:
+ write_reg(par, 0x36, (1 << MEM_Y) | (par->bgr << MEM_BGR));
+ break;
+ case 90:
+ write_reg(par, 0x36, (1 << MEM_Y) | (1 << MEM_X) |
+ (1 << MEM_V) | (par->bgr << MEM_BGR));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Gamma string format:
+ Positive: Par1 Par2 [...] Par15
+ Negative: Par1 Par2 [...] Par15
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ int i;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ for (i = 0; i < par->gamma.num_curves; i++)
+ write_reg(par, 0xE0 + i,
+ CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
+ CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
+ CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
+ CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
+ CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = TXBUFLEN,
+ .gamma_num = 2,
+ .gamma_len = 15,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9341", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ili9341");
+MODULE_ALIAS("platform:ili9341");
+
+MODULE_DESCRIPTION("FB driver for the ILI9341 LCD display controller");
+MODULE_AUTHOR("Christian Vogelgsang");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
new file mode 100644
index 000000000000..725157a1ac41
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -0,0 +1,117 @@
+/*
+ * FB driver for the ILI9481 LCD Controller
+ *
+ * Copyright (c) 2014 Petr Olivka
+ * Copyright (c) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ili9481"
+#define WIDTH 320
+#define HEIGHT 480
+
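+/*
+ Init sequence encoding understood by the fbtft core: -1 starts a new
+ command (the values that follow are the command byte and its
+ parameters), -2 inserts a delay given in milliseconds, -3 ends the list.
+*/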
+static int default_init_sequence[] = {
+
+ /* SLP_OUT - Sleep out */
+ -1, 0x11,
+ -2, 50,
+ /* Power setting */
+ -1, 0xD0, 0x07, 0x42, 0x18,
+ /* VCOM */
+ -1, 0xD1, 0x00, 0x07, 0x10,
+ /* Power setting for norm. mode */
+ -1, 0xD2, 0x01, 0x02,
+ /* Panel driving setting */
+ -1, 0xC0, 0x10, 0x3B, 0x00, 0x02, 0x11,
+ /* Frame rate & inv. */
+ -1, 0xC5, 0x03,
+ /* Pixel format */
+ -1, 0x3A, 0x55,
+ /* Gamma */
+ -1, 0xC8, 0x00, 0x32, 0x36, 0x45, 0x06, 0x16,
+ 0x37, 0x75, 0x77, 0x54, 0x0C, 0x00,
+ /* DISP_ON */
+ -1, 0x29,
+ -3
+};
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* column address */
+ write_reg(par, 0x2a, xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+
+ /* row address */
+ write_reg(par, 0x2b, ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
+
+ /* memory write */
+ write_reg(par, 0x2c);
+}
+
+#define HFLIP 0x01
+#define VFLIP 0x02
+#define ROWxCOL 0x20
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ case 270:
+ write_reg(par, 0x36, ROWxCOL | HFLIP | VFLIP | (par->bgr << 3));
+ break;
+ case 180:
+ write_reg(par, 0x36, VFLIP | (par->bgr << 3));
+ break;
+ case 90:
+ write_reg(par, 0x36, ROWxCOL | (par->bgr << 3));
+ break;
+ default:
+ write_reg(par, 0x36, HFLIP | (par->bgr << 3));
+ break;
+ }
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .init_sequence = default_init_sequence,
+ .fbtftops = {
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9481", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ili9481");
+MODULE_ALIAS("platform:ili9481");
+
+MODULE_DESCRIPTION("FB driver for the ILI9481 LCD Controller");
+MODULE_AUTHOR("Petr Olivka");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
new file mode 100644
index 000000000000..95b89999d32a
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -0,0 +1,121 @@
+/*
+ * FB driver for the ILI9486 LCD Controller
+ *
+ * Copyright (C) 2014 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ili9486"
+#define WIDTH 320
+#define HEIGHT 480
+
+
+/* this init sequence matches PiScreen */
+static int default_init_sequence[] = {
+ /* Interface Mode Control */
+ -1, 0xb0, 0x0,
+ /* Sleep OUT */
+ -1, 0x11,
+ -2, 250,
+ /* Interface Pixel Format */
+ -1, 0x3A, 0x55,
+ /* Power Control 3 */
+ -1, 0xC2, 0x44,
+ /* VCOM Control 1 */
+ -1, 0xC5, 0x00, 0x00, 0x00, 0x00,
+ /* PGAMCTRL(Positive Gamma Control) */
+ -1, 0xE0, 0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
+ 0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x00,
+ /* NGAMCTRL(Negative Gamma Control) */
+ -1, 0xE1, 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00,
+ /* Digital Gamma Control 1 */
+ -1, 0xE2, 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00,
+ /* Sleep OUT */
+ -1, 0x11,
+ /* Display ON */
+ -1, 0x29,
+ /* end marker */
+ -3
+};
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address */
+ write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+
+ /* Row address */
+ write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x36, 0x80 | (par->bgr << 3));
+ break;
+ case 90:
+ write_reg(par, 0x36, 0x20 | (par->bgr << 3));
+ break;
+ case 180:
+ write_reg(par, 0x36, 0x40 | (par->bgr << 3));
+ break;
+ case 270:
+ write_reg(par, 0x36, 0xE0 | (par->bgr << 3));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .init_sequence = default_init_sequence,
+ .fbtftops = {
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9486", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ili9486");
+MODULE_ALIAS("platform:ili9486");
+
+MODULE_DESCRIPTION("FB driver for the ILI9486 LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_pcd8544.c b/drivers/staging/fbtft/fb_pcd8544.c
new file mode 100644
index 000000000000..8b9ebfb49ef8
--- /dev/null
+++ b/drivers/staging/fbtft/fb_pcd8544.c
@@ -0,0 +1,177 @@
+/*
+ * FB driver for the PCD8544 LCD Controller
+ *
+ * The display is monochrome and the video memory is RGB565.
+ * Any pixel value except 0 turns the pixel on.
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_pcd8544"
+#define WIDTH 84
+#define HEIGHT 48
+#define TXBUFLEN (84*6)
+#define DEFAULT_GAMMA "40" /* gamma is used to control contrast in this driver */
+
+static unsigned tc;
+module_param(tc, uint, 0);
+MODULE_PARM_DESC(tc, "TC[1:0] Temperature coefficient: 0-3 (default: 0)");
+
+static unsigned bs = 4;
+module_param(bs, uint, 0);
+MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ /* Function set */
+ write_reg(par, 0x21); /* 5:1 1
+ 2:0 PD - Powerdown control: chip is active
+ 1:0 V - Entry mode: horizontal addressing
+ 0:1 H - Extended instruction set control: extended
+ */
+
+ /* H=1 Temperature control */
+ write_reg(par, 0x04 | (tc & 0x3)); /*
+ 2:1 1
+ 1:x TC1 - Temperature Coefficient: 0x10
+ 0:x TC0
+ */
+
+ /* H=1 Bias system */
+ write_reg(par, 0x10 | (bs & 0x7)); /*
+ 4:1 1
+ 3:0 0
+ 2:x BS2 - Bias System
+ 1:x BS1
+ 0:x BS0
+ */
+
+ /* Function set */
+ write_reg(par, 0x22); /* 5:1 1
+ 2:0 PD - Powerdown control: chip is active
+ 1:1 V - Entry mode: vertical addressing
+ 0:0 H - Extended instruction set control: basic
+ */
+
+ /* H=0 Display control */
+ write_reg(par, 0x08 | 4); /*
+ 3:1 1
+ 2:1 D - DE: 10=normal mode
+ 1:0 0
+ 0:0 E
+ */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* H=0 Set X address of RAM */
+ write_reg(par, 0x80); /* 7:1 1
+ 6-0: X[6:0] - 0x00
+ */
+
+ /* H=0 Set Y address of RAM */
+ write_reg(par, 0x40); /* 7:0 0
+ 6:1 1
+ 2-0: Y[2:0] - 0x0
+ */
+}
+
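+/*
+ The PCD8544 RAM is organised as 6 banks of 84 columns with 8 vertical
+ pixels per byte. write_vmem() walks the RGB565 framebuffer column by
+ column, packs each group of 8 rows into one byte (bit 0 = topmost row)
+ and pushes the whole 84*6 byte buffer in a single write, relying on the
+ vertical addressing mode selected in init_display().
+*/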
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_base;
+ u8 *buf = par->txbuf.buf;
+ int x, y, i;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
+ for (x = 0; x < 84; x++) {
+ for (y = 0; y < 6; y++) {
+ *buf = 0x00;
+ for (i = 0; i < 8; i++) {
+ *buf |= (vmem16[(y*8+i)*84+x] ? 1 : 0) << i;
+ }
+ buf++;
+ }
+ }
+
+ /* Write data */
+ gpio_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf, 6*84);
+ if (ret < 0)
+ dev_err(par->info->device, "%s: write failed and returned: %d\n", __func__, ret);
+
+ return ret;
+}
+
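+/*
+ The single "gamma" value is really the Vop (operating voltage) setting
+ that controls contrast: it is masked to 7 bits and written with the
+ Set-Vop command (0x80 | Vop) while the extended instruction set is
+ temporarily enabled.
+*/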
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ curves[0] &= 0x7F;
+
+ write_reg(par, 0x23); /* turn on extended instruction set */
+ write_reg(par, 0x80 | curves[0]);
+ write_reg(par, 0x22); /* turn off extended instruction set */
+
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = TXBUFLEN,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .write_vmem = write_vmem,
+ .set_gamma = set_gamma,
+ },
+ .backlight = 1,
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "philips,pdc8544", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("spi:pdc8544");
+
+MODULE_DESCRIPTION("FB driver for the PCD8544 LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
new file mode 100644
index 000000000000..c323c06344fd
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -0,0 +1,331 @@
+/******************************************************************************
+
+ ProjectName: FBTFT driver ***** *****
+ for the RA8875 LCD Controller * * ************
+ * ** ** * *
+ Copyright © by Pf@nne & NOTRO * * * * * **** *
+ * * * * * * *
+ Last modification by: * * * * **** *
+ - Pf@nne (pf@nne-mail.de) * * ***** *
+ * * * *******
+ ***** * *
+ Date : 10.06.2014 * *
+ Version : V1.13 *****
+ Revision : 5
+
+*******************************************************************************
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <linux/gpio.h>
+#include "fbtft.h"
+
+#define DRVNAME "fb_ra8875"
+
+static int write_spi(struct fbtft_par *par, void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = len,
+ .speed_hz = 1000000,
+ };
+ struct spi_message m;
+
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ if (!par->spi) {
+ dev_err(par->info->device,
+ "%s: par->spi is unexpectedly NULL\n", __func__);
+ return -1;
+ }
+
+ spi_message_init(&m);
+ if (par->txbuf.dma && buf == par->txbuf.buf) {
+ t.tx_dma = par->txbuf.dma;
+ m.is_dma_mapped = 1;
+ }
+ spi_message_add_tail(&t, &m);
+ return spi_sync(par->spi, &m);
+}
+
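+/*
+ init_display() programs the PLL, pixel clock and horizontal/vertical
+ timing registers for one of the four supported panel resolutions
+ (320x240, 480x272, 640x480, 800x480) and rejects anything else.
+*/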
+static int init_display(struct fbtft_par *par)
+{
+ gpio_set_value(par->gpio.dc, 1);
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "%s()\n", __func__);
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "display size %dx%d\n", par->info->var.xres, par->info->var.yres);
+
+ par->fbtftops.reset(par);
+
+ if ((par->info->var.xres == 320) && (par->info->var.yres == 240)) {
+ /* PLL clock frequency */
+ write_reg(par, 0x88 , 0x0A);
+ write_reg(par, 0x89 , 0x02);
+ mdelay(10);
+ /* color depth / MCU Interface */
+ write_reg(par, 0x10 , 0x0C);
+ /* pixel clock period */
+ write_reg(par, 0x04 , 0x03);
+ mdelay(1);
+ /* horizontal settings */
+ write_reg(par, 0x14 , 0x27);
+ write_reg(par, 0x15 , 0x00);
+ write_reg(par, 0x16 , 0x05);
+ write_reg(par, 0x17 , 0x04);
+ write_reg(par, 0x18 , 0x03);
+ /* vertical settings */
+ write_reg(par, 0x19 , 0xEF);
+ write_reg(par, 0x1A , 0x00);
+ write_reg(par, 0x1B , 0x05);
+ write_reg(par, 0x1C , 0x00);
+ write_reg(par, 0x1D , 0x0E);
+ write_reg(par, 0x1E , 0x00);
+ write_reg(par, 0x1F , 0x02);
+ } else if ((par->info->var.xres == 480) && (par->info->var.yres == 272)) {
+ /* PLL clock frequency */
+ write_reg(par, 0x88 , 0x0A);
+ write_reg(par, 0x89 , 0x02);
+ mdelay(10);
+ /* color depth / MCU Interface */
+ write_reg(par, 0x10 , 0x0C);
+ /* pixel clock period */
+ write_reg(par, 0x04 , 0x82);
+ mdelay(1);
+ /* horizontal settings */
+ write_reg(par, 0x14 , 0x3B);
+ write_reg(par, 0x15 , 0x00);
+ write_reg(par, 0x16 , 0x01);
+ write_reg(par, 0x17 , 0x00);
+ write_reg(par, 0x18 , 0x05);
+ /* vertical settings */
+ write_reg(par, 0x19 , 0x0F);
+ write_reg(par, 0x1A , 0x01);
+ write_reg(par, 0x1B , 0x02);
+ write_reg(par, 0x1C , 0x00);
+ write_reg(par, 0x1D , 0x07);
+ write_reg(par, 0x1E , 0x00);
+ write_reg(par, 0x1F , 0x09);
+ } else if ((par->info->var.xres == 640) && (par->info->var.yres == 480)) {
+ /* PLL clock frequency */
+ write_reg(par, 0x88 , 0x0B);
+ write_reg(par, 0x89 , 0x02);
+ mdelay(10);
+ /* color depth / MCU Interface */
+ write_reg(par, 0x10 , 0x0C);
+ /* pixel clock period */
+ write_reg(par, 0x04 , 0x01);
+ mdelay(1);
+ /* horizontal settings */
+ write_reg(par, 0x14 , 0x4F);
+ write_reg(par, 0x15 , 0x05);
+ write_reg(par, 0x16 , 0x0F);
+ write_reg(par, 0x17 , 0x01);
+ write_reg(par, 0x18 , 0x00);
+ /* vertical settings */
+ write_reg(par, 0x19 , 0xDF);
+ write_reg(par, 0x1A , 0x01);
+ write_reg(par, 0x1B , 0x0A);
+ write_reg(par, 0x1C , 0x00);
+ write_reg(par, 0x1D , 0x0E);
+ write_reg(par, 0x1E , 0x00);
+ write_reg(par, 0x1F , 0x01);
+ } else if ((par->info->var.xres == 800) && (par->info->var.yres == 480)) {
+ /* PLL clock frequency */
+ write_reg(par, 0x88 , 0x0B);
+ write_reg(par, 0x89 , 0x02);
+ mdelay(10);
+ /* color depth / MCU Interface */
+ write_reg(par, 0x10 , 0x0C);
+ /* pixel clock period */
+ write_reg(par, 0x04 , 0x81);
+ mdelay(1);
+ /* horizontal settings */
+ write_reg(par, 0x14 , 0x63);
+ write_reg(par, 0x15 , 0x03);
+ write_reg(par, 0x16 , 0x03);
+ write_reg(par, 0x17 , 0x02);
+ write_reg(par, 0x18 , 0x00);
+ /* vertical settings */
+ write_reg(par, 0x19 , 0xDF);
+ write_reg(par, 0x1A , 0x01);
+ write_reg(par, 0x1B , 0x14);
+ write_reg(par, 0x1C , 0x00);
+ write_reg(par, 0x1D , 0x06);
+ write_reg(par, 0x1E , 0x00);
+ write_reg(par, 0x1F , 0x01);
+ } else {
+ dev_err(par->info->device, "display size is not supported!!");
+ return -1;
+ }
+
+ /* PWM clock */
+ write_reg(par, 0x8a , 0x81);
+ write_reg(par, 0x8b , 0xFF);
+ mdelay(10);
+
+ /* Display ON */
+ write_reg(par, 0x01 , 0x80);
+ mdelay(10);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Set_Active_Window */
+ write_reg(par, 0x30 , xs & 0x00FF);
+ write_reg(par, 0x31 , (xs & 0xFF00) >> 8);
+ write_reg(par, 0x32 , ys & 0x00FF);
+ write_reg(par, 0x33 , (ys & 0xFF00) >> 8);
+ write_reg(par, 0x34 , (xs+xe) & 0x00FF);
+ write_reg(par, 0x35 , ((xs+xe) & 0xFF00) >> 8);
+ write_reg(par, 0x36 , (ys+ye) & 0x00FF);
+ write_reg(par, 0x37 , ((ys+ye) & 0xFF00) >> 8);
+
+ /* Set_Memory_Write_Cursor */
+ write_reg(par, 0x46, xs & 0xff);
+ write_reg(par, 0x47, (xs >> 8) & 0x03);
+ write_reg(par, 0x48, ys & 0xff);
+ write_reg(par, 0x49, (ys >> 8) & 0x01);
+
+ write_reg(par, 0x02);
+}
+
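+/*
+ A register access on the RA8875 SPI interface is split into two
+ transfers: a command-write cycle (start byte 0x80 followed by the
+ register index) and a data-write cycle (start byte 0x00 followed by the
+ data bytes). Register writes go out at a reduced SPI clock through
+ write_spi() and fbtft_write_spi is restored afterwards.
+*/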
+static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i, ret;
+ u8 *buf = (u8 *)par->buf;
+
+ /* slow down spi-speed for writing registers */
+ par->fbtftops.write = write_spi;
+
+ if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) {
+ va_start(args, len);
+ for (i = 0; i < len; i++)
+ buf[i] = (u8)va_arg(args, unsigned int);
+ va_end(args);
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device,
+ u8, buf, len, "%s: ", __func__);
+ }
+
+ va_start(args, len);
+ *buf++ = 0x80;
+ *buf = (u8)va_arg(args, unsigned int);
+ ret = par->fbtftops.write(par, par->buf, 2);
+ if (ret < 0) {
+ va_end(args);
+ dev_err(par->info->device, "%s: write() failed and returned %dn",
+ __func__, ret);
+ return;
+ }
+ len--;
+
+ udelay(100);
+
+ if (len) {
+ buf = (u8 *)par->buf;
+ *buf++ = 0x00;
+ i = len;
+ while (i--)
+ *buf++ = (u8)va_arg(args, unsigned int);
+
+ ret = par->fbtftops.write(par, par->buf, len + 1);
+ if (ret < 0) {
+ va_end(args);
+ dev_err(par->info->device, "%s: write() failed and returned %dn",
+ __func__, ret);
+ return;
+ }
+ }
+ va_end(args);
+
+ /* restore user spi-speed */
+ par->fbtftops.write = fbtft_write_spi;
+ udelay(100);
+}
+
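+/*
+ Pixel data is streamed in chunks that fit the tx buffer: each chunk is
+ prefixed with a single 0x00 data-write start byte and the native-endian
+ RGB565 framebuffer words are converted to big endian on the way out.
+*/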
+static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16;
+ u16 *txbuf16 = (u16 *)par->txbuf.buf;
+ size_t remain;
+ size_t to_copy;
+ size_t tx_array_size;
+ int i;
+ int ret = 0;
+ size_t startbyte_size = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
+ __func__, offset, len);
+
+ remain = len / 2;
+ vmem16 = (u16 *)(par->info->screen_base + offset);
+ tx_array_size = par->txbuf.len / 2;
+ txbuf16 = (u16 *)(par->txbuf.buf + 1);
+ tx_array_size -= 2;
+ *(u8 *)(par->txbuf.buf) = 0x00;
+ startbyte_size = 1;
+
+ while (remain) {
+ to_copy = remain > tx_array_size ? tx_array_size : remain;
+ dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
+
+ for (i = 0; i < to_copy; i++)
+ txbuf16[i] = cpu_to_be16(vmem16[i]);
+
+ vmem16 = vmem16 + to_copy;
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ startbyte_size + to_copy * 2);
+ if (ret < 0)
+ return ret;
+ remain -= to_copy;
+ }
+
+ return ret;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .write_register = write_reg8_bus8,
+ .write_vmem = write_vmem16_bus8,
+ .write = write_spi,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "raio,ra8875", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ra8875");
+MODULE_ALIAS("platform:ra8875");
+
+MODULE_DESCRIPTION("FB driver for the RA8875 LCD Controller");
+MODULE_AUTHOR("Pf@nne");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
new file mode 100644
index 000000000000..e412a42443e5
--- /dev/null
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -0,0 +1,168 @@
+/*
+ * FB driver for the S6D02A1 LCD Controller
+ *
+ * Based on fb_st7735r.c by Noralf Tronnes
+ * Init code from UTFT library by Henning Karlsen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_s6d02a1"
+
+static int default_init_sequence[] = {
+
+ -1, 0xf0, 0x5a, 0x5a,
+
+ -1, 0xfc, 0x5a, 0x5a,
+
+ -1, 0xfa, 0x02, 0x1f, 0x00, 0x10, 0x22, 0x30, 0x38, 0x3A, 0x3A, 0x3A, 0x3A, 0x3A, 0x3d, 0x02, 0x01,
+
+ -1, 0xfb, 0x21, 0x00, 0x02, 0x04, 0x07, 0x0a, 0x0b, 0x0c, 0x0c, 0x16, 0x1e, 0x30, 0x3f, 0x01, 0x02,
+
+ /* power setting sequence */
+ -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x01, 0x01, 0x00, 0x1f, 0x1f,
+
+ -1, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+
+ -1, 0xf5, 0x00, 0x70, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x66, 0x06,
+
+ -1, 0xf6, 0x02, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x01, 0x00,
+
+ -1, 0xf2, 0x00, 0x01, 0x03, 0x08, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x08, 0x08,
+
+ -1, 0xf8, 0x11,
+
+ -1, 0xf7, 0xc8, 0x20, 0x00, 0x00,
+
+ -1, 0xf3, 0x00, 0x00,
+
+ -1, 0x11,
+ -2, 50,
+
+ -1, 0xf3, 0x00, 0x01,
+ -2, 50,
+ -1, 0xf3, 0x00, 0x03,
+ -2, 50,
+ -1, 0xf3, 0x00, 0x07,
+ -2, 50,
+ -1, 0xf3, 0x00, 0x0f,
+ -2, 50,
+
+ -1, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -2, 50,
+
+ -1, 0xf3, 0x00, 0x1f,
+ -2, 50,
+ -1, 0xf3, 0x00, 0x7f,
+ -2, 50,
+
+ -1, 0xf3, 0x00, 0xff,
+ -2, 50,
+
+ -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x01, 0x00, 0x16, 0x16,
+
+ -1, 0xf4, 0x00, 0x09, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+
+ /* initializing sequence */
+
+ -1, 0x36, 0x08,
+
+ -1, 0x35, 0x00,
+
+ -1, 0x3a, 0x05,
+
+ /* gamma setting sequence */
+ -1, 0x26, 0x01, /* preset gamma curves, possible values 0x01, 0x02, 0x04, 0x08 */
+
+ -2, 150,
+ -1, 0x29,
+ -1, 0x2c,
+ /* end marker */
+ -3
+
+};
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address */
+ write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+
+ /* Row address */
+ write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+#define MY (1 << 7)
+#define MX (1 << 6)
+#define MV (1 << 5)
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* MADCTL - Memory data access control
+ RGB/BGR:
+ 1. Mode selection pin SRGB
+ RGB H/W pin for color filter setting: 0=RGB, 1=BGR
+ 2. MADCTL RGB bit
+ RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ break;
+ case 270:
+ write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ break;
+ case 180:
+ write_reg(par, 0x36, (par->bgr << 3));
+ break;
+ case 90:
+ write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ break;
+ }
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = 128,
+ .height = 160,
+ .init_sequence = default_init_sequence,
+ .fbtftops = {
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "samsung,s6d02a1", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:s6d02a1");
+MODULE_ALIAS("platform:s6d02a1");
+
+MODULE_DESCRIPTION("FB driver for the S6D02A1 LCD Controller");
+MODULE_AUTHOR("WOLFGANG BUENING");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
new file mode 100644
index 000000000000..1ef8c1ad827e
--- /dev/null
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -0,0 +1,208 @@
+/*
+ * FB driver for the S6D1121 LCD Controller
+ *
+ * Copyright (C) 2013 Roman Rolinsky
+ *
+ * Based on fb_ili9325.c by Noralf Tronnes
+ * Based on ili9325.c by Jeroen Domburg
+ * Init code from UTFT library by Henning Karlsen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_s6d1121"
+#define WIDTH 240
+#define HEIGHT 320
+#define BPP 16
+#define FPS 20
+#define DEFAULT_GAMMA "26 09 24 2C 1F 23 24 25 22 26 25 23 0D 00\n" \
+ "1C 1A 13 1D 0B 11 12 10 13 15 36 19 00 0D"
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ /* Initialization sequence from Lib_UTFT */
+
+ write_reg(par, 0x0011, 0x2004);
+ write_reg(par, 0x0013, 0xCC00);
+ write_reg(par, 0x0015, 0x2600);
+ write_reg(par, 0x0014, 0x252A);
+ write_reg(par, 0x0012, 0x0033);
+ write_reg(par, 0x0013, 0xCC04);
+ write_reg(par, 0x0013, 0xCC06);
+ write_reg(par, 0x0013, 0xCC4F);
+ write_reg(par, 0x0013, 0x674F);
+ write_reg(par, 0x0011, 0x2003);
+ write_reg(par, 0x0016, 0x0007);
+ write_reg(par, 0x0002, 0x0013);
+ write_reg(par, 0x0003, 0x0003);
+ write_reg(par, 0x0001, 0x0127);
+ write_reg(par, 0x0008, 0x0303);
+ write_reg(par, 0x000A, 0x000B);
+ write_reg(par, 0x000B, 0x0003);
+ write_reg(par, 0x000C, 0x0000);
+ write_reg(par, 0x0041, 0x0000);
+ write_reg(par, 0x0050, 0x0000);
+ write_reg(par, 0x0060, 0x0005);
+ write_reg(par, 0x0070, 0x000B);
+ write_reg(par, 0x0071, 0x0000);
+ write_reg(par, 0x0078, 0x0000);
+ write_reg(par, 0x007A, 0x0000);
+ write_reg(par, 0x0079, 0x0007);
+ write_reg(par, 0x0007, 0x0051);
+ write_reg(par, 0x0007, 0x0053);
+ write_reg(par, 0x0079, 0x0000);
+
+ write_reg(par, 0x0022); /* Write Data to GRAM */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+ switch (par->info->var.rotate) {
+ /* R20h = Horizontal GRAM Start Address */
+ /* R21h = Vertical GRAM Start Address */
+ case 0:
+ write_reg(par, 0x0020, xs);
+ write_reg(par, 0x0021, ys);
+ break;
+ case 180:
+ write_reg(par, 0x0020, WIDTH - 1 - xs);
+ write_reg(par, 0x0021, HEIGHT - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x0020, WIDTH - 1 - ys);
+ write_reg(par, 0x0021, xs);
+ break;
+ case 90:
+ write_reg(par, 0x0020, ys);
+ write_reg(par, 0x0021, HEIGHT - 1 - xs);
+ break;
+ }
+ write_reg(par, 0x0022); /* Write Data to GRAM */
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ /* AM: GRAM update direction */
+ case 0:
+ write_reg(par, 0x03, 0x0003 | (par->bgr << 12));
+ break;
+ case 180:
+ write_reg(par, 0x03, 0x0000 | (par->bgr << 12));
+ break;
+ case 270:
+ write_reg(par, 0x03, 0x000A | (par->bgr << 12));
+ break;
+ case 90:
+ write_reg(par, 0x03, 0x0009 | (par->bgr << 12));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Gamma string format:
+ PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
+ PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PKN8 PKN9 PKN10 PKN11 VRN0 VRN1
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
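+ /* clamp user-supplied values: the mask below allows 6 bits for the
+ PKP/PKN entries and 5 bits for the VRP/VRN entries */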
+ unsigned long mask[] = {
+ 0b111111, 0b111111, 0b111111, 0b111111, 0b111111, 0b111111,
+ 0b111111, 0b111111, 0b111111, 0b111111, 0b111111, 0b111111,
+ 0b11111, 0b11111,
+ 0b111111, 0b111111, 0b111111, 0b111111, 0b111111, 0b111111,
+ 0b111111, 0b111111, 0b111111, 0b111111, 0b111111, 0b111111,
+ 0b11111, 0b11111 };
+ int i, j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 14; j++)
+ CURVE(i, j) &= mask[i*par->gamma.num_values + j];
+
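+ /* each 16-bit register takes two curve entries:
+ even index in the low byte, odd index in the high byte */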
+ write_reg(par, 0x0030, CURVE(0, 1) << 8 | CURVE(0, 0));
+ write_reg(par, 0x0031, CURVE(0, 3) << 8 | CURVE(0, 2));
+ write_reg(par, 0x0032, CURVE(0, 5) << 8 | CURVE(0, 4));
+ write_reg(par, 0x0033, CURVE(0, 7) << 8 | CURVE(0, 6));
+ write_reg(par, 0x0034, CURVE(0, 9) << 8 | CURVE(0, 8));
+ write_reg(par, 0x0035, CURVE(0, 11) << 8 | CURVE(0, 10));
+
+ write_reg(par, 0x0036, CURVE(1, 1) << 8 | CURVE(1, 0));
+ write_reg(par, 0x0037, CURVE(1, 3) << 8 | CURVE(1, 2));
+ write_reg(par, 0x0038, CURVE(1, 5) << 8 | CURVE(1, 4));
+ write_reg(par, 0x0039, CURVE(1, 7) << 8 | CURVE(1, 6));
+ write_reg(par, 0x003A, CURVE(1, 9) << 8 | CURVE(1, 8));
+ write_reg(par, 0x003B, CURVE(1, 11) << 8 | CURVE(1, 10));
+
+ write_reg(par, 0x003C, CURVE(0, 13) << 8 | CURVE(0, 12));
+ write_reg(par, 0x003D, CURVE(1, 13) << 8 | CURVE(1, 12));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 16,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .bpp = BPP,
+ .fps = FPS,
+ .gamma_num = 2,
+ .gamma_len = 14,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "samsung,s6d1121", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:s6d1121");
+MODULE_ALIAS("platform:s6d1121");
+
+MODULE_DESCRIPTION("FB driver for the S6D1121 LCD Controller");
+MODULE_AUTHOR("Roman Rolinsky");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
new file mode 100644
index 000000000000..ef46fbca2700
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -0,0 +1,206 @@
+/*
+ * FB driver for the SSD1289 LCD Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * Init sequence taken from ITDB02_Graph16.cpp - (C)2010-2011 Henning Karlsen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1289"
+#define WIDTH 240
+#define HEIGHT 320
+#define DEFAULT_GAMMA "02 03 2 5 7 7 4 2 4 2\n" \
+ "02 03 2 5 7 5 4 2 4 2"
+
+static unsigned reg11 = 0x6040;
+module_param(reg11, uint, 0);
+MODULE_PARM_DESC(reg11, "Register 11h value");
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ write_reg(par, 0x00, 0x0001);
+ write_reg(par, 0x03, 0xA8A4);
+ write_reg(par, 0x0C, 0x0000);
+ write_reg(par, 0x0D, 0x080C);
+ write_reg(par, 0x0E, 0x2B00);
+ write_reg(par, 0x1E, 0x00B7);
+ write_reg(par, 0x01,
+ (1 << 13) | (par->bgr << 11) | (1 << 9) | (HEIGHT - 1));
+ write_reg(par, 0x02, 0x0600);
+ write_reg(par, 0x10, 0x0000);
+ write_reg(par, 0x05, 0x0000);
+ write_reg(par, 0x06, 0x0000);
+ write_reg(par, 0x16, 0xEF1C);
+ write_reg(par, 0x17, 0x0003);
+ write_reg(par, 0x07, 0x0233);
+ write_reg(par, 0x0B, 0x0000);
+ write_reg(par, 0x0F, 0x0000);
+ write_reg(par, 0x41, 0x0000);
+ write_reg(par, 0x42, 0x0000);
+ write_reg(par, 0x48, 0x0000);
+ write_reg(par, 0x49, 0x013F);
+ write_reg(par, 0x4A, 0x0000);
+ write_reg(par, 0x4B, 0x0000);
+ write_reg(par, 0x44, 0xEF00);
+ write_reg(par, 0x45, 0x0000);
+ write_reg(par, 0x46, 0x013F);
+ write_reg(par, 0x23, 0x0000);
+ write_reg(par, 0x24, 0x0000);
+ write_reg(par, 0x25, 0x8000);
+ write_reg(par, 0x4f, 0x0000);
+ write_reg(par, 0x4e, 0x0000);
+ write_reg(par, 0x22);
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ switch (par->info->var.rotate) {
+ /* R4Eh - Set GDDRAM X address counter */
+ /* R4Fh - Set GDDRAM Y address counter */
+ case 0:
+ write_reg(par, 0x4e, xs);
+ write_reg(par, 0x4f, ys);
+ break;
+ case 180:
+ write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
+ write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
+ write_reg(par, 0x4f, xs);
+ break;
+ case 90:
+ write_reg(par, 0x4e, ys);
+ write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
+ break;
+ }
+
+ /* R22h - RAM data write */
+ write_reg(par, 0x22);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ if (par->fbtftops.init_display != init_display) {
+ /* don't risk messing up register 11h */
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "%s: skipping since custom init_display() is used\n",
+ __func__);
+ return 0;
+ }
+
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x11, reg11 | 0b110000);
+ break;
+ case 270:
+ write_reg(par, 0x11, reg11 | 0b101000);
+ break;
+ case 180:
+ write_reg(par, 0x11, reg11 | 0b000000);
+ break;
+ case 90:
+ write_reg(par, 0x11, reg11 | 0b011000);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Gamma string format:
+ VRP0 VRP1 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 PKP5
+ VRN0 VRN1 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 PKN5
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
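+ /* clamp user-supplied values: 5 bits for VRP/VRN,
+ 3 bits for the remaining PRP/PRN/PKP/PKN entries */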
+ unsigned long mask[] = {
+ 0b11111, 0b11111, 0b111, 0b111, 0b111,
+ 0b111, 0b111, 0b111, 0b111, 0b111,
+ 0b11111, 0b11111, 0b111, 0b111, 0b111,
+ 0b111, 0b111, 0b111, 0b111, 0b111 };
+ int i, j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 10; j++)
+ CURVE(i, j) &= mask[i*par->gamma.num_values + j];
+
+ write_reg(par, 0x0030, CURVE(0, 5) << 8 | CURVE(0, 4));
+ write_reg(par, 0x0031, CURVE(0, 7) << 8 | CURVE(0, 6));
+ write_reg(par, 0x0032, CURVE(0, 9) << 8 | CURVE(0, 8));
+ write_reg(par, 0x0033, CURVE(0, 3) << 8 | CURVE(0, 2));
+ write_reg(par, 0x0034, CURVE(1, 5) << 8 | CURVE(1, 4));
+ write_reg(par, 0x0035, CURVE(1, 7) << 8 | CURVE(1, 6));
+ write_reg(par, 0x0036, CURVE(1, 9) << 8 | CURVE(1, 8));
+ write_reg(par, 0x0037, CURVE(1, 3) << 8 | CURVE(1, 2));
+ write_reg(par, 0x003A, CURVE(0, 1) << 8 | CURVE(0, 0));
+ write_reg(par, 0x003B, CURVE(1, 1) << 8 | CURVE(1, 0));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 16,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .gamma_num = 2,
+ .gamma_len = 10,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1289", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1289");
+MODULE_ALIAS("platform:ssd1289");
+
+MODULE_DESCRIPTION("FB driver for the SSD1289 LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
new file mode 100644
index 000000000000..5ea195b0de1b
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -0,0 +1,229 @@
+/*
+ * FB driver for the SSD1306 OLED Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1306"
+#define WIDTH 128
+#define HEIGHT 64
+
+
+/*
+ write_reg() caveat:
+
+ This doesn't work because D/C has to be LOW for both values:
+ write_reg(par, val1, val2);
+
+ Do it like this:
+ write_reg(par, val1);
+ write_reg(par, val2);
+*/
+
+/* Init sequence taken from the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
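+ /* contrast is carried in gamma.curves[0];
+ pick a panel size dependent default if the user did not set one */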
+ if (par->gamma.curves[0] == 0) {
+ mutex_lock(&par->gamma.lock);
+ if (par->info->var.yres == 64)
+ par->gamma.curves[0] = 0xCF;
+ else
+ par->gamma.curves[0] = 0x8F;
+ mutex_unlock(&par->gamma.lock);
+ }
+
+ /* Set Display OFF */
+ write_reg(par, 0xAE);
+
+ /* Set Display Clock Divide Ratio/ Oscillator Frequency */
+ write_reg(par, 0xD5);
+ write_reg(par, 0x80);
+
+ /* Set Multiplex Ratio */
+ write_reg(par, 0xA8);
+ if (par->info->var.yres == 64)
+ write_reg(par, 0x3F);
+ else
+ write_reg(par, 0x1F);
+
+ /* Set Display Offset */
+ write_reg(par, 0xD3);
+ write_reg(par, 0x0);
+
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+
+ /* Charge Pump Setting */
+ write_reg(par, 0x8D);
+ /* A[2] = 1b, Enable charge pump during display on */
+ write_reg(par, 0x14);
+
+ /* Set Memory Addressing Mode */
+ write_reg(par, 0x20);
+ /* Vertical addressing mode */
+ write_reg(par, 0x01);
+
+ /*Set Segment Re-map */
+ /* column address 127 is mapped to SEG0 */
+ write_reg(par, 0xA0 | 0x1);
+
+ /* Set COM Output Scan Direction */
+ /* remapped mode. Scan from COM[N-1] to COM0 */
+ write_reg(par, 0xC8);
+
+ /* Set COM Pins Hardware Configuration */
+ write_reg(par, 0xDA);
+ if (par->info->var.yres == 64)
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0x12);
+ else
+ /* A[4]=0b, Sequential COM pin configuration */
+ write_reg(par, 0x02);
+
+ /* Set Pre-charge Period */
+ write_reg(par, 0xD9);
+ write_reg(par, 0xF1);
+
+ /* Set VCOMH Deselect Level */
+ write_reg(par, 0xDB);
+ /* according to the datasheet, this value is out of bounds */
+ write_reg(par, 0x40);
+
+ /* Entire Display ON */
+ /* Resume to RAM content display. Output follows RAM content */
+ write_reg(par, 0xA4);
+
+ /* Set Normal Display
+ 0 in RAM: OFF in display panel
+ 1 in RAM: ON in display panel */
+ write_reg(par, 0xA6);
+
+ /* Set Display ON */
+ write_reg(par, 0xAF);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Set Lower Column Start Address for Page Addressing Mode */
+ write_reg(par, 0x00 | 0x0);
+ /* Set Higher Column Start Address for Page Addressing Mode */
+ write_reg(par, 0x10 | 0x0);
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ __func__, on ? "true" : "false");
+
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+/* Gamma is used to control Contrast */
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ curves[0] &= 0xFF;
+
+ /* Set Contrast Control for BANK0 */
+ write_reg(par, 0x81);
+ write_reg(par, curves[0]);
+
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_base;
+ u8 *buf = par->txbuf.buf;
+ int x, y, i;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
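+ /* pack the RGB565 framebuffer into 1 bpp: each transferred byte holds
+ 8 vertically adjacent pixels (vertical addressing mode) */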
+ for (x = 0; x < par->info->var.xres; x++) {
+ for (y = 0; y < par->info->var.yres/8; y++) {
+ *buf = 0x00;
+ for (i = 0; i < 8; i++)
+ *buf |= (vmem16[(y*8+i)*par->info->var.xres+x] ? 1 : 0) << i;
+ buf++;
+ }
+ }
+
+ /* Write data */
+ gpio_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ par->info->var.xres*par->info->var.yres/8);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "%s: write failed and returned: %d\n", __func__, ret);
+
+ return ret;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ .gamma = "00",
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1306", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1306");
+MODULE_ALIAS("platform:ssd1306");
+
+MODULE_DESCRIPTION("SSD1306 OLED Driver");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
new file mode 100644
index 000000000000..da7464f90e37
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -0,0 +1,205 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1331"
+#define WIDTH 96
+#define HEIGHT 64
+#define GAMMA_NUM 1
+#define GAMMA_LEN 63
+#define DEFAULT_GAMMA "0 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2" \
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ write_reg(par, 0xae); /* Display Off */
+ write_reg(par, 0xa0, 0x70 | (par->bgr << 2)); /* Set Colour Depth */
+ write_reg(par, 0x72); /* RGB colour */
+ write_reg(par, 0xa1, 0x00); /* Set Display Start Line */
+ write_reg(par, 0xa2, 0x00); /* Set Display Offset */
+ write_reg(par, 0xa4); /* NORMALDISPLAY */
+ write_reg(par, 0xa8, 0x3f); /* Set multiplex */
+ write_reg(par, 0xad, 0x8e); /* Set master */
+ /* write_reg(par, 0xb0, 0x0b); Set power mode (disabled) */
+ write_reg(par, 0xb1, 0x31); /* Precharge */
+ write_reg(par, 0xb3, 0xf0); /* Clock div */
+ write_reg(par, 0x8a, 0x64); /* Precharge A */
+ write_reg(par, 0x8b, 0x78); /* Precharge B */
+ write_reg(par, 0x8c, 0x64); /* Precharge C */
+ write_reg(par, 0xbb, 0x3a); /* Precharge level */
+ write_reg(par, 0xbe, 0x3e); /* VCOMH */
+ write_reg(par, 0x87, 0x06); /* Master current */
+ write_reg(par, 0x81, 0x91); /* Contrast A */
+ write_reg(par, 0x82, 0x50); /* Contrast B */
+ write_reg(par, 0x83, 0x7d); /* Contrast C */
+ write_reg(par, 0xaf); /* Set Sleep Mode Display On */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ write_reg(par, 0x15, xs, xe);
+ write_reg(par, 0x75, ys, ye);
+}
+
+static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i, ret;
+ u8 *buf = (u8 *)par->buf;
+
+ if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) {
+ va_start(args, len);
+ for (i = 0; i < len; i++) {
+ buf[i] = (u8)va_arg(args, unsigned int);
+ }
+ va_end(args);
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, u8, buf, len, "%s: ", __func__);
+ }
+
+ va_start(args, len);
+
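+ /* the command byte and its parameters are all sent with D/C low;
+ D/C is raised afterwards so pixel data goes out as data */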
+ *buf = (u8)va_arg(args, unsigned int);
+ if (par->gpio.dc != -1)
+ gpio_set_value(par->gpio.dc, 0);
+ ret = par->fbtftops.write(par, par->buf, sizeof(u8));
+ if (ret < 0) {
+ va_end(args);
+ dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret);
+ return;
+ }
+ len--;
+
+ if (len) {
+ i = len;
+ while (i--) {
+ *buf++ = (u8)va_arg(args, unsigned int);
+ }
+ ret = par->fbtftops.write(par, par->buf, len * (sizeof(u8)));
+ if (ret < 0) {
+ va_end(args);
+ dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret);
+ return;
+ }
+ }
+ if (par->gpio.dc != -1)
+ gpio_set_value(par->gpio.dc, 1);
+ va_end(args);
+}
+
+/*
+ Grayscale Lookup Table
+ GS1 - GS63
+ The driver Gamma curve contains the relative values between the entries
+ in the Lookup table.
+
+ From datasheet:
+ 8.8 Gray Scale Decoder
+
+ there are a total of 180 Gamma Settings (Setting 0 to Setting 180)
+ available for the Gray Scale table.
+
+ The gray scale is defined in an incremental way, with reference
+ to the length of the previous table entry:
+ Setting of GS1 has to be >= 0
+ Setting of GS2 has to be > Setting of GS1 +1
+ Setting of GS3 has to be > Setting of GS2 +1
+ :
+ Setting of GS63 has to be > Setting of GS62 +1
+
+
+*/
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ unsigned long tmp[GAMMA_NUM * GAMMA_LEN];
+ int i, acc = 0;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
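+ /* accumulate the relative curve values into the absolute grayscale
+ table, enforcing the constraints described above */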
+ for (i = 0; i < 63; i++) {
+ if (i > 0 && curves[i] < 2) {
+ dev_err(par->info->device,
+ "Illegal value in Grayscale Lookup Table at index %d. " \
+ "Must be greater than 1\n", i);
+ return -EINVAL;
+ }
+ acc += curves[i];
+ tmp[i] = acc;
+ if (acc > 180) {
+ dev_err(par->info->device,
+ "Illegal value(s) in Grayscale Lookup Table. " \
+ "At index=%d, the accumulated value has exceeded 180\n", i);
+ return -EINVAL;
+ }
+ }
+
+ write_reg(par, 0xB8,
+ tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7],
+ tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14], tmp[15],
+ tmp[16], tmp[17], tmp[18], tmp[19], tmp[20], tmp[21], tmp[22], tmp[23],
+ tmp[24], tmp[25], tmp[26], tmp[27], tmp[28], tmp[29], tmp[30], tmp[31],
+ tmp[32], tmp[33], tmp[34], tmp[35], tmp[36], tmp[37], tmp[38], tmp[39],
+ tmp[40], tmp[41], tmp[42], tmp[43], tmp[44], tmp[45], tmp[46], tmp[47],
+ tmp[48], tmp[49], tmp[50], tmp[51], tmp[52], tmp[53], tmp[54], tmp[55],
+ tmp[56], tmp[57], tmp[58], tmp[59], tmp[60], tmp[61], tmp[62]);
+
+ return 0;
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ __func__, on ? "true" : "false");
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .gamma_num = GAMMA_NUM,
+ .gamma_len = GAMMA_LEN,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .write_register = write_reg8_bus8,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_gamma = set_gamma,
+ .blank = blank,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1331", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1331");
+MODULE_ALIAS("platform:ssd1331");
+
+MODULE_DESCRIPTION("SSD1331 OLED Driver");
+MODULE_AUTHOR("Alec Smecher (adapted from SSD1351 by James Davies)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
new file mode 100644
index 000000000000..062d98660f63
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -0,0 +1,258 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1351"
+#define WIDTH 128
+#define HEIGHT 128
+#define GAMMA_NUM 1
+#define GAMMA_LEN 63
+#define DEFAULT_GAMMA "0 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2" \
+
+static void register_onboard_backlight(struct fbtft_par *par);
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ if (par->pdata
+ && par->pdata->display.backlight == FBTFT_ONBOARD_BACKLIGHT) {
+ /* module uses onboard GPIO for panel power */
+ par->fbtftops.register_backlight = register_onboard_backlight;
+ }
+
+ par->fbtftops.reset(par);
+
+ write_reg(par, 0xfd, 0x12); /* Command Lock */
+ write_reg(par, 0xfd, 0xb1); /* Command Lock */
+ write_reg(par, 0xae); /* Display Off */
+ write_reg(par, 0xb3, 0xf1); /* Front Clock Div */
+ write_reg(par, 0xca, 0x7f); /* Set Mux Ratio */
+ write_reg(par, 0x15, 0x00, 0x7f); /* Set Column Address */
+ write_reg(par, 0x75, 0x00, 0x7f); /* Set Row Address */
+ write_reg(par, 0xa1, 0x00); /* Set Display Start Line */
+ write_reg(par, 0xa2, 0x00); /* Set Display Offset */
+ write_reg(par, 0xb5, 0x00); /* Set GPIO */
+ write_reg(par, 0xab, 0x01); /* Set Function Selection */
+ write_reg(par, 0xb1, 0x32); /* Set Phase Length */
+ write_reg(par, 0xb4, 0xa0, 0xb5, 0x55); /* Set Segment Low Voltage */
+ write_reg(par, 0xbb, 0x17); /* Set Precharge Voltage */
+ write_reg(par, 0xbe, 0x05); /* Set VComH Voltage */
+ write_reg(par, 0xc1, 0xc8, 0x80, 0xc8); /* Set Contrast */
+ write_reg(par, 0xc7, 0x0f); /* Set Master Contrast */
+ write_reg(par, 0xb6, 0x01); /* Set Second Precharge Period */
+ write_reg(par, 0xa6); /* Set Display Mode Reset */
+ write_reg(par, 0xaf); /* Set Sleep Mode Display On */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ write_reg(par, 0x15, xs, xe);
+ write_reg(par, 0x75, ys, ye);
+ write_reg(par, 0x5c);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ unsigned remap;
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ if (par->fbtftops.init_display != init_display) {
+ /* don't risk messing up register A0h */
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "%s: skipping since custom init_display() is used\n",
+ __func__);
+ return 0;
+ }
+
+ remap = 0x60 | (par->bgr << 2); /* Set Colour Depth */
+
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0xA0, remap | 0b00 | 1<<4);
+ break;
+ case 270:
+ write_reg(par, 0xA0, remap | 0b11 | 1<<4);
+ break;
+ case 180:
+ write_reg(par, 0xA0, remap | 0b10);
+ break;
+ case 90:
+ write_reg(par, 0xA0, remap | 0b01);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Grayscale Lookup Table
+ GS1 - GS63
+ The driver Gamma curve contains the relative values between the entries
+ in the Lookup table.
+
+ From datasheet:
+ 8.8 Gray Scale Decoder
+
+ there are a total of 180 Gamma Settings (Setting 0 to Setting 180)
+ available for the Gray Scale table.
+
+ The gray scale is defined in an incremental way, with reference
+ to the length of the previous table entry:
+ Setting of GS1 has to be >= 0
+ Setting of GS2 has to be > Setting of GS1 +1
+ Setting of GS3 has to be > Setting of GS2 +1
+ :
+ Setting of GS63 has to be > Setting of GS62 +1
+
+
+*/
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ unsigned long tmp[GAMMA_NUM * GAMMA_LEN];
+ int i, acc = 0;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
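+ /* accumulate the relative curve values into the absolute grayscale
+ table, enforcing the constraints described above */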
+ for (i = 0; i < 63; i++) {
+ if (i > 0 && curves[i] < 2) {
+ dev_err(par->info->device,
+ "Illegal value in Grayscale Lookup Table at index %d. " \
+ "Must be greater than 1\n", i);
+ return -EINVAL;
+ }
+ acc += curves[i];
+ tmp[i] = acc;
+ if (acc > 180) {
+ dev_err(par->info->device,
+ "Illegal value(s) in Grayscale Lookup Table. " \
+ "At index=%d, the accumulated value has exceeded 180\n", i);
+ return -EINVAL;
+ }
+ }
+
+ write_reg(par, 0xB8,
+ tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7],
+ tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14], tmp[15],
+ tmp[16], tmp[17], tmp[18], tmp[19], tmp[20], tmp[21], tmp[22], tmp[23],
+ tmp[24], tmp[25], tmp[26], tmp[27], tmp[28], tmp[29], tmp[30], tmp[31],
+ tmp[32], tmp[33], tmp[34], tmp[35], tmp[36], tmp[37], tmp[38], tmp[39],
+ tmp[40], tmp[41], tmp[42], tmp[43], tmp[44], tmp[45], tmp[46], tmp[47],
+ tmp[48], tmp[49], tmp[50], tmp[51], tmp[52], tmp[53], tmp[54], tmp[55],
+ tmp[56], tmp[57], tmp[58], tmp[59], tmp[60], tmp[61], tmp[62]);
+
+ return 0;
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ __func__, on ? "true" : "false");
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .gamma_num = GAMMA_NUM,
+ .gamma_len = GAMMA_LEN,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ .blank = blank,
+ },
+};
+
+#ifdef CONFIG_FB_BACKLIGHT
+static int update_onboard_backlight(struct backlight_device *bd)
+{
+ struct fbtft_par *par = bl_get_data(bd);
+ bool on;
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par,
+ "%s: power=%d, fb_blank=%d\n",
+ __func__, bd->props.power, bd->props.fb_blank);
+
+ on = (bd->props.power == FB_BLANK_UNBLANK)
+ && (bd->props.fb_blank == FB_BLANK_UNBLANK);
+ /* Onboard backlight connected to GPIO0 on SSD1351, GPIO1 unused */
+ write_reg(par, 0xB5, on ? 0x03 : 0x02);
+
+ return 0;
+}
+
+static void register_onboard_backlight(struct fbtft_par *par)
+{
+ struct backlight_device *bd;
+ struct backlight_properties bl_props = { 0, };
+ struct backlight_ops *bl_ops;
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
+
+ bl_ops = devm_kzalloc(par->info->device, sizeof(struct backlight_ops),
+ GFP_KERNEL);
+ if (!bl_ops) {
+ dev_err(par->info->device,
+ "%s: could not allocate memory for backlight operations.\n",
+ __func__);
+ return;
+ }
+
+ bl_ops->update_status = update_onboard_backlight;
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.power = FB_BLANK_POWERDOWN;
+
+ bd = backlight_device_register(dev_driver_string(par->info->device),
+ par->info->device, par, bl_ops, &bl_props);
+ if (IS_ERR(bd)) {
+ dev_err(par->info->device,
+ "cannot register backlight device (%ld)\n",
+ PTR_ERR(bd));
+ return;
+ }
+ par->info->bl_dev = bd;
+
+ if (!par->fbtftops.unregister_backlight)
+ par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
+}
+#else
+static void register_onboard_backlight(struct fbtft_par *par) { };
+#endif
+
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1351");
+MODULE_ALIAS("platform:ssd1351");
+
+MODULE_DESCRIPTION("SSD1351 OLED Driver");
+MODULE_AUTHOR("James Davies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
new file mode 100644
index 000000000000..b63aa38e51cf
--- /dev/null
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -0,0 +1,195 @@
+/*
+ * FB driver for the ST7735R LCD Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_st7735r"
+#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
+ "0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
+
+
+static int default_init_sequence[] = {
+ /* SWRESET - Software reset */
+ -1, 0x01,
+ -2, 150, /* delay */
+
+ /* SLPOUT - Sleep out & booster on */
+ -1, 0x11,
+ -2, 500, /* delay */
+
+ /* FRMCTR1 - frame rate control: normal mode
+ frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D) */
+ -1, 0xB1, 0x01, 0x2C, 0x2D,
+
+ /* FRMCTR2 - frame rate control: idle mode
+ frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D) */
+ -1, 0xB2, 0x01, 0x2C, 0x2D,
+
+ /* FRMCTR3 - frame rate control - partial mode
+ dot inversion mode, line inversion mode */
+ -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
+
+ /* INVCTR - display inversion control
+ no inversion */
+ -1, 0xB4, 0x07,
+
+ /* PWCTR1 - Power Control
+ -4.6V, AUTO mode */
+ -1, 0xC0, 0xA2, 0x02, 0x84,
+
+ /* PWCTR2 - Power Control
+ VGH25 = 2.4C VGSEL = -10 VGH = 3 * AVDD */
+ -1, 0xC1, 0xC5,
+
+ /* PWCTR3 - Power Control
+ Opamp current small, Boost frequency */
+ -1, 0xC2, 0x0A, 0x00,
+
+ /* PWCTR4 - Power Control
+ BCLK/2, Opamp current small & Medium low */
+ -1, 0xC3, 0x8A, 0x2A,
+
+ /* PWCTR5 - Power Control */
+ -1, 0xC4, 0x8A, 0xEE,
+
+ /* VMCTR1 - Power Control */
+ -1, 0xC5, 0x0E,
+
+ /* INVOFF - Display inversion off */
+ -1, 0x20,
+
+ /* COLMOD - Interface pixel format */
+ -1, 0x3A, 0x05,
+
+ /* DISPON - Display On */
+ -1, 0x29,
+ -2, 100, /* delay */
+
+ /* NORON - Partial off (Normal) */
+ -1, 0x13,
+ -2, 10, /* delay */
+
+ /* end marker */
+ -3
+};
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address */
+ write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+
+ /* Row address */
+ write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+#define MY (1 << 7)
+#define MX (1 << 6)
+#define MV (1 << 5)
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* MADCTL - Memory data access control
+ RGB/BGR:
+ 1. Mode selection pin SRGB
+ RGB H/W pin for color filter setting: 0=RGB, 1=BGR
+ 2. MADCTL RGB bit
+ RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ break;
+ case 270:
+ write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ break;
+ case 180:
+ write_reg(par, 0x36, (par->bgr << 3));
+ break;
+ case 90:
+ write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ Gamma string format:
+ VRF0P VOS0P PK0P PK1P PK2P PK3P PK4P PK5P PK6P PK7P PK8P PK9P SELV0P SELV1P SELV62P SELV63P
+ VRF0N VOS0N PK0N PK1N PK2N PK3N PK4N PK5N PK6N PK7N PK8N PK9N SELV0N SELV1N SELV62N SELV63N
+*/
+#define CURVE(num, idx) curves[num*par->gamma.num_values + idx]
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ int i, j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ for (i = 0; i < par->gamma.num_curves; i++)
+ for (j = 0; j < par->gamma.num_values; j++)
+ CURVE(i,j) &= 0b111111;
+
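+ /* one write per curve: curve 0 goes to register E0h, curve 1 to E1h */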
+ for (i = 0; i < par->gamma.num_curves; i++)
+ write_reg(par, 0xE0 + i,
+ CURVE(i, 0), CURVE(i, 1), CURVE(i, 2), CURVE(i, 3),
+ CURVE(i, 4), CURVE(i, 5), CURVE(i, 6), CURVE(i, 7),
+ CURVE(i, 8), CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
+ CURVE(i, 12), CURVE(i, 13), CURVE(i, 14), CURVE(i, 15));
+
+ return 0;
+}
+#undef CURVE
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = 128,
+ .height = 160,
+ .init_sequence = default_init_sequence,
+ .gamma_num = 2,
+ .gamma_len = 16,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "sitronix,st7735r", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:st7735r");
+MODULE_ALIAS("platform:st7735r");
+
+MODULE_DESCRIPTION("FB driver for the ST7735R LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c
new file mode 100644
index 000000000000..ca98bfb5c7df
--- /dev/null
+++ b/drivers/staging/fbtft/fb_tinylcd.c
@@ -0,0 +1,124 @@
+/*
+ * Custom FB driver for tinylcd.com display
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_tinylcd"
+#define WIDTH 320
+#define HEIGHT 480
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ write_reg(par, 0xB0, 0x80);
+ write_reg(par, 0xC0, 0x0A, 0x0A);
+ write_reg(par, 0xC1, 0x45, 0x07);
+ write_reg(par, 0xC2, 0x33);
+ write_reg(par, 0xC5, 0x00, 0x42, 0x80);
+ write_reg(par, 0xB1, 0xD0, 0x11);
+ write_reg(par, 0xB4, 0x02);
+ write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
+ write_reg(par, 0xB7, 0x07);
+ write_reg(par, 0x36, 0x58);
+ write_reg(par, 0xF0, 0x36, 0xA5, 0xD3);
+ write_reg(par, 0xE5, 0x80);
+ write_reg(par, 0xE5, 0x01);
+ write_reg(par, 0xB3, 0x00);
+ write_reg(par, 0xE5, 0x00);
+ write_reg(par, 0xF0, 0x36, 0xA5, 0x53);
+ write_reg(par, 0xE0, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00,
+ 0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
+ write_reg(par, 0x3A, 0x55);
+ write_reg(par, 0x11);
+ udelay(250);
+ write_reg(par, 0x29);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address */
+ write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+
+ /* Row address */
+ write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
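+ /* rotation is applied by rewriting registers B6h and 36h */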
+ switch (par->info->var.rotate) {
+ case 270:
+ write_reg(par, 0xB6, 0x00, 0x02, 0x3B);
+ write_reg(par, 0x36, 0x28);
+ break;
+ case 180:
+ write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
+ write_reg(par, 0x36, 0x58);
+ break;
+ case 90:
+ write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
+ write_reg(par, 0x36, 0x38);
+ break;
+ default:
+ write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
+ write_reg(par, 0x36, 0x08);
+ break;
+ }
+
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "neosec,tinylcd", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("spi:tinylcd");
+
+MODULE_DESCRIPTION("Custom FB driver for tinylcd.com display");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_tls8204.c b/drivers/staging/fbtft/fb_tls8204.c
new file mode 100644
index 000000000000..8738c7a7bfda
--- /dev/null
+++ b/drivers/staging/fbtft/fb_tls8204.c
@@ -0,0 +1,176 @@
+/*
+ * FB driver for the TLS8204 LCD Controller
+ *
+ * The display is monochrome and the video memory is RGB565.
+ * Any pixel value except 0 turns the pixel on.
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ * Copyright (C) 2014 Michael Hope (adapted for the TLS8204)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_tls8204"
+#define WIDTH 84
+#define HEIGHT 48
+#define TXBUFLEN WIDTH
+#define DEFAULT_GAMMA "40" /* gamma is used to control contrast in this driver */
+
+static unsigned bs = 4;
+module_param(bs, uint, 0);
+MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ /* Enter extended command mode */
+ write_reg(par, 0x21); /* 5:1 1
+ 2:0 PD - Powerdown control: chip is active
+ 1:0 V - Entry mode: horizontal addressing
+ 0:1 H - Extended instruction set control: extended
+ */
+
+ /* H=1 Bias system */
+ write_reg(par, 0x10 | (bs & 0x7)); /*
+ 4:1 1
+ 3:0 0
+ 2:x BS2 - Bias System
+ 1:x BS1
+ 0:x BS0
+ */
+
+ /* Set the address of the first display line. */
+ write_reg(par, 0x04 | (64 >> 6));
+ write_reg(par, 0x40 | (64 & 0x3F));
+
+ /* Enter H=0 standard command mode */
+ write_reg(par, 0x20);
+
+ /* H=0 Display control */
+ write_reg(par, 0x08 | 4); /*
+ 3:1 1
+ 2:1 D - DE: 10=normal mode
+ 1:0 0
+ 0:0 E
+ */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* H=0 Set X address of RAM */
+ write_reg(par, 0x80); /* 7:1 1
+ 6-0: X[6:0] - 0x00
+ */
+
+ /* H=0 Set Y address of RAM */
+ write_reg(par, 0x40); /* 7:0 0
+ 6:1 1
+ 2-0: Y[2:0] - 0x0
+ */
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_base;
+ int x, y, i;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
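+ /* send one 8-pixel-high page per pass: reset the write pointer,
+ pack one byte per column (8 vertical pixels), then stream the row */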
+ for (y = 0; y < HEIGHT/8; y++) {
+ u8 *buf = par->txbuf.buf;
+ /* The display is 102x68 but the LCD is 84x48. Set
+ the write pointer at the start of each row. */
+ gpio_set_value(par->gpio.dc, 0);
+ write_reg(par, 0x80 | 0);
+ write_reg(par, 0x40 | y);
+
+ for (x = 0; x < WIDTH; x++) {
+ u8 ch = 0;
+ for (i = 0; i < 8*WIDTH; i += WIDTH) {
+ ch >>= 1;
+ if (vmem16[(y*8*WIDTH)+i+x])
+ ch |= 0x80;
+ }
+ *buf++ = ch;
+ }
+ /* Write the row */
+ gpio_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
+ if (ret < 0) {
+ dev_err(par->info->device,
+ "%s: write failed and returned: %d\n", __func__, ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* apply mask */
+ curves[0] &= 0x7F;
+
+ write_reg(par, 0x21); /* turn on extended instruction set */
+ write_reg(par, 0x80 | curves[0]);
+ write_reg(par, 0x20); /* turn off extended instruction set */
+
+ return 0;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = TXBUFLEN,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .write_vmem = write_vmem,
+ .set_gamma = set_gamma,
+ },
+ .backlight = 1,
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "teralane,tls8204", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("spi:tls8204");
+
+MODULE_DESCRIPTION("FB driver for the TLS8204 LCD Controller");
+MODULE_AUTHOR("Michael Hope");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
new file mode 100644
index 000000000000..d70ac524278c
--- /dev/null
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -0,0 +1,210 @@
+/*
+ * FB driver for the UC1701 LCD Controller
+ *
+ * The display is monochrome and the video memory is RGB565.
+ * Any pixel value except 0 turns the pixel on.
+ *
+ * Copyright (C) 2014 Juergen Holzmann
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_uc1701"
+#define WIDTH 102
+#define HEIGHT 64
+#define PAGES (HEIGHT/8)
+
+/* 1: Display on/off */
+#define LCD_DISPLAY_ENABLE 0xAE
+/* 2: display start line set */
+#define LCD_START_LINE 0x40
+/* 3: Page address set (lower 4 bits select one of the pages) */
+#define LCD_PAGE_ADDRESS 0xB0
+/* 4: column address */
+#define LCD_COL_ADDRESS 0x10
+/* 8: select orientation */
+#define LCD_BOTTOMVIEW 0xA0
+/* 9: inverted display */
+#define LCD_DISPLAY_INVERT 0xA6
+/* 10: show memory content or switch all pixels on */
+#define LCD_ALL_PIXEL 0xA4
+/* 11: lcd bias set */
+#define LCD_BIAS 0xA2
+/* 14: Reset Controller */
+#define LCD_RESET_CMD 0xE2
+/* 15: output mode select (turns display upside-down) */
+#define LCD_SCAN_DIR 0xC0
+/* 16: power control set */
+#define LCD_POWER_CONTROL 0x28
+/* 17: voltage regulator resistor ratio set */
+#define LCD_VOLTAGE 0x20
+/* 18: Volume mode set */
+#define LCD_VOLUME_MODE 0x81
+/* 22: NOP command */
+#define LCD_NO_OP 0xE3
+/* 25: advanced program control */
+#define LCD_ADV_PROG_CTRL 0xFA
+/* 25: advanced program control2 */
+#define LCD_ADV_PROG_CTRL2 0x10
+#define LCD_TEMPCOMP_HIGH 0x80
+/* column offset for normal orientation */
+#define SHIFT_ADDR_NORMAL 0
+/* column offset for bottom view orientation */
+#define SHIFT_ADDR_TOPVIEW 30
+
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ /* softreset of LCD */
+ write_reg(par, LCD_RESET_CMD);
+ mdelay(10);
+
+ /* set startpoint */
+ /* LCD_START_LINE | (pos & 0x3F) */
+ write_reg(par, LCD_START_LINE);
+
+ /* select orientation BOTTOMVIEW */
+ write_reg(par, LCD_BOTTOMVIEW | 1);
+ /* output mode select (turns display upside-down) */
+ write_reg(par, LCD_SCAN_DIR | 0x00);
+
+ /* Normal Pixel mode */
+ write_reg(par, LCD_ALL_PIXEL | 0);
+
+ /* positive display */
+ write_reg(par, LCD_DISPLAY_INVERT | 0);
+
+ /* bias 1/9 */
+ write_reg(par, LCD_BIAS | 0);
+
+ /* power control mode: all features on */
+ /* LCD_POWER_CONTROL | (val&0x07) */
+ write_reg(par, LCD_POWER_CONTROL | 0x07);
+
+ /* set voltage regulator R/R */
+ /* LCD_VOLTAGE | (val&0x07) */
+ write_reg(par, LCD_VOLTAGE | 0x07);
+
+ /* volume mode set */
+ /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
+ write_reg(par, LCD_VOLUME_MODE);
+ /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
+ write_reg(par, 0x09);
+ /* ???? */
+ /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
+ write_reg(par, LCD_NO_OP);
+
+ /* advanced program control */
+ write_reg(par, LCD_ADV_PROG_CTRL);
+ write_reg(par, LCD_ADV_PROG_CTRL2|LCD_TEMPCOMP_HIGH);
+
+ /* enable display */
+ write_reg(par, LCD_DISPLAY_ENABLE | 1);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* goto address */
+ /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
+ (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
+ LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+ write_reg(par, LCD_PAGE_ADDRESS);
+ /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
+ (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
+ LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+ write_reg(par, 0x00);
+ /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
+ (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
+ LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+ write_reg(par, LCD_COL_ADDRESS);
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_base;
+ u8 *buf = par->txbuf.buf;
+ int x, y, i;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
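+ /* transfer the framebuffer one 8-pixel-high page at a time:
+ each byte packs 8 vertically adjacent pixels of one column */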
+ for (y = 0; y < PAGES; y++) {
+ buf = par->txbuf.buf;
+ for (x = 0; x < WIDTH; x++) {
+ *buf = 0x00;
+ for (i = 0; i < 8; i++)
+ *buf |= (vmem16[((y*8*WIDTH)+(i*WIDTH))+x] ? 1 : 0) << i;
+ buf++;
+ }
+ /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
+ (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
+ LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+ write_reg(par, LCD_PAGE_ADDRESS|(u8)y);
+ /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
+ (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
+ LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+ write_reg(par, 0x00);
+ /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
+ (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
+ LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+ write_reg(par, LCD_COL_ADDRESS);
+ gpio_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
+ gpio_set_value(par->gpio.dc, 0);
+ }
+
+ if (ret < 0)
+ dev_err(par->info->device, "%s: write failed and returned: %d\n", __func__, ret);
+
+ return ret;
+}
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .write_vmem = write_vmem,
+ },
+ .backlight = 1,
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "UltraChip,uc1701", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("spi:uc1701");
+
+MODULE_DESCRIPTION("FB driver for the UC1701 LCD Controller");
+MODULE_AUTHOR("Juergen Holzmann");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
new file mode 100644
index 000000000000..fff57b330ba2
--- /dev/null
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -0,0 +1,206 @@
+/*
+ * FB driver for the uPD161704 LCD Controller
+ *
+ * Copyright (C) 2014 Seong-Woo Kim
+ *
+ * Based on fb_ili9325.c by Noralf Tronnes
+ * Based on ili9325.c by Jeroen Domburg
+ * Init code from UTFT library by Henning Karlsen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_upd161704"
+#define WIDTH 240
+#define HEIGHT 320
+#define BPP 16
+
+static int init_display(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ par->fbtftops.reset(par);
+
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ /* Initialization sequence from Lib_UTFT */
+
+ /* register reset */
+ write_reg(par, 0x0003, 0x0001); /* Soft reset */
+
+ /* oscillator start */
+ write_reg(par, 0x003A, 0x0001); /* Oscillator 0: stop, 1: operation */
+ udelay(100);
+
+ /* y-setting */
+ write_reg(par, 0x0024, 0x007B); /* amplitude setting */
+ udelay(10);
+ write_reg(par, 0x0025, 0x003B); /* amplitude setting */
+ write_reg(par, 0x0026, 0x0034); /* amplitude setting */
+ udelay(10);
+ write_reg(par, 0x0027, 0x0004); /* amplitude setting */
+ write_reg(par, 0x0052, 0x0025); /* circuit setting 1 */
+ udelay(10);
+ write_reg(par, 0x0053, 0x0033); /* circuit setting 2 */
+ write_reg(par, 0x0061, 0x001C); /* adjustment V10 positive polarity */
+ udelay(10);
+ write_reg(par, 0x0062, 0x002C); /* adjustment V9 negative polarity */
+ write_reg(par, 0x0063, 0x0022); /* adjustment V34 positive polarity */
+ udelay(10);
+ write_reg(par, 0x0064, 0x0027); /* adjustment V31 negative polarity */
+ udelay(10);
+ write_reg(par, 0x0065, 0x0014); /* adjustment V61 negative polarity */
+ udelay(10);
+ write_reg(par, 0x0066, 0x0010); /* adjustment V61 negative polarity */
+
+ /* Basic clock for 1 line (BASECOUNT[7:0]) number specified */
+ write_reg(par, 0x002E, 0x002D);
+
+ /* Power supply setting */
+ write_reg(par, 0x0019, 0x0000); /* DC/DC output setting */
+ udelay(200);
+ write_reg(par, 0x001A, 0x1000); /* DC/DC frequency setting */
+ write_reg(par, 0x001B, 0x0023); /* DC/DC rising setting */
+ write_reg(par, 0x001C, 0x0C01); /* Regulator voltage setting */
+ write_reg(par, 0x001D, 0x0000); /* Regulator current setting */
+ write_reg(par, 0x001E, 0x0009); /* VCOM output setting */
+ write_reg(par, 0x001F, 0x0035); /* VCOM amplitude setting */
+ write_reg(par, 0x0020, 0x0015); /* VCOM center setting */
+ write_reg(par, 0x0018, 0x1E7B); /* DC/DC operation setting */
+
+ /* window setting */
+ write_reg(par, 0x0008, 0x0000); /* Minimum X address */
+ write_reg(par, 0x0009, 0x00EF); /* Maximum X address */
+ write_reg(par, 0x000a, 0x0000); /* Minimum Y address */
+ write_reg(par, 0x000b, 0x013F); /* Maximum Y address */
+
+ /* LCD display area setting */
+ write_reg(par, 0x0029, 0x0000); /* [LCDSIZE] X MIN. size set */
+ write_reg(par, 0x002A, 0x0000); /* [LCDSIZE] Y MIN. size set */
+ write_reg(par, 0x002B, 0x00EF); /* [LCDSIZE] X MAX. size set */
+ write_reg(par, 0x002C, 0x013F); /* [LCDSIZE] Y MAX. size set */
+
+ /* Gate scan setting */
+ write_reg(par, 0x0032, 0x0002);
+
+ /* n line inversion line number */
+ write_reg(par, 0x0033, 0x0000);
+
+ /* Line inversion/frame inversion/interlace setting */
+ write_reg(par, 0x0037, 0x0000);
+
+ /* Gate scan operation setting register */
+ write_reg(par, 0x003B, 0x0001);
+
+ /* Color mode */
+ /* GS = 0: 260k color (64 gray scale), GS = 1: 8 color (2 gray scale) */
+ write_reg(par, 0x0004, 0x0000);
+
+ /* RAM control register */
+ write_reg(par, 0x0005, 0x0000); /* Window access 00:Normal, 10:Window */
+
+ /* Display setting register 2 */
+ write_reg(par, 0x0001, 0x0000);
+
+ /* display setting */
+ write_reg(par, 0x0000, 0x0000); /* display on */
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+ switch (par->info->var.rotate) {
+ /* R20h = Horizontal GRAM Start Address */
+ /* R21h = Vertical GRAM Start Address */
+ case 0:
+ write_reg(par, 0x0006, xs);
+ write_reg(par, 0x0007, ys);
+ break;
+ case 180:
+ write_reg(par, 0x0006, WIDTH - 1 - xs);
+ write_reg(par, 0x0007, HEIGHT - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x0006, WIDTH - 1 - ys);
+ write_reg(par, 0x0007, xs);
+ break;
+ case 90:
+ write_reg(par, 0x0006, ys);
+ write_reg(par, 0x0007, HEIGHT - 1 - xs);
+ break;
+ }
+
+ write_reg(par, 0x0e); /* Write Data to GRAM */
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ switch (par->info->var.rotate) {
+ /* AM: GRAM update direction */
+ case 0:
+ write_reg(par, 0x01, 0x0000);
+ write_reg(par, 0x05, 0x0000);
+ break;
+ case 180:
+ write_reg(par, 0x01, 0x00C0);
+ write_reg(par, 0x05, 0x0000);
+ break;
+ case 270:
+ write_reg(par, 0x01, 0x0080);
+ write_reg(par, 0x05, 0x0001);
+ break;
+ case 90:
+ write_reg(par, 0x01, 0x0040);
+ write_reg(par, 0x05, 0x0001);
+ break;
+ }
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 16,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "nec,upd161704", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:upd161704");
+MODULE_ALIAS("platform:upd161704");
+
+MODULE_DESCRIPTION("FB driver for the uPD161704 LCD Controller");
+MODULE_AUTHOR("Seong-Woo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
new file mode 100644
index 000000000000..975b579359f3
--- /dev/null
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -0,0 +1,324 @@
+/*
+ * FB driver for the Watterott LCD Controller
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_watterott"
+#define WIDTH 320
+#define HEIGHT 240
+#define FPS 5
+#define TXBUFLEN 1024
+#define DEFAULT_BRIGHTNESS 50
+
+#define CMD_VERSION 0x01
+#define CMD_LCD_LED 0x10
+#define CMD_LCD_RESET 0x11
+#define CMD_LCD_ORIENTATION 0x20
+#define CMD_LCD_DRAWIMAGE 0x27
+#define COLOR_RGB323 8
+#define COLOR_RGB332 9
+#define COLOR_RGB233 10
+#define COLOR_RGB565 16
+
+
+static short mode = 565;
+module_param(mode, short, 0);
+MODULE_PARM_DESC(mode, "RGB color transfer mode: 332, 565 (default)");
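+/* mode=332 makes init_display() switch write_vmem to the 8-bit (RGB332) transfer path */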
+
+static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i, ret;
+ u8 *buf = par->buf;
+
+ va_start(args, len);
+ for (i = 0; i < len; i++)
+ *buf++ = (u8)va_arg(args, unsigned int);
+ va_end(args);
+
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
+ par->info->device, u8, par->buf, len, "%s: ", __func__);
+
+ ret = par->fbtftops.write(par, par->buf, len);
+ if (ret < 0) {
+ dev_err(par->info->device,
+ "%s: write() failed and returned %d\n", __func__, ret);
+ return;
+ }
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ unsigned start_line, end_line;
+ u16 *vmem16 = (u16 *)(par->info->screen_base + offset);
+ u16 *pos = par->txbuf.buf + 1;
+ u16 *buf16 = par->txbuf.buf + 10;
+ int i, j;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
+ start_line = offset / par->info->fix.line_length;
+ end_line = start_line + (len / par->info->fix.line_length) - 1;
+
+ /* Set 10-byte header: cmd, x, y, w, h (big-endian u16 each), color mode */
+ ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
+ pos[0] = 0;
+ pos[2] = cpu_to_be16(par->info->var.xres);
+ pos[3] = cpu_to_be16(1);
+ ((u8 *)par->txbuf.buf)[9] = COLOR_RGB565;
+
+ for (i = start_line; i <= end_line; i++) {
+ pos[1] = cpu_to_be16(i);
+ for (j = 0; j < par->info->var.xres; j++)
+ buf16[j] = cpu_to_be16(*vmem16++);
+ ret = par->fbtftops.write(par,
+ par->txbuf.buf, 10 + par->info->fix.line_length);
+ if (ret < 0)
+ return ret;
+ udelay(300);
+ }
+
+ return 0;
+}
+
+#define RGB565toRGB323(c) (((c&0xE000)>>8) | ((c&0x0600)>>6) | ((c&0x001C)>>2))
+#define RGB565toRGB332(c) (((c&0xE000)>>8) | ((c&0x0700)>>6) | ((c&0x0018)>>3))
+#define RGB565toRGB233(c) (((c&0xC000)>>8) | ((c&0x0700)>>5) | ((c&0x001C)>>2))
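+/*
+ * Only the top bits of each RGB565 channel survive these conversions
+ * (3-2-3 for RGB323, 3-3-2 for RGB332, 2-3-3 for RGB233). A quick worked
+ * example for RGB565toRGB332: white 0xFFFF -> 0xE0 | 0x1C | 0x03 = 0xFF,
+ * and pure green 0x07E0 -> 0x1C (bits 4-2), i.e. pure green stays pure green.
+ */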
+
+static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
+{
+ unsigned start_line, end_line;
+ u16 *vmem16 = (u16 *)(par->info->screen_base + offset);
+ u16 *pos = par->txbuf.buf + 1;
+ u8 *buf8 = par->txbuf.buf + 10;
+ int i, j;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
+ start_line = offset / par->info->fix.line_length;
+ end_line = start_line + (len / par->info->fix.line_length) - 1;
+
+ /* Set 10-byte header: cmd, x, y, w, h (big-endian u16 each), color mode */
+ ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
+ pos[0] = 0;
+ pos[2] = cpu_to_be16(par->info->var.xres);
+ pos[3] = cpu_to_be16(1);
+ ((u8 *)par->txbuf.buf)[9] = COLOR_RGB332;
+
+ for (i = start_line; i <= end_line; i++) {
+ pos[1] = cpu_to_be16(i);
+ for (j = 0; j < par->info->var.xres; j++) {
+ buf8[j] = RGB565toRGB332(*vmem16);
+ vmem16++;
+ }
+ ret = par->fbtftops.write(par,
+ par->txbuf.buf, 10 + par->info->var.xres);
+ if (ret < 0)
+ return ret;
+ udelay(700);
+ }
+
+ return 0;
+}
+
+static unsigned firmware_version(struct fbtft_par *par)
+{
+ u8 rxbuf[4] = {0, };
+
+ write_reg(par, CMD_VERSION);
+ par->fbtftops.read(par, rxbuf, 4);
+ if (rxbuf[1] != '.')
+ return 0;
+
+ return (rxbuf[0] - '0') << 8 | (rxbuf[2] - '0') << 4 | (rxbuf[3] - '0');
+}
+
+static int init_display(struct fbtft_par *par)
+{
+ int ret;
+ unsigned version;
+ u8 save_mode;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* enable SPI interface by having CS and MOSI low during reset */
+ save_mode = par->spi->mode;
+ par->spi->mode |= SPI_CS_HIGH;
+ ret = par->spi->master->setup(par->spi); /* set CS inactive low */
+ if (ret) {
+ dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+ return ret;
+ }
+ write_reg(par, 0x00); /* make sure mode is set */
+
+ mdelay(50);
+ par->fbtftops.reset(par);
+ mdelay(1000);
+ par->spi->mode = save_mode;
+ ret = par->spi->master->setup(par->spi);
+ if (ret) {
+ dev_err(par->info->device, "Could not restore SPI mode\n");
+ return ret;
+ }
+ write_reg(par, 0x00);
+
+ version = firmware_version(par);
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Firmware version: %x.%02x\n",
+ version >> 8, version & 0xFF);
+
+ if (mode == 332)
+ par->fbtftops.write_vmem = write_vmem_8bit;
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ /* not used on this controller */
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ u8 rotate;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* this controller rotates clockwise */
+ switch (par->info->var.rotate) {
+ case 90:
+ rotate = 27;
+ break;
+ case 180:
+ rotate = 18;
+ break;
+ case 270:
+ rotate = 9;
+ break;
+ default:
+ rotate = 0;
+ }
+ write_reg(par, CMD_LCD_ORIENTATION, rotate);
+
+ return 0;
+}
+
+static int verify_gpios(struct fbtft_par *par)
+{
+ if (par->gpio.reset < 0) {
+ dev_err(par->info->device, "Missing 'reset' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_FB_BACKLIGHT
+static int backlight_chip_update_status(struct backlight_device *bd)
+{
+ struct fbtft_par *par = bl_get_data(bd);
+ int brightness = bd->props.brightness;
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par,
+ "%s: brightness=%d, power=%d, fb_blank=%d\n",
+ __func__, bd->props.brightness, bd->props.power,
+ bd->props.fb_blank);
+
+ if (bd->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ write_reg(par, CMD_LCD_LED, brightness);
+
+ return 0;
+}
+
+static void register_chip_backlight(struct fbtft_par *par)
+{
+ struct backlight_device *bd;
+ struct backlight_properties bl_props = { 0, };
+ struct backlight_ops *bl_ops;
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
+
+ bl_ops = devm_kzalloc(par->info->device, sizeof(struct backlight_ops),
+ GFP_KERNEL);
+ if (!bl_ops) {
+ dev_err(par->info->device,
+ "%s: could not allocate memory for backlight operations.\n",
+ __func__);
+ return;
+ }
+
+ bl_ops->update_status = backlight_chip_update_status;
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.power = FB_BLANK_POWERDOWN;
+ bl_props.max_brightness = 100;
+ bl_props.brightness = DEFAULT_BRIGHTNESS;
+
+ bd = backlight_device_register(dev_driver_string(par->info->device),
+ par->info->device, par, bl_ops, &bl_props);
+ if (IS_ERR(bd)) {
+ dev_err(par->info->device,
+ "cannot register backlight device (%ld)\n",
+ PTR_ERR(bd));
+ return;
+ }
+ par->info->bl_dev = bd;
+
+ if (!par->fbtftops.unregister_backlight)
+ par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
+}
+#else
+#define register_chip_backlight NULL
+#endif
+
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .buswidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fps = FPS,
+ .txbuflen = TXBUFLEN,
+ .fbtftops = {
+ .write_register = write_reg8_bus8,
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .verify_gpios = verify_gpios,
+ .register_backlight = register_chip_backlight,
+ },
+};
+FBTFT_REGISTER_DRIVER(DRVNAME, "watterott,openlcd", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+
+MODULE_DESCRIPTION("FB driver for the Watterott LCD Controller");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
new file mode 100644
index 000000000000..b3cddb0b3d69
--- /dev/null
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -0,0 +1,256 @@
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include "fbtft.h"
+
+
+
+
+/*****************************************************************************
+ *
+ * void (*write_reg)(struct fbtft_par *par, int len, ...);
+ *
+ *****************************************************************************/
+
+#define define_fbtft_write_reg(func, type, modifier) \
+void func(struct fbtft_par *par, int len, ...) \
+{ \
+ va_list args; \
+ int i, ret; \
+ int offset = 0; \
+ type *buf = (type *)par->buf; \
+ \
+ if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) { \
+ va_start(args, len); \
+ for (i = 0; i < len; i++) { \
+ buf[i] = (type)va_arg(args, unsigned int); \
+ } \
+ va_end(args); \
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, type, buf, len, "%s: ", __func__); \
+ } \
+ \
+ va_start(args, len); \
+ \
+ if (par->startbyte) { \
+ *(u8 *)par->buf = par->startbyte; \
+ buf = (type *)(par->buf + 1); \
+ offset = 1; \
+ } \
+ \
+ *buf = modifier((type)va_arg(args, unsigned int)); \
+ if (par->gpio.dc != -1) \
+ gpio_set_value(par->gpio.dc, 0); \
+ ret = par->fbtftops.write(par, par->buf, sizeof(type)+offset); \
+ if (ret < 0) { \
+ va_end(args); \
+ dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret); \
+ return; \
+ } \
+ len--; \
+ \
+ if (par->startbyte) \
+ *(u8 *)par->buf = par->startbyte | 0x2; \
+ \
+ if (len) { \
+ i = len; \
+ while (i--) { \
+ *buf++ = modifier((type)va_arg(args, unsigned int)); \
+ } \
+ if (par->gpio.dc != -1) \
+ gpio_set_value(par->gpio.dc, 1); \
+ ret = par->fbtftops.write(par, par->buf, len * (sizeof(type)+offset)); \
+ if (ret < 0) { \
+ va_end(args); \
+ dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret); \
+ return; \
+ } \
+ } \
+ va_end(args); \
+} \
+EXPORT_SYMBOL(func);
+
+define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, )
+define_fbtft_write_reg(fbtft_write_reg16_bus8, u16, cpu_to_be16)
+define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, )
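+/*
+ * The instantiations above generate fbtft_write_reg8_bus8(),
+ * fbtft_write_reg16_bus8() and fbtft_write_reg16_bus16(). The first value
+ * after 'len' is sent as the command with the 'dc' gpio low (if one is set),
+ * any remaining values are sent as parameters with 'dc' high, e.g.
+ * write_reg(par, 0x2A, 0x00, 0x00, 0x00, 0xEF) from a display driver.
+ */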
+
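+/*
+ * 9-bit bus variant: each 16-bit buffer word carries the dc bit in bit 8
+ * (0 for the command word, 1 for parameter words). When only an 8-bit SPI
+ * controller is available, the start of the buffer is padded with zero
+ * words (assumed to be no-ops) so the word count is a multiple of four,
+ * as required by fbtft_write_spi_emulate_9().
+ */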
+void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i, ret;
+ int pad = 0;
+ u16 *buf = (u16 *)par->buf;
+
+ if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) {
+ va_start(args, len);
+ for (i = 0; i < len; i++)
+ *(((u8 *)buf) + i) = (u8)va_arg(args, unsigned int);
+ va_end(args);
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
+ par->info->device, u8, buf, len, "%s: ", __func__);
+ }
+ if (len <= 0)
+ return;
+
+ if (par->spi && (par->spi->bits_per_word == 8)) {
+ /* we're emulating 9-bit, pad start of buffer with no-ops
+ (assuming here that zero is a no-op) */
+ pad = (len % 4) ? 4 - (len % 4) : 0;
+ for (i = 0; i < pad; i++)
+ *buf++ = 0x000;
+ }
+
+ va_start(args, len);
+ *buf++ = (u8)va_arg(args, unsigned int);
+ i = len - 1;
+ while (i--) {
+ *buf = (u8)va_arg(args, unsigned int);
+ *buf++ |= 0x100; /* dc=1 */
+ }
+ va_end(args);
+ ret = par->fbtftops.write(par, par->buf, (len + pad) * sizeof(u16));
+ if (ret < 0) {
+ dev_err(par->info->device,
+ "%s: write() failed and returned %d\n", __func__, ret);
+ return;
+ }
+}
+EXPORT_SYMBOL(fbtft_write_reg8_bus9);
+
+
+
+
+/*****************************************************************************
+ *
+ * int (*write_vmem)(struct fbtft_par *par);
+ *
+ *****************************************************************************/
+
+/* 16 bit pixel over 8-bit databus */
+int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16;
+ u16 *txbuf16 = (u16 *)par->txbuf.buf;
+ size_t remain;
+ size_t to_copy;
+ size_t tx_array_size;
+ int i;
+ int ret = 0;
+ size_t startbyte_size = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
+ __func__, offset, len);
+
+ remain = len / 2;
+ vmem16 = (u16 *)(par->info->screen_base + offset);
+
+ if (par->gpio.dc != -1)
+ gpio_set_value(par->gpio.dc, 1);
+
+ /* non buffered write */
+ if (!par->txbuf.buf)
+ return par->fbtftops.write(par, vmem16, len);
+
+ /* buffered write */
+ tx_array_size = par->txbuf.len / 2;
+
+ if (par->startbyte) {
+ txbuf16 = (u16 *)(par->txbuf.buf + 1);
+ tx_array_size -= 2;
+ *(u8 *)(par->txbuf.buf) = par->startbyte | 0x2;
+ startbyte_size = 1;
+ }
+
+ while (remain) {
+ to_copy = remain > tx_array_size ? tx_array_size : remain;
+ dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
+
+ for (i = 0; i < to_copy; i++)
+ txbuf16[i] = cpu_to_be16(vmem16[i]);
+
+ vmem16 = vmem16 + to_copy;
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ startbyte_size + to_copy * 2);
+ if (ret < 0)
+ return ret;
+ remain -= to_copy;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_write_vmem16_bus8);
+
+/* 16 bit pixel over 9-bit SPI bus: dc + high byte, dc + low byte */
+int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u8 *vmem8;
+ u16 *txbuf16 = par->txbuf.buf;
+ size_t remain;
+ size_t to_copy;
+ size_t tx_array_size;
+ int i;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
+ __func__, offset, len);
+
+ if (!par->txbuf.buf) {
+ dev_err(par->info->device, "%s: txbuf.buf is NULL\n", __func__);
+ return -1;
+ }
+
+ remain = len;
+ vmem8 = par->info->screen_base + offset;
+
+ tx_array_size = par->txbuf.len / 2;
+
+ while (remain) {
+ to_copy = remain > tx_array_size ? tx_array_size : remain;
+ dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
+
+#ifdef __LITTLE_ENDIAN
+ for (i = 0; i < to_copy; i += 2) {
+ txbuf16[i] = 0x0100 | vmem8[i+1];
+ txbuf16[i+1] = 0x0100 | vmem8[i];
+ }
+#else
+ for (i = 0; i < to_copy; i++)
+ txbuf16[i] = 0x0100 | vmem8[i];
+#endif
+ vmem8 = vmem8 + to_copy;
+ ret = par->fbtftops.write(par, par->txbuf.buf, to_copy*2);
+ if (ret < 0)
+ return ret;
+ remain -= to_copy;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_write_vmem16_bus9);
+
+int fbtft_write_vmem8_bus8(struct fbtft_par *par, size_t offset, size_t len)
+{
+ dev_err(par->info->device, "%s: function not implemented\n", __func__);
+ return -1;
+}
+EXPORT_SYMBOL(fbtft_write_vmem8_bus8);
+
+/* 16 bit pixel over 16-bit databus */
+int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
+ __func__, offset, len);
+
+ vmem16 = (u16 *)(par->info->screen_base + offset);
+
+ if (par->gpio.dc != -1)
+ gpio_set_value(par->gpio.dc, 1);
+
+ /* no need for buffered write with 16-bit bus */
+ return par->fbtftops.write(par, vmem16, len);
+}
+EXPORT_SYMBOL(fbtft_write_vmem16_bus16);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
new file mode 100644
index 000000000000..37dcf7eb191a
--- /dev/null
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -0,0 +1,1521 @@
+/*
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This driver is inspired by:
+ * st7735fb.c, Copyright (C) 2011, Matt Porter
+ * broadsheetfb.c, Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
+#include "fbtft.h"
+
+extern void fbtft_sysfs_init(struct fbtft_par *par);
+extern void fbtft_sysfs_exit(struct fbtft_par *par);
+extern void fbtft_expand_debug_value(unsigned long *debug);
+extern int fbtft_gamma_parse_str(struct fbtft_par *par, unsigned long *curves,
+ const char *str, int size);
+
+static unsigned long debug;
+module_param(debug, ulong, 0);
+MODULE_PARM_DESC(debug, "override device debug level");
+
+static bool dma = true;
+module_param(dma, bool, 0);
+MODULE_PARM_DESC(dma, "Use DMA buffer");
+
+
+void fbtft_dbg_hex(const struct device *dev, int groupsize,
+ void *buf, size_t len, const char *fmt, ...)
+{
+ va_list args;
+ static char textbuf[512];
+ char *text = textbuf;
+ size_t text_len;
+
+ va_start(args, fmt);
+ text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
+ va_end(args);
+
+ hex_dump_to_buffer(buf, len, 32, groupsize, text + text_len,
+ 512 - text_len, false);
+
+ if (len > 32)
+ dev_info(dev, "%s ...\n", text);
+ else
+ dev_info(dev, "%s\n", text);
+}
+EXPORT_SYMBOL(fbtft_dbg_hex);
+
+static unsigned long fbtft_request_gpios_match(struct fbtft_par *par,
+ const struct fbtft_gpio *gpio)
+{
+ int ret;
+ long val;
+
+ fbtft_par_dbg(DEBUG_REQUEST_GPIOS_MATCH, par, "%s('%s')\n",
+ __func__, gpio->name);
+
+ if (strcasecmp(gpio->name, "reset") == 0) {
+ par->gpio.reset = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ } else if (strcasecmp(gpio->name, "dc") == 0) {
+ par->gpio.dc = gpio->gpio;
+ return GPIOF_OUT_INIT_LOW;
+ } else if (strcasecmp(gpio->name, "cs") == 0) {
+ par->gpio.cs = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ } else if (strcasecmp(gpio->name, "wr") == 0) {
+ par->gpio.wr = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ } else if (strcasecmp(gpio->name, "rd") == 0) {
+ par->gpio.rd = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ } else if (strcasecmp(gpio->name, "latch") == 0) {
+ par->gpio.latch = gpio->gpio;
+ return GPIOF_OUT_INIT_LOW;
+ } else if (gpio->name[0] == 'd' && gpio->name[1] == 'b') {
+ ret = kstrtol(&gpio->name[2], 10, &val);
+ if (ret == 0 && val < 16) {
+ par->gpio.db[val] = gpio->gpio;
+ return GPIOF_OUT_INIT_LOW;
+ }
+ } else if (strcasecmp(gpio->name, "led") == 0) {
+ par->gpio.led[0] = gpio->gpio;
+ return GPIOF_OUT_INIT_LOW;
+ } else if (strcasecmp(gpio->name, "led_") == 0) {
+ par->gpio.led[0] = gpio->gpio;
+ return GPIOF_OUT_INIT_HIGH;
+ }
+
+ return FBTFT_GPIO_NO_MATCH;
+}
+
+static int fbtft_request_gpios(struct fbtft_par *par)
+{
+ struct fbtft_platform_data *pdata = par->pdata;
+ const struct fbtft_gpio *gpio;
+ unsigned long flags;
+ int ret;
+
+ if (pdata && pdata->gpios) {
+ gpio = pdata->gpios;
+ while (gpio->name[0]) {
+ flags = FBTFT_GPIO_NO_MATCH;
+ /* if driver provides match function, try it first,
+ if no match use our own */
+ if (par->fbtftops.request_gpios_match)
+ flags = par->fbtftops.request_gpios_match(par, gpio);
+ if (flags == FBTFT_GPIO_NO_MATCH)
+ flags = fbtft_request_gpios_match(par, gpio);
+ if (flags != FBTFT_GPIO_NO_MATCH) {
+ ret = devm_gpio_request_one(par->info->device,
+ gpio->gpio, flags,
+ par->info->device->driver->name);
+ if (ret < 0) {
+ dev_err(par->info->device,
+ "%s: gpio_request_one('%s'=%d) failed with %d\n",
+ __func__, gpio->name,
+ gpio->gpio, ret);
+ return ret;
+ }
+ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par,
+ "%s: '%s' = GPIO%d\n",
+ __func__, gpio->name, gpio->gpio);
+ }
+ gpio++;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int fbtft_request_one_gpio(struct fbtft_par *par,
+ const char *name, int index, int *gpiop)
+{
+ struct device *dev = par->info->device;
+ struct device_node *node = dev->of_node;
+ int gpio, flags, ret = 0;
+ enum of_gpio_flags of_flags;
+
+ if (of_find_property(node, name, NULL)) {
+ gpio = of_get_named_gpio_flags(node, name, index, &of_flags);
+ if (gpio == -ENOENT)
+ return 0;
+ if (gpio == -EPROBE_DEFER)
+ return gpio;
+ if (gpio < 0) {
+ dev_err(dev,
+ "failed to get '%s' from DT\n", name);
+ return gpio;
+ }
+
+ /* active low translates to initially low */
+ flags = (of_flags & OF_GPIO_ACTIVE_LOW) ? GPIOF_OUT_INIT_LOW :
+ GPIOF_OUT_INIT_HIGH;
+ ret = devm_gpio_request_one(dev, gpio, flags,
+ dev->driver->name);
+ if (ret) {
+ dev_err(dev,
+ "gpio_request_one('%s'=%d) failed with %d\n",
+ name, gpio, ret);
+ return ret;
+ }
+ if (gpiop)
+ *gpiop = gpio;
+ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' = GPIO%d\n",
+ __func__, name, gpio);
+ }
+
+ return ret;
+}
+
+static int fbtft_request_gpios_dt(struct fbtft_par *par)
+{
+ int i;
+ int ret;
+
+ if (!par->info->device->of_node)
+ return -EINVAL;
+
+ ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch);
+ if (ret)
+ return ret;
+ for (i = 0; i < 16; i++) {
+ ret = fbtft_request_one_gpio(par, "db-gpios", i,
+ &par->gpio.db[i]);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "led-gpios", i,
+ &par->gpio.led[i]);
+ if (ret)
+ return ret;
+ ret = fbtft_request_one_gpio(par, "aux-gpios", i,
+ &par->gpio.aux[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_FB_BACKLIGHT
+static int fbtft_backlight_update_status(struct backlight_device *bd)
+{
+ struct fbtft_par *par = bl_get_data(bd);
+ bool polarity = !!(bd->props.state & BL_CORE_DRIVER1);
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par,
+ "%s: polarity=%d, power=%d, fb_blank=%d\n",
+ __func__, polarity, bd->props.power, bd->props.fb_blank);
+
+ if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK))
+ gpio_set_value(par->gpio.led[0], polarity);
+ else
+ gpio_set_value(par->gpio.led[0], !polarity);
+
+ return 0;
+}
+
+static int fbtft_backlight_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+void fbtft_unregister_backlight(struct fbtft_par *par)
+{
+ const struct backlight_ops *bl_ops;
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
+
+ if (par->info->bl_dev) {
+ par->info->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(par->info->bl_dev);
+ bl_ops = par->info->bl_dev->ops;
+ backlight_device_unregister(par->info->bl_dev);
+ par->info->bl_dev = NULL;
+ }
+}
+
+void fbtft_register_backlight(struct fbtft_par *par)
+{
+ struct backlight_device *bd;
+ struct backlight_properties bl_props = { 0, };
+ struct backlight_ops *bl_ops;
+
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
+
+ if (par->gpio.led[0] == -1) {
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par,
+ "%s(): led pin not set, exiting.\n", __func__);
+ return;
+ }
+
+ bl_ops = devm_kzalloc(par->info->device, sizeof(struct backlight_ops),
+ GFP_KERNEL);
+ if (!bl_ops) {
+ dev_err(par->info->device,
+ "%s: could not allocate memory for backlight operations.\n",
+ __func__);
+ return;
+ }
+
+ bl_ops->get_brightness = fbtft_backlight_get_brightness;
+ bl_ops->update_status = fbtft_backlight_update_status;
+ bl_props.type = BACKLIGHT_RAW;
+ /* Assume backlight is off, get polarity from current state of pin */
+ bl_props.power = FB_BLANK_POWERDOWN;
+ if (!gpio_get_value(par->gpio.led[0]))
+ bl_props.state |= BL_CORE_DRIVER1;
+
+ bd = backlight_device_register(dev_driver_string(par->info->device),
+ par->info->device, par, bl_ops, &bl_props);
+ if (IS_ERR(bd)) {
+ dev_err(par->info->device,
+ "cannot register backlight device (%ld)\n",
+ PTR_ERR(bd));
+ return;
+ }
+ par->info->bl_dev = bd;
+
+ if (!par->fbtftops.unregister_backlight)
+ par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
+}
+#else
+void fbtft_register_backlight(struct fbtft_par *par) { };
+void fbtft_unregister_backlight(struct fbtft_par *par) { };
+#endif
+EXPORT_SYMBOL(fbtft_register_backlight);
+EXPORT_SYMBOL(fbtft_unregister_backlight);
+
+static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
+ int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ /* Column address set */
+ write_reg(par, 0x2A,
+ (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+
+ /* Row address set */
+ write_reg(par, 0x2B,
+ (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+
+ /* Memory write */
+ write_reg(par, 0x2C);
+}
+
+
+static void fbtft_reset(struct fbtft_par *par)
+{
+ if (par->gpio.reset == -1)
+ return;
+ fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
+ gpio_set_value(par->gpio.reset, 0);
+ udelay(20);
+ gpio_set_value(par->gpio.reset, 1);
+ mdelay(120);
+}
+
+
+static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
+ unsigned end_line)
+{
+ size_t offset, len;
+ struct timespec ts_start, ts_end, ts_fps, ts_duration;
+ long fps_ms, fps_us, duration_ms, duration_us;
+ long fps, throughput;
+ bool timeit = false;
+ int ret = 0;
+
+ if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) {
+ if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
+ ((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) {
+ getnstimeofday(&ts_start);
+ timeit = true;
+ }
+ }
+
+ /* Sanity checks */
+ if (start_line > end_line) {
+ dev_warn(par->info->device,
+ "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
+ __func__, start_line, end_line);
+ start_line = 0;
+ end_line = par->info->var.yres - 1;
+ }
+ if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) {
+ dev_warn(par->info->device,
+ "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
+ __func__, start_line, end_line, par->info->var.yres - 1);
+ start_line = 0;
+ end_line = par->info->var.yres - 1;
+ }
+
+ fbtft_par_dbg(DEBUG_UPDATE_DISPLAY, par, "%s(start_line=%u, end_line=%u)\n",
+ __func__, start_line, end_line);
+
+ if (par->fbtftops.set_addr_win)
+ par->fbtftops.set_addr_win(par, 0, start_line,
+ par->info->var.xres-1, end_line);
+
+ offset = start_line * par->info->fix.line_length;
+ len = (end_line - start_line + 1) * par->info->fix.line_length;
+ ret = par->fbtftops.write_vmem(par, offset, len);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "%s: write_vmem failed to update display buffer\n",
+ __func__);
+
+ if (unlikely(timeit)) {
+ getnstimeofday(&ts_end);
+ if (par->update_time.tv_nsec == 0 && par->update_time.tv_sec == 0) {
+ par->update_time.tv_sec = ts_start.tv_sec;
+ par->update_time.tv_nsec = ts_start.tv_nsec;
+ }
+ ts_fps = timespec_sub(ts_start, par->update_time);
+ par->update_time.tv_sec = ts_start.tv_sec;
+ par->update_time.tv_nsec = ts_start.tv_nsec;
+ fps_ms = (ts_fps.tv_sec * 1000) + ((ts_fps.tv_nsec / 1000000) % 1000);
+ fps_us = (ts_fps.tv_nsec / 1000) % 1000;
+ fps = fps_ms * 1000 + fps_us;
+ fps = fps ? 1000000 / fps : 0;
+
+ ts_duration = timespec_sub(ts_end, ts_start);
+ duration_ms = (ts_duration.tv_sec * 1000) + ((ts_duration.tv_nsec / 1000000) % 1000);
+ duration_us = (ts_duration.tv_nsec / 1000) % 1000;
+ throughput = duration_ms * 1000 + duration_us;
+ throughput = throughput ? (len * 1000) / throughput : 0;
+ throughput = throughput * 1000 / 1024;
+
+ dev_info(par->info->device,
+ "Display update: %ld kB/s (%ld.%.3ld ms), fps=%ld (%ld.%.3ld ms)\n",
+ throughput, duration_ms, duration_us,
+ fps, fps_ms, fps_us);
+ par->first_update_done = true;
+ }
+}
+
+
+static void fbtft_mkdirty(struct fb_info *info, int y, int height)
+{
+ struct fbtft_par *par = info->par;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ /* special case, needed ? */
+ if (y == -1) {
+ y = 0;
+ height = info->var.yres - 1;
+ }
+
+ /* Mark display lines/area as dirty */
+ spin_lock(&par->dirty_lock);
+ if (y < par->dirty_lines_start)
+ par->dirty_lines_start = y;
+ if (y + height - 1 > par->dirty_lines_end)
+ par->dirty_lines_end = y + height - 1;
+ spin_unlock(&par->dirty_lock);
+
+ /* Schedule deferred_io to update display (no-op if already on queue)*/
+ schedule_delayed_work(&info->deferred_work, fbdefio->delay);
+}
+
+static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ struct fbtft_par *par = info->par;
+ unsigned dirty_lines_start, dirty_lines_end;
+ struct page *page;
+ unsigned long index;
+ unsigned y_low = 0, y_high = 0;
+ int count = 0;
+
+ spin_lock(&par->dirty_lock);
+ dirty_lines_start = par->dirty_lines_start;
+ dirty_lines_end = par->dirty_lines_end;
+ /* set display line markers as clean */
+ par->dirty_lines_start = par->info->var.yres - 1;
+ par->dirty_lines_end = 0;
+ spin_unlock(&par->dirty_lock);
+
+ /* Mark display lines as dirty */
+ list_for_each_entry(page, pagelist, lru) {
+ count++;
+ index = page->index << PAGE_SHIFT;
+ y_low = index / info->fix.line_length;
+ y_high = (index + PAGE_SIZE - 1) / info->fix.line_length;
+ fbtft_dev_dbg(DEBUG_DEFERRED_IO, par, info->device,
+ "page->index=%lu y_low=%d y_high=%d\n",
+ page->index, y_low, y_high);
+ if (y_high > info->var.yres - 1)
+ y_high = info->var.yres - 1;
+ if (y_low < dirty_lines_start)
+ dirty_lines_start = y_low;
+ if (y_high > dirty_lines_end)
+ dirty_lines_end = y_high;
+ }
+
+ par->fbtftops.update_display(info->par,
+ dirty_lines_start, dirty_lines_end);
+}
+
+
+static void fbtft_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct fbtft_par *par = info->par;
+
+ fbtft_dev_dbg(DEBUG_FB_FILLRECT, par, info->dev,
+ "%s: dx=%d, dy=%d, width=%d, height=%d\n",
+ __func__, rect->dx, rect->dy, rect->width, rect->height);
+ sys_fillrect(info, rect);
+
+ par->fbtftops.mkdirty(info, rect->dy, rect->height);
+}
+
+static void fbtft_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct fbtft_par *par = info->par;
+
+ fbtft_dev_dbg(DEBUG_FB_COPYAREA, par, info->dev,
+ "%s: dx=%d, dy=%d, width=%d, height=%d\n",
+ __func__, area->dx, area->dy, area->width, area->height);
+ sys_copyarea(info, area);
+
+ par->fbtftops.mkdirty(info, area->dy, area->height);
+}
+
+static void fbtft_fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct fbtft_par *par = info->par;
+
+ fbtft_dev_dbg(DEBUG_FB_IMAGEBLIT, par, info->dev,
+ "%s: dx=%d, dy=%d, width=%d, height=%d\n",
+ __func__, image->dx, image->dy, image->width, image->height);
+ sys_imageblit(info, image);
+
+ par->fbtftops.mkdirty(info, image->dy, image->height);
+}
+
+static ssize_t fbtft_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct fbtft_par *par = info->par;
+ ssize_t res;
+
+ fbtft_dev_dbg(DEBUG_FB_WRITE, par, info->dev,
+ "%s: count=%zd, ppos=%llu\n", __func__, count, *ppos);
+ res = fb_sys_write(info, buf, count, ppos);
+
+ /* TODO: only mark changed area
+ update all for now */
+ par->fbtftops.mkdirty(info, -1, 0);
+
+ return res;
+}
+
+/* from pxafb.c */
+static unsigned int chan_to_field(unsigned chan, struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
+
+static int fbtft_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct fbtft_par *par = info->par;
+ unsigned val;
+ int ret = 1;
+
+ fbtft_dev_dbg(DEBUG_FB_SETCOLREG, par, info->dev,
+ "%s(regno=%u, red=0x%X, green=0x%X, blue=0x%X, trans=0x%X)\n",
+ __func__, regno, red, green, blue, transp);
+
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ if (regno < 16) {
+ u32 *pal = info->pseudo_palette;
+
+ val = chan_to_field(red, &info->var.red);
+ val |= chan_to_field(green, &info->var.green);
+ val |= chan_to_field(blue, &info->var.blue);
+
+ pal[regno] = val;
+ ret = 0;
+ }
+ break;
+
+ }
+ return ret;
+}
+
+static int fbtft_fb_blank(int blank, struct fb_info *info)
+{
+ struct fbtft_par *par = info->par;
+ int ret = -EINVAL;
+
+ fbtft_dev_dbg(DEBUG_FB_BLANK, par, info->dev, "%s(blank=%d)\n",
+ __func__, blank);
+
+ if (!par->fbtftops.blank)
+ return ret;
+
+ switch (blank) {
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ ret = par->fbtftops.blank(par, true);
+ break;
+ case FB_BLANK_UNBLANK:
+ ret = par->fbtftops.blank(par, false);
+ break;
+ }
+ return ret;
+}
+
+static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
+{
+ if (src->write)
+ dst->write = src->write;
+ if (src->read)
+ dst->read = src->read;
+ if (src->write_vmem)
+ dst->write_vmem = src->write_vmem;
+ if (src->write_register)
+ dst->write_register = src->write_register;
+ if (src->set_addr_win)
+ dst->set_addr_win = src->set_addr_win;
+ if (src->reset)
+ dst->reset = src->reset;
+ if (src->mkdirty)
+ dst->mkdirty = src->mkdirty;
+ if (src->update_display)
+ dst->update_display = src->update_display;
+ if (src->init_display)
+ dst->init_display = src->init_display;
+ if (src->blank)
+ dst->blank = src->blank;
+ if (src->request_gpios_match)
+ dst->request_gpios_match = src->request_gpios_match;
+ if (src->request_gpios)
+ dst->request_gpios = src->request_gpios;
+ if (src->verify_gpios)
+ dst->verify_gpios = src->verify_gpios;
+ if (src->register_backlight)
+ dst->register_backlight = src->register_backlight;
+ if (src->unregister_backlight)
+ dst->unregister_backlight = src->unregister_backlight;
+ if (src->set_var)
+ dst->set_var = src->set_var;
+ if (src->set_gamma)
+ dst->set_gamma = src->set_gamma;
+}
+
+/**
+ * fbtft_framebuffer_alloc - creates a new frame buffer info structure
+ *
+ * @display: pointer to structure describing the display
+ * @dev: pointer to the device for this fb, this can be NULL
+ *
+ * Creates a new frame buffer info structure.
+ *
+ * Also creates and populates the following structures:
+ * info->fbops
+ * info->fbdefio
+ * info->pseudo_palette
+ * par->fbtftops
+ * par->txbuf
+ *
+ * Returns the new structure, or NULL if an error occurred.
+ *
+ */
+struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+ struct device *dev)
+{
+ struct fb_info *info;
+ struct fbtft_par *par;
+ struct fb_ops *fbops = NULL;
+ struct fb_deferred_io *fbdefio = NULL;
+ struct fbtft_platform_data *pdata = dev->platform_data;
+ u8 *vmem = NULL;
+ void *txbuf = NULL;
+ void *buf = NULL;
+ unsigned width;
+ unsigned height;
+ int txbuflen = display->txbuflen;
+ unsigned bpp = display->bpp;
+ unsigned fps = display->fps;
+ int vmem_size, i;
+ int *init_sequence = display->init_sequence;
+ char *gamma = display->gamma;
+ unsigned long *gamma_curves = NULL;
+
+ /* sanity check */
+ if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) {
+ dev_err(dev,
+ "%s: FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n",
+ __func__, FBTFT_GAMMA_MAX_VALUES_TOTAL);
+ return NULL;
+ }
+
+ /* defaults */
+ if (!fps)
+ fps = 20;
+ if (!bpp)
+ bpp = 16;
+
+ if (!pdata) {
+ dev_err(dev, "platform data is missing\n");
+ return NULL;
+ }
+
+ /* override driver values? */
+ if (pdata->fps)
+ fps = pdata->fps;
+ if (pdata->txbuflen)
+ txbuflen = pdata->txbuflen;
+ if (pdata->display.init_sequence)
+ init_sequence = pdata->display.init_sequence;
+ if (pdata->gamma)
+ gamma = pdata->gamma;
+ if (pdata->display.debug)
+ display->debug = pdata->display.debug;
+ if (pdata->display.backlight)
+ display->backlight = pdata->display.backlight;
+ if (pdata->display.width)
+ display->width = pdata->display.width;
+ if (pdata->display.height)
+ display->height = pdata->display.height;
+ if (pdata->display.buswidth)
+ display->buswidth = pdata->display.buswidth;
+ if (pdata->display.regwidth)
+ display->regwidth = pdata->display.regwidth;
+
+ display->debug |= debug;
+ fbtft_expand_debug_value(&display->debug);
+
+ switch (pdata->rotate) {
+ case 90:
+ case 270:
+ width = display->height;
+ height = display->width;
+ break;
+ default:
+ width = display->width;
+ height = display->height;
+ }
+
+ vmem_size = display->width * display->height * bpp / 8;
+ vmem = vzalloc(vmem_size);
+ if (!vmem)
+ goto alloc_fail;
+
+ fbops = devm_kzalloc(dev, sizeof(struct fb_ops), GFP_KERNEL);
+ if (!fbops)
+ goto alloc_fail;
+
+ fbdefio = devm_kzalloc(dev, sizeof(struct fb_deferred_io), GFP_KERNEL);
+ if (!fbdefio)
+ goto alloc_fail;
+
+ buf = devm_kzalloc(dev, 128, GFP_KERNEL);
+ if (!buf)
+ goto alloc_fail;
+
+ if (display->gamma_num && display->gamma_len) {
+ gamma_curves = devm_kzalloc(dev, display->gamma_num * display->gamma_len * sizeof(gamma_curves[0]),
+ GFP_KERNEL);
+ if (!gamma_curves)
+ goto alloc_fail;
+ }
+
+ info = framebuffer_alloc(sizeof(struct fbtft_par), dev);
+ if (!info)
+ goto alloc_fail;
+
+ info->screen_base = (u8 __force __iomem *)vmem;
+ info->fbops = fbops;
+ info->fbdefio = fbdefio;
+
+ fbops->owner = dev->driver->owner;
+ fbops->fb_read = fb_sys_read;
+ fbops->fb_write = fbtft_fb_write;
+ fbops->fb_fillrect = fbtft_fb_fillrect;
+ fbops->fb_copyarea = fbtft_fb_copyarea;
+ fbops->fb_imageblit = fbtft_fb_imageblit;
+ fbops->fb_setcolreg = fbtft_fb_setcolreg;
+ fbops->fb_blank = fbtft_fb_blank;
+
+ fbdefio->delay = HZ/fps;
+ fbdefio->deferred_io = fbtft_deferred_io;
+ fb_deferred_io_init(info);
+
+ strncpy(info->fix.id, dev->driver->name, 16);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = 0;
+ info->fix.ywrapstep = 0;
+ info->fix.line_length = width*bpp/8;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.smem_len = vmem_size;
+
+ info->var.rotate = pdata->rotate;
+ info->var.xres = width;
+ info->var.yres = height;
+ info->var.xres_virtual = info->var.xres;
+ info->var.yres_virtual = info->var.yres;
+ info->var.bits_per_pixel = bpp;
+ info->var.nonstd = 1;
+
+ /* RGB565 */
+ info->var.red.offset = 11;
+ info->var.red.length = 5;
+ info->var.green.offset = 5;
+ info->var.green.length = 6;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+
+ par = info->par;
+ par->info = info;
+ par->pdata = dev->platform_data;
+ par->debug = display->debug;
+ par->buf = buf;
+ spin_lock_init(&par->dirty_lock);
+ par->bgr = pdata->bgr;
+ par->startbyte = pdata->startbyte;
+ par->init_sequence = init_sequence;
+ par->gamma.curves = gamma_curves;
+ par->gamma.num_curves = display->gamma_num;
+ par->gamma.num_values = display->gamma_len;
+ mutex_init(&par->gamma.lock);
+ info->pseudo_palette = par->pseudo_palette;
+
+ if (par->gamma.curves && gamma) {
+ if (fbtft_gamma_parse_str(par,
+ par->gamma.curves, gamma, strlen(gamma)))
+ goto alloc_fail;
+ }
+
+ /* Transmit buffer */
+ if (txbuflen == -1)
+ txbuflen = vmem_size + 2; /* add in case startbyte is used */
+
+#ifdef __LITTLE_ENDIAN
+ if ((!txbuflen) && (bpp > 8))
+ txbuflen = PAGE_SIZE; /* need buffer for byteswapping */
+#endif
+
+ if (txbuflen > 0) {
+ if (dma) {
+ dev->coherent_dma_mask = ~0;
+ txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA);
+ } else {
+ txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
+ }
+ if (!txbuf)
+ goto alloc_fail;
+ par->txbuf.buf = txbuf;
+ par->txbuf.len = txbuflen;
+ }
+
+ /* Initialize gpios to disabled */
+ par->gpio.reset = -1;
+ par->gpio.dc = -1;
+ par->gpio.rd = -1;
+ par->gpio.wr = -1;
+ par->gpio.cs = -1;
+ par->gpio.latch = -1;
+ for (i = 0; i < 16; i++) {
+ par->gpio.db[i] = -1;
+ par->gpio.led[i] = -1;
+ par->gpio.aux[i] = -1;
+ }
+
+ /* default fbtft operations */
+ par->fbtftops.write = fbtft_write_spi;
+ par->fbtftops.read = fbtft_read_spi;
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
+ par->fbtftops.write_register = fbtft_write_reg8_bus8;
+ par->fbtftops.set_addr_win = fbtft_set_addr_win;
+ par->fbtftops.reset = fbtft_reset;
+ par->fbtftops.mkdirty = fbtft_mkdirty;
+ par->fbtftops.update_display = fbtft_update_display;
+ par->fbtftops.request_gpios = fbtft_request_gpios;
+ if (display->backlight)
+ par->fbtftops.register_backlight = fbtft_register_backlight;
+
+ /* use driver provided functions */
+ fbtft_merge_fbtftops(&par->fbtftops, &display->fbtftops);
+
+ return info;
+
+alloc_fail:
+ vfree(vmem);
+
+ return NULL;
+}
+EXPORT_SYMBOL(fbtft_framebuffer_alloc);
+
+/**
+ * fbtft_framebuffer_release - frees up all memory used by the framebuffer
+ *
+ * @info: frame buffer info structure
+ *
+ */
+void fbtft_framebuffer_release(struct fb_info *info)
+{
+ fb_deferred_io_cleanup(info);
+ vfree(info->screen_base);
+ framebuffer_release(info);
+}
+EXPORT_SYMBOL(fbtft_framebuffer_release);
+
+/**
+ * fbtft_register_framebuffer - registers a tft frame buffer device
+ * @fb_info: frame buffer info structure
+ *
+ * Sets SPI driverdata if needed.
+ * Requests needed gpios.
+ * Initializes display.
+ * Updates display.
+ * Registers a frame buffer device @fb_info.
+ *
+ * Returns negative errno on error, or zero for success.
+ *
+ */
+int fbtft_register_framebuffer(struct fb_info *fb_info)
+{
+ int ret;
+ char text1[50] = "";
+ char text2[50] = "";
+ struct fbtft_par *par = fb_info->par;
+ struct spi_device *spi = par->spi;
+
+ /* sanity checks */
+ if (!par->fbtftops.init_display) {
+ dev_err(fb_info->device, "missing fbtftops.init_display()\n");
+ return -EINVAL;
+ }
+
+ if (spi)
+ spi_set_drvdata(spi, fb_info);
+ if (par->pdev)
+ platform_set_drvdata(par->pdev, fb_info);
+
+ ret = par->fbtftops.request_gpios(par);
+ if (ret < 0)
+ goto reg_fail;
+
+ if (par->fbtftops.verify_gpios) {
+ ret = par->fbtftops.verify_gpios(par);
+ if (ret < 0)
+ goto reg_fail;
+ }
+
+ ret = par->fbtftops.init_display(par);
+ if (ret < 0)
+ goto reg_fail;
+ if (par->fbtftops.set_var) {
+ ret = par->fbtftops.set_var(par);
+ if (ret < 0)
+ goto reg_fail;
+ }
+
+ /* update the entire display */
+ par->fbtftops.update_display(par, 0, par->info->var.yres - 1);
+
+ if (par->fbtftops.set_gamma && par->gamma.curves) {
+ ret = par->fbtftops.set_gamma(par, par->gamma.curves);
+ if (ret)
+ goto reg_fail;
+ }
+
+ if (par->fbtftops.register_backlight)
+ par->fbtftops.register_backlight(par);
+
+ ret = register_framebuffer(fb_info);
+ if (ret < 0)
+ goto reg_fail;
+
+ fbtft_sysfs_init(par);
+
+ if (par->txbuf.buf)
+ sprintf(text1, ", %d KiB %sbuffer memory",
+ par->txbuf.len >> 10, par->txbuf.dma ? "DMA " : "");
+ if (spi)
+ sprintf(text2, ", spi%d.%d at %d MHz", spi->master->bus_num,
+ spi->chip_select, spi->max_speed_hz/1000000);
+ dev_info(fb_info->dev,
+ "%s frame buffer, %dx%d, %d KiB video memory%s, fps=%lu%s\n",
+ fb_info->fix.id, fb_info->var.xres, fb_info->var.yres,
+ fb_info->fix.smem_len >> 10, text1,
+ HZ/fb_info->fbdefio->delay, text2);
+
+#ifdef CONFIG_FB_BACKLIGHT
+ /* Turn on backlight if available */
+ if (fb_info->bl_dev) {
+ fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
+ fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
+ }
+#endif
+
+ return 0;
+
+reg_fail:
+ if (par->fbtftops.unregister_backlight)
+ par->fbtftops.unregister_backlight(par);
+ if (spi)
+ spi_set_drvdata(spi, NULL);
+ if (par->pdev)
+ platform_set_drvdata(par->pdev, NULL);
+
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_register_framebuffer);
+
+/**
+ * fbtft_unregister_framebuffer - releases a tft frame buffer device
+ * @fb_info: frame buffer info structure
+ *
+ * Frees SPI driverdata if needed.
+ * Frees gpios.
+ * Unregisters frame buffer device.
+ *
+ */
+int fbtft_unregister_framebuffer(struct fb_info *fb_info)
+{
+ struct fbtft_par *par = fb_info->par;
+ struct spi_device *spi = par->spi;
+ int ret;
+
+ if (spi)
+ spi_set_drvdata(spi, NULL);
+ if (par->pdev)
+ platform_set_drvdata(par->pdev, NULL);
+ if (par->fbtftops.unregister_backlight)
+ par->fbtftops.unregister_backlight(par);
+ fbtft_sysfs_exit(par);
+ ret = unregister_framebuffer(fb_info);
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_unregister_framebuffer);
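+
+/*
+ * Minimal usage sketch (illustrative only; 'display' is the driver's
+ * struct fbtft_display and the mydisplay_* names are made up - most
+ * drivers use the FBTFT_REGISTER_DRIVER() helper instead):
+ *
+ *    static int mydisplay_probe(struct spi_device *spi)
+ *    {
+ *            struct fb_info *info;
+ *            struct fbtft_par *par;
+ *
+ *            info = fbtft_framebuffer_alloc(&display, &spi->dev);
+ *            if (!info)
+ *                    return -ENOMEM;
+ *            par = info->par;
+ *            par->spi = spi;
+ *            return fbtft_register_framebuffer(info);
+ *    }
+ *
+ *    static int mydisplay_remove(struct spi_device *spi)
+ *    {
+ *            struct fb_info *info = spi_get_drvdata(spi);
+ *
+ *            fbtft_unregister_framebuffer(info);
+ *            fbtft_framebuffer_release(info);
+ *            return 0;
+ *    }
+ */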
+
+#ifdef CONFIG_OF
+/**
+ * fbtft_init_display_dt() - Device Tree init_display() function
+ * @par: Driver data
+ *
+ * Return: 0 if successful, negative if error
+ */
+static int fbtft_init_display_dt(struct fbtft_par *par)
+{
+ struct device_node *node = par->info->device->of_node;
+ struct property *prop;
+ const __be32 *p;
+ u32 val;
+ int buf[64], i, j;
+ char msg[128];
+ char str[16];
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ if (!node)
+ return -EINVAL;
+
+ prop = of_find_property(node, "init", NULL);
+ p = of_prop_next_u32(prop, NULL, &val);
+ if (!p)
+ return -EINVAL;
+ while (p) {
+ if (val & FBTFT_OF_INIT_CMD) {
+ val &= 0xFFFF;
+ i = 0;
+ while (p && !(val & 0xFFFF0000)) {
+ if (i > 63) {
+ dev_err(par->info->device,
+ "%s: Maximum register values exceeded\n",
+ __func__);
+ return -EINVAL;
+ }
+ buf[i++] = val;
+ p = of_prop_next_u32(prop, p, &val);
+ }
+ /* make debug message */
+ msg[0] = '\0';
+ for (j = 0; j < i; j++) {
+ snprintf(str, sizeof(str), " %02X", buf[j]);
+ strcat(msg, str);
+ }
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "init: write_register:%s\n", msg);
+
+ par->fbtftops.write_register(par, i,
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], buf[15],
+ buf[16], buf[17], buf[18], buf[19],
+ buf[20], buf[21], buf[22], buf[23],
+ buf[24], buf[25], buf[26], buf[27],
+ buf[28], buf[29], buf[30], buf[31],
+ buf[32], buf[33], buf[34], buf[35],
+ buf[36], buf[37], buf[38], buf[39],
+ buf[40], buf[41], buf[42], buf[43],
+ buf[44], buf[45], buf[46], buf[47],
+ buf[48], buf[49], buf[50], buf[51],
+ buf[52], buf[53], buf[54], buf[55],
+ buf[56], buf[57], buf[58], buf[59],
+ buf[60], buf[61], buf[62], buf[63]);
+ } else if (val & FBTFT_OF_INIT_DELAY) {
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "init: msleep(%u)\n", val & 0xFFFF);
+ msleep(val & 0xFFFF);
+ p = of_prop_next_u32(prop, p, &val);
+ } else {
+ dev_err(par->info->device, "illegal init value 0x%X\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
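+
+/*
+ * The "init" property parsed above is a list of u32 cells: a cell with the
+ * FBTFT_OF_INIT_CMD bit set starts a write_register() call, its low 16 bits
+ * being the command, and the following cells with a zero upper half are
+ * passed as parameters. A cell with the FBTFT_OF_INIT_DELAY bit set sleeps
+ * for the number of milliseconds in its low 16 bits.
+ */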
+#endif
+
+/**
+ * fbtft_init_display() - Generic init_display() function
+ * @par: Driver data
+ *
+ * Uses par->init_sequence to do the initialization
+ *
+ * Return: 0 if successful, negative if error
+ */
+int fbtft_init_display(struct fbtft_par *par)
+{
+ int buf[64];
+ char msg[128];
+ char str[16];
+ int i = 0;
+ int j;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* sanity check */
+ if (!par->init_sequence) {
+ dev_err(par->info->device,
+ "error: init_sequence is not set\n");
+ return -EINVAL;
+ }
+
+ /* make sure stop marker exists */
+ for (i = 0; i < FBTFT_MAX_INIT_SEQUENCE; i++)
+ if (par->init_sequence[i] == -3)
+ break;
+ if (i == FBTFT_MAX_INIT_SEQUENCE) {
+ dev_err(par->info->device,
+ "missing stop marker at end of init sequence\n");
+ return -EINVAL;
+ }
+
+ par->fbtftops.reset(par);
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ i = 0;
+ while (i < FBTFT_MAX_INIT_SEQUENCE) {
+ if (par->init_sequence[i] == -3) {
+ /* done */
+ return 0;
+ }
+ if (par->init_sequence[i] >= 0) {
+ dev_err(par->info->device,
+ "missing delimiter at position %d\n", i);
+ return -EINVAL;
+ }
+ if (par->init_sequence[i+1] < 0) {
+ dev_err(par->info->device,
+ "missing value after delimiter %d at position %d\n",
+ par->init_sequence[i], i);
+ return -EINVAL;
+ }
+ switch (par->init_sequence[i]) {
+ case -1:
+ i++;
+ /* make debug message */
+ strcpy(msg, "");
+ j = i + 1;
+ while (par->init_sequence[j] >= 0) {
+ sprintf(str, "0x%02X ", par->init_sequence[j]);
+ strcat(msg, str);
+ j++;
+ }
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "init: write(0x%02X) %s\n",
+ par->init_sequence[i], msg);
+
+ /* Write */
+ j = 0;
+ while (par->init_sequence[i] >= 0) {
+ if (j > 63) {
+ dev_err(par->info->device,
+ "%s: Maximum register values exceeded\n",
+ __func__);
+ return -EINVAL;
+ }
+ buf[j++] = par->init_sequence[i++];
+ }
+ par->fbtftops.write_register(par, j,
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], buf[15],
+ buf[16], buf[17], buf[18], buf[19],
+ buf[20], buf[21], buf[22], buf[23],
+ buf[24], buf[25], buf[26], buf[27],
+ buf[28], buf[29], buf[30], buf[31],
+ buf[32], buf[33], buf[34], buf[35],
+ buf[36], buf[37], buf[38], buf[39],
+ buf[40], buf[41], buf[42], buf[43],
+ buf[44], buf[45], buf[46], buf[47],
+ buf[48], buf[49], buf[50], buf[51],
+ buf[52], buf[53], buf[54], buf[55],
+ buf[56], buf[57], buf[58], buf[59],
+ buf[60], buf[61], buf[62], buf[63]);
+ break;
+ case -2:
+ i++;
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
+ "init: mdelay(%d)\n", par->init_sequence[i]);
+ mdelay(par->init_sequence[i++]);
+ break;
+ default:
+ dev_err(par->info->device,
+ "unknown delimiter %d at position %d\n",
+ par->init_sequence[i], i);
+ return -EINVAL;
+ }
+ }
+
+ dev_err(par->info->device,
+ "%s: something is wrong. Shouldn't get here.\n", __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fbtft_init_display);
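+
+/*
+ * Sketch of an init sequence accepted by fbtft_init_display(); the register
+ * values are illustrative only. -1 introduces a register write (command
+ * followed by its parameters), -2 a delay in ms, -3 ends the sequence:
+ *
+ *    static int default_init_sequence[] = {
+ *            -1, 0x01,               command without parameters
+ *            -2, 120,                mdelay(120)
+ *            -1, 0x29, 0x3C,         command with one parameter
+ *            -3,                     end marker
+ *    };
+ */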
+
+/**
+ * fbtft_verify_gpios() - Generic verify_gpios() function
+ * @par: Driver data
+ *
+ * Uses @spi, @pdev and @buswidth to determine which GPIOs are needed
+ *
+ * Return: 0 if successful, negative if error
+ */
+static int fbtft_verify_gpios(struct fbtft_par *par)
+{
+ struct fbtft_platform_data *pdata;
+ int i;
+
+ fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
+
+ pdata = par->info->device->platform_data;
+ if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
+ par->gpio.dc < 0) {
+ dev_err(par->info->device,
+ "Missing info about 'dc' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+
+ if (!par->pdev)
+ return 0;
+
+ if (par->gpio.wr < 0) {
+ dev_err(par->info->device, "Missing 'wr' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pdata->display.buswidth; i++) {
+ if (par->gpio.db[i] < 0) {
+ dev_err(par->info->device,
+ "Missing 'db%02d' gpio. Aborting.\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+/* returns 0 if the property is not present */
+static u32 fbtft_of_value(struct device_node *node, const char *propname)
+{
+ int ret;
+ u32 val = 0;
+
+ ret = of_property_read_u32(node, propname, &val);
+ if (ret == 0)
+ pr_info("%s: %s = %u\n", __func__, propname, val);
+
+ return val;
+}
+
+static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ struct fbtft_platform_data *pdata;
+
+ if (!node) {
+ dev_err(dev, "Missing platform data or DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->display.width = fbtft_of_value(node, "width");
+ pdata->display.height = fbtft_of_value(node, "height");
+ pdata->display.regwidth = fbtft_of_value(node, "regwidth");
+ pdata->display.buswidth = fbtft_of_value(node, "buswidth");
+ pdata->display.backlight = fbtft_of_value(node, "backlight");
+ pdata->display.bpp = fbtft_of_value(node, "bpp");
+ pdata->display.debug = fbtft_of_value(node, "debug");
+ pdata->rotate = fbtft_of_value(node, "rotate");
+ pdata->bgr = of_property_read_bool(node, "bgr");
+ pdata->fps = fbtft_of_value(node, "fps");
+ pdata->txbuflen = fbtft_of_value(node, "txbuflen");
+ pdata->startbyte = fbtft_of_value(node, "startbyte");
+ of_property_read_string(node, "gamma", (const char **)&pdata->gamma);
+
+ if (of_find_property(node, "led-gpios", NULL))
+ pdata->display.backlight = 1;
+ if (of_find_property(node, "init", NULL))
+ pdata->display.fbtftops.init_display = fbtft_init_display_dt;
+ pdata->display.fbtftops.request_gpios = fbtft_request_gpios_dt;
+
+ return pdata;
+}
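+
+/*
+ * Example device tree node covered by the properties read above; the
+ * compatible string and gpio numbers are purely illustrative (the
+ * "watterott,openlcd" name is the one registered earlier in this series):
+ *
+ *    display@0 {
+ *            compatible = "watterott,openlcd";
+ *            reg = <0>;
+ *            spi-max-frequency = <32000000>;
+ *            buswidth = <8>;
+ *            rotate = <90>;
+ *            fps = <20>;
+ *            bgr;
+ *            dc-gpios = <&gpio 25 0>;
+ *            reset-gpios = <&gpio 24 0>;
+ *    };
+ */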
+#else
+static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
+{
+ dev_err(dev, "Missing platform data\n");
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+/**
+ * fbtft_probe_common() - Generic device probe() helper function
+ * @display: Display properties
+ * @sdev: SPI device
+ * @pdev: Platform device
+ *
+ * Allocates, initializes and registers a framebuffer
+ *
+ * Either @sdev or @pdev should be NULL
+ *
+ * Return: 0 if successful, negative if error
+ */
+int fbtft_probe_common(struct fbtft_display *display,
+ struct spi_device *sdev, struct platform_device *pdev)
+{
+ struct device *dev;
+ struct fb_info *info;
+ struct fbtft_par *par;
+ struct fbtft_platform_data *pdata;
+ int ret;
+
+ if (sdev)
+ dev = &sdev->dev;
+ else
+ dev = &pdev->dev;
+
+ if (unlikely(display->debug & DEBUG_DRIVER_INIT_FUNCTIONS))
+ dev_info(dev, "%s()\n", __func__);
+
+ pdata = dev->platform_data;
+ if (!pdata) {
+ pdata = fbtft_probe_dt(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ dev->platform_data = pdata;
+ }
+
+ info = fbtft_framebuffer_alloc(display, dev);
+ if (!info)
+ return -ENOMEM;
+
+ par = info->par;
+ par->spi = sdev;
+ par->pdev = pdev;
+
+ if (display->buswidth == 0) {
+ dev_err(dev, "buswidth is not set\n");
+ return -EINVAL;
+ }
+
+ /* write register functions */
+ if (display->regwidth == 8 && display->buswidth == 8) {
+ par->fbtftops.write_register = fbtft_write_reg8_bus8;
+ } else
+ if (display->regwidth == 8 && display->buswidth == 9 && par->spi) {
+ par->fbtftops.write_register = fbtft_write_reg8_bus9;
+ } else if (display->regwidth == 16 && display->buswidth == 8) {
+ par->fbtftops.write_register = fbtft_write_reg16_bus8;
+ } else if (display->regwidth == 16 && display->buswidth == 16) {
+ par->fbtftops.write_register = fbtft_write_reg16_bus16;
+ } else {
+ dev_warn(dev,
+ "no default functions for regwidth=%d and buswidth=%d\n",
+ display->regwidth, display->buswidth);
+ }
+
+ /* write_vmem() functions */
+ if (display->buswidth == 8)
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
+ else if (display->buswidth == 9)
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
+ else if (display->buswidth == 16)
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
+
+ /* GPIO write() functions */
+ if (par->pdev) {
+ if (display->buswidth == 8)
+ par->fbtftops.write = fbtft_write_gpio8_wr;
+ else if (display->buswidth == 16)
+ par->fbtftops.write = fbtft_write_gpio16_wr;
+ }
+
+ /* 9-bit SPI setup */
+ if (par->spi && display->buswidth == 9) {
+ par->spi->bits_per_word = 9;
+ ret = par->spi->master->setup(par->spi);
+ if (ret) {
+ dev_warn(&par->spi->dev,
+ "9-bit SPI not available, emulating using 8-bit.\n");
+ par->spi->bits_per_word = 8;
+ ret = par->spi->master->setup(par->spi);
+ if (ret)
+ goto out_release;
+ /* allocate buffer with room for dc bits */
+ par->extra = devm_kzalloc(par->info->device,
+ par->txbuf.len + (par->txbuf.len / 8) + 8,
+ GFP_KERNEL);
+ if (!par->extra) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ par->fbtftops.write = fbtft_write_spi_emulate_9;
+ }
+ }
+
+ if (!par->fbtftops.verify_gpios)
+ par->fbtftops.verify_gpios = fbtft_verify_gpios;
+
+ /* make sure we still use the driver provided functions */
+ fbtft_merge_fbtftops(&par->fbtftops, &display->fbtftops);
+
+ /* use init_sequence if provided */
+ if (par->init_sequence)
+ par->fbtftops.init_display = fbtft_init_display;
+
+ /* use platform_data provided functions above all */
+ fbtft_merge_fbtftops(&par->fbtftops, &pdata->display.fbtftops);
+
+ ret = fbtft_register_framebuffer(info);
+ if (ret < 0)
+ goto out_release;
+
+ return 0;
+
+out_release:
+ fbtft_framebuffer_release(info);
+
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_probe_common);
+
+/**
+ * fbtft_remove_common() - Generic device remove() helper function
+ * @dev: Device
+ * @info: Framebuffer
+ *
+ * Unregisters and releases the framebuffer
+ *
+ * Return: 0 if successful, negative if error
+ */
+int fbtft_remove_common(struct device *dev, struct fb_info *info)
+{
+ struct fbtft_par *par;
+
+ if (!info)
+ return -EINVAL;
+ par = info->par;
+ if (par)
+ fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
+ "%s()\n", __func__);
+ fbtft_unregister_framebuffer(info);
+ fbtft_framebuffer_release(info);
+
+ return 0;
+}
+EXPORT_SYMBOL(fbtft_remove_common);
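+
+/*
+ * Usage sketch (illustrative only, hypothetical names): a display driver
+ * normally just describes its panel in a struct fbtft_display and forwards
+ * its probe()/remove() callbacks to the two helpers above. This is exactly
+ * what the FBTFT_REGISTER_DRIVER() macro in fbtft.h expands to.
+ */
+#if 0 /* example, not compiled */
+static struct fbtft_display example_display = {
+ .regwidth = 8,
+ .buswidth = 8,
+ .width = 240,
+ .height = 320,
+};
+
+static int example_probe(struct spi_device *spi)
+{
+ return fbtft_probe_common(&example_display, spi, NULL);
+}
+
+static int example_remove(struct spi_device *spi)
+{
+ return fbtft_remove_common(&spi->dev, spi_get_drvdata(spi));
+}
+#endif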
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
new file mode 100644
index 000000000000..32155a7b2a62
--- /dev/null
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -0,0 +1,239 @@
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include "fbtft.h"
+
+int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = len,
+ };
+ struct spi_message m;
+
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ if (!par->spi) {
+ dev_err(par->info->device,
+ "%s: par->spi is unexpectedly NULL\n", __func__);
+ return -ENODEV;
+ }
+
+ spi_message_init(&m);
+ if (par->txbuf.dma && buf == par->txbuf.buf) {
+ t.tx_dma = par->txbuf.dma;
+ m.is_dma_mapped = 1;
+ }
+ spi_message_add_tail(&t, &m);
+ return spi_sync(par->spi, &m);
+}
+EXPORT_SYMBOL(fbtft_write_spi);
+
+/**
+ * fbtft_write_spi_emulate_9() - write SPI emulating 9-bit
+ * @par: Driver data
+ * @buf: Buffer to write
+ * @len: Length of buffer (must be divisible by 8)
+ *
+ * When 9-bit SPI is not available, this function can be used to emulate that.
+ * par->extra must hold a transformation buffer used for transfer.
+ */
+int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len)
+{
+ u16 *src = buf;
+ u8 *dst = par->extra;
+ size_t size = len / 2;
+ size_t added = 0;
+ int bits, i, j;
+ u64 val, dc, tmp;
+
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ if (!par->extra) {
+ dev_err(par->info->device, "%s: error: par->extra is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((len % 8) != 0) {
+ dev_err(par->info->device,
+ "%s: error: len=%d must be divisible by 8\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
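+ /*
+ * Illustrative layout note: the low 9 bits of each 16-bit input word
+ * are a D/C bit followed by 8 data bits. Seven such words plus the
+ * D/C bit of an eighth word fill one 64-bit big-endian block, and the
+ * data byte of the eighth word follows as the ninth output byte, so
+ * every group of eight input words yields nine SPI bytes
+ * (size + added in total).
+ */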
+ for (i = 0; i < size; i += 8) {
+ tmp = 0;
+ bits = 63;
+ for (j = 0; j < 7; j++) {
+ dc = (*src & 0x0100) ? 1 : 0;
+ val = *src & 0x00FF;
+ tmp |= dc << bits;
+ bits -= 8;
+ tmp |= val << bits--;
+ src++;
+ }
+ tmp |= ((*src & 0x0100) ? 1 : 0);
+ *(u64 *)dst = cpu_to_be64(tmp);
+ dst += 8;
+ *dst++ = (u8)(*src++ & 0x00FF);
+ added++;
+ }
+
+ return spi_write(par->spi, par->extra, size + added);
+}
+EXPORT_SYMBOL(fbtft_write_spi_emulate_9);
+
+int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len)
+{
+ int ret;
+ u8 txbuf[32] = { 0, };
+ struct spi_transfer t = {
+ .speed_hz = 2000000,
+ .rx_buf = buf,
+ .len = len,
+ };
+ struct spi_message m;
+
+ if (!par->spi) {
+ dev_err(par->info->device,
+ "%s: par->spi is unexpectedly NULL\n", __func__);
+ return -ENODEV;
+ }
+
+ if (par->startbyte) {
+ if (len > 32) {
+ dev_err(par->info->device,
+ "%s: len=%d can't be larger than 32 when using 'startbyte'\n",
+ __func__, len);
+ return -EINVAL;
+ }
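+ /*
+ * Startbyte layout (see fbtft.h): 6-bit device id + RS bit + RW
+ * bit. OR-ing in 0x3 sets RS and RW to request a read.
+ */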
+ txbuf[0] = par->startbyte | 0x3;
+ t.tx_buf = txbuf;
+ fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8,
+ txbuf, len, "%s(len=%d) txbuf => ", __func__, len);
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(par->spi, &m);
+ fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8, buf, len,
+ "%s(len=%d) buf <= ", __func__, len);
+
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_read_spi);
+
+/*
+ * Optimized use of gpiolib is twice as fast as the unoptimized version,
+ * but only one driver can use the optimized version at a time, since the
+ * previous bus state is kept in a static variable.
+ */
+int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
+{
+ u8 data;
+ int i;
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ static u8 prev_data;
+#endif
+
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ while (len--) {
+ data = *(u8 *) buf;
+
+ /* Start writing by pulling down /WR */
+ gpio_set_value(par->gpio.wr, 0);
+
+ /* Set data */
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ if (data == prev_data) {
+ gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ } else {
+ for (i = 0; i < 8; i++) {
+ if ((data & 1) != (prev_data & 1))
+ gpio_set_value(par->gpio.db[i],
+ (data & 1));
+ data >>= 1;
+ prev_data >>= 1;
+ }
+ }
+#else
+ for (i = 0; i < 8; i++) {
+ gpio_set_value(par->gpio.db[i], (data & 1));
+ data >>= 1;
+ }
+#endif
+
+ /* Pullup /WR */
+ gpio_set_value(par->gpio.wr, 1);
+
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ prev_data = *(u8 *) buf;
+#endif
+ buf++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fbtft_write_gpio8_wr);
+
+int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
+{
+ u16 data;
+ int i;
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ static u16 prev_data;
+#endif
+
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ while (len) {
+ data = *(u16 *) buf;
+
+ /* Start writing by pulling down /WR */
+ gpio_set_value(par->gpio.wr, 0);
+
+ /* Set data */
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ if (data == prev_data) {
+ gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ } else {
+ for (i = 0; i < 16; i++) {
+ if ((data & 1) != (prev_data & 1))
+ gpio_set_value(par->gpio.db[i],
+ (data & 1));
+ data >>= 1;
+ prev_data >>= 1;
+ }
+ }
+#else
+ for (i = 0; i < 16; i++) {
+ gpio_set_value(par->gpio.db[i], (data & 1));
+ data >>= 1;
+ }
+#endif
+
+ /* Pullup /WR */
+ gpio_set_value(par->gpio.wr, 1);
+
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ prev_data = *(u16 *) buf;
+#endif
+ buf += 2;
+ len -= 2;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fbtft_write_gpio16_wr);
+
+int fbtft_write_gpio16_wr_latched(struct fbtft_par *par, void *buf, size_t len)
+{
+ dev_err(par->info->device, "%s: function not implemented\n", __func__);
+ return -1;
+}
+EXPORT_SYMBOL(fbtft_write_gpio16_wr_latched);
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
new file mode 100644
index 000000000000..45f8de3d11ad
--- /dev/null
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -0,0 +1,222 @@
+#include "fbtft.h"
+
+
+static int get_next_ulong(char **str_p, unsigned long *val, char *sep, int base)
+{
+ char *p_val;
+ int ret;
+
+ if (!str_p || !(*str_p))
+ return -EINVAL;
+
+ p_val = strsep(str_p, sep);
+
+ if (!p_val)
+ return -EINVAL;
+
+ ret = kstrtoul(p_val, base, val);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
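+/*
+ * Accepted input (illustrative example): curves are separated by '\n' or
+ * ';', values within a curve by space or ',', and each value is parsed as
+ * hexadecimal. For a display expecting 2 curves of 4 values each, both of
+ * these strings are equivalent:
+ *
+ * "01 02 03 04\n05 06 07 08"
+ * "01,02,03,04;05,06,07,08"
+ */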
+int fbtft_gamma_parse_str(struct fbtft_par *par, unsigned long *curves,
+ const char *str, int size)
+{
+ char *str_p, *curve_p = NULL;
+ char *tmp;
+ unsigned long val = 0;
+ int ret = 0;
+ int curve_counter, value_counter;
+
+ fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__);
+
+ if (!str || !curves)
+ return -EINVAL;
+
+ fbtft_par_dbg(DEBUG_SYSFS, par, "%s\n", str);
+
+ tmp = kmalloc(size + 1, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, str, size);
+ tmp[size] = '\0';
+
+ /* replace optional separators */
+ str_p = tmp;
+ while (*str_p) {
+ if (*str_p == ',')
+ *str_p = ' ';
+ if (*str_p == ';')
+ *str_p = '\n';
+ str_p++;
+ }
+
+ str_p = strim(tmp);
+
+ curve_counter = 0;
+ while (str_p) {
+ if (curve_counter == par->gamma.num_curves) {
+ dev_err(par->info->device, "Gamma: Too many curves\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ curve_p = strsep(&str_p, "\n");
+ value_counter = 0;
+ while (curve_p) {
+ if (value_counter == par->gamma.num_values) {
+ dev_err(par->info->device,
+ "Gamma: Too many values\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = get_next_ulong(&curve_p, &val, " ", 16);
+ if (ret)
+ goto out;
+ curves[curve_counter * par->gamma.num_values + value_counter] = val;
+ value_counter++;
+ }
+ if (value_counter != par->gamma.num_values) {
+ dev_err(par->info->device, "Gamma: Too few values\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ curve_counter++;
+ }
+ if (curve_counter != par->gamma.num_curves) {
+ dev_err(par->info->device, "Gamma: Too few curves\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t
+sprintf_gamma(struct fbtft_par *par, unsigned long *curves, char *buf)
+{
+ ssize_t len = 0;
+ unsigned int i, j;
+
+ mutex_lock(&par->gamma.lock);
+ for (i = 0; i < par->gamma.num_curves; i++) {
+ for (j = 0; j < par->gamma.num_values; j++)
+ len += scnprintf(&buf[len], PAGE_SIZE - len,
+ "%04lx ", curves[i * par->gamma.num_values + j]);
+ buf[len-1] = '\n';
+ }
+ mutex_unlock(&par->gamma.lock);
+
+ return len;
+}
+
+static ssize_t store_gamma_curve(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fb_info = dev_get_drvdata(device);
+ struct fbtft_par *par = fb_info->par;
+ unsigned long tmp_curves[FBTFT_GAMMA_MAX_VALUES_TOTAL];
+ int ret;
+
+ ret = fbtft_gamma_parse_str(par, tmp_curves, buf, count);
+ if (ret)
+ return ret;
+
+ ret = par->fbtftops.set_gamma(par, tmp_curves);
+ if (ret)
+ return ret;
+
+ mutex_lock(&par->gamma.lock);
+ memcpy(par->gamma.curves, tmp_curves,
+ par->gamma.num_curves * par->gamma.num_values * sizeof(tmp_curves[0]));
+ mutex_unlock(&par->gamma.lock);
+
+ return count;
+}
+
+static ssize_t show_gamma_curve(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fb_info = dev_get_drvdata(device);
+ struct fbtft_par *par = fb_info->par;
+
+ return sprintf_gamma(par, par->gamma.curves, buf);
+}
+
+static struct device_attribute gamma_device_attrs[] = {
+ __ATTR(gamma, 0660, show_gamma_curve, store_gamma_curve),
+};
+
+
+void fbtft_expand_debug_value(unsigned long *debug)
+{
+ switch (*debug & 0b111) {
+ case 1:
+ *debug |= DEBUG_LEVEL_1;
+ break;
+ case 2:
+ *debug |= DEBUG_LEVEL_2;
+ break;
+ case 3:
+ *debug |= DEBUG_LEVEL_3;
+ break;
+ case 4:
+ *debug |= DEBUG_LEVEL_4;
+ break;
+ case 5:
+ *debug |= DEBUG_LEVEL_5;
+ break;
+ case 6:
+ *debug |= DEBUG_LEVEL_6;
+ break;
+ case 7:
+ *debug = 0xFFFFFFFF;
+ break;
+ }
+}
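+
+/*
+ * Example (illustrative): writing "3" to the sysfs debug attribute ends up
+ * in store_debug() below and is expanded here to the DEBUG_LEVEL_3 bit
+ * mask, so the lowest three bits select a predefined level while higher
+ * bits can still be set individually (see the DEBUG_* defines in fbtft.h).
+ */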
+
+static ssize_t store_debug(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fb_info = dev_get_drvdata(device);
+ struct fbtft_par *par = fb_info->par;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &par->debug);
+ if (ret)
+ return ret;
+ fbtft_expand_debug_value(&par->debug);
+
+ return count;
+}
+
+static ssize_t show_debug(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fb_info = dev_get_drvdata(device);
+ struct fbtft_par *par = fb_info->par;
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", par->debug);
+}
+
+static struct device_attribute debug_device_attr = \
+ __ATTR(debug, 0660, show_debug, store_debug);
+
+
+void fbtft_sysfs_init(struct fbtft_par *par)
+{
+ device_create_file(par->info->dev, &debug_device_attr);
+ if (par->gamma.curves && par->fbtftops.set_gamma)
+ device_create_file(par->info->dev, &gamma_device_attrs[0]);
+}
+
+void fbtft_sysfs_exit(struct fbtft_par *par)
+{
+ device_remove_file(par->info->dev, &debug_device_attr);
+ if (par->gamma.curves && par->fbtftops.set_gamma)
+ device_remove_file(par->info->dev, &gamma_device_attrs[0]);
+}
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
new file mode 100644
index 000000000000..0dbf3f95fe78
--- /dev/null
+++ b/drivers/staging/fbtft/fbtft.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_FBTFT_H
+#define __LINUX_FBTFT_H
+
+#include <linux/fb.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+
+
+#define FBTFT_NOP 0x00
+#define FBTFT_SWRESET 0x01
+#define FBTFT_RDDID 0x04
+#define FBTFT_RDDST 0x09
+#define FBTFT_CASET 0x2A
+#define FBTFT_RASET 0x2B
+#define FBTFT_RAMWR 0x2C
+
+#define FBTFT_ONBOARD_BACKLIGHT 2
+
+#define FBTFT_GPIO_NO_MATCH 0xFFFF
+#define FBTFT_GPIO_NAME_SIZE 32
+#define FBTFT_MAX_INIT_SEQUENCE 512
+#define FBTFT_GAMMA_MAX_VALUES_TOTAL 128
+
+#define FBTFT_OF_INIT_CMD BIT(24)
+#define FBTFT_OF_INIT_DELAY BIT(25)
+
+/**
+ * struct fbtft_gpio - Structure that holds one pinname to gpio mapping
+ * @name: pinname (reset, dc, etc.)
+ * @gpio: GPIO number
+ *
+ */
+struct fbtft_gpio {
+ char name[FBTFT_GPIO_NAME_SIZE];
+ unsigned gpio;
+};
+
+struct fbtft_par;
+
+/**
+ * struct fbtft_ops - FBTFT operations structure
+ * @write: Writes to interface bus
+ * @read: Reads from interface bus
+ * @write_vmem: Writes video memory to display
+ * @write_reg: Writes to controller register
+ * @set_addr_win: Set the GRAM update window
+ * @reset: Reset the LCD controller
+ * @mkdirty: Marks display lines for update
+ * @update_display: Updates the display
+ * @init_display: Initializes the display
+ * @blank: Blank the display (optional)
+ * @request_gpios_match: Do pinname to gpio matching
+ * @request_gpios: Request gpios from the kernel
+ * @free_gpios: Free previously requested gpios
+ * @verify_gpios: Verify that the necessary gpios are present (optional)
+ * @register_backlight: Used to register backlight device (optional)
+ * @unregister_backlight: Unregister backlight device (optional)
+ * @set_var: Configure LCD with values from variables like @rotate and @bgr
+ * (optional)
+ * @set_gamma: Set Gamma curve (optional)
+ *
+ * Most of these operations have default functions assigned to them in
+ * fbtft_framebuffer_alloc()
+ */
+struct fbtft_ops {
+ int (*write)(struct fbtft_par *par, void *buf, size_t len);
+ int (*read)(struct fbtft_par *par, void *buf, size_t len);
+ int (*write_vmem)(struct fbtft_par *par, size_t offset, size_t len);
+ void (*write_register)(struct fbtft_par *par, int len, ...);
+
+ void (*set_addr_win)(struct fbtft_par *par,
+ int xs, int ys, int xe, int ye);
+ void (*reset)(struct fbtft_par *par);
+ void (*mkdirty)(struct fb_info *info, int from, int to);
+ void (*update_display)(struct fbtft_par *par,
+ unsigned start_line, unsigned end_line);
+ int (*init_display)(struct fbtft_par *par);
+ int (*blank)(struct fbtft_par *par, bool on);
+
+ unsigned long (*request_gpios_match)(struct fbtft_par *par,
+ const struct fbtft_gpio *gpio);
+ int (*request_gpios)(struct fbtft_par *par);
+ int (*verify_gpios)(struct fbtft_par *par);
+
+ void (*register_backlight)(struct fbtft_par *par);
+ void (*unregister_backlight)(struct fbtft_par *par);
+
+ int (*set_var)(struct fbtft_par *par);
+ int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
+};
+
+/**
+ * struct fbtft_display - Describes the display properties
+ * @width: Width of display in pixels
+ * @height: Height of display in pixels
+ * @regwidth: LCD Controller Register width in bits
+ * @buswidth: Display interface bus width in bits
+ * @backlight: Backlight type.
+ * @fbtftops: FBTFT operations provided by driver or device (platform_data)
+ * @bpp: Bits per pixel
+ * @fps: Frames per second
+ * @txbuflen: Size of transmit buffer
+ * @init_sequence: Pointer to LCD initialization array
+ * @gamma: String representation of Gamma curve(s)
+ * @gamma_num: Number of Gamma curves
+ * @gamma_len: Number of values per Gamma curve
+ * @debug: Initial debug value
+ *
+ * This structure is not stored by FBTFT except for init_sequence.
+ */
+struct fbtft_display {
+ unsigned width;
+ unsigned height;
+ unsigned regwidth;
+ unsigned buswidth;
+ unsigned backlight;
+ struct fbtft_ops fbtftops;
+ unsigned bpp;
+ unsigned fps;
+ int txbuflen;
+ int *init_sequence;
+ char *gamma;
+ int gamma_num;
+ int gamma_len;
+ unsigned long debug;
+};
+
+/**
+ * struct fbtft_platform_data - Passes display specific data to the driver
+ * @display: Display properties
+ * @gpios: Pointer to an array of pinname to gpio mappings
+ * @rotate: Display rotation angle
+ * @bgr: LCD Controller BGR bit
+ * @fps: Frames per second (this will go away, use @fps in @fbtft_display)
+ * @txbuflen: Size of transmit buffer
+ * @startbyte: When set, enables use of Startbyte in transfers
+ * @gamma: String representation of Gamma curve(s)
+ * @extra: A way to pass extra info
+ */
+struct fbtft_platform_data {
+ struct fbtft_display display;
+ const struct fbtft_gpio *gpios;
+ unsigned rotate;
+ bool bgr;
+ unsigned fps;
+ int txbuflen;
+ u8 startbyte;
+ char *gamma;
+ void *extra;
+};
+
+/**
+ * struct fbtft_par - Main FBTFT data structure
+ *
+ * This structure holds all relevant data to operate the display
+ *
+ * See the source file for documentation, since nested structs are not
+ * supported by kernel-doc.
+ *
+ */
+/* @spi: Set if it is a SPI device
+ * @pdev: Set if it is a platform device
+ * @info: Pointer to framebuffer fb_info structure
+ * @pdata: Pointer to platform data
+ * @ssbuf: Not used
+ * @pseudo_palette: Used by fb_set_colreg()
+ * @txbuf.buf: Transmit buffer
+ * @txbuf.len: Transmit buffer length
+ * @buf: Small buffer used when writing init data over SPI
+ * @startbyte: Used by some controllers when in SPI mode.
+ * Format: 6 bit Device id + RS bit + RW bit
+ * @fbtftops: FBTFT operations provided by driver or device (platform_data)
+ * @dirty_lock: Protects dirty_lines_start and dirty_lines_end
+ * @dirty_lines_start: Where to begin updating display
+ * @dirty_lines_end: Where to end updating display
+ * @gpio.reset: GPIO used to reset display
+ * @gpio.dc: Data/Command signal, also known as RS
+ * @gpio.rd: Read latching signal
+ * @gpio.wr: Write latching signal
+ * @gpio.latch: Bus latch signal, e.g. a 16->8 bit bus latch
+ * @gpio.cs: LCD Chip Select with parallel interface bus
+ * @gpio.db[16]: Parallel databus
+ * @gpio.led[16]: Led control signals
+ * @gpio.aux[16]: Auxiliary signals, not used by core
+ * @init_sequence: Pointer to LCD initialization array
+ * @gamma.lock: Mutex for Gamma curve locking
+ * @gamma.curves: Pointer to Gamma curve array
+ * @gamma.num_values: Number of values per Gamma curve
+ * @gamma.num_curves: Number of Gamma curves
+ * @debug: Pointer to debug value
+ * @current_debug:
+ * @first_update_done: Used to only time the first display update
+ * @update_time: Used to calculate 'fps' in debug output
+ * @bgr: BGR mode
+ * @extra: Extra info needed by driver
+ */
+struct fbtft_par {
+ struct spi_device *spi;
+ struct platform_device *pdev;
+ struct fb_info *info;
+ struct fbtft_platform_data *pdata;
+ u16 *ssbuf;
+ u32 pseudo_palette[16];
+ struct {
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+ } txbuf;
+ u8 *buf;
+ u8 startbyte;
+ struct fbtft_ops fbtftops;
+ spinlock_t dirty_lock;
+ unsigned dirty_lines_start;
+ unsigned dirty_lines_end;
+ struct {
+ int reset;
+ int dc;
+ int rd;
+ int wr;
+ int latch;
+ int cs;
+ int db[16];
+ int led[16];
+ int aux[16];
+ } gpio;
+ int *init_sequence;
+ struct {
+ struct mutex lock;
+ unsigned long *curves;
+ int num_values;
+ int num_curves;
+ } gamma;
+ unsigned long debug;
+ bool first_update_done;
+ struct timespec update_time;
+ bool bgr;
+ void *extra;
+};
+
+#define NUMARGS(...) (sizeof((int[]){__VA_ARGS__})/sizeof(int))
+
+#define write_reg(par, ...) \
+do { \
+ par->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__); \
+} while (0)
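+
+/*
+ * Usage sketch (illustrative, xs/xe are hypothetical window coordinates):
+ * NUMARGS() counts the variadic arguments so write_register() receives both
+ * the count and the values, e.g. a column address set:
+ *
+ * write_reg(par, FBTFT_CASET, 0x00, xs, 0x00, xe);
+ */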
+
+/* fbtft-core.c */
+extern void fbtft_dbg_hex(const struct device *dev,
+ int groupsize, void *buf, size_t len, const char *fmt, ...);
+extern struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+ struct device *dev);
+extern void fbtft_framebuffer_release(struct fb_info *info);
+extern int fbtft_register_framebuffer(struct fb_info *fb_info);
+extern int fbtft_unregister_framebuffer(struct fb_info *fb_info);
+extern void fbtft_register_backlight(struct fbtft_par *par);
+extern void fbtft_unregister_backlight(struct fbtft_par *par);
+extern int fbtft_init_display(struct fbtft_par *par);
+extern int fbtft_probe_common(struct fbtft_display *display,
+ struct spi_device *sdev, struct platform_device *pdev);
+extern int fbtft_remove_common(struct device *dev, struct fb_info *info);
+
+/* fbtft-io.c */
+extern int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
+extern int fbtft_write_spi_emulate_9(struct fbtft_par *par,
+ void *buf, size_t len);
+extern int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len);
+extern int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len);
+extern int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len);
+extern int fbtft_write_gpio16_wr_latched(struct fbtft_par *par,
+ void *buf, size_t len);
+
+/* fbtft-bus.c */
+extern int fbtft_write_vmem8_bus8(struct fbtft_par *par, size_t offset, size_t len);
+extern int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len);
+extern int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len);
+extern int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len);
+extern void fbtft_write_reg8_bus8(struct fbtft_par *par, int len, ...);
+extern void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
+extern void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
+extern void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
+
+
+#define FBTFT_REGISTER_DRIVER(_name, _compatible, _display) \
+ \
+static int fbtft_driver_probe_spi(struct spi_device *spi) \
+{ \
+ return fbtft_probe_common(_display, spi, NULL); \
+} \
+ \
+static int fbtft_driver_remove_spi(struct spi_device *spi) \
+{ \
+ struct fb_info *info = spi_get_drvdata(spi); \
+ \
+ return fbtft_remove_common(&spi->dev, info); \
+} \
+ \
+static int fbtft_driver_probe_pdev(struct platform_device *pdev) \
+{ \
+ return fbtft_probe_common(_display, NULL, pdev); \
+} \
+ \
+static int fbtft_driver_remove_pdev(struct platform_device *pdev) \
+{ \
+ struct fb_info *info = platform_get_drvdata(pdev); \
+ \
+ return fbtft_remove_common(&pdev->dev, info); \
+} \
+ \
+static const struct of_device_id dt_ids[] = { \
+ { .compatible = _compatible }, \
+ {}, \
+}; \
+ \
+MODULE_DEVICE_TABLE(of, dt_ids); \
+ \
+ \
+static struct spi_driver fbtft_driver_spi_driver = { \
+ .driver = { \
+ .name = _name, \
+ .owner = THIS_MODULE, \
+ .of_match_table = of_match_ptr(dt_ids), \
+ }, \
+ .probe = fbtft_driver_probe_spi, \
+ .remove = fbtft_driver_remove_spi, \
+}; \
+ \
+static struct platform_driver fbtft_driver_platform_driver = { \
+ .driver = { \
+ .name = _name, \
+ .owner = THIS_MODULE, \
+ .of_match_table = of_match_ptr(dt_ids), \
+ }, \
+ .probe = fbtft_driver_probe_pdev, \
+ .remove = fbtft_driver_remove_pdev, \
+}; \
+ \
+static int __init fbtft_driver_module_init(void) \
+{ \
+ int ret; \
+ \
+ ret = spi_register_driver(&fbtft_driver_spi_driver); \
+ if (ret < 0) \
+ return ret; \
+ return platform_driver_register(&fbtft_driver_platform_driver); \
+} \
+ \
+static void __exit fbtft_driver_module_exit(void) \
+{ \
+ spi_unregister_driver(&fbtft_driver_spi_driver); \
+ platform_driver_unregister(&fbtft_driver_platform_driver); \
+} \
+ \
+module_init(fbtft_driver_module_init); \
+module_exit(fbtft_driver_module_exit);
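+
+/*
+ * Usage sketch (illustrative, hypothetical driver name, compatible string
+ * and init_display() function): a display driver defines its struct
+ * fbtft_display once and lets the macro generate the SPI and platform
+ * driver boilerplate plus module init/exit:
+ *
+ * static struct fbtft_display display = {
+ * .regwidth = 8,
+ * .buswidth = 8,
+ * .width = 128,
+ * .height = 160,
+ * .fbtftops = { .init_display = init_display },
+ * };
+ *
+ * FBTFT_REGISTER_DRIVER("fb_example", "example,panel", &display);
+ */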
+
+
+/* Debug macros */
+
+/* shorthand debug levels */
+#define DEBUG_LEVEL_1 DEBUG_REQUEST_GPIOS
+#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS | DEBUG_TIME_FIRST_UPDATE)
+#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY | DEBUG_BLANK | DEBUG_REQUEST_GPIOS | DEBUG_FREE_GPIOS | DEBUG_VERIFY_GPIOS | DEBUG_BACKLIGHT | DEBUG_SYSFS)
+#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE | DEBUG_FB_FILLRECT | DEBUG_FB_COPYAREA | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK)
+#define DEBUG_LEVEL_5 (DEBUG_LEVEL_3 | DEBUG_UPDATE_DISPLAY)
+#define DEBUG_LEVEL_6 (DEBUG_LEVEL_4 | DEBUG_LEVEL_5)
+#define DEBUG_LEVEL_7 0xFFFFFFFF
+
+#define DEBUG_DRIVER_INIT_FUNCTIONS (1<<3)
+#define DEBUG_TIME_FIRST_UPDATE (1<<4)
+#define DEBUG_TIME_EACH_UPDATE (1<<5)
+#define DEBUG_DEFERRED_IO (1<<6)
+#define DEBUG_FBTFT_INIT_FUNCTIONS (1<<7)
+
+/* fbops */
+#define DEBUG_FB_READ (1<<8)
+#define DEBUG_FB_WRITE (1<<9)
+#define DEBUG_FB_FILLRECT (1<<10)
+#define DEBUG_FB_COPYAREA (1<<11)
+#define DEBUG_FB_IMAGEBLIT (1<<12)
+#define DEBUG_FB_SETCOLREG (1<<13)
+#define DEBUG_FB_BLANK (1<<14)
+
+#define DEBUG_SYSFS (1<<16)
+
+/* fbtftops */
+#define DEBUG_BACKLIGHT (1<<17)
+#define DEBUG_READ (1<<18)
+#define DEBUG_WRITE (1<<19)
+#define DEBUG_WRITE_VMEM (1<<20)
+#define DEBUG_WRITE_REGISTER (1<<21)
+#define DEBUG_SET_ADDR_WIN (1<<22)
+#define DEBUG_RESET (1<<23)
+#define DEBUG_MKDIRTY (1<<24)
+#define DEBUG_UPDATE_DISPLAY (1<<25)
+#define DEBUG_INIT_DISPLAY (1<<26)
+#define DEBUG_BLANK (1<<27)
+#define DEBUG_REQUEST_GPIOS (1<<28)
+#define DEBUG_FREE_GPIOS (1<<29)
+#define DEBUG_REQUEST_GPIOS_MATCH (1<<30)
+#define DEBUG_VERIFY_GPIOS (1<<31)
+
+
+#define fbtft_init_dbg(dev, format, arg...) \
+do { \
+ if (unlikely((dev)->platform_data && \
+ (((struct fbtft_platform_data *)(dev)->platform_data)->display.debug & DEBUG_DRIVER_INIT_FUNCTIONS))) \
+ dev_info(dev, format, ##arg); \
+} while (0)
+
+#define fbtft_par_dbg(level, par, format, arg...) \
+do { \
+ if (unlikely(par->debug & level)) \
+ dev_info(par->info->device, format, ##arg); \
+} while (0)
+
+#define fbtft_dev_dbg(level, par, dev, format, arg...) \
+do { \
+ if (unlikely(par->debug & level)) \
+ dev_info(dev, format, ##arg); \
+} while (0)
+
+#define fbtft_par_dbg_hex(level, par, dev, type, buf, num, format, arg...) \
+do { \
+ if (unlikely(par->debug & level)) \
+ fbtft_dbg_hex(dev, sizeof(type), buf, num * sizeof(type), format, ##arg); \
+} while (0)
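+
+/*
+ * Typical use (illustrative, mirrors calls elsewhere in this patch): the
+ * message is only emitted when the corresponding bit is set in par->debug,
+ * e.g.
+ *
+ * fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+ */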
+
+#endif /* __LINUX_FBTFT_H */
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
new file mode 100644
index 000000000000..b9f4c30e39c6
--- /dev/null
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -0,0 +1,1444 @@
+/*
+ *
+ * Copyright (C) 2013, Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fbtft_device"
+
+#define MAX_GPIOS 32
+
+struct spi_device *spi_device;
+struct platform_device *p_device;
+
+static char *name;
+module_param(name, charp, 0);
+MODULE_PARM_DESC(name, "Device name (required). " \
+"name=list => list all supported devices.");
+
+static unsigned rotate;
+module_param(rotate, uint, 0);
+MODULE_PARM_DESC(rotate,
+"Angle to rotate display counter clockwise: 0, 90, 180, 270");
+
+static unsigned busnum;
+module_param(busnum, uint, 0);
+MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
+
+static unsigned cs;
+module_param(cs, uint, 0);
+MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
+
+static unsigned speed;
+module_param(speed, uint, 0);
+MODULE_PARM_DESC(speed, "SPI speed (override device default)");
+
+static int mode = -1;
+module_param(mode, int, 0);
+MODULE_PARM_DESC(mode, "SPI mode (override device default)");
+
+static char *gpios;
+module_param(gpios, charp, 0);
+MODULE_PARM_DESC(gpios,
+"List of gpios. Comma separated with the form: reset:23,dc:24 " \
+"(when overriding the default, all gpios must be specified)");
+
+static unsigned fps;
+module_param(fps, uint, 0);
+MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
+
+static char *gamma;
+module_param(gamma, charp, 0);
+MODULE_PARM_DESC(gamma,
+"String representation of Gamma Curve(s). Driver specific.");
+
+static int txbuflen;
+module_param(txbuflen, int, 0);
+MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
+
+static int bgr = -1;
+module_param(bgr, int, 0);
+MODULE_PARM_DESC(bgr,
+"BGR bit (supported by some drivers).");
+
+static unsigned startbyte;
+module_param(startbyte, uint, 0);
+MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
+
+static bool custom;
+module_param(custom, bool, 0);
+MODULE_PARM_DESC(custom, "Add a custom display device. " \
+"Use speed= argument to make it a SPI device, else platform_device");
+
+static unsigned width;
+module_param(width, uint, 0);
+MODULE_PARM_DESC(width, "Display width, used with the custom argument");
+
+static unsigned height;
+module_param(height, uint, 0);
+MODULE_PARM_DESC(height, "Display height, used with the custom argument");
+
+static unsigned buswidth = 8;
+module_param(buswidth, uint, 0);
+MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
+
+static int init[FBTFT_MAX_INIT_SEQUENCE];
+static int init_num;
+module_param_array(init, int, &init_num, 0);
+MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
+
+static unsigned long debug;
+module_param(debug, ulong , 0);
+MODULE_PARM_DESC(debug,
+"level: 0-7 (the remaining 29 bits is for advanced usage)");
+
+static unsigned verbose = 3;
+module_param(verbose, uint, 0);
+MODULE_PARM_DESC(verbose,
+"0 silent, >0 show gpios, >1 show devices, >2 show devices before (default=3)");
+
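+/*
+ * Example invocation (illustrative; GPIO numbers are board specific):
+ *
+ * modprobe fbtft_device name=adafruit18 rotate=90 gpios=reset:25,dc:24,led:18
+ *
+ * name=list prints the supported display names instead of adding a device.
+ */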
+
+struct fbtft_device_display {
+ char *name;
+ struct spi_board_info *spi;
+ struct platform_device *pdev;
+};
+
+static void fbtft_device_pdev_release(struct device *dev);
+
+static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
+static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
+ int xs, int ys, int xe, int ye);
+
+#define ADAFRUIT18_GAMMA \
+ "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
+ "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10"
+
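+/*
+ * The init_sequence arrays below use the fbtft convention (parsed by
+ * fbtft_init_display() in fbtft-core.c, not part of this hunk): -1 starts a
+ * controller command followed by its parameter bytes, -2 is followed by a
+ * delay in milliseconds, and -3 terminates the sequence. For example
+ * "-1,0x11,-2,120,-1,0x29,-3" means: sleep out, wait 120 ms, display on,
+ * end.
+ */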
+static int hy28b_init_sequence[] = {
+ -1,0x00e7,0x0010,-1,0x0000,0x0001,-1,0x0001,0x0100,-1,0x0002,0x0700,
+ -1,0x0003,0x1030,-1,0x0004,0x0000,-1,0x0008,0x0207,-1,0x0009,0x0000,
+ -1,0x000a,0x0000,-1,0x000c,0x0001,-1,0x000d,0x0000,-1,0x000f,0x0000,
+ -1,0x0010,0x0000,-1,0x0011,0x0007,-1,0x0012,0x0000,-1,0x0013,0x0000,
+ -2,50,-1,0x0010,0x1590,-1,0x0011,0x0227,-2,50,-1,0x0012,0x009c,-2,50,
+ -1,0x0013,0x1900,-1,0x0029,0x0023,-1,0x002b,0x000e,-2,50,
+ -1,0x0020,0x0000,-1,0x0021,0x0000,-2,50,-1,0x0050,0x0000,
+ -1,0x0051,0x00ef,-1,0x0052,0x0000,-1,0x0053,0x013f,-1,0x0060,0xa700,
+ -1,0x0061,0x0001,-1,0x006a,0x0000,-1,0x0080,0x0000,-1,0x0081,0x0000,
+ -1,0x0082,0x0000,-1,0x0083,0x0000,-1,0x0084,0x0000,-1,0x0085,0x0000,
+ -1,0x0090,0x0010,-1,0x0092,0x0000,-1,0x0093,0x0003,-1,0x0095,0x0110,
+ -1,0x0097,0x0000,-1,0x0098,0x0000,-1,0x0007,0x0133,-1,0x0020,0x0000,
+ -1,0x0021,0x0000,-2,100,-3 };
+
+#define HY28B_GAMMA \
+ "04 1F 4 7 7 0 7 7 6 0\n" \
+ "0F 00 1 7 4 0 0 0 6 7"
+
+static int pitft_init_sequence[] = {
+ -1,0x01,-2,5,-1,0x28,-1,0xEF,0x03,0x80,0x02,-1,0xCF,0x00,0xC1,0x30,
+ -1,0xED,0x64,0x03,0x12,0x81,-1,0xE8,0x85,0x00,0x78,
+ -1,0xCB,0x39,0x2C,0x00,0x34,0x02,-1,0xF7,0x20,-1,0xEA,0x00,0x00,
+ -1,0xC0,0x23,-1,0xC1,0x10,-1,0xC5,0x3e,0x28,-1,0xC7,0x86,-1,0x3A,0x55,
+ -1,0xB1,0x00,0x18,-1,0xB6,0x08,0x82,0x27,-1,0xF2,0x00,-1,0x26,0x01,
+ -1,0xE0,0x0F,0x31,0x2B,0x0C,0x0E,0x08,0x4E,0xF1,0x37,0x07,0x10,0x03,
+ 0x0E,0x09,0x00,-1,0xE1,0x00,0x0E,0x14,0x03,0x11,0x07,0x31,0xC1,0x48,
+ 0x08,0x0F,0x0C,0x31,0x36,0x0F,-1,0x11,-2,100,-1,0x29,-2,20,-3 };
+
+static int waveshare32b_init_sequence[] = {
+ -1,0xCB,0x39,0x2C,0x00,0x34,0x02,-1,0xCF,0x00,0xC1,0x30,
+ -1,0xE8,0x85,0x00,0x78,-1,0xEA,0x00,0x00,-1,0xED,0x64,0x03,0x12,0x81,
+ -1,0xF7,0x20,-1,0xC0,0x23,-1,0xC1,0x10,-1,0xC5,0x3e,0x28,-1,0xC7,0x86,
+ -1,0x36,0x28,-1,0x3A,0x55,-1,0xB1,0x00,0x18,-1,0xB6,0x08,0x82,0x27,
+ -1,0xF2,0x00,-1,0x26,0x01,
+ -1,0xE0,0x0F,0x31,0x2B,0x0C,0x0E,0x08,0x4E,0xF1,0x37,0x07,0x10,0x03,0x0E,0x09,0x00,
+ -1,0xE1,0x00,0x0E,0x14,0x03,0x11,0x07,0x31,0xC1,0x48,0x08,0x0F,0x0C,0x31,0x36,0x0F,
+ -1,0x11,-2,120,-1,0x29,-1,0x2c,-3 };
+
+/* Supported displays in alphabetical order */
+static struct fbtft_device_display displays[] = {
+ {
+ .name = "adafruit18",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_st7735r",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ .gamma = ADAFRUIT18_GAMMA,
+ }
+ }
+ }, {
+ .name = "adafruit18_green",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_st7735r",
+ .max_speed_hz = 4000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ .fbtftops.set_addr_win = \
+ adafruit18_green_tab_set_addr_win,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ .gamma = ADAFRUIT18_GAMMA,
+ }
+ }
+ }, {
+ .name = "adafruit22",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_hx8340bn",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 9,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "led", 23 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "adafruit22a",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9340",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "adafruit28",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9341",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "adafruit13m",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ssd1306",
+ .max_speed_hz = 16000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "agm1264k-fl",
+ .pdev = &(struct platform_device) {
+ .name = "fb_agm1264k-fl",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = FBTFT_ONBOARD_BACKLIGHT,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ },
+ }
+ }
+ }, {
+ .name = "dogs102",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_uc1701",
+ .max_speed_hz = 8000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 13 },
+ { "dc", 6 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "er_tftm050_2",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ra8875",
+ .max_speed_hz = 5000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ .width = 480,
+ .height = 272,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "er_tftm070_5",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ra8875",
+ .max_speed_hz = 5000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ .width = 800,
+ .height = 480,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "flexfb",
+ .spi = &(struct spi_board_info) {
+ .modalias = "flexfb",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "flexpfb",
+ .pdev = &(struct platform_device) {
+ .name = "flexpfb",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 17 },
+ { "dc", 1 },
+ { "wr", 0 },
+ { "cs", 21 },
+ { "db00", 9 },
+ { "db01", 11 },
+ { "db02", 18 },
+ { "db03", 23 },
+ { "db04", 24 },
+ { "db05", 25 },
+ { "db06", 8 },
+ { "db07", 7 },
+ { "led", 4 },
+ {},
+ },
+ },
+ }
+ }
+ }, {
+ .name = "freetronicsoled128",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ssd1351",
+ .max_speed_hz = 20000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = FBTFT_ONBOARD_BACKLIGHT,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 24 },
+ { "dc", 25 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "hx8353d",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_hx8353d",
+ .max_speed_hz = 16000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 23 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "hy28a",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9320",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .startbyte = 0b01110000,
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "hy28b",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9325",
+ .max_speed_hz = 48000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ .init_sequence = hy28b_init_sequence,
+ },
+ .startbyte = 0b01110000,
+ .bgr = true,
+ .fps = 50,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "led", 18 },
+ {},
+ },
+ .gamma = HY28B_GAMMA,
+ }
+ }
+ }, {
+ .name = "ili9481",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9481",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .regwidth = 16,
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 22 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "itdb24",
+ .pdev = &(struct platform_device) {
+ .name = "fb_s6d1121",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = false,
+ .gpios = (const struct fbtft_gpio []) {
+ /* Wiring for LCD adapter kit */
+ { "reset", 7 },
+ { "dc", 0 }, /* rev 2: 2 */
+ { "wr", 1 }, /* rev 2: 3 */
+ { "cs", 8 },
+ { "db00", 17 },
+ { "db01", 18 },
+ { "db02", 21 }, /* rev 2: 27 */
+ { "db03", 22 },
+ { "db04", 23 },
+ { "db05", 24 },
+ { "db06", 25 },
+ { "db07", 4 },
+ {}
+ },
+ },
+ }
+ }
+ }, {
+ .name = "itdb28",
+ .pdev = &(struct platform_device) {
+ .name = "fb_ili9325",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ },
+ }
+ }
+ }, {
+ .name = "itdb28_spi",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9325",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "mi0283qt-2",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_hx8347d",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .startbyte = 0b01110000,
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "mi0283qt-9a",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9341",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 9,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "mi0283qt-v2",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_watterott",
+ .max_speed_hz = 4000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "nokia3310",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_pcd8544",
+ .max_speed_hz = 400000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 23 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "nokia3310a",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_tls8204",
+ .max_speed_hz = 1000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 23 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "piscreen",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9486",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .regwidth = 16,
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 22 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "pitft",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9340",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .chip_select = 0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ .init_sequence = pitft_init_sequence,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "dc", 25 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "pioled",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ssd1351",
+ .max_speed_hz = 20000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 24 },
+ { "dc", 25 },
+ {},
+ },
+ .gamma = "0 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 3 " \
+ "3 3 3 3 3 3 3 3 " \
+ "3 3 3 3 3 3 3 3 " \
+ "3 3 3 4 4 4 4 4 " \
+ "4 4 4 4 4 4 4"
+ }
+ }
+ }, {
+ .name = "rpi-display",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9341",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 23 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "s6d02a1",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_s6d02a1",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 23 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "sainsmart18",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_st7735r",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "sainsmart32",
+ .pdev = &(struct platform_device) {
+ .name = "fb_ssd1289",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 16,
+ .txbuflen = -2, /* disable buffer */
+ .backlight = 1,
+ .fbtftops.write = write_gpio16_wr_slow,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ },
+ },
+ }
+ }, {
+ .name = "sainsmart32_fast",
+ .pdev = &(struct platform_device) {
+ .name = "fb_ssd1289",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 16,
+ .txbuflen = -2, /* disable buffer */
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ },
+ },
+ }
+ }, {
+ .name = "sainsmart32_latched",
+ .pdev = &(struct platform_device) {
+ .name = "fb_ssd1289",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 16,
+ .txbuflen = -2, /* disable buffer */
+ .backlight = 1,
+ .fbtftops.write = \
+ fbtft_write_gpio16_wr_latched,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ },
+ },
+ }
+ }, {
+ .name = "sainsmart32_spi",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ssd1289",
+ .max_speed_hz = 16000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "spidev",
+ .spi = &(struct spi_board_info) {
+ .modalias = "spidev",
+ .max_speed_hz = 500000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "ssd1331",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ssd1331",
+ .max_speed_hz = 20000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 24 },
+ { "dc", 25 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "tinylcd35",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_tinylcd",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "tm022hdh26",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9341",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 25 },
+ { "dc", 24 },
+ { "led", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "tontec35_9481", /* boards before 02 July 2014 */
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9481",
+ .max_speed_hz = 128000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 15 },
+ { "dc", 25 },
+ { "led_", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "tontec35_9486", /* boards after 02 July 2014 */
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9486",
+ .max_speed_hz = 128000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 15 },
+ { "dc", 25 },
+ { "led_", 18 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "upd161704",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_upd161704",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 24 },
+ { "dc", 25 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "waveshare32b",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_ili9340",
+ .max_speed_hz = 48000000,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ .backlight = 1,
+ .init_sequence = waveshare32b_init_sequence,
+ },
+ .bgr = true,
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 27 },
+ { "dc", 22 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "waveshare22",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_bd663474",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "reset", 24 },
+ { "dc", 25 },
+ {},
+ },
+ }
+ }
+ }, {
+ /* This should be the last item.
+ Used with the custom argument */
+ .name = "",
+ .spi = &(struct spi_board_info) {
+ .modalias = "",
+ .max_speed_hz = 0,
+ .mode = SPI_MODE_0,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ }
+ },
+ .pdev = &(struct platform_device) {
+ .name = "",
+ .id = 0,
+ .dev = {
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ },
+ },
+ },
+ }
+};
+
+static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
+{
+ u16 data;
+ int i;
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ static u16 prev_data;
+#endif
+
+ fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
+ "%s(len=%d): ", __func__, len);
+
+ while (len) {
+ data = *(u16 *) buf;
+
+ /* Start writing by pulling down /WR */
+ gpio_set_value(par->gpio.wr, 0);
+
+ /* Set data */
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ if (data == prev_data) {
+ gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ } else {
+ for (i = 0; i < 16; i++) {
+ if ((data & 1) != (prev_data & 1))
+ gpio_set_value(par->gpio.db[i],
+ (data & 1));
+ data >>= 1;
+ prev_data >>= 1;
+ }
+ }
+#else
+ for (i = 0; i < 16; i++) {
+ gpio_set_value(par->gpio.db[i], (data & 1));
+ data >>= 1;
+ }
+#endif
+
+ /* Pullup /WR */
+ gpio_set_value(par->gpio.wr, 1);
+
+#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
+ prev_data = *(u16 *) buf;
+#endif
+ buf += 2;
+ len -= 2;
+ }
+
+ return 0;
+}
+
+static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
+ int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+ write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
+ write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
+ write_reg(par, 0x2C);
+}
+
+/* used if gpios parameter is present */
+static struct fbtft_gpio fbtft_device_param_gpios[MAX_GPIOS+1] = { };
+
+static void fbtft_device_pdev_release(struct device *dev)
+{
+ /*
+ * Needed to silence this message:
+ * Device 'xxx' does not have a release() function, it is broken and
+ * must be fixed
+ */
+}
+
+static int spi_device_found(struct device *dev, void *data)
+{
+ struct spi_device *spi = container_of(dev, struct spi_device, dev);
+
+ pr_info(DRVNAME": %s %s %dkHz %d bits mode=0x%02X\n",
+ spi->modalias, dev_name(dev), spi->max_speed_hz/1000,
+ spi->bits_per_word, spi->mode);
+
+ return 0;
+}
+
+static void pr_spi_devices(void)
+{
+ pr_info(DRVNAME": SPI devices registered:\n");
+ bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found);
+}
+
+static int p_device_found(struct device *dev, void *data)
+{
+ struct platform_device
+ *pdev = container_of(dev, struct platform_device, dev);
+
+ if (strstr(pdev->name, "fb"))
+ pr_info(DRVNAME": %s id=%d pdata? %s\n",
+ pdev->name, pdev->id,
+ pdev->dev.platform_data ? "yes" : "no");
+
+ return 0;
+}
+
+static void pr_p_devices(void)
+{
+ pr_info(DRVNAME": 'fb' Platform devices registered:\n");
+ bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found);
+}
+
+#ifdef MODULE
+static void fbtft_device_spi_delete(struct spi_master *master, unsigned cs)
+{
+ struct device *dev;
+ char str[32];
+
+ snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs);
+
+ dev = bus_find_device_by_name(&spi_bus_type, NULL, str);
+ if (dev) {
+ if (verbose)
+ pr_info(DRVNAME": Deleting %s\n", str);
+ device_del(dev);
+ }
+}
+
+static int fbtft_device_spi_device_register(struct spi_board_info *spi)
+{
+ struct spi_master *master;
+
+ master = spi_busnum_to_master(spi->bus_num);
+ if (!master) {
+ pr_err(DRVNAME ": spi_busnum_to_master(%d) returned NULL\n",
+ spi->bus_num);
+ return -EINVAL;
+ }
+ /* make sure it's available */
+ fbtft_device_spi_delete(master, spi->chip_select);
+ spi_device = spi_new_device(master, spi);
+ put_device(&master->dev);
+ if (!spi_device) {
+ pr_err(DRVNAME ": spi_new_device() returned NULL\n");
+ return -EPERM;
+ }
+ return 0;
+}
+#else
+static int fbtft_device_spi_device_register(struct spi_board_info *spi)
+{
+ return spi_register_board_info(spi, 1);
+}
+#endif
+
+static int __init fbtft_device_init(void)
+{
+ struct spi_board_info *spi = NULL;
+ struct fbtft_platform_data *pdata;
+ const struct fbtft_gpio *gpio = NULL;
+ char *p_gpio, *p_name, *p_num;
+ bool found = false;
+ int i = 0;
+ long val;
+ int ret = 0;
+
+ pr_debug("\n\n"DRVNAME": init\n");
+
+ if (name == NULL) {
+#ifdef MODULE
+ pr_err(DRVNAME": missing module parameter: 'name'\n");
+ return -EINVAL;
+#else
+ return 0;
+#endif
+ }
+
+ if (init_num > FBTFT_MAX_INIT_SEQUENCE) {
+ pr_err(DRVNAME \
+ ": init parameter: exceeded max array size: %d\n",
+ FBTFT_MAX_INIT_SEQUENCE);
+ return -EINVAL;
+ }
+
+ /* parse module parameter: gpios */
+ while ((p_gpio = strsep(&gpios, ","))) {
+ if (strchr(p_gpio, ':') == NULL) {
+ pr_err(DRVNAME \
+ ": error: missing ':' in gpios parameter: %s\n",
+ p_gpio);
+ return -EINVAL;
+ }
+ p_num = p_gpio;
+ p_name = strsep(&p_num, ":");
+ if (p_name == NULL || p_num == NULL) {
+ pr_err(DRVNAME \
+ ": something bad happened parsing gpios parameter: %s\n",
+ p_gpio);
+ return -EINVAL;
+ }
+ ret = kstrtol(p_num, 10, &val);
+ if (ret) {
+ pr_err(DRVNAME \
+ ": could not parse number in gpios parameter: %s:%s\n",
+ p_name, p_num);
+ return -EINVAL;
+ }
+ strcpy(fbtft_device_param_gpios[i].name, p_name);
+ fbtft_device_param_gpios[i++].gpio = (int) val;
+ if (i == MAX_GPIOS) {
+ pr_err(DRVNAME \
+ ": gpios parameter: exceeded max array size: %d\n",
+ MAX_GPIOS);
+ return -EINVAL;
+ }
+ }
+ if (fbtft_device_param_gpios[0].name[0])
+ gpio = fbtft_device_param_gpios;
+
+ if (verbose > 2)
+ pr_spi_devices(); /* print list of registered SPI devices */
+
+ if (verbose > 2)
+ pr_p_devices(); /* print list of 'fb' platform devices */
+
+ pr_debug(DRVNAME": name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
+
+ if (rotate > 0 && rotate < 4) {
+ rotate = (4 - rotate) * 90;
+ pr_warn("argument 'rotate' should be an angle. Values 1-3 is deprecated. Setting it to %d.\n",
+ rotate);
+ }
+ if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) {
+ pr_warn("argument 'rotate' illegal value: %d. Setting it to 0.\n",
+ rotate);
+ rotate = 0;
+ }
+
+ /* name=list lists all supported displays */
+ if (strncmp(name, "list", 32) == 0) {
+ pr_info(DRVNAME": Supported displays:\n");
+
+ for (i = 0; i < ARRAY_SIZE(displays); i++)
+ pr_info(DRVNAME": %s\n", displays[i].name);
+ return -ECANCELED;
+ }
+
+ if (custom) {
+ i = ARRAY_SIZE(displays) - 1;
+ displays[i].name = name;
+ if (speed == 0) {
+ displays[i].pdev->name = name;
+ displays[i].spi = NULL;
+ } else {
+ strncpy(displays[i].spi->modalias, name, SPI_NAME_SIZE);
+ displays[i].pdev = NULL;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(displays); i++) {
+ if (strncmp(name, displays[i].name, 32) == 0) {
+ if (displays[i].spi) {
+ spi = displays[i].spi;
+ spi->chip_select = cs;
+ spi->bus_num = busnum;
+ if (speed)
+ spi->max_speed_hz = speed;
+ if (mode != -1)
+ spi->mode = mode;
+ pdata = (void *)spi->platform_data;
+ } else if (displays[i].pdev) {
+ p_device = displays[i].pdev;
+ pdata = p_device->dev.platform_data;
+ } else {
+ pr_err(DRVNAME": broken displays array\n");
+ return -EINVAL;
+ }
+
+ pdata->rotate = rotate;
+ if (bgr == 0)
+ pdata->bgr = false;
+ else if (bgr == 1)
+ pdata->bgr = true;
+ if (startbyte)
+ pdata->startbyte = startbyte;
+ if (gamma)
+ pdata->gamma = gamma;
+ pdata->display.debug = debug;
+ if (fps)
+ pdata->fps = fps;
+ if (txbuflen)
+ pdata->txbuflen = txbuflen;
+ if (init_num)
+ pdata->display.init_sequence = init;
+ if (gpio)
+ pdata->gpios = gpio;
+ if (custom) {
+ pdata->display.width = width;
+ pdata->display.height = height;
+ pdata->display.buswidth = buswidth;
+ pdata->display.backlight = 1;
+ }
+
+ if (displays[i].spi) {
+ ret = fbtft_device_spi_device_register(spi);
+ if (ret) {
+ pr_err(DRVNAME \
+ ": failed to register SPI device\n");
+ return ret;
+ }
+ found = true;
+ break;
+ } else {
+ ret = platform_device_register(p_device);
+ if (ret < 0) {
+ pr_err(DRVNAME \
+ ": platform_device_register() returned %d\n",
+ ret);
+ return ret;
+ }
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ pr_err(DRVNAME": display not supported: '%s'\n", name);
+ return -EINVAL;
+ }
+
+ if (verbose && pdata && pdata->gpios) {
+ gpio = pdata->gpios;
+ pr_info(DRVNAME": GPIOS used by '%s':\n", name);
+ found = false;
+ while (verbose && gpio->name[0]) {
+ pr_info(DRVNAME": '%s' = GPIO%d\n",
+ gpio->name, gpio->gpio);
+ gpio++;
+ found = true;
+ }
+ if (!found)
+ pr_info(DRVNAME": (none)\n");
+ }
+
+ if (spi_device && (verbose > 1))
+ pr_spi_devices();
+ if (p_device && (verbose > 1))
+ pr_p_devices();
+
+ return 0;
+}
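For illustration only (the display name, bus/chip-select numbers and GPIO assignments below are placeholders, not values taken from this patch), the init path above is normally exercised from the command line, with 'gpios' supplying the name:number pairs parsed by the strsep()/kstrtol() loop and 'rotate' given as an angle:

    modprobe fbtft_device name=<display> busnum=0 cs=0 speed=16000000 rotate=90 gpios=reset:23,dc:24,led:18

Passing custom=1 instead registers a device under the given name with user-supplied width, height and buswidth rather than an entry from the displays[] table.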
+
+static void __exit fbtft_device_exit(void)
+{
+ pr_debug(DRVNAME" - exit\n");
+
+ if (spi_device) {
+ device_del(&spi_device->dev);
+ kfree(spi_device);
+ }
+
+ if (p_device)
+ platform_device_unregister(p_device);
+
+}
+
+arch_initcall(fbtft_device_init);
+module_exit(fbtft_device_exit);
+
+MODULE_DESCRIPTION("Add a FBTFT device.");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
new file mode 100644
index 000000000000..90832c36e557
--- /dev/null
+++ b/drivers/staging/fbtft/flexfb.c
@@ -0,0 +1,592 @@
+/*
+ * Generic FB driver for TFT LCD displays
+ *
+ * Copyright (C) 2013 Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "flexfb"
+
+static char *chip;
+module_param(chip, charp, 0);
+MODULE_PARM_DESC(chip, "LCD controller");
+
+static unsigned int width;
+module_param(width, uint, 0);
+MODULE_PARM_DESC(width, "Display width");
+
+static unsigned int height;
+module_param(height, uint, 0);
+MODULE_PARM_DESC(height, "Display height");
+
+static int init[512];
+static int init_num;
+module_param_array(init, int, &init_num, 0);
+MODULE_PARM_DESC(init, "Init sequence");
+
+static unsigned int setaddrwin;
+module_param(setaddrwin, uint, 0);
+MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use");
+
+static unsigned int buswidth = 8;
+module_param(buswidth, uint, 0);
+MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)");
+
+static unsigned int regwidth = 8;
+module_param(regwidth, uint, 0);
+MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)");
+
+static bool nobacklight;
+module_param(nobacklight, bool, 0);
+MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality.");
+
+static bool latched;
+module_param(latched, bool, 0);
+MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
+
+static int *initp;
+static int initp_num;
+
+/* default init sequences */
+static int st7735r_init[] = { \
+-1,0x01,-2,150,-1,0x11,-2,500,-1,0xB1,0x01,0x2C,0x2D,-1,0xB2,0x01,0x2C,0x2D,-1,0xB3,0x01,0x2C,0x2D,0x01,0x2C,0x2D, \
+-1,0xB4,0x07,-1,0xC0,0xA2,0x02,0x84,-1,0xC1,0xC5,-1,0xC2,0x0A,0x00,-1,0xC3,0x8A,0x2A,-1,0xC4,0x8A,0xEE,-1,0xC5,0x0E, \
+-1,0x20,-1,0x36,0xC0,-1,0x3A,0x05,-1,0xE0,0x0f,0x1a,0x0f,0x18,0x2f,0x28,0x20,0x22,0x1f,0x1b,0x23,0x37,0x00,0x07,0x02,0x10, \
+-1,0xE1,0x0f,0x1b,0x0f,0x17,0x33,0x2c,0x29,0x2e,0x30,0x30,0x39,0x3f,0x00,0x07,0x03,0x10,-1,0x29,-2,100,-1,0x13,-2,10,-3 };
+
+static int ssd1289_init[] = { \
+-1,0x00,0x0001,-1,0x03,0xA8A4,-1,0x0C,0x0000,-1,0x0D,0x080C,-1,0x0E,0x2B00,-1,0x1E,0x00B7,-1,0x01,0x2B3F,-1,0x02,0x0600, \
+-1,0x10,0x0000,-1,0x11,0x6070,-1,0x05,0x0000,-1,0x06,0x0000,-1,0x16,0xEF1C,-1,0x17,0x0003,-1,0x07,0x0233,-1,0x0B,0x0000, \
+-1,0x0F,0x0000,-1,0x41,0x0000,-1,0x42,0x0000,-1,0x48,0x0000,-1,0x49,0x013F,-1,0x4A,0x0000,-1,0x4B,0x0000,-1,0x44,0xEF00, \
+-1,0x45,0x0000,-1,0x46,0x013F,-1,0x30,0x0707,-1,0x31,0x0204,-1,0x32,0x0204,-1,0x33,0x0502,-1,0x34,0x0507,-1,0x35,0x0204, \
+-1,0x36,0x0204,-1,0x37,0x0502,-1,0x3A,0x0302,-1,0x3B,0x0302,-1,0x23,0x0000,-1,0x24,0x0000,-1,0x25,0x8000,-1,0x4f,0x0000, \
+-1,0x4e,0x0000,-1,0x22,-3 };
+
+static int hx8340bn_init[] = { \
+-1,0xC1,0xFF,0x83,0x40,-1,0x11,-2,150,-1,0xCA,0x70,0x00,0xD9,-1,0xB0,0x01,0x11, \
+-1,0xC9,0x90,0x49,0x10,0x28,0x28,0x10,0x00,0x06,-2,20,-1,0xC2,0x60,0x71,0x01,0x0E,0x05,0x02,0x09,0x31,0x0A, \
+-1,0xC3,0x67,0x30,0x61,0x17,0x48,0x07,0x05,0x33,-2,10,-1,0xB5,0x35,0x20,0x45,-1,0xB4,0x33,0x25,0x4C,-2,10, \
+-1,0x3A,0x05,-1,0x29,-2,10,-3 };
+
+static int ili9225_init[] = { \
+-1,0x0001,0x011C,-1,0x0002,0x0100,-1,0x0003,0x1030,-1,0x0008,0x0808,-1,0x000C,0x0000,-1,0x000F,0x0A01,-1,0x0020,0x0000, \
+-1,0x0021,0x0000,-2,50,-1,0x0010,0x0A00,-1,0x0011,0x1038,-2,50,-1,0x0012,0x1121,-1,0x0013,0x004E,-1,0x0014,0x676F, \
+-1,0x0030,0x0000,-1,0x0031,0x00DB,-1,0x0032,0x0000,-1,0x0033,0x0000,-1,0x0034,0x00DB,-1,0x0035,0x0000,-1,0x0036,0x00AF, \
+-1,0x0037,0x0000,-1,0x0038,0x00DB,-1,0x0039,0x0000,-1,0x0050,0x0000,-1,0x0051,0x060A,-1,0x0052,0x0D0A,-1,0x0053,0x0303, \
+-1,0x0054,0x0A0D,-1,0x0055,0x0A06,-1,0x0056,0x0000,-1,0x0057,0x0303,-1,0x0058,0x0000,-1,0x0059,0x0000,-2,50, \
+-1,0x0007,0x1017,-2,50,-3 };
+
+static int ili9320_init[] = { \
+-1,0x00E5,0x8000,-1,0x0000,0x0001,-1,0x0001,0x0100,-1,0x0002,0x0700,-1,0x0003,0x1030,-1,0x0004,0x0000,-1,0x0008,0x0202, \
+-1,0x0009,0x0000,-1,0x000A,0x0000,-1,0x000C,0x0000,-1,0x000D,0x0000,-1,0x000F,0x0000,-1,0x0010,0x0000,-1,0x0011,0x0007, \
+-1,0x0012,0x0000,-1,0x0013,0x0000,-2,200,-1,0x0010,0x17B0,-1,0x0011,0x0031,-2,50,-1,0x0012,0x0138,-2,50,-1,0x0013,0x1800, \
+-1,0x0029,0x0008,-2,50,-1,0x0020,0x0000,-1,0x0021,0x0000,-1,0x0030,0x0000,-1,0x0031,0x0505,-1,0x0032,0x0004, \
+-1,0x0035,0x0006,-1,0x0036,0x0707,-1,0x0037,0x0105,-1,0x0038,0x0002,-1,0x0039,0x0707,-1,0x003C,0x0704,-1,0x003D,0x0807, \
+-1,0x0050,0x0000,-1,0x0051,0x00EF,-1,0x0052,0x0000,-1,0x0053,0x013F,-1,0x0060,0x2700,-1,0x0061,0x0001,-1,0x006A,0x0000, \
+-1,0x0080,0x0000,-1,0x0081,0x0000,-1,0x0082,0x0000,-1,0x0083,0x0000,-1,0x0084,0x0000,-1,0x0085,0x0000,-1,0x0090,0x0010, \
+-1,0x0092,0x0000,-1,0x0093,0x0003,-1,0x0095,0x0110,-1,0x0097,0x0000,-1,0x0098,0x0000,-1,0x0007,0x0173,-3 };
+
+static int ili9325_init[] = { \
+-1,0x00E3,0x3008,-1,0x00E7,0x0012,-1,0x00EF,0x1231,-1,0x0001,0x0100,-1,0x0002,0x0700,-1,0x0003,0x1030,-1,0x0004,0x0000, \
+-1,0x0008,0x0207,-1,0x0009,0x0000,-1,0x000A,0x0000,-1,0x000C,0x0000,-1,0x000D,0x0000,-1,0x000F,0x0000,-1,0x0010,0x0000, \
+-1,0x0011,0x0007,-1,0x0012,0x0000,-1,0x0013,0x0000,-2,200,-1,0x0010,0x1690,-1,0x0011,0x0223,-2,50,-1,0x0012,0x000D,-2,50, \
+-1,0x0013,0x1200,-1,0x0029,0x000A,-1,0x002B,0x000C,-2,50,-1,0x0020,0x0000,-1,0x0021,0x0000,-1,0x0030,0x0000, \
+-1,0x0031,0x0506,-1,0x0032,0x0104,-1,0x0035,0x0207,-1,0x0036,0x000F,-1,0x0037,0x0306,-1,0x0038,0x0102,-1,0x0039,0x0707, \
+-1,0x003C,0x0702,-1,0x003D,0x1604,-1,0x0050,0x0000,-1,0x0051,0x00EF,-1,0x0052,0x0000,-1,0x0053,0x013F,-1,0x0060,0xA700, \
+-1,0x0061,0x0001,-1,0x006A,0x0000,-1,0x0080,0x0000,-1,0x0081,0x0000,-1,0x0082,0x0000,-1,0x0083,0x0000,-1,0x0084,0x0000, \
+-1,0x0085,0x0000,-1,0x0090,0x0010,-1,0x0092,0x0600,-1,0x0007,0x0133,-3 };
+
+static int ili9341_init[] = { \
+-1,0x28,-2,20,-1,0xCF,0x00,0x83,0x30,-1,0xED,0x64,0x03,0x12,0x81,-1,0xE8,0x85,0x01,0x79, \
+-1,0xCB,0x39,0x2c,0x00,0x34,0x02,-1,0xF7,0x20,-1,0xEA,0x00,0x00,-1,0xC0,0x26,-1,0xC1,0x11, \
+-1,0xC5,0x35,0x3E,-1,0xC7,0xBE,-1,0xB1,0x00,0x1B,-1,0xB6,0x0a,0x82,0x27,0x00,-1,0xB7,0x07, \
+-1,0x3A,0x55,-1,0x36,0x48,-1,0x11,-2,120,-1,0x29,-2,20,-3 };
+
+static int ssd1351_init[] = { -1,0xfd,0x12,-1,0xfd,0xb1,-1,0xae,-1,0xb3,0xf1,-1,0xca,0x7f,-1,0xa0,0x74, \
+ -1,0x15,0x00,0x7f,-1,0x75,0x00,0x7f,-1,0xa1,0x00,-1,0xa2,0x00,-1,0xb5,0x00, \
+ -1,0xab,0x01,-1,0xb1,0x32,-1,0xb4,0xa0,0xb5,0x55,-1,0xbb,0x17,-1,0xbe,0x05, \
+ -1,0xc1,0xc8,0x80,0xc8,-1,0xc7,0x0f,-1,0xb6,0x01,-1,0xa6,-1,0xaf,-3 };
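The arrays above use the compact encoding consumed by fbtft_init_display() (installed further down as the init_display op): -1 introduces a command/register write followed by its parameter values, -2 means "delay, the next value is milliseconds", and -3 terminates the sequence. A minimal sketch of that convention, assuming a hypothetical helper name rather than the driver's own parser:

/*
 * Illustrative only: walks an fbtft-style init sequence.
 *   -1 <reg> <params...>   register write
 *   -2 <ms>                delay in milliseconds
 *   -3                     end of sequence
 */
static void walk_init_sequence(const int *seq, int len)
{
        int i = 0;

        while (i < len && seq[i] != -3) {
                if (seq[i] == -1 && i + 1 < len) {
                        pr_info("write reg 0x%X\n", seq[i + 1]);
                        i += 2;
                        while (i < len && seq[i] >= 0)
                                pr_info("  param 0x%X\n", seq[i++]);
                } else if (seq[i] == -2 && i + 1 < len) {
                        msleep(seq[i + 1]);
                        i += 2;
                } else {
                        i++;    /* unexpected value, skip it */
                }
        }
}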
+
+/* ili9320, ili9325 */
+static void flexfb_set_addr_win_1(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+ switch (par->info->var.rotate) {
+ /* R20h = Horizontal GRAM Start Address */
+ /* R21h = Vertical GRAM Start Address */
+ case 0:
+ write_reg(par, 0x0020, xs);
+ write_reg(par, 0x0021, ys);
+ break;
+ case 180:
+ write_reg(par, 0x0020, width - 1 - xs);
+ write_reg(par, 0x0021, height - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x0020, width - 1 - ys);
+ write_reg(par, 0x0021, xs);
+ break;
+ case 90:
+ write_reg(par, 0x0020, ys);
+ write_reg(par, 0x0021, height - 1 - xs);
+ break;
+ }
+ write_reg(par, 0x0022); /* Write Data to GRAM */
+}
+
+/* ssd1289 */
+static void flexfb_set_addr_win_2(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ switch (par->info->var.rotate) {
+ /* R4Eh - Set GDDRAM X address counter */
+ /* R4Fh - Set GDDRAM Y address counter */
+ case 0:
+ write_reg(par, 0x4e, xs);
+ write_reg(par, 0x4f, ys);
+ break;
+ case 180:
+ write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
+ write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
+ break;
+ case 270:
+ write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
+ write_reg(par, 0x4f, xs);
+ break;
+ case 90:
+ write_reg(par, 0x4e, ys);
+ write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
+ break;
+ }
+
+ /* R22h - RAM data write */
+ write_reg(par, 0x22, 0);
+}
+
+/* ssd1351 */
+static void flexfb_set_addr_win_3(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
+
+ write_reg(par, 0x15, xs, xe);
+ write_reg(par, 0x75, ys, ye);
+ write_reg(par, 0x5C);
+}
+
+static int flexfb_verify_gpios_dc(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
+
+ if (par->gpio.dc < 0) {
+ dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int flexfb_verify_gpios_db(struct fbtft_par *par)
+{
+ int i;
+ int num_db = buswidth;
+
+ fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
+
+ if (par->gpio.dc < 0) {
+ dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ if (par->gpio.wr < 0) {
+ dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ if (latched && (par->gpio.latch < 0)) {
+ dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
+ return -EINVAL;
+ }
+ if (latched)
+ num_db = buswidth / 2;
+ for (i = 0; i < num_db; i++) {
+ if (par->gpio.db[i] < 0) {
+ dev_err(par->info->device, "Missing info about 'db%02d' gpio. Aborting.\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static struct fbtft_display flex_display = { };
+
+static int flexfb_probe_common(struct spi_device *sdev, struct platform_device *pdev)
+{
+ struct device *dev;
+ struct fb_info *info;
+ struct fbtft_par *par;
+ int ret;
+
+ initp = init;
+ initp_num = init_num;
+
+ if (sdev)
+ dev = &sdev->dev;
+ else
+ dev = &pdev->dev;
+
+ fbtft_init_dbg(dev, "%s(%s)\n", __func__, sdev ? "'SPI device'" : "'Platform device'");
+
+ if (chip) {
+ if (!strcmp(chip, "st7735r")) {
+ if (!width)
+ width = 128;
+ if (!height)
+ height = 160;
+ if (init_num == 0) {
+ initp = st7735r_init;
+ initp_num = ARRAY_SIZE(st7735r_init);
+ }
+ } else if (!strcmp(chip, "hx8340bn")) {
+ if (!width)
+ width = 176;
+ if (!height)
+ height = 220;
+ setaddrwin = 0;
+ if (init_num == 0) {
+ initp = hx8340bn_init;
+ initp_num = ARRAY_SIZE(hx8340bn_init);
+ }
+ } else if (!strcmp(chip, "ili9225")) {
+ if (!width)
+ width = 176;
+ if (!height)
+ height = 220;
+ setaddrwin = 0;
+ regwidth = 16;
+ if (init_num == 0) {
+ initp = ili9225_init;
+ initp_num = ARRAY_SIZE(ili9225_init);
+ }
+ } else if (!strcmp(chip, "ili9320")) {
+ if (!width)
+ width = 240;
+ if (!height)
+ height = 320;
+ setaddrwin = 1;
+ regwidth = 16;
+ if (init_num == 0) {
+ initp = ili9320_init;
+ initp_num = ARRAY_SIZE(ili9320_init);
+ }
+ } else if (!strcmp(chip, "ili9325")) {
+ if (!width)
+ width = 240;
+ if (!height)
+ height = 320;
+ setaddrwin = 1;
+ regwidth = 16;
+ if (init_num == 0) {
+ initp = ili9325_init;
+ initp_num = ARRAY_SIZE(ili9325_init);
+ }
+ } else if (!strcmp(chip, "ili9341")) {
+ if (!width)
+ width = 240;
+ if (!height)
+ height = 320;
+ setaddrwin = 0;
+ regwidth = 8;
+ if (init_num == 0) {
+ initp = ili9341_init;
+ initp_num = ARRAY_SIZE(ili9341_init);
+ }
+ } else if (!strcmp(chip, "ssd1289")) {
+ if (!width)
+ width = 240;
+ if (!height)
+ height = 320;
+ setaddrwin = 2;
+ regwidth = 16;
+ if (init_num == 0) {
+ initp = ssd1289_init;
+ initp_num = ARRAY_SIZE(ssd1289_init);
+ }
+ } else if (!strcmp(chip, "ssd1351")) {
+ if (!width)
+ width = 128;
+ if (!height)
+ height = 128;
+ setaddrwin = 3;
+ if (init_num == 0) {
+ initp = ssd1351_init;
+ initp_num = ARRAY_SIZE(ssd1351_init);
+ }
+ } else {
+ dev_err(dev, "chip=%s is not supported\n", chip);
+ return -EINVAL;
+ }
+ }
+
+ if (width == 0 || height == 0) {
+ dev_err(dev, "argument(s) missing: width and height has to be set.\n");
+ return -EINVAL;
+ }
+ flex_display.width = width;
+ flex_display.height = height;
+ fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height);
+ fbtft_init_dbg(dev, "chip = %s\n", chip ? chip : "not set");
+ fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin);
+ fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
+ fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
+
+ info = fbtft_framebuffer_alloc(&flex_display, dev);
+ if (!info)
+ return -ENOMEM;
+
+ par = info->par;
+ if (sdev)
+ par->spi = sdev;
+ else
+ par->pdev = pdev;
+ if (!par->init_sequence)
+ par->init_sequence = initp;
+ par->fbtftops.init_display = fbtft_init_display;
+
+ /* register write functions */
+ switch (regwidth) {
+ case 8:
+ par->fbtftops.write_register = fbtft_write_reg8_bus8;
+ break;
+ case 16:
+ par->fbtftops.write_register = fbtft_write_reg16_bus8;
+ break;
+ default:
+ dev_err(dev, "argument 'regwidth': %d is not supported.\n", regwidth);
+ ret = -EINVAL;
+ goto out_release;
+ }
+
+ /* bus functions */
+ if (sdev) {
+ par->fbtftops.write = fbtft_write_spi;
+ switch (buswidth) {
+ case 8:
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
+ if (!par->startbyte)
+ par->fbtftops.verify_gpios = flexfb_verify_gpios_dc;
+ break;
+ case 9:
+ if (regwidth == 16) {
+ dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n", regwidth, buswidth);
+ ret = -EINVAL;
+ goto out_release;
+ }
+ par->fbtftops.write_register = fbtft_write_reg8_bus9;
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
+ sdev->bits_per_word = 9;
+ ret = sdev->master->setup(sdev);
+ if (ret) {
+ dev_warn(dev,
+ "9-bit SPI not available, emulating using 8-bit.\n");
+ sdev->bits_per_word = 8;
+ ret = sdev->master->setup(sdev);
+ if (ret)
+ goto out_release;
+ /* allocate buffer with room for dc bits */
+ par->extra = devm_kzalloc(par->info->device,
+ par->txbuf.len + (par->txbuf.len / 8) + 8,
+ GFP_KERNEL);
+ if (!par->extra) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ par->fbtftops.write = fbtft_write_spi_emulate_9;
+ }
+ break;
+ default:
+ dev_err(dev, "argument 'buswidth': %d is not supported with SPI.\n", buswidth);
+ ret = -EINVAL;
+ goto out_release;
+ }
+ } else {
+ par->fbtftops.verify_gpios = flexfb_verify_gpios_db;
+ switch (buswidth) {
+ case 8:
+ par->fbtftops.write = fbtft_write_gpio8_wr;
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
+ break;
+ case 16:
+ par->fbtftops.write_register = fbtft_write_reg16_bus16;
+ if (latched)
+ par->fbtftops.write = fbtft_write_gpio16_wr_latched;
+ else
+ par->fbtftops.write = fbtft_write_gpio16_wr;
+ par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
+ break;
+ default:
+ dev_err(dev, "argument 'buswidth': %d is not supported with parallel.\n", buswidth);
+ ret = -EINVAL;
+ goto out_release;
+ }
+ }
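When the panel expects 9-bit SPI words (a D/C bit in front of every data byte) but the master cannot do bits_per_word = 9, the fallback above switches to fbtft_write_spi_emulate_9 and sizes the scratch buffer as len + len/8 plus padding. That sizing comes from the fact that eight 9-bit words pack into exactly nine 8-bit bytes. A rough sketch of that packing, assuming (not verifying) a D/C-bits-first byte layout:

/*
 * Illustrative packing of 9-bit SPI words (1 D/C bit + 8 data bits)
 * into 8-bit bytes: every group of eight words becomes nine bytes,
 * one byte of D/C bits followed by the eight data bytes.  The exact
 * layout used by fbtft_write_spi_emulate_9 is an assumption here;
 * the point is the 9/8 ratio behind the buffer size above.
 */
static size_t pack_9bit_group(const u16 *words, u8 *out)
{
        u8 dc_bits = 0;
        size_t i;

        for (i = 0; i < 8; i++)
                dc_bits = (dc_bits << 1) | ((words[i] >> 8) & 0x1);
        out[0] = dc_bits;
        for (i = 0; i < 8; i++)
                out[i + 1] = words[i] & 0xff;
        return 9;
}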
+
+ /* set_addr_win function */
+ switch (setaddrwin) {
+ case 0:
+ /* use default */
+ break;
+ case 1:
+ par->fbtftops.set_addr_win = flexfb_set_addr_win_1;
+ break;
+ case 2:
+ par->fbtftops.set_addr_win = flexfb_set_addr_win_2;
+ break;
+ case 3:
+ par->fbtftops.set_addr_win = flexfb_set_addr_win_3;
+ break;
+ default:
+ dev_err(dev, "argument 'setaddrwin': unknown value %d.\n", setaddrwin);
+ ret = -EINVAL;
+ goto out_release;
+ }
+
+ if (!nobacklight)
+ par->fbtftops.register_backlight = fbtft_register_backlight;
+
+ ret = fbtft_register_framebuffer(info);
+ if (ret < 0)
+ goto out_release;
+
+ return 0;
+
+out_release:
+ fbtft_framebuffer_release(info);
+
+ return ret;
+}
+
+static int flexfb_remove_common(struct device *dev, struct fb_info *info)
+{
+ struct fbtft_par *par;
+
+ if (!info)
+ return -EINVAL;
+ par = info->par;
+ if (par)
+ fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
+ "%s()\n", __func__);
+ fbtft_unregister_framebuffer(info);
+ fbtft_framebuffer_release(info);
+
+ return 0;
+}
+
+static int flexfb_probe_spi(struct spi_device *spi)
+{
+ return flexfb_probe_common(spi, NULL);
+}
+
+static int flexfb_remove_spi(struct spi_device *spi)
+{
+ struct fb_info *info = spi_get_drvdata(spi);
+
+ return flexfb_remove_common(&spi->dev, info);
+}
+
+static int flexfb_probe_pdev(struct platform_device *pdev)
+{
+ return flexfb_probe_common(NULL, pdev);
+}
+
+static int flexfb_remove_pdev(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+
+ return flexfb_remove_common(&pdev->dev, info);
+}
+
+static struct spi_driver flexfb_spi_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = flexfb_probe_spi,
+ .remove = flexfb_remove_spi,
+};
+
+static const struct platform_device_id flexfb_platform_ids[] = {
+ { "flexpfb", 0 },
+ { },
+};
+
+static struct platform_driver flexfb_platform_driver = {
+ .driver = {
+ .name = DRVNAME,
+ },
+ .id_table = flexfb_platform_ids,
+ .probe = flexfb_probe_pdev,
+ .remove = flexfb_remove_pdev,
+};
+
+static int __init flexfb_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&flexfb_spi_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = platform_driver_register(&flexfb_platform_driver);
+ if (ret < 0)
+ spi_unregister_driver(&flexfb_spi_driver);
+
+ return ret;
+}
+
+static void __exit flexfb_exit(void)
+{
+ spi_unregister_driver(&flexfb_spi_driver);
+ platform_driver_unregister(&flexfb_platform_driver);
+}
+
+/* ------------------------------------------------------------------------- */
+
+module_init(flexfb_init);
+module_exit(flexfb_exit);
+
+MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays");
+MODULE_AUTHOR("Noralf Tronnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index d5475b7270a8..017c3b92f51b 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -172,11 +172,11 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
/* check if we want to read upper or lower 32-bit word */
- if (Index) {
+ if (Index)
data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAL);
- } else {
+ else
data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAH);
- }
+
spin_unlock_irqrestore(&info->dpram_lock, flags);
return data;
@@ -204,11 +204,11 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
/* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- if (Index) {
+ if (Index)
ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAL, value);
- } else {
+ else
ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, value);
- }
+
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
@@ -440,9 +440,8 @@ static int ft1000_reset_card(struct net_device *dev)
tempword =
ft1000_read_dpram_mag_16(dev, FT1000_MAG_DPRAM_FEFE,
FT1000_MAG_DPRAM_FEFE_INDX);
- if (tempword == 0xfefe) {
+ if (tempword == 0xfefe)
break;
- }
mdelay(20);
}
@@ -570,12 +569,12 @@ static void ft1000_hbchk(u_long data)
pr_debug("hi_ho value = 0x%x\n", tempword);
/* Let's perform another check if ho is not detected */
if (tempword != ho) {
- if (info->AsicID == ELECTRABUZZ_ID) {
+ if (info->AsicID == ELECTRABUZZ_ID)
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- }
- else {
- tempword = ntohs(ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX));
- }
+ else
+ tempword = ntohs(ft1000_read_dpram_mag_16(dev,
+ FT1000_MAG_HI_HO,
+ FT1000_MAG_HI_HO_INDX));
}
if (tempword != ho) {
pr_info("heartbeat failed - no ho detected\n");
@@ -621,9 +620,9 @@ static void ft1000_hbchk(u_long data)
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
/* Let's check doorbell again if fail */
- if (tempword & FT1000_DB_HB) {
+ if (tempword & FT1000_DB_HB)
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- }
+
if (tempword & FT1000_DB_HB) {
pr_info("heartbeat doorbell not clear by firmware\n");
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -686,19 +685,15 @@ static void ft1000_hbchk(u_long data)
}
/* Let's write hi again if fail */
if (tempword != hi) {
- if (info->AsicID == ELECTRABUZZ_ID) {
+ if (info->AsicID == ELECTRABUZZ_ID)
ft1000_write_dpram(dev, FT1000_HI_HO, hi);
- }
- else {
+ else
ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, hi_mag, FT1000_MAG_HI_HO_INDX);
- }
- if (info->AsicID == ELECTRABUZZ_ID) {
+ if (info->AsicID == ELECTRABUZZ_ID)
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- }
- else {
+ else
tempword = ntohs(ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX));
- }
}
@@ -770,9 +765,8 @@ static void ft1000_send_cmd(struct net_device *dev, u16 *ptempbuffer, int size,
size += sizeof(struct pseudo_hdr);
/* check for odd byte and increment to 16-bit word align value */
- if ((size & 0x0001)) {
+ if ((size & 0x0001))
size++;
- }
pr_debug("total length = %d\n", size);
pr_debug("length = %d\n", ntohs(*ptempbuffer));
/*
@@ -915,9 +909,8 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
* Calculate pseudo header checksum
*/
tempword = *ppseudohdr++;
- for (i = 1; i < 7; i++) {
+ for (i = 1; i < 7; i++)
tempword ^= *ppseudohdr++;
- }
if ((tempword != *ppseudohdr)) {
pr_debug("Pseudo header checksum mismatch\n");
/* Drop this message */
@@ -957,12 +950,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
u16 wrd;
} convert;
- if (info->AsicID == ELECTRABUZZ_ID) {
+ if (info->AsicID == ELECTRABUZZ_ID)
tempword = FT1000_DPRAM_RX_BASE+2;
- }
- else {
+ else
tempword = FT1000_DPRAM_MAG_RX_BASE;
- }
+
if (ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword)) {
/* Get the message type which is total_len + PSEUDO header + msgtype + message body */
@@ -982,9 +974,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
while (tempword & FT1000_DB_DPRAM_TX) {
mdelay(5);
i++;
- if (i == 10) {
+ if (i == 10)
break;
- }
}
ptr =
list_entry(info->prov_list.next,
@@ -1039,8 +1030,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->ConTm = 0;
}
}
- }
- else {
+ } else {
pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
@@ -1105,9 +1095,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
mdelay(10);
tempword =
ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
+ if (tempword & FT1000_DB_DPRAM_TX)
mdelay(10);
- }
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
@@ -1134,9 +1123,9 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++) {
+ for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- }
+
info->DSPInfoBlk[8] = 0x7200;
info->DSPInfoBlk[9] =
htons(info->DSPInfoBlklen);
@@ -1156,9 +1145,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
mdelay(10);
tempword =
ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
+ if (tempword & FT1000_DB_DPRAM_TX)
mdelay(10);
- }
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
@@ -1184,9 +1172,9 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++) {
+ for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- }
+
pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
@@ -1508,9 +1496,8 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
tempword = inw(dev->base_addr + FT1000_REG_MAG_DFSR);
pr_debug("FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
}
- if (DrvErrNum) {
+ if (DrvErrNum)
pcmcia->PktIntfErr++;
- }
}
}
@@ -1567,9 +1554,9 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
if (skb == NULL) {
pr_debug("No Network buffers available\n");
/* Read High word to complete 32 bit access */
- if (info->AsicID == MAGNEMITE_ID) {
+ if (info->AsicID == MAGNEMITE_ID)
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- }
+
ft1000_flush_fifo(dev, 0);
info->stats.rx_errors++;
return FAILURE;
@@ -1673,9 +1660,8 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
}
pr_debug("Data passed to Protocol layer:\n");
- for (i = 0; i < len + 12; i++) {
+ for (i = 0; i < len + 12; i++)
pr_debug("Protocol Data: 0x%x\n", *ptemp++);
- }
skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
@@ -1729,21 +1715,16 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
/* Check if there is room on the FIFO */
if (len > ft1000_read_fifo_len(dev)) {
udelay(10);
- if (len > ft1000_read_fifo_len(dev)) {
+ if (len > ft1000_read_fifo_len(dev))
udelay(20);
- }
- if (len > ft1000_read_fifo_len(dev)) {
+ if (len > ft1000_read_fifo_len(dev))
udelay(20);
- }
- if (len > ft1000_read_fifo_len(dev)) {
+ if (len > ft1000_read_fifo_len(dev))
udelay(20);
- }
- if (len > ft1000_read_fifo_len(dev)) {
+ if (len > ft1000_read_fifo_len(dev))
udelay(20);
- }
- if (len > ft1000_read_fifo_len(dev)) {
+ if (len > ft1000_read_fifo_len(dev))
udelay(20);
- }
if (len > ft1000_read_fifo_len(dev)) {
pr_debug("Transmit FIFO is full - pkt drop\n");
info->stats.tx_errors++;
@@ -1751,11 +1732,11 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
}
}
/* Create pseudo header and send pseudo/ip to hardware */
- if (info->AsicID == ELECTRABUZZ_ID) {
+ if (info->AsicID == ELECTRABUZZ_ID)
pseudo.blk.length = len;
- } else {
+ else
pseudo.blk.length = ntohs(len);
- }
+
pseudo.blk.source = DSPID; /* Need to swap to get in correct order */
pseudo.blk.destination = HOSTID;
pseudo.blk.portdest = NETWORKID; /* Need to swap to get in correct order */
@@ -1768,9 +1749,8 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
pseudo.blk.qos_class = 0;
/* Calculate pseudo header checksum */
pseudo.blk.checksum = pseudo.buff[0];
- for (i = 1; i < 7; i++) {
+ for (i = 1; i < 7; i++)
pseudo.blk.checksum ^= pseudo.buff[i];
- }
/* Production Mode */
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -1835,9 +1815,8 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
plong = (u32 *)packet;
/* Write PPP type + IP Packet into Downlink FIFO */
- for (i = 0; i < (len >> 2); i++) {
+ for (i = 0; i < (len >> 2); i++)
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
- }
/* Check for odd alignment */
if (len & 0x0003) {
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index d12cfc9aa32a..f0ac43838461 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -332,15 +332,15 @@ int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
pr_debug("enter card_send_command... size=%d\n", size);
+ ret = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
+ if (ret)
+ return ret;
+
commandbuf = kmalloc(size + 2, GFP_KERNEL);
if (!commandbuf)
return -ENOMEM;
memcpy((void *)commandbuf + 2, (void *)ptempbuffer, size);
- ret = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
- if (ret)
- return ret;
-
if (temp & 0x0100)
usleep_range(900, 1100);
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 73eede163820..7c4a77bb94aa 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -281,7 +281,8 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
icmp6_out.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
icmp6_out.icmp6_code = 0;
icmp6_out.icmp6_cksum = 0;
- icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000); /* R=0, S=1, O=1 */
+ /* R=0, S=1, O=1 */
+ icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000);
ns = (struct neighbour_solicitation *)
(skb_in->data + mac_header_len +
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index b5b063a738f8..d1ab996b3305 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -220,7 +220,7 @@ static int up_to_host(struct mux_rx *r)
static void do_rx(struct work_struct *work)
{
struct mux_dev *mux_dev =
- container_of(work, struct mux_dev , work_rx.work);
+ container_of(work, struct mux_dev, work_rx.work);
struct mux_rx *r;
struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
unsigned long flags;
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index b260e45c6698..819db53da64d 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -79,15 +79,6 @@ void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata)
/*
* generic bit swap for xilinx SYSTEMMAP FPGA programming
*/
-static inline unsigned char bitswap(unsigned char s)
-{
- unsigned char d;
-
- d = (((s&0x80)>>7) | ((s&0x40)>>5) | ((s&0x20)>>3) | ((s&0x10)>>1) |
- ((s&0x08)<<1) | ((s&0x04)<<3) | ((s&0x02)<<5) | ((s&0x01)<<7));
- return d;
-}
-
void xl_program_b(int32_t i)
{
}
diff --git a/drivers/message/i2o/Kconfig b/drivers/staging/i2o/Kconfig
index 5afa0e393ecf..286c53f4b13d 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/staging/i2o/Kconfig
@@ -1,4 +1,3 @@
-
menuconfig I2O
tristate "I2O device support"
depends on PCI
diff --git a/drivers/message/i2o/Makefile b/drivers/staging/i2o/Makefile
index b0982dacfd0a..b0982dacfd0a 100644
--- a/drivers/message/i2o/Makefile
+++ b/drivers/staging/i2o/Makefile
diff --git a/drivers/message/i2o/README b/drivers/staging/i2o/README
index f072a8eb3041..f072a8eb3041 100644
--- a/drivers/message/i2o/README
+++ b/drivers/staging/i2o/README
diff --git a/drivers/message/i2o/README.ioctl b/drivers/staging/i2o/README.ioctl
index 4a7d2ebdfc97..4a7d2ebdfc97 100644
--- a/drivers/message/i2o/README.ioctl
+++ b/drivers/staging/i2o/README.ioctl
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/staging/i2o/bus-osm.c
index c463dc2efc09..7aa0339aea05 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/staging/i2o/bus-osm.c
@@ -14,7 +14,7 @@
*/
#include <linux/module.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#define OSM_NAME "bus-osm"
#define OSM_VERSION "1.317"
diff --git a/drivers/message/i2o/config-osm.c b/drivers/staging/i2o/config-osm.c
index 3bba7aa82e58..519f52f9f688 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/staging/i2o/config-osm.c
@@ -14,7 +14,7 @@
*/
#include <linux/module.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fs.h>
diff --git a/drivers/message/i2o/core.h b/drivers/staging/i2o/core.h
index 91614f11f89a..91614f11f89a 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/staging/i2o/core.h
diff --git a/drivers/message/i2o/debug.c b/drivers/staging/i2o/debug.c
index ce62d8bfe1c8..7a16114ed8ea 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/staging/i2o/debug.c
@@ -1,7 +1,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/i2o.h>
+#include "i2o.h"
static void i2o_report_util_cmd(u8 cmd);
static void i2o_report_exec_cmd(u8 cmd);
diff --git a/drivers/message/i2o/device.c b/drivers/staging/i2o/device.c
index 98348f420b52..2af22553dd4e 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/staging/i2o/device.c
@@ -14,7 +14,7 @@
*/
#include <linux/module.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/message/i2o/driver.c b/drivers/staging/i2o/driver.c
index 1b18a0d1d05b..111c3edde035 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/staging/i2o/driver.c
@@ -16,7 +16,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/rwsem.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/staging/i2o/exec-osm.c
index a3970e56ae53..16d857d5e655 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/staging/i2o/exec-osm.c
@@ -28,7 +28,7 @@
*/
#include <linux/module.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/string.h>
diff --git a/include/linux/i2o.h b/drivers/staging/i2o/i2o.h
index d23c3c20b201..d23c3c20b201 100644
--- a/include/linux/i2o.h
+++ b/drivers/staging/i2o/i2o.h
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/staging/i2o/i2o_block.c
index 6fc3866965df..0a13c64ce000 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/staging/i2o/i2o_block.c
@@ -52,7 +52,7 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/mutex.h>
#include <linux/mempool.h>
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/staging/i2o/i2o_block.h
index cf8873cbca3f..cf8873cbca3f 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/staging/i2o/i2o_block.h
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/staging/i2o/i2o_config.c
index 04bd3b6de401..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/staging/i2o/i2o_config.c
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
index b7d87cd227a9..ad84f3304f3c 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/staging/i2o/i2o_proc.c
@@ -39,7 +39,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/staging/i2o/i2o_scsi.c
index 8152e9fa9d95..1b11dcb3faea 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/staging/i2o/i2o_scsi.c
@@ -53,7 +53,7 @@
#include <linux/prefetch.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/scatterlist.h>
#include <asm/dma.h>
diff --git a/drivers/message/i2o/iop.c b/drivers/staging/i2o/iop.c
index 92752fb5b2d3..52334fc8b547 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/staging/i2o/iop.c
@@ -26,7 +26,7 @@
*/
#include <linux/module.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/message/i2o/memory.c b/drivers/staging/i2o/memory.c
index 292b41e49fbd..8f9509d275a4 100644
--- a/drivers/message/i2o/memory.c
+++ b/drivers/staging/i2o/memory.c
@@ -11,7 +11,7 @@
*/
#include <linux/module.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/message/i2o/pci.c b/drivers/staging/i2o/pci.c
index 0f9f3e1a2b6b..b3b8a61dd4a6 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/staging/i2o/pci.c
@@ -30,7 +30,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include <linux/i2o.h>
+#include "i2o.h"
#include <linux/module.h>
#include "core.h"
diff --git a/drivers/staging/iio/Documentation/iio_event_monitor.c b/drivers/staging/iio/Documentation/iio_event_monitor.c
index 940ed2399e73..72c96aa6e992 100644
--- a/drivers/staging/iio/Documentation/iio_event_monitor.c
+++ b/drivers/staging/iio/Documentation/iio_event_monitor.c
@@ -49,6 +49,8 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
[IIO_HUMIDITYRELATIVE] = "humidityrelative",
+ [IIO_ACTIVITY] = "activity",
+ [IIO_STEPS] = "steps",
};
static const char * const iio_ev_type_text[] = {
@@ -57,6 +59,7 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_ROC] = "roc",
[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+ [IIO_EV_TYPE_CHANGE] = "change",
};
static const char * const iio_ev_dir_text[] = {
@@ -92,6 +95,10 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_NORTH_TRUE] = "from_north_true",
[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
+ [IIO_MOD_RUNNING] = "running",
+ [IIO_MOD_JOGGING] = "jogging",
+ [IIO_MOD_WALKING] = "walking",
+ [IIO_MOD_STILL] = "still",
};
static bool event_is_known(struct iio_event_data *event)
@@ -121,6 +128,8 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_CCT:
case IIO_PRESSURE:
case IIO_HUMIDITYRELATIVE:
+ case IIO_ACTIVITY:
+ case IIO_STEPS:
break;
default:
return false;
@@ -154,6 +163,10 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_NORTH_TRUE:
case IIO_MOD_NORTH_MAGN_TILT_COMP:
case IIO_MOD_NORTH_TRUE_TILT_COMP:
+ case IIO_MOD_RUNNING:
+ case IIO_MOD_JOGGING:
+ case IIO_MOD_WALKING:
+ case IIO_MOD_STILL:
break;
default:
return false;
@@ -165,6 +178,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_EV_TYPE_ROC:
case IIO_EV_TYPE_THRESH_ADAPTIVE:
case IIO_EV_TYPE_MAG_ADAPTIVE:
+ case IIO_EV_TYPE_CHANGE:
break;
default:
return false;
@@ -174,6 +188,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_EV_DIR_EITHER:
case IIO_EV_DIR_RISING:
case IIO_EV_DIR_FALLING:
+ case IIO_EV_DIR_NONE:
break;
default:
return false;
@@ -214,9 +229,11 @@ static void print_event(struct iio_event_data *event)
else if (chan >= 0)
printf("channel: %d, ", chan);
- printf("evtype: %s, direction: %s\n",
- iio_ev_type_text[ev_type],
- iio_ev_dir_text[dir]);
+ printf("evtype: %s", iio_ev_type_text[ev_type]);
+
+ if (dir != IIO_EV_DIR_NONE)
+ printf(", direction: %s", iio_ev_dir_text[dir]);
+ printf("\n");
}
int main(int argc, char **argv)
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
index e1da43381d0e..18718fcaf259 100644
--- a/drivers/staging/iio/Documentation/ring.txt
+++ b/drivers/staging/iio/Documentation/ring.txt
@@ -39,9 +39,9 @@ request_update
If parameters have changed that require reinitialization or configuration of
the buffer this will trigger it.
-get_bytes_per_datum, set_bytes_per_datum
- Get/set the number of bytes for a complete scan. (All samples + timestamp)
+set_bytes_per_datum
+ Set the number of bytes for a complete scan. (All samples + timestamp)
-get_length / set_length
- Get/set the number of complete scans that may be held by the buffer.
+set_length
+ Set the number of complete scans that may be held by the buffer.
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index f5e145caffa9..b78c9c5d5588 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -716,14 +716,6 @@ static int lis3l02dq_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = iio_buffer_register(indio_dev,
- lis3l02dq_channels,
- ARRAY_SIZE(lis3l02dq_channels));
- if (ret) {
- dev_err(&spi->dev, "failed to initialize the buffer\n");
- goto error_unreg_buffer_funcs;
- }
-
if (spi->irq) {
ret = request_threaded_irq(st->us->irq,
&lis3l02dq_th,
@@ -732,7 +724,7 @@ static int lis3l02dq_probe(struct spi_device *spi)
"lis3l02dq",
indio_dev);
if (ret)
- goto error_uninitialize_buffer;
+ goto error_unreg_buffer_funcs;
ret = lis3l02dq_probe_trigger(indio_dev);
if (ret)
@@ -756,8 +748,6 @@ error_remove_trigger:
error_free_interrupt:
if (spi->irq)
free_irq(st->us->irq, indio_dev);
-error_uninitialize_buffer:
- iio_buffer_unregister(indio_dev);
error_unreg_buffer_funcs:
lis3l02dq_unconfigure_buffer(indio_dev);
return ret;
@@ -804,7 +794,6 @@ static int lis3l02dq_remove(struct spi_device *spi)
free_irq(st->us->irq, indio_dev);
lis3l02dq_remove_trigger(indio_dev);
- iio_buffer_unregister(indio_dev);
lis3l02dq_unconfigure_buffer(indio_dev);
return 0;
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 9efc77b0ebdd..1fd90090a633 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -393,7 +393,7 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
int ret;
struct iio_buffer *buffer;
- buffer = iio_kfifo_allocate(indio_dev);
+ buffer = iio_kfifo_allocate();
if (!buffer)
return -ENOMEM;
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index e4e56391487a..31fb2182c198 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -223,33 +223,6 @@ error_ret:
return ret;
}
-#ifdef SCA3000_DEBUG
-/**
- * sca3000_check_status() check the status register
- *
- * Only used for debugging purposes
- **/
-static int sca3000_check_status(struct device *dev)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
- if (ret < 0)
- goto error_ret;
- if (st->rx[0] & SCA3000_EEPROM_CS_ERROR)
- dev_err(dev, "eeprom error\n");
- if (st->rx[0] & SCA3000_SPI_FRAME_ERROR)
- dev_err(dev, "Previous SPI Frame was corrupt\n");
-
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-#endif /* SCA3000_DEBUG */
-
/**
* sca3000_show_rev() - sysfs interface to read the chip revision number
**/
@@ -459,6 +432,8 @@ static const struct iio_chan_spec sca3000_channels_with_temp[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
+ /* No buffer support */
+ .scan_index = -1,
},
};
@@ -1154,17 +1129,6 @@ static int sca3000_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = iio_buffer_register(indio_dev,
- sca3000_channels,
- ARRAY_SIZE(sca3000_channels));
- if (ret < 0)
- goto error_unregister_dev;
- if (indio_dev->buffer) {
- iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
- iio_scan_mask_set(indio_dev, indio_dev->buffer, 2);
- }
-
if (spi->irq) {
ret = request_threaded_irq(spi->irq,
NULL,
@@ -1173,7 +1137,7 @@ static int sca3000_probe(struct spi_device *spi)
"sca3000",
indio_dev);
if (ret)
- goto error_unregister_ring;
+ goto error_unregister_dev;
}
sca3000_register_ring_funcs(indio_dev);
ret = sca3000_clean_setup(st);
@@ -1184,8 +1148,6 @@ static int sca3000_probe(struct spi_device *spi)
error_free_irq:
if (spi->irq)
free_irq(spi->irq, indio_dev);
-error_unregister_ring:
- iio_buffer_unregister(indio_dev);
error_unregister_dev:
iio_device_unregister(indio_dev);
return ret;
@@ -1219,7 +1181,6 @@ static int sca3000_remove(struct spi_device *spi)
if (spi->irq)
free_irq(spi->irq, indio_dev);
iio_device_unregister(indio_dev);
- iio_buffer_unregister(indio_dev);
sca3000_unconfigure_ring(indio_dev);
return 0;
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 157827651bfa..f76a26885808 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -129,26 +129,11 @@ error_ret:
return ret ? ret : num_read;
}
-/* This is only valid with all 3 elements enabled */
-static int sca3000_ring_get_length(struct iio_buffer *r)
-{
- return 64;
-}
-
-/* only valid if resolution is kept at 11bits */
-static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
-{
- return 6;
-}
-
static bool sca3000_ring_buf_data_available(struct iio_buffer *r)
{
return r->stufftoread;
}
-static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_LENGTH_ATTR;
-
/**
* sca3000_query_ring_int() is the hardware ring status interrupt enabled
**/
@@ -238,20 +223,13 @@ static IIO_DEVICE_ATTR(in_accel_scale,
* only apply to the ring buffer. At all times full rate and accuracy
* is available via direct reading from registers.
*/
-static struct attribute *sca3000_ring_attributes[] = {
- &dev_attr_length.attr,
- &dev_attr_enable.attr,
+static const struct attribute *sca3000_ring_attributes[] = {
&iio_dev_attr_50_percent.dev_attr.attr,
&iio_dev_attr_75_percent.dev_attr.attr,
&iio_dev_attr_in_accel_scale.dev_attr.attr,
NULL,
};
-static struct attribute_group sca3000_ring_attr = {
- .attrs = sca3000_ring_attributes,
- .name = "buffer",
-};
-
static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
{
struct iio_buffer *buf;
@@ -264,7 +242,8 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
ring->private = indio_dev;
buf = &ring->buf;
buf->stufftoread = 0;
- buf->attrs = &sca3000_ring_attr;
+ buf->length = 64;
+ buf->attrs = sca3000_ring_attributes;
iio_buffer_init(buf);
return buf;
@@ -277,8 +256,6 @@ static void sca3000_ring_release(struct iio_buffer *r)
static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
.read_first_n = &sca3000_read_first_n_hw_rb,
- .get_length = &sca3000_ring_get_length,
- .get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum,
.data_available = sca3000_ring_buf_data_available,
.release = sca3000_ring_release,
};
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index f6526aa22e8a..6f8ce6c6574b 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -612,7 +612,7 @@ static int ad7192_probe(struct spi_device *spi)
const struct ad7192_platform_data *pdata = spi->dev.platform_data;
struct ad7192_state *st;
struct iio_dev *indio_dev;
- int ret , voltage_uv = 0;
+ int ret, voltage_uv = 0;
if (!pdata) {
dev_err(&spi->dev, "no platform data?\n");
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index f053535385bf..d9d6fad7cb00 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -436,7 +436,14 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
*/
mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch));
- /* prepare the delay/loop unit according to the oversampling count */
+ /*
+ * prepare the delay/loop unit according to the oversampling count
+ *
+ * from the datasheet:
+ * "The DELAY fields in HW_LRADC_DELAY0, HW_LRADC_DELAY1,
+ * HW_LRADC_DELAY2, and HW_LRADC_DELAY3 must be non-zero; otherwise,
+ * the LRADC will not trigger the delay group."
+ */
mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch) |
LRADC_DELAY_TRIGGER_DELAYS(0) |
LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
@@ -1495,20 +1502,38 @@ static int mxs_lradc_probe_touchscreen(struct mxs_lradc *lradc,
return -EINVAL;
}
- lradc->over_sample_cnt = 4;
- ret = of_property_read_u32(lradc_node, "fsl,ave-ctrl", &adapt);
- if (ret == 0)
+ if (of_property_read_u32(lradc_node, "fsl,ave-ctrl", &adapt)) {
+ lradc->over_sample_cnt = 4;
+ } else {
+ if (adapt < 1 || adapt > 32) {
+ dev_err(lradc->dev, "Invalid sample count (%u)\n",
+ adapt);
+ return -EINVAL;
+ }
lradc->over_sample_cnt = adapt;
+ }
- lradc->over_sample_delay = 2;
- ret = of_property_read_u32(lradc_node, "fsl,ave-delay", &adapt);
- if (ret == 0)
+ if (of_property_read_u32(lradc_node, "fsl,ave-delay", &adapt)) {
+ lradc->over_sample_delay = 2;
+ } else {
+ if (adapt < 2 || adapt > LRADC_DELAY_DELAY_MASK + 1) {
+ dev_err(lradc->dev, "Invalid sample delay (%u)\n",
+ adapt);
+ return -EINVAL;
+ }
lradc->over_sample_delay = adapt;
+ }
- lradc->settling_delay = 10;
- ret = of_property_read_u32(lradc_node, "fsl,settling", &adapt);
- if (ret == 0)
+ if (of_property_read_u32(lradc_node, "fsl,settling", &adapt)) {
+ lradc->settling_delay = 10;
+ } else {
+ if (adapt < 1 || adapt > LRADC_DELAY_DELAY_MASK) {
+ dev_err(lradc->dev, "Invalid settling delay (%u)\n",
+ adapt);
+ return -EINVAL;
+ }
lradc->settling_delay = adapt;
+ }
return 0;
}
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index 5a804f16ec2f..59ad5a3efe9c 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -33,6 +33,7 @@
* @base: base of irq range
* @enabled: mask of which irqs are enabled
* @inuse: mask of which irqs are connected
+ * @regs: irq regs we are faking
* @lock: protect the evgen state
*/
struct iio_dummy_eventgen {
@@ -40,6 +41,7 @@ struct iio_dummy_eventgen {
int base;
bool enabled[IIO_EVENTGEN_NO];
bool inuse[IIO_EVENTGEN_NO];
+ struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
struct mutex lock;
};
@@ -136,6 +138,12 @@ int iio_dummy_evgen_release_irq(int irq)
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);
+struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq)
+{
+ return &iio_evgen->regs[irq - iio_evgen->base];
+}
+EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
+
static void iio_dummy_evgen_free(void)
{
irq_free_descs(iio_evgen->base, IIO_EVENTGEN_NO);
@@ -153,6 +161,15 @@ static ssize_t iio_evgen_poke(struct device *dev,
size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long event;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &event);
+ if (ret)
+ return ret;
+
+ iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
+ iio_evgen->regs[this_attr->address].reg_data = event;
if (iio_evgen->enabled[this_attr->address])
handle_nested_irq(iio_evgen->base + this_attr->address);
diff --git a/drivers/staging/iio/iio_dummy_evgen.h b/drivers/staging/iio/iio_dummy_evgen.h
index 3a180811b315..2ac293ab7c8f 100644
--- a/drivers/staging/iio/iio_dummy_evgen.h
+++ b/drivers/staging/iio/iio_dummy_evgen.h
@@ -1,6 +1,12 @@
#ifndef _IIO_DUMMY_EVGEN_H_
#define _IIO_DUMMY_EVGEN_H_
+struct iio_dummy_regs {
+ u32 reg_id;
+ u32 reg_data;
+};
+
+struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq);
int iio_dummy_evgen_get_irq(void);
int iio_dummy_evgen_release_irq(int irq);
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index bf78e6f0311f..e4520213f627 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -69,6 +69,34 @@ static const struct iio_event_spec iio_dummy_event = {
.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};
+/*
+ * simple step detect event - triggered when a step is detected
+ */
+static const struct iio_event_spec step_detect_event = {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+};
+
+/*
+ * simple transition event - triggered when the reported running confidence
+ * value rises above a threshold value
+ */
+static const struct iio_event_spec iio_running_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
+
+/*
+ * simple transition event - triggered when the reported walking confidence
+ * value falls under a threshold value
+ */
+static const struct iio_event_spec iio_walking_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
#endif
/*
@@ -211,10 +239,44 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
{
.type = IIO_VOLTAGE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_index = -1, /* No buffer support */
.output = 1,
.indexed = 1,
.channel = 0,
},
+ {
+ .type = IIO_STEPS,
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_CALIBHEIGHT),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1, /* No buffer support */
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+ .event_spec = &step_detect_event,
+ .num_event_specs = 1,
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_RUNNING,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1, /* No buffer support */
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+ .event_spec = &iio_running_event,
+ .num_event_specs = 1,
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
+ },
+ {
+ .type = IIO_ACTIVITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_WALKING,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1, /* No buffer support */
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+ .event_spec = &iio_walking_event,
+ .num_event_specs = 1,
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
+ },
};
/**
@@ -263,24 +325,55 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->steps;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_ACTIVITY:
+ switch (chan->channel2) {
+ case IIO_MOD_RUNNING:
+ *val = st->activity_running;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_MOD_WALKING:
+ *val = st->activity_walking;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- switch (chan->differential) {
- case 0:
- /* only single ended adc -> 0.001333 */
- *val = 0;
- *val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ switch (chan->differential) {
+ case 0:
+ /* only single ended adc -> 0.001333 */
+ *val = 0;
+ *val2 = 1333;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case 1:
+ /*
+ * all differential adc channels
+ * -> 0.000001344
+ */
+ *val = 0;
+ *val2 = 1344;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ }
+ break;
+ default:
break;
- case 1:
- /* all differential adc channels -> 0.000001344 */
- *val = 0;
- *val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
}
break;
case IIO_CHAN_INFO_CALIBBIAS:
@@ -298,6 +391,27 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
*val2 = 33;
ret = IIO_VAL_INT_PLUS_NANO;
break;
+ case IIO_CHAN_INFO_ENABLE:
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->steps_enabled;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBHEIGHT:
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->height;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+
default:
break;
}
@@ -330,14 +444,45 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (chan->output == 0)
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output == 0)
+ return -EINVAL;
+
+ /* Locking not required as writing single value */
+ mutex_lock(&st->lock);
+ st->dac_val = val;
+ mutex_unlock(&st->lock);
+ return 0;
+ default:
return -EINVAL;
-
- /* Locking not required as writing single value */
- mutex_lock(&st->lock);
- st->dac_val = val;
- mutex_unlock(&st->lock);
- return 0;
+ }
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_STEPS:
+ mutex_lock(&st->lock);
+ st->steps = val;
+ mutex_unlock(&st->lock);
+ return 0;
+ case IIO_ACTIVITY:
+ val = clamp_val(val, 0, 100);
+ switch (chan->channel2) {
+ case IIO_MOD_RUNNING:
+ st->activity_running = val;
+ return 0;
+ case IIO_MOD_WALKING:
+ st->activity_walking = val;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_CALIBSCALE:
mutex_lock(&st->lock);
/* Compare against table - hard matching here */
@@ -356,6 +501,24 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
st->accel_calibbias = val;
mutex_unlock(&st->lock);
return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ switch (chan->type) {
+ case IIO_STEPS:
+ mutex_lock(&st->lock);
+ st->steps_enabled = val;
+ mutex_unlock(&st->lock);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBHEIGHT:
+ switch (chan->type) {
+ case IIO_STEPS:
+ st->height = val;
+ return 0;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
@@ -395,6 +558,9 @@ static int iio_dummy_init_device(struct iio_dev *indio_dev)
st->accel_val = 34;
st->accel_calibbias = -7;
st->accel_calibscale = &dummy_scales[0];
+ st->steps = 47;
+ st->activity_running = 98;
+ st->activity_walking = 4;
return 0;
}
@@ -475,13 +641,7 @@ static int iio_dummy_probe(int index)
if (ret < 0)
goto error_free_device;
- /*
- * Configure buffered capture support and register the channels with the
- * buffer, but avoid the output channel being registered by reducing the
- * number of channels by 1.
- */
- ret = iio_simple_dummy_configure_buffer(indio_dev,
- iio_dummy_channels, 5);
+ ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
goto error_unregister_events;
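
Illustration only (not part of the patch): the read_raw hunks above hand back (val, val2) pairs together with an IIO_VAL_* return code, and the IIO core turns those into the decimal strings seen in sysfs. The small userspace program below mirrors that convention with local enum values (they are stand-ins for the kernel's IIO_VAL_* codes, not the real definitions) and reproduces the 0.001333 and 0.000001344 scale values from the comments in the hunk.

/* Userspace sketch: how (val, val2, return code) becomes a sysfs number. */
#include <stdio.h>

enum { VAL_INT, VAL_INT_PLUS_MICRO, VAL_INT_PLUS_NANO };	/* mirrors IIO_VAL_* */

static double iio_format(int type, int val, int val2)
{
	switch (type) {
	case VAL_INT:
		return val;
	case VAL_INT_PLUS_MICRO:
		return val + val2 / 1e6;
	case VAL_INT_PLUS_NANO:
		return val + val2 / 1e9;
	default:
		return 0.0;
	}
}

int main(void)
{
	/* Single-ended scale from the hunk: val = 0, val2 = 1333 micro */
	printf("%.6f\n", iio_format(VAL_INT_PLUS_MICRO, 0, 1333));	/* 0.001333 */
	/* Differential scale: val = 0, val2 = 1344 nano */
	printf("%.9f\n", iio_format(VAL_INT_PLUS_NANO, 0, 1344));	/* 0.000001344 */
	return 0;
}
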
diff --git a/drivers/staging/iio/iio_simple_dummy.h b/drivers/staging/iio/iio_simple_dummy.h
index 3027aed79093..34989bf248a7 100644
--- a/drivers/staging/iio/iio_simple_dummy.h
+++ b/drivers/staging/iio/iio_simple_dummy.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
struct iio_dummy_accel_calibscale;
+struct iio_dummy_regs;
/**
* struct iio_dummy_state - device instance specific state.
@@ -33,8 +34,14 @@ struct iio_dummy_state {
int differential_adc_val[2];
int accel_val;
int accel_calibbias;
+ int activity_running;
+ int activity_walking;
const struct iio_dummy_accel_calibscale *accel_calibscale;
struct mutex lock;
+ struct iio_dummy_regs *regs;
+ int steps_enabled;
+ int steps;
+ int height;
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
int event_irq;
int event_val;
@@ -107,12 +114,10 @@ enum iio_simple_dummy_scan_elements {
};
#ifdef CONFIG_IIO_SIMPLE_DUMMY_BUFFER
-int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels, unsigned int num_channels);
+int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev);
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev);
#else
-static inline int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels, unsigned int num_channels)
+static inline int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
return 0;
};
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index fd74f9166a5f..360a4c980722 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -115,14 +115,13 @@ static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
.predisable = &iio_triggered_buffer_predisable,
};
-int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels, unsigned int num_channels)
+int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
int ret;
struct iio_buffer *buffer;
/* Allocate a buffer to use - here a kfifo */
- buffer = iio_kfifo_allocate(indio_dev);
+ buffer = iio_kfifo_allocate();
if (buffer == NULL) {
ret = -ENOMEM;
goto error_ret;
@@ -173,14 +172,8 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
*/
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
- ret = iio_buffer_register(indio_dev, channels, num_channels);
- if (ret)
- goto error_dealloc_pollfunc;
-
return 0;
-error_dealloc_pollfunc:
- iio_dealloc_pollfunc(indio_dev->pollfunc);
error_free_buffer:
iio_kfifo_free(indio_dev->buffer);
error_ret:
@@ -194,7 +187,6 @@ error_ret:
*/
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
{
- iio_buffer_unregister(indio_dev);
iio_dealloc_pollfunc(indio_dev->pollfunc);
iio_kfifo_free(indio_dev->buffer);
}
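
The buffer hunks above drop the explicit iio_buffer_register()/iio_buffer_unregister() calls and the channel arguments to iio_kfifo_allocate(): scan elements are now created by the core from indio_dev->channels when the device is registered. A minimal sketch of the resulting setup path is shown below, assuming the same triggered-kfifo arrangement the dummy driver uses; the pollfunc allocation and error unwinding are trimmed for brevity, and the function name is invented for illustration.

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

static int example_configure_buffer(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate();	/* no struct iio_dev argument any more */
	if (!buffer)
		return -ENOMEM;
	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	/*
	 * No iio_buffer_register() call: buffer scan elements are built from
	 * indio_dev->channels when iio_device_register() runs.
	 */
	return 0;
}
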
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index 64b45b077549..a5cd3bb219fe 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -72,6 +72,22 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
st->event_en = state;
else
return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_ACTIVITY:
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ st->event_en = state;
+ break;
+ default:
+ return -EINVAL;
+		}
+		break;
+	case IIO_STEPS:
+ switch (type) {
+ case IIO_EV_TYPE_CHANGE:
+ st->event_en = state;
break;
default:
return -EINVAL;
@@ -148,12 +164,50 @@ int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
static irqreturn_t iio_simple_dummy_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "id %x event %x\n",
+ st->regs->reg_id, st->regs->reg_data);
+
+ switch (st->regs->reg_data) {
+ case 0:
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH, 0, 0, 0),
+ iio_get_time_ns());
+ break;
+ case 1:
+ if (st->activity_running > st->event_val)
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_ACTIVITY, 0,
+ IIO_MOD_RUNNING,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH,
+ 0, 0, 0),
+ iio_get_time_ns());
+ break;
+ case 2:
+ if (st->activity_walking < st->event_val)
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_ACTIVITY, 0,
+ IIO_MOD_WALKING,
+ IIO_EV_DIR_FALLING,
+ IIO_EV_TYPE_THRESH,
+ 0, 0, 0),
+ iio_get_time_ns());
+ break;
+ case 3:
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
+ IIO_EV_DIR_NONE,
+ IIO_EV_TYPE_CHANGE, 0, 0, 0),
+ iio_get_time_ns());
+ break;
+ default:
+ break;
+ }
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0,
- IIO_EV_DIR_RISING,
- IIO_EV_TYPE_THRESH, 0, 0, 0),
- iio_get_time_ns());
return IRQ_HANDLED;
}
@@ -179,6 +233,8 @@ int iio_simple_dummy_events_register(struct iio_dev *indio_dev)
ret = st->event_irq;
goto error_ret;
}
+ st->regs = iio_dummy_evgen_get_regs(st->event_irq);
+
ret = request_threaded_irq(st->event_irq,
NULL,
&iio_simple_dummy_event_handler,
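
The reworked handler above pushes different event codes depending on the value the event generator wrote into the fake registers. For reference, a userspace consumer picks these up through the standard IIO event interface; the snippet below is an illustration only, and the /dev/iio:device0 path is an assumption (the actual device number depends on the system).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/events.h>

int main(void)
{
	struct iio_event_data ev;
	int event_fd = -1;
	int fd = open("/dev/iio:device0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0 || event_fd < 0)
		return 1;
	/* Blocks until the dummy event generator fires an event. */
	if (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("chan_type=%llu ev_type=%llu dir=%llu ts=%lld\n",
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(ev.id),
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_TYPE(ev.id),
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_DIR(ev.id),
		       (long long)ev.timestamp);
	close(event_fd);
	close(fd);
	return 0;
}
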
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index b6bd609c3655..79194399e040 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -89,7 +89,6 @@
struct ad5933_state {
struct i2c_client *client;
struct regulator *reg;
- struct ad5933_platform_data *pdata;
struct delayed_work work;
unsigned long mclk_hz;
unsigned char ctrl_hb;
@@ -113,7 +112,8 @@ static const struct iio_chan_spec ad5933_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
.address = AD5933_REG_TEMP_DATA,
.scan_index = -1,
.scan_type = {
@@ -360,11 +360,11 @@ static ssize_t ad5933_show(struct device *dev,
mutex_lock(&indio_dev->mlock);
switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
- len = sprintf(buf, "%d\n",
+ len = sprintf(buf, "%u\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
break;
case AD5933_OUT_RANGE_AVAIL:
- len = sprintf(buf, "%d %d %d %d\n", st->range_avail[0],
+ len = sprintf(buf, "%u %u %u %u\n", st->range_avail[0],
st->range_avail[3], st->range_avail[2],
st->range_avail[1]);
break;
@@ -520,12 +520,11 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
{
struct ad5933_state *st = iio_priv(indio_dev);
__be16 dat;
- int ret = -EINVAL;
+ int ret;
- mutex_lock(&indio_dev->mlock);
switch (m) {
case IIO_CHAN_INFO_RAW:
- case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto out;
@@ -543,16 +542,16 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
goto out;
mutex_unlock(&indio_dev->mlock);
- ret = be16_to_cpu(dat);
- /* Temp in Milli degrees Celsius */
- if (ret < 8192)
- *val = ret * 1000 / 32;
- else
- *val = (ret - 16384) * 1000 / 32;
+ *val = sign_extend32(be16_to_cpu(dat), 13);
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000;
+ *val2 = 5;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
+ return -EINVAL;
out:
mutex_unlock(&indio_dev->mlock);
return ret;
@@ -626,7 +625,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer;
- buffer = iio_kfifo_allocate(indio_dev);
+ buffer = iio_kfifo_allocate();
if (!buffer)
return -ENOMEM;
@@ -712,9 +711,7 @@ static int ad5933_probe(struct i2c_client *client,
st->client = client;
if (!pdata)
- st->pdata = &ad5933_default_pdata;
- else
- st->pdata = pdata;
+ pdata = &ad5933_default_pdata;
st->reg = devm_regulator_get(&client->dev, "vcc");
if (!IS_ERR(st->reg)) {
@@ -727,10 +724,10 @@ static int ad5933_probe(struct i2c_client *client,
if (voltage_uv)
st->vref_mv = voltage_uv / 1000;
else
- st->vref_mv = st->pdata->vref_mv;
+ st->vref_mv = pdata->vref_mv;
- if (st->pdata->ext_clk_Hz) {
- st->mclk_hz = st->pdata->ext_clk_Hz;
+ if (pdata->ext_clk_Hz) {
+ st->mclk_hz = pdata->ext_clk_Hz;
st->ctrl_lb = AD5933_CTRL_EXT_SYSCLK;
} else {
st->mclk_hz = AD5933_INT_OSC_FREQ_Hz;
@@ -752,27 +749,16 @@ static int ad5933_probe(struct i2c_client *client,
if (ret)
goto error_disable_reg;
- ret = iio_buffer_register(indio_dev, ad5933_channels,
- ARRAY_SIZE(ad5933_channels));
- if (ret)
- goto error_unreg_ring;
-
- /* enable both REAL and IMAG channels by default */
- iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
-
ret = ad5933_setup(st);
if (ret)
- goto error_uninitialize_ring;
+ goto error_unreg_ring;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_uninitialize_ring;
+ goto error_unreg_ring;
return 0;
-error_uninitialize_ring:
- iio_buffer_unregister(indio_dev);
error_unreg_ring:
iio_kfifo_free(indio_dev->buffer);
error_disable_reg:
@@ -788,7 +774,6 @@ static int ad5933_remove(struct i2c_client *client)
struct ad5933_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- iio_buffer_unregister(indio_dev);
iio_kfifo_free(indio_dev->buffer);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
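
The ad5933 temperature hunk above replaces the in-driver milli-degree conversion with a RAW value plus a SCALE of 1000/2^5 = 31.25 m°C per LSB, which is arithmetically the same as the old raw*1000/32 computation on the sign-extended 14-bit reading. A quick userspace check (illustration only, with an arbitrary example raw value) is shown below.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t raw = 0x3f38;	/* example 14-bit reading, negative temperature */
	/* Equivalent of sign_extend32(raw, 13) */
	int32_t v = (raw & 0x2000) ? (int32_t)raw - 16384 : raw;

	/* Old driver code: raw < 8192 ? raw*1000/32 : (raw - 16384)*1000/32 */
	int32_t old_mC = (raw < 8192) ? raw * 1000 / 32 : ((int32_t)raw - 16384) * 1000 / 32;
	/* New convention: value = RAW (sign-extended), scale = 1000 / 2^5 */
	double new_mC = v * (1000.0 / (1 << 5));

	printf("old=%d mC  new=%.2f mC\n", old_mC, new_mC);	/* both -6250 */
	return 0;
}
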
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index e969107ddb47..6440e3b293ca 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -537,8 +537,8 @@ static const struct i2c_device_id isl29028_id[] = {
MODULE_DEVICE_TABLE(i2c, isl29028_id);
static const struct of_device_id isl29028_of_match[] = {
- { .compatible = "isl,isl29028", },
- { .compatible = "isil,isl29028", },/* deprecated, don't use */
+ { .compatible = "isl,isl29028", }, /* for backward compat., don't use */
+ { .compatible = "isil,isl29028", },
{ },
};
MODULE_DEVICE_TABLE(of, isl29028_of_match);
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index cc4ddcce4ff9..8afae8e33d56 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -692,7 +692,7 @@ static ssize_t taos_luxtable_show(struct device *dev,
int offset = 0;
for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) {
- offset += sprintf(buf + offset, "%d,%d,%d,",
+ offset += sprintf(buf + offset, "%u,%u,%u,",
taos_device_lux[i].ratio,
taos_device_lux[i].ch0,
taos_device_lux[i].ch1);
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 423f96bdf595..4a5dc26fed4c 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1147,7 +1147,7 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
int offset = 0;
while (i < (TSL2X7X_MAX_LUX_TABLE_SIZE * 3)) {
- offset += snprintf(buf + offset, PAGE_SIZE, "%d,%d,%d,",
+ offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,%u,",
chip->tsl2x7x_device_lux[i].ratio,
chip->tsl2x7x_device_lux[i].ch0,
chip->tsl2x7x_device_lux[i].ch1);
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index e8c98cf57070..f6739e2c24b1 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -145,7 +145,6 @@ ssize_t ade7758_read_data_from_ring(struct device *dev,
int ade7758_configure_ring(struct iio_dev *indio_dev);
void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
-void ade7758_uninitialize_ring(struct iio_dev *indio_dev);
int ade7758_set_irq(struct device *dev, bool enable);
int ade7758_spi_write_reg_8(struct device *dev,
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index fb373b89dcc2..70e96b20c2eb 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -850,23 +850,15 @@ static int ade7758_probe(struct spi_device *spi)
if (ret)
goto error_free_tx;
- ret = iio_buffer_register(indio_dev,
- &ade7758_channels[0],
- ARRAY_SIZE(ade7758_channels));
- if (ret) {
- dev_err(&spi->dev, "failed to initialize the ring\n");
- goto error_unreg_ring_funcs;
- }
-
/* Get the device into a sane initial state */
ret = ade7758_initial_setup(indio_dev);
if (ret)
- goto error_uninitialize_ring;
+ goto error_unreg_ring_funcs;
if (spi->irq) {
ret = ade7758_probe_trigger(indio_dev);
if (ret)
- goto error_uninitialize_ring;
+ goto error_unreg_ring_funcs;
}
ret = iio_device_register(indio_dev);
@@ -878,8 +870,6 @@ static int ade7758_probe(struct spi_device *spi)
error_remove_trigger:
if (spi->irq)
ade7758_remove_trigger(indio_dev);
-error_uninitialize_ring:
- ade7758_uninitialize_ring(indio_dev);
error_unreg_ring_funcs:
ade7758_unconfigure_ring(indio_dev);
error_free_tx:
@@ -897,7 +887,6 @@ static int ade7758_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ade7758_stop_device(&indio_dev->dev);
ade7758_remove_trigger(indio_dev);
- ade7758_uninitialize_ring(indio_dev);
ade7758_unconfigure_ring(indio_dev);
kfree(st->tx);
kfree(st->rx);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 6e9006490742..3792b5761645 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -118,7 +118,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
int ret = 0;
- buffer = iio_kfifo_allocate(indio_dev);
+ buffer = iio_kfifo_allocate();
if (!buffer) {
ret = -ENOMEM;
return ret;
@@ -180,8 +180,3 @@ error_iio_kfifo_free:
iio_kfifo_free(indio_dev->buffer);
return ret;
}
-
-void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
-{
- iio_buffer_unregister(indio_dev);
-}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 7d217430616a..b0c7dbc8a428 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -116,7 +116,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7759_READ_REG(reg_address);
- memset(&st->tx[1], 0 , 5);
+ memset(&st->tx[1], 0, 5);
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
diff --git a/drivers/staging/line6/Kconfig b/drivers/staging/line6/Kconfig
deleted file mode 100644
index 4f1219b4c692..000000000000
--- a/drivers/staging/line6/Kconfig
+++ /dev/null
@@ -1,38 +0,0 @@
-menuconfig LINE6_USB
- tristate "Line6 USB support"
- depends on USB && SND
- select SND_RAWMIDI
- select SND_PCM
- help
- This is a driver for the guitar amp, cab, and effects modeller
- PODxt Pro by Line6 (and similar devices), supporting the
- following features:
- * Reading/writing individual parameters
- * Reading/writing complete channel, effects setup, and amp
- setup data
- * Channel switching
- * Virtual MIDI interface
- * Tuner access
- * Playback/capture/mixer device for any ALSA-compatible PCM
- audio application
- * Signal routing (record clean/processed guitar signal,
- re-amping)
-
- Preliminary support for the Variax Workbench and TonePort
- devices is included.
-
-if LINE6_USB
-
-config LINE6_USB_IMPULSE_RESPONSE
- bool "measure impulse response"
- default n
- help
- Say Y here to add code to measure the impulse response of a Line6
- device. This is more accurate than user-space methods since it
- bypasses any PCM data buffering (e.g., by ALSA or jack). This is
- useful for assessing the performance of new devices, but is not
- required for normal operation.
-
- If unsure, say N.
-
-endif # LINE6_USB
diff --git a/drivers/staging/line6/Makefile b/drivers/staging/line6/Makefile
deleted file mode 100644
index ae5c374b0f87..000000000000
--- a/drivers/staging/line6/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-obj-$(CONFIG_LINE6_USB) += line6usb.o
-
-line6usb-y := \
- audio.o \
- capture.o \
- driver.o \
- midi.o \
- midibuf.o \
- pcm.o \
- playback.o \
- pod.o \
- toneport.o \
- variax.o \
- podhd.o
diff --git a/drivers/staging/line6/audio.c b/drivers/staging/line6/audio.c
deleted file mode 100644
index 171d80c1b020..000000000000
--- a/drivers/staging/line6/audio.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <linux/export.h>
-
-#include "driver.h"
-#include "audio.h"
-
-/*
- Initialize the Line6 USB audio system.
-*/
-int line6_init_audio(struct usb_line6 *line6)
-{
- struct snd_card *card;
- int err;
-
- err = snd_card_new(line6->ifcdev,
- SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
- THIS_MODULE, 0, &card);
- if (err < 0)
- return err;
-
- line6->card = card;
-
- strcpy(card->id, line6->properties->id);
- strcpy(card->driver, DRIVER_NAME);
- strcpy(card->shortname, line6->properties->name);
- /* longname is 80 chars - see asound.h */
- sprintf(card->longname, "Line6 %s at USB %s", line6->properties->name,
- dev_name(line6->ifcdev));
- return 0;
-}
-
-/*
- Register the Line6 USB audio system.
-*/
-int line6_register_audio(struct usb_line6 *line6)
-{
- int err;
-
- err = snd_card_register(line6->card);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/*
- Cleanup the Line6 USB audio system.
-*/
-void line6_cleanup_audio(struct usb_line6 *line6)
-{
- struct snd_card *card = line6->card;
-
- if (card == NULL)
- return;
-
- snd_card_disconnect(card);
- snd_card_free(card);
- line6->card = NULL;
-}
diff --git a/drivers/staging/line6/audio.h b/drivers/staging/line6/audio.h
deleted file mode 100644
index 5f8a09a0fa95..000000000000
--- a/drivers/staging/line6/audio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#ifndef AUDIO_H
-#define AUDIO_H
-
-#include "driver.h"
-
-extern void line6_cleanup_audio(struct usb_line6 *);
-extern int line6_init_audio(struct usb_line6 *);
-extern int line6_register_audio(struct usb_line6 *);
-
-#endif
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
deleted file mode 100644
index e6ca631e3f79..000000000000
--- a/drivers/staging/line6/capture.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#include <linux/slab.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#include "audio.h"
-#include "capture.h"
-#include "driver.h"
-#include "pcm.h"
-#include "pod.h"
-
-/*
- Find a free URB and submit it.
-*/
-static int submit_audio_in_urb(struct snd_line6_pcm *line6pcm)
-{
- int index;
- unsigned long flags;
- int i, urb_size;
- int ret;
- struct urb *urb_in;
-
- spin_lock_irqsave(&line6pcm->lock_audio_in, flags);
- index =
- find_first_zero_bit(&line6pcm->active_urb_in, LINE6_ISO_BUFFERS);
-
- if (index < 0 || index >= LINE6_ISO_BUFFERS) {
- spin_unlock_irqrestore(&line6pcm->lock_audio_in, flags);
- dev_err(line6pcm->line6->ifcdev, "no free URB found\n");
- return -EINVAL;
- }
-
- urb_in = line6pcm->urb_audio_in[index];
- urb_size = 0;
-
- for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
- struct usb_iso_packet_descriptor *fin =
- &urb_in->iso_frame_desc[i];
- fin->offset = urb_size;
- fin->length = line6pcm->max_packet_size;
- urb_size += line6pcm->max_packet_size;
- }
-
- urb_in->transfer_buffer =
- line6pcm->buffer_in +
- index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
- urb_in->transfer_buffer_length = urb_size;
- urb_in->context = line6pcm;
-
- ret = usb_submit_urb(urb_in, GFP_ATOMIC);
-
- if (ret == 0)
- set_bit(index, &line6pcm->active_urb_in);
- else
- dev_err(line6pcm->line6->ifcdev,
- "URB in #%d submission failed (%d)\n", index, ret);
-
- spin_unlock_irqrestore(&line6pcm->lock_audio_in, flags);
- return 0;
-}
-
-/*
- Submit all currently available capture URBs.
-*/
-int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm)
-{
- int ret, i;
-
- for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
- ret = submit_audio_in_urb(line6pcm);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-/*
- Unlink all currently active capture URBs.
-*/
-void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm)
-{
- unsigned int i;
-
- for (i = LINE6_ISO_BUFFERS; i--;) {
- if (test_bit(i, &line6pcm->active_urb_in)) {
- if (!test_and_set_bit(i, &line6pcm->unlink_urb_in)) {
- struct urb *u = line6pcm->urb_audio_in[i];
-
- usb_unlink_urb(u);
- }
- }
- }
-}
-
-/*
- Wait until unlinking of all currently active capture URBs has been
- finished.
-*/
-void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
-{
- int timeout = HZ;
- unsigned int i;
- int alive;
-
- do {
- alive = 0;
- for (i = LINE6_ISO_BUFFERS; i--;) {
- if (test_bit(i, &line6pcm->active_urb_in))
- alive++;
- }
- if (!alive)
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- } while (--timeout > 0);
- if (alive)
- snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
-}
-
-/*
- Unlink all currently active capture URBs, and wait for finishing.
-*/
-void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
-{
- line6_unlink_audio_in_urbs(line6pcm);
- line6_wait_clear_audio_in_urbs(line6pcm);
-}
-
-/*
- Copy data into ALSA capture buffer.
-*/
-void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
-{
- struct snd_pcm_substream *substream =
- get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);
- struct snd_pcm_runtime *runtime = substream->runtime;
- const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
- int frames = fsize / bytes_per_frame;
-
- if (runtime == NULL)
- return;
-
- if (line6pcm->pos_in_done + frames > runtime->buffer_size) {
- /*
- The transferred area goes over buffer boundary,
- copy two separate chunks.
- */
- int len;
-
- len = runtime->buffer_size - line6pcm->pos_in_done;
-
- if (len > 0) {
- memcpy(runtime->dma_area +
- line6pcm->pos_in_done * bytes_per_frame, fbuf,
- len * bytes_per_frame);
- memcpy(runtime->dma_area, fbuf + len * bytes_per_frame,
- (frames - len) * bytes_per_frame);
- } else {
- /* this is somewhat paranoid */
- dev_err(line6pcm->line6->ifcdev,
- "driver bug: len = %d\n", len);
- }
- } else {
- /* copy single chunk */
- memcpy(runtime->dma_area +
- line6pcm->pos_in_done * bytes_per_frame, fbuf, fsize);
- }
-
- line6pcm->pos_in_done += frames;
- if (line6pcm->pos_in_done >= runtime->buffer_size)
- line6pcm->pos_in_done -= runtime->buffer_size;
-}
-
-void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
-{
- struct snd_pcm_substream *substream =
- get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);
-
- line6pcm->bytes_in += length;
- if (line6pcm->bytes_in >= line6pcm->period_in) {
- line6pcm->bytes_in %= line6pcm->period_in;
- snd_pcm_period_elapsed(substream);
- }
-}
-
-void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
-{
- kfree(line6pcm->buffer_in);
- line6pcm->buffer_in = NULL;
-}
-
-/*
- * Callback for completed capture URB.
- */
-static void audio_in_callback(struct urb *urb)
-{
- int i, index, length = 0, shutdown = 0;
- unsigned long flags;
-
- struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context;
-
- line6pcm->last_frame_in = urb->start_frame;
-
- /* find index of URB */
- for (index = 0; index < LINE6_ISO_BUFFERS; ++index)
- if (urb == line6pcm->urb_audio_in[index])
- break;
-
- spin_lock_irqsave(&line6pcm->lock_audio_in, flags);
-
- for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
- char *fbuf;
- int fsize;
- struct usb_iso_packet_descriptor *fin = &urb->iso_frame_desc[i];
-
- if (fin->status == -EXDEV) {
- shutdown = 1;
- break;
- }
-
- fbuf = urb->transfer_buffer + fin->offset;
- fsize = fin->actual_length;
-
- if (fsize > line6pcm->max_packet_size) {
- dev_err(line6pcm->line6->ifcdev,
- "driver and/or device bug: packet too large (%d > %d)\n",
- fsize, line6pcm->max_packet_size);
- }
-
- length += fsize;
-
- /* the following assumes LINE6_ISO_PACKETS == 1: */
- line6pcm->prev_fbuf = fbuf;
- line6pcm->prev_fsize = fsize;
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
-#endif
- if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
- &line6pcm->flags) && (fsize > 0))
- line6_capture_copy(line6pcm, fbuf, fsize);
- }
-
- clear_bit(index, &line6pcm->active_urb_in);
-
- if (test_and_clear_bit(index, &line6pcm->unlink_urb_in))
- shutdown = 1;
-
- spin_unlock_irqrestore(&line6pcm->lock_audio_in, flags);
-
- if (!shutdown) {
- submit_audio_in_urb(line6pcm);
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
-#endif
- if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
- &line6pcm->flags))
- line6_capture_check_period(line6pcm, length);
- }
-}
-
-/* open capture callback */
-static int snd_line6_capture_open(struct snd_pcm_substream *substream)
-{
- int err;
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- err = snd_pcm_hw_constraint_ratdens(runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE,
- (&line6pcm->
- properties->snd_line6_rates));
- if (err < 0)
- return err;
-
- runtime->hw = line6pcm->properties->snd_line6_capture_hw;
- return 0;
-}
-
-/* close capture callback */
-static int snd_line6_capture_close(struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
-/* hw_params capture callback */
-static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int ret;
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- /* -- Florian Demski [FD] */
- /* don't ask me why, but this fixes the bug on my machine */
- if (line6pcm == NULL) {
- if (substream->pcm == NULL)
- return -ENOMEM;
- if (substream->pcm->private_data == NULL)
- return -ENOMEM;
- substream->private_data = substream->pcm->private_data;
- line6pcm = snd_pcm_substream_chip(substream);
- }
- /* -- [FD] end */
-
- ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
-
- if (ret < 0)
- return ret;
-
- ret = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (ret < 0) {
- line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
- return ret;
- }
-
- line6pcm->period_in = params_period_bytes(hw_params);
- return 0;
-}
-
-/* hw_free capture callback */
-static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
- return snd_pcm_lib_free_pages(substream);
-}
-
-/* trigger callback */
-int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
-{
- int err;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
-#ifdef CONFIG_PM
- case SNDRV_PCM_TRIGGER_RESUME:
-#endif
- err = line6_pcm_acquire(line6pcm,
- LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
-
- if (err < 0)
- return err;
-
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
-#ifdef CONFIG_PM
- case SNDRV_PCM_TRIGGER_SUSPEND:
-#endif
- err = line6_pcm_release(line6pcm,
- LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
-
- if (err < 0)
- return err;
-
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* capture pointer callback */
-static snd_pcm_uframes_t
-snd_line6_capture_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- return line6pcm->pos_in_done;
-}
-
-/* capture operators */
-struct snd_pcm_ops snd_line6_capture_ops = {
- .open = snd_line6_capture_open,
- .close = snd_line6_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_line6_capture_hw_params,
- .hw_free = snd_line6_capture_hw_free,
- .prepare = snd_line6_prepare,
- .trigger = snd_line6_trigger,
- .pointer = snd_line6_capture_pointer,
-};
-
-int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm)
-{
- int i;
-
- /* create audio URBs and fill in constant values: */
- for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
- struct urb *urb;
-
- /* URB for audio in: */
- urb = line6pcm->urb_audio_in[i] =
- usb_alloc_urb(LINE6_ISO_PACKETS, GFP_KERNEL);
-
- if (urb == NULL) {
- dev_err(line6pcm->line6->ifcdev, "Out of memory\n");
- return -ENOMEM;
- }
-
- urb->dev = line6pcm->line6->usbdev;
- urb->pipe =
- usb_rcvisocpipe(line6pcm->line6->usbdev,
- line6pcm->ep_audio_read &
- USB_ENDPOINT_NUMBER_MASK);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->start_frame = -1;
- urb->number_of_packets = LINE6_ISO_PACKETS;
- urb->interval = LINE6_ISO_INTERVAL;
- urb->error_count = 0;
- urb->complete = audio_in_callback;
- }
-
- return 0;
-}
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
deleted file mode 100644
index 503b2d763595..000000000000
--- a/drivers/staging/line6/driver.c
+++ /dev/null
@@ -1,1162 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-
-#include "audio.h"
-#include "capture.h"
-#include "driver.h"
-#include "midi.h"
-#include "playback.h"
-#include "pod.h"
-#include "podhd.h"
-#include "revision.h"
-#include "toneport.h"
-#include "usbdefs.h"
-#include "variax.h"
-
-#define DRIVER_AUTHOR "Markus Grabner <grabner@icg.tugraz.at>"
-#define DRIVER_DESC "Line6 USB Driver"
-#define DRIVER_VERSION "0.9.1beta" DRIVER_REVISION
-
-/* table of devices that work with this driver */
-static const struct usb_device_id line6_id_table[] = {
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_BASSPODXT)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_BASSPODXTLIVE)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_BASSPODXTPRO)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_GUITARPORT)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_POCKETPOD)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD300)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD400)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD500)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_GX)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX1)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX2)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODX3)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODX3LIVE)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODXT)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODXTLIVE)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODXTPRO)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_TONEPORT_GX)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_TONEPORT_UX1)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_TONEPORT_UX2)},
- {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_VARIAX)},
- {},
-};
-
-MODULE_DEVICE_TABLE(usb, line6_id_table);
-
-#define L6PROP(dev_bit, dev_id, dev_name, dev_cap)\
- {.device_bit = LINE6_BIT_##dev_bit, .id = dev_id,\
- .name = dev_name, .capabilities = LINE6_BIT_##dev_cap}
-
-/* *INDENT-OFF* */
-static const struct line6_properties line6_properties_table[] = {
- L6PROP(BASSPODXT, "BassPODxt", "BassPODxt", CTRL_PCM_HW),
- L6PROP(BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", CTRL_PCM_HW),
- L6PROP(BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", CTRL_PCM_HW),
- L6PROP(GUITARPORT, "GuitarPort", "GuitarPort", PCM),
- L6PROP(POCKETPOD, "PocketPOD", "Pocket POD", CONTROL),
- L6PROP(PODHD300, "PODHD300", "POD HD300", CTRL_PCM_HW),
- L6PROP(PODHD400, "PODHD400", "POD HD400", CTRL_PCM_HW),
- L6PROP(PODHD500, "PODHD500", "POD HD500", CTRL_PCM_HW),
- L6PROP(PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", PCM),
- L6PROP(PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", PCM),
- L6PROP(PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", PCM),
- L6PROP(PODX3, "PODX3", "POD X3", PCM),
- L6PROP(PODX3LIVE, "PODX3Live", "POD X3 Live", PCM),
- L6PROP(PODXT, "PODxt", "PODxt", CTRL_PCM_HW),
- L6PROP(PODXTLIVE, "PODxtLive", "PODxt Live", CTRL_PCM_HW),
- L6PROP(PODXTPRO, "PODxtPro", "PODxt Pro", CTRL_PCM_HW),
- L6PROP(TONEPORT_GX, "TonePortGX", "TonePort GX", PCM),
- L6PROP(TONEPORT_UX1, "TonePortUX1", "TonePort UX1", PCM),
- L6PROP(TONEPORT_UX2, "TonePortUX2", "TonePort UX2", PCM),
- L6PROP(VARIAX, "Variax", "Variax Workbench", CONTROL),
-};
-/* *INDENT-ON* */
-
-/*
- This is Line6's MIDI manufacturer ID.
-*/
-const unsigned char line6_midi_id[] = {
- 0x00, 0x01, 0x0c
-};
-
-/*
- Code to request version of POD, Variax interface
- (and maybe other devices).
-*/
-static const char line6_request_version[] = {
- 0xf0, 0x7e, 0x7f, 0x06, 0x01, 0xf7
-};
-
-/**
- Class for asynchronous messages.
-*/
-struct message {
- struct usb_line6 *line6;
- const char *buffer;
- int size;
- int done;
-};
-
-/*
- Forward declarations.
-*/
-static void line6_data_received(struct urb *urb);
-static int line6_send_raw_message_async_part(struct message *msg,
- struct urb *urb);
-
-/*
- Start to listen on endpoint.
-*/
-static int line6_start_listen(struct usb_line6 *line6)
-{
- int err;
-
- usb_fill_int_urb(line6->urb_listen, line6->usbdev,
- usb_rcvintpipe(line6->usbdev, line6->ep_control_read),
- line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
- line6_data_received, line6, line6->interval);
- line6->urb_listen->actual_length = 0;
- err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
- return err;
-}
-
-/*
- Stop listening on endpoint.
-*/
-static void line6_stop_listen(struct usb_line6 *line6)
-{
- usb_kill_urb(line6->urb_listen);
-}
-
-/*
- Send raw message in pieces of wMaxPacketSize bytes.
-*/
-int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
- int size)
-{
- int i, done = 0;
-
- for (i = 0; i < size; i += line6->max_packet_size) {
- int partial;
- const char *frag_buf = buffer + i;
- int frag_size = min(line6->max_packet_size, size - i);
- int retval;
-
- retval = usb_interrupt_msg(line6->usbdev,
- usb_sndintpipe(line6->usbdev,
- line6->ep_control_write),
- (char *)frag_buf, frag_size,
- &partial, LINE6_TIMEOUT * HZ);
-
- if (retval) {
- dev_err(line6->ifcdev,
- "usb_interrupt_msg failed (%d)\n", retval);
- break;
- }
-
- done += frag_size;
- }
-
- return done;
-}
-
-/*
- Notification of completion of asynchronous request transmission.
-*/
-static void line6_async_request_sent(struct urb *urb)
-{
- struct message *msg = (struct message *)urb->context;
-
- if (msg->done >= msg->size) {
- usb_free_urb(urb);
- kfree(msg);
- } else
- line6_send_raw_message_async_part(msg, urb);
-}
-
-/*
- Asynchronously send part of a raw message.
-*/
-static int line6_send_raw_message_async_part(struct message *msg,
- struct urb *urb)
-{
- int retval;
- struct usb_line6 *line6 = msg->line6;
- int done = msg->done;
- int bytes = min(msg->size - done, line6->max_packet_size);
-
- usb_fill_int_urb(urb, line6->usbdev,
- usb_sndintpipe(line6->usbdev, line6->ep_control_write),
- (char *)msg->buffer + done, bytes,
- line6_async_request_sent, msg, line6->interval);
-
- msg->done += bytes;
- retval = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (retval < 0) {
- dev_err(line6->ifcdev, "%s: usb_submit_urb failed (%d)\n",
- __func__, retval);
- usb_free_urb(urb);
- kfree(msg);
- return retval;
- }
-
- return 0;
-}
-
-/*
- Setup and start timer.
-*/
-void line6_start_timer(struct timer_list *timer, unsigned int msecs,
- void (*function)(unsigned long), unsigned long data)
-{
- setup_timer(timer, function, data);
- timer->expires = jiffies + msecs * HZ / 1000;
- add_timer(timer);
-}
-
-/*
- Asynchronously send raw message.
-*/
-int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
- int size)
-{
- struct message *msg;
- struct urb *urb;
-
- /* create message: */
- msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
- if (msg == NULL)
- return -ENOMEM;
-
- /* create URB: */
- urb = usb_alloc_urb(0, GFP_ATOMIC);
-
- if (urb == NULL) {
- kfree(msg);
- dev_err(line6->ifcdev, "Out of memory\n");
- return -ENOMEM;
- }
-
- /* set message data: */
- msg->line6 = line6;
- msg->buffer = buffer;
- msg->size = size;
- msg->done = 0;
-
- /* start sending: */
- return line6_send_raw_message_async_part(msg, urb);
-}
-
-/*
- Send asynchronous device version request.
-*/
-int line6_version_request_async(struct usb_line6 *line6)
-{
- char *buffer;
- int retval;
-
- buffer = kmemdup(line6_request_version,
- sizeof(line6_request_version), GFP_ATOMIC);
- if (buffer == NULL) {
- dev_err(line6->ifcdev, "Out of memory");
- return -ENOMEM;
- }
-
- retval = line6_send_raw_message_async(line6, buffer,
- sizeof(line6_request_version));
- kfree(buffer);
- return retval;
-}
-
-/*
- Send sysex message in pieces of wMaxPacketSize bytes.
-*/
-int line6_send_sysex_message(struct usb_line6 *line6, const char *buffer,
- int size)
-{
- return line6_send_raw_message(line6, buffer,
- size + SYSEX_EXTRA_SIZE) -
- SYSEX_EXTRA_SIZE;
-}
-
-/*
- Allocate buffer for sysex message and prepare header.
- @param code sysex message code
- @param size number of bytes between code and sysex end
-*/
-char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2,
- int size)
-{
- char *buffer = kmalloc(size + SYSEX_EXTRA_SIZE, GFP_ATOMIC);
-
- if (!buffer)
- return NULL;
-
- buffer[0] = LINE6_SYSEX_BEGIN;
- memcpy(buffer + 1, line6_midi_id, sizeof(line6_midi_id));
- buffer[sizeof(line6_midi_id) + 1] = code1;
- buffer[sizeof(line6_midi_id) + 2] = code2;
- buffer[sizeof(line6_midi_id) + 3 + size] = LINE6_SYSEX_END;
- return buffer;
-}
-
-/*
- Notification of data received from the Line6 device.
-*/
-static void line6_data_received(struct urb *urb)
-{
- struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
- struct midi_buffer *mb = &line6->line6midi->midibuf_in;
- int done;
-
- if (urb->status == -ESHUTDOWN)
- return;
-
- done =
- line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
-
- if (done < urb->actual_length) {
- line6_midibuf_ignore(mb, done);
- dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
- done, urb->actual_length);
- }
-
- for (;;) {
- done =
- line6_midibuf_read(mb, line6->buffer_message,
- LINE6_MESSAGE_MAXLEN);
-
- if (done == 0)
- break;
-
- line6->message_length = done;
- line6_midi_receive(line6, line6->buffer_message, done);
-
- switch (le16_to_cpu(line6->usbdev->descriptor.idProduct)) {
- case LINE6_DEVID_BASSPODXT:
- case LINE6_DEVID_BASSPODXTLIVE:
- case LINE6_DEVID_BASSPODXTPRO:
- case LINE6_DEVID_PODXT:
- case LINE6_DEVID_PODXTPRO:
- case LINE6_DEVID_POCKETPOD:
- line6_pod_process_message((struct usb_line6_pod *)
- line6);
- break;
-
- case LINE6_DEVID_PODHD300:
- case LINE6_DEVID_PODHD400:
- case LINE6_DEVID_PODHD500:
- break; /* let userspace handle MIDI */
-
- case LINE6_DEVID_PODXTLIVE:
- switch (line6->interface_number) {
- case PODXTLIVE_INTERFACE_POD:
- line6_pod_process_message((struct usb_line6_pod
- *)line6);
- break;
-
- case PODXTLIVE_INTERFACE_VARIAX:
- line6_variax_process_message((struct
- usb_line6_variax
- *)line6);
- break;
-
- default:
- dev_err(line6->ifcdev,
- "PODxt Live interface %d not supported\n",
- line6->interface_number);
- }
- break;
-
- case LINE6_DEVID_VARIAX:
- line6_variax_process_message((struct usb_line6_variax *)
- line6);
- break;
-
- default:
- MISSING_CASE;
- }
- }
-
- line6_start_listen(line6);
-}
-
-/*
- Send channel number (i.e., switch to a different sound).
-*/
-int line6_send_program(struct usb_line6 *line6, u8 value)
-{
- int retval;
- unsigned char *buffer;
- int partial;
-
- buffer = kmalloc(2, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- buffer[0] = LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST;
- buffer[1] = value;
-
- retval = usb_interrupt_msg(line6->usbdev,
- usb_sndintpipe(line6->usbdev,
- line6->ep_control_write),
- buffer, 2, &partial, LINE6_TIMEOUT * HZ);
-
- if (retval)
- dev_err(line6->ifcdev, "usb_interrupt_msg failed (%d)\n",
- retval);
-
- kfree(buffer);
- return retval;
-}
-
-/*
- Transmit Line6 control parameter.
-*/
-int line6_transmit_parameter(struct usb_line6 *line6, int param, u8 value)
-{
- int retval;
- unsigned char *buffer;
- int partial;
-
- buffer = kmalloc(3, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- buffer[0] = LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST;
- buffer[1] = param;
- buffer[2] = value;
-
- retval = usb_interrupt_msg(line6->usbdev,
- usb_sndintpipe(line6->usbdev,
- line6->ep_control_write),
- buffer, 3, &partial, LINE6_TIMEOUT * HZ);
-
- if (retval)
- dev_err(line6->ifcdev, "usb_interrupt_msg failed (%d)\n",
- retval);
-
- kfree(buffer);
- return retval;
-}
-
-/*
- Read data from device.
-*/
-int line6_read_data(struct usb_line6 *line6, int address, void *data,
- size_t datalen)
-{
- struct usb_device *usbdev = line6->usbdev;
- int ret;
- unsigned char len;
-
- /* query the serial number: */
- ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- (datalen << 8) | 0x21, address,
- NULL, 0, LINE6_TIMEOUT * HZ);
-
- if (ret < 0) {
- dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
- return ret;
- }
-
- /* Wait for data length. We'll get 0xff until length arrives. */
- do {
- ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_IN,
- 0x0012, 0x0000, &len, 1,
- LINE6_TIMEOUT * HZ);
- if (ret < 0) {
- dev_err(line6->ifcdev,
- "receive length failed (error %d)\n", ret);
- return ret;
- }
- } while (len == 0xff);
-
- if (len != datalen) {
- /* should be equal or something went wrong */
- dev_err(line6->ifcdev,
- "length mismatch (expected %d, got %d)\n",
- (int)datalen, (int)len);
- return -EINVAL;
- }
-
- /* receive the result: */
- ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x0013, 0x0000, data, datalen,
- LINE6_TIMEOUT * HZ);
-
- if (ret < 0) {
- dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- Write data to device.
-*/
-int line6_write_data(struct usb_line6 *line6, int address, void *data,
- size_t datalen)
-{
- struct usb_device *usbdev = line6->usbdev;
- int ret;
- unsigned char status;
-
- ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- 0x0022, address, data, datalen,
- LINE6_TIMEOUT * HZ);
-
- if (ret < 0) {
- dev_err(line6->ifcdev,
- "write request failed (error %d)\n", ret);
- return ret;
- }
-
- do {
- ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
- 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_IN,
- 0x0012, 0x0000,
- &status, 1, LINE6_TIMEOUT * HZ);
-
- if (ret < 0) {
- dev_err(line6->ifcdev,
- "receiving status failed (error %d)\n", ret);
- return ret;
- }
- } while (status == 0xff);
-
- if (status != 0) {
- dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- Read Line6 device serial number.
- (POD, TonePort, GuitarPort)
-*/
-int line6_read_serial_number(struct usb_line6 *line6, int *serial_number)
-{
- return line6_read_data(line6, 0x80d0, serial_number,
- sizeof(*serial_number));
-}
-
-/*
- No operation (i.e., unsupported).
-*/
-ssize_t line6_nop_read(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return 0;
-}
-
-/*
- Generic destructor.
-*/
-static void line6_destruct(struct usb_interface *interface)
-{
- struct usb_line6 *line6;
-
- if (interface == NULL)
- return;
- line6 = usb_get_intfdata(interface);
- if (line6 == NULL)
- return;
-
- /* free buffer memory first: */
- kfree(line6->buffer_message);
- kfree(line6->buffer_listen);
-
- /* then free URBs: */
- usb_free_urb(line6->urb_listen);
-
- /* make sure the device isn't destructed twice: */
- usb_set_intfdata(interface, NULL);
-
- /* free interface data: */
- kfree(line6);
-}
-
-/*
- Probe USB device.
-*/
-static int line6_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- int devtype;
- struct usb_device *usbdev;
- struct usb_line6 *line6;
- const struct line6_properties *properties;
- int interface_number, alternate = 0;
- int product;
- int size = 0;
- int ep_read = 0, ep_write = 0;
- int ret;
-
- if (interface == NULL)
- return -ENODEV;
- usbdev = interface_to_usbdev(interface);
- if (usbdev == NULL)
- return -ENODEV;
-
- /* we don't handle multiple configurations */
- if (usbdev->descriptor.bNumConfigurations != 1) {
- ret = -ENODEV;
- goto err_put;
- }
-
- /* check vendor and product id */
- for (devtype = ARRAY_SIZE(line6_id_table) - 1; devtype--;) {
- u16 idVendor = le16_to_cpu(usbdev->descriptor.idVendor);
- u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
-
- if (idVendor == line6_id_table[devtype].idVendor &&
- idProduct == line6_id_table[devtype].idProduct)
- break;
- }
-
- if (devtype < 0) {
- ret = -ENODEV;
- goto err_put;
- }
-
- /* initialize device info: */
- properties = &line6_properties_table[devtype];
- dev_info(&interface->dev, "Line6 %s found\n", properties->name);
- product = le16_to_cpu(usbdev->descriptor.idProduct);
-
- /* query interface number */
- interface_number = interface->cur_altsetting->desc.bInterfaceNumber;
-
- switch (product) {
- case LINE6_DEVID_BASSPODXTLIVE:
- case LINE6_DEVID_PODXTLIVE:
- case LINE6_DEVID_VARIAX:
- alternate = 1;
- break;
-
- case LINE6_DEVID_POCKETPOD:
- switch (interface_number) {
- case 0:
- return -ENODEV; /* this interface has no endpoints */
- case 1:
- alternate = 0;
- break;
- default:
- MISSING_CASE;
- }
- break;
-
- case LINE6_DEVID_PODHD500:
- case LINE6_DEVID_PODX3:
- case LINE6_DEVID_PODX3LIVE:
- switch (interface_number) {
- case 0:
- alternate = 1;
- break;
- case 1:
- alternate = 0;
- break;
- default:
- MISSING_CASE;
- }
- break;
-
- case LINE6_DEVID_BASSPODXT:
- case LINE6_DEVID_BASSPODXTPRO:
- case LINE6_DEVID_PODXT:
- case LINE6_DEVID_PODXTPRO:
- case LINE6_DEVID_PODHD300:
- case LINE6_DEVID_PODHD400:
- alternate = 5;
- break;
-
- case LINE6_DEVID_GUITARPORT:
- case LINE6_DEVID_PODSTUDIO_GX:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_TONEPORT_GX:
- case LINE6_DEVID_TONEPORT_UX1:
- alternate = 2; /* 1..4 seem to be ok */
- break;
-
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_PODSTUDIO_UX2:
- switch (interface_number) {
- case 0:
- /* defaults to 44.1kHz, 16-bit */
- alternate = 2;
- break;
- case 1:
- /* don't know yet what this is ...
- alternate = 1;
- break;
- */
- return -ENODEV;
- default:
- MISSING_CASE;
- }
- break;
-
- default:
- MISSING_CASE;
- ret = -ENODEV;
- goto err_put;
- }
-
- ret = usb_set_interface(usbdev, interface_number, alternate);
- if (ret < 0) {
- dev_err(&interface->dev, "set_interface failed\n");
- goto err_put;
- }
-
- /* initialize device data based on product id: */
- switch (product) {
- case LINE6_DEVID_BASSPODXT:
- case LINE6_DEVID_BASSPODXTLIVE:
- case LINE6_DEVID_BASSPODXTPRO:
- case LINE6_DEVID_PODXT:
- case LINE6_DEVID_PODXTPRO:
- size = sizeof(struct usb_line6_pod);
- ep_read = 0x84;
- ep_write = 0x03;
- break;
-
- case LINE6_DEVID_PODHD300:
- case LINE6_DEVID_PODHD400:
- size = sizeof(struct usb_line6_podhd);
- ep_read = 0x84;
- ep_write = 0x03;
- break;
-
- case LINE6_DEVID_PODHD500:
- size = sizeof(struct usb_line6_podhd);
- ep_read = 0x81;
- ep_write = 0x01;
- break;
-
- case LINE6_DEVID_POCKETPOD:
- size = sizeof(struct usb_line6_pod);
- ep_read = 0x82;
- ep_write = 0x02;
- break;
-
- case LINE6_DEVID_PODX3:
- case LINE6_DEVID_PODX3LIVE:
- /* currently unused! */
- size = sizeof(struct usb_line6_pod);
- ep_read = 0x81;
- ep_write = 0x01;
- break;
-
- case LINE6_DEVID_PODSTUDIO_GX:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
- case LINE6_DEVID_TONEPORT_GX:
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_GUITARPORT:
- size = sizeof(struct usb_line6_toneport);
- /* these don't have a control channel */
- break;
-
- case LINE6_DEVID_PODXTLIVE:
- switch (interface_number) {
- case PODXTLIVE_INTERFACE_POD:
- size = sizeof(struct usb_line6_pod);
- ep_read = 0x84;
- ep_write = 0x03;
- break;
-
- case PODXTLIVE_INTERFACE_VARIAX:
- size = sizeof(struct usb_line6_variax);
- ep_read = 0x86;
- ep_write = 0x05;
- break;
-
- default:
- ret = -ENODEV;
- goto err_put;
- }
- break;
-
- case LINE6_DEVID_VARIAX:
- size = sizeof(struct usb_line6_variax);
- ep_read = 0x82;
- ep_write = 0x01;
- break;
-
- default:
- MISSING_CASE;
- ret = -ENODEV;
- goto err_put;
- }
-
- if (size == 0) {
- dev_err(&interface->dev,
- "driver bug: interface data size not set\n");
- ret = -ENODEV;
- goto err_put;
- }
-
- line6 = kzalloc(size, GFP_KERNEL);
- if (line6 == NULL) {
- ret = -ENODEV;
- goto err_put;
- }
-
- /* store basic data: */
- line6->interface_number = interface_number;
- line6->properties = properties;
- line6->usbdev = usbdev;
- line6->ifcdev = &interface->dev;
- line6->ep_control_read = ep_read;
- line6->ep_control_write = ep_write;
- line6->product = product;
-
- /* get data from endpoint descriptor (see usb_maxpacket): */
- {
- struct usb_host_endpoint *ep;
- unsigned epnum =
- usb_pipeendpoint(usb_rcvintpipe(usbdev, ep_read));
- ep = usbdev->ep_in[epnum];
-
- if (ep != NULL) {
- line6->interval = ep->desc.bInterval;
- line6->max_packet_size =
- le16_to_cpu(ep->desc.wMaxPacketSize);
- } else {
- line6->interval = LINE6_FALLBACK_INTERVAL;
- line6->max_packet_size = LINE6_FALLBACK_MAXPACKETSIZE;
- dev_err(line6->ifcdev,
- "endpoint not available, using fallback values");
- }
- }
-
- usb_set_intfdata(interface, line6);
-
- if (properties->capabilities & LINE6_BIT_CONTROL) {
- /* initialize USB buffers: */
- line6->buffer_listen =
- kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
- if (line6->buffer_listen == NULL) {
- ret = -ENOMEM;
- goto err_destruct;
- }
-
- line6->buffer_message =
- kmalloc(LINE6_MESSAGE_MAXLEN, GFP_KERNEL);
- if (line6->buffer_message == NULL) {
- ret = -ENOMEM;
- goto err_destruct;
- }
-
- line6->urb_listen = usb_alloc_urb(0, GFP_KERNEL);
-
- if (line6->urb_listen == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
- line6_destruct(interface);
- ret = -ENOMEM;
- goto err_destruct;
- }
-
- ret = line6_start_listen(line6);
- if (ret < 0) {
- dev_err(&interface->dev, "%s: usb_submit_urb failed\n",
- __func__);
- goto err_destruct;
- }
- }
-
- /* initialize device data based on product id: */
- switch (product) {
- case LINE6_DEVID_BASSPODXT:
- case LINE6_DEVID_BASSPODXTLIVE:
- case LINE6_DEVID_BASSPODXTPRO:
- case LINE6_DEVID_POCKETPOD:
- case LINE6_DEVID_PODX3:
- case LINE6_DEVID_PODX3LIVE:
- case LINE6_DEVID_PODXT:
- case LINE6_DEVID_PODXTPRO:
- ret = line6_pod_init(interface, (struct usb_line6_pod *)line6);
- break;
-
- case LINE6_DEVID_PODHD300:
- case LINE6_DEVID_PODHD400:
- case LINE6_DEVID_PODHD500:
- ret = line6_podhd_init(interface,
- (struct usb_line6_podhd *)line6);
- break;
-
- case LINE6_DEVID_PODXTLIVE:
- switch (interface_number) {
- case PODXTLIVE_INTERFACE_POD:
- ret =
- line6_pod_init(interface,
- (struct usb_line6_pod *)line6);
- break;
-
- case PODXTLIVE_INTERFACE_VARIAX:
- ret =
- line6_variax_init(interface,
- (struct usb_line6_variax *)line6);
- break;
-
- default:
- dev_err(&interface->dev,
- "PODxt Live interface %d not supported\n",
- interface_number);
- ret = -ENODEV;
- }
-
- break;
-
- case LINE6_DEVID_VARIAX:
- ret =
- line6_variax_init(interface,
- (struct usb_line6_variax *)line6);
- break;
-
- case LINE6_DEVID_PODSTUDIO_GX:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
- case LINE6_DEVID_TONEPORT_GX:
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_GUITARPORT:
- ret =
- line6_toneport_init(interface,
- (struct usb_line6_toneport *)line6);
- break;
-
- default:
- MISSING_CASE;
- ret = -ENODEV;
- }
-
- if (ret < 0)
- goto err_destruct;
-
- ret = sysfs_create_link(&interface->dev.kobj, &usbdev->dev.kobj,
- "usb_device");
- if (ret < 0)
- goto err_destruct;
-
- /* creation of additional special files should go here */
-
- dev_info(&interface->dev, "Line6 %s now attached\n",
- line6->properties->name);
-
- switch (product) {
- case LINE6_DEVID_PODX3:
- case LINE6_DEVID_PODX3LIVE:
- dev_info(&interface->dev,
- "NOTE: the Line6 %s is detected, but not yet supported\n",
- line6->properties->name);
- }
-
- /* increment reference counters: */
- usb_get_intf(interface);
- usb_get_dev(usbdev);
-
- return 0;
-
-err_destruct:
- line6_destruct(interface);
-err_put:
- return ret;
-}
-
-/*
- Line6 device disconnected.
-*/
-static void line6_disconnect(struct usb_interface *interface)
-{
- struct usb_line6 *line6;
- struct usb_device *usbdev;
- int interface_number;
-
- if (interface == NULL)
- return;
- usbdev = interface_to_usbdev(interface);
- if (usbdev == NULL)
- return;
-
- /* removal of additional special files should go here */
-
- sysfs_remove_link(&interface->dev.kobj, "usb_device");
-
- interface_number = interface->cur_altsetting->desc.bInterfaceNumber;
- line6 = usb_get_intfdata(interface);
-
- if (line6 != NULL) {
- if (line6->urb_listen != NULL)
- line6_stop_listen(line6);
-
- if (usbdev != line6->usbdev)
- dev_err(line6->ifcdev,
- "driver bug: inconsistent usb device\n");
-
- switch (le16_to_cpu(line6->usbdev->descriptor.idProduct)) {
- case LINE6_DEVID_BASSPODXT:
- case LINE6_DEVID_BASSPODXTLIVE:
- case LINE6_DEVID_BASSPODXTPRO:
- case LINE6_DEVID_POCKETPOD:
- case LINE6_DEVID_PODX3:
- case LINE6_DEVID_PODX3LIVE:
- case LINE6_DEVID_PODXT:
- case LINE6_DEVID_PODXTPRO:
- line6_pod_disconnect(interface);
- break;
-
- case LINE6_DEVID_PODHD300:
- case LINE6_DEVID_PODHD400:
- case LINE6_DEVID_PODHD500:
- line6_podhd_disconnect(interface);
- break;
-
- case LINE6_DEVID_PODXTLIVE:
- switch (interface_number) {
- case PODXTLIVE_INTERFACE_POD:
- line6_pod_disconnect(interface);
- break;
-
- case PODXTLIVE_INTERFACE_VARIAX:
- line6_variax_disconnect(interface);
- break;
- }
-
- break;
-
- case LINE6_DEVID_VARIAX:
- line6_variax_disconnect(interface);
- break;
-
- case LINE6_DEVID_PODSTUDIO_GX:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
- case LINE6_DEVID_TONEPORT_GX:
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_GUITARPORT:
- line6_toneport_disconnect(interface);
- break;
-
- default:
- MISSING_CASE;
- }
-
- dev_info(&interface->dev, "Line6 %s now disconnected\n",
- line6->properties->name);
- }
-
- line6_destruct(interface);
-
- /* decrement reference counters: */
- usb_put_intf(interface);
- usb_put_dev(usbdev);
-}
-
-#ifdef CONFIG_PM
-
-/*
- Suspend Line6 device.
-*/
-static int line6_suspend(struct usb_interface *interface, pm_message_t message)
-{
- struct usb_line6 *line6 = usb_get_intfdata(interface);
- struct snd_line6_pcm *line6pcm = line6->line6pcm;
-
- snd_power_change_state(line6->card, SNDRV_CTL_POWER_D3hot);
-
- if (line6->properties->capabilities & LINE6_BIT_CONTROL)
- line6_stop_listen(line6);
-
- if (line6pcm != NULL) {
- snd_pcm_suspend_all(line6pcm->pcm);
- line6_pcm_disconnect(line6pcm);
- line6pcm->flags = 0;
- }
-
- return 0;
-}
-
-/*
- Resume Line6 device.
-*/
-static int line6_resume(struct usb_interface *interface)
-{
- struct usb_line6 *line6 = usb_get_intfdata(interface);
-
- if (line6->properties->capabilities & LINE6_BIT_CONTROL)
- line6_start_listen(line6);
-
- snd_power_change_state(line6->card, SNDRV_CTL_POWER_D0);
- return 0;
-}
-
-/*
- Resume Line6 device after reset.
-*/
-static int line6_reset_resume(struct usb_interface *interface)
-{
- struct usb_line6 *line6 = usb_get_intfdata(interface);
-
- switch (le16_to_cpu(line6->usbdev->descriptor.idProduct)) {
- case LINE6_DEVID_PODSTUDIO_GX:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
- case LINE6_DEVID_TONEPORT_GX:
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_GUITARPORT:
- line6_toneport_reset_resume((struct usb_line6_toneport *)line6);
- }
-
- return line6_resume(interface);
-}
-
-#endif /* CONFIG_PM */
-
-static struct usb_driver line6_driver = {
- .name = DRIVER_NAME,
- .probe = line6_probe,
- .disconnect = line6_disconnect,
-#ifdef CONFIG_PM
- .suspend = line6_suspend,
- .resume = line6_resume,
- .reset_resume = line6_reset_resume,
-#endif
- .id_table = line6_id_table,
-};
-
-module_usb_driver(line6_driver);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
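The file removed above follows the stock usb_driver layout: probe takes references and allocates per-device state, disconnect tears things down in reverse order, and the suspend/resume/reset_resume callbacks are compiled in only under CONFIG_PM. Below is a minimal, self-contained sketch of that layout under stated assumptions: all example_* identifiers are invented, only the Line6 vendor id (0x0e41, from usbdefs.h) is real, the product id is a placeholder, and reset_resume simply reuses the plain resume path for brevity.

/*
 * Sketch of the probe/disconnect/PM shape used by the deleted driver.c.
 * example_* names and the product id are placeholders, not driver code.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct example_dev {
	struct usb_device *usbdev;
};

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x0e41, 0xffff) },	/* placeholder product id */
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static int example_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	struct example_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* take a reference on the USB device, as line6_probe() does */
	dev->usbdev = usb_get_dev(interface_to_usbdev(interface));
	usb_set_intfdata(interface, dev);
	dev_info(&interface->dev, "example device attached\n");
	return 0;
}

static void example_disconnect(struct usb_interface *interface)
{
	struct example_dev *dev = usb_get_intfdata(interface);

	/* stop I/O and remove special files here, then drop references */
	usb_set_intfdata(interface, NULL);
	usb_put_dev(dev->usbdev);
	kfree(dev);
	dev_info(&interface->dev, "example device disconnected\n");
}

#ifdef CONFIG_PM
static int example_suspend(struct usb_interface *interface,
			   pm_message_t message)
{
	/* stop URBs and mark the sound card D3hot here */
	return 0;
}

static int example_resume(struct usb_interface *interface)
{
	/* restart URBs and mark the sound card D0 here */
	return 0;
}
#endif

static struct usb_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
#ifdef CONFIG_PM
	.suspend	= example_suspend,
	.resume		= example_resume,
	.reset_resume	= example_resume, /* plain resume reused for brevity */
#endif
	.id_table	= example_id_table,
};
module_usb_driver(example_driver);

MODULE_LICENSE("GPL");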
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
deleted file mode 100644
index a3136b189ee5..000000000000
--- a/drivers/staging/line6/pcm.c
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#include <linux/slab.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#include "audio.h"
-#include "capture.h"
-#include "driver.h"
-#include "playback.h"
-#include "pod.h"
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
-
-static struct snd_line6_pcm *dev2pcm(struct device *dev)
-{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6 *line6 = usb_get_intfdata(interface);
- struct snd_line6_pcm *line6pcm = line6->line6pcm;
- return line6pcm;
-}
-
-/*
- "read" request on "impulse_volume" special file.
-*/
-static ssize_t impulse_volume_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_volume);
-}
-
-/*
- "write" request on "impulse_volume" special file.
-*/
-static ssize_t impulse_volume_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct snd_line6_pcm *line6pcm = dev2pcm(dev);
- int value;
- int ret;
-
- ret = kstrtoint(buf, 10, &value);
- if (ret < 0)
- return ret;
-
- line6pcm->impulse_volume = value;
-
- if (value > 0)
- line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_IMPULSE);
- else
- line6_pcm_release(line6pcm, LINE6_BITS_PCM_IMPULSE);
-
- return count;
-}
-static DEVICE_ATTR_RW(impulse_volume);
-
-/*
- "read" request on "impulse_period" special file.
-*/
-static ssize_t impulse_period_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_period);
-}
-
-/*
- "write" request on "impulse_period" special file.
-*/
-static ssize_t impulse_period_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int value;
- int ret;
-
- ret = kstrtoint(buf, 10, &value);
- if (ret < 0)
- return ret;
-
- dev2pcm(dev)->impulse_period = value;
- return count;
-}
-static DEVICE_ATTR_RW(impulse_period);
-
-#endif
-
-static bool test_flags(unsigned long flags0, unsigned long flags1,
- unsigned long mask)
-{
- return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
-}
-
-int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
-{
- unsigned long flags_old, flags_new, flags_final;
- int err;
-
- do {
- flags_old = ACCESS_ONCE(line6pcm->flags);
- flags_new = flags_old | channels;
- } while (cmpxchg(&line6pcm->flags, flags_old, flags_new) != flags_old);
-
- flags_final = flags_old;
-
- line6pcm->prev_fbuf = NULL;
-
- if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_BUFFER)) {
- /* Invoked multiple times in a row so allocate once only */
- if (!line6pcm->buffer_in) {
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
- if (!line6pcm->buffer_in) {
- err = -ENOMEM;
- goto pcm_acquire_error;
- }
-
- flags_final |= channels & LINE6_BITS_CAPTURE_BUFFER;
- }
- }
-
- if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_STREAM)) {
- /*
- Waiting for completion of active URBs in the stop handler is
- a bug, so we report an error if capturing is restarted too
- soon.
- */
- if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) {
- dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
- return -EBUSY;
- }
-
- line6pcm->count_in = 0;
- line6pcm->prev_fsize = 0;
- err = line6_submit_audio_in_all_urbs(line6pcm);
-
- if (err < 0)
- goto pcm_acquire_error;
-
- flags_final |= channels & LINE6_BITS_CAPTURE_STREAM;
- }
-
- if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_BUFFER)) {
- /* Invoked multiple times in a row so allocate once only */
- if (!line6pcm->buffer_out) {
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
- if (!line6pcm->buffer_out) {
- err = -ENOMEM;
- goto pcm_acquire_error;
- }
-
- flags_final |= channels & LINE6_BITS_PLAYBACK_BUFFER;
- }
- }
-
- if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_STREAM)) {
- /*
- See comment above regarding PCM restart.
- */
- if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) {
- dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
- return -EBUSY;
- }
-
- line6pcm->count_out = 0;
- err = line6_submit_audio_out_all_urbs(line6pcm);
-
- if (err < 0)
- goto pcm_acquire_error;
-
- flags_final |= channels & LINE6_BITS_PLAYBACK_STREAM;
- }
-
- return 0;
-
-pcm_acquire_error:
- /*
- If not all requested resources/streams could be obtained, release
- those which were successfully obtained (if any).
- */
- line6_pcm_release(line6pcm, flags_final & channels);
- return err;
-}
-
-int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels)
-{
- unsigned long flags_old, flags_new;
-
- do {
- flags_old = ACCESS_ONCE(line6pcm->flags);
- flags_new = flags_old & ~channels;
- } while (cmpxchg(&line6pcm->flags, flags_old, flags_new) != flags_old);
-
- if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_STREAM))
- line6_unlink_audio_in_urbs(line6pcm);
-
- if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_BUFFER)) {
- line6_wait_clear_audio_in_urbs(line6pcm);
- line6_free_capture_buffer(line6pcm);
- }
-
- if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_STREAM))
- line6_unlink_audio_out_urbs(line6pcm);
-
- if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_BUFFER)) {
- line6_wait_clear_audio_out_urbs(line6pcm);
- line6_free_playback_buffer(line6pcm);
- }
-
- return 0;
-}
-
-/* trigger callback */
-int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
- struct snd_pcm_substream *s;
- int err;
- unsigned long flags;
-
- spin_lock_irqsave(&line6pcm->lock_trigger, flags);
- clear_bit(LINE6_INDEX_PREPARED, &line6pcm->flags);
-
- snd_pcm_group_for_each_entry(s, substream) {
- switch (s->stream) {
- case SNDRV_PCM_STREAM_PLAYBACK:
- err = snd_line6_playback_trigger(line6pcm, cmd);
-
- if (err < 0) {
- spin_unlock_irqrestore(&line6pcm->lock_trigger,
- flags);
- return err;
- }
-
- break;
-
- case SNDRV_PCM_STREAM_CAPTURE:
- err = snd_line6_capture_trigger(line6pcm, cmd);
-
- if (err < 0) {
- spin_unlock_irqrestore(&line6pcm->lock_trigger,
- flags);
- return err;
- }
-
- break;
-
- default:
- dev_err(line6pcm->line6->ifcdev,
- "Unknown stream direction %d\n", s->stream);
- }
- }
-
- spin_unlock_irqrestore(&line6pcm->lock_trigger, flags);
- return 0;
-}
-
-/* control info callback */
-static int snd_line6_control_playback_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 2;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 256;
- return 0;
-}
-
-/* control get callback */
-static int snd_line6_control_playback_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int i;
- struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
-
- for (i = 2; i--;)
- ucontrol->value.integer.value[i] = line6pcm->volume_playback[i];
-
- return 0;
-}
-
-/* control put callback */
-static int snd_line6_control_playback_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int i, changed = 0;
- struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
-
- for (i = 2; i--;)
- if (line6pcm->volume_playback[i] !=
- ucontrol->value.integer.value[i]) {
- line6pcm->volume_playback[i] =
- ucontrol->value.integer.value[i];
- changed = 1;
- }
-
- return changed;
-}
-
-/* control definition */
-static struct snd_kcontrol_new line6_control_playback = {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Volume",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_line6_control_playback_info,
- .get = snd_line6_control_playback_get,
- .put = snd_line6_control_playback_put
-};
-
-/*
- Clean up the PCM device.
-*/
-static void line6_cleanup_pcm(struct snd_pcm *pcm)
-{
- int i;
- struct snd_line6_pcm *line6pcm = snd_pcm_chip(pcm);
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- device_remove_file(line6pcm->line6->ifcdev, &dev_attr_impulse_volume);
- device_remove_file(line6pcm->line6->ifcdev, &dev_attr_impulse_period);
-#endif
-
- for (i = LINE6_ISO_BUFFERS; i--;) {
- if (line6pcm->urb_audio_out[i]) {
- usb_kill_urb(line6pcm->urb_audio_out[i]);
- usb_free_urb(line6pcm->urb_audio_out[i]);
- }
- if (line6pcm->urb_audio_in[i]) {
- usb_kill_urb(line6pcm->urb_audio_in[i]);
- usb_free_urb(line6pcm->urb_audio_in[i]);
- }
- }
-}
-
-/* create a PCM device */
-static int snd_line6_new_pcm(struct snd_line6_pcm *line6pcm)
-{
- struct snd_pcm *pcm;
- int err;
-
- err = snd_pcm_new(line6pcm->line6->card,
- (char *)line6pcm->line6->properties->name,
- 0, 1, 1, &pcm);
- if (err < 0)
- return err;
-
- pcm->private_data = line6pcm;
- pcm->private_free = line6_cleanup_pcm;
- line6pcm->pcm = pcm;
- strcpy(pcm->name, line6pcm->line6->properties->name);
-
- /* set operators */
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_line6_playback_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_line6_capture_ops);
-
- /* pre-allocation of buffers */
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data
- (GFP_KERNEL), 64 * 1024,
- 128 * 1024);
-
- return 0;
-}
-
-/* PCM device destructor */
-static int snd_line6_pcm_free(struct snd_device *device)
-{
- return 0;
-}
-
-/*
- Stop substream if still running.
-*/
-static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
-{
- if (substream->runtime && snd_pcm_running(substream)) {
- snd_pcm_stream_lock_irq(substream);
- snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
- snd_pcm_stream_unlock_irq(substream);
- }
-}
-
-/*
- Stop PCM stream.
-*/
-void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm)
-{
- pcm_disconnect_substream(get_substream
- (line6pcm, SNDRV_PCM_STREAM_CAPTURE));
- pcm_disconnect_substream(get_substream
- (line6pcm, SNDRV_PCM_STREAM_PLAYBACK));
- line6_unlink_wait_clear_audio_out_urbs(line6pcm);
- line6_unlink_wait_clear_audio_in_urbs(line6pcm);
-}
-
-/*
- Create and register the PCM device and mixer entries.
- Create URBs for playback and capture.
-*/
-int line6_init_pcm(struct usb_line6 *line6,
- struct line6_pcm_properties *properties)
-{
- static struct snd_device_ops pcm_ops = {
- .dev_free = snd_line6_pcm_free,
- };
-
- int err;
- int ep_read = 0, ep_write = 0;
- struct snd_line6_pcm *line6pcm;
-
- if (!(line6->properties->capabilities & LINE6_BIT_PCM))
- return 0; /* skip PCM initialization and report success */
-
- /* initialize PCM subsystem based on product id: */
- switch (line6->product) {
- case LINE6_DEVID_BASSPODXT:
- case LINE6_DEVID_BASSPODXTLIVE:
- case LINE6_DEVID_BASSPODXTPRO:
- case LINE6_DEVID_PODXT:
- case LINE6_DEVID_PODXTLIVE:
- case LINE6_DEVID_PODXTPRO:
- case LINE6_DEVID_PODHD300:
- case LINE6_DEVID_PODHD400:
- ep_read = 0x82;
- ep_write = 0x01;
- break;
-
- case LINE6_DEVID_PODHD500:
- case LINE6_DEVID_PODX3:
- case LINE6_DEVID_PODX3LIVE:
- ep_read = 0x86;
- ep_write = 0x02;
- break;
-
- case LINE6_DEVID_POCKETPOD:
- ep_read = 0x82;
- ep_write = 0x02;
- break;
-
- case LINE6_DEVID_GUITARPORT:
- case LINE6_DEVID_PODSTUDIO_GX:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
- case LINE6_DEVID_TONEPORT_GX:
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- ep_read = 0x82;
- ep_write = 0x01;
- break;
-
- /* this is for interface_number == 1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_PODSTUDIO_UX2:
- ep_read = 0x87;
- ep_write = 0x00;
- break; */
-
- default:
- MISSING_CASE;
- }
-
- line6pcm = kzalloc(sizeof(*line6pcm), GFP_KERNEL);
-
- if (line6pcm == NULL)
- return -ENOMEM;
-
- line6pcm->volume_playback[0] = line6pcm->volume_playback[1] = 255;
- line6pcm->volume_monitor = 255;
- line6pcm->line6 = line6;
- line6pcm->ep_audio_read = ep_read;
- line6pcm->ep_audio_write = ep_write;
-
- /* Read and write buffers are sized identically, so choose minimum */
- line6pcm->max_packet_size = min(
- usb_maxpacket(line6->usbdev,
- usb_rcvisocpipe(line6->usbdev, ep_read), 0),
- usb_maxpacket(line6->usbdev,
- usb_sndisocpipe(line6->usbdev, ep_write), 1));
-
- line6pcm->properties = properties;
- line6->line6pcm = line6pcm;
-
- /* PCM device: */
- err = snd_device_new(line6->card, SNDRV_DEV_PCM, line6, &pcm_ops);
- if (err < 0)
- return err;
-
- err = snd_line6_new_pcm(line6pcm);
- if (err < 0)
- return err;
-
- spin_lock_init(&line6pcm->lock_audio_out);
- spin_lock_init(&line6pcm->lock_audio_in);
- spin_lock_init(&line6pcm->lock_trigger);
-
- err = line6_create_audio_out_urbs(line6pcm);
- if (err < 0)
- return err;
-
- err = line6_create_audio_in_urbs(line6pcm);
- if (err < 0)
- return err;
-
- /* mixer: */
- err =
- snd_ctl_add(line6->card,
- snd_ctl_new1(&line6_control_playback, line6pcm));
- if (err < 0)
- return err;
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- /* impulse response test: */
- err = device_create_file(line6->ifcdev, &dev_attr_impulse_volume);
- if (err < 0)
- return err;
-
- err = device_create_file(line6->ifcdev, &dev_attr_impulse_period);
- if (err < 0)
- return err;
-
- line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-#endif
-
- return 0;
-}
-
-/* prepare pcm callback */
-int snd_line6_prepare(struct snd_pcm_substream *substream)
-{
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- switch (substream->stream) {
- case SNDRV_PCM_STREAM_PLAYBACK:
- if ((line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM) == 0)
- line6_unlink_wait_clear_audio_out_urbs(line6pcm);
-
- break;
-
- case SNDRV_PCM_STREAM_CAPTURE:
- if ((line6pcm->flags & LINE6_BITS_CAPTURE_STREAM) == 0)
- line6_unlink_wait_clear_audio_in_urbs(line6pcm);
-
- break;
-
- default:
- MISSING_CASE;
- }
-
- if (!test_and_set_bit(LINE6_INDEX_PREPARED, &line6pcm->flags)) {
- line6pcm->count_out = 0;
- line6pcm->pos_out = 0;
- line6pcm->pos_out_done = 0;
- line6pcm->bytes_out = 0;
- line6pcm->count_in = 0;
- line6pcm->pos_in_done = 0;
- line6pcm->bytes_in = 0;
- }
-
- return 0;
-}
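line6_pcm_acquire() and line6_pcm_release() above implement a small lock-free ownership scheme: the requested channel bits are OR-ed into line6pcm->flags with an ACCESS_ONCE()/cmpxchg() loop, each shared resource is set up only on a 0-to-1 transition of its bit, and a failure part-way through rolls back the bits the call had just claimed. The following is a compact userspace sketch of that pattern under stated assumptions: C11 atomics stand in for the kernel primitives, and BIT_BUFFER/BIT_STREAM are invented stand-ins for the LINE6_BITS_* masks.

/*
 * Userspace sketch of the acquire/rollback pattern in line6_pcm_acquire().
 * Build: cc -std=c11 -o flags_sketch flags_sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT_BUFFER	(1u << 0)	/* stand-in for LINE6_BITS_*_BUFFER */
#define BIT_STREAM	(1u << 1)	/* stand-in for LINE6_BITS_*_STREAM */

static _Atomic unsigned int flags;

/* true if 'mask' was clear before the update and set afterwards */
static bool transitioned(unsigned int old, unsigned int new_, unsigned int mask)
{
	return (old & mask) == 0 && (new_ & mask) != 0;
}

static int acquire(unsigned int channels, bool fail_stream)
{
	unsigned int old = atomic_load(&flags);
	unsigned int new_;

	/* atomically OR the requested bits in, like the cmpxchg() loop */
	do {
		new_ = old | channels;
	} while (!atomic_compare_exchange_weak(&flags, &old, new_));

	if (transitioned(old, new_, BIT_BUFFER)) {
		/* first claimant would allocate the shared buffer here */
	}

	if (transitioned(old, new_, BIT_STREAM) && fail_stream)
		goto error;	/* e.g. URB submission failed */

	return 0;

error:
	/* roll back every bit this call newly claimed; the driver funnels
	 * the equivalent cleanup through line6_pcm_release() */
	atomic_fetch_and(&flags, ~(new_ & ~old));
	return -1;
}

int main(void)
{
	int err = acquire(BIT_BUFFER | BIT_STREAM, true);

	printf("acquire: %d, flags now: %#x\n", err, atomic_load(&flags));
	return 0;
}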
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
deleted file mode 100644
index 6aa0d46a2890..000000000000
--- a/drivers/staging/line6/pcm.h
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-/*
- PCM interface to POD series devices.
-*/
-
-#ifndef PCM_H
-#define PCM_H
-
-#include <sound/pcm.h>
-
-#include "driver.h"
-#include "usbdefs.h"
-
-/* number of URBs */
-#define LINE6_ISO_BUFFERS 2
-
-/*
- number of USB frames per URB
- The Line6 Windows driver always transmits two frames per packet, but
- the Linux driver performs significantly better (i.e., lower latency)
- with only one frame per packet.
-*/
-#define LINE6_ISO_PACKETS 1
-
-/* in a "full speed" device (such as the PODxt Pro) this means 1ms */
-#define LINE6_ISO_INTERVAL 1
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
-#define LINE6_IMPULSE_DEFAULT_PERIOD 100
-#endif
-
-/*
- Get substream from Line6 PCM data structure
-*/
-#define get_substream(line6pcm, stream) \
- (line6pcm->pcm->streams[stream].substream)
-
-/*
- PCM mode bits.
-
- There are several features of the Line6 USB driver which require PCM
- data to be exchanged with the device:
- *) PCM playback and capture via ALSA
- *) software monitoring (for devices without hardware monitoring)
- *) optional impulse response measurement
- However, from the device's point of view, there is just a single
- capture and playback stream, which must be shared between these
- subsystems. It is therefore necessary to maintain the state of the
- subsystems with respect to PCM usage. We define several constants of
- the form LINE6_BIT_PCM_<subsystem>_<direction>_<resource> with the
- following meanings:
- *) <subsystem> is one of
- -) ALSA: PCM playback and capture via ALSA
- -) MONITOR: software monitoring
- -) IMPULSE: optional impulse response measurement
- *) <direction> is one of
- -) PLAYBACK: audio output (from host to device)
- -) CAPTURE: audio input (from device to host)
- *) <resource> is one of
- -) BUFFER: buffer required by PCM data stream
- -) STREAM: actual PCM data stream
-
- The subsystems call line6_pcm_acquire() to acquire the (shared)
- resources needed for a particular operation (e.g., allocate the buffer
- for ALSA playback or start the capture stream for software monitoring).
- When a resource is no longer needed, it is released by calling
- line6_pcm_release(). Buffer allocation and stream startup are handled
- separately to allow the ALSA kernel driver to perform them at
- appropriate places (since the callback which starts a PCM stream is not
- allowed to sleep).
-*/
-enum {
- /* individual bit indices: */
- LINE6_INDEX_PCM_ALSA_PLAYBACK_BUFFER,
- LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM,
- LINE6_INDEX_PCM_ALSA_CAPTURE_BUFFER,
- LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
- LINE6_INDEX_PCM_MONITOR_PLAYBACK_BUFFER,
- LINE6_INDEX_PCM_MONITOR_PLAYBACK_STREAM,
- LINE6_INDEX_PCM_MONITOR_CAPTURE_BUFFER,
- LINE6_INDEX_PCM_MONITOR_CAPTURE_STREAM,
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_INDEX_PCM_IMPULSE_PLAYBACK_BUFFER,
- LINE6_INDEX_PCM_IMPULSE_PLAYBACK_STREAM,
- LINE6_INDEX_PCM_IMPULSE_CAPTURE_BUFFER,
- LINE6_INDEX_PCM_IMPULSE_CAPTURE_STREAM,
-#endif
- LINE6_INDEX_PAUSE_PLAYBACK,
- LINE6_INDEX_PREPARED,
-
- /* individual bit masks: */
- LINE6_BIT(PCM_ALSA_PLAYBACK_BUFFER),
- LINE6_BIT(PCM_ALSA_PLAYBACK_STREAM),
- LINE6_BIT(PCM_ALSA_CAPTURE_BUFFER),
- LINE6_BIT(PCM_ALSA_CAPTURE_STREAM),
- LINE6_BIT(PCM_MONITOR_PLAYBACK_BUFFER),
- LINE6_BIT(PCM_MONITOR_PLAYBACK_STREAM),
- LINE6_BIT(PCM_MONITOR_CAPTURE_BUFFER),
- LINE6_BIT(PCM_MONITOR_CAPTURE_STREAM),
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_BIT(PCM_IMPULSE_PLAYBACK_BUFFER),
- LINE6_BIT(PCM_IMPULSE_PLAYBACK_STREAM),
- LINE6_BIT(PCM_IMPULSE_CAPTURE_BUFFER),
- LINE6_BIT(PCM_IMPULSE_CAPTURE_STREAM),
-#endif
- LINE6_BIT(PAUSE_PLAYBACK),
- LINE6_BIT(PREPARED),
-
- /* combined bit masks (by operation): */
- LINE6_BITS_PCM_ALSA_BUFFER =
- LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
- LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER,
-
- LINE6_BITS_PCM_ALSA_STREAM =
- LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
- LINE6_BIT_PCM_ALSA_CAPTURE_STREAM,
-
- LINE6_BITS_PCM_MONITOR =
- LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER |
- LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM |
- LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER |
- LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_BITS_PCM_IMPULSE =
- LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
- LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
- LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
- LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM,
-#endif
-
- /* combined bit masks (by direction): */
- LINE6_BITS_PLAYBACK_BUFFER =
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
-#endif
- LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
- LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER ,
-
- LINE6_BITS_PLAYBACK_STREAM =
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
-#endif
- LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
- LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM ,
-
- LINE6_BITS_CAPTURE_BUFFER =
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
-#endif
- LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER |
- LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER ,
-
- LINE6_BITS_CAPTURE_STREAM =
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM |
-#endif
- LINE6_BIT_PCM_ALSA_CAPTURE_STREAM |
- LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
-
- LINE6_BITS_STREAM =
- LINE6_BITS_PLAYBACK_STREAM |
- LINE6_BITS_CAPTURE_STREAM
-};
-
-struct line6_pcm_properties {
- struct snd_pcm_hardware snd_line6_playback_hw, snd_line6_capture_hw;
- struct snd_pcm_hw_constraint_ratdens snd_line6_rates;
- int bytes_per_frame;
-};
-
-struct snd_line6_pcm {
- /**
- Pointer back to the Line6 driver data structure.
- */
- struct usb_line6 *line6;
-
- /**
- Properties.
- */
- struct line6_pcm_properties *properties;
-
- /**
- ALSA pcm stream
- */
- struct snd_pcm *pcm;
-
- /**
- URBs for audio playback.
- */
- struct urb *urb_audio_out[LINE6_ISO_BUFFERS];
-
- /**
- URBs for audio capture.
- */
- struct urb *urb_audio_in[LINE6_ISO_BUFFERS];
-
- /**
- Temporary buffer for playback.
- Since the packet size is not known in advance, this buffer is
- large enough to store maximum size packets.
- */
- unsigned char *buffer_out;
-
- /**
- Temporary buffer for capture.
- Since the packet size is not known in advance, this buffer is
- large enough to store maximum size packets.
- */
- unsigned char *buffer_in;
-
- /**
- Previously captured frame (for software monitoring).
- */
- unsigned char *prev_fbuf;
-
- /**
- Size of previously captured frame (for software monitoring).
- */
- int prev_fsize;
-
- /**
- Free frame position in the playback buffer.
- */
- snd_pcm_uframes_t pos_out;
-
- /**
- Count processed bytes for playback.
- This is modulo period size (to determine when a period is
- finished).
- */
- unsigned bytes_out;
-
- /**
- Counter to create desired playback sample rate.
- */
- unsigned count_out;
-
- /**
- Playback period size in bytes
- */
- unsigned period_out;
-
- /**
- Processed frame position in the playback buffer.
- The contents of the output ring buffer have been consumed by
- the USB subsystem (i.e., sent to the USB device) up to this
- position.
- */
- snd_pcm_uframes_t pos_out_done;
-
- /**
- Count processed bytes for capture.
- This is modulo period size (to determine when a period is
- finished).
- */
- unsigned bytes_in;
-
- /**
- Counter to create desired capture sample rate.
- */
- unsigned count_in;
-
- /**
- Capture period size in bytes
- */
- unsigned period_in;
-
- /**
- Processed frame position in the capture buffer.
- The contents of the input ring buffer have been filled by
- the USB subsystem (i.e., received from the USB device) up to
- this position.
- */
- snd_pcm_uframes_t pos_in_done;
-
- /**
- Bit mask of active playback URBs.
- */
- unsigned long active_urb_out;
-
- /**
- Maximum size of USB packet.
- */
- int max_packet_size;
-
- /**
- USB endpoint for listening to audio data.
- */
- int ep_audio_read;
-
- /**
- USB endpoint for writing audio data.
- */
- int ep_audio_write;
-
- /**
- Bit mask of active capture URBs.
- */
- unsigned long active_urb_in;
-
- /**
- Bit mask of playback URBs currently being unlinked.
- */
- unsigned long unlink_urb_out;
-
- /**
- Bit mask of capture URBs currently being unlinked.
- */
- unsigned long unlink_urb_in;
-
- /**
- Spin lock to protect updates of the playback buffer positions (not
- contents!)
- */
- spinlock_t lock_audio_out;
-
- /**
- Spin lock to protect updates of the capture buffer positions (not
- contents!)
- */
- spinlock_t lock_audio_in;
-
- /**
- Spin lock to protect trigger.
- */
- spinlock_t lock_trigger;
-
- /**
- PCM playback volume (left and right).
- */
- int volume_playback[2];
-
- /**
- PCM monitor volume.
- */
- int volume_monitor;
-
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- /**
- Volume of impulse response test signal (if zero, test is disabled).
- */
- int impulse_volume;
-
- /**
- Period of impulse response test signal.
- */
- int impulse_period;
-
- /**
- Counter for impulse response test signal.
- */
- int impulse_count;
-#endif
-
- /**
- Several status bits (see LINE6_BIT_*).
- */
- unsigned long flags;
-
- int last_frame_in, last_frame_out;
-};
-
-extern int line6_init_pcm(struct usb_line6 *line6,
- struct line6_pcm_properties *properties);
-extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd);
-extern int snd_line6_prepare(struct snd_pcm_substream *substream);
-extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
-extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels);
-extern int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels);
-
-#endif
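The long comment above the flag enum in pcm.h spells out the usage contract: each subsystem passes the buffer and/or stream bits it needs to line6_pcm_acquire() and later hands exactly the same bits back to line6_pcm_release(); buffers are allocated and URBs submitted only for the first holder, and torn down when the last holder lets go. A short usage sketch against the declarations in this header follows; monitor_start/monitor_stop are invented callers, not functions from the driver.

#include "pcm.h"

/* Invented caller pairing acquire and release for the software monitor. */
static int monitor_start(struct snd_line6_pcm *line6pcm)
{
	/* claims the capture/playback buffer and stream bits for MONITOR */
	return line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_MONITOR);
}

static void monitor_stop(struct snd_line6_pcm *line6pcm)
{
	/* must release exactly the bits acquired above */
	line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
}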
diff --git a/drivers/staging/line6/pod.h b/drivers/staging/line6/pod.h
deleted file mode 100644
index 3e3f1671337a..000000000000
--- a/drivers/staging/line6/pod.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#ifndef POD_H
-#define POD_H
-
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-
-#include <sound/core.h>
-
-#include "driver.h"
-
-/*
- PODxt Live interfaces
-*/
-#define PODXTLIVE_INTERFACE_POD 0
-#define PODXTLIVE_INTERFACE_VARIAX 1
-
-/*
- Locate name in binary program dump
-*/
-#define POD_NAME_OFFSET 0
-#define POD_NAME_LENGTH 16
-
-/*
- Other constants
-*/
-#define POD_CONTROL_SIZE 0x80
-#define POD_BUFSIZE_DUMPREQ 7
-#define POD_STARTUP_DELAY 1000
-
-/*
- Stages of POD startup procedure
-*/
-enum {
- POD_STARTUP_INIT = 1,
- POD_STARTUP_VERSIONREQ,
- POD_STARTUP_WORKQUEUE,
- POD_STARTUP_SETUP,
- POD_STARTUP_LAST = POD_STARTUP_SETUP - 1
-};
-
-struct usb_line6_pod {
- /**
- Generic Line6 USB data.
- */
- struct usb_line6 line6;
-
- /**
- Instrument monitor level.
- */
- int monitor_level;
-
- /**
- Timer for device initialization.
- */
- struct timer_list startup_timer;
-
- /**
- Work handler for device initialization.
- */
- struct work_struct startup_work;
-
- /**
- Current progress in startup procedure.
- */
- int startup_progress;
-
- /**
- Serial number of device.
- */
- int serial_number;
-
- /**
- Firmware version (x 100).
- */
- int firmware_version;
-
- /**
- Device ID.
- */
- int device_id;
-};
-
-extern void line6_pod_disconnect(struct usb_interface *interface);
-extern int line6_pod_init(struct usb_interface *interface,
- struct usb_line6_pod *pod);
-extern void line6_pod_process_message(struct usb_line6_pod *pod);
-extern void line6_pod_transmit_parameter(struct usb_line6_pod *pod, int param,
- u8 value);
-
-#endif
diff --git a/drivers/staging/line6/podhd.c b/drivers/staging/line6/podhd.c
deleted file mode 100644
index 7ef45437b4f2..000000000000
--- a/drivers/staging/line6/podhd.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Line6 Pod HD
- *
- * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-
-#include "audio.h"
-#include "driver.h"
-#include "pcm.h"
-#include "podhd.h"
-
-#define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
-
-static struct snd_ratden podhd_ratden = {
- .num_min = 48000,
- .num_max = 48000,
- .num_step = 1,
- .den = 1,
-};
-
-static struct line6_pcm_properties podhd_pcm_properties = {
- .snd_line6_playback_hw = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE |
-#ifdef CONFIG_PM
- SNDRV_PCM_INFO_RESUME |
-#endif
- SNDRV_PCM_INFO_SYNC_START),
- .formats = SNDRV_PCM_FMTBIT_S24_3LE,
- .rates = SNDRV_PCM_RATE_48000,
- .rate_min = 48000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 60000,
- .period_bytes_min = 64,
- .period_bytes_max = 8192,
- .periods_min = 1,
- .periods_max = 1024},
- .snd_line6_capture_hw = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP_VALID |
-#ifdef CONFIG_PM
- SNDRV_PCM_INFO_RESUME |
-#endif
- SNDRV_PCM_INFO_SYNC_START),
- .formats = SNDRV_PCM_FMTBIT_S24_3LE,
- .rates = SNDRV_PCM_RATE_48000,
- .rate_min = 48000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 60000,
- .period_bytes_min = 64,
- .period_bytes_max = 8192,
- .periods_min = 1,
- .periods_max = 1024},
- .snd_line6_rates = {
- .nrats = 1,
- .rats = &podhd_ratden},
- .bytes_per_frame = PODHD_BYTES_PER_FRAME
-};
-
-/*
- POD HD destructor.
-*/
-static void podhd_destruct(struct usb_interface *interface)
-{
- struct usb_line6_podhd *podhd = usb_get_intfdata(interface);
-
- if (podhd == NULL)
- return;
- line6_cleanup_audio(&podhd->line6);
-}
-
-/*
- Try to init POD HD device.
-*/
-static int podhd_try_init(struct usb_interface *interface,
- struct usb_line6_podhd *podhd)
-{
- int err;
- struct usb_line6 *line6 = &podhd->line6;
-
- if ((interface == NULL) || (podhd == NULL))
- return -ENODEV;
-
- /* initialize audio system: */
- err = line6_init_audio(line6);
- if (err < 0)
- return err;
-
- /* initialize MIDI subsystem: */
- err = line6_init_midi(line6);
- if (err < 0)
- return err;
-
- /* initialize PCM subsystem: */
- err = line6_init_pcm(line6, &podhd_pcm_properties);
- if (err < 0)
- return err;
-
- /* register USB audio system: */
- err = line6_register_audio(line6);
- return err;
-}
-
-/*
- Init POD HD device (and clean up in case of failure).
-*/
-int line6_podhd_init(struct usb_interface *interface,
- struct usb_line6_podhd *podhd)
-{
- int err = podhd_try_init(interface, podhd);
-
- if (err < 0)
- podhd_destruct(interface);
-
- return err;
-}
-
-/*
- POD HD device disconnected.
-*/
-void line6_podhd_disconnect(struct usb_interface *interface)
-{
- struct usb_line6_podhd *podhd;
-
- if (interface == NULL)
- return;
- podhd = usb_get_intfdata(interface);
-
- if (podhd != NULL) {
- struct snd_line6_pcm *line6pcm = podhd->line6.line6pcm;
-
- if (line6pcm != NULL)
- line6_pcm_disconnect(line6pcm);
- }
-
- podhd_destruct(interface);
-}
diff --git a/drivers/staging/line6/podhd.h b/drivers/staging/line6/podhd.h
deleted file mode 100644
index 652f74056bb9..000000000000
--- a/drivers/staging/line6/podhd.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Line6 Pod HD
- *
- * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#ifndef PODHD_H
-#define PODHD_H
-
-#include <linux/usb.h>
-
-#include "driver.h"
-
-struct usb_line6_podhd {
- /**
- Generic Line6 USB data.
- */
- struct usb_line6 line6;
-};
-
-extern void line6_podhd_disconnect(struct usb_interface *interface);
-extern int line6_podhd_init(struct usb_interface *interface,
- struct usb_line6_podhd *podhd);
-
-#endif /* PODHD_H */
diff --git a/drivers/staging/line6/revision.h b/drivers/staging/line6/revision.h
deleted file mode 100644
index b4eee2b73831..000000000000
--- a/drivers/staging/line6/revision.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef DRIVER_REVISION
-/* current subversion revision */
-#define DRIVER_REVISION " (904)"
-#endif
diff --git a/drivers/staging/line6/toneport.h b/drivers/staging/line6/toneport.h
deleted file mode 100644
index 8576b7263648..000000000000
--- a/drivers/staging/line6/toneport.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#ifndef TONEPORT_H
-#define TONEPORT_H
-
-#include <linux/usb.h>
-#include <sound/core.h>
-
-#include "driver.h"
-
-struct usb_line6_toneport {
- /**
- Generic Line6 USB data.
- */
- struct usb_line6 line6;
-
- /**
- Source selector.
- */
- int source;
-
- /**
- Serial number of device.
- */
- int serial_number;
-
- /**
- Firmware version (x 100).
- */
- int firmware_version;
-
- /**
- Timer for delayed PCM startup.
- */
- struct timer_list timer;
-};
-
-extern void line6_toneport_disconnect(struct usb_interface *interface);
-extern int line6_toneport_init(struct usb_interface *interface,
- struct usb_line6_toneport *toneport);
-extern void line6_toneport_reset_resume(struct usb_line6_toneport *toneport);
-
-#endif
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
deleted file mode 100644
index 2d1cc472bead..000000000000
--- a/drivers/staging/line6/usbdefs.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2005-2008 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#ifndef USBDEFS_H
-#define USBDEFS_H
-
-#define LINE6_VENDOR_ID 0x0e41
-
-#define USB_INTERVALS_PER_SECOND 1000
-
-/*
- Device ids.
-*/
-#define LINE6_DEVID_BASSPODXT 0x4250
-#define LINE6_DEVID_BASSPODXTLIVE 0x4642
-#define LINE6_DEVID_BASSPODXTPRO 0x4252
-#define LINE6_DEVID_GUITARPORT 0x4750
-#define LINE6_DEVID_POCKETPOD 0x5051
-#define LINE6_DEVID_PODHD300 0x5057
-#define LINE6_DEVID_PODHD400 0x5058
-#define LINE6_DEVID_PODHD500 0x414D
-#define LINE6_DEVID_PODSTUDIO_GX 0x4153
-#define LINE6_DEVID_PODSTUDIO_UX1 0x4150
-#define LINE6_DEVID_PODSTUDIO_UX2 0x4151
-#define LINE6_DEVID_PODX3 0x414a
-#define LINE6_DEVID_PODX3LIVE 0x414b
-#define LINE6_DEVID_PODXT 0x5044
-#define LINE6_DEVID_PODXTLIVE 0x4650
-#define LINE6_DEVID_PODXTPRO 0x5050
-#define LINE6_DEVID_TONEPORT_GX 0x4147
-#define LINE6_DEVID_TONEPORT_UX1 0x4141
-#define LINE6_DEVID_TONEPORT_UX2 0x4142
-#define LINE6_DEVID_VARIAX 0x534d
-
-#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x
-
-enum {
- LINE6_INDEX_BASSPODXT,
- LINE6_INDEX_BASSPODXTLIVE,
- LINE6_INDEX_BASSPODXTPRO,
- LINE6_INDEX_GUITARPORT,
- LINE6_INDEX_POCKETPOD,
- LINE6_INDEX_PODHD300,
- LINE6_INDEX_PODHD400,
- LINE6_INDEX_PODHD500,
- LINE6_INDEX_PODSTUDIO_GX,
- LINE6_INDEX_PODSTUDIO_UX1,
- LINE6_INDEX_PODSTUDIO_UX2,
- LINE6_INDEX_PODX3,
- LINE6_INDEX_PODX3LIVE,
- LINE6_INDEX_PODXT,
- LINE6_INDEX_PODXTLIVE,
- LINE6_INDEX_PODXTPRO,
- LINE6_INDEX_TONEPORT_GX,
- LINE6_INDEX_TONEPORT_UX1,
- LINE6_INDEX_TONEPORT_UX2,
- LINE6_INDEX_VARIAX,
-
- LINE6_BIT(BASSPODXT),
- LINE6_BIT(BASSPODXTLIVE),
- LINE6_BIT(BASSPODXTPRO),
- LINE6_BIT(GUITARPORT),
- LINE6_BIT(POCKETPOD),
- LINE6_BIT(PODHD300),
- LINE6_BIT(PODHD400),
- LINE6_BIT(PODHD500),
- LINE6_BIT(PODSTUDIO_GX),
- LINE6_BIT(PODSTUDIO_UX1),
- LINE6_BIT(PODSTUDIO_UX2),
- LINE6_BIT(PODX3),
- LINE6_BIT(PODX3LIVE),
- LINE6_BIT(PODXT),
- LINE6_BIT(PODXTLIVE),
- LINE6_BIT(PODXTPRO),
- LINE6_BIT(TONEPORT_GX),
- LINE6_BIT(TONEPORT_UX1),
- LINE6_BIT(TONEPORT_UX2),
- LINE6_BIT(VARIAX),
-
- LINE6_BITS_PRO = LINE6_BIT_BASSPODXTPRO | LINE6_BIT_PODXTPRO,
- LINE6_BITS_LIVE = LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_PODXTLIVE |
- LINE6_BIT_PODX3LIVE,
- LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE |
- LINE6_BIT_PODXTPRO,
- LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
- LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
- LINE6_BIT_PODHD400 |
- LINE6_BIT_PODHD500,
- LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT |
- LINE6_BIT_BASSPODXTLIVE |
- LINE6_BIT_BASSPODXTPRO
-};
-
-/* device supports settings parameter via USB */
-#define LINE6_BIT_CONTROL (1 << 0)
-/* device supports PCM input/output via USB */
-#define LINE6_BIT_PCM (1 << 1)
-/* device supports hardware monitoring */
-#define LINE6_BIT_HWMON (1 << 2)
-
-#define LINE6_BIT_CTRL_PCM_HW (LINE6_BIT_CONTROL | \
- LINE6_BIT_PCM | \
- LINE6_BIT_HWMON)
-
-#define LINE6_FALLBACK_INTERVAL 10
-#define LINE6_FALLBACK_MAXPACKETSIZE 16
-
-#endif
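The LINE6_BIT() macro above, reused by pcm.h for its PCM flag enum, pastes a name into a pair of enumerators so that every LINE6_INDEX_* value gets a matching LINE6_BIT_* mask equal to 1 << index. A standalone sketch of the same idiom with invented DEMO_* names:

/* Token-pasting idiom from usbdefs.h, reduced to two invented entries.
 * Build: cc -std=c11 -o bit_sketch bit_sketch.c
 */
#include <stdio.h>

#define DEMO_BIT(x) DEMO_BIT_ ## x = 1 << DEMO_INDEX_ ## x

enum {
	/* sequential bit indices */
	DEMO_INDEX_FOO,			/* 0 */
	DEMO_INDEX_BAR,			/* 1 */

	/* masks derived from the indices */
	DEMO_BIT(FOO),			/* DEMO_BIT_FOO == 1 << 0 */
	DEMO_BIT(BAR),			/* DEMO_BIT_BAR == 1 << 1 */
};

int main(void)
{
	printf("FOO mask %#x, BAR mask %#x\n", DEMO_BIT_FOO, DEMO_BIT_BAR);
	return 0;
}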
diff --git a/drivers/staging/line6/variax.h b/drivers/staging/line6/variax.h
deleted file mode 100644
index 24de79620d89..000000000000
--- a/drivers/staging/line6/variax.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Line6 Linux USB driver - 0.9.1beta
- *
- * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- */
-
-#ifndef VARIAX_H
-#define VARIAX_H
-
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-#include <sound/core.h>
-
-#include "driver.h"
-
-#define VARIAX_STARTUP_DELAY1 1000
-#define VARIAX_STARTUP_DELAY3 100
-#define VARIAX_STARTUP_DELAY4 100
-
-/*
- Stages of Variax startup procedure
-*/
-enum {
- VARIAX_STARTUP_INIT = 1,
- VARIAX_STARTUP_VERSIONREQ,
- VARIAX_STARTUP_WAIT,
- VARIAX_STARTUP_ACTIVATE,
- VARIAX_STARTUP_WORKQUEUE,
- VARIAX_STARTUP_SETUP,
- VARIAX_STARTUP_LAST = VARIAX_STARTUP_SETUP - 1
-};
-
-struct usb_line6_variax {
- /**
- Generic Line6 USB data.
- */
- struct usb_line6 line6;
-
- /**
- Buffer for activation code.
- */
- unsigned char *buffer_activate;
-
- /**
- Handler for device initialization.
- */
- struct work_struct startup_work;
-
- /**
- Timers for device initialization.
- */
- struct timer_list startup_timer1;
- struct timer_list startup_timer2;
-
- /**
- Current progress in startup procedure.
- */
- int startup_progress;
-};
-
-extern void line6_variax_disconnect(struct usb_interface *interface);
-extern int line6_variax_init(struct usb_interface *interface,
- struct usb_line6_variax *variax);
-extern void line6_variax_process_message(struct usb_line6_variax *variax);
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index a6b2f906bb1a..4410d7fdc1b4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -85,6 +85,9 @@ static inline int __is_po2(unsigned long long val)
#include <linux/list.h>
+int libcfs_arch_init(void);
+void libcfs_arch_cleanup(void);
+
/* libcfs tcpip */
int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask);
int libcfs_ipif_enumerate(char ***names);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 375586bf7312..808e49411a30 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -454,25 +454,23 @@ cfs_hash_bkt_size(struct cfs_hash *hs)
hs->hs_extra_bytes;
}
-#define CFS_HOP(hs, op) (hs)->hs_ops->hs_ ## op
-
static inline unsigned
cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
{
- return CFS_HOP(hs, hash)(hs, key, mask);
+ return hs->hs_ops->hs_hash(hs, key, mask);
}
static inline void *
cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
{
- return CFS_HOP(hs, key)(hnode);
+ return hs->hs_ops->hs_key(hnode);
}
static inline void
cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
{
- if (CFS_HOP(hs, keycpy) != NULL)
- CFS_HOP(hs, keycpy)(hnode, key);
+ if (hs->hs_ops->hs_keycpy)
+ hs->hs_ops->hs_keycpy(hnode, key);
}
/**
@@ -481,42 +479,38 @@ cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
static inline int
cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
- return CFS_HOP(hs, keycmp)(key, hnode);
+ return hs->hs_ops->hs_keycmp(key, hnode);
}
static inline void *
cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
{
- return CFS_HOP(hs, object)(hnode);
+ return hs->hs_ops->hs_object(hnode);
}
static inline void
cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
- return CFS_HOP(hs, get)(hs, hnode);
+ return hs->hs_ops->hs_get(hs, hnode);
}
static inline void
cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
- LASSERT(CFS_HOP(hs, put_locked) != NULL);
-
- return CFS_HOP(hs, put_locked)(hs, hnode);
+ return hs->hs_ops->hs_put_locked(hs, hnode);
}
static inline void
cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
- LASSERT(CFS_HOP(hs, put) != NULL);
-
- return CFS_HOP(hs, put)(hs, hnode);
+ return hs->hs_ops->hs_put(hs, hnode);
}
static inline void
cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
- if (CFS_HOP(hs, exit))
- CFS_HOP(hs, exit)(hs, hnode);
+ if (hs->hs_ops->hs_exit)
+ hs->hs_ops->hs_exit(hs, hnode);
}
static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 2817112c0633..3d86fb5b5481 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -458,14 +458,6 @@ struct libcfs_device_userstate {
struct page *ldu_memhog_root_page;
};
-/* what used to be in portals_lib.h */
-#ifndef MIN
-# define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#endif
-#ifndef MAX
-# define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
static inline int cfs_size_round4(int val)
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 7e89b3be1a74..0038d29a37fe 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -636,6 +636,7 @@ lnet_net2rnethash(__u32 net)
}
extern lnd_t the_lolnd;
+extern int avoid_asym_router_failure;
int lnet_cpt_of_nid_locked(lnet_nid_t nid);
int lnet_cpt_of_nid(lnet_nid_t nid);
@@ -752,9 +753,9 @@ int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
void lnet_counters_get(lnet_counters_t *counters);
void lnet_counters_reset(void);
-unsigned int lnet_iov_nob(unsigned int niov, struct iovec *iov);
-int lnet_extract_iov(int dst_niov, struct iovec *dst,
- int src_niov, struct iovec *src,
+unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
+int lnet_extract_iov(int dst_niov, struct kvec *dst,
+ int src_niov, struct kvec *src,
unsigned int offset, unsigned int len);
unsigned int lnet_kiov_nob(unsigned int niov, lnet_kiov_t *iov);
@@ -762,17 +763,17 @@ int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
int src_niov, lnet_kiov_t *src,
unsigned int offset, unsigned int len);
-void lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov,
+void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov,
unsigned int doffset,
- unsigned int nsiov, struct iovec *siov,
+ unsigned int nsiov, struct kvec *siov,
unsigned int soffset, unsigned int nob);
-void lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov,
+void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov,
unsigned int iovoffset,
unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int kiovoffset, unsigned int nob);
void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int kiovoffset,
- unsigned int niov, struct iovec *iov,
+ unsigned int niov, struct kvec *iov,
unsigned int iovoffset, unsigned int nob);
void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
unsigned int doffset,
@@ -781,10 +782,10 @@ void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
static inline void
lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, struct iovec *siov, unsigned int soffset,
+ unsigned int nsiov, struct kvec *siov, unsigned int soffset,
unsigned int nob)
{
- struct iovec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
+ struct kvec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
lnet_copy_iov2iov(1, &diov, doffset,
nsiov, siov, soffset, nob);
@@ -795,17 +796,17 @@ lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
unsigned int nsiov, lnet_kiov_t *skiov,
unsigned int soffset, unsigned int nob)
{
- struct iovec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
+ struct kvec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
lnet_copy_kiov2iov(1, &diov, doffset,
nsiov, skiov, soffset, nob);
}
static inline void
-lnet_copy_flat2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
+lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
int slen, void *src, unsigned int soffset, unsigned int nob)
{
- struct iovec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
+ struct kvec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
lnet_copy_iov2iov(ndiov, diov, doffset,
1, &siov, soffset, nob);
@@ -816,7 +817,7 @@ lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov,
unsigned int doffset, int slen, void *src,
unsigned int soffset, unsigned int nob)
{
- struct iovec siov = {/* .iov_base = */ src, /* .iov_len = */ slen};
+ struct kvec siov = {/* .iov_base = */ src, /* .iov_len = */ slen};
lnet_copy_iov2kiov(ndiov, dkiov, doffset,
1, &siov, soffset, nob);
@@ -851,6 +852,7 @@ int lnet_peer_buffer_credits(lnet_ni_t *ni);
int lnet_router_checker_start(void);
void lnet_router_checker_stop(void);
+void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
void lnet_swap_pinginfo(lnet_ping_info_t *info);
int lnet_ping_target_init(void);
@@ -870,4 +872,12 @@ void lnet_peer_tables_destroy(void);
int lnet_peer_tables_create(void);
void lnet_debug_peer(lnet_nid_t nid);
+static inline void lnet_peer_set_alive(lnet_peer_t *lp)
+{
+ lp->lp_last_alive = lp->lp_last_query = get_seconds();
+ if (!lp->lp_alive)
+ lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
+}
+
#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index f16213f1771a..50537668f59d 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -217,7 +217,7 @@ typedef struct lnet_msg {
unsigned int msg_wanted;
unsigned int msg_offset;
unsigned int msg_niov;
- struct iovec *msg_iov;
+ struct kvec *msg_iov;
lnet_kiov_t *msg_kiov;
lnet_event_t msg_ev;
@@ -271,7 +271,7 @@ typedef struct lnet_libmd {
lnet_eq_t *md_eq;
unsigned int md_niov; /* # frags */
union {
- struct iovec iov[LNET_MAX_IOV];
+ struct kvec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
} md_iov;
} lnet_libmd_t;
@@ -346,7 +346,7 @@ typedef struct lnet_lnd {
* credit if the LND does flow control. */
int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
int delayed, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
/* lnet_parse() has had to delay processing of this message
@@ -622,7 +622,7 @@ typedef struct lnet_portal {
/* Match table for each CPT */
struct lnet_match_table **ptl_mtables;
/* spread rotor of incoming "PUT" */
- int ptl_rotor;
+ unsigned int ptl_rotor;
/* # active entries for this portal */
int ptl_mt_nmaps;
/* array of active entries' cpu-partition-id */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 62b575deac3a..651016919669 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1538,7 +1538,7 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
- fpo->fpo_map_count --; /* decref the pool */
+ fpo->fpo_map_count--; /* decref the pool */
list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
/* the first pool is persistent */
@@ -1547,7 +1547,7 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
if (kiblnd_fmr_pool_is_idle(fpo, now)) {
list_move(&fpo->fpo_list, &zombies);
- fps->fps_version ++;
+ fps->fps_version++;
}
}
spin_unlock(&fps->fps_lock);
@@ -1752,7 +1752,7 @@ kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
LASSERT (pool->po_allocated > 0);
list_add(node, &pool->po_free_list);
- pool->po_allocated --;
+ pool->po_allocated--;
list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
/* the first pool is persistent */
@@ -1781,7 +1781,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
if (list_empty(&pool->po_free_list))
continue;
- pool->po_allocated ++;
+ pool->po_allocated++;
pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
node = pool->po_free_list.next;
list_del(node);
@@ -1864,7 +1864,7 @@ kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
return -EAGAIN;
}
- for (i = 0; i < rd->rd_nfrags; i ++) {
+ for (i = 0; i < rd->rd_nfrags; i++) {
pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
}
@@ -2117,7 +2117,7 @@ kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
tps_poolset);
kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
- tx->tx_cookie = tps->tps_next_tx_cookie ++;
+ tx->tx_cookie = tps->tps_next_tx_cookie++;
}
static void
@@ -2326,7 +2326,7 @@ kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
}
for (hdev->ibh_mr_shift = 0;
- hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift ++) {
+ hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
return 0;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index b02b4ec1e29d..ab128dee9483 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -1026,5 +1026,5 @@ int kiblnd_post_rx (kib_rx_t *rx, int credit);
int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b48d7edf5669..48d885dc51d9 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -697,7 +697,7 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- unsigned int niov, struct iovec *iov, int offset, int nob)
+ unsigned int niov, struct kvec *iov, int offset, int nob)
{
kib_net_t *net = ni->ni_data;
struct page *page;
@@ -1125,8 +1125,9 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
break;
}
- wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
- kiblnd_rd_frag_size(dstrd, dstidx)), resid);
+ wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
+ kiblnd_rd_frag_size(dstrd, dstidx)),
+ (__u32) resid);
sge = &tx->tx_sge[tx->tx_nwrq];
sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
@@ -1461,7 +1462,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int payload_niov = lntmsg->msg_niov;
- struct iovec *payload_iov = lntmsg->msg_iov;
+ struct kvec *payload_iov = lntmsg->msg_iov;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
@@ -1628,7 +1629,7 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
{
lnet_process_id_t target = lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
+ struct kvec *iov = lntmsg->msg_iov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
@@ -1687,7 +1688,7 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
int
kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kib_rx_t *rx = private;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 9188b34e6948..5956dbac5d04 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -773,7 +773,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
/* Only match interfaces for additional connections
* if I have > 1 interface */
n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
- MIN(n_peerips, net->ksnn_ninterfaces);
+ min(n_peerips, net->ksnn_ninterfaces);
for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
/* ^ yes really... */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index a29d4da6e343..03488d289c74 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -69,7 +69,7 @@ typedef struct /* per scheduler state */
int kss_nconns;
struct ksock_sched_info *kss_info; /* owner of it */
struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
- struct iovec kss_scratch_iov[LNET_MAX_IOV];
+ struct kvec kss_scratch_iov[LNET_MAX_IOV];
} ksock_sched_t;
struct ksock_sched_info {
@@ -213,7 +213,7 @@ typedef struct /* transmit packet */
int tx_nob; /* # packet bytes */
int tx_resid; /* residual bytes */
int tx_niov; /* # packet iovec frags */
- struct iovec *tx_iov; /* packet iovec frags */
+ struct kvec *tx_iov; /* packet iovec frags */
int tx_nkiov; /* # packet page frags */
unsigned short tx_zc_aborted; /* aborted ZC request */
unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
@@ -227,11 +227,11 @@ typedef struct /* transmit packet */
int tx_desc_size; /* size of this descriptor */
union {
struct {
- struct iovec iov; /* virt hdr */
+ struct kvec iov; /* virt hdr */
lnet_kiov_t kiov[0]; /* paged payload */
} paged;
struct {
- struct iovec iov[1]; /* virt hdr + payload */
+ struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
} ksock_tx_t;
@@ -243,7 +243,7 @@ typedef struct /* transmit packet */
/* space for the rx frag descriptors; we either read a single contiguous
* header, or up to LNET_MAX_IOV frags of payload of either type. */
typedef union {
- struct iovec iov[LNET_MAX_IOV];
+ struct kvec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
} ksock_rxiovspace_t;
@@ -284,7 +284,7 @@ typedef struct ksock_conn {
int ksnc_rx_nob_left; /* # bytes to next hdr/body */
int ksnc_rx_nob_wanted; /* bytes actually wanted */
int ksnc_rx_niov; /* # iovec frags */
- struct iovec *ksnc_rx_iov; /* the iovec frags */
+ struct kvec *ksnc_rx_iov; /* the iovec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */
@@ -517,7 +517,7 @@ int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int ksocknal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index e6c1d3647952..92760fe94184 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -110,7 +110,7 @@ ksocknal_free_tx (ksock_tx_t *tx)
static int
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
- struct iovec *iov = tx->tx_iov;
+ struct kvec *iov = tx->tx_iov;
int nob;
int rc;
@@ -251,7 +251,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
static int
ksocknal_recv_iov (ksock_conn_t *conn)
{
- struct iovec *iov = conn->ksnc_rx_iov;
+ struct kvec *iov = conn->ksnc_rx_iov;
int nob;
int rc;
@@ -926,7 +926,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
- struct iovec *payload_iov = lntmsg->msg_iov;
+ struct kvec *payload_iov = lntmsg->msg_iov;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
@@ -1047,8 +1047,8 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
case KSOCK_PROTO_V2:
case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
@@ -1061,8 +1061,8 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
break;
@@ -1082,12 +1082,12 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
skipped = 0;
niov = 0;
do {
- nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
+ nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
conn->ksnc_rx_iov[niov].iov_len = nob;
@@ -1212,8 +1212,8 @@ ksocknal_process_receive (ksock_conn_t *conn)
conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
conn->ksnc_rx_niov = 1;
@@ -1311,7 +1311,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
int
ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
ksock_conn_t *conn = (ksock_conn_t *)private;
@@ -1950,10 +1950,10 @@ ksocknal_connect (ksock_route_t *route)
/* This is a retry rather than a new connection */
route->ksnr_retry_interval *= 2;
route->ksnr_retry_interval =
- MAX(route->ksnr_retry_interval,
+ max(route->ksnr_retry_interval,
cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
route->ksnr_retry_interval =
- MIN(route->ksnr_retry_interval,
+ min(route->ksnr_retry_interval,
cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
LASSERT (route->ksnr_retry_interval != 0);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index 245c9d7560af..66cc509295e5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -92,11 +92,11 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
#if SOCKNAL_SINGLE_FRAG_TX
- struct iovec scratch;
- struct iovec *scratchiov = &scratch;
+ struct kvec scratch;
+ struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
#else
- struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_niov;
#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
@@ -111,7 +111,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
- rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
+ rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
}
return rc;
}
@@ -153,14 +153,14 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
}
} else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
- struct iovec scratch;
- struct iovec *scratchiov = &scratch;
+ struct kvec scratch;
+ struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
- struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_nkiov;
#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
@@ -203,14 +203,14 @@ int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
- struct iovec scratch;
- struct iovec *scratchiov = &scratch;
+ struct kvec scratch;
+ struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
#else
- struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = conn->ksnc_rx_niov;
#endif
- struct iovec *iov = conn->ksnc_rx_iov;
+ struct kvec *iov = conn->ksnc_rx_iov;
struct msghdr msg = {
.msg_flags = 0
};
@@ -232,7 +232,7 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- (struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);
+ scratchiov, niov, nob, MSG_DONTWAIT);
saved_csum = 0;
if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -269,7 +269,7 @@ ksocknal_lib_kiov_vunmap(void *addr)
static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
- struct iovec *iov, struct page **pages)
+ struct kvec *iov, struct page **pages)
{
void *addr;
int nob;
@@ -307,15 +307,15 @@ int
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
- struct iovec scratch;
- struct iovec *scratchiov = &scratch;
+ struct kvec scratch;
+ struct kvec *scratchiov = &scratch;
struct page **pages = NULL;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
- struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
unsigned int niov = conn->ksnc_rx_nkiov;
#endif
@@ -390,13 +390,13 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
__u32 csum;
void *base;
- LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
+ LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
LASSERT(tx->tx_conn != NULL);
LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
tx->tx_msg.ksm_csum = 0;
- csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
+ csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
tx->tx_iov[0].iov_len);
if (tx->tx_kiov != NULL) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index ea9d80f40cab..b2f88eb47bba 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -717,7 +717,7 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_lnetmsg != NULL);
- tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr;
+ tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
@@ -726,7 +726,7 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
static void
ksocknal_pack_msg_v2(ksock_tx_t *tx)
{
- tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;
+ tx->tx_iov[0].iov_base = &tx->tx_msg;
if (tx->tx_lnetmsg != NULL) {
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index c8c1ed84fe5c..0f53c761f1a9 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -158,7 +158,7 @@ fail_peer(lnet_nid_t nid, int outgoing)
}
unsigned int
-lnet_iov_nob(unsigned int niov, struct iovec *iov)
+lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
unsigned int nob = 0;
@@ -170,8 +170,8 @@ lnet_iov_nob(unsigned int niov, struct iovec *iov)
EXPORT_SYMBOL(lnet_iov_nob);
void
-lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
- unsigned int nsiov, struct iovec *siov, unsigned int soffset,
+lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
+ unsigned int nsiov, struct kvec *siov, unsigned int soffset,
unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
@@ -201,9 +201,9 @@ lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = MIN(diov->iov_len - doffset,
+ this_nob = min(diov->iov_len - doffset,
siov->iov_len - soffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min(this_nob, nob);
memcpy((char *)diov->iov_base + doffset,
(char *)siov->iov_base + soffset, this_nob);
@@ -229,8 +229,8 @@ lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
EXPORT_SYMBOL(lnet_copy_iov2iov);
int
-lnet_extract_iov(int dst_niov, struct iovec *dst,
- int src_niov, struct iovec *src,
+lnet_extract_iov(int dst_niov, struct kvec *dst,
+ int src_niov, struct kvec *src,
unsigned int offset, unsigned int len)
{
/* Initialise 'dst' to the subset of 'src' starting at 'offset',
@@ -322,9 +322,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = MIN(diov->kiov_len - doffset,
+ this_nob = min(diov->kiov_len - doffset,
siov->kiov_len - soffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min(this_nob, nob);
if (daddr == NULL)
daddr = ((char *)kmap(diov->kiov_page)) +
@@ -371,7 +371,7 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
-lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
+lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int kiovoffset, unsigned int nob)
{
@@ -403,9 +403,9 @@ lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
do {
LASSERT(niov > 0);
LASSERT(nkiov > 0);
- this_nob = MIN(iov->iov_len - iovoffset,
- kiov->kiov_len - kiovoffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min(iov->iov_len - iovoffset,
+ (__kernel_size_t) kiov->kiov_len - kiovoffset);
+ this_nob = min(this_nob, nob);
if (addr == NULL)
addr = ((char *)kmap(kiov->kiov_page)) +
@@ -443,7 +443,7 @@ EXPORT_SYMBOL(lnet_copy_kiov2iov);
void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int kiovoffset, unsigned int niov,
- struct iovec *iov, unsigned int iovoffset,
+ struct kvec *iov, unsigned int iovoffset,
unsigned int nob)
{
/* NB kiov, iov are READ-ONLY */
@@ -474,9 +474,9 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = MIN(kiov->kiov_len - kiovoffset,
+ this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
iov->iov_len - iovoffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min(this_nob, nob);
if (addr == NULL)
addr = ((char *)kmap(kiov->kiov_page)) +
@@ -566,7 +566,7 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
unsigned int niov = 0;
- struct iovec *iov = NULL;
+ struct kvec *iov = NULL;
lnet_kiov_t *kiov = NULL;
int rc;
@@ -1530,7 +1530,7 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
LASSERT(md->md_offset == 0);
rlength = hdr->payload_length;
- mlength = MIN(rlength, (int)md->md_length);
+ mlength = min_t(int, rlength, md->md_length);
if (mlength < rlength &&
(md->md_options & LNET_MD_TRUNCATE) == 0) {
@@ -1877,6 +1877,19 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
goto drop;
}
+ if (lnet_isrouter(msg->msg_rxpeer)) {
+ lnet_peer_set_alive(msg->msg_rxpeer);
+ if (avoid_asym_router_failure &&
+ LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
+ /* received a remote message from the router; update the
+ * remote NI status on this router.
+ * NB: multi-hop routed messages will be ignored.
+ */
+ lnet_router_ni_update_locked(msg->msg_rxpeer,
+ LNET_NIDNET(src_nid));
+ }
+ }
+
lnet_msg_commit(msg, cpt);
if (!for_me) {
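/* Aside (not part of the patch): a minimal sketch of the MIN()/min() difference
 * relied on above. The kernel's min() warns at compile time when its arguments
 * have different types, which is why mixed-type comparisons in this patch use
 * min_t()/max_t() with an explicit type, or add a cast such as
 * (__kernel_size_t) as in lnet_copy_kiov2iov() above. Values are illustrative. */
#include <linux/kernel.h>

static void example_min_usage(void)
{
	int nbytes = 512;
	size_t limit = 4096;

	/* same-typed arguments: plain min() is fine */
	int a = min(nbytes, 128);

	/* mixed int/size_t arguments: pick the comparison type explicitly */
	size_t b = min_t(size_t, nbytes, limit);

	(void)a;
	(void)b;
}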
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 19ed696344fe..3ba0da919b41 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -262,10 +262,10 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
{
struct lnet_match_table *mtable;
struct lnet_portal *ptl;
- int nmaps;
- int rotor;
- int routed;
- int cpt;
+ unsigned int nmaps;
+ unsigned int rotor;
+ unsigned int cpt;
+ bool routed;
/* NB: called w/o lock */
LASSERT(info->mi_portal < the_lnet.ln_nportals);
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 17e1643fd675..f708c2e649d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -47,7 +47,7 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
static int
lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
lnet_msg_t *sendmsg = private;
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 3c23677bc280..72b7fbc83718 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -108,7 +108,7 @@ lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
}
}
-DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
+static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
static int __init
init_lnet(void)
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index c667b5b76761..52ec0ab7e3c3 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer i
static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
-static int peer_buffer_credits = 0;
+static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
@@ -80,11 +80,11 @@ lnet_peer_buffer_credits(lnet_ni_t *ni)
#endif
-static int check_routers_before_use = 0;
+static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
-static int avoid_asym_router_failure = 1;
+int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
@@ -245,7 +245,7 @@ lnet_find_net_locked (__u32 net)
static void lnet_shuffle_seed(void)
{
- static int seeded = 0;
+ static int seeded;
int lnd_type, seed[2];
struct timeval tv;
lnet_ni_t *ni;
@@ -783,6 +783,21 @@ lnet_wait_known_routerstate(void)
}
}
+void
+lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
+{
+ lnet_route_t *rte;
+
+ if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
+ list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
+ if (rte->lr_net == net) {
+ rte->lr_downis = 0;
+ break;
+ }
+ }
+ }
+}
+
static void
lnet_update_ni_status_locked(void)
{
@@ -793,7 +808,7 @@ lnet_update_ni_status_locked(void)
LASSERT(the_lnet.ln_routing);
timeout = router_ping_timeout +
- MAX(live_router_check_interval, dead_router_check_interval);
+ max(live_router_check_interval, dead_router_check_interval);
now = get_seconds();
list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
@@ -1578,8 +1593,8 @@ lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
void
lnet_router_checker (void)
{
- static time_t last = 0;
- static int running = 0;
+ static time_t last;
+ static int running;
time_t now = get_seconds();
int interval = now - last;
@@ -1593,7 +1608,7 @@ lnet_router_checker (void)
return;
if (last != 0 &&
- interval > MAX(live_router_check_interval,
+ interval > max(live_router_check_interval,
dead_router_check_interval))
CNETERR("Checker(%d/%d) not called for %d seconds\n",
live_router_check_interval, dead_router_check_interval,
@@ -1664,13 +1679,16 @@ lnet_get_tunables (void)
char *s;
s = getenv("LNET_ROUTER_PING_TIMEOUT");
- if (s != NULL) router_ping_timeout = atoi(s);
+ if (s != NULL)
+ router_ping_timeout = atoi(s);
s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL");
- if (s != NULL) live_router_check_interval = atoi(s);
+ if (s != NULL)
+ live_router_check_interval = atoi(s);
s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL");
- if (s != NULL) dead_router_check_interval = atoi(s);
+ if (s != NULL)
+ dead_router_check_interval = atoi(s);
/* This replaces old lnd_notify mechanism */
check_routers_before_use = 1;
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 46cde7036f1d..c055afc86eb4 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -49,7 +49,7 @@ enum {
*/
#define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1)
/* change version, 16 bits or 8 bits */
-#define LNET_PROC_VER_BITS MAX(((MIN(LNET_LOFFT_BITS, 64)) / 4), 8)
+#define LNET_PROC_VER_BITS max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8)
#define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS
/*
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 5bc615309e72..fbff84cea52f 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -721,7 +721,7 @@ lst_stat_query_ioctl(lstio_stat_args_t *args)
return rc;
}
-int lst_test_add_ioctl(lstio_test_args_t *args)
+static int lst_test_add_ioctl(lstio_test_args_t *args)
{
char *batch_name;
char *src_name = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 9999b0dc03e4..77f02b76128e 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -703,7 +703,7 @@ lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
return 0;
}
-lnet_process_id_packed_t *
+static lnet_process_id_packed_t *
lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
{
lnet_process_id_packed_t *pid;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index fc1cb56cbab2..2353889c6eac 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -54,7 +54,7 @@
#define LST_TRANS_TIMEOUT 30
#define LST_TRANS_MIN_TIMEOUT 3
-#define LST_VALIDATE_TIMEOUT(t) MIN(MAX(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
+#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
#define LST_PING_INTERVAL 8
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 49cb6543d538..1e0afc286847 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -46,17 +46,17 @@
#include "console.h"
#include "conrpc.h"
-#define LST_NODE_STATE_COUNTER(nd, p) \
-do { \
- if ((nd)->nd_state == LST_NODE_ACTIVE) \
- (p)->nle_nactive ++; \
+#define LST_NODE_STATE_COUNTER(nd, p) \
+do { \
+ if ((nd)->nd_state == LST_NODE_ACTIVE) \
+ (p)->nle_nactive++; \
else if ((nd)->nd_state == LST_NODE_BUSY) \
- (p)->nle_nbusy ++; \
+ (p)->nle_nbusy++; \
else if ((nd)->nd_state == LST_NODE_DOWN) \
- (p)->nle_ndown ++; \
- else \
- (p)->nle_nunknown ++; \
- (p)->nle_nnode ++; \
+ (p)->nle_ndown++; \
+ else \
+ (p)->nle_nunknown++; \
+ (p)->nle_nnode++; \
} while (0)
lstcon_session_t console_session;
@@ -223,7 +223,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
static void
lstcon_group_addref(lstcon_group_t *grp)
{
- grp->grp_ref ++;
+ grp->grp_ref++;
}
static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
@@ -298,7 +298,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
return 0;
list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
- grp->grp_nnode ++;
+ grp->grp_nnode++;
return 0;
}
@@ -324,7 +324,7 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
- new->grp_nnode ++;
+ new->grp_nnode++;
return;
}
@@ -767,7 +767,7 @@ lstcon_nodes_getent(struct list_head *head, int *index_p,
&nd->nd_state, sizeof(nd->nd_state)))
return -EFAULT;
- count ++;
+ count++;
}
if (index <= *index_p)
@@ -1343,7 +1343,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
/* add to test list anyway, so user can check what's going on */
list_add_tail(&test->tes_link, &batch->bat_test_list);
- batch->bat_ntest ++;
+ batch->bat_ntest++;
test->tes_hdr.tsb_index = batch->bat_ntest;
/* hold groups so nobody can change them */
@@ -1986,7 +1986,7 @@ static void lstcon_init_acceptor_service(void)
extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
-DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
+static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
/* initialize console */
int
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index cc9d1826ae66..570914828b8c 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -103,7 +103,7 @@ do { \
#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
-struct smoketest_framework {
+static struct smoketest_framework {
struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
struct list_head fw_zombie_sessions; /* stopping sessions */
struct list_head fw_tests; /* registered test cases */
@@ -194,9 +194,9 @@ sfw_del_session_timer (void)
return EBUSY; /* racing with sfw_session_expired() */
}
-/* called with sfw_data.fw_lock held */
static void
sfw_deactivate_session (void)
+ __must_hold(&sfw_data.fw_lock)
{
sfw_session_t *sn = sfw_data.fw_session;
int nactive = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index c6ef5b0d8de1..faf409802372 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -61,31 +61,31 @@ lnet_selftest_fini(void)
int i;
switch (lst_init_step) {
- case LST_INIT_CONSOLE:
- lstcon_console_fini();
- case LST_INIT_FW:
- sfw_shutdown();
- case LST_INIT_RPC:
- srpc_shutdown();
- case LST_INIT_WI_TEST:
- for (i = 0;
- i < cfs_cpt_number(lnet_cpt_table()); i++) {
- if (lst_sched_test[i] == NULL)
- continue;
- cfs_wi_sched_destroy(lst_sched_test[i]);
- }
- LIBCFS_FREE(lst_sched_test,
- sizeof(lst_sched_test[0]) *
- cfs_cpt_number(lnet_cpt_table()));
- lst_sched_test = NULL;
-
- case LST_INIT_WI_SERIAL:
- cfs_wi_sched_destroy(lst_sched_serial);
- lst_sched_serial = NULL;
- case LST_INIT_NONE:
- break;
- default:
- LBUG();
+ case LST_INIT_CONSOLE:
+ lstcon_console_fini();
+ case LST_INIT_FW:
+ sfw_shutdown();
+ case LST_INIT_RPC:
+ srpc_shutdown();
+ case LST_INIT_WI_TEST:
+ for (i = 0;
+ i < cfs_cpt_number(lnet_cpt_table()); i++) {
+ if (lst_sched_test[i] == NULL)
+ continue;
+ cfs_wi_sched_destroy(lst_sched_test[i]);
+ }
+ LIBCFS_FREE(lst_sched_test,
+ sizeof(lst_sched_test[0]) *
+ cfs_cpt_number(lnet_cpt_table()));
+ lst_sched_test = NULL;
+
+ case LST_INIT_WI_SERIAL:
+ cfs_wi_sched_destroy(lst_sched_serial);
+ lst_sched_serial = NULL;
+ case LST_INIT_NONE:
+ break;
+ default:
+ LBUG();
}
return;
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index f753add7bfb3..1f7d9a6248db 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -54,7 +54,7 @@ typedef enum {
SRPC_STATE_STOPPING,
} srpc_state_t;
-struct smoketest_rpc {
+static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
@@ -468,6 +468,7 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+ __must_hold(&scd->scd_lock)
{
struct srpc_service *sv = scd->scd_svc;
struct srpc_msg *msg = &buf->buf_msg;
@@ -559,7 +560,7 @@ srpc_add_buffer(struct swi_workitem *wi)
LASSERT(scd->scd_buf_posting > 0);
scd->scd_buf_posting--;
scd->scd_buf_total++;
- scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
+ scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
}
if (rc != 0) {
@@ -697,6 +698,7 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+ __must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
if (srpc_service_post_buffer(scd, buf) != 0) {
@@ -1486,7 +1488,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
scd->scd_buf_adjust == 0 &&
scd->scd_buf_nposted < scd->scd_buf_low) {
- scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
+ scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
SFW_TEST_WI_MIN);
swi_schedule_workitem(&scd->scd_buf_wi);
}
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 9b5c5df6eb2c..d48701834b18 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -609,4 +609,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
}
}
+extern sfw_test_client_ops_t brw_test_client;
+void brw_init_test_client(void);
+
+extern srpc_service_t brw_test_service;
+void brw_init_test_service(void);
+
+extern sfw_test_client_ops_t ping_test_client;
+void ping_init_test_client(void);
+
+extern srpc_service_t ping_test_service;
+void ping_init_test_service(void);
+
#endif /* __SELFTEST_SELFTEST_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index f8352c2a7d37..441f9472a834 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -57,7 +57,7 @@
#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
(STTIMER_NSLOTS - 1))])
-struct st_timer_data {
+static struct st_timer_data {
spinlock_t stt_lock;
/* start time of the slot processed previously */
unsigned long stt_prev_slot;
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
index eb607c52ef3b..b5e8da8956f2 100644
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -47,7 +47,7 @@
int seq_client_alloc_super(struct lu_client_seq *seq,
const struct lu_env *env);
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
extern struct lprocfs_vars seq_client_proc_list[];
#endif
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 64b1d80c64b0..063441abfcfc 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(seq_client_flush);
static void seq_client_proc_fini(struct lu_client_seq *seq)
{
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
if (seq->lcs_proc_dir) {
if (!IS_ERR(seq->lcs_proc_dir))
lprocfs_remove(&seq->lcs_proc_dir);
@@ -413,7 +413,7 @@ static void seq_client_proc_fini(struct lu_client_seq *seq)
static int seq_client_proc_init(struct lu_client_seq *seq)
{
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
int rc;
seq->lcs_proc_dir = lprocfs_register(seq->lcs_name,
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 5d95d0b358b8..0d0a73745065 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -257,9 +257,9 @@ void fld_cache_flush(struct fld_cache *cache)
* entry accordingly.
*/
-void fld_cache_punch_hole(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
+static void fld_cache_punch_hole(struct fld_cache *cache,
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
{
const struct lu_seq_range *range = &f_new->fce_range;
const u64 new_start = range->lsr_start;
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 8806b6096953..6125bbe822b5 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -111,7 +111,7 @@ struct fld_cache {
/**
* Cache name used for debug and messages. */
- char fci_name[80];
+ char fci_name[LUSTRE_MDT_MAXNAMELEN];
unsigned int fci_no_shrink:1;
};
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 0d361ff43212..b8d17e109a96 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -131,11 +131,20 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
else
hash = 0;
+again:
list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == hash)
return target;
}
+ if (hash != 0) {
+ /* It is possible the remote target (MDT) is not yet connected
+ * to the client, so refer this to MDT0, which should be
+ * connected during mount */
+ hash = 0;
+ goto again;
+ }
+
CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n",
fld->lcf_name, hash, seq, fld->lcf_count);
@@ -269,7 +278,7 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
}
EXPORT_SYMBOL(fld_client_del_target);
-struct proc_dir_entry *fld_type_proc_dir = NULL;
+static struct proc_dir_entry *fld_type_proc_dir;
#if defined (CONFIG_PROC_FS)
static int fld_client_proc_init(struct lu_client_fld *fld)
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 95e7de18d2f1..8c5a65704a37 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -87,13 +87,21 @@ fld_proc_hash_seq_show(struct seq_file *m, void *unused)
}
static ssize_t
-fld_proc_hash_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+fld_proc_hash_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct lu_client_fld *fld;
struct lu_fld_hash *hash = NULL;
+ char fh_name[8];
int i;
+ if (count > sizeof(fh_name))
+ return -ENAMETOOLONG;
+
+ if (copy_from_user(fh_name, buffer, count) != 0)
+ return -EFAULT;
+
fld = ((struct seq_file *)file->private_data)->private;
LASSERT(fld != NULL);
@@ -101,7 +109,7 @@ fld_proc_hash_seq_write(struct file *file, const char *buffer,
if (count != strlen(fld_hash[i].fh_name))
continue;
- if (!strncmp(fld_hash[i].fh_name, buffer, count)) {
+ if (!strncmp(fld_hash[i].fh_name, fh_name, count)) {
hash = &fld_hash[i];
break;
}
@@ -146,7 +154,7 @@ static int fld_proc_cache_flush_release(struct inode *inode, struct file *file)
return 0;
}
-struct file_operations fld_proc_cache_flush_fops = {
+static struct file_operations fld_proc_cache_flush_fops = {
.owner = THIS_MODULE,
.open = fld_proc_cache_flush_open,
.write = fld_proc_cache_flush_write,
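/* Aside (not part of the patch): sketch of the pattern applied in
 * fld_proc_hash_seq_write() above. A proc/seq write handler receives a
 * __user pointer, so the data must be copied into a kernel buffer with
 * copy_from_user() before it can be compared or parsed. The buffer size
 * and names below are illustrative only. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_proc_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *off)
{
	char kbuf[8];

	if (count >= sizeof(kbuf))
		return -ENAMETOOLONG;

	/* copy_from_user() returns the number of bytes NOT copied */
	if (copy_from_user(kbuf, buffer, count) != 0)
		return -EFAULT;
	kbuf[count] = '\0';

	/* ... parse kbuf with strncmp()/kstrtoint() etc. ... */
	return count;
}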
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index b3b841f4d6e6..c5c3a8d9eaa4 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -135,6 +135,7 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+
memset(attr, 0, sizeof(*attr));
return attr;
}
@@ -142,6 +143,7 @@ static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
struct cl_io *io = &ccc_env_info(env)->cti_io;
+
memset(io, 0, sizeof(*io));
return io;
}
@@ -323,6 +325,7 @@ void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index cfe503b7df62..8a25cf6f6825 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -627,16 +627,16 @@ struct adaptive_timeout;
extern int lprocfs_at_hist_helper(struct seq_file *m,
struct adaptive_timeout *at);
extern int lprocfs_rd_timeouts(struct seq_file *m, void *data);
-extern int lprocfs_wr_timeouts(struct file *file, const char *buffer,
+extern int lprocfs_wr_timeouts(struct file *file, const char __user *buffer,
unsigned long count, void *data);
-extern int lprocfs_wr_evict_client(struct file *file, const char *buffer,
+extern int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
size_t count, loff_t *off);
-extern int lprocfs_wr_ping(struct file *file, const char *buffer,
+extern int lprocfs_wr_ping(struct file *file, const char __user *buffer,
size_t count, loff_t *off);
-extern int lprocfs_wr_import(struct file *file, const char *buffer,
+extern int lprocfs_wr_import(struct file *file, const char __user *buffer,
size_t count, loff_t *off);
extern int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
-extern int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
+extern int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
size_t count, loff_t *off);
/* Statfs helpers */
@@ -650,8 +650,8 @@ extern int lprocfs_rd_filesfree(struct seq_file *m, void *data);
extern int lprocfs_write_helper(const char __user *buffer, unsigned long count,
int *val);
extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
-extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
- __u64 *val);
+extern int lprocfs_write_u64_helper(const char __user *buffer,
+ unsigned long count, __u64 *val);
extern int lprocfs_write_frac_u64_helper(const char *buffer,
unsigned long count,
__u64 *val, int mult);
@@ -716,7 +716,8 @@ static struct file_operations name##_fops = { \
return lprocfs_rd_##type(m, m->private); \
} \
static ssize_t name##_##type##_seq_write(struct file *file, \
- const char *buffer, size_t count, loff_t *off) \
+ const char __user *buffer, size_t count, \
+ loff_t *off) \
{ \
struct seq_file *seq = file->private_data; \
return lprocfs_wr_##type(file, buffer, \
@@ -726,7 +727,8 @@ static struct file_operations name##_fops = { \
#define LPROC_SEQ_FOPS_WR_ONLY(name, type) \
static ssize_t name##_##type##_write(struct file *file, \
- const char *buffer, size_t count, loff_t *off) \
+ const char __user *buffer, size_t count, \
+ loff_t *off) \
{ \
return lprocfs_wr_##type(file, buffer, count, off); \
} \
@@ -939,20 +941,24 @@ static inline int lprocfs_at_hist_helper(struct seq_file *m,
static inline int lprocfs_rd_timeouts(struct seq_file *m, void *data)
{ return 0; }
static inline int lprocfs_wr_timeouts(struct file *file,
- const char *buffer,
- unsigned long count, void *data)
+ const char __user *buffer,
+ unsigned long count, void *data)
{ return 0; }
-static inline int lprocfs_wr_evict_client(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static inline int lprocfs_wr_evict_client(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{ return 0; }
-static inline int lprocfs_wr_ping(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static inline int lprocfs_wr_ping(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{ return 0; }
-static inline int lprocfs_wr_import(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static inline int lprocfs_wr_import(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{ return 0; }
-static inline int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static inline int lprocfs_wr_pinger_recov(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{ return 0; }
/* Statfs helpers */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 7b7457cf70e3..305ecbee9b78 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -105,6 +105,11 @@
* FOO_BULK_PORTAL is for incoming bulk on the FOO
*/
+/* Lustre service names follow the format
+ * service name + MDT + seq name
+ */
+#define LUSTRE_MDT_MAXNAMELEN 80
+
#define CONNMGR_REQUEST_PORTAL 1
#define CONNMGR_REPLY_PORTAL 2
//#define OSC_REQUEST_PORTAL 3
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 2d6fbb4b1b39..0a0929fd9023 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -358,7 +358,7 @@ struct lu_client_seq {
* Service uuid, passed from MDT + seq name to form unique seq name to
* use it with procfs.
*/
- char lcs_name[80];
+ char lcs_name[LUSTRE_MDT_MAXNAMELEN];
/*
* Sequence width, that is how many objects may be allocated in one
@@ -408,7 +408,7 @@ struct lu_server_seq {
* Service uuid, passed from MDT + seq name to form unique seq name to
* use it with procfs.
*/
- char lss_name[80];
+ char lss_name[LUSTRE_MDT_MAXNAMELEN];
/*
* Allocation chunks for super and meta sequences. Default values are
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 64c504849a22..5ee4b1ed0995 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -93,7 +93,7 @@ struct lu_server_fld {
/**
* Fld service name in form "fld-srv-lustre-MDTXXX" */
- char lsf_name[80];
+ char lsf_name[LUSTRE_MDT_MAXNAMELEN];
};
@@ -124,7 +124,7 @@ struct lu_client_fld {
/**
* Client fld proc entry name. */
- char lcf_name[80];
+ char lcf_name[LUSTRE_MDT_MAXNAMELEN];
int lcf_flags;
};
diff --git a/drivers/staging/lustre/lustre/include/lustre_update.h b/drivers/staging/lustre/lustre/include/lustre_update.h
deleted file mode 100644
index 84defce0f623..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_update.h
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.htm
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2013, Intel Corporation.
- */
-/*
- * lustre/include/lustre_update.h
- *
- * Author: Di Wang <di.wang@intel.com>
- */
-
-#ifndef _LUSTRE_UPDATE_H
-#define _LUSTRE_UPDATE_H
-
-#define UPDATE_BUFFER_SIZE 8192
-struct update_request {
- struct dt_device *ur_dt;
- struct list_head ur_list; /* attached itself to thandle */
- int ur_flags;
- int ur_rc; /* request result */
- int ur_batchid; /* Current batch(trans) id */
- struct update_buf *ur_buf; /* Holding the update req */
-};
-
-static inline unsigned long update_size(struct update *update)
-{
- unsigned long size;
- int i;
-
- size = cfs_size_round(offsetof(struct update, u_bufs[0]));
- for (i = 0; i < UPDATE_BUF_COUNT; i++)
- size += cfs_size_round(update->u_lens[i]);
-
- return size;
-}
-
-static inline void *update_param_buf(struct update *update, int index,
- int *size)
-{
- int i;
- void *ptr;
-
- if (index >= UPDATE_BUF_COUNT)
- return NULL;
-
- ptr = (char *)update + cfs_size_round(offsetof(struct update,
- u_bufs[0]));
- for (i = 0; i < index; i++) {
- LASSERT(update->u_lens[i] > 0);
- ptr += cfs_size_round(update->u_lens[i]);
- }
-
- if (size != NULL)
- *size = update->u_lens[index];
-
- return ptr;
-}
-
-static inline unsigned long update_buf_size(struct update_buf *buf)
-{
- unsigned long size;
- int i = 0;
-
- size = cfs_size_round(offsetof(struct update_buf, ub_bufs[0]));
- for (i = 0; i < buf->ub_count; i++) {
- struct update *update;
-
- update = (struct update *)((char *)buf + size);
- size += update_size(update);
- }
- LASSERT(size <= UPDATE_BUFFER_SIZE);
- return size;
-}
-
-static inline void *update_buf_get(struct update_buf *buf, int index, int *size)
-{
- int count = buf->ub_count;
- void *ptr;
- int i = 0;
-
- if (index >= count)
- return NULL;
-
- ptr = (char *)buf + cfs_size_round(offsetof(struct update_buf,
- ub_bufs[0]));
- for (i = 0; i < index; i++)
- ptr += update_size((struct update *)ptr);
-
- if (size != NULL)
- *size = update_size((struct update *)ptr);
-
- return ptr;
-}
-
-static inline void update_init_reply_buf(struct update_reply *reply, int count)
-{
- reply->ur_version = UPDATE_REPLY_V1;
- reply->ur_count = count;
-}
-
-static inline void *update_get_buf_internal(struct update_reply *reply,
- int index, int *size)
-{
- char *ptr;
- int count = reply->ur_count;
- int i;
-
- if (index >= count)
- return NULL;
-
- ptr = (char *)reply + cfs_size_round(offsetof(struct update_reply,
- ur_lens[count]));
- for (i = 0; i < index; i++) {
- LASSERT(reply->ur_lens[i] > 0);
- ptr += cfs_size_round(reply->ur_lens[i]);
- }
-
- if (size != NULL)
- *size = reply->ur_lens[index];
-
- return ptr;
-}
-
-static inline void update_insert_reply(struct update_reply *reply, void *data,
- int data_len, int index, int rc)
-{
- char *ptr;
-
- ptr = update_get_buf_internal(reply, index, NULL);
- LASSERT(ptr != NULL);
-
- *(int *)ptr = cpu_to_le32(rc);
- ptr += sizeof(int);
- if (data_len > 0) {
- LASSERT(data != NULL);
- memcpy(ptr, data, data_len);
- }
- reply->ur_lens[index] = data_len + sizeof(int);
-}
-
-static inline int update_get_reply_buf(struct update_reply *reply, void **buf,
- int index)
-{
- char *ptr;
- int size = 0;
- int result;
-
- ptr = update_get_buf_internal(reply, index, &size);
- result = *(int *)ptr;
-
- if (result < 0)
- return result;
-
- LASSERT((ptr != NULL && size >= sizeof(int)));
- *buf = ptr + sizeof(int);
- return size - sizeof(int);
-}
-
-static inline int update_get_reply_result(struct update_reply *reply,
- void **buf, int index)
-{
- void *ptr;
- int size;
-
- ptr = update_get_buf_internal(reply, index, &size);
- LASSERT(ptr != NULL && size > sizeof(int));
- return *(int *)ptr;
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 24d26ab35346..23095bb75226 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -586,6 +586,12 @@ int ccc_lock_enqueue(const struct lu_env *env,
return 0;
}
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+ return 0;
+}
+
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 6c6c57ca91de..20e64cddb1f4 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -249,8 +249,9 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
struct __##var##__dummy_read {; } /* semicolon catcher */
#define LDLM_POOL_PROC_WRITER(var, type) \
- static int lprocfs_wr_##var(struct file *file, const char *buffer, \
- unsigned long count, void *data) \
+ static int lprocfs_wr_##var(struct file *file, \
+ const char __user *buffer, \
+ unsigned long count, void *data) \
{ \
struct ldlm_pool *pl = data; \
type tmp; \
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 4c838f615a64..d20d277dc2f7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -470,6 +470,7 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
+ int ret;
recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
@@ -490,16 +491,15 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
*/
ldlm_cli_pool_pop_slv(pl);
- pl->pl_recalc_time = get_seconds();
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- recalc_interval_sec);
spin_unlock(&pl->pl_lock);
/*
* Do not cancel locks in case lru resize is disabled for this ns.
*/
- if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
- return 0;
+ if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) {
+ ret = 0;
+ goto out;
+ }
/*
* In the time of canceling locks on client we do not need to maintain
@@ -507,7 +507,19 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* It may be called when SLV has changed much, this is why we do not
* take into account pl->pl_recalc_time here.
*/
- return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+ ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+
+out:
+ spin_lock(&pl->pl_lock);
+ /*
+ * Time of LRU resizing might be longer than period,
+ * so update after LRU resizing rather than before it.
+ */
+ pl->pl_recalc_time = get_seconds();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
+ spin_unlock(&pl->pl_lock);
+ return ret;
}
/**
@@ -591,6 +603,14 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
}
recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
pl->pl_recalc_period;
+ if (recalc_interval_sec <= 0) {
+ /* Prevent too frequent recalculation. */
+ CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
+ "too short period(%ld)",
+ recalc_interval_sec,
+ pl->pl_recalc_period);
+ recalc_interval_sec = 1;
+ }
return recalc_interval_sec;
}
@@ -697,8 +717,8 @@ LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
- const char *buf, size_t len,
- loff_t *off)
+ const char __user *buf,
+ size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 1f150e46f50e..c6f62a91b233 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -72,7 +72,7 @@ extern unsigned int ldlm_cancel_unused_locks_before_replay;
unsigned int ldlm_dump_granted_max = 256;
#if defined(CONFIG_PROC_FS)
-static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
+static ssize_t lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
@@ -287,8 +287,9 @@ static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
return lprocfs_rd_uint(m, &supp);
}
-static ssize_t lprocfs_elc_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lprocfs_elc_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
unsigned int supp = -1;
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index a7a7ac626aaf..76c62e87a415 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -57,7 +57,7 @@ module_param(libcfs_debug, int, 0644);
MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
EXPORT_SYMBOL(libcfs_debug);
-unsigned int libcfs_debug_mb = 0;
+static unsigned int libcfs_debug_mb;
module_param(libcfs_debug_mb, uint, 0644);
MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
EXPORT_SYMBOL(libcfs_debug_mb);
@@ -93,7 +93,7 @@ EXPORT_SYMBOL(libcfs_debug_binary);
unsigned int libcfs_stack = 3 * THREAD_SIZE / 4;
EXPORT_SYMBOL(libcfs_stack);
-unsigned int portal_enter_debugger;
+static unsigned int portal_enter_debugger;
EXPORT_SYMBOL(portal_enter_debugger);
unsigned int libcfs_catastrophe;
@@ -115,7 +115,7 @@ static wait_queue_head_t debug_ctlwq;
char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
/* We need to pass a pointer here, but elsewhere this must be a const */
-char *libcfs_debug_file_path;
+static char *libcfs_debug_file_path;
module_param(libcfs_debug_file_path, charp, 0644);
MODULE_PARM_DESC(libcfs_debug_file_path,
"Path for dumping debug logs, set 'NONE' to prevent log dumping");
@@ -124,7 +124,7 @@ int libcfs_panic_in_progress;
/* libcfs_debug_token2mask() expects the returned
* string in lower-case */
-const char *
+static const char *
libcfs_debug_subsys2str(int subsys)
{
switch (1 << subsys) {
@@ -185,7 +185,7 @@ libcfs_debug_subsys2str(int subsys)
/* libcfs_debug_token2mask() expects the returned
* string in lower-case */
-const char *
+static const char *
libcfs_debug_dbg2str(int debug)
{
switch (1 << debug) {
@@ -350,7 +350,7 @@ void libcfs_debug_dumplog_internal(void *arg)
current->journal_info = journal_info;
}
-int libcfs_debug_dumplog_thread(void *arg)
+static int libcfs_debug_dumplog_thread(void *arg)
{
libcfs_debug_dumplog_internal(arg);
wake_up(&debug_ctlwq);
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 2d1e6729e996..ec3a2a8b8b2c 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -126,18 +126,21 @@ cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
+ __acquires(&lock->spin)
{
spin_lock(&lock->spin);
}
static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
+ __releases(&lock->spin)
{
spin_unlock(&lock->spin);
}
static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
+ __acquires(&lock->rw)
{
if (!exclusive)
read_lock(&lock->rw);
@@ -147,6 +150,7 @@ cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
+ __releases(&lock->rw)
{
if (!exclusive)
read_unlock(&lock->rw);
@@ -1580,7 +1584,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
stop_on_change = cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs) ||
- CFS_HOP(hs, put_locked) == NULL;
+ hs->hs_ops->hs_put_locked == NULL;
cfs_hash_lock(hs, 0);
LASSERT(!cfs_hash_is_rehashing(hs));
@@ -1635,9 +1639,9 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs,
!cfs_hash_with_no_itemref(hs))
return -EOPNOTSUPP;
- if (CFS_HOP(hs, get) == NULL ||
- (CFS_HOP(hs, put) == NULL &&
- CFS_HOP(hs, put_locked) == NULL))
+ if (hs->hs_ops->hs_get == NULL ||
+ (hs->hs_ops->hs_put == NULL &&
+ hs->hs_ops->hs_put_locked == NULL))
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
@@ -1667,9 +1671,9 @@ cfs_hash_for_each_empty(struct cfs_hash *hs,
if (cfs_hash_with_no_lock(hs))
return -EOPNOTSUPP;
- if (CFS_HOP(hs, get) == NULL ||
- (CFS_HOP(hs, put) == NULL &&
- CFS_HOP(hs, put_locked) == NULL))
+ if (hs->hs_ops->hs_get == NULL ||
+ (hs->hs_ops->hs_put == NULL &&
+ hs->hs_ops->hs_put_locked == NULL))
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
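/* Aside (not part of the patch): the __acquires()/__releases() markers added
 * above (and __must_hold() elsewhere in this series) are sparse annotations.
 * They are no-ops for gcc and only tell sparse how a function changes the lock
 * context, so lock/unlock helpers stop producing "context imbalance" warnings.
 * A minimal hypothetical example: */
#include <linux/spinlock.h>

static void example_lock(spinlock_t *lock)
	__acquires(lock)
{
	spin_lock(lock);
}

static void example_unlock(spinlock_t *lock)
	__releases(lock)
{
	spin_unlock(lock);
}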
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
index e2aa637abcf9..d9b7c6b69db4 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -228,12 +228,12 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
if (kkuc_groups[group].next == NULL)
return 0;
- down_read(&kg_sem);
+ down_write(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
if (reg->kr_fp != NULL)
rc = cb_func(reg->kr_data, cb_arg);
}
- up_read(&kg_sem);
+ up_write(&kg_sem);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index fb88733607a9..76d4392bd282 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -47,7 +47,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
int *oldmask, int minmask, int allmask)
{
const char *debugstr;
- char op = 0;
+ char op = '\0';
int newmask = minmask, i, len, found = 0;
/* <str> must be a list of tokens separated by whitespace
@@ -55,10 +55,10 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
* appears first in <str>, '*oldmask' is used as the starting point
* (relative), otherwise minmask is used (absolute). An operator
* applies to all following tokens up to the next operator. */
- while (*str != 0) {
+ while (*str != '\0') {
while (isspace(*str))
str++;
- if (*str == 0)
+ if (*str == '\0')
break;
if (*str == '+' || *str == '-') {
op = *str++;
@@ -67,13 +67,15 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
newmask = *oldmask;
while (isspace(*str))
str++;
- if (*str == 0) /* trailing op */
+ if (*str == '\0') /* trailing op */
return -EINVAL;
}
/* find token length */
- for (len = 0; str[len] != 0 && !isspace(str[len]) &&
- str[len] != '+' && str[len] != '-'; len++);
+ len = 0;
+ while (str[len] != '\0' && !isspace(str[len]) &&
+ str[len] != '+' && str[len] != '-')
+ len++;
/* match token */
found = 0;
@@ -132,7 +134,7 @@ char *cfs_firststr(char *str, size_t size)
++end;
}
- *end= '\0';
+ *end = '\0';
out:
return str;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index d71ad5ed1f6d..277f6b890e09 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -82,17 +82,12 @@ int cfs_cap_raised(cfs_cap_t cap)
return cap_raised(current_cap(), cap);
}
-void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
+static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
{
/* XXX lost high byte */
*cap = kcap.cap[0];
}
-void cfs_kernel_cap_unpack(kernel_cap_t *kcap, cfs_cap_t cap)
-{
- kcap->cap[0] = cap;
-}
-
cfs_cap_t cfs_curproc_cap_pack(void)
{
cfs_cap_t cap;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
index 83d3f08a37b2..c539e3741583 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -232,8 +232,9 @@ static int proc_debug_mb(struct ctl_table *table, int write,
__proc_debug_mb);
}
-int proc_console_max_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int proc_console_max_delay_cs(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
{
int rc, max_delay_cs;
struct ctl_table dummy = *table;
@@ -264,8 +265,9 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
return rc;
}
-int proc_console_min_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int proc_console_min_delay_cs(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
{
int rc, min_delay_cs;
struct ctl_table dummy = *table;
@@ -296,8 +298,8 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
return rc;
}
-int proc_console_backoff(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int proc_console_backoff(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int rc, backoff;
struct ctl_table dummy = *table;
@@ -324,16 +326,18 @@ int proc_console_backoff(struct ctl_table *table, int write,
return rc;
}
-int libcfs_force_lbug(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int libcfs_force_lbug(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
if (write)
LBUG();
return 0;
}
-int proc_fail_loc(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_fail_loc(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
int rc;
long old_fail_loc = cfs_fail_loc;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index b91a1f95bbd0..cd2fc01dea4c 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -43,7 +43,7 @@
/* For sys_open & sys_close */
#include <linux/syscalls.h>
-int
+static int
libcfs_sock_ioctl(int cmd, unsigned long arg)
{
mm_segment_t oldmm = get_fs();
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
index 976c61ed49f4..c8e293002e07 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
@@ -151,6 +151,7 @@ cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
* for details.
*/
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+	__acquires(&tcd->tcd_lock)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
@@ -165,6 +166,7 @@ int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+ __releases(&tcd->tcd_lock)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
@@ -269,5 +271,5 @@ int cfs_trace_max_debug_mb(void)
{
int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
- return MAX(512, (total_mb * 80)/100);
+ return max(512, (total_mb * 80)/100);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 2c4fc74505bc..7dc77dd402b8 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -42,8 +42,7 @@
#include "../../include/linux/lnet/lnet.h"
#include "tracefile.h"
-void
-kportal_memhog_free (struct libcfs_device_userstate *ldu)
+static void kportal_memhog_free (struct libcfs_device_userstate *ldu)
{
struct page **level0p = &ldu->ldu_memhog_root_page;
struct page **level1p;
@@ -86,8 +85,7 @@ kportal_memhog_free (struct libcfs_device_userstate *ldu)
LASSERT (ldu->ldu_memhog_pages == 0);
}
-int
-kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
+static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
gfp_t flags)
{
struct page **level0p;
@@ -334,8 +332,6 @@ extern struct mutex cfs_trace_thread_mutex;
extern struct cfs_wi_sched *cfs_sched_rehash;
extern void libcfs_init_nidstrings(void);
-extern int libcfs_arch_init(void);
-extern void libcfs_arch_cleanup(void);
static int init_libcfs_module(void)
{
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
index 47c239f22ba8..087449f4e6c1 100644
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -81,14 +81,105 @@ libcfs_next_nidstring(void)
return str;
}
-static int libcfs_lo_str2addr(const char *str, int nob, __u32 *addr);
-static void libcfs_ip_addr2str(__u32 addr, char *str);
-static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr);
-static void libcfs_decnum_addr2str(__u32 addr, char *str);
-static void libcfs_hexnum_addr2str(__u32 addr, char *str);
-static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr);
-static int libcfs_num_parse(char *str, int len, struct list_head *list);
-static int libcfs_num_match(__u32 addr, struct list_head *list);
+static int libcfs_lo_str2addr(const char *str, int nob, __u32 *addr)
+{
+ *addr = 0;
+ return 1;
+}
+
+static void libcfs_ip_addr2str(__u32 addr, char *str)
+{
+ snprintf(str, LNET_NIDSTR_SIZE, "%u.%u.%u.%u",
+ (addr >> 24) & 0xff, (addr >> 16) & 0xff,
+ (addr >> 8) & 0xff, addr & 0xff);
+}
+
+static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
+{
+ unsigned int a;
+ unsigned int b;
+ unsigned int c;
+ unsigned int d;
+ int n = nob; /* XscanfX */
+
+ /* numeric IP? */
+ if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
+ n == nob &&
+ (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
+ (c & ~0xff) == 0 && (d & ~0xff) == 0) {
+ *addr = ((a<<24)|(b<<16)|(c<<8)|d);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void libcfs_decnum_addr2str(__u32 addr, char *str)
+{
+ snprintf(str, LNET_NIDSTR_SIZE, "%u", addr);
+}
+
+static void libcfs_hexnum_addr2str(__u32 addr, char *str)
+{
+ snprintf(str, LNET_NIDSTR_SIZE, "0x%x", addr);
+}
+
+static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr)
+{
+ int n;
+
+ n = nob;
+ if (sscanf(str, "0x%x%n", addr, &n) >= 1 && n == nob)
+ return 1;
+
+ n = nob;
+ if (sscanf(str, "0X%x%n", addr, &n) >= 1 && n == nob)
+ return 1;
+
+ n = nob;
+ if (sscanf(str, "%u%n", addr, &n) >= 1 && n == nob)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Nf_parse_addrlist method for networks using numeric addresses.
+ *
+ * Examples of such networks are gm and elan.
+ *
+ * \retval 0 if \a str parsed to numeric address
+ * \retval errno otherwise
+ */
+static int
+libcfs_num_parse(char *str, int len, struct list_head *list)
+{
+ struct cfs_expr_list *el;
+ int rc;
+
+ rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
+ if (rc == 0)
+ list_add_tail(&el->el_link, list);
+
+ return rc;
+}
+
+/*
+ * Nf_match_addr method for networks using numeric addresses
+ *
+ * \retval 1 on match
+ * \retval 0 otherwise
+ */
+static int
+libcfs_num_match(__u32 addr, struct list_head *numaddr)
+{
+ struct cfs_expr_list *el;
+
+ LASSERT(!list_empty(numaddr));
+ el = list_entry(numaddr->next, struct cfs_expr_list, el_link);
+
+ return cfs_expr_list_match(addr, el);
+}
struct netstrfns {
int nf_type;
@@ -197,24 +288,7 @@ static struct netstrfns libcfs_netstrfns[] = {
{/* .nf_type */ -1},
};
-const int libcfs_nnetstrfns = ARRAY_SIZE(libcfs_netstrfns);
-
-int
-libcfs_lo_str2addr(const char *str, int nob, __u32 *addr)
-{
- *addr = 0;
- return 1;
-}
-
-void
-libcfs_ip_addr2str(__u32 addr, char *str)
-{
-#if 0 /* never lookup */
-#endif
- snprintf(str, LNET_NIDSTR_SIZE, "%u.%u.%u.%u",
- (addr >> 24) & 0xff, (addr >> 16) & 0xff,
- (addr >> 8) & 0xff, addr & 0xff);
-}
+static const int libcfs_nnetstrfns = ARRAY_SIZE(libcfs_netstrfns);
/* CAVEAT EMPTOR XscanfX
* I use "%n" at the end of a sscanf format to detect trailing junk. However
@@ -223,60 +297,7 @@ libcfs_ip_addr2str(__u32 addr, char *str)
* fine, if it doesn't, then the scan ended at the end of the string, which is
* fine too :) */
-int
-libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
-{
- unsigned int a;
- unsigned int b;
- unsigned int c;
- unsigned int d;
- int n = nob; /* XscanfX */
-
- /* numeric IP? */
- if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
- n == nob &&
- (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
- (c & ~0xff) == 0 && (d & ~0xff) == 0) {
- *addr = ((a<<24)|(b<<16)|(c<<8)|d);
- return 1;
- }
-
- return 0;
-}
-
-void
-libcfs_decnum_addr2str(__u32 addr, char *str)
-{
- snprintf(str, LNET_NIDSTR_SIZE, "%u", addr);
-}
-
-void
-libcfs_hexnum_addr2str(__u32 addr, char *str)
-{
- snprintf(str, LNET_NIDSTR_SIZE, "0x%x", addr);
-}
-
-int
-libcfs_num_str2addr(const char *str, int nob, __u32 *addr)
-{
- int n;
-
- n = nob;
- if (sscanf(str, "0x%x%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- n = nob;
- if (sscanf(str, "0X%x%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- n = nob;
- if (sscanf(str, "%u%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- return 0;
-}
-
-struct netstrfns *
+static struct netstrfns *
libcfs_lnd2netstrfns(int lnd)
{
int i;
@@ -289,7 +310,7 @@ libcfs_lnd2netstrfns(int lnd)
return NULL;
}
-struct netstrfns *
+static struct netstrfns *
libcfs_namenum2netstrfns(const char *name)
{
struct netstrfns *nf;
@@ -304,7 +325,7 @@ libcfs_namenum2netstrfns(const char *name)
return NULL;
}
-struct netstrfns *
+static struct netstrfns *
libcfs_name2netstrfns(const char *name)
{
int i;
@@ -343,7 +364,7 @@ libcfs_lnd2str(int lnd)
return nf->nf_name;
str = libcfs_next_nidstring();
- snprintf(str, LNET_NIDSTR_SIZE, "?%u?", lnd);
+ snprintf(str, LNET_NIDSTR_SIZE, "?%d?", lnd);
return str;
}
EXPORT_SYMBOL(libcfs_lnd2str);
@@ -369,11 +390,11 @@ libcfs_net2str(__u32 net)
char *str = libcfs_next_nidstring();
if (nf == NULL)
- snprintf(str, LNET_NIDSTR_SIZE, "<%u:%u>", lnd, num);
+ snprintf(str, LNET_NIDSTR_SIZE, "<%d:%d>", lnd, num);
else if (num == 0)
snprintf(str, LNET_NIDSTR_SIZE, "%s", nf->nf_name);
else
- snprintf(str, LNET_NIDSTR_SIZE, "%s%u", nf->nf_name, num);
+ snprintf(str, LNET_NIDSTR_SIZE, "%s%d", nf->nf_name, num);
return str;
}
@@ -397,7 +418,7 @@ libcfs_nid2str(lnet_nid_t nid)
str = libcfs_next_nidstring();
if (nf == NULL)
- snprintf(str, LNET_NIDSTR_SIZE, "%x@<%u:%u>", addr, lnd, nnum);
+ snprintf(str, LNET_NIDSTR_SIZE, "%x@<%d:%d>", addr, lnd, nnum);
else {
nf->nf_addr2str(addr, str);
nob = strlen(str);
@@ -405,7 +426,7 @@ libcfs_nid2str(lnet_nid_t nid)
snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s",
nf->nf_name);
else
- snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s%u",
+ snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s%d",
nf->nf_name, nnum);
}
@@ -586,27 +607,6 @@ struct addrrange {
};
/**
- * Nf_parse_addrlist method for networks using numeric addresses.
- *
- * Examples of such networks are gm and elan.
- *
- * \retval 0 if \a str parsed to numeric address
- * \retval errno otherwise
- */
-static int
-libcfs_num_parse(char *str, int len, struct list_head *list)
-{
- struct cfs_expr_list *el;
- int rc;
-
- rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
- if (rc == 0)
- list_add_tail(&el->el_link, list);
-
- return rc;
-}
-
-/**
* Parses \<addrrange\> token on the syntax.
*
* Allocates struct addrrange and links to \a nidrange via
@@ -812,23 +812,6 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
}
EXPORT_SYMBOL(cfs_parse_nidlist);
-/*
- * Nf_match_addr method for networks using numeric addresses
- *
- * \retval 1 on match
- * \retval 0 otherwise
- */
-static int
-libcfs_num_match(__u32 addr, struct list_head *numaddr)
-{
- struct cfs_expr_list *el;
-
- LASSERT(!list_empty(numaddr));
- el = list_entry(numaddr->next, struct cfs_expr_list, el_link);
-
- return cfs_expr_list_match(addr, el);
-}
-
/**
* Matches a nid (\a nid) against the compiled list of nidranges (\a nidlist).
*
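The CAVEAT EMPTOR comment above explains the trick used by libcfs_ip_str2addr() and libcfs_num_str2addr(): prime n with the expected length, append "%n" to the format, and accept the parse only when n still equals nob afterwards, so trailing junk (or an sscanf that never writes %n) makes the test fail. A small standalone illustration; whole_string_is_decimal() is a made-up helper, not part of libcfs.

#include <stdio.h>

/* Return 1 iff all nob bytes of str form one decimal number, in the style
 * of libcfs_num_str2addr(): n is primed with nob, so a scan that stops
 * early (or never writes %n) fails the n == nob check. */
static int whole_string_is_decimal(const char *str, int nob, unsigned int *val)
{
	int n = nob;

	return sscanf(str, "%u%n", val, &n) >= 1 && n == nob;
}

int main(void)
{
	unsigned int v;

	printf("%d\n", whole_string_is_decimal("1234", 4, &v));	/* 1 */
	printf("%d\n", whole_string_is_decimal("12xy", 4, &v));	/* 0: trailing junk */
	return 0;
}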
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 5917c31c7ed6..eb65b50f832d 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -55,7 +55,7 @@ static struct tracefiled_ctl trace_tctl;
struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;
-atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
+static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_cpu_data *tcd);
@@ -1037,6 +1037,7 @@ static int tracefiled(void *arg)
tage->used, rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
+ break;
}
}
MMSPACE_CLOSE;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 1ac7a702ce26..a18201913273 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -183,7 +183,10 @@ static int ll_dir_filler(void *_hash, struct page *page0)
op_data->op_offset = hash;
rc = md_readpage(exp, op_data, page_pool, &request);
ll_finish_md_op_data(op_data);
- if (rc == 0) {
+ if (rc < 0) {
+ /* page0 is special, which was added into page cache early */
+ delete_from_page_cache(page0);
+ } else if (rc == 0) {
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
/* Checked by mdc_readpage() */
LASSERT(body != NULL);
@@ -278,7 +281,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
spin_lock_irq(&mapping->tree_lock);
found = radix_tree_gang_lookup(&mapping->page_tree,
(void **)&page, offset, 1);
- if (found > 0) {
+ if (found > 0 && !radix_tree_exceptional_entry(page)) {
struct lu_dirpage *dp;
page_cache_get(page);
@@ -652,8 +655,8 @@ static int ll_send_mgc_param(struct obd_export *mgc, char *string)
return rc;
}
-int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
- char *filename)
+static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
+ char *filename)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 35a2df01528c..7c7ef7ec908e 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1553,6 +1553,11 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
struct ccc_grouplock grouplock;
int rc;
+ if (arg == 0) {
+ CWARN("group id for group lock must not be 0\n");
+ return -EINVAL;
+ }
+
if (ll_file_nolock(file))
return -EOPNOTSUPP;
@@ -1587,7 +1592,8 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
return 0;
}
-int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
+static int ll_put_grouplock(struct inode *inode, struct file *file,
+ unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index a3367bfb1456..45aaa1cc56bc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -987,7 +987,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
if (err)
goto out_free;
lsi->lsi_flags |= LSI_BDI_INITIALIZED;
- lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
+ lsi->lsi_bdi.capabilities = 0;
err = ll_bdi_register(&lsi->lsi_bdi);
if (err)
goto out_free;
@@ -1812,10 +1812,6 @@ void ll_read_inode2(struct inode *inode, void *opaque)
/* OIDEBUG(inode); */
- /* initializing backing dev info. */
- inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
-
-
if (S_ISREG(inode->i_mode)) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index e6a909e6faf0..aaa13bd3e8de 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -399,9 +399,6 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
return -ERANGE;
}
- if (sbi->ll_dt_exp == NULL)
- return -ENODEV;
-
spin_lock(&sbi->ll_lock);
diff = pages_number - cache->ccc_lru_max;
spin_unlock(&sbi->ll_lock);
@@ -437,6 +434,11 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
if (diff <= 0)
break;
+ if (sbi->ll_dt_exp == NULL) { /* being initialized */
+ rc = -ENODEV;
+ break;
+ }
+
/* difficult - have to ask OSCs to drop LRU slots. */
tmp = diff << 1;
rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 6aff155651cc..7c1e02a031ba 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -72,21 +72,6 @@ static void ll_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, ll_inode_destroy_callback);
}
-static int ll_init_inodecache(void)
-{
- ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (ll_inode_cachep == NULL)
- return -ENOMEM;
- return 0;
-}
-
-static void ll_destroy_inodecache(void)
-{
- kmem_cache_destroy(ll_inode_cachep);
-}
-
/* exported operations */
struct super_operations lustre_super_operations = {
.alloc_inode = ll_alloc_inode,
@@ -104,9 +89,10 @@ void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
static int __init init_lustre_lite(void)
{
- int i, rc, seed[2];
- struct timeval tv;
+ struct proc_dir_entry *entry;
lnet_process_id_t lnet_id;
+ struct timeval tv;
+ int i, rc, seed[2];
CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);
@@ -116,59 +102,52 @@ static int __init init_lustre_lite(void)
CDEBUG(D_INFO, "Lustre client module (%p).\n",
&lustre_super_operations);
- rc = ll_init_inodecache();
- if (rc)
- return -ENOMEM;
+ rc = -ENOMEM;
+ ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
+ sizeof(struct ll_inode_info),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (ll_inode_cachep == NULL)
+ goto out_cache;
+
ll_file_data_slab = kmem_cache_create("ll_file_data",
sizeof(struct ll_file_data), 0,
SLAB_HWCACHE_ALIGN, NULL);
- if (ll_file_data_slab == NULL) {
- ll_destroy_inodecache();
- return -ENOMEM;
- }
+ if (ll_file_data_slab == NULL)
+ goto out_cache;
ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
sizeof(struct ll_remote_perm),
0, 0, NULL);
- if (ll_remote_perm_cachep == NULL) {
- kmem_cache_destroy(ll_file_data_slab);
- ll_file_data_slab = NULL;
- ll_destroy_inodecache();
- return -ENOMEM;
- }
+ if (ll_remote_perm_cachep == NULL)
+ goto out_cache;
ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
REMOTE_PERM_HASHSIZE *
sizeof(struct list_head),
0, 0, NULL);
- if (ll_rmtperm_hash_cachep == NULL) {
- kmem_cache_destroy(ll_remote_perm_cachep);
- ll_remote_perm_cachep = NULL;
- kmem_cache_destroy(ll_file_data_slab);
- ll_file_data_slab = NULL;
- ll_destroy_inodecache();
- return -ENOMEM;
+ if (ll_rmtperm_hash_cachep == NULL)
+ goto out_cache;
+
+ entry = lprocfs_register("llite", proc_lustre_root, NULL, NULL);
+ if (IS_ERR(entry)) {
+ rc = PTR_ERR(entry);
+ CERROR("cannot register '/proc/fs/lustre/llite': rc = %d\n",
+ rc);
+ goto out_cache;
}
- proc_lustre_fs_root = proc_lustre_root ?
- lprocfs_register("llite", proc_lustre_root, NULL, NULL) : NULL;
-
- lustre_register_client_fill_super(ll_fill_super);
- lustre_register_kill_super_cb(ll_kill_super);
-
- lustre_register_client_process_config(ll_process_config);
+ proc_lustre_fs_root = entry;
cfs_get_random_bytes(seed, sizeof(seed));
- /* Nodes with small feet have little entropy
- * the NID for this node gives the most entropy in the low bits */
- for (i = 0; ; i++) {
- if (LNetGetId(i, &lnet_id) == -ENOENT) {
+ /* Nodes with small feet have little entropy. The NID for this
+ * node gives the most entropy in the low bits */
+ for (i = 0;; i++) {
+ if (LNetGetId(i, &lnet_id) == -ENOENT)
break;
- }
- if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND) {
+
+ if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND)
seed[0] ^= LNET_NIDADDR(lnet_id.nid);
- }
}
do_gettimeofday(&tv);
@@ -177,20 +156,54 @@ static int __init init_lustre_lite(void)
init_timer(&ll_capa_timer);
ll_capa_timer.function = ll_capa_timer_callback;
rc = ll_capa_thread_start();
- /*
- * XXX normal cleanup is needed here.
- */
- if (rc == 0)
- rc = vvp_global_init();
+ if (rc != 0)
+ goto out_proc;
- if (rc == 0)
- rc = ll_xattr_init();
+ rc = vvp_global_init();
+ if (rc != 0)
+ goto out_capa;
+
+ rc = ll_xattr_init();
+ if (rc != 0)
+ goto out_vvp;
+
+ lustre_register_client_fill_super(ll_fill_super);
+ lustre_register_kill_super_cb(ll_kill_super);
+ lustre_register_client_process_config(ll_process_config);
+
+ return 0;
+
+out_vvp:
+ vvp_global_fini();
+out_capa:
+ del_timer(&ll_capa_timer);
+ ll_capa_thread_stop();
+out_proc:
+ lprocfs_remove(&proc_lustre_fs_root);
+out_cache:
+ if (ll_inode_cachep != NULL)
+ kmem_cache_destroy(ll_inode_cachep);
+
+ if (ll_file_data_slab != NULL)
+ kmem_cache_destroy(ll_file_data_slab);
+
+ if (ll_remote_perm_cachep != NULL)
+ kmem_cache_destroy(ll_remote_perm_cachep);
+
+ if (ll_rmtperm_hash_cachep != NULL)
+ kmem_cache_destroy(ll_rmtperm_hash_cachep);
return rc;
}
static void __exit exit_lustre_lite(void)
{
+ lustre_register_client_fill_super(NULL);
+ lustre_register_kill_super_cb(NULL);
+ lustre_register_client_process_config(NULL);
+
+ lprocfs_remove(&proc_lustre_fs_root);
+
ll_xattr_fini();
vvp_global_fini();
del_timer(&ll_capa_timer);
@@ -199,22 +212,12 @@ static void __exit exit_lustre_lite(void)
"client remaining capa count %d\n",
capa_count[CAPA_SITE_CLIENT]);
- lustre_register_client_fill_super(NULL);
- lustre_register_kill_super_cb(NULL);
-
- lustre_register_client_process_config(NULL);
-
- ll_destroy_inodecache();
-
+ kmem_cache_destroy(ll_inode_cachep);
kmem_cache_destroy(ll_rmtperm_hash_cachep);
- ll_rmtperm_hash_cachep = NULL;
kmem_cache_destroy(ll_remote_perm_cachep);
- ll_remote_perm_cachep = NULL;
kmem_cache_destroy(ll_file_data_slab);
- if (proc_lustre_fs_root && !IS_ERR(proc_lustre_fs_root))
- lprocfs_remove(&proc_lustre_fs_root);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
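The init_lustre_lite() rework above replaces per-failure cleanup blocks with one goto-based unwind (out_vvp/out_capa/out_proc/out_cache), the usual kernel idiom for module init paths. A reduced, compilable sketch of that shape; step_a/step_b/step_c and their *_fini() counterparts are placeholders, not the real llite symbols.

#include <stdio.h>

/* Placeholder setup/teardown steps standing in for the cache, /proc and
 * thread setup done by init_lustre_lite(); each prints so the unwind
 * order is visible. */
static int step_a_init(void) { puts("a init"); return 0; }
static void step_a_fini(void) { puts("a fini"); }
static int step_b_init(void) { puts("b init"); return 0; }
static void step_b_fini(void) { puts("b fini"); }
static int step_c_init(void) { puts("c init (failing)"); return -1; }

static int example_init(void)
{
	int rc;

	rc = step_a_init();
	if (rc != 0)
		goto out;		/* nothing to undo yet */
	rc = step_b_init();
	if (rc != 0)
		goto out_a;		/* undo step_a only */
	rc = step_c_init();
	if (rc != 0)
		goto out_b;		/* undo step_b, then step_a */
	return 0;

out_b:
	step_b_fini();
out_a:
	step_a_fini();
out:
	return rc;
}

int main(void)
{
	printf("example_init() = %d\n", example_init());
	return 0;
}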
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..91bba79678cf 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -307,18 +307,13 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
static int vvp_io_read_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
+ struct cl_io *io = ios->cis_io;
+ struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
int result;
- /* XXX: Layer violation, we shouldn't see lsm at llite level. */
- if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
- result = vvp_io_rw_lock(env, io, CLM_READ,
- io->u.ci_rd.rd.crw_pos,
- io->u.ci_rd.rd.crw_pos +
- io->u.ci_rd.rd.crw_count - 1);
- else
- result = 0;
+ result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
+ rd->crw_pos + rd->crw_count - 1);
+
return result;
}
@@ -632,7 +627,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
return 0;
}
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index 372633e164b9..f354e82d4ae7 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -71,6 +71,7 @@ static const struct cl_lock_operations vvp_lock_ops = {
.clo_fini = ccc_lock_fini,
.clo_enqueue = ccc_lock_enqueue,
.clo_wait = ccc_lock_wait,
+ .clo_use = ccc_lock_use,
.clo_unuse = ccc_lock_unuse,
.clo_fits_into = ccc_lock_fits_into,
.clo_state = ccc_lock_state,
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9f3837412cdf..b779f47384c5 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -128,7 +128,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
return rc;
}
-struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
+static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
@@ -335,7 +335,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
#define MAX_STRING_SIZE 128
-int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
+static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
{
struct proc_dir_entry *lmv_proc_dir;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -1663,10 +1663,10 @@ struct lmv_tgt_desc
return tgt;
}
-int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid,
- __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
+static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
+ const void *data, int datalen, int mode, __u32 uid,
+ __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
+ struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2387,9 +2387,9 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
}
-int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
+static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ u32 keylen, void *key, u32 vallen,
+ void *val, struct ptlrpc_request_set *set)
{
struct lmv_tgt_desc *tgt;
struct obd_device *obd;
@@ -2425,8 +2425,8 @@ int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
}
-int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
+static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2473,8 +2473,8 @@ int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
return mea_size;
}
-int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_size)
+static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_mds_md *lmm, int lmm_size)
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
@@ -2551,8 +2551,8 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
return rc;
}
-int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
- __u64 *bits)
+static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
+ __u64 *bits)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
int rc;
@@ -2561,10 +2561,10 @@ int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
return rc;
}
-ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
+static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, ldlm_type_t type,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ struct lustre_handle *lockh)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2594,16 +2594,18 @@ ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
return 0;
}
-int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
- struct obd_export *dt_exp, struct obd_export *md_exp,
- struct lustre_md *md)
+static int lmv_get_lustre_md(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ struct obd_export *dt_exp,
+ struct obd_export *md_exp,
+ struct lustre_md *md)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
}
-int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
+static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2613,9 +2615,9 @@ int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
}
-int lmv_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
+static int lmv_set_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och,
+ struct lookup_intent *it)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2628,8 +2630,8 @@ int lmv_set_open_replay_data(struct obd_export *exp,
return md_set_open_replay_data(tgt->ltd_exp, och, it);
}
-int lmv_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
+static int lmv_clear_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2684,17 +2686,18 @@ static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
return rc;
}
-int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
- const struct req_msg_field *field, struct obd_capa **oc)
+static int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
+ const struct req_msg_field *field,
+ struct obd_capa **oc)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
return md_unpack_capa(lmv->tgts[0]->ltd_exp, req, field, oc);
}
-int lmv_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo,
- struct ldlm_enqueue_info *einfo)
+static int lmv_intent_getattr_async(struct obd_export *exp,
+ struct md_enqueue_info *minfo,
+ struct ldlm_enqueue_info *einfo)
{
struct md_op_data *op_data = &minfo->mi_data;
struct obd_device *obd = exp->exp_obd;
@@ -2714,8 +2717,8 @@ int lmv_intent_getattr_async(struct obd_export *exp,
return rc;
}
-int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
+static int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u64 *bits)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2739,8 +2742,8 @@ int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
* process with other slave MDTs. The only exception is Q_GETOQUOTA for which
* we directly fetch data from the slave MDTs.
*/
-int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
+static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2786,8 +2789,8 @@ int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
-int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
+static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2810,7 +2813,7 @@ int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
return rc;
}
-struct obd_ops lmv_obd_ops = {
+static struct obd_ops lmv_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = lmv_setup,
.o_cleanup = lmv_cleanup,
@@ -2830,7 +2833,7 @@ struct obd_ops lmv_obd_ops = {
.o_quotactl = lmv_quotactl
};
-struct md_ops lmv_md_ops = {
+static struct md_ops lmv_md_ops = {
.m_getstatus = lmv_getstatus,
.m_null_inode = lmv_null_inode,
.m_find_cbdata = lmv_find_cbdata,
@@ -2864,7 +2867,7 @@ struct md_ops lmv_md_ops = {
.m_revalidate_lock = lmv_revalidate_lock
};
-int __init lmv_init(void)
+static int __init lmv_init(void)
{
struct lprocfs_static_vars lvars;
int rc;
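lmv_obd_ops and lmv_md_ops above are ordinary C operation tables: structs of function pointers filled in with designated initializers, now marked static because only this file registers them. A minimal sketch of the idiom with invented operation names, not the real obd_ops fields.

#include <stdio.h>

struct example_ops {
	int (*o_setup)(const char *name);
	int (*o_cleanup)(const char *name);
};

static int my_setup(const char *name)
{
	printf("setup %s\n", name);
	return 0;
}

static int my_cleanup(const char *name)
{
	printf("cleanup %s\n", name);
	return 0;
}

/* file-local vtable, dispatched through like obd_ops/md_ops */
static struct example_ops my_obd_ops = {
	.o_setup   = my_setup,
	.o_cleanup = my_cleanup,
};

int main(void)
{
	my_obd_ops.o_setup("lmv");
	my_obd_ops.o_cleanup("lmv");
	return 0;
}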
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index 117002097b28..5be4176829d3 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -175,7 +175,7 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v)
tgt->ltd_uuid.uuid, tgt->ltd_active ? "" : "IN");
}
-struct seq_operations lmv_tgt_sops = {
+static struct seq_operations lmv_tgt_sops = {
.start = lmv_tgt_seq_start,
.stop = lmv_tgt_seq_stop,
.next = lmv_tgt_seq_next,
@@ -199,7 +199,7 @@ static int lmv_target_seq_open(struct inode *inode, struct file *file)
LPROC_SEQ_FOPS_RO_TYPE(lmv, uuid);
-struct lprocfs_vars lprocfs_lmv_obd_vars[] = {
+static struct lprocfs_vars lprocfs_lmv_obd_vars[] = {
{ "numobd", &lmv_numobd_fops, NULL, 0 },
{ "placement", &lmv_placement_fops, NULL, 0 },
{ "activeobd", &lmv_activeobd_fops, NULL, 0 },
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index c993f25fb303..c99f2f44ec62 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -51,8 +51,9 @@ static int lov_stripesize_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%llu\n", desc->ld_default_stripe_size);
}
-static ssize_t lov_stripesize_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lov_stripesize_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
@@ -81,8 +82,9 @@ static int lov_stripeoffset_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%llu\n", desc->ld_default_stripe_offset);
}
-static ssize_t lov_stripeoffset_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lov_stripeoffset_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
@@ -110,8 +112,9 @@ static int lov_stripetype_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", desc->ld_pattern);
}
-static ssize_t lov_stripetype_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lov_stripetype_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
@@ -140,8 +143,9 @@ static int lov_stripecount_seq_show(struct seq_file *m, void *v)
(__s16)(desc->ld_default_stripe_count + 1) - 1);
}
-static ssize_t lov_stripecount_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lov_stripecount_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 16341c818358..c791941bd810 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -52,7 +52,7 @@ static int mdc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
}
static ssize_t mdc_max_rpcs_in_flight_seq_write(struct file *file,
- const char *buffer,
+ const char __user *buffer,
size_t count,
loff_t *off)
{
@@ -82,8 +82,9 @@ static int mdc_kuc_open(struct inode *inode, struct file *file)
}
/* temporary for testing */
-static ssize_t mdc_kuc_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t mdc_kuc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd =
((struct seq_file *)file->private_data)->private;
@@ -105,6 +106,8 @@ static ssize_t mdc_kuc_write(struct file *file, const char *buffer,
/* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
OBD_ALLOC(lh, len);
+ if (!lh)
+ return -ENOMEM;
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_HSM;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 4e59995e0042..d3234cb1ea22 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -222,10 +222,9 @@ void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->cr_cap = cfs_curproc_cap_pack();
- if (op_data != NULL) {
- rec->cr_fid1 = op_data->op_fid1;
- rec->cr_fid2 = op_data->op_fid2;
- }
+ rec->cr_fid1 = op_data->op_fid1;
+ rec->cr_fid2 = op_data->op_fid2;
+
rec->cr_mode = mode;
cr_flags = mds_pack_open_flags(flags, mode);
rec->cr_rdev = rdev;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 8c9b4f5494e9..d1c224ecd2b7 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -828,6 +828,7 @@ resend:
einfo->ei_type);
policy = (ldlm_policy_data_t *)lmm;
res_id.name[3] = LDLM_FLOCK;
+ req = NULL;
} else if (it->it_op & IT_OPEN) {
req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize,
einfo->ei_cbdata);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 3b0f245a8780..ef2744700d8b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -855,8 +855,8 @@ static void mdc_close_handle_reply(struct ptlrpc_request *req,
}
}
-int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod, struct ptlrpc_request **request)
+static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_open_data *mod, struct ptlrpc_request **request)
{
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
@@ -974,8 +974,8 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
return rc < 0 ? rc : saved_rc;
}
-int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod)
+static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_open_data *mod)
{
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
@@ -1044,8 +1044,8 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
}
-int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
- struct page **pages, struct ptlrpc_request **request)
+static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
@@ -1908,8 +1908,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid)))) {
+ min_t(size_t, data->ioc_plen2,
+ sizeof(struct obd_uuid)))) {
rc = -EFAULT;
goto out;
}
@@ -1921,8 +1921,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
goto out;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int) data->ioc_plen1,
- (int) sizeof(stat_buf)))) {
+ min_t(size_t, data->ioc_plen1,
+ sizeof(stat_buf)))) {
rc = -EFAULT;
goto out;
}
@@ -1974,9 +1974,9 @@ out:
return rc;
}
-int mdc_get_info_rpc(struct obd_export *exp,
- u32 keylen, void *key,
- int vallen, void *val)
+static int mdc_get_info_rpc(struct obd_export *exp,
+ u32 keylen, void *key,
+ int vallen, void *val)
{
struct obd_import *imp = class_exp2cliimp(exp);
struct ptlrpc_request *req;
@@ -2148,11 +2148,11 @@ static int mdc_kuc_reregister(struct obd_import *imp)
(void *)imp);
}
-int mdc_set_info_async(const struct lu_env *env,
- struct obd_export *exp,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set)
+static int mdc_set_info_async(const struct lu_env *env,
+ struct obd_export *exp,
+ u32 keylen, void *key,
+ u32 vallen, void *val,
+ struct ptlrpc_request_set *set)
{
struct obd_import *imp = class_exp2cliimp(exp);
int rc;
@@ -2199,9 +2199,9 @@ int mdc_set_info_async(const struct lu_env *env,
return -EINVAL;
}
-int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
{
int rc = -EINVAL;
@@ -2263,8 +2263,8 @@ int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
return rc;
}
-int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, struct ptlrpc_request **request)
+static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
int rc;
@@ -2356,7 +2356,7 @@ int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
return seq_client_alloc_fid(NULL, seq, fid);
}
-struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
+static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
@@ -2390,7 +2390,7 @@ static int mdc_resource_inode_free(struct ldlm_resource *res)
return 0;
}
-struct ldlm_valblock_ops inode_lvbo = {
+static struct ldlm_valblock_ops inode_lvbo = {
.lvbo_free = mdc_resource_inode_free,
};
@@ -2550,9 +2550,9 @@ static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
/* get remote permission for current user on fid */
-int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u32 suppgid,
- struct ptlrpc_request **request)
+static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, __u32 suppgid,
+ struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
int rc;
@@ -2647,7 +2647,7 @@ static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
return 0;
}
-struct obd_ops mdc_obd_ops = {
+static struct obd_ops mdc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = mdc_setup,
.o_precleanup = mdc_precleanup,
@@ -2670,7 +2670,7 @@ struct obd_ops mdc_obd_ops = {
.o_quotacheck = mdc_quotacheck
};
-struct md_ops mdc_md_ops = {
+static struct md_ops mdc_md_ops = {
.m_getstatus = mdc_getstatus,
.m_null_inode = mdc_null_inode,
.m_find_cbdata = mdc_find_cbdata,
@@ -2705,7 +2705,7 @@ struct md_ops mdc_md_ops = {
.m_revalidate_lock = mdc_revalidate_lock
};
-int __init mdc_init(void)
+static int __init mdc_init(void)
{
int rc;
struct lprocfs_static_vars lvars = { NULL };
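The mdc_iocontrol() hunks above replace min((int)len, (int)size) with min_t(size_t, len, size). A small sketch of why the int casts are risky when a user-supplied length is large; the min_t() below is a simplified stand-in for the kernel macro.

#include <stdio.h>
#include <stddef.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	size_t plen = 0x80000000UL;	/* bogus huge length from user space */
	size_t uuid_sz = 40;

	/* casting to int typically wraps plen negative, so the "minimum"
	 * comes out as a garbage negative value */
	int bad = (int)plen < (int)uuid_sz ? (int)plen : (int)uuid_sz;
	/* comparing both values as size_t gives the expected answer */
	size_t good = min_t(size_t, plen, uuid_sz);

	printf("casted-to-int min: %d\n", bad);
	printf("min_t(size_t):     %zu\n", good);	/* 40 */
	return 0;
}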
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index ce96bd279111..f13d1fbffd9d 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -193,6 +193,7 @@ static spinlock_t *cl_object_attr_guard(struct cl_object *o)
* cl_object_attr_get(), cl_object_attr_set().
*/
void cl_object_attr_lock(struct cl_object *o)
+ __acquires(cl_object_attr_guard(o))
{
spin_lock(cl_object_attr_guard(o));
}
@@ -202,6 +203,7 @@ EXPORT_SYMBOL(cl_object_attr_lock);
* Releases data-attributes lock, acquired by cl_object_attr_lock().
*/
void cl_object_attr_unlock(struct cl_object *o)
+ __releases(cl_object_attr_guard(o))
{
spin_unlock(cl_object_attr_guard(o));
}
@@ -662,7 +664,8 @@ static int cl_env_store_init(void) {
return cl_env_hash != NULL ? 0 :-ENOMEM;
}
-static void cl_env_store_fini(void) {
+static void cl_env_store_fini(void)
+{
cfs_hash_putref(cl_env_hash);
}
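The __acquires()/__releases() lines added to cl_object_attr_lock()/cl_object_attr_unlock() here (and to cfs_trace_lock_tcd()/cfs_trace_unlock_tcd() earlier) are sparse context annotations: they tell sparse that the function exits holding, or having dropped, the named lock, so unbalanced callers get flagged. A compilable userspace sketch of the same shape; the fallback macros mimic linux/compiler.h and attr_guard is a placeholder lock, not the cl_object guard.

#include <pthread.h>

/* Outside the kernel tree the annotations only matter to sparse
 * (__CHECKER__); plain gcc sees empty macros, so the attribute never
 * reaches the compiler. */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static pthread_mutex_t attr_guard = PTHREAD_MUTEX_INITIALIZER;

/* returns with attr_guard held */
static void attr_lock(void) __acquires(&attr_guard)
{
	pthread_mutex_lock(&attr_guard);
}

/* returns with attr_guard dropped */
static void attr_unlock(void) __releases(&attr_guard)
{
	pthread_mutex_unlock(&attr_guard);
}

int main(void)
{
	attr_lock();
	/* ... touch the attributes ... */
	attr_unlock();
	return 0;
}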
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 89a3fb2e56b2..29456e1ad225 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -61,7 +61,7 @@ __u64 obd_alloc;
EXPORT_SYMBOL(obd_alloc);
__u64 obd_pages;
EXPORT_SYMBOL(obd_pages);
-DEFINE_SPINLOCK(obd_updatemax_lock);
+static DEFINE_SPINLOCK(obd_updatemax_lock);
/* The following are visible and mutable through /proc/sys/lustre/. */
unsigned int obd_alloc_fail_rate = 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 736ca410aca3..82508210465e 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -1151,22 +1151,24 @@ void class_export_recovery_cleanup(struct obd_export *exp)
exp->exp_obd->obd_stale_clients++;
}
spin_unlock(&obd->obd_recovery_task_lock);
+
+ spin_lock(&exp->exp_lock);
/** Cleanup req replay fields */
if (exp->exp_req_replay_needed) {
- spin_lock(&exp->exp_lock);
exp->exp_req_replay_needed = 0;
- spin_unlock(&exp->exp_lock);
+
LASSERT(atomic_read(&obd->obd_req_replay_clients));
atomic_dec(&obd->obd_req_replay_clients);
}
+
/** Cleanup lock replay data */
if (exp->exp_lock_replay_needed) {
- spin_lock(&exp->exp_lock);
exp->exp_lock_replay_needed = 0;
- spin_unlock(&exp->exp_lock);
+
LASSERT(atomic_read(&obd->obd_lock_replay_clients));
atomic_dec(&obd->obd_lock_replay_clients);
}
+ spin_unlock(&exp->exp_lock);
}
/* This function removes 1-3 references from the export:
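The class_export_recovery_cleanup() hunk above widens the lock scope: exp->exp_lock is now taken once around both replay-flag clears instead of being acquired and released per flag. A reduced sketch of the resulting shape; the mutex and flags are placeholders, not the real obd_export fields.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t exp_lock = PTHREAD_MUTEX_INITIALIZER;
static int req_replay_needed = 1;
static int lock_replay_needed = 1;

static void recovery_cleanup(void)
{
	/* one critical section instead of two lock/unlock pairs */
	pthread_mutex_lock(&exp_lock);
	if (req_replay_needed) {
		req_replay_needed = 0;
		/* decrement the req-replay client counter here */
	}
	if (lock_replay_needed) {
		lock_replay_needed = 0;
		/* decrement the lock-replay client counter here */
	}
	pthread_mutex_unlock(&exp_lock);
}

int main(void)
{
	recovery_cleanup();
	printf("req=%d lock=%d\n", req_replay_needed, lock_replay_needed);
	return 0;
}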
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 66ceab20c743..b94aeac18a37 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -83,9 +83,8 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
int err;
int offset = 0;
- err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
- if (err)
- return err;
+ if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
+ return -EFAULT;
if (hdr.ioc_version != OBD_IOCTL_VERSION) {
CERROR("Version mismatch kernel (%x) vs application (%x)\n",
@@ -117,18 +116,19 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
- err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
- if (err) {
- OBD_FREE_LARGE(*buf, hdr.ioc_len);
- return err;
+ if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) {
+ err = -EFAULT;
+ goto free_buf;
+ }
+ if (hdr.ioc_len != data->ioc_len) {
+ err = -EINVAL;
+ goto free_buf;
}
- if (hdr.ioc_len != data->ioc_len)
- return -EINVAL;
if (obd_ioctl_is_invalid(data)) {
CERROR("ioctl not correctly formatted\n");
- OBD_FREE_LARGE(*buf, hdr.ioc_len);
- return -EINVAL;
+ err = -EINVAL;
+ goto free_buf;
}
if (data->ioc_inllen1) {
@@ -151,6 +151,10 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
}
return 0;
+
+free_buf:
+ OBD_FREE_LARGE(*buf, hdr.ioc_len);
+ return err;
}
EXPORT_SYMBOL(obd_ioctl_getdata);
@@ -272,8 +276,9 @@ static int obd_proc_jobid_var_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%s\n", obd_jobid_var);
}
-static ssize_t obd_proc_jobid_var_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t obd_proc_jobid_var_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
if (!count || count > JOBSTATS_JOBID_VAR_MAX_LEN)
return -EINVAL;
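obd_ioctl_getdata() above stops returning the result of copy_from_user() directly: that call returns the number of bytes it could not copy, not an errno, so a partial copy has to be turned into -EFAULT (and the buffer released through the shared free_buf label). A standalone sketch of that return convention; fake_copy_from_user() only imitates it and is not the kernel helper.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Returns the number of bytes NOT copied, 0 on success, like the real
 * copy_from_user(); 'copyable' simulates how much of the source is valid. */
static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n, unsigned long copyable)
{
	unsigned long done = n < copyable ? n : copyable;

	memcpy(dst, src, done);
	return n - done;
}

static int getdata(char *buf, const char *ubuf, unsigned long len,
		   unsigned long copyable)
{
	if (fake_copy_from_user(buf, ubuf, len, copyable))
		return -EFAULT;		/* never the uncopied byte count */
	return 0;
}

int main(void)
{
	char buf[8];

	printf("%d\n", getdata(buf, "12345678", 8, 8));	/* 0 */
	printf("%d\n", getdata(buf, "12345678", 8, 4));	/* -EFAULT */
	return 0;
}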
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index d3ec90e85eb9..a2d5aa105d6b 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -168,7 +168,8 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
}
case CHANGELOG_REC:
{
- struct llog_changelog_rec *cr = (struct llog_changelog_rec *)rec;
+ struct llog_changelog_rec *cr =
+ (struct llog_changelog_rec *)rec;
__swab16s(&cr->cr.cr_namelen);
__swab16s(&cr->cr.cr_flags);
@@ -188,6 +189,8 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
} else {
tail = &cr->cr_tail;
}
+ tail = (struct llog_rec_tail *)((char *)tail +
+ cr->cr.cr_namelen);
break;
}
case CHANGELOG_USER_REC:
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 3b7dfc367722..ddab94d7ee82 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -45,6 +45,7 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre/lustre_idl.h"
#include <linux/seq_file.h>
+#include <linux/ctype.h>
static const char * const obd_connect_names[] = {
"read_only",
@@ -1849,7 +1850,7 @@ int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult)
}
EXPORT_SYMBOL(lprocfs_seq_read_frac_helper);
-int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
+int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count,
__u64 *val)
{
return lprocfs_write_frac_u64_helper(buffer, count, val, 1);
@@ -1862,6 +1863,7 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
char kernbuf[22], *end, *pbuf;
__u64 whole, frac = 0, units;
unsigned frac_d = 1;
+ int sign = 1;
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
@@ -1872,7 +1874,7 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
kernbuf[count] = '\0';
pbuf = kernbuf;
if (*pbuf == '-') {
- mult = -mult;
+ sign = -1;
pbuf++;
}
@@ -1880,7 +1882,7 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
if (pbuf == end)
return -EINVAL;
- if (end != NULL && *end == '.') {
+ if (*end == '.') {
int i;
pbuf = end + 1;
@@ -1895,25 +1897,25 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
}
units = 1;
- switch (*end) {
- case 'p': case 'P':
+ switch (tolower(*end)) {
+ case 'p':
units <<= 10;
- case 't': case 'T':
+ case 't':
units <<= 10;
- case 'g': case 'G':
+ case 'g':
units <<= 10;
- case 'm': case 'M':
+ case 'm':
units <<= 10;
- case 'k': case 'K':
+ case 'k':
units <<= 10;
}
/* Specified units override the multiplier */
- if (units)
- mult = mult < 0 ? -units : units;
+ if (units > 1)
+ mult = units;
frac *= mult;
do_div(frac, frac_d);
- *val = whole * mult + frac;
+ *val = sign * (whole * mult + frac);
return 0;
}
EXPORT_SYMBOL(lprocfs_write_frac_u64_helper);
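The switch in lprocfs_write_frac_u64_helper() above relies on deliberate fall-through: tolower(*end) enters at its own suffix and keeps shifting by 10 through every later case, so 'k' yields 2^10 and 'p' yields 2^50. A standalone copy of just that part, with a main() to show the resulting multipliers.

#include <ctype.h>
#include <stdio.h>

static unsigned long long suffix_to_units(char c)
{
	unsigned long long units = 1;

	/* each case shifts and deliberately falls into the next */
	switch (tolower((unsigned char)c)) {
	case 'p':
		units <<= 10;
	case 't':
		units <<= 10;
	case 'g':
		units <<= 10;
	case 'm':
		units <<= 10;
	case 'k':
		units <<= 10;
	}
	return units;
}

int main(void)
{
	printf("k=%llu m=%llu g=%llu\n",
	       suffix_to_units('k'), suffix_to_units('M'),
	       suffix_to_units('g'));
	return 0;
}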
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 4f39cdee1b5c..3c0c9109cefd 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -376,6 +376,11 @@ int lustre_start_mgc(struct super_block *sb)
/* Random uuid for MGC allows easier reconnects */
OBD_ALLOC_PTR(uuid);
+ if (!uuid) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
ll_generate_random_uuid(uuidc);
class_uuid_unparse(uuidc, uuid);
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 9f719bcecab3..1795d3a7a029 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -53,8 +53,9 @@ static int osc_active_seq_show(struct seq_file *m, void *v)
return rc;
}
-static ssize_t osc_active_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_active_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
int val, rc;
@@ -88,7 +89,8 @@ static int osc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
}
static ssize_t osc_max_rpcs_in_flight_seq_write(struct file *file,
- const char *buffer, size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &dev->u.cli;
@@ -130,8 +132,9 @@ static int osc_max_dirty_mb_seq_show(struct seq_file *m, void *v)
return lprocfs_seq_read_frac_helper(m, val, mult);
}
-static ssize_t osc_max_dirty_mb_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_max_dirty_mb_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &dev->u.cli;
@@ -233,8 +236,9 @@ static int osc_cur_grant_bytes_seq_show(struct seq_file *m, void *v)
return rc;
}
-static ssize_t osc_cur_grant_bytes_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_cur_grant_bytes_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &obd->u.cli;
@@ -290,7 +294,8 @@ static int osc_grant_shrink_interval_seq_show(struct seq_file *m, void *v)
}
static ssize_t osc_grant_shrink_interval_seq_write(struct file *file,
- const char *buffer, size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
int val, rc;
@@ -322,8 +327,9 @@ static int osc_checksum_seq_show(struct seq_file *m, void *v)
obd->u.cli.cl_checksum ? 1 : 0);
}
-static ssize_t osc_checksum_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_checksum_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
int val, rc;
@@ -358,11 +364,12 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
else
seq_printf(m, "%s ", cksum_name[i]);
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
-static ssize_t osc_checksum_type_seq_write(struct file *file, const char *buffer,
+static ssize_t osc_checksum_type_seq_write(struct file *file,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
@@ -401,8 +408,9 @@ static int osc_resend_count_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", atomic_read(&obd->u.cli.cl_resends));
}
-static ssize_t osc_resend_count_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_resend_count_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
int val, rc;
@@ -428,8 +436,9 @@ static int osc_contention_seconds_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", od->od_contention_time);
}
-static ssize_t osc_contention_seconds_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_contention_seconds_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct osc_device *od = obd2osc_dev(obd);
@@ -447,8 +456,9 @@ static int osc_lockless_truncate_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", od->od_lockless_truncate);
}
-static ssize_t osc_lockless_truncate_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t osc_lockless_truncate_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct osc_device *od = obd2osc_dev(obd);
@@ -472,7 +482,8 @@ static int osc_obd_max_pages_per_rpc_seq_show(struct seq_file *m, void *v)
}
static ssize_t osc_obd_max_pages_per_rpc_seq_write(struct file *file,
- const char *buffer, size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &dev->u.cli;
@@ -590,9 +601,9 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "pending read pages: %d\n",
atomic_read(&cli->cl_pending_r_pages));
- seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_printf(seq, "pages per rpc rpcs %% cum %% |");
- seq_printf(seq, " rpcs %% cum %%\n");
+ seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_puts(seq, "pages per rpc rpcs % cum % |");
+ seq_puts(seq, " rpcs % cum %\n");
read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
@@ -613,9 +624,9 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
break;
}
- seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
- seq_printf(seq, " rpcs %% cum %%\n");
+ seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_puts(seq, "rpcs in flight rpcs % cum % |");
+ seq_puts(seq, " rpcs % cum %\n");
read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
@@ -636,9 +647,9 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
break;
}
- seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_printf(seq, "offset rpcs %% cum %% |");
- seq_printf(seq, " rpcs %% cum %%\n");
+ seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_puts(seq, "offset rpcs % cum % |");
+ seq_puts(seq, " rpcs % cum %\n");
read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);
@@ -664,8 +675,9 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
}
#undef pct
-static ssize_t osc_rpc_stats_seq_write(struct file *file, const char *buf,
- size_t len, loff_t *off)
+static ssize_t osc_rpc_stats_seq_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct obd_device *dev = seq->private;
@@ -702,8 +714,9 @@ static int osc_stats_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static ssize_t osc_stats_seq_write(struct file *file, const char *buf,
- size_t len, loff_t *off)
+static ssize_t osc_stats_seq_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct obd_device *dev = seq->private;
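[Editor's note] The hunks above convert the proc write handlers from "const char *buffer" to "const char __user *buffer", making explicit that the buffer is a userspace pointer that must be copied in before it is parsed. A minimal sketch of the pattern such a handler follows; the 32-byte buffer, the integer parsing and the function name are illustrative assumptions, not taken from the Lustre code:

/* Illustrative only: buffer size, parsed value and helper name are
 * assumptions, not part of the patch. */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_seq_write(struct file *file,
				 const char __user *buffer,
				 size_t count, loff_t *off)
{
	char kbuf[32];
	int val, rc;

	if (count >= sizeof(kbuf))
		return -EINVAL;
	/* The buffer is a userspace pointer: copy it in before parsing. */
	if (copy_from_user(kbuf, buffer, count))
		return -EFAULT;
	kbuf[count] = '\0';

	rc = kstrtoint(kbuf, 10, &val);
	if (rc)
		return rc;

	/* ... apply val to the object behind file->private_data ... */
	return count;
}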
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 370e6d4896c6..7022ed42d2d1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1820,6 +1820,9 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
int *pc, unsigned int *max_pages)
{
struct osc_extent *tmp;
+ struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
+ struct osc_async_page,
+ oap_pending_item);
EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
ext);
@@ -1829,6 +1832,10 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
return 0;
list_for_each_entry(tmp, rpclist, oe_link) {
+ struct osc_async_page *oap2;
+
+ oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
+ oap_pending_item);
EASSERT(tmp->oe_owner == current, tmp);
#if 0
if (overlapped(tmp, ext)) {
@@ -1836,6 +1843,11 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
EASSERT(0, ext);
}
#endif
+ if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
+ CDEBUG(D_CACHE, "Do not permit different type of IO"
+ " for a same RPC\n");
+ return 0;
+ }
if (tmp->oe_srvlock != ext->oe_srvlock ||
!tmp->oe_grants != !ext->oe_grants)
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index d788dac93cd0..af96c7bc7764 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -160,11 +160,6 @@ static inline unsigned long rpcs_in_flight(struct client_obd *cli)
return cli->cl_r_in_flight + cli->cl_w_in_flight;
}
-#ifndef min_t
-#define min_t(type, x, y) \
- ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
-#endif
-
struct osc_device {
struct cl_device od_cl;
struct obd_export *od_exp;
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index a7f08bc48166..445655724904 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -100,14 +100,14 @@ static int osc_lock_invariant(struct osc_lock *ols)
/*
* If all the following "ergo"s are true, return 1, otherwise 0
*/
- if (! ergo(olock != NULL, handle_used))
+ if (!ergo(olock != NULL, handle_used))
return 0;
- if (! ergo(olock != NULL,
+ if (!ergo(olock != NULL,
olock->l_handle.h_cookie == ols->ols_handle.cookie))
return 0;
- if (! ergo(handle_used,
+ if (!ergo(handle_used,
ergo(lock != NULL && olock != NULL, lock == olock) &&
ergo(lock == NULL, olock == NULL)))
return 0;
@@ -115,18 +115,18 @@ static int osc_lock_invariant(struct osc_lock *ols)
* Check that ->ols_handle and ->ols_lock are consistent, but
* take into account that they are set at the different time.
*/
- if (! ergo(ols->ols_state == OLS_CANCELLED,
+ if (!ergo(ols->ols_state == OLS_CANCELLED,
olock == NULL && !handle_used))
return 0;
/*
* DLM lock is destroyed only after we have seen cancellation
* ast.
*/
- if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
+ if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
return 0;
- if (! ergo(ols->ols_state == OLS_GRANTED,
+ if (!ergo(ols->ols_state == OLS_GRANTED,
olock != NULL &&
olock->l_req_mode == olock->l_granted_mode &&
ols->ols_hold))
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index b9450b95f1c5..0adfa707a763 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -3332,7 +3332,7 @@ extern struct lu_kmem_descr osc_caches[];
extern spinlock_t osc_ast_guard;
extern struct lock_class_key osc_ast_guard_class;
-int __init osc_init(void)
+static int __init osc_init(void)
{
struct lprocfs_static_vars lvars = { NULL };
int rc;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index dc9e406f3212..4882dd0a4483 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -1247,7 +1247,9 @@ static int after_reply(struct ptlrpc_request *req)
time_t now = get_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
req->rq_nr_resend++;
/* allocate new xid to avoid reply reconstruction */
@@ -1497,11 +1499,13 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp, *next;
+ struct list_head comp_reqs;
int force_timer_recalc = 0;
if (atomic_read(&set->set_remaining) == 0)
return 1;
+ INIT_LIST_HEAD(&comp_reqs);
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
@@ -1576,8 +1580,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
ptlrpc_rqphase_move(req, req->rq_next_phase);
}
- if (req->rq_phase == RQ_PHASE_COMPLETE)
+ if (req->rq_phase == RQ_PHASE_COMPLETE) {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
continue;
+ }
if (req->rq_phase == RQ_PHASE_INTERPRET)
goto interpret;
@@ -1860,9 +1866,15 @@ interpret:
if (req->rq_status != 0)
set->set_rc = req->rq_status;
ptlrpc_req_finished(req);
+ } else {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
}
}
+ /* move completed requests to the head of the list so it's easier
+ * for the caller to find them */
+ list_splice(&comp_reqs, &set->set_requests);
+
/* If we hit an error, we want to recover promptly. */
return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
}
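[Editor's note] The hunk above gathers requests that reach RQ_PHASE_COMPLETE on a local comp_reqs list and splices that list to the head of set->set_requests, so a caller walking the set can stop at the first request that is not yet complete (ptlrpcd_check later in this series relies on exactly that). A stand-alone sketch of the pattern; struct item and its "complete" flag are hypothetical stand-ins for ptlrpc requests:

/* Minimal sketch, not part of the patch. */
#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head link;
	bool complete;
};

static void compact_completed(struct list_head *all)
{
	struct item *it, *tmp;
	LIST_HEAD(done);

	/* Producer pass: move completed items onto a private list. */
	list_for_each_entry_safe(it, tmp, all, link)
		if (it->complete)
			list_move_tail(&it->link, &done);

	/* Splice them back at the head, so a consumer walking "all" can
	 * break at the first item that is not complete. */
	list_splice(&done, all);
}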
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index dc5ceb55d001..bbef666b1d16 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -65,7 +65,6 @@
#endif
/* struct ptlrpc_request, lustre_msg* */
#include "../include/lustre_req_layout.h"
-#include "../include/lustre_update.h"
#include "../include/lustre_acl.h"
#include "../include/lustre_debug.h"
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 4011e0050fcb..0e2071b8a36e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -45,7 +45,7 @@
#include "ptlrpc_internal.h"
-struct ll_rpc_opcode {
+static struct ll_rpc_opcode {
__u32 opcode;
const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
@@ -136,7 +136,7 @@ struct ll_rpc_opcode {
{ UPDATE_OBJ, "update_obj" },
};
-struct ll_eopcode {
+static struct ll_eopcode {
__u32 opcode;
const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
@@ -175,15 +175,17 @@ const char *ll_opcode2str(__u32 opcode)
return ll_rpc_opcode_table[offset].opname;
}
-const char *ll_eopcode2str(__u32 opcode)
+static const char *ll_eopcode2str(__u32 opcode)
{
LASSERT(ll_eopcode_table[opcode].opcode == opcode);
return ll_eopcode_table[opcode].opname;
}
+
#if defined (CONFIG_PROC_FS)
-void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
- char *name, struct proc_dir_entry **procroot_ret,
- struct lprocfs_stats **stats_ret)
+static void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
+ char *name,
+ struct proc_dir_entry **procroot_ret,
+ struct lprocfs_stats **stats_ret)
{
struct proc_dir_entry *svc_procroot;
struct lprocfs_stats *svc_stats;
@@ -284,8 +286,9 @@ ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n)
}
static ssize_t
-ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
int bufpages;
@@ -329,8 +332,9 @@ ptlrpc_lprocfs_threads_min_seq_show(struct seq_file *m, void *n)
}
static ssize_t
-ptlrpc_lprocfs_threads_min_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+ptlrpc_lprocfs_threads_min_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
int val;
@@ -381,8 +385,9 @@ ptlrpc_lprocfs_threads_max_seq_show(struct seq_file *m, void *n)
}
static ssize_t
-ptlrpc_lprocfs_threads_max_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+ptlrpc_lprocfs_threads_max_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
int val;
@@ -723,7 +728,7 @@ struct ptlrpc_srh_iterator {
struct ptlrpc_request *srhi_req;
};
-int
+static int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
struct ptlrpc_srh_iterator *srhi,
__u64 seq)
@@ -1025,7 +1030,7 @@ static int ptlrpc_lprocfs_hp_ratio_seq_show(struct seq_file *m, void *v)
}
static ssize_t ptlrpc_lprocfs_hp_ratio_seq_write(struct file *file,
- const char *buffer,
+ const char __user *buffer,
size_t count,
loff_t *off)
{
@@ -1175,7 +1180,7 @@ EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
#define BUFLEN (UUID_MAX + 5)
-int lprocfs_wr_evict_client(struct file *file, const char *buffer,
+int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
@@ -1223,7 +1228,7 @@ EXPORT_SYMBOL(lprocfs_wr_evict_client);
#undef BUFLEN
-int lprocfs_wr_ping(struct file *file, const char *buffer,
+int lprocfs_wr_ping(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
@@ -1251,7 +1256,7 @@ EXPORT_SYMBOL(lprocfs_wr_ping);
* The connection UUID is a node's primary NID. For example,
* "echo connection=192.168.0.1@tcp0::instance > .../import".
*/
-int lprocfs_wr_import(struct file *file, const char *buffer,
+int lprocfs_wr_import(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
@@ -1329,7 +1334,7 @@ int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
}
EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
-int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
+int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index cbcc541cac43..4621b71fe0b6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -306,21 +306,16 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
if (atomic_read(&set->set_remaining))
rc |= ptlrpc_check_set(env, set);
- if (!list_empty(&set->set_requests)) {
- /*
- * XXX: our set never completes, so we prune the completed
- * reqs after each iteration. boy could this be smarter.
- */
- list_for_each_safe(pos, tmp, &set->set_requests) {
- req = list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
- if (req->rq_phase != RQ_PHASE_COMPLETE)
- continue;
+ /* NB: ptlrpc_check_set has already moved completed requests to the
+ * head of set::set_requests */
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
+ if (req->rq_phase != RQ_PHASE_COMPLETE)
+ break;
- list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
- ptlrpc_req_finished(req);
- }
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+ ptlrpc_req_finished(req);
}
if (rc == 0) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index c500aff66193..81de68edb04e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -47,6 +47,8 @@
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
+#include "ptlrpc_internal.h"
+
#define SEC_GC_INTERVAL (30 * 60)
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 2a054a99d433..96498b7fc20e 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -27,18 +27,12 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/dt3155v4l/Kconfig"
-source "drivers/staging/media/tlg2300/Kconfig"
-
source "drivers/staging/media/mn88472/Kconfig"
source "drivers/staging/media/mn88473/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
-source "drivers/staging/media/parport/Kconfig"
-
-source "drivers/staging/media/vino/Kconfig"
-
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 412b28408398..a9006bcb4472 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -6,7 +6,3 @@ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_DVB_MN88472) += mn88472/
obj-$(CONFIG_DVB_MN88473) += mn88473/
-obj-y += parport/
-obj-$(CONFIG_VIDEO_TLG2300) += tlg2300/
-obj-y += vino/
-
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 60a57b2a8fb2..538250667918 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2684,9 +2684,7 @@ static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
vd = bdev->videodev;
bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
-
- if (vd)
- video_unregister_device(vd);
+ video_unregister_device(vd);
if (bdev->power_state)
bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
@@ -2699,8 +2697,6 @@ static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
kfree(bdev);
}
- i2c_set_clientdata(client, NULL);
-
return 0;
}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 704fa202ee18..a425f71dfb97 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -901,7 +901,7 @@ static int ipipe_set_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
struct device *dev = ipipe->subdev.v4l2_dev->dev;
if (!gbce_param) {
- memset(gbce, 0 , sizeof(struct vpfe_ipipe_gbce));
+ memset(gbce, 0, sizeof(struct vpfe_ipipe_gbce));
} else {
memcpy(gbce, gbce_param, sizeof(struct vpfe_ipipe_gbce));
if (ipipe_validate_gbce_params(gbce) < 0) {
@@ -1086,7 +1086,7 @@ static int ipipe_set_car_params(struct vpfe_ipipe_device *ipipe, void *param)
struct vpfe_ipipe_car *car = &ipipe->config.car;
if (!car_param) {
- memset(car , 0, sizeof(struct vpfe_ipipe_car));
+ memset(car, 0, sizeof(struct vpfe_ipipe_car));
} else {
memcpy(car, car_param, sizeof(struct vpfe_ipipe_car));
if (ipipe_validate_car_params(car) < 0) {
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 0ba0bf2c1cff..bcf762bc233d 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -1535,7 +1535,7 @@ isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/*
- * isif_pad_set_crop() - set crop rectangle on pad
+ * isif_pad_set_selection() - set crop rectangle on pad
* @sd: VPFE isif V4L2 subdevice
* @fh: V4L2 subdev file handle
* @code: pointer to v4l2_subdev_mbus_code_enum structure
@@ -1543,35 +1543,36 @@ isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
* Return 0 on success, -EINVAL if pad is invalid
*/
static int
-isif_pad_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+isif_pad_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- /* check wether its a valid pad */
- if (crop->pad != ISIF_PAD_SINK)
+ /* check whether it's a valid pad and target */
+ if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- format = __isif_get_format(vpfe_isif, fh, crop->pad, crop->which);
+ format = __isif_get_format(vpfe_isif, fh, sel->pad, sel->which);
if (format == NULL)
return -EINVAL;
/* check wether crop rect is within limits */
- if (crop->rect.top < 0 || crop->rect.left < 0 ||
- (crop->rect.left + crop->rect.width >
+ if (sel->r.top < 0 || sel->r.left < 0 ||
+ (sel->r.left + sel->r.width >
vpfe_isif->formats[ISIF_PAD_SINK].width) ||
- (crop->rect.top + crop->rect.height >
+ (sel->r.top + sel->r.height >
vpfe_isif->formats[ISIF_PAD_SINK].height)) {
- crop->rect.left = 0;
- crop->rect.top = 0;
- crop->rect.width = format->width;
- crop->rect.height = format->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
}
/* adjust the width to 16 pixel boundary */
- crop->rect.width = ((crop->rect.width + 15) & ~0xf);
- vpfe_isif->crop = crop->rect;
- if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sel->r.width = ((sel->r.width + 15) & ~0xf);
+ vpfe_isif->crop = sel->r;
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
isif_set_image_window(vpfe_isif);
} else {
struct v4l2_rect *rect;
@@ -1583,7 +1584,7 @@ isif_pad_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/*
- * isif_pad_get_crop() - get crop rectangle on pad
+ * isif_pad_get_selection() - get crop rectangle on pad
* @sd: VPFE isif V4L2 subdevice
* @fh: V4L2 subdev file handle
* @code: pointer to v4l2_subdev_mbus_code_enum structure
@@ -1591,22 +1592,23 @@ isif_pad_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
* Return 0 on success, -EINVAL if pad is invalid
*/
static int
-isif_pad_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+isif_pad_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
- /* check wether its a valid pad */
- if (crop->pad != ISIF_PAD_SINK)
+ /* check whether it's a valid pad and target */
+ if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_rect *rect;
rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
- memcpy(&crop->rect, rect, sizeof(*rect));
+ memcpy(&sel->r, rect, sizeof(*rect));
} else {
- crop->rect = vpfe_isif->crop;
+ sel->r = vpfe_isif->crop;
}
return 0;
@@ -1626,7 +1628,7 @@ isif_init_formats(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format;
- struct v4l2_subdev_crop crop;
+ struct v4l2_subdev_selection sel;
memset(&format, 0, sizeof(format));
format.pad = ISIF_PAD_SINK;
@@ -1644,12 +1646,13 @@ isif_init_formats(struct v4l2_subdev *sd,
format.format.height = MAX_HEIGHT;
isif_set_format(sd, fh, &format);
- memset(&crop, 0, sizeof(crop));
- crop.pad = ISIF_PAD_SINK;
- crop.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- crop.rect.width = MAX_WIDTH;
- crop.rect.height = MAX_HEIGHT;
- isif_pad_set_crop(sd, fh, &crop);
+ memset(&sel, 0, sizeof(sel));
+ sel.pad = ISIF_PAD_SINK;
+ sel.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r.width = MAX_WIDTH;
+ sel.r.height = MAX_HEIGHT;
+ isif_pad_set_selection(sd, fh, &sel);
return 0;
}
@@ -1675,8 +1678,8 @@ static const struct v4l2_subdev_pad_ops isif_v4l2_pad_ops = {
.enum_frame_size = isif_enum_frame_size,
.get_fmt = isif_get_format,
.set_fmt = isif_set_format,
- .set_crop = isif_pad_set_crop,
- .get_crop = isif_pad_get_crop,
+ .set_selection = isif_pad_set_selection,
+ .get_selection = isif_pad_get_selection,
};
/* subdev operations */
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index eb4ccb8d2a93..19628d0104ab 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -107,7 +107,7 @@ static int io;
static int irq;
static bool iommap;
static int ioshift;
-static bool softcarrier = 1;
+static bool softcarrier = true;
static bool share_irq;
static bool debug;
static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
@@ -266,7 +266,7 @@ static unsigned long space_width;
/* fetch serial input packet (1 byte) from register offset */
static u8 sinp(int offset)
{
- if (iommap != 0)
+ if (iommap)
/* the register is memory-mapped */
offset <<= ioshift;
@@ -276,7 +276,7 @@ static u8 sinp(int offset)
/* write serial output packet (1 byte) of value to register offset */
static void soutp(int offset, u8 value)
{
- if (iommap != 0)
+ if (iommap)
/* the register is memory-mapped */
offset <<= ioshift;
@@ -799,10 +799,10 @@ static int lirc_serial_probe(struct platform_device *dev)
* For memory mapped I/O you *might* need to use ioremap() first,
* for the NSLU2 it's done in boot code.
*/
- if (((iommap != 0)
+ if (((iommap)
&& (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
LIRC_DRIVER_NAME) == NULL))
- || ((iommap == 0)
+ || ((!iommap)
&& (devm_request_region(&dev->dev, io, 8,
LIRC_DRIVER_NAME) == NULL))) {
dev_err(&dev->dev, "port %04x already in use\n", io);
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index cc872fb4ca68..e16627ca488e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -369,17 +369,17 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
- ret);
+ ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
- dev_err(ir->l.dev, "unable to read from the IR chip "
- "after 3 resets, giving up\n");
+ dev_err(ir->l.dev,
+ "unable to read from the IR chip after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
- dev_err(ir->l.dev, "polling the IR receiver chip failed, "
- "trying reset\n");
+ dev_err(ir->l.dev,
+ "polling the IR receiver chip failed, trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
if (kthread_should_stop()) {
@@ -405,14 +405,16 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
- dev_err(ir->l.dev, "i2c_master_recv failed with %d -- "
- "keeping last read buffer\n", ret);
+ dev_err(ir->l.dev,
+ "i2c_master_recv failed with %d -- keeping last read buffer\n",
+ ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
- dev_dbg(ir->l.dev, "key (0x%02x/0x%02x)\n",
- rx->b[0], rx->b[1]);
+ dev_dbg(ir->l.dev,
+ "key (0x%02x/0x%02x)\n",
+ rx->b[0], rx->b[1]);
}
/* key pressed ? */
@@ -656,8 +658,8 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n",
- ret);
+ dev_err(tx->ir->l.dev,
+ "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
i += tosend;
@@ -710,11 +712,12 @@ static int send_boot_data(struct IR_tx *tx)
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
- buf[0]);
+ buf[0]);
return 0;
}
- dev_notice(tx->ir->l.dev, "Zilog/Hauppauge IR blaster firmware version "
- "%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);
+ dev_notice(tx->ir->l.dev,
+ "Zilog/Hauppauge IR blaster firmware version %d.%d.%d loaded\n",
+ buf[1], buf[2], buf[3]);
return 0;
}
@@ -759,8 +762,9 @@ static int fw_load(struct IR_tx *tx)
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
- dev_err(tx->ir->l.dev, "firmware haup-ir-blaster.bin not available (%d)\n",
- ret);
+ dev_err(tx->ir->l.dev,
+ "firmware haup-ir-blaster.bin not available (%d)\n",
+ ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
@@ -792,9 +796,9 @@ static int fw_load(struct IR_tx *tx)
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
- dev_err(tx->ir->l.dev, "unsupported code set file version (%u, expected"
- "1) -- please upgrade to a newer driver",
- version);
+ dev_err(tx->ir->l.dev,
+ "unsupported code set file version (%u, expected 1) -- please upgrade to a newer driver\n",
+ version);
fw_unload_locked();
ret = -EFAULT;
goto out;
@@ -810,7 +814,7 @@ static int fw_load(struct IR_tx *tx)
goto corrupt;
dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
- tx_data->num_code_sets);
+ tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
tx_data->num_code_sets * sizeof(char *));
@@ -940,8 +944,9 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
- dev_err(ir->l.dev, "chunk_size is too big (%d)!\n",
- rbuf->chunk_size);
+ dev_err(ir->l.dev,
+ "chunk_size is too big (%d)!\n",
+ rbuf->chunk_size);
ret = -EINVAL;
break;
}
@@ -964,8 +969,8 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
- dev_dbg(ir->l.dev, "read result = %d (%s)\n",
- ret, ret ? "Error" : "OK");
+ dev_dbg(ir->l.dev, "read result = %d (%s)\n", ret,
+ ret ? "Error" : "OK");
return ret ? ret : written;
}
@@ -981,8 +986,9 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
- dev_err(tx->ir->l.dev, "failed to get data for code %u, key %u -- check "
- "lircd.conf entries\n", code, key);
+ dev_err(tx->ir->l.dev,
+ "failed to get data for code %u, key %u -- check lircd.conf entries\n",
+ code, key);
return ret;
} else if (ret != 0)
return ret;
@@ -1057,12 +1063,14 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
- dev_dbg(tx->ir->l.dev, "NAK expected: i2c_master_send "
- "failed with %d (try %d)\n", ret, i+1);
+ dev_dbg(tx->ir->l.dev,
+ "NAK expected: i2c_master_send failed with %d (try %d)\n",
+ ret, i+1);
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "IR TX chip never got ready: last i2c_master_send "
- "failed with %d\n", ret);
+ dev_err(tx->ir->l.dev,
+ "IR TX chip never got ready: last i2c_master_send failed with %d\n",
+ ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1074,7 +1082,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
}
if (buf[0] != 0x80) {
dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
- buf[0]);
+ buf[0]);
return -EFAULT;
}
@@ -1165,12 +1173,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
- dev_err(tx->ir->l.dev, "sending to the IR transmitter chip "
- "failed, trying reset\n");
+ dev_err(tx->ir->l.dev,
+ "sending to the IR transmitter chip failed, trying reset\n");
if (failures >= 3) {
- dev_err(tx->ir->l.dev, "unable to send to the IR chip "
- "after 3 resets, giving up\n");
+ dev_err(tx->ir->l.dev,
+ "unable to send to the IR chip after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
@@ -1226,7 +1234,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
dev_dbg(ir->l.dev, "poll result = %s\n",
- ret ? "POLLIN|POLLRDNORM" : "none");
+ ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
@@ -1333,7 +1341,8 @@ static int close(struct inode *node, struct file *filep)
struct IR *ir = filep->private_data;
if (ir == NULL) {
- dev_err(ir->l.dev, "close: no private_data attached to the file!\n");
+ dev_err(ir->l.dev,
+ "close: no private_data attached to the file!\n");
return -ENODEV;
}
@@ -1540,8 +1549,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Rx client is also ready or not needed */
if (rx == NULL && !tx_only) {
- dev_info(tx->ir->l.dev, "probe of IR Tx on %s (i2c-%d) done. Waiting"
- " on IR Rx.\n", adap->name, adap->nr);
+ dev_info(tx->ir->l.dev,
+ "probe of IR Tx on %s (i2c-%d) done. Waiting on IR Rx.\n",
+ adap->name, adap->nr);
goto out_ok;
}
} else {
@@ -1579,8 +1589,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
- dev_err(tx->ir->l.dev, "%s: could not start IR Rx polling thread"
- "\n", __func__);
+ dev_err(tx->ir->l.dev,
+ "%s: could not start IR Rx polling thread\n",
+ __func__);
/* Failed kthread, so put back the ir ref */
put_ir_device(ir, true);
/* Failure exit, so put back rx ref from i2c_client */
@@ -1592,8 +1603,8 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Tx client is also ready */
if (tx == NULL) {
- pr_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
- " on IR Tx.\n", adap->name, adap->nr);
+ pr_info("probe of IR Rx on %s (i2c-%d) done. Waiting on IR Tx.\n",
+ adap->name, adap->nr);
goto out_ok;
}
}
@@ -1602,13 +1613,15 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->l.minor = minor; /* module option: user requested minor number */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
- dev_err(tx->ir->l.dev, "%s: \"minor\" must be between 0 and %d (%d)!\n",
- __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
+ dev_err(tx->ir->l.dev,
+ "%s: \"minor\" must be between 0 and %d (%d)!\n",
+ __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
goto out_put_xx;
}
- dev_info(ir->l.dev, "IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
- adap->name, adap->nr, ir->l.minor);
+ dev_info(ir->l.dev,
+ "IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
+ adap->name, adap->nr, ir->l.minor);
out_ok:
if (rx != NULL)
@@ -1616,8 +1629,9 @@ out_ok:
if (tx != NULL)
put_ir_tx(tx, true);
put_ir_device(ir, true);
- dev_info(ir->l.dev, "probe of IR %s on %s (i2c-%d) done\n",
- tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+ dev_info(ir->l.dev,
+ "probe of IR %s on %s (i2c-%d) done\n",
+ tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
return 0;
@@ -1629,9 +1643,9 @@ out_put_xx:
out_put_ir:
put_ir_device(ir, true);
out_no_ir:
- dev_err(&client->dev, "%s: probing IR %s on %s (i2c-%d) failed with %d\n",
- __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
- ret);
+ dev_err(&client->dev,
+ "%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+ __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr, ret);
mutex_unlock(&ir_devices_lock);
return ret;
}
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
index 52de8f85d36c..6eebe564e557 100644
--- a/drivers/staging/media/mn88472/mn88472.c
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -30,6 +30,7 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u32 if_frequency = 0;
+ u64 tmp;
u8 delivery_system_val, if_val[3], bw_val[7], bw_val2;
dev_dbg(&client->dev,
@@ -57,36 +58,22 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
goto err;
}
- switch (c->delivery_system) {
- case SYS_DVBT:
- case SYS_DVBT2:
- if (c->bandwidth_hz <= 6000000) {
- /* IF 3570000 Hz, BW 6000000 Hz */
- memcpy(if_val, "\x2c\x94\xdb", 3);
- memcpy(bw_val, "\xbf\x55\x55\x15\x6b\x15\x6b", 7);
- bw_val2 = 0x02;
- } else if (c->bandwidth_hz <= 7000000) {
- /* IF 4570000 Hz, BW 7000000 Hz */
- memcpy(if_val, "\x39\x11\xbc", 3);
- memcpy(bw_val, "\xa4\x00\x00\x0f\x2c\x0f\x2c", 7);
- bw_val2 = 0x01;
- } else if (c->bandwidth_hz <= 8000000) {
- /* IF 4570000 Hz, BW 8000000 Hz */
- memcpy(if_val, "\x39\x11\xbc", 3);
- memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
- bw_val2 = 0x00;
- } else {
- ret = -EINVAL;
- goto err;
- }
- break;
- case SYS_DVBC_ANNEX_A:
- /* IF 5070000 Hz, BW 8000000 Hz */
- memcpy(if_val, "\x3f\x50\x2c", 3);
+ if (c->bandwidth_hz <= 5000000) {
+ memcpy(bw_val, "\xe5\x99\x9a\x1b\xa9\x1b\xa9", 7);
+ bw_val2 = 0x03;
+ } else if (c->bandwidth_hz <= 6000000) {
+ /* IF 3570000 Hz, BW 6000000 Hz */
+ memcpy(bw_val, "\xbf\x55\x55\x15\x6b\x15\x6b", 7);
+ bw_val2 = 0x02;
+ } else if (c->bandwidth_hz <= 7000000) {
+ /* IF 4570000 Hz, BW 7000000 Hz */
+ memcpy(bw_val, "\xa4\x00\x00\x0f\x2c\x0f\x2c", 7);
+ bw_val2 = 0x01;
+ } else if (c->bandwidth_hz <= 8000000) {
+ /* IF 4570000 Hz, BW 8000000 Hz */
memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
bw_val2 = 0x00;
- break;
- default:
+ } else {
ret = -EINVAL;
goto err;
}
@@ -106,17 +93,12 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
}
- switch (if_frequency) {
- case 3570000:
- case 4570000:
- case 5070000:
- break;
- default:
- dev_err(&client->dev, "IF frequency %d not supported\n",
- if_frequency);
- ret = -EINVAL;
- goto err;
- }
+ /* Calculate IF registers ( (1<<24)*IF / Xtal ) */
+ tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
+ dev->xtal);
+ if_val[0] = ((tmp >> 16) & 0xff);
+ if_val[1] = ((tmp >> 8) & 0xff);
+ if_val[2] = ((tmp >> 0) & 0xff);
ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
ret = regmap_write(dev->regmap[2], 0xef, 0x13);
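[Editor's note] The hunk above replaces the table of hard-coded IF register values with a rounded 24-bit fixed-point ratio, (1 << 24) * IF / Xtal, where Xtal/2 is added before the division for rounding. A small userspace sketch of the same arithmetic; the 20.5 MHz crystal frequency is only an assumed example, the real value comes from config->xtal:

/* Illustrative sketch of the IF register computation, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t if_frequency = 4570000;	/* Hz */
	uint64_t xtal = 20500000;		/* Hz, hypothetical example */
	uint64_t tmp;
	uint8_t if_val[3];

	/* Rounded fixed-point ratio: (1 << 24) * IF / Xtal, split into 3 bytes. */
	tmp = (if_frequency * (1ULL << 24) + xtal / 2) / xtal;
	if_val[0] = (tmp >> 16) & 0xff;
	if_val[1] = (tmp >> 8) & 0xff;
	if_val[2] = tmp & 0xff;

	printf("%02x %02x %02x\n", if_val[0], if_val[1], if_val[2]);
	return 0;
}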
@@ -198,6 +180,8 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
ret = regmap_write(dev->regmap[0], 0xae, 0x00);
ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
ret = regmap_write(dev->regmap[0], 0xd9, 0xe3);
+
+ /* Reset demod */
ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
if (ret)
goto err;
@@ -411,6 +395,7 @@ static int mn88472_probe(struct i2c_client *client,
}
dev->i2c_wr_max = config->i2c_wr_max;
+ dev->xtal = config->xtal;
dev->client[0] = client;
dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
if (IS_ERR(dev->regmap[0])) {
diff --git a/drivers/staging/media/mn88472/mn88472_priv.h b/drivers/staging/media/mn88472/mn88472_priv.h
index 1095949f040d..b12b731e2d4e 100644
--- a/drivers/staging/media/mn88472/mn88472_priv.h
+++ b/drivers/staging/media/mn88472/mn88472_priv.h
@@ -31,6 +31,7 @@ struct mn88472_dev {
u16 i2c_wr_max;
fe_delivery_system_t delivery_system;
bool warm; /* FW running */
+ u32 xtal;
};
#endif
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index cc1dfadd91eb..44b81a2c8b6f 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -560,41 +560,28 @@ static int iss_pipeline_link_notify(struct media_link *link, u32 flags,
*/
/*
- * iss_pipeline_enable - Enable streaming on a pipeline
+ * iss_pipeline_disable - Disable streaming on a pipeline
* @pipe: ISS pipeline
- * @mode: Stream mode (single shot or continuous)
+ * @until: entity at which to stop pipeline walk
*
- * Walk the entities chain starting at the pipeline output video node and start
- * all modules in the chain in the given mode.
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
*
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
+ * If the until argument isn't NULL, stop the pipeline walk when reaching the
+ * until entity. This is used to disable a partially started pipeline due to a
+ * subdev start error.
*/
-static int iss_pipeline_enable(struct iss_pipeline *pipe,
- enum iss_pipeline_stream_state mode)
+static int iss_pipeline_disable(struct iss_pipeline *pipe,
+ struct media_entity *until)
{
struct iss_device *iss = pipe->output->iss;
struct media_entity *entity;
struct media_pad *pad;
struct v4l2_subdev *subdev;
- unsigned long flags;
+ int failure = 0;
int ret;
- /* If one of the entities in the pipeline has crashed it will not work
- * properly. Refuse to start streaming in that case. This check must be
- * performed before the loop below to avoid starting entities if the
- * pipeline won't start anyway (those entities would then likely fail to
- * stop, making the problem worse).
- */
- if (pipe->entities & iss->crashed)
- return -EIO;
-
- spin_lock_irqsave(&pipe->lock, flags);
- pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
- spin_unlock_irqrestore(&pipe->lock, flags);
-
- pipe->do_propagation = false;
-
entity = &pipe->output->video.entity;
while (1) {
pad = &entity->pads[0];
@@ -607,33 +594,62 @@ static int iss_pipeline_enable(struct iss_pipeline *pipe,
break;
entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
+ if (entity == until)
+ break;
- ret = v4l2_subdev_call(subdev, video, s_stream, mode);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
+ subdev = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0) {
+ dev_dbg(iss->dev, "%s: module stop timeout.\n",
+ subdev->name);
+ /* If the entity failed to stop, assume it has
+ * crashed. Mark it as such; the ISS will be reset when
+ * applications release it.
+ */
+ iss->crashed |= 1U << subdev->entity.id;
+ failure = -ETIMEDOUT;
+ }
}
- iss_print_status(pipe->output->iss);
- return 0;
+
+ return failure;
}
/*
- * iss_pipeline_disable - Disable streaming on a pipeline
+ * iss_pipeline_enable - Enable streaming on a pipeline
* @pipe: ISS pipeline
+ * @mode: Stream mode (single shot or continuous)
*
- * Walk the entities chain starting at the pipeline output video node and stop
- * all modules in the chain. Wait synchronously for the modules to be stopped if
- * necessary.
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
*/
-static int iss_pipeline_disable(struct iss_pipeline *pipe)
+static int iss_pipeline_enable(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state mode)
{
struct iss_device *iss = pipe->output->iss;
struct media_entity *entity;
struct media_pad *pad;
struct v4l2_subdev *subdev;
- int failure = 0;
+ unsigned long flags;
int ret;
+ /* If one of the entities in the pipeline has crashed it will not work
+ * properly. Refuse to start streaming in that case. This check must be
+ * performed before the loop below to avoid starting entities if the
+ * pipeline won't start anyway (those entities would then likely fail to
+ * stop, making the problem worse).
+ */
+ if (pipe->entities & iss->crashed)
+ return -EIO;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ pipe->do_propagation = false;
+
entity = &pipe->output->video.entity;
while (1) {
pad = &entity->pads[0];
@@ -648,20 +664,19 @@ static int iss_pipeline_disable(struct iss_pipeline *pipe)
entity = pad->entity;
subdev = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, 0);
- if (ret < 0) {
- dev_dbg(iss->dev, "%s: module stop timeout.\n",
- subdev->name);
- /* If the entity failed to stopped, assume it has
- * crashed. Mark it as such, the ISS will be reset when
- * applications will release it.
- */
- iss->crashed |= 1U << subdev->entity.id;
- failure = -ETIMEDOUT;
+ ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ iss_pipeline_disable(pipe, entity);
+ return ret;
}
+
+ if (subdev == &iss->csi2a.subdev ||
+ subdev == &iss->csi2b.subdev)
+ pipe->do_propagation = true;
}
- return failure;
+ iss_print_status(pipe->output->iss);
+ return 0;
}
/*
@@ -682,7 +697,7 @@ int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
int ret;
if (state == ISS_PIPELINE_STREAM_STOPPED)
- ret = iss_pipeline_disable(pipe);
+ ret = iss_pipeline_disable(pipe, NULL);
else
ret = iss_pipeline_enable(pipe, state);
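[Editor's note] The reordering above lets iss_pipeline_enable() unwind a partially started pipeline: when a subdev s_stream call fails, iss_pipeline_disable(pipe, entity) is invoked with the failing entity as the stop point, so only the modules that were already started are stopped. A generic sketch of that unwind pattern; struct unit and its start/stop callbacks are hypothetical:

/* Generic unwind-on-partial-start sketch, not part of the patch. */
struct unit {
	struct unit *next;
	int (*start)(struct unit *u);
	void (*stop)(struct unit *u);
};

static void chain_stop(struct unit *head, struct unit *until)
{
	struct unit *u;

	/* Stop units in start order, halting at "until" (NULL stops all). */
	for (u = head; u && u != until; u = u->next)
		u->stop(u);
}

static int chain_start(struct unit *head)
{
	struct unit *u;
	int ret;

	for (u = head; u; u = u->next) {
		ret = u->start(u);
		if (ret < 0) {
			/* Unwind only the units already started. */
			chain_stop(head, u);
			return ret;
		}
	}
	return 0;
}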
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 21971c675b8c..2d96fb3eca53 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -319,6 +319,8 @@ static void csi2_ctx_config(struct iss_csi2_device *csi2,
{
u32 reg = 0;
+ ctx->frame = 0;
+
/* Set up CSI2_CTx_CTRL1 */
if (ctx->eof_enabled)
reg = CSI2_CTX_CTRL1_EOF_EN;
@@ -396,21 +398,18 @@ static void csi2_timing_config(struct iss_csi2_device *csi2,
*/
static void csi2_irq_ctx_set(struct iss_csi2_device *csi2, int enable)
{
- u32 reg = CSI2_CTX_IRQ_FE;
+ const u32 mask = CSI2_CTX_IRQ_FE | CSI2_CTX_IRQ_FS;
int i;
- if (csi2->use_fs_irq)
- reg |= CSI2_CTX_IRQ_FS;
-
for (i = 0; i < 8; i++) {
iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(i),
- reg);
+ mask);
if (enable)
iss_reg_set(csi2->iss, csi2->regs1,
- CSI2_CTX_IRQENABLE(i), reg);
+ CSI2_CTX_IRQENABLE(i), mask);
else
iss_reg_clr(csi2->iss, csi2->regs1,
- CSI2_CTX_IRQENABLE(i), reg);
+ CSI2_CTX_IRQENABLE(i), mask);
}
}
@@ -679,8 +678,34 @@ static void csi2_isr_ctx(struct iss_csi2_device *csi2,
if (status & CSI2_CTX_IRQ_FS) {
struct iss_pipeline *pipe =
to_iss_pipeline(&csi2->subdev.entity);
- if (pipe->do_propagation)
+ u16 frame;
+ u16 delta;
+
+ frame = iss_reg_read(csi2->iss, csi2->regs1,
+ CSI2_CTX_CTRL2(ctx->ctxnum))
+ >> CSI2_CTX_CTRL2_FRAME_SHIFT;
+
+ if (frame == 0) {
+ /* A zero value means that the counter isn't implemented
+ * by the source. Increment the frame number in software
+ * in that case.
+ */
atomic_inc(&pipe->frame_number);
+ } else {
+ /* Extend the 16 bit frame number to 32 bits by
+ * computing the delta between two consecutive CSI2
+ * frame numbers and adding it to the software frame
+ * number. The hardware counter starts at 1 and wraps
+ * from 0xffff to 1 without going through 0, so subtract
+ * 1 when the counter wraps.
+ */
+ delta = frame - ctx->frame;
+ if (frame < ctx->frame)
+ delta--;
+ ctx->frame = frame;
+
+ atomic_add(delta, &pipe->frame_number);
+ }
}
if (!(status & CSI2_CTX_IRQ_FE))
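[Editor's note] The hunk above extends the 16-bit CSI2 hardware frame counter to the 32-bit software frame number by accumulating deltas, compensating for the wrap from 0xffff back to 1 (which skips 0) by subtracting one. A stand-alone sketch of that bookkeeping with hypothetical variable names:

/* Frame-number extension sketch, not part of the patch. */
#include <stdint.h>

static uint32_t sw_frame_number;
static uint16_t last_hw_frame;

static void account_frame(uint16_t hw_frame)
{
	uint16_t delta;

	if (hw_frame == 0) {
		/* Counter not implemented by the source: count in software. */
		sw_frame_number++;
		return;
	}

	delta = hw_frame - last_hw_frame;	/* mod 2^16 */
	if (hw_frame < last_hw_frame)
		delta--;			/* wrap skipped 0 */
	last_hw_frame = hw_frame;

	sw_frame_number += delta;
}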
@@ -1039,7 +1064,6 @@ static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct iss_device *iss = csi2->iss;
- struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
struct iss_video *video_out = &csi2->video_out;
int ret = 0;
@@ -1058,7 +1082,6 @@ static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
if (omap4iss_csiphy_acquire(csi2->phy) < 0)
return -ENODEV;
- csi2->use_fs_irq = pipe->do_propagation;
csi2_configure(csi2);
csi2_print_status(csi2);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.h b/drivers/staging/media/omap4iss/iss_csi2.h
index 971aa7b08013..3b37978a3bdf 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.h
+++ b/drivers/staging/media/omap4iss/iss_csi2.h
@@ -82,6 +82,7 @@ struct iss_csi2_ctx_cfg {
u8 virtual_id;
u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
u8 dpcm_predictor; /* 1: simple, 0: advanced */
+ u16 frame;
/* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
u16 alpha;
@@ -137,7 +138,6 @@ struct iss_csi2_device {
u32 output; /* output to IPIPEIF, memory or both? */
bool dpcm_decompress;
unsigned int frame_skip;
- bool use_fs_irq;
struct iss_csiphy *phy;
struct iss_csi2_ctx_cfg contexts[ISS_CSI2_MAX_CTX_NUM + 1];
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index 32a748398ced..3943fae699ee 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -242,23 +242,6 @@ static void ipipeif_isr_buffer(struct iss_ipipeif_device *ipipeif)
}
/*
- * ipipeif_isif0_isr - Handle ISIF0 event
- * @ipipeif: Pointer to ISP IPIPEIF device.
- *
- * Executes LSC deferred enablement before next frame starts.
- */
-static void ipipeif_isif0_isr(struct iss_ipipeif_device *ipipeif)
-{
- struct iss_pipeline *pipe =
- to_iss_pipeline(&ipipeif->subdev.entity);
- if (pipe->do_propagation)
- atomic_inc(&pipe->frame_number);
-
- if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
- ipipeif_isr_buffer(ipipeif);
-}
-
-/*
* omap4iss_ipipeif_isr - Configure ipipeif during interframe time.
* @ipipeif: Pointer to ISP IPIPEIF device.
* @events: IPIPEIF events
@@ -269,8 +252,9 @@ void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events)
&ipipeif->stopping))
return;
- if (events & ISP5_IRQ_ISIF_INT(0))
- ipipeif_isif0_isr(ipipeif);
+ if ((events & ISP5_IRQ_ISIF_INT(0)) &&
+ (ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
+ ipipeif_isr_buffer(ipipeif);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/staging/media/omap4iss/iss_regs.h b/drivers/staging/media/omap4iss/iss_regs.h
index efd0291a86f7..d2b6b6ae9174 100644
--- a/drivers/staging/media/omap4iss/iss_regs.h
+++ b/drivers/staging/media/omap4iss/iss_regs.h
@@ -215,6 +215,8 @@
#define CSI2_CTX_CTRL1_CTX_EN (1 << 0)
#define CSI2_CTX_CTRL2(i) (0x74 + (0x20 * i))
+#define CSI2_CTX_CTRL2_FRAME_MASK (0xffff << 16)
+#define CSI2_CTX_CTRL2_FRAME_SHIFT 16
#define CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
#define CSI2_CTX_CTRL2_USER_DEF_MAP_MASK \
(0x3 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index 88522a8cdf56..3ab972818f1b 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -283,22 +283,6 @@ static void resizer_isr_buffer(struct iss_resizer_device *resizer)
}
/*
- * resizer_isif0_isr - Handle ISIF0 event
- * @resizer: Pointer to ISP RESIZER device.
- *
- * Executes LSC deferred enablement before next frame starts.
- */
-static void resizer_int_dma_isr(struct iss_resizer_device *resizer)
-{
- struct iss_pipeline *pipe =
- to_iss_pipeline(&resizer->subdev.entity);
- if (pipe->do_propagation)
- atomic_inc(&pipe->frame_number);
-
- resizer_isr_buffer(resizer);
-}
-
-/*
* omap4iss_resizer_isr - Configure resizer during interframe time.
* @resizer: Pointer to ISP RESIZER device.
* @events: RESIZER events
@@ -322,7 +306,7 @@ void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events)
return;
if (events & ISP5_IRQ_RSZ_INT_DMA)
- resizer_int_dma_isr(resizer);
+ resizer_isr_buffer(resizer);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index cdee5966cbca..69550445a341 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -25,9 +25,6 @@
#include "iss_video.h"
#include "iss.h"
-static unsigned debug;
-module_param(debug, uint, 0644);
-MODULE_PARM_DESC(debug, "activates debug info");
/* -----------------------------------------------------------------------------
* Helper functions
@@ -773,6 +770,14 @@ iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
}
static int
+iss_video_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *e)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_expbuf(&vfh->queue, e);
+}
+
+static int
iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct iss_video_fh *vfh = to_iss_video_fh(fh);
@@ -1021,6 +1026,7 @@ static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
.vidioc_reqbufs = iss_video_reqbufs,
.vidioc_querybuf = iss_video_querybuf,
.vidioc_qbuf = iss_video_qbuf,
+ .vidioc_expbuf = iss_video_expbuf,
.vidioc_dqbuf = iss_video_dqbuf,
.vidioc_streamon = iss_video_streamon,
.vidioc_streamoff = iss_video_streamoff,
@@ -1044,8 +1050,6 @@ static int iss_video_open(struct file *file)
if (handle == NULL)
return -ENOMEM;
- video->video.debug = debug;
-
v4l2_fh_init(&handle->vfh, &video->video);
v4l2_fh_add(&handle->vfh);
@@ -1071,7 +1075,7 @@ static int iss_video_open(struct file *file)
q = &handle->queue;
q->type = video->type;
- q->io_modes = VB2_MMAP;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
q->drv_priv = handle;
q->ops = &iss_video_vb2ops;
q->mem_ops = &vb2_dma_contig_memops;
diff --git a/drivers/staging/media/parport/Kconfig b/drivers/staging/media/parport/Kconfig
deleted file mode 100644
index 15974efdba1d..000000000000
--- a/drivers/staging/media/parport/Kconfig
+++ /dev/null
@@ -1,69 +0,0 @@
-menuconfig MEDIA_PARPORT_SUPPORT
- bool "ISA and parallel port devices"
- depends on (ISA || PARPORT) && MEDIA_CAMERA_SUPPORT
- help
- Enables drivers for ISA and parallel port bus. If you
- need media drivers using those legacy buses, say Y.
-
-if MEDIA_PARPORT_SUPPORT
-config VIDEO_BWQCAM
- tristate "Quickcam BW Video For Linux (Deprecated)"
- depends on PARPORT && VIDEO_V4L2
- select VIDEOBUF2_VMALLOC
- help
- Say Y have if you the black and white version of the QuickCam
- camera. See the next option for the color version.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
- the linux-media mailinglist.
-
- To compile this driver as a module, choose M here: the
- module will be called bw-qcam.
-
-config VIDEO_CQCAM
- tristate "QuickCam Colour Video For Linux (Deprecated)"
- depends on PARPORT && VIDEO_V4L2
- help
- This is the video4linux driver for the colour version of the
- Connectix QuickCam. If you have one of these cameras, say Y here,
- otherwise say N. This driver does not work with the original
- monochrome QuickCam, QuickCam VC or QuickClip. It is also available
- as a module (c-qcam).
- Read <file:Documentation/video4linux/CQcam.txt> for more information.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
- the linux-media mailinglist.
-
-config VIDEO_PMS
- tristate "Mediavision Pro Movie Studio Video For Linux (Deprecated)"
- depends on ISA && VIDEO_V4L2
- help
- Say Y if you have the ISA Mediavision Pro Movie Studio
- capture card.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
- the linux-media mailinglist.
-
- To compile this driver as a module, choose M here: the
- module will be called pms.
-
-config VIDEO_W9966
- tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux (Deprecated)"
- depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
- help
- Video4linux driver for Winbond's w9966 based Webcams.
- Currently tested with the LifeView FlyCam Supra.
- If you have one of these cameras, say Y here
- otherwise say N.
- This driver is also available as a module (w9966).
-
- Check out <file:Documentation/video4linux/w9966.txt> for more
- information.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
- the linux-media mailinglist.
-endif
diff --git a/drivers/staging/media/parport/Makefile b/drivers/staging/media/parport/Makefile
deleted file mode 100644
index 4eea06d7af5b..000000000000
--- a/drivers/staging/media/parport/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_VIDEO_CQCAM) += c-qcam.o
-obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
-obj-$(CONFIG_VIDEO_W9966) += w9966.o
-obj-$(CONFIG_VIDEO_PMS) += pms.o
diff --git a/drivers/staging/media/parport/bw-qcam.c b/drivers/staging/media/parport/bw-qcam.c
deleted file mode 100644
index 67b9da1dc43f..000000000000
--- a/drivers/staging/media/parport/bw-qcam.c
+++ /dev/null
@@ -1,1177 +0,0 @@
-/*
- * QuickCam Driver For Video4Linux.
- *
- * Video4Linux conversion work by Alan Cox.
- * Parport compatibility by Phil Blundell.
- * Busy loop avoidance by Mark Cooke.
- *
- * Module parameters:
- *
- * maxpoll=<1 - 5000>
- *
- * When polling the QuickCam for a response, busy-wait for a
- * maximum of this many loops. The default of 250 gives little
- * impact on interactive response.
- *
- * NOTE: If this parameter is set too high, the processor
- * will busy wait until this loop times out, and then
- * slowly poll for a further 5 seconds before failing
- * the transaction. You have been warned.
- *
- * yieldlines=<1 - 250>
- *
- * When acquiring a frame from the camera, the data gathering
- * loop will yield back to the scheduler after completing
- * this many lines. The default of 4 provides a trade-off
- * between increased frame acquisition time and impact on
- * interactive response.
- */
-
-/* qcam-lib.c -- Library for programming with the Connectix QuickCam.
- * See the included documentation for usage instructions and details
- * of the protocol involved. */
-
-
-/* Version 0.5, August 4, 1996 */
-/* Version 0.7, August 27, 1996 */
-/* Version 0.9, November 17, 1996 */
-
-
-/******************************************************************
-
-Copyright (C) 1996 by Scott Laird
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL SCOTT LAIRD BE LIABLE FOR ANY CLAIM, DAMAGES OR
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-******************************************************************/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/parport.h>
-#include <linux/sched.h>
-#include <linux/videodev2.h>
-#include <linux/mutex.h>
-#include <asm/uaccess.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
-#include <media/videobuf2-vmalloc.h>
-
-/* One from column A... */
-#define QC_NOTSET 0
-#define QC_UNIDIR 1
-#define QC_BIDIR 2
-#define QC_SERIAL 3
-
-/* ... and one from column B */
-#define QC_ANY 0x00
-#define QC_FORCE_UNIDIR 0x10
-#define QC_FORCE_BIDIR 0x20
-#define QC_FORCE_SERIAL 0x30
-/* in the port_mode member */
-
-#define QC_MODE_MASK 0x07
-#define QC_FORCE_MASK 0x70
-
-#define MAX_HEIGHT 243
-#define MAX_WIDTH 336
-
-/* Bit fields for status flags */
-#define QC_PARAM_CHANGE 0x01 /* Camera status change has occurred */
-
-struct qcam {
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct v4l2_ctrl_handler hdl;
- struct vb2_queue vb_vidq;
- struct pardevice *pdev;
- struct parport *pport;
- struct mutex lock;
- struct mutex queue_lock;
- int width, height;
- int bpp;
- int mode;
- int contrast, brightness, whitebal;
- int port_mode;
- int transfer_scale;
- int top, left;
- int status;
- unsigned int saved_bits;
- unsigned long in_use;
-};
-
-static unsigned int maxpoll = 250; /* Maximum busy-loop count for qcam I/O */
-static unsigned int yieldlines = 4; /* Yield after this many during capture */
-static int video_nr = -1;
-static unsigned int force_init; /* Whether to probe aggressively */
-
-module_param(maxpoll, int, 0);
-module_param(yieldlines, int, 0);
-module_param(video_nr, int, 0);
-
-/* Set force_init=1 to avoid detection by polling status register and
- * immediately attempt to initialize qcam */
-module_param(force_init, int, 0);
-
-#define MAX_CAMS 4
-static struct qcam *qcams[MAX_CAMS];
-static unsigned int num_cams;
-
-static inline int read_lpstatus(struct qcam *q)
-{
- return parport_read_status(q->pport);
-}
-
-static inline int read_lpdata(struct qcam *q)
-{
- return parport_read_data(q->pport);
-}
-
-static inline void write_lpdata(struct qcam *q, int d)
-{
- parport_write_data(q->pport, d);
-}
-
-static void write_lpcontrol(struct qcam *q, int d)
-{
- if (d & 0x20) {
- /* Set bidirectional mode to reverse (data in) */
- parport_data_reverse(q->pport);
- } else {
- /* Set bidirectional mode to forward (data out) */
- parport_data_forward(q->pport);
- }
-
- /* Now issue the regular port command, but strip out the
- * direction flag */
- d &= ~0x20;
- parport_write_control(q->pport, d);
-}
-
-
-/* qc_waithand busy-waits for a handshake signal from the QuickCam.
- * Almost all communication with the camera requires handshaking. */
-
-static int qc_waithand(struct qcam *q, int val)
-{
- int status;
- int runs = 0;
-
- if (val) {
- while (!((status = read_lpstatus(q)) & 8)) {
-			/* 1000 is enough spins on the I/O for all normal
-			   cases; at that point we start to poll slowly
-			   until the camera wakes up. However, we are
-			   busy-blocked until the camera responds, so
-			   setting it lower is much better for interactive
-			   response. */
-
- if (runs++ > maxpoll)
- msleep_interruptible(5);
- if (runs > (maxpoll + 1000)) /* 5 seconds */
- return -1;
- }
- } else {
- while (((status = read_lpstatus(q)) & 8)) {
-			/* 1000 is enough spins on the I/O for all normal
-			   cases; at that point we start to poll slowly
-			   until the camera wakes up. However, we are
-			   busy-blocked until the camera responds, so
-			   setting it lower is much better for interactive
-			   response. */
-
- if (runs++ > maxpoll)
- msleep_interruptible(5);
- if (runs++ > (maxpoll + 1000)) /* 5 seconds */
- return -1;
- }
- }
-
- return status;
-}
-
-/* Waithand2 is used when the qcam is in bidirectional mode, and the
- * handshaking signal is CamRdy2 (bit 0 of data reg) instead of CamRdy1
- * (bit 3 of status register). It also returns the last value read,
- * since this data is useful. */
-
-static unsigned int qc_waithand2(struct qcam *q, int val)
-{
- unsigned int status;
- int runs = 0;
-
- do {
- status = read_lpdata(q);
-		/* 1000 is enough spins on the I/O for all normal
-		   cases; at that point we start to poll slowly
-		   until the camera wakes up. However, we are
-		   busy-blocked until the camera responds, so
-		   setting it lower is much better for interactive
-		   response. */
-
- if (runs++ > maxpoll)
- msleep_interruptible(5);
- if (runs++ > (maxpoll + 1000)) /* 5 seconds */
- return 0;
- } while ((status & 1) != val);
-
- return status;
-}
-
-/* qc_command is probably a bit of a misnomer -- it's used to send
- * bytes *to* the camera. Generally, these bytes are either commands
- * or arguments to commands, so the name fits, but it still bugs me a
- * bit. See the documentation for a list of commands. */
-
-static int qc_command(struct qcam *q, int command)
-{
- int n1, n2;
- int cmd;
-
- write_lpdata(q, command);
- write_lpcontrol(q, 6);
-
- n1 = qc_waithand(q, 1);
-
- write_lpcontrol(q, 0xe);
- n2 = qc_waithand(q, 0);
-
- cmd = (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
- return cmd;
-}
-
-static int qc_readparam(struct qcam *q)
-{
- int n1, n2;
- int cmd;
-
- write_lpcontrol(q, 6);
- n1 = qc_waithand(q, 1);
-
- write_lpcontrol(q, 0xe);
- n2 = qc_waithand(q, 0);
-
- cmd = (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
- return cmd;
-}
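Both qc_command() and qc_readparam() above rebuild one response byte from two handshake reads, taking the high nibble of each status value. A minimal stand-alone sketch of that reassembly; the sample values 0xA7 and 0x3C are made up purely for illustration:

/* Illustrative sketch only -- not part of the deleted driver. */
#include <stdio.h>

/* Same reassembly as qc_command()/qc_readparam(): the high nibble of
 * the first read becomes the high nibble of the result, the high
 * nibble of the second read becomes the low nibble. */
static int reassemble(int n1, int n2)
{
	return (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
}

int main(void)
{
	printf("0x%02X\n", reassemble(0xA7, 0x3C));	/* prints 0xA3 */
	return 0;
}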
-
-
-/* Try to detect a QuickCam. It appears to flash the upper 4 bits of
- the status register at 5-10 Hz. This is only used in the autoprobe
- code. Be aware that this isn't the way Connectix detects the
- camera (they send a reset and try to handshake), but this should be
- almost completely safe, while their method screws up my printer if
- I plug it in before the camera. */
-
-static int qc_detect(struct qcam *q)
-{
- int reg, lastreg;
- int count = 0;
- int i;
-
- if (force_init)
- return 1;
-
- lastreg = reg = read_lpstatus(q) & 0xf0;
-
- for (i = 0; i < 500; i++) {
- reg = read_lpstatus(q) & 0xf0;
- if (reg != lastreg)
- count++;
- lastreg = reg;
- mdelay(2);
- }
-
-
-#if 0
- /* Force camera detection during testing. Sometimes the camera
- won't be flashing these bits. Possibly unloading the module
- in the middle of a grab? Or some timeout condition?
-	   I've seen this parameter as low as 19 on my 450MHz box - mpc */
- printk(KERN_DEBUG "Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
- return 1;
-#endif
-
- /* Be (even more) liberal in what you accept... */
-
- if (count > 20 && count < 400) {
- return 1; /* found */
- } else {
- printk(KERN_ERR "No Quickcam found on port %s\n",
- q->pport->name);
- printk(KERN_DEBUG "Quickcam detection counter: %u\n", count);
- return 0; /* not found */
- }
-}
-
-/* Decide which scan mode to use. There's no real requirement that
- * the scanmode match the resolution in q->height and q->width -- the
- * camera takes the picture at the resolution specified in the
- * "scanmode" and then returns the image at the resolution specified
- * with the resolution commands. If the scan is bigger than the
- * requested resolution, the upper-left hand corner of the scan is
- * returned. If the scan is smaller, then the rest of the image
- * returned contains garbage. */
-
-static int qc_setscanmode(struct qcam *q)
-{
- int old_mode = q->mode;
-
- switch (q->transfer_scale) {
- case 1:
- q->mode = 0;
- break;
- case 2:
- q->mode = 4;
- break;
- case 4:
- q->mode = 8;
- break;
- }
-
- switch (q->bpp) {
- case 4:
- break;
- case 6:
- q->mode += 2;
- break;
- }
-
- switch (q->port_mode & QC_MODE_MASK) {
- case QC_BIDIR:
- q->mode += 1;
- break;
- case QC_NOTSET:
- case QC_UNIDIR:
- break;
- }
-
- if (q->mode != old_mode)
- q->status |= QC_PARAM_CHANGE;
-
- return 0;
-}
-
-
-/* Reset the QuickCam. This uses the same sequence the Windows
- * QuickPic program uses. Someone with a bi-directional port should
- * check that bi-directional mode is detected right, and then
- * implement bi-directional mode in qc_readbyte(). */
-
-static void qc_reset(struct qcam *q)
-{
- switch (q->port_mode & QC_FORCE_MASK) {
- case QC_FORCE_UNIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
-
- case QC_FORCE_BIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- break;
-
- case QC_ANY:
- write_lpcontrol(q, 0x20);
- write_lpdata(q, 0x75);
-
- if (read_lpdata(q) != 0x75)
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- else
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
- }
-
- write_lpcontrol(q, 0xb);
- udelay(250);
- write_lpcontrol(q, 0xe);
- qc_setscanmode(q); /* in case port_mode changed */
-}
-
-
-
-/* Reset the QuickCam and program for brightness, contrast,
- * white-balance, and resolution. */
-
-static void qc_set(struct qcam *q)
-{
- int val;
- int val2;
-
- /* Set the brightness. Yes, this is repetitive, but it works.
- * Shorter versions seem to fail subtly. Feel free to try :-). */
- /* I think the problem was in qc_command, not here -- bls */
-
- qc_command(q, 0xb);
- qc_command(q, q->brightness);
-
- val = q->height / q->transfer_scale;
- qc_command(q, 0x11);
- qc_command(q, val);
- if ((q->port_mode & QC_MODE_MASK) == QC_UNIDIR && q->bpp == 6) {
- /* The normal "transfers per line" calculation doesn't seem to work
- as expected here (and yet it works fine in qc_scan). No idea
- why this case is the odd man out. Fortunately, Laird's original
- working version gives me a good way to guess at working values.
- -- bls */
- val = q->width;
- val2 = q->transfer_scale * 4;
- } else {
- val = q->width * q->bpp;
- val2 = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
- }
- val = DIV_ROUND_UP(val, val2);
- qc_command(q, 0x13);
- qc_command(q, val);
-
- /* Setting top and left -- bls */
- qc_command(q, 0xd);
- qc_command(q, q->top);
- qc_command(q, 0xf);
- qc_command(q, q->left / 2);
-
- qc_command(q, 0x19);
- qc_command(q, q->contrast);
- qc_command(q, 0x1f);
- qc_command(q, q->whitebal);
-
- /* Clear flag that we must update the grabbing parameters on the camera
- before we grab the next frame */
- q->status &= (~QC_PARAM_CHANGE);
-}
-
-/* Qc_readbytes reads some bytes from the QC and puts them in
- the supplied buffer. It returns the number of bytes read,
- or -1 on error. */
-
-static inline int qc_readbytes(struct qcam *q, char buffer[])
-{
- int ret = 1;
- unsigned int hi, lo;
- unsigned int hi2, lo2;
- static int state;
-
- if (buffer == NULL) {
- state = 0;
- return 0;
- }
-
- switch (q->port_mode & QC_MODE_MASK) {
- case QC_BIDIR: /* Bi-directional Port */
- write_lpcontrol(q, 0x26);
- lo = (qc_waithand2(q, 1) >> 1);
- hi = (read_lpstatus(q) >> 3) & 0x1f;
- write_lpcontrol(q, 0x2e);
- lo2 = (qc_waithand2(q, 0) >> 1);
- hi2 = (read_lpstatus(q) >> 3) & 0x1f;
- switch (q->bpp) {
- case 4:
- buffer[0] = lo & 0xf;
- buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
- buffer[2] = (hi & 0x1e) >> 1;
- buffer[3] = lo2 & 0xf;
- buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
- buffer[5] = (hi2 & 0x1e) >> 1;
- ret = 6;
- break;
- case 6:
- buffer[0] = lo & 0x3f;
- buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
- buffer[2] = lo2 & 0x3f;
- buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
- ret = 4;
- break;
- }
- break;
-
- case QC_UNIDIR: /* Unidirectional Port */
- write_lpcontrol(q, 6);
- lo = (qc_waithand(q, 1) & 0xf0) >> 4;
- write_lpcontrol(q, 0xe);
- hi = (qc_waithand(q, 0) & 0xf0) >> 4;
-
- switch (q->bpp) {
- case 4:
- buffer[0] = lo;
- buffer[1] = hi;
- ret = 2;
- break;
- case 6:
- switch (state) {
- case 0:
- buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
- q->saved_bits = (hi & 3) << 4;
- state = 1;
- ret = 1;
- break;
- case 1:
- buffer[0] = lo | q->saved_bits;
- q->saved_bits = hi << 2;
- state = 2;
- ret = 1;
- break;
- case 2:
- buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
- buffer[1] = ((lo & 3) << 4) | hi;
- state = 0;
- ret = 2;
- break;
- }
- break;
- }
- break;
- }
- return ret;
-}
-
-/* requests a scan from the camera. It sends the correct instructions
- * to the camera and then reads back the correct number of bytes. In
- * previous versions of this routine the return structure contained
- * the raw output from the camera, and there was a 'qc_convertscan'
- * function that converted that to a useful format. In version 0.3 I
- * rolled qc_convertscan into qc_scan and now I only return the
- * converted scan. The format is just a one-dimensional array of
- * characters, one for each pixel, with 0=black up to n=white, where
- * n=2^(bit depth)-1. Ask me for more details if you don't understand
- * this. */
-
-static long qc_capture(struct qcam *q, u8 *buf, unsigned long len)
-{
- int i, j, k, yield;
- int bytes;
- int linestotrans, transperline;
- int divisor;
- int pixels_per_line;
- int pixels_read = 0;
- int got = 0;
- char buffer[6];
- int shift = 8 - q->bpp;
- char invert;
-
- if (q->mode == -1)
- return -ENXIO;
-
- qc_command(q, 0x7);
- qc_command(q, q->mode);
-
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
- write_lpcontrol(q, 0x2e); /* turn port around */
- write_lpcontrol(q, 0x26);
- qc_waithand(q, 1);
- write_lpcontrol(q, 0x2e);
- qc_waithand(q, 0);
- }
-
- /* strange -- should be 15:63 below, but 4bpp is odd */
- invert = (q->bpp == 4) ? 16 : 63;
-
- linestotrans = q->height / q->transfer_scale;
- pixels_per_line = q->width / q->transfer_scale;
- transperline = q->width * q->bpp;
- divisor = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
- transperline = DIV_ROUND_UP(transperline, divisor);
-
- for (i = 0, yield = yieldlines; i < linestotrans; i++) {
- for (pixels_read = j = 0; j < transperline; j++) {
- bytes = qc_readbytes(q, buffer);
- for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++) {
- int o;
- if (buffer[k] == 0 && invert == 16) {
- /* 4bpp is odd (again) -- inverter is 16, not 15, but output
- must be 0-15 -- bls */
- buffer[k] = 16;
- }
- o = i * pixels_per_line + pixels_read + k;
- if (o < len) {
- u8 ch = invert - buffer[k];
- got++;
- buf[o] = ch << shift;
- }
- }
- pixels_read += bytes;
- }
- qc_readbytes(q, NULL); /* reset state machine */
-
- /* Grabbing an entire frame from the quickcam is a lengthy
- process. We don't (usually) want to busy-block the
- processor for the entire frame. yieldlines is a module
- parameter. If we yield every line, the minimum frame
- time will be 240 / 200 = 1.2 seconds. The compile-time
- default is to yield every 4 lines. */
- if (i >= yield) {
- msleep_interruptible(5);
- yield = i + yieldlines;
- }
- }
-
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
- write_lpcontrol(q, 2);
- write_lpcontrol(q, 6);
- udelay(3);
- write_lpcontrol(q, 0xe);
- }
- if (got < len)
- return got;
- return len;
-}
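The conversion described in the comment before qc_capture() reduces to one invert-and-shift per sample. A stand-alone sketch of the 6bpp case (invert = 63, shift = 8 - bpp = 2), with raw values chosen only to show the mapping:

/* Illustrative sketch only -- not part of the deleted driver. */
#include <stdio.h>

int main(void)
{
	int bpp = 6;
	int invert = (bpp == 4) ? 16 : 63;	/* as in qc_capture() */
	int shift = 8 - bpp;
	int raw;

	/* 6-bit samples 0..63 land on 0xFC..0x00 in steps of 4. */
	for (raw = 0; raw <= 63; raw += 21)
		printf("raw %2d -> 0x%02X\n", raw, (invert - raw) << shift);
	return 0;
}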
-
-/* ------------------------------------------------------------------
- Videobuf operations
- ------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
-{
- struct qcam *dev = vb2_get_drv_priv(vq);
-
- if (0 == *nbuffers)
- *nbuffers = 3;
- *nplanes = 1;
- mutex_lock(&dev->lock);
- if (fmt)
- sizes[0] = fmt->fmt.pix.width * fmt->fmt.pix.height;
- else
- sizes[0] = (dev->width / dev->transfer_scale) *
- (dev->height / dev->transfer_scale);
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static void buffer_queue(struct vb2_buffer *vb)
-{
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-}
-
-static void buffer_finish(struct vb2_buffer *vb)
-{
- struct qcam *qcam = vb2_get_drv_priv(vb->vb2_queue);
- void *vbuf = vb2_plane_vaddr(vb, 0);
- int size = vb->vb2_queue->plane_sizes[0];
- int len;
-
- if (!vb2_is_streaming(vb->vb2_queue))
- return;
-
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
-
- qc_reset(qcam);
-
- /* Update the camera parameters if we need to */
- if (qcam->status & QC_PARAM_CHANGE)
- qc_set(qcam);
-
- len = qc_capture(qcam, vbuf, size);
-
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
- if (len != size)
- vb->state = VB2_BUF_STATE_ERROR;
- vb2_set_plane_payload(vb, 0, len);
-}
-
-static struct vb2_ops qcam_video_qops = {
- .queue_setup = queue_setup,
- .buf_queue = buffer_queue,
- .buf_finish = buffer_finish,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-/*
- * Video4linux interfacing
- */
-
-static int qcam_querycap(struct file *file, void *priv,
- struct v4l2_capability *vcap)
-{
- struct qcam *qcam = video_drvdata(file);
-
- strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
- strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
- vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int qcam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
-{
- if (vin->index > 0)
- return -EINVAL;
- strlcpy(vin->name, "Camera", sizeof(vin->name));
- vin->type = V4L2_INPUT_TYPE_CAMERA;
- vin->audioset = 0;
- vin->tuner = 0;
- vin->std = 0;
- vin->status = 0;
- return 0;
-}
-
-static int qcam_g_input(struct file *file, void *fh, unsigned int *inp)
-{
- *inp = 0;
- return 0;
-}
-
-static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
-{
- return (inp > 0) ? -EINVAL : 0;
-}
-
-static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct qcam *qcam = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- pix->width = qcam->width / qcam->transfer_scale;
- pix->height = qcam->height / qcam->transfer_scale;
- pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = pix->width;
- pix->sizeimage = pix->width * pix->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- if (pix->height <= 60 || pix->width <= 80) {
- pix->height = 60;
- pix->width = 80;
- } else if (pix->height <= 120 || pix->width <= 160) {
- pix->height = 120;
- pix->width = 160;
- } else {
- pix->height = 240;
- pix->width = 320;
- }
- if (pix->pixelformat != V4L2_PIX_FMT_Y4 &&
- pix->pixelformat != V4L2_PIX_FMT_Y6)
- pix->pixelformat = V4L2_PIX_FMT_Y4;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = pix->width;
- pix->sizeimage = pix->width * pix->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct qcam *qcam = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- int ret = qcam_try_fmt_vid_cap(file, fh, fmt);
-
- if (ret)
- return ret;
- if (vb2_is_busy(&qcam->vb_vidq))
- return -EBUSY;
- qcam->width = 320;
- qcam->height = 240;
- if (pix->height == 60)
- qcam->transfer_scale = 4;
- else if (pix->height == 120)
- qcam->transfer_scale = 2;
- else
- qcam->transfer_scale = 1;
- if (pix->pixelformat == V4L2_PIX_FMT_Y6)
- qcam->bpp = 6;
- else
- qcam->bpp = 4;
-
- qc_setscanmode(qcam);
- /* We must update the camera before we grab. We could
- just have changed the grab size */
- qcam->status |= QC_PARAM_CHANGE;
- return 0;
-}
-
-static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
-{
- static struct v4l2_fmtdesc formats[] = {
- { 0, 0, 0,
- "4-Bit Monochrome", V4L2_PIX_FMT_Y4,
- { 0, 0, 0, 0 }
- },
- { 1, 0, 0,
- "6-Bit Monochrome", V4L2_PIX_FMT_Y6,
- { 0, 0, 0, 0 }
- },
- };
- enum v4l2_buf_type type = fmt->type;
-
- if (fmt->index > 1)
- return -EINVAL;
-
- *fmt = formats[fmt->index];
- fmt->type = type;
- return 0;
-}
-
-static int qcam_enum_framesizes(struct file *file, void *fh,
- struct v4l2_frmsizeenum *fsize)
-{
- static const struct v4l2_frmsize_discrete sizes[] = {
- { 80, 60 },
- { 160, 120 },
- { 320, 240 },
- };
-
- if (fsize->index > 2)
- return -EINVAL;
- if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
- fsize->pixel_format != V4L2_PIX_FMT_Y6)
- return -EINVAL;
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete = sizes[fsize->index];
- return 0;
-}
-
-static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct qcam *qcam =
- container_of(ctrl->handler, struct qcam, hdl);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- qcam->brightness = ctrl->val;
- break;
- case V4L2_CID_CONTRAST:
- qcam->contrast = ctrl->val;
- break;
- case V4L2_CID_GAMMA:
- qcam->whitebal = ctrl->val;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret == 0)
- qcam->status |= QC_PARAM_CHANGE;
- return ret;
-}
-
-static const struct v4l2_file_operations qcam_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = vb2_fop_release,
- .poll = vb2_fop_poll,
- .unlocked_ioctl = video_ioctl2,
- .read = vb2_fop_read,
- .mmap = vb2_fop_mmap,
-};
-
-static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
- .vidioc_querycap = qcam_querycap,
- .vidioc_g_input = qcam_g_input,
- .vidioc_s_input = qcam_s_input,
- .vidioc_enum_input = qcam_enum_input,
- .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
- .vidioc_enum_framesizes = qcam_enum_framesizes,
- .vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct v4l2_ctrl_ops qcam_ctrl_ops = {
- .s_ctrl = qcam_s_ctrl,
-};
-
-/* Initialize the QuickCam driver control structure. This is where
- * defaults are set for people who don't have a config file.*/
-
-static struct qcam *qcam_init(struct parport *port)
-{
- struct qcam *qcam;
- struct v4l2_device *v4l2_dev;
- struct vb2_queue *q;
- int err;
-
- qcam = kzalloc(sizeof(struct qcam), GFP_KERNEL);
- if (qcam == NULL)
- return NULL;
-
- v4l2_dev = &qcam->v4l2_dev;
- snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%u", num_cams);
-
- if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- kfree(qcam);
- return NULL;
- }
-
- v4l2_ctrl_handler_init(&qcam->hdl, 3);
- v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 255, 1, 180);
- v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
- V4L2_CID_CONTRAST, 0, 255, 1, 192);
- v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
- V4L2_CID_GAMMA, 0, 255, 1, 105);
- if (qcam->hdl.error) {
- v4l2_err(v4l2_dev, "couldn't register controls\n");
- goto exit;
- }
-
- mutex_init(&qcam->lock);
- mutex_init(&qcam->queue_lock);
-
- /* initialize queue */
- q = &qcam->vb_vidq;
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
- q->drv_priv = qcam;
- q->ops = &qcam_video_qops;
- q->mem_ops = &vb2_vmalloc_memops;
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- err = vb2_queue_init(q);
- if (err < 0) {
- v4l2_err(v4l2_dev, "couldn't init vb2_queue for %s.\n", port->name);
- goto exit;
- }
- qcam->vdev.queue = q;
- qcam->vdev.queue->lock = &qcam->queue_lock;
-
- qcam->pport = port;
- qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
- NULL, 0, NULL);
- if (qcam->pdev == NULL) {
- v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
- goto exit;
- }
-
- strlcpy(qcam->vdev.name, "Connectix QuickCam", sizeof(qcam->vdev.name));
- qcam->vdev.v4l2_dev = v4l2_dev;
- qcam->vdev.ctrl_handler = &qcam->hdl;
- qcam->vdev.fops = &qcam_fops;
- qcam->vdev.lock = &qcam->lock;
- qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
- qcam->vdev.release = video_device_release_empty;
- video_set_drvdata(&qcam->vdev, qcam);
-
- qcam->port_mode = (QC_ANY | QC_NOTSET);
- qcam->width = 320;
- qcam->height = 240;
- qcam->bpp = 4;
- qcam->transfer_scale = 2;
- qcam->contrast = 192;
- qcam->brightness = 180;
- qcam->whitebal = 105;
- qcam->top = 1;
- qcam->left = 14;
- qcam->mode = -1;
- qcam->status = QC_PARAM_CHANGE;
- return qcam;
-
-exit:
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
-}
-
-static int qc_calibrate(struct qcam *q)
-{
- /*
-	 * Bugfix by Hanno Mueller hmueller@kabel.de, May 21 96
- * The white balance is an individual value for each
- * quickcam.
- */
-
- int value;
- int count = 0;
-
- qc_command(q, 27); /* AutoAdjustOffset */
- qc_command(q, 0); /* Dummy Parameter, ignored by the camera */
-
- /* GetOffset (33) will read 255 until autocalibration */
- /* is finished. After that, a value of 1-254 will be */
- /* returned. */
-
- do {
- qc_command(q, 33);
- value = qc_readparam(q);
- mdelay(1);
- schedule();
- count++;
- } while (value == 0xff && count < 2048);
-
- q->whitebal = value;
- return value;
-}
-
-static int init_bwqcam(struct parport *port)
-{
- struct qcam *qcam;
-
- if (num_cams == MAX_CAMS) {
- printk(KERN_ERR "Too many Quickcams (max %d)\n", MAX_CAMS);
- return -ENOSPC;
- }
-
- qcam = qcam_init(port);
- if (qcam == NULL)
- return -ENODEV;
-
- parport_claim_or_block(qcam->pdev);
-
- qc_reset(qcam);
-
- if (qc_detect(qcam) == 0) {
- parport_release(qcam->pdev);
- parport_unregister_device(qcam->pdev);
- kfree(qcam);
- return -ENODEV;
- }
- qc_calibrate(qcam);
- v4l2_ctrl_handler_setup(&qcam->hdl);
-
- parport_release(qcam->pdev);
-
- v4l2_info(&qcam->v4l2_dev, "Connectix Quickcam on %s\n", qcam->pport->name);
-
- if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
- parport_unregister_device(qcam->pdev);
- kfree(qcam);
- return -ENODEV;
- }
-
- qcams[num_cams++] = qcam;
-
- return 0;
-}
-
-static void close_bwqcam(struct qcam *qcam)
-{
- video_unregister_device(&qcam->vdev);
- v4l2_ctrl_handler_free(&qcam->hdl);
- parport_unregister_device(qcam->pdev);
- kfree(qcam);
-}
-
-/* The parport parameter controls which parports will be scanned.
- * Scanning all parports causes some printers to print a garbage page.
- * -- March 14, 1999 Billy Donahue <billy@escape.com> */
-#ifdef MODULE
-static char *parport[MAX_CAMS] = { NULL, };
-module_param_array(parport, charp, NULL, 0);
-#endif
-
-static int accept_bwqcam(struct parport *port)
-{
-#ifdef MODULE
- int n;
-
- if (parport[0] && strncmp(parport[0], "auto", 4) != 0) {
- /* user gave parport parameters */
- for (n = 0; n < MAX_CAMS && parport[n]; n++) {
- char *ep;
- unsigned long r;
- r = simple_strtoul(parport[n], &ep, 0);
- if (ep == parport[n]) {
- printk(KERN_ERR
- "bw-qcam: bad port specifier \"%s\"\n",
- parport[n]);
- continue;
- }
- if (r == port->number)
- return 1;
- }
- return 0;
- }
-#endif
- return 1;
-}
-
-static void bwqcam_attach(struct parport *port)
-{
- if (accept_bwqcam(port))
- init_bwqcam(port);
-}
-
-static void bwqcam_detach(struct parport *port)
-{
- int i;
- for (i = 0; i < num_cams; i++) {
- struct qcam *qcam = qcams[i];
- if (qcam && qcam->pdev->port == port) {
- qcams[i] = NULL;
- close_bwqcam(qcam);
- }
- }
-}
-
-static struct parport_driver bwqcam_driver = {
- .name = "bw-qcam",
- .attach = bwqcam_attach,
- .detach = bwqcam_detach,
-};
-
-static void __exit exit_bw_qcams(void)
-{
- parport_unregister_driver(&bwqcam_driver);
-}
-
-static int __init init_bw_qcams(void)
-{
-#ifdef MODULE
- /* Do some sanity checks on the module parameters. */
- if (maxpoll > 5000) {
- printk(KERN_INFO "Connectix Quickcam max-poll was above 5000. Using 5000.\n");
- maxpoll = 5000;
- }
-
- if (yieldlines < 1) {
- printk(KERN_INFO "Connectix Quickcam yieldlines was less than 1. Using 1.\n");
- yieldlines = 1;
- }
-#endif
- return parport_register_driver(&bwqcam_driver);
-}
-
-module_init(init_bw_qcams);
-module_exit(exit_bw_qcams);
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.0.3");
diff --git a/drivers/staging/media/parport/c-qcam.c b/drivers/staging/media/parport/c-qcam.c
deleted file mode 100644
index b9010bd3ed3e..000000000000
--- a/drivers/staging/media/parport/c-qcam.c
+++ /dev/null
@@ -1,882 +0,0 @@
-/*
- * Video4Linux Colour QuickCam driver
- * Copyright 1997-2000 Philip Blundell <philb@gnu.org>
- *
- * Module parameters:
- *
- * parport=auto -- probe all parports (default)
- * parport=0 -- parport0 becomes qcam1
- * parport=2,0,1 -- parports 2,0,1 are tried in that order
- *
- * probe=0 -- do no probing, assume camera is present
- * probe=1 -- use IEEE-1284 autoprobe data only (default)
- * probe=2 -- probe aggressively for cameras
- *
- * force_rgb=1 -- force data format to RGB (default is BGR)
- *
- * The parport parameter controls which parports will be scanned.
- * Scanning all parports causes some printers to print a garbage page.
- * -- March 14, 1999 Billy Donahue <billy@escape.com>
- *
- * Fixed data format to BGR, added force_rgb parameter. Added missing
- * parport_unregister_driver() on module removal.
- * -- May 28, 2000 Claudio Matsuoka <claudio@conectiva.com>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/parport.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/jiffies.h>
-#include <linux/videodev2.h>
-#include <asm/uaccess.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
-
-struct qcam {
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct v4l2_ctrl_handler hdl;
- struct pardevice *pdev;
- struct parport *pport;
- int width, height;
- int ccd_width, ccd_height;
- int mode;
- int contrast, brightness, whitebal;
- int top, left;
- unsigned int bidirectional;
- struct mutex lock;
-};
-
-/* cameras maximum */
-#define MAX_CAMS 4
-
-/* The three possible QuickCam modes */
-#define QC_MILLIONS 0x18
-#define QC_BILLIONS 0x10
-#define QC_THOUSANDS 0x08 /* with VIDEC compression (not supported) */
-
-/* The three possible decimations */
-#define QC_DECIMATION_1 0
-#define QC_DECIMATION_2 2
-#define QC_DECIMATION_4 4
-
-#define BANNER "Colour QuickCam for Video4Linux v0.06"
-
-static int parport[MAX_CAMS] = { [1 ... MAX_CAMS-1] = -1 };
-static int probe = 2;
-static bool force_rgb;
-static int video_nr = -1;
-
-/* FIXME: parport=auto would never have worked, surely? --RR */
-MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n"
- "probe=<0|1|2> for camera detection method\n"
- "force_rgb=<0|1> for RGB data format (default BGR)");
-module_param_array(parport, int, NULL, 0);
-module_param(probe, int, 0);
-module_param(force_rgb, bool, 0);
-module_param(video_nr, int, 0);
-
-static struct qcam *qcams[MAX_CAMS];
-static unsigned int num_cams;
-
-static inline void qcam_set_ack(struct qcam *qcam, unsigned int i)
-{
- /* note: the QC specs refer to the PCAck pin by voltage, not
- software level. PC ports have builtin inverters. */
- parport_frob_control(qcam->pport, 8, i ? 8 : 0);
-}
-
-static inline unsigned int qcam_ready1(struct qcam *qcam)
-{
- return (parport_read_status(qcam->pport) & 0x8) ? 1 : 0;
-}
-
-static inline unsigned int qcam_ready2(struct qcam *qcam)
-{
- return (parport_read_data(qcam->pport) & 0x1) ? 1 : 0;
-}
-
-static unsigned int qcam_await_ready1(struct qcam *qcam, int value)
-{
- struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
- unsigned long oldjiffies = jiffies;
- unsigned int i;
-
- for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
- if (qcam_ready1(qcam) == value)
- return 0;
-
- /* If the camera didn't respond within 1/25 second, poll slowly
- for a while. */
- for (i = 0; i < 50; i++) {
- if (qcam_ready1(qcam) == value)
- return 0;
- msleep_interruptible(100);
- }
-
- /* Probably somebody pulled the plug out. Not much we can do. */
- v4l2_err(v4l2_dev, "ready1 timeout (%d) %x %x\n", value,
- parport_read_status(qcam->pport),
- parport_read_control(qcam->pport));
- return 1;
-}
-
-static unsigned int qcam_await_ready2(struct qcam *qcam, int value)
-{
- struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
- unsigned long oldjiffies = jiffies;
- unsigned int i;
-
- for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
- if (qcam_ready2(qcam) == value)
- return 0;
-
- /* If the camera didn't respond within 1/25 second, poll slowly
- for a while. */
- for (i = 0; i < 50; i++) {
- if (qcam_ready2(qcam) == value)
- return 0;
- msleep_interruptible(100);
- }
-
- /* Probably somebody pulled the plug out. Not much we can do. */
- v4l2_err(v4l2_dev, "ready2 timeout (%d) %x %x %x\n", value,
- parport_read_status(qcam->pport),
- parport_read_control(qcam->pport),
- parport_read_data(qcam->pport));
- return 1;
-}
-
-static int qcam_read_data(struct qcam *qcam)
-{
- unsigned int idata;
-
- qcam_set_ack(qcam, 0);
- if (qcam_await_ready1(qcam, 1))
- return -1;
- idata = parport_read_status(qcam->pport) & 0xf0;
- qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0))
- return -1;
- idata |= parport_read_status(qcam->pport) >> 4;
- return idata;
-}
-
-static int qcam_write_data(struct qcam *qcam, unsigned int data)
-{
- struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
- unsigned int idata;
-
- parport_write_data(qcam->pport, data);
- idata = qcam_read_data(qcam);
- if (data != idata) {
- v4l2_warn(v4l2_dev, "sent %x but received %x\n", data,
- idata);
- return 1;
- }
- return 0;
-}
-
-static inline int qcam_set(struct qcam *qcam, unsigned int cmd, unsigned int data)
-{
- if (qcam_write_data(qcam, cmd))
- return -1;
- if (qcam_write_data(qcam, data))
- return -1;
- return 0;
-}
-
-static inline int qcam_get(struct qcam *qcam, unsigned int cmd)
-{
- if (qcam_write_data(qcam, cmd))
- return -1;
- return qcam_read_data(qcam);
-}
-
-static int qc_detect(struct qcam *qcam)
-{
- unsigned int stat, ostat, i, count = 0;
-
- /* The probe routine below is not very reliable. The IEEE-1284
- probe takes precedence. */
- /* XXX Currently parport provides no way to distinguish between
- "the IEEE probe was not done" and "the probe was done, but
- no device was found". Fix this one day. */
- if (qcam->pport->probe_info[0].class == PARPORT_CLASS_MEDIA
- && qcam->pport->probe_info[0].model
- && !strcmp(qcam->pdev->port->probe_info[0].model,
- "Color QuickCam 2.0")) {
- printk(KERN_DEBUG "QuickCam: Found by IEEE1284 probe.\n");
- return 1;
- }
-
- if (probe < 2)
- return 0;
-
- parport_write_control(qcam->pport, 0xc);
-
- /* look for a heartbeat */
- ostat = stat = parport_read_status(qcam->pport);
- for (i = 0; i < 250; i++) {
- mdelay(1);
- stat = parport_read_status(qcam->pport);
- if (ostat != stat) {
- if (++count >= 3)
- return 1;
- ostat = stat;
- }
- }
-
- /* Reset the camera and try again */
- parport_write_control(qcam->pport, 0xc);
- parport_write_control(qcam->pport, 0x8);
- mdelay(1);
- parport_write_control(qcam->pport, 0xc);
- mdelay(1);
- count = 0;
-
- ostat = stat = parport_read_status(qcam->pport);
- for (i = 0; i < 250; i++) {
- mdelay(1);
- stat = parport_read_status(qcam->pport);
- if (ostat != stat) {
- if (++count >= 3)
- return 1;
- ostat = stat;
- }
- }
-
- /* no (or flatline) camera, give up */
- return 0;
-}
-
-static void qc_reset(struct qcam *qcam)
-{
- parport_write_control(qcam->pport, 0xc);
- parport_write_control(qcam->pport, 0x8);
- mdelay(1);
- parport_write_control(qcam->pport, 0xc);
- mdelay(1);
-}
-
-/* Reset the QuickCam and program for brightness, contrast,
- * white-balance, and resolution. */
-
-static void qc_setup(struct qcam *qcam)
-{
- qc_reset(qcam);
-
- /* Set the brightness. */
- qcam_set(qcam, 11, qcam->brightness);
-
- /* Set the height and width. These refer to the actual
- CCD area *before* applying the selected decimation. */
- qcam_set(qcam, 17, qcam->ccd_height);
- qcam_set(qcam, 19, qcam->ccd_width / 2);
-
- /* Set top and left. */
- qcam_set(qcam, 0xd, qcam->top);
- qcam_set(qcam, 0xf, qcam->left);
-
- /* Set contrast and white balance. */
- qcam_set(qcam, 0x19, qcam->contrast);
- qcam_set(qcam, 0x1f, qcam->whitebal);
-
- /* Set the speed. */
- qcam_set(qcam, 45, 2);
-}
-
-/* Read some bytes from the camera and put them in the buffer.
- nbytes should be a multiple of 3, because bidirectional mode gives
- us three bytes at a time. */
-
-static unsigned int qcam_read_bytes(struct qcam *qcam, unsigned char *buf, unsigned int nbytes)
-{
- unsigned int bytes = 0;
-
- qcam_set_ack(qcam, 0);
- if (qcam->bidirectional) {
- /* It's a bidirectional port */
- while (bytes < nbytes) {
- unsigned int lo1, hi1, lo2, hi2;
- unsigned char r, g, b;
-
- if (qcam_await_ready2(qcam, 1))
- return bytes;
- lo1 = parport_read_data(qcam->pport) >> 1;
- hi1 = ((parport_read_status(qcam->pport) >> 3) & 0x1f) ^ 0x10;
- qcam_set_ack(qcam, 1);
- if (qcam_await_ready2(qcam, 0))
- return bytes;
- lo2 = parport_read_data(qcam->pport) >> 1;
- hi2 = ((parport_read_status(qcam->pport) >> 3) & 0x1f) ^ 0x10;
- qcam_set_ack(qcam, 0);
- r = lo1 | ((hi1 & 1) << 7);
- g = ((hi1 & 0x1e) << 3) | ((hi2 & 0x1e) >> 1);
- b = lo2 | ((hi2 & 1) << 7);
- if (force_rgb) {
- buf[bytes++] = r;
- buf[bytes++] = g;
- buf[bytes++] = b;
- } else {
- buf[bytes++] = b;
- buf[bytes++] = g;
- buf[bytes++] = r;
- }
- }
- } else {
- /* It's a unidirectional port */
- int i = 0, n = bytes;
- unsigned char rgb[3];
-
- while (bytes < nbytes) {
- unsigned int hi, lo;
-
- if (qcam_await_ready1(qcam, 1))
- return bytes;
- hi = (parport_read_status(qcam->pport) & 0xf0);
- qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0))
- return bytes;
- lo = (parport_read_status(qcam->pport) & 0xf0);
- qcam_set_ack(qcam, 0);
- /* flip some bits */
- rgb[(i = bytes++ % 3)] = (hi | (lo >> 4)) ^ 0x88;
- if (i >= 2) {
-get_fragment:
- if (force_rgb) {
- buf[n++] = rgb[0];
- buf[n++] = rgb[1];
- buf[n++] = rgb[2];
- } else {
- buf[n++] = rgb[2];
- buf[n++] = rgb[1];
- buf[n++] = rgb[0];
- }
- }
- }
- if (i) {
- i = 0;
- goto get_fragment;
- }
- }
- return bytes;
-}
-
-#define BUFSZ 150
-
-static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len)
-{
- struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
- unsigned lines, pixelsperline;
- unsigned int is_bi_dir = qcam->bidirectional;
- size_t wantlen, outptr = 0;
- char tmpbuf[BUFSZ];
-
- if (!access_ok(VERIFY_WRITE, buf, len))
- return -EFAULT;
-
- /* Wait for camera to become ready */
- for (;;) {
- int i = qcam_get(qcam, 41);
-
- if (i == -1) {
- qc_setup(qcam);
- return -EIO;
- }
- if ((i & 0x80) == 0)
- break;
- schedule();
- }
-
- if (qcam_set(qcam, 7, (qcam->mode | (is_bi_dir ? 1 : 0)) + 1))
- return -EIO;
-
- lines = qcam->height;
- pixelsperline = qcam->width;
-
- if (is_bi_dir) {
- /* Turn the port around */
- parport_data_reverse(qcam->pport);
- mdelay(3);
- qcam_set_ack(qcam, 0);
- if (qcam_await_ready1(qcam, 1)) {
- qc_setup(qcam);
- return -EIO;
- }
- qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0)) {
- qc_setup(qcam);
- return -EIO;
- }
- }
-
- wantlen = lines * pixelsperline * 24 / 8;
-
- while (wantlen) {
- size_t t, s;
-
- s = (wantlen > BUFSZ) ? BUFSZ : wantlen;
- t = qcam_read_bytes(qcam, tmpbuf, s);
- if (outptr < len) {
- size_t sz = len - outptr;
-
- if (sz > t)
- sz = t;
- if (__copy_to_user(buf + outptr, tmpbuf, sz))
- break;
- outptr += sz;
- }
- wantlen -= t;
- if (t < s)
- break;
- cond_resched();
- }
-
- len = outptr;
-
- if (wantlen) {
- v4l2_err(v4l2_dev, "short read.\n");
- if (is_bi_dir)
- parport_data_forward(qcam->pport);
- qc_setup(qcam);
- return len;
- }
-
- if (is_bi_dir) {
- int l;
-
- do {
- l = qcam_read_bytes(qcam, tmpbuf, 3);
- cond_resched();
- } while (l && (tmpbuf[0] == 0x7e || tmpbuf[1] == 0x7e || tmpbuf[2] == 0x7e));
- if (force_rgb) {
- if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- v4l2_err(v4l2_dev, "bad EOF\n");
- } else {
- if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- v4l2_err(v4l2_dev, "bad EOF\n");
- }
- qcam_set_ack(qcam, 0);
- if (qcam_await_ready1(qcam, 1)) {
- v4l2_err(v4l2_dev, "no ack after EOF\n");
- parport_data_forward(qcam->pport);
- qc_setup(qcam);
- return len;
- }
- parport_data_forward(qcam->pport);
- mdelay(3);
- qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0)) {
- v4l2_err(v4l2_dev, "no ack to port turnaround\n");
- qc_setup(qcam);
- return len;
- }
- } else {
- int l;
-
- do {
- l = qcam_read_bytes(qcam, tmpbuf, 1);
- cond_resched();
- } while (l && tmpbuf[0] == 0x7e);
- l = qcam_read_bytes(qcam, tmpbuf + 1, 2);
- if (force_rgb) {
- if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- v4l2_err(v4l2_dev, "bad EOF\n");
- } else {
- if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- v4l2_err(v4l2_dev, "bad EOF\n");
- }
- }
-
- qcam_write_data(qcam, 0);
- return len;
-}
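To make the sizing in qc_capture() concrete: at the default 320x240 resolution set up in qcam_init() below, the camera delivers 24 bits per pixel, and the 150-byte scratch buffer splits the stream into whole RGB triples. A quick stand-alone check of that arithmetic:

/* Illustrative sketch only -- not part of the deleted driver. */
#include <stdio.h>

#define BUFSZ 150	/* same scratch-buffer size as above */

int main(void)
{
	unsigned lines = 240, pixelsperline = 320;	/* driver defaults */
	size_t wantlen = (size_t)lines * pixelsperline * 24 / 8;

	printf("%zu bytes per frame = %zu chunks of %d bytes\n",
	       wantlen, wantlen / BUFSZ, BUFSZ);
	return 0;
}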
-
-/*
- * Video4linux interfacing
- */
-
-static int qcam_querycap(struct file *file, void *priv,
- struct v4l2_capability *vcap)
-{
- struct qcam *qcam = video_drvdata(file);
-
- strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
- strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
- vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int qcam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
-{
- if (vin->index > 0)
- return -EINVAL;
- strlcpy(vin->name, "Camera", sizeof(vin->name));
- vin->type = V4L2_INPUT_TYPE_CAMERA;
- vin->audioset = 0;
- vin->tuner = 0;
- vin->std = 0;
- vin->status = 0;
- return 0;
-}
-
-static int qcam_g_input(struct file *file, void *fh, unsigned int *inp)
-{
- *inp = 0;
- return 0;
-}
-
-static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
-{
- return (inp > 0) ? -EINVAL : 0;
-}
-
-static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct qcam *qcam = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- pix->width = qcam->width;
- pix->height = qcam->height;
- pix->pixelformat = V4L2_PIX_FMT_RGB24;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = 3 * qcam->width;
- pix->sizeimage = 3 * qcam->width * qcam->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- if (pix->height < 60 || pix->width < 80) {
- pix->height = 60;
- pix->width = 80;
- } else if (pix->height < 120 || pix->width < 160) {
- pix->height = 120;
- pix->width = 160;
- } else {
- pix->height = 240;
- pix->width = 320;
- }
- pix->pixelformat = V4L2_PIX_FMT_RGB24;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = 3 * pix->width;
- pix->sizeimage = 3 * pix->width * pix->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct qcam *qcam = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- int ret = qcam_try_fmt_vid_cap(file, fh, fmt);
-
- if (ret)
- return ret;
- switch (pix->height) {
- case 60:
- qcam->mode = QC_DECIMATION_4;
- break;
- case 120:
- qcam->mode = QC_DECIMATION_2;
- break;
- default:
- qcam->mode = QC_DECIMATION_1;
- break;
- }
-
- mutex_lock(&qcam->lock);
- qcam->mode |= QC_MILLIONS;
- qcam->height = pix->height;
- qcam->width = pix->width;
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
-}
-
-static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
-{
- static struct v4l2_fmtdesc formats[] = {
- { 0, 0, 0,
- "RGB 8:8:8", V4L2_PIX_FMT_RGB24,
- { 0, 0, 0, 0 }
- },
- };
- enum v4l2_buf_type type = fmt->type;
-
- if (fmt->index > 0)
- return -EINVAL;
-
- *fmt = formats[fmt->index];
- fmt->type = type;
- return 0;
-}
-
-static ssize_t qcam_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qcam *qcam = video_drvdata(file);
- int len;
-
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- /* Probably should have a semaphore against multiple users */
- len = qc_capture(qcam, buf, count);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return len;
-}
-
-static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct qcam *qcam =
- container_of(ctrl->handler, struct qcam, hdl);
- int ret = 0;
-
- mutex_lock(&qcam->lock);
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- qcam->brightness = ctrl->val;
- break;
- case V4L2_CID_CONTRAST:
- qcam->contrast = ctrl->val;
- break;
- case V4L2_CID_GAMMA:
- qcam->whitebal = ctrl->val;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret == 0) {
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- }
- mutex_unlock(&qcam->lock);
- return ret;
-}
-
-static const struct v4l2_file_operations qcam_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = v4l2_fh_release,
- .poll = v4l2_ctrl_poll,
- .unlocked_ioctl = video_ioctl2,
- .read = qcam_read,
-};
-
-static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
- .vidioc_querycap = qcam_querycap,
- .vidioc_g_input = qcam_g_input,
- .vidioc_s_input = qcam_s_input,
- .vidioc_enum_input = qcam_enum_input,
- .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct v4l2_ctrl_ops qcam_ctrl_ops = {
- .s_ctrl = qcam_s_ctrl,
-};
-
-/* Initialize the QuickCam driver control structure. */
-
-static struct qcam *qcam_init(struct parport *port)
-{
- struct qcam *qcam;
- struct v4l2_device *v4l2_dev;
-
- qcam = kzalloc(sizeof(*qcam), GFP_KERNEL);
- if (qcam == NULL)
- return NULL;
-
- v4l2_dev = &qcam->v4l2_dev;
- strlcpy(v4l2_dev->name, "c-qcam", sizeof(v4l2_dev->name));
-
- if (v4l2_device_register(NULL, v4l2_dev) < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- kfree(qcam);
- return NULL;
- }
-
- v4l2_ctrl_handler_init(&qcam->hdl, 3);
- v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 255, 1, 240);
- v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
- V4L2_CID_CONTRAST, 0, 255, 1, 192);
- v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
- V4L2_CID_GAMMA, 0, 255, 1, 128);
- if (qcam->hdl.error) {
- v4l2_err(v4l2_dev, "couldn't register controls\n");
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
- }
-
- qcam->pport = port;
- qcam->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
- NULL, 0, NULL);
-
- qcam->bidirectional = (qcam->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0;
-
- if (qcam->pdev == NULL) {
- v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
- }
-
- strlcpy(qcam->vdev.name, "Colour QuickCam", sizeof(qcam->vdev.name));
- qcam->vdev.v4l2_dev = v4l2_dev;
- qcam->vdev.fops = &qcam_fops;
- qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
- qcam->vdev.release = video_device_release_empty;
- qcam->vdev.ctrl_handler = &qcam->hdl;
- video_set_drvdata(&qcam->vdev, qcam);
-
- mutex_init(&qcam->lock);
- qcam->width = qcam->ccd_width = 320;
- qcam->height = qcam->ccd_height = 240;
- qcam->mode = QC_MILLIONS | QC_DECIMATION_1;
- qcam->contrast = 192;
- qcam->brightness = 240;
- qcam->whitebal = 128;
- qcam->top = 1;
- qcam->left = 14;
- return qcam;
-}
-
-static int init_cqcam(struct parport *port)
-{
- struct qcam *qcam;
- struct v4l2_device *v4l2_dev;
-
- if (parport[0] != -1) {
- /* The user gave specific instructions */
- int i, found = 0;
-
- for (i = 0; i < MAX_CAMS && parport[i] != -1; i++) {
- if (parport[0] == port->number)
- found = 1;
- }
- if (!found)
- return -ENODEV;
- }
-
- if (num_cams == MAX_CAMS)
- return -ENOSPC;
-
- qcam = qcam_init(port);
- if (qcam == NULL)
- return -ENODEV;
-
- v4l2_dev = &qcam->v4l2_dev;
-
- parport_claim_or_block(qcam->pdev);
-
- qc_reset(qcam);
-
- if (probe && qc_detect(qcam) == 0) {
- parport_release(qcam->pdev);
- parport_unregister_device(qcam->pdev);
- kfree(qcam);
- return -ENODEV;
- }
-
- qc_setup(qcam);
-
- parport_release(qcam->pdev);
-
- if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
- v4l2_err(v4l2_dev, "Unable to register Colour QuickCam on %s\n",
- qcam->pport->name);
- parport_unregister_device(qcam->pdev);
- kfree(qcam);
- return -ENODEV;
- }
-
- v4l2_info(v4l2_dev, "%s: Colour QuickCam found on %s\n",
- video_device_node_name(&qcam->vdev), qcam->pport->name);
-
- qcams[num_cams++] = qcam;
-
- return 0;
-}
-
-static void close_cqcam(struct qcam *qcam)
-{
- video_unregister_device(&qcam->vdev);
- v4l2_ctrl_handler_free(&qcam->hdl);
- parport_unregister_device(qcam->pdev);
- kfree(qcam);
-}
-
-static void cq_attach(struct parport *port)
-{
- init_cqcam(port);
-}
-
-static void cq_detach(struct parport *port)
-{
- /* Write this some day. */
-}
-
-static struct parport_driver cqcam_driver = {
- .name = "cqcam",
- .attach = cq_attach,
- .detach = cq_detach,
-};
-
-static int __init cqcam_init(void)
-{
- printk(KERN_INFO BANNER "\n");
-
- return parport_register_driver(&cqcam_driver);
-}
-
-static void __exit cqcam_cleanup(void)
-{
- unsigned int i;
-
- for (i = 0; i < num_cams; i++)
- close_cqcam(qcams[i]);
-
- parport_unregister_driver(&cqcam_driver);
-}
-
-MODULE_AUTHOR("Philip Blundell <philb@gnu.org>");
-MODULE_DESCRIPTION(BANNER);
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.0.4");
-
-module_init(cqcam_init);
-module_exit(cqcam_cleanup);
diff --git a/drivers/staging/media/parport/pms.c b/drivers/staging/media/parport/pms.c
deleted file mode 100644
index e6b497528cea..000000000000
--- a/drivers/staging/media/parport/pms.c
+++ /dev/null
@@ -1,1156 +0,0 @@
-/*
- * Media Vision Pro Movie Studio
- * or
- * "all you need is an I2C bus, some RAM and a prayer"
- *
- * This draws heavily on code
- *
- * (c) Wolfgang Koehler, wolf@first.gmd.de, Dec. 1994
- * Kiefernring 15
- * 14478 Potsdam, Germany
- *
- * Most of this code is directly derived from his userspace driver.
- * His driver works, so send any reports to alan@lxorguk.ukuu.org.uk
- * unless the userspace driver also doesn't work for you...
- *
- * Changes:
- * 25-11-2009 Hans Verkuil <hverkuil@xs4all.nl>
- * - converted to version 2 of the V4L API.
- * 08/07/2003 Daniele Bellucci <bellucda@tiscali.it>
- * - pms_capture: report back -EFAULT
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/isa.h>
-#include <asm/io.h>
-
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-device.h>
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.0.5");
-
-#define MOTOROLA 1
-#define PHILIPS2 2 /* SAA7191 */
-#define PHILIPS1 3
-#define MVVMEMORYWIDTH 0x40 /* 512 bytes */
-
-struct i2c_info {
- u8 slave;
- u8 sub;
- u8 data;
- u8 hits;
-};
-
-struct pms {
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct v4l2_ctrl_handler hdl;
- int height;
- int width;
- int depth;
- int input;
- struct mutex lock;
- int i2c_count;
- struct i2c_info i2cinfo[64];
-
- int decoder;
- int standard; /* 0 - auto 1 - ntsc 2 - pal 3 - secam */
- v4l2_std_id std;
- int io;
- int data;
- void __iomem *mem;
-};
-
-/*
- * I/O ports and Shared Memory
- */
-
-static int io_port = 0x250;
-module_param(io_port, int, 0);
-
-static int mem_base = 0xc8000;
-module_param(mem_base, int, 0);
-
-static int video_nr = -1;
-module_param(video_nr, int, 0);
-
-
-static inline void mvv_write(struct pms *dev, u8 index, u8 value)
-{
- outw(index | (value << 8), dev->io);
-}
-
-static inline u8 mvv_read(struct pms *dev, u8 index)
-{
- outb(index, dev->io);
- return inb(dev->data);
-}
-
-static int pms_i2c_stat(struct pms *dev, u8 slave)
-{
- int counter = 0;
- int i;
-
- outb(0x28, dev->io);
-
- while ((inb(dev->data) & 0x01) == 0)
- if (counter++ == 256)
- break;
-
- while ((inb(dev->data) & 0x01) != 0)
- if (counter++ == 256)
- break;
-
- outb(slave, dev->io);
-
- counter = 0;
- while ((inb(dev->data) & 0x01) == 0)
- if (counter++ == 256)
- break;
-
- while ((inb(dev->data) & 0x01) != 0)
- if (counter++ == 256)
- break;
-
- for (i = 0; i < 12; i++) {
- char st = inb(dev->data);
-
- if ((st & 2) != 0)
- return -1;
- if ((st & 1) == 0)
- break;
- }
- outb(0x29, dev->io);
- return inb(dev->data);
-}
-
-static int pms_i2c_write(struct pms *dev, u16 slave, u16 sub, u16 data)
-{
- int skip = 0;
- int count;
- int i;
-
- for (i = 0; i < dev->i2c_count; i++) {
- if ((dev->i2cinfo[i].slave == slave) &&
- (dev->i2cinfo[i].sub == sub)) {
- if (dev->i2cinfo[i].data == data)
- skip = 1;
- dev->i2cinfo[i].data = data;
- i = dev->i2c_count + 1;
- }
- }
-
- if (i == dev->i2c_count && dev->i2c_count < 64) {
- dev->i2cinfo[dev->i2c_count].slave = slave;
- dev->i2cinfo[dev->i2c_count].sub = sub;
- dev->i2cinfo[dev->i2c_count].data = data;
- dev->i2c_count++;
- }
-
- if (skip)
- return 0;
-
- mvv_write(dev, 0x29, sub);
- mvv_write(dev, 0x2A, data);
- mvv_write(dev, 0x28, slave);
-
- outb(0x28, dev->io);
-
- count = 0;
- while ((inb(dev->data) & 1) == 0)
- if (count > 255)
- break;
- while ((inb(dev->data) & 1) != 0)
- if (count > 255)
- break;
-
- count = inb(dev->data);
-
- if (count & 2)
- return -1;
- return count;
-}
-
-static int pms_i2c_read(struct pms *dev, int slave, int sub)
-{
- int i;
-
- for (i = 0; i < dev->i2c_count; i++) {
- if (dev->i2cinfo[i].slave == slave && dev->i2cinfo[i].sub == sub)
- return dev->i2cinfo[i].data;
- }
- return 0;
-}
-
-
-static void pms_i2c_andor(struct pms *dev, int slave, int sub, int and, int or)
-{
- u8 tmp;
-
- tmp = pms_i2c_read(dev, slave, sub);
- tmp = (tmp & and) | or;
- pms_i2c_write(dev, slave, sub, tmp);
-}
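pms_i2c_andor() is the read-modify-write helper used by most of the control functions that follow: the AND mask picks which bits of the cached register value survive, the OR mask then sets new ones. A stand-alone sketch with a made-up register value, using the NTSC masks from pms_format() below (and = 0x3f, or = 0x40):

/* Illustrative sketch only -- not part of the deleted driver. */
#include <stdio.h>

static unsigned char andor(unsigned char val, unsigned char and, unsigned char or)
{
	return (val & and) | or;	/* same expression as pms_i2c_andor() */
}

int main(void)
{
	unsigned char reg = 0xb5;	/* hypothetical cached register value */

	/* NTSC selection in pms_format(): clear bits 7..6, set bit 6. */
	printf("0x%02X -> 0x%02X\n", reg, andor(reg, 0x3f, 0x40));
	return 0;
}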
-
-/*
- * Control functions
- */
-
-
-static void pms_videosource(struct pms *dev, short source)
-{
- switch (dev->decoder) {
- case MOTOROLA:
- break;
- case PHILIPS2:
- pms_i2c_andor(dev, 0x8a, 0x06, 0x7f, source ? 0x80 : 0);
- break;
- case PHILIPS1:
- break;
- }
- mvv_write(dev, 0x2E, 0x31);
- /* Was: mvv_write(dev, 0x2E, source ? 0x31 : 0x30);
- But could not make this work correctly. Only Composite input
- worked for me. */
-}
-
-static void pms_hue(struct pms *dev, short hue)
-{
- switch (dev->decoder) {
- case MOTOROLA:
- pms_i2c_write(dev, 0x8a, 0x00, hue);
- break;
- case PHILIPS2:
- pms_i2c_write(dev, 0x8a, 0x07, hue);
- break;
- case PHILIPS1:
- pms_i2c_write(dev, 0x42, 0x07, hue);
- break;
- }
-}
-
-static void pms_saturation(struct pms *dev, short sat)
-{
- switch (dev->decoder) {
- case MOTOROLA:
- pms_i2c_write(dev, 0x8a, 0x00, sat);
- break;
- case PHILIPS1:
- pms_i2c_write(dev, 0x42, 0x12, sat);
- break;
- }
-}
-
-
-static void pms_contrast(struct pms *dev, short contrast)
-{
- switch (dev->decoder) {
- case MOTOROLA:
- pms_i2c_write(dev, 0x8a, 0x00, contrast);
- break;
- case PHILIPS1:
- pms_i2c_write(dev, 0x42, 0x13, contrast);
- break;
- }
-}
-
-static void pms_brightness(struct pms *dev, short brightness)
-{
- switch (dev->decoder) {
- case MOTOROLA:
- pms_i2c_write(dev, 0x8a, 0x00, brightness);
- pms_i2c_write(dev, 0x8a, 0x00, brightness);
- pms_i2c_write(dev, 0x8a, 0x00, brightness);
- break;
- case PHILIPS1:
- pms_i2c_write(dev, 0x42, 0x19, brightness);
- break;
- }
-}
-
-
-static void pms_format(struct pms *dev, short format)
-{
- int target;
-
- dev->standard = format;
-
- if (dev->decoder == PHILIPS1)
- target = 0x42;
- else if (dev->decoder == PHILIPS2)
- target = 0x8a;
- else
- return;
-
- switch (format) {
- case 0: /* Auto */
- pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x00);
- pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x80);
- break;
- case 1: /* NTSC */
- pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x00);
- pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x40);
- break;
- case 2: /* PAL */
- pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x00);
- pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x00);
- break;
- case 3: /* SECAM */
- pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x01);
- pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x00);
- break;
- }
-}
-
-#ifdef FOR_FUTURE_EXPANSION
-
-/*
- * These features of the PMS card are not currently exposed. They
- * could become a private v4l ioctl for PMSCONFIG or somesuch if
- * people need it. We also don't yet use the PMS interrupt.
- */
-
-static void pms_hstart(struct pms *dev, short start)
-{
- switch (dev->decoder) {
- case PHILIPS1:
- pms_i2c_write(dev, 0x42, 0x05, start);
- pms_i2c_write(dev, 0x42, 0x18, start);
- break;
- case PHILIPS2:
- pms_i2c_write(dev, 0x8a, 0x05, start);
- pms_i2c_write(dev, 0x8a, 0x18, start);
- break;
- }
-}
-
-/*
- * Bandpass filters
- */
-
-static void pms_bandpass(struct pms *dev, short pass)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x06, 0xcf, (pass & 0x03) << 4);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x06, 0xcf, (pass & 0x03) << 4);
-}
-
-static void pms_antisnow(struct pms *dev, short snow)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x06, 0xf3, (snow & 0x03) << 2);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x06, 0xf3, (snow & 0x03) << 2);
-}
-
-static void pms_sharpness(struct pms *dev, short sharp)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x06, 0xfc, sharp & 0x03);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x06, 0xfc, sharp & 0x03);
-}
-
-static void pms_chromaagc(struct pms *dev, short agc)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x0c, 0x9f, (agc & 0x03) << 5);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x0c, 0x9f, (agc & 0x03) << 5);
-}
-
-static void pms_vertnoise(struct pms *dev, short noise)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x10, 0xfc, noise & 3);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x10, 0xfc, noise & 3);
-}
-
-static void pms_forcecolour(struct pms *dev, short colour)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x0c, 0x7f, (colour & 1) << 7);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x0c, 0x7f, (colour & 1) << 7);
-}
-
-static void pms_antigamma(struct pms *dev, short gamma)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0xb8, 0x00, 0x7f, (gamma & 1) << 7);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x20, 0x7f, (gamma & 1) << 7);
-}
-
-static void pms_prefilter(struct pms *dev, short filter)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x06, 0xbf, (filter & 1) << 6);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x06, 0xbf, (filter & 1) << 6);
-}
-
-static void pms_hfilter(struct pms *dev, short filter)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0xb8, 0x04, 0x1f, (filter & 7) << 5);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x24, 0x1f, (filter & 7) << 5);
-}
-
-static void pms_vfilter(struct pms *dev, short filter)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0xb8, 0x08, 0x9f, (filter & 3) << 5);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x28, 0x9f, (filter & 3) << 5);
-}
-
-static void pms_killcolour(struct pms *dev, short colour)
-{
- if (dev->decoder == PHILIPS2) {
- pms_i2c_andor(dev, 0x8a, 0x08, 0x07, (colour & 0x1f) << 3);
- pms_i2c_andor(dev, 0x8a, 0x09, 0x07, (colour & 0x1f) << 3);
- } else if (dev->decoder == PHILIPS1) {
- pms_i2c_andor(dev, 0x42, 0x08, 0x07, (colour & 0x1f) << 3);
- pms_i2c_andor(dev, 0x42, 0x09, 0x07, (colour & 0x1f) << 3);
- }
-}
-
-static void pms_chromagain(struct pms *dev, short chroma)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_write(dev, 0x8a, 0x11, chroma);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_write(dev, 0x42, 0x11, chroma);
-}
-
-
-static void pms_spacialcompl(struct pms *dev, short data)
-{
- mvv_write(dev, 0x3b, data);
-}
-
-static void pms_spacialcomph(struct pms *dev, short data)
-{
- mvv_write(dev, 0x3a, data);
-}
-
-static void pms_vstart(struct pms *dev, short start)
-{
- mvv_write(dev, 0x16, start);
- mvv_write(dev, 0x17, (start >> 8) & 0x01);
-}
-
-#endif
-
-static void pms_secamcross(struct pms *dev, short cross)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x0f, 0xdf, (cross & 1) << 5);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x0f, 0xdf, (cross & 1) << 5);
-}
-
-
-static void pms_swsense(struct pms *dev, short sense)
-{
- if (dev->decoder == PHILIPS2) {
- pms_i2c_write(dev, 0x8a, 0x0a, sense);
- pms_i2c_write(dev, 0x8a, 0x0b, sense);
- } else if (dev->decoder == PHILIPS1) {
- pms_i2c_write(dev, 0x42, 0x0a, sense);
- pms_i2c_write(dev, 0x42, 0x0b, sense);
- }
-}
-
-
-static void pms_framerate(struct pms *dev, short frr)
-{
- int fps = (dev->std & V4L2_STD_525_60) ? 30 : 25;
-
- if (frr == 0)
- return;
- fps = fps/frr;
- mvv_write(dev, 0x14, 0x80 | fps);
- mvv_write(dev, 0x15, 1);
-}
-
-static void pms_vert(struct pms *dev, u8 deciden, u8 decinum)
-{
- mvv_write(dev, 0x1c, deciden); /* Denominator */
- mvv_write(dev, 0x1d, decinum); /* Numerator */
-}
-
-/*
- * Turn 16-bit ratios into the best small ratio the chipset can grok
- */
-
-static void pms_vertdeci(struct pms *dev, unsigned short decinum, unsigned short deciden)
-{
- /* Knock it down by / 5 once */
- if (decinum % 5 == 0) {
- deciden /= 5;
- decinum /= 5;
- }
- /*
- * 3's
- */
- while (decinum % 3 == 0 && deciden % 3 == 0) {
- deciden /= 3;
- decinum /= 3;
- }
- /*
- * 2's
- */
- while (decinum % 2 == 0 && deciden % 2 == 0) {
- decinum /= 2;
- deciden /= 2;
- }
- /*
- * Fudgyify
- */
- while (deciden > 32) {
- deciden /= 2;
- decinum = (decinum + 1) / 2;
- }
- if (deciden == 32)
- deciden--;
- pms_vert(dev, deciden, decinum);
-}
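
The reduction above can be exercised on its own. A minimal userspace sketch that mirrors pms_vertdeci() step by step (illustration only, not driver code; the helper name is invented):

/*
 * Standalone sketch of the ratio reduction above (mirrors pms_vertdeci();
 * not part of the driver).
 */
#include <stdio.h>

static void reduce_ratio(unsigned short num, unsigned short den)
{
	if (num % 5 == 0) {		/* knock out a factor of 5 once */
		num /= 5;
		den /= 5;
	}
	while (num % 3 == 0 && den % 3 == 0) {	/* common factors of 3 */
		num /= 3;
		den /= 3;
	}
	while (num % 2 == 0 && den % 2 == 0) {	/* common factors of 2 */
		num /= 2;
		den /= 2;
	}
	while (den > 32) {		/* squeeze the denominator into 5 bits */
		den /= 2;
		num = (num + 1) / 2;
	}
	if (den == 32)
		den--;
	printf("%u/%u\n", num, den);
}

int main(void)
{
	reduce_ratio(240, 240);	/* full-height NTSC capture: prints 1/1 */
	reduce_ratio(180, 240);	/* 180 lines out of 240:     prints 3/4 */
	return 0;
}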
-
-static void pms_horzdeci(struct pms *dev, short decinum, short deciden)
-{
- if (decinum <= 512) {
- if (decinum % 5 == 0) {
- decinum /= 5;
- deciden /= 5;
- }
- } else {
- decinum = 512;
- deciden = 640; /* 768 would be ideal */
- }
-
- while (((decinum | deciden) & 1) == 0) {
- decinum >>= 1;
- deciden >>= 1;
- }
- while (deciden > 32) {
- deciden >>= 1;
- decinum = (decinum + 1) >> 1;
- }
- if (deciden == 32)
- deciden--;
-
- mvv_write(dev, 0x24, 0x80 | deciden);
- mvv_write(dev, 0x25, decinum);
-}
-
-static void pms_resolution(struct pms *dev, short width, short height)
-{
- int fg_height;
-
- fg_height = height;
- if (fg_height > 280)
- fg_height = 280;
-
- mvv_write(dev, 0x18, fg_height);
- mvv_write(dev, 0x19, fg_height >> 8);
-
- if (dev->std & V4L2_STD_525_60) {
- mvv_write(dev, 0x1a, 0xfc);
- mvv_write(dev, 0x1b, 0x00);
- if (height > fg_height)
- pms_vertdeci(dev, 240, 240);
- else
- pms_vertdeci(dev, fg_height, 240);
- } else {
- mvv_write(dev, 0x1a, 0x1a);
- mvv_write(dev, 0x1b, 0x01);
- if (fg_height > 256)
- pms_vertdeci(dev, 270, 270);
- else
- pms_vertdeci(dev, fg_height, 270);
- }
- mvv_write(dev, 0x12, 0);
- mvv_write(dev, 0x13, MVVMEMORYWIDTH);
- mvv_write(dev, 0x42, 0x00);
- mvv_write(dev, 0x43, 0x00);
- mvv_write(dev, 0x44, MVVMEMORYWIDTH);
-
- mvv_write(dev, 0x22, width + 8);
- mvv_write(dev, 0x23, (width + 8) >> 8);
-
- if (dev->std & V4L2_STD_525_60)
- pms_horzdeci(dev, width, 640);
- else
- pms_horzdeci(dev, width + 8, 768);
-
- mvv_write(dev, 0x30, mvv_read(dev, 0x30) & 0xfe);
- mvv_write(dev, 0x08, mvv_read(dev, 0x08) | 0x01);
- mvv_write(dev, 0x01, mvv_read(dev, 0x01) & 0xfd);
- mvv_write(dev, 0x32, 0x00);
- mvv_write(dev, 0x33, MVVMEMORYWIDTH);
-}
-
-
-/*
- * Set Input
- */
-
-static void pms_vcrinput(struct pms *dev, short input)
-{
- if (dev->decoder == PHILIPS2)
- pms_i2c_andor(dev, 0x8a, 0x0d, 0x7f, (input & 1) << 7);
- else if (dev->decoder == PHILIPS1)
- pms_i2c_andor(dev, 0x42, 0x0d, 0x7f, (input & 1) << 7);
-}
-
-
-static int pms_capture(struct pms *dev, char __user *buf, int rgb555, int count)
-{
- int y;
- int dw = 2 * dev->width;
- char *tmp; /* using a temp buffer is faster than direct */
- int cnt = 0;
- int len = 0;
- unsigned char r8 = 0x5; /* value for reg8 */
-
- tmp = kmalloc(dw + 32, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- if (rgb555)
- r8 |= 0x20; /* else use untranslated rgb = 565 */
- mvv_write(dev, 0x08, r8); /* capture rgb555/565, init DRAM, PC enable */
-
-/* printf("%d %d %d %d %d %x %x\n",width,height,voff,nom,den,mvv_buf); */
-
- for (y = 0; y < dev->height; y++) {
- writeb(0, dev->mem); /* synchronises a new line */
-
- /*
- * This is in truth a FIFO; be very careful, as odd things
- * will occur if you forget this 8)
- */
-
- memcpy_fromio(tmp, dev->mem, dw + 32); /* discard 16 words */
- cnt -= dev->height;
- while (cnt <= 0) {
- /*
- * Don't copy too far
- */
- int dt = dw;
- if (dt + len > count)
- dt = count - len;
- cnt += dev->height;
- if (copy_to_user(buf, tmp + 32, dt)) {
- kfree(tmp);
- return len ? len : -EFAULT;
- }
- buf += dt;
- len += dt;
- }
- }
- kfree(tmp);
- return len;
-}
-
-
-/*
- * Video4linux interfacing
- */
-
-static int pms_querycap(struct file *file, void *priv,
- struct v4l2_capability *vcap)
-{
- struct pms *dev = video_drvdata(file);
-
- strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
- snprintf(vcap->bus_info, sizeof(vcap->bus_info),
- "ISA:%s", dev->v4l2_dev.name);
- vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
- vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int pms_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
-{
- static const char *inputs[4] = {
- "Composite",
- "S-Video",
- "Composite (VCR)",
- "S-Video (VCR)"
- };
-
- if (vin->index > 3)
- return -EINVAL;
- strlcpy(vin->name, inputs[vin->index], sizeof(vin->name));
- vin->type = V4L2_INPUT_TYPE_CAMERA;
- vin->audioset = 0;
- vin->tuner = 0;
- vin->std = V4L2_STD_ALL;
- vin->status = 0;
- return 0;
-}
-
-static int pms_g_input(struct file *file, void *fh, unsigned int *inp)
-{
- struct pms *dev = video_drvdata(file);
-
- *inp = dev->input;
- return 0;
-}
-
-static int pms_s_input(struct file *file, void *fh, unsigned int inp)
-{
- struct pms *dev = video_drvdata(file);
-
- if (inp > 3)
- return -EINVAL;
-
- dev->input = inp;
- pms_videosource(dev, inp & 1);
- pms_vcrinput(dev, inp >> 1);
- return 0;
-}
-
-static int pms_g_std(struct file *file, void *fh, v4l2_std_id *std)
-{
- struct pms *dev = video_drvdata(file);
-
- *std = dev->std;
- return 0;
-}
-
-static int pms_s_std(struct file *file, void *fh, v4l2_std_id std)
-{
- struct pms *dev = video_drvdata(file);
- int ret = 0;
-
- dev->std = std;
- if (dev->std & V4L2_STD_NTSC) {
- pms_framerate(dev, 30);
- pms_secamcross(dev, 0);
- pms_format(dev, 1);
- } else if (dev->std & V4L2_STD_PAL) {
- pms_framerate(dev, 25);
- pms_secamcross(dev, 0);
- pms_format(dev, 2);
- } else if (dev->std & V4L2_STD_SECAM) {
- pms_framerate(dev, 25);
- pms_secamcross(dev, 1);
- pms_format(dev, 2);
- } else {
- ret = -EINVAL;
- }
- /*
- switch (v->mode) {
- case VIDEO_MODE_AUTO:
- pms_framerate(dev, 25);
- pms_secamcross(dev, 0);
- pms_format(dev, 0);
- break;
- }*/
- return ret;
-}
-
-static int pms_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct pms *dev = container_of(ctrl->handler, struct pms, hdl);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- pms_brightness(dev, ctrl->val);
- break;
- case V4L2_CID_CONTRAST:
- pms_contrast(dev, ctrl->val);
- break;
- case V4L2_CID_SATURATION:
- pms_saturation(dev, ctrl->val);
- break;
- case V4L2_CID_HUE:
- pms_hue(dev, ctrl->val);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int pms_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct pms *dev = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- pix->width = dev->width;
- pix->height = dev->height;
- pix->pixelformat = dev->depth == 15 ?
- V4L2_PIX_FMT_RGB555 : V4L2_PIX_FMT_RGB565;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = 2 * dev->width;
- pix->sizeimage = 2 * dev->width * dev->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int pms_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- if (pix->height < 16 || pix->height > 480)
- return -EINVAL;
- if (pix->width < 16 || pix->width > 640)
- return -EINVAL;
- if (pix->pixelformat != V4L2_PIX_FMT_RGB555 &&
- pix->pixelformat != V4L2_PIX_FMT_RGB565)
- return -EINVAL;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = 2 * pix->width;
- pix->sizeimage = 2 * pix->width * pix->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int pms_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct pms *dev = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- int ret = pms_try_fmt_vid_cap(file, fh, fmt);
-
- if (ret)
- return ret;
- dev->width = pix->width;
- dev->height = pix->height;
- dev->depth = (pix->pixelformat == V4L2_PIX_FMT_RGB555) ? 15 : 16;
- pms_resolution(dev, dev->width, dev->height);
- /* Ok we figured out what to use from our wide choice */
- return 0;
-}
-
-static int pms_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
-{
- static struct v4l2_fmtdesc formats[] = {
- { 0, 0, 0,
- "RGB 5:5:5", V4L2_PIX_FMT_RGB555,
- { 0, 0, 0, 0 }
- },
- { 1, 0, 0,
- "RGB 5:6:5", V4L2_PIX_FMT_RGB565,
- { 0, 0, 0, 0 }
- },
- };
- enum v4l2_buf_type type = fmt->type;
-
- if (fmt->index > 1)
- return -EINVAL;
-
- *fmt = formats[fmt->index];
- fmt->type = type;
- return 0;
-}
-
-static ssize_t pms_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct pms *dev = video_drvdata(file);
- int len;
-
- len = pms_capture(dev, buf, (dev->depth == 15), count);
- return len;
-}
-
-static unsigned int pms_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct v4l2_fh *fh = file->private_data;
- unsigned int res = POLLIN | POLLRDNORM;
-
- if (v4l2_event_pending(fh))
- res |= POLLPRI;
- poll_wait(file, &fh->wait, wait);
- return res;
-}
-
-static const struct v4l2_file_operations pms_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = v4l2_fh_release,
- .poll = pms_poll,
- .unlocked_ioctl = video_ioctl2,
- .read = pms_read,
-};
-
-static const struct v4l2_ioctl_ops pms_ioctl_ops = {
- .vidioc_querycap = pms_querycap,
- .vidioc_g_input = pms_g_input,
- .vidioc_s_input = pms_s_input,
- .vidioc_enum_input = pms_enum_input,
- .vidioc_g_std = pms_g_std,
- .vidioc_s_std = pms_s_std,
- .vidioc_enum_fmt_vid_cap = pms_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = pms_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = pms_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = pms_try_fmt_vid_cap,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-/*
- * Probe for and initialise the Mediavision PMS
- */
-
-static int init_mediavision(struct pms *dev)
-{
- int idec, decst;
- int i;
- static const unsigned char i2c_defs[] = {
- 0x4c, 0x30, 0x00, 0xe8,
- 0xb6, 0xe2, 0x00, 0x00,
- 0xff, 0xff, 0x00, 0x00,
- 0x00, 0x00, 0x78, 0x98,
- 0x00, 0x00, 0x00, 0x00,
- 0x34, 0x0a, 0xf4, 0xce,
- 0xe4
- };
-
- dev->mem = ioremap(mem_base, 0x800);
- if (!dev->mem)
- return -ENOMEM;
-
- if (!request_region(0x9a01, 1, "Mediavision PMS config")) {
- printk(KERN_WARNING "mediavision: unable to detect: 0x9a01 in use.\n");
- iounmap(dev->mem);
- return -EBUSY;
- }
- if (!request_region(dev->io, 3, "Mediavision PMS")) {
- printk(KERN_WARNING "mediavision: I/O port %d in use.\n", dev->io);
- release_region(0x9a01, 1);
- iounmap(dev->mem);
- return -EBUSY;
- }
- outb(0xb8, 0x9a01); /* Unlock */
- outb(dev->io >> 4, 0x9a01); /* Set IO port */
-
-
- decst = pms_i2c_stat(dev, 0x43);
-
- if (decst != -1)
- idec = 2;
- else if (pms_i2c_stat(dev, 0xb9) != -1)
- idec = 3;
- else if (pms_i2c_stat(dev, 0x8b) != -1)
- idec = 1;
- else
- idec = 0;
-
- printk(KERN_INFO "PMS type is %d\n", idec);
- if (idec == 0) {
- release_region(dev->io, 3);
- release_region(0x9a01, 1);
- iounmap(dev->mem);
- return -ENODEV;
- }
-
- /*
- * Ok we have a PMS of some sort
- */
-
- mvv_write(dev, 0x04, mem_base >> 12); /* Set the memory area */
-
- /* Ok now load the defaults */
-
- for (i = 0; i < 0x19; i++) {
- if (i2c_defs[i] == 0xff)
- pms_i2c_andor(dev, 0x8a, i, 0x07, 0x00);
- else
- pms_i2c_write(dev, 0x8a, i, i2c_defs[i]);
- }
-
- pms_i2c_write(dev, 0xb8, 0x00, 0x12);
- pms_i2c_write(dev, 0xb8, 0x04, 0x00);
- pms_i2c_write(dev, 0xb8, 0x07, 0x00);
- pms_i2c_write(dev, 0xb8, 0x08, 0x00);
- pms_i2c_write(dev, 0xb8, 0x09, 0xff);
- pms_i2c_write(dev, 0xb8, 0x0a, 0x00);
- pms_i2c_write(dev, 0xb8, 0x0b, 0x10);
- pms_i2c_write(dev, 0xb8, 0x10, 0x03);
-
- mvv_write(dev, 0x01, 0x00);
- mvv_write(dev, 0x05, 0xa0);
- mvv_write(dev, 0x08, 0x25);
- mvv_write(dev, 0x09, 0x00);
- mvv_write(dev, 0x0a, 0x20 | MVVMEMORYWIDTH);
-
- mvv_write(dev, 0x10, 0x02);
- mvv_write(dev, 0x1e, 0x0c);
- mvv_write(dev, 0x1f, 0x03);
- mvv_write(dev, 0x26, 0x06);
-
- mvv_write(dev, 0x2b, 0x00);
- mvv_write(dev, 0x2c, 0x20);
- mvv_write(dev, 0x2d, 0x00);
- mvv_write(dev, 0x2f, 0x70);
- mvv_write(dev, 0x32, 0x00);
- mvv_write(dev, 0x33, MVVMEMORYWIDTH);
- mvv_write(dev, 0x34, 0x00);
- mvv_write(dev, 0x35, 0x00);
- mvv_write(dev, 0x3a, 0x80);
- mvv_write(dev, 0x3b, 0x10);
- mvv_write(dev, 0x20, 0x00);
- mvv_write(dev, 0x21, 0x00);
- mvv_write(dev, 0x30, 0x22);
- return 0;
-}
-
-/*
- * Initialization and module stuff
- */
-
-#ifndef MODULE
-static int enable;
-module_param(enable, int, 0);
-#endif
-
-static const struct v4l2_ctrl_ops pms_ctrl_ops = {
- .s_ctrl = pms_s_ctrl,
-};
-
-static int pms_probe(struct device *pdev, unsigned int card)
-{
- struct pms *dev;
- struct v4l2_device *v4l2_dev;
- struct v4l2_ctrl_handler *hdl;
- int res;
-
-#ifndef MODULE
- if (!enable) {
- pr_err("PMS: not enabled, use pms.enable=1 to probe\n");
- return -ENODEV;
- }
-#endif
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL)
- return -ENOMEM;
-
- dev->decoder = PHILIPS2;
- dev->io = io_port;
- dev->data = io_port + 1;
- v4l2_dev = &dev->v4l2_dev;
- hdl = &dev->hdl;
-
- res = v4l2_device_register(pdev, v4l2_dev);
- if (res < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- goto free_dev;
- }
- v4l2_info(v4l2_dev, "Mediavision Pro Movie Studio driver 0.05\n");
-
- res = init_mediavision(dev);
- if (res) {
- v4l2_err(v4l2_dev, "Board not found.\n");
- goto free_io;
- }
-
- v4l2_ctrl_handler_init(hdl, 4);
- v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 255, 1, 139);
- v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
- V4L2_CID_CONTRAST, 0, 255, 1, 70);
- v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
- V4L2_CID_SATURATION, 0, 255, 1, 64);
- v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
- V4L2_CID_HUE, 0, 255, 1, 0);
- if (hdl->error) {
- res = hdl->error;
- goto free_hdl;
- }
-
- mutex_init(&dev->lock);
- strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
- dev->vdev.v4l2_dev = v4l2_dev;
- dev->vdev.ctrl_handler = hdl;
- dev->vdev.fops = &pms_fops;
- dev->vdev.ioctl_ops = &pms_ioctl_ops;
- dev->vdev.release = video_device_release_empty;
- dev->vdev.lock = &dev->lock;
- dev->vdev.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
- video_set_drvdata(&dev->vdev, dev);
- dev->std = V4L2_STD_NTSC_M;
- dev->height = 240;
- dev->width = 320;
- dev->depth = 16;
- pms_swsense(dev, 75);
- pms_resolution(dev, 320, 240);
- pms_videosource(dev, 0);
- pms_vcrinput(dev, 0);
- v4l2_ctrl_handler_setup(hdl);
- res = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr);
- if (res >= 0)
- return 0;
-
-free_hdl:
- v4l2_ctrl_handler_free(hdl);
- v4l2_device_unregister(&dev->v4l2_dev);
-free_io:
- release_region(dev->io, 3);
- release_region(0x9a01, 1);
- iounmap(dev->mem);
-free_dev:
- kfree(dev);
- return res;
-}
-
-static int pms_remove(struct device *pdev, unsigned int card)
-{
- struct pms *dev = dev_get_drvdata(pdev);
-
- video_unregister_device(&dev->vdev);
- v4l2_ctrl_handler_free(&dev->hdl);
- release_region(dev->io, 3);
- release_region(0x9a01, 1);
- iounmap(dev->mem);
- return 0;
-}
-
-static struct isa_driver pms_driver = {
- .probe = pms_probe,
- .remove = pms_remove,
- .driver = {
- .name = "pms",
- },
-};
-
-static int __init pms_init(void)
-{
- return isa_register_driver(&pms_driver, 1);
-}
-
-static void __exit pms_exit(void)
-{
- isa_unregister_driver(&pms_driver);
-}
-
-module_init(pms_init);
-module_exit(pms_exit);
diff --git a/drivers/staging/media/parport/w9966.c b/drivers/staging/media/parport/w9966.c
deleted file mode 100644
index f7502f3a6a3c..000000000000
--- a/drivers/staging/media/parport/w9966.c
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- Winbond w9966cf Webcam parport driver.
-
- Version 0.33
-
- Copyright (C) 2001 Jakob Kemi <jakob.kemi@post.utfors.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-/*
- Supported devices:
- *Lifeview FlyCam Supra (using the Philips saa7111a chip)
-
- Does any other model using the w9966 interface chip exist?
-
- Todo:
-
- *Add a working EPP mode, since DMA ECP read isn't implemented
- in the parport drivers. (That's why it's so slow.)
-
- *Add support for CCD-control chips other than the saa7111;
- please send me feedback on what kind of chips you have.
-
- *Add proper probing. I don't know what's wrong with the IEEE1284
- parport drivers but (IEEE1284_MODE_NIBBLE|IEEE1284_DEVICE_ID)
- and nibble read seems to be broken for some peripherals.
-
- *Add probing for onboard SRAM, port directions etc. (if possible)
-
- *Add support for the hardware compressed modes (maybe using v4l2)
-
- *Add better support for the capture window (no skewed images, v4l
- interface to capt. window)
-
- *Probably some bugs that I don't know of
-
- Please support me by sending feedback!
-
- Changes:
-
- Alan Cox: Removed RGB mode for kernel merge, added THIS_MODULE
- and owner support for newer module locks
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
-#include <linux/parport.h>
-
-/*#define DEBUG*/ /* Undef me for production */
-
-#ifdef DEBUG
-#define DPRINTF(x, a...) printk(KERN_DEBUG "W9966: %s(): "x, __func__ , ##a)
-#else
-#define DPRINTF(x...)
-#endif
-
-/*
- * Defines, simple typedefs etc.
- */
-
-#define W9966_DRIVERNAME "W9966CF Webcam"
-#define W9966_MAXCAMS 4 /* Maximum number of cameras */
-#define W9966_RBUFFER 2048 /* Read buffer (must be an even number) */
-#define W9966_SRAMSIZE 131072 /* 128kb */
-#define W9966_SRAMID 0x02 /* check w9966cf.pdf */
-
-/* Empirically determined window limits */
-#define W9966_WND_MIN_X 16
-#define W9966_WND_MIN_Y 14
-#define W9966_WND_MAX_X 705
-#define W9966_WND_MAX_Y 253
-#define W9966_WND_MAX_W (W9966_WND_MAX_X - W9966_WND_MIN_X)
-#define W9966_WND_MAX_H (W9966_WND_MAX_Y - W9966_WND_MIN_Y)
-
-/* Keep track of our current state */
-#define W9966_STATE_PDEV 0x01
-#define W9966_STATE_CLAIMED 0x02
-#define W9966_STATE_VDEV 0x04
-
-#define W9966_I2C_W_ID 0x48
-#define W9966_I2C_R_ID 0x49
-#define W9966_I2C_R_DATA 0x08
-#define W9966_I2C_R_CLOCK 0x04
-#define W9966_I2C_W_DATA 0x02
-#define W9966_I2C_W_CLOCK 0x01
-
-struct w9966 {
- struct v4l2_device v4l2_dev;
- struct v4l2_ctrl_handler hdl;
- unsigned char dev_state;
- unsigned char i2c_state;
- unsigned short ppmode;
- struct parport *pport;
- struct pardevice *pdev;
- struct video_device vdev;
- unsigned short width;
- unsigned short height;
- unsigned char brightness;
- signed char contrast;
- signed char color;
- signed char hue;
- struct mutex lock;
-};
-
-/*
- * Module specific properties
- */
-
-MODULE_AUTHOR("Jakob Kemi <jakob.kemi@post.utfors.se>");
-MODULE_DESCRIPTION("Winbond w9966cf WebCam driver (0.33)");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.33.1");
-
-#ifdef MODULE
-static char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
-#else
-static char *pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
-#endif
-module_param_array(pardev, charp, NULL, 0);
-MODULE_PARM_DESC(pardev, "pardev: where to search for\n"
- "\teach camera. 'aggressive' means brute-force search.\n"
- "\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n"
- "\tcam 1 to parport3 and search every parport for cam 2 etc...");
-
-static int parmode;
-module_param(parmode, int, 0);
-MODULE_PARM_DESC(parmode, "parmode: transfer mode (0=auto, 1=ecp, 2=epp)");
-
-static int video_nr = -1;
-module_param(video_nr, int, 0);
-
-static struct w9966 w9966_cams[W9966_MAXCAMS];
-
-/*
- * Private function defines
- */
-
-
-/* Set camera phase flags, so we know what to uninit when terminating */
-static inline void w9966_set_state(struct w9966 *cam, int mask, int val)
-{
- cam->dev_state = (cam->dev_state & ~mask) ^ val;
-}
-
-/* Get camera phase flags */
-static inline int w9966_get_state(struct w9966 *cam, int mask, int val)
-{
- return ((cam->dev_state & mask) == val);
-}
-
-/* Claim parport for ourself */
-static void w9966_pdev_claim(struct w9966 *cam)
-{
- if (w9966_get_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
- return;
- parport_claim_or_block(cam->pdev);
- w9966_set_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
-}
-
-/* Release parport for others to use */
-static void w9966_pdev_release(struct w9966 *cam)
-{
- if (w9966_get_state(cam, W9966_STATE_CLAIMED, 0))
- return;
- parport_release(cam->pdev);
- w9966_set_state(cam, W9966_STATE_CLAIMED, 0);
-}
-
-/* Read register from W9966 interface-chip
- Expects a claimed pdev
- -1 on error, else register data (byte) */
-static int w9966_read_reg(struct w9966 *cam, int reg)
-{
- /* ECP, read, regtransfer, REG, REG, REG, REG, REG */
- const unsigned char addr = 0x80 | (reg & 0x1f);
- unsigned char val;
-
- if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_ADDR) != 0)
- return -1;
- if (parport_write(cam->pport, &addr, 1) != 1)
- return -1;
- if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_DATA) != 0)
- return -1;
- if (parport_read(cam->pport, &val, 1) != 1)
- return -1;
-
- return val;
-}
-
-/* Write register to W9966 interface-chip
- Expects a claimed pdev
- -1 on error */
-static int w9966_write_reg(struct w9966 *cam, int reg, int data)
-{
- /* ECP, write, regtransfer, REG, REG, REG, REG, REG */
- const unsigned char addr = 0xc0 | (reg & 0x1f);
- const unsigned char val = data;
-
- if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_ADDR) != 0)
- return -1;
- if (parport_write(cam->pport, &addr, 1) != 1)
- return -1;
- if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_DATA) != 0)
- return -1;
- if (parport_write(cam->pport, &val, 1) != 1)
- return -1;
-
- return 0;
-}
-
-/*
- * Ugly and primitive i2c protocol functions
- */
-
-/* Sets the data line on the i2c bus.
- Expects a claimed pdev. */
-static void w9966_i2c_setsda(struct w9966 *cam, int state)
-{
- if (state)
- cam->i2c_state |= W9966_I2C_W_DATA;
- else
- cam->i2c_state &= ~W9966_I2C_W_DATA;
-
- w9966_write_reg(cam, 0x18, cam->i2c_state);
- udelay(5);
-}
-
-/* Get peripheral clock line
- Expects a claimed pdev. */
-static int w9966_i2c_getscl(struct w9966 *cam)
-{
- const unsigned char state = w9966_read_reg(cam, 0x18);
- return ((state & W9966_I2C_R_CLOCK) > 0);
-}
-
-/* Sets the clock line on the i2c bus.
- Expects a claimed pdev. -1 on error */
-static int w9966_i2c_setscl(struct w9966 *cam, int state)
-{
- unsigned long timeout;
-
- if (state)
- cam->i2c_state |= W9966_I2C_W_CLOCK;
- else
- cam->i2c_state &= ~W9966_I2C_W_CLOCK;
-
- w9966_write_reg(cam, 0x18, cam->i2c_state);
- udelay(5);
-
- /* when we go high, we also expect the peripheral to ack. */
- if (state) {
- timeout = jiffies + 100;
- while (!w9966_i2c_getscl(cam)) {
- if (time_after(jiffies, timeout))
- return -1;
- }
- }
- return 0;
-}
-
-#if 0
-/* Get peripheral data line
- Expects a claimed pdev. */
-static int w9966_i2c_getsda(struct w9966 *cam)
-{
- const unsigned char state = w9966_read_reg(cam, 0x18);
- return ((state & W9966_I2C_R_DATA) > 0);
-}
-#endif
-
-/* Write a byte with ack to the i2c bus.
- Expects a claimed pdev. -1 on error */
-static int w9966_i2c_wbyte(struct w9966 *cam, int data)
-{
- int i;
-
- for (i = 7; i >= 0; i--) {
- w9966_i2c_setsda(cam, (data >> i) & 0x01);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
- }
-
- w9966_i2c_setsda(cam, 1);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
-
- return 0;
-}
-
-/* Read a data byte with ack from the i2c-bus
- Expects a claimed pdev. -1 on error */
-#if 0
-static int w9966_i2c_rbyte(struct w9966 *cam)
-{
- unsigned char data = 0x00;
- int i;
-
- w9966_i2c_setsda(cam, 1);
-
- for (i = 0; i < 8; i++) {
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- data = data << 1;
- if (w9966_i2c_getsda(cam))
- data |= 0x01;
-
- w9966_i2c_setscl(cam, 0);
- }
- return data;
-}
-#endif
-
-/* Read a register from the i2c device.
- Expects claimed pdev. -1 on error */
-#if 0
-static int w9966_read_reg_i2c(struct w9966 *cam, int reg)
-{
- int data;
-
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 1);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1)
- return -1;
- data = w9966_i2c_rbyte(cam);
- if (data == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 0);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 1);
-
- return data;
-}
-#endif
-
-/* Write a register to the i2c device.
- Expects claimed pdev. -1 on error */
-static int w9966_write_reg_i2c(struct w9966 *cam, int reg, int data)
-{
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1 ||
- w9966_i2c_wbyte(cam, data) == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 0);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 1);
-
- return 0;
-}
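
As a usage sketch, this is how the SAA7111 picture-control registers (0x0a-0x0d) are programmed over the bit-banged bus. The helper below is hypothetical and only restates what cam_s_ctrl() does further down; it expects the parport to be claimed already:

/*
 * Hypothetical helper, shown only to illustrate how the bit-banged write is
 * used; cam_s_ctrl() below performs exactly this sequence.
 * Expects a claimed pdev.  -1 on error.
 */
static int w9966_set_picture(struct w9966 *cam)
{
	if (w9966_write_reg_i2c(cam, 0x0a, cam->brightness) == -1 ||
	    w9966_write_reg_i2c(cam, 0x0b, cam->contrast) == -1 ||
	    w9966_write_reg_i2c(cam, 0x0c, cam->color) == -1 ||
	    w9966_write_reg_i2c(cam, 0x0d, cam->hue) == -1)
		return -1;
	return 0;
}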
-
-/* Find a good length for the capture window (used both for W and H).
- A bit ugly but pretty functional. The capture length
- has to match the downscale */
-static int w9966_findlen(int near, int size, int maxlen)
-{
- int bestlen = size;
- int besterr = abs(near - bestlen);
- int len;
-
- for (len = size + 1; len < maxlen; len++) {
- int err;
- if (((64 * size) % len) != 0)
- continue;
-
- err = abs(near - len);
-
- /* Only continue as long as we keep getting better values */
- if (err > besterr)
- break;
-
- besterr = err;
- bestlen = len;
- }
-
- return bestlen;
-}
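
To make the constraint in the comment concrete: a window length is admissible only when it divides 64 * size evenly, so the 6-bit downscale factor computed later comes out exact. A standalone sketch mirroring w9966_findlen() (illustration only, not driver code):

/*
 * Standalone sketch of the window-length search above (mirrors
 * w9966_findlen(); not part of the driver).
 */
#include <stdio.h>
#include <stdlib.h>

static int findlen(int near, int size, int maxlen)
{
	int bestlen = size;
	int besterr = abs(near - bestlen);
	int len;

	for (len = size + 1; len < maxlen; len++) {
		int err;

		if ((64 * size) % len != 0)	/* 6-bit factor must be exact */
			continue;
		err = abs(near - len);
		if (err > besterr)		/* error only grows from here on */
			break;
		besterr = err;
		bestlen = len;
	}
	return bestlen;
}

int main(void)
{
	/*
	 * Ask for a 400-pixel window for a 320-pixel capture width: the next
	 * length that divides 64 * 320 = 20480 evenly is 512, which is
	 * farther from 400 than 320 is, so the search keeps 320.
	 */
	printf("%d\n", findlen(400, 320, 705 - 16));	/* prints 320 */
	return 0;
}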
-
-/* Modify capture window (if necessary)
- and calculate downscaling
- Return -1 on error */
-static int w9966_calcscale(int size, int min, int max, int *beg, int *end, unsigned char *factor)
-{
- int maxlen = max - min;
- int len = *end - *beg + 1;
- int newlen = w9966_findlen(len, size, maxlen);
- int err = newlen - len;
-
- /* Check for bad format */
- if (newlen > maxlen || newlen < size)
- return -1;
-
- /* Set factor (6 bit fixed) */
- *factor = (64 * size) / newlen;
- if (*factor == 64)
- *factor = 0x00; /* downscale is disabled */
- else
- *factor |= 0x80; /* set downscale-enable bit */
-
- /* Modify old beginning and end */
- *beg -= err / 2;
- *end += err - (err / 2);
-
- /* Move window if outside borders */
- if (*beg < min) {
- *end += min - *beg;
- *beg += min - *beg;
- }
- if (*end > max) {
- *beg -= *end - max;
- *end -= *end - max;
- }
-
- return 0;
-}
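
The factor produced above is 6-bit fixed point: 64/64 means no scaling (stored as 0x00), anything smaller scales down, and bit 7 enables downscaling. A short standalone worked example (illustration only, not driver code):

/* Worked example of the 6-bit downscale factor (standalone, not driver code). */
#include <stdio.h>

int main(void)
{
	int size = 320, newlen = 640;	/* capture 320 pixels from a 640-pixel window */
	unsigned char factor = (64 * size) / newlen;	/* 32, i.e. scale by 32/64 = 1/2 */

	if (factor == 64)
		factor = 0x00;		/* downscale disabled */
	else
		factor |= 0x80;		/* set the downscale-enable bit */

	printf("factor register = 0x%02x\n", factor);	/* prints 0xa0 */
	return 0;
}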
-
-/* Set up the camera's capture window etc.
- Expects a claimed pdev
- return -1 on error */
-static int w9966_setup(struct w9966 *cam, int x1, int y1, int x2, int y2, int w, int h)
-{
- unsigned int i;
- unsigned int enh_s, enh_e;
- unsigned char scale_x, scale_y;
- unsigned char regs[0x1c];
- unsigned char saa7111_regs[] = {
- 0x21, 0x00, 0xd8, 0x23, 0x00, 0x80, 0x80, 0x00,
- 0x88, 0x10, 0x80, 0x40, 0x40, 0x00, 0x01, 0x00,
- 0x48, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x71, 0xe7, 0x00, 0x00, 0xc0
- };
-
-
- if (w * h * 2 > W9966_SRAMSIZE) {
- DPRINTF("capture window exceeds SRAM size!\n");
- w = 200; h = 160; /* Pick default values */
- }
-
- w &= ~0x1;
- if (w < 2)
- w = 2;
- if (h < 1)
- h = 1;
- if (w > W9966_WND_MAX_W)
- w = W9966_WND_MAX_W;
- if (h > W9966_WND_MAX_H)
- h = W9966_WND_MAX_H;
-
- cam->width = w;
- cam->height = h;
-
- enh_s = 0;
- enh_e = w * h * 2;
-
- /* Modify capture window if necessary and calculate downscaling */
- if (w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
- w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0)
- return -1;
-
- DPRINTF("%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
- w, h, x1, x2, y1, y2, scale_x & ~0x80, scale_y & ~0x80);
-
- /* Setup registers */
- regs[0x00] = 0x00; /* Set normal operation */
- regs[0x01] = 0x18; /* Capture mode */
- regs[0x02] = scale_y; /* V-scaling */
- regs[0x03] = scale_x; /* H-scaling */
-
- /* Capture window */
- regs[0x04] = (x1 & 0x0ff); /* X-start (8 low bits) */
- regs[0x05] = (x1 & 0x300)>>8; /* X-start (2 high bits) */
- regs[0x06] = (y1 & 0x0ff); /* Y-start (8 low bits) */
- regs[0x07] = (y1 & 0x300)>>8; /* Y-start (2 high bits) */
- regs[0x08] = (x2 & 0x0ff); /* X-end (8 low bits) */
- regs[0x09] = (x2 & 0x300)>>8; /* X-end (2 high bits) */
- regs[0x0a] = (y2 & 0x0ff); /* Y-end (8 low bits) */
-
- regs[0x0c] = W9966_SRAMID; /* SRAM-banks (1x 128kb) */
-
- /* Enhancement layer */
- regs[0x0d] = (enh_s & 0x000ff); /* Enh. start (0-7) */
- regs[0x0e] = (enh_s & 0x0ff00) >> 8; /* Enh. start (8-15) */
- regs[0x0f] = (enh_s & 0x70000) >> 16; /* Enh. start (16-17/18??) */
- regs[0x10] = (enh_e & 0x000ff); /* Enh. end (0-7) */
- regs[0x11] = (enh_e & 0x0ff00) >> 8; /* Enh. end (8-15) */
- regs[0x12] = (enh_e & 0x70000) >> 16; /* Enh. end (16-17/18??) */
-
- /* Misc */
- regs[0x13] = 0x40; /* VEE control (raw 4:2:2) */
- regs[0x17] = 0x00; /* ??? */
- regs[0x18] = cam->i2c_state = 0x00; /* Serial bus */
- regs[0x19] = 0xff; /* I/O port direction control */
- regs[0x1a] = 0xff; /* I/O port data register */
- regs[0x1b] = 0x10; /* ??? */
-
- /* SAA7111 chip settings */
- saa7111_regs[0x0a] = cam->brightness;
- saa7111_regs[0x0b] = cam->contrast;
- saa7111_regs[0x0c] = cam->color;
- saa7111_regs[0x0d] = cam->hue;
-
- /* Reset (ECP-fifo & serial-bus) */
- if (w9966_write_reg(cam, 0x00, 0x03) == -1)
- return -1;
-
- /* Write regs to w9966cf chip */
- for (i = 0; i < 0x1c; i++)
- if (w9966_write_reg(cam, i, regs[i]) == -1)
- return -1;
-
- /* Write regs to saa7111 chip */
- for (i = 0; i < 0x20; i++)
- if (w9966_write_reg_i2c(cam, i, saa7111_regs[i]) == -1)
- return -1;
-
- return 0;
-}
-
-/*
- * Video4linux interfacing
- */
-
-static int cam_querycap(struct file *file, void *priv,
- struct v4l2_capability *vcap)
-{
- struct w9966 *cam = video_drvdata(file);
-
- strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
- strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
- vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int cam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
-{
- if (vin->index > 0)
- return -EINVAL;
- strlcpy(vin->name, "Camera", sizeof(vin->name));
- vin->type = V4L2_INPUT_TYPE_CAMERA;
- vin->audioset = 0;
- vin->tuner = 0;
- vin->std = 0;
- vin->status = 0;
- return 0;
-}
-
-static int cam_g_input(struct file *file, void *fh, unsigned int *inp)
-{
- *inp = 0;
- return 0;
-}
-
-static int cam_s_input(struct file *file, void *fh, unsigned int inp)
-{
- return (inp > 0) ? -EINVAL : 0;
-}
-
-static int cam_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct w9966 *cam =
- container_of(ctrl->handler, struct w9966, hdl);
- int ret = 0;
-
- mutex_lock(&cam->lock);
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- cam->brightness = ctrl->val;
- break;
- case V4L2_CID_CONTRAST:
- cam->contrast = ctrl->val;
- break;
- case V4L2_CID_SATURATION:
- cam->color = ctrl->val;
- break;
- case V4L2_CID_HUE:
- cam->hue = ctrl->val;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (ret == 0) {
- w9966_pdev_claim(cam);
-
- if (w9966_write_reg_i2c(cam, 0x0a, cam->brightness) == -1 ||
- w9966_write_reg_i2c(cam, 0x0b, cam->contrast) == -1 ||
- w9966_write_reg_i2c(cam, 0x0c, cam->color) == -1 ||
- w9966_write_reg_i2c(cam, 0x0d, cam->hue) == -1) {
- ret = -EIO;
- }
-
- w9966_pdev_release(cam);
- }
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-static int cam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct w9966 *cam = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- pix->width = cam->width;
- pix->height = cam->height;
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = 2 * cam->width;
- pix->sizeimage = 2 * cam->width * cam->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- return 0;
-}
-
-static int cam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
-
- if (pix->width < 2)
- pix->width = 2;
- if (pix->height < 1)
- pix->height = 1;
- if (pix->width > W9966_WND_MAX_W)
- pix->width = W9966_WND_MAX_W;
- if (pix->height > W9966_WND_MAX_H)
- pix->height = W9966_WND_MAX_H;
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = 2 * pix->width;
- pix->sizeimage = 2 * pix->width * pix->height;
- /* Just a guess */
- pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- return 0;
-}
-
-static int cam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
-{
- struct w9966 *cam = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- int ret = cam_try_fmt_vid_cap(file, fh, fmt);
-
- if (ret)
- return ret;
-
- mutex_lock(&cam->lock);
- /* Update camera regs */
- w9966_pdev_claim(cam);
- ret = w9966_setup(cam, 0, 0, 1023, 1023, pix->width, pix->height);
- w9966_pdev_release(cam);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-static int cam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
-{
- static struct v4l2_fmtdesc formats[] = {
- { 0, 0, 0,
- "YUV 4:2:2", V4L2_PIX_FMT_YUYV,
- { 0, 0, 0, 0 }
- },
- };
- enum v4l2_buf_type type = fmt->type;
-
- if (fmt->index > 0)
- return -EINVAL;
-
- *fmt = formats[fmt->index];
- fmt->type = type;
- return 0;
-}
-
-/* Capture data */
-static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct w9966 *cam = video_drvdata(file);
- unsigned char addr = 0xa0; /* ECP, read, CCD-transfer, 00000 */
- unsigned char __user *dest = (unsigned char __user *)buf;
- unsigned long dleft = count;
- unsigned char *tbuf;
-
- /* Why would anyone want more than this?? */
- if (count > cam->width * cam->height * 2)
- return -EINVAL;
-
- mutex_lock(&cam->lock);
- w9966_pdev_claim(cam);
- w9966_write_reg(cam, 0x00, 0x02); /* Reset ECP-FIFO buffer */
- w9966_write_reg(cam, 0x00, 0x00); /* Return to normal operation */
- w9966_write_reg(cam, 0x01, 0x98); /* Enable capture */
-
- /* write special capture-addr and negotiate into data transfer */
- if ((parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0) ||
- (parport_write(cam->pport, &addr, 1) != 1) ||
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0)) {
- w9966_pdev_release(cam);
- mutex_unlock(&cam->lock);
- return -EFAULT;
- }
-
- tbuf = kmalloc(W9966_RBUFFER, GFP_KERNEL);
- if (tbuf == NULL) {
- count = -ENOMEM;
- goto out;
- }
-
- while (dleft > 0) {
- unsigned long tsize = (dleft > W9966_RBUFFER) ? W9966_RBUFFER : dleft;
-
- if (parport_read(cam->pport, tbuf, tsize) < tsize) {
- count = -EFAULT;
- goto out;
- }
- if (copy_to_user(dest, tbuf, tsize) != 0) {
- count = -EFAULT;
- goto out;
- }
- dest += tsize;
- dleft -= tsize;
- }
-
- w9966_write_reg(cam, 0x01, 0x18); /* Disable capture */
-
-out:
- kfree(tbuf);
- w9966_pdev_release(cam);
- mutex_unlock(&cam->lock);
-
- return count;
-}
-
-static const struct v4l2_file_operations w9966_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = v4l2_fh_release,
- .poll = v4l2_ctrl_poll,
- .unlocked_ioctl = video_ioctl2,
- .read = w9966_v4l_read,
-};
-
-static const struct v4l2_ioctl_ops w9966_ioctl_ops = {
- .vidioc_querycap = cam_querycap,
- .vidioc_g_input = cam_g_input,
- .vidioc_s_input = cam_s_input,
- .vidioc_enum_input = cam_enum_input,
- .vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct v4l2_ctrl_ops cam_ctrl_ops = {
- .s_ctrl = cam_s_ctrl,
-};
-
-
-/* Initialize the camera device. Set up all internal flags, set a
- default video mode, set up the ccd-chip, register the v4l device etc.
- Also used for 'probing' of hardware.
- -1 on error */
-static int w9966_init(struct w9966 *cam, struct parport *port)
-{
- struct v4l2_device *v4l2_dev = &cam->v4l2_dev;
-
- if (cam->dev_state != 0)
- return -1;
-
- strlcpy(v4l2_dev->name, "w9966", sizeof(v4l2_dev->name));
-
- if (v4l2_device_register(NULL, v4l2_dev) < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- return -1;
- }
-
- v4l2_ctrl_handler_init(&cam->hdl, 4);
- v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
- v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
- V4L2_CID_CONTRAST, -64, 64, 1, 64);
- v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
- V4L2_CID_SATURATION, -64, 64, 1, 64);
- v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
- V4L2_CID_HUE, -128, 127, 1, 0);
- if (cam->hdl.error) {
- v4l2_err(v4l2_dev, "couldn't register controls\n");
- return -1;
- }
- cam->pport = port;
- cam->brightness = 128;
- cam->contrast = 64;
- cam->color = 64;
- cam->hue = 0;
-
- /* Select requested transfer mode */
- switch (parmode) {
- default: /* Auto-detect (priority: hw-ecp, hw-epp, sw-ecp) */
- case 0:
- if (port->modes & PARPORT_MODE_ECP)
- cam->ppmode = IEEE1284_MODE_ECP;
- else if (port->modes & PARPORT_MODE_EPP)
- cam->ppmode = IEEE1284_MODE_EPP;
- else
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 1: /* hw- or sw-ecp */
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 2: /* hw- or sw-epp */
- cam->ppmode = IEEE1284_MODE_EPP;
- break;
- }
-
- /* Tell the parport driver that we exist */
- cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
- if (cam->pdev == NULL) {
- DPRINTF("parport_register_device() failed\n");
- return -1;
- }
- w9966_set_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
-
- w9966_pdev_claim(cam);
-
- /* Setup a default capture mode */
- if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
- DPRINTF("w9966_setup() failed.\n");
- return -1;
- }
-
- w9966_pdev_release(cam);
-
- /* Fill in the video_device struct and register us to v4l */
- strlcpy(cam->vdev.name, W9966_DRIVERNAME, sizeof(cam->vdev.name));
- cam->vdev.v4l2_dev = v4l2_dev;
- cam->vdev.fops = &w9966_fops;
- cam->vdev.ioctl_ops = &w9966_ioctl_ops;
- cam->vdev.release = video_device_release_empty;
- cam->vdev.ctrl_handler = &cam->hdl;
- video_set_drvdata(&cam->vdev, cam);
-
- mutex_init(&cam->lock);
-
- if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
- return -1;
-
- w9966_set_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
-
- /* All ok */
- v4l2_info(v4l2_dev, "Found and initialized a webcam on %s.\n",
- cam->pport->name);
- return 0;
-}
-
-
-/* Terminate everything gracefully */
-static void w9966_term(struct w9966 *cam)
-{
- /* Unregister from v4l */
- if (w9966_get_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
- video_unregister_device(&cam->vdev);
- w9966_set_state(cam, W9966_STATE_VDEV, 0);
- }
-
- v4l2_ctrl_handler_free(&cam->hdl);
-
- /* Terminate from IEEE1284 mode and release pdev block */
- if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- w9966_pdev_claim(cam);
- parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
- w9966_pdev_release(cam);
- }
-
- /* Unregister from parport */
- if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- parport_unregister_device(cam->pdev);
- w9966_set_state(cam, W9966_STATE_PDEV, 0);
- }
- memset(cam, 0, sizeof(*cam));
-}
-
-
-/* Called once for every parport on init */
-static void w9966_attach(struct parport *port)
-{
- int i;
-
- for (i = 0; i < W9966_MAXCAMS; i++) {
- if (w9966_cams[i].dev_state != 0) /* Cam is already assigned */
- continue;
- if (strcmp(pardev[i], "aggressive") == 0 || strcmp(pardev[i], port->name) == 0) {
- if (w9966_init(&w9966_cams[i], port) != 0)
- w9966_term(&w9966_cams[i]);
- break; /* return */
- }
- }
-}
-
-/* Called once for every parport on termination */
-static void w9966_detach(struct parport *port)
-{
- int i;
-
- for (i = 0; i < W9966_MAXCAMS; i++)
- if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
- w9966_term(&w9966_cams[i]);
-}
-
-
-static struct parport_driver w9966_ppd = {
- .name = W9966_DRIVERNAME,
- .attach = w9966_attach,
- .detach = w9966_detach,
-};
-
-/* Module entry point */
-static int __init w9966_mod_init(void)
-{
- int i;
-
- for (i = 0; i < W9966_MAXCAMS; i++)
- w9966_cams[i].dev_state = 0;
-
- return parport_register_driver(&w9966_ppd);
-}
-
-/* Module cleanup */
-static void __exit w9966_mod_term(void)
-{
- parport_unregister_driver(&w9966_ppd);
-}
-
-module_init(w9966_mod_init);
-module_exit(w9966_mod_term);
diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
deleted file mode 100644
index 81784c6f7b88..000000000000
--- a/drivers/staging/media/tlg2300/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-config VIDEO_TLG2300
- tristate "Telegent TLG2300 USB video capture support (Deprecated)"
- depends on VIDEO_DEV && I2C && SND && DVB_CORE
- select VIDEO_TUNER
- select VIDEO_TVEEPROM
- depends on RC_CORE
- select VIDEOBUF_VMALLOC
- select SND_PCM
- select VIDEOBUF_DVB
-
- ---help---
- This is a video4linux driver for Telegent tlg2300 based TV cards.
- The driver supports V4L2, DVB-T and radio.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
- the linux-media mailing list.
-
- To compile this driver as a module, choose M here: the
- module will be called poseidon
diff --git a/drivers/staging/media/tlg2300/Makefile b/drivers/staging/media/tlg2300/Makefile
deleted file mode 100644
index 137f8e38cdec..000000000000
--- a/drivers/staging/media/tlg2300/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-poseidon-objs := pd-video.o pd-alsa.o pd-dvb.o pd-radio.o pd-main.o
-
-obj-$(CONFIG_VIDEO_TLG2300) += poseidon.o
-
-ccflags-y += -Idrivers/media/i2c
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-core
-ccflags-y += -Idrivers/media/dvb-frontends
-
diff --git a/drivers/staging/media/tlg2300/pd-alsa.c b/drivers/staging/media/tlg2300/pd-alsa.c
deleted file mode 100644
index dd8fe100590f..000000000000
--- a/drivers/staging/media/tlg2300/pd-alsa.c
+++ /dev/null
@@ -1,337 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/usb.h>
-#include <linux/init.h>
-#include <linux/sound.h>
-#include <linux/spinlock.h>
-#include <linux/soundcard.h>
-#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/info.h>
-#include <sound/initval.h>
-#include <sound/control.h>
-#include <media/v4l2-common.h>
-#include "pd-common.h"
-#include "vendorcmds.h"
-
-static void complete_handler_audio(struct urb *urb);
-#define AUDIO_EP (0x83)
-#define AUDIO_BUF_SIZE (512)
-#define PERIOD_SIZE (1024 * 8)
-#define PERIOD_MIN (4)
-#define PERIOD_MAX PERIOD_MIN
-
-static struct snd_pcm_hardware snd_pd_hw_capture = {
- .info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_MMAP_VALID,
-
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_48000,
-
- .rate_min = 48000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = PERIOD_SIZE * PERIOD_MIN,
- .period_bytes_min = PERIOD_SIZE,
- .period_bytes_max = PERIOD_SIZE,
- .periods_min = PERIOD_MIN,
- .periods_max = PERIOD_MAX,
- /*
- .buffer_bytes_max = 62720 * 8,
- .period_bytes_min = 64,
- .period_bytes_max = 12544,
- .periods_min = 2,
- .periods_max = 98
- */
-};
-
-static int snd_pd_capture_open(struct snd_pcm_substream *substream)
-{
- struct poseidon *p = snd_pcm_substream_chip(substream);
- struct poseidon_audio *pa = &p->audio;
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- if (!p)
- return -ENODEV;
- pa->users++;
- pa->card_close = 0;
- pa->capture_pcm_substream = substream;
- runtime->private_data = p;
-
- runtime->hw = snd_pd_hw_capture;
- snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
- usb_autopm_get_interface(p->interface);
- kref_get(&p->kref);
- return 0;
-}
-
-static int snd_pd_pcm_close(struct snd_pcm_substream *substream)
-{
- struct poseidon *p = snd_pcm_substream_chip(substream);
- struct poseidon_audio *pa = &p->audio;
-
- pa->users--;
- pa->card_close = 1;
- usb_autopm_put_interface(p->interface);
- kref_put(&p->kref, poseidon_delete);
- return 0;
-}
-
-static int snd_pd_hw_capture_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned int size;
-
- size = params_buffer_bytes(hw_params);
- if (runtime->dma_area) {
- if (runtime->dma_bytes > size)
- return 0;
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc(size);
- if (!runtime->dma_area)
- return -ENOMEM;
- else
- runtime->dma_bytes = size;
- return 0;
-}
-
-static int audio_buf_free(struct poseidon *p)
-{
- struct poseidon_audio *pa = &p->audio;
- int i;
-
- for (i = 0; i < AUDIO_BUFS; i++)
- if (pa->urb_array[i])
- usb_kill_urb(pa->urb_array[i]);
- free_all_urb_generic(pa->urb_array, AUDIO_BUFS);
- logpm();
- return 0;
-}
-
-static int snd_pd_hw_capture_free(struct snd_pcm_substream *substream)
-{
- struct poseidon *p = snd_pcm_substream_chip(substream);
-
- logpm();
- audio_buf_free(p);
- return 0;
-}
-
-static int snd_pd_prepare(struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
-#define AUDIO_TRAILER_SIZE (16)
-static inline void handle_audio_data(struct urb *urb, int *period_elapsed)
-{
- struct poseidon_audio *pa = urb->context;
- struct snd_pcm_runtime *runtime = pa->capture_pcm_substream->runtime;
-
- int stride = runtime->frame_bits >> 3;
- int len = urb->actual_length / stride;
- unsigned char *cp = urb->transfer_buffer;
- unsigned int oldptr = pa->rcv_position;
-
- if (urb->actual_length == AUDIO_BUF_SIZE - 4)
- len -= (AUDIO_TRAILER_SIZE / stride);
-
- /* do the copy */
- if (oldptr + len >= runtime->buffer_size) {
- unsigned int cnt = runtime->buffer_size - oldptr;
-
- memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride);
- memcpy(runtime->dma_area, (cp + cnt * stride),
- (len * stride - cnt * stride));
- } else
- memcpy(runtime->dma_area + oldptr * stride, cp, len * stride);
-
- /* update the status */
- snd_pcm_stream_lock(pa->capture_pcm_substream);
- pa->rcv_position += len;
- if (pa->rcv_position >= runtime->buffer_size)
- pa->rcv_position -= runtime->buffer_size;
-
- pa->copied_position += (len);
- if (pa->copied_position >= runtime->period_size) {
- pa->copied_position -= runtime->period_size;
- *period_elapsed = 1;
- }
- snd_pcm_stream_unlock(pa->capture_pcm_substream);
-}
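
The copy above wraps around the end of the ALSA ring buffer by hand. A standalone sketch of just that wrap arithmetic, working in bytes rather than frames so the stride handling is omitted (illustration only, names invented):

/*
 * Standalone sketch of the ring-buffer wrap used above (illustration only;
 * works in bytes, whereas the driver works in frames of 'stride' bytes).
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16

static unsigned int ring_copy(char *ring, unsigned int pos,
			      const char *src, unsigned int n)
{
	if (pos + n >= RING_SIZE) {	/* split the copy across the wrap point */
		unsigned int cnt = RING_SIZE - pos;

		memcpy(ring + pos, src, cnt);
		memcpy(ring, src + cnt, n - cnt);
	} else {
		memcpy(ring + pos, src, n);
	}
	pos += n;
	if (pos >= RING_SIZE)
		pos -= RING_SIZE;
	return pos;
}

int main(void)
{
	char ring[RING_SIZE] = { 0 };
	static const char chunk[] = "abcdef";
	unsigned int pos = 13;

	pos = ring_copy(ring, pos, chunk, 6);	/* "abc" at the end, "def" at the start */
	printf("new position = %u\n", pos);	/* prints 3 */
	return 0;
}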
-
-static void complete_handler_audio(struct urb *urb)
-{
- struct poseidon_audio *pa = urb->context;
- struct snd_pcm_substream *substream = pa->capture_pcm_substream;
- int period_elapsed = 0;
- int ret;
-
- if (1 == pa->card_close || pa->capture_stream != STREAM_ON)
- return;
-
- if (urb->status != 0) {
- /*if (urb->status == -ESHUTDOWN)*/
- return;
- }
-
- if (substream) {
- if (urb->actual_length) {
- handle_audio_data(urb, &period_elapsed);
- if (period_elapsed)
- snd_pcm_period_elapsed(substream);
- }
- }
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0)
- log("audio urb failed (errcod = %i)", ret);
- return;
-}
-
-static int fire_audio_urb(struct poseidon *p)
-{
- int i, ret = 0;
- struct poseidon_audio *pa = &p->audio;
-
- alloc_bulk_urbs_generic(pa->urb_array, AUDIO_BUFS,
- p->udev, AUDIO_EP,
- AUDIO_BUF_SIZE, GFP_ATOMIC,
- complete_handler_audio, pa);
-
- for (i = 0; i < AUDIO_BUFS; i++) {
- ret = usb_submit_urb(pa->urb_array[i], GFP_KERNEL);
- if (ret)
- log("urb err : %d", ret);
- }
- log();
- return ret;
-}
-
-static int snd_pd_capture_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct poseidon *p = snd_pcm_substream_chip(substream);
- struct poseidon_audio *pa = &p->audio;
-
- if (debug_mode)
- log("cmd %d, audio stat : %d\n", cmd, pa->capture_stream);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_START:
- if (pa->capture_stream == STREAM_ON)
- return 0;
-
- pa->rcv_position = pa->copied_position = 0;
- pa->capture_stream = STREAM_ON;
-
- if (in_hibernation(p))
- return 0;
- fire_audio_urb(p);
- return 0;
-
- case SNDRV_PCM_TRIGGER_SUSPEND:
- pa->capture_stream = STREAM_SUSPEND;
- return 0;
- case SNDRV_PCM_TRIGGER_STOP:
- pa->capture_stream = STREAM_OFF;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static snd_pcm_uframes_t
-snd_pd_capture_pointer(struct snd_pcm_substream *substream)
-{
- struct poseidon *p = snd_pcm_substream_chip(substream);
- struct poseidon_audio *pa = &p->audio;
- return pa->rcv_position;
-}
-
-static struct page *snd_pcm_pd_get_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
-}
-
-static struct snd_pcm_ops pcm_capture_ops = {
- .open = snd_pd_capture_open,
- .close = snd_pd_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_pd_hw_capture_params,
- .hw_free = snd_pd_hw_capture_free,
- .prepare = snd_pd_prepare,
- .trigger = snd_pd_capture_trigger,
- .pointer = snd_pd_capture_pointer,
- .page = snd_pcm_pd_get_page,
-};
-
-#ifdef CONFIG_PM
-int pm_alsa_suspend(struct poseidon *p)
-{
- logpm(p);
- audio_buf_free(p);
- return 0;
-}
-
-int pm_alsa_resume(struct poseidon *p)
-{
- logpm(p);
- fire_audio_urb(p);
- return 0;
-}
-#endif
-
-int poseidon_audio_init(struct poseidon *p)
-{
- struct poseidon_audio *pa = &p->audio;
- struct snd_card *card;
- struct snd_pcm *pcm;
- int ret;
-
- ret = snd_card_new(&p->interface->dev, -1, "Telegent",
- THIS_MODULE, 0, &card);
- if (ret != 0)
- return ret;
-
- ret = snd_pcm_new(card, "poseidon audio", 0, 0, 1, &pcm);
- if (ret < 0) {
- snd_card_free(card);
- return ret;
- }
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops);
- pcm->info_flags = 0;
- pcm->private_data = p;
- strcpy(pcm->name, "poseidon audio capture");
-
- strcpy(card->driver, "ALSA driver");
- strcpy(card->shortname, "poseidon Audio");
- strcpy(card->longname, "poseidon ALSA Audio");
-
- if (snd_card_register(card)) {
- snd_card_free(card);
- return -ENOMEM;
- }
- pa->card = card;
- return 0;
-}
-
-int poseidon_audio_free(struct poseidon *p)
-{
- struct poseidon_audio *pa = &p->audio;
-
- if (pa->card)
- snd_card_free(pa->card);
- return 0;
-}
diff --git a/drivers/staging/media/tlg2300/pd-common.h b/drivers/staging/media/tlg2300/pd-common.h
deleted file mode 100644
index 9e23ad32d2fe..000000000000
--- a/drivers/staging/media/tlg2300/pd-common.h
+++ /dev/null
@@ -1,271 +0,0 @@
-#ifndef PD_COMMON_H
-#define PD_COMMON_H
-
-#include <linux/fs.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/videodev2.h>
-#include <linux/semaphore.h>
-#include <linux/usb.h>
-#include <linux/poll.h>
-#include <media/videobuf-vmalloc.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-
-#include "dvb_frontend.h"
-#include "dvbdev.h"
-#include "dvb_demux.h"
-#include "dmxdev.h"
-
-#define SBUF_NUM 8
-#define MAX_BUFFER_NUM 6
-#define PK_PER_URB 32
-#define ISO_PKT_SIZE 3072
-
-#define POSEIDON_STATE_NONE (0x0000)
-#define POSEIDON_STATE_ANALOG (0x0001)
-#define POSEIDON_STATE_FM (0x0002)
-#define POSEIDON_STATE_DVBT (0x0004)
-#define POSEIDON_STATE_DISCONNECT (0x0080)
-
-#define PM_SUSPEND_DELAY 3
-
-#define V4L_PAL_VBI_LINES 18
-#define V4L_NTSC_VBI_LINES 12
-#define V4L_PAL_VBI_FRAMESIZE (V4L_PAL_VBI_LINES * 1440 * 2)
-#define V4L_NTSC_VBI_FRAMESIZE (V4L_NTSC_VBI_LINES * 1440 * 2)
-
-#define TUNER_FREQ_MIN (45000000U)
-#define TUNER_FREQ_MAX (862000000U)
-
-struct vbi_data {
- struct video_device v_dev;
- struct video_data *video;
- struct front_face *front;
-
- unsigned int copied;
- unsigned int vbi_size; /* total size of the two fields */
- int users;
-};
-
-/*
- * The running context of the video; it is needed by resume().
- */
-struct running_context {
- u32 freq; /* VIDIOC_S_FREQUENCY */
- int audio_idx; /* VIDIOC_S_TUNER */
- v4l2_std_id tvnormid; /* VIDIOC_S_STD */
- int sig_index; /* VIDIOC_S_INPUT */
- struct v4l2_pix_format pix; /* VIDIOC_S_FMT */
-};
-
-struct video_data {
- /* v4l2 video device */
- struct video_device v_dev;
- struct v4l2_ctrl_handler ctrl_handler;
-
- /* the working context */
- struct running_context context;
-
- /* for data copy */
- int field_count;
-
- char *dst;
- int lines_copied;
- int prev_left;
-
- int lines_per_field;
- int lines_size;
-
- /* for communication */
- u8 endpoint_addr;
- struct urb *urb_array[SBUF_NUM];
- struct vbi_data *vbi;
- struct poseidon *pd;
- struct front_face *front;
-
- int is_streaming;
- int users;
-
- /* for bubble handler */
- struct work_struct bubble_work;
-};
-
-enum pcm_stream_state {
- STREAM_OFF,
- STREAM_ON,
- STREAM_SUSPEND,
-};
-
-#define AUDIO_BUFS (3)
-#define CAPTURE_STREAM_EN 1
-struct poseidon_audio {
- struct urb *urb_array[AUDIO_BUFS];
- unsigned int copied_position;
- struct snd_pcm_substream *capture_pcm_substream;
-
- unsigned int rcv_position;
- struct snd_card *card;
- int card_close;
-
- int users;
- int pm_state;
- enum pcm_stream_state capture_stream;
-};
-
-struct radio_data {
- __u32 fm_freq;
- unsigned int is_radio_streaming;
- int pre_emphasis;
- struct video_device fm_dev;
- struct v4l2_ctrl_handler ctrl_handler;
-};
-
-#define DVB_SBUF_NUM 4
-#define DVB_URB_BUF_SIZE 0x2000
-struct pd_dvb_adapter {
- struct dvb_adapter dvb_adap;
- struct dvb_frontend dvb_fe;
- struct dmxdev dmxdev;
- struct dvb_demux demux;
-
- atomic_t users;
- atomic_t active_feed;
-
- /* data transfer */
- s32 is_streaming;
- struct urb *urb_array[DVB_SBUF_NUM];
- struct poseidon *pd_device;
- u8 ep_addr;
- u8 reserved[3];
-
- /* data for power resume */
- struct dtv_frontend_properties fe_param;
-
- /* for channel scanning */
- int prev_freq;
- int bandwidth;
- unsigned long last_jiffies;
-};
-
-struct front_face {
- /* use this field to distinguish VIDEO and VBI */
- enum v4l2_buf_type type;
-
- /* for host */
- struct videobuf_queue q;
-
- /* the bridge between host and device */
- struct videobuf_buffer *curr_frame;
-
- /* for device */
- spinlock_t queue_lock;
- struct list_head active;
- struct poseidon *pd;
-};
-
-struct poseidon {
- struct list_head device_list;
-
- struct mutex lock;
- struct kref kref;
-
- /* for V4L2 */
- struct v4l2_device v4l2_dev;
-
- /* hardware info */
- struct usb_device *udev;
- struct usb_interface *interface;
- int cur_transfer_mode;
-
- struct video_data video_data; /* video */
- struct vbi_data vbi_data; /* vbi */
- struct poseidon_audio audio; /* audio (alsa) */
- struct radio_data radio_data; /* FM */
- struct pd_dvb_adapter dvb_data; /* DVB */
-
- u32 state;
- struct file *file_for_stream; /* the active stream */
-
-#ifdef CONFIG_PM
- int (*pm_suspend)(struct poseidon *);
- int (*pm_resume)(struct poseidon *);
- pm_message_t msg;
-
- struct work_struct pm_work;
- u8 portnum;
-#endif
-};
-
-struct poseidon_format {
- char *name;
- int fourcc; /* video4linux 2 */
- int depth; /* bit/pixel */
- int flags;
-};
-
-struct poseidon_tvnorm {
- v4l2_std_id v4l2_id;
- char name[12];
- u32 tlg_tvnorm;
-};
-
-/* video */
-int pd_video_init(struct poseidon *);
-void pd_video_exit(struct poseidon *);
-int stop_all_video_stream(struct poseidon *);
-
-/* alsa audio */
-int poseidon_audio_init(struct poseidon *);
-int poseidon_audio_free(struct poseidon *);
-#ifdef CONFIG_PM
-int pm_alsa_suspend(struct poseidon *);
-int pm_alsa_resume(struct poseidon *);
-#endif
-
-/* dvb */
-int pd_dvb_usb_device_init(struct poseidon *);
-void pd_dvb_usb_device_exit(struct poseidon *);
-void pd_dvb_usb_device_cleanup(struct poseidon *);
-int pd_dvb_get_adapter_num(struct pd_dvb_adapter *);
-void dvb_stop_streaming(struct pd_dvb_adapter *);
-
-/* FM */
-int poseidon_fm_init(struct poseidon *);
-int poseidon_fm_exit(struct poseidon *);
-
-/* vendor command ops */
-int send_set_req(struct poseidon*, u8, s32, s32*);
-int send_get_req(struct poseidon*, u8, s32, void*, s32*, s32);
-s32 set_tuner_mode(struct poseidon*, unsigned char);
-
-/* bulk urb alloc/free */
-int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
- struct usb_device *udev, u8 ep_addr,
- int buf_size, gfp_t gfp_flags,
- usb_complete_t complete_fn, void *context);
-void free_all_urb_generic(struct urb **urb_array, int num);
-
-/* misc */
-void poseidon_delete(struct kref *kref);
-extern int debug_mode;
-void set_debug_mode(struct video_device *vfd, int debug_mode);
-
-#ifdef CONFIG_PM
-#define in_hibernation(pd) (pd->msg.event == PM_EVENT_FREEZE)
-#else
-#define in_hibernation(pd) (0)
-#endif
-#define get_pm_count(p) (atomic_read(&(p)->interface->pm_usage_cnt))
-
-#define log(a, ...) printk(KERN_DEBUG "\t[ %s : %.3d ] "a"\n", \
- __func__, __LINE__, ## __VA_ARGS__)
-
-/* for power management */
-#define logpm(pd) do {\
- if (debug_mode & 0x10)\
- log();\
- } while (0)
-
-#endif
diff --git a/drivers/staging/media/tlg2300/pd-dvb.c b/drivers/staging/media/tlg2300/pd-dvb.c
deleted file mode 100644
index ca4994a5190c..000000000000
--- a/drivers/staging/media/tlg2300/pd-dvb.c
+++ /dev/null
@@ -1,597 +0,0 @@
-#include "pd-common.h"
-#include <linux/kernel.h>
-#include <linux/usb.h>
-#include <linux/time.h>
-#include <linux/dvb/dmx.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include "vendorcmds.h"
-#include <linux/sched.h>
-#include <linux/atomic.h>
-
-static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb);
-
-static int dvb_bandwidth[][2] = {
- { TLG_BW_8, 8000000 },
- { TLG_BW_7, 7000000 },
- { TLG_BW_6, 6000000 }
-};
-static int dvb_bandwidth_length = ARRAY_SIZE(dvb_bandwidth);
-
-static s32 dvb_start_streaming(struct pd_dvb_adapter *pd_dvb);
-static int poseidon_check_mode_dvbt(struct poseidon *pd)
-{
- s32 ret = 0, cmd_status = 0;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/4);
-
- ret = usb_set_interface(pd->udev, 0, BULK_ALTERNATE_IFACE);
- if (ret != 0)
- return ret;
-
- ret = set_tuner_mode(pd, TLG_MODE_CAPS_DVB_T);
- if (ret)
- return ret;
-
- /* signal source */
- ret = send_set_req(pd, SGNL_SRC_SEL, TLG_SIG_SRC_ANTENNA, &cmd_status);
- if (ret|cmd_status)
- return ret;
-
- return 0;
-}
-
-/* acquire:
- * 1 == open
- * 0 == release
- */
-static int poseidon_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
-{
- struct poseidon *pd = fe->demodulator_priv;
- struct pd_dvb_adapter *pd_dvb;
- int ret = 0;
-
- if (!pd)
- return -ENODEV;
-
- pd_dvb = container_of(fe, struct pd_dvb_adapter, dvb_fe);
- if (acquire) {
- mutex_lock(&pd->lock);
- if (pd->state & POSEIDON_STATE_DISCONNECT) {
- ret = -ENODEV;
- goto open_out;
- }
-
- if (pd->state && !(pd->state & POSEIDON_STATE_DVBT)) {
- ret = -EBUSY;
- goto open_out;
- }
-
- usb_autopm_get_interface(pd->interface);
- if (0 == pd->state) {
- ret = poseidon_check_mode_dvbt(pd);
- if (ret < 0) {
- usb_autopm_put_interface(pd->interface);
- goto open_out;
- }
- pd->state |= POSEIDON_STATE_DVBT;
- pd_dvb->bandwidth = 0;
- pd_dvb->prev_freq = 0;
- }
- atomic_inc(&pd_dvb->users);
- kref_get(&pd->kref);
-open_out:
- mutex_unlock(&pd->lock);
- } else {
- dvb_stop_streaming(pd_dvb);
-
- if (atomic_dec_and_test(&pd_dvb->users)) {
- mutex_lock(&pd->lock);
- pd->state &= ~POSEIDON_STATE_DVBT;
- mutex_unlock(&pd->lock);
- }
- kref_put(&pd->kref, poseidon_delete);
- usb_autopm_put_interface(pd->interface);
- }
- return ret;
-}
-
-#ifdef CONFIG_PM
-static void poseidon_fe_release(struct dvb_frontend *fe)
-{
- struct poseidon *pd = fe->demodulator_priv;
-
- pd->pm_suspend = NULL;
- pd->pm_resume = NULL;
-}
-#else
-#define poseidon_fe_release NULL
-#endif
-
-static s32 poseidon_fe_sleep(struct dvb_frontend *fe)
-{
- return 0;
-}
-
-/*
- * return true if we can satisfy the conditions, else return false.
- */
-static bool check_scan_ok(__u32 freq, int bandwidth,
- struct pd_dvb_adapter *adapter)
-{
- if (bandwidth < 0)
- return false;
-
- if (adapter->prev_freq == freq
- && adapter->bandwidth == bandwidth) {
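- /* retune to the same frequency/bandwidth only after 15 seconds have passed */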
- long nl = jiffies - adapter->last_jiffies;
- unsigned int msec;
-
- msec = jiffies_to_msecs(abs(nl));
- return msec > 15000 ? true : false;
- }
- return true;
-}
-
-/*
- * Check whether the firmware has stalled too long on an invalid frequency.
- */
-static int fw_delay_overflow(struct pd_dvb_adapter *adapter)
-{
- long nl = jiffies - adapter->last_jiffies;
- unsigned int msec;
-
- msec = jiffies_to_msecs(abs(nl));
- return msec > 800 ? true : false;
-}
-
-static int poseidon_set_fe(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
- s32 ret = 0, cmd_status = 0;
- s32 i, bandwidth = -1;
- struct poseidon *pd = fe->demodulator_priv;
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
-
- if (in_hibernation(pd))
- return -EBUSY;
-
- mutex_lock(&pd->lock);
- for (i = 0; i < dvb_bandwidth_length; i++)
- if (fep->bandwidth_hz == dvb_bandwidth[i][1])
- bandwidth = dvb_bandwidth[i][0];
-
- if (check_scan_ok(fep->frequency, bandwidth, pd_dvb)) {
- ret = send_set_req(pd, TUNE_FREQ_SELECT,
- fep->frequency / 1000, &cmd_status);
- if (ret | cmd_status) {
- log("error line");
- goto front_out;
- }
-
- ret = send_set_req(pd, DVBT_BANDW_SEL,
- bandwidth, &cmd_status);
- if (ret | cmd_status) {
- log("error line");
- goto front_out;
- }
-
- ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status);
- if (ret | cmd_status) {
- log("error line");
- goto front_out;
- }
-
- /* save the context for future use */
- memcpy(&pd_dvb->fe_param, fep, sizeof(*fep));
- pd_dvb->bandwidth = bandwidth;
- pd_dvb->prev_freq = fep->frequency;
- pd_dvb->last_jiffies = jiffies;
- }
-front_out:
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int pm_dvb_suspend(struct poseidon *pd)
-{
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
- dvb_stop_streaming(pd_dvb);
- dvb_urb_cleanup(pd_dvb);
- msleep(500);
- return 0;
-}
-
-static int pm_dvb_resume(struct poseidon *pd)
-{
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
-
- poseidon_check_mode_dvbt(pd);
- msleep(300);
- poseidon_set_fe(&pd_dvb->dvb_fe);
-
- dvb_start_streaming(pd_dvb);
- return 0;
-}
-#endif
-
-static s32 poseidon_fe_init(struct dvb_frontend *fe)
-{
- struct poseidon *pd = fe->demodulator_priv;
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
-
-#ifdef CONFIG_PM
- pd->pm_suspend = pm_dvb_suspend;
- pd->pm_resume = pm_dvb_resume;
-#endif
- memset(&pd_dvb->fe_param, 0,
- sizeof(struct dtv_frontend_properties));
- return 0;
-}
-
-static int poseidon_get_fe(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
- struct poseidon *pd = fe->demodulator_priv;
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
-
- memcpy(fep, &pd_dvb->fe_param, sizeof(*fep));
- return 0;
-}
-
-static int poseidon_fe_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *tune)
-{
- tune->min_delay_ms = 1000;
- return 0;
-}
-
-static int poseidon_read_status(struct dvb_frontend *fe, fe_status_t *stat)
-{
- struct poseidon *pd = fe->demodulator_priv;
- s32 ret = -1, cmd_status;
- struct tuner_dtv_sig_stat_s status = {};
-
- if (in_hibernation(pd))
- return -EBUSY;
- mutex_lock(&pd->lock);
-
- ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_DVB_T,
- &status, &cmd_status, sizeof(status));
- if (ret | cmd_status) {
- log("get tuner status error");
- goto out;
- }
-
- if (debug_mode)
- log("P : %d, L %d, LB :%d", status.sig_present,
- status.sig_locked, status.sig_lock_busy);
-
- if (status.sig_lock_busy) {
- goto out;
- } else if (status.sig_present || status.sig_locked) {
- *stat |= FE_HAS_LOCK | FE_HAS_SIGNAL | FE_HAS_CARRIER
- | FE_HAS_SYNC | FE_HAS_VITERBI;
- } else {
- if (fw_delay_overflow(&pd->dvb_data))
- *stat |= FE_TIMEDOUT;
- }
-out:
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static int poseidon_read_ber(struct dvb_frontend *fe, u32 *ber)
-{
- struct poseidon *pd = fe->demodulator_priv;
- struct tuner_ber_rate_s tlg_ber = {};
- s32 ret = -1, cmd_status;
-
- mutex_lock(&pd->lock);
- ret = send_get_req(pd, TUNER_BER_RATE, 0,
- &tlg_ber, &cmd_status, sizeof(tlg_ber));
- if (ret | cmd_status)
- goto out;
- *ber = tlg_ber.ber_rate;
-out:
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static s32 poseidon_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
-{
- struct poseidon *pd = fe->demodulator_priv;
- struct tuner_dtv_sig_stat_s status = {};
- s32 ret = 0, cmd_status;
-
- mutex_lock(&pd->lock);
- ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_DVB_T,
- &status, &cmd_status, sizeof(status));
- if (ret | cmd_status)
- goto out;
- if ((status.sig_present || status.sig_locked) && !status.sig_strength)
- *strength = 0xFFFF;
- else
- *strength = status.sig_strength;
-out:
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static int poseidon_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- return 0;
-}
-
-static int poseidon_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
-{
- *unc = 0;
- return 0;
-}
-
-static struct dvb_frontend_ops poseidon_frontend_ops = {
- .delsys = { SYS_DVBT },
- .info = {
- .name = "Poseidon DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 62500, /* FIXME */
- .caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
- FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
- FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_RECOVER |
- FE_CAN_HIERARCHY_AUTO,
- },
-
- .release = poseidon_fe_release,
-
- .init = poseidon_fe_init,
- .sleep = poseidon_fe_sleep,
-
- .set_frontend = poseidon_set_fe,
- .get_frontend = poseidon_get_fe,
- .get_tune_settings = poseidon_fe_get_tune_settings,
-
- .read_status = poseidon_read_status,
- .read_ber = poseidon_read_ber,
- .read_signal_strength = poseidon_read_signal_strength,
- .read_snr = poseidon_read_snr,
- .read_ucblocks = poseidon_read_unc_blocks,
-
- .ts_bus_ctrl = poseidon_ts_bus_ctrl,
-};
-
-static void dvb_urb_irq(struct urb *urb)
-{
- struct pd_dvb_adapter *pd_dvb = urb->context;
- int len = urb->transfer_buffer_length;
- struct dvb_demux *demux = &pd_dvb->demux;
- s32 ret;
-
- if (!pd_dvb->is_streaming || urb->status) {
- if (urb->status == -EPROTO)
- goto resend;
- return;
- }
-
- if (urb->actual_length == len)
- dvb_dmx_swfilter(demux, urb->transfer_buffer, len);
- else if (urb->actual_length == len - 4) {
- int offset;
- u8 *buf = urb->transfer_buffer;
-
- /*
- * The packet size is 512; the last packet carries
- * 456 bytes of TS packet data.
- */
- for (offset = 456; offset < len; offset += 512) {
- if (!strncmp(buf + offset, "DVHS", 4)) {
- dvb_dmx_swfilter(demux, buf, offset);
- if (len > offset + 52 + 4) {
- /* 16-byte trailer + 36 bytes of padding */
- buf += offset + 52;
- len -= offset + 52 + 4;
- dvb_dmx_swfilter(demux, buf, len);
- }
- break;
- }
- }
- }
-
-resend:
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret)
- log(" usb_submit_urb failed: error %d", ret);
-}
-
-static int dvb_urb_init(struct pd_dvb_adapter *pd_dvb)
-{
- if (pd_dvb->urb_array[0])
- return 0;
-
- alloc_bulk_urbs_generic(pd_dvb->urb_array, DVB_SBUF_NUM,
- pd_dvb->pd_device->udev, pd_dvb->ep_addr,
- DVB_URB_BUF_SIZE, GFP_KERNEL,
- dvb_urb_irq, pd_dvb);
- return 0;
-}
-
-static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb)
-{
- free_all_urb_generic(pd_dvb->urb_array, DVB_SBUF_NUM);
-}
-
-static s32 dvb_start_streaming(struct pd_dvb_adapter *pd_dvb)
-{
- struct poseidon *pd = pd_dvb->pd_device;
- int ret = 0;
-
- if (pd->state & POSEIDON_STATE_DISCONNECT)
- return -ENODEV;
-
- mutex_lock(&pd->lock);
- if (!pd_dvb->is_streaming) {
- s32 i, cmd_status = 0;
- /*
- * Once upon a time, there was a difficult bug lying here.
- * ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status);
- */
-
- ret = send_set_req(pd, PLAY_SERVICE, 1, &cmd_status);
- if (ret | cmd_status)
- goto out;
-
- ret = dvb_urb_init(pd_dvb);
- if (ret < 0)
- goto out;
-
- pd_dvb->is_streaming = 1;
- for (i = 0; i < DVB_SBUF_NUM; i++) {
- ret = usb_submit_urb(pd_dvb->urb_array[i],
- GFP_KERNEL);
- if (ret) {
- log(" submit urb error %d", ret);
- goto out;
- }
- }
- }
-out:
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-void dvb_stop_streaming(struct pd_dvb_adapter *pd_dvb)
-{
- struct poseidon *pd = pd_dvb->pd_device;
-
- mutex_lock(&pd->lock);
- if (pd_dvb->is_streaming) {
- s32 i, ret, cmd_status = 0;
-
- pd_dvb->is_streaming = 0;
-
- for (i = 0; i < DVB_SBUF_NUM; i++)
- if (pd_dvb->urb_array[i])
- usb_kill_urb(pd_dvb->urb_array[i]);
-
- ret = send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP,
- &cmd_status);
- if (ret | cmd_status)
- log("error");
- }
- mutex_unlock(&pd->lock);
-}
-
-static int pd_start_feed(struct dvb_demux_feed *feed)
-{
- struct pd_dvb_adapter *pd_dvb = feed->demux->priv;
- int ret = 0;
-
- if (!pd_dvb)
- return -1;
- if (atomic_inc_return(&pd_dvb->active_feed) == 1)
- ret = dvb_start_streaming(pd_dvb);
- return ret;
-}
-
-static int pd_stop_feed(struct dvb_demux_feed *feed)
-{
- struct pd_dvb_adapter *pd_dvb = feed->demux->priv;
-
- if (!pd_dvb)
- return -1;
- if (atomic_dec_and_test(&pd_dvb->active_feed))
- dvb_stop_streaming(pd_dvb);
- return 0;
-}
-
-DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-int pd_dvb_usb_device_init(struct poseidon *pd)
-{
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
- struct dvb_demux *dvbdemux;
- int ret = 0;
-
- pd_dvb->ep_addr = 0x82;
- atomic_set(&pd_dvb->users, 0);
- atomic_set(&pd_dvb->active_feed, 0);
- pd_dvb->pd_device = pd;
-
- ret = dvb_register_adapter(&pd_dvb->dvb_adap,
- "Poseidon dvbt adapter",
- THIS_MODULE,
- NULL /* so that hibernation is handled correctly */,
- adapter_nr);
- if (ret < 0)
- goto error1;
-
- /* register frontend */
- pd_dvb->dvb_fe.demodulator_priv = pd;
- memcpy(&pd_dvb->dvb_fe.ops, &poseidon_frontend_ops,
- sizeof(struct dvb_frontend_ops));
- ret = dvb_register_frontend(&pd_dvb->dvb_adap, &pd_dvb->dvb_fe);
- if (ret < 0)
- goto error2;
-
- /* register demux device */
- dvbdemux = &pd_dvb->demux;
- dvbdemux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
- dvbdemux->priv = pd_dvb;
- dvbdemux->feednum = dvbdemux->filternum = 64;
- dvbdemux->start_feed = pd_start_feed;
- dvbdemux->stop_feed = pd_stop_feed;
- dvbdemux->write_to_decoder = NULL;
-
- ret = dvb_dmx_init(dvbdemux);
- if (ret < 0)
- goto error3;
-
- pd_dvb->dmxdev.filternum = pd_dvb->demux.filternum;
- pd_dvb->dmxdev.demux = &pd_dvb->demux.dmx;
- pd_dvb->dmxdev.capabilities = 0;
-
- ret = dvb_dmxdev_init(&pd_dvb->dmxdev, &pd_dvb->dvb_adap);
- if (ret < 0)
- goto error3;
- return 0;
-
-error3:
- dvb_unregister_frontend(&pd_dvb->dvb_fe);
-error2:
- dvb_unregister_adapter(&pd_dvb->dvb_adap);
-error1:
- return ret;
-}
-
-void pd_dvb_usb_device_exit(struct poseidon *pd)
-{
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
-
- while (atomic_read(&pd_dvb->users) != 0
- || atomic_read(&pd_dvb->active_feed) != 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- }
- dvb_dmxdev_release(&pd_dvb->dmxdev);
- dvb_unregister_frontend(&pd_dvb->dvb_fe);
- dvb_unregister_adapter(&pd_dvb->dvb_adap);
- pd_dvb_usb_device_cleanup(pd);
-}
-
-void pd_dvb_usb_device_cleanup(struct poseidon *pd)
-{
- struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
-
- dvb_urb_cleanup(pd_dvb);
-}
-
-int pd_dvb_get_adapter_num(struct pd_dvb_adapter *pd_dvb)
-{
- return pd_dvb->dvb_adap.num;
-}
diff --git a/drivers/staging/media/tlg2300/pd-main.c b/drivers/staging/media/tlg2300/pd-main.c
deleted file mode 100644
index b31f4791b8ff..000000000000
--- a/drivers/staging/media/tlg2300/pd-main.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/*
- * device driver for Telegent tlg2300 based TV cards
- *
- * Author :
- * Kang Yong <kangyong@telegent.com>
- * Zhang Xiaobing <xbzhang@telegent.com>
- * Huang Shijie <zyziii@telegent.com> or <shijie8@gmail.com>
- *
- * (c) 2009 Telegent Systems
- * (c) 2010 Telegent Systems
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
-#include <linux/suspend.h>
-#include <linux/usb/quirks.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/firmware.h>
-
-#include "vendorcmds.h"
-#include "pd-common.h"
-
-#define VENDOR_ID 0x1B24
-#define PRODUCT_ID 0x4001
-static struct usb_device_id id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, PRODUCT_ID, 255, 1, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, PRODUCT_ID, 255, 1, 1) },
- { },
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-int debug_mode;
-module_param(debug_mode, int, 0644);
-MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose");
-
-#define TLG2300_FIRMWARE "tlg2300_firmware.bin"
-static const char *firmware_name = TLG2300_FIRMWARE;
-static LIST_HEAD(pd_device_list);
-
-/*
- * send set request to USB firmware.
- */
-s32 send_set_req(struct poseidon *pd, u8 cmdid, s32 param, s32 *cmd_status)
-{
- s32 ret;
- s8 data[32] = {};
- u16 lower_16, upper_16;
-
- if (pd->state & POSEIDON_STATE_DISCONNECT)
- return -ENODEV;
-
- mdelay(30);
-
- if (param == 0) {
- upper_16 = lower_16 = 0;
- } else {
- /* send the 32-bit param as two 16-bit params, little endian */
- lower_16 = (unsigned short)(param & 0xffff);
- upper_16 = (unsigned short)((param >> 16) & 0xffff);
- }
- ret = usb_control_msg(pd->udev,
- usb_rcvctrlpipe(pd->udev, 0),
- REQ_SET_CMD | cmdid,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- lower_16,
- upper_16,
- &data,
- sizeof(*cmd_status),
- USB_CTRL_GET_TIMEOUT);
-
- if (!ret) {
- return -ENXIO;
- } else {
- /* 1st 4 bytes into cmd_status */
- memcpy((char *)cmd_status, &(data[0]), sizeof(*cmd_status));
- }
- return 0;
-}
-
-/*
- * send get request to Poseidon firmware.
- */
-s32 send_get_req(struct poseidon *pd, u8 cmdid, s32 param,
- void *buf, s32 *cmd_status, s32 datalen)
-{
- s32 ret;
- s8 data[128] = {};
- u16 lower_16, upper_16;
-
- if (pd->state & POSEIDON_STATE_DISCONNECT)
- return -ENODEV;
-
- mdelay(30);
- if (param == 0) {
- upper_16 = lower_16 = 0;
- } else {
- /* send the 32-bit param as two 16-bit params, little endian */
- lower_16 = (unsigned short)(param & 0xffff);
- upper_16 = (unsigned short)((param >> 16) & 0xffff);
- }
- ret = usb_control_msg(pd->udev,
- usb_rcvctrlpipe(pd->udev, 0),
- REQ_GET_CMD | cmdid,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- lower_16,
- upper_16,
- &data,
- (datalen + sizeof(*cmd_status)),
- USB_CTRL_GET_TIMEOUT);
-
- if (ret < 0) {
- return -ENXIO;
- } else {
- /* 1st 4 bytes into cmd_status, remaining data into cmd_data */
- memcpy((char *)cmd_status, &data[0], sizeof(*cmd_status));
- memcpy((char *)buf, &data[sizeof(*cmd_status)], datalen);
- }
- return 0;
-}
-
-static int pm_notifier_block(struct notifier_block *nb,
- unsigned long event, void *dummy)
-{
- struct poseidon *pd = NULL;
- struct list_head *node, *next;
-
- switch (event) {
- case PM_POST_HIBERNATION:
- list_for_each_safe(node, next, &pd_device_list) {
- struct usb_device *udev;
- struct usb_interface *iface;
- int rc = 0;
-
- pd = container_of(node, struct poseidon, device_list);
- udev = pd->udev;
- iface = pd->interface;
-
- /* resetting the device causes the firmware to be reloaded */
- rc = usb_lock_device_for_reset(udev, iface);
- if (rc >= 0) {
- usb_reset_device(udev);
- usb_unlock_device(udev);
- }
- }
- break;
- default:
- break;
- }
- log("event :%ld\n", event);
- return 0;
-}
-
-static struct notifier_block pm_notifer = {
- .notifier_call = pm_notifier_block,
-};
-
-int set_tuner_mode(struct poseidon *pd, unsigned char mode)
-{
- s32 ret, cmd_status;
-
- if (pd->state & POSEIDON_STATE_DISCONNECT)
- return -ENODEV;
-
- ret = send_set_req(pd, TUNE_MODE_SELECT, mode, &cmd_status);
- if (ret || cmd_status)
- return -ENXIO;
- return 0;
-}
-
-void poseidon_delete(struct kref *kref)
-{
- struct poseidon *pd = container_of(kref, struct poseidon, kref);
-
- if (!pd)
- return;
- list_del_init(&pd->device_list);
-
- pd_dvb_usb_device_cleanup(pd);
- /* clean_audio_data(&pd->audio_data);*/
-
- if (pd->udev) {
- usb_put_dev(pd->udev);
- pd->udev = NULL;
- }
- if (pd->interface) {
- usb_put_intf(pd->interface);
- pd->interface = NULL;
- }
- kfree(pd);
- log();
-}
-
-static int firmware_download(struct usb_device *udev)
-{
- int ret = 0, actual_length;
- const struct firmware *fw = NULL;
- void *fwbuf = NULL;
- size_t fwlength = 0, offset;
- size_t max_packet_size;
-
- ret = request_firmware(&fw, firmware_name, &udev->dev);
- if (ret) {
- log("download err : %d", ret);
- return ret;
- }
-
- fwlength = fw->size;
-
- fwbuf = kmemdup(fw->data, fwlength, GFP_KERNEL);
- if (!fwbuf) {
- ret = -ENOMEM;
- goto out;
- }
-
- max_packet_size = le16_to_cpu(udev->ep_out[0x1]->desc.wMaxPacketSize);
- log("\t\t download size : %d", (int)max_packet_size);
-
- for (offset = 0; offset < fwlength; offset += max_packet_size) {
- actual_length = 0;
- ret = usb_bulk_msg(udev,
- usb_sndbulkpipe(udev, 0x01), /* ep 1 */
- fwbuf + offset,
- min(max_packet_size, fwlength - offset),
- &actual_length,
- HZ * 10);
- if (ret)
- break;
- }
- kfree(fwbuf);
-out:
- release_firmware(fw);
- return ret;
-}
-
-static inline struct poseidon *get_pd(struct usb_interface *intf)
-{
- return usb_get_intfdata(intf);
-}
-
-#ifdef CONFIG_PM
-/* one-to-one map : poseidon{} <----> usb_device{}'s port */
-static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
-{
- pd->portnum = udev->portnum;
-}
-
-static inline int get_autopm_ref(struct poseidon *pd)
-{
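- /* count every active user across video, VBI, audio, DVB and FM radio */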
- return pd->video_data.users + pd->vbi_data.users + pd->audio.users
- + atomic_read(&pd->dvb_data.users) +
- !list_empty(&pd->radio_data.fm_dev.fh_list);
-}
-
-/* fix up the poseidon context after resuming from hibernation */
-static inline struct poseidon *fixup(struct poseidon *pd)
-{
- int count;
-
- /* the old udev and interface are gone, so put back the references */
- count = get_autopm_ref(pd);
- log("count : %d, ref count : %d", count, get_pm_count(pd));
- while (count--)
- usb_autopm_put_interface(pd->interface);
- /*usb_autopm_set_interface(pd->interface); */
-
- usb_put_dev(pd->udev);
- usb_put_intf(pd->interface);
- log("event : %d\n", pd->msg.event);
- return pd;
-}
-
-static struct poseidon *find_old_poseidon(struct usb_device *udev)
-{
- struct poseidon *pd;
-
- list_for_each_entry(pd, &pd_device_list, device_list) {
- if (pd->portnum == udev->portnum && in_hibernation(pd))
- return fixup(pd);
- }
- return NULL;
-}
-
-/* Is the card working now? */
-static inline int is_working(struct poseidon *pd)
-{
- return get_pm_count(pd) > 0;
-}
-
-static int poseidon_suspend(struct usb_interface *intf, pm_message_t msg)
-{
- struct poseidon *pd = get_pd(intf);
-
- if (!pd)
- return 0;
- if (!is_working(pd)) {
- if (get_pm_count(pd) <= 0 && !in_hibernation(pd)) {
- pd->msg.event = PM_EVENT_AUTO_SUSPEND;
- pd->pm_resume = NULL; /* a good guard */
- printk(KERN_DEBUG "TLG2300 auto suspend\n");
- }
- return 0;
- }
- pd->msg = msg; /* save it here */
- logpm(pd);
- return pd->pm_suspend ? pd->pm_suspend(pd) : 0;
-}
-
-static int poseidon_resume(struct usb_interface *intf)
-{
- struct poseidon *pd = get_pd(intf);
-
- if (!pd)
- return 0;
- printk(KERN_DEBUG "TLG2300 resume\n");
-
- if (!is_working(pd)) {
- if (PM_EVENT_AUTO_SUSPEND == pd->msg.event)
- pd->msg = PMSG_ON;
- return 0;
- }
- if (in_hibernation(pd)) {
- logpm(pd);
- return 0;
- }
- logpm(pd);
- return pd->pm_resume ? pd->pm_resume(pd) : 0;
-}
-
-static void hibernation_resume(struct work_struct *w)
-{
- struct poseidon *pd = container_of(w, struct poseidon, pm_work);
- int count;
-
- pd->msg.event = 0; /* clear it here */
- pd->state &= ~POSEIDON_STATE_DISCONNECT;
-
- /* take the autopm references on the new interface */
- count = get_autopm_ref(pd);
- while (count--)
- usb_autopm_get_interface(pd->interface);
-
- /* resume the context */
- logpm(pd);
- if (pd->pm_resume)
- pd->pm_resume(pd);
-}
-#else /* CONFIG_PM is not enabled: */
-static inline struct poseidon *find_old_poseidon(struct usb_device *udev)
-{
- return NULL;
-}
-
-static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
-{
-}
-#endif
-
-static int check_firmware(struct usb_device *udev)
-{
- void *buf;
- int ret;
- struct cmd_firmware_vers_s *cmd_firm;
-
- buf = kzalloc(sizeof(*cmd_firm) + sizeof(u32), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- ret = usb_control_msg(udev,
- usb_rcvctrlpipe(udev, 0),
- REQ_GET_CMD | GET_FW_ID,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0,
- 0,
- buf,
- sizeof(*cmd_firm) + sizeof(u32),
- USB_CTRL_GET_TIMEOUT);
- kfree(buf);
-
- if (ret < 0)
- return firmware_download(udev);
- return 0;
-}
-
-static int poseidon_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct poseidon *pd = NULL;
- int ret = 0;
- int new_one = 0;
-
- /* download firmware */
- ret = check_firmware(udev);
- if (ret)
- return ret;
-
- /* Are we recovering from hibernation? */
- pd = find_old_poseidon(udev);
- if (!pd) {
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
- kref_init(&pd->kref);
- set_map_flags(pd, udev);
- new_one = 1;
- }
-
- pd->udev = usb_get_dev(udev);
- pd->interface = usb_get_intf(interface);
- usb_set_intfdata(interface, pd);
-
- if (new_one) {
- logpm(pd);
- mutex_init(&pd->lock);
-
- /* register v4l2 device */
- ret = v4l2_device_register(&interface->dev, &pd->v4l2_dev);
- if (ret)
- goto err_v4l2;
-
- /* register devices in directory /dev */
- ret = pd_video_init(pd);
- if (ret)
- goto err_video;
- ret = poseidon_audio_init(pd);
- if (ret)
- goto err_audio;
- ret = poseidon_fm_init(pd);
- if (ret)
- goto err_fm;
- ret = pd_dvb_usb_device_init(pd);
- if (ret)
- goto err_dvb;
-
- INIT_LIST_HEAD(&pd->device_list);
- list_add_tail(&pd->device_list, &pd_device_list);
- }
-
- device_init_wakeup(&udev->dev, 1);
-#ifdef CONFIG_PM
- pm_runtime_set_autosuspend_delay(&pd->udev->dev,
- 1000 * PM_SUSPEND_DELAY);
- usb_enable_autosuspend(pd->udev);
-
- if (in_hibernation(pd)) {
- INIT_WORK(&pd->pm_work, hibernation_resume);
- schedule_work(&pd->pm_work);
- }
-#endif
- return 0;
-err_dvb:
- poseidon_fm_exit(pd);
-err_fm:
- poseidon_audio_free(pd);
-err_audio:
- pd_video_exit(pd);
-err_video:
- v4l2_device_unregister(&pd->v4l2_dev);
-err_v4l2:
- usb_put_intf(pd->interface);
- usb_put_dev(pd->udev);
- kfree(pd);
- return ret;
-}
-
-static void poseidon_disconnect(struct usb_interface *interface)
-{
- struct poseidon *pd = get_pd(interface);
-
- if (!pd)
- return;
- logpm(pd);
- if (in_hibernation(pd))
- return;
-
- mutex_lock(&pd->lock);
- pd->state |= POSEIDON_STATE_DISCONNECT;
- mutex_unlock(&pd->lock);
-
- /* stop URB transfers */
- stop_all_video_stream(pd);
- dvb_stop_streaming(&pd->dvb_data);
-
- /* unregister v4l2 device */
- v4l2_device_unregister(&pd->v4l2_dev);
-
- pd_dvb_usb_device_exit(pd);
- poseidon_fm_exit(pd);
-
- poseidon_audio_free(pd);
- pd_video_exit(pd);
-
- usb_set_intfdata(interface, NULL);
- kref_put(&pd->kref, poseidon_delete);
-}
-
-static struct usb_driver poseidon_driver = {
- .name = "poseidon",
- .probe = poseidon_probe,
- .disconnect = poseidon_disconnect,
- .id_table = id_table,
-#ifdef CONFIG_PM
- .suspend = poseidon_suspend,
- .resume = poseidon_resume,
-#endif
- .supports_autosuspend = 1,
-};
-
-static int __init poseidon_init(void)
-{
- int ret;
-
- ret = usb_register(&poseidon_driver);
- if (ret)
- return ret;
- register_pm_notifier(&pm_notifer);
- return ret;
-}
-
-static void __exit poseidon_exit(void)
-{
- log();
- unregister_pm_notifier(&pm_notifer);
- usb_deregister(&poseidon_driver);
-}
-
-module_init(poseidon_init);
-module_exit(poseidon_exit);
-
-MODULE_AUTHOR("Telegent Systems");
-MODULE_DESCRIPTION("For tlg2300-based USB device");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.0.2");
-MODULE_FIRMWARE(TLG2300_FIRMWARE);
diff --git a/drivers/staging/media/tlg2300/pd-radio.c b/drivers/staging/media/tlg2300/pd-radio.c
deleted file mode 100644
index b391194a840c..000000000000
--- a/drivers/staging/media/tlg2300/pd-radio.c
+++ /dev/null
@@ -1,339 +0,0 @@
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/usb.h>
-#include <linux/i2c.h>
-#include <media/v4l2-dev.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-fh.h>
-#include <linux/sched.h>
-
-#include "pd-common.h"
-#include "vendorcmds.h"
-
-static int set_frequency(struct poseidon *p, __u32 frequency);
-static int poseidon_fm_close(struct file *filp);
-static int poseidon_fm_open(struct file *filp);
-
-#define TUNER_FREQ_MIN_FM 76000000U
-#define TUNER_FREQ_MAX_FM 108000000U
-
-#define MAX_PREEMPHASIS (V4L2_PREEMPHASIS_75_uS + 1)
-static int preemphasis[MAX_PREEMPHASIS] = {
- TLG_TUNE_ASTD_NONE, /* V4L2_PREEMPHASIS_DISABLED */
- TLG_TUNE_ASTD_FM_EUR, /* V4L2_PREEMPHASIS_50_uS */
- TLG_TUNE_ASTD_FM_US, /* V4L2_PREEMPHASIS_75_uS */
-};
-
-static int poseidon_check_mode_radio(struct poseidon *p)
-{
- int ret;
- u32 status;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/2);
- ret = usb_set_interface(p->udev, 0, BULK_ALTERNATE_IFACE);
- if (ret < 0)
- goto out;
-
- ret = set_tuner_mode(p, TLG_MODE_FM_RADIO);
- if (ret != 0)
- goto out;
-
- ret = send_set_req(p, SGNL_SRC_SEL, TLG_SIG_SRC_ANTENNA, &status);
- ret = send_set_req(p, TUNER_AUD_ANA_STD,
- p->radio_data.pre_emphasis, &status);
- ret |= send_set_req(p, TUNER_AUD_MODE,
- TLG_TUNE_TVAUDIO_MODE_STEREO, &status);
- ret |= send_set_req(p, AUDIO_SAMPLE_RATE_SEL,
- ATV_AUDIO_RATE_48K, &status);
- ret |= send_set_req(p, TUNE_FREQ_SELECT, TUNER_FREQ_MIN_FM, &status);
-out:
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int pm_fm_suspend(struct poseidon *p)
-{
- logpm(p);
- pm_alsa_suspend(p);
- usb_set_interface(p->udev, 0, 0);
- msleep(300);
- return 0;
-}
-
-static int pm_fm_resume(struct poseidon *p)
-{
- logpm(p);
- poseidon_check_mode_radio(p);
- set_frequency(p, p->radio_data.fm_freq);
- pm_alsa_resume(p);
- return 0;
-}
-#endif
-
-static int poseidon_fm_open(struct file *filp)
-{
- struct poseidon *p = video_drvdata(filp);
- int ret = 0;
-
- mutex_lock(&p->lock);
- if (p->state & POSEIDON_STATE_DISCONNECT) {
- ret = -ENODEV;
- goto out;
- }
-
- if (p->state && !(p->state & POSEIDON_STATE_FM)) {
- ret = -EBUSY;
- goto out;
- }
- ret = v4l2_fh_open(filp);
- if (ret)
- goto out;
-
- usb_autopm_get_interface(p->interface);
- if (0 == p->state) {
- struct video_device *vfd = &p->radio_data.fm_dev;
-
- /* default pre-emphasis */
- if (p->radio_data.pre_emphasis == 0)
- p->radio_data.pre_emphasis = TLG_TUNE_ASTD_FM_EUR;
- set_debug_mode(vfd, debug_mode);
-
- ret = poseidon_check_mode_radio(p);
- if (ret < 0) {
- usb_autopm_put_interface(p->interface);
- goto out;
- }
- p->state |= POSEIDON_STATE_FM;
- }
- kref_get(&p->kref);
-out:
- mutex_unlock(&p->lock);
- return ret;
-}
-
-static int poseidon_fm_close(struct file *filp)
-{
- struct poseidon *p = video_drvdata(filp);
- struct radio_data *fm = &p->radio_data;
- uint32_t status;
-
- mutex_lock(&p->lock);
- if (v4l2_fh_is_singular_file(filp))
- p->state &= ~POSEIDON_STATE_FM;
-
- if (fm->is_radio_streaming && filp == p->file_for_stream) {
- fm->is_radio_streaming = 0;
- send_set_req(p, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP, &status);
- }
- usb_autopm_put_interface(p->interface);
- mutex_unlock(&p->lock);
-
- kref_put(&p->kref, poseidon_delete);
- return v4l2_fh_release(filp);
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *v)
-{
- struct poseidon *p = video_drvdata(file);
-
- strlcpy(v->driver, "tele-radio", sizeof(v->driver));
- strlcpy(v->card, "Telegent Poseidon", sizeof(v->card));
- usb_make_path(p->udev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- /* Report all capabilities of the USB device */
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_AUDIO | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- return 0;
-}
-
-static const struct v4l2_file_operations poseidon_fm_fops = {
- .owner = THIS_MODULE,
- .open = poseidon_fm_open,
- .release = poseidon_fm_close,
- .poll = v4l2_ctrl_poll,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static int tlg_fm_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *vt)
-{
- struct poseidon *p = video_drvdata(file);
- struct tuner_fm_sig_stat_s fm_stat = {};
- int ret, status, count = 5;
-
- if (vt->index != 0)
- return -EINVAL;
-
- vt->type = V4L2_TUNER_RADIO;
- vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
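- /* V4L2_TUNER_CAP_LOW: frequencies are expressed in units of 62.5 Hz */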
- vt->rangelow = TUNER_FREQ_MIN_FM * 2 / 125;
- vt->rangehigh = TUNER_FREQ_MAX_FM * 2 / 125;
- vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
- vt->audmode = V4L2_TUNER_MODE_STEREO;
- vt->signal = 0;
- vt->afc = 0;
- strlcpy(vt->name, "Radio", sizeof(vt->name));
-
- mutex_lock(&p->lock);
- ret = send_get_req(p, TUNER_STATUS, TLG_MODE_FM_RADIO,
- &fm_stat, &status, sizeof(fm_stat));
-
- while (fm_stat.sig_lock_busy && count-- && !ret) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
-
- ret = send_get_req(p, TUNER_STATUS, TLG_MODE_FM_RADIO,
- &fm_stat, &status, sizeof(fm_stat));
- }
- mutex_unlock(&p->lock);
-
- if (ret || status) {
- vt->signal = 0;
- } else if ((fm_stat.sig_present || fm_stat.sig_locked)
- && fm_stat.sig_strength == 0) {
- vt->signal = 0xffff;
- } else
- vt->signal = (fm_stat.sig_strength * 255 / 10) << 8;
-
- return 0;
-}
-
-static int fm_get_freq(struct file *file, void *priv,
- struct v4l2_frequency *argp)
-{
- struct poseidon *p = video_drvdata(file);
-
- if (argp->tuner)
- return -EINVAL;
- argp->frequency = p->radio_data.fm_freq;
- return 0;
-}
-
-static int set_frequency(struct poseidon *p, __u32 frequency)
-{
- __u32 freq;
- int ret, status;
-
- mutex_lock(&p->lock);
-
- ret = send_set_req(p, TUNER_AUD_ANA_STD,
- p->radio_data.pre_emphasis, &status);
-
- freq = (frequency * 125) / 2; /* Hz */
- freq = clamp(freq, TUNER_FREQ_MIN_FM, TUNER_FREQ_MAX_FM);
-
- ret = send_set_req(p, TUNE_FREQ_SELECT, freq, &status);
- if (ret < 0)
- goto error;
- ret = send_set_req(p, TAKE_REQUEST, 0, &status);
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/4);
- if (!p->radio_data.is_radio_streaming) {
- ret = send_set_req(p, TAKE_REQUEST, 0, &status);
- ret = send_set_req(p, PLAY_SERVICE,
- TLG_TUNE_PLAY_SVC_START, &status);
- p->radio_data.is_radio_streaming = 1;
- }
- p->radio_data.fm_freq = freq * 2 / 125;
-error:
- mutex_unlock(&p->lock);
- return ret;
-}
-
-static int fm_set_freq(struct file *file, void *priv,
- const struct v4l2_frequency *argp)
-{
- struct poseidon *p = video_drvdata(file);
-
- if (argp->tuner)
- return -EINVAL;
- p->file_for_stream = file;
-#ifdef CONFIG_PM
- p->pm_suspend = pm_fm_suspend;
- p->pm_resume = pm_fm_resume;
-#endif
- return set_frequency(p, argp->frequency);
-}
-
-static int tlg_fm_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct poseidon *p = container_of(ctrl->handler, struct poseidon,
- radio_data.ctrl_handler);
- int pre_emphasis;
- u32 status;
-
- switch (ctrl->id) {
- case V4L2_CID_TUNE_PREEMPHASIS:
- pre_emphasis = preemphasis[ctrl->val];
- send_set_req(p, TUNER_AUD_ANA_STD, pre_emphasis, &status);
- p->radio_data.pre_emphasis = pre_emphasis;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
-{
- return vt->index > 0 ? -EINVAL : 0;
-}
-
-static const struct v4l2_ctrl_ops tlg_fm_ctrl_ops = {
- .s_ctrl = tlg_fm_s_ctrl,
-};
-
-static const struct v4l2_ioctl_ops poseidon_fm_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_tuner = tlg_fm_vidioc_g_tuner,
- .vidioc_g_frequency = fm_get_freq,
- .vidioc_s_frequency = fm_set_freq,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static struct video_device poseidon_fm_template = {
- .name = "Telegent-Radio",
- .fops = &poseidon_fm_fops,
- .minor = -1,
- .release = video_device_release_empty,
- .ioctl_ops = &poseidon_fm_ioctl_ops,
-};
-
-int poseidon_fm_init(struct poseidon *p)
-{
- struct video_device *vfd = &p->radio_data.fm_dev;
- struct v4l2_ctrl_handler *hdl = &p->radio_data.ctrl_handler;
-
- *vfd = poseidon_fm_template;
-
- set_frequency(p, TUNER_FREQ_MIN_FM);
- v4l2_ctrl_handler_init(hdl, 1);
- v4l2_ctrl_new_std_menu(hdl, &tlg_fm_ctrl_ops, V4L2_CID_TUNE_PREEMPHASIS,
- V4L2_PREEMPHASIS_75_uS, 0, V4L2_PREEMPHASIS_50_uS);
- if (hdl->error) {
- v4l2_ctrl_handler_free(hdl);
- return hdl->error;
- }
- vfd->v4l2_dev = &p->v4l2_dev;
- vfd->ctrl_handler = hdl;
- video_set_drvdata(vfd, p);
- return video_register_device(vfd, VFL_TYPE_RADIO, -1);
-}
-
-int poseidon_fm_exit(struct poseidon *p)
-{
- video_unregister_device(&p->radio_data.fm_dev);
- v4l2_ctrl_handler_free(&p->radio_data.ctrl_handler);
- return 0;
-}
diff --git a/drivers/staging/media/tlg2300/pd-video.c b/drivers/staging/media/tlg2300/pd-video.c
deleted file mode 100644
index 8cd7f02fcf9f..000000000000
--- a/drivers/staging/media/tlg2300/pd-video.c
+++ /dev/null
@@ -1,1570 +0,0 @@
-#include <linux/fs.h>
-#include <linux/vmalloc.h>
-#include <linux/videodev2.h>
-#include <linux/usb.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-ctrls.h>
-
-#include "pd-common.h"
-#include "vendorcmds.h"
-
-#ifdef CONFIG_PM
-static int pm_video_suspend(struct poseidon *pd);
-static int pm_video_resume(struct poseidon *pd);
-#endif
-static void iso_bubble_handler(struct work_struct *w);
-
-static int usb_transfer_mode;
-module_param(usb_transfer_mode, int, 0644);
-MODULE_PARM_DESC(usb_transfer_mode, "0 = Bulk, 1 = Isochronous");
-
-static const struct poseidon_format poseidon_formats[] = {
- { "YUV 422", V4L2_PIX_FMT_YUYV, 16, 0},
- { "RGB565", V4L2_PIX_FMT_RGB565, 16, 0},
-};
-
-static const struct poseidon_tvnorm poseidon_tvnorms[] = {
- { V4L2_STD_PAL_D, "PAL-D", TLG_TUNE_VSTD_PAL_D },
- { V4L2_STD_PAL_B, "PAL-B", TLG_TUNE_VSTD_PAL_B },
- { V4L2_STD_PAL_G, "PAL-G", TLG_TUNE_VSTD_PAL_G },
- { V4L2_STD_PAL_H, "PAL-H", TLG_TUNE_VSTD_PAL_H },
- { V4L2_STD_PAL_I, "PAL-I", TLG_TUNE_VSTD_PAL_I },
- { V4L2_STD_PAL_M, "PAL-M", TLG_TUNE_VSTD_PAL_M },
- { V4L2_STD_PAL_N, "PAL-N", TLG_TUNE_VSTD_PAL_N_COMBO },
- { V4L2_STD_PAL_Nc, "PAL-Nc", TLG_TUNE_VSTD_PAL_N_COMBO },
- { V4L2_STD_NTSC_M, "NTSC-M", TLG_TUNE_VSTD_NTSC_M },
- { V4L2_STD_NTSC_M_JP, "NTSC-JP", TLG_TUNE_VSTD_NTSC_M_J },
- { V4L2_STD_SECAM_B, "SECAM-B", TLG_TUNE_VSTD_SECAM_B },
- { V4L2_STD_SECAM_D, "SECAM-D", TLG_TUNE_VSTD_SECAM_D },
- { V4L2_STD_SECAM_G, "SECAM-G", TLG_TUNE_VSTD_SECAM_G },
- { V4L2_STD_SECAM_H, "SECAM-H", TLG_TUNE_VSTD_SECAM_H },
- { V4L2_STD_SECAM_K, "SECAM-K", TLG_TUNE_VSTD_SECAM_K },
- { V4L2_STD_SECAM_K1, "SECAM-K1", TLG_TUNE_VSTD_SECAM_K1 },
- { V4L2_STD_SECAM_L, "SECAM-L", TLG_TUNE_VSTD_SECAM_L },
- { V4L2_STD_SECAM_LC, "SECAM-LC", TLG_TUNE_VSTD_SECAM_L1 },
-};
-static const unsigned int POSEIDON_TVNORMS = ARRAY_SIZE(poseidon_tvnorms);
-
-struct pd_audio_mode {
- u32 tlg_audio_mode;
- u32 v4l2_audio_sub;
- u32 v4l2_audio_mode;
-};
-
-static const struct pd_audio_mode pd_audio_modes[] = {
- { TLG_TUNE_TVAUDIO_MODE_MONO, V4L2_TUNER_SUB_MONO,
- V4L2_TUNER_MODE_MONO },
- { TLG_TUNE_TVAUDIO_MODE_STEREO, V4L2_TUNER_SUB_STEREO,
- V4L2_TUNER_MODE_STEREO },
- { TLG_TUNE_TVAUDIO_MODE_LANG_A, V4L2_TUNER_SUB_LANG1,
- V4L2_TUNER_MODE_LANG1 },
- { TLG_TUNE_TVAUDIO_MODE_LANG_B, V4L2_TUNER_SUB_LANG2,
- V4L2_TUNER_MODE_LANG2 },
- { TLG_TUNE_TVAUDIO_MODE_LANG_C, V4L2_TUNER_SUB_LANG1,
- V4L2_TUNER_MODE_LANG1_LANG2 }
-};
-static const unsigned int POSEIDON_AUDIOMODS = ARRAY_SIZE(pd_audio_modes);
-
-struct pd_input {
- char *name;
- uint32_t tlg_src;
-};
-
-static const struct pd_input pd_inputs[] = {
- { "TV Antenna", TLG_SIG_SRC_ANTENNA },
- { "TV Cable", TLG_SIG_SRC_CABLE },
- { "TV SVideo", TLG_SIG_SRC_SVIDEO },
- { "TV Composite", TLG_SIG_SRC_COMPOSITE }
-};
-static const unsigned int POSEIDON_INPUTS = ARRAY_SIZE(pd_inputs);
-
-struct video_std_to_audio_std {
- v4l2_std_id video_std;
- int audio_std;
-};
-
-static const struct video_std_to_audio_std video_to_audio_map[] = {
- /* country : { 27, 32, 33, 34, 36, 44, 45, 46, 47, 48, 64,
- 65, 86, 351, 352, 353, 354, 358, 372, 852, 972 } */
- { (V4L2_STD_PAL_I | V4L2_STD_PAL_B | V4L2_STD_PAL_D |
- V4L2_STD_SECAM_L | V4L2_STD_SECAM_D), TLG_TUNE_ASTD_NICAM },
-
- /* country : { 1, 52, 54, 55, 886 } */
- {V4L2_STD_NTSC_M | V4L2_STD_PAL_N | V4L2_STD_PAL_M, TLG_TUNE_ASTD_BTSC},
-
- /* country : { 81 } */
- { V4L2_STD_NTSC_M_JP, TLG_TUNE_ASTD_EIAJ },
-
- /* other country : TLG_TUNE_ASTD_A2 */
-};
-static const unsigned int map_size = ARRAY_SIZE(video_to_audio_map);
-
-static int get_audio_std(v4l2_std_id v4l2_std)
-{
- int i = 0;
-
- for (; i < map_size; i++) {
- if (v4l2_std & video_to_audio_map[i].video_std)
- return video_to_audio_map[i].audio_std;
- }
- return TLG_TUNE_ASTD_A2;
-}
-
-static int vidioc_querycap(struct file *file, void *fh,
- struct v4l2_capability *cap)
-{
- struct video_device *vdev = video_devdata(file);
- struct poseidon *p = video_get_drvdata(vdev);
-
- strcpy(cap->driver, "tele-video");
- strcpy(cap->card, "Telegent Poseidon");
- usb_make_path(p->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_TUNER | V4L2_CAP_AUDIO |
- V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- if (vdev->vfl_type == VFL_TYPE_VBI)
- cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
- else
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
- V4L2_CAP_RADIO | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE;
- return 0;
-}
-
-/*====================================================================*/
-static void init_copy(struct video_data *video, bool index)
-{
- struct front_face *front = video->front;
-
- video->field_count = index;
- video->lines_copied = 0;
- video->prev_left = 0;
- video->dst = (char *)videobuf_to_vmalloc(front->curr_frame)
- + index * video->lines_size;
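- /* the odd field starts one line in; the two fields are interleaved line by line */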
- video->vbi->copied = 0; /* set it here */
-}
-
-static bool get_frame(struct front_face *front, int *need_init)
-{
- struct videobuf_buffer *vb = front->curr_frame;
-
- if (vb)
- return true;
-
- spin_lock(&front->queue_lock);
- if (!list_empty(&front->active)) {
- vb = list_entry(front->active.next,
- struct videobuf_buffer, queue);
- if (need_init)
- *need_init = 1;
- front->curr_frame = vb;
- list_del_init(&vb->queue);
- }
- spin_unlock(&front->queue_lock);
-
- return !!vb;
-}
-
-/* check if the video's buffer is ready */
-static bool get_video_frame(struct front_face *front, struct video_data *video)
-{
- int need_init = 0;
- bool ret = true;
-
- ret = get_frame(front, &need_init);
- if (ret && need_init)
- init_copy(video, 0);
- return ret;
-}
-
-static void submit_frame(struct front_face *front)
-{
- struct videobuf_buffer *vb = front->curr_frame;
-
- if (vb == NULL)
- return;
-
- front->curr_frame = NULL;
- vb->state = VIDEOBUF_DONE;
- vb->field_count++;
- v4l2_get_timestamp(&vb->ts);
-
- wake_up(&vb->done);
-}
-
-/*
- * A frame is composed of two fields. Once both fields have been received,
- * call submit_frame() to hand the whole frame to applications.
- */
-static void end_field(struct video_data *video)
-{
- if (1 == video->field_count)
- submit_frame(video->front);
- else
- init_copy(video, 1);
-}
-
-static void copy_video_data(struct video_data *video, char *src,
- unsigned int count)
-{
-#define copy_data(len) \
- do { \
- if (++video->lines_copied > video->lines_per_field) \
- goto overflow; \
- memcpy(video->dst, src, len);\
- video->dst += len + video->lines_size; \
- src += len; \
- count -= len; \
- } while (0)
-
- while (count && count >= video->lines_size) {
- if (video->prev_left) {
- copy_data(video->prev_left);
- video->prev_left = 0;
- continue;
- }
- copy_data(video->lines_size);
- }
- if (count && count < video->lines_size) {
- memcpy(video->dst, src, count);
-
- video->prev_left = video->lines_size - count;
- video->dst += count;
- }
- return;
-
-overflow:
- end_field(video);
-}
-
-static void check_trailer(struct video_data *video, char *src, int count)
-{
- struct vbi_data *vbi = video->vbi;
- int offset; /* trailer's offset */
- char *buf;
-
- offset = (video->context.pix.sizeimage / 2 + vbi->vbi_size / 2)
- - (vbi->copied + video->lines_size * video->lines_copied);
- if (video->prev_left)
- offset -= (video->lines_size - video->prev_left);
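- /* offset is where the trailer should start: the part of the current field (VBI + video) not copied yet */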
-
- if (offset > count || offset <= 0)
- goto short_package;
-
- buf = src + offset;
-
- /* trailer : (VFHS) + U32 + U32 + field_num */
- if (!strncmp(buf, "VFHS", 4)) {
- int field_num = *((u32 *)(buf + 12));
-
- if ((field_num & 1) ^ video->field_count) {
- init_copy(video, video->field_count);
- return;
- }
- copy_video_data(video, src, offset);
- }
-short_package:
- end_field(video);
-}
-
-/* ========== Check this more carefully! =========== */
-static inline void copy_vbi_data(struct vbi_data *vbi,
- char *src, unsigned int count)
-{
- struct front_face *front = vbi->front;
-
- if (front && get_frame(front, NULL)) {
- char *buf = videobuf_to_vmalloc(front->curr_frame);
-
- if (vbi->video->field_count)
- buf += (vbi->vbi_size / 2);
- memcpy(buf + vbi->copied, src, count);
- }
- vbi->copied += count;
-}
-
-/*
- * Copy the normal data (VBI or VIDEO) without the trailer.
- * VBI is not interlaced, while VIDEO is interlaced.
- */
-static inline void copy_vbi_video_data(struct video_data *video,
- char *src, unsigned int count)
-{
- struct vbi_data *vbi = video->vbi;
- unsigned int vbi_delta = (vbi->vbi_size / 2) - vbi->copied;
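- /* VBI bytes still expected for the current field */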
-
- if (vbi_delta >= count) {
- copy_vbi_data(vbi, src, count);
- } else {
- if (vbi_delta) {
- copy_vbi_data(vbi, src, vbi_delta);
-
- /* we have received both fields of the VBI */
- if (vbi->front && video->field_count)
- submit_frame(vbi->front);
- }
- copy_video_data(video, src + vbi_delta, count - vbi_delta);
- }
-}
-
-static void urb_complete_bulk(struct urb *urb)
-{
- struct front_face *front = urb->context;
- struct video_data *video = &front->pd->video_data;
- char *src = (char *)urb->transfer_buffer;
- int count = urb->actual_length;
- int ret = 0;
-
- if (!video->is_streaming || urb->status) {
- if (urb->status == -EPROTO)
- goto resend_it;
- return;
- }
- if (!get_video_frame(front, video))
- goto resend_it;
-
- if (count == urb->transfer_buffer_length)
- copy_vbi_video_data(video, src, count);
- else
- check_trailer(video, src, count);
-
-resend_it:
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret)
- log(" submit failed: error %d", ret);
-}
-
-/************************* for ISO *********************/
-#define GET_SUCCESS (0)
-#define GET_TRAILER (1)
-#define GET_TOO_MUCH_BUBBLE (2)
-#define GET_NONE (3)
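-/*
- * Scan the ISO packets from 'start' and return the next run of full-sized
- * packets as the inclusive range [*head, *tail].
- */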
-static int get_chunk(int start, struct urb *urb,
- int *head, int *tail, int *bubble_err)
-{
- struct usb_iso_packet_descriptor *pkt = NULL;
- int ret = GET_SUCCESS;
-
- for (*head = *tail = -1; start < urb->number_of_packets; start++) {
- pkt = &urb->iso_frame_desc[start];
-
- /* handle a bubble (overflow) reported by the hub */
- if (-EOVERFLOW == pkt->status) {
- if (++*bubble_err > urb->number_of_packets / 3)
- return GET_TOO_MUCH_BUBBLE;
- continue;
- }
-
- /* This is the gap */
- if (pkt->status || pkt->actual_length <= 0
- || pkt->actual_length > ISO_PKT_SIZE) {
- if (*head != -1)
- break;
- continue;
- }
-
- /* a good isochronous packet */
- if (pkt->actual_length == ISO_PKT_SIZE) {
- if (*head == -1)
- *head = start;
- *tail = start;
- continue;
- }
-
- /* trailer is here */
- if (pkt->actual_length < ISO_PKT_SIZE) {
- if (*head == -1) {
- *head = start;
- *tail = start;
- return GET_TRAILER;
- }
- break;
- }
- }
-
- if (*head == -1 && *tail == -1)
- ret = GET_NONE;
- return ret;
-}
-
-/*
- * |__|------|___|-----|_______|
- * ^ ^
- * | |
- * gap gap
- */
-static void urb_complete_iso(struct urb *urb)
-{
- struct front_face *front = urb->context;
- struct video_data *video = &front->pd->video_data;
- int bubble_err = 0, head = 0, tail = 0;
- char *src = (char *)urb->transfer_buffer;
- int ret = 0;
-
- if (!video->is_streaming)
- return;
-
- do {
- if (!get_video_frame(front, video))
- goto out;
-
- switch (get_chunk(head, urb, &head, &tail, &bubble_err)) {
- case GET_SUCCESS:
- copy_vbi_video_data(video, src + (head * ISO_PKT_SIZE),
- (tail - head + 1) * ISO_PKT_SIZE);
- break;
- case GET_TRAILER:
- check_trailer(video, src + (head * ISO_PKT_SIZE),
- ISO_PKT_SIZE);
- break;
- case GET_NONE:
- goto out;
- case GET_TOO_MUCH_BUBBLE:
- log("\t We got too much bubble");
- schedule_work(&video->bubble_work);
- return;
- }
- } while (head = tail + 1, head < urb->number_of_packets);
-
-out:
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret)
- log("usb_submit_urb err : %d", ret);
-}
-/*============================= [ end ] =====================*/
-
-static int prepare_iso_urb(struct video_data *video)
-{
- struct usb_device *udev = video->pd->udev;
- int i;
-
- if (video->urb_array[0])
- return 0;
-
- for (i = 0; i < SBUF_NUM; i++) {
- struct urb *urb;
- void *mem;
- int j;
-
- urb = usb_alloc_urb(PK_PER_URB, GFP_KERNEL);
- if (urb == NULL)
- goto out;
-
- video->urb_array[i] = urb;
- mem = usb_alloc_coherent(udev,
- ISO_PKT_SIZE * PK_PER_URB,
- GFP_KERNEL,
- &urb->transfer_dma);
-
- urb->complete = urb_complete_iso; /* handler */
- urb->dev = udev;
- urb->context = video->front;
- urb->pipe = usb_rcvisocpipe(udev,
- video->endpoint_addr);
- urb->interval = 1;
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
- urb->number_of_packets = PK_PER_URB;
- urb->transfer_buffer = mem;
- urb->transfer_buffer_length = PK_PER_URB * ISO_PKT_SIZE;
-
- for (j = 0; j < PK_PER_URB; j++) {
- urb->iso_frame_desc[j].offset = ISO_PKT_SIZE * j;
- urb->iso_frame_desc[j].length = ISO_PKT_SIZE;
- }
- }
- return 0;
-out:
- for (; i > 0; i--)
- ;
- return -ENOMEM;
-}
-
-/* return the number of successfully allocated URBs */
-int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
- struct usb_device *udev, u8 ep_addr,
- int buf_size, gfp_t gfp_flags,
- usb_complete_t complete_fn, void *context)
-{
- int i = 0;
-
- for (; i < num; i++) {
- void *mem;
- struct urb *urb = usb_alloc_urb(0, gfp_flags);
- if (urb == NULL)
- return i;
-
- mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
- &urb->transfer_dma);
- if (mem == NULL) {
- usb_free_urb(urb);
- return i;
- }
-
- usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
- mem, buf_size, complete_fn, context);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- urb_array[i] = urb;
- }
- return i;
-}
-
-void free_all_urb_generic(struct urb **urb_array, int num)
-{
- int i;
- struct urb *urb;
-
- for (i = 0; i < num; i++) {
- urb = urb_array[i];
- if (urb) {
- usb_free_coherent(urb->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
- usb_free_urb(urb);
- urb_array[i] = NULL;
- }
- }
-}
-
-static int prepare_bulk_urb(struct video_data *video)
-{
- if (video->urb_array[0])
- return 0;
-
- alloc_bulk_urbs_generic(video->urb_array, SBUF_NUM,
- video->pd->udev, video->endpoint_addr,
- 0x2000, GFP_KERNEL,
- urb_complete_bulk, video->front);
- return 0;
-}
-
-/* free the URBs */
-static void free_all_urb(struct video_data *video)
-{
- free_all_urb_generic(video->urb_array, SBUF_NUM);
-}
-
-static void pd_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-{
- videobuf_vmalloc_free(vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
-}
-
-static void pd_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-{
- struct front_face *front = q->priv_data;
- vb->state = VIDEOBUF_QUEUED;
- list_add_tail(&vb->queue, &front->active);
-}
-
-static int pd_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
-{
- struct front_face *front = q->priv_data;
- int rc;
-
- switch (front->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- struct v4l2_pix_format *pix;
-
- pix = &front->pd->video_data.context.pix;
- vb->size = pix->sizeimage; /* real frame size */
- vb->width = pix->width;
- vb->height = pix->height;
- rc = videobuf_iolock(q, vb, NULL);
- if (rc < 0)
- return rc;
- }
- break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->size = front->pd->vbi_data.vbi_size;
- rc = videobuf_iolock(q, vb, NULL);
- if (rc < 0)
- return rc;
- }
- break;
- default:
- return -EINVAL;
- }
- vb->field = field;
- vb->state = VIDEOBUF_PREPARED;
- return 0;
-}
-
-static int fire_all_urb(struct video_data *video)
-{
- int i, ret;
-
- video->is_streaming = 1;
-
- for (i = 0; i < SBUF_NUM; i++) {
- ret = usb_submit_urb(video->urb_array[i], GFP_KERNEL);
- if (ret)
- log("(%d) failed: error %d", i, ret);
- }
- return ret;
-}
-
-static int start_video_stream(struct poseidon *pd)
-{
- struct video_data *video = &pd->video_data;
- s32 cmd_status;
-
- send_set_req(pd, TAKE_REQUEST, 0, &cmd_status);
- send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_START, &cmd_status);
-
- if (pd->cur_transfer_mode) {
- prepare_iso_urb(video);
- INIT_WORK(&video->bubble_work, iso_bubble_handler);
- } else {
- /* The bulk mode does not need a bubble handler */
- prepare_bulk_urb(video);
- }
- fire_all_urb(video);
- return 0;
-}
-
-static int pd_buf_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
-{
- struct front_face *front = q->priv_data;
- struct poseidon *pd = front->pd;
-
- switch (front->type) {
- default:
- return -EINVAL;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
- struct video_data *video = &pd->video_data;
- struct v4l2_pix_format *pix = &video->context.pix;
-
- *size = PAGE_ALIGN(pix->sizeimage);/* page aligned frame size */
- if (*count < 4)
- *count = 4;
-		/* the endpoint is the same in either alternate setting */
-		video->endpoint_addr = 0x82;
-		video->vbi = &pd->vbi_data;
-		video->vbi->video = video;
-		video->pd = pd;
-		video->lines_per_field = pix->height / 2;
-		video->lines_size = pix->width * 2;
-		video->front = front;
- return start_video_stream(pd);
- }
-
- case V4L2_BUF_TYPE_VBI_CAPTURE: {
- struct vbi_data *vbi = &pd->vbi_data;
-
- *size = PAGE_ALIGN(vbi->vbi_size);
- log("size : %d", *size);
- if (*count == 0)
- *count = 4;
- }
- break;
- }
- return 0;
-}
-
-static struct videobuf_queue_ops pd_video_qops = {
- .buf_setup = pd_buf_setup,
- .buf_prepare = pd_buf_prepare,
- .buf_queue = pd_buf_queue,
- .buf_release = pd_buf_release,
-};
-
-static int vidioc_enum_fmt(struct file *file, void *fh,
- struct v4l2_fmtdesc *f)
-{
- if (ARRAY_SIZE(poseidon_formats) <= f->index)
- return -EINVAL;
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- f->flags = 0;
- f->pixelformat = poseidon_formats[f->index].fourcc;
- strcpy(f->description, poseidon_formats[f->index].name);
- return 0;
-}
-
-static int vidioc_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
-
- f->fmt.pix = pd->video_data.context.pix;
- return 0;
-}
-
-/*
- * VLC calls VIDIOC_S_STD before VIDIOC_S_FMT, while
- * Mplayer calls them in the reverse order.
- */
-static int pd_vidioc_s_fmt(struct poseidon *pd, struct v4l2_pix_format *pix)
-{
- struct video_data *video = &pd->video_data;
- struct running_context *context = &video->context;
- struct v4l2_pix_format *pix_def = &context->pix;
- s32 ret = 0, cmd_status = 0, vid_resol;
-
-	/* send the pixel format to the firmware */
- if (pix->pixelformat == V4L2_PIX_FMT_RGB565) {
- vid_resol = TLG_TUNER_VID_FORMAT_RGB_565;
- } else {
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
- vid_resol = TLG_TUNER_VID_FORMAT_YUV;
- }
- ret = send_set_req(pd, VIDEO_STREAM_FMT_SEL,
- vid_resol, &cmd_status);
-
-	/* send the resolution to the firmware */
- vid_resol = TLG_TUNE_VID_RES_720;
- switch (pix->width) {
- case 704:
- vid_resol = TLG_TUNE_VID_RES_704;
- break;
- default:
- pix->width = 720;
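-		/* fall through */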
- case 720:
- break;
- }
- ret |= send_set_req(pd, VIDEO_ROSOLU_SEL,
- vid_resol, &cmd_status);
- if (ret || cmd_status)
- return -EBUSY;
-
- pix_def->pixelformat = pix->pixelformat; /* save it */
- pix->height = (context->tvnormid & V4L2_STD_525_60) ? 480 : 576;
-
- /* Compare with the default setting */
- if ((pix_def->width != pix->width)
- || (pix_def->height != pix->height)) {
- pix_def->width = pix->width;
- pix_def->height = pix->height;
- pix_def->bytesperline = pix->width * 2;
- pix_def->sizeimage = pix->width * pix->height * 2;
- }
- *pix = *pix_def;
-
- return 0;
-}
-
-static int vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
-
- /* stop VBI here */
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != f->type)
- return -EINVAL;
-
- mutex_lock(&pd->lock);
- if (pd->file_for_stream == NULL)
- pd->file_for_stream = file;
- else if (file != pd->file_for_stream) {
- mutex_unlock(&pd->lock);
- return -EINVAL;
- }
-
- pd_vidioc_s_fmt(pd, &f->fmt.pix);
- mutex_unlock(&pd->lock);
- return 0;
-}
-
-static int vidioc_g_fmt_vbi(struct file *file, void *fh,
- struct v4l2_format *v4l2_f)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- struct v4l2_vbi_format *vbi_fmt = &v4l2_f->fmt.vbi;
-
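-	/* 1440 samples per line, sampled at 27 MHz (4 x 6.75 MHz) */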
- vbi_fmt->samples_per_line = 720 * 2;
- vbi_fmt->sampling_rate = 6750000 * 4;
- vbi_fmt->sample_format = V4L2_PIX_FMT_GREY;
- vbi_fmt->offset = 64 * 4; /*FIXME: why offset */
- if (pd->video_data.context.tvnormid & V4L2_STD_525_60) {
- vbi_fmt->start[0] = 10;
- vbi_fmt->start[1] = 264;
- vbi_fmt->count[0] = V4L_NTSC_VBI_LINES;
- vbi_fmt->count[1] = V4L_NTSC_VBI_LINES;
- } else {
- vbi_fmt->start[0] = 6;
- vbi_fmt->start[1] = 314;
- vbi_fmt->count[0] = V4L_PAL_VBI_LINES;
- vbi_fmt->count[1] = V4L_PAL_VBI_LINES;
- }
- vbi_fmt->flags = V4L2_VBI_UNSYNC;
- return 0;
-}
-
-static int set_std(struct poseidon *pd, v4l2_std_id norm)
-{
- struct video_data *video = &pd->video_data;
- struct vbi_data *vbi = &pd->vbi_data;
- struct running_context *context;
- struct v4l2_pix_format *pix;
- s32 i, ret = 0, cmd_status, param;
- int height;
-
- for (i = 0; i < POSEIDON_TVNORMS; i++) {
- if (norm & poseidon_tvnorms[i].v4l2_id) {
- param = poseidon_tvnorms[i].tlg_tvnorm;
- log("name : %s", poseidon_tvnorms[i].name);
- goto found;
- }
- }
- return -EINVAL;
-found:
- mutex_lock(&pd->lock);
- ret = send_set_req(pd, VIDEO_STD_SEL, param, &cmd_status);
- if (ret || cmd_status)
- goto out;
-
- /* Set vbi size and check the height of the frame */
- context = &video->context;
- context->tvnormid = poseidon_tvnorms[i].v4l2_id;
- if (context->tvnormid & V4L2_STD_525_60) {
- vbi->vbi_size = V4L_NTSC_VBI_FRAMESIZE;
- height = 480;
- } else {
- vbi->vbi_size = V4L_PAL_VBI_FRAMESIZE;
- height = 576;
- }
-
- pix = &context->pix;
- if (pix->height != height) {
- pix->height = height;
- pix->sizeimage = pix->width * pix->height * 2;
- }
-
-out:
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id norm)
-{
- struct front_face *front = fh;
-
- return set_std(front->pd, norm);
-}
-
-static int vidioc_g_std(struct file *file, void *fh, v4l2_std_id *norm)
-{
- struct front_face *front = fh;
-
- *norm = front->pd->video_data.context.tvnormid;
- return 0;
-}
-
-static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *in)
-{
- if (in->index >= POSEIDON_INPUTS)
- return -EINVAL;
- strcpy(in->name, pd_inputs[in->index].name);
- in->type = V4L2_INPUT_TYPE_TUNER;
-
-	/*
-	 * The audio input index is tied to this video input;
-	 * the Poseidon has only one audio/video pair, so use index "0".
-	 */
- in->audioset = 1;
- in->tuner = 0;
- in->std = V4L2_STD_ALL;
- in->status = 0;
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- struct running_context *context = &pd->video_data.context;
-
- *i = context->sig_index;
- return 0;
-}
-
-/* We can support several inputs */
-static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- s32 ret, cmd_status;
-
- if (i >= POSEIDON_INPUTS)
- return -EINVAL;
- ret = send_set_req(pd, SGNL_SRC_SEL,
- pd_inputs[i].tlg_src, &cmd_status);
- if (ret)
- return ret;
-
- pd->video_data.context.sig_index = i;
- return 0;
-}
-
-static int tlg_s_ctrl(struct v4l2_ctrl *c)
-{
- struct poseidon *pd = container_of(c->handler, struct poseidon,
- video_data.ctrl_handler);
- struct tuner_custom_parameter_s param = {0};
- s32 ret = 0, cmd_status, params;
-
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- param.param_id = CUST_PARM_ID_BRIGHTNESS_CTRL;
- break;
- case V4L2_CID_CONTRAST:
- param.param_id = CUST_PARM_ID_CONTRAST_CTRL;
- break;
- case V4L2_CID_HUE:
- param.param_id = CUST_PARM_ID_HUE_CTRL;
- break;
- case V4L2_CID_SATURATION:
- param.param_id = CUST_PARM_ID_SATURATION_CTRL;
- break;
- }
- param.param_value = c->val;
-	params = *(s32 *)&param; /* pack the 16-bit id/value pair into one 32-bit word */
-
- mutex_lock(&pd->lock);
- ret = send_set_req(pd, TUNER_CUSTOM_PARAMETER, params, &cmd_status);
- ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status);
- mutex_unlock(&pd->lock);
-
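-	/* sleep ~250 ms, presumably to let the firmware apply the new value */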
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/4);
- return ret;
-}
-
-/* Audio ioctls */
-static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
-{
- if (0 != a->index)
- return -EINVAL;
- a->capability = V4L2_AUDCAP_STEREO;
- strcpy(a->name, "USB audio in");
-	/* The Poseidon has no AVL function. */
- a->mode = 0;
- return 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
-{
- a->index = 0;
- a->capability = V4L2_AUDCAP_STEREO;
- strcpy(a->name, "USB audio in");
- a->mode = 0;
- return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a)
-{
- return (0 == a->index) ? 0 : -EINVAL;
-}
-
-/* Tuner ioctls */
-static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *tuner)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- struct tuner_atv_sig_stat_s atv_stat;
- s32 count = 5, ret, cmd_status;
- int index;
-
- if (0 != tuner->index)
- return -EINVAL;
-
- mutex_lock(&pd->lock);
- ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_ANALOG_TV,
- &atv_stat, &cmd_status, sizeof(atv_stat));
-
- while (atv_stat.sig_lock_busy && count-- && !ret) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
-
- ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_ANALOG_TV,
- &atv_stat, &cmd_status, sizeof(atv_stat));
- }
- mutex_unlock(&pd->lock);
-
- if (debug_mode)
- log("P:%d,S:%d", atv_stat.sig_present, atv_stat.sig_strength);
-
- if (ret || cmd_status)
- tuner->signal = 0;
- else if (atv_stat.sig_present && !atv_stat.sig_strength)
- tuner->signal = 0xFFFF;
- else
- tuner->signal = (atv_stat.sig_strength * 255 / 10) << 8;
-
- strcpy(tuner->name, "Telegent Systems");
- tuner->type = V4L2_TUNER_ANALOG_TV;
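-	/* V4L2 expresses tuner frequencies in units of 62.5 kHz */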
- tuner->rangelow = TUNER_FREQ_MIN / 62500;
- tuner->rangehigh = TUNER_FREQ_MAX / 62500;
- tuner->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
- V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
- index = pd->video_data.context.audio_idx;
- tuner->rxsubchans = pd_audio_modes[index].v4l2_audio_sub;
- tuner->audmode = pd_audio_modes[index].v4l2_audio_mode;
- tuner->afc = 0;
- return 0;
-}
-
-static int pd_vidioc_s_tuner(struct poseidon *pd, int index)
-{
- s32 ret = 0, cmd_status, param, audiomode;
-
- mutex_lock(&pd->lock);
- param = pd_audio_modes[index].tlg_audio_mode;
- ret = send_set_req(pd, TUNER_AUD_MODE, param, &cmd_status);
- audiomode = get_audio_std(pd->video_data.context.tvnormid);
- ret |= send_set_req(pd, TUNER_AUD_ANA_STD, audiomode,
- &cmd_status);
- if (!ret)
- pd->video_data.context.audio_idx = index;
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *a)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- int index;
-
- if (0 != a->index)
- return -EINVAL;
- for (index = 0; index < POSEIDON_AUDIOMODS; index++)
- if (a->audmode == pd_audio_modes[index].v4l2_audio_mode)
- return pd_vidioc_s_tuner(pd, index);
- return -EINVAL;
-}
-
-static int vidioc_g_frequency(struct file *file, void *fh,
- struct v4l2_frequency *freq)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- struct running_context *context = &pd->video_data.context;
-
- if (0 != freq->tuner)
- return -EINVAL;
- freq->frequency = context->freq;
- freq->type = V4L2_TUNER_ANALOG_TV;
- return 0;
-}
-
-static int set_frequency(struct poseidon *pd, u32 *frequency)
-{
- s32 ret = 0, param, cmd_status;
- struct running_context *context = &pd->video_data.context;
-
- *frequency = clamp(*frequency,
- TUNER_FREQ_MIN / 62500, TUNER_FREQ_MAX / 62500);
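-	/* convert from 62.5 kHz V4L2 units, presumably to kHz for the firmware */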
- param = (*frequency) * 62500 / 1000;
-
- mutex_lock(&pd->lock);
- ret = send_set_req(pd, TUNE_FREQ_SELECT, param, &cmd_status);
- ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status);
-
- msleep(250); /* wait for a while until the hardware is ready. */
- context->freq = *frequency;
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static int vidioc_s_frequency(struct file *file, void *fh,
- const struct v4l2_frequency *freq)
-{
- struct front_face *front = fh;
- struct poseidon *pd = front->pd;
- u32 frequency = freq->frequency;
-
- if (freq->tuner)
- return -EINVAL;
-#ifdef CONFIG_PM
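-	/* hook the video-specific suspend/resume handlers */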
- pd->pm_suspend = pm_video_suspend;
- pd->pm_resume = pm_video_resume;
-#endif
- return set_frequency(pd, &frequency);
-}
-
-static int vidioc_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *b)
-{
- struct front_face *front = file->private_data;
- return videobuf_reqbufs(&front->q, b);
-}
-
-static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct front_face *front = file->private_data;
- return videobuf_querybuf(&front->q, b);
-}
-
-static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct front_face *front = file->private_data;
- return videobuf_qbuf(&front->q, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct front_face *front = file->private_data;
- return videobuf_dqbuf(&front->q, b, file->f_flags & O_NONBLOCK);
-}
-
-/* Just stop the URBs; do not free them */
-static int usb_transfer_stop(struct video_data *video)
-{
- if (video->is_streaming) {
- int i;
- s32 cmd_status;
- struct poseidon *pd = video->pd;
-
- video->is_streaming = 0;
- for (i = 0; i < SBUF_NUM; ++i) {
- if (video->urb_array[i])
- usb_kill_urb(video->urb_array[i]);
- }
-
- send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP,
- &cmd_status);
- }
- return 0;
-}
-
-int stop_all_video_stream(struct poseidon *pd)
-{
- struct video_data *video = &pd->video_data;
- struct vbi_data *vbi = &pd->vbi_data;
-
- mutex_lock(&pd->lock);
- if (video->is_streaming) {
- struct front_face *front = video->front;
-
- /* stop the URBs */
- usb_transfer_stop(video);
- free_all_urb(video);
-
- /* stop the host side of VIDEO */
- videobuf_stop(&front->q);
- videobuf_mmap_free(&front->q);
-
- /* stop the host side of VBI */
- front = vbi->front;
- if (front) {
- videobuf_stop(&front->q);
- videobuf_mmap_free(&front->q);
- }
- }
- mutex_unlock(&pd->lock);
- return 0;
-}
-
-/*
- * Bubbles can seriously damage the video quality,
- * though they occur only in very rare situations.
- */
-static void iso_bubble_handler(struct work_struct *w)
-{
- struct video_data *video;
- struct poseidon *pd;
-
- video = container_of(w, struct video_data, bubble_work);
- pd = video->pd;
-
- mutex_lock(&pd->lock);
- usb_transfer_stop(video);
- msleep(500);
- start_video_stream(pd);
- mutex_unlock(&pd->lock);
-}
-
-
-static int vidioc_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct front_face *front = fh;
-
- if (unlikely(type != front->type))
- return -EINVAL;
- return videobuf_streamon(&front->q);
-}
-
-static int vidioc_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct front_face *front = file->private_data;
-
- if (unlikely(type != front->type))
- return -EINVAL;
- return videobuf_streamoff(&front->q);
-}
-
-/* Set the firmware's default values; this needs the alternate setting */
-static int pd_video_checkmode(struct poseidon *pd)
-{
- s32 ret = 0, cmd_status, audiomode;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/2);
-
-	/* choose the alternate setting */
- ret = usb_set_interface(pd->udev, 0,
- (pd->cur_transfer_mode ?
- ISO_3K_BULK_ALTERNATE_IFACE :
- BULK_ALTERNATE_IFACE));
- if (ret < 0)
- goto error;
-
-	/* set default parameters for PAL-D, with the VBI enabled */
- ret = set_tuner_mode(pd, TLG_MODE_ANALOG_TV);
- ret |= send_set_req(pd, SGNL_SRC_SEL,
- TLG_SIG_SRC_ANTENNA, &cmd_status);
- ret |= send_set_req(pd, VIDEO_STD_SEL,
- TLG_TUNE_VSTD_PAL_D, &cmd_status);
- ret |= send_set_req(pd, VIDEO_STREAM_FMT_SEL,
- TLG_TUNER_VID_FORMAT_YUV, &cmd_status);
- ret |= send_set_req(pd, VIDEO_ROSOLU_SEL,
- TLG_TUNE_VID_RES_720, &cmd_status);
- ret |= send_set_req(pd, TUNE_FREQ_SELECT, TUNER_FREQ_MIN, &cmd_status);
- ret |= send_set_req(pd, VBI_DATA_SEL, 1, &cmd_status);/* enable vbi */
-
- /* set the audio */
- audiomode = get_audio_std(pd->video_data.context.tvnormid);
- ret |= send_set_req(pd, TUNER_AUD_ANA_STD, audiomode, &cmd_status);
- ret |= send_set_req(pd, TUNER_AUD_MODE,
- TLG_TUNE_TVAUDIO_MODE_STEREO, &cmd_status);
- ret |= send_set_req(pd, AUDIO_SAMPLE_RATE_SEL,
- ATV_AUDIO_RATE_48K, &cmd_status);
-error:
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int pm_video_suspend(struct poseidon *pd)
-{
- /* stop audio */
- pm_alsa_suspend(pd);
-
- /* stop and free all the URBs */
- usb_transfer_stop(&pd->video_data);
- free_all_urb(&pd->video_data);
-
- /* reset the interface */
- usb_set_interface(pd->udev, 0, 0);
- msleep(300);
- return 0;
-}
-
-static int restore_v4l2_context(struct poseidon *pd,
- struct running_context *context)
-{
- struct front_face *front = pd->video_data.front;
-
- pd_video_checkmode(pd);
-
- set_std(pd, context->tvnormid);
- vidioc_s_input(NULL, front, context->sig_index);
- pd_vidioc_s_tuner(pd, context->audio_idx);
- pd_vidioc_s_fmt(pd, &context->pix);
- set_frequency(pd, &context->freq);
- return 0;
-}
-
-static int pm_video_resume(struct poseidon *pd)
-{
- struct video_data *video = &pd->video_data;
-
- /* resume the video */
-	/* [1] restore the original V4L2 parameters */
- restore_v4l2_context(pd, &video->context);
-
-	/* [2] initialize the video copy variables */
- if (video->front->curr_frame)
- init_copy(video, 0);
-
- /* [3] fire urbs */
- start_video_stream(pd);
-
- /* resume the audio */
- pm_alsa_resume(pd);
- return 0;
-}
-#endif
-
-void set_debug_mode(struct video_device *vfd, int debug_mode)
-{
- vfd->debug = 0;
- if (debug_mode & 0x1)
- vfd->debug = V4L2_DEBUG_IOCTL;
- if (debug_mode & 0x2)
- vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
-}
-
-static void init_video_context(struct running_context *context)
-{
- context->sig_index = 0;
- context->audio_idx = 1; /* stereo */
- context->tvnormid = V4L2_STD_PAL_D;
- context->pix = (struct v4l2_pix_format) {
- .width = 720,
- .height = 576,
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .field = V4L2_FIELD_INTERLACED,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 576 * 2,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- };
-}
-
-static int pd_video_open(struct file *file)
-{
- struct video_device *vfd = video_devdata(file);
- struct poseidon *pd = video_get_drvdata(vfd);
- struct front_face *front = NULL;
- int ret = -ENOMEM;
-
- mutex_lock(&pd->lock);
- usb_autopm_get_interface(pd->interface);
-
- if (pd->state && !(pd->state & POSEIDON_STATE_ANALOG)) {
- ret = -EBUSY;
- goto out;
- }
- front = kzalloc(sizeof(struct front_face), GFP_KERNEL);
- if (!front)
- goto out;
- if (vfd->vfl_type == VFL_TYPE_GRABBER) {
- pd->cur_transfer_mode = usb_transfer_mode;/* bulk or iso */
- init_video_context(&pd->video_data.context);
-
- ret = pd_video_checkmode(pd);
- if (ret < 0) {
- kfree(front);
- ret = -1;
- goto out;
- }
-
- front->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- pd->video_data.users++;
- set_debug_mode(vfd, debug_mode);
-
- videobuf_queue_vmalloc_init(&front->q, &pd_video_qops,
- NULL, &front->queue_lock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
-				V4L2_FIELD_INTERLACED, /* video is interlaced */
-				sizeof(struct videobuf_buffer), /* it's enough */
- front, NULL);
- } else {
- front->type = V4L2_BUF_TYPE_VBI_CAPTURE;
- pd->vbi_data.front = front;
- pd->vbi_data.users++;
-
- videobuf_queue_vmalloc_init(&front->q, &pd_video_qops,
- NULL, &front->queue_lock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_NONE, /* vbi is NONE mode */
- sizeof(struct videobuf_buffer),
- front, NULL);
- }
-
- pd->state |= POSEIDON_STATE_ANALOG;
- front->pd = pd;
- front->curr_frame = NULL;
- INIT_LIST_HEAD(&front->active);
- spin_lock_init(&front->queue_lock);
-
- file->private_data = front;
- kref_get(&pd->kref);
-
- mutex_unlock(&pd->lock);
- return 0;
-out:
- usb_autopm_put_interface(pd->interface);
- mutex_unlock(&pd->lock);
- return ret;
-}
-
-static int pd_video_release(struct file *file)
-{
- struct front_face *front = file->private_data;
- struct poseidon *pd = front->pd;
- s32 cmd_status = 0;
-
- mutex_lock(&pd->lock);
-
- if (front->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- /* stop the device, and free the URBs */
- usb_transfer_stop(&pd->video_data);
- free_all_urb(&pd->video_data);
-
- /* stop the firmware */
- send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP,
- &cmd_status);
-
- pd->file_for_stream = NULL;
- pd->video_data.users--;
- } else if (front->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- pd->vbi_data.front = NULL;
- pd->vbi_data.users--;
- }
- if (!pd->vbi_data.users && !pd->video_data.users)
- pd->state &= ~POSEIDON_STATE_ANALOG;
- videobuf_stop(&front->q);
- videobuf_mmap_free(&front->q);
-
- usb_autopm_put_interface(pd->interface);
- mutex_unlock(&pd->lock);
-
- kfree(front);
- file->private_data = NULL;
- kref_put(&pd->kref, poseidon_delete);
- return 0;
-}
-
-static int pd_video_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct front_face *front = file->private_data;
- return videobuf_mmap_mapper(&front->q, vma);
-}
-
-static unsigned int pd_video_poll(struct file *file, poll_table *table)
-{
- struct front_face *front = file->private_data;
- return videobuf_poll_stream(file, &front->q, table);
-}
-
-static ssize_t pd_video_read(struct file *file, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct front_face *front = file->private_data;
- return videobuf_read_stream(&front->q, buffer, count, ppos,
- 0, file->f_flags & O_NONBLOCK);
-}
-
-/* This struct works for both VIDEO and VBI */
-static const struct v4l2_file_operations pd_video_fops = {
- .owner = THIS_MODULE,
- .open = pd_video_open,
- .release = pd_video_release,
- .read = pd_video_read,
- .poll = pd_video_poll,
- .mmap = pd_video_mmap,
-	.ioctl		= video_ioctl2, /* may be changed in the future */
-};
-
-static const struct v4l2_ioctl_ops pd_video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
-
- /* Video format */
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
- .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi, /* VBI */
-
- /* Input */
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_enum_input = vidioc_enum_input,
-
- /* Audio ioctls */
- .vidioc_enumaudio = vidioc_enumaudio,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
-
- /* Tuner ioctls */
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_std = vidioc_g_std,
- .vidioc_s_std = vidioc_s_std,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
-
- /* Buffer handlers */
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
-
- /* Stream on/off */
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
-};
-
-static struct video_device pd_video_template = {
- .name = "Telegent-Video",
- .fops = &pd_video_fops,
- .minor = -1,
- .release = video_device_release_empty,
- .tvnorms = V4L2_STD_ALL,
- .ioctl_ops = &pd_video_ioctl_ops,
-};
-
-static const struct v4l2_ctrl_ops tlg_ctrl_ops = {
- .s_ctrl = tlg_s_ctrl,
-};
-
-void pd_video_exit(struct poseidon *pd)
-{
- struct video_data *video = &pd->video_data;
- struct vbi_data *vbi = &pd->vbi_data;
-
- video_unregister_device(&video->v_dev);
- video_unregister_device(&vbi->v_dev);
- v4l2_ctrl_handler_free(&video->ctrl_handler);
- log();
-}
-
-int pd_video_init(struct poseidon *pd)
-{
- struct video_data *video = &pd->video_data;
- struct vbi_data *vbi = &pd->vbi_data;
- struct v4l2_ctrl_handler *hdl = &video->ctrl_handler;
- u32 freq = TUNER_FREQ_MIN / 62500;
- int ret = -ENOMEM;
-
- v4l2_ctrl_handler_init(hdl, 4);
- v4l2_ctrl_new_std(hdl, &tlg_ctrl_ops, V4L2_CID_BRIGHTNESS,
- 0, 10000, 1, 100);
- v4l2_ctrl_new_std(hdl, &tlg_ctrl_ops, V4L2_CID_CONTRAST,
- 0, 10000, 1, 100);
- v4l2_ctrl_new_std(hdl, &tlg_ctrl_ops, V4L2_CID_HUE,
- 0, 10000, 1, 100);
- v4l2_ctrl_new_std(hdl, &tlg_ctrl_ops, V4L2_CID_SATURATION,
- 0, 10000, 1, 100);
- if (hdl->error) {
- v4l2_ctrl_handler_free(hdl);
- return hdl->error;
- }
- set_frequency(pd, &freq);
- video->v_dev = pd_video_template;
- video->v_dev.v4l2_dev = &pd->v4l2_dev;
- video->v_dev.ctrl_handler = hdl;
- video_set_drvdata(&video->v_dev, pd);
-
- ret = video_register_device(&video->v_dev, VFL_TYPE_GRABBER, -1);
- if (ret != 0)
- goto out;
-
- /* VBI uses the same template as video */
- vbi->v_dev = pd_video_template;
- vbi->v_dev.v4l2_dev = &pd->v4l2_dev;
- vbi->v_dev.ctrl_handler = hdl;
- video_set_drvdata(&vbi->v_dev, pd);
- ret = video_register_device(&vbi->v_dev, VFL_TYPE_VBI, -1);
- if (ret != 0)
- goto out;
- log("register VIDEO/VBI devices");
- return 0;
-out:
-	log("VIDEO/VBI device registration failed: %d", ret);
- pd_video_exit(pd);
- return ret;
-}
diff --git a/drivers/staging/media/tlg2300/vendorcmds.h b/drivers/staging/media/tlg2300/vendorcmds.h
deleted file mode 100644
index ba6f4ae3b2c2..000000000000
--- a/drivers/staging/media/tlg2300/vendorcmds.h
+++ /dev/null
@@ -1,243 +0,0 @@
-#ifndef VENDOR_CMD_H_
-#define VENDOR_CMD_H_
-
-#define BULK_ALTERNATE_IFACE (2)
-#define ISO_3K_BULK_ALTERNATE_IFACE (1)
-#define REQ_SET_CMD			(0x00)
-#define REQ_GET_CMD			(0x80)
-
-enum tlg__analog_audio_standard {
- TLG_TUNE_ASTD_NONE = 0x00000000,
- TLG_TUNE_ASTD_A2 = 0x00000001,
- TLG_TUNE_ASTD_NICAM = 0x00000002,
- TLG_TUNE_ASTD_EIAJ = 0x00000004,
- TLG_TUNE_ASTD_BTSC = 0x00000008,
- TLG_TUNE_ASTD_FM_US = 0x00000010,
- TLG_TUNE_ASTD_FM_EUR = 0x00000020,
- TLG_TUNE_ASTD_ALL = 0x0000003f
-};
-
-/*
- * identifiers for Custom Parameter messages.
- * @typedef cmd_custom_param_id_t
- */
-enum cmd_custom_param_id {
- CUST_PARM_ID_NONE = 0x00,
- CUST_PARM_ID_BRIGHTNESS_CTRL = 0x01,
- CUST_PARM_ID_CONTRAST_CTRL = 0x02,
- CUST_PARM_ID_HUE_CTRL = 0x03,
- CUST_PARM_ID_SATURATION_CTRL = 0x04,
- CUST_PARM_ID_AUDIO_SNR_THRESHOLD = 0x10,
- CUST_PARM_ID_AUDIO_AGC_THRESHOLD = 0x11,
- CUST_PARM_ID_MAX
-};
-
-struct tuner_custom_parameter_s {
- uint16_t param_id; /* Parameter identifier */
- uint16_t param_value; /* Parameter value */
-};
-
-struct tuner_ber_rate_s {
- uint32_t ber_rate; /* BER sample rate in seconds */
-};
-
-struct tuner_atv_sig_stat_s {
- uint32_t sig_present;
- uint32_t sig_locked;
- uint32_t sig_lock_busy;
- uint32_t sig_strength; /* milliDb */
- uint32_t tv_audio_chan; /* mono/stereo/sap*/
- uint32_t mvision_stat; /* macrovision status */
-};
-
-struct tuner_dtv_sig_stat_s {
- uint32_t sig_present; /* Boolean*/
- uint32_t sig_locked; /* Boolean */
- uint32_t sig_lock_busy; /* Boolean (Can this time-out?) */
- uint32_t sig_strength; /* milliDb*/
-};
-
-struct tuner_fm_sig_stat_s {
- uint32_t sig_present; /* Boolean*/
- uint32_t sig_locked; /* Boolean */
- uint32_t sig_lock_busy; /* Boolean */
- uint32_t sig_stereo_mono;/* TBD*/
- uint32_t sig_strength; /* milliDb*/
-};
-
-enum _tag_tlg_tune_srv_cmd {
- TLG_TUNE_PLAY_SVC_START = 1,
- TLG_TUNE_PLAY_SVC_STOP
-};
-
-enum _tag_tune_atv_audio_mode_caps {
- TLG_TUNE_TVAUDIO_MODE_MONO = 0x00000001,
- TLG_TUNE_TVAUDIO_MODE_STEREO = 0x00000002,
- TLG_TUNE_TVAUDIO_MODE_LANG_A = 0x00000010,/* Primary language*/
- TLG_TUNE_TVAUDIO_MODE_LANG_B = 0x00000020,/* 2nd avail language*/
- TLG_TUNE_TVAUDIO_MODE_LANG_C = 0x00000040
-};
-
-
-enum _tag_tuner_atv_audio_rates {
- ATV_AUDIO_RATE_NONE = 0x00,/* Audio not supported*/
- ATV_AUDIO_RATE_32K = 0x01,/* Audio rate = 32 KHz*/
- ATV_AUDIO_RATE_48K = 0x02, /* Audio rate = 48 KHz*/
- ATV_AUDIO_RATE_31_25K = 0x04 /* Audio rate = 31.25KHz */
-};
-
-enum _tag_tune_atv_vid_res_caps {
- TLG_TUNE_VID_RES_NONE = 0x00000000,
- TLG_TUNE_VID_RES_720 = 0x00000001,
- TLG_TUNE_VID_RES_704 = 0x00000002,
- TLG_TUNE_VID_RES_360 = 0x00000004
-};
-
-enum _tag_tuner_analog_video_format {
- TLG_TUNER_VID_FORMAT_YUV = 0x00000001,
- TLG_TUNER_VID_FORMAT_YCRCB = 0x00000002,
- TLG_TUNER_VID_FORMAT_RGB_565 = 0x00000004,
-};
-
-enum tlg_ext_audio_support {
- TLG_EXT_AUDIO_NONE = 0x00,/* No external audio input supported */
- TLG_EXT_AUDIO_LR = 0x01/* LR external audio inputs supported*/
-};
-
-enum {
- TLG_MODE_NONE = 0x00, /* No Mode specified*/
- TLG_MODE_ANALOG_TV = 0x01, /* Analog Television mode*/
- TLG_MODE_ANALOG_TV_UNCOMP = 0x01, /* Analog Television mode*/
- TLG_MODE_ANALOG_TV_COMP = 0x02, /* Analog TV mode (compressed)*/
- TLG_MODE_FM_RADIO = 0x04, /* FM Radio mode*/
- TLG_MODE_DVB_T = 0x08, /* Digital TV (DVB-T)*/
-};
-
-enum tlg_signal_sources_t {
- TLG_SIG_SRC_NONE = 0x00,/* Signal source not specified */
- TLG_SIG_SRC_ANTENNA = 0x01,/* Signal src is: Antenna */
- TLG_SIG_SRC_CABLE = 0x02,/* Signal src is: Coax Cable*/
- TLG_SIG_SRC_SVIDEO = 0x04,/* Signal src is: S_VIDEO */
- TLG_SIG_SRC_COMPOSITE = 0x08 /* Signal src is: Composite Video */
-};
-
-enum tuner_analog_video_standard {
- TLG_TUNE_VSTD_NONE = 0x00000000,
- TLG_TUNE_VSTD_NTSC_M = 0x00000001,
- TLG_TUNE_VSTD_NTSC_M_J = 0x00000002,/* Japan */
- TLG_TUNE_VSTD_PAL_B = 0x00000010,
- TLG_TUNE_VSTD_PAL_D = 0x00000020,
- TLG_TUNE_VSTD_PAL_G = 0x00000040,
- TLG_TUNE_VSTD_PAL_H = 0x00000080,
- TLG_TUNE_VSTD_PAL_I = 0x00000100,
- TLG_TUNE_VSTD_PAL_M = 0x00000200,
- TLG_TUNE_VSTD_PAL_N = 0x00000400,
- TLG_TUNE_VSTD_SECAM_B = 0x00001000,
- TLG_TUNE_VSTD_SECAM_D = 0x00002000,
- TLG_TUNE_VSTD_SECAM_G = 0x00004000,
- TLG_TUNE_VSTD_SECAM_H = 0x00008000,
- TLG_TUNE_VSTD_SECAM_K = 0x00010000,
- TLG_TUNE_VSTD_SECAM_K1 = 0x00020000,
- TLG_TUNE_VSTD_SECAM_L = 0x00040000,
- TLG_TUNE_VSTD_SECAM_L1 = 0x00080000,
- TLG_TUNE_VSTD_PAL_N_COMBO = 0x00100000
-};
-
-enum tlg_mode_caps {
- TLG_MODE_CAPS_NONE = 0x00, /* No Mode specified */
- TLG_MODE_CAPS_ANALOG_TV_UNCOMP = 0x01, /* Analog TV mode */
- TLG_MODE_CAPS_ANALOG_TV_COMP = 0x02, /* Analog TV (compressed)*/
- TLG_MODE_CAPS_FM_RADIO = 0x04, /* FM Radio mode */
- TLG_MODE_CAPS_DVB_T = 0x08, /* Digital TV (DVB-T) */
-};
-
-enum poseidon_vendor_cmds {
- LAST_CMD_STAT = 0x00,
- GET_CHIP_ID = 0x01,
- GET_FW_ID = 0x02,
- PRODUCT_CAPS = 0x03,
-
- TUNE_MODE_CAP_ATV = 0x10,
-	TUNE_MODE_CAP_ATVCOMP	= 0x10,
- TUNE_MODE_CAP_DVBT = 0x10,
- TUNE_MODE_CAP_FM = 0x10,
- TUNE_MODE_SELECT = 0x11,
- TUNE_FREQ_SELECT = 0x12,
- SGNL_SRC_SEL = 0x13,
-
- VIDEO_STD_SEL = 0x14,
- VIDEO_STREAM_FMT_SEL = 0x15,
- VIDEO_ROSOLU_AVAIL = 0x16,
- VIDEO_ROSOLU_SEL = 0x17,
- VIDEO_CONT_PROTECT = 0x20,
-
- VCR_TIMING_MODSEL = 0x21,
- EXT_AUDIO_CAP = 0x22,
- EXT_AUDIO_SEL = 0x23,
- TEST_PATTERN_SEL = 0x24,
- VBI_DATA_SEL = 0x25,
- AUDIO_SAMPLE_RATE_CAP = 0x28,
- AUDIO_SAMPLE_RATE_SEL = 0x29,
- TUNER_AUD_MODE = 0x2a,
- TUNER_AUD_MODE_AVAIL = 0x2b,
- TUNER_AUD_ANA_STD = 0x2c,
- TUNER_CUSTOM_PARAMETER = 0x2f,
-
- DVBT_TUNE_MODE_SEL = 0x30,
- DVBT_BANDW_CAP = 0x31,
- DVBT_BANDW_SEL = 0x32,
- DVBT_GUARD_INTERV_CAP = 0x33,
- DVBT_GUARD_INTERV_SEL = 0x34,
- DVBT_MODULATION_CAP = 0x35,
- DVBT_MODULATION_SEL = 0x36,
- DVBT_INNER_FEC_RATE_CAP = 0x37,
- DVBT_INNER_FEC_RATE_SEL = 0x38,
- DVBT_TRANS_MODE_CAP = 0x39,
- DVBT_TRANS_MODE_SEL = 0x3a,
- DVBT_SEARCH_RANG = 0x3c,
-
- TUNER_SETUP_ANALOG = 0x40,
- TUNER_SETUP_DIGITAL = 0x41,
- TUNER_SETUP_FM_RADIO = 0x42,
- TAKE_REQUEST = 0x43, /* Take effect of the command */
- PLAY_SERVICE = 0x44, /* Play start or Play stop */
- TUNER_STATUS = 0x45,
- TUNE_PROP_DVBT = 0x46,
- ERR_RATE_STATS = 0x47,
- TUNER_BER_RATE = 0x48,
-
- SCAN_CAPS = 0x50,
- SCAN_SETUP = 0x51,
- SCAN_SERVICE = 0x52,
- SCAN_STATS = 0x53,
-
- PID_SET = 0x58,
- PID_UNSET = 0x59,
- PID_LIST = 0x5a,
-
- IRD_CAP = 0x60,
- IRD_MODE_SEL = 0x61,
- IRD_SETUP = 0x62,
-
- PTM_MODE_CAP = 0x70,
- PTM_MODE_SEL = 0x71,
- PTM_SERVICE = 0x72,
- TUNER_REG_SCRIPT = 0x73,
- CMD_CHIP_RST = 0x74,
-};
-
-enum tlg_bw {
- TLG_BW_5 = 5,
- TLG_BW_6 = 6,
- TLG_BW_7 = 7,
- TLG_BW_8 = 8,
- TLG_BW_12 = 12,
- TLG_BW_15 = 15
-};
-
-struct cmd_firmware_vers_s {
- uint8_t fw_rev_major;
- uint8_t fw_rev_minor;
- uint16_t fw_patch;
-};
-#endif /* VENDOR_CMD_H_ */
diff --git a/drivers/staging/media/vino/Kconfig b/drivers/staging/media/vino/Kconfig
deleted file mode 100644
index 03700dadafd8..000000000000
--- a/drivers/staging/media/vino/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-config VIDEO_VINO
- tristate "SGI Vino Video For Linux (Deprecated)"
- depends on I2C && SGI_IP22 && VIDEO_V4L2
- select VIDEO_SAA7191 if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to build in support for the Vino video input system found
- on SGI Indy machines.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
-	  the linux-media mailing list.
-
-config VIDEO_SAA7191
- tristate "Philips SAA7191 video decoder (Deprecated)"
- depends on VIDEO_V4L2 && I2C
- ---help---
- Support for the Philips SAA7191 video decoder.
-
- This driver is deprecated and will be removed soon. If you have
- hardware for this and you want to work on this driver, then contact
-	  the linux-media mailing list.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7191.
diff --git a/drivers/staging/media/vino/Makefile b/drivers/staging/media/vino/Makefile
deleted file mode 100644
index 914c2513687c..000000000000
--- a/drivers/staging/media/vino/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_VIDEO_VINO) += indycam.o
-obj-$(CONFIG_VIDEO_VINO) += vino.o
-obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
diff --git a/drivers/staging/media/vino/indycam.c b/drivers/staging/media/vino/indycam.c
deleted file mode 100644
index f1d192bbcb4c..000000000000
--- a/drivers/staging/media/vino/indycam.c
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * indycam.c - Silicon Graphics IndyCam digital camera driver
- *
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-/* IndyCam decodes stream of photons into digital image representation ;-) */
-#include <linux/videodev2.h>
-#include <linux/i2c.h>
-#include <media/v4l2-device.h>
-
-#include "indycam.h"
-
-#define INDYCAM_MODULE_VERSION "0.0.5"
-
-MODULE_DESCRIPTION("SGI IndyCam driver");
-MODULE_VERSION(INDYCAM_MODULE_VERSION);
-MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
-MODULE_LICENSE("GPL");
-
-
-// #define INDYCAM_DEBUG
-
-#ifdef INDYCAM_DEBUG
-#define dprintk(x...) printk("IndyCam: " x)
-#define indycam_regdump(client) indycam_regdump_debug(client)
-#else
-#define dprintk(x...)
-#define indycam_regdump(client)
-#endif
-
-struct indycam {
- struct v4l2_subdev sd;
- u8 version;
-};
-
-static inline struct indycam *to_indycam(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct indycam, sd);
-}
-
-static const u8 initseq[] = {
- INDYCAM_CONTROL_AGCENA, /* INDYCAM_CONTROL */
- INDYCAM_SHUTTER_60, /* INDYCAM_SHUTTER */
- INDYCAM_GAIN_DEFAULT, /* INDYCAM_GAIN */
- 0x00, /* INDYCAM_BRIGHTNESS (read-only) */
- INDYCAM_RED_BALANCE_DEFAULT, /* INDYCAM_RED_BALANCE */
- INDYCAM_BLUE_BALANCE_DEFAULT, /* INDYCAM_BLUE_BALANCE */
- INDYCAM_RED_SATURATION_DEFAULT, /* INDYCAM_RED_SATURATION */
- INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
-};
-
-/* IndyCam register handling */
-
-static int indycam_read_reg(struct v4l2_subdev *sd, u8 reg, u8 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (reg == INDYCAM_REG_RESET) {
- dprintk("indycam_read_reg(): "
- "skipping write-only register %d\n", reg);
- *value = 0;
- return 0;
- }
-
- ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0) {
- printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
- "register = 0x%02x\n", reg);
- return ret;
- }
-
- *value = (u8)ret;
-
- return 0;
-}
-
-static int indycam_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int err;
-
- if (reg == INDYCAM_REG_BRIGHTNESS || reg == INDYCAM_REG_VERSION) {
- dprintk("indycam_write_reg(): "
- "skipping read-only register %d\n", reg);
- return 0;
- }
-
- dprintk("Writing Reg %d = 0x%02x\n", reg, value);
- err = i2c_smbus_write_byte_data(client, reg, value);
-
- if (err) {
- printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
- "register = 0x%02x, value = 0x%02x\n", reg, value);
- }
- return err;
-}
-
-static int indycam_write_block(struct v4l2_subdev *sd, u8 reg,
- u8 length, u8 *data)
-{
- int i, err;
-
- for (i = 0; i < length; i++) {
- err = indycam_write_reg(sd, reg + i, data[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/* Helper functions */
-
-#ifdef INDYCAM_DEBUG
-static void indycam_regdump_debug(struct v4l2_subdev *sd)
-{
- int i;
- u8 val;
-
- for (i = 0; i < 9; i++) {
- indycam_read_reg(sd, i, &val);
- dprintk("Reg %d = 0x%02x\n", i, val);
- }
-}
-#endif
-
-static int indycam_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct indycam *camera = to_indycam(sd);
- u8 reg;
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
- if (ret)
- return -EIO;
- if (ctrl->id == V4L2_CID_AUTOGAIN)
- ctrl->value = (reg & INDYCAM_CONTROL_AGCENA)
- ? 1 : 0;
- else
- ctrl->value = (reg & INDYCAM_CONTROL_AWBCTL)
- ? 1 : 0;
- break;
- case V4L2_CID_EXPOSURE:
- ret = indycam_read_reg(sd, INDYCAM_REG_SHUTTER, &reg);
- if (ret)
- return -EIO;
- ctrl->value = ((s32)reg == 0x00) ? 0xff : ((s32)reg - 1);
- break;
- case V4L2_CID_GAIN:
- ret = indycam_read_reg(sd, INDYCAM_REG_GAIN, &reg);
- if (ret)
- return -EIO;
- ctrl->value = (s32)reg;
- break;
- case V4L2_CID_RED_BALANCE:
- ret = indycam_read_reg(sd, INDYCAM_REG_RED_BALANCE, &reg);
- if (ret)
- return -EIO;
- ctrl->value = (s32)reg;
- break;
- case V4L2_CID_BLUE_BALANCE:
- ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_BALANCE, &reg);
- if (ret)
- return -EIO;
- ctrl->value = (s32)reg;
- break;
- case INDYCAM_CONTROL_RED_SATURATION:
- ret = indycam_read_reg(sd,
- INDYCAM_REG_RED_SATURATION, &reg);
- if (ret)
- return -EIO;
- ctrl->value = (s32)reg;
- break;
- case INDYCAM_CONTROL_BLUE_SATURATION:
- ret = indycam_read_reg(sd,
- INDYCAM_REG_BLUE_SATURATION, &reg);
- if (ret)
- return -EIO;
- ctrl->value = (s32)reg;
- break;
- case V4L2_CID_GAMMA:
- if (camera->version == CAMERA_VERSION_MOOSE) {
- ret = indycam_read_reg(sd,
- INDYCAM_REG_GAMMA, &reg);
- if (ret)
- return -EIO;
- ctrl->value = (s32)reg;
- } else {
- ctrl->value = INDYCAM_GAMMA_DEFAULT;
- }
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int indycam_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct indycam *camera = to_indycam(sd);
- u8 reg;
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
- if (ret)
- break;
-
- if (ctrl->id == V4L2_CID_AUTOGAIN) {
- if (ctrl->value)
- reg |= INDYCAM_CONTROL_AGCENA;
- else
- reg &= ~INDYCAM_CONTROL_AGCENA;
- } else {
- if (ctrl->value)
- reg |= INDYCAM_CONTROL_AWBCTL;
- else
- reg &= ~INDYCAM_CONTROL_AWBCTL;
- }
-
- ret = indycam_write_reg(sd, INDYCAM_REG_CONTROL, reg);
- break;
- case V4L2_CID_EXPOSURE:
- reg = (ctrl->value == 0xff) ? 0x00 : (ctrl->value + 1);
- ret = indycam_write_reg(sd, INDYCAM_REG_SHUTTER, reg);
- break;
- case V4L2_CID_GAIN:
- ret = indycam_write_reg(sd, INDYCAM_REG_GAIN, ctrl->value);
- break;
- case V4L2_CID_RED_BALANCE:
- ret = indycam_write_reg(sd, INDYCAM_REG_RED_BALANCE,
- ctrl->value);
- break;
- case V4L2_CID_BLUE_BALANCE:
- ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_BALANCE,
- ctrl->value);
- break;
- case INDYCAM_CONTROL_RED_SATURATION:
- ret = indycam_write_reg(sd, INDYCAM_REG_RED_SATURATION,
- ctrl->value);
- break;
- case INDYCAM_CONTROL_BLUE_SATURATION:
- ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_SATURATION,
- ctrl->value);
- break;
- case V4L2_CID_GAMMA:
- if (camera->version == CAMERA_VERSION_MOOSE) {
- ret = indycam_write_reg(sd, INDYCAM_REG_GAMMA,
- ctrl->value);
- }
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-/* I2C-interface */
-
-/* ----------------------------------------------------------------------- */
-
-static const struct v4l2_subdev_core_ops indycam_core_ops = {
- .g_ctrl = indycam_g_ctrl,
- .s_ctrl = indycam_s_ctrl,
-};
-
-static const struct v4l2_subdev_ops indycam_ops = {
- .core = &indycam_core_ops,
-};
-
-static int indycam_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int err = 0;
- struct indycam *camera;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- camera = kzalloc(sizeof(struct indycam), GFP_KERNEL);
- if (!camera)
- return -ENOMEM;
-
- sd = &camera->sd;
- v4l2_i2c_subdev_init(sd, client, &indycam_ops);
-
- camera->version = i2c_smbus_read_byte_data(client,
- INDYCAM_REG_VERSION);
- if (camera->version != CAMERA_VERSION_INDY &&
- camera->version != CAMERA_VERSION_MOOSE) {
- kfree(camera);
- return -ENODEV;
- }
-
- printk(KERN_INFO "IndyCam v%d.%d detected\n",
- INDYCAM_VERSION_MAJOR(camera->version),
- INDYCAM_VERSION_MINOR(camera->version));
-
- indycam_regdump(sd);
-
- // initialize
- err = indycam_write_block(sd, 0, sizeof(initseq), (u8 *)&initseq);
- if (err) {
- printk(KERN_ERR "IndyCam initialization failed\n");
- kfree(camera);
- return -EIO;
- }
-
- indycam_regdump(sd);
-
- // white balance
- err = indycam_write_reg(sd, INDYCAM_REG_CONTROL,
- INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
- if (err) {
- printk(KERN_ERR "IndyCam: White balancing camera failed\n");
- kfree(camera);
- return -EIO;
- }
-
- indycam_regdump(sd);
-
- printk(KERN_INFO "IndyCam initialized\n");
-
- return 0;
-}
-
-static int indycam_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
- v4l2_device_unregister_subdev(sd);
- kfree(to_indycam(sd));
- return 0;
-}
-
-static const struct i2c_device_id indycam_id[] = {
- { "indycam", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, indycam_id);
-
-static struct i2c_driver indycam_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "indycam",
- },
- .probe = indycam_probe,
- .remove = indycam_remove,
- .id_table = indycam_id,
-};
-
-module_i2c_driver(indycam_driver);
diff --git a/drivers/staging/media/vino/indycam.h b/drivers/staging/media/vino/indycam.h
deleted file mode 100644
index 881f21c474c4..000000000000
--- a/drivers/staging/media/vino/indycam.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * indycam.h - Silicon Graphics IndyCam digital camera driver
- *
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _INDYCAM_H_
-#define _INDYCAM_H_
-
-/* I2C address for the Guinness Camera */
-#define INDYCAM_ADDR 0x56
-
-/* Camera version */
-#define CAMERA_VERSION_INDY 0x10 /* v1.0 */
-#define CAMERA_VERSION_MOOSE 0x12 /* v1.2 */
-#define INDYCAM_VERSION_MAJOR(x) (((x) & 0xf0) >> 4)
-#define INDYCAM_VERSION_MINOR(x) ((x) & 0x0f)
-
-/* Register bus addresses */
-#define INDYCAM_REG_CONTROL 0x00
-#define INDYCAM_REG_SHUTTER 0x01
-#define INDYCAM_REG_GAIN 0x02
-#define INDYCAM_REG_BRIGHTNESS 0x03 /* read-only */
-#define INDYCAM_REG_RED_BALANCE 0x04
-#define INDYCAM_REG_BLUE_BALANCE 0x05
-#define INDYCAM_REG_RED_SATURATION 0x06
-#define INDYCAM_REG_BLUE_SATURATION 0x07
-#define INDYCAM_REG_GAMMA 0x08
-#define INDYCAM_REG_VERSION 0x0e /* read-only */
-#define INDYCAM_REG_RESET 0x0f /* write-only */
-
-#define INDYCAM_REG_LED 0x46
-#define INDYCAM_REG_ORIENTATION 0x47
-#define INDYCAM_REG_BUTTON 0x48
-
-/* Field definitions of registers */
-#define INDYCAM_CONTROL_AGCENA (1<<0) /* automatic gain control */
-#define INDYCAM_CONTROL_AWBCTL (1<<1) /* automatic white balance */
- /* 2-3 are reserved */
-#define INDYCAM_CONTROL_EVNFLD (1<<4) /* read-only */
-
-#define INDYCAM_SHUTTER_10000 0x02 /* 1/10000 second */
-#define INDYCAM_SHUTTER_4000 0x04 /* 1/4000 second */
-#define INDYCAM_SHUTTER_2000 0x08 /* 1/2000 second */
-#define INDYCAM_SHUTTER_1000 0x10 /* 1/1000 second */
-#define INDYCAM_SHUTTER_500 0x20 /* 1/500 second */
-#define INDYCAM_SHUTTER_250 0x3f /* 1/250 second */
-#define INDYCAM_SHUTTER_125 0x7e /* 1/125 second */
-#define INDYCAM_SHUTTER_100 0x9e /* 1/100 second */
-#define INDYCAM_SHUTTER_60 0x00 /* 1/60 second */
-
-#define INDYCAM_LED_ACTIVE 0x10
-#define INDYCAM_LED_INACTIVE 0x30
-#define INDYCAM_ORIENTATION_BOTTOM_TO_TOP 0x40
-#define INDYCAM_BUTTON_RELEASED 0x10
-
-/* Values for controls */
-#define INDYCAM_SHUTTER_MIN 0x00
-#define INDYCAM_SHUTTER_MAX 0xff
-#define INDYCAM_GAIN_MIN 0x00
-#define INDYCAM_GAIN_MAX 0xff
-#define INDYCAM_RED_BALANCE_MIN 0x00
-#define INDYCAM_RED_BALANCE_MAX 0xff
-#define INDYCAM_BLUE_BALANCE_MIN 0x00
-#define INDYCAM_BLUE_BALANCE_MAX 0xff
-#define INDYCAM_RED_SATURATION_MIN 0x00
-#define INDYCAM_RED_SATURATION_MAX 0xff
-#define INDYCAM_BLUE_SATURATION_MIN 0x00
-#define INDYCAM_BLUE_SATURATION_MAX 0xff
-#define INDYCAM_GAMMA_MIN 0x00
-#define INDYCAM_GAMMA_MAX 0xff
-
-#define INDYCAM_AGC_DEFAULT 1
-#define INDYCAM_AWB_DEFAULT 0
-#define INDYCAM_SHUTTER_DEFAULT 0xff
-#define INDYCAM_GAIN_DEFAULT 0x80
-#define INDYCAM_RED_BALANCE_DEFAULT 0x18
-#define INDYCAM_BLUE_BALANCE_DEFAULT 0xa4
-#define INDYCAM_RED_SATURATION_DEFAULT 0x80
-#define INDYCAM_BLUE_SATURATION_DEFAULT 0xc0
-#define INDYCAM_GAMMA_DEFAULT 0x80
-
-/* Driver interface definitions */
-
-#define INDYCAM_CONTROL_RED_SATURATION (V4L2_CID_PRIVATE_BASE + 0)
-#define INDYCAM_CONTROL_BLUE_SATURATION (V4L2_CID_PRIVATE_BASE + 1)
-
-#endif
diff --git a/drivers/staging/media/vino/saa7191.c b/drivers/staging/media/vino/saa7191.c
deleted file mode 100644
index 8e9699268a63..000000000000
--- a/drivers/staging/media/vino/saa7191.c
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * saa7191.c - Philips SAA7191 video decoder driver
- *
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include <linux/videodev2.h>
-#include <linux/i2c.h>
-#include <media/v4l2-device.h>
-
-#include "saa7191.h"
-
-#define SAA7191_MODULE_VERSION "0.0.5"
-
-MODULE_DESCRIPTION("Philips SAA7191 video decoder driver");
-MODULE_VERSION(SAA7191_MODULE_VERSION);
-MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
-MODULE_LICENSE("GPL");
-
-
-// #define SAA7191_DEBUG
-
-#ifdef SAA7191_DEBUG
-#define dprintk(x...) printk("SAA7191: " x)
-#else
-#define dprintk(x...)
-#endif
-
-#define SAA7191_SYNC_COUNT 30
-#define SAA7191_SYNC_DELAY 100 /* milliseconds */
-
-struct saa7191 {
- struct v4l2_subdev sd;
-
- /* the register values are stored here as the actual
- * I2C-registers are write-only */
- u8 reg[25];
-
- int input;
- v4l2_std_id norm;
-};
-
-static inline struct saa7191 *to_saa7191(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct saa7191, sd);
-}
-
-static const u8 initseq[] = {
- 0, /* Subaddress */
-
- 0x50, /* (0x50) SAA7191_REG_IDEL */
-
- /* 50 Hz signal timing */
- 0x30, /* (0x30) SAA7191_REG_HSYB */
- 0x00, /* (0x00) SAA7191_REG_HSYS */
- 0xe8, /* (0xe8) SAA7191_REG_HCLB */
- 0xb6, /* (0xb6) SAA7191_REG_HCLS */
- 0xf4, /* (0xf4) SAA7191_REG_HPHI */
-
- /* control */
- SAA7191_LUMA_APER_1, /* (0x01) SAA7191_REG_LUMA - CVBS mode */
- 0x00, /* (0x00) SAA7191_REG_HUEC */
- 0xf8, /* (0xf8) SAA7191_REG_CKTQ */
- 0xf8, /* (0xf8) SAA7191_REG_CKTS */
- 0x90, /* (0x90) SAA7191_REG_PLSE */
- 0x90, /* (0x90) SAA7191_REG_SESE */
- 0x00, /* (0x00) SAA7191_REG_GAIN */
- SAA7191_STDC_NFEN | SAA7191_STDC_HRMV, /* (0x0c) SAA7191_REG_STDC
- * - not SECAM,
- * slow time constant */
- SAA7191_IOCK_OEDC | SAA7191_IOCK_OEHS | SAA7191_IOCK_OEVS
- | SAA7191_IOCK_OEDY, /* (0x78) SAA7191_REG_IOCK
- * - chroma from CVBS, GPSW1 & 2 off */
- SAA7191_CTL3_AUFD | SAA7191_CTL3_SCEN | SAA7191_CTL3_OFTS
- | SAA7191_CTL3_YDEL0, /* (0x99) SAA7191_REG_CTL3
- * - automatic field detection */
- 0x00, /* (0x00) SAA7191_REG_CTL4 */
- 0x2c, /* (0x2c) SAA7191_REG_CHCV - PAL nominal value */
- 0x00, /* unused */
- 0x00, /* unused */
-
- /* 60 Hz signal timing */
- 0x34, /* (0x34) SAA7191_REG_HS6B */
- 0x0a, /* (0x0a) SAA7191_REG_HS6S */
- 0xf4, /* (0xf4) SAA7191_REG_HC6B */
- 0xce, /* (0xce) SAA7191_REG_HC6S */
- 0xf4, /* (0xf4) SAA7191_REG_HP6I */
-};
-
-/* SAA7191 register handling */
-
-static u8 saa7191_read_reg(struct v4l2_subdev *sd, u8 reg)
-{
- return to_saa7191(sd)->reg[reg];
-}
-
-static int saa7191_read_status(struct v4l2_subdev *sd, u8 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = i2c_master_recv(client, value, 1);
- if (ret < 0) {
- printk(KERN_ERR "SAA7191: saa7191_read_status(): read failed\n");
- return ret;
- }
-
- return 0;
-}
-
-
-static int saa7191_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- to_saa7191(sd)->reg[reg] = value;
- return i2c_smbus_write_byte_data(client, reg, value);
-}
-
-/* the first byte of data must be the first subaddress number (register) */
-static int saa7191_write_block(struct v4l2_subdev *sd,
- u8 length, const u8 *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct saa7191 *decoder = to_saa7191(sd);
- int i;
- int ret;
-
- for (i = 0; i < (length - 1); i++) {
- decoder->reg[data[0] + i] = data[i + 1];
- }
-
- ret = i2c_master_send(client, data, length);
- if (ret < 0) {
- printk(KERN_ERR "SAA7191: saa7191_write_block(): "
- "write failed\n");
- return ret;
- }
-
- return 0;
-}
-
-/* Helper functions */
-
-static int saa7191_s_routing(struct v4l2_subdev *sd,
- u32 input, u32 output, u32 config)
-{
- struct saa7191 *decoder = to_saa7191(sd);
- u8 luma = saa7191_read_reg(sd, SAA7191_REG_LUMA);
- u8 iock = saa7191_read_reg(sd, SAA7191_REG_IOCK);
- int err;
-
- switch (input) {
- case SAA7191_INPUT_COMPOSITE: /* Set Composite input */
- iock &= ~(SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW1
- | SAA7191_IOCK_GPSW2);
- /* Chrominance trap active */
- luma &= ~SAA7191_LUMA_BYPS;
- break;
- case SAA7191_INPUT_SVIDEO: /* Set S-Video input */
- iock |= SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW2;
- /* Chrominance trap bypassed */
- luma |= SAA7191_LUMA_BYPS;
- break;
- default:
- return -EINVAL;
- }
-
- err = saa7191_write_reg(sd, SAA7191_REG_LUMA, luma);
- if (err)
- return -EIO;
- err = saa7191_write_reg(sd, SAA7191_REG_IOCK, iock);
- if (err)
- return -EIO;
-
- decoder->input = input;
-
- return 0;
-}
-
-static int saa7191_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
-{
- struct saa7191 *decoder = to_saa7191(sd);
- u8 stdc = saa7191_read_reg(sd, SAA7191_REG_STDC);
- u8 ctl3 = saa7191_read_reg(sd, SAA7191_REG_CTL3);
- u8 chcv = saa7191_read_reg(sd, SAA7191_REG_CHCV);
- int err;
-
- if (norm & V4L2_STD_PAL) {
- stdc &= ~SAA7191_STDC_SECS;
- ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
- chcv = SAA7191_CHCV_PAL;
- } else if (norm & V4L2_STD_NTSC) {
- stdc &= ~SAA7191_STDC_SECS;
- ctl3 &= ~SAA7191_CTL3_AUFD;
- ctl3 |= SAA7191_CTL3_FSEL;
- chcv = SAA7191_CHCV_NTSC;
- } else if (norm & V4L2_STD_SECAM) {
- stdc |= SAA7191_STDC_SECS;
- ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
- chcv = SAA7191_CHCV_PAL;
- } else {
- return -EINVAL;
- }
-
- err = saa7191_write_reg(sd, SAA7191_REG_CTL3, ctl3);
- if (err)
- return -EIO;
- err = saa7191_write_reg(sd, SAA7191_REG_STDC, stdc);
- if (err)
- return -EIO;
- err = saa7191_write_reg(sd, SAA7191_REG_CHCV, chcv);
- if (err)
- return -EIO;
-
- decoder->norm = norm;
-
- dprintk("ctl3: %02x stdc: %02x chcv: %02x\n", ctl3,
- stdc, chcv);
- dprintk("norm: %llx\n", norm);
-
- return 0;
-}
-
-static int saa7191_wait_for_signal(struct v4l2_subdev *sd, u8 *status)
-{
- int i = 0;
-
- dprintk("Checking for signal...\n");
-
- for (i = 0; i < SAA7191_SYNC_COUNT; i++) {
- if (saa7191_read_status(sd, status))
- return -EIO;
-
- if (((*status) & SAA7191_STATUS_HLCK) == 0) {
- dprintk("Signal found\n");
- return 0;
- }
-
- msleep(SAA7191_SYNC_DELAY);
- }
-
- dprintk("No signal\n");
-
- return -EBUSY;
-}
-
-static int saa7191_querystd(struct v4l2_subdev *sd, v4l2_std_id *norm)
-{
- struct saa7191 *decoder = to_saa7191(sd);
- u8 stdc = saa7191_read_reg(sd, SAA7191_REG_STDC);
- u8 ctl3 = saa7191_read_reg(sd, SAA7191_REG_CTL3);
- u8 status;
- v4l2_std_id old_norm = decoder->norm;
- int err = 0;
-
- dprintk("SAA7191 extended signal auto-detection...\n");
-
- *norm &= V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
- stdc &= ~SAA7191_STDC_SECS;
- ctl3 &= ~(SAA7191_CTL3_FSEL);
-
- err = saa7191_write_reg(sd, SAA7191_REG_STDC, stdc);
- if (err) {
- err = -EIO;
- goto out;
- }
- err = saa7191_write_reg(sd, SAA7191_REG_CTL3, ctl3);
- if (err) {
- err = -EIO;
- goto out;
- }
-
- ctl3 |= SAA7191_CTL3_AUFD;
- err = saa7191_write_reg(sd, SAA7191_REG_CTL3, ctl3);
- if (err) {
- err = -EIO;
- goto out;
- }
-
- msleep(SAA7191_SYNC_DELAY);
-
- err = saa7191_wait_for_signal(sd, &status);
- if (err)
- goto out;
-
- if (status & SAA7191_STATUS_FIDT) {
- /* 60Hz signal -> NTSC */
- dprintk("60Hz signal: NTSC\n");
- *norm &= V4L2_STD_NTSC;
- return 0;
- }
-
- /* 50Hz signal */
- dprintk("50Hz signal: Trying PAL...\n");
-
- /* try PAL first */
- err = saa7191_s_std(sd, V4L2_STD_PAL);
- if (err)
- goto out;
-
- msleep(SAA7191_SYNC_DELAY);
-
- err = saa7191_wait_for_signal(sd, &status);
- if (err)
- goto out;
-
- /* not 50Hz ? */
- if (status & SAA7191_STATUS_FIDT) {
- dprintk("No 50Hz signal\n");
- saa7191_s_std(sd, old_norm);
- *norm = V4L2_STD_UNKNOWN;
- return 0;
- }
-
- if (status & SAA7191_STATUS_CODE) {
- dprintk("PAL\n");
- *norm &= V4L2_STD_PAL;
- return saa7191_s_std(sd, old_norm);
- }
-
- dprintk("No color detected with PAL - Trying SECAM...\n");
-
- /* no color detected ? -> try SECAM */
- err = saa7191_s_std(sd, V4L2_STD_SECAM);
- if (err)
- goto out;
-
- msleep(SAA7191_SYNC_DELAY);
-
- err = saa7191_wait_for_signal(sd, &status);
- if (err)
- goto out;
-
- /* not 50Hz ? */
- if (status & SAA7191_STATUS_FIDT) {
- dprintk("No 50Hz signal\n");
- *norm = V4L2_STD_UNKNOWN;
- goto out;
- }
-
- if (status & SAA7191_STATUS_CODE) {
- /* Color detected -> SECAM */
- dprintk("SECAM\n");
- *norm &= V4L2_STD_SECAM;
- return saa7191_s_std(sd, old_norm);
- }
-
- dprintk("No color detected with SECAM - Going back to PAL.\n");
- *norm = V4L2_STD_UNKNOWN;
-
-out:
- return saa7191_s_std(sd, old_norm);
-}
-
-static int saa7191_autodetect_norm(struct v4l2_subdev *sd)
-{
- u8 status;
-
- dprintk("SAA7191 signal auto-detection...\n");
-
- dprintk("Reading status...\n");
-
- if (saa7191_read_status(sd, &status))
- return -EIO;
-
- dprintk("Checking for signal...\n");
-
- /* no signal ? */
- if (status & SAA7191_STATUS_HLCK) {
- dprintk("No signal\n");
- return -EBUSY;
- }
-
- dprintk("Signal found\n");
-
- if (status & SAA7191_STATUS_FIDT) {
- /* 60hz signal -> NTSC */
- dprintk("NTSC\n");
- return saa7191_s_std(sd, V4L2_STD_NTSC);
- } else {
- /* 50hz signal -> PAL */
- dprintk("PAL\n");
- return saa7191_s_std(sd, V4L2_STD_PAL);
- }
-}
-
-static int saa7191_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- u8 reg;
- int ret = 0;
-
- switch (ctrl->id) {
- case SAA7191_CONTROL_BANDPASS:
- case SAA7191_CONTROL_BANDPASS_WEIGHT:
- case SAA7191_CONTROL_CORING:
- reg = saa7191_read_reg(sd, SAA7191_REG_LUMA);
- switch (ctrl->id) {
- case SAA7191_CONTROL_BANDPASS:
- ctrl->value = ((s32)reg & SAA7191_LUMA_BPSS_MASK)
- >> SAA7191_LUMA_BPSS_SHIFT;
- break;
- case SAA7191_CONTROL_BANDPASS_WEIGHT:
- ctrl->value = ((s32)reg & SAA7191_LUMA_APER_MASK)
- >> SAA7191_LUMA_APER_SHIFT;
- break;
- case SAA7191_CONTROL_CORING:
- ctrl->value = ((s32)reg & SAA7191_LUMA_CORI_MASK)
- >> SAA7191_LUMA_CORI_SHIFT;
- break;
- }
- break;
- case SAA7191_CONTROL_FORCE_COLOUR:
- case SAA7191_CONTROL_CHROMA_GAIN:
- reg = saa7191_read_reg(sd, SAA7191_REG_GAIN);
- if (ctrl->id == SAA7191_CONTROL_FORCE_COLOUR)
- ctrl->value = ((s32)reg & SAA7191_GAIN_COLO) ? 1 : 0;
- else
- ctrl->value = ((s32)reg & SAA7191_GAIN_LFIS_MASK)
- >> SAA7191_GAIN_LFIS_SHIFT;
- break;
- case V4L2_CID_HUE:
- reg = saa7191_read_reg(sd, SAA7191_REG_HUEC);
- if (reg < 0x80)
- reg += 0x80;
- else
- reg -= 0x80;
- ctrl->value = (s32)reg;
- break;
- case SAA7191_CONTROL_VTRC:
- reg = saa7191_read_reg(sd, SAA7191_REG_STDC);
- ctrl->value = ((s32)reg & SAA7191_STDC_VTRC) ? 1 : 0;
- break;
- case SAA7191_CONTROL_LUMA_DELAY:
- reg = saa7191_read_reg(sd, SAA7191_REG_CTL3);
- ctrl->value = ((s32)reg & SAA7191_CTL3_YDEL_MASK)
- >> SAA7191_CTL3_YDEL_SHIFT;
- if (ctrl->value >= 4)
- ctrl->value -= 8;
- break;
- case SAA7191_CONTROL_VNR:
- reg = saa7191_read_reg(sd, SAA7191_REG_CTL4);
- ctrl->value = ((s32)reg & SAA7191_CTL4_VNOI_MASK)
- >> SAA7191_CTL4_VNOI_SHIFT;
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int saa7191_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- u8 reg;
- int ret = 0;
-
- switch (ctrl->id) {
- case SAA7191_CONTROL_BANDPASS:
- case SAA7191_CONTROL_BANDPASS_WEIGHT:
- case SAA7191_CONTROL_CORING:
- reg = saa7191_read_reg(sd, SAA7191_REG_LUMA);
- switch (ctrl->id) {
- case SAA7191_CONTROL_BANDPASS:
- reg &= ~SAA7191_LUMA_BPSS_MASK;
- reg |= (ctrl->value << SAA7191_LUMA_BPSS_SHIFT)
- & SAA7191_LUMA_BPSS_MASK;
- break;
- case SAA7191_CONTROL_BANDPASS_WEIGHT:
- reg &= ~SAA7191_LUMA_APER_MASK;
- reg |= (ctrl->value << SAA7191_LUMA_APER_SHIFT)
- & SAA7191_LUMA_APER_MASK;
- break;
- case SAA7191_CONTROL_CORING:
- reg &= ~SAA7191_LUMA_CORI_MASK;
- reg |= (ctrl->value << SAA7191_LUMA_CORI_SHIFT)
- & SAA7191_LUMA_CORI_MASK;
- break;
- }
- ret = saa7191_write_reg(sd, SAA7191_REG_LUMA, reg);
- break;
- case SAA7191_CONTROL_FORCE_COLOUR:
- case SAA7191_CONTROL_CHROMA_GAIN:
- reg = saa7191_read_reg(sd, SAA7191_REG_GAIN);
- if (ctrl->id == SAA7191_CONTROL_FORCE_COLOUR) {
- if (ctrl->value)
- reg |= SAA7191_GAIN_COLO;
- else
- reg &= ~SAA7191_GAIN_COLO;
- } else {
- reg &= ~SAA7191_GAIN_LFIS_MASK;
- reg |= (ctrl->value << SAA7191_GAIN_LFIS_SHIFT)
- & SAA7191_GAIN_LFIS_MASK;
- }
- ret = saa7191_write_reg(sd, SAA7191_REG_GAIN, reg);
- break;
- case V4L2_CID_HUE:
- reg = ctrl->value & 0xff;
- if (reg < 0x80)
- reg += 0x80;
- else
- reg -= 0x80;
- ret = saa7191_write_reg(sd, SAA7191_REG_HUEC, reg);
- break;
- case SAA7191_CONTROL_VTRC:
- reg = saa7191_read_reg(sd, SAA7191_REG_STDC);
- if (ctrl->value)
- reg |= SAA7191_STDC_VTRC;
- else
- reg &= ~SAA7191_STDC_VTRC;
- ret = saa7191_write_reg(sd, SAA7191_REG_STDC, reg);
- break;
- case SAA7191_CONTROL_LUMA_DELAY: {
- s32 value = ctrl->value;
- if (value < 0)
- value += 8;
- reg = saa7191_read_reg(sd, SAA7191_REG_CTL3);
- reg &= ~SAA7191_CTL3_YDEL_MASK;
- reg |= (value << SAA7191_CTL3_YDEL_SHIFT)
- & SAA7191_CTL3_YDEL_MASK;
- ret = saa7191_write_reg(sd, SAA7191_REG_CTL3, reg);
- break;
- }
- case SAA7191_CONTROL_VNR:
- reg = saa7191_read_reg(sd, SAA7191_REG_CTL4);
- reg &= ~SAA7191_CTL4_VNOI_MASK;
- reg |= (ctrl->value << SAA7191_CTL4_VNOI_SHIFT)
- & SAA7191_CTL4_VNOI_MASK;
- ret = saa7191_write_reg(sd, SAA7191_REG_CTL4, reg);
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-/* I2C-interface */
-
-static int saa7191_g_input_status(struct v4l2_subdev *sd, u32 *status)
-{
- u8 status_reg;
- int res = V4L2_IN_ST_NO_SIGNAL;
-
- if (saa7191_read_status(sd, &status_reg))
- return -EIO;
- if ((status_reg & SAA7191_STATUS_HLCK) == 0)
- res = 0;
- if (!(status_reg & SAA7191_STATUS_CODE))
- res |= V4L2_IN_ST_NO_COLOR;
- *status = res;
- return 0;
-}
-
-
-/* ----------------------------------------------------------------------- */
-
-static const struct v4l2_subdev_core_ops saa7191_core_ops = {
- .g_ctrl = saa7191_g_ctrl,
- .s_ctrl = saa7191_s_ctrl,
-};
-
-static const struct v4l2_subdev_video_ops saa7191_video_ops = {
- .s_std = saa7191_s_std,
- .s_routing = saa7191_s_routing,
- .querystd = saa7191_querystd,
- .g_input_status = saa7191_g_input_status,
-};
-
-static const struct v4l2_subdev_ops saa7191_ops = {
- .core = &saa7191_core_ops,
- .video = &saa7191_video_ops,
-};
-
-static int saa7191_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int err = 0;
- struct saa7191 *decoder;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL);
- if (!decoder)
- return -ENOMEM;
-
- sd = &decoder->sd;
- v4l2_i2c_subdev_init(sd, client, &saa7191_ops);
-
- err = saa7191_write_block(sd, sizeof(initseq), initseq);
- if (err) {
- printk(KERN_ERR "SAA7191 initialization failed\n");
- return err;
- }
-
- printk(KERN_INFO "SAA7191 initialized\n");
-
- decoder->input = SAA7191_INPUT_COMPOSITE;
- decoder->norm = V4L2_STD_PAL;
-
- err = saa7191_autodetect_norm(sd);
- if (err && (err != -EBUSY))
- printk(KERN_ERR "SAA7191: Signal auto-detection failed\n");
-
- return 0;
-}
-
-static int saa7191_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
- v4l2_device_unregister_subdev(sd);
- return 0;
-}
-
-static const struct i2c_device_id saa7191_id[] = {
- { "saa7191", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, saa7191_id);
-
-static struct i2c_driver saa7191_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "saa7191",
- },
- .probe = saa7191_probe,
- .remove = saa7191_remove,
- .id_table = saa7191_id,
-};
-
-module_i2c_driver(saa7191_driver);
diff --git a/drivers/staging/media/vino/saa7191.h b/drivers/staging/media/vino/saa7191.h
deleted file mode 100644
index 803c74d6066f..000000000000
--- a/drivers/staging/media/vino/saa7191.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * saa7191.h - Philips SAA7191 video decoder driver
- *
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _SAA7191_H_
-#define _SAA7191_H_
-
-/* Philips SAA7191 DMSD I2C bus address */
-#define SAA7191_ADDR 0x8a
-
-/* Register subaddresses. */
-#define SAA7191_REG_IDEL 0x00
-#define SAA7191_REG_HSYB 0x01
-#define SAA7191_REG_HSYS 0x02
-#define SAA7191_REG_HCLB 0x03
-#define SAA7191_REG_HCLS 0x04
-#define SAA7191_REG_HPHI 0x05
-#define SAA7191_REG_LUMA 0x06
-#define SAA7191_REG_HUEC 0x07
-#define SAA7191_REG_CKTQ 0x08 /* bits 3-7 */
-#define SAA7191_REG_CKTS 0x09 /* bits 3-7 */
-#define SAA7191_REG_PLSE 0x0a
-#define SAA7191_REG_SESE 0x0b
-#define SAA7191_REG_GAIN 0x0c
-#define SAA7191_REG_STDC 0x0d
-#define SAA7191_REG_IOCK 0x0e
-#define SAA7191_REG_CTL3 0x0f
-#define SAA7191_REG_CTL4 0x10
-#define SAA7191_REG_CHCV 0x11
-#define SAA7191_REG_HS6B 0x14
-#define SAA7191_REG_HS6S 0x15
-#define SAA7191_REG_HC6B 0x16
-#define SAA7191_REG_HC6S 0x17
-#define SAA7191_REG_HP6I 0x18
-#define SAA7191_REG_STATUS 0xff /* not really a subaddress */
-
-/* Status Register definitions */
-#define SAA7191_STATUS_CODE 0x01 /* color detected flag */
-#define SAA7191_STATUS_FIDT 0x20 /* signal type 50/60 Hz */
-#define SAA7191_STATUS_HLCK 0x40 /* PLL unlocked(1)/locked(0) */
-#define SAA7191_STATUS_STTC 0x80 /* tv/vtr time constant */
-
-/* Luminance Control Register definitions */
-/* input mode select bit:
- * 0=CVBS (chrominance trap active), 1=S-Video (trap bypassed) */
-#define SAA7191_LUMA_BYPS 0x80
-/* pre-filter (only when chrominance trap is active) */
-#define SAA7191_LUMA_PREF 0x40
-/* aperture bandpass to select different characteristics with maximums
- * (bits 4-5) */
-#define SAA7191_LUMA_BPSS_MASK 0x30
-#define SAA7191_LUMA_BPSS_SHIFT 4
-#define SAA7191_LUMA_BPSS_3 0x30
-#define SAA7191_LUMA_BPSS_2 0x20
-#define SAA7191_LUMA_BPSS_1 0x10
-#define SAA7191_LUMA_BPSS_0 0x00
-/* coring range for high frequency components according to 8-bit luminance
- * (bits 2-3)
- * 0=coring off, n= (+-)n LSB */
-#define SAA7191_LUMA_CORI_MASK 0x0c
-#define SAA7191_LUMA_CORI_SHIFT 2
-#define SAA7191_LUMA_CORI_3 0x0c
-#define SAA7191_LUMA_CORI_2 0x08
-#define SAA7191_LUMA_CORI_1 0x04
-#define SAA7191_LUMA_CORI_0 0x00
-/* aperture bandpass filter weights high frequency components of luminance
- * signal (bits 0-1)
- * 0=factor 0, 1=0.25, 2=0.5, 3=1 */
-#define SAA7191_LUMA_APER_MASK 0x03
-#define SAA7191_LUMA_APER_SHIFT 0
-#define SAA7191_LUMA_APER_3 0x03
-#define SAA7191_LUMA_APER_2 0x02
-#define SAA7191_LUMA_APER_1 0x01
-#define SAA7191_LUMA_APER_0 0x00
-
-/* Chrominance Gain Control Settings Register definitions */
-/* colour on: 0=automatic colour-killer enabled, 1=forced colour on */
-#define SAA7191_GAIN_COLO 0x80
-/* chrominance gain control (AGC filter)
- * 0=loop filter time constant slow, 1=medium, 2=fast, 3=actual gain */
-#define SAA7191_GAIN_LFIS_MASK 0x60
-#define SAA7191_GAIN_LFIS_SHIFT 5
-#define SAA7191_GAIN_LFIS_3 0x60
-#define SAA7191_GAIN_LFIS_2 0x40
-#define SAA7191_GAIN_LFIS_1 0x20
-#define SAA7191_GAIN_LFIS_0 0x00
-
-/* Standard/Mode Control Register definitions */
-/* tv/vtr mode bit: 0=TV mode (slow time constant),
- * 1=VTR mode (fast time constant) */
-#define SAA7191_STDC_VTRC 0x80
-/* SAA7191B-specific functions enable (RTCO, ODD and GPSW0 outputs)
- * 0=outputs set to high-impedance (circuit equals SAA7191), 1=enabled */
-#define SAA7191_STDC_NFEN 0x08
-/* HREF generation: 0=like SAA7191, 1=HREF is 8xLLC2 clocks earlier */
-#define SAA7191_STDC_HRMV 0x04
-/* general purpose switch 0
- * (not used with VINO afaik) */
-#define SAA7191_STDC_GPSW0 0x02
-/* SECAM mode bit: 0=other standards, 1=SECAM */
-#define SAA7191_STDC_SECS 0x01
-
-/* I/O and Clock Control Register definitions */
-/* horizontal clock PLL: 0=PLL closed,
- * 1=PLL circuit open and horizontal freq fixed */
-#define SAA7191_IOCK_HPLL 0x80
-/* colour-difference output enable (outputs UV0-UV7) */
-#define SAA7191_IOCK_OEDC 0x40
-/* H-sync output enable */
-#define SAA7191_IOCK_OEHS 0x20
-/* V-sync output enable */
-#define SAA7191_IOCK_OEVS 0x10
-/* luminance output enable (outputs Y0-Y7) */
-#define SAA7191_IOCK_OEDY 0x08
-/* S-VHS bit (chrominance from CVBS or from chrominance input):
- * 0=controlled by BYPS-bit, 1=from chrominance input */
-#define SAA7191_IOCK_CHRS 0x04
-/* general purpose switch 2
- * VINO-specific: 0=used with CVBS, 1=used with S-Video */
-#define SAA7191_IOCK_GPSW2 0x02
-/* general purpose switch 1 */
-/* VINO-specific: 0=always, 1=not used!*/
-#define SAA7191_IOCK_GPSW1 0x01
-
-/* Miscellaneous Control #1 Register definitions */
-/* automatic field detection (50/60Hz standard) */
-#define SAA7191_CTL3_AUFD 0x80
-/* field select: (if AUFD=0)
- * 0=50Hz (625 lines), 1=60Hz (525 lines) */
-#define SAA7191_CTL3_FSEL 0x40
-/* SECAM cross-colour reduction enable */
-#define SAA7191_CTL3_SXCR 0x20
-/* sync and clamping pulse enable (HCL and HSY outputs) */
-#define SAA7191_CTL3_SCEN 0x10
-/* output format: 0=4:1:1, 1=4:2:2 (4:2:2 for VINO) */
-#define SAA7191_CTL3_OFTS 0x08
-/* luminance delay compensation
- * 0=0*2/LLC, 1=+1*2/LLC, 2=+2*2/LLC, 3=+3*2/LLC,
- * 4=-4*2/LLC, 5=-3*2/LLC, 6=-2*2/LLC, 7=-1*2/LLC
- * step size = 2/LLC = 67.8ns for 50Hz, 81.5ns for 60Hz */
-#define SAA7191_CTL3_YDEL_MASK 0x07
-#define SAA7191_CTL3_YDEL_SHIFT 0
-#define SAA7191_CTL3_YDEL2 0x04
-#define SAA7191_CTL3_YDEL1 0x02
-#define SAA7191_CTL3_YDEL0 0x01
-
-/* Miscellaneous Control #2 Register definitions */
-/* select HREF position
- * 0=normal, HREF is matched to YUV output port,
- * 1=HREF is matched to CVBS input port */
-#define SAA7191_CTL4_HRFS 0x04
-/* vertical noise reduction
- * 0=normal, 1=searching window, 2=auto-deflection, 3=reduction bypassed */
-#define SAA7191_CTL4_VNOI_MASK 0x03
-#define SAA7191_CTL4_VNOI_SHIFT 0
-#define SAA7191_CTL4_VNOI_3 0x03
-#define SAA7191_CTL4_VNOI_2 0x02
-#define SAA7191_CTL4_VNOI_1 0x01
-#define SAA7191_CTL4_VNOI_0 0x00
-
-/* Chrominance Gain Control Register definitions
- * - for QAM-modulated input signals, affects output amplitude
- * (SECAM gain fixed)
- * (nominal values for UV CCIR level) */
-#define SAA7191_CHCV_NTSC 0x2c
-#define SAA7191_CHCV_PAL 0x59
-
-/* Driver interface definitions */
-#define SAA7191_INPUT_COMPOSITE 0
-#define SAA7191_INPUT_SVIDEO 1
-
-#define SAA7191_NORM_PAL 1
-#define SAA7191_NORM_NTSC 2
-#define SAA7191_NORM_SECAM 3
-
-struct saa7191_status {
- /* 0=no signal, 1=signal detected */
- int signal;
- /* 0=50hz (pal) signal, 1=60hz (ntsc) signal */
- int signal_60hz;
- /* 0=no color detected, 1=color detected */
- int color;
-
- /* current SAA7191_INPUT_ */
- int input;
- /* current SAA7191_NORM_ */
- int norm;
-};
-
-#define SAA7191_BANDPASS_MIN 0x00
-#define SAA7191_BANDPASS_MAX 0x03
-#define SAA7191_BANDPASS_DEFAULT 0x00
-
-#define SAA7191_BANDPASS_WEIGHT_MIN 0x00
-#define SAA7191_BANDPASS_WEIGHT_MAX 0x03
-#define SAA7191_BANDPASS_WEIGHT_DEFAULT 0x01
-
-#define SAA7191_CORING_MIN 0x00
-#define SAA7191_CORING_MAX 0x03
-#define SAA7191_CORING_DEFAULT 0x00
-
-#define SAA7191_HUE_MIN 0x00
-#define SAA7191_HUE_MAX 0xff
-#define SAA7191_HUE_DEFAULT 0x80
-
-#define SAA7191_VTRC_MIN 0x00
-#define SAA7191_VTRC_MAX 0x01
-#define SAA7191_VTRC_DEFAULT 0x00
-
-#define SAA7191_FORCE_COLOUR_MIN 0x00
-#define SAA7191_FORCE_COLOUR_MAX 0x01
-#define SAA7191_FORCE_COLOUR_DEFAULT 0x00
-
-#define SAA7191_CHROMA_GAIN_MIN 0x00
-#define SAA7191_CHROMA_GAIN_MAX 0x03
-#define SAA7191_CHROMA_GAIN_DEFAULT 0x00
-
-#define SAA7191_LUMA_DELAY_MIN -0x04
-#define SAA7191_LUMA_DELAY_MAX 0x03
-#define SAA7191_LUMA_DELAY_DEFAULT 0x01
-
-#define SAA7191_VNR_MIN 0x00
-#define SAA7191_VNR_MAX 0x03
-#define SAA7191_VNR_DEFAULT 0x00
-
-#define SAA7191_CONTROL_BANDPASS (V4L2_CID_PRIVATE_BASE + 0)
-#define SAA7191_CONTROL_BANDPASS_WEIGHT (V4L2_CID_PRIVATE_BASE + 1)
-#define SAA7191_CONTROL_CORING (V4L2_CID_PRIVATE_BASE + 2)
-#define SAA7191_CONTROL_FORCE_COLOUR (V4L2_CID_PRIVATE_BASE + 3)
-#define SAA7191_CONTROL_CHROMA_GAIN (V4L2_CID_PRIVATE_BASE + 4)
-#define SAA7191_CONTROL_VTRC (V4L2_CID_PRIVATE_BASE + 5)
-#define SAA7191_CONTROL_LUMA_DELAY (V4L2_CID_PRIVATE_BASE + 6)
-#define SAA7191_CONTROL_VNR (V4L2_CID_PRIVATE_BASE + 7)
-
-#define DECODER_SAA7191_GET_STATUS _IOR('d', 195, struct saa7191_status)
-#define DECODER_SAA7191_SET_NORM _IOW('d', 196, int)
-
-#endif
diff --git a/drivers/staging/media/vino/vino.c b/drivers/staging/media/vino/vino.c
deleted file mode 100644
index 2c85357f774d..000000000000
--- a/drivers/staging/media/vino/vino.c
+++ /dev/null
@@ -1,4345 +0,0 @@
-/*
- * Driver for the VINO (Video In No Out) system found in SGI Indys.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License version 2 as published by the Free Software Foundation.
- *
- * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
- *
- * Based on the previous version of the driver for 2.4 kernels by:
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- *
- * v4l2_device/v4l2_subdev conversion by:
- * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
- *
- * Note: this conversion is untested! Please contact the linux-media
- * mailing list if you can test this, together with the test results.
- */
-
-/*
- * TODO:
- * - remove "mark pages reserved-hacks" from memory allocation code
- * and implement fault()
- * - check decimation, calculating and reporting image size when
- * using decimation
- * - implement read(), user mode buffers and overlay (?)
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/kmod.h>
-
-#include <linux/i2c.h>
-
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <linux/mutex.h>
-
-#include <asm/paccess.h>
-#include <asm/io.h>
-#include <asm/sgi/ip22.h>
-#include <asm/sgi/mc.h>
-
-#include "vino.h"
-#include "saa7191.h"
-#include "indycam.h"
-
-/* Uncomment the following line to get lots and lots of (mostly useless)
- * debug info.
- * Note that the debug output also slows down the driver significantly */
-// #define VINO_DEBUG
-// #define VINO_DEBUG_INT
-
-#define VINO_MODULE_VERSION "0.0.7"
-
-MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
-MODULE_VERSION(VINO_MODULE_VERSION);
-MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
-MODULE_LICENSE("GPL");
-
-#ifdef VINO_DEBUG
-#define dprintk(x...) printk("VINO: " x)
-#else
-#define dprintk(x...)
-#endif
-
-#define VINO_NO_CHANNEL 0
-#define VINO_CHANNEL_A 1
-#define VINO_CHANNEL_B 2
-
-#define VINO_PAL_WIDTH 768
-#define VINO_PAL_HEIGHT 576
-#define VINO_NTSC_WIDTH 640
-#define VINO_NTSC_HEIGHT 480
-
-#define VINO_MIN_WIDTH 32
-#define VINO_MIN_HEIGHT 32
-
-#define VINO_CLIPPING_START_ODD_D1 1
-#define VINO_CLIPPING_START_ODD_PAL 15
-#define VINO_CLIPPING_START_ODD_NTSC 12
-
-#define VINO_CLIPPING_START_EVEN_D1 2
-#define VINO_CLIPPING_START_EVEN_PAL 15
-#define VINO_CLIPPING_START_EVEN_NTSC 12
-
-#define VINO_INPUT_CHANNEL_COUNT 3
-
-/* the number is the index for vino_inputs */
-#define VINO_INPUT_NONE -1
-#define VINO_INPUT_COMPOSITE 0
-#define VINO_INPUT_SVIDEO 1
-#define VINO_INPUT_D1 2
-
-#define VINO_PAGE_RATIO (PAGE_SIZE / VINO_PAGE_SIZE)
-
-#define VINO_FIFO_THRESHOLD_DEFAULT 16
-
-#define VINO_FRAMEBUFFER_SIZE ((VINO_PAL_WIDTH \
- * VINO_PAL_HEIGHT * 4 \
- + 3 * PAGE_SIZE) & ~(PAGE_SIZE - 1))
-
-#define VINO_FRAMEBUFFER_COUNT_MAX 8
-
-#define VINO_FRAMEBUFFER_UNUSED 0
-#define VINO_FRAMEBUFFER_IN_USE 1
-#define VINO_FRAMEBUFFER_READY 2
-
-#define VINO_QUEUE_ERROR -1
-#define VINO_QUEUE_MAGIC 0x20050125
-
-#define VINO_MEMORY_NONE 0
-#define VINO_MEMORY_MMAP 1
-#define VINO_MEMORY_USERPTR 2
-
-#define VINO_DUMMY_DESC_COUNT 4
-#define VINO_DESC_FETCH_DELAY 5 /* microseconds */
-
-#define VINO_MAX_FRAME_SKIP_COUNT 128
-
-/* the number is the index for vino_data_formats */
-#define VINO_DATA_FMT_NONE -1
-#define VINO_DATA_FMT_GREY 0
-#define VINO_DATA_FMT_RGB332 1
-#define VINO_DATA_FMT_RGB32 2
-#define VINO_DATA_FMT_YUV 3
-
-#define VINO_DATA_FMT_COUNT 4
-
-/* the number is the index for vino_data_norms */
-#define VINO_DATA_NORM_NONE -1
-#define VINO_DATA_NORM_NTSC 0
-#define VINO_DATA_NORM_PAL 1
-#define VINO_DATA_NORM_SECAM 2
-#define VINO_DATA_NORM_D1 3
-
-#define VINO_DATA_NORM_COUNT 4
-
-/* I2C controller flags */
-#define SGI_I2C_FORCE_IDLE (0 << 0)
-#define SGI_I2C_NOT_IDLE (1 << 0)
-#define SGI_I2C_WRITE (0 << 1)
-#define SGI_I2C_READ (1 << 1)
-#define SGI_I2C_RELEASE_BUS (0 << 2)
-#define SGI_I2C_HOLD_BUS (1 << 2)
-#define SGI_I2C_XFER_DONE (0 << 4)
-#define SGI_I2C_XFER_BUSY (1 << 4)
-#define SGI_I2C_ACK (0 << 5)
-#define SGI_I2C_NACK (1 << 5)
-#define SGI_I2C_BUS_OK (0 << 7)
-#define SGI_I2C_BUS_ERR (1 << 7)
-
-/* Internal data structure definitions */
-
-struct vino_input {
- char *name;
- v4l2_std_id std;
-};
-
-struct vino_clipping {
- unsigned int left, right, top, bottom;
-};
-
-struct vino_data_format {
- /* the description */
- char *description;
- /* bytes per pixel */
- unsigned int bpp;
- /* V4L2 fourcc code */
- __u32 pixelformat;
- /* V4L2 colorspace (duh!) */
- enum v4l2_colorspace colorspace;
-};
-
-struct vino_data_norm {
- char *description;
- unsigned int width, height;
- struct vino_clipping odd;
- struct vino_clipping even;
-
- v4l2_std_id std;
- unsigned int fps_min, fps_max;
- __u32 framelines;
-};
-
-struct vino_descriptor_table {
- /* the number of PAGE_SIZE sized pages in the buffer */
- unsigned int page_count;
- /* virtual (kmalloc'd) pointers to the actual data
- * (in PAGE_SIZE chunks, used with mmap streaming) */
- unsigned long *virtual;
-
- /* cpu address for the VINO descriptor table
- * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
- unsigned long *dma_cpu;
- /* dma address for the VINO descriptor table
- * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
- dma_addr_t dma;
-};
-
-struct vino_framebuffer {
-	/* identifier number */
- unsigned int id;
- /* the length of the whole buffer */
- unsigned int size;
- /* the length of actual data in buffer */
- unsigned int data_size;
- /* the data format */
- unsigned int data_format;
- /* the state of buffer data */
- unsigned int state;
- /* is the buffer mapped in user space? */
- unsigned int map_count;
- /* memory offset for mmap() */
- unsigned int offset;
- /* frame counter */
- unsigned int frame_counter;
- /* timestamp (written when image capture finishes) */
- struct timeval timestamp;
-
- struct vino_descriptor_table desc_table;
-
- spinlock_t state_lock;
-};
-
-struct vino_framebuffer_fifo {
- unsigned int length;
-
- unsigned int used;
- unsigned int head;
- unsigned int tail;
-
- unsigned int data[VINO_FRAMEBUFFER_COUNT_MAX];
-};
-
-struct vino_framebuffer_queue {
- unsigned int magic;
-
- /* VINO_MEMORY_NONE, VINO_MEMORY_MMAP or VINO_MEMORY_USERPTR */
- unsigned int type;
- unsigned int length;
-
-	/* the data fields of in and out contain index numbers for the buffers */
- struct vino_framebuffer_fifo in;
- struct vino_framebuffer_fifo out;
-
- struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_COUNT_MAX];
-
- spinlock_t queue_lock;
- struct mutex queue_mutex;
- wait_queue_head_t frame_wait_queue;
-};
-
-struct vino_interrupt_data {
- struct timeval timestamp;
- unsigned int frame_counter;
- unsigned int skip_count;
- unsigned int skip;
-};
-
-struct vino_channel_settings {
- unsigned int channel;
-
- int input;
- unsigned int data_format;
- unsigned int data_norm;
- struct vino_clipping clipping;
- unsigned int decimation;
- unsigned int line_size;
- unsigned int alpha;
- unsigned int fps;
- unsigned int framert_reg;
-
- unsigned int fifo_threshold;
-
- struct vino_framebuffer_queue fb_queue;
-
- /* number of the current field */
- unsigned int field;
-
- /* read in progress */
- int reading;
- /* streaming is active */
- int streaming;
- /* the driver is currently processing the queue */
- int capturing;
-
- struct mutex mutex;
- spinlock_t capture_lock;
-
- unsigned int users;
-
- struct vino_interrupt_data int_data;
-
- /* V4L support */
- struct video_device *vdev;
-};
-
-struct vino_settings {
- struct v4l2_device v4l2_dev;
- struct vino_channel_settings a;
- struct vino_channel_settings b;
-
- /* the channel which owns this client:
- * VINO_NO_CHANNEL, VINO_CHANNEL_A or VINO_CHANNEL_B */
- unsigned int decoder_owner;
- struct v4l2_subdev *decoder;
- unsigned int camera_owner;
- struct v4l2_subdev *camera;
-
- /* a lock for vino register access */
- spinlock_t vino_lock;
- /* a lock for channel input changes */
- spinlock_t input_lock;
-
- unsigned long dummy_page;
- struct vino_descriptor_table dummy_desc_table;
-};
-
-/* Module parameters */
-
-/*
- * Using vino_pixel_conversion, the ABGR32-format pixels supplied
- * by the VINO chip can be converted to more common formats
- * like RGBA32 (or possibly RGB24 in the future). This way we
- * can give out data that can be described correctly with
- * the V4L2 definitions.
- *
- * The pixel format is specified as RGBA32 when no conversion
- * is used.
- *
- * Note that this only affects the 32-bit depth.
- *
- * Use a non-zero value to enable conversion.
- */
-static int vino_pixel_conversion;
-
-module_param_named(pixelconv, vino_pixel_conversion, int, 0);
-
-MODULE_PARM_DESC(pixelconv,
- "enable pixel conversion (non-zero value enables)");
-
-/* Internal data structures */
-
-static struct sgi_vino *vino;
-
-static struct vino_settings *vino_drvdata;
-
-#define camera_call(o, f, args...) \
- v4l2_subdev_call(vino_drvdata->camera, o, f, ##args)
-#define decoder_call(o, f, args...) \
- v4l2_subdev_call(vino_drvdata->decoder, o, f, ##args)
-
-static const char *vino_driver_name = "vino";
-static const char *vino_driver_description = "SGI VINO";
-static const char *vino_bus_name = "GIO64 bus";
-static const char *vino_vdev_name_a = "SGI VINO Channel A";
-static const char *vino_vdev_name_b = "SGI VINO Channel B";
-
-static void vino_capture_tasklet(unsigned long channel);
-
-DECLARE_TASKLET(vino_tasklet_a, vino_capture_tasklet, VINO_CHANNEL_A);
-DECLARE_TASKLET(vino_tasklet_b, vino_capture_tasklet, VINO_CHANNEL_B);
-
-static const struct vino_input vino_inputs[] = {
- {
- .name = "Composite",
- .std = V4L2_STD_NTSC | V4L2_STD_PAL
- | V4L2_STD_SECAM,
- }, {
- .name = "S-Video",
- .std = V4L2_STD_NTSC | V4L2_STD_PAL
- | V4L2_STD_SECAM,
- }, {
- .name = "D1/IndyCam",
- .std = V4L2_STD_NTSC,
- }
-};
-
-static const struct vino_data_format vino_data_formats[] = {
- {
- .description = "8-bit greyscale",
- .bpp = 1,
- .pixelformat = V4L2_PIX_FMT_GREY,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- }, {
- .description = "8-bit dithered RGB 3-3-2",
- .bpp = 1,
- .pixelformat = V4L2_PIX_FMT_RGB332,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }, {
- .description = "32-bit RGB",
- .bpp = 4,
- .pixelformat = V4L2_PIX_FMT_RGB32,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }, {
- .description = "YUV 4:2:2",
- .bpp = 2,
- .pixelformat = V4L2_PIX_FMT_YUYV, // XXX: swapped?
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- }
-};
-
-static const struct vino_data_norm vino_data_norms[] = {
- {
- .description = "NTSC",
- .std = V4L2_STD_NTSC,
- .fps_min = 6,
- .fps_max = 30,
- .framelines = 525,
- .width = VINO_NTSC_WIDTH,
- .height = VINO_NTSC_HEIGHT,
- .odd = {
- .top = VINO_CLIPPING_START_ODD_NTSC,
- .left = 0,
- .bottom = VINO_CLIPPING_START_ODD_NTSC
- + VINO_NTSC_HEIGHT / 2 - 1,
- .right = VINO_NTSC_WIDTH,
- },
- .even = {
- .top = VINO_CLIPPING_START_EVEN_NTSC,
- .left = 0,
- .bottom = VINO_CLIPPING_START_EVEN_NTSC
- + VINO_NTSC_HEIGHT / 2 - 1,
- .right = VINO_NTSC_WIDTH,
- },
- }, {
- .description = "PAL",
- .std = V4L2_STD_PAL,
- .fps_min = 5,
- .fps_max = 25,
- .framelines = 625,
- .width = VINO_PAL_WIDTH,
- .height = VINO_PAL_HEIGHT,
- .odd = {
- .top = VINO_CLIPPING_START_ODD_PAL,
- .left = 0,
- .bottom = VINO_CLIPPING_START_ODD_PAL
- + VINO_PAL_HEIGHT / 2 - 1,
- .right = VINO_PAL_WIDTH,
- },
- .even = {
- .top = VINO_CLIPPING_START_EVEN_PAL,
- .left = 0,
- .bottom = VINO_CLIPPING_START_EVEN_PAL
- + VINO_PAL_HEIGHT / 2 - 1,
- .right = VINO_PAL_WIDTH,
- },
- }, {
- .description = "SECAM",
- .std = V4L2_STD_SECAM,
- .fps_min = 5,
- .fps_max = 25,
- .framelines = 625,
- .width = VINO_PAL_WIDTH,
- .height = VINO_PAL_HEIGHT,
- .odd = {
- .top = VINO_CLIPPING_START_ODD_PAL,
- .left = 0,
- .bottom = VINO_CLIPPING_START_ODD_PAL
- + VINO_PAL_HEIGHT / 2 - 1,
- .right = VINO_PAL_WIDTH,
- },
- .even = {
- .top = VINO_CLIPPING_START_EVEN_PAL,
- .left = 0,
- .bottom = VINO_CLIPPING_START_EVEN_PAL
- + VINO_PAL_HEIGHT / 2 - 1,
- .right = VINO_PAL_WIDTH,
- },
- }, {
- .description = "NTSC/D1",
- .std = V4L2_STD_NTSC,
- .fps_min = 6,
- .fps_max = 30,
- .framelines = 525,
- .width = VINO_NTSC_WIDTH,
- .height = VINO_NTSC_HEIGHT,
- .odd = {
- .top = VINO_CLIPPING_START_ODD_D1,
- .left = 0,
- .bottom = VINO_CLIPPING_START_ODD_D1
- + VINO_NTSC_HEIGHT / 2 - 1,
- .right = VINO_NTSC_WIDTH,
- },
- .even = {
- .top = VINO_CLIPPING_START_EVEN_D1,
- .left = 0,
- .bottom = VINO_CLIPPING_START_EVEN_D1
- + VINO_NTSC_HEIGHT / 2 - 1,
- .right = VINO_NTSC_WIDTH,
- },
- }
-};
-
-#define VINO_INDYCAM_V4L2_CONTROL_COUNT 9
-
-struct v4l2_queryctrl vino_indycam_v4l2_controls[] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Gain Control",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = INDYCAM_AGC_DEFAULT,
- }, {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = INDYCAM_AWB_DEFAULT,
- }, {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = INDYCAM_GAIN_MIN,
- .maximum = INDYCAM_GAIN_MAX,
- .step = 1,
- .default_value = INDYCAM_GAIN_DEFAULT,
- }, {
- .id = INDYCAM_CONTROL_RED_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Saturation",
- .minimum = INDYCAM_RED_SATURATION_MIN,
- .maximum = INDYCAM_RED_SATURATION_MAX,
- .step = 1,
- .default_value = INDYCAM_RED_SATURATION_DEFAULT,
- }, {
- .id = INDYCAM_CONTROL_BLUE_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Saturation",
- .minimum = INDYCAM_BLUE_SATURATION_MIN,
- .maximum = INDYCAM_BLUE_SATURATION_MAX,
- .step = 1,
- .default_value = INDYCAM_BLUE_SATURATION_DEFAULT,
- }, {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = INDYCAM_RED_BALANCE_MIN,
- .maximum = INDYCAM_RED_BALANCE_MAX,
- .step = 1,
- .default_value = INDYCAM_RED_BALANCE_DEFAULT,
- }, {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = INDYCAM_BLUE_BALANCE_MIN,
- .maximum = INDYCAM_BLUE_BALANCE_MAX,
- .step = 1,
- .default_value = INDYCAM_BLUE_BALANCE_DEFAULT,
- }, {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Shutter Control",
- .minimum = INDYCAM_SHUTTER_MIN,
- .maximum = INDYCAM_SHUTTER_MAX,
- .step = 1,
- .default_value = INDYCAM_SHUTTER_DEFAULT,
- }, {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = INDYCAM_GAMMA_MIN,
- .maximum = INDYCAM_GAMMA_MAX,
- .step = 1,
- .default_value = INDYCAM_GAMMA_DEFAULT,
- }
-};
-
-#define VINO_SAA7191_V4L2_CONTROL_COUNT 9
-
-struct v4l2_queryctrl vino_saa7191_v4l2_controls[] = {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = SAA7191_HUE_MIN,
- .maximum = SAA7191_HUE_MAX,
- .step = 1,
- .default_value = SAA7191_HUE_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_BANDPASS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Luminance Bandpass",
- .minimum = SAA7191_BANDPASS_MIN,
- .maximum = SAA7191_BANDPASS_MAX,
- .step = 1,
- .default_value = SAA7191_BANDPASS_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_BANDPASS_WEIGHT,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Luminance Bandpass Weight",
- .minimum = SAA7191_BANDPASS_WEIGHT_MIN,
- .maximum = SAA7191_BANDPASS_WEIGHT_MAX,
- .step = 1,
- .default_value = SAA7191_BANDPASS_WEIGHT_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_CORING,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "HF Luminance Coring",
- .minimum = SAA7191_CORING_MIN,
- .maximum = SAA7191_CORING_MAX,
- .step = 1,
- .default_value = SAA7191_CORING_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_FORCE_COLOUR,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Force Colour",
- .minimum = SAA7191_FORCE_COLOUR_MIN,
- .maximum = SAA7191_FORCE_COLOUR_MAX,
- .step = 1,
- .default_value = SAA7191_FORCE_COLOUR_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_CHROMA_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Chrominance Gain Control",
- .minimum = SAA7191_CHROMA_GAIN_MIN,
- .maximum = SAA7191_CHROMA_GAIN_MAX,
- .step = 1,
- .default_value = SAA7191_CHROMA_GAIN_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_VTRC,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "VTR Time Constant",
- .minimum = SAA7191_VTRC_MIN,
- .maximum = SAA7191_VTRC_MAX,
- .step = 1,
- .default_value = SAA7191_VTRC_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_LUMA_DELAY,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Luminance Delay Compensation",
- .minimum = SAA7191_LUMA_DELAY_MIN,
- .maximum = SAA7191_LUMA_DELAY_MAX,
- .step = 1,
- .default_value = SAA7191_LUMA_DELAY_DEFAULT,
- }, {
- .id = SAA7191_CONTROL_VNR,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Vertical Noise Reduction",
- .minimum = SAA7191_VNR_MIN,
- .maximum = SAA7191_VNR_MAX,
- .step = 1,
- .default_value = SAA7191_VNR_DEFAULT,
- }
-};
-
-/* VINO framebuffer/DMA descriptor management */
-
-static void vino_free_buffer_with_count(struct vino_framebuffer *fb,
- unsigned int count)
-{
- unsigned int i;
-
- dprintk("vino_free_buffer_with_count(): count = %d\n", count);
-
- for (i = 0; i < count; i++) {
- ClearPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
- dma_unmap_single(NULL,
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
- PAGE_SIZE, DMA_FROM_DEVICE);
- free_page(fb->desc_table.virtual[i]);
- }
-
- dma_free_coherent(NULL,
- VINO_PAGE_RATIO * (fb->desc_table.page_count + 4) *
- sizeof(dma_addr_t), (void *)fb->desc_table.dma_cpu,
- fb->desc_table.dma);
- kfree(fb->desc_table.virtual);
-
- memset(fb, 0, sizeof(struct vino_framebuffer));
-}
-
-static void vino_free_buffer(struct vino_framebuffer *fb)
-{
- vino_free_buffer_with_count(fb, fb->desc_table.page_count);
-}
-
-static int vino_allocate_buffer(struct vino_framebuffer *fb,
- unsigned int size)
-{
- unsigned int count, i, j;
- int ret = 0;
-
- dprintk("vino_allocate_buffer():\n");
-
- if (size < 1)
- return -EINVAL;
-
- memset(fb, 0, sizeof(struct vino_framebuffer));
-
- count = ((size / PAGE_SIZE) + 4) & ~3;
-
- dprintk("vino_allocate_buffer(): size = %d, count = %d\n",
- size, count);
-
- /* allocate memory for table with virtual (page) addresses */
- fb->desc_table.virtual =
- kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
- if (!fb->desc_table.virtual)
- return -ENOMEM;
-
- /* allocate memory for table with dma addresses
- * (has space for four extra descriptors) */
- fb->desc_table.dma_cpu =
- dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
- sizeof(dma_addr_t), &fb->desc_table.dma,
- GFP_KERNEL | GFP_DMA);
- if (!fb->desc_table.dma_cpu) {
- ret = -ENOMEM;
- goto out_free_virtual;
- }
-
-	/* allocate pages for the buffer and acquire the corresponding
-	 * dma addresses */
- for (i = 0; i < count; i++) {
- dma_addr_t dma_data_addr;
-
- fb->desc_table.virtual[i] =
- get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!fb->desc_table.virtual[i]) {
- ret = -ENOBUFS;
- break;
- }
-
- dma_data_addr =
- dma_map_single(NULL,
- (void *)fb->desc_table.virtual[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- for (j = 0; j < VINO_PAGE_RATIO; j++) {
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
- dma_data_addr + VINO_PAGE_SIZE * j;
- }
-
- SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
- }
-
- /* page_count needs to be set anyway, because the descriptor table has
- * been allocated according to this number */
- fb->desc_table.page_count = count;
-
- if (ret) {
- /* the descriptor with index i doesn't contain
- * a valid address yet */
- vino_free_buffer_with_count(fb, i);
- return ret;
- }
-
- //fb->size = size;
- fb->size = count * PAGE_SIZE;
- fb->data_format = VINO_DATA_FMT_NONE;
-
- /* set the dma stop-bit for the last (count+1)th descriptor */
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
- return 0;
-
- out_free_virtual:
- kfree(fb->desc_table.virtual);
- return ret;
-}
-
-#if 0
-/* user buffers not fully implemented yet */
-static int vino_prepare_user_buffer(struct vino_framebuffer *fb,
- void *user,
- unsigned int size)
-{
- unsigned int count, i, j;
- int ret = 0;
-
- dprintk("vino_prepare_user_buffer():\n");
-
- if (size < 1)
- return -EINVAL;
-
- memset(fb, 0, sizeof(struct vino_framebuffer));
-
- count = ((size / PAGE_SIZE)) & ~3;
-
- dprintk("vino_prepare_user_buffer(): size = %d, count = %d\n",
- size, count);
-
- /* allocate memory for table with virtual (page) addresses */
- fb->desc_table.virtual = (unsigned long *)
- kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
- if (!fb->desc_table.virtual)
- return -ENOMEM;
-
- /* allocate memory for table with dma addresses
- * (has space for four extra descriptors) */
- fb->desc_table.dma_cpu =
- dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
- sizeof(dma_addr_t), &fb->desc_table.dma,
- GFP_KERNEL | GFP_DMA);
- if (!fb->desc_table.dma_cpu) {
- ret = -ENOMEM;
- goto out_free_virtual;
- }
-
-	/* allocate pages for the buffer and acquire the corresponding
-	 * dma addresses */
- for (i = 0; i < count; i++) {
- dma_addr_t dma_data_addr;
-
- fb->desc_table.virtual[i] =
- get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!fb->desc_table.virtual[i]) {
- ret = -ENOBUFS;
- break;
- }
-
- dma_data_addr =
- dma_map_single(NULL,
- (void *)fb->desc_table.virtual[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- for (j = 0; j < VINO_PAGE_RATIO; j++) {
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
- dma_data_addr + VINO_PAGE_SIZE * j;
- }
-
- SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
- }
-
- /* page_count needs to be set anyway, because the descriptor table has
- * been allocated according to this number */
- fb->desc_table.page_count = count;
-
- if (ret) {
- /* the descriptor with index i doesn't contain
- * a valid address yet */
- vino_free_buffer_with_count(fb, i);
- return ret;
- }
-
- //fb->size = size;
- fb->size = count * PAGE_SIZE;
-
- /* set the dma stop-bit for the last (count+1)th descriptor */
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
- return 0;
-
- out_free_virtual:
- kfree(fb->desc_table.virtual);
- return ret;
-}
-#endif
-
-static void vino_sync_buffer(struct vino_framebuffer *fb)
-{
- int i;
-
- dprintk("vino_sync_buffer():\n");
-
- for (i = 0; i < fb->desc_table.page_count; i++)
- dma_sync_single_for_cpu(NULL,
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
- PAGE_SIZE, DMA_FROM_DEVICE);
-}
-
-/* Framebuffer fifo functions (need to be locked externally) */
-
-static inline void vino_fifo_init(struct vino_framebuffer_fifo *f,
- unsigned int length)
-{
- f->length = 0;
- f->used = 0;
- f->head = 0;
- f->tail = 0;
-
- if (length > VINO_FRAMEBUFFER_COUNT_MAX)
- length = VINO_FRAMEBUFFER_COUNT_MAX;
-
- f->length = length;
-}
-
-/* returns true/false */
-static inline int vino_fifo_has_id(struct vino_framebuffer_fifo *f,
- unsigned int id)
-{
- unsigned int i;
-
-	for (i = 0; i < f->used; i++) {
-		if (f->data[(f->head + i) % f->length] == id)
-			return 1;
-	}
-
- return 0;
-}
-
-#if 0
-/* returns true/false */
-static inline int vino_fifo_full(struct vino_framebuffer_fifo *f)
-{
- return (f->used == f->length);
-}
-#endif
-
-static inline unsigned int vino_fifo_get_used(struct vino_framebuffer_fifo *f)
-{
- return f->used;
-}
-
-static int vino_fifo_enqueue(struct vino_framebuffer_fifo *f, unsigned int id)
-{
- if (id >= f->length) {
- return VINO_QUEUE_ERROR;
- }
-
- if (vino_fifo_has_id(f, id)) {
- return VINO_QUEUE_ERROR;
- }
-
- if (f->used < f->length) {
- f->data[f->tail] = id;
- f->tail = (f->tail + 1) % f->length;
- f->used++;
- } else {
- return VINO_QUEUE_ERROR;
- }
-
- return 0;
-}
-
-static int vino_fifo_peek(struct vino_framebuffer_fifo *f, unsigned int *id)
-{
- if (f->used > 0) {
- *id = f->data[f->head];
- } else {
- return VINO_QUEUE_ERROR;
- }
-
- return 0;
-}
-
-static int vino_fifo_dequeue(struct vino_framebuffer_fifo *f, unsigned int *id)
-{
- if (f->used > 0) {
- *id = f->data[f->head];
- f->head = (f->head + 1) % f->length;
- f->used--;
- } else {
- return VINO_QUEUE_ERROR;
- }
-
- return 0;
-}
-
-/* Framebuffer queue functions */
-
-/* execute with queue_lock locked */
-static void vino_queue_free_with_count(struct vino_framebuffer_queue *q,
- unsigned int length)
-{
- unsigned int i;
-
- q->length = 0;
- memset(&q->in, 0, sizeof(struct vino_framebuffer_fifo));
- memset(&q->out, 0, sizeof(struct vino_framebuffer_fifo));
- for (i = 0; i < length; i++) {
- dprintk("vino_queue_free_with_count(): freeing buffer %d\n",
- i);
- vino_free_buffer(q->buffer[i]);
- kfree(q->buffer[i]);
- }
-
- q->type = VINO_MEMORY_NONE;
- q->magic = 0;
-}
-
-static void vino_queue_free(struct vino_framebuffer_queue *q)
-{
- dprintk("vino_queue_free():\n");
-
- if (q->magic != VINO_QUEUE_MAGIC)
- return;
- if (q->type != VINO_MEMORY_MMAP)
- return;
-
- mutex_lock(&q->queue_mutex);
-
- vino_queue_free_with_count(q, q->length);
-
- mutex_unlock(&q->queue_mutex);
-}
-
-static int vino_queue_init(struct vino_framebuffer_queue *q,
- unsigned int *length)
-{
- unsigned int i;
- int ret = 0;
-
- dprintk("vino_queue_init(): length = %d\n", *length);
-
- if (q->magic == VINO_QUEUE_MAGIC) {
- dprintk("vino_queue_init(): queue already initialized!\n");
- return -EINVAL;
- }
-
- if (q->type != VINO_MEMORY_NONE) {
- dprintk("vino_queue_init(): queue already initialized!\n");
- return -EINVAL;
- }
-
- if (*length < 1)
- return -EINVAL;
-
- mutex_lock(&q->queue_mutex);
-
- if (*length > VINO_FRAMEBUFFER_COUNT_MAX)
- *length = VINO_FRAMEBUFFER_COUNT_MAX;
-
- q->length = 0;
-
- for (i = 0; i < *length; i++) {
- dprintk("vino_queue_init(): allocating buffer %d\n", i);
- q->buffer[i] = kmalloc(sizeof(struct vino_framebuffer),
- GFP_KERNEL);
- if (!q->buffer[i]) {
- dprintk("vino_queue_init(): kmalloc() failed\n");
- ret = -ENOMEM;
- break;
- }
-
- ret = vino_allocate_buffer(q->buffer[i],
- VINO_FRAMEBUFFER_SIZE);
- if (ret) {
- kfree(q->buffer[i]);
- dprintk("vino_queue_init(): "
- "vino_allocate_buffer() failed\n");
- break;
- }
-
- q->buffer[i]->id = i;
- if (i > 0) {
- q->buffer[i]->offset = q->buffer[i - 1]->offset +
- q->buffer[i - 1]->size;
- } else {
- q->buffer[i]->offset = 0;
- }
-
- spin_lock_init(&q->buffer[i]->state_lock);
-
- dprintk("vino_queue_init(): buffer = %d, offset = %d, "
- "size = %d\n", i, q->buffer[i]->offset,
- q->buffer[i]->size);
- }
-
- if (ret) {
- vino_queue_free_with_count(q, i);
- *length = 0;
- } else {
- q->length = *length;
- vino_fifo_init(&q->in, q->length);
- vino_fifo_init(&q->out, q->length);
- q->type = VINO_MEMORY_MMAP;
- q->magic = VINO_QUEUE_MAGIC;
- }
-
- mutex_unlock(&q->queue_mutex);
-
- return ret;
-}
-
-static struct vino_framebuffer *vino_queue_add(struct
- vino_framebuffer_queue *q,
- unsigned int id)
-{
- struct vino_framebuffer *ret = NULL;
- unsigned int total;
- unsigned long flags;
-
- dprintk("vino_queue_add(): id = %d\n", id);
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- if (id >= q->length)
- goto out;
-
- /* not needed?: if (vino_fifo_full(&q->out)) {
- goto out;
- }*/
- /* check that outgoing queue isn't already full
- * (or that it won't become full) */
- total = vino_fifo_get_used(&q->in) +
- vino_fifo_get_used(&q->out);
- if (total >= q->length)
- goto out;
-
- if (vino_fifo_enqueue(&q->in, id))
- goto out;
-
- ret = q->buffer[id];
-
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-static struct vino_framebuffer *vino_queue_transfer(struct
- vino_framebuffer_queue *q)
-{
- struct vino_framebuffer *ret = NULL;
- struct vino_framebuffer *fb;
-	unsigned int id;
- unsigned long flags;
-
- dprintk("vino_queue_transfer():\n");
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- // now this actually removes an entry from the incoming queue
- if (vino_fifo_dequeue(&q->in, &id)) {
- goto out;
- }
-
- dprintk("vino_queue_transfer(): id = %d\n", id);
- fb = q->buffer[id];
-
- // we have already checked that the outgoing queue is not full, but...
- if (vino_fifo_enqueue(&q->out, id)) {
- printk(KERN_ERR "vino_queue_transfer(): "
- "outgoing queue is full, this shouldn't happen!\n");
- goto out;
- }
-
- ret = fb;
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-/* returns true/false */
-static int vino_queue_incoming_contains(struct vino_framebuffer_queue *q,
- unsigned int id)
-{
- int ret = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- ret = vino_fifo_has_id(&q->in, id);
-
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-/* returns true/false */
-static int vino_queue_outgoing_contains(struct vino_framebuffer_queue *q,
- unsigned int id)
-{
- int ret = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- ret = vino_fifo_has_id(&q->out, id);
-
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-static int vino_queue_get_incoming(struct vino_framebuffer_queue *q,
- unsigned int *used)
-{
- int ret = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return VINO_QUEUE_ERROR;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0) {
- ret = VINO_QUEUE_ERROR;
- goto out;
- }
-
- *used = vino_fifo_get_used(&q->in);
-
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-static int vino_queue_get_outgoing(struct vino_framebuffer_queue *q,
- unsigned int *used)
-{
- int ret = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return VINO_QUEUE_ERROR;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0) {
- ret = VINO_QUEUE_ERROR;
- goto out;
- }
-
- *used = vino_fifo_get_used(&q->out);
-
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-#if 0
-static int vino_queue_get_total(struct vino_framebuffer_queue *q,
- unsigned int *total)
-{
- int ret = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return VINO_QUEUE_ERROR;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0) {
- ret = VINO_QUEUE_ERROR;
- goto out;
- }
-
- *total = vino_fifo_get_used(&q->in) +
- vino_fifo_get_used(&q->out);
-
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-#endif
-
-static struct vino_framebuffer *vino_queue_peek(struct
- vino_framebuffer_queue *q,
- unsigned int *id)
-{
- struct vino_framebuffer *ret = NULL;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- if (vino_fifo_peek(&q->in, id)) {
- goto out;
- }
-
- ret = q->buffer[*id];
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-static struct vino_framebuffer *vino_queue_remove(struct
- vino_framebuffer_queue *q,
- unsigned int *id)
-{
- struct vino_framebuffer *ret = NULL;
- unsigned long flags;
- dprintk("vino_queue_remove():\n");
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- if (vino_fifo_dequeue(&q->out, id)) {
- goto out;
- }
-
- dprintk("vino_queue_remove(): id = %d\n", *id);
- ret = q->buffer[*id];
-out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-static struct
-vino_framebuffer *vino_queue_get_buffer(struct vino_framebuffer_queue *q,
- unsigned int id)
-{
- struct vino_framebuffer *ret = NULL;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
-
- if (q->length == 0)
- goto out;
-
- if (id >= q->length)
- goto out;
-
- ret = q->buffer[id];
- out:
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-static unsigned int vino_queue_get_length(struct vino_framebuffer_queue *q)
-{
- unsigned int length = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return length;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
- length = q->length;
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return length;
-}
-
-static int vino_queue_has_mapped_buffers(struct vino_framebuffer_queue *q)
-{
- unsigned int i;
- int ret = 0;
- unsigned long flags;
-
- if (q->magic != VINO_QUEUE_MAGIC) {
- return ret;
- }
-
- spin_lock_irqsave(&q->queue_lock, flags);
- for (i = 0; i < q->length; i++) {
- if (q->buffer[i]->map_count > 0) {
- ret = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return ret;
-}
-
-/* VINO functions */
-
-/* execute with input_lock locked */
-static void vino_update_line_size(struct vino_channel_settings *vcs)
-{
- unsigned int w = vcs->clipping.right - vcs->clipping.left;
- unsigned int d = vcs->decimation;
- unsigned int bpp = vino_data_formats[vcs->data_format].bpp;
- unsigned int lsize;
-
- dprintk("update_line_size(): before: w = %d, d = %d, "
- "line_size = %d\n", w, d, vcs->line_size);
-
-	/* line size must be a multiple of 8 bytes */
- lsize = (bpp * (w / d)) & ~7;
- w = (lsize / bpp) * d;
-
- vcs->clipping.right = vcs->clipping.left + w;
- vcs->line_size = lsize;
-
- dprintk("update_line_size(): after: w = %d, d = %d, "
- "line_size = %d\n", w, d, vcs->line_size);
-}
-
-/* execute with input_lock locked */
-static void vino_set_clipping(struct vino_channel_settings *vcs,
- unsigned int x, unsigned int y,
- unsigned int w, unsigned int h)
-{
- unsigned int maxwidth, maxheight;
- unsigned int d;
-
- maxwidth = vino_data_norms[vcs->data_norm].width;
- maxheight = vino_data_norms[vcs->data_norm].height;
- d = vcs->decimation;
-
- y &= ~1; /* odd/even fields */
-
- if (x > maxwidth) {
- x = 0;
- }
- if (y > maxheight) {
- y = 0;
- }
-
- if (((w / d) < VINO_MIN_WIDTH)
- || ((h / d) < VINO_MIN_HEIGHT)) {
- w = VINO_MIN_WIDTH * d;
- h = VINO_MIN_HEIGHT * d;
- }
-
- if ((x + w) > maxwidth) {
- w = maxwidth - x;
- if ((w / d) < VINO_MIN_WIDTH)
- x = maxwidth - VINO_MIN_WIDTH * d;
- }
- if ((y + h) > maxheight) {
- h = maxheight - y;
- if ((h / d) < VINO_MIN_HEIGHT)
- y = maxheight - VINO_MIN_HEIGHT * d;
- }
-
- vcs->clipping.left = x;
- vcs->clipping.top = y;
- vcs->clipping.right = x + w;
- vcs->clipping.bottom = y + h;
-
- vino_update_line_size(vcs);
-
- dprintk("clipping %d, %d, %d, %d / %d - %d\n",
- vcs->clipping.left, vcs->clipping.top, vcs->clipping.right,
- vcs->clipping.bottom, vcs->decimation, vcs->line_size);
-}
-
-/* execute with input_lock locked */
-static inline void vino_set_default_clipping(struct vino_channel_settings *vcs)
-{
- vino_set_clipping(vcs, 0, 0, vino_data_norms[vcs->data_norm].width,
- vino_data_norms[vcs->data_norm].height);
-}
-
-/* execute with input_lock locked */
-static void vino_set_scaling(struct vino_channel_settings *vcs,
- unsigned int w, unsigned int h)
-{
- unsigned int x, y, curw, curh, d;
-
- x = vcs->clipping.left;
- y = vcs->clipping.top;
- curw = vcs->clipping.right - vcs->clipping.left;
- curh = vcs->clipping.bottom - vcs->clipping.top;
-
- d = max(curw / w, curh / h);
-
- dprintk("scaling w: %d, h: %d, curw: %d, curh: %d, d: %d\n",
- w, h, curw, curh, d);
-
- if (d < 1) {
- d = 1;
- } else if (d > 8) {
- d = 8;
- }
-
- vcs->decimation = d;
- vino_set_clipping(vcs, x, y, w * d, h * d);
-
- dprintk("scaling %d, %d, %d, %d / %d - %d\n", vcs->clipping.left,
- vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom,
- vcs->decimation, vcs->line_size);
-}
-
-/* execute with input_lock locked */
-static inline void vino_set_default_scaling(struct vino_channel_settings *vcs)
-{
- vino_set_scaling(vcs, vcs->clipping.right - vcs->clipping.left,
- vcs->clipping.bottom - vcs->clipping.top);
-}
-
-/* execute with input_lock locked */
-static void vino_set_framerate(struct vino_channel_settings *vcs,
- unsigned int fps)
-{
- unsigned int mask;
-
- switch (vcs->data_norm) {
- case VINO_DATA_NORM_NTSC:
- case VINO_DATA_NORM_D1:
- fps = (unsigned int)(fps / 6) * 6; // FIXME: round!
-
- if (fps < vino_data_norms[vcs->data_norm].fps_min)
- fps = vino_data_norms[vcs->data_norm].fps_min;
- if (fps > vino_data_norms[vcs->data_norm].fps_max)
- fps = vino_data_norms[vcs->data_norm].fps_max;
-
- switch (fps) {
- case 6:
- mask = 0x003;
- break;
- case 12:
- mask = 0x0c3;
- break;
- case 18:
- mask = 0x333;
- break;
- case 24:
- mask = 0x3ff;
- break;
- case 30:
- mask = 0xfff;
- break;
- default:
- mask = VINO_FRAMERT_FULL;
- }
- vcs->framert_reg = VINO_FRAMERT_RT(mask);
- break;
- case VINO_DATA_NORM_PAL:
- case VINO_DATA_NORM_SECAM:
- fps = (unsigned int)(fps / 5) * 5; // FIXME: round!
-
- if (fps < vino_data_norms[vcs->data_norm].fps_min)
- fps = vino_data_norms[vcs->data_norm].fps_min;
- if (fps > vino_data_norms[vcs->data_norm].fps_max)
- fps = vino_data_norms[vcs->data_norm].fps_max;
-
- switch (fps) {
- case 5:
- mask = 0x003;
- break;
- case 10:
- mask = 0x0c3;
- break;
- case 15:
- mask = 0x333;
- break;
- case 20:
- mask = 0x0ff;
- break;
- case 25:
- mask = 0x3ff;
- break;
- default:
- mask = VINO_FRAMERT_FULL;
- }
- vcs->framert_reg = VINO_FRAMERT_RT(mask) | VINO_FRAMERT_PAL;
- break;
- }
-
- vcs->fps = fps;
-}
-
-/* execute with input_lock locked */
-static inline void vino_set_default_framerate(struct
- vino_channel_settings *vcs)
-{
- vino_set_framerate(vcs, vino_data_norms[vcs->data_norm].fps_max);
-}
-
-/* VINO I2C bus functions */
-
-struct i2c_algo_sgi_data {
- void *data; /* private data for lowlevel routines */
- unsigned (*getctrl)(void *data);
- void (*setctrl)(void *data, unsigned val);
- unsigned (*rdata)(void *data);
- void (*wdata)(void *data, unsigned val);
-
- int xfer_timeout;
- int ack_timeout;
-};
-
-static int wait_xfer_done(struct i2c_algo_sgi_data *adap)
-{
- int i;
-
- for (i = 0; i < adap->xfer_timeout; i++) {
- if ((adap->getctrl(adap->data) & SGI_I2C_XFER_BUSY) == 0)
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
-}
-
-static int wait_ack(struct i2c_algo_sgi_data *adap)
-{
- int i;
-
- if (wait_xfer_done(adap))
- return -ETIMEDOUT;
- for (i = 0; i < adap->ack_timeout; i++) {
- if ((adap->getctrl(adap->data) & SGI_I2C_NACK) == 0)
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
-}
-
-static int force_idle(struct i2c_algo_sgi_data *adap)
-{
- int i;
-
- adap->setctrl(adap->data, SGI_I2C_FORCE_IDLE);
- for (i = 0; i < adap->xfer_timeout; i++) {
- if ((adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE) == 0)
- goto out;
- udelay(1);
- }
- return -ETIMEDOUT;
-out:
- if (adap->getctrl(adap->data) & SGI_I2C_BUS_ERR)
- return -EIO;
- return 0;
-}
-
-static int do_address(struct i2c_algo_sgi_data *adap, unsigned int addr,
- int rd)
-{
- if (rd)
- adap->setctrl(adap->data, SGI_I2C_NOT_IDLE);
-	/* Check if the bus is idle; force it to idle if necessary */
- if (adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE)
- if (force_idle(adap))
- return -EIO;
- /* Write out the i2c chip address and specify operation */
- adap->setctrl(adap->data,
- SGI_I2C_HOLD_BUS | SGI_I2C_WRITE | SGI_I2C_NOT_IDLE);
- if (rd)
- addr |= 1;
- adap->wdata(adap->data, addr);
- if (wait_ack(adap))
- return -EIO;
- return 0;
-}
-
-static int i2c_read(struct i2c_algo_sgi_data *adap, unsigned char *buf,
- unsigned int len)
-{
- int i;
-
- adap->setctrl(adap->data,
- SGI_I2C_HOLD_BUS | SGI_I2C_READ | SGI_I2C_NOT_IDLE);
- for (i = 0; i < len; i++) {
- if (wait_xfer_done(adap))
- return -EIO;
- buf[i] = adap->rdata(adap->data);
- }
- adap->setctrl(adap->data, SGI_I2C_RELEASE_BUS | SGI_I2C_FORCE_IDLE);
-
- return 0;
-
-}
-
-static int i2c_write(struct i2c_algo_sgi_data *adap, unsigned char *buf,
- unsigned int len)
-{
- int i;
-
- /* We are already in write state */
- for (i = 0; i < len; i++) {
- adap->wdata(adap->data, buf[i]);
- if (wait_ack(adap))
- return -EIO;
- }
- return 0;
-}
-
-static int sgi_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
- int num)
-{
- struct i2c_algo_sgi_data *adap = i2c_adap->algo_data;
- struct i2c_msg *p;
- int i, err = 0;
-
- for (i = 0; !err && i < num; i++) {
- p = &msgs[i];
- err = do_address(adap, p->addr, p->flags & I2C_M_RD);
- if (err || !p->len)
- continue;
- if (p->flags & I2C_M_RD)
- err = i2c_read(adap, p->buf, p->len);
- else
- err = i2c_write(adap, p->buf, p->len);
- }
-
- return (err < 0) ? err : i;
-}
-
-static u32 sgi_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm sgi_algo = {
- .master_xfer = sgi_xfer,
- .functionality = sgi_func,
-};
-
-static unsigned i2c_vino_getctrl(void *data)
-{
- return vino->i2c_control;
-}
-
-static void i2c_vino_setctrl(void *data, unsigned val)
-{
- vino->i2c_control = val;
-}
-
-static unsigned i2c_vino_rdata(void *data)
-{
- return vino->i2c_data;
-}
-
-static void i2c_vino_wdata(void *data, unsigned val)
-{
- vino->i2c_data = val;
-}
-
-static struct i2c_algo_sgi_data i2c_sgi_vino_data = {
- .getctrl = &i2c_vino_getctrl,
- .setctrl = &i2c_vino_setctrl,
- .rdata = &i2c_vino_rdata,
- .wdata = &i2c_vino_wdata,
- .xfer_timeout = 200,
- .ack_timeout = 1000,
-};
-
-static struct i2c_adapter vino_i2c_adapter = {
- .name = "VINO I2C bus",
- .algo = &sgi_algo,
- .algo_data = &i2c_sgi_vino_data,
- .owner = THIS_MODULE,
-};
-
-/*
- * Prepare VINO for DMA transfer...
- * (execute only with vino_lock and input_lock locked)
- */
-static int vino_dma_setup(struct vino_channel_settings *vcs,
- struct vino_framebuffer *fb)
-{
- u32 ctrl, intr;
- struct sgi_vino_channel *ch;
- const struct vino_data_norm *norm;
-
- dprintk("vino_dma_setup():\n");
-
- vcs->field = 0;
- fb->frame_counter = 0;
-
- ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
- norm = &vino_data_norms[vcs->data_norm];
-
- ch->page_index = 0;
- ch->line_count = 0;
-
-	/* the VINO line size register must be programmed to 8 bytes
-	 * less than the actual line size */
- ch->line_size = vcs->line_size - 8;
-
- /* let VINO know where to transfer data */
- ch->start_desc_tbl = fb->desc_table.dma;
- ch->next_4_desc = fb->desc_table.dma;
-
- /* give vino time to fetch the first four descriptors, 5 usec
- * should be more than enough time */
- udelay(VINO_DESC_FETCH_DELAY);
-
- dprintk("vino_dma_setup(): start desc = %08x, next 4 desc = %08x\n",
- ch->start_desc_tbl, ch->next_4_desc);
-
- /* set the alpha register */
- ch->alpha = vcs->alpha;
-
- /* set clipping registers */
- ch->clip_start = VINO_CLIP_ODD(norm->odd.top + vcs->clipping.top / 2) |
- VINO_CLIP_EVEN(norm->even.top +
- vcs->clipping.top / 2) |
- VINO_CLIP_X(vcs->clipping.left);
- ch->clip_end = VINO_CLIP_ODD(norm->odd.top +
- vcs->clipping.bottom / 2 - 1) |
- VINO_CLIP_EVEN(norm->even.top +
- vcs->clipping.bottom / 2 - 1) |
- VINO_CLIP_X(vcs->clipping.right);
-
- /* set the size of actual content in the buffer (DECIMATION !) */
- fb->data_size = ((vcs->clipping.right - vcs->clipping.left) /
- vcs->decimation) *
- ((vcs->clipping.bottom - vcs->clipping.top) /
- vcs->decimation) *
- vino_data_formats[vcs->data_format].bpp;
-
- ch->frame_rate = vcs->framert_reg;
-
- ctrl = vino->control;
- intr = vino->intr_status;
-
- if (vcs->channel == VINO_CHANNEL_A) {
-		/* Clear all interrupt conditions for this channel
-		 * in the interrupt status register and enable
-		 * its interrupts */
- intr &= ~VINO_INTSTAT_A;
- ctrl |= VINO_CTRL_A_INT;
-
- /* enable synchronization */
- ctrl |= VINO_CTRL_A_SYNC_ENBL;
-
- /* enable frame assembly */
- ctrl |= VINO_CTRL_A_INTERLEAVE_ENBL;
-
- /* set decimation used */
- if (vcs->decimation < 2)
- ctrl &= ~VINO_CTRL_A_DEC_ENBL;
- else {
- ctrl |= VINO_CTRL_A_DEC_ENBL;
- ctrl &= ~VINO_CTRL_A_DEC_SCALE_MASK;
- ctrl |= (vcs->decimation - 1) <<
- VINO_CTRL_A_DEC_SCALE_SHIFT;
- }
-
- /* select input interface */
- if (vcs->input == VINO_INPUT_D1)
- ctrl |= VINO_CTRL_A_SELECT;
- else
- ctrl &= ~VINO_CTRL_A_SELECT;
-
- /* palette */
- ctrl &= ~(VINO_CTRL_A_LUMA_ONLY | VINO_CTRL_A_RGB |
- VINO_CTRL_A_DITHER);
- } else {
- intr &= ~VINO_INTSTAT_B;
- ctrl |= VINO_CTRL_B_INT;
-
- ctrl |= VINO_CTRL_B_SYNC_ENBL;
- ctrl |= VINO_CTRL_B_INTERLEAVE_ENBL;
-
- if (vcs->decimation < 2)
- ctrl &= ~VINO_CTRL_B_DEC_ENBL;
- else {
- ctrl |= VINO_CTRL_B_DEC_ENBL;
- ctrl &= ~VINO_CTRL_B_DEC_SCALE_MASK;
- ctrl |= (vcs->decimation - 1) <<
- VINO_CTRL_B_DEC_SCALE_SHIFT;
-
- }
- if (vcs->input == VINO_INPUT_D1)
- ctrl |= VINO_CTRL_B_SELECT;
- else
- ctrl &= ~VINO_CTRL_B_SELECT;
-
- ctrl &= ~(VINO_CTRL_B_LUMA_ONLY | VINO_CTRL_B_RGB |
- VINO_CTRL_B_DITHER);
- }
-
- /* set palette */
- fb->data_format = vcs->data_format;
-
- switch (vcs->data_format) {
- case VINO_DATA_FMT_GREY:
- ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
- VINO_CTRL_A_LUMA_ONLY : VINO_CTRL_B_LUMA_ONLY;
- break;
- case VINO_DATA_FMT_RGB32:
- ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
- VINO_CTRL_A_RGB : VINO_CTRL_B_RGB;
- break;
- case VINO_DATA_FMT_YUV:
- /* nothing needs to be done */
- break;
- case VINO_DATA_FMT_RGB332:
- ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
- VINO_CTRL_A_RGB | VINO_CTRL_A_DITHER :
- VINO_CTRL_B_RGB | VINO_CTRL_B_DITHER;
- break;
- }
-
- vino->intr_status = intr;
- vino->control = ctrl;
-
- return 0;
-}
-
-/* (execute only with vino_lock locked) */
-static inline void vino_dma_start(struct vino_channel_settings *vcs)
-{
- u32 ctrl = vino->control;
-
- dprintk("vino_dma_start():\n");
- ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
- VINO_CTRL_A_DMA_ENBL : VINO_CTRL_B_DMA_ENBL;
- vino->control = ctrl;
-}
-
-/* (execute only with vino_lock locked) */
-static inline void vino_dma_stop(struct vino_channel_settings *vcs)
-{
- u32 ctrl = vino->control;
-
- ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
- ~VINO_CTRL_A_DMA_ENBL : ~VINO_CTRL_B_DMA_ENBL;
- ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
- ~VINO_CTRL_A_INT : ~VINO_CTRL_B_INT;
- vino->control = ctrl;
- dprintk("vino_dma_stop():\n");
-}
-
-/*
- * Load the dummy page into the descriptor registers. This prevents
- * spurious interrupts from being generated.
- * (execute only with vino_lock locked)
- */
-static void vino_clear_interrupt(struct vino_channel_settings *vcs)
-{
- struct sgi_vino_channel *ch;
-
- ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
-
- ch->page_index = 0;
- ch->line_count = 0;
-
- ch->start_desc_tbl = vino_drvdata->dummy_desc_table.dma;
- ch->next_4_desc = vino_drvdata->dummy_desc_table.dma;
-
- udelay(VINO_DESC_FETCH_DELAY);
- dprintk("channel %c clear interrupt condition\n",
- (vcs->channel == VINO_CHANNEL_A) ? 'A':'B');
-}
-
-static int vino_capture(struct vino_channel_settings *vcs,
- struct vino_framebuffer *fb)
-{
- int err = 0;
- unsigned long flags, flags2;
-
- spin_lock_irqsave(&fb->state_lock, flags);
-
- if (fb->state == VINO_FRAMEBUFFER_IN_USE)
- err = -EBUSY;
- fb->state = VINO_FRAMEBUFFER_IN_USE;
-
- spin_unlock_irqrestore(&fb->state_lock, flags);
-
- if (err)
- return err;
-
- spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
- spin_lock_irqsave(&vino_drvdata->input_lock, flags2);
-
- vino_dma_setup(vcs, fb);
- vino_dma_start(vcs);
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags2);
- spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
-
- return err;
-}
-
-static struct vino_framebuffer *
-vino_capture_enqueue(struct vino_channel_settings *vcs, unsigned int index)
-{
- struct vino_framebuffer *fb;
- unsigned long flags;
-
- dprintk("vino_capture_enqueue():\n");
-
- spin_lock_irqsave(&vcs->capture_lock, flags);
-
- fb = vino_queue_add(&vcs->fb_queue, index);
- if (fb == NULL) {
- dprintk("vino_capture_enqueue(): vino_queue_add() failed, "
- "queue full?\n");
- goto out;
- }
-out:
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-
- return fb;
-}
-
-static int vino_capture_next(struct vino_channel_settings *vcs, int start)
-{
- struct vino_framebuffer *fb;
- unsigned int incoming, id;
- int err = 0;
- unsigned long flags;
-
- dprintk("vino_capture_next():\n");
-
- spin_lock_irqsave(&vcs->capture_lock, flags);
-
- if (start) {
- /* start capture only if capture isn't in progress already */
- if (vcs->capturing) {
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
- return 0;
- }
-
- } else {
- /* capture next frame:
- * stop capture if capturing is not set */
- if (!vcs->capturing) {
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
- return 0;
- }
- }
-
- err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
- if (err) {
- dprintk("vino_capture_next(): vino_queue_get_incoming() "
- "failed\n");
- err = -EINVAL;
- goto out;
- }
- if (incoming == 0) {
- dprintk("vino_capture_next(): no buffers available\n");
- goto out;
- }
-
- fb = vino_queue_peek(&vcs->fb_queue, &id);
- if (fb == NULL) {
- dprintk("vino_capture_next(): vino_queue_peek() failed\n");
- err = -EINVAL;
- goto out;
- }
-
- if (start) {
- vcs->capturing = 1;
- }
-
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-
- err = vino_capture(vcs, fb);
-
- return err;
-
-out:
- vcs->capturing = 0;
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-
- return err;
-}
-
-static inline int vino_is_capturing(struct vino_channel_settings *vcs)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&vcs->capture_lock, flags);
-
- ret = vcs->capturing;
-
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-
- return ret;
-}
-
-/* waits until a frame is captured */
-static int vino_wait_for_frame(struct vino_channel_settings *vcs)
-{
- wait_queue_t wait;
- int err = 0;
-
- dprintk("vino_wait_for_frame():\n");
-
- init_waitqueue_entry(&wait, current);
- /* add ourselves into wait queue */
- add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
-
- /* to ensure that schedule_timeout will return immediately
-	 * if a VINO interrupt is triggered in the meantime */
- schedule_timeout_interruptible(msecs_to_jiffies(100));
-
- if (signal_pending(current))
- err = -EINTR;
-
- remove_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
-
- dprintk("vino_wait_for_frame(): waiting for frame %s\n",
- err ? "failed" : "ok");
-
- return err;
-}
-
-/* the function assumes that PAGE_SIZE % 4 == 0 */
-static void vino_convert_to_rgba(struct vino_framebuffer *fb)
-{
- unsigned char *pageptr;
- unsigned int page, i;
- unsigned char a;
-
- for (page = 0; page < fb->desc_table.page_count; page++) {
- pageptr = (unsigned char *)fb->desc_table.virtual[page];
-
- for (i = 0; i < PAGE_SIZE; i += 4) {
-			/* reverse the byte order of each 32-bit pixel */
-			a = pageptr[0];
-			pageptr[0] = pageptr[3];
-			pageptr[3] = a;
-			a = pageptr[1];
-			pageptr[1] = pageptr[2];
-			pageptr[2] = a;
- pageptr += 4;
- }
- }
-}
-
-/* checks if the buffer is in the correct state and syncs data */
-static int vino_check_buffer(struct vino_channel_settings *vcs,
- struct vino_framebuffer *fb)
-{
- int err = 0;
- unsigned long flags;
-
- dprintk("vino_check_buffer():\n");
-
- spin_lock_irqsave(&fb->state_lock, flags);
- switch (fb->state) {
- case VINO_FRAMEBUFFER_IN_USE:
- err = -EIO;
- break;
- case VINO_FRAMEBUFFER_READY:
- vino_sync_buffer(fb);
- fb->state = VINO_FRAMEBUFFER_UNUSED;
- break;
- default:
- err = -EINVAL;
- }
- spin_unlock_irqrestore(&fb->state_lock, flags);
-
- if (!err) {
- if (vino_pixel_conversion
- && (fb->data_format == VINO_DATA_FMT_RGB32)) {
- vino_convert_to_rgba(fb);
- }
- } else if (err && (err != -EINVAL)) {
- dprintk("vino_check_buffer(): buffer not ready\n");
-
- spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
- vino_dma_stop(vcs);
- vino_clear_interrupt(vcs);
- spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
- }
-
- return err;
-}
-
-/* forcefully terminates capture */
-static void vino_capture_stop(struct vino_channel_settings *vcs)
-{
- unsigned int incoming = 0, outgoing = 0, id;
- unsigned long flags, flags2;
-
- dprintk("vino_capture_stop():\n");
-
- spin_lock_irqsave(&vcs->capture_lock, flags);
-
- /* unset capturing to stop queue processing */
- vcs->capturing = 0;
-
- spin_lock_irqsave(&vino_drvdata->vino_lock, flags2);
-
- vino_dma_stop(vcs);
- vino_clear_interrupt(vcs);
-
- spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags2);
-
- /* remove all items from the queue */
- if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
- dprintk("vino_capture_stop(): "
- "vino_queue_get_incoming() failed\n");
- goto out;
- }
- while (incoming > 0) {
- vino_queue_transfer(&vcs->fb_queue);
-
- if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
- dprintk("vino_capture_stop(): "
- "vino_queue_get_incoming() failed\n");
- goto out;
- }
- }
-
- if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
- dprintk("vino_capture_stop(): "
- "vino_queue_get_outgoing() failed\n");
- goto out;
- }
- while (outgoing > 0) {
- vino_queue_remove(&vcs->fb_queue, &id);
-
- if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
- dprintk("vino_capture_stop(): "
- "vino_queue_get_outgoing() failed\n");
- goto out;
- }
- }
-
-out:
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-}
-
-#if 0
-static int vino_capture_failed(struct vino_channel_settings *vcs)
-{
- struct vino_framebuffer *fb;
- unsigned long flags;
- unsigned int i;
- int ret;
-
- dprintk("vino_capture_failed():\n");
-
- spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
-
- vino_dma_stop(vcs);
- vino_clear_interrupt(vcs);
-
- spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
-
- ret = vino_queue_get_incoming(&vcs->fb_queue, &i);
- if (ret == VINO_QUEUE_ERROR) {
- dprintk("vino_queue_get_incoming() failed\n");
- return -EINVAL;
- }
- if (i == 0) {
- /* no buffers to process */
- return 0;
- }
-
- fb = vino_queue_peek(&vcs->fb_queue, &i);
- if (fb == NULL) {
- dprintk("vino_queue_peek() failed\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&fb->state_lock, flags);
- if (fb->state == VINO_FRAMEBUFFER_IN_USE) {
- fb->state = VINO_FRAMEBUFFER_UNUSED;
- vino_queue_transfer(&vcs->fb_queue);
- vino_queue_remove(&vcs->fb_queue, &i);
- /* we should actually discard the newest frame,
- * but who cares ... */
- }
- spin_unlock_irqrestore(&fb->state_lock, flags);
-
- return 0;
-}
-#endif
-
-static void vino_skip_frame(struct vino_channel_settings *vcs)
-{
- struct vino_framebuffer *fb;
- unsigned long flags;
- unsigned int id;
-
- spin_lock_irqsave(&vcs->capture_lock, flags);
- fb = vino_queue_peek(&vcs->fb_queue, &id);
- if (!fb) {
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
- dprintk("vino_skip_frame(): vino_queue_peek() failed!\n");
- return;
- }
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-
- spin_lock_irqsave(&fb->state_lock, flags);
- fb->state = VINO_FRAMEBUFFER_UNUSED;
- spin_unlock_irqrestore(&fb->state_lock, flags);
-
- vino_capture_next(vcs, 0);
-}
-
-static void vino_frame_done(struct vino_channel_settings *vcs)
-{
- struct vino_framebuffer *fb;
- unsigned long flags;
-
- spin_lock_irqsave(&vcs->capture_lock, flags);
- fb = vino_queue_transfer(&vcs->fb_queue);
- if (!fb) {
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
- dprintk("vino_frame_done(): vino_queue_transfer() failed!\n");
- return;
- }
- spin_unlock_irqrestore(&vcs->capture_lock, flags);
-
- fb->frame_counter = vcs->int_data.frame_counter;
- memcpy(&fb->timestamp, &vcs->int_data.timestamp,
- sizeof(struct timeval));
-
- spin_lock_irqsave(&fb->state_lock, flags);
- if (fb->state == VINO_FRAMEBUFFER_IN_USE)
- fb->state = VINO_FRAMEBUFFER_READY;
- spin_unlock_irqrestore(&fb->state_lock, flags);
-
- wake_up(&vcs->fb_queue.frame_wait_queue);
-
- vino_capture_next(vcs, 0);
-}
-
-static void vino_capture_tasklet(unsigned long channel)
-{
- struct vino_channel_settings *vcs;
-
- vcs = (channel == VINO_CHANNEL_A)
- ? &vino_drvdata->a : &vino_drvdata->b;
-
- if (vcs->int_data.skip)
- vcs->int_data.skip_count++;
-
- if (vcs->int_data.skip && (vcs->int_data.skip_count
- <= VINO_MAX_FRAME_SKIP_COUNT)) {
- vino_skip_frame(vcs);
- } else {
- vcs->int_data.skip_count = 0;
- vino_frame_done(vcs);
- }
-}
-
-static irqreturn_t vino_interrupt(int irq, void *dev_id)
-{
- u32 ctrl, intr;
- unsigned int fc_a, fc_b;
- int handled_a = 0, skip_a = 0, done_a = 0;
- int handled_b = 0, skip_b = 0, done_b = 0;
-
-#ifdef VINO_DEBUG_INT
- int loop = 0;
- unsigned int line_count = vino->a.line_count,
- page_index = vino->a.page_index,
- field_counter = vino->a.field_counter,
- start_desc_tbl = vino->a.start_desc_tbl,
- next_4_desc = vino->a.next_4_desc;
- unsigned int line_count_2,
- page_index_2,
- field_counter_2,
- start_desc_tbl_2,
- next_4_desc_2;
-#endif
-
- spin_lock(&vino_drvdata->vino_lock);
-
- while ((intr = vino->intr_status)) {
- fc_a = vino->a.field_counter >> 1;
- fc_b = vino->b.field_counter >> 1;
-
-		/* handle error interrupts in some special way?
-		 * --> for now they just cause the frame to be skipped */
- if (intr & VINO_INTSTAT_A) {
- if (intr & VINO_INTSTAT_A_EOF) {
- vino_drvdata->a.field++;
- if (vino_drvdata->a.field > 1) {
- vino_dma_stop(&vino_drvdata->a);
- vino_clear_interrupt(&vino_drvdata->a);
- vino_drvdata->a.field = 0;
- done_a = 1;
- } else {
- if (vino->a.page_index
- != vino_drvdata->a.line_size) {
- vino->a.line_count = 0;
-					vino->a.page_index = vino_drvdata->a.line_size;
- vino->a.next_4_desc =
- vino->a.start_desc_tbl;
- }
- }
- dprintk("channel A end-of-field "
- "interrupt: %04x\n", intr);
- } else {
- vino_dma_stop(&vino_drvdata->a);
- vino_clear_interrupt(&vino_drvdata->a);
- vino_drvdata->a.field = 0;
- skip_a = 1;
- dprintk("channel A error interrupt: %04x\n",
- intr);
- }
-
-#ifdef VINO_DEBUG_INT
- line_count_2 = vino->a.line_count;
- page_index_2 = vino->a.page_index;
- field_counter_2 = vino->a.field_counter;
- start_desc_tbl_2 = vino->a.start_desc_tbl;
- next_4_desc_2 = vino->a.next_4_desc;
-
- printk("intr = %04x, loop = %d, field = %d\n",
- intr, loop, vino_drvdata->a.field);
- printk("1- line count = %04d, page index = %04d, "
- "start = %08x, next = %08x\n"
- " fieldc = %d, framec = %d\n",
- line_count, page_index, start_desc_tbl,
- next_4_desc, field_counter, fc_a);
-		printk("2- line count = %04d, page index = %04d, "
- " start = %08x, next = %08x\n",
- line_count_2, page_index_2, start_desc_tbl_2,
- next_4_desc_2);
-
- if (done_a)
- printk("\n");
-#endif
- }
-
- if (intr & VINO_INTSTAT_B) {
- if (intr & VINO_INTSTAT_B_EOF) {
- vino_drvdata->b.field++;
- if (vino_drvdata->b.field > 1) {
- vino_dma_stop(&vino_drvdata->b);
- vino_clear_interrupt(&vino_drvdata->b);
- vino_drvdata->b.field = 0;
- done_b = 1;
- }
- dprintk("channel B end-of-field "
- "interrupt: %04x\n", intr);
- } else {
- vino_dma_stop(&vino_drvdata->b);
- vino_clear_interrupt(&vino_drvdata->b);
- vino_drvdata->b.field = 0;
- skip_b = 1;
- dprintk("channel B error interrupt: %04x\n",
- intr);
- }
- }
-
- /* Always remember to clear interrupt status.
- * Disable VINO interrupts while we do this. */
- ctrl = vino->control;
- vino->control = ctrl & ~(VINO_CTRL_A_INT | VINO_CTRL_B_INT);
- vino->intr_status = ~intr;
- vino->control = ctrl;
-
- spin_unlock(&vino_drvdata->vino_lock);
-
- if ((!handled_a) && (done_a || skip_a)) {
- if (!skip_a) {
- v4l2_get_timestamp(
- &vino_drvdata->a.int_data.timestamp);
- vino_drvdata->a.int_data.frame_counter = fc_a;
- }
- vino_drvdata->a.int_data.skip = skip_a;
-
- dprintk("channel A %s, interrupt: %d\n",
- skip_a ? "skipping frame" : "frame done",
- intr);
- tasklet_hi_schedule(&vino_tasklet_a);
- handled_a = 1;
- }
-
- if ((!handled_b) && (done_b || skip_b)) {
- if (!skip_b) {
- v4l2_get_timestamp(
- &vino_drvdata->b.int_data.timestamp);
- vino_drvdata->b.int_data.frame_counter = fc_b;
- }
- vino_drvdata->b.int_data.skip = skip_b;
-
- dprintk("channel B %s, interrupt: %d\n",
- skip_b ? "skipping frame" : "frame done",
- intr);
- tasklet_hi_schedule(&vino_tasklet_b);
- handled_b = 1;
- }
-
-#ifdef VINO_DEBUG_INT
- loop++;
-#endif
- spin_lock(&vino_drvdata->vino_lock);
- }
-
- spin_unlock(&vino_drvdata->vino_lock);
-
- return IRQ_HANDLED;
-}
-
-/* VINO video input management */
-
-static int vino_get_saa7191_input(int input)
-{
- switch (input) {
- case VINO_INPUT_COMPOSITE:
- return SAA7191_INPUT_COMPOSITE;
- case VINO_INPUT_SVIDEO:
- return SAA7191_INPUT_SVIDEO;
- default:
- printk(KERN_ERR "VINO: vino_get_saa7191_input(): "
- "invalid input!\n");
- return -1;
- }
-}
-
-/* execute with input_lock locked */
-static int vino_is_input_owner(struct vino_channel_settings *vcs)
-{
- switch(vcs->input) {
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO:
- return vino_drvdata->decoder_owner == vcs->channel;
- case VINO_INPUT_D1:
- return vino_drvdata->camera_owner == vcs->channel;
- default:
- return 0;
- }
-}
-
-static int vino_acquire_input(struct vino_channel_settings *vcs)
-{
- unsigned long flags;
- int ret = 0;
-
- dprintk("vino_acquire_input():\n");
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- /* First try D1 and then SAA7191 */
- if (vino_drvdata->camera
- && (vino_drvdata->camera_owner == VINO_NO_CHANNEL)) {
- vino_drvdata->camera_owner = vcs->channel;
- vcs->input = VINO_INPUT_D1;
- vcs->data_norm = VINO_DATA_NORM_D1;
- } else if (vino_drvdata->decoder
- && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
- int input;
- int data_norm = 0;
- v4l2_std_id norm;
-
- input = VINO_INPUT_COMPOSITE;
-
- ret = decoder_call(video, s_routing,
- vino_get_saa7191_input(input), 0, 0);
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- /* Don't hold spinlocks while auto-detecting norm
- * as it may take a while... */
-
- ret = decoder_call(video, querystd, &norm);
- if (!ret) {
- for (data_norm = 0; data_norm < 3; data_norm++) {
- if (vino_data_norms[data_norm].std & norm)
- break;
- }
- if (data_norm == 3)
- data_norm = VINO_DATA_NORM_PAL;
- ret = decoder_call(video, s_std, norm);
- }
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
-
- vino_drvdata->decoder_owner = vcs->channel;
-
- vcs->input = input;
- vcs->data_norm = data_norm;
- } else {
- vcs->input = (vcs->channel == VINO_CHANNEL_A) ?
- vino_drvdata->b.input : vino_drvdata->a.input;
- vcs->data_norm = (vcs->channel == VINO_CHANNEL_A) ?
- vino_drvdata->b.data_norm : vino_drvdata->a.data_norm;
- }
-
- if (vcs->input == VINO_INPUT_NONE) {
- ret = -ENODEV;
- goto out;
- }
-
- vino_set_default_clipping(vcs);
- vino_set_default_scaling(vcs);
- vino_set_default_framerate(vcs);
-
- dprintk("vino_acquire_input(): %s\n", vino_inputs[vcs->input].name);
-
-out:
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return ret;
-}
-
-static int vino_set_input(struct vino_channel_settings *vcs, int input)
-{
- struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
- &vino_drvdata->b : &vino_drvdata->a;
- unsigned long flags;
- int ret = 0;
-
- dprintk("vino_set_input():\n");
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- if (vcs->input == input)
- goto out;
-
- switch (input) {
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO:
- if (!vino_drvdata->decoder) {
- ret = -EINVAL;
- goto out;
- }
-
- if (vino_drvdata->decoder_owner == VINO_NO_CHANNEL) {
- vino_drvdata->decoder_owner = vcs->channel;
- }
-
- if (vino_drvdata->decoder_owner == vcs->channel) {
- int data_norm = 0;
- v4l2_std_id norm;
-
- ret = decoder_call(video, s_routing,
- vino_get_saa7191_input(input), 0, 0);
- if (ret) {
- vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
- ret = -EINVAL;
- goto out;
- }
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- /* Don't hold spinlocks while auto-detecting norm
- * as it may take a while... */
-
- ret = decoder_call(video, querystd, &norm);
- if (!ret) {
- for (data_norm = 0; data_norm < 3; data_norm++) {
- if (vino_data_norms[data_norm].std & norm)
- break;
- }
- if (data_norm == 3)
- data_norm = VINO_DATA_NORM_PAL;
- ret = decoder_call(video, s_std, norm);
- }
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- if (ret) {
- vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
- ret = -EINVAL;
- goto out;
- }
-
- vcs->input = input;
- vcs->data_norm = data_norm;
- } else {
- if (input != vcs2->input) {
- ret = -EBUSY;
- goto out;
- }
-
- vcs->input = input;
- vcs->data_norm = vcs2->data_norm;
- }
-
- if (vino_drvdata->camera_owner == vcs->channel) {
- /* Transfer the ownership or release the input */
- if (vcs2->input == VINO_INPUT_D1) {
- vino_drvdata->camera_owner = vcs2->channel;
- } else {
- vino_drvdata->camera_owner = VINO_NO_CHANNEL;
- }
- }
- break;
- case VINO_INPUT_D1:
- if (!vino_drvdata->camera) {
- ret = -EINVAL;
- goto out;
- }
-
- if (vino_drvdata->camera_owner == VINO_NO_CHANNEL)
- vino_drvdata->camera_owner = vcs->channel;
-
- if (vino_drvdata->decoder_owner == vcs->channel) {
- /* Transfer the ownership or release the input */
- if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
- (vcs2->input == VINO_INPUT_SVIDEO)) {
- vino_drvdata->decoder_owner = vcs2->channel;
- } else {
- vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
- }
- }
-
- vcs->input = input;
- vcs->data_norm = VINO_DATA_NORM_D1;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- vino_set_default_clipping(vcs);
- vino_set_default_scaling(vcs);
- vino_set_default_framerate(vcs);
-
- dprintk("vino_set_input(): %s\n", vino_inputs[vcs->input].name);
-
-out:
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return ret;
-}
-
-static void vino_release_input(struct vino_channel_settings *vcs)
-{
- struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
- &vino_drvdata->b : &vino_drvdata->a;
- unsigned long flags;
-
- dprintk("vino_release_input():\n");
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- /* Release ownership of the channel
- * and if the other channel takes input from
- * the same source, transfer the ownership */
- if (vino_drvdata->camera_owner == vcs->channel) {
- if (vcs2->input == VINO_INPUT_D1) {
- vino_drvdata->camera_owner = vcs2->channel;
- } else {
- vino_drvdata->camera_owner = VINO_NO_CHANNEL;
- }
- } else if (vino_drvdata->decoder_owner == vcs->channel) {
- if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
- (vcs2->input == VINO_INPUT_SVIDEO)) {
- vino_drvdata->decoder_owner = vcs2->channel;
- } else {
- vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
- }
- }
- vcs->input = VINO_INPUT_NONE;
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-}
-
-/* execute with input_lock locked */
-static int vino_set_data_norm(struct vino_channel_settings *vcs,
- unsigned int data_norm,
- unsigned long *flags)
-{
- int err = 0;
-
- if (data_norm == vcs->data_norm)
- return 0;
-
- switch (vcs->input) {
- case VINO_INPUT_D1:
- /* only one "norm" supported */
- if (data_norm != VINO_DATA_NORM_D1)
- return -EINVAL;
- break;
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO: {
- v4l2_std_id norm;
-
- if ((data_norm != VINO_DATA_NORM_PAL)
- && (data_norm != VINO_DATA_NORM_NTSC)
- && (data_norm != VINO_DATA_NORM_SECAM))
- return -EINVAL;
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, *flags);
-
- /* Don't hold spinlocks while setting norm
- * as it may take a while... */
-
- norm = vino_data_norms[data_norm].std;
- err = decoder_call(video, s_std, norm);
-
- spin_lock_irqsave(&vino_drvdata->input_lock, *flags);
-
- if (err)
- goto out;
-
- vcs->data_norm = data_norm;
-
- vino_set_default_clipping(vcs);
- vino_set_default_scaling(vcs);
- vino_set_default_framerate(vcs);
- break;
- }
- default:
- return -EINVAL;
- }
-
-out:
- return err;
-}
-
-/* V4L2 helper functions */
-
-static int vino_find_data_format(__u32 pixelformat)
-{
- int i;
-
- for (i = 0; i < VINO_DATA_FMT_COUNT; i++) {
- if (vino_data_formats[i].pixelformat == pixelformat)
- return i;
- }
-
- return VINO_DATA_FMT_NONE;
-}
-
-static int vino_int_enum_input(struct vino_channel_settings *vcs, __u32 index)
-{
- int input = VINO_INPUT_NONE;
- unsigned long flags;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
- if (vino_drvdata->decoder && vino_drvdata->camera) {
- switch (index) {
- case 0:
- input = VINO_INPUT_COMPOSITE;
- break;
- case 1:
- input = VINO_INPUT_SVIDEO;
- break;
- case 2:
- input = VINO_INPUT_D1;
- break;
- }
- } else if (vino_drvdata->decoder) {
- switch (index) {
- case 0:
- input = VINO_INPUT_COMPOSITE;
- break;
- case 1:
- input = VINO_INPUT_SVIDEO;
- break;
- }
- } else if (vino_drvdata->camera) {
- switch (index) {
- case 0:
- input = VINO_INPUT_D1;
- break;
- }
- }
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return input;
-}
-
-/* execute with input_lock locked */
-static __u32 vino_find_input_index(struct vino_channel_settings *vcs)
-{
- __u32 index = 0;
- // FIXME: detect when no inputs available
-
- if (vino_drvdata->decoder && vino_drvdata->camera) {
- switch (vcs->input) {
- case VINO_INPUT_COMPOSITE:
- index = 0;
- break;
- case VINO_INPUT_SVIDEO:
- index = 1;
- break;
- case VINO_INPUT_D1:
- index = 2;
- break;
- }
- } else if (vino_drvdata->decoder) {
- switch (vcs->input) {
- case VINO_INPUT_COMPOSITE:
- index = 0;
- break;
- case VINO_INPUT_SVIDEO:
- index = 1;
- break;
- }
- } else if (vino_drvdata->camera) {
- switch (vcs->input) {
- case VINO_INPUT_D1:
- index = 0;
- break;
- }
- }
-
- return index;
-}
-
-/* V4L2 ioctls */
-
-static int vino_querycap(struct file *file, void *__fh,
- struct v4l2_capability *cap)
-{
- memset(cap, 0, sizeof(struct v4l2_capability));
-
- strcpy(cap->driver, vino_driver_name);
- strcpy(cap->card, vino_driver_description);
- strcpy(cap->bus_info, vino_bus_name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int vino_enum_input(struct file *file, void *__fh,
- struct v4l2_input *i)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- __u32 index = i->index;
- int input;
- dprintk("requested index = %d\n", index);
-
- input = vino_int_enum_input(vcs, index);
- if (input == VINO_INPUT_NONE)
- return -EINVAL;
-
- i->type = V4L2_INPUT_TYPE_CAMERA;
- i->std = vino_inputs[input].std;
- strcpy(i->name, vino_inputs[input].name);
-
- if (input == VINO_INPUT_COMPOSITE || input == VINO_INPUT_SVIDEO)
- decoder_call(video, g_input_status, &i->status);
- return 0;
-}
-
-static int vino_g_input(struct file *file, void *__fh,
- unsigned int *i)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- __u32 index;
- int input;
- unsigned long flags;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
- input = vcs->input;
- index = vino_find_input_index(vcs);
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- dprintk("input = %d\n", input);
-
- if (input == VINO_INPUT_NONE) {
- return -EINVAL;
- }
-
- *i = index;
-
- return 0;
-}
-
-static int vino_s_input(struct file *file, void *__fh,
- unsigned int i)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- int input;
- dprintk("requested input = %d\n", i);
-
- input = vino_int_enum_input(vcs, i);
- if (input == VINO_INPUT_NONE)
- return -EINVAL;
-
- return vino_set_input(vcs, input);
-}
-
-static int vino_querystd(struct file *file, void *__fh,
- v4l2_std_id *std)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- int err = 0;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- switch (vcs->input) {
- case VINO_INPUT_D1:
- *std = vino_inputs[vcs->input].std;
- break;
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO: {
- decoder_call(video, querystd, std);
- break;
- }
- default:
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return err;
-}
-
-static int vino_g_std(struct file *file, void *__fh,
- v4l2_std_id *std)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- *std = vino_data_norms[vcs->data_norm].std;
- dprintk("current standard = %d\n", vcs->data_norm);
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return 0;
-}
-
-static int vino_s_std(struct file *file, void *__fh,
- v4l2_std_id std)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- if (!vino_is_input_owner(vcs)) {
- ret = -EBUSY;
- goto out;
- }
-
- /* check if the standard is valid for the current input */
- if (std & vino_inputs[vcs->input].std) {
- dprintk("standard accepted\n");
-
- /* change the video norm for SAA7191
- * and accept NTSC for D1 (do nothing) */
-
- if (vcs->input == VINO_INPUT_D1)
- goto out;
-
- if (std & V4L2_STD_PAL) {
- ret = vino_set_data_norm(vcs, VINO_DATA_NORM_PAL,
- &flags);
- } else if (std & V4L2_STD_NTSC) {
- ret = vino_set_data_norm(vcs, VINO_DATA_NORM_NTSC,
- &flags);
- } else if (std & V4L2_STD_SECAM) {
- ret = vino_set_data_norm(vcs, VINO_DATA_NORM_SECAM,
- &flags);
- } else {
- ret = -EINVAL;
- }
-
- if (ret) {
- ret = -EINVAL;
- }
- } else {
- ret = -EINVAL;
- }
-
-out:
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return ret;
-}
-
-static int vino_enum_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_fmtdesc *fd)
-{
- dprintk("format index = %d\n", fd->index);
-
- if (fd->index >= VINO_DATA_FMT_COUNT)
- return -EINVAL;
- dprintk("format name = %s\n", vino_data_formats[fd->index].description);
-
- fd->pixelformat = vino_data_formats[fd->index].pixelformat;
- strcpy(fd->description, vino_data_formats[fd->index].description);
- return 0;
-}
-
-static int vino_try_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_format *f)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- struct vino_channel_settings tempvcs;
- unsigned long flags;
- struct v4l2_pix_format *pf = &f->fmt.pix;
-
- dprintk("requested: w = %d, h = %d\n",
- pf->width, pf->height);
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
- memcpy(&tempvcs, vcs, sizeof(struct vino_channel_settings));
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- tempvcs.data_format = vino_find_data_format(pf->pixelformat);
- if (tempvcs.data_format == VINO_DATA_FMT_NONE) {
- tempvcs.data_format = VINO_DATA_FMT_GREY;
- pf->pixelformat =
-			vino_data_formats[tempvcs.data_format].pixelformat;
- }
-
- /* data format must be set before clipping/scaling */
- vino_set_scaling(&tempvcs, pf->width, pf->height);
-
- dprintk("data format = %s\n",
- vino_data_formats[tempvcs.data_format].description);
-
- pf->width = (tempvcs.clipping.right - tempvcs.clipping.left) /
- tempvcs.decimation;
- pf->height = (tempvcs.clipping.bottom - tempvcs.clipping.top) /
- tempvcs.decimation;
-
- pf->field = V4L2_FIELD_INTERLACED;
- pf->bytesperline = tempvcs.line_size;
- pf->sizeimage = tempvcs.line_size *
- (tempvcs.clipping.bottom - tempvcs.clipping.top) /
- tempvcs.decimation;
- pf->colorspace =
- vino_data_formats[tempvcs.data_format].colorspace;
-
- return 0;
-}
-
-static int vino_g_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_format *f)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- struct v4l2_pix_format *pf = &f->fmt.pix;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- pf->width = (vcs->clipping.right - vcs->clipping.left) /
- vcs->decimation;
- pf->height = (vcs->clipping.bottom - vcs->clipping.top) /
- vcs->decimation;
- pf->pixelformat =
- vino_data_formats[vcs->data_format].pixelformat;
-
- pf->field = V4L2_FIELD_INTERLACED;
- pf->bytesperline = vcs->line_size;
- pf->sizeimage = vcs->line_size *
- (vcs->clipping.bottom - vcs->clipping.top) /
- vcs->decimation;
- pf->colorspace =
- vino_data_formats[vcs->data_format].colorspace;
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
- return 0;
-}
-
-static int vino_s_fmt_vid_cap(struct file *file, void *__fh,
- struct v4l2_format *f)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- int data_format;
- unsigned long flags;
- struct v4l2_pix_format *pf = &f->fmt.pix;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- data_format = vino_find_data_format(pf->pixelformat);
-
- if (data_format == VINO_DATA_FMT_NONE) {
- vcs->data_format = VINO_DATA_FMT_GREY;
- pf->pixelformat =
-			vino_data_formats[vcs->data_format].pixelformat;
- } else {
- vcs->data_format = data_format;
- }
-
- /* data format must be set before clipping/scaling */
- vino_set_scaling(vcs, pf->width, pf->height);
-
- dprintk("data format = %s\n",
- vino_data_formats[vcs->data_format].description);
-
- pf->width = vcs->clipping.right - vcs->clipping.left;
- pf->height = vcs->clipping.bottom - vcs->clipping.top;
-
- pf->field = V4L2_FIELD_INTERLACED;
- pf->bytesperline = vcs->line_size;
- pf->sizeimage = vcs->line_size *
- (vcs->clipping.bottom - vcs->clipping.top) /
- vcs->decimation;
- pf->colorspace =
- vino_data_formats[vcs->data_format].colorspace;
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
- return 0;
-}
-
-static int vino_cropcap(struct file *file, void *__fh,
- struct v4l2_cropcap *ccap)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- const struct vino_data_norm *norm;
- unsigned long flags;
-
- switch (ccap->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- norm = &vino_data_norms[vcs->data_norm];
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- ccap->bounds.left = 0;
- ccap->bounds.top = 0;
- ccap->bounds.width = norm->width;
- ccap->bounds.height = norm->height;
- memcpy(&ccap->defrect, &ccap->bounds,
- sizeof(struct v4l2_rect));
-
- ccap->pixelaspect.numerator = 1;
- ccap->pixelaspect.denominator = 1;
- break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vino_g_crop(struct file *file, void *__fh,
- struct v4l2_crop *c)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
-
- switch (c->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- c->c.left = vcs->clipping.left;
- c->c.top = vcs->clipping.top;
- c->c.width = vcs->clipping.right - vcs->clipping.left;
- c->c.height = vcs->clipping.bottom - vcs->clipping.top;
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
- break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vino_s_crop(struct file *file, void *__fh,
- const struct v4l2_crop *c)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
-
- switch (c->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- vino_set_clipping(vcs, c->c.left, c->c.top,
- c->c.width, c->c.height);
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
- break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vino_g_parm(struct file *file, void *__fh,
- struct v4l2_streamparm *sp)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- struct v4l2_captureparm *cp = &sp->parm.capture;
-
- cp->capability = V4L2_CAP_TIMEPERFRAME;
- cp->timeperframe.numerator = 1;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- cp->timeperframe.denominator = vcs->fps;
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- /* TODO: cp->readbuffers = xxx; */
-
- return 0;
-}
-
-static int vino_s_parm(struct file *file, void *__fh,
- struct v4l2_streamparm *sp)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- struct v4l2_captureparm *cp = &sp->parm.capture;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- if ((cp->timeperframe.numerator == 0) ||
- (cp->timeperframe.denominator == 0)) {
- /* reset framerate */
- vino_set_default_framerate(vcs);
- } else {
- vino_set_framerate(vcs, cp->timeperframe.denominator /
- cp->timeperframe.numerator);
- }
-
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return 0;
-}
-
-static int vino_reqbufs(struct file *file, void *__fh,
- struct v4l2_requestbuffers *rb)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
-
- if (vcs->reading)
- return -EBUSY;
-
- /* TODO: check queue type */
- if (rb->memory != V4L2_MEMORY_MMAP) {
- dprintk("type not mmap\n");
- return -EINVAL;
- }
-
- dprintk("count = %d\n", rb->count);
- if (rb->count > 0) {
- if (vino_is_capturing(vcs)) {
- dprintk("busy, capturing\n");
- return -EBUSY;
- }
-
- if (vino_queue_has_mapped_buffers(&vcs->fb_queue)) {
- dprintk("busy, buffers still mapped\n");
- return -EBUSY;
- } else {
- vcs->streaming = 0;
- vino_queue_free(&vcs->fb_queue);
- vino_queue_init(&vcs->fb_queue, &rb->count);
- }
- } else {
- vcs->streaming = 0;
- vino_capture_stop(vcs);
- vino_queue_free(&vcs->fb_queue);
- }
-
- return 0;
-}
-
-static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
- struct vino_framebuffer *fb,
- struct v4l2_buffer *b)
-{
- if (vino_queue_outgoing_contains(&vcs->fb_queue,
- fb->id)) {
- b->flags &= ~V4L2_BUF_FLAG_QUEUED;
- b->flags |= V4L2_BUF_FLAG_DONE;
- } else if (vino_queue_incoming_contains(&vcs->fb_queue,
- fb->id)) {
- b->flags &= ~V4L2_BUF_FLAG_DONE;
- b->flags |= V4L2_BUF_FLAG_QUEUED;
- } else {
- b->flags &= ~(V4L2_BUF_FLAG_DONE |
- V4L2_BUF_FLAG_QUEUED);
- }
-
- b->flags &= ~(V4L2_BUF_FLAG_TIMECODE);
-
- if (fb->map_count > 0)
- b->flags |= V4L2_BUF_FLAG_MAPPED;
-
- b->flags &= ~V4L2_BUF_FLAG_TIMESTAMP_MASK;
- b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-
- b->index = fb->id;
- b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
- V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
- b->m.offset = fb->offset;
- b->bytesused = fb->data_size;
- b->length = fb->size;
- b->field = V4L2_FIELD_INTERLACED;
- b->sequence = fb->frame_counter;
- memcpy(&b->timestamp, &fb->timestamp,
- sizeof(struct timeval));
- // b->input ?
-
- dprintk("buffer %d: length = %d, bytesused = %d, offset = %d\n",
- fb->id, fb->size, fb->data_size, fb->offset);
-}
-
-static int vino_querybuf(struct file *file, void *__fh,
- struct v4l2_buffer *b)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- struct vino_framebuffer *fb;
-
- if (vcs->reading)
- return -EBUSY;
-
- /* TODO: check queue type */
- if (b->index >= vino_queue_get_length(&vcs->fb_queue)) {
- dprintk("invalid index = %d\n",
- b->index);
- return -EINVAL;
- }
-
- fb = vino_queue_get_buffer(&vcs->fb_queue,
- b->index);
- if (fb == NULL) {
- dprintk("vino_queue_get_buffer() failed");
- return -EINVAL;
- }
-
- vino_v4l2_get_buffer_status(vcs, fb, b);
-
- return 0;
-}
-
-static int vino_qbuf(struct file *file, void *__fh,
- struct v4l2_buffer *b)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- struct vino_framebuffer *fb;
- int ret;
-
- if (vcs->reading)
- return -EBUSY;
-
- /* TODO: check queue type */
- if (b->memory != V4L2_MEMORY_MMAP) {
- dprintk("type not mmap\n");
- return -EINVAL;
- }
-
- fb = vino_capture_enqueue(vcs, b->index);
- if (fb == NULL)
- return -EINVAL;
-
- vino_v4l2_get_buffer_status(vcs, fb, b);
-
- if (vcs->streaming) {
- ret = vino_capture_next(vcs, 1);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int vino_dqbuf(struct file *file, void *__fh,
- struct v4l2_buffer *b)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned int nonblocking = file->f_flags & O_NONBLOCK;
- struct vino_framebuffer *fb;
- unsigned int incoming, outgoing;
- int err;
-
- if (vcs->reading)
- return -EBUSY;
-
- /* TODO: check queue type */
-
- err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
- if (err) {
- dprintk("vino_queue_get_incoming() failed\n");
- return -EINVAL;
- }
- err = vino_queue_get_outgoing(&vcs->fb_queue, &outgoing);
- if (err) {
- dprintk("vino_queue_get_outgoing() failed\n");
- return -EINVAL;
- }
-
- dprintk("incoming = %d, outgoing = %d\n", incoming, outgoing);
-
- if (outgoing == 0) {
- if (incoming == 0) {
- dprintk("no incoming or outgoing buffers\n");
- return -EINVAL;
- }
- if (nonblocking) {
- dprintk("non-blocking I/O was selected and "
- "there are no buffers to dequeue\n");
- return -EAGAIN;
- }
-
- err = vino_wait_for_frame(vcs);
- if (err) {
- err = vino_wait_for_frame(vcs);
- if (err) {
- /* interrupted or no frames captured because of
- * frame skipping */
- /* vino_capture_failed(vcs); */
- return -EIO;
- }
- }
- }
-
- fb = vino_queue_remove(&vcs->fb_queue, &b->index);
- if (fb == NULL) {
- dprintk("vino_queue_remove() failed\n");
- return -EINVAL;
- }
-
- err = vino_check_buffer(vcs, fb);
-
- vino_v4l2_get_buffer_status(vcs, fb, b);
-
- if (err)
- return -EIO;
-
- return 0;
-}
-
-static int vino_streamon(struct file *file, void *__fh,
- enum v4l2_buf_type i)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned int incoming;
- int ret;
- if (vcs->reading)
- return -EBUSY;
-
- if (vcs->streaming)
- return 0;
-
- // TODO: check queue type
-
- if (vino_queue_get_length(&vcs->fb_queue) < 1) {
- dprintk("no buffers allocated\n");
- return -EINVAL;
- }
-
- ret = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
- if (ret) {
- dprintk("vino_queue_get_incoming() failed\n");
- return -EINVAL;
- }
-
- vcs->streaming = 1;
-
- if (incoming > 0) {
- ret = vino_capture_next(vcs, 1);
- if (ret) {
- vcs->streaming = 0;
-
- dprintk("couldn't start capture\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int vino_streamoff(struct file *file, void *__fh,
- enum v4l2_buf_type i)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- if (vcs->reading)
- return -EBUSY;
-
- if (!vcs->streaming)
- return 0;
-
- vcs->streaming = 0;
- vino_capture_stop(vcs);
-
- return 0;
-}
-
-static int vino_queryctrl(struct file *file, void *__fh,
- struct v4l2_queryctrl *queryctrl)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- int i;
- int err = 0;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- switch (vcs->input) {
- case VINO_INPUT_D1:
- for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
- if (vino_indycam_v4l2_controls[i].id ==
- queryctrl->id) {
- memcpy(queryctrl,
- &vino_indycam_v4l2_controls[i],
- sizeof(struct v4l2_queryctrl));
- queryctrl->reserved[0] = 0;
- goto found;
- }
- }
-
- err = -EINVAL;
- break;
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO:
- for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
- if (vino_saa7191_v4l2_controls[i].id ==
- queryctrl->id) {
- memcpy(queryctrl,
- &vino_saa7191_v4l2_controls[i],
- sizeof(struct v4l2_queryctrl));
- queryctrl->reserved[0] = 0;
- goto found;
- }
- }
-
- err = -EINVAL;
- break;
- default:
- err = -EINVAL;
- }
-
- found:
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return err;
-}
-
-static int vino_g_ctrl(struct file *file, void *__fh,
- struct v4l2_control *control)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- int i;
- int err = 0;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- switch (vcs->input) {
- case VINO_INPUT_D1: {
- err = -EINVAL;
- for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
- if (vino_indycam_v4l2_controls[i].id == control->id) {
- err = 0;
- break;
- }
- }
-
- if (err)
- goto out;
-
- err = camera_call(core, g_ctrl, control);
- if (err)
- err = -EINVAL;
- break;
- }
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO: {
- err = -EINVAL;
- for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
- if (vino_saa7191_v4l2_controls[i].id == control->id) {
- err = 0;
- break;
- }
- }
-
- if (err)
- goto out;
-
- err = decoder_call(core, g_ctrl, control);
- if (err)
- err = -EINVAL;
- break;
- }
- default:
- err = -EINVAL;
- }
-
-out:
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return err;
-}
-
-static int vino_s_ctrl(struct file *file, void *__fh,
- struct v4l2_control *control)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned long flags;
- int i;
- int err = 0;
-
- spin_lock_irqsave(&vino_drvdata->input_lock, flags);
-
- if (!vino_is_input_owner(vcs)) {
- err = -EBUSY;
- goto out;
- }
-
- switch (vcs->input) {
- case VINO_INPUT_D1: {
- err = -EINVAL;
- for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
- if (vino_indycam_v4l2_controls[i].id == control->id) {
- err = 0;
- break;
- }
- }
- if (err)
- goto out;
- if (control->value < vino_indycam_v4l2_controls[i].minimum ||
- control->value > vino_indycam_v4l2_controls[i].maximum) {
- err = -ERANGE;
- goto out;
- }
- err = camera_call(core, s_ctrl, control);
- if (err)
- err = -EINVAL;
- break;
- }
- case VINO_INPUT_COMPOSITE:
- case VINO_INPUT_SVIDEO: {
- err = -EINVAL;
- for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
- if (vino_saa7191_v4l2_controls[i].id == control->id) {
- err = 0;
- break;
- }
- }
- if (err)
- goto out;
- if (control->value < vino_saa7191_v4l2_controls[i].minimum ||
- control->value > vino_saa7191_v4l2_controls[i].maximum) {
- err = -ERANGE;
- goto out;
- }
-
- err = decoder_call(core, s_ctrl, control);
- if (err)
- err = -EINVAL;
- break;
- }
- default:
- err = -EINVAL;
- }
-
-out:
- spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
-
- return err;
-}
-
-/* File operations */
-
-static int vino_open(struct file *file)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- int ret = 0;
- dprintk("open(): channel = %c\n",
- (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B');
-
- mutex_lock(&vcs->mutex);
-
- if (vcs->users) {
- dprintk("open(): driver busy\n");
- ret = -EBUSY;
- goto out;
- }
-
- ret = vino_acquire_input(vcs);
- if (ret) {
- dprintk("open(): vino_acquire_input() failed\n");
- goto out;
- }
-
- vcs->users++;
-
- out:
- mutex_unlock(&vcs->mutex);
-
- dprintk("open(): %s!\n", ret ? "failed" : "complete");
-
- return ret;
-}
-
-static int vino_close(struct file *file)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- dprintk("close():\n");
-
- mutex_lock(&vcs->mutex);
-
- vcs->users--;
-
- if (!vcs->users) {
- vino_release_input(vcs);
-
- /* stop DMA and free buffers */
- vino_capture_stop(vcs);
- vino_queue_free(&vcs->fb_queue);
- }
-
- mutex_unlock(&vcs->mutex);
-
- return 0;
-}
-
-static void vino_vm_open(struct vm_area_struct *vma)
-{
- struct vino_framebuffer *fb = vma->vm_private_data;
-
- fb->map_count++;
- dprintk("vino_vm_open(): count = %d\n", fb->map_count);
-}
-
-static void vino_vm_close(struct vm_area_struct *vma)
-{
- struct vino_framebuffer *fb = vma->vm_private_data;
-
- fb->map_count--;
- dprintk("vino_vm_close(): count = %d\n", fb->map_count);
-}
-
-static const struct vm_operations_struct vino_vm_ops = {
- .open = vino_vm_open,
- .close = vino_vm_close,
-};
-
-static int vino_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
-
- unsigned long start = vma->vm_start;
- unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-
- struct vino_framebuffer *fb = NULL;
- unsigned int i, length;
- int ret = 0;
-
- dprintk("mmap():\n");
-
- // TODO: reject mmap if already mapped
-
- if (mutex_lock_interruptible(&vcs->mutex))
- return -EINTR;
-
- if (vcs->reading) {
- ret = -EBUSY;
- goto out;
- }
-
- // TODO: check queue type
-
- if (!(vma->vm_flags & VM_WRITE)) {
- dprintk("mmap(): app bug: PROT_WRITE please\n");
- ret = -EINVAL;
- goto out;
- }
- if (!(vma->vm_flags & VM_SHARED)) {
- dprintk("mmap(): app bug: MAP_SHARED please\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* find the correct buffer using offset */
- length = vino_queue_get_length(&vcs->fb_queue);
- if (length == 0) {
- dprintk("mmap(): queue not initialized\n");
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < length; i++) {
- fb = vino_queue_get_buffer(&vcs->fb_queue, i);
- if (fb == NULL) {
- dprintk("mmap(): vino_queue_get_buffer() failed\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (fb->offset == offset)
- goto found;
- }
-
- dprintk("mmap(): invalid offset = %lu\n", offset);
- ret = -EINVAL;
- goto out;
-
-found:
- dprintk("mmap(): buffer = %d\n", i);
-
- if (size > (fb->desc_table.page_count * PAGE_SIZE)) {
- dprintk("mmap(): failed: size = %lu > %lu\n",
- size, fb->desc_table.page_count * PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < fb->desc_table.page_count; i++) {
- unsigned long pfn =
- virt_to_phys((void *)fb->desc_table.virtual[i]) >>
- PAGE_SHIFT;
-
- if (size < PAGE_SIZE)
- break;
-
- // protection was: PAGE_READONLY
- if (remap_pfn_range(vma, start, pfn, PAGE_SIZE,
- vma->vm_page_prot)) {
- dprintk("mmap(): remap_pfn_range() failed\n");
- ret = -EAGAIN;
- goto out;
- }
-
- start += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- fb->map_count = 1;
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_IO;
- vma->vm_private_data = fb;
- vma->vm_file = file;
- vma->vm_ops = &vino_vm_ops;
-
-out:
- mutex_unlock(&vcs->mutex);
-
- return ret;
-}
-
-static unsigned int vino_poll(struct file *file, poll_table *pt)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- unsigned int outgoing;
- unsigned int ret = 0;
-
- // lock mutex (?)
- // TODO: this has to be corrected for different read modes
-
- dprintk("poll():\n");
-
- if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
- dprintk("poll(): vino_queue_get_outgoing() failed\n");
- ret = POLLERR;
- goto error;
- }
- if (outgoing > 0)
- goto over;
-
- poll_wait(file, &vcs->fb_queue.frame_wait_queue, pt);
-
- if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
- dprintk("poll(): vino_queue_get_outgoing() failed\n");
- ret = POLLERR;
- goto error;
- }
-
-over:
- dprintk("poll(): data %savailable\n",
- (outgoing > 0) ? "" : "not ");
-
- if (outgoing > 0)
- ret = POLLIN | POLLRDNORM;
-
-error:
- return ret;
-}
-
-static long vino_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct vino_channel_settings *vcs = video_drvdata(file);
- long ret;
-
- if (mutex_lock_interruptible(&vcs->mutex))
- return -EINTR;
-
- ret = video_ioctl2(file, cmd, arg);
-
- mutex_unlock(&vcs->mutex);
-
- return ret;
-}
-
-/* Initialization and cleanup */
-
-/* __initdata */
-static int vino_init_stage;
-
-const struct v4l2_ioctl_ops vino_ioctl_ops = {
- .vidioc_enum_fmt_vid_cap = vino_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vino_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vino_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vino_try_fmt_vid_cap,
- .vidioc_querycap = vino_querycap,
- .vidioc_enum_input = vino_enum_input,
- .vidioc_g_input = vino_g_input,
- .vidioc_s_input = vino_s_input,
- .vidioc_g_std = vino_g_std,
- .vidioc_s_std = vino_s_std,
- .vidioc_querystd = vino_querystd,
- .vidioc_cropcap = vino_cropcap,
- .vidioc_s_crop = vino_s_crop,
- .vidioc_g_crop = vino_g_crop,
- .vidioc_s_parm = vino_s_parm,
- .vidioc_g_parm = vino_g_parm,
- .vidioc_reqbufs = vino_reqbufs,
- .vidioc_querybuf = vino_querybuf,
- .vidioc_qbuf = vino_qbuf,
- .vidioc_dqbuf = vino_dqbuf,
- .vidioc_streamon = vino_streamon,
- .vidioc_streamoff = vino_streamoff,
- .vidioc_queryctrl = vino_queryctrl,
- .vidioc_g_ctrl = vino_g_ctrl,
- .vidioc_s_ctrl = vino_s_ctrl,
-};
-
-static const struct v4l2_file_operations vino_fops = {
- .owner = THIS_MODULE,
- .open = vino_open,
- .release = vino_close,
- .unlocked_ioctl = vino_ioctl,
- .mmap = vino_mmap,
- .poll = vino_poll,
-};
-
-static struct video_device vdev_template = {
- .name = "NOT SET",
- .fops = &vino_fops,
- .ioctl_ops = &vino_ioctl_ops,
- .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
-};
-
-static void vino_module_cleanup(int stage)
-{
- switch(stage) {
- case 11:
- video_unregister_device(vino_drvdata->b.vdev);
- vino_drvdata->b.vdev = NULL;
- case 10:
- video_unregister_device(vino_drvdata->a.vdev);
- vino_drvdata->a.vdev = NULL;
- case 9:
- i2c_del_adapter(&vino_i2c_adapter);
- case 8:
- free_irq(SGI_VINO_IRQ, NULL);
- case 7:
- if (vino_drvdata->b.vdev) {
- video_device_release(vino_drvdata->b.vdev);
- vino_drvdata->b.vdev = NULL;
- }
- case 6:
- if (vino_drvdata->a.vdev) {
- video_device_release(vino_drvdata->a.vdev);
- vino_drvdata->a.vdev = NULL;
- }
- case 5:
- /* all entries in dma_cpu dummy table have the same address */
- dma_unmap_single(NULL,
- vino_drvdata->dummy_desc_table.dma_cpu[0],
- PAGE_SIZE, DMA_FROM_DEVICE);
- dma_free_coherent(NULL, VINO_DUMMY_DESC_COUNT
- * sizeof(dma_addr_t),
-				  (void *)vino_drvdata->dummy_desc_table.dma_cpu,
- vino_drvdata->dummy_desc_table.dma);
- case 4:
- free_page(vino_drvdata->dummy_page);
- case 3:
- v4l2_device_unregister(&vino_drvdata->v4l2_dev);
- case 2:
- kfree(vino_drvdata);
- case 1:
- iounmap(vino);
- case 0:
- break;
- default:
- dprintk("vino_module_cleanup(): invalid cleanup stage = %d\n",
- stage);
- }
-}
-
-static int vino_probe(void)
-{
- unsigned long rev_id;
-
- if (ip22_is_fullhouse()) {
- printk(KERN_ERR "VINO doesn't exist in IP22 Fullhouse\n");
- return -ENODEV;
- }
-
- if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
- printk(KERN_ERR "VINO is not found (EISA BUS not present)\n");
- return -ENODEV;
- }
-
- vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino));
- if (!vino) {
- printk(KERN_ERR "VINO: ioremap() failed\n");
- return -EIO;
- }
- vino_init_stage++;
-
- if (get_dbe(rev_id, &(vino->rev_id))) {
- printk(KERN_ERR "Failed to read VINO revision register\n");
- vino_module_cleanup(vino_init_stage);
- return -ENODEV;
- }
-
- if (VINO_ID_VALUE(rev_id) != VINO_CHIP_ID) {
- printk(KERN_ERR "Unknown VINO chip ID (Rev/ID: 0x%02lx)\n",
- rev_id);
- vino_module_cleanup(vino_init_stage);
- return -ENODEV;
- }
-
- printk(KERN_INFO "VINO revision %ld found\n", VINO_REV_NUM(rev_id));
-
- return 0;
-}
-
-static int vino_init(void)
-{
- dma_addr_t dma_dummy_address;
- int err;
- int i;
-
- vino_drvdata = kzalloc(sizeof(struct vino_settings), GFP_KERNEL);
- if (!vino_drvdata) {
- vino_module_cleanup(vino_init_stage);
- return -ENOMEM;
- }
- vino_init_stage++;
- strlcpy(vino_drvdata->v4l2_dev.name, "vino",
- sizeof(vino_drvdata->v4l2_dev.name));
- err = v4l2_device_register(NULL, &vino_drvdata->v4l2_dev);
- if (err)
- return err;
- vino_init_stage++;
-
- /* create a dummy dma descriptor */
- vino_drvdata->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!vino_drvdata->dummy_page) {
- vino_module_cleanup(vino_init_stage);
- return -ENOMEM;
- }
- vino_init_stage++;
-
- // TODO: use page_count in dummy_desc_table
-
- vino_drvdata->dummy_desc_table.dma_cpu =
- dma_alloc_coherent(NULL,
- VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t),
- &vino_drvdata->dummy_desc_table.dma,
- GFP_KERNEL | GFP_DMA);
- if (!vino_drvdata->dummy_desc_table.dma_cpu) {
- vino_module_cleanup(vino_init_stage);
- return -ENOMEM;
- }
- vino_init_stage++;
-
- dma_dummy_address = dma_map_single(NULL,
- (void *)vino_drvdata->dummy_page,
- PAGE_SIZE, DMA_FROM_DEVICE);
- for (i = 0; i < VINO_DUMMY_DESC_COUNT; i++) {
- vino_drvdata->dummy_desc_table.dma_cpu[i] = dma_dummy_address;
- }
-
- /* initialize VINO */
-
- vino->control = 0;
- vino->a.next_4_desc = vino_drvdata->dummy_desc_table.dma;
- vino->b.next_4_desc = vino_drvdata->dummy_desc_table.dma;
- udelay(VINO_DESC_FETCH_DELAY);
-
- vino->intr_status = 0;
-
- vino->a.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
- vino->b.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
-
- return 0;
-}
-
-static int vino_init_channel_settings(struct vino_channel_settings *vcs,
- unsigned int channel, const char *name)
-{
- vcs->channel = channel;
- vcs->input = VINO_INPUT_NONE;
- vcs->alpha = 0;
- vcs->users = 0;
- vcs->data_format = VINO_DATA_FMT_GREY;
- vcs->data_norm = VINO_DATA_NORM_NTSC;
- vcs->decimation = 1;
- vino_set_default_clipping(vcs);
- vino_set_default_framerate(vcs);
-
- vcs->capturing = 0;
-
- mutex_init(&vcs->mutex);
- spin_lock_init(&vcs->capture_lock);
-
- mutex_init(&vcs->fb_queue.queue_mutex);
- spin_lock_init(&vcs->fb_queue.queue_lock);
- init_waitqueue_head(&vcs->fb_queue.frame_wait_queue);
-
- vcs->vdev = video_device_alloc();
- if (!vcs->vdev) {
- vino_module_cleanup(vino_init_stage);
- return -ENOMEM;
- }
- vino_init_stage++;
-
- memcpy(vcs->vdev, &vdev_template,
- sizeof(struct video_device));
- strcpy(vcs->vdev->name, name);
- vcs->vdev->release = video_device_release;
- vcs->vdev->v4l2_dev = &vino_drvdata->v4l2_dev;
-
- video_set_drvdata(vcs->vdev, vcs);
-
- return 0;
-}
-
-static int __init vino_module_init(void)
-{
- int ret;
-
- printk(KERN_INFO "SGI VINO driver version %s\n",
- VINO_MODULE_VERSION);
-
- ret = vino_probe();
- if (ret)
- return ret;
-
- ret = vino_init();
- if (ret)
- return ret;
-
- /* initialize data structures */
-
- spin_lock_init(&vino_drvdata->vino_lock);
- spin_lock_init(&vino_drvdata->input_lock);
-
- ret = vino_init_channel_settings(&vino_drvdata->a, VINO_CHANNEL_A,
- vino_vdev_name_a);
- if (ret)
- return ret;
-
- ret = vino_init_channel_settings(&vino_drvdata->b, VINO_CHANNEL_B,
- vino_vdev_name_b);
- if (ret)
- return ret;
-
- /* initialize hardware and register V4L devices */
-
- ret = request_irq(SGI_VINO_IRQ, vino_interrupt, 0,
- vino_driver_description, NULL);
- if (ret) {
- printk(KERN_ERR "VINO: requesting IRQ %02d failed\n",
- SGI_VINO_IRQ);
- vino_module_cleanup(vino_init_stage);
- return -EAGAIN;
- }
- vino_init_stage++;
-
- ret = i2c_add_adapter(&vino_i2c_adapter);
- if (ret) {
- printk(KERN_ERR "VINO I2C bus registration failed\n");
- vino_module_cleanup(vino_init_stage);
- return ret;
- }
- i2c_set_adapdata(&vino_i2c_adapter, &vino_drvdata->v4l2_dev);
- vino_init_stage++;
-
- ret = video_register_device(vino_drvdata->a.vdev,
- VFL_TYPE_GRABBER, -1);
- if (ret < 0) {
- printk(KERN_ERR "VINO channel A Video4Linux-device "
- "registration failed\n");
- vino_module_cleanup(vino_init_stage);
- return -EINVAL;
- }
- vino_init_stage++;
-
- ret = video_register_device(vino_drvdata->b.vdev,
- VFL_TYPE_GRABBER, -1);
- if (ret < 0) {
- printk(KERN_ERR "VINO channel B Video4Linux-device "
- "registration failed\n");
- vino_module_cleanup(vino_init_stage);
- return -EINVAL;
- }
- vino_init_stage++;
-
- vino_drvdata->decoder =
- v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
- "saa7191", 0, I2C_ADDRS(0x45));
- vino_drvdata->camera =
- v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
- "indycam", 0, I2C_ADDRS(0x2b));
-
- dprintk("init complete!\n");
-
- return 0;
-}
-
-static void __exit vino_module_exit(void)
-{
- dprintk("exiting, stage = %d ...\n", vino_init_stage);
- vino_module_cleanup(vino_init_stage);
- dprintk("cleanup complete, exit!\n");
-}
-
-module_init(vino_module_init);
-module_exit(vino_module_exit);
diff --git a/drivers/staging/media/vino/vino.h b/drivers/staging/media/vino/vino.h
deleted file mode 100644
index de2d615ae7c9..000000000000
--- a/drivers/staging/media/vino/vino.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Driver for the VINO (Video In No Out) system found in SGI Indys.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License version 2 as published by the Free Software Foundation.
- *
- * Copyright (C) 1999 Ulf Karlsson <ulfc@bun.falkenberg.se>
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- */
-
-#ifndef _VINO_H_
-#define _VINO_H_
-
-#define VINO_BASE 0x00080000 /* Vino is in the EISA address space,
- * but it is not an EISA bus card */
-#define VINO_PAGE_SIZE 4096
-
-struct sgi_vino_channel {
- u32 _pad_alpha;
- volatile u32 alpha;
-
-#define VINO_CLIP_X(x) ((x) & 0x3ff) /* bits 0:9 */
-#define VINO_CLIP_ODD(x) (((x) & 0x1ff) << 10) /* bits 10:18 */
-#define VINO_CLIP_EVEN(x) (((x) & 0x1ff) << 19) /* bits 19:27 */
- u32 _pad_clip_start;
- volatile u32 clip_start;
- u32 _pad_clip_end;
- volatile u32 clip_end;
-
-#define VINO_FRAMERT_FULL 0xfff
-#define VINO_FRAMERT_PAL (1<<0) /* 0=NTSC 1=PAL */
-#define VINO_FRAMERT_RT(x) (((x) & 0xfff) << 1) /* bits 1:12 */
- u32 _pad_frame_rate;
- volatile u32 frame_rate;
-
- u32 _pad_field_counter;
- volatile u32 field_counter;
- u32 _pad_line_size;
- volatile u32 line_size;
- u32 _pad_line_count;
- volatile u32 line_count;
- u32 _pad_page_index;
- volatile u32 page_index;
- u32 _pad_next_4_desc;
- volatile u32 next_4_desc;
- u32 _pad_start_desc_tbl;
- volatile u32 start_desc_tbl;
-
-#define VINO_DESC_JUMP (1<<30)
-#define VINO_DESC_STOP (1<<31)
-#define VINO_DESC_VALID (1<<32)
- u32 _pad_desc_0;
- volatile u32 desc_0;
- u32 _pad_desc_1;
- volatile u32 desc_1;
- u32 _pad_desc_2;
- volatile u32 desc_2;
- u32 _pad_Bdesc_3;
- volatile u32 desc_3;
-
- u32 _pad_fifo_thres;
- volatile u32 fifo_thres;
- u32 _pad_fifo_read;
- volatile u32 fifo_read;
- u32 _pad_fifo_write;
- volatile u32 fifo_write;
-};
-
-struct sgi_vino {
-#define VINO_CHIP_ID 0xb
-#define VINO_REV_NUM(x) ((x) & 0x0f)
-#define VINO_ID_VALUE(x) (((x) & 0xf0) >> 4)
- u32 _pad_rev_id;
- volatile u32 rev_id;
-
-#define VINO_CTRL_LITTLE_ENDIAN (1<<0)
-#define VINO_CTRL_A_EOF_INT (1<<1) /* Field transferred int */
-#define VINO_CTRL_A_FIFO_INT (1<<2) /* FIFO overflow int */
-#define VINO_CTRL_A_EOD_INT (1<<3) /* End of desc table int */
-#define VINO_CTRL_A_INT (VINO_CTRL_A_EOF_INT | \
- VINO_CTRL_A_FIFO_INT | \
- VINO_CTRL_A_EOD_INT)
-#define VINO_CTRL_B_EOF_INT (1<<4) /* Field transferred int */
-#define VINO_CTRL_B_FIFO_INT (1<<5) /* FIFO overflow int */
-#define VINO_CTRL_B_EOD_INT (1<<6) /* End of desc table int */
-#define VINO_CTRL_B_INT (VINO_CTRL_B_EOF_INT | \
- VINO_CTRL_B_FIFO_INT | \
- VINO_CTRL_B_EOD_INT)
-#define VINO_CTRL_A_DMA_ENBL (1<<7)
-#define VINO_CTRL_A_INTERLEAVE_ENBL (1<<8)
-#define VINO_CTRL_A_SYNC_ENBL (1<<9)
-#define VINO_CTRL_A_SELECT (1<<10) /* 1=D1 0=Philips */
-#define VINO_CTRL_A_RGB (1<<11) /* 1=RGB 0=YUV */
-#define VINO_CTRL_A_LUMA_ONLY (1<<12)
-#define VINO_CTRL_A_DEC_ENBL (1<<13) /* Decimation */
-#define VINO_CTRL_A_DEC_SCALE_MASK 0x1c000 /* bits 14:17 */
-#define VINO_CTRL_A_DEC_SCALE_SHIFT (14)
-#define VINO_CTRL_A_DEC_HOR_ONLY (1<<17) /* Horizontal only */
-#define VINO_CTRL_A_DITHER (1<<18) /* 24 -> 8 bit dither */
-#define VINO_CTRL_B_DMA_ENBL (1<<19)
-#define VINO_CTRL_B_INTERLEAVE_ENBL (1<<20)
-#define VINO_CTRL_B_SYNC_ENBL (1<<21)
-#define VINO_CTRL_B_SELECT (1<<22) /* 1=D1 0=Philips */
-#define VINO_CTRL_B_RGB (1<<23) /* 1=RGB 0=YUV */
-#define VINO_CTRL_B_LUMA_ONLY (1<<24)
-#define VINO_CTRL_B_DEC_ENBL (1<<25) /* Decimation */
-#define VINO_CTRL_B_DEC_SCALE_MASK 0x1c000000 /* bits 26:28 */
-#define VINO_CTRL_B_DEC_SCALE_SHIFT (26)
-#define VINO_CTRL_B_DEC_HOR_ONLY (1<<29) /* Decimation horizontal only */
-#define VINO_CTRL_B_DITHER (1<<30) /* ChanB 24 -> 8 bit dither */
- u32 _pad_control;
- volatile u32 control;
-
-#define VINO_INTSTAT_A_EOF (1<<0) /* Field transferred int */
-#define VINO_INTSTAT_A_FIFO (1<<1) /* FIFO overflow int */
-#define VINO_INTSTAT_A_EOD (1<<2) /* End of desc table int */
-#define VINO_INTSTAT_A (VINO_INTSTAT_A_EOF | \
- VINO_INTSTAT_A_FIFO | \
- VINO_INTSTAT_A_EOD)
-#define VINO_INTSTAT_B_EOF (1<<3) /* Field transferred int */
-#define VINO_INTSTAT_B_FIFO (1<<4) /* FIFO overflow int */
-#define VINO_INTSTAT_B_EOD (1<<5) /* End of desc table int */
-#define VINO_INTSTAT_B (VINO_INTSTAT_B_EOF | \
- VINO_INTSTAT_B_FIFO | \
- VINO_INTSTAT_B_EOD)
- u32 _pad_intr_status;
- volatile u32 intr_status;
-
- u32 _pad_i2c_control;
- volatile u32 i2c_control;
- u32 _pad_i2c_data;
- volatile u32 i2c_data;
-
- struct sgi_vino_channel a;
- struct sgi_vino_channel b;
-};
-
-#endif
diff --git a/drivers/staging/mt29f_spinand/Kconfig b/drivers/staging/mt29f_spinand/Kconfig
index 403174817be7..f3f9cb3b5c35 100644
--- a/drivers/staging/mt29f_spinand/Kconfig
+++ b/drivers/staging/mt29f_spinand/Kconfig
@@ -12,5 +12,5 @@ config MTD_SPINAND_ONDIEECC
bool "Use SPINAND internal ECC"
depends on MTD_SPINAND_MT29F
help
- Internel ECC.
+ Internal ECC.
Enables Hardware ECC support for Micron SPI NAND.
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 3628bcb840c3..3b191fce45ec 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -626,7 +626,8 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- u8 retval, status;
+ int retval;
+ u8 status;
uint8_t *p = buf;
int eccsize = chip->ecc.size;
int eccsteps = chip->ecc.steps;
@@ -640,6 +641,13 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
while (1) {
retval = spinand_read_status(info->spi, &status);
+ if (retval < 0) {
+ dev_err(&mtd->dev,
+ "error %d reading status register\n",
+ retval);
+ return retval;
+ }
+
if ((status & STATUS_OIP_MASK) == STATUS_READY) {
if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
pr_info("spinand: ECC error\n");
@@ -685,6 +693,13 @@ static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
while (time_before(jiffies, timeo)) {
retval = spinand_read_status(info->spi, &status);
+ if (retval < 0) {
+ dev_err(&mtd->dev,
+ "error %d reading status register\n",
+ retval);
+ return retval;
+ }
+
if ((status & STATUS_OIP_MASK) == STATUS_READY)
return 0;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 5ecb3e6a5bb3..e8aae09d1624 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -788,7 +788,7 @@ void xlr_set_gmac_speed(struct xlr_net_priv *priv)
xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117);
priv->phy_speed = speed;
}
- /* Set SGMII speed in Interface controll reg */
+ /* Set SGMII speed in Interface control reg */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
if (speed == SPEED_10)
xlr_nae_wreg(priv->base_addr,
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..5868ebb8389e 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
- .id = 1,
},
{
.name = "nvec-mouse",
- .id = 1,
},
{
.name = "nvec-power",
- .id = 1,
+ .id = 0,
},
{
.name = "nvec-power",
- .id = 2,
+ .id = 1,
},
{
.name = "nvec-paz00",
- .id = 1,
},
};
@@ -259,7 +256,7 @@ static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
* and return immediately.
*
* Returns: 0 on success, a negative error code on failure. If a failure
- * occured, the nvec driver may print an error.
+ * occurred, the nvec driver may print an error.
*/
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
short size)
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+ ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
dev_err(nvec->dev, "error adding subdevices\n");
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 6b8b108c4e6d..1daeb3125a1f 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -2687,7 +2687,7 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
/*
* Read the channel config info so we can figure out how much data
- * transfered
+ * transferred
*/
usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCCHARX(channel, usb->index));
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index fcbe836aa997..22667dbb10d8 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -109,6 +109,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
int interface = cvmx_helper_get_interface_num(work->ipprt);
int index = cvmx_helper_get_interface_index_num(work->ipprt);
union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+
gmxx_rxx_frm_ctl.u64 =
cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
@@ -214,6 +215,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
did_work_request = 0;
if (work == NULL) {
union cvmx_pow_wq_int wq_int;
+
wq_int.u64 = 0;
wq_int.s.iq_dis = 1 << pow_receive_group;
wq_int.s.wq_int = 1 << pow_receive_group;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ee321496dcdd..460e8545904f 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -798,7 +798,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
cvm_oct_rx_initialize();
/*
- * 150 uS: about 10 1500-byte packtes at 1GE.
+ * 150 uS: about 10 1500-byte packets at 1GE.
*/
cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 6a9a8815477c..bc7e664cc8a7 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -780,7 +780,7 @@ static const struct i2c_device_id dcon_idtable[] = {
};
MODULE_DEVICE_TABLE(i2c, dcon_idtable);
-struct i2c_driver dcon_driver = {
+static struct i2c_driver dcon_driver = {
.driver = {
.name = "olpc_dcon",
.pm = &dcon_pm_ops,
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 77e8eb5a5abd..0c5a10c69401 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -52,7 +52,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
* Determine the current state by reading the GPIO bit; earlier
* stages of the boot process have established the state.
*
- * Note that we read GPIO_OUPUT_VAL rather than GPIO_READ_BACK here;
+ * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
* this is because OFW will disable input for the pin and set a value..
* READ_BACK will only contain a valid value if input is enabled and
* then a value is set. So, future readings of the pin can use
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 98325b7b4462..6ed35b6ecf0d 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -130,6 +130,30 @@
#define LCD_FLAG_N 0x0040 /* 2-rows mode */
#define LCD_FLAG_L 0x0080 /* backlight enabled */
+/* LCD commands */
+#define LCD_CMD_DISPLAY_CLEAR 0x01 /* Clear entire display */
+
+#define LCD_CMD_ENTRY_MODE 0x04 /* Set entry mode */
+#define LCD_CMD_CURSOR_INC 0x02 /* Increment cursor */
+
+#define LCD_CMD_DISPLAY_CTRL 0x08 /* Display control */
+#define LCD_CMD_DISPLAY_ON 0x04 /* Set display on */
+#define LCD_CMD_CURSOR_ON 0x02 /* Set cursor on */
+#define LCD_CMD_BLINK_ON 0x01 /* Set blink on */
+
+#define LCD_CMD_SHIFT 0x10 /* Shift cursor/display */
+#define LCD_CMD_DISPLAY_SHIFT 0x08 /* Shift display instead of cursor */
+#define LCD_CMD_SHIFT_RIGHT 0x04 /* Shift display/cursor to the right */
+
+#define LCD_CMD_FUNCTION_SET 0x20 /* Set function */
+#define LCD_CMD_DATA_LEN_8BITS 0x10 /* Set data length to 8 bits */
+#define LCD_CMD_TWO_LINES 0x08 /* Set to two display lines */
+#define LCD_CMD_FONT_5X10_DOTS 0x04 /* Set char font to 5x10 dots */
+
+#define LCD_CMD_SET_CGRAM_ADDR 0x40 /* Set char generator RAM address */
+
+#define LCD_CMD_SET_DDRAM_ADDR 0x80 /* Set display data RAM address */
+
#define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */
#define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */
@@ -228,9 +252,6 @@ static struct {
bool initialized;
bool must_clear;
- /* TODO: use bool here? */
- char left_shift;
-
int height;
int width;
int bwidth;
@@ -759,7 +780,7 @@ static void long_sleep(int ms)
if (in_interrupt()) {
mdelay(ms);
} else {
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((ms * HZ + 999) / 1000);
}
}
@@ -886,7 +907,7 @@ static void lcd_write_data_tilcd(int data)
static void lcd_gotoxy(void)
{
- lcd_write_cmd(0x80 /* set DDRAM address */
+ lcd_write_cmd(LCD_CMD_SET_DDRAM_ADDR
| (lcd.addr.y ? lcd.hwidth : 0)
/* we force the cursor to stay at the end of the
line if it wants to go farther */
@@ -994,7 +1015,7 @@ static void lcd_clear_fast_tilcd(void)
/* clears the display and resets X/Y */
static void lcd_clear_display(void)
{
- lcd_write_cmd(0x01); /* clear display */
+ lcd_write_cmd(LCD_CMD_DISPLAY_CLEAR);
lcd.addr.x = 0;
lcd.addr.y = 0;
/* we must wait a few milliseconds (15) */
@@ -1008,26 +1029,29 @@ static void lcd_init_display(void)
long_sleep(20); /* wait 20 ms after power-up for the paranoid */
- lcd_write_cmd(0x30); /* 8bits, 1 line, small fonts */
+	/* 8 bits, 1 line, small fonts; send this command three times */
+ lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
long_sleep(10);
- lcd_write_cmd(0x30); /* 8bits, 1 line, small fonts */
+ lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
long_sleep(10);
- lcd_write_cmd(0x30); /* 8bits, 1 line, small fonts */
+ lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
long_sleep(10);
- lcd_write_cmd(0x30 /* set font height and lines number */
- | ((lcd.flags & LCD_FLAG_F) ? 4 : 0)
- | ((lcd.flags & LCD_FLAG_N) ? 8 : 0)
+ /* set font height and lines number */
+ lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS
+ | ((lcd.flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0)
+ | ((lcd.flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0)
);
long_sleep(10);
- lcd_write_cmd(0x08); /* display off, cursor off, blink off */
+ /* display off, cursor off, blink off */
+ lcd_write_cmd(LCD_CMD_DISPLAY_CTRL);
long_sleep(10);
- lcd_write_cmd(0x08 /* set display mode */
- | ((lcd.flags & LCD_FLAG_D) ? 4 : 0)
- | ((lcd.flags & LCD_FLAG_C) ? 2 : 0)
- | ((lcd.flags & LCD_FLAG_B) ? 1 : 0)
+ lcd_write_cmd(LCD_CMD_DISPLAY_CTRL /* set display mode */
+ | ((lcd.flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0)
+ | ((lcd.flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0)
+ | ((lcd.flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0)
);
lcd_backlight((lcd.flags & LCD_FLAG_L) ? 1 : 0);
@@ -1035,7 +1059,7 @@ static void lcd_init_display(void)
long_sleep(10);
/* entry mode set : increment, cursor shifting */
- lcd_write_cmd(0x06);
+ lcd_write_cmd(LCD_CMD_ENTRY_MODE | LCD_CMD_CURSOR_INC);
lcd_clear_display();
}
@@ -1119,7 +1143,7 @@ static inline int handle_lcd_special_code(void)
if (lcd.addr.x > 0) {
/* back one char if not at end of line */
if (lcd.addr.x < lcd.bwidth)
- lcd_write_cmd(0x10);
+ lcd_write_cmd(LCD_CMD_SHIFT);
lcd.addr.x--;
}
processed = 1;
@@ -1127,21 +1151,20 @@ static inline int handle_lcd_special_code(void)
case 'r': /* shift cursor right */
if (lcd.addr.x < lcd.width) {
/* allow the cursor to pass the end of the line */
- if (lcd.addr.x <
- (lcd.bwidth - 1))
- lcd_write_cmd(0x14);
+ if (lcd.addr.x < (lcd.bwidth - 1))
+ lcd_write_cmd(LCD_CMD_SHIFT |
+ LCD_CMD_SHIFT_RIGHT);
lcd.addr.x++;
}
processed = 1;
break;
case 'L': /* shift display left */
- lcd.left_shift++;
- lcd_write_cmd(0x18);
+ lcd_write_cmd(LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT);
processed = 1;
break;
case 'R': /* shift display right */
- lcd.left_shift--;
- lcd_write_cmd(0x1C);
+ lcd_write_cmd(LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT |
+ LCD_CMD_SHIFT_RIGHT);
processed = 1;
break;
case 'k': { /* kill end of line */
@@ -1157,7 +1180,6 @@ static inline int handle_lcd_special_code(void)
}
case 'I': /* reinitialize display */
lcd_init_display();
- lcd.left_shift = 0;
processed = 1;
break;
case 'G': {
@@ -1211,7 +1233,7 @@ static inline int handle_lcd_special_code(void)
esc++;
}
- lcd_write_cmd(0x40 | (cgaddr * 8));
+ lcd_write_cmd(LCD_CMD_SET_CGRAM_ADDR | (cgaddr * 8));
for (addr = 0; addr < cgoffset; addr++)
lcd_write_data(cgbytes[addr]);
@@ -1244,21 +1266,29 @@ static inline int handle_lcd_special_code(void)
break;
}
+	/* TODO: clean up the deep indentation below */
/* Check whether one flag was changed */
if (oldflags != lcd.flags) {
/* check whether one of B,C,D flags were changed */
if ((oldflags ^ lcd.flags) &
(LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
/* set display mode */
- lcd_write_cmd(0x08
- | ((lcd.flags & LCD_FLAG_D) ? 4 : 0)
- | ((lcd.flags & LCD_FLAG_C) ? 2 : 0)
- | ((lcd.flags & LCD_FLAG_B) ? 1 : 0));
+ lcd_write_cmd(LCD_CMD_DISPLAY_CTRL
+ | ((lcd.flags & LCD_FLAG_D)
+ ? LCD_CMD_DISPLAY_ON : 0)
+ | ((lcd.flags & LCD_FLAG_C)
+ ? LCD_CMD_CURSOR_ON : 0)
+ | ((lcd.flags & LCD_FLAG_B)
+ ? LCD_CMD_BLINK_ON : 0));
/* check whether one of F,N flags was changed */
else if ((oldflags ^ lcd.flags) & (LCD_FLAG_F | LCD_FLAG_N))
- lcd_write_cmd(0x30
- | ((lcd.flags & LCD_FLAG_F) ? 4 : 0)
- | ((lcd.flags & LCD_FLAG_N) ? 8 : 0));
+ lcd_write_cmd(LCD_CMD_FUNCTION_SET
+ | LCD_CMD_DATA_LEN_8BITS
+ | ((lcd.flags & LCD_FLAG_F)
+ ? LCD_CMD_FONT_5X10_DOTS : 0)
+ | ((lcd.flags & LCD_FLAG_N)
+ ? LCD_CMD_TWO_LINES
+ : 0));
/* check whether L flag was changed */
else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
if (lcd.flags & (LCD_FLAG_L))
@@ -1297,13 +1327,13 @@ static void lcd_write_char(char c)
end of the line */
if (lcd.addr.x < lcd.bwidth)
/* back one char */
- lcd_write_cmd(0x10);
+ lcd_write_cmd(LCD_CMD_SHIFT);
lcd.addr.x--;
}
/* replace with a space */
lcd_write_data(' ');
/* back one char again */
- lcd_write_cmd(0x10);
+ lcd_write_cmd(LCD_CMD_SHIFT);
break;
case '\014':
/* quickly clear the display */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index d61842ed673e..da19145c49c5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -509,7 +509,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
tx_ra_bitmap |= ((raid<<28)&0xf0000000);
DBG_88E("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = 0x%x\n",
- __func__ , psta->mac_id, raid , tx_ra_bitmap, arg);
+ __func__, psta->mac_id, raid, tx_ra_bitmap, arg);
/* bitmap[0:27] = tx_rate_bitmap */
/* bitmap[28:31]= Rate Adaptive id */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index e4b7ee4c99d5..cd12dd70dd88 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -484,17 +484,8 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
/* fall through */
case WIFI_ASSOCREQ:
case WIFI_REASSOCREQ:
- _mgt_dispatcher(padapter, ptable, precv_frame);
- break;
case WIFI_PROBEREQ:
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- _mgt_dispatcher(padapter, ptable, precv_frame);
- else
- _mgt_dispatcher(padapter, ptable, precv_frame);
- break;
case WIFI_BEACON:
- _mgt_dispatcher(padapter, ptable, precv_frame);
- break;
case WIFI_ACTION:
_mgt_dispatcher(padapter, ptable, precv_frame);
break;
@@ -577,13 +568,14 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
uint len = precv_frame->len;
struct wlan_bssid_ex *pbss;
int ret = _SUCCESS;
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
report_survey_event(padapter, precv_frame);
return _SUCCESS;
}
- if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
+ if (!memcmp(GetAddr3Ptr(pframe), pnetwork->MacAddress, ETH_ALEN)) {
if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
/* we should update current network before auth, or some IE is wrong */
pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
@@ -1445,10 +1437,10 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 *pframe = precv_frame->rx_data;
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
/* check A3 */
- if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network),
- ETH_ALEN))
+ if (memcmp(GetAddr3Ptr(pframe), pnetwork->MacAddress, ETH_ALEN))
return _SUCCESS;
reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
@@ -1499,10 +1491,10 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 *pframe = precv_frame->rx_data;
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
/* check A3 */
- if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network),
- ETH_ALEN))
+ if (memcmp(GetAddr3Ptr(pframe), pnetwork->MacAddress, ETH_ALEN))
return _SUCCESS;
reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
@@ -2018,7 +2010,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, cur_network->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
/* pmlmeext->mgnt_seq++; */
@@ -2422,6 +2414,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (pmgntframe == NULL)
@@ -2487,9 +2480,9 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
} else {
__le32 le_tmp32;
__le16 le_tmp16;
- memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
/* setting auth algo number */
val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
@@ -2582,7 +2575,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy((void *)GetAddr3Ptr(pwlanhdr), get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy((void *)GetAddr3Ptr(pwlanhdr), pnetwork->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
@@ -2687,6 +2680,7 @@ void issue_assocreq(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
int bssrate_len = 0, sta_bssrate_len = 0;
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (pmgntframe == NULL)
@@ -2702,9 +2696,9 @@ void issue_assocreq(struct adapter *padapter)
fctrl = &(pwlanhdr->frame_ctl);
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
@@ -2879,6 +2873,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
struct xmit_priv *pxmitpriv;
struct mlme_ext_priv *pmlmeext;
struct mlme_ext_info *pmlmeinfo;
+ struct wlan_bssid_ex *pnetwork;
if (!padapter)
goto exit;
@@ -2886,6 +2881,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
pxmitpriv = &(padapter->xmitpriv);
pmlmeext = &(padapter->mlmeextpriv);
pmlmeinfo = &(pmlmeext->mlmext_info);
+ pnetwork = &(pmlmeinfo->network);
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (pmgntframe == NULL)
@@ -2914,7 +2910,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
@@ -2946,10 +2942,11 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
u32 start = jiffies;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
/* da == NULL, assume it's null data for sta to ap*/
if (da == NULL)
- da = get_my_bssid(&(pmlmeinfo->network));
+ da = pnetwork->MacAddress;
do {
ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0 ? true : false);
@@ -2995,6 +2992,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
DBG_88E("%s\n", __func__);
@@ -3038,7 +3036,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
@@ -3069,10 +3067,11 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
u32 start = jiffies;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
/* da == NULL, assume it's null data for sta to ap*/
if (da == NULL)
- da = get_my_bssid(&(pmlmeinfo->network));
+ da = pnetwork->MacAddress;
do {
ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0 ? true : false);
@@ -3115,6 +3114,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
int ret = _FAIL;
__le16 le_tmp;
@@ -3137,7 +3137,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
@@ -3288,6 +3288,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
DBG_88E("%s, category=%d, action=%d, status=%d\n", __func__, category, action, status);
@@ -3310,7 +3311,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
/* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
@@ -3420,6 +3421,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
struct __queue *queue = &(pmlmepriv->scanned_queue);
u8 InfoContent[16] = {0};
u8 ICS[8][15];
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
return;
@@ -3449,9 +3452,9 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
fctrl = &(pwlanhdr->frame_ctl);
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr1, cur_network->MacAddress, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, cur_network->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
@@ -4042,9 +4045,10 @@ unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
/* check A3 */
- if (memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
+ if (memcmp(MacAddr, pnetwork->MacAddress, ETH_ALEN))
return _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -4924,11 +4928,6 @@ void addba_timer_hdl(void *function_context)
}
}
-u8 NULL_hdl(struct adapter *padapter, u8 *pbuf)
-{
- return H2C_SUCCESS;
-}
-
u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
{
u8 type;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 324c1a7fd0bc..a3ffc691be9a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -405,11 +405,6 @@ int get_bsstype(unsigned short capability)
return 0;
}
-__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
-{
- return pnetwork->MacAddress;
-}
-
u16 get_beacon_interval(struct wlan_bssid_ex *bss)
{
__le16 val;
@@ -936,6 +931,8 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
+ if (!bssid)
+ return _FAIL;
subtype = GetFrameSubType(pframe) >> 4;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 9873998011d2..06477e834653 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -534,13 +534,8 @@ void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
pDM_DigTable->FAHighThresh = DM_false_ALARM_THRESH_HIGH;
- if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- } else {
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- }
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
@@ -1138,16 +1133,9 @@ static void FindMinimumRSSI(struct adapter *pAdapter)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
-
- /* 1 1.Determine the minimum RSSI */
- if ((check_fwstate(pmlmepriv, _FW_LINKED) == false) &&
- (pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0))
- pdmpriv->MinUndecoratedPWDBForDM = 0;
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) /* Default port */
- pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
- else /* associated entry pwdb */
- pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+
+	/* Unconditionally use the minimum entry RSSI */
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
}
void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 7f30dea1b53b..86347f2ccdfd 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -127,22 +127,6 @@ exit:
return ret;
}
-u8 rtl8188e_set_rssi_cmd(struct adapter *adapt, u8 *param)
-{
- u8 res = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (haldata->fw_ractrl) {
- ;
- } else {
- DBG_88E("==>%s fw dont support RA\n", __func__);
- res = _FAIL;
- }
-
-
- return res;
-}
-
u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
{
u8 buf[3];
@@ -276,7 +260,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, cur_network->MacAddress, ETH_ALEN);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
SetFrameSubType(pframe, WIFI_BEACON);
@@ -350,6 +334,7 @@ static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
__le16 *fctrl;
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
@@ -363,7 +348,7 @@ static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
/* BSSID. */
- memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
/* TA. */
memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
@@ -386,6 +371,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
struct wlan_network *cur_network = &pmlmepriv->cur_network;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
@@ -397,21 +383,21 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
switch (cur_network->network.InfrastructureMode) {
case Ndis802_11Infrastructure:
SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
break;
case Ndis802_11APMode:
SetFrDs(fctrl);
memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pnetwork->MacAddress, ETH_ALEN);
memcpy(pwlanhdr->addr3, myid(&(adapt->eeprompriv)), ETH_ALEN);
break;
case Ndis802_11IBSS:
default:
memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
break;
}
@@ -498,6 +484,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
u16 BufIndex;
u32 TotalPacketLen;
struct rsvdpage_loc RsvdPageLoc;
+ struct wlan_bssid_ex *pnetwork;
DBG_88E("%s\n", __func__);
ReservedPagePacket = kzalloc(1000, GFP_KERNEL);
@@ -510,6 +497,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
pxmitpriv = &adapt->xmitpriv;
pmlmeext = &adapt->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
+ pnetwork = &(pmlmeinfo->network);
TxDescLen = TXDESC_SIZE;
PageNum = 0;
@@ -541,7 +529,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
/* 3 (3) null data * 1 page */
RsvdPageLoc.LocNullData = PageNum;
- ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, get_my_bssid(&pmlmeinfo->network), false, 0, 0, false);
+ ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, pnetwork->MacAddress, false, 0, 0, false);
rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
@@ -551,7 +539,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
/* 3 (4) probe response * 1page */
RsvdPageLoc.LocProbeRsp = PageNum;
- ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, get_my_bssid(&pmlmeinfo->network), false);
+ ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, pnetwork->MacAddress, false);
rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false);
PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
@@ -562,7 +550,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
/* 3 (5) Qos null data */
RsvdPageLoc.LocQosNull = PageNum;
ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex],
- &QosNullLength, get_my_bssid(&pmlmeinfo->network), true, 0, 0, false);
+ &QosNullLength, pnetwork->MacAddress, true, 0, 0, false);
rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false);
PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 7d460eaafa35..3222d8d08b5b 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -242,20 +242,6 @@ void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
pHalFunc->hal_notch_filter = &hal_notch_filter_8188e;
}
-u8 GetEEPROMSize8188E(struct adapter *padapter)
-{
- u8 size = 0;
- u32 cr;
-
- cr = usb_read16(padapter, REG_9346CR);
- /* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
- size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
-
- MSG_88E("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
-
- return size;
-}
-
/* */
/* */
/* LLT R/W/Init function */
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 82f58f87656a..3a274770364b 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -87,7 +87,7 @@ static inline void _init_timer(struct timer_list *ptimer,
static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
{
- mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
+ mod_timer(ptimer, jiffies + msecs_to_jiffies(delay_time));
}
#define RTW_TIMER_HDL_ARGS void *FunctionContext
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index 0e78e2a357bd..42b1f22424eb 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -107,7 +107,6 @@ struct P2P_PS_CTWPeriod_t {
/* host message to firmware cmd */
void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
-u8 rtl8188e_set_rssi_cmd(struct adapter *padapter, u8 *param);
u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
u8 rssi_level);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 42ab1d288bdc..b8c42eed98c4 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -391,7 +391,6 @@ void rtl8188e_InitializeFirmwareVars(struct adapter *padapter);
s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy);
/* EFuse */
-u8 GetEEPROMSize8188E(struct adapter *padapter);
void Hal_InitPGData88E(struct adapter *padapter);
void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo);
void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo,
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 8d72ccf5f2a0..4f05aee93c9c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -496,7 +496,6 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
struct adapter *adapter, bool update_ie);
int get_bsstype(unsigned short capability);
-u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork);
u16 get_beacon_interval(struct wlan_bssid_ex *bss);
int is_client_associated_to_ap(struct adapter *padapter);
@@ -516,7 +515,7 @@ void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
void VCS_update(struct adapter *padapter, struct sta_info *psta);
void update_beacon_info(struct adapter *padapter, u8 *pframe, uint len,
- struct sta_info *psta);
+ struct sta_info *psta);
int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len);
void update_IOT_info(struct adapter *padapter);
void update_capinfo(struct adapter *adapter, u16 updatecap);
@@ -679,7 +678,6 @@ u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 NULL_hdl(struct adapter *padapter, u8 *pbuf);
u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf);
u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf);
u8 createbss_hdl(struct adapter *padapter, u8 *pbuf);
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 01b3810379ec..4fdc536cba79 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -79,7 +79,6 @@ void usb_read_port_cancel(struct adapter *adapter);
int usb_write8(struct adapter *adapter, u32 addr, u8 val);
int usb_write16(struct adapter *adapter, u32 addr, u16 val);
int usb_write32(struct adapter *adapter, u32 addr, u32 val);
-int usb_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
u32 usb_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
void usb_write_port_cancel(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index d2efa9dfc8c0..80e7ef96d807 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -615,33 +615,6 @@ int usb_write32(struct adapter *adapter, u32 addr, u32 val)
return ret;
}
-int usb_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata)
-{
- u8 request;
- u8 requesttype;
- u16 wvalue;
- u16 index;
- u16 len;
- u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
- int ret;
-
-
- request = 0x05;
- requesttype = 0x00;/* write_out */
- index = 0;/* n/a */
-
- wvalue = (u16)(addr&0x0000ffff);
- len = length;
- memcpy(buf, pdata, len);
-
- ret = usbctrl_vendorreq(adapter, request, wvalue, index, buf, len, requesttype);
-
-
- return RTW_STATUS_CODE(ret);
-}
-
-
-
static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
{
struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index 6c64e0899ffd..89ea70b0d3aa 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -167,35 +167,6 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
RT_TRACE(COMP_SEC, "=========>after set key, usconfig:%x\n", usConfig);
}
-void CAM_read_entry(struct net_device *dev, u32 iIndex)
-{
- u32 target_command = 0;
- u32 target_content = 0;
- u8 entry_i = 0;
- u32 ulStatus;
- s32 i = 100;
-
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
- target_command = entry_i+CAM_CONTENT_COUNT*iIndex;
- target_command = target_command | BIT31;
-
- while ((i--) >= 0) {
- ulStatus = read_nic_dword(dev, RWCAM);
- if (ulStatus & BIT31)
- continue;
- else
- break;
- }
- write_nic_dword(dev, RWCAM, target_command);
- RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A0: %x\n",
- target_command);
- target_content = read_nic_dword(dev, RCAMO);
- RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A8: %x\n",
- target_content);
- }
- printk(KERN_INFO "\n");
-}
-
void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index 7d075d3cbe62..3c4c0e61c181 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -41,6 +41,4 @@ void write_cam(struct net_device *dev, u8 addr, u32 data);
void CamRestoreAllEntry(struct net_device *dev);
-void CAM_read_entry(struct net_device *dev, u32 iIndex);
-
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 885315cac3a4..b8891c62af3e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1661,8 +1661,8 @@ void dm_change_dynamic_initgain_thresh(struct net_device *dev,
dm_digtable.rssi_low_thresh = dm_value;
} else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
dm_digtable.rssi_high_power_highthresh = dm_value;
- } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
- dm_digtable.rssi_high_power_highthresh = dm_value;
+ } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_LOW) {
+ dm_digtable.rssi_high_power_lowthresh = dm_value;
} else if (dm_type == DIG_TYPE_ENABLE) {
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = true;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 8e1a5d55dce8..0b4f76481bf4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -22,12 +22,6 @@
#include "r8190P_rtl8256.h"
#include "rtl_pm.h"
-int rtl8192E_save_state(struct pci_dev *dev, pm_message_t state)
-{
- printk(KERN_NOTICE "r8192E save state call (state %u).\n", state.event);
- return -EAGAIN;
-}
-
int rtl8192E_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -124,11 +118,3 @@ out:
return 0;
}
-
-int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable)
-{
- printk(KERN_NOTICE "r8192E enable wake call (state %u, enable %d).\n",
- state.event, enable);
- return -EAGAIN;
-}
-
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index e5299fc3b34a..7bfe44817f23 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -23,9 +23,7 @@
#include <linux/types.h>
#include <linux/pci.h>
-int rtl8192E_save_state(struct pci_dev *dev, pm_message_t state);
int rtl8192E_suspend(struct pci_dev *dev, pm_message_t state);
int rtl8192E_resume(struct pci_dev *dev);
-int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable);
#endif
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index c7f45080061f..1ea426b7b7ac 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -34,13 +34,13 @@ u16 MCS_DATA_RATE[2][2][77] = {
468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182,
182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156,
181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273,
- 312, 351, 312, 351, 390, 390, 429} ,
+ 312, 351, 312, 351, 390, 390, 429},
{14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520,
578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231,
173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260,
231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390,
- 433, 433, 477} } ,
+ 433, 433, 477} },
{{27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486,
540, 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648,
864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324,
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 91e98e8e5bfc..0cf38091f8c5 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -202,9 +202,7 @@ void free_rtllib(struct net_device *dev)
EXPORT_SYMBOL(free_rtllib);
u32 rtllib_debug_level;
-static int debug = \
- RTLLIB_DL_ERR
- ;
+static int debug = RTLLIB_DL_ERR;
static struct proc_dir_entry *rtllib_proc;
static int show_debug_level(struct seq_file *m, void *v)
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index cf11b042b93a..1664040efdab 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -70,8 +70,7 @@ rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq,
if (entry->skb != NULL &&
time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
RTLLIB_DEBUG_FRAG(
- "expiring fragment cache entry "
- "seq=%u last_frag=%u\n",
+ "expiring fragment cache entry seq=%u last_frag=%u\n",
entry->seq, entry->last_frag);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -188,8 +187,7 @@ static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee,
if (entry == NULL) {
RTLLIB_DEBUG_FRAG(
- "could not invalidate fragment cache "
- "entry (seq=%u)\n", seq);
+ "could not invalidate fragment cache entry (seq=%u)\n", seq);
return -1;
}
@@ -305,11 +303,9 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
atomic_dec(&crypt->refcnt);
if (res < 0) {
RTLLIB_DEBUG_DROP(
- "decryption failed (SA= %pM"
- ") res=%d\n", hdr->addr2, res);
+ "decryption failed (SA= %pM) res=%d\n", hdr->addr2, res);
if (res == -2)
- RTLLIB_DEBUG_DROP("Decryption failed ICV "
- "mismatch (key %d)\n",
+ RTLLIB_DEBUG_DROP("Decryption failed ICV mismatch (key %d)\n",
skb->data[hdrlen + 3] >> 6);
ieee->ieee_stats.rx_discards_undecryptable++;
return -1;
@@ -345,8 +341,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
- printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
- " (SA= %pM keyidx=%d)\n",
+ printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed (SA= %pM keyidx=%d)\n",
ieee->dev->name, hdr->addr2, keyidx);
return -1;
}
@@ -559,8 +554,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
bool bMatchWinStart = false, bPktInBuf = false;
unsigned long flags;
- RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Seq is %d, pTS->RxIndicateSeq"
- " is %d, WinSize is %d\n", __func__, SeqNum,
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Seq is %d, pTS->RxIndicateSeq is %d, WinSize is %d\n", __func__, SeqNum,
pTS->RxIndicateSeq, WinSize);
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
@@ -600,8 +594,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
pTS->RxIndicateSeq = SeqNum + 1 - WinSize;
else
pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum + 1)) + 1;
- RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Window Shift! IndicateSeq: %d,"
- " NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum);
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum);
}
/*
@@ -617,8 +610,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
*/
if (bMatchWinStart) {
/* Current packet is going to be indicated.*/
- RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packets indication!! "
- "IndicateSeq: %d, NewSeq: %d\n",
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",
pTS->RxIndicateSeq, SeqNum);
ieee->prxbIndicateArray[0] = prxb;
index = 1;
@@ -636,9 +628,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
if (!AddReorderEntry(pTS, pReorderEntry)) {
RTLLIB_DEBUG(RTLLIB_DL_REORDER,
- "%s(): Duplicate packet is "
- "dropped!! IndicateSeq: %d, "
- "NewSeq: %d\n",
+ "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
__func__, pTS->RxIndicateSeq,
SeqNum);
list_add_tail(&pReorderEntry->List,
@@ -652,8 +642,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
}
} else {
RTLLIB_DEBUG(RTLLIB_DL_REORDER,
- "Pkt insert into struct buffer!! "
- "IndicateSeq: %d, NewSeq: %d\n",
+ "Pkt insert into struct buffer!! IndicateSeq: %d, NewSeq: %d\n",
pTS->RxIndicateSeq, SeqNum);
}
} else {
@@ -663,9 +652,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
* indicate all the packets in struct buffer and get
* reorder entries.
*/
- RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
- " There is no reorder entry!! Packet is "
- "dropped!!\n");
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n");
{
int i;
@@ -687,8 +674,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) {
/* This protect struct buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
- RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicate"
- "Packet(): Buffer overflow!!\n");
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!!\n");
bPktInBuf = true;
break;
}
@@ -699,8 +685,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
ieee->prxbIndicateArray[index] = pReorderEntry->prxb;
- RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum"
- " %d!\n", __func__, pReorderEntry->SeqNum);
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum %d!\n", __func__, pReorderEntry->SeqNum);
index++;
list_add_tail(&pReorderEntry->List,
@@ -719,8 +704,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
pTS->RxTimeoutIndicateSeq = 0xffff;
if (index > REORDER_WIN_SIZE) {
- RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
- " Rx Reorder struct buffer full!!\n");
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder struct buffer full!!\n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock),
flags);
return;
@@ -809,14 +793,11 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
(nSubframe_Length << 8);
if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- printk(KERN_INFO "%s: A-MSDU parse error!! "
- "pRfd->nTotalSubframe : %d\n",\
+ printk(KERN_INFO "%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
__func__, rxb->nr_subframes);
- printk(KERN_INFO "%s: A-MSDU parse error!! "
- "Subframe Length: %d\n", __func__,
+ printk(KERN_INFO "%s: A-MSDU parse error!! Subframe Length: %d\n", __func__,
nSubframe_Length);
- printk(KERN_INFO "nRemain_Length is %d and "
- "nSubframe_Length is : %d\n", skb->len,
+ printk(KERN_INFO "nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len,
nSubframe_Length);
printk(KERN_INFO "The Packet SeqNum is %d\n", SeqNum);
return 0;
@@ -844,8 +825,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
sub_skb->dev = ieee->dev;
rxb->subframes[rxb->nr_subframes++] = sub_skb;
if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
- RTLLIB_DEBUG_RX("ParseSubframe(): Too many "
- "Subframes! Packets dropped!\n");
+ RTLLIB_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n");
break;
}
skb_pull(skb, nSubframe_Length);
@@ -922,8 +902,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
pRxTS->RxLastFragNum = frag;
pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
} else {
- RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!%s(): No TS!! Skip"
- " the check!!\n", __func__);
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!%s(): No TS!! Skip the check!!\n", __func__);
return -1;
}
}
@@ -996,9 +975,7 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
stype != RTLLIB_STYPE_QOS_DATA) {
if (stype != RTLLIB_STYPE_NULLFUNC)
RTLLIB_DEBUG_DROP(
- "RX: dropped data frame "
- "with no data (type=0x%02x, "
- "subtype=0x%02x)\n",
+ "RX: dropped data frame with no data (type=0x%02x, subtype=0x%02x)\n",
type, stype);
return -1;
}
@@ -1041,8 +1018,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
* frames from other than current BSS, so just drop the
* frames silently instead of filling system log with
* these reports. */
- RTLLIB_DEBUG_DROP("Decryption failed (not set)"
- " (SA= %pM)\n",
+ RTLLIB_DEBUG_DROP("Decryption failed (not set) (SA= %pM)\n",
hdr->addr2);
ieee->ieee_stats.rx_discards_undecryptable++;
return -1;
@@ -1086,8 +1062,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (!frag_skb) {
RTLLIB_DEBUG(RTLLIB_DL_RX | RTLLIB_DL_FRAG,
- "Rx cannot get skb from fragment "
- "cache (morefrag=%d seq=%u frag=%u)\n",
+ "Rx cannot get skb from fragment cache (morefrag=%d seq=%u frag=%u)\n",
(fc & RTLLIB_FCTL_MOREFRAGS) != 0,
WLAN_GET_SEQ_SEQ(sc), frag);
return -1;
@@ -1097,8 +1072,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
flen -= hdrlen;
if (frag_skb->tail + flen > frag_skb->end) {
- printk(KERN_WARNING "%s: host decrypted and "
- "reassembled frame did not fit skb\n",
+ printk(KERN_WARNING "%s: host decrypted and reassembled frame did not fit skb\n",
__func__);
rtllib_frag_cache_invalidate(ieee, hdr);
return -1;
@@ -1152,8 +1126,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
eap_get_type(eap->type));
} else {
RTLLIB_DEBUG_DROP(
- "encryption configured, but RX "
- "frame not encrypted (SA= %pM)\n",
+ "encryption configured, but RX frame not encrypted (SA= %pM)\n",
hdr->addr2);
return -1;
}
@@ -1170,9 +1143,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep &&
!rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
RTLLIB_DEBUG_DROP(
- "dropped unencrypted RX data "
- "frame from %pM"
- " (drop_unencrypted=1)\n",
+ "dropped unencrypted RX data frame from %pM (drop_unencrypted=1)\n",
hdr->addr2);
return -1;
}
@@ -1762,9 +1733,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
while (length >= sizeof(*info_element)) {
if (sizeof(*info_element) + info_element->len > length) {
- RTLLIB_DEBUG_MGMT("Info elem: parse failed: "
- "info_element->len + 2 > left : "
- "info_element->len+2=%zd left=%d, id=%d.\n",
+ RTLLIB_DEBUG_MGMT("Info elem: parse failed: info_element->len + 2 > left : info_element->len+2=%zd left=%d, id=%d.\n",
info_element->len +
sizeof(*info_element),
length, info_element->id);
@@ -2207,34 +2176,6 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
return 0;
}
-static inline u8 rtllib_SignalStrengthTranslate(u8 CurrSS)
-{
- u8 RetSS;
-
- if (CurrSS >= 71 && CurrSS <= 100)
- RetSS = 90 + ((CurrSS - 70) / 3);
- else if (CurrSS >= 41 && CurrSS <= 70)
- RetSS = 78 + ((CurrSS - 40) / 3);
- else if (CurrSS >= 31 && CurrSS <= 40)
- RetSS = 66 + (CurrSS - 30);
- else if (CurrSS >= 21 && CurrSS <= 30)
- RetSS = 54 + (CurrSS - 20);
- else if (CurrSS >= 5 && CurrSS <= 20)
- RetSS = 42 + (((CurrSS - 5) * 2) / 3);
- else if (CurrSS == 4)
- RetSS = 36;
- else if (CurrSS == 3)
- RetSS = 27;
- else if (CurrSS == 2)
- RetSS = 18;
- else if (CurrSS == 1)
- RetSS = 9;
- else
- RetSS = CurrSS;
-
- return RetSS;
-}
-
static long rtllib_translate_todbm(u8 signal_strength_index)
{
long signal_power;
@@ -2321,8 +2262,7 @@ static inline int rtllib_network_init(
}
if (network->mode == 0) {
- RTLLIB_DEBUG_SCAN("Filtered out '%s (%pM)' "
- "network.\n",
+ RTLLIB_DEBUG_SCAN("Filtered out '%s (%pM)' network.\n",
escape_essid(network->ssid,
network->ssid_len),
network->bssid);
@@ -2363,13 +2303,6 @@ static inline int is_same_network(struct rtllib_network *src,
(dst->capability & WLAN_CAPABILITY_ESS)));
}
-static inline void update_ibss_network(struct rtllib_network *dst,
- struct rtllib_network *src)
-{
- memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats));
- dst->last_scanned = jiffies;
-}
-
static inline void update_network(struct rtllib_network *dst,
struct rtllib_network *src)
@@ -2568,8 +2501,7 @@ static inline void rtllib_process_probe_response(
if (WLAN_FC_GET_STYPE(le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP) {
if (IsPassiveChannel(ieee, network->channel)) {
- printk(KERN_INFO "GetScanInfo(): For Global Domain, "
- "filter probe response at channel(%d).\n",
+ printk(KERN_INFO "GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n",
network->channel);
goto free_network;
}
@@ -2618,8 +2550,7 @@ static inline void rtllib_process_probe_response(
/* If there are no more slots, expire the oldest */
list_del(&oldest->list);
target = oldest;
- RTLLIB_DEBUG_SCAN("Expired '%s' ( %pM) from "
- "network list.\n",
+ RTLLIB_DEBUG_SCAN("Expired '%s' ( %pM) from network list.\n",
escape_essid(target->ssid,
target->ssid_len),
target->bssid);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index cd196cec0dd9..1b4623c3f95e 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -4,6 +4,8 @@
* ADDBAREQ ADDBARSP and DELBA packet is still on consideration. Temporarily use MANAGE QUEUE instead of Normal Queue.
* WB 2008-05-27
* *****************************************************************************************************************************/
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include "ieee80211.h"
#include "rtl819x_BA.h"
@@ -110,13 +112,12 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
struct sk_buff *skb = NULL;
struct ieee80211_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
- __le16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
//category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sentd to:%pM, ieee->dev:%p\n", __func__, type, Dst, ieee->dev);
- if (pBA == NULL||ieee == NULL)
+ if (pBA == NULL)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pBA(%p) is NULL or ieee(%p) is NULL\n", pBA, ieee);
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "pBA is NULL\n");
return NULL;
}
skb = dev_alloc_skb(len + sizeof( struct ieee80211_hdr_3addr)); //need to add something others? FIXME
@@ -149,17 +150,17 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
{
// Status Code
printk("=====>to send ADDBARSP\n");
- tmp = cpu_to_le16(StatusCode);
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(StatusCode, tag);
tag += 2;
}
// BA Parameter Set
- tmp = cpu_to_le16(pBA->BaParamSet.shortData);
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(pBA->BaParamSet.shortData, tag);
tag += 2;
// BA Timeout Value
- tmp = cpu_to_le16(pBA->BaTimeoutValue);
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(pBA->BaTimeoutValue, tag);
tag += 2;
if (ACT_ADDBAREQ == type)
@@ -196,7 +197,6 @@ static struct sk_buff *ieee80211_DELBA(
struct sk_buff *skb = NULL;
struct ieee80211_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
- __le16 tmp = 0;
//len = head len + DELBA Parameter Set(2) + Reason Code(2)
u16 len = 6 + ieee->tx_headroom;
@@ -230,12 +230,12 @@ static struct sk_buff *ieee80211_DELBA(
*tag ++= ACT_DELBA;
// DELBA Parameter Set
- tmp = cpu_to_le16(DelbaParamSet.shortData);
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(DelbaParamSet.shortData, tag);
tag += 2;
// Reason Code
- tmp = cpu_to_le16(ReasonCode);
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(ReasonCode, tag);
tag += 2;
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index c4514109d0ee..acaa723817e7 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -241,7 +241,7 @@ static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
{
//DIRECTION_VALUE dir;
u8 dir;
- bool search_dir[4] = {0, 0, 0, 0};
+ bool search_dir[4] = {0};
struct list_head *psearch_list; //FIXME
PTS_COMMON_INFO pRet = NULL;
if(ieee->iw_mode == IW_MODE_MASTER) //ap mode
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 45514aa97698..1868352d3789 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -23,7 +23,7 @@
* Return: NONE
* Note: 8226 support both 20M and 40 MHz
*---------------------------------------------------------------------------*/
-void PHY_SetRF8256Bandwidth(struct net_device *dev , HT_CHANNEL_WIDTH Bandwidth)
+void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
{
u8 eRFPath;
struct r8192_priv *priv = ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 936565d46014..ee6b936efef2 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -21,14 +21,13 @@ Major Change History:
#include "r8190_rtl8256.h"
#include "r819xU_cmdpkt.h"
/*---------------------------Define Local Constant---------------------------*/
-//
-// Indicate different AP vendor for IOT issue.
-//
-static u32 edca_setting_DL[HT_IOT_PEER_MAX] =
- { 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f};
-static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
- { 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f};
-
+/* Indicate different AP vendor for IOT issue. */
+static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
+ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0x00a44f, 0x5ea44f
+};
+static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
+ 0x5e4322, 0x00a44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f
+};
#define RTK_UL_EDCA 0xa44f
#define RTK_DL_EDCA 0x5e4322
@@ -36,11 +35,11 @@ static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
/*------------------------Define global variable-----------------------------*/
-// Debug variable ?
+/* Debug variable ? */
dig_t dm_digtable;
-// Store current software write register content for MAC PHY.
-u8 dm_shadow[16][256] = {{0}};
-// For Dynamic Rx Path Selection by Signal Strength
+/* Store current software write register content for MAC PHY. */
+u8 dm_shadow[16][256] = { {0} };
+/* For Dynamic Rx Path Selection by Signal Strength */
DRxPathSel DM_RxPathSelTable;
/*------------------------Define global variable-----------------------------*/
@@ -56,24 +55,21 @@ extern void dm_check_fsync(struct net_device *dev);
/*---------------------Define local function prototype-----------------------*/
-// DM --> Rate Adaptive
+/* DM --> Rate Adaptive */
static void dm_check_rate_adaptive(struct net_device *dev);
-// DM --> Bandwidth switch
+/* DM --> Bandwidth switch */
static void dm_init_bandwidth_autoswitch(struct net_device *dev);
static void dm_bandwidth_autoswitch(struct net_device *dev);
-// DM --> TX power control
-//static void dm_initialize_txpower_tracking(struct net_device *dev);
+/* DM --> TX power control */
+/*static void dm_initialize_txpower_tracking(struct net_device *dev);*/
static void dm_check_txpower_tracking(struct net_device *dev);
+/*static void dm_txpower_reset_recovery(struct net_device *dev);*/
-
-//static void dm_txpower_reset_recovery(struct net_device *dev);
-
-
-// DM --> Dynamic Init Gain by RSSI
+/* DM --> Dynamic Init Gain by RSSI */
static void dm_dig_init(struct net_device *dev);
static void dm_ctrl_initgain_byrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
@@ -84,61 +80,58 @@ static void dm_pd_th(struct net_device *dev);
static void dm_cs_ratio(struct net_device *dev);
static void dm_init_ctstoself(struct net_device *dev);
-// DM --> EDCA turbo mode control
+/* DM --> EDCA turbo mode control */
static void dm_check_edca_turbo(struct net_device *dev);
-//static void dm_gpio_change_rf(struct net_device *dev);
-// DM --> Check PBC
+/*static void dm_gpio_change_rf(struct net_device *dev);*/
+/* DM --> Check PBC */
static void dm_check_pbc_gpio(struct net_device *dev);
-
-// DM --> Check current RX RF path state
+/* DM --> Check current RX RF path state */
static void dm_check_rx_path_selection(struct net_device *dev);
static void dm_init_rxpath_selection(struct net_device *dev);
static void dm_rxpath_sel_byrssi(struct net_device *dev);
-
-// DM --> Fsync for broadcom ap
+/* DM --> Fsync for broadcom ap */
static void dm_init_fsync(struct net_device *dev);
static void dm_deInit_fsync(struct net_device *dev);
-//Added by vivi, 20080522
+/* Added by vivi, 20080522 */
static void dm_check_txrateandretrycount(struct net_device *dev);
/*---------------------Define local function prototype-----------------------*/
-/*---------------------Define of Tx Power Control For Near/Far Range --------*/ //Add by Jacken 2008/02/18
+/*---------------------Define of Tx Power Control For Near/Far Range --------*/ /*Add by Jacken 2008/02/18 */
static void dm_init_dynamic_txpower(struct net_device *dev);
static void dm_dynamic_txpower(struct net_device *dev);
-
-// DM --> For rate adaptive and DIG, we must send RSSI to firmware
+/* DM --> For rate adaptive and DIG, we must send RSSI to firmware */
static void dm_send_rssi_tofw(struct net_device *dev);
static void dm_ctstoself(struct net_device *dev);
/*---------------------------Define function prototype------------------------*/
-//================================================================================
-// HW Dynamic mechanism interface.
-//================================================================================
-
-//
-// Description:
-// Prepare SW resource for HW dynamic mechanism.
-//
-// Assumption:
-// This function is only invoked at driver intialization once.
-//
-//
+/*
+ * ================================================================================
+ * HW Dynamic mechanism interface.
+ * ================================================================================
+ *
+ *
+ * Description:
+ * Prepare SW resource for HW dynamic mechanism.
+ *
+ * Assumption:
+ * This function is only invoked at driver intialization once.
+ */
void init_hal_dm(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- // Undecorated Smoothed Signal Strength, it can utilized to dynamic mechanism.
+ /* Undecorated Smoothed Signal Strength, it can utilized to dynamic mechanism. */
priv->undecorated_smoothed_pwdb = -1;
- //Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code.
+ /* Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code. */
dm_init_dynamic_txpower(dev);
init_rate_adaptive(dev);
- //dm_initialize_txpower_tracking(dev);
+ /*dm_initialize_txpower_tracking(dev);*/
dm_dig_init(dev);
dm_init_edca_turbo(dev);
dm_init_bandwidth_autoswitch(dev);
@@ -146,18 +139,16 @@ void init_hal_dm(struct net_device *dev)
dm_init_rxpath_selection(dev);
dm_init_ctstoself(dev);
-} // InitHalDm
+} /* InitHalDm */
void deinit_hal_dm(struct net_device *dev)
{
-
dm_deInit_fsync(dev);
-
}
-
#ifdef USB_RX_AGGREGATION_SUPPORT
-void dm_CheckRxAggregation(struct net_device *dev) {
+void dm_CheckRxAggregation(struct net_device *dev)
+{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
static unsigned long lastTxOkCnt;
@@ -186,14 +177,15 @@ void dm_CheckRxAggregation(struct net_device *dev) {
if ((curTxOkCnt + curRxOkCnt) < 15000000)
return;
- if(curTxOkCnt > 4*curRxOkCnt) {
+ if (curTxOkCnt > 4*curRxOkCnt) {
if (priv->bCurrentRxAggrEnable) {
write_nic_dword(dev, 0x1a8, 0);
priv->bCurrentRxAggrEnable = false;
}
- }else{
+ } else {
if (!priv->bCurrentRxAggrEnable && !pHTInfo->bCurrentRT2RTAggregation) {
u32 ulValue;
+
ulValue = (pHTInfo->UsbRxFwAggrEn<<24) | (pHTInfo->UsbRxFwAggrPageNum<<16) |
(pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
/*
@@ -208,16 +200,14 @@ void dm_CheckRxAggregation(struct net_device *dev) {
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
-} // dm_CheckEdcaTurbo
+} /* dm_CheckEdcaTurbo */
#endif
-
-
void hal_dm_watchdog(struct net_device *dev)
{
- //struct r8192_priv *priv = ieee80211_priv(dev);
+ /*struct r8192_priv *priv = ieee80211_priv(dev);*/
- //static u8 previous_bssid[6] ={0};
+ /*static u8 previous_bssid[6] ={0};*/
/*Add by amy 2008/05/15 ,porting from windows code.*/
dm_check_rate_adaptive(dev);
@@ -230,25 +220,23 @@ void hal_dm_watchdog(struct net_device *dev)
dm_check_rx_path_selection(dev);
dm_check_fsync(dev);
- // Add by amy 2008-05-15 porting from windows code.
+ /* Add by amy 2008-05-15 porting from windows code. */
dm_check_pbc_gpio(dev);
dm_send_rssi_tofw(dev);
dm_ctstoself(dev);
#ifdef USB_RX_AGGREGATION_SUPPORT
dm_CheckRxAggregation(dev);
#endif
-} //HalDmWatchDog
-
+} /* HalDmWatchDog */
/*
- * Decide Rate Adaptive Set according to distance (signal strength)
- * 01/11/2008 MHC Modify input arguments and RATR table level.
- * 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
- * the function after making sure RF_Type.
- */
+ * Decide Rate Adaptive Set according to distance (signal strength)
+ * 01/11/2008 MHC Modify input arguments and RATR table level.
+ * 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
+ * the function after making sure RF_Type.
+ */
void init_rate_adaptive(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
@@ -261,36 +249,33 @@ void init_rate_adaptive(struct net_device *dev)
pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;
- if(priv->CustomerID == RT_CID_819x_Netcore)
+ if (priv->CustomerID == RT_CID_819x_Netcore)
pra->ping_rssi_enable = 1;
else
pra->ping_rssi_enable = 0;
pra->ping_rssi_thresh_for_ra = 15;
-
- if (priv->rf_type == RF_2T4R)
- {
- // 07/10/08 MH Modify for RA smooth scheme.
- /* 2008/01/11 MH Modify 2T RATR table for different RSSI. 080515 porting by amy from windows code.*/
+ if (priv->rf_type == RF_2T4R) {
+ /*
+ * 07/10/08 MH Modify for RA smooth scheme.
+ * 2008/01/11 MH Modify 2T RATR table for different RSSI. 080515 porting by amy from windows code.
+ */
pra->upper_rssi_threshold_ratr = 0x8f0f0000;
pra->middle_rssi_threshold_ratr = 0x8f0ff000;
pra->low_rssi_threshold_ratr = 0x8f0ff001;
pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
- pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
- }
- else if (priv->rf_type == RF_1T2R)
- {
+ pra->ping_rssi_ratr = 0x0000000d;/* cosa add for test */
+ } else if (priv->rf_type == RF_1T2R) {
pra->upper_rssi_threshold_ratr = 0x000f0000;
pra->middle_rssi_threshold_ratr = 0x000ff000;
pra->low_rssi_threshold_ratr = 0x000ff001;
pra->low_rssi_threshold_ratr_40M = 0x000ff005;
pra->low_rssi_threshold_ratr_20M = 0x000ff001;
- pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
+ pra->ping_rssi_ratr = 0x0000000d;/* cosa add for test */
}
-} // InitRateAdaptive
-
+} /* InitRateAdaptive */
/*-----------------------------------------------------------------------------
* Function: dm_check_rate_adaptive()
@@ -318,149 +303,122 @@ static void dm_check_rate_adaptive(struct net_device *dev)
bool bshort_gi_enabled = false;
static u8 ping_rssi_state;
-
- if(!priv->up)
- {
+ if (!priv->up) {
RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
return;
}
- if(pra->rate_adaptive_disabled)//this variable is set by ioctl.
+ if (pra->rate_adaptive_disabled) /* this variable is set by ioctl. */
return;
- // TODO: Only 11n mode is implemented currently,
- if(!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
- priv->ieee80211->mode == WIRELESS_MODE_N_5G))
- return;
+ /* TODO: Only 11n mode is implemented currently, */
+ if (!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
+ priv->ieee80211->mode == WIRELESS_MODE_N_5G))
+ return;
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- // RT_TRACE(COMP_RATE, "dm_CheckRateAdaptive(): \t");
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
+ /*RT_TRACE(COMP_RATE, "dm_CheckRateAdaptive(): \t");*/
- //
- // Check whether Short GI is enabled
- //
+ /* Check whether Short GI is enabled */
bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI40MHz) ||
(!pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI20MHz);
-
pra->upper_rssi_threshold_ratr =
- (pra->upper_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
+ (pra->upper_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled) ? BIT31:0);
pra->middle_rssi_threshold_ratr =
- (pra->middle_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
+ (pra->middle_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled) ? BIT31:0);
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_40M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
- }
- else
- {
+ (pra->low_rssi_threshold_ratr_40M & (~BIT31)) | ((bshort_gi_enabled) ? BIT31:0);
+ } else {
pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_20M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
+ (pra->low_rssi_threshold_ratr_20M & (~BIT31)) | ((bshort_gi_enabled) ? BIT31:0);
}
- //cosa add for test
+ /* cosa add for test */
pra->ping_rssi_ratr =
- (pra->ping_rssi_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
+ (pra->ping_rssi_ratr & (~BIT31)) | ((bshort_gi_enabled) ? BIT31:0);
/* 2007/10/08 MH We support RA smooth scheme now. When it is the first
time to link with AP. We will not change upper/lower threshold. If
STA stay in high or low level, we must change two different threshold
to prevent jumping frequently. */
- if (pra->ratr_state == DM_RATR_STA_HIGH)
- {
+ if (pra->ratr_state == DM_RATR_STA_HIGH) {
HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
- }
- else if (pra->ratr_state == DM_RATR_STA_LOW)
- {
+ } else if (pra->ratr_state == DM_RATR_STA_LOW) {
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
(pra->low2high_rssi_thresh_for_ra40M):(pra->low2high_rssi_thresh_for_ra20M);
- }
- else
- {
+ } else {
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
}
- //DbgPrint("[DM] THresh H/L=%d/%d\n\r", RATR.HighRSSIThreshForRA, RATR.LowRSSIThreshForRA);
- if(priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA)
- {
- //DbgPrint("[DM] RSSI=%d STA=HIGH\n\r", pHalData->UndecoratedSmoothedPWDB);
+ /*DbgPrint("[DM] THresh H/L=%d/%d\n\r", RATR.HighRSSIThreshForRA, RATR.LowRSSIThreshForRA);*/
+ if (priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA) {
+ /*DbgPrint("[DM] RSSI=%d STA=HIGH\n\r", pHalData->UndecoratedSmoothedPWDB);*/
pra->ratr_state = DM_RATR_STA_HIGH;
targetRATR = pra->upper_rssi_threshold_ratr;
- }else if(priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA)
- {
- //DbgPrint("[DM] RSSI=%d STA=Middle\n\r", pHalData->UndecoratedSmoothedPWDB);
+ } else if (priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA) {
+ /*DbgPrint("[DM] RSSI=%d STA=Middle\n\r", pHalData->UndecoratedSmoothedPWDB);*/
pra->ratr_state = DM_RATR_STA_MIDDLE;
targetRATR = pra->middle_rssi_threshold_ratr;
- }else
- {
- //DbgPrint("[DM] RSSI=%d STA=LOW\n\r", pHalData->UndecoratedSmoothedPWDB);
+ } else {
+ /*DbgPrint("[DM] RSSI=%d STA=LOW\n\r", pHalData->UndecoratedSmoothedPWDB);*/
pra->ratr_state = DM_RATR_STA_LOW;
targetRATR = pra->low_rssi_threshold_ratr;
}
- //cosa add for test
- if(pra->ping_rssi_enable)
- {
- //pHalData->UndecoratedSmoothedPWDB = 19;
- if(priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5))
- {
- if((priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
- ping_rssi_state)
- {
- //DbgPrint("TestRSSI = %d, set RATR to 0x%x \n", pHalData->UndecoratedSmoothedPWDB, pRA->TestRSSIRATR);
+ /* cosa add for test */
+ if (pra->ping_rssi_enable) {
+ /*pHalData->UndecoratedSmoothedPWDB = 19;*/
+ if (priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5)) {
+ if ((priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
+ ping_rssi_state) {
+ /*DbgPrint("TestRSSI = %d, set RATR to 0x%x\n", pHalData->UndecoratedSmoothedPWDB, pRA->TestRSSIRATR);*/
pra->ratr_state = DM_RATR_STA_LOW;
targetRATR = pra->ping_rssi_ratr;
ping_rssi_state = 1;
}
- //else
- // DbgPrint("TestRSSI is between the range. \n");
- }
- else
- {
- //DbgPrint("TestRSSI Recover to 0x%x \n", targetRATR);
+ /*else
+ DbgPrint("TestRSSI is between the range.\n");*/
+ } else {
+ /*DbgPrint("TestRSSI Recover to 0x%x\n", targetRATR);*/
ping_rssi_state = 0;
}
}
- // 2008.04.01
- // For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
- if(priv->ieee80211->GetHalfNmodeSupportByAPsHandler(dev))
- targetRATR &= 0xf00fffff;
+ /*
+ * 2008.04.01
+ * For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
+ */
+ if (priv->ieee80211->GetHalfNmodeSupportByAPsHandler(dev))
+ targetRATR &= 0xf00fffff;
- //
- // Check whether updating of RATR0 is required
- //
+ /* Check whether updating of RATR0 is required */
read_nic_dword(dev, RATR0, &currentRATR);
- if(targetRATR != currentRATR)
- {
+ if (targetRATR != currentRATR) {
u32 ratr_value;
+
ratr_value = targetRATR;
- RT_TRACE(COMP_RATE,"currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
- if(priv->rf_type == RF_1T2R)
- {
+ RT_TRACE(COMP_RATE, "currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
+ if (priv->rf_type == RF_1T2R)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
- }
write_nic_dword(dev, RATR0, ratr_value);
write_nic_byte(dev, UFWP, 1);
pra->last_ratr = targetRATR;
}
- }
- else
- {
+ } else {
pra->ratr_state = DM_RATR_STA_MAX;
}
-} // dm_CheckRateAdaptive
-
+} /* dm_CheckRateAdaptive */
static void dm_init_bandwidth_autoswitch(struct net_device *dev)
{
@@ -471,78 +429,74 @@ static void dm_init_bandwidth_autoswitch(struct net_device *dev)
priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable = false;
-} // dm_init_bandwidth_autoswitch
-
+} /* dm_init_bandwidth_autoswitch */
static void dm_bandwidth_autoswitch(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||!priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable){
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 || !priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable)
return;
- }else{
- if(priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false){//If send packets in 40 Mhz in 20/40
- if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
- }else{//in force send packets in 20 Mhz in 20/40
- if(priv->undecorated_smoothed_pwdb >= priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
-
- }
+ if (priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false) { /* If send packets in 40 Mhz in 20/40 */
+ if (priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
+ priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
+ } else { /* in force send packets in 20 Mhz in 20/40 */
+ if (priv->undecorated_smoothed_pwdb >= priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
+ priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
}
-} // dm_BandwidthAutoSwitch
+} /* dm_BandwidthAutoSwitch */
-//OFDM default at 0db, index=6.
+/* OFDM default at 0db, index=6. */
static u32 OFDMSwingTable[OFDM_Table_Length] = {
- 0x7f8001fe, // 0, +6db
- 0x71c001c7, // 1, +5db
- 0x65400195, // 2, +4db
- 0x5a400169, // 3, +3db
- 0x50800142, // 4, +2db
- 0x47c0011f, // 5, +1db
- 0x40000100, // 6, +0db ===> default, upper for higher temperature, lower for low temperature
- 0x390000e4, // 7, -1db
- 0x32c000cb, // 8, -2db
- 0x2d4000b5, // 9, -3db
- 0x288000a2, // 10, -4db
- 0x24000090, // 11, -5db
- 0x20000080, // 12, -6db
- 0x1c800072, // 13, -7db
- 0x19800066, // 14, -8db
- 0x26c0005b, // 15, -9db
- 0x24400051, // 16, -10db
- 0x12000048, // 17, -11db
- 0x10000040 // 18, -12db
+ 0x7f8001fe, /* 0, +6db */
+ 0x71c001c7, /* 1, +5db */
+ 0x65400195, /* 2, +4db */
+ 0x5a400169, /* 3, +3db */
+ 0x50800142, /* 4, +2db */
+ 0x47c0011f, /* 5, +1db */
+ 0x40000100, /* 6, +0db ===> default, upper for higher temperature, lower for low temperature */
+ 0x390000e4, /* 7, -1db */
+ 0x32c000cb, /* 8, -2db */
+ 0x2d4000b5, /* 9, -3db */
+ 0x288000a2, /* 10, -4db */
+ 0x24000090, /* 11, -5db */
+ 0x20000080, /* 12, -6db */
+ 0x1c800072, /* 13, -7db */
+ 0x19800066, /* 14, -8db */
+ 0x26c0005b, /* 15, -9db */
+ 0x24400051, /* 16, -10db */
+ 0x12000048, /* 17, -11db */
+ 0x10000040 /* 18, -12db */
};
static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, // 0, +0db ===> CCK40M default
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 1, -1db
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 2, -2db
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 3, -3db
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 4, -4db
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 5, -5db
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 6, -6db ===> CCK20M default
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 7, -7db
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 8, -8db
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 9, -9db
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 10, -10db
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} // 11, -11db
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0db ===> CCK40M default */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 1, -1db */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 2, -2db */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 3, -3db */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 4, -4db */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 5, -5db */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 6, -6db ===> CCK20M default */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 7, -7db */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 8, -8db */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 9, -9db */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 10, -10db */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} /* 11, -11db */
};
static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, // 0, +0db ===> CCK40M default
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 1, -1db
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 2, -2db
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 3, -3db
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 4, -4db
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 5, -5db
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 6, -6db ===> CCK20M default
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 7, -7db
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 8, -8db
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 9, -9db
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 10, -10db
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} // 11, -11db
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0db ===> CCK40M default */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 1, -1db */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 2, -2db */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 3, -3db */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 4, -4db */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 5, -5db */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 6, -6db ===> CCK20M default */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 7, -7db */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 8, -8db */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 9, -9db */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 10, -10db */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} /* 11, -11db */
};
static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
@@ -551,14 +505,14 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
bool bHighpowerstate, viviflag = FALSE;
DCMD_TXCMD_T tx_cmd;
u8 powerlevelOFDM24G;
- int i =0, j = 0, k = 0;
- u8 RF_Type, tmp_report[5]={0, 0, 0, 0, 0};
+ int i = 0, j = 0, k = 0;
+ u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
u32 Value;
u8 Pwr_Flag;
- u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver=0;
- //RT_STATUS rtStatus = RT_STATUS_SUCCESS;
+ u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
+ /*RT_STATUS rtStatus = RT_STATUS_SUCCESS;*/
bool rtStatus = true;
- u32 delta=0;
+ u32 delta = 0;
write_nic_byte(dev, 0x1ba, 0);
@@ -571,109 +525,87 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n", powerlevelOFDM24G);
- for(j = 0; j<=30; j++)
-{ //fill tx_cmd
-
- tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
- tx_cmd.Length = 4;
- tx_cmd.Value = Value;
- rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12);
- if (rtStatus == RT_STATUS_FAILURE)
- {
- RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
- }
- mdelay(1);
- //DbgPrint("hi, vivi, strange\n");
- for(i = 0;i <= 30; i++)
- {
- read_nic_byte(dev, 0x1ba, &Pwr_Flag);
-
- if (Pwr_Flag == 0)
- {
- mdelay(1);
- continue;
- }
- read_nic_word(dev, 0x13c, &Avg_TSSI_Meas);
- if(Avg_TSSI_Meas == 0)
- {
- write_nic_byte(dev, 0x1ba, 0);
- break;
- }
+ for (j = 0; j <= 30; j++) { /* fill tx_cmd */
+ tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
+ tx_cmd.Length = 4;
+ tx_cmd.Value = Value;
+ rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12);
+ if (rtStatus == RT_STATUS_FAILURE)
+ RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
+ mdelay(1);
+ /*DbgPrint("hi, vivi, strange\n");*/
+ for (i = 0; i <= 30; i++) {
+ read_nic_byte(dev, 0x1ba, &Pwr_Flag);
+
+ if (Pwr_Flag == 0) {
+ mdelay(1);
+ continue;
+ }
+ read_nic_word(dev, 0x13c, &Avg_TSSI_Meas);
+ if (Avg_TSSI_Meas == 0) {
+ write_nic_byte(dev, 0x1ba, 0);
+ break;
+ }
- for(k = 0;k < 5; k++)
- {
- if(k !=4)
- read_nic_byte(dev, 0x134+k, &tmp_report[k]);
- else
- read_nic_byte(dev, 0x13e, &tmp_report[k]);
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
- }
+ for (k = 0; k < 5; k++) {
+ if (k != 4)
+ read_nic_byte(dev, 0x134+k, &tmp_report[k]);
+ else
+ read_nic_byte(dev, 0x13e, &tmp_report[k]);
+ RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
+ }
- //check if the report value is right
- for(k = 0;k < 5; k++)
- {
- if(tmp_report[k] <= 20)
- {
- viviflag =TRUE;
+ /* check if the report value is right */
+ for (k = 0; k < 5; k++) {
+ if (tmp_report[k] <= 20) {
+ viviflag = TRUE;
+ break;
+ }
+ }
+ if (viviflag == TRUE) {
+ write_nic_byte(dev, 0x1ba, 0);
+ viviflag = FALSE;
+ RT_TRACE(COMP_POWER_TRACKING, "we filtered the data\n");
+ for (k = 0; k < 5; k++)
+ tmp_report[k] = 0;
break;
}
- }
- if(viviflag ==TRUE)
- {
- write_nic_byte(dev, 0x1ba, 0);
- viviflag = FALSE;
- RT_TRACE(COMP_POWER_TRACKING, "we filtered the data\n");
- for(k = 0;k < 5; k++)
- tmp_report[k] = 0;
- break;
- }
- for(k = 0;k < 5; k++)
- {
- Avg_TSSI_Meas_from_driver += tmp_report[k];
- }
-
- Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5;
- RT_TRACE(COMP_POWER_TRACKING, "Avg_TSSI_Meas_from_driver = %d\n", Avg_TSSI_Meas_from_driver);
- TSSI_13dBm = priv->TSSI_13dBm;
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm);
+ for (k = 0; k < 5; k++)
+ Avg_TSSI_Meas_from_driver += tmp_report[k];
- //if(abs(Avg_TSSI_Meas_from_driver - TSSI_13dBm) <= E_FOR_TX_POWER_TRACK)
- // For MacOS-compatible
- if(Avg_TSSI_Meas_from_driver > TSSI_13dBm)
- delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
- else
- delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
+ Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5;
+ RT_TRACE(COMP_POWER_TRACKING, "Avg_TSSI_Meas_from_driver = %d\n", Avg_TSSI_Meas_from_driver);
+ TSSI_13dBm = priv->TSSI_13dBm;
+ RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm);
- if(delta <= E_FOR_TX_POWER_TRACK)
- {
- priv->ieee80211->bdynamic_txpower_enable = TRUE;
- write_nic_byte(dev, 0x1ba, 0);
- RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
- return;
- }
- else
- {
- if(Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
- {
- if (priv->rfa_txpowertrackingindex > 0)
- {
+ /*if (abs(Avg_TSSI_Meas_from_driver - TSSI_13dBm) <= E_FOR_TX_POWER_TRACK)*/
+ /* For MacOS-compatible */
+ if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
+ delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
+ else
+ delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
+
+ if (delta <= E_FOR_TX_POWER_TRACK) {
+ priv->ieee80211->bdynamic_txpower_enable = TRUE;
+ write_nic_byte(dev, 0x1ba, 0);
+ RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
+ RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
+ return;
+ }
+ if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK) {
+ if (priv->rfa_txpowertrackingindex > 0) {
priv->rfa_txpowertrackingindex--;
- if(priv->rfa_txpowertrackingindex_real > 4)
- {
+ if (priv->rfa_txpowertrackingindex_real > 4) {
priv->rfa_txpowertrackingindex_real--;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
}
}
- }
- else
- {
- if (priv->rfa_txpowertrackingindex < 36)
- {
+ } else {
+ if (priv->rfa_txpowertrackingindex < 36) {
priv->rfa_txpowertrackingindex++;
priv->rfa_txpowertrackingindex_real++;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
@@ -683,52 +615,44 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
priv->cck_present_attentuation_difference
= priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
- if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
priv->cck_present_attentuation
- = priv->cck_present_attentuation_20Mdefault + priv->cck_present_attentuation_difference;
+ = priv->cck_present_attentuation_20Mdefault + priv->cck_present_attentuation_difference;
else
priv->cck_present_attentuation
- = priv->cck_present_attentuation_40Mdefault + priv->cck_present_attentuation_difference;
+ = priv->cck_present_attentuation_40Mdefault + priv->cck_present_attentuation_difference;
- if(priv->cck_present_attentuation > -1&&priv->cck_present_attentuation <23)
- {
- if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
- {
+ if (priv->cck_present_attentuation > -1 && priv->cck_present_attentuation < 23) {
+ if (priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) {
priv->bcck_in_ch14 = TRUE;
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
- }
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- {
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else if (priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) {
priv->bcck_in_ch14 = FALSE;
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
- }
- else
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
-
- if (priv->cck_present_attentuation_difference <= -12||priv->cck_present_attentuation_difference >= 24)
- {
- priv->ieee80211->bdynamic_txpower_enable = TRUE;
- write_nic_byte(dev, 0x1ba, 0);
- RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
- return;
- }
+ RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
+ if (priv->cck_present_attentuation_difference <= -12 || priv->cck_present_attentuation_difference >= 24) {
+ priv->ieee80211->bdynamic_txpower_enable = TRUE;
+ write_nic_byte(dev, 0x1ba, 0);
+ RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
+ return;
+ }
+ write_nic_byte(dev, 0x1ba, 0);
+ Avg_TSSI_Meas_from_driver = 0;
+ for (k = 0; k < 5; k++)
+ tmp_report[k] = 0;
+ break;
+ }
}
- write_nic_byte(dev, 0x1ba, 0);
- Avg_TSSI_Meas_from_driver = 0;
- for(k = 0;k < 5; k++)
- tmp_report[k] = 0;
- break;
- }
-}
- priv->ieee80211->bdynamic_txpower_enable = TRUE;
- write_nic_byte(dev, 0x1ba, 0);
+ priv->ieee80211->bdynamic_txpower_enable = TRUE;
+ write_nic_byte(dev, 0x1ba, 0);
}
static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
@@ -737,107 +661,96 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u32 tmpRegA, TempCCk;
u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
- int i =0, CCKSwingNeedUpdate=0;
-
- if(!priv->btxpower_trackingInit)
- {
- //Query OFDM default setting
- tmpRegA= rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord);
- for(i=0; i<OFDM_Table_Length; i++) //find the index
- {
- if(tmpRegA == OFDMSwingTable[i])
- {
- priv->OFDM_index= (u8)i;
+ int i = 0, CCKSwingNeedUpdate = 0;
+
+ if (!priv->btxpower_trackingInit) {
+ /* Query OFDM default setting */
+ tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord);
+ for (i = 0; i < OFDM_Table_Length; i++) { /* find the index */
+ if (tmpRegA == OFDMSwingTable[i]) {
+ priv->OFDM_index = (u8)i;
RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, OFDM_index=0x%x\n",
rOFDM0_XATxIQImbalance, tmpRegA, priv->OFDM_index);
}
}
- //Query CCK default setting From 0xa22
+ /* Query CCK default setting From 0xa22 */
TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
- for(i=0 ; i<CCK_Table_length ; i++)
- {
- if(TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0])
- {
- priv->CCK_index =(u8) i;
+ for (i = 0; i < CCK_Table_length; i++) {
+ if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
+ priv->CCK_index = (u8) i;
RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, CCK_index=0x%x\n",
rCCK0_TxFilter1, TempCCk, priv->CCK_index);
break;
}
}
priv->btxpower_trackingInit = TRUE;
- //pHalData->TXPowercount = 0;
+ /*pHalData->TXPowercount = 0;*/
return;
}
- //==========================
- // this is only for test, should be masked
- //==========================
-
- // read and filter out unreasonable value
- tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078); // 0x12: RF Reg[10:7]
- RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d \n", tmpRegA);
- if(tmpRegA < 3 || tmpRegA > 13)
+ /*
+ * ==========================
+ * this is only for test, should be masked
+ * ==========================
+ */
+
+ /* read and filter out unreasonable value */
+ tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078); /* 0x12: RF Reg[10:7] */
+ RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
+ if (tmpRegA < 3 || tmpRegA > 13)
return;
- if(tmpRegA >= 12) // if over 12, TP will be bad when high temperature
+ if (tmpRegA >= 12) /* if over 12, TP will be bad when high temperature */
tmpRegA = 12;
- RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d \n", tmpRegA);
- priv->ThermalMeter[0] = ThermalMeterVal; //We use fixed value by Bryant's suggestion
- priv->ThermalMeter[1] = ThermalMeterVal; //We use fixed value by Bryant's suggestion
+ RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
+ priv->ThermalMeter[0] = ThermalMeterVal; /* We use fixed value by Bryant's suggestion */
+ priv->ThermalMeter[1] = ThermalMeterVal; /* We use fixed value by Bryant's suggestion */
- //Get current RF-A temperature index
- if(priv->ThermalMeter[0] >= (u8)tmpRegA) //lower temperature
- {
+ /* Get current RF-A temperature index */
+ if (priv->ThermalMeter[0] >= (u8)tmpRegA) { /* lower temperature */
tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0]-(u8)tmpRegA);
tmpCCK40Mindex = tmpCCK20Mindex - 6;
- if(tmpOFDMindex >= OFDM_Table_Length)
+ if (tmpOFDMindex >= OFDM_Table_Length)
tmpOFDMindex = OFDM_Table_Length-1;
- if(tmpCCK20Mindex >= CCK_Table_length)
+ if (tmpCCK20Mindex >= CCK_Table_length)
tmpCCK20Mindex = CCK_Table_length-1;
- if(tmpCCK40Mindex >= CCK_Table_length)
+ if (tmpCCK40Mindex >= CCK_Table_length)
tmpCCK40Mindex = CCK_Table_length-1;
- }
- else
- {
+ } else {
tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]);
- if(tmpval >= 6) // higher temperature
- tmpOFDMindex = tmpCCK20Mindex = 0; // max to +6dB
+
+ if (tmpval >= 6) /* higher temperature */
+ tmpOFDMindex = tmpCCK20Mindex = 0; /* max to +6dB */
else
tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
tmpCCK40Mindex = 0;
}
- //DbgPrint("%ddb, tmpOFDMindex = %d, tmpCCK20Mindex = %d, tmpCCK40Mindex = %d",
- //((u1Byte)tmpRegA - pHalData->ThermalMeter[0]),
- //tmpOFDMindex, tmpCCK20Mindex, tmpCCK40Mindex);
- if(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) //40M
+ /*DbgPrint("%ddb, tmpOFDMindex = %d, tmpCCK20Mindex = %d, tmpCCK40Mindex = %d",
+ ((u1Byte)tmpRegA - pHalData->ThermalMeter[0]),
+ tmpOFDMindex, tmpCCK20Mindex, tmpCCK40Mindex);*/
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) /* 40M */
tmpCCKindex = tmpCCK40Mindex;
else
tmpCCKindex = tmpCCK20Mindex;
- if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
- {
+ if (priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) {
priv->bcck_in_ch14 = TRUE;
CCKSwingNeedUpdate = 1;
- }
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- {
+ } else if (priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) {
priv->bcck_in_ch14 = FALSE;
CCKSwingNeedUpdate = 1;
}
- if(priv->CCK_index != tmpCCKindex)
- {
+ if (priv->CCK_index != tmpCCKindex) {
priv->CCK_index = tmpCCKindex;
CCKSwingNeedUpdate = 1;
}
- if(CCKSwingNeedUpdate)
- {
- //DbgPrint("Update CCK Swing, CCK_index = %d\n", pHalData->CCK_index);
+ if (CCKSwingNeedUpdate) {
+ /*DbgPrint("Update CCK Swing, CCK_index = %d\n", pHalData->CCK_index);*/
dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
- if(priv->OFDM_index != tmpOFDMindex)
- {
+ if (priv->OFDM_index != tmpOFDMindex) {
priv->OFDM_index = tmpOFDMindex;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[priv->OFDM_index]);
RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
@@ -848,100 +761,100 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
void dm_txpower_trackingcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
- struct net_device *dev = priv->ieee80211->dev;
+ struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct r8192_priv *priv = container_of(dwork, struct r8192_priv, txpower_tracking_wq);
+ struct net_device *dev = priv->ieee80211->dev;
- if(priv->bDcut == TRUE)
+ if (priv->bDcut == TRUE)
dm_TXPowerTrackingCallback_TSSI(dev);
else
dm_TXPowerTrackingCallback_ThermalMeter(dev);
}
-
static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
- //Initial the Tx BB index and mapping value
+ /* Initial the Tx BB index and mapping value */
priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
- priv->txbbgain_table[0].txbbgain_value=0x7f8001fe;
+ priv->txbbgain_table[0].txbbgain_value = 0x7f8001fe;
priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
- priv->txbbgain_table[1].txbbgain_value=0x788001e2;
+ priv->txbbgain_table[1].txbbgain_value = 0x788001e2;
priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
- priv->txbbgain_table[2].txbbgain_value=0x71c001c7;
+ priv->txbbgain_table[2].txbbgain_value = 0x71c001c7;
priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
- priv->txbbgain_table[3].txbbgain_value=0x6b8001ae;
+ priv->txbbgain_table[3].txbbgain_value = 0x6b8001ae;
priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
- priv->txbbgain_table[4].txbbgain_value=0x65400195;
+ priv->txbbgain_table[4].txbbgain_value = 0x65400195;
priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
- priv->txbbgain_table[5].txbbgain_value=0x5fc0017f;
+ priv->txbbgain_table[5].txbbgain_value = 0x5fc0017f;
priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
- priv->txbbgain_table[6].txbbgain_value=0x5a400169;
+ priv->txbbgain_table[6].txbbgain_value = 0x5a400169;
priv->txbbgain_table[7].txbb_iq_amplifygain = 5;
- priv->txbbgain_table[7].txbbgain_value=0x55400155;
+ priv->txbbgain_table[7].txbbgain_value = 0x55400155;
priv->txbbgain_table[8].txbb_iq_amplifygain = 4;
- priv->txbbgain_table[8].txbbgain_value=0x50800142;
+ priv->txbbgain_table[8].txbbgain_value = 0x50800142;
priv->txbbgain_table[9].txbb_iq_amplifygain = 3;
- priv->txbbgain_table[9].txbbgain_value=0x4c000130;
+ priv->txbbgain_table[9].txbbgain_value = 0x4c000130;
priv->txbbgain_table[10].txbb_iq_amplifygain = 2;
- priv->txbbgain_table[10].txbbgain_value=0x47c0011f;
+ priv->txbbgain_table[10].txbbgain_value = 0x47c0011f;
priv->txbbgain_table[11].txbb_iq_amplifygain = 1;
- priv->txbbgain_table[11].txbbgain_value=0x43c0010f;
+ priv->txbbgain_table[11].txbbgain_value = 0x43c0010f;
priv->txbbgain_table[12].txbb_iq_amplifygain = 0;
- priv->txbbgain_table[12].txbbgain_value=0x40000100;
+ priv->txbbgain_table[12].txbbgain_value = 0x40000100;
priv->txbbgain_table[13].txbb_iq_amplifygain = -1;
- priv->txbbgain_table[13].txbbgain_value=0x3c8000f2;
+ priv->txbbgain_table[13].txbbgain_value = 0x3c8000f2;
priv->txbbgain_table[14].txbb_iq_amplifygain = -2;
- priv->txbbgain_table[14].txbbgain_value=0x390000e4;
+ priv->txbbgain_table[14].txbbgain_value = 0x390000e4;
priv->txbbgain_table[15].txbb_iq_amplifygain = -3;
- priv->txbbgain_table[15].txbbgain_value=0x35c000d7;
+ priv->txbbgain_table[15].txbbgain_value = 0x35c000d7;
priv->txbbgain_table[16].txbb_iq_amplifygain = -4;
- priv->txbbgain_table[16].txbbgain_value=0x32c000cb;
+ priv->txbbgain_table[16].txbbgain_value = 0x32c000cb;
priv->txbbgain_table[17].txbb_iq_amplifygain = -5;
- priv->txbbgain_table[17].txbbgain_value=0x300000c0;
+ priv->txbbgain_table[17].txbbgain_value = 0x300000c0;
priv->txbbgain_table[18].txbb_iq_amplifygain = -6;
- priv->txbbgain_table[18].txbbgain_value=0x2d4000b5;
+ priv->txbbgain_table[18].txbbgain_value = 0x2d4000b5;
priv->txbbgain_table[19].txbb_iq_amplifygain = -7;
- priv->txbbgain_table[19].txbbgain_value=0x2ac000ab;
+ priv->txbbgain_table[19].txbbgain_value = 0x2ac000ab;
priv->txbbgain_table[20].txbb_iq_amplifygain = -8;
- priv->txbbgain_table[20].txbbgain_value=0x288000a2;
+ priv->txbbgain_table[20].txbbgain_value = 0x288000a2;
priv->txbbgain_table[21].txbb_iq_amplifygain = -9;
- priv->txbbgain_table[21].txbbgain_value=0x26000098;
+ priv->txbbgain_table[21].txbbgain_value = 0x26000098;
priv->txbbgain_table[22].txbb_iq_amplifygain = -10;
- priv->txbbgain_table[22].txbbgain_value=0x24000090;
+ priv->txbbgain_table[22].txbbgain_value = 0x24000090;
priv->txbbgain_table[23].txbb_iq_amplifygain = -11;
- priv->txbbgain_table[23].txbbgain_value=0x22000088;
+ priv->txbbgain_table[23].txbbgain_value = 0x22000088;
priv->txbbgain_table[24].txbb_iq_amplifygain = -12;
- priv->txbbgain_table[24].txbbgain_value=0x20000080;
+ priv->txbbgain_table[24].txbbgain_value = 0x20000080;
priv->txbbgain_table[25].txbb_iq_amplifygain = -13;
- priv->txbbgain_table[25].txbbgain_value=0x1a00006c;
+ priv->txbbgain_table[25].txbbgain_value = 0x1a00006c;
priv->txbbgain_table[26].txbb_iq_amplifygain = -14;
- priv->txbbgain_table[26].txbbgain_value=0x1c800072;
+ priv->txbbgain_table[26].txbbgain_value = 0x1c800072;
priv->txbbgain_table[27].txbb_iq_amplifygain = -15;
- priv->txbbgain_table[27].txbbgain_value=0x18000060;
+ priv->txbbgain_table[27].txbbgain_value = 0x18000060;
priv->txbbgain_table[28].txbb_iq_amplifygain = -16;
- priv->txbbgain_table[28].txbbgain_value=0x19800066;
+ priv->txbbgain_table[28].txbbgain_value = 0x19800066;
priv->txbbgain_table[29].txbb_iq_amplifygain = -17;
- priv->txbbgain_table[29].txbbgain_value=0x15800056;
+ priv->txbbgain_table[29].txbbgain_value = 0x15800056;
priv->txbbgain_table[30].txbb_iq_amplifygain = -18;
- priv->txbbgain_table[30].txbbgain_value=0x26c0005b;
+ priv->txbbgain_table[30].txbbgain_value = 0x26c0005b;
priv->txbbgain_table[31].txbb_iq_amplifygain = -19;
- priv->txbbgain_table[31].txbbgain_value=0x14400051;
+ priv->txbbgain_table[31].txbbgain_value = 0x14400051;
priv->txbbgain_table[32].txbb_iq_amplifygain = -20;
- priv->txbbgain_table[32].txbbgain_value=0x24400051;
+ priv->txbbgain_table[32].txbbgain_value = 0x24400051;
priv->txbbgain_table[33].txbb_iq_amplifygain = -21;
- priv->txbbgain_table[33].txbbgain_value=0x1300004c;
+ priv->txbbgain_table[33].txbbgain_value = 0x1300004c;
priv->txbbgain_table[34].txbb_iq_amplifygain = -22;
- priv->txbbgain_table[34].txbbgain_value=0x12000048;
+ priv->txbbgain_table[34].txbbgain_value = 0x12000048;
priv->txbbgain_table[35].txbb_iq_amplifygain = -23;
- priv->txbbgain_table[35].txbbgain_value=0x11000044;
+ priv->txbbgain_table[35].txbbgain_value = 0x11000044;
priv->txbbgain_table[36].txbb_iq_amplifygain = -24;
- priv->txbbgain_table[36].txbbgain_value=0x10000040;
+ priv->txbbgain_table[36].txbbgain_value = 0x10000040;
- //ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
- //This Table is for CH1~CH13
+ /*
+ * ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
+ * This Table is for CH1~CH13
+ */
priv->cck_txbbgain_table[0].ccktxbb_valuearray[0] = 0x36;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[1] = 0x35;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[2] = 0x2e;
@@ -1149,8 +1062,10 @@ static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
priv->cck_txbbgain_table[22].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[7] = 0x01;
- //ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
- //This Table is for CH14
+ /*
+ * ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
+ * This Table is for CH14
+ */
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[0] = 0x36;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[1] = 0x35;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[2] = 0x2e;
@@ -1368,10 +1283,12 @@ static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- // Tx Power tracking by Thermal Meter requires Firmware R/W 3-wire. This mechanism
- // can be enabled only when Firmware R/W 3-wire is enabled. Otherwise, frequent r/w
- // 3-wire by driver causes RF to go into a wrong state.
- if(priv->ieee80211->FwRWRF)
+ /*
+ * Tx Power tracking by Thermal Meter requires Firmware R/W 3-wire. This mechanism
+	 * can be enabled only when Firmware R/W 3-wire is enabled. Otherwise, frequent 3-wire
+	 * r/w by the driver causes the RF to go into a wrong state.
+ */
+ if (priv->ieee80211->FwRWRF)
priv->btxpower_tracking = TRUE;
else
priv->btxpower_tracking = FALSE;
@@ -1379,57 +1296,46 @@ static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
priv->btxpower_trackingInit = FALSE;
}
-
void dm_initialize_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->bDcut == TRUE)
+
+ if (priv->bDcut == TRUE)
dm_InitializeTXPowerTracking_TSSI(dev);
else
dm_InitializeTXPowerTracking_ThermalMeter(dev);
-}// dm_InitializeTXPowerTracking
-
+} /* dm_InitializeTXPowerTracking */
static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u32 tx_power_track_counter;
- if(!priv->btxpower_tracking)
+ if (!priv->btxpower_tracking)
return;
- else
- {
- if((tx_power_track_counter % 30 == 0)&&(tx_power_track_counter != 0))
- {
- queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
- }
- tx_power_track_counter++;
- }
-
+ if ((tx_power_track_counter % 30 == 0) && (tx_power_track_counter != 0))
+ queue_delayed_work(priv->priv_wq, &priv->txpower_tracking_wq, 0);
+ tx_power_track_counter++;
}
-
static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u8 TM_Trigger;
- //DbgPrint("dm_CheckTXPowerTracking() \n");
- if(!priv->btxpower_tracking)
+ /*DbgPrint("dm_CheckTXPowerTracking()\n");*/
+ if (!priv->btxpower_tracking)
+ return;
+ if (priv->txpower_count <= 2) {
+ priv->txpower_count++;
return;
- else
- {
- if(priv->txpower_count <= 2)
- {
- priv->txpower_count++;
- return;
- }
}
- if(!TM_Trigger)
- {
- //Attention!! You have to write all 12bits of data to RF, or it may cause RF to crash
- //actually write reg0x02 bit1=0, then bit1=1.
- //DbgPrint("Trigger ThermalMeter, write RF reg0x2 = 0x4d to 0x4f\n");
+ if (!TM_Trigger) {
+ /*
+	 * Attention!! You have to write all 12 bits of data to RF, or it may cause RF to crash;
+ * actually write reg0x02 bit1=0, then bit1=1.
+ * DbgPrint("Trigger ThermalMeter, write RF reg0x2 = 0x4d to 0x4f\n");
+ */
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
@@ -1437,93 +1343,84 @@ static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
TM_Trigger = 1;
return;
}
- else
- {
- //DbgPrint("Schedule TxPowerTrackingWorkItem\n");
- queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
- TM_Trigger = 0;
- }
+ /*DbgPrint("Schedule TxPowerTrackingWorkItem\n");*/
+ queue_delayed_work(priv->priv_wq, &priv->txpower_tracking_wq, 0);
+ TM_Trigger = 0;
}
-
static void dm_check_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //static u32 tx_power_track_counter = 0;
+ /*static u32 tx_power_track_counter = 0;*/
-#ifdef RTL8190P
+#ifdef RTL8190P
dm_CheckTXPowerTracking_TSSI(dev);
#else
- if(priv->bDcut == TRUE)
+ if (priv->bDcut == TRUE)
dm_CheckTXPowerTracking_TSSI(dev);
else
dm_CheckTXPowerTracking_ThermalMeter(dev);
#endif
-} // dm_CheckTXPowerTracking
-
+} /* dm_CheckTXPowerTracking */
static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
{
u32 TempVal;
struct r8192_priv *priv = ieee80211_priv(dev);
- //Write 0xa22 0xa23
+
+ /* Write 0xa22 0xa23 */
TempVal = 0;
- if(!bInCH14){
- //Write 0xa22 0xa23
+ if (!bInCH14) {
+ /* Write 0xa22 0xa23 */
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8) ;
+ (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8);
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- //Write 0xa24 ~ 0xa27
+ /* Write 0xa24 ~ 0xa27 */
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- //Write 0xa28 0xa29
+ /* Write 0xa28 0xa29 */
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
+ (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- }
- else
- {
+ } else {
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8) ;
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8);
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- //Write 0xa24 ~ 0xa27
+ /* Write 0xa24 ~ 0xa27 */
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- //Write 0xa28 0xa29
+ /* Write 0xa28 0xa29 */
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
}
-
-
}
-static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14)
+static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14)
{
u32 TempVal;
struct r8192_priv *priv = ieee80211_priv(dev);
TempVal = 0;
- if(!bInCH14)
- {
- //Write 0xa22 0xa23
+ if (!bInCH14) {
+ /* Write 0xa22 0xa23 */
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ;
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8);
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
- //Write 0xa24 ~ 0xa27
+ /* Write 0xa24 ~ 0xa27 */
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
@@ -1531,25 +1428,23 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
- //Write 0xa28 0xa29
+ /* Write 0xa28 0xa29 */
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_DebugPort, TempVal);
- }
- else
- {
-// priv->CCKTxPowerAdjustCntNotCh14++; //cosa add for debug.
- //Write 0xa22 0xa23
+ } else {
+ /*priv->CCKTxPowerAdjustCntNotCh14++; cosa add for debug.*/
+ /* Write 0xa22 0xa23 */
TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
- (CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ;
+ (CCKSwingTable_Ch14[priv->CCK_index][1]<<8);
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
- //Write 0xa24 ~ 0xa27
+ /* Write 0xa24 ~ 0xa27 */
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
@@ -1557,9 +1452,9 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
- //Write 0xa28 0xa29
+ /* Write 0xa28 0xa29 */
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
- (CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
+ (CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
@@ -1567,20 +1462,17 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
}
}
-
-
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
-{ // dm_CCKTxPowerAdjust
-
+{ /* dm_CCKTxPowerAdjust */
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->bDcut == TRUE)
+
+ if (priv->bDcut == TRUE)
dm_CCKTxPowerAdjust_TSSI(dev, binch14);
else
dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
}
-
-#ifndef RTL8192U
+#ifndef RTL8192U
static void dm_txpower_reset_recovery(
struct net_device *dev
)
@@ -1589,75 +1481,71 @@ static void dm_txpower_reset_recovery(
RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n",priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n",priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n",priv->cck_present_attentuation);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n", priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n", priv->cck_present_attentuation);
dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n",priv->rfc_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n",priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n", priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n", priv->rfc_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain);
-} // dm_TXPowerResetRecovery
+} /* dm_TXPowerResetRecovery */
void dm_restore_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 reg_ratr = priv->rate_adaptive.last_ratr;
- if(!priv->up)
- {
+ if (!priv->up) {
RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
return;
}
- //
- // Restore previous state for rate adaptive
- //
- if(priv->rate_adaptive.rate_adaptive_disabled)
+ /* Restore previous state for rate adaptive */
+ if (priv->rate_adaptive.rate_adaptive_disabled)
return;
- // TODO: Only 11n mode is implemented currently,
- if(!(priv->ieee80211->mode==WIRELESS_MODE_N_24G ||
- priv->ieee80211->mode==WIRELESS_MODE_N_5G))
- return;
+	/* TODO: Only 11n mode is implemented currently. */
+ if (!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
+ priv->ieee80211->mode == WIRELESS_MODE_N_5G))
+ return;
+
{
/* 2007/11/15 MH Copy from 8190PCI. */
u32 ratr_value;
+
ratr_value = reg_ratr;
- if(priv->rf_type == RF_1T2R) // 1T2R, Spatial Stream 2 should be disabled
- {
+ if (priv->rf_type == RF_1T2R) { /* 1T2R, Spatial Stream 2 should be disabled */
ratr_value &= ~(RATE_ALL_OFDM_2SS);
- //DbgPrint("HW_VAR_TATR_0 from 0x%x ==> 0x%x\n", ((pu4Byte)(val))[0], ratr_value);
+ /*DbgPrint("HW_VAR_TATR_0 from 0x%x ==> 0x%x\n", ((pu4Byte)(val))[0], ratr_value);*/
}
- //DbgPrint("set HW_VAR_TATR_0 = 0x%x\n", ratr_value);
- //cosa PlatformEFIOWrite4Byte(Adapter, RATR0, ((pu4Byte)(val))[0]);
+ /*DbgPrint("set HW_VAR_TATR_0 = 0x%x\n", ratr_value);*/
+ /*cosa PlatformEFIOWrite4Byte(Adapter, RATR0, ((pu4Byte)(val))[0]);*/
write_nic_dword(dev, RATR0, ratr_value);
write_nic_byte(dev, UFWP, 1);
}
- //Restore TX Power Tracking Index
+ /* Restore TX Power Tracking Index */
if (priv->btxpower_trackingInit && priv->btxpower_tracking)
dm_txpower_reset_recovery(dev);
- //
- //Restore BB Initial Gain
- //
+ /* Restore BB Initial Gain */
dm_bb_initialgain_restore(dev);
-} // DM_RestoreDynamicMechanismState
+} /* DM_RestoreDynamicMechanismState */
static void dm_bb_initialgain_restore(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u32 bit_mask = 0x7f; //Bit0~ Bit6
+	u32 bit_mask = 0x7f; /* Bit0 ~ Bit6 */
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
return;
- //Disable Initial Gain
- //PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
+ /* Disable Initial Gain */
+ /*PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);*/
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask, (u32)priv->initgain_backup.xaagccore1);
rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask, (u32)priv->initgain_backup.xbagccore1);
rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask, (u32)priv->initgain_backup.xcagccore1);
@@ -1665,41 +1553,39 @@ static void dm_bb_initialgain_restore(struct net_device *dev)
bit_mask = bMaskByte2;
rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask, (u32)priv->initgain_backup.cca);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca);
- //Enable Initial Gain
- //PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x100);
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
-
-} // dm_BBInitialGainRestore
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n", priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n", priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n", priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n", priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n", priv->initgain_backup.cca);
+ /* Enable Initial Gain */
+ /*PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x100);*/
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite. */
+} /* dm_BBInitialGainRestore */
void dm_backup_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- // Fsync to avoid reset
+ /* Fsync to avoid reset */
priv->bswitch_fsync = false;
priv->bfsync_processing = false;
- //Backup BB InitialGain
+ /* Backup BB InitialGain */
dm_bb_initialgain_backup(dev);
-} // DM_BackupDynamicMechanismState
-
+} /* DM_BackupDynamicMechanismState */
static void dm_bb_initialgain_backup(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u32 bit_mask = bMaskByte0; //Bit0~ Bit6
+ u32 bit_mask = bMaskByte0; /* Bit0~ Bit6 */
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
return;
- //PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
+ /*PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);*/
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
@@ -1707,13 +1593,13 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
bit_mask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n", priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n", priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n", priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n", priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n", priv->initgain_backup.cca);
-} // dm_BBInitialGainBakcup
+} /* dm_BBInitialGainBackup */
#endif
/*-----------------------------------------------------------------------------
@@ -1736,67 +1622,44 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
u32 dm_value)
{
- if (dm_type == DIG_TYPE_THRESH_HIGH)
- {
+ if (dm_type == DIG_TYPE_THRESH_HIGH) {
dm_digtable.rssi_high_thresh = dm_value;
- }
- else if (dm_type == DIG_TYPE_THRESH_LOW)
- {
+ } else if (dm_type == DIG_TYPE_THRESH_LOW) {
dm_digtable.rssi_low_thresh = dm_value;
- }
- else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH)
- {
+ } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
dm_digtable.rssi_high_power_highthresh = dm_value;
- }
- else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH)
- {
+ } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
dm_digtable.rssi_high_power_highthresh = dm_value;
- }
- else if (dm_type == DIG_TYPE_ENABLE)
- {
+ } else if (dm_type == DIG_TYPE_ENABLE) {
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = true;
- }
- else if (dm_type == DIG_TYPE_DISABLE)
- {
+ } else if (dm_type == DIG_TYPE_DISABLE) {
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = false;
- }
- else if (dm_type == DIG_TYPE_DBG_MODE)
- {
- if(dm_value >= DM_DBG_MAX)
+ } else if (dm_type == DIG_TYPE_DBG_MODE) {
+ if (dm_value >= DM_DBG_MAX)
dm_value = DM_DBG_OFF;
dm_digtable.dbg_mode = (u8)dm_value;
- }
- else if (dm_type == DIG_TYPE_RSSI)
- {
- if(dm_value > 100)
+ } else if (dm_type == DIG_TYPE_RSSI) {
+ if (dm_value > 100)
dm_value = 30;
dm_digtable.rssi_val = (long)dm_value;
- }
- else if (dm_type == DIG_TYPE_ALGORITHM)
- {
+ } else if (dm_type == DIG_TYPE_ALGORITHM) {
if (dm_value >= DIG_ALGO_MAX)
dm_value = DIG_ALGO_BY_FALSE_ALARM;
- if(dm_digtable.dig_algorithm != (u8)dm_value)
+ if (dm_digtable.dig_algorithm != (u8)dm_value)
dm_digtable.dig_algorithm_switch = 1;
dm_digtable.dig_algorithm = (u8)dm_value;
- }
- else if (dm_type == DIG_TYPE_BACKOFF)
- {
- if(dm_value > 30)
+ } else if (dm_type == DIG_TYPE_BACKOFF) {
+ if (dm_value > 30)
dm_value = 30;
dm_digtable.backoff_val = (u8)dm_value;
- }
- else if(dm_type == DIG_TYPE_RX_GAIN_MIN)
- {
- if(dm_value == 0)
+ } else if (dm_type == DIG_TYPE_RX_GAIN_MIN) {
+ if (dm_value == 0)
dm_value = 0x1;
dm_digtable.rx_gain_range_min = (u8)dm_value;
- }
- else if(dm_type == DIG_TYPE_RX_GAIN_MAX)
- {
- if(dm_value > 0x50)
+ } else if (dm_type == DIG_TYPE_RX_GAIN_MAX) {
+ if (dm_value > 0x50)
dm_value = 0x50;
dm_digtable.rx_gain_range_max = (u8)dm_value;
}
@@ -1824,7 +1687,7 @@ static void dm_dig_init(struct net_device *dev)
/* 2007/10/05 MH Disable DIG scheme now. Not tested. */
dm_digtable.dig_enable_flag = true;
dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
- dm_digtable.dbg_mode = DM_DBG_OFF; //off=by real rssi value, on=by DM_DigTable.Rssi_val for new dig
+ dm_digtable.dbg_mode = DM_DBG_OFF; /* off=by real rssi value, on=by DM_DigTable.Rssi_val for new dig */
dm_digtable.dig_algorithm_switch = 0;
/* 2007/10/04 MH Define init gain threshold. */
@@ -1838,17 +1701,16 @@ static void dm_dig_init(struct net_device *dev)
dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
- dm_digtable.rssi_val = 50; //for new dig debug rssi value
+ dm_digtable.rssi_val = 50; /* for new dig debug rssi value */
dm_digtable.backoff_val = DM_DIG_BACKOFF;
dm_digtable.rx_gain_range_max = DM_DIG_MAX;
- if(priv->CustomerID == RT_CID_819x_Netcore)
+ if (priv->CustomerID == RT_CID_819x_Netcore)
dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
else
dm_digtable.rx_gain_range_min = DM_DIG_MIN;
} /* dm_dig_init */
-
/*-----------------------------------------------------------------------------
* Function: dm_ctrl_initgain_byrssi()
*
@@ -1868,20 +1730,18 @@ static void dm_dig_init(struct net_device *dev)
*---------------------------------------------------------------------------*/
static void dm_ctrl_initgain_byrssi(struct net_device *dev)
{
-
if (dm_digtable.dig_enable_flag == false)
return;
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
- else if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
+ else if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
dm_ctrl_initgain_byrssi_by_driverrssi(dev);
-// ;
+ /* ; */
else
return;
}
-
static void dm_ctrl_initgain_byrssi_by_driverrssi(
struct net_device *dev)
{
@@ -1892,32 +1752,33 @@ static void dm_ctrl_initgain_byrssi_by_driverrssi(
if (dm_digtable.dig_enable_flag == false)
return;
- //DbgPrint("Dig by Sw Rssi \n");
- if(dm_digtable.dig_algorithm_switch) // if switched algorithm, we have to disable FW Dig.
+ /*DbgPrint("Dig by Sw Rssi\n");*/
+	if (dm_digtable.dig_algorithm_switch) /* if the algorithm was switched, we have to disable FW DIG. */
fw_dig = 0;
- if(fw_dig <= 3) // execute several times to make sure the FW Dig is disabled
- {// FW DIG Off
- for(i=0; i<3; i++)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
+
+ if (fw_dig <= 3) { /* execute several times to make sure the FW Dig is disabled */
+ /* FW DIG Off */
+ for (i = 0; i < 3; i++)
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
fw_dig++;
- dm_digtable.dig_state = DM_STA_DIG_OFF; //fw dig off.
+ dm_digtable.dig_state = DM_STA_DIG_OFF; /* fw dig off. */
}
- if(priv->ieee80211->state == IEEE80211_LINKED)
+ if (priv->ieee80211->state == IEEE80211_LINKED)
dm_digtable.cur_connect_state = DIG_CONNECT;
else
dm_digtable.cur_connect_state = DIG_DISCONNECT;
- //DbgPrint("DM_DigTable.PreConnectState = %d, DM_DigTable.CurConnectState = %d \n",
- //DM_DigTable.PreConnectState, DM_DigTable.CurConnectState);
+ /*DbgPrint("DM_DigTable.PreConnectState = %d, DM_DigTable.CurConnectState = %d\n",
+ DM_DigTable.PreConnectState, DM_DigTable.CurConnectState);*/
- if(dm_digtable.dbg_mode == DM_DBG_OFF)
+ if (dm_digtable.dbg_mode == DM_DBG_OFF)
dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
- //DbgPrint("DM_DigTable.Rssi_val = %d \n", DM_DigTable.Rssi_val);
+ /*DbgPrint("DM_DigTable.Rssi_val = %d\n", DM_DigTable.Rssi_val);*/
dm_initial_gain(dev);
dm_pd_th(dev);
dm_cs_ratio(dev);
- if(dm_digtable.dig_algorithm_switch)
+ if (dm_digtable.dig_algorithm_switch)
dm_digtable.dig_algorithm_switch = 0;
dm_digtable.pre_connect_state = dm_digtable.cur_connect_state;
@@ -1933,152 +1794,138 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
if (dm_digtable.dig_enable_flag == false)
return;
- if(dm_digtable.dig_algorithm_switch)
- {
+ if (dm_digtable.dig_algorithm_switch) {
dm_digtable.dig_state = DM_STA_DIG_MAX;
- // Fw DIG On.
- for(i=0; i<3; i++)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
+ /* Fw DIG On. */
+ for (i = 0; i < 3; i++)
+			rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite. */
dm_digtable.dig_algorithm_switch = 0;
}
if (priv->ieee80211->state != IEEE80211_LINKED)
return;
- // For smooth, we can not change DIG state.
+	/* For smoothness, we cannot change the DIG state. */
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
(priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
- {
return;
- }
- //DbgPrint("Dig by Fw False Alarm\n");
- //if (DM_DigTable.Dig_State == DM_STA_DIG_OFF)
+
+ /*DbgPrint("Dig by Fw False Alarm\n");*/
+ /*if (DM_DigTable.Dig_State == DM_STA_DIG_OFF)*/
/*DbgPrint("DIG Check\n\r RSSI=%d LOW=%d HIGH=%d STATE=%d",
pHalData->UndecoratedSmoothedPWDB, DM_DigTable.RssiLowThresh,
DM_DigTable.RssiHighThresh, DM_DigTable.Dig_State);*/
/* 1. When RSSI decrease, We have to judge if it is smaller than a threshold
and then execute the step below. */
- if ((priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh))
- {
+ if (priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh) {
/* 2008/02/05 MH When we execute silent reset, the DIG PHY parameters
will be reset to init value. We must prevent the condition. */
if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt))
- {
+ (priv->reset_count == reset_cnt)) {
return;
}
- else
- {
- reset_cnt = priv->reset_count;
- }
+ reset_cnt = priv->reset_count;
- // If DIG is off, DIG high power state must reset.
+ /* If DIG is off, DIG high power state must reset. */
dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
dm_digtable.dig_state = DM_STA_DIG_OFF;
- // 1.1 DIG Off.
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
+ /* 1.1 DIG Off. */
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
- // 1.2 Set initial gain.
+ /* 1.2 Set initial gain. */
write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);
- // 1.3 Lower PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ /* 1.3 Lower PD_TH for OFDM. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
+ /*
+ * 2008/01/11 MH 40MHZ 90/92 register are not the same.
+ * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ */
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(pAdapter, rOFDM0_RxDetector1, 0x40);
+ else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
+ else
+ PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x40);
*/
- //else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
-
-
- //else
- //PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x40);
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
- // 1.4 Lower CS ratio for CCK.
+ /* 1.4 Lower CS ratio for CCK. */
write_nic_byte(dev, 0xa0a, 0x08);
- // 1.5 Higher EDCCA.
- //PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x325);
+ /* 1.5 Higher EDCCA. */
+ /*PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x325);*/
return;
}
/* 2. When RSSI increase, We have to judge if it is larger than a threshold
and then execute the step below. */
- if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh))
- {
+ if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
u8 reset_flag = 0;
if (dm_digtable.dig_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt))
- {
+ (priv->reset_count == reset_cnt)) {
dm_ctrl_initgain_byrssi_highpwr(dev);
return;
}
- else
- {
- if (priv->reset_count != reset_cnt)
- reset_flag = 1;
+ if (priv->reset_count != reset_cnt)
+ reset_flag = 1;
- reset_cnt = priv->reset_count;
- }
+ reset_cnt = priv->reset_count;
dm_digtable.dig_state = DM_STA_DIG_ON;
- //DbgPrint("DIG ON\n\r");
+ /*DbgPrint("DIG ON\n\r");*/
- // 2.1 Set initial gain.
- // 2008/02/26 MH SD3-Jerry suggest to prevent dirty environment.
- if (reset_flag == 1)
- {
+ /*
+ * 2.1 Set initial gain.
+		 * 2008/02/26 MH SD3-Jerry suggested this to prevent a dirty environment.
+ */
+ if (reset_flag == 1) {
write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
- }
- else
- {
+ } else {
write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
}
- // 2.2 Higher PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ /* 2.2 Higher PD_TH for OFDM. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
+ /*
+ * 2008/01/11 MH 40MHZ 90/92 register are not the same.
+ * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ */
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
/*
else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+ else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
+ else
+ PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x42);
*/
- //else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
-
- //else
- //PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x42);
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
- // 2.3 Higher CS ratio for CCK.
+ /* 2.3 Higher CS ratio for CCK. */
write_nic_byte(dev, 0xa0a, 0xcd);
- // 2.4 Lower EDCCA.
- /* 2008/01/11 MH 90/92 series are the same. */
- //PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x346);
+ /*
+ * 2.4 Lower EDCCA.
+ * 2008/01/11 MH 90/92 series are the same.
+ */
+ /*PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x346);*/
- // 2.5 DIG On.
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
+ /* 2.5 DIG On. */
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite. */
}
@@ -2086,7 +1933,6 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
} /* dm_CtrlInitGainByRssi */
-
/*-----------------------------------------------------------------------------
* Function: dm_ctrl_initgain_byrssi_highpwr()
*
@@ -2109,58 +1955,49 @@ static void dm_ctrl_initgain_byrssi_highpwr(
struct r8192_priv *priv = ieee80211_priv(dev);
static u32 reset_cnt_highpwr;
- // For smooth, we can not change high power DIG state in the range.
+	/* For smoothness, we cannot change the high power DIG state within this range. */
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
(priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh))
- {
return;
- }
- /* 3. When RSSI >75% or <70%, it is a high power issue. We have to judge if
- it is larger than a threshold and then execute the step below. */
- // 2008/02/05 MH SD3-Jerry Modify PD_TH for high power issue.
- if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh)
- {
+ /*
+ * 3. When RSSI >75% or <70%, it is a high power issue. We have to judge if
+ * it is larger than a threshold and then execute the step below.
+ *
+ * 2008/02/05 MH SD3-Jerry Modify PD_TH for high power issue.
+ */
+ if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh) {
if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
(priv->reset_count == reset_cnt_highpwr))
return;
- else
- dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
+ dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
- // 3.1 Higher PD_TH for OFDM for high power state.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
+ /* 3.1 Higher PD_TH for OFDM for high power state. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
*/
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
- }
- else
- {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF&&
+ } else {
+ if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
(priv->reset_count == reset_cnt_highpwr))
return;
- else
- dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
+ dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh &&
- priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh)
- {
- // 3.2 Recover PD_TH for OFDM for normal power region.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
+ priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
+ /* 3.2 Recover PD_TH for OFDM for normal power region. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
}
}
@@ -2169,51 +2006,42 @@ static void dm_ctrl_initgain_byrssi_highpwr(
} /* dm_CtrlInitGainByRssiHighPwr */
-
static void dm_initial_gain(
struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u8 initial_gain=0;
+ u8 initial_gain = 0;
static u8 initialized, force_write;
static u32 reset_cnt;
u8 tmp;
- if(dm_digtable.dig_algorithm_switch)
- {
+ if (dm_digtable.dig_algorithm_switch) {
initialized = 0;
reset_cnt = 0;
}
- if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
- {
- if(dm_digtable.cur_connect_state == DIG_CONNECT)
- {
- if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
+ if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
+ if (dm_digtable.cur_connect_state == DIG_CONNECT) {
+ if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max;
- else if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
+ else if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
else
dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val;
- }
- else //current state is disconnected
- {
- if(dm_digtable.cur_ig_value == 0)
+ } else { /* current state is disconnected */
+ if (dm_digtable.cur_ig_value == 0)
dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
else
dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
}
- }
- else // disconnected -> connected or connected -> disconnected
- {
+ } else { /* disconnected -> connected or connected -> disconnected */
dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
dm_digtable.pre_ig_value = 0;
}
- //DbgPrint("DM_DigTable.CurIGValue = 0x%x, DM_DigTable.PreIGValue = 0x%x\n", DM_DigTable.CurIGValue, DM_DigTable.PreIGValue);
+ /*DbgPrint("DM_DigTable.CurIGValue = 0x%x, DM_DigTable.PreIGValue = 0x%x\n", DM_DigTable.CurIGValue, DM_DigTable.PreIGValue);*/
- // if silent reset happened, we should rewrite the values back
- if(priv->reset_count != reset_cnt)
- {
+ /* if silent reset happened, we should rewrite the values back */
+ if (priv->reset_count != reset_cnt) {
force_write = 1;
reset_cnt = priv->reset_count;
}
@@ -2223,12 +2051,11 @@ static void dm_initial_gain(
force_write = 1;
{
- if((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
- || !initialized || force_write)
- {
+ if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
+ || !initialized || force_write) {
initial_gain = (u8)dm_digtable.cur_ig_value;
- //DbgPrint("Write initial gain = 0x%x\n", initial_gain);
- // Set initial gain.
+ /*DbgPrint("Write initial gain = 0x%x\n", initial_gain);*/
+ /* Set initial gain. */
write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
@@ -2247,93 +2074,77 @@ static void dm_pd_th(
static u8 initialized, force_write;
static u32 reset_cnt;
- if(dm_digtable.dig_algorithm_switch)
- {
+ if (dm_digtable.dig_algorithm_switch) {
initialized = 0;
reset_cnt = 0;
}
- if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
- {
- if(dm_digtable.cur_connect_state == DIG_CONNECT)
- {
+ if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
+ if (dm_digtable.cur_connect_state == DIG_CONNECT) {
if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh)
dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER;
- else if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
+ else if (dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh)
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) &&
(dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh))
dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER;
else
dm_digtable.curpd_thstate = dm_digtable.prepd_thstate;
- }
- else
- {
+ } else {
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
}
- }
- else // disconnected -> connected or connected -> disconnected
- {
+ } else { /* disconnected -> connected or connected -> disconnected */
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
}
- // if silent reset happened, we should rewrite the values back
- if(priv->reset_count != reset_cnt)
- {
+ /* if silent reset happened, we should rewrite the values back */
+ if (priv->reset_count != reset_cnt) {
force_write = 1;
reset_cnt = priv->reset_count;
}
{
- if((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
- (initialized<=3) || force_write)
- {
- //DbgPrint("Write PD_TH state = %d\n", DM_DigTable.CurPD_THState);
- if(dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER)
- {
- // Lower PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
+ (initialized <= 3) || force_write) {
+ /*DbgPrint("Write PD_TH state = %d\n", DM_DigTable.CurPD_THState);*/
+ if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
+ /* Lower PD_TH for OFDM. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
+ /*
+ * 2008/01/11 MH 40MHZ 90/92 register are not the same.
+ * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ */
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
*/
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
- }
- else if(dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER)
- {
- // Higher PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ } else if (dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER) {
+ /* Higher PD_TH for OFDM. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
+ /*
+ * 2008/01/11 MH 40MHZ 90/92 register are not the same.
+ * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
+ */
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
- }
- else if(dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER)
- {
- // Higher PD_TH for OFDM for high power state.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
+ } else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
+ /* Higher PD_TH for OFDM for high power state. */
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
*/
- }
- else
+ } else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
}
dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
- if(initialized <= 3)
+ if (initialized <= 3)
initialized++;
force_write = 0;
}
@@ -2347,54 +2158,40 @@ static void dm_cs_ratio(
static u8 initialized, force_write;
static u32 reset_cnt;
- if(dm_digtable.dig_algorithm_switch)
- {
+ if (dm_digtable.dig_algorithm_switch) {
initialized = 0;
reset_cnt = 0;
}
- if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
- {
- if(dm_digtable.cur_connect_state == DIG_CONNECT)
- {
- if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
+ if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
+ if (dm_digtable.cur_connect_state == DIG_CONNECT) {
+ if (dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh)
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh))
+ else if (dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh)
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
else
dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
- }
- else
- {
+ } else {
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
}
- }
- else // disconnected -> connected or connected -> disconnected
- {
+ } else /* disconnected -> connected or connected -> disconnected */
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- }
- // if silent reset happened, we should rewrite the values back
- if(priv->reset_count != reset_cnt)
- {
+ /* if silent reset happened, we should rewrite the values back */
+ if (priv->reset_count != reset_cnt) {
force_write = 1;
reset_cnt = priv->reset_count;
}
-
{
- if((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
- !initialized || force_write)
- {
- //DbgPrint("Write CS_ratio state = %d\n", DM_DigTable.CurCS_ratioState);
- if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
- {
- // Lower CS ratio for CCK.
+ if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
+ !initialized || force_write) {
+ /*DbgPrint("Write CS_ratio state = %d\n", DM_DigTable.CurCS_ratioState);*/
+ if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER) {
+ /* Lower CS ratio for CCK. */
write_nic_byte(dev, 0xa0a, 0x08);
- }
- else if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
- {
- // Higher CS ratio for CCK.
+ } else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER) {
+ /* Higher CS ratio for CCK. */
write_nic_byte(dev, 0xa0a, 0xcd);
}
dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
@@ -2411,53 +2208,46 @@ void dm_init_edca_turbo(struct net_device *dev)
priv->bcurrent_turbo_EDCA = false;
priv->ieee80211->bis_any_nonbepkts = false;
priv->bis_cur_rdlstate = false;
-} // dm_init_edca_turbo
+} /* dm_init_edca_turbo */
static void dm_check_edca_turbo(
struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- //PSTA_QOS pStaQos = pMgntInfo->pStaQos;
+ /*PSTA_QOS pStaQos = pMgntInfo->pStaQos;*/
- // Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.
+ /* Keep past Tx/Rx packet count for RT-to-RT EDCA turbo. */
static unsigned long lastTxOkCnt;
static unsigned long lastRxOkCnt;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
- //
- // Do not be Turbo if it's under WiFi config and Qos Enabled, because the EDCA parameters
- // should follow the settings from QAP. By Bruce, 2007-12-07.
- //
- if(priv->ieee80211->state != IEEE80211_LINKED)
+ /*
+	 * Do not enable Turbo mode if it's under WiFi config and QoS is enabled, because the EDCA parameters
+ * should follow the settings from QAP. By Bruce, 2007-12-07.
+ */
+ if (priv->ieee80211->state != IEEE80211_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
- // We do not turn on EDCA turbo mode for some AP that has IOT issue
- if(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
+ /* We do not turn on EDCA turbo mode for some AP that has IOT issue */
+ if (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
goto dm_CheckEdcaTurbo_EXIT;
-// printk("========>%s():bis_any_nonbepkts is %d\n",__func__,priv->bis_any_nonbepkts);
- // Check the status for current condition.
- if(!priv->ieee80211->bis_any_nonbepkts)
- {
+ /*printk("========>%s():bis_any_nonbepkts is %d\n", __func__, priv->bis_any_nonbepkts);*/
+ /* Check the status for current condition. */
+ if (!priv->ieee80211->bis_any_nonbepkts) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- // For RT-AP, we needs to turn it on when Rx>Tx
- if(curRxOkCnt > 4*curTxOkCnt)
- {
- //printk("%s():curRxOkCnt > 4*curTxOkCnt\n");
- if(!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
- {
+		/* For RT-AP, we need to turn it on when Rx > Tx */
+ if (curRxOkCnt > 4*curTxOkCnt) {
+ /*printk("%s():curRxOkCnt > 4*curTxOkCnt\n");*/
+ if (!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) {
write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = true;
}
- }
- else
- {
-
- //printk("%s():curRxOkCnt < 4*curTxOkCnt\n");
- if(priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
- {
+ } else {
+ /*printk("%s():curRxOkCnt < 4*curTxOkCnt\n");*/
+ if (priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) {
write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = false;
}
@@ -2465,50 +2255,47 @@ static void dm_check_edca_turbo(
}
priv->bcurrent_turbo_EDCA = true;
- }
- else
- {
- //
- // Turn Off EDCA turbo here.
- // Restore original EDCA according to the declaration of AP.
- //
- if(priv->bcurrent_turbo_EDCA)
- {
-
+ } else {
+ /*
+ * Turn Off EDCA turbo here.
+ * Restore original EDCA according to the declaration of AP.
+ */
+ if (priv->bcurrent_turbo_EDCA) {
{
u8 u1bAIFS;
u32 u4bAcParam;
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->mode;
- // For Each time updating EDCA parameter, reset EDCA turbo mode status.
+				/* Each time the EDCA parameters are updated, reset the EDCA turbo mode status. */
dm_init_edca_turbo(dev);
- u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[0]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[0]))<< AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[0]))<< AC_PARAM_ECW_MIN_OFFSET)|
+ u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
+ u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[0])) << AC_PARAM_TXOP_LIMIT_OFFSET)|
+ (((u32)(qos_parameters->cw_max[0])) << AC_PARAM_ECW_MAX_OFFSET)|
+ (((u32)(qos_parameters->cw_min[0])) << AC_PARAM_ECW_MIN_OFFSET)|
((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
- //write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
+ /*write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);*/
write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
- // Check ACM bit.
- // If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13.
+ /*
+ * Check ACM bit.
+				 * If it is set, immediately set the ACM control bit to downgrade the AC for passing the WMM test plan. Annie, 2005-12-13.
+ */
{
- // TODO: Modified this part and try to set acm control in only 1 IO processing!!
+					/* TODO: Modify this part and try to set ACM control in only one I/O operation. */
PACI_AIFSN pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
u8 AcmCtrl;
+
read_nic_byte(dev, AcmHwCtrl, &AcmCtrl);
- if(pAciAifsn->f.ACM)
- { // ACM bit is 1.
+
+ if (pAciAifsn->f.ACM) { /* ACM bit is 1. */
AcmCtrl |= AcmHw_BeqEn;
- }
- else
- { // ACM bit is 0.
+ } else { /* ACM bit is 0. */
AcmCtrl &= (~AcmHw_BeqEn);
}
- RT_TRACE(COMP_QOS,"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl) ;
+ RT_TRACE(COMP_QOS, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
}
}
@@ -2516,13 +2303,12 @@ static void dm_check_edca_turbo(
}
}
-
dm_CheckEdcaTurbo_EXIT:
- // Set variables for next time.
+ /* Set variables for next time. */
priv->ieee80211->bis_any_nonbepkts = false;
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
-} // dm_CheckEdcaTurbo
+} /* dm_CheckEdcaTurbo */
static void dm_init_ctstoself(struct net_device *dev)
{
@@ -2541,8 +2327,7 @@ static void dm_ctstoself(struct net_device *dev)
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
- if(priv->ieee80211->bCTSToSelfEnable != TRUE)
- {
+ if (priv->ieee80211->bCTSToSelfEnable != TRUE) {
pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
return;
}
@@ -2552,17 +2337,13 @@ static void dm_ctstoself(struct net_device *dev)
3. <50 disable, >55 enable
*/
- if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
- {
+ if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- if(curRxOkCnt > 4*curTxOkCnt) //downlink, disable CTS to self
- {
+ if (curRxOkCnt > 4*curTxOkCnt) { /* downlink, disable CTS to self */
pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
- //DbgPrint("dm_CTSToSelf() ==> CTS to self disabled -- downlink\n");
- }
- else //uplink
- {
+ /*DbgPrint("dm_CTSToSelf() ==> CTS to self disabled -- downlink\n");*/
+ } else { /* uplink */
pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
}
@@ -2592,15 +2373,15 @@ static void dm_check_pbc_gpio(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u8 tmp1byte;
-
read_nic_byte(dev, GPI, &tmp1byte);
- if(tmp1byte == 0xff)
+ if (tmp1byte == 0xff)
return;
- if (tmp1byte&BIT6 || tmp1byte&BIT0)
- {
- // Here we only set bPbcPressed to TRUE
- // After trigger PBC, the variable will be set to FALSE
+ if (tmp1byte&BIT6 || tmp1byte&BIT0) {
+ /*
+ * Here we only set bPbcPressed to TRUE
+		 * Here we only set bPbcPressed to TRUE.
+		 * After the PBC is triggered, the variable will be set to FALSE.
RT_TRACE(COMP_IO, "CheckPbcGPIO - PBC is pressed\n");
priv->bpbc_pressed = true;
}
@@ -2625,26 +2406,24 @@ static void dm_check_pbc_gpio(struct net_device *dev)
*---------------------------------------------------------------------------*/
void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,rfpath_check_wq);
- struct net_device *dev =priv->ieee80211->dev;
- //bool bactually_set = false;
+ struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct r8192_priv *priv = container_of(dwork, struct r8192_priv, rfpath_check_wq);
+ struct net_device *dev = priv->ieee80211->dev;
+ /*bool bactually_set = false;*/
u8 rfpath = 0, i;
-
/* 2008/01/30 MH After discussing with SD3 Jerry, 0xc04/0xd04 register will
always be the same. We only read 0xc04 now. */
read_nic_byte(dev, 0xc04, &rfpath);
- // Check Bit 0-3, it means if RF A-D is enabled.
- for (i = 0; i < RF90_PATH_MAX; i++)
- {
+	/* Check bits 0-3; they indicate whether RF A-D is enabled. */
+ for (i = 0; i < RF90_PATH_MAX; i++) {
if (rfpath & (0x01<<i))
priv->brfpath_rxenable[i] = 1;
else
priv->brfpath_rxenable[i] = 0;
}
- if(!DM_RxPathSelTable.Enable)
+ if (!DM_RxPathSelTable.Enable)
return;
dm_rxpath_sel_byrssi(dev);
@@ -2654,17 +2433,17 @@ static void dm_init_rxpath_selection(struct net_device *dev)
{
u8 i;
struct r8192_priv *priv = ieee80211_priv(dev);
- DM_RxPathSelTable.Enable = 1; //default enabled
+
+ DM_RxPathSelTable.Enable = 1; /* default enabled */
DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low;
DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH;
- if(priv->CustomerID == RT_CID_819x_Netcore)
+ if (priv->CustomerID == RT_CID_819x_Netcore)
DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
else
DM_RxPathSelTable.cck_method = CCK_Rx_Version_1;
DM_RxPathSelTable.DbgMode = DM_DBG_OFF;
DM_RxPathSelTable.disabledRF = 0;
- for(i=0; i<4; i++)
- {
+ for (i = 0; i < 4; i++) {
DM_RxPathSelTable.rf_rssi[i] = 50;
DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
@@ -2674,22 +2453,21 @@ static void dm_init_rxpath_selection(struct net_device *dev)
static void dm_rxpath_sel_byrssi(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u8 i, max_rssi_index=0, min_rssi_index=0, sec_rssi_index=0, rf_num=0;
- u8 tmp_max_rssi=0, tmp_min_rssi=0, tmp_sec_rssi=0;
- u8 cck_default_Rx=0x2; //RF-C
- u8 cck_optional_Rx=0x3;//RF-D
- long tmp_cck_max_pwdb=0, tmp_cck_min_pwdb=0, tmp_cck_sec_pwdb=0;
- u8 cck_rx_ver2_max_index=0, cck_rx_ver2_min_index=0, cck_rx_ver2_sec_index=0;
+ u8 i, max_rssi_index = 0, min_rssi_index = 0, sec_rssi_index = 0, rf_num = 0;
+ u8 tmp_max_rssi = 0, tmp_min_rssi = 0, tmp_sec_rssi = 0;
+ u8 cck_default_Rx = 0x2; /* RF-C */
+ u8 cck_optional_Rx = 0x3; /* RF-D */
+ long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
+ u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0, cck_rx_ver2_sec_index = 0;
u8 cur_rf_rssi;
long cur_cck_pwdb;
static u8 disabled_rf_cnt, cck_Rx_Path_initialized;
u8 update_cck_rx_path;
- if(priv->rf_type != RF_2T4R)
+ if (priv->rf_type != RF_2T4R)
return;
- if(!cck_Rx_Path_initialized)
- {
+ if (!cck_Rx_Path_initialized) {
read_nic_byte(dev, 0xa07, &DM_RxPathSelTable.cck_Rx_path);
DM_RxPathSelTable.cck_Rx_path &= 0xf;
cck_Rx_Path_initialized = 1;
@@ -2698,90 +2476,63 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
read_nic_byte(dev, 0xc04, &DM_RxPathSelTable.disabledRF);
DM_RxPathSelTable.disabledRF = ~DM_RxPathSelTable.disabledRF & 0xf;
- if(priv->ieee80211->mode == WIRELESS_MODE_B)
- {
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; //pure B mode, fixed cck version2
- //DbgPrint("Pure B mode, use cck rx version2 \n");
+ if (priv->ieee80211->mode == WIRELESS_MODE_B) {
+ DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; /* pure B mode, fixed cck version2 */
+ /*DbgPrint("Pure B mode, use cck rx version2\n");*/
}
- //decide max/sec/min rssi index
- for (i=0; i<RF90_PATH_MAX; i++)
- {
- if(!DM_RxPathSelTable.DbgMode)
+ /* decide max/sec/min rssi index */
+ for (i = 0; i < RF90_PATH_MAX; i++) {
+ if (!DM_RxPathSelTable.DbgMode)
DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
- if(priv->brfpath_rxenable[i])
- {
+ if (priv->brfpath_rxenable[i]) {
rf_num++;
cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
- if(rf_num == 1) // find first enabled rf path and the rssi values
- { //initialize, set all rssi index to the same one
+ if (rf_num == 1) { /* find first enabled rf path and the rssi values */
+ /* initialize, set all rssi index to the same one */
max_rssi_index = min_rssi_index = sec_rssi_index = i;
tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
- }
- else if(rf_num == 2)
- { // we pick up the max index first, and let sec and min to be the same one
- if(cur_rf_rssi >= tmp_max_rssi)
- {
+ } else if (rf_num == 2) { /* we pick up the max index first, and let sec and min be the same one */
+ if (cur_rf_rssi >= tmp_max_rssi) {
tmp_max_rssi = cur_rf_rssi;
max_rssi_index = i;
- }
- else
- {
+ } else {
tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
sec_rssi_index = min_rssi_index = i;
}
- }
- else
- {
- if(cur_rf_rssi > tmp_max_rssi)
- {
+ } else {
+ if (cur_rf_rssi > tmp_max_rssi) {
tmp_sec_rssi = tmp_max_rssi;
sec_rssi_index = max_rssi_index;
tmp_max_rssi = cur_rf_rssi;
max_rssi_index = i;
- }
- else if(cur_rf_rssi == tmp_max_rssi)
- { // let sec and min point to the different index
+ } else if (cur_rf_rssi == tmp_max_rssi) { /* let sec and min point to the different index */
tmp_sec_rssi = cur_rf_rssi;
sec_rssi_index = i;
- }
- else if((cur_rf_rssi < tmp_max_rssi) &&(cur_rf_rssi > tmp_sec_rssi))
- {
+ } else if ((cur_rf_rssi < tmp_max_rssi) && (cur_rf_rssi > tmp_sec_rssi)) {
tmp_sec_rssi = cur_rf_rssi;
sec_rssi_index = i;
- }
- else if(cur_rf_rssi == tmp_sec_rssi)
- {
- if(tmp_sec_rssi == tmp_min_rssi)
- { // let sec and min point to the different index
+ } else if (cur_rf_rssi == tmp_sec_rssi) {
+ if (tmp_sec_rssi == tmp_min_rssi) {
+ /* let sec and min point to the different index */
tmp_sec_rssi = cur_rf_rssi;
sec_rssi_index = i;
+ } else {
+ /* In this case we don't need to set any index */
}
- else
- {
- // This case we don't need to set any index
- }
- }
- else if((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi))
- {
- // This case we don't need to set any index
- }
- else if(cur_rf_rssi == tmp_min_rssi)
- {
- if(tmp_sec_rssi == tmp_min_rssi)
- { // let sec and min point to the different index
+ } else if ((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi)) {
+ /* In this case we don't need to set any index */
+ } else if (cur_rf_rssi == tmp_min_rssi) {
+ if (tmp_sec_rssi == tmp_min_rssi) {
+ /* let sec and min point to the different index */
tmp_min_rssi = cur_rf_rssi;
min_rssi_index = i;
+ } else {
+ /* In this case we don't need to set any index */
}
- else
- {
- // This case we don't need to set any index
- }
- }
- else if(cur_rf_rssi < tmp_min_rssi)
- {
+ } else if (cur_rf_rssi < tmp_min_rssi) {
tmp_min_rssi = cur_rf_rssi;
min_rssi_index = i;
}
@@ -2790,83 +2541,51 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
}
rf_num = 0;
- // decide max/sec/min cck pwdb index
- if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2)
- {
- for (i=0; i<RF90_PATH_MAX; i++)
- {
- if(priv->brfpath_rxenable[i])
- {
+ /* decide max/sec/min cck pwdb index */
+ if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ for (i = 0; i < RF90_PATH_MAX; i++) {
+ if (priv->brfpath_rxenable[i]) {
rf_num++;
cur_cck_pwdb = DM_RxPathSelTable.cck_pwdb_sta[i];
- if(rf_num == 1) // find first enabled rf path and the rssi values
- { //initialize, set all rssi index to the same one
+ if (rf_num == 1) { /* find first enabled rf path and the rssi values */
+ /* initialize, set all rssi index to the same one */
cck_rx_ver2_max_index = cck_rx_ver2_min_index = cck_rx_ver2_sec_index = i;
tmp_cck_max_pwdb = tmp_cck_min_pwdb = tmp_cck_sec_pwdb = cur_cck_pwdb;
- }
- else if(rf_num == 2)
- { // we pick up the max index first, and let sec and min to be the same one
- if(cur_cck_pwdb >= tmp_cck_max_pwdb)
- {
+ } else if (rf_num == 2) { /* we pick up the max index first, and let sec and min be the same one */
+ if (cur_cck_pwdb >= tmp_cck_max_pwdb) {
tmp_cck_max_pwdb = cur_cck_pwdb;
cck_rx_ver2_max_index = i;
- }
- else
- {
+ } else {
tmp_cck_sec_pwdb = tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = cck_rx_ver2_min_index = i;
}
- }
- else
- {
- if(cur_cck_pwdb > tmp_cck_max_pwdb)
- {
+ } else {
+ if (cur_cck_pwdb > tmp_cck_max_pwdb) {
tmp_cck_sec_pwdb = tmp_cck_max_pwdb;
cck_rx_ver2_sec_index = cck_rx_ver2_max_index;
tmp_cck_max_pwdb = cur_cck_pwdb;
cck_rx_ver2_max_index = i;
- }
- else if(cur_cck_pwdb == tmp_cck_max_pwdb)
- { // let sec and min point to the different index
+ } else if (cur_cck_pwdb == tmp_cck_max_pwdb) {
+ /* let sec and min point to the different index */
tmp_cck_sec_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
- }
- else if((cur_cck_pwdb < tmp_cck_max_pwdb) &&(cur_cck_pwdb > tmp_cck_sec_pwdb))
- {
+ } else if ((cur_cck_pwdb < tmp_cck_max_pwdb) && (cur_cck_pwdb > tmp_cck_sec_pwdb)) {
tmp_cck_sec_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
- }
- else if(cur_cck_pwdb == tmp_cck_sec_pwdb)
- {
- if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
- { // let sec and min point to the different index
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- }
- else
- {
- // This case we don't need to set any index
- }
- }
- else if((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb))
- {
- // This case we don't need to set any index
- }
- else if(cur_cck_pwdb == tmp_cck_min_pwdb)
- {
- if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
- { // let sec and min point to the different index
- tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- }
- else
- {
- // This case we don't need to set any index
- }
- }
- else if(cur_cck_pwdb < tmp_cck_min_pwdb)
- {
+ } else if (cur_cck_pwdb == tmp_cck_sec_pwdb && tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
+ /* let sec and min point to the different index */
+ tmp_cck_sec_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_sec_index = i;
+ /* otherwise we don't need to set any index */
+ } else if ((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb)) {
+ /* In this case we don't need to set any index */
+ } else if (cur_cck_pwdb == tmp_cck_min_pwdb && tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
+ /* let sec and min point to the different index */
+ tmp_cck_min_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_min_index = i;
+ /* otherwise we don't need to set any index */
+ } else if (cur_cck_pwdb < tmp_cck_min_pwdb) {
tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_min_index = i;
}
@@ -2876,56 +2595,48 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
}
}
-
- // Set CCK Rx path
- // reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path.
+ /*
+ * Set CCK Rx path
+ * reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path.
+ */
update_cck_rx_path = 0;
- if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2)
- {
+ if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
cck_default_Rx = cck_rx_ver2_max_index;
cck_optional_Rx = cck_rx_ver2_sec_index;
- if(tmp_cck_max_pwdb != -64)
+ if (tmp_cck_max_pwdb != -64)
update_cck_rx_path = 1;
}
- if(tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2)
- {
- if((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH)
- {
- //record the enabled rssi threshold
+ if (tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2) {
+ if ((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH) {
+ /* record the enabled rssi threshold */
DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5;
- //disable the BB Rx path, OFDM
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xc04[3:0]
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xd04[3:0]
+ /* disable the BB Rx path, OFDM */
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); /* 0xc04[3:0] */
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); /* 0xd04[3:0] */
disabled_rf_cnt++;
}
- if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_1)
- {
+ if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) {
cck_default_Rx = max_rssi_index;
cck_optional_Rx = sec_rssi_index;
- if(tmp_max_rssi)
+ if (tmp_max_rssi)
update_cck_rx_path = 1;
}
}
- if(update_cck_rx_path)
- {
+ if (update_cck_rx_path) {
DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2)|(cck_optional_Rx);
rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_Rx_path);
}
- if(DM_RxPathSelTable.disabledRF)
- {
- for(i=0; i<4; i++)
- {
- if((DM_RxPathSelTable.disabledRF>>i) & 0x1) //disabled rf
- {
- if(tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i])
- {
- //enable the BB Rx path
- //DbgPrint("RF-%d is enabled. \n", 0x1<<i);
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<i, 0x1); // 0xc04[3:0]
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<i, 0x1); // 0xd04[3:0]
+ if (DM_RxPathSelTable.disabledRF) {
+ for (i = 0; i < 4; i++) {
+ if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) { /* disabled rf */
+ if (tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i]) {
+ /* enable the BB Rx path */
+ /*DbgPrint("RF-%d is enabled.\n", 0x1<<i);*/
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<i, 0x1); /* 0xc04[3:0] */
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<i, 0x1); /* 0xd04[3:0] */
DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
disabled_rf_cnt--;
}
@@ -2950,14 +2661,14 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
* 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void dm_check_rx_path_selection(struct net_device *dev)
+static void dm_check_rx_path_selection(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- queue_delayed_work(priv->priv_wq,&priv->rfpath_check_wq,0);
-} /* dm_CheckRxRFPath */
+ queue_delayed_work(priv->priv_wq, &priv->rfpath_check_wq, 0);
+} /* dm_CheckRxRFPath */
-static void dm_init_fsync (struct net_device *dev)
+static void dm_init_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2966,20 +2677,20 @@ static void dm_init_fsync (struct net_device *dev)
priv->ieee80211->fsync_rssi_threshold = 30;
priv->ieee80211->bfsync_enable = false;
priv->ieee80211->fsync_multiple_timeinterval = 3;
- priv->ieee80211->fsync_firstdiff_ratethreshold= 100;
- priv->ieee80211->fsync_seconddiff_ratethreshold= 200;
+ priv->ieee80211->fsync_firstdiff_ratethreshold = 100;
+ priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
- priv->framesyncMonitor = 1; // current default 0xc38 monitor on
+ priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
init_timer(&priv->fsync_timer);
priv->fsync_timer.data = (unsigned long)dev;
priv->fsync_timer.function = dm_fsync_timer_callback;
}
-
static void dm_deInit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
+
del_timer_sync(&priv->fsync_timer);
}
@@ -2987,102 +2698,84 @@ void dm_fsync_timer_callback(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
- u32 rate_index, rate_count = 0, rate_count_diff=0;
+ u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
- if(priv->ieee80211->state == IEEE80211_LINKED &&
+ if (priv->ieee80211->state == IEEE80211_LINKED &&
priv->ieee80211->bfsync_enable &&
- (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
- {
- // Count rate 54, MCS [7], [12, 13, 14, 15]
+ (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
+ /* Count rate 54, MCS [7], [12, 13, 14, 15] */
u32 rate_bitmap;
- for(rate_index = 0; rate_index <= 27; rate_index++)
- {
+
+ for (rate_index = 0; rate_index <= 27; rate_index++) {
rate_bitmap = 1 << rate_index;
- if(priv->ieee80211->fsync_rate_bitmap & rate_bitmap)
- rate_count+= priv->stats.received_rate_histogram[1][rate_index];
+ if (priv->ieee80211->fsync_rate_bitmap & rate_bitmap)
+ rate_count += priv->stats.received_rate_histogram[1][rate_index];
}
- if(rate_count < priv->rate_record)
+ if (rate_count < priv->rate_record)
rate_count_diff = 0xffffffff - rate_count + priv->rate_record;
else
rate_count_diff = rate_count - priv->rate_record;
- if(rate_count_diff < priv->rateCountDiffRecord)
- {
-
+ if (rate_count_diff < priv->rateCountDiffRecord) {
u32 DiffNum = priv->rateCountDiffRecord - rate_count_diff;
- // Continue count
- if(DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold)
+ /* Continue count */
+ if (DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold)
priv->ContinueDiffCount++;
else
priv->ContinueDiffCount = 0;
- // Continue count over
- if(priv->ContinueDiffCount >=2)
- {
+ /* Continue count over */
+ if (priv->ContinueDiffCount >= 2) {
bSwitchFromCountDiff = true;
priv->ContinueDiffCount = 0;
}
- }
- else
- {
- // Stop the continued count
+ } else {
+ /* Stop the continued count */
priv->ContinueDiffCount = 0;
}
- //If Count diff <= FsyncRateCountThreshold
- if(rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold)
- {
+ /* If Count diff <= FsyncRateCountThreshold */
+ if (rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold) {
bSwitchFromCountDiff = true;
priv->ContinueDiffCount = 0;
}
priv->rate_record = rate_count;
priv->rateCountDiffRecord = rate_count_diff;
- RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
- // if we never receive those mcs rate and rssi > 30 % then switch fsyn
- if(priv->undecorated_smoothed_pwdb > priv->ieee80211->fsync_rssi_threshold && bSwitchFromCountDiff)
- {
+ RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff, priv->bswitch_fsync);
+ /* if we never receive those MCS rates and rssi > 30% then switch fsync */
+ if (priv->undecorated_smoothed_pwdb > priv->ieee80211->fsync_rssi_threshold && bSwitchFromCountDiff) {
bDoubleTimeInterval = true;
priv->bswitch_fsync = !priv->bswitch_fsync;
- if(priv->bswitch_fsync)
- {
+ if (priv->bswitch_fsync) {
write_nic_byte(dev, 0xC36, 0x1c);
write_nic_byte(dev, 0xC3e, 0x90);
- }
- else
- {
+ } else {
write_nic_byte(dev, 0xC36, 0x5c);
write_nic_byte(dev, 0xC3e, 0x96);
}
- }
- else if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->fsync_rssi_threshold)
- {
- if(priv->bswitch_fsync)
- {
+ } else if (priv->undecorated_smoothed_pwdb <= priv->ieee80211->fsync_rssi_threshold) {
+ if (priv->bswitch_fsync) {
priv->bswitch_fsync = false;
write_nic_byte(dev, 0xC36, 0x5c);
write_nic_byte(dev, 0xC3e, 0x96);
}
}
- if(bDoubleTimeInterval){
- if(timer_pending(&priv->fsync_timer))
+ if (bDoubleTimeInterval) {
+ if (timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
add_timer(&priv->fsync_timer);
- }
- else{
- if(timer_pending(&priv->fsync_timer))
+ } else {
+ if (timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
}
- }
- else
- {
- // Let Register return to default value;
- if(priv->bswitch_fsync)
- {
+ } else {
+ /* Let the register return to its default value */
+ if (priv->bswitch_fsync) {
priv->bswitch_fsync = false;
write_nic_byte(dev, 0xC36, 0x5c);
write_nic_byte(dev, 0xC3e, 0x96);
@@ -3091,7 +2784,7 @@ void dm_fsync_timer_callback(unsigned long data)
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
- RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
+ RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff, priv->bswitch_fsync);
}
static void dm_StartHWFsync(struct net_device *dev)
@@ -3108,9 +2801,8 @@ static void dm_EndSWFsync(struct net_device *dev)
RT_TRACE(COMP_HALDM, "%s\n", __func__);
del_timer_sync(&(priv->fsync_timer));
- // Let Register return to default value;
- if(priv->bswitch_fsync)
- {
+ /* Let the register return to its default value */
+ if (priv->bswitch_fsync) {
priv->bswitch_fsync = false;
write_nic_byte(dev, 0xC36, 0x5c);
@@ -3130,30 +2822,26 @@ static void dm_StartSWFsync(struct net_device *dev)
u32 rateBitmap;
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- // Initial rate record to zero, start to record.
+ /* Initialize rate record to zero, start to record. */
priv->rate_record = 0;
- // Initialize continue diff count to zero, start to record.
+ /* Initialize continue diff count to zero, start to record. */
priv->ContinueDiffCount = 0;
priv->rateCountDiffRecord = 0;
priv->bswitch_fsync = false;
- if(priv->ieee80211->mode == WIRELESS_MODE_N_24G)
- {
- priv->ieee80211->fsync_firstdiff_ratethreshold= 600;
+ if (priv->ieee80211->mode == WIRELESS_MODE_N_24G) {
+ priv->ieee80211->fsync_firstdiff_ratethreshold = 600;
priv->ieee80211->fsync_seconddiff_ratethreshold = 0xffff;
- }
- else
- {
- priv->ieee80211->fsync_firstdiff_ratethreshold= 200;
+ } else {
+ priv->ieee80211->fsync_firstdiff_ratethreshold = 200;
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
}
- for(rateIndex = 0; rateIndex <= 27; rateIndex++)
- {
- rateBitmap = 1 << rateIndex;
- if(priv->ieee80211->fsync_rate_bitmap & rateBitmap)
+ for (rateIndex = 0; rateIndex <= 27; rateIndex++) {
+ rateBitmap = 1 << rateIndex;
+ if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
}
- if(timer_pending(&priv->fsync_timer))
+ if (timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
@@ -3173,139 +2861,112 @@ static void dm_EndHWFsync(struct net_device *dev)
void dm_check_fsync(struct net_device *dev)
{
#define RegC38_Default 0
-#define RegC38_NonFsync_Other_AP 1
-#define RegC38_Fsync_AP_BCM 2
+#define RegC38_NonFsync_Other_AP 1
+#define RegC38_Fsync_AP_BCM 2
struct r8192_priv *priv = ieee80211_priv(dev);
- //u32 framesyncC34;
- static u8 reg_c38_State=RegC38_Default;
+ /*u32 framesyncC34;*/
+ static u8 reg_c38_State = RegC38_Default;
static u32 reset_cnt;
RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval);
RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold);
- if(priv->ieee80211->state == IEEE80211_LINKED &&
- (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
- {
- if(priv->ieee80211->bfsync_enable == 0)
- {
- switch (priv->ieee80211->fsync_state)
- {
- case Default_Fsync:
- dm_StartHWFsync(dev);
- priv->ieee80211->fsync_state = HW_Fsync;
- break;
- case SW_Fsync:
- dm_EndSWFsync(dev);
- dm_StartHWFsync(dev);
- priv->ieee80211->fsync_state = HW_Fsync;
- break;
- case HW_Fsync:
- default:
- break;
+ if (priv->ieee80211->state == IEEE80211_LINKED &&
+ (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
+ if (priv->ieee80211->bfsync_enable == 0) {
+ switch (priv->ieee80211->fsync_state) {
+ case Default_Fsync:
+ dm_StartHWFsync(dev);
+ priv->ieee80211->fsync_state = HW_Fsync;
+ break;
+ case SW_Fsync:
+ dm_EndSWFsync(dev);
+ dm_StartHWFsync(dev);
+ priv->ieee80211->fsync_state = HW_Fsync;
+ break;
+ case HW_Fsync:
+ default:
+ break;
}
- }
- else
- {
- switch (priv->ieee80211->fsync_state)
- {
- case Default_Fsync:
- dm_StartSWFsync(dev);
- priv->ieee80211->fsync_state = SW_Fsync;
- break;
- case HW_Fsync:
- dm_EndHWFsync(dev);
- dm_StartSWFsync(dev);
- priv->ieee80211->fsync_state = SW_Fsync;
- break;
- case SW_Fsync:
- default:
- break;
-
+ } else {
+ switch (priv->ieee80211->fsync_state) {
+ case Default_Fsync:
+ dm_StartSWFsync(dev);
+ priv->ieee80211->fsync_state = SW_Fsync;
+ break;
+ case HW_Fsync:
+ dm_EndHWFsync(dev);
+ dm_StartSWFsync(dev);
+ priv->ieee80211->fsync_state = SW_Fsync;
+ break;
+ case SW_Fsync:
+ default:
+ break;
}
}
- if(priv->framesyncMonitor)
- {
- if(reg_c38_State != RegC38_Fsync_AP_BCM)
- { //For broadcom AP we write different default value
+ if (priv->framesyncMonitor) {
+ if (reg_c38_State != RegC38_Fsync_AP_BCM) {
+ /* For Broadcom APs we write a different default value */
write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
reg_c38_State = RegC38_Fsync_AP_BCM;
}
}
- }
- else
- {
- switch (priv->ieee80211->fsync_state)
- {
- case HW_Fsync:
- dm_EndHWFsync(dev);
- priv->ieee80211->fsync_state = Default_Fsync;
- break;
- case SW_Fsync:
- dm_EndSWFsync(dev);
- priv->ieee80211->fsync_state = Default_Fsync;
- break;
- case Default_Fsync:
- default:
- break;
+ } else {
+ switch (priv->ieee80211->fsync_state) {
+ case HW_Fsync:
+ dm_EndHWFsync(dev);
+ priv->ieee80211->fsync_state = Default_Fsync;
+ break;
+ case SW_Fsync:
+ dm_EndSWFsync(dev);
+ priv->ieee80211->fsync_state = Default_Fsync;
+ break;
+ case Default_Fsync:
+ default:
+ break;
}
- if(priv->framesyncMonitor)
- {
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- if(priv->undecorated_smoothed_pwdb <= RegC38_TH)
- {
- if(reg_c38_State != RegC38_NonFsync_Other_AP)
- {
+ if (priv->framesyncMonitor) {
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
+ if (priv->undecorated_smoothed_pwdb <= RegC38_TH) {
+ if (reg_c38_State != RegC38_NonFsync_Other_AP) {
write_nic_byte(dev, rOFDM0_RxDetector3, 0x90);
reg_c38_State = RegC38_NonFsync_Other_AP;
}
- }
- else if(priv->undecorated_smoothed_pwdb >= (RegC38_TH+5))
- {
- if(reg_c38_State)
- {
+ } else if (priv->undecorated_smoothed_pwdb >= (RegC38_TH+5)) {
+ if (reg_c38_State) {
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
- //DbgPrint("Fsync is idle, rssi>=40, write 0xc38 = 0x%x \n", pHalData->framesync);
+ /*DbgPrint("Fsync is idle, rssi>=40, write 0xc38 = 0x%x\n", pHalData->framesync);*/
}
}
- }
- else
- {
- if(reg_c38_State)
- {
+ } else {
+ if (reg_c38_State) {
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
- //DbgPrint("Fsync is idle, not connected, write 0xc38 = 0x%x \n", pHalData->framesync);
+ /*DbgPrint("Fsync is idle, not connected, write 0xc38 = 0x%x\n", pHalData->framesync);*/
}
}
}
}
- if(priv->framesyncMonitor)
- {
- if(priv->reset_count != reset_cnt)
- { //After silent reset, the reg_c38_State will be returned to default value
+ if (priv->framesyncMonitor) {
+ if (priv->reset_count != reset_cnt) { /* After silent reset, the reg_c38_State will be returned to default value */
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
reset_cnt = priv->reset_count;
- //DbgPrint("reg_c38_State = 0 for silent reset. \n");
+ /*DbgPrint("reg_c38_State = 0 for silent reset.\n");*/
}
- }
- else
- {
- if(reg_c38_State)
- {
+ } else {
+ if (reg_c38_State) {
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
- //DbgPrint("framesync no monitor, write 0xc38 = 0x%x \n", pHalData->framesync);
+ /*DbgPrint("framesync no monitor, write 0xc38 = 0x%x\n", pHalData->framesync);*/
}
}
}
-
/*-----------------------------------------------------------------------------
* Function: dm_shadow_init()
*
@@ -3328,10 +2989,9 @@ void dm_shadow_init(struct net_device *dev)
u16 offset;
for (page = 0; page < 5; page++)
- for (offset = 0; offset < 256; offset++)
- {
+ for (offset = 0; offset < 256; offset++) {
read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
- //DbgPrint("P-%d/O-%02x=%02x\r\n", page, offset, DM_Shadow[page][offset]);
+ /*DbgPrint("P-%d/O-%02x=%02x\r\n", page, offset, DM_Shadow[page][offset]);*/
}
for (page = 8; page < 11; page++)
@@ -3366,8 +3026,8 @@ static void dm_init_dynamic_txpower(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code.
- priv->ieee80211->bdynamic_txpower_enable = true; //Default to enable Tx Power Control
+ /* Initial TX Power Control for near/far range, added by amy 2008/05/15, ported from windows code. */
+ priv->ieee80211->bdynamic_txpower_enable = true; /* Default to enable Tx Power Control */
priv->bLastDTPFlag_High = false;
priv->bLastDTPFlag_Low = false;
priv->bDynamicTxHighPower = false;
@@ -3377,91 +3037,77 @@ static void dm_init_dynamic_txpower(struct net_device *dev)
static void dm_dynamic_txpower(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- unsigned int txhipower_threshhold=0;
- unsigned int txlowpower_threshold=0;
- if(priv->ieee80211->bdynamic_txpower_enable != true)
- {
+ unsigned int txhipower_threshhold = 0;
+ unsigned int txlowpower_threshold = 0;
+
+ if (priv->ieee80211->bdynamic_txpower_enable != true) {
priv->bDynamicTxHighPower = false;
priv->bDynamicTxLowPower = false;
return;
}
- //printk("priv->ieee80211->current_network.unknown_cap_exist is %d ,priv->ieee80211->current_network.broadcom_cap_exist is %d\n",priv->ieee80211->current_network.unknown_cap_exist,priv->ieee80211->current_network.broadcom_cap_exist);
- if((priv->ieee80211->current_network.atheros_cap_exist) && (priv->ieee80211->mode == IEEE_G)){
+ /*printk("priv->ieee80211->current_network.unknown_cap_exist is %d , priv->ieee80211->current_network.broadcom_cap_exist is %d\n", priv->ieee80211->current_network.unknown_cap_exist, priv->ieee80211->current_network.broadcom_cap_exist);*/
+ if ((priv->ieee80211->current_network.atheros_cap_exist) && (priv->ieee80211->mode == IEEE_G)) {
txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
- }
- else
- {
+ } else {
txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
-// printk("=======>%s(): txhipower_threshhold is %d,txlowpower_threshold is %d\n",__func__,txhipower_threshhold,txlowpower_threshold);
- RT_TRACE(COMP_TXAGC,"priv->undecorated_smoothed_pwdb = %ld \n" , priv->undecorated_smoothed_pwdb);
+ /*printk("=======>%s(): txhipower_threshhold is %d, txlowpower_threshold is %d\n", __func__, txhipower_threshhold, txlowpower_threshold);*/
+ RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n", priv->undecorated_smoothed_pwdb);
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- if(priv->undecorated_smoothed_pwdb >= txhipower_threshhold)
- {
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
+ if (priv->undecorated_smoothed_pwdb >= txhipower_threshhold) {
priv->bDynamicTxHighPower = true;
priv->bDynamicTxLowPower = false;
- }
- else
- {
- // high power state check
- if(priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower == true)
- {
+ } else {
+ /* high power state check */
+ if (priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower == true)
priv->bDynamicTxHighPower = false;
- }
- // low power state check
- if(priv->undecorated_smoothed_pwdb < 35)
- {
+
+ /* low power state check */
+ if (priv->undecorated_smoothed_pwdb < 35)
priv->bDynamicTxLowPower = true;
- }
- else if(priv->undecorated_smoothed_pwdb >= 40)
- {
+ else if (priv->undecorated_smoothed_pwdb >= 40)
priv->bDynamicTxLowPower = false;
- }
}
- }
- else
- {
- //pHalData->bTXPowerCtrlforNearFarRange = !pHalData->bTXPowerCtrlforNearFarRange;
+ } else {
+ /*pHalData->bTXPowerCtrlforNearFarRange = !pHalData->bTXPowerCtrlforNearFarRange;*/
priv->bDynamicTxHighPower = false;
priv->bDynamicTxLowPower = false;
}
- if((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
- (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low))
- {
- RT_TRACE(COMP_TXAGC,"SetTxPowerLevel8190() channel = %d \n" , priv->ieee80211->current_network.channel);
+ if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
+ (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) {
+ RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n", priv->ieee80211->current_network.channel);
#if defined(RTL8190P) || defined(RTL8192E)
- SetTxPowerLevel8190(Adapter,pHalData->CurrentChannel);
+ SetTxPowerLevel8190(Adapter, pHalData->CurrentChannel);
#endif
- rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel);
- //pHalData->bStartTxCtrlByTPCNFR = FALSE; //Clear th flag of Set TX Power from Sitesurvey
+ rtl8192_phy_setTxPower(dev, priv->ieee80211->current_network.channel);
+ /*pHalData->bStartTxCtrlByTPCNFR = FALSE; Clear the flag of Set TX Power from Sitesurvey*/
}
priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
} /* dm_dynamic_txpower */
-//added by vivi, for read tx rate and retrycount
+/* added by vivi, for reading tx rate and retry count */
static void dm_check_txrateandretrycount(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
- //for 11n tx rate
-// priv->stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);
+ /* for 11n tx rate */
+ /*priv->stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);*/
read_nic_byte(dev, Current_Tx_Rate_Reg, &ieee->softmac_stats.CurrentShowTxate);
- //printk("=============>tx_rate_reg:%x\n", ieee->softmac_stats.CurrentShowTxate);
- //for initial tx rate
-// priv->stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg);
+ /*printk("=============>tx_rate_reg:%x\n", ieee->softmac_stats.CurrentShowTxate);*/
+ /* for initial tx rate */
+ /*priv->stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg);*/
read_nic_byte(dev, Initial_Tx_Rate_Reg, &ieee->softmac_stats.last_packet_rate);
- //for tx tx retry count
-// priv->stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);
+ /* for tx retry count */
+ /*priv->stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);*/
read_nic_dword(dev, Tx_Retry_Count_Reg, &ieee->softmac_stats.txretrycount);
}
@@ -3469,11 +3115,12 @@ static void dm_send_rssi_tofw(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- // If we test chariot, we should stop the TX command ?
- // Because 92E will always silent reset when we send tx command. We use register
- // 0x1e0(byte) to notify driver.
+ /*
+ * If we test Chariot, should we stop the TX command?
+ * Because the 92E will always silently reset when we send a TX command, we use
+ * register 0x1e0 (byte) to notify the driver.
+ */
write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
- return;
}
/*---------------------------Define function prototype------------------------*/
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 3d0a98b6d8e5..e62543d22b86 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -129,8 +129,8 @@ struct dvobj_priv {
struct _adapter *padapter;
u32 nr_endpoint;
u8 ishighspeed;
- uint(*inirp_init)(struct _adapter *adapter);
- uint(*inirp_deinit)(struct _adapter *adapter);
+ uint (*inirp_init)(struct _adapter *adapter);
+ uint (*inirp_deinit)(struct _adapter *adapter);
struct usb_device *pusbdev;
};
@@ -166,7 +166,7 @@ struct _adapter {
pid_t evtThread;
struct task_struct *xmitThread;
pid_t recvThread;
- uint(*dvobj_init)(struct _adapter *adapter);
+ uint (*dvobj_init)(struct _adapter *adapter);
void (*dvobj_deinit)(struct _adapter *adapter);
struct net_device *pnetdev;
int bup;
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 5153ad9c2c75..36348d900d34 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -71,7 +71,7 @@ static inline void _init_timer(struct timer_list *ptimer,
static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
{
- mod_timer(ptimer, (jiffies+(delay_time*HZ/1000)));
+ mod_timer(ptimer, (jiffies+msecs_to_jiffies(delay_time)));
}
static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
@@ -101,12 +101,9 @@ static inline void sleep_schedulable(int ms)
{
u32 delta;
- delta = (ms * HZ) / 1000;/*(ms)*/
- if (delta == 0)
- delta = 1;/* 1 ms */
+ delta = msecs_to_jiffies(ms);/*(ms)*/
set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(delta) != 0)
- return;
+ schedule_timeout(delta);
}
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 0631f3638257..409c8c897256 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -137,20 +137,6 @@ _recv_indicatepkt_drop:
precvpriv->rx_drop++;
}
-void r8712_os_read_port(struct _adapter *padapter, struct recv_buf *precvbuf)
-{
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- precvbuf->ref_cnt--;
- /*free skb in recv_buf*/
- dev_kfree_skb_any(precvbuf->pskb);
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
- if (!precvbuf->irp_pending)
- r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
- (unsigned char *)precvbuf);
-}
-
static void _r8712_reordering_ctrl_timeout_handler (void *FunctionContext)
{
struct recv_reorder_ctrl *preorder_ctrl =
diff --git a/drivers/staging/rtl8712/recv_osdep.h b/drivers/staging/rtl8712/recv_osdep.h
index f4384ef00868..1f4986e940a3 100644
--- a/drivers/staging/rtl8712/recv_osdep.h
+++ b/drivers/staging/rtl8712/recv_osdep.h
@@ -46,7 +46,6 @@ int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
struct recv_buf *precvbuf);
int r8712_os_recvbuf_resource_free(struct _adapter *padapter,
struct recv_buf *precvbuf);
-void r8712_os_read_port(struct _adapter *padapter, struct recv_buf *precvbuf);
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
#endif
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 039ab3e97172..67e9e910aef9 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -109,16 +109,16 @@ enum rtl8712_h2c_cmd {
GEN_CMD_CODE(_DisconnectCtrlEx), /*61*/
/* To do, modify these h2c cmd, add or delete */
- GEN_CMD_CODE(_GetH2cLbk) ,
+ GEN_CMD_CODE(_GetH2cLbk),
/* WPS extra IE */
- GEN_CMD_CODE(_SetProbeReqExtraIE) ,
- GEN_CMD_CODE(_SetAssocReqExtraIE) ,
- GEN_CMD_CODE(_SetProbeRspExtraIE) ,
- GEN_CMD_CODE(_SetAssocRspExtraIE) ,
+ GEN_CMD_CODE(_SetProbeReqExtraIE),
+ GEN_CMD_CODE(_SetAssocReqExtraIE),
+ GEN_CMD_CODE(_SetProbeRspExtraIE),
+ GEN_CMD_CODE(_SetAssocRspExtraIE),
/* the following is driver will do */
- GEN_CMD_CODE(_GetCurDataRate) ,
+ GEN_CMD_CODE(_GetCurDataRate),
GEN_CMD_CODE(_GetTxRetrycnt), /* to record times that Tx retry to
* transmit packet after association
diff --git a/drivers/staging/rtl8712/rtl8712_event.h b/drivers/staging/rtl8712/rtl8712_event.h
index 3d7f79efa2c1..29a4c23a0d23 100644
--- a/drivers/staging/rtl8712/rtl8712_event.h
+++ b/drivers/staging/rtl8712/rtl8712_event.h
@@ -27,7 +27,7 @@
#define _RTL8712_EVENT_H_
void r8712_event_handle(struct _adapter *padapter, uint *peventbuf);
-void r8712_got_addbareq_event_callback(struct _adapter *adapter , u8 *pbuf);
+void r8712_got_addbareq_event_callback(struct _adapter *adapter, u8 *pbuf);
enum rtl8712_c2h_event {
GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 73b7d864ccbd..9bb364f04fd4 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -196,7 +196,7 @@ static inline char *translate_scan(struct _adapter *padapter,
if (p && ht_ielen > 0) {
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
- memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2);
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
@@ -1436,7 +1436,7 @@ static int r8711_wx_get_rate(struct net_device *dev,
if (p && ht_ielen > 0) {
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
- memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2);
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
bw_40MHz = (pht_capie->cap_info &
IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
short_GI = (pht_capie->cap_info &
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index b7462e8145d6..977a83358056 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -93,7 +93,7 @@ struct wlan_network *_r8712_alloc_network(struct mlme_priv *pmlmepriv)
return NULL;
spin_lock_irqsave(&free_queue->lock, irqL);
plist = free_queue->queue.next;
- pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list);
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
list_del_init(&pnetwork->list);
pnetwork->last_scanned = jiffies;
pmlmepriv->num_of_scanned++;
@@ -499,7 +499,7 @@ static int is_desired_network(struct _adapter *adapter,
}
/* TODO: Perry : For Power Management */
-void r8712_atimdone_event_callback(struct _adapter *adapter , u8 *pbuf)
+void r8712_atimdone_event_callback(struct _adapter *adapter, u8 *pbuf)
{
}
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index a16f15e91992..0b5461208eb9 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -575,26 +575,6 @@ uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
- struct ndis_802_11_ssid *pssid;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- *poid_par_priv->bytes_needed = (u32)sizeof(struct ndis_802_11_ssid);
- *poid_par_priv->bytes_rw = 0;
- if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return RNDIS_STATUS_INVALID_LENGTH;
- pssid = (struct ndis_802_11_ssid *)poid_par_priv->information_buf;
- if (mp_start_joinbss(Adapter, pssid) == _FAIL)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- *poid_par_priv->bytes_rw = sizeof(struct ndis_802_11_ssid);
- return status;
-}
-
uint oid_rt_pro_read_register_hdl(struct oid_par_priv
*poid_par_priv)
{
@@ -696,172 +676,6 @@ uint oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
return status;
}
-uint oid_rt_pro_burst_read_register_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- struct burst_rw_reg *pBstRwReg;
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- pBstRwReg = (struct burst_rw_reg *)poid_par_priv->information_buf;
- r8712_read_mem(Adapter, pBstRwReg->offset, (u32)pBstRwReg->len,
- pBstRwReg->Data);
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_burst_write_register_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- struct burst_rw_reg *pBstRwReg;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- pBstRwReg = (struct burst_rw_reg *)poid_par_priv->information_buf;
- r8712_write_mem(Adapter, pBstRwReg->offset, (u32)pBstRwReg->len,
- pBstRwReg->Data);
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv)
-{
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- struct eeprom_rw_param *pEEPROM;
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- pEEPROM = (struct eeprom_rw_param *)poid_par_priv->information_buf;
- pEEPROM->value = r8712_eeprom_read16(Adapter,
- (u16)(pEEPROM->offset >> 1));
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- struct eeprom_rw_param *pEEPROM;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- pEEPROM = (struct eeprom_rw_param *)poid_par_priv->information_buf;
- r8712_eeprom_write16(Adapter, (u16)(pEEPROM->offset >> 1),
- pEEPROM->value);
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- struct mp_wiparam *pwi_param;
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (poid_par_priv->information_buf_len < sizeof(struct mp_wiparam))
- return RNDIS_STATUS_INVALID_LENGTH;
- if (Adapter->mppriv.workparam.bcompleted == false)
- return RNDIS_STATUS_NOT_ACCEPTED;
- pwi_param = (struct mp_wiparam *)poid_par_priv->information_buf;
- memcpy(pwi_param, &Adapter->mppriv.workparam,
- sizeof(struct mp_wiparam));
- Adapter->mppriv.act_in_progress = false;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (poid_par_priv->information_buf_len < sizeof(uint) * 2)
- return RNDIS_STATUS_INVALID_LENGTH;
- if (*(uint *)poid_par_priv->information_buf == 1)
- Adapter->mppriv.rx_pktloss = 0;
- *((uint *)poid_par_priv->information_buf+1) =
- Adapter->mppriv.rx_pktloss;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
-{
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
-{
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (r8712_setrfintfs_cmd(Adapter, *(unsigned char *)
- poid_par_priv->information_buf) == _FAIL)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
-}
-
-uint oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- memcpy(poid_par_priv->information_buf,
- (unsigned char *)&Adapter->mppriv.rxstat,
- sizeof(struct recv_stat));
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (r8712_setdatarate_cmd(Adapter,
- poid_par_priv->information_buf) != _SUCCESS)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
-}
-
uint oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
@@ -890,251 +704,6 @@ uint oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (poid_par_priv->information_buf_len < sizeof(u8))
- return RNDIS_STATUS_INVALID_LENGTH;
- if (!r8712_setptm_cmd(Adapter, *((u8 *)poid_par_priv->information_buf)))
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
-}
-
-uint oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
- uint status = RNDIS_STATUS_SUCCESS;
- u32 ratevalue;
- u8 datarates[NumRates];
- int i;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- ratevalue = *((u32 *)poid_par_priv->information_buf);
- for (i = 0; i < NumRates; i++) {
- if (ratevalue == mpdatarate[i])
- datarates[i] = mpdatarate[i];
- else
- datarates[i] = 0xff;
- }
- if (r8712_setbasicrate_cmd(Adapter, datarates) != _SUCCESS)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
-}
-
-uint oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (poid_par_priv->information_buf_len < 8)
- return RNDIS_STATUS_INVALID_LENGTH;
- *poid_par_priv->bytes_rw = 8;
- memcpy(poid_par_priv->information_buf,
- &(Adapter->pwrctrlpriv.pwr_mode), 8);
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint pwr_mode, smart_ps;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- *poid_par_priv->bytes_rw = 0;
- *poid_par_priv->bytes_needed = 8;
- if (poid_par_priv->information_buf_len < 8)
- return RNDIS_STATUS_INVALID_LENGTH;
- pwr_mode = *(uint *)(poid_par_priv->information_buf);
- smart_ps = *(uint *)((addr_t)poid_par_priv->information_buf + 4);
- if (pwr_mode != Adapter->pwrctrlpriv.pwr_mode || smart_ps !=
- Adapter->pwrctrlpriv.smart_ps)
- r8712_set_ps_mode(Adapter, pwr_mode, smart_ps);
- *poid_par_priv->bytes_rw = 8;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
- struct setratable_parm *prate_table;
- u8 res;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- *poid_par_priv->bytes_needed = sizeof(struct setratable_parm);
- if (poid_par_priv->information_buf_len <
- sizeof(struct setratable_parm))
- return RNDIS_STATUS_INVALID_LENGTH;
- prate_table = (struct setratable_parm *)poid_par_priv->information_buf;
- res = r8712_setrttbl_cmd(Adapter, prate_table);
- if (res == _FAIL)
- status = RNDIS_STATUS_FAILURE;
- return status;
-}
-
-uint oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- enum ENCRY_CTRL_STATE encry_mode = 0;
-
- *poid_par_priv->bytes_needed = sizeof(u8);
- if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return RNDIS_STATUS_INVALID_LENGTH;
-
- if (poid_par_priv->type_of_oid == SET_OID) {
- encry_mode = *((u8 *)poid_par_priv->information_buf);
- switch (encry_mode) {
- case HW_CONTROL:
- psecuritypriv->sw_decrypt = false;
- psecuritypriv->sw_encrypt = false;
- break;
- case SW_CONTROL:
- psecuritypriv->sw_decrypt = true;
- psecuritypriv->sw_encrypt = true;
- break;
- case HW_ENCRY_SW_DECRY:
- psecuritypriv->sw_decrypt = true;
- psecuritypriv->sw_encrypt = false;
- break;
- case SW_ENCRY_HW_DECRY:
- psecuritypriv->sw_decrypt = false;
- psecuritypriv->sw_encrypt = true;
- break;
- }
- } else {
- if ((psecuritypriv->sw_encrypt == false) &&
- (psecuritypriv->sw_decrypt == false))
- encry_mode = HW_CONTROL;
- else if ((psecuritypriv->sw_encrypt == false) &&
- (psecuritypriv->sw_decrypt == true))
- encry_mode = HW_ENCRY_SW_DECRY;
- else if ((psecuritypriv->sw_encrypt == true) &&
- (psecuritypriv->sw_decrypt == false))
- encry_mode = SW_ENCRY_HW_DECRY;
- else if ((psecuritypriv->sw_encrypt == true) &&
- (psecuritypriv->sw_decrypt == true))
- encry_mode = SW_CONTROL;
- *(u8 *)poid_par_priv->information_buf = encry_mode;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- }
- return RNDIS_STATUS_SUCCESS;
-}
-/*----------------------------------------------------------------------*/
-uint oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
-
- uint status = RNDIS_STATUS_SUCCESS;
-
- struct sta_info *psta = NULL;
- u8 *macaddr;
-
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
-
- *poid_par_priv->bytes_needed = ETH_ALEN;
- if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return RNDIS_STATUS_INVALID_LENGTH;
- macaddr = (u8 *) poid_par_priv->information_buf;
- psta = r8712_get_stainfo(&Adapter->stapriv, macaddr);
- if (psta == NULL) { /* the sta in sta_info_queue => do nothing*/
- psta = r8712_alloc_stainfo(&Adapter->stapriv, macaddr);
- if (psta == NULL)
- status = RNDIS_STATUS_FAILURE;
- }
- return status;
-}
-/*-------------------------------------------------------------------------*/
-uint oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
-
- unsigned long irqL;
-
- struct sta_info *psta = NULL;
- u8 *macaddr;
-
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
-
- *poid_par_priv->bytes_needed = ETH_ALEN;
- if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return RNDIS_STATUS_INVALID_LENGTH;
-
- macaddr = (u8 *)poid_par_priv->information_buf;
-
- psta = r8712_get_stainfo(&Adapter->stapriv, macaddr);
- if (psta != NULL) {
- spin_lock_irqsave(&(Adapter->stapriv.sta_hash_lock), irqL);
- r8712_free_stainfo(Adapter, psta);
- spin_unlock_irqrestore(&(Adapter->stapriv.sta_hash_lock), irqL);
- }
-
- return RNDIS_STATUS_SUCCESS;
-}
-/*--------------------------------------------------------------------------*/
-static u32 mp_query_drv_var(struct _adapter *padapter, u8 offset, u32 var)
-{
- return var;
-}
-
-uint oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
-
- struct DR_VARIABLE_STRUCT *pdrv_var;
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- *poid_par_priv->bytes_needed = sizeof(struct DR_VARIABLE_STRUCT);
- if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return RNDIS_STATUS_INVALID_LENGTH;
- pdrv_var = (struct DR_VARIABLE_STRUCT *)poid_par_priv->information_buf;
- pdrv_var->variable = mp_query_drv_var(Adapter, pdrv_var->offset,
- pdrv_var->variable);
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return RNDIS_STATUS_SUCCESS;
-}
-
-/*--------------------------------------------------------------------------*/
-uint oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
-{
- return RNDIS_STATUS_SUCCESS;
-}
-/*------------------------------------------------------------------------*/
uint oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
@@ -1192,38 +761,6 @@ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
return status;
}
/*----------------------------------------------------------------------*/
-uint oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
- struct PGPKT_STRUCT *ppgpkt;
-
- *poid_par_priv->bytes_rw = 0;
- if (poid_par_priv->information_buf_len < sizeof(struct PGPKT_STRUCT))
- return RNDIS_STATUS_INVALID_LENGTH;
- ppgpkt = (struct PGPKT_STRUCT *)poid_par_priv->information_buf;
- if (poid_par_priv->type_of_oid == QUERY_OID) {
- if (r8712_efuse_pg_packet_read(Adapter, ppgpkt->offset,
- ppgpkt->data) == true)
- *poid_par_priv->bytes_rw =
- poid_par_priv->information_buf_len;
- else
- status = RNDIS_STATUS_FAILURE;
- } else {
- if (r8712_efuse_reg_init(Adapter) == true) {
- if (r8712_efuse_pg_packet_write(Adapter, ppgpkt->offset,
- ppgpkt->word_en, ppgpkt->data) == true)
- *poid_par_priv->bytes_rw =
- poid_par_priv->information_buf_len;
- else
- status = RNDIS_STATUS_FAILURE;
- r8712_efuse_reg_uninit(Adapter);
- } else
- status = RNDIS_STATUS_FAILURE;
- }
- return status;
-}
uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv
*poid_par_priv)
@@ -1319,24 +856,6 @@ uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- u32 crystal_cap = 0;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return RNDIS_STATUS_INVALID_LENGTH;
- crystal_cap = *((u32 *)poid_par_priv->information_buf);/*4*/
- if (crystal_cap > 0xf)
- return RNDIS_STATUS_NOT_ACCEPTED;
- Adapter->mppriv.curr_crystalcap = crystal_cap;
- r8712_SetCrystalCap(Adapter);
- return RNDIS_STATUS_SUCCESS;
-}
-
uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
*poid_par_priv)
{
@@ -1378,50 +897,6 @@ uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- u32 txagc;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return RNDIS_STATUS_INVALID_LENGTH;
- txagc = *(u32 *)poid_par_priv->information_buf;
- r8712_SetTxAGCOffset(Adapter, txagc);
- return RNDIS_STATUS_SUCCESS;
-}
-
-uint oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv
- *poid_par_priv)
-{
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- struct mp_priv *pmppriv = &Adapter->mppriv;
- u32 type;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return RNDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return RNDIS_STATUS_INVALID_LENGTH;
-
- type = *(u32 *)poid_par_priv->information_buf;
-
- if (_LOOPBOOK_MODE_ == type) {
- pmppriv->mode = type;
- set_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE); /*append txdesc*/
- } else if (_2MAC_MODE_ == type) {
- pmppriv->mode = type;
- _clr_fwstate_(pmlmepriv, WIFI_MP_LPBK_STATE);
- } else
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
-}
/*--------------------------------------------------------------------------*/
/*Linux*/
unsigned int mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv)
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 850143d5dee3..8e7c7f8b69f9 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -86,41 +86,8 @@ struct DR_VARIABLE_STRUCT {
int mp_start_joinbss(struct _adapter *padapter, struct ndis_802_11_ssid *pssid);
/* oid_rtl_seg_87_11_00 */
-uint oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_burst_read_register_hdl(struct oid_par_priv*
- poid_par_priv);
-uint oid_rt_pro_burst_write_register_hdl(struct oid_par_priv*
- poid_par_priv);
-uint oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv);
-/* oid_rtl_seg_87_11_20 */
-uint oid_rt_pro_cfg_debug_message_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_data_rate_ex_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_basic_rate_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_power_tracking_hdl(
- struct oid_par_priv *poid_par_priv);
-/* oid_rtl_seg_87_11_50 */
-uint oid_rt_pro_qry_pwrstate_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_pwrstate_hdl(
- struct oid_par_priv *poid_par_priv);
-/* oid_rtl_seg_87_11_F0 */
-uint oid_rt_pro_h2c_set_rate_table_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_h2c_get_rate_table_hdl(
- struct oid_par_priv *poid_par_priv);
/* oid_rtl_seg_81_80_00 */
uint oid_rt_pro_set_data_rate_hdl(
struct oid_par_priv *poid_par_priv);
@@ -159,28 +126,15 @@ uint oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
/* oid_rtl_seg_81_85 */
uint oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
-/* oid_rtl_seg_87_12_00 */
-uint oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_query_dr_variable_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_get_efuse_current_size_hdl(
struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
uint oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_tx_agc_offset_hdl(
- struct oid_par_priv *poid_par_priv);
-uint oid_rt_pro_set_pkt_test_mode_hdl(
- struct oid_par_priv *poid_par_priv);
uint oid_rt_get_thermal_meter_hdl(
struct oid_par_priv *poid_par_priv);
uint oid_rt_reset_phy_rx_packet_count_hdl(
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index 0526ba077bfc..dbfb55523545 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -33,17 +33,17 @@
#define CMD_ALIVE BIT(2)
enum Power_Mgnt {
- PS_MODE_ACTIVE = 0 ,
- PS_MODE_MIN ,
- PS_MODE_MAX ,
- PS_MODE_DTIM ,
- PS_MODE_VOIP ,
- PS_MODE_UAPSD_WMM ,
- PS_MODE_UAPSD ,
- PS_MODE_IBSS ,
- PS_MODE_WWLAN ,
- PM_Radio_Off ,
- PM_Card_Disable ,
+ PS_MODE_ACTIVE = 0,
+ PS_MODE_MIN,
+ PS_MODE_MAX,
+ PS_MODE_DTIM,
+ PS_MODE_VOIP,
+ PS_MODE_UAPSD_WMM,
+ PS_MODE_UAPSD,
+ PS_MODE_IBSS,
+ PS_MODE_WWLAN,
+ PM_Radio_Off,
+ PM_Card_Disable,
PS_MODE_NUM
};
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 4c9b98e8210e..1752121ff494 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -83,9 +83,8 @@ static void mfree_all_stainfo(struct sta_priv *pstapriv)
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
phead = &pstapriv->free_sta_queue.queue;
plist = phead->next;
- while ((end_of_queue_search(phead, plist)) == false) {
+ while ((end_of_queue_search(phead, plist)) == false)
plist = plist->next;
- }
spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
}
@@ -228,7 +227,7 @@ void r8712_free_all_stainfo(struct _adapter *padapter)
struct sta_info, hash_list);
plist = plist->next;
if (pbcmc_stainfo != psta)
- r8712_free_stainfo(padapter , psta);
+ r8712_free_stainfo(padapter, psta);
}
}
spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 62a377e7fdc7..a28af03c9d8a 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -471,7 +471,7 @@ static sint xmitframe_swencrypt(struct _adapter *padapter,
return _SUCCESS;
}
-static sint make_wlanhdr(struct _adapter *padapter , u8 *hdr,
+static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
struct pkt_attrib *pattrib)
{
u16 *qc;
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index c4e0ef2f52c6..742dfa0ca817 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -135,7 +135,7 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv);
u32 _r8712_free_sta_priv(struct sta_priv *pstapriv);
struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv,
u8 *hwaddr);
-void r8712_free_stainfo(struct _adapter *padapter , struct sta_info *psta);
+void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta);
void r8712_free_all_stainfo(struct _adapter *padapter);
struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr);
void r8712_init_bcmc_stainfo(struct _adapter *padapter);
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 7d0d1719b136..f8b5b332e7c3 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -366,7 +366,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
struct net_device *pnetdev;
struct usb_device *udev;
- printk(KERN_INFO "r8712u: Staging version\n");
/* In this probe function, O.S. will provide the usb interface pointer
* to driver. We have to increase the reference count of the usb device
* structure by using the usb_get_dev function.
@@ -463,7 +462,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* Use the mac address stored in the Efuse
* offset = 0x12 for usb in efuse
*/
- memcpy(mac, &pdata[0x12], ETH_ALEN);
+ ether_addr_copy(mac, &pdata[0x12]);
}
eeprom_CustomerID = pdata[0x52];
switch (eeprom_CustomerID) {
@@ -580,7 +579,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
} else
dev_info(&udev->dev,
"r8712u: MAC Address from efuse = %pM\n", mac);
- memcpy(pnetdev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(pnetdev->dev_addr, mac);
}
/* step 6. Load the firmware asynchronously */
if (rtl871x_load_fw(padapter))
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index e394d12c36b0..c6327c072918 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -456,8 +456,8 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
sizeof(struct stainfo_stats));
/* prepare for add_RATid23a */
- supportRateNum = rtw_get_rateset_len23a((u8*)&pcur_network->SupportedRates);
- network_type = rtw_check_network_type23a((u8*)&pcur_network->SupportedRates, supportRateNum, 1);
+ supportRateNum = rtw_get_rateset_len23a((u8 *)&pcur_network->SupportedRates);
+ network_type = rtw_check_network_type23a((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
psta->bssratelen = supportRateNum;
@@ -897,7 +897,7 @@ int rtw_check_beacon_data23a(struct rtw_adapter *padapter,
pairwise_cipher = 0;
psecuritypriv->wpa_group_cipher = 0;
psecuritypriv->wpa_pairwise_cipher = 0;
- for (p = ie; ;p += (ie_len + 2)) {
+ for (p = ie; ; p += (ie_len + 2)) {
p = rtw_get_ie23a(p, WLAN_EID_VENDOR_SPECIFIC, &ie_len,
pbss_network->IELength - (ie_len + 2));
if ((p) && (!memcmp(p+2, RTW_WPA_OUI23A_TYPE, 4))) {
@@ -924,7 +924,7 @@ int rtw_check_beacon_data23a(struct rtw_adapter *padapter,
ie_len = 0;
pmlmepriv->qos_option = 0;
if (pregistrypriv->wmm_enable) {
- for (p = ie; ;p += (ie_len + 2)) {
+ for (p = ie; ; p += (ie_len + 2)) {
p = rtw_get_ie23a(p, WLAN_EID_VENDOR_SPECIFIC, &ie_len,
(pbss_network->IELength -
(ie_len + 2)));
@@ -1204,7 +1204,7 @@ static void update_bcn_p2p_ie(struct rtw_adapter *padapter)
{
}
-static void update_bcn_vendor_spec_ie(struct rtw_adapter *padapter, u8*oui)
+static void update_bcn_vendor_spec_ie(struct rtw_adapter *padapter, u8 *oui)
{
DBG_8723A("%s\n", __func__);
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
index 60e0ded8ae02..2447a56df838 100644
--- a/drivers/staging/rtl8723au/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723au/core/rtw_cmd.c
@@ -245,11 +245,6 @@ exit:
return res;
}
-void rtw_cmd_clr_isr23a(struct cmd_priv *pcmdpriv)
-{
- pcmdpriv->cmd_done_cnt++;
-}
-
void rtw_free_cmd_obj23a(struct cmd_obj *pcmd)
{
@@ -852,62 +847,6 @@ exit:
return res;
}
-/*
- * This is only ever called from on_action_spct23a_ch_switch () which isn't
- * called from anywhere itself
- */
-int rtw_set_ch_cmd23a(struct rtw_adapter *padapter, u8 ch, u8 bw, u8 ch_offset,
- u8 enqueue)
-{
- struct cmd_obj *pcmdobj;
- struct set_ch_parm *set_ch_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- DBG_8723A("%s(%s): ch:%u, bw:%u, ch_offset:%u\n", __func__,
- padapter->pnetdev->name, ch, bw, ch_offset);
-
- /* check input parameter */
-
- /* prepare cmd parameter */
- set_ch_parm = kzalloc(sizeof(*set_ch_parm), GFP_KERNEL);
- if (!set_ch_parm) {
- res = _FAIL;
- goto exit;
- }
- set_ch_parm->ch = ch;
- set_ch_parm->bw = bw;
- set_ch_parm->ch_offset = ch_offset;
-
- if (enqueue) {
- /* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!pcmdobj) {
- kfree(set_ch_parm);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, set_ch_parm,
- GEN_CMD_CODE(_SetChannel));
- res = rtw_enqueue_cmd23a(pcmdpriv, pcmdobj);
- } else {
- /* no need to enqueue, do the cmd hdl directly and
- free cmd parameter */
- if (H2C_SUCCESS != set_ch_hdl23a(padapter, (u8 *)set_ch_parm))
- res = _FAIL;
-
- kfree(set_ch_parm);
- }
-
- /* do something based on res... */
-exit:
-
- DBG_8723A("%s(%s): res:%u\n", __func__, padapter->pnetdev->name, res);
-
- return res;
-}
-
static void traffic_status_watchdog(struct rtw_adapter *padapter)
{
u8 bEnterPS;
diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c
index 81960e788f89..a6deddc02291 100644
--- a/drivers/staging/rtl8723au/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723au/core/rtw_efuse.c
@@ -327,15 +327,9 @@ EFUSE_Read1Byte23a(struct rtw_adapter *Adapter, u16 Address)
*---------------------------------------------------------------------------*/
void
-EFUSE_Write1Byte(
- struct rtw_adapter * Adapter,
- u16 Address,
- u8 Value);
+EFUSE_Write1Byte(struct rtw_adapter *Adapter, u16 Address, u8 Value);
void
-EFUSE_Write1Byte(
- struct rtw_adapter * Adapter,
- u16 Address,
- u8 Value)
+EFUSE_Write1Byte(struct rtw_adapter *Adapter, u16 Address, u8 Value)
{
u8 Bytetemp = {0x00};
u8 temp = {0x00};
@@ -635,10 +629,7 @@ Efuse_ReadAllMap(struct rtw_adapter *pAdapter, u8 efuseType, u8 *Efuse)
*
*---------------------------------------------------------------------------*/
static void
-efuse_ShadowRead1Byte(
- struct rtw_adapter * pAdapter,
- u16 Offset,
- u8 *Value)
+efuse_ShadowRead1Byte(struct rtw_adapter *pAdapter, u16 Offset, u8 *Value)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
@@ -647,10 +638,7 @@ efuse_ShadowRead1Byte(
/* Read Two Bytes */
static void
-efuse_ShadowRead2Byte(
- struct rtw_adapter * pAdapter,
- u16 Offset,
- u16 *Value)
+efuse_ShadowRead2Byte(struct rtw_adapter *pAdapter, u16 Offset, u16 *Value)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
@@ -660,10 +648,7 @@ efuse_ShadowRead2Byte(
/* Read Four Bytes */
static void
-efuse_ShadowRead4Byte(
- struct rtw_adapter * pAdapter,
- u16 Offset,
- u32 *Value)
+efuse_ShadowRead4Byte(struct rtw_adapter *pAdapter, u16 Offset, u32 *Value)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
@@ -722,11 +707,8 @@ void EFUSE_ShadowMapUpdate23a(struct rtw_adapter *pAdapter, u8 efuseType)
*
*---------------------------------------------------------------------------*/
void
-EFUSE_ShadowRead23a(
- struct rtw_adapter * pAdapter,
- u8 Type,
- u16 Offset,
- u32 *Value)
+EFUSE_ShadowRead23a(struct rtw_adapter *pAdapter,
+ u8 Type, u16 Offset, u32 *Value)
{
if (Type == 1)
efuse_ShadowRead1Byte(pAdapter, Offset, (u8 *)Value);
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
index 7a5e6bf0d1ae..1c82dffcf596 100644
--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
@@ -2372,12 +2372,3 @@ int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms)
return rtw_sctx_wait23a(pack_tx_ops);
}
-void rtw_ack_tx_done23a(struct xmit_priv *pxmitpriv, int status)
-{
- struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
-
- if (pxmitpriv->ack_tx)
- rtw23a_sctx_done_err(&pack_tx_ops, status);
- else
- DBG_8723A("%s ack_tx not set\n", __func__);
-}
diff --git a/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
index 1da4eece6f9a..33777d2852f4 100644
--- a/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
@@ -47,11 +47,11 @@ u8 HalPwrSeqCmdParsing23a(struct rtw_adapter *padapter, u8 CutVersion,
u8 FabVersion, u8 InterfaceType,
struct wlan_pwr_cfg PwrSeqCmd[])
{
- struct wlan_pwr_cfg PwrCfgCmd = { 0 };
- u8 bPollingBit = false;
+ struct wlan_pwr_cfg PwrCfgCmd;
+ u8 bPollingBit;
u32 AryIdx = 0;
- u8 value = 0;
- u32 offset = 0;
+ u8 value;
+ u32 offset;
u32 pollingCount = 0; /* polling autoload done. */
u32 maxPollingCnt = 5000;
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
index 1c0f106d5996..5269b46445f4 100644
--- a/drivers/staging/rtl8723au/hal/odm.c
+++ b/drivers/staging/rtl8723au/hal/odm.c
@@ -187,24 +187,13 @@ void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm);
void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm);
-void odm_1R_CCA23a(struct dm_odm_t *pDM_Odm);
/* END---------BB POWER SAVE----------------------- */
-void odm_RefreshRateAdaptiveMask23aMP23a(struct dm_odm_t *pDM_Odm);
-
void odm_RefreshRateAdaptiveMask23aCE23a(struct dm_odm_t *pDM_Odm);
-void odm_RefreshRateAdaptiveMask23aAPADSL23a(struct dm_odm_t *pDM_Odm);
-
void odm_DynamicTxPower23aInit(struct dm_odm_t *pDM_Odm);
-void odm_RSSIMonitorInit(struct dm_odm_t *pDM_Odm);
-
-void odm_RSSIMonitorCheck23aMP(struct dm_odm_t *pDM_Odm);
-
void odm_RSSIMonitorCheck23aCE(struct dm_odm_t *pDM_Odm);
-void odm_RSSIMonitorCheck23aAP(struct dm_odm_t *pDM_Odm);
-
void odm_RSSIMonitorCheck23a(struct dm_odm_t *pDM_Odm);
void odm_DynamicTxPower23a(struct dm_odm_t *pDM_Odm);
@@ -212,16 +201,12 @@ void odm_RefreshRateAdaptiveMask23a(struct dm_odm_t *pDM_Odm);
void ODM_TXPowerTrackingCheck23a(struct dm_odm_t *pDM_Odm);
-void odm_TXPowerTrackingCheckAP(struct dm_odm_t *pDM_Odm);
-
void odm_RateAdaptiveMaskInit23a(struct dm_odm_t *pDM_Odm);
void odm_TXPowerTrackingThermalMeterInit23a(struct dm_odm_t *pDM_Odm);
void odm_TXPowerTrackingInit23a(struct dm_odm_t *pDM_Odm);
-void odm_TXPowerTrackingCheckMP(struct dm_odm_t *pDM_Odm);
-
void odm_TXPowerTrackingCheckCE23a(struct dm_odm_t *pDM_Odm);
static void odm_EdcaTurboCheck23a(struct dm_odm_t *pDM_Odm);
@@ -946,47 +931,13 @@ void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm)
return;
}
-void odm_1R_CCA23a(struct dm_odm_t *pDM_Odm)
-{
- struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
-
- if (pDM_Odm->RSSI_Min != 0xFF) {
- if (pDM_PSTable->PreCCAState == CCA_2R) {
- if (pDM_Odm->RSSI_Min >= 35)
- pDM_PSTable->CurCCAState = CCA_1R;
- else
- pDM_PSTable->CurCCAState = CCA_2R;
- } else {
- if (pDM_Odm->RSSI_Min <= 30)
- pDM_PSTable->CurCCAState = CCA_2R;
- else
- pDM_PSTable->CurCCAState = CCA_1R;
- }
- } else {
- pDM_PSTable->CurCCAState = CCA_MAX;
- }
-
- if (pDM_PSTable->PreCCAState != pDM_PSTable->CurCCAState) {
- if (pDM_PSTable->CurCCAState == CCA_1R) {
- if (pDM_Odm->RFType == ODM_2T2R)
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
- else
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
- } else {
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
- /* PHY_SetBBReg(pAdapter, 0xe70, bMaskByte3, 0x63); */
- }
- pDM_PSTable->PreCCAState = pDM_PSTable->CurCCAState;
- }
-}
-
void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal)
{
struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
- u8 Rssi_Up_bound = 30 ;
+ u8 Rssi_Up_bound = 30;
u8 Rssi_Low_bound = 25;
if (pDM_Odm->PatchID == 40) { /* RT_CID_819x_FUNAI_TV */
- Rssi_Up_bound = 50 ;
+ Rssi_Up_bound = 50;
Rssi_Low_bound = 45;
}
if (pDM_PSTable->initialize == 0) {
@@ -1177,10 +1128,6 @@ void odm_RefreshRateAdaptiveMask23a(struct dm_odm_t *pDM_Odm)
odm_RefreshRateAdaptiveMask23aCE23a(pDM_Odm);
}
-void odm_RefreshRateAdaptiveMask23aMP23a(struct dm_odm_t *pDM_Odm)
-{
-}
-
void odm_RefreshRateAdaptiveMask23aCE23a(struct dm_odm_t *pDM_Odm)
{
u8 i;
@@ -1216,10 +1163,6 @@ void odm_RefreshRateAdaptiveMask23aCE23a(struct dm_odm_t *pDM_Odm)
}
-void odm_RefreshRateAdaptiveMask23aAPADSL23a(struct dm_odm_t *pDM_Odm)
-{
-}
-
/* Return Value: bool */
/* - true: RATRState is changed. */
bool ODM_RAStateCheck23a(struct dm_odm_t *pDM_Odm, s32 RSSI, bool bForceUpdate,
@@ -1284,14 +1227,6 @@ void odm_DynamicTxPower23aInit(struct dm_odm_t *pDM_Odm)
pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
}
-/* 3 ============================================================ */
-/* 3 RSSI Monitor */
-/* 3 ============================================================ */
-
-void odm_RSSIMonitorInit(struct dm_odm_t *pDM_Odm)
-{
-}
-
void odm_RSSIMonitorCheck23a(struct dm_odm_t *pDM_Odm)
{
/* For AP/ADSL use struct rtl8723a_priv * */
@@ -1306,10 +1241,6 @@ void odm_RSSIMonitorCheck23a(struct dm_odm_t *pDM_Odm)
odm_RSSIMonitorCheck23aCE(pDM_Odm);
} /* odm_RSSIMonitorCheck23a */
-void odm_RSSIMonitorCheck23aMP(struct dm_odm_t *pDM_Odm)
-{
-}
-
static void
FindMinimumRSSI(
struct rtw_adapter *pAdapter
@@ -1378,10 +1309,6 @@ void odm_RSSIMonitorCheck23aCE(struct dm_odm_t *pDM_Odm)
ODM_CmnInfoUpdate23a(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
}
-void odm_RSSIMonitorCheck23aAP(struct dm_odm_t *pDM_Odm)
-{
-}
-
/* endif */
/* 3 ============================================================ */
/* 3 Tx Power Tracking */
@@ -1422,19 +1349,12 @@ void odm_TXPowerTrackingCheckCE23a(struct dm_odm_t *pDM_Odm)
{
}
-void odm_TXPowerTrackingCheckMP(struct dm_odm_t *pDM_Odm)
-{
-}
-
-void odm_TXPowerTrackingCheckAP(struct dm_odm_t *pDM_Odm)
-{
-}
-
/* EDCA Turbo */
static void ODM_EdcaTurboInit23a(struct dm_odm_t *pDM_Odm)
{
struct rtw_adapter *Adapter = pDM_Odm->Adapter;
+
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
@@ -1591,6 +1511,7 @@ ConvertTo_dB23a(
void ODM_SingleDualAntennaDefaultSetting(struct dm_odm_t *pDM_Odm)
{
struct sw_ant_sw *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
pDM_SWAT_Table->ANTA_ON = true;
pDM_SWAT_Table->ANTB_ON = true;
}
diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
index fb3cc872f205..33aafa01f900 100644
--- a/drivers/staging/rtl8723au/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
@@ -113,7 +113,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct dm_odm_t *pDM_Odm,
cck_highpwr = pDM_Odm->bCckHighPower;
- cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a ;
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
/* The RSSI formula should be modified according to the gain table */
if (!cck_highpwr) {
@@ -138,16 +138,16 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct dm_odm_t *pDM_Odm,
report = (cck_agc_rpt & 0x60)>>5;
switch (report) {
case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1) ;
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1);
break;
case 0x2:
rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
break;
case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1) ;
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
break;
case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1) ;
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
break;
}
}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
index 86a83975f4f0..73cfddd6df9a 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
@@ -7255,63 +7255,19 @@ btdm_2AntTdmaDurationAdjust(struct rtw_adapter *padapter, u8 bScoHid,
RTPRINT(FBT, BT_TRACE, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
if (bScoHid) {
if (bTxPause) {
- if (maxInterval == 1) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (maxInterval == 2) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (maxInterval == 3) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- }
+ btdm_2AntPsTdma(padapter, true, 15);
+ pBtdm8723->psTdmaDuAdjType = 15;
} else {
- if (maxInterval == 1) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (maxInterval == 2) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (maxInterval == 3) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- }
+ btdm_2AntPsTdma(padapter, true, 11);
+ pBtdm8723->psTdmaDuAdjType = 11;
}
} else {
if (bTxPause) {
- if (maxInterval == 1) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (maxInterval == 2) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (maxInterval == 3) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- }
+ btdm_2AntPsTdma(padapter, true, 7);
+ pBtdm8723->psTdmaDuAdjType = 7;
} else {
- if (maxInterval == 1) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (maxInterval == 2) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (maxInterval == 3) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- }
+ btdm_2AntPsTdma(padapter, true, 3);
+ pBtdm8723->psTdmaDuAdjType = 3;
}
}
up = 0;
@@ -9145,7 +9101,7 @@ u32 BTDM_BtTxRxCounterL(struct rtw_adapter *padapter)
u32 counters = 0;
counters = pHalData->bt_coexist.halCoex8723.lowPriorityTx+
- pHalData->bt_coexist.halCoex8723.lowPriorityRx ;
+ pHalData->bt_coexist.halCoex8723.lowPriorityRx;
return counters;
}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
index 88e91cd8ebb9..19dc5e3b2e2e 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
@@ -698,7 +698,7 @@ storePwrIndexDiffRateOffset(struct rtw_adapter *Adapter, u32 RegAddr,
* 11/10/2008 tynli Modify to mew files.
*---------------------------------------------------------------------------*/
static int
-phy_ConfigBBWithPgHeaderFile(struct rtw_adapter *Adapter, u8 ConfigType)
+phy_ConfigBBWithPgHeaderFile(struct rtw_adapter *Adapter)
{
int i;
u32 *Rtl819XPHY_REGArray_Table_PG;
@@ -707,17 +707,15 @@ phy_ConfigBBWithPgHeaderFile(struct rtw_adapter *Adapter, u8 ConfigType)
PHY_REGArrayPGLen = Rtl8723_PHY_REG_Array_PGLength;
Rtl819XPHY_REGArray_Table_PG = (u32 *)Rtl8723_PHY_REG_Array_PG;
- if (ConfigType == BaseBand_Config_PHY_REG) {
- for (i = 0; i < PHY_REGArrayPGLen; i = i + 3) {
- storePwrIndexDiffRateOffset(Adapter,
- Rtl819XPHY_REGArray_Table_PG[i],
- Rtl819XPHY_REGArray_Table_PG[i+1],
- Rtl819XPHY_REGArray_Table_PG[i+2]);
- }
+ for (i = 0; i < PHY_REGArrayPGLen; i = i + 3) {
+ storePwrIndexDiffRateOffset(Adapter,
+ Rtl819XPHY_REGArray_Table_PG[i],
+ Rtl819XPHY_REGArray_Table_PG[i+1],
+ Rtl819XPHY_REGArray_Table_PG[i+2]);
}
return _SUCCESS;
-} /* phy_ConfigBBWithPgHeaderFile */
+}
static void
phy_BB8192C_Config_1T(struct rtw_adapter *Adapter)
@@ -768,8 +766,7 @@ phy_BB8723a_Config_ParaFile(struct rtw_adapter *Adapter)
if (pEEPROM->bautoload_fail_flag == false) {
pHalData->pwrGroupCnt = 0;
- rtStatus = phy_ConfigBBWithPgHeaderFile(Adapter,
- BaseBand_Config_PHY_REG);
+ rtStatus = phy_ConfigBBWithPgHeaderFile(Adapter);
}
if (rtStatus != _SUCCESS)
@@ -923,9 +920,6 @@ _PHY_SetBWMode23a92C(struct rtw_adapter *Adapter)
u8 regBwOpMode;
u8 regRRSR_RSC;
- if (pHalData->rf_chip == RF_PSEUDO_11N)
- return;
-
/* There is no 40MHz mode in RF_8225. */
if (pHalData->rf_chip == RF_8225)
return;
@@ -1021,10 +1015,6 @@ _PHY_SetBWMode23a92C(struct rtw_adapter *Adapter)
/* PHY_SetRF8258Bandwidth(); */
break;
- case RF_PSEUDO_11N:
- /* Do Nothing */
- break;
-
case RF_6052:
rtl8723a_phy_rf6052set_bw(Adapter, pHalData->CurrentChannelBW);
break;
@@ -1074,7 +1064,7 @@ PHY_SetBWMode23a8723A(struct rtw_adapter *Adapter,
static void _PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
{
- u8 eRFPath;
+ enum RF_RADIO_PATH eRFPath;
u32 param1, param2;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
@@ -1088,7 +1078,7 @@ static void _PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
pHalData->RfRegChnlVal[eRFPath] =
(pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2;
- PHY_SetRFReg(Adapter, (enum RF_RADIO_PATH)eRFPath, param1,
+ PHY_SetRFReg(Adapter, eRFPath, param1,
bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
}
@@ -1101,11 +1091,6 @@ void PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
u8 tmpchannel = pHalData->CurrentChannel;
bool result = true;
- if (pHalData->rf_chip == RF_PSEUDO_11N) {
- /* return immediately if it is peudo-phy */
- return;
- }
-
if (channel == 0)
channel = 1;
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
index 6070510bb470..1759487329ab 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
@@ -79,7 +79,7 @@ static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxd
}
}
-static void fill_txdesc_vcs(struct pkt_attrib *pattrib, u32 *pdw)
+static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
/* DBG_8723A("cvs_mode =%d\n", pattrib->vcs_mode); */
@@ -114,7 +114,7 @@ static void fill_txdesc_vcs(struct pkt_attrib *pattrib, u32 *pdw)
}
}
-static void fill_txdesc_phy(struct pkt_attrib *pattrib, u32 *pdw)
+static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
if (pattrib->ht_en) {
*pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
index febe5cedef8f..adbf1c2dd383 100644
--- a/drivers/staging/rtl8723au/hal/usb_halinit.c
+++ b/drivers/staging/rtl8723au/hal/usb_halinit.c
@@ -625,10 +625,10 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
}
/* reducing 80M spur */
- PHY_SetBBReg(Adapter, RF_T_METER, bMaskDWord, 0x0381808d);
- PHY_SetBBReg(Adapter, RF_SYN_G4, bMaskDWord, 0xf2ffff83);
- PHY_SetBBReg(Adapter, RF_SYN_G4, bMaskDWord, 0xf2ffff82);
- PHY_SetBBReg(Adapter, RF_SYN_G4, bMaskDWord, 0xf2ffff83);
+ PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, bMaskDWord, 0x0381808d);
+ PHY_SetBBReg(Adapter, REG_AFE_PLL_CTRL, bMaskDWord, 0xf0ffff83);
+ PHY_SetBBReg(Adapter, REG_AFE_PLL_CTRL, bMaskDWord, 0xf0ffff82);
+ PHY_SetBBReg(Adapter, REG_AFE_PLL_CTRL, bMaskDWord, 0xf0ffff83);
/* RFSW Control */
PHY_SetBBReg(Adapter, rFPGA0_TxInfo, bMaskDWord, 0x00000003); /* 0x804[14]= 0 */
@@ -640,8 +640,10 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
/* */
/* Joseph Note: Keep RfRegChnlVal for later use. */
/* */
- pHalData->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)0, RF_CHNLBW, bRFRegOffsetMask);
- pHalData->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)1, RF_CHNLBW, bRFRegOffsetMask);
+ pHalData->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, RF_PATH_A,
+ RF_CHNLBW, bRFRegOffsetMask);
+ pHalData->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, RF_PATH_B,
+ RF_CHNLBW, bRFRegOffsetMask);
if (!mac_on) {
_InitQueueReservedPage(Adapter);
diff --git a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
index 688f20412bca..2247d9874719 100644
--- a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
+++ b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
@@ -17,46 +17,9 @@
#define __INC_HAL8723PHYCFG_H__
/*--------------------------Define Parameters-------------------------------*/
-#define LOOP_LIMIT 5
-#define MAX_STALL_TIME 50 /* us */
-#define AntennaDiversityValue 0x80
-#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
-
-
#define MAX_AGGR_NUM 0x0909
-/*--------------------------Define Parameters-------------------------------*/
-
-
/*------------------------------Define structure----------------------------*/
-enum swchnlcmdid {
- CmdID_End,
- CmdID_SetTxPowerLevel,
- CmdID_BBRegWrite10,
- CmdID_WritePortUlong,
- CmdID_WritePortUshort,
- CmdID_WritePortUchar,
- CmdID_RF_WriteReg,
-};
-
-
-/* 1. Switch channel related */
-struct swchnlcmd {
- enum swchnlcmdid CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
-};
-
-enum HW90_BLOCK {
- HW90_BLOCK_MAC = 0,
- HW90_BLOCK_PHY0 = 1,
- HW90_BLOCK_PHY1 = 2,
- HW90_BLOCK_RF = 3,
- HW90_BLOCK_MAXIMUM = 4, /* Never use this */
-};
-
enum RF_RADIO_PATH {
RF_PATH_A = 0, /* Radio Path A */
RF_PATH_B = 1, /* Radio Path B */
@@ -64,7 +27,6 @@ enum RF_RADIO_PATH {
};
#define CHANNEL_MAX_NUMBER 14 /* 14 is the max channel number */
-#define CHANNEL_GROUP_MAX 3 /* ch1~3, ch4~9, ch10~14 total three groups */
enum WIRELESS_MODE {
WIRELESS_MODE_UNKNOWN = 0x00,
@@ -77,22 +39,6 @@ enum WIRELESS_MODE {
WIRELESS_MODE_AC = BIT(6)
};
-enum baseband_config_type {
- BaseBand_Config_PHY_REG = 0, /* Radio Path A */
- BaseBand_Config_AGC_TAB = 1, /* Radio Path B */
-};
-
-enum ra_offset_area {
- RA_OFFSET_LEGACY_OFDM1,
- RA_OFFSET_LEGACY_OFDM2,
- RA_OFFSET_HT_OFDM1,
- RA_OFFSET_HT_OFDM2,
- RA_OFFSET_HT_OFDM3,
- RA_OFFSET_HT_OFDM4,
- RA_OFFSET_HT_CCK,
-};
-
-
/* BB/RF related */
enum rf_type_8190p {
RF_TYPE_MIN, /* 0 */
@@ -100,7 +46,6 @@ enum rf_type_8190p {
RF_8256 = 2, /* 2 11b/g/n */
RF_8258 = 3, /* 3 11a/b/g/n RF */
RF_6052 = 4, /* 4 11b/g/n RF */
- RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. */
};
struct bb_reg_define {
diff --git a/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h b/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
index 4a1f58f2982c..3771d6bb5774 100644
--- a/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
+++ b/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
@@ -39,10 +39,10 @@
* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here
*/
#define RTL8723A_TRANS_CARDEMU_TO_ACT \
- {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
- {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
- {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
- {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), 0},/* disable SW LPS 0x04[10]= 0*/ \
{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},/* wait till 0x04[17] = 1 power ready*/ \
{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},/* release WLON reset 0x04[16]= 1*/ \
@@ -57,48 +57,28 @@
{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/*0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */\
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
- {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, /*0x00[5] = 1b'1 analog Ips to digital , 1:isolation*/ \
- {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, /*0x00[5] = 1b'1 analog Ips to digital , 1:isolation*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \
#define RTL8723A_TRANS_CARDEMU_TO_SUS \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3), (BIT(4)|BIT(3))}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/
#define RTL8723A_TRANS_SUS_TO_CARDEMU \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, /*clear suspend enable and power down enable*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07 = 0x20 , SOP option to disable BG/MB*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, /*0x04[10] = 1, enable SW LPS*/ \
- {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/
#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, /*clear suspend enable and power down enable*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
{0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/\
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
- {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
-
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
#define RTL8723A_TRANS_CARDEMU_TO_PDN \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/\
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/
@@ -106,7 +86,6 @@
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/
#define RTL8723A_TRANS_ACT_TO_LPS \
- {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \
{0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
{0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
@@ -117,13 +96,10 @@
{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*Whole BB is reset*/ \
{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/ \
- {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00},/*When driver enter Sus/ Disable, enable LOP for BT*/ \
{0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},/*Respond TxOK to scheduler*/
#define RTL8723A_TRANS_LPS_TO_ACT \
- {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
{0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
- {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
{0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, /*Polling 0x109[7]= 0 TSF in 40M*/\
diff --git a/drivers/staging/rtl8723au/include/osdep_intf.h b/drivers/staging/rtl8723au/include/osdep_intf.h
index 33afa62f31b1..a157eb2e78df 100644
--- a/drivers/staging/rtl8723au/include/osdep_intf.h
+++ b/drivers/staging/rtl8723au/include/osdep_intf.h
@@ -19,9 +19,6 @@
#include <osdep_service.h>
#include <drv_types.h>
-int rtw_hw_suspend23a(struct rtw_adapter *padapter);
-int rtw_hw_resume23a(struct rtw_adapter *padapter);
-
int rtw_init_drv_sw23a(struct rtw_adapter *padapter);
int rtw_free_drv_sw23a(struct rtw_adapter *padapter);
int rtw_reset_drv_sw23a(struct rtw_adapter *padapter);
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h b/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
index 05069652bae1..7add5dfe015f 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
@@ -31,8 +31,8 @@ enum rt_media_status {
void BT_SignalCompensation(struct rtw_adapter *padapter,
u8 *rssi_wifi, u8 *rssi_bt);
-void BT_HaltProcess(struct rtw_adapter * padapter);
-void BT_LpsLeave(struct rtw_adapter * padapter);
+void BT_HaltProcess(struct rtw_adapter *padapter);
+void BT_LpsLeave(struct rtw_adapter *padapter);
#define BT_HsConnectionEstablished(Adapter) false
@@ -1092,17 +1092,20 @@ enum hci_ext_bp_operation {
BTHCI_StateMachine(_Adapter, _StateToEnter, _StateCmd, _EntryNum);\
}
-void BTHCI_EventParse(struct rtw_adapter * padapter, void *pEvntData, u32 dataLen);
+void BTHCI_EventParse(struct rtw_adapter *padapter, void *pEvntData,
+ u32 dataLen);
#define BT_EventParse BTHCI_EventParse
-u8 BTHCI_HsConnectionEstablished(struct rtw_adapter * padapter);
-void BTHCI_UpdateBTProfileRTKToMoto(struct rtw_adapter * padapter);
-void BTHCI_WifiScanNotify(struct rtw_adapter * padapter, u8 scanType);
-void BTHCI_StateMachine(struct rtw_adapter * padapter, u8 StateToEnter, enum hci_state_with_cmd StateCmd, u8 EntryNum);
-void BTHCI_DisconnectPeer(struct rtw_adapter * padapter, u8 EntryNum);
-void BTHCI_EventNumOfCompletedDataBlocks(struct rtw_adapter * padapter);
-void BTHCI_EventAMPStatusChange(struct rtw_adapter * padapter, u8 AMP_Status);
-void BTHCI_DisconnectAll(struct rtw_adapter * padapter);
-enum hci_status BTHCI_HandleHCICMD(struct rtw_adapter * padapter, struct packet_irp_hcicmd_data *pHciCmd);
+u8 BTHCI_HsConnectionEstablished(struct rtw_adapter *padapter);
+void BTHCI_UpdateBTProfileRTKToMoto(struct rtw_adapter *padapter);
+void BTHCI_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType);
+void BTHCI_StateMachine(struct rtw_adapter *padapter, u8 StateToEnter,
+ enum hci_state_with_cmd StateCmd, u8 EntryNum);
+void BTHCI_DisconnectPeer(struct rtw_adapter *padapter, u8 EntryNum);
+void BTHCI_EventNumOfCompletedDataBlocks(struct rtw_adapter *padapter);
+void BTHCI_EventAMPStatusChange(struct rtw_adapter *padapter, u8 AMP_Status);
+void BTHCI_DisconnectAll(struct rtw_adapter *padapter);
+enum hci_status BTHCI_HandleHCICMD(struct rtw_adapter *padapter,
+ struct packet_irp_hcicmd_data *pHciCmd);
/* ===== End of sync from SD7 driver COMMON/bt_hci.h ===== */
@@ -1157,9 +1160,10 @@ struct btdm_8723a_1ant {
u8 bRAChanged;
};
-void BTDM_1AntSignalCompensation(struct rtw_adapter * padapter, u8 *rssi_wifi, u8 *rssi_bt);
-void BTDM_1AntForDhcp(struct rtw_adapter * padapter);
-void BTDM_1AntBtCoexist8723A(struct rtw_adapter * padapter);
+void BTDM_1AntSignalCompensation(struct rtw_adapter *padapter,
+ u8 *rssi_wifi, u8 *rssi_bt);
+void BTDM_1AntForDhcp(struct rtw_adapter *padapter);
+void BTDM_1AntBtCoexist8723A(struct rtw_adapter *padapter);
/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.h ===== */
@@ -1241,7 +1245,7 @@ struct btdm_8723a_2ant {
u8 btStatus;
};
-void BTDM_2AntBtCoexist8723A(struct rtw_adapter * padapter);
+void BTDM_2AntBtCoexist8723A(struct rtw_adapter *padapter);
/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.h ===== */
/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc8723.h ===== */
@@ -1310,15 +1314,17 @@ struct bt_coexist_8723a {
struct btdm_8723a_1ant btdm1Ant;
};
-void BTDM_SetFwChnlInfo(struct rtw_adapter * padapter, enum rt_media_status mstatus);
-u8 BTDM_IsWifiConnectionExist(struct rtw_adapter * padapter);
-void BTDM_SetFw3a(struct rtw_adapter * padapter, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5);
-void BTDM_QueryBtInformation(struct rtw_adapter * padapter);
-void BTDM_SetSwRfRxLpfCorner(struct rtw_adapter * padapter, u8 type);
-void BTDM_SetSwPenaltyTxRateAdaptive(struct rtw_adapter * padapter, u8 raType);
-void BTDM_SetFwDecBtPwr(struct rtw_adapter * padapter, u8 bDecBtPwr);
-u8 BTDM_BtProfileSupport(struct rtw_adapter * padapter);
-void BTDM_LpsLeave(struct rtw_adapter * padapter);
+void BTDM_SetFwChnlInfo(struct rtw_adapter *padapter,
+ enum rt_media_status mstatus);
+u8 BTDM_IsWifiConnectionExist(struct rtw_adapter *padapter);
+void BTDM_SetFw3a(struct rtw_adapter *padapter, u8 byte1, u8 byte2, u8 byte3,
+ u8 byte4, u8 byte5);
+void BTDM_QueryBtInformation(struct rtw_adapter *padapter);
+void BTDM_SetSwRfRxLpfCorner(struct rtw_adapter *padapter, u8 type);
+void BTDM_SetSwPenaltyTxRateAdaptive(struct rtw_adapter *padapter, u8 raType);
+void BTDM_SetFwDecBtPwr(struct rtw_adapter *padapter, u8 bDecBtPwr);
+u8 BTDM_BtProfileSupport(struct rtw_adapter *padapter);
+void BTDM_LpsLeave(struct rtw_adapter *padapter);
/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc8723.h ===== */
@@ -1340,8 +1346,9 @@ enum BT_A2DP_INDEX{
#define BTDM_ANT_BT 2
-void BTDM_SingleAnt(struct rtw_adapter * padapter, u8 bSingleAntOn, u8 bInterruptOn, u8 bMultiNAVOn);
-void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter * padapter);
+void BTDM_SingleAnt(struct rtw_adapter *padapter, u8 bSingleAntOn,
+ u8 bInterruptOn, u8 bMultiNAVOn);
+void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter *padapter);
/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.h ===== */
@@ -1361,7 +1368,8 @@ void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter * padapter);
#define BT_DACSWING_M7 2
#define BT_DACSWING_M10 3
-void BTDM_DiminishWiFi(struct rtw_adapter * Adapter, u8 bDACOn, u8 bInterruptOn, u8 DACSwingLevel, u8 bNAVOn);
+void BTDM_DiminishWiFi(struct rtw_adapter *Adapter, u8 bDACOn, u8 bInterruptOn,
+ u8 DACSwingLevel, u8 bNAVOn);
/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.h ===== */
@@ -1534,58 +1542,63 @@ struct bt_coexist_str {
u8 fw3aVal[5];
};
-void BTDM_CheckAntSelMode(struct rtw_adapter * padapter);
-void BTDM_FwC2hBtRssi(struct rtw_adapter * padapter, u8 *tmpBuf);
+void BTDM_CheckAntSelMode(struct rtw_adapter *padapter);
+void BTDM_FwC2hBtRssi(struct rtw_adapter *padapter, u8 *tmpBuf);
#define BT_FwC2hBtRssi BTDM_FwC2hBtRssi
-void BTDM_DisplayBtCoexInfo(struct rtw_adapter * padapter);
+void BTDM_DisplayBtCoexInfo(struct rtw_adapter *padapter);
#define BT_DisplayBtCoexInfo BTDM_DisplayBtCoexInfo
-void BTDM_RejectAPAggregatedPacket(struct rtw_adapter * padapter, u8 bReject);
-u8 BTDM_IsHT40(struct rtw_adapter * padapter);
-u8 BTDM_Legacy(struct rtw_adapter * padapter);
-void BTDM_CheckWiFiState(struct rtw_adapter * padapter);
-s32 BTDM_GetRxSS(struct rtw_adapter * padapter);
-u8 BTDM_CheckCoexBcnRssiState(struct rtw_adapter * padapter, u8 levelNum, u8 RssiThresh, u8 RssiThresh1);
-u8 BTDM_CheckCoexRSSIState1(struct rtw_adapter * padapter, u8 levelNum, u8 RssiThresh, u8 RssiThresh1);
-u8 BTDM_CheckCoexRSSIState(struct rtw_adapter * padapter, u8 levelNum, u8 RssiThresh, u8 RssiThresh1);
-void BTDM_Balance(struct rtw_adapter * padapter, u8 bBalanceOn, u8 ms0, u8 ms1);
-void BTDM_AGCTable(struct rtw_adapter * padapter, u8 type);
-void BTDM_BBBackOffLevel(struct rtw_adapter * padapter, u8 type);
-void BTDM_FWCoexAllOff(struct rtw_adapter * padapter);
-void BTDM_SWCoexAllOff(struct rtw_adapter * padapter);
-void BTDM_HWCoexAllOff(struct rtw_adapter * padapter);
-void BTDM_CoexAllOff(struct rtw_adapter * padapter);
-void BTDM_TurnOffBtCoexistBeforeEnterIPS(struct rtw_adapter * padapter);
-void BTDM_SignalCompensation(struct rtw_adapter * padapter, u8 *rssi_wifi, u8 *rssi_bt);
-void BTDM_UpdateCoexState(struct rtw_adapter * padapter);
-u8 BTDM_IsSameCoexistState(struct rtw_adapter * padapter);
-void BTDM_PWDBMonitor(struct rtw_adapter * padapter);
-u8 BTDM_IsBTBusy(struct rtw_adapter * padapter);
+void BTDM_RejectAPAggregatedPacket(struct rtw_adapter *padapter, u8 bReject);
+u8 BTDM_IsHT40(struct rtw_adapter *padapter);
+u8 BTDM_Legacy(struct rtw_adapter *padapter);
+void BTDM_CheckWiFiState(struct rtw_adapter *padapter);
+s32 BTDM_GetRxSS(struct rtw_adapter *padapter);
+u8 BTDM_CheckCoexBcnRssiState(struct rtw_adapter *padapter, u8 levelNum,
+ u8 RssiThresh, u8 RssiThresh1);
+u8 BTDM_CheckCoexRSSIState1(struct rtw_adapter *padapter, u8 levelNum,
+ u8 RssiThresh, u8 RssiThresh1);
+u8 BTDM_CheckCoexRSSIState(struct rtw_adapter *padapter, u8 levelNum,
+ u8 RssiThresh, u8 RssiThresh1);
+void BTDM_Balance(struct rtw_adapter *padapter, u8 bBalanceOn, u8 ms0, u8 ms1);
+void BTDM_AGCTable(struct rtw_adapter *padapter, u8 type);
+void BTDM_BBBackOffLevel(struct rtw_adapter *padapter, u8 type);
+void BTDM_FWCoexAllOff(struct rtw_adapter *padapter);
+void BTDM_SWCoexAllOff(struct rtw_adapter *padapter);
+void BTDM_HWCoexAllOff(struct rtw_adapter *padapter);
+void BTDM_CoexAllOff(struct rtw_adapter *padapter);
+void BTDM_TurnOffBtCoexistBeforeEnterIPS(struct rtw_adapter *padapter);
+void BTDM_SignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi,
+ u8 *rssi_bt);
+void BTDM_UpdateCoexState(struct rtw_adapter *padapter);
+u8 BTDM_IsSameCoexistState(struct rtw_adapter *padapter);
+void BTDM_PWDBMonitor(struct rtw_adapter *padapter);
+u8 BTDM_IsBTBusy(struct rtw_adapter *padapter);
#define BT_IsBtBusy BTDM_IsBTBusy
-u8 BTDM_IsWifiBusy(struct rtw_adapter * padapter);
-u8 BTDM_IsCoexistStateChanged(struct rtw_adapter * padapter);
-u8 BTDM_IsWifiUplink(struct rtw_adapter * padapter);
-u8 BTDM_IsWifiDownlink(struct rtw_adapter * padapter);
-u8 BTDM_IsBTHSMode(struct rtw_adapter * padapter);
-u8 BTDM_IsBTUplink(struct rtw_adapter * padapter);
-u8 BTDM_IsBTDownlink(struct rtw_adapter * padapter);
-void BTDM_AdjustForBtOperation(struct rtw_adapter * padapter);
-void BTDM_ForHalt(struct rtw_adapter * padapter);
-void BTDM_WifiScanNotify(struct rtw_adapter * padapter, u8 scanType);
-void BTDM_WifiAssociateNotify(struct rtw_adapter * padapter, u8 action);
-void BTDM_MediaStatusNotify(struct rtw_adapter * padapter, enum rt_media_status mstatus);
-void BTDM_ForDhcp(struct rtw_adapter * padapter);
-void BTDM_ResetActionProfileState(struct rtw_adapter * padapter);
-void BTDM_SetBtCoexCurrAntNum(struct rtw_adapter * padapter, u8 antNum);
+u8 BTDM_IsWifiBusy(struct rtw_adapter *padapter);
+u8 BTDM_IsCoexistStateChanged(struct rtw_adapter *padapter);
+u8 BTDM_IsWifiUplink(struct rtw_adapter *padapter);
+u8 BTDM_IsWifiDownlink(struct rtw_adapter *padapter);
+u8 BTDM_IsBTHSMode(struct rtw_adapter *padapter);
+u8 BTDM_IsBTUplink(struct rtw_adapter *padapter);
+u8 BTDM_IsBTDownlink(struct rtw_adapter *padapter);
+void BTDM_AdjustForBtOperation(struct rtw_adapter *padapter);
+void BTDM_ForHalt(struct rtw_adapter *padapter);
+void BTDM_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType);
+void BTDM_WifiAssociateNotify(struct rtw_adapter *padapter, u8 action);
+void BTDM_MediaStatusNotify(struct rtw_adapter *padapter,
+ enum rt_media_status mstatus);
+void BTDM_ForDhcp(struct rtw_adapter *padapter);
+void BTDM_ResetActionProfileState(struct rtw_adapter *padapter);
+void BTDM_SetBtCoexCurrAntNum(struct rtw_adapter *padapter, u8 antNum);
#define BT_SetBtCoexCurrAntNum BTDM_SetBtCoexCurrAntNum
-u8 BTDM_IsActionSCO(struct rtw_adapter * padapter);
-u8 BTDM_IsActionHID(struct rtw_adapter * padapter);
-u8 BTDM_IsActionA2DP(struct rtw_adapter * padapter);
-u8 BTDM_IsActionPAN(struct rtw_adapter * padapter);
-u8 BTDM_IsActionHIDA2DP(struct rtw_adapter * padapter);
-u8 BTDM_IsActionHIDPAN(struct rtw_adapter * padapter);
-u8 BTDM_IsActionPANA2DP(struct rtw_adapter * padapter);
-u32 BTDM_BtTxRxCounterH(struct rtw_adapter * padapter);
-u32 BTDM_BtTxRxCounterL(struct rtw_adapter * padapter);
+u8 BTDM_IsActionSCO(struct rtw_adapter *padapter);
+u8 BTDM_IsActionHID(struct rtw_adapter *padapter);
+u8 BTDM_IsActionA2DP(struct rtw_adapter *padapter);
+u8 BTDM_IsActionPAN(struct rtw_adapter *padapter);
+u8 BTDM_IsActionHIDA2DP(struct rtw_adapter *padapter);
+u8 BTDM_IsActionHIDPAN(struct rtw_adapter *padapter);
+u8 BTDM_IsActionPANA2DP(struct rtw_adapter *padapter);
+u32 BTDM_BtTxRxCounterH(struct rtw_adapter *padapter);
+u32 BTDM_BtTxRxCounterL(struct rtw_adapter *padapter);
/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtCoexist.h ===== */
@@ -1593,14 +1606,14 @@ u32 BTDM_BtTxRxCounterL(struct rtw_adapter * padapter);
#define RTS_CTS_NO_LEN_LIMIT 0
-u8 HALBT_GetPGAntNum(struct rtw_adapter * padapter);
+u8 HALBT_GetPGAntNum(struct rtw_adapter *padapter);
#define BT_GetPGAntNum HALBT_GetPGAntNum
-void HALBT_SetKey(struct rtw_adapter * padapter, u8 EntryNum);
-void HALBT_RemoveKey(struct rtw_adapter * padapter, u8 EntryNum);
-u8 HALBT_IsBTExist(struct rtw_adapter * padapter);
+void HALBT_SetKey(struct rtw_adapter *padapter, u8 EntryNum);
+void HALBT_RemoveKey(struct rtw_adapter *padapter, u8 EntryNum);
+u8 HALBT_IsBTExist(struct rtw_adapter *padapter);
#define BT_IsBtExist HALBT_IsBTExist
-u8 HALBT_BTChipType(struct rtw_adapter * padapter);
-void HALBT_SetRtsCtsNoLenLimit(struct rtw_adapter * padapter);
+u8 HALBT_BTChipType(struct rtw_adapter *padapter);
+void HALBT_SetRtsCtsNoLenLimit(struct rtw_adapter *padapter);
/* ===== End of sync from SD7 driver HAL/HalBT.c ===== */
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_recv.h b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
index 0177bbc1c1cf..875d37b3b94c 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_recv.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
@@ -56,8 +56,8 @@ struct interrupt_msg_format {
unsigned int MSG_EX;
};
-int rtl8723au_init_recv_priv(struct rtw_adapter * padapter);
-void rtl8723au_free_recv_priv(struct rtw_adapter * padapter);
+int rtl8723au_init_recv_priv(struct rtw_adapter *padapter);
+void rtl8723au_free_recv_priv(struct rtw_adapter *padapter);
void rtl8723a_process_phy_info(struct rtw_adapter *padapter, void *prframe);
void update_recvframe_attrib(struct recv_frame *precvframe, struct recv_stat *prxstat);
void update_recvframe_phyinfo(struct recv_frame *precvframe, struct phy_stat *pphy_info);
diff --git a/drivers/staging/rtl8723au/include/rtw_cmd.h b/drivers/staging/rtl8723au/include/rtw_cmd.h
index 71044107d13b..775dcdc1e7b9 100644
--- a/drivers/staging/rtl8723au/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723au/include/rtw_cmd.h
@@ -99,7 +99,6 @@ int rtw_init_cmd_priv23a(struct cmd_priv *pcmdpriv);
u32 rtw_init_evt_priv23a (struct evt_priv *pevtpriv);
void rtw_free_evt_priv23a (struct evt_priv *pevtpriv);
-void rtw_cmd_clr_isr23a(struct cmd_priv *pcmdpriv);
void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
enum rtw_drvextra_cmd_id
@@ -689,10 +688,10 @@ int rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms, boo
int rtw_setopmode_cmd23a(struct rtw_adapter *padapter, enum nl80211_iftype ifmode);
int rtw_setdatarate_cmd(struct rtw_adapter *padapter, u8 *rateset);
int rtw_setbasicrate_cmd(struct rtw_adapter *padapter, u8 *rateset);
-int rtw_setbbreg_cmd(struct rtw_adapter * padapter, u8 offset, u8 val);
-int rtw_setrfreg_cmd(struct rtw_adapter * padapter, u8 offset, u32 val);
-int rtw_getbbreg_cmd(struct rtw_adapter * padapter, u8 offset, u8 * pval);
-int rtw_getrfreg_cmd(struct rtw_adapter * padapter, u8 offset, u8 * pval);
+int rtw_setbbreg_cmd(struct rtw_adapter *padapter, u8 offset, u8 val);
+int rtw_setrfreg_cmd(struct rtw_adapter *padapter, u8 offset, u32 val);
+int rtw_getbbreg_cmd(struct rtw_adapter *padapter, u8 offset, u8 *pval);
+int rtw_getrfreg_cmd(struct rtw_adapter *padapter, u8 offset, u8 *pval);
int rtw_setrfintfs_cmd(struct rtw_adapter *padapter, u8 mode);
int rtw_setrttbl_cmd(struct rtw_adapter *padapter, struct setratable_parm *prate_table);
int rtw_getrttbl_cmd(struct rtw_adapter *padapter, struct getratable_rsp *pval);
@@ -713,7 +712,6 @@ int rtw_ps_cmd23a(struct rtw_adapter*padapter);
int rtw_chk_hi_queue_cmd23a(struct rtw_adapter*padapter);
#endif
-int rtw_set_ch_cmd23a(struct rtw_adapter*padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
int rtw_set_chplan_cmd(struct rtw_adapter*padapter, u8 chplan, u8 enqueue);
int rtw_led_blink_cmd(struct rtw_adapter*padapter, struct led_8723a *pLed);
int rtw_set_csa_cmd(struct rtw_adapter*padapter, u8 new_ch_no);
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index 51dba1fa4c5d..ffb37b252fc1 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -509,7 +509,7 @@ int rtw_check_bcn_info23a(struct rtw_adapter *Adapter,
struct ieee80211_mgmt *mgmt, u32 packet_len);
void update_IOT_info23a(struct rtw_adapter *padapter);
void update_capinfo23a(struct rtw_adapter *Adapter, u16 updateCap);
-void update_wireless_mode23a(struct rtw_adapter * padapter);
+void update_wireless_mode23a(struct rtw_adapter *padapter);
void update_tx_basic_rate23a(struct rtw_adapter *padapter, u8 modulation);
void update_bmc_sta_support_rate23a(struct rtw_adapter *padapter, u32 mac_id);
int update_sta_support_rate23a(struct rtw_adapter *padapter, u8 *pvar_ie,
diff --git a/drivers/staging/rtl8723au/include/wifi.h b/drivers/staging/rtl8723au/include/wifi.h
index fd3da3b5cf31..25d573c3e232 100644
--- a/drivers/staging/rtl8723au/include/wifi.h
+++ b/drivers/staging/rtl8723au/include/wifi.h
@@ -26,9 +26,9 @@
------------------------------------------------------------------------------*/
struct AC_param {
- unsigned char ACI_AIFSN;
- unsigned char CW;
- unsigned short TXOP_limit;
+ u8 ACI_AIFSN;
+ u8 CW;
+ __le16 TXOP_limit;
} __packed;
struct WMM_para_element {
@@ -38,10 +38,10 @@ struct WMM_para_element {
} __packed;
struct ADDBA_request {
- unsigned char dialog_token;
- unsigned short BA_para_set;
- unsigned short BA_timeout_value;
- unsigned short BA_starting_seqctrl;
+ u8 dialog_token;
+ __le16 BA_para_set;
+ __le16 BA_timeout_value;
+ __le16 BA_starting_seqctrl;
} __packed;
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 82a8c06ab347..537bd8214efe 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -1091,17 +1091,17 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
goto exit;
}
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
sinfo->signal = translate_percentage_to_dbm(padapter->recvpriv.
signal_strength);
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy = rtw_get_cur_max_rate(padapter);
- sinfo->filled |= STATION_INFO_RX_PACKETS;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = sta_rx_data_pkts(psta);
- sinfo->filled |= STATION_INFO_TX_PACKETS;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = psta->sta_stats.tx_pkts;
}
@@ -1468,9 +1468,8 @@ static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv,
return 0;
}
- if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) {
+ if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
- }
/*
if (wpa_version & NL80211_WPA_VERSION_2)
@@ -2363,7 +2362,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
ie_offset = offsetof(struct ieee80211_mgmt,
u.reassoc_req.variable);
- sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ sinfo.filled = 0;
sinfo.assoc_req_ies = pmgmt_frame + ie_offset;
sinfo.assoc_req_ies_len = frame_len - ie_offset;
cfg80211_new_sta(ndev, hdr->addr2, &sinfo, GFP_ATOMIC);
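The ioctl_cfg80211.c hunks above replace the old STATION_INFO_* flags with bit positions taken from enum nl80211_sta_info. A small sketch of that reporting convention follows; the function name and the numeric values are placeholders, not taken from the driver.

/*
 * Illustrative sketch only: filling a station_info the way the hunks
 * above do.  example_report_station and the values are placeholders.
 */
#include <linux/bitops.h>
#include <net/cfg80211.h>

static void example_report_station(struct station_info *sinfo)
{
	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
	sinfo->signal = -55;			/* dBm */

	sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
	sinfo->txrate.legacy = 540;		/* 100 kbit/s units, i.e. 54 Mbit/s */
}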
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 9966d16342b3..1b23eb13222b 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -91,7 +91,7 @@ static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */
/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy, 5.OtherBusy */
static int rtw_bt_sco = 3;
/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
-static int rtw_bt_ampdu = 1 ;
+static int rtw_bt_ampdu = 1;
#endif
/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 373a617ace54..05755b870a5f 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -80,11 +80,9 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_config_descriptor *pconf_desc;
struct usb_host_interface *phost_iface;
struct usb_interface_descriptor *piface_desc;
- struct usb_host_endpoint *phost_endp;
struct usb_endpoint_descriptor *pendp_desc;
- struct usb_device *pusbd;
- int i;
- int status = _FAIL;
+ struct usb_device *pusbd;
+ int i, status = _FAIL;
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
if (!pdvobjpriv)
@@ -114,42 +112,38 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
- phost_endp = phost_iface->endpoint + i;
- if (phost_endp) {
- pendp_desc = &phost_endp->desc;
-
- DBG_8723A("\nusb_endpoint_descriptor(%d):\n", i);
- DBG_8723A("bLength =%x\n", pendp_desc->bLength);
- DBG_8723A("bDescriptorType =%x\n",
- pendp_desc->bDescriptorType);
- DBG_8723A("bEndpointAddress =%x\n",
- pendp_desc->bEndpointAddress);
- DBG_8723A("wMaxPacketSize =%d\n",
- le16_to_cpu(pendp_desc->wMaxPacketSize));
- DBG_8723A("bInterval =%x\n", pendp_desc->bInterval);
-
- if (usb_endpoint_is_bulk_in(pendp_desc)) {
- DBG_8723A("usb_endpoint_is_bulk_in = %x\n",
- usb_endpoint_num(pendp_desc));
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
- usb_endpoint_num(pendp_desc);
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_int_in(pendp_desc)) {
- DBG_8723A("usb_endpoint_is_int_in = %x, Interval = %x\n",
- usb_endpoint_num(pendp_desc),
- pendp_desc->bInterval);
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
- usb_endpoint_num(pendp_desc);
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
- DBG_8723A("usb_endpoint_is_bulk_out = %x\n",
- usb_endpoint_num(pendp_desc));
- pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
- usb_endpoint_num(pendp_desc);
- pdvobjpriv->RtNumOutPipes++;
- }
- pdvobjpriv->ep_num[i] = usb_endpoint_num(pendp_desc);
+ pendp_desc = &phost_iface->endpoint[i].desc;
+
+ DBG_8723A("\nusb_endpoint_descriptor(%d):\n", i);
+ DBG_8723A("bLength =%x\n", pendp_desc->bLength);
+ DBG_8723A("bDescriptorType =%x\n", pendp_desc->bDescriptorType);
+ DBG_8723A("bEndpointAddress =%x\n",
+ pendp_desc->bEndpointAddress);
+ DBG_8723A("wMaxPacketSize =%d\n",
+ le16_to_cpu(pendp_desc->wMaxPacketSize));
+ DBG_8723A("bInterval =%x\n", pendp_desc->bInterval);
+
+ if (usb_endpoint_is_bulk_in(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_bulk_in = %x\n",
+ usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
+ usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_int_in(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_int_in = %x, Interval = "
+ "%x\n", usb_endpoint_num(pendp_desc),
+ pendp_desc->bInterval);
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
+ usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_bulk_out = %x\n",
+ usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
+ usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumOutPipes++;
}
+ pdvobjpriv->ep_num[i] = usb_endpoint_num(pendp_desc);
}
DBG_8723A("nr_endpoint =%d, in_num =%d, out_num =%d\n\n",
pdvobjpriv->nr_endpoint, pdvobjpriv->RtNumInPipes,
@@ -273,104 +267,6 @@ static void rtw_dev_unload(struct rtw_adapter *padapter)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
}
-int rtw_hw_suspend23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct net_device *pnetdev = padapter->pnetdev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if ((!padapter->bup) || (padapter->bDriverStopped) ||
- (padapter->bSurpriseRemoved)) {
- DBG_8723A("padapter->bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n",
- padapter->bup, padapter->bDriverStopped,
- padapter->bSurpriseRemoved);
- goto error_exit;
- }
-
- if (padapter) { /* system suspend */
- LeaveAllPowerSaveMode23a(padapter);
-
- DBG_8723A("==> rtw_hw_suspend23a\n");
- down(&pwrpriv->lock);
- pwrpriv->bips_processing = true;
- /* padapter->net_closed = true; */
- /* s1. */
- if (pnetdev) {
- netif_carrier_off(pnetdev);
- netif_tx_stop_all_queues(pnetdev);
- }
-
- /* s2. */
- rtw_disassoc_cmd23a(padapter, 500, false);
-
- /* s2-2. indicate disconnect to os */
- /* rtw_indicate_disconnect23a(padapter); */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
-
- rtw_os_indicate_disconnect23a(padapter);
-
- /* donnot enqueue cmd */
- rtw_lps_ctrl_wk_cmd23a(padapter,
- LPS_CTRL_DISCONNECT, 0);
- }
- /* s2-3. */
- rtw_free_assoc_resources23a(padapter, 1);
-
- /* s2-4. */
- rtw_free_network_queue23a(padapter);
- rtw_ips_dev_unload23a(padapter);
- pwrpriv->rf_pwrstate = rf_off;
- pwrpriv->bips_processing = false;
- up(&pwrpriv->lock);
- } else {
- goto error_exit;
- }
- return 0;
-error_exit:
- DBG_8723A("%s, failed\n", __func__);
- return -1;
-}
-
-int rtw_hw_resume23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct net_device *pnetdev = padapter->pnetdev;
-
- if (padapter) { /* system resume */
- DBG_8723A("==> rtw_hw_resume23a\n");
- down(&pwrpriv->lock);
- pwrpriv->bips_processing = true;
- rtw_reset_drv_sw23a(padapter);
-
- if (pm_netdev_open23a(pnetdev, false)) {
- up(&pwrpriv->lock);
- goto error_exit;
- }
-
- netif_device_attach(pnetdev);
- netif_carrier_on(pnetdev);
-
- if (!rtw_netif_queue_stopped(pnetdev))
- netif_tx_start_all_queues(pnetdev);
- else
- netif_tx_wake_all_queues(pnetdev);
-
- pwrpriv->bkeepfwalive = false;
-
- pwrpriv->rf_pwrstate = rf_on;
- pwrpriv->bips_processing = false;
-
- up(&pwrpriv->lock);
- } else {
- goto error_exit;
- }
- return 0;
-error_exit:
- DBG_8723A("%s, Open net dev failed\n", __func__);
- return -1;
-}
-
static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
{
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
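The usb_intf.c hunk above drops the never-NULL usb_host_endpoint check and indexes the descriptor array directly. The same walk, reduced to the generic USB endpoint helpers, looks roughly like the sketch below; the function and variable names are invented for illustration.

/*
 * Illustrative sketch only: classifying an interface's endpoints with
 * the <linux/usb.h> helpers.  example_count_pipes is a made-up name.
 */
#include <linux/usb.h>

static void example_count_pipes(struct usb_interface *intf,
				int *in_pipes, int *out_pipes)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	*in_pipes = 0;
	*out_pipes = 0;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		/* entries of the endpoint array are always valid, no NULL check */
		struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(epd) || usb_endpoint_is_int_in(epd))
			(*in_pipes)++;
		else if (usb_endpoint_is_bulk_out(epd))
			(*out_pipes)++;
	}
}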
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index b4612fb615f6..a47a19135d49 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -781,7 +781,7 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
buf[4] = 0;
buf[5] = 0;
- retval = ms_write_bytes(chip, PRO_WRITE_REG , 6, NO_WAIT_INT, buf, 6);
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
@@ -1291,7 +1291,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
for (i = 6; i < MS_EXTRA_SIZE + 6; i++)
data[i] = buf[i - 6];
- retval = ms_write_bytes(chip, WRITE_REG , (6+MS_EXTRA_SIZE),
+ retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
NO_WAIT_INT, data, 16);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
@@ -1342,7 +1342,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
data[4] = 0x20;
data[5] = page_num;
- retval = ms_write_bytes(chip, WRITE_REG , 6, NO_WAIT_INT, data, 6);
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
@@ -1619,7 +1619,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
data[4] = 0x20;
data[5] = i;
- retval = ms_write_bytes(chip, WRITE_REG , 6, NO_WAIT_INT,
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
data, 6);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
@@ -1695,7 +1695,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, (6+MS_EXTRA_SIZE));
+ MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE));
ms_set_err_code(chip, MS_NO_ERROR);
@@ -1988,7 +1988,7 @@ RE_SEARCH:
RTSX_WRITE_REG(chip, PPBUF_BASE2, 0xFF, 0x88);
RTSX_WRITE_REG(chip, PPBUF_BASE2 + 1, 0xFF, 0);
- retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG , 1,
+ retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
NO_WAIT_INT);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 756a9687c293..dab1995d1a6a 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -271,7 +271,7 @@ int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
/* Wait for TRANS_OK_INT */
timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, timeout * HZ / 1000);
+ &trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
chip->int_reg);
@@ -431,7 +431,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
spin_unlock_irq(&rtsx->reg_lock);
timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, timeout * HZ / 1000);
+ &trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -455,7 +455,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
init_completion(&trans_done);
spin_unlock_irq(&rtsx->reg_lock);
timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, timeout * HZ / 1000);
+ &trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -575,7 +575,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
spin_unlock_irq(&rtsx->reg_lock);
timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, timeout * HZ / 1000);
+ &trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -602,7 +602,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
init_completion(&trans_done);
spin_unlock_irq(&rtsx->reg_lock);
timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, timeout * HZ / 1000);
+ &trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -688,7 +688,7 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
/* Wait for TRANS_OK_INT */
timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, timeout * HZ / 1000);
+ &trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
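The rtsx_transport.c hunks above replace the open-coded timeout * HZ / 1000 with msecs_to_jiffies(), which rounds up instead of truncating, so a short timeout cannot collapse to zero jiffies on a low-HZ kernel. The small standalone program below (ordinary userspace C, not kernel code) makes the arithmetic visible; HZ = 100 is only an example configuration, and the round-up formula merely mimics the helper.

/*
 * Standalone demo of the truncation the hunks above avoid.
 * Build with: gcc -o jiffies_demo jiffies_demo.c
 */
#include <stdio.h>

#define HZ 100	/* example: a kernel built with a 100 Hz tick */

int main(void)
{
	unsigned int timeout_ms = 5;

	/* integer division truncates: 5 * 100 / 1000 == 0 jiffies,
	 * so the completion wait would time out immediately */
	printf("open-coded : %u jiffies\n", timeout_ms * HZ / 1000);

	/* msecs_to_jiffies() rounds up; mimic that rounding here so the
	 * same 5 ms request still yields at least one tick */
	printf("rounded up : %u jiffies\n", (timeout_ms * HZ + 999) / 1000);

	return 0;
}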
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 66261ab25c88..9bd69ce3be00 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -82,10 +82,7 @@ do { \
} while (0)
#else
/* looping version */
-#define R256(p0, p1, p2, p3, ROT, r_num) \
-do { \
- ROUND256(p0, p1, p2, p3, ROT, r_num); \
-} while (0)
+#define R256(p0, p1, p2, p3, ROT, r_num) ROUND256(p0, p1, p2, p3, ROT, r_num)
#define I256(R) \
do { \
@@ -174,9 +171,7 @@ do { \
#else /* looping version */
#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
-do { \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
-} while (0)
+ ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
#define I512(R) \
do { \
@@ -263,10 +258,8 @@ do { \
#if SKEIN_UNROLL_1024 == 0
#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
ROT, rn) \
-do { \
ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn); \
-} while (0)
+ pF, ROT, rn) \
#define I1024(R) \
do { \
@@ -291,10 +284,8 @@ do { \
#else /* looping version */
#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
ROT, rn) \
-do { \
ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn); \
-} while (0)
+ pF, ROT, rn) \
#define I1024(R) \
do { \
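The skein_block.c hunks above drop the do { ... } while (0) wrapper where the macro body is already a single call; the wrapper only earns its keep when the body is more than one statement. A small standalone C example of the distinction follows; the macro and variable names are invented.

/*
 * Standalone illustration: a single-expression macro expands safely on
 * its own, a multi-statement macro still needs the do/while wrapper to
 * behave as one statement inside an unbraced if/else.
 */
#include <stdio.h>

/* single statement: safe to expand bare, as the patch now does */
#define LOG_ONE(x)	printf("value: %d\n", (x))

/* multiple statements: do/while keeps the expansion a single statement */
#define LOG_TWO(x)				\
	do {					\
		printf("value: %d\n", (x));	\
		printf("twice: %d\n", (x) * 2);	\
	} while (0)

int main(void)
{
	int v = 3;

	if (v > 0)
		LOG_ONE(v);
	else
		LOG_TWO(v);

	return 0;
}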
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index 85bd7d0168b0..899078f1b8bc 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -191,7 +191,6 @@ static int __init skein_generic_init(void)
return 0;
-
unreg512:
crypto_unregister_shash(&alg512);
unreg256:
diff --git a/drivers/staging/sm7xxfb/Kconfig b/drivers/staging/sm7xxfb/Kconfig
new file mode 100644
index 000000000000..e2922ae3a3ee
--- /dev/null
+++ b/drivers/staging/sm7xxfb/Kconfig
@@ -0,0 +1,13 @@
+config FB_SM7XX
+ tristate "Silicon Motion SM7XX framebuffer support"
+ depends on FB && PCI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame buffer driver for the Silicon Motion SM710, SM712, SM721
+ and SM722 chips.
+
+ This driver is also available as a module. The module will be
+ called sm7xxfb. If you want to compile it as a module, say M
+ here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/staging/sm7xxfb/Makefile b/drivers/staging/sm7xxfb/Makefile
new file mode 100644
index 000000000000..48f471cf9f36
--- /dev/null
+++ b/drivers/staging/sm7xxfb/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FB_SM7XX) += sm7xxfb.o
diff --git a/drivers/staging/sm7xxfb/TODO b/drivers/staging/sm7xxfb/TODO
new file mode 100644
index 000000000000..7cb0b242f204
--- /dev/null
+++ b/drivers/staging/sm7xxfb/TODO
@@ -0,0 +1,12 @@
+TODO:
+- Dual head support
+- 2D acceleration support
+- use kernel coding style
+- refine the code and remove unused code
+- move it to drivers/video/fbdev/sm7xxfb.c
+
+Please send any patches to
+ Greg Kroah-Hartman <greg@kroah.com>
+ Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+ Teddy Wang <teddy.wang@siliconmotion.com>
+ Sudip Mukherjee <sudip@vectorindia.org>
diff --git a/drivers/staging/sm7xxfb/sm7xx.h b/drivers/staging/sm7xxfb/sm7xx.h
new file mode 100644
index 000000000000..7cc1896938b6
--- /dev/null
+++ b/drivers/staging/sm7xxfb/sm7xx.h
@@ -0,0 +1,779 @@
+/*
+ * Silicon Motion SM712 frame buffer device
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Authors: Ge Wang, gewang@siliconmotion.com
+ * Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#define NR_PALETTE 256
+
+#define FB_ACCEL_SMI_LYNX 88
+
+#define SCREEN_X_RES 1024
+#define SCREEN_Y_RES 600
+#define SCREEN_BPP 16
+
+/*Assume SM712 graphics chip has 4MB VRAM */
+#define SM712_VIDEOMEMORYSIZE 0x00400000
+/*Assume SM722 graphics chip has 8MB VRAM */
+#define SM722_VIDEOMEMORYSIZE 0x00800000
+
+#define dac_reg (0x3c8)
+#define dac_val (0x3c9)
+
+extern void __iomem *smtc_regbaseaddress;
+#define smtc_mmiowb(dat, reg) writeb(dat, smtc_regbaseaddress + reg)
+#define smtc_mmioww(dat, reg) writew(dat, smtc_regbaseaddress + reg)
+#define smtc_mmiowl(dat, reg) writel(dat, smtc_regbaseaddress + reg)
+
+#define smtc_mmiorb(reg) readb(smtc_regbaseaddress + reg)
+#define smtc_mmiorw(reg) readw(smtc_regbaseaddress + reg)
+#define smtc_mmiorl(reg) readl(smtc_regbaseaddress + reg)
+
+#define SIZE_SR00_SR04 (0x04 - 0x00 + 1)
+#define SIZE_SR10_SR24 (0x24 - 0x10 + 1)
+#define SIZE_SR30_SR75 (0x75 - 0x30 + 1)
+#define SIZE_SR80_SR93 (0x93 - 0x80 + 1)
+#define SIZE_SRA0_SRAF (0xAF - 0xA0 + 1)
+#define SIZE_GR00_GR08 (0x08 - 0x00 + 1)
+#define SIZE_AR00_AR14 (0x14 - 0x00 + 1)
+#define SIZE_CR00_CR18 (0x18 - 0x00 + 1)
+#define SIZE_CR30_CR4D (0x4D - 0x30 + 1)
+#define SIZE_CR90_CRA7 (0xA7 - 0x90 + 1)
+#define SIZE_VPR (0x6C + 1)
+#define SIZE_DPR (0x44 + 1)
+
+static inline void smtc_crtcw(int reg, int val)
+{
+ smtc_mmiowb(reg, 0x3d4);
+ smtc_mmiowb(val, 0x3d5);
+}
+
+static inline unsigned int smtc_crtcr(int reg)
+{
+ smtc_mmiowb(reg, 0x3d4);
+ return smtc_mmiorb(0x3d5);
+}
+
+static inline void smtc_grphw(int reg, int val)
+{
+ smtc_mmiowb(reg, 0x3ce);
+ smtc_mmiowb(val, 0x3cf);
+}
+
+static inline unsigned int smtc_grphr(int reg)
+{
+ smtc_mmiowb(reg, 0x3ce);
+ return smtc_mmiorb(0x3cf);
+}
+
+static inline void smtc_attrw(int reg, int val)
+{
+ smtc_mmiorb(0x3da);
+ smtc_mmiowb(reg, 0x3c0);
+ smtc_mmiorb(0x3c1);
+ smtc_mmiowb(val, 0x3c0);
+}
+
+static inline void smtc_seqw(int reg, int val)
+{
+ smtc_mmiowb(reg, 0x3c4);
+ smtc_mmiowb(val, 0x3c5);
+}
+
+static inline unsigned int smtc_seqr(int reg)
+{
+ smtc_mmiowb(reg, 0x3c4);
+ return smtc_mmiorb(0x3c5);
+}
+
+/* The next structure holds all information relevant for a specific video mode.
+ */
+
+struct ModeInit {
+ int mmsizex;
+ int mmsizey;
+ int bpp;
+ int hz;
+ unsigned char init_misc;
+ unsigned char init_sr00_sr04[SIZE_SR00_SR04];
+ unsigned char init_sr10_sr24[SIZE_SR10_SR24];
+ unsigned char init_sr30_sr75[SIZE_SR30_SR75];
+ unsigned char init_sr80_sr93[SIZE_SR80_SR93];
+ unsigned char init_sra0_sraf[SIZE_SRA0_SRAF];
+ unsigned char init_gr00_gr08[SIZE_GR00_GR08];
+ unsigned char init_ar00_ar14[SIZE_AR00_AR14];
+ unsigned char init_cr00_cr18[SIZE_CR00_CR18];
+ unsigned char init_cr30_cr4d[SIZE_CR30_CR4D];
+ unsigned char init_cr90_cra7[SIZE_CR90_CRA7];
+};
+
+/**********************************************************************
+ SM712 Mode table.
+ **********************************************************************/
+struct ModeInit vgamode[] = {
+ {
+ /* mode#0: 640 x 480 16Bpp 60Hz */
+ 640, 480, 16, 60,
+ /* Init_MISC */
+ 0xE3,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+ 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+ 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+ 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+ 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+ 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+ 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+ 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+ },
+ },
+ {
+ /* mode#1: 640 x 480 24Bpp 60Hz */
+ 640, 480, 24, 60,
+ /* Init_MISC */
+ 0xE3,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+ 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+ 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+ 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+ 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+ 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+ 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+ 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+ },
+ },
+ {
+ /* mode#0: 640 x 480 32Bpp 60Hz */
+ 640, 480, 32, 60,
+ /* Init_MISC */
+ 0xE3,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+ 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+ 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+ 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+ 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+ 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+ 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+ 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+ },
+ },
+
+ { /* mode#2: 800 x 600 16Bpp 60Hz */
+ 800, 600, 16, 60,
+ /* Init_MISC */
+ 0x2B,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
+ 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
+ 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+ 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
+ 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+ 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+ 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+ 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+ 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+ 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+ },
+ },
+ { /* mode#3: 800 x 600 24Bpp 60Hz */
+ 800, 600, 24, 60,
+ 0x2B,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
+ 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+ 0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+ 0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+ 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+ 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+ 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+ 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+ },
+ },
+ { /* mode#7: 800 x 600 32Bpp 60Hz */
+ 800, 600, 32, 60,
+ /* Init_MISC */
+ 0x2B,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
+ 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
+ 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+ 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
+ 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+ 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+ 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+ 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+ 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+ 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+ },
+ },
+ /* We use 1024x768 table to light 1024x600 panel for lemote */
+ { /* mode#4: 1024 x 600 16Bpp 60Hz */
+ 1024, 600, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
+ 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x00, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
+ 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
+ 0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
+ 0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+ { /* mode#5: 1024 x 768 24Bpp 60Hz */
+ 1024, 768, 24, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+ { /* mode#4: 1024 x 768 32Bpp 60Hz */
+ 1024, 768, 32, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x32, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+ { /* mode#6: 320 x 240 16Bpp 60Hz */
+ 320, 240, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x32, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
+ 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+
+ { /* mode#8: 320 x 240 32Bpp 60Hz */
+ 320, 240, 32, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x32, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
+ 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+};
+
+#define numvgamodes ARRAY_SIZE(vgamode)
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
new file mode 100644
index 000000000000..ebd95365ffae
--- /dev/null
+++ b/drivers/staging/sm7xxfb/sm7xxfb.c
@@ -0,0 +1,1024 @@
+/*
+ * Silicon Motion SM7XX frame buffer device
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Authors: Ge Wang, gewang@siliconmotion.com
+ * Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ *
+ * Copyright (C) 2011 Igalia, S.L.
+ * Author: Javier M. Mellid <jmunhoz@igalia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
+ */
+
+#include <linux/io.h>
+#include <linux/fb.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+
+#ifdef CONFIG_PM
+#include <linux/pm.h>
+#endif
+
+#include "sm7xx.h"
+
+/*
+* Private structure
+*/
+struct smtcfb_info {
+ struct pci_dev *pdev;
+ struct fb_info fb;
+ u16 chip_id;
+ u8 chip_rev_id;
+
+ void __iomem *lfb; /* linear frame buffer */
+ void __iomem *dp_regs; /* drawing processor control regs */
+ void __iomem *vp_regs; /* video processor control regs */
+ void __iomem *cp_regs; /* capture processor control regs */
+ void __iomem *mmio; /* memory map IO port */
+
+ u_int width;
+ u_int height;
+ u_int hz;
+
+ u32 colreg[17];
+};
+
+void __iomem *smtc_regbaseaddress; /* Memory Map IO starting address */
+
+static struct fb_var_screeninfo smtcfb_var = {
+ .xres = 1024,
+ .yres = 600,
+ .xres_virtual = 1024,
+ .yres_virtual = 600,
+ .bits_per_pixel = 16,
+ .red = {16, 8, 0},
+ .green = {8, 8, 0},
+ .blue = {0, 8, 0},
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .vmode = FB_VMODE_NONINTERLACED,
+ .nonstd = 0,
+ .accel_flags = FB_ACCELF_TEXT,
+};
+
+static struct fb_fix_screeninfo smtcfb_fix = {
+ .id = "smXXXfb",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .line_length = 800 * 3,
+ .accel = FB_ACCEL_SMI_LYNX,
+ .type_aux = 0,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+};
+
+struct vesa_mode {
+ char index[6];
+ u16 lfb_width;
+ u16 lfb_height;
+ u16 lfb_depth;
+};
+
+static struct vesa_mode vesa_mode_table[] = {
+ {"0x301", 640, 480, 8},
+ {"0x303", 800, 600, 8},
+ {"0x305", 1024, 768, 8},
+ {"0x307", 1280, 1024, 8},
+
+ {"0x311", 640, 480, 16},
+ {"0x314", 800, 600, 16},
+ {"0x317", 1024, 768, 16},
+ {"0x31A", 1280, 1024, 16},
+
+ {"0x312", 640, 480, 24},
+ {"0x315", 800, 600, 24},
+ {"0x318", 1024, 768, 24},
+ {"0x31B", 1280, 1024, 24},
+};
+
+static struct screen_info smtc_scr_info;
+
+/* process command line options, get vga parameter */
+static int __init sm7xx_vga_setup(char *options)
+{
+ int i;
+
+ if (!options || !*options)
+ return -EINVAL;
+
+ smtc_scr_info.lfb_width = 0;
+ smtc_scr_info.lfb_height = 0;
+ smtc_scr_info.lfb_depth = 0;
+
+ pr_debug("sm7xx_vga_setup = %s\n", options);
+
+ for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) {
+ if (strstr(options, vesa_mode_table[i].index)) {
+ smtc_scr_info.lfb_width = vesa_mode_table[i].lfb_width;
+ smtc_scr_info.lfb_height =
+ vesa_mode_table[i].lfb_height;
+ smtc_scr_info.lfb_depth = vesa_mode_table[i].lfb_depth;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+__setup("vga=", sm7xx_vga_setup);
+
+static void sm712_setpalette(int regno, unsigned red, unsigned green,
+ unsigned blue, struct fb_info *info)
+{
+ /* set bit 5:4 = 01 (write LCD RAM only) */
+ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
+
+ smtc_mmiowb(regno, dac_reg);
+ smtc_mmiowb(red >> 10, dac_val);
+ smtc_mmiowb(green >> 10, dac_val);
+ smtc_mmiowb(blue >> 10, dac_val);
+}
+
+/* chan_to_field
+ *
+ * convert a colour value into a field position
+ *
+ * from pxafb.c
+ */
+
+static inline unsigned int chan_to_field(unsigned int chan,
+ struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
+
+static int smtc_blank(int blank_mode, struct fb_info *info)
+{
+ /* clear DPMS setting */
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ /* Screen On: HSync: On, VSync : On */
+ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+ break;
+ case FB_BLANK_NORMAL:
+ /* Screen Off: HSync: On, VSync : On Soft blank */
+ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ /* Screen On: HSync: On, VSync : Off */
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ /* Screen On: HSync: Off, VSync : On */
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ break;
+ case FB_BLANK_POWERDOWN:
+ /* Screen On: HSync: Off, VSync : Off */
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned trans, struct fb_info *info)
+{
+ struct smtcfb_info *sfb;
+ u32 val;
+
+ sfb = info->par;
+
+ if (regno > 255)
+ return 1;
+
+ switch (sfb->fb.fix.visual) {
+ case FB_VISUAL_DIRECTCOLOR:
+ case FB_VISUAL_TRUECOLOR:
+ /*
+ * 16/32 bit true-colour, use pseudo-palette for 16 base color
+ */
+ if (regno < 16) {
+ if (sfb->fb.var.bits_per_pixel == 16) {
+ u32 *pal = sfb->fb.pseudo_palette;
+
+ val = chan_to_field(red, &sfb->fb.var.red);
+ val |= chan_to_field(green, &sfb->fb.var.green);
+ val |= chan_to_field(blue, &sfb->fb.var.blue);
+#ifdef __BIG_ENDIAN
+ pal[regno] =
+ ((red & 0xf800) >> 8) |
+ ((green & 0xe000) >> 13) |
+ ((green & 0x1c00) << 3) |
+ ((blue & 0xf800) >> 3);
+#else
+ pal[regno] = val;
+#endif
+ } else {
+ u32 *pal = sfb->fb.pseudo_palette;
+
+ val = chan_to_field(red, &sfb->fb.var.red);
+ val |= chan_to_field(green, &sfb->fb.var.green);
+ val |= chan_to_field(blue, &sfb->fb.var.blue);
+#ifdef __BIG_ENDIAN
+ val =
+ (val & 0xff00ff00 >> 8) |
+ (val & 0x00ff00ff << 8);
+#endif
+ pal[regno] = val;
+ }
+ }
+ break;
+
+ case FB_VISUAL_PSEUDOCOLOR:
+ /* color depth 8 bit */
+ sm712_setpalette(regno, red, green, blue, info);
+ break;
+
+ default:
+ return 1; /* unknown type */
+ }
+
+ return 0;
+}
+
+#ifdef __BIG_ENDIAN
+static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t
+ count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+
+ u32 *buffer, *dst;
+ u32 __iomem *src;
+ int c, i, cnt = 0, err = 0;
+ unsigned long total_size;
+
+ if (!info || !info->screen_base)
+ return -ENODEV;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->screen_size;
+
+ if (total_size == 0)
+ total_size = info->fix.smem_len;
+
+ if (p >= total_size)
+ return 0;
+
+ if (count >= total_size)
+ count = total_size;
+
+ if (count + p > total_size)
+ count = total_size - p;
+
+ buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ src = (u32 __iomem *) (info->screen_base + p);
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
+ while (count) {
+ c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+ dst = buffer;
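+ /* read 32-bit words, swapping the two bytes in each 16-bit half for the big-endian host */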
+ for (i = c >> 2; i--;) {
+ *dst = fb_readl(src++);
+ *dst =
+ ((*dst & 0xff00ff00) >> 8) |
+ ((*dst & 0x00ff00ff) << 8);
+ dst++;
+ }
+ if (c & 3) {
+ u8 *dst8 = (u8 *)dst;
+ u8 __iomem *src8 = (u8 __iomem *)src;
+
+ for (i = c & 3; i--;) {
+ if (i & 1) {
+ *dst8++ = fb_readb(++src8);
+ } else {
+ *dst8++ = fb_readb(--src8);
+ src8 += 2;
+ }
+ }
+ src = (u32 __iomem *)src8;
+ }
+
+ if (copy_to_user(buf, buffer, c)) {
+ err = -EFAULT;
+ break;
+ }
+ *ppos += c;
+ buf += c;
+ cnt += c;
+ count -= c;
+ }
+
+ kfree(buffer);
+
+ return (err) ? err : cnt;
+}
+
+static ssize_t
+smtcfb_write(struct fb_info *info, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned long p = *ppos;
+
+ u32 *buffer, *src;
+ u32 __iomem *dst;
+ int c, i, cnt = 0, err = 0;
+ unsigned long total_size;
+
+ if (!info || !info->screen_base)
+ return -ENODEV;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->screen_size;
+
+ if (total_size == 0)
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EFBIG;
+
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
+ }
+
+ if (count + p > total_size) {
+ if (!err)
+ err = -ENOSPC;
+
+ count = total_size - p;
+ }
+
+ buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ dst = (u32 __iomem *) (info->screen_base + p);
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
+ while (count) {
+ c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+ src = buffer;
+
+ if (copy_from_user(src, buf, c)) {
+ err = -EFAULT;
+ break;
+ }
+
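+ /* swap the two bytes in each 16-bit half back before writing the words to the framebuffer */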
+ for (i = c >> 2; i--;) {
+ fb_writel(((*src & 0xff00ff00) >> 8) |
+ ((*src & 0x00ff00ff) << 8), dst++);
+ src++;
+ }
+ if (c & 3) {
+ u8 *src8 = (u8 *)src;
+ u8 __iomem *dst8 = (u8 __iomem *)dst;
+
+ for (i = c & 3; i--;) {
+ if (i & 1) {
+ fb_writeb(*src8++, ++dst8);
+ } else {
+ fb_writeb(*src8++, --dst8);
+ dst8 += 2;
+ }
+ }
+ dst = (u32 __iomem *)dst8;
+ }
+
+ *ppos += c;
+ buf += c;
+ cnt += c;
+ count -= c;
+ }
+
+ kfree(buffer);
+
+ return (cnt) ? cnt : err;
+}
+#endif /* __BIG_ENDIAN */
+
+static void sm7xx_set_timing(struct smtcfb_info *sfb)
+{
+ int i = 0, j = 0;
+ u32 m_nscreenstride;
+
+ dev_dbg(&sfb->pdev->dev,
+ "sfb->width=%d sfb->height=%d sfb->fb.var.bits_per_pixel=%d sfb->hz=%d\n",
+ sfb->width, sfb->height, sfb->fb.var.bits_per_pixel, sfb->hz);
+
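+ /* find a mode table entry matching the requested width, height, depth and refresh rate */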
+ for (j = 0; j < numvgamodes; j++) {
+ if (vgamode[j].mmsizex == sfb->width &&
+ vgamode[j].mmsizey == sfb->height &&
+ vgamode[j].bpp == sfb->fb.var.bits_per_pixel &&
+ vgamode[j].hz == sfb->hz) {
+ dev_dbg(&sfb->pdev->dev,
+ "vgamode[j].mmsizex=%d vgamode[j].mmSizeY=%d vgamode[j].bpp=%d vgamode[j].hz=%d\n",
+ vgamode[j].mmsizex, vgamode[j].mmsizey,
+ vgamode[j].bpp, vgamode[j].hz);
+
+ dev_dbg(&sfb->pdev->dev, "vgamode index=%d\n", j);
+
+ smtc_mmiowb(0x0, 0x3c6);
+
+ smtc_seqw(0, 0x1);
+
+ smtc_mmiowb(vgamode[j].init_misc, 0x3c2);
+
+ /* init SEQ register SR00 - SR04 */
+ for (i = 0; i < SIZE_SR00_SR04; i++)
+ smtc_seqw(i, vgamode[j].init_sr00_sr04[i]);
+
+ /* init SEQ register SR10 - SR24 */
+ for (i = 0; i < SIZE_SR10_SR24; i++)
+ smtc_seqw(i + 0x10,
+ vgamode[j].init_sr10_sr24[i]);
+
+ /* init SEQ register SR30 - SR75 */
+ for (i = 0; i < SIZE_SR30_SR75; i++)
+ if ((i + 0x30) != 0x62 &&
+ (i + 0x30) != 0x6a &&
+ (i + 0x30) != 0x6b)
+ smtc_seqw(i + 0x30,
+ vgamode[j].init_sr30_sr75[i]);
+
+ /* init SEQ register SR80 - SR93 */
+ for (i = 0; i < SIZE_SR80_SR93; i++)
+ smtc_seqw(i + 0x80,
+ vgamode[j].init_sr80_sr93[i]);
+
+ /* init SEQ register SRA0 - SRAF */
+ for (i = 0; i < SIZE_SRA0_SRAF; i++)
+ smtc_seqw(i + 0xa0,
+ vgamode[j].init_sra0_sraf[i]);
+
+ /* init Graphic register GR00 - GR08 */
+ for (i = 0; i < SIZE_GR00_GR08; i++)
+ smtc_grphw(i, vgamode[j].init_gr00_gr08[i]);
+
+ /* init Attribute register AR00 - AR14 */
+ for (i = 0; i < SIZE_AR00_AR14; i++)
+ smtc_attrw(i, vgamode[j].init_ar00_ar14[i]);
+
+ /* init CRTC register CR00 - CR18 */
+ for (i = 0; i < SIZE_CR00_CR18; i++)
+ smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
+
+ /* init CRTC register CR30 - CR4D */
+ for (i = 0; i < SIZE_CR30_CR4D; i++)
+ smtc_crtcw(i + 0x30,
+ vgamode[j].init_cr30_cr4d[i]);
+
+ /* init CRTC register CR90 - CRA7 */
+ for (i = 0; i < SIZE_CR90_CRA7; i++)
+ smtc_crtcw(i + 0x90,
+ vgamode[j].init_cr90_cra7[i]);
+ }
+ }
+ smtc_mmiowb(0x67, 0x3c2);
+
+ /* set VPR registers */
+ writel(0x0, sfb->vp_regs + 0x0C);
+ writel(0x0, sfb->vp_regs + 0x40);
+
+ /* set data width */
+ m_nscreenstride =
+ (sfb->width * sfb->fb.var.bits_per_pixel) / 64;
+ switch (sfb->fb.var.bits_per_pixel) {
+ case 8:
+ writel(0x0, sfb->vp_regs + 0x0);
+ break;
+ case 16:
+ writel(0x00020000, sfb->vp_regs + 0x0);
+ break;
+ case 24:
+ writel(0x00040000, sfb->vp_regs + 0x0);
+ break;
+ case 32:
+ writel(0x00030000, sfb->vp_regs + 0x0);
+ break;
+ }
+ writel((u32) (((m_nscreenstride + 2) << 16) | m_nscreenstride),
+ sfb->vp_regs + 0x10);
+}
+
+static void smtc_set_timing(struct smtcfb_info *sfb)
+{
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ case 0x720:
+ sm7xx_set_timing(sfb);
+ break;
+ }
+}
+
+static void smtcfb_setmode(struct smtcfb_info *sfb)
+{
+ switch (sfb->fb.var.bits_per_pixel) {
+ case 32:
+ sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres * 4;
+ sfb->fb.var.red.length = 8;
+ sfb->fb.var.green.length = 8;
+ sfb->fb.var.blue.length = 8;
+ sfb->fb.var.red.offset = 16;
+ sfb->fb.var.green.offset = 8;
+ sfb->fb.var.blue.offset = 0;
+ break;
+ case 24:
+ sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres * 3;
+ sfb->fb.var.red.length = 8;
+ sfb->fb.var.green.length = 8;
+ sfb->fb.var.blue.length = 8;
+ sfb->fb.var.red.offset = 16;
+ sfb->fb.var.green.offset = 8;
+ sfb->fb.var.blue.offset = 0;
+ break;
+ case 8:
+ sfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres;
+ sfb->fb.var.red.length = 3;
+ sfb->fb.var.green.length = 3;
+ sfb->fb.var.blue.length = 2;
+ sfb->fb.var.red.offset = 5;
+ sfb->fb.var.green.offset = 2;
+ sfb->fb.var.blue.offset = 0;
+ break;
+ case 16:
+ default:
+ sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres * 2;
+ sfb->fb.var.red.length = 5;
+ sfb->fb.var.green.length = 6;
+ sfb->fb.var.blue.length = 5;
+ sfb->fb.var.red.offset = 11;
+ sfb->fb.var.green.offset = 5;
+ sfb->fb.var.blue.offset = 0;
+ break;
+ }
+
+ sfb->width = sfb->fb.var.xres;
+ sfb->height = sfb->fb.var.yres;
+ sfb->hz = 60;
+ smtc_set_timing(sfb);
+}
+
+static int smtc_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ /* sanity checks */
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
+
+ /* set valid default bpp */
+ if ((var->bits_per_pixel != 8) && (var->bits_per_pixel != 16) &&
+ (var->bits_per_pixel != 24) && (var->bits_per_pixel != 32))
+ var->bits_per_pixel = 16;
+
+ return 0;
+}
+
+static int smtc_set_par(struct fb_info *info)
+{
+ smtcfb_setmode(info->par);
+
+ return 0;
+}
+
+static struct fb_ops smtcfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = smtc_check_var,
+ .fb_set_par = smtc_set_par,
+ .fb_setcolreg = smtc_setcolreg,
+ .fb_blank = smtc_blank,
+ .fb_fillrect = cfb_fillrect,
+ .fb_imageblit = cfb_imageblit,
+ .fb_copyarea = cfb_copyarea,
+#ifdef __BIG_ENDIAN
+ .fb_read = smtcfb_read,
+ .fb_write = smtcfb_write,
+#endif
+};
+
+/*
+ * alloc struct smtcfb_info and assign default values
+ */
+static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *pdev)
+{
+ struct smtcfb_info *sfb;
+
+ sfb = kzalloc(sizeof(*sfb), GFP_KERNEL);
+
+ if (!sfb)
+ return NULL;
+
+ sfb->pdev = pdev;
+
+ sfb->fb.flags = FBINFO_FLAG_DEFAULT;
+ sfb->fb.fbops = &smtcfb_ops;
+ sfb->fb.fix = smtcfb_fix;
+ sfb->fb.var = smtcfb_var;
+ sfb->fb.pseudo_palette = sfb->colreg;
+ sfb->fb.par = sfb;
+
+ return sfb;
+}
+
+/*
+ * free struct smtcfb_info
+ */
+static void smtc_free_fb_info(struct smtcfb_info *sfb)
+{
+ kfree(sfb);
+}
+
+/*
+ * Unmap the memory mapped IO registers
+ */
+
+static void smtc_unmap_mmio(struct smtcfb_info *sfb)
+{
+ if (sfb && smtc_regbaseaddress)
+ smtc_regbaseaddress = NULL;
+}
+
+/*
+ * Map in the screen memory
+ */
+
+static int smtc_map_smem(struct smtcfb_info *sfb,
+ struct pci_dev *pdev, u_long smem_len)
+{
+ sfb->fb.fix.smem_start = pci_resource_start(pdev, 0);
+
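+ /* a 32 bpp framebuffer on a big-endian host starts 0x800000 into the BAR */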
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32)
+ sfb->fb.fix.smem_start += 0x800000;
+#endif
+
+ sfb->fb.fix.smem_len = smem_len;
+
+ sfb->fb.screen_base = sfb->lfb;
+
+ if (!sfb->fb.screen_base) {
+ dev_err(&pdev->dev,
+ "%s: unable to map screen memory\n", sfb->fb.fix.id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Unmap the screen memory
+ */
+static void smtc_unmap_smem(struct smtcfb_info *sfb)
+{
+ if (sfb && sfb->fb.screen_base) {
+ iounmap(sfb->fb.screen_base);
+ sfb->fb.screen_base = NULL;
+ }
+}
+
+/*
+ * We need to wake up the device and make sure it's in linear memory mode.
+ */
+static inline void sm7xx_init_hw(void)
+{
+ outb_p(0x18, 0x3c4);
+ outb_p(0x11, 0x3c5);
+}
+
+static int smtcfb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct smtcfb_info *sfb;
+ u_long smem_size = 0x00800000; /* default 8MB */
+ int err;
+ unsigned long mmio_base;
+
+ dev_info(&pdev->dev, "Silicon Motion display driver.");
+
+ err = pci_enable_device(pdev); /* enable SMTC chip */
+ if (err)
+ return err;
+
+ sprintf(smtcfb_fix.id, "sm%Xfb", ent->device);
+
+ sfb = smtc_alloc_fb_info(pdev);
+
+ if (!sfb) {
+ err = -ENOMEM;
+ goto failed_free;
+ }
+
+ sfb->chip_id = ent->device;
+
+ pci_set_drvdata(pdev, sfb);
+
+ sm7xx_init_hw();
+
+ /* get mode parameter from smtc_scr_info */
+ if (smtc_scr_info.lfb_width != 0) {
+ sfb->fb.var.xres = smtc_scr_info.lfb_width;
+ sfb->fb.var.yres = smtc_scr_info.lfb_height;
+ sfb->fb.var.bits_per_pixel = smtc_scr_info.lfb_depth;
+ } else {
+ /* default resolution 1024x600 16bit mode */
+ sfb->fb.var.xres = SCREEN_X_RES;
+ sfb->fb.var.yres = SCREEN_Y_RES;
+ sfb->fb.var.bits_per_pixel = SCREEN_BPP;
+ }
+
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 24)
+ sfb->fb.var.bits_per_pixel = (smtc_scr_info.lfb_depth = 32);
+#endif
+ /* map the MMIO region and determine the video memory size */
+ mmio_base = pci_resource_start(pdev, 0);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ sfb->fb.fix.mmio_start = mmio_base + 0x00400000;
+ sfb->fb.fix.mmio_len = 0x00400000;
+ smem_size = SM712_VIDEOMEMORYSIZE;
+#ifdef __BIG_ENDIAN
+ sfb->lfb = ioremap(mmio_base, 0x00c00000);
+#else
+ sfb->lfb = ioremap(mmio_base, 0x00800000);
+#endif
+ sfb->mmio = (smtc_regbaseaddress =
+ sfb->lfb + 0x00700000);
+ sfb->dp_regs = sfb->lfb + 0x00408000;
+ sfb->vp_regs = sfb->lfb + 0x0040c000;
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32) {
+ sfb->lfb += 0x800000;
+ dev_info(&pdev->dev, "sfb->lfb=%p", sfb->lfb);
+ }
+#endif
+ if (!smtc_regbaseaddress) {
+ dev_err(&pdev->dev,
+ "%s: unable to map memory mapped IO!",
+ sfb->fb.fix.id);
+ err = -ENOMEM;
+ goto failed_fb;
+ }
+
+ /* set MCLK = 14.31818 * (0x16 / 0x2) */
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x62, 0x3e);
+ /* enable PCI burst */
+ smtc_seqw(0x17, 0x20);
+ /* enable word swap */
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32)
+ smtc_seqw(0x17, 0x30);
+#endif
+ break;
+ case 0x720:
+ sfb->fb.fix.mmio_start = mmio_base;
+ sfb->fb.fix.mmio_len = 0x00200000;
+ smem_size = SM722_VIDEOMEMORYSIZE;
+ sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+ sfb->lfb = sfb->dp_regs + 0x00200000;
+ sfb->mmio = (smtc_regbaseaddress =
+ sfb->dp_regs + 0x000c0000);
+ sfb->vp_regs = sfb->dp_regs + 0x800;
+
+ smtc_seqw(0x62, 0xff);
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "No valid Silicon Motion display chip was detected!");
+ err = -ENODEV;
+ goto failed_fb;
+ }
+
+ /* a 15 bpp request is treated as 16 bpp */
+ if (sfb->fb.var.bits_per_pixel == 15)
+ sfb->fb.var.bits_per_pixel = 16;
+
+ sfb->fb.var.xres_virtual = sfb->fb.var.xres;
+ sfb->fb.var.yres_virtual = sfb->fb.var.yres;
+ err = smtc_map_smem(sfb, pdev, smem_size);
+ if (err)
+ goto failed;
+
+ smtcfb_setmode(sfb);
+
+ err = register_framebuffer(&sfb->fb);
+ if (err < 0)
+ goto failed;
+
+ dev_info(&pdev->dev,
+ "Silicon Motion SM%X Rev%X primary display mode %dx%d-%d Init Complete.",
+ sfb->chip_id, sfb->chip_rev_id, sfb->fb.var.xres,
+ sfb->fb.var.yres, sfb->fb.var.bits_per_pixel);
+
+ return 0;
+
+failed:
+ dev_err(&pdev->dev, "Silicon Motion, Inc. primary display init fail.");
+
+ smtc_unmap_smem(sfb);
+ smtc_unmap_mmio(sfb);
+failed_fb:
+ smtc_free_fb_info(sfb);
+
+failed_free:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+/*
+ * 0x710 (LynxEM)
+ * 0x712 (LynxEM+)
+ * 0x720 (Lynx3DM, Lynx3DM+)
+ */
+static const struct pci_device_id smtcfb_pci_table[] = {
+ { PCI_DEVICE(0x126f, 0x710), },
+ { PCI_DEVICE(0x126f, 0x712), },
+ { PCI_DEVICE(0x126f, 0x720), },
+ {0,}
+};
+
+static void smtcfb_pci_remove(struct pci_dev *pdev)
+{
+ struct smtcfb_info *sfb;
+
+ sfb = pci_get_drvdata(pdev);
+ smtc_unmap_smem(sfb);
+ smtc_unmap_mmio(sfb);
+ unregister_framebuffer(&sfb->fb);
+ smtc_free_fb_info(sfb);
+}
+
+#ifdef CONFIG_PM
+static int smtcfb_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct smtcfb_info *sfb;
+
+ sfb = pci_get_drvdata(pdev);
+
+ /* put the hw in sleep mode: use the external clock and self memory
+ * refresh so that we can turn off the internal PLLs later on
+ */
+ smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
+ smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
+
+ console_lock();
+ fb_set_suspend(&sfb->fb, 1);
+ console_unlock();
+
+ /* additionally turn off all function blocks including internal PLLs */
+ smtc_seqw(0x21, 0xff);
+
+ return 0;
+}
+
+static int smtcfb_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct smtcfb_info *sfb;
+
+ sfb = pci_get_drvdata(pdev);
+
+ /* reinit hardware */
+ sm7xx_init_hw();
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ /* set MCLK = 14.31818 * (0x16 / 0x2) */
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x62, 0x3e);
+ /* enable PCI burst */
+ smtc_seqw(0x17, 0x20);
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32)
+ smtc_seqw(0x17, 0x30);
+#endif
+ break;
+ case 0x720:
+ smtc_seqw(0x62, 0xff);
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ }
+
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
+ smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
+
+ smtcfb_setmode(sfb);
+
+ console_lock();
+ fb_set_suspend(&sfb->fb, 0);
+ console_unlock();
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume);
+#define SM7XX_PM_OPS (&sm7xx_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define SM7XX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+static struct pci_driver smtcfb_driver = {
+ .name = "smtcfb",
+ .id_table = smtcfb_pci_table,
+ .probe = smtcfb_pci_probe,
+ .remove = smtcfb_pci_remove,
+ .driver.pm = SM7XX_PM_OPS,
+};
+
+module_pci_driver(smtcfb_driver);
+
+MODULE_AUTHOR("Siliconmotion ");
+MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/speakup/i18n.h b/drivers/staging/speakup/i18n.h
index 16a0871373d9..326d086f9d5a 100644
--- a/drivers/staging/speakup/i18n.h
+++ b/drivers/staging/speakup/i18n.h
@@ -3,7 +3,7 @@
/* Internationalization declarations */
enum msg_index_t {
- MSG_FIRST_INDEX ,
+ MSG_FIRST_INDEX,
MSG_ANNOUNCEMENTS_START = MSG_FIRST_INDEX,
MSG_BLANK = MSG_ANNOUNCEMENTS_START,
MSG_IAM_ALIVE,
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index b12c76de60b0..3708bc13ae86 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -566,7 +566,7 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
if (ch >= ' ' && ch < '~')
*cp1++ = ch;
else
- cp1 += sprintf(cp1, "\\""x%02x", ch);
+ cp1 += sprintf(cp1, "\\x%02x", ch);
}
*cp1++ = '"';
*cp1++ = '\n';
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 507fc9a1776e..a0315701c7d9 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -157,7 +157,7 @@ static void __speakup_paste_selection(struct work_struct *work)
pasted += count;
}
remove_wait_queue(&vc->paste_wait, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 4e059ea78d4c..89592c0b9151 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -325,7 +325,7 @@ static struct synth_settings *synth_interrogate(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
- unsigned int port_val = 0;
+ unsigned int port_val = 0;
int i = 0;
struct synth_settings *sp;
@@ -361,7 +361,8 @@ static int synth_probe(struct spk_synth *synth)
port_val &= 0xfbff;
if (port_val != 0x107f) {
pr_info("DoubleTalk PC: not found\n");
- synth_release_region(synth_lpc, SYNTH_IO_EXTENT);
+ if (synth_lpc)
+ synth_release_region(synth_lpc, SYNTH_IO_EXTENT);
return -ENODEV;
}
while (inw_p(synth_lpc) != 0x147f)
@@ -369,7 +370,7 @@ static int synth_probe(struct spk_synth *synth)
sp = synth_interrogate(synth);
pr_info("%s: %03x-%03x, ROM ver %s, s/n %u, driver: %s\n",
synth->long_name, synth_lpc, synth_lpc+SYNTH_IO_EXTENT - 1,
- sp->rom_version, sp->serial_number, synth->version);
+ sp->rom_version, sp->serial_number, synth->version);
synth->alive = 1;
return 0;
}
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index f3aa4239dc68..01eddab93c66 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -30,9 +30,9 @@ struct speakup_info_t speakup_info = {
* must be taken at each kernel->speakup transition and released at
* each corresponding speakup->kernel transition.
*
- * The progression thread only interferes with the speakup machinery through
- * the synth buffer, so only needs to take the lock while tinkering with
- * the buffer.
+ * The progression thread only interferes with the speakup machinery
+ * through the synth buffer, so only needs to take the lock
+ * while tinkering with the buffer.
*
* We use spin_lock/trylock_irqsave and spin_unlock_irqrestore with this
* spinlock because speakup needs to disable the keyboard IRQ.
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
index ac080c9dcf46..19fcb3465509 100644
--- a/drivers/staging/unisys/Kconfig
+++ b/drivers/staging/unisys/Kconfig
@@ -12,7 +12,6 @@ if UNISYSSPAR
source "drivers/staging/unisys/visorutil/Kconfig"
source "drivers/staging/unisys/visorchannel/Kconfig"
source "drivers/staging/unisys/visorchipset/Kconfig"
-source "drivers/staging/unisys/channels/Kconfig"
source "drivers/staging/unisys/uislib/Kconfig"
source "drivers/staging/unisys/virtpci/Kconfig"
source "drivers/staging/unisys/virthba/Kconfig"
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
index b988d6940aae..68b9925e7d5e 100644
--- a/drivers/staging/unisys/Makefile
+++ b/drivers/staging/unisys/Makefile
@@ -4,7 +4,6 @@
obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil/
obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel/
obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset/
-obj-$(CONFIG_UNISYS_CHANNELSTUB) += channels/
obj-$(CONFIG_UNISYS_UISLIB) += uislib/
obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci/
obj-$(CONFIG_UNISYS_VIRTHBA) += virthba/
diff --git a/drivers/staging/unisys/channels/Kconfig b/drivers/staging/unisys/channels/Kconfig
deleted file mode 100644
index 179c6cea2824..000000000000
--- a/drivers/staging/unisys/channels/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Unisys channels configuration
-#
-
-config UNISYS_CHANNELSTUB
- tristate "Unisys channelstub driver"
- depends on UNISYSSPAR && UNISYS_VISORUTIL
- ---help---
- If you say Y here, you will enable the Unisys channels driver.
-
diff --git a/drivers/staging/unisys/channels/Makefile b/drivers/staging/unisys/channels/Makefile
deleted file mode 100644
index adc184206035..000000000000
--- a/drivers/staging/unisys/channels/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for Unisys channelstub
-#
-
-obj-$(CONFIG_UNISYS_CHANNELSTUB) += visorchannelstub.o
-
-visorchannelstub-y := channel.o chanstub.o
-
-ccflags-y += -Idrivers/staging/unisys/include
-ccflags-y += -Idrivers/staging/unisys/common-spar/include
-ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
diff --git a/drivers/staging/unisys/channels/channel.c b/drivers/staging/unisys/channels/channel.c
deleted file mode 100644
index 74cc4d6b515f..000000000000
--- a/drivers/staging/unisys/channels/channel.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#include <linux/kernel.h>
-#ifdef CONFIG_MODVERSIONS
-#include <config/modversions.h>
-#endif
-#include <linux/module.h>
-#include <linux/init.h> /* for module_init and module_exit */
-#include <linux/slab.h> /* for memcpy */
-#include <linux/types.h>
-
-/* Implementation of exported functions for Supervisor channels */
-#include "channel.h"
-
-/*
- * Routine Description:
- * Tries to insert the prebuilt signal pointed to by pSignal into the nth
- * Queue of the Channel pointed to by pChannel
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- * pSignal: (IN) pointer to the signal
- *
- * Assumptions:
- * - pChannel, Queue and pSignal are valid.
- * - If insertion fails due to a full queue, the caller will determine the
- * retry policy (e.g. wait & try again, report an error, etc.).
- *
- * Return value:
- * 1 if the insertion succeeds, 0 if the queue was full.
- */
-unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
- void *sig)
-{
- void __iomem *psignal;
- unsigned int head, tail, nof;
-
- struct signal_queue_header __iomem *pqhdr =
- (struct signal_queue_header __iomem *)
- ((char __iomem *)ch + readq(&ch->ch_space_offset))
- + queue;
-
- /* capture current head and tail */
- head = readl(&pqhdr->head);
- tail = readl(&pqhdr->tail);
-
- /* queue is full if (head + 1) % n equals tail */
- if (((head + 1) % readl(&pqhdr->max_slots)) == tail) {
- nof = readq(&pqhdr->num_overflows) + 1;
- writeq(nof, &pqhdr->num_overflows);
- return 0;
- }
-
- /* increment the head index */
- head = (head + 1) % readl(&pqhdr->max_slots);
-
- /* copy signal to the head location from the area pointed to
- * by pSignal
- */
- psignal = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
- (head * readl(&pqhdr->signal_size));
- memcpy_toio(psignal, sig, readl(&pqhdr->signal_size));
-
- mb(); /* channel synch */
- writel(head, &pqhdr->head);
-
- writeq(readq(&pqhdr->num_sent) + 1, &pqhdr->num_sent);
- return 1;
-}
-EXPORT_SYMBOL_GPL(spar_signal_insert);
-
-/*
- * Routine Description:
- * Removes one signal from Channel pChannel's nth Queue at the
- * time of the call and copies it into the memory pointed to by
- * pSignal.
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- * pSignal: (IN) pointer to where the signals are to be copied
- *
- * Assumptions:
- * - pChannel and Queue are valid.
- * - pSignal points to a memory area large enough to hold queue's SignalSize
- *
- * Return value:
- * 1 if the removal succeeds, 0 if the queue was empty.
- */
-unsigned char
-spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig)
-{
- void __iomem *psource;
- unsigned int head, tail;
- struct signal_queue_header __iomem *pqhdr =
- (struct signal_queue_header __iomem *)((char __iomem *)ch +
- readq(&ch->ch_space_offset)) + queue;
-
- /* capture current head and tail */
- head = readl(&pqhdr->head);
- tail = readl(&pqhdr->tail);
-
- /* queue is empty if the head index equals the tail index */
- if (head == tail) {
- writeq(readq(&pqhdr->num_empty) + 1, &pqhdr->num_empty);
- return 0;
- }
-
- /* advance past the 'empty' front slot */
- tail = (tail + 1) % readl(&pqhdr->max_slots);
-
- /* copy signal from tail location to the area pointed to by pSignal */
- psource = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
- (tail * readl(&pqhdr->signal_size));
- memcpy_fromio(sig, psource, readl(&pqhdr->signal_size));
-
- mb(); /* channel synch */
- writel(tail, &pqhdr->tail);
-
- writeq(readq(&pqhdr->num_received) + 1,
- &pqhdr->num_received);
- return 1;
-}
-EXPORT_SYMBOL_GPL(spar_signal_remove);
-
-/*
- * Routine Description:
- * Removes all signals present in Channel pChannel's nth Queue at the
- * time of the call and copies them into the memory pointed to by
- * pSignal. Returns the # of signals copied as the value of the routine.
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- * pSignal: (IN) pointer to where the signals are to be copied
- *
- * Assumptions:
- * - pChannel and Queue are valid.
- * - pSignal points to a memory area large enough to hold Queue's MaxSignals
- * # of signals, each of which is Queue's SignalSize.
- *
- * Return value:
- * # of signals copied.
- */
-unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
- void *sig)
-{
- void *psource;
- unsigned int head, tail, count = 0;
- struct signal_queue_header *pqhdr =
- (struct signal_queue_header *)((char *)ch +
- ch->ch_space_offset) + queue;
-
- /* capture current head and tail */
- head = pqhdr->head;
- tail = pqhdr->tail;
-
- /* queue is empty if the head index equals the tail index */
- if (head == tail)
- return 0;
-
- while (head != tail) {
- /* advance past the 'empty' front slot */
- tail = (tail + 1) % pqhdr->max_slots;
-
- /* copy signal from tail location to the area pointed
- * to by pSignal
- */
- psource =
- (char *)pqhdr + pqhdr->sig_base_offset +
- (tail * pqhdr->signal_size);
- memcpy((char *)sig + (pqhdr->signal_size * count),
- psource, pqhdr->signal_size);
-
- mb(); /* channel synch */
- pqhdr->tail = tail;
-
- count++;
- pqhdr->num_received++;
- }
-
- return count;
-}
-
-/*
- * Routine Description:
- * Determine whether a signal queue is empty.
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- *
- * Return value:
- * 1 if the signal queue is empty, 0 otherwise.
- */
-unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
- u32 queue)
-{
- struct signal_queue_header __iomem *pqhdr =
- (struct signal_queue_header __iomem *)((char __iomem *)ch +
- readq(&ch->ch_space_offset)) + queue;
- return readl(&pqhdr->head) == readl(&pqhdr->tail);
-}
-EXPORT_SYMBOL_GPL(spar_signalqueue_empty);
-
diff --git a/drivers/staging/unisys/channels/chanstub.c b/drivers/staging/unisys/channels/chanstub.c
deleted file mode 100644
index b6fd126f16f1..000000000000
--- a/drivers/staging/unisys/channels/chanstub.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#define EXPORT_SYMTAB
-#include <linux/kernel.h>
-#ifdef CONFIG_MODVERSIONS
-#include <config/modversions.h>
-#endif
-#include <linux/module.h>
-#include <linux/init.h> /* for module_init and module_exit */
-#include <linux/slab.h> /* for memcpy */
-#include <linux/types.h>
-
-#include "channel.h"
-#include "chanstub.h"
-#include "timskmod.h"
-#include "version.h"
-
-static __init int
-channel_mod_init(void)
-{
- if (!unisys_spar_platform)
- return -ENODEV;
- return 0;
-}
-
-static __exit void
-channel_mod_exit(void)
-{
-}
-
-unsigned char
-SignalInsert_withLock(struct channel_header __iomem *pChannel, u32 Queue,
- void *pSignal, spinlock_t *lock)
-{
- unsigned char result;
- unsigned long flags;
-
- spin_lock_irqsave(lock, flags);
- result = spar_signal_insert(pChannel, Queue, pSignal);
- spin_unlock_irqrestore(lock, flags);
- return result;
-}
-
-unsigned char
-SignalRemove_withLock(struct channel_header __iomem *pChannel, u32 Queue,
- void *pSignal, spinlock_t *lock)
-{
- unsigned char result;
-
- spin_lock(lock);
- result = spar_signal_remove(pChannel, Queue, pSignal);
- spin_unlock(lock);
- return result;
-}
-
-module_init(channel_mod_init);
-module_exit(channel_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bryan Glaudel");
-MODULE_ALIAS("uischan");
- /* this is extracted during depmod and kept in modules.dep */
diff --git a/drivers/staging/unisys/channels/chanstub.h b/drivers/staging/unisys/channels/chanstub.h
deleted file mode 100644
index 1531759a1b31..000000000000
--- a/drivers/staging/unisys/channels/chanstub.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __CHANSTUB_H__
-#define __CHANSTUB_H__
-unsigned char SignalInsert_withLock(struct channel_header __iomem *pChannel,
- u32 Queue, void *pSignal, spinlock_t *lock);
-unsigned char SignalRemove_withLock(struct channel_header __iomem *pChannel,
- u32 Queue, void *pSignal, spinlock_t *lock);
-
-#endif
diff --git a/drivers/staging/unisys/common-spar/include/version.h b/drivers/staging/unisys/common-spar/include/version.h
index f25208fc3ed1..83d1da7a2f81 100644
--- a/drivers/staging/unisys/common-spar/include/version.h
+++ b/drivers/staging/unisys/common-spar/include/version.h
@@ -30,7 +30,6 @@
#define SPARVER4 "0"
#define VERSION SPARVER1 "." SPARVER2 "." SPARVER3 "." SPARVER4
-#define VERSIONDATE __DATE__
/* Here are various version forms needed in Windows environments.
*/
diff --git a/drivers/staging/unisys/include/timskmod.h b/drivers/staging/unisys/include/timskmod.h
index cff7983dab85..4019a0d63645 100644
--- a/drivers/staging/unisys/include/timskmod.h
+++ b/drivers/staging/unisys/include/timskmod.h
@@ -133,7 +133,7 @@
* x - the number of seconds to sleep.
*/
#define SLEEP(x) \
- do { current->state = TASK_INTERRUPTIBLE; \
+ do { __set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout((x)*HZ); \
} while (0)
@@ -141,7 +141,7 @@
* x - the number of jiffies to sleep.
*/
#define SLEEPJIFFIES(x) \
- do { current->state = TASK_INTERRUPTIBLE; \
+ do { __set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout(x); \
} while (0)
diff --git a/drivers/staging/unisys/uislib/Kconfig b/drivers/staging/unisys/uislib/Kconfig
index 6b134e267904..a712eb82224a 100644
--- a/drivers/staging/unisys/uislib/Kconfig
+++ b/drivers/staging/unisys/uislib/Kconfig
@@ -4,7 +4,7 @@
config UNISYS_UISLIB
tristate "Unisys uislib driver"
- depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_CHANNELSTUB && HAS_IOMEM
+ depends on UNISYSSPAR && UNISYS_VISORCHIPSET && HAS_IOMEM
---help---
If you say Y here, you will enable the Unisys uislib driver.
diff --git a/drivers/staging/unisys/uislib/uislib.c b/drivers/staging/unisys/uislib/uislib.c
index 7c87452a9f14..a9eeddeba735 100644
--- a/drivers/staging/unisys/uislib/uislib.c
+++ b/drivers/staging/unisys/uislib/uislib.c
@@ -41,7 +41,6 @@
#include "sparstop.h"
#include "visorchipset.h"
-#include "chanstub.h"
#include "version.h"
#include "guestlinuxdebug.h"
@@ -59,8 +58,8 @@
/* global function pointers that act as callback functions into virtpcimod */
int (*virt_control_chan_func)(struct guest_msgs *);
-static int ProcReadBufferValid;
-static char *ProcReadBuffer; /* Note this MUST be global,
+static int debug_buf_valid;
+static char *debug_buf; /* Note this MUST be global,
* because the contents must */
static unsigned int chipset_inited;
@@ -71,24 +70,24 @@ static unsigned int chipset_inited;
UIS_THREAD_WAIT; \
} while (1)
-static struct bus_info *BusListHead;
-static rwlock_t BusListLock;
-static int BusListCount; /* number of buses in the list */
-static int MaxBusCount; /* maximum number of buses expected */
-static u64 PhysicalDataChan;
-static int PlatformNumber;
+static struct bus_info *bus_list;
+static rwlock_t bus_list_lock;
+static int bus_list_count; /* number of buses in the list */
+static int max_bus_count; /* maximum number of buses expected */
+static u64 phys_data_chan;
+static int platform_no;
-static struct uisthread_info Incoming_ThreadInfo;
-static BOOL Incoming_Thread_Started = FALSE;
-static LIST_HEAD(List_Polling_Device_Channels);
+static struct uisthread_info incoming_ti;
+static BOOL incoming_started = FALSE;
+static LIST_HEAD(poll_dev_chan);
static unsigned long long tot_moved_to_tail_cnt;
static unsigned long long tot_wait_cnt;
static unsigned long long tot_wakeup_cnt;
static unsigned long long tot_schedule_cnt;
static int en_smart_wakeup = 1;
-static DEFINE_SEMAPHORE(Lock_Polling_Device_Channels); /* unlocked */
-static DECLARE_WAIT_QUEUE_HEAD(Wakeup_Polling_Device_Channels);
-static int Go_Polling_Device_Channels;
+static DEFINE_SEMAPHORE(poll_dev_lock); /* unlocked */
+static DECLARE_WAIT_QUEUE_HEAD(poll_dev_wake_q);
+static int poll_dev_start;
#define CALLHOME_PROC_ENTRY_FN "callhome"
#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"
@@ -115,7 +114,7 @@ static unsigned long long cycles_before_wait, wait_cycles;
/*****************************************************/
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
+ size_t len, loff_t *offset);
static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
};
@@ -129,58 +128,52 @@ init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
msg->hdr.flags.server = svr;
}
-static __iomem void *
-init_vbus_channel(u64 channelAddr, u32 channelBytes)
+static __iomem void *init_vbus_channel(u64 ch_addr, u32 ch_bytes)
{
- void __iomem *rc = NULL;
- void __iomem *pChan = uislib_ioremap_cache(channelAddr, channelBytes);
+ void __iomem *ch = uislib_ioremap_cache(ch_addr, ch_bytes);
- if (!pChan) {
+ if (!ch) {
LOGERR("CONTROLVM_BUS_CREATE error: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
- (unsigned long long) channelAddr,
- (unsigned long long) channelBytes);
- rc = NULL;
- goto Away;
+ (unsigned long long)ch_addr,
+ (unsigned long long)ch_bytes);
+ return NULL;
}
- if (!SPAR_VBUS_CHANNEL_OK_CLIENT(pChan)) {
+ if (!SPAR_VBUS_CHANNEL_OK_CLIENT(ch)) {
ERRDRV("%s channel cannot be used", __func__);
- uislib_iounmap(pChan);
- rc = NULL;
- goto Away;
+ uislib_iounmap(ch);
+ return NULL;
}
- rc = pChan;
-Away:
- return rc;
+ return ch;
}
static int
create_bus(struct controlvm_message *msg, char *buf)
{
- u32 busNo, deviceCount;
+ u32 bus_no, dev_count;
struct bus_info *tmp, *bus;
size_t size;
- if (MaxBusCount == BusListCount) {
+ if (max_bus_count == bus_list_count) {
LOGERR("CONTROLVM_BUS_CREATE Failed: max buses:%d already created\n",
- MaxBusCount);
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, MaxBusCount,
+ max_bus_count);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, max_bus_count,
POSTCODE_SEVERITY_ERR);
return CONTROLVM_RESP_ERROR_MAX_BUSES;
}
- busNo = msg->cmd.create_bus.bus_no;
- deviceCount = msg->cmd.create_bus.dev_count;
+ bus_no = msg->cmd.create_bus.bus_no;
+ dev_count = msg->cmd.create_bus.dev_count;
- POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, busNo, deviceCount,
+ POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, bus_no, dev_count,
POSTCODE_SEVERITY_INFO);
size =
sizeof(struct bus_info) +
- (deviceCount * sizeof(struct device_info *));
+ (dev_count * sizeof(struct device_info *));
bus = kzalloc(size, GFP_ATOMIC);
if (!bus) {
LOGERR("CONTROLVM_BUS_CREATE Failed: kmalloc for bus failed.\n");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
}
@@ -191,27 +184,29 @@ create_bus(struct controlvm_message *msg, char *buf)
if (msg->hdr.flags.test_message) {
/* This implies we're the IOVM so set guest handle to 0... */
bus->guest_handle = 0;
- bus->bus_no = busNo;
+ bus->bus_no = bus_no;
bus->local_vnic = 1;
- } else
- bus->bus_no = bus->guest_handle = busNo;
- sprintf(bus->name, "%d", (int) bus->bus_no);
- bus->device_count = deviceCount;
+ } else {
+ bus->bus_no = bus_no;
+ bus->guest_handle = bus_no;
+ }
+ sprintf(bus->name, "%d", (int)bus->bus_no);
+ bus->device_count = dev_count;
bus->device =
- (struct device_info **) ((char *) bus + sizeof(struct bus_info));
+ (struct device_info **)((char *)bus + sizeof(struct bus_info));
bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
bus->bus_channel_bytes = 0;
bus->bus_channel = NULL;
/* add bus to our bus list - but check for duplicates first */
- read_lock(&BusListLock);
- for (tmp = BusListHead; tmp; tmp = tmp->next) {
+ read_lock(&bus_list_lock);
+ for (tmp = bus_list; tmp; tmp = tmp->next) {
if (tmp->bus_no == bus->bus_no)
break;
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
if (tmp) {
- /* found a bus already in the list with same busNo -
+ /* found a bus already in the list with same bus_no -
* reject add
*/
LOGERR("CONTROLVM_BUS_CREATE Failed: bus %d already exists.\n",
@@ -221,8 +216,8 @@ create_bus(struct controlvm_message *msg, char *buf)
kfree(bus);
return CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
- if ((msg->cmd.create_bus.channel_addr != 0)
- && (msg->cmd.create_bus.channel_bytes != 0)) {
+ if ((msg->cmd.create_bus.channel_addr != 0) &&
+ (msg->cmd.create_bus.channel_bytes != 0)) {
bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
bus->bus_channel =
init_vbus_channel(msg->cmd.create_bus.channel_addr,
@@ -233,9 +228,9 @@ create_bus(struct controlvm_message *msg, char *buf)
struct guest_msgs cmd;
cmd.msgtype = GUEST_ADD_VBUS;
- cmd.add_vbus.bus_no = busNo;
+ cmd.add_vbus.bus_no = bus_no;
cmd.add_vbus.chanptr = bus->bus_channel;
- cmd.add_vbus.dev_count = deviceCount;
+ cmd.add_vbus.dev_count = dev_count;
cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
if (!virt_control_chan_func) {
@@ -256,15 +251,15 @@ create_bus(struct controlvm_message *msg, char *buf)
}
/* add bus at the head of our list */
- write_lock(&BusListLock);
- if (!BusListHead)
- BusListHead = bus;
- else {
- bus->next = BusListHead;
- BusListHead = bus;
+ write_lock(&bus_list_lock);
+ if (!bus_list) {
+ bus_list = bus;
+ } else {
+ bus->next = bus_list;
+ bus_list = bus;
}
- BusListCount++;
- write_unlock(&BusListLock);
+ bus_list_count++;
+ write_unlock(&bus_list_lock);
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
POSTCODE_SEVERITY_INFO);
@@ -277,15 +272,15 @@ destroy_bus(struct controlvm_message *msg, char *buf)
int i;
struct bus_info *bus, *prev = NULL;
struct guest_msgs cmd;
- u32 busNo;
+ u32 bus_no;
- busNo = msg->cmd.destroy_bus.bus_no;
+ bus_no = msg->cmd.destroy_bus.bus_no;
- read_lock(&BusListLock);
+ read_lock(&bus_list_lock);
- bus = BusListHead;
+ bus = bus_list;
while (bus) {
- if (bus->bus_no == busNo)
+ if (bus->bus_no == bus_no)
break;
prev = bus;
bus = bus->next;
@@ -293,8 +288,8 @@ destroy_bus(struct controlvm_message *msg, char *buf)
if (!bus) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: failed to find bus %d.\n",
- busNo);
- read_unlock(&BusListLock);
+ bus_no);
+ read_unlock(&bus_list_lock);
return CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
@@ -302,12 +297,12 @@ destroy_bus(struct controlvm_message *msg, char *buf)
for (i = 0; i < bus->device_count; i++) {
if (bus->device[i] != NULL) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: device %i attached to bus %d.",
- i, busNo);
- read_unlock(&BusListLock);
+ i, bus_no);
+ read_unlock(&bus_list_lock);
return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
}
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
if (msg->hdr.flags.server)
goto remove;
@@ -315,7 +310,7 @@ destroy_bus(struct controlvm_message *msg, char *buf)
/* client messages require us to call the virtpci callback associated
with this bus. */
cmd.msgtype = GUEST_DEL_VBUS;
- cmd.del_vbus.bus_no = busNo;
+ cmd.del_vbus.bus_no = bus_no;
if (!virt_control_chan_func) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
@@ -327,13 +322,13 @@ destroy_bus(struct controlvm_message *msg, char *buf)
/* finally, remove the bus from the list */
remove:
- write_lock(&BusListLock);
+ write_lock(&bus_list_lock);
if (prev) /* not at head */
prev->next = bus->next;
else
- BusListHead = bus->next;
- BusListCount--;
- write_unlock(&BusListLock);
+ bus_list = bus->next;
+ bus_list_count--;
+ write_unlock(&bus_list_lock);
if (bus->bus_channel) {
uislib_iounmap(bus->bus_channel);
@@ -344,26 +339,26 @@ remove:
return CONTROLVM_RESP_SUCCESS;
}
-static int
-create_device(struct controlvm_message *msg, char *buf)
+static int create_device(struct controlvm_message *msg, char *buf)
{
struct device_info *dev;
struct bus_info *bus;
- u32 busNo, devNo;
+ struct guest_msgs cmd;
+ u32 bus_no, dev_no;
int result = CONTROLVM_RESP_SUCCESS;
- u64 minSize = MIN_IO_CHANNEL_SIZE;
- struct req_handler_info *pReqHandler;
+ u64 min_size = MIN_IO_CHANNEL_SIZE;
+ struct req_handler_info *req_handler;
- busNo = msg->cmd.create_device.bus_no;
- devNo = msg->cmd.create_device.dev_no;
+ bus_no = msg->cmd.create_device.bus_no;
+ dev_no = msg->cmd.create_device.dev_no;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
- dev = kzalloc(sizeof(struct device_info), GFP_ATOMIC);
+ dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
if (!dev) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: kmalloc for dev failed.\n");
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
}
@@ -371,169 +366,157 @@ create_device(struct controlvm_message *msg, char *buf)
dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
dev->intr = msg->cmd.create_device.intr;
dev->channel_addr = msg->cmd.create_device.channel_addr;
- dev->bus_no = busNo;
- dev->dev_no = devNo;
+ dev->bus_no = bus_no;
+ dev->dev_no = dev_no;
sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
- sprintf(dev->devid, "vbus%u:dev%u", (unsigned) busNo, (unsigned) devNo);
+ sprintf(dev->devid, "vbus%u:dev%u", (unsigned)bus_no, (unsigned)dev_no);
/* map the channel memory for the device. */
- if (msg->hdr.flags.test_message)
+ if (msg->hdr.flags.test_message) {
dev->chanptr = (void __iomem *)__va(dev->channel_addr);
- else {
- pReqHandler = req_handler_find(dev->channel_uuid);
- if (pReqHandler)
+ } else {
+ req_handler = req_handler_find(dev->channel_uuid);
+ if (req_handler)
/* generic service handler registered for this
* channel
*/
- minSize = pReqHandler->min_channel_bytes;
- if (minSize > msg->cmd.create_device.channel_bytes) {
+ min_size = req_handler->min_channel_bytes;
+ if (min_size > msg->cmd.create_device.channel_bytes) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: channel size is too small, channel size:0x%lx, required size:0x%lx",
- (ulong) msg->cmd.create_device.channel_bytes,
- (ulong) minSize);
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
- POSTCODE_SEVERITY_ERR);
+ (ulong)msg->cmd.create_device.channel_bytes,
+ (ulong)min_size);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
- goto Away;
+ goto cleanup;
}
dev->chanptr =
uislib_ioremap_cache(dev->channel_addr,
msg->cmd.create_device.channel_bytes);
if (!dev->chanptr) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
- dev->channel_addr,
- msg->cmd.create_device.channel_bytes);
+ dev->channel_addr,
+ msg->cmd.create_device.channel_bytes);
result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
- POSTCODE_SEVERITY_ERR);
- goto Away;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ goto cleanup;
}
}
dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
dev->channel_bytes = msg->cmd.create_device.channel_bytes;
- read_lock(&BusListLock);
- for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->bus_no == busNo) {
- /* make sure the device number is valid */
- if (devNo >= bus->device_count) {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: device (%d) >= deviceCount (%d).",
- devNo, bus->device_count);
- result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no != bus_no)
+ continue;
+ /* make sure the device number is valid */
+ if (dev_no >= bus->device_count) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: device (%d) >= deviceCount (%d).",
+ dev_no, bus->device_count);
+ result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ read_unlock(&bus_list_lock);
+ goto cleanup;
+ }
+ /* make sure this device is not already set */
+ if (bus->device[dev_no]) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: device %d is already exists.",
+ dev_no);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ read_unlock(&bus_list_lock);
+ goto cleanup;
+ }
+ read_unlock(&bus_list_lock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (msg->hdr.flags.server) {
+ bus->device[dev_no] = dev;
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+ }
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
+ wait_for_valid_guid(&((struct channel_header __iomem *)
+ (dev->chanptr))->chtype);
+ if (!SPAR_VHBA_CHANNEL_OK_CLIENT(dev->chanptr)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed:[CLIENT]VHBA dev %d chan invalid.",
+ dev_no);
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
- devNo, busNo,
+ dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
- read_unlock(&BusListLock);
- goto Away;
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto cleanup;
}
- /* make sure this device is not already set */
- if (bus->device[devNo]) {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: device %d is already exists.",
- devNo);
+ cmd.msgtype = GUEST_ADD_VHBA;
+ cmd.add_vhba.chanptr = dev->chanptr;
+ cmd.add_vhba.bus_no = bus_no;
+ cmd.add_vhba.device_no = dev_no;
+ cmd.add_vhba.instance_uuid = dev->instance_uuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
+ wait_for_valid_guid(&((struct channel_header __iomem *)
+ (dev->chanptr))->chtype);
+ if (!SPAR_VNIC_CHANNEL_OK_CLIENT(dev->chanptr)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: VNIC[CLIENT] dev %d chan invalid.",
+ dev_no);
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
- devNo, busNo,
+ dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
- result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
- read_unlock(&BusListLock);
- goto Away;
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto cleanup;
}
- read_unlock(&BusListLock);
- /* the msg is bound for virtpci; send
- * guest_msgs struct to callback
- */
- if (!msg->hdr.flags.server) {
- struct guest_msgs cmd;
-
- if (!uuid_le_cmp(dev->channel_uuid,
- spar_vhba_channel_protocol_uuid)) {
- wait_for_valid_guid(&((
- struct channel_header
- __iomem *) (dev->
- chanptr))->
- chtype);
- if (!SPAR_VHBA_CHANNEL_OK_CLIENT
- (dev->chanptr)) {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed:[CLIENT]VHBA dev %d chan invalid.",
- devNo);
- POSTCODE_LINUX_4
- (DEVICE_CREATE_FAILURE_PC,
- devNo, busNo,
- POSTCODE_SEVERITY_ERR);
- result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
- goto Away;
- }
- cmd.msgtype = GUEST_ADD_VHBA;
- cmd.add_vhba.chanptr = dev->chanptr;
- cmd.add_vhba.bus_no = busNo;
- cmd.add_vhba.device_no = devNo;
- cmd.add_vhba.instance_uuid =
- dev->instance_uuid;
- cmd.add_vhba.intr = dev->intr;
- } else
- if (!uuid_le_cmp(dev->channel_uuid,
- spar_vnic_channel_protocol_uuid)) {
- wait_for_valid_guid(&((
- struct channel_header
- __iomem *) (dev->
- chanptr))->
- chtype);
- if (!SPAR_VNIC_CHANNEL_OK_CLIENT
- (dev->chanptr)) {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: VNIC[CLIENT] dev %d chan invalid.",
- devNo);
- POSTCODE_LINUX_4
- (DEVICE_CREATE_FAILURE_PC,
- devNo, busNo,
- POSTCODE_SEVERITY_ERR);
- result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
- goto Away;
- }
- cmd.msgtype = GUEST_ADD_VNIC;
- cmd.add_vnic.chanptr = dev->chanptr;
- cmd.add_vnic.bus_no = busNo;
- cmd.add_vnic.device_no = devNo;
- cmd.add_vnic.instance_uuid =
- dev->instance_uuid;
- cmd.add_vhba.intr = dev->intr;
- } else {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: unknown channelTypeGuid.\n");
- POSTCODE_LINUX_4
- (DEVICE_CREATE_FAILURE_PC, devNo,
- busNo, POSTCODE_SEVERITY_ERR);
- result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
- goto Away;
- }
+ cmd.msgtype = GUEST_ADD_VNIC;
+ cmd.add_vnic.chanptr = dev->chanptr;
+ cmd.add_vnic.bus_no = bus_no;
+ cmd.add_vnic.device_no = dev_no;
+ cmd.add_vnic.instance_uuid = dev->instance_uuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: unknown channelTypeGuid.\n");
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ goto cleanup;
+ }
- if (!virt_control_chan_func) {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci callback not registered.");
- POSTCODE_LINUX_4
- (DEVICE_CREATE_FAILURE_PC, devNo,
- busNo, POSTCODE_SEVERITY_ERR);
- result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
- goto Away;
- }
+ if (!virt_control_chan_func) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci callback not registered.");
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ goto cleanup;
+ }
- if (!virt_control_chan_func(&cmd)) {
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci GUEST_ADD_[VHBA||VNIC] returned error.");
- POSTCODE_LINUX_4
- (DEVICE_CREATE_FAILURE_PC, devNo,
- busNo, POSTCODE_SEVERITY_ERR);
- result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
- goto Away;
- }
- }
- bus->device[devNo] = dev;
- POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, devNo, busNo,
- POSTCODE_SEVERITY_INFO);
- return CONTROLVM_RESP_SUCCESS;
+ if (!virt_control_chan_func(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci GUEST_ADD_[VHBA||VNIC] returned error.");
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result =
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ goto cleanup;
}
+
+ bus->device[dev_no] = dev;
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
- LOGERR("CONTROLVM_DEVICE_CREATE Failed: failed to find bus %d.", busNo);
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: failed to find bus %d.",
+ bus_no);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
result = CONTROLVM_RESP_ERROR_BUS_INVALID;
-Away:
+cleanup:
if (!msg->hdr.flags.test_message) {
uislib_iounmap(dev->chanptr);
dev->chanptr = NULL;
@@ -543,32 +526,31 @@ Away:
return result;
}
-static int
-pause_device(struct controlvm_message *msg)
+static int pause_device(struct controlvm_message *msg)
{
- u32 busNo, devNo;
+ u32 bus_no, dev_no;
struct bus_info *bus;
struct device_info *dev;
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.device_change_state.bus_no;
- devNo = msg->cmd.device_change_state.dev_no;
+ bus_no = msg->cmd.device_change_state.bus_no;
+ dev_no = msg->cmd.device_change_state.dev_no;
- read_lock(&BusListLock);
- for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->bus_no == busNo) {
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
/* make sure the device number is valid */
- if (devNo >= bus->device_count) {
+ if (dev_no >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->device_count);
+ dev_no, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
- dev = bus->device[devNo];
+ dev = bus->device[dev_no];
if (!dev) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device %d does not exist.",
- devNo);
+ dev_no);
retval =
CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
@@ -578,20 +560,20 @@ pause_device(struct controlvm_message *msg)
}
if (!bus) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: bus %d does not exist",
- busNo);
+ bus_no);
retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
if (retval == CONTROLVM_RESP_SUCCESS) {
/* the msg is bound for virtpci; send
* guest_msgs struct to callback
*/
- if (!uuid_le_cmp(dev->channel_uuid,
- spar_vhba_channel_protocol_uuid)) {
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
cmd.msgtype = GUEST_PAUSE_VHBA;
cmd.pause_vhba.chanptr = dev->chanptr;
- } else if (!uuid_le_cmp(dev->channel_uuid,
- spar_vnic_channel_protocol_uuid)) {
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
cmd.msgtype = GUEST_PAUSE_VNIC;
cmd.pause_vnic.chanptr = dev->chanptr;
} else {
@@ -611,32 +593,31 @@ pause_device(struct controlvm_message *msg)
return retval;
}
-static int
-resume_device(struct controlvm_message *msg)
+static int resume_device(struct controlvm_message *msg)
{
- u32 busNo, devNo;
+ u32 bus_no, dev_no;
struct bus_info *bus;
struct device_info *dev;
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.device_change_state.bus_no;
- devNo = msg->cmd.device_change_state.dev_no;
+ bus_no = msg->cmd.device_change_state.bus_no;
+ dev_no = msg->cmd.device_change_state.dev_no;
- read_lock(&BusListLock);
- for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->bus_no == busNo) {
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
/* make sure the device number is valid */
- if (devNo >= bus->device_count) {
+ if (dev_no >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->device_count);
+ dev_no, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
- dev = bus->device[devNo];
+ dev = bus->device[dev_no];
if (!dev) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device %d does not exist.",
- devNo);
+ dev_no);
retval =
CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
@@ -647,20 +628,20 @@ resume_device(struct controlvm_message *msg)
if (!bus) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: bus %d does not exist",
- busNo);
+ bus_no);
retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
/* the msg is bound for virtpci; send
* guest_msgs struct to callback
*/
if (retval == CONTROLVM_RESP_SUCCESS) {
- if (!uuid_le_cmp(dev->channel_uuid,
- spar_vhba_channel_protocol_uuid)) {
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
cmd.msgtype = GUEST_RESUME_VHBA;
cmd.resume_vhba.chanptr = dev->chanptr;
- } else if (!uuid_le_cmp(dev->channel_uuid,
- spar_vnic_channel_protocol_uuid)) {
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
cmd.msgtype = GUEST_RESUME_VNIC;
cmd.resume_vnic.chanptr = dev->chanptr;
} else {
@@ -680,33 +661,33 @@ resume_device(struct controlvm_message *msg)
return retval;
}
-static int
-destroy_device(struct controlvm_message *msg, char *buf)
+static int destroy_device(struct controlvm_message *msg, char *buf)
{
- u32 busNo, devNo;
+ u32 bus_no, dev_no;
struct bus_info *bus;
struct device_info *dev;
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.destroy_device.bus_no;
- devNo = msg->cmd.destroy_device.bus_no;
+ bus_no = msg->cmd.destroy_device.bus_no;
+ dev_no = msg->cmd.destroy_device.dev_no;
- read_lock(&BusListLock);
- LOGINF("destroy_device called for busNo=%u, devNo=%u", busNo, devNo);
- for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->bus_no == busNo) {
+ read_lock(&bus_list_lock);
+ LOGINF("destroy_device called for bus_no=%u, dev_no=%u", bus_no,
+ dev_no);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
/* make sure the device number is valid */
- if (devNo >= bus->device_count) {
- LOGERR("CONTROLVM_DEVICE_DESTORY Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->device_count);
+ if (dev_no >= bus->device_count) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: device(%d) >= device_count(%d).",
+ dev_no, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
- dev = bus->device[devNo];
+ dev = bus->device[dev_no];
if (!dev) {
LOGERR("CONTROLVM_DEVICE_DESTROY Failed: device %d does not exist.",
- devNo);
+ dev_no);
retval =
CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
@@ -717,20 +698,20 @@ destroy_device(struct controlvm_message *msg, char *buf)
if (!bus) {
LOGERR("CONTROLVM_DEVICE_DESTROY Failed: bus %d does not exist",
- busNo);
+ bus_no);
retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
if (retval == CONTROLVM_RESP_SUCCESS) {
/* the msg is bound for virtpci; send
* guest_msgs struct to callback
*/
- if (!uuid_le_cmp(dev->channel_uuid,
- spar_vhba_channel_protocol_uuid)) {
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
cmd.msgtype = GUEST_DEL_VHBA;
cmd.del_vhba.chanptr = dev->chanptr;
- } else if (!uuid_le_cmp(dev->channel_uuid,
- spar_vnic_channel_protocol_uuid)) {
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
cmd.msgtype = GUEST_DEL_VNIC;
cmd.del_vnic.chanptr = dev->chanptr;
} else {
@@ -739,7 +720,7 @@ destroy_device(struct controlvm_message *msg, char *buf)
CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
if (!virt_control_chan_func) {
- LOGERR("CONTROLVM_DEVICE_DESTORY Failed: virtpci callback not registered.");
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: virtpci callback not registered.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
@@ -755,7 +736,7 @@ destroy_device(struct controlvm_message *msg, char *buf)
*/
if (dev->polling) {
LOGINF("calling uislib_disable_channel_interrupts");
- uislib_disable_channel_interrupts(busNo, devNo);
+ uislib_disable_channel_interrupts(bus_no, dev_no);
}
/* unmap the channel memory for the device. */
if (!msg->hdr.flags.test_message) {
@@ -763,7 +744,7 @@ destroy_device(struct controlvm_message *msg, char *buf)
uislib_iounmap(dev->chanptr);
}
kfree(dev);
- bus->device[devNo] = NULL;
+ bus->device[dev_no] = NULL;
}
return retval;
}
@@ -773,9 +754,9 @@ init_chipset(struct controlvm_message *msg, char *buf)
{
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
- MaxBusCount = msg->cmd.init_chipset.bus_count;
- PlatformNumber = msg->cmd.init_chipset.platform_number;
- PhysicalDataChan = 0;
+ max_bus_count = msg->cmd.init_chipset.bus_count;
+ platform_no = msg->cmd.init_chipset.platform_number;
+ phys_data_chan = 0;
/* We need to make sure we have our functions registered
* before processing messages. If we are a test vehicle the
@@ -793,31 +774,29 @@ init_chipset(struct controlvm_message *msg, char *buf)
return CONTROLVM_RESP_SUCCESS;
}
-static int
-delete_bus_glue(u32 busNo)
+static int delete_bus_glue(u32 bus_no)
{
struct controlvm_message msg;
init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.cmd.destroy_bus.bus_no = busNo;
+ msg.cmd.destroy_bus.bus_no = bus_no;
if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- LOGERR("destroy_bus failed. busNo=0x%x\n", busNo);
+ LOGERR("destroy_bus failed. bus_no=0x%x\n", bus_no);
return 0;
}
return 1;
}
-static int
-delete_device_glue(u32 busNo, u32 devNo)
+static int delete_device_glue(u32 bus_no, u32 dev_no)
{
struct controlvm_message msg;
init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
- msg.cmd.destroy_device.bus_no = busNo;
- msg.cmd.destroy_device.dev_no = devNo;
+ msg.cmd.destroy_device.bus_no = bus_no;
+ msg.cmd.destroy_device.dev_no = dev_no;
if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- LOGERR("destroy_device failed. busNo=0x%x devNo=0x%x\n", busNo,
- devNo);
+ LOGERR("destroy_device failed. bus_no=0x%x dev_no=0x%x\n",
+ bus_no, dev_no);
return 0;
}
return 1;
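
Editorial sketch, not part of the patch: delete_bus_glue() and delete_device_glue() above follow one small pattern, build a controlvm message locally, hand it to the normal handler, and collapse the handler's response code into a 0/1 result. The demo_* names and the command value below are hypothetical stand-ins.

#include <stdio.h>
#include <string.h>

#define DEMO_RESP_SUCCESS 0

struct demo_msg {
	int cmd;
	unsigned int bus_no;
	unsigned int dev_no;
};

static int demo_destroy_device(struct demo_msg *msg)
{
	(void)msg;
	/* a real handler would tear the device down; here we just accept it */
	return DEMO_RESP_SUCCESS;
}

static int demo_delete_device_glue(unsigned int bus_no, unsigned int dev_no)
{
	struct demo_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = 42;			/* stands in for CONTROLVM_DEVICE_DESTROY */
	msg.bus_no = bus_no;
	msg.dev_no = dev_no;
	if (demo_destroy_device(&msg) != DEMO_RESP_SUCCESS) {
		fprintf(stderr, "destroy failed: bus 0x%x dev 0x%x\n", bus_no, dev_no);
		return 0;		/* failure */
	}
	return 1;			/* success */
}

int main(void)
{
	return demo_delete_device_glue(0, 0) ? 0 : 1;
}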
@@ -874,7 +853,6 @@ uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
-
int
uislib_client_inject_del_bus(u32 bus_no)
{
@@ -919,7 +897,6 @@ uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
return rc;
}
return 0;
-
}
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
@@ -956,7 +933,7 @@ uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
msg.cmd.create_device.channel_addr = phys_chan_addr;
if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
- chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
+ chan_bytes, (unsigned int)MIN_IO_CHANNEL_SIZE);
POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
return 0;
@@ -1015,7 +992,7 @@ uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
msg.cmd.create_device.channel_addr = phys_chan_addr;
if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
- chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
+ chan_bytes, (unsigned int)MIN_IO_CHANNEL_SIZE);
POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
return 0;
@@ -1072,7 +1049,6 @@ uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
return -1;
}
return 0;
-
}
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
@@ -1129,14 +1105,12 @@ info_debugfs_read_helper(char **buff, int *buff_len)
if (PLINE("\nBuses:\n") < 0)
goto err_done;
- read_lock(&BusListLock);
- for (bus = BusListHead; bus; bus = bus->next) {
-
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
bus, bus->bus_no, bus->device_count) < 0)
goto err_done_unlock;
-
if (PLINE(" Devices:\n") < 0)
goto err_done_unlock;
@@ -1156,7 +1130,7 @@ info_debugfs_read_helper(char **buff, int *buff_len)
}
}
}
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
if (PLINE("UisUtils_Registered_Services: %d\n",
atomic_read(&uisutils_registered_services)) < 0)
@@ -1175,70 +1149,68 @@ info_debugfs_read_helper(char **buff, int *buff_len)
return tot;
err_done_unlock:
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
err_done:
return -1;
}
-static ssize_t
-info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
{
char *temp;
- int totalBytes = 0;
+ int total_bytes = 0;
int remaining_bytes = PROC_READ_BUFFER_SIZE;
/* *start = buf; */
- if (ProcReadBuffer == NULL) {
- DBGINF("ProcReadBuffer == NULL; allocating buffer.\n.");
- ProcReadBuffer = vmalloc(PROC_READ_BUFFER_SIZE);
+ if (debug_buf == NULL) {
+ DBGINF("debug_buf == NULL; allocating buffer.\n.");
+ debug_buf = vmalloc(PROC_READ_BUFFER_SIZE);
- if (ProcReadBuffer == NULL) {
+ if (debug_buf == NULL) {
LOGERR("failed to allocate buffer to provide proc data.\n");
return -ENOMEM;
}
}
- temp = ProcReadBuffer;
+ temp = debug_buf;
- if ((*offset == 0) || (!ProcReadBufferValid)) {
+ if ((*offset == 0) || (!debug_buf_valid)) {
DBGINF("calling info_debugfs_read_helper.\n");
/* if the read fails, then -1 will be returned */
- totalBytes = info_debugfs_read_helper(&temp, &remaining_bytes);
- ProcReadBufferValid = 1;
- } else
- totalBytes = strlen(ProcReadBuffer);
+ total_bytes = info_debugfs_read_helper(&temp, &remaining_bytes);
+ debug_buf_valid = 1;
+ } else {
+ total_bytes = strlen(debug_buf);
+ }
return simple_read_from_buffer(buf, len, offset,
- ProcReadBuffer, totalBytes);
+ debug_buf, total_bytes);
}
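
Editorial sketch, not part of the patch: info_debugfs_read() above regenerates a text snapshot into a vmalloc'd buffer on the first read and then serves later reads from that same snapshot through simple_read_from_buffer(). The userspace code below mimics only the helper's basic copy-and-advance-offset behaviour; demo_read_from_buffer() is a stand-in, not the kernel API.

#include <stdio.h>
#include <string.h>

static long demo_read_from_buffer(char *to, size_t count, long *ppos,
				  const char *from, size_t available)
{
	size_t pos = (size_t)*ppos;
	size_t n;

	if (pos >= available)
		return 0;		/* EOF for this snapshot */
	n = available - pos;
	if (n > count)
		n = count;
	memcpy(to, from + pos, n);
	*ppos += (long)n;
	return (long)n;
}

int main(void)
{
	const char snapshot[] = "Buses:\n bus=0x0, busNo=0, deviceCount=2\n";
	char chunk[16];
	long pos = 0;
	long n;

	/* a reader consuming the debugfs file in small chunks */
	while ((n = demo_read_from_buffer(chunk, sizeof(chunk) - 1, &pos,
					  snapshot, strlen(snapshot))) > 0) {
		chunk[n] = '\0';
		fputs(chunk, stdout);
	}
	return 0;
}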
-static struct device_info *
-find_dev(u32 busNo, u32 devNo)
+static struct device_info *find_dev(u32 bus_no, u32 dev_no)
{
struct bus_info *bus;
struct device_info *dev = NULL;
- read_lock(&BusListLock);
- for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->bus_no == busNo) {
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
/* make sure the device number is valid */
- if (devNo >= bus->device_count) {
- LOGERR("%s bad busNo, devNo=%d,%d",
+ if (dev_no >= bus->device_count) {
+ LOGERR("%s bad bus_no, dev_no=%d,%d",
__func__,
- (int) (busNo), (int) (devNo));
- goto Away;
+ (int)bus_no, (int)dev_no);
+ break;
}
- dev = bus->device[devNo];
+ dev = bus->device[dev_no];
if (!dev)
- LOGERR("%s bad busNo, devNo=%d,%d",
+ LOGERR("%s bad bus_no, dev_no=%d,%d",
__func__,
- (int) (busNo), (int) (devNo));
- goto Away;
+ (int)bus_no, (int)dev_no);
+ break;
}
}
-Away:
- read_unlock(&BusListLock);
+ read_unlock(&bus_list_lock);
return dev;
}
@@ -1262,8 +1234,7 @@ Away:
* less-busy ones.
*
*/
-static int
-Process_Incoming(void *v)
+static int process_incoming(void *v)
{
unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
struct list_head *new_tail = NULL;
@@ -1272,7 +1243,7 @@ Process_Incoming(void *v)
UIS_DAEMONIZE("dev_incoming");
for (i = 0; i < 16; i++) {
old_cycles = get_cycles();
- wait_event_timeout(Wakeup_Polling_Device_Channels,
+ wait_event_timeout(poll_dev_wake_q,
0, POLLJIFFIES_NORMAL);
cur_cycles = get_cycles();
if (wait_cycles == 0) {
@@ -1285,15 +1256,15 @@ Process_Incoming(void *v)
LOGINF("wait_cycles=%llu", wait_cycles);
cycles_before_wait = wait_cycles;
idle_cycles = 0;
- Go_Polling_Device_Channels = 0;
+ poll_dev_start = 0;
while (1) {
struct list_head *lelt, *tmp;
struct device_info *dev = NULL;
/* poll each channel for input */
- down(&Lock_Polling_Device_Channels);
+ down(&poll_dev_lock);
new_tail = NULL;
- list_for_each_safe(lelt, tmp, &List_Polling_Device_Channels) {
+ list_for_each_safe(lelt, tmp, &poll_dev_chan) {
int rc = 0;
dev = list_entry(lelt, struct device_info,
@@ -1315,22 +1286,22 @@ Process_Incoming(void *v)
if (!
(list_is_last
(lelt,
- &List_Polling_Device_Channels))) {
+ &poll_dev_chan))) {
new_tail = lelt;
dev->moved_to_tail_cnt++;
- } else
+ } else {
dev->last_on_list_cnt++;
+ }
}
-
}
- if (Incoming_ThreadInfo.should_stop)
+ if (incoming_ti.should_stop)
break;
}
if (new_tail != NULL) {
tot_moved_to_tail_cnt++;
- list_move_tail(new_tail, &List_Polling_Device_Channels);
+ list_move_tail(new_tail, &poll_dev_chan);
}
- up(&Lock_Polling_Device_Channels);
+ up(&poll_dev_lock);
cur_cycles = get_cycles();
delta_cycles = cur_cycles - old_cycles;
old_cycles = cur_cycles;
@@ -1340,24 +1311,24 @@ Process_Incoming(void *v)
* - there is no input waiting on any of the channels
* - we have received a signal to stop this thread
*/
- if (Incoming_ThreadInfo.should_stop)
+ if (incoming_ti.should_stop)
break;
if (en_smart_wakeup == 0xFF) {
LOGINF("en_smart_wakeup set to 0xff, to force exiting process_incoming");
break;
}
/* wait for POLLJIFFIES_NORMAL jiffies, or until
- * someone wakes up Wakeup_Polling_Device_Channels,
+ * someone wakes up poll_dev_wake_q,
* whichever comes first only do a wait when we have
* been idle for cycles_before_wait cycles.
*/
if (idle_cycles > cycles_before_wait) {
- Go_Polling_Device_Channels = 0;
+ poll_dev_start = 0;
tot_wait_cnt++;
- wait_event_timeout(Wakeup_Polling_Device_Channels,
- Go_Polling_Device_Channels,
+ wait_event_timeout(poll_dev_wake_q,
+ poll_dev_start,
POLLJIFFIES_NORMAL);
- Go_Polling_Device_Channels = 1;
+ poll_dev_start = 1;
} else {
tot_schedule_cnt++;
schedule();
@@ -1365,25 +1336,25 @@ Process_Incoming(void *v)
}
}
DBGINF("exiting.\n");
- complete_and_exit(&Incoming_ThreadInfo.has_stopped, 0);
+ complete_and_exit(&incoming_ti.has_stopped, 0);
}
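
Editorial sketch, not part of the patch: process_incoming() above polls the registered channels in a tight loop while work keeps arriving, and only drops into a timed wait once it has been idle for more than cycles_before_wait; uislib_force_channel_interrupt() later supplies the wakeup hint. The single-threaded sketch below models just that policy with a fake workload; the constant and the names are illustrative only.

#include <stdio.h>

#define CYCLES_BEFORE_WAIT 3	/* stand-in for the tunable above */

static int channel_has_work(int tick)
{
	return (tick % 5) == 0;	/* pretend work arrives every 5th tick */
}

int main(void)
{
	int idle = 0;
	int tick;

	for (tick = 0; tick < 20; tick++) {
		if (channel_has_work(tick)) {
			idle = 0;			/* busy: keep polling */
			printf("tick %2d: processed work\n", tick);
		} else if (++idle > CYCLES_BEFORE_WAIT) {
			printf("tick %2d: idle too long, would sleep on the wait queue\n", tick);
			idle = 0;			/* a wakeup hint would reset this */
		}
	}
	return 0;
}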
static BOOL
-Initialize_incoming_thread(void)
+initialize_incoming_thread(void)
{
- if (Incoming_Thread_Started)
+ if (incoming_started)
return TRUE;
- if (!uisthread_start(&Incoming_ThreadInfo,
- &Process_Incoming, NULL, "dev_incoming")) {
- LOGERR("uisthread_start Initialize_incoming_thread ****FAILED");
+ if (!uisthread_start(&incoming_ti,
+ &process_incoming, NULL, "dev_incoming")) {
+ LOGERR("uisthread_start initialize_incoming_thread ****FAILED");
return FALSE;
}
- Incoming_Thread_Started = TRUE;
+ incoming_started = TRUE;
return TRUE;
}
/* Add a new device/channel to the list being processed by
- * Process_Incoming().
+ * process_incoming().
* <interrupt> - indicates the function to call periodically.
* <interrupt_context> - indicates the data to pass to the <interrupt>
* function.
@@ -1397,23 +1368,23 @@ uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no,
dev = find_dev(bus_no, dev_no);
if (!dev) {
- LOGERR("%s busNo=%d, devNo=%d", __func__, (int) (bus_no),
- (int) (dev_no));
+ LOGERR("%s busNo=%d, devNo=%d", __func__, (int)(bus_no),
+ (int)(dev_no));
return;
}
- down(&Lock_Polling_Device_Channels);
- Initialize_incoming_thread();
+ down(&poll_dev_lock);
+ initialize_incoming_thread();
dev->interrupt = interrupt;
dev->interrupt_context = interrupt_context;
dev->polling = TRUE;
- list_add_tail(&(dev->list_polling_device_channels),
- &List_Polling_Device_Channels);
- up(&Lock_Polling_Device_Channels);
+ list_add_tail(&dev->list_polling_device_channels,
+ &poll_dev_chan);
+ up(&poll_dev_lock);
}
EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
/* Remove a device/channel from the list being processed by
- * Process_Incoming().
+ * process_incoming().
*/
void
uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no)
@@ -1422,31 +1393,31 @@ uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no)
dev = find_dev(bus_no, dev_no);
if (!dev) {
- LOGERR("%s busNo=%d, devNo=%d", __func__, (int) (bus_no),
- (int) (dev_no));
+ LOGERR("%s busNo=%d, devNo=%d", __func__, (int)(bus_no),
+ (int)(dev_no));
return;
}
- down(&Lock_Polling_Device_Channels);
+ down(&poll_dev_lock);
list_del(&dev->list_polling_device_channels);
dev->polling = FALSE;
dev->interrupt = NULL;
- up(&Lock_Polling_Device_Channels);
+ up(&poll_dev_lock);
}
EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);
static void
do_wakeup_polling_device_channels(struct work_struct *dummy)
{
- if (!Go_Polling_Device_Channels) {
- Go_Polling_Device_Channels = 1;
- wake_up(&Wakeup_Polling_Device_Channels);
+ if (!poll_dev_start) {
+ poll_dev_start = 1;
+ wake_up(&poll_dev_wake_q);
}
}
-static DECLARE_WORK(Work_wakeup_polling_device_channels,
+static DECLARE_WORK(work_wakeup_polling_device_channels,
do_wakeup_polling_device_channels);
-/* Call this function when you want to send a hint to Process_Incoming() that
+/* Call this function when you want to send a hint to process_incoming() that
* your device might have more requests.
*/
void
@@ -1454,14 +1425,14 @@ uislib_force_channel_interrupt(u32 bus_no, u32 dev_no)
{
if (en_smart_wakeup == 0)
return;
- if (Go_Polling_Device_Channels)
+ if (poll_dev_start)
return;
/* The point of using schedule_work() instead of just doing
* the work inline is to force a slight delay before waking up
- * the Process_Incoming() thread.
+ * the process_incoming() thread.
*/
tot_wakeup_cnt++;
- schedule_work(&Work_wakeup_polling_device_channels);
+ schedule_work(&work_wakeup_polling_device_channels);
}
EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
@@ -1472,35 +1443,35 @@ EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
static int __init
uislib_mod_init(void)
{
-
if (!unisys_spar_platform)
return -ENODEV;
LOGINF("MONITORAPIS");
LOGINF("sizeof(struct uiscmdrsp):%lu bytes\n",
- (ulong) sizeof(struct uiscmdrsp));
+ (ulong)sizeof(struct uiscmdrsp));
LOGINF("sizeof(struct phys_info):%lu\n",
- (ulong) sizeof(struct phys_info));
+ (ulong)sizeof(struct phys_info));
LOGINF("sizeof(uiscmdrsp_scsi):%lu\n",
- (ulong) sizeof(struct uiscmdrsp_scsi));
+ (ulong)sizeof(struct uiscmdrsp_scsi));
LOGINF("sizeof(uiscmdrsp_net):%lu\n",
- (ulong) sizeof(struct uiscmdrsp_net));
+ (ulong)sizeof(struct uiscmdrsp_net));
LOGINF("sizeof(CONTROLVM_MESSAGE):%lu bytes\n",
- (ulong) sizeof(struct controlvm_message));
+ (ulong)sizeof(struct controlvm_message));
LOGINF("sizeof(struct spar_controlvm_channel_protocol):%lu bytes\n",
- (ulong) sizeof(struct spar_controlvm_channel_protocol));
+ (ulong)sizeof(struct spar_controlvm_channel_protocol));
LOGINF("sizeof(CHANNEL_HEADER):%lu bytes\n",
- (ulong) sizeof(struct channel_header));
+ (ulong)sizeof(struct channel_header));
LOGINF("sizeof(struct spar_io_channel_protocol):%lu bytes\n",
- (ulong) sizeof(struct spar_io_channel_protocol));
+ (ulong)sizeof(struct spar_io_channel_protocol));
LOGINF("SIZEOF_CMDRSP:%lu bytes\n", SIZEOF_CMDRSP);
LOGINF("SIZEOF_PROTOCOL:%lu bytes\n", SIZEOF_PROTOCOL);
/* initialize global pointers to NULL */
- BusListHead = NULL;
- BusListCount = MaxBusCount = 0;
- rwlock_init(&BusListLock);
+ bus_list = NULL;
+ bus_list_count = 0;
+ max_bus_count = 0;
+ rwlock_init(&bus_list_lock);
virt_control_chan_func = NULL;
/* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
@@ -1515,7 +1486,7 @@ uislib_mod_init(void)
platformnumber_debugfs_read = debugfs_create_u32(
PLATFORMNUMBER_DEBUGFS_ENTRY_FN, 0444, dir_debugfs,
- &PlatformNumber);
+ &platform_no);
cycles_before_wait_debugfs_read = debugfs_create_u64(
CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
@@ -1533,9 +1504,9 @@ uislib_mod_init(void)
static void __exit
uislib_mod_exit(void)
{
- if (ProcReadBuffer) {
- vfree(ProcReadBuffer);
- ProcReadBuffer = NULL;
+ if (debug_buf) {
+ vfree(debug_buf);
+ debug_buf = NULL;
}
debugfs_remove(info_debugfs_entry);
diff --git a/drivers/staging/unisys/uislib/uisqueue.c b/drivers/staging/unisys/uislib/uisqueue.c
index f9f8442d58c5..71bb7b608e9a 100644
--- a/drivers/staging/unisys/uislib/uisqueue.c
+++ b/drivers/staging/unisys/uislib/uisqueue.c
@@ -21,8 +21,6 @@
#include "uisutils.h"
-#include "chanstub.h"
-
/* this is shorter than using __FILE__ (full path name) in
* debug/info/error messages */
#define CURRENT_FILE_PC UISLIB_PC_uisqueue_c
@@ -33,9 +31,202 @@
/*****************************************************/
/* Exported functions */
/*****************************************************/
+
+/*
+ * Routine Description:
+ * Tries to insert the prebuilt signal pointed to by sig into the nth
+ * queue of the channel pointed to by ch
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ * sig: (IN) pointer to the signal
+ *
+ * Assumptions:
+ * - ch, queue and sig are valid.
+ * - If insertion fails due to a full queue, the caller will determine the
+ * retry policy (e.g. wait & try again, report an error, etc.).
+ *
+ * Return value:
+ * 1 if the insertion succeeds, 0 if the queue was full.
+ */
+unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
+ void *sig)
+{
+ void __iomem *psignal;
+ unsigned int head, tail, nof;
+
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)
+ ((char __iomem *)ch + readq(&ch->ch_space_offset))
+ + queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
+
+ /* queue is full if (head + 1) % n equals tail */
+ if (((head + 1) % readl(&pqhdr->max_slots)) == tail) {
+ nof = readq(&pqhdr->num_overflows) + 1;
+ writeq(nof, &pqhdr->num_overflows);
+ return 0;
+ }
+
+ /* increment the head index */
+ head = (head + 1) % readl(&pqhdr->max_slots);
+
+ /* copy signal to the head location from the area pointed to
+ * by sig
+ */
+ psignal = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (head * readl(&pqhdr->signal_size));
+ memcpy_toio(psignal, sig, readl(&pqhdr->signal_size));
+
+ mb(); /* channel synch */
+ writel(head, &pqhdr->head);
+
+ writeq(readq(&pqhdr->num_sent) + 1, &pqhdr->num_sent);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(spar_signal_insert);
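
Editorial sketch, not part of the patch: spar_signal_insert() above implements a classic one-slot-spare circular queue, where the queue is full when advancing head would land on tail, and head is only published after the payload copy. The userspace model below reproduces that arithmetic with plain memory in place of the __iomem channel layout and readl()/writel() accessors; the demo_* names and sizes are invented.

#include <stdio.h>
#include <string.h>

#define DEMO_SLOTS	4	/* max_slots; holds DEMO_SLOTS - 1 signals */
#define DEMO_SIG_SIZE	8	/* signal_size */

struct demo_queue {
	unsigned int head, tail;
	unsigned long long num_overflows, num_sent;
	unsigned char slots[DEMO_SLOTS][DEMO_SIG_SIZE];
};

static int demo_signal_insert(struct demo_queue *q, const void *sig)
{
	unsigned int head = q->head, tail = q->tail;

	if (((head + 1) % DEMO_SLOTS) == tail) {
		q->num_overflows++;
		return 0;			/* full */
	}
	head = (head + 1) % DEMO_SLOTS;
	memcpy(q->slots[head], sig, DEMO_SIG_SIZE);
	q->head = head;				/* publish only after the copy */
	q->num_sent++;
	return 1;
}

int main(void)
{
	struct demo_queue q = { 0 };
	unsigned char sig[DEMO_SIG_SIZE] = { 1 };
	int i, ok = 0;

	for (i = 0; i < DEMO_SLOTS; i++)
		ok += demo_signal_insert(&q, sig);
	/* only DEMO_SLOTS - 1 inserts fit; the last one overflows */
	printf("inserted %d, overflows %llu\n", ok, q.num_overflows);
	return 0;
}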
+
+/*
+ * Routine Description:
+ * Removes one signal from channel ch's nth queue at the
+ * time of the call and copies it into the memory pointed to by
+ * sig.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ * sig: (OUT) pointer to where the signal is to be copied
+ *
+ * Assumptions:
+ * - ch and queue are valid.
+ * - sig points to a memory area large enough to hold the queue's signal_size
+ *
+ * Return value:
+ * 1 if the removal succeeds, 0 if the queue was empty.
+ */
+unsigned char
+spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig)
+{
+ void __iomem *psource;
+ unsigned int head, tail;
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail) {
+ writeq(readq(&pqhdr->num_empty) + 1, &pqhdr->num_empty);
+ return 0;
+ }
+
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % readl(&pqhdr->max_slots);
+
+ /* copy signal from tail location to the area pointed to by sig */
+ psource = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (tail * readl(&pqhdr->signal_size));
+ memcpy_fromio(sig, psource, readl(&pqhdr->signal_size));
+
+ mb(); /* channel synch */
+ writel(tail, &pqhdr->tail);
+
+ writeq(readq(&pqhdr->num_received) + 1,
+ &pqhdr->num_received);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(spar_signal_remove);
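
Editorial sketch, not part of the patch: spar_signal_remove() is the consumer side of the same ring. If head equals tail the queue is empty; otherwise tail steps past the permanently empty front slot and the signal is copied out before tail is published. A companion sketch under the same simplified assumptions follows (spar_signal_remove_all() is essentially this remove run in a loop).

#include <stdio.h>
#include <string.h>

#define DEMO_SLOTS	4
#define DEMO_SIG_SIZE	8

struct demo_queue {
	unsigned int head, tail;
	unsigned long long num_empty, num_received;
	unsigned char slots[DEMO_SLOTS][DEMO_SIG_SIZE];
};

static int demo_signal_remove(struct demo_queue *q, void *sig)
{
	unsigned int head = q->head, tail = q->tail;

	if (head == tail) {
		q->num_empty++;
		return 0;			/* nothing queued */
	}
	tail = (tail + 1) % DEMO_SLOTS;		/* step past the empty front slot */
	memcpy(sig, q->slots[tail], DEMO_SIG_SIZE);
	q->tail = tail;				/* publish only after the copy */
	q->num_received++;
	return 1;
}

int main(void)
{
	struct demo_queue q = { .head = 2, .tail = 0 };	/* two signals pending */
	unsigned char sig[DEMO_SIG_SIZE];
	int drained = 0;

	memcpy(q.slots[1], "first!!", DEMO_SIG_SIZE);
	memcpy(q.slots[2], "second!", DEMO_SIG_SIZE);
	while (demo_signal_remove(&q, sig))	/* the loop spar_signal_remove_all() runs */
		drained++;
	printf("drained %d signals, empty hits %llu\n", drained, q.num_empty);
	return 0;
}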
+
+/*
+ * Routine Description:
+ * Removes all signals present in channel ch's nth queue at the
+ * time of the call and copies them into the memory pointed to by
+ * sig. Returns the # of signals copied as the value of the routine.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ * sig: (OUT) pointer to where the signals are to be copied
+ *
+ * Assumptions:
+ * - ch and queue are valid.
+ * - sig points to a memory area large enough to hold the queue's max_slots
+ * # of signals, each of which is signal_size bytes.
+ *
+ * Return value:
+ * # of signals copied.
+ */
+unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
+ void *sig)
+{
+ void *psource;
+ unsigned int head, tail, count = 0;
+ struct signal_queue_header *pqhdr =
+ (struct signal_queue_header *)((char *)ch +
+ ch->ch_space_offset) + queue;
+
+ /* capture current head and tail */
+ head = pqhdr->head;
+ tail = pqhdr->tail;
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail)
+ return 0;
+
+ while (head != tail) {
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % pqhdr->max_slots;
+
+ /* copy signal from tail location to the area pointed
+ * to by sig
+ */
+ psource =
+ (char *)pqhdr + pqhdr->sig_base_offset +
+ (tail * pqhdr->signal_size);
+ memcpy((char *)sig + (pqhdr->signal_size * count),
+ psource, pqhdr->signal_size);
+
+ mb(); /* channel synch */
+ pqhdr->tail = tail;
+
+ count++;
+ pqhdr->num_received++;
+ }
+
+ return count;
+}
+
+/*
+ * Routine Description:
+ * Determine whether a signal queue is empty.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ *
+ * Return value:
+ * 1 if the signal queue is empty, 0 otherwise.
+ */
+unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
+ u32 queue)
+{
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
+ return readl(&pqhdr->head) == readl(&pqhdr->tail);
+}
+EXPORT_SYMBOL_GPL(spar_signalqueue_empty);
+
unsigned long long
uisqueue_interlocked_or(unsigned long long __iomem *tgt,
- unsigned long long set)
+ unsigned long long set)
{
unsigned long long i;
unsigned long long j;
@@ -53,7 +244,7 @@ EXPORT_SYMBOL_GPL(uisqueue_interlocked_or);
unsigned long long
uisqueue_interlocked_and(unsigned long long __iomem *tgt,
- unsigned long long set)
+ unsigned long long set)
{
unsigned long long i;
unsigned long long j;
@@ -72,22 +263,21 @@ EXPORT_SYMBOL_GPL(uisqueue_interlocked_and);
static u8
do_locked_client_insert(struct uisqueue_info *queueinfo,
unsigned int whichqueue,
- void *pSignal,
+ void *signal,
spinlock_t *lock,
- unsigned char issueInterruptIfEmpty,
- u64 interruptHandle, u8 *channelId)
+ u8 *channel_id)
{
unsigned long flags;
u8 rc = 0;
spin_lock_irqsave(lock, flags);
- if (!spar_channel_client_acquire_os(queueinfo->chan, channelId))
+ if (!spar_channel_client_acquire_os(queueinfo->chan, channel_id))
goto unlock;
- if (spar_signal_insert(queueinfo->chan, whichqueue, pSignal)) {
+ if (spar_signal_insert(queueinfo->chan, whichqueue, signal)) {
queueinfo->packets_sent++;
rc = 1;
}
- spar_channel_client_release_os(queueinfo->chan, channelId);
+ spar_channel_client_release_os(queueinfo->chan, channel_id);
unlock:
spin_unlock_irqrestore((spinlock_t *)lock, flags);
return rc;
@@ -103,9 +293,8 @@ uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo,
char oktowait, u8 *channel_id)
{
while (!do_locked_client_insert(queueinfo, whichqueue, cmdrsp,
- (spinlock_t *) insertlock,
- issue_irq_if_empty,
- irq_handle, channel_id)) {
+ (spinlock_t *)insertlock,
+ channel_id)) {
if (oktowait != OK_TO_WAIT) {
LOGERR("****FAILED visor_signal_insert failed; cannot wait; insert aborted\n");
return 0; /* failed to queue */
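
Editorial sketch, not part of the patch: uisqueue_put_cmdrsp_with_lock_client() above wraps do_locked_client_insert() in a retry loop; the insert is attempted under a lock, and a full queue is either reported back or retried depending on oktowait. A rough userspace analogue using a pthread mutex and a counter in place of the spinlock and signal queue (names and the capacity are made up):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define QUEUE_CAPACITY 4

static pthread_mutex_t insert_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued;			/* packets currently in the queue */

static int do_locked_insert(void)
{
	int rc = 0;

	pthread_mutex_lock(&insert_lock);
	if (queued < QUEUE_CAPACITY) {	/* stands in for spar_signal_insert() succeeding */
		queued++;
		rc = 1;
	}
	pthread_mutex_unlock(&insert_lock);
	return rc;
}

static int put_cmdrsp(int ok_to_wait)
{
	while (!do_locked_insert()) {
		if (!ok_to_wait)
			return 0;	/* caller said do not block: report failure */
		usleep(1000);		/* back off briefly, then retry */
	}
	return 1;
}

int main(void)
{
	int i, sent = 0;

	for (i = 0; i < QUEUE_CAPACITY + 2; i++)
		sent += put_cmdrsp(0 /* DONT_WAIT */);
	printf("sent %d of %d without waiting\n", sent, QUEUE_CAPACITY + 2);
	return 0;
}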
diff --git a/drivers/staging/unisys/uislib/uisthread.c b/drivers/staging/unisys/uislib/uisthread.c
index c0fc812f751e..25adf1a7307c 100644
--- a/drivers/staging/unisys/uislib/uisthread.c
+++ b/drivers/staging/unisys/uislib/uisthread.c
@@ -53,7 +53,6 @@ uisthread_start(struct uisthread_info *thrinfo,
wake_up_process(thrinfo->task);
LOGINF("started thread pid:%d\n", thrinfo->id);
return 1;
-
}
EXPORT_SYMBOL_GPL(uisthread_start);
diff --git a/drivers/staging/unisys/uislib/uisutils.c b/drivers/staging/unisys/uislib/uisutils.c
index 4a5b86773927..31318d246252 100644
--- a/drivers/staging/unisys/uislib/uisutils.c
+++ b/drivers/staging/unisys/uislib/uisutils.c
@@ -42,14 +42,13 @@ atomic_t uisutils_registered_services = ATOMIC_INIT(0);
* uisctrl_register_req_handler() or
* uisctrl_register_req_handler_ex() */
-
/*****************************************************/
/* Utility functions */
/*****************************************************/
int
uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
- char *format, ...)
+ char *format, ...)
{
va_list args;
int len;
@@ -57,6 +56,7 @@ uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
DBGINF("buffer = 0x%p : *buffer = 0x%p.\n", buffer, *buffer);
va_start(args, format);
len = vsnprintf(*buffer, *buffer_remaining, format, args);
+ va_end(args);
if (len >= *buffer_remaining) {
*buffer += *buffer_remaining;
*total += *buffer_remaining;
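
Editorial sketch, not part of the patch: the hunk above adds the va_end() that must pair with every va_start(), and the surrounding function treats len >= remaining from vsnprintf() as "output truncated, buffer exhausted". A standalone illustration of that varargs pattern (demo_add_line() is a simplified stand-in, not the kernel helper):

#include <stdarg.h>
#include <stdio.h>

static int demo_add_line(char **buffer, int *remaining, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(*buffer, (size_t)*remaining, fmt, args);
	va_end(args);				/* always pair va_start() with va_end() */
	if (len >= *remaining)
		return -1;			/* output was truncated: buffer is full */
	*buffer += len;
	*remaining -= len;
	return len;
}

int main(void)
{
	char buf[32];
	char *cur = buf;
	int remaining = (int)sizeof(buf);

	demo_add_line(&cur, &remaining, "bus=%d devices=%d\n", 0, 2);
	if (demo_add_line(&cur, &remaining, "a line that is far too long for what is left") < 0)
		puts("buffer exhausted, as expected");
	printf("%s", buf);
	return 0;
}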
@@ -96,7 +96,7 @@ uisctrl_register_req_handler(int type, void *fptr,
}
if (chipset_driver_info)
bus_device_info_init(chipset_driver_info, "chipset", "uislib",
- VERSION, NULL);
+ VERSION, NULL);
return 1;
}
@@ -113,66 +113,57 @@ uisctrl_register_req_handler_ex(uuid_le switch_uuid,
u32 client_str_len, u64 bytes),
struct ultra_vbus_deviceinfo *chipset_driver_info)
{
- struct req_handler_info *pReqHandlerInfo;
- int rc = 0; /* assume failure */
+ struct req_handler_info *req_handler;
LOGINF("type=%pUL, controlfunc=0x%p.\n",
&switch_uuid, controlfunc);
if (!controlfunc) {
LOGERR("%pUL: controlfunc must be supplied\n", &switch_uuid);
- goto Away;
+ return 0;
}
if (!server_channel_ok) {
LOGERR("%pUL: Server_Channel_Ok must be supplied\n",
&switch_uuid);
- goto Away;
+ return 0;
}
if (!server_channel_init) {
LOGERR("%pUL: Server_Channel_Init must be supplied\n",
&switch_uuid);
- goto Away;
+ return 0;
}
- pReqHandlerInfo = req_handler_add(switch_uuid,
- switch_type_name,
- controlfunc,
- min_channel_bytes,
- server_channel_ok, server_channel_init);
- if (!pReqHandlerInfo) {
+ req_handler = req_handler_add(switch_uuid,
+ switch_type_name,
+ controlfunc,
+ min_channel_bytes,
+ server_channel_ok, server_channel_init);
+ if (!req_handler) {
LOGERR("failed to add %pUL to server list\n", &switch_uuid);
- goto Away;
+ return 0;
}
atomic_inc(&uisutils_registered_services);
- rc = 1; /* success */
-Away:
- if (rc) {
- if (chipset_driver_info)
- bus_device_info_init(chipset_driver_info, "chipset",
- "uislib", VERSION, NULL);
- } else
- LOGERR("failed to register type %pUL.\n", &switch_uuid);
+ if (chipset_driver_info) {
+ bus_device_info_init(chipset_driver_info, "chipset",
+ "uislib", VERSION, NULL);
+ return 1;
+ }
- return rc;
+ LOGERR("failed to register type %pUL.\n", &switch_uuid);
+ return 0;
}
EXPORT_SYMBOL_GPL(uisctrl_register_req_handler_ex);
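
Editorial sketch, not part of the patch: the rewrite of uisctrl_register_req_handler_ex() above replaces the rc/goto Away bookkeeping with an early return at each failed precondition. A minimal illustration of that shape (the checks and names are placeholders):

#include <stdio.h>

static int demo_register(void *controlfunc, void *channel_ok, void *channel_init)
{
	if (!controlfunc) {
		fprintf(stderr, "controlfunc must be supplied\n");
		return 0;		/* was: rc = 0; goto Away; */
	}
	if (!channel_ok) {
		fprintf(stderr, "channel_ok must be supplied\n");
		return 0;
	}
	if (!channel_init) {
		fprintf(stderr, "channel_init must be supplied\n");
		return 0;
	}
	/* all preconditions hold: do the real registration work here */
	return 1;
}

int main(void)
{
	int dummy;

	printf("missing arg -> %d\n", demo_register(NULL, &dummy, &dummy));
	printf("all present -> %d\n", demo_register(&dummy, &dummy, &dummy));
	return 0;
}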
int
uisctrl_unregister_req_handler_ex(uuid_le switch_uuid)
{
- int rc = 0; /* assume failure */
-
LOGINF("type=%pUL.\n", &switch_uuid);
if (req_handler_del(switch_uuid) < 0) {
LOGERR("failed to remove %pUL from server list\n",
- &switch_uuid);
- goto Away;
+ &switch_uuid);
+ return 0;
}
atomic_dec(&uisutils_registered_services);
- rc = 1; /* success */
-Away:
- if (!rc)
- LOGERR("failed to unregister type %pUL.\n", &switch_uuid);
- return rc;
+ return 1;
}
EXPORT_SYMBOL_GPL(uisctrl_unregister_req_handler_ex);
@@ -214,10 +205,10 @@ uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx, void *skb_in,
frags[count].pi_pfn =
page_to_pfn(virt_to_page(skb->data + offset));
frags[count].pi_off =
- (unsigned long) (skb->data + offset) & PI_PAGE_MASK;
+ (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
size =
min(firstfraglen,
- (unsigned int) (PI_PAGE_SIZE - frags[count].pi_off));
+ (unsigned int)(PI_PAGE_SIZE - frags[count].pi_off));
/* can take smallest of firstfraglen(what's left) OR
* bytes left in the page
*/
@@ -231,7 +222,7 @@ uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx, void *skb_in,
if ((count + numfrags) > frags_max) {
LOGERR("**** FAILED %s frags array too small: max:%d count+nr_frags:%d\n",
- calling_ctx, frags_max, count + numfrags);
+ calling_ctx, frags_max, count + numfrags);
return -1; /* failure */
}
@@ -255,7 +246,6 @@ dolist: if (skb_shinfo(skb)->frag_list) {
for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
skbinlist = skbinlist->next) {
-
c = uisutil_copy_fragsinfo_from_skb("recursive",
skbinlist,
skbinlist->len - skbinlist->data_len,
@@ -272,17 +262,18 @@ dolist: if (skb_shinfo(skb)->frag_list) {
}
EXPORT_SYMBOL_GPL(uisutil_copy_fragsinfo_from_skb);
-static LIST_HEAD(ReqHandlerInfo_list); /* list of struct req_handler_info */
-static DEFINE_SPINLOCK(ReqHandlerInfo_list_lock);
+static LIST_HEAD(req_handler_info_list); /* list of struct req_handler_info */
+static DEFINE_SPINLOCK(req_handler_info_list_lock);
struct req_handler_info *
req_handler_add(uuid_le switch_uuid,
const char *switch_type_name,
int (*controlfunc)(struct io_msgs *),
unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr, u32 clientStrLen, u64 bytes))
+ int (*server_channel_ok)(unsigned long channel_bytes),
+ int (*server_channel_init)
+ (void *x, unsigned char *clientstr, u32 clientstr_len,
+ u64 bytes))
{
struct req_handler_info *rc = NULL;
@@ -292,14 +283,14 @@ req_handler_add(uuid_le switch_uuid,
rc->switch_uuid = switch_uuid;
rc->controlfunc = controlfunc;
rc->min_channel_bytes = min_channel_bytes;
- rc->server_channel_ok = Server_Channel_Ok;
- rc->server_channel_init = Server_Channel_Init;
+ rc->server_channel_ok = server_channel_ok;
+ rc->server_channel_init = server_channel_init;
if (switch_type_name)
strncpy(rc->switch_type_name, switch_type_name,
sizeof(rc->switch_type_name) - 1);
- spin_lock(&ReqHandlerInfo_list_lock);
- list_add_tail(&(rc->list_link), &ReqHandlerInfo_list);
- spin_unlock(&ReqHandlerInfo_list_lock);
+ spin_lock(&req_handler_info_list_lock);
+ list_add_tail(&rc->list_link, &req_handler_info_list);
+ spin_unlock(&req_handler_info_list_lock);
return rc;
}
@@ -310,15 +301,15 @@ req_handler_find(uuid_le switch_uuid)
struct list_head *lelt, *tmp;
struct req_handler_info *entry = NULL;
- spin_lock(&ReqHandlerInfo_list_lock);
- list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
+ spin_lock(&req_handler_info_list_lock);
+ list_for_each_safe(lelt, tmp, &req_handler_info_list) {
entry = list_entry(lelt, struct req_handler_info, list_link);
if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
- spin_unlock(&ReqHandlerInfo_list_lock);
+ spin_unlock(&req_handler_info_list_lock);
return entry;
}
}
- spin_unlock(&ReqHandlerInfo_list_lock);
+ spin_unlock(&req_handler_info_list_lock);
return NULL;
}
@@ -329,8 +320,8 @@ req_handler_del(uuid_le switch_uuid)
struct req_handler_info *entry = NULL;
int rc = -1;
- spin_lock(&ReqHandlerInfo_list_lock);
- list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
+ spin_lock(&req_handler_info_list_lock);
+ list_for_each_safe(lelt, tmp, &req_handler_info_list) {
entry = list_entry(lelt, struct req_handler_info, list_link);
if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
list_del(lelt);
@@ -338,6 +329,6 @@ req_handler_del(uuid_le switch_uuid)
rc++;
}
}
- spin_unlock(&ReqHandlerInfo_list_lock);
+ spin_unlock(&req_handler_info_list_lock);
return rc;
}
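
Editorial sketch, not part of the patch: req_handler_add(), req_handler_find() and req_handler_del() above keep every traversal and unlink of the handler list under req_handler_info_list_lock. The userspace sketch below shows the same discipline with a pthread mutex and a hand-rolled singly linked list standing in for the spinlock and list_head machinery; it is illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_handler {
	int type;			/* stands in for the switch UUID */
	struct demo_handler *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_handler *handler_list;

static struct demo_handler *demo_handler_add(int type)
{
	struct demo_handler *h = calloc(1, sizeof(*h));

	if (!h)
		return NULL;
	h->type = type;
	pthread_mutex_lock(&list_lock);
	h->next = handler_list;
	handler_list = h;
	pthread_mutex_unlock(&list_lock);
	return h;
}

static int demo_handler_del(int type)
{
	struct demo_handler **pp, *victim;
	int rc = -1;

	pthread_mutex_lock(&list_lock);
	for (pp = &handler_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->type == type) {
			victim = *pp;
			*pp = victim->next;	/* unlink while still holding the lock */
			free(victim);
			rc = 0;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return rc;
}

int main(void)
{
	demo_handler_add(1);
	demo_handler_add(2);
	printf("del 1 -> %d, del 3 -> %d\n", demo_handler_del(1), demo_handler_del(3));
	return 0;
}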
diff --git a/drivers/staging/unisys/virthba/Kconfig b/drivers/staging/unisys/virthba/Kconfig
index c0d7986e78cb..9af98fc7acbc 100644
--- a/drivers/staging/unisys/virthba/Kconfig
+++ b/drivers/staging/unisys/virthba/Kconfig
@@ -4,7 +4,7 @@
config UNISYS_VIRTHBA
tristate "Unisys virthba driver"
- depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_CHANNELSTUB && UNISYS_UISLIB && UNISYS_VIRTPCI && SCSI
+ depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_UISLIB && UNISYS_VIRTPCI && SCSI
---help---
If you say Y here, you will enable the Unisys virthba driver.
diff --git a/drivers/staging/unisys/virthba/virthba.c b/drivers/staging/unisys/virthba/virthba.c
index d7a629b5f111..e6ecea560495 100644
--- a/drivers/staging/unisys/virthba/virthba.c
+++ b/drivers/staging/unisys/virthba/virthba.c
@@ -85,7 +85,8 @@ static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
static const char *virthba_get_info(struct Scsi_Host *shp);
static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
- void (*virthba_cmnd_done)(struct scsi_cmnd *));
+ void (*virthba_cmnd_done)
+ (struct scsi_cmnd *));
static const struct x86_cpu_id unisys_spar_ids[] = {
{ X86_VENDOR_INTEL, 6, 62, X86_FEATURE_ANY },
@@ -107,19 +108,20 @@ static void virthba_slave_destroy(struct scsi_device *scsidev);
static int process_incoming_rsps(void *);
static int virthba_serverup(struct virtpci_dev *virtpcidev);
static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
-static void doDiskAddRemove(struct work_struct *work);
+static void do_disk_add_remove(struct work_struct *work);
static void virthba_serverdown_complete(struct work_struct *work);
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
+ size_t len, loff_t *offset);
static ssize_t enable_ints_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos);
+ const char __user *buffer, size_t count,
+ loff_t *ppos);
/*****************************************************/
/* Globals */
/*****************************************************/
static int rsltq_wait_usecs = 4000; /* Default 4ms */
-static unsigned int MaxBuffLen;
+static unsigned int max_buff_len;
/* Module options */
static char *virthba_options = "NONE";
@@ -165,6 +167,7 @@ struct virtdisk_info {
atomic_t error_count;
struct virtdisk_info *next;
};
+
/* Each Scsi_Host has a host_data area that contains this struct. */
struct virthba_info {
struct Scsi_Host *scsihost;
@@ -193,7 +196,7 @@ struct virthba_info {
struct virtdisk_info head;
};
-/* Work Data for DARWorkQ */
+/* Work Data for dar_work_queue */
struct diskaddremove {
u8 add; /* 0-remove, 1-add */
struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
@@ -244,7 +247,7 @@ static const struct file_operations debugfs_enable_ints_fops = {
#define VIRTHBASOPENMAX 1
/* array of open devices maintained by open() and close(); */
-static struct virthba_devices_open VirtHbasOpen[VIRTHBASOPENMAX];
+static struct virthba_devices_open virthbas_open[VIRTHBASOPENMAX];
static struct dentry *virthba_debugfs_dir;
/*****************************************************/
@@ -260,7 +263,7 @@ add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
insert_location = vhbainfo->nextinsert;
while (vhbainfo->pending[insert_location].sent != NULL) {
insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
- if (insert_location == (int) vhbainfo->nextinsert) {
+ if (insert_location == (int)vhbainfo->nextinsert) {
LOGERR("Queue should be full. insert_location<<%d>> Unable to find open slot for pending commands.\n",
insert_location);
spin_unlock_irqrestore(&vhbainfo->privlock, flags);
@@ -289,7 +292,7 @@ add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
}
- return (unsigned int) insert_location;
+ return (unsigned int)insert_location;
}
static void *
@@ -300,13 +303,13 @@ del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
if (del >= MAX_PENDING_REQUESTS) {
LOGERR("Invalid queue position <<%lu>> given to delete. MAX_PENDING_REQUESTS <<%d>>\n",
- (unsigned long) del, MAX_PENDING_REQUESTS);
+ (unsigned long)del, MAX_PENDING_REQUESTS);
} else {
spin_lock_irqsave(&vhbainfo->privlock, flags);
if (vhbainfo->pending[del].sent == NULL)
LOGERR("Deleting already cleared queue entry at <<%lu>>.\n",
- (unsigned long) del);
+ (unsigned long)del);
sent = vhbainfo->pending[del].sent;
@@ -318,30 +321,30 @@ del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
return sent;
}
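
Editorial sketch, not part of the patch: add_scsipending_entry() and del_scsipending_entry() above manage a fixed table of in-flight commands, using a rotating next-insert hint, a wrap-around search for a free slot, and the slot index travelling with the command so completion can clear it. A lock-free userspace model of just that arithmetic (sizes and names are invented):

#include <stdio.h>

#define MAX_PENDING 4

static void *pending[MAX_PENDING];
static int nextinsert;

static int add_pending(void *cmd)
{
	int slot = nextinsert;

	while (pending[slot]) {
		slot = (slot + 1) % MAX_PENDING;
		if (slot == nextinsert)
			return -1;		/* wrapped all the way round: table full */
	}
	pending[slot] = cmd;
	nextinsert = (slot + 1) % MAX_PENDING;
	return slot;				/* the index travels with the command */
}

static void *del_pending(int slot)
{
	void *cmd;

	if (slot < 0 || slot >= MAX_PENDING)
		return NULL;
	cmd = pending[slot];
	pending[slot] = NULL;			/* slot is reusable again */
	return cmd;
}

int main(void)
{
	int a = add_pending("cmd-a");
	int b = add_pending("cmd-b");

	printf("slots %d %d, completed: %s\n", a, b, (char *)del_pending(a));
	return 0;
}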
-/* DARWorkQ (Disk Add/Remove) */
-static struct work_struct DARWorkQ;
-static struct diskaddremove *DARWorkQHead;
-static spinlock_t DARWorkQLock;
-static unsigned short DARWorkQSched;
+/* dar_work_queue (Disk Add/Remove) */
+static struct work_struct dar_work_queue;
+static struct diskaddremove *dar_work_queue_head;
+static spinlock_t dar_work_queue_lock;
+static unsigned short dar_work_queue_sched;
#define QUEUE_DISKADDREMOVE(dar) { \
- spin_lock_irqsave(&DARWorkQLock, flags); \
- if (!DARWorkQHead) { \
- DARWorkQHead = dar; \
+ spin_lock_irqsave(&dar_work_queue_lock, flags); \
+ if (!dar_work_queue_head) { \
+ dar_work_queue_head = dar; \
dar->next = NULL; \
} \
else { \
- dar->next = DARWorkQHead; \
- DARWorkQHead = dar; \
+ dar->next = dar_work_queue_head; \
+ dar_work_queue_head = dar; \
} \
- if (!DARWorkQSched) { \
- schedule_work(&DARWorkQ); \
- DARWorkQSched = 1; \
+ if (!dar_work_queue_sched) { \
+ schedule_work(&dar_work_queue); \
+ dar_work_queue_sched = 1; \
} \
- spin_unlock_irqrestore(&DARWorkQLock, flags); \
+ spin_unlock_irqrestore(&dar_work_queue_lock, flags); \
}
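
Editorial sketch, not part of the patch: the QUEUE_DISKADDREMOVE() macro above pushes a request onto a shared list and schedules the dar_work_queue worker only if a run is not already pending, so a burst of disk add/remove notifications is handled in one pass of do_disk_add_remove(). A single-threaded sketch of that collapse-into-one-run idea, with the locking and the real workqueue left out:

#include <stdio.h>
#include <stdlib.h>

struct demo_dar {
	int add;			/* 0 = remove, 1 = add */
	struct demo_dar *next;
};

static struct demo_dar *dar_head;
static int dar_sched;			/* 1 while a worker run is pending */

static void demo_worker(void)
{
	struct demo_dar *dar, *tmphead = dar_head;

	dar_head = NULL;
	dar_sched = 0;			/* new requests will schedule a fresh run */
	while (tmphead) {
		dar = tmphead;
		tmphead = dar->next;
		printf("%s disk\n", dar->add ? "adding" : "removing");
		free(dar);
	}
}

static void demo_queue_dar(int add)
{
	struct demo_dar *dar = calloc(1, sizeof(*dar));

	if (!dar)
		return;
	dar->add = add;
	dar->next = dar_head;		/* push onto the head, like the macro */
	dar_head = dar;
	if (!dar_sched) {
		dar_sched = 1;		/* only one worker run per burst */
		/* schedule_work(&dar_work_queue) would go here */
	}
}

int main(void)
{
	demo_queue_dar(1);
	demo_queue_dar(0);
	if (dar_sched)
		demo_worker();		/* the deferred run processes both entries */
	return 0;
}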
static inline void
-SendDiskAddRemove(struct diskaddremove *dar)
+send_disk_add_remove(struct diskaddremove *dar)
{
struct scsi_device *sdev;
int error;
@@ -365,31 +368,31 @@ SendDiskAddRemove(struct diskaddremove *dar)
}
/*****************************************************/
-/* DARWorkQ Handler Thread */
+/* dar_work_queue Handler Thread */
/*****************************************************/
static void
-doDiskAddRemove(struct work_struct *work)
+do_disk_add_remove(struct work_struct *work)
{
struct diskaddremove *dar;
struct diskaddremove *tmphead;
int i = 0;
unsigned long flags;
- spin_lock_irqsave(&DARWorkQLock, flags);
- tmphead = DARWorkQHead;
- DARWorkQHead = NULL;
- DARWorkQSched = 0;
- spin_unlock_irqrestore(&DARWorkQLock, flags);
+ spin_lock_irqsave(&dar_work_queue_lock, flags);
+ tmphead = dar_work_queue_head;
+ dar_work_queue_head = NULL;
+ dar_work_queue_sched = 0;
+ spin_unlock_irqrestore(&dar_work_queue_lock, flags);
while (tmphead) {
dar = tmphead;
tmphead = dar->next;
- SendDiskAddRemove(dar);
+ send_disk_add_remove(dar);
i++;
}
}
/*****************************************************/
-/* Routine to add entry to DARWorkQ */
+/* Routine to add entry to dar_work_queue */
/*****************************************************/
static void
process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
@@ -397,7 +400,7 @@ process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
struct diskaddremove *dar;
unsigned long flags;
- dar = kzalloc(sizeof(struct diskaddremove), GFP_ATOMIC);
+ dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
if (dar) {
dar->add = cmdrsp->disknotify.add;
dar->shost = shost;
@@ -416,10 +419,10 @@ process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
/* Probe Remove Functions */
/*****************************************************/
static irqreturn_t
-virthba_ISR(int irq, void *dev_id)
+virthba_isr(int irq, void *dev_id)
{
- struct virthba_info *virthbainfo = (struct virthba_info *) dev_id;
- struct channel_header __iomem *pChannelHeader;
+ struct virthba_info *virthbainfo = (struct virthba_info *)dev_id;
+ struct channel_header __iomem *channel_header;
struct signal_queue_header __iomem *pqhdr;
u64 mask;
unsigned long long rc1;
@@ -427,23 +430,23 @@ virthba_ISR(int irq, void *dev_id)
if (virthbainfo == NULL)
return IRQ_NONE;
virthbainfo->interrupts_rcvd++;
- pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
- if (((readq(&pChannelHeader->features)
- & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0)
- && ((readq(&pChannelHeader->features) &
+ channel_header = virthbainfo->chinfo.queueinfo->chan;
+ if (((readq(&channel_header->features)
+ & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0) &&
+ ((readq(&channel_header->features) &
ULTRA_IO_DRIVER_DISABLES_INTS) !=
0)) {
virthbainfo->interrupts_disabled++;
mask = ~ULTRA_CHANNEL_ENABLE_INTS;
rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask);
}
- if (spar_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
+ if (spar_signalqueue_empty(channel_header, IOCHAN_FROM_IOPART)) {
virthbainfo->interrupts_notme++;
return IRQ_NONE;
}
pqhdr = (struct signal_queue_header __iomem *)
- ((char __iomem *) pChannelHeader +
- readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
+ ((char __iomem *)channel_header +
+ readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
writeq(readq(&pqhdr->num_irq_received) + 1,
&pqhdr->num_irq_received);
atomic_set(&virthbainfo->interrupt_rcvd, 1);
@@ -459,8 +462,8 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
struct virthba_info *virthbainfo;
int rsp;
int i;
- irq_handler_t handler = virthba_ISR;
- struct channel_header __iomem *pChannelHeader;
+ irq_handler_t handler = virthba_isr;
+ struct channel_header __iomem *channel_header;
struct signal_queue_header __iomem *pqhdr;
u64 mask;
@@ -476,7 +479,7 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
* instance - this virthba that has just been created is an
* instance of a scsi host adapter. This scsi_host_alloc
* function allocates a new Scsi_Host struct & performs basic
- * initializatoin. The host is not published to the scsi
+ * initialization. The host is not published to the scsi
* midlayer until scsi_add_host is called.
*/
DBGINF("calling scsi_host_alloc.\n");
@@ -501,19 +504,19 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
* the max-channel value.
*/
LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n",
- (unsigned) virtpcidev->scsi.max.max_channel - 1,
- (unsigned) virtpcidev->scsi.max.max_id,
- (unsigned) virtpcidev->scsi.max.max_lun,
- (unsigned) virtpcidev->scsi.max.cmd_per_lun,
- (unsigned) virtpcidev->scsi.max.max_io_size);
- scsihost->max_channel = (unsigned) virtpcidev->scsi.max.max_channel;
- scsihost->max_id = (unsigned) virtpcidev->scsi.max.max_id;
- scsihost->max_lun = (unsigned) virtpcidev->scsi.max.max_lun;
- scsihost->cmd_per_lun = (unsigned) virtpcidev->scsi.max.cmd_per_lun;
+ (unsigned)virtpcidev->scsi.max.max_channel - 1,
+ (unsigned)virtpcidev->scsi.max.max_id,
+ (unsigned)virtpcidev->scsi.max.max_lun,
+ (unsigned)virtpcidev->scsi.max.cmd_per_lun,
+ (unsigned)virtpcidev->scsi.max.max_io_size);
+ scsihost->max_channel = (unsigned)virtpcidev->scsi.max.max_channel;
+ scsihost->max_id = (unsigned)virtpcidev->scsi.max.max_id;
+ scsihost->max_lun = (unsigned)virtpcidev->scsi.max.max_lun;
+ scsihost->cmd_per_lun = (unsigned)virtpcidev->scsi.max.cmd_per_lun;
scsihost->max_sectors =
- (unsigned short) (virtpcidev->scsi.max.max_io_size >> 9);
+ (unsigned short)(virtpcidev->scsi.max.max_io_size >> 9);
scsihost->sg_tablesize =
- (unsigned short) (virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
+ (unsigned short)(virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
if (scsihost->sg_tablesize > MAX_PHYS_INFO)
scsihost->sg_tablesize = MAX_PHYS_INFO;
LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%llu, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
@@ -544,11 +547,11 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
return -ENODEV;
}
- virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ virthbainfo = (struct virthba_info *)scsihost->hostdata;
memset(virthbainfo, 0, sizeof(struct virthba_info));
for (i = 0; i < VIRTHBASOPENMAX; i++) {
- if (VirtHbasOpen[i].virthbainfo == NULL) {
- VirtHbasOpen[i].virthbainfo = virthbainfo;
+ if (virthbas_open[i].virthbainfo == NULL) {
+ virthbas_open[i].virthbainfo = virthbainfo;
break;
}
}
@@ -584,10 +587,10 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
- pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
+ channel_header = virthbainfo->chinfo.queueinfo->chan;
pqhdr = (struct signal_queue_header __iomem *)
- ((char __iomem *)pChannelHeader +
- readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
+ ((char __iomem *)channel_header +
+ readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
virthbainfo->flags_addr = &pqhdr->features;
if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
@@ -646,11 +649,11 @@ virthba_remove(struct virtpci_dev *virtpcidev)
{
struct virthba_info *virthbainfo;
struct Scsi_Host *scsihost =
- (struct Scsi_Host *) virtpcidev->scsi.scsihost;
+ (struct Scsi_Host *)virtpcidev->scsi.scsihost;
LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
virtpcidev->device_no);
- virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ virthbainfo = (struct virthba_info *)scsihost->hostdata;
if (virthbainfo->interrupt_vector != -1)
free_irq(virthbainfo->interrupt_vector, virthbainfo);
LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev,
@@ -679,7 +682,7 @@ forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
{
struct uiscmdrsp *cmdrsp;
struct virthba_info *virthbainfo =
- (struct virthba_info *) scsihost->hostdata;
+ (struct virthba_info *)scsihost->hostdata;
int notifyresult = 0xffff;
wait_queue_head_t notifyevent;
@@ -706,8 +709,8 @@ forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
/* specify the event that has to be triggered when this cmd is
* complete
*/
- cmdrsp->vdiskmgmt.notify = (void *) &notifyevent;
- cmdrsp->vdiskmgmt.notifyresult = (void *) &notifyresult;
+ cmdrsp->vdiskmgmt.notify = (void *)&notifyevent;
+ cmdrsp->vdiskmgmt.notifyresult = (void *)&notifyresult;
/* save destination */
cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
@@ -715,14 +718,14 @@ forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
cmdrsp->vdiskmgmt.vdest.id = vdest->id;
cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
cmdrsp->vdiskmgmt.scsicmd =
- (void *) (uintptr_t)
+ (void *)(uintptr_t)
add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
- (void *) cmdrsp);
+ (void *)cmdrsp);
uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
cmdrsp, IOCHAN_TO_IOPART,
&virthbainfo->chinfo.insertlock,
- DONT_ISSUE_INTERRUPT, (u64) NULL,
+ DONT_ISSUE_INTERRUPT, (u64)NULL,
OK_TO_WAIT, "vhba");
LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n",
cmdrsp->scsitaskmgmt.notify);
@@ -742,7 +745,7 @@ forward_taskmgmt_command(enum task_mgmt_types tasktype,
{
struct uiscmdrsp *cmdrsp;
struct virthba_info *virthbainfo =
- (struct virthba_info *) scsidev->host->hostdata;
+ (struct virthba_info *)scsidev->host->hostdata;
int notifyresult = 0xffff;
wait_queue_head_t notifyevent;
@@ -767,8 +770,8 @@ forward_taskmgmt_command(enum task_mgmt_types tasktype,
cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
/* specify the event that has to be triggered when this */
/* cmd is complete */
- cmdrsp->scsitaskmgmt.notify = (void *) &notifyevent;
- cmdrsp->scsitaskmgmt.notifyresult = (void *) &notifyresult;
+ cmdrsp->scsitaskmgmt.notify = (void *)&notifyevent;
+ cmdrsp->scsitaskmgmt.notifyresult = (void *)&notifyresult;
/* save destination */
cmdrsp->scsitaskmgmt.tasktype = tasktype;
@@ -776,15 +779,15 @@ forward_taskmgmt_command(enum task_mgmt_types tasktype,
cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
cmdrsp->scsitaskmgmt.scsicmd =
- (void *) (uintptr_t)
+ (void *)(uintptr_t)
add_scsipending_entry_with_wait(virthbainfo,
CMD_SCSITASKMGMT_TYPE,
- (void *) cmdrsp);
+ (void *)cmdrsp);
uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
cmdrsp, IOCHAN_TO_IOPART,
&virthbainfo->chinfo.insertlock,
- DONT_ISSUE_INTERRUPT, (u64) NULL,
+ DONT_ISSUE_INTERRUPT, (u64)NULL,
OK_TO_WAIT, "vhba");
LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n",
cmdrsp->scsitaskmgmt.notify);
@@ -805,11 +808,11 @@ virthba_abort_handler(struct scsi_cmnd *scsicmd)
struct virtdisk_info *vdisk;
scsidev = scsicmd->device;
- for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
vdisk->next; vdisk = vdisk->next) {
- if ((scsidev->channel == vdisk->channel)
- && (scsidev->id == vdisk->id)
- && (scsidev->lun == vdisk->lun)) {
+ if ((scsidev->channel == vdisk->channel) &&
+ (scsidev->id == vdisk->id) &&
+ (scsidev->lun == vdisk->lun)) {
if (atomic_read(&vdisk->error_count) <
VIRTHBA_ERROR_COUNT) {
atomic_inc(&vdisk->error_count);
@@ -831,11 +834,11 @@ virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
struct virtdisk_info *vdisk;
scsidev = scsicmd->device;
- for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
vdisk->next; vdisk = vdisk->next) {
- if ((scsidev->channel == vdisk->channel)
- && (scsidev->id == vdisk->id)
- && (scsidev->lun == vdisk->lun)) {
+ if ((scsidev->channel == vdisk->channel) &&
+ (scsidev->id == vdisk->id) &&
+ (scsidev->lun == vdisk->lun)) {
if (atomic_read(&vdisk->error_count) <
VIRTHBA_ERROR_COUNT) {
atomic_inc(&vdisk->error_count);
@@ -857,11 +860,11 @@ virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
struct virtdisk_info *vdisk;
scsidev = scsicmd->device;
- for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
vdisk->next; vdisk = vdisk->next) {
- if ((scsidev->channel == vdisk->channel)
- && (scsidev->id == vdisk->id)
- && (scsidev->lun == vdisk->lun)) {
+ if ((scsidev->channel == vdisk->channel) &&
+ (scsidev->id == vdisk->id) &&
+ (scsidev->lun == vdisk->lun)) {
if (atomic_read(&vdisk->error_count) <
VIRTHBA_ERROR_COUNT) {
atomic_inc(&vdisk->error_count);
@@ -915,7 +918,7 @@ virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
struct uiscmdrsp *cmdrsp;
unsigned int i;
struct virthba_info *virthbainfo =
- (struct virthba_info *) scsihost->hostdata;
+ (struct virthba_info *)scsihost->hostdata;
struct scatterlist *sg = NULL;
struct scatterlist *sgl = NULL;
int sg_failed = 0;
@@ -940,9 +943,9 @@ virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
* will return the scsicmd pointer for completion
*/
insert_location =
- add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *) scsicmd);
+ add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *)scsicmd);
if (insert_location != -1) {
- cmdrsp->scsi.scsicmd = (void *) (uintptr_t) insert_location;
+ cmdrsp->scsi.scsicmd = (void *)(uintptr_t)insert_location;
} else {
LOGERR("Queue is full. Returning busy.\n");
kfree(cmdrsp);
@@ -961,13 +964,13 @@ virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
/* keep track of the max buffer length so far. */
- if (cmdrsp->scsi.bufflen > MaxBuffLen)
- MaxBuffLen = cmdrsp->scsi.bufflen;
+ if (cmdrsp->scsi.bufflen > max_buff_len)
+ max_buff_len = cmdrsp->scsi.bufflen;
if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
LOGERR("scsicmd use_sg:%d greater than MAX:%d\n",
scsi_sg_count(scsicmd), MAX_PHYS_INFO);
- del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
+ del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
kfree(cmdrsp);
return 1; /* reject the command */
}
@@ -989,22 +992,21 @@ virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
sgl = scsi_sglist(scsicmd);
for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
-
cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
cmdrsp->scsi.gpi_list[i].length = sg->length;
if ((i != 0) && (sg->offset != 0))
LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n",
- sg->offset);
+ sg->offset);
}
if (sg_failed) {
LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n",
- scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen);
+ scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen);
for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
LOGERR(" Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n",
- i, sg_page(sg),
- (unsigned long long) sg_phys(sg),
- sg->offset, sg->length);
+ i, sg_page(sg),
+ (unsigned long long)sg_phys(sg),
+ sg->offset, sg->length);
}
LOGERR("Done sg_list dump.\n");
/* BUG(); ***** For now, let it fail in uissd
@@ -1022,12 +1024,12 @@ virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
&virthbainfo->chinfo.
insertlock,
DONT_ISSUE_INTERRUPT,
- (u64) NULL, DONT_WAIT, "vhba");
+ (u64)NULL, DONT_WAIT, "vhba");
if (i == 0) {
/* queue must be full - and we said don't wait - return busy */
LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n");
kfree(cmdrsp);
- del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
+ del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
@@ -1047,9 +1049,9 @@ virthba_slave_alloc(struct scsi_device *scsidev)
struct virtdisk_info *vdisk;
struct virtdisk_info *tmpvdisk;
struct virthba_info *virthbainfo;
- struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
+ struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
- virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ virthbainfo = (struct virthba_info *)scsihost->hostdata;
if (!virthbainfo) {
LOGERR("Could not find virthba_info for scsihost\n");
return 0; /* even though we errored, treat as success */
@@ -1061,7 +1063,7 @@ virthba_slave_alloc(struct scsi_device *scsidev)
(vdisk->next->lun == scsidev->lun))
return 0;
}
- tmpvdisk = kzalloc(sizeof(struct virtdisk_info), GFP_ATOMIC);
+ tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
if (!tmpvdisk) { /* error allocating */
LOGERR("Could not allocate memory for disk\n");
return 0;
@@ -1089,9 +1091,9 @@ virthba_slave_destroy(struct scsi_device *scsidev)
*/
struct virtdisk_info *vdisk, *delvdisk;
struct virthba_info *virthbainfo;
- struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
+ struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
- virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ virthbainfo = (struct virthba_info *)scsihost->hostdata;
if (!virthbainfo)
LOGERR("Could not find virthba_info for scsihost\n");
for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
@@ -1120,7 +1122,7 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
scsidev = scsicmd->device;
memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
- sd = (struct sense_data *) scsicmd->sense_buffer;
+ sd = (struct sense_data *)scsicmd->sense_buffer;
/* Do not log errors for disk-not-present inquiries */
if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
@@ -1129,11 +1131,11 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
return;
/* Okay see what our error_count is here.... */
- for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
vdisk->next; vdisk = vdisk->next) {
- if ((scsidev->channel != vdisk->channel)
- || (scsidev->id != vdisk->id)
- || (scsidev->lun != vdisk->lun))
+ if ((scsidev->channel != vdisk->channel) ||
+ (scsidev->id != vdisk->id) ||
+ (scsidev->lun != vdisk->lun))
continue;
if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
@@ -1148,8 +1150,8 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (atomic_read(&vdisk->error_count) ==
VIRTHBA_ERROR_COUNT) {
LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%llu>\n",
- scsidev->host->host_no, scsidev->id,
- scsidev->channel, scsidev->lun);
+ scsidev->host->host_no, scsidev->id,
+ scsidev->channel, scsidev->lun);
}
atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
}
@@ -1169,8 +1171,8 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
struct virtdisk_info *vdisk;
scsidev = scsicmd->device;
- if ((cmdrsp->scsi.cmnd[0] == INQUIRY)
- && (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
+ if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+ (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
if (cmdrsp->scsi.no_disk_result == 0)
return;
@@ -1198,21 +1200,20 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
sg = scsi_sglist(scsicmd);
for (i = 0; i < scsi_sg_count(scsicmd); i++) {
DBGVER("copying OUT OF buf into 0x%p %d\n",
- sg_page(sg + i), sg[i].length);
+ sg_page(sg + i), sg[i].length);
thispage_orig = kmap_atomic(sg_page(sg + i));
- thispage = (void *) ((unsigned long)thispage_orig |
+ thispage = (void *)((unsigned long)thispage_orig |
sg[i].offset);
memcpy(thispage, buf + bufind, sg[i].length);
kunmap_atomic(thispage_orig);
bufind += sg[i].length;
}
} else {
-
vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
for ( ; vdisk->next; vdisk = vdisk->next) {
- if ((scsidev->channel != vdisk->channel)
- || (scsidev->id != vdisk->id)
- || (scsidev->lun != vdisk->lun))
+ if ((scsidev->channel != vdisk->channel) ||
+ (scsidev->id != vdisk->id) ||
+ (scsidev->lun != vdisk->lun))
continue;
if (atomic_read(&vdisk->ios_threshold) > 0) {
@@ -1249,8 +1250,8 @@ complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
{
/* copy the result of the taskmgmt and */
/* wake up the error handler that is waiting for this */
- *(int *) cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
- wake_up_all((wait_queue_head_t *) cmdrsp->vdiskmgmt.notify);
+ *(int *)cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
+ wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify);
LOGINF("set notify result to %d\n", cmdrsp->vdiskmgmt.result);
}
@@ -1259,15 +1260,15 @@ complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
{
/* copy the result of the taskmgmt and */
/* wake up the error handler that is waiting for this */
- *(int *) cmdrsp->scsitaskmgmt.notifyresult =
+ *(int *)cmdrsp->scsitaskmgmt.notifyresult =
cmdrsp->scsitaskmgmt.result;
- wake_up_all((wait_queue_head_t *) cmdrsp->scsitaskmgmt.notify);
+ wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify);
LOGINF("set notify result to %d\n", cmdrsp->scsitaskmgmt.result);
}
static void
drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
- struct uiscmdrsp *cmdrsp)
+ struct uiscmdrsp *cmdrsp)
{
unsigned long flags;
int qrslt = 0;
@@ -1277,7 +1278,7 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
while (1) {
spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
if (!spar_channel_client_acquire_os(dc->queueinfo->chan,
- "vhba")) {
+ "vhba")) {
spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
flags);
virthbainfo->acquire_failed_cnt++;
@@ -1294,14 +1295,15 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
* deletion
*/
scsicmd = del_scsipending_entry(virthbainfo,
- (uintptr_t) cmdrsp->scsi.scsicmd);
+ (uintptr_t)
+ cmdrsp->scsi.scsicmd);
if (!scsicmd)
break;
/* complete the orig cmd */
complete_scsi_command(cmdrsp, scsicmd);
} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
if (!del_scsipending_entry(virthbainfo,
- (uintptr_t) cmdrsp->scsitaskmgmt.scsicmd))
+ (uintptr_t)cmdrsp->scsitaskmgmt.scsicmd))
break;
complete_taskmgmt_command(cmdrsp);
} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
@@ -1313,7 +1315,8 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
process_disk_notify(shost, cmdrsp);
} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
if (!del_scsipending_entry(virthbainfo,
- (uintptr_t) cmdrsp->vdiskmgmt.scsicmd))
+ (uintptr_t)
+ cmdrsp->vdiskmgmt.scsicmd))
break;
complete_vdiskmgmt_command(cmdrsp);
} else
@@ -1347,7 +1350,7 @@ process_incoming_rsps(void *v)
while (1) {
wait_event_interruptible_timeout(virthbainfo->rsp_queue,
(atomic_read(&virthbainfo->interrupt_rcvd) == 1),
- usecs_to_jiffies(rsltq_wait_usecs));
+ usecs_to_jiffies(rsltq_wait_usecs));
atomic_set(&virthbainfo->interrupt_rcvd, 0);
/* drain queue */
drain_queue(virthbainfo, dc, cmdrsp);
@@ -1367,7 +1370,7 @@ process_incoming_rsps(void *v)
/*****************************************************/
static ssize_t info_debugfs_read(struct file *file,
- char __user *buf, size_t len, loff_t *offset)
+ char __user *buf, size_t len, loff_t *offset)
{
ssize_t bytes_read = 0;
int str_pos = 0;
@@ -1383,13 +1386,14 @@ static ssize_t info_debugfs_read(struct file *file,
return -ENOMEM;
for (i = 0; i < VIRTHBASOPENMAX; i++) {
- if (VirtHbasOpen[i].virthbainfo == NULL)
+ if (virthbas_open[i].virthbainfo == NULL)
continue;
- virthbainfo = VirtHbasOpen[i].virthbainfo;
+ virthbainfo = virthbas_open[i].virthbainfo;
str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "MaxBuffLen:%u\n", MaxBuffLen);
+ len - str_pos, "max_buff_len:%u\n",
+ max_buff_len);
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"\nvirthba result queue poll wait:%d usecs.\n",
@@ -1418,14 +1422,14 @@ static ssize_t info_debugfs_read(struct file *file,
return bytes_read;
}
-static ssize_t enable_ints_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
char buf[4];
int i, new_value;
struct virthba_info *virthbainfo;
- u64 __iomem *Features_addr;
+ u64 __iomem *features_addr;
u64 mask;
if (count >= ARRAY_SIZE(buf))
@@ -1434,37 +1438,37 @@ static ssize_t enable_ints_write(struct file *file,
buf[count] = '\0';
if (copy_from_user(buf, buffer, count)) {
LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
- (int) count, buf, count);
+ (int)count, buf, count);
return -EFAULT;
}
- i = kstrtoint(buf, 10 , &new_value);
+ i = kstrtoint(buf, 10, &new_value);
if (i != 0) {
LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>",
- (int) count, buf);
+ (int)count, buf);
return -EFAULT;
}
/* set all counts to new_value usually 0 */
for (i = 0; i < VIRTHBASOPENMAX; i++) {
- if (VirtHbasOpen[i].virthbainfo != NULL) {
- virthbainfo = VirtHbasOpen[i].virthbainfo;
- Features_addr =
+ if (virthbas_open[i].virthbainfo != NULL) {
+ virthbainfo = virthbas_open[i].virthbainfo;
+ features_addr =
&virthbainfo->chinfo.queueinfo->chan->features;
if (new_value == 1) {
mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
ULTRA_IO_DRIVER_DISABLES_INTS);
- uisqueue_interlocked_and(Features_addr, mask);
+ uisqueue_interlocked_and(features_addr, mask);
mask = ULTRA_IO_DRIVER_ENABLES_INTS;
- uisqueue_interlocked_or(Features_addr, mask);
+ uisqueue_interlocked_or(features_addr, mask);
rsltq_wait_usecs = 4000000;
} else {
mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
ULTRA_IO_DRIVER_DISABLES_INTS);
- uisqueue_interlocked_and(Features_addr, mask);
+ uisqueue_interlocked_and(features_addr, mask);
mask = ULTRA_IO_CHANNEL_IS_POLLING;
- uisqueue_interlocked_or(Features_addr, mask);
+ uisqueue_interlocked_or(features_addr, mask);
rsltq_wait_usecs = 4000;
}
}
@@ -1477,7 +1481,7 @@ static int
virthba_serverup(struct virtpci_dev *virtpcidev)
{
struct virthba_info *virthbainfo =
- (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
+ (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
scsihost)->hostdata;
DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
@@ -1535,26 +1539,26 @@ virthba_serverdown_complete(struct work_struct *work)
/* Fail Commands that weren't completed */
spin_lock_irqsave(&virthbainfo->privlock, flags);
for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
- pendingdel = &(virthbainfo->pending[i]);
+ pendingdel = &virthbainfo->pending[i];
switch (pendingdel->cmdtype) {
case CMD_SCSI_TYPE:
- scsicmd = (struct scsi_cmnd *) pendingdel->sent;
+ scsicmd = (struct scsi_cmnd *)pendingdel->sent;
scsicmd->result = (DID_RESET << 16);
if (scsicmd->scsi_done)
scsicmd->scsi_done(scsicmd);
break;
case CMD_SCSITASKMGMT_TYPE:
- cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
+ cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp,
cmdrsp->scsitaskmgmt.notify);
- *(int *) cmdrsp->scsitaskmgmt.notifyresult =
+ *(int *)cmdrsp->scsitaskmgmt.notifyresult =
TASK_MGMT_FAILED;
wake_up_all((wait_queue_head_t *)
cmdrsp->scsitaskmgmt.notify);
break;
case CMD_VDISKMGMT_TYPE:
- cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
- *(int *) cmdrsp->vdiskmgmt.notifyresult =
+ cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
+ *(int *)cmdrsp->vdiskmgmt.notifyresult =
VDISK_MGMT_FAILED;
wake_up_all((wait_queue_head_t *)
cmdrsp->vdiskmgmt.notify);
@@ -1584,8 +1588,10 @@ virthba_serverdown_complete(struct work_struct *work)
static int
virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
{
+ int stat = 1;
+
struct virthba_info *virthbainfo =
- (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
+ (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
scsihost)->hostdata;
DBGINF("virthba_serverdown");
@@ -1598,11 +1604,12 @@ virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
&virthbainfo->serverdown_completion);
} else if (virthbainfo->serverchangingstate) {
LOGERR("Server already processing change state message\n");
- return 0;
- } else
+ stat = 0;
+ } else {
LOGERR("Server already down, but another server down message received.");
+ }
- return 1;
+ return stat;
}
/*****************************************************/
@@ -1655,23 +1662,22 @@ virthba_mod_init(void)
POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
POSTCODE_SEVERITY_ERR);
} else {
-
/* create the debugfs directories and entries */
virthba_debugfs_dir = debugfs_create_dir("virthba", NULL);
debugfs_create_file("info", S_IRUSR, virthba_debugfs_dir,
- NULL, &debugfs_info_fops);
+ NULL, &debugfs_info_fops);
debugfs_create_u32("rqwait_usecs", S_IRUSR | S_IWUSR,
- virthba_debugfs_dir, &rsltq_wait_usecs);
+ virthba_debugfs_dir, &rsltq_wait_usecs);
debugfs_create_file("enable_ints", S_IWUSR,
- virthba_debugfs_dir, NULL,
- &debugfs_enable_ints_fops);
- /* Initialize DARWorkQ */
- INIT_WORK(&DARWorkQ, doDiskAddRemove);
- spin_lock_init(&DARWorkQLock);
+ virthba_debugfs_dir, NULL,
+ &debugfs_enable_ints_fops);
+ /* Initialize dar_work_queue */
+ INIT_WORK(&dar_work_queue, do_disk_add_remove);
+ spin_lock_init(&dar_work_queue_lock);
/* clear out array */
for (i = 0; i < VIRTHBASOPENMAX; i++)
- VirtHbasOpen[i].virthbainfo = NULL;
+ virthbas_open[i].virthbainfo = NULL;
/* Initialize the serverdown workqueue */
virthba_serverdown_workqueue =
create_singlethread_workqueue("virthba_serverdown");
@@ -1746,7 +1752,6 @@ virthba_mod_exit(void)
debugfs_remove_recursive(virthba_debugfs_dir);
LOGINF("Leaving virthba_mod_exit\n");
-
}
/* specify function to be run at module insertion time */
diff --git a/drivers/staging/unisys/virtpci/virtpci.c b/drivers/staging/unisys/virtpci/virtpci.c
index 39b828dce503..8fdfd6f3605f 100644
--- a/drivers/staging/unisys/virtpci/virtpci.c
+++ b/drivers/staging/unisys/virtpci/virtpci.c
@@ -104,8 +104,6 @@ static ssize_t virtpci_driver_attr_store(struct kobject *kobj,
const char *buf, size_t count);
static int virtpci_bus_match(struct device *dev, struct device_driver *drv);
static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env);
-static int virtpci_device_suspend(struct device *dev, pm_message_t state);
-static int virtpci_device_resume(struct device *dev);
static int virtpci_device_probe(struct device *dev);
static int virtpci_device_remove(struct device *dev);
@@ -128,8 +126,6 @@ static struct bus_type virtpci_bus_type = {
.name = "uisvirtpci",
.match = virtpci_bus_match,
.uevent = virtpci_uevent,
- .suspend = virtpci_device_suspend,
- .resume = virtpci_device_resume,
};
static struct device virtpci_rootbus_device = {
@@ -279,9 +275,9 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
}
- write_vbus_chp_info(vbus->platform_data /* chanptr */ ,
+ write_vbus_chp_info(vbus->platform_data /* chanptr */,
&chipset_driver_info);
- write_vbus_bus_info(vbus->platform_data /* chanptr */ ,
+ write_vbus_bus_info(vbus->platform_data /* chanptr */,
&bus_driver_info);
LOGINF("Added vbus %d; device %s created successfully\n",
addparams->bus_no, BUS_ID(vbus));
@@ -466,7 +462,7 @@ static int pause_vhba(struct pause_virt_guestpart *pauseparams)
GET_SCSIADAPINFO_FROM_CHANPTR(pauseparams->chanptr);
LOGINF("Pausing vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
- i = virtpci_device_serverdown(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ i = virtpci_device_serverdown(NULL /*no parent bus */, VIRTHBA_TYPE,
&scsi.wwnn, NULL);
if (i)
LOGINF("Paused vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
@@ -487,7 +483,7 @@ static int pause_vnic(struct pause_virt_guestpart *pauseparams)
LOGINF("Pausing vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
- i = virtpci_device_serverdown(NULL /*no parent bus */ , VIRTNIC_TYPE,
+ i = virtpci_device_serverdown(NULL /*no parent bus */, VIRTNIC_TYPE,
NULL, net.mac_addr);
if (i) {
LOGINF(" Paused vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -508,7 +504,7 @@ static int resume_vhba(struct resume_virt_guestpart *resumeparams)
GET_SCSIADAPINFO_FROM_CHANPTR(resumeparams->chanptr);
LOGINF("Resuming vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
- i = virtpci_device_serverup(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ i = virtpci_device_serverup(NULL /*no parent bus */, VIRTHBA_TYPE,
&scsi.wwnn, NULL);
if (i)
LOGINF("Resumed vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
@@ -530,7 +526,7 @@ resume_vnic(struct resume_virt_guestpart *resumeparams)
LOGINF("Resuming vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
- i = virtpci_device_serverup(NULL /*no parent bus */ , VIRTNIC_TYPE,
+ i = virtpci_device_serverup(NULL /*no parent bus */, VIRTNIC_TYPE,
NULL, net.mac_addr);
if (i) {
LOGINF(" Resumed vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -551,7 +547,7 @@ static int delete_vhba(struct del_virt_guestpart *delparams)
GET_SCSIADAPINFO_FROM_CHANPTR(delparams->chanptr);
LOGINF("Deleting vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
- i = virtpci_device_del(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ i = virtpci_device_del(NULL /*no parent bus */, VIRTHBA_TYPE,
&scsi.wwnn, NULL);
if (i) {
LOGINF("Deleted vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
@@ -574,7 +570,7 @@ static int delete_vnic(struct del_virt_guestpart *delparams)
LOGINF("Deleting vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
- i = virtpci_device_del(NULL /*no parent bus */ , VIRTNIC_TYPE, NULL,
+ i = virtpci_device_del(NULL /*no parent bus */, VIRTNIC_TYPE, NULL,
net.mac_addr);
if (i) {
LOGINF("Deleted vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -757,18 +753,6 @@ static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static int virtpci_device_suspend(struct device *dev, pm_message_t state)
-{
- DBGINF("In virtpci_device_suspend -NYI ****\n");
- return 0;
-}
-
-static int virtpci_device_resume(struct device *dev)
-{
- DBGINF("In virtpci_device_resume -NYI ****\n");
- return 0;
-}
-
/* For a child device just created on a client bus, fill in
* information about the driver that is controlling this device into
* the appropriate slot within the vbus channel of the bus
@@ -1338,18 +1322,13 @@ static ssize_t virtpci_driver_attr_show(struct kobject *kobj,
ssize_t ret = 0;
struct driver_private *dprivate = to_driver(kobj);
- struct device_driver *driver;
+ struct device_driver *driver = dprivate->driver;
- if (dprivate != NULL)
- driver = dprivate->driver;
- else
- driver = NULL;
+ DBGINF("In virtpci_driver_attr_show driver->name:%s\n", driver->name);
+
+ if (dattr->show)
+ ret = dattr->show(driver, buf);
- DBGINF("In virtpci_driver_attr_show driver->name:%s\n", driver->name);
- if (driver) {
- if (dattr->show)
- ret = dattr->show(driver, buf);
- }
return ret;
}
@@ -1361,19 +1340,13 @@ static ssize_t virtpci_driver_attr_store(struct kobject *kobj,
ssize_t ret = 0;
struct driver_private *dprivate = to_driver(kobj);
- struct device_driver *driver;
-
- if (dprivate != NULL)
- driver = dprivate->driver;
- else
- driver = NULL;
+ struct device_driver *driver = dprivate->driver;
DBGINF("In virtpci_driver_attr_store driver->name:%s\n", driver->name);
- if (driver) {
- if (dattr->store)
- ret = dattr->store(driver, buf, count);
- }
+ if (dattr->store)
+ ret = dattr->store(driver, buf, count);
+
return ret;
}
diff --git a/drivers/staging/unisys/visorchannel/visorchannel.h b/drivers/staging/unisys/visorchannel/visorchannel.h
index 5061edff959a..63f1b9760373 100644
--- a/drivers/staging/unisys/visorchannel/visorchannel.h
+++ b/drivers/staging/unisys/visorchannel/visorchannel.h
@@ -29,49 +29,48 @@
#define BOOL int
#endif
-/* VISORCHANNEL is an opaque structure to users.
- * Fields are declared only in the implementation .c files.
- */
-typedef struct VISORCHANNEL_Tag VISORCHANNEL;
-
/* Note that for visorchannel_create() and visorchannel_create_overlapped(),
- * <channelBytes> and <guid> arguments may be 0 if we are a channel CLIENT.
+ * <channel_bytes> and <guid> arguments may be 0 if we are a channel CLIENT.
* In this case, the values can simply be read from the channel header.
*/
-VISORCHANNEL *visorchannel_create(HOSTADDRESS physaddr,
- ulong channelBytes, uuid_le guid);
-VISORCHANNEL *visorchannel_create_overlapped(ulong channelBytes,
- VISORCHANNEL *parent, ulong off,
- uuid_le guid);
-VISORCHANNEL *visorchannel_create_with_lock(HOSTADDRESS physaddr,
- ulong channelBytes, uuid_le guid);
-VISORCHANNEL *visorchannel_create_overlapped_with_lock(ulong channelBytes,
- VISORCHANNEL *parent,
- ulong off, uuid_le guid);
-void visorchannel_destroy(VISORCHANNEL *channel);
-int visorchannel_read(VISORCHANNEL *channel, ulong offset,
+struct visorchannel *visorchannel_create(HOSTADDRESS physaddr,
+ ulong channel_bytes, uuid_le guid);
+struct visorchannel *visorchannel_create_overlapped(ulong channel_bytes,
+ struct visorchannel *parent,
+ ulong off, uuid_le guid);
+struct visorchannel *visorchannel_create_with_lock(HOSTADDRESS physaddr,
+ ulong channel_bytes,
+ uuid_le guid);
+struct visorchannel *visorchannel_create_overlapped_with_lock(
+ ulong channel_bytes,
+ struct visorchannel *parent,
+ ulong off, uuid_le guid);
+void visorchannel_destroy(struct visorchannel *channel);
+int visorchannel_read(struct visorchannel *channel, ulong offset,
void *local, ulong nbytes);
-int visorchannel_write(VISORCHANNEL *channel, ulong offset,
+int visorchannel_write(struct visorchannel *channel, ulong offset,
void *local, ulong nbytes);
-int visorchannel_clear(VISORCHANNEL *channel, ulong offset,
+int visorchannel_clear(struct visorchannel *channel, ulong offset,
u8 ch, ulong nbytes);
-BOOL visorchannel_signalremove(VISORCHANNEL *channel, u32 queue, void *msg);
-BOOL visorchannel_signalinsert(VISORCHANNEL *channel, u32 queue, void *msg);
-int visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, u32 queue);
-int visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, u32 queue);
-
-HOSTADDRESS visorchannel_get_physaddr(VISORCHANNEL *channel);
-ulong visorchannel_get_nbytes(VISORCHANNEL *channel);
-char *visorchannel_id(VISORCHANNEL *channel, char *s);
-char *visorchannel_zoneid(VISORCHANNEL *channel, char *s);
-u64 visorchannel_get_clientpartition(VISORCHANNEL *channel);
-uuid_le visorchannel_get_uuid(VISORCHANNEL *channel);
-struct memregion *visorchannel_get_memregion(VISORCHANNEL *channel);
+BOOL visorchannel_signalremove(struct visorchannel *channel, u32 queue,
+ void *msg);
+BOOL visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
+ void *msg);
+int visorchannel_signalqueue_slots_avail(struct visorchannel *channel,
+ u32 queue);
+int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
+HOSTADDRESS visorchannel_get_physaddr(struct visorchannel *channel);
+ulong visorchannel_get_nbytes(struct visorchannel *channel);
+char *visorchannel_id(struct visorchannel *channel, char *s);
+char *visorchannel_zoneid(struct visorchannel *channel, char *s);
+u64 visorchannel_get_clientpartition(struct visorchannel *channel);
+uuid_le visorchannel_get_uuid(struct visorchannel *channel);
+struct memregion *visorchannel_get_memregion(struct visorchannel *channel);
char *visorchannel_uuid_id(uuid_le *guid, char *s);
-void visorchannel_debug(VISORCHANNEL *channel, int nQueues,
+void visorchannel_debug(struct visorchannel *channel, int num_queues,
struct seq_file *seq, u32 off);
-void visorchannel_dump_section(VISORCHANNEL *chan, char *s,
+void visorchannel_dump_section(struct visorchannel *chan, char *s,
int off, int len, struct seq_file *seq);
-void __iomem *visorchannel_get_header(VISORCHANNEL *channel);
+void __iomem *visorchannel_get_header(struct visorchannel *channel);
#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
index 36559d5fa673..0188ef866fdd 100644
--- a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
+++ b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
@@ -28,14 +28,15 @@
#define MYDRVNAME "visorchannel"
-struct VISORCHANNEL_Tag {
+struct visorchannel {
struct memregion *memregion; /* from visor_memregion_create() */
struct channel_header chan_hdr;
uuid_le guid;
ulong size;
- BOOL needs_lock;
- spinlock_t insert_lock;
- spinlock_t remove_lock;
+ BOOL needs_lock; /* channel creator knows if more than one
+ * thread will be inserting or removing */
+ spinlock_t insert_lock; /* protect head writes in chan_hdr */
+ spinlock_t remove_lock; /* protect tail writes in chan_hdr */
struct {
struct signal_queue_header req_queue;
@@ -45,18 +46,18 @@ struct VISORCHANNEL_Tag {
} safe_uis_queue;
};
-/* Creates the VISORCHANNEL abstraction for a data area in memory, but does
- * NOT modify this data area.
+/* Creates the struct visorchannel abstraction for a data area in memory,
+ * but does NOT modify this data area.
*/
-static VISORCHANNEL *
-visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
- VISORCHANNEL *parent, ulong off, uuid_le guid,
+static struct visorchannel *
+visorchannel_create_guts(HOSTADDRESS physaddr, ulong channel_bytes,
+ struct visorchannel *parent, ulong off, uuid_le guid,
BOOL needs_lock)
{
- VISORCHANNEL *p = NULL;
+ struct visorchannel *p = NULL;
void *rc = NULL;
- p = kmalloc(sizeof(VISORCHANNEL), GFP_KERNEL|__GFP_NORETRY);
+ p = kmalloc(sizeof(*p), GFP_KERNEL|__GFP_NORETRY);
if (p == NULL) {
ERRDRV("allocation failed: (status=0)\n");
rc = NULL;
@@ -87,18 +88,18 @@ visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
rc = NULL;
goto cleanup;
}
- if (channelBytes == 0)
+ if (channel_bytes == 0)
/* we had better be a CLIENT of this channel */
- channelBytes = (ulong)p->chan_hdr.size;
+ channel_bytes = (ulong)p->chan_hdr.size;
if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
/* we had better be a CLIENT of this channel */
guid = p->chan_hdr.chtype;
- if (visor_memregion_resize(p->memregion, channelBytes) < 0) {
+ if (visor_memregion_resize(p->memregion, channel_bytes) < 0) {
ERRDRV("visor_memregion_resize failed: (status=0)\n");
rc = NULL;
goto cleanup;
}
- p->size = channelBytes;
+ p->size = channel_bytes;
p->guid = guid;
rc = p;
@@ -113,44 +114,45 @@ cleanup:
return rc;
}
-VISORCHANNEL *
-visorchannel_create(HOSTADDRESS physaddr, ulong channelBytes, uuid_le guid)
+struct visorchannel *
+visorchannel_create(HOSTADDRESS physaddr, ulong channel_bytes, uuid_le guid)
{
- return visorchannel_create_guts(physaddr, channelBytes, NULL, 0, guid,
+ return visorchannel_create_guts(physaddr, channel_bytes, NULL, 0, guid,
FALSE);
}
EXPORT_SYMBOL_GPL(visorchannel_create);
-VISORCHANNEL *
-visorchannel_create_with_lock(HOSTADDRESS physaddr, ulong channelBytes,
+struct visorchannel *
+visorchannel_create_with_lock(HOSTADDRESS physaddr, ulong channel_bytes,
uuid_le guid)
{
- return visorchannel_create_guts(physaddr, channelBytes, NULL, 0, guid,
+ return visorchannel_create_guts(physaddr, channel_bytes, NULL, 0, guid,
TRUE);
}
EXPORT_SYMBOL_GPL(visorchannel_create_with_lock);
-VISORCHANNEL *
-visorchannel_create_overlapped(ulong channelBytes,
- VISORCHANNEL *parent, ulong off, uuid_le guid)
+struct visorchannel *
+visorchannel_create_overlapped(ulong channel_bytes,
+ struct visorchannel *parent, ulong off,
+ uuid_le guid)
{
- return visorchannel_create_guts(0, channelBytes, parent, off, guid,
+ return visorchannel_create_guts(0, channel_bytes, parent, off, guid,
FALSE);
}
EXPORT_SYMBOL_GPL(visorchannel_create_overlapped);
-VISORCHANNEL *
-visorchannel_create_overlapped_with_lock(ulong channelBytes,
- VISORCHANNEL *parent, ulong off,
+struct visorchannel *
+visorchannel_create_overlapped_with_lock(ulong channel_bytes,
+ struct visorchannel *parent, ulong off,
uuid_le guid)
{
- return visorchannel_create_guts(0, channelBytes, parent, off, guid,
+ return visorchannel_create_guts(0, channel_bytes, parent, off, guid,
TRUE);
}
EXPORT_SYMBOL_GPL(visorchannel_create_overlapped_with_lock);
void
-visorchannel_destroy(VISORCHANNEL *channel)
+visorchannel_destroy(struct visorchannel *channel)
{
if (channel == NULL)
return;
@@ -163,14 +165,14 @@ visorchannel_destroy(VISORCHANNEL *channel)
EXPORT_SYMBOL_GPL(visorchannel_destroy);
HOSTADDRESS
-visorchannel_get_physaddr(VISORCHANNEL *channel)
+visorchannel_get_physaddr(struct visorchannel *channel)
{
return visor_memregion_get_physaddr(channel->memregion);
}
EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
ulong
-visorchannel_get_nbytes(VISORCHANNEL *channel)
+visorchannel_get_nbytes(struct visorchannel *channel)
{
return channel->size;
}
@@ -185,42 +187,42 @@ visorchannel_uuid_id(uuid_le *guid, char *s)
EXPORT_SYMBOL_GPL(visorchannel_uuid_id);
char *
-visorchannel_id(VISORCHANNEL *channel, char *s)
+visorchannel_id(struct visorchannel *channel, char *s)
{
return visorchannel_uuid_id(&channel->guid, s);
}
EXPORT_SYMBOL_GPL(visorchannel_id);
char *
-visorchannel_zoneid(VISORCHANNEL *channel, char *s)
+visorchannel_zoneid(struct visorchannel *channel, char *s)
{
return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
}
EXPORT_SYMBOL_GPL(visorchannel_zoneid);
HOSTADDRESS
-visorchannel_get_clientpartition(VISORCHANNEL *channel)
+visorchannel_get_clientpartition(struct visorchannel *channel)
{
return channel->chan_hdr.partition_handle;
}
EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
uuid_le
-visorchannel_get_uuid(VISORCHANNEL *channel)
+visorchannel_get_uuid(struct visorchannel *channel)
{
return channel->guid;
}
EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
struct memregion *
-visorchannel_get_memregion(VISORCHANNEL *channel)
+visorchannel_get_memregion(struct visorchannel *channel)
{
return channel->memregion;
}
EXPORT_SYMBOL_GPL(visorchannel_get_memregion);
int
-visorchannel_read(VISORCHANNEL *channel, ulong offset,
+visorchannel_read(struct visorchannel *channel, ulong offset,
void *local, ulong nbytes)
{
int rc = visor_memregion_read(channel->memregion, offset,
@@ -235,7 +237,7 @@ visorchannel_read(VISORCHANNEL *channel, ulong offset,
EXPORT_SYMBOL_GPL(visorchannel_read);
int
-visorchannel_write(VISORCHANNEL *channel, ulong offset,
+visorchannel_write(struct visorchannel *channel, ulong offset,
void *local, ulong nbytes)
{
if (offset == 0 && nbytes >= sizeof(struct channel_header))
@@ -246,7 +248,8 @@ visorchannel_write(VISORCHANNEL *channel, ulong offset,
EXPORT_SYMBOL_GPL(visorchannel_write);
int
-visorchannel_clear(VISORCHANNEL *channel, ulong offset, u8 ch, ulong nbytes)
+visorchannel_clear(struct visorchannel *channel, ulong offset, u8 ch,
+ ulong nbytes)
{
int rc = -1;
int bufsize = 65536;
@@ -285,7 +288,7 @@ cleanup:
EXPORT_SYMBOL_GPL(visorchannel_clear);
void __iomem *
-visorchannel_get_header(VISORCHANNEL *channel)
+visorchannel_get_header(struct visorchannel *channel)
{
return (void __iomem *)&channel->chan_hdr;
}
@@ -316,7 +319,7 @@ EXPORT_SYMBOL_GPL(visorchannel_get_header);
sizeof((sig_hdr)->FIELD)) >= 0)
static BOOL
-sig_read_header(VISORCHANNEL *channel, u32 queue,
+sig_read_header(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr)
{
BOOL rc = FALSE;
@@ -344,7 +347,7 @@ cleanup:
}
static BOOL
-sig_do_data(VISORCHANNEL *channel, u32 queue,
+sig_do_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data,
BOOL is_write)
{
@@ -373,14 +376,14 @@ cleanup:
}
static inline BOOL
-sig_read_data(VISORCHANNEL *channel, u32 queue,
+sig_read_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE);
}
static inline BOOL
-sig_write_data(VISORCHANNEL *channel, u32 queue,
+sig_write_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE);
@@ -408,27 +411,21 @@ safe_sig_queue_validate(struct signal_queue_header *psafe_sqh,
return 1;
} /* end safe_sig_queue_validate */
-BOOL
-visorchannel_signalremove(VISORCHANNEL *channel, u32 queue, void *msg)
+static BOOL
+signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
{
- BOOL rc = FALSE;
struct signal_queue_header sig_hdr;
- if (channel->needs_lock)
- spin_lock(&channel->remove_lock);
-
if (!sig_read_header(channel, queue, &sig_hdr)) {
- rc = FALSE;
- goto cleanup;
- }
- if (sig_hdr.head == sig_hdr.tail) {
- rc = FALSE; /* no signals to remove */
- goto cleanup;
+ return FALSE;
}
+ if (sig_hdr.head == sig_hdr.tail)
+ return FALSE; /* no signals to remove */
+
sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg)) {
- ERRDRV("sig_read_data failed: (status=%d)\n", rc);
- goto cleanup;
+ ERRDRV("sig_read_data failed\n");
+ return FALSE;
}
sig_hdr.num_received++;
@@ -437,53 +434,54 @@ visorchannel_signalremove(VISORCHANNEL *channel, u32 queue, void *msg)
*/
mb(); /* required for channel synch */
if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail)) {
- ERRDRV("visor_memregion_write of Tail failed: (status=%d)\n",
- rc);
- goto cleanup;
+ ERRDRV("visor_memregion_write of Tail failed\n");
+ return FALSE;
}
if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received)) {
- ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n",
- rc);
- goto cleanup;
+ ERRDRV("visor_memregion_write of NumSignalsReceived failed\n");
+ return FALSE;
}
- rc = TRUE;
-cleanup:
- if (channel->needs_lock)
+ return TRUE;
+}
+
+BOOL
+visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
+{
+ BOOL rc;
+
+ if (channel->needs_lock) {
+ spin_lock(&channel->remove_lock);
+ rc = signalremove_inner(channel, queue, msg);
spin_unlock(&channel->remove_lock);
+ } else {
+ rc = signalremove_inner(channel, queue, msg);
+ }
return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);
-BOOL
-visorchannel_signalinsert(VISORCHANNEL *channel, u32 queue, void *msg)
+static BOOL
+signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
{
- BOOL rc = FALSE;
struct signal_queue_header sig_hdr;
- if (channel->needs_lock)
- spin_lock(&channel->insert_lock);
-
if (!sig_read_header(channel, queue, &sig_hdr)) {
- rc = FALSE;
- goto cleanup;
+ return FALSE;
}
sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
if (sig_hdr.head == sig_hdr.tail) {
sig_hdr.num_overflows++;
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows)) {
- ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n",
- rc);
- goto cleanup;
- }
- rc = FALSE;
- goto cleanup;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows))
+ ERRDRV("visor_memregion_write of NumOverflows failed\n");
+
+ return FALSE;
}
if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg)) {
- ERRDRV("sig_write_data failed: (status=%d)\n", rc);
- goto cleanup;
+ ERRDRV("sig_write_data failed\n");
+ return FALSE;
}
sig_hdr.num_sent++;
@@ -492,26 +490,36 @@ visorchannel_signalinsert(VISORCHANNEL *channel, u32 queue, void *msg)
*/
mb(); /* required for channel synch */
if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head)) {
- ERRDRV("visor_memregion_write of Head failed: (status=%d)\n",
- rc);
- goto cleanup;
+ ERRDRV("visor_memregion_write of Head failed\n");
+ return FALSE;
}
if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent)) {
- ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n",
- rc);
- goto cleanup;
+ ERRDRV("visor_memregion_write of NumSignalsSent failed\n");
+ return FALSE;
}
- rc = TRUE;
-cleanup:
- if (channel->needs_lock)
+
+ return TRUE;
+}
+
+BOOL
+visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
+{
+ BOOL rc;
+
+ if (channel->needs_lock) {
+ spin_lock(&channel->insert_lock);
+ rc = signalinsert_inner(channel, queue, msg);
spin_unlock(&channel->insert_lock);
+ } else {
+ rc = signalinsert_inner(channel, queue, msg);
+ }
return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
int
-visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, u32 queue)
+visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue)
{
struct signal_queue_header sig_hdr;
u32 slots_avail, slots_used;
@@ -530,7 +538,7 @@ visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, u32 queue)
EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
int
-visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, u32 queue)
+visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue)
{
struct signal_queue_header sig_hdr;
@@ -565,7 +573,7 @@ sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq)
}
void
-visorchannel_debug(VISORCHANNEL *channel, int nQueues,
+visorchannel_debug(struct visorchannel *channel, int num_queues,
struct seq_file *seq, u32 off)
{
HOSTADDRESS addr = 0;
@@ -625,7 +633,7 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
if ((phdr->ch_space_offset == 0) || (errcode < 0))
;
else
- for (i = 0; i < nQueues; i++) {
+ for (i = 0; i < num_queues; i++) {
struct signal_queue_header q;
errcode = visorchannel_read(channel,
@@ -647,7 +655,7 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
EXPORT_SYMBOL_GPL(visorchannel_debug);
void
-visorchannel_dump_section(VISORCHANNEL *chan, char *s,
+visorchannel_dump_section(struct visorchannel *chan, char *s,
int off, int len, struct seq_file *seq)
{
char *buf, *tbuf, *fmtbuf;
diff --git a/drivers/staging/unisys/visorchipset/file.c b/drivers/staging/unisys/visorchipset/file.c
index 373fa36b7119..e51fd4e3fa2d 100644
--- a/drivers/staging/unisys/visorchipset/file.c
+++ b/drivers/staging/unisys/visorchipset/file.c
@@ -28,85 +28,75 @@
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_file_c
-static struct cdev Cdev;
-static VISORCHANNEL **PControlVm_channel;
-static dev_t MajorDev = -1; /**< indicates major num for device */
-static BOOL Registered = FALSE;
+static struct cdev file_cdev;
+static struct visorchannel **file_controlvm_channel;
+static dev_t majordev = -1; /**< indicates major num for device */
+static BOOL registered = FALSE;
static int visorchipset_open(struct inode *inode, struct file *file);
static int visorchipset_release(struct inode *inode, struct file *file);
static int visorchipset_mmap(struct file *file, struct vm_area_struct *vma);
-#ifdef HAVE_UNLOCKED_IOCTL
long visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-#else
-int visorchipset_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
-#endif
static const struct file_operations visorchipset_fops = {
.owner = THIS_MODULE,
.open = visorchipset_open,
.read = NULL,
.write = NULL,
-#ifdef HAVE_UNLOCKED_IOCTL
.unlocked_ioctl = visorchipset_ioctl,
-#else
- .ioctl = visorchipset_ioctl,
-#endif
.release = visorchipset_release,
.mmap = visorchipset_mmap,
};
int
-visorchipset_file_init(dev_t majorDev, VISORCHANNEL **pControlVm_channel)
+visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
- int rc = -1;
+ int rc = 0;
- PControlVm_channel = pControlVm_channel;
- MajorDev = majorDev;
- cdev_init(&Cdev, &visorchipset_fops);
- Cdev.owner = THIS_MODULE;
- if (MAJOR(MajorDev) == 0) {
+ file_controlvm_channel = controlvm_channel;
+ majordev = major_dev;
+ cdev_init(&file_cdev, &visorchipset_fops);
+ file_cdev.owner = THIS_MODULE;
+ if (MAJOR(majordev) == 0) {
/* dynamic major device number registration required */
- if (alloc_chrdev_region(&MajorDev, 0, 1, MYDRVNAME) < 0) {
+ if (alloc_chrdev_region(&majordev, 0, 1, MYDRVNAME) < 0) {
ERRDRV("Unable to allocate+register char device %s",
MYDRVNAME);
- goto Away;
+ return -1;
}
- Registered = TRUE;
- INFODRV("New major number %d registered\n", MAJOR(MajorDev));
+ registered = TRUE;
+ INFODRV("New major number %d registered\n", MAJOR(majordev));
} else {
/* static major device number registration required */
- if (register_chrdev_region(MajorDev, 1, MYDRVNAME) < 0) {
+ if (register_chrdev_region(majordev, 1, MYDRVNAME) < 0) {
ERRDRV("Unable to register char device %s", MYDRVNAME);
- goto Away;
+ return -1;
}
- Registered = TRUE;
- INFODRV("Static major number %d registered\n", MAJOR(MajorDev));
+ registered = TRUE;
+ INFODRV("Static major number %d registered\n", MAJOR(majordev));
}
- if (cdev_add(&Cdev, MKDEV(MAJOR(MajorDev), 0), 1) < 0) {
+ rc = cdev_add(&file_cdev, MKDEV(MAJOR(majordev), 0), 1);
+ if (rc < 0) {
ERRDRV("failed to create char device: (status=%d)\n", rc);
- goto Away;
+ return -1;
}
INFODRV("Registered char device for %s (major=%d)",
- MYDRVNAME, MAJOR(MajorDev));
- rc = 0;
-Away:
- return rc;
+ MYDRVNAME, MAJOR(majordev));
+ return 0;
}
void
visorchipset_file_cleanup(void)
{
- if (Cdev.ops != NULL)
- cdev_del(&Cdev);
- Cdev.ops = NULL;
- if (Registered) {
- if (MAJOR(MajorDev) >= 0) {
- unregister_chrdev_region(MajorDev, 1);
- MajorDev = MKDEV(0, 0);
+ if (file_cdev.ops != NULL)
+ cdev_del(&file_cdev);
+ file_cdev.ops = NULL;
+ if (registered) {
+ if (MAJOR(majordev) >= 0) {
+ unregister_chrdev_region(majordev, 1);
+ majordev = MKDEV(0, 0);
}
- Registered = FALSE;
+ registered = FALSE;
}
}
@@ -114,17 +104,12 @@ static int
visorchipset_open(struct inode *inode, struct file *file)
{
unsigned minor_number = iminor(inode);
- int rc = -ENODEV;
DEBUGDRV("%s", __func__);
if (minor_number != 0)
- goto Away;
+ return -ENODEV;
file->private_data = NULL;
- rc = 0;
-Away:
- if (rc < 0)
- ERRDRV("%s minor=%d failed", __func__, minor_number);
- return rc;
+ return 0;
}
static int
@@ -137,7 +122,7 @@ visorchipset_release(struct inode *inode, struct file *file)
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
- ulong physAddr = 0;
+ ulong physaddr = 0;
ulong offset = vma->vm_pgoff << PAGE_SHIFT;
GUEST_PHYSICAL_ADDRESS addr = 0;
@@ -150,11 +135,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
switch (offset) {
case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
vma->vm_flags |= VM_IO;
- if (*PControlVm_channel == NULL) {
+ if (*file_controlvm_channel == NULL) {
ERRDRV("%s no controlvm channel yet", __func__);
return -ENXIO;
}
- visorchannel_read(*PControlVm_channel,
+ visorchannel_read(*file_controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
gp_control_channel),
&addr, sizeof(addr));
@@ -162,10 +147,10 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
ERRDRV("%s control channel address is 0", __func__);
return -ENXIO;
}
- physAddr = (ulong) (addr);
- DEBUGDRV("mapping physical address = 0x%lx", physAddr);
+ physaddr = (ulong)addr;
+ DEBUGDRV("mapping physical address = 0x%lx", physaddr);
if (remap_pfn_range(vma, vma->vm_start,
- physAddr >> PAGE_SHIFT,
+ physaddr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
/*pgprot_noncached */
(vma->vm_page_prot))) {
@@ -180,16 +165,8 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-#ifdef HAVE_UNLOCKED_IOCTL
-long
-visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-#else
-int
-visorchipset_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-#endif
+long visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- int rc = SUCCESS;
s64 adjustment;
s64 vrtc_offset;
@@ -200,28 +177,21 @@ visorchipset_ioctl(struct inode *inode, struct file *file,
vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
if (copy_to_user
((void __user *)arg, &vrtc_offset, sizeof(vrtc_offset))) {
- rc = -EFAULT;
- goto Away;
+ return -EFAULT;
}
DBGINF("insde visorchipset_ioctl, cmd=%d, vrtc_offset=%lld",
cmd, vrtc_offset);
- break;
+ return SUCCESS;
case VMCALL_UPDATE_PHYSICAL_TIME:
if (copy_from_user
(&adjustment, (void __user *)arg, sizeof(adjustment))) {
- rc = -EFAULT;
- goto Away;
+ return -EFAULT;
}
DBGINF("insde visorchipset_ioctl, cmd=%d, adjustment=%lld", cmd,
adjustment);
- rc = issue_vmcall_update_physical_time(adjustment);
- break;
+ return issue_vmcall_update_physical_time(adjustment);
default:
LOGERR("visorchipset_ioctl received invalid command");
- rc = -EFAULT;
- break;
+ return -EFAULT;
}
-Away:
- DBGINF("exiting %d!", rc);
- return rc;
}
diff --git a/drivers/staging/unisys/visorchipset/file.h b/drivers/staging/unisys/visorchipset/file.h
index 21bb906242e1..dc7a19556b3f 100644
--- a/drivers/staging/unisys/visorchipset/file.h
+++ b/drivers/staging/unisys/visorchipset/file.h
@@ -20,7 +20,8 @@
#include "globals.h"
-int visorchipset_file_init(dev_t majorDev, VISORCHANNEL **pControlVm_channel);
+int visorchipset_file_init(dev_t majorDev,
+ struct visorchannel **pControlVm_channel);
void visorchipset_file_cleanup(void);
#endif
diff --git a/drivers/staging/unisys/visorchipset/globals.h b/drivers/staging/unisys/visorchipset/globals.h
index 0fe14599f185..a1d35d4bef2e 100644
--- a/drivers/staging/unisys/visorchipset/globals.h
+++ b/drivers/staging/unisys/visorchipset/globals.h
@@ -15,7 +15,6 @@
* details.
*/
-
#ifndef __VISORCHIPSET_GLOBALS_H__
#define __VISORCHIPSET_GLOBALS_H__
@@ -28,7 +27,6 @@
#define MYDRVNAME "visorchipset"
-
/* module parameters */
extern int visorchipset_testvnic;
diff --git a/drivers/staging/unisys/visorchipset/testing.h b/drivers/staging/unisys/visorchipset/testing.h
deleted file mode 100644
index 573aa8b5ba6a..000000000000
--- a/drivers/staging/unisys/visorchipset/testing.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* testing.h
- *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __VISORCHIPSET_TESTING_H__
-#define __VISORCHIPSET_TESTING_H__
-
-#define VISORCHIPSET_TEST_PROC
-#include <linux/uuid.h>
-#include "globals.h"
-#include "controlvmchannel.h"
-
-void test_produce_test_message(struct controlvm_message *msg,
- int isLocalTestAddr);
-BOOL test_consume_test_message(struct controlvm_message *msg);
-void test_manufacture_vnic_client_add(void *p);
-void test_manufacture_vnic_client_add_phys(HOSTADDRESS addr);
-void test_manufacture_preamble_messages(void);
-void test_manufacture_device_attach(ulong busNo, ulong devNo);
-void test_manufacture_device_add(ulong busNo, ulong devNo, uuid_le dataTypeGuid,
- void *pChannel);
-void test_manufacture_add_bus(ulong busNo, ulong maxDevices,
- uuid_le id, u8 *name, BOOL isServer);
-void test_manufacture_device_destroy(ulong busNo, ulong devNo);
-void test_manufacture_bus_destroy(ulong busNo);
-void test_manufacture_detach_externalPort(ulong switchNo, ulong externalPortNo);
-void test_manufacture_detach_internalPort(ulong switchNo, ulong internalPortNo);
-void test_cleanup(void);
-
-#endif
diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
index 46dad63fa2c8..98f3ba4c13ac 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset.h
+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
@@ -158,61 +158,6 @@ findbus(struct list_head *list, u32 bus_no)
return NULL;
}
-/** Attributes for a particular Supervisor switch.
- */
-struct visorchipset_switch_info {
- u32 switch_no;
- struct visorchipset_state state;
- uuid_le switch_type_uuid;
- u8 *authservice1;
- u8 *authservice2;
- u8 *authservice3;
- u8 *security_context;
- u64 reserved;
- u32 reserved2; /* control_vm_id */
- struct device dev;
- BOOL dev_exists;
- struct controlvm_message_header pending_msg_hdr;
-};
-
-/** Attributes for a particular Supervisor external port, which is connected
- * to a specific switch.
- */
-struct visorchipset_externalport_info {
- u32 switch_no;
- u32 external_port_no;
- struct visorchipset_state state;
- uuid_le network_zone_uuid;
- int pd_port;
- u8 *ip;
- u8 *ip_netmask;
- u8 *ip_broadcast;
- u8 *ip_network;
- u8 *ip_gateway;
- u8 *ip_dns;
- u64 reserved1;
- u32 reserved2; /* control_vm_id */
- struct device dev;
- BOOL dev_exists;
- struct controlvm_message_header pending_msg_hdr;
-};
-
-/** Attributes for a particular Supervisor internal port, which is how a
- * device connects to a particular switch.
- */
-struct visorchipset_internalport_info {
- u32 switch_no;
- u32 internal_port_no;
- struct visorchipset_state state;
- u32 bus_no; /* valid only when state.attached == 1 */
- u32 dev_no; /* valid only when state.attached == 1 */
- u64 reserved1;
- u32 reserved2; /* CONTROLVM_ID */
- struct controlvm_message_header pending_msg_hdr;
- MYPROCOBJECT *proc_object;
-
-};
-
/* These functions will be called from within visorchipset when certain
* events happen. (The implementation of these functions is outside of
* visorchipset.)
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_main.c b/drivers/staging/unisys/visorchipset/visorchipset_main.c
index 7e6be32cf7bb..f606ee9e0de9 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset_main.c
+++ b/drivers/staging/unisys/visorchipset/visorchipset_main.c
@@ -20,7 +20,6 @@
#include "procobjecttree.h"
#include "visorchannel.h"
#include "periodic_work.h"
-#include "testing.h"
#include "file.h"
#include "parser.h"
#include "uniklog.h"
@@ -102,7 +101,7 @@ static struct controlvm_message_packet g_DeviceChangeStatePacket;
static LIST_HEAD(BusInfoList);
static LIST_HEAD(DevInfoList);
-static VISORCHANNEL *ControlVm_channel;
+static struct visorchannel *ControlVm_channel;
typedef struct {
u8 __iomem *ptr; /* pointer to base address of payload pool */
@@ -1595,7 +1594,7 @@ parahotplug_next_id(void)
static unsigned long
parahotplug_next_expiration(void)
{
- return jiffies + PARAHOTPLUG_TIMEOUT_MS * HZ / 1000;
+ return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
/*
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_umode.h b/drivers/staging/unisys/visorchipset/visorchipset_umode.h
index 06ba5b7e4254..6cf6eccb3f4a 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset_umode.h
+++ b/drivers/staging/unisys/visorchipset/visorchipset_umode.h
@@ -26,8 +26,6 @@
#ifndef __VISORCHIPSET_UMODE_H
#define __VISORCHIPSET_UMODE_H
-
-
/** The user-mode program can access the control channel buffer directly
* via this memory map.
*/
diff --git a/drivers/staging/unisys/visorutil/charqueue.c b/drivers/staging/unisys/visorutil/charqueue.c
index 1ce7003c3a90..ac7acb7c5b79 100644
--- a/drivers/staging/unisys/visorutil/charqueue.c
+++ b/drivers/staging/unisys/visorutil/charqueue.c
@@ -28,7 +28,7 @@
struct charqueue {
int alloc_size;
int nslots;
- spinlock_t lock;
+ spinlock_t lock; /* read/write lock for this structure */
int head, tail;
unsigned char buf[0];
};
diff --git a/drivers/staging/unisys/visorutil/procobjecttree.c b/drivers/staging/unisys/visorutil/procobjecttree.c
index 195772d22c9e..82279ca5fbe1 100644
--- a/drivers/staging/unisys/visorutil/procobjecttree.c
+++ b/drivers/staging/unisys/visorutil/procobjecttree.c
@@ -25,12 +25,12 @@
* need in order to call the callback function that supplies the /proc read
* info for that file.
*/
-typedef struct {
+struct proc_dir_entry_context {
void (*show_property)(struct seq_file *, void *, int);
MYPROCOBJECT *procObject;
int propertyIndex;
-} PROCDIRENTRYCONTEXT;
+};
/** This describes the attributes of a tree rooted at
* <procDirRoot>/<name[0]>/<name[1]>/...
@@ -86,7 +86,7 @@ struct MYPROCOBJECT_Tag {
/** this is a holding area for the context information that is needed
* to run the /proc callback function */
- PROCDIRENTRYCONTEXT *procDirPropertyContexts;
+ struct proc_dir_entry_context *procDirPropertyContexts;
};
@@ -254,15 +254,16 @@ MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type,
goto Away;
}
obj->procDirPropertyContexts =
- kzalloc((type->nProperties + 1) * sizeof(PROCDIRENTRYCONTEXT),
+ kzalloc((type->nProperties + 1) *
+ sizeof(struct proc_dir_entry_context),
GFP_KERNEL | __GFP_NORETRY);
if (obj->procDirPropertyContexts == NULL) {
ERRDRV("out of memory\n");
goto Away;
}
- obj->procDirProperties =
- kzalloc((type->nProperties + 1) * sizeof(struct proc_dir_entry *),
- GFP_KERNEL | __GFP_NORETRY);
+ obj->procDirProperties = kzalloc((type->nProperties + 1) *
+ sizeof(struct proc_dir_entry *),
+ GFP_KERNEL | __GFP_NORETRY);
if (obj->procDirProperties == NULL) {
ERRDRV("out of memory\n");
goto Away;
@@ -276,8 +277,8 @@ MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type,
/* only create properties that have names */
obj->procDirProperties[i] =
createProcFile(type->propertyNames[i],
- obj->procDir, &proc_fops,
- &obj->procDirPropertyContexts[i]);
+ obj->procDir, &proc_fops,
+ &obj->procDirPropertyContexts[i]);
if (obj->procDirProperties[i] == NULL) {
rc = NULL;
goto Away;
@@ -340,7 +341,7 @@ EXPORT_SYMBOL_GPL(visor_proc_DestroyObject);
static int seq_show(struct seq_file *seq, void *offset)
{
- PROCDIRENTRYCONTEXT *ctx = (PROCDIRENTRYCONTEXT *)(seq->private);
+ struct proc_dir_entry_context *ctx = seq->private;
if (ctx == NULL) {
ERRDRV("I don't have a freakin' clue...");
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 86c72ba0a0cd..565ba189afb2 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -27,7 +27,8 @@
*
* Functions:
* BBuGetFrameTime - Calculate data frame transmitting time
- * BBvCaculateParameter - Caculate PhyLength, PhyService and Phy Signal parameter for baseband Tx
+ * BBvCaculateParameter - Calculate PhyLength, PhyService and Phy Signal
+ * parameter for baseband Tx
* BBbReadEmbedded - Embedded read baseband register via MAC
* BBbWriteEmbedded - Embedded write baseband register via MAC
* BBbVT3253Init - VIA VT3253 baseband chip init code
@@ -1698,46 +1699,6 @@ static const unsigned short awcFrameTime[MAX_RATE] = {
10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216
};
-/*--------------------- Static Functions --------------------------*/
-
-static
-unsigned long
-s_ulGetRatio(struct vnt_private *priv);
-
-static
-void
-s_vChangeAntenna(
- struct vnt_private *priv
-);
-
-static
-void
-s_vChangeAntenna(
- struct vnt_private *priv
-)
-{
- if (priv->dwRxAntennaSel == 0) {
- priv->dwRxAntennaSel = 1;
- if (priv->bTxRxAntInv == true)
- BBvSetRxAntennaMode(priv, ANT_A);
- else
- BBvSetRxAntennaMode(priv, ANT_B);
- } else {
- priv->dwRxAntennaSel = 0;
- if (priv->bTxRxAntInv == true)
- BBvSetRxAntennaMode(priv, ANT_B);
- else
- BBvSetRxAntennaMode(priv, ANT_A);
- }
- if (priv->dwTxAntennaSel == 0) {
- priv->dwTxAntennaSel = 1;
- BBvSetTxAntennaMode(priv, ANT_B);
- } else {
- priv->dwTxAntennaSel = 0;
- BBvSetTxAntennaMode(priv, ANT_A);
- }
-}
-
/*--------------------- Export Variables --------------------------*/
/*
* Description: Calculate data frame transmitting time
@@ -2177,7 +2138,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
@@ -2412,303 +2373,3 @@ BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
BBbWriteEmbedded(priv, 0x0C, 0x00); /* CR12 */
BBbWriteEmbedded(priv, 0x0D, 0x01); /* CR13 */
}
-
-static
-unsigned long
-s_ulGetRatio(struct vnt_private *priv)
-{
- unsigned long ulRatio = 0;
- unsigned long ulMaxPacket;
- unsigned long ulPacketNum;
-
- /* This is a thousand-ratio */
- ulMaxPacket = priv->uNumSQ3[RATE_54M];
- if (priv->uNumSQ3[RATE_54M] != 0) {
- ulPacketNum = priv->uNumSQ3[RATE_54M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_54M;
- }
- if (priv->uNumSQ3[RATE_48M] > ulMaxPacket) {
- ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_48M;
- ulMaxPacket = priv->uNumSQ3[RATE_48M];
- }
- if (priv->uNumSQ3[RATE_36M] > ulMaxPacket) {
- ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
- priv->uNumSQ3[RATE_36M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_36M;
- ulMaxPacket = priv->uNumSQ3[RATE_36M];
- }
- if (priv->uNumSQ3[RATE_24M] > ulMaxPacket) {
- ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
- priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_24M;
- ulMaxPacket = priv->uNumSQ3[RATE_24M];
- }
- if (priv->uNumSQ3[RATE_18M] > ulMaxPacket) {
- ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
- priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M] +
- priv->uNumSQ3[RATE_18M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_18M;
- ulMaxPacket = priv->uNumSQ3[RATE_18M];
- }
- if (priv->uNumSQ3[RATE_12M] > ulMaxPacket) {
- ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
- priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M] +
- priv->uNumSQ3[RATE_18M] + priv->uNumSQ3[RATE_12M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_12M;
- ulMaxPacket = priv->uNumSQ3[RATE_12M];
- }
- if (priv->uNumSQ3[RATE_11M] > ulMaxPacket) {
- ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
- priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M] -
- priv->uNumSQ3[RATE_6M] - priv->uNumSQ3[RATE_9M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_11M;
- ulMaxPacket = priv->uNumSQ3[RATE_11M];
- }
- if (priv->uNumSQ3[RATE_9M] > ulMaxPacket) {
- ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
- priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M] -
- priv->uNumSQ3[RATE_6M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_9M;
- ulMaxPacket = priv->uNumSQ3[RATE_9M];
- }
- if (priv->uNumSQ3[RATE_6M] > ulMaxPacket) {
- ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
- priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_6M;
- ulMaxPacket = priv->uNumSQ3[RATE_6M];
- }
- if (priv->uNumSQ3[RATE_5M] > ulMaxPacket) {
- ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
- priv->uNumSQ3[RATE_2M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_55M;
- ulMaxPacket = priv->uNumSQ3[RATE_5M];
- }
- if (priv->uNumSQ3[RATE_2M] > ulMaxPacket) {
- ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M];
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_2M;
- ulMaxPacket = priv->uNumSQ3[RATE_2M];
- }
- if (priv->uNumSQ3[RATE_1M] > ulMaxPacket) {
- ulPacketNum = priv->uDiversityCnt;
- ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
- ulRatio += TOP_RATE_1M;
- }
-
- return ulRatio;
-}
-
-void
-BBvClearAntDivSQ3Value(struct vnt_private *priv)
-{
- unsigned int ii;
-
- priv->uDiversityCnt = 0;
- for (ii = 0; ii < MAX_RATE; ii++)
- priv->uNumSQ3[ii] = 0;
-}
-
-/*
- * Description: Antenna Diversity
- *
- * Parameters:
- * In:
- * priv - Device Structure
- * byRSR - RSR from received packet
- * bySQ3 - SQ3 value from received packet
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void BBvAntennaDiversity(struct vnt_private *priv,
- unsigned char byRxRate, unsigned char bySQ3)
-{
- if ((byRxRate >= MAX_RATE) || (priv->wAntDiversityMaxRate >= MAX_RATE))
- return;
-
- priv->uDiversityCnt++;
-
- priv->uNumSQ3[byRxRate]++;
-
- if (priv->byAntennaState == 0) {
- if (priv->uDiversityCnt > priv->ulDiversityNValue) {
- pr_debug("ulDiversityNValue=[%d],54M-[%d]\n",
- (int)priv->ulDiversityNValue,
- (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate]);
-
- if (priv->uNumSQ3[priv->wAntDiversityMaxRate] < priv->uDiversityCnt/2) {
- priv->ulRatio_State0 = s_ulGetRatio(priv);
- pr_debug("SQ3_State0, rate = [%08x]\n",
- (int)priv->ulRatio_State0);
-
- if (priv->byTMax == 0)
- return;
- pr_debug("1.[%08x], uNumSQ3[%d]=%d, %d\n",
- (int)priv->ulRatio_State0,
- (int)priv->wAntDiversityMaxRate,
- (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
- (int)priv->uDiversityCnt);
-
- s_vChangeAntenna(priv);
- priv->byAntennaState = 1;
- del_timer(&priv->TimerSQ3Tmax3);
- del_timer(&priv->TimerSQ3Tmax2);
- priv->TimerSQ3Tmax1.expires = RUN_AT(priv->byTMax * HZ);
- add_timer(&priv->TimerSQ3Tmax1);
-
- } else {
- priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
- add_timer(&priv->TimerSQ3Tmax3);
- }
- BBvClearAntDivSQ3Value(priv);
-
- }
- } else { /* byAntennaState == 1 */
-
- if (priv->uDiversityCnt > priv->ulDiversityMValue) {
- del_timer(&priv->TimerSQ3Tmax1);
-
- priv->ulRatio_State1 = s_ulGetRatio(priv);
- pr_debug("RX:SQ3_State1, rate0 = %08x,rate1 = %08x\n",
- (int)priv->ulRatio_State0,
- (int)priv->ulRatio_State1);
-
- if (priv->ulRatio_State1 < priv->ulRatio_State0) {
- pr_debug("2.[%08x][%08x], uNumSQ3[%d]=%d, %d\n",
- (int)priv->ulRatio_State0,
- (int)priv->ulRatio_State1,
- (int)priv->wAntDiversityMaxRate,
- (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
- (int)priv->uDiversityCnt);
-
- s_vChangeAntenna(priv);
- priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
- priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
- add_timer(&priv->TimerSQ3Tmax3);
- add_timer(&priv->TimerSQ3Tmax2);
- }
- priv->byAntennaState = 0;
- BBvClearAntDivSQ3Value(priv);
- }
- } /* byAntennaState */
-}
-
-/*+
- *
- * Description:
- * Timer for SQ3 antenna diversity
- *
- * Parameters:
- * In:
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-
-void
-TimerSQ3CallBack(
- unsigned long data
-)
-{
- struct vnt_private *priv = (struct vnt_private *)data;
- unsigned long flags;
-
- pr_debug("TimerSQ3CallBack...\n");
-
- spin_lock_irqsave(&priv->lock, flags);
-
- pr_debug("3.[%08x][%08x], %d\n",
- (int)priv->ulRatio_State0, (int)priv->ulRatio_State1,
- (int)priv->uDiversityCnt);
-
- s_vChangeAntenna(priv);
- priv->byAntennaState = 0;
- BBvClearAntDivSQ3Value(priv);
-
- priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
- priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
- add_timer(&priv->TimerSQ3Tmax3);
- add_timer(&priv->TimerSQ3Tmax2);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/*+
- *
- * Description:
- * Timer for SQ3 antenna diversity
- *
- * Parameters:
- * In:
- * pvSysSpec1
- * hDeviceContext - Pointer to the adapter
- * pvSysSpec2
- * pvSysSpec3
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-
-void
-TimerState1CallBack(
- unsigned long data
-)
-{
- struct vnt_private *priv = (struct vnt_private *)data;
- unsigned long flags;
-
- pr_debug("TimerState1CallBack...\n");
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->uDiversityCnt < priv->ulDiversityMValue/100) {
- s_vChangeAntenna(priv);
- priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
- priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
- add_timer(&priv->TimerSQ3Tmax3);
- add_timer(&priv->TimerSQ3Tmax2);
- } else {
- priv->ulRatio_State1 = s_ulGetRatio(priv);
- pr_debug("SQ3_State1, rate0 = %08x,rate1 = %08x\n",
- (int)priv->ulRatio_State0,
- (int)priv->ulRatio_State1);
-
- if (priv->ulRatio_State1 < priv->ulRatio_State0) {
- pr_debug("2.[%08x][%08x], uNumSQ3[%d]=%d, %d\n",
- (int)priv->ulRatio_State0,
- (int)priv->ulRatio_State1,
- (int)priv->wAntDiversityMaxRate,
- (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
- (int)priv->uDiversityCnt);
-
- s_vChangeAntenna(priv);
-
- priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
- priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
- add_timer(&priv->TimerSQ3Tmax3);
- add_timer(&priv->TimerSQ3Tmax2);
- }
- }
- priv->byAntennaState = 0;
- BBvClearAntDivSQ3Value(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index d9f6d63e4ab7..43a4fb1f3570 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -93,21 +93,4 @@ void BBvSetRxAntennaMode(struct vnt_private *, unsigned char byAntennaMode);
void BBvSetDeepSleep(struct vnt_private *, unsigned char byLocalID);
void BBvExitDeepSleep(struct vnt_private *, unsigned char byLocalID);
-/* timer for antenna diversity */
-
-void
-TimerSQ3CallBack(
- unsigned long
-);
-
-void
-TimerState1CallBack(
- unsigned long
-);
-
-void BBvAntennaDiversity(struct vnt_private *,
- unsigned char byRxRate, unsigned char bySQ3);
-void
-BBvClearAntDivSQ3Value(struct vnt_private *);
-
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index a0796405c308..1cdcf49b2445 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -68,8 +68,8 @@
/*--------------------- Static Variables --------------------------*/
-static const unsigned short cwRXBCNTSFOff[MAX_RATE] =
-{17, 17, 17, 17, 34, 23, 17, 11, 8, 5, 4, 3};
+static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
+ 17, 17, 17, 17, 34, 23, 17, 11, 8, 5, 4, 3};
/*--------------------- Static Functions --------------------------*/
@@ -670,6 +670,9 @@ void CARDvSetRSPINF(struct vnt_private *pDevice, u8 bb_type)
{
union vnt_phy_field_swap phy;
unsigned char byTxRate, byRsvTime; /* For OFDM */
+ unsigned long flags;
+
+ spin_lock_irqsave(&pDevice->lock, flags);
/* Set to Page1 */
MACvSelectPage1(pDevice->PortOffset);
@@ -767,6 +770,8 @@ void CARDvSetRSPINF(struct vnt_private *pDevice, u8 bb_type)
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
/* Set to Page0 */
MACvSelectPage0(pDevice->PortOffset);
+
+ spin_unlock_irqrestore(&pDevice->lock, flags);
}
void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index c8f739dd346e..3c17725d5910 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -174,14 +174,22 @@ void vnt_init_bands(struct vnt_private *priv)
* Return Value: true if succeeded; false if failed.
*
*/
-bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
+bool set_channel(void *pDeviceHandler, struct ieee80211_channel *ch)
{
struct vnt_private *pDevice = pDeviceHandler;
bool bResult = true;
- if (pDevice->byCurrentCh == uConnectionChannel)
+ if (pDevice->byCurrentCh == ch->hw_value)
return bResult;
+ /* Set VGA to max sensitivity */
+ if (pDevice->bUpdateBBVGA &&
+ pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+ pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ }
+
/* clear NAV */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
@@ -189,19 +197,23 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
if (pDevice->byRFType == RF_AIROHA7230)
RFbAL7230SelectChannelPostProcess(pDevice, pDevice->byCurrentCh,
- (unsigned char)uConnectionChannel);
+ ch->hw_value);
- pDevice->byCurrentCh = (unsigned char)uConnectionChannel;
+ pDevice->byCurrentCh = ch->hw_value;
bResult &= RFbSelectChannel(pDevice, pDevice->byRFType,
- (unsigned char)uConnectionChannel);
+ ch->hw_value);
/* Init Synthesizer Table */
if (pDevice->bEnablePSMode)
- RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, uConnectionChannel);
+ RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, ch->hw_value);
BBvSoftwareReset(pDevice);
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&pDevice->lock, flags);
+
/* set HW default power register */
MACvSelectPage1(pDevice->PortOffset);
RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
@@ -209,6 +221,8 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM, pDevice->byCurPwr);
MACvSelectPage0(pDevice->PortOffset);
+
+ spin_unlock_irqrestore(&pDevice->lock, flags);
}
if (pDevice->byBBType == BB_TYPE_11B)
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 4f4264e23462..e2be6fca5f26 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -27,6 +27,6 @@
void vnt_init_bands(struct vnt_private *);
-bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel);
+bool set_channel(void *pDeviceHandler, struct ieee80211_channel *);
#endif /* _CHANNEL_H_ */
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 83efbfb57c79..440537e47121 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -367,7 +367,7 @@ struct vnt_private {
bool bIsBeaconBufReadySet;
unsigned int cbBeaconBufReadySetCnt;
bool bFixRate;
- unsigned char byCurrentCh;
+ u16 byCurrentCh;
bool bAES;
@@ -407,29 +407,6 @@ struct vnt_private {
unsigned char byBBCR88;
unsigned char byBBCR09;
- bool bDiversityRegCtlON;
- bool bDiversityEnable;
- unsigned long ulDiversityNValue;
- unsigned long ulDiversityMValue;
- unsigned char byTMax;
- unsigned char byTMax2;
- unsigned char byTMax3;
- unsigned long ulSQ3TH;
-
- /* ANT diversity */
- unsigned long uDiversityCnt;
- unsigned char byAntennaState;
- unsigned long ulRatio_State0;
- unsigned long ulRatio_State1;
-
- /* SQ3 functions for antenna diversity */
- struct timer_list TimerSQ3Tmax1;
- struct timer_list TimerSQ3Tmax2;
- struct timer_list TimerSQ3Tmax3;
-
- unsigned long uNumSQ3[MAX_RATE];
- unsigned short wAntDiversityMaxRate;
-
unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
unsigned short wBeaconInterval;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 83e4162c0094..4324282afe49 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -126,10 +126,6 @@ DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
DEVICE_PARAM(BasebandType, "baseband type");
-#define DIVERSITY_ANT_DEF 0
-
-DEVICE_PARAM(bDiversityANTEnable, "ANT diversity mode");
-
//
// Static vars definitions
//
@@ -152,7 +148,6 @@ static void vt6655_init_info(struct pci_dev *pcid,
static void device_free_info(struct vnt_private *pDevice);
static bool device_get_pci_info(struct vnt_private *, struct pci_dev *pcid);
static void device_print_info(struct vnt_private *pDevice);
-static void device_init_diversity_timer(struct vnt_private *pDevice);
static irqreturn_t device_intr(int irq, void *dev_instance);
#ifdef CONFIG_PM
@@ -216,7 +211,6 @@ static void device_get_options(struct vnt_private *pDevice)
pOpts->short_retry = SHORT_RETRY_DEF;
pOpts->long_retry = LONG_RETRY_DEF;
pOpts->bbp_type = BBP_TYPE_DEF;
- pOpts->flags |= DEVICE_FLAGS_DiversityANT;
}
static void
@@ -224,7 +218,6 @@ device_set_options(struct vnt_private *pDevice)
{
pDevice->byShortRetryLimit = pDevice->sOpts.short_retry;
pDevice->byLongRetryLimit = pDevice->sOpts.long_retry;
- pDevice->bDiversityRegCtlON = (pDevice->sOpts.flags & DEVICE_FLAGS_DiversityANT) ? 1 : 0;
pDevice->byBBType = pDevice->sOpts.bbp_type;
pDevice->byPacketType = pDevice->byBBType;
pDevice->byAutoFBCtrl = AUTO_FB_0;
@@ -236,8 +229,6 @@ device_set_options(struct vnt_private *pDevice)
pr_debug(" byPreambleType= %d\n", (int)pDevice->byPreambleType);
pr_debug(" byShortPreamble= %d\n", (int)pDevice->byShortPreamble);
pr_debug(" byBBType= %d\n", (int)pDevice->byBBType);
- pr_debug(" pDevice->bDiversityRegCtlON= %d\n",
- (int)pDevice->bDiversityRegCtlON);
}
//
@@ -249,7 +240,6 @@ static void device_init_registers(struct vnt_private *pDevice)
unsigned long flags;
unsigned int ii;
unsigned char byValue;
- unsigned char byValue1;
unsigned char byCCKPwrdBm = 0;
unsigned char byOFDMPwrdBm = 0;
@@ -301,13 +291,6 @@ static void device_init_registers(struct vnt_private *pDevice)
if (byValue == 0)
byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
- pDevice->ulDiversityNValue = 100*260;
- pDevice->ulDiversityMValue = 100*16;
- pDevice->byTMax = 1;
- pDevice->byTMax2 = 4;
- pDevice->ulSQ3TH = 0;
- pDevice->byTMax3 = 64;
-
if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
pDevice->byAntennaCount = 2;
pDevice->byTxAntennaMode = ANT_B;
@@ -318,16 +301,7 @@ static void device_init_registers(struct vnt_private *pDevice)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
-
- byValue1 = SROMbyReadEmbedded(pDevice->PortOffset,
- EEP_OFS_ANTENNA);
-
- if ((byValue1 & 0x08) == 0)
- pDevice->bDiversityEnable = false;
- else
- pDevice->bDiversityEnable = true;
} else {
- pDevice->bDiversityEnable = false;
pDevice->byAntennaCount = 1;
pDevice->dwTxAntennaSel = 0;
pDevice->dwRxAntennaSel = 0;
@@ -349,10 +323,9 @@ static void device_init_registers(struct vnt_private *pDevice)
}
}
- pr_debug("bDiversityEnable=[%d],NValue=[%d],MValue=[%d],TMax=[%d],TMax2=[%d]\n",
- pDevice->bDiversityEnable, (int)pDevice->ulDiversityNValue,
- (int)pDevice->ulDiversityMValue, pDevice->byTMax,
- pDevice->byTMax2);
+ /* Set initial antenna mode */
+ BBvSetTxAntennaMode(pDevice, pDevice->byTxAntennaMode);
+ BBvSetRxAntennaMode(pDevice, pDevice->byRxAntennaMode);
/* zonetype initial */
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
@@ -493,24 +466,6 @@ static void device_init_registers(struct vnt_private *pDevice)
MACvStart(pDevice->PortOffset);
}
-static void device_init_diversity_timer(struct vnt_private *pDevice)
-{
- init_timer(&pDevice->TimerSQ3Tmax1);
- pDevice->TimerSQ3Tmax1.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax1.function = TimerSQ3CallBack;
- pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->TimerSQ3Tmax2);
- pDevice->TimerSQ3Tmax2.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax2.function = TimerSQ3CallBack;
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->TimerSQ3Tmax3);
- pDevice->TimerSQ3Tmax3.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax3.function = TimerState1CallBack;
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
-}
-
static void device_print_info(struct vnt_private *pDevice)
{
dev_info(&pDevice->pcid->dev, "%s\n", get_chip_name(pDevice->chip_id));
@@ -1053,6 +1008,58 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
pTDInfo->byFlags = 0;
}
+static void vnt_check_bb_vga(struct vnt_private *priv)
+{
+ long dbm;
+ int i;
+
+ if (!priv->bUpdateBBVGA)
+ return;
+
+ if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ if (!(priv->vif->bss_conf.assoc && priv->uCurrRSSI))
+ return;
+
+ RFvRSSITodBm(priv, (u8)priv->uCurrRSSI, &dbm);
+
+ for (i = 0; i < BB_VGA_LEVEL; i++) {
+ if (dbm < priv->ldBmThreshold[i]) {
+ priv->byBBVGANew = priv->abyBBVGA[i];
+ break;
+ }
+ }
+
+ if (priv->byBBVGANew == priv->byBBVGACurrent) {
+ priv->uBBVGADiffCount = 1;
+ return;
+ }
+
+ priv->uBBVGADiffCount++;
+
+ if (priv->uBBVGADiffCount == 1) {
+ /* first VGA diff gain */
+ BBvSetVGAGainOffset(priv, priv->byBBVGANew);
+
+ dev_dbg(&priv->pcid->dev,
+ "First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
+ (int)dbm, priv->byBBVGANew,
+ priv->byBBVGACurrent,
+ (int)priv->uBBVGADiffCount);
+ }
+
+ if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
+ dev_dbg(&priv->pcid->dev,
+ "RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
+ (int)dbm, priv->byBBVGANew,
+ priv->byBBVGACurrent,
+ (int)priv->uBBVGADiffCount);
+
+ BBvSetVGAGainOffset(priv, priv->byBBVGANew);
+ }
+}
+
static irqreturn_t device_intr(int irq, void *dev_instance)
{
struct vnt_private *pDevice = dev_instance;
@@ -1060,7 +1067,6 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
unsigned long dwMIBCounter = 0;
unsigned char byOrgPageSel = 0;
int handled = 0;
- int ii = 0;
unsigned long flags;
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -1090,7 +1096,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
// Must do this after doing rx/tx, cause ISR bit is slow
// than RD/TD write back
// update ISR counter
- STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic , dwMIBCounter);
+ STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic, dwMIBCounter);
while (pDevice->dwIsr != 0) {
STAvUpdateIsrStatCounter(&pDevice->scStatistic, pDevice->dwIsr);
MACvWriteISR(pDevice->PortOffset, pDevice->dwIsr);
@@ -1104,44 +1110,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
if (pDevice->dwIsr & ISR_TBTT) {
if (pDevice->vif &&
- pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
- if (pDevice->bUpdateBBVGA &&
- !(pDevice->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
- pDevice->vif->bss_conf.assoc &&
- pDevice->uCurrRSSI) {
- long ldBm;
-
- RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
- for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
- if (ldBm < pDevice->ldBmThreshold[ii]) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
- break;
- }
- }
- if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
- pDevice->uBBVGADiffCount++;
- if (pDevice->uBBVGADiffCount == 1) {
- // first VGA diff gain
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGANew);
- pr_debug("First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
- (int)ldBm,
- pDevice->byBBVGANew,
- pDevice->byBBVGACurrent,
- (int)pDevice->uBBVGADiffCount);
- }
- if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
- pr_debug("RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
- (int)ldBm,
- pDevice->byBBVGANew,
- pDevice->byBBVGACurrent,
- (int)pDevice->uBBVGADiffCount);
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGANew);
- }
- } else {
- pDevice->uBBVGADiffCount = 1;
- }
- }
- }
+ pDevice->op_mode != NL80211_IFTYPE_ADHOC)
+ vnt_check_bb_vga(pDevice);
pDevice->bBeaconSent = false;
if (pDevice->bEnablePSMode)
@@ -1232,7 +1202,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td = priv->apCurrTD[dma_idx];
- head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
+ head_td->m_td1TD1.byTCR = 0;
head_td->pTDInfo->skb = skb;
@@ -1257,12 +1227,20 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
priv->bPWBitOn = false;
- head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+ /* Set TSR1 & ReqCount in TxDescHead */
+ head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+ head_td->m_td1TD1.wReqCount =
+ cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+
+ head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
+
+ if (dma_idx == TYPE_AC0DMA) {
+ head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
- if (dma_idx == TYPE_AC0DMA)
MACvTransmitAC0(priv->PortOffset);
- else
+ } else {
MACvTransmit0(priv->PortOffset);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1343,8 +1321,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (priv->bDiversityRegCtlON)
- device_init_diversity_timer(priv);
break;
case NL80211_IFTYPE_ADHOC:
MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
@@ -1374,11 +1350,6 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (priv->bDiversityRegCtlON) {
- del_timer(&priv->TimerSQ3Tmax1);
- del_timer(&priv->TimerSQ3Tmax2);
- del_timer(&priv->TimerSQ3Tmax3);
- }
break;
case NL80211_IFTYPE_ADHOC:
MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
@@ -1415,7 +1386,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
(conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
- set_channel(priv, conf->chandef.chan->hw_value);
+ set_channel(priv, conf->chandef.chan);
if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
bb_type = BB_TYPE_11A;
@@ -1500,9 +1471,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (conf->enable_beacon) {
vnt_beacon_enable(priv, vif, conf);
- MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
} else {
- MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
}
}
@@ -1565,6 +1538,10 @@ static void vnt_configure(struct ieee80211_hw *hw,
if (changed_flags & FIF_ALLMULTI) {
if (*total_flags & FIF_ALLMULTI) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
if (priv->mc_list_count > 2) {
MACvSelectPage1(priv->PortOffset);
@@ -1586,6 +1563,8 @@ static void vnt_configure(struct ieee80211_hw *hw,
MACvSelectPage0(priv->PortOffset);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
+
rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
} else {
rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
@@ -1669,7 +1648,7 @@ static const struct ieee80211_ops vnt_mac_ops = {
.reset_tsf = vnt_reset_tsf,
};
-int vnt_init(struct vnt_private *priv)
+static int vnt_init(struct vnt_private *priv)
{
SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 977683cb7391..3c5b87ffdcac 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -91,6 +91,8 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
new_rsr = skb_data + bytes_received - 3;
rssi = skb_data + bytes_received - 2;
rsr = skb_data + bytes_received - 1;
+ if (*rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
+ return false;
RFvRSSITodBm(priv, *rssi, &rx_dbm);
@@ -106,6 +108,9 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
rx_status.flag = 0;
rx_status.freq = hw->conf.chandef.chan->center_freq;
+ if (!(*rsr & RSR_CRCOK))
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+
hdr = (struct ieee80211_hdr *)(skb->data);
fc = hdr->frame_control;
@@ -113,13 +118,11 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
if (ieee80211_has_protected(fc)) {
if (priv->byLocalID > REV_ID_VT3253_A1)
- rx_status.flag = RX_FLAG_DECRYPTED;
- }
+ rx_status.flag |= RX_FLAG_DECRYPTED;
- if (priv->vif && priv->bDiversityEnable) {
- if (ieee80211_is_data(fc) &&
- (frame_size > 50) && priv->vif->bss_conf.assoc)
- BBvAntennaDiversity(priv, priv->rx_rate, 0);
+ /* Drop packet */
+ if (!(*new_rsr & NEWRSR_DECRYPTOK))
+ return false;
}
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 8f0d652fea7c..3653a2bd1e36 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -30,7 +30,6 @@
* MACbIsRegBitsOff - Test if All test Bits Off
* MACbIsIntDisable - Test if MAC interrupt disable
* MACvSetShortRetryLimit - Set 802.11 Short Retry limit
- * MACvGetShortRetryLimit - Get 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
* MACvSetLoopbackMode - Set MAC Loopback Mode
* MACvSaveContext - Save Context of MAC Registers
@@ -146,24 +145,6 @@ void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
VNSvOutPortB(dwIoBase + MAC_REG_SRT, byRetryLimit);
}
-/*
- * Description:
- * Get 802.11 Short Retry Limit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyRetryLimit - Retry Limit Get
- *
- * Return Value: none
- *
- */
-void MACvGetShortRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit)
-{
- // get SRT
- VNSvInPortB(dwIoBase + MAC_REG_SRT, pbyRetryLimit);
-}
/*
* Description:
@@ -356,7 +337,7 @@ bool MACbSafeSoftwareReset(void __iomem *dwIoBase)
/*
* Description:
- * Trun Off MAC Rx
+ * Turn Off MAC Rx
*
* Parameters:
* In:
@@ -417,7 +398,7 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
/*
* Description:
- * Trun Off MAC Tx
+ * Turn Off MAC Tx
*
* Parameters:
* In:
@@ -808,7 +789,7 @@ bool MACbPSWakeup(void __iomem *dwIoBase)
// Check if SyncFlushOK
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_PSCTL , &byOrgValue);
+ VNSvInPortB(dwIoBase + MAC_REG_PSCTL, &byOrgValue);
if (byOrgValue & PSCTL_WAKEDONE)
break;
}
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index e1e7e10435f6..8e0200a78b19 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -38,13 +38,11 @@
#include "upc.h"
/*--------------------- Export Definitions -------------------------*/
-//
-// Registers in the MAC
-//
+/* Registers in the MAC */
#define MAC_MAX_CONTEXT_SIZE_PAGE0 256
#define MAC_MAX_CONTEXT_SIZE_PAGE1 128
-// Registers not related to 802.11b
+/* Registers not related to 802.11b */
#define MAC_REG_BCFG0 0x00
#define MAC_REG_BCFG1 0x01
#define MAC_REG_FCR0 0x02
@@ -69,15 +67,16 @@
#define MAC_REG_TMCTL0 0x18
#define MAC_REG_TMCTL1 0x19
#define MAC_REG_TMDATA0 0x1C
-// MAC Parameter related
-#define MAC_REG_LRT 0x20 //
-#define MAC_REG_SRT 0x21 //
-#define MAC_REG_SIFS 0x22 //
-#define MAC_REG_DIFS 0x23 //
-#define MAC_REG_EIFS 0x24 //
-#define MAC_REG_SLOT 0x25 //
-#define MAC_REG_BI 0x26 //
-#define MAC_REG_CWMAXMIN0 0x28 //
+
+/* MAC Parameter related */
+#define MAC_REG_LRT 0x20
+#define MAC_REG_SRT 0x21
+#define MAC_REG_SIFS 0x22
+#define MAC_REG_DIFS 0x23
+#define MAC_REG_EIFS 0x24
+#define MAC_REG_SLOT 0x25
+#define MAC_REG_BI 0x26
+#define MAC_REG_CWMAXMIN0 0x28
#define MAC_REG_LINKOFFTOTM 0x2A
#define MAC_REG_SWTMOT 0x2B
#define MAC_REG_MIBCNTR 0x2C
@@ -85,26 +84,29 @@
#define MAC_REG_RTSFAILCNT 0x2D
#define MAC_REG_ACKFAILCNT 0x2E
#define MAC_REG_FCSERRCNT 0x2F
-// TSF Related
-#define MAC_REG_TSFCNTR 0x30 //
-#define MAC_REG_NEXTTBTT 0x38 //
-#define MAC_REG_TSFOFST 0x40 //
-#define MAC_REG_TFTCTL 0x48 //
-// WMAC Control/Status Related
-#define MAC_REG_ENCFG 0x4C //
-#define MAC_REG_PAGE1SEL 0x4F //
-#define MAC_REG_CFG 0x50 //
-#define MAC_REG_TEST 0x52 //
-#define MAC_REG_HOSTCR 0x54 //
-#define MAC_REG_MACCR 0x55 //
-#define MAC_REG_RCR 0x56 //
-#define MAC_REG_TCR 0x57 //
-#define MAC_REG_IMR 0x58 //
+
+/* TSF Related */
+#define MAC_REG_TSFCNTR 0x30
+#define MAC_REG_NEXTTBTT 0x38
+#define MAC_REG_TSFOFST 0x40
+#define MAC_REG_TFTCTL 0x48
+
+/* WMAC Control/Status Related */
+#define MAC_REG_ENCFG 0x4C
+#define MAC_REG_PAGE1SEL 0x4F
+#define MAC_REG_CFG 0x50
+#define MAC_REG_TEST 0x52
+#define MAC_REG_HOSTCR 0x54
+#define MAC_REG_MACCR 0x55
+#define MAC_REG_RCR 0x56
+#define MAC_REG_TCR 0x57
+#define MAC_REG_IMR 0x58
#define MAC_REG_ISR 0x5C
-// Power Saving Related
-#define MAC_REG_PSCFG 0x60 //
-#define MAC_REG_PSCTL 0x61 //
-#define MAC_REG_PSPWRSIG 0x62 //
+
+/* Power Saving Related */
+#define MAC_REG_PSCFG 0x60
+#define MAC_REG_PSCTL 0x61
+#define MAC_REG_PSPWRSIG 0x62
#define MAC_REG_BBCR13 0x63
#define MAC_REG_AIDATIM 0x64
#define MAC_REG_PWBT 0x66
@@ -112,41 +114,45 @@
#define MAC_REG_CALTMR 0x69
#define MAC_REG_SYNSPACCNT 0x6A
#define MAC_REG_WAKSYNOPT 0x6B
-// Baseband/IF Control Group
-#define MAC_REG_BBREGCTL 0x6C //
+
+/* Baseband/IF Control Group */
+#define MAC_REG_BBREGCTL 0x6C
#define MAC_REG_CHANNEL 0x6D
#define MAC_REG_BBREGADR 0x6E
#define MAC_REG_BBREGDATA 0x6F
-#define MAC_REG_IFREGCTL 0x70 //
-#define MAC_REG_IFDATA 0x71 //
-#define MAC_REG_ITRTMSET 0x74 //
+#define MAC_REG_IFREGCTL 0x70
+#define MAC_REG_IFDATA 0x71
+#define MAC_REG_ITRTMSET 0x74
#define MAC_REG_PAPEDELAY 0x77
-#define MAC_REG_SOFTPWRCTL 0x78 //
-#define MAC_REG_GPIOCTL0 0x7A //
-#define MAC_REG_GPIOCTL1 0x7B //
-
-// MAC DMA Related Group
-#define MAC_REG_TXDMACTL0 0x7C //
-#define MAC_REG_TXDMAPTR0 0x80 //
-#define MAC_REG_AC0DMACTL 0x84 //
-#define MAC_REG_AC0DMAPTR 0x88 //
-#define MAC_REG_BCNDMACTL 0x8C //
-#define MAC_REG_BCNDMAPTR 0x90 //
-#define MAC_REG_RXDMACTL0 0x94 //
-#define MAC_REG_RXDMAPTR0 0x98 //
-#define MAC_REG_RXDMACTL1 0x9C //
-#define MAC_REG_RXDMAPTR1 0xA0 //
-#define MAC_REG_SYNCDMACTL 0xA4 //
+#define MAC_REG_SOFTPWRCTL 0x78
+#define MAC_REG_GPIOCTL0 0x7A
+#define MAC_REG_GPIOCTL1 0x7B
+
+/* MAC DMA Related Group */
+#define MAC_REG_TXDMACTL0 0x7C
+#define MAC_REG_TXDMAPTR0 0x80
+#define MAC_REG_AC0DMACTL 0x84
+#define MAC_REG_AC0DMAPTR 0x88
+#define MAC_REG_BCNDMACTL 0x8C
+#define MAC_REG_BCNDMAPTR 0x90
+#define MAC_REG_RXDMACTL0 0x94
+#define MAC_REG_RXDMAPTR0 0x98
+#define MAC_REG_RXDMACTL1 0x9C
+#define MAC_REG_RXDMAPTR1 0xA0
+#define MAC_REG_SYNCDMACTL 0xA4
#define MAC_REG_SYNCDMAPTR 0xA8
#define MAC_REG_ATIMDMACTL 0xAC
#define MAC_REG_ATIMDMAPTR 0xB0
-// MiscFF PIO related
+
+/* MiscFF PIO related */
#define MAC_REG_MISCFFNDEX 0xB4
#define MAC_REG_MISCFFCTL 0xB6
#define MAC_REG_MISCFFDATA 0xB8
-// Extend SW Timer
+
+/* Extend SW Timer */
#define MAC_REG_TMDATA1 0xBC
-// WOW Related Group
+
+/* WOW Related Group */
#define MAC_REG_WAKEUPEN0 0xC0
#define MAC_REG_WAKEUPEN1 0xC1
#define MAC_REG_WAKEUPSR0 0xC2
@@ -156,19 +162,21 @@
#define MAC_REG_WAKE128_2 0xE4
#define MAC_REG_WAKE128_3 0xF4
-/////////////// Page 1 ///////////////////
+/************** Page 1 ******************/
#define MAC_REG_CRC_128_0 0x04
#define MAC_REG_CRC_128_1 0x06
#define MAC_REG_CRC_128_2 0x08
#define MAC_REG_CRC_128_3 0x0A
-// MAC Configuration Group
+
+/* MAC Configuration Group */
#define MAC_REG_PAR0 0x0C
#define MAC_REG_PAR4 0x10
#define MAC_REG_BSSID0 0x14
#define MAC_REG_BSSID4 0x18
#define MAC_REG_MAR0 0x1C
#define MAC_REG_MAR4 0x20
-// MAC RSPPKT INFO Group
+
+/* MAC RSPPKT INFO Group */
#define MAC_REG_RSPINF_B_1 0x24
#define MAC_REG_RSPINF_B_2 0x28
#define MAC_REG_RSPINF_B_5 0x2C
@@ -183,7 +191,7 @@
#define MAC_REG_RSPINF_A_54 0x42
#define MAC_REG_RSPINF_A_72 0x44
-// 802.11h relative
+/* 802.11h relative */
#define MAC_REG_QUIETINIT 0x60
#define MAC_REG_QUIETGAP 0x62
#define MAC_REG_QUIETDUR 0x64
@@ -195,9 +203,7 @@
#define MAC_REG_PWRCCK 0x73
#define MAC_REG_PWROFDM 0x7C
-//
-// Bits in the BCFG0 register
-//
+/* Bits in the BCFG0 register */
#define BCFG0_PERROFF 0x40
#define BCFG0_MRDMDIS 0x20
#define BCFG0_MRDLDIS 0x10
@@ -205,9 +211,7 @@
#define BCFG0_VSERREN 0x02
#define BCFG0_LATMEN 0x01
-//
-// Bits in the BCFG1 register
-//
+/* Bits in the BCFG1 register */
#define BCFG1_CFUNOPT 0x80
#define BCFG1_CREQOPT 0x40
#define BCFG1_DMA8 0x10
@@ -216,25 +220,23 @@
#define BCFG1_MIOEN 0x02
#define BCFG1_CISDLYEN 0x01
-// Bits in RAMBIST registers
-#define BISTCMD_TSTPAT5 0x00 //
-#define BISTCMD_TSTPATA 0x80 //
-#define BISTCMD_TSTERR 0x20 //
-#define BISTCMD_TSTPATF 0x18 //
-#define BISTCMD_TSTPAT0 0x10 //
-#define BISTCMD_TSTMODE 0x04 //
-#define BISTCMD_TSTITTX 0x03 //
-#define BISTCMD_TSTATRX 0x02 //
-#define BISTCMD_TSTATTX 0x01 //
-#define BISTCMD_TSTRX 0x00 //
-#define BISTSR0_BISTGO 0x01 //
-#define BISTSR1_TSTSR 0x01 //
-#define BISTSR2_CMDPRTEN 0x02 //
-#define BISTSR2_RAMTSTEN 0x01 //
-
-//
-// Bits in the I2MCFG EEPROM register
-//
+/* Bits in RAMBIST registers */
+#define BISTCMD_TSTPAT5 0x00
+#define BISTCMD_TSTPATA 0x80
+#define BISTCMD_TSTERR 0x20
+#define BISTCMD_TSTPATF 0x18
+#define BISTCMD_TSTPAT0 0x10
+#define BISTCMD_TSTMODE 0x04
+#define BISTCMD_TSTITTX 0x03
+#define BISTCMD_TSTATRX 0x02
+#define BISTCMD_TSTATTX 0x01
+#define BISTCMD_TSTRX 0x00
+#define BISTSR0_BISTGO 0x01
+#define BISTSR1_TSTSR 0x01
+#define BISTSR2_CMDPRTEN 0x02
+#define BISTSR2_RAMTSTEN 0x01
+
+/* Bits in the I2MCFG EEPROM register */
#define I2MCFG_BOUNDCTL 0x80
#define I2MCFG_WAITCTL 0x20
#define I2MCFG_SCLOECTL 0x10
@@ -243,50 +245,38 @@
#define I2MCFG_I2MLDSEQ 0x02
#define I2MCFG_I2CMFAST 0x01
-//
-// Bits in the I2MCSR EEPROM register
-//
+/* Bits in the I2MCSR EEPROM register */
#define I2MCSR_EEMW 0x80
#define I2MCSR_EEMR 0x40
#define I2MCSR_AUTOLD 0x08
#define I2MCSR_NACK 0x02
#define I2MCSR_DONE 0x01
-//
-// Bits in the PMC1 register
-//
+/* Bits in the PMC1 register */
#define SPS_RST 0x80
#define PCISTIKY 0x40
#define PME_OVR 0x02
-//
-// Bits in the STICKYHW register
-//
+/* Bits in the STICKYHW register */
#define STICKHW_DS1_SHADOW 0x02
#define STICKHW_DS0_SHADOW 0x01
-//
-// Bits in the TMCTL register
-//
+/* Bits in the TMCTL register */
#define TMCTL_TSUSP 0x04
#define TMCTL_TMD 0x02
#define TMCTL_TE 0x01
-//
-// Bits in the TFTCTL register
-//
-#define TFTCTL_HWUTSF 0x80 //
+/* Bits in the TFTCTL register */
+#define TFTCTL_HWUTSF 0x80
#define TFTCTL_TBTTSYNC 0x40
#define TFTCTL_HWUTSFEN 0x20
-#define TFTCTL_TSFCNTRRD 0x10 //
-#define TFTCTL_TBTTSYNCEN 0x08 //
-#define TFTCTL_TSFSYNCEN 0x04 //
-#define TFTCTL_TSFCNTRST 0x02 //
-#define TFTCTL_TSFCNTREN 0x01 //
-
-//
-// Bits in the EnhanceCFG register
-//
+#define TFTCTL_TSFCNTRRD 0x10
+#define TFTCTL_TBTTSYNCEN 0x08
+#define TFTCTL_TSFSYNCEN 0x04
+#define TFTCTL_TSFCNTRST 0x02
+#define TFTCTL_TSFCNTREN 0x01
+
+/* Bits in the EnhanceCFG register */
#define EnCFG_BarkerPream 0x00020000
#define EnCFG_NXTBTTCFPSTR 0x00010000
#define EnCFG_BcnSusClr 0x00000200
@@ -300,14 +290,10 @@
#define EnCFG_BBType_b 0x00000001
#define EnCFG_BBType_a 0x00000000
-//
-// Bits in the Page1Sel register
-//
+/* Bits in the Page1Sel register */
#define PAGE1_SEL 0x01
-//
-// Bits in the CFG register
-//
+/* Bits in the CFG register */
#define CFG_TKIPOPT 0x80
#define CFG_RXDMAOPT 0x40
#define CFG_TMOT_SW 0x20
@@ -318,242 +304,196 @@
#define CFG_NOTXTIMEOUT 0x02
#define CFG_NOBUFOPT 0x01
-//
-// Bits in the TEST register
-//
-#define TEST_LBEXT 0x80 //
-#define TEST_LBINT 0x40 //
-#define TEST_LBNONE 0x00 //
-#define TEST_SOFTINT 0x20 //
-#define TEST_CONTTX 0x10 //
-#define TEST_TXPE 0x08 //
-#define TEST_NAVDIS 0x04 //
-#define TEST_NOCTS 0x02 //
-#define TEST_NOACK 0x01 //
-
-//
-// Bits in the HOSTCR register
-//
-#define HOSTCR_TXONST 0x80 //
-#define HOSTCR_RXONST 0x40 //
-#define HOSTCR_ADHOC 0x20 // Network Type 1 = Ad-hoc
-#define HOSTCR_AP 0x10 // Port Type 1 = AP
-#define HOSTCR_TXON 0x08 //0000 1000
-#define HOSTCR_RXON 0x04 //0000 0100
-#define HOSTCR_MACEN 0x02 //0000 0010
-#define HOSTCR_SOFTRST 0x01 //0000 0001
-
-//
-// Bits in the MACCR register
-//
-#define MACCR_SYNCFLUSHOK 0x04 //
-#define MACCR_SYNCFLUSH 0x02 //
-#define MACCR_CLRNAV 0x01 //
-
-// Bits in the MAC_REG_GPIOCTL0 register
-//
-#define LED_ACTSET 0x01 //
-#define LED_RFOFF 0x02 //
-#define LED_NOCONNECT 0x04 //
-//
-// Bits in the RCR register
-//
+/* Bits in the TEST register */
+#define TEST_LBEXT 0x80
+#define TEST_LBINT 0x40
+#define TEST_LBNONE 0x00
+#define TEST_SOFTINT 0x20
+#define TEST_CONTTX 0x10
+#define TEST_TXPE 0x08
+#define TEST_NAVDIS 0x04
+#define TEST_NOCTS 0x02
+#define TEST_NOACK 0x01
+
+/* Bits in the HOSTCR register */
+#define HOSTCR_TXONST 0x80
+#define HOSTCR_RXONST 0x40
+#define HOSTCR_ADHOC 0x20 /* Network Type 1 = Ad-hoc */
+#define HOSTCR_AP 0x10 /* Port Type 1 = AP */
+#define HOSTCR_TXON 0x08 /* 0000 1000 */
+#define HOSTCR_RXON 0x04 /* 0000 0100 */
+#define HOSTCR_MACEN 0x02 /* 0000 0010 */
+#define HOSTCR_SOFTRST 0x01 /* 0000 0001 */
+
+/* Bits in the MACCR register */
+#define MACCR_SYNCFLUSHOK 0x04
+#define MACCR_SYNCFLUSH 0x02
+#define MACCR_CLRNAV 0x01
+
+/* Bits in the MAC_REG_GPIOCTL0 register */
+#define LED_ACTSET 0x01
+#define LED_RFOFF 0x02
+#define LED_NOCONNECT 0x04
+
+/* Bits in the RCR register */
#define RCR_SSID 0x80
-#define RCR_RXALLTYPE 0x40 //
-#define RCR_UNICAST 0x20 //
-#define RCR_BROADCAST 0x10 //
-#define RCR_MULTICAST 0x08 //
-#define RCR_WPAERR 0x04 //
-#define RCR_ERRCRC 0x02 //
-#define RCR_BSSID 0x01 //
-
-//
-// Bits in the TCR register
-//
-#define TCR_SYNCDCFOPT 0x02 //
-#define TCR_AUTOBCNTX 0x01 // Beacon automatically transmit enable
-
-//
-// Bits in the IMR register
-//
-#define IMR_MEASURESTART 0x80000000 //
-#define IMR_QUIETSTART 0x20000000 //
-#define IMR_RADARDETECT 0x10000000 //
-#define IMR_MEASUREEND 0x08000000 //
-#define IMR_SOFTTIMER1 0x00200000 //
-#define IMR_RXDMA1 0x00001000 //0000 0000 0001 0000 0000 0000
-#define IMR_RXNOBUF 0x00000800 //
-#define IMR_MIBNEARFULL 0x00000400 //
-#define IMR_SOFTINT 0x00000200 //
-#define IMR_FETALERR 0x00000100 //
-#define IMR_WATCHDOG 0x00000080 //
-#define IMR_SOFTTIMER 0x00000040 //
-#define IMR_GPIO 0x00000020 //
-#define IMR_TBTT 0x00000010 //
-#define IMR_RXDMA0 0x00000008 //
-#define IMR_BNTX 0x00000004 //
-#define IMR_AC0DMA 0x00000002 //
-#define IMR_TXDMA0 0x00000001 //
-
-//
-// Bits in the ISR register
-//
-
-#define ISR_MEASURESTART 0x80000000 //
-#define ISR_QUIETSTART 0x20000000 //
-#define ISR_RADARDETECT 0x10000000 //
-#define ISR_MEASUREEND 0x08000000 //
-#define ISR_SOFTTIMER1 0x00200000 //
-#define ISR_RXDMA1 0x00001000 //0000 0000 0001 0000 0000 0000
-#define ISR_RXNOBUF 0x00000800 //0000 0000 0000 1000 0000 0000
-#define ISR_MIBNEARFULL 0x00000400 //0000 0000 0000 0100 0000 0000
-#define ISR_SOFTINT 0x00000200 //
-#define ISR_FETALERR 0x00000100 //
-#define ISR_WATCHDOG 0x00000080 //
-#define ISR_SOFTTIMER 0x00000040 //
-#define ISR_GPIO 0x00000020 //
-#define ISR_TBTT 0x00000010 //
-#define ISR_RXDMA0 0x00000008 //
-#define ISR_BNTX 0x00000004 //
-#define ISR_AC0DMA 0x00000002 //
-#define ISR_TXDMA0 0x00000001 //
-
-//
-// Bits in the PSCFG register
-//
-#define PSCFG_PHILIPMD 0x40 //
-#define PSCFG_WAKECALEN 0x20 //
-#define PSCFG_WAKETMREN 0x10 //
-#define PSCFG_BBPSPROG 0x08 //
-#define PSCFG_WAKESYN 0x04 //
-#define PSCFG_SLEEPSYN 0x02 //
-#define PSCFG_AUTOSLEEP 0x01 //
-
-//
-// Bits in the PSCTL register
-//
-#define PSCTL_WAKEDONE 0x20 //
-#define PSCTL_PS 0x10 //
-#define PSCTL_GO2DOZE 0x08 //
-#define PSCTL_LNBCN 0x04 //
-#define PSCTL_ALBCN 0x02 //
-#define PSCTL_PSEN 0x01 //
-
-//
-// Bits in the PSPWSIG register
-//
-#define PSSIG_WPE3 0x80 //
-#define PSSIG_WPE2 0x40 //
-#define PSSIG_WPE1 0x20 //
-#define PSSIG_WRADIOPE 0x10 //
-#define PSSIG_SPE3 0x08 //
-#define PSSIG_SPE2 0x04 //
-#define PSSIG_SPE1 0x02 //
-#define PSSIG_SRADIOPE 0x01 //
-
-//
-// Bits in the BBREGCTL register
-//
-#define BBREGCTL_DONE 0x04 //
-#define BBREGCTL_REGR 0x02 //
-#define BBREGCTL_REGW 0x01 //
-
-//
-// Bits in the IFREGCTL register
-//
-#define IFREGCTL_DONE 0x04 //
-#define IFREGCTL_IFRF 0x02 //
-#define IFREGCTL_REGW 0x01 //
-
-//
-// Bits in the SOFTPWRCTL register
-//
-#define SOFTPWRCTL_RFLEOPT 0x0800 //
-#define SOFTPWRCTL_TXPEINV 0x0200 //
-#define SOFTPWRCTL_SWPECTI 0x0100 //
-#define SOFTPWRCTL_SWPAPE 0x0020 //
-#define SOFTPWRCTL_SWCALEN 0x0010 //
-#define SOFTPWRCTL_SWRADIO_PE 0x0008 //
-#define SOFTPWRCTL_SWPE2 0x0004 //
-#define SOFTPWRCTL_SWPE1 0x0002 //
-#define SOFTPWRCTL_SWPE3 0x0001 //
-
-//
-// Bits in the GPIOCTL1 register
-//
-#define GPIO1_DATA1 0x20 //
-#define GPIO1_MD1 0x10 //
-#define GPIO1_DATA0 0x02 //
-#define GPIO1_MD0 0x01 //
-
-//
-// Bits in the DMACTL register
-//
-#define DMACTL_CLRRUN 0x00080000 //
-#define DMACTL_RUN 0x00000008 //
-#define DMACTL_WAKE 0x00000004 //
-#define DMACTL_DEAD 0x00000002 //
-#define DMACTL_ACTIVE 0x00000001 //
-//
-// Bits in the RXDMACTL0 register
-//
-#define RX_PERPKT 0x00000100 //
-#define RX_PERPKTCLR 0x01000000 //
-//
-// Bits in the BCNDMACTL register
-//
-#define BEACON_READY 0x01 //
-//
-// Bits in the MISCFFCTL register
-//
-#define MISCFFCTL_WRITE 0x0001 //
-
-//
-// Bits in WAKEUPEN0
-//
+#define RCR_RXALLTYPE 0x40
+#define RCR_UNICAST 0x20
+#define RCR_BROADCAST 0x10
+#define RCR_MULTICAST 0x08
+#define RCR_WPAERR 0x04
+#define RCR_ERRCRC 0x02
+#define RCR_BSSID 0x01
+
+/* Bits in the TCR register */
+#define TCR_SYNCDCFOPT 0x02
+#define TCR_AUTOBCNTX 0x01 /* Beacon automatically transmit enable */
+
+/* Bits in the IMR register */
+#define IMR_MEASURESTART 0x80000000
+#define IMR_QUIETSTART 0x20000000
+#define IMR_RADARDETECT 0x10000000
+#define IMR_MEASUREEND 0x08000000
+#define IMR_SOFTTIMER1 0x00200000
+#define IMR_RXDMA1 0x00001000 /* 0000 0000 0001 0000 0000 0000 */
+#define IMR_RXNOBUF 0x00000800
+#define IMR_MIBNEARFULL 0x00000400
+#define IMR_SOFTINT 0x00000200
+#define IMR_FETALERR 0x00000100
+#define IMR_WATCHDOG 0x00000080
+#define IMR_SOFTTIMER 0x00000040
+#define IMR_GPIO 0x00000020
+#define IMR_TBTT 0x00000010
+#define IMR_RXDMA0 0x00000008
+#define IMR_BNTX 0x00000004
+#define IMR_AC0DMA 0x00000002
+#define IMR_TXDMA0 0x00000001
+
+/* Bits in the ISR register */
+#define ISR_MEASURESTART 0x80000000
+#define ISR_QUIETSTART 0x20000000
+#define ISR_RADARDETECT 0x10000000
+#define ISR_MEASUREEND 0x08000000
+#define ISR_SOFTTIMER1 0x00200000
+#define ISR_RXDMA1 0x00001000 /* 0000 0000 0001 0000 0000 0000 */
+#define ISR_RXNOBUF 0x00000800 /* 0000 0000 0000 1000 0000 0000 */
+#define ISR_MIBNEARFULL 0x00000400 /* 0000 0000 0000 0100 0000 0000 */
+#define ISR_SOFTINT 0x00000200
+#define ISR_FETALERR 0x00000100
+#define ISR_WATCHDOG 0x00000080
+#define ISR_SOFTTIMER 0x00000040
+#define ISR_GPIO 0x00000020
+#define ISR_TBTT 0x00000010
+#define ISR_RXDMA0 0x00000008
+#define ISR_BNTX 0x00000004
+#define ISR_AC0DMA 0x00000002
+#define ISR_TXDMA0 0x00000001
+
+/* Bits in the PSCFG register */
+#define PSCFG_PHILIPMD 0x40
+#define PSCFG_WAKECALEN 0x20
+#define PSCFG_WAKETMREN 0x10
+#define PSCFG_BBPSPROG 0x08
+#define PSCFG_WAKESYN 0x04
+#define PSCFG_SLEEPSYN 0x02
+#define PSCFG_AUTOSLEEP 0x01
+
+/* Bits in the PSCTL register */
+#define PSCTL_WAKEDONE 0x20
+#define PSCTL_PS 0x10
+#define PSCTL_GO2DOZE 0x08
+#define PSCTL_LNBCN 0x04
+#define PSCTL_ALBCN 0x02
+#define PSCTL_PSEN 0x01
+
+/* Bits in the PSPWSIG register */
+#define PSSIG_WPE3 0x80
+#define PSSIG_WPE2 0x40
+#define PSSIG_WPE1 0x20
+#define PSSIG_WRADIOPE 0x10
+#define PSSIG_SPE3 0x08
+#define PSSIG_SPE2 0x04
+#define PSSIG_SPE1 0x02
+#define PSSIG_SRADIOPE 0x01
+
+/* Bits in the BBREGCTL register */
+#define BBREGCTL_DONE 0x04
+#define BBREGCTL_REGR 0x02
+#define BBREGCTL_REGW 0x01
+
+/* Bits in the IFREGCTL register */
+#define IFREGCTL_DONE 0x04
+#define IFREGCTL_IFRF 0x02
+#define IFREGCTL_REGW 0x01
+
+/* Bits in the SOFTPWRCTL register */
+#define SOFTPWRCTL_RFLEOPT 0x0800
+#define SOFTPWRCTL_TXPEINV 0x0200
+#define SOFTPWRCTL_SWPECTI 0x0100
+#define SOFTPWRCTL_SWPAPE 0x0020
+#define SOFTPWRCTL_SWCALEN 0x0010
+#define SOFTPWRCTL_SWRADIO_PE 0x0008
+#define SOFTPWRCTL_SWPE2 0x0004
+#define SOFTPWRCTL_SWPE1 0x0002
+#define SOFTPWRCTL_SWPE3 0x0001
+
+/* Bits in the GPIOCTL1 register */
+#define GPIO1_DATA1 0x20
+#define GPIO1_MD1 0x10
+#define GPIO1_DATA0 0x02
+#define GPIO1_MD0 0x01
+
+/* Bits in the DMACTL register */
+#define DMACTL_CLRRUN 0x00080000
+#define DMACTL_RUN 0x00000008
+#define DMACTL_WAKE 0x00000004
+#define DMACTL_DEAD 0x00000002
+#define DMACTL_ACTIVE 0x00000001
+
+/* Bits in the RXDMACTL0 register */
+#define RX_PERPKT 0x00000100
+#define RX_PERPKTCLR 0x01000000
+
+/* Bits in the BCNDMACTL register */
+#define BEACON_READY 0x01
+
+/* Bits in the MISCFFCTL register */
+#define MISCFFCTL_WRITE 0x0001
+
+/* Bits in WAKEUPEN0 */
#define WAKEUPEN0_DIRPKT 0x10
#define WAKEUPEN0_LINKOFF 0x08
#define WAKEUPEN0_ATIMEN 0x04
#define WAKEUPEN0_TIMEN 0x02
#define WAKEUPEN0_MAGICEN 0x01
-//
-// Bits in WAKEUPEN1
-//
+/* Bits in WAKEUPEN1 */
#define WAKEUPEN1_128_3 0x08
#define WAKEUPEN1_128_2 0x04
#define WAKEUPEN1_128_1 0x02
#define WAKEUPEN1_128_0 0x01
-//
-// Bits in WAKEUPSR0
-//
+/* Bits in WAKEUPSR0 */
#define WAKEUPSR0_DIRPKT 0x10
#define WAKEUPSR0_LINKOFF 0x08
#define WAKEUPSR0_ATIMEN 0x04
#define WAKEUPSR0_TIMEN 0x02
#define WAKEUPSR0_MAGICEN 0x01
-//
-// Bits in WAKEUPSR1
-//
+/* Bits in WAKEUPSR1 */
#define WAKEUPSR1_128_3 0x08
#define WAKEUPSR1_128_2 0x04
#define WAKEUPSR1_128_1 0x02
#define WAKEUPSR1_128_0 0x01
-//
-// Bits in the MAC_REG_GPIOCTL register
-//
-#define GPIO0_MD 0x01 //
-#define GPIO0_DATA 0x02 //
-#define GPIO0_INTMD 0x04 //
-#define GPIO1_MD 0x10 //
-#define GPIO1_DATA 0x20 //
-
-//
-// Bits in the MSRCTL register
-//
+/* Bits in the MAC_REG_GPIOCTL register */
+#define GPIO0_MD 0x01
+#define GPIO0_DATA 0x02
+#define GPIO0_INTMD 0x04
+#define GPIO1_MD 0x10
+#define GPIO1_DATA 0x20
+
+/* Bits in the MSRCTL register */
#define MSRCTL_FINISH 0x80
#define MSRCTL_READY 0x40
#define MSRCTL_RADARDETECT 0x20
@@ -562,28 +502,27 @@
#define MSRCTL_QUIETRPT 0x04
#define MSRCTL_QUIETINT 0x02
#define MSRCTL_QUIETEN 0x01
-//
-// Bits in the MSRCTL1 register
-//
+
+/* Bits in the MSRCTL1 register */
#define MSRCTL1_TXPWR 0x08
#define MSRCTL1_CSAPAREN 0x04
#define MSRCTL1_TXPAUSE 0x01
-// Loopback mode
-#define MAC_LB_EXT 0x02 //
-#define MAC_LB_INTERNAL 0x01 //
-#define MAC_LB_NONE 0x00 //
+/* Loopback mode */
+#define MAC_LB_EXT 0x02
+#define MAC_LB_INTERNAL 0x01
+#define MAC_LB_NONE 0x00
#define Default_BI 0x200
-// MiscFIFO Offset
+/* MiscFIFO Offset */
#define MISCFIFO_KEYETRY0 32
#define MISCFIFO_KEYENTRYSIZE 22
#define MISCFIFO_SYNINFO_IDX 10
#define MISCFIFO_SYNDATA_IDX 11
#define MISCFIFO_SYNDATASIZE 21
-// enabled mask value of irq
+/* enabled mask value of irq */
#define IMR_MASK_VALUE (IMR_SOFTTIMER1 | \
IMR_RXDMA1 | \
IMR_RXNOBUF | \
@@ -599,15 +538,13 @@
IMR_AC0DMA | \
IMR_TXDMA0)
-// max time out delay time
-#define W_MAX_TIMEOUT 0xFFF0U //
+/* max time out delay time */
+#define W_MAX_TIMEOUT 0xFFF0U
-// wait time within loop
-#define CB_DELAY_LOOP_WAIT 10 // 10ms
+/* wait time within loop */
+#define CB_DELAY_LOOP_WAIT 10 /* 10ms */
-//
-// revision id
-//
+/* revision id */
#define REV_ID_VT3253_A0 0x00
#define REV_ID_VT3253_A1 0x01
#define REV_ID_VT3253_B0 0x08
@@ -691,12 +628,12 @@ do { \
VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR, \
(unsigned long *)pdwCurrDescAddr)
-// set the chip with current BCN tx descriptor address
+/* set the chip with current BCN tx descriptor address */
#define MACvSetCurrBCNTxDescAddr(dwIoBase, dwCurrDescAddr) \
VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, \
dwCurrDescAddr)
-// set the chip with current BCN length
+/* set the chip with current BCN length */
#define MACvSetCurrBCNLength(dwIoBase, wCurrBCNLength) \
VNSvOutPortW(dwIoBase + MAC_REG_BCNDMACTL+2, \
wCurrBCNLength)
@@ -888,7 +825,7 @@ do { \
VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1)
#define MACvReadMIBCounter(dwIoBase, pdwCounter) \
- VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR , pdwCounter)
+ VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR, pdwCounter)
#define MACvPwrEvntDisable(dwIoBase) \
VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPEN0, 0x0000)
@@ -896,7 +833,7 @@ do { \
#define MACvEnableProtectMD(dwIoBase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
+ VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
@@ -904,7 +841,7 @@ do { \
#define MACvDisableProtectMD(dwIoBase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
+ VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
@@ -912,7 +849,7 @@ do { \
#define MACvEnableBarkerPreambleMd(dwIoBase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
+ VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
@@ -920,7 +857,7 @@ do { \
#define MACvDisableBarkerPreambleMd(dwIoBase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
+ VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
@@ -928,7 +865,7 @@ do { \
#define MACvSetBBType(dwIoBase, byTyp) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
+ VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
@@ -953,15 +890,18 @@ do { \
#define MACvSetRFLE_LatchBase(dwIoBase) \
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
-bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
-bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
+bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
+ unsigned char byTestBits);
+bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs,
+ unsigned char byTestBits);
bool MACbIsIntDisable(void __iomem *dwIoBase);
void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
-void MACvGetLongRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit);
+void MACvGetLongRetryLimit(void __iomem *dwIoBase,
+ unsigned char *pbyRetryLimit);
void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode);
@@ -975,22 +915,32 @@ bool MACbSafeTxOff(void __iomem *dwIoBase);
bool MACbSafeStop(void __iomem *dwIoBase);
bool MACbShutdown(void __iomem *dwIoBase);
void MACvInitialize(void __iomem *dwIoBase);
-void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
-void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
-void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
-void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
-void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
-void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
-void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
+void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
+void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
+void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
+void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
+void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
+void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase,
+ unsigned long dwCurrDescAddr);
void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay);
void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
-void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, unsigned long dwData);
+void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset,
+ unsigned long dwData);
bool MACbPSWakeup(void __iomem *dwIoBase);
-void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
- unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID);
+void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
+ unsigned int uEntryIdx, unsigned int uKeyIdx,
+ unsigned char *pbyAddr, u32 *pdwKey,
+ unsigned char byLocalID);
void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx);
-#endif // __MAC_H__
+#endif /* __MAC_H__ */
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index e826f07e91c0..be3c4e949b6a 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -71,33 +71,33 @@ PSvEnablePowerSaving(
struct vnt_private *pDevice = hDeviceContext;
u16 wAID = pDevice->current_aid | BIT(14) | BIT(15);
- // set period of power up before TBTT
+ /* set period of power up before TBTT */
VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
- // set AID
+ /* set AID */
VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID);
} else {
- // set ATIM Window
+ /* set ATIM Window */
#if 0 /* TODO atim window */
MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
#endif
}
- // Set AutoSleep
+ /* Set AutoSleep */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
- // Set HWUTSF
+ /* Set HWUTSF */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
if (wListenInterval >= 2) {
- // clear always listen beacon
+ /* clear always listen beacon */
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
- // first time set listen next beacon
+ /* first time set listen next beacon */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
} else {
- // always listen beacon
+ /* always listen beacon */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
}
- // enable power saving hw function
+ /* enable power saving hw function */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
pDevice->bEnablePSMode = true;
@@ -122,13 +122,13 @@ PSvDisablePowerSaving(
{
struct vnt_private *pDevice = hDeviceContext;
- // disable power saving hw function
+ /* disable power saving hw function */
MACbPSWakeup(pDevice->PortOffset);
- //clear AutoSleep
+ /* clear AutoSleep */
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
- //clear HWUTSF
+ /* clear HWUTSF */
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
- // set always listen beacon
+ /* set always listen beacon */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
pDevice->bEnablePSMode = false;
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 32ef99341e20..941b2adca95a 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -651,7 +651,8 @@ bool RFbInit(
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, unsigned char byChannel)
+bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
+ u16 byChannel)
{
bool bResult = true;
@@ -687,7 +688,8 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, unsigned
* Return Value: None.
*
*/
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, unsigned int uChannel)
+bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
+ u16 uChannel)
{
void __iomem *dwIoBase = priv->PortOffset;
int ii;
@@ -767,13 +769,12 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, unsig
bool RFbSetPower(
struct vnt_private *priv,
unsigned int uRATE,
- unsigned int uCH
+ u16 uCH
)
{
bool bResult = true;
unsigned char byPwr = 0;
unsigned char byDec = 0;
- unsigned char byPwrdBm = 0;
if (priv->dwDiagRefCount != 0)
return true;
@@ -786,8 +787,10 @@ bool RFbSetPower(
case RATE_2M:
case RATE_5M:
case RATE_11M:
+ if (uCH > CB_MAX_CHANNEL_24G)
+ return false;
+
byPwr = priv->abyCCKPwrTbl[uCH];
- byPwrdBm = priv->abyCCKDefaultPwr[uCH];
break;
case RATE_6M:
case RATE_9M:
@@ -801,15 +804,6 @@ bool RFbSetPower(
if (byDec >= priv->byMaxPwrLevel)
byDec = priv->byMaxPwrLevel-1;
- if (priv->byRFType == RF_UW2452) {
- byPwrdBm = byDec - byPwr;
- byPwrdBm /= 3;
- } else {
- byPwrdBm = byDec - byPwr;
- byPwrdBm >>= 1;
- }
-
- byPwrdBm += priv->abyOFDMDefaultPwr[uCH];
byPwr = byDec;
break;
case RATE_24M:
@@ -817,7 +811,6 @@ bool RFbSetPower(
case RATE_48M:
case RATE_54M:
byPwr = priv->abyOFDMPwrTbl[uCH];
- byPwrdBm = priv->abyOFDMDefaultPwr[uCH];
break;
}
@@ -937,8 +930,8 @@ RFvRSSITodBm(
/* Post processing for the 11b/g and 11a.
* for save time on changing Reg2,3,5,7,10,12,15 */
bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
- unsigned char byOldChannel,
- unsigned char byNewChannel)
+ u16 byOldChannel,
+ u16 byNewChannel)
{
bool bResult;
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 8a6e2cfedaa5..2ea21e2b00f2 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -74,12 +74,12 @@
/*--------------------- Export Functions --------------------------*/
bool IFRFbWriteEmbedded(struct vnt_private *, unsigned long dwData);
-bool RFbSelectChannel(struct vnt_private *, unsigned char byRFType, unsigned char byChannel);
+bool RFbSelectChannel(struct vnt_private *, unsigned char byRFType, u16);
bool RFbInit(
struct vnt_private *
);
-bool RFvWriteWakeProgSyn(struct vnt_private *, unsigned char byRFType, unsigned int uChannel);
-bool RFbSetPower(struct vnt_private *, unsigned int uRATE, unsigned int uCH);
+bool RFvWriteWakeProgSyn(struct vnt_private *, unsigned char byRFType, u16);
+bool RFbSetPower(struct vnt_private *, unsigned int uRATE, u16);
bool RFbRawSetPower(
struct vnt_private *,
unsigned char byPwr,
@@ -94,7 +94,7 @@ RFvRSSITodBm(
);
//{{ RobertYu: 20050104
-bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, unsigned char byOldChannel, unsigned char byNewChannel);
+bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, u16, u16);
//}} RobertYu
#endif // __RF_H__
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 61c39dd7ad01..07ce3fd88e70 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -205,7 +205,7 @@ s_uGetRTSCTSRsvTime(
unsigned short wCurrentRate
)
{
- unsigned int uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
+ unsigned int uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
@@ -1204,13 +1204,9 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
ptdCurr = (PSTxDesc)pHeadTD;
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+ ptdCurr->pTDInfo->dwReqCount = cbReqCount;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- /* Set TSR1 & ReqCount in TxDescHead */
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
return cbHeaderLength;
}
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index c53703a772f5..cc63dc8d47f7 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -33,9 +33,9 @@
/*--------------------- Export Definitions -------------------------*/
-//
-// For memory mapped IO
-//
+
+/* For memory mapped IO */
+
#define VNSvInPortB(dwIOAddress, pbyData) \
do { \
@@ -86,4 +86,4 @@ do { \
/*--------------------- Export Functions --------------------------*/
-#endif // __UPC_H__
+#endif /* __UPC_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 9340f1508cff..67ff13f4f731 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -78,7 +78,7 @@ void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL, 0xb0);
- vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNLE,
+ vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
connection_channel, 0, 0, NULL);
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 5a7ca527106e..f71d59fa3b21 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -160,7 +160,7 @@
#define MESSAGE_TYPE_CLRKEYENTRY 0x9
#define MESSAGE_TYPE_WRITE_MISCFF 0xa
#define MESSAGE_TYPE_SET_ANTMD 0xb
-#define MESSAGE_TYPE_SELECT_CHANNLE 0xc
+#define MESSAGE_TYPE_SELECT_CHANNEL 0xc
#define MESSAGE_TYPE_SET_TSFTBTT 0xd
#define MESSAGE_TYPE_SET_SSTIFS 0xe
#define MESSAGE_TYPE_CHANGE_BBTYPE 0xf
@@ -307,8 +307,8 @@ struct vnt_private {
struct vnt_cmd_card_init init_command;
struct vnt_rsp_card_init init_response;
- u8 current_net_addr[ETH_ALEN];
- u8 permanent_net_addr[ETH_ALEN];
+ u8 current_net_addr[ETH_ALEN] __aligned(2);
+ u8 permanent_net_addr[ETH_ALEN] __aligned(2);
u8 exist_sw_net_addr;
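The two __aligned(2) annotations above prepare for the switch to ether_addr_copy() later in this series (main_usb.c and rxtx.c): that helper copies the six address bytes as 16-bit words, so both buffers must be 2-byte aligned, which a bare u8 array does not guarantee. A minimal user-space sketch of the same constraint; example_ether_addr_copy() and struct example_priv are illustrative stand-ins, not the kernel implementation.

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

/* Illustrative stand-in for ether_addr_copy(): three 16-bit stores
 * instead of a byte-wise memcpy.  Both pointers must be 2-byte
 * aligned, or this is undefined on strict-alignment architectures. */
static void example_ether_addr_copy(uint8_t *dst, const uint8_t *src)
{
	uint16_t *d = (uint16_t *)dst;
	const uint16_t *s = (const uint16_t *)src;

	d[0] = s[0];
	d[1] = s[1];
	d[2] = s[2];
}

struct example_priv {
	/* mirrors the __aligned(2) added to vnt_private above */
	uint8_t current_net_addr[ETH_ALEN] __attribute__((aligned(2)));
	uint8_t permanent_net_addr[ETH_ALEN] __attribute__((aligned(2)));
};

int main(void)
{
	struct example_priv p = {
		.permanent_net_addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};

	example_ether_addr_copy(p.current_net_addr, p.permanent_net_addr);
	return memcmp(p.current_net_addr, p.permanent_net_addr, ETH_ALEN) != 0;
}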
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index fab195f8c3f5..95e0e83a487e 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -32,6 +32,6 @@
#include "device.h"
int vnt_rx_data(struct vnt_private *, struct vnt_rcb *,
- unsigned long bytes_recieved);
+ unsigned long bytes_received);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index b95d5b1efcc7..71adc1f61838 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -34,6 +34,7 @@
*/
#undef __NO_VERSION__
+#include <linux/etherdevice.h>
#include <linux/file.h>
#include "device.h"
#include "card.h"
@@ -319,7 +320,7 @@ static int vnt_init_registers(struct vnt_private *priv)
/* get permanent network address */
memcpy(priv->permanent_net_addr, init_rsp->net_addr, 6);
- memcpy(priv->current_net_addr, priv->permanent_net_addr, ETH_ALEN);
+ ether_addr_copy(priv->current_net_addr, priv->permanent_net_addr);
/* if exist SW network address, use it */
dev_dbg(&priv->usb->dev, "Network address = %pM\n",
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index ea5140ab2b41..33baf26de4b5 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -36,6 +36,7 @@
*
*/
+#include <linux/etherdevice.h>
#include "device.h"
#include "rxtx.h"
#include "card.h"
@@ -55,7 +56,7 @@ static const u16 vnt_fb_opt0[2][5] = {
static const u16 vnt_fb_opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, /* fallback_rate0 */
- {RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, /* fallback_rate1 */
+ {RATE_6M, RATE_6M, RATE_12M, RATE_12M, RATE_18M}, /* fallback_rate1 */
};
#define RTSDUR_BB 0
@@ -392,8 +393,8 @@ static int vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
rts->frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
- memcpy(rts->ra, hdr->addr1, ETH_ALEN);
- memcpy(rts->ta, hdr->addr2, ETH_ALEN);
+ ether_addr_copy(rts->ra, hdr->addr1);
+ ether_addr_copy(rts->ta, hdr->addr2);
return 0;
}
@@ -517,65 +518,66 @@ static u16 vnt_rxtx_rts_a_fb_head(struct vnt_usb_send_context *tx_context,
return vnt_rxtx_datahead_a_fb(tx_context, &buf->data_head);
}
-static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
- union vnt_tx_data_head *head)
+static u16 vnt_fill_cts_fb_head(struct vnt_usb_send_context *tx_context,
+ union vnt_tx_data_head *head)
{
struct vnt_private *priv = tx_context->priv;
+ struct vnt_cts_fb *buf = &head->cts_g_fb;
u32 cts_frame_len = 14;
u16 current_rate = tx_context->tx_rate;
- if (!head)
- return 0;
+ /* Get SignalField,ServiceField,Length */
+ vnt_get_phy_field(priv, cts_frame_len, priv->top_cck_basic_rate,
+ PK_TYPE_11B, &buf->b);
- if (tx_context->fb_option) {
- /* Auto Fall back */
- struct vnt_cts_fb *buf = &head->cts_g_fb;
- /* Get SignalField,ServiceField,Length */
- vnt_get_phy_field(priv, cts_frame_len,
- priv->top_cck_basic_rate, PK_TYPE_11B, &buf->b);
- buf->duration_ba =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
- /* Get CTSDuration_ba_f0 */
- buf->cts_duration_ba_f0 =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F0,
- tx_context->pkt_type,
- priv->tx_rate_fb0);
- /* Get CTSDuration_ba_f1 */
- buf->cts_duration_ba_f1 =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F1,
- tx_context->pkt_type,
- priv->tx_rate_fb1);
- /* Get CTS Frame body */
- buf->data.duration = buf->duration_ba;
- buf->data.frame_control =
- cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+ buf->duration_ba =
+ vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
+ tx_context->pkt_type,
+ current_rate);
+ /* Get CTSDuration_ba_f0 */
+ buf->cts_duration_ba_f0 =
+ vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F0,
+ tx_context->pkt_type,
+ priv->tx_rate_fb0);
+ /* Get CTSDuration_ba_f1 */
+ buf->cts_duration_ba_f1 =
+ vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F1,
+ tx_context->pkt_type,
+ priv->tx_rate_fb1);
+ /* Get CTS Frame body */
+ buf->data.duration = buf->duration_ba;
+ buf->data.frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
- memcpy(buf->data.ra, priv->current_net_addr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra, priv->current_net_addr);
- return vnt_rxtx_datahead_g_fb(tx_context, &buf->data_head);
- } else {
- struct vnt_cts *buf = &head->cts_g;
- /* Get SignalField,ServiceField,Length */
- vnt_get_phy_field(priv, cts_frame_len,
- priv->top_cck_basic_rate, PK_TYPE_11B, &buf->b);
- /* Get CTSDuration_ba */
- buf->duration_ba =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
- /*Get CTS Frame body*/
- buf->data.duration = buf->duration_ba;
- buf->data.frame_control =
- cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+ return vnt_rxtx_datahead_g_fb(tx_context, &buf->data_head);
+}
+
+static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
+ union vnt_tx_data_head *head)
+{
+ struct vnt_private *priv = tx_context->priv;
+ struct vnt_cts *buf = &head->cts_g;
+ u32 cts_frame_len = 14;
+ u16 current_rate = tx_context->tx_rate;
- memcpy(buf->data.ra, priv->current_net_addr, ETH_ALEN);
+ /* Get SignalField,ServiceField,Length */
+ vnt_get_phy_field(priv, cts_frame_len, priv->top_cck_basic_rate,
+ PK_TYPE_11B, &buf->b);
+ /* Get CTSDuration_ba */
+ buf->duration_ba =
+ vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
+ tx_context->pkt_type,
+ current_rate);
+ /*Get CTS Frame body*/
+ buf->data.duration = buf->duration_ba;
+ buf->data.frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
- return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
- }
+ ether_addr_copy(buf->data.ra, priv->current_net_addr);
- return 0;
+ return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
}
static u16 vnt_rxtx_rts(struct vnt_usb_send_context *tx_context,
@@ -632,6 +634,9 @@ static u16 vnt_rxtx_cts(struct vnt_usb_send_context *tx_context,
head = &tx_head->tx_cts.tx.mic.head;
/* Fill CTS */
+ if (tx_context->fb_option)
+ return vnt_fill_cts_fb_head(tx_context, head);
+
return vnt_fill_cts_head(tx_context, head);
}
@@ -739,7 +744,7 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
mic_hdr->id = 0x59;
mic_hdr->payload_len = cpu_to_be16(payload_len);
- memcpy(mic_hdr->mic_addr2, hdr->addr2, ETH_ALEN);
+ ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
ieee80211_get_key_tx_seq(tx_key, &seq);
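The CTS changes above are a flattening refactor: one helper that branched on tx_context->fb_option internally becomes two straight-line helpers, and the caller (vnt_rxtx_cts()) picks between them. A self-contained sketch of that restructuring; every name below is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct ctx { bool fb_option; };

/* fallback-rate variant: would fill the extra f0/f1 duration fields */
static unsigned int fill_cts_fb_head(const struct ctx *c)
{
	(void)c;
	printf("fill CTS head with fallback durations\n");
	return 2;
}

/* plain variant: only the base duration */
static unsigned int fill_cts_head(const struct ctx *c)
{
	(void)c;
	printf("fill plain CTS head\n");
	return 1;
}

/* dispatch at the caller, mirroring the new vnt_rxtx_cts() */
static unsigned int rxtx_cts(const struct ctx *c)
{
	if (c->fb_option)
		return fill_cts_fb_head(c);
	return fill_cts_head(c);
}

int main(void)
{
	struct ctx a = { .fb_option = true }, b = { .fb_option = false };

	return !(rxtx_cts(&a) == 2 && rxtx_cts(&b) == 1);
}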
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 8942dcb44180..7c87aecf4744 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -325,9 +325,9 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
if (result == 0) {
sinfo->txrate.legacy = quality.txrate.data;
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
sinfo->signal = quality.level.data;
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}
return result;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 20d146b61ba7..8f2091070491 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -879,7 +879,7 @@ typedef struct hfa384x_usb_error {
/* Unions for packaging all the known packet types together */
typedef union hfa384x_usbout {
- u16 type;
+ __le16 type;
hfa384x_usb_txfrm_t txfrm;
hfa384x_usb_cmdreq_t cmdreq;
hfa384x_usb_wridreq_t wridreq;
@@ -889,7 +889,7 @@ typedef union hfa384x_usbout {
} __packed hfa384x_usbout_t;
typedef union hfa384x_usbin {
- u16 type;
+ __le16 type;
hfa384x_usb_rxfrm_t rxfrm;
hfa384x_usb_txfrm_t txfrm;
hfa384x_usb_infofrm_t infofrm;
@@ -1268,7 +1268,7 @@ typedef struct hfa384x {
hfa384x_downloadbuffer_t bufinfo;
u16 dltimeout;
- int scanflag; /* to signal scan comlete */
+ int scanflag; /* to signal scan complete */
int join_ap; /* are we joined to a specific ap */
int join_retries; /* number of join retries till we fail */
hfa384x_JoinRequest_data_t joinreq; /* join request saved data */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 55d2f563e308..28cd1c4c02c8 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -382,7 +382,7 @@ done:
*
* Arguments:
* hw device struct
-* tx_urb URB of data for tranmission
+* tx_urb URB of data for transmission
* memflags memory allocation flags
*
* Returns:
@@ -2391,7 +2391,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
-* -ETIMEDOUT timout waiting for the cmd regs to become
+* -ETIMEDOUT timeout waiting for the cmd regs to become
* available, or waiting for the control reg
* to indicate the Aux port is enabled.
* -ENODATA the buffer does NOT contain a valid PDA.
@@ -3346,7 +3346,7 @@ retry:
if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0)
run_queue = 1;
} else {
- const u16 intype = (usbin->type & ~cpu_to_le16(0x8000));
+ const __le16 intype = (usbin->type & ~cpu_to_le16(0x8000));
/*
* Check that our message is what we're expecting ...
@@ -4123,12 +4123,11 @@ static int hfa384x_isgood_pdrcode(u16 pdrcode)
pr_debug("Encountered unknown PDR#=0x%04x, assuming it's ok.\n",
pdrcode);
return 1;
- } else {
- /* bad code */
- pr_debug("Encountered unknown PDR#=0x%04x, (>=0x1000), assuming it's bad.\n",
- pdrcode);
- return 0;
}
+ break;
}
- return 0; /* avoid compiler warnings */
+ /* bad code */
+ pr_debug("Encountered unknown PDR#=0x%04x, (>=0x1000), assuming it's bad.\n",
+ pdrcode);
+ return 0;
}
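Typing usbin->type as __le16 keeps the response-bit masking entirely in little-endian space: the 0x8000 constant is folded in with cpu_to_le16(), so the wire value is never byte-swapped before the comparison. A hedged user-space sketch of the same arithmetic; le16_raw() is an illustrative stand-in for the kernel's endian helpers.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Build a 16-bit value from an explicit little-endian byte pair; the
 * result is treated as a raw LE pattern and never byte-swapped. */
static uint16_t le16_raw(uint8_t lo, uint8_t hi)
{
	uint16_t v;
	uint8_t b[2] = { lo, hi };

	memcpy(&v, b, sizeof(v));
	return v;
}

int main(void)
{
	uint16_t usbin_type = le16_raw(0x12, 0x80); /* 0x8012 on the wire */
	uint16_t resp_bit   = le16_raw(0x00, 0x80); /* cpu_to_le16(0x8000) */
	uint16_t expected   = le16_raw(0x12, 0x00); /* cpu_to_le16(0x0012) */

	/* Mask without leaving LE space, as the driver does with
	 * usbin->type & ~cpu_to_le16(0x8000). */
	uint16_t intype = usbin_type & (uint16_t)~resp_bit;

	printf("match: %d\n", intype == expected);
	return intype != expected;
}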
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 7eaaf9a63503..bd69e8cf200f 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -511,7 +511,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
* protocol.
*
* Arguments:
-* proto protocl number (in host order) to search for.
+* proto protocol number (in host order) to search for.
*
* Returns:
* 1 - if the table is empty or a match is found.
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index 7221379c9742..4b84b568f6ca 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -80,7 +80,7 @@ static void p80211req_mibset_mibget(wlandevice_t *wlandev,
/*----------------------------------------------------------------
* p80211req_dorequest
*
-* Handles an MLME reqest/confirm message.
+* Handles an MLME request/confirm message.
*
* Arguments:
* wlandev WLAN device struct
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index b62fdcba94e6..16f123959104 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -45,7 +45,7 @@
* --------------------------------------------------------------------
*
* This file contains the constants and data structures for interaction
-* with the hfa384x Wireless LAN (WLAN) Media Access Contoller (MAC).
+* with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
* The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
*
* [Implementation and usage notes]
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index df577dfe7ffb..10ad24a89ddd 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -199,7 +199,7 @@ static int prism2sta_close(wlandevice_t *wlandev)
/*----------------------------------------------------------------
* prism2sta_reset
*
-* Not currently implented.
+* Currently not implemented.
*
* Arguments:
* wlandev wlan device structure
@@ -662,7 +662,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
"ident: ap f/w: id=0x%02x %d.%d.%d\n",
hw->ident_sta_fw.id, hw->ident_sta_fw.major,
hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
- netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmeare loaded!\n");
+ netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n");
goto failed;
}
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 709d49e7f3c9..935c714f592a 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -930,7 +930,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
+ var->hsync_len;
unsigned int vtotal = var->upper_margin + var->yres + var->lower_margin
+ var->vsync_len;
-#if defined(__powerpc__)
+#if defined(__BIG_ENDIAN)
u8 cr_data;
#endif
unsigned int drate = 0, hrate = 0;
@@ -952,7 +952,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
}
pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n",
var->pixclock, htotal, vtotal);
- if (var->pixclock && htotal && vtotal) {
+ if (var->pixclock) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
xgifb_info->refresh_rate = (unsigned int) (hrate * 2
@@ -1044,7 +1044,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->DstColor = 0x0000;
xgifb_info->XGI310_AccelDepth = 0x00000000;
xgifb_info->video_cmap_len = 256;
-#if defined(__powerpc__)
+#if defined(__BIG_ENDIAN)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, (cr_data & 0xE0));
#endif
@@ -1052,7 +1052,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
case 16:
xgifb_info->DstColor = 0x8000;
xgifb_info->XGI310_AccelDepth = 0x00010000;
-#if defined(__powerpc__)
+#if defined(__BIG_ENDIAN)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x0B));
#endif
@@ -1062,7 +1062,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->DstColor = 0xC000;
xgifb_info->XGI310_AccelDepth = 0x00020000;
xgifb_info->video_cmap_len = 16;
-#if defined(__powerpc__)
+#if defined(__BIG_ENDIAN)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x15));
#endif
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 55f6774f706f..aebde3289c50 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
}
if (!strncmp("=All", text_ptr, 4)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+ cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+ cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
return -ENOMEM;
}
/*
- * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+ * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
* explicit case.
*/
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
text_ptr = strchr(text_in, '=');
if (!text_ptr) {
pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
- if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+ if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
strcmp(tiqn->tiqn, text_ptr)) {
continue;
}
@@ -3512,7 +3512,7 @@ eob:
if (end_of_buf)
break;
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
break;
}
spin_unlock(&tiqn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 09a522bae222..cbcff38ac9b7 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -135,8 +135,8 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
- IFC_SENDTARGETS_ALL = 0x00000100,
- IFC_SENDTARGETS_SINGLE = 0x00000200,
+ ICF_SENDTARGETS_ALL = 0x00000100,
+ ICF_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7653cfb027a2..58f49ff69b14 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
- int block_size = dev->dev_attrib.block_size;
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!fabric_max_sectors) {
- pr_err("dev[%p]: Illegal ZERO value for"
- " fabric_max_sectors\n", dev);
- return -EINVAL;
- }
- if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MIN);
- return -EINVAL;
- }
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- if (!block_size) {
- block_size = 512;
- pr_warn("Defaulting to 512 for zero block_size\n");
- }
- fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- block_size);
-
- dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
- pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, fabric_max_sectors);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+ " greater than hw_max_sectors: %u\n", dev,
+ optimal_sectors, dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
- dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
xcopy_lun = &dev->xcopy_lun;
xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
+ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c2aea099ea4a..d836de200a03 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct fd_prot fd_prot;
sense_reason_t rc;
int ret = 0;
-
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+ "FD_MAX_BYTES: %u iovec count limitiation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
&fileio_dev_attrib_hw_block_size.attr,
&fileio_dev_attrib_block_size.attr,
&fileio_dev_attrib_hw_max_sectors.attr,
- &fileio_dev_attrib_fabric_max_sectors.attr,
&fileio_dev_attrib_optimal_sectors.attr,
&fileio_dev_attrib_hw_queue_depth.attr,
&fileio_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3efff94fbd97..78346b850968 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
q = bdev_get_queue(bd);
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
- dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
&iblock_dev_attrib_hw_block_size.attr,
&iblock_dev_attrib_block_size.attr,
&iblock_dev_attrib_hw_max_sectors.attr,
- &iblock_dev_attrib_fabric_max_sectors.attr,
&iblock_dev_attrib_optimal_sectors.attr,
&iblock_dev_attrib_hw_queue_depth.attr,
&iblock_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d56f2aaba9af..283cf786ef98 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
return 0;
}
+ } else if (we && registered_nexus) {
+ /*
+ * Reads are allowed for Write Exclusive locks
+ * from all registrants.
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ pr_debug("Allowing READ CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 0;
+ }
}
pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 60ebd170a561..98e83ac5661b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
- &rd_mcp_dev_attrib_fabric_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 11bea1952435..cd4bed7b2757 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
-
- if (sectors > dev->dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.fabric_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
- if (sectors > dev->dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.hw_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 1307600fe726..4c71657da56a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -505,7 +505,6 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u32 max_sectors;
int have_tp = 0;
int opt, min;
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- put_unaligned_be32(max_sectors, &buf[8]);
+ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8bfa61c9693d..1a1bcf71ec9d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -784,9 +784,7 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
if (ret < 0)
goto free_skb;
- ret = genlmsg_end(skb, msg_header);
- if (ret < 0)
- goto free_skb;
+ genlmsg_end(skb, msg_header);
ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
@@ -1118,7 +1116,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
- &tcmu_dev_attrib_fabric_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c1188ac053c9..2ccbc0788353 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
data->mode = THERMAL_DEVICE_DISABLED;
+ clk_disable_unprepare(data->thermal_clk);
return 0;
}
@@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
+ clk_prepare_enable(data->thermal_clk);
/* Enabled thermal sensor after resume */
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index 231cabc16e16..2c2ec7666eb1 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
continue;
result = acpi_bus_get_device(trt->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
result = acpi_bus_get_device(trt->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get target ACPI device\n");
}
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (art->source) {
result = acpi_bus_get_device(art->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
if (art->target) {
result = acpi_bus_get_device(art->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
}
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 31bb553aac26..0fe5dbbea968 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev,
int ret;
adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
if (ACPI_FAILURE(status))
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index e145b66df444..d717f3dab6f1 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
*
* Return: pointer to trip points table, NULL otherwise
*/
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
struct __thermal_zone *data = tz->devdata;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8803e693fe68..2580a4872f90 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
struct mutex lock;
struct list_head list;
int id;
- int ctemp;
+ u32 ctemp;
};
#define rcar_thermal_for_each_priv(pos, common) \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
struct device *dev = rcar_priv_to_dev(priv);
int i;
- int ctemp, old, new;
+ u32 ctemp, old, new;
int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
int i;
int ret = -ENODEV;
int idle = IDLE_INTERVAL;
+ u32 enr_bits = 0;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
/*
* platform has IRQ support.
- * Then, drier use common register
+ * Then, driver uses common registers
*/
ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (IS_ERR(common->base))
return PTR_ERR(common->base);
- /* enable temperature comparation */
- rcar_thermal_common_write(common, ENR, 0x00030303);
-
idle = 0; /* polling delay is not needed */
}
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
rcar_thermal_irq_enable(priv);
list_move_tail(&priv->list, &common->head);
+
+ /* update ENR bits */
+ enr_bits |= 3 << (i * 8);
}
+ /* enable temperature comparison */
+ if (irq)
+ rcar_thermal_common_write(common, ENR, enr_bits);
+
platform_set_drvdata(pdev, common);
dev_info(dev, "%d sensor probed\n", i);
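Instead of unconditionally writing 0x00030303 to ENR, the probe path above now accumulates two enable bits per successfully registered sensor (3 << (i * 8)) and writes the mask once, and only when the IRQ path is in use. A small arithmetic sketch of how the mask builds up; the three-sensor loop bound is assumed purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t enr_bits = 0;
	int i;

	/* one byte per sensor; bits 0-1 of each byte enable comparison */
	for (i = 0; i < 3; i++)
		enr_bits |= 3u << (i * 8);

	/* with all three sensors probed this reproduces the old constant */
	printf("ENR = 0x%08x\n", (unsigned int)enr_bits); /* 0x00030303 */
	return enr_bits != 0x00030303;
}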
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 87e0b0782023..48491d1a81d6 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1759,11 +1759,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz,
thermal_event->event = event;
/* send multicast genetlink message */
- result = genlmsg_end(skb, msg_header);
- if (result < 0) {
- nlmsg_free(skb);
- return result;
- }
+ genlmsg_end(skb, msg_header);
result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
0, GFP_ATOMIC);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 9083e7520623..0531c752fbbb 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
{
return 0;
}
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
return NULL;
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index d9f85f95eb2a..b2d760055952 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -931,7 +931,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_send_char"))
+ if (serial_paranoia_check(info, tty->name, "rs_send_xchar"))
return;
info->x_char = ch;
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 3c60923b0957..342b36b9ad35 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -112,7 +112,6 @@ static void disable_tx_interrupt(struct ehv_bc_data *bc)
static int find_console_handle(void)
{
struct device_node *np = of_stdout;
- const char *sprop = NULL;
const uint32_t *iprop;
/* We don't care what the aliased node is actually called. We only
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index ea74460f3638..f78a87b07872 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1,10 +1,10 @@
/*
- * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
+ * z/VM IUCV hypervisor console (HVC) device driver
*
* This HVC device driver provides terminal access using
* z/VM IUCV communication paths.
*
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2013
*
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
@@ -102,6 +102,7 @@ static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX (0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER (500)
+#define FILTER_WILDCARD_CHAR '*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
@@ -734,20 +735,31 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
* hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
* @ipvmid: Originating z/VM user ID (right padded with blanks)
*
- * Returns 0 if the z/VM user ID @ipvmid is allowed to connection, otherwise
- * non-zero.
+ * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
+ * connect, otherwise non-zero.
*/
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
- size_t i;
+ const char *wildcard, *filter_entry;
+ size_t i, len;
/* Note: default policy is ACCEPT if no filter is set */
if (!hvc_iucv_filter_size)
return 0;
- for (i = 0; i < hvc_iucv_filter_size; i++)
- if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
+ for (i = 0; i < hvc_iucv_filter_size; i++) {
+ filter_entry = hvc_iucv_filter + (8 * i);
+
+ /* If a filter entry contains the filter wildcard character,
+ * reduce the length to match the leading portion of the user
+ * ID only (wildcard match). Characters following the wildcard
+ * are ignored.
+ */
+ wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
+ len = (wildcard) ? wildcard - filter_entry : 8;
+ if (0 == memcmp(ipvmid, filter_entry, len))
return 0;
+ }
return 1;
}
@@ -1166,6 +1178,7 @@ static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
/**
* hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
* @filter: String containing a comma-separated list of z/VM user IDs
+ * @dest: Location where to store the parsed z/VM user ID
*/
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
@@ -1188,6 +1201,10 @@ static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
if (filter[len - 1] == '\n')
len--;
+ /* prohibit filter entries containing the wildcard character only */
+ if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
+ return ERR_PTR(-EINVAL);
+
if (len > 8)
return ERR_PTR(-EINVAL);
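The connection filter above now matches on the prefix that precedes the first '*' in an 8-byte, blank-padded entry; an entry without a wildcard still requires a full 8-byte match, and the parser rejects a bare "*", which would otherwise match every user ID. A standalone sketch of that matching rule; entry_matches() and the sample IDs are invented for illustration.

#include <stdio.h>
#include <string.h>

#define ID_LEN 8
#define WILDCARD '*'

/* match an 8-byte, blank-padded z/VM user ID against one filter entry */
static int entry_matches(const char id[ID_LEN], const char entry[ID_LEN])
{
	const char *wildcard = memchr(entry, WILDCARD, ID_LEN);
	size_t len = wildcard ? (size_t)(wildcard - entry) : ID_LEN;

	return memcmp(id, entry, len) == 0;
}

int main(void)
{
	const char id[ID_LEN]    = "LNXUSR01";
	const char exact[ID_LEN] = "LNXUSR01";
	const char wild[ID_LEN]  = "LNX*    ";
	const char other[ID_LEN] = "OTHERVM ";

	printf("exact: %d wildcard: %d other: %d\n",
	       entry_matches(id, exact),
	       entry_matches(id, wild),
	       entry_matches(id, other));
	return 0;
}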
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 59ed783c4bcd..2054427992e0 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1055,7 +1055,7 @@ static int isicom_send_break(struct tty_struct *tty, int length)
outw(0x8000 | ((port->channel) << (card->shift_count)) | 0x3, base);
outw((length & 0xff) << 8 | 0x00, base);
- outw((length & 0xff00), base);
+ outw((length & 0xff00u), base);
InterruptTheCard(base);
unlock_card(card);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d2b496750d59..cf6e0f2e1331 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -90,6 +90,7 @@
struct n_tty_data {
/* producer-published */
size_t read_head;
+ size_t commit_head;
size_t canon_head;
size_t echo_head;
size_t echo_commit;
@@ -161,36 +162,11 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
-static int receive_room(struct tty_struct *tty)
-{
- struct n_tty_data *ldata = tty->disc_data;
- int left;
-
- if (I_PARMRK(tty)) {
- /* Multiply read_cnt by 3, since each byte might take up to
- * three times as many spaces when PARMRK is set (depending on
- * its flags, e.g. parity error). */
- left = N_TTY_BUF_SIZE - read_cnt(ldata) * 3 - 1;
- } else
- left = N_TTY_BUF_SIZE - read_cnt(ldata) - 1;
-
- /*
- * If we are doing input canonicalization, and there are no
- * pending newlines, let characters through without limit, so
- * that erase characters will be handled. Other excess
- * characters will be beeped.
- */
- if (left <= 0)
- left = ldata->icanon && ldata->canon_head == ldata->read_tail;
-
- return left;
-}
-
/**
- * n_tty_set_room - receive space
+ * n_tty_kick_worker - start input worker (if required)
* @tty: terminal
*
- * Re-schedules the flip buffer work if space just became available.
+ * Re-schedules the flip buffer work if it may have stopped
*
* Caller holds exclusive termios_rwsem
* or
@@ -198,12 +174,12 @@ static int receive_room(struct tty_struct *tty)
* holds non-exclusive termios_rwsem
*/
-static void n_tty_set_room(struct tty_struct *tty)
+static void n_tty_kick_worker(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- /* Did this open up the receive buffer? We may need to flip */
- if (unlikely(ldata->no_room) && receive_room(tty)) {
+ /* Did the input worker stop? Restart it */
+ if (unlikely(ldata->no_room)) {
ldata->no_room = 0;
WARN_RATELIMIT(tty->port->itty == NULL,
@@ -224,7 +200,7 @@ static ssize_t chars_in_buffer(struct tty_struct *tty)
ssize_t n = 0;
if (!ldata->icanon)
- n = read_cnt(ldata);
+ n = ldata->commit_head - ldata->read_tail;
else
n = ldata->canon_head - ldata->read_tail;
return n;
@@ -247,17 +223,20 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
static void n_tty_check_throttle(struct tty_struct *tty)
{
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
- return;
+ struct n_tty_data *ldata = tty->disc_data;
+
/*
* Check the remaining room for the input canonicalization
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
+ if (ldata->icanon && ldata->canon_head == ldata->read_tail)
+ return;
+
while (1) {
int throttled;
tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
- if (receive_room(tty) >= TTY_THRESHOLD_THROTTLE)
+ if (N_TTY_BUF_SIZE - read_cnt(ldata) >= TTY_THRESHOLD_THROTTLE)
break;
throttled = tty_throttle_safe(tty);
if (!throttled)
@@ -274,7 +253,7 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
return;
if (!tty->count)
return;
- n_tty_set_room(tty);
+ n_tty_kick_worker(tty);
n_tty_write_wakeup(tty->link);
if (waitqueue_active(&tty->link->write_wait))
wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
@@ -296,7 +275,7 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
break;
if (!tty->count)
break;
- n_tty_set_room(tty);
+ n_tty_kick_worker(tty);
unthrottled = tty_unthrottle_safe(tty);
if (!unthrottled)
break;
@@ -313,10 +292,6 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
*
* n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
- * modifies read_head
- *
- * read_head is only considered 'published' if canonical mode is
- * not active.
*/
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
@@ -340,6 +315,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+ ldata->commit_head = 0;
ldata->echo_mark = 0;
ldata->line_start = 0;
@@ -379,7 +355,7 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
reset_buffer_flags(tty->disc_data);
- n_tty_set_room(tty);
+ n_tty_kick_worker(tty);
if (tty->link)
n_tty_packet_mode_flush(tty);
@@ -987,10 +963,6 @@ static inline void finish_erasing(struct n_tty_data *ldata)
*
* n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
- * modifies read_head
- *
- * Modifying the read_head is not considered a publish in this context
- * because canonical mode is active -- only canon_head publishes
*/
static void eraser(unsigned char c, struct tty_struct *tty)
@@ -1118,16 +1090,45 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* Called when a signal is being sent due to terminal input.
* Called from the driver receive_buf path so serialized.
*
+ * Performs input and output flush if !NOFLSH. In this context, the echo
+ * buffer is 'output'. The signal is processed first to alert any current
+ * readers or writers to discontinue and exit their i/o loops.
+ *
* Locking: ctrl_lock
*/
static void isig(int sig, struct tty_struct *tty)
{
+ struct n_tty_data *ldata = tty->disc_data;
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, sig, 1);
put_pid(tty_pgrp);
}
+
+ if (!L_NOFLSH(tty)) {
+ up_read(&tty->termios_rwsem);
+ down_write(&tty->termios_rwsem);
+
+ /* clear echo buffer */
+ mutex_lock(&ldata->output_lock);
+ ldata->echo_head = ldata->echo_tail = 0;
+ ldata->echo_mark = ldata->echo_commit = 0;
+ mutex_unlock(&ldata->output_lock);
+
+ /* clear output buffer */
+ tty_driver_flush_buffer(tty);
+
+ /* clear input buffer */
+ reset_buffer_flags(tty->disc_data);
+
+ /* notify pty master of flush */
+ if (tty->link)
+ n_tty_packet_mode_flush(tty);
+
+ up_write(&tty->termios_rwsem);
+ down_read(&tty->termios_rwsem);
+ }
}
/**
@@ -1139,7 +1140,6 @@ static void isig(int sig, struct tty_struct *tty)
*
* n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
- * publishes read_head via put_tty_queue()
*
* Note: may get exclusive termios_rwsem if flushing input buffer
*/
@@ -1152,13 +1152,6 @@ static void n_tty_receive_break(struct tty_struct *tty)
return;
if (I_BRKINT(tty)) {
isig(SIGINT, tty);
- if (!L_NOFLSH(tty)) {
- /* flushing needs exclusive termios_rwsem */
- up_read(&tty->termios_rwsem);
- n_tty_flush_buffer(tty);
- tty_driver_flush_buffer(tty);
- down_read(&tty->termios_rwsem);
- }
return;
}
if (I_PARMRK(tty)) {
@@ -1209,7 +1202,6 @@ static void n_tty_receive_overrun(struct tty_struct *tty)
*
* n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
- * publishes read_head via put_tty_queue()
*/
static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
{
@@ -1233,13 +1225,7 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
static void
n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
{
- if (!L_NOFLSH(tty)) {
- /* flushing needs exclusive termios_rwsem */
- up_read(&tty->termios_rwsem);
- n_tty_flush_buffer(tty);
- tty_driver_flush_buffer(tty);
- down_read(&tty->termios_rwsem);
- }
+ isig(signal, tty);
if (I_IXON(tty))
start_tty(tty);
if (L_ECHO(tty)) {
@@ -1247,7 +1233,6 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
commit_echoes(tty);
} else
process_echoes(tty);
- isig(signal, tty);
return;
}
@@ -1263,7 +1248,6 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
* n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
* publishes canon_head if canonical mode is active
- * otherwise, publishes read_head via put_tty_queue()
*
* Returns 1 if LNEXT was received, else returns 0
*/
@@ -1376,7 +1360,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
handle_newline:
set_bit(ldata->read_head & (N_TTY_BUF_SIZE - 1), ldata->read_flags);
put_tty_queue(c, ldata);
- ldata->canon_head = ldata->read_head;
+ smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
@@ -1512,23 +1496,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
n_tty_receive_char_flagged(tty, c, flag);
}
-/**
- * n_tty_receive_buf - data receive
- * @tty: terminal device
- * @cp: buffer
- * @fp: flag buffer
- * @count: characters
- *
- * Called by the terminal driver when a block of characters has
- * been received. This function must be called from soft contexts
- * not from interrupt context. The driver is responsible for making
- * calls one at a time and in order (or using flush_to_ldisc)
- *
- * n_tty_receive_buf()/producer path:
- * claims non-exclusive termios_rwsem
- * publishes read_head and canon_head
- */
-
static void
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
@@ -1537,16 +1504,14 @@ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
size_t n, head;
head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
- n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head);
- n = min_t(size_t, count, n);
+ n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
memcpy(read_buf_addr(ldata, head), cp, n);
ldata->read_head += n;
cp += n;
count -= n;
head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
- n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head);
- n = min_t(size_t, count, n);
+ n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
memcpy(read_buf_addr(ldata, head), cp, n);
ldata->read_head += n;
}
@@ -1676,32 +1641,98 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
tty->ops->flush_chars(tty);
}
- if ((!ldata->icanon && (read_cnt(ldata) >= ldata->minimum_to_wake)) ||
- L_EXTPROC(tty)) {
+ if (ldata->icanon && !L_EXTPROC(tty))
+ return;
+
+ /* publish read_head to consumer */
+ smp_store_release(&ldata->commit_head, ldata->read_head);
+
+ if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
}
}
+/**
+ * n_tty_receive_buf_common - process input
+ * @tty: device to receive input
+ * @cp: input chars
+ * @fp: flags for each char (if NULL, all chars are TTY_NORMAL)
+ * @count: number of input chars in @cp
+ *
+ * Called by the terminal driver when a block of characters has
+ * been received. This function must be called from soft contexts
+ * not from interrupt context. The driver is responsible for making
+ * calls one at a time and in order (or using flush_to_ldisc)
+ *
+ * Returns the # of input chars from @cp which were processed.
+ *
+ * In canonical mode, the maximum line length is 4096 chars (including
+ * the line termination char); lines longer than 4096 chars are
+ * truncated. After 4095 chars, input data is still processed but
+ * not stored. Overflow processing ensures the tty can always
+ * receive more input until at least one line can be read.
+ *
+ * In non-canonical mode, the read buffer will only accept 4095 chars;
+ * this provides the necessary space for a newline char if the input
+ * mode is switched to canonical.
+ *
+ * Note it is possible for the read buffer to _contain_ 4096 chars
+ * in non-canonical mode: the read buffer could already contain the
+ * maximum canon line of 4096 chars when the mode is switched to
+ * non-canonical.
+ *
+ * n_tty_receive_buf()/producer path:
+ * claims non-exclusive termios_rwsem
+ * publishes commit_head or canon_head
+ */
static int
n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count, int flow)
{
struct n_tty_data *ldata = tty->disc_data;
- int room, n, rcvd = 0;
+ int room, n, rcvd = 0, overflow;
down_read(&tty->termios_rwsem);
while (1) {
- room = receive_room(tty);
+ /*
+ * When PARMRK is set, each input char may take up to 3 chars
+ * in the read buf; reduce the buffer space avail by 3x
+ *
+ * If we are doing input canonicalization, and there are no
+ * pending newlines, let characters through without limit, so
+ * that erase characters will be handled. Other excess
+ * characters will be beeped.
+ *
+ * paired with store in *_copy_from_read_buf() -- guarantees
+ * the consumer has loaded the data in read_buf up to the new
+ * read_tail (so this producer will not overwrite unread data)
+ */
+ size_t tail = smp_load_acquire(&ldata->read_tail);
+
+ room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
+ if (I_PARMRK(tty))
+ room = (room + 2) / 3;
+ room--;
+ if (room <= 0) {
+ overflow = ldata->icanon && ldata->canon_head == tail;
+ if (overflow && room < 0)
+ ldata->read_head--;
+ room = overflow;
+ ldata->no_room = flow && !room;
+ } else
+ overflow = 0;
+
n = min(count, room);
- if (!n) {
- if (flow && !room)
- ldata->no_room = 1;
+ if (!n)
break;
- }
- __receive_buf(tty, cp, fp, n);
+
+ /* ignore parity errors if handling overflow */
+ if (!overflow || !fp || *fp != TTY_PARITY)
+ __receive_buf(tty, cp, fp, n);
+
cp += n;
if (fp)
fp += n;
@@ -1710,7 +1741,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
}
tty->receive_room = room;
- n_tty_check_throttle(tty);
+
+ /* Unthrottle if handling overflow on pty */
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
+ if (overflow) {
+ tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
+ tty_unthrottle_safe(tty);
+ __tty_set_flow_change(tty, 0);
+ }
+ } else
+ n_tty_check_throttle(tty);
+
up_read(&tty->termios_rwsem);
return rcvd;
@@ -1764,6 +1805,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
ldata->canon_head = ldata->read_head;
ldata->push = 1;
}
+ ldata->commit_head = ldata->read_head;
ldata->erasing = 0;
ldata->lnext = 0;
}
@@ -1817,7 +1859,6 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
else
ldata->real_raw = 0;
}
- n_tty_set_room(tty);
/*
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* been stopped by STOP_CHAR(tty) before it.
@@ -1905,7 +1946,7 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
if (ldata->icanon && !L_EXTPROC(tty))
return ldata->canon_head != ldata->read_tail;
else
- return read_cnt(ldata) >= amt;
+ return ldata->commit_head - ldata->read_tail >= amt;
}
/**
@@ -1937,10 +1978,11 @@ static int copy_from_read_buf(struct tty_struct *tty,
int retval;
size_t n;
bool is_eof;
+ size_t head = smp_load_acquire(&ldata->commit_head);
size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
retval = 0;
- n = min(read_cnt(ldata), N_TTY_BUF_SIZE - tail);
+ n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
n = min(*nr, n);
if (n) {
retval = copy_to_user(*b, read_buf_addr(ldata, tail), n);
@@ -1948,9 +1990,10 @@ static int copy_from_read_buf(struct tty_struct *tty,
is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty);
tty_audit_add_data(tty, read_buf_addr(ldata, tail), n,
ldata->icanon);
- ldata->read_tail += n;
+ smp_store_release(&ldata->read_tail, ldata->read_tail + n);
/* Turn single EOF into zero-length read */
- if (L_EXTPROC(tty) && ldata->icanon && is_eof && !read_cnt(ldata))
+ if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
+ (head == ldata->read_tail))
n = 0;
*b += n;
*nr -= n;
@@ -1993,7 +2036,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
bool eof_push = 0;
/* N.B. avoid overrun if nr == 0 */
- n = min(*nr, read_cnt(ldata));
+ n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
if (!n)
return 0;
@@ -2043,8 +2086,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
if (found)
clear_bit(eol, ldata->read_flags);
- smp_mb__after_atomic();
- ldata->read_tail += c;
+ smp_store_release(&ldata->read_tail, ldata->read_tail + c);
if (found) {
if (!ldata->push)
@@ -2130,6 +2172,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
ssize_t retval = 0;
long timeout;
int packet;
+ size_t tail;
c = job_control(tty, file);
if (c < 0)
@@ -2166,6 +2209,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
packet = tty->packet;
+ tail = ldata->read_tail;
add_wait_queue(&tty->read_wait, &wait);
while (nr) {
@@ -2208,7 +2252,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
retval = -ERESTARTSYS;
break;
}
- n_tty_set_room(tty);
up_read(&tty->termios_rwsem);
timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
@@ -2253,7 +2296,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
if (time)
timeout = time;
}
- n_tty_set_room(tty);
+ if (tail != ldata->read_tail)
+ n_tty_kick_worker(tty);
up_read(&tty->termios_rwsem);
remove_wait_queue(&tty->read_wait, &wait);
@@ -2399,17 +2443,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
- mask |= POLLHUP;
if (input_available_p(tty, 1))
mask |= POLLIN | POLLRDNORM;
- else if (mask & POLLHUP) {
- tty_flush_to_ldisc(tty);
- if (input_available_p(tty, 1))
- mask |= POLLIN | POLLRDNORM;
- }
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= POLLHUP;
if (tty_hung_up_p(file))
mask |= POLLHUP;
if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index a9d256d6e909..e72ee629cead 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -88,19 +88,6 @@ static void pty_unthrottle(struct tty_struct *tty)
}
/**
- * pty_space - report space left for writing
- * @to: tty we are writing into
- *
- * Limit the buffer space used by ptys to 8k.
- */
-
-static int pty_space(struct tty_struct *to)
-{
- int n = tty_buffer_space_avail(to->port);
- return min(n, 8192);
-}
-
-/**
* pty_write - write to a pty
* @tty: the tty we write from
* @buf: kernel buffer of data
@@ -141,7 +128,7 @@ static int pty_write_room(struct tty_struct *tty)
{
if (tty->stopped)
return 0;
- return pty_space(tty->link);
+ return tty_buffer_space_avail(tty->link->port);
}
/**
@@ -210,6 +197,9 @@ static int pty_signal(struct tty_struct *tty, int sig)
{
struct pid *pgrp;
+ if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
+ return -EINVAL;
+
if (tty->link) {
pgrp = tty_get_pgrp(tty->link);
if (pgrp)
@@ -222,10 +212,16 @@ static int pty_signal(struct tty_struct *tty, int sig)
static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
+ struct tty_ldisc *ld;
if (!to)
return;
- /* tty_buffer_flush(to); FIXME */
+
+ ld = tty_ldisc_ref(to);
+ tty_buffer_flush(to, ld);
+ if (ld)
+ tty_ldisc_deref(ld);
+
if (to->packet) {
spin_lock_irq(&tty->ctrl_lock);
tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
@@ -399,6 +395,7 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
goto err_put_module;
tty_set_lock_subclass(o_tty);
+ lockdep_set_subclass(&o_tty->termios_rwsem, TTY_LOCK_SLAVE);
if (legacy) {
/* We always use new tty termios data so we can do this
@@ -429,10 +426,14 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
o_tty->link = tty;
tty_port_init(ports[0]);
tty_port_init(ports[1]);
+ tty_buffer_set_limit(ports[0], 8192);
+ tty_buffer_set_limit(ports[1], 8192);
o_tty->port = ports[0];
tty->port = ports[1];
o_tty->port->itty = o_tty;
+ tty_buffer_set_lock_subclass(o_tty->port);
+
tty_driver_kref_get(driver);
tty->count++;
o_tty->count++;
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 383c4c796637..c8dd8dc31086 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1390,7 +1390,7 @@ static void rp_unthrottle(struct tty_struct *tty)
tty->ldisc.chars_in_buffer(tty));
#endif
- if (rocket_paranoia_check(info, "rp_throttle"))
+ if (rocket_paranoia_check(info, "rp_unthrottle"))
return;
if (I_IXOFF(tty))
@@ -1458,7 +1458,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
orig_jiffies = jiffies;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...\n", timeout,
+ printk(KERN_INFO "In %s(%d) (jiff=%lu)...\n", __func__, timeout,
jiffies);
printk(KERN_INFO "cps=%d...\n", info->cps);
#endif
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 11c66856ba2f..e3b9570a1eff 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -329,6 +329,17 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
+/*
+ * tx_loadsz is set to 63 bytes instead of 64 bytes to implement a
+ * workaround for erratum A-008006, which states that tx_loadsz should
+ * be configured to less than the maximum supported FIFO size.
+ */
+ [PORT_16550A_FSL64] = {
+ .name = "16550A_FSL64",
+ .fifo_size = 64,
+ .tx_loadsz = 63,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR7_64BYTE,
+ .flags = UART_CAP_FIFO,
+ },
};
/* Uart divisor latch read */
@@ -956,7 +967,17 @@ static void autoconfig_16550a(struct uart_8250_port *up)
up->port.type = PORT_16650;
up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
} else {
- DEBUG_AUTOCONF("Motorola 8xxx DUART ");
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR7_64BYTE);
+ status1 = serial_in(up, UART_IIR) >> 5;
+ serial_out(up, UART_FCR, 0);
+ serial_out(up, UART_LCR, 0);
+
+ if (status1 == 7)
+ up->port.type = PORT_16550A_FSL64;
+ else
+ DEBUG_AUTOCONF("Motorola 8xxx DUART ");
}
serial_out(up, UART_EFR, 0);
return;
@@ -1355,9 +1376,11 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get_tx(up);
- if (up->dma && !up->dma->tx_dma(up)) {
+
+ if (up->dma && !up->dma->tx_dma(up))
return;
- } else if (!(up->ier & UART_IER_THRI)) {
+
+ if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_port_out(port, UART_IER, up->ier);
@@ -1365,7 +1388,7 @@ static void serial8250_start_tx(struct uart_port *port)
unsigned char lsr;
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- if (lsr & UART_LSR_TEMT)
+ if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
}
}
@@ -1924,7 +1947,7 @@ static unsigned int serial8250_get_mctrl(struct uart_port *port)
return ret;
}
-static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char mcr = 0;
@@ -1944,6 +1967,14 @@ static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial_port_out(port, UART_MCR, mcr);
}
+EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
+
+static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ if (port->set_mctrl)
+ return port->set_mctrl(port, mctrl);
+ return serial8250_do_set_mctrl(port, mctrl);
+}
static void serial8250_break_ctl(struct uart_port *port, int break_state)
{
@@ -2382,13 +2413,34 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
-static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud)
+/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD)
+ * Calculate divisor with extra 4-bit fractional portion
+ */
+static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ struct uart_port *port = &up->port;
+ unsigned int quot_16;
+
+ quot_16 = DIV_ROUND_CLOSEST(port->uartclk, baud);
+ *frac = quot_16 & 0x0f;
+
+ return quot_16 >> 4;
+}
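A rough worked example of the fractional divisor math above (illustration only; the clock and baud values are assumed, not taken from the patch): dividing uartclk by baud gives the divisor in 1/16ths, so the low four bits become the DLD fraction and the rest goes to DLL/DLM.

	unsigned int uartclk = 62500000, baud = 115200;		/* assumed values */
	unsigned int quot_16 = DIV_ROUND_CLOSEST(uartclk, baud);	/* 543 */
	unsigned int frac = quot_16 & 0x0f;	/* 15 -> DLD fractional field */
	unsigned int quot = quot_16 >> 4;	/* 33 -> DLL/DLM, i.e. divisor 33 + 15/16 */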
+
+static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
+ unsigned int baud,
+ unsigned int *frac)
{
+ struct uart_port *port = &up->port;
unsigned int quot;
/*
* Handle magic divisors for baud rates above baud_base on
* SMSC SuperIO chips.
+ *
*/
if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
baud == (port->uartclk/4))
@@ -2396,22 +2448,26 @@ static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int
else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
baud == (port->uartclk/8))
quot = 0x8002;
+ else if (up->port.type == PORT_XR17V35X)
+ quot = xr17v35x_get_divisor(up, baud, frac);
else
quot = uart_get_divisor(port, baud);
+ /*
+ * Oxford Semi 952 rev B workaround
+ */
+ if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
+ quot++;
+
return quot;
}
-void
-serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
+ tcflag_t c_cflag)
{
- struct uart_8250_port *up = up_to_u8250p(port);
unsigned char cval;
- unsigned long flags;
- unsigned int baud, quot;
- switch (termios->c_cflag & CSIZE) {
+ switch (c_cflag & CSIZE) {
case CS5:
cval = UART_LCR_WLEN5;
break;
@@ -2427,33 +2483,80 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
break;
}
- if (termios->c_cflag & CSTOPB)
+ if (c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
- if (termios->c_cflag & PARENB) {
+ if (c_cflag & PARENB) {
cval |= UART_LCR_PARITY;
if (up->bugs & UART_BUG_PARITY)
up->fifo_bug = true;
}
- if (!(termios->c_cflag & PARODD))
+ if (!(c_cflag & PARODD))
cval |= UART_LCR_EPAR;
#ifdef CMSPAR
- if (termios->c_cflag & CMSPAR)
+ if (c_cflag & CMSPAR)
cval |= UART_LCR_SPAR;
#endif
+ return cval;
+}
+
+static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* Workaround to enable 115200 baud on OMAP1510 internal ports */
+ if (is_omap1510_8250(up)) {
+ if (baud == 115200) {
+ quot = 1;
+ serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
+ } else
+ serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
+ }
+
+ /*
+ * For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2,
+ * otherwise just set DLAB
+ */
+ if (up->capabilities & UART_NATSEMI)
+ serial_port_out(port, UART_LCR, 0xe0);
+ else
+ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
+
+ serial_dl_write(up, quot);
+
+ /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
+ if (up->port.type == PORT_XR17V35X)
+ serial_port_out(port, 0x2, quot_frac);
+}
+
+void
+serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned char cval;
+ unsigned long flags;
+ unsigned int baud, quot, frac = 0;
+
+ cval = serial8250_compute_lcr(up, termios->c_cflag);
+
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 0xffff,
port->uartclk / 16);
- quot = serial8250_get_divisor(port, baud);
+ quot = serial8250_get_divisor(up, baud, &frac);
/*
- * Oxford Semi 952 rev B workaround
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
*/
- if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
- quot++;
+ serial8250_rpm_get(up);
+ spin_lock_irqsave(&port->lock, flags);
+
+ up->lcr = cval; /* Save computed LCR */
if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
/* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
@@ -2478,13 +2581,6 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
}
/*
- * Ok, we're now changing the port state. Do it with
- * interrupts disabled.
- */
- serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
-
- /*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
@@ -2548,43 +2644,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_port_out(port, UART_EFR, efr);
}
- /* Workaround to enable 115200 baud on OMAP1510 internal ports */
- if (is_omap1510_8250(up)) {
- if (baud == 115200) {
- quot = 1;
- serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
- } else
- serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
- }
-
- /*
- * For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2,
- * otherwise just set DLAB
- */
- if (up->capabilities & UART_NATSEMI)
- serial_port_out(port, UART_LCR, 0xe0);
- else
- serial_port_out(port, UART_LCR, cval | UART_LCR_DLAB);
-
- serial_dl_write(up, quot);
-
- /*
- * XR17V35x UARTs have an extra fractional divisor register (DLD)
- *
- * We need to recalculate all of the registers, because DLM and DLL
- * are already rounded to a whole integer.
- *
- * When recalculating we use a 32x clock instead of a 16x clock to
- * allow 1-bit for rounding in the fractional part.
- */
- if (up->port.type == PORT_XR17V35X) {
- unsigned int baud_x32 = (port->uartclk * 2) / baud;
- u16 quot = baud_x32 / 32;
- u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
-
- serial_dl_write(up, quot);
- serial_port_out(port, 0x2, quot_frac & 0xf);
- }
+ serial8250_set_divisor(port, baud, quot, frac);
/*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
@@ -2593,8 +2653,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
if (port->type == PORT_16750)
serial_port_out(port, UART_FCR, up->fcr);
- serial_port_out(port, UART_LCR, cval); /* reset DLAB */
- up->lcr = cval; /* Save LCR */
+ serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */
if (port->type != PORT_16750) {
/* emulated UARTs (Lucent Venus 167x) need two steps */
if (up->fcr & UART_FCR_ENABLE_FIFO)
@@ -3208,6 +3267,27 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
else
serial_port_out(port, UART_IER, 0);
+ /* check scratch reg to see if port powered off during system sleep */
+ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+ struct ktermios termios;
+ unsigned int baud, quot, frac = 0;
+
+ termios.c_cflag = port->cons->cflag;
+ if (port->state->port.tty && termios.c_cflag == 0)
+ termios.c_cflag = port->state->port.tty->termios.c_cflag;
+
+ baud = uart_get_baud_rate(port, &termios, NULL,
+ port->uartclk / 16 / 0xffff,
+ port->uartclk / 16);
+ quot = serial8250_get_divisor(up, baud, &frac);
+
+ serial8250_set_divisor(port, baud, quot, frac);
+ serial_port_out(port, UART_LCR, up->lcr);
+ serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
+
+ up->canary = 0;
+ }
+
uart_console_write(port, s, count, serial8250_console_putchar);
/*
@@ -3358,7 +3438,17 @@ int __init early_serial_setup(struct uart_port *port)
*/
void serial8250_suspend_port(int line)
{
- uart_suspend_port(&serial8250_reg, &serial8250_ports[line].port);
+ struct uart_8250_port *up = &serial8250_ports[line];
+ struct uart_port *port = &up->port;
+
+ if (!console_suspend_enabled && uart_console(port) &&
+ port->type != PORT_8250) {
+ unsigned char canary = 0xa5;
+ serial_out(up, UART_SCR, canary);
+ up->canary = canary;
+ }
+
+ uart_suspend_port(&serial8250_reg, port);
}
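The scratch-register canary above relies on UART_SCR having no hardware function: a value written before suspend is still there on resume only if the UART kept power, so the console-write path can detect a reset device and reprogram it. A minimal sketch of the pattern under that assumption (the helper names and the UART_CANARY macro are made up):

	#define UART_CANARY	0xa5	/* any value other than the reset default */

	/* Called before entering suspend. */
	static void canary_arm(struct uart_port *port)
	{
		serial_port_out(port, UART_SCR, UART_CANARY);
	}

	/* Called from the console-write path after resume. */
	static bool canary_lost(struct uart_port *port)
	{
		/* If the device lost power, the scratch register no longer holds the canary. */
		return serial_port_in(port, UART_SCR) != UART_CANARY;
	}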
/**
@@ -3372,6 +3462,8 @@ void serial8250_resume_port(int line)
struct uart_8250_port *up = &serial8250_ports[line];
struct uart_port *port = &up->port;
+ up->canary = 0;
+
if (up->capabilities & UART_NATSEMI) {
/* Ensure it's still in high speed mode */
serial_port_out(port, UART_LCR, 0xE0);
@@ -3605,6 +3697,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
/* Possibly override set_termios call */
if (up->port.set_termios)
uart->port.set_termios = up->port.set_termios;
+ if (up->port.set_mctrl)
+ uart->port.set_mctrl = up->port.set_mctrl;
if (up->port.startup)
uart->port.startup = up->port.startup;
if (up->port.shutdown)
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index fcd7ac6af2fc..21d01a491405 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -59,7 +59,6 @@ static void __dma_rx_complete(void *param)
dma->rx_running = 0;
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
- dmaengine_terminate_all(dma->rxchan);
count = dma->rx_size - state.residue;
@@ -81,6 +80,10 @@ int serial8250_tx_dma(struct uart_8250_port *p)
return 0;
dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (dma->tx_size < p->port.fifosize) {
+ ret = -EINVAL;
+ goto err;
+ }
desc = dmaengine_prep_slave_single(dma->txchan,
dma->tx_addr + xmit->tail,
@@ -131,6 +134,7 @@ int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
if (dma->rx_running) {
dmaengine_pause(dma->rxchan);
__dma_rx_complete(p);
+ dmaengine_terminate_all(dma->rxchan);
}
return -ETIMEDOUT;
default:
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 555de07db593..e60116235836 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -351,10 +351,20 @@ static int dw8250_probe_of(struct uart_port *p,
static int dw8250_probe_acpi(struct uart_8250_port *up,
struct dw8250_data *data)
{
+ const struct acpi_device_id *id;
struct uart_port *p = &up->port;
dw8250_setup_port(up);
+ id = acpi_match_device(p->dev->driver->acpi_match_table, p->dev);
+ if (!id)
+ return -ENODEV;
+
+ if (!p->uartclk)
+ if (device_property_read_u32(p->dev, "clock-frequency",
+ &p->uartclk))
+ return -EINVAL;
+
p->iotype = UPIO_MEM32;
p->serial_in = dw8250_serial_in32;
p->serial_out = dw8250_serial_out32;
@@ -577,6 +587,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT3435", 0 },
{ "80860F0A", 0 },
{ "8086228A", 0 },
+ { "APMC0D08", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 4858b8a99d3b..c31a22b4f845 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -93,15 +93,18 @@ static void __init early_serial8250_write(struct console *console,
struct uart_port *port = &early_device->port;
unsigned int ier;
- /* Save the IER and disable interrupts */
+ /* Save the IER and disable interrupts preserving the UUE bit */
ier = serial8250_early_in(port, UART_IER);
- serial8250_early_out(port, UART_IER, 0);
+ if (ier)
+ serial8250_early_out(port, UART_IER, ier & UART_IER_UUE);
uart_console_write(port, s, count, serial_putc);
/* Wait for transmitter to become empty and restore the IER */
wait_for_xmitr(port);
- serial8250_early_out(port, UART_IER, ier);
+
+ if (ier)
+ serial8250_early_out(port, UART_IER, ier);
}
static unsigned int __init probe_baud(struct uart_port *port)
@@ -124,9 +127,11 @@ static void __init init_port(struct earlycon_device *device)
struct uart_port *port = &device->port;
unsigned int divisor;
unsigned char c;
+ unsigned int ier;
serial8250_early_out(port, UART_LCR, 0x3); /* 8n1 */
- serial8250_early_out(port, UART_IER, 0); /* no interrupt */
+ ier = serial8250_early_in(port, UART_IER);
+ serial8250_early_out(port, UART_IER, ier & UART_IER_UUE); /* no interrupt */
serial8250_early_out(port, UART_FCR, 0); /* no fifo */
serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 96b69bfd773f..fe6d2e51da09 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -106,6 +106,28 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg)
return readl(up->port.membase + (reg << up->port.regshift));
}
+static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct omap8250_priv *priv = up->port.private_data;
+ u8 lcr;
+
+ serial8250_do_set_mctrl(port, mctrl);
+
+ /*
+ * Turn off autoRTS if RTS is lowered and restore autoRTS setting
+ * if RTS is raised
+ */
+ lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ priv->efr |= UART_EFR_RTS;
+ else
+ priv->efr &= ~UART_EFR_RTS;
+ serial_out(up, UART_EFR, priv->efr);
+ serial_out(up, UART_LCR, lcr);
+}
+
/*
* Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
* The access to uart register after MDR1 Access
@@ -397,12 +419,12 @@ static void omap_8250_set_termios(struct uart_port *port,
priv->efr = 0;
up->mcr &= ~(UART_MCR_RTS | UART_MCR_XONANY);
- if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
- /* Enable AUTORTS and AUTOCTS */
- priv->efr |= UART_EFR_CTS | UART_EFR_RTS;
+ up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
- /* Ensure MCR RTS is asserted */
- up->mcr |= UART_MCR_RTS;
+ if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
+ /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
+ up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
+ priv->efr |= UART_EFR_CTS;
} else if (up->port.flags & UPF_SOFT_FLOW) {
/*
* IXON Flag:
@@ -417,8 +439,10 @@ static void omap_8250_set_termios(struct uart_port *port,
* Enable XON/XOFF flow control on output.
* Transmit XON1, XOFF1
*/
- if (termios->c_iflag & IXOFF)
+ if (termios->c_iflag & IXOFF) {
+ up->port.status |= UPSTAT_AUTOXOFF;
priv->efr |= OMAP_UART_SW_TX;
+ }
/*
* IXANY Flag:
@@ -450,18 +474,18 @@ static void omap_8250_set_termios(struct uart_port *port,
static void omap_8250_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
- struct omap8250_priv *priv = up->port.private_data;
+ struct uart_8250_port *up = up_to_u8250p(port);
+ u8 efr;
pm_runtime_get_sync(port->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, priv->efr | UART_EFR_ECB);
+ efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, priv->efr);
+ serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
pm_runtime_mark_last_busy(port->dev);
@@ -1007,6 +1031,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.capabilities |= UART_CAP_RPM;
#endif
up.port.set_termios = omap_8250_set_termios;
+ up.port.set_mctrl = omap8250_set_mctrl;
up.port.pm = omap_8250_pm;
up.port.startup = omap_8250_startup;
up.port.shutdown = omap_8250_shutdown;
@@ -1248,6 +1273,46 @@ static int omap8250_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_SERIAL_8250_OMAP_TTYO_FIXUP
+static int __init omap8250_console_fixup(void)
+{
+ char *omap_str;
+ char *options;
+ u8 idx;
+
+ if (strstr(boot_command_line, "console=ttyS"))
+ /* user set a ttyS based name for the console */
+ return 0;
+
+ omap_str = strstr(boot_command_line, "console=ttyO");
+ if (!omap_str)
+ /* user did not set ttyO based console, so we don't care */
+ return 0;
+
+ omap_str += 12;
+ if ('0' <= *omap_str && *omap_str <= '9')
+ idx = *omap_str - '0';
+ else
+ return 0;
+
+ omap_str++;
+ if (omap_str[0] == ',') {
+ omap_str++;
+ options = omap_str;
+ } else {
+ options = NULL;
+ }
+
+ add_preferred_console("ttyS", idx, options);
+ pr_err("WARNING: Your 'console=ttyO%d' has been replaced by 'ttyS%d'\n",
+ idx, idx);
+ pr_err("This ensures that you still see kernel messages. Please\n");
+ pr_err("update your kernel commandline.\n");
+ return 0;
+}
+console_initcall(omap8250_console_fixup);
+#endif
+
static const struct dev_pm_ops omap8250_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
SET_RUNTIME_PM_OPS(omap8250_runtime_suspend,
@@ -1269,7 +1334,6 @@ static struct platform_driver omap8250_platform_driver = {
.name = "omap8250",
.pm = &omap8250_dev_pm_ops,
.of_match_table = omap8250_dt_ids,
- .owner = THIS_MODULE,
},
.probe = omap8250_probe,
.remove = omap8250_remove,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 31feeb2d0a66..daf2c82984e9 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -221,13 +221,13 @@ pci_hp_diva_setup(struct serial_private *priv,
*/
static int pci_inteli960ni_init(struct pci_dev *dev)
{
- unsigned long oldval;
+ u32 oldval;
if (!(dev->subsystem_device & 0x1000))
return -ENODEV;
/* is firmware started? */
- pci_read_config_dword(dev, 0x44, (void *)&oldval);
+ pci_read_config_dword(dev, 0x44, &oldval);
if (oldval == 0x00001000L) { /* RESET value */
dev_dbg(&dev->dev, "Local i960 firmware missing\n");
return -ENODEV;
@@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
}
static int
-pci_wch_ch382_setup(struct serial_private *priv,
+pci_wch_ch38x_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
@@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv,
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
+#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
- /* WCH CH382 2S1P card (16750 clone) */
+ /* WCH CH382 2S1P card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
.device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .setup = pci_wch_ch382_setup,
+ .setup = pci_wch_ch38x_setup,
+ },
+ /* WCH CH384 4S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH384_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch38x_setup,
},
/*
* ASIX devices with FIFO bug
@@ -2876,6 +2885,7 @@ enum pci_board_num_t {
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_wch384_4,
};
/*
@@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.first_offset = 0x40,
},
+
+ [pbn_wch384_4] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+ { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
};
/*
@@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch384_4 },
+
/*
* Commtech, Inc. Fastcom adapters
*/
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 682a2fbe5c06..50a09cd76d50 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -426,7 +426,7 @@ static int serial_pnp_guess_board(struct pnp_dev *dev)
static int
serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
- struct uart_8250_port uart;
+ struct uart_8250_port uart, *port;
int ret, line, flags = dev_id->driver_data;
if (flags & UNKNOWN_DEV) {
@@ -471,6 +471,10 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
if (line < 0 || (flags & CIR_PORT))
return -ENODEV;
+ port = serial8250_get_port(line);
+ if (uart_console(&port->port))
+ dev->capabilities |= PNP_CONSOLE;
+
pnp_set_drvdata(dev, (void *)((long)line + 1));
return 0;
}
@@ -478,6 +482,8 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
static void serial_pnp_remove(struct pnp_dev *dev)
{
long line = (long)pnp_get_drvdata(dev);
+
+ dev->capabilities &= ~PNP_CONSOLE;
if (line)
serial8250_unregister_port(line - 1);
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 0fcbcd29502f..6f7f2d753def 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -308,6 +308,25 @@ config SERIAL_8250_OMAP
This driver uses ttyS instead of ttyO.
+config SERIAL_8250_OMAP_TTYO_FIXUP
+ bool "Replace ttyO with ttyS"
+ depends on SERIAL_8250_OMAP=y && SERIAL_8250_CONSOLE
+ default y
+ help
+ This option replaces the "console=ttyO" argument with the matching
+ ttyS argument if the user did not specify one on the command line.
+ This ensures that the user can still see the kernel output during
+ boot, which would otherwise be lost. The getty still has to be
+ configured for ttyS instead of ttyO regardless of this option.
+ This option is intended for people who "automatically" enable this
+ driver without knowing that this driver requires a different console=
+ argument. If you read this, please keep this option disabled and
+ instead update your kernel command line. If you prepare a kernel for a
+ distribution or another kind of larger user base, then you probably
+ want to keep this option enabled. Otherwise people might complain
+ about a non-booting kernel because the serial console remains silent
+ if they forgot to update the command line.
+
config SERIAL_8250_FINTEK
tristate "Support for Fintek F81216A LPC to 4 UART"
depends on SERIAL_8250 && PNP
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index c79b43cd6014..5d916c7a216b 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -241,6 +241,7 @@ config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
depends on PLAT_SAMSUNG || ARCH_EXYNOS
select SERIAL_CORE
+ select SERIAL_EARLYCON
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
providing /dev/ttySAC0, 1 and 2 (note, some machines may not
@@ -482,16 +483,6 @@ config SERIAL_SA1100_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
-config SERIAL_MRST_MAX3110
- tristate "SPI UART driver for Max3110"
- depends on SPI_DW_PCI
- select SERIAL_CORE
- select SERIAL_CORE_CONSOLE
- help
- This is the UART protocol driver for the MAX3110 device on
- the Intel Moorestown platform. On other systems use the max3100
- driver.
-
config SERIAL_MFD_HSU
tristate "Medfield High Speed UART support"
depends on PCI
@@ -1094,6 +1085,16 @@ config SERIAL_VT8500_CONSOLE
depends on SERIAL_VT8500=y
select SERIAL_CORE_CONSOLE
+config SERIAL_ETRAXFS
+ bool "ETRAX FS serial port support"
+ depends on ETRAX_ARCH_V32 && OF
+ select SERIAL_CORE
+
+config SERIAL_ETRAXFS_CONSOLE
+ bool "ETRAX FS serial console support"
+ depends on SERIAL_ETRAXFS
+ select SERIAL_CORE_CONSOLE
+
config SERIAL_NETX
tristate "NetX serial port support"
depends on ARCH_NETX
@@ -1549,6 +1550,21 @@ config SERIAL_FSL_LPUART_CONSOLE
If you have enabled the lpuart serial port on the Freescale SoCs,
you can make it the console by answering Y to this option.
+config SERIAL_CONEXANT_DIGICOLOR
+ tristate "Conexant Digicolor CX92xxx USART serial port support"
+ depends on OF
+ select SERIAL_CORE
+ help
+ Support for the on-chip USART on Conexant Digicolor SoCs.
+
+config SERIAL_CONEXANT_DIGICOLOR_CONSOLE
+ bool "Console on Conexant Digicolor serial port"
+ depends on SERIAL_CONEXANT_DIGICOLOR=y
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the USART serial port on Conexant Digicolor
+ SoCs, you can make it the console by answering Y to this option.
+
config SERIAL_ST_ASC
tristate "ST ASC serial port support"
select SERIAL_CORE
@@ -1577,6 +1593,24 @@ config SERIAL_MEN_Z135
This driver can also be build as a module. If so, the module will be called
men_z135_uart.ko
+config SERIAL_SPRD
+ tristate "Support for Spreadtrum serial"
+ depends on ARCH_SPRD
+ select SERIAL_CORE
+ help
+ This enables the driver for Spreadtrum's serial ports.
+
+config SERIAL_SPRD_CONSOLE
+ bool "Spreadtrum UART console support"
+ depends on SERIAL_SPRD=y
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ Support for an early debug console using Spreadtrum's serial port.
+ This enables the console before the standard serial driver is probed.
+ It is selected with "earlycon" on the kernel command line; the console
+ is enabled when early_param is processed.
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 9a548acf5fdc..599be4b05a26 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
obj-$(CONFIG_SERIAL_MESON) += meson_uart.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
+obj-$(CONFIG_SERIAL_ETRAXFS) += etraxfs-uart.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
@@ -77,7 +78,6 @@ obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
-obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
@@ -92,7 +92,9 @@ obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
obj-$(CONFIG_SERIAL_RP2) += rp2.o
obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o
+obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR) += digicolor-usart.o
obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o
+obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 192d0435bb86..0fefdd8931a2 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -441,6 +441,7 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
port->iotype = SERIAL_IO_MEM;
port->ops = &altera_jtaguart_ops;
port->flags = UPF_BOOT_AUTOCONF;
+ port->dev = &pdev->dev;
uart_add_one_port(&altera_jtaguart_driver, port);
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index eb15a50623cb..b2859fe07e14 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -589,6 +589,7 @@ static int altera_uart_probe(struct platform_device *pdev)
port->iotype = SERIAL_IO_MEM;
port->ops = &altera_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
+ port->dev = &pdev->dev;
platform_set_drvdata(pdev, port);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4d848a29e223..846552bff67d 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -341,13 +341,37 @@ static u_int atmel_tx_empty(struct uart_port *port)
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int control = 0;
- unsigned int mode;
+ unsigned int mode = UART_GET_MR(port);
+ unsigned int rts_paused, rts_ready;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ /* override mode to RS485 if needed, otherwise keep the current mode */
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ if ((port->rs485.delay_rts_after_send) > 0)
+ UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
+ mode |= ATMEL_US_USMODE_RS485;
+ }
+
+ /* set the RTS line state according to the mode */
+ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+ /* force RTS line to high level */
+ rts_paused = ATMEL_US_RTSEN;
+
+ /* give the control of the RTS line back to the hardware */
+ rts_ready = ATMEL_US_RTSDIS;
+ } else {
+ /* force RTS line to high level */
+ rts_paused = ATMEL_US_RTSDIS;
+
+ /* force RTS line to low level */
+ rts_ready = ATMEL_US_RTSEN;
+ }
+
if (mctrl & TIOCM_RTS)
- control |= ATMEL_US_RTSEN;
+ control |= rts_ready;
else
- control |= ATMEL_US_RTSDIS;
+ control |= rts_paused;
if (mctrl & TIOCM_DTR)
control |= ATMEL_US_DTREN;
@@ -359,23 +383,12 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
mctrl_gpio_set(atmel_port->gpios, mctrl);
/* Local loopback mode? */
- mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE;
+ mode &= ~ATMEL_US_CHMODE;
if (mctrl & TIOCM_LOOP)
mode |= ATMEL_US_CHMODE_LOC_LOOP;
else
mode |= ATMEL_US_CHMODE_NORMAL;
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
- if (port->rs485.flags & SER_RS485_ENABLED) {
- dev_dbg(port->dev, "Setting UART to RS485\n");
- if ((port->rs485.delay_rts_after_send) > 0)
- UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
- mode |= ATMEL_US_USMODE_RS485;
- } else {
- dev_dbg(port->dev, "Setting UART to RS232\n");
- }
UART_PUT_MR(port, mode);
}
@@ -725,7 +738,11 @@ static void atmel_complete_tx_dma(void *arg)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- /* Do we really need this? */
+ /*
+ * xmit is a circular buffer, so if we have just sent data from
+ * xmit->tail to the end of xmit->buf, now we have to transmit the
+ * remaining data from the beginning of xmit->buf to xmit->head.
+ */
if (!uart_circ_empty(xmit))
tasklet_schedule(&atmel_port->tasklet);
@@ -784,17 +801,17 @@ static void atmel_tx_dma(struct uart_port *port)
BUG_ON(!sg_dma_len(sg));
desc = dmaengine_prep_slave_sg(chan,
- sg,
- 1,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT |
- DMA_CTRL_ACK);
+ sg,
+ 1,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
if (!desc) {
dev_err(port->dev, "Failed to send via dma!\n");
return;
}
- dma_sync_sg_for_device(port->dev, sg, 1, DMA_MEM_TO_DEV);
+ dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
atmel_port->desc_tx = desc;
desc->callback = atmel_complete_tx_dma;
@@ -927,7 +944,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
dma_sync_sg_for_cpu(port->dev,
&atmel_port->sg_rx,
1,
- DMA_DEV_TO_MEM);
+ DMA_FROM_DEVICE);
/*
* ring->head points to the end of data already written by the DMA.
@@ -974,7 +991,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
dma_sync_sg_for_device(port->dev,
&atmel_port->sg_rx,
1,
- DMA_DEV_TO_MEM);
+ DMA_FROM_DEVICE);
/*
* Drop the lock here since it might end up calling
@@ -1012,13 +1029,13 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
/* UART circular rx buffer is an aligned page. */
BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
sg_set_page(&atmel_port->sg_rx,
- virt_to_page(ring->buf),
- ATMEL_SERIAL_RINGSIZE,
- (int)ring->buf & ~PAGE_MASK);
- nent = dma_map_sg(port->dev,
- &atmel_port->sg_rx,
- 1,
- DMA_FROM_DEVICE);
+ virt_to_page(ring->buf),
+ ATMEL_SERIAL_RINGSIZE,
+ (int)ring->buf & ~PAGE_MASK);
+ nent = dma_map_sg(port->dev,
+ &atmel_port->sg_rx,
+ 1,
+ DMA_FROM_DEVICE);
if (!nent) {
dev_dbg(port->dev, "need to release resource of dma\n");
@@ -1047,11 +1064,11 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
* each one is half ring buffer size
*/
desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
- sg_dma_address(&atmel_port->sg_rx),
- sg_dma_len(&atmel_port->sg_rx),
- sg_dma_len(&atmel_port->sg_rx)/2,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
+ sg_dma_address(&atmel_port->sg_rx),
+ sg_dma_len(&atmel_port->sg_rx),
+ sg_dma_len(&atmel_port->sg_rx)/2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
@@ -1921,12 +1938,14 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
- unsigned int mode, imr, quot, baud;
+ unsigned int old_mode, mode, imr, quot, baud;
- /* Get current mode register */
- mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
- | ATMEL_US_NBSTOP | ATMEL_US_PAR
- | ATMEL_US_USMODE);
+ /* save the current mode register */
+ mode = old_mode = UART_GET_MR(port);
+
+ /* reset the mode, clock divisor, parity, stop bits and data size */
+ mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
+ ATMEL_US_PAR | ATMEL_US_USMODE);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
@@ -1971,12 +1990,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
} else
mode |= ATMEL_US_PAR_NONE;
- /* hardware handshake (RTS/CTS) */
- if (termios->c_cflag & CRTSCTS)
- mode |= ATMEL_US_USMODE_HWHS;
- else
- mode |= ATMEL_US_USMODE_NORMAL;
-
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = ATMEL_US_OVRE;
@@ -2020,18 +2033,40 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
+ /* mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
if ((port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
+ } else if (termios->c_cflag & CRTSCTS) {
+ /* RS232 with hardware handshake (RTS/CTS) */
+ mode |= ATMEL_US_USMODE_HWHS;
+ } else {
+ /* RS232 without hardware handshake */
+ mode |= ATMEL_US_USMODE_NORMAL;
}
- /* set the parity, stop bits and data size */
+ /* set the mode, clock divisor, parity, stop bits and data size */
UART_PUT_MR(port, mode);
+ /*
+ * when switching the mode, set the RTS line state according to the
+ * new mode, otherwise keep the former state
+ */
+ if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+ unsigned int rts_state;
+
+ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+ /* let the hardware control the RTS line */
+ rts_state = ATMEL_US_RTSDIS;
+ } else {
+ /* force RTS line to low level */
+ rts_state = ATMEL_US_RTSEN;
+ }
+
+ UART_PUT_CR(port, rts_state);
+ }
+
/* set the baud rate */
UART_PUT_BRGR(port, quot);
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
@@ -2565,7 +2600,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
ret = atmel_init_port(port, pdev);
if (ret)
- goto err;
+ goto err_clear_bit;
if (!atmel_use_pdc_rx(&port->uart)) {
ret = -ENOMEM;
@@ -2596,6 +2631,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, port);
+ /*
+ * The peripheral clock has been disabled by atmel_init_port():
+ * enable it before accessing I/O registers
+ */
+ clk_prepare_enable(port->clk);
+
if (rs485_enabled) {
UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
@@ -2606,6 +2647,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
*/
atmel_get_ip_name(&port->uart);
+ /*
+ * The peripheral clock can now safely be disabled till the port
+ * is used
+ */
+ clk_disable_unprepare(port->clk);
+
return 0;
err_add_port:
@@ -2616,6 +2663,8 @@ err_alloc_ring:
clk_put(port->clk);
port->clk = NULL;
}
+err_clear_bit:
+ clear_bit(port->uart.line, atmel_ports_in_use);
err:
return ret;
}
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
new file mode 100644
index 000000000000..a80cdad114f3
--- /dev/null
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -0,0 +1,560 @@
+/*
+ * Driver for Conexant Digicolor serial ports (USART)
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2014 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define UA_ENABLE 0x00
+#define UA_ENABLE_ENABLE BIT(0)
+
+#define UA_CONTROL 0x01
+#define UA_CONTROL_RX_ENABLE BIT(0)
+#define UA_CONTROL_TX_ENABLE BIT(1)
+#define UA_CONTROL_SOFT_RESET BIT(2)
+
+#define UA_STATUS 0x02
+#define UA_STATUS_PARITY_ERR BIT(0)
+#define UA_STATUS_FRAME_ERR BIT(1)
+#define UA_STATUS_OVERRUN_ERR BIT(2)
+#define UA_STATUS_TX_READY BIT(6)
+
+#define UA_CONFIG 0x03
+#define UA_CONFIG_CHAR_LEN BIT(0)
+#define UA_CONFIG_STOP_BITS BIT(1)
+#define UA_CONFIG_PARITY BIT(2)
+#define UA_CONFIG_ODD_PARITY BIT(4)
+
+#define UA_EMI_REC 0x04
+
+#define UA_HBAUD_LO 0x08
+#define UA_HBAUD_HI 0x09
+
+#define UA_STATUS_FIFO 0x0a
+#define UA_STATUS_FIFO_RX_EMPTY BIT(2)
+#define UA_STATUS_FIFO_RX_INT_ALMOST BIT(3)
+#define UA_STATUS_FIFO_TX_FULL BIT(4)
+#define UA_STATUS_FIFO_TX_INT_ALMOST BIT(7)
+
+#define UA_CONFIG_FIFO 0x0b
+#define UA_CONFIG_FIFO_RX_THRESH 7
+#define UA_CONFIG_FIFO_RX_FIFO_MODE BIT(3)
+#define UA_CONFIG_FIFO_TX_FIFO_MODE BIT(7)
+
+#define UA_INTFLAG_CLEAR 0x1c
+#define UA_INTFLAG_SET 0x1d
+#define UA_INT_ENABLE 0x1e
+#define UA_INT_STATUS 0x1f
+
+#define UA_INT_TX BIT(0)
+#define UA_INT_RX BIT(1)
+
+#define DIGICOLOR_USART_NR 3
+
+/*
+ * We use the 16-byte hardware FIFO to buffer Rx traffic. An Rx interrupt is
+ * only raised when the FIFO is filled beyond a certain configurable
+ * threshold. Unfortunately, there is no way to set this threshold below half
+ * FIFO. This means that we must periodically poll the FIFO status register to
+ * see whether there are waiting Rx bytes.
+ */
+
+struct digicolor_port {
+ struct uart_port port;
+ struct delayed_work rx_poll_work;
+};
+
+static struct uart_port *digicolor_ports[DIGICOLOR_USART_NR];
+
+static bool digicolor_uart_tx_full(struct uart_port *port)
+{
+ return !!(readb_relaxed(port->membase + UA_STATUS_FIFO) &
+ UA_STATUS_FIFO_TX_FULL);
+}
+
+static bool digicolor_uart_rx_empty(struct uart_port *port)
+{
+ return !!(readb_relaxed(port->membase + UA_STATUS_FIFO) &
+ UA_STATUS_FIFO_RX_EMPTY);
+}
+
+static void digicolor_uart_stop_tx(struct uart_port *port)
+{
+ u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE);
+
+ int_enable &= ~UA_INT_TX;
+ writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE);
+}
+
+static void digicolor_uart_start_tx(struct uart_port *port)
+{
+ u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE);
+
+ int_enable |= UA_INT_TX;
+ writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE);
+}
+
+static void digicolor_uart_stop_rx(struct uart_port *port)
+{
+ u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE);
+
+ int_enable &= ~UA_INT_RX;
+ writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE);
+}
+
+static void digicolor_rx_poll(struct work_struct *work)
+{
+ struct digicolor_port *dp =
+ container_of(to_delayed_work(work),
+ struct digicolor_port, rx_poll_work);
+
+ if (!digicolor_uart_rx_empty(&dp->port))
+ /* force RX interrupt */
+ writeb_relaxed(UA_INT_RX, dp->port.membase + UA_INTFLAG_SET);
+
+ schedule_delayed_work(&dp->rx_poll_work, msecs_to_jiffies(100));
+}
+
+static void digicolor_uart_rx(struct uart_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (1) {
+ u8 status, ch;
+ unsigned int ch_flag;
+
+ if (digicolor_uart_rx_empty(port))
+ break;
+
+ ch = readb_relaxed(port->membase + UA_EMI_REC);
+ status = readb_relaxed(port->membase + UA_STATUS);
+
+ port->icount.rx++;
+ ch_flag = TTY_NORMAL;
+
+ if (status) {
+ if (status & UA_STATUS_PARITY_ERR)
+ port->icount.parity++;
+ else if (status & UA_STATUS_FRAME_ERR)
+ port->icount.frame++;
+ else if (status & UA_STATUS_OVERRUN_ERR)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+
+ if (status & UA_STATUS_PARITY_ERR)
+ ch_flag = TTY_PARITY;
+ else if (status & UA_STATUS_FRAME_ERR)
+ ch_flag = TTY_FRAME;
+ else if (status & UA_STATUS_OVERRUN_ERR)
+ ch_flag = TTY_OVERRUN;
+ }
+
+ if (status & port->ignore_status_mask)
+ continue;
+
+ uart_insert_char(port, status, UA_STATUS_OVERRUN_ERR, ch,
+ ch_flag);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ tty_flip_buffer_push(&port->state->port);
+}
+
+static void digicolor_uart_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+
+ if (digicolor_uart_tx_full(port))
+ return;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (port->x_char) {
+ writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
+ port->icount.tx++;
+ port->x_char = 0;
+ goto out;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ digicolor_uart_stop_tx(port);
+ goto out;
+ }
+
+ while (!uart_circ_empty(xmit)) {
+ writeb(xmit->buf[xmit->tail], port->membase + UA_EMI_REC);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+
+ if (digicolor_uart_tx_full(port))
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ u8 int_status = readb_relaxed(port->membase + UA_INT_STATUS);
+
+ writeb_relaxed(UA_INT_RX | UA_INT_TX,
+ port->membase + UA_INTFLAG_CLEAR);
+
+ if (int_status & UA_INT_RX)
+ digicolor_uart_rx(port);
+ if (int_status & UA_INT_TX)
+ digicolor_uart_tx(port);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int digicolor_uart_tx_empty(struct uart_port *port)
+{
+ u8 status = readb_relaxed(port->membase + UA_STATUS);
+
+ return (status & UA_STATUS_TX_READY) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int digicolor_uart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS;
+}
+
+static void digicolor_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void digicolor_uart_break_ctl(struct uart_port *port, int state)
+{
+}
+
+static int digicolor_uart_startup(struct uart_port *port)
+{
+ struct digicolor_port *dp =
+ container_of(port, struct digicolor_port, port);
+
+ writeb_relaxed(UA_ENABLE_ENABLE, port->membase + UA_ENABLE);
+ writeb_relaxed(UA_CONTROL_SOFT_RESET, port->membase + UA_CONTROL);
+ writeb_relaxed(0, port->membase + UA_CONTROL);
+
+ writeb_relaxed(UA_CONFIG_FIFO_RX_FIFO_MODE
+ | UA_CONFIG_FIFO_TX_FIFO_MODE | UA_CONFIG_FIFO_RX_THRESH,
+ port->membase + UA_CONFIG_FIFO);
+ writeb_relaxed(UA_STATUS_FIFO_RX_INT_ALMOST,
+ port->membase + UA_STATUS_FIFO);
+ writeb_relaxed(UA_CONTROL_RX_ENABLE | UA_CONTROL_TX_ENABLE,
+ port->membase + UA_CONTROL);
+ writeb_relaxed(UA_INT_TX | UA_INT_RX,
+ port->membase + UA_INT_ENABLE);
+
+ schedule_delayed_work(&dp->rx_poll_work, msecs_to_jiffies(100));
+
+ return 0;
+}
+
+static void digicolor_uart_shutdown(struct uart_port *port)
+{
+ struct digicolor_port *dp =
+ container_of(port, struct digicolor_port, port);
+
+ writeb_relaxed(0, port->membase + UA_ENABLE);
+ cancel_delayed_work_sync(&dp->rx_poll_work);
+}
+
+static void digicolor_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud, divisor;
+ u8 config = 0;
+ unsigned long flags;
+
+ /* Mask termios capabilities we don't support */
+ termios->c_cflag &= ~CMSPAR;
+ termios->c_iflag &= ~(BRKINT | IGNBRK);
+
+ /* Limit baud rates so that we don't need the fractional divider */
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / (0x10000*16),
+ port->uartclk / 256);
+ divisor = uart_get_divisor(port, baud) - 1;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS7:
+ break;
+ case CS8:
+ default:
+ config |= UA_CONFIG_CHAR_LEN;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ config |= UA_CONFIG_STOP_BITS;
+
+ if (termios->c_cflag & PARENB) {
+ config |= UA_CONFIG_PARITY;
+ if (termios->c_cflag & PARODD)
+ config |= UA_CONFIG_ODD_PARITY;
+ }
+
+ /* Set read status mask */
+ port->read_status_mask = UA_STATUS_OVERRUN_ERR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UA_STATUS_PARITY_ERR
+ | UA_STATUS_FRAME_ERR;
+
+ /* Set status ignore mask */
+ port->ignore_status_mask = 0;
+ if (!(termios->c_cflag & CREAD))
+ port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
+ | UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ writeb_relaxed(config, port->membase + UA_CONFIG);
+ writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
+ writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *digicolor_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_DIGICOLOR) ? "DIGICOLOR USART" : NULL;
+}
+
+static void digicolor_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_DIGICOLOR;
+}
+
+static void digicolor_uart_release_port(struct uart_port *port)
+{
+}
+
+static int digicolor_uart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static const struct uart_ops digicolor_uart_ops = {
+ .tx_empty = digicolor_uart_tx_empty,
+ .set_mctrl = digicolor_uart_set_mctrl,
+ .get_mctrl = digicolor_uart_get_mctrl,
+ .stop_tx = digicolor_uart_stop_tx,
+ .start_tx = digicolor_uart_start_tx,
+ .stop_rx = digicolor_uart_stop_rx,
+ .break_ctl = digicolor_uart_break_ctl,
+ .startup = digicolor_uart_startup,
+ .shutdown = digicolor_uart_shutdown,
+ .set_termios = digicolor_uart_set_termios,
+ .type = digicolor_uart_type,
+ .config_port = digicolor_uart_config_port,
+ .release_port = digicolor_uart_release_port,
+ .request_port = digicolor_uart_request_port,
+};
+
+static void digicolor_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (digicolor_uart_tx_full(port))
+ cpu_relax();
+
+ writeb_relaxed(ch, port->membase + UA_EMI_REC);
+}
+
+static void digicolor_uart_console_write(struct console *co, const char *c,
+ unsigned n)
+{
+ struct uart_port *port = digicolor_ports[co->index];
+ u8 status;
+ unsigned long flags;
+ int locked = 1;
+
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ uart_console_write(port, c, n, digicolor_uart_console_putchar);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Wait for transmitter to become empty */
+ do {
+ status = readb_relaxed(port->membase + UA_STATUS);
+ } while ((status & UA_STATUS_TX_READY) == 0);
+}
+
+static int digicolor_uart_console_setup(struct console *co, char *options)
+{
+ int baud = 115200, bits = 8, parity = 'n', flow = 'n';
+ struct uart_port *port;
+
+ if (co->index < 0 || co->index >= DIGICOLOR_USART_NR)
+ return -EINVAL;
+
+ port = digicolor_ports[co->index];
+ if (!port)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct console digicolor_console = {
+ .name = "ttyS",
+ .device = uart_console_device,
+ .write = digicolor_uart_console_write,
+ .setup = digicolor_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct uart_driver digicolor_uart = {
+ .driver_name = "digicolor-usart",
+ .dev_name = "ttyS",
+ .nr = DIGICOLOR_USART_NR,
+};
+
+static int digicolor_uart_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret, index;
+ struct digicolor_port *dp;
+ struct resource *res;
+ struct clk *uart_clk;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing device tree node\n");
+ return -ENXIO;
+ }
+
+ index = of_alias_get_id(np, "serial");
+ if (index < 0 || index >= DIGICOLOR_USART_NR)
+ return -EINVAL;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ uart_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(uart_clk))
+ return PTR_ERR(uart_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dp->port.mapbase = res->start;
+ dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp->port.membase))
+ return PTR_ERR(dp->port.membase);
+
+ dp->port.irq = platform_get_irq(pdev, 0);
+ if (IS_ERR_VALUE(dp->port.irq))
+ return dp->port.irq;
+
+ dp->port.iotype = UPIO_MEM;
+ dp->port.uartclk = clk_get_rate(uart_clk);
+ dp->port.fifosize = 16;
+ dp->port.dev = &pdev->dev;
+ dp->port.ops = &digicolor_uart_ops;
+ dp->port.line = index;
+ dp->port.type = PORT_DIGICOLOR;
+ spin_lock_init(&dp->port.lock);
+
+ digicolor_ports[index] = &dp->port;
+ platform_set_drvdata(pdev, &dp->port);
+
+ INIT_DELAYED_WORK(&dp->rx_poll_work, digicolor_rx_poll);
+
+ ret = devm_request_irq(&pdev->dev, dp->port.irq, digicolor_uart_int, 0,
+ dev_name(&pdev->dev), &dp->port);
+ if (ret)
+ return ret;
+
+ return uart_add_one_port(&digicolor_uart, &dp->port);
+}
+
+static int digicolor_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&digicolor_uart, port);
+
+ return 0;
+}
+
+static const struct of_device_id digicolor_uart_dt_ids[] = {
+ { .compatible = "cnxt,cx92755-usart", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, digicolor_uart_dt_ids);
+
+static struct platform_driver digicolor_uart_platform = {
+ .driver = {
+ .name = "digicolor-usart",
+ .of_match_table = of_match_ptr(digicolor_uart_dt_ids),
+ },
+ .probe = digicolor_uart_probe,
+ .remove = digicolor_uart_remove,
+};
+
+static int __init digicolor_uart_init(void)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_SERIAL_CONEXANT_DIGICOLOR_CONSOLE)) {
+ digicolor_uart.cons = &digicolor_console;
+ digicolor_console.data = &digicolor_uart;
+ }
+
+ ret = uart_register_driver(&digicolor_uart);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&digicolor_uart_platform);
+}
+module_init(digicolor_uart_init);
+
+static void __exit digicolor_uart_exit(void)
+{
+ platform_driver_unregister(&digicolor_uart_platform);
+ uart_unregister_driver(&digicolor_uart);
+}
+module_exit(digicolor_uart_exit);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Conexant Digicolor USART serial driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
new file mode 100644
index 000000000000..a57301a6fe42
--- /dev/null
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -0,0 +1,996 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/tty_flip.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <hwregs/ser_defs.h>
+
+#define DRV_NAME "etraxfs-uart"
+#define UART_NR CONFIG_ETRAX_SERIAL_PORTS
+
+#define MODIFY_REG(instance, reg, var) \
+ do { \
+ if (REG_RD_INT(ser, instance, reg) != \
+ REG_TYPE_CONV(int, reg_ser_##reg, var)) \
+ REG_WR(ser, instance, reg, var); \
+ } while (0)
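+ /*
+ * For example, MODIFY_REG(up->regi_ser, rw_xoff, xoff) (as used in
+ * etraxfs_uart_set_termios below) re-reads rw_xoff and only issues the
+ * register write when the new value differs, so unchanged settings do
+ * not cause redundant bus writes.
+ */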
+
+struct uart_cris_port {
+ struct uart_port port;
+
+ int initialized;
+ int irq;
+
+ void __iomem *regi_ser;
+
+ struct gpio_desc *dtr_pin;
+ struct gpio_desc *dsr_pin;
+ struct gpio_desc *ri_pin;
+ struct gpio_desc *cd_pin;
+
+ int write_ongoing;
+};
+
+static struct uart_driver etraxfs_uart_driver;
+static struct uart_port *console_port;
+static int console_baud = 115200;
+static struct uart_cris_port *etraxfs_uart_ports[UART_NR];
+
+static void cris_serial_port_init(struct uart_port *port, int line);
+static void etraxfs_uart_stop_rx(struct uart_port *port);
+static inline void etraxfs_uart_start_tx_bottom(struct uart_port *port);
+
+#ifdef CONFIG_SERIAL_ETRAXFS_CONSOLE
+static void
+cris_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_cris_port *up;
+ int i;
+ reg_ser_r_stat_din stat;
+ reg_ser_rw_tr_dma_en tr_dma_en, old;
+
+ up = etraxfs_uart_ports[co->index];
+
+ if (!up)
+ return;
+
+ /* Switch to manual mode. */
+ tr_dma_en = old = REG_RD(ser, up->regi_ser, rw_tr_dma_en);
+ if (tr_dma_en.en == regk_ser_yes) {
+ tr_dma_en.en = regk_ser_no;
+ REG_WR(ser, up->regi_ser, rw_tr_dma_en, tr_dma_en);
+ }
+
+ /* Send data. */
+ for (i = 0; i < count; i++) {
+ /* LF -> CRLF */
+ if (s[i] == '\n') {
+ do {
+ stat = REG_RD(ser, up->regi_ser, r_stat_din);
+ } while (!stat.tr_rdy);
+ REG_WR_INT(ser, up->regi_ser, rw_dout, '\r');
+ }
+ /* Wait until transmitter is ready and send. */
+ do {
+ stat = REG_RD(ser, up->regi_ser, r_stat_din);
+ } while (!stat.tr_rdy);
+ REG_WR_INT(ser, up->regi_ser, rw_dout, s[i]);
+ }
+
+ /* Restore mode. */
+ if (tr_dma_en.en != old.en)
+ REG_WR(ser, up->regi_ser, rw_tr_dma_en, old);
+}
+
+static int __init
+cris_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= UART_NR)
+ co->index = 0;
+ port = &etraxfs_uart_ports[co->index]->port;
+ console_port = port;
+
+ co->flags |= CON_CONSDEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ console_baud = baud;
+ cris_serial_port_init(port, co->index);
+ uart_set_options(port, co, baud, parity, bits, flow);
+
+ return 0;
+}
+
+static struct tty_driver *cris_console_device(struct console *co, int *index)
+{
+ struct uart_driver *p = co->data;
+ *index = co->index;
+ return p->tty_driver;
+}
+
+static struct console cris_console = {
+ .name = "ttyS",
+ .write = cris_console_write,
+ .device = cris_console_device,
+ .setup = cris_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &etraxfs_uart_driver,
+};
+#endif /* CONFIG_SERIAL_ETRAXFS_CONSOLE */
+
+static struct uart_driver etraxfs_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = UART_NR,
+#ifdef CONFIG_SERIAL_ETRAXFS_CONSOLE
+ .cons = &cris_console,
+#endif /* CONFIG_SERIAL_ETRAXFS_CONSOLE */
+};
+
+static inline int crisv32_serial_get_rts(struct uart_cris_port *up)
+{
+ void __iomem *regi_ser = up->regi_ser;
+ /*
+ * Return what the user has set rts to, or what the pin actually is?
+ * (if auto_rts is used, the two differ during tx)
+ */
+ reg_ser_r_stat_din rstat = REG_RD(ser, regi_ser, r_stat_din);
+
+ return !(rstat.rts_n == regk_ser_active);
+}
+
+/*
+ * set = 0 means 3.3V on the pin; bit value: 0 = active = 0V,
+ * 1 = inactive = 3.3V
+ */
+static inline void crisv32_serial_set_rts(struct uart_cris_port *up,
+ int set, int force)
+{
+ void __iomem *regi_ser = up->regi_ser;
+
+ unsigned long flags;
+ reg_ser_rw_rec_ctrl rec_ctrl;
+
+ local_irq_save(flags);
+ rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
+
+ if (set)
+ rec_ctrl.rts_n = regk_ser_active;
+ else
+ rec_ctrl.rts_n = regk_ser_inactive;
+ REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
+ local_irq_restore(flags);
+}
+
+static inline int crisv32_serial_get_cts(struct uart_cris_port *up)
+{
+ void __iomem *regi_ser = up->regi_ser;
+ reg_ser_r_stat_din rstat = REG_RD(ser, regi_ser, r_stat_din);
+
+ return (rstat.cts_n == regk_ser_active);
+}
+
+/*
+ * Send a single character for XON/XOFF purposes. We do it in this separate
+ * function, instead of the alternative of supporting port->x_char in the
+ * ...start_tx function, so we don't mix up this case with possibly enabling
+ * transmission
+ * of queued-up data (in case that's disabled after *receiving* an XOFF or
+ * negative CTS). This function is used for both DMA and non-DMA case; see HW
+ * docs specifically blessing sending characters manually when DMA for
+ * transmission is enabled and running. We may be asked to transmit despite
+ * the transmitter being disabled by a ..._stop_tx call so we need to enable
+ * it temporarily but restore the state afterwards.
+ */
+static void etraxfs_uart_send_xchar(struct uart_port *port, char ch)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ reg_ser_rw_dout dout = { .data = ch };
+ reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };
+ reg_ser_r_stat_din rstat;
+ reg_ser_rw_tr_ctrl prev_tr_ctrl, tr_ctrl;
+ void __iomem *regi_ser = up->regi_ser;
+ unsigned long flags;
+
+ /*
+ * Wait for tr_rdy in case a character is already being output. Make
+ * sure we have integrity between the register reads and the writes
+ * below, but don't busy-wait with interrupts off and the port lock
+ * taken.
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ do {
+ spin_unlock_irqrestore(&port->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ prev_tr_ctrl = tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
+ rstat = REG_RD(ser, regi_ser, r_stat_din);
+ } while (!rstat.tr_rdy);
+
+ /*
+ * Ack an interrupt if one was just issued for the previous character
+ * that was output. This is required for non-DMA as the interrupt is
+ * used as the only indicator that the transmitter is ready, and it
+ * isn't ready while this x_char is being transmitted.
+ */
+ REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
+
+ /* Enable the transmitter in case it was disabled. */
+ tr_ctrl.stop = 0;
+ REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
+
+ /*
+ * Finally, send the blessed character; nothing should stop it now,
+ * except for an xoff-detected state, which we'll handle below.
+ */
+ REG_WR(ser, regi_ser, rw_dout, dout);
+ up->port.icount.tx++;
+
+ /* There might be an xoff state to clear. */
+ rstat = REG_RD(ser, up->regi_ser, r_stat_din);
+
+ /*
+ * Clear any xoff state that *may* have been there to
+ * inhibit transmission of the character.
+ */
+ if (rstat.xoff_detect) {
+ reg_ser_rw_xoff_clr xoff_clr = { .clr = 1 };
+ reg_ser_rw_tr_dma_en tr_dma_en;
+
+ REG_WR(ser, regi_ser, rw_xoff_clr, xoff_clr);
+ tr_dma_en = REG_RD(ser, regi_ser, rw_tr_dma_en);
+
+ /*
+ * If we had an xoff state but cleared it, instead sneak in a
+ * disabled state for the transmitter, after the character we
+ * sent. Thus we keep the port disabled, just as if the xoff
+ * state was still in effect (or actually, as if stop_tx had
+ * been called, as we stop DMA too).
+ */
+ prev_tr_ctrl.stop = 1;
+
+ tr_dma_en.en = 0;
+ REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
+ }
+
+ /* Restore "previous" enabled/disabled state of the transmitter. */
+ REG_WR(ser, regi_ser, rw_tr_ctrl, prev_tr_ctrl);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * Do not spin_lock_irqsave or disable interrupts by other means here; it's
+ * already done by the caller.
+ */
+static void etraxfs_uart_start_tx(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+
+ /* The work below has already been done if a write is ongoing. */
+ if (up->write_ongoing)
+ return;
+
+ /* Signal that write is ongoing */
+ up->write_ongoing = 1;
+
+ etraxfs_uart_start_tx_bottom(port);
+}
+
+static inline void etraxfs_uart_start_tx_bottom(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ void __iomem *regi_ser = up->regi_ser;
+ reg_ser_rw_tr_ctrl tr_ctrl;
+ reg_ser_rw_intr_mask intr_mask;
+
+ tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
+ tr_ctrl.stop = regk_ser_no;
+ REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
+ intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
+ intr_mask.tr_rdy = regk_ser_yes;
+ REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
+}
+
+/*
+ * This function handles both the DMA and non-DMA case by ordering the
+ * transmitter to stop after the current character. We don't need to wait
+ * for any such character to be completely transmitted; we do that where it
+ * matters, like in etraxfs_uart_set_termios. Don't busy-wait here; see
+ * Documentation/serial/driver: this function is called within
+ * spin_lock_irq{,save} and thus separate ones would be disastrous (when SMP).
+ * There's no documented need to set the txd pin to any particular value;
+ * break setting is controlled solely by etraxfs_uart_break_ctl.
+ */
+static void etraxfs_uart_stop_tx(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ void __iomem *regi_ser = up->regi_ser;
+ reg_ser_rw_tr_ctrl tr_ctrl;
+ reg_ser_rw_intr_mask intr_mask;
+ reg_ser_rw_tr_dma_en tr_dma_en = {0};
+ reg_ser_rw_xoff_clr xoff_clr = {0};
+
+ /*
+ * For the non-DMA case, we'd get a tr_rdy interrupt that we're not
+ * interested in as we're not transmitting any characters. For the
+ * DMA case, that interrupt is already turned off, but no reason to
+ * waste code on conditionals here.
+ */
+ intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
+ intr_mask.tr_rdy = regk_ser_no;
+ REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
+
+ tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
+ tr_ctrl.stop = 1;
+ REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
+
+ /*
+ * Always clear possible hardware xoff-detected state here; no need to
+ * unnecessarily consider mctrl settings and when they change. We clear
+ * it here rather than in start_tx: both functions are called as the
+ * effect of XOFF processing, but start_tx is also called when upper
+ * levels tell the driver that there are more characters to send, so
+ * avoid adding code there.
+ */
+ xoff_clr.clr = 1;
+ REG_WR(ser, regi_ser, rw_xoff_clr, xoff_clr);
+
+ /*
+ * Disable transmitter DMA, so that if we're in XON/XOFF, we can send
+ * those single characters without also giving go-ahead for queued up
+ * DMA data.
+ */
+ tr_dma_en.en = 0;
+ REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
+
+ /*
+ * Make sure that write_ongoing is reset when stopping tx.
+ */
+ up->write_ongoing = 0;
+}
+
+static void etraxfs_uart_stop_rx(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ void __iomem *regi_ser = up->regi_ser;
+ reg_ser_rw_rec_ctrl rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
+
+ rec_ctrl.en = regk_ser_no;
+ REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
+}
+
+static void etraxfs_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void check_modem_status(struct uart_cris_port *up)
+{
+}
+
+static unsigned int etraxfs_uart_tx_empty(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ unsigned long flags;
+ unsigned int ret;
+ reg_ser_r_stat_din rstat = {0};
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ rstat = REG_RD(ser, up->regi_ser, r_stat_din);
+ ret = rstat.tr_empty ? TIOCSER_TEMT : 0;
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return ret;
+}
+static unsigned int etraxfs_uart_get_mctrl(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ unsigned int ret;
+
+ ret = 0;
+ if (crisv32_serial_get_rts(up))
+ ret |= TIOCM_RTS;
+ /* DTR is active low */
+ if (up->dtr_pin && !gpiod_get_raw_value(up->dtr_pin))
+ ret |= TIOCM_DTR;
+ /* CD is active low */
+ if (up->cd_pin && !gpiod_get_raw_value(up->cd_pin))
+ ret |= TIOCM_CD;
+ /* RI is active low */
+ if (up->ri_pin && !gpiod_get_raw_value(up->ri_pin))
+ ret |= TIOCM_RI;
+ /* DSR is active low */
+ if (up->dsr_pin && !gpiod_get_raw_value(up->dsr_pin))
+ ret |= TIOCM_DSR;
+ if (crisv32_serial_get_cts(up))
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void etraxfs_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+
+ crisv32_serial_set_rts(up, mctrl & TIOCM_RTS ? 1 : 0, 0);
+ /* DTR is active low */
+ if (up->dtr_pin)
+ gpiod_set_raw_value(up->dtr_pin, mctrl & TIOCM_DTR ? 0 : 1);
+ /* RI is active low */
+ if (up->ri_pin)
+ gpiod_set_raw_value(up->ri_pin, mctrl & TIOCM_RNG ? 0 : 1);
+ /* CD is active low */
+ if (up->cd_pin)
+ gpiod_set_raw_value(up->cd_pin, mctrl & TIOCM_CD ? 0 : 1);
+}
+
+static void etraxfs_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ unsigned long flags;
+ reg_ser_rw_tr_ctrl tr_ctrl;
+ reg_ser_rw_tr_dma_en tr_dma_en;
+ reg_ser_rw_intr_mask intr_mask;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ tr_ctrl = REG_RD(ser, up->regi_ser, rw_tr_ctrl);
+ tr_dma_en = REG_RD(ser, up->regi_ser, rw_tr_dma_en);
+ intr_mask = REG_RD(ser, up->regi_ser, rw_intr_mask);
+
+ if (break_state != 0) { /* Send break */
+ /*
+ * We need to disable DMA (if used) or tr_rdy interrupts if no
+ * DMA. No need to make this conditional on use of DMA;
+ * disabling will be a no-op for the other mode.
+ */
+ intr_mask.tr_rdy = regk_ser_no;
+ tr_dma_en.en = 0;
+
+ /*
+ * Stop transmission and set the txd pin to 0 after the
+ * current character. The txd setting will take effect after
+ * any current transmission has completed.
+ */
+ tr_ctrl.stop = 1;
+ tr_ctrl.txd = 0;
+ } else {
+ /* Re-enable the serial interrupt. */
+ intr_mask.tr_rdy = regk_ser_yes;
+
+ tr_ctrl.stop = 0;
+ tr_ctrl.txd = 1;
+ }
+ REG_WR(ser, up->regi_ser, rw_tr_ctrl, tr_ctrl);
+ REG_WR(ser, up->regi_ser, rw_tr_dma_en, tr_dma_en);
+ REG_WR(ser, up->regi_ser, rw_intr_mask, intr_mask);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void
+transmit_chars_no_dma(struct uart_cris_port *up)
+{
+ int max_count;
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ void __iomem *regi_ser = up->regi_ser;
+ reg_ser_r_stat_din rstat;
+ reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ /* No more to send, so disable the interrupt. */
+ reg_ser_rw_intr_mask intr_mask;
+
+ intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
+ intr_mask.tr_rdy = 0;
+ intr_mask.tr_empty = 0;
+ REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
+ up->write_ongoing = 0;
+ return;
+ }
+
+ /*
+ * If the serial port is fast, we send up to max_count bytes before
+ * exiting the loop.
+ */
+ max_count = 64;
+ do {
+ reg_ser_rw_dout dout = { .data = xmit->buf[xmit->tail] };
+
+ REG_WR(ser, regi_ser, rw_dout, dout);
+ REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+ up->port.icount.tx++;
+ if (xmit->head == xmit->tail)
+ break;
+ rstat = REG_RD(ser, regi_ser, r_stat_din);
+ } while ((--max_count > 0) && rstat.tr_rdy);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+}
+
+static void receive_chars_no_dma(struct uart_cris_port *up)
+{
+ reg_ser_rs_stat_din stat_din;
+ reg_ser_r_stat_din rstat;
+ struct tty_port *port;
+ struct uart_icount *icount;
+ int max_count = 16;
+ char flag;
+ reg_ser_rw_ack_intr ack_intr = { 0 };
+
+ rstat = REG_RD(ser, up->regi_ser, r_stat_din);
+ icount = &up->port.icount;
+ port = &up->port.state->port;
+
+ do {
+ stat_din = REG_RD(ser, up->regi_ser, rs_stat_din);
+
+ flag = TTY_NORMAL;
+ ack_intr.dav = 1;
+ REG_WR(ser, up->regi_ser, rw_ack_intr, ack_intr);
+ icount->rx++;
+
+ if (stat_din.framing_err | stat_din.par_err | stat_din.orun) {
+ if (stat_din.data == 0x00 &&
+ stat_din.framing_err) {
+ /* Most likely a break. */
+ flag = TTY_BREAK;
+ icount->brk++;
+ } else if (stat_din.par_err) {
+ flag = TTY_PARITY;
+ icount->parity++;
+ } else if (stat_din.orun) {
+ flag = TTY_OVERRUN;
+ icount->overrun++;
+ } else if (stat_din.framing_err) {
+ flag = TTY_FRAME;
+ icount->frame++;
+ }
+ }
+
+ /*
+ * If this becomes important, we probably *could* handle this
+ * gracefully by keeping track of the unhandled character.
+ */
+ if (!tty_insert_flip_char(port, stat_din.data, flag))
+ panic("%s: No tty buffer space", __func__);
+ rstat = REG_RD(ser, up->regi_ser, r_stat_din);
+ } while (rstat.dav && (max_count-- > 0));
+ spin_unlock(&up->port.lock);
+ tty_flip_buffer_push(port);
+ spin_lock(&up->port.lock);
+}
+
+static irqreturn_t
+ser_interrupt(int irq, void *dev_id)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
+ void __iomem *regi_ser;
+ int handled = 0;
+
+ spin_lock(&up->port.lock);
+
+ regi_ser = up->regi_ser;
+
+ if (regi_ser) {
+ reg_ser_r_masked_intr masked_intr;
+
+ masked_intr = REG_RD(ser, regi_ser, r_masked_intr);
+ /*
+ * Check what interrupts are active before taking
+ * actions. If DMA is used the interrupt shouldn't
+ * be enabled.
+ */
+ if (masked_intr.dav) {
+ receive_chars_no_dma(up);
+ handled = 1;
+ }
+ check_modem_status(up);
+
+ if (masked_intr.tr_rdy) {
+ transmit_chars_no_dma(up);
+ handled = 1;
+ }
+ }
+ spin_unlock(&up->port.lock);
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int etraxfs_uart_get_poll_char(struct uart_port *port)
+{
+ reg_ser_rs_stat_din stat;
+ reg_ser_rw_ack_intr ack_intr = { 0 };
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+
+ do {
+ stat = REG_RD(ser, up->regi_ser, rs_stat_din);
+ } while (!stat.dav);
+
+ /* Ack the data_avail interrupt. */
+ ack_intr.dav = 1;
+ REG_WR(ser, up->regi_ser, rw_ack_intr, ack_intr);
+
+ return stat.data;
+}
+
+static void etraxfs_uart_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ reg_ser_r_stat_din stat;
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+
+ do {
+ stat = REG_RD(ser, up->regi_ser, r_stat_din);
+ } while (!stat.tr_rdy);
+ REG_WR_INT(ser, up->regi_ser, rw_dout, c);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
+static int etraxfs_uart_startup(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ unsigned long flags;
+ reg_ser_rw_intr_mask ser_intr_mask = {0};
+
+ ser_intr_mask.dav = regk_ser_yes;
+
+ if (request_irq(etraxfs_uart_ports[port->line]->irq, ser_interrupt,
+ 0, DRV_NAME, etraxfs_uart_ports[port->line]))
+ panic("irq ser%d", port->line);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ REG_WR(ser, up->regi_ser, rw_intr_mask, ser_intr_mask);
+
+ etraxfs_uart_set_mctrl(&up->port, up->port.mctrl);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return 0;
+}
+
+static void etraxfs_uart_shutdown(struct uart_port *port)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ etraxfs_uart_stop_tx(port);
+ etraxfs_uart_stop_rx(port);
+
+ free_irq(etraxfs_uart_ports[port->line]->irq,
+ etraxfs_uart_ports[port->line]);
+
+ etraxfs_uart_set_mctrl(&up->port, up->port.mctrl);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+}
+
+static void
+etraxfs_uart_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+ unsigned long flags;
+ reg_ser_rw_xoff xoff;
+ reg_ser_rw_xoff_clr xoff_clr = {0};
+ reg_ser_rw_tr_ctrl tx_ctrl = {0};
+ reg_ser_rw_tr_dma_en tx_dma_en = {0};
+ reg_ser_rw_rec_ctrl rx_ctrl = {0};
+ reg_ser_rw_tr_baud_div tx_baud_div = {0};
+ reg_ser_rw_rec_baud_div rx_baud_div = {0};
+ int baud;
+
+ if (old &&
+ termios->c_cflag == old->c_cflag &&
+ termios->c_iflag == old->c_iflag)
+ return;
+
+ /* Tx: 8 bit, no/even parity, 1 stop bit, no cts. */
+ tx_ctrl.base_freq = regk_ser_f29_493;
+ tx_ctrl.en = 0;
+ tx_ctrl.stop = 0;
+ tx_ctrl.auto_rts = regk_ser_no;
+ tx_ctrl.txd = 1;
+ tx_ctrl.auto_cts = 0;
+ /* Rx: 8 bit, no/even parity. */
+ rx_ctrl.dma_err = regk_ser_stop;
+ rx_ctrl.sampling = regk_ser_majority;
+ rx_ctrl.timeout = 1;
+
+ rx_ctrl.rts_n = regk_ser_inactive;
+
+ /* Common for tx and rx: 8N1. */
+ tx_ctrl.data_bits = regk_ser_bits8;
+ rx_ctrl.data_bits = regk_ser_bits8;
+ tx_ctrl.par = regk_ser_even;
+ rx_ctrl.par = regk_ser_even;
+ tx_ctrl.par_en = regk_ser_no;
+ rx_ctrl.par_en = regk_ser_no;
+
+ tx_ctrl.stop_bits = regk_ser_bits1;
+
+ /*
+ * Change baud-rate and write it to the hardware.
+ *
+ * baud_clock = base_freq / (divisor*8)
+ * divisor = base_freq / (baud_clock * 8)
+ * base_freq is either:
+ * off, ext, 29.493MHz, 32.000 MHz, 32.768 MHz or 100 MHz
+ * 29.493MHz is used for standard baudrates
+ */
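+ /*
+ * Worked example: at 115200 baud with the 29.493 MHz base frequency,
+ * div = 29493000 / (8 * 115200) = 32, which gives an actual rate of
+ * 29493000 / (8 * 32) ~= 115207 baud, i.e. well under 0.01% error.
+ */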
+
+ /*
+ * For the console port we keep the original baudrate here. Not very
+ * beautiful.
+ */
+ if ((port != console_port) || old)
+ baud = uart_get_baud_rate(port, termios, old, 0,
+ port->uartclk / 8);
+ else
+ baud = console_baud;
+
+ tx_baud_div.div = 29493000 / (8 * baud);
+ /* Rx uses same as tx. */
+ rx_baud_div.div = tx_baud_div.div;
+ rx_ctrl.base_freq = tx_ctrl.base_freq;
+
+ if ((termios->c_cflag & CSIZE) == CS7) {
+ /* Set 7 bit mode. */
+ tx_ctrl.data_bits = regk_ser_bits7;
+ rx_ctrl.data_bits = regk_ser_bits7;
+ }
+
+ if (termios->c_cflag & CSTOPB) {
+ /* Set 2 stop bit mode. */
+ tx_ctrl.stop_bits = regk_ser_bits2;
+ }
+
+ if (termios->c_cflag & PARENB) {
+ /* Enable parity. */
+ tx_ctrl.par_en = regk_ser_yes;
+ rx_ctrl.par_en = regk_ser_yes;
+ }
+
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD) {
+ /* Set mark parity if PARODD and CMSPAR. */
+ tx_ctrl.par = regk_ser_mark;
+ rx_ctrl.par = regk_ser_mark;
+ } else {
+ tx_ctrl.par = regk_ser_space;
+ rx_ctrl.par = regk_ser_space;
+ }
+ } else {
+ if (termios->c_cflag & PARODD) {
+ /* Set odd parity. */
+ tx_ctrl.par = regk_ser_odd;
+ rx_ctrl.par = regk_ser_odd;
+ }
+ }
+
+ if (termios->c_cflag & CRTSCTS) {
+ /* Enable automatic CTS handling. */
+ tx_ctrl.auto_cts = regk_ser_yes;
+ }
+
+ /* Make sure the tx and rx are enabled. */
+ tx_ctrl.en = regk_ser_yes;
+ rx_ctrl.en = regk_ser_yes;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ tx_dma_en.en = 0;
+ REG_WR(ser, up->regi_ser, rw_tr_dma_en, tx_dma_en);
+
+ /* Actually write the control regs (if modified) to the hardware. */
+ uart_update_timeout(port, termios->c_cflag, port->uartclk/8);
+ MODIFY_REG(up->regi_ser, rw_rec_baud_div, rx_baud_div);
+ MODIFY_REG(up->regi_ser, rw_rec_ctrl, rx_ctrl);
+
+ MODIFY_REG(up->regi_ser, rw_tr_baud_div, tx_baud_div);
+ MODIFY_REG(up->regi_ser, rw_tr_ctrl, tx_ctrl);
+
+ tx_dma_en.en = 0;
+ REG_WR(ser, up->regi_ser, rw_tr_dma_en, tx_dma_en);
+
+ xoff = REG_RD(ser, up->regi_ser, rw_xoff);
+
+ if (up->port.state && up->port.state->port.tty &&
+ (up->port.state->port.tty->termios.c_iflag & IXON)) {
+ xoff.chr = STOP_CHAR(up->port.state->port.tty);
+ xoff.automatic = regk_ser_yes;
+ } else
+ xoff.automatic = regk_ser_no;
+
+ MODIFY_REG(up->regi_ser, rw_xoff, xoff);
+
+ /*
+ * Make sure we don't start in an automatically shut-off state due to
+ * a previous early exit.
+ */
+ xoff_clr.clr = 1;
+ REG_WR(ser, up->regi_ser, rw_xoff_clr, xoff_clr);
+
+ etraxfs_uart_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static const char *
+etraxfs_uart_type(struct uart_port *port)
+{
+ return "CRISv32";
+}
+
+static void etraxfs_uart_release_port(struct uart_port *port)
+{
+}
+
+static int etraxfs_uart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void etraxfs_uart_config_port(struct uart_port *port, int flags)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+
+ up->port.type = PORT_CRIS;
+}
+
+static const struct uart_ops etraxfs_uart_pops = {
+ .tx_empty = etraxfs_uart_tx_empty,
+ .set_mctrl = etraxfs_uart_set_mctrl,
+ .get_mctrl = etraxfs_uart_get_mctrl,
+ .stop_tx = etraxfs_uart_stop_tx,
+ .start_tx = etraxfs_uart_start_tx,
+ .send_xchar = etraxfs_uart_send_xchar,
+ .stop_rx = etraxfs_uart_stop_rx,
+ .enable_ms = etraxfs_uart_enable_ms,
+ .break_ctl = etraxfs_uart_break_ctl,
+ .startup = etraxfs_uart_startup,
+ .shutdown = etraxfs_uart_shutdown,
+ .set_termios = etraxfs_uart_set_termios,
+ .type = etraxfs_uart_type,
+ .release_port = etraxfs_uart_release_port,
+ .request_port = etraxfs_uart_request_port,
+ .config_port = etraxfs_uart_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = etraxfs_uart_get_poll_char,
+ .poll_put_char = etraxfs_uart_put_poll_char,
+#endif
+};
+
+static void cris_serial_port_init(struct uart_port *port, int line)
+{
+ struct uart_cris_port *up = (struct uart_cris_port *)port;
+
+ if (up->initialized)
+ return;
+ up->initialized = 1;
+ port->line = line;
+ spin_lock_init(&port->lock);
+ port->ops = &etraxfs_uart_pops;
+ port->irq = up->irq;
+ port->iobase = (unsigned long) up->regi_ser;
+ port->uartclk = 29493000;
+
+ /*
+ * We can't fit any more than 255 here (unsigned char), though
+ * actually UART_XMIT_SIZE characters could be pending output.
+ * At the time of this writing, "fifosize" here means the number of
+ * characters that can be pending output after a start_tx call until
+ * tx_empty returns 1: see serial_core.c:uart_wait_until_sent.
+ * This matters for timeout calculations, unfortunately, but keeping
+ * larger amounts at the DMA wouldn't win much, so let's just play nice.
+ */
+ port->fifosize = 255;
+ port->flags = UPF_BOOT_AUTOCONF;
+}
+
+static int etraxfs_uart_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct uart_cris_port *up;
+ int dev_id;
+
+ if (!np)
+ return -ENODEV;
+
+ dev_id = of_alias_get_id(np, "serial");
+ if (dev_id < 0)
+ dev_id = 0;
+
+ if (dev_id >= UART_NR)
+ return -EINVAL;
+
+ if (etraxfs_uart_ports[dev_id])
+ return -EBUSY;
+
+ up = devm_kzalloc(&pdev->dev, sizeof(struct uart_cris_port),
+ GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+
+ up->irq = irq_of_parse_and_map(np, 0);
+ up->regi_ser = of_iomap(np, 0);
+ up->dtr_pin = devm_gpiod_get_optional(&pdev->dev, "dtr");
+ up->dsr_pin = devm_gpiod_get_optional(&pdev->dev, "dsr");
+ up->ri_pin = devm_gpiod_get_optional(&pdev->dev, "ri");
+ up->cd_pin = devm_gpiod_get_optional(&pdev->dev, "cd");
+ up->port.dev = &pdev->dev;
+ cris_serial_port_init(&up->port, dev_id);
+
+ etraxfs_uart_ports[dev_id] = up;
+ platform_set_drvdata(pdev, &up->port);
+ uart_add_one_port(&etraxfs_uart_driver, &up->port);
+
+ return 0;
+}
+
+static int etraxfs_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+
+ port = platform_get_drvdata(pdev);
+ uart_remove_one_port(&etraxfs_uart_driver, port);
+ etraxfs_uart_ports[pdev->id] = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id etraxfs_uart_dt_ids[] = {
+ { .compatible = "axis,etraxfs-uart" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, etraxfs_uart_dt_ids);
+
+static struct platform_driver etraxfs_uart_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(etraxfs_uart_dt_ids),
+ },
+ .probe = etraxfs_uart_probe,
+ .remove = etraxfs_uart_remove,
+};
+
+static int __init etraxfs_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&etraxfs_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&etraxfs_uart_platform_driver);
+ if (ret)
+ uart_unregister_driver(&etraxfs_uart_driver);
+
+ return ret;
+}
+
+static void __exit etraxfs_uart_exit(void)
+{
+ platform_driver_unregister(&etraxfs_uart_platform_driver);
+ uart_unregister_driver(&etraxfs_uart_driver);
+}
+
+module_init(etraxfs_uart_init);
+module_exit(etraxfs_uart_exit);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index e7cde3a9566d..b1893f3f88f1 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -237,7 +237,8 @@ struct lpuart_port {
unsigned int rxfifo_size;
bool lpuart32;
- bool lpuart_dma_use;
+ bool lpuart_dma_tx_use;
+ bool lpuart_dma_rx_use;
struct dma_chan *dma_tx_chan;
struct dma_chan *dma_rx_chan;
struct dma_async_tx_descriptor *dma_tx_desc;
@@ -454,6 +455,15 @@ static int lpuart_dma_rx(struct lpuart_port *sport)
return 0;
}
+static void lpuart_flush_buffer(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ if (sport->lpuart_dma_tx_use) {
+ dmaengine_terminate_all(sport->dma_tx_chan);
+ sport->dma_tx_in_progress = 0;
+ }
+}
+
static void lpuart_dma_rx_complete(void *arg)
{
struct lpuart_port *sport = arg;
@@ -461,6 +471,7 @@ static void lpuart_dma_rx_complete(void *arg)
unsigned long flags;
async_tx_ack(sport->dma_rx_desc);
+ mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
spin_lock_irqsave(&sport->port.lock, flags);
@@ -506,9 +517,6 @@ static inline void lpuart_prepare_rx(struct lpuart_port *sport)
spin_lock_irqsave(&sport->port.lock, flags);
- init_timer(&sport->lpuart_timer);
- sport->lpuart_timer.function = lpuart_timer_func;
- sport->lpuart_timer.data = (unsigned long)sport;
sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
add_timer(&sport->lpuart_timer);
@@ -571,7 +579,7 @@ static void lpuart_start_tx(struct uart_port *port)
temp = readb(port->membase + UARTCR2);
writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
- if (sport->lpuart_dma_use) {
+ if (sport->lpuart_dma_tx_use) {
if (!uart_circ_empty(xmit) && !sport->dma_tx_in_progress)
lpuart_prepare_tx(sport);
} else {
@@ -758,19 +766,19 @@ out:
static irqreturn_t lpuart_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned char sts;
+ unsigned char sts, crdma;
sts = readb(sport->port.membase + UARTSR1);
+ crdma = readb(sport->port.membase + UARTCR5);
- if (sts & UARTSR1_RDRF) {
- if (sport->lpuart_dma_use)
+ if (sts & UARTSR1_RDRF && !(crdma & UARTCR5_RDMAS)) {
+ if (sport->lpuart_dma_rx_use)
lpuart_prepare_rx(sport);
else
lpuart_rxint(irq, dev_id);
}
- if (sts & UARTSR1_TDRE &&
- !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS)) {
- if (sport->lpuart_dma_use)
+ if (sts & UARTSR1_TDRE && !(crdma & UARTCR5_TDMAS)) {
+ if (sport->lpuart_dma_tx_use)
lpuart_pio_tx(sport);
else
lpuart_txint(irq, dev_id);
@@ -953,26 +961,17 @@ static int lpuart_dma_tx_request(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- struct dma_chan *tx_chan;
struct dma_slave_config dma_tx_sconfig;
dma_addr_t dma_bus;
unsigned char *dma_buf;
int ret;
- tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
-
- if (!tx_chan) {
- dev_err(sport->port.dev, "Dma tx channel request failed!\n");
- return -ENODEV;
- }
-
- dma_bus = dma_map_single(tx_chan->device->dev,
+ dma_bus = dma_map_single(sport->dma_tx_chan->device->dev,
sport->port.state->xmit.buf,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chan->device->dev, dma_bus)) {
+ if (dma_mapping_error(sport->dma_tx_chan->device->dev, dma_bus)) {
dev_err(sport->port.dev, "dma_map_single tx failed\n");
- dma_release_channel(tx_chan);
return -ENOMEM;
}
@@ -981,16 +980,14 @@ static int lpuart_dma_tx_request(struct uart_port *port)
dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_tx_sconfig.dst_maxburst = sport->txfifo_size;
dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
- ret = dmaengine_slave_config(tx_chan, &dma_tx_sconfig);
+ ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
if (ret < 0) {
dev_err(sport->port.dev,
"Dma slave config failed, err = %d\n", ret);
- dma_release_channel(tx_chan);
return ret;
}
- sport->dma_tx_chan = tx_chan;
sport->dma_tx_buf_virt = dma_buf;
sport->dma_tx_buf_bus = dma_bus;
sport->dma_tx_in_progress = 0;
@@ -1002,34 +999,24 @@ static int lpuart_dma_rx_request(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- struct dma_chan *rx_chan;
struct dma_slave_config dma_rx_sconfig;
dma_addr_t dma_bus;
unsigned char *dma_buf;
int ret;
- rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
-
- if (!rx_chan) {
- dev_err(sport->port.dev, "Dma rx channel request failed!\n");
- return -ENODEV;
- }
-
dma_buf = devm_kzalloc(sport->port.dev,
FSL_UART_RX_DMA_BUFFER_SIZE, GFP_KERNEL);
if (!dma_buf) {
dev_err(sport->port.dev, "Dma rx alloc failed\n");
- dma_release_channel(rx_chan);
return -ENOMEM;
}
- dma_bus = dma_map_single(rx_chan->device->dev, dma_buf,
+ dma_bus = dma_map_single(sport->dma_rx_chan->device->dev, dma_buf,
FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_chan->device->dev, dma_bus)) {
+ if (dma_mapping_error(sport->dma_rx_chan->device->dev, dma_bus)) {
dev_err(sport->port.dev, "dma_map_single rx failed\n");
- dma_release_channel(rx_chan);
return -ENOMEM;
}
@@ -1037,16 +1024,14 @@ static int lpuart_dma_rx_request(struct uart_port *port)
dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_rx_sconfig.src_maxburst = 1;
dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
- ret = dmaengine_slave_config(rx_chan, &dma_rx_sconfig);
+ ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
if (ret < 0) {
dev_err(sport->port.dev,
"Dma slave config failed, err = %d\n", ret);
- dma_release_channel(rx_chan);
return ret;
}
- sport->dma_rx_chan = rx_chan;
sport->dma_rx_buf_virt = dma_buf;
sport->dma_rx_buf_bus = dma_bus;
sport->dma_rx_in_progress = 0;
@@ -1058,31 +1043,24 @@ static void lpuart_dma_tx_free(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- struct dma_chan *dma_chan;
dma_unmap_single(sport->port.dev, sport->dma_tx_buf_bus,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- dma_chan = sport->dma_tx_chan;
- sport->dma_tx_chan = NULL;
+
sport->dma_tx_buf_bus = 0;
sport->dma_tx_buf_virt = NULL;
- dma_release_channel(dma_chan);
}
static void lpuart_dma_rx_free(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- struct dma_chan *dma_chan;
dma_unmap_single(sport->port.dev, sport->dma_rx_buf_bus,
FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
- dma_chan = sport->dma_rx_chan;
- sport->dma_rx_chan = NULL;
sport->dma_rx_buf_bus = 0;
sport->dma_rx_buf_virt = NULL;
- dma_release_channel(dma_chan);
}
static int lpuart_startup(struct uart_port *port)
@@ -1101,14 +1079,21 @@ static int lpuart_startup(struct uart_port *port)
sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
- /* Whether use dma support by dma request results */
- if (lpuart_dma_tx_request(port) || lpuart_dma_rx_request(port)) {
- sport->lpuart_dma_use = false;
- } else {
- sport->lpuart_dma_use = true;
+ if (sport->dma_rx_chan && !lpuart_dma_rx_request(port)) {
+ sport->lpuart_dma_rx_use = true;
+ setup_timer(&sport->lpuart_timer, lpuart_timer_func,
+ (unsigned long)sport);
+ } else
+ sport->lpuart_dma_rx_use = false;
+
+
+ if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
+ sport->lpuart_dma_tx_use = true;
temp = readb(port->membase + UARTCR5);
+ temp &= ~UARTCR5_RDMAS;
writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
- }
+ } else
+ sport->lpuart_dma_tx_use = false;
ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
DRIVER_NAME, sport);
@@ -1179,10 +1164,13 @@ static void lpuart_shutdown(struct uart_port *port)
devm_free_irq(port->dev, port->irq, sport);
- if (sport->lpuart_dma_use) {
- lpuart_dma_tx_free(port);
- lpuart_dma_rx_free(port);
+ if (sport->lpuart_dma_rx_use) {
+ lpuart_dma_rx_free(&sport->port);
+ del_timer_sync(&sport->lpuart_timer);
}
+
+ if (sport->lpuart_dma_tx_use)
+ lpuart_dma_tx_free(&sport->port);
}
static void lpuart32_shutdown(struct uart_port *port)
@@ -1304,7 +1292,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- if (sport->lpuart_dma_use) {
+ if (sport->lpuart_dma_rx_use) {
/* Calculate delay for 1.5 DMA buffers */
sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) *
FSL_UART_RX_DMA_BUFFER_SIZE * 3 /
@@ -1517,6 +1505,7 @@ static struct uart_ops lpuart_pops = {
.release_port = lpuart_release_port,
.config_port = lpuart_config_port,
.verify_port = lpuart_verify_port,
+ .flush_buffer = lpuart_flush_buffer,
};
static struct uart_ops lpuart32_pops = {
@@ -1535,6 +1524,7 @@ static struct uart_ops lpuart32_pops = {
.release_port = lpuart_release_port,
.config_port = lpuart_config_port,
.verify_port = lpuart_verify_port,
+ .flush_buffer = lpuart_flush_buffer,
};
static struct lpuart_port *lpuart_ports[UART_NR];
@@ -1833,6 +1823,16 @@ static int lpuart_probe(struct platform_device *pdev)
return ret;
}
+ sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
+ if (!sport->dma_tx_chan)
+ dev_info(sport->port.dev, "DMA tx channel request failed, "
+ "operating without tx DMA\n");
+
+ sport->dma_rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
+ if (!sport->dma_rx_chan)
+ dev_info(sport->port.dev, "DMA rx channel request failed, "
+ "operating without rx DMA\n");
+
return 0;
}
@@ -1844,6 +1844,12 @@ static int lpuart_remove(struct platform_device *pdev)
clk_disable_unprepare(sport->clk);
+ if (sport->dma_tx_chan)
+ dma_release_channel(sport->dma_tx_chan);
+
+ if (sport->dma_rx_chan)
+ dma_release_channel(sport->dma_rx_chan);
+
return 0;
}
@@ -1851,6 +1857,19 @@ static int lpuart_remove(struct platform_device *pdev)
static int lpuart_suspend(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
+ unsigned long temp;
+
+ if (sport->lpuart32) {
+ /* disable Rx/Tx and interrupts */
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ } else {
+ /* disable Rx/Tx and interrupts */
+ temp = readb(sport->port.membase + UARTCR2);
+ temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
+ writeb(temp, sport->port.membase + UARTCR2);
+ }
uart_suspend_port(&lpuart_reg, &sport->port);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4c5e9092e2d7..0eb29b1c47ac 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -74,6 +74,7 @@
#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
/* UART Control Register Bit Fields.*/
+#define URXD_DUMMY_READ (1<<16)
#define URXD_CHARRDY (1<<15)
#define URXD_ERR (1<<14)
#define URXD_OVRRUN (1<<13)
@@ -463,13 +464,17 @@ static void imx_enable_ms(struct uart_port *port)
mod_timer(&sport->timer, jiffies);
}
+static void imx_dma_tx(struct imx_port *sport);
static inline void imx_transmit_buffer(struct imx_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long temp;
if (sport->port.x_char) {
/* Send next char */
writel(sport->port.x_char, sport->port.membase + URTX0);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
return;
}
@@ -478,6 +483,22 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
return;
}
+ if (sport->dma_is_enabled) {
+ /*
+ * We've just sent an X-char. Ensure the TX DMA is enabled
+ * and the TX IRQ is disabled.
+ */
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~UCR1_TXMPTYEN;
+ if (sport->dma_is_txing) {
+ temp |= UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
+ } else {
+ writel(temp, sport->port.membase + UCR1);
+ imx_dma_tx(sport);
+ }
+ }
+
while (!uart_circ_empty(xmit) &&
!(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
/* send xmit->buf[xmit->tail]
@@ -500,26 +521,39 @@ static void dma_tx_callback(void *data)
struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
+ unsigned long temp;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
- sport->dma_is_txing = 0;
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
/* update the stat */
- spin_lock_irqsave(&sport->port.lock, flags);
xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx += sport->tx_bytes;
- spin_unlock_irqrestore(&sport->port.lock, flags);
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
- uart_write_wakeup(&sport->port);
+ sport->dma_is_txing = 0;
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
if (waitqueue_active(&sport->dma_wait)) {
wake_up(&sport->dma_wait);
dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
return;
}
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
+ imx_dma_tx(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
static void imx_dma_tx(struct imx_port *sport)
@@ -529,24 +563,23 @@ static void imx_dma_tx(struct imx_port *sport)
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
- enum dma_status status;
+ unsigned long temp;
int ret;
- status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
- if (DMA_IN_PROGRESS == status)
+ if (sport->dma_is_txing)
return;
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail > xmit->head && xmit->head > 0) {
+ if (xmit->tail < xmit->head) {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ } else {
sport->dma_tx_nents = 2;
sg_init_table(sgl, 2);
sg_set_buf(sgl, xmit->buf + xmit->tail,
UART_XMIT_SIZE - xmit->tail);
sg_set_buf(sgl + 1, xmit->buf, xmit->head);
- } else {
- sport->dma_tx_nents = 1;
- sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
}
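+ /*
+ * Example (assuming the usual UART_XMIT_SIZE of 4096): with tail = 4090
+ * and head = 10, 16 bytes are pending; sgl[0] then covers the 6 bytes at
+ * the end of the circular buffer and sgl[1] the 10 bytes at its start.
+ */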
ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
@@ -557,6 +590,8 @@ static void imx_dma_tx(struct imx_port *sport)
desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc) {
+ dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
dev_err(dev, "We cannot prepare for the TX slave dma!\n");
return;
}
@@ -565,6 +600,11 @@ static void imx_dma_tx(struct imx_port *sport)
dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
uart_circ_chars_pending(xmit));
+
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
+
/* fire it */
sport->dma_is_txing = 1;
dmaengine_submit(desc);
@@ -590,13 +630,6 @@ static void imx_start_tx(struct uart_port *port)
temp &= ~(UCR1_RRDYEN);
writel(temp, sport->port.membase + UCR1);
}
- /* Clear any pending ORE flag before enabling interrupt */
- temp = readl(sport->port.membase + USR2);
- writel(temp | USR2_ORE, sport->port.membase + USR2);
-
- temp = readl(sport->port.membase + UCR4);
- temp |= UCR4_OREN;
- writel(temp, sport->port.membase + UCR4);
if (!sport->dma_is_enabled) {
temp = readl(sport->port.membase + UCR1);
@@ -614,15 +647,21 @@ static void imx_start_tx(struct uart_port *port)
}
if (sport->dma_is_enabled) {
- /* FIXME: port->x_char must be transmitted if != 0 */
+ if (sport->port.x_char) {
+ /* We have an X-char to send, so enable the TX IRQ and
+ * disable TX DMA to let the TX interrupt send the X-char */
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~UCR1_TDMAEN;
+ temp |= UCR1_TXMPTYEN;
+ writel(temp, sport->port.membase + UCR1);
+ return;
+ }
+
if (!uart_circ_empty(&port->state->xmit) &&
!uart_tx_stopped(port))
imx_dma_tx(sport);
return;
}
-
- if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
- imx_transmit_buffer(sport);
}
static irqreturn_t imx_rtsint(int irq, void *dev_id)
@@ -694,7 +733,7 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
continue;
}
- rx &= sport->port.read_status_mask;
+ rx &= (sport->port.read_status_mask | 0xFF);
if (rx & URXD_BRK)
flg = TTY_BREAK;
@@ -710,6 +749,9 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
#endif
}
+ if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
+ goto out;
+
tty_insert_flip_char(port, rx, flg);
}
@@ -727,6 +769,9 @@ static int start_rx_dma(struct imx_port *sport);
static void imx_dma_rxint(struct imx_port *sport)
{
unsigned long temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
temp = readl(sport->port.membase + USR2);
if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
@@ -740,6 +785,8 @@ static void imx_dma_rxint(struct imx_port *sport)
/* tell the DMA to receive the data. */
start_rx_dma(sport);
}
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
static irqreturn_t imx_int(int irq, void *dev_id)
@@ -869,6 +916,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
static void imx_rx_dma_done(struct imx_port *sport)
{
unsigned long temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
/* Enable this interrupt when the RXFIFO is empty. */
temp = readl(sport->port.membase + UCR1);
@@ -880,6 +930,8 @@ static void imx_rx_dma_done(struct imx_port *sport)
/* Is the shutdown waiting for us? */
if (waitqueue_active(&sport->dma_wait))
wake_up(&sport->dma_wait);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
@@ -910,12 +962,26 @@ static void dma_rx_callback(void *data)
dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
if (count) {
- tty_insert_flip_string(port, sport->rx_buf, count);
+ if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ))
+ tty_insert_flip_string(port, sport->rx_buf, count);
tty_flip_buffer_push(port);
start_rx_dma(sport);
- } else
+ } else if (readl(sport->port.membase + USR2) & USR2_RDR) {
+ /*
+ * Start rx_dma directly once there is data in the RXFIFO; this is
+ * more efficient than the previous approach of
+ * 1. calling imx_rx_dma_done to stop DMA if no data was received, and
+ * 2. waiting for the next RDR interrupt to start the DMA transfer.
+ */
+ start_rx_dma(sport);
+ } else {
+ /*
+ * Stop DMA to prevent too many IDLE events being triggered if there
+ * is no data in the RXFIFO
+ */
imx_rx_dma_done(sport);
+ }
}
static int start_rx_dma(struct imx_port *sport)
@@ -935,6 +1001,7 @@ static int start_rx_dma(struct imx_port *sport)
desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc) {
+ dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
dev_err(dev, "We cannot prepare for the RX slave dma!\n");
return -EINVAL;
}
@@ -1108,12 +1175,20 @@ static int imx_startup(struct uart_port *port)
while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
udelay(1);
+ /* Can we enable the DMA support? */
+ if (is_imx6q_uart(sport) && !uart_console(port) &&
+ !sport->dma_is_inited)
+ imx_uart_dma_init(sport);
+
spin_lock_irqsave(&sport->port.lock, flags);
/*
* Finally, clear and enable interrupts
*/
writel(USR1_RTSD, sport->port.membase + USR1);
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
+
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
@@ -1124,6 +1199,14 @@ static int imx_startup(struct uart_port *port)
writel(temp, sport->port.membase + UCR1);
+ /* Clear any pending ORE flag before enabling interrupt */
+ temp = readl(sport->port.membase + USR2);
+ writel(temp | USR2_ORE, sport->port.membase + USR2);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_OREN;
+ writel(temp, sport->port.membase + UCR4);
+
temp = readl(sport->port.membase + UCR2);
temp |= (UCR2_RXEN | UCR2_TXEN);
if (!sport->have_rtscts)
@@ -1189,9 +1272,11 @@ static void imx_shutdown(struct uart_port *port)
dmaengine_terminate_all(sport->dma_chan_tx);
dmaengine_terminate_all(sport->dma_chan_rx);
}
+ spin_lock_irqsave(&sport->port.lock, flags);
imx_stop_tx(port);
imx_stop_rx(port);
imx_disable_dma(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
imx_uart_dma_exit(sport);
}
@@ -1233,11 +1318,48 @@ static void imx_shutdown(struct uart_port *port)
static void imx_flush_buffer(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
+ struct scatterlist *sgl = &sport->tx_sgl[0];
+ unsigned long temp;
+ int i = 100, ubir, ubmr, ubrc, uts;
- if (sport->dma_is_enabled) {
- sport->tx_bytes = 0;
- dmaengine_terminate_all(sport->dma_chan_tx);
+ if (!sport->dma_chan_tx)
+ return;
+
+ sport->tx_bytes = 0;
+ dmaengine_terminate_all(sport->dma_chan_tx);
+ if (sport->dma_is_txing) {
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
+ sport->dma_is_txing = false;
}
+
+ /*
+ * According to the Reference Manual description of the UART SRST bit:
+ * "Reset the transmit and receive state machines,
+ * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
+ * and UTS[6-3]". As we don't need to restore the old values from
+ * USR1, USR2, URXD, UTXD, only save/restore the other four registers
+ */
+ ubir = readl(sport->port.membase + UBIR);
+ ubmr = readl(sport->port.membase + UBMR);
+ ubrc = readl(sport->port.membase + UBRC);
+ uts = readl(sport->port.membase + IMX21_UTS);
+
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~UCR2_SRST;
+ writel(temp, sport->port.membase + UCR2);
+
+ while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
+ udelay(1);
+
+ /* Restore the registers */
+ writel(ubir, sport->port.membase + UBIR);
+ writel(ubmr, sport->port.membase + UBMR);
+ writel(ubrc, sport->port.membase + UBRC);
+ writel(uts, sport->port.membase + IMX21_UTS);
}
static void
@@ -1280,11 +1402,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (sport->have_rtscts) {
ucr2 &= ~UCR2_IRTS;
ucr2 |= UCR2_CTSC;
-
- /* Can we enable the DMA support? */
- if (is_imx6q_uart(sport) && !uart_console(port)
- && !sport->dma_is_inited)
- imx_uart_dma_init(sport);
} else {
termios->c_cflag &= ~CRTSCTS;
}
@@ -1319,7 +1436,7 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
*/
sport->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= URXD_PRERR;
+ sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
if (termios->c_iflag & IGNBRK) {
sport->port.ignore_status_mask |= URXD_BRK;
/*
@@ -1330,6 +1447,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.ignore_status_mask |= URXD_OVRRUN;
}
+ if ((termios->c_cflag & CREAD) == 0)
+ sport->port.ignore_status_mask |= URXD_DUMMY_READ;
+
/*
* Update the per-port timeout.
*/
@@ -1403,8 +1523,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_enable_ms(&sport->port);
- if (sport->dma_is_inited && !sport->dma_is_enabled)
- imx_enable_dma(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 10496672dfdb..a9b0ab38a68c 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -3,7 +3,7 @@
/*
* mcf.c -- Freescale ColdFire UART driver
*
- * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@uclinux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -198,7 +198,6 @@ static void mcf_shutdown(struct uart_port *port)
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
unsigned int baud, baudclk;
#if defined(CONFIG_M5272)
@@ -441,7 +440,6 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/* Enable or disable the RS485 support */
static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
- struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned char mr1, mr2;
/* Get mode registers */
@@ -631,6 +629,7 @@ static int mcf_probe(struct platform_device *pdev)
port->mapbase = platp[i].mapbase;
port->membase = (platp[i].membase) ? platp[i].membase :
(unsigned char __iomem *) platp[i].mapbase;
+ port->dev = &pdev->dev;
port->iotype = SERIAL_IO_MEM;
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
@@ -702,7 +701,7 @@ static void __exit mcf_exit(void)
module_init(mcf_init);
module_exit(mcf_exit);
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
+MODULE_AUTHOR("Greg Ungerer <gerg@uclinux.org>");
MODULE_DESCRIPTION("Freescale ColdFire UART driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mcfuart");
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 517cd073dc08..35c55505b3eb 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -23,7 +23,6 @@
#define MEN_Z135_MAX_PORTS 12
#define MEN_Z135_BASECLK 29491200
#define MEN_Z135_FIFO_SIZE 1024
-#define MEN_Z135_NUM_MSI_VECTORS 2
#define MEN_Z135_FIFO_WATERMARK 1020
#define MEN_Z135_STAT_REG 0x0
@@ -34,12 +33,11 @@
#define MEN_Z135_CONF_REG 0x808
#define MEN_Z135_UART_FREQ 0x80c
#define MEN_Z135_BAUD_REG 0x810
-#define MENZ135_TIMEOUT 0x814
+#define MEN_Z135_TIMEOUT 0x814
#define MEN_Z135_MEM_SIZE 0x818
-#define IS_IRQ(x) ((x) & 1)
-#define IRQ_ID(x) (((x) >> 1) & 7)
+#define IRQ_ID(x) ((x) & 0x1f)
#define MEN_Z135_IER_RXCIEN BIT(0) /* RX Space IRQ */
#define MEN_Z135_IER_TXCIEN BIT(1) /* TX Space IRQ */
@@ -94,11 +92,11 @@
#define MEN_Z135_LSR_TEXP BIT(6)
#define MEN_Z135_LSR_RXFIFOERR BIT(7)
-#define MEN_Z135_IRQ_ID_MST 0
-#define MEN_Z135_IRQ_ID_TSA 1
-#define MEN_Z135_IRQ_ID_RDA 2
-#define MEN_Z135_IRQ_ID_RLS 3
-#define MEN_Z135_IRQ_ID_CTI 6
+#define MEN_Z135_IRQ_ID_RLS BIT(0)
+#define MEN_Z135_IRQ_ID_RDA BIT(1)
+#define MEN_Z135_IRQ_ID_CTI BIT(2)
+#define MEN_Z135_IRQ_ID_TSA BIT(3)
+#define MEN_Z135_IRQ_ID_MST BIT(4)
#define LCR(x) (((x) >> MEN_Z135_LCR_SHIFT) & 0xff)
@@ -118,12 +116,18 @@ static int align;
module_param(align, int, S_IRUGO);
MODULE_PARM_DESC(align, "Keep hardware FIFO write pointer aligned, default 0");
+static uint rx_timeout;
+module_param(rx_timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_timeout, "RX timeout. "
+ "Timeout in seconds = (timeout_reg * baud_reg * 4) / freq_reg");
+
struct men_z135_port {
struct uart_port port;
struct mcb_device *mdev;
unsigned char *rxbuf;
u32 stat_reg;
spinlock_t lock;
+ bool automode;
};
#define to_men_z135(port) container_of((port), struct men_z135_port, port)
@@ -180,12 +184,16 @@ static inline void men_z135_reg_clr(struct men_z135_port *uart,
*/
static void men_z135_handle_modem_status(struct men_z135_port *uart)
{
- if (uart->stat_reg & MEN_Z135_MSR_DDCD)
+ u8 msr;
+
+ msr = (uart->stat_reg >> 8) & 0xff;
+
+ if (msr & MEN_Z135_MSR_DDCD)
uart_handle_dcd_change(&uart->port,
- uart->stat_reg & ~MEN_Z135_MSR_DCD);
- if (uart->stat_reg & MEN_Z135_MSR_DCTS)
+ msr & MEN_Z135_MSR_DCD);
+ if (msr & MEN_Z135_MSR_DCTS)
uart_handle_cts_change(&uart->port,
- uart->stat_reg & ~MEN_Z135_MSR_CTS);
+ msr & MEN_Z135_MSR_CTS);
}
static void men_z135_handle_lsr(struct men_z135_port *uart)
@@ -322,7 +330,8 @@ static void men_z135_handle_tx(struct men_z135_port *uart)
txfree = MEN_Z135_FIFO_WATERMARK - txc;
if (txfree <= 0) {
- pr_err("Not enough room in TX FIFO have %d, need %d\n",
+ dev_err(&uart->mdev->dev,
+ "Not enough room in TX FIFO have %d, need %d\n",
txfree, qlen);
goto irq_en;
}
@@ -373,43 +382,54 @@ out:
* @irq: The IRQ number
* @data: Pointer to UART port
*
- * Check IIR register to see which tasklet to start.
+ * Check IIR register to find the cause of the interrupt and handle it.
+ * It is possible that multiple interrupt reason bits are set, and reading
+ * the IIR is a destructive read, so we always need to check for all possible
+ * interrupts and handle them.
*/
static irqreturn_t men_z135_intr(int irq, void *data)
{
struct men_z135_port *uart = (struct men_z135_port *)data;
struct uart_port *port = &uart->port;
+ bool handled = false;
+ unsigned long flags;
int irq_id;
uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
- /* IRQ pending is low active */
- if (IS_IRQ(uart->stat_reg))
- return IRQ_NONE;
-
irq_id = IRQ_ID(uart->stat_reg);
- switch (irq_id) {
- case MEN_Z135_IRQ_ID_MST:
- men_z135_handle_modem_status(uart);
- break;
- case MEN_Z135_IRQ_ID_TSA:
- men_z135_handle_tx(uart);
- break;
- case MEN_Z135_IRQ_ID_CTI:
- dev_dbg(&uart->mdev->dev, "Character Timeout Indication\n");
- /* Fallthrough */
- case MEN_Z135_IRQ_ID_RDA:
- /* Reading data clears RX IRQ */
- men_z135_handle_rx(uart);
- break;
- case MEN_Z135_IRQ_ID_RLS:
+
+ if (!irq_id)
+ goto out;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* It's safe to write to IIR[7:6] RXC[9:8] */
+ iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
+
+ if (irq_id & MEN_Z135_IRQ_ID_RLS) {
men_z135_handle_lsr(uart);
- break;
- default:
- dev_warn(&uart->mdev->dev, "Unknown IRQ id %d\n", irq_id);
- return IRQ_NONE;
+ handled = true;
+ }
+
+ if (irq_id & (MEN_Z135_IRQ_ID_RDA | MEN_Z135_IRQ_ID_CTI)) {
+ if (irq_id & MEN_Z135_IRQ_ID_CTI)
+ dev_dbg(&uart->mdev->dev, "Character Timeout Indication\n");
+ men_z135_handle_rx(uart);
+ handled = true;
+ }
+
+ if (irq_id & MEN_Z135_IRQ_ID_TSA) {
+ men_z135_handle_tx(uart);
+ handled = true;
}
- return IRQ_HANDLED;
+ if (irq_id & MEN_Z135_IRQ_ID_MST) {
+ men_z135_handle_modem_status(uart);
+ handled = true;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+out:
+ return IRQ_RETVAL(handled);
}
/**
@@ -464,21 +484,37 @@ static unsigned int men_z135_tx_empty(struct uart_port *port)
*/
static void men_z135_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct men_z135_port *uart = to_men_z135(port);
- u32 conf_reg = 0;
+ u32 old;
+ u32 conf_reg;
+ conf_reg = old = ioread32(port->membase + MEN_Z135_CONF_REG);
if (mctrl & TIOCM_RTS)
conf_reg |= MEN_Z135_MCR_RTS;
+ else
+ conf_reg &= ~MEN_Z135_MCR_RTS;
+
if (mctrl & TIOCM_DTR)
conf_reg |= MEN_Z135_MCR_DTR;
+ else
+ conf_reg &= ~MEN_Z135_MCR_DTR;
+
if (mctrl & TIOCM_OUT1)
conf_reg |= MEN_Z135_MCR_OUT1;
+ else
+ conf_reg &= ~MEN_Z135_MCR_OUT1;
+
if (mctrl & TIOCM_OUT2)
conf_reg |= MEN_Z135_MCR_OUT2;
+ else
+ conf_reg &= ~MEN_Z135_MCR_OUT2;
+
if (mctrl & TIOCM_LOOP)
conf_reg |= MEN_Z135_MCR_LOOP;
+ else
+ conf_reg &= ~MEN_Z135_MCR_LOOP;
- men_z135_reg_set(uart, MEN_Z135_CONF_REG, conf_reg);
+ if (conf_reg != old)
+ iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG);
}
/**
@@ -490,12 +526,9 @@ static void men_z135_set_mctrl(struct uart_port *port, unsigned int mctrl)
static unsigned int men_z135_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = 0;
- u32 stat_reg;
u8 msr;
- stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
-
- msr = ~((stat_reg >> 8) & 0xff);
+ msr = ioread8(port->membase + MEN_Z135_STAT_REG + 1);
if (msr & MEN_Z135_MSR_CTS)
mctrl |= TIOCM_CTS;
@@ -524,6 +557,19 @@ static void men_z135_stop_tx(struct uart_port *port)
men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN);
}
+/**
+ * men_z135_disable_ms() - Disable Modem Status
+ * @port: The UART port
+ *
+ * Disable the Modem Status IRQ.
+ */
+static void men_z135_disable_ms(struct uart_port *port)
+{
+ struct men_z135_port *uart = to_men_z135(port);
+
+ men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_MSIEN);
+}
+
/**
* men_z135_start_tx() - Start transmitting characters
* @port: The UART port
@@ -535,6 +581,9 @@ static void men_z135_start_tx(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
+ if (uart->automode)
+ men_z135_disable_ms(port);
+
men_z135_handle_tx(uart);
}
@@ -584,6 +633,9 @@ static int men_z135_startup(struct uart_port *port)
iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG);
+ if (rx_timeout)
+ iowrite32(rx_timeout, port->membase + MEN_Z135_TIMEOUT);
+
return 0;
}
@@ -603,6 +655,7 @@ static void men_z135_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
+ struct men_z135_port *uart = to_men_z135(port);
unsigned int baud;
u32 conf_reg;
u32 bd_reg;
@@ -643,6 +696,16 @@ static void men_z135_set_termios(struct uart_port *port,
} else
lcr |= MEN_Z135_PAR_DIS << MEN_Z135_PEN_SHIFT;
+ conf_reg |= MEN_Z135_IER_MSIEN;
+ if (termios->c_cflag & CRTSCTS) {
+ conf_reg |= MEN_Z135_MCR_RCFC;
+ uart->automode = true;
+ termios->c_cflag &= ~CLOCAL;
+ } else {
+ conf_reg &= ~MEN_Z135_MCR_RCFC;
+ uart->automode = false;
+ }
+
termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
conf_reg |= lcr << MEN_Z135_LCR_SHIFT;
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
deleted file mode 100644
index 77239d5e620d..000000000000
--- a/drivers/tty/serial/mrst_max3110.c
+++ /dev/null
@@ -1,909 +0,0 @@
-/*
- * mrst_max3110.c - spi uart protocol driver for Maxim 3110
- *
- * Copyright (c) 2008-2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/*
- * Note:
- * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
- * 1 word. If SPI master controller doesn't support sclk frequency change,
- * then the char need be sent out one by one with some delay
- *
- * 2. Currently only RX available interrupt is used, no need for waiting TXE
- * interrupt for a low speed UART device
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#ifdef CONFIG_MAGIC_SYSRQ
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial_reg.h>
-
-#include <linux/kthread.h>
-#include <linux/spi/spi.h>
-#include <linux/pm.h>
-
-#include "mrst_max3110.h"
-
-#define UART_TX_NEEDED 1
-#define CON_TX_NEEDED 2
-#define BIT_IRQ_PENDING 3
-
-struct uart_max3110 {
- struct uart_port port;
- struct spi_device *spi;
- char name[SPI_NAME_SIZE];
-
- wait_queue_head_t wq;
- struct task_struct *main_thread;
- struct task_struct *read_thread;
- struct mutex thread_mutex;
- struct mutex io_mutex;
-
- u32 baud;
- u16 cur_conf;
- u8 clock;
- u8 parity, word_7bits;
- u16 irq;
-
- unsigned long uart_flags;
-
- /* console related */
- struct circ_buf con_xmit;
-};
-
-/* global data structure, may need be removed */
-static struct uart_max3110 *pmax;
-
-static int receive_chars(struct uart_max3110 *max,
- unsigned short *str, int len);
-static int max3110_read_multi(struct uart_max3110 *max);
-static void max3110_con_receive(struct uart_max3110 *max);
-
-static int max3110_write_then_read(struct uart_max3110 *max,
- const void *txbuf, void *rxbuf, unsigned len, int always_fast)
-{
- struct spi_device *spi = max->spi;
- struct spi_message message;
- struct spi_transfer x;
- int ret;
-
- mutex_lock(&max->io_mutex);
- spi_message_init(&message);
- memset(&x, 0, sizeof x);
- x.len = len;
- x.tx_buf = txbuf;
- x.rx_buf = rxbuf;
- spi_message_add_tail(&x, &message);
-
- if (always_fast)
- x.speed_hz = spi->max_speed_hz;
- else if (max->baud)
- x.speed_hz = max->baud;
-
- /* Do the i/o */
- ret = spi_sync(spi, &message);
- mutex_unlock(&max->io_mutex);
- return ret;
-}
-
-/* Write a 16b word to the device */
-static int max3110_out(struct uart_max3110 *max, const u16 out)
-{
- void *buf;
- u16 *obuf, *ibuf;
- int ret;
-
- buf = kzalloc(8, GFP_KERNEL | GFP_DMA);
- if (!buf)
- return -ENOMEM;
-
- obuf = buf;
- ibuf = buf + 4;
- *obuf = out;
- ret = max3110_write_then_read(max, obuf, ibuf, 2, 1);
- if (ret) {
- pr_warn("%s: get err msg %d when sending 0x%x\n",
- __func__, ret, out);
- goto exit;
- }
-
- receive_chars(max, ibuf, 1);
-
-exit:
- kfree(buf);
- return ret;
-}
-
-/*
- * This is usually used to read data from SPIC RX FIFO, which doesn't
- * need any delay like flushing character out.
- *
- * Return how many valide bytes are read back
- */
-static int max3110_read_multi(struct uart_max3110 *max)
-{
- void *buf;
- u16 *obuf, *ibuf;
- int ret, blen;
-
- blen = M3110_RX_FIFO_DEPTH * sizeof(u16);
- buf = kzalloc(blen * 2, GFP_KERNEL | GFP_DMA);
- if (!buf)
- return 0;
-
- /* tx/rx always have the same length */
- obuf = buf;
- ibuf = buf + blen;
-
- if (max3110_write_then_read(max, obuf, ibuf, blen, 1)) {
- kfree(buf);
- return 0;
- }
-
- ret = receive_chars(max, ibuf, M3110_RX_FIFO_DEPTH);
-
- kfree(buf);
- return ret;
-}
-
-static void serial_m3110_con_putchar(struct uart_port *port, int ch)
-{
- struct uart_max3110 *max =
- container_of(port, struct uart_max3110, port);
- struct circ_buf *xmit = &max->con_xmit;
-
- if (uart_circ_chars_free(xmit)) {
- xmit->buf[xmit->head] = (char)ch;
- xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
- }
-}
-
-/*
- * Print a string to the serial port trying not to disturb
- * any possible real use of the port...
- *
- * The console_lock must be held when we get here.
- */
-static void serial_m3110_con_write(struct console *co,
- const char *s, unsigned int count)
-{
- if (!pmax)
- return;
-
- uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
-
- if (!test_and_set_bit(CON_TX_NEEDED, &pmax->uart_flags))
- wake_up(&pmax->wq);
-}
-
-static int __init
-serial_m3110_con_setup(struct console *co, char *options)
-{
- struct uart_max3110 *max = pmax;
- int baud = 115200;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- pr_info("setting up console\n");
-
- if (co->index == -1)
- co->index = 0;
-
- if (!max) {
- pr_err("pmax is NULL, return\n");
- return -ENODEV;
- }
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(&max->port, co, baud, parity, bits, flow);
-}
-
-static struct tty_driver *serial_m3110_con_device(struct console *co,
- int *index)
-{
- struct uart_driver *p = co->data;
- *index = co->index;
- return p->tty_driver;
-}
-
-static struct uart_driver serial_m3110_reg;
-static struct console serial_m3110_console = {
- .name = "ttyS",
- .write = serial_m3110_con_write,
- .device = serial_m3110_con_device,
- .setup = serial_m3110_con_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &serial_m3110_reg,
-};
-
-static unsigned int serial_m3110_tx_empty(struct uart_port *port)
-{
- return 1;
-}
-
-static void serial_m3110_stop_tx(struct uart_port *port)
-{
- return;
-}
-
-/* stop_rx will be called in spin_lock env */
-static void serial_m3110_stop_rx(struct uart_port *port)
-{
- return;
-}
-
-#define WORDS_PER_XFER 128
-static void send_circ_buf(struct uart_max3110 *max,
- struct circ_buf *xmit)
-{
- void *buf;
- u16 *obuf, *ibuf;
- int i, len, blen, dma_size, left, ret = 0;
-
-
- dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
- buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
- if (!buf)
- return;
- obuf = buf;
- ibuf = buf + dma_size/2;
-
- while (!uart_circ_empty(xmit)) {
- left = uart_circ_chars_pending(xmit);
- while (left) {
- len = min(left, WORDS_PER_XFER);
- blen = len * sizeof(u16);
- memset(ibuf, 0, blen);
-
- for (i = 0; i < len; i++) {
- obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
- xmit->tail = (xmit->tail + 1) &
- (UART_XMIT_SIZE - 1);
- }
-
- /* Fail to send msg to console is not very critical */
-
- ret = max3110_write_then_read(max, obuf, ibuf, blen, 0);
- if (ret)
- pr_warn("%s: get err msg %d\n", __func__, ret);
-
- receive_chars(max, ibuf, len);
-
- max->port.icount.tx += len;
- left -= len;
- }
- }
-
- kfree(buf);
-}
-
-static void transmit_char(struct uart_max3110 *max)
-{
- struct uart_port *port = &max->port;
- struct circ_buf *xmit = &port->state->xmit;
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port))
- return;
-
- send_circ_buf(max, xmit);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- serial_m3110_stop_tx(port);
-}
-
-/*
- * This will be called by uart_write() and tty_write, can't
- * go to sleep
- */
-static void serial_m3110_start_tx(struct uart_port *port)
-{
- struct uart_max3110 *max =
- container_of(port, struct uart_max3110, port);
-
- if (!test_and_set_bit(UART_TX_NEEDED, &max->uart_flags))
- wake_up(&max->wq);
-}
-
-static int
-receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
-{
- struct uart_port *port = &max->port;
- struct tty_port *tport;
- char buf[M3110_RX_FIFO_DEPTH];
- int r, w, usable;
-
- /* If uart is not opened, just return */
- if (!port->state)
- return 0;
-
- tport = &port->state->port;
-
- for (r = 0, w = 0; r < len; r++) {
- if (str[r] & MAX3110_BREAK &&
- uart_handle_break(port))
- continue;
-
- if (str[r] & MAX3110_READ_DATA_AVAILABLE) {
- if (uart_handle_sysrq_char(port, str[r] & 0xff))
- continue;
-
- buf[w++] = str[r] & 0xff;
- }
- }
-
- if (!w)
- return 0;
-
- for (r = 0; w; r += usable, w -= usable) {
- usable = tty_buffer_request_room(tport, w);
- if (usable) {
- tty_insert_flip_string(tport, buf + r, usable);
- port->icount.rx += usable;
- }
- }
- tty_flip_buffer_push(tport);
-
- return r;
-}
-
-/*
- * This routine will be used in read_thread or RX IRQ handling,
- * it will first do one round buffer read(8 words), if there is some
- * valid RX data, will try to read 5 more rounds till all data
- * is read out.
- *
- * Use stack space as data buffer to save some system load, and chose
- * 504 Btyes as a threadhold to do a bulk push to upper tty layer when
- * receiving bulk data, a much bigger buffer may cause stack overflow
- */
-static void max3110_con_receive(struct uart_max3110 *max)
-{
- int loop = 1, num;
-
- do {
- num = max3110_read_multi(max);
-
- if (num) {
- loop = 5;
- }
- } while (--loop);
-}
-
-static int max3110_main_thread(void *_max)
-{
- struct uart_max3110 *max = _max;
- wait_queue_head_t *wq = &max->wq;
- int ret = 0;
- struct circ_buf *xmit = &max->con_xmit;
-
- pr_info("start main thread\n");
-
- do {
- wait_event_interruptible(*wq,
- max->uart_flags || kthread_should_stop());
-
- mutex_lock(&max->thread_mutex);
-
- if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
- max3110_con_receive(max);
-
- /* first handle console output */
- if (test_and_clear_bit(CON_TX_NEEDED, &max->uart_flags))
- send_circ_buf(max, xmit);
-
- /* handle uart output */
- if (test_and_clear_bit(UART_TX_NEEDED, &max->uart_flags))
- transmit_char(max);
-
- mutex_unlock(&max->thread_mutex);
-
- } while (!kthread_should_stop());
-
- return ret;
-}
-
-static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
-{
- struct uart_max3110 *max = dev_id;
-
- /* max3110's irq is a falling edge, not level triggered,
- * so no need to disable the irq */
-
- if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
- wake_up(&max->wq);
-
- return IRQ_HANDLED;
-}
-
-/* if don't use RX IRQ, then need a thread to polling read */
-static int max3110_read_thread(void *_max)
-{
- struct uart_max3110 *max = _max;
-
- pr_info("start read thread\n");
- do {
- /*
- * If can't acquire the mutex, it means the main thread
- * is running which will also perform the rx job
- */
- if (mutex_trylock(&max->thread_mutex)) {
- max3110_con_receive(max);
- mutex_unlock(&max->thread_mutex);
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 20);
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-static int serial_m3110_startup(struct uart_port *port)
-{
- struct uart_max3110 *max =
- container_of(port, struct uart_max3110, port);
- u16 config = 0;
- int ret = 0;
-
- if (port->line != 0) {
- pr_err("uart port startup failed\n");
- return -1;
- }
-
- /* Disable all IRQ and config it to 115200, 8n1 */
- config = WC_TAG | WC_FIFO_ENABLE
- | WC_1_STOPBITS
- | WC_8BIT_WORD
- | WC_BAUD_DR2;
-
- /* as we use thread to handle tx/rx, need set low latency */
- port->state->port.low_latency = 1;
-
- if (max->irq) {
- /* Enable RX IRQ only */
- config |= WC_RXA_IRQ_ENABLE;
- } else {
- /* If IRQ is disabled, start a read thread for input data */
- max->read_thread =
- kthread_run(max3110_read_thread, max, "max3110_read");
- if (IS_ERR(max->read_thread)) {
- ret = PTR_ERR(max->read_thread);
- max->read_thread = NULL;
- pr_err("Can't create read thread!\n");
- return ret;
- }
- }
-
- ret = max3110_out(max, config);
- if (ret) {
- if (max->read_thread)
- kthread_stop(max->read_thread);
- max->read_thread = NULL;
- return ret;
- }
-
- max->cur_conf = config;
- return 0;
-}
-
-static void serial_m3110_shutdown(struct uart_port *port)
-{
- struct uart_max3110 *max =
- container_of(port, struct uart_max3110, port);
- u16 config;
-
- if (max->read_thread) {
- kthread_stop(max->read_thread);
- max->read_thread = NULL;
- }
-
- /* Disable interrupts from this port */
- config = WC_TAG | WC_SW_SHDI;
- max3110_out(max, config);
-}
-
-static void serial_m3110_release_port(struct uart_port *port)
-{
-}
-
-static int serial_m3110_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-static void serial_m3110_config_port(struct uart_port *port, int flags)
-{
- port->type = PORT_MAX3100;
-}
-
-static int
-serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- /* we don't want the core code to modify any port params */
- return -EINVAL;
-}
-
-
-static const char *serial_m3110_type(struct uart_port *port)
-{
- struct uart_max3110 *max =
- container_of(port, struct uart_max3110, port);
- return max->name;
-}
-
-static void
-serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- struct uart_max3110 *max =
- container_of(port, struct uart_max3110, port);
- unsigned char cval;
- unsigned int baud, parity = 0;
- int clk_div = -1;
- u16 new_conf = max->cur_conf;
-
- switch (termios->c_cflag & CSIZE) {
- case CS7:
- cval = UART_LCR_WLEN7;
- new_conf |= WC_7BIT_WORD;
- break;
- default:
- /* We only support CS7 & CS8 */
- termios->c_cflag &= ~CSIZE;
- termios->c_cflag |= CS8;
- case CS8:
- cval = UART_LCR_WLEN8;
- new_conf |= WC_8BIT_WORD;
- break;
- }
-
- baud = uart_get_baud_rate(port, termios, old, 0, 230400);
-
- /* First calc the div for 1.8MHZ clock case */
- switch (baud) {
- case 300:
- clk_div = WC_BAUD_DR384;
- break;
- case 600:
- clk_div = WC_BAUD_DR192;
- break;
- case 1200:
- clk_div = WC_BAUD_DR96;
- break;
- case 2400:
- clk_div = WC_BAUD_DR48;
- break;
- case 4800:
- clk_div = WC_BAUD_DR24;
- break;
- case 9600:
- clk_div = WC_BAUD_DR12;
- break;
- case 19200:
- clk_div = WC_BAUD_DR6;
- break;
- case 38400:
- clk_div = WC_BAUD_DR3;
- break;
- case 57600:
- clk_div = WC_BAUD_DR2;
- break;
- case 115200:
- clk_div = WC_BAUD_DR1;
- break;
- case 230400:
- if (max->clock & MAX3110_HIGH_CLK)
- break;
- default:
- /* Pick the previous baud rate */
- baud = max->baud;
- clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
- tty_termios_encode_baud_rate(termios, baud, baud);
- }
-
- if (max->clock & MAX3110_HIGH_CLK) {
- clk_div += 1;
- /* High clk version max3110 doesn't support B300 */
- if (baud == 300) {
- baud = 600;
- clk_div = WC_BAUD_DR384;
- }
- if (baud == 230400)
- clk_div = WC_BAUD_DR1;
- tty_termios_encode_baud_rate(termios, baud, baud);
- }
-
- new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
-
- if (unlikely(termios->c_cflag & CMSPAR))
- termios->c_cflag &= ~CMSPAR;
-
- if (termios->c_cflag & CSTOPB)
- new_conf |= WC_2_STOPBITS;
- else
- new_conf &= ~WC_2_STOPBITS;
-
- if (termios->c_cflag & PARENB) {
- new_conf |= WC_PARITY_ENABLE;
- parity |= UART_LCR_PARITY;
- } else
- new_conf &= ~WC_PARITY_ENABLE;
-
- if (!(termios->c_cflag & PARODD))
- parity |= UART_LCR_EPAR;
- max->parity = parity;
-
- uart_update_timeout(port, termios->c_cflag, baud);
-
- new_conf |= WC_TAG;
- if (new_conf != max->cur_conf) {
- if (!max3110_out(max, new_conf)) {
- max->cur_conf = new_conf;
- max->baud = baud;
- }
- }
-}
-
-/* Don't handle hw handshaking */
-static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
-{
- return TIOCM_DSR | TIOCM_CAR | TIOCM_DSR;
-}
-
-static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-}
-
-static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
-{
-}
-
-static void serial_m3110_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
-}
-
-static struct uart_ops serial_m3110_ops = {
- .tx_empty = serial_m3110_tx_empty,
- .set_mctrl = serial_m3110_set_mctrl,
- .get_mctrl = serial_m3110_get_mctrl,
- .stop_tx = serial_m3110_stop_tx,
- .start_tx = serial_m3110_start_tx,
- .stop_rx = serial_m3110_stop_rx,
- .break_ctl = serial_m3110_break_ctl,
- .startup = serial_m3110_startup,
- .shutdown = serial_m3110_shutdown,
- .set_termios = serial_m3110_set_termios,
- .pm = serial_m3110_pm,
- .type = serial_m3110_type,
- .release_port = serial_m3110_release_port,
- .request_port = serial_m3110_request_port,
- .config_port = serial_m3110_config_port,
- .verify_port = serial_m3110_verify_port,
-};
-
-static struct uart_driver serial_m3110_reg = {
- .owner = THIS_MODULE,
- .driver_name = "MRST serial",
- .dev_name = "ttyS",
- .major = TTY_MAJOR,
- .minor = 64,
- .nr = 1,
- .cons = &serial_m3110_console,
-};
-
-#ifdef CONFIG_PM_SLEEP
-static int serial_m3110_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct uart_max3110 *max = spi_get_drvdata(spi);
-
- if (max->irq > 0)
- disable_irq(max->irq);
- uart_suspend_port(&serial_m3110_reg, &max->port);
- max3110_out(max, max->cur_conf | WC_SW_SHDI);
- return 0;
-}
-
-static int serial_m3110_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct uart_max3110 *max = spi_get_drvdata(spi);
-
- max3110_out(max, max->cur_conf);
- uart_resume_port(&serial_m3110_reg, &max->port);
- if (max->irq > 0)
- enable_irq(max->irq);
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(serial_m3110_pm_ops, serial_m3110_suspend,
- serial_m3110_resume);
-#define SERIAL_M3110_PM_OPS (&serial_m3110_pm_ops)
-
-#else
-#define SERIAL_M3110_PM_OPS NULL
-#endif
-
-static int serial_m3110_probe(struct spi_device *spi)
-{
- struct uart_max3110 *max;
- void *buffer;
- u16 res;
- int ret = 0;
-
- max = kzalloc(sizeof(*max), GFP_KERNEL);
- if (!max)
- return -ENOMEM;
-
- /* Set spi info */
- spi->bits_per_word = 16;
- max->clock = MAX3110_HIGH_CLK;
-
- spi_setup(spi);
-
- max->port.type = PORT_MAX3100;
- max->port.fifosize = 2; /* Only have 16b buffer */
- max->port.ops = &serial_m3110_ops;
- max->port.line = 0;
- max->port.dev = &spi->dev;
- max->port.uartclk = 115200;
-
- max->spi = spi;
- strcpy(max->name, spi->modalias);
- max->irq = (u16)spi->irq;
-
- mutex_init(&max->thread_mutex);
- mutex_init(&max->io_mutex);
-
- max->word_7bits = 0;
- max->parity = 0;
- max->baud = 0;
-
- max->cur_conf = 0;
- max->uart_flags = 0;
-
- /* Check if reading configuration register returns something sane */
-
- res = RC_TAG;
- ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
- if (ret < 0 || res == 0 || res == 0xffff) {
- dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
- res);
- ret = -ENODEV;
- goto err_get_page;
- }
-
- buffer = (void *)__get_free_page(GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto err_get_page;
- }
- max->con_xmit.buf = buffer;
- max->con_xmit.head = 0;
- max->con_xmit.tail = 0;
-
- init_waitqueue_head(&max->wq);
-
- max->main_thread = kthread_run(max3110_main_thread,
- max, "max3110_main");
- if (IS_ERR(max->main_thread)) {
- ret = PTR_ERR(max->main_thread);
- goto err_kthread;
- }
-
- if (max->irq) {
- ret = request_irq(max->irq, serial_m3110_irq,
- IRQ_TYPE_EDGE_FALLING, "max3110", max);
- if (ret) {
- max->irq = 0;
- dev_warn(&spi->dev,
- "unable to allocate IRQ, will use polling method\n");
- }
- }
-
- spi_set_drvdata(spi, max);
- pmax = max;
-
- /* Give membase a psudo value to pass serial_core's check */
- max->port.membase = (unsigned char __iomem *)0xff110000;
- uart_add_one_port(&serial_m3110_reg, &max->port);
-
- return 0;
-
-err_kthread:
- free_page((unsigned long)buffer);
-err_get_page:
- kfree(max);
- return ret;
-}
-
-static int serial_m3110_remove(struct spi_device *dev)
-{
- struct uart_max3110 *max = spi_get_drvdata(dev);
-
- if (!max)
- return 0;
-
- uart_remove_one_port(&serial_m3110_reg, &max->port);
-
- free_page((unsigned long)max->con_xmit.buf);
-
- if (max->irq)
- free_irq(max->irq, max);
-
- if (max->main_thread)
- kthread_stop(max->main_thread);
-
- kfree(max);
- return 0;
-}
-
-static struct spi_driver uart_max3110_driver = {
- .driver = {
- .name = "spi_max3111",
- .owner = THIS_MODULE,
- .pm = SERIAL_M3110_PM_OPS,
- },
- .probe = serial_m3110_probe,
- .remove = serial_m3110_remove,
-};
-
-static int __init serial_m3110_init(void)
-{
- int ret = 0;
-
- ret = uart_register_driver(&serial_m3110_reg);
- if (ret)
- return ret;
-
- ret = spi_register_driver(&uart_max3110_driver);
- if (ret)
- uart_unregister_driver(&serial_m3110_reg);
-
- return ret;
-}
-
-static void __exit serial_m3110_exit(void)
-{
- spi_unregister_driver(&uart_max3110_driver);
- uart_unregister_driver(&serial_m3110_reg);
-}
-
-module_init(serial_m3110_init);
-module_exit(serial_m3110_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:max3110-uart");
diff --git a/drivers/tty/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
deleted file mode 100644
index 35af0739513b..000000000000
--- a/drivers/tty/serial/mrst_max3110.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _MRST_MAX3110_H
-#define _MRST_MAX3110_H
-
-#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHZ */
-#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHZ */
-
-/* status bits for all 4 MAX3110 operate modes */
-#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
-#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
-#define MAX3110_BREAK (1 << 10)
-
-#define WC_TAG (3 << 14)
-#define RC_TAG (1 << 14)
-#define WD_TAG (2 << 14)
-#define RD_TAG (0 << 14)
-
-/* bits def for write configuration */
-#define WC_FIFO_ENABLE_MASK (1 << 13)
-#define WC_FIFO_ENABLE (0 << 13)
-
-#define WC_SW_SHDI (1 << 12)
-
-#define WC_IRQ_MASK (0xF << 8)
-#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
-#define WC_RXA_IRQ_ENABLE (1 << 10) /* RX available irq */
-#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
-#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
-
-#define WC_IRDA_ENABLE (1 << 7)
-
-#define WC_STOPBITS_MASK (1 << 6)
-#define WC_2_STOPBITS (1 << 6)
-#define WC_1_STOPBITS (0 << 6)
-
-#define WC_PARITY_ENABLE_MASK (1 << 5)
-#define WC_PARITY_ENABLE (1 << 5)
-
-#define WC_WORDLEN_MASK (1 << 4)
-#define WC_7BIT_WORD (1 << 4)
-#define WC_8BIT_WORD (0 << 4)
-
-#define WC_BAUD_DIV_MASK (0xF)
-#define WC_BAUD_DR1 (0x0)
-#define WC_BAUD_DR2 (0x1)
-#define WC_BAUD_DR4 (0x2)
-#define WC_BAUD_DR8 (0x3)
-#define WC_BAUD_DR16 (0x4)
-#define WC_BAUD_DR32 (0x5)
-#define WC_BAUD_DR64 (0x6)
-#define WC_BAUD_DR128 (0x7)
-#define WC_BAUD_DR3 (0x8)
-#define WC_BAUD_DR6 (0x9)
-#define WC_BAUD_DR12 (0xA)
-#define WC_BAUD_DR24 (0xB)
-#define WC_BAUD_DR48 (0xC)
-#define WC_BAUD_DR96 (0xD)
-#define WC_BAUD_DR192 (0xE)
-#define WC_BAUD_DR384 (0xF)
-
-#define M3110_RX_FIFO_DEPTH 8
-#endif
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index c88b522ccd73..b73889c8ed4b 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -920,14 +920,15 @@ static void msm_console_write(struct console *co, const char *s,
static int __init msm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
- struct msm_port *msm_port;
- int baud = 0, flow, bits, parity;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
port = get_port_from_line(co->index);
- msm_port = UART_TO_MSM(port);
if (unlikely(!port->membase))
return -ENXIO;
@@ -937,23 +938,6 @@ static int __init msm_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- bits = 8;
- parity = 'n';
- flow = 'n';
- msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
- UART_MR2); /* 8N1 */
-
- if (baud < 300 || baud > 115200)
- baud = 115200;
- msm_set_baud_rate(port, baud);
-
- msm_reset(port);
-
- if (msm_port->is_uartdm) {
- msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
- msm_write(port, UART_CR_TX_ENABLE, UART_CR);
- }
-
pr_info("msm_serial: console setup on port #%d\n", port->line);
return uart_set_options(port, co, baud, parity, bits, flow);
@@ -1142,9 +1126,6 @@ static int __init msm_serial_init(void)
static void __exit msm_serial_exit(void)
{
-#ifdef CONFIG_SERIAL_MSM_CONSOLE
- unregister_console(&msm_console);
-#endif
platform_driver_unregister(&msm_platform_driver);
uart_unregister_driver(&msm_uart_driver);
}
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index ec553f8eb218..d1298b6cc68e 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -152,8 +152,6 @@ struct mxs_auart_port {
unsigned int mctrl_prev;
enum mxs_auart_type devtype;
- unsigned int irq;
-
struct clk *clk;
struct device *dev;
@@ -1228,37 +1226,32 @@ static int mxs_auart_probe(struct platform_device *pdev)
of_match_device(mxs_auart_dt_ids, &pdev->dev);
struct mxs_auart_port *s;
u32 version;
- int ret = 0;
+ int ret, irq;
struct resource *r;
- s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
- if (!s) {
- ret = -ENOMEM;
- goto out;
- }
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
ret = serial_mxs_probe_dt(s, pdev);
if (ret > 0)
s->port.line = pdev->id < 0 ? 0 : pdev->id;
else if (ret < 0)
- goto out_free;
+ return ret;
if (of_id) {
pdev->id_entry = of_id->data;
s->devtype = pdev->id_entry->driver_data;
}
- s->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(s->clk)) {
- ret = PTR_ERR(s->clk);
- goto out_free;
- }
+ s->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(s->clk))
+ return PTR_ERR(s->clk);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- ret = -ENXIO;
- goto out_free_clk;
- }
+ if (!r)
+ return -ENXIO;
+
s->port.mapbase = r->start;
s->port.membase = ioremap(r->start, resource_size(r));
@@ -1271,11 +1264,15 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->mctrl_prev = 0;
- s->irq = platform_get_irq(pdev, 0);
- s->port.irq = s->irq;
- ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ s->port.irq = irq;
+ ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0,
+ dev_name(&pdev->dev), s);
if (ret)
- goto out_free_clk;
+ return ret;
platform_set_drvdata(pdev, s);
@@ -1288,7 +1285,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
*/
ret = mxs_auart_request_gpio_irq(s);
if (ret)
- goto out_free_irq;
+ return ret;
auart_port[s->port.line] = s;
@@ -1307,14 +1304,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
out_free_gpio_irq:
mxs_auart_free_gpio_irq(s);
-out_free_irq:
auart_port[pdev->id] = NULL;
- free_irq(s->irq, s);
-out_free_clk:
- clk_put(s->clk);
-out_free:
- kfree(s);
-out:
return ret;
}
@@ -1323,13 +1313,8 @@ static int mxs_auart_remove(struct platform_device *pdev)
struct mxs_auart_port *s = platform_get_drvdata(pdev);
uart_remove_one_port(&auart_driver, &s->port);
-
auart_port[pdev->id] = NULL;
-
mxs_auart_free_gpio_irq(s);
- clk_put(s->clk);
- free_irq(s->irq, s);
- kfree(s);
return 0;
}
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 64f1bab7e9d7..7ff61e24a195 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -102,6 +102,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (of_property_read_u32(np, "fifo-size", &prop) == 0)
port->fifosize = prop;
+ /* Check for a fixed line number */
+ ret = of_alias_get_id(np, "serial");
+ if (ret >= 0)
+ port->line = ret;
+
port->irq = irq_of_parse_and_map(np, 0);
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
@@ -128,6 +133,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (of_find_property(np, "no-loopback-test", NULL))
port->flags |= UPF_SKIP_TEST;
+ ret = of_alias_get_id(np, "serial");
+ if (ret >= 0)
+ port->line = ret;
+
port->dev = &ofdev->dev;
switch (type) {
@@ -331,6 +340,10 @@ static struct of_device_id of_platform_serial_table[] = {
.data = (void *)PORT_ALTR_16550_F64, },
{ .compatible = "altr,16550-FIFO128",
.data = (void *)PORT_ALTR_16550_F128, },
+ { .compatible = "mrvl,mmp-uart",
+ .data = (void *)PORT_XSCALE, },
+ { .compatible = "mrvl,pxa-uart",
+ .data = (void *)PORT_XSCALE, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
{ .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 2e1073da6719..10256fa04b40 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -63,7 +63,7 @@
#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
-#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz*/
+#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz */
/* SCR register bitmasks */
#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
@@ -93,7 +93,7 @@
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
-#define OMAP_UART_WER_MOD_WKUP 0X7F
+#define OMAP_UART_WER_MOD_WKUP 0x7F
/* Enable XON/XOFF flow control on output */
#define OMAP_UART_SW_TX 0x08
@@ -114,7 +114,7 @@ struct uart_omap_dma {
dma_addr_t tx_buf_dma_phys;
unsigned int uart_base;
/*
- * Buffer for rx dma.It is not required for tx because the buffer
+ * Buffer for rx dma. It is not required for tx because the buffer
* comes from port structure.
*/
unsigned char *rx_buf;
@@ -151,7 +151,7 @@ struct uart_omap_port {
int use_dma;
/*
* Some bits in registers are cleared on a read, so they must
- * be saved whenever the register is read but the bits will not
+ * be saved whenever the register is read, but the bits will not
* be immediately processed.
*/
unsigned int lsr_break_flag;
@@ -681,7 +681,7 @@ static unsigned int serial_omap_get_mctrl(struct uart_port *port)
static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- unsigned char mcr = 0, old_mcr;
+ unsigned char mcr = 0, old_mcr, lcr;
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
@@ -701,6 +701,17 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
UART_MCR_DTR | UART_MCR_RTS);
up->mcr = old_mcr | mcr;
serial_out(up, UART_MCR, up->mcr);
+
+ /* Turn off autoRTS if RTS is lowered; restore autoRTS if RTS raised */
+ lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ up->efr |= UART_EFR_RTS;
+ else
+ up->efr &= ~UART_EFR_RTS;
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, lcr);
+
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
@@ -756,8 +767,6 @@ static int serial_omap_startup(struct uart_port *port)
* (they will be reenabled in set_termios())
*/
serial_omap_clear_fifos(up);
- /* For Hardware flow control */
- serial_out(up, UART_MCR, UART_MCR_RTS);
/*
* Clear the interrupt registers.
@@ -1053,12 +1062,12 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
- if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
- /* Enable AUTORTS and AUTOCTS */
- up->efr |= UART_EFR_CTS | UART_EFR_RTS;
+ up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
- /* Ensure MCR RTS is asserted */
- up->mcr |= UART_MCR_RTS;
+ if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
+ /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
+ up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
+ up->efr |= UART_EFR_CTS;
} else {
/* Disable AUTORTS and AUTOCTS */
up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);
@@ -1081,8 +1090,10 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
* Enable XON/XOFF flow control on output.
* Transmit XON1, XOFF1
*/
- if (termios->c_iflag & IXOFF)
+ if (termios->c_iflag & IXOFF) {
+ up->port.status |= UPSTAT_AUTOXOFF;
up->efr |= OMAP_UART_SW_TX;
+ }
/*
* IXANY Flag:
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 19273e31d224..af821a908720 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -28,6 +28,9 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -78,6 +81,10 @@ static void dbg(const char *fmt, ...)
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
+#define S3C24XX_TX_PIO 1
+#define S3C24XX_TX_DMA 2
+#define S3C24XX_RX_PIO 1
+#define S3C24XX_RX_DMA 2
/* macros to change one thing to another */
#define tx_enabled(port) ((port)->unused[0])
@@ -154,39 +161,272 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
static void s3c24xx_serial_stop_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct dma_tx_state state;
+ int count;
- if (tx_enabled(port)) {
- if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
- else
- disable_irq_nosync(ourport->tx_irq);
- tx_enabled(port) = 0;
- if (port->flags & UPF_CONS_FLOW)
- s3c24xx_serial_rx_enable(port);
+ if (!tx_enabled(port))
+ return;
+
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __set_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ disable_irq_nosync(ourport->tx_irq);
+
+ if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) {
+ dmaengine_pause(dma->tx_chan);
+ dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
+ dmaengine_terminate_all(dma->tx_chan);
+ dma_sync_single_for_cpu(ourport->port.dev,
+ dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);
+ async_tx_ack(dma->tx_desc);
+ count = dma->tx_bytes_requested - state.residue;
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += count;
+ }
+
+ tx_enabled(port) = 0;
+ ourport->tx_in_progress = 0;
+
+ if (port->flags & UPF_CONS_FLOW)
+ s3c24xx_serial_rx_enable(port);
+
+ ourport->tx_mode = 0;
+}
+
+static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport);
+
+static void s3c24xx_serial_tx_dma_complete(void *args)
+{
+ struct s3c24xx_uart_port *ourport = args;
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct dma_tx_state state;
+ unsigned long flags;
+ int count;
+
+
+ dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
+ count = dma->tx_bytes_requested - state.residue;
+ async_tx_ack(dma->tx_desc);
+
+ dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
+ dma->tx_size, DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += count;
+ ourport->tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ s3c24xx_serial_start_next_tx(ourport);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ u32 ucon;
+
+ /* Mask Tx interrupt */
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __set_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ disable_irq_nosync(ourport->tx_irq);
+
+ /* Enable tx dma mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
+ ucon |= (dma_get_cache_alignment() >= 16) ?
+ S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+ ucon |= S3C64XX_UCON_TXMODE_DMA;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->tx_mode = S3C24XX_TX_DMA;
+}
+
+static void enable_tx_pio(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ u32 ucon, ufcon;
+
+ /* Set ufcon txtrig */
+ ourport->tx_in_progress = S3C24XX_TX_PIO;
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ wr_regl(port, S3C2410_UFCON, ufcon);
+
+ /* Enable tx pio mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_TXMODE_MASK);
+ ucon |= S3C64XX_UCON_TXMODE_CPU;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ /* Unmask Tx interrupt */
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __clear_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ enable_irq(ourport->tx_irq);
+
+ ourport->tx_mode = S3C24XX_TX_PIO;
+}
+
+static void s3c24xx_serial_start_tx_pio(struct s3c24xx_uart_port *ourport)
+{
+ if (ourport->tx_mode != S3C24XX_TX_PIO)
+ enable_tx_pio(ourport);
+}
+
+static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
+ unsigned int count)
+{
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+
+
+ if (ourport->tx_mode != S3C24XX_TX_DMA)
+ enable_tx_dma(ourport);
+
+ while (xmit->tail & (dma_get_cache_alignment() - 1)) {
+ if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
+ return 0;
+ wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ count--;
+ }
+
+ dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
+ dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
+
+ dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,
+ dma->tx_size, DMA_TO_DEVICE);
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
+ dma->tx_transfer_addr, dma->tx_size,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!dma->tx_desc) {
+ dev_err(ourport->port.dev, "Unable to get desc for Tx\n");
+ return -EIO;
}
+
+ dma->tx_desc->callback = s3c24xx_serial_tx_dma_complete;
+ dma->tx_desc->callback_param = ourport;
+ dma->tx_bytes_requested = dma->tx_size;
+
+ ourport->tx_in_progress = S3C24XX_TX_DMA;
+ dma->tx_cookie = dmaengine_submit(dma->tx_desc);
+ dma_async_issue_pending(dma->tx_chan);
+ return 0;
}
-static void s3c24xx_serial_start_tx(struct uart_port *port)
+static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long count;
+
+ /* Get data size up to the end of buffer */
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ if (!count) {
+ s3c24xx_serial_stop_tx(port);
+ return;
+ }
+
+ if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize)
+ s3c24xx_serial_start_tx_pio(ourport);
+ else
+ s3c24xx_serial_start_tx_dma(ourport, count);
+}
+
+void s3c24xx_serial_start_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct circ_buf *xmit = &port->state->xmit;
if (!tx_enabled(port)) {
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
- if (s3c24xx_serial_has_interrupt_mask(port))
- __clear_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
- else
- enable_irq(ourport->tx_irq);
tx_enabled(port) = 1;
+ if (!ourport->dma || !ourport->dma->tx_chan)
+ s3c24xx_serial_start_tx_pio(ourport);
+ }
+
+ if (ourport->dma && ourport->dma->tx_chan) {
+ if (!uart_circ_empty(xmit) && !ourport->tx_in_progress)
+ s3c24xx_serial_start_next_tx(ourport);
}
}
+static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,
+ struct tty_port *tty, int count)
+{
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ int copied;
+
+ if (!count)
+ return;
+
+ dma_sync_single_for_cpu(ourport->port.dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ ourport->port.icount.rx += count;
+ if (!tty) {
+ dev_err(ourport->port.dev, "No tty port\n");
+ return;
+ }
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(ourport->dma->rx_buf)), count);
+ if (copied != count) {
+ WARN_ON(1);
+ dev_err(ourport->port.dev, "RxData copy to tty layer failed\n");
+ }
+}
+
+static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
+ unsigned long ufstat);
+
+static void uart_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ struct tty_port *tty = &port->state->port;
+ unsigned int ch, ufstat;
+ unsigned int count;
+
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+ count = s3c24xx_serial_rx_fifocnt(ourport, ufstat);
+
+ if (!count)
+ return;
+
+ while (count-- > 0) {
+ ch = rd_regb(port, S3C2410_URXH);
+
+ ourport->port.icount.rx++;
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct tty_port *t = &port->state->port;
+ struct dma_tx_state state;
+ enum dma_status dma_status;
+ unsigned int received;
if (rx_enabled(port)) {
dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
@@ -197,6 +437,17 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
disable_irq_nosync(ourport->rx_irq);
rx_enabled(port) = 0;
}
+ if (dma && dma->rx_chan) {
+ dmaengine_pause(dma->rx_chan);
+ dma_status = dmaengine_tx_status(dma->rx_chan,
+ dma->rx_cookie, &state);
+ if (dma_status == DMA_IN_PROGRESS ||
+ dma_status == DMA_PAUSED) {
+ received = dma->rx_bytes_requested - state.residue;
+ dmaengine_terminate_all(dma->rx_chan);
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+ }
+ }
}
static inline struct s3c24xx_uart_info
@@ -228,12 +479,157 @@ static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
+static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport);
+static void s3c24xx_serial_rx_dma_complete(void *args)
+{
+ struct s3c24xx_uart_port *ourport = args;
+ struct uart_port *port = &ourport->port;
+
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct tty_port *t = &port->state->port;
+ struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
+
+ struct dma_tx_state state;
+ unsigned long flags;
+ int received;
+
+ dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
+ received = dma->rx_bytes_requested - state.residue;
+ async_tx_ack(dma->rx_desc);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (received)
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+
+ if (tty) {
+ tty_flip_buffer_push(t);
+ tty_kref_put(tty);
+ }
+
+ s3c64xx_start_rx_dma(ourport);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
+{
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+
+ dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
+ dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma->rx_desc) {
+ dev_err(ourport->port.dev, "Unable to get desc for Rx\n");
+ return;
+ }
+
+ dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
+ dma->rx_desc->callback_param = ourport;
+ dma->rx_bytes_requested = dma->rx_size;
+
+ dma->rx_cookie = dmaengine_submit(dma->rx_desc);
+ dma_async_issue_pending(dma->rx_chan);
+}
/* ? - where has parity gone?? */
#define S3C2410_UERSTAT_PARITY (0x1000)
-static irqreturn_t
-s3c24xx_serial_rx_chars(int irq, void *dev_id)
+static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ unsigned int ucon;
+
+ /* set Rx mode to DMA mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_RXBURST_MASK |
+ S3C64XX_UCON_TIMEOUT_MASK |
+ S3C64XX_UCON_EMPTYINT_EN |
+ S3C64XX_UCON_DMASUS_EN |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_MASK);
+ ucon |= S3C64XX_UCON_RXBURST_16 |
+ 0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
+ S3C64XX_UCON_EMPTYINT_EN |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_DMA;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->rx_mode = S3C24XX_RX_DMA;
+}
+
+static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ unsigned int ucon;
+
+ /* set Rx mode to CPU (PIO) mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_TIMEOUT_MASK |
+ S3C64XX_UCON_EMPTYINT_EN |
+ S3C64XX_UCON_DMASUS_EN |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_MASK);
+ ucon |= 0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_CPU;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->rx_mode = S3C24XX_RX_PIO;
+}
+
+static irqreturn_t s3c24xx_serial_rx_chars_dma(int irq, void *dev_id)
+{
+ unsigned int utrstat, ufstat, received;
+ struct s3c24xx_uart_port *ourport = dev_id;
+ struct uart_port *port = &ourport->port;
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
+ struct tty_port *t = &port->state->port;
+ unsigned long flags;
+ struct dma_tx_state state;
+
+ utrstat = rd_regl(port, S3C2410_UTRSTAT);
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
+ s3c64xx_start_rx_dma(ourport);
+ if (ourport->rx_mode == S3C24XX_RX_PIO)
+ enable_rx_dma(ourport);
+ goto finish;
+ }
+
+ if (ourport->rx_mode == S3C24XX_RX_DMA) {
+ dmaengine_pause(dma->rx_chan);
+ dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
+ dmaengine_terminate_all(dma->rx_chan);
+ received = dma->rx_bytes_requested - state.residue;
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+
+ enable_rx_pio(ourport);
+ }
+
+ uart_rx_drain_fifo(ourport);
+
+ if (tty) {
+ tty_flip_buffer_push(t);
+ tty_kref_put(tty);
+ }
+
+ wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
+
+finish:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t s3c24xx_serial_rx_chars_pio(int irq, void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
@@ -324,16 +720,33 @@ out:
return IRQ_HANDLED;
}
+
+static irqreturn_t s3c24xx_serial_rx_chars(int irq, void *dev_id)
+{
+ struct s3c24xx_uart_port *ourport = dev_id;
+
+ if (ourport->dma && ourport->dma->rx_chan)
+ return s3c24xx_serial_rx_chars_dma(irq, dev_id);
+ return s3c24xx_serial_rx_chars_pio(irq, dev_id);
+}
+
static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
{
struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
- int count = port->fifosize;
+ int count;
spin_lock_irqsave(&port->lock, flags);
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) {
+ s3c24xx_serial_start_tx_dma(ourport, count);
+ goto out;
+ }
+
if (port->x_char) {
wr_regb(port, S3C2410_UTXH, port->x_char);
port->icount.tx++;
@@ -352,6 +765,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
/* try and drain the buffer... */
+ count = port->fifosize;
while (!uart_circ_empty(xmit) && count-- > 0) {
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
@@ -453,6 +867,93 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&port->lock, flags);
}
+static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+{
+ struct s3c24xx_uart_dma *dma = p->dma;
+ dma_cap_mask_t mask;
+ unsigned long flags;
+
+ /* Default slave configuration parameters */
+ dma->rx_conf.direction = DMA_DEV_TO_MEM;
+ dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
+ dma->rx_conf.src_maxburst = 16;
+
+ dma->tx_conf.direction = DMA_MEM_TO_DEV;
+ dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
+ if (dma_get_cache_alignment() >= 16)
+ dma->tx_conf.dst_maxburst = 16;
+ else
+ dma->tx_conf.dst_maxburst = 1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dma->rx_chan = dma_request_slave_channel_compat(mask, dma->fn,
+ dma->rx_param, p->port.dev, "rx");
+ if (!dma->rx_chan)
+ return -ENODEV;
+
+ dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);
+
+ dma->tx_chan = dma_request_slave_channel_compat(mask, dma->fn,
+ dma->tx_param, p->port.dev, "tx");
+ if (!dma->tx_chan) {
+ dma_release_channel(dma->rx_chan);
+ return -ENODEV;
+ }
+
+ dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);
+
+ /* RX buffer */
+ dma->rx_size = PAGE_SIZE;
+
+ dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);
+
+ if (!dma->rx_buf) {
+ dma_release_channel(dma->rx_chan);
+ dma_release_channel(dma->tx_chan);
+ return -ENOMEM;
+ }
+
+ dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&p->port.lock, flags);
+
+ /* TX buffer */
+ dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
+ p->port.state->xmit.buf,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ spin_unlock_irqrestore(&p->port.lock, flags);
+
+ return 0;
+}
+
+static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
+{
+ struct s3c24xx_uart_dma *dma = p->dma;
+
+ if (dma->rx_chan) {
+ dmaengine_terminate_all(dma->rx_chan);
+ dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+ kfree(dma->rx_buf);
+ dma_release_channel(dma->rx_chan);
+ dma->rx_chan = NULL;
+ }
+
+ if (dma->tx_chan) {
+ dmaengine_terminate_all(dma->tx_chan);
+ dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->tx_chan);
+ dma->tx_chan = NULL;
+ }
+}
+
static void s3c24xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -478,6 +979,11 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
wr_regl(port, S3C64XX_UINTP, 0xf);
wr_regl(port, S3C64XX_UINTM, 0xf);
}
+
+ if (ourport->dma)
+ s3c24xx_serial_release_dma(ourport);
+
+ ourport->tx_in_progress = 0;
}
static int s3c24xx_serial_startup(struct uart_port *port)
@@ -529,12 +1035,21 @@ err:
static int s3c64xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ unsigned long flags;
+ unsigned int ufcon;
int ret;
dbg("s3c64xx_serial_startup: port=%p (%08llx,%p)\n",
port, (unsigned long long)port->mapbase, port->membase);
wr_regl(port, S3C64XX_UINTM, 0xf);
+ if (ourport->dma) {
+ ret = s3c24xx_serial_request_dma(ourport);
+ if (ret < 0) {
+ dev_warn(port->dev, "DMA request failed\n");
+ return ret;
+ }
+ }
ret = request_irq(port->irq, s3c64xx_serial_handle_irq, IRQF_SHARED,
s3c24xx_serial_portname(port), ourport);
@@ -549,8 +1064,20 @@ static int s3c64xx_serial_startup(struct uart_port *port)
tx_enabled(port) = 0;
ourport->tx_claimed = 1;
+ spin_lock_irqsave(&port->lock, flags);
+
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX |
+ S5PV210_UFCON_RXTRIG8;
+ wr_regl(port, S3C2410_UFCON, ufcon);
+
+ enable_rx_pio(ourport);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
/* Enable Rx Interrupt */
__clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM));
+
dbg("s3c64xx_serial_startup ok\n");
return ret;
}
@@ -1209,6 +1736,18 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ret = platform_get_irq(platdev, 1);
if (ret > 0)
ourport->tx_irq = ret;
+ /*
+ * DMA is currently supported only on DT platforms, and only if the
+ * DMA properties are specified in the device tree.
+ */
+ if (platdev->dev.of_node && of_find_property(platdev->dev.of_node,
+ "dmas", NULL)) {
+ ourport->dma = devm_kzalloc(port->dev,
+ sizeof(*ourport->dma),
+ GFP_KERNEL);
+ if (!ourport->dma)
+ return -ENOMEM;
+ }
ourport->clk = clk_get(&platdev->dev, "uart");
if (IS_ERR(ourport->clk)) {
@@ -1757,32 +2296,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
+#define EXYNOS_COMMON_SERIAL_DRV_DATA \
+ .info = &(struct s3c24xx_uart_info) { \
+ .name = "Samsung Exynos UART", \
+ .type = PORT_S3C6400, \
+ .has_divslot = 1, \
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
+ .def_clk_sel = S3C2410_UCON_CLKSEL0, \
+ .num_clks = 1, \
+ .clksel_mask = 0, \
+ .clksel_shift = 0, \
+ }, \
+ .def_cfg = &(struct s3c2410_uartcfg) { \
+ .ucon = S5PV210_UCON_DEFAULT, \
+ .ufcon = S5PV210_UFCON_DEFAULT, \
+ .has_fracval = 1, \
+ } \
+
static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- .info = &(struct s3c24xx_uart_info) {
- .name = "Samsung Exynos4 UART",
- .type = PORT_S3C6400,
- .has_divslot = 1,
- .rx_fifomask = S5PV210_UFSTAT_RXMASK,
- .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
- .rx_fifofull = S5PV210_UFSTAT_RXFULL,
- .tx_fifofull = S5PV210_UFSTAT_TXFULL,
- .tx_fifomask = S5PV210_UFSTAT_TXMASK,
- .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
- .def_clk_sel = S3C2410_UCON_CLKSEL0,
- .num_clks = 1,
- .clksel_mask = 0,
- .clksel_shift = 0,
- },
- .def_cfg = &(struct s3c2410_uartcfg) {
- .ucon = S5PV210_UCON_DEFAULT,
- .ufcon = S5PV210_UFCON_DEFAULT,
- .has_fracval = 1,
- },
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 16, 16 },
};
+
+static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
+ .fifosize = { 64, 256, 16, 256 },
+};
+
#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
#endif
static struct platform_device_id s3c24xx_serial_driver_ids[] = {
@@ -1804,6 +2354,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = {
}, {
.name = "exynos4210-uart",
.driver_data = EXYNOS4210_SERIAL_DRV_DATA,
+ }, {
+ .name = "exynos5433-uart",
+ .driver_data = EXYNOS5433_SERIAL_DRV_DATA,
},
{ },
};
@@ -1823,6 +2376,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
.data = (void *)S5PV210_SERIAL_DRV_DATA },
{ .compatible = "samsung,exynos4210-uart",
.data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+ { .compatible = "samsung,exynos5433-uart",
+ .data = (void *)EXYNOS5433_SERIAL_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
@@ -1841,6 +2396,111 @@ static struct platform_driver samsung_serial_driver = {
module_platform_driver(samsung_serial_driver);
+#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
+/*
+ * Early console.
+ */
+
+struct samsung_early_console_data {
+ u32 txfull_mask;
+};
+
+static void samsung_early_busyuart(struct uart_port *port)
+{
+ while (!(readl(port->membase + S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXFE))
+ ;
+}
+
+static void samsung_early_busyuart_fifo(struct uart_port *port)
+{
+ struct samsung_early_console_data *data = port->private_data;
+
+ while (readl(port->membase + S3C2410_UFSTAT) & data->txfull_mask)
+ ;
+}
+
+static void samsung_early_putc(struct uart_port *port, int c)
+{
+ if (readl(port->membase + S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE)
+ samsung_early_busyuart_fifo(port);
+ else
+ samsung_early_busyuart(port);
+
+ writeb(c, port->membase + S3C2410_UTXH);
+}
+
+static void samsung_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, samsung_early_putc);
+}
+
+static int __init samsung_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = samsung_early_write;
+ return 0;
+}
+
+/* S3C2410 */
+static struct samsung_early_console_data s3c2410_early_console_data = {
+ .txfull_mask = S3C2410_UFSTAT_TXFULL,
+};
+
+static int __init s3c2410_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->port.private_data = &s3c2410_early_console_data;
+ return samsung_early_console_setup(device, opt);
+}
+OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart",
+ s3c2410_early_console_setup);
+EARLYCON_DECLARE(s3c2410, s3c2410_early_console_setup);
+
+/* S3C2412, S3C2440, S3C64xx */
+static struct samsung_early_console_data s3c2440_early_console_data = {
+ .txfull_mask = S3C2440_UFSTAT_TXFULL,
+};
+
+static int __init s3c2440_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->port.private_data = &s3c2440_early_console_data;
+ return samsung_early_console_setup(device, opt);
+}
+OF_EARLYCON_DECLARE(s3c2412, "samsung,s3c2412-uart",
+ s3c2440_early_console_setup);
+OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
+ s3c2440_early_console_setup);
+OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
+ s3c2440_early_console_setup);
+EARLYCON_DECLARE(s3c2412, s3c2440_early_console_setup);
+EARLYCON_DECLARE(s3c2440, s3c2440_early_console_setup);
+EARLYCON_DECLARE(s3c6400, s3c2440_early_console_setup);
+
+/* S5PV210, EXYNOS */
+static struct samsung_early_console_data s5pv210_early_console_data = {
+ .txfull_mask = S5PV210_UFSTAT_TXFULL,
+};
+
+static int __init s5pv210_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->port.private_data = &s5pv210_early_console_data;
+ return samsung_early_console_setup(device, opt);
+}
+OF_EARLYCON_DECLARE(s5pv210, "samsung,s5pv210-uart",
+ s5pv210_early_console_setup);
+OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
+ s5pv210_early_console_setup);
+EARLYCON_DECLARE(s5pv210, s5pv210_early_console_setup);
+EARLYCON_DECLARE(exynos4210, s5pv210_early_console_setup);
+#endif
+
MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index eb071dd19b2d..d275032aa68d 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -12,6 +12,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+
struct s3c24xx_uart_info {
char *name;
unsigned int type;
@@ -41,6 +43,40 @@ struct s3c24xx_serial_drv_data {
unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
};
+struct s3c24xx_uart_dma {
+ dma_filter_fn fn;
+ void *rx_param;
+ void *tx_param;
+
+ unsigned int rx_chan_id;
+ unsigned int tx_chan_id;
+
+ struct dma_slave_config rx_conf;
+ struct dma_slave_config tx_conf;
+
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+
+ dma_addr_t rx_addr;
+ dma_addr_t tx_addr;
+
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+
+ char *rx_buf;
+
+ dma_addr_t tx_transfer_addr;
+
+ size_t rx_size;
+ size_t tx_size;
+
+ struct dma_async_tx_descriptor *tx_desc;
+ struct dma_async_tx_descriptor *rx_desc;
+
+ int tx_bytes_requested;
+ int rx_bytes_requested;
+};
+
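The descriptor/cookie/address triplets in this structure follow the usual dmaengine submission pattern. As a hedged illustration of how the tx_* fields are meant to be used (the function name and callback wiring are a sketch, not copied from the driver):

static void s3c24xx_uart_dma_tx_sketch(struct s3c24xx_uart_dma *dma,
                                       dma_async_tx_callback done, void *arg)
{
        dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
                                                   dma->tx_transfer_addr,
                                                   dma->tx_size, DMA_MEM_TO_DEV,
                                                   DMA_PREP_INTERRUPT);
        if (!dma->tx_desc)
                return;

        dma->tx_desc->callback = done;
        dma->tx_desc->callback_param = arg;
        dma->tx_cookie = dmaengine_submit(dma->tx_desc);
        dma_async_issue_pending(dma->tx_chan);
}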
struct s3c24xx_uart_port {
unsigned char rx_claimed;
unsigned char tx_claimed;
@@ -50,6 +86,10 @@ struct s3c24xx_uart_port {
unsigned int rx_irq;
unsigned int tx_irq;
+ unsigned int tx_in_progress;
+ unsigned int tx_mode;
+ unsigned int rx_mode;
+
struct s3c24xx_uart_info *info;
struct clk *clk;
struct clk *baudclk;
@@ -59,6 +99,8 @@ struct s3c24xx_uart_port {
/* reference to platform data */
struct s3c2410_uartcfg *cfg;
+ struct s3c24xx_uart_dma *dma;
+
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 57ca61b14670..6a1055ae3437 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -179,14 +179,6 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
if (tty->termios.c_cflag & CBAUD)
uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
}
-
- spin_lock_irq(&uport->lock);
- if (uart_cts_enabled(uport) &&
- !(uport->ops->get_mctrl(uport) & TIOCM_CTS))
- uport->hw_stopped = 1;
- else
- uport->hw_stopped = 0;
- spin_unlock_irq(&uport->lock);
}
/*
@@ -442,6 +434,7 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
{
struct uart_port *uport = state->uart_port;
struct ktermios *termios;
+ int hw_stopped;
/*
* If we have no tty, termios, or the port does not exist,
@@ -466,6 +459,18 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
uport->status &= ~UPSTAT_DCD_ENABLE;
else
uport->status |= UPSTAT_DCD_ENABLE;
+
+ /* reset sw-assisted CTS flow control based on (possibly) new mode */
+ hw_stopped = uport->hw_stopped;
+ uport->hw_stopped = uart_softcts_mode(uport) &&
+ !(uport->ops->get_mctrl(uport) & TIOCM_CTS);
+ if (uport->hw_stopped) {
+ if (!hw_stopped)
+ uport->ops->stop_tx(uport);
+ } else {
+ if (hw_stopped)
+ __uart_start(tty);
+ }
spin_unlock_irq(&uport->lock);
}
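With this change the sw-assisted CTS state is re-evaluated on every speed change rather than only at port startup, so toggling CRTSCTS at runtime starts or stops the transmitter immediately. uart_softcts_mode() itself is not part of this hunk; its expected shape (an assumption, based on the UPSTAT_* bits introduced in this series) is roughly:

static inline bool uart_softcts_mode(struct uart_port *uport)
{
        upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS;

        return (uport->status & mask) == UPSTAT_CTS_ENABLE;
}

i.e. CTS flow control is wanted but not handled by the hardware, so the core has to stop and start TX itself.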
@@ -619,22 +624,22 @@ static void uart_throttle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
- upf_t mask = 0;
+ upstat_t mask = 0;
if (I_IXOFF(tty))
- mask |= UPF_SOFT_FLOW;
+ mask |= UPSTAT_AUTOXOFF;
if (tty->termios.c_cflag & CRTSCTS)
- mask |= UPF_HARD_FLOW;
+ mask |= UPSTAT_AUTORTS;
- if (port->flags & mask) {
+ if (port->status & mask) {
port->ops->throttle(port);
- mask &= ~port->flags;
+ mask &= ~port->status;
}
- if (mask & UPF_SOFT_FLOW)
+ if (mask & UPSTAT_AUTOXOFF)
uart_send_xchar(tty, STOP_CHAR(tty));
- if (mask & UPF_HARD_FLOW)
+ if (mask & UPSTAT_AUTORTS)
uart_clear_mctrl(port, TIOCM_RTS);
}
@@ -642,22 +647,22 @@ static void uart_unthrottle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
- upf_t mask = 0;
+ upstat_t mask = 0;
if (I_IXOFF(tty))
- mask |= UPF_SOFT_FLOW;
+ mask |= UPSTAT_AUTOXOFF;
if (tty->termios.c_cflag & CRTSCTS)
- mask |= UPF_HARD_FLOW;
+ mask |= UPSTAT_AUTORTS;
- if (port->flags & mask) {
+ if (port->status & mask) {
port->ops->unthrottle(port);
- mask &= ~port->flags;
+ mask &= ~port->status;
}
- if (mask & UPF_SOFT_FLOW)
+ if (mask & UPSTAT_AUTOXOFF)
uart_send_xchar(tty, START_CHAR(tty));
- if (mask & UPF_HARD_FLOW)
+ if (mask & UPSTAT_AUTORTS)
uart_set_mctrl(port, TIOCM_RTS);
}
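Throttling now keys off the runtime port->status bits instead of the static UPF_SOFT_FLOW/UPF_HARD_FLOW capability flags, so the core only defers to the driver when auto-flow is actually active for the current termios. A hedged sketch of how a driver's set_termios() would publish that state (the function and capability checks are illustrative, not taken from this patch):

static void example_uart_set_termios(struct uart_port *port,
                                     struct ktermios *termios,
                                     struct ktermios *old)
{
        port->status &= ~(UPSTAT_AUTORTS | UPSTAT_AUTOCTS | UPSTAT_AUTOXOFF);

        if ((termios->c_cflag & CRTSCTS) && (port->flags & UPF_HARD_FLOW))
                port->status |= UPSTAT_AUTORTS | UPSTAT_AUTOCTS;

        if ((termios->c_iflag & IXOFF) && (port->flags & UPF_SOFT_FLOW))
                port->status |= UPSTAT_AUTOXOFF;

        /* program the hardware for the new line settings as usual */
}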
@@ -1351,30 +1356,6 @@ static void uart_set_termios(struct tty_struct *tty,
mask |= TIOCM_RTS;
uart_set_mctrl(uport, mask);
}
-
- /*
- * If the port is doing h/w assisted flow control, do nothing.
- * We assume that port->hw_stopped has never been set.
- */
- if (uport->flags & UPF_HARD_FLOW)
- return;
-
- /* Handle turning off CRTSCTS */
- if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
- spin_lock_irq(&uport->lock);
- uport->hw_stopped = 0;
- __uart_start(tty);
- spin_unlock_irq(&uport->lock);
- }
- /* Handle turning on CRTSCTS */
- else if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
- spin_lock_irq(&uport->lock);
- if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS)) {
- uport->hw_stopped = 1;
- uport->ops->stop_tx(uport);
- }
- spin_unlock_irq(&uport->lock);
- }
}
/*
@@ -2008,23 +1989,24 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
}
put_device(tty_dev);
- if (console_suspend_enabled || !uart_console(uport))
- uport->suspended = 1;
+ /* Nothing to do if the console is not suspending */
+ if (!console_suspend_enabled && uart_console(uport))
+ goto unlock;
+
+ uport->suspended = 1;
if (port->flags & ASYNC_INITIALIZED) {
const struct uart_ops *ops = uport->ops;
int tries;
- if (console_suspend_enabled || !uart_console(uport)) {
- set_bit(ASYNCB_SUSPENDED, &port->flags);
- clear_bit(ASYNCB_INITIALIZED, &port->flags);
-
- spin_lock_irq(&uport->lock);
- ops->stop_tx(uport);
- ops->set_mctrl(uport, 0);
- ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
- }
+ set_bit(ASYNCB_SUSPENDED, &port->flags);
+ clear_bit(ASYNCB_INITIALIZED, &port->flags);
+
+ spin_lock_irq(&uport->lock);
+ ops->stop_tx(uport);
+ ops->set_mctrl(uport, 0);
+ ops->stop_rx(uport);
+ spin_unlock_irq(&uport->lock);
/*
* Wait for the transmitter to empty.
@@ -2036,19 +2018,17 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
drv->dev_name,
drv->tty_driver->name_base + uport->line);
- if (console_suspend_enabled || !uart_console(uport))
- ops->shutdown(uport);
+ ops->shutdown(uport);
}
/*
* Disable the console device before suspending.
*/
- if (console_suspend_enabled && uart_console(uport))
+ if (uart_console(uport))
console_stop(uport->cons);
- if (console_suspend_enabled || !uart_console(uport))
- uart_change_pm(state, UART_PM_STATE_OFF);
-
+ uart_change_pm(state, UART_PM_STATE_OFF);
+unlock:
mutex_unlock(&port->mutex);
return 0;
@@ -2164,7 +2144,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
break;
}
- dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ port->dev ? dev_name(port->dev) : "",
+ port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line,
address, port->irq, port->uartclk / 16, uart_type(port));
@@ -2854,7 +2836,7 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
uport->icount.cts++;
- if (uart_cts_enabled(uport)) {
+ if (uart_softcts_mode(uport)) {
if (uport->hw_stopped) {
if (status) {
uport->hw_stopped = 0;
@@ -2867,6 +2849,7 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
uport->ops->stop_tx(uport);
}
}
+
}
}
EXPORT_SYMBOL_GPL(uart_handle_cts_change);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index e032963989fc..5b50c792ad5f 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -858,7 +858,7 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(tport);
- dev_notice(port->dev, "overrun error\n");
+ dev_dbg(port->dev, "overrun error\n");
copied++;
}
@@ -997,12 +997,15 @@ static inline unsigned long port_rx_irq_mask(struct uart_port *port)
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
unsigned short ssr_status, scr_status, err_enabled;
+ unsigned short slr_status = 0;
struct uart_port *port = ptr;
struct sci_port *s = to_sci_port(port);
irqreturn_t ret = IRQ_NONE;
ssr_status = serial_port_in(port, SCxSR);
scr_status = serial_port_in(port, SCSCR);
+ if (port->type == PORT_SCIF || port->type == PORT_HSCIF)
+ slr_status = serial_port_in(port, SCLSR);
err_enabled = scr_status & port_rx_irq_mask(port);
/* Tx Interrupt */
@@ -1015,8 +1018,11 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
* DR flags
*/
if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
- (scr_status & SCSCR_RIE))
+ (scr_status & SCSCR_RIE)) {
+ if (port->type == PORT_SCIF || port->type == PORT_HSCIF)
+ sci_handle_fifo_overrun(port);
ret = sci_rx_interrupt(irq, ptr);
+ }
/* Error Interrupt */
if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
@@ -1026,6 +1032,12 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
ret = sci_br_interrupt(irq, ptr);
+ /* Overrun Interrupt */
+ if (port->type == PORT_SCIF || port->type == PORT_HSCIF) {
+ if (slr_status & 0x01)
+ sci_handle_fifo_overrun(port);
+ }
+
return ret;
}
@@ -2605,7 +2617,7 @@ static int sci_probe(struct platform_device *dev)
return 0;
}
-static int sci_suspend(struct device *dev)
+static __maybe_unused int sci_suspend(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
@@ -2615,7 +2627,7 @@ static int sci_suspend(struct device *dev)
return 0;
}
-static int sci_resume(struct device *dev)
+static __maybe_unused int sci_resume(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
@@ -2625,10 +2637,7 @@ static int sci_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops sci_dev_pm_ops = {
- .suspend = sci_suspend,
- .resume = sci_resume,
-};
+static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);
static struct platform_driver sci_driver = {
.probe = sci_probe,
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index b269f6bd16d6..27ed0e960880 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -177,7 +177,7 @@ static void sirfsoc_uart_stop_tx(struct uart_port *port)
dmaengine_pause(sirfport->tx_dma_chan);
sirfport->tx_dma_state = TX_DMA_PAUSE;
} else {
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~uint_en->sirfsoc_txfifo_empty_en);
@@ -186,7 +186,7 @@ static void sirfsoc_uart_stop_tx(struct uart_port *port)
uint_en->sirfsoc_txfifo_empty_en);
}
} else {
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~uint_en->sirfsoc_txfifo_empty_en);
@@ -217,7 +217,7 @@ static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
}
if (sirfport->tx_dma_state == TX_DMA_RUNNING)
return;
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)&
~(uint_en->sirfsoc_txfifo_empty_en));
@@ -244,7 +244,7 @@ static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
}
if (tran_size < 4)
sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)|
uint_en->sirfsoc_txfifo_empty_en);
@@ -293,7 +293,7 @@ static void sirfsoc_uart_start_tx(struct uart_port *port)
sirfsoc_uart_pio_tx_chars(sirfport,
SIRFSOC_UART_IO_TX_REASONABLE_CNT);
wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)|
uint_en->sirfsoc_txfifo_empty_en);
@@ -311,7 +311,7 @@ static void sirfsoc_uart_stop_rx(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
if (sirfport->rx_dma_chan) {
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
@@ -322,7 +322,7 @@ static void sirfsoc_uart_stop_rx(struct uart_port *port)
uint_en->sirfsoc_rx_done_en);
dmaengine_terminate_all(sirfport->rx_dma_chan);
} else {
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)&
~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
@@ -344,7 +344,7 @@ static void sirfsoc_uart_disable_ms(struct uart_port *port)
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
wr_regl(port, ureg->sirfsoc_afc_ctrl,
rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)&
~uint_en->sirfsoc_cts_en);
@@ -380,7 +380,7 @@ static void sirfsoc_uart_enable_ms(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_afc_ctrl,
rd_regl(port, ureg->sirfsoc_afc_ctrl) |
SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)
| uint_en->sirfsoc_cts_en);
@@ -544,7 +544,7 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
sirfport->rx_io_count = 0;
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_done);
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(uint_en->sirfsoc_rx_done_en));
@@ -555,7 +555,7 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
} else {
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_done);
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
(uint_en->sirfsoc_rx_done_en));
@@ -578,7 +578,7 @@ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
dmaengine_terminate_all(sirfport->rx_dma_chan);
sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(uint_en->sirfsoc_rx_timeout_en));
@@ -598,7 +598,7 @@ static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
if (sirfport->rx_io_count == 4) {
sirfport->rx_io_count = 0;
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(uint_en->sirfsoc_rx_done_en));
@@ -748,7 +748,7 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
sirfsoc_rx_submit_one_dma_desc(port, i);
sirfport->rx_completed = sirfport->rx_issued = 0;
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
SIRFUART_RX_DMA_INT_EN(port, uint_en));
@@ -770,7 +770,7 @@ static void sirfsoc_uart_start_rx(struct uart_port *port)
if (sirfport->rx_dma_chan)
sirfsoc_uart_start_next_rx_dma(port);
else {
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
SIRFUART_RX_IO_INT_EN(port, uint_en));
@@ -1124,7 +1124,7 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- if (!sirfport->is_marco)
+ if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
else
wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
@@ -1271,7 +1271,7 @@ static struct uart_driver sirfsoc_uart_drv = {
static struct of_device_id sirfsoc_uart_ids[] = {
{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
- { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart},
+ { .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
{}
};
@@ -1350,8 +1350,8 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
gpio_direction_output(sirfport->rts_gpio, 1);
}
usp_no_flow_control:
- if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
- sirfport->is_marco = true;
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
+ sirfport->is_atlas7 = true;
if (of_property_read_u32(pdev->dev.of_node,
"fifosize",
@@ -1393,7 +1393,7 @@ usp_no_flow_control:
goto err;
}
port->uartclk = clk_get_rate(sirfport->clk);
- if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-bt-uart")) {
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-bt-uart")) {
sirfport->clk_general = devm_clk_get(&pdev->dev, "general");
if (IS_ERR(sirfport->clk_general)) {
ret = PTR_ERR(sirfport->clk_general);
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 275d03893990..727eb6b88fff 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -421,8 +421,8 @@ struct sirfsoc_uart_port {
bool is_bt_uart;
struct clk *clk_general;
struct clk *clk_noc;
- /* for SiRFmarco, there are SET/CLR for UART_INT_EN */
- bool is_marco;
+ /* for SiRFatlas7, there are SET/CLR for UART_INT_EN */
+ bool is_atlas7;
struct sirfsoc_uart_register *uart_reg;
struct dma_chan *rx_dma_chan;
struct dma_chan *tx_dma_chan;
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
new file mode 100644
index 000000000000..594b63331ef4
--- /dev/null
+++ b/drivers/tty/serial/sprd_serial.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright (C) 2012-2015 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_SPRD_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/* device name */
+#define UART_NR_MAX 8
+#define SPRD_TTY_NAME "ttyS"
+#define SPRD_FIFO_SIZE 128
+#define SPRD_DEF_RATE 26000000
+#define SPRD_BAUD_IO_LIMIT 3000000
+#define SPRD_TIMEOUT 256
+
+/* the offset of serial registers and BITs for them */
+/* data registers */
+#define SPRD_TXD 0x0000
+#define SPRD_RXD 0x0004
+
+/* line status register and its BITs */
+#define SPRD_LSR 0x0008
+#define SPRD_LSR_OE BIT(4)
+#define SPRD_LSR_FE BIT(3)
+#define SPRD_LSR_PE BIT(2)
+#define SPRD_LSR_BI BIT(7)
+#define SPRD_LSR_TX_OVER BIT(15)
+
+/* data number in TX and RX fifo */
+#define SPRD_STS1 0x000C
+
+/* interrupt enable register and its BITs */
+#define SPRD_IEN 0x0010
+#define SPRD_IEN_RX_FULL BIT(0)
+#define SPRD_IEN_TX_EMPTY BIT(1)
+#define SPRD_IEN_BREAK_DETECT BIT(7)
+#define SPRD_IEN_TIMEOUT BIT(13)
+
+/* interrupt clear register */
+#define SPRD_ICLR 0x0014
+
+/* line control register */
+#define SPRD_LCR 0x0018
+#define SPRD_LCR_STOP_1BIT 0x10
+#define SPRD_LCR_STOP_2BIT 0x30
+#define SPRD_LCR_DATA_LEN (BIT(2) | BIT(3))
+#define SPRD_LCR_DATA_LEN5 0x0
+#define SPRD_LCR_DATA_LEN6 0x4
+#define SPRD_LCR_DATA_LEN7 0x8
+#define SPRD_LCR_DATA_LEN8 0xc
+#define SPRD_LCR_PARITY (BIT(0) | BIT(1))
+#define SPRD_LCR_PARITY_EN 0x2
+#define SPRD_LCR_EVEN_PAR 0x0
+#define SPRD_LCR_ODD_PAR 0x1
+
+/* control register 1 */
+#define SPRD_CTL1 0x001C
+#define RX_HW_FLOW_CTL_THLD BIT(6)
+#define RX_HW_FLOW_CTL_EN BIT(7)
+#define TX_HW_FLOW_CTL_EN BIT(8)
+#define RX_TOUT_THLD_DEF 0x3E00
+#define RX_HFC_THLD_DEF 0x40
+
+/* fifo threshold register */
+#define SPRD_CTL2 0x0020
+#define THLD_TX_EMPTY 0x40
+#define THLD_RX_FULL 0x40
+
+/* config baud rate register */
+#define SPRD_CLKD0 0x0024
+#define SPRD_CLKD1 0x0028
+
+/* interrupt mask status register */
+#define SPRD_IMSR 0x002C
+#define SPRD_IMSR_RX_FIFO_FULL BIT(0)
+#define SPRD_IMSR_TX_FIFO_EMPTY BIT(1)
+#define SPRD_IMSR_BREAK_DETECT BIT(7)
+#define SPRD_IMSR_TIMEOUT BIT(13)
+
+struct reg_backup {
+ u32 ien;
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 clkd0;
+ u32 clkd1;
+ u32 dspwait;
+};
+
+struct sprd_uart_port {
+ struct uart_port port;
+ struct reg_backup reg_bak;
+ char name[16];
+};
+
+static struct sprd_uart_port *sprd_port[UART_NR_MAX];
+static int sprd_ports_num;
+
+static inline unsigned int serial_in(struct uart_port *port, int offset)
+{
+ return readl_relaxed(port->membase + offset);
+}
+
+static inline void serial_out(struct uart_port *port, int offset, int value)
+{
+ writel_relaxed(value, port->membase + offset);
+}
+
+static unsigned int sprd_tx_empty(struct uart_port *port)
+{
+ if (serial_in(port, SPRD_STS1) & 0xff00)
+ return 0;
+ else
+ return TIOCSER_TEMT;
+}
+
+static unsigned int sprd_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_DSR | TIOCM_CTS;
+}
+
+static void sprd_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* nothing to do */
+}
+
+static void sprd_stop_tx(struct uart_port *port)
+{
+ unsigned int ien, iclr;
+
+ iclr = serial_in(port, SPRD_ICLR);
+ ien = serial_in(port, SPRD_IEN);
+
+ iclr |= SPRD_IEN_TX_EMPTY;
+ ien &= ~SPRD_IEN_TX_EMPTY;
+
+ serial_out(port, SPRD_ICLR, iclr);
+ serial_out(port, SPRD_IEN, ien);
+}
+
+static void sprd_start_tx(struct uart_port *port)
+{
+ unsigned int ien;
+
+ ien = serial_in(port, SPRD_IEN);
+ if (!(ien & SPRD_IEN_TX_EMPTY)) {
+ ien |= SPRD_IEN_TX_EMPTY;
+ serial_out(port, SPRD_IEN, ien);
+ }
+}
+
+static void sprd_stop_rx(struct uart_port *port)
+{
+ unsigned int ien, iclr;
+
+ iclr = serial_in(port, SPRD_ICLR);
+ ien = serial_in(port, SPRD_IEN);
+
+ ien &= ~(SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT);
+ iclr |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT;
+
+ serial_out(port, SPRD_IEN, ien);
+ serial_out(port, SPRD_ICLR, iclr);
+}
+
+/* The Spreadtrum UART does not support break control. */
+static void sprd_break_ctl(struct uart_port *port, int break_state)
+{
+ /* nothing to do */
+}
+
+static int handle_lsr_errors(struct uart_port *port,
+ unsigned int *flag,
+ unsigned int *lsr)
+{
+ int ret = 0;
+
+ /* statistics */
+ if (*lsr & SPRD_LSR_BI) {
+ *lsr &= ~(SPRD_LSR_FE | SPRD_LSR_PE);
+ port->icount.brk++;
+ ret = uart_handle_break(port);
+ if (ret)
+ return ret;
+ } else if (*lsr & SPRD_LSR_PE)
+ port->icount.parity++;
+ else if (*lsr & SPRD_LSR_FE)
+ port->icount.frame++;
+ if (*lsr & SPRD_LSR_OE)
+ port->icount.overrun++;
+
+ /* mask off conditions which should be ignored */
+ *lsr &= port->read_status_mask;
+ if (*lsr & SPRD_LSR_BI)
+ *flag = TTY_BREAK;
+ else if (*lsr & SPRD_LSR_PE)
+ *flag = TTY_PARITY;
+ else if (*lsr & SPRD_LSR_FE)
+ *flag = TTY_FRAME;
+
+ return ret;
+}
+
+static inline void sprd_rx(struct uart_port *port)
+{
+ struct tty_port *tty = &port->state->port;
+ unsigned int ch, flag, lsr, max_count = SPRD_TIMEOUT;
+
+ while ((serial_in(port, SPRD_STS1) & 0x00ff) && max_count--) {
+ lsr = serial_in(port, SPRD_LSR);
+ ch = serial_in(port, SPRD_RXD);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
+ SPRD_LSR_FE | SPRD_LSR_OE))
+ if (handle_lsr_errors(port, &lsr, &flag))
+ continue;
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
+ uart_insert_char(port, lsr, SPRD_LSR_OE, ch, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+static inline void sprd_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+ if (port->x_char) {
+ serial_out(port, SPRD_TXD, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ sprd_stop_tx(port);
+ return;
+ }
+
+ count = THLD_TX_EMPTY;
+ do {
+ serial_out(port, SPRD_TXD, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ sprd_stop_tx(port);
+}
+
+/* this handles the interrupt from one port */
+static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned int ims;
+
+ spin_lock(&port->lock);
+
+ ims = serial_in(port, SPRD_IMSR);
+
+ if (!ims) {
+ spin_unlock(&port->lock);
+ return IRQ_NONE;
+ }
+
+ serial_out(port, SPRD_ICLR, ~0);
+
+ if (ims & (SPRD_IMSR_RX_FIFO_FULL |
+ SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT))
+ sprd_rx(port);
+
+ if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
+ sprd_tx(port);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_startup(struct uart_port *port)
+{
+ int ret = 0;
+ unsigned int ien, fc;
+ unsigned int timeout;
+ struct sprd_uart_port *sp;
+ unsigned long flags;
+
+ serial_out(port, SPRD_CTL2, ((THLD_TX_EMPTY << 8) | THLD_RX_FULL));
+
+ /* clear rx fifo */
+ timeout = SPRD_TIMEOUT;
+ while (timeout-- && serial_in(port, SPRD_STS1) & 0x00ff)
+ serial_in(port, SPRD_RXD);
+
+ /* clear tx fifo */
+ timeout = SPRD_TIMEOUT;
+ while (timeout-- && serial_in(port, SPRD_STS1) & 0xff00)
+ cpu_relax();
+
+ /* clear interrupt */
+ serial_out(port, SPRD_IEN, 0);
+ serial_out(port, SPRD_ICLR, ~0);
+
+ /* allocate irq */
+ sp = container_of(port, struct sprd_uart_port, port);
+ snprintf(sp->name, sizeof(sp->name), "sprd_serial%d", port->line);
+ ret = devm_request_irq(port->dev, port->irq, sprd_handle_irq,
+ IRQF_SHARED, sp->name, port);
+ if (ret) {
+ dev_err(port->dev, "fail to request serial irq %d, ret=%d\n",
+ port->irq, ret);
+ return ret;
+ }
+ fc = serial_in(port, SPRD_CTL1);
+ fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
+ serial_out(port, SPRD_CTL1, fc);
+
+ /* enable interrupt */
+ spin_lock_irqsave(&port->lock, flags);
+ ien = serial_in(port, SPRD_IEN);
+ ien |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
+ serial_out(port, SPRD_IEN, ien);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void sprd_shutdown(struct uart_port *port)
+{
+ serial_out(port, SPRD_IEN, 0);
+ serial_out(port, SPRD_ICLR, ~0);
+ devm_free_irq(port->dev, port->irq, port);
+}
+
+static void sprd_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud, quot;
+ unsigned int lcr = 0, fc;
+ unsigned long flags;
+
+ /* ask the core to calculate the divisor for us */
+ baud = uart_get_baud_rate(port, termios, old, 0, SPRD_BAUD_IO_LIMIT);
+
+ quot = (unsigned int)((port->uartclk + baud / 2) / baud);
+
+ /* set data length */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr |= SPRD_LCR_DATA_LEN5;
+ break;
+ case CS6:
+ lcr |= SPRD_LCR_DATA_LEN6;
+ break;
+ case CS7:
+ lcr |= SPRD_LCR_DATA_LEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= SPRD_LCR_DATA_LEN8;
+ break;
+ }
+
+ /* calculate stop bits */
+ lcr &= ~(SPRD_LCR_STOP_1BIT | SPRD_LCR_STOP_2BIT);
+ if (termios->c_cflag & CSTOPB)
+ lcr |= SPRD_LCR_STOP_2BIT;
+ else
+ lcr |= SPRD_LCR_STOP_1BIT;
+
+ /* calculate parity */
+ lcr &= ~SPRD_LCR_PARITY;
+ termios->c_cflag &= ~CMSPAR; /* mark/space parity not supported */
+ if (termios->c_cflag & PARENB) {
+ lcr |= SPRD_LCR_PARITY_EN;
+ if (termios->c_cflag & PARODD)
+ lcr |= SPRD_LCR_ODD_PAR;
+ else
+ lcr |= SPRD_LCR_EVEN_PAR;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ port->read_status_mask = SPRD_LSR_OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= SPRD_LSR_FE | SPRD_LSR_PE;
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= SPRD_LSR_BI;
+
+ /* characters to ignore */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= SPRD_LSR_PE | SPRD_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= SPRD_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= SPRD_LSR_OE;
+ }
+
+ /* flow control */
+ fc = serial_in(port, SPRD_CTL1);
+ fc &= ~(RX_HW_FLOW_CTL_THLD | RX_HW_FLOW_CTL_EN | TX_HW_FLOW_CTL_EN);
+ if (termios->c_cflag & CRTSCTS) {
+ fc |= RX_HW_FLOW_CTL_THLD;
+ fc |= RX_HW_FLOW_CTL_EN;
+ fc |= TX_HW_FLOW_CTL_EN;
+ }
+
+ /* clock divider bit0~bit15 */
+ serial_out(port, SPRD_CLKD0, quot & 0xffff);
+
+ /* clock divider bit16~bit20 */
+ serial_out(port, SPRD_CLKD1, (quot & 0x1f0000) >> 16);
+ serial_out(port, SPRD_LCR, lcr);
+ fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
+ serial_out(port, SPRD_CTL1, fc);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+}
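As a worked example of the divisor math above: with the default SPRD_DEF_RATE clock of 26 MHz and a requested 115200 baud, quot = (26000000 + 57600) / 115200 = 226 (0xe2), so SPRD_CLKD0 receives 0x00e2 and SPRD_CLKD1 stays 0; the upper divider register only matters once quot exceeds 0xffff. A small self-contained restatement of that arithmetic (illustrative, not driver code):

static unsigned int sprd_example_divisor(unsigned int uartclk, unsigned int baud)
{
        /* rounded divisor; CLKD0 = quot & 0xffff, CLKD1 = (quot >> 16) & 0x1f */
        return (uartclk + baud / 2) / baud;     /* 26000000, 115200 -> 226 */
}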
+
+static const char *sprd_type(struct uart_port *port)
+{
+ return "SPX";
+}
+
+static void sprd_release_port(struct uart_port *port)
+{
+ /* nothing to do */
+}
+
+static int sprd_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sprd_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_SPRD;
+}
+
+static int sprd_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_SPRD)
+ return -EINVAL;
+ if (port->irq != ser->irq)
+ return -EINVAL;
+ return 0;
+}
+
+static struct uart_ops serial_sprd_ops = {
+ .tx_empty = sprd_tx_empty,
+ .get_mctrl = sprd_get_mctrl,
+ .set_mctrl = sprd_set_mctrl,
+ .stop_tx = sprd_stop_tx,
+ .start_tx = sprd_start_tx,
+ .stop_rx = sprd_stop_rx,
+ .break_ctl = sprd_break_ctl,
+ .startup = sprd_startup,
+ .shutdown = sprd_shutdown,
+ .set_termios = sprd_set_termios,
+ .type = sprd_type,
+ .release_port = sprd_release_port,
+ .request_port = sprd_request_port,
+ .config_port = sprd_config_port,
+ .verify_port = sprd_verify_port,
+};
+
+#ifdef CONFIG_SERIAL_SPRD_CONSOLE
+static inline void wait_for_xmitr(struct uart_port *port)
+{
+ unsigned int status, tmout = 10000;
+
+ /* wait up to 10ms for the character(s) to be sent */
+ do {
+ status = serial_in(port, SPRD_STS1);
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while (status & 0xff00);
+}
+
+static void sprd_console_putchar(struct uart_port *port, int ch)
+{
+ wait_for_xmitr(port);
+ serial_out(port, SPRD_TXD, ch);
+}
+
+static void sprd_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &sprd_port[co->index]->port;
+ int locked = 1;
+ unsigned long flags;
+
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ uart_console_write(port, s, count, sprd_console_putchar);
+
+ /* wait for transmitter to become empty */
+ wait_for_xmitr(port);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int __init sprd_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index >= UART_NR_MAX || co->index < 0)
+ co->index = 0;
+
+ if (sprd_port[co->index] == NULL) {
+ pr_info("serial port %d not yet initialized\n", co->index);
+ return -ENODEV;
+ }
+
+ port = &sprd_port[co->index]->port;
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sprd_uart_driver;
+static struct console sprd_console = {
+ .name = SPRD_TTY_NAME,
+ .write = sprd_console_write,
+ .device = uart_console_device,
+ .setup = sprd_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sprd_uart_driver,
+};
+
+#define SPRD_CONSOLE (&sprd_console)
+
+/* Support for earlycon */
+static void sprd_putc(struct uart_port *port, int c)
+{
+ unsigned int timeout = SPRD_TIMEOUT;
+
+ while (timeout-- &&
+ !(readl(port->membase + SPRD_LSR) & SPRD_LSR_TX_OVER))
+ cpu_relax();
+
+ writeb(c, port->membase + SPRD_TXD);
+}
+
+static void sprd_early_write(struct console *con, const char *s,
+ unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, sprd_putc);
+}
+
+static int __init sprd_early_console_setup(
+ struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = sprd_early_write;
+ return 0;
+}
+
+EARLYCON_DECLARE(sprd_serial, sprd_early_console_setup);
+OF_EARLYCON_DECLARE(sprd_serial, "sprd,sc9836-uart",
+ sprd_early_console_setup);
+
+#else /* !CONFIG_SERIAL_SPRD_CONSOLE */
+#define SPRD_CONSOLE NULL
+#endif
+
+static struct uart_driver sprd_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "sprd_serial",
+ .dev_name = SPRD_TTY_NAME,
+ .major = 0,
+ .minor = 0,
+ .nr = UART_NR_MAX,
+ .cons = SPRD_CONSOLE,
+};
+
+static int sprd_probe_dt_alias(int index, struct device *dev)
+{
+ struct device_node *np;
+ int ret = index;
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return ret;
+
+ np = dev->of_node;
+ if (!np)
+ return ret;
+
+ ret = of_alias_get_id(np, "serial");
+ if (IS_ERR_VALUE(ret))
+ ret = index;
+ else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
+ dev_warn(dev, "requested serial port %d not available.\n", ret);
+ ret = index;
+ }
+
+ return ret;
+}
+
+static int sprd_remove(struct platform_device *dev)
+{
+ struct sprd_uart_port *sup = platform_get_drvdata(dev);
+
+ if (sup) {
+ uart_remove_one_port(&sprd_uart_driver, &sup->port);
+ sprd_port[sup->port.line] = NULL;
+ sprd_ports_num--;
+ }
+
+ if (!sprd_ports_num)
+ uart_unregister_driver(&sprd_uart_driver);
+
+ return 0;
+}
+
+static int sprd_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct uart_port *up;
+ struct clk *clk;
+ int irq;
+ int index;
+ int ret;
+
+ for (index = 0; index < ARRAY_SIZE(sprd_port); index++)
+ if (sprd_port[index] == NULL)
+ break;
+
+ if (index == ARRAY_SIZE(sprd_port))
+ return -EBUSY;
+
+ index = sprd_probe_dt_alias(index, &pdev->dev);
+
+ sprd_port[index] = devm_kzalloc(&pdev->dev,
+ sizeof(*sprd_port[index]), GFP_KERNEL);
+ if (!sprd_port[index])
+ return -ENOMEM;
+
+ up = &sprd_port[index]->port;
+ up->dev = &pdev->dev;
+ up->line = index;
+ up->type = PORT_SPRD;
+ up->iotype = SERIAL_IO_PORT;
+ up->uartclk = SPRD_DEF_RATE;
+ up->fifosize = SPRD_FIFO_SIZE;
+ up->ops = &serial_sprd_ops;
+ up->flags = UPF_BOOT_AUTOCONF;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk))
+ up->uartclk = clk_get_rate(clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "not provide mem resource\n");
+ return -ENODEV;
+ }
+ up->mapbase = res->start;
+ up->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(up->membase))
+ return PTR_ERR(up->membase);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "not provide irq resource\n");
+ return -ENODEV;
+ }
+ up->irq = irq;
+
+ if (!sprd_ports_num) {
+ ret = uart_register_driver(&sprd_uart_driver);
+ if (ret < 0) {
+ pr_err("Failed to register SPRD-UART driver\n");
+ return ret;
+ }
+ }
+ sprd_ports_num++;
+
+ ret = uart_add_one_port(&sprd_uart_driver, up);
+ if (ret) {
+ sprd_port[index] = NULL;
+ sprd_remove(pdev);
+ }
+
+ platform_set_drvdata(pdev, up);
+
+ return ret;
+}
+
+static int sprd_suspend(struct device *dev)
+{
+ struct sprd_uart_port *sup = dev_get_drvdata(dev);
+
+ uart_suspend_port(&sprd_uart_driver, &sup->port);
+
+ return 0;
+}
+
+static int sprd_resume(struct device *dev)
+{
+ struct sprd_uart_port *sup = dev_get_drvdata(dev);
+
+ uart_resume_port(&sprd_uart_driver, &sup->port);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(sprd_pm_ops, sprd_suspend, sprd_resume);
+
+static const struct of_device_id serial_ids[] = {
+ {.compatible = "sprd,sc9836-uart",},
+ {}
+};
+
+static struct platform_driver sprd_platform_driver = {
+ .probe = sprd_probe,
+ .remove = sprd_remove,
+ .driver = {
+ .name = "sprd_serial",
+ .of_match_table = of_match_ptr(serial_ids),
+ .pm = &sprd_pm_ops,
+ },
+};
+
+module_platform_driver(sprd_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum SoC serial driver series");
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 542bab37e502..cff531a51a78 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -637,10 +637,12 @@ static void cdns_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- /* Empty the receive FIFO 1st before making changes */
- while ((cdns_uart_readl(CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
- cdns_uart_readl(CDNS_UART_FIFO_OFFSET);
+ /* Wait for the transmit FIFO to empty before making changes */
+ if (!(cdns_uart_readl(CDNS_UART_CR_OFFSET) & CDNS_UART_CR_TX_DIS)) {
+ while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) &
+ CDNS_UART_SR_TXEMPTY)) {
+ cpu_relax();
+ }
}
/* Disable the TX and RX to set baud rate */
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 42bad18c66c9..259a4d5a4e8f 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -90,7 +90,7 @@ static void sysrq_handle_loglevel(int key)
i = key - '0';
console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
- printk("Loglevel set to %d\n", i);
+ pr_info("Loglevel set to %d\n", i);
console_loglevel = i;
}
static struct sysrq_key_op sysrq_loglevel_op = {
@@ -220,7 +220,7 @@ static void showacpu(void *dummy)
return;
spin_lock_irqsave(&show_lock, flags);
- printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ pr_info("CPU%d:\n", smp_processor_id());
show_stack(NULL, NULL);
spin_unlock_irqrestore(&show_lock, flags);
}
@@ -243,7 +243,7 @@ static void sysrq_handle_showallcpus(int key)
struct pt_regs *regs = get_irq_regs();
if (regs) {
- printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ pr_info("CPU%d:\n", smp_processor_id());
show_regs(regs);
}
schedule_work(&sysrq_showallcpus);
@@ -355,8 +355,9 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL,
- 0, NULL, true);
+ if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
+ GFP_KERNEL, 0, NULL, true))
+ pr_info("OOM request ignored because killer is disabled\n");
}
static DECLARE_WORK(moom_work, moom_callback);
@@ -522,7 +523,7 @@ void __handle_sysrq(int key, bool check_mask)
*/
orig_log_level = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
- printk(KERN_INFO "SysRq : ");
+ pr_info("SysRq : ");
op_p = __sysrq_get_key_op(key);
if (op_p) {
@@ -531,14 +532,14 @@ void __handle_sysrq(int key, bool check_mask)
* should not) and is the invoked operation enabled?
*/
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
- printk("%s\n", op_p->action_msg);
+ pr_cont("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
op_p->handler(key);
} else {
- printk("This sysrq operation is disabled.\n");
+ pr_cont("This sysrq operation is disabled.\n");
}
} else {
- printk("HELP : ");
+ pr_cont("HELP : ");
/* Only print the help msg once per handler */
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
@@ -549,10 +550,10 @@ void __handle_sysrq(int key, bool check_mask)
;
if (j != i)
continue;
- printk("%s ", sysrq_key_table[i]->help_msg);
+ pr_cont("%s ", sysrq_key_table[i]->help_msg);
}
}
- printk("\n");
+ pr_cont("\n");
console_loglevel = orig_log_level;
}
rcu_read_unlock();
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 3605103fc1ac..75661641f5fe 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -557,3 +557,9 @@ int tty_buffer_set_limit(struct tty_port *port, int limit)
return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
+
+/* slave ptys can claim nested buffer lock when handling BRK and INTR */
+void tty_buffer_set_lock_subclass(struct tty_port *port)
+{
+ lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
+}
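Both this helper and the tty_mutex.c change later in this series rely on a lockdep subclass identifier shared across the tty layer; the per-file TTY_MUTEX_* enum removed below is presumably replaced by a common definition along the lines of (an assumption, shown only for context):

enum {
        TTY_LOCK_NORMAL = 0,
        TTY_LOCK_SLAVE,
};

declared in include/linux/tty.h so that both the legacy mutex and the flip-buffer lock can be annotated with the same slave subclass.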
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4f35b43e2475..51f066aa375e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty)
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+
tty->count++;
WARN_ON(!tty->ldisc);
@@ -2106,10 +2109,6 @@ retry_open:
retval = -ENODEV;
filp->f_flags = saved_flags;
- if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
- !capable(CAP_SYS_ADMIN))
- retval = -EBUSY;
-
if (retval) {
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 1787fa4d9448..a5cf253b2544 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -530,7 +530,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
* Locking: termios_rwsem
*/
-int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
+static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
struct ktermios old_termios;
struct tty_ldisc *ld;
@@ -563,7 +563,6 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
up_write(&tty->termios_rwsem);
return 0;
}
-EXPORT_SYMBOL_GPL(tty_set_termios);
/**
* set_termios - set termios values for a tty
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 4486741190c4..0efcf713b756 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,18 +4,8 @@
#include <linux/semaphore.h>
#include <linux/sched.h>
-/*
- * Nested tty locks are necessary for releasing pty pairs.
- * The stable lock order is master pty first, then slave pty.
- */
-
/* Legacy tty mutex glue */
-enum {
- TTY_MUTEX_NORMAL,
- TTY_MUTEX_SLAVE,
-};
-
/*
* Getting the big tty mutex.
*/
@@ -46,12 +36,8 @@ EXPORT_SYMBOL(tty_unlock);
void __lockfunc tty_lock_slave(struct tty_struct *tty)
{
- if (tty && tty != tty->link) {
- WARN_ON(!mutex_is_locked(&tty->link->legacy_mutex) ||
- !tty->driver->type == TTY_DRIVER_TYPE_PTY ||
- !tty->driver->type == PTY_TYPE_SLAVE);
+ if (tty && tty != tty->link)
tty_lock(tty);
- }
}
void __lockfunc tty_unlock_slave(struct tty_struct *tty)
@@ -62,5 +48,5 @@ void __lockfunc tty_unlock_slave(struct tty_struct *tty)
void tty_set_lock_subclass(struct tty_struct *tty)
{
- lockdep_set_subclass(&tty->legacy_mutex, TTY_MUTEX_SLAVE);
+ lockdep_set_subclass(&tty->legacy_mutex, TTY_LOCK_SLAVE);
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index f3fbbbca9bde..6e00572cbeb9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -500,6 +500,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
#endif
if (DO_UPDATE(vc))
do_update_region(vc, (unsigned long) p, count);
+ notify_update(vc);
}
/* used by selection: complement pointer position */
@@ -516,6 +517,7 @@ void complement_pos(struct vc_data *vc, int offset)
scr_writew(old, screenpos(vc, old_offset, 1));
if (DO_UPDATE(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
+ notify_update(vc);
}
old_offset = offset;
@@ -533,8 +535,8 @@ void complement_pos(struct vc_data *vc, int offset)
oldy = (offset >> 1) / vc->vc_cols;
vc->vc_sw->con_putc(vc, new, oldy, oldx);
}
+ notify_update(vc);
}
-
}
static void insert_char(struct vc_data *vc, unsigned int nr)
@@ -3318,11 +3320,8 @@ static int vt_bind(struct con_driver *con)
if (first == 0 && last == MAX_NR_CONSOLES -1)
deflt = 1;
- if (first != -1) {
- console_lock();
+ if (first != -1)
do_bind_con_driver(csw, first, last, deflt);
- console_unlock();
- }
first = -1;
last = -1;
@@ -3362,9 +3361,7 @@ static int vt_unbind(struct con_driver *con)
deflt = 1;
if (first != -1) {
- console_lock();
ret = do_unbind_con_driver(csw, first, last, deflt);
- console_unlock();
if (ret != 0)
return ret;
}
@@ -3394,11 +3391,15 @@ static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
struct con_driver *con = dev_get_drvdata(dev);
int bind = simple_strtoul(buf, NULL, 0);
+ console_lock();
+
if (bind)
vt_bind(con);
else
vt_unbind(con);
+ console_unlock();
+
return count;
}
@@ -3665,8 +3666,7 @@ int do_unregister_con_driver(const struct consw *csw)
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con_driver = &registered_con_driver[i];
- if (con_driver->con == csw &&
- con_driver->flag & CON_DRIVER_FLAG_INIT) {
+ if (con_driver->con == csw) {
vtconsole_deinit_device(con_driver);
device_destroy(vtconsole_class,
MKDEV(0, con_driver->node));
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 5a90914d856d..8a15c323c030 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -104,6 +104,26 @@ config UIO_NETX
To compile this driver as a module, choose M here; the module
will be called uio_netx.
+config UIO_FSL_ELBC_GPCM
+ tristate "eLBC/GPCM driver"
+ depends on FSL_LBC
+ help
+ Generic driver for accessing a peripheral connected to an eLBC port
+ that is running in GPCM mode. GPCM is an interface for simple,
+ lower-performance memories and memory-mapped devices. For devices using
+ FCM or UPM eLBC modes, other device-specific drivers are available.
+
+config UIO_FSL_ELBC_GPCM_NETX5152
+ bool "eLBC/GPCM netX 51/52 support"
+ depends on UIO_FSL_ELBC_GPCM
+ help
+ This will add support for netX 51/52 devices connected via eLBC/GPCM.
+ In particular, it implements interrupt handling. This can be used
+ together with the userspace netX stack from Hilscher.
+
+ Information about this hardware can be found at:
+ http://www.hilscher.com/netx
+
config UIO_PRUSS
tristate "Texas Instruments PRUSS driver"
depends on ARCH_DAVINCI_DA850
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index d3218bde3aeb..8560dad52d0f 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
+obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
new file mode 100644
index 000000000000..b6cac91c2ced
--- /dev/null
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -0,0 +1,499 @@
+/* uio_fsl_elbc_gpcm: UIO driver for eLBC/GPCM peripherals
+
+ Copyright (C) 2014 Linutronix GmbH
+ Author: John Ogness <john.ogness@linutronix.de>
+
+ This driver provides UIO access to memory of a peripheral connected
+ to the Freescale enhanced local bus controller (eLBC) interface
+ using the general purpose chip-select mode (GPCM).
+
+ Here is an example of the device tree entries:
+
+ localbus@ffe05000 {
+ ranges = <0x2 0x0 0x0 0xff810000 0x10000>;
+
+ dpm@2,0 {
+ compatible = "fsl,elbc-gpcm-uio";
+ reg = <0x2 0x0 0x10000>;
+ elbc-gpcm-br = <0xff810800>;
+ elbc-gpcm-or = <0xffff09f7>;
+ interrupt-parent = <&mpic>;
+ interrupts = <4 1>;
+ device_type = "netx5152";
+ uio_name = "netx_custom";
+ netx5152,init-win0-offset = <0x0>;
+ };
+ };
+
+ Only the entries reg (to identify bank) and elbc-gpcm-* (initial BR/OR
+ values) are required. The entries interrupt*, device_type, and uio_name
+ are optional (as well as any type-specific options such as
+ netx5152,init-win0-offset). As long as no interrupt handler is needed,
+ this driver can be used without any type-specific implementation.
+
+ The netx5152 type has been tested to work with the netX 51/52 hardware
+ from Hilscher using the Hilscher userspace netX stack.
+
+ The netx5152 type should serve as a model to add new type-specific
+ devices as needed.
+*/
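Once the device is bound, the mapped bank is exposed through the normal UIO interface: mem[0] is the peripheral window and, when an interrupt is wired up, read() on the device blocks until the in-kernel handler signals an event. A hedged userspace sketch of that usage, assuming the device appears as /dev/uio0 and using the 0x10000 window size from the example binding:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/uio0", O_RDWR);
        if (fd < 0)
                return 1;

        /* mmap offset n * page size selects mem[n]; mem[0] is the GPCM window */
        void *dpm = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (dpm == MAP_FAILED)
                return 1;

        uint32_t events;
        if (read(fd, &events, sizeof(events)) == sizeof(events))
                printf("interrupt %u, first word 0x%08x\n",
                       (unsigned int)events, *(volatile uint32_t *)dpm);

        munmap(dpm, 0x10000);
        close(fd);
        return 0;
}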
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/uio_driver.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/fsl_lbc.h>
+
+#define MAX_BANKS 8
+
+struct fsl_elbc_gpcm {
+ struct device *dev;
+ struct fsl_lbc_regs __iomem *lbc;
+ u32 bank;
+ const char *name;
+
+ void (*init)(struct uio_info *info);
+ void (*shutdown)(struct uio_info *info, bool init_err);
+ irqreturn_t (*irq_handler)(int irq, struct uio_info *info);
+};
+
+static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+
+DEVICE_ATTR(reg_br, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
+DEVICE_ATTR(reg_or, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
+
+static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct uio_info *info = platform_get_drvdata(pdev);
+ struct fsl_elbc_gpcm *priv = info->priv;
+ struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
+
+ if (attr == &dev_attr_reg_br) {
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
+ in_be32(&bank->br));
+
+ } else if (attr == &dev_attr_reg_or) {
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
+ in_be32(&bank->or));
+ }
+
+ return 0;
+}
+
+static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct uio_info *info = platform_get_drvdata(pdev);
+ struct fsl_elbc_gpcm *priv = info->priv;
+ struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
+ unsigned long val;
+ u32 reg_br_cur;
+ u32 reg_or_cur;
+ u32 reg_new;
+
+ /* parse user input */
+ if (kstrtoul(buf, 0, &val) != 0)
+ return -EINVAL;
+ reg_new = (u32)val;
+
+ /* read current values */
+ reg_br_cur = in_be32(&bank->br);
+ reg_or_cur = in_be32(&bank->or);
+
+ if (attr == &dev_attr_reg_br) {
+ /* not allowed to change effective base address */
+ if ((reg_br_cur & reg_or_cur & BR_BA) !=
+ (reg_new & reg_or_cur & BR_BA)) {
+ return -EINVAL;
+ }
+
+ /* not allowed to change mode */
+ if ((reg_new & BR_MSEL) != BR_MS_GPCM)
+ return -EINVAL;
+
+ /* write new value (force valid) */
+ out_be32(&bank->br, reg_new | BR_V);
+
+ } else if (attr == &dev_attr_reg_or) {
+ /* not allowed to change access mask */
+ if ((reg_or_cur & OR_GPCM_AM) != (reg_new & OR_GPCM_AM))
+ return -EINVAL;
+
+ /* write new value */
+ out_be32(&bank->or, reg_new);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+#ifdef CONFIG_UIO_FSL_ELBC_GPCM_NETX5152
+#define DPM_HOST_WIN0_OFFSET 0xff00
+#define DPM_HOST_INT_STAT0 0xe0
+#define DPM_HOST_INT_EN0 0xf0
+#define DPM_HOST_INT_MASK 0xe600ffff
+#define DPM_HOST_INT_GLOBAL_EN 0x80000000
+
+static irqreturn_t netx5152_irq_handler(int irq, struct uio_info *info)
+{
+ void __iomem *reg_int_en = info->mem[0].internal_addr +
+ DPM_HOST_WIN0_OFFSET +
+ DPM_HOST_INT_EN0;
+ void __iomem *reg_int_stat = info->mem[0].internal_addr +
+ DPM_HOST_WIN0_OFFSET +
+ DPM_HOST_INT_STAT0;
+
+ /* check if an interrupt is enabled and active */
+ if ((ioread32(reg_int_en) & ioread32(reg_int_stat) &
+ DPM_HOST_INT_MASK) == 0) {
+ return IRQ_NONE;
+ }
+
+ /* disable interrupts */
+ iowrite32(ioread32(reg_int_en) & ~DPM_HOST_INT_GLOBAL_EN, reg_int_en);
+
+ return IRQ_HANDLED;
+}
+
+static void netx5152_init(struct uio_info *info)
+{
+ unsigned long win0_offset = DPM_HOST_WIN0_OFFSET;
+ struct fsl_elbc_gpcm *priv = info->priv;
+ const void *prop;
+
+ /* get an optional initial win0 offset */
+ prop = of_get_property(priv->dev->of_node,
+ "netx5152,init-win0-offset", NULL);
+ if (prop)
+ win0_offset = of_read_ulong(prop, 1);
+
+ /* disable interrupts */
+ iowrite32(0, info->mem[0].internal_addr + win0_offset +
+ DPM_HOST_INT_EN0);
+}
+
+static void netx5152_shutdown(struct uio_info *info, bool init_err)
+{
+ if (init_err)
+ return;
+
+ /* disable interrupts */
+ iowrite32(0, info->mem[0].internal_addr + DPM_HOST_WIN0_OFFSET +
+ DPM_HOST_INT_EN0);
+}
+#endif
+
+static void setup_periph(struct fsl_elbc_gpcm *priv,
+ const char *type)
+{
+#ifdef CONFIG_UIO_FSL_ELBC_GPCM_NETX5152
+ if (strcmp(type, "netx5152") == 0) {
+ priv->irq_handler = netx5152_irq_handler;
+ priv->init = netx5152_init;
+ priv->shutdown = netx5152_shutdown;
+ priv->name = "netX 51/52";
+ return;
+ }
+#endif
+}
+
+static int check_of_data(struct fsl_elbc_gpcm *priv,
+ struct resource *res,
+ u32 reg_br, u32 reg_or)
+{
+ /* check specified bank */
+ if (priv->bank >= MAX_BANKS) {
+ dev_err(priv->dev, "invalid bank\n");
+ return -ENODEV;
+ }
+
+ /* check specified mode (BR_MS_GPCM is 0) */
+ if ((reg_br & BR_MSEL) != BR_MS_GPCM) {
+ dev_err(priv->dev, "unsupported mode\n");
+ return -ENODEV;
+ }
+
+ /* check specified mask vs. resource size */
+ if ((~(reg_or & OR_GPCM_AM) + 1) != resource_size(res)) {
+ dev_err(priv->dev, "address mask / size mismatch\n");
+ return -ENODEV;
+ }
+
+ /* check specified address */
+ if ((reg_br & reg_or & BR_BA) != fsl_lbc_addr(res->start)) {
+ dev_err(priv->dev, "base address mismatch\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int get_of_data(struct fsl_elbc_gpcm *priv, struct device_node *node,
+ struct resource *res, u32 *reg_br,
+ u32 *reg_or, unsigned int *irq, char **name)
+{
+ const char *dt_name;
+ const char *type;
+ int ret;
+
+ /* get the memory resource */
+ ret = of_address_to_resource(node, 0, res);
+ if (ret) {
+ dev_err(priv->dev, "failed to get resource\n");
+ return ret;
+ }
+
+ /* get the bank number */
+ ret = of_property_read_u32(node, "reg", &priv->bank);
+ if (ret) {
+ dev_err(priv->dev, "failed to get bank number\n");
+ return ret;
+ }
+
+ /* get BR value to set */
+ ret = of_property_read_u32(node, "elbc-gpcm-br", reg_br);
+ if (ret) {
+ dev_err(priv->dev, "missing elbc-gpcm-br value\n");
+ return ret;
+ }
+
+ /* get OR value to set */
+ ret = of_property_read_u32(node, "elbc-gpcm-or", reg_or);
+ if (ret) {
+ dev_err(priv->dev, "missing elbc-gpcm-or value\n");
+ return ret;
+ }
+
+ /* get optional peripheral type */
+ priv->name = "generic";
+ if (of_property_read_string(node, "device_type", &type) == 0)
+ setup_periph(priv, type);
+
+ /* get optional irq value */
+ *irq = irq_of_parse_and_map(node, 0);
+
+ /* sanity check device tree data */
+ ret = check_of_data(priv, res, *reg_br, *reg_or);
+ if (ret)
+ return ret;
+
+ /* get optional uio name */
+ if (of_property_read_string(node, "uio_name", &dt_name) != 0)
+ dt_name = "eLBC_GPCM";
+ *name = kstrdup(dt_name, GFP_KERNEL);
+ if (!*name)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct fsl_elbc_gpcm *priv;
+ struct uio_info *info;
+ char *uio_name = NULL;
+ struct resource res;
+ unsigned int irq;
+ u32 reg_br_cur;
+ u32 reg_or_cur;
+ u32 reg_br_new;
+ u32 reg_or_new;
+ int ret;
+
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+
+ /* allocate private data */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = &pdev->dev;
+ priv->lbc = fsl_lbc_ctrl_dev->regs;
+
+ /* get device tree data */
+ ret = get_of_data(priv, node, &res, &reg_br_new, &reg_or_new,
+ &irq, &uio_name);
+ if (ret)
+ goto out_err0;
+
+ /* allocate UIO structure */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ /* get current BR/OR values */
+ reg_br_cur = in_be32(&priv->lbc->bank[priv->bank].br);
+ reg_or_cur = in_be32(&priv->lbc->bank[priv->bank].or);
+
+ /* if bank already configured, make sure it matches */
+ if ((reg_br_cur & BR_V)) {
+ if ((reg_br_cur & BR_MSEL) != BR_MS_GPCM ||
+ (reg_br_cur & reg_or_cur & BR_BA)
+ != fsl_lbc_addr(res.start)) {
+ dev_err(priv->dev,
+ "bank in use by another peripheral\n");
+ ret = -ENODEV;
+ goto out_err1;
+ }
+
+ /* warn if behavior settings changing */
+ if ((reg_br_cur & ~(BR_BA | BR_V)) !=
+ (reg_br_new & ~(BR_BA | BR_V))) {
+ dev_warn(priv->dev,
+ "modifying BR settings: 0x%08x -> 0x%08x",
+ reg_br_cur, reg_br_new);
+ }
+ if ((reg_or_cur & ~OR_GPCM_AM) != (reg_or_new & ~OR_GPCM_AM)) {
+ dev_warn(priv->dev,
+ "modifying OR settings: 0x%08x -> 0x%08x",
+ reg_or_cur, reg_or_new);
+ }
+ }
+
+ /* configure the bank (force base address and GPCM) */
+ reg_br_new &= ~(BR_BA | BR_MSEL);
+ reg_br_new |= fsl_lbc_addr(res.start) | BR_MS_GPCM | BR_V;
+ out_be32(&priv->lbc->bank[priv->bank].or, reg_or_new);
+ out_be32(&priv->lbc->bank[priv->bank].br, reg_br_new);
+
+ /* map the memory resource */
+ info->mem[0].internal_addr = ioremap(res.start, resource_size(&res));
+ if (!info->mem[0].internal_addr) {
+ dev_err(priv->dev, "failed to map chip region\n");
+ ret = -ENODEV;
+ goto out_err1;
+ }
+
+ /* set all UIO data */
+ if (node->name)
+ info->mem[0].name = kstrdup(node->name, GFP_KERNEL);
+ info->mem[0].addr = res.start;
+ info->mem[0].size = resource_size(&res);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+ info->priv = priv;
+ info->name = uio_name;
+ info->version = "0.0.1";
+ if (irq != NO_IRQ) {
+ if (priv->irq_handler) {
+ info->irq = irq;
+ info->irq_flags = IRQF_SHARED;
+ info->handler = priv->irq_handler;
+ } else {
+ irq = NO_IRQ;
+ dev_warn(priv->dev, "ignoring irq, no handler\n");
+ }
+ }
+
+ if (priv->init)
+ priv->init(info);
+
+ /* register UIO device */
+ if (uio_register_device(priv->dev, info) != 0) {
+ dev_err(priv->dev, "UIO registration failed\n");
+ ret = -ENODEV;
+ goto out_err2;
+ }
+
+ /* store private data */
+ platform_set_drvdata(pdev, info);
+
+ /* create sysfs files */
+ ret = device_create_file(priv->dev, &dev_attr_reg_br);
+ if (ret)
+ goto out_err3;
+ ret = device_create_file(priv->dev, &dev_attr_reg_or);
+ if (ret)
+ goto out_err4;
+
+ dev_info(priv->dev,
+ "eLBC/GPCM device (%s) at 0x%llx, bank %d, irq=%d\n",
+ priv->name, (unsigned long long)res.start, priv->bank,
+ irq != NO_IRQ ? irq : -1);
+
+ return 0;
+out_err4:
+ device_remove_file(priv->dev, &dev_attr_reg_br);
+out_err3:
+ platform_set_drvdata(pdev, NULL);
+ uio_unregister_device(info);
+out_err2:
+ if (priv->shutdown)
+ priv->shutdown(info, true);
+ iounmap(info->mem[0].internal_addr);
+out_err1:
+ kfree(info->mem[0].name);
+ kfree(info);
+out_err0:
+ kfree(uio_name);
+ kfree(priv);
+ return ret;
+}
+
+static int uio_fsl_elbc_gpcm_remove(struct platform_device *pdev)
+{
+ struct uio_info *info = platform_get_drvdata(pdev);
+ struct fsl_elbc_gpcm *priv = info->priv;
+
+ device_remove_file(priv->dev, &dev_attr_reg_or);
+ device_remove_file(priv->dev, &dev_attr_reg_br);
+ platform_set_drvdata(pdev, NULL);
+ uio_unregister_device(info);
+ if (priv->shutdown)
+ priv->shutdown(info, false);
+ iounmap(info->mem[0].internal_addr);
+ kfree(info->mem[0].name);
+ kfree(info->name);
+ kfree(info);
+ kfree(priv);
+
+ return 0;
+
+}
+
+static const struct of_device_id uio_fsl_elbc_gpcm_match[] = {
+ { .compatible = "fsl,elbc-gpcm-uio", },
+ {}
+};
+
+static struct platform_driver uio_fsl_elbc_gpcm_driver = {
+ .driver = {
+ .name = "fsl,elbc-gpcm-uio",
+ .owner = THIS_MODULE,
+ .of_match_table = uio_fsl_elbc_gpcm_match,
+ },
+ .probe = uio_fsl_elbc_gpcm_probe,
+ .remove = uio_fsl_elbc_gpcm_remove,
+};
+
+static int __init uio_fsl_elbc_gpcm_init(void)
+{
+ return platform_driver_register(&uio_fsl_elbc_gpcm_driver);
+}
+
+static void __exit uio_fsl_elbc_gpcm_exit(void)
+{
+ platform_driver_unregister(&uio_fsl_elbc_gpcm_driver);
+}
+
+module_init(uio_fsl_elbc_gpcm_init);
+module_exit(uio_fsl_elbc_gpcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Ogness <john.ogness@linutronix.de>");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller GPCM driver");
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 077ae12269ce..d0b508b68f3c 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -91,7 +91,8 @@ static int probe(struct pci_dev *pdev,
gdev->info.handler = irqhandler;
gdev->pdev = pdev;
- if (uio_register_device(&pdev->dev, &gdev->info))
+ err = uio_register_device(&pdev->dev, &gdev->info);
+ if (err)
goto err_register;
pci_set_drvdata(pdev, gdev);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index ae481c37a208..8ed451dd651e 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -104,6 +104,8 @@ source "drivers/usb/dwc2/Kconfig"
source "drivers/usb/chipidea/Kconfig"
+source "drivers/usb/isp1760/Kconfig"
+
comment "USB port drivers"
if USB
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index d7be71778059..2f1e2aa42b44 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB) += core/
obj-$(CONFIG_USB_DWC3) += dwc3/
obj-$(CONFIG_USB_DWC2) += dwc2/
+obj-$(CONFIG_USB_ISP1760) += isp1760/
obj-$(CONFIG_USB_MON) += mon/
@@ -23,7 +24,6 @@ obj-$(CONFIG_USB_ISP1362_HCD) += host/
obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
-obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/
obj-$(CONFIG_USB_FUSBH200_HCD) += host/
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 241ae3444fde..4df669437211 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -111,6 +111,9 @@ static void ci_hdrc_pci_remove(struct pci_dev *pdev)
* PCI device structure
*
* Check "pci.h" for details
+ *
+ * Note: the ehci-pci driver may try to probe the device first. You have to
+ * add an ID to the bypass_pci_id_table in the ehci-pci driver to prevent this.
*/
static const struct pci_device_id ci_hdrc_pci_id_table[] = {
{
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5b9825a4538a..a57dc8866fc5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci)
return -ENOMEM;
- platform_set_drvdata(pdev, ci);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, ci);
ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
ci->platdata->name, ci);
if (ret)
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index c1694cff1eaf..48731d0bab35 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci)
if (!hcd)
return -ENOMEM;
+ dev_set_drvdata(ci->dev, ci);
hcd->rsrc_start = ci->hw_bank.phys;
hcd->rsrc_len = ci->hw_bank.size;
hcd->regs = ci->hw_bank.abs;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 4fe18ce3bd5a..ff451048c1ac 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -819,8 +819,8 @@ __acquires(hwep->lock)
}
if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
- /* Assume that device is bus powered for now. */
- *(u16 *)req->buf = ci->remote_wakeup << 1;
+ *(u16 *)req->buf = (ci->remote_wakeup << 1) |
+ ci->gadget.is_selfpowered;
} else if ((setup->bRequestType & USB_RECIP_MASK) \
== USB_RECIP_ENDPOINT) {
dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
@@ -1520,6 +1520,19 @@ static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
return -ENOTSUPP;
}
+static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
+{
+ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ struct ci_hw_ep *hwep = ci->ep0in;
+ unsigned long flags;
+
+ spin_lock_irqsave(hwep->lock, flags);
+ _gadget->is_selfpowered = (is_on != 0);
+ spin_unlock_irqrestore(hwep->lock, flags);
+
+ return 0;
+}
+
/* Change Data+ pullup status
* this func is used by usb_gadget_connect/disconnet
*/
@@ -1549,6 +1562,7 @@ static int ci_udc_stop(struct usb_gadget *gadget);
static const struct usb_gadget_ops usb_gadget_ops = {
.vbus_session = ci_udc_vbus_session,
.wakeup = ci_udc_wakeup,
+ .set_selfpowered = ci_udc_selfpowered,
.pullup = ci_udc_pullup,
.vbus_draw = ci_udc_vbus_draw,
.udc_start = ci_udc_start,
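The new ->set_selfpowered hook is what lets the GET_STATUS handling above report the real power source; a gadget or function driver records that state through the generic gadget helpers. A minimal sketch, assuming the standard usb_gadget_set_selfpowered()/usb_gadget_clear_selfpowered() helpers from <linux/usb/gadget.h>; the wrapper function itself is illustrative:

/* Sketch only: how a gadget driver might record its power source so that
 * GET_STATUS(DEVICE) reflects it. The helpers are the standard ones from
 * <linux/usb/gadget.h>; example_set_power_source() is illustrative. */
#include <linux/types.h>
#include <linux/usb/gadget.h>

static void example_set_power_source(struct usb_gadget *gadget, bool self_powered)
{
	if (self_powered)
		usb_gadget_set_selfpowered(gadget);	/* ends up in ci_udc_selfpowered() */
	else
		usb_gadget_clear_selfpowered(gadget);
}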
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 546a17e8ad5b..e78720b59d67 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1091,6 +1091,7 @@ static int acm_probe(struct usb_interface *intf,
unsigned long quirks;
int num_rx_buf;
int i;
+ unsigned int elength = 0;
int combined_interfaces = 0;
struct device *tty_dev;
int rv = -ENOMEM;
@@ -1136,9 +1137,12 @@ static int acm_probe(struct usb_interface *intf,
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
+ elength = buffer[0];
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
+ if (elength < sizeof(struct usb_cdc_union_desc))
+ goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one "
"union descriptor, skipping ...\n");
@@ -1147,29 +1151,36 @@ static int acm_probe(struct usb_interface *intf,
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
+ if (elength < sizeof(struct usb_cdc_country_functional_desc))
+ goto next_desc;
cfd = (struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE: /* maybe check version */
break; /* for now we ignore it */
case USB_CDC_ACM_TYPE:
+ if (elength < 4)
+ goto next_desc;
ac_management_function = buffer[3];
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
+ if (elength < 5)
+ goto next_desc;
call_management_function = buffer[3];
call_interface_num = buffer[4];
break;
default:
- /* there are LOTS more CDC descriptors that
+ /*
+ * there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: "
- "type %02x, length %d\n",
- buffer[2], buffer[0]);
+ "type %02x, length %ud\n",
+ buffer[2], elength);
break;
}
next_desc:
- buflen -= buffer[0];
- buffer += buffer[0];
+ buflen -= elength;
+ buffer += elength;
}
if (!union_header) {
@@ -1287,10 +1298,8 @@ made_compressed_probe:
dev_dbg(&intf->dev, "interfaces are valid\n");
acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
- if (acm == NULL) {
- dev_err(&intf->dev, "out of memory (acm kzalloc)\n");
+ if (acm == NULL)
goto alloc_fail;
- }
minor = acm_alloc_minor(acm);
if (minor == ACM_TTY_MINORS) {
@@ -1329,42 +1338,32 @@ made_compressed_probe:
acm->quirks = quirks;
buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
- if (!buf) {
- dev_err(&intf->dev, "out of memory (ctrl buffer alloc)\n");
+ if (!buf)
goto alloc_fail2;
- }
acm->ctrl_buffer = buf;
- if (acm_write_buffers_alloc(acm) < 0) {
- dev_err(&intf->dev, "out of memory (write buffer alloc)\n");
+ if (acm_write_buffers_alloc(acm) < 0)
goto alloc_fail4;
- }
acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
- if (!acm->ctrlurb) {
- dev_err(&intf->dev, "out of memory (ctrlurb kmalloc)\n");
+ if (!acm->ctrlurb)
goto alloc_fail5;
- }
+
for (i = 0; i < num_rx_buf; i++) {
struct acm_rb *rb = &(acm->read_buffers[i]);
struct urb *urb;
rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
&rb->dma);
- if (!rb->base) {
- dev_err(&intf->dev, "out of memory "
- "(read bufs usb_alloc_coherent)\n");
+ if (!rb->base)
goto alloc_fail6;
- }
rb->index = i;
rb->instance = acm;
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- dev_err(&intf->dev,
- "out of memory (read urbs usb_alloc_urb)\n");
+ if (!urb)
goto alloc_fail6;
- }
+
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = rb->dma;
if (acm->is_int_ep) {
@@ -1389,11 +1388,8 @@ made_compressed_probe:
struct acm_wb *snd = &(acm->wb[i]);
snd->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (snd->urb == NULL) {
- dev_err(&intf->dev,
- "out of memory (write urbs usb_alloc_urb)\n");
+ if (snd->urb == NULL)
goto alloc_fail7;
- }
if (usb_endpoint_xfer_int(epwrite))
usb_fill_int_urb(snd->urb, usb_dev,
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 684ef70dc09d..506b969ea7fd 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -22,17 +22,25 @@
*/
/* FIXME tune these based on pool statistics ... */
-static const size_t pool_max[HCD_BUFFER_POOLS] = {
- /* platforms without dma-friendly caches might need to
- * prevent cacheline sharing...
- */
- 32,
- 128,
- 512,
- PAGE_SIZE / 2
- /* bigger --> allocate pages */
+static size_t pool_max[HCD_BUFFER_POOLS] = {
+ 32, 128, 512, 2048,
};
+void __init usb_init_pool_max(void)
+{
+ /*
+ * The pool_max values must never be smaller than
+ * ARCH_KMALLOC_MINALIGN.
+ */
+ if (ARCH_KMALLOC_MINALIGN <= 32)
+ ; /* Original value is okay */
+ else if (ARCH_KMALLOC_MINALIGN <= 64)
+ pool_max[0] = 64;
+ else if (ARCH_KMALLOC_MINALIGN <= 128)
+ pool_max[0] = 0; /* Don't use this pool */
+ else
+ BUILD_BUG(); /* We don't allow this */
+}
/* SETUP primitives */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 0b59731c3021..66abdbcfbfa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1689,7 +1689,7 @@ static struct async *reap_as(struct usb_dev_state *ps)
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
as = async_getcompleted(ps);
- if (as)
+ if (as || !connected(ps))
break;
if (signal_pending(current))
break;
@@ -1712,7 +1712,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
}
if (signal_pending(current))
return -EINTR;
- return -EIO;
+ return -ENODEV;
}
static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
@@ -1721,10 +1721,11 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
struct async *as;
as = async_getcompleted(ps);
- retval = -EAGAIN;
if (as) {
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
+ } else {
+ retval = (connected(ps) ? -EAGAIN : -ENODEV);
}
return retval;
}
@@ -1854,7 +1855,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
}
if (signal_pending(current))
return -EINTR;
- return -EIO;
+ return -ENODEV;
}
static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg)
@@ -1862,11 +1863,12 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
int retval;
struct async *as;
- retval = -EAGAIN;
as = async_getcompleted(ps);
if (as) {
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
+ } else {
+ retval = (connected(ps) ? -EAGAIN : -ENODEV);
}
return retval;
}
@@ -2038,7 +2040,8 @@ static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
{
__u32 caps;
- caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM;
+ caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
+ USBDEVFS_CAP_REAP_AFTER_DISCONNECT;
if (!ps->dev->bus->no_stop_on_short)
caps |= USBDEVFS_CAP_BULK_CONTINUATION;
if (ps->dev->bus->sg_tablesize)
@@ -2138,6 +2141,32 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
return -EPERM;
usb_lock_device(dev);
+
+ /* Reap operations are allowed even after disconnection */
+ switch (cmd) {
+ case USBDEVFS_REAPURB:
+ snoop(&dev->dev, "%s: REAPURB\n", __func__);
+ ret = proc_reapurb(ps, p);
+ goto done;
+
+ case USBDEVFS_REAPURBNDELAY:
+ snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
+ ret = proc_reapurbnonblock(ps, p);
+ goto done;
+
+#ifdef CONFIG_COMPAT
+ case USBDEVFS_REAPURB32:
+ snoop(&dev->dev, "%s: REAPURB32\n", __func__);
+ ret = proc_reapurb_compat(ps, p);
+ goto done;
+
+ case USBDEVFS_REAPURBNDELAY32:
+ snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
+ ret = proc_reapurbnonblock_compat(ps, p);
+ goto done;
+#endif
+ }
+
if (!connected(ps)) {
usb_unlock_device(dev);
return -ENODEV;
@@ -2231,16 +2260,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
inode->i_mtime = CURRENT_TIME;
break;
- case USBDEVFS_REAPURB32:
- snoop(&dev->dev, "%s: REAPURB32\n", __func__);
- ret = proc_reapurb_compat(ps, p);
- break;
-
- case USBDEVFS_REAPURBNDELAY32:
- snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
- ret = proc_reapurbnonblock_compat(ps, p);
- break;
-
case USBDEVFS_IOCTL32:
snoop(&dev->dev, "%s: IOCTL32\n", __func__);
ret = proc_ioctl_compat(ps, ptr_to_compat(p));
@@ -2252,16 +2271,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
ret = proc_unlinkurb(ps, p);
break;
- case USBDEVFS_REAPURB:
- snoop(&dev->dev, "%s: REAPURB\n", __func__);
- ret = proc_reapurb(ps, p);
- break;
-
- case USBDEVFS_REAPURBNDELAY:
- snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
- ret = proc_reapurbnonblock(ps, p);
- break;
-
case USBDEVFS_DISCSIGNAL:
snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
ret = proc_disconnectsignal(ps, p);
@@ -2304,6 +2313,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
ret = proc_free_streams(ps, p);
break;
}
+
+ done:
usb_unlock_device(dev);
if (ret >= 0)
inode->i_atime = CURRENT_TIME;
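With the reap ioctls handled before the connected() check, user space can now drain completed URBs after the device is gone and gets -ENODEV (instead of -EIO or -EAGAIN) once nothing is left. A minimal sketch of such a drain loop, assuming the usual usbdevfs ioctls from <linux/usbdevice_fs.h>; the surrounding fd handling is illustrative:

/* Sketch only: drain completed URBs, including after a disconnect.
 * Uses the standard usbdevfs ioctls; error handling is illustrative. */
#include <errno.h>
#include <linux/usbdevice_fs.h>
#include <sys/ioctl.h>

static void drain_completed_urbs(int fd)
{
	struct usbdevfs_urb *urb;

	for (;;) {
		if (ioctl(fd, USBDEVFS_REAPURBNDELAY, &urb) == 0) {
			/* urb points at a completed URB previously submitted
			 * with USBDEVFS_SUBMITURB; handle urb->status here. */
			continue;
		}
		if (errno == EAGAIN)	/* still connected, nothing completed */
			break;
		if (errno == ENODEV)	/* device gone and queue fully drained */
			break;
		break;			/* EINTR or other error */
	}
}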
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 874dec31a111..818369afff63 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -275,21 +275,6 @@ static int usb_unbind_device(struct device *dev)
return 0;
}
-/*
- * Cancel any pending scheduled resets
- *
- * [see usb_queue_reset_device()]
- *
- * Called after unconfiguring / when releasing interfaces. See
- * comments in __usb_queue_reset_device() regarding
- * udev->reset_running.
- */
-static void usb_cancel_queued_reset(struct usb_interface *iface)
-{
- if (iface->reset_running == 0)
- cancel_work_sync(&iface->reset_ws);
-}
-
/* called from driver core with dev locked */
static int usb_probe_interface(struct device *dev)
{
@@ -380,7 +365,6 @@ static int usb_probe_interface(struct device *dev)
usb_set_intfdata(intf, NULL);
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
- usb_cancel_queued_reset(intf);
/* If the LPM disable succeeded, balance the ref counts. */
if (!lpm_disable_error)
@@ -425,7 +409,6 @@ static int usb_unbind_interface(struct device *dev)
usb_disable_interface(udev, intf, false);
driver->disconnect(intf);
- usb_cancel_queued_reset(intf);
/* Free streams */
for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
@@ -1797,6 +1780,18 @@ static int autosuspend_check(struct usb_device *udev)
dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
return -EOPNOTSUPP;
}
+
+ /*
+ * If the device is a direct child of the root hub and the HCD
+ * doesn't handle wakeup requests, don't allow autosuspend when
+ * wakeup is needed.
+ */
+ if (w && udev->parent == udev->bus->root_hub &&
+ bus_to_hcd(udev->bus)->cant_recv_wakeups) {
+ dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n");
+ return -EOPNOTSUPP;
+ }
+
udev->do_remote_wakeup = w;
return 0;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 11cee55ae397..45a915ccd71c 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1618,6 +1618,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
int usb_hcd_unlink_urb (struct urb *urb, int status)
{
struct usb_hcd *hcd;
+ struct usb_device *udev = urb->dev;
int retval = -EIDRM;
unsigned long flags;
@@ -1629,20 +1630,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
if (atomic_read(&urb->use_count) > 0) {
retval = 0;
- usb_get_dev(urb->dev);
+ usb_get_dev(udev);
}
spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
if (retval == 0) {
hcd = bus_to_hcd(urb->dev->bus);
retval = unlink1(hcd, urb, status);
- usb_put_dev(urb->dev);
+ if (retval == 0)
+ retval = -EINPROGRESS;
+ else if (retval != -EIDRM && retval != -EBUSY)
+ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
+ urb, retval);
+ usb_put_dev(udev);
}
-
- if (retval == 0)
- retval = -EINPROGRESS;
- else if (retval != -EIDRM && retval != -EBUSY)
- dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
- urb, retval);
return retval;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aeb50bb6ba9c..d7c3d5a35946 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2896,10 +2896,12 @@ static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
*/
static int check_port_resume_type(struct usb_device *udev,
struct usb_hub *hub, int port1,
- int status, unsigned portchange, unsigned portstatus)
+ int status, u16 portchange, u16 portstatus)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
+ int retries = 3;
+ retry:
/* Is a warm reset needed to recover the connection? */
if (status == 0 && udev->reset_resume
&& hub_port_warm_reset_required(hub, port1, portstatus)) {
@@ -2907,10 +2909,17 @@ static int check_port_resume_type(struct usb_device *udev,
}
/* Is the device still present? */
else if (status || port_is_suspended(hub, portstatus) ||
- !port_is_power_on(hub, portstatus) ||
- !(portstatus & USB_PORT_STAT_CONNECTION)) {
+ !port_is_power_on(hub, portstatus)) {
if (status >= 0)
status = -ENODEV;
+ } else if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
+ if (retries--) {
+ usleep_range(200, 300);
+ status = hub_port_status(hub, port1, &portstatus,
+ &portchange);
+ goto retry;
+ }
+ status = -ENODEV;
}
/* Can't do a normal resume if the port isn't enabled,
@@ -3452,8 +3461,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
return status;
}
-#ifdef CONFIG_PM
-
int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
@@ -3512,16 +3519,6 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
return connect_change;
}
-#else
-
-static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
- u16 portstatus, u16 portchange)
-{
- return 0;
-}
-
-#endif
-
static int check_ports_changed(struct usb_hub *hub)
{
int port1;
@@ -4655,9 +4652,13 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
test_bit(port1, hub->removed_bits)) {
- /* maybe switch power back on (e.g. root hub was reset) */
+ /*
+ * maybe switch power back on (e.g. root hub was reset)
+ * but only if the port isn't owned by someone else.
+ */
if (hub_is_port_power_switchable(hub)
- && !port_is_power_on(hub, portstatus))
+ && !port_is_power_on(hub, portstatus)
+ && !port_dev->port_owner)
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (portstatus & USB_PORT_STAT_ENABLE)
@@ -4882,7 +4883,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
- int connect_change, reset_device = 0;
+ int connect_change;
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
struct usb_device *hdev = hub->hdev;
@@ -4970,30 +4971,14 @@ static void port_event(struct usb_hub *hub, int port1)
if (hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true) < 0)
hub_port_disable(hub, port1, 1);
- } else
- reset_device = 1;
- }
-
- /*
- * On disconnect USB3 protocol ports transit from U0 to
- * SS.Inactive to Rx.Detect. If this happens a warm-
- * reset is not needed, but a (re)connect may happen
- * before hub_wq runs and sees the disconnect, and the
- * device may be an unknown state.
- *
- * If the port went through SS.Inactive without hub_wq
- * seeing it the C_LINK_STATE change flag will be set,
- * and we reset the dev to put it in a known state.
- */
- if (reset_device || (udev && hub_is_superspeed(hub->hdev)
- && (portchange & USB_PORT_STAT_C_LINK_STATE)
- && (portstatus & USB_PORT_STAT_CONNECTION))) {
- usb_unlock_port(port_dev);
- usb_lock_device(udev);
- usb_reset_device(udev);
- usb_unlock_device(udev);
- usb_lock_port(port_dev);
- connect_change = 0;
+ } else {
+ usb_unlock_port(port_dev);
+ usb_lock_device(udev);
+ usb_reset_device(udev);
+ usb_unlock_device(udev);
+ usb_lock_port(port_dev);
+ connect_change = 0;
+ }
}
if (connect_change)
@@ -5589,26 +5574,19 @@ EXPORT_SYMBOL_GPL(usb_reset_device);
* possible; depending on how the driver attached to each interface
* handles ->pre_reset(), the second reset might happen or not.
*
- * - If a driver is unbound and it had a pending reset, the reset will
- * be cancelled.
- *
- * - This function can be called during .probe() or .disconnect()
- * times. On return from .disconnect(), any pending resets will be
- * cancelled.
- *
- * There is no no need to lock/unlock the @reset_ws as schedule_work()
- * does its own.
+ * - If the reset is delayed so long that the interface is unbound from
+ * its driver, the reset will be skipped.
*
- * NOTE: We don't do any reference count tracking because it is not
- * needed. The lifecycle of the work_struct is tied to the
- * usb_interface. Before destroying the interface we cancel the
- * work_struct, so the fact that work_struct is queued and or
- * running means the interface (and thus, the device) exist and
- * are referenced.
+ * - This function can be called during .probe(). It can also be called
+ * during .disconnect(), but doing so is pointless because the reset
+ * will not occur. If you really want to reset the device during
+ * .disconnect(), call usb_reset_device() directly -- but watch out
+ * for nested unbinding issues!
*/
void usb_queue_reset_device(struct usb_interface *iface)
{
- schedule_work(&iface->reset_ws);
+ if (schedule_work(&iface->reset_ws))
+ usb_get_intf(iface);
}
EXPORT_SYMBOL_GPL(usb_queue_reset_device);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f7b7713cfb2a..f368d2053da5 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1551,6 +1551,7 @@ static void usb_release_interface(struct device *dev)
altsetting_to_usb_interface_cache(intf->altsetting);
kref_put(&intfc->ref, usb_release_interface_cache);
+ usb_put_dev(interface_to_usbdev(intf));
kfree(intf);
}
@@ -1626,24 +1627,6 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
/*
* Internal function to queue a device reset
- *
- * This is initialized into the workstruct in 'struct
- * usb_device->reset_ws' that is launched by
- * message.c:usb_set_configuration() when initializing each 'struct
- * usb_interface'.
- *
- * It is safe to get the USB device without reference counts because
- * the life cycle of @iface is bound to the life cycle of @udev. Then,
- * this function will be ran only if @iface is alive (and before
- * freeing it any scheduled instances of it will have been cancelled).
- *
- * We need to set a flag (usb_dev->reset_running) because when we call
- * the reset, the interfaces might be unbound. The current interface
- * cannot try to remove the queued work as it would cause a deadlock
- * (you cannot remove your work from within your executing
- * workqueue). This flag lets it know, so that
- * usb_cancel_queued_reset() doesn't try to do it.
- *
* See usb_queue_reset_device() for more details
*/
static void __usb_queue_reset_device(struct work_struct *ws)
@@ -1655,11 +1638,10 @@ static void __usb_queue_reset_device(struct work_struct *ws)
rc = usb_lock_device_for_reset(udev, iface);
if (rc >= 0) {
- iface->reset_running = 1;
usb_reset_device(udev);
- iface->reset_running = 0;
usb_unlock_device(udev);
}
+ usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */
}
@@ -1854,6 +1836,7 @@ free_interfaces:
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
configuration, alt->desc.bInterfaceNumber);
+ usb_get_dev(dev);
}
kfree(new_interfaces);
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
return 0;
+ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+ return 1;
+
/* NOTE: can't use usb_match_id() since interface caches
* aren't set up yet. this is cut/paste from that code.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Protocol and OTG Electrical Test Device */
+ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 2a92b97f0144..b1fb9aef0f5b 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1049,6 +1049,7 @@ static int __init usb_init(void)
pr_info("%s: USB support disabled\n", usbcore_name);
return 0;
}
+ usb_init_pool_max();
retval = usb_debugfs_init();
if (retval)
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index b323c4c11b0a..76b9ba4dc925 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -23,7 +23,7 @@ choice
config USB_DWC2_HOST
bool "Host only mode"
- depends on USB
+ depends on USB=y || (USB_DWC2=m && USB)
help
The Designware USB2.0 high-speed host controller
integrated into many SoCs. Select this option if you want the
@@ -42,7 +42,7 @@ config USB_DWC2_PERIPHERAL
config USB_DWC2_DUAL_ROLE
bool "Dual Role mode"
- depends on (USB=y || USB=USB_DWC2) && (USB_GADGET=y || USB_GADGET=USB_DWC2)
+ depends on (USB=y && USB_GADGET=y) || (USB_DWC2=m && USB && USB_GADGET)
help
Select this option if you want the driver to work in a dual-role
mode. In this mode both host and gadget features are enabled, and
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 7605850b7a9c..d5197d492e21 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -462,7 +462,7 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
dwc2_enable_common_interrupts(hsotg);
/*
- * Do device or host intialization based on mode during PCD and
+ * Do device or host initialization based on mode during PCD and
* HCD initialization
*/
if (dwc2_is_host_mode(hsotg)) {
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 7a70a1349334..f74304b12652 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -108,7 +108,7 @@ struct s3c_hsotg_req;
* @halted: Set if the endpoint has been halted.
* @periodic: Set if this is a periodic ep, such as Interrupt
* @isochronous: Set if this is a isochronous ep
- * @sent_zlp: Set if we've sent a zero-length packet.
+ * @send_zlp: Set if we need to send a zero-length packet.
* @total_data: The total number of data bytes done.
* @fifo_size: The size of the FIFO (for periodic IN endpoints)
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
@@ -149,7 +149,7 @@ struct s3c_hsotg_ep {
unsigned int halted:1;
unsigned int periodic:1;
unsigned int isochronous:1;
- unsigned int sent_zlp:1;
+ unsigned int send_zlp:1;
char name[10];
};
@@ -158,14 +158,12 @@ struct s3c_hsotg_ep {
* struct s3c_hsotg_req - data transfer request
* @req: The USB gadget request
* @queue: The list of requests for the endpoint this is queued for.
- * @in_progress: Has already had size/packets written to core
- * @mapped: DMA buffer for this request has been mapped via dma_map_single().
+ * @saved_req_buf: variable to save req.buf when bounce buffers are used.
*/
struct s3c_hsotg_req {
struct usb_request req;
struct list_head queue;
- unsigned char in_progress;
- unsigned char mapped;
+ void *saved_req_buf;
};
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
@@ -193,6 +191,22 @@ enum dwc2_lx_state {
DWC2_L3, /* Off state */
};
+/*
+ * Gadget periodic tx fifo sizes as used by legacy driver
+ * EP0 is not included
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+ 768, 0, 0, 0, 0, 0, 0, 0}
+
+/* Gadget ep0 states */
+enum dwc2_ep0_state {
+ DWC2_EP0_SETUP,
+ DWC2_EP0_DATA_IN,
+ DWC2_EP0_DATA_OUT,
+ DWC2_EP0_STATUS_IN,
+ DWC2_EP0_STATUS_OUT,
+};
+
/**
* struct dwc2_core_params - Parameters for configuring the core
*
@@ -381,7 +395,7 @@ struct dwc2_core_params {
* @power_optimized Are power optimizations enabled?
* @num_dev_ep Number of device endpoints available
* @num_dev_perio_in_ep Number of device periodic IN endpoints
- * avaialable
+ * available
* @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
* Depth
* 0 to 30
@@ -434,6 +448,9 @@ struct dwc2_hw_params {
u32 snpsid;
};
+/* Size of control and EP0 buffers */
+#define DWC2_CTRL_BUFF_SIZE 8
+
/**
* struct dwc2_hsotg - Holds the state of the driver, including the non-periodic
* and periodic schedules
@@ -552,14 +569,20 @@ struct dwc2_hw_params {
* @num_of_eps: Number of available EPs (excluding EP0)
* @debug_root: Root directrory for debugfs.
* @debug_file: Main status file for debugfs.
+ * @debug_testmode: Testmode status file for debugfs.
* @debug_fifo: FIFO status file for debugfs.
* @ep0_reply: Request used for ep0 reply.
* @ep0_buff: Buffer for EP0 reply data, if needed.
* @ctrl_buff: Buffer for EP0 control requests.
* @ctrl_req: Request for EP0 control packets.
- * @setup: NAK management for EP0 SETUP
+ * @ep0_state: EP0 control transfers state
+ * @test_mode: USB test mode requested by the host
* @last_rst: Time of last reset
* @eps: The endpoints being supplied to the gadget framework
+ * @g_using_dma: Indicate if dma usage is enabled
+ * @g_rx_fifo_sz: Contains rx fifo size value
+ * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value
+ * @g_tx_fifo_sz: Contains tx fifo size value per endpoints
*/
struct dwc2_hsotg {
struct device *dev;
@@ -591,6 +614,7 @@ struct dwc2_hsotg {
struct dentry *debug_root;
struct dentry *debug_file;
+ struct dentry *debug_testmode;
struct dentry *debug_fifo;
/* DWC OTG HW Release versions */
@@ -684,15 +708,21 @@ struct dwc2_hsotg {
struct usb_request *ep0_reply;
struct usb_request *ctrl_req;
- u8 ep0_buff[8];
- u8 ctrl_buff[8];
+ void *ep0_buff;
+ void *ctrl_buff;
+ enum dwc2_ep0_state ep0_state;
+ u8 test_mode;
struct usb_gadget gadget;
unsigned int enabled:1;
unsigned int connected:1;
- unsigned int setup:1;
unsigned long last_rst;
- struct s3c_hsotg_ep *eps;
+ struct s3c_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
+ struct s3c_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
+ u32 g_using_dma;
+ u32 g_rx_fifo_sz;
+ u32 g_np_g_tx_fifo_sz;
+ u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
@@ -969,7 +999,8 @@ extern int s3c_hsotg_remove(struct dwc2_hsotg *hsotg);
extern int s3c_hsotg_suspend(struct dwc2_hsotg *dwc2);
extern int s3c_hsotg_resume(struct dwc2_hsotg *dwc2);
extern int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq);
-extern void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2);
+extern void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
+ bool reset);
extern void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg);
extern void s3c_hsotg_disconnect(struct dwc2_hsotg *dwc2);
#else
@@ -981,7 +1012,8 @@ static inline int s3c_hsotg_resume(struct dwc2_hsotg *dwc2)
{ return 0; }
static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{ return 0; }
-static inline void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2) {}
+static inline void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
+ bool reset) {}
static inline void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg) {}
static inline void s3c_hsotg_disconnect(struct dwc2_hsotg *dwc2) {}
#endif
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(&hsotg->lock);
+
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
- spin_lock(&hsotg->lock);
-
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
}
}
- spin_unlock(&hsotg->lock);
out:
+ spin_unlock(&hsotg->lock);
return retval;
}
EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 200168ec2d75..6a30887082cd 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -35,6 +35,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <linux/uaccess.h>
#include "core.h"
#include "hw.h"
@@ -65,7 +66,16 @@ static inline void __bic32(void __iomem *ptr, u32 val)
writel(readl(ptr) & ~val, ptr);
}
-/* forward decleration of functions */
+static inline struct s3c_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
+ u32 ep_index, u32 dir_in)
+{
+ if (dir_in)
+ return hsotg->eps_in[ep_index];
+ else
+ return hsotg->eps_out[ep_index];
+}
+
+/* forward declaration of functions */
static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg);
/**
@@ -85,11 +95,11 @@ static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg);
* a core reset. This means we either need to fix the gadgets to take
* account of DMA alignment, or add bounce buffers (yuerk).
*
- * Until this issue is sorted out, we always return 'false'.
+ * g_using_dma is set depending on the device tree flag.
*/
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
- return false; /* support is not complete */
+ return hsotg->g_using_dma;
}
/**
@@ -165,15 +175,18 @@ static void s3c_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
unsigned int ep;
unsigned int addr;
- unsigned int size;
int timeout;
u32 val;
- /* set FIFO sizes to 2048/1024 */
+ /* Reset fifo map if not correctly cleared during previous session */
+ WARN_ON(hsotg->fifo_map);
+ hsotg->fifo_map = 0;
- writel(2048, hsotg->regs + GRXFSIZ);
- writel((2048 << FIFOSIZE_STARTADDR_SHIFT) |
- (1024 << FIFOSIZE_DEPTH_SHIFT), hsotg->regs + GNPTXFSIZ);
+ /* set RX/NPTX FIFO sizes */
+ writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ);
+ writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) |
+ (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT),
+ hsotg->regs + GNPTXFSIZ);
/*
* arange all the rest of the TX FIFOs, as some versions of this
@@ -183,35 +196,21 @@ static void s3c_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
*/
/* start at the end of the GNPTXFSIZ, rounded up */
- addr = 2048 + 1024;
+ addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz;
/*
- * Because we have not enough memory to have each TX FIFO of size at
- * least 3072 bytes (the maximum single packet size), we create four
- * FIFOs of lenght 1024, and four of length 3072 bytes, and assing
+ * Configure fifos sizes from provided configuration and assign
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
-
- /* 256*4=1024 bytes FIFO length */
- size = 256;
- for (ep = 1; ep <= 4; ep++) {
- val = addr;
- val |= size << FIFOSIZE_DEPTH_SHIFT;
- WARN_ONCE(addr + size > hsotg->fifo_mem,
- "insufficient fifo memory");
- addr += size;
-
- writel(val, hsotg->regs + DPTXFSIZN(ep));
- }
- /* 768*4=3072 bytes FIFO length */
- size = 768;
- for (ep = 5; ep <= 8; ep++) {
+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+ if (!hsotg->g_tx_fifo_sz[ep])
+ continue;
val = addr;
- val |= size << FIFOSIZE_DEPTH_SHIFT;
- WARN_ONCE(addr + size > hsotg->fifo_mem,
+ val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
"insufficient fifo memory");
- addr += size;
+ addr += hsotg->g_tx_fifo_sz[ep];
writel(val, hsotg->regs + DPTXFSIZN(ep));
}
@@ -236,6 +235,7 @@ static void s3c_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
dev_err(hsotg->dev,
"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
__func__, val);
+ break;
}
udelay(1);
@@ -566,11 +566,6 @@ static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
length = ureq->length - ureq->actual;
dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
ureq->length, ureq->actual);
- if (0)
- dev_dbg(hsotg->dev,
- "REQ buf %p len %d dma %pad noi=%d zp=%d snok=%d\n",
- ureq->buf, length, &ureq->dma,
- ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
maxreq = get_ep_limit(hs_ep);
if (length > maxreq) {
@@ -604,14 +599,15 @@ static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
else
epsize = 0;
- if (index != 0 && ureq->zero) {
- /*
- * test for the packets being exactly right for the
- * transfer
- */
-
- if (length == (packets * hs_ep->ep.maxpacket))
- packets++;
+ /*
+ * A zero-length packet should be programmed on its own and should not
+ * be counted in DIEPTSIZ.PktCnt with other packets.
+ */
+ if (dir_in && ureq->zero && !continuing) {
+ /* Test if zlp is actually required. */
+ if ((ureq->length >= hs_ep->ep.maxpacket) &&
+ !(ureq->length % hs_ep->ep.maxpacket))
+ hs_ep->send_zlp = 1;
}
epsize |= DXEPTSIZ_PKTCNT(packets);
@@ -644,15 +640,12 @@ static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
ctrl |= DXEPCTL_USBACTEP;
- dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup);
+ dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
/* For Setup request do not clear NAK */
- if (hsotg->setup && index == 0)
- hsotg->setup = 0;
- else
+ if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
-
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
writel(ctrl, hsotg->regs + epctrl_reg);
@@ -686,7 +679,7 @@ static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
/* check ep is enabled */
if (!(readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA))
- dev_warn(hsotg->dev,
+ dev_dbg(hsotg->dev,
"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
index, readl(hsotg->regs + epctrl_reg));
@@ -733,6 +726,59 @@ dma_error:
return -EIO;
}
+static int s3c_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req)
+{
+ void *req_buf = hs_req->req.buf;
+
+ /* If dma is not being used or buffer is aligned */
+ if (!using_dma(hsotg) || !((long)req_buf & 3))
+ return 0;
+
+ WARN_ON(hs_req->saved_req_buf);
+
+ dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
+ hs_ep->ep.name, req_buf, hs_req->req.length);
+
+ hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
+ if (!hs_req->req.buf) {
+ hs_req->req.buf = req_buf;
+ dev_err(hsotg->dev,
+ "%s: unable to allocate memory for bounce buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Save actual buffer */
+ hs_req->saved_req_buf = req_buf;
+
+ if (hs_ep->dir_in)
+ memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
+ return 0;
+}
+
+static void s3c_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req)
+{
+ /* If dma is not being used or buffer was aligned */
+ if (!using_dma(hsotg) || !hs_req->saved_req_buf)
+ return;
+
+ dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
+ hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
+
+ /* Copy data from bounce buffer on successful out transfer */
+ if (!hs_ep->dir_in && !hs_req->req.status)
+ memcpy(hs_req->saved_req_buf, hs_req->req.buf,
+ hs_req->req.actual);
+
+ /* Free bounce buffer */
+ kfree(hs_req->req.buf);
+
+ hs_req->req.buf = hs_req->saved_req_buf;
+ hs_req->saved_req_buf = NULL;
+}
+
static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
@@ -740,6 +786,7 @@ static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
bool first;
+ int ret;
dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
ep->name, req, req->length, req->buf, req->no_interrupt,
@@ -750,9 +797,13 @@ static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
req->actual = 0;
req->status = -EINPROGRESS;
+ ret = s3c_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
+ if (ret)
+ return ret;
+
/* if we're using DMA, sync the buffers as necessary */
if (using_dma(hs)) {
- int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
+ ret = s3c_hsotg_map_dma(hs, hs_ep, req);
if (ret)
return ret;
}
@@ -819,7 +870,7 @@ static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
static struct s3c_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
+ struct s3c_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
@@ -829,6 +880,8 @@ static struct s3c_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
if (idx > hsotg->num_of_eps)
return NULL;
+ ep = index_to_ep(hsotg, idx, dir);
+
if (idx && ep->dir_in != dir)
return NULL;
@@ -836,6 +889,32 @@ static struct s3c_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
}
/**
+ * s3c_hsotg_set_test_mode - Enable usb Test Modes
+ * @hsotg: The driver state.
+ * @testmode: requested usb test mode
+ * Enable usb Test Mode requested by the Host.
+ */
+static int s3c_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
+{
+ int dctl = readl(hsotg->regs + DCTL);
+
+ dctl &= ~DCTL_TSTCTL_MASK;
+ switch (testmode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ dctl |= testmode << DCTL_TSTCTL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ writel(dctl, hsotg->regs + DCTL);
+ return 0;
+}
+
+/**
* s3c_hsotg_send_reply - send reply to control request
* @hsotg: The device state
* @ep: Endpoint 0
@@ -864,13 +943,15 @@ static int s3c_hsotg_send_reply(struct dwc2_hsotg *hsotg,
req->buf = hsotg->ep0_buff;
req->length = length;
- req->zero = 1; /* always do zero-length final transfer */
+ /*
+ * The zero flag is for sending a zlp in the DATA IN stage. It has no
+ * impact on the STATUS stage.
+ */
+ req->zero = 0;
req->complete = s3c_hsotg_complete_oursetup;
if (length)
memcpy(req->buf, buff, length);
- else
- ep->sent_zlp = 1;
ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
if (ret) {
@@ -889,7 +970,7 @@ static int s3c_hsotg_send_reply(struct dwc2_hsotg *hsotg,
static int s3c_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
struct s3c_hsotg_ep *ep;
__le16 reply;
int ret;
@@ -953,33 +1034,62 @@ static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
}
/**
- * s3c_hsotg_process_req_featire - process request {SET,CLEAR}_FEATURE
+ * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
* @hsotg: The device state
* @ctrl: USB control request
*/
static int s3c_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
struct s3c_hsotg_req *hs_req;
bool restart;
bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
struct s3c_hsotg_ep *ep;
int ret;
bool halted;
+ u32 recip;
+ u32 wValue;
+ u32 wIndex;
dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
__func__, set ? "SET" : "CLEAR");
- if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
- ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ switch (wValue) {
+ case USB_DEVICE_TEST_MODE:
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
+ hsotg->test_mode = wIndex >> 8;
+ ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ if (ret) {
+ dev_err(hsotg->dev,
+ "%s: failed to send reply\n", __func__);
+ return ret;
+ }
+ break;
+ default:
+ return -ENOENT;
+ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ ep = ep_from_windex(hsotg, wIndex);
if (!ep) {
dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
- __func__, le16_to_cpu(ctrl->wIndex));
+ __func__, wIndex);
return -ENOENT;
}
- switch (le16_to_cpu(ctrl->wValue)) {
+ switch (wValue) {
case USB_ENDPOINT_HALT:
halted = ep->halted;
@@ -1006,16 +1116,22 @@ static int s3c_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
hs_req = ep->req;
ep->req = NULL;
list_del_init(&hs_req->queue);
- usb_gadget_giveback_request(&ep->ep,
- &hs_req->req);
+ if (hs_req->req.complete) {
+ spin_unlock(&hsotg->lock);
+ usb_gadget_giveback_request(
+ &ep->ep, &hs_req->req);
+ spin_lock(&hsotg->lock);
+ }
}
/* If we have pending request, then start it */
- restart = !list_empty(&ep->queue);
- if (restart) {
- hs_req = get_ep_head(ep);
- s3c_hsotg_start_req(hsotg, ep,
- hs_req, false);
+ if (!ep->req) {
+ restart = !list_empty(&ep->queue);
+ if (restart) {
+ hs_req = get_ep_head(ep);
+ s3c_hsotg_start_req(hsotg, ep,
+ hs_req, false);
+ }
}
}
@@ -1024,9 +1140,10 @@ static int s3c_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
default:
return -ENOENT;
}
- } else
- return -ENOENT; /* currently only deal with endpoint */
-
+ break;
+ default:
+ return -ENOENT;
+ }
return 1;
}
@@ -1040,7 +1157,7 @@ static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
*/
static void s3c_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
u32 reg;
u32 ctrl;
@@ -1080,34 +1197,29 @@ static void s3c_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
static void s3c_hsotg_process_control(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
int ret = 0;
u32 dcfg;
- ep0->sent_zlp = 0;
-
dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
ctrl->bRequest, ctrl->bRequestType,
ctrl->wValue, ctrl->wLength);
- /*
- * record the direction of the request, for later use when enquing
- * packets onto EP0.
- */
-
- ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
- dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
-
- /*
- * if we've no data with this request, then the last part of the
- * transaction is going to implicitly be IN.
- */
- if (ctrl->wLength == 0)
+ if (ctrl->wLength == 0) {
+ ep0->dir_in = 1;
+ hsotg->ep0_state = DWC2_EP0_STATUS_IN;
+ } else if (ctrl->bRequestType & USB_DIR_IN) {
ep0->dir_in = 1;
+ hsotg->ep0_state = DWC2_EP0_DATA_IN;
+ } else {
+ ep0->dir_in = 0;
+ hsotg->ep0_state = DWC2_EP0_DATA_OUT;
+ }
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (ctrl->bRequest) {
case USB_REQ_SET_ADDRESS:
+ hsotg->connected = 1;
dcfg = readl(hsotg->regs + DCFG);
dcfg &= ~DCFG_DEVADDR_MASK;
dcfg |= (le16_to_cpu(ctrl->wValue) <<
@@ -1201,9 +1313,11 @@ static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
return;
}
- hsotg->eps[0].dir_in = 0;
+ hsotg->eps_out[0]->dir_in = 0;
+ hsotg->eps_out[0]->send_zlp = 0;
+ hsotg->ep0_state = DWC2_EP0_SETUP;
- ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
+ ret = s3c_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
if (ret < 0) {
dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
/*
@@ -1213,6 +1327,32 @@ static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
}
}
+static void s3c_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep)
+{
+ u32 ctrl;
+ u8 index = hs_ep->index;
+ u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
+
+ if (hs_ep->dir_in)
+ dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
+ index);
+ else
+ dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
+ index);
+
+ writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(0), hsotg->regs +
+ epsiz_reg);
+
+ ctrl = readl(hsotg->regs + epctl_reg);
+ ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
+ ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
+ ctrl |= DXEPCTL_USBACTEP;
+ writel(ctrl, hsotg->regs + epctl_reg);
+}
+
/**
* s3c_hsotg_complete_request - complete a request given to us
* @hsotg: The device state.
@@ -1249,6 +1389,8 @@ static void s3c_hsotg_complete_request(struct dwc2_hsotg *hsotg,
if (hs_req->req.status == -EINPROGRESS)
hs_req->req.status = result;
+ s3c_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
+
hs_ep->req = NULL;
list_del_init(&hs_req->queue);
@@ -1293,7 +1435,7 @@ static void s3c_hsotg_complete_request(struct dwc2_hsotg *hsotg,
*/
static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
+ struct s3c_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
struct s3c_hsotg_req *hs_req = hs_ep->req;
void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
int to_read;
@@ -1305,7 +1447,7 @@ static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
int ptr;
- dev_warn(hsotg->dev,
+ dev_dbg(hsotg->dev,
"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
__func__, size, ep_idx, epctl);
@@ -1345,9 +1487,9 @@ static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
}
/**
- * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
+ * s3c_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
* @hsotg: The device instance
- * @req: The request currently on this endpoint
+ * @dir_in: If IN zlp
*
* Generate a zero-length IN packet request for terminating a SETUP
* transaction.
@@ -1356,53 +1498,28 @@ static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
* currently believed that we do not need to wait for any space in
* the TxFIFO.
*/
-static void s3c_hsotg_send_zlp(struct dwc2_hsotg *hsotg,
- struct s3c_hsotg_req *req)
+static void s3c_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
{
- u32 ctrl;
+ /* eps_out[0] is used in both directions */
+ hsotg->eps_out[0]->dir_in = dir_in;
+ hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
- if (!req) {
- dev_warn(hsotg->dev, "%s: no request?\n", __func__);
- return;
- }
-
- if (req->req.length == 0) {
- hsotg->eps[0].sent_zlp = 1;
- s3c_hsotg_enqueue_setup(hsotg);
- return;
- }
-
- hsotg->eps[0].dir_in = 1;
- hsotg->eps[0].sent_zlp = 1;
-
- dev_dbg(hsotg->dev, "sending zero-length packet\n");
-
- /* issue a zero-sized packet to terminate this */
- writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
- DXEPTSIZ_XFERSIZE(0), hsotg->regs + DIEPTSIZ(0));
-
- ctrl = readl(hsotg->regs + DIEPCTL0);
- ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
- ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
- ctrl |= DXEPCTL_USBACTEP;
- writel(ctrl, hsotg->regs + DIEPCTL0);
+ s3c_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
}
/**
* s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
* @hsotg: The device instance
* @epnum: The endpoint received from
- * @was_setup: Set if processing a SetupDone event.
*
* The RXFIFO has delivered an OutDone event, which means that the data
* transfer for an OUT endpoint has been completed, either by a short
* packet or by the finish of a transfer.
*/
-static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg,
- int epnum, bool was_setup)
+static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
{
u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
+ struct s3c_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
struct s3c_hsotg_req *hs_req = hs_ep->req;
struct usb_request *req = &hs_req->req;
unsigned size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
@@ -1413,6 +1530,13 @@ static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg,
return;
}
+ if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
+ dev_dbg(hsotg->dev, "zlp packet received\n");
+ s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+ s3c_hsotg_enqueue_setup(hsotg);
+ return;
+ }
+
if (using_dma(hsotg)) {
unsigned size_done;
@@ -1435,12 +1559,6 @@ static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg,
if (req->actual < req->length && size_left == 0) {
s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
return;
- } else if (epnum == 0) {
- /*
- * After was_setup = 1 =>
- * set CNAK for non Setup requests
- */
- hsotg->setup = was_setup ? 0 : 1;
}
if (req->actual < req->length && req->short_not_ok) {
@@ -1453,13 +1571,10 @@ static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg,
*/
}
- if (epnum == 0) {
- /*
- * Condition req->complete != s3c_hsotg_complete_setup says:
- * send ZLP when we have an asynchronous request from gadget
- */
- if (!was_setup && req->complete != s3c_hsotg_complete_setup)
- s3c_hsotg_send_zlp(hsotg, hs_req);
+ if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* Move to STATUS IN */
+ s3c_hsotg_ep0_zlp(hsotg, true);
+ return;
}
s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
@@ -1511,8 +1626,7 @@ static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
size = grxstsr & GRXSTS_BYTECNT_MASK;
size >>= GRXSTS_BYTECNT_SHIFT;
- if (1)
- dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
+ dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
__func__, grxstsr, size, epnum);
switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
@@ -1525,7 +1639,7 @@ static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
s3c_hsotg_read_frameno(hsotg));
if (!using_dma(hsotg))
- s3c_hsotg_handle_outdone(hsotg, epnum, false);
+ s3c_hsotg_handle_outdone(hsotg, epnum);
break;
case GRXSTS_PKTSTS_SETUPDONE:
@@ -1533,8 +1647,13 @@ static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
s3c_hsotg_read_frameno(hsotg),
readl(hsotg->regs + DOEPCTL(0)));
-
- s3c_hsotg_handle_outdone(hsotg, epnum, true);
+ /*
+ * Call s3c_hsotg_handle_outdone here only if it was not already
+ * called from GRXSTS_PKTSTS_OUTDONE, i.e. if the core did not
+ * generate GRXSTS_PKTSTS_OUTDONE for this setup packet.
+ */
+ if (hsotg->ep0_state == DWC2_EP0_SETUP)
+ s3c_hsotg_handle_outdone(hsotg, epnum);
break;
case GRXSTS_PKTSTS_OUTRX:
@@ -1547,6 +1666,8 @@ static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
s3c_hsotg_read_frameno(hsotg),
readl(hsotg->regs + DOEPCTL(0)));
+ WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
+
s3c_hsotg_rx_data(hsotg, epnum, size);
break;
@@ -1591,14 +1712,18 @@ static u32 s3c_hsotg_ep0_mps(unsigned int mps)
* the hardware control registers to reflect this.
*/
static void s3c_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
- unsigned int ep, unsigned int mps)
+ unsigned int ep, unsigned int mps, unsigned int dir_in)
{
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
+ struct s3c_hsotg_ep *hs_ep;
void __iomem *regs = hsotg->regs;
u32 mpsval;
u32 mcval;
u32 reg;
+ hs_ep = index_to_ep(hsotg, ep, dir_in);
+ if (!hs_ep)
+ return;
+
if (ep == 0) {
/* EP0 is a special case */
mpsval = s3c_hsotg_ep0_mps(mps);
@@ -1617,17 +1742,12 @@ static void s3c_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
hs_ep->ep.maxpacket = mpsval;
}
- /*
- * update both the in and out endpoint controldir_ registers, even
- * if one of the directions may not be in use.
- */
-
- reg = readl(regs + DIEPCTL(ep));
- reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
- writel(reg, regs + DIEPCTL(ep));
-
- if (ep) {
+ if (dir_in) {
+ reg = readl(regs + DIEPCTL(ep));
+ reg &= ~DXEPCTL_MPS_MASK;
+ reg |= mpsval;
+ writel(reg, regs + DIEPCTL(ep));
+ } else {
reg = readl(regs + DOEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
reg |= mpsval;
@@ -1727,9 +1847,21 @@ static void s3c_hsotg_complete_in(struct dwc2_hsotg *hsotg,
}
/* Finish ZLP handling for IN EP0 transactions */
- if (hsotg->eps[0].sent_zlp) {
- dev_dbg(hsotg->dev, "zlp packet received\n");
+ if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
+ dev_dbg(hsotg->dev, "zlp packet sent\n");
s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+ if (hsotg->test_mode) {
+ int ret;
+
+ ret = s3c_hsotg_set_test_mode(hsotg, hsotg->test_mode);
+ if (ret < 0) {
+ dev_dbg(hsotg->dev, "Invalid Test #%d\n",
+ hsotg->test_mode);
+ s3c_hsotg_stall_ep0(hsotg);
+ return;
+ }
+ }
+ s3c_hsotg_enqueue_setup(hsotg);
return;
}
@@ -1756,31 +1888,27 @@ static void s3c_hsotg_complete_in(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
- /*
- * Check if dealing with Maximum Packet Size(MPS) IN transfer at EP0
- * When sent data is a multiple MPS size (e.g. 64B ,128B ,192B
- * ,256B ... ), after last MPS sized packet send IN ZLP packet to
- * inform the host that no more data is available.
- * The state of req.zero member is checked to be sure that the value to
- * send is smaller than wValue expected from host.
- * Check req.length to NOT send another ZLP when the current one is
- * under completion (the one for which this completion has been called).
- */
- if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
- hs_req->req.length == hs_req->req.actual &&
- !(hs_req->req.length % hs_ep->ep.maxpacket)) {
+ if (!size_left && hs_req->req.actual < hs_req->req.length) {
+ dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
+ s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+ return;
+ }
- dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
- s3c_hsotg_send_zlp(hsotg, hs_req);
+ /* Zlp for all endpoints, for ep0 only in DATA IN stage */
+ if (hs_ep->send_zlp) {
+ s3c_hsotg_program_zlp(hsotg, hs_ep);
+ hs_ep->send_zlp = 0;
+ /* transfer will be completed on next complete interrupt */
+ return;
+ }
+ if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
+ /* Move to STATUS OUT */
+ s3c_hsotg_ep0_zlp(hsotg, false);
return;
}
- if (!size_left && hs_req->req.actual < hs_req->req.length) {
- dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
- s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
- } else
- s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+ s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
/**
@@ -1794,7 +1922,7 @@ static void s3c_hsotg_complete_in(struct dwc2_hsotg *hsotg,
static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
int dir_in)
{
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
+ struct s3c_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
@@ -1807,9 +1935,19 @@ static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
/* Clear endpoint interrupts */
writel(ints, hsotg->regs + epint_reg);
+ if (!hs_ep) {
+ dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
+ __func__, idx, dir_in ? "in" : "out");
+ return;
+ }
+
dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
__func__, idx, dir_in ? "in" : "out", ints);
+ /* Don't process XferCompl interrupt if it is a setup packet */
+ if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
+ ints &= ~DXEPINT_XFERCOMPL;
+
if (ints & DXEPINT_XFERCOMPL) {
if (hs_ep->isochronous && hs_ep->interval == 1) {
if (ctrl & DXEPCTL_EOFRNUM)
@@ -1839,7 +1977,7 @@ static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* as we ignore the RXFIFO.
*/
- s3c_hsotg_handle_outdone(hsotg, idx, false);
+ s3c_hsotg_handle_outdone(hsotg, idx);
}
}
@@ -1878,7 +2016,7 @@ static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (dir_in)
WARN_ON_ONCE(1);
else
- s3c_hsotg_handle_outdone(hsotg, 0, true);
+ s3c_hsotg_handle_outdone(hsotg, 0);
}
}
@@ -1969,9 +2107,15 @@ static void s3c_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
if (ep0_mps) {
int i;
- s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
- for (i = 1; i < hsotg->num_of_eps; i++)
- s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
+ /* Initialize ep0 for both in and out directions */
+ s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1);
+ s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0);
+ for (i = 1; i < hsotg->num_of_eps; i++) {
+ if (hsotg->eps_in[i])
+ s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1);
+ if (hsotg->eps_out[i])
+ s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0);
+ }
}
/* ensure after enumeration our EP0 is active */
@@ -1988,30 +2132,23 @@ static void s3c_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
* @hsotg: The device state.
* @ep: The endpoint the requests may be on.
* @result: The result code to use.
- * @force: Force removal of any current requests
*
* Go through the requests on the given endpoint and mark them
* completed with the given result code.
*/
static void kill_all_requests(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *ep,
- int result, bool force)
+ int result)
{
struct s3c_hsotg_req *req, *treq;
unsigned size;
- list_for_each_entry_safe(req, treq, &ep->queue, queue) {
- /*
- * currently, we can't do much about an already
- * running request on an in endpoint
- */
-
- if (ep->req == req && ep->dir_in && !force)
- continue;
+ ep->req = NULL;
+ list_for_each_entry_safe(req, treq, &ep->queue, queue)
s3c_hsotg_complete_request(hsotg, ep, req,
result);
- }
+
if (!hsotg->dedicated_fifos)
return;
size = (readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4;
@@ -2035,8 +2172,16 @@ void s3c_hsotg_disconnect(struct dwc2_hsotg *hsotg)
return;
hsotg->connected = 0;
- for (ep = 0; ep < hsotg->num_of_eps; ep++)
- kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
+ hsotg->test_mode = 0;
+
+ for (ep = 0; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ kill_all_requests(hsotg, hsotg->eps_in[ep],
+ -ESHUTDOWN);
+ if (hsotg->eps_out[ep])
+ kill_all_requests(hsotg, hsotg->eps_out[ep],
+ -ESHUTDOWN);
+ }
call_gadget(hsotg, disconnect);
}
@@ -2053,9 +2198,11 @@ static void s3c_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
int epno, ret;
/* look through for any more data to transmit */
-
for (epno = 0; epno < hsotg->num_of_eps; epno++) {
- ep = &hsotg->eps[epno];
+ ep = index_to_ep(hsotg, epno, 1);
+
+ if (!ep)
+ continue;
if (!ep->dir_in)
continue;
@@ -2129,9 +2276,13 @@ static int s3c_hsotg_corereset(struct dwc2_hsotg *hsotg)
*
* Issue a soft reset to the core, and await the core finishing it.
*/
-void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
+void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
+ bool is_usb_reset)
{
- s3c_hsotg_corereset(hsotg);
+ u32 val;
+
+ if (!is_usb_reset)
+ s3c_hsotg_corereset(hsotg);
/*
* we must now enable ep0 ready for host detection and then
@@ -2139,14 +2290,16 @@ void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
*/
/* set the PLL on, remove the HNP/SRP and set the PHY */
+ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (0x5 << 10), hsotg->regs + GUSBCFG);
+ (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
s3c_hsotg_init_fifo(hsotg);
- __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
+ if (!is_usb_reset)
+ __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
- writel(1 << 18 | DCFG_DEVSPD_HS, hsotg->regs + DCFG);
+ writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG);
/* Clear any pending OTG interrupts */
writel(0xffffffff, hsotg->regs + GOTGINT);
@@ -2163,7 +2316,7 @@ void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
if (using_dma(hsotg))
writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
- GAHBCFG_HBSTLEN_INCR4,
+ (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
hsotg->regs + GAHBCFG);
else
writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL |
@@ -2177,8 +2330,8 @@ void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
* interrupts.
*/
- writel(((hsotg->dedicated_fifos) ? DIEPMSK_TXFIFOEMPTY |
- DIEPMSK_INTKNTXFEMPMSK : 0) |
+ writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
+ DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
DIEPMSK_INTKNEPMISMSK,
@@ -2215,9 +2368,11 @@ void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
- __orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
- udelay(10); /* see openiboot */
- __bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
+ if (!is_usb_reset) {
+ __orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
+ udelay(10); /* see openiboot */
+ __bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
+ }
dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));
@@ -2230,13 +2385,13 @@ void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);
- writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+ writel(s3c_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
DXEPCTL_CNAK | DXEPCTL_EPENA |
DXEPCTL_USBACTEP,
hsotg->regs + DOEPCTL0);
/* enable, but don't activate EP0in */
- writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+ writel(s3c_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
s3c_hsotg_enqueue_setup(hsotg);
@@ -2246,8 +2401,10 @@ void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
readl(hsotg->regs + DOEPCTL0));
/* clear global NAKs */
- writel(DCTL_CGOUTNAK | DCTL_CGNPINNAK | DCTL_SFTDISCON,
- hsotg->regs + DCTL);
+ val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
+ if (!is_usb_reset)
+ val |= DCTL_SFTDISCON;
+ __orr32(hsotg->regs + DCTL, val);
/* must be at-least 3ms to allow bus to see disconnect */
mdelay(3);
@@ -2293,7 +2450,6 @@ irq_retry:
writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);
s3c_hsotg_irq_enumdone(hsotg);
- hsotg->connected = 1;
}
if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
@@ -2308,12 +2464,14 @@ irq_retry:
dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
- for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
+ for (ep = 0; ep < hsotg->num_of_eps && daint_out;
+ ep++, daint_out >>= 1) {
if (daint_out & 1)
s3c_hsotg_epint(hsotg, ep, 0);
}
- for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
+ for (ep = 0; ep < hsotg->num_of_eps && daint_in;
+ ep++, daint_in >>= 1) {
if (daint_in & 1)
s3c_hsotg_epint(hsotg, ep, 1);
}
@@ -2329,15 +2487,17 @@ irq_retry:
writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);
+ /* Report disconnection if it is not already done. */
+ s3c_hsotg_disconnect(hsotg);
+
if (usb_status & GOTGCTL_BSESVLD) {
if (time_after(jiffies, hsotg->last_rst +
msecs_to_jiffies(200))) {
- kill_all_requests(hsotg, &hsotg->eps[0],
- -ECONNRESET, true);
+ kill_all_requests(hsotg, hsotg->eps_out[0],
+ -ECONNRESET);
- s3c_hsotg_core_init_disconnected(hsotg);
- s3c_hsotg_core_connect(hsotg);
+ s3c_hsotg_core_init_disconnected(hsotg, true);
}
}
}
@@ -2429,12 +2589,12 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned long flags;
- int index = hs_ep->index;
+ unsigned int index = hs_ep->index;
u32 epctrl_reg;
u32 epctrl;
u32 mps;
- int dir_in;
- int i, val, size;
+ unsigned int dir_in;
+ unsigned int i, val, size;
int ret = 0;
dev_dbg(hsotg->dev,
@@ -2482,7 +2642,7 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
epctrl |= DXEPCTL_SNAK;
/* update the endpoint state */
- s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps);
+ s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
/* default, set to non-periodic */
hs_ep->isochronous = 0;
@@ -2518,30 +2678,48 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
break;
}
+ /* If fifo is already allocated for this ep */
+ if (hs_ep->fifo_index) {
+ size = hs_ep->ep.maxpacket * hs_ep->mc;
+ /* If a bigger fifo is required, deallocate the current one */
+ if (size > hs_ep->fifo_size) {
+ hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
+ hs_ep->fifo_index = 0;
+ hs_ep->fifo_size = 0;
+ }
+ }
+
/*
* if the hardware has dedicated fifos, we must give each IN EP
* a unique tx-fifo even if it is non-periodic.
*/
- if (dir_in && hsotg->dedicated_fifos) {
+ if (dir_in && hsotg->dedicated_fifos && !hs_ep->fifo_index) {
+ u32 fifo_index = 0;
+ u32 fifo_size = UINT_MAX;
size = hs_ep->ep.maxpacket*hs_ep->mc;
- for (i = 1; i <= 8; ++i) {
+ for (i = 1; i < hsotg->num_of_eps; ++i) {
if (hsotg->fifo_map & (1<<i))
continue;
val = readl(hsotg->regs + DPTXFSIZN(i));
val = (val >> FIFOSIZE_DEPTH_SHIFT)*4;
if (val < size)
continue;
- hsotg->fifo_map |= 1<<i;
-
- epctrl |= DXEPCTL_TXFNUM(i);
- hs_ep->fifo_index = i;
- hs_ep->fifo_size = val;
- break;
+ /* Search for smallest acceptable fifo */
+ if (val < fifo_size) {
+ fifo_size = val;
+ fifo_index = i;
+ }
}
- if (i == 8) {
+ if (!fifo_index) {
+ dev_err(hsotg->dev,
+ "%s: No suitable fifo found\n", __func__);
ret = -ENOMEM;
goto error;
}
+ hsotg->fifo_map |= 1 << fifo_index;
+ epctrl |= DXEPCTL_TXFNUM(fifo_index);
+ hs_ep->fifo_index = fifo_index;
+ hs_ep->fifo_size = fifo_size;
}
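The loop above replaces first-fit allocation with best-fit: every free dedicated TX FIFO large enough for maxpacket * mc is considered, and the smallest one wins, keeping the large FIFOs available for endpoints that actually need them. A self-contained restatement of the policy (illustrative helper, not part of the driver):

	static int best_fit_fifo(const u32 *fifo_depth, unsigned int nfifos,
				 u32 claimed_map, u32 needed)
	{
		u32 best_size = UINT_MAX;
		int best = 0;
		unsigned int i;

		for (i = 1; i < nfifos; i++) {
			if (claimed_map & (1u << i))
				continue;		/* already bound to another IN ep */
			if (fifo_depth[i] < needed)
				continue;		/* too small for maxpacket * mc */
			if (fifo_depth[i] < best_size) {
				best_size = fifo_depth[i];
				best = i;
			}
		}
		return best;	/* 0 means no suitable FIFO is free */
	}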
/* for non control endpoints, set PID to D0 */
@@ -2567,7 +2745,7 @@ error:
* s3c_hsotg_ep_disable - disable given endpoint
* @ep: The endpoint to disable.
*/
-static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
@@ -2579,7 +2757,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
- if (ep == &hsotg->eps[0].ep) {
+ if (ep == &hsotg->eps_out[0]->ep) {
dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
return -EINVAL;
}
@@ -2587,8 +2765,6 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
- /* terminate all requests with shutdown */
- kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
hs_ep->fifo_index = 0;
@@ -2605,10 +2781,17 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
/* disable endpoint interrupts */
s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
+ /* terminate all requests with shutdown */
+ kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
+
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+ return s3c_hsotg_ep_disable_force(ep, false);
+}
/**
* on_list - check request is on the given endpoint
* @ep: The endpoint to check.
@@ -2678,40 +2861,39 @@ static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
return 0;
}
- /* write both IN and OUT control registers */
-
- epreg = DIEPCTL(index);
- epctl = readl(hs->regs + epreg);
-
- if (value) {
- epctl |= DXEPCTL_STALL + DXEPCTL_SNAK;
- if (epctl & DXEPCTL_EPENA)
- epctl |= DXEPCTL_EPDIS;
+ if (hs_ep->dir_in) {
+ epreg = DIEPCTL(index);
+ epctl = readl(hs->regs + epreg);
+
+ if (value) {
+ epctl |= DXEPCTL_STALL + DXEPCTL_SNAK;
+ if (epctl & DXEPCTL_EPENA)
+ epctl |= DXEPCTL_EPDIS;
+ } else {
+ epctl &= ~DXEPCTL_STALL;
+ xfertype = epctl & DXEPCTL_EPTYPE_MASK;
+ if (xfertype == DXEPCTL_EPTYPE_BULK ||
+ xfertype == DXEPCTL_EPTYPE_INTERRUPT)
+ epctl |= DXEPCTL_SETD0PID;
+ }
+ writel(epctl, hs->regs + epreg);
} else {
- epctl &= ~DXEPCTL_STALL;
- xfertype = epctl & DXEPCTL_EPTYPE_MASK;
- if (xfertype == DXEPCTL_EPTYPE_BULK ||
- xfertype == DXEPCTL_EPTYPE_INTERRUPT)
- epctl |= DXEPCTL_SETD0PID;
- }
-
- writel(epctl, hs->regs + epreg);
- epreg = DOEPCTL(index);
- epctl = readl(hs->regs + epreg);
+ epreg = DOEPCTL(index);
+ epctl = readl(hs->regs + epreg);
- if (value)
- epctl |= DXEPCTL_STALL;
- else {
- epctl &= ~DXEPCTL_STALL;
- xfertype = epctl & DXEPCTL_EPTYPE_MASK;
- if (xfertype == DXEPCTL_EPTYPE_BULK ||
- xfertype == DXEPCTL_EPTYPE_INTERRUPT)
- epctl |= DXEPCTL_SETD0PID;
+ if (value)
+ epctl |= DXEPCTL_STALL;
+ else {
+ epctl &= ~DXEPCTL_STALL;
+ xfertype = epctl & DXEPCTL_EPTYPE_MASK;
+ if (xfertype == DXEPCTL_EPTYPE_BULK ||
+ xfertype == DXEPCTL_EPTYPE_INTERRUPT)
+ epctl |= DXEPCTL_SETD0PID;
+ }
+ writel(epctl, hs->regs + epreg);
}
- writel(epctl, hs->regs + epreg);
-
hs_ep->halted = value;
return 0;
@@ -2797,6 +2979,7 @@ static void s3c_hsotg_phy_disable(struct dwc2_hsotg *hsotg)
*/
static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
{
+ u32 trdtim;
/* unmask subset of endpoint interrupts */
writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -2812,12 +2995,6 @@ static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
/* Be in disconnected state until gadget is registered */
__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
- if (0) {
- /* post global nak until we're ready */
- writel(DCTL_SGNPINNAK | DCTL_SGOUTNAK,
- hsotg->regs + DCTL);
- }
-
/* setup fifos */
dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
@@ -2827,11 +3004,13 @@ static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
s3c_hsotg_init_fifo(hsotg);
/* set the PLL on, remove the HNP/SRP and set the PHY */
- writel(GUSBCFG_PHYIF16 | GUSBCFG_TOUTCAL(7) | (0x5 << 10),
- hsotg->regs + GUSBCFG);
+ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
+ writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (trdtim << GUSBCFG_USBTRDTIM_SHIFT),
+ hsotg->regs + GUSBCFG);
- writel(using_dma(hsotg) ? GAHBCFG_DMA_EN : 0x0,
- hsotg->regs + GAHBCFG);
+ if (using_dma(hsotg))
+ __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
}
/**
@@ -2885,10 +3064,12 @@ static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
}
s3c_hsotg_phy_enable(hsotg);
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
spin_lock_irqsave(&hsotg->lock, flags);
s3c_hsotg_init(hsotg);
- s3c_hsotg_core_init_disconnected(hsotg);
+ s3c_hsotg_core_init_disconnected(hsotg, false);
hsotg->enabled = 0;
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -2923,8 +3104,12 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
mutex_lock(&hsotg->init_mutex);
/* all endpoints should be shutdown */
- for (ep = 1; ep < hsotg->num_of_eps; ep++)
- s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ s3c_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ s3c_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
spin_lock_irqsave(&hsotg->lock, flags);
@@ -2934,6 +3119,8 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
spin_unlock_irqrestore(&hsotg->lock, flags);
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ otg_set_peripheral(hsotg->uphy->otg, NULL);
s3c_hsotg_phy_disable(hsotg);
regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
@@ -2975,9 +3162,11 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
if (is_on) {
clk_enable(hsotg->clk);
hsotg->enabled = 1;
+ s3c_hsotg_core_init_disconnected(hsotg, false);
s3c_hsotg_core_connect(hsotg);
} else {
s3c_hsotg_core_disconnect(hsotg);
+ s3c_hsotg_disconnect(hsotg);
hsotg->enabled = 0;
clk_disable(hsotg->clk);
}
@@ -2989,11 +3178,52 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
+static int s3c_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+
+ dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ if (is_active) {
+ /* Kill any ep0 requests as the controller will be reinitialized */
+ kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
+ s3c_hsotg_core_init_disconnected(hsotg, false);
+ if (hsotg->enabled)
+ s3c_hsotg_core_connect(hsotg);
+ } else {
+ s3c_hsotg_core_disconnect(hsotg);
+ s3c_hsotg_disconnect(hsotg);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return 0;
+}
+
+/**
+ * s3c_hsotg_vbus_draw - report bMaxPower field
+ * @gadget: The usb gadget state
+ * @mA: Amount of current to draw, in mA
+ *
+ * Report to the phy how much power the device may consume.
+ */
+static int s3c_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+
+ if (IS_ERR_OR_NULL(hsotg->uphy))
+ return -ENOTSUPP;
+ return usb_phy_set_power(hsotg->uphy, mA);
+}
+
static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
.get_frame = s3c_hsotg_gadget_getframe,
.udc_start = s3c_hsotg_udc_start,
.udc_stop = s3c_hsotg_udc_stop,
.pullup = s3c_hsotg_pullup,
+ .vbus_session = s3c_hsotg_vbus_session,
+ .vbus_draw = s3c_hsotg_vbus_draw,
};
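The two new ops are not called by the driver itself; the gadget core invokes them when something else reports a VBUS change or a configuration's bMaxPower. For instance, a transceiver or extcon driver would typically signal VBUS roughly like this (illustrative only, not part of the patch):

	static void report_vbus(struct usb_gadget *gadget, bool present)
	{
		if (present)
			usb_gadget_vbus_connect(gadget);	/* -> s3c_hsotg_vbus_session(gadget, 1) */
		else
			usb_gadget_vbus_disconnect(gadget);	/* -> s3c_hsotg_vbus_session(gadget, 0) */
	}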
/**
@@ -3008,19 +3238,19 @@ static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
*/
static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
- int epnum)
+ int epnum,
+ bool dir_in)
{
char *dir;
if (epnum == 0)
dir = "";
- else if ((epnum % 2) == 0) {
- dir = "out";
- } else {
+ else if (dir_in)
dir = "in";
- hs_ep->dir_in = 1;
- }
+ else
+ dir = "out";
+ hs_ep->dir_in = dir_in;
hs_ep->index = epnum;
snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
@@ -3044,8 +3274,10 @@ static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
if (using_dma(hsotg)) {
u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
- writel(next, hsotg->regs + DIEPCTL(epnum));
- writel(next, hsotg->regs + DOEPCTL(epnum));
+ if (dir_in)
+ writel(next, hsotg->regs + DIEPCTL(epnum));
+ else
+ writel(next, hsotg->regs + DOEPCTL(epnum));
}
}
@@ -3055,24 +3287,56 @@ static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
*
* Read the USB core HW configuration registers
*/
-static void s3c_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
+static int s3c_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
{
- u32 cfg2, cfg3, cfg4;
+ u32 cfg;
+ u32 ep_type;
+ u32 i;
+
/* check hardware configuration */
- cfg2 = readl(hsotg->regs + 0x48);
- hsotg->num_of_eps = (cfg2 >> 10) & 0xF;
+ cfg = readl(hsotg->regs + GHWCFG2);
+ hsotg->num_of_eps = (cfg >> GHWCFG2_NUM_DEV_EP_SHIFT) & 0xF;
+ /* Add ep0 */
+ hsotg->num_of_eps++;
+
+ hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, sizeof(struct s3c_hsotg_ep),
+ GFP_KERNEL);
+ if (!hsotg->eps_in[0])
+ return -ENOMEM;
+ /* Same s3c_hsotg_ep is used in both directions for ep0 */
+ hsotg->eps_out[0] = hsotg->eps_in[0];
+
+ cfg = readl(hsotg->regs + GHWCFG1);
+ for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
+ ep_type = cfg & 3;
+ /* Direction in or both */
+ if (!(ep_type & 2)) {
+ hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
+ sizeof(struct s3c_hsotg_ep), GFP_KERNEL);
+ if (!hsotg->eps_in[i])
+ return -ENOMEM;
+ }
+ /* Direction out or both */
+ if (!(ep_type & 1)) {
+ hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
+ sizeof(struct s3c_hsotg_ep), GFP_KERNEL);
+ if (!hsotg->eps_out[i])
+ return -ENOMEM;
+ }
+ }
- cfg3 = readl(hsotg->regs + 0x4C);
- hsotg->fifo_mem = (cfg3 >> 16);
+ cfg = readl(hsotg->regs + GHWCFG3);
+ hsotg->fifo_mem = (cfg >> GHWCFG3_DFIFO_DEPTH_SHIFT);
- cfg4 = readl(hsotg->regs + 0x50);
- hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
+ cfg = readl(hsotg->regs + GHWCFG4);
+ hsotg->dedicated_fifos = (cfg >> GHWCFG4_DED_FIFO_SHIFT) & 1;
dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
hsotg->num_of_eps,
hsotg->dedicated_fifos ? "dedicated" : "shared",
hsotg->fifo_mem);
+ return 0;
}
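The GHWCFG1 loop above allocates per-direction endpoint structures from a packed two-bit field per endpoint, decoded as 0 = bidirectional, 1 = IN only, 2 = OUT only. The same decoding, written as a stand-alone helper for clarity (hypothetical, not in the driver):

	static void ghwcfg1_ep_dir(u32 ghwcfg1, unsigned int ep,
				   bool *has_in, bool *has_out)
	{
		u32 ep_type = (ghwcfg1 >> (ep * 2)) & 3;	/* ep0 field sits in bits 1:0 */

		*has_in = !(ep_type & 2);	/* bidirectional or IN only */
		*has_out = !(ep_type & 1);	/* bidirectional or OUT only */
	}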
/**
@@ -3091,22 +3355,22 @@ static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg)
readl(regs + DCFG), readl(regs + DCTL),
readl(regs + DIEPMSK));
- dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
- readl(regs + GAHBCFG), readl(regs + 0x44));
+ dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
+ readl(regs + GAHBCFG), readl(regs + GHWCFG1));
dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));
/* show periodic fifo settings */
- for (idx = 1; idx <= 15; idx++) {
+ for (idx = 1; idx < hsotg->num_of_eps; idx++) {
val = readl(regs + DPTXFSIZN(idx));
dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
val >> FIFOSIZE_DEPTH_SHIFT,
val & FIFOSIZE_STARTADDR_MASK);
}
- for (idx = 0; idx < 15; idx++) {
+ for (idx = 0; idx < hsotg->num_of_eps; idx++) {
dev_info(dev,
"ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
readl(regs + DIEPCTL(idx)),
@@ -3128,6 +3392,103 @@ static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg)
}
/**
+ * testmode_write - debugfs: change usb test mode
+ * @file: The debugfs file written to.
+ * @ubuf: User buffer containing the requested test mode name.
+ * @count: Number of bytes in @ubuf.
+ * @ppos: Unused parameter.
+ *
+ * This debugfs entry modifies the current usb test mode.
+ */
+static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t
+ count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc2_hsotg *hsotg = s->private;
+ unsigned long flags;
+ u32 testmode = 0;
+ char buf[32];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "test_j", 6))
+ testmode = TEST_J;
+ else if (!strncmp(buf, "test_k", 6))
+ testmode = TEST_K;
+ else if (!strncmp(buf, "test_se0_nak", 12))
+ testmode = TEST_SE0_NAK;
+ else if (!strncmp(buf, "test_packet", 11))
+ testmode = TEST_PACKET;
+ else if (!strncmp(buf, "test_force_enable", 17))
+ testmode = TEST_FORCE_EN;
+ else
+ testmode = 0;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ s3c_hsotg_set_test_mode(hsotg, testmode);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return count;
+}
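testmode_write() forwards the parsed value to s3c_hsotg_set_test_mode(), whose body lies outside this hunk. What such a helper is expected to do — under the lock, program the TSTCTL field of DCTL that testmode_show() below reads back — can be sketched as follows (hypothetical code, not the driver's actual implementation):

	static int set_test_mode_sketch(struct dwc2_hsotg *hsotg, int testmode)
	{
		u32 dctl = readl(hsotg->regs + DCTL);

		dctl &= ~DCTL_TSTCTL_MASK;
		switch (testmode) {
		case TEST_J:
		case TEST_K:
		case TEST_SE0_NAK:
		case TEST_PACKET:
		case TEST_FORCE_EN:
			dctl |= testmode << DCTL_TSTCTL_SHIFT;
			break;
		default:
			return -EINVAL;
		}
		writel(dctl, hsotg->regs + DCTL);
		return 0;
	}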
+
+/**
+ * testmode_show - debugfs: show usb test mode state
+ * @s: The seq file to write to.
+ * @unused: Unused parameter.
+ *
+ * This debugfs entry shows which usb test mode is currently enabled.
+ */
+static int testmode_show(struct seq_file *s, void *unused)
+{
+ struct dwc2_hsotg *hsotg = s->private;
+ unsigned long flags;
+ int dctl;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dctl = readl(hsotg->regs + DCTL);
+ dctl &= DCTL_TSTCTL_MASK;
+ dctl >>= DCTL_TSTCTL_SHIFT;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ switch (dctl) {
+ case 0:
+ seq_puts(s, "no test\n");
+ break;
+ case TEST_J:
+ seq_puts(s, "test_j\n");
+ break;
+ case TEST_K:
+ seq_puts(s, "test_k\n");
+ break;
+ case TEST_SE0_NAK:
+ seq_puts(s, "test_se0_nak\n");
+ break;
+ case TEST_PACKET:
+ seq_puts(s, "test_packet\n");
+ break;
+ case TEST_FORCE_EN:
+ seq_puts(s, "test_force_enable\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %d\n", dctl);
+ }
+
+ return 0;
+}
+
+static int testmode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testmode_show, inode->i_private);
+}
+
+static const struct file_operations testmode_fops = {
+ .owner = THIS_MODULE,
+ .open = testmode_open,
+ .write = testmode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
* state_show - debugfs: show overall driver and device state.
* @seq: The seq file to write to.
* @v: Unused parameter.
@@ -3164,7 +3525,7 @@ static int state_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nEndpoint status:\n");
- for (idx = 0; idx < 15; idx++) {
+ for (idx = 0; idx < hsotg->num_of_eps; idx++) {
u32 in, out;
in = readl(regs + DIEPCTL(idx));
@@ -3223,7 +3584,7 @@ static int fifo_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
- for (idx = 1; idx <= 15; idx++) {
+ for (idx = 1; idx < hsotg->num_of_eps; idx++) {
val = readl(regs + DPTXFSIZN(idx));
seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
@@ -3355,29 +3716,53 @@ static void s3c_hsotg_create_debug(struct dwc2_hsotg *hsotg)
/* create general state file */
- hsotg->debug_file = debugfs_create_file("state", 0444, root,
+ hsotg->debug_file = debugfs_create_file("state", S_IRUGO, root,
hsotg, &state_fops);
if (IS_ERR(hsotg->debug_file))
dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
- hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
+ hsotg->debug_testmode = debugfs_create_file("testmode",
+ S_IRUGO | S_IWUSR, root,
+ hsotg, &testmode_fops);
+
+ if (IS_ERR(hsotg->debug_testmode))
+ dev_err(hsotg->dev, "%s: failed to create testmode\n",
+ __func__);
+
+ hsotg->debug_fifo = debugfs_create_file("fifo", S_IRUGO, root,
hsotg, &fifo_fops);
if (IS_ERR(hsotg->debug_fifo))
dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
- /* create one file for each endpoint */
-
+ /* Create one file for each out endpoint */
for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
- struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+ struct s3c_hsotg_ep *ep;
- ep->debugfs = debugfs_create_file(ep->name, 0444,
- root, ep, &ep_fops);
+ ep = hsotg->eps_out[epidx];
+ if (ep) {
+ ep->debugfs = debugfs_create_file(ep->name, S_IRUGO,
+ root, ep, &ep_fops);
- if (IS_ERR(ep->debugfs))
- dev_err(hsotg->dev, "failed to create %s debug file\n",
- ep->name);
+ if (IS_ERR(ep->debugfs))
+ dev_err(hsotg->dev, "failed to create %s debug file\n",
+ ep->name);
+ }
+ }
+ /* Create one file for each in endpoint. EP0 is handled with out eps */
+ for (epidx = 1; epidx < hsotg->num_of_eps; epidx++) {
+ struct s3c_hsotg_ep *ep;
+
+ ep = hsotg->eps_in[epidx];
+ if (ep) {
+ ep->debugfs = debugfs_create_file(ep->name, S_IRUGO,
+ root, ep, &ep_fops);
+
+ if (IS_ERR(ep->debugfs))
+ dev_err(hsotg->dev, "failed to create %s debug file\n",
+ ep->name);
+ }
}
}
@@ -3392,15 +3777,63 @@ static void s3c_hsotg_delete_debug(struct dwc2_hsotg *hsotg)
unsigned epidx;
for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
- struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
- debugfs_remove(ep->debugfs);
+ if (hsotg->eps_in[epidx])
+ debugfs_remove(hsotg->eps_in[epidx]->debugfs);
+ if (hsotg->eps_out[epidx])
+ debugfs_remove(hsotg->eps_out[epidx]->debugfs);
}
debugfs_remove(hsotg->debug_file);
+ debugfs_remove(hsotg->debug_testmode);
debugfs_remove(hsotg->debug_fifo);
debugfs_remove(hsotg->debug_root);
}
+#ifdef CONFIG_OF
+static void s3c_hsotg_of_probe(struct dwc2_hsotg *hsotg)
+{
+ struct device_node *np = hsotg->dev->of_node;
+ u32 len = 0;
+ u32 i = 0;
+
+ /* Enable dma if requested in device tree */
+ hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
+
+ /*
+ * Register TX periodic fifo size per endpoint.
+ * EP0 is excluded since it has no fifo configuration.
+ */
+ if (!of_find_property(np, "g-tx-fifo-size", &len))
+ goto rx_fifo;
+
+ len /= sizeof(u32);
+
+ /* Read tx fifo sizes other than ep0 */
+ if (of_property_read_u32_array(np, "g-tx-fifo-size",
+ &hsotg->g_tx_fifo_sz[1], len))
+ goto rx_fifo;
+
+ /* Add ep0 */
+ len++;
+
+ /* Make remaining TX fifos unavailable */
+ if (len < MAX_EPS_CHANNELS) {
+ for (i = len; i < MAX_EPS_CHANNELS; i++)
+ hsotg->g_tx_fifo_sz[i] = 0;
+ }
+
+rx_fifo:
+ /* Register RX fifo size */
+ of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
+
+ /* Register NPTX fifo size */
+ of_property_read_u32(np, "g-np-tx-fifo-size",
+ &hsotg->g_np_g_tx_fifo_sz);
+}
+#else
+static inline void s3c_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
+#endif
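For reference, the device-tree properties parsed above map onto plain structure fields, so a platform without those properties effectively ends up with assignments like these (the per-endpoint value is only an example; the actual defaults are set in dwc2_gadget_init() below):

	hsotg->g_using_dma = false;		/* "g-use-dma" absent */
	hsotg->g_rx_fifo_sz = 2048;		/* "g-rx-fifo-size" */
	hsotg->g_np_g_tx_fifo_sz = 1024;	/* "g-np-tx-fifo-size" */
	hsotg->g_tx_fifo_sz[1] = 256;		/* first cell of "g-tx-fifo-size" (ep1) */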
+
/**
* dwc2_gadget_init - init function for gadget
* @dwc2: The data structure for the DWC2 driver.
@@ -3410,41 +3843,47 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{
struct device *dev = hsotg->dev;
struct s3c_hsotg_plat *plat = dev->platform_data;
- struct phy *phy;
- struct usb_phy *uphy;
- struct s3c_hsotg_ep *eps;
int epnum;
int ret;
int i;
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
/* Set default UTMI width */
hsotg->phyif = GUSBCFG_PHYIF16;
+ s3c_hsotg_of_probe(hsotg);
+
+ /* Initialize to legacy fifo configuration values */
+ hsotg->g_rx_fifo_sz = 2048;
+ hsotg->g_np_g_tx_fifo_sz = 1024;
+ memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
+ /* Device tree specific probe */
+ s3c_hsotg_of_probe(hsotg);
+ /* Dump fifo information */
+ dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
+ hsotg->g_np_g_tx_fifo_sz);
+ dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
+ hsotg->g_tx_fifo_sz[i]);
/*
- * Attempt to find a generic PHY, then look for an old style
- * USB PHY, finally fall back to pdata
+ * If platform probe couldn't find a generic PHY or an old style
+ * USB PHY, fall back to pdata
*/
- phy = devm_phy_get(dev, "usb2-phy");
- if (IS_ERR(phy)) {
- uphy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(uphy)) {
- /* Fallback for pdata */
- plat = dev_get_platdata(dev);
- if (!plat) {
- dev_err(dev,
- "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- }
- hsotg->plat = plat;
- } else
- hsotg->uphy = uphy;
- } else {
- hsotg->phy = phy;
+ if (IS_ERR_OR_NULL(hsotg->phy) && IS_ERR_OR_NULL(hsotg->uphy)) {
+ plat = dev_get_platdata(dev);
+ if (!plat) {
+ dev_err(dev,
+ "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
+ }
+ hsotg->plat = plat;
+ } else if (hsotg->phy) {
/*
* If using the generic PHY framework, check if the PHY bus
* width is 8-bit and set the phyif appropriately.
*/
- if (phy_get_bus_width(phy) == 8)
+ if (phy_get_bus_width(hsotg->phy) == 8)
hsotg->phyif = GUSBCFG_PHYIF8;
}
@@ -3484,16 +3923,53 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
if (ret) {
dev_err(dev, "failed to enable supplies: %d\n", ret);
- goto err_supplies;
+ goto err_clk;
}
/* usb phy enable */
s3c_hsotg_phy_enable(hsotg);
+ /*
+ * Force Device mode before initialization.
+ * This allows the fifos to be configured correctly for device mode.
+ */
+ __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEHOSTMODE);
+ __orr32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE);
+
+ /*
+ * According to Synopsys databook, this sleep is needed for the force
+ * device mode to take effect.
+ */
+ msleep(25);
+
s3c_hsotg_corereset(hsotg);
- s3c_hsotg_hw_cfg(hsotg);
+ ret = s3c_hsotg_hw_cfg(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
+ goto err_clk;
+ }
+
s3c_hsotg_init(hsotg);
+ /* Switch back to default configuration */
+ __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE);
+
+ hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
+ DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
+ if (!hsotg->ctrl_buff) {
+ dev_err(dev, "failed to allocate ctrl request buff\n");
+ ret = -ENOMEM;
+ goto err_supplies;
+ }
+
+ hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
+ DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
+ if (!hsotg->ep0_buff) {
+ dev_err(dev, "failed to allocate ctrl reply buff\n");
+ ret = -ENOMEM;
+ goto err_supplies;
+ }
+
ret = devm_request_irq(hsotg->dev, irq, s3c_hsotg_irq, IRQF_SHARED,
dev_name(hsotg->dev), hsotg);
if (ret < 0) {
@@ -3502,7 +3978,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
dev_err(dev, "cannot claim IRQ for gadget\n");
- goto err_clk;
+ goto err_supplies;
}
/* hsotg->num_of_eps holds number of EPs other than ep0 */
@@ -3513,33 +3989,30 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
goto err_supplies;
}
- eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
- GFP_KERNEL);
- if (!eps) {
- ret = -ENOMEM;
- goto err_supplies;
- }
-
- hsotg->eps = eps;
-
/* setup endpoint information */
INIT_LIST_HEAD(&hsotg->gadget.ep_list);
- hsotg->gadget.ep0 = &hsotg->eps[0].ep;
+ hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
/* allocate EP0 request */
- hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
+ hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
GFP_KERNEL);
if (!hsotg->ctrl_req) {
dev_err(dev, "failed to allocate ctrl req\n");
ret = -ENOMEM;
- goto err_ep_mem;
+ goto err_supplies;
}
/* initialise the endpoints now the core has been initialised */
- for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
- s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
+ for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
+ if (hsotg->eps_in[epnum])
+ s3c_hsotg_initep(hsotg, hsotg->eps_in[epnum],
+ epnum, 1);
+ if (hsotg->eps_out[epnum])
+ s3c_hsotg_initep(hsotg, hsotg->eps_out[epnum],
+ epnum, 0);
+ }
/* disable power and clock */
s3c_hsotg_phy_disable(hsotg);
@@ -3548,12 +4021,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
hsotg->supplies);
if (ret) {
dev_err(dev, "failed to disable supplies: %d\n", ret);
- goto err_ep_mem;
+ goto err_supplies;
}
ret = usb_add_gadget_udc(dev, &hsotg->gadget);
if (ret)
- goto err_ep_mem;
+ goto err_supplies;
s3c_hsotg_create_debug(hsotg);
@@ -3561,8 +4034,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
return 0;
-err_ep_mem:
- kfree(eps);
err_supplies:
s3c_hsotg_phy_disable(hsotg);
err_clk:
@@ -3608,8 +4079,12 @@ int s3c_hsotg_suspend(struct dwc2_hsotg *hsotg)
s3c_hsotg_phy_disable(hsotg);
- for (ep = 0; ep < hsotg->num_of_eps; ep++)
- s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+ for (ep = 0; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ s3c_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ s3c_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
@@ -3640,7 +4115,7 @@ int s3c_hsotg_resume(struct dwc2_hsotg *hsotg)
s3c_hsotg_phy_enable(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
- s3c_hsotg_core_init_disconnected(hsotg);
+ s3c_hsotg_core_init_disconnected(hsotg, false);
if (hsotg->enabled)
s3c_hsotg_core_connect(hsotg);
spin_unlock_irqrestore(&hsotg->lock, flags);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index a0cd9db6f4cd..c78c8740db1d 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -316,10 +316,12 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
*/
static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
{
- if (hsotg->lx_state == DWC2_L2)
+ if (hsotg->lx_state == DWC2_L2) {
hsotg->flags.b.port_suspend_change = 1;
- else
+ usb_hcd_resume_root_hub(hsotg->priv);
+ } else {
hsotg->flags.b.port_l1_change = 1;
+ }
}
/**
@@ -1371,7 +1373,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
dwc2_core_init(hsotg, false, -1);
dwc2_enable_global_interrupts(hsotg);
- s3c_hsotg_core_init_disconnected(hsotg);
+ s3c_hsotg_core_init_disconnected(hsotg, false);
s3c_hsotg_core_connect(hsotg);
} else {
/* A-Device connector (Host Mode) */
@@ -1473,30 +1475,6 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
}
}
-static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
-{
- u32 hprt0;
-
- /* After clear the Stop PHY clock bit, we should wait for a moment
- * for PLL work stable with clock output.
- */
- writel(0, hsotg->regs + PCGCTL);
- usleep_range(2000, 4000);
-
- hprt0 = dwc2_read_hprt0(hsotg);
- hprt0 |= HPRT0_RES;
- writel(hprt0, hsotg->regs + HPRT0);
- hprt0 &= ~HPRT0_SUSP;
- /* according to USB2.0 Spec 7.1.7.7, the host must send the resume
- * signal for at least 20ms
- */
- usleep_range(20000, 25000);
-
- hprt0 &= ~HPRT0_RES;
- writel(hprt0, hsotg->regs + HPRT0);
- hsotg->lx_state = DWC2_L0;
-}
-
/* Handles hub class-specific requests */
static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u16 wvalue, u16 windex, char *buf, u16 wlength)
@@ -1542,7 +1520,17 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
- dwc2_port_resume(hsotg);
+ writel(0, hsotg->regs + PCGCTL);
+ usleep_range(20000, 40000);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+ writel(hprt0, hsotg->regs + HPRT0);
+ hprt0 &= ~HPRT0_SUSP;
+ usleep_range(100000, 150000);
+
+ hprt0 &= ~HPRT0_RES;
+ writel(hprt0, hsotg->regs + HPRT0);
break;
case USB_PORT_FEAT_POWER:
@@ -1622,7 +1610,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
hub_desc->bDescLength = 9;
hub_desc->bDescriptorType = 0x29;
hub_desc->bNbrPorts = 1;
- hub_desc->wHubCharacteristics = cpu_to_le16(0x08);
+ hub_desc->wHubCharacteristics =
+ cpu_to_le16(HUB_CHAR_COMMON_LPSM |
+ HUB_CHAR_INDV_PORT_OCPM);
hub_desc->bPwrOn2PwrGood = 1;
hub_desc->bHubContrCurrent = 0;
hub_desc->u.hs.DeviceRemovable[0] = 0;
@@ -2315,55 +2305,6 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
usleep_range(1000, 3000);
}
-static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
-{
- struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- u32 hprt0;
-
- if (!((hsotg->op_state == OTG_STATE_B_HOST) ||
- (hsotg->op_state == OTG_STATE_A_HOST)))
- return 0;
-
- /* TODO: We get into suspend from 'on' state, maybe we need to do
- * something if we get here from DWC2_L1(LPM sleep) state one day.
- */
- if (hsotg->lx_state != DWC2_L0)
- return 0;
-
- hprt0 = dwc2_read_hprt0(hsotg);
- if (hprt0 & HPRT0_CONNSTS) {
- dwc2_port_suspend(hsotg, 1);
- } else {
- u32 pcgctl = readl(hsotg->regs + PCGCTL);
-
- pcgctl |= PCGCTL_STOPPCLK;
- writel(pcgctl, hsotg->regs + PCGCTL);
- }
-
- return 0;
-}
-
-static int _dwc2_hcd_resume(struct usb_hcd *hcd)
-{
- struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- u32 hprt0;
-
- if (!((hsotg->op_state == OTG_STATE_B_HOST) ||
- (hsotg->op_state == OTG_STATE_A_HOST)))
- return 0;
-
- if (hsotg->lx_state != DWC2_L2)
- return 0;
-
- hprt0 = dwc2_read_hprt0(hsotg);
- if ((hprt0 & HPRT0_CONNSTS) && (hprt0 & HPRT0_SUSP))
- dwc2_port_resume(hsotg);
- else
- writel(0, hsotg->regs + PCGCTL);
-
- return 0;
-}
-
/* Returns the current frame number */
static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
{
@@ -2734,9 +2675,6 @@ static struct hc_driver dwc2_hc_driver = {
.hub_status_data = _dwc2_hcd_hub_status_data,
.hub_control = _dwc2_hcd_hub_control,
.clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
-
- .bus_suspend = _dwc2_hcd_suspend,
- .bus_resume = _dwc2_hcd_resume,
};
/*
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 51248b935493..d0a5ed8fa15a 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -294,6 +294,7 @@
#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
#define GHWCFG4_NUM_IN_EPS_SHIFT 26
#define GHWCFG4_DED_FIFO_EN (1 << 25)
+#define GHWCFG4_DED_FIFO_SHIFT 25
#define GHWCFG4_SESSION_END_FILT_EN (1 << 24)
#define GHWCFG4_B_VALID_FILT_EN (1 << 23)
#define GHWCFG4_A_VALID_FILT_EN (1 << 22)
@@ -541,6 +542,7 @@
#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
+#define DXEPINT_SETUP_RCVD (1 << 15)
#define DXEPINT_INEPNAKEFF (1 << 6)
#define DXEPINT_BACK2BACKSETUP (1 << 6)
#define DXEPINT_INTKNEPMIS (1 << 5)
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 6a795aa2ff05..ae095f009b4f 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -155,6 +155,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
+ struct phy *phy;
+ struct usb_phy *uphy;
int retval;
int irq;
@@ -212,6 +214,24 @@ static int dwc2_driver_probe(struct platform_device *dev)
hsotg->dr_mode = of_usb_get_dr_mode(dev->dev.of_node);
+ /*
+ * Attempt to find a generic PHY, then look for an old style
+ * USB PHY
+ */
+ phy = devm_phy_get(&dev->dev, "usb2-phy");
+ if (IS_ERR(phy)) {
+ hsotg->phy = NULL;
+ uphy = devm_usb_get_phy(&dev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(uphy))
+ hsotg->uphy = NULL;
+ else
+ hsotg->uphy = uphy;
+ } else {
+ hsotg->phy = phy;
+ phy_power_on(hsotg->phy);
+ phy_init(hsotg->phy);
+ }
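One assumption worth making explicit: devm_phy_get() can also fail with -EPROBE_DEFER when the PHY provider has not probed yet, and the code above treats every error as "no generic PHY". A variant that prefers deferral would look roughly like this (a sketch of an alternative, not what the patch does):

	phy = devm_phy_get(&dev->dev, "usb2-phy");
	if (IS_ERR(phy)) {
		if (PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry once the PHY driver is ready */
		hsotg->phy = NULL;
	}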
+
spin_lock_init(&hsotg->lock);
mutex_init(&hsotg->init_mutex);
retval = dwc2_gadget_init(hsotg, irq);
@@ -231,8 +251,15 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
int ret = 0;
- if (dwc2_is_device_mode(dwc2))
+ if (dwc2_is_device_mode(dwc2)) {
ret = s3c_hsotg_suspend(dwc2);
+ } else {
+ if (dwc2->lx_state == DWC2_L0)
+ return 0;
+ phy_exit(dwc2->phy);
+ phy_power_off(dwc2->phy);
+
+ }
return ret;
}
@@ -241,8 +268,13 @@ static int __maybe_unused dwc2_resume(struct device *dev)
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
int ret = 0;
- if (dwc2_is_device_mode(dwc2))
+ if (dwc2_is_device_mode(dwc2)) {
ret = s3c_hsotg_resume(dwc2);
+ } else {
+ phy_power_on(dwc2->phy);
+ phy_init(dwc2->phy);
+
+ }
return ret;
}
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 58b5b2cde4c5..edbf9c85af7e 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -104,12 +104,6 @@ config USB_DWC3_DEBUG
help
Say Y here to enable debugging messages on DWC3 Driver.
-config USB_DWC3_VERBOSE
- bool "Enable Verbose Debugging Messages"
- depends on USB_DWC3_DEBUG
- help
- Say Y here to enable verbose debugging messages on DWC3 Driver.
-
config DWC3_HOST_USB3_LPM_ENABLE
bool "Enable USB3 LPM Capability"
depends on USB_DWC3_HOST=y || USB_DWC3_DUAL_ROLE=y
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index bb34fbcfeab3..46172f47f02d 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -2,7 +2,6 @@
CFLAGS_trace.o := -I$(src)
ccflags-$(CONFIG_USB_DWC3_DEBUG) := -DDEBUG
-ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC3) += dwc3.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 25ddc39efad8..9f0e209b8f6c 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -345,7 +345,7 @@ static void dwc3_core_num_eps(struct dwc3 *dwc)
dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;
- dev_vdbg(dwc->dev, "found %d IN and %d OUT endpoints\n",
+ dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints",
dwc->num_in_eps, dwc->num_out_eps);
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4bb9aa696ede..d201910b892f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -431,7 +431,6 @@ struct dwc3_event_buffer {
* @dwc: pointer to DWC controller
* @saved_state: ep state saved during hibernation
* @flags: endpoint flags (wedged, stalled, ...)
- * @current_trb: index of current used trb
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
@@ -464,8 +463,6 @@ struct dwc3_ep {
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN (1 << 31)
- unsigned current_trb;
-
u8 number;
u8 type;
u8 resource_index;
@@ -685,7 +682,6 @@ struct dwc3_scratchpad_array {
* @is_utmi_l1_suspend: the core asserts output signal
* 0 - utmi_sleep_n
* 1 - utmi_l1_suspend_n
- * @is_selfpowered: true when we are selfpowered
* @is_fpga: true when we are using the FPGA board
* @needs_fifo_resize: not all users might want fifo resizing, flag it
* @pullups_connected: true when Run/Stop bit is set
@@ -809,7 +805,6 @@ struct dwc3 {
unsigned has_hibernation:1;
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
- unsigned is_selfpowered:1;
unsigned is_fpga:1;
unsigned needs_fifo_resize:1;
unsigned pullups_connected:1;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7c4faf738747..8d950569d557 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -22,9 +22,6 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/usb_phy_generic.h>
-
#include "platform_data.h"
/* FIXME define these in <linux/pci_ids.h> */
@@ -33,67 +30,44 @@
#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
#define PCI_DEVICE_ID_INTEL_BSW 0x22B7
+#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
+#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
-struct dwc3_pci {
- struct device *dev;
- struct platform_device *dwc3;
- struct platform_device *usb2_phy;
- struct platform_device *usb3_phy;
-};
-
-static int dwc3_pci_register_phys(struct dwc3_pci *glue)
+static int dwc3_pci_quirks(struct pci_dev *pdev)
{
- struct usb_phy_generic_platform_data pdata;
- struct platform_device *pdev;
- int ret;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
+ struct dwc3_platform_data pdata;
- memset(&pdata, 0x00, sizeof(pdata));
+ memset(&pdata, 0, sizeof(pdata));
- pdev = platform_device_alloc("usb_phy_generic", 0);
- if (!pdev)
- return -ENOMEM;
-
- glue->usb2_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB2;
- pdata.gpio_reset = -1;
+ pdata.has_lpm_erratum = true;
+ pdata.lpm_nyet_threshold = 0xf;
- ret = platform_device_add_data(glue->usb2_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err1;
+ pdata.u2exit_lfps_quirk = true;
+ pdata.u2ss_inp3_quirk = true;
+ pdata.req_p1p2p3_quirk = true;
+ pdata.del_p1p2p3_quirk = true;
+ pdata.del_phy_power_chg_quirk = true;
+ pdata.lfps_filter_quirk = true;
+ pdata.rx_detect_poll_quirk = true;
- pdev = platform_device_alloc("usb_phy_generic", 1);
- if (!pdev) {
- ret = -ENOMEM;
- goto err1;
- }
+ pdata.tx_de_emphasis_quirk = true;
+ pdata.tx_de_emphasis = 1;
- glue->usb3_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB3;
-
- ret = platform_device_add_data(glue->usb3_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err2;
-
- ret = platform_device_add(glue->usb2_phy);
- if (ret)
- goto err2;
+ /*
+ * FIXME these quirks should be removed when AMD NL
+ * taps out
+ */
+ pdata.disable_scramble_quirk = true;
+ pdata.dis_u3_susphy_quirk = true;
+ pdata.dis_u2_susphy_quirk = true;
- ret = platform_device_add(glue->usb3_phy);
- if (ret)
- goto err3;
+ return platform_device_add_data(pci_get_drvdata(pdev), &pdata,
+ sizeof(pdata));
+ }
return 0;
-
-err3:
- platform_device_del(glue->usb2_phy);
-
-err2:
- platform_device_put(glue->usb3_phy);
-
-err1:
- platform_device_put(glue->usb2_phy);
-
- return ret;
}
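The pdata attached above is consumed on the other side of the platform bus: the dwc3 core reads it back with dev_get_platdata() during its own probe and copies the quirk bits into its private structure, roughly as sketched here (field and variable names follow the ones used in this patch; the exact core.c code is not shown in this diff):

	struct dwc3_platform_data *pdata = dev_get_platdata(dwc->dev);

	if (pdata) {
		dwc->has_lpm_erratum = pdata->has_lpm_erratum;
		dwc->lpm_nyet_threshold = pdata->lpm_nyet_threshold;
		dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
		/* ...and likewise for the remaining quirk flags. */
	}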
static int dwc3_pci_probe(struct pci_dev *pci,
@@ -101,18 +75,8 @@ static int dwc3_pci_probe(struct pci_dev *pci,
{
struct resource res[2];
struct platform_device *dwc3;
- struct dwc3_pci *glue;
int ret;
struct device *dev = &pci->dev;
- struct dwc3_platform_data dwc3_pdata;
-
- memset(&dwc3_pdata, 0x00, sizeof(dwc3_pdata));
-
- glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
- return -ENOMEM;
-
- glue->dev = dev;
ret = pcim_enable_device(pci);
if (ret) {
@@ -122,12 +86,6 @@ static int dwc3_pci_probe(struct pci_dev *pci,
pci_set_master(pci);
- ret = dwc3_pci_register_phys(glue);
- if (ret) {
- dev_err(dev, "couldn't register PHYs\n");
- return ret;
- }
-
dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
if (!dwc3) {
dev_err(dev, "couldn't allocate dwc3 device\n");
@@ -145,70 +103,34 @@ static int dwc3_pci_probe(struct pci_dev *pci,
res[1].name = "dwc_usb3";
res[1].flags = IORESOURCE_IRQ;
- if (pci->vendor == PCI_VENDOR_ID_AMD &&
- pci->device == PCI_DEVICE_ID_AMD_NL_USB) {
- dwc3_pdata.has_lpm_erratum = true;
- dwc3_pdata.lpm_nyet_threshold = 0xf;
-
- dwc3_pdata.u2exit_lfps_quirk = true;
- dwc3_pdata.u2ss_inp3_quirk = true;
- dwc3_pdata.req_p1p2p3_quirk = true;
- dwc3_pdata.del_p1p2p3_quirk = true;
- dwc3_pdata.del_phy_power_chg_quirk = true;
- dwc3_pdata.lfps_filter_quirk = true;
- dwc3_pdata.rx_detect_poll_quirk = true;
-
- dwc3_pdata.tx_de_emphasis_quirk = true;
- dwc3_pdata.tx_de_emphasis = 1;
-
- /*
- * FIXME these quirks should be removed when AMD NL
- * taps out
- */
- dwc3_pdata.disable_scramble_quirk = true;
- dwc3_pdata.dis_u3_susphy_quirk = true;
- dwc3_pdata.dis_u2_susphy_quirk = true;
- }
-
ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
return ret;
}
- pci_set_drvdata(pci, glue);
-
- ret = platform_device_add_data(dwc3, &dwc3_pdata, sizeof(dwc3_pdata));
+ pci_set_drvdata(pci, dwc3);
+ ret = dwc3_pci_quirks(pci);
if (ret)
- goto err3;
-
- dma_set_coherent_mask(&dwc3->dev, dev->coherent_dma_mask);
+ goto err;
- dwc3->dev.dma_mask = dev->dma_mask;
- dwc3->dev.dma_parms = dev->dma_parms;
dwc3->dev.parent = dev;
- glue->dwc3 = dwc3;
ret = platform_device_add(dwc3);
if (ret) {
dev_err(dev, "failed to register dwc3 device\n");
- goto err3;
+ goto err;
}
return 0;
-
-err3:
+err:
platform_device_put(dwc3);
return ret;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
- struct dwc3_pci *glue = pci_get_drvdata(pci);
-
- platform_device_unregister(glue->dwc3);
- platform_device_unregister(glue->usb2_phy);
- platform_device_unregister(glue->usb3_phy);
+ platform_device_unregister(pci_get_drvdata(pci));
}
static const struct pci_device_id dwc3_pci_id_table[] = {
@@ -219,50 +141,18 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_pci_suspend(struct device *dev)
-{
- struct pci_dev *pci = to_pci_dev(dev);
-
- pci_disable_device(pci);
-
- return 0;
-}
-
-static int dwc3_pci_resume(struct device *dev)
-{
- struct pci_dev *pci = to_pci_dev(dev);
- int ret;
-
- ret = pci_enable_device(pci);
- if (ret) {
- dev_err(dev, "can't re-enable device --> %d\n", ret);
- return ret;
- }
-
- pci_set_master(pci);
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct dev_pm_ops dwc3_pci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
-};
-
static struct pci_driver dwc3_pci_driver = {
.name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = dwc3_pci_remove,
- .driver = {
- .pm = &dwc3_pci_dev_pm_ops,
- },
};
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 1bc77a3b4997..2ef3c8d6a9db 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -344,7 +344,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
/*
* LTM will be set once we know how to set this in HW.
*/
- usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;
+ usb_status |= dwc->gadget.is_selfpowered;
if (dwc->speed == DWC3_DSTS_SUPERSPEED) {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f03b136ecfce..a03a485205c7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -139,7 +139,8 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
udelay(5);
}
- dev_vdbg(dwc->dev, "link state change request timed out\n");
+ dwc3_trace(trace_dwc3_gadget,
+ "link state change request timed out");
return -ETIMEDOUT;
}
@@ -219,7 +220,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
fifo_size |= (last_fifo_depth << 16);
- dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
+ dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
dep->name, last_fifo_depth, fifo_size & 0xffff);
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
@@ -287,7 +288,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
do {
reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
if (!(reg & DWC3_DGCMD_CMDACT)) {
- dev_vdbg(dwc->dev, "Command Complete --> %d\n",
+ dwc3_trace(trace_dwc3_gadget,
+ "Command Complete --> %d",
DWC3_DGCMD_STATUS(reg));
return 0;
}
@@ -297,8 +299,11 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
* interrupt context.
*/
timeout--;
- if (!timeout)
+ if (!timeout) {
+ dwc3_trace(trace_dwc3_gadget,
+ "Command Timed Out");
return -ETIMEDOUT;
+ }
udelay(1);
} while (1);
}
@@ -320,7 +325,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
do {
reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
if (!(reg & DWC3_DEPCMD_CMDACT)) {
- dev_vdbg(dwc->dev, "Command Complete --> %d\n",
+ dwc3_trace(trace_dwc3_gadget,
+ "Command Complete --> %d",
DWC3_DEPCMD_STATUS(reg));
return 0;
}
@@ -330,8 +336,11 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
* interrupt context.
*/
timeout--;
- if (!timeout)
+ if (!timeout) {
+ dwc3_trace(trace_dwc3_gadget,
+ "Command Timed Out");
return -ETIMEDOUT;
+ }
udelay(1);
} while (1);
@@ -352,9 +361,6 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
if (dep->trb_pool)
return 0;
- if (dep->number == 0 || dep->number == 1)
- return 0;
-
dep->trb_pool = dma_alloc_coherent(dwc->dev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
&dep->trb_pool_dma, GFP_KERNEL);
@@ -492,7 +498,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
u32 reg;
int ret;
- dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
+ dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_start_config(dwc, dep);
@@ -729,10 +735,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
struct dwc3_request *req, dma_addr_t dma,
unsigned length, unsigned last, unsigned chain, unsigned node)
{
- struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
- dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
+ dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
dep->name, req, (unsigned long long) dma,
length, last ? " last" : "",
chain ? " chain" : "");
@@ -882,8 +887,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (i == (request->num_mapped_sgs - 1) ||
sg_is_last(s)) {
- if (list_is_last(&req->list,
- &dep->request_list))
+ if (list_empty(&dep->request_list))
last_one = true;
chain = false;
}
@@ -901,6 +905,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (last_one)
break;
}
+
+ if (last_one)
+ break;
} else {
dma = req->request.dma;
length = req->request.length;
@@ -932,7 +939,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
u32 cmd;
if (start_new && (dep->flags & DWC3_EP_BUSY)) {
- dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
+ dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
return -EBUSY;
}
dep->flags &= ~DWC3_EP_PENDING_REQUEST;
@@ -1003,8 +1010,9 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
u32 uf;
if (list_empty(&dep->request_list)) {
- dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
- dep->name);
+ dwc3_trace(trace_dwc3_gadget,
+ "ISOC ep %s run out for requests",
+ dep->name);
dep->flags |= DWC3_EP_PENDING_REQUEST;
return;
}
@@ -1111,15 +1119,10 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* handled.
*/
if (dep->stream_capable) {
- int ret;
-
ret = __dwc3_gadget_kick_transfer(dep, 0, true);
- if (ret && ret != -EBUSY) {
- struct dwc3 *dwc = dep->dwc;
-
+ if (ret && ret != -EBUSY)
dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
dep->name);
- }
}
return 0;
@@ -1150,8 +1153,6 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
- dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
- request, ep->name, request->length);
trace_dwc3_ep_queue(req);
ret = __dwc3_gadget_ep_queue(dep, req);
@@ -1414,7 +1415,7 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
- dwc->is_selfpowered = !!is_selfpowered;
+ g->is_selfpowered = !!is_selfpowered;
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -1466,7 +1467,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
udelay(1);
} while (1);
- dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
+ dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
dwc->gadget_driver
? dwc->gadget_driver->function : "no-function",
is_on ? "connect" : "disconnect");
@@ -1686,7 +1687,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->endpoint.name = dep->name;
- dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
+ dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
if (epnum == 0 || epnum == 1) {
usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
@@ -1723,13 +1724,15 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
if (ret < 0) {
- dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
+ dwc3_trace(trace_dwc3_gadget,
+ "failed to allocate OUT endpoints");
return ret;
}
ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
if (ret < 0) {
- dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
+ dwc3_trace(trace_dwc3_gadget,
+ "failed to allocate IN endpoints");
return ret;
}
@@ -1975,7 +1978,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
} else {
int ret;
- dev_vdbg(dwc->dev, "%s: reason %s\n",
+ dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
dep->name, event->status &
DEPEVT_STATUS_TRANSFER_ACTIVE
? "Transfer Active"
@@ -1999,7 +2002,8 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
switch (event->status) {
case DEPEVT_STREAMEVT_FOUND:
- dev_vdbg(dwc->dev, "Stream %d found and started\n",
+ dwc3_trace(trace_dwc3_gadget,
+ "Stream %d found and started",
event->parameters);
break;
@@ -2013,7 +2017,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
break;
case DWC3_DEPEVT_EPCMDCMPLT:
- dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
+ dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
break;
}
}
@@ -2041,6 +2045,7 @@ static void dwc3_resume_gadget(struct dwc3 *dwc)
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(&dwc->gadget);
+ spin_lock(&dwc->lock);
}
}
@@ -2077,7 +2082,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
* We have discussed this with the IP Provider and it was
* suggested to giveback all requests here, but give HW some
* extra time to synchronize with the interconnect. We're using
- * an arbitraty 100us delay for that.
+ * an arbitrary 100us delay for that.
*
* Note also that a similar handling was tested by Synopsys
* (thanks a lot Paul) and nothing bad has come out of it.
@@ -2387,7 +2392,8 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
- dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
+ dwc3_trace(trace_dwc3_gadget,
+ "ignoring transition U3 -> Resume");
return;
}
}
@@ -2509,22 +2515,22 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
case DWC3_DEVICE_EVENT_EOPF:
- dev_vdbg(dwc->dev, "End of Periodic Frame\n");
+ dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
break;
case DWC3_DEVICE_EVENT_SOF:
- dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
+ dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- dev_vdbg(dwc->dev, "Erratic Error\n");
+ dwc3_trace(trace_dwc3_gadget, "Erratic Error");
break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- dev_vdbg(dwc->dev, "Command Complete\n");
+ dwc3_trace(trace_dwc3_gadget, "Command Complete");
break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- dev_vdbg(dwc->dev, "Overflow\n");
+ dwc3_trace(trace_dwc3_gadget, "Overflow");
break;
default:
- dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
+ dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
}
}
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 9fc20b33dd8e..9c10669ab91f 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -47,6 +47,16 @@ DEFINE_EVENT(dwc3_log_msg, dwc3_writel,
TP_ARGS(vaf)
);
+DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(dwc3_log_msg, dwc3_core,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 747ef53bda14..96539038c03a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -423,6 +423,17 @@ config USB_CONFIGFS_F_HID
For more information, see Documentation/usb/gadget_hid.txt.
+config USB_CONFIGFS_F_UVC
+ bool "USB Webcam function"
+ depends on USB_CONFIGFS
+ depends on VIDEO_DEV
+ select VIDEOBUF2_VMALLOC
+ select USB_F_UVC
+ help
+ The Webcam function acts as a composite USB Audio and Video Class
+ device. It provides a userspace API to process UVC control requests
+ and stream video data to the host.
+
source "drivers/usb/gadget/legacy/Kconfig"
endchoice
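
The USB_CONFIGFS_F_UVC entry above, together with the uvc_configfs.c code added later in this series, lets the webcam function be instantiated entirely through configfs: a control header item is created under control/header/ and then linked into control/class/fs (see uvcg_control_header_make() and uvcg_control_class_allow_link() below). A minimal userspace sketch of that flow is given here; the gadget name g1, the instance name uvc.0 and the header name "h" are illustrative assumptions, and the gadget directory is assumed to exist already.

/*
 * Hedged sketch only, not part of the patch. It mirrors what the new
 * configfs code allows: mkdir on control/header/ creates a header item
 * (uvcg_control_header_make()) and a symlink into control/class/fs
 * binds it (uvcg_control_class_allow_link()). Paths and names are
 * illustrative assumptions.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

#define FUNC "/sys/kernel/config/usb_gadget/g1/functions/uvc.0"

static void must(int ret, const char *what)
{
	if (ret && errno != EEXIST) {
		perror(what);
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	/* Instantiate the function; triggers uvc_alloc_inst(). */
	must(mkdir(FUNC, 0755), "mkdir function instance");

	/* Create a control header item under control/header/. */
	must(mkdir(FUNC "/control/header/h", 0755), "mkdir control header");

	/* Link the header into the full-speed control class. */
	must(symlink(FUNC "/control/header/h", FUNC "/control/class/fs/h"),
	     "symlink control class");

	return 0;
}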
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 617835348569..13adfd1a3f54 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1655,7 +1655,7 @@ unknown:
* OS descriptors handling
*/
if (cdev->use_os_string && cdev->os_desc_config &&
- (ctrl->bRequest & USB_TYPE_VENDOR) &&
+ (ctrl->bRequestType & USB_TYPE_VENDOR) &&
ctrl->bRequest == cdev->b_vendor_code) {
struct usb_request *req;
struct usb_configuration *os_desc_cfg;
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index dd68091d92f0..f71b1aaa0edf 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -36,7 +36,7 @@ usb_f_uac1-y := f_uac1.o u_uac1.o
obj-$(CONFIG_USB_F_UAC1) += usb_f_uac1.o
usb_f_uac2-y := f_uac2.o
obj-$(CONFIG_USB_F_UAC2) += usb_f_uac2.o
-usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o
+usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o
obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o
usb_f_midi-y := f_midi.o
obj-$(CONFIG_USB_F_MIDI) += usb_f_midi.o
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 63314ede7ba6..af98b096af2f 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -31,6 +31,7 @@
#include <linux/aio.h>
#include <linux/mmu_context.h>
#include <linux/poll.h>
+#include <linux/eventfd.h>
#include "u_fs.h"
#include "u_f.h"
@@ -153,6 +154,8 @@ struct ffs_io_data {
struct usb_ep *ep;
struct usb_request *req;
+
+ struct ffs_data *ffs;
};
struct ffs_desc_helper {
@@ -390,17 +393,20 @@ done_spin:
return ret;
}
+/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
size_t n)
{
/*
- * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
- * to release them.
+ * n cannot be bigger than ffs->ev.count, which cannot be bigger than
+ * the size of the ffs->ev.types array (which is four), so that is how
+ * much space we reserve.
*/
- struct usb_functionfs_event events[n];
+ struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
+ const size_t size = n * sizeof *events;
unsigned i = 0;
- memset(events, 0, sizeof events);
+ memset(events, 0, size);
do {
events[i].type = ffs->ev.types[i];
@@ -410,19 +416,15 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
}
} while (++i < n);
- if (n < ffs->ev.count) {
- ffs->ev.count -= n;
+ ffs->ev.count -= n;
+ if (ffs->ev.count)
memmove(ffs->ev.types, ffs->ev.types + n,
ffs->ev.count * sizeof *ffs->ev.types);
- } else {
- ffs->ev.count = 0;
- }
spin_unlock_irq(&ffs->ev.waitq.lock);
mutex_unlock(&ffs->mutex);
- return unlikely(__copy_to_user(buf, events, sizeof events))
- ? -EFAULT : sizeof events;
+ return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size;
}
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
@@ -606,6 +608,8 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
}
case FFS_CLOSING:
break;
+ case FFS_DEACTIVATED:
+ break;
}
mutex_unlock(&ffs->mutex);
@@ -673,6 +677,9 @@ static void ffs_user_copy_worker(struct work_struct *work)
aio_complete(io_data->kiocb, ret, ret);
+ if (io_data->ffs->ffs_eventfd && !io_data->kiocb->ki_eventfd)
+ eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+
usb_ep_free_request(io_data->ep, io_data->req);
io_data->kiocb->private = NULL;
@@ -826,6 +833,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
io_data->buf = data;
io_data->ep = ep->ep;
io_data->req = req;
+ io_data->ffs = epfile->ffs;
req->context = io_data;
req->complete = ffs_epfile_async_io_complete;
@@ -1180,6 +1188,7 @@ struct ffs_sb_fill_data {
struct ffs_file_perms perms;
umode_t root_mode;
const char *dev_name;
+ bool no_disconnect;
struct ffs_data *ffs_data;
};
@@ -1250,6 +1259,12 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
/* Interpret option */
switch (eq - opts) {
+ case 13:
+ if (!memcmp(opts, "no_disconnect", 13))
+ data->no_disconnect = !!value;
+ else
+ goto invalid;
+ break;
case 5:
if (!memcmp(opts, "rmode", 5))
data->root_mode = (value & 0555) | S_IFDIR;
@@ -1314,6 +1329,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
.gid = GLOBAL_ROOT_GID,
},
.root_mode = S_IFDIR | 0500,
+ .no_disconnect = false,
};
struct dentry *rv;
int ret;
@@ -1330,6 +1346,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
if (unlikely(!ffs))
return ERR_PTR(-ENOMEM);
ffs->file_perms = data.perms;
+ ffs->no_disconnect = data.no_disconnect;
ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
if (unlikely(!ffs->dev_name)) {
@@ -1361,6 +1378,7 @@ ffs_fs_kill_sb(struct super_block *sb)
kill_litter_super(sb);
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
+ ffs_data_closed(sb->s_fs_info);
ffs_data_put(sb->s_fs_info);
}
}
@@ -1417,7 +1435,11 @@ static void ffs_data_opened(struct ffs_data *ffs)
ENTER();
atomic_inc(&ffs->ref);
- atomic_inc(&ffs->opened);
+ if (atomic_add_return(1, &ffs->opened) == 1 &&
+ ffs->state == FFS_DEACTIVATED) {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
}
static void ffs_data_put(struct ffs_data *ffs)
@@ -1439,6 +1461,21 @@ static void ffs_data_closed(struct ffs_data *ffs)
ENTER();
if (atomic_dec_and_test(&ffs->opened)) {
+ if (ffs->no_disconnect) {
+ ffs->state = FFS_DEACTIVATED;
+ if (ffs->epfiles) {
+ ffs_epfiles_destroy(ffs->epfiles,
+ ffs->eps_count);
+ ffs->epfiles = NULL;
+ }
+ if (ffs->setup_state == FFS_SETUP_PENDING)
+ __ffs_ep0_stall(ffs);
+ } else {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
+ }
+ if (atomic_read(&ffs->opened) < 0) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
@@ -1480,6 +1517,9 @@ static void ffs_data_clear(struct ffs_data *ffs)
if (ffs->epfiles)
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ if (ffs->ffs_eventfd)
+ eventfd_ctx_put(ffs->ffs_eventfd);
+
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
@@ -1581,10 +1621,10 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
mutex_init(&epfile->mutex);
init_waitqueue_head(&epfile->wait);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
- sprintf(epfiles->name, "ep%02x", ffs->eps_addrmap[i]);
+ sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
- sprintf(epfiles->name, "ep%u", i);
- epfile->dentry = ffs_sb_create_file(ffs->sb, epfiles->name,
+ sprintf(epfile->name, "ep%u", i);
+ epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
epfile,
&ffs_epfile_operations);
if (unlikely(!epfile->dentry)) {
@@ -1616,7 +1656,6 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
kfree(epfiles);
}
-
static void ffs_func_eps_disable(struct ffs_function *func)
{
struct ffs_ep *ep = func->eps;
@@ -1629,10 +1668,12 @@ static void ffs_func_eps_disable(struct ffs_function *func)
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
- epfile->ep = NULL;
-
++ep;
- ++epfile;
+
+ if (epfile) {
+ epfile->ep = NULL;
+ ++epfile;
+ }
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
@@ -2138,7 +2179,8 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
FUNCTIONFS_HAS_HS_DESC |
FUNCTIONFS_HAS_SS_DESC |
FUNCTIONFS_HAS_MS_OS_DESC |
- FUNCTIONFS_VIRTUAL_ADDR)) {
+ FUNCTIONFS_VIRTUAL_ADDR |
+ FUNCTIONFS_EVENTFD)) {
ret = -ENOSYS;
goto error;
}
@@ -2149,6 +2191,20 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
goto error;
}
+ if (flags & FUNCTIONFS_EVENTFD) {
+ if (len < 4)
+ goto error;
+ ffs->ffs_eventfd =
+ eventfd_ctx_fdget((int)get_unaligned_le32(data));
+ if (IS_ERR(ffs->ffs_eventfd)) {
+ ret = PTR_ERR(ffs->ffs_eventfd);
+ ffs->ffs_eventfd = NULL;
+ goto error;
+ }
+ data += 4;
+ len -= 4;
+ }
+
/* Read fs_count, hs_count and ss_count (if present) */
for (i = 0; i < 3; ++i) {
if (!(flags & (1 << i))) {
@@ -2377,6 +2433,13 @@ static void __ffs_event_add(struct ffs_data *ffs,
if (ffs->setup_state == FFS_SETUP_PENDING)
ffs->setup_state = FFS_SETUP_CANCELLED;
+ /*
+ * Logic of this function guarantees that there are at most four pending
+ * events on ffs->ev.types queue. This is important because the queue
+ * has space for four elements only and __ffs_ep0_read_events function
+ * depends on that limit as well. If more event types are added, those
+ * limits have to be revisited or guaranteed to still hold.
+ */
switch (type) {
case FUNCTIONFS_RESUME:
rem_type2 = FUNCTIONFS_SUSPEND;
@@ -2416,6 +2479,8 @@ static void __ffs_event_add(struct ffs_data *ffs,
pr_vdebug("adding event %d\n", type);
ffs->ev.types[ffs->ev.count++] = type;
wake_up_locked(&ffs->ev.waitq);
+ if (ffs->ffs_eventfd)
+ eventfd_signal(ffs->ffs_eventfd, 1);
}
static void ffs_event_add(struct ffs_data *ffs,
@@ -2888,6 +2953,13 @@ static int ffs_func_bind(struct usb_configuration *c,
/* Other USB function hooks *************************************************/
+static void ffs_reset_work(struct work_struct *work)
+{
+ struct ffs_data *ffs = container_of(work,
+ struct ffs_data, reset_work);
+ ffs_data_reset(ffs);
+}
+
static int ffs_func_set_alt(struct usb_function *f,
unsigned interface, unsigned alt)
{
@@ -2904,6 +2976,13 @@ static int ffs_func_set_alt(struct usb_function *f,
if (ffs->func)
ffs_func_eps_disable(ffs->func);
+ if (ffs->state == FFS_DEACTIVATED) {
+ ffs->state = FFS_CLOSING;
+ INIT_WORK(&ffs->reset_work, ffs_reset_work);
+ schedule_work(&ffs->reset_work);
+ return -ENODEV;
+ }
+
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
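
The FUNCTIONFS_EVENTFD support added above means that __ffs_data_got_descs() reads a 32-bit eventfd file descriptor placed immediately after the flags word of the descriptors blob, and the kernel then signals that eventfd from __ffs_event_add() and, when no per-iocb eventfd is set, from the AIO completion path in ffs_user_copy_worker(). As a hedged sketch (the descriptor writing itself is omitted and the loop body is only a placeholder), a userspace daemon could block on the eventfd instead of polling ep0:

/*
 * Hedged userspace sketch, not part of the patch. It assumes the
 * eventfd has already been handed to the kernel by writing the
 * descriptors blob to ep0 with FUNCTIONFS_EVENTFD set in flags.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	uint64_t count;

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	for (;;) {
		/* Blocks until functionfs signals the eventfd. */
		if (read(efd, &count, sizeof(count)) != sizeof(count))
			break;
		printf("functionfs signalled %llu time(s)\n",
		       (unsigned long long)count);
		/* Now read pending events from ep0 and/or reap AIO. */
	}

	return 0;
}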
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 6e04e302dc3a..426d69a9c018 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f,
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
- VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x "
- "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+ VDBG(cdev,
+ "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
+ __func__, ctrl->bRequestType, ctrl->bRequest, value);
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
@@ -758,7 +759,7 @@ static struct f_hid_opts_attribute f_hid_opts_##name = \
F_HID_OPT(subclass, 8, 255);
F_HID_OPT(protocol, 8, 255);
-F_HID_OPT(report_length, 16, 65536);
+F_HID_OPT(report_length, 16, 65535);
static ssize_t f_hid_opts_report_desc_show(struct f_hid_opts *opts, char *page)
{
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a90440300735..259b656c0b3e 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
req = midi_alloc_ep_req(ep, midi->buflen);
if (!req) {
- ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+ ERROR(midi, "%s: alloc_ep_request failed\n", __func__);
return;
}
req->length = 0;
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 80be25b32cd7..e07c50ced64d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -1214,7 +1214,7 @@ static ssize_t f_ss_opts_pattern_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->pattern);
+ result = sprintf(page, "%u", opts->pattern);
mutex_unlock(&opts->lock);
return result;
@@ -1258,7 +1258,7 @@ static ssize_t f_ss_opts_isoc_interval_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->isoc_interval);
+ result = sprintf(page, "%u", opts->isoc_interval);
mutex_unlock(&opts->lock);
return result;
@@ -1302,7 +1302,7 @@ static ssize_t f_ss_opts_isoc_maxpacket_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->isoc_maxpacket);
+ result = sprintf(page, "%u", opts->isoc_maxpacket);
mutex_unlock(&opts->lock);
return result;
@@ -1346,7 +1346,7 @@ static ssize_t f_ss_opts_isoc_mult_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->isoc_mult);
+ result = sprintf(page, "%u", opts->isoc_mult);
mutex_unlock(&opts->lock);
return result;
@@ -1390,7 +1390,7 @@ static ssize_t f_ss_opts_isoc_maxburst_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->isoc_maxburst);
+ result = sprintf(page, "%u", opts->isoc_maxburst);
mutex_unlock(&opts->lock);
return result;
@@ -1434,7 +1434,7 @@ static ssize_t f_ss_opts_bulk_buflen_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->bulk_buflen);
+ result = sprintf(page, "%u", opts->bulk_buflen);
mutex_unlock(&opts->lock);
return result;
@@ -1473,7 +1473,7 @@ static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->int_interval);
+ result = sprintf(page, "%u", opts->int_interval);
mutex_unlock(&opts->lock);
return result;
@@ -1517,7 +1517,7 @@ static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->int_maxpacket);
+ result = sprintf(page, "%u", opts->int_maxpacket);
mutex_unlock(&opts->lock);
return result;
@@ -1561,7 +1561,7 @@ static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->int_mult);
+ result = sprintf(page, "%u", opts->int_mult);
mutex_unlock(&opts->lock);
return result;
@@ -1605,7 +1605,7 @@ static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = sprintf(page, "%d", opts->int_maxburst);
+ result = sprintf(page, "%u", opts->int_maxburst);
mutex_unlock(&opts->lock);
return result;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index f7b203293205..9719abfb6145 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -31,7 +31,7 @@ static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
*/
#define F_AUDIO_AC_INTERFACE 0
#define F_AUDIO_AS_INTERFACE 1
-#define F_AUDIO_NUM_INTERFACES 2
+#define F_AUDIO_NUM_INTERFACES 1
/* B.3.1 Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
@@ -42,14 +42,18 @@ static struct usb_interface_descriptor ac_interface_desc = {
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
};
-DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+/*
+ * The number of AudioStreaming and MIDIStreaming interfaces
+ * in the Audio Interface Collection
+ */
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
/* 1 input terminal, 1 output terminal and 1 feature unit */
#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+ UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+static struct uac1_ac_header_descriptor_1 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
@@ -57,8 +61,8 @@ static struct uac1_ac_header_descriptor_2 ac_header_desc = {
.wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
.baInterfaceNr = {
- [0] = F_AUDIO_AC_INTERFACE,
- [1] = F_AUDIO_AS_INTERFACE,
+ /* Interface number of the first AudioStream interface */
+ [0] = 1,
}
};
@@ -584,6 +588,7 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (intf == 1) {
if (alt == 1) {
+ config_ep_by_speed(cdev->gadget, f, out_ep);
usb_ep_enable(out_ep);
out_ep->driver_data = audio;
audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
@@ -669,7 +674,6 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
audio->card.gadget = c->cdev->gadget;
- audio_opts->card = &audio->card;
/* set up ASLA audio devices */
if (!audio_opts->bound) {
status = gaudio_setup(&audio->card);
@@ -897,7 +901,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
struct f_uac1_opts *opts;
opts = container_of(f, struct f_uac1_opts, func_inst);
- gaudio_cleanup(opts->card);
if (opts->fn_play_alloc)
kfree(opts->fn_play);
if (opts->fn_cap_alloc)
@@ -935,6 +938,7 @@ static void f_audio_free(struct usb_function *f)
struct f_audio *audio = func_to_audio(f);
struct f_uac1_opts *opts;
+ gaudio_cleanup(&audio->card);
opts = container_of(f->fi, struct f_uac1_opts, func_inst);
kfree(audio);
mutex_lock(&opts->lock);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 945b3bd2ca98..76891adfba7a 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -27,10 +27,11 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
+#include "u_uvc.h"
#include "uvc.h"
+#include "uvc_configfs.h"
#include "uvc_v4l2.h"
#include "uvc_video.h"
-#include "u_uvc.h"
unsigned int uvc_gadget_trace_param;
@@ -509,6 +510,9 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
break;
}
+ if (!uvc_control_desc || !uvc_streaming_cls)
+ return ERR_PTR(-ENODEV);
+
/* Descriptors layout
*
* uvc_iad
@@ -605,7 +609,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
INFO(cdev, "uvc_function_bind\n");
- opts = to_f_uvc_opts(f->fi);
+ opts = fi_to_f_uvc_opts(f->fi);
/* Sanity check the streaming endpoint module parameters.
*/
opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U);
@@ -700,10 +704,27 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
/* Copy descriptors */
f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
- if (gadget_is_dualspeed(cdev->gadget))
+ if (IS_ERR(f->fs_descriptors)) {
+ ret = PTR_ERR(f->fs_descriptors);
+ f->fs_descriptors = NULL;
+ goto error;
+ }
+ if (gadget_is_dualspeed(cdev->gadget)) {
f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
- if (gadget_is_superspeed(c->cdev->gadget))
+ if (IS_ERR(f->hs_descriptors)) {
+ ret = PTR_ERR(f->hs_descriptors);
+ f->hs_descriptors = NULL;
+ goto error;
+ }
+ }
+ if (gadget_is_superspeed(c->cdev->gadget)) {
f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER);
+ if (IS_ERR(f->ss_descriptors)) {
+ ret = PTR_ERR(f->ss_descriptors);
+ f->ss_descriptors = NULL;
+ goto error;
+ }
+ }
/* Preallocate control endpoint request. */
uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
@@ -766,27 +787,106 @@ error:
static void uvc_free_inst(struct usb_function_instance *f)
{
- struct f_uvc_opts *opts = to_f_uvc_opts(f);
+ struct f_uvc_opts *opts = fi_to_f_uvc_opts(f);
+ mutex_destroy(&opts->lock);
kfree(opts);
}
static struct usb_function_instance *uvc_alloc_inst(void)
{
struct f_uvc_opts *opts;
+ struct uvc_camera_terminal_descriptor *cd;
+ struct uvc_processing_unit_descriptor *pd;
+ struct uvc_output_terminal_descriptor *od;
+ struct uvc_color_matching_descriptor *md;
+ struct uvc_descriptor_header **ctl_cls;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = uvc_free_inst;
-
+ mutex_init(&opts->lock);
+
+ cd = &opts->uvc_camera_terminal;
+ cd->bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3);
+ cd->bDescriptorType = USB_DT_CS_INTERFACE;
+ cd->bDescriptorSubType = UVC_VC_INPUT_TERMINAL;
+ cd->bTerminalID = 1;
+ cd->wTerminalType = cpu_to_le16(0x0201);
+ cd->bAssocTerminal = 0;
+ cd->iTerminal = 0;
+ cd->wObjectiveFocalLengthMin = cpu_to_le16(0);
+ cd->wObjectiveFocalLengthMax = cpu_to_le16(0);
+ cd->wOcularFocalLength = cpu_to_le16(0);
+ cd->bControlSize = 3;
+ cd->bmControls[0] = 2;
+ cd->bmControls[1] = 0;
+ cd->bmControls[2] = 0;
+
+ pd = &opts->uvc_processing;
+ pd->bLength = UVC_DT_PROCESSING_UNIT_SIZE(2);
+ pd->bDescriptorType = USB_DT_CS_INTERFACE;
+ pd->bDescriptorSubType = UVC_VC_PROCESSING_UNIT;
+ pd->bUnitID = 2;
+ pd->bSourceID = 1;
+ pd->wMaxMultiplier = cpu_to_le16(16*1024);
+ pd->bControlSize = 2;
+ pd->bmControls[0] = 1;
+ pd->bmControls[1] = 0;
+ pd->iProcessing = 0;
+
+ od = &opts->uvc_output_terminal;
+ od->bLength = UVC_DT_OUTPUT_TERMINAL_SIZE;
+ od->bDescriptorType = USB_DT_CS_INTERFACE;
+ od->bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL;
+ od->bTerminalID = 3;
+ od->wTerminalType = cpu_to_le16(0x0101);
+ od->bAssocTerminal = 0;
+ od->bSourceID = 2;
+ od->iTerminal = 0;
+
+ md = &opts->uvc_color_matching;
+ md->bLength = UVC_DT_COLOR_MATCHING_SIZE;
+ md->bDescriptorType = USB_DT_CS_INTERFACE;
+ md->bDescriptorSubType = UVC_VS_COLORFORMAT;
+ md->bColorPrimaries = 1;
+ md->bTransferCharacteristics = 1;
+ md->bMatrixCoefficients = 4;
+
+ /* Prepare fs control class descriptors for configfs-based gadgets */
+ ctl_cls = opts->uvc_fs_control_cls;
+ ctl_cls[0] = NULL; /* assigned elsewhere by configfs */
+ ctl_cls[1] = (struct uvc_descriptor_header *)cd;
+ ctl_cls[2] = (struct uvc_descriptor_header *)pd;
+ ctl_cls[3] = (struct uvc_descriptor_header *)od;
+ ctl_cls[4] = NULL; /* NULL-terminate */
+ opts->fs_control =
+ (const struct uvc_descriptor_header * const *)ctl_cls;
+
+ /* Prepare ss control class descriptors for configfs-based gadgets */
+ ctl_cls = opts->uvc_ss_control_cls;
+ ctl_cls[0] = NULL; /* assigned elsewhere by configfs */
+ ctl_cls[1] = (struct uvc_descriptor_header *)cd;
+ ctl_cls[2] = (struct uvc_descriptor_header *)pd;
+ ctl_cls[3] = (struct uvc_descriptor_header *)od;
+ ctl_cls[4] = NULL; /* NULL-terminate */
+ opts->ss_control =
+ (const struct uvc_descriptor_header * const *)ctl_cls;
+
+ opts->streaming_interval = 1;
+ opts->streaming_maxpacket = 1024;
+
+ uvcg_attach_configfs(opts);
return &opts->func_inst;
}
static void uvc_free(struct usb_function *f)
{
struct uvc_device *uvc = to_uvc(f);
-
+ struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
+ func_inst);
+ --opts->refcnt;
kfree(uvc);
}
@@ -812,19 +912,39 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
{
struct uvc_device *uvc;
struct f_uvc_opts *opts;
+ struct uvc_descriptor_header **strm_cls;
uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
if (uvc == NULL)
return ERR_PTR(-ENOMEM);
uvc->state = UVC_STATE_DISCONNECTED;
- opts = to_f_uvc_opts(fi);
+ opts = fi_to_f_uvc_opts(fi);
+
+ mutex_lock(&opts->lock);
+ if (opts->uvc_fs_streaming_cls) {
+ strm_cls = opts->uvc_fs_streaming_cls;
+ opts->fs_streaming =
+ (const struct uvc_descriptor_header * const *)strm_cls;
+ }
+ if (opts->uvc_hs_streaming_cls) {
+ strm_cls = opts->uvc_hs_streaming_cls;
+ opts->hs_streaming =
+ (const struct uvc_descriptor_header * const *)strm_cls;
+ }
+ if (opts->uvc_ss_streaming_cls) {
+ strm_cls = opts->uvc_ss_streaming_cls;
+ opts->ss_streaming =
+ (const struct uvc_descriptor_header * const *)strm_cls;
+ }
uvc->desc.fs_control = opts->fs_control;
uvc->desc.ss_control = opts->ss_control;
uvc->desc.fs_streaming = opts->fs_streaming;
uvc->desc.hs_streaming = opts->hs_streaming;
uvc->desc.ss_streaming = opts->ss_streaming;
+ ++opts->refcnt;
+ mutex_unlock(&opts->lock);
/* Register the function. */
uvc->func.name = "uvc";
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 6e6f87656e7b..f1fd777ef4ec 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -729,9 +729,7 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
if (len < 18)
return -EINVAL;
- snprintf(str, len, "%02x:%02x:%02x:%02x:%02x:%02x",
- dev_addr[0], dev_addr[1], dev_addr[2],
- dev_addr[3], dev_addr[4], dev_addr[5]);
+ snprintf(str, len, "%pM", dev_addr);
return 18;
}
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index cd128e31f808..60139854e0b1 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -19,6 +19,7 @@
#include <linux/usb/composite.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/workqueue.h>
#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
@@ -93,6 +94,26 @@ enum ffs_state {
FFS_ACTIVE,
/*
+ * Function is visible to the host, but it is not functional. All
+ * setup requests are stalled and transfers on all other endpoints
+ * are refused. All epfiles, except ep0, are deleted so there
+ * is no way to perform any operations on them.
+ *
+ * This state is entered after closing all functionfs files, when
+ * the mount parameter "no_disconnect=1" has been set. The function
+ * remains in the deactivated state until the filesystem is unmounted
+ * or ep0 is opened again. In the latter case the functionfs state is
+ * reset and it is ready for descriptors and strings to be written
+ * again.
+ *
+ * This is useful only when functionfs is composed into a gadget
+ * together with another function which can perform some critical
+ * operations, and it is strongly desired to have those operations
+ * completed even after the functionfs files have been closed.
+ */
+ FFS_DEACTIVATED,
+
+ /*
* All endpoints have been closed. This state is also set if
* we encounter an unrecoverable error. The only
* unrecoverable error is situation when after reading strings
@@ -251,6 +272,10 @@ struct ffs_data {
kgid_t gid;
} file_perms;
+ struct eventfd_ctx *ffs_eventfd;
+ bool no_disconnect;
+ struct work_struct reset_work;
+
/*
* The endpoint files, filled by ffs_epfiles_create(),
* destroyed by ffs_epfiles_destroy().
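
The FFS_DEACTIVATED state documented above is only reachable when functionfs is mounted with the new no_disconnect option parsed by ffs_fs_parse_opts(). A hedged sketch of selecting it from userspace, with an illustrative device name and mount point, is simply a mount(2) call:

/*
 * Hedged sketch, not part of the patch. The device name "func0" and
 * the mount point are illustrative assumptions.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("func0", "/dev/usb-ffs/func0", "functionfs", 0,
		  "no_disconnect=1")) {
		perror("mount functionfs");
		return 1;
	}
	return 0;
}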
diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
index 53842a1b947f..c78c84138a28 100644
--- a/drivers/usb/gadget/function/u_uac1.c
+++ b/drivers/usb/gadget/function/u_uac1.c
@@ -308,8 +308,7 @@ int gaudio_setup(struct gaudio *card)
*/
void gaudio_cleanup(struct gaudio *the_card)
{
- if (the_card) {
+ if (the_card)
gaudio_close_snd_dev(the_card);
- }
}
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index f8b17fe82efe..fe386df6dd3e 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -70,7 +70,6 @@ struct f_uac1_opts {
unsigned fn_play_alloc:1;
unsigned fn_cap_alloc:1;
unsigned fn_cntl_alloc:1;
- struct gaudio *card;
struct mutex lock;
int refcnt;
};
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 2a8dfdff0332..4676b60a5063 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -17,8 +17,9 @@
#define U_UVC_H
#include <linux/usb/composite.h>
+#include <linux/usb/video.h>
-#define to_f_uvc_opts(f) container_of(f, struct f_uvc_opts, func_inst)
+#define fi_to_f_uvc_opts(f) container_of(f, struct f_uvc_opts, func_inst)
struct f_uvc_opts {
struct usb_function_instance func_inst;
@@ -26,11 +27,60 @@ struct f_uvc_opts {
unsigned int streaming_interval;
unsigned int streaming_maxpacket;
unsigned int streaming_maxburst;
+
+ /*
+ * Control descriptors array pointers for full-/high-speed and
+ * super-speed. They point by default to the uvc_fs_control_cls and
+ * uvc_ss_control_cls arrays respectively. Legacy gadgets must
+ * override them in their gadget bind callback.
+ */
const struct uvc_descriptor_header * const *fs_control;
const struct uvc_descriptor_header * const *ss_control;
+
+ /*
+ * Streaming descriptors array pointers for full-speed, high-speed and
+ * super-speed. They will point to the uvc_[fhs]s_streaming_cls arrays
+ * for configfs-based gadgets. Legacy gadgets must initialize them in
+ * their gadget bind callback.
+ */
const struct uvc_descriptor_header * const *fs_streaming;
const struct uvc_descriptor_header * const *hs_streaming;
const struct uvc_descriptor_header * const *ss_streaming;
+
+ /* Default control descriptors for configfs-based gadgets. */
+ struct uvc_camera_terminal_descriptor uvc_camera_terminal;
+ struct uvc_processing_unit_descriptor uvc_processing;
+ struct uvc_output_terminal_descriptor uvc_output_terminal;
+ struct uvc_color_matching_descriptor uvc_color_matching;
+
+ /*
+ * Control descriptors pointers arrays for full-/high-speed and
+ * super-speed. The first element is a configurable control header
+ * descriptor, the other elements point to the fixed default control
+ * descriptors. Used by configfs only, must not be touched by legacy
+ * gadgets.
+ */
+ struct uvc_descriptor_header *uvc_fs_control_cls[5];
+ struct uvc_descriptor_header *uvc_ss_control_cls[5];
+
+ /*
+ * Streaming descriptors for full-speed, high-speed and super-speed.
+ * Used by configfs only, must not be touched by legacy gadgets. The
+ * arrays are allocated at runtime as the number of descriptors isn't
+ * known in advance.
+ */
+ struct uvc_descriptor_header **uvc_fs_streaming_cls;
+ struct uvc_descriptor_header **uvc_hs_streaming_cls;
+ struct uvc_descriptor_header **uvc_ss_streaming_cls;
+
+ /*
+ * Read/write access to configfs attributes is handled by configfs.
+ *
+ * This lock protects the descriptors from concurrent access by
+ * read/write and symlink creation/removal.
+ */
+ struct mutex lock;
+ int refcnt;
};
void uvc_set_trace_param(unsigned int trace);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
new file mode 100644
index 000000000000..3c0467bcb14f
--- /dev/null
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -0,0 +1,2468 @@
+/*
+ * uvc_configfs.c
+ *
+ * Configfs support for the uvc function.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "u_uvc.h"
+#include "uvc_configfs.h"
+
+#define UVCG_STREAMING_CONTROL_SIZE 1
+
+#define CONFIGFS_ATTR_OPS_RO(_item) \
+static ssize_t _item##_attr_show(struct config_item *item, \
+ struct configfs_attribute *attr, \
+ char *page) \
+{ \
+ struct _item *_item = to_##_item(item); \
+ struct _item##_attribute *_item##_attr = \
+ container_of(attr, struct _item##_attribute, attr); \
+ ssize_t ret = 0; \
+ \
+ if (_item##_attr->show) \
+ ret = _item##_attr->show(_item, page); \
+ return ret; \
+}
+
+static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item);
+
+/* control/header/<NAME> */
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+struct uvcg_control_header {
+ struct config_item item;
+ struct UVC_HEADER_DESCRIPTOR(1) desc;
+ unsigned linked;
+};
+
+static struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
+{
+ return container_of(item, struct uvcg_control_header, item);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_control_header);
+CONFIGFS_ATTR_OPS(uvcg_control_header);
+
+static struct configfs_item_operations uvcg_control_header_item_ops = {
+ .show_attribute = uvcg_control_header_attr_show,
+ .store_attribute = uvcg_control_header_attr_store,
+};
+
+#define UVCG_CTRL_HDR_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+static ssize_t uvcg_control_header_##cname##_show( \
+ struct uvcg_control_header *ch, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = ch->item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(ch->desc.aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static ssize_t \
+uvcg_control_header_##cname##_store(struct uvcg_control_header *ch, \
+ const char *page, size_t len) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
+ int ret; \
+ uxx num; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = ch->item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ if (ch->linked || opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = str2u(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (num > limit) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ ch->desc.aname = vnoc(num); \
+ ret = len; \
+end: \
+ mutex_unlock(&opts->lock); \
+ mutex_unlock(su_mutex); \
+ return ret; \
+} \
+ \
+static struct uvcg_control_header_attribute \
+ uvcg_control_header_##cname = \
+ __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \
+ uvcg_control_header_##cname##_show, \
+ uvcg_control_header_##cname##_store)
+
+UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, le16_to_cpu, kstrtou16, u16, cpu_to_le16,
+ 0xffff);
+
+UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, le32_to_cpu, kstrtou32,
+ u32, cpu_to_le32, 0x7fffffff);
+
+#undef UVCG_CTRL_HDR_ATTR
+
+static struct configfs_attribute *uvcg_control_header_attrs[] = {
+ &uvcg_control_header_bcd_uvc.attr,
+ &uvcg_control_header_dw_clock_frequency.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_control_header_type = {
+ .ct_item_ops = &uvcg_control_header_item_ops,
+ .ct_attrs = uvcg_control_header_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *uvcg_control_header_make(struct config_group *group,
+ const char *name)
+{
+ struct uvcg_control_header *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ h->desc.bLength = UVC_DT_HEADER_SIZE(1);
+ h->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ h->desc.bDescriptorSubType = UVC_VC_HEADER;
+ h->desc.bcdUVC = cpu_to_le16(0x0100);
+ h->desc.dwClockFrequency = cpu_to_le32(48000000);
+
+ config_item_init_type_name(&h->item, name, &uvcg_control_header_type);
+
+ return &h->item;
+}
+
+static void uvcg_control_header_drop(struct config_group *group,
+ struct config_item *item)
+{
+ struct uvcg_control_header *h = to_uvcg_control_header(item);
+
+ kfree(h);
+}
+
+/* control/header */
+static struct uvcg_control_header_grp {
+ struct config_group group;
+} uvcg_control_header_grp;
+
+static struct configfs_group_operations uvcg_control_header_grp_ops = {
+ .make_item = uvcg_control_header_make,
+ .drop_item = uvcg_control_header_drop,
+};
+
+static struct config_item_type uvcg_control_header_grp_type = {
+ .ct_group_ops = &uvcg_control_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* control/processing/default */
+static struct uvcg_default_processing {
+ struct config_group group;
+} uvcg_default_processing;
+
+static inline struct uvcg_default_processing
+*to_uvcg_default_processing(struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct uvcg_default_processing, group);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_default_processing);
+CONFIGFS_ATTR_OPS_RO(uvcg_default_processing);
+
+static struct configfs_item_operations uvcg_default_processing_item_ops = {
+ .show_attribute = uvcg_default_processing_attr_show,
+};
+
+#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, conv) \
+static ssize_t uvcg_default_processing_##cname##_show( \
+ struct uvcg_default_processing *dp, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; \
+ struct uvc_processing_unit_descriptor *pd; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ pd = &opts->uvc_processing; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(pd->aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static struct uvcg_default_processing_attribute \
+ uvcg_default_processing_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_default_processing_##cname##_show)
+
+#define identity_conv(x) (x)
+
+UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, identity_conv);
+UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, identity_conv);
+UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, le16_to_cpu);
+UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, identity_conv);
+
+#undef identity_conv
+
+#undef UVCG_DEFAULT_PROCESSING_ATTR
+
+static ssize_t uvcg_default_processing_bm_controls_show(
+ struct uvcg_default_processing *dp, char *page)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex;
+ struct uvc_processing_unit_descriptor *pd;
+ int result, i;
+ char *pg = page;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ pd = &opts->uvc_processing;
+
+ mutex_lock(&opts->lock);
+ for (result = 0, i = 0; i < pd->bControlSize; ++i) {
+ result += sprintf(pg, "%d\n", pd->bmControls[i]);
+ pg = page + result;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+static struct uvcg_default_processing_attribute
+ uvcg_default_processing_bm_controls =
+ __CONFIGFS_ATTR_RO(bmControls,
+ uvcg_default_processing_bm_controls_show);
+
+static struct configfs_attribute *uvcg_default_processing_attrs[] = {
+ &uvcg_default_processing_b_unit_id.attr,
+ &uvcg_default_processing_b_source_id.attr,
+ &uvcg_default_processing_w_max_multiplier.attr,
+ &uvcg_default_processing_bm_controls.attr,
+ &uvcg_default_processing_i_processing.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_default_processing_type = {
+ .ct_item_ops = &uvcg_default_processing_item_ops,
+ .ct_attrs = uvcg_default_processing_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* struct uvcg_processing {}; */
+
+static struct config_group *uvcg_processing_default_groups[] = {
+ &uvcg_default_processing.group,
+ NULL,
+};
+
+/* control/processing */
+static struct uvcg_processing_grp {
+ struct config_group group;
+} uvcg_processing_grp;
+
+static struct config_item_type uvcg_processing_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+/* control/terminal/camera/default */
+static struct uvcg_default_camera {
+ struct config_group group;
+} uvcg_default_camera;
+
+static inline struct uvcg_default_camera
+*to_uvcg_default_camera(struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct uvcg_default_camera, group);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_default_camera);
+CONFIGFS_ATTR_OPS_RO(uvcg_default_camera);
+
+static struct configfs_item_operations uvcg_default_camera_item_ops = {
+ .show_attribute = uvcg_default_camera_attr_show,
+};
+
+#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, conv) \
+static ssize_t uvcg_default_camera_##cname##_show( \
+ struct uvcg_default_camera *dc, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct uvc_camera_terminal_descriptor *cd; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> \
+ ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ cd = &opts->uvc_camera_terminal; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(cd->aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ \
+ return result; \
+} \
+ \
+static struct uvcg_default_camera_attribute \
+ uvcg_default_camera_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_default_camera_##cname##_show)
+
+#define identity_conv(x) (x)
+
+UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, identity_conv);
+UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
+UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
+UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, identity_conv);
+UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin,
+ le16_to_cpu);
+UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax,
+ le16_to_cpu);
+UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength,
+ le16_to_cpu);
+
+#undef identity_conv
+
+#undef UVCG_DEFAULT_CAMERA_ATTR
+
+static ssize_t uvcg_default_camera_bm_controls_show(
+ struct uvcg_default_camera *dc, char *page)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex;
+ struct uvc_camera_terminal_descriptor *cd;
+ int result, i;
+ char *pg = page;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent->
+ ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ cd = &opts->uvc_camera_terminal;
+
+ mutex_lock(&opts->lock);
+ for (result = 0, i = 0; i < cd->bControlSize; ++i) {
+ result += sprintf(pg, "%d\n", cd->bmControls[i]);
+ pg = page + result;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+ return result;
+}
+
+static struct uvcg_default_camera_attribute
+ uvcg_default_camera_bm_controls =
+ __CONFIGFS_ATTR_RO(bmControls, uvcg_default_camera_bm_controls_show);
+
+static struct configfs_attribute *uvcg_default_camera_attrs[] = {
+ &uvcg_default_camera_b_terminal_id.attr,
+ &uvcg_default_camera_w_terminal_type.attr,
+ &uvcg_default_camera_b_assoc_terminal.attr,
+ &uvcg_default_camera_i_terminal.attr,
+ &uvcg_default_camera_w_objective_focal_length_min.attr,
+ &uvcg_default_camera_w_objective_focal_length_max.attr,
+ &uvcg_default_camera_w_ocular_focal_length.attr,
+ &uvcg_default_camera_bm_controls.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_default_camera_type = {
+ .ct_item_ops = &uvcg_default_camera_item_ops,
+ .ct_attrs = uvcg_default_camera_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* struct uvcg_camera {}; */
+
+static struct config_group *uvcg_camera_default_groups[] = {
+ &uvcg_default_camera.group,
+ NULL,
+};
+
+/* control/terminal/camera */
+static struct uvcg_camera_grp {
+ struct config_group group;
+} uvcg_camera_grp;
+
+static struct config_item_type uvcg_camera_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+/* control/terminal/output/default */
+static struct uvcg_default_output {
+ struct config_group group;
+} uvcg_default_output;
+
+static inline struct uvcg_default_output
+*to_uvcg_default_output(struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct uvcg_default_output, group);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_default_output);
+CONFIGFS_ATTR_OPS_RO(uvcg_default_output);
+
+static struct configfs_item_operations uvcg_default_output_item_ops = {
+ .show_attribute = uvcg_default_output_attr_show,
+};
+
+#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, conv) \
+static ssize_t uvcg_default_output_##cname##_show( \
+ struct uvcg_default_output *dout, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &dout->group.cg_subsys->su_mutex; \
+ struct uvc_output_terminal_descriptor *cd; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = dout->group.cg_item.ci_parent->ci_parent-> \
+ ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ cd = &opts->uvc_output_terminal; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(cd->aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ \
+ return result; \
+} \
+ \
+static struct uvcg_default_output_attribute \
+ uvcg_default_output_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_default_output_##cname##_show)
+
+#define identity_conv(x) (x)
+
+UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, identity_conv);
+UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
+UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
+UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, identity_conv);
+UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, identity_conv);
+
+#undef identity_conv
+
+#undef UVCG_DEFAULT_OUTPUT_ATTR
+
+static struct configfs_attribute *uvcg_default_output_attrs[] = {
+ &uvcg_default_output_b_terminal_id.attr,
+ &uvcg_default_output_w_terminal_type.attr,
+ &uvcg_default_output_b_assoc_terminal.attr,
+ &uvcg_default_output_b_source_id.attr,
+ &uvcg_default_output_i_terminal.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_default_output_type = {
+ .ct_item_ops = &uvcg_default_output_item_ops,
+ .ct_attrs = uvcg_default_output_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* struct uvcg_output {}; */
+
+static struct config_group *uvcg_output_default_groups[] = {
+ &uvcg_default_output.group,
+ NULL,
+};
+
+/* control/terminal/output */
+static struct uvcg_output_grp {
+ struct config_group group;
+} uvcg_output_grp;
+
+static struct config_item_type uvcg_output_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_terminal_default_groups[] = {
+ &uvcg_camera_grp.group,
+ &uvcg_output_grp.group,
+ NULL,
+};
+
+/* control/terminal */
+static struct uvcg_terminal_grp {
+ struct config_group group;
+} uvcg_terminal_grp;
+
+static struct config_item_type uvcg_terminal_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+/* control/class/{fs} */
+static struct uvcg_control_class {
+ struct config_group group;
+} uvcg_control_class_fs, uvcg_control_class_ss;
+
+static inline struct uvc_descriptor_header
+**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o)
+{
+ struct uvcg_control_class *cl = container_of(to_config_group(i),
+ struct uvcg_control_class, group);
+
+ if (cl == &uvcg_control_class_fs)
+ return o->uvc_fs_control_cls;
+
+ if (cl == &uvcg_control_class_ss)
+ return o->uvc_ss_control_cls;
+
+ return NULL;
+}
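+
+/*
+ * Only full-speed and super-speed class arrays exist on the control side
+ * (f_uvc_opts carries no uvc_hs_control_cls); the high-speed configuration
+ * presumably reuses the full-speed control descriptors, while the streaming
+ * side further below keeps separate fs/hs/ss arrays.
+ */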
+
+static int uvcg_control_class_allow_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct config_item *control, *header;
+ struct f_uvc_opts *opts;
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvc_descriptor_header **class_array;
+ struct uvcg_control_header *target_hdr;
+ int ret = -EINVAL;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ control = src->ci_parent->ci_parent;
+ header = config_group_find_item(to_config_group(control), "header");
+ if (!header || target->ci_parent != header)
+ goto out;
+
+ opts = to_f_uvc_opts(control->ci_parent);
+
+ mutex_lock(&opts->lock);
+
+ class_array = uvcg_get_ctl_class_arr(src, opts);
+ if (!class_array)
+ goto unlock;
+ if (opts->refcnt || class_array[0]) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ target_hdr = to_uvcg_control_header(target);
+ ++target_hdr->linked;
+ class_array[0] = (struct uvc_descriptor_header *)&target_hdr->desc;
+ ret = 0;
+
+unlock:
+ mutex_unlock(&opts->lock);
+out:
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static int uvcg_control_class_drop_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct config_item *control, *header;
+ struct f_uvc_opts *opts;
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvc_descriptor_header **class_array;
+ struct uvcg_control_header *target_hdr;
+ int ret = -EINVAL;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ control = src->ci_parent->ci_parent;
+ header = config_group_find_item(to_config_group(control), "header");
+ if (!header || target->ci_parent != header)
+ goto out;
+
+ opts = to_f_uvc_opts(control->ci_parent);
+
+ mutex_lock(&opts->lock);
+
+ class_array = uvcg_get_ctl_class_arr(src, opts);
+ if (!class_array)
+ goto unlock;
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ target_hdr = to_uvcg_control_header(target);
+ --target_hdr->linked;
+ class_array[0] = NULL;
+ ret = 0;
+
+unlock:
+ mutex_unlock(&opts->lock);
+out:
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static struct configfs_item_operations uvcg_control_class_item_ops = {
+ .allow_link = uvcg_control_class_allow_link,
+ .drop_link = uvcg_control_class_drop_link,
+};
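+
+/*
+ * control/class/fs and control/class/ss each take a single symlink to an
+ * item under control/header; allow_link() above installs that header's
+ * descriptor as the first element of the corresponding class array and
+ * drop_link() clears it again.
+ */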
+
+static struct config_item_type uvcg_control_class_type = {
+ .ct_item_ops = &uvcg_control_class_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_control_class_default_groups[] = {
+ &uvcg_control_class_fs.group,
+ &uvcg_control_class_ss.group,
+ NULL,
+};
+
+/* control/class */
+static struct uvcg_control_class_grp {
+ struct config_group group;
+} uvcg_control_class_grp;
+
+static struct config_item_type uvcg_control_class_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_control_default_groups[] = {
+ &uvcg_control_header_grp.group,
+ &uvcg_processing_grp.group,
+ &uvcg_terminal_grp.group,
+ &uvcg_control_class_grp.group,
+ NULL,
+};
+
+/* control */
+static struct uvcg_control_grp {
+ struct config_group group;
+} uvcg_control_grp;
+
+static struct config_item_type uvcg_control_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+/* streaming/uncompressed */
+static struct uvcg_uncompressed_grp {
+ struct config_group group;
+} uvcg_uncompressed_grp;
+
+/* streaming/mjpeg */
+static struct uvcg_mjpeg_grp {
+ struct config_group group;
+} uvcg_mjpeg_grp;
+
+static struct config_item *fmt_parent[] = {
+ &uvcg_uncompressed_grp.group.cg_item,
+ &uvcg_mjpeg_grp.group.cg_item,
+};
+
+enum uvcg_format_type {
+ UVCG_UNCOMPRESSED = 0,
+ UVCG_MJPEG,
+};
+
+struct uvcg_format {
+ struct config_group group;
+ enum uvcg_format_type type;
+ unsigned linked;
+ unsigned num_frames;
+ __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+};
+
+static struct uvcg_format *to_uvcg_format(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct uvcg_format, group);
+}
+
+static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &f->group.cg_subsys->su_mutex;
+ int result, i;
+ char *pg = page;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = f->group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result = sprintf(pg, "0x");
+ pg += result;
+ for (i = 0; i < UVCG_STREAMING_CONTROL_SIZE; ++i) {
+ result += sprintf(pg, "%x\n", f->bmaControls[i]);
+ pg = page + result;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+ return result;
+}
+
+static ssize_t uvcg_format_bma_controls_store(struct uvcg_format *ch,
+ const char *page, size_t len)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &ch->group.cg_subsys->su_mutex;
+ int ret = -EINVAL;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = ch->group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ if (ch->linked || opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ if (len < 4 || *page != '0' ||
+ (*(page + 1) != 'x' && *(page + 1) != 'X'))
+ goto end;
+ ret = hex2bin(ch->bmaControls, page + 2, 1);
+ if (ret < 0)
+ goto end;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
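+
+/*
+ * bmaControls is written as a hex string with a mandatory "0x"/"0X" prefix,
+ * e.g. "0x04"; hex2bin() above converts the two digits after the prefix into
+ * the first bmaControls byte. Reads print "0x" followed by one byte per line.
+ */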
+
+struct uvcg_format_ptr {
+ struct uvcg_format *fmt;
+ struct list_head entry;
+};
+
+/* streaming/header/<NAME> */
+struct uvcg_streaming_header {
+ struct config_item item;
+ struct uvc_input_header_descriptor desc;
+ unsigned linked;
+ struct list_head formats;
+ unsigned num_fmt;
+};
+
+static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
+{
+ return container_of(item, struct uvcg_streaming_header, item);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_streaming_header);
+CONFIGFS_ATTR_OPS(uvcg_streaming_header);
+
+static int uvcg_streaming_header_allow_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ struct uvcg_streaming_header *src_hdr;
+ struct uvcg_format *target_fmt = NULL;
+ struct uvcg_format_ptr *format_ptr;
+ int i, ret = -EINVAL;
+
+ src_hdr = to_uvcg_streaming_header(src);
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = src->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ if (src_hdr->linked) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fmt_parent); ++i)
+ if (target->ci_parent == fmt_parent[i])
+ break;
+ if (i == ARRAY_SIZE(fmt_parent))
+ goto out;
+
+ target_fmt = container_of(to_config_group(target), struct uvcg_format,
+ group);
+ if (!target_fmt)
+ goto out;
+
+ format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL);
+ if (!format_ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = 0;
+ format_ptr->fmt = target_fmt;
+ list_add_tail(&format_ptr->entry, &src_hdr->formats);
+ ++src_hdr->num_fmt;
+
+out:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static int uvcg_streaming_header_drop_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ struct uvcg_streaming_header *src_hdr;
+ struct uvcg_format *target_fmt = NULL;
+ struct uvcg_format_ptr *format_ptr, *tmp;
+ int ret = -EINVAL;
+
+ src_hdr = to_uvcg_streaming_header(src);
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = src->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ target_fmt = container_of(to_config_group(target), struct uvcg_format,
+ group);
+ if (!target_fmt)
+ goto out;
+
+ list_for_each_entry_safe(format_ptr, tmp, &src_hdr->formats, entry)
+ if (format_ptr->fmt == target_fmt) {
+ list_del(&format_ptr->entry);
+ kfree(format_ptr);
+ --src_hdr->num_fmt;
+ ret = 0;
+ break;
+ }
+
+out:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static struct configfs_item_operations uvcg_streaming_header_item_ops = {
+ .show_attribute = uvcg_streaming_header_attr_show,
+ .store_attribute = uvcg_streaming_header_attr_store,
+ .allow_link = uvcg_streaming_header_allow_link,
+ .drop_link = uvcg_streaming_header_drop_link,
+};
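+
+/*
+ * A format is added to a streaming header by symlinking its directory
+ * (streaming/uncompressed/<NAME> or streaming/mjpeg/<NAME>) into the
+ * header's directory; allow_link()/drop_link() above keep the header's
+ * formats list and num_fmt count in sync with those links.
+ */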
+
+#define UVCG_STREAMING_HEADER_ATTR(cname, aname, conv) \
+static ssize_t uvcg_streaming_header_##cname##_show( \
+ struct uvcg_streaming_header *sh, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &sh->item.ci_group->cg_subsys->su_mutex;\
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = sh->item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(sh->desc.aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static struct uvcg_streaming_header_attribute \
+ uvcg_streaming_header_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_streaming_header_##cname##_show)
+
+#define identity_conv(x) (x)
+
+UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, identity_conv);
+UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, identity_conv);
+UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod,
+ identity_conv);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, identity_conv);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, identity_conv);
+
+#undef identity_conv
+
+#undef UVCG_STREAMING_HEADER_ATTR
+
+static struct configfs_attribute *uvcg_streaming_header_attrs[] = {
+ &uvcg_streaming_header_bm_info.attr,
+ &uvcg_streaming_header_b_terminal_link.attr,
+ &uvcg_streaming_header_b_still_capture_method.attr,
+ &uvcg_streaming_header_b_trigger_support.attr,
+ &uvcg_streaming_header_b_trigger_usage.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_streaming_header_type = {
+ .ct_item_ops = &uvcg_streaming_header_item_ops,
+ .ct_attrs = uvcg_streaming_header_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item
+*uvcg_streaming_header_make(struct config_group *group, const char *name)
+{
+ struct uvcg_streaming_header *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&h->formats);
+ h->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ h->desc.bDescriptorSubType = UVC_VS_INPUT_HEADER;
+ h->desc.bTerminalLink = 3;
+ h->desc.bControlSize = UVCG_STREAMING_CONTROL_SIZE;
+
+ config_item_init_type_name(&h->item, name, &uvcg_streaming_header_type);
+
+ return &h->item;
+}
+
+static void uvcg_streaming_header_drop(struct config_group *group,
+ struct config_item *item)
+{
+ struct uvcg_streaming_header *h = to_uvcg_streaming_header(item);
+
+ kfree(h);
+}
+
+/* streaming/header */
+static struct uvcg_streaming_header_grp {
+ struct config_group group;
+} uvcg_streaming_header_grp;
+
+static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
+ .make_item = uvcg_streaming_header_make,
+ .drop_item = uvcg_streaming_header_drop,
+};
+
+static struct config_item_type uvcg_streaming_header_grp_type = {
+ .ct_group_ops = &uvcg_streaming_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* streaming/<mode>/<format>/<NAME> */
+struct uvcg_frame {
+ struct {
+ u8 b_length;
+ u8 b_descriptor_type;
+ u8 b_descriptor_subtype;
+ u8 b_frame_index;
+ u8 bm_capabilities;
+ u16 w_width;
+ u16 w_height;
+ u32 dw_min_bit_rate;
+ u32 dw_max_bit_rate;
+ u32 dw_max_video_frame_buffer_size;
+ u32 dw_default_frame_interval;
+ u8 b_frame_interval_type;
+ } __attribute__((packed)) frame;
+ u32 *dw_frame_interval;
+ enum uvcg_format_type fmt_type;
+ struct config_item item;
+};
+
+static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
+{
+ return container_of(item, struct uvcg_frame, item);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_frame);
+CONFIGFS_ATTR_OPS(uvcg_frame);
+
+static struct configfs_item_operations uvcg_frame_item_ops = {
+ .show_attribute = uvcg_frame_attr_show,
+ .store_attribute = uvcg_frame_attr_store,
+};
+
+#define UVCG_FRAME_ATTR(cname, aname, to_cpu_endian, to_little_endian, bits) \
+static ssize_t uvcg_frame_##cname##_show(struct uvcg_frame *f, char *page)\
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", to_cpu_endian(f->frame.cname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static ssize_t uvcg_frame_##cname##_store(struct uvcg_frame *f, \
+ const char *page, size_t len)\
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct uvcg_format *fmt; \
+ struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
+ int ret; \
+ u##bits num; \
+ \
+ ret = kstrtou##bits(page, 0, &num); \
+ if (ret) \
+ return ret; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ fmt = to_uvcg_format(f->item.ci_parent); \
+ \
+ mutex_lock(&opts->lock); \
+ if (fmt->linked || opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ f->frame.cname = to_little_endian(num); \
+ ret = len; \
+end: \
+ mutex_unlock(&opts->lock); \
+ mutex_unlock(su_mutex); \
+ return ret; \
+} \
+ \
+static struct uvcg_frame_attribute \
+ uvcg_frame_##cname = \
+ __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \
+ uvcg_frame_##cname##_show, \
+ uvcg_frame_##cname##_store)
+
+#define noop_conversion(x) (x)
+
+UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, noop_conversion,
+ noop_conversion, 8);
+UVCG_FRAME_ATTR(w_width, wWidth, le16_to_cpu, cpu_to_le16, 16);
+UVCG_FRAME_ATTR(w_height, wHeight, le16_to_cpu, cpu_to_le16, 16);
+UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, le32_to_cpu, cpu_to_le32, 32);
+UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, le32_to_cpu, cpu_to_le32, 32);
+UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize,
+ le32_to_cpu, cpu_to_le32, 32);
+UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval,
+ le32_to_cpu, cpu_to_le32, 32);
+
+#undef noop_conversion
+
+#undef UVCG_FRAME_ATTR
+
+static ssize_t uvcg_frame_dw_frame_interval_show(struct uvcg_frame *frm,
+ char *page)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &frm->item.ci_group->cg_subsys->su_mutex;
+ int result, i;
+ char *pg = page;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = frm->item.ci_parent->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) {
+ result += sprintf(pg, "%d\n",
+ le32_to_cpu(frm->dw_frame_interval[i]));
+ pg = page + result;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+ return result;
+}
+
+static inline int __uvcg_count_frm_intrv(char *buf, void *priv)
+{
+ ++*((int *)priv);
+ return 0;
+}
+
+static inline int __uvcg_fill_frm_intrv(char *buf, void *priv)
+{
+ u32 num, **interv;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &num);
+ if (ret)
+ return ret;
+
+ interv = priv;
+ **interv = cpu_to_le32(num);
+ ++*interv;
+
+ return 0;
+}
+
+static int __uvcg_iter_frm_intrv(const char *page, size_t len,
+ int (*fun)(char *, void *), void *priv)
+{
+ /* sign, base 2 representation, newline, terminator */
+ char buf[1 + sizeof(u32) * 8 + 1 + 1];
+ const char *pg = page;
+ int i, ret;
+
+ if (!fun)
+ return -EINVAL;
+
+ while (pg - page < len) {
+ i = 0;
+ while (i < sizeof(buf) && (pg - page < len) &&
+ *pg != '\0' && *pg != '\n')
+ buf[i++] = *pg++;
+ if (i == sizeof(buf))
+ return -EINVAL;
+ while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
+ ++pg;
+ buf[i] = '\0';
+ ret = fun(buf, priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
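+
+/*
+ * dwFrameInterval takes one interval per line; each value is parsed with
+ * kstrtou32() using base 0, so decimal and "0x"-prefixed input both work.
+ * The store below runs one counting pass and one filling pass over the
+ * buffer, then replaces the frame's interval array and sets
+ * bFrameIntervalType to the number of values written.
+ */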
+
+static ssize_t uvcg_frame_dw_frame_interval_store(struct uvcg_frame *ch,
+ const char *page, size_t len)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct uvcg_format *fmt;
+ struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;
+ int ret = 0, n = 0;
+ u32 *frm_intrv, *tmp;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = ch->item.ci_parent->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ fmt = to_uvcg_format(ch->item.ci_parent);
+
+ mutex_lock(&opts->lock);
+ if (fmt->linked || opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n);
+ if (ret)
+ goto end;
+
+ tmp = frm_intrv = kcalloc(n, sizeof(u32), GFP_KERNEL);
+ if (!frm_intrv) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ ret = __uvcg_iter_frm_intrv(page, len, __uvcg_fill_frm_intrv, &tmp);
+ if (ret) {
+ kfree(frm_intrv);
+ goto end;
+ }
+
+ kfree(ch->dw_frame_interval);
+ ch->dw_frame_interval = frm_intrv;
+ ch->frame.b_frame_interval_type = n;
+ ret = len;
+
+end:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static struct uvcg_frame_attribute
+ uvcg_frame_dw_frame_interval =
+ __CONFIGFS_ATTR(dwFrameInterval, S_IRUGO | S_IWUSR,
+ uvcg_frame_dw_frame_interval_show,
+ uvcg_frame_dw_frame_interval_store);
+
+static struct configfs_attribute *uvcg_frame_attrs[] = {
+ &uvcg_frame_bm_capabilities.attr,
+ &uvcg_frame_w_width.attr,
+ &uvcg_frame_w_height.attr,
+ &uvcg_frame_dw_min_bit_rate.attr,
+ &uvcg_frame_dw_max_bit_rate.attr,
+ &uvcg_frame_dw_max_video_frame_buffer_size.attr,
+ &uvcg_frame_dw_default_frame_interval.attr,
+ &uvcg_frame_dw_frame_interval.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_frame_type = {
+ .ct_item_ops = &uvcg_frame_item_ops,
+ .ct_attrs = uvcg_frame_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *uvcg_frame_make(struct config_group *group,
+ const char *name)
+{
+ struct uvcg_frame *h;
+ struct uvcg_format *fmt;
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ h->frame.b_descriptor_type = USB_DT_CS_INTERFACE;
+ h->frame.b_frame_index = 1;
+ h->frame.w_width = cpu_to_le16(640);
+ h->frame.w_height = cpu_to_le16(360);
+ h->frame.dw_min_bit_rate = cpu_to_le32(18432000);
+ h->frame.dw_max_bit_rate = cpu_to_le32(55296000);
+ h->frame.dw_max_video_frame_buffer_size = cpu_to_le32(460800);
+ h->frame.dw_default_frame_interval = cpu_to_le32(666666);
+
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ fmt = to_uvcg_format(&group->cg_item);
+ if (fmt->type == UVCG_UNCOMPRESSED) {
+ h->frame.b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED;
+ h->fmt_type = UVCG_UNCOMPRESSED;
+ } else if (fmt->type == UVCG_MJPEG) {
+ h->frame.b_descriptor_subtype = UVC_VS_FRAME_MJPEG;
+ h->fmt_type = UVCG_MJPEG;
+ } else {
+ mutex_unlock(&opts->lock);
+ kfree(h);
+ return ERR_PTR(-EINVAL);
+ }
+ ++fmt->num_frames;
+ mutex_unlock(&opts->lock);
+
+ config_item_init_type_name(&h->item, name, &uvcg_frame_type);
+
+ return &h->item;
+}
+
+static void uvcg_frame_drop(struct config_group *group, struct config_item *item)
+{
+ struct uvcg_frame *h = to_uvcg_frame(item);
+ struct uvcg_format *fmt;
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ fmt = to_uvcg_format(&group->cg_item);
+ --fmt->num_frames;
+ kfree(h);
+ mutex_unlock(&opts->lock);
+}
+
+/* streaming/uncompressed/<NAME> */
+struct uvcg_uncompressed {
+ struct uvcg_format fmt;
+ struct uvc_format_uncompressed desc;
+};
+
+static struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
+{
+ return container_of(
+ container_of(to_config_group(item), struct uvcg_format, group),
+ struct uvcg_uncompressed, fmt);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_uncompressed);
+CONFIGFS_ATTR_OPS(uvcg_uncompressed);
+
+static struct configfs_item_operations uvcg_uncompressed_item_ops = {
+ .show_attribute = uvcg_uncompressed_attr_show,
+ .store_attribute = uvcg_uncompressed_attr_store,
+};
+
+static struct configfs_group_operations uvcg_uncompressed_group_ops = {
+ .make_item = uvcg_frame_make,
+ .drop_item = uvcg_frame_drop,
+};
+
+static ssize_t uvcg_uncompressed_guid_format_show(struct uvcg_uncompressed *ch,
+ char *page)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ memcpy(page, ch->desc.guidFormat, sizeof(ch->desc.guidFormat));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return sizeof(ch->desc.guidFormat);
+}
+
+static ssize_t uvcg_uncompressed_guid_format_store(struct uvcg_uncompressed *ch,
+ const char *page, size_t len)
+{
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
+ int ret;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ if (ch->fmt.linked || opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ memcpy(ch->desc.guidFormat, page,
+ min(sizeof(ch->desc.guidFormat), len));
+ ret = sizeof(ch->desc.guidFormat);
+
+end:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
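+
+/*
+ * guidFormat is read and written as the raw 16-byte GUID, not as a textual
+ * UUID; writes shorter than 16 bytes only update the leading bytes.
+ */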
+
+static struct uvcg_uncompressed_attribute uvcg_uncompressed_guid_format =
+ __CONFIGFS_ATTR(guidFormat, S_IRUGO | S_IWUSR,
+ uvcg_uncompressed_guid_format_show,
+ uvcg_uncompressed_guid_format_store);
+
+#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, conv) \
+static ssize_t uvcg_uncompressed_##cname##_show( \
+ struct uvcg_uncompressed *u, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static struct uvcg_uncompressed_attribute \
+ uvcg_uncompressed_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_uncompressed_##cname##_show)
+
+#define UVCG_UNCOMPRESSED_ATTR(cname, aname, conv) \
+static ssize_t uvcg_uncompressed_##cname##_show( \
+ struct uvcg_uncompressed *u, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static ssize_t \
+uvcg_uncompressed_##cname##_store(struct uvcg_uncompressed *u, \
+ const char *page, size_t len) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \
+ int ret; \
+ u8 num; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ if (u->fmt.linked || opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtou8(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (num > 255) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ u->desc.aname = num; \
+ ret = len; \
+end: \
+ mutex_unlock(&opts->lock); \
+ mutex_unlock(su_mutex); \
+ return ret; \
+} \
+ \
+static struct uvcg_uncompressed_attribute \
+ uvcg_uncompressed_##cname = \
+ __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \
+ uvcg_uncompressed_##cname##_show, \
+ uvcg_uncompressed_##cname##_store)
+
+#define identity_conv(x) (x)
+
+UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, identity_conv);
+UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex,
+ identity_conv);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
+UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
+
+#undef identity_conv
+
+#undef UVCG_UNCOMPRESSED_ATTR
+#undef UVCG_UNCOMPRESSED_ATTR_RO
+
+static inline ssize_t
+uvcg_uncompressed_bma_controls_show(struct uvcg_uncompressed *unc, char *page)
+{
+ return uvcg_format_bma_controls_show(&unc->fmt, page);
+}
+
+static inline ssize_t
+uvcg_uncompressed_bma_controls_store(struct uvcg_uncompressed *ch,
+ const char *page, size_t len)
+{
+ return uvcg_format_bma_controls_store(&ch->fmt, page, len);
+}
+
+static struct uvcg_uncompressed_attribute uvcg_uncompressed_bma_controls =
+ __CONFIGFS_ATTR(bmaControls, S_IRUGO | S_IWUSR,
+ uvcg_uncompressed_bma_controls_show,
+ uvcg_uncompressed_bma_controls_store);
+
+static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
+ &uvcg_uncompressed_guid_format.attr,
+ &uvcg_uncompressed_b_bits_per_pixel.attr,
+ &uvcg_uncompressed_b_default_frame_index.attr,
+ &uvcg_uncompressed_b_aspect_ratio_x.attr,
+ &uvcg_uncompressed_b_aspect_ratio_y.attr,
+ &uvcg_uncompressed_bm_interface_flags.attr,
+ &uvcg_uncompressed_bma_controls.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_uncompressed_type = {
+ .ct_item_ops = &uvcg_uncompressed_item_ops,
+ .ct_group_ops = &uvcg_uncompressed_group_ops,
+ .ct_attrs = uvcg_uncompressed_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_uncompressed_make(struct config_group *group,
+ const char *name)
+{
+ static char guid[] = {
+ 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
+ };
+ struct uvcg_uncompressed *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ h->desc.bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE;
+ h->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ h->desc.bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED;
+ memcpy(h->desc.guidFormat, guid, sizeof(guid));
+ h->desc.bBitsPerPixel = 16;
+ h->desc.bDefaultFrameIndex = 1;
+ h->desc.bAspectRatioX = 0;
+ h->desc.bAspectRatioY = 0;
+ h->desc.bmInterfaceFlags = 0;
+ h->desc.bCopyProtect = 0;
+
+ h->fmt.type = UVCG_UNCOMPRESSED;
+ config_group_init_type_name(&h->fmt.group, name,
+ &uvcg_uncompressed_type);
+
+ return &h->fmt.group;
+}
+
+static void uvcg_uncompressed_drop(struct config_group *group,
+ struct config_item *item)
+{
+ struct uvcg_uncompressed *h = to_uvcg_uncompressed(item);
+
+ kfree(h);
+}
+
+static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
+ .make_group = uvcg_uncompressed_make,
+ .drop_item = uvcg_uncompressed_drop,
+};
+
+static struct config_item_type uvcg_uncompressed_grp_type = {
+ .ct_group_ops = &uvcg_uncompressed_grp_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* streaming/mjpeg/<NAME> */
+struct uvcg_mjpeg {
+ struct uvcg_format fmt;
+ struct uvc_format_mjpeg desc;
+};
+
+static struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
+{
+ return container_of(
+ container_of(to_config_group(item), struct uvcg_format, group),
+ struct uvcg_mjpeg, fmt);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_mjpeg);
+CONFIGFS_ATTR_OPS(uvcg_mjpeg);
+
+static struct configfs_item_operations uvcg_mjpeg_item_ops = {
+ .show_attribute = uvcg_mjpeg_attr_show,
+ .store_attribute = uvcg_mjpeg_attr_store,
+};
+
+static struct configfs_group_operations uvcg_mjpeg_group_ops = {
+ .make_item = uvcg_frame_make,
+ .drop_item = uvcg_frame_drop,
+};
+
+#define UVCG_MJPEG_ATTR_RO(cname, aname, conv) \
+static ssize_t uvcg_mjpeg_##cname##_show(struct uvcg_mjpeg *u, char *page)\
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static struct uvcg_mjpeg_attribute \
+ uvcg_mjpeg_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_mjpeg_##cname##_show)
+
+#define UVCG_MJPEG_ATTR(cname, aname, conv) \
+static ssize_t uvcg_mjpeg_##cname##_show(struct uvcg_mjpeg *u, char *page)\
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static ssize_t \
+uvcg_mjpeg_##cname##_store(struct uvcg_mjpeg *u, \
+ const char *page, size_t len) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \
+ int ret; \
+ u8 num; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ if (u->fmt.linked || opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtou8(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (num > 255) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ u->desc.aname = num; \
+ ret = len; \
+end: \
+ mutex_unlock(&opts->lock); \
+ mutex_unlock(su_mutex); \
+ return ret; \
+} \
+ \
+static struct uvcg_mjpeg_attribute \
+ uvcg_mjpeg_##cname = \
+ __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \
+ uvcg_mjpeg_##cname##_show, \
+ uvcg_mjpeg_##cname##_store)
+
+#define identity_conv(x) (x)
+
+UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex,
+ identity_conv);
+UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, identity_conv);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
+UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
+
+#undef identity_conv
+
+#undef UVCG_MJPEG_ATTR
+#undef UVCG_MJPEG_ATTR_RO
+
+static inline ssize_t
+uvcg_mjpeg_bma_controls_show(struct uvcg_mjpeg *unc, char *page)
+{
+ return uvcg_format_bma_controls_show(&unc->fmt, page);
+}
+
+static inline ssize_t
+uvcg_mjpeg_bma_controls_store(struct uvcg_mjpeg *ch,
+ const char *page, size_t len)
+{
+ return uvcg_format_bma_controls_store(&ch->fmt, page, len);
+}
+
+static struct uvcg_mjpeg_attribute uvcg_mjpeg_bma_controls =
+ __CONFIGFS_ATTR(bmaControls, S_IRUGO | S_IWUSR,
+ uvcg_mjpeg_bma_controls_show,
+ uvcg_mjpeg_bma_controls_store);
+
+static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
+ &uvcg_mjpeg_b_default_frame_index.attr,
+ &uvcg_mjpeg_bm_flags.attr,
+ &uvcg_mjpeg_b_aspect_ratio_x.attr,
+ &uvcg_mjpeg_b_aspect_ratio_y.attr,
+ &uvcg_mjpeg_bm_interface_flags.attr,
+ &uvcg_mjpeg_bma_controls.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_mjpeg_type = {
+ .ct_item_ops = &uvcg_mjpeg_item_ops,
+ .ct_group_ops = &uvcg_mjpeg_group_ops,
+ .ct_attrs = uvcg_mjpeg_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_mjpeg_make(struct config_group *group,
+ const char *name)
+{
+ struct uvcg_mjpeg *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ h->desc.bLength = UVC_DT_FORMAT_MJPEG_SIZE;
+ h->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ h->desc.bDescriptorSubType = UVC_VS_FORMAT_MJPEG;
+ h->desc.bDefaultFrameIndex = 1;
+ h->desc.bAspectRatioX = 0;
+ h->desc.bAspectRatioY = 0;
+ h->desc.bmInterfaceFlags = 0;
+ h->desc.bCopyProtect = 0;
+
+ h->fmt.type = UVCG_MJPEG;
+ config_group_init_type_name(&h->fmt.group, name,
+ &uvcg_mjpeg_type);
+
+ return &h->fmt.group;
+}
+
+static void uvcg_mjpeg_drop(struct config_group *group,
+ struct config_item *item)
+{
+ struct uvcg_mjpeg *h = to_uvcg_mjpeg(item);
+
+ kfree(h);
+}
+
+static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
+ .make_group = uvcg_mjpeg_make,
+ .drop_item = uvcg_mjpeg_drop,
+};
+
+static struct config_item_type uvcg_mjpeg_grp_type = {
+ .ct_group_ops = &uvcg_mjpeg_grp_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* streaming/color_matching/default */
+static struct uvcg_default_color_matching {
+ struct config_group group;
+} uvcg_default_color_matching;
+
+static inline struct uvcg_default_color_matching
+*to_uvcg_default_color_matching(struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct uvcg_default_color_matching, group);
+}
+
+CONFIGFS_ATTR_STRUCT(uvcg_default_color_matching);
+CONFIGFS_ATTR_OPS_RO(uvcg_default_color_matching);
+
+static struct configfs_item_operations uvcg_default_color_matching_item_ops = {
+ .show_attribute = uvcg_default_color_matching_attr_show,
+};
+
+#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, conv) \
+static ssize_t uvcg_default_color_matching_##cname##_show( \
+ struct uvcg_default_color_matching *dc, char *page) \
+{ \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct uvc_color_matching_descriptor *cd; \
+ int result; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ cd = &opts->uvc_color_matching; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(cd->aname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ return result; \
+} \
+ \
+static struct uvcg_default_color_matching_attribute \
+ uvcg_default_color_matching_##cname = \
+ __CONFIGFS_ATTR_RO(aname, uvcg_default_color_matching_##cname##_show)
+
+#define identity_conv(x) (x)
+
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries,
+ identity_conv);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics,
+ bTransferCharacteristics, identity_conv);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients,
+ identity_conv);
+
+#undef identity_conv
+
+#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR
+
+static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
+ &uvcg_default_color_matching_b_color_primaries.attr,
+ &uvcg_default_color_matching_b_transfer_characteristics.attr,
+ &uvcg_default_color_matching_b_matrix_coefficients.attr,
+ NULL,
+};
+
+static struct config_item_type uvcg_default_color_matching_type = {
+ .ct_item_ops = &uvcg_default_color_matching_item_ops,
+ .ct_attrs = uvcg_default_color_matching_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* struct uvcg_color_matching {}; */
+
+static struct config_group *uvcg_color_matching_default_groups[] = {
+ &uvcg_default_color_matching.group,
+ NULL,
+};
+
+/* streaming/color_matching */
+static struct uvcg_color_matching_grp {
+ struct config_group group;
+} uvcg_color_matching_grp;
+
+static struct config_item_type uvcg_color_matching_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+/* streaming/class/{fs|hs|ss} */
+static struct uvcg_streaming_class {
+ struct config_group group;
+} uvcg_streaming_class_fs, uvcg_streaming_class_hs, uvcg_streaming_class_ss;
+
+static inline struct uvc_descriptor_header
+***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o)
+{
+ struct uvcg_streaming_class *cl = container_of(to_config_group(i),
+ struct uvcg_streaming_class, group);
+
+ if (cl == &uvcg_streaming_class_fs)
+ return &o->uvc_fs_streaming_cls;
+
+ if (cl == &uvcg_streaming_class_hs)
+ return &o->uvc_hs_streaming_cls;
+
+ if (cl == &uvcg_streaming_class_ss)
+ return &o->uvc_ss_streaming_cls;
+
+ return NULL;
+}
+
+enum uvcg_strm_type {
+ UVCG_HEADER = 0,
+ UVCG_FORMAT,
+ UVCG_FRAME
+};
+
+/*
+ * Iterate over a hierarchy of streaming descriptors' config items.
+ * The items are created by the user with configfs.
+ *
+ * It "processes" the header pointed to by @priv1, then for each format
+ * that follows the header "processes" the format itself and then for
+ * each frame inside a format "processes" the frame.
+ *
+ * As a "processing" function the @fun is used.
+ *
+ * __uvcg_iter_strm_cls() is used in two contexts: first, to calculate
+ * the amount of memory needed for an array of streaming descriptors
+ * and second, to actually fill the array.
+ *
+ * @h: streaming header pointer
+ * @priv2: an "inout" parameter (the caller might want to see the changes to it)
+ * @priv3: an "inout" parameter (the caller might want to see the changes to it)
+ * @fun: callback function for processing each level of the hierarchy
+ */
+static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h,
+ void *priv2, void *priv3,
+ int (*fun)(void *, void *, void *, int, enum uvcg_strm_type type))
+{
+ struct uvcg_format_ptr *f;
+ struct config_group *grp;
+ struct config_item *item;
+ struct uvcg_frame *frm;
+ int ret, i, j;
+
+ if (!fun)
+ return -EINVAL;
+
+ i = j = 0;
+ ret = fun(h, priv2, priv3, 0, UVCG_HEADER);
+ if (ret)
+ return ret;
+ list_for_each_entry(f, &h->formats, entry) {
+ ret = fun(f->fmt, priv2, priv3, i++, UVCG_FORMAT);
+ if (ret)
+ return ret;
+ grp = &f->fmt->group;
+ list_for_each_entry(item, &grp->cg_children, ci_entry) {
+ frm = to_uvcg_frame(item);
+ ret = fun(frm, priv2, priv3, j++, UVCG_FRAME);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
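+
+/*
+ * Typical use (see uvcg_streaming_class_allow_link() below): a first pass
+ * with __uvcg_cnt_strm() computes the required buffer size and the number
+ * of descriptors, the backing buffer and the pointer array are allocated,
+ * and a second pass with __uvcg_fill_strm() copies each descriptor and
+ * records its address in the array.
+ */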
+
+/*
+ * Count how many bytes are needed for an array of streaming descriptors.
+ *
+ * @priv1: pointer to a header, format or frame
+ * @priv2: inout parameter, accumulated size of the array
+ * @priv3: inout parameter, accumulated number of the array elements
+ * @n: unused, this function's prototype must match @fun in __uvcg_iter_strm_cls
+ */
+static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
+ enum uvcg_strm_type type)
+{
+ size_t *size = priv2;
+ size_t *count = priv3;
+
+ switch (type) {
+ case UVCG_HEADER: {
+ struct uvcg_streaming_header *h = priv1;
+
+ *size += sizeof(h->desc);
+ /* bmaControls */
+ *size += h->num_fmt * UVCG_STREAMING_CONTROL_SIZE;
+ }
+ break;
+ case UVCG_FORMAT: {
+ struct uvcg_format *fmt = priv1;
+
+ if (fmt->type == UVCG_UNCOMPRESSED) {
+ struct uvcg_uncompressed *u =
+ container_of(fmt, struct uvcg_uncompressed,
+ fmt);
+
+ *size += sizeof(u->desc);
+ } else if (fmt->type == UVCG_MJPEG) {
+ struct uvcg_mjpeg *m =
+ container_of(fmt, struct uvcg_mjpeg, fmt);
+
+ *size += sizeof(m->desc);
+ } else {
+ return -EINVAL;
+ }
+ }
+ break;
+ case UVCG_FRAME: {
+ struct uvcg_frame *frm = priv1;
+ int sz = sizeof(frm->dw_frame_interval);
+
+ *size += sizeof(frm->frame);
+ *size += frm->frame.b_frame_interval_type * sz;
+ }
+ break;
+ }
+
+ ++*count;
+
+ return 0;
+}
+
+/*
+ * Fill an array of streaming descriptors.
+ *
+ * @priv1: pointer to a header, format or frame
+ * @priv2: inout parameter, pointer into a block of memory
+ * @priv3: inout parameter, pointer to a 2-dimensional array
+ * @n: zero-based index at its level; used to set bFormatIndex for formats
+ */
+static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
+ enum uvcg_strm_type type)
+{
+ void **dest = priv2;
+ struct uvc_descriptor_header ***array = priv3;
+ size_t sz;
+
+ **array = *dest;
+ ++*array;
+
+ switch (type) {
+ case UVCG_HEADER: {
+ struct uvc_input_header_descriptor *ihdr = *dest;
+ struct uvcg_streaming_header *h = priv1;
+ struct uvcg_format_ptr *f;
+
+ memcpy(*dest, &h->desc, sizeof(h->desc));
+ *dest += sizeof(h->desc);
+ sz = UVCG_STREAMING_CONTROL_SIZE;
+ list_for_each_entry(f, &h->formats, entry) {
+ memcpy(*dest, f->fmt->bmaControls, sz);
+ *dest += sz;
+ }
+ ihdr->bLength = sizeof(h->desc) + h->num_fmt * sz;
+ ihdr->bNumFormats = h->num_fmt;
+ }
+ break;
+ case UVCG_FORMAT: {
+ struct uvcg_format *fmt = priv1;
+
+ if (fmt->type == UVCG_UNCOMPRESSED) {
+ struct uvc_format_uncompressed *unc = *dest;
+ struct uvcg_uncompressed *u =
+ container_of(fmt, struct uvcg_uncompressed,
+ fmt);
+
+ memcpy(*dest, &u->desc, sizeof(u->desc));
+ *dest += sizeof(u->desc);
+ unc->bNumFrameDescriptors = fmt->num_frames;
+ unc->bFormatIndex = n + 1;
+ } else if (fmt->type == UVCG_MJPEG) {
+ struct uvc_format_mjpeg *mjp = *dest;
+ struct uvcg_mjpeg *m =
+ container_of(fmt, struct uvcg_mjpeg, fmt);
+
+ memcpy(*dest, &m->desc, sizeof(m->desc));
+ *dest += sizeof(m->desc);
+ mjp->bNumFrameDescriptors = fmt->num_frames;
+ mjp->bFormatIndex = n + 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+ break;
+ case UVCG_FRAME: {
+ struct uvcg_frame *frm = priv1;
+ struct uvc_descriptor_header *h = *dest;
+
+ sz = sizeof(frm->frame);
+ memcpy(*dest, &frm->frame, sz);
+ *dest += sz;
+ sz = frm->frame.b_frame_interval_type *
+ sizeof(*frm->dw_frame_interval);
+ memcpy(*dest, frm->dw_frame_interval, sz);
+ *dest += sz;
+ if (frm->fmt_type == UVCG_UNCOMPRESSED)
+ h->bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(
+ frm->frame.b_frame_interval_type);
+ else if (frm->fmt_type == UVCG_MJPEG)
+ h->bLength = UVC_DT_FRAME_MJPEG_SIZE(
+ frm->frame.b_frame_interval_type);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int uvcg_streaming_class_allow_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct config_item *streaming, *header;
+ struct f_uvc_opts *opts;
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvc_descriptor_header ***class_array, **cl_arr;
+ struct uvcg_streaming_header *target_hdr;
+ void *data, *data_save;
+ size_t size = 0, count = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ streaming = src->ci_parent->ci_parent;
+ header = config_group_find_item(to_config_group(streaming), "header");
+ if (!header || target->ci_parent != header)
+ goto out;
+
+ opts = to_f_uvc_opts(streaming->ci_parent);
+
+ mutex_lock(&opts->lock);
+
+ class_array = __uvcg_get_stream_class_arr(src, opts);
+ if (!class_array || *class_array || opts->refcnt) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ target_hdr = to_uvcg_streaming_header(target);
+ ret = __uvcg_iter_strm_cls(target_hdr, &size, &count, __uvcg_cnt_strm);
+ if (ret)
+ goto unlock;
+
+ count += 2; /* color_matching, NULL */
+ *class_array = kcalloc(count, sizeof(void *), GFP_KERNEL);
+ if (!*class_array) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ data = data_save = kzalloc(size, GFP_KERNEL);
+ if (!data) {
+ kfree(*class_array);
+ *class_array = NULL;
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ cl_arr = *class_array;
+ ret = __uvcg_iter_strm_cls(target_hdr, &data, &cl_arr,
+ __uvcg_fill_strm);
+ if (ret) {
+ kfree(*class_array);
+ *class_array = NULL;
+ /*
+ * __uvcg_fill_strm() called from __uvcg_iter_strm_cls()
+ * might have advanced the "data", so use a backup copy
+ */
+ kfree(data_save);
+ goto unlock;
+ }
+ *cl_arr = (struct uvc_descriptor_header *)&opts->uvc_color_matching;
+
+ ++target_hdr->linked;
+ ret = 0;
+
+unlock:
+ mutex_unlock(&opts->lock);
+out:
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static int uvcg_streaming_class_drop_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct config_item *streaming, *header;
+ struct f_uvc_opts *opts;
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvc_descriptor_header ***class_array;
+ struct uvcg_streaming_header *target_hdr;
+ int ret = -EINVAL;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ streaming = src->ci_parent->ci_parent;
+ header = config_group_find_item(to_config_group(streaming), "header");
+ if (!header || target->ci_parent != header)
+ goto out;
+
+ opts = to_f_uvc_opts(streaming->ci_parent);
+
+ mutex_lock(&opts->lock);
+
+ class_array = __uvcg_get_stream_class_arr(src, opts);
+ if (!class_array || !*class_array)
+ goto unlock;
+
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ target_hdr = to_uvcg_streaming_header(target);
+ --target_hdr->linked;
+ kfree(**class_array);
+ kfree(*class_array);
+ *class_array = NULL;
+ ret = 0;
+
+unlock:
+ mutex_unlock(&opts->lock);
+out:
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+static struct configfs_item_operations uvcg_streaming_class_item_ops = {
+ .allow_link = uvcg_streaming_class_allow_link,
+ .drop_link = uvcg_streaming_class_drop_link,
+};
+
+static struct config_item_type uvcg_streaming_class_type = {
+ .ct_item_ops = &uvcg_streaming_class_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_streaming_class_default_groups[] = {
+ &uvcg_streaming_class_fs.group,
+ &uvcg_streaming_class_hs.group,
+ &uvcg_streaming_class_ss.group,
+ NULL,
+};
+
+/* streaming/class */
+static struct uvcg_streaming_class_grp {
+ struct config_group group;
+} uvcg_streaming_class_grp;
+
+static struct config_item_type uvcg_streaming_class_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_streaming_default_groups[] = {
+ &uvcg_streaming_header_grp.group,
+ &uvcg_uncompressed_grp.group,
+ &uvcg_mjpeg_grp.group,
+ &uvcg_color_matching_grp.group,
+ &uvcg_streaming_class_grp.group,
+ NULL,
+};
+
+/* streaming */
+static struct uvcg_streaming_grp {
+ struct config_group group;
+} uvcg_streaming_grp;
+
+static struct config_item_type uvcg_streaming_grp_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *uvcg_default_groups[] = {
+ &uvcg_control_grp.group,
+ &uvcg_streaming_grp.group,
+ NULL,
+};
+
+static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_uvc_opts,
+ func_inst.group);
+}
+
+CONFIGFS_ATTR_STRUCT(f_uvc_opts);
+CONFIGFS_ATTR_OPS(f_uvc_opts);
+
+static void uvc_attr_release(struct config_item *item)
+{
+ struct f_uvc_opts *opts = to_f_uvc_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations uvc_item_ops = {
+ .release = uvc_attr_release,
+ .show_attribute = f_uvc_opts_attr_show,
+ .store_attribute = f_uvc_opts_attr_store,
+};
+
+#define UVCG_OPTS_ATTR(cname, conv, str2u, uxx, vnoc, limit) \
+static ssize_t f_uvc_opts_##cname##_show( \
+ struct f_uvc_opts *opts, char *page) \
+{ \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", conv(opts->cname)); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t \
+f_uvc_opts_##cname##_store(struct f_uvc_opts *opts, \
+ const char *page, size_t len) \
+{ \
+ int ret; \
+ uxx num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = str2u(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (num > limit) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ opts->cname = vnoc(num); \
+ ret = len; \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+static struct f_uvc_opts_attribute \
+ f_uvc_opts_attribute_##cname = \
+ __CONFIGFS_ATTR(cname, S_IRUGO | S_IWUSR, \
+ f_uvc_opts_##cname##_show, \
+ f_uvc_opts_##cname##_store)
+
+#define identity_conv(x) (x)
+
+UVCG_OPTS_ATTR(streaming_interval, identity_conv, kstrtou8, u8, identity_conv,
+ 16);
+UVCG_OPTS_ATTR(streaming_maxpacket, le16_to_cpu, kstrtou16, u16, le16_to_cpu,
+ 3072);
+UVCG_OPTS_ATTR(streaming_maxburst, identity_conv, kstrtou8, u8, identity_conv,
+ 15);
+
+#undef identity_conv
+
+#undef UVCG_OPTS_ATTR
+
+static struct configfs_attribute *uvc_attrs[] = {
+ &f_uvc_opts_attribute_streaming_interval.attr,
+ &f_uvc_opts_attribute_streaming_maxpacket.attr,
+ &f_uvc_opts_attribute_streaming_maxburst.attr,
+ NULL,
+};
+
+static struct config_item_type uvc_func_type = {
+ .ct_item_ops = &uvc_item_ops,
+ .ct_attrs = uvc_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static inline void uvcg_init_group(struct config_group *g,
+ struct config_group **default_groups,
+ const char *name,
+ struct config_item_type *type)
+{
+ g->default_groups = default_groups;
+ config_group_init_type_name(g, name, type);
+}
+
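+/*
+ * The groups registered below produce the following layout under the
+ * function instance's configfs directory:
+ *
+ *   control/
+ *     header/                 (user-created header items)
+ *     processing/default/
+ *     terminal/camera/default/
+ *     terminal/output/default/
+ *     class/fs
+ *     class/ss
+ *   streaming/
+ *     header/                 (user-created header items)
+ *     uncompressed/           (user-created format groups)
+ *     mjpeg/                  (user-created format groups)
+ *     color_matching/default/
+ *     class/fs
+ *     class/hs
+ *     class/ss
+ */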
+int uvcg_attach_configfs(struct f_uvc_opts *opts)
+{
+ config_group_init_type_name(&uvcg_control_header_grp.group,
+ "header",
+ &uvcg_control_header_grp_type);
+ config_group_init_type_name(&uvcg_default_processing.group,
+ "default",
+ &uvcg_default_processing_type);
+ uvcg_init_group(&uvcg_processing_grp.group,
+ uvcg_processing_default_groups,
+ "processing",
+ &uvcg_processing_grp_type);
+ config_group_init_type_name(&uvcg_default_camera.group,
+ "default",
+ &uvcg_default_camera_type);
+ uvcg_init_group(&uvcg_camera_grp.group,
+ uvcg_camera_default_groups,
+ "camera",
+ &uvcg_camera_grp_type);
+ config_group_init_type_name(&uvcg_default_output.group,
+ "default",
+ &uvcg_default_output_type);
+ uvcg_init_group(&uvcg_output_grp.group,
+ uvcg_output_default_groups,
+ "output",
+ &uvcg_output_grp_type);
+ uvcg_init_group(&uvcg_terminal_grp.group,
+ uvcg_terminal_default_groups,
+ "terminal",
+ &uvcg_terminal_grp_type);
+ config_group_init_type_name(&uvcg_control_class_fs.group,
+ "fs",
+ &uvcg_control_class_type);
+ config_group_init_type_name(&uvcg_control_class_ss.group,
+ "ss",
+ &uvcg_control_class_type);
+ uvcg_init_group(&uvcg_control_class_grp.group,
+ uvcg_control_class_default_groups,
+ "class",
+ &uvcg_control_class_grp_type);
+ uvcg_init_group(&uvcg_control_grp.group,
+ uvcg_control_default_groups,
+ "control",
+ &uvcg_control_grp_type);
+ config_group_init_type_name(&uvcg_streaming_header_grp.group,
+ "header",
+ &uvcg_streaming_header_grp_type);
+ config_group_init_type_name(&uvcg_uncompressed_grp.group,
+ "uncompressed",
+ &uvcg_uncompressed_grp_type);
+ config_group_init_type_name(&uvcg_mjpeg_grp.group,
+ "mjpeg",
+ &uvcg_mjpeg_grp_type);
+ config_group_init_type_name(&uvcg_default_color_matching.group,
+ "default",
+ &uvcg_default_color_matching_type);
+ uvcg_init_group(&uvcg_color_matching_grp.group,
+ uvcg_color_matching_default_groups,
+ "color_matching",
+ &uvcg_color_matching_grp_type);
+ config_group_init_type_name(&uvcg_streaming_class_fs.group,
+ "fs",
+ &uvcg_streaming_class_type);
+ config_group_init_type_name(&uvcg_streaming_class_hs.group,
+ "hs",
+ &uvcg_streaming_class_type);
+ config_group_init_type_name(&uvcg_streaming_class_ss.group,
+ "ss",
+ &uvcg_streaming_class_type);
+ uvcg_init_group(&uvcg_streaming_class_grp.group,
+ uvcg_streaming_class_default_groups,
+ "class",
+ &uvcg_streaming_class_grp_type);
+ uvcg_init_group(&uvcg_streaming_grp.group,
+ uvcg_streaming_default_groups,
+ "streaming",
+ &uvcg_streaming_grp_type);
+ uvcg_init_group(&opts->func_inst.group,
+ uvcg_default_groups,
+ "",
+ &uvc_func_type);
+ return 0;
+}
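
The hierarchy above only becomes visible once a function driver attaches it from its
usb_function_instance allocation path. A minimal sketch of that wiring follows; it is
not taken from this patch (the real call site is the f_uvc instance allocation), and
the example_* names and the "u_uvc.h" include path are assumptions:

	#include <linux/err.h>
	#include <linux/slab.h>

	#include "u_uvc.h"		/* struct f_uvc_opts (assumed location) */
	#include "uvc_configfs.h"

	static void example_uvc_free_inst(struct usb_function_instance *f)
	{
		struct f_uvc_opts *opts = container_of(f, struct f_uvc_opts,
						       func_inst);

		kfree(opts);
	}

	static struct usb_function_instance *example_uvc_alloc_inst(void)
	{
		struct f_uvc_opts *opts;

		opts = kzalloc(sizeof(*opts), GFP_KERNEL);
		if (!opts)
			return ERR_PTR(-ENOMEM);

		opts->func_inst.free_func_inst = example_uvc_free_inst;
		mutex_init(&opts->lock);

		/* ... set up the default control/streaming descriptors ... */

		/* Expose the "control" and "streaming" trees (returns 0 here). */
		uvcg_attach_configfs(opts);

		return &opts->func_inst;
	}
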
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
new file mode 100644
index 000000000000..085e67be7c71
--- /dev/null
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -0,0 +1,22 @@
+/*
+ * uvc_configfs.h
+ *
+ * Configfs support for the uvc function.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef UVC_CONFIGFS_H
+#define UVC_CONFIGFS_H
+
+struct f_uvc_opts;
+
+int uvcg_attach_configfs(struct f_uvc_opts *opts);
+
+#endif /* UVC_CONFIGFS_H */
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index c744e4975d74..db49ec4c748e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
kbuf = memdup_user(buf, len);
if (IS_ERR(kbuf)) {
value = PTR_ERR(kbuf);
+ kbuf = NULL;
goto free1;
}
@@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
data->name, len, (int) value);
free1:
mutex_unlock(&data->lock);
+ kfree (kbuf);
return value;
}
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index c862656d18b8..c0ec5f71f16f 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -176,7 +176,7 @@ static int proc_udc_show(struct seq_file *s, void *unused)
udc->enabled
? (udc->vbus ? "active" : "enabled")
: "disabled",
- udc->selfpowered ? "self" : "VBUS",
+ udc->gadget.is_selfpowered ? "self" : "VBUS",
udc->suspended ? ", suspended" : "",
udc->driver ? udc->driver->driver.name : "(none)");
@@ -1000,7 +1000,7 @@ static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on)
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
- udc->selfpowered = (is_on != 0);
+ gadget->is_selfpowered = (is_on != 0);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
@@ -1149,7 +1149,7 @@ static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr)
*/
case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
| USB_REQ_GET_STATUS:
- tmp = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
+ tmp = (udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED);
if (at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_ESR)
tmp |= (1 << USB_DEVICE_REMOTE_WAKEUP);
PACKET("get device status\n");
@@ -1653,7 +1653,7 @@ static int at91_start(struct usb_gadget *gadget,
udc->driver = driver;
udc->gadget.dev.of_node = udc->pdev->dev.of_node;
udc->enabled = 1;
- udc->selfpowered = 1;
+ udc->gadget.is_selfpowered = 1;
return 0;
}
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index 017524663381..467dcfb74a51 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -122,7 +122,6 @@ struct at91_udc {
unsigned req_pending:1;
unsigned wait_for_addr_ack:1;
unsigned wait_for_config_ack:1;
- unsigned selfpowered:1;
unsigned active_suspend:1;
u8 addr;
struct at91_udc_data board;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index ce882371786b..c0410862c2a1 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/clk/at91_pmc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -315,6 +316,17 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc)
}
#endif
+static inline u32 usba_int_enb_get(struct usba_udc *udc)
+{
+ return udc->int_enb_cache;
+}
+
+static inline void usba_int_enb_set(struct usba_udc *udc, u32 val)
+{
+ usba_writel(udc, INT_ENB, val);
+ udc->int_enb_cache = val;
+}
+
static int vbus_is_present(struct usba_udc *udc)
{
if (gpio_is_valid(udc->vbus_pin))
@@ -324,27 +336,22 @@ static int vbus_is_present(struct usba_udc *udc)
return 1;
}
-#if defined(CONFIG_ARCH_AT91SAM9RL)
-
-#include <linux/clk/at91_pmc.h>
-
-static void toggle_bias(int is_on)
+static void toggle_bias(struct usba_udc *udc, int is_on)
{
- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
-
- if (is_on)
- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
- else
- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+ if (udc->errata && udc->errata->toggle_bias)
+ udc->errata->toggle_bias(udc, is_on);
}
-#else
-
-static void toggle_bias(int is_on)
+static void generate_bias_pulse(struct usba_udc *udc)
{
-}
+ if (!udc->bias_pulse_needed)
+ return;
-#endif /* CONFIG_ARCH_AT91SAM9RL */
+ if (udc->errata && udc->errata->pulse_bias)
+ udc->errata->pulse_bias(udc);
+
+ udc->bias_pulse_needed = false;
+}
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
@@ -601,16 +608,14 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
if (ep->can_dma) {
u32 ctrl;
- usba_writel(udc, INT_ENB,
- (usba_readl(udc, INT_ENB)
- | USBA_BF(EPT_INT, 1 << ep->index)
- | USBA_BF(DMA_INT, 1 << ep->index)));
+ usba_int_enb_set(udc, usba_int_enb_get(udc) |
+ USBA_BF(EPT_INT, 1 << ep->index) |
+ USBA_BF(DMA_INT, 1 << ep->index));
ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
usba_ep_writel(ep, CTL_ENB, ctrl);
} else {
- usba_writel(udc, INT_ENB,
- (usba_readl(udc, INT_ENB)
- | USBA_BF(EPT_INT, 1 << ep->index)));
+ usba_int_enb_set(udc, usba_int_enb_get(udc) |
+ USBA_BF(EPT_INT, 1 << ep->index));
}
spin_unlock_irqrestore(&udc->lock, flags);
@@ -618,7 +623,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
(unsigned long)usba_ep_readl(ep, CFG));
DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
- (unsigned long)usba_readl(udc, INT_ENB));
+ (unsigned long)usba_int_enb_get(udc));
return 0;
}
@@ -654,9 +659,8 @@ static int usba_ep_disable(struct usb_ep *_ep)
usba_dma_readl(ep, STATUS);
}
usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
- usba_writel(udc, INT_ENB,
- usba_readl(udc, INT_ENB)
- & ~USBA_BF(EPT_INT, 1 << ep->index));
+ usba_int_enb_set(udc, usba_int_enb_get(udc) &
+ ~USBA_BF(EPT_INT, 1 << ep->index));
request_complete_list(ep, &req_list, -ESHUTDOWN);
@@ -716,10 +720,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
req->using_dma = 1;
req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
- | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+ | USBA_DMA_END_BUF_EN;
- if (ep->is_in)
- req->ctrl |= USBA_DMA_END_BUF_EN;
+ if (!ep->is_in)
+ req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
/*
* Add this request to the queue and submit for DMA if
@@ -828,7 +832,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
- struct usba_request *req = to_usba_req(_req);
+ struct usba_request *req;
unsigned long flags;
u32 status;
@@ -837,6 +841,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
spin_lock_irqsave(&udc->lock, flags);
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+
+ if (&req->req != _req) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
if (req->using_dma) {
/*
* If this request is currently being transferred,
@@ -975,6 +989,7 @@ usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
struct usba_udc *udc = to_usba_udc(gadget);
unsigned long flags;
+ gadget->is_selfpowered = (is_selfpowered != 0);
spin_lock_irqsave(&udc->lock, flags);
if (is_selfpowered)
udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
@@ -1563,7 +1578,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
receive_data(ep);
- usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
}
}
@@ -1610,18 +1624,21 @@ static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
struct usba_udc *udc = devid;
- u32 status;
+ u32 status, int_enb;
u32 dma_status;
u32 ep_status;
spin_lock(&udc->lock);
- status = usba_readl(udc, INT_STA);
+ int_enb = usba_int_enb_get(udc);
+ status = usba_readl(udc, INT_STA) & int_enb;
DBG(DBG_INT, "irq, status=%#08x\n", status);
if (status & USBA_DET_SUSPEND) {
- toggle_bias(0);
+ toggle_bias(udc, 0);
usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
+ usba_int_enb_set(udc, int_enb | USBA_WAKE_UP);
+ udc->bias_pulse_needed = true;
DBG(DBG_BUS, "Suspend detected\n");
if (udc->gadget.speed != USB_SPEED_UNKNOWN
&& udc->driver && udc->driver->suspend) {
@@ -1632,13 +1649,15 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
}
if (status & USBA_WAKE_UP) {
- toggle_bias(1);
+ toggle_bias(udc, 1);
usba_writel(udc, INT_CLR, USBA_WAKE_UP);
+ usba_int_enb_set(udc, int_enb & ~USBA_WAKE_UP);
DBG(DBG_BUS, "Wake Up CPU detected\n");
}
if (status & USBA_END_OF_RESUME) {
usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
+ generate_bias_pulse(udc);
DBG(DBG_BUS, "Resume detected\n");
if (udc->gadget.speed != USB_SPEED_UNKNOWN
&& udc->driver && udc->driver->resume) {
@@ -1674,6 +1693,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
struct usba_ep *ep0;
usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
+ generate_bias_pulse(udc);
reset_all_endpoints(udc);
if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
@@ -1699,11 +1719,8 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
usba_ep_writel(ep0, CTL_ENB,
USBA_EPT_ENABLE | USBA_RX_SETUP);
- usba_writel(udc, INT_ENB,
- (usba_readl(udc, INT_ENB)
- | USBA_BF(EPT_INT, 1)
- | USBA_DET_SUSPEND
- | USBA_END_OF_RESUME));
+ usba_int_enb_set(udc, int_enb | USBA_BF(EPT_INT, 1) |
+ USBA_DET_SUSPEND | USBA_END_OF_RESUME);
/*
* Unclear why we hit this irregularly, e.g. in usbtest,
@@ -1736,13 +1753,13 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid)
vbus = vbus_is_present(udc);
if (vbus != udc->vbus_prev) {
if (vbus) {
- toggle_bias(1);
+ toggle_bias(udc, 1);
usba_writel(udc, CTRL, USBA_ENABLE_MASK);
- usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
+ usba_int_enb_set(udc, USBA_END_OF_RESET);
} else {
udc->gadget.speed = USB_SPEED_UNKNOWN;
reset_all_endpoints(udc);
- toggle_bias(0);
+ toggle_bias(udc, 0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
if (udc->driver->disconnect) {
spin_unlock(&udc->lock);
@@ -1788,9 +1805,9 @@ static int atmel_usba_start(struct usb_gadget *gadget,
/* If Vbus is present, enable the controller and wait for reset */
spin_lock_irqsave(&udc->lock, flags);
if (vbus_is_present(udc) && udc->vbus_prev == 0) {
- toggle_bias(1);
+ toggle_bias(udc, 1);
usba_writel(udc, CTRL, USBA_ENABLE_MASK);
- usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
+ usba_int_enb_set(udc, USBA_END_OF_RESET);
}
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1811,7 +1828,7 @@ static int atmel_usba_stop(struct usb_gadget *gadget)
spin_unlock_irqrestore(&udc->lock, flags);
/* This will also disable the DP pullup */
- toggle_bias(0);
+ toggle_bias(udc, 0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
clk_disable_unprepare(udc->hclk);
@@ -1823,6 +1840,41 @@ static int atmel_usba_stop(struct usb_gadget *gadget)
}
#ifdef CONFIG_OF
+static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
+{
+ unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
+
+ if (is_on)
+ at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
+ else
+ at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+}
+
+static void at91sam9g45_pulse_bias(struct usba_udc *udc)
+{
+ unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
+
+ at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+ at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
+}
+
+static const struct usba_udc_errata at91sam9rl_errata = {
+ .toggle_bias = at91sam9rl_toggle_bias,
+};
+
+static const struct usba_udc_errata at91sam9g45_errata = {
+ .pulse_bias = at91sam9g45_pulse_bias,
+};
+
+static const struct of_device_id atmel_udc_dt_ids[] = {
+ { .compatible = "atmel,at91sam9rl-udc", .data = &at91sam9rl_errata },
+ { .compatible = "atmel,at91sam9g45-udc", .data = &at91sam9g45_errata },
+ { .compatible = "atmel,sama5d3-udc" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
+
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
@@ -1830,10 +1882,17 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
const char *name;
enum of_gpio_flags flags;
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
struct device_node *pp;
int i, ret;
struct usba_ep *eps, *ep;
+ match = of_match_node(atmel_udc_dt_ids, np);
+ if (!match)
+ return ERR_PTR(-EINVAL);
+
+ udc->errata = match->data;
+
udc->num_ep = 0;
udc->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
@@ -2024,7 +2083,7 @@ static int usba_udc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
return ret;
}
- toggle_bias(0);
+
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
clk_disable_unprepare(pclk);
@@ -2033,6 +2092,8 @@ static int usba_udc_probe(struct platform_device *pdev)
else
udc->usba_ep = usba_udc_pdata(pdev, udc);
+ toggle_bias(udc, 0);
+
if (IS_ERR(udc->usba_ep))
return PTR_ERR(udc->usba_ep);
@@ -2092,15 +2153,6 @@ static int __exit usba_udc_remove(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_OF)
-static const struct of_device_id atmel_udc_dt_ids[] = {
- { .compatible = "atmel,at91sam9rl-udc" },
- { /* sentinel */ }
-};
-
-MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
-#endif
-
static struct platform_driver udc_driver = {
.remove = __exit_p(usba_udc_remove),
.driver = {
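Among the atmel_usba changes above, usba_ep_dequeue() now walks the endpoint queue and only trusts the request if it is actually queued there. A standalone sketch of that lookup-before-use pattern, with illustrative struct and function names:

#include <linux/list.h>
#include <linux/usb/gadget.h>

struct example_request {
        struct usb_request      req;
        struct list_head        queue;
};

static struct example_request *example_find_request(struct list_head *ep_queue,
                                                    struct usb_request *_req)
{
        struct example_request *req;

        list_for_each_entry(req, ep_queue, queue)
                if (&req->req == _req)
                        return req;

        return NULL;    /* _req was never queued here: caller returns -EINVAL */
}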
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index a70706e8cb02..497cd18836f3 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -304,6 +304,11 @@ struct usba_request {
unsigned int mapped:1;
};
+struct usba_udc_errata {
+ void (*toggle_bias)(struct usba_udc *udc, int is_on);
+ void (*pulse_bias)(struct usba_udc *udc);
+};
+
struct usba_udc {
/* Protect hw registers from concurrent modifications */
spinlock_t lock;
@@ -314,6 +319,7 @@ struct usba_udc {
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct platform_device *pdev;
+ const struct usba_udc_errata *errata;
int irq;
int vbus_pin;
int vbus_pin_inverted;
@@ -321,12 +327,15 @@ struct usba_udc {
struct clk *pclk;
struct clk *hclk;
struct usba_ep *usba_ep;
+ bool bias_pulse_needed;
u16 devstatus;
u16 test_mode;
int vbus_prev;
+ u32 int_enb_cache;
+
#ifdef CONFIG_USB_GADGET_DEBUG_FS
struct dentry *debugfs_root;
struct dentry *debugfs_regs;
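The int_enb_cache field added above backs the usba_int_enb_get()/usba_int_enb_set() helpers earlier in the file: every write to the interrupt-enable register is mirrored in a shadow variable, so the IRQ handler can mask INT_STA against the cache without an extra MMIO read. A minimal sketch of the shadow-register idea, with an illustrative register offset:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_INT_ENB 0x10            /* illustrative offset */

struct example_udc {
        void __iomem    *regs;
        u32             int_enb_cache;
};

static inline u32 example_int_enb_get(struct example_udc *udc)
{
        return udc->int_enb_cache;      /* no register read needed */
}

static inline void example_int_enb_set(struct example_udc *udc, u32 val)
{
        writel(val, udc->regs + EXAMPLE_INT_ENB);
        udc->int_enb_cache = val;       /* keep the shadow in sync */
}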
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index c6dfef8c7bbc..5c8f4effb62a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -521,7 +521,6 @@ static int bdc_remove(struct platform_device *pdev)
static struct platform_driver bdc_driver = {
.driver = {
.name = BRCM_BDC_NAME,
- .owner = THIS_MODULE
},
.probe = bdc_probe,
.remove = bdc_remove,
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ff67ceac77c4..b04980cf6dc4 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
struct bdc *bdc;
int ret = 0;
- bdc = ep->bdc;
- if (!req || !ep || !ep->usb_ep.desc)
+ if (!req || !ep->usb_ep.desc)
return -EINVAL;
+ bdc = ep->bdc;
+
req->usb_req.actual = 0;
req->usb_req.status = -EINPROGRESS;
req->epnum = ep->ep_num;
@@ -881,8 +882,8 @@ static int ep_set_halt(struct bdc_ep *ep, u32 value)
ret = bdc_ep_set_stall(bdc, ep->ep_num);
if (ret)
- dev_err(bdc->dev, "failed to %s STALL on %s\n",
- value ? "set" : "clear", ep->name);
+ dev_err(bdc->dev, "failed to set STALL on %s\n",
+ ep->name);
else
ep->flags |= BDC_EP_STALL;
} else {
@@ -890,8 +891,8 @@ static int ep_set_halt(struct bdc_ep *ep, u32 value)
dev_dbg(bdc->dev, "Before Clear\n");
ret = bdc_ep_clear_stall(bdc, ep->ep_num);
if (ret)
- dev_err(bdc->dev, "failed to %s STALL on %s\n",
- value ? "set" : "clear", ep->name);
+ dev_err(bdc->dev, "failed to clear STALL on %s\n",
+ ep->name);
else
ep->flags &= ~BDC_EP_STALL;
dev_dbg(bdc->dev, "After Clear\n");
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 3700ce70b0be..7f77db5d1278 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -454,6 +454,7 @@ static int bdc_udc_set_selfpowered(struct usb_gadget *gadget,
unsigned long flags;
dev_dbg(bdc->dev, "%s()\n", __func__);
+ gadget->is_selfpowered = (is_self != 0);
spin_lock_irqsave(&bdc->lock, flags);
if (!is_self)
bdc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 9c598801404c..8dda48445f6f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -802,6 +802,7 @@ static int dummy_set_selfpowered(struct usb_gadget *_gadget, int value)
{
struct dummy *dum;
+ _gadget->is_selfpowered = (value != 0);
dum = gadget_to_dummy_hcd(_gadget)->dum;
if (value)
dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
@@ -1924,7 +1925,9 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
memset(desc, 0, sizeof *desc);
desc->bDescriptorType = 0x2a;
desc->bDescLength = 12;
- desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM |
+ HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
desc->u.ss.DeviceRemovable = 0xffff;
@@ -1935,7 +1938,9 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
memset(desc, 0, sizeof *desc);
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM |
+ HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
desc->u.hs.DeviceRemovable[0] = 0xff;
desc->u.hs.DeviceRemovable[1] = 0xff;
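The raw 0x0001 replaced in both hub descriptors encodes individual per-port power switching with common over-current protection; spelling it out with the ch11.h names keeps the value readable. A sketch of the same construction:

#include <asm/byteorder.h>
#include <linux/usb/ch11.h>

static void example_fill_hub_characteristics(struct usb_hub_descriptor *desc)
{
        /* HUB_CHAR_INDV_PORT_LPSM (0x0001) | HUB_CHAR_COMMON_OCPM (0x0000) */
        desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
                                                HUB_CHAR_COMMON_OCPM);
}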
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 795c99c77697..e0822f1b6639 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2630,7 +2630,7 @@ static int qe_udc_remove(struct platform_device *ofdev)
struct qe_udc *udc = platform_get_drvdata(ofdev);
struct qe_ep *ep;
unsigned int size;
- DECLARE_COMPLETION(done);
+ DECLARE_COMPLETION_ONSTACK(done);
usb_del_gadget_udc(&udc->gadget);
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 2df807403f22..55fcb930f92e 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1337,7 +1337,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
/* Get device status */
- tmp = 1 << USB_DEVICE_SELF_POWERED;
+ tmp = udc->gadget.is_selfpowered;
tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
/* Get interface status */
@@ -1948,6 +1948,7 @@ static int fsl_udc_start(struct usb_gadget *g,
/* hook up the driver */
udc_controller->driver = driver;
spin_unlock_irqrestore(&udc_controller->lock, flags);
+ g->is_selfpowered = 1;
if (!IS_ERR_OR_NULL(udc_controller->transceiver)) {
/* Suspend the controller until OTG enable it */
@@ -2529,7 +2530,7 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- DECLARE_COMPLETION(done);
+ DECLARE_COMPLETION_ONSTACK(done);
if (!udc_controller)
return -ENODEV;
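Both fsl UDC remove() paths switch an on-stack completion from DECLARE_COMPLETION() to DECLARE_COMPLETION_ONSTACK(), the variant intended for automatic storage (and the one lockdep expects for stack objects). A minimal sketch of the intended usage, with illustrative helper names:

#include <linux/completion.h>

struct example_device {
        struct completion *done;        /* set by remove(), fired by release() */
};

static void example_release(struct example_device *dev)
{
        if (dev->done)
                complete(dev->done);    /* wake the waiter in example_remove() */
}

static void example_remove(struct example_device *dev)
{
        DECLARE_COMPLETION_ONSTACK(done);       /* correct for stack lifetime */

        dev->done = &done;
        example_release(dev);           /* normally happens asynchronously */
        wait_for_completion(&done);     /* block until release has run */
}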
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 34d9b7b831b3..27fd41333f71 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -191,7 +191,6 @@ struct lpc32xx_udc {
bool enabled;
bool clocked;
bool suspended;
- bool selfpowered;
int ep0state;
atomic_t enabled_ep_cnt;
wait_queue_head_t ep_disable_wait_queue;
@@ -547,7 +546,7 @@ static int proc_udc_show(struct seq_file *s, void *unused)
udc->vbus ? "present" : "off",
udc->enabled ? (udc->vbus ? "active" : "enabled") :
"disabled",
- udc->selfpowered ? "self" : "VBUS",
+ udc->gadget.is_selfpowered ? "self" : "VBUS",
udc->suspended ? ", suspended" : "",
udc->driver ? udc->driver->driver.name : "(none)");
@@ -2212,7 +2211,7 @@ static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
break; /* Not supported */
case USB_RECIP_DEVICE:
- ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
+ ep0buff = udc->gadget.is_selfpowered;
if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
break;
@@ -2498,10 +2497,7 @@ static int lpc32xx_wakeup(struct usb_gadget *gadget)
static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
- struct lpc32xx_udc *udc = to_udc(gadget);
-
- /* Always self-powered */
- udc->selfpowered = (is_on != 0);
+ gadget->is_selfpowered = (is_on != 0);
return 0;
}
@@ -2946,7 +2942,7 @@ static int lpc32xx_start(struct usb_gadget *gadget,
udc->driver = driver;
udc->gadget.dev.of_node = udc->dev->of_node;
udc->enabled = 1;
- udc->selfpowered = 1;
+ udc->gadget.is_selfpowered = 1;
udc->vbus = 0;
/* Force VBUS process once to check for cable insertion */
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 253f3df8326a..d32160d6463f 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1378,9 +1378,6 @@ static int mv_udc_start(struct usb_gadget *gadget,
}
}
- /* pullup is always on */
- mv_udc_pullup(&udc->gadget, 1);
-
/* When boot with cable attached, there will be no vbus irq occurred */
if (udc->qwork)
queue_work(udc->qwork, &udc->vbus_work);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index d20de1fab08e..195baf3e1fcd 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1132,13 +1132,10 @@ net2272_wakeup(struct usb_gadget *_gadget)
static int
net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
{
- struct net2272 *dev;
-
if (!_gadget)
return -ENODEV;
- dev = container_of(_gadget, struct net2272, gadget);
- dev->is_selfpowered = value;
+ _gadget->is_selfpowered = (value != 0);
return 0;
}
@@ -1844,7 +1841,7 @@ net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
case USB_RECIP_DEVICE:
if (u.r.wLength > 2)
goto do_stall;
- if (dev->is_selfpowered)
+ if (dev->gadget.is_selfpowered)
status = (1 << USB_DEVICE_SELF_POWERED);
/* don't bother with a request object! */
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
index e59505789359..127ab03fcde3 100644
--- a/drivers/usb/gadget/udc/net2272.h
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -458,7 +458,6 @@ struct net2272 {
struct usb_gadget_driver *driver;
unsigned protocol_stall:1,
softconnect:1,
- is_selfpowered:1,
wakeup:1,
dma_eot_polarity:1,
dma_dack_polarity:1,
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index d6411e0a8e03..d2c0bf65e345 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -12,11 +12,7 @@
* the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
* as well as Gadget Zero and Gadgetfs.
*
- * DMA is enabled by default. Drivers using transfer queues might use
- * DMA chaining to remove IRQ latencies between transfers. (Except when
- * short OUT transfers happen.) Drivers can use the req->no_interrupt
- * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
- * and DMA chaining is enabled.
+ * DMA is enabled by default.
*
* MSI is enabled by default. The legacy IRQ is used if MSI couldn't
* be enabled.
@@ -84,23 +80,6 @@ static const char *const ep_name[] = {
"ep-e", "ep-f", "ep-g", "ep-h",
};
-/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
- * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
- *
- * The net2280 DMA engines are not tightly integrated with their FIFOs;
- * not all cases are (yet) handled well in this driver or the silicon.
- * Some gadget drivers work better with the dma support here than others.
- * These two parameters let you use PIO or more aggressive DMA.
- */
-static bool use_dma = true;
-static bool use_dma_chaining;
-static bool use_msi = true;
-
-/* "modprobe net2280 use_dma=n" etc */
-module_param(use_dma, bool, 0444);
-module_param(use_dma_chaining, bool, 0444);
-module_param(use_msi, bool, 0444);
-
/* mode 0 == ep-{a,b,c,d} 1K fifo each
* mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
* mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
@@ -120,11 +99,6 @@ static bool enable_suspend;
/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);
-/* force full-speed operation */
-static bool full_speed;
-module_param(full_speed, bool, 0444);
-MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
-
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
static char *type_string(u8 bmAttributes)
@@ -202,15 +176,6 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
/* set speed-dependent max packet; may kick in high bandwidth */
set_max_speed(ep, max);
- /* FIFO lines can't go to different packets. PIO is ok, so
- * use it instead of troublesome (non-bulk) multi-packet DMA.
- */
- if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
- ep_dbg(ep->dev, "%s, no dma for maxpacket %d\n",
- ep->ep.name, ep->ep.maxpacket);
- ep->dma = NULL;
- }
-
/* set type, direction, address; reset fifo counters */
writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
@@ -478,7 +443,7 @@ static int net2280_disable(struct usb_ep *_ep)
/* synch memory views with the device */
(void)readl(&ep->cfg->ep_cfg);
- if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
+ if (!ep->dma && ep->num >= 1 && ep->num <= 4)
ep->dma = &ep->dev->dma[ep->num - 1];
spin_unlock_irqrestore(&ep->dev->lock, flags);
@@ -610,9 +575,15 @@ static void out_flush(struct net2280_ep *ep)
u32 __iomem *statp;
u32 tmp;
- ASSERT_OUT_NAKING(ep);
-
statp = &ep->regs->ep_stat;
+
+ tmp = readl(statp);
+ if (tmp & BIT(NAK_OUT_PACKETS)) {
+ ep_dbg(ep->dev, "%s %s %08x !NAK\n",
+ ep->ep.name, __func__, tmp);
+ writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+ }
+
writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
BIT(DATA_PACKET_RECEIVED_INTERRUPT),
statp);
@@ -747,8 +718,7 @@ static void fill_dma_desc(struct net2280_ep *ep,
req->valid = valid;
if (valid)
dmacount |= BIT(VALID_BIT);
- if (likely(!req->req.no_interrupt || !use_dma_chaining))
- dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
+ dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
/* td->dmadesc = previously set by caller */
td->dmaaddr = cpu_to_le32 (req->req.dma);
@@ -862,27 +832,11 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
req->td->dmadesc = cpu_to_le32 (ep->td_dma);
fill_dma_desc(ep, req, 1);
- if (!use_dma_chaining)
- req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
+ req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
start_queue(ep, tmp, req->td_dma);
}
-static inline void resume_dma(struct net2280_ep *ep)
-{
- writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl);
-
- ep->dma_started = true;
-}
-
-static inline void ep_stop_dma(struct net2280_ep *ep)
-{
- writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl);
- spin_stop_dma(ep->dma);
-
- ep->dma_started = false;
-}
-
static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
@@ -973,10 +927,8 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return ret;
}
-#if 0
ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
_ep->name, _req, _req->length, _req->buf);
-#endif
spin_lock_irqsave(&dev->lock, flags);
@@ -984,24 +936,12 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
_req->actual = 0;
/* kickstart this i/o queue? */
- if (list_empty(&ep->queue) && !ep->stopped) {
- /* DMA request while EP halted */
- if (ep->dma &&
- (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
- (dev->quirks & PLX_SUPERSPEED)) {
- int valid = 1;
- if (ep->is_in) {
- int expect;
- expect = likely(req->req.zero ||
- ((req->req.length %
- ep->ep.maxpacket) != 0));
- if (expect != ep->in_fifo_validate)
- valid = 0;
- }
- queue_dma(ep, req, valid);
- }
+ if (list_empty(&ep->queue) && !ep->stopped &&
+ !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
+ (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
+
/* use DMA if the endpoint supports it, else pio */
- else if (ep->dma)
+ if (ep->dma)
start_dma(ep, req);
else {
/* maybe there's no control data, just status ack */
@@ -1084,8 +1024,6 @@ dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
done(ep, req, status);
}
-static void restart_dma(struct net2280_ep *ep);
-
static void scan_dma_completions(struct net2280_ep *ep)
{
/* only look at descriptors that were "naturally" retired,
@@ -1117,9 +1055,8 @@ static void scan_dma_completions(struct net2280_ep *ep)
dma_done(ep, req, tmp, 0);
break;
} else if (!ep->is_in &&
- (req->req.length % ep->ep.maxpacket) != 0) {
- if (ep->dev->quirks & PLX_SUPERSPEED)
- return dma_done(ep, req, tmp, 0);
+ (req->req.length % ep->ep.maxpacket) &&
+ !(ep->dev->quirks & PLX_SUPERSPEED)) {
tmp = readl(&ep->regs->ep_stat);
/* AVOID TROUBLE HERE by not issuing short reads from
@@ -1150,67 +1087,15 @@ static void scan_dma_completions(struct net2280_ep *ep)
static void restart_dma(struct net2280_ep *ep)
{
struct net2280_request *req;
- u32 dmactl = dmactl_default;
if (ep->stopped)
return;
req = list_entry(ep->queue.next, struct net2280_request, queue);
- if (!use_dma_chaining) {
- start_dma(ep, req);
- return;
- }
-
- /* the 2280 will be processing the queue unless queue hiccups after
- * the previous transfer:
- * IN: wanted automagic zlp, head doesn't (or vice versa)
- * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
- * OUT: was "usb-short", we must restart.
- */
- if (ep->is_in && !req->valid) {
- struct net2280_request *entry, *prev = NULL;
- int reqmode, done = 0;
-
- ep_dbg(ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
- ep->in_fifo_validate = likely(req->req.zero ||
- (req->req.length % ep->ep.maxpacket) != 0);
- if (ep->in_fifo_validate)
- dmactl |= BIT(DMA_FIFO_VALIDATE);
- list_for_each_entry(entry, &ep->queue, queue) {
- __le32 dmacount;
-
- if (entry == req)
- continue;
- dmacount = entry->td->dmacount;
- if (!done) {
- reqmode = likely(entry->req.zero ||
- (entry->req.length % ep->ep.maxpacket));
- if (reqmode == ep->in_fifo_validate) {
- entry->valid = 1;
- dmacount |= valid_bit;
- entry->td->dmacount = dmacount;
- prev = entry;
- continue;
- } else {
- /* force a hiccup */
- prev->td->dmacount |= dma_done_ie;
- done = 1;
- }
- }
-
- /* walk the rest of the queue so unlinks behave */
- entry->valid = 0;
- dmacount &= ~valid_bit;
- entry->td->dmacount = dmacount;
- prev = entry;
- }
- }
-
- writel(0, &ep->dma->dmactl);
- start_queue(ep, dmactl, req->td_dma);
+ start_dma(ep, req);
}
-static void abort_dma_228x(struct net2280_ep *ep)
+static void abort_dma(struct net2280_ep *ep)
{
/* abort the current transfer */
if (likely(!list_empty(&ep->queue))) {
@@ -1222,19 +1107,6 @@ static void abort_dma_228x(struct net2280_ep *ep)
scan_dma_completions(ep);
}
-static void abort_dma_338x(struct net2280_ep *ep)
-{
- writel(BIT(DMA_ABORT), &ep->dma->dmastat);
- spin_stop_dma(ep->dma);
-}
-
-static void abort_dma(struct net2280_ep *ep)
-{
- if (ep->dev->quirks & PLX_LEGACY)
- return abort_dma_228x(ep);
- return abort_dma_338x(ep);
-}
-
/* dequeue ALL requests */
static void nuke(struct net2280_ep *ep)
{
@@ -1306,25 +1178,6 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
done(ep, req, -ECONNRESET);
}
req = NULL;
-
- /* patch up hardware chaining data */
- } else if (ep->dma && use_dma_chaining) {
- if (req->queue.prev == ep->queue.next) {
- writel(le32_to_cpu(req->td->dmadesc),
- &ep->dma->dmadesc);
- if (req->td->dmacount & dma_done_ie)
- writel(readl(&ep->dma->dmacount) |
- le32_to_cpu(dma_done_ie),
- &ep->dma->dmacount);
- } else {
- struct net2280_request *prev;
-
- prev = list_entry(req->queue.prev,
- struct net2280_request, queue);
- prev->td->dmadesc = req->td->dmadesc;
- if (req->td->dmacount & dma_done_ie)
- prev->td->dmacount |= dma_done_ie;
- }
}
if (req)
@@ -1512,10 +1365,10 @@ static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
tmp = readl(&dev->usb->usbctl);
if (value) {
tmp |= BIT(SELF_POWERED_STATUS);
- dev->selfpowered = 1;
+ _gadget->is_selfpowered = 1;
} else {
tmp &= ~BIT(SELF_POWERED_STATUS);
- dev->selfpowered = 0;
+ _gadget->is_selfpowered = 0;
}
writel(tmp, &dev->usb->usbctl);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -1604,14 +1457,11 @@ static ssize_t registers_show(struct device *_dev,
/* Main Control Registers */
t = scnprintf(next, size, "%s version " DRIVER_VERSION
- ", chiprev %04x, dma %s\n\n"
+ ", chiprev %04x\n\n"
"devinit %03x fifoctl %08x gadget '%s'\n"
"pci irqenb0 %02x irqenb1 %08x "
"irqstat0 %04x irqstat1 %08x\n",
driver_name, dev->chiprev,
- use_dma
- ? (use_dma_chaining ? "chaining" : "enabled")
- : "disabled",
readl(&dev->regs->devinit),
readl(&dev->regs->fifoctl),
s,
@@ -1913,76 +1763,73 @@ static void defect7374_disable_data_eps(struct net2280 *dev)
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
u32 tmp = 0, tmp_reg;
- u32 fsmvalue, scratch;
+ u32 scratch;
int i;
unsigned char ep_sel;
scratch = get_idx_reg(dev->regs, SCRATCH);
- fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
+
+ WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
+ == DEFECT7374_FSM_SS_CONTROL_READ);
+
scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
- /*See if firmware needs to set up for workaround*/
- if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
- ep_warn(dev, "Operate Defect 7374 workaround soft this time");
- ep_warn(dev, "It will operate on cold-reboot and SS connect");
-
- /*GPEPs:*/
- tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
- (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
- ((dev->enhanced_mode) ?
- BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
- BIT(IN_ENDPOINT_ENABLE));
-
- for (i = 1; i < 5; i++)
- writel(tmp, &dev->ep[i].cfg->ep_cfg);
-
- /* CSRIN, PCIIN, STATIN, RCIN*/
- tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
- writel(tmp, &dev->dep[1].dep_cfg);
- writel(tmp, &dev->dep[3].dep_cfg);
- writel(tmp, &dev->dep[4].dep_cfg);
- writel(tmp, &dev->dep[5].dep_cfg);
-
- /*Implemented for development and debug.
- * Can be refined/tuned later.*/
- for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
- /* Select an endpoint for subsequent operations: */
- tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
- writel(((tmp_reg & ~0x1f) | ep_sel),
- &dev->plregs->pl_ep_ctrl);
-
- if (ep_sel == 1) {
- tmp =
- (readl(&dev->plregs->pl_ep_ctrl) |
- BIT(CLEAR_ACK_ERROR_CODE) | 0);
- writel(tmp, &dev->plregs->pl_ep_ctrl);
- continue;
- }
+ ep_warn(dev, "Operate Defect 7374 workaround soft this time");
+ ep_warn(dev, "It will operate on cold-reboot and SS connect");
- if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
- ep_sel == 18 || ep_sel == 20)
- continue;
+ /*GPEPs:*/
+ tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
+ (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
+ ((dev->enhanced_mode) ?
+ BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
+ BIT(IN_ENDPOINT_ENABLE));
- tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
- BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
- writel(tmp, &dev->plregs->pl_ep_cfg_4);
+ for (i = 1; i < 5; i++)
+ writel(tmp, &dev->ep[i].cfg->ep_cfg);
- tmp = readl(&dev->plregs->pl_ep_ctrl) &
- ~BIT(EP_INITIALIZED);
- writel(tmp, &dev->plregs->pl_ep_ctrl);
+ /* CSRIN, PCIIN, STATIN, RCIN*/
+ tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
+ writel(tmp, &dev->dep[1].dep_cfg);
+ writel(tmp, &dev->dep[3].dep_cfg);
+ writel(tmp, &dev->dep[4].dep_cfg);
+ writel(tmp, &dev->dep[5].dep_cfg);
+
+ /*Implemented for development and debug.
+ * Can be refined/tuned later.*/
+ for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
+ /* Select an endpoint for subsequent operations: */
+ tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
+ writel(((tmp_reg & ~0x1f) | ep_sel),
+ &dev->plregs->pl_ep_ctrl);
+ if (ep_sel == 1) {
+ tmp =
+ (readl(&dev->plregs->pl_ep_ctrl) |
+ BIT(CLEAR_ACK_ERROR_CODE) | 0);
+ writel(tmp, &dev->plregs->pl_ep_ctrl);
+ continue;
}
- /* Set FSM to focus on the first Control Read:
- * - Tip: Connection speed is known upon the first
- * setup request.*/
- scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
- set_idx_reg(dev->regs, SCRATCH, scratch);
+ if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
+ ep_sel == 18 || ep_sel == 20)
+ continue;
+
+ tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
+ BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
+ writel(tmp, &dev->plregs->pl_ep_cfg_4);
+
+ tmp = readl(&dev->plregs->pl_ep_ctrl) &
+ ~BIT(EP_INITIALIZED);
+ writel(tmp, &dev->plregs->pl_ep_ctrl);
- } else{
- ep_warn(dev, "Defect 7374 workaround soft will NOT operate");
- ep_warn(dev, "It will operate on cold-reboot and SS connect");
}
+
+ /* Set FSM to focus on the first Control Read:
+ * - Tip: Connection speed is known upon the first
+ * setup request.*/
+ scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
+ set_idx_reg(dev->regs, SCRATCH, scratch);
+
}
/* keeping it simple:
@@ -2033,21 +1880,13 @@ static void usb_reset_228x(struct net2280 *dev)
static void usb_reset_338x(struct net2280 *dev)
{
u32 tmp;
- u32 fsmvalue;
dev->gadget.speed = USB_SPEED_UNKNOWN;
(void)readl(&dev->usb->usbctl);
net2280_led_init(dev);
- fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
- (0xf << DEFECT7374_FSM_FIELD);
-
- /* See if firmware needs to set up for workaround: */
- if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
- ep_info(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__,
- fsmvalue);
- } else {
+ if (dev->bug7734_patched) {
/* disable automatic responses, and irqs */
writel(0, &dev->usb->stdrsp);
writel(0, &dev->regs->pciirqenb0);
@@ -2064,7 +1903,7 @@ static void usb_reset_338x(struct net2280 *dev)
writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);
- if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
+ if (dev->bug7734_patched) {
/* reset, and enable pci */
tmp = readl(&dev->regs->devinit) |
BIT(PCI_ENABLE) |
@@ -2093,10 +1932,6 @@ static void usb_reset(struct net2280 *dev)
static void usb_reinit_228x(struct net2280 *dev)
{
u32 tmp;
- int init_dma;
-
- /* use_dma changes are ignored till next device re-init */
- init_dma = use_dma;
/* basic endpoint init */
for (tmp = 0; tmp < 7; tmp++) {
@@ -2108,8 +1943,7 @@ static void usb_reinit_228x(struct net2280 *dev)
if (tmp > 0 && tmp <= 4) {
ep->fifo_size = 1024;
- if (init_dma)
- ep->dma = &dev->dma[tmp - 1];
+ ep->dma = &dev->dma[tmp - 1];
} else
ep->fifo_size = 64;
ep->regs = &dev->epregs[tmp];
@@ -2133,17 +1967,12 @@ static void usb_reinit_228x(struct net2280 *dev)
static void usb_reinit_338x(struct net2280 *dev)
{
- int init_dma;
int i;
u32 tmp, val;
- u32 fsmvalue;
static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
0x00, 0xC0, 0x00, 0xC0 };
- /* use_dma changes are ignored till next device re-init */
- init_dma = use_dma;
-
/* basic endpoint init */
for (i = 0; i < dev->n_ep; i++) {
struct net2280_ep *ep = &dev->ep[i];
@@ -2152,7 +1981,7 @@ static void usb_reinit_338x(struct net2280 *dev)
ep->dev = dev;
ep->num = i;
- if (i > 0 && i <= 4 && init_dma)
+ if (i > 0 && i <= 4)
ep->dma = &dev->dma[i - 1];
if (dev->enhanced_mode) {
@@ -2177,14 +2006,7 @@ static void usb_reinit_338x(struct net2280 *dev)
dev->ep[0].stopped = 0;
/* Link layer set up */
- fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
- (0xf << DEFECT7374_FSM_FIELD);
-
- /* See if driver needs to set up for workaround: */
- if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
- ep_info(dev, "%s: Defect 7374 FsmValue %08x\n",
- __func__, fsmvalue);
- else {
+ if (dev->bug7734_patched) {
tmp = readl(&dev->usb_ext->usbctl2) &
~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
writel(tmp, &dev->usb_ext->usbctl2);
@@ -2291,15 +2113,8 @@ static void ep0_start_228x(struct net2280 *dev)
static void ep0_start_338x(struct net2280 *dev)
{
- u32 fsmvalue;
-
- fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
- (0xf << DEFECT7374_FSM_FIELD);
- if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
- ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", __func__,
- fsmvalue);
- else
+ if (dev->bug7734_patched)
writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
BIT(SET_EP_HIDE_STATUS_PHASE),
&dev->epregs[0].ep_rsp);
@@ -2382,16 +2197,12 @@ static int net2280_start(struct usb_gadget *_gadget,
if (retval)
goto err_func;
- /* Enable force-full-speed testing mode, if desired */
- if (full_speed && (dev->quirks & PLX_LEGACY))
- writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag);
-
- /* ... then enable host detection and ep0; and we're ready
+ /* enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
*/
net2280_led_active(dev, 1);
- if (dev->quirks & PLX_SUPERSPEED)
+ if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
defect7374_enable_data_eps_zero(dev);
ep0_start(dev);
@@ -2444,10 +2255,6 @@ static int net2280_stop(struct usb_gadget *_gadget)
net2280_led_active(dev, 0);
- /* Disable full-speed test mode */
- if (dev->quirks & PLX_LEGACY)
- writel(0, &dev->usb->xcvrdiag);
-
device_remove_file(&dev->pdev->dev, &dev_attr_function);
device_remove_file(&dev->pdev->dev, &dev_attr_queues);
@@ -2478,10 +2285,10 @@ static void handle_ep_small(struct net2280_ep *ep)
/* ack all, and handle what we care about */
t = readl(&ep->regs->ep_stat);
ep->irqs++;
-#if 0
+
ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
- ep->ep.name, t, req ? &req->req : 0);
-#endif
+ ep->ep.name, t, req ? &req->req : NULL);
+
if (!ep->is_in || (ep->dev->quirks & PLX_2280))
writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
else
@@ -2717,6 +2524,7 @@ static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
* run after the next USB connection.
*/
scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
+ dev->bug7734_patched = 1;
goto restore_data_eps;
}
@@ -2730,6 +2538,7 @@ static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
+ dev->bug7734_patched = 1;
break;
}
@@ -2766,80 +2575,19 @@ restore_data_eps:
return;
}
-static void ep_stall(struct net2280_ep *ep, int stall)
+static void ep_clear_seqnum(struct net2280_ep *ep)
{
struct net2280 *dev = ep->dev;
u32 val;
static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
- if (stall) {
- writel(BIT(SET_ENDPOINT_HALT) |
- /* BIT(SET_NAK_PACKETS) | */
- BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
- &ep->regs->ep_rsp);
- ep->is_halt = 1;
- } else {
- if (dev->gadget.speed == USB_SPEED_SUPER) {
- /*
- * Workaround for SS SeqNum not cleared via
- * Endpoint Halt (Clear) bit. select endpoint
- */
- val = readl(&dev->plregs->pl_ep_ctrl);
- val = (val & ~0x1f) | ep_pl[ep->num];
- writel(val, &dev->plregs->pl_ep_ctrl);
-
- val |= BIT(SEQUENCE_NUMBER_RESET);
- writel(val, &dev->plregs->pl_ep_ctrl);
- }
- val = readl(&ep->regs->ep_rsp);
- val |= BIT(CLEAR_ENDPOINT_HALT) |
- BIT(CLEAR_ENDPOINT_TOGGLE);
- writel(val,
- /* | BIT(CLEAR_NAK_PACKETS),*/
- &ep->regs->ep_rsp);
- ep->is_halt = 0;
- val = readl(&ep->regs->ep_rsp);
- }
-}
-
-static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged)
-{
- /* set/clear, then synch memory views with the device */
- if (value) {
- ep->stopped = 1;
- if (ep->num == 0)
- ep->dev->protocol_stall = 1;
- else {
- if (ep->dma)
- ep_stop_dma(ep);
- ep_stall(ep, true);
- }
-
- if (wedged)
- ep->wedged = 1;
- } else {
- ep->stopped = 0;
- ep->wedged = 0;
-
- ep_stall(ep, false);
+ val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
+ val |= ep_pl[ep->num];
+ writel(val, &dev->plregs->pl_ep_ctrl);
+ val |= BIT(SEQUENCE_NUMBER_RESET);
+ writel(val, &dev->plregs->pl_ep_ctrl);
- /* Flush the queue */
- if (!list_empty(&ep->queue)) {
- struct net2280_request *req =
- list_entry(ep->queue.next, struct net2280_request,
- queue);
- if (ep->dma)
- resume_dma(ep);
- else {
- if (ep->is_in)
- write_fifo(ep, &req->req);
- else {
- if (read_fifo(ep, req))
- done(ep, req, 0);
- }
- }
- }
- }
+ return;
}
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
@@ -2863,7 +2611,7 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
switch (r.bRequestType) {
case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
status = dev->wakeup_enable ? 0x02 : 0x00;
- if (dev->selfpowered)
+ if (dev->gadget.is_selfpowered)
status |= BIT(0);
status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
dev->ltm_enable << 4);
@@ -2940,7 +2688,12 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
if (w_value != USB_ENDPOINT_HALT)
goto do_stall3;
ep_vdbg(dev, "%s clear halt\n", e->ep.name);
- ep_stall(e, false);
+ /*
+ * Workaround for SS SeqNum not cleared via
+ * Endpoint Halt (Clear) bit. select endpoint
+ */
+ ep_clear_seqnum(e);
+ clear_halt(e);
if (!list_empty(&e->queue) && e->td_dma)
restart_dma(e);
allow_status(ep);
@@ -2998,7 +2751,14 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
e = get_ep_by_addr(dev, w_index);
if (!e || (w_value != USB_ENDPOINT_HALT))
goto do_stall3;
- ep_stdrsp(e, true, false);
+ ep->stopped = 1;
+ if (ep->num == 0)
+ ep->dev->protocol_stall = 1;
+ else {
+ if (ep->dma)
+ abort_dma(ep);
+ set_halt(ep);
+ }
allow_status_338x(ep);
break;
@@ -3026,7 +2786,7 @@ do_stall3:
r.bRequestType, r.bRequest, tmp);
dev->protocol_stall = 1;
/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
- ep_stall(ep, true);
+ set_halt(ep);
}
next_endpoints3:
@@ -3091,9 +2851,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
}
ep->stopped = 0;
dev->protocol_stall = 0;
- if (dev->quirks & PLX_SUPERSPEED)
- ep->is_halt = 0;
- else{
+ if (!(dev->quirks & PLX_SUPERSPEED)) {
if (ep->dev->quirks & PLX_2280)
tmp = BIT(FIFO_OVERFLOW) |
BIT(FIFO_UNDERFLOW);
@@ -3120,7 +2878,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
cpu_to_le32s(&u.raw[0]);
cpu_to_le32s(&u.raw[1]);
- if (dev->quirks & PLX_SUPERSPEED)
+ if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
defect7374_workaround(dev, u.r);
tmp = 0;
@@ -3423,17 +3181,12 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
continue;
}
- /* chaining should stop on abort, short OUT from fifo,
- * or (stat0 codepath) short OUT transfer.
- */
- if (!use_dma_chaining) {
- if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
- ep_dbg(ep->dev, "%s no xact done? %08x\n",
- ep->ep.name, tmp);
- continue;
- }
- stop_dma(ep->dma);
+ if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
+ ep_dbg(ep->dev, "%s no xact done? %08x\n",
+ ep->ep.name, tmp);
+ continue;
}
+ stop_dma(ep->dma);
/* OUT transfers terminate when the data from the
* host is in our memory. Process whatever's done.
@@ -3448,30 +3201,9 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
scan_dma_completions(ep);
/* disable dma on inactive queues; else maybe restart */
- if (list_empty(&ep->queue)) {
- if (use_dma_chaining)
- stop_dma(ep->dma);
- } else {
+ if (!list_empty(&ep->queue)) {
tmp = readl(&dma->dmactl);
- if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0)
- restart_dma(ep);
- else if (ep->is_in && use_dma_chaining) {
- struct net2280_request *req;
- __le32 dmacount;
-
- /* the descriptor at the head of the chain
- * may still have VALID_BIT clear; that's
- * used to trigger changing DMA_FIFO_VALIDATE
- * (affects automagic zlp writes).
- */
- req = list_entry(ep->queue.next,
- struct net2280_request, queue);
- dmacount = req->td->dmacount;
- dmacount &= cpu_to_le32(BIT(VALID_BIT) |
- DMA_BYTE_COUNT_MASK);
- if (dmacount && (dmacount & valid_bit) == 0)
- restart_dma(ep);
- }
+ restart_dma(ep);
}
ep->irqs++;
}
@@ -3556,7 +3288,7 @@ static void net2280_remove(struct pci_dev *pdev)
}
if (dev->got_irq)
free_irq(pdev->irq, dev);
- if (use_msi && dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_SUPERSPEED)
pci_disable_msi(pdev);
if (dev->regs)
iounmap(dev->regs);
@@ -3581,9 +3313,6 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem *base = NULL;
int retval, i;
- if (!use_dma)
- use_dma_chaining = 0;
-
/* alloc, and start init */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
@@ -3663,9 +3392,12 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
(0xf << DEFECT7374_FSM_FIELD);
/* See if firmware needs to set up for workaround: */
- if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ)
+ if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
+ dev->bug7734_patched = 1;
writel(0, &dev->usb->usbctl);
- } else{
+ } else
+ dev->bug7734_patched = 0;
+ } else {
dev->enhanced_mode = 0;
dev->n_ep = 7;
/* put into initial config, link up all endpoints */
@@ -3682,7 +3414,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto done;
}
- if (use_msi && (dev->quirks & PLX_SUPERSPEED))
+ if (dev->quirks & PLX_SUPERSPEED)
if (pci_enable_msi(pdev))
ep_err(dev, "Failed to enable MSI mode\n");
@@ -3741,9 +3473,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ep_info(dev, "%s\n", driver_desc);
ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
pdev->irq, base, dev->chiprev);
- ep_info(dev, "version: " DRIVER_VERSION "; dma %s %s\n",
- use_dma ? (use_dma_chaining ? "chaining" : "enabled")
- : "disabled",
+ ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
dev->enhanced_mode ? "enhanced mode" : "legacy mode");
retval = device_create_file(&pdev->dev, &dev_attr_registers);
if (retval)
@@ -3776,9 +3506,6 @@ static void net2280_shutdown(struct pci_dev *pdev)
/* disable the pullup so the host will think we're gone */
writel(0, &dev->usb->usbctl);
- /* Disable full-speed test mode */
- if (dev->quirks & PLX_LEGACY)
- writel(0, &dev->usb->xcvrdiag);
}
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 03f15242d794..ac8d5a20a378 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -100,7 +100,6 @@ struct net2280_ep {
dma_addr_t td_dma; /* of dummy */
struct net2280 *dev;
unsigned long irqs;
- unsigned is_halt:1, dma_started:1;
/* analogous to a host-side qh */
struct list_head queue;
@@ -126,7 +125,7 @@ static inline void allow_status(struct net2280_ep *ep)
ep->stopped = 1;
}
-static void allow_status_338x(struct net2280_ep *ep)
+static inline void allow_status_338x(struct net2280_ep *ep)
{
/*
* Control Status Phase Handshake was set by the chip when the setup
@@ -165,8 +164,8 @@ struct net2280 {
u2_enable:1,
ltm_enable:1,
wakeup_enable:1,
- selfpowered:1,
- addressed_state:1;
+ addressed_state:1,
+ bug7734_patched:1;
u16 chiprev;
int enhanced_mode;
int n_ep;
@@ -356,23 +355,6 @@ static inline void start_out_naking(struct net2280_ep *ep)
readl(&ep->regs->ep_rsp);
}
-#ifdef DEBUG
-static inline void assert_out_naking(struct net2280_ep *ep, const char *where)
-{
- u32 tmp = readl(&ep->regs->ep_stat);
-
- if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
- ep_dbg(ep->dev, "%s %s %08x !NAK\n",
- ep->ep.name, where, tmp);
- writel(BIT(SET_NAK_OUT_PACKETS),
- &ep->regs->ep_rsp);
- }
-}
-#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
-#else
-#define ASSERT_OUT_NAKING(ep) do {} while (0)
-#endif
-
static inline void stop_out_naking(struct net2280_ep *ep)
{
u32 tmp;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 288c087220a8..e2fcdb8e5596 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1171,6 +1171,7 @@ omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
unsigned long flags;
u16 syscon1;
+ gadget->is_selfpowered = (is_selfpowered != 0);
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
syscon1 = omap_readw(UDC_SYSCON1);
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 1c7379ac2379..613547f07828 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1161,6 +1161,7 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
if (!gadget)
return -EINVAL;
+ gadget->is_selfpowered = (value != 0);
dev = container_of(gadget, struct pch_udc_dev, gadget);
if (value)
pch_udc_set_selfpowered(dev);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 8550b2d5db32..f6cbe667ce39 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1272,7 +1272,6 @@ static int pxa25x_udc_start(struct usb_gadget *g,
goto bind_fail;
}
- pullup(dev);
dump_state(dev);
return 0;
bind_fail:
@@ -1339,7 +1338,6 @@ static int pxa25x_udc_stop(struct usb_gadget*g)
local_irq_disable();
dev->pullup = 0;
- pullup(dev);
stop_activity(dev, NULL);
local_irq_enable();
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index c61a896061fa..6a855fc9bd84 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1809,7 +1809,6 @@ static int pxa27x_udc_start(struct usb_gadget *g,
/* first hook up the driver ... */
udc->driver = driver;
- dplus_pullup(udc, 1);
if (!IS_ERR_OR_NULL(udc->transceiver)) {
retval = otg_set_peripheral(udc->transceiver->otg,
@@ -1862,7 +1861,6 @@ static int pxa27x_udc_stop(struct usb_gadget *g)
stop_activity(udc, NULL);
udc_disable(udc);
- dplus_pullup(udc, 0);
udc->driver = NULL;
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 06870da0b988..2495fe9c95c5 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1803,6 +1803,7 @@ static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
{
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+ gadget->is_selfpowered = (is_self != 0);
if (is_self)
r8a66597->device_status |= 1 << USB_DEVICE_SELF_POWERED;
else
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 824cf12e9add..b808951491cc 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -238,14 +238,6 @@ static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base)
S3C2410_UDC_EP0_CSR_REG);
}
-static inline void s3c2410_udc_set_ep0_sse_out(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY
- | S3C2410_UDC_EP0_CSR_SSE),
- S3C2410_UDC_EP0_CSR_REG);
-}
-
static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base)
{
udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
@@ -291,18 +283,6 @@ static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
}
}
-static inline void s3c2410_udc_clear_ep_state(struct s3c2410_udc *dev)
-{
- unsigned i;
-
- /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
- * fifos, and pending transactions mustn't be continued in any case.
- */
-
- for (i = 1; i < S3C2410_ENDPOINTS; i++)
- s3c2410_udc_nuke(dev, &dev->ep[i], -ECONNABORTED);
-}
-
static inline int s3c2410_udc_fifo_count_out(void)
{
int tmp;
@@ -1454,6 +1434,7 @@ static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+ gadget->is_selfpowered = (value != 0);
if (value)
udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
else
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index e31d574d8860..5a81cb086b99 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -564,6 +564,7 @@ static USB_UDC_ATTR(is_a_peripheral);
static USB_UDC_ATTR(b_hnp_enable);
static USB_UDC_ATTR(a_hnp_support);
static USB_UDC_ATTR(a_alt_hnp_support);
+static USB_UDC_ATTR(is_selfpowered);
static struct attribute *usb_udc_attrs[] = {
&dev_attr_srp.attr,
@@ -577,6 +578,7 @@ static struct attribute *usb_udc_attrs[] = {
&dev_attr_b_hnp_enable.attr,
&dev_attr_a_hnp_support.attr,
&dev_attr_a_alt_hnp_support.attr,
+ &dev_attr_is_selfpowered.attr,
NULL,
};
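The new is_selfpowered entry is generated by the driver's USB_UDC_ATTR() macro like the flags above it and simply mirrors gadget->is_selfpowered into sysfs. A rough sketch of what such a read-only attribute boils down to; the dev_get_drvdata() lookup is an assumption for illustration, while the in-tree macro resolves the gadget through struct usb_udc:

#include <linux/device.h>
#include <linux/usb/gadget.h>

static ssize_t is_selfpowered_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        /* assumption: the gadget pointer was stashed with dev_set_drvdata() */
        struct usb_gadget *gadget = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", gadget->is_selfpowered);
}
static DEVICE_ATTR_RO(is_selfpowered);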
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 1eac56fc384d..dd3e9fd31b80 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2131,8 +2131,8 @@ static int xudc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, udc);
- dev_vdbg(&pdev->dev, "%s at 0x%08X mapped to 0x%08X %s\n",
- driver_name, (u32)res->start, (u32 __force)udc->addr,
+ dev_vdbg(&pdev->dev, "%s at 0x%08X mapped to %p %s\n",
+ driver_name, (u32)res->start, udc->addr,
udc->dma_enabled ? "with DMA" : "without DMA");
return 0;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index fafc628480e0..5ad60e46dc2b 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -219,7 +219,7 @@ config USB_EHCI_TEGRA
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
- depends on PPC_OF
+ depends on PPC
default y
---help---
Enables support for the USB controller present on the PowerPC
@@ -331,20 +331,6 @@ config USB_ISP116X_HCD
To compile this driver as a module, choose M here: the
module will be called isp116x-hcd.
-config USB_ISP1760_HCD
- tristate "ISP 1760 HCD support"
- ---help---
- The ISP1760 chip is a USB 2.0 host controller.
-
- This driver does not support isochronous transfers or OTG.
- This USB controller is usually attached to a non-DMA-Master
- capable bus. NXP's eval kit brings this chip on PCI card
- where the chip itself is behind a PLB to simulate such
- a bus.
-
- To compile this driver as a module, choose M here: the
- module will be called isp1760.
-
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
---help---
@@ -494,7 +480,7 @@ config USB_OHCI_ATH79
config USB_OHCI_HCD_PPC_OF_BE
bool "OHCI support for OF platform bus (big endian)"
- depends on PPC_OF
+ depends on PPC
select USB_OHCI_BIG_ENDIAN_DESC
select USB_OHCI_BIG_ENDIAN_MMIO
---help---
@@ -503,7 +489,7 @@ config USB_OHCI_HCD_PPC_OF_BE
config USB_OHCI_HCD_PPC_OF_LE
bool "OHCI support for OF platform bus (little endian)"
- depends on PPC_OF
+ depends on PPC
select USB_OHCI_LITTLE_ENDIAN
---help---
Enables support for little-endian USB controllers present on the
@@ -511,7 +497,7 @@ config USB_OHCI_HCD_PPC_OF_LE
config USB_OHCI_HCD_PPC_OF
bool
- depends on PPC_OF
+ depends on PPC
default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE
config USB_OHCI_HCD_PCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index d6216a493bab..65b0b6a58599 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -5,8 +5,6 @@
# tell define_trace.h where to find the xhci trace header
CFLAGS_xhci-trace.o := -I$(src)
-isp1760-y := isp1760-hcd.o isp1760-if.o
-
fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
@@ -69,7 +67,6 @@ obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
-obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 56a88506febe..663f7908b15c 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -27,44 +27,66 @@
#define DRIVER_DESC "EHCI Atmel driver"
static const char hcd_name[] = "ehci-atmel";
-static struct hc_driver __read_mostly ehci_atmel_hc_driver;
/* interface and function clocks */
-static struct clk *iclk, *fclk, *uclk;
-static int clocked;
+#define hcd_to_atmel_ehci_priv(h) \
+ ((struct atmel_ehci_priv *)hcd_to_ehci(h)->priv)
+
+struct atmel_ehci_priv {
+ struct clk *iclk;
+ struct clk *fclk;
+ struct clk *uclk;
+ bool clocked;
+};
+
+static struct hc_driver __read_mostly ehci_atmel_hc_driver;
+
+static const struct ehci_driver_overrides ehci_atmel_drv_overrides __initconst = {
+ .extra_priv_size = sizeof(struct atmel_ehci_priv),
+};
/*-------------------------------------------------------------------------*/
-static void atmel_start_clock(void)
+static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
{
+ if (atmel_ehci->clocked)
+ return;
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
- clk_set_rate(uclk, 48000000);
- clk_prepare_enable(uclk);
+ clk_set_rate(atmel_ehci->uclk, 48000000);
+ clk_prepare_enable(atmel_ehci->uclk);
}
- clk_prepare_enable(iclk);
- clk_prepare_enable(fclk);
- clocked = 1;
+ clk_prepare_enable(atmel_ehci->iclk);
+ clk_prepare_enable(atmel_ehci->fclk);
+ atmel_ehci->clocked = true;
}
-static void atmel_stop_clock(void)
+static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
{
- clk_disable_unprepare(fclk);
- clk_disable_unprepare(iclk);
+ if (!atmel_ehci->clocked)
+ return;
+ clk_disable_unprepare(atmel_ehci->fclk);
+ clk_disable_unprepare(atmel_ehci->iclk);
if (IS_ENABLED(CONFIG_COMMON_CLK))
- clk_disable_unprepare(uclk);
- clocked = 0;
+ clk_disable_unprepare(atmel_ehci->uclk);
+ atmel_ehci->clocked = false;
}
static void atmel_start_ehci(struct platform_device *pdev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
dev_dbg(&pdev->dev, "start\n");
- atmel_start_clock();
+ atmel_start_clock(atmel_ehci);
}
static void atmel_stop_ehci(struct platform_device *pdev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
dev_dbg(&pdev->dev, "stop\n");
- atmel_stop_clock();
+ atmel_stop_clock(atmel_ehci);
}
/*-------------------------------------------------------------------------*/
@@ -75,6 +97,7 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
const struct hc_driver *driver = &ehci_atmel_hc_driver;
struct resource *res;
struct ehci_hcd *ehci;
+ struct atmel_ehci_priv *atmel_ehci;
int irq;
int retval;
@@ -105,6 +128,7 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
retval = -ENOMEM;
goto fail_create_hcd;
}
+ atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -116,23 +140,23 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- iclk = devm_clk_get(&pdev->dev, "ehci_clk");
- if (IS_ERR(iclk)) {
+ atmel_ehci->iclk = devm_clk_get(&pdev->dev, "ehci_clk");
+ if (IS_ERR(atmel_ehci->iclk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = -ENOENT;
goto fail_request_resource;
}
- fclk = devm_clk_get(&pdev->dev, "uhpck");
- if (IS_ERR(fclk)) {
+ atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck");
+ if (IS_ERR(atmel_ehci->fclk)) {
dev_err(&pdev->dev, "Error getting function clock\n");
retval = -ENOENT;
goto fail_request_resource;
}
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
- uclk = devm_clk_get(&pdev->dev, "usb_clk");
- if (IS_ERR(uclk)) {
+ atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
+ if (IS_ERR(atmel_ehci->uclk)) {
dev_err(&pdev->dev, "failed to get uclk\n");
- retval = PTR_ERR(uclk);
+ retval = PTR_ERR(atmel_ehci->uclk);
goto fail_request_resource;
}
}
@@ -169,11 +193,35 @@ static int ehci_atmel_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
atmel_stop_ehci(pdev);
- fclk = iclk = NULL;
return 0;
}
+#ifdef CONFIG_PM
+static int ehci_atmel_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+ int ret;
+
+ ret = ehci_suspend(hcd, false);
+ if (ret)
+ return ret;
+
+ atmel_stop_clock(atmel_ehci);
+ return 0;
+}
+
+static int ehci_atmel_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
+ atmel_start_clock(atmel_ehci);
+ return ehci_resume(hcd, false);
+}
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id atmel_ehci_dt_ids[] = {
{ .compatible = "atmel,at91sam9g45-ehci" },
@@ -183,12 +231,16 @@ static const struct of_device_id atmel_ehci_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmel_ehci_dt_ids);
#endif
+static SIMPLE_DEV_PM_OPS(ehci_atmel_pm_ops, ehci_atmel_drv_suspend,
+ ehci_atmel_drv_resume);
+
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
.remove = ehci_atmel_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "atmel-ehci",
+ .pm = &ehci_atmel_pm_ops,
.of_match_table = of_match_ptr(atmel_ehci_dt_ids),
},
};
@@ -199,7 +251,7 @@ static int __init ehci_atmel_init(void)
return -ENODEV;
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- ehci_init_driver(&ehci_atmel_hc_driver, NULL);
+ ehci_init_driver(&ehci_atmel_hc_driver, &ehci_atmel_drv_overrides);
return platform_driver_register(&ehci_atmel_driver);
}
module_init(ehci_atmel_init);
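
Editor's note on the ehci-atmel conversion above: the file-scope clock globals become a per-controller struct atmel_ehci_priv stored in the HCD's private area, sized through ehci_driver_overrides.extra_priv_size and reached via the hcd_to_atmel_ehci_priv() macro. A minimal standalone model of that allocation pattern follows (plain C with illustrative names, not kernel code):

/* Model of "driver-private data appended to a core object":
 * one allocation holds the core struct plus a driver-sized tail,
 * and an accessor macro casts the tail to the driver's type. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct hcd {                       /* stand-in for the core HCD object */
	int core_state;
	unsigned long priv[];      /* driver-private area, sized at alloc time */
};

struct atmel_priv {                /* what the driver keeps per controller */
	bool clocked;
};

#define hcd_to_atmel_priv(h) ((struct atmel_priv *)(h)->priv)

static struct hcd *create_hcd(size_t extra_priv_size)
{
	/* core object and driver area come from a single allocation */
	return calloc(1, sizeof(struct hcd) + extra_priv_size);
}

int main(void)
{
	struct hcd *hcd = create_hcd(sizeof(struct atmel_priv));

	if (!hcd)
		return 1;
	hcd_to_atmel_priv(hcd)->clocked = true;   /* per-device, no globals */
	printf("clocked=%d\n", hcd_to_atmel_priv(hcd)->clocked);
	free(hcd);
	return 0;
}

The same shape is what lets the new ehci_atmel_drv_suspend()/resume() callbacks recover the driver state from dev_get_drvdata() alone.
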
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index fb7bd0c7dc15..ab4eee3df97a 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -709,7 +709,6 @@ static struct platform_driver ehci_fsl_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "fsl-ehci",
- .owner = THIS_MODULE,
.pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 495b6fbcbcd9..21650044b09e 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -187,7 +187,6 @@ static struct platform_driver ehci_grlib_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "grlib-ehci",
- .owner = THIS_MODULE,
.of_match_table = ehci_hcd_grlib_of_match,
},
};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 38bfeedae1d0..85e56d1abd23 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1110,7 +1110,7 @@ int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
EXPORT_SYMBOL_GPL(ehci_suspend);
/* Returns 0 if power was preserved, 1 if power was lost */
-int ehci_resume(struct usb_hcd *hcd, bool hibernated)
+int ehci_resume(struct usb_hcd *hcd, bool force_reset)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -1124,12 +1124,12 @@ int ehci_resume(struct usb_hcd *hcd, bool hibernated)
return 0; /* Controller is dead */
/*
- * If CF is still set and we aren't resuming from hibernation
+ * If CF is still set and reset isn't forced
* then we maintained suspend power.
* Just undo the effect of ehci_suspend().
*/
if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
- !hibernated) {
+ !force_reset) {
int mask = INTR_MASK;
ehci_prepare_ports_for_controller_resume(ehci);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 118edb7bdca2..87cf86f38b36 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -700,15 +700,15 @@ ehci_hub_descriptor (
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
- temp = 0x0008; /* per-port overcurrent reporting */
+ temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
if (HCS_PPC (ehci->hcs_params))
- temp |= 0x0001; /* per-port power control */
+ temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
else
- temp |= 0x0002; /* no power switching */
+ temp |= HUB_CHAR_NO_LPSM; /* no power switching */
#if 0
// re-enable when we support USB_PORT_FEAT_INDICATOR below.
if (HCS_INDICATOR (ehci->hcs_params))
- temp |= 0x0080; /* per-port indicators (LEDs) */
+ temp |= HUB_CHAR_PORTIND; /* per-port indicators (LEDs) */
#endif
desc->wHubCharacteristics = cpu_to_le16(temp);
}
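
Editor's note: this hunk and the later hub hunks swap bare wHubCharacteristics values for named HUB_CHAR_* bits, and the numeric mapping can be read straight off the replaced constants (0x0001, 0x0002, 0x0008, 0x0010, 0x0080). A small self-contained sketch restating those values follows; the COMPOUND and COMMON_* values are filled in from the USB 2.0 hub-characteristics layout rather than from this diff:

#include <stdio.h>
#include <stdint.h>

#define HUB_CHAR_COMMON_LPSM     0x0000  /* ganged port power switching    */
#define HUB_CHAR_INDV_PORT_LPSM  0x0001  /* per-port power switching       */
#define HUB_CHAR_NO_LPSM         0x0002  /* no power switching             */
#define HUB_CHAR_COMPOUND        0x0004  /* hub is part of a compound dev  */
#define HUB_CHAR_COMMON_OCPM     0x0000  /* global overcurrent reporting   */
#define HUB_CHAR_INDV_PORT_OCPM  0x0008  /* per-port overcurrent reporting */
#define HUB_CHAR_NO_OCPM         0x0010  /* no overcurrent protection      */
#define HUB_CHAR_PORTIND         0x0080  /* port indicator (LED) support   */

int main(void)
{
	/* EHCI root hub with per-port power control, as in the hunk above */
	uint16_t w = HUB_CHAR_INDV_PORT_OCPM | HUB_CHAR_INDV_PORT_LPSM;

	printf("wHubCharacteristics = 0x%04x\n", w);   /* prints 0x0009 */
	return 0;
}
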
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 851006a0d97b..2a5d2fd76040 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -43,6 +43,24 @@ static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
}
/*
+ * This is the list of PCI IDs for devices that report the EHCI USB class but
+ * have their own dedicated drivers. One example is the ChipIdea controller
+ * found on some Intel MID platforms.
+ */
+static const struct pci_device_id bypass_pci_id_table[] = {
+ /* ChipIdea on Intel MID platform */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), },
+ {}
+};
+
+static inline bool is_bypassed_id(struct pci_dev *pdev)
+{
+ return !!pci_match_id(bypass_pci_id_table, pdev);
+}
+
+/*
* 0x84 is the offset of in/out threshold register,
* and it is the same offset as the register of 'hostpc'.
*/
@@ -352,6 +370,13 @@ static const struct ehci_driver_overrides pci_overrides __initconst = {
/*-------------------------------------------------------------------------*/
+static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ if (is_bypassed_id(pdev))
+ return -ENODEV;
+ return usb_hcd_pci_probe(pdev, id);
+}
+
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
@@ -370,7 +395,7 @@ static struct pci_driver ehci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
- .probe = usb_hcd_pci_probe,
+ .probe = ehci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
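
Editor's note: the ehci-pci change above makes the class-based probe step aside for controllers that have a dedicated driver; pci_match_id() returns the matching table entry (or NULL), and a hit turns into -ENODEV so binding falls through to the specific driver. A standalone sketch of that deny-list idea follows (table contents copied from the hunk; the types and helpers here are illustrative, not the PCI core):

#include <stdio.h>

struct dev_id { unsigned int vendor, device; };

static const struct dev_id bypass_table[] = {
	{ 0x8086, 0x0811 },   /* ChipIdea on Intel MID (0x8086 = Intel) */
	{ 0x8086, 0x0829 },
	{ 0x8086, 0xe006 },
	{ 0, 0 }              /* terminator */
};

static int is_bypassed(const struct dev_id *id)
{
	const struct dev_id *p;

	for (p = bypass_table; p->vendor; p++)
		if (p->vendor == id->vendor && p->device == id->device)
			return 1;
	return 0;
}

static int generic_probe(const struct dev_id *id)
{
	if (is_bypassed(id))
		return -19;   /* -ENODEV: leave the device to its own driver */
	return 0;             /* otherwise continue with the generic path   */
}

int main(void)
{
	struct dev_id chipidea = { 0x8086, 0x0811 };

	printf("probe -> %d\n", generic_probe(&chipidea));   /* prints -19 */
	return 0;
}
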
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 8557803e6154..d8a75a51d6d4 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -43,7 +43,8 @@
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
struct reset_control *rst;
- struct phy *phy;
+ struct phy **phys;
+ int num_phys;
};
static const char hcd_name[] = "ehci-platform";
@@ -78,7 +79,7 @@ static int ehci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
- int clk, ret;
+ int clk, ret, phy_num;
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
ret = clk_prepare_enable(priv->clks[clk]);
@@ -86,20 +87,28 @@ static int ehci_platform_power_on(struct platform_device *dev)
goto err_disable_clks;
}
- if (priv->phy) {
- ret = phy_init(priv->phy);
- if (ret)
- goto err_disable_clks;
-
- ret = phy_power_on(priv->phy);
- if (ret)
- goto err_exit_phy;
+ for (phy_num = 0; phy_num < priv->num_phys; phy_num++) {
+ if (priv->phys[phy_num]) {
+ ret = phy_init(priv->phys[phy_num]);
+ if (ret)
+ goto err_exit_phy;
+ ret = phy_power_on(priv->phys[phy_num]);
+ if (ret) {
+ phy_exit(priv->phys[phy_num]);
+ goto err_exit_phy;
+ }
+ }
}
return 0;
err_exit_phy:
- phy_exit(priv->phy);
+ while (--phy_num >= 0) {
+ if (priv->phys[phy_num]) {
+ phy_power_off(priv->phys[phy_num]);
+ phy_exit(priv->phys[phy_num]);
+ }
+ }
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(priv->clks[clk]);
@@ -111,11 +120,13 @@ static void ehci_platform_power_off(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
- int clk;
+ int clk, phy_num;
- if (priv->phy) {
- phy_power_off(priv->phy);
- phy_exit(priv->phy);
+ for (phy_num = 0; phy_num < priv->num_phys; phy_num++) {
+ if (priv->phys[phy_num]) {
+ phy_power_off(priv->phys[phy_num]);
+ phy_exit(priv->phys[phy_num]);
+ }
}
for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
@@ -143,7 +154,8 @@ static int ehci_platform_probe(struct platform_device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
- int err, irq, clk = 0;
+ const char *phy_name;
+ int err, irq, phy_num, clk = 0;
if (usb_disabled())
return -ENODEV;
@@ -155,7 +167,8 @@ static int ehci_platform_probe(struct platform_device *dev)
if (!pdata)
pdata = &ehci_platform_defaults;
- err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ err = dma_coerce_mask_and_coherent(&dev->dev,
+ pdata->dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
if (err)
return err;
@@ -185,12 +198,42 @@ static int ehci_platform_probe(struct platform_device *dev)
if (of_property_read_bool(dev->dev.of_node, "big-endian"))
ehci->big_endian_mmio = ehci->big_endian_desc = 1;
- priv->phy = devm_phy_get(&dev->dev, "usb");
- if (IS_ERR(priv->phy)) {
- err = PTR_ERR(priv->phy);
- if (err == -EPROBE_DEFER)
- goto err_put_hcd;
- priv->phy = NULL;
+ if (of_property_read_bool(dev->dev.of_node,
+ "needs-reset-on-resume"))
+ pdata->reset_on_resume = 1;
+
+ priv->num_phys = of_count_phandle_with_args(dev->dev.of_node,
+ "phys", "#phy-cells");
+ priv->num_phys = priv->num_phys > 0 ? priv->num_phys : 1;
+
+ priv->phys = devm_kcalloc(&dev->dev, priv->num_phys,
+ sizeof(struct phy *), GFP_KERNEL);
+ if (!priv->phys)
+ return -ENOMEM;
+
+ for (phy_num = 0; phy_num < priv->num_phys; phy_num++) {
+ err = of_property_read_string_index(
+ dev->dev.of_node,
+ "phy-names", phy_num,
+ &phy_name);
+
+ if (err < 0) {
+ if (priv->num_phys > 1) {
+ dev_err(&dev->dev, "phy-names not provided");
+ goto err_put_hcd;
+ } else
+ phy_name = "usb";
+ }
+
+ priv->phys[phy_num] = devm_phy_get(&dev->dev,
+ phy_name);
+ if (IS_ERR(priv->phys[phy_num])) {
+ err = PTR_ERR(priv->phys[phy_num]);
+ if ((priv->num_phys > 1) ||
+ (err == -EPROBE_DEFER))
+ goto err_put_hcd;
+ priv->phys[phy_num] = NULL;
+ }
}
for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
@@ -340,7 +383,7 @@ static int ehci_platform_resume(struct device *dev)
return err;
}
- ehci_resume(hcd, false);
+ ehci_resume(hcd, pdata->reset_on_resume);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
@@ -349,6 +392,7 @@ static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "via,vt8500-ehci", },
{ .compatible = "wm,prizm-ehci", },
{ .compatible = "generic-ehci", },
+ { .compatible = "cavium,octeon-6335-ehci", },
{}
};
MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
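
Editor's note: the ehci-platform hunks above (and the matching ohci-platform ones below) generalize the single optional PHY to an array, so the error path has to unwind exactly the entries that were brought up: the failing PHY is exited on the spot, then the loop walks backwards over the earlier ones. A compact standalone sketch of that unwind shape follows (stub functions, not the generic PHY framework):

#include <stdio.h>

#define NPHYS 3

/* stand-ins for the PHY calls; index 2 is made to fail on power-on */
static int  phy_init(int i)      { printf("init phy %d\n", i); return 0; }
static int  phy_power_on(int i)  { return i == 2 ? -1 : 0; }
static void phy_power_off(int i) { printf("power off phy %d\n", i); }
static void phy_exit(int i)      { printf("exit phy %d\n", i); }

static int power_on_all(void)
{
	int i, ret;

	for (i = 0; i < NPHYS; i++) {
		ret = phy_init(i);
		if (ret)
			goto err_unwind;
		ret = phy_power_on(i);
		if (ret) {
			phy_exit(i);          /* undo this entry's init */
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	while (--i >= 0) {                    /* undo the entries that succeeded */
		phy_power_off(i);
		phy_exit(i);
	}
	return ret;
}

int main(void)
{
	printf("power_on_all -> %d\n", power_on_all());
	return 0;
}
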
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 7d75465d97c7..342816a7f8b1 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -325,6 +325,5 @@ static struct platform_driver ehci_hcd_msp_driver = {
.remove = ehci_hcd_msp_drv_remove,
.driver = {
.name = "pmcmsp-ehci",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 547924796d29..1a10c8d542ca 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -234,7 +234,6 @@ static struct platform_driver ehci_hcd_ppc_of_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ehci",
- .owner = THIS_MODULE,
.of_match_table = ehci_hcd_ppc_of_match,
},
};
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index e113fd73aeae..f9a332775c47 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
else
next = (now + 2 + 7) & ~0x07; /* full frame cache */
+ /* If needed, initialize last_iso_frame so that this URB will be seen */
+ if (ehci->isoc_count == 0)
+ ehci->last_iso_frame = now >> 3;
+
/*
* Use ehci->last_iso_frame as the base. There can't be any
* TDs scheduled for earlier than that.
@@ -1600,11 +1604,11 @@ iso_stream_schedule (
*/
now2 = (now - base) & (mod - 1);
- /* Is the schedule already full? */
+ /* Is the schedule about to wrap around? */
if (unlikely(!empty && start < period)) {
- ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+ ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
urb, stream->next_uframe, base, period, mod);
- status = -ENOSPC;
+ status = -EFBIG;
goto fail;
}
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
-
- /* Make sure scan_isoc() sees these */
- if (ehci->isoc_count == 0)
- ehci->last_iso_frame = now >> 3;
return status;
fail:
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 9b6e8d0eac43..3d86cc2ffe68 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -178,7 +178,6 @@ static struct platform_driver ehci_hcd_sead3_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "sead3-ehci",
- .owner = THIS_MODULE,
.pm = SEAD3_EHCI_PMOPS,
}
};
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 0e0ce684aff3..5caf88d679e4 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -189,7 +189,6 @@ static struct platform_driver ehci_hcd_sh_driver = {
.shutdown = ehci_hcd_sh_shutdown,
.driver = {
.name = "sh_ehci",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 19a9af1b4d74..ff9af29b4e9f 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
- err = PTR_ERR(u_phy);
+ err = -EPROBE_DEFER;
goto cleanup_clk_en;
}
hcd->usb_phy = u_phy;
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 0d247673c3ca..bdb93b6a356f 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -210,7 +210,6 @@ static struct platform_driver ehci_hcd_tilegx_driver = {
.shutdown = ehci_hcd_tilegx_drv_shutdown,
.driver = {
.name = "tilegx-ehci",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a2328361dc80..f54480850bb8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -236,7 +236,6 @@ static struct platform_driver ehci_hcd_xilinx_of_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xilinx-of-ehci",
- .owner = THIS_MODULE,
.of_match_table = ehci_hcd_xilinx_of_match,
},
};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6f0577b0a5ae..52ef0844a180 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -871,7 +871,7 @@ extern int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
#ifdef CONFIG_PM
extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
-extern int ehci_resume(struct usb_hcd *hcd, bool hibernated);
+extern int ehci_resume(struct usb_hcd *hcd, bool force_reset);
#endif /* CONFIG_PM */
extern int ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 6af2512f8378..70116a65262c 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -32,8 +32,8 @@ static u8 root_hub_des[] = {
0x09, /* blength */
0x29, /* bDescriptorType;hub-descriptor */
0x01, /* bNbrPorts */
- 0x00, /* wHubCharacteristics */
- 0x00,
+ HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM, /* wHubCharacteristics */
+ 0x00, /* per-port power, no overcurrent */
0x01, /* bPwrOn2pwrGood;2ms */
0x00, /* bHubContrCurrent;0mA */
0x00, /* DeviceRemoveable */
@@ -208,7 +208,6 @@ int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
int retval = 0;
- int len = 0;
struct usb_hub_status *hub_status;
struct usb_port_status *port_status;
unsigned long flags;
@@ -272,8 +271,6 @@ int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetHubDescriptor:
memcpy(buf, root_hub_des, sizeof(root_hub_des));
- buf[3] = 0x11; /* per-port power, no ovrcrnt */
- len = (buf[0] < wLength) ? buf[0] : wLength;
break;
case GetHubStatus:
hub_status = (struct usb_hub_status *)buf;
@@ -281,7 +278,6 @@ int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
cpu_to_le16(fhci->vroot_hub->hub.wHubStatus);
hub_status->wHubChange =
cpu_to_le16(fhci->vroot_hub->hub.wHubChange);
- len = 4;
break;
case GetPortStatus:
port_status = (struct usb_port_status *)buf;
@@ -289,7 +285,6 @@ int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
cpu_to_le16(fhci->vroot_hub->port.wPortStatus);
port_status->wPortChange =
cpu_to_le16(fhci->vroot_hub->port.wPortChange);
- len = 4;
break;
case SetHubFeature:
switch (wValue) {
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index ecf02b2623e8..475b21fd373b 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1521,8 +1521,8 @@ fotg210_hub_descriptor(
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
- temp = 0x0008; /* per-port overcurrent reporting */
- temp |= 0x0002; /* no power switching */
+ temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
+ temp |= HUB_CHAR_NO_LPSM; /* no power switching */
desc->wHubCharacteristics = cpu_to_le16(temp);
}
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
index 664d2aa1239c..a83eefefffda 100644
--- a/drivers/usb/host/fusbh200-hcd.c
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -1479,8 +1479,8 @@ fusbh200_hub_descriptor (
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
- temp = 0x0008; /* per-port overcurrent reporting */
- temp |= 0x0002; /* no power switching */
+ temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
+ temp |= HUB_CHAR_NO_LPSM; /* no power switching */
desc->wHubCharacteristics = cpu_to_le16(temp);
}
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index eb4efba9f1ad..6a2ad550b120 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1482,9 +1482,8 @@ static int get_hub_descriptor(struct usb_hcd *hcd,
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
- 0x0002 | /* No power switching */
- 0x0010 | /* No over current protection */
- 0);
+ HUB_CHAR_NO_LPSM | /* No power switching */
+ HUB_CHAR_NO_OCPM); /* No over current protection */
desc->u.hs.DeviceRemovable[0] = 1 << 1;
desc->u.hs.DeviceRemovable[1] = ~0;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 31c9c4d0fa0b..113d0cc6cc43 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -948,7 +948,10 @@ static void isp116x_hub_descriptor(struct isp116x *isp116x,
desc->bHubContrCurrent = 0;
desc->bNbrPorts = (u8) (reg & 0x3);
/* Power switching, device type, overcurrent. */
- desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) & 0x1f));
+ desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) &
+ (HUB_CHAR_LPSM |
+ HUB_CHAR_COMPOUND |
+ HUB_CHAR_OCPM)));
desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff);
/* ports removable, and legacy PortPwrCtrlMask */
desc->u.hs.DeviceRemovable[0] = 0;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 75e5876f9d7c..b32ab60cad1e 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1543,8 +1543,12 @@ static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
desc->bHubContrCurrent = 0;
desc->bNbrPorts = reg & 0x3;
/* Power switching, device type, overcurrent. */
- desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
- DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
+ desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
+ (HUB_CHAR_LPSM |
+ HUB_CHAR_COMPOUND |
+ HUB_CHAR_OCPM));
+ DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
+ desc->wHubCharacteristics);
desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
/* ports removable, and legacy PortPwrCtrlMask */
desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
deleted file mode 100644
index 33dc79ccaa6b..000000000000
--- a/drivers/usb/host/isp1760-hcd.h
+++ /dev/null
@@ -1,208 +0,0 @@
-#ifndef _ISP1760_HCD_H_
-#define _ISP1760_HCD_H_
-
-/* exports for if */
-struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
- int irq, unsigned long irqflags,
- int rst_gpio,
- struct device *dev, const char *busname,
- unsigned int devflags);
-int init_kmem_once(void);
-void deinit_kmem_cache(void);
-
-/* EHCI capability registers */
-#define HC_CAPLENGTH 0x00
-#define HC_HCSPARAMS 0x04
-#define HC_HCCPARAMS 0x08
-
-/* EHCI operational registers */
-#define HC_USBCMD 0x20
-#define HC_USBSTS 0x24
-#define HC_FRINDEX 0x2c
-#define HC_CONFIGFLAG 0x60
-#define HC_PORTSC1 0x64
-#define HC_ISO_PTD_DONEMAP_REG 0x130
-#define HC_ISO_PTD_SKIPMAP_REG 0x134
-#define HC_ISO_PTD_LASTPTD_REG 0x138
-#define HC_INT_PTD_DONEMAP_REG 0x140
-#define HC_INT_PTD_SKIPMAP_REG 0x144
-#define HC_INT_PTD_LASTPTD_REG 0x148
-#define HC_ATL_PTD_DONEMAP_REG 0x150
-#define HC_ATL_PTD_SKIPMAP_REG 0x154
-#define HC_ATL_PTD_LASTPTD_REG 0x158
-
-/* Configuration Register */
-#define HC_HW_MODE_CTRL 0x300
-#define ALL_ATX_RESET (1 << 31)
-#define HW_ANA_DIGI_OC (1 << 15)
-#define HW_DATA_BUS_32BIT (1 << 8)
-#define HW_DACK_POL_HIGH (1 << 6)
-#define HW_DREQ_POL_HIGH (1 << 5)
-#define HW_INTR_HIGH_ACT (1 << 2)
-#define HW_INTR_EDGE_TRIG (1 << 1)
-#define HW_GLOBAL_INTR_EN (1 << 0)
-
-#define HC_CHIP_ID_REG 0x304
-#define HC_SCRATCH_REG 0x308
-
-#define HC_RESET_REG 0x30c
-#define SW_RESET_RESET_HC (1 << 1)
-#define SW_RESET_RESET_ALL (1 << 0)
-
-#define HC_BUFFER_STATUS_REG 0x334
-#define ISO_BUF_FILL (1 << 2)
-#define INT_BUF_FILL (1 << 1)
-#define ATL_BUF_FILL (1 << 0)
-
-#define HC_MEMORY_REG 0x33c
-#define ISP_BANK(x) ((x) << 16)
-
-#define HC_PORT1_CTRL 0x374
-#define PORT1_POWER (3 << 3)
-#define PORT1_INIT1 (1 << 7)
-#define PORT1_INIT2 (1 << 23)
-#define HW_OTG_CTRL_SET 0x374
-#define HW_OTG_CTRL_CLR 0x376
-
-/* Interrupt Register */
-#define HC_INTERRUPT_REG 0x310
-
-#define HC_INTERRUPT_ENABLE 0x314
-#define HC_ISO_INT (1 << 9)
-#define HC_ATL_INT (1 << 8)
-#define HC_INTL_INT (1 << 7)
-#define HC_EOT_INT (1 << 3)
-#define HC_SOT_INT (1 << 1)
-#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT)
-
-#define HC_ISO_IRQ_MASK_OR_REG 0x318
-#define HC_INT_IRQ_MASK_OR_REG 0x31C
-#define HC_ATL_IRQ_MASK_OR_REG 0x320
-#define HC_ISO_IRQ_MASK_AND_REG 0x324
-#define HC_INT_IRQ_MASK_AND_REG 0x328
-#define HC_ATL_IRQ_MASK_AND_REG 0x32C
-
-/* urb state*/
-#define DELETE_URB (0x0008)
-#define NO_TRANSFER_ACTIVE (0xffffffff)
-
-/* Philips Proprietary Transfer Descriptor (PTD) */
-typedef __u32 __bitwise __dw;
-struct ptd {
- __dw dw0;
- __dw dw1;
- __dw dw2;
- __dw dw3;
- __dw dw4;
- __dw dw5;
- __dw dw6;
- __dw dw7;
-};
-#define PTD_OFFSET 0x0400
-#define ISO_PTD_OFFSET 0x0400
-#define INT_PTD_OFFSET 0x0800
-#define ATL_PTD_OFFSET 0x0c00
-#define PAYLOAD_OFFSET 0x1000
-
-struct slotinfo {
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
- unsigned long timestamp;
-};
-
-
-typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd);
-
-/*
- * Device flags that can vary from board to board. All of these
- * indicate the most "atypical" case, so that a devflags of 0 is
- * a sane default configuration.
- */
-#define ISP1760_FLAG_BUS_WIDTH_16 0x00000002 /* 16-bit data bus width */
-#define ISP1760_FLAG_OTG_EN 0x00000004 /* Port 1 supports OTG */
-#define ISP1760_FLAG_ANALOG_OC 0x00000008 /* Analog overcurrent */
-#define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */
-#define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */
-#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
-#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
-#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
-#define ISP1760_FLAG_RESET_ACTIVE_HIGH 0x80000000 /* RESET GPIO active high */
-
-/* chip memory management */
-struct memory_chunk {
- unsigned int start;
- unsigned int size;
- unsigned int free;
-};
-
-/*
- * 60kb divided in:
- * - 32 blocks @ 256 bytes
- * - 20 blocks @ 1024 bytes
- * - 4 blocks @ 8192 bytes
- */
-
-#define BLOCK_1_NUM 32
-#define BLOCK_2_NUM 20
-#define BLOCK_3_NUM 4
-
-#define BLOCK_1_SIZE 256
-#define BLOCK_2_SIZE 1024
-#define BLOCK_3_SIZE 8192
-#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
-#define MAX_PAYLOAD_SIZE BLOCK_3_SIZE
-#define PAYLOAD_AREA_SIZE 0xf000
-
-/* ATL */
-/* DW0 */
-#define DW0_VALID_BIT 1
-#define FROM_DW0_VALID(x) ((x) & 0x01)
-#define TO_DW0_LENGTH(x) (((u32) x) << 3)
-#define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
-#define TO_DW0_MULTI(x) (((u32) x) << 29)
-#define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
-/* DW1 */
-#define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
-#define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
-#define DW1_TRANS_BULK ((u32) 2 << 12)
-#define DW1_TRANS_INT ((u32) 3 << 12)
-#define DW1_TRANS_SPLIT ((u32) 1 << 14)
-#define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
-#define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
-#define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
-/* DW2 */
-#define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
-#define TO_DW2_RL(x) ((x) << 25)
-#define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
-/* DW3 */
-#define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
-#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
-#define TO_DW3_NAKCOUNT(x) ((x) << 19)
-#define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
-#define TO_DW3_CERR(x) ((x) << 23)
-#define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
-#define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
-#define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
-#define TO_DW3_PING(x) ((x) << 26)
-#define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
-#define DW3_ERROR_BIT (1 << 28)
-#define DW3_BABBLE_BIT (1 << 29)
-#define DW3_HALT_BIT (1 << 30)
-#define DW3_ACTIVE_BIT (1 << 31)
-#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
-
-#define INT_UNDERRUN (1 << 2)
-#define INT_BABBLE (1 << 1)
-#define INT_EXACT (1 << 0)
-
-#define SETUP_PID (2)
-#define IN_PID (1)
-#define OUT_PID (0)
-
-/* Errata 1 */
-#define RL_COUNTER (0)
-#define NAK_COUNTER (0)
-#define ERR_COUNTER (2)
-
-#endif /* _ISP1760_HCD_H_ */
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
deleted file mode 100644
index 09254a43bc01..000000000000
--- a/drivers/usb/host/isp1760-if.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Glue code for the ISP1760 driver and bus
- * Currently there is support for
- * - OpenFirmware
- * - PCI
- * - PDEV (generic platform device centralized driver model)
- *
- * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
- *
- */
-
-#include <linux/usb.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/usb/isp1760.h>
-#include <linux/usb/hcd.h>
-
-#include "isp1760-hcd.h"
-
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
-#endif
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
-
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
-struct isp1760 {
- struct usb_hcd *hcd;
- int rst_gpio;
-};
-
-static int of_isp1760_probe(struct platform_device *dev)
-{
- struct isp1760 *drvdata;
- struct device_node *dp = dev->dev.of_node;
- struct resource *res;
- struct resource memory;
- int virq;
- resource_size_t res_len;
- int ret;
- unsigned int devflags = 0;
- enum of_gpio_flags gpio_flags;
- u32 bus_width = 0;
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- ret = of_address_to_resource(dp, 0, &memory);
- if (ret) {
- ret = -ENXIO;
- goto free_data;
- }
-
- res_len = resource_size(&memory);
-
- res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
- if (!res) {
- ret = -EBUSY;
- goto free_data;
- }
-
- virq = irq_of_parse_and_map(dp, 0);
- if (!virq) {
- ret = -ENODEV;
- goto release_reg;
- }
-
- if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
- devflags |= ISP1760_FLAG_ISP1761;
-
- /* Some systems wire up only 16 of the 32 data lines */
- of_property_read_u32(dp, "bus-width", &bus_width);
- if (bus_width == 16)
- devflags |= ISP1760_FLAG_BUS_WIDTH_16;
-
- if (of_get_property(dp, "port1-otg", NULL) != NULL)
- devflags |= ISP1760_FLAG_OTG_EN;
-
- if (of_get_property(dp, "analog-oc", NULL) != NULL)
- devflags |= ISP1760_FLAG_ANALOG_OC;
-
- if (of_get_property(dp, "dack-polarity", NULL) != NULL)
- devflags |= ISP1760_FLAG_DACK_POL_HIGH;
-
- if (of_get_property(dp, "dreq-polarity", NULL) != NULL)
- devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
-
- drvdata->rst_gpio = of_get_gpio_flags(dp, 0, &gpio_flags);
- if (gpio_is_valid(drvdata->rst_gpio)) {
- ret = gpio_request(drvdata->rst_gpio, dev_name(&dev->dev));
- if (!ret) {
- if (!(gpio_flags & OF_GPIO_ACTIVE_LOW)) {
- devflags |= ISP1760_FLAG_RESET_ACTIVE_HIGH;
- gpio_direction_output(drvdata->rst_gpio, 0);
- } else {
- gpio_direction_output(drvdata->rst_gpio, 1);
- }
- } else {
- drvdata->rst_gpio = ret;
- }
- }
-
- drvdata->hcd = isp1760_register(memory.start, res_len, virq,
- IRQF_SHARED, drvdata->rst_gpio,
- &dev->dev, dev_name(&dev->dev),
- devflags);
- if (IS_ERR(drvdata->hcd)) {
- ret = PTR_ERR(drvdata->hcd);
- goto free_gpio;
- }
-
- platform_set_drvdata(dev, drvdata);
- return ret;
-
-free_gpio:
- if (gpio_is_valid(drvdata->rst_gpio))
- gpio_free(drvdata->rst_gpio);
-release_reg:
- release_mem_region(memory.start, res_len);
-free_data:
- kfree(drvdata);
- return ret;
-}
-
-static int of_isp1760_remove(struct platform_device *dev)
-{
- struct isp1760 *drvdata = platform_get_drvdata(dev);
-
- usb_remove_hcd(drvdata->hcd);
- iounmap(drvdata->hcd->regs);
- release_mem_region(drvdata->hcd->rsrc_start, drvdata->hcd->rsrc_len);
- usb_put_hcd(drvdata->hcd);
-
- if (gpio_is_valid(drvdata->rst_gpio))
- gpio_free(drvdata->rst_gpio);
-
- kfree(drvdata);
- return 0;
-}
-
-static const struct of_device_id of_isp1760_match[] = {
- {
- .compatible = "nxp,usb-isp1760",
- },
- {
- .compatible = "nxp,usb-isp1761",
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, of_isp1760_match);
-
-static struct platform_driver isp1760_of_driver = {
- .driver = {
- .name = "nxp-isp1760",
- .of_match_table = of_isp1760_match,
- },
- .probe = of_isp1760_probe,
- .remove = of_isp1760_remove,
-};
-#endif
-
-#ifdef CONFIG_PCI
-static int isp1761_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- u8 latency, limit;
- __u32 reg_data;
- int retry_count;
- struct usb_hcd *hcd;
- unsigned int devflags = 0;
- int ret_status = 0;
-
- resource_size_t pci_mem_phy0;
- resource_size_t memlength;
-
- u8 __iomem *chip_addr;
- u8 __iomem *iobase;
- resource_size_t nxp_pci_io_base;
- resource_size_t iolength;
-
- if (usb_disabled())
- return -ENODEV;
-
- if (pci_enable_device(dev) < 0)
- return -ENODEV;
-
- if (!dev->irq)
- return -ENODEV;
-
- /* Grab the PLX PCI mem maped port start address we need */
- nxp_pci_io_base = pci_resource_start(dev, 0);
- iolength = pci_resource_len(dev, 0);
-
- if (!request_mem_region(nxp_pci_io_base, iolength, "ISP1761 IO MEM")) {
- printk(KERN_ERR "request region #1\n");
- return -EBUSY;
- }
-
- iobase = ioremap_nocache(nxp_pci_io_base, iolength);
- if (!iobase) {
- printk(KERN_ERR "ioremap #1\n");
- ret_status = -ENOMEM;
- goto cleanup1;
- }
- /* Grab the PLX PCI shared memory of the ISP 1761 we need */
- pci_mem_phy0 = pci_resource_start(dev, 3);
- memlength = pci_resource_len(dev, 3);
- if (memlength < 0xffff) {
- printk(KERN_ERR "memory length for this resource is wrong\n");
- ret_status = -ENOMEM;
- goto cleanup2;
- }
-
- if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
- printk(KERN_ERR "host controller already in use\n");
- ret_status = -EBUSY;
- goto cleanup2;
- }
-
- /* map available memory */
- chip_addr = ioremap_nocache(pci_mem_phy0,memlength);
- if (!chip_addr) {
- printk(KERN_ERR "Error ioremap failed\n");
- ret_status = -ENOMEM;
- goto cleanup3;
- }
-
- /* bad pci latencies can contribute to overruns */
- pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
- if (latency) {
- pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
- if (limit && limit < latency)
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
- }
-
- /* Try to check whether we can access Scratch Register of
- * Host Controller or not. The initial PCI access is retried until
- * local init for the PCI bridge is completed
- */
- retry_count = 20;
- reg_data = 0;
- while ((reg_data != 0xFACE) && retry_count) {
- /*by default host is in 16bit mode, so
- * io operations at this stage must be 16 bit
- * */
- writel(0xface, chip_addr + HC_SCRATCH_REG);
- udelay(100);
- reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
- retry_count--;
- }
-
- iounmap(chip_addr);
-
- /* Host Controller presence is detected by writing to scratch register
- * and reading back and checking the contents are same or not
- */
- if (reg_data != 0xFACE) {
- dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
- ret_status = -ENOMEM;
- goto cleanup3;
- }
-
- pci_set_master(dev);
-
- /* configure PLX PCI chip to pass interrupts */
-#define PLX_INT_CSR_REG 0x68
- reg_data = readl(iobase + PLX_INT_CSR_REG);
- reg_data |= 0x900;
- writel(reg_data, iobase + PLX_INT_CSR_REG);
-
- dev->dev.dma_mask = NULL;
- hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
- IRQF_SHARED, -ENOENT, &dev->dev, dev_name(&dev->dev),
- devflags);
- if (IS_ERR(hcd)) {
- ret_status = -ENODEV;
- goto cleanup3;
- }
-
- /* done with PLX IO access */
- iounmap(iobase);
- release_mem_region(nxp_pci_io_base, iolength);
-
- pci_set_drvdata(dev, hcd);
- return 0;
-
-cleanup3:
- release_mem_region(pci_mem_phy0, memlength);
-cleanup2:
- iounmap(iobase);
-cleanup1:
- release_mem_region(nxp_pci_io_base, iolength);
- return ret_status;
-}
-
-static void isp1761_pci_remove(struct pci_dev *dev)
-{
- struct usb_hcd *hcd;
-
- hcd = pci_get_drvdata(dev);
-
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-
- pci_disable_device(dev);
-}
-
-static void isp1761_pci_shutdown(struct pci_dev *dev)
-{
- printk(KERN_ERR "ips1761_pci_shutdown\n");
-}
-
-static const struct pci_device_id isp1760_plx [] = {
- {
- .class = PCI_CLASS_BRIDGE_OTHER << 8,
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_PLX,
- .device = 0x5406,
- .subvendor = PCI_VENDOR_ID_PLX,
- .subdevice = 0x9054,
- },
- { }
-};
-MODULE_DEVICE_TABLE(pci, isp1760_plx);
-
-static struct pci_driver isp1761_pci_driver = {
- .name = "isp1760",
- .id_table = isp1760_plx,
- .probe = isp1761_pci_probe,
- .remove = isp1761_pci_remove,
- .shutdown = isp1761_pci_shutdown,
-};
-#endif
-
-static int isp1760_plat_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct usb_hcd *hcd;
- struct resource *mem_res;
- struct resource *irq_res;
- resource_size_t mem_size;
- struct isp1760_platform_data *priv = dev_get_platdata(&pdev->dev);
- unsigned int devflags = 0;
- unsigned long irqflags = IRQF_SHARED;
-
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- pr_warning("isp1760: Memory resource not available\n");
- ret = -ENODEV;
- goto out;
- }
- mem_size = resource_size(mem_res);
- if (!request_mem_region(mem_res->start, mem_size, "isp1760")) {
- pr_warning("isp1760: Cannot reserve the memory resource\n");
- ret = -EBUSY;
- goto out;
- }
-
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- pr_warning("isp1760: IRQ resource not available\n");
- ret = -ENODEV;
- goto cleanup;
- }
-
- irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
-
- if (priv) {
- if (priv->is_isp1761)
- devflags |= ISP1760_FLAG_ISP1761;
- if (priv->bus_width_16)
- devflags |= ISP1760_FLAG_BUS_WIDTH_16;
- if (priv->port1_otg)
- devflags |= ISP1760_FLAG_OTG_EN;
- if (priv->analog_oc)
- devflags |= ISP1760_FLAG_ANALOG_OC;
- if (priv->dack_polarity_high)
- devflags |= ISP1760_FLAG_DACK_POL_HIGH;
- if (priv->dreq_polarity_high)
- devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
- }
-
- hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
- irqflags, -ENOENT,
- &pdev->dev, dev_name(&pdev->dev), devflags);
-
- platform_set_drvdata(pdev, hcd);
-
- if (IS_ERR(hcd)) {
- pr_warning("isp1760: Failed to register the HCD device\n");
- ret = -ENODEV;
- goto cleanup;
- }
-
- pr_info("ISP1760 USB device initialised\n");
- return ret;
-
-cleanup:
- release_mem_region(mem_res->start, mem_size);
-out:
- return ret;
-}
-
-static int isp1760_plat_remove(struct platform_device *pdev)
-{
- struct resource *mem_res;
- resource_size_t mem_size;
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
-
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem_size = resource_size(mem_res);
- release_mem_region(mem_res->start, mem_size);
-
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver isp1760_plat_driver = {
- .probe = isp1760_plat_probe,
- .remove = isp1760_plat_remove,
- .driver = {
- .name = "isp1760",
- },
-};
-
-static int __init isp1760_init(void)
-{
- int ret, any_ret = -ENODEV;
-
- init_kmem_once();
-
- ret = platform_driver_register(&isp1760_plat_driver);
- if (!ret)
- any_ret = 0;
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
- ret = platform_driver_register(&isp1760_of_driver);
- if (!ret)
- any_ret = 0;
-#endif
-#ifdef CONFIG_PCI
- ret = pci_register_driver(&isp1761_pci_driver);
- if (!ret)
- any_ret = 0;
-#endif
-
- if (any_ret)
- deinit_kmem_cache();
- return any_ret;
-}
-module_init(isp1760_init);
-
-static void __exit isp1760_exit(void)
-{
- platform_driver_unregister(&isp1760_plat_driver);
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
- platform_driver_unregister(&isp1760_of_driver);
-#endif
-#ifdef CONFIG_PCI
- pci_unregister_driver(&isp1761_pci_driver);
-#endif
- deinit_kmem_cache();
-}
-module_exit(isp1760_exit);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 6234c75da33f..a98833cbfcf3 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -55,6 +55,7 @@
* single thread (max3421_spi_thread).
*/
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/usb.h>
@@ -1291,7 +1292,7 @@ max3421_handle_irqs(struct usb_hcd *hcd)
char sbuf[16 * 16], *dp, *end;
int i;
- if (jiffies - last_time > 5*HZ) {
+ if (time_after(jiffies, last_time + 5*HZ)) {
dp = sbuf;
end = sbuf + sizeof(sbuf);
*dp = '\0';
@@ -1660,7 +1661,8 @@ hub_descriptor(struct usb_hub_descriptor *desc)
*/
desc->bDescriptorType = 0x29; /* hub descriptor */
desc->bDescLength = 9;
- desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
+ HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
}
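
Editor's note: in the max3421 hunk above, the open-coded "jiffies - last_time > 5*HZ" test becomes time_after(jiffies, last_time + 5*HZ), which relies on a signed difference so the comparison stays correct when the tick counter wraps. A tiny standalone demonstration of that idiom follows (plain C, not the kernel's jiffies machinery):

#include <stdio.h>

typedef unsigned long tick_t;

/* same idea as the kernel's time_after(a, b): true if a is later than b */
static int time_after(tick_t a, tick_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	tick_t last = (tick_t)-3;   /* 3 ticks before the counter wraps */
	tick_t now  = 7;            /* 10 ticks later, after wraparound */

	printf("numerically now < last : %d\n", now < last);            /* 1 */
	printf("time_after(now, last)  : %d\n", time_after(now, last)); /* 1 */
	return 0;
}
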
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index dc9e4e61f1c8..7cce85a1f7dc 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -33,7 +33,17 @@
for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
/* interface, function and usb clocks; sometimes also an AHB clock */
-static struct clk *iclk, *fclk, *uclk, *hclk;
+#define hcd_to_ohci_at91_priv(h) \
+ ((struct ohci_at91_priv *)hcd_to_ohci(h)->priv)
+
+struct ohci_at91_priv {
+ struct clk *iclk;
+ struct clk *fclk;
+ struct clk *uclk;
+ struct clk *hclk;
+ bool clocked;
+ bool wakeup; /* Saved wake-up state for resume */
+};
/* interface and function clocks; sometimes also an AHB clock */
#define DRIVER_DESC "OHCI Atmel driver"
@@ -41,45 +51,53 @@ static struct clk *iclk, *fclk, *uclk, *hclk;
static const char hcd_name[] = "ohci-atmel";
static struct hc_driver __read_mostly ohci_at91_hc_driver;
-static int clocked;
+
+static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst = {
+ .extra_priv_size = sizeof(struct ohci_at91_priv),
+};
extern int usb_disabled(void);
/*-------------------------------------------------------------------------*/
-static void at91_start_clock(void)
+static void at91_start_clock(struct ohci_at91_priv *ohci_at91)
{
+ if (ohci_at91->clocked)
+ return;
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
- clk_set_rate(uclk, 48000000);
- clk_prepare_enable(uclk);
+ clk_set_rate(ohci_at91->uclk, 48000000);
+ clk_prepare_enable(ohci_at91->uclk);
}
- clk_prepare_enable(hclk);
- clk_prepare_enable(iclk);
- clk_prepare_enable(fclk);
- clocked = 1;
+ clk_prepare_enable(ohci_at91->hclk);
+ clk_prepare_enable(ohci_at91->iclk);
+ clk_prepare_enable(ohci_at91->fclk);
+ ohci_at91->clocked = true;
}
-static void at91_stop_clock(void)
+static void at91_stop_clock(struct ohci_at91_priv *ohci_at91)
{
- clk_disable_unprepare(fclk);
- clk_disable_unprepare(iclk);
- clk_disable_unprepare(hclk);
+ if (!ohci_at91->clocked)
+ return;
+ clk_disable_unprepare(ohci_at91->fclk);
+ clk_disable_unprepare(ohci_at91->iclk);
+ clk_disable_unprepare(ohci_at91->hclk);
if (IS_ENABLED(CONFIG_COMMON_CLK))
- clk_disable_unprepare(uclk);
- clocked = 0;
+ clk_disable_unprepare(ohci_at91->uclk);
+ ohci_at91->clocked = false;
}
static void at91_start_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_regs __iomem *regs = hcd->regs;
+ struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "start\n");
/*
* Start the USB clocks.
*/
- at91_start_clock();
+ at91_start_clock(ohci_at91);
/*
* The USB host controller must remain in reset.
@@ -91,6 +109,7 @@ static void at91_stop_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_regs __iomem *regs = hcd->regs;
+ struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
@@ -102,7 +121,7 @@ static void at91_stop_hc(struct platform_device *pdev)
/*
* Stop the USB clocks.
*/
- at91_stop_clock();
+ at91_stop_clock(ohci_at91);
}
@@ -128,7 +147,8 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct at91_usbh_data *board;
struct ohci_hcd *ohci;
int retval;
- struct usb_hcd *hcd = NULL;
+ struct usb_hcd *hcd;
+ struct ohci_at91_priv *ohci_at91;
struct device *dev = &pdev->dev;
struct resource *res;
int irq;
@@ -142,6 +162,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
+ ohci_at91 = hcd_to_ohci_at91_priv(hcd);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(dev, res);
@@ -152,29 +173,29 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- iclk = devm_clk_get(dev, "ohci_clk");
- if (IS_ERR(iclk)) {
+ ohci_at91->iclk = devm_clk_get(dev, "ohci_clk");
+ if (IS_ERR(ohci_at91->iclk)) {
dev_err(dev, "failed to get ohci_clk\n");
- retval = PTR_ERR(iclk);
+ retval = PTR_ERR(ohci_at91->iclk);
goto err;
}
- fclk = devm_clk_get(dev, "uhpck");
- if (IS_ERR(fclk)) {
+ ohci_at91->fclk = devm_clk_get(dev, "uhpck");
+ if (IS_ERR(ohci_at91->fclk)) {
dev_err(dev, "failed to get uhpck\n");
- retval = PTR_ERR(fclk);
+ retval = PTR_ERR(ohci_at91->fclk);
goto err;
}
- hclk = devm_clk_get(dev, "hclk");
- if (IS_ERR(hclk)) {
+ ohci_at91->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(ohci_at91->hclk)) {
dev_err(dev, "failed to get hclk\n");
- retval = PTR_ERR(hclk);
+ retval = PTR_ERR(ohci_at91->hclk);
goto err;
}
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
- uclk = devm_clk_get(dev, "usb_clk");
- if (IS_ERR(uclk)) {
+ ohci_at91->uclk = devm_clk_get(dev, "usb_clk");
+ if (IS_ERR(ohci_at91->uclk)) {
dev_err(dev, "failed to get uclk\n");
- retval = PTR_ERR(uclk);
+ retval = PTR_ERR(ohci_at91->uclk);
goto err;
}
}
@@ -347,11 +368,13 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
*/
desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
- desc->wHubCharacteristics |= cpu_to_le16(0x0001);
+ desc->wHubCharacteristics |=
+ cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM);
if (pdata->overcurrent_supported) {
desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
- desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001);
+ desc->wHubCharacteristics |=
+ cpu_to_le16(HUB_CHAR_INDV_PORT_OCPM);
}
dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
@@ -593,19 +616,27 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int
-ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
+ohci_hcd_at91_drv_suspend(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- bool do_wakeup = device_may_wakeup(&pdev->dev);
+ struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
int ret;
- if (do_wakeup)
+ /*
+ * Disable wakeup if we are going to sleep with slow clock mode
+ * enabled.
+ */
+ ohci_at91->wakeup = device_may_wakeup(dev)
+ && !at91_suspend_entering_slow_clock();
+
+ if (ohci_at91->wakeup)
enable_irq_wake(hcd->irq);
- ret = ohci_suspend(hcd, do_wakeup);
+ ret = ohci_suspend(hcd, ohci_at91->wakeup);
if (ret) {
- disable_irq_wake(hcd->irq);
+ if (ohci_at91->wakeup)
+ disable_irq_wake(hcd->irq);
return ret;
}
/*
@@ -615,7 +646,7 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
*
* REVISIT: some boards will be able to turn VBUS off...
*/
- if (at91_suspend_entering_slow_clock()) {
+ if (!ohci_at91->wakeup) {
ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
@@ -623,38 +654,37 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
- at91_stop_clock();
+ at91_stop_clock(ohci_at91);
}
return ret;
}
-static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
+static int ohci_hcd_at91_drv_resume(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
- if (device_may_wakeup(&pdev->dev))
+ if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
- if (!clocked)
- at91_start_clock();
+ at91_start_clock(ohci_at91);
ohci_resume(hcd, false);
return 0;
}
-#else
-#define ohci_hcd_at91_drv_suspend NULL
-#define ohci_hcd_at91_drv_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend,
+ ohci_hcd_at91_drv_resume);
+
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
.remove = ohci_hcd_at91_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- .suspend = ohci_hcd_at91_drv_suspend,
- .resume = ohci_hcd_at91_drv_resume,
.driver = {
.name = "at91_ohci",
+ .pm = &ohci_hcd_at91_pm_ops,
.of_match_table = of_match_ptr(at91_ohci_dt_ids),
},
};
@@ -665,7 +695,7 @@ static int __init ohci_at91_init(void)
return -ENODEV;
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- ohci_init_driver(&ohci_at91_hc_driver, NULL);
+ ohci_init_driver(&ohci_at91_hc_driver, &ohci_at91_drv_overrides);
/*
* The Atmel HW has some unusual quirks, which require Atmel-specific
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 1c76999b2184..e5c33bc98ea4 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -431,7 +431,6 @@ static struct platform_driver ohci_hcd_da8xx_driver = {
.resume = ohci_da8xx_resume,
#endif
.driver = {
- .owner = THIS_MODULE,
.name = "ohci",
},
};
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 0aa17c937115..fe2aedd8a54d 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -544,15 +544,15 @@ ohci_hub_descriptor (
temp = 1 + (ohci->num_ports / 8);
desc->bDescLength = 7 + 2 * temp;
- temp = 0;
+ temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
if (rh & RH_A_NPS) /* no power switching? */
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
if (rh & RH_A_PSM) /* per-port power switching? */
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
if (rh & RH_A_NOCP) /* no overcurrent reporting? */
- temp |= 0x0010;
+ temp |= HUB_CHAR_NO_OCPM;
else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? */
- temp |= 0x0008;
+ temp |= HUB_CHAR_INDV_PORT_OCPM;
desc->wHubCharacteristics = cpu_to_le16(temp);
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 8ddd8f5470cb..4db78f169256 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -239,7 +239,6 @@ static struct platform_driver ohci_hcd_jz4740_driver = {
.remove = jz4740_ohci_remove,
.driver = {
.name = "jz4740-ohci",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index b81d202b15a2..185ceee52d47 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -38,7 +38,8 @@
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
struct reset_control *rst;
- struct phy *phy;
+ struct phy **phys;
+ int num_phys;
};
static const char hcd_name[] = "ohci-platform";
@@ -47,7 +48,7 @@ static int ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
- int clk, ret;
+ int clk, ret, phy_num;
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++) {
ret = clk_prepare_enable(priv->clks[clk]);
@@ -55,20 +56,28 @@ static int ohci_platform_power_on(struct platform_device *dev)
goto err_disable_clks;
}
- if (priv->phy) {
- ret = phy_init(priv->phy);
- if (ret)
- goto err_disable_clks;
-
- ret = phy_power_on(priv->phy);
- if (ret)
- goto err_exit_phy;
+ for (phy_num = 0; phy_num < priv->num_phys; phy_num++) {
+ if (priv->phys[phy_num]) {
+ ret = phy_init(priv->phys[phy_num]);
+ if (ret)
+ goto err_exit_phy;
+ ret = phy_power_on(priv->phys[phy_num]);
+ if (ret) {
+ phy_exit(priv->phys[phy_num]);
+ goto err_exit_phy;
+ }
+ }
}
return 0;
err_exit_phy:
- phy_exit(priv->phy);
+ while (--phy_num >= 0) {
+ if (priv->phys[phy_num]) {
+ phy_power_off(priv->phys[phy_num]);
+ phy_exit(priv->phys[phy_num]);
+ }
+ }
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(priv->clks[clk]);
@@ -80,11 +89,13 @@ static void ohci_platform_power_off(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
- int clk;
+ int clk, phy_num;
- if (priv->phy) {
- phy_power_off(priv->phy);
- phy_exit(priv->phy);
+ for (phy_num = 0; phy_num < priv->num_phys; phy_num++) {
+ if (priv->phys[phy_num]) {
+ phy_power_off(priv->phys[phy_num]);
+ phy_exit(priv->phys[phy_num]);
+ }
}
for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
@@ -112,7 +123,8 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv;
struct ohci_hcd *ohci;
- int err, irq, clk = 0;
+ const char *phy_name;
+ int err, irq, phy_num, clk = 0;
if (usb_disabled())
return -ENODEV;
@@ -160,12 +172,38 @@ static int ohci_platform_probe(struct platform_device *dev)
of_property_read_u32(dev->dev.of_node, "num-ports",
&ohci->num_ports);
- priv->phy = devm_phy_get(&dev->dev, "usb");
- if (IS_ERR(priv->phy)) {
- err = PTR_ERR(priv->phy);
- if (err == -EPROBE_DEFER)
- goto err_put_hcd;
- priv->phy = NULL;
+ priv->num_phys = of_count_phandle_with_args(dev->dev.of_node,
+ "phys", "#phy-cells");
+ priv->num_phys = priv->num_phys > 0 ? priv->num_phys : 1;
+
+ priv->phys = devm_kcalloc(&dev->dev, priv->num_phys,
+ sizeof(struct phy *), GFP_KERNEL);
+ if (!priv->phys)
+ return -ENOMEM;
+
+ for (phy_num = 0; phy_num < priv->num_phys; phy_num++) {
+ err = of_property_read_string_index(
+ dev->dev.of_node,
+ "phy-names", phy_num,
+ &phy_name);
+
+ if (err < 0) {
+ if (priv->num_phys > 1) {
+ dev_err(&dev->dev, "phy-names not provided");
+ goto err_put_hcd;
+ } else
+ phy_name = "usb";
+ }
+
+ priv->phys[phy_num] = devm_phy_get(&dev->dev,
+ phy_name);
+ if (IS_ERR(priv->phys[phy_num])) {
+ err = PTR_ERR(priv->phys[phy_num]);
+ if ((priv->num_phys > 1) ||
+ (err == -EPROBE_DEFER))
+ goto err_put_hcd;
+ priv->phys[phy_num] = NULL;
+ }
}
for (clk = 0; clk < OHCI_MAX_CLKS; clk++) {
@@ -328,6 +366,7 @@ static int ohci_platform_resume(struct device *dev)
static const struct of_device_id ohci_platform_ids[] = {
{ .compatible = "generic-ohci", },
+ { .compatible = "cavium,octeon-6335-ohci", },
{ }
};
MODULE_DEVICE_TABLE(of, ohci_platform_ids);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 965e3e9e688a..4f87a5c61b08 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -229,7 +229,6 @@ static struct platform_driver ohci_hcd_ppc_of_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ohci",
- .owner = THIS_MODULE,
.of_match_table = ohci_hcd_ppc_of_match,
},
};
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 095113ea1fcb..7a1919ca543a 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -249,14 +249,14 @@ static int ohci_s3c2410_hub_control(
*/
desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
- desc->wHubCharacteristics |= cpu_to_le16(0x0001);
+ desc->wHubCharacteristics |= cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM);
if (info->enable_oc) {
desc->wHubCharacteristics &= ~cpu_to_le16(
HUB_CHAR_OCPM);
desc->wHubCharacteristics |= cpu_to_le16(
- 0x0008 |
- 0x0001);
+ HUB_CHAR_INDV_PORT_OCPM);
}
dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
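The magic wHubCharacteristics values being replaced throughout these hub-descriptor hunks correspond to the USB 2.0 chapter 11 bit fields; the named constants live in include/uapi/linux/usb/ch11.h. An illustrative mapping (hypothetical helper, not part of the patch):

#include <linux/types.h>
#include <linux/usb/ch11.h>

static u16 example_hub_characteristics(bool per_port_power, bool per_port_oc)
{
	u16 temp = 0;

	/* Logical Power Switching Mode, bits 1:0 */
	temp |= per_port_power ? HUB_CHAR_INDV_PORT_LPSM	/* 0x0001 */
			       : HUB_CHAR_NO_LPSM;		/* 0x0002 */
	/* Over-current Protection Mode, bits 4:3 */
	temp |= per_port_oc    ? HUB_CHAR_INDV_PORT_OCPM	/* 0x0008 */
			       : HUB_CHAR_NO_OCPM;		/* 0x0010 */
	return temp;
}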
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 4e81c804c73e..a8b8d8b8d9f3 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -265,7 +265,6 @@ static struct platform_driver ohci_hcd_sm501_driver = {
.suspend = ohci_sm501_suspend,
.resume = ohci_sm501_resume,
.driver = {
- .owner = THIS_MODULE,
.name = "sm501-usb",
},
};
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index bef6dfb0405a..e1b208da460a 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -199,7 +199,6 @@ static struct platform_driver ohci_hcd_tilegx_driver = {
.shutdown = ohci_hcd_tilegx_drv_shutdown,
.driver = {
.name = "tilegx-ohci",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index bb409588d39c..e9a6eec39142 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -368,6 +368,5 @@ static struct platform_driver ohci_hcd_tmio_driver = {
.resume = ohci_hcd_tmio_drv_resume,
.driver = {
.name = "tmio-ohci",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 036924e640f5..ef7efb278b15 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -457,11 +457,11 @@ static void ehci_hub_descriptor(struct oxu_hcd *oxu,
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
- temp = 0x0008; /* per-port overcurrent reporting */
+ temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
if (HCS_PPC(oxu->hcs_params))
- temp |= 0x0001; /* per-port power control */
+ temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
else
- temp |= 0x0002; /* no power switching */
+ temp |= HUB_CHAR_NO_LPSM; /* no power switching */
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
@@ -2598,9 +2598,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
spin_lock_init(&oxu->lock);
- init_timer(&oxu->watchdog);
- oxu->watchdog.function = oxu_watchdog;
- oxu->watchdog.data = (unsigned long) oxu;
+ setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
/*
* hw default: 1K periodic list heads, one per frame.
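Several hunks in this series (oxu210hp, r8a66597, sl811, xhci, isp1760) perform the same mechanical conversion from open-coded init_timer() to setup_timer(). A minimal sketch of the equivalence, using the timer API of this kernel generation (hypothetical names):

#include <linux/timer.h>

static void example_timeout(unsigned long data)
{
	/* 'data' carries the pointer handed to setup_timer() below */
}

static void example_init_timer(struct timer_list *t, void *ctx)
{
	/*
	 * Old style:
	 *	init_timer(t);
	 *	t->function = example_timeout;
	 *	t->data = (unsigned long)ctx;
	 */
	setup_timer(t, example_timeout, (unsigned long)ctx);
}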
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index dd483c13565b..f9400564cb72 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
- u32 fminterval;
+ u32 fminterval = 0;
+ bool no_fminterval = false;
int cnt;
if (!mmio_resource_enabled(pdev, 0))
@@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
if (base == NULL)
return;
+ /*
+ * ULi M5237 OHCI controller locks the whole system when accessing
+ * the OHCI_FMINTERVAL offset.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+ no_fminterval = true;
+
control = readl(base + OHCI_CONTROL);
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -595,9 +603,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
msleep(10);
}
if (wait_time <= 0)
- dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
- " (BIOS bug?) %08x\n",
- readl(base + OHCI_CONTROL));
+ dev_warn(&pdev->dev,
+ "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
+ readl(base + OHCI_CONTROL));
}
#endif
@@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
}
/* software reset of the controller, preserving HcFmInterval */
- fminterval = readl(base + OHCI_FMINTERVAL);
+ if (!no_fminterval)
+ fminterval = readl(base + OHCI_FMINTERVAL);
+
writel(OHCI_HCR, base + OHCI_CMDSTATUS);
/* reset requires max 10 us delay */
@@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
break;
udelay(1);
}
- writel(fminterval, base + OHCI_FMINTERVAL);
+
+ if (!no_fminterval)
+ writel(fminterval, base + OHCI_FMINTERVAL);
/* Now the controller is safely in SUSPEND and nothing can wake it up */
iounmap(base);
@@ -721,8 +733,9 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
* and hope nothing goes too wrong
*/
if (try_handoff)
- dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
- " (BIOS bug?) %08x\n", cap);
+ dev_warn(&pdev->dev,
+ "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
+ cap);
pci_write_config_byte(pdev, offset + 2, 0);
}
@@ -769,8 +782,9 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
case 0: /* Illegal reserved cap, set cap=0 so we exit */
cap = 0; /* then fallthrough... */
default:
- dev_warn(&pdev->dev, "EHCI: unrecognized capability "
- "%02x\n", cap & 0xff);
+ dev_warn(&pdev->dev,
+ "EHCI: unrecognized capability %02x\n",
+ cap & 0xff);
}
offset = (cap >> 8) & 0xff;
}
@@ -881,8 +895,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
*/
if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
dev_warn(&xhci_pdev->dev,
- "CONFIG_USB_XHCI_HCD is turned off, "
- "defaulting to EHCI.\n");
+ "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
dev_warn(&xhci_pdev->dev,
"USB 3.0 devices will work at USB 2.0 speeds.\n");
usb_disable_xhci_ports(xhci_pdev);
@@ -907,8 +920,9 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
&ports_available);
- dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
- "under xHCI: 0x%x\n", ports_available);
+ dev_dbg(&xhci_pdev->dev,
+ "USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
+ ports_available);
/* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
* Indicate the USB 2.0 ports to be controlled by the xHCI host.
@@ -929,8 +943,9 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
&ports_available);
- dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
- "to xHCI: 0x%x\n", ports_available);
+ dev_dbg(&xhci_pdev->dev,
+ "USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
+ ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
@@ -998,8 +1013,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
- dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
- " (BIOS bug ?) %08x\n", val);
+ dev_warn(&pdev->dev,
+ "xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
+ val);
writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
}
}
@@ -1027,8 +1043,8 @@ hc_init:
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
dev_warn(&pdev->dev,
- "xHCI HW not ready after 5 sec (HC bug?) "
- "status = 0x%x\n", val);
+ "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
+ val);
}
/* Send the halt and disable interrupts command */
@@ -1042,8 +1058,8 @@ hc_init:
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
dev_warn(&pdev->dev,
- "xHCI HW did not halt within %d usec "
- "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
+ "xHCI HW did not halt within %d usec status = 0x%x\n",
+ XHCI_MAX_HALT_USEC, val);
}
iounmap(base);
@@ -1063,8 +1079,8 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
return;
if (pci_enable_device(pdev) < 0) {
- dev_warn(&pdev->dev, "Can't enable PCI device, "
- "BIOS handoff failed.\n");
+ dev_warn(&pdev->dev,
+ "Can't enable PCI device, BIOS handoff failed.\n");
return;
}
if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
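The new no_fminterval flag above simply gates the HcFmInterval save/restore around the controller reset for one known-bad part. A sketch of the detection, with PCI_VENDOR_ID_AL taken from <linux/pci_ids.h> and the device ID from the hunk itself (hypothetical helper name):

#include <linux/pci.h>

static bool example_skip_fminterval(struct pci_dev *pdev)
{
	/*
	 * Accessing OHCI_FMINTERVAL on the ULi M5237 can lock up the whole
	 * system, so the handoff path neither saves nor restores it there.
	 */
	return pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237;
}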
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index c4bcfaedeec9..bdc82fea0a1f 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2141,7 +2141,8 @@ static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597,
desc->bNbrPorts = r8a66597->max_root_hub;
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
- desc->wHubCharacteristics = cpu_to_le16(0x0011);
+ desc->wHubCharacteristics =
+ cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM);
desc->u.hs.DeviceRemovable[0] =
((1 << r8a66597->max_root_hub) - 1) << 1;
desc->u.hs.DeviceRemovable[1] = ~0;
@@ -2483,9 +2484,8 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->max_root_hub = 2;
spin_lock_init(&r8a66597->lock);
- init_timer(&r8a66597->rh_timer);
- r8a66597->rh_timer.function = r8a66597_timer;
- r8a66597->rh_timer.data = (unsigned long)r8a66597;
+ setup_timer(&r8a66597->rh_timer, r8a66597_timer,
+ (unsigned long)r8a66597);
r8a66597->reg = reg;
/* make sure no interrupts are pending */
@@ -2496,9 +2496,8 @@ static int r8a66597_probe(struct platform_device *pdev)
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
- init_timer(&r8a66597->td_timer[i]);
- r8a66597->td_timer[i].function = r8a66597_td_timer;
- r8a66597->td_timer[i].data = (unsigned long)r8a66597;
+ setup_timer(&r8a66597->td_timer[i], r8a66597_td_timer,
+ (unsigned long)r8a66597);
setup_timer(&r8a66597->interval_timer[i],
r8a66597_interval_timer,
(unsigned long)r8a66597);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 25fb1da8d3d7..4f4ba1ea9e9b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1103,12 +1103,12 @@ sl811h_hub_descriptor (
desc->bPwrOn2PwrGood = sl811->board->potpg;
if (!desc->bPwrOn2PwrGood)
desc->bPwrOn2PwrGood = 10;
- temp = 0x0001;
+ temp = HUB_CHAR_INDV_PORT_LPSM;
} else
- temp = 0x0002;
+ temp = HUB_CHAR_NO_LPSM;
/* no overcurrent errors detection/handling */
- temp |= 0x0010;
+ temp |= HUB_CHAR_NO_OCPM;
desc->wHubCharacteristics = cpu_to_le16(temp);
@@ -1691,9 +1691,7 @@ sl811h_probe(struct platform_device *dev)
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
sl811->board = dev_get_platdata(&dev->dev);
- init_timer(&sl811->timer);
- sl811->timer.function = sl811h_timer;
- sl811->timer.data = (unsigned long) sl811;
+ setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
sl811->addr_reg = addr_reg;
sl811->data_reg = data_reg;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 2894e54e5b9c..ad97e8a1ad1c 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2590,15 +2590,15 @@ static int u132_roothub_descriptor(struct u132 *u132,
desc->bNbrPorts = u132->num_ports;
temp = 1 + (u132->num_ports / 8);
desc->bDescLength = 7 + 2 * temp;
- temp = 0;
+ temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
if (rh_a & RH_A_NPS)
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
if (rh_a & RH_A_PSM)
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
if (rh_a & RH_A_NOCP)
- temp |= 0x0010;
+ temp |= HUB_CHAR_NO_OCPM;
else if (rh_a & RH_A_OCPM)
- temp |= 0x0008;
+ temp |= HUB_CHAR_INDV_PORT_OCPM;
desc->wHubCharacteristics = cpu_to_le16(temp);
retval = u132_read_pcimem(u132, roothub.b, &rh_b);
if (retval)
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index 05f57ffdf9ab..0342991c9507 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -188,7 +188,6 @@ static struct platform_driver uhci_grlib_driver = {
.shutdown = uhci_hcd_grlib_shutdown,
.driver = {
.name = "grlib-uhci",
- .owner = THIS_MODULE,
.of_match_table = uhci_hcd_grlib_of_match,
},
};
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 93e17b12fb33..19ba5eafb31e 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -17,8 +17,9 @@ static const __u8 root_hub_hub_des[] =
0x09, /* __u8 bLength; */
0x29, /* __u8 bDescriptorType; Hub-descriptor */
0x02, /* __u8 bNbrPorts; */
- 0x0a, /* __u16 wHubCharacteristics; */
- 0x00, /* (per-port OC, no power switching) */
+ HUB_CHAR_NO_LPSM | /* __u16 wHubCharacteristics; */
+ HUB_CHAR_INDV_PORT_OCPM, /* (per-port OC, no power switching) */
+ 0x00,
0x01, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
0x00, /* __u8 DeviceRemovable; *** 7 Ports max */
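Because the UHCI root-hub descriptor above is a raw byte array, the 16-bit wHubCharacteristics field is split across two little-endian bytes: the HUB_CHAR_* flags used here all fit in the low byte and the high byte stays zero. An illustrative equivalent (not part of the patch):

#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>

static const u8 example_root_hub_descriptor[] = {
	9,				/* bLength */
	USB_DT_HUB,			/* bDescriptorType, 0x29 */
	2,				/* bNbrPorts */
	HUB_CHAR_NO_LPSM |		/* wHubCharacteristics, low byte */
	HUB_CHAR_INDV_PORT_OCPM,
	0x00,				/* wHubCharacteristics, high byte */
	1,				/* bPwrOn2PwrGood, 2 ms */
	0,				/* bHubContrCurrent */
	0x00,				/* DeviceRemovable */
	0xff,				/* PortPwrCtrlMask */
};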
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index cf8f46003f62..3a3e3eeba291 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -147,7 +147,6 @@ static struct platform_driver uhci_platform_driver = {
.shutdown = uhci_hcd_platform_shutdown,
.driver = {
.name = "platform-uhci",
- .owner = THIS_MODULE,
.of_match_table = platform_uhci_ids,
},
};
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index ba61dae9e4d2..774b89d28fae 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -86,17 +86,14 @@ static void qset_print(struct seq_file *s, struct whc_qset *qset)
static int di_print(struct seq_file *s, void *p)
{
struct whc *whc = s->private;
- char buf[72];
int d;
for (d = 0; d < whc->n_devices; d++) {
struct di_buf_entry *di = &whc->di_buf[d];
- bitmap_scnprintf(buf, sizeof(buf),
- (unsigned long *)di->availability_info, UWB_NUM_MAS);
-
seq_printf(s, "DI[%d]\n", d);
- seq_printf(s, " availability: %s\n", buf);
+ seq_printf(s, " availability: %*pb\n",
+ UWB_NUM_MAS, (unsigned long *)di->availability_info);
seq_printf(s, " %c%c key idx: %d dev addr: %d\n",
(di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
(di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
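The whci/debug.c change uses the bitmap printk extension that makes the temporary buffer plus bitmap_scnprintf() unnecessary: "%*pb" takes the number of bits as the field width and the bitmap pointer as the argument. A minimal usage sketch (hypothetical function name):

#include <linux/seq_file.h>
#include <linux/types.h>

static void example_show_bitmap(struct seq_file *s,
				const unsigned long *map, int nbits)
{
	/* Prints the first 'nbits' bits of 'map' in bitmap list notation. */
	seq_printf(s, "availability: %*pb\n", nbits, map);
}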
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index bb89175ca6e5..745717ec9c89 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -552,7 +552,7 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
if (ctx->type == XHCI_CTX_TYPE_INPUT) {
struct xhci_input_control_ctx *ctrl_ctx =
- xhci_get_input_control_ctx(xhci, ctx);
+ xhci_get_input_control_ctx(ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n");
return;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 5cb3d7a10017..f8336408ef07 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -535,7 +535,7 @@ static void xhci_free_container_ctx(struct xhci_hcd *xhci,
kfree(ctx);
}
-struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(
struct xhci_container_ctx *ctx)
{
if (ctx->type != XHCI_CTX_TYPE_INPUT)
@@ -784,8 +784,7 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
* Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
* not at the beginning of the ring).
*/
-void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
- struct xhci_ep_ctx *ep_ctx,
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep)
{
dma_addr_t addr;
@@ -833,9 +832,8 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
- init_timer(&ep->stop_cmd_timer);
- ep->stop_cmd_timer.data = (unsigned long) ep;
- ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
+ setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
+ (unsigned long)ep);
ep->xhci = xhci;
}
@@ -1342,8 +1340,7 @@ static u32 xhci_get_endpoint_mult(struct usb_device *udev,
return ep->ss_ep_comp.bmAttributes;
}
-static u32 xhci_get_endpoint_type(struct usb_device *udev,
- struct usb_host_endpoint *ep)
+static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
int in;
u32 type;
@@ -1376,8 +1373,7 @@ static u32 xhci_get_endpoint_type(struct usb_device *udev,
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
-static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
- struct usb_device *udev,
+static u32 xhci_get_max_esit_payload(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int max_burst;
@@ -1418,7 +1414,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- endpoint_type = xhci_get_endpoint_type(udev, ep);
+ endpoint_type = xhci_get_endpoint_type(ep);
if (!endpoint_type)
return -EINVAL;
ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
@@ -1484,7 +1480,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
}
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
MAX_BURST(max_burst));
- max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ max_esit_payload = xhci_get_max_esit_payload(udev, ep);
ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
/*
@@ -1773,7 +1769,7 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
return command;
}
-void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
if (urb_priv) {
kfree(urb_priv->td[0]);
@@ -1926,7 +1922,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
struct {
dma_addr_t input_dma;
@@ -2452,7 +2448,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
flags);
if (!xhci->event_ring)
goto fail;
- if (xhci_check_trb_in_td_math(xhci, flags) < 0)
+ if (xhci_check_trb_in_td_math(xhci) < 0)
goto fail;
xhci->erst.entries = dma_alloc_coherent(dev,
@@ -2509,9 +2505,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_print_ir_set(xhci, 0);
/* init command timeout timer */
- init_timer(&xhci->cmd_timer);
- xhci->cmd_timer.data = (unsigned long) xhci;
- xhci->cmd_timer.function = xhci_handle_command_timeout;
+ setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+ (unsigned long)xhci);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 142b601f9563..7f76c8a12f89 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"must be suspended extra slowly",
pdev->revision);
}
+ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
/* Fresco Logic confirms: all revisions of this chip do not
* support MSI, even though some of them claim to in their PCI
* capabilities.
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e692e769c50c..88da8d629820 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -299,7 +299,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
* seconds), then it should assume that the there are
* larger problems with the xHC and assert HCRST.
*/
- ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
+ ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
xhci_err(xhci, "Stopped the command ring failed, "
@@ -609,7 +609,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
- xhci_urb_free_priv(xhci, urb_priv);
+ xhci_urb_free_priv(urb_priv);
spin_lock(&xhci->lock);
}
}
@@ -1110,7 +1110,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
* is not waiting on the configure endpoint command.
*/
virt_dev = xhci->devs[slot_id];
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n");
return;
@@ -2354,8 +2354,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
status = 0;
break;
}
- xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
- "busted\n");
+ xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
+ trb_comp_code);
goto cleanup;
}
@@ -2497,7 +2497,7 @@ cleanup:
urb = td->urb;
urb_priv = urb->hcpriv;
- xhci_urb_free_priv(xhci, urb_priv);
+ xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
if ((urb->actual_length != urb->transfer_buffer_length &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 01fcbb5eb06e..ec8ac1674854 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -60,8 +60,7 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*/
-int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
u32 result;
@@ -111,7 +110,7 @@ int xhci_halt(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
xhci_quiesce(xhci);
- ret = xhci_handshake(xhci, &xhci->op_regs->status,
+ ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
if (!ret) {
xhci->xhc_state |= XHCI_STATE_HALTED;
@@ -140,7 +139,7 @@ static int xhci_start(struct xhci_hcd *xhci)
* Wait for the HCHalted Status bit to be 0 to indicate the host is
* running.
*/
- ret = xhci_handshake(xhci, &xhci->op_regs->status,
+ ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, 0, XHCI_MAX_HALT_USEC);
if (ret == -ETIMEDOUT)
xhci_err(xhci, "Host took too long to start, "
@@ -175,7 +174,7 @@ int xhci_reset(struct xhci_hcd *xhci)
command |= CMD_RESET;
writel(command, &xhci->op_regs->command);
- ret = xhci_handshake(xhci, &xhci->op_regs->command,
+ ret = xhci_handshake(&xhci->op_regs->command,
CMD_RESET, 0, 10 * 1000 * 1000);
if (ret)
return ret;
@@ -186,7 +185,7 @@ int xhci_reset(struct xhci_hcd *xhci)
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
- ret = xhci_handshake(xhci, &xhci->op_regs->status,
+ ret = xhci_handshake(&xhci->op_regs->status,
STS_CNR, 0, 10 * 1000 * 1000);
for (i = 0; i < 2; ++i) {
@@ -473,10 +472,8 @@ static void compliance_mode_recovery(unsigned long arg)
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
xhci->port_status_u0 = 0;
- init_timer(&xhci->comp_mode_recovery_timer);
-
- xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
- xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
+ setup_timer(&xhci->comp_mode_recovery_timer,
+ compliance_mode_recovery, (unsigned long)xhci);
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
@@ -929,7 +926,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
/* Some chips from Fresco Logic need an extraordinary delay */
delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
- if (xhci_handshake(xhci, &xhci->op_regs->status,
+ if (xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, delay)) {
xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
spin_unlock_irq(&xhci->lock);
@@ -944,7 +941,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
command = readl(&xhci->op_regs->command);
command |= CMD_CSS;
writel(command, &xhci->op_regs->command);
- if (xhci_handshake(xhci, &xhci->op_regs->status,
+ if (xhci_handshake(&xhci->op_regs->status,
STS_SAVE, 0, 10 * 1000)) {
xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
@@ -1011,7 +1008,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
writel(command, &xhci->op_regs->command);
- if (xhci_handshake(xhci, &xhci->op_regs->status,
+ if (xhci_handshake(&xhci->op_regs->status,
STS_RESTORE, 0, 10 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
@@ -1082,7 +1079,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command = readl(&xhci->op_regs->command);
command |= CMD_RUN;
writel(command, &xhci->op_regs->command);
- xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
+ xhci_handshake(&xhci->op_regs->status, STS_HALT,
0, 250 * 1000);
/* step 5: walk topology and initialize portsc,
@@ -1276,7 +1273,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
return -ENOMEM;
command->in_ctx = xhci->devs[slot_id]->in_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -1374,7 +1371,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = xhci_check_maxpacket(xhci, slot_id,
ep_index, urb);
if (ret < 0) {
- xhci_urb_free_priv(xhci, urb_priv);
+ xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
return ret;
}
@@ -1440,7 +1437,7 @@ dying:
urb->ep->desc.bEndpointAddress, urb);
ret = -ESHUTDOWN;
free_priv:
- xhci_urb_free_priv(xhci, urb_priv);
+ xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
@@ -1553,7 +1550,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
- xhci_urb_free_priv(xhci, urb_priv);
+ xhci_urb_free_priv(urb_priv);
return ret;
}
if ((xhci->xhc_state & XHCI_STATE_DYING) ||
@@ -1660,7 +1657,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
in_ctx = xhci->devs[udev->slot_id]->in_ctx;
out_ctx = xhci->devs[udev->slot_id]->out_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -1676,8 +1673,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
cpu_to_le32(EP_STATE_DISABLED)) ||
le32_to_cpu(ctrl_ctx->drop_flags) &
xhci_get_endpoint_flag(&ep->desc)) {
- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
- __func__, ep);
+ /* Do not warn when called after a usb_device_reset */
+ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ __func__, ep);
return 0;
}
@@ -1714,7 +1713,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
- struct xhci_container_ctx *in_ctx, *out_ctx;
+ struct xhci_container_ctx *in_ctx;
unsigned int ep_index;
struct xhci_input_control_ctx *ctrl_ctx;
u32 added_ctxs;
@@ -1745,8 +1744,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
virt_dev = xhci->devs[udev->slot_id];
in_ctx = virt_dev->in_ctx;
- out_ctx = virt_dev->out_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -1758,8 +1756,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* to add it again without dropping it, reject the addition.
*/
if (virt_dev->eps[ep_index].ring &&
- !(le32_to_cpu(ctrl_ctx->drop_flags) &
- xhci_get_endpoint_flag(&ep->desc))) {
+ !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
xhci_warn(xhci, "Trying to add endpoint 0x%x "
"without dropping it.\n",
(unsigned int) ep->desc.bEndpointAddress);
@@ -1769,8 +1766,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* If the HCD has already noted the endpoint is enabled,
* ignore this request.
*/
- if (le32_to_cpu(ctrl_ctx->add_flags) &
- xhci_get_endpoint_flag(&ep->desc)) {
+ if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
__func__, ep);
return 0;
@@ -1816,7 +1812,7 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
struct xhci_slot_ctx *slot_ctx;
int i;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -2542,7 +2538,7 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
if (virt_dev->tt_info)
old_active_eps = virt_dev->tt_info->active_eps;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -2639,7 +2635,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
- ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -2758,7 +2754,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
command->in_ctx = virt_dev->in_ctx;
/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
- ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -2883,7 +2879,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
dma_addr_t addr;
in_ctx = xhci->devs[slot_id]->in_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -3173,7 +3169,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
}
- ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -3328,7 +3324,7 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
*/
ep_index = xhci_get_endpoint_index(&eps[0]->desc);
command = vdev->eps[ep_index].stream_info->free_streams_command;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -3346,7 +3342,7 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_endpoint_copy(xhci, command->in_ctx,
vdev->out_ctx, ep_index);
- xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
+ xhci_setup_no_streams_ep_input_ctx(ep_ctx,
&vdev->eps[ep_index]);
}
xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
@@ -3803,6 +3799,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
return -EINVAL;
}
+ if (setup == SETUP_CONTEXT_ONLY) {
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+ SLOT_STATE_DEFAULT) {
+ xhci_dbg(xhci, "Slot already in default state\n");
+ return 0;
+ }
+ }
+
command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -3811,7 +3816,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
command->completion = &xhci->addr_dev;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -3994,7 +3999,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
/* Attempt to issue an Evaluate Context command to change the MEL. */
command = xhci->lpm_command;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -4732,7 +4737,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
}
- ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
@@ -4901,6 +4906,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (retval)
goto error;
xhci_dbg(xhci, "Called HCD init\n");
+
+ xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
+ xhci->hcc_params, xhci->hci_version, xhci->quirks);
+
return 0;
error:
kfree(xhci);
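The many xhci_handshake() callers above only lose the now-unused xhci argument; the helper itself is a plain MMIO polling loop. A self-contained sketch of that idea, assuming microsecond-granularity polling (hypothetical name, not the exact xHCI implementation):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int example_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* device was removed */
			return -ENODEV;
		if ((result & mask) == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);

	return -ETIMEDOUT;
}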
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index cc7c5bb7cbcf..974514762a14 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn_ratelimited(xhci, fmt, args...) \
dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_info(xhci, fmt, args...) \
+ dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
/*
* Registers should always be accessed with double word or quad word accesses.
@@ -1712,8 +1714,7 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_stream_info *stream_info);
-void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
- struct xhci_ep_ctx *ep_ctx,
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep);
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev, bool drop_control_ep);
@@ -1727,14 +1728,13 @@ struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags);
-void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
+void xhci_urb_free_priv(struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
-int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
- u32 mask, u32 done, int usec);
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
@@ -1864,7 +1864,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
-struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx);
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 37b44b04a701..6431d08c8d9d 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -299,9 +299,7 @@ static inline void mts_show_command(struct scsi_cmnd *srb)
MTS_DEBUG( "Command %s (%d bytes)\n", what, srb->cmd_len);
out:
- MTS_DEBUG( " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- srb->cmnd[0], srb->cmnd[1], srb->cmnd[2], srb->cmnd[3], srb->cmnd[4], srb->cmnd[5],
- srb->cmnd[6], srb->cmnd[7], srb->cmnd[8], srb->cmnd[9]);
+ MTS_DEBUG( " %10ph\n", srb->cmnd);
}
#else
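The microtek.c cleanup relies on the small-buffer hex-dump extension: "%Nph" prints N bytes (up to 64) as space-separated hex, so the ten explicit "%02x" specifiers collapse into one. A usage sketch (hypothetical function name):

#include <linux/printk.h>
#include <linux/types.h>

static void example_dump_cdb(const u8 *cdb)
{
	pr_debug("CDB: %10ph\n", cdb);	/* dumps the first 10 bytes of cdb */
}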
diff --git a/drivers/usb/isp1760/Kconfig b/drivers/usb/isp1760/Kconfig
new file mode 100644
index 000000000000..c94b7d953399
--- /dev/null
+++ b/drivers/usb/isp1760/Kconfig
@@ -0,0 +1,59 @@
+config USB_ISP1760
+ tristate "NXP ISP 1760/1761 support"
+ depends on USB || USB_GADGET
+ help
+ Say Y or M here if your system has an ISP1760 USB host controller
+ or an ISP1761 USB dual-role controller.
+
+ This driver does not support isochronous transfers or OTG.
+ This USB controller is usually attached to a non-DMA-Master
+ capable bus. NXP's eval kit puts this chip on a PCI card,
+ where the chip itself sits behind a PLB to simulate such
+ a bus.

+
+ To compile this driver as a module, choose M here: the
+ module will be called isp1760.
+
+config USB_ISP1760_HCD
+ bool
+
+config USB_ISP1761_UDC
+ bool
+
+if USB_ISP1760
+
+choice
+ bool "ISP1760 Mode Selection"
+ default USB_ISP1760_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_ISP1760_HOST_ROLE if (USB && !USB_GADGET)
+ default USB_ISP1760_GADGET_ROLE if (!USB && USB_GADGET)
+
+config USB_ISP1760_HOST_ROLE
+ bool "Host only mode"
+ depends on USB=y || USB=USB_ISP1760
+ select USB_ISP1760_HCD
+ help
+ Select this if you want to use the ISP1760 in host mode only. The
+ gadget function will be disabled.
+
+config USB_ISP1760_GADGET_ROLE
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_ISP1760
+ select USB_ISP1761_UDC
+ help
+ Select this if you want to use the ISP1760 in peripheral mode only.
+ The host function will be disabled.
+
+config USB_ISP1760_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on USB=y || USB=USB_ISP1760
+ depends on USB_GADGET=y || USB_GADGET=USB_ISP1760
+ select USB_ISP1760_HCD
+ select USB_ISP1761_UDC
+ help
+ Select this if you want to use the ISP1760 in both host and
+ peripheral modes.
+
+endchoice
+
+endif
diff --git a/drivers/usb/isp1760/Makefile b/drivers/usb/isp1760/Makefile
new file mode 100644
index 000000000000..2b741074ad2b
--- /dev/null
+++ b/drivers/usb/isp1760/Makefile
@@ -0,0 +1,5 @@
+isp1760-y := isp1760-core.o isp1760-if.o
+isp1760-$(CONFIG_USB_ISP1760_HCD) += isp1760-hcd.o
+isp1760-$(CONFIG_USB_ISP1761_UDC) += isp1760-udc.o
+
+obj-$(CONFIG_USB_ISP1760) += isp1760.o
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
new file mode 100644
index 000000000000..b9827556455f
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -0,0 +1,177 @@
+/*
+ * Driver for the NXP ISP1760 chip
+ *
+ * Copyright 2014 Laurent Pinchart
+ * Copyright 2007 Sebastian Siewior
+ *
+ * Contacts:
+ * Sebastian Siewior <bigeasy@linutronix.de>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "isp1760-core.h"
+#include "isp1760-hcd.h"
+#include "isp1760-regs.h"
+#include "isp1760-udc.h"
+
+static void isp1760_init_core(struct isp1760_device *isp)
+{
+ u32 otgctrl;
+ u32 hwmode;
+
+ /* Low-level chip reset */
+ if (isp->rst_gpio) {
+ gpiod_set_value_cansleep(isp->rst_gpio, 1);
+ mdelay(50);
+ gpiod_set_value_cansleep(isp->rst_gpio, 0);
+ }
+
+ /*
+ * Reset the host controller, including the CPU interface
+ * configuration.
+ */
+ isp1760_write32(isp->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
+ msleep(100);
+
+ /* Setup HW Mode Control: This assumes a level active-low interrupt */
+ hwmode = HW_DATA_BUS_32BIT;
+
+ if (isp->devflags & ISP1760_FLAG_BUS_WIDTH_16)
+ hwmode &= ~HW_DATA_BUS_32BIT;
+ if (isp->devflags & ISP1760_FLAG_ANALOG_OC)
+ hwmode |= HW_ANA_DIGI_OC;
+ if (isp->devflags & ISP1760_FLAG_DACK_POL_HIGH)
+ hwmode |= HW_DACK_POL_HIGH;
+ if (isp->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
+ hwmode |= HW_DREQ_POL_HIGH;
+ if (isp->devflags & ISP1760_FLAG_INTR_POL_HIGH)
+ hwmode |= HW_INTR_HIGH_ACT;
+ if (isp->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
+ hwmode |= HW_INTR_EDGE_TRIG;
+
+ /*
+ * The ISP1761 has a dedicated DC IRQ line but supports sharing the HC
+ * IRQ line for both the host and device controllers. Hardcode IRQ
+ * sharing for now and disable the DC interrupts globally to avoid
+ * spurious interrupts during HCD registration.
+ */
+ if (isp->devflags & ISP1760_FLAG_ISP1761) {
+ isp1760_write32(isp->regs, DC_MODE, 0);
+ hwmode |= HW_COMN_IRQ;
+ }
+
+ /*
+ * We have to set this first in case we're in 16-bit mode.
+ * Write it twice to ensure correct upper bits if switching
+ * to 16-bit mode.
+ */
+ isp1760_write32(isp->regs, HC_HW_MODE_CTRL, hwmode);
+ isp1760_write32(isp->regs, HC_HW_MODE_CTRL, hwmode);
+
+ /*
+ * PORT 1 Control register of the ISP1760 is the OTG control register
+ * on ISP1761.
+ *
+ * TODO: Really support OTG. For now we configure port 1 in device mode
+ * when OTG is requested.
+ */
+ if ((isp->devflags & ISP1760_FLAG_ISP1761) &&
+ (isp->devflags & ISP1760_FLAG_OTG_EN))
+ otgctrl = ((HW_DM_PULLDOWN | HW_DP_PULLDOWN) << 16)
+ | HW_OTG_DISABLE;
+ else
+ otgctrl = (HW_SW_SEL_HC_DC << 16)
+ | (HW_VBUS_DRV | HW_SEL_CP_EXT);
+
+ isp1760_write32(isp->regs, HC_PORT1_CTRL, otgctrl);
+
+ dev_info(isp->dev, "bus width: %u, oc: %s\n",
+ isp->devflags & ISP1760_FLAG_BUS_WIDTH_16 ? 16 : 32,
+ isp->devflags & ISP1760_FLAG_ANALOG_OC ? "analog" : "digital");
+}
+
+void isp1760_set_pullup(struct isp1760_device *isp, bool enable)
+{
+ isp1760_write32(isp->regs, HW_OTG_CTRL_SET,
+ enable ? HW_DP_PULLUP : HW_DP_PULLUP << 16);
+}
+
+int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
+ struct device *dev, unsigned int devflags)
+{
+ struct isp1760_device *isp;
+ bool udc_disabled = !(devflags & ISP1760_FLAG_ISP1761);
+ int ret;
+
+ /*
+ * If neither the HCD nor the UDC is enabled, return an error, as no
+ * device would be registered.
+ */
+ if ((!IS_ENABLED(CONFIG_USB_ISP1760_HCD) || usb_disabled()) &&
+ (!IS_ENABLED(CONFIG_USB_ISP1761_UDC) || udc_disabled))
+ return -ENODEV;
+
+ /* prevent usb-core allocating DMA pages */
+ dev->dma_mask = NULL;
+
+ isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->dev = dev;
+ isp->devflags = devflags;
+
+ isp->rst_gpio = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
+ if (IS_ERR(isp->rst_gpio))
+ return PTR_ERR(isp->rst_gpio);
+
+ isp->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(isp->regs))
+ return PTR_ERR(isp->regs);
+
+ isp1760_init_core(isp);
+
+ if (IS_ENABLED(CONFIG_USB_ISP1760_HCD) && !usb_disabled()) {
+ ret = isp1760_hcd_register(&isp->hcd, isp->regs, mem, irq,
+ irqflags | IRQF_SHARED, dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) {
+ ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED |
+ IRQF_DISABLED);
+ if (ret < 0) {
+ isp1760_hcd_unregister(&isp->hcd);
+ return ret;
+ }
+ }
+
+ dev_set_drvdata(dev, isp);
+
+ return 0;
+}
+
+void isp1760_unregister(struct device *dev)
+{
+ struct isp1760_device *isp = dev_get_drvdata(dev);
+
+ isp1760_udc_unregister(isp);
+ isp1760_hcd_unregister(&isp->hcd);
+}
+
+MODULE_DESCRIPTION("Driver for the ISP1760 USB-controller from NXP");
+MODULE_AUTHOR("Sebastian Siewior <bigeasy@linuxtronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/isp1760/isp1760-core.h b/drivers/usb/isp1760/isp1760-core.h
new file mode 100644
index 000000000000..c70f8368a794
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-core.h
@@ -0,0 +1,68 @@
+/*
+ * Driver for the NXP ISP1760 chip
+ *
+ * Copyright 2014 Laurent Pinchart
+ * Copyright 2007 Sebastian Siewior
+ *
+ * Contacts:
+ * Sebastian Siewior <bigeasy@linutronix.de>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ISP1760_CORE_H_
+#define _ISP1760_CORE_H_
+
+#include <linux/ioport.h>
+
+#include "isp1760-hcd.h"
+#include "isp1760-udc.h"
+
+struct device;
+struct gpio_desc;
+
+/*
+ * Device flags that can vary from board to board. All of these
+ * indicate the most "atypical" case, so that a devflags of 0 is
+ * a sane default configuration.
+ */
+#define ISP1760_FLAG_BUS_WIDTH_16 0x00000002 /* 16-bit data bus width */
+#define ISP1760_FLAG_OTG_EN 0x00000004 /* Port 1 supports OTG */
+#define ISP1760_FLAG_ANALOG_OC 0x00000008 /* Analog overcurrent */
+#define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */
+#define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */
+#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
+#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
+#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
+
+struct isp1760_device {
+ struct device *dev;
+
+ void __iomem *regs;
+ unsigned int devflags;
+ struct gpio_desc *rst_gpio;
+
+ struct isp1760_hcd hcd;
+ struct isp1760_udc udc;
+};
+
+int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
+ struct device *dev, unsigned int devflags);
+void isp1760_unregister(struct device *dev);
+
+void isp1760_set_pullup(struct isp1760_device *isp, bool enable);
+
+static inline u32 isp1760_read32(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void isp1760_write32(void __iomem *base, u32 reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+#endif
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 395649f357aa..eba9b82e2d70 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -11,6 +11,7 @@
* (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
*
*/
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -24,73 +25,96 @@
#include <linux/timer.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
-#include <linux/gpio.h>
+#include "isp1760-core.h"
#include "isp1760-hcd.h"
+#include "isp1760-regs.h"
static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
static struct kmem_cache *urb_listitem_cachep;
-enum queue_head_types {
- QH_CONTROL,
- QH_BULK,
- QH_INTERRUPT,
- QH_END
-};
-
-struct isp1760_hcd {
- u32 hcs_params;
- spinlock_t lock;
- struct slotinfo atl_slots[32];
- int atl_done_map;
- struct slotinfo int_slots[32];
- int int_done_map;
- struct memory_chunk memory_pool[BLOCKS];
- struct list_head qh_list[QH_END];
-
- /* periodic schedule support */
-#define DEFAULT_I_TDPS 1024
- unsigned periodic_size;
- unsigned i_thresh;
- unsigned long reset_done;
- unsigned long next_statechange;
- unsigned int devflags;
-
- int rst_gpio;
-};
+typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd);
static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
{
- return (struct isp1760_hcd *) (hcd->hcd_priv);
+ return *(struct isp1760_hcd **)hcd->hcd_priv;
}
-/* Section 2.2 Host Controller Capability Registers */
-#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
-#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
-#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
-#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
-#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
-#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
-#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
-
-/* Section 2.3 Host Controller Operational Registers */
-#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
-#define CMD_RESET (1<<1) /* reset HC not bus */
-#define CMD_RUN (1<<0) /* start/stop HC */
-#define STS_PCD (1<<2) /* port change detect */
-#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
-
-#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
-#define PORT_POWER (1<<12) /* true: has power (see PPC) */
-#define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */
-#define PORT_RESET (1<<8) /* reset port */
-#define PORT_SUSPEND (1<<7) /* suspend port */
-#define PORT_RESUME (1<<6) /* resume it */
-#define PORT_PE (1<<2) /* port enable */
-#define PORT_CSC (1<<1) /* connect status change */
-#define PORT_CONNECT (1<<0) /* device connected */
-#define PORT_RWC_BITS (PORT_CSC)
+/* urb state*/
+#define DELETE_URB (0x0008)
+#define NO_TRANSFER_ACTIVE (0xffffffff)
+
+/* Philips Proprietary Transfer Descriptor (PTD) */
+typedef __u32 __bitwise __dw;
+struct ptd {
+ __dw dw0;
+ __dw dw1;
+ __dw dw2;
+ __dw dw3;
+ __dw dw4;
+ __dw dw5;
+ __dw dw6;
+ __dw dw7;
+};
+#define PTD_OFFSET 0x0400
+#define ISO_PTD_OFFSET 0x0400
+#define INT_PTD_OFFSET 0x0800
+#define ATL_PTD_OFFSET 0x0c00
+#define PAYLOAD_OFFSET 0x1000
+
+
+/* ATL */
+/* DW0 */
+#define DW0_VALID_BIT 1
+#define FROM_DW0_VALID(x) ((x) & 0x01)
+#define TO_DW0_LENGTH(x) (((u32) x) << 3)
+#define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
+#define TO_DW0_MULTI(x) (((u32) x) << 29)
+#define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
+/* DW1 */
+#define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
+#define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
+#define DW1_TRANS_BULK ((u32) 2 << 12)
+#define DW1_TRANS_INT ((u32) 3 << 12)
+#define DW1_TRANS_SPLIT ((u32) 1 << 14)
+#define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
+#define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
+#define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
+/* DW2 */
+#define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
+#define TO_DW2_RL(x) ((x) << 25)
+#define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
+/* DW3 */
+#define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
+#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
+#define TO_DW3_NAKCOUNT(x) ((x) << 19)
+#define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
+#define TO_DW3_CERR(x) ((x) << 23)
+#define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
+#define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
+#define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
+#define TO_DW3_PING(x) ((x) << 26)
+#define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
+#define DW3_ERROR_BIT (1 << 28)
+#define DW3_BABBLE_BIT (1 << 29)
+#define DW3_HALT_BIT (1 << 30)
+#define DW3_ACTIVE_BIT (1 << 31)
+#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
+
+#define INT_UNDERRUN (1 << 2)
+#define INT_BABBLE (1 << 1)
+#define INT_EXACT (1 << 0)
+
+#define SETUP_PID (2)
+#define IN_PID (1)
+#define OUT_PID (0)
+
+/* Errata 1 */
+#define RL_COUNTER (0)
+#define NAK_COUNTER (0)
+#define ERR_COUNTER (2)
struct isp1760_qtd {
u8 packet_type;
@@ -137,12 +161,12 @@ struct urb_listitem {
*/
static u32 reg_read32(void __iomem *base, u32 reg)
{
- return readl(base + reg);
+ return isp1760_read32(base, reg);
}
static void reg_write32(void __iomem *base, u32 reg, u32 val)
{
- writel(val, base + reg);
+ isp1760_write32(base, reg, val);
}
/*
@@ -443,42 +467,6 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
int result;
u32 scratch, hwmode;
- /* low-level chip reset */
- if (gpio_is_valid(priv->rst_gpio)) {
- unsigned int rst_lvl;
-
- rst_lvl = (priv->devflags &
- ISP1760_FLAG_RESET_ACTIVE_HIGH) ? 1 : 0;
-
- gpio_set_value(priv->rst_gpio, rst_lvl);
- mdelay(50);
- gpio_set_value(priv->rst_gpio, !rst_lvl);
- }
-
- /* Setup HW Mode Control: This assumes a level active-low interrupt */
- hwmode = HW_DATA_BUS_32BIT;
-
- if (priv->devflags & ISP1760_FLAG_BUS_WIDTH_16)
- hwmode &= ~HW_DATA_BUS_32BIT;
- if (priv->devflags & ISP1760_FLAG_ANALOG_OC)
- hwmode |= HW_ANA_DIGI_OC;
- if (priv->devflags & ISP1760_FLAG_DACK_POL_HIGH)
- hwmode |= HW_DACK_POL_HIGH;
- if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
- hwmode |= HW_DREQ_POL_HIGH;
- if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH)
- hwmode |= HW_INTR_HIGH_ACT;
- if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
- hwmode |= HW_INTR_EDGE_TRIG;
-
- /*
- * We have to set this first in case we're in 16-bit mode.
- * Write it twice to ensure correct upper bits if switching
- * to 16-bit mode.
- */
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
-
reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
/* Change bus pattern */
scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
@@ -488,46 +476,33 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
return -ENODEV;
}
- /* pre reset */
+ /*
+ * The RESET_HC bit in the SW_RESET register is supposed to reset the
+ * host controller without touching the CPU interface registers, but at
+ * least on the ISP1761 it seems to behave as the RESET_ALL bit and
+ * reset the whole device. We thus can't use it here, so let's reset
+ * the host controller through the EHCI USB Command register. The device
+ * has been reset in core code anyway, so this shouldn't matter.
+ */
reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
- /* reset */
- reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
- mdelay(100);
-
- reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_HC);
- mdelay(100);
-
result = ehci_reset(hcd);
if (result)
return result;
/* Step 11 passed */
- dev_info(hcd->self.controller, "bus width: %d, oc: %s\n",
- (priv->devflags & ISP1760_FLAG_BUS_WIDTH_16) ?
- 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
- "analog" : "digital");
-
/* ATL reset */
+ hwmode = reg_read32(hcd->regs, HC_HW_MODE_CTRL) & ~ALL_ATX_RESET;
reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
mdelay(10);
reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
- /*
- * PORT 1 Control register of the ISP1760 is the OTG control
- * register on ISP1761. Since there is no OTG or device controller
- * support in this driver, we use port 1 as a "normal" USB host port on
- * both chips.
- */
- reg_write32(hcd->regs, HC_PORT1_CTRL, PORT1_POWER | PORT1_INIT2);
- mdelay(10);
-
priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
return priv_init(hcd);
@@ -743,8 +718,9 @@ static void qtd_free(struct isp1760_qtd *qtd)
}
static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
- struct slotinfo *slots, struct isp1760_qtd *qtd,
- struct isp1760_qh *qh, struct ptd *ptd)
+ struct isp1760_slotinfo *slots,
+ struct isp1760_qtd *qtd, struct isp1760_qh *qh,
+ struct ptd *ptd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int skip_map;
@@ -857,7 +833,7 @@ static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int ptd_offset;
- struct slotinfo *slots;
+ struct isp1760_slotinfo *slots;
int curr_slot, free_slot;
int n;
struct ptd ptd;
@@ -1097,7 +1073,7 @@ static void handle_done_ptds(struct usb_hcd *hcd)
struct isp1760_qh *qh;
int slot;
int state;
- struct slotinfo *slots;
+ struct isp1760_slotinfo *slots;
u32 ptd_offset;
struct isp1760_qtd *qtd;
int modified;
@@ -1359,9 +1335,7 @@ static int isp1760_run(struct usb_hcd *hcd)
if (retval)
return retval;
- init_timer(&errata2_timer);
- errata2_timer.function = errata2_function;
- errata2_timer.data = (unsigned long) hcd;
+ setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
add_timer(&errata2_timer);
@@ -1798,13 +1772,13 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
/* per-port overcurrent reporting */
- temp = 0x0008;
+ temp = HUB_CHAR_INDV_PORT_OCPM;
if (HCS_PPC(priv->hcs_params))
/* per-port power control */
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
else
/* no power switching */
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
desc->wHubCharacteristics = cpu_to_le16(temp);
}
@@ -2163,7 +2137,7 @@ static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
static const struct hc_driver isp1760_hc_driver = {
.description = "isp1760-hcd",
.product_desc = "NXP ISP1760 USB Host Controller",
- .hcd_priv_size = sizeof(struct isp1760_hcd),
+ .hcd_priv_size = sizeof(struct isp1760_hcd *),
.irq = isp1760_irq,
.flags = HCD_MEMORY | HCD_USB2,
.reset = isp1760_hc_setup,
@@ -2179,7 +2153,7 @@ static const struct hc_driver isp1760_hc_driver = {
.clear_tt_buffer_complete = isp1760_clear_tt_buffer_complete,
};
-int __init init_kmem_once(void)
+int __init isp1760_init_kmem_once(void)
{
urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
@@ -2206,63 +2180,56 @@ int __init init_kmem_once(void)
return 0;
}
-void deinit_kmem_cache(void)
+void isp1760_deinit_kmem_cache(void)
{
kmem_cache_destroy(qtd_cachep);
kmem_cache_destroy(qh_cachep);
kmem_cache_destroy(urb_listitem_cachep);
}
-struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
- int irq, unsigned long irqflags,
- int rst_gpio,
- struct device *dev, const char *busname,
- unsigned int devflags)
+int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
+ struct resource *mem, int irq, unsigned long irqflags,
+ struct device *dev)
{
struct usb_hcd *hcd;
- struct isp1760_hcd *priv;
int ret;
- if (usb_disabled())
- return ERR_PTR(-ENODEV);
-
- /* prevent usb-core allocating DMA pages */
- dev->dma_mask = NULL;
-
hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev_name(dev));
if (!hcd)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
+
+ *(struct isp1760_hcd **)hcd->hcd_priv = priv;
+
+ priv->hcd = hcd;
- priv = hcd_to_priv(hcd);
- priv->devflags = devflags;
- priv->rst_gpio = rst_gpio;
init_memory(priv);
- hcd->regs = ioremap(res_start, res_len);
- if (!hcd->regs) {
- ret = -EIO;
- goto err_put;
- }
hcd->irq = irq;
- hcd->rsrc_start = res_start;
- hcd->rsrc_len = res_len;
+ hcd->regs = regs;
+ hcd->rsrc_start = mem->start;
+ hcd->rsrc_len = resource_size(mem);
+
+ /* This driver doesn't support wakeup requests */
+ hcd->cant_recv_wakeups = 1;
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
- goto err_unmap;
+ goto error;
+
device_wakeup_enable(hcd->self.controller);
- return hcd;
+ return 0;
-err_unmap:
- iounmap(hcd->regs);
+error:
+ usb_put_hcd(hcd);
+ return ret;
+}
-err_put:
- usb_put_hcd(hcd);
+void isp1760_hcd_unregister(struct isp1760_hcd *priv)
+{
+ if (!priv->hcd)
+ return;
- return ERR_PTR(ret);
+ usb_remove_hcd(priv->hcd);
+ usb_put_hcd(priv->hcd);
}
-
-MODULE_DESCRIPTION("Driver for the ISP1760 USB-controller from NXP");
-MODULE_AUTHOR("Sebastian Siewior <bigeasy@linuxtronix.de>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/isp1760/isp1760-hcd.h b/drivers/usb/isp1760/isp1760-hcd.h
new file mode 100644
index 000000000000..0c1c98d6ea08
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-hcd.h
@@ -0,0 +1,102 @@
+#ifndef _ISP1760_HCD_H_
+#define _ISP1760_HCD_H_
+
+#include <linux/spinlock.h>
+
+struct isp1760_qh;
+struct isp1760_qtd;
+struct resource;
+struct usb_hcd;
+
+/*
+ * 60 KiB divided into:
+ * - 32 blocks @ 256 bytes
+ * - 20 blocks @ 1024 bytes
+ * - 4 blocks @ 8192 bytes
+ */
+
+#define BLOCK_1_NUM 32
+#define BLOCK_2_NUM 20
+#define BLOCK_3_NUM 4
+
+#define BLOCK_1_SIZE 256
+#define BLOCK_2_SIZE 1024
+#define BLOCK_3_SIZE 8192
+#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
+#define MAX_PAYLOAD_SIZE BLOCK_3_SIZE
+#define PAYLOAD_AREA_SIZE 0xf000
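As a quick cross-check of the layout above, the three block classes sum exactly to PAYLOAD_AREA_SIZE; the standalone sketch below is illustrative only and is not part of this patch:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* Mirrors the BLOCK_*_NUM / BLOCK_*_SIZE constants above. */
		unsigned int total = 32 * 256 + 20 * 1024 + 4 * 8192;

		assert(total == 0xf000);	/* 61440 bytes, i.e. 60 KiB */
		printf("payload area: %u bytes (0x%x)\n", total, total);
		return 0;
	}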
+
+struct isp1760_slotinfo {
+ struct isp1760_qh *qh;
+ struct isp1760_qtd *qtd;
+ unsigned long timestamp;
+};
+
+/* chip memory management */
+struct isp1760_memory_chunk {
+ unsigned int start;
+ unsigned int size;
+ unsigned int free;
+};
+
+enum isp1760_queue_head_types {
+ QH_CONTROL,
+ QH_BULK,
+ QH_INTERRUPT,
+ QH_END
+};
+
+struct isp1760_hcd {
+#ifdef CONFIG_USB_ISP1760_HCD
+ struct usb_hcd *hcd;
+
+ u32 hcs_params;
+ spinlock_t lock;
+ struct isp1760_slotinfo atl_slots[32];
+ int atl_done_map;
+ struct isp1760_slotinfo int_slots[32];
+ int int_done_map;
+ struct isp1760_memory_chunk memory_pool[BLOCKS];
+ struct list_head qh_list[QH_END];
+
+ /* periodic schedule support */
+#define DEFAULT_I_TDPS 1024
+ unsigned periodic_size;
+ unsigned i_thresh;
+ unsigned long reset_done;
+ unsigned long next_statechange;
+#endif
+};
+
+#ifdef CONFIG_USB_ISP1760_HCD
+int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
+ struct resource *mem, int irq, unsigned long irqflags,
+ struct device *dev);
+void isp1760_hcd_unregister(struct isp1760_hcd *priv);
+
+int isp1760_init_kmem_once(void);
+void isp1760_deinit_kmem_cache(void);
+#else
+static inline int isp1760_hcd_register(struct isp1760_hcd *priv,
+ void __iomem *regs, struct resource *mem,
+ int irq, unsigned long irqflags,
+ struct device *dev)
+{
+ return 0;
+}
+
+static inline void isp1760_hcd_unregister(struct isp1760_hcd *priv)
+{
+}
+
+static inline int isp1760_init_kmem_once(void)
+{
+ return 0;
+}
+
+static inline void isp1760_deinit_kmem_cache(void)
+{
+}
+#endif
+
+#endif /* _ISP1760_HCD_H_ */
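A hypothetical caller of this new interface might look as follows; the real registration glue lives in isp1760-core.c (not shown in this excerpt), so the probe function and its arguments here are illustrative assumptions only:

	#include <linux/device.h>
	#include <linux/interrupt.h>

	#include "isp1760-hcd.h"

	/* Hypothetical core probe: regs are already ioremapped by the caller. */
	static int example_core_probe(struct device *dev, void __iomem *regs,
				      struct resource *mem, int irq)
	{
		struct isp1760_hcd *priv;

		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		return isp1760_hcd_register(priv, regs, mem, irq, IRQF_SHARED, dev);
	}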
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
new file mode 100644
index 000000000000..264be4d21706
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -0,0 +1,309 @@
+/*
+ * Glue code for the ISP1760 driver and bus
+ * Currently there is support for
+ * - OpenFirmware
+ * - PCI
+ * - PDEV (generic platform device centralized driver model)
+ *
+ * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
+ *
+ */
+
+#include <linux/usb.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb/isp1760.h>
+#include <linux/usb/hcd.h>
+
+#include "isp1760-core.h"
+#include "isp1760-regs.h"
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif
+
+#ifdef CONFIG_PCI
+static int isp1761_pci_init(struct pci_dev *dev)
+{
+ resource_size_t mem_start;
+ resource_size_t mem_length;
+ u8 __iomem *iobase;
+ u8 latency, limit;
+ int retry_count;
+ u32 reg_data;
+
+ /* Grab the PLX PCI shared memory of the ISP 1761 we need */
+ mem_start = pci_resource_start(dev, 3);
+ mem_length = pci_resource_len(dev, 3);
+ if (mem_length < 0xffff) {
+ printk(KERN_ERR "memory length for this resource is wrong\n");
+ return -ENOMEM;
+ }
+
+ if (!request_mem_region(mem_start, mem_length, "ISP-PCI")) {
+ printk(KERN_ERR "host controller already in use\n");
+ return -EBUSY;
+ }
+
+ /* map available memory */
+ iobase = ioremap_nocache(mem_start, mem_length);
+ if (!iobase) {
+ printk(KERN_ERR "Error ioremap failed\n");
+ release_mem_region(mem_start, mem_length);
+ return -ENOMEM;
+ }
+
+ /* bad pci latencies can contribute to overruns */
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
+ if (latency) {
+ pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
+ if (limit && limit < latency)
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
+ }
+
+	/* Check whether the Scratch Register of the Host Controller is
+	 * accessible. The initial PCI access is retried until local init
+	 * for the PCI bridge is completed.
+ */
+ retry_count = 20;
+ reg_data = 0;
+ while ((reg_data != 0xFACE) && retry_count) {
+		/* By default the host is in 16-bit mode, so I/O operations
+		 * at this stage must be 16 bit.
+		 */
+ writel(0xface, iobase + HC_SCRATCH_REG);
+ udelay(100);
+ reg_data = readl(iobase + HC_SCRATCH_REG) & 0x0000ffff;
+ retry_count--;
+ }
+
+ iounmap(iobase);
+ release_mem_region(mem_start, mem_length);
+
+	/* Host Controller presence is detected by writing to the scratch
+	 * register and checking that the value read back matches.
+ */
+ if (reg_data != 0xFACE) {
+ dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
+ return -ENOMEM;
+ }
+
+	/* Grab the PLX PCI memory-mapped port start address we need */
+ mem_start = pci_resource_start(dev, 0);
+ mem_length = pci_resource_len(dev, 0);
+
+ if (!request_mem_region(mem_start, mem_length, "ISP1761 IO MEM")) {
+ printk(KERN_ERR "request region #1\n");
+ return -EBUSY;
+ }
+
+ iobase = ioremap_nocache(mem_start, mem_length);
+ if (!iobase) {
+ printk(KERN_ERR "ioremap #1\n");
+ release_mem_region(mem_start, mem_length);
+ return -ENOMEM;
+ }
+
+ /* configure PLX PCI chip to pass interrupts */
+#define PLX_INT_CSR_REG 0x68
+ reg_data = readl(iobase + PLX_INT_CSR_REG);
+ reg_data |= 0x900;
+ writel(reg_data, iobase + PLX_INT_CSR_REG);
+
+ /* done with PLX IO access */
+ iounmap(iobase);
+ release_mem_region(mem_start, mem_length);
+
+ return 0;
+}
+
+static int isp1761_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ unsigned int devflags = 0;
+ int ret;
+
+ if (!dev->irq)
+ return -ENODEV;
+
+ if (pci_enable_device(dev) < 0)
+ return -ENODEV;
+
+ ret = isp1761_pci_init(dev);
+ if (ret < 0)
+ goto error;
+
+ pci_set_master(dev);
+
+ dev->dev.dma_mask = NULL;
+ ret = isp1760_register(&dev->resource[3], dev->irq, 0, &dev->dev,
+ devflags);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ pci_disable_device(dev);
+ return ret;
+}
+
+static void isp1761_pci_remove(struct pci_dev *dev)
+{
+ isp1760_unregister(&dev->dev);
+
+ pci_disable_device(dev);
+}
+
+static void isp1761_pci_shutdown(struct pci_dev *dev)
+{
+	printk(KERN_ERR "isp1761_pci_shutdown\n");
+}
+
+static const struct pci_device_id isp1760_plx[] = {
+ {
+ .class = PCI_CLASS_BRIDGE_OTHER << 8,
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x5406,
+ .subvendor = PCI_VENDOR_ID_PLX,
+ .subdevice = 0x9054,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, isp1760_plx);
+
+static struct pci_driver isp1761_pci_driver = {
+ .name = "isp1760",
+ .id_table = isp1760_plx,
+ .probe = isp1761_pci_probe,
+ .remove = isp1761_pci_remove,
+ .shutdown = isp1761_pci_shutdown,
+};
+#endif
+
+static int isp1760_plat_probe(struct platform_device *pdev)
+{
+ unsigned long irqflags;
+ unsigned int devflags = 0;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ int ret;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ pr_warning("isp1760: IRQ resource not available\n");
+ return -ENODEV;
+ }
+ irqflags = irq_res->flags & IRQF_TRIGGER_MASK;
+
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ struct device_node *dp = pdev->dev.of_node;
+ u32 bus_width = 0;
+
+ if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
+ devflags |= ISP1760_FLAG_ISP1761;
+
+ /* Some systems wire up only 16 of the 32 data lines */
+ of_property_read_u32(dp, "bus-width", &bus_width);
+ if (bus_width == 16)
+ devflags |= ISP1760_FLAG_BUS_WIDTH_16;
+
+ if (of_property_read_bool(dp, "port1-otg"))
+ devflags |= ISP1760_FLAG_OTG_EN;
+
+ if (of_property_read_bool(dp, "analog-oc"))
+ devflags |= ISP1760_FLAG_ANALOG_OC;
+
+ if (of_property_read_bool(dp, "dack-polarity"))
+ devflags |= ISP1760_FLAG_DACK_POL_HIGH;
+
+ if (of_property_read_bool(dp, "dreq-polarity"))
+ devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
+ } else if (dev_get_platdata(&pdev->dev)) {
+ struct isp1760_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+
+ if (pdata->is_isp1761)
+ devflags |= ISP1760_FLAG_ISP1761;
+ if (pdata->bus_width_16)
+ devflags |= ISP1760_FLAG_BUS_WIDTH_16;
+ if (pdata->port1_otg)
+ devflags |= ISP1760_FLAG_OTG_EN;
+ if (pdata->analog_oc)
+ devflags |= ISP1760_FLAG_ANALOG_OC;
+ if (pdata->dack_polarity_high)
+ devflags |= ISP1760_FLAG_DACK_POL_HIGH;
+ if (pdata->dreq_polarity_high)
+ devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
+ }
+
+ ret = isp1760_register(mem_res, irq_res->start, irqflags, &pdev->dev,
+ devflags);
+ if (ret < 0)
+ return ret;
+
+ pr_info("ISP1760 USB device initialised\n");
+ return 0;
+}
+
+static int isp1760_plat_remove(struct platform_device *pdev)
+{
+ isp1760_unregister(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id isp1760_of_match[] = {
+ { .compatible = "nxp,usb-isp1760", },
+ { .compatible = "nxp,usb-isp1761", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, isp1760_of_match);
+#endif
+
+static struct platform_driver isp1760_plat_driver = {
+ .probe = isp1760_plat_probe,
+ .remove = isp1760_plat_remove,
+ .driver = {
+ .name = "isp1760",
+ .of_match_table = of_match_ptr(isp1760_of_match),
+ },
+};
+
+static int __init isp1760_init(void)
+{
+ int ret, any_ret = -ENODEV;
+
+ isp1760_init_kmem_once();
+
+ ret = platform_driver_register(&isp1760_plat_driver);
+ if (!ret)
+ any_ret = 0;
+#ifdef CONFIG_PCI
+ ret = pci_register_driver(&isp1761_pci_driver);
+ if (!ret)
+ any_ret = 0;
+#endif
+
+ if (any_ret)
+ isp1760_deinit_kmem_cache();
+ return any_ret;
+}
+module_init(isp1760_init);
+
+static void __exit isp1760_exit(void)
+{
+ platform_driver_unregister(&isp1760_plat_driver);
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&isp1761_pci_driver);
+#endif
+ isp1760_deinit_kmem_cache();
+}
+module_exit(isp1760_exit);
diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
new file mode 100644
index 000000000000..b67095c9a9d4
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-regs.h
@@ -0,0 +1,230 @@
+/*
+ * Driver for the NXP ISP1760 chip
+ *
+ * Copyright 2014 Laurent Pinchart
+ * Copyright 2007 Sebastian Siewior
+ *
+ * Contacts:
+ * Sebastian Siewior <bigeasy@linutronix.de>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ISP1760_REGS_H_
+#define _ISP1760_REGS_H_
+
+/* -----------------------------------------------------------------------------
+ * Host Controller
+ */
+
+/* EHCI capability registers */
+#define HC_CAPLENGTH 0x000
+#define HC_LENGTH(p) (((p) >> 00) & 0x00ff) /* bits 7:0 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff) /* bits 31:16 */
+
+#define HC_HCSPARAMS 0x004
+#define HCS_INDICATOR(p) ((p) & (1 << 16)) /* true: has port indicators */
+#define HCS_PPC(p) ((p) & (1 << 4)) /* true: port power control */
+#define HCS_N_PORTS(p) (((p) >> 0) & 0xf) /* bits 3:0, ports on HC */
+
+#define HC_HCCPARAMS 0x008
+#define HCC_ISOC_CACHE(p) ((p) & (1 << 7)) /* true: can cache isoc frame */
+#define HCC_ISOC_THRES(p) (((p) >> 4) & 0x7) /* bits 6:4, uframes cached */
+
+/* EHCI operational registers */
+#define HC_USBCMD 0x020
+#define CMD_LRESET (1 << 7) /* partial reset (no ports, etc) */
+#define CMD_RESET (1 << 1) /* reset HC not bus */
+#define CMD_RUN (1 << 0) /* start/stop HC */
+
+#define HC_USBSTS 0x024
+#define STS_PCD (1 << 2) /* port change detect */
+
+#define HC_FRINDEX 0x02c
+
+#define HC_CONFIGFLAG 0x060
+#define FLAG_CF (1 << 0) /* true: we'll support "high speed" */
+
+#define HC_PORTSC1 0x064
+#define PORT_OWNER (1 << 13) /* true: companion hc owns this port */
+#define PORT_POWER (1 << 12) /* true: has power (see PPC) */
+#define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */
+#define PORT_RESET (1 << 8) /* reset port */
+#define PORT_SUSPEND (1 << 7) /* suspend port */
+#define PORT_RESUME (1 << 6) /* resume it */
+#define PORT_PE (1 << 2) /* port enable */
+#define PORT_CSC (1 << 1) /* connect status change */
+#define PORT_CONNECT (1 << 0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC)
+
+#define HC_ISO_PTD_DONEMAP_REG 0x130
+#define HC_ISO_PTD_SKIPMAP_REG 0x134
+#define HC_ISO_PTD_LASTPTD_REG 0x138
+#define HC_INT_PTD_DONEMAP_REG 0x140
+#define HC_INT_PTD_SKIPMAP_REG 0x144
+#define HC_INT_PTD_LASTPTD_REG 0x148
+#define HC_ATL_PTD_DONEMAP_REG 0x150
+#define HC_ATL_PTD_SKIPMAP_REG 0x154
+#define HC_ATL_PTD_LASTPTD_REG 0x158
+
+/* Configuration Register */
+#define HC_HW_MODE_CTRL 0x300
+#define ALL_ATX_RESET (1 << 31)
+#define HW_ANA_DIGI_OC (1 << 15)
+#define HW_DEV_DMA (1 << 11)
+#define HW_COMN_IRQ (1 << 10)
+#define HW_COMN_DMA (1 << 9)
+#define HW_DATA_BUS_32BIT (1 << 8)
+#define HW_DACK_POL_HIGH (1 << 6)
+#define HW_DREQ_POL_HIGH (1 << 5)
+#define HW_INTR_HIGH_ACT (1 << 2)
+#define HW_INTR_EDGE_TRIG (1 << 1)
+#define HW_GLOBAL_INTR_EN (1 << 0)
+
+#define HC_CHIP_ID_REG 0x304
+#define HC_SCRATCH_REG 0x308
+
+#define HC_RESET_REG 0x30c
+#define SW_RESET_RESET_HC (1 << 1)
+#define SW_RESET_RESET_ALL (1 << 0)
+
+#define HC_BUFFER_STATUS_REG 0x334
+#define ISO_BUF_FILL (1 << 2)
+#define INT_BUF_FILL (1 << 1)
+#define ATL_BUF_FILL (1 << 0)
+
+#define HC_MEMORY_REG 0x33c
+#define ISP_BANK(x) ((x) << 16)
+
+#define HC_PORT1_CTRL 0x374
+#define PORT1_POWER (3 << 3)
+#define PORT1_INIT1 (1 << 7)
+#define PORT1_INIT2 (1 << 23)
+#define HW_OTG_CTRL_SET 0x374
+#define HW_OTG_CTRL_CLR 0x376
+#define HW_OTG_DISABLE (1 << 10)
+#define HW_OTG_SE0_EN (1 << 9)
+#define HW_BDIS_ACON_EN (1 << 8)
+#define HW_SW_SEL_HC_DC (1 << 7)
+#define HW_VBUS_CHRG (1 << 6)
+#define HW_VBUS_DISCHRG (1 << 5)
+#define HW_VBUS_DRV (1 << 4)
+#define HW_SEL_CP_EXT (1 << 3)
+#define HW_DM_PULLDOWN (1 << 2)
+#define HW_DP_PULLDOWN (1 << 1)
+#define HW_DP_PULLUP (1 << 0)
+
+/* Interrupt Register */
+#define HC_INTERRUPT_REG 0x310
+
+#define HC_INTERRUPT_ENABLE 0x314
+#define HC_ISO_INT (1 << 9)
+#define HC_ATL_INT (1 << 8)
+#define HC_INTL_INT (1 << 7)
+#define HC_EOT_INT (1 << 3)
+#define HC_SOT_INT (1 << 1)
+#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT)
+
+#define HC_ISO_IRQ_MASK_OR_REG 0x318
+#define HC_INT_IRQ_MASK_OR_REG 0x31c
+#define HC_ATL_IRQ_MASK_OR_REG 0x320
+#define HC_ISO_IRQ_MASK_AND_REG 0x324
+#define HC_INT_IRQ_MASK_AND_REG 0x328
+#define HC_ATL_IRQ_MASK_AND_REG 0x32c
+
+/* -----------------------------------------------------------------------------
+ * Peripheral Controller
+ */
+
+/* Initialization Registers */
+#define DC_ADDRESS 0x0200
+#define DC_DEVEN (1 << 7)
+
+#define DC_MODE 0x020c
+#define DC_DMACLKON (1 << 9)
+#define DC_VBUSSTAT (1 << 8)
+#define DC_CLKAON (1 << 7)
+#define DC_SNDRSU (1 << 6)
+#define DC_GOSUSP (1 << 5)
+#define DC_SFRESET (1 << 4)
+#define DC_GLINTENA (1 << 3)
+#define DC_WKUPCS (1 << 2)
+
+#define DC_INTCONF 0x0210
+#define DC_CDBGMOD_ACK_NAK (0 << 6)
+#define DC_CDBGMOD_ACK (1 << 6)
+#define DC_CDBGMOD_ACK_1NAK (2 << 6)
+#define DC_DDBGMODIN_ACK_NAK (0 << 4)
+#define DC_DDBGMODIN_ACK (1 << 4)
+#define DC_DDBGMODIN_ACK_1NAK (2 << 4)
+#define DC_DDBGMODOUT_ACK_NYET_NAK (0 << 2)
+#define DC_DDBGMODOUT_ACK_NYET (1 << 2)
+#define DC_DDBGMODOUT_ACK_NYET_1NAK (2 << 2)
+#define DC_INTLVL (1 << 1)
+#define DC_INTPOL (1 << 0)
+
+#define DC_DEBUG 0x0212
+#define DC_INTENABLE 0x0214
+#define DC_IEPTX(n) (1 << (11 + 2 * (n)))
+#define DC_IEPRX(n) (1 << (10 + 2 * (n)))
+#define DC_IEPRXTX(n) (3 << (10 + 2 * (n)))
+#define DC_IEP0SETUP (1 << 8)
+#define DC_IEVBUS (1 << 7)
+#define DC_IEDMA (1 << 6)
+#define DC_IEHS_STA (1 << 5)
+#define DC_IERESM (1 << 4)
+#define DC_IESUSP (1 << 3)
+#define DC_IEPSOF (1 << 2)
+#define DC_IESOF (1 << 1)
+#define DC_IEBRST (1 << 0)
+
+/* Data Flow Registers */
+#define DC_EPINDEX 0x022c
+#define DC_EP0SETUP (1 << 5)
+#define DC_ENDPIDX(n) ((n) << 1)
+#define DC_EPDIR (1 << 0)
+
+#define DC_CTRLFUNC 0x0228
+#define DC_CLBUF (1 << 4)
+#define DC_VENDP (1 << 3)
+#define DC_DSEN (1 << 2)
+#define DC_STATUS (1 << 1)
+#define DC_STALL (1 << 0)
+
+#define DC_DATAPORT 0x0220
+#define DC_BUFLEN 0x021c
+#define DC_DATACOUNT_MASK 0xffff
+#define DC_BUFSTAT 0x021e
+#define DC_EPMAXPKTSZ 0x0204
+
+#define DC_EPTYPE 0x0208
+#define DC_NOEMPKT (1 << 4)
+#define DC_EPENABLE (1 << 3)
+#define DC_DBLBUF (1 << 2)
+#define DC_ENDPTYP_ISOC (1 << 0)
+#define DC_ENDPTYP_BULK (2 << 0)
+#define DC_ENDPTYP_INTERRUPT (3 << 0)
+
+/* DMA Registers */
+#define DC_DMACMD 0x0230
+#define DC_DMATXCOUNT 0x0234
+#define DC_DMACONF 0x0238
+#define DC_DMAHW 0x023c
+#define DC_DMAINTREASON 0x0250
+#define DC_DMAINTEN 0x0254
+#define DC_DMAEP 0x0258
+#define DC_DMABURSTCOUNT 0x0264
+
+/* General Registers */
+#define DC_INTERRUPT 0x0218
+#define DC_CHIPID 0x0270
+#define DC_FRAMENUM 0x0274
+#define DC_SCRATCH 0x0278
+#define DC_UNLOCKDEV 0x027c
+#define DC_INTPULSEWIDTH 0x0280
+#define DC_TESTMODE 0x0284
+
+#endif
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
new file mode 100644
index 000000000000..9612d7990565
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -0,0 +1,1498 @@
+/*
+ * Driver for the NXP ISP1761 device controller
+ *
+ * Copyright 2014 Ideas on Board Oy
+ *
+ * Contacts:
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+
+#include "isp1760-core.h"
+#include "isp1760-regs.h"
+#include "isp1760-udc.h"
+
+#define ISP1760_VBUS_POLL_INTERVAL msecs_to_jiffies(500)
+
+struct isp1760_request {
+ struct usb_request req;
+ struct list_head queue;
+ struct isp1760_ep *ep;
+ unsigned int packet_size;
+};
+
+static inline struct isp1760_udc *gadget_to_udc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct isp1760_udc, gadget);
+}
+
+static inline struct isp1760_ep *ep_to_udc_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct isp1760_ep, ep);
+}
+
+static inline struct isp1760_request *req_to_udc_req(struct usb_request *req)
+{
+ return container_of(req, struct isp1760_request, req);
+}
+
+static inline u32 isp1760_udc_read(struct isp1760_udc *udc, u16 reg)
+{
+ return isp1760_read32(udc->regs, reg);
+}
+
+static inline void isp1760_udc_write(struct isp1760_udc *udc, u16 reg, u32 val)
+{
+ isp1760_write32(udc->regs, reg, val);
+}
+
+/* -----------------------------------------------------------------------------
+ * Endpoint Management
+ */
+
+static struct isp1760_ep *isp1760_udc_find_ep(struct isp1760_udc *udc,
+ u16 index)
+{
+ unsigned int i;
+
+ if (index == 0)
+ return &udc->ep[0];
+
+ for (i = 1; i < ARRAY_SIZE(udc->ep); ++i) {
+ if (udc->ep[i].addr == index)
+ return udc->ep[i].desc ? &udc->ep[i] : NULL;
+ }
+
+ return NULL;
+}
+
+static void __isp1760_udc_select_ep(struct isp1760_ep *ep, int dir)
+{
+ isp1760_udc_write(ep->udc, DC_EPINDEX,
+ DC_ENDPIDX(ep->addr & USB_ENDPOINT_NUMBER_MASK) |
+ (dir == USB_DIR_IN ? DC_EPDIR : 0));
+}
+
+/**
+ * isp1760_udc_select_ep - Select an endpoint for register access
+ * @ep: The endpoint
+ *
+ * The ISP1761 endpoint registers are banked. This function selects the target
+ * endpoint for banked register access. The selection remains valid until the
+ * next call to this function, the next direct access to the EPINDEX register
+ * or the next reset, whichever comes first.
+ *
+ * Called with the UDC spinlock held.
+ */
+static void isp1760_udc_select_ep(struct isp1760_ep *ep)
+{
+ __isp1760_udc_select_ep(ep, ep->addr & USB_ENDPOINT_DIR_MASK);
+}
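Because the endpoint registers are banked behind DC_EPINDEX, any access to a banked register has to stay in the same locked section as the selection; the hypothetical helper below (not part of the patch) illustrates the pattern using the accessors defined in this file:

	/* Hypothetical example: read the current buffer fill level of an endpoint. */
	static u16 example_udc_ep_buflen(struct isp1760_ep *ep)
	{
		struct isp1760_udc *udc = ep->udc;
		unsigned long flags;
		u16 len;

		spin_lock_irqsave(&udc->lock, flags);
		isp1760_udc_select_ep(ep);		/* programs DC_EPINDEX */
		len = isp1760_udc_read(udc, DC_BUFLEN) & DC_DATACOUNT_MASK;
		spin_unlock_irqrestore(&udc->lock, flags);

		return len;
	}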
+
+/* Called with the UDC spinlock held. */
+static void isp1760_udc_ctrl_send_status(struct isp1760_ep *ep, int dir)
+{
+ struct isp1760_udc *udc = ep->udc;
+
+ /*
+ * Proceed to the status stage. The status stage data packet flows in
+	 * the direction opposite to the data stage data packets, so we need
+ * to select the OUT/IN endpoint for IN/OUT transfers.
+ */
+ isp1760_udc_write(udc, DC_EPINDEX, DC_ENDPIDX(0) |
+ (dir == USB_DIR_IN ? 0 : DC_EPDIR));
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_STATUS);
+
+ /*
+ * The hardware will terminate the request automatically and go back to
+ * the setup stage without notifying us.
+ */
+ udc->ep0_state = ISP1760_CTRL_SETUP;
+}
+
+/* Called without the UDC spinlock held. */
+static void isp1760_udc_request_complete(struct isp1760_ep *ep,
+ struct isp1760_request *req,
+ int status)
+{
+ struct isp1760_udc *udc = ep->udc;
+ unsigned long flags;
+
+ dev_dbg(ep->udc->isp->dev, "completing request %p with status %d\n",
+ req, status);
+
+ req->ep = NULL;
+ req->req.status = status;
+ req->req.complete(&ep->ep, &req->req);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /*
+ * When completing control OUT requests, move to the status stage after
+ * calling the request complete callback. This gives the gadget an
+ * opportunity to stall the control transfer if needed.
+ */
+ if (status == 0 && ep->addr == 0 && udc->ep0_dir == USB_DIR_OUT)
+ isp1760_udc_ctrl_send_status(ep, USB_DIR_OUT);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void isp1760_udc_ctrl_send_stall(struct isp1760_ep *ep)
+{
+ struct isp1760_udc *udc = ep->udc;
+ unsigned long flags;
+
+ dev_dbg(ep->udc->isp->dev, "%s(ep%02x)\n", __func__, ep->addr);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Stall both the IN and OUT endpoints. */
+ __isp1760_udc_select_ep(ep, USB_DIR_OUT);
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_STALL);
+ __isp1760_udc_select_ep(ep, USB_DIR_IN);
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_STALL);
+
+ /* A protocol stall completes the control transaction. */
+ udc->ep0_state = ISP1760_CTRL_SETUP;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Data Endpoints
+ */
+
+/* Called with the UDC spinlock held. */
+static bool isp1760_udc_receive(struct isp1760_ep *ep,
+ struct isp1760_request *req)
+{
+ struct isp1760_udc *udc = ep->udc;
+ unsigned int len;
+ u32 *buf;
+ int i;
+
+ isp1760_udc_select_ep(ep);
+ len = isp1760_udc_read(udc, DC_BUFLEN) & DC_DATACOUNT_MASK;
+
+ dev_dbg(udc->isp->dev, "%s: received %u bytes (%u/%u done)\n",
+ __func__, len, req->req.actual, req->req.length);
+
+ len = min(len, req->req.length - req->req.actual);
+
+ if (!len) {
+ /*
+ * There's no data to be read from the FIFO, acknowledge the RX
+ * interrupt by clearing the buffer.
+ *
+		 * TODO: What if another packet arrives in the meantime? The
+ * datasheet doesn't clearly document how this should be
+ * handled.
+ */
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_CLBUF);
+ return false;
+ }
+
+ buf = req->req.buf + req->req.actual;
+
+ /*
+ * Make sure not to read more than one extra byte, otherwise data from
+ * the next packet might be removed from the FIFO.
+ */
+ for (i = len; i > 2; i -= 4, ++buf)
+ *buf = le32_to_cpu(isp1760_udc_read(udc, DC_DATAPORT));
+ if (i > 0)
+ *(u16 *)buf = le16_to_cpu(readw(udc->regs + DC_DATAPORT));
+
+ req->req.actual += len;
+
+ /*
+ * TODO: The short_not_ok flag isn't supported yet, but isn't used by
+ * any gadget driver either.
+ */
+
+ dev_dbg(udc->isp->dev,
+ "%s: req %p actual/length %u/%u maxpacket %u packet size %u\n",
+ __func__, req, req->req.actual, req->req.length, ep->maxpacket,
+ len);
+
+ ep->rx_pending = false;
+
+ /*
+ * Complete the request if all data has been received or if a short
+ * packet has been received.
+ */
+ if (req->req.actual == req->req.length || len < ep->maxpacket) {
+ list_del(&req->queue);
+ return true;
+ }
+
+ return false;
+}
+
+static void isp1760_udc_transmit(struct isp1760_ep *ep,
+ struct isp1760_request *req)
+{
+ struct isp1760_udc *udc = ep->udc;
+ u32 *buf = req->req.buf + req->req.actual;
+ int i;
+
+ req->packet_size = min(req->req.length - req->req.actual,
+ ep->maxpacket);
+
+ dev_dbg(udc->isp->dev, "%s: transferring %u bytes (%u/%u done)\n",
+ __func__, req->packet_size, req->req.actual,
+ req->req.length);
+
+ __isp1760_udc_select_ep(ep, USB_DIR_IN);
+
+ if (req->packet_size)
+ isp1760_udc_write(udc, DC_BUFLEN, req->packet_size);
+
+ /*
+ * Make sure not to write more than one extra byte, otherwise extra data
+ * will stay in the FIFO and will be transmitted during the next control
+ * request. The endpoint control CLBUF bit is supposed to allow flushing
+	 * the FIFO in this kind of condition, but doesn't seem to work.
+ */
+ for (i = req->packet_size; i > 2; i -= 4, ++buf)
+ isp1760_udc_write(udc, DC_DATAPORT, cpu_to_le32(*buf));
+ if (i > 0)
+ writew(cpu_to_le16(*(u16 *)buf), udc->regs + DC_DATAPORT);
+
+ if (ep->addr == 0)
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_DSEN);
+ if (!req->packet_size)
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_VENDP);
+}
+
+static void isp1760_ep_rx_ready(struct isp1760_ep *ep)
+{
+ struct isp1760_udc *udc = ep->udc;
+ struct isp1760_request *req;
+ bool complete;
+
+ spin_lock(&udc->lock);
+
+ if (ep->addr == 0 && udc->ep0_state != ISP1760_CTRL_DATA_OUT) {
+ spin_unlock(&udc->lock);
+ dev_dbg(udc->isp->dev, "%s: invalid ep0 state %u\n", __func__,
+ udc->ep0_state);
+ return;
+ }
+
+ if (ep->addr != 0 && !ep->desc) {
+ spin_unlock(&udc->lock);
+ dev_dbg(udc->isp->dev, "%s: ep%02x is disabled\n", __func__,
+ ep->addr);
+ return;
+ }
+
+ if (list_empty(&ep->queue)) {
+ ep->rx_pending = true;
+ spin_unlock(&udc->lock);
+ dev_dbg(udc->isp->dev, "%s: ep%02x (%p) has no request queued\n",
+ __func__, ep->addr, ep);
+ return;
+ }
+
+ req = list_first_entry(&ep->queue, struct isp1760_request,
+ queue);
+ complete = isp1760_udc_receive(ep, req);
+
+ spin_unlock(&udc->lock);
+
+ if (complete)
+ isp1760_udc_request_complete(ep, req, 0);
+}
+
+static void isp1760_ep_tx_complete(struct isp1760_ep *ep)
+{
+ struct isp1760_udc *udc = ep->udc;
+ struct isp1760_request *complete = NULL;
+ struct isp1760_request *req;
+ bool need_zlp;
+
+ spin_lock(&udc->lock);
+
+ if (ep->addr == 0 && udc->ep0_state != ISP1760_CTRL_DATA_IN) {
+ spin_unlock(&udc->lock);
+ dev_dbg(udc->isp->dev, "TX IRQ: invalid endpoint state %u\n",
+ udc->ep0_state);
+ return;
+ }
+
+ if (list_empty(&ep->queue)) {
+ /*
+ * This can happen for the control endpoint when the reply to
+ * the GET_STATUS IN control request is sent directly by the
+ * setup IRQ handler. Just proceed to the status stage.
+ */
+ if (ep->addr == 0) {
+ isp1760_udc_ctrl_send_status(ep, USB_DIR_IN);
+ spin_unlock(&udc->lock);
+ return;
+ }
+
+ spin_unlock(&udc->lock);
+ dev_dbg(udc->isp->dev, "%s: ep%02x has no request queued\n",
+ __func__, ep->addr);
+ return;
+ }
+
+ req = list_first_entry(&ep->queue, struct isp1760_request,
+ queue);
+ req->req.actual += req->packet_size;
+
+ need_zlp = req->req.actual == req->req.length &&
+ !(req->req.length % ep->maxpacket) &&
+ req->packet_size && req->req.zero;
+
+ dev_dbg(udc->isp->dev,
+ "TX IRQ: req %p actual/length %u/%u maxpacket %u packet size %u zero %u need zlp %u\n",
+ req, req->req.actual, req->req.length, ep->maxpacket,
+ req->packet_size, req->req.zero, need_zlp);
+
+ /*
+ * Complete the request if all data has been sent and we don't need to
+ * transmit a zero length packet.
+ */
+ if (req->req.actual == req->req.length && !need_zlp) {
+ complete = req;
+ list_del(&req->queue);
+
+ if (ep->addr == 0)
+ isp1760_udc_ctrl_send_status(ep, USB_DIR_IN);
+
+ if (!list_empty(&ep->queue))
+ req = list_first_entry(&ep->queue,
+ struct isp1760_request, queue);
+ else
+ req = NULL;
+ }
+
+ /*
+ * Transmit the next packet or start the next request, if any.
+ *
+ * TODO: If the endpoint is stalled the next request shouldn't be
+	 * started, but what about the next packet?
+ */
+ if (req)
+ isp1760_udc_transmit(ep, req);
+
+ spin_unlock(&udc->lock);
+
+ if (complete)
+ isp1760_udc_request_complete(ep, complete, 0);
+}
+
+static int __isp1760_udc_set_halt(struct isp1760_ep *ep, bool halt)
+{
+ struct isp1760_udc *udc = ep->udc;
+
+ dev_dbg(udc->isp->dev, "%s: %s halt on ep%02x\n", __func__,
+ halt ? "set" : "clear", ep->addr);
+
+ if (ep->desc && usb_endpoint_xfer_isoc(ep->desc)) {
+ dev_dbg(udc->isp->dev, "%s: ep%02x is isochronous\n", __func__,
+ ep->addr);
+ return -EINVAL;
+ }
+
+ isp1760_udc_select_ep(ep);
+ isp1760_udc_write(udc, DC_CTRLFUNC, halt ? DC_STALL : 0);
+
+ if (ep->addr == 0) {
+ /* When halting the control endpoint, stall both IN and OUT. */
+ __isp1760_udc_select_ep(ep, USB_DIR_IN);
+ isp1760_udc_write(udc, DC_CTRLFUNC, halt ? DC_STALL : 0);
+ } else if (!halt) {
+ /* Reset the data PID by cycling the endpoint enable bit. */
+ u16 eptype = isp1760_udc_read(udc, DC_EPTYPE);
+
+ isp1760_udc_write(udc, DC_EPTYPE, eptype & ~DC_EPENABLE);
+ isp1760_udc_write(udc, DC_EPTYPE, eptype);
+
+ /*
+		 * Disabling the endpoint emptied the transmit FIFO; refill it
+		 * if a request is pending.
+		 *
+		 * TODO: Does the gadget framework require synchronization with
+		 * the TX IRQ handler?
+ */
+ if ((ep->addr & USB_DIR_IN) && !list_empty(&ep->queue)) {
+ struct isp1760_request *req;
+
+ req = list_first_entry(&ep->queue,
+ struct isp1760_request, queue);
+ isp1760_udc_transmit(ep, req);
+ }
+ }
+
+ ep->halted = halt;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Control Endpoint
+ */
+
+static int isp1760_udc_get_status(struct isp1760_udc *udc,
+ const struct usb_ctrlrequest *req)
+{
+ struct isp1760_ep *ep;
+ u16 status;
+
+ if (req->wLength != cpu_to_le16(2) || req->wValue != cpu_to_le16(0))
+ return -EINVAL;
+
+ switch (req->bRequestType) {
+ case USB_DIR_IN | USB_RECIP_DEVICE:
+ status = udc->devstatus;
+ break;
+
+ case USB_DIR_IN | USB_RECIP_INTERFACE:
+ status = 0;
+ break;
+
+ case USB_DIR_IN | USB_RECIP_ENDPOINT:
+ ep = isp1760_udc_find_ep(udc, le16_to_cpu(req->wIndex));
+ if (!ep)
+ return -EINVAL;
+
+ status = 0;
+ if (ep->halted)
+ status |= 1 << USB_ENDPOINT_HALT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ isp1760_udc_write(udc, DC_EPINDEX, DC_ENDPIDX(0) | DC_EPDIR);
+ isp1760_udc_write(udc, DC_BUFLEN, 2);
+
+ writew(cpu_to_le16(status), udc->regs + DC_DATAPORT);
+
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_DSEN);
+
+ dev_dbg(udc->isp->dev, "%s: status 0x%04x\n", __func__, status);
+
+ return 0;
+}
+
+static int isp1760_udc_set_address(struct isp1760_udc *udc, u16 addr)
+{
+ if (addr > 127) {
+ dev_dbg(udc->isp->dev, "invalid device address %u\n", addr);
+ return -EINVAL;
+ }
+
+ if (udc->gadget.state != USB_STATE_DEFAULT &&
+ udc->gadget.state != USB_STATE_ADDRESS) {
+ dev_dbg(udc->isp->dev, "can't set address in state %u\n",
+ udc->gadget.state);
+ return -EINVAL;
+ }
+
+ usb_gadget_set_state(&udc->gadget, addr ? USB_STATE_ADDRESS :
+ USB_STATE_DEFAULT);
+
+ isp1760_udc_write(udc, DC_ADDRESS, DC_DEVEN | addr);
+
+ spin_lock(&udc->lock);
+ isp1760_udc_ctrl_send_status(&udc->ep[0], USB_DIR_OUT);
+ spin_unlock(&udc->lock);
+
+ return 0;
+}
+
+static bool isp1760_ep0_setup_standard(struct isp1760_udc *udc,
+ struct usb_ctrlrequest *req)
+{
+ bool stall;
+
+ switch (req->bRequest) {
+ case USB_REQ_GET_STATUS:
+ return isp1760_udc_get_status(udc, req);
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (req->bRequestType) {
+ case USB_DIR_OUT | USB_RECIP_DEVICE: {
+ /* TODO: Handle remote wakeup feature. */
+ return true;
+ }
+
+ case USB_DIR_OUT | USB_RECIP_ENDPOINT: {
+ u16 index = le16_to_cpu(req->wIndex);
+ struct isp1760_ep *ep;
+
+ if (req->wLength != cpu_to_le16(0) ||
+ req->wValue != cpu_to_le16(USB_ENDPOINT_HALT))
+ return true;
+
+ ep = isp1760_udc_find_ep(udc, index);
+ if (!ep)
+ return true;
+
+ spin_lock(&udc->lock);
+
+ /*
+			 * If the endpoint is wedged, only the gadget can clear
+ * the halt feature. Pretend success in that case, but
+ * keep the endpoint halted.
+ */
+ if (!ep->wedged)
+ stall = __isp1760_udc_set_halt(ep, false);
+ else
+ stall = false;
+
+ if (!stall)
+ isp1760_udc_ctrl_send_status(&udc->ep[0],
+ USB_DIR_OUT);
+
+ spin_unlock(&udc->lock);
+ return stall;
+ }
+
+ default:
+ return true;
+ }
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ switch (req->bRequestType) {
+ case USB_DIR_OUT | USB_RECIP_DEVICE: {
+ /* TODO: Handle remote wakeup and test mode features */
+ return true;
+ }
+
+ case USB_DIR_OUT | USB_RECIP_ENDPOINT: {
+ u16 index = le16_to_cpu(req->wIndex);
+ struct isp1760_ep *ep;
+
+ if (req->wLength != cpu_to_le16(0) ||
+ req->wValue != cpu_to_le16(USB_ENDPOINT_HALT))
+ return true;
+
+ ep = isp1760_udc_find_ep(udc, index);
+ if (!ep)
+ return true;
+
+ spin_lock(&udc->lock);
+
+ stall = __isp1760_udc_set_halt(ep, true);
+ if (!stall)
+ isp1760_udc_ctrl_send_status(&udc->ep[0],
+ USB_DIR_OUT);
+
+ spin_unlock(&udc->lock);
+ return stall;
+ }
+
+ default:
+ return true;
+ }
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
+ return true;
+
+ return isp1760_udc_set_address(udc, le16_to_cpu(req->wValue));
+
+ case USB_REQ_SET_CONFIGURATION:
+ if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
+ return true;
+
+ if (udc->gadget.state != USB_STATE_ADDRESS &&
+ udc->gadget.state != USB_STATE_CONFIGURED)
+ return true;
+
+ stall = udc->driver->setup(&udc->gadget, req) < 0;
+ if (stall)
+ return true;
+
+ usb_gadget_set_state(&udc->gadget, req->wValue ?
+ USB_STATE_CONFIGURED : USB_STATE_ADDRESS);
+
+ /*
+ * SET_CONFIGURATION (and SET_INTERFACE) must reset the halt
+ * feature on all endpoints. There is however no need to do so
+ * explicitly here as the gadget driver will disable and
+ * reenable endpoints, clearing the halt feature.
+ */
+ return false;
+
+ default:
+ return udc->driver->setup(&udc->gadget, req) < 0;
+ }
+}
+
+static void isp1760_ep0_setup(struct isp1760_udc *udc)
+{
+ union {
+ struct usb_ctrlrequest r;
+ u32 data[2];
+ } req;
+ unsigned int count;
+ bool stall = false;
+
+ spin_lock(&udc->lock);
+
+ isp1760_udc_write(udc, DC_EPINDEX, DC_EP0SETUP);
+
+ count = isp1760_udc_read(udc, DC_BUFLEN) & DC_DATACOUNT_MASK;
+ if (count != sizeof(req)) {
+ spin_unlock(&udc->lock);
+
+ dev_err(udc->isp->dev, "invalid length %u for setup packet\n",
+ count);
+
+ isp1760_udc_ctrl_send_stall(&udc->ep[0]);
+ return;
+ }
+
+ req.data[0] = isp1760_udc_read(udc, DC_DATAPORT);
+ req.data[1] = isp1760_udc_read(udc, DC_DATAPORT);
+
+ if (udc->ep0_state != ISP1760_CTRL_SETUP) {
+ spin_unlock(&udc->lock);
+ dev_dbg(udc->isp->dev, "unexpected SETUP packet\n");
+ return;
+ }
+
+ /* Move to the data stage. */
+ if (!req.r.wLength)
+ udc->ep0_state = ISP1760_CTRL_STATUS;
+ else if (req.r.bRequestType & USB_DIR_IN)
+ udc->ep0_state = ISP1760_CTRL_DATA_IN;
+ else
+ udc->ep0_state = ISP1760_CTRL_DATA_OUT;
+
+ udc->ep0_dir = req.r.bRequestType & USB_DIR_IN;
+ udc->ep0_length = le16_to_cpu(req.r.wLength);
+
+ spin_unlock(&udc->lock);
+
+ dev_dbg(udc->isp->dev,
+ "%s: bRequestType 0x%02x bRequest 0x%02x wValue 0x%04x wIndex 0x%04x wLength 0x%04x\n",
+ __func__, req.r.bRequestType, req.r.bRequest,
+ le16_to_cpu(req.r.wValue), le16_to_cpu(req.r.wIndex),
+ le16_to_cpu(req.r.wLength));
+
+ if ((req.r.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ stall = isp1760_ep0_setup_standard(udc, &req.r);
+ else
+ stall = udc->driver->setup(&udc->gadget, &req.r) < 0;
+
+ if (stall)
+ isp1760_udc_ctrl_send_stall(&udc->ep[0]);
+}
+
+/* -----------------------------------------------------------------------------
+ * Gadget Endpoint Operations
+ */
+
+static int isp1760_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ struct isp1760_udc *udc = uep->udc;
+ unsigned long flags;
+ unsigned int type;
+
+ dev_dbg(uep->udc->isp->dev, "%s\n", __func__);
+
+ /*
+ * Validate the descriptor. The control endpoint can't be enabled
+ * manually.
+ */
+ if (desc->bDescriptorType != USB_DT_ENDPOINT ||
+ desc->bEndpointAddress == 0 ||
+ desc->bEndpointAddress != uep->addr ||
+ le16_to_cpu(desc->wMaxPacketSize) > ep->maxpacket) {
+ dev_dbg(udc->isp->dev,
+ "%s: invalid descriptor type %u addr %02x ep addr %02x max packet size %u/%u\n",
+ __func__, desc->bDescriptorType,
+ desc->bEndpointAddress, uep->addr,
+ le16_to_cpu(desc->wMaxPacketSize), ep->maxpacket);
+ return -EINVAL;
+ }
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_ISOC:
+ type = DC_ENDPTYP_ISOC;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ type = DC_ENDPTYP_BULK;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ type = DC_ENDPTYP_INTERRUPT;
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ default:
+ dev_dbg(udc->isp->dev, "%s: control endpoints unsupported\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ uep->desc = desc;
+ uep->maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ uep->rx_pending = false;
+ uep->halted = false;
+ uep->wedged = false;
+
+ isp1760_udc_select_ep(uep);
+ isp1760_udc_write(udc, DC_EPMAXPKTSZ, uep->maxpacket);
+ isp1760_udc_write(udc, DC_BUFLEN, uep->maxpacket);
+ isp1760_udc_write(udc, DC_EPTYPE, DC_EPENABLE | type);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int isp1760_ep_disable(struct usb_ep *ep)
+{
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ struct isp1760_udc *udc = uep->udc;
+ struct isp1760_request *req, *nreq;
+ LIST_HEAD(req_list);
+ unsigned long flags;
+
+ dev_dbg(udc->isp->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!uep->desc) {
+ dev_dbg(udc->isp->dev, "%s: endpoint not enabled\n", __func__);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
+ uep->desc = NULL;
+ uep->maxpacket = 0;
+
+ isp1760_udc_select_ep(uep);
+ isp1760_udc_write(udc, DC_EPTYPE, 0);
+
+ /* TODO Synchronize with the IRQ handler */
+
+ list_splice_init(&uep->queue, &req_list);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ list_for_each_entry_safe(req, nreq, &req_list, queue) {
+ list_del(&req->queue);
+ isp1760_udc_request_complete(uep, req, -ESHUTDOWN);
+ }
+
+ return 0;
+}
+
+static struct usb_request *isp1760_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct isp1760_request *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+
+ return &req->req;
+}
+
+static void isp1760_ep_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct isp1760_request *req = req_to_udc_req(_req);
+
+ kfree(req);
+}
+
+static int isp1760_ep_queue(struct usb_ep *ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct isp1760_request *req = req_to_udc_req(_req);
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ struct isp1760_udc *udc = uep->udc;
+ bool complete = false;
+ unsigned long flags;
+ int ret = 0;
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ dev_dbg(udc->isp->dev,
+ "%s: req %p (%u bytes%s) ep %p(0x%02x)\n", __func__, _req,
+ _req->length, _req->zero ? " (zlp)" : "", uep, uep->addr);
+
+ req->ep = uep;
+
+ if (uep->addr == 0) {
+ if (_req->length != udc->ep0_length &&
+ udc->ep0_state != ISP1760_CTRL_DATA_IN) {
+ dev_dbg(udc->isp->dev,
+ "%s: invalid length %u for req %p\n",
+ __func__, _req->length, req);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (udc->ep0_state) {
+ case ISP1760_CTRL_DATA_IN:
+ dev_dbg(udc->isp->dev, "%s: transmitting req %p\n",
+ __func__, req);
+
+ list_add_tail(&req->queue, &uep->queue);
+ isp1760_udc_transmit(uep, req);
+ break;
+
+ case ISP1760_CTRL_DATA_OUT:
+ list_add_tail(&req->queue, &uep->queue);
+ __isp1760_udc_select_ep(uep, USB_DIR_OUT);
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_DSEN);
+ break;
+
+ case ISP1760_CTRL_STATUS:
+ complete = true;
+ break;
+
+ default:
+ dev_dbg(udc->isp->dev, "%s: invalid ep0 state\n",
+ __func__);
+ ret = -EINVAL;
+ break;
+ }
+ } else if (uep->desc) {
+ bool empty = list_empty(&uep->queue);
+
+ list_add_tail(&req->queue, &uep->queue);
+ if ((uep->addr & USB_DIR_IN) && !uep->halted && empty)
+ isp1760_udc_transmit(uep, req);
+ else if (!(uep->addr & USB_DIR_IN) && uep->rx_pending)
+ complete = isp1760_udc_receive(uep, req);
+ } else {
+ dev_dbg(udc->isp->dev,
+ "%s: can't queue request to disabled ep%02x\n",
+ __func__, uep->addr);
+ ret = -ESHUTDOWN;
+ }
+
+done:
+ if (ret < 0)
+ req->ep = NULL;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (complete)
+ isp1760_udc_request_complete(uep, req, 0);
+
+ return ret;
+}
+
+static int isp1760_ep_dequeue(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct isp1760_request *req = req_to_udc_req(_req);
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ struct isp1760_udc *udc = uep->udc;
+ unsigned long flags;
+
+ dev_dbg(uep->udc->isp->dev, "%s(ep%02x)\n", __func__, uep->addr);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (req->ep != uep)
+ req = NULL;
+ else
+ list_del(&req->queue);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (!req)
+ return -EINVAL;
+
+ isp1760_udc_request_complete(uep, req, -ECONNRESET);
+ return 0;
+}
+
+static int __isp1760_ep_set_halt(struct isp1760_ep *uep, bool stall, bool wedge)
+{
+ struct isp1760_udc *udc = uep->udc;
+ int ret;
+
+ if (!uep->addr) {
+ /*
+ * Halting the control endpoint is only valid as a delayed error
+ * response to a SETUP packet. Make sure EP0 is in the right
+ * stage and that the gadget isn't trying to clear the halt
+ * condition.
+ */
+ if (WARN_ON(udc->ep0_state == ISP1760_CTRL_SETUP || !stall ||
+ wedge)) {
+ return -EINVAL;
+ }
+ }
+
+ if (uep->addr && !uep->desc) {
+ dev_dbg(udc->isp->dev, "%s: ep%02x is disabled\n", __func__,
+ uep->addr);
+ return -EINVAL;
+ }
+
+ if (uep->addr & USB_DIR_IN) {
+ /* Refuse to halt IN endpoints with active transfers. */
+ if (!list_empty(&uep->queue)) {
+ dev_dbg(udc->isp->dev,
+ "%s: ep%02x has request pending\n", __func__,
+ uep->addr);
+ return -EAGAIN;
+ }
+ }
+
+ ret = __isp1760_udc_set_halt(uep, stall);
+ if (ret < 0)
+ return ret;
+
+ if (!uep->addr) {
+ /*
+ * Stalling EP0 completes the control transaction, move back to
+ * the SETUP state.
+ */
+ udc->ep0_state = ISP1760_CTRL_SETUP;
+ return 0;
+ }
+
+ if (wedge)
+ uep->wedged = true;
+ else if (!stall)
+ uep->wedged = false;
+
+ return 0;
+}
+
+static int isp1760_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(uep->udc->isp->dev, "%s: %s halt on ep%02x\n", __func__,
+ value ? "set" : "clear", uep->addr);
+
+ spin_lock_irqsave(&uep->udc->lock, flags);
+ ret = __isp1760_ep_set_halt(uep, value, false);
+ spin_unlock_irqrestore(&uep->udc->lock, flags);
+
+ return ret;
+}
+
+static int isp1760_ep_set_wedge(struct usb_ep *ep)
+{
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ unsigned long flags;
+ int ret;
+
+	dev_dbg(uep->udc->isp->dev, "%s: set wedge on ep%02x\n", __func__,
+ uep->addr);
+
+ spin_lock_irqsave(&uep->udc->lock, flags);
+ ret = __isp1760_ep_set_halt(uep, true, true);
+ spin_unlock_irqrestore(&uep->udc->lock, flags);
+
+ return ret;
+}
+
+static void isp1760_ep_fifo_flush(struct usb_ep *ep)
+{
+ struct isp1760_ep *uep = ep_to_udc_ep(ep);
+ struct isp1760_udc *udc = uep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ isp1760_udc_select_ep(uep);
+
+ /*
+ * Set the CLBUF bit twice to flush both buffers in case double
+ * buffering is enabled.
+ */
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_CLBUF);
+ isp1760_udc_write(udc, DC_CTRLFUNC, DC_CLBUF);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static const struct usb_ep_ops isp1760_ep_ops = {
+ .enable = isp1760_ep_enable,
+ .disable = isp1760_ep_disable,
+ .alloc_request = isp1760_ep_alloc_request,
+ .free_request = isp1760_ep_free_request,
+ .queue = isp1760_ep_queue,
+ .dequeue = isp1760_ep_dequeue,
+ .set_halt = isp1760_ep_set_halt,
+ .set_wedge = isp1760_ep_set_wedge,
+ .fifo_flush = isp1760_ep_fifo_flush,
+};
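A gadget function driver never calls these operations directly; it goes through the standard gadget API, which dispatches to the ops table above. A minimal, hypothetical submission path might look like this (buffer, length and completion handler are illustrative):

	#include <linux/slab.h>
	#include <linux/usb/gadget.h>

	static void example_complete(struct usb_ep *ep, struct usb_request *req)
	{
		/* req->status and req->actual describe the outcome of the transfer. */
	}

	static int example_submit(struct usb_ep *ep, void *buf, unsigned int len)
	{
		struct usb_request *req;

		req = usb_ep_alloc_request(ep, GFP_KERNEL);	/* -> isp1760_ep_alloc_request() */
		if (!req)
			return -ENOMEM;

		req->buf = buf;
		req->length = len;
		req->complete = example_complete;

		return usb_ep_queue(ep, req, GFP_KERNEL);	/* -> isp1760_ep_queue() */
	}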
+
+/* -----------------------------------------------------------------------------
+ * Device States
+ */
+
+/* Called with the UDC spinlock held. */
+static void isp1760_udc_connect(struct isp1760_udc *udc)
+{
+ usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
+ mod_timer(&udc->vbus_timer, jiffies + ISP1760_VBUS_POLL_INTERVAL);
+}
+
+/* Called with the UDC spinlock held. */
+static void isp1760_udc_disconnect(struct isp1760_udc *udc)
+{
+ if (udc->gadget.state < USB_STATE_POWERED)
+ return;
+
+ dev_dbg(udc->isp->dev, "Device disconnected in state %u\n",
+ udc->gadget.state);
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ usb_gadget_set_state(&udc->gadget, USB_STATE_ATTACHED);
+
+ if (udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+
+ del_timer(&udc->vbus_timer);
+
+ /* TODO Reset all endpoints ? */
+}
+
+static void isp1760_udc_init_hw(struct isp1760_udc *udc)
+{
+ /*
+	 * The device controller currently shares its interrupt with the host
+	 * controller, so the DC_IRQ polarity and signaling mode are ignored.
+	 * Set them to active-low level-triggered.
+ *
+ * Configure the control, in and out pipes to generate interrupts on
+ * ACK tokens only (and NYET for the out pipe). The default
+ * configuration also generates an interrupt on the first NACK token.
+ */
+ isp1760_udc_write(udc, DC_INTCONF, DC_CDBGMOD_ACK | DC_DDBGMODIN_ACK |
+ DC_DDBGMODOUT_ACK_NYET);
+
+ isp1760_udc_write(udc, DC_INTENABLE, DC_IEPRXTX(7) | DC_IEPRXTX(6) |
+ DC_IEPRXTX(5) | DC_IEPRXTX(4) | DC_IEPRXTX(3) |
+ DC_IEPRXTX(2) | DC_IEPRXTX(1) | DC_IEPRXTX(0) |
+ DC_IEP0SETUP | DC_IEVBUS | DC_IERESM | DC_IESUSP |
+ DC_IEHS_STA | DC_IEBRST);
+
+ if (udc->connected)
+ isp1760_set_pullup(udc->isp, true);
+
+ isp1760_udc_write(udc, DC_ADDRESS, DC_DEVEN);
+}
+
+static void isp1760_udc_reset(struct isp1760_udc *udc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /*
+	 * The bus reset has reset most registers to their default values;
+	 * reinitialize the UDC hardware.
+ */
+ isp1760_udc_init_hw(udc);
+
+ udc->ep0_state = ISP1760_CTRL_SETUP;
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void isp1760_udc_suspend(struct isp1760_udc *udc)
+{
+ if (udc->gadget.state < USB_STATE_DEFAULT)
+ return;
+
+ if (udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+}
+
+static void isp1760_udc_resume(struct isp1760_udc *udc)
+{
+ if (udc->gadget.state < USB_STATE_DEFAULT)
+ return;
+
+ if (udc->driver->resume)
+ udc->driver->resume(&udc->gadget);
+}
+
+/* -----------------------------------------------------------------------------
+ * Gadget Operations
+ */
+
+static int isp1760_udc_get_frame(struct usb_gadget *gadget)
+{
+ struct isp1760_udc *udc = gadget_to_udc(gadget);
+
+ return isp1760_udc_read(udc, DC_FRAMENUM) & ((1 << 11) - 1);
+}
+
+static int isp1760_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct isp1760_udc *udc = gadget_to_udc(gadget);
+
+ dev_dbg(udc->isp->dev, "%s\n", __func__);
+ return -ENOTSUPP;
+}
+
+static int isp1760_udc_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct isp1760_udc *udc = gadget_to_udc(gadget);
+
+ if (is_selfpowered)
+ udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
+ else
+ udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ return 0;
+}
+
+static int isp1760_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct isp1760_udc *udc = gadget_to_udc(gadget);
+
+ isp1760_set_pullup(udc->isp, is_on);
+ udc->connected = is_on;
+
+ return 0;
+}
+
+static int isp1760_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct isp1760_udc *udc = gadget_to_udc(gadget);
+
+ /* The hardware doesn't support low speed. */
+ if (driver->max_speed < USB_SPEED_FULL) {
+ dev_err(udc->isp->dev, "Invalid gadget driver\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&udc->lock);
+
+ if (udc->driver) {
+ dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
+ spin_unlock(&udc->lock);
+ return -EBUSY;
+ }
+
+ udc->driver = driver;
+
+ spin_unlock(&udc->lock);
+
+ dev_dbg(udc->isp->dev, "starting UDC with driver %s\n",
+ driver->function);
+
+ udc->devstatus = 0;
+ udc->connected = true;
+
+ usb_gadget_set_state(&udc->gadget, USB_STATE_ATTACHED);
+
+	/* DMA isn't supported yet; don't enable the DMA clock. */
+ isp1760_udc_write(udc, DC_MODE, DC_GLINTENA);
+
+ isp1760_udc_init_hw(udc);
+
+ dev_dbg(udc->isp->dev, "UDC started with driver %s\n",
+ driver->function);
+
+ return 0;
+}
+
+static int isp1760_udc_stop(struct usb_gadget *gadget)
+{
+ struct isp1760_udc *udc = gadget_to_udc(gadget);
+
+ dev_dbg(udc->isp->dev, "%s\n", __func__);
+
+ del_timer_sync(&udc->vbus_timer);
+
+ isp1760_udc_write(udc, DC_MODE, 0);
+
+ spin_lock(&udc->lock);
+ udc->driver = NULL;
+ spin_unlock(&udc->lock);
+
+ return 0;
+}
+
+static struct usb_gadget_ops isp1760_udc_ops = {
+ .get_frame = isp1760_udc_get_frame,
+ .wakeup = isp1760_udc_wakeup,
+ .set_selfpowered = isp1760_udc_set_selfpowered,
+ .pullup = isp1760_udc_pullup,
+ .udc_start = isp1760_udc_start,
+ .udc_stop = isp1760_udc_stop,
+};
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+static irqreturn_t isp1760_udc_irq(int irq, void *dev)
+{
+ struct isp1760_udc *udc = dev;
+ unsigned int i;
+ u32 status;
+
+ status = isp1760_udc_read(udc, DC_INTERRUPT)
+ & isp1760_udc_read(udc, DC_INTENABLE);
+ isp1760_udc_write(udc, DC_INTERRUPT, status);
+
+ if (status & DC_IEVBUS) {
+ dev_dbg(udc->isp->dev, "%s(VBUS)\n", __func__);
+ /* The VBUS interrupt is only triggered when VBUS appears. */
+ spin_lock(&udc->lock);
+ isp1760_udc_connect(udc);
+ spin_unlock(&udc->lock);
+ }
+
+ if (status & DC_IEBRST) {
+ dev_dbg(udc->isp->dev, "%s(BRST)\n", __func__);
+
+ isp1760_udc_reset(udc);
+ }
+
+ for (i = 0; i <= 7; ++i) {
+ struct isp1760_ep *ep = &udc->ep[i*2];
+
+ if (status & DC_IEPTX(i)) {
+ dev_dbg(udc->isp->dev, "%s(EPTX%u)\n", __func__, i);
+ isp1760_ep_tx_complete(ep);
+ }
+
+ if (status & DC_IEPRX(i)) {
+ dev_dbg(udc->isp->dev, "%s(EPRX%u)\n", __func__, i);
+ isp1760_ep_rx_ready(i ? ep - 1 : ep);
+ }
+ }
+
+ if (status & DC_IEP0SETUP) {
+ dev_dbg(udc->isp->dev, "%s(EP0SETUP)\n", __func__);
+
+ isp1760_ep0_setup(udc);
+ }
+
+ if (status & DC_IERESM) {
+ dev_dbg(udc->isp->dev, "%s(RESM)\n", __func__);
+ isp1760_udc_resume(udc);
+ }
+
+ if (status & DC_IESUSP) {
+ dev_dbg(udc->isp->dev, "%s(SUSP)\n", __func__);
+
+ spin_lock(&udc->lock);
+ if (!(isp1760_udc_read(udc, DC_MODE) & DC_VBUSSTAT))
+ isp1760_udc_disconnect(udc);
+ else
+ isp1760_udc_suspend(udc);
+ spin_unlock(&udc->lock);
+ }
+
+ if (status & DC_IEHS_STA) {
+ dev_dbg(udc->isp->dev, "%s(HS_STA)\n", __func__);
+ udc->gadget.speed = USB_SPEED_HIGH;
+ }
+
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void isp1760_udc_vbus_poll(unsigned long data)
+{
+ struct isp1760_udc *udc = (struct isp1760_udc *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!(isp1760_udc_read(udc, DC_MODE) & DC_VBUSSTAT))
+ isp1760_udc_disconnect(udc);
+ else if (udc->gadget.state >= USB_STATE_POWERED)
+ mod_timer(&udc->vbus_timer,
+ jiffies + ISP1760_VBUS_POLL_INTERVAL);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Registration
+ */
+
+static void isp1760_udc_init_eps(struct isp1760_udc *udc)
+{
+ unsigned int i;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ for (i = 0; i < ARRAY_SIZE(udc->ep); ++i) {
+ struct isp1760_ep *ep = &udc->ep[i];
+ unsigned int ep_num = (i + 1) / 2;
+ bool is_in = !(i & 1);
+
+ ep->udc = udc;
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->addr = (ep_num && is_in ? USB_DIR_IN : USB_DIR_OUT)
+ | ep_num;
+ ep->desc = NULL;
+
+ sprintf(ep->name, "ep%u%s", ep_num,
+ ep_num ? (is_in ? "in" : "out") : "");
+
+ ep->ep.ops = &isp1760_ep_ops;
+ ep->ep.name = ep->name;
+
+ /*
+ * Hardcode the maximum packet sizes for now, to 64 bytes for
+ * the control endpoint and 512 bytes for all other endpoints.
+ * This fits in the 8kB FIFO without double-buffering.
+ */
+ if (ep_num == 0) {
+ ep->ep.maxpacket = 64;
+ ep->maxpacket = 64;
+ udc->gadget.ep0 = &ep->ep;
+ } else {
+ ep->ep.maxpacket = 512;
+ ep->maxpacket = 0;
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
+ }
+}
+
+static int isp1760_udc_init(struct isp1760_udc *udc)
+{
+ u16 scratch;
+ u32 chipid;
+
+ /*
+ * Check that the controller is present by writing to the scratch
+ * register, modifying the bus pattern by reading from the chip ID
+ * register, and reading the scratch register value back. The chip ID
+ * and scratch register contents must match the expected values.
+ */
+ isp1760_udc_write(udc, DC_SCRATCH, 0xbabe);
+ chipid = isp1760_udc_read(udc, DC_CHIPID);
+ scratch = isp1760_udc_read(udc, DC_SCRATCH);
+
+ if (scratch != 0xbabe) {
+ dev_err(udc->isp->dev,
+ "udc: scratch test failed (0x%04x/0x%08x)\n",
+ scratch, chipid);
+ return -ENODEV;
+ }
+
+ if (chipid != 0x00011582) {
+ dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
+ return -ENODEV;
+ }
+
+ /* Reset the device controller. */
+ isp1760_udc_write(udc, DC_MODE, DC_SFRESET);
+ usleep_range(10000, 11000);
+ isp1760_udc_write(udc, DC_MODE, 0);
+ usleep_range(10000, 11000);
+
+ return 0;
+}
+
+int isp1760_udc_register(struct isp1760_device *isp, int irq,
+ unsigned long irqflags)
+{
+ struct isp1760_udc *udc = &isp->udc;
+ const char *devname;
+ int ret;
+
+ udc->irq = -1;
+ udc->isp = isp;
+ udc->regs = isp->regs;
+
+ spin_lock_init(&udc->lock);
+ setup_timer(&udc->vbus_timer, isp1760_udc_vbus_poll,
+ (unsigned long)udc);
+
+ ret = isp1760_udc_init(udc);
+ if (ret < 0)
+ return ret;
+
+ devname = dev_name(isp->dev);
+ udc->irqname = kmalloc(strlen(devname) + 7, GFP_KERNEL);
+ if (!udc->irqname)
+ return -ENOMEM;
+
+ sprintf(udc->irqname, "%s (udc)", devname);
+
+ ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED |
+ irqflags, udc->irqname, udc);
+ if (ret < 0)
+ goto error;
+
+ udc->irq = irq;
+
+ /*
+ * Initialize the gadget static fields and register its device. Gadget
+	 * fields that vary during the lifetime of the gadget are initialized
+ * by the UDC core.
+ */
+ udc->gadget.ops = &isp1760_udc_ops;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->gadget.name = "isp1761_udc";
+
+ isp1760_udc_init_eps(udc);
+
+ ret = usb_add_gadget_udc(isp->dev, &udc->gadget);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ if (udc->irq >= 0)
+ free_irq(udc->irq, udc);
+ kfree(udc->irqname);
+
+ return ret;
+}
+
+void isp1760_udc_unregister(struct isp1760_device *isp)
+{
+ struct isp1760_udc *udc = &isp->udc;
+
+ if (!udc->isp)
+ return;
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ free_irq(udc->irq, udc);
+ kfree(udc->irqname);
+}
diff --git a/drivers/usb/isp1760/isp1760-udc.h b/drivers/usb/isp1760/isp1760-udc.h
new file mode 100644
index 000000000000..26899ed81145
--- /dev/null
+++ b/drivers/usb/isp1760/isp1760-udc.h
@@ -0,0 +1,106 @@
+/*
+ * Driver for the NXP ISP1761 device controller
+ *
+ * Copyright 2014 Ideas on Board Oy
+ *
+ * Contacts:
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ISP1760_UDC_H_
+#define _ISP1760_UDC_H_
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/usb/gadget.h>
+
+struct isp1760_device;
+struct isp1760_udc;
+
+enum isp1760_ctrl_state {
+ ISP1760_CTRL_SETUP, /* Waiting for a SETUP transaction */
+ ISP1760_CTRL_DATA_IN, /* Setup received, data IN stage */
+ ISP1760_CTRL_DATA_OUT, /* Setup received, data OUT stage */
+ ISP1760_CTRL_STATUS, /* 0-length request in status stage */
+};
+
+struct isp1760_ep {
+ struct isp1760_udc *udc;
+ struct usb_ep ep;
+
+ struct list_head queue;
+
+ unsigned int addr;
+ unsigned int maxpacket;
+ char name[7];
+
+ const struct usb_endpoint_descriptor *desc;
+
+ bool rx_pending;
+ bool halted;
+ bool wedged;
+};
+
+/**
+ * struct isp1760_udc - UDC state information
+ * irq: IRQ number
+ * irqname: IRQ name (as passed to request_irq)
+ * regs: Base address of the UDC registers
+ * driver: Gadget driver
+ * gadget: Gadget device
+ * lock: Protects driver, vbus_timer, ep, ep0_*, DC_EPINDEX register
+ * ep: Array of endpoints
+ * ep0_state: Control request state for endpoint 0
+ * ep0_dir: Direction of the current control request
+ * ep0_length: Length of the current control request
+ * connected: Tracks gadget driver bus connection state
+ */
+struct isp1760_udc {
+#ifdef CONFIG_USB_ISP1761_UDC
+ struct isp1760_device *isp;
+
+ int irq;
+ char *irqname;
+ void __iomem *regs;
+
+ struct usb_gadget_driver *driver;
+ struct usb_gadget gadget;
+
+ spinlock_t lock;
+ struct timer_list vbus_timer;
+
+ struct isp1760_ep ep[15];
+
+ enum isp1760_ctrl_state ep0_state;
+ u8 ep0_dir;
+ u16 ep0_length;
+
+ bool connected;
+
+ unsigned int devstatus;
+#endif
+};
+
+#ifdef CONFIG_USB_ISP1761_UDC
+int isp1760_udc_register(struct isp1760_device *isp, int irq,
+ unsigned long irqflags);
+void isp1760_udc_unregister(struct isp1760_device *isp);
+#else
+static inline int isp1760_udc_register(struct isp1760_device *isp, int irq,
+ unsigned long irqflags)
+{
+ return 0;
+}
+
+static inline void isp1760_udc_unregister(struct isp1760_device *isp)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 40ef40affe83..588d62a73e1a 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -124,12 +124,8 @@ static void async_complete(struct urb *urb)
} else if (rq->dr->bRequest == 3) {
memcpy(priv->reg, rq->reg, sizeof(priv->reg));
#if 0
- dev_dbg(&priv->usbdev->dev,
- "async_complete regs %02x %02x %02x %02x %02x %02x %02x\n",
- (unsigned int)priv->reg[0], (unsigned int)priv->reg[1],
- (unsigned int)priv->reg[2], (unsigned int)priv->reg[3],
- (unsigned int)priv->reg[4], (unsigned int)priv->reg[5],
- (unsigned int)priv->reg[6]);
+ dev_dbg(&priv->usbdev->dev, "async_complete regs %7ph\n",
+ priv->reg);
#endif
/* if nAck interrupts are enabled and we have an interrupt, call the interrupt procedure */
if (rq->reg[2] & rq->reg[1] & 0x10 && pp)
@@ -742,9 +738,7 @@ static int uss720_probe(struct usb_interface *intf,
set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
/* debugging */
get_1284_register(pp, 0, &reg, GFP_KERNEL);
- dev_dbg(&intf->dev, "reg: %02x %02x %02x %02x %02x %02x %02x\n",
- priv->reg[0], priv->reg[1], priv->reg[2], priv->reg[3],
- priv->reg[4], priv->reg[5], priv->reg[6]);
+ dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
endpoint = &interface->endpoint[2];
dev_dbg(&intf->dev, "epaddr %d interval %d\n",
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 9d68372dd9aa..14e1628483d9 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -63,39 +63,48 @@ comment "Platform Glue Layer"
config USB_MUSB_DAVINCI
tristate "DaVinci"
depends on ARCH_DAVINCI_DMx
+ depends on NOP_USB_XCEIV
depends on BROKEN
config USB_MUSB_DA8XX
tristate "DA8xx/OMAP-L1x"
depends on ARCH_DAVINCI_DA8XX
+ depends on NOP_USB_XCEIV
depends on BROKEN
config USB_MUSB_TUSB6010
tristate "TUSB6010"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
- depends on ARCH_OMAP2PLUS && USB
+ depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY
select GENERIC_PHY
config USB_MUSB_AM35X
tristate "AM35x"
depends on ARCH_OMAP
+ depends on NOP_USB_XCEIV
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
select USB_MUSB_AM335X_CHILD
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on OF_IRQ
config USB_MUSB_BLACKFIN
tristate "Blackfin"
depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523)
+ depends on NOP_USB_XCEIV
config USB_MUSB_UX500
tristate "Ux500 platforms"
+ depends on ARCH_U8500 || COMPILE_TEST
config USB_MUSB_JZ4740
tristate "JZ4740"
+ depends on NOP_USB_XCEIV
depends on MACH_JZ4740 || COMPILE_TEST
depends on USB_MUSB_GADGET
depends on USB_OTG_BLACKLIST_HUB
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index a441a2de8619..6123b748d262 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
bfin_write16(addr + offset, data);
}
-static void binf_writel(void __iomem *addr, unsigned offset, u32 data)
+static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
{
bfin_write16(addr + offset, (u16)data);
}
@@ -608,7 +608,7 @@ static SIMPLE_DEV_PM_OPS(bfin_pm_ops, bfin_suspend, bfin_resume);
static struct platform_driver bfin_driver = {
.probe = bfin_probe,
- .remove = __exit_p(bfin_remove),
+ .remove = bfin_remove,
.driver = {
.name = "musb-blackfin",
.pm = &bfin_pm_ops,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 34cce3e38c49..e6f4cbfeed97 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -567,6 +567,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->xceiv->otg->state = OTG_STATE_A_HOST;
musb->is_active = 1;
+ musb_host_resume_root_hub(musb);
break;
case OTG_STATE_B_WAIT_ACON:
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -2500,6 +2501,12 @@ static int musb_runtime_resume(struct device *dev)
musb_restore_context(musb);
first = 0;
+ if (musb->need_finish_resume) {
+ musb->need_finish_resume = 0;
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
+ }
+
return 0;
}
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f64fd964dc6d..be84562d021b 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -9,9 +9,9 @@
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
-#define EP_MODE_AUTOREG_NONE 0
-#define EP_MODE_AUTOREG_ALL_NEOP 1
-#define EP_MODE_AUTOREG_ALWAYS 3
+#define EP_MODE_AUTOREQ_NONE 0
+#define EP_MODE_AUTOREQ_ALL_NEOP 1
+#define EP_MODE_AUTOREQ_ALWAYS 3
#define EP_MODE_DMA_TRANSPARENT 0
#define EP_MODE_DMA_RNDIS 1
@@ -404,19 +404,19 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
/* auto req */
cppi41_set_autoreq_mode(cppi41_channel,
- EP_MODE_AUTOREG_ALL_NEOP);
+ EP_MODE_AUTOREQ_ALL_NEOP);
} else {
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), 0);
cppi41_set_dma_mode(cppi41_channel,
EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel,
- EP_MODE_AUTOREG_NONE);
+ EP_MODE_AUTOREQ_NONE);
}
} else {
/* fallback mode */
cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
- cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
+ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
len = min_t(u32, packet_sz, len);
}
cppi41_channel->prog_len = len;
@@ -549,10 +549,15 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
csr &= ~MUSB_TXCSR_DMAENAB;
musb_writew(epio, MUSB_TXCSR, csr);
} else {
+ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+
csr = musb_readw(epio, MUSB_RXCSR);
csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
musb_writew(epio, MUSB_RXCSR, csr);
+ /* wait to drain the cppi dma pipeline */

+ udelay(50);
+
csr = musb_readw(epio, MUSB_RXCSR);
if (csr & MUSB_RXCSR_RXPKTRDY) {
csr |= MUSB_RXCSR_FLUSHFIFO;
@@ -566,13 +571,14 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
tdbit <<= 16;
do {
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ if (is_tx)
+ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
ret = dmaengine_terminate_all(cppi41_channel->dc);
} while (ret == -EAGAIN);
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
-
if (is_tx) {
+ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_TXPKTRDY) {
csr |= MUSB_TXCSR_FLUSHFIFO;
@@ -628,9 +634,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
ret = of_property_read_string_index(np, "dma-names", i, &str);
if (ret)
goto err;
- if (!strncmp(str, "tx", 2))
+ if (strstarts(str, "tx"))
is_tx = 1;
- else if (!strncmp(str, "rx", 2))
+ else if (strstarts(str, "rx"))
is_tx = 0;
else {
dev_err(dev, "Wrong dmatype %s\n", str);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index ad3701a97389..78a283e9ce40 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = {
{ "RxMaxPp", MUSB_RXMAXP, 16 },
{ "RxCSR", MUSB_RXCSR, 16 },
{ "RxCount", MUSB_RXCOUNT, 16 },
- { "ConfigData", MUSB_CONFIGDATA,8 },
{ "IntrRxE", MUSB_INTRRXE, 16 },
{ "IntrTxE", MUSB_INTRTXE, 16 },
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
- { "BabbleCtl", MUSB_BABBLE_CTL,8 },
- { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
- { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
- { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
- { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
{ "VControl", 0x68, 32 },
{ "HWVers", 0x69, 16 },
- { "EPInfo", MUSB_EPINFO, 8 },
- { "RAMInfo", MUSB_RAMINFO, 8 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },
@@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = {
{ "DMA_CNTLch7", 0x274, 16 },
{ "DMA_ADDRch7", 0x278, 32 },
{ "DMA_COUNTch7", 0x27C, 32 },
+#ifndef CONFIG_BLACKFIN
+ { "ConfigData", MUSB_CONFIGDATA,8 },
+ { "BabbleCtl", MUSB_BABBLE_CTL,8 },
+ { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
+ { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
+ { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
+ { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
+ { "EPInfo", MUSB_EPINFO, 8 },
+ { "RAMInfo", MUSB_RAMINFO, 8 },
+#endif
{ } /* Terminating Entry */
};
@@ -194,33 +196,33 @@ static ssize_t musb_test_mode_write(struct file *file,
memset(buf, 0x00, sizeof(buf));
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- if (!strncmp(buf, "force host", 9))
+ if (strstarts(buf, "force host"))
test = MUSB_TEST_FORCE_HOST;
- if (!strncmp(buf, "fifo access", 11))
+ if (strstarts(buf, "fifo access"))
test = MUSB_TEST_FIFO_ACCESS;
- if (!strncmp(buf, "force full-speed", 15))
+ if (strstarts(buf, "force full-speed"))
test = MUSB_TEST_FORCE_FS;
- if (!strncmp(buf, "force high-speed", 15))
+ if (strstarts(buf, "force high-speed"))
test = MUSB_TEST_FORCE_HS;
- if (!strncmp(buf, "test packet", 10)) {
+ if (strstarts(buf, "test packet")) {
test = MUSB_TEST_PACKET;
musb_load_testpacket(musb);
}
- if (!strncmp(buf, "test K", 6))
+ if (strstarts(buf, "test K"))
test = MUSB_TEST_K;
- if (!strncmp(buf, "test J", 6))
+ if (strstarts(buf, "test J"))
test = MUSB_TEST_J;
- if (!strncmp(buf, "test SE0 NAK", 12))
+ if (strstarts(buf, "test SE0 NAK"))
test = MUSB_TEST_SE0_NAK;
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
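The strncmp() calls being replaced above hard-coded prefix lengths that had drifted out of sync with the literals (9 for "force host", 15 for "force full-speed"); strstarts() derives the length from the string itself. A minimal sketch of the difference, with a placeholder command string:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only; the command string is a placeholder. */
static bool example_is_force_host(const char *buf)
{
	/* Old style: comparing only 9 chars also matches e.g. "force hosanna". */
	if (!strncmp(buf, "force host", 9))
		return true;

	/* strstarts() compares against the full literal. */
	return strstarts(buf, "force host");
}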
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 49b04cb6f5ca..b2d9040c7685 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1612,9 +1612,7 @@ done:
static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
- struct musb *musb = gadget_to_musb(gadget);
-
- musb->is_self_powered = !!is_selfpowered;
+ gadget->is_selfpowered = !!is_selfpowered;
return 0;
}
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 2af45a0c8930..10d30afe4a3c 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -85,7 +85,7 @@ static int service_tx_status_request(
switch (recip) {
case USB_RECIP_DEVICE:
- result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] = musb->g.is_selfpowered << USB_DEVICE_SELF_POWERED;
result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
if (musb->g.is_otg) {
result[0] |= musb->g.b_hnp_enable
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 23d474d3d7f4..883a9adfdfff 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
if (musb->port_mode == MUSB_PORT_MODE_GADGET)
return;
usb_remove_hcd(musb->hcd);
- musb->hcd = NULL;
}
void musb_host_free(struct musb *musb)
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index b072420e44f5..294e159f4afe 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -72,7 +72,6 @@ void musb_host_finish_resume(struct work_struct *work)
musb->xceiv->otg->state = OTG_STATE_A_HOST;
spin_unlock_irqrestore(&musb->lock, flags);
- musb_host_resume_root_hub(musb);
}
void musb_port_suspend(struct musb *musb, bool do_suspend)
@@ -349,9 +348,9 @@ int musb_hub_control(
desc->bDescriptorType = 0x29;
desc->bNbrPorts = 1;
desc->wHubCharacteristics = cpu_to_le16(
- 0x0001 /* per-port power switching */
- | 0x0010 /* no overcurrent reporting */
- );
+ HUB_CHAR_INDV_PORT_LPSM /* per-port power switching */
+ | HUB_CHAR_NO_OCPM /* no overcurrent reporting */
+ );
desc->bPwrOn2PwrGood = 5; /* msec/2 */
desc->bHubContrCurrent = 0;
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index ab38aa32a6c1..94eb2923afed 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -107,19 +107,6 @@ static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
#define fsl_writel(val, addr) writel(val, addr)
#endif /* CONFIG_PPC32 */
-/* Routines to access transceiver ULPI registers */
-u8 view_ulpi(u8 addr)
-{
- u32 temp;
-
- temp = 0x40000000 | (addr << 16);
- fsl_writel(temp, &usb_dr_regs->ulpiview);
- udelay(1000);
- while (temp & 0x40)
- temp = fsl_readl(&usb_dr_regs->ulpiview);
- return (le32_to_cpu(temp) & 0x0000ff00) >> 8;
-}
-
int write_ulpi(u8 addr, u8 data)
{
u32 temp;
@@ -460,28 +447,6 @@ static void fsl_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
fsl_otg_del_timer(fsm, timer);
}
-/*
- * Reduce timer count by 1, and find timeout conditions.
- * Called by fsl_otg 1ms timer interrupt
- */
-int fsl_otg_tick_timer(void)
-{
- struct fsl_otg_timer *tmp_timer, *del_tmp;
- int expired = 0;
-
- list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
- tmp_timer->count--;
- /* check if timer expires */
- if (!tmp_timer->count) {
- list_del(&tmp_timer->list);
- tmp_timer->function(tmp_timer->data);
- expired = 1;
- }
- }
-
- return expired;
-}
-
/* Reset controller, not reset the bus */
void otg_reset_controller(void)
{
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index f1b719b45a53..70be50b734b2 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/slab.h>
@@ -39,6 +40,10 @@
#include "phy-generic.h"
+#define VBUS_IRQ_FLAGS \
+ (IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | \
+ IRQF_ONESHOT)
+
struct platform_device *usb_phy_generic_register(void)
{
return platform_device_register_simple("usb_phy_generic",
@@ -59,19 +64,79 @@ static int nop_set_suspend(struct usb_phy *x, int suspend)
static void nop_reset_set(struct usb_phy_generic *nop, int asserted)
{
- int value;
+ if (!nop->gpiod_reset)
+ return;
+
+ gpiod_direction_output(nop->gpiod_reset, !asserted);
+ usleep_range(10000, 20000);
+ gpiod_set_value(nop->gpiod_reset, asserted);
+}
+
+/* interface to regulator framework */
+static void nop_set_vbus_draw(struct usb_phy_generic *nop, unsigned mA)
+{
+ struct regulator *vbus_draw = nop->vbus_draw;
+ int enabled;
+ int ret;
- if (!gpio_is_valid(nop->gpio_reset))
+ if (!vbus_draw)
return;
- value = asserted;
- if (nop->reset_active_low)
- value = !value;
+ enabled = nop->vbus_draw_enabled;
+ if (mA) {
+ regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
+ if (!enabled) {
+ ret = regulator_enable(vbus_draw);
+ if (ret < 0)
+ return;
+ nop->vbus_draw_enabled = 1;
+ }
+ } else {
+ if (enabled) {
+ ret = regulator_disable(vbus_draw);
+ if (ret < 0)
+ return;
+ nop->vbus_draw_enabled = 0;
+ }
+ }
+ nop->mA = mA;
+}
+
+
+static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
+{
+ struct usb_phy_generic *nop = data;
+ struct usb_otg *otg = nop->phy.otg;
+ int vbus, status;
+
+ vbus = gpiod_get_value(nop->gpiod_vbus);
+ if ((vbus ^ nop->vbus) == 0)
+ return IRQ_HANDLED;
+ nop->vbus = vbus;
+
+ if (vbus) {
+ status = USB_EVENT_VBUS;
+ otg->state = OTG_STATE_B_PERIPHERAL;
+ nop->phy.last_event = status;
+ usb_gadget_vbus_connect(otg->gadget);
+
+ /* drawing a "unit load" is *always* OK, except for OTG */
+ nop_set_vbus_draw(nop, 100);
+
+ atomic_notifier_call_chain(&nop->phy.notifier, status,
+ otg->gadget);
+ } else {
+ nop_set_vbus_draw(nop, 0);
- gpio_set_value_cansleep(nop->gpio_reset, value);
+ usb_gadget_vbus_disconnect(otg->gadget);
+ status = USB_EVENT_NONE;
+ otg->state = OTG_STATE_B_IDLE;
+ nop->phy.last_event = status;
- if (!asserted)
- usleep_range(10000, 20000);
+ atomic_notifier_call_chain(&nop->phy.notifier, status,
+ otg->gadget);
+ }
+ return IRQ_HANDLED;
}
int usb_gen_phy_init(struct usb_phy *phy)
@@ -143,36 +208,47 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
struct usb_phy_generic_platform_data *pdata)
{
enum usb_phy_type type = USB_PHY_TYPE_USB2;
- int err;
+ int err = 0;
u32 clk_rate = 0;
bool needs_vcc = false;
- nop->reset_active_low = true; /* default behaviour */
-
if (dev->of_node) {
struct device_node *node = dev->of_node;
- enum of_gpio_flags flags = 0;
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
needs_vcc = of_property_read_bool(node, "vcc-supply");
- nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
- 0, &flags);
- if (nop->gpio_reset == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
-
+ nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset");
+ err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
+ if (!err) {
+ nop->gpiod_vbus = devm_gpiod_get_optional(dev,
+ "vbus-detect");
+ err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
+ }
} else if (pdata) {
type = pdata->type;
clk_rate = pdata->clk_rate;
needs_vcc = pdata->needs_vcc;
- nop->gpio_reset = pdata->gpio_reset;
- } else {
- nop->gpio_reset = -1;
+ if (gpio_is_valid(pdata->gpio_reset)) {
+ err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
+ dev_name(dev));
+ if (!err)
+ nop->gpiod_reset =
+ gpio_to_desc(pdata->gpio_reset);
+ }
+ nop->gpiod_vbus = pdata->gpiod_vbus;
+ }
+
+ if (err == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (err) {
+ dev_err(dev, "Error requesting RESET or VBUS GPIO\n");
+ return err;
}
+ if (nop->gpiod_reset)
+ gpiod_direction_output(nop->gpiod_reset, 1);
nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
GFP_KERNEL);
@@ -201,24 +277,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
return -EPROBE_DEFER;
}
- if (gpio_is_valid(nop->gpio_reset)) {
- unsigned long gpio_flags;
-
- /* Assert RESET */
- if (nop->reset_active_low)
- gpio_flags = GPIOF_OUT_INIT_LOW;
- else
- gpio_flags = GPIOF_OUT_INIT_HIGH;
-
- err = devm_gpio_request_one(dev, nop->gpio_reset,
- gpio_flags, dev_name(dev));
- if (err) {
- dev_err(dev, "Error requesting RESET GPIO %d\n",
- nop->gpio_reset);
- return err;
- }
- }
-
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
@@ -247,6 +305,18 @@ static int usb_phy_generic_probe(struct platform_device *pdev)
err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
if (err)
return err;
+ if (nop->gpiod_vbus) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ gpiod_to_irq(nop->gpiod_vbus),
+ NULL, nop_gpio_vbus_thread,
+ VBUS_IRQ_FLAGS, "vbus_detect",
+ nop);
+ if (err) {
+ dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
+ gpiod_to_irq(nop->gpiod_vbus), err);
+ return err;
+ }
+ }
nop->phy.init = usb_gen_phy_init;
nop->phy.shutdown = usb_gen_phy_shutdown;
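The conversion above hinges on devm_gpiod_get_optional() returning NULL when the GPIO is simply absent, a descriptor when it is present, and an ERR_PTR() (including -EPROBE_DEFER) otherwise; a condensed sketch of that idiom, with "reset" as the connection id as in the hunk above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Illustrative only: optional GPIO lookup with deferral handling,
 * mirroring usb_phy_gen_create_phy() above. The "reset" con_id maps
 * to a "reset-gpios" DT property. */
static int example_get_optional_gpio(struct device *dev,
				     struct gpio_desc **out)
{
	*out = devm_gpiod_get_optional(dev, "reset");

	/* NULL (not wired up) and a valid descriptor both map to 0;
	 * errors, notably -EPROBE_DEFER, are returned to the caller. */
	return PTR_ERR_OR_ZERO(*out);
}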
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index d8feacc0b7fb..0d0eadd54ed9 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -2,14 +2,20 @@
#define _PHY_GENERIC_H_
#include <linux/usb/usb_phy_generic.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
struct usb_phy_generic {
struct usb_phy phy;
struct device *dev;
struct clk *clk;
struct regulator *vcc;
- int gpio_reset;
- bool reset_active_low;
+ struct gpio_desc *gpiod_reset;
+ struct gpio_desc *gpiod_vbus;
+ struct regulator *vbus_draw;
+ bool vbus_draw_enabled;
+ unsigned long mA;
+ unsigned int vbus;
};
int usb_gen_phy_init(struct usb_phy *phy);
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 699e38c73d82..697a741a0cb1 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg)
static void mv_otg_update_state(struct mv_otg *mvotg)
{
struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
- struct usb_phy *phy = &mvotg->phy;
int old_state = mvotg->phy.otg->state;
switch (old_state) {
@@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- if (mvotg->phy.state != OTG_STATE_B_IDLE) {
+ if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
dev_info(&pdev->dev,
"OTG state is not B_IDLE, it is %d!\n",
- mvotg->phy.state);
+ mvotg->phy.otg->state);
return -EAGAIN;
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index b9589f663683..8f7cb068d29b 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -40,6 +40,7 @@
#define BM_USBPHY_CTRL_SFTRST BIT(31)
#define BM_USBPHY_CTRL_CLKGATE BIT(30)
+#define BM_USBPHY_CTRL_OTG_ID_VALUE BIT(27)
#define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26)
#define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25)
#define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23)
@@ -69,6 +70,9 @@
#define ANADIG_USB2_LOOPBACK_SET 0x244
#define ANADIG_USB2_LOOPBACK_CLR 0x248
+#define ANADIG_USB1_MISC 0x1f0
+#define ANADIG_USB2_MISC 0x250
+
#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12)
#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11)
@@ -80,6 +84,11 @@
#define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2)
#define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5)
+#define BM_ANADIG_USB1_MISC_RX_VPIN_FS BIT(29)
+#define BM_ANADIG_USB1_MISC_RX_VMIN_FS BIT(28)
+#define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29)
+#define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28)
+
#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
/* Do disconnection between PHY and controller without vbus */
@@ -131,8 +140,7 @@ static const struct mxs_phy_data vf610_phy_data = {
};
static const struct mxs_phy_data imx6sx_phy_data = {
- .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
- MXS_PHY_NEED_IP_FIX,
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
};
static const struct of_device_id mxs_phy_dt_ids[] = {
@@ -256,6 +264,18 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
usleep_range(500, 1000);
}
+static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
+{
+ void __iomem *base = mxs_phy->phy.io_priv;
+ u32 phyctrl = readl(base + HW_USBPHY_CTRL);
+
+ if (IS_ENABLED(CONFIG_USB_OTG) &&
+ !(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE))
+ return true;
+
+ return false;
+}
+
static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
{
bool vbus_is_on = false;
@@ -270,7 +290,7 @@ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
- if (on && !vbus_is_on)
+ if (on && !vbus_is_on && !mxs_phy_is_otg_host(mxs_phy))
__mxs_phy_disconnect_line(mxs_phy, true);
else
__mxs_phy_disconnect_line(mxs_phy, false);
@@ -293,6 +313,17 @@ static int mxs_phy_init(struct usb_phy *phy)
static void mxs_phy_shutdown(struct usb_phy *phy)
{
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+ u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
+ BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
+ BM_USBPHY_CTRL_ENIDCHG_WKUP |
+ BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
+ BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
+ BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
+ BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
+ BM_USBPHY_CTRL_ENAUTO_PWRON_PLL;
+
+ writel(value, phy->io_priv + HW_USBPHY_CTRL_CLR);
+ writel(0xffffffff, phy->io_priv + HW_USBPHY_PWD);
writel(BM_USBPHY_CTRL_CLKGATE,
phy->io_priv + HW_USBPHY_CTRL_SET);
@@ -300,13 +331,56 @@ static void mxs_phy_shutdown(struct usb_phy *phy)
clk_disable_unprepare(mxs_phy->clk);
}
+static bool mxs_phy_is_low_speed_connection(struct mxs_phy *mxs_phy)
+{
+ unsigned int line_state;
+ /* bit definition is the same for all controllers */
+ unsigned int dp_bit = BM_ANADIG_USB1_MISC_RX_VPIN_FS,
+ dm_bit = BM_ANADIG_USB1_MISC_RX_VMIN_FS;
+ unsigned int reg = ANADIG_USB1_MISC;
+
+ /* If the SoCs don't have anatop, quit */
+ if (!mxs_phy->regmap_anatop)
+ return false;
+
+ if (mxs_phy->port_id == 0)
+ reg = ANADIG_USB1_MISC;
+ else if (mxs_phy->port_id == 1)
+ reg = ANADIG_USB2_MISC;
+
+ regmap_read(mxs_phy->regmap_anatop, reg, &line_state);
+
+ if ((line_state & (dp_bit | dm_bit)) == dm_bit)
+ return true;
+ else
+ return false;
+}
+
static int mxs_phy_suspend(struct usb_phy *x, int suspend)
{
int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(x);
+ bool low_speed_connection, vbus_is_on;
+
+ low_speed_connection = mxs_phy_is_low_speed_connection(mxs_phy);
+ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
if (suspend) {
- writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
+ /*
+ * FIXME: Do not power down the RXPWD1PT1 bit for low-speed
+ * connections. Low-speed connections can misbehave in very
+ * rare cases during the USB suspend and resume process.
+ */
+ if (low_speed_connection & vbus_is_on) {
+ /*
+ * If the value written to the PWD register is not
+ * 0xffffffff, several 32 kHz cycles are needed.
+ */
+ mxs_phy_clock_switch_delay();
+ writel(0xffbfffff, x->io_priv + HW_USBPHY_PWD);
+ } else {
+ writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
+ }
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_SET);
clk_disable_unprepare(mxs_phy->clk);
@@ -359,7 +433,9 @@ static int mxs_phy_on_disconnect(struct usb_phy *phy,
dev_dbg(phy->dev, "%s device has disconnected\n",
(speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
- if (speed == USB_SPEED_HIGH)
+ /* Sometimes, the speed is not high speed when the error occurs */
+ if (readl(phy->io_priv + HW_USBPHY_CTRL) &
+ BM_USBPHY_CTRL_ENHOSTDISCONDETECT)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
phy->io_priv + HW_USBPHY_CTRL_CLR);
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index b4066a001ba0..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
{
struct usb_phy *phy;
+ if (!of_device_is_available(node))
+ return ERR_PTR(-ENODEV);
+
list_for_each_entry(phy, &phy_list, head) {
if (node != phy->dev->of_node)
continue;
@@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
return phy;
}
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EPROBE_DEFER);
}
static void devm_usb_phy_release(struct device *dev, void *res)
@@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
spin_lock_irqsave(&phy_lock, flags);
phy = __of_usb_find_phy(node);
- if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- if (!IS_ERR(phy))
- phy = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(phy)) {
+ devres_free(ptr);
+ goto err1;
+ }
+ if (!try_module_get(phy->dev->driver->owner)) {
+ phy = ERR_PTR(-ENODEV);
devres_free(ptr);
goto err1;
}
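After this change a consumer sees -EPROBE_DEFER when the PHY node exists but no PHY has registered yet, and -ENODEV when the node is disabled or the PHY driver cannot be pinned; a minimal sketch of how a controller driver typically handles that (the "usb-phy" phandle name is a placeholder):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb/phy.h>

/* Illustrative consumer: defer the probe until the referenced PHY
 * appears; "usb-phy" is a placeholder phandle property name. */
static int example_get_phy(struct device *dev, struct usb_phy **phy)
{
	*phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
	if (IS_ERR(*phy)) {
		if (PTR_ERR(*phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* driver core retries later */
		dev_warn(dev, "no usable USB PHY: %ld\n", PTR_ERR(*phy));
		return PTR_ERR(*phy);
	}

	return 0;
}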
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 371478704899..4cf77d3c3bd2 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -363,6 +363,7 @@ static void usbhsc_hotplug(struct usbhs_priv *priv)
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
int id;
int enable;
+ int cable;
int ret;
/*
@@ -376,6 +377,16 @@ static void usbhsc_hotplug(struct usbhs_priv *priv)
id = usbhs_platform_call(priv, get_id, pdev);
if (enable && !mod) {
+ if (priv->edev) {
+ cable = extcon_get_cable_state(priv->edev, "USB-HOST");
+ if ((cable > 0 && id != USBHS_HOST) ||
+ (!cable && id != USBHS_GADGET)) {
+ dev_info(&pdev->dev,
+ "USB cable plugged in doesn't match the selected role!\n");
+ return;
+ }
+ }
+
ret = usbhs_mod_change(priv, id);
if (ret < 0)
return;
@@ -514,6 +525,12 @@ static int usbhs_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ if (of_property_read_bool(pdev->dev.of_node, "extcon")) {
+ priv->edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(priv->edev))
+ return PTR_ERR(priv->edev);
+ }
+
/*
* care platform info
*/
@@ -615,7 +632,7 @@ static int usbhs_probe(struct platform_device *pdev)
*/
ret = usbhs_platform_call(priv, hardware_init, pdev);
if (ret < 0) {
- dev_err(&pdev->dev, "platform prove failed.\n");
+ dev_err(&pdev->dev, "platform init failed.\n");
goto probe_end_mod_exit;
}
@@ -632,16 +649,12 @@ static int usbhs_probe(struct platform_device *pdev)
/*
* manual call notify_hotplug for cold plug
*/
- ret = usbhsc_drvcllbck_notify_hotplug(pdev);
- if (ret < 0)
- goto probe_end_call_remove;
+ usbhsc_drvcllbck_notify_hotplug(pdev);
dev_info(&pdev->dev, "probed\n");
return ret;
-probe_end_call_remove:
- usbhs_platform_call(priv, hardware_exit, pdev);
probe_end_mod_exit:
usbhs_mod_remove(priv);
probe_end_fifo_exit:
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 0427cdd1a483..fc96e924edc4 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -17,6 +17,7 @@
#ifndef RENESAS_USB_DRIVER_H
#define RENESAS_USB_DRIVER_H
+#include <linux/extcon.h>
#include <linux/platform_device.h>
#include <linux/usb/renesas_usbhs.h>
@@ -254,6 +255,8 @@ struct usbhs_priv {
struct delayed_work notify_hotplug_work;
struct platform_device *pdev;
+ struct extcon_dev *edev;
+
spinlock_t lock;
u32 flags;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index f46271ce1b15..d891bff39d66 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1054,10 +1054,8 @@ static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
fifo->rx_chan = NULL;
}
-static void usbhsf_dma_init(struct usbhs_priv *priv,
- struct usbhs_fifo *fifo)
+static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
{
- struct device *dev = usbhs_priv_to_dev(priv);
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -1069,6 +1067,27 @@ static void usbhsf_dma_init(struct usbhs_priv *priv,
dma_cap_set(DMA_SLAVE, mask);
fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
&fifo->rx_slave);
+}
+
+static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo)
+{
+ fifo->tx_chan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(fifo->tx_chan))
+ fifo->tx_chan = NULL;
+ fifo->rx_chan = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(fifo->rx_chan))
+ fifo->rx_chan = NULL;
+}
+
+static void usbhsf_dma_init(struct usbhs_priv *priv,
+ struct usbhs_fifo *fifo)
+{
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ if (dev->of_node)
+ usbhsf_dma_init_dt(dev, fifo);
+ else
+ usbhsf_dma_init_pdev(fifo);
if (fifo->tx_chan || fifo->rx_chan)
dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 8697e6efcabf..e0384af77e56 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -926,6 +926,8 @@ static int usbhsg_set_selfpowered(struct usb_gadget *gadget, int is_self)
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_SELF_POWERED);
+ gadget->is_selfpowered = (is_self != 0);
+
return 0;
}
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index f0d323125871..96eead619282 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1234,7 +1234,8 @@ static int __usbhsh_hub_get_status(struct usbhsh_hpriv *hpriv,
desc->bNbrPorts = roothub_id;
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
- desc->wHubCharacteristics = cpu_to_le16(0x0011);
+ desc->wHubCharacteristics =
+ cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM);
desc->u.hs.DeviceRemovable[0] = (roothub_id << 1);
desc->u.hs.DeviceRemovable[1] = ~0;
dev_dbg(dev, "%s :: GetHubDescriptor\n", __func__);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 8d7fc48b1f30..29fa1c3d0089 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -46,6 +46,8 @@ static struct console usbcons;
* ------------------------------------------------------------
*/
+static const struct tty_operations usb_console_fake_tty_ops = {
+};
/*
* The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
goto reset_open_count;
}
kref_init(&tty->kref);
- tty_port_tty_set(&port->port, tty);
tty->driver = usb_serial_tty_driver;
tty->index = co->index;
+ init_ldsem(&tty->ldisc_sem);
+ INIT_LIST_HEAD(&tty->tty_files);
+ kref_get(&tty->driver->kref);
+ tty->ops = &usb_console_fake_tty_ops;
if (tty_init_termios(tty)) {
retval = -ENOMEM;
- goto free_tty;
+ goto put_tty;
}
+ tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
- kfree(tty);
+ tty_kref_put(tty);
}
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
}
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
fail:
tty_port_tty_set(&port->port, NULL);
- free_tty:
- kfree(tty);
+ put_tty:
+ tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
usb_autopm_put_interface(serial->interface);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 6c4eb3cf5efd..f40c856ff758 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -120,10 +121,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
- { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1bd192290b08..ccf1df7c4b80 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
res = usb_submit_urb(port->read_urbs[index], mem_flags);
if (res) {
- if (res != -EPERM) {
+ if (res != -EPERM && res != -ENODEV) {
dev_err(&port->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
__func__, urb->status);
return;
default:
- dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+ dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto resubmit;
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 077c714f1285..e07b15ed5814 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
}
port = serial->port[msg->portNumber];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
port = serial->port[0];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 220b4be89641..e4473a9109cf 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1309,35 +1309,6 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *current_position = data;
unsigned char *data1;
-#ifdef NOTMOS7840
- Data = 0x00;
- status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
- mos7840_port->shadowLCR = Data;
- dev_dbg(&port->dev, "%s: LINE_CONTROL_REGISTER is %x\n", __func__, Data);
- dev_dbg(&port->dev, "%s: mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
-
- /* Data = 0x03; */
- /* status = mos7840_set_uart_reg(port,LINE_CONTROL_REGISTER,Data); */
- /* mos7840_port->shadowLCR=Data;//Need to add later */
-
- Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */
- status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
-
- /* Data = 0x0c; */
- /* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
- Data = 0x00;
- status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
- dev_dbg(&port->dev, "%s: DLL value is %x\n", __func__, Data);
-
- Data = 0x0;
- status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
- dev_dbg(&port->dev, "%s: DLM value is %x\n", __func__, Data);
-
- Data = Data & ~SERIAL_LCR_DLAB;
- dev_dbg(&port->dev, "%s: mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
- status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
-#endif
-
if (mos7840_port_paranoia_check(port, __func__))
return -1;
@@ -1614,37 +1585,6 @@ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
*clk_sel_val = 0x70;
}
return 0;
-
-#ifdef NOTMCS7840
-
- for (i = 0; i < ARRAY_SIZE(mos7840_divisor_table); i++) {
- if (mos7840_divisor_table[i].BaudRate == baudrate) {
- *divisor = mos7840_divisor_table[i].Divisor;
- return 0;
- }
- }
-
- /* After trying for all the standard baud rates *
- * Try calculating the divisor for this baud rate */
-
- if (baudrate > 75 && baudrate < 230400) {
- /* get the divisor */
- custom = (__u16) (230400L / baudrate);
-
- /* Check for round off */
- round1 = (__u16) (2304000L / baudrate);
- round = (__u16) (round1 - (custom * 10));
- if (round > 4)
- custom++;
- *divisor = custom;
-
- dev_dbg(&port->dev, " Baud %d = %d\n", baudrate, custom);
- return 0;
- }
-
- dev_dbg(&port->dev, "%s", " Baud calculation Failed...\n");
- return -1;
-#endif
}
/*****************************************************************************
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7a4c21b4f676..f0c0c53359ad 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
#define QUALCOMM_VENDOR_ID 0x05C6
+#define SIERRA_VENDOR_ID 0x1199
+
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
#define CMOTECH_PRODUCT_CMU_300 0x6002
@@ -505,18 +507,10 @@ static void option_instat_callback(struct urb *urb);
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001
-/* some devices interfaces need special handling due to a number of reasons */
-enum option_blacklist_reason {
- OPTION_BLACKLIST_NONE = 0,
- OPTION_BLACKLIST_SENDSETUP = 1,
- OPTION_BLACKLIST_RESERVED_IF = 2
-};
-
-#define MAX_BL_NUM 8
struct option_blacklist_info {
- /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
+ /* bitmask of interface numbers blacklisted for send_setup */
const unsigned long sendsetup;
- /* bitfield of interface numbers for OPTION_BLACKLIST_RESERVED_IF */
+ /* bitmask of interface numbers that are reserved */
const unsigned long reserved;
};
@@ -601,6 +595,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+ .sendsetup = BIT(0) | BIT(2),
+ .reserved = BIT(8) | BIT(10) | BIT(11),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1097,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1813,36 +1814,13 @@ struct option_private {
module_usb_serial_driver(serial_drivers, option_ids);
-static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
- const struct option_blacklist_info *blacklist)
-{
- unsigned long num;
- const unsigned long *intf_list;
-
- if (blacklist) {
- if (reason == OPTION_BLACKLIST_SENDSETUP)
- intf_list = &blacklist->sendsetup;
- else if (reason == OPTION_BLACKLIST_RESERVED_IF)
- intf_list = &blacklist->reserved;
- else {
- BUG_ON(reason);
- return false;
- }
-
- for_each_set_bit(num, intf_list, MAX_BL_NUM + 1) {
- if (num == ifnum)
- return true;
- }
- }
- return false;
-}
-
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
+ const struct option_blacklist_info *blacklist;
/* Never bind to the CD-Rom emulation interface */
if (iface_desc->bInterfaceClass == 0x08)
@@ -1853,10 +1831,9 @@ static int option_probe(struct usb_serial *serial,
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
- if (is_blacklisted(
- iface_desc->bInterfaceNumber,
- OPTION_BLACKLIST_RESERVED_IF,
- (const struct option_blacklist_info *) id->driver_info))
+ blacklist = (void *)id->driver_info;
+ if (blacklist && test_bit(iface_desc->bInterfaceNumber,
+ &blacklist->reserved))
return -ENODEV;
/*
* Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -1867,8 +1844,8 @@ static int option_probe(struct usb_serial *serial,
iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
return -ENODEV;
- /* Store device id so we can use it during attach. */
- usb_set_serial_data(serial, (void *)id);
+ /* Store the blacklist info so we can use it during attach. */
+ usb_set_serial_data(serial, (void *)blacklist);
return 0;
}
@@ -1876,7 +1853,7 @@ static int option_probe(struct usb_serial *serial,
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
- const struct usb_device_id *id;
+ const struct option_blacklist_info *blacklist;
struct usb_wwan_intf_private *data;
struct option_private *priv;
@@ -1890,16 +1867,16 @@ static int option_attach(struct usb_serial *serial)
return -ENOMEM;
}
- /* Retrieve device id stored at probe. */
- id = usb_get_serial_data(serial);
+ /* Retrieve blacklist info stored at probe. */
+ blacklist = usb_get_serial_data(serial);
+
iface_desc = &serial->interface->cur_altsetting->desc;
priv->bInterfaceNumber = iface_desc->bInterfaceNumber;
data->private = priv;
- if (!is_blacklisted(iface_desc->bInterfaceNumber,
- OPTION_BLACKLIST_SENDSETUP,
- (struct option_blacklist_info *)id->driver_info)) {
+ if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
+ &blacklist->sendsetup)) {
data->send_setup = option_send_setup;
}
spin_lock_init(&data->susp_lock);
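With the enum and is_blacklisted() gone, adding a quirky device is just a bitmask table plus an id_table entry; a sketch with made-up IDs and interface numbers:

/* Illustrative only: hypothetical modem whose interface 3 is reserved
 * for a network function and whose interface 0 must not get send_setup.
 * The 0x1234/0x5678 IDs are placeholders. */
static const struct option_blacklist_info example_blacklist = {
	.sendsetup	= BIT(0),
	.reserved	= BIT(3),
};

/* Corresponding option_ids[] entry:
 *	{ USB_DEVICE(0x1234, 0x5678),
 *	  .driver_info = (kernel_ulong_t)&example_blacklist },
 * option_probe() then refuses interface 3 via
 * test_bit(bInterfaceNumber, &blacklist->reserved). */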
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index cb3e14780a7e..9c63897b3a56 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
{DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
- {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 8a6f371ed6e7..9893d696fc97 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf,
return 0;
/*
- * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
- * broken on the ASM1051, use the number of streams to differentiate.
- * New ASM1053-s also support 32 streams, but have a different prod-id.
+ * ASMedia has a number of usb3 to sata bridge chips, at the time of
+ * this writing the following versions exist:
+ * ASM1051 - no uas support version
+ * ASM1051 - with broken (*) uas support
+ * ASM1053 - with working uas support
+ * ASM1153 - with working uas support
+ *
+ * Devices with these chips re-use a number of device-ids over the
+ * entire line, so the device-id is useless to determine if we're
+ * dealing with an ASM1051 (which we want to avoid).
+ *
+ * The ASM1153 can be identified by config.MaxPower == 0,
+ * whereas the ASM105x models have config.MaxPower == 36.
+ *
+ * Differentiating between the ASM1053 and ASM1051 is trickier, when
+ * connected over USB-3 we can look at the number of streams supported,
+ * ASM1051 supports 32 streams, whereas early ASM1053 versions support
+ * 16 streams, newer ASM1053-s also support 32 streams, but have a
+ * different prod-id.
+ *
+ * (*) ASM1051 chips do work with UAS with some disks (with the
+ * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks
*/
if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
- le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
- if (udev->speed < USB_SPEED_SUPER) {
+ (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 ||
+ le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) {
+ if (udev->actconfig->desc.bMaxPower == 0) {
+ /* ASM1153, do nothing */
+ } else if (udev->speed < USB_SPEED_SUPER) {
/* No streams info, assume ASM1051 */
flags |= US_FL_IGNORE_UAS;
} else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+ /* Possibly an ASM1051, disable uas */
flags |= US_FL_IGNORE_UAS;
}
}
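Restated as a self-contained helper, the heuristic documented in the comment block reads roughly as below; the function name and bool return are illustrative, the thresholds come from the hunk itself.

#include <linux/usb.h>

/* Illustrative restatement of the ASMedia decision above: true means
 * "fall back to usb-storage" (US_FL_IGNORE_UAS). */
static bool example_asmedia_avoid_uas(struct usb_device *udev,
				      struct usb_host_endpoint **eps)
{
	u16 vid = le16_to_cpu(udev->descriptor.idVendor);
	u16 pid = le16_to_cpu(udev->descriptor.idProduct);

	if (vid != 0x174c || (pid != 0x5106 && pid != 0x55aa))
		return false;

	if (udev->actconfig->desc.bMaxPower == 0)
		return false;			/* ASM1153: UAS is fine */
	if (udev->speed < USB_SPEED_SUPER)
		return true;			/* no stream info, assume ASM1051 */

	/* 32 streams: could still be an ASM1051, play it safe. */
	return usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32;
}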
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
"SCM Microsystems",
"eUSB SCSI Adapter (Bus Powered)",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 18a283d6de1c..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -40,6 +40,16 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+ * commands in UAS mode. Observed with the 1.28 firmware; are there others?
+ */
+UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
+ "Apricorn",
+ "",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
"Seagate",
@@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
+UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
+ "Seagate",
+ "Backup Plus",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
+ "Seagate",
+ "Backup Plus Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
"Seagate",
@@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
+UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ "Seagate",
+ "BUP Fast HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
"JMicron",
@@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES),
-/* Most ASM1051 based devices have issues with uas, blacklist them all */
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
- "ASMedia",
- "ASM1051",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_IGNORE_UAS),
-
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
@@ -104,9 +127,23 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+ "JMicron",
+ "JMS566",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
"Hitachi",
"External HDD",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+ "SimpleTech",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 1ae9d40f96bf..11f6f61c2381 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -218,7 +218,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__constant_cpu_to_le16(0x0001));
+ desc->wHubCharacteristics = __constant_cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = VHCI_NPORTS;
desc->u.hs.DeviceRemovable[0] = 0xff;
desc->u.hs.DeviceRemovable[1] = 0xff;
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
index d5efd0f07d2b..7b1b2e2fb673 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/usb/wusbcore/reservation.c
@@ -49,14 +49,13 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
struct wusbhc *wusbhc = rsv->pal_priv;
struct device *dev = wusbhc->dev;
struct uwb_mas_bm mas;
- char buf[72];
dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
uwb_rsv_get_usable_mas(rsv, &mas);
- bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);
- dev_dbg(dev, "established reservation: %s\n", buf);
+ dev_dbg(dev, "established reservation: %*pb\n",
+ UWB_NUM_MAS, mas.bm);
wusbhc_bwa_set(wusbhc, rsv->stream, &mas);
break;
case UWB_RSV_STATE_NONE:
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index fe8bc777ab88..aa5af817f31c 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -185,9 +185,9 @@ static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
descr->bDescriptorType = 0x29; /* HUB type */
descr->bNbrPorts = wusbhc->ports_max;
descr->wHubCharacteristics = cpu_to_le16(
- 0x00 /* All ports power at once */
+ HUB_CHAR_COMMON_LPSM /* All ports power at once */
| 0x00 /* not part of compound device */
- | 0x10 /* No overcurrent protection */
+ | HUB_CHAR_NO_OCPM /* No overcurrent protection */
| 0x00 /* 8 FS think time FIXME ?? */
| 0x00); /* No port indicators */
descr->bPwrOn2PwrGood = 0;
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index a80c5d284b59..c7ecdbe19a32 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -496,10 +496,9 @@ void wa_rpipes_destroy(struct wahc *wa)
struct device *dev = &wa->usb_iface->dev;
if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
- char buf[256];
WARN_ON(1);
- bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
- dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
+ dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
+ wa->rpipes, wa->rpipe_bm);
}
kfree(wa->rpipe_bm);
}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 3e1ba51d1a43..94f401ab859f 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -496,11 +496,8 @@ static void __exit wusbcore_exit(void)
{
clear_bit(0, wusb_cluster_id_table);
if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
- char buf[256];
- bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table,
- CLUSTER_IDS);
- printk(KERN_ERR "BUG: WUSB Cluster IDs not released "
- "on exit: %s\n", buf);
+ printk(KERN_ERR "BUG: WUSB Cluster IDs not released on exit: %*pb\n",
+ CLUSTER_IDS, wusb_cluster_id_table);
WARN_ON(1);
}
usb_unregister_notify(&wusb_usb_notifier);
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 05b7bd762254..8fc1b787dced 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -619,11 +619,9 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
struct device *dev = &rc->uwb_dev.dev;
struct uwb_mas_bm mas;
struct uwb_cnflt_alien *cnflt;
- char buf[72];
unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
uwb_drp_ie_to_bm(&mas, drp_ie);
- bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);
list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
index 8c7cfab5cee3..72033589db19 100644
--- a/drivers/uwb/lc-dev.c
+++ b/drivers/uwb/lc-dev.c
@@ -43,13 +43,6 @@ static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
memset(&addr->data, 0xff, sizeof(addr->data));
}
-/* @returns !0 if a device @addr is a broadcast address */
-static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr)
-{
- static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } };
- return !uwb_dev_addr_cmp(addr, &bcast);
-}
-
/*
* Add callback @new to be called when an event occurs in @rc.
*/
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index 6ec45beb7af5..0b1e5a9449b5 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -217,7 +217,6 @@ static int reservations_print(struct seq_file *s, void *p)
struct uwb_dev_addr devaddr;
char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
bool is_owner;
- char buf[72];
uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
if (rsv->target.type == UWB_RSV_TARGET_DEV) {
@@ -234,8 +233,7 @@ static int reservations_print(struct seq_file *s, void *p)
owner, target, uwb_rsv_state_str(rsv->state));
seq_printf(s, " stream: %d type: %s\n",
rsv->stream, uwb_rsv_type_str(rsv->type));
- bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
- seq_printf(s, " %s\n", buf);
+ seq_printf(s, " %*pb\n", UWB_NUM_MAS, rsv->mas.bm);
}
mutex_unlock(&rc->rsvs_mutex);
@@ -259,14 +257,10 @@ static const struct file_operations reservations_fops = {
static int drp_avail_print(struct seq_file *s, void *p)
{
struct uwb_rc *rc = s->private;
- char buf[72];
-
- bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS);
- seq_printf(s, "global: %s\n", buf);
- bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS);
- seq_printf(s, "local: %s\n", buf);
- bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS);
- seq_printf(s, "pending: %s\n", buf);
+
+ seq_printf(s, "global: %*pb\n", UWB_NUM_MAS, rc->drp_avail.global);
+ seq_printf(s, "local: %*pb\n", UWB_NUM_MAS, rc->drp_avail.local);
+ seq_printf(s, "pending: %*pb\n", UWB_NUM_MAS, rc->drp_avail.pending);
return 0;
}
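
The bitmap_scnprintf() removals in this series all lean on the "%*pb" vsprintf extension, which takes the number of bits as the field width and a pointer to the bitmap as the argument ("%*pbl" prints it as a range list instead). A minimal sketch of the before/after pattern, using a made-up bitmap rather than any of the structures above:

static void print_mask_example(void)
{
	DECLARE_BITMAP(mask, 128);

	bitmap_fill(mask, 128);

	/* old style: render into a temporary buffer first */
	{
		char buf[72];

		bitmap_scnprintf(buf, sizeof(buf), mask, 128);
		pr_debug("mask: %s\n", buf);
	}

	/* new style: vsprintf walks the bitmap directly */
	pr_debug("mask: %*pb\n", 128, mask);
}
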
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d415d69dc237..8dccca9013ed 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref {
struct vhost_net_virtqueue {
struct vhost_virtqueue vq;
- /* hdr is used to store the virtio header.
- * Since each iovec has >= 1 byte length, we never need more than
- * header length entries to store the header. */
- struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
size_t vhost_hlen;
size_t sock_hlen;
/* vhost zerocopy support fields below: */
@@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock)
sock_flag(sock->sk, SOCK_ZEROCOPY);
}
-/* Pop first len bytes from iovec. Return number of segments used. */
-static int move_iovec_hdr(struct iovec *from, struct iovec *to,
- size_t len, int iov_count)
-{
- int seg = 0;
- size_t size;
-
- while (len && seg < iov_count) {
- size = min(from->iov_len, len);
- to->iov_base = from->iov_base;
- to->iov_len = size;
- from->iov_len -= size;
- from->iov_base += size;
- len -= size;
- ++from;
- ++to;
- ++seg;
- }
- return seg;
-}
-/* Copy iovec entries for len bytes from iovec. */
-static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
- size_t len, int iovcount)
-{
- int seg = 0;
- size_t size;
-
- while (len && seg < iovcount) {
- size = min(from->iov_len, len);
- to->iov_base = from->iov_base;
- to->iov_len = size;
- len -= size;
- ++from;
- ++to;
- ++seg;
- }
-}
-
/* In case of DMA done not in order in lower device driver for some reason.
* upend_idx is used to track end of used idx, done_idx is used to track head
* of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -336,7 +294,7 @@ static void handle_tx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
- unsigned out, in, s;
+ unsigned out, in;
int head;
struct msghdr msg = {
.msg_name = NULL,
@@ -395,16 +353,17 @@ static void handle_tx(struct vhost_net *net)
break;
}
/* Skip header. TODO: support TSO. */
- s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
len = iov_length(vq->iov, out);
iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
+ iov_iter_advance(&msg.msg_iter, hdr_size);
/* Sanity check */
- if (!len) {
+ if (!iov_iter_count(&msg.msg_iter)) {
vq_err(vq, "Unexpected header len for TX: "
"%zd expected %zd\n",
- iov_length(nvq->hdr, s), hdr_size);
+ len, hdr_size);
break;
}
+ len = iov_iter_count(&msg.msg_iter);
zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
&& (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -469,7 +428,7 @@ static int peek_head_len(struct sock *sk)
head = skb_peek(&sk->sk_receive_queue);
if (likely(head)) {
len = head->len;
- if (vlan_tx_tag_present(head))
+ if (skb_vlan_tag_present(head))
len += VLAN_HLEN;
}
@@ -579,6 +538,7 @@ static void handle_rx(struct vhost_net *net)
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
+ struct iov_iter fixup;
mutex_lock(&vq->mutex);
sock = vq->private_data;
@@ -623,14 +583,19 @@ static void handle_rx(struct vhost_net *net)
break;
}
/* We don't need to be notified again. */
- if (unlikely((vhost_hlen)))
- /* Skip header. TODO: support TSO. */
- move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
- else
- /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
- * needed because recvmsg can modify msg_iov. */
- copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
- iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len);
+ iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
+ fixup = msg.msg_iter;
+ if (unlikely((vhost_hlen))) {
+ /* We will supply the header ourselves
+ * TODO: support TSO.
+ */
+ iov_iter_advance(&msg.msg_iter, vhost_hlen);
+ } else {
+ /* It'll come from socket; we'll need to patch
+ * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+ */
+ iov_iter_advance(&fixup, sizeof(hdr));
+ }
err = sock->ops->recvmsg(NULL, sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
@@ -642,18 +607,18 @@ static void handle_rx(struct vhost_net *net)
vhost_discard_vq_desc(vq, headcount);
continue;
}
+ /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
if (unlikely(vhost_hlen) &&
- memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
- vhost_hlen)) {
+ copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
vq->iov->iov_base);
break;
}
/* TODO: Should check and handle checksum. */
+
+ hdr.num_buffers = cpu_to_vhost16(vq, headcount);
if (likely(mergeable) &&
- memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
- offsetof(typeof(hdr), num_buffers),
- sizeof hdr.num_buffers)) {
+ copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
break;
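
The vhost-net rework above drops the hand-rolled iovec splitting in favour of the iov_iter API: the iterator tracks the current position, so the virtio header slot can be skipped with iov_iter_advance() and filled in later through a saved copy with copy_to_iter(). A simplified sketch of that pattern (the helper name is hypothetical, not part of the driver):

/* Reserve room for hdr at the front of the buffers described by *iter and
 * write it there, leaving *iter positioned at the start of the payload. */
static int put_vnet_hdr(struct iov_iter *iter,
			const struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	struct iov_iter fixup = *iter;	/* remember where the header goes */

	iov_iter_advance(iter, sizeof(*hdr));

	if (copy_to_iter(hdr, sizeof(*hdr), &fixup) != sizeof(*hdr))
		return -EFAULT;

	return 0;
}
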
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 01c01cb3933f..dc78d87e0fc2 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
return 0;
}
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+ switch (attr) {
+ case VIRTIO_SCSI_S_SIMPLE:
+ return TCM_SIMPLE_TAG;
+ case VIRTIO_SCSI_S_ORDERED:
+ return TCM_ORDERED_TAG;
+ case VIRTIO_SCSI_S_HEAD:
+ return TCM_HEAD_TAG;
+ case VIRTIO_SCSI_S_ACA:
+ return TCM_ACA_TAG;
+ default:
+ break;
+ }
+ return TCM_SIMPLE_TAG;
+}
+
static void tcm_vhost_submission_work(struct work_struct *work)
{
struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
- cmd->tvc_task_attr, cmd->tvc_data_direction,
- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+ sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -1061,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
req_size, vq->iov[0].iov_len);
break;
}
- ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
+ ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
if (unlikely(ret)) {
vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
break;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cb807d0ea498..2ee28266fd07 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1125,6 +1125,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
u32 len = vhost32_to_cpu(vq, indirect->len);
+ struct iov_iter from;
int ret;
/* Sanity check */
@@ -1142,6 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
+ iov_iter_init(&from, READ, vq->indirect, ret, len);
/* We will use the result as an address to read from, so most
* architectures only need a compiler barrier here. */
@@ -1164,8 +1166,8 @@ static int get_indirect(struct vhost_virtqueue *vq,
i, count);
return -EINVAL;
}
- if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
- vq->indirect, sizeof desc))) {
+ if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
+ sizeof(desc))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index fe1cd0148e13..ba97efc3bf70 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -77,18 +77,22 @@ config DUMMY_CONSOLE
config DUMMY_CONSOLE_COLUMNS
int "Initial number of console screen columns"
- depends on PARISC && DUMMY_CONSOLE
- default "160"
+ depends on DUMMY_CONSOLE && !ARM
+ default 160 if PARISC
+ default 80
help
- The default value is 160, which should fit a 1280x1024 monitor.
+ On PA-RISC, the default value is 160, which should fit a 1280x1024
+ monitor.
Select 80 if you use a 640x480 resolution by default.
config DUMMY_CONSOLE_ROWS
int "Initial number of console screen rows"
- depends on PARISC && DUMMY_CONSOLE
- default "64"
+ depends on DUMMY_CONSOLE && !ARM
+ default 64 if PARISC
+ default 25
help
- The default value is 64, which should fit a 1280x1024 monitor.
+ On PA-RISC, the default value is 64, which should fit a 1280x1024
+ monitor.
Select 25 if you use a 640x480 resolution by default.
config FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 40bec8d64b0a..0efc52f11ad0 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -20,13 +20,10 @@
#if defined(__arm__)
#define DUMMY_COLUMNS screen_info.orig_video_cols
#define DUMMY_ROWS screen_info.orig_video_lines
-#elif defined(__hppa__)
+#else
/* set by Kconfig. Use 80x25 for 640x480 and 160x64 for 1280x1024 */
#define DUMMY_COLUMNS CONFIG_DUMMY_CONSOLE_COLUMNS
#define DUMMY_ROWS CONFIG_DUMMY_CONSOLE_ROWS
-#else
-#define DUMMY_COLUMNS 80
-#define DUMMY_ROWS 25
#endif
static const char *dummycon_startup(void)
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index ea437245562e..b97210671a81 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -146,9 +146,6 @@ static const struct consw fb_con;
static int fbcon_set_origin(struct vc_data *);
-#define CURSOR_DRAW_DELAY (1)
-
-static int vbl_cursor_cnt;
static int fbcon_cursor_noblink;
#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
@@ -1329,7 +1326,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
- vbl_cursor_cnt = CURSOR_DRAW_DELAY;
}
static int scrollback_phys_max = 0;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 4916c97216f8..b3dd417b4719 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1530,13 +1530,11 @@ config FB_SIS_315
config FB_VIA
tristate "VIA UniChrome (Pro) and Chrome9 display support"
- depends on FB && PCI && X86
+ depends on FB && PCI && X86 && GPIOLIB && I2C
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select I2C_ALGOBIT
- select I2C
- select GPIOLIB
help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
@@ -2151,7 +2149,6 @@ config FB_PS3
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
---help---
Include support for the virtual frame buffer in the PS3 platform.
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 4953b657635e..cb9ee2556850 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -3118,8 +3118,7 @@ int __init atafb_init(void)
printk("atafb_init: initializing Falcon hw\n");
fbhw = &falcon_switch;
atafb_ops.fb_setcolreg = &falcon_setcolreg;
- error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher,
- IRQ_TYPE_PRIO,
+ error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0,
"framebuffer:modeswitch",
falcon_vbl_switcher);
if (error)
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 37ec09b3fffd..8789e487b96e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3948,7 +3948,7 @@ static struct notifier_block atyfb_reboot_notifier = {
.notifier_call = atyfb_reboot_notify,
};
-static const struct dmi_system_id atyfb_reboot_ids[] = {
+static const struct dmi_system_id atyfb_reboot_ids[] __initconst = {
{
.ident = "HP OmniBook 500",
.matches = {
@@ -3960,6 +3960,7 @@ static const struct dmi_system_id atyfb_reboot_ids[] = {
{ }
};
+static bool registered_notifier = false;
static int __init atyfb_init(void)
{
@@ -3982,15 +3983,17 @@ static int __init atyfb_init(void)
if (err1 && err2)
return -ENODEV;
- if (dmi_check_system(atyfb_reboot_ids))
+ if (dmi_check_system(atyfb_reboot_ids)) {
register_reboot_notifier(&atyfb_reboot_notifier);
+ registered_notifier = true;
+ }
return 0;
}
static void __exit atyfb_exit(void)
{
- if (dmi_check_system(atyfb_reboot_ids))
+ if (registered_notifier)
unregister_reboot_notifier(&atyfb_reboot_notifier);
#ifdef CONFIG_PCI
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 1c29bd19e3d5..0e5fde1d3ffb 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, start_sector_addr,
data_start_addr, sector_buffer);
if (err)
- return err;
+ goto out;
}
/* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, tail_start_addr,
tail_len, sector_buffer + tail_start_addr);
if (err)
- return err;
+ goto out;
}
/* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
/* first erase the sector */
err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
if (err)
- return err;
+ goto out;
/* now write it */
err = broadsheet_spiflash_write_sector(par, start_sector_addr,
sector_buffer, sector_size);
+out:
+ kfree(sector_buffer);
return err;
}
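
The broadsheetfb hunk is a straightforward leak fix: the early returns used to skip the kfree() of sector_buffer, so every failure path now funnels through one cleanup label. The general idiom, sketched with hypothetical helpers:

static int rewrite_example(size_t size)
{
	u8 *buf = kmalloc(size, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = read_step(buf, size);	/* hypothetical */
	if (err)
		goto out;

	err = write_step(buf, size);	/* hypothetical */
out:
	kfree(buf);
	return err;
}
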
diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 7cb715dfc0e1..55d2bd0ce5c0 100644
--- a/drivers/video/fbdev/core/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
@@ -369,9 +369,9 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
cvt.h_back_porch = cvt.hblank/2 + cvt.h_margin;
cvt.h_front_porch = cvt.hblank - cvt.hsync - cvt.h_back_porch +
2 * cvt.h_margin;
- cvt.v_back_porch = 3 + cvt.v_margin;
- cvt.v_front_porch = cvt.vtotal - cvt.yres/cvt.interlace -
- cvt.v_back_porch - cvt.vsync;
+ cvt.v_front_porch = 3 + cvt.v_margin;
+ cvt.v_back_porch = cvt.vtotal - cvt.yres/cvt.interlace -
+ cvt.v_front_porch - cvt.vsync;
fb_cvt_print_name(&cvt);
fb_cvt_convert_to_mode(&cvt, mode);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 5b0e313849bd..95338593ebf4 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -496,56 +496,71 @@ static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
}
static int get_std_timing(unsigned char *block, struct fb_videomode *mode,
- int ver, int rev)
+ int ver, int rev, const struct fb_monspecs *specs)
{
- int xres, yres = 0, refresh, ratio, i;
-
- xres = (block[0] + 31) * 8;
- if (xres <= 256)
- return 0;
+ int i;
- ratio = (block[1] & 0xc0) >> 6;
- switch (ratio) {
- case 0:
- /* in EDID 1.3 the meaning of 0 changed to 16:10 (prior 1:1) */
- if (ver < 1 || (ver == 1 && rev < 3))
- yres = xres;
- else
- yres = (xres * 10)/16;
- break;
- case 1:
- yres = (xres * 3)/4;
- break;
- case 2:
- yres = (xres * 4)/5;
- break;
- case 3:
- yres = (xres * 9)/16;
- break;
+ for (i = 0; i < DMT_SIZE; i++) {
+ u32 std_2byte_code = block[0] << 8 | block[1];
+ if (std_2byte_code == dmt_modes[i].std_2byte_code)
+ break;
}
- refresh = (block[1] & 0x3f) + 60;
-
- DPRINTK(" %dx%d@%dHz\n", xres, yres, refresh);
- for (i = 0; i < VESA_MODEDB_SIZE; i++) {
- if (vesa_modes[i].xres == xres &&
- vesa_modes[i].yres == yres &&
- vesa_modes[i].refresh == refresh) {
- *mode = vesa_modes[i];
- mode->flag |= FB_MODE_IS_STANDARD;
- return 1;
+
+ if (i < DMT_SIZE && dmt_modes[i].mode) {
+ /* DMT mode found */
+ *mode = *dmt_modes[i].mode;
+ mode->flag |= FB_MODE_IS_STANDARD;
+ DPRINTK(" DMT id=%d\n", dmt_modes[i].dmt_id);
+
+ } else {
+ int xres, yres = 0, refresh, ratio;
+
+ xres = (block[0] + 31) * 8;
+ if (xres <= 256)
+ return 0;
+
+ ratio = (block[1] & 0xc0) >> 6;
+ switch (ratio) {
+ case 0:
+ /* in EDID 1.3 the meaning of 0 changed to 16:10 (prior 1:1) */
+ if (ver < 1 || (ver == 1 && rev < 3))
+ yres = xres;
+ else
+ yres = (xres * 10)/16;
+ break;
+ case 1:
+ yres = (xres * 3)/4;
+ break;
+ case 2:
+ yres = (xres * 4)/5;
+ break;
+ case 3:
+ yres = (xres * 9)/16;
+ break;
}
+ refresh = (block[1] & 0x3f) + 60;
+ DPRINTK(" %dx%d@%dHz\n", xres, yres, refresh);
+
+ calc_mode_timings(xres, yres, refresh, mode);
}
- calc_mode_timings(xres, yres, refresh, mode);
+
+ /* Check the mode we got is within valid spec of the monitor */
+ if (specs && specs->dclkmax
+ && PICOS2KHZ(mode->pixclock) * 1000 > specs->dclkmax) {
+ DPRINTK(" mode exceed max DCLK\n");
+ return 0;
+ }
+
return 1;
}
-static int get_dst_timing(unsigned char *block,
- struct fb_videomode *mode, int ver, int rev)
+static int get_dst_timing(unsigned char *block, struct fb_videomode *mode,
+ int ver, int rev, const struct fb_monspecs *specs)
{
int j, num = 0;
for (j = 0; j < 6; j++, block += STD_TIMING_DESCRIPTION_SIZE)
- num += get_std_timing(block, &mode[num], ver, rev);
+ num += get_std_timing(block, &mode[num], ver, rev, specs);
return num;
}
@@ -601,7 +616,8 @@ static void get_detailed_timing(unsigned char *block,
* This function builds a mode database using the contents of the EDID
* data
*/
-static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
+static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
+ const struct fb_monspecs *specs)
{
struct fb_videomode *mode, *m;
unsigned char *block;
@@ -643,12 +659,13 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
DPRINTK(" Standard Timings\n");
block = edid + STD_TIMING_DESCRIPTIONS_START;
for (i = 0; i < STD_TIMING; i++, block += STD_TIMING_DESCRIPTION_SIZE)
- num += get_std_timing(block, &mode[num], ver, rev);
+ num += get_std_timing(block, &mode[num], ver, rev, specs);
block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
for (i = 0; i < 4; i++, block+= DETAILED_TIMING_DESCRIPTION_SIZE) {
if (block[0] == 0x00 && block[1] == 0x00 && block[3] == 0xfa)
- num += get_dst_timing(block + 5, &mode[num], ver, rev);
+ num += get_dst_timing(block + 5, &mode[num],
+ ver, rev, specs);
}
/* Yikes, EDID data is totally useless */
@@ -707,7 +724,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
int num_modes, hz, hscan, pixclock;
int vtotal, htotal;
- modes = fb_create_modedb(edid, &num_modes);
+ modes = fb_create_modedb(edid, &num_modes, specs);
if (!modes) {
DPRINTK("None Available\n");
return 1;
@@ -964,7 +981,7 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
DPRINTK(" Display Characteristics:\n");
get_monspecs(edid, specs);
- specs->modedb = fb_create_modedb(edid, &specs->modedb_len);
+ specs->modedb = fb_create_modedb(edid, &specs->modedb_len, specs);
/*
* Workaround for buggy EDIDs that sets that the first
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 388f7971494b..7d07cf824b64 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -468,8 +468,119 @@ const struct fb_videomode vesa_modes[] = {
/* 33 1920x1440-75 VESA */
{ NULL, 75, 1920, 1440, 3367, 352, 144, 56, 1, 224, 3,
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 34 1920x1200-60 RB VESA */
+ { NULL, 60, 1920, 1200, 6493, 80, 48, 26, 3, 32, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 35 1920x1200-60 VESA */
+ { NULL, 60, 1920, 1200, 5174, 336, 136, 36, 3, 200, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 36 1920x1200-75 VESA */
+ { NULL, 75, 1920, 1200, 4077, 344, 136, 46, 3, 208, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 37 1920x1200-85 VESA */
+ { NULL, 85, 1920, 1200, 3555, 352, 144, 53, 3, 208, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 38 2560x1600-60 RB VESA */
+ { NULL, 60, 2560, 1600, 3724, 80, 48, 37, 3, 32, 6,
+ FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 39 2560x1600-60 VESA */
+ { NULL, 60, 2560, 1600, 2869, 472, 192, 49, 3, 280, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 40 2560x1600-75 VESA */
+ { NULL, 75, 2560, 1600, 2256, 488, 208, 63, 3, 280, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 41 2560x1600-85 VESA */
+ { NULL, 85, 2560, 1600, 1979, 488, 208, 73, 3, 280, 6,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 42 2560x1600-120 RB VESA */
+ { NULL, 120, 2560, 1600, 1809, 80, 48, 85, 3, 32, 6,
+ FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
EXPORT_SYMBOL(vesa_modes);
+
+const struct dmt_videomode dmt_modes[DMT_SIZE] = {
+ { 0x01, 0x0000, 0x000000, &vesa_modes[0] },
+ { 0x02, 0x3119, 0x000000, &vesa_modes[1] },
+ { 0x03, 0x0000, 0x000000, &vesa_modes[2] },
+ { 0x04, 0x3140, 0x000000, &vesa_modes[3] },
+ { 0x05, 0x314c, 0x000000, &vesa_modes[4] },
+ { 0x06, 0x314f, 0x000000, &vesa_modes[5] },
+ { 0x07, 0x3159, 0x000000, &vesa_modes[6] },
+ { 0x08, 0x0000, 0x000000, &vesa_modes[7] },
+ { 0x09, 0x4540, 0x000000, &vesa_modes[8] },
+ { 0x0a, 0x454c, 0x000000, &vesa_modes[9] },
+ { 0x0b, 0x454f, 0x000000, &vesa_modes[10] },
+ { 0x0c, 0x4559, 0x000000, &vesa_modes[11] },
+ { 0x0d, 0x0000, 0x000000, NULL },
+ { 0x0e, 0x0000, 0x000000, NULL },
+ { 0x0f, 0x0000, 0x000000, &vesa_modes[12] },
+ { 0x10, 0x6140, 0x000000, &vesa_modes[13] },
+ { 0x11, 0x614a, 0x000000, &vesa_modes[14] },
+ { 0x12, 0x614f, 0x000000, &vesa_modes[15] },
+ { 0x13, 0x6159, 0x000000, &vesa_modes[16] },
+ { 0x14, 0x0000, 0x000000, NULL },
+ { 0x15, 0x714f, 0x000000, &vesa_modes[17] },
+ { 0x16, 0x0000, 0x7f1c21, NULL },
+ { 0x17, 0x0000, 0x7f1c28, NULL },
+ { 0x18, 0x0000, 0x7f1c44, NULL },
+ { 0x19, 0x0000, 0x7f1c62, NULL },
+ { 0x1a, 0x0000, 0x000000, NULL },
+ { 0x1b, 0x0000, 0x8f1821, NULL },
+ { 0x1c, 0x8100, 0x8f1828, NULL },
+ { 0x1d, 0x810f, 0x8f1844, NULL },
+ { 0x1e, 0x8119, 0x8f1862, NULL },
+ { 0x1f, 0x0000, 0x000000, NULL },
+ { 0x20, 0x8140, 0x000000, &vesa_modes[18] },
+ { 0x21, 0x8159, 0x000000, &vesa_modes[19] },
+ { 0x22, 0x0000, 0x000000, NULL },
+ { 0x23, 0x8180, 0x000000, &vesa_modes[20] },
+ { 0x24, 0x818f, 0x000000, &vesa_modes[21] },
+ { 0x25, 0x8199, 0x000000, &vesa_modes[22] },
+ { 0x26, 0x0000, 0x000000, NULL },
+ { 0x27, 0x0000, 0x000000, NULL },
+ { 0x28, 0x0000, 0x000000, NULL },
+ { 0x29, 0x0000, 0x0c2021, NULL },
+ { 0x2a, 0x9040, 0x0c2028, NULL },
+ { 0x2b, 0x904f, 0x0c2044, NULL },
+ { 0x2c, 0x9059, 0x0c2062, NULL },
+ { 0x2d, 0x0000, 0x000000, NULL },
+ { 0x2e, 0x9500, 0xc11821, NULL },
+ { 0x2f, 0x9500, 0xc11828, NULL },
+ { 0x30, 0x950f, 0xc11844, NULL },
+ { 0x31, 0x9519, 0xc11868, NULL },
+ { 0x32, 0x0000, 0x000000, NULL },
+ { 0x33, 0xa940, 0x000000, &vesa_modes[23] },
+ { 0x34, 0xa945, 0x000000, &vesa_modes[24] },
+ { 0x35, 0xa94a, 0x000000, &vesa_modes[25] },
+ { 0x36, 0xa94f, 0x000000, &vesa_modes[26] },
+ { 0x37, 0xa959, 0x000000, &vesa_modes[27] },
+ { 0x38, 0x0000, 0x000000, NULL },
+ { 0x39, 0x0000, 0x0c2821, NULL },
+ { 0x3a, 0xb300, 0x0c2828, NULL },
+ { 0x3b, 0xb30f, 0x0c2844, NULL },
+ { 0x3c, 0xb319, 0x0c2868, NULL },
+ { 0x3d, 0x0000, 0x000000, NULL },
+ { 0x3e, 0xc140, 0x000000, &vesa_modes[28] },
+ { 0x3f, 0xc14f, 0x000000, &vesa_modes[29] },
+ { 0x40, 0x0000, 0x000000, NULL},
+ { 0x41, 0xc940, 0x000000, &vesa_modes[30] },
+ { 0x42, 0xc94f, 0x000000, &vesa_modes[31] },
+ { 0x43, 0x0000, 0x000000, NULL },
+ { 0x44, 0x0000, 0x572821, &vesa_modes[34] },
+ { 0x45, 0xd100, 0x572828, &vesa_modes[35] },
+ { 0x46, 0xd10f, 0x572844, &vesa_modes[36] },
+ { 0x47, 0xd119, 0x572862, &vesa_modes[37] },
+ { 0x48, 0x0000, 0x000000, NULL },
+ { 0x49, 0xd140, 0x000000, &vesa_modes[32] },
+ { 0x4a, 0xd14f, 0x000000, &vesa_modes[33] },
+ { 0x4b, 0x0000, 0x000000, NULL },
+ { 0x4c, 0x0000, 0x1f3821, &vesa_modes[38] },
+ { 0x4d, 0x0000, 0x1f3828, &vesa_modes[39] },
+ { 0x4e, 0x0000, 0x1f3844, &vesa_modes[40] },
+ { 0x4f, 0x0000, 0x1f3862, &vesa_modes[41] },
+ { 0x50, 0x0000, 0x000000, &vesa_modes[42] },
+};
+EXPORT_SYMBOL(dmt_modes);
#endif /* CONFIG_FB_MODE_HELPERS */
/**
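
The dmt_modes[] table above ties each VESA DMT ID to its EDID standard-timing 2-byte code (and CVT 3-byte code, where defined) plus a pointer into vesa_modes[]; the reworked get_std_timing() in fbmon.c searches it before falling back to the old aspect-ratio arithmetic. Condensed to the lookup alone (mirroring the patch, not extending it):

static const struct fb_videomode *dmt_lookup_std(const unsigned char *block)
{
	u32 code = block[0] << 8 | block[1];	/* EDID standard timing bytes */
	int i;

	for (i = 0; i < DMT_SIZE; i++)
		if (dmt_modes[i].std_2byte_code == code)
			return dmt_modes[i].mode;	/* may be NULL */

	return NULL;
}
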
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 844a32fd38ed..c1eda3190968 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -25,8 +25,8 @@
*/
static void
-bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
- const unsigned long *src, int src_idx, int bits, unsigned n)
+bitcpy(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
+ const unsigned long *src, unsigned src_idx, int bits, unsigned n)
{
unsigned long first, last;
int const shift = dst_idx-src_idx;
@@ -86,15 +86,15 @@ bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
first &= last;
if (shift > 0) {
/* Single source word */
- *dst = comp(*src >> right, *dst, first);
+ *dst = comp(*src << left, *dst, first);
} else if (src_idx+n <= bits) {
/* Single source word */
- *dst = comp(*src << left, *dst, first);
+ *dst = comp(*src >> right, *dst, first);
} else {
/* 2 source words */
d0 = *src++;
d1 = *src;
- *dst = comp(d0 << left | d1 >> right, *dst,
+ *dst = comp(d0 >> right | d1 << left, *dst,
first);
}
} else {
@@ -109,13 +109,14 @@ bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
/* Leading bits */
if (shift > 0) {
/* Single source word */
- *dst = comp(d0 >> right, *dst, first);
+ *dst = comp(d0 << left, *dst, first);
dst++;
n -= bits - dst_idx;
} else {
/* 2 source words */
d1 = *src++;
- *dst = comp(d0 << left | *dst >> right, *dst, first);
+ *dst = comp(d0 >> right | d1 << left, *dst,
+ first);
d0 = d1;
dst++;
n -= bits - dst_idx;
@@ -126,36 +127,36 @@ bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
n /= bits;
while (n >= 4) {
d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
+ *dst++ = d0 >> right | d1 << left;
d0 = d1;
d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
+ *dst++ = d0 >> right | d1 << left;
d0 = d1;
d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
+ *dst++ = d0 >> right | d1 << left;
d0 = d1;
d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
+ *dst++ = d0 >> right | d1 << left;
d0 = d1;
n -= 4;
}
while (n--) {
d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
+ *dst++ = d0 >> right | d1 << left;
d0 = d1;
}
/* Trailing bits */
- if (last) {
- if (m <= right) {
+ if (m) {
+ if (m <= bits - right) {
/* Single source word */
- *dst = comp(d0 << left, *dst, last);
+ d0 >>= right;
} else {
/* 2 source words */
d1 = *src;
- *dst = comp(d0 << left | d1 >> right,
- *dst, last);
+ d0 = d0 >> right | d1 << left;
}
+ *dst = comp(d0, *dst, last);
}
}
}
@@ -166,40 +167,35 @@ bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
*/
static void
-bitcpy_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
- const unsigned long *src, int src_idx, int bits, unsigned n)
+bitcpy_rev(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
+ const unsigned long *src, unsigned src_idx, unsigned bits,
+ unsigned n)
{
unsigned long first, last;
int shift;
- dst += (n-1)/bits;
- src += (n-1)/bits;
- if ((n-1) % bits) {
- dst_idx += (n-1) % bits;
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= bits - 1;
- src_idx += (n-1) % bits;
- src += src_idx >> (ffs(bits) - 1);
- src_idx &= bits - 1;
- }
+ dst += (dst_idx + n - 1) / bits;
+ src += (src_idx + n - 1) / bits;
+ dst_idx = (dst_idx + n - 1) % bits;
+ src_idx = (src_idx + n - 1) % bits;
shift = dst_idx-src_idx;
- first = FB_SHIFT_LOW(p, ~0UL, bits - 1 - dst_idx);
- last = ~(FB_SHIFT_LOW(p, ~0UL, bits - 1 - ((dst_idx-n) % bits)));
+ first = ~FB_SHIFT_HIGH(p, ~0UL, (dst_idx + 1) % bits);
+ last = FB_SHIFT_HIGH(p, ~0UL, (bits + dst_idx + 1 - n) % bits);
if (!shift) {
/* Same alignment for source and dest */
if ((unsigned long)dst_idx+1 >= n) {
/* Single word */
- if (last)
- first &= last;
- *dst = comp(*src, *dst, first);
+ if (first)
+ last &= first;
+ *dst = comp(*src, *dst, last);
} else {
/* Multiple destination words */
/* Leading bits */
- if (first != ~0UL) {
+ if (first) {
*dst = comp(*src, *dst, first);
dst--;
src--;
@@ -222,29 +218,29 @@ bitcpy_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
while (n--)
*dst-- = *src--;
/* Trailing bits */
- if (last)
+ if (last != -1UL)
*dst = comp(*src, *dst, last);
}
} else {
/* Different alignment for source and dest */
- int const left = -shift & (bits-1);
- int const right = shift & (bits-1);
+ int const left = shift & (bits-1);
+ int const right = -shift & (bits-1);
if ((unsigned long)dst_idx+1 >= n) {
/* Single destination word */
- if (last)
- first &= last;
+ if (first)
+ last &= first;
if (shift < 0) {
/* Single source word */
- *dst = comp(*src << left, *dst, first);
+ *dst = comp(*src >> right, *dst, last);
} else if (1+(unsigned long)src_idx >= n) {
/* Single source word */
- *dst = comp(*src >> right, *dst, first);
+ *dst = comp(*src << left, *dst, last);
} else {
/* 2 source words */
- *dst = comp(*src >> right | *(src-1) << left,
- *dst, first);
+ *dst = comp(*src << left | *(src-1) >> right,
+ *dst, last);
}
} else {
/* Multiple destination words */
@@ -261,14 +257,18 @@ bitcpy_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
/* Leading bits */
if (shift < 0) {
/* Single source word */
- *dst = comp(d0 << left, *dst, first);
+ d1 = d0;
+ d0 >>= right;
} else {
/* 2 source words */
d1 = *src--;
- *dst = comp(d0 >> right | d1 << left, *dst,
- first);
- d0 = d1;
+ d0 = d0 << left | d1 >> right;
}
+ if (!first)
+ *dst = d0;
+ else
+ *dst = comp(d0, *dst, first);
+ d0 = d1;
dst--;
n -= dst_idx+1;
@@ -277,36 +277,36 @@ bitcpy_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
n /= bits;
while (n >= 4) {
d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
+ *dst-- = d0 << left | d1 >> right;
d0 = d1;
d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
+ *dst-- = d0 << left | d1 >> right;
d0 = d1;
d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
+ *dst-- = d0 << left | d1 >> right;
d0 = d1;
d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
+ *dst-- = d0 << left | d1 >> right;
d0 = d1;
n -= 4;
}
while (n--) {
d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
+ *dst-- = d0 << left | d1 >> right;
d0 = d1;
}
/* Trailing bits */
- if (last) {
- if (m <= left) {
+ if (m) {
+ if (m <= bits - left) {
/* Single source word */
- *dst = comp(d0 >> right, *dst, last);
+ d0 <<= left;
} else {
/* 2 source words */
d1 = *src;
- *dst = comp(d0 >> right | d1 << left,
- *dst, last);
+ d0 = d0 << left | d1 >> right;
}
+ *dst = comp(d0, *dst, last);
}
}
}
@@ -317,9 +317,9 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
unsigned long const bits_per_line = p->fix.line_length*8u;
- unsigned long *dst = NULL, *src = NULL;
+ unsigned long *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
- int dst_idx = 0, src_idx = 0, rev_copy = 0;
+ unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
if (p->state != FBINFO_STATE_RUNNING)
return;
@@ -334,8 +334,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
/* split the base of the framebuffer into a long-aligned address and
the index of the first bit */
- dst = src = (unsigned long *)((unsigned long)p->screen_base &
- ~(bytes-1));
+ base = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
/* add offset of source and target area */
dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
@@ -348,20 +347,14 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
while (height--) {
dst_idx -= bits_per_line;
src_idx -= bits_per_line;
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= (bytes - 1);
- src += src_idx >> (ffs(bits) - 1);
- src_idx &= (bytes - 1);
- bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
+ bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
+ base + (src_idx / bits), src_idx % bits, bits,
width*p->var.bits_per_pixel);
}
} else {
while (height--) {
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= (bytes - 1);
- src += src_idx >> (ffs(bits) - 1);
- src_idx &= (bytes - 1);
- bitcpy(p, dst, dst_idx, src, src_idx, bits,
+ bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
+ base + (src_idx / bits), src_idx % bits, bits,
width*p->var.bits_per_pixel);
dst_idx += bits_per_line;
src_idx += bits_per_line;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 2794ba11f332..9bee8744c438 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -374,10 +374,8 @@ static int gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
}
- if (info) {
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
return ret;
}
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 1790f14bab15..124d7c7e2d14 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -444,10 +444,8 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_release_region(pdev, 1);
}
- if (info) {
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
return ret;
}
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 9e1d19d673a1..138da6cb6cbc 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -577,10 +577,8 @@ err:
pci_release_region(pdev, 3);
}
- if (info) {
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
return ret;
}
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 5ff9fe2116a4..15d3ccff2965 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -417,8 +417,7 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual ||
+ if (var->yoffset >= info->var.yres_virtual ||
var->xoffset)
return -EINVAL;
} else {
diff --git a/drivers/video/fbdev/mmp/Makefile b/drivers/video/fbdev/mmp/Makefile
index a014cb358bf8..924dd0930cc7 100644
--- a/drivers/video/fbdev/mmp/Makefile
+++ b/drivers/video/fbdev/mmp/Makefile
@@ -1 +1,3 @@
-obj-y += core.o hw/ panel/ fb/
+obj-$(CONFIG_MMP_DISP) += mmp_disp.o hw/ panel/ fb/
+
+mmp_disp-y += core.o
diff --git a/drivers/video/fbdev/mmp/fb/Kconfig b/drivers/video/fbdev/mmp/fb/Kconfig
index 9b0141f105f5..985e1a7cd254 100644
--- a/drivers/video/fbdev/mmp/fb/Kconfig
+++ b/drivers/video/fbdev/mmp/fb/Kconfig
@@ -1,7 +1,7 @@
if MMP_DISP
config MMP_FB
- bool "fb driver for Marvell MMP Display Subsystem"
+ tristate "fb driver for Marvell MMP Display Subsystem"
depends on FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index 7f9dc9bec309..de9819660ca0 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -61,7 +61,7 @@ struct ocfb_dev {
/* flag indicating whether the regs are little endian accessed */
int little_endian;
/* Physical and virtual addresses of framebuffer */
- phys_addr_t fb_phys;
+ dma_addr_t fb_phys;
void __iomem *fb_virt;
u32 pseudo_palette[PALETTE_SIZE];
};
diff --git a/drivers/video/fbdev/omap2/displays-new/Kconfig b/drivers/video/fbdev/omap2/displays-new/Kconfig
index e6cfc38160d3..574710141a61 100644
--- a/drivers/video/fbdev/omap2/displays-new/Kconfig
+++ b/drivers/video/fbdev/omap2/displays-new/Kconfig
@@ -1,6 +1,12 @@
menu "OMAP Display Device Drivers (new device model)"
depends on OMAP2_DSS
+config DISPLAY_ENCODER_OPA362
+ tristate "OPA362 external analog amplifier"
+ help
+ Driver for OPA362 external analog TV amplifier controlled
+ through a GPIO.
+
config DISPLAY_ENCODER_TFP410
tristate "TFP410 DPI to DVI Encoder"
help
diff --git a/drivers/video/fbdev/omap2/displays-new/Makefile b/drivers/video/fbdev/omap2/displays-new/Makefile
index 0323a8a1c682..9aa176bfbf2e 100644
--- a/drivers/video/fbdev/omap2/displays-new/Makefile
+++ b/drivers/video/fbdev/omap2/displays-new/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o
obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o
obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
index 9a2b5ce58545..8511c648a15c 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
@@ -208,7 +208,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
ddata->in = in;
ddata->connector_type = pdata->connector_type;
- ddata->invert_polarity = ddata->invert_polarity;
+ ddata->invert_polarity = pdata->invert_polarity;
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c b/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
new file mode 100644
index 000000000000..84a6b3367124
--- /dev/null
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
@@ -0,0 +1,285 @@
+/*
+ * OPA362 analog video amplifier with output/power control
+ *
+ * Copyright (C) 2014 Golden Delicious Computers
+ * Author: H. Nikolaus Schaller <hns@goldelico.com>
+ *
+ * based on encoder-tfp410
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+
+#include <video/omapdss.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ struct gpio_desc *enable_gpio;
+
+ struct omap_video_timings timings;
+};
+
+#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
+
+static int opa362_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(dssdev->dev, "connect\n");
+
+ if (omapdss_device_is_connected(dssdev))
+ return -EBUSY;
+
+ r = in->ops.atv->connect(in, dssdev);
+ if (r)
+ return r;
+
+ dst->src = dssdev;
+ dssdev->dst = dst;
+
+ return 0;
+}
+
+static void opa362_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(dssdev->dev, "disconnect\n");
+
+ WARN_ON(!omapdss_device_is_connected(dssdev));
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ WARN_ON(dst != dssdev->dst);
+ if (dst != dssdev->dst)
+ return;
+
+ dst->src = NULL;
+ dssdev->dst = NULL;
+
+ in->ops.atv->disconnect(in, &ddata->dssdev);
+}
+
+static int opa362_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ dev_dbg(dssdev->dev, "enable\n");
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.atv->set_timings(in, &ddata->timings);
+
+ r = in->ops.atv->enable(in);
+ if (r)
+ return r;
+
+ if (ddata->enable_gpio)
+ gpiod_set_value_cansleep(ddata->enable_gpio, 1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void opa362_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(dssdev->dev, "disable\n");
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ if (ddata->enable_gpio)
+ gpiod_set_value_cansleep(ddata->enable_gpio, 0);
+
+ in->ops.atv->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void opa362_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(dssdev->dev, "set_timings\n");
+
+ ddata->timings = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.atv->set_timings(in, timings);
+}
+
+static void opa362_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ dev_dbg(dssdev->dev, "get_timings\n");
+
+ *timings = ddata->timings;
+}
+
+static int opa362_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(dssdev->dev, "check_timings\n");
+
+ return in->ops.atv->check_timings(in, timings);
+}
+
+static void opa362_set_type(struct omap_dss_device *dssdev,
+ enum omap_dss_venc_type type)
+{
+ /* we can only drive a COMPOSITE output */
+ WARN_ON(type != OMAP_DSS_VENC_TYPE_COMPOSITE);
+
+}
+
+static const struct omapdss_atv_ops opa362_atv_ops = {
+ .connect = opa362_connect,
+ .disconnect = opa362_disconnect,
+
+ .enable = opa362_enable,
+ .disable = opa362_disable,
+
+ .check_timings = opa362_check_timings,
+ .set_timings = opa362_set_timings,
+ .get_timings = opa362_get_timings,
+
+ .set_type = opa362_set_type,
+};
+
+static int opa362_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev, *in;
+ struct gpio_desc *gpio;
+ int r;
+
+ dev_dbg(&pdev->dev, "probe\n");
+
+ if (node == NULL) {
+ dev_err(&pdev->dev, "Unable to find device tree\n");
+ return -EINVAL;
+ }
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ gpio = devm_gpiod_get(&pdev->dev, "enable");
+ if (IS_ERR(gpio)) {
+ if (PTR_ERR(gpio) != -ENOENT)
+ return PTR_ERR(gpio);
+
+ gpio = NULL;
+ } else {
+ gpiod_direction_output(gpio, 0);
+ }
+
+ ddata->enable_gpio = gpio;
+
+ in = omapdss_of_find_source_for_first_ep(node);
+ if (IS_ERR(in)) {
+ dev_err(&pdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
+ ddata->in = in;
+
+ dssdev = &ddata->dssdev;
+ dssdev->ops.atv = &opa362_atv_ops;
+ dssdev->dev = &pdev->dev;
+ dssdev->type = OMAP_DISPLAY_TYPE_VENC;
+ dssdev->output_type = OMAP_DISPLAY_TYPE_VENC;
+ dssdev->owner = THIS_MODULE;
+
+ r = omapdss_register_output(dssdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to register output\n");
+ goto err_reg;
+ }
+
+ return 0;
+err_reg:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int __exit opa362_remove(struct platform_device *pdev)
+{
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ omapdss_unregister_output(&ddata->dssdev);
+
+ WARN_ON(omapdss_device_is_enabled(dssdev));
+ if (omapdss_device_is_enabled(dssdev))
+ opa362_disable(dssdev);
+
+ WARN_ON(omapdss_device_is_connected(dssdev));
+ if (omapdss_device_is_connected(dssdev))
+ opa362_disconnect(dssdev, dssdev->dst);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static const struct of_device_id opa362_of_match[] = {
+ { .compatible = "omapdss,ti,opa362", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, opa362_of_match);
+
+static struct platform_driver opa362_driver = {
+ .probe = opa362_probe,
+ .remove = __exit_p(opa362_remove),
+ .driver = {
+ .name = "amplifier-opa362",
+ .owner = THIS_MODULE,
+ .of_match_table = opa362_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(opa362_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index 7f3e11b16c86..990af6baeb0f 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
@@ -29,33 +29,10 @@ struct panel_drv_data {
int hpd_gpio;
struct omap_video_timings timings;
-
- struct completion hpd_completion;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static irqreturn_t tpd_hpd_irq_handler(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
- bool hpd;
-
- hpd = gpio_get_value_cansleep(ddata->hpd_gpio);
-
- dev_dbg(ddata->dssdev.dev, "hpd %d\n", hpd);
-
- if (gpio_is_valid(ddata->ls_oe_gpio)) {
- if (hpd)
- gpio_set_value_cansleep(ddata->ls_oe_gpio, 1);
- else
- gpio_set_value_cansleep(ddata->ls_oe_gpio, 0);
- }
-
- complete_all(&ddata->hpd_completion);
-
- return IRQ_HANDLED;
-}
-
static int tpd_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -70,23 +47,10 @@ static int tpd_connect(struct omap_dss_device *dssdev,
dst->src = dssdev;
dssdev->dst = dst;
- reinit_completion(&ddata->hpd_completion);
-
gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
- /*
- * If there's a cable connected, wait for the hpd irq to trigger,
- * which turns on the level shifters.
- */
- if (gpio_get_value_cansleep(ddata->hpd_gpio)) {
- unsigned long to;
- to = wait_for_completion_timeout(&ddata->hpd_completion,
- msecs_to_jiffies(250));
- WARN_ON_ONCE(to == 0);
- }
-
return 0;
}
@@ -179,11 +143,20 @@ static int tpd_read_edid(struct omap_dss_device *dssdev,
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
+ int r;
if (!gpio_get_value_cansleep(ddata->hpd_gpio))
return -ENODEV;
- return in->ops.hdmi->read_edid(in, edid, len);
+ if (gpio_is_valid(ddata->ls_oe_gpio))
+ gpio_set_value_cansleep(ddata->ls_oe_gpio, 1);
+
+ r = in->ops.hdmi->read_edid(in, edid, len);
+
+ if (gpio_is_valid(ddata->ls_oe_gpio))
+ gpio_set_value_cansleep(ddata->ls_oe_gpio, 0);
+
+ return r;
}
static bool tpd_detect(struct omap_dss_device *dssdev)
@@ -309,8 +282,6 @@ static int tpd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- init_completion(&ddata->hpd_completion);
-
if (dev_get_platdata(&pdev->dev)) {
r = tpd_probe_pdata(pdev);
if (r)
@@ -340,13 +311,6 @@ static int tpd_probe(struct platform_device *pdev)
if (r)
goto err_gpio;
- r = devm_request_threaded_irq(&pdev->dev, gpio_to_irq(ddata->hpd_gpio),
- NULL, tpd_hpd_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT, "hpd", ddata);
- if (r)
- goto err_irq;
-
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
dssdev->dev = &pdev->dev;
@@ -365,7 +329,6 @@ static int tpd_probe(struct platform_device *pdev)
return 0;
err_reg:
-err_irq:
err_gpio:
omap_dss_put_device(ddata->in);
return r;
diff --git a/drivers/video/fbdev/omap2/dss/Makefile b/drivers/video/fbdev/omap2/dss/Makefile
index 2ea9d382354c..b5136d3d4b77 100644
--- a/drivers/video/fbdev/omap2/dss/Makefile
+++ b/drivers/video/fbdev/omap2/dss/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
- output.o dss-of.o pll.o
+ output.o dss-of.o pll.o video-pll.o
# DSS compat layer files
omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
dispc-compat.o display-sysfs.o
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index 9850d9ef9a9d..31b743c70272 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -36,6 +36,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
#include <video/omapdss.h>
@@ -117,6 +120,9 @@ static struct {
const struct dispc_features *feat;
bool is_enabled;
+
+ struct regmap *syscon_pol;
+ u32 syscon_pol_offset;
} dispc;
enum omap_color_component {
@@ -2958,6 +2964,25 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
FLD_VAL(vsync_level, 12, 12);
dispc_write_reg(DISPC_POL_FREQ(channel), l);
+
+ if (dispc.syscon_pol) {
+ const int shifts[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 1,
+ [OMAP_DSS_CHANNEL_LCD3] = 2,
+ };
+
+ u32 mask, val;
+
+ mask = (1 << 0) | (1 << 3) | (1 << 6);
+ val = (rf << 0) | (ipc << 3) | (onoff << 6);
+
+ mask <<= 16 + shifts[channel];
+ val <<= 16 + shifts[channel];
+
+ regmap_update_bits(dispc.syscon_pol, dispc.syscon_pol_offset,
+ mask, val);
+ }
}
/* change name to mode? */
@@ -3037,10 +3062,16 @@ unsigned long dispc_fclk_rate(void)
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
pll = dss_pll_find("dsi0");
+ if (!pll)
+ pll = dss_pll_find("video0");
+
r = pll->cinfo.clkout[0];
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
pll = dss_pll_find("dsi1");
+ if (!pll)
+ pll = dss_pll_find("video1");
+
r = pll->cinfo.clkout[0];
break;
default:
@@ -3069,10 +3100,16 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
pll = dss_pll_find("dsi0");
+ if (!pll)
+ pll = dss_pll_find("video0");
+
r = pll->cinfo.clkout[0];
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
pll = dss_pll_find("dsi1");
+ if (!pll)
+ pll = dss_pll_find("video1");
+
r = pll->cinfo.clkout[0];
break;
default:
@@ -3668,6 +3705,7 @@ static int __init dispc_init_features(struct platform_device *pdev)
break;
case OMAPDSS_VER_OMAP5:
+ case OMAPDSS_VER_DRA7xx:
src = &omap54xx_dispc_feats;
break;
@@ -3728,6 +3766,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
u32 rev;
int r = 0;
struct resource *dispc_mem;
+ struct device_node *np = pdev->dev.of_node;
dispc.pdev = pdev;
@@ -3754,6 +3793,20 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (np && of_property_read_bool(np, "syscon-pol")) {
+ dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
+ if (IS_ERR(dispc.syscon_pol)) {
+ dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
+ return PTR_ERR(dispc.syscon_pol);
+ }
+
+ if (of_property_read_u32_index(np, "syscon-pol", 1,
+ &dispc.syscon_pol_offset)) {
+ dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
+ return -EINVAL;
+ }
+ }
+
pm_runtime_enable(&pdev->dev);
r = dispc_runtime_get();
@@ -3832,6 +3885,7 @@ static const struct of_device_id dispc_of_match[] = {
{ .compatible = "ti,omap3-dispc", },
{ .compatible = "ti,omap4-dispc", },
{ .compatible = "ti,omap5-dispc", },
+ { .compatible = "ti,dra7-dispc", },
{},
};
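
The "syscon-pol" support added to dispc follows the usual syscon pattern: the DT property carries a phandle to the system-control module plus a register offset, the probe path resolves it to a regmap, and the timing code later does a masked read-modify-write with regmap_update_bits(). Boiled down (property name as in the patch, the helper itself is illustrative):

static int dispc_get_pol_syscon(struct device_node *np,
				struct regmap **map, u32 *offset)
{
	*map = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
	if (IS_ERR(*map))
		return PTR_ERR(*map);

	/* second cell of the property is the register offset */
	return of_property_read_u32_index(np, "syscon-pol", 1, offset);
}

/* later: regmap_update_bits(map, offset, mask, val) touches only the masked
 * polarity bits for the channel being reconfigured. */
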
diff --git a/drivers/video/fbdev/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c
index 9a2f8c3b102d..f83e7b030249 100644
--- a/drivers/video/fbdev/omap2/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/dss/dpi.c
@@ -106,6 +106,17 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
return NULL;
}
+ case OMAPDSS_VER_DRA7xx:
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ case OMAP_DSS_CHANNEL_LCD2:
+ return dss_pll_find("video0");
+ case OMAP_DSS_CHANNEL_LCD3:
+ return dss_pll_find("video1");
+ default:
+ return NULL;
+ }
+
default:
return NULL;
}
@@ -590,6 +601,10 @@ static void dpi_init_pll(struct dpi_data *dpi)
if (!pll)
return;
+ /* On DRA7 we need to set a mux to use the PLL */
+ if (omapdss_get_version() == OMAPDSS_VER_DRA7xx)
+ dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
+
if (dpi_verify_dsi_pll(pll)) {
DSSWARN("DSI PLL not operational\n");
return;
@@ -615,6 +630,17 @@ static enum omap_channel dpi_get_channel(int port_num)
case OMAPDSS_VER_AM43xx:
return OMAP_DSS_CHANNEL_LCD;
+ case OMAPDSS_VER_DRA7xx:
+ switch (port_num) {
+ case 2:
+ return OMAP_DSS_CHANNEL_LCD3;
+ case 1:
+ return OMAP_DSS_CHANNEL_LCD2;
+ case 0:
+ default:
+ return OMAP_DSS_CHANNEL_LCD;
+ }
+
case OMAPDSS_VER_OMAP4430_ES1:
case OMAPDSS_VER_OMAP4430_ES2:
case OMAPDSS_VER_OMAP4:
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index 3e44c580b1f8..5081f6fb1737 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -5238,6 +5238,7 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
}
pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
+ pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
pll->clkin = clk;
pll->base = dsi->pll_base;
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 9987154d50b4..a6d10d4279f3 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -34,7 +34,10 @@
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
@@ -63,14 +66,11 @@ struct dss_reg {
#define REG_FLD_MOD(idx, val, start, end) \
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
-static int dss_runtime_get(void);
-static void dss_runtime_put(void);
-
struct dss_features {
u8 fck_div_max;
u8 dss_fck_multiplier;
const char *parent_clk_name;
- enum omap_display_type *ports;
+ const enum omap_display_type *ports;
int num_ports;
int (*dpi_select_source)(int port, enum omap_channel channel);
};
@@ -78,6 +78,8 @@ struct dss_features {
static struct {
struct platform_device *pdev;
void __iomem *base;
+ struct regmap *syscon_pll_ctrl;
+ u32 syscon_pll_ctrl_offset;
struct clk *parent_clk;
struct clk *dss_clk;
@@ -95,6 +97,9 @@ static struct {
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
const struct dss_features *feat;
+
+ struct dss_pll *video1_pll;
+ struct dss_pll *video2_pll;
} dss;
static const char * const dss_generic_clk_source_names[] = {
@@ -158,6 +163,99 @@ static void dss_restore_context(void)
#undef SR
#undef RR
+void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
+{
+ unsigned shift;
+ unsigned val;
+
+ if (!dss.syscon_pll_ctrl)
+ return;
+
+ val = !enable;
+
+ switch (pll_id) {
+ case DSS_PLL_VIDEO1:
+ shift = 0;
+ break;
+ case DSS_PLL_VIDEO2:
+ shift = 1;
+ break;
+ case DSS_PLL_HDMI:
+ shift = 2;
+ break;
+ default:
+ DSSERR("illegal DSS PLL ID %d\n", pll_id);
+ return;
+ }
+
+ regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
+ 1 << shift, val << shift);
+}
+
+void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
+ enum omap_channel channel)
+{
+ unsigned shift, val;
+
+ if (!dss.syscon_pll_ctrl)
+ return;
+
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ shift = 3;
+
+ switch (pll_id) {
+ case DSS_PLL_VIDEO1:
+ val = 0; break;
+ case DSS_PLL_HDMI:
+ val = 1; break;
+ default:
+ DSSERR("error in PLL mux config for LCD\n");
+ return;
+ }
+
+ break;
+ case OMAP_DSS_CHANNEL_LCD2:
+ shift = 5;
+
+ switch (pll_id) {
+ case DSS_PLL_VIDEO1:
+ val = 0; break;
+ case DSS_PLL_VIDEO2:
+ val = 1; break;
+ case DSS_PLL_HDMI:
+ val = 2; break;
+ default:
+ DSSERR("error in PLL mux config for LCD2\n");
+ return;
+ }
+
+ break;
+ case OMAP_DSS_CHANNEL_LCD3:
+ shift = 7;
+
+ switch (pll_id) {
+ case DSS_PLL_VIDEO1:
+ val = 1; break;
+ case DSS_PLL_VIDEO2:
+ val = 0; break;
+ case DSS_PLL_HDMI:
+ val = 2; break;
+ default:
+ DSSERR("error in PLL mux config for LCD3\n");
+ return;
+ }
+
+ break;
+ default:
+ DSSERR("error in PLL mux config\n");
+ return;
+ }
+
+ regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
+ 0x3 << shift, val << shift);
+}
+
void dss_sdi_init(int datapairs)
{
u32 l;
@@ -605,6 +703,26 @@ static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
return 0;
}
+static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
+{
+ switch (port) {
+ case 0:
+ return dss_dpi_select_source_omap5(port, channel);
+ case 1:
+ if (channel != OMAP_DSS_CHANNEL_LCD2)
+ return -EINVAL;
+ break;
+ case 2:
+ if (channel != OMAP_DSS_CHANNEL_LCD3)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int dss_dpi_select_source(int port, enum omap_channel channel)
{
return dss.feat->dpi_select_source(port, channel);
@@ -643,7 +761,7 @@ static void dss_put_clocks(void)
clk_put(dss.parent_clk);
}
-static int dss_runtime_get(void)
+int dss_runtime_get(void)
{
int r;
@@ -654,7 +772,7 @@ static int dss_runtime_get(void)
return r < 0 ? r : 0;
}
-static void dss_runtime_put(void)
+void dss_runtime_put(void)
{
int r;
@@ -677,15 +795,21 @@ void dss_debug_dump_clocks(struct seq_file *s)
#endif
-static enum omap_display_type omap2plus_ports[] = {
+static const enum omap_display_type omap2plus_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
};
-static enum omap_display_type omap34xx_ports[] = {
+static const enum omap_display_type omap34xx_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
OMAP_DISPLAY_TYPE_SDI,
};
+static const enum omap_display_type dra7xx_ports[] = {
+ OMAP_DISPLAY_TYPE_DPI,
+ OMAP_DISPLAY_TYPE_DPI,
+ OMAP_DISPLAY_TYPE_DPI,
+};
+
static const struct dss_features omap24xx_dss_feats __initconst = {
/*
* fck div max is really 16, but the divider range has gaps. The range
@@ -744,6 +868,15 @@ static const struct dss_features am43xx_dss_feats __initconst = {
.num_ports = ARRAY_SIZE(omap2plus_ports),
};
+static const struct dss_features dra7xx_dss_feats __initconst = {
+ .fck_div_max = 64,
+ .dss_fck_multiplier = 1,
+ .parent_clk_name = "dpll_per_x2_ck",
+ .dpi_select_source = &dss_dpi_select_source_dra7xx,
+ .ports = dra7xx_ports,
+ .num_ports = ARRAY_SIZE(dra7xx_ports),
+};
+
static int __init dss_init_features(struct platform_device *pdev)
{
const struct dss_features *src;
@@ -784,6 +917,10 @@ static int __init dss_init_features(struct platform_device *pdev)
src = &am43xx_dss_feats;
break;
+ case OMAPDSS_VER_DRA7xx:
+ src = &dra7xx_dss_feats;
+ break;
+
default:
return -ENODEV;
}
@@ -884,8 +1021,10 @@ static void __exit dss_uninit_ports(struct platform_device *pdev)
static int __init omap_dsshw_probe(struct platform_device *pdev)
{
struct resource *dss_mem;
+ struct device_node *np = pdev->dev.of_node;
u32 rev;
int r;
+ struct regulator *pll_regulator;
dss.pdev = pdev;
@@ -940,6 +1079,57 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
dss_init_ports(pdev);
+ if (np && of_property_read_bool(np, "syscon-pll-ctrl")) {
+ dss.syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
+ "syscon-pll-ctrl");
+ if (IS_ERR(dss.syscon_pll_ctrl)) {
+ dev_err(&pdev->dev,
+ "failed to get syscon-pll-ctrl regmap\n");
+ return PTR_ERR(dss.syscon_pll_ctrl);
+ }
+
+ if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1,
+ &dss.syscon_pll_ctrl_offset)) {
+ dev_err(&pdev->dev,
+ "failed to get syscon-pll-ctrl offset\n");
+ return -EINVAL;
+ }
+ }
+
+ pll_regulator = devm_regulator_get(&pdev->dev, "vdda_video");
+ if (IS_ERR(pll_regulator)) {
+ r = PTR_ERR(pll_regulator);
+
+ switch (r) {
+ case -ENOENT:
+ pll_regulator = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+
+ default:
+ DSSERR("can't get DPLL VDDA regulator\n");
+ return r;
+ }
+ }
+
+ if (of_property_match_string(np, "reg-names", "pll1") >= 0) {
+ dss.video1_pll = dss_video_pll_init(pdev, 0, pll_regulator);
+ if (IS_ERR(dss.video1_pll)) {
+ r = PTR_ERR(dss.video1_pll);
+ goto err_pll_init;
+ }
+ }
+
+ if (of_property_match_string(np, "reg-names", "pll2") >= 0) {
+ dss.video2_pll = dss_video_pll_init(pdev, 1, pll_regulator);
+ if (IS_ERR(dss.video2_pll)) {
+ r = PTR_ERR(dss.video2_pll);
+ goto err_pll_init;
+ }
+ }
+
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
@@ -950,6 +1140,12 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
return 0;
+err_pll_init:
+ if (dss.video1_pll)
+ dss_video_pll_uninit(dss.video1_pll);
+
+ if (dss.video2_pll)
+ dss_video_pll_uninit(dss.video2_pll);
err_runtime_get:
pm_runtime_disable(&pdev->dev);
err_setup_clocks:
@@ -959,6 +1155,12 @@ err_setup_clocks:
static int __exit omap_dsshw_remove(struct platform_device *pdev)
{
+ if (dss.video1_pll)
+ dss_video_pll_uninit(dss.video1_pll);
+
+ if (dss.video2_pll)
+ dss_video_pll_uninit(dss.video2_pll);
+
dss_uninit_ports(pdev);
pm_runtime_disable(&pdev->dev);
@@ -1003,6 +1205,7 @@ static const struct of_device_id dss_of_match[] = {
{ .compatible = "ti,omap3-dss", },
{ .compatible = "ti,omap4-dss", },
{ .compatible = "ti,omap5-dss", },
+ { .compatible = "ti,dra7-dss", },
{},
};
diff --git a/drivers/video/fbdev/omap2/dss/dss.h b/drivers/video/fbdev/omap2/dss/dss.h
index 14fb0c23f4a2..4812eee2622a 100644
--- a/drivers/video/fbdev/omap2/dss/dss.h
+++ b/drivers/video/fbdev/omap2/dss/dss.h
@@ -100,6 +100,14 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
+enum dss_pll_id {
+ DSS_PLL_DSI1,
+ DSS_PLL_DSI2,
+ DSS_PLL_HDMI,
+ DSS_PLL_VIDEO1,
+ DSS_PLL_VIDEO2,
+};
+
struct dss_pll;
#define DSS_PLL_MAX_HSDIVS 4
@@ -150,6 +158,7 @@ struct dss_pll_hw {
struct dss_pll {
const char *name;
+ enum dss_pll_id id;
struct clk *clkin;
struct regulator *regulator;
@@ -250,6 +259,9 @@ void dss_overlay_kobj_uninit(struct omap_overlay *ovl);
int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
+int dss_runtime_get(void);
+void dss_runtime_put(void);
+
unsigned long dss_get_dispc_clk_rate(void);
int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
@@ -257,6 +269,11 @@ enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
+/* DSS VIDEO PLL */
+struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
+ struct regulator *regulator);
+void dss_video_pll_uninit(struct dss_pll *pll);
+
/* dss-of */
struct device_node *dss_of_port_get_parent_device(struct device_node *port);
u32 dss_of_port_get_port_number(struct device_node *port);
@@ -265,6 +282,10 @@ u32 dss_of_port_get_port_number(struct device_node *port);
void dss_debug_dump_clocks(struct seq_file *s);
#endif
+void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
+void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
+ enum omap_channel channel);
+
void dss_sdi_init(int datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
@@ -446,5 +467,6 @@ int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_write_config_type_b(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
+int dss_pll_wait_reset_done(struct dss_pll *pll);
#endif
diff --git a/drivers/video/fbdev/omap2/dss/dss_features.c b/drivers/video/fbdev/omap2/dss/dss_features.c
index 0e3da809473c..376270b777f8 100644
--- a/drivers/video/fbdev/omap2/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/dss/dss_features.c
@@ -223,7 +223,7 @@ static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
/* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_HDMI | OMAP_DSS_OUTPUT_DPI,
+ OMAP_DSS_OUTPUT_HDMI,
/* OMAP_DSS_CHANNEL_LCD2 */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
@@ -943,6 +943,7 @@ void dss_features_init(enum omapdss_version version)
break;
case OMAPDSS_VER_OMAP5:
+ case OMAPDSS_VER_DRA7xx:
omap_current_dss_features = &omap5_dss_features;
break;
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c
index 39aae3aa7136..3f0b34a7031a 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5.c
@@ -787,6 +787,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap5-hdmi", },
+ { .compatible = "ti,dra7-hdmi", },
{},
};
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
index bc9e07d2afbe..1f5d19c119ce 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
@@ -208,6 +208,7 @@ static int hdmi_phy_init_features(struct platform_device *pdev)
break;
case OMAPDSS_VER_OMAP5:
+ case OMAPDSS_VER_DRA7xx:
src = &omap54xx_phy_feats;
break;
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index ac83ef5cfd7d..06e23a7c432c 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -104,6 +104,8 @@ static int hdmi_pll_enable(struct dss_pll *dsspll)
struct hdmi_wp_data *wp = pll->wp;
u16 r = 0;
+ dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
+
r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
if (r)
return r;
@@ -117,6 +119,8 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
struct hdmi_wp_data *wp = pll->wp;
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+
+ dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
}
static const struct dss_pll_ops dsi_pll_ops = {
@@ -185,6 +189,7 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
}
pll->name = "hdmi";
+ pll->id = DSS_PLL_HDMI;
pll->base = hpll->base;
pll->clkin = clk;
@@ -196,6 +201,7 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
break;
case OMAPDSS_VER_OMAP5:
+ case OMAPDSS_VER_DRA7xx:
pll->hw = &dss_omap5_hdmi_pll_hw;
break;
diff --git a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
index 2f0822ee3ff9..42b87f95267c 100644
--- a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
@@ -186,6 +186,7 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
{ .compatible = "ti,omap3-dss", },
{ .compatible = "ti,omap4-dss", },
{ .compatible = "ti,omap5-dss", },
+ { .compatible = "ti,dra7-dss", },
{},
};
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
index 335ffac224b9..f974ddcd3b6e 100644
--- a/drivers/video/fbdev/omap2/dss/pll.c
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -222,6 +222,16 @@ static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
return !value;
}
+int dss_pll_wait_reset_done(struct dss_pll *pll)
+{
+ void __iomem *base = pll->base;
+
+ if (wait_for_bit_change(base + PLL_STATUS, 0, 1) != 1)
+ return -ETIMEDOUT;
+ else
+ return 0;
+}
+
static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
{
int t = 100;
diff --git a/drivers/video/fbdev/omap2/dss/video-pll.c b/drivers/video/fbdev/omap2/dss/video-pll.c
new file mode 100644
index 000000000000..b1ec59e42940
--- /dev/null
+++ b/drivers/video/fbdev/omap2/dss/video-pll.c
@@ -0,0 +1,211 @@
+/*
+* Copyright (C) 2014 Texas Instruments Ltd
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 as published by
+* the Free Software Foundation.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "dss_features.h"
+
+struct dss_video_pll {
+ struct dss_pll pll;
+
+ struct device *dev;
+
+ void __iomem *clkctrl_base;
+};
+
+#define REG_MOD(reg, val, start, end) \
+ writel_relaxed(FLD_MOD(readl_relaxed(reg), val, start, end), reg)
+
+static void dss_dpll_enable_scp_clk(struct dss_video_pll *vpll)
+{
+ REG_MOD(vpll->clkctrl_base, 1, 14, 14); /* CIO_CLK_ICG */
+}
+
+static void dss_dpll_disable_scp_clk(struct dss_video_pll *vpll)
+{
+ REG_MOD(vpll->clkctrl_base, 0, 14, 14); /* CIO_CLK_ICG */
+}
+
+static void dss_dpll_power_enable(struct dss_video_pll *vpll)
+{
+ REG_MOD(vpll->clkctrl_base, 2, 31, 30); /* PLL_POWER_ON_ALL */
+
+ /*
+ * DRA7x PLL CTRL's PLL_PWR_STATUS seems to always return 0,
+ * so we have to use a fixed delay here.
+ */
+ msleep(1);
+}
+
+static void dss_dpll_power_disable(struct dss_video_pll *vpll)
+{
+ REG_MOD(vpll->clkctrl_base, 0, 31, 30); /* PLL_POWER_OFF */
+}
+
+static int dss_video_pll_enable(struct dss_pll *pll)
+{
+ struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll);
+ int r;
+
+ r = dss_runtime_get();
+ if (r)
+ return r;
+
+ dss_ctrl_pll_enable(pll->id, true);
+
+ dss_dpll_enable_scp_clk(vpll);
+
+ r = dss_pll_wait_reset_done(pll);
+ if (r)
+ goto err_reset;
+
+ dss_dpll_power_enable(vpll);
+
+ return 0;
+
+err_reset:
+ dss_dpll_disable_scp_clk(vpll);
+ dss_ctrl_pll_enable(pll->id, false);
+ dss_runtime_put();
+
+ return r;
+}
+
+static void dss_video_pll_disable(struct dss_pll *pll)
+{
+ struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll);
+
+ dss_dpll_power_disable(vpll);
+
+ dss_dpll_disable_scp_clk(vpll);
+
+ dss_ctrl_pll_enable(pll->id, false);
+
+ dss_runtime_put();
+}
+
+static const struct dss_pll_ops dss_pll_ops = {
+ .enable = dss_video_pll_enable,
+ .disable = dss_video_pll_disable,
+ .set_config = dss_pll_write_config_type_a,
+};
+
+static const struct dss_pll_hw dss_dra7_video_pll_hw = {
+ .n_max = (1 << 8) - 1,
+ .m_max = (1 << 12) - 1,
+ .mX_max = (1 << 5) - 1,
+ .fint_min = 500000,
+ .fint_max = 2500000,
+ .clkdco_max = 1800000000,
+
+ .n_msb = 8,
+ .n_lsb = 1,
+ .m_msb = 20,
+ .m_lsb = 9,
+
+ .mX_msb[0] = 25,
+ .mX_lsb[0] = 21,
+ .mX_msb[1] = 30,
+ .mX_lsb[1] = 26,
+
+ .has_refsel = true,
+};
+
+struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
+ struct regulator *regulator)
+{
+ const char * const reg_name[] = { "pll1", "pll2" };
+ const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
+ const char * const clkin_name[] = { "video1_clk", "video2_clk" };
+
+ struct resource *res;
+ struct dss_video_pll *vpll;
+ void __iomem *pll_base, *clkctrl_base;
+ struct clk *clk;
+ struct dss_pll *pll;
+ int r;
+
+ /* PLL CONTROL */
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "missing platform resource data for pll%d\n", id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pll_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pll_base)) {
+ dev_err(&pdev->dev, "failed to ioremap pll%d reg_name\n", id);
+ return ERR_CAST(pll_base);
+ }
+
+ /* CLOCK CONTROL */
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ clkctrl_name[id]);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "missing platform resource data for pll%d\n", id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ clkctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(clkctrl_base)) {
+ dev_err(&pdev->dev, "failed to ioremap pll%d clkctrl\n", id);
+ return ERR_CAST(clkctrl_base);
+ }
+
+ /* CLKIN */
+
+ clk = devm_clk_get(&pdev->dev, clkin_name[id]);
+ if (IS_ERR(clk)) {
+ DSSERR("can't get video pll clkin\n");
+ return ERR_CAST(clk);
+ }
+
+ vpll = devm_kzalloc(&pdev->dev, sizeof(*vpll), GFP_KERNEL);
+ if (!vpll)
+ return ERR_PTR(-ENOMEM);
+
+ vpll->dev = &pdev->dev;
+ vpll->clkctrl_base = clkctrl_base;
+
+ pll = &vpll->pll;
+
+ pll->name = id == 0 ? "video0" : "video1";
+ pll->id = id == 0 ? DSS_PLL_VIDEO1 : DSS_PLL_VIDEO2;
+ pll->clkin = clk;
+ pll->regulator = regulator;
+ pll->base = pll_base;
+ pll->hw = &dss_dra7_video_pll_hw;
+ pll->ops = &dss_pll_ops;
+
+ r = dss_pll_register(pll);
+ if (r)
+ return ERR_PTR(r);
+
+ return pll;
+}
+
+void dss_video_pll_uninit(struct dss_pll *pll)
+{
+ dss_pll_unregister(pll);
+}
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 146b6f5428db..9ddfdd63b84c 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -137,8 +137,11 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
goto undo;
}
- if (ovl->manager)
- ovl->manager->apply(ovl->manager);
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ goto undo;
+ }
if (pi->enabled) {
r = ovl->enable(ovl);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 7c74f58fc101..0e24eb9c219c 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,10 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, (unsigned long)buf,
- nr_pages, WRITE, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
+ nr_pages, WRITE, 0, pages);
if (ret < nr_pages) {
nr_pages = ret;
diff --git a/drivers/video/fbdev/savage/savagefb.h b/drivers/video/fbdev/savage/savagefb.h
index dcaab9012ca2..8ff4ab1cb69b 100644
--- a/drivers/video/fbdev/savage/savagefb.h
+++ b/drivers/video/fbdev/savage/savagefb.h
@@ -351,32 +351,26 @@ static inline void VGAwSEQ(u8 index, u8 val, struct savagefb_par *par)
static inline void VGAenablePalette(struct savagefb_par *par)
{
- u8 tmp;
-
- tmp = vga_in8(0x3da, par);
+ vga_in8(0x3da, par);
vga_out8(0x3c0, 0x00, par);
par->paletteEnabled = 1;
}
static inline void VGAdisablePalette(struct savagefb_par *par)
{
- u8 tmp;
-
- tmp = vga_in8(0x3da, par);
+ vga_in8(0x3da, par);
vga_out8(0x3c0, 0x20, par);
par->paletteEnabled = 0;
}
static inline void VGAwATTR(u8 index, u8 value, struct savagefb_par *par)
{
- u8 tmp;
-
if (par->paletteEnabled)
index &= ~0x20;
else
index |= 0x20;
- tmp = vga_in8(0x3da, par);
+ vga_in8(0x3da, par);
vga_out8(0x3c0, index, par);
vga_out8 (0x3c0, value, par);
}
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 92cac803dee3..1085c0432158 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+ if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
for_each_child_of_node(of_chosen, np) {
if (of_device_is_compatible(np, "simple-framebuffer"))
of_platform_device_create(np, NULL, NULL);
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index f4daa59f0a80..f7ed6d9016f7 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -122,23 +122,6 @@ static inline int ssd1307fb_write_cmd(struct i2c_client *client, u8 cmd)
return ret;
}
-static inline int ssd1307fb_write_data(struct i2c_client *client, u8 data)
-{
- struct ssd1307fb_array *array;
- int ret;
-
- array = ssd1307fb_alloc_array(1, SSD1307FB_DATA);
- if (!array)
- return -ENOMEM;
-
- array->data[0] = data;
-
- ret = ssd1307fb_write_array(client, array, 1);
- kfree(array);
-
- return ret;
-}
-
static void ssd1307fb_update_display(struct ssd1307fb_par *par)
{
struct ssd1307fb_array *array;
@@ -320,7 +303,10 @@ static int ssd1307fb_ssd1306_init(struct ssd1307fb_par *par)
/* Set initial contrast */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_CONTRAST);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x7f);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x7f);
if (ret < 0)
return ret;
@@ -336,63 +322,99 @@ static int ssd1307fb_ssd1306_init(struct ssd1307fb_par *par)
/* Set multiplex ratio value */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_MULTIPLEX_RATIO);
- ret = ret & ssd1307fb_write_cmd(par->client, par->height - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, par->height - 1);
if (ret < 0)
return ret;
/* set display offset value */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_DISPLAY_OFFSET);
+ if (ret < 0)
+ return ret;
+
ret = ssd1307fb_write_cmd(par->client, 0x20);
if (ret < 0)
return ret;
/* Set clock frequency */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_CLOCK_FREQ);
- ret = ret & ssd1307fb_write_cmd(par->client, 0xf0);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0xf0);
if (ret < 0)
return ret;
/* Set precharge period in number of ticks from the internal clock */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_PRECHARGE_PERIOD);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x22);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x22);
if (ret < 0)
return ret;
/* Set COM pins configuration */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_COM_PINS_CONFIG);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x22);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x22);
if (ret < 0)
return ret;
/* Set VCOMH */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_VCOMH);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x49);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x49);
if (ret < 0)
return ret;
/* Turn on the DC-DC Charge Pump */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_CHARGE_PUMP);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x14);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x14);
if (ret < 0)
return ret;
/* Switch to horizontal addressing mode */
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_ADDRESS_MODE);
- ret = ret & ssd1307fb_write_cmd(par->client,
- SSD1307FB_SET_ADDRESS_MODE_HORIZONTAL);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client,
+ SSD1307FB_SET_ADDRESS_MODE_HORIZONTAL);
if (ret < 0)
return ret;
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_COL_RANGE);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x0);
- ret = ret & ssd1307fb_write_cmd(par->client, par->width - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, par->width - 1);
if (ret < 0)
return ret;
ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_PAGE_RANGE);
- ret = ret & ssd1307fb_write_cmd(par->client, 0x0);
- ret = ret & ssd1307fb_write_cmd(par->client,
- par->page_offset + (par->height / 8) - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd1307fb_write_cmd(par->client,
+ par->page_offset + (par->height / 8) - 1);
if (ret < 0)
return ret;
@@ -460,7 +482,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
par->width = 96;
if (of_property_read_u32(node, "solomon,height", &par->height))
- par->width = 16;
+ par->height = 16;
if (of_property_read_u32(node, "solomon,page-offset", &par->page_offset))
par->page_offset = 1;
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index ffaf29eeaaba..1a1176bf0906 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -113,10 +113,8 @@ static int vt8500lcd_set_par(struct fb_info *info)
}
for (i = 0; i < 8; i++) {
- if (bpp_values[i] == info->var.bits_per_pixel) {
+ if (bpp_values[i] == info->var.bits_per_pixel)
reg_bpp = i;
- continue;
- }
}
control0 = readl(fbi->regbase) & ~0xf;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 9e758a8f890d..162689227a23 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -27,10 +27,12 @@
#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/string.h>
+#include <linux/device.h>
-static void hdmi_infoframe_checksum(void *buffer, size_t size)
+#define hdmi_log(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
+
+static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
{
- u8 *ptr = buffer;
u8 csum = 0;
size_t i;
@@ -38,7 +40,14 @@ static void hdmi_infoframe_checksum(void *buffer, size_t size)
for (i = 0; i < size; i++)
csum += ptr[i];
- ptr[3] = 256 - csum;
+ return 256 - csum;
+}
+
+static void hdmi_infoframe_set_checksum(void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+
+ ptr[3] = hdmi_infoframe_checksum(buffer, size);
}
/**
@@ -136,7 +145,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
ptr[11] = frame->right_bar & 0xff;
ptr[12] = (frame->right_bar >> 8) & 0xff;
- hdmi_infoframe_checksum(buffer, length);
+ hdmi_infoframe_set_checksum(buffer, length);
return length;
}
@@ -206,7 +215,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
ptr[24] = frame->sdi;
- hdmi_infoframe_checksum(buffer, length);
+ hdmi_infoframe_set_checksum(buffer, length);
return length;
}
@@ -281,7 +290,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
if (frame->downmix_inhibit)
ptr[4] |= BIT(7);
- hdmi_infoframe_checksum(buffer, length);
+ hdmi_infoframe_set_checksum(buffer, length);
return length;
}
@@ -373,7 +382,7 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
}
- hdmi_infoframe_checksum(buffer, length);
+ hdmi_infoframe_set_checksum(buffer, length);
return length;
}
@@ -434,3 +443,802 @@ hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
return length;
}
EXPORT_SYMBOL(hdmi_infoframe_pack);
+
+static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type)
+{
+ if (type < 0x80 || type > 0x9f)
+ return "Invalid";
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return "Vendor";
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return "Auxiliary Video Information (AVI)";
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return "Source Product Description (SPD)";
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return "Audio";
+ }
+ return "Reserved";
+}
+
+static void hdmi_infoframe_log_header(const char *level,
+ struct device *dev,
+ struct hdmi_any_infoframe *frame)
+{
+ hdmi_log("HDMI infoframe: %s, version %u, length %u\n",
+ hdmi_infoframe_type_get_name(frame->type),
+ frame->version, frame->length);
+}
+
+static const char *hdmi_colorspace_get_name(enum hdmi_colorspace colorspace)
+{
+ switch (colorspace) {
+ case HDMI_COLORSPACE_RGB:
+ return "RGB";
+ case HDMI_COLORSPACE_YUV422:
+ return "YCbCr 4:2:2";
+ case HDMI_COLORSPACE_YUV444:
+ return "YCbCr 4:4:4";
+ case HDMI_COLORSPACE_YUV420:
+ return "YCbCr 4:2:0";
+ case HDMI_COLORSPACE_RESERVED4:
+ return "Reserved (4)";
+ case HDMI_COLORSPACE_RESERVED5:
+ return "Reserved (5)";
+ case HDMI_COLORSPACE_RESERVED6:
+ return "Reserved (6)";
+ case HDMI_COLORSPACE_IDO_DEFINED:
+ return "IDO Defined";
+ }
+ return "Invalid";
+}
+
+static const char *hdmi_scan_mode_get_name(enum hdmi_scan_mode scan_mode)
+{
+ switch (scan_mode) {
+ case HDMI_SCAN_MODE_NONE:
+ return "No Data";
+ case HDMI_SCAN_MODE_OVERSCAN:
+ return "Overscan";
+ case HDMI_SCAN_MODE_UNDERSCAN:
+ return "Underscan";
+ case HDMI_SCAN_MODE_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *hdmi_colorimetry_get_name(enum hdmi_colorimetry colorimetry)
+{
+ switch (colorimetry) {
+ case HDMI_COLORIMETRY_NONE:
+ return "No Data";
+ case HDMI_COLORIMETRY_ITU_601:
+ return "ITU601";
+ case HDMI_COLORIMETRY_ITU_709:
+ return "ITU709";
+ case HDMI_COLORIMETRY_EXTENDED:
+ return "Extended";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
+{
+ switch (picture_aspect) {
+ case HDMI_PICTURE_ASPECT_NONE:
+ return "No Data";
+ case HDMI_PICTURE_ASPECT_4_3:
+ return "4:3";
+ case HDMI_PICTURE_ASPECT_16_9:
+ return "16:9";
+ case HDMI_PICTURE_ASPECT_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_active_aspect_get_name(enum hdmi_active_aspect active_aspect)
+{
+ if (active_aspect < 0 || active_aspect > 0xf)
+ return "Invalid";
+
+ switch (active_aspect) {
+ case HDMI_ACTIVE_ASPECT_16_9_TOP:
+ return "16:9 Top";
+ case HDMI_ACTIVE_ASPECT_14_9_TOP:
+ return "14:9 Top";
+ case HDMI_ACTIVE_ASPECT_16_9_CENTER:
+ return "16:9 Center";
+ case HDMI_ACTIVE_ASPECT_PICTURE:
+ return "Same as Picture";
+ case HDMI_ACTIVE_ASPECT_4_3:
+ return "4:3";
+ case HDMI_ACTIVE_ASPECT_16_9:
+ return "16:9";
+ case HDMI_ACTIVE_ASPECT_14_9:
+ return "14:9";
+ case HDMI_ACTIVE_ASPECT_4_3_SP_14_9:
+ return "4:3 SP 14:9";
+ case HDMI_ACTIVE_ASPECT_16_9_SP_14_9:
+ return "16:9 SP 14:9";
+ case HDMI_ACTIVE_ASPECT_16_9_SP_4_3:
+ return "16:9 SP 4:3";
+ }
+ return "Reserved";
+}
+
+static const char *
+hdmi_extended_colorimetry_get_name(enum hdmi_extended_colorimetry ext_col)
+{
+ switch (ext_col) {
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601:
+ return "xvYCC 601";
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709:
+ return "xvYCC 709";
+ case HDMI_EXTENDED_COLORIMETRY_S_YCC_601:
+ return "sYCC 601";
+ case HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601:
+ return "Adobe YCC 601";
+ case HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB:
+ return "Adobe RGB";
+ case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM:
+ return "BT.2020 Constant Luminance";
+ case HDMI_EXTENDED_COLORIMETRY_BT2020:
+ return "BT.2020";
+ case HDMI_EXTENDED_COLORIMETRY_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_quantization_range_get_name(enum hdmi_quantization_range qrange)
+{
+ switch (qrange) {
+ case HDMI_QUANTIZATION_RANGE_DEFAULT:
+ return "Default";
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ return "Limited";
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ return "Full";
+ case HDMI_QUANTIZATION_RANGE_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *hdmi_nups_get_name(enum hdmi_nups nups)
+{
+ switch (nups) {
+ case HDMI_NUPS_UNKNOWN:
+ return "Unknown Non-uniform Scaling";
+ case HDMI_NUPS_HORIZONTAL:
+ return "Horizontally Scaled";
+ case HDMI_NUPS_VERTICAL:
+ return "Vertically Scaled";
+ case HDMI_NUPS_BOTH:
+ return "Horizontally and Vertically Scaled";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_ycc_quantization_range_get_name(enum hdmi_ycc_quantization_range qrange)
+{
+ switch (qrange) {
+ case HDMI_YCC_QUANTIZATION_RANGE_LIMITED:
+ return "Limited";
+ case HDMI_YCC_QUANTIZATION_RANGE_FULL:
+ return "Full";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_content_type_get_name(enum hdmi_content_type content_type)
+{
+ switch (content_type) {
+ case HDMI_CONTENT_TYPE_GRAPHICS:
+ return "Graphics";
+ case HDMI_CONTENT_TYPE_PHOTO:
+ return "Photo";
+ case HDMI_CONTENT_TYPE_CINEMA:
+ return "Cinema";
+ case HDMI_CONTENT_TYPE_GAME:
+ return "Game";
+ }
+ return "Invalid";
+}
+
+/**
+ * hdmi_avi_infoframe_log() - log info of HDMI AVI infoframe
+ * @level: logging level
+ * @dev: device
+ * @frame: HDMI AVI infoframe
+ */
+static void hdmi_avi_infoframe_log(const char *level,
+ struct device *dev,
+ struct hdmi_avi_infoframe *frame)
+{
+ hdmi_infoframe_log_header(level, dev,
+ (struct hdmi_any_infoframe *)frame);
+
+ hdmi_log(" colorspace: %s\n",
+ hdmi_colorspace_get_name(frame->colorspace));
+ hdmi_log(" scan mode: %s\n",
+ hdmi_scan_mode_get_name(frame->scan_mode));
+ hdmi_log(" colorimetry: %s\n",
+ hdmi_colorimetry_get_name(frame->colorimetry));
+ hdmi_log(" picture aspect: %s\n",
+ hdmi_picture_aspect_get_name(frame->picture_aspect));
+ hdmi_log(" active aspect: %s\n",
+ hdmi_active_aspect_get_name(frame->active_aspect));
+ hdmi_log(" itc: %s\n", frame->itc ? "IT Content" : "No Data");
+ hdmi_log(" extended colorimetry: %s\n",
+ hdmi_extended_colorimetry_get_name(frame->extended_colorimetry));
+ hdmi_log(" quantization range: %s\n",
+ hdmi_quantization_range_get_name(frame->quantization_range));
+ hdmi_log(" nups: %s\n", hdmi_nups_get_name(frame->nups));
+ hdmi_log(" video code: %u\n", frame->video_code);
+ hdmi_log(" ycc quantization range: %s\n",
+ hdmi_ycc_quantization_range_get_name(frame->ycc_quantization_range));
+ hdmi_log(" hdmi content type: %s\n",
+ hdmi_content_type_get_name(frame->content_type));
+ hdmi_log(" pixel repeat: %u\n", frame->pixel_repeat);
+ hdmi_log(" bar top %u, bottom %u, left %u, right %u\n",
+ frame->top_bar, frame->bottom_bar,
+ frame->left_bar, frame->right_bar);
+}
+
+static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi)
+{
+ if (sdi < 0 || sdi > 0xff)
+ return "Invalid";
+ switch (sdi) {
+ case HDMI_SPD_SDI_UNKNOWN:
+ return "Unknown";
+ case HDMI_SPD_SDI_DSTB:
+ return "Digital STB";
+ case HDMI_SPD_SDI_DVDP:
+ return "DVD Player";
+ case HDMI_SPD_SDI_DVHS:
+ return "D-VHS";
+ case HDMI_SPD_SDI_HDDVR:
+ return "HDD Videorecorder";
+ case HDMI_SPD_SDI_DVC:
+ return "DVC";
+ case HDMI_SPD_SDI_DSC:
+ return "DSC";
+ case HDMI_SPD_SDI_VCD:
+ return "Video CD";
+ case HDMI_SPD_SDI_GAME:
+ return "Game";
+ case HDMI_SPD_SDI_PC:
+ return "PC General";
+ case HDMI_SPD_SDI_BD:
+ return "Blu-Ray Disc (BD)";
+ case HDMI_SPD_SDI_SACD:
+ return "Super Audio CD";
+ case HDMI_SPD_SDI_HDDVD:
+ return "HD DVD";
+ case HDMI_SPD_SDI_PMP:
+ return "PMP";
+ }
+ return "Reserved";
+}
+
+/**
+ * hdmi_spd_infoframe_log() - log info of HDMI SPD infoframe
+ * @level: logging level
+ * @dev: device
+ * @frame: HDMI SPD infoframe
+ */
+static void hdmi_spd_infoframe_log(const char *level,
+ struct device *dev,
+ struct hdmi_spd_infoframe *frame)
+{
+ u8 buf[17];
+
+ hdmi_infoframe_log_header(level, dev,
+ (struct hdmi_any_infoframe *)frame);
+
+ memset(buf, 0, sizeof(buf));
+
+ strncpy(buf, frame->vendor, 8);
+ hdmi_log(" vendor: %s\n", buf);
+ strncpy(buf, frame->product, 16);
+ hdmi_log(" product: %s\n", buf);
+ hdmi_log(" source device information: %s (0x%x)\n",
+ hdmi_spd_sdi_get_name(frame->sdi), frame->sdi);
+}
+
+static const char *
+hdmi_audio_coding_type_get_name(enum hdmi_audio_coding_type coding_type)
+{
+ switch (coding_type) {
+ case HDMI_AUDIO_CODING_TYPE_STREAM:
+ return "Refer to Stream Header";
+ case HDMI_AUDIO_CODING_TYPE_PCM:
+ return "PCM";
+ case HDMI_AUDIO_CODING_TYPE_AC3:
+ return "AC-3";
+ case HDMI_AUDIO_CODING_TYPE_MPEG1:
+ return "MPEG1";
+ case HDMI_AUDIO_CODING_TYPE_MP3:
+ return "MP3";
+ case HDMI_AUDIO_CODING_TYPE_MPEG2:
+ return "MPEG2";
+ case HDMI_AUDIO_CODING_TYPE_AAC_LC:
+ return "AAC";
+ case HDMI_AUDIO_CODING_TYPE_DTS:
+ return "DTS";
+ case HDMI_AUDIO_CODING_TYPE_ATRAC:
+ return "ATRAC";
+ case HDMI_AUDIO_CODING_TYPE_DSD:
+ return "One Bit Audio";
+ case HDMI_AUDIO_CODING_TYPE_EAC3:
+ return "Dolby Digital +";
+ case HDMI_AUDIO_CODING_TYPE_DTS_HD:
+ return "DTS-HD";
+ case HDMI_AUDIO_CODING_TYPE_MLP:
+ return "MAT (MLP)";
+ case HDMI_AUDIO_CODING_TYPE_DST:
+ return "DST";
+ case HDMI_AUDIO_CODING_TYPE_WMA_PRO:
+ return "WMA PRO";
+ case HDMI_AUDIO_CODING_TYPE_CXT:
+ return "Refer to CXT";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_audio_sample_size_get_name(enum hdmi_audio_sample_size sample_size)
+{
+ switch (sample_size) {
+ case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
+ return "Refer to Stream Header";
+ case HDMI_AUDIO_SAMPLE_SIZE_16:
+ return "16 bit";
+ case HDMI_AUDIO_SAMPLE_SIZE_20:
+ return "20 bit";
+ case HDMI_AUDIO_SAMPLE_SIZE_24:
+ return "24 bit";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_audio_sample_frequency_get_name(enum hdmi_audio_sample_frequency freq)
+{
+ switch (freq) {
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM:
+ return "Refer to Stream Header";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_32000:
+ return "32 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_44100:
+ return "44.1 kHz (CD)";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_48000:
+ return "48 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_88200:
+ return "88.2 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_96000:
+ return "96 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_176400:
+ return "176.4 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_192000:
+ return "192 kHz";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx)
+{
+ if (ctx < 0 || ctx > 0x1f)
+ return "Invalid";
+
+ switch (ctx) {
+ case HDMI_AUDIO_CODING_TYPE_EXT_CT:
+ return "Refer to CT";
+ case HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC:
+ return "HE AAC";
+ case HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2:
+ return "HE AAC v2";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND:
+ return "MPEG SURROUND";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC:
+ return "MPEG-4 HE AAC";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2:
+ return "MPEG-4 HE AAC v2";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC:
+ return "MPEG-4 AAC LC";
+ case HDMI_AUDIO_CODING_TYPE_EXT_DRA:
+ return "DRA";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND:
+ return "MPEG-4 HE AAC + MPEG Surround";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND:
+ return "MPEG-4 AAC LC + MPEG Surround";
+ }
+ return "Reserved";
+}
+
+/**
+ * hdmi_audio_infoframe_log() - log info of HDMI AUDIO infoframe
+ * @level: logging level
+ * @dev: device
+ * @frame: HDMI AUDIO infoframe
+ */
+static void hdmi_audio_infoframe_log(const char *level,
+ struct device *dev,
+ struct hdmi_audio_infoframe *frame)
+{
+ hdmi_infoframe_log_header(level, dev,
+ (struct hdmi_any_infoframe *)frame);
+
+ if (frame->channels)
+ hdmi_log(" channels: %u\n", frame->channels - 1);
+ else
+ hdmi_log(" channels: Refer to stream header\n");
+ hdmi_log(" coding type: %s\n",
+ hdmi_audio_coding_type_get_name(frame->coding_type));
+ hdmi_log(" sample size: %s\n",
+ hdmi_audio_sample_size_get_name(frame->sample_size));
+ hdmi_log(" sample frequency: %s\n",
+ hdmi_audio_sample_frequency_get_name(frame->sample_frequency));
+ hdmi_log(" coding type ext: %s\n",
+ hdmi_audio_coding_type_ext_get_name(frame->coding_type_ext));
+ hdmi_log(" channel allocation: 0x%x\n",
+ frame->channel_allocation);
+ hdmi_log(" level shift value: %u dB\n",
+ frame->level_shift_value);
+ hdmi_log(" downmix inhibit: %s\n",
+ frame->downmix_inhibit ? "Yes" : "No");
+}
+
+static const char *
+hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
+{
+ if (s3d_struct < 0 || s3d_struct > 0xf)
+ return "Invalid";
+
+ switch (s3d_struct) {
+ case HDMI_3D_STRUCTURE_FRAME_PACKING:
+ return "Frame Packing";
+ case HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE:
+ return "Field Alternative";
+ case HDMI_3D_STRUCTURE_LINE_ALTERNATIVE:
+ return "Line Alternative";
+ case HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL:
+ return "Side-by-side (Full)";
+ case HDMI_3D_STRUCTURE_L_DEPTH:
+ return "L + Depth";
+ case HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH:
+ return "L + Depth + Graphics + Graphics-depth";
+ case HDMI_3D_STRUCTURE_TOP_AND_BOTTOM:
+ return "Top-and-Bottom";
+ case HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF:
+ return "Side-by-side (Half)";
+ default:
+ break;
+ }
+ return "Reserved";
+}
+
+/**
+ * hdmi_vendor_any_infoframe_log() - log info of HDMI VENDOR infoframe
+ * @level: logging level
+ * @dev: device
+ * @frame: HDMI VENDOR infoframe
+ */
+static void
+hdmi_vendor_any_infoframe_log(const char *level,
+ struct device *dev,
+ union hdmi_vendor_any_infoframe *frame)
+{
+ struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+
+ hdmi_infoframe_log_header(level, dev,
+ (struct hdmi_any_infoframe *)frame);
+
+ if (frame->any.oui != HDMI_IEEE_OUI) {
+ hdmi_log(" not a HDMI vendor infoframe\n");
+ return;
+ }
+ if (hvf->vic == 0 && hvf->s3d_struct == HDMI_3D_STRUCTURE_INVALID) {
+ hdmi_log(" empty frame\n");
+ return;
+ }
+
+ if (hvf->vic)
+ hdmi_log(" HDMI VIC: %u\n", hvf->vic);
+ if (hvf->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
+ hdmi_log(" 3D structure: %s\n",
+ hdmi_3d_structure_get_name(hvf->s3d_struct));
+ if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ hdmi_log(" 3D extension data: %d\n",
+ hvf->s3d_ext_data);
+ }
+}
+
+/**
+ * hdmi_infoframe_log() - log info of HDMI infoframe
+ * @level: logging level
+ * @dev: device
+ * @frame: HDMI infoframe
+ */
+void hdmi_infoframe_log(const char *level,
+ struct device *dev,
+ union hdmi_infoframe *frame)
+{
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ hdmi_avi_infoframe_log(level, dev, &frame->avi);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ hdmi_spd_infoframe_log(level, dev, &frame->spd);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ hdmi_audio_infoframe_log(level, dev, &frame->audio);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ hdmi_vendor_any_infoframe_log(level, dev, &frame->vendor);
+ break;
+ }
+}
+EXPORT_SYMBOL(hdmi_infoframe_log);
+
+/**
+ * hdmi_avi_infoframe_unpack() - unpack binary buffer to a HDMI AVI infoframe
+ * @buffer: source buffer
+ * @frame: HDMI AVI infoframe
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Auxiliary Video (AVI) information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
+ void *buffer)
+{
+ u8 *ptr = buffer;
+ int ret;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_AVI ||
+ ptr[1] != 2 ||
+ ptr[2] != HDMI_AVI_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
+ return -EINVAL;
+
+ ret = hdmi_avi_infoframe_init(frame);
+ if (ret)
+ return ret;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ frame->colorspace = (ptr[0] >> 5) & 0x3;
+ if (ptr[0] & 0x10)
+ frame->active_aspect = ptr[1] & 0xf;
+ if (ptr[0] & 0x8) {
+ frame->top_bar = (ptr[5] << 8) + ptr[6];
+ frame->bottom_bar = (ptr[7] << 8) + ptr[8];
+ }
+ if (ptr[0] & 0x4) {
+ frame->left_bar = (ptr[9] << 8) + ptr[10];
+ frame->right_bar = (ptr[11] << 8) + ptr[12];
+ }
+ frame->scan_mode = ptr[0] & 0x3;
+
+ frame->colorimetry = (ptr[1] >> 6) & 0x3;
+ frame->picture_aspect = (ptr[1] >> 4) & 0x3;
+ frame->active_aspect = ptr[1] & 0xf;
+
+ frame->itc = ptr[2] & 0x80 ? true : false;
+ frame->extended_colorimetry = (ptr[2] >> 4) & 0x7;
+ frame->quantization_range = (ptr[2] >> 2) & 0x3;
+ frame->nups = ptr[2] & 0x3;
+
+ frame->video_code = ptr[3] & 0x7f;
+ frame->ycc_quantization_range = (ptr[4] >> 6) & 0x3;
+ frame->content_type = (ptr[4] >> 4) & 0x3;
+
+ frame->pixel_repeat = ptr[4] & 0xf;
+
+ return 0;
+}
+
+/**
+ * hdmi_spd_infoframe_unpack() - unpack binary buffer to a HDMI SPD infoframe
+ * @buffer: source buffer
+ * @frame: HDMI SPD infoframe
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Source Product Description (SPD) information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
+ void *buffer)
+{
+ u8 *ptr = buffer;
+ int ret;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_SPD ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_SPD_INFOFRAME_SIZE) {
+ return -EINVAL;
+ }
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(SPD)) != 0)
+ return -EINVAL;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ret = hdmi_spd_infoframe_init(frame, ptr, ptr + 8);
+ if (ret)
+ return ret;
+
+ frame->sdi = ptr[24];
+
+ return 0;
+}
+
+/**
+ * hdmi_audio_infoframe_unpack() - unpack binary buffer to a HDMI AUDIO infoframe
+ * @buffer: source buffer
+ * @frame: HDMI Audio infoframe
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Audio information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
+ void *buffer)
+{
+ u8 *ptr = buffer;
+ int ret;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_AUDIO ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_AUDIO_INFOFRAME_SIZE) {
+ return -EINVAL;
+ }
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AUDIO)) != 0)
+ return -EINVAL;
+
+ ret = hdmi_audio_infoframe_init(frame);
+ if (ret)
+ return ret;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ frame->channels = ptr[0] & 0x7;
+ frame->coding_type = (ptr[0] >> 4) & 0xf;
+ frame->sample_size = ptr[1] & 0x3;
+ frame->sample_frequency = (ptr[1] >> 2) & 0x7;
+ frame->coding_type_ext = ptr[2] & 0x1f;
+ frame->channel_allocation = ptr[3];
+ frame->level_shift_value = (ptr[4] >> 3) & 0xf;
+ frame->downmix_inhibit = ptr[4] & 0x80 ? true : false;
+
+ return 0;
+}
+
+/**
+ * hdmi_vendor_infoframe_unpack() - unpack binary buffer to a HDMI vendor infoframe
+ * @buffer: source buffer
+ * @frame: HDMI Vendor infoframe
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Vendor information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int
+hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int ret;
+ u8 hdmi_video_format;
+ struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
+ ptr[1] != 1 ||
+ (ptr[2] != 5 && ptr[2] != 6))
+ return -EINVAL;
+
+ length = ptr[2];
+
+ if (hdmi_infoframe_checksum(buffer,
+ HDMI_INFOFRAME_HEADER_SIZE + length) != 0)
+ return -EINVAL;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ /* HDMI OUI */
+ if ((ptr[0] != 0x03) ||
+ (ptr[1] != 0x0c) ||
+ (ptr[2] != 0x00))
+ return -EINVAL;
+
+ hdmi_video_format = ptr[3] >> 5;
+
+ if (hdmi_video_format > 0x2)
+ return -EINVAL;
+
+ ret = hdmi_vendor_infoframe_init(hvf);
+ if (ret)
+ return ret;
+
+ hvf->length = length;
+
+ if (hdmi_video_format == 0x1) {
+ hvf->vic = ptr[4];
+ } else if (hdmi_video_format == 0x2) {
+ hvf->s3d_struct = ptr[4] >> 4;
+ if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
+ if (length == 6)
+ hvf->s3d_ext_data = ptr[5] >> 4;
+ else
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
+ * @buffer: source buffer
+ * @frame: HDMI infoframe
+ *
+ * Unpacks the information contained in binary buffer @buffer into a structured
+ * @frame of a HDMI infoframe.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer)
+{
+ int ret;
+ u8 *ptr = buffer;
+
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(hdmi_infoframe_unpack);
diff --git a/drivers/video/vgastate.c b/drivers/video/vgastate.c
index b91c466225b9..548c751d2415 100644
--- a/drivers/video/vgastate.c
+++ b/drivers/video/vgastate.c
@@ -2,7 +2,7 @@
* linux/drivers/video/vgastate.c -- VGA state save/restore
*
* Copyright 2002 James Simmons
- *
+ *
* Copyright history from vga16fb.c:
* Copyright 1999 Ben Pfaff and Petr Vandrovec
* Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm
@@ -10,7 +10,7 @@
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
- * archive for more details.
+ * archive for more details.
*
*/
#include <linux/module.h>
@@ -29,16 +29,16 @@ struct regstate {
__u8 *gfx;
__u8 *seq;
__u8 misc;
-};
+};
-static inline unsigned char vga_rcrtcs(void __iomem *regbase, unsigned short iobase,
+static inline unsigned char vga_rcrtcs(void __iomem *regbase, unsigned short iobase,
unsigned char reg)
{
vga_w(regbase, iobase + 0x4, reg);
return vga_r(regbase, iobase + 0x5);
}
-static inline void vga_wcrtcs(void __iomem *regbase, unsigned short iobase,
+static inline void vga_wcrtcs(void __iomem *regbase, unsigned short iobase,
unsigned char reg, unsigned char val)
{
vga_w(regbase, iobase + 0x4, reg);
@@ -71,7 +71,7 @@ static void save_vga_text(struct vgastate *state, void __iomem *fbbase)
gr6 = vga_rgfx(state->vgabase, VGA_GFX_MISC);
seq2 = vga_rseq(state->vgabase, VGA_SEQ_PLANE_WRITE);
seq4 = vga_rseq(state->vgabase, VGA_SEQ_MEMORY_MODE);
-
+
/* blank screen */
seq1 = vga_rseq(state->vgabase, VGA_SEQ_CLOCK_MODE);
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
@@ -85,7 +85,7 @@ static void save_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x2);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < 4 * 8192; i++)
+ for (i = 0; i < 4 * 8192; i++)
saved->vga_font0[i] = vga_r(fbbase, i);
}
@@ -96,10 +96,10 @@ static void save_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x3);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < state->memsize; i++)
+ for (i = 0; i < state->memsize; i++)
saved->vga_font1[i] = vga_r(fbbase, i);
}
-
+
/* save font at plane 0/1 */
if (state->flags & VGA_SAVE_TEXT) {
vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x1);
@@ -107,7 +107,7 @@ static void save_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < 8192; i++)
+ for (i = 0; i < 8192; i++)
saved->vga_text[i] = vga_r(fbbase, i);
vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x2);
@@ -115,8 +115,8 @@ static void save_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x1);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < 8192; i++)
- saved->vga_text[8192+i] = vga_r(fbbase + 2 * 8192, i);
+ for (i = 0; i < 8192; i++)
+ saved->vga_text[8192+i] = vga_r(fbbase + 2 * 8192, i);
}
/* restore regs */
@@ -151,7 +151,7 @@ static void restore_vga_text(struct vgastate *state, void __iomem *fbbase)
gr8 = vga_rgfx(state->vgabase, VGA_GFX_BIT_MASK);
seq2 = vga_rseq(state->vgabase, VGA_SEQ_PLANE_WRITE);
seq4 = vga_rseq(state->vgabase, VGA_SEQ_MEMORY_MODE);
-
+
/* blank screen */
seq1 = vga_rseq(state->vgabase, VGA_SEQ_CLOCK_MODE);
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
@@ -163,7 +163,7 @@ static void restore_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_BIT_MASK, 0xff);
vga_wgfx(state->vgabase, VGA_GFX_SR_ENABLE, 0x00);
}
-
+
/* restore font at plane 2 */
if (state->flags & VGA_SAVE_FONT0) {
vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x4);
@@ -171,7 +171,7 @@ static void restore_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x2);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < 4 * 8192; i++)
+ for (i = 0; i < 4 * 8192; i++)
vga_w(fbbase, i, saved->vga_font0[i]);
}
@@ -182,10 +182,10 @@ static void restore_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x3);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < state->memsize; i++)
+ for (i = 0; i < state->memsize; i++)
vga_w(fbbase, i, saved->vga_font1[i]);
}
-
+
/* restore font at plane 0/1 */
if (state->flags & VGA_SAVE_TEXT) {
vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x1);
@@ -193,16 +193,16 @@ static void restore_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < 8192; i++)
+ for (i = 0; i < 8192; i++)
vga_w(fbbase, i, saved->vga_text[i]);
-
+
vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x2);
vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x6);
vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x1);
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x0);
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x5);
- for (i = 0; i < 8192; i++)
- vga_w(fbbase, i, saved->vga_text[8192+i]);
+ for (i = 0; i < 8192; i++)
+ vga_w(fbbase, i, saved->vga_text[8192+i]);
}
/* unblank screen */
@@ -222,7 +222,7 @@ static void restore_vga_text(struct vgastate *state, void __iomem *fbbase)
vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, seq2);
vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, seq4);
}
-
+
static void save_vga_mode(struct vgastate *state)
{
struct regstate *saved = (struct regstate *) state->vidstate;
@@ -235,10 +235,10 @@ static void save_vga_mode(struct vgastate *state)
else
iobase = 0x3b0;
- for (i = 0; i < state->num_crtc; i++)
+ for (i = 0; i < state->num_crtc; i++)
saved->crtc[i] = vga_rcrtcs(state->vgabase, iobase, i);
-
- vga_r(state->vgabase, iobase + 0xa);
+
+ vga_r(state->vgabase, iobase + 0xa);
vga_w(state->vgabase, VGA_ATT_W, 0x00);
for (i = 0; i < state->num_attr; i++) {
vga_r(state->vgabase, iobase + 0xa);
@@ -247,10 +247,10 @@ static void save_vga_mode(struct vgastate *state)
vga_r(state->vgabase, iobase + 0xa);
vga_w(state->vgabase, VGA_ATT_W, 0x20);
- for (i = 0; i < state->num_gfx; i++)
+ for (i = 0; i < state->num_gfx; i++)
saved->gfx[i] = vga_rgfx(state->vgabase, i);
- for (i = 0; i < state->num_seq; i++)
+ for (i = 0; i < state->num_seq; i++)
saved->seq[i] = vga_rseq(state->vgabase, i);
}
@@ -268,26 +268,26 @@ static void restore_vga_mode(struct vgastate *state)
iobase = 0x3b0;
/* turn off display */
- vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE,
+ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE,
saved->seq[VGA_SEQ_CLOCK_MODE] | 0x20);
/* disable sequencer */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
-
+
/* enable palette addressing */
vga_r(state->vgabase, iobase + 0xa);
vga_w(state->vgabase, VGA_ATT_W, 0x00);
- for (i = 2; i < state->num_seq; i++)
+ for (i = 2; i < state->num_seq; i++)
vga_wseq(state->vgabase, i, saved->seq[i]);
/* unprotect vga regs */
vga_wcrtcs(state->vgabase, iobase, 17, saved->crtc[17] & ~0x80);
- for (i = 0; i < state->num_crtc; i++)
+ for (i = 0; i < state->num_crtc; i++)
vga_wcrtcs(state->vgabase, iobase, i, saved->crtc[i]);
-
- for (i = 0; i < state->num_gfx; i++)
+
+ for (i = 0; i < state->num_gfx; i++)
vga_wgfx(state->vgabase, i, saved->gfx[i]);
for (i = 0; i < state->num_attr; i++) {
@@ -298,7 +298,7 @@ static void restore_vga_mode(struct vgastate *state)
/* reenable sequencer */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03);
/* turn display on */
- vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE,
+ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE,
saved->seq[VGA_SEQ_CLOCK_MODE] & ~(1 << 5));
/* disable video/palette source */
@@ -312,7 +312,7 @@ static void save_vga_cmap(struct vgastate *state)
int i;
vga_w(state->vgabase, VGA_PEL_MSK, 0xff);
-
+
/* assumes DAC is readable and writable */
vga_w(state->vgabase, VGA_PEL_IR, 0x00);
for (i = 0; i < 768; i++)
@@ -346,7 +346,7 @@ static void vga_cleanup(struct vgastate *state)
state->vidstate = NULL;
}
}
-
+
int save_vga(struct vgastate *state)
{
struct regstate *saved;
@@ -357,7 +357,7 @@ int save_vga(struct vgastate *state)
return 1;
state->vidstate = (void *)saved;
-
+
if (state->flags & VGA_SAVE_CMAP) {
saved->vga_cmap = vmalloc(768);
if (!saved->vga_cmap) {
@@ -403,7 +403,7 @@ int save_vga(struct vgastate *state)
}
if (!state->memsize)
state->memsize = 8 * 8192;
-
+
if (!state->membase)
state->membase = 0xA0000;
@@ -414,7 +414,7 @@ int save_vga(struct vgastate *state)
return 1;
}
- /*
+ /*
* save only first 32K used by vgacon
*/
if (state->flags & VGA_SAVE_FONT0) {
@@ -425,7 +425,7 @@ int save_vga(struct vgastate *state)
return 1;
}
}
- /*
+ /*
* largely unused, but if required by the caller
* we'll just save everything.
*/
@@ -448,7 +448,7 @@ int save_vga(struct vgastate *state)
return 1;
}
}
-
+
save_vga_text(state, fbbase);
iounmap(fbbase);
}
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 7516030037a1..d95fb848dd03 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -502,7 +502,7 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
if (bridge->master_get == NULL) {
- printk(KERN_WARNING "vme_master_set not supported\n");
+ printk(KERN_WARNING "%s not supported\n", __func__);
return -EINVAL;
}
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 5927c0a98a74..bcfd2a22208f 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
.shutdown = cdns_wdt_shutdown,
.driver = {
.name = "cdns-wdt",
- .owner = THIS_MODULE,
.of_match_table = cdns_wdt_of_match,
.pm = &cdns_wdt_pm_ops,
},
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index d6add516a7a7..5142bbabe027 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -52,6 +52,8 @@
#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */
#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */
+#define IMX2_WDT_WMCR 0x08 /* Misc Register */
+
#define IMX2_WDT_MAX_TIME 128
#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
imx2_wdt_ping_if_active(wdog);
+ /*
+ * Disable the watchdog power down counter at boot. Otherwise the power
+ * down counter will pull down the #WDOG interrupt line for one clock
+ * cycle.
+ */
+ regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
+
ret = watchdog_register_device(wdog);
if (ret) {
dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-/* Disable watchdog if it is active during suspend */
+/* Disable the watchdog if it is active, or not active but still running */
static int imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
- imx2_wdt_ping(wdog);
+ /* The watchdog IP block is running */
+ if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
- /* Watchdog has been stopped but IP block is still running */
- if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
- del_timer_sync(&wdev->timer);
+ /* The watchdog is not active */
+ if (!watchdog_active(wdog))
+ del_timer_sync(&wdev->timer);
+ }
clk_disable_unprepare(wdev->clk);
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
clk_prepare_enable(wdev->clk);
if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
- /* Resumes from deep sleep we need restart
- * the watchdog again.
+ /*
+ * If the watchdog is still active and we resume
+ * from deep sleep, we need to restart the
+ * watchdog.
*/
imx2_wdt_setup(wdog);
imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
} else if (imx2_wdt_is_running(wdev)) {
+ /* Resuming from non-deep sleep state. */
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
- mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+ /*
+ * If the watchdog is not active, start
+ * the timer again.
+ */
+ if (!watchdog_active(wdog))
+ mod_timer(&wdev->timer,
+ jiffies + wdog->timeout * HZ / 2);
}
return 0;
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index ef6a298e8c45..1f4155ee3404 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
.remove = meson_wdt_remove,
.shutdown = meson_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = meson_wdt_dt_ids,
},
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 3860d02729dc..0b52d92cb2e5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
-static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
/* List of ballooned pages, threaded through the mem_map array. */
@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * Ballooned out frames are effectively replaced with
- * a scratch frame. Ensure direct mappings and the
- * p2m are consistent.
- */
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
if (!PageHighMem(page)) {
- struct page *scratch_page = get_balloon_scratch_page();
-
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
+ __pte_ma(0), 0);
BUG_ON(ret);
-
- put_balloon_scratch_page();
}
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work)
mutex_unlock(&balloon_mutex);
}
-struct page *get_balloon_scratch_page(void)
-{
- struct page *ret = get_cpu_var(balloon_scratch_page);
- BUG_ON(ret == NULL);
- return ret;
-}
-
-void put_balloon_scratch_page(void)
-{
- put_cpu_var(balloon_scratch_page);
-}
-
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn,
}
}
-static int alloc_balloon_scratch_page(int cpu)
-{
- if (per_cpu(balloon_scratch_page, cpu) != NULL)
- return 0;
-
- per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
- if (per_cpu(balloon_scratch_page, cpu) == NULL) {
- pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-
-static int balloon_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
- switch (action) {
- case CPU_UP_PREPARE:
- if (alloc_balloon_scratch_page(cpu))
- return NOTIFY_BAD;
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block balloon_cpu_notifier = {
- .notifier_call = balloon_cpu_notify,
-};
-
static int __init balloon_init(void)
{
- int i, cpu;
+ int i;
if (!xen_domain())
return -ENODEV;
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- register_cpu_notifier(&balloon_cpu_notifier);
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (alloc_balloon_scratch_page(cpu)) {
- put_online_cpus();
- unregister_cpu_notifier(&balloon_cpu_notifier);
- return -ENOMEM;
- }
- }
- put_online_cpus();
- }
-
pr_info("Initialising balloon driver\n");
balloon_stats.current_pages = xen_pv_domain()
@@ -696,15 +625,4 @@ static int __init balloon_init(void)
subsys_initcall(balloon_init);
-static int __init balloon_clear(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(balloon_scratch_page, cpu) = NULL;
-
- return 0;
-}
-early_initcall(balloon_clear);
-
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 073b4a19a8b0..d5bb1a33d0a3 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -67,7 +67,7 @@ struct gntdev_priv {
* Only populated if populate_freeable_maps == 1 */
struct list_head freeable_maps;
/* lock protects maps and freeable_maps */
- spinlock_t lock;
+ struct mutex lock;
struct mm_struct *mm;
struct mmu_notifier mn;
};
@@ -91,7 +91,9 @@ struct grant_map {
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
+ struct gnttab_unmap_grant_ref *kunmap_ops;
struct page **pages;
+ unsigned long pages_vm_start;
};
static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
@@ -118,12 +120,13 @@ static void gntdev_free_map(struct grant_map *map)
return;
if (map->pages)
- free_xenballooned_pages(map->count, map->pages);
+ gnttab_free_pages(map->count, map->pages);
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
kfree(map->unmap_ops);
kfree(map->kmap_ops);
+ kfree(map->kunmap_ops);
kfree(map);
}
@@ -140,21 +143,24 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+ add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
+ NULL == add->kunmap_ops ||
NULL == add->pages)
goto err;
- if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
+ if (gnttab_alloc_pages(count, add->pages))
goto err;
for (i = 0; i < count; i++) {
add->map_ops[i].handle = -1;
add->unmap_ops[i].handle = -1;
add->kmap_ops[i].handle = -1;
+ add->kunmap_ops[i].handle = -1;
}
add->index = 0;
@@ -216,9 +222,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
}
if (populate_freeable_maps && priv) {
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
list_del(&map->next);
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
}
if (map->pages && !use_ptemod)
@@ -239,6 +245,14 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+ /*
+ * Set the PTE as special to force get_user_pages_fast() to fall
+ * back to the slow path. If this is not supported as part of
+ * the grant map, it will be done afterwards.
+ */
+ if (xen_feature(XENFEAT_gnttab_map_avail_bits))
+ flags |= (1 << _GNTMAP_guest_avail0);
+
gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
@@ -247,6 +261,15 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
return 0;
}
+#ifdef CONFIG_X86
+static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
+ return 0;
+}
+#endif
+
static int map_grant_pages(struct grant_map *map)
{
int i, err = 0;
@@ -280,6 +303,8 @@ static int map_grant_pages(struct grant_map *map)
map->flags | GNTMAP_host_map,
map->grants[i].ref,
map->grants[i].domid);
+ gnttab_set_unmap_op(&map->kunmap_ops[i], address,
+ map->flags | GNTMAP_host_map, -1);
}
}
@@ -290,20 +315,42 @@ static int map_grant_pages(struct grant_map *map)
return err;
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status)
+ if (map->map_ops[i].status) {
err = -EINVAL;
- else {
- BUG_ON(map->map_ops[i].handle == -1);
- map->unmap_ops[i].handle = map->map_ops[i].handle;
- pr_debug("map handle=%d\n", map->map_ops[i].handle);
+ continue;
}
+
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ if (use_ptemod)
+ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
}
return err;
}
+struct unmap_grant_pages_callback_data
+{
+ struct completion completion;
+ int result;
+};
+
+static void unmap_grant_callback(int result,
+ struct gntab_unmap_queue_data *data)
+{
+ struct unmap_grant_pages_callback_data* d = data->data;
+
+ d->result = result;
+ complete(&d->completion);
+}
+
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
+ struct gntab_unmap_queue_data unmap_data;
+ struct unmap_grant_pages_callback_data data;
+
+ init_completion(&data.completion);
+ unmap_data.data = &data;
+ unmap_data.done = &unmap_grant_callback;
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -315,11 +362,16 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs(map->unmap_ops + offset,
- use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
- pages);
- if (err)
- return err;
+ unmap_data.unmap_ops = map->unmap_ops + offset;
+ unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ unmap_data.pages = map->pages + offset;
+ unmap_data.count = pages;
+
+ gnttab_unmap_refs_async(&unmap_data);
+
+ wait_for_completion(&data.completion);
+ if (data.result)
+ return data.result;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
@@ -387,17 +439,26 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
* not do any unmapping, since that has been done prior to
* closing the vma, but it may still iterate the unmap_ops list.
*/
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
map->vma = NULL;
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
}
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
}
+static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct grant_map *map = vma->vm_private_data;
+
+ return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
+}
+
static struct vm_operations_struct gntdev_vmops = {
.open = gntdev_vma_open,
.close = gntdev_vma_close,
+ .find_special_page = gntdev_vma_find_special_page,
};
/* ------------------------------------------------------------------ */
@@ -433,14 +494,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
struct grant_map *map;
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
unmap_if_in_range(map, start, end);
}
list_for_each_entry(map, &priv->freeable_maps, next) {
unmap_if_in_range(map, start, end);
}
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
}
static void mn_invl_page(struct mmu_notifier *mn,
@@ -457,7 +518,7 @@ static void mn_release(struct mmu_notifier *mn,
struct grant_map *map;
int err;
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
if (!map->vma)
continue;
@@ -476,7 +537,7 @@ static void mn_release(struct mmu_notifier *mn,
err = unmap_grant_pages(map, /* offset */ 0, map->count);
WARN_ON(err);
}
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
}
static struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -498,7 +559,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
INIT_LIST_HEAD(&priv->freeable_maps);
- spin_lock_init(&priv->lock);
+ mutex_init(&priv->lock);
if (use_ptemod) {
priv->mm = get_task_mm(current);
@@ -572,10 +633,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
return -EFAULT;
}
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
gntdev_add_map(priv, map);
op.index = map->index << PAGE_SHIFT;
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
if (copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
@@ -594,7 +655,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
return -EFAULT;
pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
- spin_lock(&priv->lock);
+ list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
@@ -602,7 +663,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
list_add_tail(&map->next, &priv->freeable_maps);
err = 0;
}
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
if (map)
gntdev_put_map(priv, map);
return err;
@@ -670,7 +731,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
out_flags = op.action;
out_event = op.event_channel_port;
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
uint64_t begin = map->index << PAGE_SHIFT;
@@ -698,7 +759,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
rc = 0;
unlock_out:
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
/* Drop the reference to the event channel we did not save in the map */
if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
@@ -748,7 +809,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
pr_debug("map %d+%d at %lx (pgoff %lx)\n",
index, count, vma->vm_start, vma->vm_pgoff);
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
@@ -783,7 +844,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->flags |= GNTMAP_readonly;
}
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
if (use_ptemod) {
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -806,16 +867,34 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
if (err)
goto out_put_map;
}
+ } else {
+#ifdef CONFIG_X86
+ /*
+ * If the PTEs were not made special by the grant map
+ * hypercall, do so here.
+ *
+ * This is racy since the mapping is already visible
+ * to userspace but userspace should be well-behaved
+ * enough to not touch it until the mmap() call
+ * returns.
+ */
+ if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
+ apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ set_grant_ptes_as_special, NULL);
+ }
+#endif
+ map->pages_vm_start = vma->vm_start;
}
return 0;
unlock_out:
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
return err;
out_unlock_put:
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
out_put_map:
if (use_ptemod)
map->vma = NULL;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7786291ba229..17972fbacddc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -50,6 +51,7 @@
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
+#include <xen/balloon.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void)
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
+/**
+ * gnttab_alloc_pages - alloc pages suitable for grant mapping into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
+ */
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+ int i;
+ int ret;
+
+ ret = alloc_xenballooned_pages(nr_pages, pages, false);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < nr_pages; i++) {
+#if BITS_PER_LONG < 64
+ struct xen_page_foreign *foreign;
+
+ foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
+ if (!foreign) {
+ gnttab_free_pages(nr_pages, pages);
+ return -ENOMEM;
+ }
+ set_page_private(pages[i], (unsigned long)foreign);
+#endif
+ SetPagePrivate(pages[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(gnttab_alloc_pages);
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages: number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (PagePrivate(pages[i])) {
+#if BITS_PER_LONG < 64
+ kfree((void *)page_private(pages[i]));
+#endif
+ ClearPagePrivate(pages[i]);
+ }
+ }
+ free_xenballooned_pages(nr_pages, pages);
+}
+EXPORT_SYMBOL(gnttab_free_pages);
+
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (ret)
return ret;
- /* Retry eagain maps */
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
+ /* Retry eagain maps */
if (map_ops[i].status == GNTST_eagain)
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
+ if (map_ops[i].status == GNTST_okay) {
+ struct xen_page_foreign *foreign;
+
+ SetPageForeign(pages[i]);
+ foreign = xen_page_foreign(pages[i]);
+ foreign->domid = map_ops[i].dom;
+ foreign->gref = map_ops[i].ref;
+ }
+ }
+
return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
+ unsigned int i;
int ret;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
return ret;
- return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
+ for (i = 0; i < count; i++)
+ ClearPageForeign(pages[i]);
+
+ return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+#define GNTTAB_UNMAP_REFS_DELAY 5
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
+static void gnttab_unmap_work(struct work_struct *work)
+{
+ struct gntab_unmap_queue_data
+ *unmap_data = container_of(work,
+ struct gntab_unmap_queue_data,
+ gnttab_work.work);
+ if (unmap_data->age != UINT_MAX)
+ unmap_data->age++;
+ __gnttab_unmap_refs_async(unmap_data);
+}
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+ int ret;
+ int pc;
+
+ for (pc = 0; pc < item->count; pc++) {
+ if (page_count(item->pages[pc]) > 1) {
+ unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
+ schedule_delayed_work(&item->gnttab_work,
+ msecs_to_jiffies(delay));
+ return;
+ }
+ }
+
+ ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
+ item->pages, item->count);
+ item->done(ret, item);
+}
+
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+ INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
+ item->age = 0;
+
+ __gnttab_unmap_refs_async(item);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
+
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
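
For reference, a minimal sketch of the calling pattern gnttab_unmap_refs_async() expects, mirroring the gntdev conversion earlier in this diff: queue the unmap, then block on a completion fired from the ->done callback. The wrapper and type names below are illustrative only and not part of the patch; the assumption that struct gntab_unmap_queue_data is declared in xen/grant_table.h is mine.

#include <linux/completion.h>
#include <xen/grant_table.h>

/* Illustrative waiter; plays the role gntdev's callback data plays above. */
struct example_unmap_waiter {
	struct completion completion;
	int result;
};

static void example_unmap_done(int result, struct gntab_unmap_queue_data *data)
{
	struct example_unmap_waiter *w = data->data;

	/* Record the outcome and wake the sleeping caller. */
	w->result = result;
	complete(&w->completion);
}

/* Synchronously unmap 'count' granted pages through the async interface. */
static int example_unmap_sync(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct page **pages, unsigned int count)
{
	struct gntab_unmap_queue_data unmap_data;
	struct example_unmap_waiter waiter;

	init_completion(&waiter.completion);
	unmap_data.data = &waiter;
	unmap_data.done = example_unmap_done;
	unmap_data.unmap_ops = unmap_ops;
	unmap_data.kunmap_ops = NULL;	/* no kernel-side mappings in this sketch */
	unmap_data.pages = pages;
	unmap_data.count = count;

	/* Re-queues itself while any page still has extra references. */
	gnttab_unmap_refs_async(&unmap_data);
	wait_for_completion(&waiter.completion);

	return waiter.result;
}

gntdev's __unmap_grant_pages() in the hunk above is the in-tree caller this sketch mirrors.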
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index f8bb36f9d9ce..bf1940706422 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -105,10 +105,16 @@ static void do_suspend(void)
err = freeze_processes();
if (err) {
- pr_err("%s: freeze failed %d\n", __func__, err);
+ pr_err("%s: freeze processes failed %d\n", __func__, err);
goto out;
}
+ err = freeze_kernel_threads();
+ if (err) {
+ pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
+ goto out_thaw;
+ }
+
err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
pr_err("%s: dpm_suspend_start %d\n", __func__, err);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 83b5c53bec6b..8a65423bc696 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = {
};
#endif
-static int xen_tmem_init(void)
+static int __init xen_tmem_init(void)
{
if (!xen_domain())
return 0;
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index 34e40b733f9a..4fc886cd5586 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -117,8 +117,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
list_for_each_entry(info, &mem_device->res_list, list) {
if ((info->caching == address64.info.mem.caching) &&
(info->write_protect == address64.info.mem.write_protect) &&
- (info->start_addr + info->length == address64.minimum)) {
- info->length += address64.address_length;
+ (info->start_addr + info->length == address64.address.minimum)) {
+ info->length += address64.address.address_length;
return AE_OK;
}
}
@@ -130,8 +130,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
INIT_LIST_HEAD(&new->list);
new->caching = address64.info.mem.caching;
new->write_protect = address64.info.mem.write_protect;
- new->start_addr = address64.minimum;
- new->length = address64.address_length;
+ new->start_addr = address64.address.minimum;
+ new->length = address64.address.address_length;
list_add_tail(&new->list, &mem_device->res_list);
return AE_OK;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e999496eda3e..61653a03a8f5 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -47,6 +47,7 @@
#include <generated/utsrelease.h>
+#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
@@ -227,7 +228,7 @@ static void put_free_pages(struct page **page, int num)
return;
if (i > scsiback_max_buffer_pages) {
n = min(num, i - scsiback_max_buffer_pages);
- free_xenballooned_pages(n, page + num - n);
+ gnttab_free_pages(n, page + num - n);
n = num - n;
}
spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +245,7 @@ static int get_free_page(struct page **page)
spin_lock_irqsave(&free_pages_lock, flags);
if (list_empty(&scsiback_free_pages)) {
spin_unlock_irqrestore(&free_pages_lock, flags);
- return alloc_xenballooned_pages(1, page, false);
+ return gnttab_alloc_pages(1, page);
}
page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
list_del(&page[0]->lru);
@@ -2106,7 +2107,7 @@ static void __exit scsiback_exit(void)
while (free_pages_num) {
if (get_free_page(&page))
BUG();
- free_xenballooned_pages(1, &page);
+ gnttab_free_pages(1, &page);
}
scsiback_deregister_configfs();
xenbus_unregister_driver(&scsiback_driver);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 85534ea63555..9433e46518c8 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -326,10 +326,13 @@ static int xenbus_write_transaction(unsigned msg_type,
}
if (msg_type == XS_TRANSACTION_START) {
- trans->handle.id = simple_strtoul(reply, NULL, 0);
-
- list_add(&trans->list, &u->transactions);
- } else if (msg_type == XS_TRANSACTION_END) {
+ if (u->u.msg.type == XS_ERROR)
+ kfree(trans);
+ else {
+ trans->handle.id = simple_strtoul(reply, NULL, 0);
+ list_add(&trans->list, &u->transactions);
+ }
+ } else if (u->u.msg.type == XS_TRANSACTION_END) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 6894b085f0ee..620d93489539 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -335,7 +335,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
}
init_rwsem(&v9ses->rename_sem);
- rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY);
+ rc = bdi_setup_and_register(&v9ses->bdi, "9p");
if (rc) {
kfree(v9ses->aname);
kfree(v9ses->uname);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 5594505e6e73..b40133796b87 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -831,7 +831,6 @@ static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = v9fs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
@@ -839,7 +838,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = v9fs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
diff --git a/fs/Kconfig b/fs/Kconfig
index 664991afe0c0..ec35851e5b71 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -13,13 +13,6 @@ if BLOCK
source "fs/ext2/Kconfig"
source "fs/ext3/Kconfig"
source "fs/ext4/Kconfig"
-
-config FS_XIP
-# execute in place
- bool
- depends on EXT2_FS_XIP
- default y
-
source "fs/jbd/Kconfig"
source "fs/jbd2/Kconfig"
@@ -40,6 +33,21 @@ source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
+config FS_DAX
+ bool "Direct Access (DAX) support"
+ depends on MMU
+ depends on !(ARM || MIPS || SPARC)
+ help
+ Direct Access (DAX) can be used on memory-backed block devices.
+ If the block device supports DAX and the filesystem supports DAX,
+ then you can avoid using the pagecache to buffer I/Os. Turning
+ on this option will compile in support for DAX; you will need to
+ mount the filesystem using the -o dax option.
+
+ If you do not have a block device that is capable of using this,
+ or if unsure, say N. Saying Y will increase the size of the kernel
+ by about 5kB.
+
endif # BLOCK
# Posix ACL utility routines
@@ -165,6 +173,7 @@ config HUGETLB_PAGE
def_bool HUGETLBFS
source "fs/configfs/Kconfig"
+source "fs/efivarfs/Kconfig"
endmenu
@@ -209,7 +218,6 @@ source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
source "fs/f2fs/Kconfig"
-source "fs/efivarfs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index bedff48e8fdc..0f4635f7c49c 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SIGNALFD) += signalfd.o
obj-$(CONFIG_TIMERFD) += timerfd.o
obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_AIO) += aio.o
+obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 06e14bfb3496..dbc732e9a5c0 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
_debug("- range %u-%u%s",
offset, to, msg->msg_flags ? " [more]" : "");
- iov_iter_init(&msg->msg_iter, WRITE,
- (struct iovec *) iov, 1, to - offset);
+ iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
+ iov, 1, to - offset);
/* have to change the state *before* sending the last
* packet as RxRPC might give us the reply before it
@@ -384,7 +384,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1,
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
call->request_size);
msg.msg_control = NULL;
msg.msg_controllen = 0;
@@ -770,7 +770,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
void afs_send_empty_reply(struct afs_call *call)
{
struct msghdr msg;
- struct iovec iov[1];
+ struct kvec iov[1];
_enter("");
@@ -778,7 +778,7 @@ void afs_send_empty_reply(struct afs_call *call)
iov[0].iov_len = 0;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0); /* WTF? */
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -805,7 +805,7 @@ void afs_send_empty_reply(struct afs_call *call)
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
struct msghdr msg;
- struct iovec iov[1];
+ struct kvec iov[1];
int n;
_enter("");
@@ -814,7 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
iov[0].iov_len = len;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 2b607257820c..d142a2449e65 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -106,7 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
volume->cell = params->cell;
volume->vid = vlocation->vldb.vid[params->type];
- ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY);
+ ret = bdi_setup_and_register(&volume->bdi, "afs");
if (ret)
goto error_bdi;
diff --git a/fs/aio.c b/fs/aio.c
index 1b7893ecc296..118a2e0088d8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,15 +165,6 @@ static struct vfsmount *aio_mnt;
static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
-/* Backing dev info for aio fs.
- * -no dirty page accounting or writeback happens
- */
-static struct backing_dev_info aio_fs_backing_dev_info = {
- .name = "aiofs",
- .state = 0,
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
-};
-
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
struct qstr this = QSTR_INIT("[aio]", 5);
@@ -185,7 +176,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
inode->i_mapping->a_ops = &aio_ctx_aops;
inode->i_mapping->private_data = ctx;
- inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
inode->i_size = PAGE_SIZE * nr_pages;
path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -230,9 +220,6 @@ static int __init aio_setup(void)
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
- if (bdi_init(&aio_fs_backing_dev_info))
- panic("Failed to init aio fs backing dev info.");
-
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
@@ -1140,6 +1127,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
long ret = 0;
int copy_ret;
+ /*
+ * The mutex can block and wake us up and that will cause
+ * wait_event_interruptible_hrtimeout() to schedule without sleeping
+ * and repeat. This should be rare enough that it doesn't cause
+ * performance issues. See the comment in read_events() for more detail.
+ */
+ sched_annotate_sleep();
mutex_lock(&ctx->ring_lock);
/* Access to ->ring_pages here is protected by ctx->ring_lock. */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b48c41bf0f86..975266be67d3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -49,23 +49,15 @@ inline struct block_device *I_BDEV(struct inode *inode)
}
EXPORT_SYMBOL(I_BDEV);
-/*
- * Move the inode from its current bdi to a new bdi. Make sure the inode
- * is clean before moving so that it doesn't linger on the old bdi.
- */
-static void bdev_inode_switch_bdi(struct inode *inode,
- struct backing_dev_info *dst)
+static void bdev_write_inode(struct inode *inode)
{
- while (true) {
- spin_lock(&inode->i_lock);
- if (!(inode->i_state & I_DIRTY)) {
- inode->i_data.backing_dev_info = dst;
- spin_unlock(&inode->i_lock);
- return;
- }
+ spin_lock(&inode->i_lock);
+ while (inode->i_state & I_DIRTY) {
spin_unlock(&inode->i_lock);
WARN_ON_ONCE(write_inode_now(inode, true));
+ spin_lock(&inode->i_lock);
}
+ spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache , dirty or not.. */
@@ -429,6 +421,46 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(bdev_write_page);
+/**
+ * bdev_direct_access() - Get the address for directly-accessible memory
+ * @bdev: The device containing the memory
+ * @sector: The offset within the device
+ * @addr: Where to put the address of the memory
+ * @pfn: The Page Frame Number for the memory
+ * @size: The number of bytes requested
+ *
+ * If a block device is made up of directly addressable memory, this function
+ * will tell the caller the PFN and the address of the memory. The address
+ * may be directly dereferenced within the kernel without the need to call
+ * ioremap(), kmap() or similar. The PFN is suitable for inserting into
+ * page tables.
+ *
+ * Return: negative errno if an error occurs, otherwise the number of bytes
+ * accessible at this address.
+ */
+long bdev_direct_access(struct block_device *bdev, sector_t sector,
+ void **addr, unsigned long *pfn, long size)
+{
+ long avail;
+ const struct block_device_operations *ops = bdev->bd_disk->fops;
+
+ if (size < 0)
+ return size;
+ if (!ops->direct_access)
+ return -EOPNOTSUPP;
+ if ((sector + DIV_ROUND_UP(size, 512)) >
+ part_nr_sects_read(bdev->bd_part))
+ return -ERANGE;
+ sector += get_start_sect(bdev);
+ if (sector % (PAGE_SIZE / 512))
+ return -EINVAL;
+ avail = ops->direct_access(bdev, sector, addr, pfn, size);
+ if (!avail)
+ return -ERANGE;
+ return min(avail, size);
+}
+EXPORT_SYMBOL_GPL(bdev_direct_access);
+
/*
* pseudo-fs
*/
@@ -584,7 +616,6 @@ struct block_device *bdget(dev_t dev)
inode->i_bdev = bdev;
inode->i_data.a_ops = &def_blk_aops;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
- inode->i_data.backing_dev_info = &default_backing_dev_info;
spin_lock(&bdev_lock);
list_add(&bdev->bd_list, &all_bdevs);
spin_unlock(&bdev_lock);
@@ -1145,8 +1176,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
if (!partno) {
- struct backing_dev_info *bdi;
-
ret = -ENXIO;
bdev->bd_part = disk_get_part(disk, partno);
if (!bdev->bd_part)
@@ -1172,11 +1201,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
}
}
- if (!ret) {
+ if (!ret)
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
- bdi = blk_get_backing_dev_info(bdev);
- bdev_inode_switch_bdi(bdev->bd_inode, bdi);
- }
/*
* If the device is invalidated, rescan partition
@@ -1203,8 +1229,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (ret)
goto out_clear;
bdev->bd_contains = whole;
- bdev_inode_switch_bdi(bdev->bd_inode,
- whole->bd_inode->i_data.backing_dev_info);
bdev->bd_part = disk_get_part(disk, partno);
if (!(disk->flags & GENHD_FL_UP) ||
!bdev->bd_part || !bdev->bd_part->nr_sects) {
@@ -1244,7 +1268,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
bdev->bd_queue = NULL;
- bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
@@ -1464,11 +1487,11 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
WARN_ON_ONCE(bdev->bd_holders);
sync_blockdev(bdev);
kill_bdev(bdev);
- /* ->release can cause the old bdi to disappear,
- * so must switch it out first
+ /*
+ * ->release can cause the queue to disappear, so flush all
+ * dirty data before.
*/
- bdev_inode_switch_bdi(bdev->bd_inode,
- &default_backing_dev_info);
+ bdev_write_inode(bdev->bd_inode);
}
if (bdev->bd_contains == bdev) {
if (disk->fops->release)
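
As context for the bdev_direct_access() helper and kernel-doc added above, here is a minimal, illustrative sketch of a caller. The function name and the assumption that the prototype lives in linux/blkdev.h are mine, not part of the patch.

#include <linux/blkdev.h>
#include <linux/string.h>

/*
 * Illustrative only: copy up to 'len' bytes starting at 'sector' straight
 * out of a directly addressable block device, bypassing the page cache.
 * The caller must pass a sector aligned to PAGE_SIZE / 512, as the helper
 * above requires.
 */
static long example_direct_read(struct block_device *bdev, sector_t sector,
				void *dst, long len)
{
	void *kaddr;
	unsigned long pfn;
	long avail;

	/* Returns the number of accessible bytes (<= len) or a negative errno. */
	avail = bdev_direct_access(bdev, sector, &kaddr, &pfn, len);
	if (avail < 0)
		return avail;	/* e.g. -EOPNOTSUPP, -ERANGE or -EINVAL */

	/* Per the kernel-doc, kaddr can be dereferenced without ioremap()/kmap(). */
	memcpy(dst, kaddr, avail);
	return avail;
}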
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a66768ebc8d1..80e9c18ea64f 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -8,6 +8,7 @@ config BTRFS_FS
select LZO_DECOMPRESS
select RAID6_PQ
select XOR_BLOCKS
+ select SRCU
help
Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7e607416755a..0b180708bf79 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
struct percpu_counter total_bytes_pinned;
struct list_head list;
+ /* Protected by the spinlock 'lock'. */
struct list_head ro_bgs;
struct rw_semaphore groups_sem;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8c63419a7f70..1afb18226da8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1715,12 +1715,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
int err;
- bdi->capabilities = BDI_CAP_MAP_COPY;
- err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
+ err = bdi_setup_and_register(bdi, "btrfs");
if (err)
return err;
- bdi->ra_pages = default_backing_dev_info.ra_pages;
+ bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
bdi->congested_fn = btrfs_congested_fn;
bdi->congested_data = info;
return 0;
@@ -2319,7 +2318,6 @@ int open_ctree(struct super_block *sb,
*/
fs_info->btree_inode->i_size = OFFSET_MAX;
fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
- fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 15116585e714..a684086c3c81 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* are still on the list after taking the semaphore
*/
list_del_init(&block_group->list);
- list_del_init(&block_group->ro_list);
if (list_empty(&block_group->space_info->block_groups[index])) {
kobj = block_group->space_info->block_group_kobjs[index];
block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
btrfs_remove_free_space_cache(block_group);
spin_lock(&block_group->space_info->lock);
+ list_del_init(&block_group->ro_list);
block_group->space_info->total_bytes -= block_group->key.offset;
block_group->space_info->bytes_readonly -= block_group->key.offset;
block_group->space_info->disk_total -= block_group->key.offset * factor;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4ebabd237153..c73df6a7c9b6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1407,8 +1407,8 @@ int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
while (index <= end_index) {
page = find_get_page(inode->i_mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
- account_page_redirty(page);
__set_page_dirty_nobuffers(page);
+ account_page_redirty(page);
page_cache_release(page);
index++;
}
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
next = next_state(state);
- failrec = (struct io_failure_record *)state->private;
+ failrec = (struct io_failure_record *)(unsigned long)state->private;
free_extent_state(state);
kfree(failrec);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e4090259569b..b78bbbac900d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1746,7 +1746,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
mutex_lock(&inode->i_mutex);
- current->backing_dev_info = inode->i_mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err) {
mutex_unlock(&inode->i_mutex);
@@ -2081,7 +2081,6 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = btrfs_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8bf326affb94..54bcf639d1cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3608,7 +3608,6 @@ cache_acl:
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
@@ -3623,7 +3622,6 @@ cache_acl:
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
break;
default:
inode->i_op = &btrfs_special_inode_operations;
@@ -6088,7 +6086,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
@@ -9203,7 +9200,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
@@ -9247,7 +9243,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
inode_set_bytes(inode, name_len);
btrfs_i_size_write(inode, name_len);
err = btrfs_update_inode(trans, root, inode);
@@ -9459,7 +9454,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
ret = btrfs_init_inode_security(trans, inode, dir, NULL);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9e1569ffbf6e..e427cb7ee12c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
ppath = btrfs_alloc_path();
if (!ppath) {
- btrfs_free_path(ppath);
+ btrfs_free_path(path);
return -ENOMEM;
}
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
path->search_commit_root = 1;
path->skip_locking = 1;
+ ppath->search_commit_root = 1;
+ ppath->skip_locking = 1;
/*
* trigger the readahead for extent tree csum tree and wait for
* completion. During readahead, the scrub is officially paused
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60f7cbe815e9..6f49b2872a64 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
*/
if (fs_info->pending_changes == 0)
return 0;
+ /*
+ * A non-blocking test of whether the fs is frozen. We must not
+ * start a new transaction here, otherwise a deadlock would
+ * happen. The pending operations are delayed to the
+ * next commit after thawing.
+ */
+ if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+ else
+ return 0;
trans = btrfs_start_transaction(root, 0);
- } else {
- return PTR_ERR(trans);
}
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
}
return btrfs_commit_transaction(trans, root);
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a605d4e2f2bc..e88b59d13439 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
unsigned long prev;
unsigned long bit;
- prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+ prev = xchg(&fs_info->pending_changes, 0);
if (!prev)
return;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a02da16f2be..1a9585d4380a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+ blk_finish_plug(&plug);
mutex_unlock(&log_root_tree->log_mutex);
ret = root_log_ctx.log_ret;
goto out;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c81c0e004588..24be059fd1f8 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1569,7 +1569,6 @@ out:
static struct vm_operations_struct ceph_vmops = {
.fault = ceph_filemap_fault,
.page_mkwrite = ceph_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce74b394b49d..905986dd4c3c 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -945,7 +945,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
mutex_lock(&inode->i_mutex);
/* We can write back this queue in page reclaim */
- current->backing_dev_info = file->f_mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index f61a74115beb..6b5173605154 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -783,8 +783,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
}
inode->i_mapping->a_ops = &ceph_aops;
- inode->i_mapping->backing_dev_info =
- &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
switch (inode->i_mode & S_IFMT) {
case S_IFIFO:
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index c35c5c614e38..06ea5cd05cd9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -239,23 +239,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
return err;
}
-/**
- * Must be called with lock_flocks() already held. Fills in the passed
- * counter variables, so you can prepare pagelist metadata before calling
- * ceph_encode_locks.
+/*
+ * Fills in the passed counter variables, so you can prepare pagelist metadata
+ * before calling ceph_encode_locks.
*/
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
- struct file_lock *lock;
+ struct file_lock_context *ctx;
*fcntl_count = 0;
*flock_count = 0;
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_POSIX)
- ++(*fcntl_count);
- else if (lock->fl_flags & FL_FLOCK)
- ++(*flock_count);
+ ctx = inode->i_flctx;
+ if (ctx) {
+ *fcntl_count = ctx->flc_posix_cnt;
+ *flock_count = ctx->flc_flock_cnt;
}
dout("counted %d flock locks and %d fcntl locks",
*flock_count, *fcntl_count);
@@ -271,6 +269,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
int num_fcntl_locks, int num_flock_locks)
{
struct file_lock *lock;
+ struct file_lock_context *ctx = inode->i_flctx;
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
@@ -279,33 +278,34 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
dout("encoding %d flock and %d fcntl locks", num_flock_locks,
num_fcntl_locks);
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_POSIX) {
- ++seen_fcntl;
- if (seen_fcntl > num_fcntl_locks) {
- err = -ENOSPC;
- goto fail;
- }
- err = lock_to_ceph_filelock(lock, &flocks[l]);
- if (err)
- goto fail;
- ++l;
+ if (!ctx)
+ return 0;
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ ++seen_fcntl;
+ if (seen_fcntl > num_fcntl_locks) {
+ err = -ENOSPC;
+ goto fail;
}
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
+ if (err)
+ goto fail;
+ ++l;
}
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_FLOCK) {
- ++seen_flock;
- if (seen_flock > num_flock_locks) {
- err = -ENOSPC;
- goto fail;
- }
- err = lock_to_ceph_filelock(lock, &flocks[l]);
- if (err)
- goto fail;
- ++l;
+ list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ ++seen_flock;
+ if (seen_flock > num_flock_locks) {
+ err = -ENOSPC;
+ goto fail;
}
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
+ if (err)
+ goto fail;
+ ++l;
}
fail:
+ spin_unlock(&ctx->flc_lock);
return err;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d2171f4a6980..5f62fb7a5d0a 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2700,20 +2700,16 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_filelock *flocks;
encode_again:
- spin_lock(&inode->i_lock);
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
- spin_unlock(&inode->i_lock);
flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
sizeof(struct ceph_filelock), GFP_NOFS);
if (!flocks) {
err = -ENOMEM;
goto out_free;
}
- spin_lock(&inode->i_lock);
err = ceph_encode_locks_to_buffer(inode, flocks,
num_fcntl_locks,
num_flock_locks);
- spin_unlock(&inode->i_lock);
if (err) {
kfree(flocks);
if (err == -ENOSPC)
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 50f06cddc94b..5ae62587a71d 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -40,17 +40,6 @@ static void ceph_put_super(struct super_block *s)
dout("put_super\n");
ceph_mdsc_close_sessions(fsc->mdsc);
-
- /*
- * ensure we release the bdi before put_anon_super releases
- * the device name.
- */
- if (s->s_bdi == &fsc->backing_dev_info) {
- bdi_unregister(&fsc->backing_dev_info);
- s->s_bdi = NULL;
- }
-
- return;
}
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -910,7 +899,7 @@ static int ceph_register_bdi(struct super_block *sb,
>> PAGE_SHIFT;
else
fsc->backing_dev_info.ra_pages =
- default_backing_dev_info.ra_pages;
+ VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
atomic_long_inc_return(&bdi_seq));
@@ -1002,11 +991,16 @@ out_final:
static void ceph_kill_sb(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+ dev_t dev = s->s_dev;
+
dout("kill_sb %p\n", s);
+
ceph_mdsc_pre_umount(fsc->mdsc);
- kill_anon_super(s); /* will call put_super after sb is r/o */
+ generic_shutdown_super(s);
ceph_mdsc_destroy(fsc);
+
destroy_fs_client(fsc);
+ free_anon_bdev(dev);
}
static struct file_system_type ceph_fs_type = {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 67b2007f10fe..ea06a3d0364c 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -24,27 +24,6 @@
#include "internal.h"
-/*
- * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
- * devices
- * - permits shared-mmap for read, write and/or exec
- * - does not permit private mmap in NOMMU mode (can't do COW)
- * - no readahead or I/O queue unplugging required
- */
-struct backing_dev_info directly_mappable_cdev_bdi = {
- .name = "char",
- .capabilities = (
-#ifdef CONFIG_MMU
- /* permit private copies of the data to be taken */
- BDI_CAP_MAP_COPY |
-#endif
- /* permit direct mmap, for read, write or exec */
- BDI_CAP_MAP_DIRECT |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
- /* no writeback happens */
- BDI_CAP_NO_ACCT_AND_WRITEBACK),
-};
-
static struct kobj_map *cdev_map;
static DEFINE_MUTEX(chrdevs_lock);
@@ -575,8 +554,6 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data)
void __init chrdev_init(void)
{
cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
- if (bdi_init(&directly_mappable_cdev_bdi))
- panic("Failed to init directly mappable cdev bdi");
}
@@ -590,4 +567,3 @@ EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);
-EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9c56ef776407..7febcf2475c5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
*flags = CIFSSEC_MUST_NTLMV2;
else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
*flags = CIFSSEC_MUST_NTLM;
- else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+ else if (CIFSSEC_MUST_LANMAN &&
+ (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
*flags = CIFSSEC_MUST_LANMAN;
- else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+ else if (CIFSSEC_MUST_PLNTXT &&
+ (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
*flags = CIFSSEC_MUST_PLNTXT;
*flags |= signflags;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2a772da16b83..d3aa999ab785 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3446,7 +3446,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
int referral_walks_count = 0;
#endif
- rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+ rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs");
if (rc)
return rc;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b7706d..8fe1f7a21b3e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct cifsLockInfo *li, *tmp;
struct cifs_fid fid;
struct cifs_pending_open open;
+ bool oplock_break_cancelled;
spin_lock(&cifs_file_list_lock);
if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
}
spin_unlock(&cifs_file_list_lock);
- cancel_work_sync(&cifs_file->oplock_break);
+ oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
_free_xid(xid);
}
+ if (oplock_break_cancelled)
+ cifs_done_oplock_break(cifsi);
+
cifs_del_pending_open(&open);
/*
@@ -1109,11 +1113,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
return rc;
}
-/* copied from fs/locks.c with a name change */
-#define cifs_for_each_lock(inode, lockp) \
- for (lockp = &inode->i_flock; *lockp != NULL; \
- lockp = &(*lockp)->fl_next)
-
struct lock_to_push {
struct list_head llist;
__u64 offset;
@@ -1128,8 +1127,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct inode *inode = cfile->dentry->d_inode;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct file_lock *flock, **before;
- unsigned int count = 0, i = 0;
+ struct file_lock *flock;
+ struct file_lock_context *flctx = inode->i_flctx;
+ unsigned int i;
int rc = 0, xid, type;
struct list_head locks_to_send, *el;
struct lock_to_push *lck, *tmp;
@@ -1137,21 +1137,17 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
xid = get_xid();
- spin_lock(&inode->i_lock);
- cifs_for_each_lock(inode, before) {
- if ((*before)->fl_flags & FL_POSIX)
- count++;
- }
- spin_unlock(&inode->i_lock);
+ if (!flctx)
+ goto out;
INIT_LIST_HEAD(&locks_to_send);
/*
- * Allocating count locks is enough because no FL_POSIX locks can be
- * added to the list while we are holding cinode->lock_sem that
+ * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
+ * can be added to the list while we are holding cinode->lock_sem that
* protects locking operations of this inode.
*/
- for (; i < count; i++) {
+ for (i = 0; i < flctx->flc_posix_cnt; i++) {
lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
if (!lck) {
rc = -ENOMEM;
@@ -1161,11 +1157,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
}
el = locks_to_send.next;
- spin_lock(&inode->i_lock);
- cifs_for_each_lock(inode, before) {
- flock = *before;
- if ((flock->fl_flags & FL_POSIX) == 0)
- continue;
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
if (el == &locks_to_send) {
/*
* The list ended. We don't have enough allocated
@@ -1185,9 +1178,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
lck->length = length;
lck->type = type;
lck->offset = flock->fl_start;
- el = el->next;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
int stored_rc;
@@ -3244,7 +3236,6 @@ static struct vm_operations_struct cifs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = cifs_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
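The loop above now walks the per-inode file_lock_context (i_flctx) under flc_lock instead of the old i_flock list under i_lock. A minimal sketch of the same iteration pattern, with a hypothetical helper name, assuming the caller has already checked that i_flctx is non-NULL:

    /* Sketch: count POSIX locks via the new file_lock_context API. */
    static unsigned int count_posix_locks(struct inode *inode)
    {
            struct file_lock_context *flctx = inode->i_flctx;
            struct file_lock *fl;
            unsigned int count = 0;

            spin_lock(&flctx->flc_lock);
            list_for_each_entry(fl, &flctx->flc_posix, fl_list)
                    count++;
            spin_unlock(&flctx->flc_lock);
            return count;
    }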
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0c3ce464cae4..2d4f37235ed0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -937,8 +937,6 @@ retry_iget5_locked:
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
inode->i_ino = hash;
- if (S_ISREG(inode->i_mode))
- inode->i_data.backing_dev_info = sb->s_bdi;
#ifdef CONFIG_CIFS_FSCACHE
/* initialize per-inode cache cookie pointer */
CIFS_I(inode)->fscache = NULL;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 45cb59bcc791..8b7898b7670f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
}
src_inode = file_inode(src_file.file);
+ rc = -EINVAL;
+ if (S_ISDIR(src_inode->i_mode))
+ goto out_fput;
/*
* Note: cifs case is easier than btrfs since server responsible for
* checks for proper open modes and file type and if it wants
* server could even support copy of range where source = target
*/
-
- /* so we do not deadlock racing two ioctls on same files */
- if (target_inode < src_inode) {
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
- } else {
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
- }
+ lock_two_nondirectories(target_inode, src_inode);
/* determine range to clone */
rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
out_unlock:
/* although unlocking in the reverse order from locking is not
strictly necessary here it is a little cleaner to be consistent */
- if (target_inode < src_inode) {
- mutex_unlock(&src_inode->i_mutex);
- mutex_unlock(&target_inode->i_mutex);
- } else {
- mutex_unlock(&target_inode->i_mutex);
- mutex_unlock(&src_inode->i_mutex);
- }
+ unlock_two_nondirectories(src_inode, target_inode);
out_fput:
fdput(src_file);
out_drop_write:
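lock_two_nondirectories()/unlock_two_nondirectories() replace the open-coded, address-ordered mutex_lock_nested() pairs above; the helpers order the two i_mutex locks themselves. A hedged sketch of the calling pattern, with hypothetical function names:

    /* Sketch: lock both regular-file inodes for a clone-style ioctl. */
    static int clone_lock_inodes(struct inode *dst, struct inode *src)
    {
            if (S_ISDIR(dst->i_mode) || S_ISDIR(src->i_mode))
                    return -EINVAL;     /* the helpers are for non-directories only */

            lock_two_nondirectories(dst, src);
            /* ... validate ranges and issue the server-side clone ... */
            unlock_two_nondirectories(dst, src);
            return 0;
    }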
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6c1566366a66..a4232ec4f2ba 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
}
rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
- memset(wpwd, 0, 129 * sizeof(__le16));
+ memzero_explicit(wpwd, sizeof(wpwd));
return rc;
}
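memzero_explicit() is used so the compiler cannot drop the clearing of the password buffer as a dead store, and sizeof(wpwd) replaces the hard-coded element count. The idiom, as a standalone sketch:

    /* Sketch: wipe a sensitive stack buffer before returning. */
    {
            __le16 wpwd[129];

            /* ... convert the password and hash it from wpwd ... */
            memzero_explicit(wpwd, sizeof(wpwd));   /* unlike memset(), never optimised away */
    }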
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index b945410bfcd5..82ec68b59208 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -183,7 +183,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
goto unlock_out;
}
- error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY);
+ error = bdi_setup_and_register(&vc->bdi, "coda");
if (error)
goto unlock_out;
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index bd4a3c167091..a315677e44d3 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -70,8 +70,6 @@ extern int configfs_is_root(struct config_item *item);
extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
-extern int configfs_inode_init(void);
-extern void configfs_inode_exit(void);
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_make_dirent(struct configfs_dirent *,
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 5946ad98053f..65af86147154 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -50,12 +50,6 @@ static const struct address_space_operations configfs_aops = {
.write_end = simple_write_end,
};
-static struct backing_dev_info configfs_backing_dev_info = {
- .name = "configfs",
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
static const struct inode_operations configfs_inode_operations ={
.setattr = configfs_setattr,
};
@@ -137,7 +131,6 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mapping->a_ops = &configfs_aops;
- inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
inode->i_op = &configfs_inode_operations;
if (sd->s_iattr) {
@@ -283,13 +276,3 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
}
mutex_unlock(&dir->d_inode->i_mutex);
}
-
-int __init configfs_inode_init(void)
-{
- return bdi_init(&configfs_backing_dev_info);
-}
-
-void configfs_inode_exit(void)
-{
- bdi_destroy(&configfs_backing_dev_info);
-}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index f6c285833390..da94e41bdbf6 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -145,19 +145,13 @@ static int __init configfs_init(void)
if (!config_kobj)
goto out2;
- err = configfs_inode_init();
- if (err)
- goto out3;
-
err = register_filesystem(&configfs_fs_type);
if (err)
- goto out4;
+ goto out3;
return 0;
-out4:
- pr_err("Unable to register filesystem!\n");
- configfs_inode_exit();
out3:
+ pr_err("Unable to register filesystem!\n");
kobject_put(config_kobj);
out2:
kmem_cache_destroy(configfs_dir_cachep);
@@ -172,7 +166,6 @@ static void __exit configfs_exit(void)
kobject_put(config_kobj);
kmem_cache_destroy(configfs_dir_cachep);
configfs_dir_cachep = NULL;
- configfs_inode_exit();
}
MODULE_AUTHOR("Oracle");
diff --git a/fs/dax.c b/fs/dax.c
new file mode 100644
index 000000000000..ed1619ec6537
--- /dev/null
+++ b/fs/dax.c
@@ -0,0 +1,534 @@
+/*
+ * fs/dax.c - Direct Access filesystem code
+ * Copyright (c) 2013-2014 Intel Corporation
+ * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
+ * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/highmem.h>
+#include <linux/memcontrol.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/uio.h>
+#include <linux/vmstat.h>
+
+int dax_clear_blocks(struct inode *inode, sector_t block, long size)
+{
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ sector_t sector = block << (inode->i_blkbits - 9);
+
+ might_sleep();
+ do {
+ void *addr;
+ unsigned long pfn;
+ long count;
+
+ count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
+ if (count < 0)
+ return count;
+ BUG_ON(size < count);
+ while (count > 0) {
+ unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
+ if (pgsz > count)
+ pgsz = count;
+ if (pgsz < PAGE_SIZE)
+ memset(addr, 0, pgsz);
+ else
+ clear_page(addr);
+ addr += pgsz;
+ size -= pgsz;
+ count -= pgsz;
+ BUG_ON(pgsz & 511);
+ sector += pgsz / 512;
+ cond_resched();
+ }
+ } while (size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dax_clear_blocks);
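Filesystems are expected to call dax_clear_blocks() on a freshly allocated block before linking it into their metadata, so that stale device contents can never be observed through a racing mmap. A hedged sketch of such a call site, modelled on the ext2 hunk later in this diff (new_blocknr is a placeholder for the just-allocated block number):

    /* Sketch: zero a newly allocated block on a DAX-capable device. */
    if (IS_DAX(inode)) {
            err = dax_clear_blocks(inode, new_blocknr, 1 << inode->i_blkbits);
            if (err)
                    goto cleanup;       /* leave the block unlinked on failure */
    }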
+
+static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
+{
+ unsigned long pfn;
+ sector_t sector = bh->b_blocknr << (blkbits - 9);
+ return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
+}
+
+static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
+ loff_t end)
+{
+ loff_t final = end - pos + first; /* The final byte of the buffer */
+
+ if (first > 0)
+ memset(addr, 0, first);
+ if (final < size)
+ memset(addr + final, 0, size - final);
+}
+
+static bool buffer_written(struct buffer_head *bh)
+{
+ return buffer_mapped(bh) && !buffer_unwritten(bh);
+}
+
+/*
+ * When ext4 encounters a hole, it returns without modifying the buffer_head
+ * which means that we can't trust b_size. To cope with this, we set b_state
+ * to 0 before calling get_block and, if any bit is set, we know we can trust
+ * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
+ * and would save us time calling get_block repeatedly.
+ */
+static bool buffer_size_valid(struct buffer_head *bh)
+{
+ return bh->b_state != 0;
+}
+
+static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
+ loff_t start, loff_t end, get_block_t get_block,
+ struct buffer_head *bh)
+{
+ ssize_t retval = 0;
+ loff_t pos = start;
+ loff_t max = start;
+ loff_t bh_max = start;
+ void *addr;
+ bool hole = false;
+
+ if (rw != WRITE)
+ end = min(end, i_size_read(inode));
+
+ while (pos < end) {
+ unsigned len;
+ if (pos == max) {
+ unsigned blkbits = inode->i_blkbits;
+ sector_t block = pos >> blkbits;
+ unsigned first = pos - (block << blkbits);
+ long size;
+
+ if (pos == bh_max) {
+ bh->b_size = PAGE_ALIGN(end - pos);
+ bh->b_state = 0;
+ retval = get_block(inode, block, bh,
+ rw == WRITE);
+ if (retval)
+ break;
+ if (!buffer_size_valid(bh))
+ bh->b_size = 1 << blkbits;
+ bh_max = pos - first + bh->b_size;
+ } else {
+ unsigned done = bh->b_size -
+ (bh_max - (pos - first));
+ bh->b_blocknr += done >> blkbits;
+ bh->b_size -= done;
+ }
+
+ hole = (rw != WRITE) && !buffer_written(bh);
+ if (hole) {
+ addr = NULL;
+ size = bh->b_size - first;
+ } else {
+ retval = dax_get_addr(bh, &addr, blkbits);
+ if (retval < 0)
+ break;
+ if (buffer_unwritten(bh) || buffer_new(bh))
+ dax_new_buf(addr, retval, first, pos,
+ end);
+ addr += first;
+ size = retval - first;
+ }
+ max = min(pos + size, end);
+ }
+
+ if (rw == WRITE)
+ len = copy_from_iter(addr, max - pos, iter);
+ else if (!hole)
+ len = copy_to_iter(addr, max - pos, iter);
+ else
+ len = iov_iter_zero(max - pos, iter);
+
+ if (!len)
+ break;
+
+ pos += len;
+ addr += len;
+ }
+
+ return (pos == start) ? retval : pos - start;
+}
+
+/**
+ * dax_do_io - Perform I/O to a DAX file
+ * @rw: READ to read or WRITE to write
+ * @iocb: The control block for this I/O
+ * @inode: The file which the I/O is directed at
+ * @iter: The addresses to do I/O from or to
+ * @pos: The file offset where the I/O starts
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ * @end_io: A filesystem callback for I/O completion
+ * @flags: See below
+ *
+ * This function uses the same locking scheme as do_blockdev_direct_IO:
+ * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
+ * caller for writes. For reads, we take and release the i_mutex ourselves.
+ * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
+ * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
+ * is in progress.
+ */
+ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
+ struct iov_iter *iter, loff_t pos,
+ get_block_t get_block, dio_iodone_t end_io, int flags)
+{
+ struct buffer_head bh;
+ ssize_t retval = -EINVAL;
+ loff_t end = pos + iov_iter_count(iter);
+
+ memset(&bh, 0, sizeof(bh));
+
+ if ((flags & DIO_LOCKING) && (rw == READ)) {
+ struct address_space *mapping = inode->i_mapping;
+ mutex_lock(&inode->i_mutex);
+ retval = filemap_write_and_wait_range(mapping, pos, end - 1);
+ if (retval) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+ }
+
+ /* Protects against truncate */
+ atomic_inc(&inode->i_dio_count);
+
+ retval = dax_io(rw, inode, iter, pos, end, get_block, &bh);
+
+ if ((flags & DIO_LOCKING) && (rw == READ))
+ mutex_unlock(&inode->i_mutex);
+
+ if ((retval > 0) && end_io)
+ end_io(iocb, pos, retval, bh.b_private);
+
+ inode_dio_done(inode);
+ out:
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dax_do_io);
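A hedged sketch of a filesystem ->direct_IO method dispatching to dax_do_io() for DAX inodes and falling back to blockdev_direct_IO() otherwise, mirroring the ext2 hunk further down; fs_get_block stands in for the filesystem's get_block_t:

    /* Sketch: DAX-aware direct I/O dispatch. */
    static ssize_t fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
                                loff_t offset)
    {
            struct inode *inode = iocb->ki_filp->f_mapping->host;

            if (IS_DAX(inode))
                    return dax_do_io(rw, iocb, inode, iter, offset,
                                     fs_get_block, NULL, DIO_LOCKING);
            return blockdev_direct_IO(rw, iocb, inode, iter, offset, fs_get_block);
    }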
+
+/*
+ * The user has performed a load from a hole in the file. Allocating
+ * a new page in the file would cause excessive storage usage for
+ * workloads with sparse files. We allocate a page cache page instead.
+ * We'll kick it out of the page cache if it's ever written to,
+ * otherwise it will simply fall out of the page cache under memory
+ * pressure without ever having been dirtied.
+ */
+static int dax_load_hole(struct address_space *mapping, struct page *page,
+ struct vm_fault *vmf)
+{
+ unsigned long size;
+ struct inode *inode = mapping->host;
+ if (!page)
+ page = find_or_create_page(mapping, vmf->pgoff,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+ /* Recheck i_size under page lock to avoid truncate race */
+ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (vmf->pgoff >= size) {
+ unlock_page(page);
+ page_cache_release(page);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vmf->page = page;
+ return VM_FAULT_LOCKED;
+}
+
+static int copy_user_bh(struct page *to, struct buffer_head *bh,
+ unsigned blkbits, unsigned long vaddr)
+{
+ void *vfrom, *vto;
+ if (dax_get_addr(bh, &vfrom, blkbits) < 0)
+ return -EIO;
+ vto = kmap_atomic(to);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kunmap_atomic(vto);
+ return 0;
+}
+
+static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct address_space *mapping = inode->i_mapping;
+ sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
+ unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ void *addr;
+ unsigned long pfn;
+ pgoff_t size;
+ int error;
+
+ i_mmap_lock_read(mapping);
+
+ /*
+ * Check truncate didn't happen while we were allocating a block.
+ * If it did, this block may or may not be still allocated to the
+ * file. We can't tell the filesystem to free it because we can't
+ * take i_mutex here. In the worst case, the file still has blocks
+ * allocated past the end of the file.
+ */
+ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (unlikely(vmf->pgoff >= size)) {
+ error = -EIO;
+ goto out;
+ }
+
+ error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
+ if (error < 0)
+ goto out;
+ if (error < PAGE_SIZE) {
+ error = -EIO;
+ goto out;
+ }
+
+ if (buffer_unwritten(bh) || buffer_new(bh))
+ clear_page(addr);
+
+ error = vm_insert_mixed(vma, vaddr, pfn);
+
+ out:
+ i_mmap_unlock_read(mapping);
+
+ if (bh->b_end_io)
+ bh->b_end_io(bh, 1);
+
+ return error;
+}
+
+static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block)
+{
+ struct file *file = vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ struct page *page;
+ struct buffer_head bh;
+ unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ unsigned blkbits = inode->i_blkbits;
+ sector_t block;
+ pgoff_t size;
+ int error;
+ int major = 0;
+
+ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (vmf->pgoff >= size)
+ return VM_FAULT_SIGBUS;
+
+ memset(&bh, 0, sizeof(bh));
+ block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
+ bh.b_size = PAGE_SIZE;
+
+ repeat:
+ page = find_get_page(mapping, vmf->pgoff);
+ if (page) {
+ if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+ page_cache_release(page);
+ return VM_FAULT_RETRY;
+ }
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ page_cache_release(page);
+ goto repeat;
+ }
+ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (unlikely(vmf->pgoff >= size)) {
+ /*
+ * We have a struct page covering a hole in the file
+ * from a read fault and we've raced with a truncate
+ */
+ error = -EIO;
+ goto unlock_page;
+ }
+ }
+
+ error = get_block(inode, block, &bh, 0);
+ if (!error && (bh.b_size < PAGE_SIZE))
+ error = -EIO; /* fs corruption? */
+ if (error)
+ goto unlock_page;
+
+ if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
+ if (vmf->flags & FAULT_FLAG_WRITE) {
+ error = get_block(inode, block, &bh, 1);
+ count_vm_event(PGMAJFAULT);
+ mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ major = VM_FAULT_MAJOR;
+ if (!error && (bh.b_size < PAGE_SIZE))
+ error = -EIO;
+ if (error)
+ goto unlock_page;
+ } else {
+ return dax_load_hole(mapping, page, vmf);
+ }
+ }
+
+ if (vmf->cow_page) {
+ struct page *new_page = vmf->cow_page;
+ if (buffer_written(&bh))
+ error = copy_user_bh(new_page, &bh, blkbits, vaddr);
+ else
+ clear_user_highpage(new_page, vaddr);
+ if (error)
+ goto unlock_page;
+ vmf->page = page;
+ if (!page) {
+ i_mmap_lock_read(mapping);
+ /* Check we didn't race with truncate */
+ size = (i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
+ if (vmf->pgoff >= size) {
+ i_mmap_unlock_read(mapping);
+ error = -EIO;
+ goto out;
+ }
+ }
+ return VM_FAULT_LOCKED;
+ }
+
+ /* Check we didn't race with a read fault installing a new page */
+ if (!page && major)
+ page = find_lock_page(mapping, vmf->pgoff);
+
+ if (page) {
+ unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
+ PAGE_CACHE_SIZE, 0);
+ delete_from_page_cache(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+
+ error = dax_insert_mapping(inode, &bh, vma, vmf);
+
+ out:
+ if (error == -ENOMEM)
+ return VM_FAULT_OOM | major;
+ /* -EBUSY is fine, somebody else faulted on the same PTE */
+ if ((error < 0) && (error != -EBUSY))
+ return VM_FAULT_SIGBUS | major;
+ return VM_FAULT_NOPAGE | major;
+
+ unlock_page:
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ goto out;
+}
+
+/**
+ * dax_fault - handle a page fault on a DAX file
+ * @vma: The virtual memory area where the fault occurred
+ * @vmf: The description of the fault
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ *
+ * When a page fault occurs, filesystems may call this helper in their
+ * fault handler for DAX files.
+ */
+int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block)
+{
+ int result;
+ struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+
+ if (vmf->flags & FAULT_FLAG_WRITE) {
+ sb_start_pagefault(sb);
+ file_update_time(vma->vm_file);
+ }
+ result = do_dax_fault(vma, vmf, get_block);
+ if (vmf->flags & FAULT_FLAG_WRITE)
+ sb_end_pagefault(sb);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(dax_fault);
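Hooking dax_fault() up takes a thin ->fault wrapper plus an mmap method that installs the DAX vm_operations and sets VM_MIXEDMAP, as the ext2 and ext4 hunks below do. A condensed sketch with hypothetical names (fs_get_block is the filesystem's get_block_t; this sketch reuses the fault handler for page_mkwrite):

    /* Sketch: DAX page-fault wiring for a filesystem. */
    static int fs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            return dax_fault(vma, vmf, fs_get_block);
    }

    static const struct vm_operations_struct fs_dax_vm_ops = {
            .fault          = fs_dax_fault,
            .page_mkwrite   = fs_dax_fault,
    };

    static int fs_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            if (!IS_DAX(file_inode(file)))
                    return generic_file_mmap(file, vma);

            file_accessed(file);
            vma->vm_ops = &fs_dax_vm_ops;
            vma->vm_flags |= VM_MIXEDMAP;   /* PTEs map pfns that have no struct page */
            return 0;
    }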
+
+/**
+ * dax_zero_page_range - zero a range within a page of a DAX file
+ * @inode: The file being truncated
+ * @from: The file offset that is being truncated to
+ * @length: The number of bytes to zero
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ *
+ * This function can be called by a filesystem when it is zeroing part of a
+ * page in a DAX file. This is intended for hole-punch operations. If
+ * you are truncating a file, the helper function dax_truncate_page() may be
+ * more convenient.
+ *
+ * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
+ * took care of disposing of the unnecessary blocks. Even if the filesystem
+ * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
+ * since the file might be mmapped.
+ */
+int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
+ get_block_t get_block)
+{
+ struct buffer_head bh;
+ pgoff_t index = from >> PAGE_CACHE_SHIFT;
+ unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ int err;
+
+ /* Block boundary? Nothing to do */
+ if (!length)
+ return 0;
+ BUG_ON((offset + length) > PAGE_CACHE_SIZE);
+
+ memset(&bh, 0, sizeof(bh));
+ bh.b_size = PAGE_CACHE_SIZE;
+ err = get_block(inode, index, &bh, 0);
+ if (err < 0)
+ return err;
+ if (buffer_written(&bh)) {
+ void *addr;
+ err = dax_get_addr(&bh, &addr, inode->i_blkbits);
+ if (err < 0)
+ return err;
+ memset(addr + offset, 0, length);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dax_zero_page_range);
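A hedged sketch of the dispatch a filesystem's partial-page zeroing helper would perform, along the lines of the ext4 hunk later in this diff (fs_get_block and the buffered fallback name are placeholders):

    /* Sketch: zero a sub-page range, DAX-aware. */
    if (IS_DAX(inode))
            return dax_zero_page_range(inode, from, length, fs_get_block);
    return fs_buffered_zero_page_range(mapping, from, length);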
+
+/**
+ * dax_truncate_page - handle a partial page being truncated in a DAX file
+ * @inode: The file being truncated
+ * @from: The file offset that is being truncated to
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ *
+ * Similar to block_truncate_page(), this function can be called by a
+ * filesystem when it is truncating a DAX file to handle the partial page.
+ *
+ * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
+ * took care of disposing of the unnecessary blocks. Even if the filesystem
+ * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
+ * since the file might be mmapped.
+ */
+int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
+{
+ unsigned length = PAGE_CACHE_ALIGN(from) - from;
+ return dax_zero_page_range(inode, from, length, get_block);
+}
+EXPORT_SYMBOL_GPL(dax_truncate_page);
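In a truncate path the helper takes the slot otherwise filled by block_truncate_page(), as in the ext2_setsize() hunk below; a minimal sketch with a placeholder get_block:

    /* Sketch: zero the partial tail page when shrinking a DAX file. */
    if (IS_DAX(inode))
            error = dax_truncate_page(inode, newsize, fs_get_block);
    else
            error = block_truncate_page(inode->i_mapping, newsize, fs_get_block);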
diff --git a/fs/dcache.c b/fs/dcache.c
index e368d4f412f9..7d34f04ec7aa 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -38,6 +38,8 @@
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
+#include <linux/kasan.h>
+
#include "internal.h"
#include "mount.h"
@@ -400,19 +402,20 @@ static void d_shrink_add(struct dentry *dentry, struct list_head *list)
* LRU lists entirely, while shrink_move moves it to the indicated
* private list.
*/
-static void d_lru_isolate(struct dentry *dentry)
+static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
- list_del_init(&dentry->d_lru);
+ list_lru_isolate(lru, &dentry->d_lru);
}
-static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
+static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
+ struct list_head *list)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags |= DCACHE_SHRINK_LIST;
- list_move_tail(&dentry->d_lru, list);
+ list_lru_isolate_move(lru, &dentry->d_lru, list);
}
/*
@@ -869,8 +872,8 @@ static void shrink_dentry_list(struct list_head *list)
}
}
-static enum lru_status
-dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+static enum lru_status dentry_lru_isolate(struct list_head *item,
+ struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -890,7 +893,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
* another pass through the LRU.
*/
if (dentry->d_lockref.count) {
- d_lru_isolate(dentry);
+ d_lru_isolate(lru, dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
}
@@ -921,7 +924,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
return LRU_ROTATE;
}
- d_lru_shrink_move(dentry, freeable);
+ d_lru_shrink_move(lru, dentry, freeable);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
@@ -930,30 +933,28 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
/**
* prune_dcache_sb - shrink the dcache
* @sb: superblock
- * @nr_to_scan : number of entries to try to free
- * @nid: which node to scan for freeable entities
+ * @sc: shrink control, passed to list_lru_shrink_walk()
*
- * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
- * done when we need more memory an called from the superblock shrinker
+ * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
+ * is done when we need more memory and called from the superblock shrinker
* function.
*
* This function may fail to free any resources if all the dentries are in
* use.
*/
-long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid)
+long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
LIST_HEAD(dispose);
long freed;
- freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
- &dispose, &nr_to_scan);
+ freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
+ dentry_lru_isolate, &dispose);
shrink_dentry_list(&dispose);
return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
- spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -966,7 +967,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
if (!spin_trylock(&dentry->d_lock))
return LRU_SKIP;
- d_lru_shrink_move(dentry, freeable);
+ d_lru_shrink_move(lru, dentry, freeable);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
@@ -1430,6 +1431,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
}
atomic_set(&p->u.count, 1);
dname = p->name;
+ if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
+ kasan_unpoison_shadow(dname,
+ round_up(name->len + 1, sizeof(unsigned long)));
} else {
dname = dentry->d_iname;
}
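With the reworked signature the superblock shrinker hands its shrink_control straight to prune_dcache_sb(); a hedged sketch of such a caller (the wrapper name and batch size are illustrative):

    /* Sketch: superblock shrinker delegating a scan to the dcache LRU. */
    static unsigned long sb_scan_dentries(struct super_block *sb,
                                          struct shrink_control *sc)
    {
            sc->nr_to_scan = 128;           /* batch size chosen by the caller */
            return prune_dcache_sb(sb, sc); /* number of dentries freed */
    }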
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index e7cfbaf8d0e2..1e6e227134d7 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -56,13 +56,8 @@ static int send_data(struct sk_buff *skb)
{
struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
void *data = genlmsg_data(genlhdr);
- int rv;
- rv = genlmsg_end(skb, data);
- if (rv < 0) {
- nlmsg_free(skb);
- return rv;
- }
+ genlmsg_end(skb, data);
return genlmsg_unicast(&init_net, skb, listener_nlportid);
}
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 2bc2c87f35e7..5718cb9f7273 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -37,20 +37,6 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
iput(toput_inode);
}
-static void drop_slab(void)
-{
- int nr_objects;
-
- do {
- int nid;
-
- nr_objects = 0;
- for_each_online_node(nid)
- nr_objects += shrink_node_slabs(GFP_KERNEL, nid,
- 1000, 1000);
- } while (nr_objects > 10);
-}
-
int drop_caches_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 1686dc2da9fd..34b36a504059 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -67,7 +67,6 @@ static int ecryptfs_inode_set(struct inode *inode, void *opaque)
inode->i_ino = lower_inode->i_ino;
inode->i_version++;
inode->i_mapping->a_ops = &ecryptfs_aops;
- inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
if (S_ISLNK(inode->i_mode))
inode->i_op = &ecryptfs_symlink_iops;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index d9eb84bda559..1895d60f4122 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -520,7 +520,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out;
}
- rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
+ rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs");
if (rc)
goto out1;
diff --git a/fs/efivarfs/Kconfig b/fs/efivarfs/Kconfig
index 367bbb10c543..c2499ef174a2 100644
--- a/fs/efivarfs/Kconfig
+++ b/fs/efivarfs/Kconfig
@@ -1,6 +1,7 @@
config EFIVAR_FS
tristate "EFI Variable filesystem"
depends on EFI
+ default m
help
efivarfs is a replacement filesystem for the old EFI
variable support via sysfs, as it doesn't suffer from the
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 6dad1176ec52..ddbce42548c9 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -140,7 +140,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
name[len] = '-';
- efi_guid_unparse(&entry->var.VendorGuid, name + len + 1);
+ efi_guid_to_str(&entry->var.VendorGuid, name + len + 1);
name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d77f94491352..1e009cad8d5c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1639,9 +1639,9 @@ fetch_events:
spin_lock_irqsave(&ep->lock, flags);
}
- __remove_wait_queue(&ep->wq, &wait);
- set_current_state(TASK_RUNNING);
+ __remove_wait_queue(&ep->wq, &wait);
+ __set_current_state(TASK_RUNNING);
}
check_events:
/* Is it worth to try to dig for events ? */
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index f1d3d4eb8c4f..a198e94813fe 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -985,7 +985,6 @@ const struct address_space_operations exofs_aops = {
.direct_IO = exofs_direct_IO,
/* With these NULL has special meaning or default is not exported */
- .get_xip_mem = NULL,
.migratepage = NULL,
.launder_page = NULL,
.is_partially_uptodate = NULL,
@@ -1214,7 +1213,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
- inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1314,7 +1312,6 @@ struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
set_obj_2bcreated(oi);
- inode->i_mapping->backing_dev_info = sb->s_bdi;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
inode->i_blkbits = EXOFS_BLKSHIFT;
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 95965503afcb..fcc2e565f540 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -836,7 +836,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
+ ret = bdi_setup_and_register(&sbi->bdi, "exofs");
if (ret) {
EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
dput(sb->s_root);
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 14a6780fd034..c634874e12d9 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -42,14 +42,3 @@ config EXT2_FS_SECURITY
If you are not using a security module that requires using
extended attributes for file security labels, say N.
-
-config EXT2_FS_XIP
- bool "Ext2 execute in place support"
- depends on EXT2_FS && MMU
- help
- Execute in place can be used on memory-backed block devices. If you
- enable this option, you can select to mount block devices which are
- capable of this feature without using the page cache.
-
- If you do not use a block device that is capable of using this,
- or if unsure, say N.
diff --git a/fs/ext2/Makefile b/fs/ext2/Makefile
index f42af45cfd88..445b0e996a12 100644
--- a/fs/ext2/Makefile
+++ b/fs/ext2/Makefile
@@ -10,4 +10,3 @@ ext2-y := balloc.o dir.o file.o ialloc.o inode.o \
ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext2-$(CONFIG_EXT2_FS_POSIX_ACL) += acl.o
ext2-$(CONFIG_EXT2_FS_SECURITY) += xattr_security.o
-ext2-$(CONFIG_EXT2_FS_XIP) += xip.o
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index e4279ead4a05..678f9ab08c48 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -380,10 +380,15 @@ struct ext2_inode {
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
-#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
+#define EXT2_MOUNT_XIP 0x010000 /* Obsolete, use DAX */
#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
+#ifdef CONFIG_FS_DAX
+#define EXT2_MOUNT_DAX 0x100000 /* Direct Access */
+#else
+#define EXT2_MOUNT_DAX 0
+#endif
#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
@@ -788,11 +793,10 @@ extern int ext2_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
extern const struct inode_operations ext2_file_inode_operations;
extern const struct file_operations ext2_file_operations;
-extern const struct file_operations ext2_xip_file_operations;
+extern const struct file_operations ext2_dax_file_operations;
/* inode.c */
extern const struct address_space_operations ext2_aops;
-extern const struct address_space_operations ext2_aops_xip;
extern const struct address_space_operations ext2_nobh_aops;
/* namei.c */
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 7c87b22a7228..e31701713516 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -25,6 +25,36 @@
#include "xattr.h"
#include "acl.h"
+#ifdef CONFIG_FS_DAX
+static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return dax_fault(vma, vmf, ext2_get_block);
+}
+
+static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return dax_mkwrite(vma, vmf, ext2_get_block);
+}
+
+static const struct vm_operations_struct ext2_dax_vm_ops = {
+ .fault = ext2_dax_fault,
+ .page_mkwrite = ext2_dax_mkwrite,
+};
+
+static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if (!IS_DAX(file_inode(file)))
+ return generic_file_mmap(file, vma);
+
+ file_accessed(file);
+ vma->vm_ops = &ext2_dax_vm_ops;
+ vma->vm_flags |= VM_MIXEDMAP;
+ return 0;
+}
+#else
+#define ext2_file_mmap generic_file_mmap
+#endif
+
/*
* Called when filp is released. This happens when all file descriptors
* for a single struct file are closed. Note that different open() calls
@@ -70,7 +100,7 @@ const struct file_operations ext2_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ext2_file_mmap,
.open = dquot_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
@@ -78,16 +108,18 @@ const struct file_operations ext2_file_operations = {
.splice_write = iter_file_splice_write,
};
-#ifdef CONFIG_EXT2_FS_XIP
-const struct file_operations ext2_xip_file_operations = {
+#ifdef CONFIG_FS_DAX
+const struct file_operations ext2_dax_file_operations = {
.llseek = generic_file_llseek,
- .read = xip_file_read,
- .write = xip_file_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .mmap = xip_file_mmap,
+ .mmap = ext2_file_mmap,
.open = dquot_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 7d66fb0e4cca..6c14bb8322fa 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -170,7 +170,7 @@ static void ext2_preread_inode(struct inode *inode)
struct ext2_group_desc * gdp;
struct backing_dev_info *bdi;
- bdi = inode->i_mapping->backing_dev_info;
+ bdi = inode_to_bdi(inode);
if (bdi_read_congested(bdi))
return;
if (bdi_write_congested(bdi))
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 36d35c36311d..6434bc000125 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -34,7 +34,6 @@
#include <linux/aio.h>
#include "ext2.h"
#include "acl.h"
-#include "xip.h"
#include "xattr.h"
static int __ext2_write_inode(struct inode *inode, int do_sync);
@@ -731,12 +730,14 @@ static int ext2_get_blocks(struct inode *inode,
goto cleanup;
}
- if (ext2_use_xip(inode->i_sb)) {
+ if (IS_DAX(inode)) {
/*
- * we need to clear the block
+ * block must be initialised before we put it in the tree
+ * so that it's not found by another thread before it's
+ * initialised
*/
- err = ext2_clear_xip_target (inode,
- le32_to_cpu(chain[depth-1].key));
+ err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key),
+ 1 << inode->i_blkbits);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
@@ -859,7 +860,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block);
+ if (IS_DAX(inode))
+ ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block,
+ NULL, DIO_LOCKING);
+ else
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ ext2_get_block);
if (ret < 0 && (rw & WRITE))
ext2_write_failed(mapping, offset + count);
return ret;
@@ -885,11 +891,6 @@ const struct address_space_operations ext2_aops = {
.error_remove_page = generic_error_remove_page,
};
-const struct address_space_operations ext2_aops_xip = {
- .bmap = ext2_bmap,
- .get_xip_mem = ext2_get_xip_mem,
-};
-
const struct address_space_operations ext2_nobh_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
@@ -1201,8 +1202,8 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
inode_dio_wait(inode);
- if (mapping_is_xip(inode->i_mapping))
- error = xip_truncate_page(inode->i_mapping, newsize);
+ if (IS_DAX(inode))
+ error = dax_truncate_page(inode, newsize, ext2_get_block);
else if (test_opt(inode->i_sb, NOBH))
error = nobh_truncate_page(inode->i_mapping,
newsize, ext2_get_block);
@@ -1273,7 +1274,8 @@ void ext2_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT2_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
+ S_DIRSYNC | S_DAX);
if (flags & EXT2_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT2_APPEND_FL)
@@ -1284,6 +1286,8 @@ void ext2_set_inode_flags(struct inode *inode)
inode->i_flags |= S_NOATIME;
if (flags & EXT2_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
+ if (test_opt(inode->i_sb, DAX))
+ inode->i_flags |= S_DAX;
}
/* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
@@ -1384,9 +1388,9 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext2_file_inode_operations;
- if (ext2_use_xip(inode->i_sb)) {
- inode->i_mapping->a_ops = &ext2_aops_xip;
- inode->i_fop = &ext2_xip_file_operations;
+ if (test_opt(inode->i_sb, DAX)) {
+ inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_fop = &ext2_dax_file_operations;
} else if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index c268d0af1db9..148f6e3789ea 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -35,7 +35,6 @@
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
-#include "xip.h"
static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
{
@@ -105,9 +104,9 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode
return PTR_ERR(inode);
inode->i_op = &ext2_file_inode_operations;
- if (ext2_use_xip(inode->i_sb)) {
- inode->i_mapping->a_ops = &ext2_aops_xip;
- inode->i_fop = &ext2_xip_file_operations;
+ if (test_opt(inode->i_sb, DAX)) {
+ inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_fop = &ext2_dax_file_operations;
} else if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
@@ -126,9 +125,9 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return PTR_ERR(inode);
inode->i_op = &ext2_file_inode_operations;
- if (ext2_use_xip(inode->i_sb)) {
- inode->i_mapping->a_ops = &ext2_aops_xip;
- inode->i_fop = &ext2_xip_file_operations;
+ if (test_opt(inode->i_sb, DAX)) {
+ inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_fop = &ext2_dax_file_operations;
} else if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index ae55fddc26a9..d0e746e96511 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -35,7 +35,6 @@
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
-#include "xip.h"
static void ext2_sync_super(struct super_block *sb,
struct ext2_super_block *es, int wait);
@@ -292,9 +291,11 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",grpquota");
#endif
-#if defined(CONFIG_EXT2_FS_XIP)
+#ifdef CONFIG_FS_DAX
if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
seq_puts(seq, ",xip");
+ if (sbi->s_mount_opt & EXT2_MOUNT_DAX)
+ seq_puts(seq, ",dax");
#endif
if (!test_opt(sb, RESERVATION))
@@ -403,7 +404,7 @@ enum {
Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
- Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
+ Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota,
Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
};
@@ -432,6 +433,7 @@ static const match_table_t tokens = {
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_xip, "xip"},
+ {Opt_dax, "dax"},
{Opt_grpquota, "grpquota"},
{Opt_ignore, "noquota"},
{Opt_quota, "quota"},
@@ -559,10 +561,14 @@ static int parse_options(char *options, struct super_block *sb)
break;
#endif
case Opt_xip:
-#ifdef CONFIG_EXT2_FS_XIP
- set_opt (sbi->s_mount_opt, XIP);
+ ext2_msg(sb, KERN_INFO, "use dax instead of xip");
+ set_opt(sbi->s_mount_opt, XIP);
+ /* Fall through */
+ case Opt_dax:
+#ifdef CONFIG_FS_DAX
+ set_opt(sbi->s_mount_opt, DAX);
#else
- ext2_msg(sb, KERN_INFO, "xip option not supported");
+ ext2_msg(sb, KERN_INFO, "dax option not supported");
#endif
break;
@@ -877,9 +883,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
- ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
- EXT2_MOUNT_XIP if not */
-
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
(EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
@@ -909,11 +912,17 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
- if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
- if (!silent)
+ if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
+ if (blocksize != PAGE_SIZE) {
ext2_msg(sb, KERN_ERR,
- "error: unsupported blocksize for xip");
- goto failed_mount;
+ "error: unsupported blocksize for dax");
+ goto failed_mount;
+ }
+ if (!sb->s_bdev->bd_disk->fops->direct_access) {
+ ext2_msg(sb, KERN_ERR,
+ "error: device does not support dax");
+ goto failed_mount;
+ }
}
/* If the blocksize doesn't match, re-read the thing.. */
@@ -1259,7 +1268,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
struct ext2_sb_info * sbi = EXT2_SB(sb);
struct ext2_super_block * es;
- unsigned long old_mount_opt = sbi->s_mount_opt;
struct ext2_mount_options old_opts;
unsigned long old_sb_flags;
int err;
@@ -1284,22 +1292,11 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
- ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
- EXT2_MOUNT_XIP if not */
-
- if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
- ext2_msg(sb, KERN_WARNING,
- "warning: unsupported blocksize for xip");
- err = -EINVAL;
- goto restore_opts;
- }
-
es = sbi->s_es;
- if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
+ if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_DAX) {
ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
- "xip flag with busy inodes while remounting");
- sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
- sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
+ "dax flag with busy inodes while remounting");
+ sbi->s_mount_opt ^= EXT2_MOUNT_DAX;
}
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
spin_unlock(&sbi->s_lock);
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
deleted file mode 100644
index e98171a11cfe..000000000000
--- a/fs/ext2/xip.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * linux/fs/ext2/xip.c
- *
- * Copyright (C) 2005 IBM Corporation
- * Author: Carsten Otte (cotte@de.ibm.com)
- */
-
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/genhd.h>
-#include <linux/buffer_head.h>
-#include <linux/blkdev.h>
-#include "ext2.h"
-#include "xip.h"
-
-static inline int
-__inode_direct_access(struct inode *inode, sector_t block,
- void **kaddr, unsigned long *pfn)
-{
- struct block_device *bdev = inode->i_sb->s_bdev;
- const struct block_device_operations *ops = bdev->bd_disk->fops;
- sector_t sector;
-
- sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */
-
- BUG_ON(!ops->direct_access);
- return ops->direct_access(bdev, sector, kaddr, pfn);
-}
-
-static inline int
-__ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
- sector_t *result)
-{
- struct buffer_head tmp;
- int rc;
-
- memset(&tmp, 0, sizeof(struct buffer_head));
- tmp.b_size = 1 << inode->i_blkbits;
- rc = ext2_get_block(inode, pgoff, &tmp, create);
- *result = tmp.b_blocknr;
-
- /* did we get a sparse block (hole in the file)? */
- if (!tmp.b_blocknr && !rc) {
- BUG_ON(create);
- rc = -ENODATA;
- }
-
- return rc;
-}
-
-int
-ext2_clear_xip_target(struct inode *inode, sector_t block)
-{
- void *kaddr;
- unsigned long pfn;
- int rc;
-
- rc = __inode_direct_access(inode, block, &kaddr, &pfn);
- if (!rc)
- clear_page(kaddr);
- return rc;
-}
-
-void ext2_xip_verify_sb(struct super_block *sb)
-{
- struct ext2_sb_info *sbi = EXT2_SB(sb);
-
- if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) &&
- !sb->s_bdev->bd_disk->fops->direct_access) {
- sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
- ext2_msg(sb, KERN_WARNING,
- "warning: ignoring xip option - "
- "not supported by bdev");
- }
-}
-
-int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create,
- void **kmem, unsigned long *pfn)
-{
- int rc;
- sector_t block;
-
- /* first, retrieve the sector number */
- rc = __ext2_get_block(mapping->host, pgoff, create, &block);
- if (rc)
- return rc;
-
- /* retrieve address of the target data */
- rc = __inode_direct_access(mapping->host, block, kmem, pfn);
- return rc;
-}
diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h
deleted file mode 100644
index 18b34d2f31b3..000000000000
--- a/fs/ext2/xip.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * linux/fs/ext2/xip.h
- *
- * Copyright (C) 2005 IBM Corporation
- * Author: Carsten Otte (cotte@de.ibm.com)
- */
-
-#ifdef CONFIG_EXT2_FS_XIP
-extern void ext2_xip_verify_sb (struct super_block *);
-extern int ext2_clear_xip_target (struct inode *, sector_t);
-
-static inline int ext2_use_xip (struct super_block *sb)
-{
- struct ext2_sb_info *sbi = EXT2_SB(sb);
- return (sbi->s_mount_opt & EXT2_MOUNT_XIP);
-}
-int ext2_get_xip_mem(struct address_space *, pgoff_t, int,
- void **, unsigned long *);
-#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_mem)
-#else
-#define mapping_is_xip(map) 0
-#define ext2_xip_verify_sb(sb) do { } while (0)
-#define ext2_use_xip(sb) 0
-#define ext2_clear_xip_target(inode, chain) 0
-#define ext2_get_xip_mem NULL
-#endif
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9b4e7d750d4f..d4dbf3c259b3 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -466,6 +466,8 @@ static void ext3_put_super (struct super_block * sb)
}
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+ mutex_destroy(&sbi->s_orphan_lock);
+ mutex_destroy(&sbi->s_resize_lock);
kfree(sbi);
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a75fba67bb1f..982d934fd9ac 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -965,6 +965,11 @@ struct ext4_inode_info {
#define EXT4_MOUNT_ERRORS_MASK 0x00070
#define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */
#define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/
+#ifdef CONFIG_FS_DAX
+#define EXT4_MOUNT_DAX 0x00200 /* Direct Access */
+#else
+#define EXT4_MOUNT_DAX 0
+#endif
#define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */
#define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */
#define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */
@@ -2578,6 +2583,7 @@ extern const struct file_operations ext4_dir_operations;
/* file.c */
extern const struct inode_operations ext4_file_inode_operations;
extern const struct file_operations ext4_file_operations;
+extern const struct file_operations ext4_dax_file_operations;
extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
/* inline.c */
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8131be8c0af3..33a09da16c9c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -95,7 +95,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file_inode(iocb->ki_filp);
struct mutex *aio_mutex = NULL;
struct blk_plug plug;
- int o_direct = file->f_flags & O_DIRECT;
+ int o_direct = io_is_direct(file);
int overwrite = 0;
size_t length = iov_iter_count(from);
ssize_t ret;
@@ -191,17 +191,41 @@ errout:
return ret;
}
+#ifdef CONFIG_FS_DAX
+static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return dax_fault(vma, vmf, ext4_get_block);
+ /* Is this the right get_block? */
+}
+
+static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return dax_mkwrite(vma, vmf, ext4_get_block);
+}
+
+static const struct vm_operations_struct ext4_dax_vm_ops = {
+ .fault = ext4_dax_fault,
+ .page_mkwrite = ext4_dax_mkwrite,
+};
+#else
+#define ext4_dax_vm_ops ext4_file_vm_ops
+#endif
+
static const struct vm_operations_struct ext4_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = ext4_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
- vma->vm_ops = &ext4_file_vm_ops;
+ if (IS_DAX(file_inode(file))) {
+ vma->vm_ops = &ext4_dax_vm_ops;
+ vma->vm_flags |= VM_MIXEDMAP;
+ } else {
+ vma->vm_ops = &ext4_file_vm_ops;
+ }
return 0;
}
@@ -600,6 +624,26 @@ const struct file_operations ext4_file_operations = {
.fallocate = ext4_fallocate,
};
+#ifdef CONFIG_FS_DAX
+const struct file_operations ext4_dax_file_operations = {
+ .llseek = ext4_llseek,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = ext4_file_write_iter,
+ .unlocked_ioctl = ext4_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ext4_compat_ioctl,
+#endif
+ .mmap = ext4_file_mmap,
+ .open = ext4_file_open,
+ .release = ext4_release_file,
+ .fsync = ext4_sync_file,
+ /* Splice not yet supported with DAX */
+ .fallocate = ext4_fallocate,
+};
+#endif
+
const struct inode_operations ext4_file_inode_operations = {
.setattr = ext4_setattr,
.getattr = ext4_getattr,
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 36b369697a13..6b9878a24182 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -689,14 +689,22 @@ retry:
inode_dio_done(inode);
goto locked;
}
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iter, offset,
- ext4_get_block, NULL, NULL, 0);
+ if (IS_DAX(inode))
+ ret = dax_do_io(rw, iocb, inode, iter, offset,
+ ext4_get_block, NULL, 0);
+ else
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iter, offset,
+ ext4_get_block, NULL, NULL, 0);
inode_dio_done(inode);
} else {
locked:
- ret = blockdev_direct_IO(rw, iocb, inode, iter,
- offset, ext4_get_block);
+ if (IS_DAX(inode))
+ ret = dax_do_io(rw, iocb, inode, iter, offset,
+ ext4_get_block, NULL, DIO_LOCKING);
+ else
+ ret = blockdev_direct_IO(rw, iocb, inode, iter,
+ offset, ext4_get_block);
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5653fa42930b..28555f191b62 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -657,6 +657,18 @@ has_zeroout:
return retval;
}
+static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
+{
+ struct inode *inode = bh->b_assoc_map->host;
+ /* XXX: breaks on 32-bit > 16GB. Is that even supported? */
+ loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
+ int err;
+ if (!uptodate)
+ return;
+ WARN_ON(!buffer_unwritten(bh));
+ err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
+}
+
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
@@ -694,6 +706,11 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+ if (IS_DAX(inode) && buffer_unwritten(bh) && !io_end) {
+ bh->b_assoc_map = inode->i_mapping;
+ bh->b_private = (void *)(unsigned long)iblock;
+ bh->b_end_io = ext4_end_io_unwritten;
+ }
if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
set_buffer_defer_completion(bh);
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -3010,13 +3027,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iter,
- offset,
- get_block_func,
- ext4_end_io_dio,
- NULL,
- dio_flags);
+ if (IS_DAX(inode))
+ ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
+ ext4_end_io_dio, dio_flags);
+ else
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iter, offset,
+ get_block_func,
+ ext4_end_io_dio, NULL, dio_flags);
/*
* Put our reference to io_end. This can free the io_end structure e.g.
@@ -3180,19 +3198,12 @@ void ext4_set_aops(struct inode *inode)
inode->i_mapping->a_ops = &ext4_aops;
}
-/*
- * ext4_block_zero_page_range() zeros out a mapping of length 'length'
- * starting from file offset 'from'. The range to be zero'd must
- * be contained with in one block. If the specified range exceeds
- * the end of the block it will be shortened to end of the block
- * that cooresponds to 'from'
- */
-static int ext4_block_zero_page_range(handle_t *handle,
+static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned blocksize, max, pos;
+ unsigned blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
@@ -3205,14 +3216,6 @@ static int ext4_block_zero_page_range(handle_t *handle,
return -ENOMEM;
blocksize = inode->i_sb->s_blocksize;
- max = blocksize - (offset & (blocksize - 1));
-
- /*
- * correct length if it does not fall between
- * 'from' and the end of the block
- */
- if (length > max || length < 0)
- length = max;
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
@@ -3278,6 +3281,33 @@ unlock:
}
/*
+ * ext4_block_zero_page_range() zeros out a mapping of length 'length'
+ * starting from file offset 'from'. The range to be zeroed must
+ * be contained within one block. If the specified range exceeds
+ * the end of the block, it will be shortened to the end of the block
+ * that corresponds to 'from'.
+ */
+static int ext4_block_zero_page_range(handle_t *handle,
+ struct address_space *mapping, loff_t from, loff_t length)
+{
+ struct inode *inode = mapping->host;
+ unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned blocksize = inode->i_sb->s_blocksize;
+ unsigned max = blocksize - (offset & (blocksize - 1));
+
+ /*
+ * correct length if it does not fall between
+ * 'from' and the end of the block
+ */
+ if (length > max || length < 0)
+ length = max;
+
+ if (IS_DAX(inode))
+ return dax_zero_page_range(inode, from, length, ext4_get_block);
+ return __ext4_block_zero_page_range(handle, mapping, from, length);
+}
+
+/*
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
* up to the end of the block which corresponds to `from'.
* This required during truncate. We need to physically zero the tail end
@@ -3798,8 +3828,10 @@ void ext4_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
+ if (test_opt(inode->i_sb, DAX))
+ new_fl |= S_DAX;
inode_set_flags(inode, new_fl,
- S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
@@ -4052,7 +4084,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
- inode->i_fop = &ext4_file_operations;
+ if (test_opt(inode->i_sb, DAX))
+ inode->i_fop = &ext4_dax_file_operations;
+ else
+ inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext4_dir_inode_operations;
@@ -4534,7 +4569,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
*/
- truncate_pagecache(inode, inode->i_size);
+ truncate_pagecache(inode, inode->i_size);
}
/*
* We want to call ext4_truncate() even if attr->ia_size ==
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2291923dae4e..28fe71a2904c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2235,7 +2235,10 @@ retry:
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext4_file_inode_operations;
- inode->i_fop = &ext4_file_operations;
+ if (test_opt(inode->i_sb, DAX))
+ inode->i_fop = &ext4_dax_file_operations;
+ else
+ inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
err = ext4_add_nondir(handle, dentry, inode);
if (!err && IS_DIRSYNC(dir))
@@ -2299,7 +2302,10 @@ retry:
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext4_file_inode_operations;
- inode->i_fop = &ext4_file_operations;
+ if (test_opt(inode->i_sb, DAX))
+ inode->i_fop = &ext4_dax_file_operations;
+ else
+ inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
d_tmpfile(dentry, inode);
err = ext4_orphan_add(handle, inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 74c5f53595fb..10e8c6b7ca08 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -334,7 +334,7 @@ static void save_error_info(struct super_block *sb, const char *func,
static int block_device_ejected(struct super_block *sb)
{
struct inode *bd_inode = sb->s_bdev->bd_inode;
- struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
return bdi->dev == NULL;
}
@@ -1046,10 +1046,7 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
struct path *path);
-static int ext4_quota_on_sysfile(struct super_block *sb, int type,
- int format_id);
static int ext4_quota_off(struct super_block *sb, int type);
-static int ext4_quota_off_sysfile(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -1084,16 +1081,6 @@ static const struct quotactl_ops ext4_qctl_operations = {
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
};
-
-static const struct quotactl_ops ext4_qctl_sysfile_operations = {
- .quota_on_meta = ext4_quota_on_sysfile,
- .quota_off = ext4_quota_off_sysfile,
- .quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
- .set_info = dquot_set_dqinfo,
- .get_dqblk = dquot_get_dqblk,
- .set_dqblk = dquot_set_dqblk
-};
#endif
static const struct super_operations ext4_sops = {
@@ -1137,7 +1124,7 @@ enum {
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
- Opt_usrquota, Opt_grpquota, Opt_i_version,
+ Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1200,6 +1187,7 @@ static const match_table_t tokens = {
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_i_version, "i_version"},
+ {Opt_dax, "dax"},
{Opt_stripe, "stripe=%u"},
{Opt_delalloc, "delalloc"},
{Opt_nodelalloc, "nodelalloc"},
@@ -1384,6 +1372,7 @@ static const struct mount_opts {
{Opt_min_batch_time, 0, MOPT_GTE0},
{Opt_inode_readahead_blks, 0, MOPT_GTE0},
{Opt_init_itable, 0, MOPT_GTE0},
+ {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
{Opt_stripe, 0, MOPT_GTE0},
{Opt_resuid, 0, MOPT_GTE0},
{Opt_resgid, 0, MOPT_GTE0},
@@ -1620,6 +1609,11 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
}
sbi->s_jquota_fmt = m->mount_opt;
#endif
+#ifndef CONFIG_FS_DAX
+ } else if (token == Opt_dax) {
+ ext4_msg(sb, KERN_INFO, "dax option not supported");
+ return -1;
+#endif
} else {
if (!args->from)
arg = 1;
@@ -3602,6 +3596,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"both data=journal and dioread_nolock");
goto failed_mount;
}
+ if (test_opt(sb, DAX)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dax");
+ goto failed_mount;
+ }
if (test_opt(sb, DELALLOC))
clear_opt(sb, DELALLOC);
}
@@ -3665,6 +3664,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
+ if (blocksize != PAGE_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+ "error: unsupported blocksize for dax");
+ goto failed_mount;
+ }
+ if (!sb->s_bdev->bd_disk->fops->direct_access) {
+ ext4_msg(sb, KERN_ERR,
+ "error: device does not support dax");
+ goto failed_mount;
+ }
+ }
+
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
@@ -3935,7 +3947,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_QUOTA
sb->dq_op = &ext4_quota_operations;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
- sb->s_qcop = &ext4_qctl_sysfile_operations;
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
else
sb->s_qcop = &ext4_qctl_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
@@ -4882,6 +4894,18 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
goto restore_opts;
}
+ if (test_opt(sb, DAX)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dax");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ }
+
+ if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
+ ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
+ "dax flag with busy inodes while remounting");
+ sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
}
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
@@ -5288,21 +5312,6 @@ static int ext4_enable_quotas(struct super_block *sb)
return 0;
}
-/*
- * quota_on function that is used when QUOTA feature is set.
- */
-static int ext4_quota_on_sysfile(struct super_block *sb, int type,
- int format_id)
-{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
- return -EINVAL;
-
- /*
- * USAGE was enabled at mount time. Only need to enable LIMITS now.
- */
- return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
-}
-
static int ext4_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
@@ -5329,18 +5338,6 @@ out:
return dquot_quota_off(sb, type);
}
-/*
- * quota_off function that is used when QUOTA feature is set.
- */
-static int ext4_quota_off_sysfile(struct super_block *sb, int type)
-{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
- return -EINVAL;
-
- /* Disable only the limits. */
- return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
-}
-
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and no one else should touch the files)
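The ext4 hunks above wire up a new "dax" mount option (the Opt_dax token) and have ext4_fill_super() refuse it unless the block size equals PAGE_SIZE and the block device provides a ->direct_access() method. As a minimal userspace sketch, not part of the patch itself, the option can be exercised through mount(2); the device /dev/pmem0 and mount point /mnt/pmem are hypothetical stand-ins for any DAX-capable setup:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	/* "dax" is the filesystem-specific option token added to ext4's
	 * mount option table above; it is passed via the data argument. */
	if (mount("/dev/pmem0", "/mnt/pmem", "ext4", 0, "dax") != 0) {
		fprintf(stderr, "mount failed: %s\n", strerror(errno));
		return 1;
	}
	printf("ext4 mounted with dax\n");
	return 0;
}

If the prerequisites checked in ext4_fill_super() are not met, the call simply fails and the corresponding ext4_msg() errors above appear in the kernel log.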
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 736a348509f7..94e2d2ffabe1 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -71,3 +71,13 @@ config F2FS_CHECK_FS
Enables BUG_ONs which check the filesystem consistency in runtime.
If you want to improve the performance, say N.
+
+config F2FS_IO_TRACE
+ bool "F2FS IO tracer"
+ depends on F2FS_FS
+ depends on FUNCTION_TRACER
+ help
+ F2FS IO trace is based on function tracing, which gathers process
+ information and block IO patterns at the filesystem level.
+
+ If unsure, say N.
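The help text above ties the F2FS IO tracer to the kernel's function tracer. A minimal sketch of watching its output, assuming the records land in the ftrace ring buffer and that tracefs is mounted at /sys/kernel/tracing (both assumptions; the exact path varies by system):

#include <stdio.h>

int main(void)
{
	/* Path is an assumption; older setups expose the same files under
	 * /sys/kernel/debug/tracing instead. */
	FILE *fp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[512];

	if (!fp) {
		perror("fopen trace_pipe");
		return 1;
	}
	/* trace_pipe blocks until new records arrive, then drains them. */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return 0;
}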
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 2e35da12d292..d92397731db8 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -5,3 +5,4 @@ f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o
f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
+f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 1ccb26bc2a0b..742202779bd5 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -62,7 +62,7 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
if (count == 0)
return NULL;
- acl = posix_acl_alloc(count, GFP_KERNEL);
+ acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
@@ -116,7 +116,7 @@ static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
int i;
f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
- sizeof(struct f2fs_acl_entry), GFP_KERNEL);
+ sizeof(struct f2fs_acl_entry), GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
@@ -396,7 +396,7 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
posix_acl_release(default_acl);
}
if (acl) {
- if (error)
+ if (!error)
error = __f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl,
ipage);
posix_acl_release(acl);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index e6c271fefaca..7f794b72b3b7 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -20,10 +20,11 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
+#include "trace.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *ino_entry_slab;
-static struct kmem_cache *inode_entry_slab;
+struct kmem_cache *inode_entry_slab;
/*
* We guarantee no failure on the returned page.
@@ -50,6 +51,11 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
struct address_space *mapping = META_MAPPING(sbi);
struct page *page;
+ struct f2fs_io_info fio = {
+ .type = META,
+ .rw = READ_SYNC | REQ_META | REQ_PRIO,
+ .blk_addr = index,
+ };
repeat:
page = grab_cache_page(mapping, index);
if (!page) {
@@ -59,8 +65,7 @@ repeat:
if (PageUptodate(page))
goto out;
- if (f2fs_submit_page_bio(sbi, page, index,
- READ_SYNC | REQ_META | REQ_PRIO))
+ if (f2fs_submit_page_bio(sbi, page, &fio))
goto repeat;
lock_page(page);
@@ -112,14 +117,12 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
block_t prev_blk_addr = 0;
struct page *page;
block_t blkno = start;
-
struct f2fs_io_info fio = {
.type = META,
.rw = READ_SYNC | REQ_META | REQ_PRIO
};
for (; nrpages-- > 0; blkno++) {
- block_t blk_addr;
if (!is_valid_blkaddr(sbi, blkno, type))
goto out;
@@ -130,27 +133,27 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
blkno = 0;
/* get nat block addr */
- blk_addr = current_nat_addr(sbi,
+ fio.blk_addr = current_nat_addr(sbi,
blkno * NAT_ENTRY_PER_BLOCK);
break;
case META_SIT:
/* get sit block addr */
- blk_addr = current_sit_addr(sbi,
+ fio.blk_addr = current_sit_addr(sbi,
blkno * SIT_ENTRY_PER_BLOCK);
- if (blkno != start && prev_blk_addr + 1 != blk_addr)
+ if (blkno != start && prev_blk_addr + 1 != fio.blk_addr)
goto out;
- prev_blk_addr = blk_addr;
+ prev_blk_addr = fio.blk_addr;
break;
case META_SSA:
case META_CP:
case META_POR:
- blk_addr = blkno;
+ fio.blk_addr = blkno;
break;
default:
BUG();
}
- page = grab_cache_page(META_MAPPING(sbi), blk_addr);
+ page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr);
if (!page)
continue;
if (PageUptodate(page)) {
@@ -158,7 +161,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
continue;
}
- f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
+ f2fs_submit_page_mbio(sbi, page, &fio);
f2fs_put_page(page, 0);
}
out:
@@ -187,7 +190,7 @@ static int f2fs_write_meta_page(struct page *page,
trace_f2fs_writepage(page, META);
- if (unlikely(sbi->por_doing))
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
goto redirty_out;
@@ -299,6 +302,8 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
+ SetPagePrivate(page);
+ f2fs_trace_pid(page);
return 1;
}
return 0;
@@ -308,6 +313,8 @@ const struct address_space_operations f2fs_meta_aops = {
.writepage = f2fs_write_meta_page,
.writepages = f2fs_write_meta_pages,
.set_page_dirty = f2fs_set_meta_page_dirty,
+ .invalidatepage = f2fs_invalidate_page,
+ .releasepage = f2fs_release_page,
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -462,7 +469,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
return;
- sbi->por_doing = true;
+ set_sbi_flag(sbi, SBI_POR_DOING);
start_blk = __start_cp_addr(sbi) + 1 +
le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
@@ -483,7 +490,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
}
/* clear Orphan Flag */
clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
- sbi->por_doing = false;
+ clear_sbi_flag(sbi, SBI_POR_DOING);
return;
}
@@ -567,7 +574,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
if (crc_offset >= blk_size)
goto invalid_cp1;
- crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
+ crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp1;
@@ -582,7 +589,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
if (crc_offset >= blk_size)
goto invalid_cp2;
- crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
+ crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp2;
@@ -669,7 +676,7 @@ fail_no_cp:
return -EINVAL;
}
-static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
+static int __add_dirty_inode(struct inode *inode, struct inode_entry *new)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -686,7 +693,7 @@ static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
void update_dirty_page(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dir_inode_entry *new;
+ struct inode_entry *new;
int ret = 0;
if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
@@ -710,12 +717,13 @@ void update_dirty_page(struct inode *inode, struct page *page)
kmem_cache_free(inode_entry_slab, new);
out:
SetPagePrivate(page);
+ f2fs_trace_pid(page);
}
void add_dirty_dir_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dir_inode_entry *new =
+ struct inode_entry *new =
f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
int ret = 0;
@@ -733,7 +741,7 @@ void add_dirty_dir_inode(struct inode *inode)
void remove_dirty_dir_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dir_inode_entry *entry;
+ struct inode_entry *entry;
if (!S_ISDIR(inode->i_mode))
return;
@@ -763,7 +771,7 @@ void remove_dirty_dir_inode(struct inode *inode)
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
struct list_head *head;
- struct dir_inode_entry *entry;
+ struct inode_entry *entry;
struct inode *inode;
retry:
if (unlikely(f2fs_cp_error(sbi)))
@@ -776,7 +784,7 @@ retry:
spin_unlock(&sbi->dir_inode_lock);
return;
}
- entry = list_entry(head->next, struct dir_inode_entry, list);
+ entry = list_entry(head->next, struct inode_entry, list);
inode = igrab(entry->inode);
spin_unlock(&sbi->dir_inode_lock);
if (inode) {
@@ -922,7 +930,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ckpt->next_free_nid = cpu_to_le32(last_nid);
/* 2 cp + n data seg summary + orphan inode blocks */
- data_sum_blocks = npages_for_summary_flush(sbi);
+ data_sum_blocks = npages_for_summary_flush(sbi, false);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
@@ -932,24 +940,31 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
orphan_blocks);
- if (cpc->reason == CP_UMOUNT) {
- set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ if (__remain_node_summaries(cpc->reason))
ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
cp_payload_blks + data_sum_blocks +
orphan_blocks + NR_CURSEG_NODE_TYPE);
- } else {
- clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ else
ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
cp_payload_blks + data_sum_blocks +
orphan_blocks);
- }
+
+ if (cpc->reason == CP_UMOUNT)
+ set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ else
+ clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+
+ if (cpc->reason == CP_FASTBOOT)
+ set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+ else
+ clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
if (orphan_num)
set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
else
clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
- if (sbi->need_fsck)
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
set_ckpt_flags(ckpt, CP_FSCK_FLAG);
/* update SIT/NAT bitmap */
@@ -966,15 +981,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* write out checkpoint buffer at block 0 */
cp_page = grab_meta_page(sbi, start_blk++);
kaddr = page_address(cp_page);
- memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
+ memcpy(kaddr, ckpt, F2FS_BLKSIZE);
set_page_dirty(cp_page);
f2fs_put_page(cp_page, 1);
for (i = 1; i < 1 + cp_payload_blks; i++) {
cp_page = grab_meta_page(sbi, start_blk++);
kaddr = page_address(cp_page);
- memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE,
- (1 << sbi->log_blocksize));
+ memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE, F2FS_BLKSIZE);
set_page_dirty(cp_page);
f2fs_put_page(cp_page, 1);
}
@@ -986,7 +1000,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
write_data_summaries(sbi, start_blk);
start_blk += data_sum_blocks;
- if (cpc->reason == CP_UMOUNT) {
+ if (__remain_node_summaries(cpc->reason)) {
write_node_summaries(sbi, start_blk);
start_blk += NR_CURSEG_NODE_TYPE;
}
@@ -994,7 +1008,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* writeout checkpoint block */
cp_page = grab_meta_page(sbi, start_blk);
kaddr = page_address(cp_page);
- memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
+ memcpy(kaddr, ckpt, F2FS_BLKSIZE);
set_page_dirty(cp_page);
f2fs_put_page(cp_page, 1);
@@ -1023,7 +1037,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return;
clear_prefree_segments(sbi);
- F2FS_RESET_SB_DIRT(sbi);
+ clear_sbi_flag(sbi, SBI_IS_DIRTY);
}
/*
@@ -1038,10 +1052,13 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
mutex_lock(&sbi->cp_mutex);
- if (!sbi->s_dirty && cpc->reason != CP_DISCARD)
+ if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
+ cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT)
goto out;
if (unlikely(f2fs_cp_error(sbi)))
goto out;
+ if (f2fs_readonly(sbi->sb))
+ goto out;
if (block_operations(sbi))
goto out;
@@ -1102,8 +1119,8 @@ int __init create_checkpoint_caches(void)
sizeof(struct ino_entry));
if (!ino_entry_slab)
return -ENOMEM;
- inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
- sizeof(struct dir_inode_entry));
+ inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
+ sizeof(struct inode_entry));
if (!inode_entry_slab) {
kmem_cache_destroy(ino_entry_slab);
return -ENOMEM;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7ec697b37f19..985ed023a750 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -22,6 +22,7 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
+#include "trace.h"
#include <trace/events/f2fs.h>
static void f2fs_read_end_io(struct bio *bio, int err)
@@ -95,11 +96,9 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
return;
if (is_read_io(fio->rw))
- trace_f2fs_submit_read_bio(io->sbi->sb, fio->rw,
- fio->type, io->bio);
+ trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
else
- trace_f2fs_submit_write_bio(io->sbi->sb, fio->rw,
- fio->type, io->bio);
+ trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
submit_bio(fio->rw, io->bio);
io->bio = NULL;
@@ -132,14 +131,15 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
* Return unlocked page.
*/
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
- block_t blk_addr, int rw)
+ struct f2fs_io_info *fio)
{
struct bio *bio;
- trace_f2fs_submit_page_bio(page, blk_addr, rw);
+ trace_f2fs_submit_page_bio(page, fio);
+ f2fs_trace_ios(page, fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));
+ bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));
if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
bio_put(bio);
@@ -147,12 +147,12 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
return -EFAULT;
}
- submit_bio(rw, bio);
+ submit_bio(fio->rw, bio);
return 0;
}
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
- block_t blk_addr, struct f2fs_io_info *fio)
+ struct f2fs_io_info *fio)
{
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io;
@@ -160,21 +160,21 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
- verify_block_addr(sbi, blk_addr);
+ verify_block_addr(sbi, fio->blk_addr);
down_write(&io->io_rwsem);
if (!is_read)
inc_page_count(sbi, F2FS_WRITEBACK);
- if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
+ if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
io->fio.rw != fio->rw))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
int bio_blocks = MAX_BIO_BLOCKS(sbi);
- io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
+ io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
io->fio = *fio;
}
@@ -184,10 +184,11 @@ alloc_new:
goto alloc_new;
}
- io->last_block_in_bio = blk_addr;
+ io->last_block_in_bio = fio->blk_addr;
+ f2fs_trace_ios(page, fio, 0);
up_write(&io->io_rwsem);
- trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
+ trace_f2fs_submit_page_mbio(page, fio);
}
/*
@@ -196,7 +197,7 @@ alloc_new:
* ->node_page
* update block addresses in the node page
*/
-static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
+static void __set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn;
__le32 *addr_array;
@@ -209,7 +210,7 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
/* Get physical address of data block */
addr_array = blkaddr_in_node(rn);
- addr_array[ofs_in_node] = cpu_to_le32(new_addr);
+ addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
set_page_dirty(node_page);
}
@@ -224,8 +225,8 @@ int reserve_new_block(struct dnode_of_data *dn)
trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
- __set_data_blkaddr(dn, NEW_ADDR);
dn->data_blkaddr = NEW_ADDR;
+ __set_data_blkaddr(dn);
mark_inode_dirty(dn->inode);
sync_inode_page(dn);
return 0;
@@ -273,7 +274,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
unsigned int blkbits = inode->i_sb->s_blocksize_bits;
size_t count;
- clear_buffer_new(bh_result);
+ set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb,
start_blkaddr + pgofs - start_fofs);
count = end_fofs - pgofs + 1;
@@ -290,23 +291,24 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
return 0;
}
-void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
+void update_extent_cache(struct dnode_of_data *dn)
{
struct f2fs_inode_info *fi = F2FS_I(dn->inode);
pgoff_t fofs, start_fofs, end_fofs;
block_t start_blkaddr, end_blkaddr;
int need_update = true;
- f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
- dn->ofs_in_node;
+ f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
/* Update the page address in the parent node */
- __set_data_blkaddr(dn, blk_addr);
+ __set_data_blkaddr(dn);
if (is_inode_flag_set(fi, FI_NO_EXTENT))
return;
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+ dn->ofs_in_node;
+
write_lock(&fi->ext.ext_lock);
start_fofs = fi->ext.fofs;
@@ -320,16 +322,16 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
/* Initial extent */
if (fi->ext.len == 0) {
- if (blk_addr != NULL_ADDR) {
+ if (dn->data_blkaddr != NULL_ADDR) {
fi->ext.fofs = fofs;
- fi->ext.blk_addr = blk_addr;
+ fi->ext.blk_addr = dn->data_blkaddr;
fi->ext.len = 1;
}
goto end_update;
}
/* Front merge */
- if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
+ if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
fi->ext.fofs--;
fi->ext.blk_addr--;
fi->ext.len++;
@@ -337,7 +339,7 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
}
/* Back merge */
- if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
+ if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
fi->ext.len++;
goto end_update;
}
@@ -376,6 +378,10 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
struct dnode_of_data dn;
struct page *page;
int err;
+ struct f2fs_io_info fio = {
+ .type = DATA,
+ .rw = sync ? READ_SYNC : READA,
+ };
page = find_get_page(mapping, index);
if (page && PageUptodate(page))
@@ -404,8 +410,8 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
return page;
}
- err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr,
- sync ? READ_SYNC : READA);
+ fio.blk_addr = dn.data_blkaddr;
+ err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
if (err)
return ERR_PTR(err);
@@ -430,7 +436,10 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
struct dnode_of_data dn;
struct page *page;
int err;
-
+ struct f2fs_io_info fio = {
+ .type = DATA,
+ .rw = READ_SYNC,
+ };
repeat:
page = grab_cache_page(mapping, index);
if (!page)
@@ -464,8 +473,8 @@ repeat:
return page;
}
- err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
- dn.data_blkaddr, READ_SYNC);
+ fio.blk_addr = dn.data_blkaddr;
+ err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
if (err)
return ERR_PTR(err);
@@ -515,8 +524,12 @@ repeat:
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
} else {
- err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
- dn.data_blkaddr, READ_SYNC);
+ struct f2fs_io_info fio = {
+ .type = DATA,
+ .rw = READ_SYNC,
+ .blk_addr = dn.data_blkaddr,
+ };
+ err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
if (err)
goto put_err;
@@ -550,30 +563,25 @@ static int __allocate_data_block(struct dnode_of_data *dn)
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_inode_info *fi = F2FS_I(dn->inode);
struct f2fs_summary sum;
- block_t new_blkaddr;
struct node_info ni;
+ int seg = CURSEG_WARM_DATA;
pgoff_t fofs;
- int type;
if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
return -EPERM;
if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
return -ENOSPC;
- __set_data_blkaddr(dn, NEW_ADDR);
- dn->data_blkaddr = NEW_ADDR;
-
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
- type = CURSEG_WARM_DATA;
+ if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
+ seg = CURSEG_DIRECT_IO;
- allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);
+ allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);
/* direct IO doesn't use extent cache to maximize the performance */
- set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
- update_extent_cache(new_blkaddr, dn);
- clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
+ __set_data_blkaddr(dn);
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
@@ -581,10 +589,59 @@ static int __allocate_data_block(struct dnode_of_data *dn)
if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
- dn->data_blkaddr = new_blkaddr;
return 0;
}
+static void __allocate_data_blocks(struct inode *inode, loff_t offset,
+ size_t count)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ u64 start = F2FS_BYTES_TO_BLK(offset);
+ u64 len = F2FS_BYTES_TO_BLK(count);
+ bool allocated;
+ u64 end_offset;
+
+ while (len) {
+ f2fs_balance_fs(sbi);
+ f2fs_lock_op(sbi);
+
+ /* When reading holes, we need its node page */
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (get_dnode_of_data(&dn, start, ALLOC_NODE))
+ goto out;
+
+ allocated = false;
+ end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+
+ while (dn.ofs_in_node < end_offset && len) {
+ if (dn.data_blkaddr == NULL_ADDR) {
+ if (__allocate_data_block(&dn))
+ goto sync_out;
+ allocated = true;
+ }
+ len--;
+ start++;
+ dn.ofs_in_node++;
+ }
+
+ if (allocated)
+ sync_inode_page(&dn);
+
+ f2fs_put_dnode(&dn);
+ f2fs_unlock_op(sbi);
+ }
+ return;
+
+sync_out:
+ if (allocated)
+ sync_inode_page(&dn);
+ f2fs_put_dnode(&dn);
+out:
+ f2fs_unlock_op(sbi);
+ return;
+}
+
/*
* get_data_block() now supports readahead/bmap/rw direct_IO with mapped bh.
* If original data blocks are allocated, then give them to blockdev.
@@ -610,10 +667,8 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
if (check_extent_cache(inode, pgofs, bh_result))
goto out;
- if (create) {
- f2fs_balance_fs(F2FS_I_SB(inode));
+ if (create)
f2fs_lock_op(F2FS_I_SB(inode));
- }
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -627,12 +682,14 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
goto put_out;
if (dn.data_blkaddr != NULL_ADDR) {
+ set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
} else if (create) {
err = __allocate_data_block(&dn);
if (err)
goto put_out;
allocated = true;
+ set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
} else {
goto put_out;
@@ -745,7 +802,6 @@ static int f2fs_read_data_pages(struct file *file,
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
struct inode *inode = page->mapping->host;
- block_t old_blkaddr, new_blkaddr;
struct dnode_of_data dn;
int err = 0;
@@ -754,10 +810,10 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
if (err)
return err;
- old_blkaddr = dn.data_blkaddr;
+ fio->blk_addr = dn.data_blkaddr;
/* This page is already truncated */
- if (old_blkaddr == NULL_ADDR)
+ if (fio->blk_addr == NULL_ADDR)
goto out_writepage;
set_page_writeback(page);
@@ -766,14 +822,14 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (unlikely(old_blkaddr != NEW_ADDR &&
+ if (unlikely(fio->blk_addr != NEW_ADDR &&
!is_cold_data(page) &&
need_inplace_update(inode))) {
- rewrite_data_page(page, old_blkaddr, fio);
+ rewrite_data_page(page, fio);
set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
} else {
- write_data_page(page, &dn, &new_blkaddr, fio);
- update_extent_cache(new_blkaddr, &dn);
+ write_data_page(page, &dn, fio);
+ update_extent_cache(&dn);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
}
out_writepage:
@@ -812,7 +868,12 @@ static int f2fs_write_data_page(struct page *page,
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
- if (unlikely(sbi->por_doing))
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto redirty_out;
+ if (f2fs_is_drop_cache(inode))
+ goto out;
+ if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
+ available_free_memory(sbi, BASE_CHECK))
goto redirty_out;
/* Dentry blocks are controlled by checkpoint */
@@ -826,7 +887,6 @@ write:
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
SetPageError(page);
- unlock_page(page);
goto out;
}
@@ -1002,8 +1062,12 @@ put_next:
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
} else {
- err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
- READ_SYNC);
+ struct f2fs_io_info fio = {
+ .type = DATA,
+ .rw = READ_SYNC,
+ .blk_addr = dn.data_blkaddr,
+ };
+ err = f2fs_submit_page_bio(sbi, page, &fio);
if (err)
goto fail;
@@ -1092,6 +1156,9 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+ if (rw & WRITE)
+ __allocate_data_blocks(inode, offset, count);
+
err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
if (err < 0 && (rw & WRITE))
f2fs_write_failed(mapping, offset + count);
@@ -1101,24 +1168,33 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
return err;
}
-static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
- unsigned int length)
+void f2fs_invalidate_page(struct page *page, unsigned int offset,
+ unsigned int length)
{
struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
+ if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
+ (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
return;
- if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
- invalidate_inmem_page(inode, page);
-
- if (PageDirty(page))
- inode_dec_dirty_pages(inode);
+ if (PageDirty(page)) {
+ if (inode->i_ino == F2FS_META_INO(sbi))
+ dec_page_count(sbi, F2FS_DIRTY_META);
+ else if (inode->i_ino == F2FS_NODE_INO(sbi))
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ else
+ inode_dec_dirty_pages(inode);
+ }
ClearPagePrivate(page);
}
-static int f2fs_release_data_page(struct page *page, gfp_t wait)
+int f2fs_release_page(struct page *page, gfp_t wait)
{
+ /* If this is dirty page, keep PagePrivate */
+ if (PageDirty(page))
+ return 0;
+
ClearPagePrivate(page);
return 1;
}
@@ -1132,7 +1208,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
SetPageUptodate(page);
- if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) {
+ if (f2fs_is_atomic_file(inode)) {
register_inmem_page(inode, page);
return 1;
}
@@ -1168,8 +1244,8 @@ const struct address_space_operations f2fs_dblock_aops = {
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
.set_page_dirty = f2fs_set_data_page_dirty,
- .invalidatepage = f2fs_invalidate_data_page,
- .releasepage = f2fs_release_data_page,
+ .invalidatepage = f2fs_invalidate_page,
+ .releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
};
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 91e8f699ab30..e671373cc8ab 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -40,6 +40,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dirs = sbi->n_dirty_dirs;
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
+ si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
@@ -57,7 +58,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->node_pages = NODE_MAPPING(sbi)->nrpages;
si->meta_pages = META_MAPPING(sbi)->nrpages;
si->nats = NM_I(sbi)->nat_cnt;
- si->sits = SIT_I(sbi)->dirty_sentries;
+ si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
+ si->sits = MAIN_SEGS(sbi);
+ si->dirty_sits = SIT_I(sbi)->dirty_sentries;
si->fnids = NM_I(sbi)->fcnt;
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
@@ -79,6 +82,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
}
+
+ si->inplace_count = atomic_read(&sbi->inplace_count);
}
/*
@@ -137,6 +142,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
@@ -159,20 +165,32 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
+get_cache:
+ si->cache_mem = 0;
+
/* build gc */
- si->base_mem += sizeof(struct f2fs_gc_kthread);
+ if (sbi->gc_thread)
+ si->cache_mem += sizeof(struct f2fs_gc_kthread);
+
+ /* build merge flush thread */
+ if (SM_I(sbi)->cmd_control_info)
+ si->cache_mem += sizeof(struct flush_cmd_control);
-get_cache:
/* free nids */
- si->cache_mem = NM_I(sbi)->fcnt;
- si->cache_mem += NM_I(sbi)->nat_cnt;
- npages = NODE_MAPPING(sbi)->nrpages;
- si->cache_mem += npages << PAGE_CACHE_SHIFT;
- npages = META_MAPPING(sbi)->nrpages;
- si->cache_mem += npages << PAGE_CACHE_SHIFT;
- si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry);
+ si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+ si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
+ si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
+ sizeof(struct nat_entry_set);
+ si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
+ si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
for (i = 0; i <= UPDATE_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
+
+ si->page_mem = 0;
+ npages = NODE_MAPPING(sbi)->nrpages;
+ si->page_mem += npages << PAGE_CACHE_SHIFT;
+ npages = META_MAPPING(sbi)->nrpages;
+ si->page_mem += npages << PAGE_CACHE_SHIFT;
}
static int stat_show(struct seq_file *s, void *v)
@@ -250,16 +268,16 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
si->hit_ext, si->total_ext);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - inmem: %4d\n",
- si->inmem_pages);
+ seq_printf(s, " - inmem: %4d, wb: %4d\n",
+ si->inmem_pages, si->wb_pages);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4d in dirs:%4d\n",
si->ndirty_dent, si->ndirty_dirs);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
- seq_printf(s, " - NATs: %9d\n - SITs: %9d\n",
- si->nats, si->sits);
+ seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
+ si->dirty_nats, si->nats, si->dirty_sits, si->sits);
seq_printf(s, " - free_nids: %9d\n",
si->fnids);
seq_puts(s, "\nDistribution of User Blocks:");
@@ -277,6 +295,7 @@ static int stat_show(struct seq_file *s, void *v)
for (j = 0; j < si->util_free; j++)
seq_putc(s, '-');
seq_puts(s, "]\n\n");
+ seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
seq_printf(s, "SSR: %u blocks in %u segments\n",
si->block_count[SSR], si->segment_count[SSR]);
seq_printf(s, "LFS: %u blocks in %u segments\n",
@@ -289,9 +308,14 @@ static int stat_show(struct seq_file *s, void *v)
/* memory footprint */
update_mem_info(si->sbi);
- seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
- (si->base_mem + si->cache_mem) >> 10,
- si->base_mem >> 10, si->cache_mem >> 10);
+ seq_printf(s, "\nMemory: %u KB\n",
+ (si->base_mem + si->cache_mem + si->page_mem) >> 10);
+ seq_printf(s, " - static: %u KB\n",
+ si->base_mem >> 10);
+ seq_printf(s, " - cached: %u KB\n",
+ si->cache_mem >> 10);
+ seq_printf(s, " - paged : %u KB\n",
+ si->page_mem >> 10);
}
mutex_unlock(&f2fs_stat_mutex);
return 0;
@@ -331,6 +355,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inline_inode, 0);
atomic_set(&sbi->inline_dir, 0);
+ atomic_set(&sbi->inplace_count, 0);
mutex_lock(&f2fs_stat_mutex);
list_add_tail(&si->stat_list, &f2fs_stat_list);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index b1a7d5737cd0..b74097a7f6d9 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -286,8 +286,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
f2fs_wait_on_page_writeback(page, type);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode);
- if (!f2fs_has_inline_dentry(dir))
- kunmap(page);
+ f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ec58bb2373fc..7fa3313ab0e2 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -28,7 +28,7 @@
do { \
if (unlikely(condition)) { \
WARN_ON(1); \
- sbi->need_fsck = true; \
+ set_sbi_flag(sbi, SBI_NEED_FSCK); \
} \
} while (0)
#define f2fs_down_write(x, y) down_write(x)
@@ -100,10 +100,15 @@ enum {
enum {
CP_UMOUNT,
+ CP_FASTBOOT,
CP_SYNC,
CP_DISCARD,
};
+#define DEF_BATCHED_TRIM_SECTIONS 32
+#define BATCHED_TRIM_SEGMENTS(sbi) \
+ (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
+
struct cp_control {
int reason;
__u64 trim_start;
@@ -136,8 +141,14 @@ struct ino_entry {
nid_t ino; /* inode number */
};
-/* for the list of directory inodes */
-struct dir_inode_entry {
+/*
+ * for the list of directory inodes or gc inodes.
+ * NOTE: there are two slab users of this structure. If we add, modify, or
+ * delete fields for one of the users, it may affect the fields or size seen
+ * by the other; in that case it is better to split the slab cache and the
+ * related data structure.
+ */
+struct inode_entry {
struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */
};
@@ -196,11 +207,14 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
*/
#define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS
+#define F2FS_IOC_GETVERSION FS_IOC_GETVERSION
#define F2FS_IOCTL_MAGIC 0xf5
#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
+#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
+#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -295,7 +309,7 @@ struct f2fs_inode_info {
nid_t i_xattr_nid; /* node id that contains xattrs */
unsigned long long xattr_ver; /* cp version of xattr modification */
struct extent_info ext; /* in-memory extent cache entry */
- struct dir_inode_entry *dirty_dir; /* the pointer of dirty dir */
+ struct inode_entry *dirty_dir; /* the pointer of dirty dir */
struct radix_tree_root inmem_root; /* radix tree for inmem pages */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
@@ -398,7 +412,8 @@ enum {
CURSEG_HOT_NODE, /* direct node blocks of directory files */
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
- NO_CHECK_TYPE
+ NO_CHECK_TYPE,
+ CURSEG_DIRECT_IO, /* to use for the direct IO path */
};
struct flush_cmd {
@@ -437,6 +452,9 @@ struct f2fs_sm_info {
int nr_discards; /* # of discards in the list */
int max_discards; /* max. discards to be issued */
+ /* for batched trimming */
+ unsigned int trim_sections; /* # of sections to trim */
+
struct list_head sit_entry_set; /* sit entry set list */
unsigned int ipu_policy; /* in-place-update policy */
@@ -489,6 +507,7 @@ enum page_type {
struct f2fs_io_info {
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
+ block_t blk_addr; /* block address to be written */
};
#define is_read_io(rw) (((rw) & 1) == READ)
@@ -508,13 +527,20 @@ struct inode_management {
unsigned long ino_num; /* number of entries */
};
+/* For s_flag in struct f2fs_sb_info */
+enum {
+ SBI_IS_DIRTY, /* dirty flag for checkpoint */
+ SBI_IS_CLOSE, /* specify unmounting */
+ SBI_NEED_FSCK, /* need fsck.f2fs to fix */
+ SBI_POR_DOING, /* recovery is doing or not */
+};
+
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
struct buffer_head *raw_super_buf; /* buffer head of raw sb */
struct f2fs_super_block *raw_super; /* raw super block pointer */
- int s_dirty; /* dirty flag for checkpoint */
- bool need_fsck; /* need fsck.f2fs to fix */
+ int s_flag; /* flags for sbi */
/* for node-related operations */
struct f2fs_nm_info *nm_info; /* node manager */
@@ -534,7 +560,6 @@ struct f2fs_sb_info {
struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
struct mutex writepages; /* mutex for writepages() */
- bool por_doing; /* recovery is doing or not */
wait_queue_head_t cp_wait;
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
@@ -589,6 +614,7 @@ struct f2fs_sb_info {
struct f2fs_stat_info *stat_info; /* FS status information */
unsigned int segment_count[2]; /* # of allocated segments */
unsigned int block_count[2]; /* # of allocated blocks */
+ atomic_t inplace_count; /* # of inplace update */
int total_hit_ext, read_hit_ext; /* extent cache hit ratio */
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
@@ -686,14 +712,19 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
return sbi->node_inode->i_mapping;
}
-static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_dirty = 1;
+ return sbi->s_flag & (0x01 << type);
}
-static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_dirty = 0;
+ sbi->s_flag |= (0x01 << type);
+}
+
+static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
+{
+ sbi->s_flag &= ~(0x01 << type);
}
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
@@ -741,6 +772,28 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
up_write(&sbi->cp_rwsem);
}
+static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
+{
+ int reason = CP_SYNC;
+
+ if (test_opt(sbi, FASTBOOT))
+ reason = CP_FASTBOOT;
+ if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
+ reason = CP_UMOUNT;
+ return reason;
+}
+
+static inline bool __remain_node_summaries(int reason)
+{
+ return (reason == CP_UMOUNT || reason == CP_FASTBOOT);
+}
+
+static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
+{
+ return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) ||
+ is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG));
+}
+
/*
* Check whether the given nid is within node id range.
*/
@@ -805,7 +858,7 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
atomic_inc(&sbi->nr_pages[count_type]);
- F2FS_SET_SB_DIRT(sbi);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
}
static inline void inode_inc_dirty_pages(struct inode *inode)
@@ -1113,6 +1166,7 @@ enum {
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_VOLATILE_FILE, /* indicate volatile file */
+ FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
};
@@ -1220,6 +1274,11 @@ static inline bool f2fs_is_volatile_file(struct inode *inode)
return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE);
}
+static inline bool f2fs_is_drop_cache(struct inode *inode)
+{
+ return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE);
+}
+
static inline void *inline_data_addr(struct page *page)
{
struct f2fs_inode *ri = F2FS_INODE(page);
@@ -1389,7 +1448,6 @@ void destroy_node_manager_caches(void);
* segment.c
*/
void register_inmem_page(struct inode *, struct page *);
-void invalidate_inmem_page(struct inode *, struct page *);
void commit_inmem_pages(struct inode *, bool);
void f2fs_balance_fs(struct f2fs_sb_info *);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
@@ -1401,16 +1459,16 @@ void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
void release_discard_addrs(struct f2fs_sb_info *);
void discard_next_dnode(struct f2fs_sb_info *, block_t);
-int npages_for_summary_flush(struct f2fs_sb_info *);
+int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *,
- struct f2fs_io_info *, unsigned int, block_t, block_t *);
-void write_data_page(struct page *, struct dnode_of_data *, block_t *,
- struct f2fs_io_info *);
-void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
+ unsigned int, struct f2fs_io_info *);
+void write_data_page(struct page *, struct dnode_of_data *,
+ struct f2fs_io_info *);
+void rewrite_data_page(struct page *, struct f2fs_io_info *);
void recover_data_page(struct f2fs_sb_info *, struct page *,
struct f2fs_summary *, block_t, block_t);
void allocate_data_block(struct f2fs_sb_info *, struct page *,
@@ -1457,17 +1515,20 @@ void destroy_checkpoint_caches(void);
* data.c
*/
void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
-int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int);
-void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t,
+int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *,
+ struct f2fs_io_info *);
+void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *,
struct f2fs_io_info *);
int reserve_new_block(struct dnode_of_data *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-void update_extent_cache(block_t, struct dnode_of_data *);
+void update_extent_cache(struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct page *, struct f2fs_io_info *);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
+void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
+int f2fs_release_page(struct page *, gfp_t);
/*
* gc.c
@@ -1477,8 +1538,6 @@ void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
-int __init create_gc_caches(void);
-void destroy_gc_caches(void);
/*
* recovery.c
@@ -1497,9 +1556,9 @@ struct f2fs_stat_info {
int main_area_segs, main_area_sections, main_area_zones;
int hit_ext, total_ext;
int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
- int nats, sits, fnids;
+ int nats, dirty_nats, sits, dirty_sits, fnids;
int total_count, utilization;
- int bg_gc, inline_inode, inline_dir, inmem_pages;
+ int bg_gc, inline_inode, inline_dir, inmem_pages, wb_pages;
unsigned int valid_count, valid_node_count, valid_inode_count;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
@@ -1514,7 +1573,8 @@ struct f2fs_stat_info {
unsigned int segment_count[2];
unsigned int block_count[2];
- unsigned base_mem, cache_mem;
+ unsigned int inplace_count;
+ unsigned base_mem, cache_mem, page_mem;
};
static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
@@ -1553,7 +1613,8 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg) \
((sbi)->block_count[(curseg)->alloc_type]++)
-
+#define stat_inc_inplace_blocks(sbi) \
+ (atomic_inc(&(sbi)->inplace_count))
#define stat_inc_seg_count(sbi, type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
@@ -1599,6 +1660,7 @@ void f2fs_destroy_root_stats(void);
#define stat_dec_inline_dir(inode)
#define stat_inc_seg_type(sbi, curseg)
#define stat_inc_block_count(sbi, curseg)
+#define stat_inc_inplace_blocks(sbi)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
@@ -1619,6 +1681,7 @@ extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
+extern struct kmem_cache *inode_entry_slab;
/*
* inline.c
@@ -1629,7 +1692,6 @@ int f2fs_read_inline_data(struct inode *, struct page *);
int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
int f2fs_convert_inline_inode(struct inode *);
int f2fs_write_inline_data(struct inode *, struct page *);
-void truncate_inline_data(struct page *, u64);
bool recover_inline_data(struct inode *, struct page *);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct qstr *,
struct page **);
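The header hunk above grows the f2fs ioctl set: F2FS_IOC_GETVERSION is aliased to FS_IOC_GETVERSION, and two new volatile-write ioctls (_IO(0xf5, 4) and _IO(0xf5, 5)) join the existing atomic/volatile start and commit calls. A minimal userspace sketch issuing a subset of them follows; the ioctl numbers are copied from the hunk, while the file path is hypothetical and must point into an f2fs mount:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* FS_IOC_GETVERSION */

/* Ioctl numbers copied from the fs/f2fs/f2fs.h hunk above. */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)

int main(void)
{
	int fd = open("/mnt/f2fs/db.file", O_RDWR);	/* hypothetical path */
	int version;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The handler in the file.c hunk put_user()s inode->i_generation. */
	if (ioctl(fd, FS_IOC_GETVERSION, &version) == 0)
		printf("i_generation: %d\n", version);

	/* Bracket a batch of writes as one atomic update; per
	 * f2fs_ioc_commit_atomic_write above, the commit also syncs. */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) == 0) {
		/* ... pwrite() the updated blocks here ... */
		ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
	}

	close(fd);
	return 0;
}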
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3c27e0ecb3bc..98dac27bc3f7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -26,6 +26,7 @@
#include "segment.h"
#include "xattr.h"
#include "acl.h"
+#include "trace.h"
#include <trace/events/f2fs.h>
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
@@ -92,7 +93,6 @@ static const struct vm_operations_struct f2fs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = f2fs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
@@ -246,6 +246,10 @@ go_write:
sync_nodes:
sync_node_pages(sbi, ino, &wbc);
+ /* if cp_error was set, we should avoid an infinite loop */
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto out;
+
if (need_inode_block_update(sbi, ino)) {
mark_inode_dirty_sync(inode);
f2fs_write_inode(inode, NULL);
@@ -265,6 +269,7 @@ flush_out:
ret = f2fs_issue_flush(sbi);
out:
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
+ f2fs_trace_ios(NULL, NULL, 1);
return ret;
}
@@ -351,7 +356,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
dn.ofs_in_node++, pgofs++,
- data_ofs = pgofs << PAGE_CACHE_SHIFT) {
+ data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
block_t blkaddr;
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
@@ -427,7 +432,8 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
if (blkaddr == NULL_ADDR)
continue;
- update_extent_cache(NULL_ADDR, dn);
+ dn->data_blkaddr = NULL_ADDR;
+ update_extent_cache(dn);
invalidate_blocks(sbi, blkaddr);
nr_free++;
}
@@ -484,8 +490,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
trace_f2fs_truncate_blocks_enter(inode, from);
- free_from = (pgoff_t)
- ((from + blocksize - 1) >> (sbi->log_blocksize));
+ free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
if (lock)
f2fs_lock_op(sbi);
@@ -836,6 +841,19 @@ static long f2fs_fallocate(struct file *file, int mode,
return ret;
}
+static int f2fs_release_file(struct inode *inode, struct file *filp)
+{
+ /* some remaining atomic pages should be discarded */
+ if (f2fs_is_atomic_file(inode))
+ commit_inmem_pages(inode, true);
+ if (f2fs_is_volatile_file(inode)) {
+ set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ filemap_fdatawrite(inode->i_mapping);
+ clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ }
+ return 0;
+}
+
#define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
@@ -906,29 +924,30 @@ out:
return ret;
}
+static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+
+ return put_user(inode->i_generation, (int __user *)arg);
+}
+
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (!inode_owner_or_capable(inode))
return -EACCES;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(F2FS_I_SB(inode));
+
+ if (f2fs_is_atomic_file(inode))
+ return 0;
set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
return f2fs_convert_inline_inode(inode);
}
-static int f2fs_release_file(struct inode *inode, struct file *filp)
-{
- /* some remained atomic pages should discarded */
- if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
- commit_inmem_pages(inode, true);
- return 0;
-}
-
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
@@ -949,6 +968,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
mnt_drop_write_file(filp);
+ clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
return ret;
}
@@ -959,11 +979,56 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
+ if (f2fs_is_volatile_file(inode))
+ return 0;
+
set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
return f2fs_convert_inline_inode(inode);
}
+static int f2fs_ioc_release_volatile_write(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (!f2fs_is_volatile_file(inode))
+ return 0;
+
+ punch_hole(inode, 0, F2FS_BLKSIZE);
+ return 0;
+}
+
+static int f2fs_ioc_abort_volatile_write(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ int ret;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ f2fs_balance_fs(F2FS_I_SB(inode));
+
+ if (f2fs_is_atomic_file(inode)) {
+ commit_inmem_pages(inode, false);
+ clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ }
+
+ if (f2fs_is_volatile_file(inode)) {
+ clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+ filemap_fdatawrite(inode->i_mapping);
+ set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+ }
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1001,12 +1066,18 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_getflags(filp, arg);
case F2FS_IOC_SETFLAGS:
return f2fs_ioc_setflags(filp, arg);
+ case F2FS_IOC_GETVERSION:
+ return f2fs_ioc_getversion(filp, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
return f2fs_ioc_start_atomic_write(filp);
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
return f2fs_ioc_commit_atomic_write(filp);
case F2FS_IOC_START_VOLATILE_WRITE:
return f2fs_ioc_start_volatile_write(filp);
+ case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+ return f2fs_ioc_release_volatile_write(filp);
+ case F2FS_IOC_ABORT_VOLATILE_WRITE:
+ return f2fs_ioc_abort_volatile_write(filp);
case FITRIM:
return f2fs_ioc_fitrim(filp, arg);
default:
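For context, a minimal userspace sketch of how the volatile-write ioctls added above might be driven; this is illustrative only and not part of the patch. It assumes the F2FS_IOC_* numbers are exported to userspace through a hypothetical header, and the file path is a placeholder.

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "f2fs_ioctl.h"	/* hypothetical: carries the F2FS_IOC_* definitions */

/* Write data as a volatile file, then ask f2fs to drop it again. */
static int volatile_write_demo(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	/* data written from here on may be dropped rather than persisted */
	if (ioctl(fd, F2FS_IOC_START_VOLATILE_WRITE) < 0)
		goto fail;

	if (write(fd, buf, len) != (ssize_t)len)
		goto fail;

	/* ask f2fs to discard the volatile data instead of persisting it */
	if (ioctl(fd, F2FS_IOC_RELEASE_VOLATILE_WRITE) < 0)
		goto fail;

	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}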
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index eec0933a4819..76adbc3641f1 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -24,8 +24,6 @@
#include "gc.h"
#include <trace/events/f2fs.h>
-static struct kmem_cache *winode_slab;
-
static int gc_thread_func(void *data)
{
struct f2fs_sb_info *sbi = data;
@@ -46,7 +44,7 @@ static int gc_thread_func(void *data)
break;
if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
- wait_ms = increase_sleep_time(gc_th, wait_ms);
+ increase_sleep_time(gc_th, &wait_ms);
continue;
}
@@ -67,15 +65,15 @@ static int gc_thread_func(void *data)
continue;
if (!is_idle(sbi)) {
- wait_ms = increase_sleep_time(gc_th, wait_ms);
+ increase_sleep_time(gc_th, &wait_ms);
mutex_unlock(&sbi->gc_mutex);
continue;
}
if (has_enough_invalid_blocks(sbi))
- wait_ms = decrease_sleep_time(gc_th, wait_ms);
+ decrease_sleep_time(gc_th, &wait_ms);
else
- wait_ms = increase_sleep_time(gc_th, wait_ms);
+ increase_sleep_time(gc_th, &wait_ms);
stat_inc_bggc_count(sbi);
@@ -356,13 +354,10 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
iput(inode);
return;
}
- new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
+ new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
new_ie->inode = inode;
-retry:
- if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
- cond_resched();
- goto retry;
- }
+
+ f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
list_add_tail(&new_ie->list, &gc_list->ilist);
}
@@ -373,7 +368,7 @@ static void put_gc_inode(struct gc_inode_list *gc_list)
radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
iput(ie->inode);
list_del(&ie->list);
- kmem_cache_free(winode_slab, ie);
+ kmem_cache_free(inode_entry_slab, ie);
}
}
@@ -703,8 +698,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
.iroot = RADIX_TREE_INIT(GFP_NOFS),
};
- cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
-
+ cpc.reason = __get_cp_reason(sbi);
gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
goto stop;
@@ -750,17 +744,3 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
{
DIRTY_I(sbi)->v_ops = &default_v_ops;
}
-
-int __init create_gc_caches(void)
-{
- winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
- sizeof(struct inode_entry));
- if (!winode_slab)
- return -ENOMEM;
- return 0;
-}
-
-void destroy_gc_caches(void)
-{
- kmem_cache_destroy(winode_slab);
-}
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 6ff7ad38463e..b4a65be9f7d3 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -35,11 +35,6 @@ struct f2fs_gc_kthread {
unsigned int gc_idle;
};
-struct inode_entry {
- struct list_head list;
- struct inode *inode;
-};
-
struct gc_inode_list {
struct list_head ilist;
struct radix_tree_root iroot;
@@ -69,26 +64,26 @@ static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
-static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
+static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
+ long *wait)
{
- if (wait == gc_th->no_gc_sleep_time)
- return wait;
+ if (*wait == gc_th->no_gc_sleep_time)
+ return;
- wait += gc_th->min_sleep_time;
- if (wait > gc_th->max_sleep_time)
- wait = gc_th->max_sleep_time;
- return wait;
+ *wait += gc_th->min_sleep_time;
+ if (*wait > gc_th->max_sleep_time)
+ *wait = gc_th->max_sleep_time;
}
-static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
+static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
+ long *wait)
{
- if (wait == gc_th->no_gc_sleep_time)
- wait = gc_th->max_sleep_time;
+ if (*wait == gc_th->no_gc_sleep_time)
+ *wait = gc_th->max_sleep_time;
- wait -= gc_th->min_sleep_time;
- if (wait <= gc_th->min_sleep_time)
- wait = gc_th->min_sleep_time;
- return wait;
+ *wait -= gc_th->min_sleep_time;
+ if (*wait <= gc_th->min_sleep_time)
+ *wait = gc_th->min_sleep_time;
}
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index f2d3c581e776..1484c00133cd 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -50,6 +50,12 @@ void read_inline_data(struct page *page, struct page *ipage)
SetPageUptodate(page);
}
+static void truncate_inline_data(struct page *ipage)
+{
+ f2fs_wait_on_page_writeback(ipage, NODE);
+ memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
+}
+
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
struct page *ipage;
@@ -79,7 +85,6 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
void *src_addr, *dst_addr;
- block_t new_blk_addr;
struct f2fs_io_info fio = {
.type = DATA,
.rw = WRITE_SYNC | REQ_PRIO,
@@ -115,9 +120,9 @@ no_update:
/* write data page to try to make data consistent */
set_page_writeback(page);
-
- write_data_page(page, dn, &new_blk_addr, &fio);
- update_extent_cache(new_blk_addr, dn);
+ fio.blk_addr = dn->data_blkaddr;
+ write_data_page(page, dn, &fio);
+ update_extent_cache(dn);
f2fs_wait_on_page_writeback(page, DATA);
if (dirty)
inode_dec_dirty_pages(dn->inode);
@@ -126,7 +131,7 @@ no_update:
set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
- truncate_inline_data(dn->inode_page, 0);
+ truncate_inline_data(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
@@ -199,19 +204,6 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
return 0;
}
-void truncate_inline_data(struct page *ipage, u64 from)
-{
- void *addr;
-
- if (from >= MAX_INLINE_DATA)
- return;
-
- f2fs_wait_on_page_writeback(ipage, NODE);
-
- addr = inline_data_addr(ipage);
- memset(addr + from, 0, MAX_INLINE_DATA - from);
-}
-
bool recover_inline_data(struct inode *inode, struct page *npage)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -253,7 +245,7 @@ process_inline:
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- truncate_inline_data(ipage, 0);
+ truncate_inline_data(ipage);
f2fs_clear_inline_inode(inode);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
@@ -371,7 +363,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
- truncate_inline_data(ipage, 0);
+ truncate_inline_data(ipage);
stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 196cc7843aaf..2d002e3738a7 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -67,29 +67,23 @@ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
}
}
-static int __recover_inline_status(struct inode *inode, struct page *ipage)
+static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
void *inline_data = inline_data_addr(ipage);
- struct f2fs_inode *ri;
- void *zbuf;
+ __le32 *start = inline_data;
+ __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);
- zbuf = kzalloc(MAX_INLINE_DATA, GFP_NOFS);
- if (!zbuf)
- return -ENOMEM;
+ while (start < end) {
+ if (*start++) {
+ f2fs_wait_on_page_writeback(ipage, NODE);
- if (!memcmp(zbuf, inline_data, MAX_INLINE_DATA)) {
- kfree(zbuf);
- return 0;
+ set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
+ set_page_dirty(ipage);
+ return;
+ }
}
- kfree(zbuf);
-
- f2fs_wait_on_page_writeback(ipage, NODE);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
-
- ri = F2FS_INODE(ipage);
- set_raw_inline(F2FS_I(inode), ri);
- set_page_dirty(ipage);
- return 0;
+ return;
}
static int do_read_inode(struct inode *inode)
@@ -98,7 +92,6 @@ static int do_read_inode(struct inode *inode)
struct f2fs_inode_info *fi = F2FS_I(inode);
struct page *node_page;
struct f2fs_inode *ri;
- int err = 0;
/* Check if ino is within scope */
if (check_nid_range(sbi, inode->i_ino)) {
@@ -142,7 +135,7 @@ static int do_read_inode(struct inode *inode)
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
- err = __recover_inline_status(inode, node_page);
+ __recover_inline_status(inode, node_page);
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
@@ -152,7 +145,7 @@ static int do_read_inode(struct inode *inode)
stat_inc_inline_inode(inode);
stat_inc_inline_dir(inode);
- return err;
+ return 0;
}
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
@@ -304,7 +297,7 @@ void f2fs_evict_inode(struct inode *inode)
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
/* some remained atomic pages should discarded */
- if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+ if (f2fs_is_atomic_file(inode))
commit_inmem_pages(inode, true);
trace_f2fs_evict_inode(inode);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 547a2deeb1ac..e79639a9787a 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -299,7 +299,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
set_inode_flag(F2FS_I(inode), FI_INC_LINK);
f2fs_lock_op(sbi);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f83326ca32ef..97bd9d3db882 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -19,6 +19,7 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
+#include "trace.h"
#include <trace/events/f2fs.h>
#define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock)
@@ -57,12 +58,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
} else if (type == INO_ENTRIES) {
int i;
- if (sbi->sb->s_bdi->dirty_exceeded)
- return false;
for (i = 0; i <= UPDATE_INO; i++)
mem_size += (sbi->im[i].ino_num *
sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else {
+ if (sbi->sb->s_bdi->dirty_exceeded)
+ return false;
}
return res;
}
@@ -268,7 +270,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
e = grab_nat_entry(nm_i, ni->nid);
- e->ni = *ni;
+ copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
/*
@@ -276,7 +278,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
* previous nat entry can be remained in nat cache.
* So, reinitialize it with new information.
*/
- e->ni = *ni;
+ copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
}
@@ -346,7 +348,6 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
struct nat_entry *e;
int i;
- memset(&ne, 0, sizeof(struct f2fs_nat_entry));
ni->nid = nid;
/* Check nat cache */
@@ -361,6 +362,8 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
if (e)
return;
+ memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+
/* Check current segment summary */
mutex_lock(&curseg->curseg_mutex);
i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
@@ -471,7 +474,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct page *npage[4];
- struct page *parent;
+ struct page *parent = NULL;
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
@@ -488,6 +491,14 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
if (IS_ERR(npage[0]))
return PTR_ERR(npage[0]);
}
+
+ /* if inline_data is set, we should not report any block indices */
+ if (f2fs_has_inline_data(dn->inode) && index) {
+ err = -EINVAL;
+ f2fs_put_page(npage[0], 1);
+ goto release_out;
+ }
+
parent = npage[0];
if (level != 0)
nids[1] = get_nid(parent, offset[0], true);
@@ -585,7 +596,7 @@ static void truncate_node(struct dnode_of_data *dn)
}
invalidate:
clear_node_page_dirty(dn->node_page);
- F2FS_SET_SB_DIRT(sbi);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
f2fs_put_page(dn->node_page, 1);
@@ -976,6 +987,10 @@ static int read_node_page(struct page *page, int rw)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
+ struct f2fs_io_info fio = {
+ .type = NODE,
+ .rw = rw,
+ };
get_node_info(sbi, page->index, &ni);
@@ -987,7 +1002,8 @@ static int read_node_page(struct page *page, int rw)
if (PageUptodate(page))
return LOCKED_PAGE;
- return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
+ fio.blk_addr = ni.blk_addr;
+ return f2fs_submit_page_bio(sbi, page, &fio);
}
/*
@@ -1028,11 +1044,11 @@ repeat:
err = read_node_page(page, READ_SYNC);
if (err < 0)
return ERR_PTR(err);
- else if (err == LOCKED_PAGE)
- goto got_it;
+ else if (err != LOCKED_PAGE)
+ lock_page(page);
- lock_page(page);
if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
+ ClearPageUptodate(page);
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -1040,7 +1056,6 @@ repeat:
f2fs_put_page(page, 1);
goto repeat;
}
-got_it:
return page;
}
@@ -1268,7 +1283,6 @@ static int f2fs_write_node_page(struct page *page,
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
nid_t nid;
- block_t new_addr;
struct node_info ni;
struct f2fs_io_info fio = {
.type = NODE,
@@ -1277,7 +1291,7 @@ static int f2fs_write_node_page(struct page *page,
trace_f2fs_writepage(page, NODE);
- if (unlikely(sbi->por_doing))
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
@@ -1303,9 +1317,11 @@ static int f2fs_write_node_page(struct page *page,
} else {
down_read(&sbi->node_write);
}
+
set_page_writeback(page);
- write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
- set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
+ fio.blk_addr = ni.blk_addr;
+ write_node_page(sbi, page, nid, &fio);
+ set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
unlock_page(page);
@@ -1355,26 +1371,12 @@ static int f2fs_set_node_page_dirty(struct page *page)
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
+ f2fs_trace_pid(page);
return 1;
}
return 0;
}
-static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
- unsigned int length)
-{
- struct inode *inode = page->mapping->host;
- if (PageDirty(page))
- dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_NODES);
- ClearPagePrivate(page);
-}
-
-static int f2fs_release_node_page(struct page *page, gfp_t wait)
-{
- ClearPagePrivate(page);
- return 1;
-}
-
/*
* Structure of the f2fs node operations
*/
@@ -1382,8 +1384,8 @@ const struct address_space_operations f2fs_node_aops = {
.writepage = f2fs_write_node_page,
.writepages = f2fs_write_node_pages,
.set_page_dirty = f2fs_set_node_page_dirty,
- .invalidatepage = f2fs_invalidate_node_page,
- .releasepage = f2fs_release_node_page,
+ .invalidatepage = f2fs_invalidate_page,
+ .releasepage = f2fs_release_page,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
@@ -1726,80 +1728,41 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
return 0;
}
-/*
- * ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-read pages are allocated in bd_inode's mapping tree.
- */
-static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
- int start, int nrpages)
-{
- struct inode *inode = sbi->sb->s_bdev->bd_inode;
- struct address_space *mapping = inode->i_mapping;
- int i, page_idx = start;
- struct f2fs_io_info fio = {
- .type = META,
- .rw = READ_SYNC | REQ_META | REQ_PRIO
- };
-
- for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
- /* alloc page in bd_inode for reading node summary info */
- pages[i] = grab_cache_page(mapping, page_idx);
- if (!pages[i])
- break;
- f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
- }
-
- f2fs_submit_merged_bio(sbi, META, READ);
- return i;
-}
-
int restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
- struct inode *inode = sbi->sb->s_bdev->bd_inode;
block_t addr;
int bio_blocks = MAX_BIO_BLOCKS(sbi);
- struct page *pages[bio_blocks];
- int i, idx, last_offset, nrpages, err = 0;
+ int i, idx, last_offset, nrpages;
/* scan the node segment */
last_offset = sbi->blocks_per_seg;
addr = START_BLOCK(sbi, segno);
sum_entry = &sum->entries[0];
- for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
+ for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
nrpages = min(last_offset - i, bio_blocks);
/* readahead node pages */
- nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
- if (!nrpages)
- return -ENOMEM;
+ ra_meta_pages(sbi, addr, nrpages, META_POR);
- for (idx = 0; idx < nrpages; idx++) {
- if (err)
- goto skip;
+ for (idx = addr; idx < addr + nrpages; idx++) {
+ struct page *page = get_meta_page(sbi, idx);
- lock_page(pages[idx]);
- if (unlikely(!PageUptodate(pages[idx]))) {
- err = -EIO;
- } else {
- rn = F2FS_NODE(pages[idx]);
- sum_entry->nid = rn->footer.nid;
- sum_entry->version = 0;
- sum_entry->ofs_in_node = 0;
- sum_entry++;
- }
- unlock_page(pages[idx]);
-skip:
- page_cache_release(pages[idx]);
+ rn = F2FS_NODE(page);
+ sum_entry->nid = rn->footer.nid;
+ sum_entry->version = 0;
+ sum_entry->ofs_in_node = 0;
+ sum_entry++;
+ f2fs_put_page(page, 1);
}
- invalidate_mapping_pages(inode->i_mapping, addr,
+ invalidate_mapping_pages(META_MAPPING(sbi), addr,
addr + nrpages);
}
- return err;
+ return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -1923,7 +1886,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
- struct nat_entry_set *setvec[NATVEC_SIZE];
+ struct nat_entry_set *setvec[SETVEC_SIZE];
struct nat_entry_set *set, *tmp;
unsigned int found;
nid_t set_idx = 0;
@@ -1940,7 +1903,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
remove_nats_in_journal(sbi);
while ((found = __gang_lookup_nat_set(nm_i,
- set_idx, NATVEC_SIZE, setvec))) {
+ set_idx, SETVEC_SIZE, setvec))) {
unsigned idx;
set_idx = setvec[found - 1]->set + 1;
for (idx = 0; idx < found; idx++)
@@ -2020,6 +1983,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *next_i;
struct nat_entry *natvec[NATVEC_SIZE];
+ struct nat_entry_set *setvec[SETVEC_SIZE];
nid_t nid = 0;
unsigned int found;
@@ -2044,11 +2008,27 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
while ((found = __gang_lookup_nat_cache(nm_i,
nid, NATVEC_SIZE, natvec))) {
unsigned idx;
+
nid = nat_get_nid(natvec[found - 1]) + 1;
for (idx = 0; idx < found; idx++)
__del_from_nat_cache(nm_i, natvec[idx]);
}
f2fs_bug_on(sbi, nm_i->nat_cnt);
+
+ /* destroy nat set cache */
+ nid = 0;
+ while ((found = __gang_lookup_nat_set(nm_i,
+ nid, SETVEC_SIZE, setvec))) {
+ unsigned idx;
+
+ nid = setvec[found - 1]->set + 1;
+ for (idx = 0; idx < found; idx++) {
+ /* entry_cnt is not zero if a cp_error occurred */
+ f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
+ radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
+ kmem_cache_free(nat_entry_set_slab, setvec[idx]);
+ }
+ }
up_write(&nm_i->nat_tree_lock);
kfree(nm_i->nat_bitmap);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index d10b6448a671..f405bbf2435a 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -25,10 +25,19 @@
/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64
+#define SETVEC_SIZE 32
/* return value for read_node_page */
#define LOCKED_PAGE 1
+/* For flag in struct node_info */
+enum {
+ IS_CHECKPOINTED, /* is it checkpointed before? */
+ HAS_FSYNCED_INODE, /* is the inode fsynced before? */
+ HAS_LAST_FSYNC, /* has the latest node fsync mark? */
+ IS_DIRTY, /* this nat entry is dirty? */
+};
+
/*
* For node information
*/
@@ -37,18 +46,11 @@ struct node_info {
nid_t ino; /* inode number of the node's owner */
block_t blk_addr; /* block address of the node */
unsigned char version; /* version of the node */
-};
-
-enum {
- IS_CHECKPOINTED, /* is it checkpointed before? */
- HAS_FSYNCED_INODE, /* is the inode fsynced before? */
- HAS_LAST_FSYNC, /* has the latest node fsync mark? */
- IS_DIRTY, /* this nat entry is dirty? */
+ unsigned char flag; /* for node information bits */
};
struct nat_entry {
struct list_head list; /* for clean or dirty nat list */
- unsigned char flag; /* for node information bits */
struct node_info ni; /* in-memory node information */
};
@@ -63,20 +65,30 @@ struct nat_entry {
#define inc_node_version(version) (++version)
+static inline void copy_node_info(struct node_info *dst,
+ struct node_info *src)
+{
+ dst->nid = src->nid;
+ dst->ino = src->ino;
+ dst->blk_addr = src->blk_addr;
+ dst->version = src->version;
+ /* should not copy flag here */
+}
+
static inline void set_nat_flag(struct nat_entry *ne,
unsigned int type, bool set)
{
unsigned char mask = 0x01 << type;
if (set)
- ne->flag |= mask;
+ ne->ni.flag |= mask;
else
- ne->flag &= ~mask;
+ ne->ni.flag &= ~mask;
}
static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
unsigned char mask = 0x01 << type;
- return ne->flag & mask;
+ return ne->ni.flag & mask;
}
static inline void nat_reset_flag(struct nat_entry *ne)
@@ -108,6 +120,7 @@ enum mem_type {
NAT_ENTRIES, /* indicates the cached nat entry */
DIRTY_DENTS, /* indicates dirty dentry pages */
INO_ENTRIES, /* indicates inode entries */
+ BASE_CHECK, /* check kernel status */
};
struct nat_entry_set {
@@ -200,11 +213,19 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
struct f2fs_node *rn = F2FS_NODE(page);
+ unsigned int old_flag = 0;
+
if (reset)
memset(rn, 0, sizeof(*rn));
+ else
+ old_flag = le32_to_cpu(rn->footer.flag);
+
rn->footer.nid = cpu_to_le32(nid);
rn->footer.ino = cpu_to_le32(ino);
- rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT);
+
+ /* should retain old flag bits such as COLD_BIT_SHIFT */
+ rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
+ (old_flag & OFFSET_BIT_MASK));
}
static inline void copy_node_footer(struct page *dst, struct page *src)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 9160a37e1c7a..41afb9534bbd 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -346,6 +346,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (IS_INODE(page)) {
recover_inline_xattr(inode, page);
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+ /*
+ * Deprecated; xattr blocks should be found in the cold log.
+ * But we should retain this for backward compatibility.
+ */
recover_xattr_data(inode, page, blkaddr);
goto out;
}
@@ -396,7 +400,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
/* write dummy data page */
recover_data_page(sbi, NULL, &sum, src, dest);
- update_extent_cache(dest, &dn);
+ dn.data_blkaddr = dest;
+ update_extent_cache(&dn);
recovered++;
}
dn.ofs_in_node++;
@@ -503,7 +508,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&inode_list);
/* step #1: find fsynced inode numbers */
- sbi->por_doing = true;
+ set_sbi_flag(sbi, SBI_POR_DOING);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
@@ -536,7 +541,7 @@ out:
truncate_inode_pages_final(META_MAPPING(sbi));
}
- sbi->por_doing = false;
+ clear_sbi_flag(sbi, SBI_POR_DOING);
if (err) {
discard_next_dnode(sbi, blkaddr);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 42607a679923..daee4ab913da 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -20,6 +20,7 @@
#include "f2fs.h"
#include "segment.h"
#include "node.h"
+#include "trace.h"
#include <trace/events/f2fs.h>
#define __reverse_ffz(x) __reverse_ffs(~(x))
@@ -181,6 +182,7 @@ void register_inmem_page(struct inode *inode, struct page *page)
int err;
SetPagePrivate(page);
+ f2fs_trace_pid(page);
new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
@@ -205,23 +207,6 @@ retry:
mutex_unlock(&fi->inmem_lock);
}
-void invalidate_inmem_page(struct inode *inode, struct page *page)
-{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- struct inmem_pages *cur;
-
- mutex_lock(&fi->inmem_lock);
- cur = radix_tree_lookup(&fi->inmem_root, page->index);
- if (cur) {
- radix_tree_delete(&fi->inmem_root, cur->page->index);
- f2fs_put_page(cur->page, 0);
- list_del(&cur->list);
- kmem_cache_free(inmem_entry_slab, cur);
- dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
- }
- mutex_unlock(&fi->inmem_lock);
-}
-
void commit_inmem_pages(struct inode *inode, bool abort)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -230,7 +215,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
bool submit_bio = false;
struct f2fs_io_info fio = {
.type = DATA,
- .rw = WRITE_SYNC,
+ .rw = WRITE_SYNC | REQ_PRIO,
};
/*
@@ -240,33 +225,38 @@ void commit_inmem_pages(struct inode *inode, bool abort)
* Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
* inode becomes free by iget_locked in f2fs_iget.
*/
- if (!abort)
+ if (!abort) {
f2fs_balance_fs(sbi);
-
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi);
+ }
mutex_lock(&fi->inmem_lock);
list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
- lock_page(cur->page);
- if (!abort && cur->page->mapping == inode->i_mapping) {
- f2fs_wait_on_page_writeback(cur->page, DATA);
- if (clear_page_dirty_for_io(cur->page))
- inode_dec_dirty_pages(inode);
- do_write_data_page(cur->page, &fio);
- submit_bio = true;
+ if (!abort) {
+ lock_page(cur->page);
+ if (cur->page->mapping == inode->i_mapping) {
+ f2fs_wait_on_page_writeback(cur->page, DATA);
+ if (clear_page_dirty_for_io(cur->page))
+ inode_dec_dirty_pages(inode);
+ do_write_data_page(cur->page, &fio);
+ submit_bio = true;
+ }
+ f2fs_put_page(cur->page, 1);
+ } else {
+ put_page(cur->page);
}
radix_tree_delete(&fi->inmem_root, cur->page->index);
- f2fs_put_page(cur->page, 1);
list_del(&cur->list);
kmem_cache_free(inmem_entry_slab, cur);
dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
}
- if (submit_bio)
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
mutex_unlock(&fi->inmem_lock);
- filemap_fdatawait_range(inode->i_mapping, 0, LLONG_MAX);
- f2fs_unlock_op(sbi);
+ if (!abort) {
+ f2fs_unlock_op(sbi);
+ if (submit_bio)
+ f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ }
}
/*
@@ -290,7 +280,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
/* check the # of cached NAT entries and prefree segments */
if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
excess_prefree_segs(sbi) ||
- available_free_memory(sbi, INO_ENTRIES))
+ !available_free_memory(sbi, INO_ENTRIES))
f2fs_sync_fs(sbi->sb, true);
}
@@ -515,12 +505,13 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
- unsigned long dmap[entries];
+ unsigned long *dmap = SIT_I(sbi)->tmp_map;
unsigned int start = 0, end = -1;
bool force = (cpc->reason == CP_DISCARD);
int i;
- if (!force && !test_opt(sbi, DISCARD))
+ if (!force && (!test_opt(sbi, DISCARD) ||
+ SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards))
return;
if (force && !se->valid_blocks) {
@@ -548,7 +539,8 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
for (i = 0; i < entries; i++)
- dmap[i] = ~(cur_map[i] | ckpt_map[i]);
+ dmap[i] = force ? ~ckpt_map[i] :
+ (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
start = __find_rev_next_bit(dmap, max_blocks, end + 1);
@@ -735,7 +727,7 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
/*
* Calculate the number of current summary pages for writing
*/
-int npages_for_summary_flush(struct f2fs_sb_info *sbi)
+int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
int valid_sum_count = 0;
int i, sum_in_page;
@@ -743,8 +735,13 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi)
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
if (sbi->ckpt->alloc_type[i] == SSR)
valid_sum_count += sbi->blocks_per_seg;
- else
- valid_sum_count += curseg_blkoff(sbi, i);
+ else {
+ if (for_ra)
+ valid_sum_count += le16_to_cpu(
+ F2FS_CKPT(sbi)->cur_data_blkoff[i]);
+ else
+ valid_sum_count += curseg_blkoff(sbi, i);
+ }
}
sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
@@ -803,7 +800,7 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
int go_left = 0;
int i;
- write_lock(&free_i->segmap_lock);
+ spin_lock(&free_i->segmap_lock);
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
@@ -876,7 +873,7 @@ got_it:
f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
__set_inuse(sbi, segno);
*newseg = segno;
- write_unlock(&free_i->segmap_lock);
+ spin_unlock(&free_i->segmap_lock);
}
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
@@ -927,7 +924,7 @@ static void __next_free_blkoff(struct f2fs_sb_info *sbi,
{
struct seg_entry *se = get_seg_entry(sbi, seg->segno);
int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
- unsigned long target_map[entries];
+ unsigned long *target_map = SIT_I(sbi)->tmp_map;
unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
int i, pos;
@@ -1027,18 +1024,22 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
stat_inc_seg_type(sbi, curseg);
}
+static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ unsigned int old_segno;
+
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+ locate_dirty_segment(sbi, old_segno);
+}
+
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
- struct curseg_info *curseg;
- unsigned int old_curseg;
int i;
- for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
- curseg = CURSEG_I(sbi, i);
- old_curseg = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
- locate_dirty_segment(sbi, old_curseg);
- }
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
+ __allocate_new_segments(sbi, i);
}
static const struct segment_allocation default_salloc_ops = {
@@ -1047,8 +1048,8 @@ static const struct segment_allocation default_salloc_ops = {
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
- __u64 start = range->start >> sbi->log_blocksize;
- __u64 end = start + (range->len >> sbi->log_blocksize) - 1;
+ __u64 start = F2FS_BYTES_TO_BLK(range->start);
+ __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
unsigned int start_segno, end_segno;
struct cp_control cpc;
@@ -1065,16 +1066,21 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
cpc.reason = CP_DISCARD;
- cpc.trim_start = start_segno;
- cpc.trim_end = end_segno;
- cpc.trim_minlen = range->minlen >> sbi->log_blocksize;
+ cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen);
/* do checkpoint to issue discard commands safely */
- mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
- mutex_unlock(&sbi->gc_mutex);
+ for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
+ cpc.trim_start = start_segno;
+ cpc.trim_end = min_t(unsigned int, rounddown(start_segno +
+ BATCHED_TRIM_SEGMENTS(sbi),
+ sbi->segs_per_sec) - 1, end_segno);
+
+ mutex_lock(&sbi->gc_mutex);
+ write_checkpoint(sbi, &cpc);
+ mutex_unlock(&sbi->gc_mutex);
+ }
out:
- range->len = cpc.trimmed << sbi->log_blocksize;
+ range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
return 0;
}
@@ -1151,11 +1157,18 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg;
+ bool direct_io = (type == CURSEG_DIRECT_IO);
+
+ type = direct_io ? CURSEG_WARM_DATA : type;
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
+ /* direct_io'ed data is aligned to the segment for better performance */
+ if (direct_io && curseg->next_blkoff)
+ __allocate_new_segments(sbi, type);
+
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
/*
@@ -1187,39 +1200,39 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
}
static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
- block_t old_blkaddr, block_t *new_blkaddr,
- struct f2fs_summary *sum, struct f2fs_io_info *fio)
+ struct f2fs_summary *sum,
+ struct f2fs_io_info *fio)
{
int type = __get_segment_type(page, fio->type);
- allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);
+ allocate_data_block(sbi, page, fio->blk_addr, &fio->blk_addr, sum, type);
/* writeout dirty page into bdev */
- f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
+ f2fs_submit_page_mbio(sbi, page, fio);
}
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
struct f2fs_io_info fio = {
.type = META,
- .rw = WRITE_SYNC | REQ_META | REQ_PRIO
+ .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
+ .blk_addr = page->index,
};
set_page_writeback(page);
- f2fs_submit_page_mbio(sbi, page, page->index, &fio);
+ f2fs_submit_page_mbio(sbi, page, &fio);
}
void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
- struct f2fs_io_info *fio,
- unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
+ unsigned int nid, struct f2fs_io_info *fio)
{
struct f2fs_summary sum;
set_summary(&sum, nid, 0, 0);
- do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
+ do_write_page(sbi, page, &sum, fio);
}
void write_data_page(struct page *page, struct dnode_of_data *dn,
- block_t *new_blkaddr, struct f2fs_io_info *fio)
+ struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
@@ -1228,14 +1241,14 @@ void write_data_page(struct page *page, struct dnode_of_data *dn,
f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
- do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
+ do_write_page(sbi, page, &sum, fio);
+ dn->data_blkaddr = fio->blk_addr;
}
-void rewrite_data_page(struct page *page, block_t old_blkaddr,
- struct f2fs_io_info *fio)
+void rewrite_data_page(struct page *page, struct f2fs_io_info *fio)
{
- f2fs_submit_page_mbio(F2FS_P_SB(page), page, old_blkaddr, fio);
+ stat_inc_inplace_blocks(F2FS_P_SB(page));
+ f2fs_submit_page_mbio(F2FS_P_SB(page), page, fio);
}
void recover_data_page(struct f2fs_sb_info *sbi,
@@ -1393,7 +1406,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
segno = le32_to_cpu(ckpt->cur_data_segno[type]);
blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
CURSEG_HOT_DATA]);
- if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+ if (__exist_node_summaries(sbi))
blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
else
blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
@@ -1402,7 +1415,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
CURSEG_HOT_NODE]);
blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
CURSEG_HOT_NODE]);
- if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+ if (__exist_node_summaries(sbi))
blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
type - CURSEG_HOT_NODE);
else
@@ -1413,7 +1426,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
sum = (struct f2fs_summary_block *)page_address(new);
if (IS_NODESEG(type)) {
- if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
+ if (__exist_node_summaries(sbi)) {
struct f2fs_summary *ns = &sum->entries[0];
int i;
for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
@@ -1450,12 +1463,22 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
int err;
if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+ int npages = npages_for_summary_flush(sbi, true);
+
+ if (npages >= 2)
+ ra_meta_pages(sbi, start_sum_block(sbi), npages,
+ META_CP);
+
/* restore for compacted data summary */
if (read_compacted_summaries(sbi))
return -EINVAL;
type = CURSEG_HOT_NODE;
}
+ if (__exist_node_summaries(sbi))
+ ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
+ NR_CURSEG_TYPE - type, META_CP);
+
for (; type <= CURSEG_COLD_NODE; type++) {
err = read_normal_summaries(sbi, type);
if (err)
@@ -1549,8 +1572,7 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
- write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
+ write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
@@ -1754,7 +1776,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
se = get_seg_entry(sbi, segno);
/* add discard candidates */
- if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) {
+ if (cpc->reason != CP_DISCARD) {
cpc->trim_start = segno;
add_discard_addrs(sbi, cpc);
}
@@ -1833,6 +1855,10 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
return -ENOMEM;
}
+ sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->tmp_map)
+ return -ENOMEM;
+
if (sbi->segs_per_sec > 1) {
sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
sizeof(struct sec_entry));
@@ -1897,7 +1923,7 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
free_i->free_segments = 0;
free_i->free_sections = 0;
- rwlock_init(&free_i->segmap_lock);
+ spin_lock_init(&free_i->segmap_lock);
return 0;
}
@@ -2110,6 +2136,8 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->nr_discards = 0;
sm_info->max_discards = 0;
+ sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
+
INIT_LIST_HEAD(&sm_info->sit_entry_set);
if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
@@ -2212,6 +2240,8 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
kfree(sit_i->sentries[start].ckpt_valid_map);
}
}
+ kfree(sit_i->tmp_map);
+
vfree(sit_i->sentries);
vfree(sit_i->sec_entries);
kfree(sit_i->dirty_sentries_bitmap);
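For reference, the batched trim path above is reached through the standard FITRIM ioctl; a rough userspace sketch of what fstrim(8) effectively does is shown below. Illustrative only; the mount-point path is a placeholder supplied by the caller.

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

static int trim_f2fs(const char *mountpoint)
{
	struct fstrim_range range;
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&range, 0, sizeof(range));
	range.len = ULLONG_MAX;		/* cover the whole filesystem */
	range.minlen = 0;		/* f2fs_trim_fs converts this to blocks */

	if (ioctl(fd, FITRIM, &range) < 0) {
		close(fd);
		return -1;
	}

	/* on return, range.len holds the number of bytes actually trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}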
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 7f327c0ba4e3..7fd35111cf62 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -189,6 +189,7 @@ struct sit_info {
char *sit_bitmap; /* SIT bitmap pointer */
unsigned int bitmap_size; /* SIT bitmap size */
+ unsigned long *tmp_map; /* bitmap for temporary use */
unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
unsigned int dirty_sentries; /* # of dirty sentries */
unsigned int sents_per_block; /* # of SIT entries per block */
@@ -207,7 +208,7 @@ struct free_segmap_info {
unsigned int start_segno; /* start segment number logically */
unsigned int free_segments; /* # of free segments */
unsigned int free_sections; /* # of free sections */
- rwlock_t segmap_lock; /* free segmap lock */
+ spinlock_t segmap_lock; /* free segmap lock */
unsigned long *free_segmap; /* free segment bitmap */
unsigned long *free_secmap; /* free section bitmap */
};
@@ -318,9 +319,9 @@ static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
unsigned int max, unsigned int segno)
{
unsigned int ret;
- read_lock(&free_i->segmap_lock);
+ spin_lock(&free_i->segmap_lock);
ret = find_next_bit(free_i->free_segmap, max, segno);
- read_unlock(&free_i->segmap_lock);
+ spin_unlock(&free_i->segmap_lock);
return ret;
}
@@ -331,7 +332,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int start_segno = secno * sbi->segs_per_sec;
unsigned int next;
- write_lock(&free_i->segmap_lock);
+ spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
@@ -340,7 +341,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
}
- write_unlock(&free_i->segmap_lock);
+ spin_unlock(&free_i->segmap_lock);
}
static inline void __set_inuse(struct f2fs_sb_info *sbi,
@@ -362,7 +363,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int start_segno = secno * sbi->segs_per_sec;
unsigned int next;
- write_lock(&free_i->segmap_lock);
+ spin_lock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
@@ -373,7 +374,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
free_i->free_sections++;
}
}
- write_unlock(&free_i->segmap_lock);
+ spin_unlock(&free_i->segmap_lock);
}
static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
@@ -381,13 +382,13 @@ static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
{
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int secno = segno / sbi->segs_per_sec;
- write_lock(&free_i->segmap_lock);
+ spin_lock(&free_i->segmap_lock);
if (!test_and_set_bit(segno, free_i->free_segmap)) {
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
free_i->free_sections--;
}
- write_unlock(&free_i->segmap_lock);
+ spin_unlock(&free_i->segmap_lock);
}
static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
@@ -460,7 +461,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
- if (unlikely(sbi->por_doing))
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
@@ -599,13 +600,13 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
if (segno > TOTAL_SEGS(sbi) - 1)
- sbi->need_fsck = true;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
- sbi->need_fsck = true;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
/*
@@ -616,11 +617,11 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
{
/* check segment usage */
if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
- sbi->need_fsck = true;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
/* check boundary of a given segment number */
if (segno > TOTAL_SEGS(sbi) - 1)
- sbi->need_fsck = true;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
#endif
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index f71421d70475..f2fe666a6ea9 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -30,6 +30,7 @@
#include "segment.h"
#include "xattr.h"
#include "gc.h"
+#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
@@ -41,6 +42,7 @@ static struct kset *f2fs_kset;
enum {
Opt_gc_background,
Opt_disable_roll_forward,
+ Opt_norecovery,
Opt_discard,
Opt_noheap,
Opt_user_xattr,
@@ -61,6 +63,7 @@ enum {
static match_table_t f2fs_tokens = {
{Opt_gc_background, "background_gc=%s"},
{Opt_disable_roll_forward, "disable_roll_forward"},
+ {Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
{Opt_noheap, "no_heap"},
{Opt_user_xattr, "user_xattr"},
@@ -192,6 +195,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
@@ -207,6 +211,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_idle),
ATTR_LIST(reclaim_segments),
ATTR_LIST(max_small_discards),
+ ATTR_LIST(batched_trim_sections),
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
@@ -286,6 +291,12 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_disable_roll_forward:
set_opt(sbi, DISABLE_ROLL_FORWARD);
break;
+ case Opt_norecovery:
+ /* this option requires a read-only mount */
+ set_opt(sbi, DISABLE_ROLL_FORWARD);
+ if (!f2fs_readonly(sb))
+ return -EINVAL;
+ break;
case Opt_discard:
set_opt(sbi, DISCARD);
break;
@@ -446,8 +457,13 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_stats(sbi);
stop_gc_thread(sbi);
- /* We don't need to do checkpoint when it's clean */
- if (sbi->s_dirty) {
+ /*
+ * We don't need to do a checkpoint when the superblock is clean.
+ * But if the previous checkpoint was not done by umount, we need to
+ * do a clean checkpoint again.
+ */
+ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+ !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -486,13 +502,15 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (sync) {
struct cp_control cpc;
- cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
+ cpc.reason = __get_cp_reason(sbi);
+
mutex_lock(&sbi->gc_mutex);
write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
} else {
f2fs_balance_fs(sbi);
}
+ f2fs_trace_ios(NULL, NULL, 1);
return 0;
}
@@ -887,7 +905,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
atomic_set(&sbi->nr_pages[i], 0);
sbi->dir_level = DEF_DIR_LEVEL;
- sbi->need_fsck = false;
+ clear_sbi_flag(sbi, SBI_NEED_FSCK);
}
/*
@@ -942,6 +960,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root;
long err = -EINVAL;
bool retry = true;
+ char *options = NULL;
int i;
try_onemore:
@@ -973,9 +992,15 @@ try_onemore:
set_opt(sbi, POSIX_ACL);
#endif
/* parse mount options */
- err = parse_options(sb, (char *)data);
- if (err)
+ options = kstrdup((const char *)data, GFP_KERNEL);
+ if (data && !options) {
+ err = -ENOMEM;
goto free_sb_buf;
+ }
+
+ err = parse_options(sb, options);
+ if (err)
+ goto free_options;
sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
sb->s_max_links = F2FS_LINK_MAX;
@@ -998,7 +1023,7 @@ try_onemore:
mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
- sbi->por_doing = false;
+ clear_sbi_flag(sbi, SBI_POR_DOING);
spin_lock_init(&sbi->stat_lock);
init_rwsem(&sbi->read_io.io_rwsem);
@@ -1019,7 +1044,7 @@ try_onemore:
if (IS_ERR(sbi->meta_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
- goto free_sb_buf;
+ goto free_options;
}
err = get_valid_checkpoint(sbi);
@@ -1122,10 +1147,19 @@ try_onemore:
goto free_proc;
if (!retry)
- sbi->need_fsck = true;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
+ /*
+ * the mount should fail when the device is read-only and the
+ * previous checkpoint was not done by a clean system shutdown.
+ */
+ if (bdev_read_only(sb->s_bdev) &&
+ !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+ err = -EROFS;
+ goto free_kobj;
+ }
err = recover_fsync_data(sbi);
if (err) {
f2fs_msg(sb, KERN_ERR,
@@ -1144,6 +1178,7 @@ try_onemore:
if (err)
goto free_kobj;
}
+ kfree(options);
return 0;
free_kobj:
@@ -1168,6 +1203,8 @@ free_cp:
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
+free_options:
+ kfree(options);
free_sb_buf:
brelse(raw_super_buf);
free_sbi:
@@ -1188,11 +1225,18 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
+static void kill_f2fs_super(struct super_block *sb)
+{
+ if (sb->s_root)
+ set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
+ kill_block_super(sb);
+}
+
static struct file_system_type f2fs_fs_type = {
.owner = THIS_MODULE,
.name = "f2fs",
.mount = f2fs_mount,
- .kill_sb = kill_block_super,
+ .kill_sb = kill_f2fs_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
@@ -1220,6 +1264,8 @@ static int __init init_f2fs_fs(void)
{
int err;
+ f2fs_build_trace_ios();
+
err = init_inodecache();
if (err)
goto fail;
@@ -1229,12 +1275,9 @@ static int __init init_f2fs_fs(void)
err = create_segment_manager_caches();
if (err)
goto free_node_manager_caches;
- err = create_gc_caches();
- if (err)
- goto free_segment_manager_caches;
err = create_checkpoint_caches();
if (err)
- goto free_gc_caches;
+ goto free_segment_manager_caches;
f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
if (!f2fs_kset) {
err = -ENOMEM;
@@ -1251,8 +1294,6 @@ free_kset:
kset_unregister(f2fs_kset);
free_checkpoint_caches:
destroy_checkpoint_caches();
-free_gc_caches:
- destroy_gc_caches();
free_segment_manager_caches:
destroy_segment_manager_caches();
free_node_manager_caches:
@@ -1269,11 +1310,11 @@ static void __exit exit_f2fs_fs(void)
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
destroy_checkpoint_caches();
- destroy_gc_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
destroy_inodecache();
kset_unregister(f2fs_kset);
+ f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
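As a usage note for the new "norecovery" option parsed above: it implies disable_roll_forward and is only accepted on read-only mounts (parse_options() returns -EINVAL otherwise). A minimal sketch with mount(2) follows; the device and mount point are placeholders, not part of the patch.

#include <sys/mount.h>

/* norecovery is rejected unless the filesystem is mounted read-only,
 * so MS_RDONLY must be passed along with the option string. */
static int mount_f2fs_norecovery(const char *dev, const char *dir)
{
	return mount(dev, dir, "f2fs", MS_RDONLY, "norecovery");
}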
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
new file mode 100644
index 000000000000..875aa8179bc1
--- /dev/null
+++ b/fs/f2fs/trace.c
@@ -0,0 +1,159 @@
+/*
+ * f2fs IO tracer
+ *
+ * Copyright (c) 2014 Motorola Mobility
+ * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/sched.h>
+#include <linux/radix-tree.h>
+
+#include "f2fs.h"
+#include "trace.h"
+
+static RADIX_TREE(pids, GFP_ATOMIC);
+static spinlock_t pids_lock;
+static struct last_io_info last_io;
+
+static inline void __print_last_io(void)
+{
+ if (!last_io.len)
+ return;
+
+ trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n",
+ last_io.major, last_io.minor,
+ last_io.pid, "----------------",
+ last_io.type,
+ last_io.fio.rw, last_io.fio.blk_addr,
+ last_io.len);
+ memset(&last_io, 0, sizeof(last_io));
+}
+
+static int __file_type(struct inode *inode, pid_t pid)
+{
+ if (f2fs_is_atomic_file(inode))
+ return __ATOMIC_FILE;
+ else if (f2fs_is_volatile_file(inode))
+ return __VOLATILE_FILE;
+ else if (S_ISDIR(inode->i_mode))
+ return __DIR_FILE;
+ else if (inode->i_ino == F2FS_NODE_INO(F2FS_I_SB(inode)))
+ return __NODE_FILE;
+ else if (inode->i_ino == F2FS_META_INO(F2FS_I_SB(inode)))
+ return __META_FILE;
+ else if (pid)
+ return __NORMAL_FILE;
+ else
+ return __MISC_FILE;
+}
+
+void f2fs_trace_pid(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ pid_t pid = task_pid_nr(current);
+ void *p;
+
+ page->private = pid;
+
+ if (radix_tree_preload(GFP_NOFS))
+ return;
+
+ spin_lock(&pids_lock);
+ p = radix_tree_lookup(&pids, pid);
+ if (p == current)
+ goto out;
+ if (p)
+ radix_tree_delete(&pids, pid);
+
+ f2fs_radix_tree_insert(&pids, pid, current);
+
+ trace_printk("%3x:%3x %4x %-16s\n",
+ MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
+ pid, current->comm);
+out:
+ spin_unlock(&pids_lock);
+ radix_tree_preload_end();
+}
+
+void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush)
+{
+ struct inode *inode;
+ pid_t pid;
+ int major, minor;
+
+ if (flush) {
+ __print_last_io();
+ return;
+ }
+
+ inode = page->mapping->host;
+ pid = page_private(page);
+
+ major = MAJOR(inode->i_sb->s_dev);
+ minor = MINOR(inode->i_sb->s_dev);
+
+ if (last_io.major == major && last_io.minor == minor &&
+ last_io.pid == pid &&
+ last_io.type == __file_type(inode, pid) &&
+ last_io.fio.rw == fio->rw &&
+ last_io.fio.blk_addr + last_io.len == fio->blk_addr) {
+ last_io.len++;
+ return;
+ }
+
+ __print_last_io();
+
+ last_io.major = major;
+ last_io.minor = minor;
+ last_io.pid = pid;
+ last_io.type = __file_type(inode, pid);
+ last_io.fio = *fio;
+ last_io.len = 1;
+ return;
+}
+
+void f2fs_build_trace_ios(void)
+{
+ spin_lock_init(&pids_lock);
+}
+
+#define PIDVEC_SIZE 128
+static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index,
+ unsigned int max_items)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+ unsigned int ret = 0;
+
+ if (unlikely(!max_items))
+ return 0;
+
+ radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
+ results[ret] = iter.index;
+ if (++ret == PIDVEC_SIZE)
+ break;
+ }
+ return ret;
+}
+
+void f2fs_destroy_trace_ios(void)
+{
+ pid_t pid[PIDVEC_SIZE];
+ pid_t next_pid = 0;
+ unsigned int found;
+
+ spin_lock(&pids_lock);
+ while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
+ unsigned idx;
+
+ next_pid = pid[found - 1] + 1;
+ for (idx = 0; idx < found; idx++)
+ radix_tree_delete(&pids, pid[idx]);
+ }
+ spin_unlock(&pids_lock);
+}
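
The tracer above coalesces consecutive page IOs into one trace line: f2fs_trace_ios() only bumps last_io.len while the next request comes from the same pid with the same type and rw flags and its block address continues where the previous one ended; any discontinuity is flushed through __print_last_io(). The following standalone sketch (not part of the patch; every name in it is invented for illustration) shows the same run-length coalescing in plain C:

#include <stdio.h>

struct req { int pid, rw; unsigned int blk; };

/* pending record, like last_io in trace.c */
static struct { int pid, rw; unsigned int blk, len; } last;

static void flush_last(void)
{
        if (!last.len)
                return;
        printf("pid %d rw %d blk %u len %u\n",
               last.pid, last.rw, last.blk, last.len);
        last.len = 0;
}

static void trace_req(const struct req *r)
{
        if (last.len && last.pid == r->pid && last.rw == r->rw &&
            last.blk + last.len == r->blk) {
                last.len++;             /* contiguous: extend the pending record */
                return;
        }
        flush_last();                   /* discontinuity: emit and start over */
        last.pid = r->pid;
        last.rw = r->rw;
        last.blk = r->blk;
        last.len = 1;
}

int main(void)
{
        struct req reqs[] = {
                { 100, 1, 8 }, { 100, 1, 9 }, { 100, 1, 10 },  /* one merged record */
                { 100, 1, 20 },                                /* gap: new record */
                { 200, 0, 21 },                                /* different stream */
        };
        for (unsigned i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
                trace_req(&reqs[i]);
        flush_last();
        return 0;
}

Feeding it three contiguous requests and two outliers prints one merged record of length 3 followed by two single-block records.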
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
new file mode 100644
index 000000000000..1041dbeb52ae
--- /dev/null
+++ b/fs/f2fs/trace.h
@@ -0,0 +1,46 @@
+/*
+ * f2fs IO tracer
+ *
+ * Copyright (c) 2014 Motorola Mobility
+ * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __F2FS_TRACE_H__
+#define __F2FS_TRACE_H__
+
+#ifdef CONFIG_F2FS_IO_TRACE
+#include <trace/events/f2fs.h>
+
+enum file_type {
+ __NORMAL_FILE,
+ __DIR_FILE,
+ __NODE_FILE,
+ __META_FILE,
+ __ATOMIC_FILE,
+ __VOLATILE_FILE,
+ __MISC_FILE,
+};
+
+struct last_io_info {
+ int major, minor;
+ pid_t pid;
+ enum file_type type;
+ struct f2fs_io_info fio;
+ block_t len;
+};
+
+extern void f2fs_trace_pid(struct page *);
+extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int);
+extern void f2fs_build_trace_ios(void);
+extern void f2fs_destroy_trace_ios(void);
+#else
+#define f2fs_trace_pid(p)
+#define f2fs_trace_ios(p, i, n)
+#define f2fs_build_trace_ios()
+#define f2fs_destroy_trace_ios()
+
+#endif
+#endif /* __F2FS_TRACE_H__ */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 2d609a5fbfea..c399152de397 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -66,15 +66,21 @@ int writeback_in_progress(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL(writeback_in_progress);
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
- struct super_block *sb = inode->i_sb;
+ struct super_block *sb;
- if (sb_is_blkdev_sb(sb))
- return inode->i_mapping->backing_dev_info;
+ if (!inode)
+ return &noop_backing_dev_info;
+ sb = inode->i_sb;
+#ifdef CONFIG_BLOCK
+ if (sb_is_blkdev_sb(sb))
+ return blk_get_backing_dev_info(I_BDEV(inode));
+#endif
return sb->s_bdi;
}
+EXPORT_SYMBOL_GPL(inode_to_bdi);
static inline struct inode *wb_inode(struct list_head *head)
{
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ba1107977f2e..ed19a7d622fa 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
req->in.h.pid = current->pid;
}
+void fuse_set_initialized(struct fuse_conn *fc)
+{
+ /* Make sure stores before this are seen on another CPU */
+ smp_wmb();
+ fc->initialized = 1;
+}
+
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
if (intr)
goto out;
}
+ /* Matches smp_wmb() in fuse_set_initialized() */
+ smp_rmb();
err = -ENOTCONN;
if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
atomic_inc(&fc->num_waiting);
wait_event(fc->blocked_waitq, fc->initialized);
+ /* Matches smp_wmb() in fuse_set_initialized() */
+ smp_rmb();
req = fuse_request_alloc(0);
if (!req)
req = get_reserved_req(fc, file);
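
fuse_set_initialized() pairs its smp_wmb() with the smp_rmb() calls added to the request paths above: once a waiter observes fc->initialized set, it is also guaranteed to see every store made before the flag was raised. Roughly the same publish/observe pairing, sketched as a standalone C11 program (not part of the patch; atomic fences and pthreads stand in for the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                  /* ordinary data written before the flag */
static atomic_int initialized;       /* stands in for fc->initialized */

static void *writer(void *arg)
{
        (void)arg;
        payload = 42;                                      /* stores that must be visible */
        atomic_thread_fence(memory_order_release);         /* smp_wmb() analogue */
        atomic_store_explicit(&initialized, 1, memory_order_relaxed);
        return NULL;
}

static void *reader(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&initialized, memory_order_relaxed))
                ;                                          /* wait_event() analogue */
        atomic_thread_fence(memory_order_acquire);         /* smp_rmb() analogue */
        printf("payload = %d\n", payload);                 /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}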
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
EXPORT_SYMBOL_GPL(fuse_request_send);
+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+ if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+ args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+ if (fc->minor < 9) {
+ switch (args->in.h.opcode) {
+ case FUSE_LOOKUP:
+ case FUSE_CREATE:
+ case FUSE_MKNOD:
+ case FUSE_MKDIR:
+ case FUSE_SYMLINK:
+ case FUSE_LINK:
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ break;
+ case FUSE_GETATTR:
+ case FUSE_SETATTR:
+ args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ break;
+ }
+ }
+ if (fc->minor < 12) {
+ switch (args->in.h.opcode) {
+ case FUSE_CREATE:
+ args->in.args[0].size = sizeof(struct fuse_open_in);
+ break;
+ case FUSE_MKNOD:
+ args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+ break;
+ }
+ }
+}
+
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Needs to be done after fuse_get_req() so that fc->minor is valid */
+ fuse_adjust_compat(fc, args);
+
req->in.h.opcode = args->in.h.opcode;
req->in.h.nodeid = args->in.h.nodeid;
req->in.numargs = args->in.numargs;
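
fuse_simple_request() now funnels every request through fuse_adjust_compat(), which centralizes the protocol-minor checks that the callers in dir.c and inode.c used to open-code: for an old userspace the expected argument size is dropped back to the historical FUSE_COMPAT_* value. A minimal sketch of that negotiation (not part of the patch; the structures, the prefix layout and the version cut-off are invented for illustration):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct attr_out_old { unsigned long ino, size; };                 /* hypothetical old layout */
struct attr_out     { unsigned long ino, size, blocks, extra; };  /* hypothetical new layout */

static size_t attr_out_size(int peer_minor)
{
        /* Peers below minor 9 only know the short form, assumed to be a prefix. */
        return peer_minor < 9 ? sizeof(struct attr_out_old)
                              : sizeof(struct attr_out);
}

int main(void)
{
        unsigned char reply[sizeof(struct attr_out)] = { 0 };
        struct attr_out out = { 0 };

        /* Accept only as many bytes as the peer's protocol version defines. */
        memcpy(&out, reply, attr_out_size(8));
        printf("v8 peer: %zu of %zu bytes\n", attr_out_size(8), sizeof(out));
        printf("v9 peer: %zu of %zu bytes\n", attr_out_size(9), sizeof(out));
        return 0;
}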
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
if (fc->connected) {
fc->connected = 0;
fc->blocked = 0;
- fc->initialized = 1;
+ fuse_set_initialized(fc);
end_io_requests(fc);
end_queued_requests(fc);
end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
spin_lock(&fc->lock);
fc->connected = 0;
fc->blocked = 0;
- fc->initialized = 1;
+ fuse_set_initialized(fc);
end_queued_requests(fc);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 252b8a5de8b5..08e7b1a9d5d0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
args->in.args[0].size = name->len + 1;
args->in.args[0].value = name->name;
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(struct fuse_entry_out);
+ args->out.args[0].size = sizeof(struct fuse_entry_out);
args->out.args[0].value = outarg;
}
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.in.h.opcode = FUSE_CREATE;
args.in.h.nodeid = get_node_id(dir);
args.in.numargs = 2;
- args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
- sizeof(inarg);
+ args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.in.args[1].size = entry->d_name.len + 1;
args.in.args[1].value = entry->d_name.name;
args.out.numargs = 2;
- if (fc->minor < 9)
- args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args.out.args[0].size = sizeof(outentry);
+ args.out.args[0].size = sizeof(outentry);
args.out.args[0].value = &outentry;
args.out.args[1].size = sizeof(outopen);
args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
memset(&outarg, 0, sizeof(outarg));
args->in.h.nodeid = get_node_id(dir);
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(outarg);
+ args->out.args[0].size = sizeof(outarg);
args->out.args[0].value = &outarg;
err = fuse_simple_request(fc, args);
if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.umask = current_umask();
args.in.h.opcode = FUSE_MKNOD;
args.in.numargs = 2;
- args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
- sizeof(inarg);
+ args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.in.args[1].size = entry->d_name.len + 1;
args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
- if (fc->minor < 9)
- args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
- else
- args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
args->in.args[0].size = sizeof(*inarg_p);
args->in.args[0].value = inarg_p;
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(*outarg_p);
+ args->out.args[0].size = sizeof(*outarg_p);
args->out.args[0].value = outarg_p;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 760b2c552197..c01ec3bdcfd8 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1159,7 +1159,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
mutex_lock(&inode->i_mutex);
/* We can write back this queue in page reclaim */
- current->backing_dev_info = mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
@@ -1464,7 +1464,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
struct inode *inode = req->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
list_del(&req->writepages_entry);
@@ -1658,7 +1658,7 @@ static int fuse_writepage_locked(struct page *page)
req->end = fuse_writepage_end;
req->inode = inode;
- inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+ inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fc->lock);
@@ -1768,7 +1768,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
old_req->state == FUSE_REQ_PENDING)) {
- struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
copy_highpage(old_req->pages[0], page);
spin_unlock(&fc->lock);
@@ -1872,7 +1872,7 @@ static int fuse_writepages_fill(struct page *page,
req->page_descs[req->num_pages].offset = 0;
req->page_descs[req->num_pages].length = PAGE_SIZE;
- inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+ inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
err = 0;
@@ -2062,7 +2062,6 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = fuse_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e0fc6725d1d0..1cdfb07c1376 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
int fuse_do_setattr(struct inode *inode, struct iattr *attr,
struct file *file);
+void fuse_set_initialized(struct fuse_conn *fc);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6749109f255d..e8799c11424b 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -308,7 +308,6 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
if (!fc->writeback_cache || !S_ISREG(attr->mode))
inode->i_flags |= S_NOCMTIME;
inode->i_generation = generation;
- inode->i_data.backing_dev_info = &fc->bdi;
fuse_init_inode(inode, attr);
unlock_new_inode(inode);
} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
@@ -424,8 +423,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
args.in.h.opcode = FUSE_STATFS;
args.in.h.nodeid = get_node_id(dentry->d_inode);
args.out.numargs = 1;
- args.out.args[0].size =
- fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+ args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
@@ -898,7 +896,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->max_write = max_t(unsigned, 4096, fc->max_write);
fc->conn_init = 1;
}
- fc->initialized = 1;
+ fuse_set_initialized(fc);
wake_up_all(&fc->blocked_waitq);
}
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3088e2a38e30..7b3143064af1 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
BUG_ON(name == NULL);
- if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
+ if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
return -E2BIG;
if (type == ACL_TYPE_ACCESS) {
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 805b37fed638..4ad4f94edebe 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -289,7 +289,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- trace_wbc_writepage(wbc, mapping->backing_dev_info);
+ trace_wbc_writepage(wbc, inode_to_bdi(inode));
ret = __gfs2_jdata_writepage(page, wbc);
if (unlikely(ret)) {
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c5a34f09e228..6371192961e2 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1896,7 +1896,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
if (ht == NULL)
- ht = vzalloc(size);
+ ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
+ PAGE_KERNEL);
if (!ht)
return -ENOMEM;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 6e600abf694a..ec9c2d33477a 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -498,7 +498,6 @@ static const struct vm_operations_struct gfs2_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = gfs2_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
/**
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a23524aa3eac..f42dffba056a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -173,19 +173,14 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
spin_unlock(&lru_lock);
}
-static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
+ spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
clear_bit(GLF_LRU, &gl->gl_flags);
}
-}
-
-static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
-{
- spin_lock(&lru_lock);
- __gfs2_glock_remove_from_lru(gl);
spin_unlock(&lru_lock);
}
@@ -205,9 +200,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
lockref_mark_dead(&gl->gl_lockref);
- spin_lock(&lru_lock);
- __gfs2_glock_remove_from_lru(gl);
- spin_unlock(&lru_lock);
+ gfs2_glock_remove_from_lru(gl);
spin_unlock(&gl->gl_lockref.lock);
spin_lock_bucket(gl->gl_hash);
hlist_bl_del_rcu(&gl->gl_list);
@@ -775,7 +768,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->private_data = NULL;
- mapping->backing_dev_info = s->s_bdi;
mapping->writeback_index = 0;
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 9054002ebe70..73c72253faac 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -543,10 +543,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
}
error = gfs2_dir_add(&dip->i_inode, name, ip, da);
- if (error)
- goto fail_end_trans;
-fail_end_trans:
gfs2_trans_end(sdp);
fail_ipreserv:
gfs2_inplace_release(dip);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8633ad328ee2..efc8e254787c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -112,7 +112,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->private_data = NULL;
- mapping->backing_dev_info = sb->s_bdi;
mapping->writeback_index = 0;
spin_lock_init(&sdp->sd_log_lock);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148bbdc8b..3aa17d4d1cfc 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -145,7 +145,8 @@ static void gfs2_qd_dispose(struct list_head *list)
}
-static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
+static enum lru_status gfs2_qd_isolate(struct list_head *item,
+ struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *dispose = arg;
struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
@@ -155,7 +156,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock,
if (qd->qd_lockref.count == 0) {
lockref_mark_dead(&qd->qd_lockref);
- list_move(&qd->qd_lru, dispose);
+ list_lru_isolate_move(lru, &qd->qd_lru, dispose);
}
spin_unlock(&qd->qd_lockref.lock);
@@ -171,8 +172,8 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
- freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
- &dispose, &sc->nr_to_scan);
+ freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
+ gfs2_qd_isolate, &dispose);
gfs2_qd_dispose(&dispose);
@@ -182,7 +183,7 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
+ return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}
struct shrinker gfs2_qd_shrinker = {
@@ -667,7 +668,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 change, struct gfs2_quota_data *qd,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct inode *inode = &ip->i_inode;
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +698,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
be64_add_cpu(&q.qu_value, change);
qd->qd_qb.qb_value = q.qu_value;
if (fdq) {
- if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPC_SOFT) {
+ q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_warn = q.qu_warn;
}
- if (fdq->d_fieldmask & FS_DQ_BHARD) {
- q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPC_HARD) {
+ q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_limit = q.qu_limit;
}
- if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
- q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPACE) {
+ q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_value = q.qu_value;
}
}
@@ -1497,7 +1498,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
}
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1506,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
struct gfs2_holder q_gh;
int error;
- memset(fdq, 0, sizeof(struct fs_disk_quota));
+ memset(fdq, 0, sizeof(*fdq));
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1523,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
goto out;
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
- fdq->d_version = FS_DQUOT_VERSION;
- fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
- fdq->d_id = from_kqid_munged(current_user_ns(), qid);
- fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
- fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
- fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+ fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+ fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+ fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
gfs2_glock_dq_uninit(&q_gh);
out:
@@ -1536,10 +1534,10 @@ out:
}
/* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1581,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
goto out_i;
/* If nothing has changed, this is a no-op */
- if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
- ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
- fdq->d_fieldmask ^= FS_DQ_BSOFT;
+ if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+ ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+ fdq->d_fieldmask ^= QC_SPC_SOFT;
- if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
- ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
- fdq->d_fieldmask ^= FS_DQ_BHARD;
+ if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+ ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+ fdq->d_fieldmask ^= QC_SPC_HARD;
- if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
- ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
- fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+ if ((fdq->d_fieldmask & QC_SPACE) &&
+ ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+ fdq->d_fieldmask ^= QC_SPACE;
if (fdq->d_fieldmask == 0)
goto out_i;
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 573bd3b758fa..1b645773c98e 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -439,7 +439,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
ls->ls_recover_jid_done = jid;
ls->ls_recover_jid_status = message;
- sprintf(env_jid, "JID=%d", jid);
+ sprintf(env_jid, "JID=%u", jid);
sprintf(env_status, "RECOVERY=%s",
message == LM_RD_SUCCESS ? "Done" : "Failed");
kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 5b327f837de7..1666382b198d 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -743,7 +743,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
- struct backing_dev_info *bdi = metamapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
int ret = 0;
if (wbc->sync_mode == WB_SYNC_ALL)
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 3ab566ba5696..ae8e8811f0e8 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -96,7 +96,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
struct super_block *sb = sdp->sd_vfs;
int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
- return snprintf(buf, PAGE_SIZE, "%u\n", frozen);
+ return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 5eba47f593f8..c274aca8e8dc 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,12 +62,6 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
-static struct backing_dev_info hugetlbfs_backing_dev_info = {
- .name = "hugetlbfs",
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
int sysctl_hugetlb_shm_group;
enum {
@@ -498,7 +492,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
- inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_mapping->private_data = resv_map;
info = HUGETLBFS_I(inode);
@@ -1032,10 +1025,6 @@ static int __init init_hugetlbfs_fs(void)
return -ENOTSUPP;
}
- error = bdi_init(&hugetlbfs_backing_dev_info);
- if (error)
- return error;
-
error = -ENOMEM;
hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
sizeof(struct hugetlbfs_inode_info),
@@ -1071,7 +1060,6 @@ static int __init init_hugetlbfs_fs(void)
out:
kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
- bdi_destroy(&hugetlbfs_backing_dev_info);
return error;
}
@@ -1091,7 +1079,6 @@ static void __exit exit_hugetlbfs_fs(void)
for_each_hstate(h)
kern_unmount(hugetlbfs_vfsmount[i++]);
unregister_filesystem(&hugetlbfs_fs_type);
- bdi_destroy(&hugetlbfs_backing_dev_info);
}
module_init(init_hugetlbfs_fs)
diff --git a/fs/inode.c b/fs/inode.c
index aa149e7262ac..86bfaca724db 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -170,20 +170,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
atomic_set(&mapping->i_mmap_writable, 0);
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
mapping->private_data = NULL;
- mapping->backing_dev_info = &default_backing_dev_info;
mapping->writeback_index = 0;
-
- /*
- * If the block_device provides a backing_dev_info for client
- * inodes then use that. Otherwise the inode share the bdev's
- * backing_dev_info.
- */
- if (sb->s_bdev) {
- struct backing_dev_info *bdi;
-
- bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
- mapping->backing_dev_info = bdi;
- }
inode->i_private = NULL;
inode->i_mapping = mapping;
INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
@@ -194,7 +181,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
#ifdef CONFIG_FSNOTIFY
inode->i_fsnotify_mask = 0;
#endif
-
+ inode->i_flctx = NULL;
this_cpu_inc(nr_inodes);
return 0;
@@ -237,6 +224,7 @@ void __destroy_inode(struct inode *inode)
BUG_ON(inode_has_buffers(inode));
security_inode_free(inode);
fsnotify_inode_delete(inode);
+ locks_free_lock_context(inode->i_flctx);
if (!inode->i_nlink) {
WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
atomic_long_dec(&inode->i_sb->s_remove_count);
@@ -355,7 +343,6 @@ void address_space_init_once(struct address_space *mapping)
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
mapping->i_mmap = RB_ROOT;
- INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);
@@ -685,8 +672,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
* LRU does not have strict ordering. Hence we don't want to reclaim inodes
* with this flag set because they are the inodes that are out of order.
*/
-static enum lru_status
-inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+static enum lru_status inode_lru_isolate(struct list_head *item,
+ struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *freeable = arg;
struct inode *inode = container_of(item, struct inode, i_lru);
@@ -704,7 +691,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
*/
if (atomic_read(&inode->i_count) ||
(inode->i_state & ~I_REFERENCED)) {
- list_del_init(&inode->i_lru);
+ list_lru_isolate(lru, &inode->i_lru);
spin_unlock(&inode->i_lock);
this_cpu_dec(nr_unused);
return LRU_REMOVED;
@@ -738,7 +725,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
- list_move(&inode->i_lru, freeable);
+ list_lru_isolate_move(lru, &inode->i_lru, freeable);
spin_unlock(&inode->i_lock);
this_cpu_dec(nr_unused);
@@ -751,14 +738,13 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
* to trim from the LRU. Inodes to be freed are moved to a temporary list and
* then are freed outside inode_lock by dispose_list().
*/
-long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid)
+long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
LIST_HEAD(freeable);
long freed;
- freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
- &freeable, &nr_to_scan);
+ freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
+ inode_lru_isolate, &freeable);
dispose_list(&freeable);
return freed;
}
diff --git a/fs/internal.h b/fs/internal.h
index e9a61fe67575..d92c346a793d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -14,6 +14,7 @@ struct file_system_type;
struct linux_binprm;
struct path;
struct mount;
+struct shrink_control;
/*
* block_dev.c
@@ -111,8 +112,7 @@ extern int open_check_o_direct(struct file *f);
* inode.c
*/
extern spinlock_t inode_sb_list_lock;
-extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid);
+extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
extern void inode_add_lru(struct inode *inode);
/*
@@ -129,8 +129,7 @@ extern int invalidate_inodes(struct super_block *, bool);
*/
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
extern int d_set_mounted(struct dentry *dentry);
-extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid);
+extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
/*
* read_write.c
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 214c3c11fbc2..5d01d2638ca5 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -379,6 +379,11 @@ int __generic_block_fiemap(struct inode *inode,
past_eof = true;
}
cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
} while (1);
/* If ret is 1 then we just hit the end of the extent array */
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 01e1ee7a998b..005a15cfd30a 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -2,6 +2,7 @@
* linux/fs/isofs/util.c
*/
+#include <linux/time.h>
#include "isofs.h"
/*
@@ -17,9 +18,9 @@
int iso_date(char * p, int flag)
{
int year, month, day, hour, minute, second, tz;
- int crtime, days, i;
+ int crtime;
- year = p[0] - 70;
+ year = p[0];
month = p[1];
day = p[2];
hour = p[3];
@@ -31,18 +32,7 @@ int iso_date(char * p, int flag)
if (year < 0) {
crtime = 0;
} else {
- int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31};
-
- days = year * 365;
- if (year > 2)
- days += (year+1) / 4;
- for (i = 1; i < month; i++)
- days += monlen[i-1];
- if (((year+2) % 4) == 0 && month > 2)
- days++;
- days += day - 1;
- crtime = ((((days * 24) + hour) * 60 + minute) * 60)
- + second;
+ crtime = mktime64(year+1900, month, day, hour, minute, second);
/* sign extend */
if (tz & 0x80)
diff --git a/fs/jfs/endian24.h b/fs/jfs/endian24.h
deleted file mode 100644
index fa92f7f1d0d0..000000000000
--- a/fs/jfs/endian24.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) International Business Machines Corp., 2001
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _H_ENDIAN24
-#define _H_ENDIAN24
-
-/*
- * endian24.h:
- *
- * Endian conversion for 24-byte data
- *
- */
-#define __swab24(x) \
-({ \
- __u32 __x = (x); \
- ((__u32)( \
- ((__x & (__u32)0x000000ffUL) << 16) | \
- (__x & (__u32)0x0000ff00UL) | \
- ((__x & (__u32)0x00ff0000UL) >> 16) )); \
-})
-
-#if (defined(__KERNEL__) && defined(__LITTLE_ENDIAN)) || (defined(__BYTE_ORDER) && (__BYTE_ORDER == __LITTLE_ENDIAN))
- #define __cpu_to_le24(x) ((__u32)(x))
- #define __le24_to_cpu(x) ((__u32)(x))
-#else
- #define __cpu_to_le24(x) __swab24(x)
- #define __le24_to_cpu(x) __swab24(x)
-#endif
-
-#ifdef __KERNEL__
- #define cpu_to_le24 __cpu_to_le24
- #define le24_to_cpu __le24_to_cpu
-#endif
-
-#endif /* !_H_ENDIAN24 */
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 984c2bbf4f61..d88576e23fe4 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -1040,8 +1040,8 @@ static int dtSplitUp(tid_t tid,
pxdlist.maxnpxd = 1;
pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
- PXDaddress(pxd, nxaddr)
- PXDlength(pxd, xlen + n);
+ PXDaddress(pxd, nxaddr);
+ PXDlength(pxd, xlen + n);
split->pxdlist = &pxdlist;
if ((rc = dtExtendPage(tid, ip, split, btstack))) {
nxaddr = addressPXD(pxd);
diff --git a/fs/jfs/jfs_types.h b/fs/jfs/jfs_types.h
index 43ea3713c083..8f602dcb51fa 100644
--- a/fs/jfs/jfs_types.h
+++ b/fs/jfs/jfs_types.h
@@ -30,8 +30,6 @@
#include <linux/types.h>
#include <linux/nls.h>
-#include "endian24.h"
-
/*
* transaction and lock id's
*
@@ -59,26 +57,42 @@ struct timestruc_t {
/*
* physical xd (pxd)
+ *
+ * The leftmost 24 bits of len_addr are the extent length.
+ * The rightmost 8 bits of len_addr are the most significant bits of
+ * the extent address
*/
typedef struct {
- unsigned len:24;
- unsigned addr1:8;
+ __le32 len_addr;
__le32 addr2;
} pxd_t;
/* xd_t field construction */
-#define PXDlength(pxd, length32) ((pxd)->len = __cpu_to_le24(length32))
-#define PXDaddress(pxd, address64)\
-{\
- (pxd)->addr1 = ((s64)address64) >> 32;\
- (pxd)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
+static inline void PXDlength(pxd_t *pxd, __u32 len)
+{
+ pxd->len_addr = (pxd->len_addr & cpu_to_le32(~0xffffff)) |
+ cpu_to_le32(len & 0xffffff);
+}
+
+static inline void PXDaddress(pxd_t *pxd, __u64 addr)
+{
+ pxd->len_addr = (pxd->len_addr & cpu_to_le32(0xffffff)) |
+ cpu_to_le32((addr >> 32)<<24);
+ pxd->addr2 = cpu_to_le32(addr & 0xffffffff);
}
/* xd_t field extraction */
-#define lengthPXD(pxd) __le24_to_cpu((pxd)->len)
-#define addressPXD(pxd)\
- ( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2))
+static inline __u32 lengthPXD(pxd_t *pxd)
+{
+ return le32_to_cpu((pxd)->len_addr) & 0xffffff;
+}
+
+static inline __u64 addressPXD(pxd_t *pxd)
+{
+ __u64 n = le32_to_cpu(pxd->len_addr) & ~0xffffff;
+ return (n << 8) + le32_to_cpu(pxd->addr2);
+}
#define MAXTREEHEIGHT 8
/* pxd list */
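
With the bitfields gone, the accessors spell the packing out: in CPU byte order the low 24 bits of le32_to_cpu(len_addr) carry the extent length and the high 8 bits carry bits 32-39 of the block address, with the remaining 32 address bits in addr2. A standalone round-trip check of that packing (not part of the patch; host-endian stand-ins, the le32 conversions are omitted):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Host-endian stand-ins for the on-disk fields. */
struct pxd_example {
        uint32_t len_addr;      /* low 24 bits: length; high 8 bits: addr bits 32-39 */
        uint32_t addr2;         /* low 32 bits of the address */
};

static void pxd_set(struct pxd_example *p, uint64_t addr, uint32_t len)
{
        /* addresses up to 40 bits, lengths up to 24 bits */
        p->len_addr = ((uint32_t)(addr >> 32) << 24) | (len & 0xffffff);
        p->addr2    = (uint32_t)addr;
}

static uint64_t pxd_addr(const struct pxd_example *p)
{
        return ((uint64_t)(p->len_addr & ~0xffffffu) << 8) | p->addr2;
}

static uint32_t pxd_len(const struct pxd_example *p)
{
        return p->len_addr & 0xffffff;
}

int main(void)
{
        struct pxd_example p;
        uint64_t addr = 0x12ABCDEF01ull;   /* needs more than 32 bits */
        uint32_t len  = 0x00F00D;

        pxd_set(&p, addr, len);
        assert(pxd_addr(&p) == addr);
        assert(pxd_len(&p) == len);
        printf("addr=%#llx len=%#x round-trip ok\n",
               (unsigned long long)pxd_addr(&p), (unsigned)pxd_len(&p));
        return 0;
}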
@@ -93,12 +107,10 @@ struct pxdlist {
* data extent descriptor (dxd)
*/
typedef struct {
- unsigned flag:8; /* 1: flags */
- unsigned rsrvd:24;
+ __u8 flag; /* 1: flags */
+ __u8 rsrvd[3];
__le32 size; /* 4: size in byte */
- unsigned len:24; /* 3: length in unit of fsblksize */
- unsigned addr1:8; /* 1: address in unit of fsblksize */
- __le32 addr2; /* 4: address in unit of fsblksize */
+ pxd_t loc; /* 8: address and length in unit of fsblksize */
} dxd_t; /* - 16 - */
/* dxd_t flags */
@@ -109,12 +121,11 @@ typedef struct {
#define DXD_CORRUPT 0x08 /* Inconsistency detected */
/* dxd_t field construction
- * Conveniently, the PXD macros work for DXD
*/
-#define DXDlength PXDlength
-#define DXDaddress PXDaddress
-#define lengthDXD lengthPXD
-#define addressDXD addressPXD
+#define DXDlength(dxd, len) PXDlength(&(dxd)->loc, len)
+#define DXDaddress(dxd, addr) PXDaddress(&(dxd)->loc, addr)
+#define lengthDXD(dxd) lengthPXD(&(dxd)->loc)
+#define addressDXD(dxd) addressPXD(&(dxd)->loc)
#define DXDsize(dxd, size32) ((dxd)->size = cpu_to_le32(size32))
#define sizeDXD(dxd) le32_to_cpu((dxd)->size)
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index 08c0c749b986..1e0987986d5f 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -29,13 +29,11 @@
* extent allocation descriptor (xad)
*/
typedef struct xad {
- unsigned flag:8; /* 1: flag */
- unsigned rsvrd:16; /* 2: reserved */
- unsigned off1:8; /* 1: offset in unit of fsblksize */
- __le32 off2; /* 4: offset in unit of fsblksize */
- unsigned len:24; /* 3: length in unit of fsblksize */
- unsigned addr1:8; /* 1: address in unit of fsblksize */
- __le32 addr2; /* 4: address in unit of fsblksize */
+ __u8 flag; /* 1: flag */
+ __u8 rsvrd[2]; /* 2: reserved */
+ __u8 off1; /* 1: offset in unit of fsblksize */
+ __le32 off2; /* 4: offset in unit of fsblksize */
+ pxd_t loc; /* 8: length and address in unit of fsblksize */
} xad_t; /* (16) */
#define MAXXLEN ((1 << 24) - 1)
@@ -49,19 +47,14 @@ typedef struct xad {
(xad)->off1 = ((u64)offset64) >> 32;\
(xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
}
-#define XADaddress(xad, address64)\
-{\
- (xad)->addr1 = ((u64)address64) >> 32;\
- (xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
-}
-#define XADlength(xad, length32) (xad)->len = __cpu_to_le24(length32)
+#define XADaddress(xad, address64) PXDaddress(&(xad)->loc, address64)
+#define XADlength(xad, length32) PXDlength(&(xad)->loc, length32)
/* xad_t field extraction */
#define offsetXAD(xad)\
( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
-#define addressXAD(xad)\
- ( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
-#define lengthXAD(xad) __le24_to_cpu((xad)->len)
+#define addressXAD(xad) addressPXD(&(xad)->loc)
+#define lengthXAD(xad) lengthPXD(&(xad)->loc)
/* xad list */
struct xadlist {
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 16c3a9556634..5d30c56ae075 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -619,8 +619,7 @@ out_mount_failed:
iput(sbi->direct_inode);
sbi->direct_inode = NULL;
out_unload:
- if (sbi->nls_tab)
- unload_nls(sbi->nls_tab);
+ unload_nls(sbi->nls_tab);
out_kfree:
kfree(sbi);
return ret;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 37989f02a226..6acc9648f986 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
static int kernfs_name_compare(unsigned int hash, const char *name,
const void *ns, const struct kernfs_node *kn)
{
- if (hash != kn->hash)
- return hash - kn->hash;
- if (ns != kn->ns)
- return ns - kn->ns;
+ if (hash < kn->hash)
+ return -1;
+ if (hash > kn->hash)
+ return 1;
+ if (ns < kn->ns)
+ return -1;
+ if (ns > kn->ns)
+ return 1;
return strcmp(name, kn->name);
}
@@ -407,8 +411,9 @@ void kernfs_put(struct kernfs_node *kn)
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
- if (!(kn->flags & KERNFS_STATIC_NAME))
- kfree(kn->name);
+
+ kfree_const(kn->name);
+
if (kn->iattr) {
if (kn->iattr->ia_secdata)
security_release_secctx(kn->iattr->ia_secdata,
@@ -502,15 +507,12 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
const char *name, umode_t mode,
unsigned flags)
{
- char *dup_name = NULL;
struct kernfs_node *kn;
int ret;
- if (!(flags & KERNFS_STATIC_NAME)) {
- name = dup_name = kstrdup(name, GFP_KERNEL);
- if (!name)
- return NULL;
- }
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+ return NULL;
kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
if (!kn)
@@ -534,7 +536,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
- kfree(dup_name);
+ kfree_const(name);
return NULL;
}
@@ -1260,7 +1262,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
/* rename kernfs_node */
if (strcmp(kn->name, new_name) != 0) {
error = -ENOMEM;
- new_name = kstrdup(new_name, GFP_KERNEL);
+ new_name = kstrdup_const(new_name, GFP_KERNEL);
if (!new_name)
goto out;
} else {
@@ -1281,9 +1283,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
kn->ns = new_ns;
if (new_name) {
- if (!(kn->flags & KERNFS_STATIC_NAME))
- old_name = kn->name;
- kn->flags &= ~KERNFS_STATIC_NAME;
+ old_name = kn->name;
kn->name = new_name;
}
@@ -1293,7 +1293,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
kernfs_link_sibling(kn);
kernfs_put(old_parent);
- kfree(old_name);
+ kfree_const(old_name);
error = 0;
out:
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index ddc9f9612f16..b684e8a132e6 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -901,7 +901,6 @@ const struct file_operations kernfs_file_fops = {
* @ops: kernfs operations for the file
* @priv: private data for the file
* @ns: optional namespace tag of the file
- * @name_is_static: don't copy file name
* @key: lockdep key for the file's active_ref, %NULL to disable lockdep
*
* Returns the created node on success, ERR_PTR() value on error.
@@ -911,7 +910,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
umode_t mode, loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
- bool name_is_static,
struct lock_class_key *key)
{
struct kernfs_node *kn;
@@ -919,8 +917,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
int rc;
flags = KERNFS_FILE;
- if (name_is_static)
- flags |= KERNFS_STATIC_NAME;
kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
if (!kn)
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 985217626e66..9000874a945b 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -24,12 +24,6 @@ static const struct address_space_operations kernfs_aops = {
.write_end = simple_write_end,
};
-static struct backing_dev_info kernfs_bdi = {
- .name = "kernfs",
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
static const struct inode_operations kernfs_iops = {
.permission = kernfs_iop_permission,
.setattr = kernfs_iop_setattr,
@@ -40,12 +34,6 @@ static const struct inode_operations kernfs_iops = {
.listxattr = kernfs_iop_listxattr,
};
-void __init kernfs_inode_init(void)
-{
- if (bdi_init(&kernfs_bdi))
- panic("failed to init kernfs_bdi");
-}
-
static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
{
static DEFINE_MUTEX(iattr_mutex);
@@ -298,7 +286,6 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
kernfs_get(kn);
inode->i_private = kn;
inode->i_mapping->a_ops = &kernfs_aops;
- inode->i_mapping->backing_dev_info = &kernfs_bdi;
inode->i_op = &kernfs_iops;
set_default_inode_attr(inode, kn->mode);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index dc84a3ef9ca2..af9fa7499919 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -88,7 +88,6 @@ int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
size_t size);
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
-void kernfs_inode_init(void);
/*
* dir.c
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index f973ae9b05f1..8eaf417187f1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -246,5 +246,4 @@ void __init kernfs_init(void)
kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
sizeof(struct kernfs_node),
0, SLAB_PANIC, NULL);
- kernfs_inode_init();
}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 1cc6ec51e6b1..47a32b6d9b90 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -65,7 +65,7 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
return (struct sockaddr *)&nsm->sm_addr;
}
-static struct rpc_clnt *nsm_create(struct net *net)
+static struct rpc_clnt *nsm_create(struct net *net, const char *nodename)
{
struct sockaddr_in sin = {
.sin_family = AF_INET,
@@ -77,6 +77,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
.address = (struct sockaddr *)&sin,
.addrsize = sizeof(sin),
.servername = "rpc.statd",
+ .nodename = nodename,
.program = &nsm_program,
.version = NSM_VERSION,
.authflavor = RPC_AUTH_NULL,
@@ -102,7 +103,7 @@ out:
return clnt;
}
-static struct rpc_clnt *nsm_client_get(struct net *net)
+static struct rpc_clnt *nsm_client_get(struct net *net, const char *nodename)
{
struct rpc_clnt *clnt, *new;
struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -111,7 +112,7 @@ static struct rpc_clnt *nsm_client_get(struct net *net)
if (clnt != NULL)
goto out;
- clnt = new = nsm_create(net);
+ clnt = new = nsm_create(net, nodename);
if (IS_ERR(clnt))
goto out;
@@ -190,19 +191,23 @@ int nsm_monitor(const struct nlm_host *host)
struct nsm_res res;
int status;
struct rpc_clnt *clnt;
+ const char *nodename = NULL;
dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
if (nsm->sm_monitored)
return 0;
+ if (host->h_rpcclnt)
+ nodename = host->h_rpcclnt->cl_nodename;
+
/*
* Choose whether to record the caller_name or IP address of
* this peer in the local rpc.statd's database.
*/
nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
- clnt = nsm_client_get(host->net);
+ clnt = nsm_client_get(host->net, nodename);
if (IS_ERR(clnt)) {
status = PTR_ERR(clnt);
dprintk("lockd: failed to create NSM upcall transport, "
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index e94c887da2d7..55505cbe11af 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -138,10 +138,6 @@ lockd(void *vrqstp)
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
- if (!nlm_timeout)
- nlm_timeout = LOCKD_DFLT_TIMEO;
- nlmsvc_timeout = nlm_timeout * HZ;
-
/*
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
+ if (!nlm_timeout)
+ nlm_timeout = LOCKD_DFLT_TIMEO;
+ nlmsvc_timeout = nlm_timeout * HZ;
+
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 56598742dde4..5581e020644b 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -57,8 +57,8 @@ static DEFINE_SPINLOCK(nlm_blocked_lock);
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
/*
- * We can get away with a static buffer because we're only
- * called with BKL held.
+ * We can get away with a static buffer because this is only called
+ * from lockd, which is single-threaded.
*/
static char buf[2*NLM_MAXCOOKIELEN+1];
unsigned int i, len = sizeof(buf);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d12ff4e2dbe7..665ef5a05183 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
struct nlm_host *lockhost;
+ if (!flctx || list_empty_careful(&flctx->flc_posix))
+ return 0;
again:
file->f_locks = 0;
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
if (fl->fl_lmops != &nlmsvc_lock_operations)
continue;
@@ -180,7 +183,7 @@ again:
if (match(lockhost, host)) {
struct file_lock lock = *fl;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
lock.fl_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
@@ -192,7 +195,7 @@ again:
goto again;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
return 0;
}
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
return 1;
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl; fl = fl->fl_next) {
- if (fl->fl_lmops == &nlmsvc_lock_operations) {
- spin_unlock(&inode->i_lock);
- return 1;
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ if (fl->fl_lmops == &nlmsvc_lock_operations) {
+ spin_unlock(&flctx->flc_lock);
+ return 1;
+ }
}
+ spin_unlock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
file->f_locks = 0;
return 0;
}
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 9340e7e10ef6..5b651daad518 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -95,14 +95,6 @@ nlm_decode_fh(__be32 *p, struct nfs_fh *f)
return p + XDR_QUADLEN(NFS2_FHSIZE);
}
-static inline __be32 *
-nlm_encode_fh(__be32 *p, struct nfs_fh *f)
-{
- *p++ = htonl(NFS2_FHSIZE);
- memcpy(p, f->data, NFS2_FHSIZE);
- return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
/*
* Encode and decode owner handle
*/
diff --git a/fs/locks.c b/fs/locks.c
index 735b8d3fa78c..4753218f308e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -137,7 +137,7 @@
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
-#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG))
+#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
static bool lease_breaking(struct file_lock *fl)
@@ -157,14 +157,11 @@ static int target_leasetype(struct file_lock *fl)
int leases_enable = 1;
int lease_break_time = 45;
-#define for_each_lock(inode, lockp) \
- for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
-
/*
* The global file_lock_list is only used for displaying /proc/locks, so we
* keep a list on each CPU, with each list protected by its own spinlock via
* the file_lock_lglock. Note that alterations to the list also require that
- * the relevant i_lock is held.
+ * the relevant flc_lock is held.
*/
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -192,21 +189,68 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* contrast to those that are acting as records of acquired locks).
*
* Note that when we acquire this lock in order to change the above fields,
- * we often hold the i_lock as well. In certain cases, when reading the fields
+ * we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
- * i_lock.
+ * flc_lock.
*
* In particular, adding an entry to the fl_block list requires that you hold
- * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
- * an entry from the list however only requires the file_lock_lock.
+ * both the flc_lock and the blocked_lock_lock (acquired in that order).
+ * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
+static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
+static struct file_lock_context *
+locks_get_lock_context(struct inode *inode)
+{
+ struct file_lock_context *new;
+
+ if (likely(inode->i_flctx))
+ goto out;
+
+ new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ spin_lock_init(&new->flc_lock);
+ INIT_LIST_HEAD(&new->flc_flock);
+ INIT_LIST_HEAD(&new->flc_posix);
+ INIT_LIST_HEAD(&new->flc_lease);
+
+ /*
+ * Assign the pointer if it's not already assigned. If it is, then
+ * free the context we just allocated.
+ */
+ spin_lock(&inode->i_lock);
+ if (likely(!inode->i_flctx)) {
+ inode->i_flctx = new;
+ new = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (new)
+ kmem_cache_free(flctx_cache, new);
+out:
+ return inode->i_flctx;
+}
+
+void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+ if (ctx) {
+ WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+ WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
+ WARN_ON_ONCE(!list_empty(&ctx->flc_lease));
+ kmem_cache_free(flctx_cache, ctx);
+ }
+}
+
static void locks_init_lock_heads(struct file_lock *fl)
{
INIT_HLIST_NODE(&fl->fl_link);
+ INIT_LIST_HEAD(&fl->fl_list);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
}
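
locks_get_lock_context() follows the usual allocate-outside-the-lock pattern: the context is allocated and initialized without i_lock held, installed only if no one else got there first, and freed again if we lost the race, so every caller ends up with the winner's pointer. A user-space sketch of the same install-or-discard pattern (not part of the patch; a mutex and a C11 atomic pointer stand in for i_lock and i_flctx):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
        pthread_mutex_t lock;                   /* stands in for flc_lock */
};

static _Atomic(struct ctx *) installed;                           /* i_flctx stand-in */
static pthread_mutex_t install_lock = PTHREAD_MUTEX_INITIALIZER;  /* i_lock stand-in */

static struct ctx *get_ctx(void)
{
        struct ctx *new;

        if (atomic_load(&installed))            /* fast path */
                return atomic_load(&installed);

        new = malloc(sizeof(*new));             /* allocate outside the lock */
        if (!new)
                return NULL;
        pthread_mutex_init(&new->lock, NULL);

        pthread_mutex_lock(&install_lock);
        if (!atomic_load(&installed)) {         /* we won the race: install it */
                atomic_store(&installed, new);
                new = NULL;
        }
        pthread_mutex_unlock(&install_lock);

        free(new);                              /* lost the race: discard our copy */
        return atomic_load(&installed);
}

static void *worker(void *arg)
{
        (void)arg;
        return get_ctx();
}

int main(void)
{
        pthread_t a, b;
        void *ra, *rb;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, &ra);
        pthread_join(b, &rb);
        assert(ra == rb && ra != NULL);         /* both threads see the same context */
        free(ra);
        return 0;
}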
@@ -243,6 +287,7 @@ EXPORT_SYMBOL_GPL(locks_release_private);
void locks_free_lock(struct file_lock *fl)
{
BUG_ON(waitqueue_active(&fl->fl_wait));
+ BUG_ON(!list_empty(&fl->fl_list));
BUG_ON(!list_empty(&fl->fl_block));
BUG_ON(!hlist_unhashed(&fl->fl_link));
@@ -257,8 +302,8 @@ locks_dispose_list(struct list_head *dispose)
struct file_lock *fl;
while (!list_empty(dispose)) {
- fl = list_first_entry(dispose, struct file_lock, fl_block);
- list_del_init(&fl->fl_block);
+ fl = list_first_entry(dispose, struct file_lock, fl_list);
+ list_del_init(&fl->fl_list);
locks_free_lock(fl);
}
}
@@ -513,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
return fl1->fl_owner == fl2->fl_owner;
}
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
lg_local_lock(&file_lock_lglock);
@@ -522,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl)
lg_local_unlock(&file_lock_lglock);
}
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
/*
* Avoid taking lock if already unhashed. This is safe since this check
- * is done while holding the i_lock, and new insertions into the list
+ * is done while holding the flc_lock, and new insertions into the list
* also require that it be held.
*/
if (hlist_unhashed(&fl->fl_link))
@@ -579,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter)
* the order they blocked. The documentation doesn't require this but
* it seems like the reasonable thing to do.
*
- * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
- * list itself is protected by the blocked_lock_lock, but by ensuring that the
- * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
- * in some cases when we see that the fl_block list is empty.
+ * Must be called with both the flc_lock and blocked_lock_lock held. The
+ * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * that the flc_lock is also held on insertions we can avoid taking the
+ * blocked_lock_lock in some cases when we see that the fl_block list is empty.
*/
static void __locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
@@ -594,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker,
locks_insert_global_blocked(waiter);
}
-/* Must be called with i_lock held. */
+/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
@@ -606,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker,
/*
* Wake up processes blocked waiting for blocker.
*
- * Must be called with the inode->i_lock held!
+ * Must be called with the inode->flc_lock held!
*/
static void locks_wake_up_blocks(struct file_lock *blocker)
{
/*
* Avoid taking global lock if list is empty. This is safe since new
- * blocked requests are only added to the list under the i_lock, and
- * the i_lock is always held here. Note that removal from the fl_block
- * list does not require the i_lock, so we must recheck list_empty()
+ * blocked requests are only added to the list under the flc_lock, and
+ * the flc_lock is always held here. Note that removal from the fl_block
+ * list does not require the flc_lock, so we must recheck list_empty()
* after acquiring the blocked_lock_lock.
*/
if (list_empty(&blocker->fl_block))
@@ -635,63 +680,36 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
spin_unlock(&blocked_lock_lock);
}
-/* Insert file lock fl into an inode's lock list at the position indicated
- * by pos. At the same time add the lock to the global file lock list.
- *
- * Must be called with the i_lock held!
- */
-static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
+static void
+locks_insert_lock_ctx(struct file_lock *fl, int *counter,
+ struct list_head *before)
{
fl->fl_nspid = get_pid(task_tgid(current));
-
- /* insert into file's list */
- fl->fl_next = *pos;
- *pos = fl;
-
+ list_add_tail(&fl->fl_list, before);
+ ++*counter;
locks_insert_global_locks(fl);
}
-/**
- * locks_delete_lock - Delete a lock and then free it.
- * @thisfl_p: pointer that points to the fl_next field of the previous
- * inode->i_flock list entry
- *
- * Unlink a lock from all lists and free the namespace reference, but don't
- * free it yet. Wake up processes that are blocked waiting for this lock and
- * notify the FS that the lock has been cleared.
- *
- * Must be called with the i_lock held!
- */
-static void locks_unlink_lock(struct file_lock **thisfl_p)
+static void
+locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
{
- struct file_lock *fl = *thisfl_p;
-
locks_delete_global_locks(fl);
-
- *thisfl_p = fl->fl_next;
- fl->fl_next = NULL;
-
+ list_del_init(&fl->fl_list);
+ --*counter;
if (fl->fl_nspid) {
put_pid(fl->fl_nspid);
fl->fl_nspid = NULL;
}
-
locks_wake_up_blocks(fl);
}
-/*
- * Unlink a lock from all lists and free it.
- *
- * Must be called with i_lock held!
- */
-static void locks_delete_lock(struct file_lock **thisfl_p,
- struct list_head *dispose)
+static void
+locks_delete_lock_ctx(struct file_lock *fl, int *counter,
+ struct list_head *dispose)
{
- struct file_lock *fl = *thisfl_p;
-
- locks_unlink_lock(thisfl_p);
+ locks_unlink_lock_ctx(fl, counter);
if (dispose)
- list_add(&fl->fl_block, dispose);
+ list_add(&fl->fl_list, dispose);
else
locks_free_lock(fl);
}
@@ -746,22 +764,27 @@ void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
+ struct file_lock_context *ctx;
struct inode *inode = file_inode(filp);
- spin_lock(&inode->i_lock);
- for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
- if (!IS_POSIX(cfl))
- continue;
- if (posix_locks_conflict(fl, cfl))
- break;
- }
- if (cfl) {
- locks_copy_conflock(fl, cfl);
- if (cfl->fl_nspid)
- fl->fl_pid = pid_vnr(cfl->fl_nspid);
- } else
+ ctx = inode->i_flctx;
+ if (!ctx || list_empty_careful(&ctx->flc_posix)) {
fl->fl_type = F_UNLCK;
- spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
+ if (posix_locks_conflict(fl, cfl)) {
+ locks_copy_conflock(fl, cfl);
+ if (cfl->fl_nspid)
+ fl->fl_pid = pid_vnr(cfl->fl_nspid);
+ goto out;
+ }
+ }
+ fl->fl_type = F_UNLCK;
+out:
+ spin_unlock(&ctx->flc_lock);
return;
}
EXPORT_SYMBOL(posix_test_lock);
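
posix_test_lock() now peeks at ctx->flc_posix with list_empty_careful() before taking flc_lock, so the common no-locks case never touches the spinlock. list_empty_careful() examines both ->next and ->prev so it can run unlocked, and a racing first insertion is simply reported as "no conflict", the same answer the old code gave for an empty i_flock list. A hedged one-function sketch of that fast path (invented helper name, not part of the patch):

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/types.h>

/* Invented helper: the unlocked "is there anything worth scanning?" check
 * used as the fast path in posix_test_lock() above. */
static bool toy_has_posix_locks(struct file_lock_context *ctx)
{
	return ctx && !list_empty_careful(&ctx->flc_posix);
}
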
@@ -845,34 +868,34 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
struct file_lock *new_fl = NULL;
- struct file_lock **before;
- struct inode * inode = file_inode(filp);
+ struct file_lock *fl;
+ struct file_lock_context *ctx;
+ struct inode *inode = file_inode(filp);
int error = 0;
- int found = 0;
+ bool found = false;
LIST_HEAD(dispose);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
if (!new_fl)
return -ENOMEM;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
- for_each_lock(inode, before) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl))
- break;
- if (IS_LEASE(fl))
- continue;
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
if (filp != fl->fl_file)
continue;
if (request->fl_type == fl->fl_type)
goto out;
- found = 1;
- locks_delete_lock(before, &dispose);
+ found = true;
+ locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose);
break;
}
@@ -887,18 +910,13 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
* give it the opportunity to lock the file.
*/
if (found) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
cond_resched();
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
}
find_conflict:
- for_each_lock(inode, before) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl))
- break;
- if (IS_LEASE(fl))
- continue;
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
if (!flock_locks_conflict(request, fl))
continue;
error = -EAGAIN;
@@ -911,12 +929,12 @@ find_conflict:
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
- locks_insert_lock(before, new_fl);
+ locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
new_fl = NULL;
error = 0;
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
@@ -925,16 +943,20 @@ out:
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
- struct file_lock *fl;
+ struct file_lock *fl, *tmp;
struct file_lock *new_fl = NULL;
struct file_lock *new_fl2 = NULL;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
- struct file_lock **before;
+ struct file_lock_context *ctx;
int error;
bool added = false;
LIST_HEAD(dispose);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
@@ -948,15 +970,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
new_fl2 = locks_alloc_lock();
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
* there are any, either return error or put the request on the
* blocker's list of waiters and the global blocked_hash.
*/
if (request->fl_type != F_UNLCK) {
- for_each_lock(inode, before) {
- fl = *before;
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
if (!IS_POSIX(fl))
continue;
if (!posix_locks_conflict(request, fl))
@@ -986,29 +1007,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
if (request->fl_flags & FL_ACCESS)
goto out;
- /*
- * Find the first old lock with the same owner as the new lock.
- */
-
- before = &inode->i_flock;
-
- /* First skip locks owned by other processes. */
- while ((fl = *before) && (!IS_POSIX(fl) ||
- !posix_same_owner(request, fl))) {
- before = &fl->fl_next;
+ /* Find the first old lock with the same owner as the new lock */
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
+ if (posix_same_owner(request, fl))
+ break;
}
/* Process locks with this owner. */
- while ((fl = *before) && posix_same_owner(request, fl)) {
- /* Detect adjacent or overlapping regions (if same lock type)
- */
+ list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
+ if (!posix_same_owner(request, fl))
+ break;
+
+ /* Detect adjacent or overlapping regions (if same lock type) */
if (request->fl_type == fl->fl_type) {
/* In all comparisons of start vs end, use
* "start - 1" rather than "end + 1". If end
* is OFFSET_MAX, end + 1 will become negative.
*/
if (fl->fl_end < request->fl_start - 1)
- goto next_lock;
+ continue;
/* If the next lock in the list has entirely bigger
* addresses than the new one, insert the lock here.
*/
@@ -1029,18 +1046,18 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
else
request->fl_end = fl->fl_end;
if (added) {
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
+ &dispose);
continue;
}
request = fl;
added = true;
- }
- else {
+ } else {
/* Processing for different lock types is a bit
* more complex.
*/
if (fl->fl_end < request->fl_start)
- goto next_lock;
+ continue;
if (fl->fl_start > request->fl_end)
break;
if (request->fl_type == F_UNLCK)
@@ -1059,7 +1076,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
* one (This may happen several times).
*/
if (added) {
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl,
+ &ctx->flc_posix_cnt, &dispose);
continue;
}
/*
@@ -1075,15 +1093,13 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_copy_lock(new_fl, request);
request = new_fl;
new_fl = NULL;
- locks_delete_lock(before, &dispose);
- locks_insert_lock(before, request);
+ locks_insert_lock_ctx(request,
+ &ctx->flc_posix_cnt, &fl->fl_list);
+ locks_delete_lock_ctx(fl,
+ &ctx->flc_posix_cnt, &dispose);
added = true;
}
}
- /* Go on to next lock.
- */
- next_lock:
- before = &fl->fl_next;
}
/*
@@ -1108,7 +1124,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
goto out;
}
locks_copy_lock(new_fl, request);
- locks_insert_lock(before, new_fl);
+ locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
+ &fl->fl_list);
new_fl = NULL;
}
if (right) {
@@ -1119,7 +1136,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
left = new_fl2;
new_fl2 = NULL;
locks_copy_lock(left, right);
- locks_insert_lock(before, left);
+ locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
+ &fl->fl_list);
}
right->fl_start = request->fl_end + 1;
locks_wake_up_blocks(right);
@@ -1129,7 +1147,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_wake_up_blocks(left);
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
/*
* Free any unused locks.
*/
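
The __posix_lock_file() rework drops the `before` cursor and the next_lock: goto in favour of list_for_each_entry_safe_from(), which resumes iteration from an element that is already positioned in the list and caches the next entry so the current one can be deleted inside the loop. A small generic illustration of those semantics (toy types, not the file-lock structures):

#include <linux/list.h>

struct toy_item {
	int owner;
	struct list_head link;
};

/* "pos" is expected to already point at an element of "head" (e.g. found by
 * a prior list_for_each_entry()); the _safe_from variant continues from it
 * and tolerates unlinking the current entry, as in the hunk above. */
static void drop_rest_for_owner(struct toy_item *pos, struct list_head *head,
				int owner, struct list_head *dispose)
{
	struct toy_item *tmp;

	list_for_each_entry_safe_from(pos, tmp, head, link) {
		if (pos->owner != owner)
			break;			/* same-owner locks are grouped */
		list_move(&pos->link, dispose);	/* safe: tmp was saved first */
	}
}
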
@@ -1199,22 +1217,29 @@ EXPORT_SYMBOL(posix_lock_file_wait);
*/
int locks_mandatory_locked(struct file *file)
{
+ int ret;
struct inode *inode = file_inode(file);
+ struct file_lock_context *ctx;
struct file_lock *fl;
+ ctx = inode->i_flctx;
+ if (!ctx || list_empty_careful(&ctx->flc_posix))
+ return 0;
+
/*
* Search the lock list for this inode for any POSIX locks.
*/
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!IS_POSIX(fl))
- continue;
+ spin_lock(&ctx->flc_lock);
+ ret = 0;
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
if (fl->fl_owner != current->files &&
- fl->fl_owner != file)
+ fl->fl_owner != file) {
+ ret = -EAGAIN;
break;
+ }
}
- spin_unlock(&inode->i_lock);
- return fl ? -EAGAIN : 0;
+ spin_unlock(&ctx->flc_lock);
+ return ret;
}
/**
@@ -1294,9 +1319,9 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
}
/* We already had a lease on this file; just change its type */
-int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
+int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
- struct file_lock *fl = *before;
+ struct file_lock_context *flctx;
int error = assign_type(fl, arg);
if (error)
@@ -1306,6 +1331,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
if (arg == F_UNLCK) {
struct file *filp = fl->fl_file;
+ flctx = file_inode(filp)->i_flctx;
f_delown(filp);
filp->f_owner.signum = 0;
fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1313,7 +1339,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
- locks_delete_lock(before, dispose);
+ locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
}
return 0;
}
@@ -1329,25 +1355,24 @@ static bool past_time(unsigned long then)
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
- struct file_lock **before;
- struct file_lock *fl;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl, *tmp;
- lockdep_assert_held(&inode->i_lock);
+ lockdep_assert_held(&ctx->flc_lock);
- before = &inode->i_flock;
- while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
trace_time_out_leases(inode, fl);
if (past_time(fl->fl_downgrade_time))
- lease_modify(before, F_RDLCK, dispose);
+ lease_modify(fl, F_RDLCK, dispose);
if (past_time(fl->fl_break_time))
- lease_modify(before, F_UNLCK, dispose);
- if (fl == *before) /* lease_modify may have freed fl */
- before = &fl->fl_next;
+ lease_modify(fl, F_UNLCK, dispose);
}
}
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
+ if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
+ return false;
if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
return false;
return locks_conflict(breaker, lease);
@@ -1356,11 +1381,12 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
+ struct file_lock_context *ctx = inode->i_flctx;
struct file_lock *fl;
- lockdep_assert_held(&inode->i_lock);
+ lockdep_assert_held(&ctx->flc_lock);
- for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (leases_conflict(fl, breaker))
return true;
}
@@ -1384,7 +1410,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
int error = 0;
struct file_lock *new_fl;
- struct file_lock *fl, **before;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl;
unsigned long break_time;
int want_write = (mode & O_ACCMODE) != O_RDONLY;
LIST_HEAD(dispose);
@@ -1394,7 +1421,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
return PTR_ERR(new_fl);
new_fl->fl_flags = type;
- spin_lock(&inode->i_lock);
+ /* typically we will check that ctx is non-NULL before calling */
+ if (!ctx) {
+ WARN_ON_ONCE(1);
+ return error;
+ }
+
+ spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
@@ -1408,9 +1441,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time++; /* so that 0 means no break time */
}
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (!leases_conflict(fl, new_fl))
continue;
if (want_write) {
@@ -1419,17 +1450,17 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
fl->fl_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
} else {
- if (lease_breaking(inode->i_flock))
+ if (lease_breaking(fl))
continue;
fl->fl_flags |= FL_DOWNGRADE_PENDING;
fl->fl_downgrade_time = break_time;
}
if (fl->fl_lmops->lm_break(fl))
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
+ &dispose);
}
- fl = inode->i_flock;
- if (!fl || !IS_LEASE(fl))
+ if (list_empty(&ctx->flc_lease))
goto out;
if (mode & O_NONBLOCK) {
@@ -1439,18 +1470,19 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
}
restart:
- break_time = inode->i_flock->fl_break_time;
+ fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
+ break_time = fl->fl_break_time;
if (break_time != 0)
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(inode->i_flock, new_fl);
+ locks_insert_block(fl, new_fl);
trace_break_lease_block(inode, new_fl);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
if (error >= 0) {
@@ -1462,12 +1494,10 @@ restart:
time_out_leases(inode, &dispose);
if (any_leases_conflict(inode, new_fl))
goto restart;
-
error = 0;
}
-
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
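
__break_lease() now drops flc_lock (rather than i_lock) around the sleep on the blocked waiter. The error handling that follows the wait relies on the return-value convention of wait_event_interruptible_timeout(): negative when a signal interrupted the sleep, zero when the full timeout elapsed with the condition still false, and the remaining jiffies (at least 1) when the condition became true. A hedged wrapper showing just that call shape (toy name; fl_next is still the "who is blocking me" pointer at this point in the series, as the hunk above shows):

#include <linux/fs.h>
#include <linux/wait.h>

/* Sketch only: wait until the waiter is no longer blocked or the lease
 * break time runs out, propagating the <0 / 0 / >0 convention described
 * above to the caller. */
static long toy_wait_for_unblock(struct file_lock *waiter, long timeout)
{
	return wait_event_interruptible_timeout(waiter->fl_wait,
						!waiter->fl_next, timeout);
}
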
@@ -1487,14 +1517,18 @@ EXPORT_SYMBOL(__break_lease);
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
bool has_lease = false;
- struct file_lock *flock;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl;
- if (inode->i_flock) {
- spin_lock(&inode->i_lock);
- flock = inode->i_flock;
- if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
- has_lease = true;
- spin_unlock(&inode->i_lock);
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ spin_lock(&ctx->flc_lock);
+ if (!list_empty(&ctx->flc_lease)) {
+ fl = list_first_entry(&ctx->flc_lease,
+ struct file_lock, fl_list);
+ if (fl->fl_type == F_WRLCK)
+ has_lease = true;
+ }
+ spin_unlock(&ctx->flc_lock);
}
if (has_lease)
@@ -1532,20 +1566,22 @@ int fcntl_getlease(struct file *filp)
{
struct file_lock *fl;
struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx = inode->i_flctx;
int type = F_UNLCK;
LIST_HEAD(dispose);
- spin_lock(&inode->i_lock);
- time_out_leases(file_inode(filp), &dispose);
- for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
- fl = fl->fl_next) {
- if (fl->fl_file == filp) {
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ spin_lock(&ctx->flc_lock);
+ time_out_leases(file_inode(filp), &dispose);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file != filp)
+ continue;
type = target_leasetype(fl);
break;
}
+ spin_unlock(&ctx->flc_lock);
+ locks_dispose_list(&dispose);
}
- spin_unlock(&inode->i_lock);
- locks_dispose_list(&dispose);
return type;
}
@@ -1560,11 +1596,14 @@ int fcntl_getlease(struct file *filp)
* conflict with the lease we're trying to set.
*/
static int
-check_conflicting_open(const struct dentry *dentry, const long arg)
+check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
int ret = 0;
struct inode *inode = dentry->d_inode;
+ if (flags & FL_LAYOUT)
+ return 0;
+
if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
return -EAGAIN;
@@ -1578,9 +1617,10 @@ check_conflicting_open(const struct dentry *dentry, const long arg)
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
- struct file_lock *fl, **before, **my_before = NULL, *lease;
+ struct file_lock *fl, *my_fl = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct file_lock_context *ctx;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
LIST_HEAD(dispose);
@@ -1588,6 +1628,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
lease = *flp;
trace_generic_add_lease(inode, lease);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
/*
* In the delegation case we need mutual exclusion with
* a number of operations that take the i_mutex. We trylock
@@ -1606,9 +1650,9 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
return -EINVAL;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
- error = check_conflicting_open(dentry, arg);
+ error = check_conflicting_open(dentry, arg, lease->fl_flags);
if (error)
goto out;
@@ -1621,13 +1665,13 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* except for this filp.
*/
error = -EAGAIN;
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
- if (fl->fl_file == filp) {
- my_before = before;
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file == filp &&
+ fl->fl_owner == lease->fl_owner) {
+ my_fl = fl;
continue;
}
+
/*
* No exclusive leases if someone else has a lease on
* this file:
@@ -1642,9 +1686,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
goto out;
}
- if (my_before != NULL) {
- lease = *my_before;
- error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
+ if (my_fl != NULL) {
+ error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
if (error)
goto out;
goto out_setup;
@@ -1654,7 +1697,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
if (!leases_enable)
goto out;
- locks_insert_lock(before, lease);
+ locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
/*
* The check in break_lease() is lockless. It's possible for another
* open to race in after we did the earlier check for a conflicting
@@ -1665,46 +1708,51 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* precedes these checks.
*/
smp_mb();
- error = check_conflicting_open(dentry, arg);
- if (error)
- goto out_unlink;
+ error = check_conflicting_open(dentry, arg, lease->fl_flags);
+ if (error) {
+ locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
+ goto out;
+ }
out_setup:
if (lease->fl_lmops->lm_setup)
lease->fl_lmops->lm_setup(lease, priv);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
if (is_deleg)
mutex_unlock(&inode->i_mutex);
- if (!error && !my_before)
+ if (!error && !my_fl)
*flp = NULL;
return error;
-out_unlink:
- locks_unlink_lock(before);
- goto out;
}
-static int generic_delete_lease(struct file *filp)
+static int generic_delete_lease(struct file *filp, void *owner)
{
int error = -EAGAIN;
- struct file_lock *fl, **before;
+ struct file_lock *fl, *victim = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct file_lock_context *ctx = inode->i_flctx;
LIST_HEAD(dispose);
- spin_lock(&inode->i_lock);
- time_out_leases(inode, &dispose);
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
- if (fl->fl_file == filp)
+ if (!ctx) {
+ trace_generic_delete_lease(inode, NULL);
+ return error;
+ }
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file == filp &&
+ fl->fl_owner == owner) {
+ victim = fl;
break;
+ }
}
trace_generic_delete_lease(inode, fl);
- if (fl)
- error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
- spin_unlock(&inode->i_lock);
+ if (victim)
+ error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
return error;
}
@@ -1737,13 +1785,14 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
switch (arg) {
case F_UNLCK:
- return generic_delete_lease(filp);
+ return generic_delete_lease(filp, *priv);
case F_RDLCK:
case F_WRLCK:
if (!(*flp)->fl_lmops->lm_break) {
WARN_ON_ONCE(1);
return -ENOLCK;
}
+
return generic_add_lease(filp, arg, flp, priv);
default:
return -EINVAL;
@@ -1816,7 +1865,7 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
if (arg == F_UNLCK)
- return vfs_setlease(filp, F_UNLCK, NULL, NULL);
+ return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
return do_fcntl_add_lease(fd, filp, arg);
}
@@ -2171,7 +2220,7 @@ again:
*/
/*
* we need that spin_lock here - it prevents reordering between
- * update of inode->i_flock and check for it done in close().
+ * update of i_flctx->flc_posix and check for it done in close().
* rcu_read_lock() wouldn't do.
*/
spin_lock(&current->files->file_lock);
@@ -2331,13 +2380,14 @@ out:
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
struct file_lock lock;
+ struct file_lock_context *ctx = file_inode(filp)->i_flctx;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- if (!file_inode(filp)->i_flock)
+ if (!ctx || list_empty(&ctx->flc_posix))
return;
lock.fl_type = F_UNLCK;
@@ -2358,67 +2408,67 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
EXPORT_SYMBOL(locks_remove_posix);
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_flock(struct file *filp)
+{
+ struct file_lock fl = {
+ .fl_owner = filp,
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+ struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+
+ if (list_empty(&flctx->flc_flock))
+ return;
+
+ if (filp->f_op->flock)
+ filp->f_op->flock(filp, F_SETLKW, &fl);
+ else
+ flock_lock_file(filp, &fl);
+
+ if (fl.fl_ops && fl.fl_ops->fl_release_private)
+ fl.fl_ops->fl_release_private(&fl);
+}
+
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_lease(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl, *tmp;
+ LIST_HEAD(dispose);
+
+ if (list_empty(&ctx->flc_lease))
+ return;
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
+ lease_modify(fl, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
+ locks_dispose_list(&dispose);
+}
+
/*
* This function is called on the last close of an open file.
*/
void locks_remove_file(struct file *filp)
{
- struct inode * inode = file_inode(filp);
- struct file_lock *fl;
- struct file_lock **before;
- LIST_HEAD(dispose);
-
- if (!inode->i_flock)
+ if (!file_inode(filp)->i_flctx)
return;
+ /* remove any OFD locks */
locks_remove_posix(filp, filp);
- if (filp->f_op->flock) {
- struct file_lock fl = {
- .fl_owner = filp,
- .fl_pid = current->tgid,
- .fl_file = filp,
- .fl_flags = FL_FLOCK,
- .fl_type = F_UNLCK,
- .fl_end = OFFSET_MAX,
- };
- filp->f_op->flock(filp, F_SETLKW, &fl);
- if (fl.fl_ops && fl.fl_ops->fl_release_private)
- fl.fl_ops->fl_release_private(&fl);
- }
-
- spin_lock(&inode->i_lock);
- before = &inode->i_flock;
+ /* remove flock locks */
+ locks_remove_flock(filp);
- while ((fl = *before) != NULL) {
- if (fl->fl_file == filp) {
- if (IS_LEASE(fl)) {
- lease_modify(before, F_UNLCK, &dispose);
- continue;
- }
-
- /*
- * There's a leftover lock on the list of a type that
- * we didn't expect to see. Most likely a classic
- * POSIX lock that ended up not getting released
- * properly, or that raced onto the list somehow. Log
- * some info about it and then just remove it from
- * the list.
- */
- WARN(!IS_FLOCK(fl),
- "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
- MAJOR(inode->i_sb->s_dev),
- MINOR(inode->i_sb->s_dev), inode->i_ino,
- fl->fl_type, fl->fl_flags,
- fl->fl_start, fl->fl_end);
-
- locks_delete_lock(before, &dispose);
- continue;
- }
- before = &fl->fl_next;
- }
- spin_unlock(&inode->i_lock);
- locks_dispose_list(&dispose);
+ /* remove any leases */
+ locks_remove_lease(filp);
}
/**
@@ -2621,6 +2671,9 @@ static int __init filelock_init(void)
{
int i;
+ flctx_cache = kmem_cache_create("file_lock_ctx",
+ sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
+
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
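
filelock_init() now sets up a second slab cache for the new file_lock_context objects, with SLAB_PANIC so a failure to create it stops boot immediately. A minimal sketch of the create/alloc/free lifecycle of such a private cache (invented names; the real locks_get_lock_context() additionally has to cope with two tasks racing to attach a context to the same inode, which is omitted here):

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Reduced stand-in for struct file_lock_context, for illustration only. */
struct toy_ctx {
	spinlock_t		lock;
	struct list_head	posix, flock, lease;
};

static struct kmem_cache *toy_ctx_cache;

static int __init toy_init(void)
{
	toy_ctx_cache = kmem_cache_create("toy_ctx", sizeof(struct toy_ctx),
					  0, SLAB_PANIC, NULL);
	return 0;
}

static struct toy_ctx *toy_ctx_alloc(void)
{
	struct toy_ctx *ctx = kmem_cache_alloc(toy_ctx_cache, GFP_KERNEL);

	if (ctx) {
		spin_lock_init(&ctx->lock);
		INIT_LIST_HEAD(&ctx->posix);
		INIT_LIST_HEAD(&ctx->flock);
		INIT_LIST_HEAD(&ctx->lease);
	}
	return ctx;
}

static void toy_ctx_free(struct toy_ctx *ctx)
{
	kmem_cache_free(toy_ctx_cache, ctx);
}
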
diff --git a/fs/namespace.c b/fs/namespace.c
index cd1e9681a0cf..6dae553dd69c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -201,7 +201,7 @@ static struct mount *alloc_vfsmnt(const char *name)
goto out_free_cache;
if (name) {
- mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
+ mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
if (!mnt->mnt_devname)
goto out_free_id;
}
@@ -234,7 +234,7 @@ static struct mount *alloc_vfsmnt(const char *name)
#ifdef CONFIG_SMP
out_free_devname:
- kfree(mnt->mnt_devname);
+ kfree_const(mnt->mnt_devname);
#endif
out_free_id:
mnt_free_id(mnt);
@@ -568,7 +568,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
static void free_vfsmnt(struct mount *mnt)
{
- kfree(mnt->mnt_devname);
+ kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_pcp);
#endif
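
The fs/namespace.c hunks convert mnt_devname to the kstrdup_const()/kfree_const() pair: when the source string already lives in kernel .rodata, kstrdup_const() returns the original pointer instead of allocating a copy, and kfree_const() knows not to kfree() such a pointer. The two calls must always be paired; a small hedged sketch (example names, not from the patch):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct toy_obj {
	const char *name;
};

static int toy_set_name(struct toy_obj *obj, const char *name)
{
	obj->name = kstrdup_const(name, GFP_KERNEL);
	return obj->name ? 0 : -ENOMEM;
}

static void toy_release(struct toy_obj *obj)
{
	kfree_const(obj->name);		/* no-op for .rodata-backed strings */
	obj->name = NULL;
}
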
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index e31e589369a4..01a9e16e9782 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -267,7 +267,6 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
if (inode) {
atomic_set(&NCP_FINFO(inode)->opened, info->opened);
- inode->i_mapping->backing_dev_info = sb->s_bdi;
inode->i_ino = info->ino;
ncp_set_attr(inode, info);
if (S_ISREG(inode->i_mode)) {
@@ -560,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
server = NCP_SBP(sb);
memset(server, 0, sizeof(*server));
- error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
+ error = bdi_setup_and_register(&server->bdi, "ncpfs");
if (error)
goto out_fput;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 3dece03f2fc8..c7abc10279af 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -128,6 +128,11 @@ config PNFS_OBJLAYOUT
depends on NFS_V4_1 && SCSI_OSD_ULD
default NFS_V4
+config PNFS_FLEXFILE_LAYOUT
+ tristate
+ depends on NFS_V4_1 && NFS_V3
+ default m
+
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
string "NFSv4.1 Implementation ID Domain"
depends on NFS_V4_1
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 04cb830fa09f..1e987acf20c9 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -27,9 +27,10 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o
dns_resolve.o nfs4trace.o
nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o
nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
-nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
+nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o
nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/
obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayout/
obj-$(CONFIG_PNFS_BLOCK) += blocklayout/
+obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += flexfilelayout/
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 77fec6a55f57..1cac3c175d18 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -860,12 +860,14 @@ static const struct nfs_pageio_ops bl_pg_read_ops = {
.pg_init = bl_pg_init_read,
.pg_test = bl_pg_test_read,
.pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops bl_pg_write_ops = {
.pg_init = bl_pg_init_write,
.pg_test = bl_pg_test_write,
.pg_doio = pnfs_generic_pg_writepages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static struct pnfs_layoutdriver_type blocklayout_type = {
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index b8fb3a4ef649..351be9205bf8 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp)
if (try_to_freeze())
continue;
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
struct rpc_rqst, rq_bc_list);
list_del(&req->rq_bc_list);
spin_unlock_bh(&serv->sv_cb_lock);
+ finish_wait(&serv->sv_cb_waitq, &wq);
dprintk("Invoking bc_svc_process()\n");
error = bc_svc_process(serv, req, rqstp);
dprintk("bc_svc_process() returned w/ error code= %d\n",
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
- schedule();
+ /* schedule_timeout to game the hung task watchdog */
+ schedule_timeout(60 * HZ);
+ finish_wait(&serv->sv_cb_waitq, &wq);
}
- finish_wait(&serv->sv_cb_waitq, &wq);
}
return 0;
}
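
The nfs41_callback_svc() hunk switches the idle wait to TASK_UNINTERRUPTIBLE, bounds it with schedule_timeout(60 * HZ) so the thread never sleeps uninterruptibly for more than a minute at a stretch (which, per the in-line comment, is what keeps the hung-task watchdog quiet), and calls finish_wait() on both branches before doing any further work. A generic sketch of that prepare/check/sleep consumer loop (toy names; loop termination such as kthread_should_stop() is omitted):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static void toy_consume(wait_queue_head_t *waitq, spinlock_t *lock,
			struct list_head *items)
{
	DEFINE_WAIT(wq);

	for (;;) {
		prepare_to_wait(waitq, &wq, TASK_UNINTERRUPTIBLE);
		spin_lock_bh(lock);
		if (!list_empty(items)) {
			/* ... dequeue one item under the lock ... */
			spin_unlock_bh(lock);
			finish_wait(waitq, &wq);
			/* ... process the item outside the lock ... */
		} else {
			spin_unlock_bh(lock);
			schedule_timeout(60 * HZ);	/* bounded sleep */
			finish_wait(waitq, &wq);
		}
	}
}
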
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7f3f60641344..da5433230bb1 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
{
struct inode *inode = state->inode;
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct list_head *list;
int status = 0;
- if (inode->i_flock == NULL)
+ if (flctx == NULL)
goto out;
- /* Protect inode->i_flock using the i_lock */
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
- continue;
+ list = &flctx->flc_posix;
+ spin_lock(&flctx->flc_lock);
+restart:
+ list_for_each_entry(fl, list, fl_list) {
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
status = nfs4_lock_delegation_recall(fl, state, stateid);
if (status < 0)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
+ if (list == &flctx->flc_posix) {
+ list = &flctx->flc_flock;
+ goto restart;
+ }
+ spin_unlock(&flctx->flc_lock);
out:
return status;
}
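
With flock and POSIX locks now on separate per-inode lists, nfs_delegation_claim_locks() walks flc_posix and then restarts the same loop body over flc_flock, dropping flc_lock around each recall since that call can sleep. The same traversal can be phrased without the restart label by looping over the two list heads; the sketch below is a hedged alternative phrasing, not the merged code, and it skips the open-context filter the real function applies:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>

static int toy_claim_locks(struct file_lock_context *flctx,
			   int (*recall)(struct file_lock *fl))
{
	struct list_head *lists[] = { &flctx->flc_posix, &flctx->flc_flock };
	struct file_lock *fl;
	int status = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(lists); i++) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, lists[i], fl_list) {
			spin_unlock(&flctx->flc_lock);
			status = recall(fl);	/* may sleep, like the recall above */
			if (status < 0)
				return status;
			spin_lock(&flctx->flc_lock);
		}
		spin_unlock(&flctx->flc_lock);
	}
	return status;
}
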
@@ -301,6 +306,17 @@ nfs_inode_detach_delegation(struct inode *inode)
return nfs_detach_delegation(nfsi, delegation, server);
}
+static void
+nfs_update_inplace_delegation(struct nfs_delegation *delegation,
+ const struct nfs_delegation *update)
+{
+ if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
+ delegation->stateid.seqid = update->stateid.seqid;
+ smp_wmb();
+ delegation->type = update->type;
+ }
+}
+
/**
* nfs_inode_set_delegation - set up a delegation on an inode
* @inode: inode to which delegation applies
@@ -334,9 +350,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
old_delegation = rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock));
if (old_delegation != NULL) {
- if (nfs4_stateid_match(&delegation->stateid,
- &old_delegation->stateid) &&
- delegation->type == old_delegation->type) {
+ /* Is this an update of the existing delegation? */
+ if (nfs4_stateid_match_other(&old_delegation->stateid,
+ &delegation->stateid)) {
+ nfs_update_inplace_delegation(old_delegation,
+ delegation);
+ nfsi->delegation_state = old_delegation->type;
goto out;
}
/*
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10bf07280f4a..7077521acdf4 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -66,6 +66,10 @@ static struct kmem_cache *nfs_direct_cachep;
/*
* This represents a set of asynchronous requests that we're waiting on
*/
+struct nfs_direct_mirror {
+ ssize_t count;
+};
+
struct nfs_direct_req {
struct kref kref; /* release manager */
@@ -78,8 +82,13 @@ struct nfs_direct_req {
/* completion state */
atomic_t io_count; /* i/os we're waiting for */
spinlock_t lock; /* protect completion state */
+
+ struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
+ int mirror_count;
+
ssize_t count, /* bytes actually processed */
bytes_left, /* bytes left to be sent */
+ io_start, /* start of IO */
error; /* any reported error */
struct completion completion; /* wait for i/o completion */
@@ -108,26 +117,56 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
return atomic_dec_and_test(&dreq->io_count);
}
+void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq)
+{
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+}
+EXPORT_SYMBOL_GPL(nfs_direct_set_resched_writes);
+
+static void
+nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+{
+ int i;
+ ssize_t count;
+
+ WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
+
+ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
+ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
+ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
+ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
+ }
+
+ /* update the dreq->count by finding the minimum agreed count from all
+ * mirrors */
+ count = dreq->mirrors[0].count;
+
+ for (i = 1; i < dreq->mirror_count; i++)
+ count = min(count, dreq->mirrors[i].count);
+
+ dreq->count = count;
+}
+
/*
* nfs_direct_select_verf - select the right verifier
* @dreq - direct request possibly spanning multiple servers
* @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
- * @ds_idx - index of data server in data server list, only valid if ds_clp set
+ * @commit_idx - commit bucket index for the DS
*
* returns the correct verifier to use given the role of the server
*/
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
struct nfs_client *ds_clp,
- int ds_idx)
+ int commit_idx)
{
struct nfs_writeverf *verfp = &dreq->verf;
#ifdef CONFIG_NFS_V4_1
if (ds_clp) {
/* pNFS is in use, use the DS verf */
- if (ds_idx >= 0 && ds_idx < dreq->ds_cinfo.nbuckets)
- verfp = &dreq->ds_cinfo.buckets[ds_idx].direct_verf;
+ if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
+ verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
else
WARN_ON_ONCE(1);
}
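
nfs_direct_good_bytes() above advances a per-mirror count of contiguously completed bytes from each header and then sets dreq->count to the minimum across all mirrors, so the direct request only reports bytes that every mirror has covered. The arithmetic reduces to the standalone example below (plain C, invented names, no NFS types):

#include <stdio.h>

/* Each mirror tracks how many bytes (from the start of the direct I/O) it
 * has completed; the request as a whole can only claim the smallest value. */
static long long min_agreed_count(const long long *mirror_count, int nr_mirrors)
{
	long long count = mirror_count[0];
	int i;

	for (i = 1; i < nr_mirrors; i++)
		if (mirror_count[i] < count)
			count = mirror_count[i];
	return count;
}

int main(void)
{
	/* mirror 0 finished 48 KiB, mirror 1 only 32 KiB -> report 32 KiB */
	long long counts[] = { 48 * 1024, 32 * 1024 };

	printf("%lld\n", min_agreed_count(counts, 2));
	return 0;
}
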
@@ -148,8 +187,7 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
{
struct nfs_writeverf *verfp;
- verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
- hdr->ds_idx);
+ verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
WARN_ON_ONCE(verfp->committed >= 0);
memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
WARN_ON_ONCE(verfp->committed < 0);
@@ -169,8 +207,7 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
{
struct nfs_writeverf *verfp;
- verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
- hdr->ds_idx);
+ verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
if (verfp->committed < 0) {
nfs_direct_set_hdr_verf(dreq, hdr);
return 0;
@@ -193,7 +230,11 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
verfp = nfs_direct_select_verf(dreq, data->ds_clp,
data->ds_commit_index);
- WARN_ON_ONCE(verfp->committed < 0);
+
+ /* verifier not set so always fail */
+ if (verfp->committed < 0)
+ return 1;
+
return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
}
@@ -212,6 +253,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
*/
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+ /* we only support swap file calling nfs_direct_IO */
+ if (!IS_SWAPFILE(inode))
+ return 0;
+
#ifndef CONFIG_NFS_SWAP
dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
iocb->ki_filp, (long long) pos, iter->nr_segs);
@@ -243,6 +290,18 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}
+static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
+ struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ int mirror_count = 1;
+
+ if (pgio->pg_ops->pg_get_mirror_count)
+ mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+
+ dreq->mirror_count = mirror_count;
+}
+
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
struct nfs_direct_req *dreq;
@@ -257,6 +316,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
INIT_LIST_HEAD(&dreq->mds_cinfo.list);
dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */
INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
+ dreq->mirror_count = 1;
spin_lock_init(&dreq->lock);
return dreq;
@@ -363,7 +423,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
dreq->error = hdr->error;
else
- dreq->count += hdr->good_bytes;
+ nfs_direct_good_bytes(dreq, hdr);
+
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
@@ -541,6 +602,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
dreq->inode = inode;
dreq->bytes_left = count;
+ dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
@@ -573,6 +635,20 @@ out:
return result;
}
+static void
+nfs_direct_write_scan_commit_list(struct inode *inode,
+ struct list_head *list,
+ struct nfs_commit_info *cinfo)
+{
+ spin_lock(cinfo->lock);
+#ifdef CONFIG_NFS_V4_1
+ if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
+ NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
+#endif
+ nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
+ spin_unlock(cinfo->lock);
+}
+
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor desc;
@@ -580,20 +656,23 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
LIST_HEAD(reqs);
struct nfs_commit_info cinfo;
LIST_HEAD(failed);
+ int i;
nfs_init_cinfo_from_dreq(&cinfo, dreq);
- pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
- spin_lock(cinfo.lock);
- nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
- spin_unlock(cinfo.lock);
+ nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
dreq->count = 0;
+ for (i = 0; i < dreq->mirror_count; i++)
+ dreq->mirrors[i].count = 0;
get_dreq(dreq);
nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
+ req = nfs_list_entry(reqs.next);
+ nfs_direct_setup_mirroring(dreq, &desc, req);
+
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
if (!nfs_pageio_add_request(&desc, req)) {
nfs_list_remove_request(req);
@@ -640,7 +719,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
nfs_list_remove_request(req);
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
/* Note the rewrite will go through mds */
- nfs_mark_request_commit(req, NULL, &cinfo);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
} else
nfs_release_request(req);
nfs_unlock_and_release_request(req);
@@ -715,7 +794,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
dreq->error = hdr->error;
}
if (dreq->error == 0) {
- dreq->count += hdr->good_bytes;
+ nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
request_commit = true;
@@ -739,7 +818,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (request_commit) {
kref_get(&req->wb_kref);
- nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+ hdr->ds_commit_idx);
}
nfs_unlock_and_release_request(req);
}
@@ -820,6 +900,9 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
result = PTR_ERR(req);
break;
}
+
+ nfs_direct_setup_mirroring(dreq, &desc, req);
+
nfs_lock_request(req);
req->wb_index = pos >> PAGE_SHIFT;
req->wb_offset = pos & ~PAGE_MASK;
@@ -928,6 +1011,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
dreq->inode = inode;
dreq->bytes_left = count;
+ dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2ab6f00dba5b..94712fc781fa 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -646,7 +646,6 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = nfs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int nfs_need_sync_write(struct file *filp, struct inode *inode)
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 7afb52f6a25a..7ae1c263c5cf 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -118,13 +118,6 @@ static void filelayout_reset_read(struct nfs_pgio_header *hdr)
}
}
-static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
-{
- if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
- return;
- pnfs_return_layout(inode);
-}
-
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
@@ -207,7 +200,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
- set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
+ pnfs_error_mark_layout_for_return(inode, lseg);
rpc_wake_up(&tbl->slot_tbl_waitq);
/* fall through */
default:
@@ -339,16 +332,6 @@ static void filelayout_read_count_stats(struct rpc_task *task, void *data)
rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}
-static void filelayout_read_release(void *data)
-{
- struct nfs_pgio_header *hdr = data;
- struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout;
-
- filelayout_fenceme(lo->plh_inode, lo);
- nfs_put_client(hdr->ds_clp);
- hdr->mds_ops->rpc_release(data);
-}
-
static int filelayout_write_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
@@ -371,17 +354,6 @@ static int filelayout_write_done_cb(struct rpc_task *task,
return 0;
}
-/* Fake up some data that will cause nfs_commit_release to retry the writes. */
-static void prepare_to_resend_writes(struct nfs_commit_data *data)
-{
- struct nfs_page *first = nfs_list_entry(data->pages.next);
-
- data->task.tk_status = 0;
- memcpy(&data->verf.verifier, &first->wb_verf,
- sizeof(data->verf.verifier));
- data->verf.verifier.data[0]++; /* ensure verifier mismatch */
-}
-
static int filelayout_commit_done_cb(struct rpc_task *task,
struct nfs_commit_data *data)
{
@@ -393,7 +365,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
switch (err) {
case -NFS4ERR_RESET_TO_MDS:
- prepare_to_resend_writes(data);
+ pnfs_generic_prepare_to_resend_writes(data);
return -EAGAIN;
case -EAGAIN:
rpc_restart_call_prepare(task);
@@ -451,16 +423,6 @@ static void filelayout_write_count_stats(struct rpc_task *task, void *data)
rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}
-static void filelayout_write_release(void *data)
-{
- struct nfs_pgio_header *hdr = data;
- struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout;
-
- filelayout_fenceme(lo->plh_inode, lo);
- nfs_put_client(hdr->ds_clp);
- hdr->mds_ops->rpc_release(data);
-}
-
static void filelayout_commit_prepare(struct rpc_task *task, void *data)
{
struct nfs_commit_data *wdata = data;
@@ -471,14 +433,6 @@ static void filelayout_commit_prepare(struct rpc_task *task, void *data)
task);
}
-static void filelayout_write_commit_done(struct rpc_task *task, void *data)
-{
- struct nfs_commit_data *wdata = data;
-
- /* Note this may cause RPC to be resent */
- wdata->mds_ops->rpc_call_done(task, data);
-}
-
static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
{
struct nfs_commit_data *cdata = data;
@@ -486,35 +440,25 @@ static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}
-static void filelayout_commit_release(void *calldata)
-{
- struct nfs_commit_data *data = calldata;
-
- data->completion_ops->completion(data);
- pnfs_put_lseg(data->lseg);
- nfs_put_client(data->ds_clp);
- nfs_commitdata_release(data);
-}
-
static const struct rpc_call_ops filelayout_read_call_ops = {
.rpc_call_prepare = filelayout_read_prepare,
.rpc_call_done = filelayout_read_call_done,
.rpc_count_stats = filelayout_read_count_stats,
- .rpc_release = filelayout_read_release,
+ .rpc_release = pnfs_generic_rw_release,
};
static const struct rpc_call_ops filelayout_write_call_ops = {
.rpc_call_prepare = filelayout_write_prepare,
.rpc_call_done = filelayout_write_call_done,
.rpc_count_stats = filelayout_write_count_stats,
- .rpc_release = filelayout_write_release,
+ .rpc_release = pnfs_generic_rw_release,
};
static const struct rpc_call_ops filelayout_commit_call_ops = {
.rpc_call_prepare = filelayout_commit_prepare,
- .rpc_call_done = filelayout_write_commit_done,
+ .rpc_call_done = pnfs_generic_write_commit_done,
.rpc_count_stats = filelayout_commit_count_stats,
- .rpc_release = filelayout_commit_release,
+ .rpc_release = pnfs_generic_commit_release,
};
static enum pnfs_try_status
@@ -548,7 +492,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
/* No multipath support. Use first DS */
atomic_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- hdr->ds_idx = idx;
+ hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
hdr->args.fh = fh;
@@ -557,8 +501,9 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->mds_offset = offset;
/* Perform an asynchronous read to ds */
- nfs_initiate_pgio(ds_clnt, hdr,
- &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN);
+ nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
+ NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
+ 0, RPC_TASK_SOFTCONN);
return PNFS_ATTEMPTED;
}
@@ -591,16 +536,16 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
hdr->pgio_done_cb = filelayout_write_done_cb;
atomic_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- hdr->ds_idx = idx;
+ hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
hdr->args.fh = fh;
hdr->args.offset = filelayout_get_dserver_offset(lseg, offset);
/* Perform an asynchronous write */
- nfs_initiate_pgio(ds_clnt, hdr,
- &filelayout_write_call_ops, sync,
- RPC_TASK_SOFTCONN);
+ nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
+ NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
+ sync, RPC_TASK_SOFTCONN);
return PNFS_ATTEMPTED;
}
@@ -988,12 +933,14 @@ static const struct nfs_pageio_ops filelayout_pg_read_ops = {
.pg_init = filelayout_pg_init_read,
.pg_test = filelayout_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops filelayout_pg_write_ops = {
.pg_init = filelayout_pg_init_write,
.pg_test = filelayout_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
@@ -1004,37 +951,11 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
return j;
}
-/* The generic layer is about to remove the req from the commit list.
- * If this will make the bucket empty, it will need to put the lseg reference.
- * Note this is must be called holding the inode (/cinfo) lock
- */
-static void
-filelayout_clear_request_commit(struct nfs_page *req,
- struct nfs_commit_info *cinfo)
-{
- struct pnfs_layout_segment *freeme = NULL;
-
- if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
- goto out;
- cinfo->ds->nwritten--;
- if (list_is_singular(&req->wb_list)) {
- struct pnfs_commit_bucket *bucket;
-
- bucket = list_first_entry(&req->wb_list,
- struct pnfs_commit_bucket,
- written);
- freeme = bucket->wlseg;
- bucket->wlseg = NULL;
- }
-out:
- nfs_request_remove_commit_list(req, cinfo);
- pnfs_put_lseg_locked(freeme);
-}
-
static void
filelayout_mark_request_commit(struct nfs_page *req,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx)
{
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
@@ -1064,7 +985,7 @@ filelayout_mark_request_commit(struct nfs_page *req,
* is normally transferred to the COMMIT call and released
* there. It could also be released if the last req is pulled
* off due to a rewrite, in which case it will be done in
- * filelayout_clear_request_commit
+ * pnfs_generic_clear_request_commit
*/
buckets[i].wlseg = pnfs_get_lseg(lseg);
}
@@ -1081,7 +1002,7 @@ mds_commit:
spin_unlock(cinfo->lock);
if (!cinfo->dreq) {
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+ inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
BDI_RECLAIMABLE);
__mark_inode_dirty(req->wb_context->dentry->d_inode,
I_DIRTY_DATASYNC);
@@ -1138,101 +1059,15 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)
data->args.fh = fh;
- return nfs_initiate_commit(ds_clnt, data,
+ return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
&filelayout_commit_call_ops, how,
RPC_TASK_SOFTCONN);
out_err:
- prepare_to_resend_writes(data);
- filelayout_commit_release(data);
+ pnfs_generic_prepare_to_resend_writes(data);
+ pnfs_generic_commit_release(data);
return -EAGAIN;
}
-static int
-transfer_commit_list(struct list_head *src, struct list_head *dst,
- struct nfs_commit_info *cinfo, int max)
-{
- struct nfs_page *req, *tmp;
- int ret = 0;
-
- list_for_each_entry_safe(req, tmp, src, wb_list) {
- if (!nfs_lock_request(req))
- continue;
- kref_get(&req->wb_kref);
- if (cond_resched_lock(cinfo->lock))
- list_safe_reset_next(req, tmp, wb_list);
- nfs_request_remove_commit_list(req, cinfo);
- clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
- nfs_list_add_request(req, dst);
- ret++;
- if ((ret == max) && !cinfo->dreq)
- break;
- }
- return ret;
-}
-
-/* Note called with cinfo->lock held. */
-static int
-filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
- struct nfs_commit_info *cinfo,
- int max)
-{
- struct list_head *src = &bucket->written;
- struct list_head *dst = &bucket->committing;
- int ret;
-
- ret = transfer_commit_list(src, dst, cinfo, max);
- if (ret) {
- cinfo->ds->nwritten -= ret;
- cinfo->ds->ncommitting += ret;
- bucket->clseg = bucket->wlseg;
- if (list_empty(src))
- bucket->wlseg = NULL;
- else
- pnfs_get_lseg(bucket->clseg);
- }
- return ret;
-}
-
-/* Move reqs from written to committing lists, returning count of number moved.
- * Note called with cinfo->lock held.
- */
-static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
- int max)
-{
- int i, rv = 0, cnt;
-
- for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
- cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
- cinfo, max);
- max -= cnt;
- rv += cnt;
- }
- return rv;
-}
-
-/* Pull everything off the committing lists and dump into @dst */
-static void filelayout_recover_commit_reqs(struct list_head *dst,
- struct nfs_commit_info *cinfo)
-{
- struct pnfs_commit_bucket *b;
- struct pnfs_layout_segment *freeme;
- int i;
-
-restart:
- spin_lock(cinfo->lock);
- for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
- if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
- freeme = b->wlseg;
- b->wlseg = NULL;
- spin_unlock(cinfo->lock);
- pnfs_put_lseg(freeme);
- goto restart;
- }
- }
- cinfo->ds->nwritten = 0;
- spin_unlock(cinfo->lock);
-}
-
/* filelayout_search_commit_reqs - Search lists in @cinfo for the head reqest
* for @page
* @cinfo - commit info for current inode
@@ -1263,108 +1098,14 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
return NULL;
}
-static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx)
-{
- struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- struct pnfs_commit_bucket *bucket;
- struct pnfs_layout_segment *freeme;
- int i;
-
- for (i = idx; i < fl_cinfo->nbuckets; i++) {
- bucket = &fl_cinfo->buckets[i];
- if (list_empty(&bucket->committing))
- continue;
- nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
- spin_lock(cinfo->lock);
- freeme = bucket->clseg;
- bucket->clseg = NULL;
- spin_unlock(cinfo->lock);
- pnfs_put_lseg(freeme);
- }
-}
-
-static unsigned int
-alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
-{
- struct pnfs_ds_commit_info *fl_cinfo;
- struct pnfs_commit_bucket *bucket;
- struct nfs_commit_data *data;
- int i;
- unsigned int nreq = 0;
-
- fl_cinfo = cinfo->ds;
- bucket = fl_cinfo->buckets;
- for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
- if (list_empty(&bucket->committing))
- continue;
- data = nfs_commitdata_alloc();
- if (!data)
- break;
- data->ds_commit_index = i;
- spin_lock(cinfo->lock);
- data->lseg = bucket->clseg;
- bucket->clseg = NULL;
- spin_unlock(cinfo->lock);
- list_add(&data->pages, list);
- nreq++;
- }
-
- /* Clean up on error */
- filelayout_retry_commit(cinfo, i);
- /* Caller will clean up entries put on list */
- return nreq;
-}
-
-/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
int how, struct nfs_commit_info *cinfo)
{
- struct nfs_commit_data *data, *tmp;
- LIST_HEAD(list);
- unsigned int nreq = 0;
-
- if (!list_empty(mds_pages)) {
- data = nfs_commitdata_alloc();
- if (data != NULL) {
- data->lseg = NULL;
- list_add(&data->pages, &list);
- nreq++;
- } else {
- nfs_retry_commit(mds_pages, NULL, cinfo);
- filelayout_retry_commit(cinfo, 0);
- cinfo->completion_ops->error_cleanup(NFS_I(inode));
- return -ENOMEM;
- }
- }
-
- nreq += alloc_ds_commits(cinfo, &list);
-
- if (nreq == 0) {
- cinfo->completion_ops->error_cleanup(NFS_I(inode));
- goto out;
- }
-
- atomic_add(nreq, &cinfo->mds->rpcs_out);
-
- list_for_each_entry_safe(data, tmp, &list, pages) {
- list_del_init(&data->pages);
- if (!data->lseg) {
- nfs_init_commit(data, mds_pages, NULL, cinfo);
- nfs_initiate_commit(NFS_CLIENT(inode), data,
- data->mds_ops, how, 0);
- } else {
- struct pnfs_commit_bucket *buckets;
-
- buckets = cinfo->ds->buckets;
- nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
- filelayout_initiate_commit(data, how);
- }
- }
-out:
- cinfo->ds->ncommitting = 0;
- return PNFS_ATTEMPTED;
+ return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
+ filelayout_initiate_commit);
}
+
static struct nfs4_deviceid_node *
filelayout_alloc_deviceid_node(struct nfs_server *server,
struct pnfs_device *pdev, gfp_t gfp_flags)
@@ -1421,9 +1162,9 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.pg_write_ops = &filelayout_pg_write_ops,
.get_ds_info = &filelayout_get_ds_info,
.mark_request_commit = filelayout_mark_request_commit,
- .clear_request_commit = filelayout_clear_request_commit,
- .scan_commit_lists = filelayout_scan_commit_lists,
- .recover_commit_reqs = filelayout_recover_commit_reqs,
+ .clear_request_commit = pnfs_generic_clear_request_commit,
+ .scan_commit_lists = pnfs_generic_scan_commit_lists,
+ .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
.search_commit_reqs = filelayout_search_commit_reqs,
.commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 7c9f800c49d7..2896cb833a11 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -33,13 +33,6 @@
#include "../pnfs.h"
/*
- * Default data server connection timeout and retrans vaules.
- * Set by module paramters dataserver_timeo and dataserver_retrans.
- */
-#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
-#define NFS4_DEF_DS_RETRANS 5
-
-/*
* Field testing shows we need to support up to 4096 stripe indices.
* We store each index as a u8 (u32 on the wire) to keep the memory footprint
* reasonable. This in turn means we support a maximum of 256
@@ -48,32 +41,11 @@
#define NFS4_PNFS_MAX_STRIPE_CNT 4096
#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
-/* error codes for internal use */
-#define NFS4ERR_RESET_TO_MDS 12001
-
enum stripetype4 {
STRIPE_SPARSE = 1,
STRIPE_DENSE = 2
};
-/* Individual ip address */
-struct nfs4_pnfs_ds_addr {
- struct sockaddr_storage da_addr;
- size_t da_addrlen;
- struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */
- char *da_remotestr; /* human readable addr+port */
-};
-
-struct nfs4_pnfs_ds {
- struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
- char *ds_remotestr; /* comma sep list of addrs */
- struct list_head ds_addrs;
- struct nfs_client *ds_clp;
- atomic_t ds_count;
- unsigned long ds_state;
-#define NFS4DS_CONNECTING 0 /* ds is establishing connection */
-};
-
struct nfs4_file_layout_dsaddr {
struct nfs4_deviceid_node id_node;
u32 stripe_count;
@@ -119,17 +91,6 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
}
-static inline void
-filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
-{
- u32 *p = (u32 *)&node->deviceid;
-
- printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
- p[0], p[1], p[2], p[3]);
-
- set_bit(NFS_DEVICEID_INVALID, &node->flags);
-}
-
static inline bool
filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
{
@@ -142,7 +103,6 @@ filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node);
extern struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
-extern void print_ds(struct nfs4_pnfs_ds *ds);
u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset);
u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j);
struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg,
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index bfecac781f19..4f372e224603 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -31,7 +31,6 @@
#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
-#include <linux/sunrpc/addr.h>
#include "../internal.h"
#include "../nfs4session.h"
@@ -42,183 +41,6 @@
static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
-/*
- * Data server cache
- *
- * Data servers can be mapped to different device ids.
- * nfs4_pnfs_ds reference counting
- * - set to 1 on allocation
- * - incremented when a device id maps a data server already in the cache.
- * - decremented when deviceid is removed from the cache.
- */
-static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
-static LIST_HEAD(nfs4_data_server_cache);
-
-/* Debug routines */
-void
-print_ds(struct nfs4_pnfs_ds *ds)
-{
- if (ds == NULL) {
- printk("%s NULL device\n", __func__);
- return;
- }
- printk(" ds %s\n"
- " ref count %d\n"
- " client %p\n"
- " cl_exchange_flags %x\n",
- ds->ds_remotestr,
- atomic_read(&ds->ds_count), ds->ds_clp,
- ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
-}
-
-static bool
-same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
-{
- struct sockaddr_in *a, *b;
- struct sockaddr_in6 *a6, *b6;
-
- if (addr1->sa_family != addr2->sa_family)
- return false;
-
- switch (addr1->sa_family) {
- case AF_INET:
- a = (struct sockaddr_in *)addr1;
- b = (struct sockaddr_in *)addr2;
-
- if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
- a->sin_port == b->sin_port)
- return true;
- break;
-
- case AF_INET6:
- a6 = (struct sockaddr_in6 *)addr1;
- b6 = (struct sockaddr_in6 *)addr2;
-
- /* LINKLOCAL addresses must have matching scope_id */
- if (ipv6_addr_src_scope(&a6->sin6_addr) ==
- IPV6_ADDR_SCOPE_LINKLOCAL &&
- a6->sin6_scope_id != b6->sin6_scope_id)
- return false;
-
- if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
- a6->sin6_port == b6->sin6_port)
- return true;
- break;
-
- default:
- dprintk("%s: unhandled address family: %u\n",
- __func__, addr1->sa_family);
- return false;
- }
-
- return false;
-}
-
-static bool
-_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
- const struct list_head *dsaddrs2)
-{
- struct nfs4_pnfs_ds_addr *da1, *da2;
-
- /* step through both lists, comparing as we go */
- for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
- da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
- da1 != NULL && da2 != NULL;
- da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
- da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
- if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
- (struct sockaddr *)&da2->da_addr))
- return false;
- }
- if (da1 == NULL && da2 == NULL)
- return true;
-
- return false;
-}
-
-/*
- * Lookup DS by addresses. nfs4_ds_cache_lock is held
- */
-static struct nfs4_pnfs_ds *
-_data_server_lookup_locked(const struct list_head *dsaddrs)
-{
- struct nfs4_pnfs_ds *ds;
-
- list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
- if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
- return ds;
- return NULL;
-}
-
-/*
- * Create an rpc connection to the nfs4_pnfs_ds data server
- * Currently only supports IPv4 and IPv6 addresses
- */
-static int
-nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
-{
- struct nfs_client *clp = ERR_PTR(-EIO);
- struct nfs4_pnfs_ds_addr *da;
- int status = 0;
-
- dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
- mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);
-
- list_for_each_entry(da, &ds->ds_addrs, da_node) {
- dprintk("%s: DS %s: trying address %s\n",
- __func__, ds->ds_remotestr, da->da_remotestr);
-
- clp = nfs4_set_ds_client(mds_srv->nfs_client,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP,
- dataserver_timeo, dataserver_retrans);
- if (!IS_ERR(clp))
- break;
- }
-
- if (IS_ERR(clp)) {
- status = PTR_ERR(clp);
- goto out;
- }
-
- status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
- if (status)
- goto out_put;
-
- smp_wmb();
- ds->ds_clp = clp;
- dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
-out:
- return status;
-out_put:
- nfs_put_client(clp);
- goto out;
-}
-
-static void
-destroy_ds(struct nfs4_pnfs_ds *ds)
-{
- struct nfs4_pnfs_ds_addr *da;
-
- dprintk("--> %s\n", __func__);
- ifdebug(FACILITY)
- print_ds(ds);
-
- nfs_put_client(ds->ds_clp);
-
- while (!list_empty(&ds->ds_addrs)) {
- da = list_first_entry(&ds->ds_addrs,
- struct nfs4_pnfs_ds_addr,
- da_node);
- list_del_init(&da->da_node);
- kfree(da->da_remotestr);
- kfree(da);
- }
-
- kfree(ds->ds_remotestr);
- kfree(ds);
-}
-
void
nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
@@ -229,259 +51,13 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
for (i = 0; i < dsaddr->ds_num; i++) {
ds = dsaddr->ds_list[i];
- if (ds != NULL) {
- if (atomic_dec_and_lock(&ds->ds_count,
- &nfs4_ds_cache_lock)) {
- list_del_init(&ds->ds_node);
- spin_unlock(&nfs4_ds_cache_lock);
- destroy_ds(ds);
- }
- }
+ if (ds != NULL)
+ nfs4_pnfs_ds_put(ds);
}
kfree(dsaddr->stripe_indices);
kfree(dsaddr);
}
-/*
- * Create a string with a human readable address and port to avoid
- * complicated setup around many dprinks.
- */
-static char *
-nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
-{
- struct nfs4_pnfs_ds_addr *da;
- char *remotestr;
- size_t len;
- char *p;
-
- len = 3; /* '{', '}' and eol */
- list_for_each_entry(da, dsaddrs, da_node) {
- len += strlen(da->da_remotestr) + 1; /* string plus comma */
- }
-
- remotestr = kzalloc(len, gfp_flags);
- if (!remotestr)
- return NULL;
-
- p = remotestr;
- *(p++) = '{';
- len--;
- list_for_each_entry(da, dsaddrs, da_node) {
- size_t ll = strlen(da->da_remotestr);
-
- if (ll > len)
- goto out_err;
-
- memcpy(p, da->da_remotestr, ll);
- p += ll;
- len -= ll;
-
- if (len < 1)
- goto out_err;
- (*p++) = ',';
- len--;
- }
- if (len < 2)
- goto out_err;
- *(p++) = '}';
- *p = '\0';
- return remotestr;
-out_err:
- kfree(remotestr);
- return NULL;
-}
-
-static struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
-{
- struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
- char *remotestr;
-
- if (list_empty(dsaddrs)) {
- dprintk("%s: no addresses defined\n", __func__);
- goto out;
- }
-
- ds = kzalloc(sizeof(*ds), gfp_flags);
- if (!ds)
- goto out;
-
- /* this is only used for debugging, so it's ok if its NULL */
- remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
-
- spin_lock(&nfs4_ds_cache_lock);
- tmp_ds = _data_server_lookup_locked(dsaddrs);
- if (tmp_ds == NULL) {
- INIT_LIST_HEAD(&ds->ds_addrs);
- list_splice_init(dsaddrs, &ds->ds_addrs);
- ds->ds_remotestr = remotestr;
- atomic_set(&ds->ds_count, 1);
- INIT_LIST_HEAD(&ds->ds_node);
- ds->ds_clp = NULL;
- list_add(&ds->ds_node, &nfs4_data_server_cache);
- dprintk("%s add new data server %s\n", __func__,
- ds->ds_remotestr);
- } else {
- kfree(remotestr);
- kfree(ds);
- atomic_inc(&tmp_ds->ds_count);
- dprintk("%s data server %s found, inc'ed ds_count to %d\n",
- __func__, tmp_ds->ds_remotestr,
- atomic_read(&tmp_ds->ds_count));
- ds = tmp_ds;
- }
- spin_unlock(&nfs4_ds_cache_lock);
-out:
- return ds;
-}
-
-/*
- * Currently only supports ipv4, ipv6 and one multi-path address.
- */
-static struct nfs4_pnfs_ds_addr *
-decode_ds_addr(struct net *net, struct xdr_stream *streamp, gfp_t gfp_flags)
-{
- struct nfs4_pnfs_ds_addr *da = NULL;
- char *buf, *portstr;
- __be16 port;
- int nlen, rlen;
- int tmp[2];
- __be32 *p;
- char *netid, *match_netid;
- size_t len, match_netid_len;
- char *startsep = "";
- char *endsep = "";
-
-
- /* r_netid */
- p = xdr_inline_decode(streamp, 4);
- if (unlikely(!p))
- goto out_err;
- nlen = be32_to_cpup(p++);
-
- p = xdr_inline_decode(streamp, nlen);
- if (unlikely(!p))
- goto out_err;
-
- netid = kmalloc(nlen+1, gfp_flags);
- if (unlikely(!netid))
- goto out_err;
-
- netid[nlen] = '\0';
- memcpy(netid, p, nlen);
-
- /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
- p = xdr_inline_decode(streamp, 4);
- if (unlikely(!p))
- goto out_free_netid;
- rlen = be32_to_cpup(p);
-
- p = xdr_inline_decode(streamp, rlen);
- if (unlikely(!p))
- goto out_free_netid;
-
- /* port is ".ABC.DEF", 8 chars max */
- if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
- dprintk("%s: Invalid address, length %d\n", __func__,
- rlen);
- goto out_free_netid;
- }
- buf = kmalloc(rlen + 1, gfp_flags);
- if (!buf) {
- dprintk("%s: Not enough memory\n", __func__);
- goto out_free_netid;
- }
- buf[rlen] = '\0';
- memcpy(buf, p, rlen);
-
- /* replace port '.' with '-' */
- portstr = strrchr(buf, '.');
- if (!portstr) {
- dprintk("%s: Failed finding expected dot in port\n",
- __func__);
- goto out_free_buf;
- }
- *portstr = '-';
-
- /* find '.' between address and port */
- portstr = strrchr(buf, '.');
- if (!portstr) {
- dprintk("%s: Failed finding expected dot between address and "
- "port\n", __func__);
- goto out_free_buf;
- }
- *portstr = '\0';
-
- da = kzalloc(sizeof(*da), gfp_flags);
- if (unlikely(!da))
- goto out_free_buf;
-
- INIT_LIST_HEAD(&da->da_node);
-
- if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
- sizeof(da->da_addr))) {
- dprintk("%s: error parsing address %s\n", __func__, buf);
- goto out_free_da;
- }
-
- portstr++;
- sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
- port = htons((tmp[0] << 8) | (tmp[1]));
-
- switch (da->da_addr.ss_family) {
- case AF_INET:
- ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
- da->da_addrlen = sizeof(struct sockaddr_in);
- match_netid = "tcp";
- match_netid_len = 3;
- break;
-
- case AF_INET6:
- ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
- da->da_addrlen = sizeof(struct sockaddr_in6);
- match_netid = "tcp6";
- match_netid_len = 4;
- startsep = "[";
- endsep = "]";
- break;
-
- default:
- dprintk("%s: unsupported address family: %u\n",
- __func__, da->da_addr.ss_family);
- goto out_free_da;
- }
-
- if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
- dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
- __func__, netid, match_netid);
- goto out_free_da;
- }
-
- /* save human readable address */
- len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
- da->da_remotestr = kzalloc(len, gfp_flags);
-
- /* NULL is ok, only used for dprintk */
- if (da->da_remotestr)
- snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
- buf, endsep, ntohs(port));
-
- dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
- kfree(buf);
- kfree(netid);
- return da;
-
-out_free_da:
- kfree(da);
-out_free_buf:
- dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
- kfree(buf);
-out_free_netid:
- kfree(netid);
-out_err:
- return NULL;
-}
-
/* Decode opaque device data and return the result */
struct nfs4_file_layout_dsaddr *
nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
@@ -584,8 +160,8 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
mp_count = be32_to_cpup(p); /* multipath count */
for (j = 0; j < mp_count; j++) {
- da = decode_ds_addr(server->nfs_client->cl_net,
- &stream, gfp_flags);
+ da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
+ &stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
}
@@ -681,22 +257,7 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
return flseg->fh_array[i];
}
-static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
-{
- might_sleep();
- wait_on_bit_action(&ds->ds_state, NFS4DS_CONNECTING,
- nfs_wait_bit_killable, TASK_KILLABLE);
-}
-
-static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
-{
- smp_mb__before_atomic();
- clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
- smp_mb__after_atomic();
- wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
-}
-
-
+/* Upon return, either ds is connected, or ds is NULL */
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
@@ -704,29 +265,23 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs4_pnfs_ds *ret = ds;
+ struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
__func__, ds_idx);
- filelayout_mark_devid_invalid(devid);
+ pnfs_generic_mark_devid_invalid(devid);
goto out;
}
smp_rmb();
if (ds->ds_clp)
goto out_test_devid;
- if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
- struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
- int err;
-
- err = nfs4_ds_connect(s, ds);
- if (err)
- nfs4_mark_deviceid_unavailable(devid);
- nfs4_clear_ds_conn_bit(ds);
- } else {
- /* Either ds is connected, or ds is NULL */
- nfs4_wait_ds_connect(ds);
- }
+ nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ dataserver_retrans, 4,
+ s->nfs_client->cl_minorversion,
+ s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
+
out_test_devid:
if (filelayout_test_devid_unavailable(devid))
ret = NULL;
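
Note: the per-driver data-server connect logic is gone here; nfs4_pnfs_ds_connect() now carries the timeout, retrans, NFS version/minorversion and auth flavor, so the file layout and the new flexfile layout can share one DS cache and one connect path. The scheme the shared helper is expected to keep is the connect-once/wait pattern removed above, roughly:

/* Connect-once pattern (as in the code removed above): NFS4DS_CONNECTING
 * serialises racing callers so only one task dials the data server. */
if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
	err = nfs4_ds_connect(s, ds);		/* set up rpc_clnt/session */
	if (err)
		nfs4_mark_deviceid_unavailable(devid);
	nfs4_clear_ds_conn_bit(ds);		/* wake any waiters */
} else {
	nfs4_wait_ds_connect(ds);		/* wait for the winner */
}
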
diff --git a/fs/nfs/flexfilelayout/Makefile b/fs/nfs/flexfilelayout/Makefile
new file mode 100644
index 000000000000..1d2c9f6bbcd4
--- /dev/null
+++ b/fs/nfs/flexfilelayout/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the pNFS Flexfile Layout Driver kernel module
+#
+obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += nfs_layout_flexfiles.o
+nfs_layout_flexfiles-y := flexfilelayout.o flexfilelayoutdev.o
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
new file mode 100644
index 000000000000..c22ecaa86c1c
--- /dev/null
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -0,0 +1,1574 @@
+/*
+ * Module for pnfs flexfile layout driver.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/module.h>
+
+#include <linux/sunrpc/metrics.h>
+#include <linux/nfs_idmap.h>
+
+#include "flexfilelayout.h"
+#include "../nfs4session.h"
+#include "../internal.h"
+#include "../delegation.h"
+#include "../nfs4trace.h"
+#include "../iostat.h"
+#include "../nfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
+
+static struct pnfs_layout_hdr *
+ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+ struct nfs4_flexfile_layout *ffl;
+
+ ffl = kzalloc(sizeof(*ffl), gfp_flags);
+ if (ffl) {
+ INIT_LIST_HEAD(&ffl->error_list);
+ return &ffl->generic_hdr;
+ } else
+ return NULL;
+}
+
+static void
+ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct nfs4_ff_layout_ds_err *err, *n;
+
+ list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
+ list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ kfree(FF_LAYOUT_FROM_HDR(lo));
+}
+
+static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
+ if (unlikely(p == NULL))
+ return -ENOBUFS;
+ memcpy(stateid, p, NFS4_STATEID_SIZE);
+ dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
+ p[0], p[1], p[2], p[3]);
+ return 0;
+}
+
+static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ memcpy(devid, p, NFS4_DEVICEID4_SIZE);
+ nfs4_print_deviceid(devid);
+ return 0;
+}
+
+static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ fh->size = be32_to_cpup(p++);
+ if (fh->size > sizeof(struct nfs_fh)) {
+ printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
+ fh->size);
+ return -EOVERFLOW;
+ }
+ /* fh.data */
+ p = xdr_inline_decode(xdr, fh->size);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ memcpy(&fh->data, p, fh->size);
+ dprintk("%s: fh len %d\n", __func__, fh->size);
+
+ return 0;
+}
+
+/*
+ * Currently only stringified uids and gids are accepted.
+ * I.e., Kerberos is not supported to the DSes, so no principals.
+ *
+ * That means that one common function will suffice, but when
+ * principals are added, this should be split to accommodate
+ * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
+ */
+static int
+decode_name(struct xdr_stream *xdr, u32 *id)
+{
+ __be32 *p;
+ int len;
+
+ /* opaque_length(4)*/
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ len = be32_to_cpup(p++);
+ if (len < 0)
+ return -EINVAL;
+
+ dprintk("%s: len %u\n", __func__, len);
+
+ /* opaque body */
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return -ENOBUFS;
+
+ if (!nfs_map_string_to_numeric((char *)p, len, id))
+ return -EINVAL;
+
+ return 0;
+}
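
The owner/group strings are plain XDR opaques holding stringified ids; a worked example of what decode_name() consumes (illustrative bytes only, not taken from a trace):

/*
 * Wire form consumed by decode_name():
 *
 *   00 00 00 04          opaque length = 4
 *   '1' '0' '0' '0'      opaque body   = "1000"
 *
 * nfs_map_string_to_numeric("1000", 4, &id) then yields id == 1000.
 * Principal names are rejected until Kerberos-to-DS support is added.
 */
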
+
+static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
+{
+ int i;
+
+ if (fls->mirror_array) {
+ for (i = 0; i < fls->mirror_array_cnt; i++) {
+ /* mirror_ds is normally freed in
+ * .free_deviceid_node, but we still do it here
+ * for the .alloc_lseg error path */
+ if (fls->mirror_array[i]) {
+ kfree(fls->mirror_array[i]->fh_versions);
+ nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
+ kfree(fls->mirror_array[i]);
+ }
+ }
+ kfree(fls->mirror_array);
+ fls->mirror_array = NULL;
+ }
+}
+
+static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
+{
+ int ret = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ /* FIXME: remove this check when layout segment support is added */
+ if (lgr->range.offset != 0 ||
+ lgr->range.length != NFS4_MAX_UINT64) {
+ dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ dprintk("--> %s returns %d\n", __func__, ret);
+ return ret;
+}
+
+static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
+{
+ if (fls) {
+ ff_layout_free_mirror_array(fls);
+ kfree(fls);
+ }
+}
+
+static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
+{
+ struct nfs4_ff_layout_mirror *tmp;
+ int i, j;
+
+ for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
+ for (j = i + 1; j < fls->mirror_array_cnt; j++)
+ if (fls->mirror_array[i]->efficiency <
+ fls->mirror_array[j]->efficiency) {
+ tmp = fls->mirror_array[i];
+ fls->mirror_array[i] = fls->mirror_array[j];
+ fls->mirror_array[j] = tmp;
+ }
+ }
+}
+
+static struct pnfs_layout_segment *
+ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_segment *ret;
+ struct nfs4_ff_layout_segment *fls = NULL;
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ u64 stripe_unit;
+ u32 mirror_array_cnt;
+ __be32 *p;
+ int i, rc;
+
+ dprintk("--> %s\n", __func__);
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ return ERR_PTR(-ENOMEM);
+
+ xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
+ lgr->layoutp->len);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* stripe unit and mirror_array_cnt */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 8 + 4);
+ if (!p)
+ goto out_err_free;
+
+ p = xdr_decode_hyper(p, &stripe_unit);
+ mirror_array_cnt = be32_to_cpup(p++);
+ dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
+ stripe_unit, mirror_array_cnt);
+
+ if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
+ mirror_array_cnt == 0)
+ goto out_err_free;
+
+ rc = -ENOMEM;
+ fls = kzalloc(sizeof(*fls), gfp_flags);
+ if (!fls)
+ goto out_err_free;
+
+ fls->mirror_array_cnt = mirror_array_cnt;
+ fls->stripe_unit = stripe_unit;
+ fls->mirror_array = kcalloc(fls->mirror_array_cnt,
+ sizeof(fls->mirror_array[0]), gfp_flags);
+ if (fls->mirror_array == NULL)
+ goto out_err_free;
+
+ for (i = 0; i < fls->mirror_array_cnt; i++) {
+ struct nfs4_deviceid devid;
+ struct nfs4_deviceid_node *idnode;
+ u32 ds_count;
+ u32 fh_count;
+ int j;
+
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ ds_count = be32_to_cpup(p);
+
+ /* FIXME: allow for striping? */
+ if (ds_count != 1)
+ goto out_err_free;
+
+ fls->mirror_array[i] =
+ kzalloc(sizeof(struct nfs4_ff_layout_mirror),
+ gfp_flags);
+ if (fls->mirror_array[i] == NULL) {
+ rc = -ENOMEM;
+ goto out_err_free;
+ }
+
+ spin_lock_init(&fls->mirror_array[i]->lock);
+ fls->mirror_array[i]->ds_count = ds_count;
+
+ /* deviceid */
+ rc = decode_deviceid(&stream, &devid);
+ if (rc)
+ goto out_err_free;
+
+ idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
+ &devid, lh->plh_lc_cred,
+ gfp_flags);
+ /*
+ * Upon success, mirror_ds was either allocated by a previous
+ * getdeviceinfo or newly by .alloc_deviceid_node;
+ * nfs4_find_get_deviceid failure is indeed a getdeviceinfo failure.
+ */
+ if (idnode)
+ fls->mirror_array[i]->mirror_ds =
+ FF_LAYOUT_MIRROR_DS(idnode);
+ else
+ goto out_err_free;
+
+ /* efficiency */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ fls->mirror_array[i]->efficiency = be32_to_cpup(p);
+
+ /* stateid */
+ rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
+ if (rc)
+ goto out_err_free;
+
+ /* fh */
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ fh_count = be32_to_cpup(p);
+
+ fls->mirror_array[i]->fh_versions =
+ kzalloc(fh_count * sizeof(struct nfs_fh),
+ gfp_flags);
+ if (fls->mirror_array[i]->fh_versions == NULL) {
+ rc = -ENOMEM;
+ goto out_err_free;
+ }
+
+ for (j = 0; j < fh_count; j++) {
+ rc = decode_nfs_fh(&stream,
+ &fls->mirror_array[i]->fh_versions[j]);
+ if (rc)
+ goto out_err_free;
+ }
+
+ fls->mirror_array[i]->fh_versions_cnt = fh_count;
+
+ /* user */
+ rc = decode_name(&stream, &fls->mirror_array[i]->uid);
+ if (rc)
+ goto out_err_free;
+
+ /* group */
+ rc = decode_name(&stream, &fls->mirror_array[i]->gid);
+ if (rc)
+ goto out_err_free;
+
+ dprintk("%s: uid %d gid %d\n", __func__,
+ fls->mirror_array[i]->uid,
+ fls->mirror_array[i]->gid);
+ }
+
+ ff_layout_sort_mirrors(fls);
+ rc = ff_layout_check_layout(lgr);
+ if (rc)
+ goto out_err_free;
+
+ ret = &fls->generic_hdr;
+ dprintk("<-- %s (success)\n", __func__);
+out_free_page:
+ __free_page(scratch);
+ return ret;
+out_err_free:
+ _ff_layout_free_lseg(fls);
+ ret = ERR_PTR(rc);
+ dprintk("<-- %s (%d)\n", __func__, rc);
+ goto out_free_page;
+}
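
For orientation, this is the shape of the layout body the decoder above walks, in the order of the xdr_inline_decode() calls (a schematic reading of the code, not the authoritative flexfiles XDR):

/*
 *   stripe_unit          u64
 *   mirror_array_cnt     u32   1..NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT
 *   per mirror:
 *     ds_count           u32   must be 1 (no striping yet)
 *     deviceid           NFS4_DEVICEID4_SIZE bytes
 *     efficiency         u32
 *     stateid            NFS4_STATEID_SIZE bytes
 *     fh_count           u32, followed by fh_count file handles
 *     uid, gid           XDR opaques with stringified ids (decode_name)
 */
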
+
+static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ return true;
+
+ return false;
+}
+
+static void
+ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ int i;
+
+ dprintk("--> %s\n", __func__);
+
+ for (i = 0; i < fls->mirror_array_cnt; i++) {
+ if (fls->mirror_array[i]) {
+ nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
+ fls->mirror_array[i]->mirror_ds = NULL;
+ if (fls->mirror_array[i]->cred) {
+ put_rpccred(fls->mirror_array[i]->cred);
+ fls->mirror_array[i]->cred = NULL;
+ }
+ }
+ }
+
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ struct nfs4_flexfile_layout *ffl;
+ struct inode *inode;
+
+ ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
+ inode = ffl->generic_hdr.plh_inode;
+ spin_lock(&inode->i_lock);
+ if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
+ ffl->commit_info.nbuckets = 0;
+ kfree(ffl->commit_info.buckets);
+ ffl->commit_info.buckets = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ _ff_layout_free_lseg(fls);
+}
+
+/* Return 1 until we have multiple lsegs support */
+static int
+ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
+{
+ return 1;
+}
+
+static int
+ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ gfp_t gfp_flags)
+{
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ struct pnfs_commit_bucket *buckets;
+ int size;
+
+ if (cinfo->ds->nbuckets != 0) {
+ /* This assumes there is only one RW lseg per file.
+ * To support multiple lseg per file, we need to
+ * change struct pnfs_commit_bucket to allow dynamic
+ * increasing nbuckets.
+ */
+ return 0;
+ }
+
+ size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
+
+ buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
+ gfp_flags);
+ if (!buckets)
+ return -ENOMEM;
+ else {
+ int i;
+
+ spin_lock(cinfo->lock);
+ if (cinfo->ds->nbuckets != 0)
+ kfree(buckets);
+ else {
+ cinfo->ds->buckets = buckets;
+ cinfo->ds->nbuckets = size;
+ for (i = 0; i < size; i++) {
+ INIT_LIST_HEAD(&buckets[i].written);
+ INIT_LIST_HEAD(&buckets[i].committing);
+ /* mark direct verifier as unset */
+ buckets[i].direct_verf.committed =
+ NFS_INVALID_STABLE_HOW;
+ }
+ }
+ spin_unlock(cinfo->lock);
+ return 0;
+ }
+}
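
With a single RW lseg per file (ff_layout_get_lseg_count() returns 1), bucket i corresponds directly to mirror i, which is what ff_layout_mark_request_commit() below relies on when it indexes buckets[ds_commit_idx]. A sketch of the relationship, using the helpers defined in this patch:

/* One commit bucket per mirror, per (currently single) RW lseg ... */
size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
/* ... so a write request later lands in the bucket of its mirror: */
list = &cinfo->ds->buckets[ds_commit_idx].written;
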
+
+static struct nfs4_pnfs_ds *
+ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
+ int *best_idx)
+{
+ struct nfs4_ff_layout_segment *fls;
+ struct nfs4_pnfs_ds *ds;
+ int idx;
+
+ fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
+ /* mirrors are sorted by efficiency */
+ for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
+ if (ds) {
+ *best_idx = idx;
+ return ds;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ struct nfs_pgio_mirror *pgm;
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_pnfs_ds *ds;
+ int ds_idx;
+
+ /* Use full layout for now */
+ if (!pgio->pg_lseg)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_READ,
+ GFP_KERNEL);
+ /* If no lseg, fall back to read through mds */
+ if (pgio->pg_lseg == NULL)
+ goto out_mds;
+
+ ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
+ if (!ds)
+ goto out_mds;
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
+
+ pgio->pg_mirror_idx = ds_idx;
+
+ /* read always uses only one mirror - idx 0 for pgio layer */
+ pgm = &pgio->pg_mirrors[0];
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+
+ return;
+out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_read_mds(pgio);
+}
+
+static void
+ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs_pgio_mirror *pgm;
+ struct nfs_commit_info cinfo;
+ struct nfs4_pnfs_ds *ds;
+ int i;
+ int status;
+
+ if (!pgio->pg_lseg)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ GFP_NOFS);
+ /* If no lseg, fall back to write through mds */
+ if (pgio->pg_lseg == NULL)
+ goto out_mds;
+
+ nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
+ status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
+ if (status < 0)
+ goto out_mds;
+
+ /* Use a direct mapping of ds_idx to pgio mirror_idx */
+ if (WARN_ON_ONCE(pgio->pg_mirror_count !=
+ FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
+ goto out_mds;
+
+ for (i = 0; i < pgio->pg_mirror_count; i++) {
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
+ if (!ds)
+ goto out_mds;
+ pgm = &pgio->pg_mirrors[i];
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
+ }
+
+ return;
+
+out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_write_mds(pgio);
+}
+
+static unsigned int
+ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ if (!pgio->pg_lseg)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ GFP_NOFS);
+ if (pgio->pg_lseg)
+ return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
+
+ /* no lseg means that pnfs is not in use, so no mirroring here */
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_write_mds(pgio);
+ return 1;
+}
+
+static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
+ .pg_init = ff_layout_pg_init_read,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
+};
+
+static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
+ .pg_init = ff_layout_pg_init_write,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_writepages,
+ .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
+};
+
+static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
+{
+ struct rpc_task *task = &hdr->task;
+
+ pnfs_layoutcommit_inode(hdr->inode, false);
+
+ if (retry_pnfs) {
+ dprintk("%s Reset task %5u for i/o through pNFS "
+ "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
+ hdr->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(hdr->inode),
+ hdr->args.count,
+ (unsigned long long)hdr->args.offset);
+
+ if (!hdr->dreq) {
+ struct nfs_open_context *ctx;
+
+ ctx = nfs_list_entry(hdr->pages.next)->wb_context;
+ set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
+ hdr->completion_ops->error_cleanup(&hdr->pages);
+ } else {
+ nfs_direct_set_resched_writes(hdr->dreq);
+ /* fake unstable write to let common nfs resend pages */
+ hdr->verf.committed = NFS_UNSTABLE;
+ hdr->good_bytes = 0;
+ }
+ return;
+ }
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
+ hdr->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(hdr->inode),
+ hdr->args.count,
+ (unsigned long long)hdr->args.offset);
+
+ task->tk_status = pnfs_write_done_resend_to_mds(hdr);
+ }
+}
+
+static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
+{
+ struct rpc_task *task = &hdr->task;
+
+ pnfs_layoutcommit_inode(hdr->inode, false);
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
+ hdr->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(hdr->inode),
+ hdr->args.count,
+ (unsigned long long)hdr->args.offset);
+
+ task->tk_status = pnfs_read_done_resend_to_mds(hdr);
+ }
+}
+
+static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+ int idx)
+{
+ struct pnfs_layout_hdr *lo = lseg->pls_layout;
+ struct inode *inode = lo->plh_inode;
+ struct nfs_server *mds_server = NFS_SERVER(inode);
+
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs_client *mds_client = mds_server->nfs_client;
+ struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
+
+ if (task->tk_status >= 0)
+ return 0;
+
+ switch (task->tk_status) {
+ /* MDS state errors */
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ if (state == NULL)
+ break;
+ nfs_remove_bad_delegation(state->inode);
+ case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+ if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
+ goto out_bad_stateid;
+ goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ if (state != NULL) {
+ if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
+ goto out_bad_stateid;
+ }
+ nfs4_schedule_lease_recovery(mds_client);
+ goto wait_on_recovery;
+ /* DS session errors */
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session. Exchangeid "
+ "flags 0x%x\n", __func__, task->tk_status,
+ clp->cl_exchange_flags);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+ break;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
+ break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ break;
+ /* Invalidate Layout errors */
+ case -NFS4ERR_PNFS_NO_LAYOUT:
+ case -ESTALE: /* mapped NFS4ERR_STALE */
+ case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
+ case -EISDIR: /* mapped NFS4ERR_ISDIR */
+ case -NFS4ERR_FHEXPIRED:
+ case -NFS4ERR_WRONG_TYPE:
+ dprintk("%s Invalid layout error %d\n", __func__,
+ task->tk_status);
+ /*
+ * Destroy layout so new i/o will get a new layout.
+ * Layout will not be destroyed until all current lseg
+ * references are put. Mark layout as invalid to resend failed
+ * i/o and all i/o waiting on the slot table to the MDS until
+ * layout is destroyed and a new valid layout is obtained.
+ */
+ pnfs_destroy_layout(NFS_I(inode));
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ goto reset;
+ /* RPC connection errors */
+ case -ECONNREFUSED:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EIO:
+ case -ETIMEDOUT:
+ case -EPIPE:
+ dprintk("%s DS connection error %d\n", __func__,
+ task->tk_status);
+ nfs4_mark_deviceid_unavailable(devid);
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ /* fall through */
+ default:
+ if (ff_layout_has_available_ds(lseg))
+ return -NFS4ERR_RESET_TO_PNFS;
+reset:
+ dprintk("%s Retry through MDS. Error %d\n", __func__,
+ task->tk_status);
+ return -NFS4ERR_RESET_TO_MDS;
+ }
+out:
+ task->tk_status = 0;
+ return -EAGAIN;
+out_bad_stateid:
+ task->tk_status = -EIO;
+ return 0;
+wait_on_recovery:
+ rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
+ rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
+ goto out;
+}
+
+/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
+static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ struct pnfs_layout_segment *lseg,
+ int idx)
+{
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
+ if (task->tk_status >= 0)
+ return 0;
+
+ if (task->tk_status != -EJUKEBOX) {
+ dprintk("%s DS connection error %d\n", __func__,
+ task->tk_status);
+ nfs4_mark_deviceid_unavailable(devid);
+ if (ff_layout_has_available_ds(lseg))
+ return -NFS4ERR_RESET_TO_PNFS;
+ else
+ return -NFS4ERR_RESET_TO_MDS;
+ }
+
+ if (task->tk_status == -EJUKEBOX)
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+ return -EAGAIN;
+}
+
+static int ff_layout_async_handle_error(struct rpc_task *task,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+ int idx)
+{
+ int vers = clp->cl_nfs_mod->rpc_vers->number;
+
+ switch (vers) {
+ case 3:
+ return ff_layout_async_handle_error_v3(task, lseg, idx);
+ case 4:
+ return ff_layout_async_handle_error_v4(task, state, clp,
+ lseg, idx);
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ int idx, u64 offset, u64 length,
+ u32 status, int opnum)
+{
+ struct nfs4_ff_layout_mirror *mirror;
+ int err;
+
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, offset, length, status, opnum,
+ GFP_NOIO);
+ dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
+}
+
+/* NFS_PROTO call done callback routines */
+
+static int ff_layout_read_done_cb(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ struct inode *inode;
+ int err;
+
+ trace_nfs4_pnfs_read(hdr, task->tk_status);
+ if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
+ hdr->res.op_status = NFS4ERR_NXIO;
+ if (task->tk_status < 0 && hdr->res.op_status)
+ ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ hdr->args.offset, hdr->args.count,
+ hdr->res.op_status, OP_READ);
+ err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ hdr->ds_clp, hdr->lseg,
+ hdr->pgio_mirror_idx);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_PNFS:
+ set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &hdr->lseg->pls_layout->plh_flags);
+ pnfs_read_resend_pnfs(hdr);
+ return task->tk_status;
+ case -NFS4ERR_RESET_TO_MDS:
+ inode = hdr->lseg->pls_layout->plh_inode;
+ pnfs_error_mark_layout_for_return(inode, hdr->lseg);
+ ff_layout_reset_read(hdr);
+ return task->tk_status;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * We reference the rpc_cred of the first WRITE that triggers the need for
+ * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
+ * rfc5661 is not clear about which credential should be used.
+ *
+ * The flexfile client should treat a DS-replied FILE_SYNC as DATA_SYNC, so,
+ * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751,
+ * we always send a layoutcommit after DS writes.
+ */
+static void
+ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
+{
+ pnfs_set_layoutcommit(hdr);
+ dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
+ (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
+}
+
+static bool
+ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
+{
+ /* No mirroring for now */
+ struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
+ return ff_layout_test_devid_unavailable(node);
+}
+
+static int ff_layout_read_prepare_common(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
+ rpc_exit(task, -EIO);
+ return -EIO;
+ }
+ if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
+ dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+ if (ff_layout_has_available_ds(hdr->lseg))
+ pnfs_read_resend_pnfs(hdr);
+ else
+ ff_layout_reset_read(hdr);
+ rpc_exit(task, 0);
+ return -EAGAIN;
+ }
+ hdr->pgio_done_cb = ff_layout_read_done_cb;
+
+ return 0;
+}
+
+/*
+ * Call ops for the async read/write cases
+ * In the case of dense layouts, the offset needs to be reset to its
+ * original value.
+ */
+static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_read_prepare_common(task, hdr))
+ return;
+
+ rpc_call_start(task);
+}
+
+static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task)
+{
+ if (ds_clp->cl_session)
+ return nfs41_setup_sequence(ds_clp->cl_session,
+ args,
+ res,
+ task);
+ return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
+ args,
+ res,
+ task);
+}
+
+static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_read_prepare_common(task, hdr))
+ return;
+
+ if (ff_layout_setup_sequence(hdr->ds_clp,
+ &hdr->args.seq_args,
+ &hdr->res.seq_res,
+ task))
+ return;
+
+ if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
+ hdr->args.lock_context, FMODE_READ) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+}
+
+static void ff_layout_read_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
+ task->tk_status == 0) {
+ nfs4_sequence_done(task, &hdr->res.seq_res);
+ return;
+ }
+
+ /* Note this may cause RPC to be resent */
+ hdr->mds_ops->rpc_call_done(task, hdr);
+}
+
+static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ rpc_count_iostats_metrics(task,
+ &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
+}
+
+static int ff_layout_write_done_cb(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ struct inode *inode;
+ int err;
+
+ trace_nfs4_pnfs_write(hdr, task->tk_status);
+ if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
+ hdr->res.op_status = NFS4ERR_NXIO;
+ if (task->tk_status < 0 && hdr->res.op_status)
+ ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ hdr->args.offset, hdr->args.count,
+ hdr->res.op_status, OP_WRITE);
+ err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ hdr->ds_clp, hdr->lseg,
+ hdr->pgio_mirror_idx);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_PNFS:
+ case -NFS4ERR_RESET_TO_MDS:
+ inode = hdr->lseg->pls_layout->plh_inode;
+ pnfs_error_mark_layout_for_return(inode, hdr->lseg);
+ if (err == -NFS4ERR_RESET_TO_PNFS) {
+ pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
+ ff_layout_reset_write(hdr, true);
+ } else {
+ pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
+ ff_layout_reset_write(hdr, false);
+ }
+ return task->tk_status;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ if (hdr->res.verf->committed == NFS_FILE_SYNC ||
+ hdr->res.verf->committed == NFS_DATA_SYNC)
+ ff_layout_set_layoutcommit(hdr);
+
+ return 0;
+}
+
+static int ff_layout_commit_done_cb(struct rpc_task *task,
+ struct nfs_commit_data *data)
+{
+ struct inode *inode;
+ int err;
+
+ trace_nfs4_pnfs_commit_ds(data, task->tk_status);
+ if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
+ data->res.op_status = NFS4ERR_NXIO;
+ if (task->tk_status < 0 && data->res.op_status)
+ ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
+ data->args.offset, data->args.count,
+ data->res.op_status, OP_COMMIT);
+ err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
+ data->lseg, data->ds_commit_index);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_PNFS:
+ case -NFS4ERR_RESET_TO_MDS:
+ inode = data->lseg->pls_layout->plh_inode;
+ pnfs_error_mark_layout_for_return(inode, data->lseg);
+ if (err == -NFS4ERR_RESET_TO_PNFS)
+ pnfs_set_retry_layoutget(data->lseg->pls_layout);
+ else
+ pnfs_clear_retry_layoutget(data->lseg->pls_layout);
+ pnfs_generic_prepare_to_resend_writes(data);
+ return -EAGAIN;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ if (data->verf.committed == NFS_UNSTABLE)
+ pnfs_commit_set_layoutcommit(data);
+
+ return 0;
+}
+
+static int ff_layout_write_prepare_common(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
+ rpc_exit(task, -EIO);
+ return -EIO;
+ }
+
+ if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
+ bool retry_pnfs;
+
+ retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
+ dprintk("%s task %u reset io to %s\n", __func__,
+ task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
+ ff_layout_reset_write(hdr, retry_pnfs);
+ rpc_exit(task, 0);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_write_prepare_common(task, hdr))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_write_prepare_common(task, hdr))
+ return;
+
+ if (ff_layout_setup_sequence(hdr->ds_clp,
+ &hdr->args.seq_args,
+ &hdr->res.seq_res,
+ task))
+ return;
+
+ if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
+ hdr->args.lock_context, FMODE_WRITE) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+}
+
+static void ff_layout_write_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
+ task->tk_status == 0) {
+ nfs4_sequence_done(task, &hdr->res.seq_res);
+ return;
+ }
+
+ /* Note this may cause RPC to be resent */
+ hdr->mds_ops->rpc_call_done(task, hdr);
+}
+
+static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ rpc_count_iostats_metrics(task,
+ &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
+}
+
+static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
+{
+ rpc_call_start(task);
+}
+
+static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
+
+ ff_layout_setup_sequence(wdata->ds_clp,
+ &wdata->args.seq_args,
+ &wdata->res.seq_res,
+ task);
+}
+
+static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *cdata = data;
+
+ rpc_count_iostats_metrics(task,
+ &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
+}
+
+static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_read_prepare_v3,
+ .rpc_call_done = ff_layout_read_call_done,
+ .rpc_count_stats = ff_layout_read_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_read_prepare_v4,
+ .rpc_call_done = ff_layout_read_call_done,
+ .rpc_count_stats = ff_layout_read_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_write_prepare_v3,
+ .rpc_call_done = ff_layout_write_call_done,
+ .rpc_count_stats = ff_layout_write_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_write_prepare_v4,
+ .rpc_call_done = ff_layout_write_call_done,
+ .rpc_count_stats = ff_layout_write_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_commit_prepare_v3,
+ .rpc_call_done = pnfs_generic_write_commit_done,
+ .rpc_count_stats = ff_layout_commit_count_stats,
+ .rpc_release = pnfs_generic_commit_release,
+};
+
+static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_commit_prepare_v4,
+ .rpc_call_done = pnfs_generic_write_commit_done,
+ .rpc_count_stats = ff_layout_commit_count_stats,
+ .rpc_release = pnfs_generic_commit_release,
+};
+
+static enum pnfs_try_status
+ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+{
+ struct pnfs_layout_segment *lseg = hdr->lseg;
+ struct nfs4_pnfs_ds *ds;
+ struct rpc_clnt *ds_clnt;
+ struct rpc_cred *ds_cred;
+ loff_t offset = hdr->args.offset;
+ u32 idx = hdr->pgio_mirror_idx;
+ int vers;
+ struct nfs_fh *fh;
+
+ dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
+ __func__, hdr->inode->i_ino,
+ hdr->args.pgbase, (size_t)hdr->args.count, offset);
+
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+ if (!ds)
+ goto out_failed;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ hdr->inode);
+ if (IS_ERR(ds_clnt))
+ goto out_failed;
+
+ ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
+ if (IS_ERR(ds_cred))
+ goto out_failed;
+
+ vers = nfs4_ff_layout_ds_version(lseg, idx);
+
+ dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
+ ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
+
+ atomic_inc(&ds->ds_clp->cl_count);
+ hdr->ds_clp = ds->ds_clp;
+ fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ if (fh)
+ hdr->args.fh = fh;
+
+ /*
+ * Note that if we ever decide to split across DSes,
+ * then we may need to handle dense-like offsets.
+ */
+ hdr->args.offset = offset;
+ hdr->mds_offset = offset;
+
+ /* Perform an asynchronous read to ds */
+ nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
+ vers == 3 ? &ff_layout_read_call_ops_v3 :
+ &ff_layout_read_call_ops_v4,
+ 0, RPC_TASK_SOFTCONN);
+
+ return PNFS_ATTEMPTED;
+
+out_failed:
+ if (ff_layout_has_available_ds(lseg))
+ return PNFS_TRY_AGAIN;
+ return PNFS_NOT_ATTEMPTED;
+}
+
+/* Perform async writes. */
+static enum pnfs_try_status
+ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+{
+ struct pnfs_layout_segment *lseg = hdr->lseg;
+ struct nfs4_pnfs_ds *ds;
+ struct rpc_clnt *ds_clnt;
+ struct rpc_cred *ds_cred;
+ loff_t offset = hdr->args.offset;
+ int vers;
+ struct nfs_fh *fh;
+ int idx = hdr->pgio_mirror_idx;
+
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
+ if (!ds)
+ return PNFS_NOT_ATTEMPTED;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ hdr->inode);
+ if (IS_ERR(ds_clnt))
+ return PNFS_NOT_ATTEMPTED;
+
+ ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
+ if (IS_ERR(ds_cred))
+ return PNFS_NOT_ATTEMPTED;
+
+ vers = nfs4_ff_layout_ds_version(lseg, idx);
+
+ dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
+ __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
+ offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
+ vers);
+
+ hdr->pgio_done_cb = ff_layout_write_done_cb;
+ atomic_inc(&ds->ds_clp->cl_count);
+ hdr->ds_clp = ds->ds_clp;
+ hdr->ds_commit_idx = idx;
+ fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ if (fh)
+ hdr->args.fh = fh;
+
+ /*
+ * Note that if we ever decide to split across DSes,
+ * then we may need to handle dense-like offsets.
+ */
+ hdr->args.offset = offset;
+
+ /* Perform an asynchronous write */
+ nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
+ vers == 3 ? &ff_layout_write_call_ops_v3 :
+ &ff_layout_write_call_ops_v4,
+ sync, RPC_TASK_SOFTCONN);
+ return PNFS_ATTEMPTED;
+}
+
+static void
+ff_layout_mark_request_commit(struct nfs_page *req,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx)
+{
+ struct list_head *list;
+ struct pnfs_commit_bucket *buckets;
+
+ spin_lock(cinfo->lock);
+ buckets = cinfo->ds->buckets;
+ list = &buckets[ds_commit_idx].written;
+ if (list_empty(list)) {
+ /* Non-empty buckets hold a reference on the lseg. That ref
+ * is normally transferred to the COMMIT call and released
+ * there. It could also be released if the last req is pulled
+ * off due to a rewrite, in which case it will be done in
+ * pnfs_common_clear_request_commit
+ */
+ WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+ buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+ }
+ set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ cinfo->ds->nwritten++;
+
+ /* Do what nfs_request_add_commit_list() does, but add req to the
+ * list without dropping the cinfo lock.
+ */
+ set_bit(PG_CLEAN, &(req)->wb_flags);
+ nfs_list_add_request(req, list);
+ cinfo->mds->ncommit++;
+ spin_unlock(cinfo->lock);
+ if (!cinfo->dreq) {
+ inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
+ BDI_RECLAIMABLE);
+ __mark_inode_dirty(req->wb_context->dentry->d_inode,
+ I_DIRTY_DATASYNC);
+ }
+}
+
+static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ return i;
+}
+
+static struct nfs_fh *
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+
+ /* FIXME: Assume that there is only one NFS version available
+ * for the DS.
+ */
+ return &flseg->mirror_array[i]->fh_versions[0];
+}
+
+static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ struct rpc_clnt *ds_clnt;
+ struct rpc_cred *ds_cred;
+ u32 idx;
+ int vers;
+ struct nfs_fh *fh;
+
+ idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
+ if (!ds)
+ goto out_err;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ data->inode);
+ if (IS_ERR(ds_clnt))
+ goto out_err;
+
+ ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
+ if (IS_ERR(ds_cred))
+ goto out_err;
+
+ vers = nfs4_ff_layout_ds_version(lseg, idx);
+
+ dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
+ data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
+ vers);
+ data->commit_done_cb = ff_layout_commit_done_cb;
+ data->cred = ds_cred;
+ atomic_inc(&ds->ds_clp->cl_count);
+ data->ds_clp = ds->ds_clp;
+ fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ if (fh)
+ data->args.fh = fh;
+ return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
+ vers == 3 ? &ff_layout_commit_call_ops_v3 :
+ &ff_layout_commit_call_ops_v4,
+ how, RPC_TASK_SOFTCONN);
+out_err:
+ pnfs_generic_prepare_to_resend_writes(data);
+ pnfs_generic_commit_release(data);
+ return -EAGAIN;
+}
+
+static int
+ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how, struct nfs_commit_info *cinfo)
+{
+ return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
+ ff_layout_initiate_commit);
+}
+
+static struct pnfs_ds_commit_info *
+ff_layout_get_ds_info(struct inode *inode)
+{
+ struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
+
+ if (layout == NULL)
+ return NULL;
+
+ return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
+}
+
+static void
+ff_layout_free_deveiceid_node(struct nfs4_deviceid_node *d)
+{
+ nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
+ id_node));
+}
+
+static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
+ __be32 *start;
+ int count = 0, ret = 0;
+
+ start = xdr_reserve_space(xdr, 4);
+ if (unlikely(!start))
+ return -E2BIG;
+
+ /* This assumes we always return _ALL_ layouts */
+ spin_lock(&hdr->plh_inode->i_lock);
+ ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
+ spin_unlock(&hdr->plh_inode->i_lock);
+
+ *start = cpu_to_be32(count);
+
+ return ret;
+}
+
+/* report nothing for now */
+static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ if (likely(p))
+ *p = cpu_to_be32(0);
+}
+
+static struct nfs4_deviceid_node *
+ff_layout_alloc_deviceid_node(struct nfs_server *server,
+ struct pnfs_device *pdev, gfp_t gfp_flags)
+{
+ struct nfs4_ff_layout_ds *dsaddr;
+
+ dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
+ if (!dsaddr)
+ return NULL;
+ return &dsaddr->id_node;
+}
+
+static void
+ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
+ __be32 *start;
+
+ dprintk("%s: Begin\n", __func__);
+ start = xdr_reserve_space(xdr, 4);
+ BUG_ON(!start);
+
+ if (ff_layout_encode_ioerr(flo, xdr, args))
+ goto out;
+
+ ff_layout_encode_iostats(flo, xdr, args);
+out:
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+ dprintk("%s: Return\n", __func__);
+}
+
+static struct pnfs_layoutdriver_type flexfilelayout_type = {
+ .id = LAYOUT_FLEX_FILES,
+ .name = "LAYOUT_FLEX_FILES",
+ .owner = THIS_MODULE,
+ .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
+ .free_layout_hdr = ff_layout_free_layout_hdr,
+ .alloc_lseg = ff_layout_alloc_lseg,
+ .free_lseg = ff_layout_free_lseg,
+ .pg_read_ops = &ff_layout_pg_read_ops,
+ .pg_write_ops = &ff_layout_pg_write_ops,
+ .get_ds_info = ff_layout_get_ds_info,
+ .free_deviceid_node = ff_layout_free_deveiceid_node,
+ .mark_request_commit = ff_layout_mark_request_commit,
+ .clear_request_commit = pnfs_generic_clear_request_commit,
+ .scan_commit_lists = pnfs_generic_scan_commit_lists,
+ .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
+ .commit_pagelist = ff_layout_commit_pagelist,
+ .read_pagelist = ff_layout_read_pagelist,
+ .write_pagelist = ff_layout_write_pagelist,
+ .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
+ .encode_layoutreturn = ff_layout_encode_layoutreturn,
+};
+
+static int __init nfs4flexfilelayout_init(void)
+{
+ printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
+ __func__);
+ return pnfs_register_layoutdriver(&flexfilelayout_type);
+}
+
+static void __exit nfs4flexfilelayout_exit(void)
+{
+ printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
+ __func__);
+ pnfs_unregister_layoutdriver(&flexfilelayout_type);
+}
+
+MODULE_ALIAS("nfs-layouttype4-4");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
+
+module_init(nfs4flexfilelayout_init);
+module_exit(nfs4flexfilelayout_exit);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
new file mode 100644
index 000000000000..070f20445b2d
--- /dev/null
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -0,0 +1,155 @@
+/*
+ * NFSv4 flexfile layout driver data structures.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+
+#ifndef FS_NFS_NFS4FLEXFILELAYOUT_H
+#define FS_NFS_NFS4FLEXFILELAYOUT_H
+
+#include "../pnfs.h"
+
+/* XXX: Reject insanely large mirror counts for now to avoid OOM
+ * triggered by network errors etc. */
+#define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096
+
+struct nfs4_ff_ds_version {
+ u32 version;
+ u32 minor_version;
+ u32 rsize;
+ u32 wsize;
+ bool tightly_coupled;
+};
+
+/* chained in global deviceid hlist */
+struct nfs4_ff_layout_ds {
+ struct nfs4_deviceid_node id_node;
+ u32 ds_versions_cnt;
+ struct nfs4_ff_ds_version *ds_versions;
+ struct nfs4_pnfs_ds *ds;
+};
+
+struct nfs4_ff_layout_ds_err {
+ struct list_head list; /* linked in mirror error_list */
+ u64 offset;
+ u64 length;
+ int status;
+ enum nfs_opnum4 opnum;
+ nfs4_stateid stateid;
+ struct nfs4_deviceid deviceid;
+};
+
+struct nfs4_ff_layout_mirror {
+ u32 ds_count;
+ u32 efficiency;
+ struct nfs4_ff_layout_ds *mirror_ds;
+ u32 fh_versions_cnt;
+ struct nfs_fh *fh_versions;
+ nfs4_stateid stateid;
+ struct nfs4_string user_name;
+ struct nfs4_string group_name;
+ u32 uid;
+ u32 gid;
+ struct rpc_cred *cred;
+ spinlock_t lock;
+};
+
+struct nfs4_ff_layout_segment {
+ struct pnfs_layout_segment generic_hdr;
+ u64 stripe_unit;
+ u32 mirror_array_cnt;
+ struct nfs4_ff_layout_mirror **mirror_array;
+};
+
+struct nfs4_flexfile_layout {
+ struct pnfs_layout_hdr generic_hdr;
+ struct pnfs_ds_commit_info commit_info;
+ struct list_head error_list; /* nfs4_ff_layout_ds_err */
+};
+
+static inline struct nfs4_flexfile_layout *
+FF_LAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct nfs4_flexfile_layout, generic_hdr);
+}
+
+static inline struct nfs4_ff_layout_segment *
+FF_LAYOUT_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg,
+ struct nfs4_ff_layout_segment,
+ generic_hdr);
+}
+
+static inline struct nfs4_deviceid_node *
+FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
+{
+ if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt ||
+ FF_LAYOUT_LSEG(lseg)->mirror_array[idx] == NULL ||
+ FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds == NULL)
+ return NULL;
+ return &FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds->id_node;
+}
+
+static inline struct nfs4_ff_layout_ds *
+FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
+{
+ return container_of(node, struct nfs4_ff_layout_ds, id_node);
+}
+
+static inline struct nfs4_ff_layout_mirror *
+FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)
+{
+ if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt)
+ return NULL;
+ return FF_LAYOUT_LSEG(lseg)->mirror_array[idx];
+}
+
+static inline u32
+FF_LAYOUT_MIRROR_COUNT(struct pnfs_layout_segment *lseg)
+{
+ return FF_LAYOUT_LSEG(lseg)->mirror_array_cnt;
+}
+
+static inline bool
+ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
+{
+ return nfs4_test_deviceid_unavailable(node);
+}
+
+static inline int
+nfs4_ff_layout_ds_version(struct pnfs_layout_segment *lseg, u32 ds_idx)
+{
+ return FF_LAYOUT_COMP(lseg, ds_idx)->mirror_ds->ds_versions[0].version;
+}
+
+struct nfs4_ff_layout_ds *
+nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ gfp_t gfp_flags);
+void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
+void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
+int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
+ struct nfs4_ff_layout_mirror *mirror, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ gfp_t gfp_flags);
+int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr, int *count,
+ const struct pnfs_layout_range *range);
+struct nfs_fh *
+nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
+
+struct nfs4_pnfs_ds *
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ bool fail_return);
+
+struct rpc_clnt *
+nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
+ u32 ds_idx,
+ struct nfs_client *ds_clp,
+ struct inode *inode);
+struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
+ u32 ds_idx, struct rpc_cred *mdscred);
+bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+#endif /* FS_NFS_NFS4FLEXFILELAYOUT_H */
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
new file mode 100644
index 000000000000..e2c01f204a95
--- /dev/null
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -0,0 +1,552 @@
+/*
+ * Device operations for the pnfs nfs4 file layout driver.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/sunrpc/addr.h>
+
+#include "../internal.h"
+#include "../nfs4session.h"
+#include "flexfilelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
+static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+
+void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
+{
+ if (mirror_ds)
+ nfs4_put_deviceid_node(&mirror_ds->id_node);
+}
+
+void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
+{
+ nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
+ nfs4_pnfs_ds_put(mirror_ds->ds);
+ kfree(mirror_ds);
+}
+
+/* Decode opaque device data and construct new_ds using it */
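+/*
+ * The opaque data is decoded as: a multipath count followed by that many
+ * DS address entries, then a version count and per-version tuples of
+ * (version, minor_version, rsize, wsize, tightly_coupled).
+ */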
+struct nfs4_ff_layout_ds *
+nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ gfp_t gfp_flags)
+{
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ struct list_head dsaddrs;
+ struct nfs4_pnfs_ds_addr *da;
+ struct nfs4_ff_layout_ds *new_ds = NULL;
+ struct nfs4_ff_ds_version *ds_versions = NULL;
+ u32 mp_count;
+ u32 version_count;
+ __be32 *p;
+ int i, ret = -ENOMEM;
+
+ /* set up xdr stream */
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ goto out_err;
+
+ new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
+ if (!new_ds)
+ goto out_scratch;
+
+ nfs4_init_deviceid_node(&new_ds->id_node,
+ server,
+ &pdev->dev_id);
+ INIT_LIST_HEAD(&dsaddrs);
+
+ xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* multipath count */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_drain_dsaddrs;
+ mp_count = be32_to_cpup(p);
+ dprintk("%s: multipath ds count %d\n", __func__, mp_count);
+
+ for (i = 0; i < mp_count; i++) {
+ /* multipath ds */
+ da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
+ &stream, gfp_flags);
+ if (da)
+ list_add_tail(&da->da_node, &dsaddrs);
+ }
+ if (list_empty(&dsaddrs)) {
+ dprintk("%s: no suitable DS addresses found\n",
+ __func__);
+ ret = -ENOMEDIUM;
+ goto out_err_drain_dsaddrs;
+ }
+
+ /* version count */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_drain_dsaddrs;
+ version_count = be32_to_cpup(p);
+ dprintk("%s: version count %d\n", __func__, version_count);
+
+ ds_versions = kcalloc(version_count, sizeof(struct nfs4_ff_ds_version),
+ gfp_flags);
+ if (!ds_versions)
+ goto out_err_drain_dsaddrs;
+
+ for (i = 0; i < version_count; i++) {
+ /* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
+ * tightly_coupled(4) */
+ p = xdr_inline_decode(&stream, 20);
+ if (unlikely(!p))
+ goto out_err_drain_dsaddrs;
+ ds_versions[i].version = be32_to_cpup(p++);
+ ds_versions[i].minor_version = be32_to_cpup(p++);
+ ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
+ ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
+ ds_versions[i].tightly_coupled = be32_to_cpup(p);
+
+ if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
+ ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
+ if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
+ ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;
+
+ if (ds_versions[i].version != 3 || ds_versions[i].minor_version != 0) {
+ dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
+ i, ds_versions[i].version,
+ ds_versions[i].minor_version);
+ ret = -EPROTONOSUPPORT;
+ goto out_err_drain_dsaddrs;
+ }
+
+ dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
+ __func__, i, ds_versions[i].version,
+ ds_versions[i].minor_version,
+ ds_versions[i].rsize,
+ ds_versions[i].wsize,
+ ds_versions[i].tightly_coupled);
+ }
+
+ new_ds->ds_versions = ds_versions;
+ new_ds->ds_versions_cnt = version_count;
+
+ new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ if (!new_ds->ds)
+ goto out_err_drain_dsaddrs;
+
+ /* If DS was already in cache, free ds addrs */
+ while (!list_empty(&dsaddrs)) {
+ da = list_first_entry(&dsaddrs,
+ struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ __free_page(scratch);
+ return new_ds;
+
+out_err_drain_dsaddrs:
+ while (!list_empty(&dsaddrs)) {
+ da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ kfree(ds_versions);
+out_scratch:
+ __free_page(scratch);
+out_err:
+ kfree(new_ds);
+
+ dprintk("%s ERROR: returning %d\n", __func__, ret);
+ return NULL;
+}
+
+static u64
+end_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ end = start + len;
+ return end >= start ? end : NFS4_MAX_UINT64;
+}
+
+static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
+ u64 offset, u64 length)
+{
+ u64 end;
+
+ end = max_t(u64, end_offset(err->offset, err->length),
+ end_offset(offset, length));
+ err->offset = min_t(u64, err->offset, offset);
+ err->length = end - err->offset;
+}
+
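+/*
+ * Two tracked errors can be merged when they share the same status,
+ * operation, stateid and deviceid, and their byte ranges overlap or
+ * are contiguous.
+ */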
+static bool ds_error_can_merge(struct nfs4_ff_layout_ds_err *err, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ nfs4_stateid *stateid,
+ struct nfs4_deviceid *deviceid)
+{
+ return err->status == status && err->opnum == opnum &&
+ nfs4_stateid_match(&err->stateid, stateid) &&
+ !memcmp(&err->deviceid, deviceid, sizeof(*deviceid)) &&
+ end_offset(err->offset, err->length) >= offset &&
+ err->offset <= end_offset(offset, length);
+}
+
+static bool merge_ds_error(struct nfs4_ff_layout_ds_err *old,
+ struct nfs4_ff_layout_ds_err *new)
+{
+ if (!ds_error_can_merge(old, new->offset, new->length, new->status,
+ new->opnum, &new->stateid, &new->deviceid))
+ return false;
+
+ extend_ds_error(old, new->offset, new->length);
+ return true;
+}
+
+static bool
+ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
+ struct nfs4_ff_layout_ds_err *dserr)
+{
+ struct nfs4_ff_layout_ds_err *err;
+
+ list_for_each_entry(err, &flo->error_list, list) {
+ if (merge_ds_error(err, dserr)) {
+ return true;
+ }
+ }
+
+ list_add(&dserr->list, &flo->error_list);
+ return false;
+}
+
+static bool
+ff_layout_update_ds_error(struct nfs4_flexfile_layout *flo, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ nfs4_stateid *stateid, struct nfs4_deviceid *deviceid)
+{
+ bool found = false;
+ struct nfs4_ff_layout_ds_err *err;
+
+ list_for_each_entry(err, &flo->error_list, list) {
+ if (ds_error_can_merge(err, offset, length, status, opnum,
+ stateid, deviceid)) {
+ found = true;
+ extend_ds_error(err, offset, length);
+ break;
+ }
+ }
+
+ return found;
+}
+
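+/*
+ * Record a DS error for later reporting via LAYOUTRETURN.  Try to merge
+ * into an existing entry under the inode lock; otherwise drop the lock,
+ * allocate a new entry, and retake the lock to add it, freeing the
+ * allocation if an existing entry absorbed it in the meantime.
+ */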
+int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
+ struct nfs4_ff_layout_mirror *mirror, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ gfp_t gfp_flags)
+{
+ struct nfs4_ff_layout_ds_err *dserr;
+ bool needfree;
+
+ if (status == 0)
+ return 0;
+
+ if (mirror->mirror_ds == NULL)
+ return -EINVAL;
+
+ spin_lock(&flo->generic_hdr.plh_inode->i_lock);
+ if (ff_layout_update_ds_error(flo, offset, length, status, opnum,
+ &mirror->stateid,
+ &mirror->mirror_ds->id_node.deviceid)) {
+ spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
+ return 0;
+ }
+ spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
+ dserr = kmalloc(sizeof(*dserr), gfp_flags);
+ if (!dserr)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dserr->list);
+ dserr->offset = offset;
+ dserr->length = length;
+ dserr->status = status;
+ dserr->opnum = opnum;
+ nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
+ memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
+ NFS4_DEVICEID4_SIZE);
+
+ spin_lock(&flo->generic_hdr.plh_inode->i_lock);
+ needfree = ff_layout_add_ds_error_locked(flo, dserr);
+ spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
+ if (needfree)
+ kfree(dserr);
+
+ return 0;
+}
+
+/* currently we only support AUTH_NONE and AUTH_SYS */
+static rpc_authflavor_t
+nfs4_ff_layout_choose_authflavor(struct nfs4_ff_layout_mirror *mirror)
+{
+ if (mirror->uid == (u32)-1)
+ return RPC_AUTH_NULL;
+ return RPC_AUTH_UNIX;
+}
+
+/* fetch cred for NFSv3 DS */
+static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
+ struct nfs4_pnfs_ds *ds)
+{
+ if (ds->ds_clp && !mirror->cred &&
+ mirror->mirror_ds->ds_versions[0].version == 3) {
+ struct rpc_auth *auth = ds->ds_clp->cl_rpcclient->cl_auth;
+ struct rpc_cred *cred;
+ struct auth_cred acred = {
+ .uid = make_kuid(&init_user_ns, mirror->uid),
+ .gid = make_kgid(&init_user_ns, mirror->gid),
+ };
+
+ /* AUTH_NULL ignores acred */
+ cred = auth->au_ops->lookup_cred(auth, &acred, 0);
+ if (IS_ERR(cred)) {
+ dprintk("%s: lookup_cred failed with %ld\n",
+ __func__, PTR_ERR(cred));
+ return PTR_ERR(cred);
+ }
+ mirror->cred = cred;
+ }
+ return 0;
+}
+
+struct nfs_fh *
+nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
+ struct nfs_fh *fh = NULL;
+ struct nfs4_deviceid_node *devid;
+
+ if (mirror == NULL || mirror->mirror_ds == NULL ||
+ mirror->mirror_ds->ds == NULL) {
+ printk(KERN_ERR "NFS: %s: No data server for mirror offset index %d\n",
+ __func__, mirror_idx);
+ if (mirror && mirror->mirror_ds) {
+ devid = &mirror->mirror_ds->id_node;
+ pnfs_generic_mark_devid_invalid(devid);
+ }
+ goto out;
+ }
+
+ /* FIXME: For now assume there is only 1 version available for the DS */
+ fh = &mirror->fh_versions[0];
+out:
+ return fh;
+}
+
+/* Upon return, either ds is connected, or ds is NULL */
+struct nfs4_pnfs_ds *
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ bool fail_return)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+ struct nfs4_pnfs_ds *ds = NULL;
+ struct nfs4_deviceid_node *devid;
+ struct inode *ino = lseg->pls_layout->plh_inode;
+ struct nfs_server *s = NFS_SERVER(ino);
+ unsigned int max_payload;
+ rpc_authflavor_t flavor;
+
+ if (mirror == NULL || mirror->mirror_ds == NULL ||
+ mirror->mirror_ds->ds == NULL) {
+ printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
+ __func__, ds_idx);
+ if (mirror && mirror->mirror_ds) {
+ devid = &mirror->mirror_ds->id_node;
+ pnfs_generic_mark_devid_invalid(devid);
+ }
+ goto out;
+ }
+
+ devid = &mirror->mirror_ds->id_node;
+ if (ff_layout_test_devid_unavailable(devid))
+ goto out;
+
+ ds = mirror->mirror_ds->ds;
+ /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
+ smp_rmb();
+ if (ds->ds_clp)
+ goto out;
+
+ flavor = nfs4_ff_layout_choose_authflavor(mirror);
+
+ /* FIXME: For now we assume the server sent only one version of NFS
+ * to use for the DS.
+ */
+ nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ dataserver_retrans,
+ mirror->mirror_ds->ds_versions[0].version,
+ mirror->mirror_ds->ds_versions[0].minor_version,
+ flavor);
+
+ /* connect success, check rsize/wsize limit */
+ if (ds->ds_clp) {
+ max_payload =
+ nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
+ NULL);
+ if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].rsize = max_payload;
+ if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].wsize = max_payload;
+ } else {
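+ /*
+ * The connect attempt failed: record an NXIO error against this
+ * mirror and arrange for the layout to be returned, either right
+ * away or before close, based on fail_return and whether any
+ * other mirror still has a usable DS.
+ */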
+ ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, lseg->pls_range.offset,
+ lseg->pls_range.length, NFS4ERR_NXIO,
+ OP_ILLEGAL, GFP_NOIO);
+ if (fail_return) {
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ if (ff_layout_has_available_ds(lseg))
+ pnfs_set_retry_layoutget(lseg->pls_layout);
+ else
+ pnfs_clear_retry_layoutget(lseg->pls_layout);
+
+ } else {
+ if (ff_layout_has_available_ds(lseg))
+ set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lseg->pls_layout->plh_flags);
+ else {
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ pnfs_clear_retry_layoutget(lseg->pls_layout);
+ }
+ }
+ }
+
+ if (ff_layout_update_mirror_cred(mirror, ds))
+ ds = NULL;
+out:
+ return ds;
+}
+
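+/*
+ * Prefer the per-mirror credential set up for the DS (v3 case); fall
+ * back to the MDS credential otherwise.
+ */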
+struct rpc_cred *
+ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ struct rpc_cred *mdscred)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+ struct rpc_cred *cred = ERR_PTR(-EINVAL);
+
+ if (!nfs4_ff_layout_prepare_ds(lseg, ds_idx, true))
+ goto out;
+
+ if (mirror && mirror->cred)
+ cred = mirror->cred;
+ else
+ cred = mdscred;
+out:
+ return cred;
+}
+
+/*
+ * Find or create a DS rpc client with the MDS server rpc client auth flavor
+ * in the nfs_client cl_ds_clients list.
+ */
+struct rpc_clnt *
+nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ struct nfs_client *ds_clp, struct inode *inode)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+
+ switch (mirror->mirror_ds->ds_versions[0].version) {
+ case 3:
+ /* For NFSv3 DS, flavor is set when creating DS connections */
+ return ds_clp->cl_rpcclient;
+ case 4:
+ return nfs4_find_or_create_ds_client(ds_clp, inode);
+ default:
+ BUG();
+ }
+}
+
+static bool is_range_intersecting(u64 offset1, u64 length1,
+ u64 offset2, u64 length2)
+{
+ u64 end1 = end_offset(offset1, length1);
+ u64 end2 = end_offset(offset2, length2);
+
+ return (end1 == NFS4_MAX_UINT64 || end1 > offset2) &&
+ (end2 == NFS4_MAX_UINT64 || end2 > offset1);
+}
+
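+/*
+ * Encode tracked DS errors that intersect the returned range.  Each
+ * entry is unlinked and freed as it is encoded, so it is reported at
+ * most once.
+ */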
+/* called with inode i_lock held */
+int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr, int *count,
+ const struct pnfs_layout_range *range)
+{
+ struct nfs4_ff_layout_ds_err *err, *n;
+ __be32 *p;
+
+ list_for_each_entry_safe(err, n, &flo->error_list, list) {
+ if (!is_range_intersecting(err->offset, err->length,
+ range->offset, range->length))
+ continue;
+ /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
+ * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
+ */
+ p = xdr_reserve_space(xdr,
+ 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ p = xdr_encode_hyper(p, err->offset);
+ p = xdr_encode_hyper(p, err->length);
+ p = xdr_encode_opaque_fixed(p, &err->stateid,
+ NFS4_STATEID_SIZE);
+ p = xdr_encode_opaque_fixed(p, &err->deviceid,
+ NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(err->status);
+ *p++ = cpu_to_be32(err->opnum);
+ *count += 1;
+ list_del(&err->list);
+ dprintk("%s: offset %llu length %llu status %d op %d count %d\n",
+ __func__, err->offset, err->length, err->status,
+ err->opnum, *count);
+ kfree(err);
+ }
+
+ return 0;
+}
+
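+/* Return true if at least one mirror still has a usable (non-failed) DS. */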
+bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_deviceid_node *devid;
+ int idx;
+
+ for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ if (mirror && mirror->mirror_ds) {
+ devid = &mirror->mirror_ds->id_node;
+ if (!ff_layout_test_devid_unavailable(devid))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
+ "retries a request before it attempts further "
+ " recovery action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+ "NFSv4.1 client waits for a response from a "
+ " data server before it retries an NFS request.");
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 2f5db844c172..857e2a99acc8 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -152,7 +152,7 @@ void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *f
nfs_fattr_free_group_name(fattr);
}
-static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
+int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
{
unsigned long val;
char buf[16];
@@ -166,6 +166,7 @@ static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *re
*res = val;
return 1;
}
+EXPORT_SYMBOL_GPL(nfs_map_string_to_numeric);
static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 4bffe637ea32..e4f0dcef8f54 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
nfs_attr_check_mountpoint(sb, fattr);
- if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
- !nfs_attr_use_mounted_on_fileid(fattr))
+ if (nfs_attr_use_mounted_on_fileid(fattr))
+ fattr->fileid = fattr->mounted_on_fileid;
+ else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
@@ -387,7 +388,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
if (S_ISREG(inode->i_mode)) {
inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
inode->i_data.a_ops = &nfs_file_aops;
- inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
inode->i_fop = &nfs_dir_operations;
@@ -506,10 +506,15 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_valid &= ~ATTR_MODE;
if (attr->ia_valid & ATTR_SIZE) {
+ loff_t i_size;
+
BUG_ON(!S_ISREG(inode->i_mode));
- if (attr->ia_size == i_size_read(inode))
+ i_size = i_size_read(inode);
+ if (attr->ia_size == i_size)
attr->ia_valid &= ~ATTR_SIZE;
+ else if (attr->ia_size < i_size && IS_SWAPFILE(inode))
+ return -ETXTBSY;
}
/* Optimization: if the end result is no change, don't RPC */
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index efaa31c70fbe..212b8c883d22 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -6,6 +6,7 @@
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/crc32.h>
+#include <linux/nfs_page.h>
#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
@@ -31,8 +32,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
(((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
return 0;
-
- fattr->fileid = fattr->mounted_on_fileid;
return 1;
}
@@ -189,9 +188,15 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
const struct sockaddr *ds_addr,
int ds_addrlen, int ds_proto,
unsigned int ds_timeo,
- unsigned int ds_retrans);
+ unsigned int ds_retrans,
+ u32 minor_version,
+ rpc_authflavor_t au_flavor);
extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
struct inode *);
+extern struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo,
+ unsigned int ds_retrans, rpc_authflavor_t au_flavor);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -244,9 +249,12 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
-int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *,
- const struct rpc_call_ops *, int, int);
+int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+ const struct rpc_call_ops *call_ops, int how, int flags);
void nfs_free_request(struct nfs_page *req);
+struct nfs_pgio_mirror *
+nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
static inline void nfs_iocounter_init(struct nfs_io_counter *c)
{
@@ -254,6 +262,12 @@ static inline void nfs_iocounter_init(struct nfs_io_counter *c)
atomic_set(&c->io_count, 0);
}
+static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc)
+{
+ WARN_ON_ONCE(desc->pg_mirror_count < 1);
+ return desc->pg_mirror_count > 1;
+}
+
/* nfs2xdr.c */
extern struct rpc_procinfo nfs_procedures[];
extern int nfs2_decode_dirent(struct xdr_stream *,
@@ -377,7 +391,7 @@ extern struct rpc_stat nfs_rpcstat;
extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
-extern void nfs_sb_active(struct super_block *sb);
+extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
/* namespace.c */
@@ -416,7 +430,6 @@ int nfs_show_options(struct seq_file *, struct dentry *);
int nfs_show_devname(struct seq_file *, struct dentry *);
int nfs_show_path(struct seq_file *, struct dentry *);
int nfs_show_stats(struct seq_file *, struct dentry *);
-void nfs_put_super(struct super_block *);
int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
/* write.c */
@@ -429,6 +442,7 @@ extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
+ const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
int how, int flags);
extern void nfs_init_commit(struct nfs_commit_data *data,
@@ -442,13 +456,15 @@ int nfs_scan_commit(struct inode *inode, struct list_head *dst,
struct nfs_commit_info *cinfo);
void nfs_mark_request_commit(struct nfs_page *req,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo);
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
int nfs_write_need_commit(struct nfs_pgio_header *);
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
int how, struct nfs_commit_info *cinfo);
void nfs_retry_commit(struct list_head *page_list,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo);
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
void nfs_commitdata_release(struct nfs_commit_data *data);
void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
struct nfs_commit_info *cinfo);
@@ -459,6 +475,7 @@ void nfs_init_cinfo(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq);
int nfs_key_timeout_notify(struct file *filp, struct inode *inode);
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx);
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
@@ -482,6 +499,7 @@ static inline void nfs_inode_dio_wait(struct inode *inode)
inode_dio_wait(inode);
}
extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
+extern void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq);
/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_pgio_header *);
@@ -495,6 +513,26 @@ extern int nfs41_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
struct rpc_cred *cred);
+static inline struct inode *nfs_igrab_and_active(struct inode *inode)
+{
+ inode = igrab(inode);
+ if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
+ iput(inode);
+ inode = NULL;
+ }
+ return inode;
+}
+
+static inline void nfs_iput_and_deactive(struct inode *inode)
+{
+ if (inode != NULL) {
+ struct super_block *sb = inode->i_sb;
+
+ iput(inode);
+ nfs_sb_deactive(sb);
+ }
+}
+
/*
* Determine the device name as a string
*/
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5f61b83f4a1c..b4e03ed8599d 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -481,7 +481,8 @@ out_overflow:
* void;
* };
*/
-static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result,
+ __u32 *op_status)
{
enum nfs_stat status;
int error;
@@ -489,6 +490,8 @@ static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
error = decode_stat(xdr, &status);
if (unlikely(error))
goto out;
+ if (op_status)
+ *op_status = status;
if (status != NFS_OK)
goto out_default;
error = decode_fattr(xdr, result);
@@ -808,7 +811,7 @@ out_default:
static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_fattr *result)
{
- return decode_attrstat(xdr, result);
+ return decode_attrstat(xdr, result, NULL);
}
static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -865,6 +868,7 @@ static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
error = decode_stat(xdr, &status);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS_OK)
goto out_default;
error = decode_fattr(xdr, result->fattr);
@@ -882,7 +886,7 @@ static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
{
/* All NFSv2 writes are "file sync" writes */
result->verf->committed = NFS_FILE_SYNC;
- return decode_attrstat(xdr, result->fattr);
+ return decode_attrstat(xdr, result->fattr, &result->op_status);
}
/**
diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h
index 333ae4068506..e134d6548ab7 100644
--- a/fs/nfs/nfs3_fs.h
+++ b/fs/nfs/nfs3_fs.h
@@ -30,5 +30,7 @@ struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subver
struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+/* nfs3super.c */
+extern struct nfs_subversion nfs_v3;
#endif /* __LINUX_FS_NFS_NFS3_FS_H */
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 8c1b437c5403..9e9fa347a948 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -1,5 +1,6 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
+#include <linux/sunrpc/addr.h>
#include "internal.h"
#include "nfs3_fs.h"
@@ -64,3 +65,43 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source,
nfs_init_server_aclclient(server);
return server;
}
+
+/*
+ * Set up a pNFS Data Server client over NFSv3.
+ *
+ * Return any existing nfs_client that matches server address,port,version
+ * and minorversion.
+ *
+ * For a new nfs_client, use a soft mount (default), a low retrans and a
+ * low timeout interval so that if a connection is lost, we retry through
+ * the MDS.
+ */
+struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
+ rpc_authflavor_t au_flavor)
+{
+ struct nfs_client_initdata cl_init = {
+ .addr = ds_addr,
+ .addrlen = ds_addrlen,
+ .nfs_mod = &nfs_v3,
+ .proto = ds_proto,
+ .net = mds_clp->cl_net,
+ };
+ struct rpc_timeout ds_timeout;
+ struct nfs_client *clp;
+ char buf[INET6_ADDRSTRLEN + 1];
+
+ /* fake a hostname because lockd wants it */
+ if (rpc_ntop(ds_addr, buf, sizeof(buf)) <= 0)
+ return ERR_PTR(-EINVAL);
+ cl_init.hostname = buf;
+
+ /* Use the MDS nfs_client cl_ipaddr. */
+ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
+ clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
+ au_flavor);
+
+ return clp;
+}
+EXPORT_SYMBOL_GPL(nfs3_set_ds_client);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 524f9f837408..78e557c3ab87 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -800,6 +800,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
+ if (hdr->pgio_done_cb != NULL)
+ return hdr->pgio_done_cb(task, hdr);
+
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
@@ -825,6 +828,9 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
+ if (hdr->pgio_done_cb != NULL)
+ return hdr->pgio_done_cb(task, hdr);
+
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
if (task->tk_status >= 0)
@@ -845,6 +851,9 @@ static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commi
static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
+ if (data->commit_done_cb != NULL)
+ return data->commit_done_cb(task, data);
+
if (nfs3_async_handle_jukebox(task, data->inode))
return -EAGAIN;
nfs_refresh_inode(data->inode, data->res.fattr);
diff --git a/fs/nfs/nfs3super.c b/fs/nfs/nfs3super.c
index 6af29c2da352..5c4394e4656b 100644
--- a/fs/nfs/nfs3super.c
+++ b/fs/nfs/nfs3super.c
@@ -7,7 +7,7 @@
#include "nfs3_fs.h"
#include "nfs.h"
-static struct nfs_subversion nfs_v3 = {
+struct nfs_subversion nfs_v3 = {
.owner = THIS_MODULE,
.nfs_fs = &nfs_fs_type,
.rpc_vers = &nfs_version3,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 8f4cbe7f4aa8..2a932fdc57cb 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1636,6 +1636,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_read3resok(xdr, result);
@@ -1708,6 +1709,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_write3resok(xdr, result);
@@ -2323,6 +2325,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_writeverf3(xdr, &result->verf->verifier);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a08178764cf9..fdef424b0cd3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@ enum nfs4_client_state {
#define NFS4_RENEW_TIMEOUT 0x01
#define NFS4_RENEW_DELEGATION_CB 0x02
+struct nfs_seqid_counter;
struct nfs4_minor_version_ops {
u32 minor_version;
unsigned init_caps;
@@ -56,6 +57,8 @@ struct nfs4_minor_version_ops {
struct nfs_fsinfo *);
void (*free_lock_state)(struct nfs_server *,
struct nfs4_lock_state *);
+ struct nfs_seqid *
+ (*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
const struct rpc_call_ops *call_sync_ops;
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
@@ -443,6 +446,12 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_release_seqid(struct nfs_seqid *seqid);
extern void nfs_free_seqid(struct nfs_seqid *seqid);
+extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task);
+extern int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res);
extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 03311259b0c4..8646af9b11d2 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
kfree(clp->cl_serverowner);
kfree(clp->cl_serverscope);
kfree(clp->cl_implid);
+ kfree(clp->cl_owner_id);
}
void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
spin_unlock(&nn->nfs_client_lock);
}
+static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
+ const struct nfs_client *clp2)
+{
+ if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
+ return true;
+ return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
+}
+
/**
* nfs40_walk_client_list - Find server that recognizes a client ID
*
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
if (pos->rpc_ops != new->rpc_ops)
continue;
- if (pos->cl_proto != new->cl_proto)
- continue;
-
if (pos->cl_minorversion != new->cl_minorversion)
continue;
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
if (pos->cl_clientid != new->cl_clientid)
continue;
+ if (!nfs4_match_client_owner_id(pos, new))
+ continue;
+
atomic_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
}
/*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
*/
static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
{
struct nfs41_server_owner *o1 = a->cl_serverowner;
struct nfs41_server_owner *o2 = b->cl_serverowner;
- if (o1->minor_id != o2->minor_id) {
- dprintk("NFS: --> %s server owner minor IDs do not match\n",
- __func__);
- return false;
- }
-
if (o1->major_id_sz != o2->major_id_sz)
goto out_major_mismatch;
if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
if (pos->rpc_ops != new->rpc_ops)
continue;
- if (pos->cl_proto != new->cl_proto)
- continue;
-
if (pos->cl_minorversion != new->cl_minorversion)
continue;
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
prev = pos;
status = nfs_wait_client_init_complete(pos);
- if (status == 0) {
+ if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
nfs4_schedule_lease_recovery(pos);
status = nfs4_wait_clnt_recover(pos);
}
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
if (!nfs4_match_clientids(pos, new))
continue;
- if (!nfs4_match_serverowners(pos, new))
+ /*
+ * Note that session trunking is just a special subcase of
+ * client id trunking. In either case, we want to fall back
+ * to using the existing nfs_client.
+ */
+ if (!nfs4_check_clientid_trunking(pos, new))
+ continue;
+
+ /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+ * uniform string; however, someone might switch the
+ * uniquifier string on us.
+ */
+ if (!nfs4_match_client_owner_id(pos, new))
continue;
atomic_inc(&pos->cl_count);
@@ -837,14 +849,15 @@ error:
*/
struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
const struct sockaddr *ds_addr, int ds_addrlen,
- int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
+ u32 minor_version, rpc_authflavor_t au_flavor)
{
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
.addrlen = ds_addrlen,
.nfs_mod = &nfs_v4,
.proto = ds_proto,
- .minorversion = mds_clp->cl_minorversion,
+ .minorversion = minor_version,
.net = mds_clp->cl_net,
};
struct rpc_timeout ds_timeout;
@@ -862,7 +875,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
*/
nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- mds_clp->cl_rpcclient->cl_auth->au_flavor);
+ au_flavor);
dprintk("<-- %s %p\n", __func__, clp);
return clp;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e7f8d5ff2581..2e7c9f7a6f7c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -495,12 +495,11 @@ static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
args->sa_privileged = 1;
}
-static int nfs40_setup_sequence(const struct nfs_server *server,
- struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- struct rpc_task *task)
+int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task)
{
- struct nfs4_slot_table *tbl = server->nfs_client->cl_slot_tbl;
struct nfs4_slot *slot;
/* slot already allocated? */
@@ -535,6 +534,7 @@ out_sleep:
spin_unlock(&tbl->slot_tbl_lock);
return -EAGAIN;
}
+EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
static int nfs40_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
@@ -694,8 +694,7 @@ out_retry:
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);
-static int nfs4_sequence_done(struct rpc_task *task,
- struct nfs4_sequence_res *res)
+int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
if (res->sr_slot == NULL)
return 1;
@@ -703,6 +702,7 @@ static int nfs4_sequence_done(struct rpc_task *task,
return nfs40_sequence_done(task, res);
return nfs41_sequence_done(task, res);
}
+EXPORT_SYMBOL_GPL(nfs4_sequence_done);
int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
@@ -777,7 +777,8 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
int ret = 0;
if (!session)
- return nfs40_setup_sequence(server, args, res, task);
+ return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
+ args, res, task);
dprintk("--> %s clp %p session %p sr_slot %u\n",
__func__, session->clp, session, res->sr_slot ?
@@ -818,14 +819,16 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_res *res,
struct rpc_task *task)
{
- return nfs40_setup_sequence(server, args, res, task);
+ return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
+ args, res, task);
}
-static int nfs4_sequence_done(struct rpc_task *task,
- struct nfs4_sequence_res *res)
+int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
{
return nfs40_sequence_done(task, res);
}
+EXPORT_SYMBOL_GPL(nfs4_sequence_done);
#endif /* !CONFIG_NFS_V4_1 */
@@ -937,6 +940,31 @@ static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
return true;
}
+static u32
+nfs4_map_atomic_open_share(struct nfs_server *server,
+ fmode_t fmode, int openflags)
+{
+ u32 res = 0;
+
+ switch (fmode & (FMODE_READ | FMODE_WRITE)) {
+ case FMODE_READ:
+ res = NFS4_SHARE_ACCESS_READ;
+ break;
+ case FMODE_WRITE:
+ res = NFS4_SHARE_ACCESS_WRITE;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ res = NFS4_SHARE_ACCESS_BOTH;
+ }
+ if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
+ goto out;
+ /* Want no delegation if we're using O_DIRECT */
+ if (openflags & O_DIRECT)
+ res |= NFS4_SHARE_WANT_NO_DELEG;
+out:
+ return res;
+}
+
static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
enum open_claim_type4 claim)
@@ -977,6 +1005,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct dentry *parent = dget_parent(dentry);
struct inode *dir = parent->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
@@ -987,8 +1016,9 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
if (IS_ERR(p->f_label))
goto err_free_p;
- p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
- if (p->o_arg.seqid == NULL)
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
+ if (IS_ERR(p->o_arg.seqid))
goto err_free_label;
nfs_sb_active(dentry->d_sb);
p->dentry = dget(dentry);
@@ -997,6 +1027,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
atomic_inc(&sp->so_count);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
+ p->o_arg.share_access = nfs4_map_atomic_open_share(server,
+ fmode, flags);
/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
* will return permission denied for all bits until close */
if (!(flags & O_EXCL)) {
@@ -1117,8 +1149,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
- if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
- return 0;
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
return 0;
nfs_mark_delegation_referenced(delegation);
@@ -1169,6 +1199,16 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
return false;
}
+static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
+{
+ if (state->n_wronly)
+ set_bit(NFS_O_WRONLY_STATE, &state->flags);
+ if (state->n_rdonly)
+ set_bit(NFS_O_RDONLY_STATE, &state->flags);
+ if (state->n_rdwr)
+ set_bit(NFS_O_RDWR_STATE, &state->flags);
+}
+
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
nfs4_stateid *stateid, fmode_t fmode)
{
@@ -1187,8 +1227,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
}
if (stateid == NULL)
return;
- if (!nfs_need_update_open_stateid(state, stateid))
+ /* Handle races with OPEN */
+ if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
+ !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+ nfs_resync_open_stateid_locked(state);
return;
+ }
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, stateid);
nfs4_stateid_copy(&state->open_stateid, stateid);
@@ -1283,6 +1327,23 @@ no_delegation:
return ret;
}
+static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
+ const nfs4_stateid *stateid)
+{
+ struct nfs4_state *state = lsp->ls_state;
+ bool ret = false;
+
+ spin_lock(&state->state_lock);
+ if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
+ goto out_noupdate;
+ if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
+ goto out_noupdate;
+ nfs4_stateid_copy(&lsp->ls_stateid, stateid);
+ ret = true;
+out_noupdate:
+ spin_unlock(&state->state_lock);
+ return ret;
+}
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
@@ -1681,8 +1742,8 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
- nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
- &data->c_res.seq_res, task);
+ nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
+ &data->c_arg.seq_args, &data->c_res.seq_res, task);
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
@@ -2589,6 +2650,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_EXPIRED:
+ if (!nfs4_stateid_match(&calldata->arg.stateid,
+ &state->stateid)) {
+ rpc_restart_call_prepare(task);
+ goto out_release;
+ }
if (calldata->arg.fmode == 0)
break;
default:
@@ -2621,6 +2687,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid);
/* Calculate the change in open mode */
calldata->arg.fmode = 0;
if (state->n_rdwr == 0) {
@@ -2655,6 +2722,9 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
goto out_wait;
}
}
+ calldata->arg.share_access =
+ nfs4_map_atomic_open_share(NFS_SERVER(inode),
+ calldata->arg.fmode, 0);
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
@@ -2677,45 +2747,10 @@ static const struct rpc_call_ops nfs4_close_ops = {
.rpc_release = nfs4_free_closedata,
};
-static bool nfs4_state_has_opener(struct nfs4_state *state)
-{
- /* first check existing openers */
- if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 &&
- state->n_rdonly != 0)
- return true;
-
- if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 &&
- state->n_wronly != 0)
- return true;
-
- if (test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 &&
- state->n_rdwr != 0)
- return true;
-
- return false;
-}
-
static bool nfs4_roc(struct inode *inode)
{
- struct nfs_inode *nfsi = NFS_I(inode);
- struct nfs_open_context *ctx;
- struct nfs4_state *state;
-
- spin_lock(&inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
- state = ctx->state;
- if (state == NULL)
- continue;
- if (nfs4_state_has_opener(state)) {
- spin_unlock(&inode->i_lock);
- return false;
- }
- }
- spin_unlock(&inode->i_lock);
-
- if (nfs4_check_delegation(inode, FMODE_READ))
+ if (!nfs_have_layout(inode))
return false;
-
return pnfs_roc(inode);
}
@@ -2733,6 +2768,7 @@ static bool nfs4_roc(struct inode *inode)
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
{
struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
struct nfs4_closedata *calldata;
struct nfs4_state_owner *sp = state->owner;
struct rpc_task *task;
@@ -2759,10 +2795,10 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
- calldata->arg.stateid = &state->open_stateid;
/* Serialization for the sequence id */
- calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
- if (calldata->arg.seqid == NULL)
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
+ if (IS_ERR(calldata->arg.seqid))
goto out_free_calldata;
calldata->arg.fmode = 0;
calldata->arg.bitmask = server->cache_consistency_bitmask;
@@ -4917,11 +4953,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
}
static unsigned int
-nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+nfs4_init_nonuniform_client_string(struct nfs_client *clp,
char *buf, size_t len)
{
unsigned int result;
+ if (clp->cl_owner_id != NULL)
+ return strlcpy(buf, clp->cl_owner_id, len);
+
rcu_read_lock();
result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
clp->cl_ipaddr,
@@ -4930,24 +4969,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO));
rcu_read_unlock();
+ clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
return result;
}
static unsigned int
-nfs4_init_uniform_client_string(const struct nfs_client *clp,
+nfs4_init_uniform_client_string(struct nfs_client *clp,
char *buf, size_t len)
{
const char *nodename = clp->cl_rpcclient->cl_nodename;
+ unsigned int result;
+
+ if (clp->cl_owner_id != NULL)
+ return strlcpy(buf, clp->cl_owner_id, len);
if (nfs4_client_id_uniquifier[0] != '\0')
- return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+ result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
clp->rpc_ops->version,
clp->cl_minorversion,
nfs4_client_id_uniquifier,
nodename);
- return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+ else
+ result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
clp->rpc_ops->version, clp->cl_minorversion,
nodename);
+ clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
+ return result;
}
/*
@@ -5128,9 +5175,13 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
static void nfs4_delegreturn_release(void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+ struct inode *inode = data->inode;
- if (data->roc)
- pnfs_roc_release(data->inode);
+ if (inode) {
+ if (data->roc)
+ pnfs_roc_release(inode);
+ nfs_iput_and_deactive(inode);
+ }
kfree(calldata);
}
@@ -5187,9 +5238,9 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
- data->inode = inode;
- data->roc = list_empty(&NFS_I(inode)->open_files) ?
- pnfs_roc(inode) : false;
+ data->inode = nfs_igrab_and_active(inode);
+ if (data->inode)
+ data->roc = nfs4_roc(inode);
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
@@ -5344,7 +5395,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
- p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
@@ -5371,14 +5421,18 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
return;
switch (task->tk_status) {
case 0:
- nfs4_stateid_copy(&calldata->lsp->ls_stateid,
- &calldata->res.stateid);
renew_lease(calldata->server, calldata->timestamp);
- break;
+ do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
+ if (nfs4_update_lock_stateid(calldata->lsp,
+ &calldata->res.stateid))
+ break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
+ if (!nfs4_stateid_match(&calldata->arg.stateid,
+ &calldata->lsp->ls_stateid))
+ rpc_restart_call_prepare(task);
break;
default:
if (nfs4_async_handle_error(task, calldata->server,
@@ -5394,6 +5448,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
goto out_wait;
+ nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
goto out_no_action;
@@ -5464,6 +5519,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
struct rpc_task *task;
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
int status = 0;
unsigned char fl_flags = request->fl_flags;
@@ -5487,9 +5543,10 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
lsp = request->fl_u.nfs4_fl.owner;
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
goto out;
- seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
+ alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
+ seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
- if (seqid == NULL)
+ if (IS_ERR(seqid))
goto out;
task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
status = PTR_ERR(task);
@@ -5522,6 +5579,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
@@ -5530,12 +5588,12 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
- if (p->arg.open_seqid == NULL)
+ if (IS_ERR(p->arg.open_seqid))
goto out_free;
- p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
- if (p->arg.lock_seqid == NULL)
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
+ if (IS_ERR(p->arg.lock_seqid))
goto out_free_seqid;
- p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p->arg.lock_owner.s_dev = server->s_dev;
@@ -5562,15 +5620,19 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
goto out_wait;
/* Do we need to do an open_to_lock_owner? */
- if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
+ if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
goto out_release_lock_seqid;
}
- data->arg.open_stateid = &state->open_stateid;
+ nfs4_stateid_copy(&data->arg.open_stateid,
+ &state->open_stateid);
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
- } else
+ } else {
data->arg.new_lock_owner = 0;
+ nfs4_stateid_copy(&data->arg.lock_stateid,
+ &data->lsp->ls_stateid);
+ }
if (!nfs4_valid_open_stateid(state)) {
data->rpc_status = -EBADF;
task->tk_action = NULL;
@@ -5594,6 +5656,7 @@ out_wait:
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
+ struct nfs4_lock_state *lsp = data->lsp;
dprintk("%s: begin!\n", __func__);
@@ -5601,18 +5664,36 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
return;
data->rpc_status = task->tk_status;
- if (data->arg.new_lock_owner != 0) {
- if (data->rpc_status == 0)
- nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
- else
- goto out;
- }
- if (data->rpc_status == 0) {
- nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
- set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
- renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
+ switch (task->tk_status) {
+ case 0:
+ renew_lease(NFS_SERVER(data->ctx->dentry->d_inode),
+ data->timestamp);
+ if (data->arg.new_lock) {
+ data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+ if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
+ rpc_restart_call_prepare(task);
+ break;
+ }
+ }
+ if (data->arg.new_lock_owner != 0) {
+ nfs_confirm_seqid(&lsp->ls_seqid, 0);
+ nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
+ set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
+ } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+ rpc_restart_call_prepare(task);
+ break;
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (data->arg.new_lock_owner != 0) {
+ if (!nfs4_stateid_match(&data->arg.open_stateid,
+ &lsp->ls_state->open_stateid))
+ rpc_restart_call_prepare(task);
+ } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
+ &lsp->ls_stateid))
+ rpc_restart_call_prepare(task);
}
-out:
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
@@ -5693,7 +5774,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
if (recovery_type == NFS_LOCK_RECLAIM)
data->arg.reclaim = NFS_LOCK_RECLAIM;
nfs4_set_sequence_privileged(&data->arg.seq_args);
- }
+ } else
+ data->arg.new_lock = 1;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -5817,10 +5899,8 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
- struct nfs4_state_owner *sp = state->owner;
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
- unsigned int seq;
int status = -ENOLCK;
if ((fl_flags & FL_POSIX) &&
@@ -5840,25 +5920,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(request->fl_file, request);
- goto out_unlock;
- }
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
- up_read(&nfsi->rwsem);
- status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
- if (status != 0)
+ up_read(&nfsi->rwsem);
goto out;
- down_read(&nfsi->rwsem);
- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) {
- status = -NFS4ERR_DELAY;
- goto out_unlock;
}
- /* Note: we always want to sleep here! */
- request->fl_flags = fl_flags | FL_SLEEP;
- if (do_vfs_lock(request->fl_file, request) < 0)
- printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
- "manager!\n", __func__);
-out_unlock:
up_read(&nfsi->rwsem);
+ status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
out:
request->fl_flags = fl_flags;
return status;
@@ -5965,8 +6031,8 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata
{
struct nfs_release_lockowner_data *data = calldata;
struct nfs_server *server = data->server;
- nfs40_setup_sequence(server, &data->args.seq_args,
- &data->res.seq_res, task);
+ nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
+ &data->args.seq_args, &data->res.seq_res, task);
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
data->timestamp = jiffies;
}
@@ -7528,6 +7594,7 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
return;
if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
NFS_I(lgp->args.inode)->layout,
+ &lgp->args.range,
lgp->args.ctx->state)) {
rpc_exit(task, NFS4_OK);
}
@@ -7783,9 +7850,13 @@ static void nfs4_layoutreturn_release(void *calldata)
spin_lock(&lo->plh_inode->i_lock);
if (lrp->res.lrs_present)
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ pnfs_clear_layoutreturn_waitbit(lo);
+ clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
+ rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
pnfs_put_layout_hdr(lrp->args.layout);
+ nfs_iput_and_deactive(lrp->inode);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
@@ -7796,7 +7867,7 @@ static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
.rpc_release = nfs4_layoutreturn_release,
};
-int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
+int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -7811,14 +7882,23 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
.callback_ops = &nfs4_layoutreturn_call_ops,
.callback_data = lrp,
};
- int status;
+ int status = 0;
dprintk("--> %s\n", __func__);
+ if (!sync) {
+ lrp->inode = nfs_igrab_and_active(lrp->args.inode);
+ if (!lrp->inode) {
+ nfs4_layoutreturn_release(lrp);
+ return -EAGAIN;
+ }
+ task_setup_data.flags |= RPC_TASK_ASYNC;
+ }
nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
- status = task->tk_status;
+ if (sync)
+ status = task->tk_status;
trace_nfs4_layoutreturn(lrp->args.inode, status);
dprintk("<-- %s status=%d\n", __func__, status);
rpc_put_task(task);
@@ -7912,6 +7992,7 @@ static void nfs4_layoutcommit_release(void *calldata)
nfs_post_op_update_inode_force_wcc(data->args.inode,
data->res.fattr);
put_rpccred(data->cred);
+ nfs_iput_and_deactive(data->inode);
kfree(data);
}
@@ -7936,7 +8017,6 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
.rpc_message = &msg,
.callback_ops = &nfs4_layoutcommit_ops,
.callback_data = data,
- .flags = RPC_TASK_ASYNC,
};
struct rpc_task *task;
int status = 0;
@@ -7947,18 +8027,21 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
data->args.lastbytewritten,
data->args.inode->i_ino);
+ if (!sync) {
+ data->inode = nfs_igrab_and_active(data->args.inode);
+ if (data->inode == NULL) {
+ nfs4_layoutcommit_release(data);
+ return -EAGAIN;
+ }
+ task_setup_data.flags = RPC_TASK_ASYNC;
+ }
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
- if (sync == false)
- goto out;
- status = nfs4_wait_for_completion_rpc_task(task);
- if (status != 0)
- goto out;
- status = task->tk_status;
+ if (sync)
+ status = task->tk_status;
trace_nfs4_layoutcommit(data->args.inode, status);
-out:
dprintk("%s: status %d\n", __func__, status);
rpc_put_task(task);
return status;
@@ -8386,6 +8469,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.match_stateid = nfs4_match_stateid,
.find_root_sec = nfs4_find_root_sec,
.free_lock_state = nfs4_release_lockowner,
+ .alloc_seqid = nfs_alloc_seqid,
.call_sync_ops = &nfs40_call_sync_ops,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
@@ -8394,6 +8478,12 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
};
#if defined(CONFIG_NFS_V4_1)
+static struct nfs_seqid *
+nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
+{
+ return NULL;
+}
+
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
.init_caps = NFS_CAP_READDIRPLUS
@@ -8407,6 +8497,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.match_stateid = nfs41_match_stateid,
.find_root_sec = nfs41_find_root_sec,
.free_lock_state = nfs41_free_lock_state,
+ .alloc_seqid = nfs_alloc_no_seqid,
.call_sync_ops = &nfs41_call_sync_ops,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
@@ -8433,6 +8524,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
.find_root_sec = nfs41_find_root_sec,
.free_lock_state = nfs41_free_lock_state,
.call_sync_ops = &nfs41_call_sync_ops,
+ .alloc_seqid = nfs_alloc_no_seqid,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5194933ed419..5ad908e9ce9c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1003,11 +1003,11 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m
struct nfs_seqid *new;
new = kmalloc(sizeof(*new), gfp_mask);
- if (new != NULL) {
- new->sequence = counter;
- INIT_LIST_HEAD(&new->list);
- new->task = NULL;
- }
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ new->sequence = counter;
+ INIT_LIST_HEAD(&new->list);
+ new->task = NULL;
return new;
}
@@ -1015,7 +1015,7 @@ void nfs_release_seqid(struct nfs_seqid *seqid)
{
struct nfs_seqid_counter *sequence;
- if (list_empty(&seqid->list))
+ if (seqid == NULL || list_empty(&seqid->list))
return;
sequence = seqid->sequence;
spin_lock(&sequence->lock);
@@ -1071,13 +1071,15 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
- struct nfs4_state_owner *sp = container_of(seqid->sequence,
- struct nfs4_state_owner, so_seqid);
- struct nfs_server *server = sp->so_server;
+ struct nfs4_state_owner *sp;
+
+ if (seqid == NULL)
+ return;
+ sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
if (status == -NFS4ERR_BAD_SEQID)
nfs4_drop_state_owner(sp);
- if (!nfs4_has_session(server->nfs_client))
+ if (!nfs4_has_session(sp->so_server->nfs_client))
nfs_increment_seqid(status, seqid);
}
@@ -1088,14 +1090,18 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
*/
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
- nfs_increment_seqid(status, seqid);
+ if (seqid != NULL)
+ nfs_increment_seqid(status, seqid);
}
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
- struct nfs_seqid_counter *sequence = seqid->sequence;
+ struct nfs_seqid_counter *sequence;
int status = 0;
+ if (seqid == NULL)
+ goto out;
+ sequence = seqid->sequence;
spin_lock(&sequence->lock);
seqid->task = task;
if (list_empty(&seqid->list))
@@ -1106,6 +1112,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
status = -EAGAIN;
unlock:
spin_unlock(&sequence->lock);
+out:
return status;
}
@@ -1366,49 +1373,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct nfs_inode *nfsi = NFS_I(inode);
struct file_lock *fl;
int status = 0;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct list_head *list;
- if (inode->i_flock == NULL)
+ if (flctx == NULL)
return 0;
+ list = &flctx->flc_posix;
+
/* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
- /* Protect inode->i_flock using the BKL */
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
- continue;
+ spin_lock(&flctx->flc_lock);
+restart:
+ list_for_each_entry(fl, list, fl_list) {
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
status = ops->recover_lock(state, fl);
switch (status) {
- case 0:
- break;
- case -ESTALE:
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
- case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- goto out;
- default:
- printk(KERN_ERR "NFS: %s: unhandled error %d\n",
- __func__, status);
- case -ENOMEM:
- case -NFS4ERR_DENIED:
- case -NFS4ERR_RECLAIM_BAD:
- case -NFS4ERR_RECLAIM_CONFLICT:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
- status = 0;
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ goto out;
+ default:
+ pr_err("NFS: %s: unhandled error %d\n",
+ __func__, status);
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ case -NFS4ERR_RECLAIM_BAD:
+ case -NFS4ERR_RECLAIM_CONFLICT:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ status = 0;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
+ if (list == &flctx->flc_posix) {
+ list = &flctx->flc_flock;
+ goto restart;
+ }
+ spin_unlock(&flctx->flc_lock);
out:
up_write(&nfsi->rwsem);
return status;
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 6f340f02f2ba..75090feeafad 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -53,7 +53,6 @@ static const struct super_operations nfs4_sops = {
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs4_write_inode,
.drop_inode = nfs_drop_inode,
- .put_super = nfs_put_super,
.statfs = nfs_statfs,
.evict_inode = nfs4_evict_inode,
.umount_begin = nfs_umount_begin,
@@ -346,6 +345,9 @@ out:
static void __exit exit_nfs_v4(void)
{
+	/* Not called in _init(); conditionally loaded */
+ nfs4_pnfs_v3_ds_connect_unload();
+
unregister_nfs_version(&nfs_v4);
nfs4_unregister_sysctl();
nfs_idmap_quit();
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index cb4376b78ed9..e23a0a664e12 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -946,7 +946,10 @@ static void encode_uint64(struct xdr_stream *xdr, u64 n)
static void encode_nfs4_seqid(struct xdr_stream *xdr,
const struct nfs_seqid *seqid)
{
- encode_uint32(xdr, seqid->sequence->counter);
+ if (seqid != NULL)
+ encode_uint32(xdr, seqid->sequence->counter);
+ else
+ encode_uint32(xdr, 0);
}
static void encode_compound_hdr(struct xdr_stream *xdr,
@@ -1125,7 +1128,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
{
encode_op_hdr(xdr, OP_CLOSE, decode_close_maxsz, hdr);
encode_nfs4_seqid(xdr, arg->seqid);
- encode_nfs4_stateid(xdr, arg->stateid);
+ encode_nfs4_stateid(xdr, &arg->stateid);
}
static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
@@ -1301,12 +1304,12 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
*p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){
encode_nfs4_seqid(xdr, args->open_seqid);
- encode_nfs4_stateid(xdr, args->open_stateid);
+ encode_nfs4_stateid(xdr, &args->open_stateid);
encode_nfs4_seqid(xdr, args->lock_seqid);
encode_lockowner(xdr, &args->lock_owner);
}
else {
- encode_nfs4_stateid(xdr, args->lock_stateid);
+ encode_nfs4_stateid(xdr, &args->lock_stateid);
encode_nfs4_seqid(xdr, args->lock_seqid);
}
}
@@ -1330,7 +1333,7 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, hdr);
encode_uint32(xdr, nfs4_lock_type(args->fl, 0));
encode_nfs4_seqid(xdr, args->seqid);
- encode_nfs4_stateid(xdr, args->stateid);
+ encode_nfs4_stateid(xdr, &args->stateid);
p = reserve_space(xdr, 16);
p = xdr_encode_hyper(p, args->fl->fl_start);
xdr_encode_hyper(p, nfs4_lock_length(args->fl));
@@ -1348,24 +1351,12 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
encode_string(xdr, name->len, name->name);
}
-static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
+static void encode_share_access(struct xdr_stream *xdr, u32 share_access)
{
__be32 *p;
p = reserve_space(xdr, 8);
- switch (fmode & (FMODE_READ|FMODE_WRITE)) {
- case FMODE_READ:
- *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
- break;
- case FMODE_WRITE:
- *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
- break;
- case FMODE_READ|FMODE_WRITE:
- *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
- break;
- default:
- *p++ = cpu_to_be32(0);
- }
+ *p++ = cpu_to_be32(share_access);
*p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
}
@@ -1377,7 +1368,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
* owner 4 = 32
*/
encode_nfs4_seqid(xdr, arg->seqid);
- encode_share_access(xdr, arg->fmode);
+ encode_share_access(xdr, arg->share_access);
p = reserve_space(xdr, 36);
p = xdr_encode_hyper(p, arg->clientid);
*p++ = cpu_to_be32(24);
@@ -1530,9 +1521,9 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co
static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_OPEN_DOWNGRADE, decode_open_downgrade_maxsz, hdr);
- encode_nfs4_stateid(xdr, arg->stateid);
+ encode_nfs4_stateid(xdr, &arg->stateid);
encode_nfs4_seqid(xdr, arg->seqid);
- encode_share_access(xdr, arg->fmode);
+ encode_share_access(xdr, arg->share_access);
}
static void
@@ -1801,9 +1792,8 @@ static void encode_create_session(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
- uint32_t len;
struct nfs_client *clp = args->client;
+ struct rpc_clnt *clnt = clp->cl_rpcclient;
struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
u32 max_resp_sz_cached;
@@ -1814,11 +1804,8 @@ static void encode_create_session(struct xdr_stream *xdr,
max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
- len = scnprintf(machine_name, sizeof(machine_name), "%s",
- clp->cl_ipaddr);
-
encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
- p = reserve_space(xdr, 16 + 2*28 + 20 + len + 12);
+ p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
p = xdr_encode_hyper(p, clp->cl_clientid);
*p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
*p++ = cpu_to_be32(args->flags); /*flags */
@@ -1847,7 +1834,7 @@ static void encode_create_session(struct xdr_stream *xdr,
/* authsys_parms rfc1831 */
*p++ = cpu_to_be32(nn->boot_time.tv_nsec); /* stamp */
- p = xdr_encode_opaque(p, machine_name, len);
+ p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
*p++ = cpu_to_be32(0); /* UID */
*p++ = cpu_to_be32(0); /* GID */
*p = cpu_to_be32(0); /* No more gids */
@@ -2012,11 +1999,11 @@ encode_layoutreturn(struct xdr_stream *xdr,
p = reserve_space(xdr, 16);
*p++ = cpu_to_be32(0); /* reclaim. always 0 for now */
*p++ = cpu_to_be32(args->layout_type);
- *p++ = cpu_to_be32(IOMODE_ANY);
+ *p++ = cpu_to_be32(args->range.iomode);
*p = cpu_to_be32(RETURN_FILE);
p = reserve_space(xdr, 16);
- p = xdr_encode_hyper(p, 0);
- p = xdr_encode_hyper(p, NFS4_MAX_UINT64);
+ p = xdr_encode_hyper(p, args->range.offset);
+ p = xdr_encode_hyper(p, args->range.length);
spin_lock(&args->inode->i_lock);
encode_nfs4_stateid(xdr, &args->stateid);
spin_unlock(&args->inode->i_lock);
@@ -4936,20 +4923,13 @@ out_overflow:
return -EIO;
}
-static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+static int decode_rw_delegation(struct xdr_stream *xdr,
+ uint32_t delegation_type,
+ struct nfs_openres *res)
{
__be32 *p;
- uint32_t delegation_type;
int status;
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- delegation_type = be32_to_cpup(p);
- if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
- res->delegation_type = 0;
- return 0;
- }
status = decode_stateid(xdr, &res->delegation);
if (unlikely(status))
return status;
@@ -4973,6 +4953,52 @@ out_overflow:
return -EIO;
}
+static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+ __be32 *p;
+ uint32_t why_no_delegation;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ why_no_delegation = be32_to_cpup(p);
+ switch (why_no_delegation) {
+ case WND4_CONTENTION:
+ case WND4_RESOURCE:
+ xdr_inline_decode(xdr, 4);
+ /* Ignore for now */
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+ __be32 *p;
+ uint32_t delegation_type;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ delegation_type = be32_to_cpup(p);
+ res->delegation_type = 0;
+ switch (delegation_type) {
+ case NFS4_OPEN_DELEGATE_NONE:
+ return 0;
+ case NFS4_OPEN_DELEGATE_READ:
+ case NFS4_OPEN_DELEGATE_WRITE:
+ return decode_rw_delegation(xdr, delegation_type, res);
+ case NFS4_OPEN_DELEGATE_NONE_EXT:
+ return decode_no_delegation(xdr, res);
+ }
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
{
__be32 *p;
@@ -6567,6 +6593,7 @@ static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
status = decode_compound_hdr(xdr, &hdr);
+ res->op_status = hdr.status;
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
@@ -6592,6 +6619,7 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
status = decode_compound_hdr(xdr, &hdr);
+ res->op_status = hdr.status;
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
@@ -6621,6 +6649,7 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
status = decode_compound_hdr(xdr, &hdr);
+ res->op_status = hdr.status;
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index cd3c910d2d12..9bc9f04fb7f6 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -261,11 +261,11 @@ static int __init root_nfs_data(char *cmdline)
*/
len = snprintf(nfs_export_path, sizeof(nfs_export_path),
tmp, utsname()->nodename);
- if (len > (int)sizeof(nfs_export_path))
+ if (len >= (int)sizeof(nfs_export_path))
goto out_devnametoolong;
len = snprintf(nfs_root_device, sizeof(nfs_root_device),
"%pI4:%s", &servaddr, nfs_export_path);
- if (len > (int)sizeof(nfs_root_device))
+ if (len >= (int)sizeof(nfs_root_device))
goto out_devnametoolong;
retval = 0;
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9e5bc42180e4..24e1d7403c0b 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -537,11 +537,12 @@ int objio_write_pagelist(struct nfs_pgio_header *hdr, int how)
static size_t objio_pg_test(struct nfs_pageio_descriptor *pgio,
struct nfs_page *prev, struct nfs_page *req)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(pgio);
unsigned int size;
size = pnfs_generic_pg_test(pgio, prev, req);
- if (!size || pgio->pg_count + req->wb_bytes >
+ if (!size || mirror->pg_count + req->wb_bytes >
(unsigned long)pgio->pg_layout_private)
return 0;
@@ -607,12 +608,14 @@ static const struct nfs_pageio_ops objio_pg_read_ops = {
.pg_init = objio_init_read,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops objio_pg_write_ops = {
.pg_init = objio_init_write,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static struct pnfs_layoutdriver_type objlayout_type = {
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 2b5e769beb16..d57190a0d533 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -42,21 +42,35 @@ static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
return p->pagevec != NULL;
}
+struct nfs_pgio_mirror *
+nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
+{
+ return nfs_pgio_has_mirroring(desc) ?
+ &desc->pg_mirrors[desc->pg_mirror_idx] :
+ &desc->pg_mirrors[0];
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
+
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr))
{
- hdr->req = nfs_list_entry(desc->pg_list.next);
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
+
+ hdr->req = nfs_list_entry(mirror->pg_list.next);
hdr->inode = desc->pg_inode;
hdr->cred = hdr->req->wb_context->cred;
hdr->io_start = req_offset(hdr->req);
- hdr->good_bytes = desc->pg_count;
+ hdr->good_bytes = mirror->pg_count;
hdr->dreq = desc->pg_dreq;
hdr->layout_private = desc->pg_layout_private;
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
hdr->completion_ops->init_hdr(hdr);
+
+ hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);
@@ -480,7 +494,10 @@ nfs_wait_on_request(struct nfs_page *req)
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
struct nfs_page *prev, struct nfs_page *req)
{
- if (desc->pg_count > desc->pg_bsize) {
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
+
+ if (mirror->pg_count > mirror->pg_bsize) {
/* should never happen */
WARN_ON_ONCE(1);
return 0;
@@ -490,11 +507,11 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
* Limit the request size so that we can still allocate a page array
* for it without upsetting the slab allocator.
*/
- if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
+ if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
sizeof(struct page) > PAGE_SIZE)
return 0;
- return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
+ return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
@@ -597,13 +614,14 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
}
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
const struct rpc_call_ops *call_ops, int how, int flags)
{
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &hdr->args,
.rpc_resp = &hdr->res,
- .rpc_cred = hdr->cred,
+ .rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
@@ -616,7 +634,7 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
};
int ret = 0;
- hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);
+ hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
dprintk("NFS: %5u initiated pgio call "
"(req %s/%llu, %u bytes @ offset %llu)\n",
@@ -650,10 +668,18 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror;
+ u32 midx;
+
set_bit(NFS_IOHDR_REDO, &hdr->flags);
nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ /* TODO: Make sure it's right to clean up all mirrors here
+ * and not just hdr->pgio_mirror_idx */
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ mirror = &desc->pg_mirrors[midx];
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+ }
return -ENOMEM;
}
@@ -670,6 +696,17 @@ static void nfs_pgio_release(void *calldata)
hdr->completion_ops->completion(hdr);
}
+static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
+ unsigned int bsize)
+{
+ INIT_LIST_HEAD(&mirror->pg_list);
+ mirror->pg_bytes_written = 0;
+ mirror->pg_count = 0;
+ mirror->pg_bsize = bsize;
+ mirror->pg_base = 0;
+ mirror->pg_recoalesce = 0;
+}
+
/**
* nfs_pageio_init - initialise a page io descriptor
* @desc: pointer to descriptor
@@ -686,13 +723,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
size_t bsize,
int io_flags)
{
- INIT_LIST_HEAD(&desc->pg_list);
- desc->pg_bytes_written = 0;
- desc->pg_count = 0;
- desc->pg_bsize = bsize;
- desc->pg_base = 0;
+ struct nfs_pgio_mirror *new;
+ int i;
+
desc->pg_moreio = 0;
- desc->pg_recoalesce = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
desc->pg_completion_ops = compl_ops;
@@ -702,6 +736,26 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_lseg = NULL;
desc->pg_dreq = NULL;
desc->pg_layout_private = NULL;
+ desc->pg_bsize = bsize;
+
+ desc->pg_mirror_count = 1;
+ desc->pg_mirror_idx = 0;
+
+ if (pg_ops->pg_get_mirror_count) {
+ /* until we have a request, we don't have an lseg and no
+ * idea how many mirrors there will be */
+ new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
+ sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
+ desc->pg_mirrors_dynamic = new;
+ desc->pg_mirrors = new;
+
+ for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
+ nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
+ } else {
+ desc->pg_mirrors_dynamic = NULL;
+ desc->pg_mirrors = desc->pg_mirrors_static;
+ nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+ }
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);
@@ -737,14 +791,16 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata)
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_page *req;
struct page **pages,
*last_page;
- struct list_head *head = &desc->pg_list;
+ struct list_head *head = &mirror->pg_list;
struct nfs_commit_info cinfo;
unsigned int pagecount, pageused;
- pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
+ pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
if (!nfs_pgarray_set(&hdr->page_array, pagecount))
return nfs_pgio_error(desc, hdr);
@@ -772,7 +828,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+ nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
@@ -780,23 +836,74 @@ EXPORT_SYMBOL_GPL(nfs_generic_pgio);
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror;
struct nfs_pgio_header *hdr;
int ret;
+ mirror = nfs_pgio_current_mirror(desc);
+
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ /* TODO: make sure this is right with mirroring - or
+ * should it back out all mirrors? */
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
return -ENOMEM;
}
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0)
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
- hdr, desc->pg_rpc_callops,
+ hdr,
+ hdr->cred,
+ NFS_PROTO(hdr->inode),
+ desc->pg_rpc_callops,
desc->pg_ioflags, 0);
return ret;
}
+/*
+ * nfs_pageio_setup_mirroring - determine if mirroring is to be used
+ * by calling the pg_get_mirror_count op
+ */
+static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ int mirror_count = 1;
+
+ if (!pgio->pg_ops->pg_get_mirror_count)
+ return 0;
+
+ mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+
+ if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
+ return -EINVAL;
+
+ pgio->pg_mirror_count = mirror_count;
+
+ return 0;
+}
+
+/*
+ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
+ */
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ pgio->pg_mirror_count = 1;
+ pgio->pg_mirror_idx = 0;
+}
+
+static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ pgio->pg_mirror_count = 1;
+ pgio->pg_mirror_idx = 0;
+ pgio->pg_mirrors = pgio->pg_mirrors_static;
+ kfree(pgio->pg_mirrors_dynamic);
+ pgio->pg_mirrors_dynamic = NULL;
+}
+
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
const struct nfs_open_context *ctx2)
{
@@ -826,11 +933,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
struct nfs_pageio_descriptor *pgio)
{
size_t size;
+ struct file_lock_context *flctx;
if (prev) {
if (!nfs_match_open_context(req->wb_context, prev->wb_context))
return false;
- if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+ flctx = req->wb_context->dentry->d_inode->i_flctx;
+ if (flctx != NULL &&
+ !(list_empty_careful(&flctx->flc_posix) &&
+ list_empty_careful(&flctx->flc_flock)) &&
!nfs_match_lock_context(req->wb_lock_context,
prev->wb_lock_context))
return false;
@@ -863,19 +974,22 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_page *prev = NULL;
- if (desc->pg_count != 0) {
- prev = nfs_list_entry(desc->pg_list.prev);
+
+ if (mirror->pg_count != 0) {
+ prev = nfs_list_entry(mirror->pg_list.prev);
} else {
if (desc->pg_ops->pg_init)
desc->pg_ops->pg_init(desc, req);
- desc->pg_base = req->wb_pgbase;
+ mirror->pg_base = req->wb_pgbase;
}
if (!nfs_can_coalesce_requests(prev, req, desc))
return 0;
nfs_list_remove_request(req);
- nfs_list_add_request(req, &desc->pg_list);
- desc->pg_count += req->wb_bytes;
+ nfs_list_add_request(req, &mirror->pg_list);
+ mirror->pg_count += req->wb_bytes;
return 1;
}
@@ -884,16 +998,19 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
*/
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
- if (!list_empty(&desc->pg_list)) {
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
+
+ if (!list_empty(&mirror->pg_list)) {
int error = desc->pg_ops->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
else
- desc->pg_bytes_written += desc->pg_count;
+ mirror->pg_bytes_written += mirror->pg_count;
}
- if (list_empty(&desc->pg_list)) {
- desc->pg_count = 0;
- desc->pg_base = 0;
+ if (list_empty(&mirror->pg_list)) {
+ mirror->pg_count = 0;
+ mirror->pg_base = 0;
}
}
@@ -911,6 +1028,8 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_page *subreq;
unsigned int bytes_left = 0;
unsigned int offset, pgbase;
@@ -934,7 +1053,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
nfs_pageio_doio(desc);
if (desc->pg_error < 0)
return 0;
- if (desc->pg_recoalesce)
+ if (mirror->pg_recoalesce)
return 0;
/* retry add_request for this subreq */
nfs_page_group_lock(req, false);
@@ -972,14 +1091,16 @@ err_ptr:
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
LIST_HEAD(head);
do {
- list_splice_init(&desc->pg_list, &head);
- desc->pg_bytes_written -= desc->pg_count;
- desc->pg_count = 0;
- desc->pg_base = 0;
- desc->pg_recoalesce = 0;
+ list_splice_init(&mirror->pg_list, &head);
+ mirror->pg_bytes_written -= mirror->pg_count;
+ mirror->pg_count = 0;
+ mirror->pg_base = 0;
+ mirror->pg_recoalesce = 0;
+
desc->pg_moreio = 0;
while (!list_empty(&head)) {
@@ -993,11 +1114,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
return 0;
break;
}
- } while (desc->pg_recoalesce);
+ } while (mirror->pg_recoalesce);
return 1;
}
-int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
int ret;
@@ -1010,9 +1131,80 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
break;
ret = nfs_do_recoalesce(desc);
} while (ret);
+
return ret;
}
+int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+{
+ u32 midx;
+ unsigned int pgbase, offset, bytes;
+ struct nfs_page *dupreq, *lastreq;
+
+ pgbase = req->wb_pgbase;
+ offset = req->wb_offset;
+ bytes = req->wb_bytes;
+
+ nfs_pageio_setup_mirroring(desc, req);
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ if (midx) {
+ nfs_page_group_lock(req, false);
+
+ /* find the last request */
+ for (lastreq = req->wb_head;
+ lastreq->wb_this_page != req->wb_head;
+ lastreq = lastreq->wb_this_page)
+ ;
+
+ dupreq = nfs_create_request(req->wb_context,
+ req->wb_page, lastreq, pgbase, bytes);
+
+ if (IS_ERR(dupreq)) {
+ nfs_page_group_unlock(req);
+ return 0;
+ }
+
+ nfs_lock_request(dupreq);
+ nfs_page_group_unlock(req);
+ dupreq->wb_offset = offset;
+ dupreq->wb_index = req->wb_index;
+ } else
+ dupreq = req;
+
+ if (nfs_pgio_has_mirroring(desc))
+ desc->pg_mirror_idx = midx;
+ if (!nfs_pageio_add_request_mirror(desc, dupreq))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
+ * nfs_pageio_descriptor
+ * @desc: pointer to io descriptor
+ */
+static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
+ u32 mirror_idx)
+{
+ struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
+ u32 restore_idx = desc->pg_mirror_idx;
+
+ if (nfs_pgio_has_mirroring(desc))
+ desc->pg_mirror_idx = mirror_idx;
+ for (;;) {
+ nfs_pageio_doio(desc);
+ if (!mirror->pg_recoalesce)
+ break;
+ if (!nfs_do_recoalesce(desc))
+ break;
+ }
+ desc->pg_mirror_idx = restore_idx;
+}
+
/*
* nfs_pageio_resend - Transfer requests to new descriptor and resend
* @hdr - the pgio header to move request from
@@ -1046,18 +1238,19 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
/**
- * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
+ * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
* @desc: pointer to io descriptor
*/
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
- for (;;) {
- nfs_pageio_doio(desc);
- if (!desc->pg_recoalesce)
- break;
- if (!nfs_do_recoalesce(desc))
- break;
- }
+ u32 midx;
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++)
+ nfs_pageio_complete_mirror(desc, midx);
+
+ if (desc->pg_ops->pg_cleanup)
+ desc->pg_ops->pg_cleanup(desc);
+ nfs_pageio_cleanup_mirroring(desc);
}
/**
@@ -1073,10 +1266,17 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
*/
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
- if (!list_empty(&desc->pg_list)) {
- struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
- if (index != prev->wb_index + 1)
- nfs_pageio_complete(desc);
+ struct nfs_pgio_mirror *mirror;
+ struct nfs_page *prev;
+ u32 midx;
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ mirror = &desc->pg_mirrors[midx];
+ if (!list_empty(&mirror->pg_list)) {
+ prev = nfs_list_entry(mirror->pg_list.prev);
+ if (index != prev->wb_index + 1)
+ nfs_pageio_complete_mirror(desc, midx);
+ }
}
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0a5dda4d85c2..4f802b02fbb9 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -34,6 +34,7 @@
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
+#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
@@ -50,6 +51,10 @@ static DEFINE_SPINLOCK(pnfs_spinlock);
*/
static LIST_HEAD(pnfs_modules_tbl);
+static int
+pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
+ enum pnfs_iomode iomode, bool sync);
+
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
@@ -238,6 +243,8 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
struct inode *inode = lo->plh_inode;
if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+ if (!list_empty(&lo->plh_segs))
+ WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);
@@ -337,6 +344,48 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
+/* Return true if layoutreturn is needed */
+static bool
+pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_segment *s;
+
+ if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ return false;
+
+ list_for_each_entry(s, &lo->plh_segs, pls_list)
+ if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+ return false;
+
+ return true;
+}
+
+static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
+ struct pnfs_layout_hdr *lo, struct inode *inode)
+{
+ lo = lseg->pls_layout;
+ inode = lo->plh_inode;
+
+ spin_lock(&inode->i_lock);
+ if (pnfs_layout_need_return(lo, lseg)) {
+ nfs4_stateid stateid;
+ enum pnfs_iomode iomode;
+
+ stateid = lo->plh_stateid;
+ iomode = lo->plh_return_iomode;
+ /* decreased in pnfs_send_layoutreturn() */
+ lo->plh_block_lgets++;
+ lo->plh_return_iomode = 0;
+ spin_unlock(&inode->i_lock);
+ pnfs_get_layout_hdr(lo);
+
+		/* Send an async layoutreturn so we don't deadlock */
+ pnfs_send_layoutreturn(lo, stateid, iomode, false);
+ } else
+ spin_unlock(&inode->i_lock);
+}
+
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
@@ -349,8 +398,17 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+
+ /* Handle the case where refcount != 1 */
+ if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
+ return;
+
lo = lseg->pls_layout;
inode = lo->plh_inode;
+ /* Do we need a layoutreturn? */
+ if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
+
if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
@@ -543,6 +601,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
pnfs_get_layout_hdr(lo);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
+ pnfs_clear_retry_layoutget(lo);
spin_unlock(&nfsi->vfs_inode.i_lock);
pnfs_free_lseg_list(&tmp_list);
pnfs_put_layout_hdr(lo);
@@ -740,25 +799,37 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
+static bool
+pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range)
+{
+ return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
+ (lo->plh_return_iomode == IOMODE_ANY ||
+ lo->plh_return_iomode == range->iomode);
+}
+
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
-pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
+pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range, int lget)
{
return lo->plh_block_lgets ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
(list_empty(&lo->plh_segs) &&
- (atomic_read(&lo->plh_outstanding) > lget));
+ (atomic_read(&lo->plh_outstanding) > lget)) ||
+ pnfs_layout_returning(lo, range);
}
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range,
struct nfs4_state *open_state)
{
int status = 0;
dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
- if (pnfs_layoutgets_blocked(lo, 1)) {
+ if (pnfs_layoutgets_blocked(lo, range, 1)) {
status = -EAGAIN;
} else if (!nfs4_valid_open_stateid(open_state)) {
status = -EBADF;
@@ -825,7 +896,9 @@ send_layoutget(struct pnfs_layout_hdr *lo,
pnfs_layout_io_set_failed(lo, range->iomode);
}
return NULL;
- }
+ } else
+ pnfs_layout_clear_fail_bit(lo,
+ pnfs_iomode_to_fail_bit(range->iomode));
return lseg;
}
@@ -845,6 +918,49 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
}
}
+void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
+{
+ clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+}
+
+static int
+pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
+ enum pnfs_iomode iomode, bool sync)
+{
+ struct inode *ino = lo->plh_inode;
+ struct nfs4_layoutreturn *lrp;
+ int status = 0;
+
+ lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
+ if (unlikely(lrp == NULL)) {
+ status = -ENOMEM;
+ spin_lock(&ino->i_lock);
+ lo->plh_block_lgets--;
+ pnfs_clear_layoutreturn_waitbit(lo);
+ rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+ spin_unlock(&ino->i_lock);
+ pnfs_put_layout_hdr(lo);
+ goto out;
+ }
+
+ lrp->args.stateid = stateid;
+ lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
+ lrp->args.inode = ino;
+ lrp->args.range.iomode = iomode;
+ lrp->args.range.offset = 0;
+ lrp->args.range.length = NFS4_MAX_UINT64;
+ lrp->args.layout = lo;
+ lrp->clp = NFS_SERVER(ino)->nfs_client;
+ lrp->cred = lo->plh_lc_cred;
+
+ status = nfs4_proc_layoutreturn(lrp, sync);
+out:
+ dprintk("<-- %s status: %d\n", __func__, status);
+ return status;
+}
+
/*
* Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
* when the layout segment list is empty.
@@ -859,7 +975,6 @@ _pnfs_return_layout(struct inode *ino)
struct pnfs_layout_hdr *lo = NULL;
struct nfs_inode *nfsi = NFS_I(ino);
LIST_HEAD(tmp_list);
- struct nfs4_layoutreturn *lrp;
nfs4_stateid stateid;
int status = 0, empty;
@@ -901,24 +1016,7 @@ _pnfs_return_layout(struct inode *ino)
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
- lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
- if (unlikely(lrp == NULL)) {
- status = -ENOMEM;
- spin_lock(&ino->i_lock);
- lo->plh_block_lgets--;
- spin_unlock(&ino->i_lock);
- pnfs_put_layout_hdr(lo);
- goto out;
- }
-
- lrp->args.stateid = stateid;
- lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
- lrp->args.inode = ino;
- lrp->args.layout = lo;
- lrp->clp = NFS_SERVER(ino)->nfs_client;
- lrp->cred = lo->plh_lc_cred;
-
- status = nfs4_proc_layoutreturn(lrp);
+ status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
out:
dprintk("<-- %s status: %d\n", __func__, status);
return status;
@@ -954,31 +1052,60 @@ pnfs_commit_and_return_layout(struct inode *inode)
bool pnfs_roc(struct inode *ino)
{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg, *tmp;
+ nfs4_stateid stateid;
LIST_HEAD(tmp_list);
- bool found = false;
+ bool found = false, layoutreturn = false;
spin_lock(&ino->i_lock);
- lo = NFS_I(ino)->layout;
+ lo = nfsi->layout;
if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
- goto out_nolayout;
+ goto out_noroc;
+
+ /* Don't return layout if we hold a delegation */
+ if (nfs4_check_delegation(ino, FMODE_READ))
+ goto out_noroc;
+
+ list_for_each_entry(ctx, &nfsi->open_files, list) {
+ state = ctx->state;
+ /* Don't return layout if there is open file state */
+ if (state != NULL && state->state != 0)
+ goto out_noroc;
+ }
+
+ pnfs_clear_retry_layoutget(lo);
list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
mark_lseg_invalid(lseg, &tmp_list);
found = true;
}
if (!found)
- goto out_nolayout;
+ goto out_noroc;
lo->plh_block_lgets++;
pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
return true;
-out_nolayout:
+out_noroc:
+ if (lo) {
+ stateid = lo->plh_stateid;
+ layoutreturn =
+ test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lo->plh_flags);
+ if (layoutreturn) {
+ lo->plh_block_lgets++;
+ pnfs_get_layout_hdr(lo);
+ }
+ }
spin_unlock(&ino->i_lock);
+ if (layoutreturn)
+ pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
return false;
}
@@ -1013,8 +1140,9 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
struct nfs_inode *nfsi = NFS_I(ino);
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg;
+ nfs4_stateid stateid;
u32 current_seqid;
- bool found = false;
+ bool found = false, layoutreturn = false;
spin_lock(&ino->i_lock);
list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
@@ -1031,7 +1159,21 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
*/
*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
+ if (!found) {
+ stateid = lo->plh_stateid;
+ layoutreturn =
+ test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lo->plh_flags);
+ if (layoutreturn) {
+ lo->plh_block_lgets++;
+ pnfs_get_layout_hdr(lo);
+ }
+ }
spin_unlock(&ino->i_lock);
+ if (layoutreturn) {
+ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+ pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
+ }
return found;
}
@@ -1178,6 +1320,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+ !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
pnfs_lseg_range_match(&lseg->pls_range, range)) {
ret = pnfs_get_lseg(lseg);
break;
@@ -1266,6 +1409,35 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
return ret;
}
+/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
+static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
+{
+ if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
+ return 1;
+ return nfs_wait_bit_killable(key);
+}
+
+static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ /*
+ * send layoutcommit as it can hold up layoutreturn due to lseg
+ * reference
+ */
+ pnfs_layoutcommit_inode(lo->plh_inode, false);
+ return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+ pnfs_layoutget_retry_bit_wait,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
+{
+ unsigned long *bitlock = &lo->plh_flags;
+
+ clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
+ smp_mb__after_atomic();
+ wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
+}
+
/*
 * Layout segment is retrieved from the server if not cached.
* The appropriate layout segment is referenced and returned to the caller.
@@ -1296,6 +1468,8 @@ pnfs_update_layout(struct inode *ino,
if (pnfs_within_mdsthreshold(ctx, ino, iomode))
goto out;
+lookup_again:
+ first = false;
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
@@ -1310,27 +1484,62 @@ pnfs_update_layout(struct inode *ino,
}
/* if LAYOUTGET already failed once we don't try again */
- if (pnfs_layout_io_test_failed(lo, iomode))
+ if (pnfs_layout_io_test_failed(lo, iomode) &&
+ !pnfs_should_retry_layoutget(lo))
goto out_unlock;
- /* Check to see if the layout for the given range already exists */
- lseg = pnfs_find_lseg(lo, &arg);
- if (lseg)
- goto out_unlock;
+ first = list_empty(&lo->plh_segs);
+ if (first) {
+ /* The first layoutget for the file. Need to serialize per
+ * RFC 5661 Errata 3208.
+ */
+ if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
+ &lo->plh_flags)) {
+ spin_unlock(&ino->i_lock);
+ wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
+ TASK_UNINTERRUPTIBLE);
+ pnfs_put_layout_hdr(lo);
+ goto lookup_again;
+ }
+ } else {
+ /* Check to see if the layout for the given range
+ * already exists
+ */
+ lseg = pnfs_find_lseg(lo, &arg);
+ if (lseg)
+ goto out_unlock;
+ }
+
+ /*
+ * Because we free lsegs before sending LAYOUTRETURN, we need to wait
+ * for LAYOUTRETURN even if first is true.
+ */
+ if (!lseg && pnfs_should_retry_layoutget(lo) &&
+ test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+ spin_unlock(&ino->i_lock);
+ dprintk("%s wait for layoutreturn\n", __func__);
+ if (pnfs_prepare_to_retry_layoutget(lo)) {
+ if (first)
+ pnfs_clear_first_layoutget(lo);
+ pnfs_put_layout_hdr(lo);
+ dprintk("%s retrying\n", __func__);
+ goto lookup_again;
+ }
+ goto out_put_layout_hdr;
+ }
- if (pnfs_layoutgets_blocked(lo, 0))
+ if (pnfs_layoutgets_blocked(lo, &arg, 0))
goto out_unlock;
atomic_inc(&lo->plh_outstanding);
-
- first = list_empty(&lo->plh_layouts) ? true : false;
spin_unlock(&ino->i_lock);
- if (first) {
+ if (list_empty(&lo->plh_layouts)) {
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL(FILE) coming in.
*/
spin_lock(&clp->cl_lock);
- list_add_tail(&lo->plh_layouts, &server->layouts);
+ if (list_empty(&lo->plh_layouts))
+ list_add_tail(&lo->plh_layouts, &server->layouts);
spin_unlock(&clp->cl_lock);
}
@@ -1343,8 +1552,11 @@ pnfs_update_layout(struct inode *ino,
arg.length = PAGE_CACHE_ALIGN(arg.length);
lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
+ pnfs_clear_retry_layoutget(lo);
atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
+ if (first)
+ pnfs_clear_first_layoutget(lo);
pnfs_put_layout_hdr(lo);
out:
dprintk("%s: inode %s/%llu pNFS layout segment %s for "
@@ -1393,7 +1605,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out_forget_reply;
}
- if (pnfs_layoutgets_blocked(lo, 1)) {
+ if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) {
dprintk("%s forget reply due to state\n", __func__);
goto out_forget_reply;
}
@@ -1440,24 +1652,79 @@ out_forget_reply:
goto out;
}
+static void
+pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ struct list_head *tmp_list,
+ struct pnfs_layout_range *return_range)
+{
+ struct pnfs_layout_segment *lseg, *next;
+
+ dprintk("%s:Begin lo %p\n", __func__, lo);
+
+ if (list_empty(&lo->plh_segs))
+ return;
+
+ list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ if (should_free_lseg(&lseg->pls_range, return_range)) {
+ dprintk("%s: marking lseg %p iomode %d "
+ "offset %llu length %llu\n", __func__,
+ lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset,
+ lseg->pls_range.length);
+ set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+ mark_lseg_invalid(lseg, tmp_list);
+ }
+}
+
+void pnfs_error_mark_layout_for_return(struct inode *inode,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
+ int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
+ struct pnfs_layout_range range = {
+ .iomode = lseg->pls_range.iomode,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+ LIST_HEAD(free_me);
+
+ spin_lock(&inode->i_lock);
+ /* set failure bit so that pnfs path will be retried later */
+ pnfs_layout_set_fail_bit(lo, iomode);
+ set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
+ if (lo->plh_return_iomode == 0)
+ lo->plh_return_iomode = range.iomode;
+ else if (lo->plh_return_iomode != range.iomode)
+ lo->plh_return_iomode = IOMODE_ANY;
+ /*
+ * mark all matching lsegs so that we are sure to have no live
+ * segments at hand when sending layoutreturn. See pnfs_put_lseg()
+ * for how it works.
+ */
+ pnfs_mark_matching_lsegs_return(lo, &free_me, &range);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&free_me);
+}
+EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
+
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
u64 rd_size = req->wb_bytes;
- WARN_ON_ONCE(pgio->pg_lseg != NULL);
-
- if (pgio->pg_dreq == NULL)
- rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
- else
- rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
-
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- req_offset(req),
- rd_size,
- IOMODE_READ,
- GFP_KERNEL);
+ if (pgio->pg_lseg == NULL) {
+ if (pgio->pg_dreq == NULL)
+ rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+ else
+ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ req_offset(req),
+ rd_size,
+ IOMODE_READ,
+ GFP_KERNEL);
+ }
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_read_mds(pgio);
@@ -1469,27 +1736,36 @@ void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size)
{
- WARN_ON_ONCE(pgio->pg_lseg != NULL);
-
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- req_offset(req),
- wb_size,
- IOMODE_RW,
- GFP_NOFS);
+ if (pgio->pg_lseg == NULL)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ req_offset(req),
+ wb_size,
+ IOMODE_RW,
+ GFP_NOFS);
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
+void
+pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
+{
+ if (desc->pg_lseg) {
+ pnfs_put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
+
/*
* Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
* of bytes (maximum @req->wb_bytes) that can be coalesced.
*/
size_t
-pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
- struct nfs_page *req)
+pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *prev, struct nfs_page *req)
{
unsigned int size;
u64 seg_end, req_start, seg_left;
@@ -1513,10 +1789,16 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
pgio->pg_lseg->pls_range.length);
req_start = req_offset(req);
- WARN_ON_ONCE(req_start > seg_end);
+ WARN_ON_ONCE(req_start >= seg_end);
/* start of request is past the last byte of this segment */
- if (req_start >= seg_end)
+ if (req_start >= seg_end) {
+ /* reference the new lseg */
+ if (pgio->pg_ops->pg_cleanup)
+ pgio->pg_ops->pg_cleanup(pgio);
+ if (pgio->pg_ops->pg_init)
+ pgio->pg_ops->pg_init(pgio, req);
return 0;
+ }
/* adjust 'size' iff there are fewer bytes left in the
* segment than what nfs_generic_pg_test returned */
@@ -1571,10 +1853,12 @@ static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
- list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ list_splice_tail_init(&hdr->pages, &mirror->pg_list);
nfs_pageio_reset_write_mds(desc);
- desc->pg_recoalesce = 1;
+ mirror->pg_recoalesce = 1;
}
nfs_pgio_data_destroy(hdr);
}
@@ -1608,11 +1892,9 @@ pnfs_do_write(struct nfs_pageio_descriptor *desc,
struct pnfs_layout_segment *lseg = desc->pg_lseg;
enum pnfs_try_status trypnfs;
- desc->pg_lseg = NULL;
trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
if (trypnfs == PNFS_NOT_ATTEMPTED)
pnfs_write_through_mds(desc, hdr);
- pnfs_put_lseg(lseg);
}
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
@@ -1625,24 +1907,23 @@ EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_pgio_header *hdr;
int ret;
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
return -ENOMEM;
}
nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
+
hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
ret = nfs_generic_pgio(desc, hdr);
- if (ret != 0) {
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
- } else
+ if (!ret)
pnfs_do_write(desc, hdr, desc->pg_ioflags);
+
return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
@@ -1687,10 +1968,12 @@ static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
- list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ list_splice_tail_init(&hdr->pages, &mirror->pg_list);
nfs_pageio_reset_read_mds(desc);
- desc->pg_recoalesce = 1;
+ mirror->pg_recoalesce = 1;
}
nfs_pgio_data_destroy(hdr);
}
@@ -1719,18 +2002,29 @@ pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
return trypnfs;
}
+/* Resend all requests through pnfs. */
+int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
+{
+ struct nfs_pageio_descriptor pgio;
+
+ nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
+ return nfs_pageio_resend(&pgio, hdr);
+}
+EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
+
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
struct pnfs_layout_segment *lseg = desc->pg_lseg;
enum pnfs_try_status trypnfs;
+ int err = 0;
- desc->pg_lseg = NULL;
trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
- if (trypnfs == PNFS_NOT_ATTEMPTED)
+ if (trypnfs == PNFS_TRY_AGAIN)
+ err = pnfs_read_resend_pnfs(hdr);
+ if (trypnfs == PNFS_NOT_ATTEMPTED || err)
pnfs_read_through_mds(desc, hdr);
- pnfs_put_lseg(lseg);
}
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
@@ -1743,24 +2037,20 @@ EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_pgio_header *hdr;
int ret;
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
- ret = -ENOMEM;
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
- return ret;
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+ return -ENOMEM;
}
nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
ret = nfs_generic_pgio(desc, hdr);
- if (ret != 0) {
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
- } else
+ if (!ret)
pnfs_do_read(desc, hdr);
return ret;
}
@@ -1966,6 +2256,7 @@ clear_layoutcommitting:
pnfs_clear_layoutcommitting(inode);
goto out;
}
+EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
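
The pnfs.c hunks above let pnfs_generic_pg_init_read/write reuse an already-cached lseg, and make pnfs_generic_pg_test drop and re-acquire the segment (via pg_cleanup/pg_init) when a request starts past the end of the current one. The sketch below models that reuse policy in plain userspace C; the segment granting, the 1 MiB size and every name here are illustrative assumptions, not the kernel implementation.

/*
 * Standalone sketch (not kernel code) of the segment-reuse policy the
 * pnfs.c hunks implement: keep the cached segment across requests and
 * only drop/re-acquire it when a request starts beyond its end.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct segment {
	uint64_t offset;	/* first byte covered */
	uint64_t length;	/* bytes covered */
};

struct pgio {
	struct segment *seg;	/* cached segment, may be NULL */
};

/* stand-in for pnfs_update_layout(): grant a fixed-size 1 MiB segment */
static struct segment *acquire_segment(uint64_t req_offset)
{
	struct segment *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->offset = req_offset & ~((uint64_t)1048576 - 1);
	s->length = 1048576;
	return s;
}

/* how many of @bytes at @offset can go through the cached segment */
static uint64_t pg_test(struct pgio *pg, uint64_t offset, uint64_t bytes)
{
	uint64_t seg_end;

	if (!pg->seg)
		pg->seg = acquire_segment(offset);	/* pg_init */
	if (!pg->seg)
		return 0;				/* fall back to MDS */

	seg_end = pg->seg->offset + pg->seg->length;
	if (offset >= seg_end) {
		/* request past the segment: cleanup, then re-init */
		free(pg->seg);				/* pg_cleanup */
		pg->seg = acquire_segment(offset);	/* pg_init */
		return 0;	/* do not coalesce across segments */
	}
	return bytes < seg_end - offset ? bytes : seg_end - offset;
}

int main(void)
{
	struct pgio pg = { NULL };

	printf("%llu\n", (unsigned long long)pg_test(&pg, 0, 4096));       /* 4096 */
	printf("%llu\n", (unsigned long long)pg_test(&pg, 1048576, 4096)); /* 0: new segment */
	free(pg.seg);
	return 0;
}
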
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 9ae5b765b073..797cd6253adf 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -38,6 +38,25 @@ enum {
NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
NFS_LSEG_ROC, /* roc bit received from server */
NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */
+ NFS_LSEG_LAYOUTRETURN, /* layoutreturn bit set for layoutreturn */
+};
+
+/* Individual IP address */
+struct nfs4_pnfs_ds_addr {
+ struct sockaddr_storage da_addr;
+ size_t da_addrlen;
+ struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ char *da_remotestr; /* human readable addr+port */
+};
+
+struct nfs4_pnfs_ds {
+ struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ char *ds_remotestr; /* comma sep list of addrs */
+ struct list_head ds_addrs;
+ struct nfs_client *ds_clp;
+ atomic_t ds_count;
+ unsigned long ds_state;
+#define NFS4DS_CONNECTING 0 /* ds is establishing connection */
};
struct pnfs_layout_segment {
@@ -53,19 +72,34 @@ struct pnfs_layout_segment {
enum pnfs_try_status {
PNFS_ATTEMPTED = 0,
PNFS_NOT_ATTEMPTED = 1,
+ PNFS_TRY_AGAIN = 2,
};
#ifdef CONFIG_NFS_V4_1
#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
+/*
+ * Default data server connection timeout and retrans values.
+ * Set by module parameters dataserver_timeo and dataserver_retrans.
+ */
+#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
+#define NFS4_DEF_DS_RETRANS 5
+
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
enum {
NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
NFS_LAYOUT_RETURN, /* Return this layout ASAP */
+ NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */
NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */
+ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
+ NFS_LAYOUT_RETRY_LAYOUTGET, /* Retry layoutget */
};
enum layoutdriver_policy_flags {
@@ -106,7 +140,8 @@ struct pnfs_layoutdriver_type {
struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
void (*mark_request_commit) (struct nfs_page *req,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo);
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
void (*clear_request_commit) (struct nfs_page *req,
struct nfs_commit_info *cinfo);
int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
@@ -154,6 +189,7 @@ struct pnfs_layout_hdr {
u32 plh_barrier; /* ignore lower seqids */
unsigned long plh_retry_timestamp;
unsigned long plh_flags;
+ enum pnfs_iomode plh_return_iomode;
loff_t plh_lwb; /* last write byte for layoutcommit */
struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
struct inode *plh_inode;
@@ -185,7 +221,7 @@ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev,
struct rpc_cred *cred);
extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
-extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
+extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
/* pnfs.c */
void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo);
@@ -198,6 +234,7 @@ void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *
int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size);
+void pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *);
int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
size_t pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
struct nfs_page *prev, struct nfs_page *req);
@@ -217,6 +254,7 @@ void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
bool update_barrier);
int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range,
struct nfs4_state *open_state);
int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
@@ -233,17 +271,21 @@ int _pnfs_return_layout(struct inode *);
int pnfs_commit_and_return_layout(struct inode *);
void pnfs_ld_write_done(struct nfs_pgio_header *);
void pnfs_ld_read_done(struct nfs_pgio_header *);
+int pnfs_read_resend_pnfs(struct nfs_pgio_header *);
struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
loff_t pos,
u64 count,
enum pnfs_iomode iomode,
gfp_t gfp_flags);
+void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *);
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
+void pnfs_error_mark_layout_for_return(struct inode *inode,
+ struct pnfs_layout_segment *lseg);
/* nfs4_deviceid_flags */
enum {
@@ -275,6 +317,39 @@ void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node);
bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node);
void nfs4_deviceid_purge_client(const struct nfs_client *);
+/* pnfs_nfs.c */
+void pnfs_generic_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+void pnfs_generic_commit_release(void *calldata);
+void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data);
+void pnfs_generic_rw_release(void *data);
+void pnfs_generic_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo);
+int pnfs_generic_commit_pagelist(struct inode *inode,
+ struct list_head *mds_pages,
+ int how,
+ struct nfs_commit_info *cinfo,
+ int (*initiate_commit)(struct nfs_commit_data *data,
+ int how));
+int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max);
+void pnfs_generic_write_commit_done(struct rpc_task *task, void *data);
+void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
+struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
+ gfp_t gfp_flags);
+void nfs4_pnfs_v3_ds_connect_unload(void);
+void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+ struct nfs4_deviceid_node *devid, unsigned int timeo,
+ unsigned int retrans, u32 version, u32 minor_version,
+ rpc_authflavor_t au_flavor);
+struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
+ struct xdr_stream *xdr,
+ gfp_t gfp_flags);
+
+static inline bool nfs_have_layout(struct inode *inode)
+{
+ return NFS_I(inode)->layout != NULL;
+}
+
static inline struct nfs4_deviceid_node *
nfs4_get_deviceid(struct nfs4_deviceid_node *d)
{
@@ -282,6 +357,26 @@ nfs4_get_deviceid(struct nfs4_deviceid_node *d)
return d;
}
+static inline void pnfs_set_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ if (!test_and_set_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags))
+ atomic_inc(&lo->plh_refcount);
+}
+
+static inline void pnfs_clear_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ if (test_and_clear_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) {
+ atomic_dec(&lo->plh_refcount);
+ /* wake up waiters on LAYOUTRETURN, as it is no longer needed */
+ wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+ }
+}
+
+static inline bool pnfs_should_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags);
+}
+
static inline struct pnfs_layout_segment *
pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
@@ -317,16 +412,22 @@ pnfs_get_ds_info(struct inode *inode)
return ld->get_ds_info(inode);
}
+static inline void
+pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node)
+{
+ set_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (lseg == NULL || ld->mark_request_commit == NULL)
return false;
- ld->mark_request_commit(req, lseg, cinfo);
+ ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx);
return true;
}
@@ -352,15 +453,6 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
}
-static inline void
-pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
- struct nfs_commit_info *cinfo)
-{
- if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
- return;
- NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
-}
-
static inline struct nfs_page *
pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
struct page *page)
@@ -427,6 +519,11 @@ static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id)
#endif /* NFS_DEBUG */
#else /* CONFIG_NFS_V4_1 */
+static inline bool nfs_have_layout(struct inode *inode)
+{
+ return false;
+}
+
static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
{
}
@@ -513,7 +610,7 @@ pnfs_get_ds_info(struct inode *inode)
static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
return false;
}
@@ -531,12 +628,6 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
return 0;
}
-static inline void
-pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
- struct nfs_commit_info *cinfo)
-{
-}
-
static inline struct nfs_page *
pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
struct page *page)
@@ -568,6 +659,10 @@ static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
return NULL;
}
+static inline void nfs4_pnfs_v3_ds_connect_unload(void)
+{
+}
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
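
Among the pnfs.h additions, pnfs_set_retry_layoutget() and pnfs_clear_retry_layoutget() pair a flag bit with the layout refcount so that only the 0->1 transition pins the header and only the 1->0 transition unpins it. Below is a minimal userspace model of that pattern using C11 atomics; the names are illustrative and this is not the kernel API.

/*
 * Sketch of the "take a reference only on the first setter" pattern
 * behind pnfs_set_retry_layoutget()/pnfs_clear_retry_layoutget().
 */
#include <stdatomic.h>
#include <stdio.h>

struct layout {
	atomic_uint refcount;
	atomic_uint retry;	/* 0 or 1, models NFS_LAYOUT_RETRY_LAYOUTGET */
};

static void set_retry(struct layout *lo)
{
	/* like test_and_set_bit(): only the 0->1 transition takes a ref */
	if (atomic_exchange(&lo->retry, 1) == 0)
		atomic_fetch_add(&lo->refcount, 1);
}

static void clear_retry(struct layout *lo)
{
	/* like test_and_clear_bit(): only the 1->0 transition drops it */
	if (atomic_exchange(&lo->retry, 0) == 1)
		atomic_fetch_sub(&lo->refcount, 1);
}

int main(void)
{
	struct layout lo = { 1, 0 };	/* one base reference, retry clear */

	set_retry(&lo);
	set_retry(&lo);			/* second call: flag already set, no extra ref */
	clear_retry(&lo);
	printf("refcount=%u retry=%u\n",
	       atomic_load(&lo.refcount), atomic_load(&lo.retry));
	return 0;			/* prints refcount=1 retry=0 */
}
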
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
new file mode 100644
index 000000000000..fdc4f6562bb7
--- /dev/null
+++ b/fs/nfs/pnfs_nfs.c
@@ -0,0 +1,840 @@
+/*
+ * Common NFS I/O operations for the pNFS file-based
+ * layout drivers.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tom Haynes <loghyr@primarydata.com>
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/module.h>
+
+#include "nfs4session.h"
+#include "internal.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+void pnfs_generic_rw_release(void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ nfs_put_client(hdr->ds_clp);
+ hdr->mds_ops->rpc_release(data);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
+
+/* Fake up some data that will cause nfs_commit_release to retry the writes. */
+void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
+{
+ struct nfs_page *first = nfs_list_entry(data->pages.next);
+
+ data->task.tk_status = 0;
+ memcpy(&data->verf.verifier, &first->wb_verf,
+ sizeof(data->verf.verifier));
+ data->verf.verifier.data[0]++; /* ensure verifier mismatch */
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
+
+void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
+
+ /* Note this may cause RPC to be resent */
+ wdata->mds_ops->rpc_call_done(task, data);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
+
+void pnfs_generic_commit_release(void *calldata)
+{
+ struct nfs_commit_data *data = calldata;
+
+ data->completion_ops->completion(data);
+ pnfs_put_lseg(data->lseg);
+ nfs_put_client(data->ds_clp);
+ nfs_commitdata_release(data);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
+
+/* The generic layer is about to remove the req from the commit list.
+ * If this will make the bucket empty, it will need to put the lseg reference.
+ * Note this must be called holding the inode (/cinfo) lock
+ */
+void
+pnfs_generic_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_layout_segment *freeme = NULL;
+
+ if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
+ goto out;
+ cinfo->ds->nwritten--;
+ if (list_is_singular(&req->wb_list)) {
+ struct pnfs_commit_bucket *bucket;
+
+ bucket = list_first_entry(&req->wb_list,
+ struct pnfs_commit_bucket,
+ written);
+ freeme = bucket->wlseg;
+ bucket->wlseg = NULL;
+ }
+out:
+ nfs_request_remove_commit_list(req, cinfo);
+ pnfs_put_lseg_locked(freeme);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
+
+static int
+pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max)
+{
+ struct nfs_page *req, *tmp;
+ int ret = 0;
+
+ list_for_each_entry_safe(req, tmp, src, wb_list) {
+ if (!nfs_lock_request(req))
+ continue;
+ kref_get(&req->wb_kref);
+ if (cond_resched_lock(cinfo->lock))
+ list_safe_reset_next(req, tmp, wb_list);
+ nfs_request_remove_commit_list(req, cinfo);
+ clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ nfs_list_add_request(req, dst);
+ ret++;
+ if ((ret == max) && !cinfo->dreq)
+ break;
+ }
+ return ret;
+}
+
+static int
+pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo,
+ int max)
+{
+ struct list_head *src = &bucket->written;
+ struct list_head *dst = &bucket->committing;
+ int ret;
+
+ lockdep_assert_held(cinfo->lock);
+ ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
+ if (ret) {
+ cinfo->ds->nwritten -= ret;
+ cinfo->ds->ncommitting += ret;
+ bucket->clseg = bucket->wlseg;
+ if (list_empty(src))
+ bucket->wlseg = NULL;
+ else
+ pnfs_get_lseg(bucket->clseg);
+ }
+ return ret;
+}
+
+/* Move reqs from the written lists to the committing lists,
+ * returning the number moved.
+ */
+int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
+ int max)
+{
+ int i, rv = 0, cnt;
+
+ lockdep_assert_held(cinfo->lock);
+ for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
+ cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
+ cinfo, max);
+ max -= cnt;
+ rv += cnt;
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
+
+/* Pull everything off the committing lists and dump into @dst. */
+void pnfs_generic_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_commit_bucket *b;
+ struct pnfs_layout_segment *freeme;
+ int i;
+
+ lockdep_assert_held(cinfo->lock);
+restart:
+ for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+ if (pnfs_generic_transfer_commit_list(&b->written, dst,
+ cinfo, 0)) {
+ freeme = b->wlseg;
+ b->wlseg = NULL;
+ spin_unlock(cinfo->lock);
+ pnfs_put_lseg(freeme);
+ spin_lock(cinfo->lock);
+ goto restart;
+ }
+ }
+ cinfo->ds->nwritten = 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
+
+static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
+{
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+ struct pnfs_commit_bucket *bucket;
+ struct pnfs_layout_segment *freeme;
+ int i;
+
+ for (i = idx; i < fl_cinfo->nbuckets; i++) {
+ bucket = &fl_cinfo->buckets[i];
+ if (list_empty(&bucket->committing))
+ continue;
+ nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
+ spin_lock(cinfo->lock);
+ freeme = bucket->clseg;
+ bucket->clseg = NULL;
+ spin_unlock(cinfo->lock);
+ pnfs_put_lseg(freeme);
+ }
+}
+
+static unsigned int
+pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
+ struct list_head *list)
+{
+ struct pnfs_ds_commit_info *fl_cinfo;
+ struct pnfs_commit_bucket *bucket;
+ struct nfs_commit_data *data;
+ int i;
+ unsigned int nreq = 0;
+
+ fl_cinfo = cinfo->ds;
+ bucket = fl_cinfo->buckets;
+ for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+ if (list_empty(&bucket->committing))
+ continue;
+ data = nfs_commitdata_alloc();
+ if (!data)
+ break;
+ data->ds_commit_index = i;
+ spin_lock(cinfo->lock);
+ data->lseg = bucket->clseg;
+ bucket->clseg = NULL;
+ spin_unlock(cinfo->lock);
+ list_add(&data->pages, list);
+ nreq++;
+ }
+
+ /* Clean up on error */
+ pnfs_generic_retry_commit(cinfo, i);
+ return nreq;
+}
+
+/* This follows nfs_commit_list pretty closely */
+int
+pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how, struct nfs_commit_info *cinfo,
+ int (*initiate_commit)(struct nfs_commit_data *data,
+ int how))
+{
+ struct nfs_commit_data *data, *tmp;
+ LIST_HEAD(list);
+ unsigned int nreq = 0;
+
+ if (!list_empty(mds_pages)) {
+ data = nfs_commitdata_alloc();
+ if (data != NULL) {
+ data->lseg = NULL;
+ list_add(&data->pages, &list);
+ nreq++;
+ } else {
+ nfs_retry_commit(mds_pages, NULL, cinfo, 0);
+ pnfs_generic_retry_commit(cinfo, 0);
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
+ return -ENOMEM;
+ }
+ }
+
+ nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);
+
+ if (nreq == 0) {
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
+ goto out;
+ }
+
+ atomic_add(nreq, &cinfo->mds->rpcs_out);
+
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ list_del_init(&data->pages);
+ if (!data->lseg) {
+ nfs_init_commit(data, mds_pages, NULL, cinfo);
+ nfs_initiate_commit(NFS_CLIENT(inode), data,
+ NFS_PROTO(data->inode),
+ data->mds_ops, how, 0);
+ } else {
+ struct pnfs_commit_bucket *buckets;
+
+ buckets = cinfo->ds->buckets;
+ nfs_init_commit(data,
+ &buckets[data->ds_commit_index].committing,
+ data->lseg,
+ cinfo);
+ initiate_commit(data, how);
+ }
+ }
+out:
+ cinfo->ds->ncommitting = 0;
+ return PNFS_ATTEMPTED;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
+
+/*
+ * Data server cache
+ *
+ * Data servers can be mapped to different device ids.
+ * nfs4_pnfs_ds reference counting
+ * - set to 1 on allocation
+ * - incremented when a device id maps a data server already in the cache.
+ * - decremented when deviceid is removed from the cache.
+ */
+static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
+static LIST_HEAD(nfs4_data_server_cache);
+
+/* Debug routines */
+static void
+print_ds(struct nfs4_pnfs_ds *ds)
+{
+ if (ds == NULL) {
+ printk(KERN_WARNING "%s NULL device\n", __func__);
+ return;
+ }
+ printk(KERN_WARNING " ds %s\n"
+ " ref count %d\n"
+ " client %p\n"
+ " cl_exchange_flags %x\n",
+ ds->ds_remotestr,
+ atomic_read(&ds->ds_count), ds->ds_clp,
+ ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
+}
+
+static bool
+same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
+{
+ struct sockaddr_in *a, *b;
+ struct sockaddr_in6 *a6, *b6;
+
+ if (addr1->sa_family != addr2->sa_family)
+ return false;
+
+ switch (addr1->sa_family) {
+ case AF_INET:
+ a = (struct sockaddr_in *)addr1;
+ b = (struct sockaddr_in *)addr2;
+
+ if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
+ a->sin_port == b->sin_port)
+ return true;
+ break;
+
+ case AF_INET6:
+ a6 = (struct sockaddr_in6 *)addr1;
+ b6 = (struct sockaddr_in6 *)addr2;
+
+ /* LINKLOCAL addresses must have matching scope_id */
+ if (ipv6_addr_src_scope(&a6->sin6_addr) ==
+ IPV6_ADDR_SCOPE_LINKLOCAL &&
+ a6->sin6_scope_id != b6->sin6_scope_id)
+ return false;
+
+ if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
+ a6->sin6_port == b6->sin6_port)
+ return true;
+ break;
+
+ default:
+ dprintk("%s: unhandled address family: %u\n",
+ __func__, addr1->sa_family);
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
+ const struct list_head *dsaddrs2)
+{
+ struct nfs4_pnfs_ds_addr *da1, *da2;
+
+ /* step through both lists, comparing as we go */
+ for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
+ da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
+ da1 != NULL && da2 != NULL;
+ da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
+ da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
+ if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
+ (struct sockaddr *)&da2->da_addr))
+ return false;
+ }
+ if (da1 == NULL && da2 == NULL)
+ return true;
+
+ return false;
+}
+
+/*
+ * Lookup DS by addresses. nfs4_ds_cache_lock is held
+ */
+static struct nfs4_pnfs_ds *
+_data_server_lookup_locked(const struct list_head *dsaddrs)
+{
+ struct nfs4_pnfs_ds *ds;
+
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
+ return ds;
+ return NULL;
+}
+
+static void destroy_ds(struct nfs4_pnfs_ds *ds)
+{
+ struct nfs4_pnfs_ds_addr *da;
+
+ dprintk("--> %s\n", __func__);
+ ifdebug(FACILITY)
+ print_ds(ds);
+
+ nfs_put_client(ds->ds_clp);
+
+ while (!list_empty(&ds->ds_addrs)) {
+ da = list_first_entry(&ds->ds_addrs,
+ struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ kfree(ds->ds_remotestr);
+ kfree(ds);
+}
+
+void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
+{
+ if (atomic_dec_and_lock(&ds->ds_count,
+ &nfs4_ds_cache_lock)) {
+ list_del_init(&ds->ds_node);
+ spin_unlock(&nfs4_ds_cache_lock);
+ destroy_ds(ds);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);
+
+/*
+ * Create a string with a human-readable address and port to avoid
+ * complicated setup around many dprintks.
+ */
+static char *
+nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds_addr *da;
+ char *remotestr;
+ size_t len;
+ char *p;
+
+ len = 3; /* '{', '}' and terminating NUL */
+ list_for_each_entry(da, dsaddrs, da_node) {
+ len += strlen(da->da_remotestr) + 1; /* string plus comma */
+ }
+
+ remotestr = kzalloc(len, gfp_flags);
+ if (!remotestr)
+ return NULL;
+
+ p = remotestr;
+ *(p++) = '{';
+ len--;
+ list_for_each_entry(da, dsaddrs, da_node) {
+ size_t ll = strlen(da->da_remotestr);
+
+ if (ll > len)
+ goto out_err;
+
+ memcpy(p, da->da_remotestr, ll);
+ p += ll;
+ len -= ll;
+
+ if (len < 1)
+ goto out_err;
+ (*p++) = ',';
+ len--;
+ }
+ if (len < 2)
+ goto out_err;
+ *(p++) = '}';
+ *p = '\0';
+ return remotestr;
+out_err:
+ kfree(remotestr);
+ return NULL;
+}
+
+/*
+ * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to the ds
+ * cache if not already present, and return the cached struct nfs4_pnfs_ds.
+ */
+struct nfs4_pnfs_ds *
+nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
+ char *remotestr;
+
+ if (list_empty(dsaddrs)) {
+ dprintk("%s: no addresses defined\n", __func__);
+ goto out;
+ }
+
+ ds = kzalloc(sizeof(*ds), gfp_flags);
+ if (!ds)
+ goto out;
+
+ /* this is only used for debugging, so it's ok if it's NULL */
+ remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
+
+ spin_lock(&nfs4_ds_cache_lock);
+ tmp_ds = _data_server_lookup_locked(dsaddrs);
+ if (tmp_ds == NULL) {
+ INIT_LIST_HEAD(&ds->ds_addrs);
+ list_splice_init(dsaddrs, &ds->ds_addrs);
+ ds->ds_remotestr = remotestr;
+ atomic_set(&ds->ds_count, 1);
+ INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_clp = NULL;
+ list_add(&ds->ds_node, &nfs4_data_server_cache);
+ dprintk("%s add new data server %s\n", __func__,
+ ds->ds_remotestr);
+ } else {
+ kfree(remotestr);
+ kfree(ds);
+ atomic_inc(&tmp_ds->ds_count);
+ dprintk("%s data server %s found, inc'ed ds_count to %d\n",
+ __func__, tmp_ds->ds_remotestr,
+ atomic_read(&tmp_ds->ds_count));
+ ds = tmp_ds;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+out:
+ return ds;
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
+
+static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
+{
+ might_sleep();
+ wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
+ TASK_KILLABLE);
+}
+
+static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
+{
+ smp_mb__before_atomic();
+ clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
+ smp_mb__after_atomic();
+ wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
+}
+
+static struct nfs_client *(*get_v3_ds_connect)(
+ struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr,
+ int ds_addrlen,
+ int ds_proto,
+ unsigned int ds_timeo,
+ unsigned int ds_retrans,
+ rpc_authflavor_t au_flavor);
+
+static bool load_v3_ds_connect(void)
+{
+ if (!get_v3_ds_connect) {
+ get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
+ WARN_ON_ONCE(!get_v3_ds_connect);
+ }
+
+ return get_v3_ds_connect != NULL;
+}
+
+void __exit nfs4_pnfs_v3_ds_connect_unload(void)
+{
+ if (get_v3_ds_connect) {
+ symbol_put(nfs3_set_ds_client);
+ get_v3_ds_connect = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
+
+static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
+ struct nfs4_pnfs_ds *ds,
+ unsigned int timeo,
+ unsigned int retrans,
+ rpc_authflavor_t au_flavor)
+{
+ struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs4_pnfs_ds_addr *da;
+ int status = 0;
+
+ dprintk("--> %s DS %s au_flavor %d\n", __func__,
+ ds->ds_remotestr, au_flavor);
+
+ if (!load_v3_ds_connect())
+ goto out;
+
+ list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ dprintk("%s: DS %s: trying address %s\n",
+ __func__, ds->ds_remotestr, da->da_remotestr);
+
+ clp = get_v3_ds_connect(mds_srv->nfs_client,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans, au_flavor);
+ if (!IS_ERR(clp))
+ break;
+ }
+
+ if (IS_ERR(clp)) {
+ status = PTR_ERR(clp);
+ goto out;
+ }
+
+ smp_wmb();
+ ds->ds_clp = clp;
+ dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
+out:
+ return status;
+}
+
+static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ struct nfs4_pnfs_ds *ds,
+ unsigned int timeo,
+ unsigned int retrans,
+ u32 minor_version,
+ rpc_authflavor_t au_flavor)
+{
+ struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs4_pnfs_ds_addr *da;
+ int status = 0;
+
+ dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
+ au_flavor);
+
+ list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ dprintk("%s: DS %s: trying address %s\n",
+ __func__, ds->ds_remotestr, da->da_remotestr);
+
+ clp = nfs4_set_ds_client(mds_srv->nfs_client,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans, minor_version,
+ au_flavor);
+ if (!IS_ERR(clp))
+ break;
+ }
+
+ if (IS_ERR(clp)) {
+ status = PTR_ERR(clp);
+ goto out;
+ }
+
+ status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
+ if (status)
+ goto out_put;
+
+ smp_wmb();
+ ds->ds_clp = clp;
+ dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
+out:
+ return status;
+out_put:
+ nfs_put_client(clp);
+ goto out;
+}
+
+/*
+ * Create an rpc connection to the nfs4_pnfs_ds data server.
+ * Currently only supports IPv4 and IPv6 addresses.
+ * If connection fails, make devid unavailable.
+ */
+void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+ struct nfs4_deviceid_node *devid, unsigned int timeo,
+ unsigned int retrans, u32 version,
+ u32 minor_version, rpc_authflavor_t au_flavor)
+{
+ if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
+ int err = 0;
+
+ if (version == 3) {
+ err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
+ retrans, au_flavor);
+ } else if (version == 4) {
+ err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
+ retrans, minor_version,
+ au_flavor);
+ } else {
+ dprintk("%s: unsupported DS version %d\n", __func__,
+ version);
+ err = -EPROTONOSUPPORT;
+ }
+
+ if (err)
+ nfs4_mark_deviceid_unavailable(devid);
+ nfs4_clear_ds_conn_bit(ds);
+ } else {
+ nfs4_wait_ds_connect(ds);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
+
+/*
+ * Currently only supports IPv4, IPv6 and one multipath address.
+ */
+struct nfs4_pnfs_ds_addr *
+nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds_addr *da = NULL;
+ char *buf, *portstr;
+ __be16 port;
+ int nlen, rlen;
+ int tmp[2];
+ __be32 *p;
+ char *netid, *match_netid;
+ size_t len, match_netid_len;
+ char *startsep = "";
+ char *endsep = "";
+
+ /* r_netid */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_err;
+ nlen = be32_to_cpup(p++);
+
+ p = xdr_inline_decode(xdr, nlen);
+ if (unlikely(!p))
+ goto out_err;
+
+ netid = kmalloc(nlen+1, gfp_flags);
+ if (unlikely(!netid))
+ goto out_err;
+
+ netid[nlen] = '\0';
+ memcpy(netid, p, nlen);
+
+ /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_free_netid;
+ rlen = be32_to_cpup(p);
+
+ p = xdr_inline_decode(xdr, rlen);
+ if (unlikely(!p))
+ goto out_free_netid;
+
+ /* port is ".ABC.DEF", 8 chars max */
+ if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
+ dprintk("%s: Invalid address, length %d\n", __func__,
+ rlen);
+ goto out_free_netid;
+ }
+ buf = kmalloc(rlen + 1, gfp_flags);
+ if (!buf) {
+ dprintk("%s: Not enough memory\n", __func__);
+ goto out_free_netid;
+ }
+ buf[rlen] = '\0';
+ memcpy(buf, p, rlen);
+
+ /* replace port '.' with '-' */
+ portstr = strrchr(buf, '.');
+ if (!portstr) {
+ dprintk("%s: Failed finding expected dot in port\n",
+ __func__);
+ goto out_free_buf;
+ }
+ *portstr = '-';
+
+ /* find '.' between address and port */
+ portstr = strrchr(buf, '.');
+ if (!portstr) {
+ dprintk("%s: Failed finding expected dot between address and "
+ "port\n", __func__);
+ goto out_free_buf;
+ }
+ *portstr = '\0';
+
+ da = kzalloc(sizeof(*da), gfp_flags);
+ if (unlikely(!da))
+ goto out_free_buf;
+
+ INIT_LIST_HEAD(&da->da_node);
+
+ if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
+ sizeof(da->da_addr))) {
+ dprintk("%s: error parsing address %s\n", __func__, buf);
+ goto out_free_da;
+ }
+
+ portstr++;
+ sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
+ port = htons((tmp[0] << 8) | (tmp[1]));
+
+ switch (da->da_addr.ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
+ da->da_addrlen = sizeof(struct sockaddr_in);
+ match_netid = "tcp";
+ match_netid_len = 3;
+ break;
+
+ case AF_INET6:
+ ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
+ da->da_addrlen = sizeof(struct sockaddr_in6);
+ match_netid = "tcp6";
+ match_netid_len = 4;
+ startsep = "[";
+ endsep = "]";
+ break;
+
+ default:
+ dprintk("%s: unsupported address family: %u\n",
+ __func__, da->da_addr.ss_family);
+ goto out_free_da;
+ }
+
+ if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
+ dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
+ __func__, netid, match_netid);
+ goto out_free_da;
+ }
+
+ /* save human readable address */
+ len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
+ da->da_remotestr = kzalloc(len, gfp_flags);
+
+ /* NULL is ok, only used for dprintk */
+ if (da->da_remotestr)
+ snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
+ buf, endsep, ntohs(port));
+
+ dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
+ kfree(buf);
+ kfree(netid);
+ return da;
+
+out_free_da:
+ kfree(da);
+out_free_buf:
+ dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
+ kfree(buf);
+out_free_netid:
+ kfree(netid);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
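
nfs4_decode_mp_ds_addr() above parses the RFC 5665 universal-address form, where the last two dot-separated decimal octets carry the port as hi.lo with port = hi * 256 + lo (so an address ending in .8.1 means port 2049). The following standalone sketch shows that convention; parse_uaddr() and its buffer sizes are assumptions for illustration, not the kernel parser.

/*
 * Split "a.b.c.d.hi.lo" into address and port, per RFC 5665:
 * port = hi * 256 + lo.
 */
#include <stdio.h>
#include <string.h>

static int parse_uaddr(const char *uaddr, char *host, size_t hostlen,
		       unsigned int *port)
{
	char buf[64];
	char *dot1, *dot2;
	unsigned int hi, lo;

	if (strlen(uaddr) >= sizeof(buf))
		return -1;
	strcpy(buf, uaddr);

	dot1 = strrchr(buf, '.');		/* dot before "lo" */
	if (!dot1)
		return -1;
	*dot1 = '\0';
	dot2 = strrchr(buf, '.');		/* dot before "hi" */
	if (!dot2)
		return -1;
	*dot2 = '\0';

	if (sscanf(dot2 + 1, "%u", &hi) != 1 ||
	    sscanf(dot1 + 1, "%u", &lo) != 1 || hi > 255 || lo > 255)
		return -1;

	*port = hi * 256 + lo;			/* (hi << 8) | lo */
	snprintf(host, hostlen, "%s", buf);	/* remaining text is the address */
	return 0;
}

int main(void)
{
	char host[64];
	unsigned int port;

	if (!parse_uaddr("192.0.2.7.8.1", host, sizeof(host), &port))
		printf("%s port %u\n", host, port);	/* 192.0.2.7 port 2049 */
	return 0;
}
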
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c91a4799c562..568ecf0a880f 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -70,8 +70,15 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
+ struct nfs_pgio_mirror *mirror;
+
pgio->pg_ops = &nfs_pgio_rw_ops;
- pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
+
+ /* read path should never have more than one mirror */
+ WARN_ON_ONCE(pgio->pg_mirror_count != 1);
+
+ mirror = &pgio->pg_mirrors[0];
+ mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
@@ -81,6 +88,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
struct nfs_page *new;
unsigned int len;
struct nfs_pageio_descriptor pgio;
+ struct nfs_pgio_mirror *pgm;
len = nfs_page_length(page);
if (len == 0)
@@ -97,7 +105,13 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
&nfs_async_read_completion_ops);
nfs_pageio_add_request(&pgio, new);
nfs_pageio_complete(&pgio);
- NFS_I(inode)->read_io += pgio.pg_bytes_written;
+
+ /* It doesn't make sense to do mirrored reads! */
+ WARN_ON_ONCE(pgio.pg_mirror_count != 1);
+
+ pgm = &pgio.pg_mirrors[0];
+ NFS_I(inode)->read_io += pgm->pg_bytes_written;
+
return 0;
}
@@ -168,13 +182,14 @@ out:
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
struct rpc_message *msg,
+ const struct nfs_rpc_ops *rpc_ops,
struct rpc_task_setup *task_setup_data, int how)
{
struct inode *inode = hdr->inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
task_setup_data->flags |= swap_flags;
- NFS_PROTO(inode)->read_setup(hdr, msg);
+ rpc_ops->read_setup(hdr, msg);
}
static void
@@ -351,6 +366,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct nfs_pageio_descriptor pgio;
+ struct nfs_pgio_mirror *pgm;
struct nfs_readdesc desc = {
.pgio = &pgio,
};
@@ -386,10 +402,15 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
&nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
-
nfs_pageio_complete(&pgio);
- NFS_I(inode)->read_io += pgio.pg_bytes_written;
- npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+ /* It doesn't make sense to do mirrored reads! */
+ WARN_ON_ONCE(pgio.pg_mirror_count != 1);
+
+ pgm = &pgio.pg_mirrors[0];
+ NFS_I(inode)->read_io += pgm->pg_bytes_written;
+ npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
put_nfs_open_context(desc.ctx);
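
The read.c changes only move the byte accounting to the first (and, for reads, only) mirror; the page count itself is still the usual round-up, npages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT. A tiny standalone check of that arithmetic, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* round a byte count up to whole pages */
static unsigned long bytes_to_pages(unsigned long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", bytes_to_pages(0));	/* 0 */
	printf("%lu\n", bytes_to_pages(1));	/* 1 */
	printf("%lu\n", bytes_to_pages(8192));	/* 2 */
	printf("%lu\n", bytes_to_pages(8193));	/* 3 */
	return 0;
}
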
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 31a11b0e885d..322b2de02988 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -311,7 +311,6 @@ const struct super_operations nfs_sops = {
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
.drop_inode = nfs_drop_inode,
- .put_super = nfs_put_super,
.statfs = nfs_statfs,
.evict_inode = nfs_evict_inode,
.umount_begin = nfs_umount_begin,
@@ -405,12 +404,15 @@ void __exit unregister_nfs_fs(void)
unregister_filesystem(&nfs_fs_type);
}
-void nfs_sb_active(struct super_block *sb)
+bool nfs_sb_active(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
- if (atomic_inc_return(&server->active) == 1)
- atomic_inc(&sb->s_active);
+ if (!atomic_inc_not_zero(&sb->s_active))
+ return false;
+ if (atomic_inc_return(&server->active) != 1)
+ atomic_dec(&sb->s_active);
+ return true;
}
EXPORT_SYMBOL_GPL(nfs_sb_active);
@@ -2569,7 +2571,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
error = nfs_bdi_register(server);
if (error) {
mntroot = ERR_PTR(error);
- goto error_splat_bdi;
+ goto error_splat_super;
}
server->super = s;
}
@@ -2601,9 +2603,6 @@ error_splat_root:
dput(mntroot);
mntroot = ERR_PTR(error);
error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
deactivate_locked_super(s);
goto out;
}
@@ -2651,27 +2650,19 @@ out:
EXPORT_SYMBOL_GPL(nfs_fs_mount);
/*
- * Ensure that we unregister the bdi before kill_anon_super
- * releases the device name
- */
-void nfs_put_super(struct super_block *s)
-{
- struct nfs_server *server = NFS_SB(s);
-
- bdi_unregister(&server->backing_dev_info);
-}
-EXPORT_SYMBOL_GPL(nfs_put_super);
-
-/*
* Destroy an NFS2/3 superblock
*/
void nfs_kill_super(struct super_block *s)
{
struct nfs_server *server = NFS_SB(s);
+ dev_t dev = s->s_dev;
+
+ generic_shutdown_super(s);
- kill_anon_super(s);
nfs_fscache_release_super_cookie(s);
+
nfs_free_server(server);
+ free_anon_bdev(dev);
}
EXPORT_SYMBOL_GPL(nfs_kill_super);
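
The new nfs_sb_active() uses atomic_inc_not_zero() on sb->s_active so a superblock that has already started tearing down is never revived, and it drops the extra reference again if the per-server count was already held. Below is a userspace sketch of that try-get idiom with C11 atomics; the helper name mirrors the kernel one, but the code is only a model.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* take a reference only if the count is still nonzero */
static bool atomic_inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;	/* took a reference */
	}
	return false;			/* already zero: object is dying */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", atomic_inc_not_zero(&live));	/* 1 */
	printf("dying: %d\n", atomic_inc_not_zero(&dying));	/* 0 */
	return 0;
}
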
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..88a6d2196ece 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -473,13 +473,18 @@ try_again:
do {
/*
* Subrequests are always contiguous, non overlapping
- * and in order. If not, it's a programming error.
+ * and in order - but may be repeated (mirrored writes).
*/
- WARN_ON_ONCE(subreq->wb_offset !=
- (head->wb_offset + total_bytes));
-
- /* keep track of how many bytes this group covers */
- total_bytes += subreq->wb_bytes;
+ if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
+ /* keep track of how many bytes this group covers */
+ total_bytes += subreq->wb_bytes;
+ } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
+ ((subreq->wb_offset + subreq->wb_bytes) >
+ (head->wb_offset + total_bytes)))) {
+ nfs_page_group_unlock(head);
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-EIO);
+ }
if (!nfs_lock_request(subreq)) {
/* releases page group bit lock and
@@ -786,7 +791,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
spin_unlock(cinfo->lock);
if (!cinfo->dreq) {
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+ inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
BDI_RECLAIMABLE);
__mark_inode_dirty(req->wb_context->dentry->d_inode,
I_DIRTY_DATASYNC);
@@ -842,9 +847,9 @@ EXPORT_SYMBOL_GPL(nfs_init_cinfo);
*/
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
- if (pnfs_mark_request_commit(req, lseg, cinfo))
+ if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
return;
nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}
@@ -853,7 +858,7 @@ static void
nfs_clear_page_commit(struct page *page)
{
dec_zone_page_state(page, NR_UNSTABLE_NFS);
- dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+ dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
}
/* Called holding inode (/cinfo) lock */
@@ -900,7 +905,8 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
}
if (nfs_write_need_commit(hdr)) {
memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
- nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+ hdr->pgio_mirror_idx);
goto next;
}
remove_req:
@@ -1091,6 +1097,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct nfs_lock_context *l_ctx;
+ struct file_lock_context *flctx = file_inode(file)->i_flctx;
struct nfs_page *req;
int do_flush, status;
/*
@@ -1109,7 +1116,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
do_flush = req->wb_page != page || req->wb_context != ctx;
/* for now, flush if more than 1 request in page_group */
do_flush |= req->wb_this_page != req;
- if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+ if (l_ctx && flctx &&
+ !(list_empty_careful(&flctx->flc_posix) &&
+ list_empty_careful(&flctx->flc_flock))) {
do_flush |= l_ctx->lockowner.l_owner != current->files
|| l_ctx->lockowner.l_pid != current->tgid;
}
@@ -1170,6 +1179,13 @@ out:
return PageUptodate(page) != 0;
}
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+ return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+ fl->fl_type == F_WRLCK;
+}
+
/* If we know the page is up to date, and we're not using byte range locks (or
* if we have the whole file locked for writing), it may be more efficient to
* extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1196,36 @@ out:
*/
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
+ int ret;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock *fl;
+
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_write_pageuptodate(page, inode))
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
- if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
- inode->i_flock->fl_end == OFFSET_MAX &&
- inode->i_flock->fl_type != F_RDLCK))
- return 1;
- return 0;
+ if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+ list_empty_careful(&flctx->flc_posix)))
+ return 0;
+
+ /* Check to see if there are whole file write locks */
+ ret = 0;
+ spin_lock(&flctx->flc_lock);
+ if (!list_empty(&flctx->flc_posix)) {
+ fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+ fl_list);
+ if (is_whole_file_wrlock(fl))
+ ret = 1;
+ } else if (!list_empty(&flctx->flc_flock)) {
+ fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+ fl_list);
+ if (fl->fl_type == F_WRLCK)
+ ret = 1;
+ }
+ spin_unlock(&flctx->flc_lock);
+ return ret;
}
/*
@@ -1240,15 +1275,15 @@ static int flush_task_priority(int how)
static void nfs_initiate_write(struct nfs_pgio_header *hdr,
struct rpc_message *msg,
+ const struct nfs_rpc_ops *rpc_ops,
struct rpc_task_setup *task_setup_data, int how)
{
- struct inode *inode = hdr->inode;
int priority = flush_task_priority(how);
task_setup_data->priority = priority;
- NFS_PROTO(inode)->write_setup(hdr, msg);
+ rpc_ops->write_setup(hdr, msg);
- nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
+ nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
&task_setup_data->rpc_client, msg, hdr);
}
@@ -1298,8 +1333,14 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
+ struct nfs_pgio_mirror *mirror;
+
pgio->pg_ops = &nfs_pgio_rw_ops;
- pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
+
+ nfs_pageio_stop_mirroring(pgio);
+
+ mirror = &pgio->pg_mirrors[0];
+ mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
@@ -1465,6 +1506,7 @@ void nfs_commitdata_release(struct nfs_commit_data *data)
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
+ const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
int how, int flags)
{
@@ -1486,7 +1528,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
.priority = priority,
};
/* Set up the initial task struct. */
- NFS_PROTO(data->inode)->commit_setup(data, &msg);
+ nfs_ops->commit_setup(data, &msg);
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
@@ -1554,17 +1596,18 @@ EXPORT_SYMBOL_GPL(nfs_init_commit);
void nfs_retry_commit(struct list_head *page_list,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx)
{
struct nfs_page *req;
while (!list_empty(page_list)) {
req = nfs_list_entry(page_list->next);
nfs_list_remove_request(req);
- nfs_mark_request_commit(req, lseg, cinfo);
+ nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
if (!cinfo->dreq) {
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+ dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
BDI_RECLAIMABLE);
}
nfs_unlock_and_release_request(req);
@@ -1589,10 +1632,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
/* Set up the argument struct */
nfs_init_commit(data, head, NULL, cinfo);
atomic_inc(&cinfo->mds->rpcs_out);
- return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
- how, 0);
+ return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
+ data->mds_ops, how, 0);
out_bad:
- nfs_retry_commit(head, NULL, cinfo);
+ nfs_retry_commit(head, NULL, cinfo, 0);
cinfo->completion_ops->error_cleanup(NFS_I(inode));
return -ENOMEM;
}
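
The write.c hunk relaxes the subrequest check: a subrequest must either extend the page group contiguously or fall entirely within the bytes already counted (a repeated, mirrored write); anything else is treated as an error. Here is a standalone model of that validation, with illustrative names only.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct req { uint64_t offset, bytes; };

/* returns false for a malformed group */
static bool account_subreq(uint64_t head_off, uint64_t *total,
			   const struct req *sub)
{
	if (sub->offset == head_off + *total) {
		*total += sub->bytes;		/* contiguous: extend the group */
		return true;
	}
	/* otherwise it must be fully covered already (mirrored write) */
	return sub->offset >= head_off &&
	       sub->offset + sub->bytes <= head_off + *total;
}

int main(void)
{
	uint64_t total = 0;
	struct req a = { 0, 4096 }, b = { 4096, 4096 };
	struct req dup = { 0, 4096 }, bad = { 16384, 4096 };

	printf("%d\n", account_subreq(0, &total, &a));	 /* 1 */
	printf("%d\n", account_subreq(0, &total, &b));	 /* 1 */
	printf("%d\n", account_subreq(0, &total, &dup)); /* 1: repeated subrequest */
	printf("%d\n", account_subreq(0, &total, &bad)); /* 0: gap -> error */
	printf("total=%llu\n", (unsigned long long)total); /* 8192 */
	return 0;
}
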
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 73395156bdb4..683bf718aead 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,16 @@ config NFSD_V4
If unsure, say N.
+config NFSD_PNFS
+ bool "NFSv4.1 server support for Parallel NFS (pNFS)"
+ depends on NFSD_V4
+ help
+ This option enables support for the parallel NFS features of
+ minor version 1 of the NFSv4 protocol (RFC 5661) in the kernel's
+ NFS server.
+
+ If unsure, say N.
+
config NFSD_V4_SECURITY_LABEL
bool "Provide Security Label support for NFSv4 server"
depends on NFSD_V4 && SECURITY
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index af32ef06b4fe..9a6028e120c6 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -2,9 +2,14 @@
# Makefile for the Linux nfs server
#
+ccflags-y += -I$(src) # needed for trace events
+
obj-$(CONFIG_NFSD) += nfsd.o
-nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
+# this one should be compiled first, as the tracing macros can easily blow up
+nfsd-y += trace.o
+
+nfsd-y += nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o
nfsd-$(CONFIG_NFSD_FAULT_INJECTION) += fault_inject.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
@@ -12,3 +17,4 @@ nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
nfs4acl.o nfs4callback.o nfs4recover.o
+nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o blocklayout.o blocklayoutxdr.o
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
new file mode 100644
index 000000000000..cdbc78c72542
--- /dev/null
+++ b/fs/nfsd/blocklayout.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include <linux/exportfs.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+
+#include <linux/nfsd/debug.h>
+
+#include "blocklayoutxdr.h"
+#include "pnfs.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+
+static int
+nfsd4_block_get_device_info_simple(struct super_block *sb,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_block_deviceaddr *dev;
+ struct pnfs_block_volume *b;
+
+ dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
+ sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ gdp->gd_device = dev;
+
+ dev->nr_volumes = 1;
+ b = &dev->volumes[0];
+
+ b->type = PNFS_BLOCK_VOLUME_SIMPLE;
+ b->simple.sig_len = PNFS_BLOCK_UUID_LEN;
+ return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len,
+ &b->simple.offset);
+}
+
+static __be32
+nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ if (sb->s_bdev != sb->s_bdev->bd_contains)
+ return nfserr_inval;
+ return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp));
+}
+
+static __be32
+nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
+ struct nfsd4_layoutget *args)
+{
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ struct super_block *sb = inode->i_sb;
+ u32 block_size = (1 << inode->i_blkbits);
+ struct pnfs_block_extent *bex;
+ struct iomap iomap;
+ u32 device_generation = 0;
+ int error;
+
+ /*
+ * We do not attempt to support I/O smaller than the fs block size,
+ * or I/O that is not aligned to it.
+ */
+ if (args->lg_minlength < block_size) {
+ dprintk("pnfsd: I/O too small\n");
+ goto out_layoutunavailable;
+ }
+ if (seg->offset & (block_size - 1)) {
+ dprintk("pnfsd: I/O misaligned\n");
+ goto out_layoutunavailable;
+ }
+
+ /*
+ * Some clients barf on non-zero block numbers for NONE or INVALID
+ * layouts, so make sure to zero the whole structure.
+ */
+ error = -ENOMEM;
+ bex = kzalloc(sizeof(*bex), GFP_KERNEL);
+ if (!bex)
+ goto out_error;
+ args->lg_content = bex;
+
+ error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
+ &iomap, seg->iomode != IOMODE_READ,
+ &device_generation);
+ if (error) {
+ if (error == -ENXIO)
+ goto out_layoutunavailable;
+ goto out_error;
+ }
+
+ if (iomap.length < args->lg_minlength) {
+ dprintk("pnfsd: extent smaller than minlength\n");
+ goto out_layoutunavailable;
+ }
+
+ switch (iomap.type) {
+ case IOMAP_MAPPED:
+ if (seg->iomode == IOMODE_READ)
+ bex->es = PNFS_BLOCK_READ_DATA;
+ else
+ bex->es = PNFS_BLOCK_READWRITE_DATA;
+ bex->soff = (iomap.blkno << 9);
+ break;
+ case IOMAP_UNWRITTEN:
+ if (seg->iomode & IOMODE_RW) {
+ /*
+ * Crack monkey special case from section 2.3.1.
+ */
+ if (args->lg_minlength == 0) {
+ dprintk("pnfsd: no soup for you!\n");
+ goto out_layoutunavailable;
+ }
+
+ bex->es = PNFS_BLOCK_INVALID_DATA;
+ bex->soff = (iomap.blkno << 9);
+ break;
+ }
+ /*FALLTHRU*/
+ case IOMAP_HOLE:
+ if (seg->iomode == IOMODE_READ) {
+ bex->es = PNFS_BLOCK_NONE_DATA;
+ break;
+ }
+ /*FALLTHRU*/
+ case IOMAP_DELALLOC:
+ default:
+ WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
+ goto out_layoutunavailable;
+ }
+
+ error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
+ if (error)
+ goto out_error;
+ bex->foff = iomap.offset;
+ bex->len = iomap.length;
+
+ seg->offset = iomap.offset;
+ seg->length = iomap.length;
+
+ dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es);
+ return 0;
+
+out_error:
+ seg->length = 0;
+ return nfserrno(error);
+out_layoutunavailable:
+ seg->length = 0;
+ return nfserr_layoutunavailable;
+}
+
+static __be32
+nfsd4_block_proc_layoutcommit(struct inode *inode,
+ struct nfsd4_layoutcommit *lcp)
+{
+ loff_t new_size = lcp->lc_last_wr + 1;
+ struct iattr iattr = { .ia_valid = 0 };
+ struct iomap *iomaps;
+ int nr_iomaps;
+ int error;
+
+ nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
+ lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+ if (nr_iomaps < 0)
+ return nfserrno(nr_iomaps);
+
+ if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
+ timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
+ lcp->lc_mtime = current_fs_time(inode->i_sb);
+ iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
+ iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
+
+ if (new_size > i_size_read(inode)) {
+ iattr.ia_valid |= ATTR_SIZE;
+ iattr.ia_size = new_size;
+ }
+
+ error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
+ nr_iomaps, &iattr);
+ kfree(iomaps);
+ return nfserrno(error);
+}
+
+const struct nfsd4_layout_ops bl_layout_ops = {
+ .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
+ .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
+ .proc_layoutget = nfsd4_block_proc_layoutget,
+ .encode_layoutget = nfsd4_block_encode_layoutget,
+ .proc_layoutcommit = nfsd4_block_proc_layoutcommit,
+};
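
nfsd4_block_proc_layoutget() above converts the iomap returned by ->map_blocks() into a block extent: file offset and length are copied through, the storage offset is the 512-byte sector number shifted into bytes, and the extent state follows from the mapping type and I/O mode. A simplified userspace sketch of that conversion; the struct fields here are assumptions that mirror the hunk, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

enum ext_state { RW_DATA, READ_DATA, INVALID_DATA, NONE_DATA };

struct simple_iomap {		/* subset of the fields used above */
	uint64_t blkno;		/* in 512-byte sectors */
	uint64_t offset;	/* file offset, bytes */
	uint64_t length;	/* bytes */
	int mapped;		/* 1 = mapped, 0 = hole */
};

struct extent {
	uint64_t foff, len, soff;
	enum ext_state es;
};

static void iomap_to_extent(const struct simple_iomap *im, int read_only,
			    struct extent *ex)
{
	ex->foff = im->offset;
	ex->len  = im->length;
	ex->soff = im->blkno << 9;	/* sectors -> bytes */
	if (!im->mapped)
		ex->es = NONE_DATA;	/* hole, reads back as zeroes */
	else
		ex->es = read_only ? READ_DATA : RW_DATA;
}

int main(void)
{
	struct simple_iomap im = { .blkno = 2048, .offset = 0,
				   .length = 1048576, .mapped = 1 };
	struct extent ex;

	iomap_to_extent(&im, 0, &ex);
	/* 2048 sectors * 512 = 1048576-byte disk offset */
	printf("foff=%llu len=%llu soff=%llu es=%d\n",
	       (unsigned long long)ex.foff, (unsigned long long)ex.len,
	       (unsigned long long)ex.soff, ex.es);
	return 0;
}
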
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
new file mode 100644
index 000000000000..9da89fddab33
--- /dev/null
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include <linux/sunrpc/svc.h>
+#include <linux/exportfs.h>
+#include <linux/nfs4.h>
+
+#include "nfsd.h"
+#include "blocklayoutxdr.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+
+__be32
+nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp)
+{
+ struct pnfs_block_extent *b = lgp->lg_content;
+ int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, sizeof(__be32) + len);
+ if (!p)
+ return nfserr_toosmall;
+
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(1); /* we always return a single extent */
+
+ p = xdr_encode_opaque_fixed(p, &b->vol_id,
+ sizeof(struct nfsd4_deviceid));
+ p = xdr_encode_hyper(p, b->foff);
+ p = xdr_encode_hyper(p, b->len);
+ p = xdr_encode_hyper(p, b->soff);
+ *p++ = cpu_to_be32(b->es);
+ return 0;
+}
+
+static int
+nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
+{
+ __be32 *p;
+ int len;
+
+ switch (b->type) {
+ case PNFS_BLOCK_VOLUME_SIMPLE:
+ len = 4 + 4 + 8 + 4 + b->simple.sig_len;
+ p = xdr_reserve_space(xdr, len);
+ if (!p)
+ return -ETOOSMALL;
+
+ *p++ = cpu_to_be32(b->type);
+ *p++ = cpu_to_be32(1); /* single signature */
+ p = xdr_encode_hyper(p, b->simple.offset);
+ p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return len;
+}
+
+__be32
+nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_block_deviceaddr *dev = gdp->gd_device;
+ int len = sizeof(__be32), ret, i;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, len + sizeof(__be32));
+ if (!p)
+ return nfserr_resource;
+
+ for (i = 0; i < dev->nr_volumes; i++) {
+ ret = nfsd4_block_encode_volume(xdr, &dev->volumes[i]);
+ if (ret < 0)
+ return nfserrno(ret);
+ len += ret;
+ }
+
+ /*
+ * Fill in the overall length and number of volumes at the beginning
+ * of the layout.
+ */
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(dev->nr_volumes);
+ return 0;
+}
+
+int
+nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+ u32 block_size)
+{
+ struct iomap *iomaps;
+ u32 nr_iomaps, expected, i;
+
+ if (len < sizeof(u32)) {
+ dprintk("%s: extent array too small: %u\n", __func__, len);
+ return -EINVAL;
+ }
+
+ nr_iomaps = be32_to_cpup(p++);
+ expected = sizeof(__be32) + nr_iomaps * NFS4_BLOCK_EXTENT_SIZE;
+ if (len != expected) {
+ dprintk("%s: extent array size mismatch: %u/%u\n",
+ __func__, len, expected);
+ return -EINVAL;
+ }
+
+ iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
+ if (!iomaps) {
+ dprintk("%s: failed to allocate extent array\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_iomaps; i++) {
+ struct pnfs_block_extent bex;
+
+ memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
+ p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+
+ p = xdr_decode_hyper(p, &bex.foff);
+ if (bex.foff & (block_size - 1)) {
+ dprintk("%s: unaligned offset %lld\n",
+ __func__, bex.foff);
+ goto fail;
+ }
+ p = xdr_decode_hyper(p, &bex.len);
+ if (bex.len & (block_size - 1)) {
+ dprintk("%s: unaligned length %lld\n",
+ __func__, bex.len);
+ goto fail;
+ }
+ p = xdr_decode_hyper(p, &bex.soff);
+ if (bex.soff & (block_size - 1)) {
+ dprintk("%s: unaligned disk offset %lld\n",
+ __func__, bex.soff);
+ goto fail;
+ }
+ bex.es = be32_to_cpup(p++);
+ if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
+ dprintk("%s: incorrect extent state %d\n",
+ __func__, bex.es);
+ goto fail;
+ }
+
+ iomaps[i].offset = bex.foff;
+ iomaps[i].length = bex.len;
+ }
+
+ *iomapp = iomaps;
+ return nr_iomaps;
+fail:
+ kfree(iomaps);
+ return -EINVAL;
+}
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
new file mode 100644
index 000000000000..fdc79037c0e7
--- /dev/null
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -0,0 +1,62 @@
+#ifndef _NFSD_BLOCKLAYOUTXDR_H
+#define _NFSD_BLOCKLAYOUTXDR_H 1
+
+#include <linux/blkdev.h>
+#include "xdr4.h"
+
+struct iomap;
+struct xdr_stream;
+
+enum pnfs_block_extent_state {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2,
+ PNFS_BLOCK_NONE_DATA = 3,
+};
+
+struct pnfs_block_extent {
+ struct nfsd4_deviceid vol_id;
+ u64 foff;
+ u64 len;
+ u64 soff;
+ enum pnfs_block_extent_state es;
+};
+#define NFS4_BLOCK_EXTENT_SIZE 44
+
+enum pnfs_block_volume_type {
+ PNFS_BLOCK_VOLUME_SIMPLE = 0,
+ PNFS_BLOCK_VOLUME_SLICE = 1,
+ PNFS_BLOCK_VOLUME_CONCAT = 2,
+ PNFS_BLOCK_VOLUME_STRIPE = 3,
+};
+
+/*
+ * Arbitrary upper cap on the UUID length to avoid unbounded allocations.
+ * The protocol itself does not limit it.
+ */
+#define PNFS_BLOCK_UUID_LEN 128
+
+struct pnfs_block_volume {
+ enum pnfs_block_volume_type type;
+ union {
+ struct {
+ u64 offset;
+ u32 sig_len;
+ u8 sig[PNFS_BLOCK_UUID_LEN];
+ } simple;
+ };
+};
+
+struct pnfs_block_deviceaddr {
+ u32 nr_volumes;
+ struct pnfs_block_volume volumes[];
+};
+
+__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp);
+__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp);
+int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+ u32 block_size);
+
+#endif /* _NFSD_BLOCKLAYOUTXDR_H */
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 30a739d896ff..c3e3b6e55ae2 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -20,6 +20,7 @@
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
+#include "pnfs.h"
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -545,6 +546,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_client = dom;
exp.cd = cd;
+ exp.ex_devid_map = NULL;
/* expiry */
err = -EINVAL;
@@ -621,6 +623,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
if (!gid_valid(exp.ex_anon_gid))
goto out4;
err = 0;
+
+ nfsd4_setup_layout_type(&exp);
}
expp = svc_export_lookup(&exp);
@@ -703,6 +707,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
+ new->ex_layout_type = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
}
@@ -717,6 +722,8 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
new->ex_anon_uid = item->ex_anon_uid;
new->ex_anon_gid = item->ex_anon_gid;
new->ex_fsid = item->ex_fsid;
+ new->ex_devid_map = item->ex_devid_map;
+ item->ex_devid_map = NULL;
new->ex_uuid = item->ex_uuid;
item->ex_uuid = NULL;
new->ex_fslocs.locations = item->ex_fslocs.locations;
@@ -725,6 +732,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
item->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = item->ex_fslocs.migrated;
item->ex_fslocs.migrated = 0;
+ new->ex_layout_type = item->ex_layout_type;
new->ex_nflavors = item->ex_nflavors;
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 04dc8c167b0c..1f52bfcc436f 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -56,6 +56,8 @@ struct svc_export {
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
+ enum pnfs_layouttype ex_layout_type;
+ struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
};
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7cbdf1b2e4ab..58277859a467 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -546,6 +546,102 @@ out:
return status;
}
+#ifdef CONFIG_NFSD_PNFS
+/*
+ * CB_LAYOUTRECALL4args
+ *
+ * struct layoutrecall_file4 {
+ * nfs_fh4 lor_fh;
+ * offset4 lor_offset;
+ * length4 lor_length;
+ * stateid4 lor_stateid;
+ * };
+ *
+ * union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
+ * case LAYOUTRECALL4_FILE:
+ * layoutrecall_file4 lor_layout;
+ * case LAYOUTRECALL4_FSID:
+ * fsid4 lor_fsid;
+ * case LAYOUTRECALL4_ALL:
+ * void;
+ * };
+ *
+ * struct CB_LAYOUTRECALL4args {
+ * layouttype4 clora_type;
+ * layoutiomode4 clora_iomode;
+ * bool clora_changed;
+ * layoutrecall4 clora_recall;
+ * };
+ */
+static void encode_cb_layout4args(struct xdr_stream *xdr,
+ const struct nfs4_layout_stateid *ls,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ BUG_ON(hdr->minorversion == 0);
+
+ p = xdr_reserve_space(xdr, 5 * 4);
+ *p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
+ *p++ = cpu_to_be32(ls->ls_layout_type);
+ *p++ = cpu_to_be32(IOMODE_ANY);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(RETURN_FILE);
+
+ encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
+
+ p = xdr_reserve_space(xdr, 2 * 8);
+ p = xdr_encode_hyper(p, 0);
+ xdr_encode_hyper(p, NFS4_MAX_UINT64);
+
+ encode_stateid4(xdr, &ls->ls_recall_sid);
+
+ hdr->nops++;
+}
+
+static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfsd4_callback *cb)
+{
+ const struct nfs4_layout_stateid *ls =
+ container_of(cb, struct nfs4_layout_stateid, ls_recall);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = 0,
+ .minorversion = cb->cb_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_layout4args(xdr, ls, &hdr);
+ encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_compound_hdr hdr;
+ enum nfsstat4 nfserr;
+ int status;
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ goto out;
+ if (cb) {
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status))
+ goto out;
+ }
+ status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr);
+ if (unlikely(status))
+ goto out;
+ if (unlikely(nfserr != NFS4_OK))
+ status = nfs_cb_stat_to_errno(nfserr);
+out:
+ return status;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
/*
* RPC procedure tables
*/
@@ -563,6 +659,9 @@ out:
static struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NULL, NULL, cb_null, cb_null),
PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
+#ifdef CONFIG_NFSD_PNFS
+ PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
+#endif
};
static struct rpc_version nfs_cb_version4 = {
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
new file mode 100644
index 000000000000..3c1bfa155571
--- /dev/null
+++ b/fs/nfsd/nfs4layouts.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include <linux/kmod.h>
+#include <linux/file.h>
+#include <linux/jhash.h>
+#include <linux/sched.h>
+#include <linux/sunrpc/addr.h>
+
+#include "pnfs.h"
+#include "netns.h"
+#include "trace.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+struct nfs4_layout {
+ struct list_head lo_perstate;
+ struct nfs4_layout_stateid *lo_state;
+ struct nfsd4_layout_seg lo_seg;
+};
+
+static struct kmem_cache *nfs4_layout_cache;
+static struct kmem_cache *nfs4_layout_stateid_cache;
+
+static struct nfsd4_callback_ops nfsd4_cb_layout_ops;
+static const struct lock_manager_operations nfsd4_layouts_lm_ops;
+
+const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
+ [LAYOUT_BLOCK_VOLUME] = &bl_layout_ops,
+};
+
+/* pNFS device ID to export fsid mapping */
+#define DEVID_HASH_BITS 8
+#define DEVID_HASH_SIZE (1 << DEVID_HASH_BITS)
+#define DEVID_HASH_MASK (DEVID_HASH_SIZE - 1)
+static u64 nfsd_devid_seq = 1;
+static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
+static DEFINE_SPINLOCK(nfsd_devid_lock);
+
+static inline u32 devid_hashfn(u64 idx)
+{
+ return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
+}
+
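+/*
+ * Set up the device ID to fsid mapping for the export backing @fhp, reusing
+ * an existing mapping if another export with the same fsid already
+ * registered one.
+ */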
+static void
+nfsd4_alloc_devid_map(const struct svc_fh *fhp)
+{
+ const struct knfsd_fh *fh = &fhp->fh_handle;
+ size_t fsid_len = key_len(fh->fh_fsid_type);
+ struct nfsd4_deviceid_map *map, *old;
+ int i;
+
+ map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
+ if (!map)
+ return;
+
+ map->fsid_type = fh->fh_fsid_type;
+ memcpy(&map->fsid, fh->fh_fsid, fsid_len);
+
+ spin_lock(&nfsd_devid_lock);
+ if (fhp->fh_export->ex_devid_map)
+ goto out_unlock;
+
+ for (i = 0; i < DEVID_HASH_SIZE; i++) {
+ list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
+ if (old->fsid_type != fh->fh_fsid_type)
+ continue;
+ if (memcmp(old->fsid, fh->fh_fsid,
+ key_len(old->fsid_type)))
+ continue;
+
+ fhp->fh_export->ex_devid_map = old;
+ goto out_unlock;
+ }
+ }
+
+ map->idx = nfsd_devid_seq++;
+ list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
+ fhp->fh_export->ex_devid_map = map;
+ map = NULL;
+
+out_unlock:
+ spin_unlock(&nfsd_devid_lock);
+ kfree(map);
+}
+
+struct nfsd4_deviceid_map *
+nfsd4_find_devid_map(int idx)
+{
+ struct nfsd4_deviceid_map *map, *ret = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
+ if (map->idx == idx)
+ ret = map;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+int
+nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
+ u32 device_generation)
+{
+ if (!fhp->fh_export->ex_devid_map) {
+ nfsd4_alloc_devid_map(fhp);
+ if (!fhp->fh_export->ex_devid_map)
+ return -ENOMEM;
+ }
+
+ id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
+ id->generation = device_generation;
+ id->pad = 0;
+ return 0;
+}
+
+void nfsd4_setup_layout_type(struct svc_export *exp)
+{
+ struct super_block *sb = exp->ex_path.mnt->mnt_sb;
+
+ if (exp->ex_flags & NFSEXP_NOPNFS)
+ return;
+
+ if (sb->s_export_op->get_uuid &&
+ sb->s_export_op->map_blocks &&
+ sb->s_export_op->commit_blocks)
+ exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
+}
+
+static void
+nfsd4_free_layout_stateid(struct nfs4_stid *stid)
+{
+ struct nfs4_layout_stateid *ls = layoutstateid(stid);
+ struct nfs4_client *clp = ls->ls_stid.sc_client;
+ struct nfs4_file *fp = ls->ls_stid.sc_file;
+
+ trace_layoutstate_free(&ls->ls_stid.sc_stateid);
+
+ spin_lock(&clp->cl_lock);
+ list_del_init(&ls->ls_perclnt);
+ spin_unlock(&clp->cl_lock);
+
+ spin_lock(&fp->fi_lock);
+ list_del_init(&ls->ls_perfile);
+ spin_unlock(&fp->fi_lock);
+
+ vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
+ fput(ls->ls_file);
+
+ if (ls->ls_recalled)
+ atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
+
+ kmem_cache_free(nfs4_layout_stateid_cache, ls);
+}
+
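+/*
+ * Take an FL_LAYOUT read lease on the file backing the layout stateid so
+ * that conflicting local access breaks the lease and triggers a layout
+ * recall via nfsd4_layout_lm_break().
+ */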
+static int
+nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
+{
+ struct file_lock *fl;
+ int status;
+
+ fl = locks_alloc_lock();
+ if (!fl)
+ return -ENOMEM;
+ locks_init_lock(fl);
+ fl->fl_lmops = &nfsd4_layouts_lm_ops;
+ fl->fl_flags = FL_LAYOUT;
+ fl->fl_type = F_RDLCK;
+ fl->fl_end = OFFSET_MAX;
+ fl->fl_owner = ls;
+ fl->fl_pid = current->tgid;
+ fl->fl_file = ls->ls_file;
+
+ status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
+ if (status) {
+ locks_free_lock(fl);
+ return status;
+ }
+ BUG_ON(fl != NULL);
+ return 0;
+}
+
+static struct nfs4_layout_stateid *
+nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
+ struct nfs4_stid *parent, u32 layout_type)
+{
+ struct nfs4_client *clp = cstate->clp;
+ struct nfs4_file *fp = parent->sc_file;
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_stid *stp;
+
+ stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+ if (!stp)
+ return NULL;
+ stp->sc_free = nfsd4_free_layout_stateid;
+ get_nfs4_file(fp);
+ stp->sc_file = fp;
+
+ ls = layoutstateid(stp);
+ INIT_LIST_HEAD(&ls->ls_perclnt);
+ INIT_LIST_HEAD(&ls->ls_perfile);
+ spin_lock_init(&ls->ls_lock);
+ INIT_LIST_HEAD(&ls->ls_layouts);
+ ls->ls_layout_type = layout_type;
+ nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
+ NFSPROC4_CLNT_CB_LAYOUT);
+
+ if (parent->sc_type == NFS4_DELEG_STID)
+ ls->ls_file = get_file(fp->fi_deleg_file);
+ else
+ ls->ls_file = find_any_file(fp);
+ BUG_ON(!ls->ls_file);
+
+ if (nfsd4_layout_setlease(ls)) {
+ put_nfs4_file(fp);
+ kmem_cache_free(nfs4_layout_stateid_cache, ls);
+ return NULL;
+ }
+
+ spin_lock(&clp->cl_lock);
+ stp->sc_type = NFS4_LAYOUT_STID;
+ list_add(&ls->ls_perclnt, &clp->cl_lo_states);
+ spin_unlock(&clp->cl_lock);
+
+ spin_lock(&fp->fi_lock);
+ list_add(&ls->ls_perfile, &fp->fi_lo_states);
+ spin_unlock(&fp->fi_lock);
+
+ trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
+ return ls;
+}
+
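+/*
+ * Look up the stateid for a layout operation.  With @create set, an open,
+ * lock or delegation stateid is accepted and promoted to a newly allocated
+ * layout stateid; otherwise only an existing layout stateid with a current
+ * generation and a matching layout type is returned.
+ */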
+__be32
+nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, stateid_t *stateid,
+ bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
+{
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_stid *stid;
+ unsigned char typemask = NFS4_LAYOUT_STID;
+ __be32 status;
+
+ if (create)
+ typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);
+
+ status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
+ net_generic(SVC_NET(rqstp), nfsd_net_id));
+ if (status)
+ goto out;
+
+ if (!fh_match(&cstate->current_fh.fh_handle,
+ &stid->sc_file->fi_fhandle)) {
+ status = nfserr_bad_stateid;
+ goto out_put_stid;
+ }
+
+ if (stid->sc_type != NFS4_LAYOUT_STID) {
+ ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
+ nfs4_put_stid(stid);
+
+ status = nfserr_jukebox;
+ if (!ls)
+ goto out;
+ } else {
+ ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);
+
+ status = nfserr_bad_stateid;
+ if (stateid->si_generation > stid->sc_stateid.si_generation)
+ goto out_put_stid;
+ if (layout_type != ls->ls_layout_type)
+ goto out_put_stid;
+ }
+
+ *lsp = ls;
+ return 0;
+
+out_put_stid:
+ nfs4_put_stid(stid);
+out:
+ return status;
+}
+
+static void
+nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
+{
+ spin_lock(&ls->ls_lock);
+ if (ls->ls_recalled)
+ goto out_unlock;
+
+ ls->ls_recalled = true;
+ atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
+ if (list_empty(&ls->ls_layouts))
+ goto out_unlock;
+
+ trace_layout_recall(&ls->ls_stid.sc_stateid);
+
+ atomic_inc(&ls->ls_stid.sc_count);
+ update_stateid(&ls->ls_stid.sc_stateid);
+ memcpy(&ls->ls_recall_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
+ nfsd4_run_cb(&ls->ls_recall);
+
+out_unlock:
+ spin_unlock(&ls->ls_lock);
+}
+
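+/* End offset of a layout segment, saturated to NFS4_MAX_UINT64 on overflow. */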
+static inline u64
+layout_end(struct nfsd4_layout_seg *seg)
+{
+ u64 end = seg->offset + seg->length;
+ return end >= seg->offset ? end : NFS4_MAX_UINT64;
+}
+
+static void
+layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
+{
+ if (end == NFS4_MAX_UINT64)
+ lo->length = NFS4_MAX_UINT64;
+ else
+ lo->length = end - lo->offset;
+}
+
+static bool
+layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
+{
+ if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
+ return false;
+ if (layout_end(&lo->lo_seg) <= s->offset)
+ return false;
+ if (layout_end(s) <= lo->lo_seg.offset)
+ return false;
+ return true;
+}
+
+static bool
+layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
+{
+ if (lo->iomode != new->iomode)
+ return false;
+ if (layout_end(new) < lo->offset)
+ return false;
+ if (layout_end(lo) < new->offset)
+ return false;
+
+ lo->offset = min(lo->offset, new->offset);
+ layout_update_len(lo, max(layout_end(lo), layout_end(new)));
+ return true;
+}
+
+static __be32
+nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
+{
+ struct nfs4_file *fp = ls->ls_stid.sc_file;
+ struct nfs4_layout_stateid *l, *n;
+ __be32 nfserr = nfs_ok;
+
+ assert_spin_locked(&fp->fi_lock);
+
+ list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
+ if (l != ls) {
+ nfsd4_recall_file_layout(l);
+ nfserr = nfserr_recallconflict;
+ }
+ }
+
+ return nfserr;
+}
+
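+/*
+ * Add the segment granted by LAYOUTGET to the layout stateid, merging it
+ * into an existing segment when possible.  If a new nfs4_layout has to be
+ * allocated, the locks are dropped for the allocation and the
+ * recall-conflict check and merge pass are repeated afterwards.
+ */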
+__be32
+nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
+{
+ struct nfsd4_layout_seg *seg = &lgp->lg_seg;
+ struct nfs4_file *fp = ls->ls_stid.sc_file;
+ struct nfs4_layout *lp, *new = NULL;
+ __be32 nfserr;
+
+ spin_lock(&fp->fi_lock);
+ nfserr = nfsd4_recall_conflict(ls);
+ if (nfserr)
+ goto out;
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
+ if (layouts_try_merge(&lp->lo_seg, seg))
+ goto done;
+ }
+ spin_unlock(&ls->ls_lock);
+ spin_unlock(&fp->fi_lock);
+
+ new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
+ if (!new)
+ return nfserr_jukebox;
+ memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
+ new->lo_state = ls;
+
+ spin_lock(&fp->fi_lock);
+ nfserr = nfsd4_recall_conflict(ls);
+ if (nfserr)
+ goto out;
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
+ if (layouts_try_merge(&lp->lo_seg, seg))
+ goto done;
+ }
+
+ atomic_inc(&ls->ls_stid.sc_count);
+ list_add_tail(&new->lo_perstate, &ls->ls_layouts);
+ new = NULL;
+done:
+ update_stateid(&ls->ls_stid.sc_stateid);
+ memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
+ spin_unlock(&ls->ls_lock);
+out:
+ spin_unlock(&fp->fi_lock);
+ if (new)
+ kmem_cache_free(nfs4_layout_cache, new);
+ return nfserr;
+}
+
+static void
+nfsd4_free_layouts(struct list_head *reaplist)
+{
+ while (!list_empty(reaplist)) {
+ struct nfs4_layout *lp = list_first_entry(reaplist,
+ struct nfs4_layout, lo_perstate);
+
+ list_del(&lp->lo_perstate);
+ nfs4_put_stid(&lp->lo_state->ls_stid);
+ kmem_cache_free(nfs4_layout_cache, lp);
+ }
+}
+
+static void
+nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
+ struct list_head *reaplist)
+{
+ struct nfsd4_layout_seg *lo = &lp->lo_seg;
+ u64 end = layout_end(lo);
+
+ if (seg->offset <= lo->offset) {
+ if (layout_end(seg) >= end) {
+ list_move_tail(&lp->lo_perstate, reaplist);
+ return;
+ }
+ end = seg->offset;
+ } else {
+ /* retain the whole layout segment on a split. */
+ if (layout_end(seg) < end) {
+ dprintk("%s: split not supported\n", __func__);
+ return;
+ }
+
+ lo->offset = layout_end(seg);
+ }
+
+ layout_update_len(lo, end);
+}
+
+__be32
+nfsd4_return_file_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_layout *lp, *n;
+ LIST_HEAD(reaplist);
+ __be32 nfserr;
+ int found = 0;
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
+ false, lrp->lr_layout_type,
+ &ls);
+ if (nfserr) {
+ trace_layout_return_lookup_fail(&lrp->lr_sid);
+ return nfserr;
+ }
+
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
+ if (layouts_overlapping(lp, &lrp->lr_seg)) {
+ nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
+ found++;
+ }
+ }
+ if (!list_empty(&ls->ls_layouts)) {
+ if (found) {
+ update_stateid(&ls->ls_stid.sc_stateid);
+ memcpy(&lrp->lr_sid, &ls->ls_stid.sc_stateid,
+ sizeof(stateid_t));
+ }
+ lrp->lrs_present = 1;
+ } else {
+ trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
+ nfs4_unhash_stid(&ls->ls_stid);
+ lrp->lrs_present = 0;
+ }
+ spin_unlock(&ls->ls_lock);
+
+ nfs4_put_stid(&ls->ls_stid);
+ nfsd4_free_layouts(&reaplist);
+ return nfs_ok;
+}
+
+__be32
+nfsd4_return_client_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct nfs4_layout_stateid *ls, *n;
+ struct nfs4_client *clp = cstate->clp;
+ struct nfs4_layout *lp, *t;
+ LIST_HEAD(reaplist);
+
+ lrp->lrs_present = 0;
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
+ if (lrp->lr_return_type == RETURN_FSID &&
+ !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
+ &cstate->current_fh.fh_handle))
+ continue;
+
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
+ if (lrp->lr_seg.iomode == IOMODE_ANY ||
+ lrp->lr_seg.iomode == lp->lo_seg.iomode)
+ list_move_tail(&lp->lo_perstate, &reaplist);
+ }
+ spin_unlock(&ls->ls_lock);
+ }
+ spin_unlock(&clp->cl_lock);
+
+ nfsd4_free_layouts(&reaplist);
+ return 0;
+}
+
+static void
+nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
+ struct list_head *reaplist)
+{
+ spin_lock(&ls->ls_lock);
+ list_splice_init(&ls->ls_layouts, reaplist);
+ spin_unlock(&ls->ls_lock);
+}
+
+void
+nfsd4_return_all_client_layouts(struct nfs4_client *clp)
+{
+ struct nfs4_layout_stateid *ls, *n;
+ LIST_HEAD(reaplist);
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
+ nfsd4_return_all_layouts(ls, &reaplist);
+ spin_unlock(&clp->cl_lock);
+
+ nfsd4_free_layouts(&reaplist);
+}
+
+void
+nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
+{
+ struct nfs4_layout_stateid *ls, *n;
+ LIST_HEAD(reaplist);
+
+ spin_lock(&fp->fi_lock);
+ list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
+ if (ls->ls_stid.sc_client == clp)
+ nfsd4_return_all_layouts(ls, &reaplist);
+ }
+ spin_unlock(&fp->fi_lock);
+
+ nfsd4_free_layouts(&reaplist);
+}
+
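+/*
+ * Fence a client that failed to return a recalled layout by running the
+ * /sbin/nfsd-recall-failed helper with the client address and the id of the
+ * exported file system's super block.
+ */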
+static void
+nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
+{
+ struct nfs4_client *clp = ls->ls_stid.sc_client;
+ char addr_str[INET6_ADDRSTRLEN];
+ static char *envp[] = {
+ "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL
+ };
+ char *argv[8];
+ int error;
+
+ rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
+
+ trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
+
+ printk(KERN_WARNING
+ "nfsd: client %s failed to respond to layout recall. "
+ "Fencing...\n", addr_str);
+
+ argv[0] = "/sbin/nfsd-recall-failed";
+ argv[1] = addr_str;
+ argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
+ argv[3] = NULL;
+
+ error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ if (error) {
+ printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
+ addr_str, error);
+ }
+}
+
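+/*
+ * Completion handler for the CB_LAYOUTRECALL callback: NOMATCHING_LAYOUT is
+ * treated as success, NFS4ERR_DELAY causes the recall to be retried, and any
+ * other error fences the client.
+ */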
+static int
+nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+ struct nfs4_layout_stateid *ls =
+ container_of(cb, struct nfs4_layout_stateid, ls_recall);
+ LIST_HEAD(reaplist);
+
+ switch (task->tk_status) {
+ case 0:
+ return 1;
+ case -NFS4ERR_NOMATCHING_LAYOUT:
+ trace_layout_recall_done(&ls->ls_stid.sc_stateid);
+ task->tk_status = 0;
+ return 1;
+ case -NFS4ERR_DELAY:
+ /* Poll the client until it's done with the layout */
+ /* FIXME: cap the number of retries.
+ * The pNFS standard states that we may only expire
+ * the client after at least "lease time" (e.g. lease-time * 2)
+ * when failing to communicate a recall.
+ */
+ rpc_delay(task, HZ/100); /* 10 milliseconds */
+ return 0;
+ default:
+ /*
+ * Unknown error or a non-responding client; we'll need to fence.
+ */
+ nfsd4_cb_layout_fail(ls);
+ return -1;
+ }
+}
+
+static void
+nfsd4_cb_layout_release(struct nfsd4_callback *cb)
+{
+ struct nfs4_layout_stateid *ls =
+ container_of(cb, struct nfs4_layout_stateid, ls_recall);
+ LIST_HEAD(reaplist);
+
+ trace_layout_recall_release(&ls->ls_stid.sc_stateid);
+
+ nfsd4_return_all_layouts(ls, &reaplist);
+ nfsd4_free_layouts(&reaplist);
+ nfs4_put_stid(&ls->ls_stid);
+}
+
+static struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
+ .done = nfsd4_cb_layout_done,
+ .release = nfsd4_cb_layout_release,
+};
+
+static bool
+nfsd4_layout_lm_break(struct file_lock *fl)
+{
+ /*
+ * We don't want the locks code to time out the lease for us;
+ * we'll remove it ourselves if a layout isn't returned
+ * in time:
+ */
+ fl->fl_break_time = 0;
+ nfsd4_recall_file_layout(fl->fl_owner);
+ return false;
+}
+
+static int
+nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
+ struct list_head *dispose)
+{
+ BUG_ON(!(arg & F_UNLCK));
+ return lease_modify(onlist, arg, dispose);
+}
+
+static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
+ .lm_break = nfsd4_layout_lm_break,
+ .lm_change = nfsd4_layout_lm_change,
+};
+
+int
+nfsd4_init_pnfs(void)
+{
+ int i;
+
+ for (i = 0; i < DEVID_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&nfsd_devid_hash[i]);
+
+ nfs4_layout_cache = kmem_cache_create("nfs4_layout",
+ sizeof(struct nfs4_layout), 0, 0, NULL);
+ if (!nfs4_layout_cache)
+ return -ENOMEM;
+
+ nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
+ sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
+ if (!nfs4_layout_stateid_cache) {
+ kmem_cache_destroy(nfs4_layout_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+nfsd4_exit_pnfs(void)
+{
+ int i;
+
+ kmem_cache_destroy(nfs4_layout_cache);
+ kmem_cache_destroy(nfs4_layout_stateid_cache);
+
+ for (i = 0; i < DEVID_HASH_SIZE; i++) {
+ struct nfsd4_deviceid_map *map, *n;
+
+ list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
+ kfree(map);
+ }
+}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index ac71d13c69ef..d30bea8d0277 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -43,6 +43,8 @@
#include "current_stateid.h"
#include "netns.h"
#include "acl.h"
+#include "pnfs.h"
+#include "trace.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
@@ -1178,6 +1180,259 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status == nfserr_same ? nfs_ok : status;
}
+#ifdef CONFIG_NFSD_PNFS
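+/*
+ * Return the layout operations for @layout_type if the export advertises
+ * exactly that layout type, or NULL if it does not support pNFS.
+ */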
+static const struct nfsd4_layout_ops *
+nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
+{
+ if (!exp->ex_layout_type) {
+ dprintk("%s: export does not support pNFS\n", __func__);
+ return NULL;
+ }
+
+ if (exp->ex_layout_type != layout_type) {
+ dprintk("%s: layout type %d not supported\n",
+ __func__, layout_type);
+ return NULL;
+ }
+
+ return nfsd4_layout_ops[layout_type];
+}
+
+static __be32
+nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ const struct nfsd4_layout_ops *ops;
+ struct nfsd4_deviceid_map *map;
+ struct svc_export *exp;
+ __be32 nfserr;
+
+ dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n",
+ __func__,
+ gdp->gd_layout_type,
+ gdp->gd_devid.fsid_idx, gdp->gd_devid.generation,
+ gdp->gd_maxcount);
+
+ map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx);
+ if (!map) {
+ dprintk("%s: couldn't find device ID to export mapping!\n",
+ __func__);
+ return nfserr_noent;
+ }
+
+ exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
+ if (IS_ERR(exp)) {
+ dprintk("%s: could not find device id\n", __func__);
+ return nfserr_noent;
+ }
+
+ nfserr = nfserr_layoutunavailable;
+ ops = nfsd4_layout_verify(exp, gdp->gd_layout_type);
+ if (!ops)
+ goto out;
+
+ nfserr = nfs_ok;
+ if (gdp->gd_maxcount != 0)
+ nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp);
+
+ gdp->gd_notify_types &= ops->notify_types;
+ exp_put(exp);
+out:
+ return nfserr;
+}
+
+static __be32
+nfsd4_layoutget(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutget *lgp)
+{
+ struct svc_fh *current_fh = &cstate->current_fh;
+ const struct nfsd4_layout_ops *ops;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+ int accmode;
+
+ switch (lgp->lg_seg.iomode) {
+ case IOMODE_READ:
+ accmode = NFSD_MAY_READ;
+ break;
+ case IOMODE_RW:
+ accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
+ break;
+ default:
+ dprintk("%s: invalid iomode %d\n",
+ __func__, lgp->lg_seg.iomode);
+ nfserr = nfserr_badiomode;
+ goto out;
+ }
+
+ nfserr = fh_verify(rqstp, current_fh, 0, accmode);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_layoutunavailable;
+ ops = nfsd4_layout_verify(current_fh->fh_export, lgp->lg_layout_type);
+ if (!ops)
+ goto out;
+
+ /*
+ * Verify minlength and range as per RFC5661:
+ * o If loga_length is less than loga_minlength,
+ * the metadata server MUST return NFS4ERR_INVAL.
+ * o If the sum of loga_offset and loga_minlength exceeds
+ * NFS4_UINT64_MAX, and loga_minlength is not
+ * NFS4_UINT64_MAX, the error NFS4ERR_INVAL MUST result.
+ * o If the sum of loga_offset and loga_length exceeds
+ * NFS4_UINT64_MAX, and loga_length is not NFS4_UINT64_MAX,
+ * the error NFS4ERR_INVAL MUST result.
+ */
+ nfserr = nfserr_inval;
+ if (lgp->lg_seg.length < lgp->lg_minlength ||
+ (lgp->lg_minlength != NFS4_MAX_UINT64 &&
+ lgp->lg_minlength > NFS4_MAX_UINT64 - lgp->lg_seg.offset) ||
+ (lgp->lg_seg.length != NFS4_MAX_UINT64 &&
+ lgp->lg_seg.length > NFS4_MAX_UINT64 - lgp->lg_seg.offset))
+ goto out;
+ if (lgp->lg_seg.length == 0)
+ goto out;
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid,
+ true, lgp->lg_layout_type, &ls);
+ if (nfserr) {
+ trace_layout_get_lookup_fail(&lgp->lg_sid);
+ goto out;
+ }
+
+ nfserr = nfserr_recallconflict;
+ if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
+ goto out_put_stid;
+
+ nfserr = ops->proc_layoutget(current_fh->fh_dentry->d_inode,
+ current_fh, lgp);
+ if (nfserr)
+ goto out_put_stid;
+
+ nfserr = nfsd4_insert_layout(lgp, ls);
+
+out_put_stid:
+ nfs4_put_stid(&ls->ls_stid);
+out:
+ return nfserr;
+}
+
+static __be32
+nfsd4_layoutcommit(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutcommit *lcp)
+{
+ const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
+ struct svc_fh *current_fh = &cstate->current_fh;
+ const struct nfsd4_layout_ops *ops;
+ loff_t new_size = lcp->lc_last_wr + 1;
+ struct inode *inode;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+
+ nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_layoutunavailable;
+ ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type);
+ if (!ops)
+ goto out;
+ inode = current_fh->fh_dentry->d_inode;
+
+ nfserr = nfserr_inval;
+ if (new_size <= seg->offset) {
+ dprintk("pnfsd: last write before layout segment\n");
+ goto out;
+ }
+ if (new_size > seg->offset + seg->length) {
+ dprintk("pnfsd: last write beyond layout segment\n");
+ goto out;
+ }
+ if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
+ dprintk("pnfsd: layoutcommit beyond EOF\n");
+ goto out;
+ }
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+ false, lcp->lc_layout_type,
+ &ls);
+ if (nfserr) {
+ trace_layout_commit_lookup_fail(&lcp->lc_sid);
+ /* fixup error code as per RFC5661 */
+ if (nfserr == nfserr_bad_stateid)
+ nfserr = nfserr_badlayout;
+ goto out;
+ }
+
+ nfserr = ops->proc_layoutcommit(inode, lcp);
+ if (nfserr)
+ goto out_put_stid;
+
+ if (new_size > i_size_read(inode)) {
+ lcp->lc_size_chg = 1;
+ lcp->lc_newsize = new_size;
+ } else {
+ lcp->lc_size_chg = 0;
+ }
+
+out_put_stid:
+ nfs4_put_stid(&ls->ls_stid);
+out:
+ return nfserr;
+}
+
+static __be32
+nfsd4_layoutreturn(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct svc_fh *current_fh = &cstate->current_fh;
+ __be32 nfserr;
+
+ nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_NOP);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_layoutunavailable;
+ if (!nfsd4_layout_verify(current_fh->fh_export, lrp->lr_layout_type))
+ goto out;
+
+ switch (lrp->lr_seg.iomode) {
+ case IOMODE_READ:
+ case IOMODE_RW:
+ case IOMODE_ANY:
+ break;
+ default:
+ dprintk("%s: invalid iomode %d\n", __func__,
+ lrp->lr_seg.iomode);
+ nfserr = nfserr_inval;
+ goto out;
+ }
+
+ switch (lrp->lr_return_type) {
+ case RETURN_FILE:
+ nfserr = nfsd4_return_file_layouts(rqstp, cstate, lrp);
+ break;
+ case RETURN_FSID:
+ case RETURN_ALL:
+ nfserr = nfsd4_return_client_layouts(rqstp, cstate, lrp);
+ break;
+ default:
+ dprintk("%s: invalid return_type %d\n", __func__,
+ lrp->lr_return_type);
+ nfserr = nfserr_inval;
+ break;
+ }
+out:
+ return nfserr;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
/*
* NULL call.
*/
@@ -1679,6 +1934,36 @@ static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
+#ifdef CONFIG_NFSD_PNFS
+/*
+ * At this stage we don't really know which layout driver will handle the
+ * request, so we need to define an arbitrary upper bound here.
+ */
+#define MAX_LAYOUT_SIZE 128
+static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* logr_return_on_close */ +
+ op_encode_stateid_maxsz +
+ 1 /* nr of layouts */ +
+ MAX_LAYOUT_SIZE) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* locr_newsize */ +
+ 2 /* ns_size */) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* lrs_stateid */ +
+ op_encode_stateid_maxsz) * sizeof(__be32);
+}
+#endif /* CONFIG_NFSD_PNFS */
+
static struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = (nfsd4op_func)nfsd4_access,
@@ -1966,6 +2251,31 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
+#ifdef CONFIG_NFSD_PNFS
+ [OP_GETDEVICEINFO] = {
+ .op_func = (nfsd4op_func)nfsd4_getdeviceinfo,
+ .op_flags = ALLOWED_WITHOUT_FH,
+ .op_name = "OP_GETDEVICEINFO",
+ },
+ [OP_LAYOUTGET] = {
+ .op_func = (nfsd4op_func)nfsd4_layoutget,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_LAYOUTGET",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize,
+ },
+ [OP_LAYOUTCOMMIT] = {
+ .op_func = (nfsd4op_func)nfsd4_layoutcommit,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_LAYOUTCOMMIT",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize,
+ },
+ [OP_LAYOUTRETURN] = {
+ .op_func = (nfsd4op_func)nfsd4_layoutreturn,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_LAYOUTRETURN",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize,
+ },
+#endif /* CONFIG_NFSD_PNFS */
/* NFSv4.2 operations */
[OP_ALLOCATE] = {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c06a1ba80d73..f6b2a09f793f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -48,6 +48,7 @@
#include "current_stateid.h"
#include "netns.h"
+#include "pnfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -150,16 +151,6 @@ renew_client_locked(struct nfs4_client *clp)
clp->cl_time = get_seconds();
}
-static inline void
-renew_client(struct nfs4_client *clp)
-{
- struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
-
- spin_lock(&nn->client_lock);
- renew_client_locked(clp);
- spin_unlock(&nn->client_lock);
-}
-
static void put_client_renew_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
@@ -282,7 +273,7 @@ static void nfsd4_free_file_rcu(struct rcu_head *rcu)
kmem_cache_free(file_slab, fp);
}
-static inline void
+void
put_nfs4_file(struct nfs4_file *fi)
{
might_lock(&state_lock);
@@ -295,12 +286,6 @@ put_nfs4_file(struct nfs4_file *fi)
}
}
-static inline void
-get_nfs4_file(struct nfs4_file *fi)
-{
- atomic_inc(&fi->fi_ref);
-}
-
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
@@ -358,7 +343,7 @@ find_readable_file(struct nfs4_file *f)
return ret;
}
-static struct file *
+struct file *
find_any_file(struct nfs4_file *f)
{
struct file *ret;
@@ -408,14 +393,6 @@ static unsigned int file_hashval(struct knfsd_fh *fh)
return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}
-static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
-{
- return fh1->fh_size == fh2->fh_size &&
- !memcmp(fh1->fh_base.fh_pad,
- fh2->fh_base.fh_pad,
- fh1->fh_size);
-}
-
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
@@ -494,7 +471,7 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
__nfs4_file_put_access(fp, O_RDONLY);
}
-static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
struct kmem_cache *slab)
{
struct nfs4_stid *stid;
@@ -688,17 +665,17 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
struct file *filp = NULL;
spin_lock(&fp->fi_lock);
- if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees))
+ if (fp->fi_deleg_file && --fp->fi_delegees == 0)
swap(filp, fp->fi_deleg_file);
spin_unlock(&fp->fi_lock);
if (filp) {
- vfs_setlease(filp, F_UNLCK, NULL, NULL);
+ vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
fput(filp);
}
}
-static void unhash_stid(struct nfs4_stid *s)
+void nfs4_unhash_stid(struct nfs4_stid *s)
{
s->sc_type = 0;
}
@@ -1006,7 +983,7 @@ static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
list_del_init(&stp->st_locks);
unhash_ol_stateid(stp);
- unhash_stid(&stp->st_stid);
+ nfs4_unhash_stid(&stp->st_stid);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -1518,7 +1495,12 @@ unhash_session(struct nfsd4_session *ses)
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
- if (clid->cl_boot == nn->boot_time)
+ /*
+ * We're assuming the clid was not given out from a boot
+ * precisely 2^32 (about 136 years) before this one. That seems
+ * a safe assumption:
+ */
+ if (clid->cl_boot == (u32)nn->boot_time)
return 0;
dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
clid->cl_boot, clid->cl_id, nn->boot_time);
@@ -1558,6 +1540,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
INIT_LIST_HEAD(&clp->cl_lru);
INIT_LIST_HEAD(&clp->cl_callbacks);
INIT_LIST_HEAD(&clp->cl_revoked);
+#ifdef CONFIG_NFSD_PNFS
+ INIT_LIST_HEAD(&clp->cl_lo_states);
+#endif
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
@@ -1662,6 +1647,7 @@ __destroy_client(struct nfs4_client *clp)
nfs4_get_stateowner(&oo->oo_owner);
release_openowner(oo);
}
+ nfsd4_return_all_client_layouts(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
@@ -2145,8 +2131,11 @@ nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
- /* pNFS is not supported */
+#ifdef CONFIG_NFSD_PNFS
+ new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
+#else
new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
+#endif
/* Referrals are supported, Migration is not. */
new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
@@ -3074,6 +3063,10 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
+#ifdef CONFIG_NFSD_PNFS
+ INIT_LIST_HEAD(&fp->fi_lo_states);
+ atomic_set(&fp->fi_lo_recalls, 0);
+#endif
hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
@@ -3300,7 +3293,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
struct nfs4_file *fp;
hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
- if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
+ if (fh_match(&fp->fi_fhandle, fh)) {
if (atomic_inc_not_zero(&fp->fi_ref))
return fp;
}
@@ -3308,7 +3301,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
return NULL;
}
-static struct nfs4_file *
+struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
struct nfs4_file *fp;
@@ -3477,7 +3470,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
}
static int
-nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
+nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
+ struct list_head *dispose)
{
if (arg & F_UNLCK)
return lease_modify(onlist, arg, dispose);
@@ -3855,12 +3849,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
/* Race breaker */
if (fp->fi_deleg_file) {
status = 0;
- atomic_inc(&fp->fi_delegees);
+ ++fp->fi_delegees;
hash_delegation_locked(dp, fp);
goto out_unlock;
}
fp->fi_deleg_file = filp;
- atomic_set(&fp->fi_delegees, 1);
+ fp->fi_delegees = 1;
hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
@@ -3901,7 +3895,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
status = -EAGAIN;
goto out_unlock;
}
- atomic_inc(&fp->fi_delegees);
+ ++fp->fi_delegees;
hash_delegation_locked(dp, fp);
status = 0;
out_unlock:
@@ -4294,7 +4288,7 @@ laundromat_main(struct work_struct *laundry)
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
- if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+ if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
return nfserr_bad_stateid;
return nfs_ok;
}
@@ -4445,7 +4439,7 @@ out_unlock:
return status;
}
-static __be32
+__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn)
@@ -4859,6 +4853,9 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
+ stp->st_stid.sc_file);
+
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
@@ -5556,10 +5553,11 @@ out_nfserr:
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
- struct file_lock **flpp;
+ struct file_lock *fl;
int status = false;
struct file *filp = find_any_file(fp);
struct inode *inode;
+ struct file_lock_context *flctx;
if (!filp) {
/* Any valid lock stateid should have some sort of access */
@@ -5568,15 +5566,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
}
inode = file_inode(filp);
+ flctx = inode->i_flctx;
- spin_lock(&inode->i_lock);
- for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
- if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
- status = true;
- break;
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ if (fl->fl_owner == (fl_owner_t)lowner) {
+ status = true;
+ break;
+ }
}
+ spin_unlock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
fput(filp);
return status;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 15f7b73e0c0f..df5e66caf100 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -47,6 +47,7 @@
#include "state.h"
#include "cache.h"
#include "netns.h"
+#include "pnfs.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
@@ -234,6 +235,26 @@ static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
return ret;
}
+/*
+ * We require the high 32 bits of 'seconds' to be 0, and
+ * 'nseconds' to be a valid nanosecond value (less than 10^9).
+ */
+static __be32
+nfsd4_decode_time(struct nfsd4_compoundargs *argp, struct timespec *tv)
+{
+ DECODE_HEAD;
+ u64 sec;
+
+ READ_BUF(12);
+ p = xdr_decode_hyper(p, &sec);
+ tv->tv_sec = sec;
+ tv->tv_nsec = be32_to_cpup(p++);
+ if (tv->tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+
+ DECODE_TAIL;
+}
+
static __be32
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
@@ -267,7 +288,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
{
int expected_len, len = 0;
u32 dummy32;
- u64 sec;
char *buf;
DECODE_HEAD;
@@ -358,15 +378,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
- /* We require the high 32 bits of 'seconds' to be 0, and we ignore
- all 32 bits of 'nseconds'. */
- READ_BUF(12);
len += 12;
- p = xdr_decode_hyper(p, &sec);
- iattr->ia_atime.tv_sec = (time_t)sec;
- iattr->ia_atime.tv_nsec = be32_to_cpup(p++);
- if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
- return nfserr_inval;
+ status = nfsd4_decode_time(argp, &iattr->ia_atime);
+ if (status)
+ return status;
iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
@@ -382,15 +397,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
- /* We require the high 32 bits of 'seconds' to be 0, and we ignore
- all 32 bits of 'nseconds'. */
- READ_BUF(12);
len += 12;
- p = xdr_decode_hyper(p, &sec);
- iattr->ia_mtime.tv_sec = sec;
- iattr->ia_mtime.tv_nsec = be32_to_cpup(p++);
- if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
- return nfserr_inval;
+ status = nfsd4_decode_time(argp, &iattr->ia_mtime);
+ if (status)
+ return status;
iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
@@ -1513,6 +1523,127 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, str
DECODE_TAIL;
}
+#ifdef CONFIG_NFSD_PNFS
+static __be32
+nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+ struct nfsd4_getdeviceinfo *gdev)
+{
+ DECODE_HEAD;
+ u32 num, i;
+
+ READ_BUF(sizeof(struct nfsd4_deviceid) + 3 * 4);
+ COPYMEM(&gdev->gd_devid, sizeof(struct nfsd4_deviceid));
+ gdev->gd_layout_type = be32_to_cpup(p++);
+ gdev->gd_maxcount = be32_to_cpup(p++);
+ num = be32_to_cpup(p++);
+ if (num) {
+ READ_BUF(4 * num);
+ gdev->gd_notify_types = be32_to_cpup(p++);
+ for (i = 1; i < num; i++) {
+ if (be32_to_cpup(p++)) {
+ status = nfserr_inval;
+ goto out;
+ }
+ }
+ }
+ DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
+ struct nfsd4_layoutget *lgp)
+{
+ DECODE_HEAD;
+
+ READ_BUF(36);
+ lgp->lg_signal = be32_to_cpup(p++);
+ lgp->lg_layout_type = be32_to_cpup(p++);
+ lgp->lg_seg.iomode = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
+ p = xdr_decode_hyper(p, &lgp->lg_seg.length);
+ p = xdr_decode_hyper(p, &lgp->lg_minlength);
+ nfsd4_decode_stateid(argp, &lgp->lg_sid);
+ READ_BUF(4);
+ lgp->lg_maxcount = be32_to_cpup(p++);
+
+ DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
+ struct nfsd4_layoutcommit *lcp)
+{
+ DECODE_HEAD;
+ u32 timechange;
+
+ READ_BUF(20);
+ p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
+ p = xdr_decode_hyper(p, &lcp->lc_seg.length);
+ lcp->lc_reclaim = be32_to_cpup(p++);
+ nfsd4_decode_stateid(argp, &lcp->lc_sid);
+ READ_BUF(4);
+ lcp->lc_newoffset = be32_to_cpup(p++);
+ if (lcp->lc_newoffset) {
+ READ_BUF(8);
+ p = xdr_decode_hyper(p, &lcp->lc_last_wr);
+ } else
+ lcp->lc_last_wr = 0;
+ READ_BUF(4);
+ timechange = be32_to_cpup(p++);
+ if (timechange) {
+ status = nfsd4_decode_time(argp, &lcp->lc_mtime);
+ if (status)
+ return status;
+ } else {
+ lcp->lc_mtime.tv_nsec = UTIME_NOW;
+ }
+ READ_BUF(8);
+ lcp->lc_layout_type = be32_to_cpup(p++);
+
+ /*
+ * Save the layout update in XDR format and let the layout driver deal
+ * with it later.
+ */
+ lcp->lc_up_len = be32_to_cpup(p++);
+ if (lcp->lc_up_len > 0) {
+ READ_BUF(lcp->lc_up_len);
+ READMEM(lcp->lc_up_layout, lcp->lc_up_len);
+ }
+
+ DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
+ struct nfsd4_layoutreturn *lrp)
+{
+ DECODE_HEAD;
+
+ READ_BUF(16);
+ lrp->lr_reclaim = be32_to_cpup(p++);
+ lrp->lr_layout_type = be32_to_cpup(p++);
+ lrp->lr_seg.iomode = be32_to_cpup(p++);
+ lrp->lr_return_type = be32_to_cpup(p++);
+ if (lrp->lr_return_type == RETURN_FILE) {
+ READ_BUF(16);
+ p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
+ p = xdr_decode_hyper(p, &lrp->lr_seg.length);
+ nfsd4_decode_stateid(argp, &lrp->lr_sid);
+ READ_BUF(4);
+ lrp->lrf_body_len = be32_to_cpup(p++);
+ if (lrp->lrf_body_len > 0) {
+ READ_BUF(lrp->lrf_body_len);
+ READMEM(lrp->lrf_body, lrp->lrf_body_len);
+ }
+ } else {
+ lrp->lr_seg.offset = 0;
+ lrp->lr_seg.length = NFS4_MAX_UINT64;
+ }
+
+ DECODE_TAIL;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
static __be32
nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
struct nfsd4_fallocate *fallocate)
@@ -1607,11 +1738,19 @@ static nfsd4_dec nfsd4_dec_ops[] = {
[OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+#ifdef CONFIG_NFSD_PNFS
+ [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_getdeviceinfo,
+ [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_layoutcommit,
+ [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_layoutget,
+ [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_layoutreturn,
+#else
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
+#endif
[OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2539,6 +2678,30 @@ out_acl:
get_parent_attributes(exp, &stat);
p = xdr_encode_hyper(p, stat.ino);
}
+#ifdef CONFIG_NFSD_PNFS
+ if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
+ (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
+ if (exp->ex_layout_type) {
+ p = xdr_reserve_space(xdr, 8);
+ if (!p)
+ goto out_resource;
+ *p++ = cpu_to_be32(1);
+ *p++ = cpu_to_be32(exp->ex_layout_type);
+ } else {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out_resource;
+ *p++ = cpu_to_be32(0);
+ }
+ }
+
+ if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out_resource;
+ *p++ = cpu_to_be32(stat.blksize);
+ }
+#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_encode_security_label(xdr, rqstp, context,
contextlen);
@@ -2768,16 +2931,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
if (entry_bytes > cd->rd_maxcount)
goto fail;
cd->rd_maxcount -= entry_bytes;
- if (!cd->rd_dircount)
- goto fail;
/*
* RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
* let's always let through the first entry, at least:
*/
- name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+ if (!cd->rd_dircount)
+ goto fail;
+ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
goto fail;
cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
@@ -3814,6 +3978,156 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr;
}
+#ifdef CONFIG_NFSD_PNFS
+static __be32
+nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_getdeviceinfo *gdev)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ const struct nfsd4_layout_ops *ops =
+ nfsd4_layout_ops[gdev->gd_layout_type];
+ u32 starting_len = xdr->buf->len, needed_len;
+ __be32 *p;
+
+ dprintk("%s: err %d\n", __func__, nfserr);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_resource;
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out;
+
+ *p++ = cpu_to_be32(gdev->gd_layout_type);
+
+ /* If maxcount is 0 then just update notifications */
+ if (gdev->gd_maxcount != 0) {
+ nfserr = ops->encode_getdeviceinfo(xdr, gdev);
+ if (nfserr) {
+ /*
+ * We don't bother to burden the layout drivers with
+ * enforcing gd_maxcount; just tell the client to
+ * come back with a bigger buffer if it's not enough.
+ */
+ if (xdr->buf->len + 4 > gdev->gd_maxcount)
+ goto toosmall;
+ goto out;
+ }
+ }
+
+ nfserr = nfserr_resource;
+ if (gdev->gd_notify_types) {
+ p = xdr_reserve_space(xdr, 4 + 4);
+ if (!p)
+ goto out;
+ *p++ = cpu_to_be32(1); /* bitmap length */
+ *p++ = cpu_to_be32(gdev->gd_notify_types);
+ } else {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out;
+ *p++ = 0;
+ }
+
+ nfserr = 0;
+out:
+ kfree(gdev->gd_device);
+ dprintk("%s: done: %d\n", __func__, be32_to_cpu(nfserr));
+ return nfserr;
+
+toosmall:
+ dprintk("%s: maxcount too small\n", __func__);
+ needed_len = xdr->buf->len + 4 /* notifications */;
+ xdr_truncate_encode(xdr, starting_len);
+ p = xdr_reserve_space(xdr, 4);
+ if (!p) {
+ nfserr = nfserr_resource;
+ } else {
+ *p++ = cpu_to_be32(needed_len);
+ nfserr = nfserr_toosmall;
+ }
+ goto out;
+}
+
+static __be32
+nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_layoutget *lgp)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ const struct nfsd4_layout_ops *ops =
+ nfsd4_layout_ops[lgp->lg_layout_type];
+ __be32 *p;
+
+ dprintk("%s: err %d\n", __func__, nfserr);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_resource;
+ p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
+ if (!p)
+ goto out;
+
+ *p++ = cpu_to_be32(1); /* we always set return-on-close */
+ *p++ = cpu_to_be32(lgp->lg_sid.si_generation);
+ p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
+ sizeof(stateid_opaque_t));
+
+ *p++ = cpu_to_be32(1); /* we always return a single layout */
+ p = xdr_encode_hyper(p, lgp->lg_seg.offset);
+ p = xdr_encode_hyper(p, lgp->lg_seg.length);
+ *p++ = cpu_to_be32(lgp->lg_seg.iomode);
+ *p++ = cpu_to_be32(lgp->lg_layout_type);
+
+ nfserr = ops->encode_layoutget(xdr, lgp);
+out:
+ kfree(lgp->lg_content);
+ return nfserr;
+}
+
+static __be32
+nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_layoutcommit *lcp)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ if (nfserr)
+ return nfserr;
+
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ return nfserr_resource;
+ *p++ = cpu_to_be32(lcp->lc_size_chg);
+ if (lcp->lc_size_chg) {
+ p = xdr_reserve_space(xdr, 8);
+ if (!p)
+ return nfserr_resource;
+ p = xdr_encode_hyper(p, lcp->lc_newsize);
+ }
+
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ if (nfserr)
+ return nfserr;
+
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ return nfserr_resource;
+ *p++ = cpu_to_be32(lrp->lrs_present);
+ if (lrp->lrs_present)
+ nfsd4_encode_stateid(xdr, &lrp->lr_sid);
+ return nfs_ok;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
@@ -3890,11 +4204,19 @@ static nfsd4_enc nfsd4_enc_ops[] = {
[OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
+#ifdef CONFIG_NFSD_PNFS
+ [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_getdeviceinfo,
+ [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_layoutcommit,
+ [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_layoutget,
+ [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_layoutreturn,
+#else
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop,
+#endif
[OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
[OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 19ace74d35f6..aa47d75ddb26 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -21,6 +21,7 @@
#include "cache.h"
#include "state.h"
#include "netns.h"
+#include "pnfs.h"
/*
* We have a single directory with several nodes in it.
@@ -1258,9 +1259,12 @@ static int __init init_nfsd(void)
retval = nfsd4_init_slabs();
if (retval)
goto out_unregister_pernet;
- retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
+ retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
+ retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
+ if (retval)
+ goto out_exit_pnfs;
nfsd_stat_init(); /* Statistics */
retval = nfsd_reply_cache_init();
if (retval)
@@ -1282,6 +1286,8 @@ out_free_lockd:
out_free_stat:
nfsd_stat_shutdown();
nfsd_fault_inject_cleanup();
+out_exit_pnfs:
+ nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
out_unregister_pernet:
@@ -1299,6 +1305,7 @@ static void __exit exit_nfsd(void)
nfsd_stat_shutdown();
nfsd_lockd_shutdown();
nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type);
unregister_pernet_subsys(&nfsd_net_ops);
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 33a46a8dfaf7..565c4da1a9eb 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -325,15 +325,27 @@ void nfsd_lockd_shutdown(void);
#define NFSD4_SUPPORTED_ATTRS_WORD2 0
+/* 4.1 */
+#ifdef CONFIG_NFSD_PNFS
+#define PNFSD_SUPPORTED_ATTRS_WORD1 FATTR4_WORD1_FS_LAYOUT_TYPES
+#define PNFSD_SUPPORTED_ATTRS_WORD2 \
+(FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_LAYOUT_TYPES)
+#else
+#define PNFSD_SUPPORTED_ATTRS_WORD1 0
+#define PNFSD_SUPPORTED_ATTRS_WORD2 0
+#endif /* CONFIG_NFSD_PNFS */
+
#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
NFSD4_SUPPORTED_ATTRS_WORD0
#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
- NFSD4_SUPPORTED_ATTRS_WORD1
+ (NFSD4_SUPPORTED_ATTRS_WORD1 | PNFSD_SUPPORTED_ATTRS_WORD1)
#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
- (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+ (NFSD4_SUPPORTED_ATTRS_WORD2 | PNFSD_SUPPORTED_ATTRS_WORD2 | \
+ FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+/* 4.2 */
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL
#else
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 08236d70c667..84cae2079d21 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -187,6 +187,24 @@ fh_init(struct svc_fh *fhp, int maxsize)
return fhp;
}
+static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+{
+ if (fh1->fh_size != fh2->fh_size)
+ return false;
+ if (memcmp(fh1->fh_base.fh_pad, fh2->fh_base.fh_pad, fh1->fh_size) != 0)
+ return false;
+ return true;
+}
+
+static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+{
+ if (fh1->fh_fsid_type != fh2->fh_fsid_type)
+ return false;
+ if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type)) != 0)
+ return false;
+ return true;
+}
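
As an aside, a minimal user-space sketch of the size-then-memcmp comparison used by fh_match()/fh_fsid_match() above; struct demo_fh and its fields are simplified stand-ins for struct knfsd_fh, not the real layout:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct knfsd_fh; field names are illustrative only. */
struct demo_fh {
	unsigned int	fh_size;
	unsigned char	fh_data[64];
};

static bool demo_fh_match(const struct demo_fh *a, const struct demo_fh *b)
{
	if (a->fh_size != b->fh_size)
		return false;
	/* Compare the opaque handle bytes only when the lengths agree. */
	return memcmp(a->fh_data, b->fh_data, a->fh_size) == 0;
}

int main(void)
{
	struct demo_fh x = { .fh_size = 4, .fh_data = "abcd" };
	struct demo_fh y = { .fh_size = 4, .fh_data = "abcd" };
	struct demo_fh z = { .fh_size = 4, .fh_data = "abce" };

	printf("x==y: %d, x==z: %d\n", demo_fh_match(&x, &y), demo_fh_match(&x, &z));
	return 0;
}
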
+
#ifdef CONFIG_NFSD_V3
/*
* The wcc data stored in current_fh should be cleared
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 314f5c8f8f1a..9277cc91c21b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -119,6 +119,7 @@ struct svc_program nfsd_program = {
static bool nfsd_supported_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1] = {
[0] = 1,
[1] = 1,
+ [2] = 1,
};
int nfsd_vers(int vers, enum vers_op change)
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
new file mode 100644
index 000000000000..fedb4d620a81
--- /dev/null
+++ b/fs/nfsd/pnfs.h
@@ -0,0 +1,81 @@
+#ifndef _FS_NFSD_PNFS_H
+#define _FS_NFSD_PNFS_H 1
+
+#include <linux/exportfs.h>
+#include <linux/nfsd/export.h>
+
+#include "state.h"
+#include "xdr4.h"
+
+struct xdr_stream;
+
+struct nfsd4_deviceid_map {
+ struct list_head hash;
+ u64 idx;
+ int fsid_type;
+ u32 fsid[];
+};
+
+struct nfsd4_layout_ops {
+ u32 notify_types;
+
+ __be32 (*proc_getdeviceinfo)(struct super_block *sb,
+ struct nfsd4_getdeviceinfo *gdevp);
+ __be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdevp);
+
+ __be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
+ struct nfsd4_layoutget *lgp);
+ __be32 (*encode_layoutget)(struct xdr_stream *,
+ struct nfsd4_layoutget *lgp);
+
+ __be32 (*proc_layoutcommit)(struct inode *inode,
+ struct nfsd4_layoutcommit *lcp);
+};
+
+extern const struct nfsd4_layout_ops *nfsd4_layout_ops[];
+extern const struct nfsd4_layout_ops bl_layout_ops;
+
+__be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, stateid_t *stateid,
+ bool create, u32 layout_type, struct nfs4_layout_stateid **lsp);
+__be32 nfsd4_insert_layout(struct nfsd4_layoutget *lgp,
+ struct nfs4_layout_stateid *ls);
+__be32 nfsd4_return_file_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp);
+__be32 nfsd4_return_client_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp);
+int nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
+ u32 device_generation);
+struct nfsd4_deviceid_map *nfsd4_find_devid_map(int idx);
+
+#ifdef CONFIG_NFSD_PNFS
+void nfsd4_setup_layout_type(struct svc_export *exp);
+void nfsd4_return_all_client_layouts(struct nfs4_client *);
+void nfsd4_return_all_file_layouts(struct nfs4_client *clp,
+ struct nfs4_file *fp);
+int nfsd4_init_pnfs(void);
+void nfsd4_exit_pnfs(void);
+#else
+static inline void nfsd4_setup_layout_type(struct svc_export *exp)
+{
+}
+
+static inline void nfsd4_return_all_client_layouts(struct nfs4_client *clp)
+{
+}
+static inline void nfsd4_return_all_file_layouts(struct nfs4_client *clp,
+ struct nfs4_file *fp)
+{
+}
+static inline void nfsd4_exit_pnfs(void)
+{
+}
+static inline int nfsd4_init_pnfs(void)
+{
+ return 0;
+}
+#endif /* CONFIG_NFSD_PNFS */
+#endif /* _FS_NFSD_PNFS_H */
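
For orientation, a hedged user-space sketch of the per-layout-type ops-table dispatch that nfsd4_layout_ops[] models; all names here (demo_layout_ops, demo_block_layoutget, the two-entry table) are illustrative inventions, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, user-space model of a per-layout-type ops table. */
struct demo_layout_ops {
	uint32_t notify_types;
	int (*proc_layoutget)(uint64_t offset, uint64_t length);
};

static int demo_block_layoutget(uint64_t offset, uint64_t length)
{
	printf("block layout: offset=%llu length=%llu\n",
	       (unsigned long long)offset, (unsigned long long)length);
	return 0;
}

/* Index 0 unused, index 1 = "block" layout in this toy model. */
static const struct demo_layout_ops *demo_layout_ops_table[2] = {
	[1] = &(const struct demo_layout_ops) {
		.notify_types = 0,
		.proc_layoutget = demo_block_layoutget,
	},
};

static int demo_dispatch_layoutget(uint32_t layout_type,
				   uint64_t offset, uint64_t length)
{
	const struct demo_layout_ops *ops;

	if (layout_type >= 2 || !(ops = demo_layout_ops_table[layout_type]))
		return -1;	/* unknown layout type */
	return ops->proc_layoutget(offset, length);
}

int main(void)
{
	return demo_dispatch_layoutget(1, 0, 4096);
}

The sparse, type-indexed table mirrors how a layout driver such as bl_layout_ops would register its callbacks for one layout type while unsupported types stay NULL.
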
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 9d3be371240a..4f3bfeb11766 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -92,6 +92,7 @@ struct nfs4_stid {
/* For a deleg stateid kept around only to process free_stateid's: */
#define NFS4_REVOKED_DELEG_STID 16
#define NFS4_CLOSED_DELEG_STID 32
+#define NFS4_LAYOUT_STID 64
unsigned char sc_type;
stateid_t sc_stateid;
struct nfs4_client *sc_client;
@@ -297,6 +298,9 @@ struct nfs4_client {
struct list_head cl_delegations;
struct list_head cl_revoked; /* unacknowledged, revoked 4.1 state */
struct list_head cl_lru; /* tail queue */
+#ifdef CONFIG_NFSD_PNFS
+ struct list_head cl_lo_states; /* outstanding layout states */
+#endif
struct xdr_netobj cl_name; /* id generated by client */
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
@@ -493,9 +497,13 @@ struct nfs4_file {
atomic_t fi_access[2];
u32 fi_share_deny;
struct file *fi_deleg_file;
- atomic_t fi_delegees;
+ int fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
+#ifdef CONFIG_NFSD_PNFS
+ struct list_head fi_lo_states;
+ atomic_t fi_lo_recalls;
+#endif
};
/*
@@ -528,6 +536,24 @@ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
return container_of(s, struct nfs4_ol_stateid, st_stid);
}
+struct nfs4_layout_stateid {
+ struct nfs4_stid ls_stid;
+ struct list_head ls_perclnt;
+ struct list_head ls_perfile;
+ spinlock_t ls_lock;
+ struct list_head ls_layouts;
+ u32 ls_layout_type;
+ struct file *ls_file;
+ struct nfsd4_callback ls_recall;
+ stateid_t ls_recall_sid;
+ bool ls_recalled;
+};
+
+static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s)
+{
+ return container_of(s, struct nfs4_layout_stateid, ls_stid);
+}
+
/* flags for preprocess_seqid_op() */
#define RD_STATE 0x00000010
#define WR_STATE 0x00000020
@@ -535,6 +561,7 @@ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_NULL = 0,
NFSPROC4_CLNT_CB_RECALL,
+ NFSPROC4_CLNT_CB_LAYOUT,
NFSPROC4_CLNT_CB_SEQUENCE,
};
@@ -545,6 +572,12 @@ struct nfsd_net;
extern __be32 nfs4_preprocess_stateid_op(struct net *net,
struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filp);
+__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ stateid_t *stateid, unsigned char typemask,
+ struct nfs4_stid **s, struct nfsd_net *nn);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ struct kmem_cache *slab);
+void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *);
extern void nfs4_release_reclaim(struct nfsd_net *);
@@ -567,6 +600,14 @@ extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name,
struct nfsd_net *nn);
extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn);
+struct nfs4_file *find_file(struct knfsd_fh *fh);
+void put_nfs4_file(struct nfs4_file *fi);
+static inline void get_nfs4_file(struct nfs4_file *fi)
+{
+ atomic_inc(&fi->fi_ref);
+}
+struct file *find_any_file(struct nfs4_file *f);
+
/* grace period management */
void nfsd4_end_grace(struct nfsd_net *nn);
diff --git a/fs/nfsd/trace.c b/fs/nfsd/trace.c
new file mode 100644
index 000000000000..82f89070594c
--- /dev/null
+++ b/fs/nfsd/trace.c
@@ -0,0 +1,5 @@
+
+#include "state.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
new file mode 100644
index 000000000000..c668520c344b
--- /dev/null
+++ b/fs/nfsd/trace.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nfsd
+
+#if !defined(_NFSD_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _NFSD_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(nfsd_stateid_class,
+ TP_PROTO(stateid_t *stp),
+ TP_ARGS(stp),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ ),
+ TP_printk("client %08x:%08x stateid %08x:%08x",
+ __entry->cl_boot,
+ __entry->cl_id,
+ __entry->si_id,
+ __entry->si_generation)
+)
+
+#define DEFINE_STATEID_EVENT(name) \
+DEFINE_EVENT(nfsd_stateid_class, name, \
+ TP_PROTO(stateid_t *stp), \
+ TP_ARGS(stp))
+DEFINE_STATEID_EVENT(layoutstate_alloc);
+DEFINE_STATEID_EVENT(layoutstate_unhash);
+DEFINE_STATEID_EVENT(layoutstate_free);
+DEFINE_STATEID_EVENT(layout_get_lookup_fail);
+DEFINE_STATEID_EVENT(layout_commit_lookup_fail);
+DEFINE_STATEID_EVENT(layout_return_lookup_fail);
+DEFINE_STATEID_EVENT(layout_recall);
+DEFINE_STATEID_EVENT(layout_recall_done);
+DEFINE_STATEID_EVENT(layout_recall_fail);
+DEFINE_STATEID_EVENT(layout_recall_release);
+
+#endif /* _NFSD_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
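
The DECLARE_EVENT_CLASS/DEFINE_EVENT pair above stamps out one tracepoint per name from a shared template. A reduced, compilable approximation of that pattern outside the kernel tracing machinery (DEMO_DEFINE_STATEID_EVENT and struct demo_stateid are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for stateid_t: just the four fields the class prints. */
struct demo_stateid {
	uint32_t cl_boot, cl_id, si_id, si_generation;
};

/* One "class" body, stamped out once per event name, as DEFINE_EVENT does. */
#define DEMO_DEFINE_STATEID_EVENT(name)					\
static void trace_##name(const struct demo_stateid *stp)		\
{									\
	printf(#name ": client %08x:%08x stateid %08x:%08x\n",		\
	       stp->cl_boot, stp->cl_id, stp->si_id, stp->si_generation); \
}

DEMO_DEFINE_STATEID_EVENT(layoutstate_alloc)
DEMO_DEFINE_STATEID_EVENT(layout_recall)

int main(void)
{
	struct demo_stateid sid = { 1, 2, 3, 4 };

	trace_layoutstate_alloc(&sid);
	trace_layout_recall(&sid);
	return 0;
}
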
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 90a5925bd6ab..0bda93e58e1b 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -428,6 +428,61 @@ struct nfsd4_reclaim_complete {
u32 rca_one_fs;
};
+struct nfsd4_deviceid {
+ u64 fsid_idx;
+ u32 generation;
+ u32 pad;
+};
+
+struct nfsd4_layout_seg {
+ u32 iomode;
+ u64 offset;
+ u64 length;
+};
+
+struct nfsd4_getdeviceinfo {
+ struct nfsd4_deviceid gd_devid; /* request */
+ u32 gd_layout_type; /* request */
+ u32 gd_maxcount; /* request */
+ u32 gd_notify_types;/* request - response */
+ void *gd_device; /* response */
+};
+
+struct nfsd4_layoutget {
+ u64 lg_minlength; /* request */
+ u32 lg_signal; /* request */
+ u32 lg_layout_type; /* request */
+ u32 lg_maxcount; /* request */
+ stateid_t lg_sid; /* request/response */
+ struct nfsd4_layout_seg lg_seg; /* request/response */
+ void *lg_content; /* response */
+};
+
+struct nfsd4_layoutcommit {
+ stateid_t lc_sid; /* request */
+ struct nfsd4_layout_seg lc_seg; /* request */
+ u32 lc_reclaim; /* request */
+ u32 lc_newoffset; /* request */
+ u64 lc_last_wr; /* request */
+ struct timespec lc_mtime; /* request */
+ u32 lc_layout_type; /* request */
+ u32 lc_up_len; /* layout length */
+ void *lc_up_layout; /* decoded by callback */
+ u32 lc_size_chg; /* boolean for response */
+ u64 lc_newsize; /* response */
+};
+
+struct nfsd4_layoutreturn {
+ u32 lr_return_type; /* request */
+ u32 lr_layout_type; /* request */
+ struct nfsd4_layout_seg lr_seg; /* request */
+ u32 lr_reclaim; /* request */
+ u32 lrf_body_len; /* request */
+ void *lrf_body; /* request */
+ stateid_t lr_sid; /* request/response */
+ u32 lrs_present; /* response */
+};
+
struct nfsd4_fallocate {
/* request */
stateid_t falloc_stateid;
@@ -491,6 +546,10 @@ struct nfsd4_op {
struct nfsd4_reclaim_complete reclaim_complete;
struct nfsd4_test_stateid test_stateid;
struct nfsd4_free_stateid free_stateid;
+ struct nfsd4_getdeviceinfo getdeviceinfo;
+ struct nfsd4_layoutget layoutget;
+ struct nfsd4_layoutcommit layoutcommit;
+ struct nfsd4_layoutreturn layoutreturn;
/* NFSv4.2 */
struct nfsd4_fallocate allocate;
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index c5c55dfb91a9..c47f6fdb111a 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -21,3 +21,10 @@
#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
+#define NFS4_enc_cb_layout_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
+ 1 + 3 + \
+ enc_nfs4_fh_sz + 4)
+#define NFS4_dec_cb_layout_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + \
+ op_dec_sz)
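
The callback buffer sizes above are counted in 4-byte XDR words. A tiny arithmetic sketch, using assumed (not authoritative) values for the component sizes, shows how such a total converts to bytes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical component sizes, in 4-byte XDR words. */
	int cb_compound_enc_hdr_sz = 4;
	int cb_sequence_enc_sz = 7;
	int enc_nfs4_fh_sz = 1 + 32 / 4;	/* length word + a 32-byte handle, for illustration */

	/* Mirrors NFS4_enc_cb_layout_sz: header + sequence + op + args + fh + range. */
	int words = cb_compound_enc_hdr_sz + cb_sequence_enc_sz +
		    1 + 3 + enc_nfs4_fh_sz + 4;

	printf("CB_LAYOUTRECALL encode buffer: %d words = %d bytes\n",
	       words, words * 4);
	return 0;
}
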
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 3a03e0aea1fb..a8c728acb7a8 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -128,7 +128,6 @@ static const struct vm_operations_struct nilfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = nilfs_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 57ceaf33d177..748ca238915a 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -172,7 +172,6 @@ int nilfs_init_gcinode(struct inode *inode)
inode->i_mode = S_IFREG;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
inode->i_mapping->a_ops = &empty_aops;
- inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index c4dcd1db57ee..892cf5ffdb8e 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -429,7 +429,6 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
inode->i_mode = S_IFREG;
mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
- inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
inode->i_op = &def_mdt_iops;
inode->i_fop = &def_mdt_fops;
@@ -457,13 +456,12 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct nilfs_shadow_map *shadow)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
- struct backing_dev_info *bdi = inode->i_sb->s_bdi;
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
- nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
+ nilfs_mapping_init(&shadow->frozen_data, inode);
address_space_init_once(&shadow->frozen_btnodes);
- nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
+ nilfs_mapping_init(&shadow->frozen_btnodes, inode);
mi->mi_shadow = shadow;
return 0;
}
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 91093cd74f0d..385704027575 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -141,7 +141,6 @@ enum {
* @ti_save: Backup of journal_info field of task_struct
* @ti_flags: Flags
* @ti_count: Nest level
- * @ti_garbage: List of inode to be put when releasing semaphore
*/
struct nilfs_transaction_info {
u32 ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
one of other filesystems has a bug. */
unsigned short ti_flags;
unsigned short ti_count;
- struct list_head ti_garbage;
};
/* ti_magic */
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index da276640f776..700ecbcca55d 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -461,14 +461,12 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
return nc;
}
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
- struct backing_dev_info *bdi)
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
mapping->host = inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->private_data = NULL;
- mapping->backing_dev_info = bdi;
mapping->a_ops = &empty_aops;
}
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index ef30c5c2426f..a43b8287d012 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -57,8 +57,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_page(struct page *, bool);
void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
- struct backing_dev_info *bdi);
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7ef18fc656c2..469086b9f99b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
ti->ti_count = 0;
ti->ti_save = cur_ti;
ti->ti_magic = NILFS_TI_MAGIC;
- INIT_LIST_HEAD(&ti->ti_garbage);
current->journal_info = ti;
for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
up_write(&nilfs->ns_segctor_sem);
current->journal_info = ti->ti_save;
- if (!list_empty(&ti->ti_garbage))
- nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
}
}
+static void nilfs_iput_work_func(struct work_struct *work)
+{
+ struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+ sc_iput_work);
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+}
+
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
struct nilfs_root *root)
{
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_transaction_info *ti = current->journal_info;
struct nilfs_inode_info *ii, *n;
+ int defer_iput = false;
spin_lock(&nilfs->ns_inode_lock);
list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
clear_bit(NILFS_I_BUSY, &ii->i_state);
brelse(ii->i_bh);
ii->i_bh = NULL;
- list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+ list_del_init(&ii->i_dirty);
+ if (!ii->vfs_inode.i_nlink) {
+ /*
+ * Defer calling iput() to avoid a deadlock
+ * over the I_SYNC flag for inodes with i_nlink == 0
+ */
+ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+ defer_iput = true;
+ } else {
+ spin_unlock(&nilfs->ns_inode_lock);
+ iput(&ii->vfs_inode);
+ spin_lock(&nilfs->ns_inode_lock);
+ }
}
spin_unlock(&nilfs->ns_inode_lock);
+
+ if (defer_iput)
+ schedule_work(&sci->sc_iput_work);
}
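
The hunk above defers iput() to a work item so the heavy cleanup runs outside ns_inode_lock and cannot deadlock on I_SYNC. A minimal pthread-based sketch of the same queue-under-lock, drain-outside-the-lock idea (plain user-space code, not the kernel workqueue API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy deferred-release queue: items queued under a lock, freed by a worker. */
struct demo_item { struct demo_item *next; int id; };

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_item *demo_queue;

static void demo_defer_release(struct demo_item *it)
{
	pthread_mutex_lock(&demo_lock);
	it->next = demo_queue;
	demo_queue = it;		/* cheap while the lock is held */
	pthread_mutex_unlock(&demo_lock);
}

static void *demo_worker(void *arg)
{
	struct demo_item *it, *next;

	pthread_mutex_lock(&demo_lock);
	it = demo_queue;
	demo_queue = NULL;
	pthread_mutex_unlock(&demo_lock);

	for (; it; it = next) {		/* heavy work done outside the lock */
		next = it->next;
		printf("releasing item %d\n", it->id);
		free(it);
	}
	return arg;
}

int main(void)
{
	pthread_t worker;
	struct demo_item *it = malloc(sizeof(*it));

	if (!it)
		return 1;
	it->id = 1;
	demo_defer_release(it);
	pthread_create(&worker, NULL, demo_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}

The flush_work() calls added later in the patch play the role of pthread_join() here: they guarantee the deferred releases have finished before teardown continues.
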
/*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
INIT_LIST_HEAD(&sci->sc_segbufs);
INIT_LIST_HEAD(&sci->sc_write_logs);
INIT_LIST_HEAD(&sci->sc_gc_inodes);
+ INIT_LIST_HEAD(&sci->sc_iput_queue);
+ INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
init_timer(&sci->sc_timer);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_transaction_unlock(sci->sc_super);
+ flush_work(&sci->sc_iput_work);
+
} while (ret && retrycount-- > 0);
}
@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
+ if (flush_work(&sci->sc_iput_work))
+ flag = true;
+
if (flag || !nilfs_segctor_confirm(sci))
nilfs_segctor_write_out(sci);
@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
}
+ if (!list_empty(&sci->sc_iput_queue)) {
+ nilfs_warning(sci->sc_super, __func__,
+ "iput queue is not empty\n");
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+ }
+
WARN_ON(!list_empty(&sci->sc_segbufs));
WARN_ON(!list_empty(&sci->sc_write_logs));
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 38a1d0013314..a48d6de1e02c 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/workqueue.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
* @sc_nblk_inc: Block count of current generation
* @sc_dirty_files: List of files to be written
* @sc_gc_inodes: List of GC inodes having blocks to be written
+ * @sc_iput_queue: List of inodes for which iput should be done
+ * @sc_iput_work: Work struct to defer iput call
* @sc_freesegs: array of segment numbers to be freed
* @sc_nfreesegs: number of segments on @sc_freesegs
* @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
struct list_head sc_dirty_files;
struct list_head sc_gc_inodes;
+ struct list_head sc_iput_queue;
+ struct work_struct sc_iput_work;
__u64 *sc_freesegs;
size_t sc_nfreesegs;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 2e5b3ec85b8f..5bc2a1cf73c3 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -166,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
ii->i_state = 0;
ii->i_cno = 0;
ii->vfs_inode.i_version = 1;
- nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
+ nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
return &ii->vfs_inode;
}
@@ -1057,7 +1057,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct the_nilfs *nilfs;
struct nilfs_root *fsroot;
- struct backing_dev_info *bdi;
__u64 cno;
int err;
@@ -1077,8 +1076,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sb->s_max_links = NILFS_LINK_MAX;
- bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
- sb->s_bdi = bdi ? : &default_backing_dev_info;
+ sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
err = load_nilfs(nilfs, sb);
if (err)
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629eedd82..2a24249b30af 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,5 +1,6 @@
config FSNOTIFY
def_bool n
+ select SRCU
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 30d3addfad75..51ceb8107284 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -140,7 +140,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
}
if (S_ISDIR(path->dentry->d_inode->i_mode) &&
- (marks_ignored_mask & FS_ISDIR))
+ !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
return false;
if (event_mask & marks_mask & ~marks_ignored_mask)
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index bff8567aa42d..cf275500a665 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -487,20 +487,27 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
unsigned int flags,
int *destroy)
{
- __u32 oldmask;
+ __u32 oldmask = 0;
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ __u32 tmask = fsn_mark->mask & ~mask;
+
+ if (flags & FAN_MARK_ONDIR)
+ tmask &= ~FAN_ONDIR;
+
oldmask = fsn_mark->mask;
- fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
+ fsnotify_set_mark_mask_locked(fsn_mark, tmask);
} else {
- oldmask = fsn_mark->ignored_mask;
- fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
+ __u32 tmask = fsn_mark->ignored_mask & ~mask;
+ if (flags & FAN_MARK_ONDIR)
+ tmask &= ~FAN_ONDIR;
+
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
}
+ *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
spin_unlock(&fsn_mark->lock);
- *destroy = !(oldmask & ~mask);
-
return mask & oldmask;
}
@@ -569,20 +576,22 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ __u32 tmask = fsn_mark->mask | mask;
+
+ if (flags & FAN_MARK_ONDIR)
+ tmask |= FAN_ONDIR;
+
oldmask = fsn_mark->mask;
- fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
+ fsnotify_set_mark_mask_locked(fsn_mark, tmask);
} else {
__u32 tmask = fsn_mark->ignored_mask | mask;
+ if (flags & FAN_MARK_ONDIR)
+ tmask |= FAN_ONDIR;
+
fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
}
-
- if (!(flags & FAN_MARK_ONDIR)) {
- __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
- fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
- }
-
spin_unlock(&fsn_mark->lock);
return mask & ~oldmask;
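
A small user-space model of the mask bookkeeping in the two hunks above, showing how FAN_MARK_ONDIR now folds FAN_ONDIR into the same mask update rather than patching the ignored mask afterwards; the DEMO_* bit values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Made-up bit values for illustration only. */
#define DEMO_FAN_OPEN	0x01u
#define DEMO_FAN_ONDIR	0x02u
#define DEMO_MARK_ONDIR	0x10u	/* flag passed by the caller */

static uint32_t demo_add_to_mask(uint32_t cur, uint32_t mask, unsigned flags)
{
	uint32_t tmask = cur | mask;

	if (flags & DEMO_MARK_ONDIR)
		tmask |= DEMO_FAN_ONDIR;
	return tmask;
}

static uint32_t demo_remove_from_mask(uint32_t cur, uint32_t mask, unsigned flags)
{
	uint32_t tmask = cur & ~mask;

	if (flags & DEMO_MARK_ONDIR)
		tmask &= ~DEMO_FAN_ONDIR;
	return tmask;
}

int main(void)
{
	uint32_t m = 0;

	m = demo_add_to_mask(m, DEMO_FAN_OPEN, DEMO_MARK_ONDIR);
	printf("after add:    %#x\n", (unsigned)m);	/* FAN_OPEN | FAN_ONDIR */
	m = demo_remove_from_mask(m, DEMO_FAN_OPEN, 0);
	printf("after remove: %#x\n", (unsigned)m);	/* FAN_ONDIR still set */
	return 0;
}
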
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 643faa44f22b..1da9b2d184dc 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -19,6 +19,7 @@
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
@@ -2091,7 +2092,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
count = iov_length(iov, nr_segs);
pos = *ppos;
/* We can write back this queue in page reclaim. */
- current->backing_dev_info = mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
written = 0;
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 7e8282dcea2a..c58a1bcfda0f 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -245,16 +245,14 @@ int ocfs2_set_acl(handle_t *handle,
ret = posix_acl_equiv_mode(acl, &mode);
if (ret < 0)
return ret;
- else {
- if (ret == 0)
- acl = NULL;
- ret = ocfs2_acl_set_mode(inode, di_bh,
- handle, mode);
- if (ret)
- return ret;
+ if (ret == 0)
+ acl = NULL;
- }
+ ret = ocfs2_acl_set_mode(inode, di_bh,
+ handle, mode);
+ if (ret)
+ return ret;
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index fcae9ef1a328..044158bd22be 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6873,7 +6873,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
- goto out_unlock;
+ goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
@@ -6931,7 +6931,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (ret) {
mlog_errno(ret);
need_free = 1;
- goto out_commit;
+ goto out_unlock;
}
page_end = PAGE_CACHE_SIZE;
@@ -6964,12 +6964,16 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (ret) {
mlog_errno(ret);
need_free = 1;
- goto out_commit;
+ goto out_unlock;
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
}
+out_unlock:
+ if (pages)
+ ocfs2_unlock_and_free_pages(pages, num_pages);
+
out_commit:
if (ret < 0 && did_quota)
dquot_free_space_nodirty(inode,
@@ -6989,15 +6993,11 @@ out_commit:
ocfs2_commit_trans(osb, handle);
-out_unlock:
+out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
-
-out:
- if (pages) {
- ocfs2_unlock_and_free_pages(pages, num_pages);
+ if (pages)
kfree(pages);
- }
return ret;
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 46d93e941f3d..44db1808cdb5 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -28,6 +28,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
+#include <linux/blkdev.h>
#include <cluster/masklog.h>
@@ -47,6 +48,9 @@
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
+#include "dir.h"
+#include "namei.h"
+#include "sysfile.h"
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
@@ -506,18 +510,21 @@ bail:
*
* called like this: dio->get_blocks(dio->inode, fs_startblk,
* fs_count, map_bh, dio->rw == WRITE);
- *
- * Note that we never bother to allocate blocks here, and thus ignore the
- * create argument.
*/
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret;
+ u32 cpos = 0;
+ int alloc_locked = 0;
u64 p_blkno, inode_blocks, contig_blocks;
unsigned int ext_flags;
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ unsigned long len = bh_result->b_size;
+ unsigned int clusters_to_alloc = 0;
+
+ cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock);
/* This function won't even be called if the request isn't all
* nicely aligned and of the right size, so there's no need
@@ -539,6 +546,40 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
/* We should already CoW the refcounted extent in case of create. */
BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
+ /* allocate blocks if no p_blkno is found, and create == 1 */
+ if (!p_blkno && create) {
+ ret = ocfs2_inode_lock(inode, NULL, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ alloc_locked = 1;
+
+ /* fill the hole; the allocation can't be larger than
+ * the size of the hole */
+ clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
+ if (clusters_to_alloc > contig_blocks)
+ clusters_to_alloc = contig_blocks;
+
+ /* allocate extent and insert them into the extent tree */
+ ret = ocfs2_extend_allocation(inode, cpos,
+ clusters_to_alloc, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
+ &contig_blocks, &ext_flags);
+ if (ret < 0) {
+ mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
+ (unsigned long long)iblock);
+ ret = -EIO;
+ goto bail;
+ }
+ }
+
/*
* get_more_blocks() expects us to describe a hole by clearing
* the mapped bit on bh_result().
@@ -556,6 +597,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
contig_blocks = max_blocks;
bh_result->b_size = contig_blocks << blocksize_bits;
bail:
+ if (alloc_locked)
+ ocfs2_inode_unlock(inode, 1);
return ret;
}
@@ -597,6 +640,184 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
return try_to_free_buffers(page);
}
+static int ocfs2_is_overwrite(struct ocfs2_super *osb,
+ struct inode *inode, loff_t offset)
+{
+ int ret = 0;
+ u32 v_cpos = 0;
+ u32 p_cpos = 0;
+ unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
+
+ v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
+ ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
+ &num_clusters, &ext_flags);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN))
+ return 1;
+
+ return 0;
+}
+
+static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t offset)
+{
+ ssize_t ret = 0;
+ ssize_t written = 0;
+ bool orphaned = false;
+ int is_overwrite = 0;
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file)->i_mapping->host;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *di_bh = NULL;
+ size_t count = iter->count;
+ journal_t *journal = osb->journal->j_journal;
+ u32 zero_len;
+ int cluster_align;
+ loff_t final_size = offset + count;
+ int append_write = offset >= i_size_read(inode) ? 1 : 0;
+ unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
+
+ {
+ u64 o = offset;
+
+ zero_len = do_div(o, 1 << osb->s_clustersize_bits);
+ cluster_align = !zero_len;
+ }
+
+ /*
+ * When final_size > i_size, i_size will only be updated
+ * after the direct write completes, so add the inode to
+ * the orphan dir first.
+ */
+ if (final_size > i_size_read(inode)) {
+ ret = ocfs2_add_inode_to_orphan(osb, inode);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ orphaned = true;
+ }
+
+ if (append_write) {
+ ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto clean_orphan;
+ }
+
+ if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ ret = ocfs2_zero_extend(inode, di_bh, offset);
+ else
+ ret = ocfs2_extend_no_holes(inode, di_bh, offset,
+ offset);
+ if (ret < 0) {
+ mlog_errno(ret);
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+ goto clean_orphan;
+ }
+
+ is_overwrite = ocfs2_is_overwrite(osb, inode, offset);
+ if (is_overwrite < 0) {
+ mlog_errno(is_overwrite);
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+ goto clean_orphan;
+ }
+
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+ di_bh = NULL;
+ }
+
+ written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
+ iter, offset,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
+ if (unlikely(written < 0)) {
+ loff_t i_size = i_size_read(inode);
+
+ if (offset + count > i_size) {
+ ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto clean_orphan;
+ }
+
+ if (i_size == i_size_read(inode)) {
+ ret = ocfs2_truncate_file(inode, di_bh,
+ i_size);
+ if (ret < 0) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+ goto clean_orphan;
+ }
+ }
+
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+
+ ret = jbd2_journal_force_commit(journal);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+ } else if (written > 0 && append_write && !is_overwrite &&
+ !cluster_align) {
+ u32 p_cpos = 0;
+ u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
+
+ ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
+ &num_clusters, &ext_flags);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto clean_orphan;
+ }
+
+ BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
+
+ ret = blkdev_issue_zeroout(osb->sb->s_bdev,
+ p_cpos << (osb->s_clustersize_bits - 9),
+ zero_len >> 9, GFP_KERNEL, false);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+clean_orphan:
+ if (orphaned) {
+ int tmp_ret;
+ int update_isize = written > 0 ? 1 : 0;
+ loff_t end = update_isize ? offset + written : 0;
+
+ tmp_ret = ocfs2_del_inode_from_orphan(osb, inode,
+ update_isize, end);
+ if (tmp_ret < 0) {
+ ret = tmp_ret;
+ goto out;
+ }
+
+ tmp_ret = jbd2_journal_force_commit(journal);
+ if (tmp_ret < 0) {
+ ret = tmp_ret;
+ mlog_errno(tmp_ret);
+ }
+ }
+
+out:
+ if (ret >= 0)
+ ret = written;
+ return ret;
+}
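
The alignment check near the top of ocfs2_direct_IO_write() boils down to offset modulo cluster size. A short sketch with an assumed 1 MiB cluster size showing zero_len, cluster_align, and the equivalent sector count used when zeroing the tail of a cluster:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration: 1 MiB cluster, unaligned offset. */
	unsigned int clustersize_bits = 20;
	uint64_t offset = (5ULL << 20) + 4096;

	uint32_t zero_len = offset & ((1u << clustersize_bits) - 1);
	int cluster_align = !zero_len;

	printf("offset %llu: zero_len=%u bytes (%u sectors), cluster_align=%d\n",
	       (unsigned long long)offset, zero_len, zero_len >> 9,
	       cluster_align);
	return 0;
}
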
+
static ssize_t ocfs2_direct_IO(int rw,
struct kiocb *iocb,
struct iov_iter *iter,
@@ -604,6 +825,9 @@ static ssize_t ocfs2_direct_IO(int rw,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file)->i_mapping->host;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ int full_coherency = !(osb->s_mount_opt &
+ OCFS2_MOUNT_COHERENCY_BUFFERED);
/*
* Fallback to buffered I/O if we see an inode without
@@ -612,14 +836,20 @@ static ssize_t ocfs2_direct_IO(int rw,
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
- /* Fallback to buffered I/O if we are appending. */
- if (i_size_read(inode) <= offset)
+ /* Fall back to buffered I/O if we are appending and
+ * concurrent O_DIRECT writes are allowed.
+ */
+ if (i_size_read(inode) <= offset && !full_coherency)
return 0;
- return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+ if (rw == READ)
+ return __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev,
iter, offset,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io, NULL, 0);
+ else
+ return ocfs2_direct_IO_write(iocb, iter, offset);
}
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 2e355e0f8335..56c403a563bc 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1016,7 +1016,8 @@ void o2net_fill_node_map(unsigned long *map, unsigned bytes)
memset(map, 0, bytes);
for (node = 0; node < O2NM_MAX_NODES; ++node) {
- o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+ if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
+ continue;
if (!ret) {
set_bit(node, map);
sc_put(sc);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index dc024367110a..b95e7df5b76a 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -107,12 +107,12 @@ struct o2net_node {
struct list_head nn_status_list;
/* connects are attempted from when heartbeat comes up until either hb
- * goes down, the node is unconfigured, no connect attempts succeed
- * before O2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
- * is queued from set_nn_state both from hb up and from itself if a
- * connect attempt fails and so can be self-arming. shutdown is
- * careful to first mark the nn such that no connects will be attempted
- * before canceling delayed connect work and flushing the queue. */
+ * goes down, the node is unconfigured, or a connect succeeds.
+ * connect_work is queued from set_nn_state both from hb up and from
+ * itself if a connect attempt fails and so can be self-arming.
+ * shutdown is careful to first mark the nn such that no connects will
+ * be attempted before canceling delayed connect work and flushing the
+ * queue. */
struct delayed_work nn_connect_work;
unsigned long nn_last_connect_attempt;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 319e786175af..b08050bd3f2e 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3456,10 +3456,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
int blocksize = dir->i_sb->s_blocksize;
status = ocfs2_read_dir_block(dir, 0, &bh, 0);
- if (status) {
- mlog_errno(status);
+ if (status)
goto bail;
- }
rec_len = OCFS2_DIR_REC_LEN(namelen);
offset = 0;
@@ -3480,10 +3478,9 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
status = ocfs2_read_dir_block(dir,
offset >> sb->s_blocksize_bits,
&bh, 0);
- if (status) {
- mlog_errno(status);
+ if (status)
goto bail;
- }
+
/* move to next block */
de = (struct ocfs2_dir_entry *) bh->b_data;
}
@@ -3513,7 +3510,6 @@ next:
de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
}
- status = 0;
bail:
brelse(bh);
if (status)
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index b46278f9ae44..fd6bbbbd7d78 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -385,8 +385,12 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
head = &res->granted;
list_for_each_entry(lock, head, list) {
- if (lock->ml.cookie == cookie)
+ /* if lock is found but unlock is pending ignore the bast */
+ if (lock->ml.cookie == cookie) {
+ if (lock->unlock_pending)
+ break;
goto do_ast;
+ }
}
mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 149eb556b8c6..825136070d2c 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -406,7 +406,7 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
}
spin_unlock(&dlm->spinlock);
- out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
+ out += snprintf(buf + out, len - out, "Total on list: %lu\n", total);
return out;
}
@@ -464,7 +464,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
spin_unlock(&dlm->master_lock);
out += snprintf(buf + out, len - out,
- "Total: %ld, Longest: %ld\n", total, longest);
+ "Total: %lu, Longest: %lu\n", total, longest);
return out;
}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 50a59d2337b2..7df88a6dd626 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -674,20 +674,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
spin_unlock(&dlm->spinlock);
}
-int dlm_joined(struct dlm_ctxt *dlm)
-{
- int ret = 0;
-
- spin_lock(&dlm_domain_lock);
-
- if (dlm->dlm_state == DLM_CTXT_JOINED)
- ret = 1;
-
- spin_unlock(&dlm_domain_lock);
-
- return ret;
-}
-
int dlm_shutting_down(struct dlm_ctxt *dlm)
{
int ret = 0;
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
index 2f7f60bfeb3b..fd6122a38dbd 100644
--- a/fs/ocfs2/dlm/dlmdomain.h
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -28,7 +28,6 @@
extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;
-int dlm_joined(struct dlm_ctxt *dlm);
int dlm_shutting_down(struct dlm_ctxt *dlm);
void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
int node_num);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index cecd875653e4..ce12e0b1a31f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1070,6 +1070,9 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
+ /* Can't schedule DLM_UNLOCK_FREE_LOCK
+ * - do manually */
+ dlm_lock_put(lock);
break;
}
}
@@ -2346,6 +2349,10 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
+ /* Can't schedule
+ * DLM_UNLOCK_FREE_LOCK
+ * - do manually */
+ dlm_lock_put(lock);
break;
}
}
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 57c40e34f56f..061ba6a91bf2 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -390,12 +390,6 @@ clear_fields:
ip->ip_conn = NULL;
}
-static struct backing_dev_info dlmfs_backing_dev_info = {
- .name = "ocfs2-dlmfs",
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
@@ -404,7 +398,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(inode, NULL, mode);
- inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inc_nlink(inode);
@@ -428,7 +421,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_ino = get_next_ino();
inode_init_owner(inode, parent, mode);
- inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ip = DLMFS_I(inode);
@@ -643,10 +635,6 @@ static int __init init_dlmfs_fs(void)
int status;
int cleanup_inode = 0, cleanup_worker = 0;
- status = bdi_init(&dlmfs_backing_dev_info);
- if (status)
- return status;
-
dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
sizeof(struct dlmfs_inode_private),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
@@ -673,7 +661,6 @@ bail:
kmem_cache_destroy(dlmfs_inode_cache);
if (cleanup_worker)
destroy_workqueue(user_dlm_worker);
- bdi_destroy(&dlmfs_backing_dev_info);
} else
printk("OCFS2 User DLM kernel interface loaded\n");
return status;
@@ -693,7 +680,6 @@ static void __exit exit_dlmfs_fs(void)
rcu_barrier();
kmem_cache_destroy(dlmfs_inode_cache);
- bdi_destroy(&dlmfs_backing_dev_info);
}
MODULE_AUTHOR("Oracle");
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 1c423af04c69..11849a44dc5a 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3750,6 +3750,9 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
break;
spin_unlock(&dentry_attach_lock);
+ if (S_ISDIR(dl->dl_inode->i_mode))
+ shrink_dcache_parent(dentry);
+
mlog(0, "d_delete(%pd);\n", dentry);
/*
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3950693dd0f6..46e0d4e857c7 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -295,7 +295,7 @@ out:
return ret;
}
-static int ocfs2_set_inode_size(handle_t *handle,
+int ocfs2_set_inode_size(handle_t *handle,
struct inode *inode,
struct buffer_head *fe_bh,
u64 new_i_size)
@@ -441,7 +441,7 @@ out:
return status;
}
-static int ocfs2_truncate_file(struct inode *inode,
+int ocfs2_truncate_file(struct inode *inode,
struct buffer_head *di_bh,
u64 new_i_size)
{
@@ -569,7 +569,7 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
handle_t *handle = NULL;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
- enum ocfs2_alloc_restarted why;
+ enum ocfs2_alloc_restarted why = RESTART_NONE;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_tree et;
int did_quota = 0;
@@ -709,6 +709,13 @@ leave:
return status;
}
+int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
+ u32 clusters_to_add, int mark_unwritten)
+{
+ return __ocfs2_extend_allocation(inode, logical_start,
+ clusters_to_add, mark_unwritten);
+}
+
/*
* While a write will already be ordering the data, a truncate will not.
* Thus, we need to explicitly order the zeroed pages.
@@ -2109,6 +2116,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
loff_t saved_pos = 0, end;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ int full_coherency = !(osb->s_mount_opt &
+ OCFS2_MOUNT_COHERENCY_BUFFERED);
/*
* We start with a read level meta lock and only jump to an ex
@@ -2197,7 +2207,16 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
* one node could wind up truncating another
* nodes writes.
*/
- if (end > i_size_read(inode)) {
+ if (end > i_size_read(inode) && !full_coherency) {
+ *direct_io = 0;
+ break;
+ }
+
+ /*
+ * Fall back to the old way if the feature bit is not set.
+ */
+ if (end > i_size_read(inode) &&
+ !ocfs2_supports_append_dio(osb)) {
*direct_io = 0;
break;
}
@@ -2210,7 +2229,13 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
*/
ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
if (ret == 1) {
- *direct_io = 0;
+ /*
+ * Fall back to the old way if the feature bit is not set.
+ * Otherwise try dio first and then complete the rest of
+ * the request through buffered I/O.
+ */
+ if (!ocfs2_supports_append_dio(osb))
+ *direct_io = 0;
ret = 0;
} else if (ret < 0)
mlog_errno(ret);
@@ -2243,6 +2268,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
u32 old_clusters;
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
@@ -2357,13 +2383,53 @@ relock:
iov_iter_truncate(from, count);
if (direct_io) {
+ loff_t endbyte;
+ ssize_t written_buffered;
written = generic_file_direct_write(iocb, from, *ppos);
- if (written < 0) {
+ if (written < 0 || written == count) {
ret = written;
goto out_dio;
}
+
+ /*
+ * Fall through to buffered I/O to complete the rest
+ * of the request.
+ */
+ *ppos += written;
+ count -= written;
+ written_buffered = generic_perform_write(file, from, *ppos);
+ /*
+ * If generic_perform_write() returned a synchronous error
+ * then we want to return the number of bytes which were
+ * direct-written, or the error code if that was zero. Note
+ * that this differs from normal direct-io semantics, which
+ * will return -EFOO even if some bytes were written.
+ */
+ if (written_buffered < 0) {
+ ret = written_buffered;
+ goto out_dio;
+ }
+
+ iocb->ki_pos = *ppos + written_buffered;
+ /* We need to ensure that the page cache pages are written to
+ * disk and invalidated to preserve the expected O_DIRECT
+ * semantics.
+ */
+ endbyte = *ppos + written_buffered - 1;
+ ret = filemap_write_and_wait_range(file->f_mapping, *ppos,
+ endbyte);
+ if (ret == 0) {
+ written += written_buffered;
+ invalidate_mapping_pages(mapping,
+ *ppos >> PAGE_CACHE_SHIFT,
+ endbyte >> PAGE_CACHE_SHIFT);
+ } else {
+ /*
+ * We don't know how much we wrote, so just return
+ * the number of bytes which were direct-written
+ */
+ }
} else {
- current->backing_dev_info = file->f_mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
written = generic_perform_write(file, from, *ppos);
if (likely(written >= 0))
iocb->ki_pos = *ppos + written;
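
The write path above finishes a short direct write with buffered I/O and then flushes and invalidates the affected page-cache range. A stub-based sketch of just the byte accounting; stub_direct_write()/stub_buffered_write() are hypothetical stand-ins for generic_file_direct_write()/generic_perform_write(), and locking and cache invalidation are intentionally omitted:

#include <stdio.h>
#include <sys/types.h>

/* Hypothetical stubs: a short direct write followed by a buffered remainder. */
static ssize_t stub_direct_write(off_t pos, size_t count)
{
	(void)pos;
	return (ssize_t)(count / 2);	/* pretend only half went out directly */
}

static ssize_t stub_buffered_write(off_t pos, size_t count)
{
	(void)pos;
	return (ssize_t)count;		/* buffered path finishes the rest */
}

int main(void)
{
	off_t pos = 0;
	size_t count = 8192;
	ssize_t written, written_buffered;

	written = stub_direct_write(pos, count);
	if (written < 0 || (size_t)written == count) {
		printf("done after direct write: %zd\n", written);
		return 0;
	}

	/* Complete the remainder through the buffered path, as the patch does. */
	pos += written;
	count -= written;
	written_buffered = stub_buffered_write(pos, count);
	if (written_buffered < 0) {
		printf("returning direct-written bytes only: %zd\n", written);
		return 0;
	}

	/* New file position and running total; a real caller would now flush
	 * and invalidate the page-cache range covered by the buffered part. */
	pos += written_buffered;
	written += written_buffered;
	printf("pos=%lld total=%zd\n", (long long)pos, written);
	return 0;
}
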
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 97bf761c9e7c..e8c62f22215c 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -51,13 +51,22 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret);
+int ocfs2_set_inode_size(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 new_i_size);
int ocfs2_simple_size_update(struct inode *inode,
struct buffer_head *di_bh,
u64 new_i_size);
+int ocfs2_truncate_file(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 new_i_size);
int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
u64 new_i_size, u64 zero_to);
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
loff_t zero_to);
+int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
+ u32 clusters_to_add, int mark_unwritten);
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index c8b25de9efbb..3025c0da6b8a 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -648,7 +648,7 @@ static int ocfs2_remove_inode(struct inode *inode,
if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
- orphan_dir_bh);
+ orphan_dir_bh, false);
if (status < 0) {
mlog_errno(status);
goto bail_commit;
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index ca3431ee7f24..5e86b247c821 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -81,6 +81,8 @@ struct ocfs2_inode_info
tid_t i_sync_tid;
tid_t i_datasync_tid;
+ wait_queue_head_t append_dio_wq;
+
struct dquot *i_dquot[MAXQUOTAS];
};
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 4f502382180f..ff531928269e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -50,6 +50,8 @@
#include "sysfile.h"
#include "uptodate.h"
#include "quota.h"
+#include "file.h"
+#include "namei.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"
@@ -69,13 +71,15 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
- int slot);
+ int slot,
+ enum ocfs2_orphan_reco_type orphan_reco_type);
static int ocfs2_commit_thread(void *arg);
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
int slot_num,
struct ocfs2_dinode *la_dinode,
struct ocfs2_dinode *tl_dinode,
- struct ocfs2_quota_recovery *qrec);
+ struct ocfs2_quota_recovery *qrec,
+ enum ocfs2_orphan_reco_type orphan_reco_type);
static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
@@ -149,7 +153,8 @@ int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
return 0;
}
-void ocfs2_queue_replay_slots(struct ocfs2_super *osb)
+void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
+ enum ocfs2_orphan_reco_type orphan_reco_type)
{
struct ocfs2_replay_map *replay_map = osb->replay_map;
int i;
@@ -163,7 +168,8 @@ void ocfs2_queue_replay_slots(struct ocfs2_super *osb)
for (i = 0; i < replay_map->rm_slots; i++)
if (replay_map->rm_replay_slots[i])
ocfs2_queue_recovery_completion(osb->journal, i, NULL,
- NULL, NULL);
+ NULL, NULL,
+ orphan_reco_type);
replay_map->rm_state = REPLAY_DONE;
}
@@ -1174,6 +1180,7 @@ struct ocfs2_la_recovery_item {
struct ocfs2_dinode *lri_la_dinode;
struct ocfs2_dinode *lri_tl_dinode;
struct ocfs2_quota_recovery *lri_qrec;
+ enum ocfs2_orphan_reco_type lri_orphan_reco_type;
};
/* Does the second half of the recovery process. By this point, the
@@ -1195,6 +1202,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
struct ocfs2_dinode *la_dinode, *tl_dinode;
struct ocfs2_la_recovery_item *item, *n;
struct ocfs2_quota_recovery *qrec;
+ enum ocfs2_orphan_reco_type orphan_reco_type;
LIST_HEAD(tmp_la_list);
trace_ocfs2_complete_recovery(
@@ -1212,6 +1220,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
la_dinode = item->lri_la_dinode;
tl_dinode = item->lri_tl_dinode;
qrec = item->lri_qrec;
+ orphan_reco_type = item->lri_orphan_reco_type;
trace_ocfs2_complete_recovery_slot(item->lri_slot,
la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
@@ -1236,7 +1245,8 @@ void ocfs2_complete_recovery(struct work_struct *work)
kfree(tl_dinode);
}
- ret = ocfs2_recover_orphans(osb, item->lri_slot);
+ ret = ocfs2_recover_orphans(osb, item->lri_slot,
+ orphan_reco_type);
if (ret < 0)
mlog_errno(ret);
@@ -1261,7 +1271,8 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
int slot_num,
struct ocfs2_dinode *la_dinode,
struct ocfs2_dinode *tl_dinode,
- struct ocfs2_quota_recovery *qrec)
+ struct ocfs2_quota_recovery *qrec,
+ enum ocfs2_orphan_reco_type orphan_reco_type)
{
struct ocfs2_la_recovery_item *item;
@@ -1285,6 +1296,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
item->lri_slot = slot_num;
item->lri_tl_dinode = tl_dinode;
item->lri_qrec = qrec;
+ item->lri_orphan_reco_type = orphan_reco_type;
spin_lock(&journal->j_lock);
list_add_tail(&item->lri_list, &journal->j_la_cleanups);
@@ -1304,7 +1316,8 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
/* No need to queue up our truncate_log as regular cleanup will catch
* that */
ocfs2_queue_recovery_completion(journal, osb->slot_num,
- osb->local_alloc_copy, NULL, NULL);
+ osb->local_alloc_copy, NULL, NULL,
+ ORPHAN_NEED_TRUNCATE);
ocfs2_schedule_truncate_log_flush(osb, 0);
osb->local_alloc_copy = NULL;
@@ -1312,7 +1325,7 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
/* queue to recover orphan slots for all offline slots */
ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
- ocfs2_queue_replay_slots(osb);
+ ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
ocfs2_free_replay_slots(osb);
}
@@ -1323,7 +1336,8 @@ void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
osb->slot_num,
NULL,
NULL,
- osb->quota_rec);
+ osb->quota_rec,
+ ORPHAN_NEED_TRUNCATE);
osb->quota_rec = NULL;
}
}
@@ -1360,7 +1374,7 @@ restart:
/* queue recovery for our own slot */
ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
- NULL, NULL);
+ NULL, NULL, ORPHAN_NO_NEED_TRUNCATE);
spin_lock(&osb->osb_lock);
while (rm->rm_used) {
@@ -1419,13 +1433,14 @@ skip_recovery:
continue;
}
ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
- NULL, NULL, qrec);
+ NULL, NULL, qrec,
+ ORPHAN_NEED_TRUNCATE);
}
ocfs2_super_unlock(osb, 1);
/* queue recovery for offline slots */
- ocfs2_queue_replay_slots(osb);
+ ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
bail:
mutex_lock(&osb->recovery_lock);
@@ -1447,7 +1462,6 @@ bail:
* requires that we call do_exit(). And it isn't exported, but
* complete_and_exit() seems to be a minimal wrapper around it. */
complete_and_exit(NULL, status);
- return status;
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
@@ -1712,7 +1726,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
/* This will kfree the memory pointed to by la_copy and tl_copy */
ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
- tl_copy, NULL);
+ tl_copy, NULL, ORPHAN_NEED_TRUNCATE);
status = 0;
done:
@@ -1902,7 +1916,7 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
for (i = 0; i < osb->max_slots; i++)
ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
- NULL);
+ NULL, ORPHAN_NO_NEED_TRUNCATE);
/*
* We queued a recovery on orphan slots, increment the sequence
* number and update LVB so other node will skip the scan for a while
@@ -2001,6 +2015,13 @@ static int ocfs2_orphan_filldir(struct dir_context *ctx, const char *name,
if (IS_ERR(iter))
return 0;
+ /* Skip inodes which have already been added to the recovery list,
+ * since dio may happen concurrently with unlink/rename */
+ if (OCFS2_I(iter)->ip_next_orphan) {
+ iput(iter);
+ return 0;
+ }
+
trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
/* No locking is required for the next_orphan queue as there
* is only ever a single process doing orphan recovery. */
@@ -2109,7 +2130,8 @@ static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
* advertising our state to ocfs2_delete_inode().
*/
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
- int slot)
+ int slot,
+ enum ocfs2_orphan_reco_type orphan_reco_type)
{
int ret = 0;
struct inode *inode = NULL;
@@ -2133,13 +2155,60 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
(unsigned long long)oi->ip_blkno);
iter = oi->ip_next_orphan;
+ oi->ip_next_orphan = NULL;
+
+ /*
+ * We need to take and drop the inode lock to force
+ * the inode to be re-read from disk.
+ */
+ ret = ocfs2_inode_lock(inode, NULL, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto next;
+ }
+ ocfs2_inode_unlock(inode, 0);
+
+ if (inode->i_nlink == 0) {
+ spin_lock(&oi->ip_lock);
+ /* Set the proper information to get us going into
+ * ocfs2_delete_inode. */
+ oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
+ spin_unlock(&oi->ip_lock);
+ } else if (orphan_reco_type == ORPHAN_NEED_TRUNCATE) {
+ struct buffer_head *di_bh = NULL;
+
+ ret = ocfs2_rw_lock(inode, 1);
+ if (ret) {
+ mlog_errno(ret);
+ goto next;
+ }
+
+ ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (ret < 0) {
+ ocfs2_rw_unlock(inode, 1);
+ mlog_errno(ret);
+ goto next;
+ }
+
+ ret = ocfs2_truncate_file(inode, di_bh,
+ i_size_read(inode));
+ ocfs2_inode_unlock(inode, 1);
+ ocfs2_rw_unlock(inode, 1);
+ brelse(di_bh);
+ if (ret < 0) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto next;
+ }
+
+ ret = ocfs2_del_inode_from_orphan(osb, inode, 0, 0);
+ if (ret)
+ mlog_errno(ret);
- spin_lock(&oi->ip_lock);
- /* Set the proper information to get us going into
- * ocfs2_delete_inode. */
- oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
- spin_unlock(&oi->ip_lock);
+ wake_up(&OCFS2_I(inode)->append_dio_wq);
+ } /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */
+next:
iput(inode);
inode = iter;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 7f8cde94abfe..f4cd3c3e9fb7 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -472,6 +472,11 @@ static inline int ocfs2_unlink_credits(struct super_block *sb)
* orphan dir index leaf */
#define OCFS2_DELETE_INODE_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 4)
+/* dinode + orphan dir dinode + extent tree leaf block + orphan dir entry +
+ * orphan dir index root + orphan dir index leaf */
+#define OCFS2_INODE_ADD_TO_ORPHAN_CREDITS (2 * OCFS2_INODE_UPDATE_CREDITS + 4)
+#define OCFS2_INODE_DEL_FROM_ORPHAN_CREDITS OCFS2_INODE_ADD_TO_ORPHAN_CREDITS
+
/* dinode update, old dir dinode update, new dir dinode update, old
* dir dir entry, new dir dir entry, dir entry update for renaming
* directory + target unlink + 3 x dir index leaves */
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 10d66c75cecb..9581d190f6e1 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -173,7 +173,6 @@ out:
static const struct vm_operations_struct ocfs2_file_vm_ops = {
.fault = ocfs2_fault,
.page_mkwrite = ocfs2_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 914c121ec890..b5c3a5ea3ee6 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -79,7 +79,8 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
u64 blkno,
char *name,
- struct ocfs2_dir_lookup_result *lookup);
+ struct ocfs2_dir_lookup_result *lookup,
+ bool dio);
static int ocfs2_orphan_add(struct ocfs2_super *osb,
handle_t *handle,
@@ -87,7 +88,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
struct buffer_head *fe_bh,
char *name,
struct ocfs2_dir_lookup_result *lookup,
- struct inode *orphan_dir_inode);
+ struct inode *orphan_dir_inode,
+ bool dio);
static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
handle_t *handle,
@@ -104,6 +106,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
+#define OCFS2_DIO_ORPHAN_PREFIX "dio-"
+#define OCFS2_DIO_ORPHAN_PREFIX_LEN 4
static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
@@ -952,7 +956,8 @@ static int ocfs2_unlink(struct inode *dir,
if (ocfs2_inode_is_unlinkable(inode)) {
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
OCFS2_I(inode)->ip_blkno,
- orphan_name, &orphan_insert);
+ orphan_name, &orphan_insert,
+ false);
if (status < 0) {
mlog_errno(status);
goto leave;
@@ -1004,7 +1009,7 @@ static int ocfs2_unlink(struct inode *dir,
if (is_unlinkable) {
status = ocfs2_orphan_add(osb, handle, inode, fe_bh,
- orphan_name, &orphan_insert, orphan_dir);
+ orphan_name, &orphan_insert, orphan_dir, false);
if (status < 0)
mlog_errno(status);
}
@@ -1440,7 +1445,8 @@ static int ocfs2_rename(struct inode *old_dir,
if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) {
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
OCFS2_I(new_inode)->ip_blkno,
- orphan_name, &orphan_insert);
+ orphan_name, &orphan_insert,
+ false);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -1507,7 +1513,7 @@ static int ocfs2_rename(struct inode *old_dir,
if (should_add_orphan) {
status = ocfs2_orphan_add(osb, handle, new_inode,
newfe_bh, orphan_name,
- &orphan_insert, orphan_dir);
+ &orphan_insert, orphan_dir, false);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -2088,12 +2094,28 @@ static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
struct buffer_head *orphan_dir_bh,
u64 blkno,
char *name,
- struct ocfs2_dir_lookup_result *lookup)
+ struct ocfs2_dir_lookup_result *lookup,
+ bool dio)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+ int namelen = dio ?
+ (OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN) :
+ OCFS2_ORPHAN_NAMELEN;
+
+ if (dio) {
+ ret = snprintf(name, OCFS2_DIO_ORPHAN_PREFIX_LEN + 1, "%s",
+ OCFS2_DIO_ORPHAN_PREFIX);
+ if (ret != OCFS2_DIO_ORPHAN_PREFIX_LEN) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ return ret;
+ }
- ret = ocfs2_blkno_stringify(blkno, name);
+ ret = ocfs2_blkno_stringify(blkno,
+ name + OCFS2_DIO_ORPHAN_PREFIX_LEN);
+ } else
+ ret = ocfs2_blkno_stringify(blkno, name);
if (ret < 0) {
mlog_errno(ret);
return ret;
@@ -2101,7 +2123,7 @@ static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
orphan_dir_bh, name,
- OCFS2_ORPHAN_NAMELEN, lookup);
+ namelen, lookup);
if (ret < 0) {
mlog_errno(ret);
return ret;
@@ -2128,7 +2150,8 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
u64 blkno,
char *name,
- struct ocfs2_dir_lookup_result *lookup)
+ struct ocfs2_dir_lookup_result *lookup,
+ bool dio)
{
struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
@@ -2142,7 +2165,7 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
}
ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
- blkno, name, lookup);
+ blkno, name, lookup, dio);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -2170,12 +2193,16 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
struct buffer_head *fe_bh,
char *name,
struct ocfs2_dir_lookup_result *lookup,
- struct inode *orphan_dir_inode)
+ struct inode *orphan_dir_inode,
+ bool dio)
{
struct buffer_head *orphan_dir_bh = NULL;
int status = 0;
struct ocfs2_dinode *orphan_fe;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ int namelen = dio ?
+ (OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN) :
+ OCFS2_ORPHAN_NAMELEN;
trace_ocfs2_orphan_add_begin(
(unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -2219,7 +2246,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, orphan_dir_bh);
status = __ocfs2_add_entry(handle, orphan_dir_inode, name,
- OCFS2_ORPHAN_NAMELEN, inode,
+ namelen, inode,
OCFS2_I(inode)->ip_blkno,
orphan_dir_bh, lookup);
if (status < 0) {
@@ -2227,13 +2254,21 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
goto rollback;
}
- fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL);
- OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR;
+ if (dio) {
+ /* Update flag OCFS2_DIO_ORPHANED_FL and record the orphan
+ * slot.
+ */
+ fe->i_flags |= cpu_to_le32(OCFS2_DIO_ORPHANED_FL);
+ fe->i_dio_orphaned_slot = cpu_to_le16(osb->slot_num);
+ } else {
+ fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL);
+ OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR;
- /* Record which orphan dir our inode now resides
- * in. delete_inode will use this to determine which orphan
- * dir to lock. */
- fe->i_orphaned_slot = cpu_to_le16(osb->slot_num);
+ /* Record which orphan dir our inode now resides
+ * in. delete_inode will use this to determine which orphan
+ * dir to lock. */
+ fe->i_orphaned_slot = cpu_to_le16(osb->slot_num);
+ }
ocfs2_journal_dirty(handle, fe_bh);
@@ -2258,14 +2293,28 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
handle_t *handle,
struct inode *orphan_dir_inode,
struct inode *inode,
- struct buffer_head *orphan_dir_bh)
+ struct buffer_head *orphan_dir_bh,
+ bool dio)
{
- char name[OCFS2_ORPHAN_NAMELEN + 1];
+ const int namelen = OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN;
+ char name[namelen + 1];
struct ocfs2_dinode *orphan_fe;
int status = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
+ if (dio) {
+ status = snprintf(name, OCFS2_DIO_ORPHAN_PREFIX_LEN + 1, "%s",
+ OCFS2_DIO_ORPHAN_PREFIX);
+ if (status != OCFS2_DIO_ORPHAN_PREFIX_LEN) {
+ status = -EINVAL;
+ mlog_errno(status);
+ return status;
+ }
+
+ status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno,
+ name + OCFS2_DIO_ORPHAN_PREFIX_LEN);
+ } else
+ status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
if (status < 0) {
mlog_errno(status);
goto leave;
@@ -2273,10 +2322,10 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
trace_ocfs2_orphan_del(
(unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
- name, OCFS2_ORPHAN_NAMELEN);
+ name, namelen);
/* find it's spot in the orphan directory */
- status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode,
+ status = ocfs2_find_entry(name, namelen, orphan_dir_inode,
&lookup);
if (status) {
mlog_errno(status);
@@ -2376,7 +2425,8 @@ static int ocfs2_prep_new_orphaned_file(struct inode *dir,
}
ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
- di_blkno, orphan_name, orphan_insert);
+ di_blkno, orphan_name, orphan_insert,
+ false);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -2482,7 +2532,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
di = (struct ocfs2_dinode *)new_di_bh->b_data;
status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
- &orphan_insert, orphan_dir);
+ &orphan_insert, orphan_dir, false);
if (status < 0) {
mlog_errno(status);
goto leave;
@@ -2527,6 +2577,186 @@ leave:
return status;
}
+static int ocfs2_dio_orphan_recovered(struct inode *inode)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di = NULL;
+
+ ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return 0;
+ }
+
+ di = (struct ocfs2_dinode *) di_bh->b_data;
+ ret = !(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL));
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+
+ return ret;
+}
+
+#define OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL 10000
+int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb,
+ struct inode *inode)
+{
+ char orphan_name[OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN + 1];
+ struct inode *orphan_dir_inode = NULL;
+ struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+ struct buffer_head *di_bh = NULL;
+ int status = 0;
+ handle_t *handle = NULL;
+ struct ocfs2_dinode *di = NULL;
+
+restart:
+ status = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ di = (struct ocfs2_dinode *) di_bh->b_data;
+ /*
+ * Another append dio crashed?
+ * If so, wait for recovery first.
+ */
+ if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+ wait_event_interruptible_timeout(OCFS2_I(inode)->append_dio_wq,
+ ocfs2_dio_orphan_recovered(inode),
+ msecs_to_jiffies(OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL));
+ goto restart;
+ }
+
+ status = ocfs2_prepare_orphan_dir(osb, &orphan_dir_inode,
+ OCFS2_I(inode)->ip_blkno,
+ orphan_name,
+ &orphan_insert,
+ true);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_unlock_inode;
+ }
+
+ handle = ocfs2_start_trans(osb,
+ OCFS2_INODE_ADD_TO_ORPHAN_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ goto bail_unlock_orphan;
+ }
+
+ status = ocfs2_orphan_add(osb, handle, inode, di_bh, orphan_name,
+ &orphan_insert, orphan_dir_inode, true);
+ if (status)
+ mlog_errno(status);
+
+ ocfs2_commit_trans(osb, handle);
+
+bail_unlock_orphan:
+ ocfs2_inode_unlock(orphan_dir_inode, 1);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ iput(orphan_dir_inode);
+
+ ocfs2_free_dir_lookup_result(&orphan_insert);
+
+bail_unlock_inode:
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+
+bail:
+ return status;
+}
+
+int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb,
+ struct inode *inode, int update_isize,
+ loff_t end)
+{
+ struct inode *orphan_dir_inode = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di = NULL;
+ handle_t *handle = NULL;
+ int status = 0;
+
+ status = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ di = (struct ocfs2_dinode *) di_bh->b_data;
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ le16_to_cpu(di->i_dio_orphaned_slot));
+ if (!orphan_dir_inode) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto bail_unlock_inode;
+ }
+
+ mutex_lock(&orphan_dir_inode->i_mutex);
+ status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+ if (status < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ iput(orphan_dir_inode);
+ mlog_errno(status);
+ goto bail_unlock_inode;
+ }
+
+ handle = ocfs2_start_trans(osb,
+ OCFS2_INODE_DEL_FROM_ORPHAN_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ goto bail_unlock_orphan;
+ }
+
+ BUG_ON(!(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL)));
+
+ status = ocfs2_orphan_del(osb, handle, orphan_dir_inode,
+ inode, orphan_dir_bh, true);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
+
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(inode),
+ di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
+
+ di->i_flags &= ~cpu_to_le32(OCFS2_DIO_ORPHANED_FL);
+ di->i_dio_orphaned_slot = 0;
+
+ if (update_isize) {
+ status = ocfs2_set_inode_size(handle, inode, di_bh, end);
+ if (status)
+ mlog_errno(status);
+ } else
+ ocfs2_journal_dirty(handle, di_bh);
+
+bail_commit:
+ ocfs2_commit_trans(osb, handle);
+
+bail_unlock_orphan:
+ ocfs2_inode_unlock(orphan_dir_inode, 1);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ brelse(orphan_dir_bh);
+ iput(orphan_dir_inode);
+
+bail_unlock_inode:
+ ocfs2_inode_unlock(inode, 1);
+ brelse(di_bh);
+
+bail:
+ return status;
+}
+
int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
struct inode *inode,
struct dentry *dentry)
@@ -2615,7 +2845,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
}
status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
- orphan_dir_bh);
+ orphan_dir_bh, false);
if (status < 0) {
mlog_errno(status);
goto out_commit;
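
For context on the naming scheme used above: a dio orphan entry is the regular orphan name (the inode block number printed as a hex string of OCFS2_ORPHAN_NAMELEN = 16 characters) with the "dio-" prefix prepended. A minimal user-space sketch of how such a name could look, assuming ocfs2_blkno_stringify() emits a zero-padded 16-digit hex string (an assumption, not restated in this patch):

/* Hedged user-space sketch: compose a dio orphan dir entry name the way
 * __ocfs2_prepare_orphan_dir() does ("dio-" prefix + blkno as hex).
 * The zero-padded "%016llx" rendering is an assumption about
 * ocfs2_blkno_stringify(), not something shown in this patch. */
#include <stdio.h>
#include <stdint.h>

#define OCFS2_DIO_ORPHAN_PREFIX     "dio-"
#define OCFS2_DIO_ORPHAN_PREFIX_LEN 4
#define OCFS2_ORPHAN_NAMELEN        ((int)(2 * sizeof(uint64_t)))

int main(void)
{
        uint64_t blkno = 0x12345;   /* hypothetical inode block number */
        char name[OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN + 1];

        snprintf(name, sizeof(name), "%s%016llx",
                 OCFS2_DIO_ORPHAN_PREFIX, (unsigned long long)blkno);
        printf("dio orphan entry name: %s\n", name); /* dio-0000000000012345 */
        return 0;
}
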
diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h
index e5d059d4f115..5ddecce172fa 100644
--- a/fs/ocfs2/namei.h
+++ b/fs/ocfs2/namei.h
@@ -34,10 +34,16 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
handle_t *handle,
struct inode *orphan_dir_inode,
struct inode *inode,
- struct buffer_head *orphan_dir_bh);
+ struct buffer_head *orphan_dir_bh,
+ bool dio);
int ocfs2_create_inode_in_orphan(struct inode *dir,
int mode,
struct inode **new_inode);
+int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb,
+ struct inode *inode);
+int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb,
+ struct inode *inode, int update_isize,
+ loff_t end);
int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
struct inode *new_inode,
struct dentry *new_dentry);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 7d6b7d090452..8490c64d34fe 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -209,6 +209,11 @@ struct ocfs2_lock_res {
#endif
};
+enum ocfs2_orphan_reco_type {
+ ORPHAN_NO_NEED_TRUNCATE = 0,
+ ORPHAN_NEED_TRUNCATE,
+};
+
enum ocfs2_orphan_scan_state {
ORPHAN_SCAN_ACTIVE,
ORPHAN_SCAN_INACTIVE
@@ -279,6 +284,8 @@ enum ocfs2_mount_options
writes */
OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */
OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
+
+ OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */
};
#define OCFS2_OSB_SOFT_RO 0x0001
@@ -493,6 +500,14 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
return 0;
}
+static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
+{
+ if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+ return 1;
+ return 0;
+}
+
+
static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb)
{
if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
@@ -724,6 +739,16 @@ static inline unsigned int ocfs2_clusters_for_bytes(struct super_block *sb,
return clusters;
}
+static inline unsigned int ocfs2_bytes_to_clusters(struct super_block *sb,
+ u64 bytes)
+{
+ int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
+ unsigned int clusters;
+
+ clusters = (unsigned int)(bytes >> cl_bits);
+ return clusters;
+}
+
static inline u64 ocfs2_blocks_for_bytes(struct super_block *sb,
u64 bytes)
{
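
The new ocfs2_bytes_to_clusters() helper above is a plain right shift by s_clustersize_bits, so partial clusters are truncated rather than rounded up. A small stand-alone sketch of the same arithmetic (the 1 MiB cluster size is only an example):

/* Hedged sketch of the ocfs2_bytes_to_clusters() arithmetic: a right
 * shift by the cluster-size bits, i.e. it truncates partial clusters. */
#include <stdio.h>
#include <stdint.h>

static unsigned int bytes_to_clusters(uint64_t bytes, int cl_bits)
{
        return (unsigned int)(bytes >> cl_bits);
}

int main(void)
{
        int cl_bits = 20;  /* 1 MiB clusters, example value only */

        printf("%u\n", bytes_to_clusters(3u << 20, cl_bits));        /* 3 */
        printf("%u\n", bytes_to_clusters((3u << 20) + 1, cl_bits));  /* still 3 */
        return 0;
}
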
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 938387a10d5d..20e37a3ed26f 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -105,7 +105,8 @@
| OCFS2_FEATURE_INCOMPAT_CLUSTERINFO)
#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
| OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
- | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
+ | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \
+ | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
/*
* Heartbeat-only devices are missing journals and other files. The
@@ -199,6 +200,11 @@
#define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002
#define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004
+/*
+ * Append Direct IO support
+ */
+#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008
+
/* The byte offset of the first backup block will be 1G.
* The following will be 4G, 16G, 64G, 256G and 1T.
*/
@@ -229,6 +235,8 @@
#define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */
#define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */
#define OCFS2_QUOTA_FL (0x00001000) /* Quota file */
+#define OCFS2_DIO_ORPHANED_FL (0X00002000) /* On the orphan list especially
+ * for dio */
/*
* Flags on ocfs2_dinode.i_dyn_features
@@ -729,7 +737,9 @@ struct ocfs2_dinode {
inode belongs to. Only valid
if allocated from a
discontiguous block group */
-/*A0*/ __le64 i_reserved2[3];
+/*A0*/ __le16 i_dio_orphaned_slot; /* only used for append dio write */
+ __le16 i_reserved1[3];
+ __le64 i_reserved2[2];
/*B8*/ union {
__le64 i_pad1; /* Generic way to refer to this
64bit union */
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 1eae330193a6..b6d51333ad02 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -48,6 +48,7 @@ struct ocfs2_quota_recovery {
/* In-memory structure with quota header information */
struct ocfs2_mem_dqinfo {
unsigned int dqi_type; /* Quota type this structure describes */
+ unsigned int dqi_flags; /* Flags OLQF_* */
unsigned int dqi_chunks; /* Number of chunks in local quota file */
unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */
unsigned int dqi_syncms; /* How often should we sync with other nodes */
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 10b653930ee2..3d0b63d34225 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -73,12 +73,6 @@ static loff_t ol_dqblk_off(struct super_block *sb, int c, int off)
ol_dqblk_block_off(sb, c, off);
}
-/* Compute block number from given offset */
-static inline unsigned int ol_dqblk_file_block(struct super_block *sb, loff_t off)
-{
- return off >> sb->s_blocksize_bits;
-}
-
static inline unsigned int ol_dqblk_block_offset(struct super_block *sb, loff_t off)
{
return off & ((1 << sb->s_blocksize_bits) - 1);
@@ -292,7 +286,7 @@ static void olq_update_info(struct buffer_head *bh, void *private)
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
spin_lock(&dq_data_lock);
- ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+ ldinfo->dqi_flags = cpu_to_le32(oinfo->dqi_flags);
ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
spin_unlock(&dq_data_lock);
@@ -701,8 +695,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
/* We don't need the lock and we have to acquire quota file locks
* which will later depend on this lock */
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
- info->dqi_maxblimit = 0x7fffffffffffffffLL;
- info->dqi_maxilimit = 0x7fffffffffffffffLL;
+ info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
+ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
if (!oinfo) {
mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
@@ -737,13 +731,13 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
}
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
- info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
+ oinfo->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
oinfo->dqi_libh = bh;
/* We crashed when using local quota file? */
- if (!(info->dqi_flags & OLQF_CLEAN)) {
+ if (!(oinfo->dqi_flags & OLQF_CLEAN)) {
rec = OCFS2_SB(sb)->quota_rec;
if (!rec) {
rec = ocfs2_alloc_quota_recovery();
@@ -772,7 +766,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
}
/* Now mark quota file as used */
- info->dqi_flags &= ~OLQF_CLEAN;
+ oinfo->dqi_flags &= ~OLQF_CLEAN;
status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info);
if (status < 0) {
mlog_errno(status);
@@ -857,7 +851,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
goto out;
/* Mark local file as clean */
- info->dqi_flags |= OLQF_CLEAN;
+ oinfo->dqi_flags |= OLQF_CLEAN;
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
oinfo->dqi_libh,
olq_update_info,
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index d81f6e2a97f5..ee541f92dab4 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2428,8 +2428,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
get_bh(prev_bh);
}
- rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
-
trace_ocfs2_calc_refcount_meta_credits_iterate(
recs_add, (unsigned long long)cpos, clusters,
(unsigned long long)le64_to_cpu(rec.r_cpos),
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 41ffd36c689c..6a348b0294ab 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -39,7 +39,7 @@
#define OCFS2_CHECK_RESERVATIONS
#endif
-DEFINE_SPINLOCK(resv_lock);
+static DEFINE_SPINLOCK(resv_lock);
#define OCFS2_MIN_RESV_WINDOW_BITS 8
#define OCFS2_MAX_RESV_WINDOW_BITS 1024
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 83723179e1ec..26675185b886 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -191,6 +191,7 @@ enum {
Opt_coherency_full,
Opt_resv_level,
Opt_dir_resv_level,
+ Opt_journal_async_commit,
Opt_err,
};
@@ -222,6 +223,7 @@ static const match_table_t tokens = {
{Opt_coherency_full, "coherency=full"},
{Opt_resv_level, "resv_level=%u"},
{Opt_dir_resv_level, "dir_resv_level=%u"},
+ {Opt_journal_async_commit, "journal_async_commit"},
{Opt_err, NULL}
};
@@ -1000,36 +1002,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
}
}
-/* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
-{
- unsigned int feature[OCFS2_MAXQUOTAS] = {
- OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
-
- if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
- return -EINVAL;
-
- return dquot_enable(sb_dqopt(sb)->files[type], type,
- format_id, DQUOT_LIMITS_ENABLED);
-}
-
-/* Handle quota off quotactl */
-static int ocfs2_quota_off(struct super_block *sb, int type)
-{
- return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
-}
-
-static const struct quotactl_ops ocfs2_quotactl_ops = {
- .quota_on_meta = ocfs2_quota_on,
- .quota_off = ocfs2_quota_off,
- .quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
- .set_info = dquot_set_dqinfo,
- .get_dqblk = dquot_get_dqblk,
- .set_dqblk = dquot_set_dqblk,
-};
-
static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
{
struct dentry *root;
@@ -1500,6 +1472,9 @@ static int ocfs2_parse_options(struct super_block *sb,
option < OCFS2_MAX_RESV_LEVEL)
mopt->dir_resv_level = option;
break;
+ case Opt_journal_async_commit:
+ mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
+ break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
@@ -1606,6 +1581,9 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
if (osb->osb_dir_resv_level != osb->osb_resv_level)
seq_printf(s, ",dir_resv_level=%d", osb->osb_resv_level);
+ if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
+ seq_printf(s, ",journal_async_commit");
+
return 0;
}
@@ -1768,6 +1746,8 @@ static void ocfs2_inode_init_once(void *data)
ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
ocfs2_lock_res_init_once(&oi->ip_open_lockres);
+ init_waitqueue_head(&oi->append_dio_wq);
+
ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode),
&ocfs2_inode_caching_ops);
@@ -2079,7 +2059,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
sb->s_op = &ocfs2_sops;
sb->s_d_op = &ocfs2_dentry_ops;
sb->s_export_op = &ocfs2_export_ops;
- sb->s_qcop = &ocfs2_quotactl_ops;
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
sb->dq_op = &ocfs2_quota_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb->s_xattr = ocfs2_xattr_handlers;
@@ -2475,6 +2455,15 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
goto finally;
}
+ if (osb->s_mount_opt & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
+ jbd2_journal_set_features(osb->journal->j_journal,
+ JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+ else
+ jbd2_journal_clear_features(osb->journal->j_journal,
+ JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+
if (dirty) {
/* recover my local alloc if we didn't unmount cleanly. */
status = ocfs2_begin_local_alloc_recovery(osb,
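
The journal_async_commit token above is an ordinary mount option, so from user space it only needs to appear in the option string. A hedged sketch using mount(2); the device and mount point are hypothetical, and in practice ocfs2 is normally mounted through mount(8)/mount.ocfs2 with the cluster stack running:

/* Hedged sketch: pass the new journal_async_commit option in the
 * mount(2) data string. Device and mount point are hypothetical. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("/dev/sdb1", "/mnt/ocfs2", "ocfs2", 0,
                  "journal_async_commit") != 0) {
                perror("mount");
                return 1;
        }
        puts("mounted with journal_async_commit");
        return 0;
}
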
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 662f8dee149f..85b190dc132f 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -5334,16 +5334,6 @@ out:
return ret;
}
-static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
- struct ocfs2_xattr_bucket *bucket,
- int offs)
-{
- int block_off = offs >> inode->i_sb->s_blocksize_bits;
-
- offs = offs % inode->i_sb->s_blocksize;
- return bucket_block(bucket, block_off) + offs;
-}
-
/*
* Truncate the specified xe_off entry in xattr bucket.
* bucket is indicated by header_bh and len is the new length.
diff --git a/fs/open.c b/fs/open.c
index 813be037b412..a293c2020676 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -667,11 +667,8 @@ int open_check_o_direct(struct file *f)
{
/* NB: we're sure to have correct a_ops only after f_op->open */
if (f->f_flags & O_DIRECT) {
- if (!f->f_mapping->a_ops ||
- ((!f->f_mapping->a_ops->direct_IO) &&
- (!f->f_mapping->a_ops->get_xip_mem))) {
+ if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
return -EINVAL;
- }
}
return 0;
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index bd117d065b82..1295a00ca316 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -81,6 +81,7 @@
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
+#include <linux/string_helpers.h>
#include <linux/user_namespace.h>
#include <asm/pgtable.h>
@@ -89,39 +90,18 @@
static inline void task_name(struct seq_file *m, struct task_struct *p)
{
- int i;
- char *buf, *end;
- char *name;
+ char *buf;
char tcomm[sizeof(p->comm)];
get_task_comm(tcomm, p);
seq_puts(m, "Name:\t");
- end = m->buf + m->size;
buf = m->buf + m->count;
- name = tcomm;
- i = sizeof(tcomm);
- while (i && (buf < end)) {
- unsigned char c = *name;
- name++;
- i--;
- *buf = c;
- if (!c)
- break;
- if (c == '\\') {
- buf++;
- if (buf < end)
- *buf++ = c;
- continue;
- }
- if (c == '\n') {
- *buf++ = '\\';
- if (buf < end)
- *buf++ = 'n';
- continue;
- }
- buf++;
- }
+
+ /* Ignore error for now */
+ string_escape_str(tcomm, &buf, m->size - m->count,
+ ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
+
m->count = buf - m->buf;
seq_putc(m, '\n');
}
@@ -336,12 +316,10 @@ static inline void task_context_switch_counts(struct seq_file *m,
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
- seq_puts(m, "Cpus_allowed:\t");
- seq_cpumask(m, &task->cpus_allowed);
- seq_putc(m, '\n');
- seq_puts(m, "Cpus_allowed_list:\t");
- seq_cpumask_list(m, &task->cpus_allowed);
- seq_putc(m, '\n');
+ seq_printf(m, "Cpus_allowed:\t%*pb\n",
+ cpumask_pr_args(&task->cpus_allowed));
+ seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
+ cpumask_pr_args(&task->cpus_allowed));
}
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 7fea13229f33..de14e46fd807 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -122,7 +122,7 @@ static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
- struct proc_dir_entry *de = PROC_I(inode)->pde;
+ struct proc_dir_entry *de = PDE(inode);
if (de && de->nlink)
set_nlink(inode, de->nlink);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 8420a2f80811..13a50a32652d 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -40,7 +40,7 @@ static void proc_evict_inode(struct inode *inode)
put_pid(PROC_I(inode)->pid);
/* Let go of any associated proc directory entry */
- de = PROC_I(inode)->pde;
+ de = PDE(inode);
if (de)
pde_put(de);
head = PROC_I(inode)->sysctl;
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 1e3187da1fed..7eee2d8b97d9 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -5,6 +5,7 @@
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
+#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
@@ -121,9 +122,18 @@ u64 stable_page_flags(struct page *page)
* just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
* to make sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
- PageAnon(compound_head(page))))
- u |= 1 << KPF_THP;
+ else if (PageTransCompound(page)) {
+ struct page *head = compound_head(page);
+
+ if (PageLRU(head) || PageAnon(head))
+ u |= 1 << KPF_THP;
+ else if (is_huge_zero_page(head)) {
+ u |= 1 << KPF_ZERO_PAGE;
+ u |= 1 << KPF_THP;
+ }
+ } else if (is_zero_pfn(page_to_pfn(page)))
+ u |= 1 << KPF_ZERO_PAGE;
+
/*
* Caveats on high order pages: page->_count will only be set
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 246eae84b13b..956b75d61809 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -21,7 +21,7 @@
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
- unsigned long data, text, lib, swap;
+ unsigned long data, text, lib, swap, ptes, pmds;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
/*
@@ -42,6 +42,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
+ ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
+ pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
@@ -54,6 +56,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
+ "VmPMD:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
total_vm << (PAGE_SHIFT-10),
@@ -63,8 +66,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
total_rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
- (PTRS_PER_PTE * sizeof(pte_t) *
- atomic_long_read(&mm->nr_ptes)) >> 10,
+ ptes >> 10,
+ pmds >> 10,
swap << (PAGE_SHIFT-10));
}
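
With the task_mem() change above, /proc/<pid>/status gains a VmPMD line next to VmPTE. A small reader, assuming a kernel that carries this patch:

/* Hedged sketch: print the VmPTE and VmPMD lines that task_mem() emits
 * into /proc/self/status (VmPMD only exists on kernels with this patch). */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "VmPTE:", 6) || !strncmp(line, "VmPMD:", 6))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}
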
@@ -433,7 +436,6 @@ const struct file_operations proc_tid_maps_operations = {
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
- struct vm_area_struct *vma;
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
@@ -443,7 +445,6 @@ struct mem_size_stats {
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
- unsigned long nonlinear;
u64 pss;
};
@@ -483,8 +484,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
- struct vm_area_struct *vma = mss->vma;
- pgoff_t pgoff = linear_page_index(vma, addr);
+ struct vm_area_struct *vma = walk->vma;
struct page *page = NULL;
if (pte_present(*pte)) {
@@ -496,17 +496,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
mss->swap += PAGE_SIZE;
else if (is_migration_entry(swpent))
page = migration_entry_to_page(swpent);
- } else if (pte_file(*pte)) {
- if (pte_to_pgoff(*pte) != pgoff)
- mss->nonlinear += PAGE_SIZE;
}
if (!page)
return;
-
- if (page->index != pgoff)
- mss->nonlinear += PAGE_SIZE;
-
smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}
@@ -515,7 +508,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
- struct vm_area_struct *vma = mss->vma;
+ struct vm_area_struct *vma = walk->vma;
struct page *page;
/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -536,8 +529,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct mem_size_stats *mss = walk->private;
- struct vm_area_struct *vma = mss->vma;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte;
spinlock_t *ptl;
@@ -596,7 +588,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_ACCOUNT)] = "ac",
[ilog2(VM_NORESERVE)] = "nr",
[ilog2(VM_HUGETLB)] = "ht",
- [ilog2(VM_NONLINEAR)] = "nl",
[ilog2(VM_ARCH_1)] = "ar",
[ilog2(VM_DONTDUMP)] = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -630,10 +621,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
};
memset(&mss, 0, sizeof mss);
- mss.vma = vma;
/* mmap_sem is held in m_start */
- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+ walk_page_vma(vma, &smaps_walk);
show_map_vma(m, vma, is_pid);
@@ -668,10 +657,6 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
- if (vma->vm_flags & VM_NONLINEAR)
- seq_printf(m, "Nonlinear: %8lu kB\n",
- mss.nonlinear >> 10);
-
show_smap_vma_flags(m, vma);
m_cache_vma(m, vma);
return 0;
@@ -747,18 +732,18 @@ enum clear_refs_types {
CLEAR_REFS_ANON,
CLEAR_REFS_MAPPED,
CLEAR_REFS_SOFT_DIRTY,
+ CLEAR_REFS_MM_HIWATER_RSS,
CLEAR_REFS_LAST,
};
struct clear_refs_private {
- struct vm_area_struct *vma;
enum clear_refs_types type;
};
+#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
-#ifdef CONFIG_MEM_SOFT_DIRTY
/*
* The soft-dirty tracker uses #PF-s to catch writes
* to pages, so write-protect the pte as well. See the
@@ -772,24 +757,63 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
- } else if (pte_file(ptent)) {
- ptent = pte_file_clear_soft_dirty(ptent);
}
set_pte_at(vma->vm_mm, addr, pte, ptent);
-#endif
}
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd = pmd_wrprotect(pmd);
+ pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
+
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ vma->vm_flags &= ~VM_SOFTDIRTY;
+
+ set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+}
+
+#else
+
+static inline void clear_soft_dirty(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *pte)
+{
+}
+
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+}
+#endif
+
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct clear_refs_private *cp = walk->private;
- struct vm_area_struct *vma = cp->vma;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
- split_huge_page_pmd(vma, addr, pmd);
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+ clear_soft_dirty_pmd(vma, addr, pmd);
+ goto out;
+ }
+
+ page = pmd_page(*pmd);
+
+ /* Clear accessed and referenced bits. */
+ pmdp_test_and_clear_young(vma, addr, pmd);
+ ClearPageReferenced(page);
+out:
+ spin_unlock(ptl);
+ return 0;
+ }
+
if (pmd_trans_unstable(pmd))
return 0;
@@ -818,6 +842,28 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}
+static int clear_refs_test_walk(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct clear_refs_private *cp = walk->private;
+ struct vm_area_struct *vma = walk->vma;
+
+ if (vma->vm_flags & VM_PFNMAP)
+ return 1;
+
+ /*
+ * Writing 1 to /proc/pid/clear_refs affects all pages.
+ * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
+ * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
+ * Writing 4 to /proc/pid/clear_refs affects all pages.
+ */
+ if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
+ return 1;
+ if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
+ return 1;
+ return 0;
+}
+
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -858,9 +904,22 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
};
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
+ .test_walk = clear_refs_test_walk,
.mm = mm,
.private = &cp,
};
+
+ if (type == CLEAR_REFS_MM_HIWATER_RSS) {
+ /*
+ * Writing 5 to /proc/pid/clear_refs resets the peak
+ * resident set size to this mm's current rss value.
+ */
+ down_write(&mm->mmap_sem);
+ reset_mm_hiwater_rss(mm);
+ up_write(&mm->mmap_sem);
+ goto out_mm;
+ }
+
down_read(&mm->mmap_sem);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -877,32 +936,12 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
mmu_notifier_invalidate_range_start(mm, 0, -1);
}
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- cp.vma = vma;
- if (is_vm_hugetlb_page(vma))
- continue;
- /*
- * Writing 1 to /proc/pid/clear_refs affects all pages.
- *
- * Writing 2 to /proc/pid/clear_refs only affects
- * Anonymous pages.
- *
- * Writing 3 to /proc/pid/clear_refs only affects file
- * mapped pages.
- *
- * Writing 4 to /proc/pid/clear_refs affects all pages.
- */
- if (type == CLEAR_REFS_ANON && vma->vm_file)
- continue;
- if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
- continue;
- walk_page_range(vma->vm_start, vma->vm_end,
- &clear_refs_walk);
- }
+ walk_page_range(0, ~0UL, &clear_refs_walk);
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(mm, 0, -1);
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
+out_mm:
mmput(mm);
}
put_task_struct(task);
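
The new CLEAR_REFS_MM_HIWATER_RSS case means that writing 5 to /proc/<pid>/clear_refs resets the peak RSS (VmHWM) to the current RSS. A minimal sketch against the calling process itself:

/* Hedged sketch: reset this process's peak RSS (VmHWM) using the new
 * "write 5 to /proc/pid/clear_refs" interface added by this patch. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/clear_refs", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fputs("5\n", f) == EOF)
                perror("fputs");
        fclose(f);
        return 0;
}
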
@@ -1066,15 +1105,13 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = walk->vma;
struct pagemapread *pm = walk->private;
spinlock_t *ptl;
- pte_t *pte;
+ pte_t *pte, *orig_pte;
int err = 0;
- /* find the first VMA at or above 'addr' */
- vma = find_vma(walk->mm, addr);
- if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
int pmd_flags2;
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1100,51 +1137,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (pmd_trans_unstable(pmd))
return 0;
- while (1) {
- /* End of address space hole, which we mark as non-present. */
- unsigned long hole_end;
-
- if (vma)
- hole_end = min(end, vma->vm_start);
- else
- hole_end = end;
-
- for (; addr < hole_end; addr += PAGE_SIZE) {
- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
- err = add_to_pagemap(addr, &pme, pm);
- if (err)
- return err;
- }
-
- if (!vma || vma->vm_start >= end)
- break;
- /*
- * We can't possibly be in a hugetlb VMA. In general,
- * for a mm_walk with a pmd_entry and a hugetlb_entry,
- * the pmd_entry can only be called on addresses in a
- * hugetlb if the walk starts in a non-hugetlb VMA and
- * spans a hugepage VMA. Since pagemap_read walks are
- * PMD-sized and PMD-aligned, this will never be true.
- */
- BUG_ON(is_vm_hugetlb_page(vma));
-
- /* Addresses in the VMA. */
- for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
- pagemap_entry_t pme;
- pte = pte_offset_map(pmd, addr);
- pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
- pte_unmap(pte);
- err = add_to_pagemap(addr, &pme, pm);
- if (err)
- return err;
- }
+ /*
+ * We can assume that @vma always points to a valid one and @end never
+ * goes beyond vma->vm_end.
+ */
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ for (; addr < end; pte++, addr += PAGE_SIZE) {
+ pagemap_entry_t pme;
- if (addr == end)
+ pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
break;
-
- vma = find_vma(walk->mm, addr);
}
+ pte_unmap_unlock(orig_pte, ptl);
cond_resched();
@@ -1170,15 +1176,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = walk->vma;
int err = 0;
int flags2;
pagemap_entry_t pme;
- vma = find_vma(walk->mm, addr);
- WARN_ON_ONCE(!vma);
-
- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+ if (vma->vm_flags & VM_SOFTDIRTY)
flags2 = __PM_SOFT_DIRTY;
else
flags2 = 0;
@@ -1338,7 +1341,6 @@ const struct file_operations proc_pagemap_operations = {
#ifdef CONFIG_NUMA
struct numa_maps {
- struct vm_area_struct *vma;
unsigned long pages;
unsigned long anon;
unsigned long active;
@@ -1407,18 +1409,17 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
- struct numa_maps *md;
+ struct numa_maps *md = walk->private;
+ struct vm_area_struct *vma = walk->vma;
spinlock_t *ptl;
pte_t *orig_pte;
pte_t *pte;
- md = walk->private;
-
- if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats(huge_pte, md->vma, addr);
+ page = can_gather_numa_stats(huge_pte, vma, addr);
if (page)
gather_stats(page, md, pte_dirty(huge_pte),
HPAGE_PMD_SIZE/PAGE_SIZE);
@@ -1430,7 +1431,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
- struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
+ struct page *page = can_gather_numa_stats(*pte, vma, addr);
if (!page)
continue;
gather_stats(page, md, pte_dirty(*pte), 1);
@@ -1440,7 +1441,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
-static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
struct numa_maps *md;
@@ -1459,7 +1460,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
}
#else
-static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
return 0;
@@ -1477,7 +1478,12 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
struct mm_struct *mm = vma->vm_mm;
- struct mm_walk walk = {};
+ struct mm_walk walk = {
+ .hugetlb_entry = gather_hugetlb_stats,
+ .pmd_entry = gather_pte_stats,
+ .private = md,
+ .mm = mm,
+ };
struct mempolicy *pol;
char buffer[64];
int nid;
@@ -1488,13 +1494,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
/* Ensure we start with an empty set of numa_maps statistics. */
memset(md, 0, sizeof(*md));
- md->vma = vma;
-
- walk.hugetlb_entry = gather_hugetbl_stats;
- walk.pmd_entry = gather_pte_stats;
- walk.private = md;
- walk.mm = mm;
-
pol = __get_vma_policy(vma, vma->vm_start);
if (pol) {
mpol_to_str(buffer, sizeof(buffer), pol);
@@ -1528,7 +1527,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
if (is_vm_hugetlb_page(vma))
seq_puts(m, " huge");
- walk_page_range(vma->vm_start, vma->vm_end, &walk);
+ /* mmap_sem is held by m_start */
+ walk_page_vma(vma, &walk);
if (!md->pages)
goto out;
@@ -1557,6 +1557,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
for_each_node_state(nid, N_MEMORY)
if (md->node[nid])
seq_printf(m, " N%d=%lu", nid, md->node[nid]);
+
+ seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
seq_putc(m, '\n');
m_cache_vma(m, vma);
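
The show_numa_map() change above appends a kernelpagesize_kB field to every /proc/<pid>/numa_maps line. A small reader, assuming a CONFIG_NUMA kernel with this patch:

/* Hedged sketch: show the new kernelpagesize_kB field that show_numa_map()
 * now appends to each /proc/self/numa_maps line. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[1024];
        FILE *f = fopen("/proc/self/numa_maps", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "kernelpagesize_kB="))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}
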
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 983d9510becc..916b8e23d968 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -21,6 +21,16 @@ config PSTORE_CONSOLE
When the option is enabled, pstore will log all kernel
messages, even if no oops or panic happened.
+config PSTORE_PMSG
+ bool "Log user space messages"
+ depends on PSTORE
+ help
+ When the option is enabled, pstore will export a character
+ interface /dev/pmsg0 to log user space messages. On reboot
+ data can be retrieved from /sys/fs/pstore/pmsg-ramoops-[ID].
+
+ If unsure, say N.
+
config PSTORE_FTRACE
bool "Persistent function tracer"
depends on PSTORE
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile
index 4c9095c2781e..e647d8e81712 100644
--- a/fs/pstore/Makefile
+++ b/fs/pstore/Makefile
@@ -7,5 +7,7 @@ obj-y += pstore.o
pstore-objs += inode.o platform.o
obj-$(CONFIG_PSTORE_FTRACE) += ftrace.o
+obj-$(CONFIG_PSTORE_PMSG) += pmsg.o
+
ramoops-objs += ram.o ram_core.o
obj-$(CONFIG_PSTORE_RAM) += ramoops.o
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 50416602774d..b32ce53d24ee 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -338,32 +338,38 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
switch (type) {
case PSTORE_TYPE_DMESG:
- sprintf(name, "dmesg-%s-%lld%s", psname, id,
- compressed ? ".enc.z" : "");
+ scnprintf(name, sizeof(name), "dmesg-%s-%lld%s",
+ psname, id, compressed ? ".enc.z" : "");
break;
case PSTORE_TYPE_CONSOLE:
- sprintf(name, "console-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "console-%s-%lld", psname, id);
break;
case PSTORE_TYPE_FTRACE:
- sprintf(name, "ftrace-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "ftrace-%s-%lld", psname, id);
break;
case PSTORE_TYPE_MCE:
- sprintf(name, "mce-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "mce-%s-%lld", psname, id);
break;
case PSTORE_TYPE_PPC_RTAS:
- sprintf(name, "rtas-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "rtas-%s-%lld", psname, id);
break;
case PSTORE_TYPE_PPC_OF:
- sprintf(name, "powerpc-ofw-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "powerpc-ofw-%s-%lld",
+ psname, id);
break;
case PSTORE_TYPE_PPC_COMMON:
- sprintf(name, "powerpc-common-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "powerpc-common-%s-%lld",
+ psname, id);
+ break;
+ case PSTORE_TYPE_PMSG:
+ scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id);
break;
case PSTORE_TYPE_UNKNOWN:
- sprintf(name, "unknown-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id);
break;
default:
- sprintf(name, "type%d-%s-%lld", type, psname, id);
+ scnprintf(name, sizeof(name), "type%d-%s-%lld",
+ type, psname, id);
break;
}
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 3b3d305277c4..c36ba2cd0b5d 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -45,6 +45,12 @@ extern void pstore_register_ftrace(void);
static inline void pstore_register_ftrace(void) {}
#endif
+#ifdef CONFIG_PSTORE_PMSG
+extern void pstore_register_pmsg(void);
+#else
+static inline void pstore_register_pmsg(void) {}
+#endif
+
extern struct pstore_info *psinfo;
extern void pstore_set_kmsg_bytes(int);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 0a9b72cdfeca..c4c9a10c5760 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -301,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
if (big_oops_buf) {
dst = big_oops_buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why,
+ hsize = sprintf(dst, "%s#%d Part%u\n", why,
oopscount, part);
size = big_oops_buf_sz - hsize;
@@ -321,7 +321,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
}
} else {
dst = psinfo->buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount,
+ hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount,
part);
size = psinfo->bufsize - hsize;
dst += hsize;
@@ -447,6 +447,7 @@ int pstore_register(struct pstore_info *psi)
if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
pstore_register_console();
pstore_register_ftrace();
+ pstore_register_pmsg();
}
if (pstore_update_ms >= 0) {
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
new file mode 100644
index 000000000000..feb5dd2948b4
--- /dev/null
+++ b/fs/pstore/pmsg.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(pmsg_lock);
+#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
+
+static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t i, buffer_size;
+ char *buffer;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ buffer_size = count;
+ if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
+ buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
+ buffer = vmalloc(buffer_size);
+
+ mutex_lock(&pmsg_lock);
+ for (i = 0; i < count; ) {
+ size_t c = min(count - i, buffer_size);
+ u64 id;
+ long ret;
+
+ ret = __copy_from_user(buffer, buf + i, c);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&pmsg_lock);
+ vfree(buffer);
+ return -EFAULT;
+ }
+ psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
+ psinfo);
+
+ i += c;
+ }
+
+ mutex_unlock(&pmsg_lock);
+ vfree(buffer);
+ return count;
+}
+
+static const struct file_operations pmsg_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .write = write_pmsg,
+};
+
+static struct class *pmsg_class;
+static int pmsg_major;
+#define PMSG_NAME "pmsg"
+#undef pr_fmt
+#define pr_fmt(fmt) PMSG_NAME ": " fmt
+
+static char *pmsg_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0220;
+ return NULL;
+}
+
+void pstore_register_pmsg(void)
+{
+ struct device *pmsg_device;
+
+ pmsg_major = register_chrdev(0, PMSG_NAME, &pmsg_fops);
+ if (pmsg_major < 0) {
+ pr_err("register_chrdev failed\n");
+ goto err;
+ }
+
+ pmsg_class = class_create(THIS_MODULE, PMSG_NAME);
+ if (IS_ERR(pmsg_class)) {
+ pr_err("device class file already in use\n");
+ goto err_class;
+ }
+ pmsg_class->devnode = pmsg_devnode;
+
+ pmsg_device = device_create(pmsg_class, NULL, MKDEV(pmsg_major, 0),
+ NULL, "%s%d", PMSG_NAME, 0);
+ if (IS_ERR(pmsg_device)) {
+ pr_err("failed to create device\n");
+ goto err_device;
+ }
+ return;
+
+err_device:
+ class_destroy(pmsg_class);
+err_class:
+ unregister_chrdev(pmsg_major, PMSG_NAME);
+err:
+ return;
+}
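
write_pmsg() above accepts plain write(2) data on the new /dev/pmsg0 node (created with mode 0220), and per the Kconfig help text the record can be read back after a reboot from /sys/fs/pstore/pmsg-ramoops-[ID]. A hedged user-space sketch, assuming CONFIG_PSTORE_PMSG is enabled and the caller has write permission:

/* Hedged sketch: log a user-space message through the new /dev/pmsg0
 * character device added by this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from user space\n";
        int fd = open("/dev/pmsg0", O_WRONLY);

        if (fd < 0) {
                perror("open /dev/pmsg0");
                return 1;
        }
        if (write(fd, msg, strlen(msg)) < 0)
                perror("write");
        close(fd);
        /* After a reboot the data can be read back from
         * /sys/fs/pstore/pmsg-ramoops-0 (the ID depends on the backend). */
        return 0;
}
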
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 8613e5b35c22..39d1373128e9 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -51,6 +51,10 @@ static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");
+static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
+module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
+MODULE_PARM_DESC(pmsg_size, "size of user space message log");
+
static ulong mem_address;
module_param(mem_address, ulong, 0400);
MODULE_PARM_DESC(mem_address,
@@ -82,12 +86,14 @@ struct ramoops_context {
struct persistent_ram_zone **przs;
struct persistent_ram_zone *cprz;
struct persistent_ram_zone *fprz;
+ struct persistent_ram_zone *mprz;
phys_addr_t phys_addr;
unsigned long size;
unsigned int memtype;
size_t record_size;
size_t console_size;
size_t ftrace_size;
+ size_t pmsg_size;
int dump_oops;
struct persistent_ram_ecc_info ecc_info;
unsigned int max_dump_cnt;
@@ -96,6 +102,7 @@ struct ramoops_context {
unsigned int dump_read_cnt;
unsigned int console_read_cnt;
unsigned int ftrace_read_cnt;
+ unsigned int pmsg_read_cnt;
struct pstore_info pstore;
};
@@ -109,6 +116,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
cxt->dump_read_cnt = 0;
cxt->console_read_cnt = 0;
cxt->ftrace_read_cnt = 0;
+ cxt->pmsg_read_cnt = 0;
return 0;
}
@@ -164,6 +172,12 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec *time,
return header_length;
}
+static bool prz_ok(struct persistent_ram_zone *prz)
+{
+ return !!prz && !!(persistent_ram_old_size(prz) +
+ persistent_ram_ecc_string(prz, NULL, 0));
+}
+
static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time,
char **buf, bool *compressed,
@@ -178,13 +192,16 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt,
cxt->max_dump_cnt, id, type,
PSTORE_TYPE_DMESG, 1);
- if (!prz)
+ if (!prz_ok(prz))
prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
1, id, type, PSTORE_TYPE_CONSOLE, 0);
- if (!prz)
+ if (!prz_ok(prz))
prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt,
1, id, type, PSTORE_TYPE_FTRACE, 0);
- if (!prz)
+ if (!prz_ok(prz))
+ prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
+ 1, id, type, PSTORE_TYPE_PMSG, 0);
+ if (!prz_ok(prz))
return 0;
if (!persistent_ram_old(prz))
@@ -252,6 +269,11 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
return -ENOMEM;
persistent_ram_write(cxt->fprz, buf, size);
return 0;
+ } else if (type == PSTORE_TYPE_PMSG) {
+ if (!cxt->mprz)
+ return -ENOMEM;
+ persistent_ram_write(cxt->mprz, buf, size);
+ return 0;
}
if (type != PSTORE_TYPE_DMESG)
@@ -309,6 +331,9 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
case PSTORE_TYPE_FTRACE:
prz = cxt->fprz;
break;
+ case PSTORE_TYPE_PMSG:
+ prz = cxt->mprz;
+ break;
default:
return -EINVAL;
}
@@ -435,7 +460,7 @@ static int ramoops_probe(struct platform_device *pdev)
goto fail_out;
if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
- !pdata->ftrace_size)) {
+ !pdata->ftrace_size && !pdata->pmsg_size)) {
pr_err("The memory size and the record/console size must be "
"non-zero\n");
goto fail_out;
@@ -447,6 +472,8 @@ static int ramoops_probe(struct platform_device *pdev)
pdata->console_size = rounddown_pow_of_two(pdata->console_size);
if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+ if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
+ pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);
cxt->size = pdata->mem_size;
cxt->phys_addr = pdata->mem_address;
@@ -454,12 +481,14 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->record_size = pdata->record_size;
cxt->console_size = pdata->console_size;
cxt->ftrace_size = pdata->ftrace_size;
+ cxt->pmsg_size = pdata->pmsg_size;
cxt->dump_oops = pdata->dump_oops;
cxt->ecc_info = pdata->ecc_info;
paddr = cxt->phys_addr;
- dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size;
+ dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
+ - cxt->pmsg_size;
err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz);
if (err)
goto fail_out;
@@ -474,13 +503,9 @@ static int ramoops_probe(struct platform_device *pdev)
if (err)
goto fail_init_fprz;
- if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
- pr_err("memory size too small, minimum is %zu\n",
- cxt->console_size + cxt->record_size +
- cxt->ftrace_size);
- err = -EINVAL;
- goto fail_cnt;
- }
+ err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0);
+ if (err)
+ goto fail_init_mprz;
cxt->pstore.data = cxt;
/*
@@ -525,7 +550,8 @@ fail_buf:
kfree(cxt->pstore.buf);
fail_clear:
cxt->pstore.bufsize = 0;
-fail_cnt:
+ kfree(cxt->mprz);
+fail_init_mprz:
kfree(cxt->fprz);
fail_init_fprz:
kfree(cxt->cprz);
@@ -583,6 +609,7 @@ static void ramoops_register_dummy(void)
dummy_data->record_size = record_size;
dummy_data->console_size = ramoops_console_size;
dummy_data->ftrace_size = ramoops_ftrace_size;
+ dummy_data->pmsg_size = ramoops_pmsg_size;
dummy_data->dump_oops = dump_oops;
/*
* For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index c51df1dd237e..4a09975aac90 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -5,6 +5,7 @@
config QUOTA
bool "Quota support"
select QUOTACTL
+ select SRCU
help
If you say Y here, you will be able to set per user limits for disk
usage (also called disk quotas). Currently, it works for the
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8f0acef3d184..0ccd4ba3a246 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1248,7 +1248,7 @@ static int ignore_hardlimit(struct dquot *dquot)
return capable(CAP_SYS_RESOURCE) &&
(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
- !(info->dqi_flags & V1_DQF_RSQUASH));
+ !(info->dqi_flags & DQF_ROOT_SQUASH));
}
/* needs dq_data_lock */
@@ -2385,41 +2385,106 @@ out:
}
EXPORT_SYMBOL(dquot_quota_on_mount);
-static inline qsize_t qbtos(qsize_t blocks)
+static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
- return blocks << QIF_DQBLKSIZE_BITS;
+ int ret;
+ int type;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return -ENOSYS;
+ /* Accounting cannot be turned on while fs is mounted */
+ flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
+ if (!flags)
+ return -EINVAL;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!(flags & qtype_enforce_flag(type)))
+ continue;
+ /* Can't enforce without accounting */
+ if (!sb_has_quota_usage_enabled(sb, type))
+ return -EINVAL;
+ ret = dquot_enable(dqopt->files[type], type,
+ dqopt->info[type].dqi_fmt_id,
+ DQUOT_LIMITS_ENABLED);
+ if (ret < 0)
+ goto out_err;
+ }
+ return 0;
+out_err:
+ /* Back out the enforcement enablement we already did */
+ for (type--; type >= 0; type--) {
+ if (flags & qtype_enforce_flag(type))
+ dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+ }
+ /* Error code translation for better compatibility with XFS */
+ if (ret == -EBUSY)
+ ret = -EEXIST;
+ return ret;
}
-static inline qsize_t stoqb(qsize_t space)
+static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
- return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+ int ret;
+ int type;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return -ENOSYS;
+ /*
+ * We don't support turning off accounting via quotactl. In principle
+ * quota infrastructure can do this but filesystems don't expect
+ * userspace to be able to do it.
+ */
+ if (flags &
+ (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
+ return -EOPNOTSUPP;
+
+ /* Filter out limits not enabled */
+ for (type = 0; type < MAXQUOTAS; type++)
+ if (!sb_has_quota_limits_enabled(sb, type))
+ flags &= ~qtype_enforce_flag(type);
+ /* Nothing left? */
+ if (!flags)
+ return -EEXIST;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (flags & qtype_enforce_flag(type)) {
+ ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+ if (ret < 0)
+ goto out_err;
+ }
+ }
+ return 0;
+out_err:
+ /* Back out the enforcement disabling we already did */
+ for (type--; type >= 0; type--) {
+ if (flags & qtype_enforce_flag(type))
+ dquot_enable(dqopt->files[type], type,
+ dqopt->info[type].dqi_fmt_id,
+ DQUOT_LIMITS_ENABLED);
+ }
+ return ret;
}
/* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
memset(di, 0, sizeof(*di));
- di->d_version = FS_DQUOT_VERSION;
- di->d_flags = dquot->dq_id.type == USRQUOTA ?
- FS_USER_QUOTA : FS_GROUP_QUOTA;
- di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
spin_lock(&dq_data_lock);
- di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
- di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+ di->d_spc_hardlimit = dm->dqb_bhardlimit;
+ di->d_spc_softlimit = dm->dqb_bsoftlimit;
di->d_ino_hardlimit = dm->dqb_ihardlimit;
di->d_ino_softlimit = dm->dqb_isoftlimit;
- di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
- di->d_icount = dm->dqb_curinodes;
- di->d_btimer = dm->dqb_btime;
- di->d_itimer = dm->dqb_itime;
+ di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+ di->d_ino_count = dm->dqb_curinodes;
+ di->d_spc_timer = dm->dqb_btime;
+ di->d_ino_timer = dm->dqb_itime;
spin_unlock(&dq_data_lock);
}
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *di)
+ struct qc_dqblk *di)
{
struct dquot *dquot;
@@ -2433,70 +2498,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
}
EXPORT_SYMBOL(dquot_get_dqblk);
-#define VFS_FS_DQ_MASK \
- (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
- FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
- FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+ (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+ QC_SPC_TIMER | QC_INO_TIMER)
/* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
- if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+ if (di->d_fieldmask & ~VFS_QC_MASK)
return -EINVAL;
- if (((di->d_fieldmask & FS_DQ_BSOFT) &&
- (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
- ((di->d_fieldmask & FS_DQ_BHARD) &&
- (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
- ((di->d_fieldmask & FS_DQ_ISOFT) &&
- (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
- ((di->d_fieldmask & FS_DQ_IHARD) &&
- (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
+ if (((di->d_fieldmask & QC_SPC_SOFT) &&
+ di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_SPC_HARD) &&
+ di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_INO_SOFT) &&
+ (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
+ ((di->d_fieldmask & QC_INO_HARD) &&
+ (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
return -ERANGE;
spin_lock(&dq_data_lock);
- if (di->d_fieldmask & FS_DQ_BCOUNT) {
- dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+ if (di->d_fieldmask & QC_SPACE) {
+ dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_BSOFT)
- dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
- if (di->d_fieldmask & FS_DQ_BHARD)
- dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
- if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+ if (di->d_fieldmask & QC_SPC_SOFT)
+ dm->dqb_bsoftlimit = di->d_spc_softlimit;
+ if (di->d_fieldmask & QC_SPC_HARD)
+ dm->dqb_bhardlimit = di->d_spc_hardlimit;
+ if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ICOUNT) {
- dm->dqb_curinodes = di->d_icount;
+ if (di->d_fieldmask & QC_INO_COUNT) {
+ dm->dqb_curinodes = di->d_ino_count;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ISOFT)
+ if (di->d_fieldmask & QC_INO_SOFT)
dm->dqb_isoftlimit = di->d_ino_softlimit;
- if (di->d_fieldmask & FS_DQ_IHARD)
+ if (di->d_fieldmask & QC_INO_HARD)
dm->dqb_ihardlimit = di->d_ino_hardlimit;
- if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+ if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_BTIMER) {
- dm->dqb_btime = di->d_btimer;
+ if (di->d_fieldmask & QC_SPC_TIMER) {
+ dm->dqb_btime = di->d_spc_timer;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ITIMER) {
- dm->dqb_itime = di->d_itimer;
+ if (di->d_fieldmask & QC_INO_TIMER) {
+ dm->dqb_itime = di->d_ino_timer;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
@@ -2506,7 +2571,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+ } else if (!(di->d_fieldmask & QC_SPC_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
@@ -2515,7 +2580,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+ } else if (!(di->d_fieldmask & QC_INO_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
@@ -2531,7 +2596,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *di)
+ struct qc_dqblk *di)
{
struct dquot *dquot;
int rc;
@@ -2582,6 +2647,14 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
goto out;
}
mi = sb_dqopt(sb)->info + type;
+ if (ii->dqi_valid & IIF_FLAGS) {
+ if (ii->dqi_flags & ~DQF_SETINFO_MASK ||
+ (ii->dqi_flags & DQF_ROOT_SQUASH &&
+ mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
spin_lock(&dq_data_lock);
if (ii->dqi_valid & IIF_BGRACE)
mi->dqi_bgrace = ii->dqi_bgrace;
@@ -2611,6 +2684,17 @@ const struct quotactl_ops dquot_quotactl_ops = {
};
EXPORT_SYMBOL(dquot_quotactl_ops);
+const struct quotactl_ops dquot_quotactl_sysfile_ops = {
+ .quota_enable = dquot_quota_enable,
+ .quota_disable = dquot_quota_disable,
+ .quota_sync = dquot_quota_sync,
+ .get_info = dquot_get_dqinfo,
+ .set_info = dquot_set_dqinfo,
+ .get_dqblk = dquot_get_dqblk,
+ .set_dqblk = dquot_set_dqblk
+};
+EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
+
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
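The new dquot_quota_enable() above enables limits enforcement per quota type and, when one type fails, walks back over the types it already switched on before returning (translating -EBUSY to -EEXIST for XFS compatibility). The rollback shape, reduced to a standalone sketch with stand-in enable/disable helpers, is roughly:

#include <stdio.h>

/*
 * Hedged sketch (not the kernel code) of the enable-with-rollback pattern
 * dquot_quota_enable() uses: try each requested type in order and, if one
 * fails, undo the ones that already succeeded before returning the error.
 * enable_type()/disable_type() are illustrative stand-ins.
 */
#define NTYPES 3

static int enable_type(int type)   { return type == 2 ? -1 : 0; } /* fail on 2 */
static void disable_type(int type) { printf("rolled back type %d\n", type); }

static int enable_all(unsigned int requested)
{
	int type, err = 0;

	for (type = 0; type < NTYPES; type++) {
		if (!(requested & (1u << type)))
			continue;
		err = enable_type(type);
		if (err)
			goto out_err;
	}
	return 0;
out_err:
	/* back out the types enabled before the failure */
	for (type--; type >= 0; type--)
		if (requested & (1u << type))
			disable_type(type);
	return err;
}

int main(void)
{
	return enable_all(0x7) ? 1 : 0;	/* types 0 and 1 get rolled back */
}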
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2aa4151f99d2..d14a799c7785 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -66,18 +66,40 @@ static int quota_sync_all(int type)
return ret;
}
+unsigned int qtype_enforce_flag(int type)
+{
+ switch (type) {
+ case USRQUOTA:
+ return FS_QUOTA_UDQ_ENFD;
+ case GRPQUOTA:
+ return FS_QUOTA_GDQ_ENFD;
+ case PRJQUOTA:
+ return FS_QUOTA_PDQ_ENFD;
+ }
+ return 0;
+}
+
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
struct path *path)
{
- if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+ if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
return -ENOSYS;
- if (sb->s_qcop->quota_on_meta)
- return sb->s_qcop->quota_on_meta(sb, type, id);
+ if (sb->s_qcop->quota_enable)
+ return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
if (IS_ERR(path))
return PTR_ERR(path);
return sb->s_qcop->quota_on(sb, type, id, path);
}
+static int quota_quotaoff(struct super_block *sb, int type)
+{
+ if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
+ return -ENOSYS;
+ if (sb->s_qcop->quota_disable)
+ return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
+ return sb->s_qcop->quota_off(sb, type);
+}
+
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
__u32 fmt;
@@ -118,17 +140,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
return sb->s_qcop->set_info(sb, type, &info);
}
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+ return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+ return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
memset(dst, 0, sizeof(*dst));
- dst->dqb_bhardlimit = src->d_blk_hardlimit;
- dst->dqb_bsoftlimit = src->d_blk_softlimit;
- dst->dqb_curspace = src->d_bcount;
+ dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+ dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+ dst->dqb_curspace = src->d_space;
dst->dqb_ihardlimit = src->d_ino_hardlimit;
dst->dqb_isoftlimit = src->d_ino_softlimit;
- dst->dqb_curinodes = src->d_icount;
- dst->dqb_btime = src->d_btimer;
- dst->dqb_itime = src->d_itimer;
+ dst->dqb_curinodes = src->d_ino_count;
+ dst->dqb_btime = src->d_spc_timer;
+ dst->dqb_itime = src->d_ino_timer;
dst->dqb_valid = QIF_ALL;
}
@@ -136,7 +168,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct kqid qid;
- struct fs_disk_quota fdq;
+ struct qc_dqblk fdq;
struct if_dqblk idq;
int ret;
@@ -154,36 +186,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
return 0;
}
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
- dst->d_blk_hardlimit = src->dqb_bhardlimit;
- dst->d_blk_softlimit = src->dqb_bsoftlimit;
- dst->d_bcount = src->dqb_curspace;
+ dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+ dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+ dst->d_space = src->dqb_curspace;
dst->d_ino_hardlimit = src->dqb_ihardlimit;
dst->d_ino_softlimit = src->dqb_isoftlimit;
- dst->d_icount = src->dqb_curinodes;
- dst->d_btimer = src->dqb_btime;
- dst->d_itimer = src->dqb_itime;
+ dst->d_ino_count = src->dqb_curinodes;
+ dst->d_spc_timer = src->dqb_btime;
+ dst->d_ino_timer = src->dqb_itime;
dst->d_fieldmask = 0;
if (src->dqb_valid & QIF_BLIMITS)
- dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+ dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
if (src->dqb_valid & QIF_SPACE)
- dst->d_fieldmask |= FS_DQ_BCOUNT;
+ dst->d_fieldmask |= QC_SPACE;
if (src->dqb_valid & QIF_ILIMITS)
- dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+ dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
if (src->dqb_valid & QIF_INODES)
- dst->d_fieldmask |= FS_DQ_ICOUNT;
+ dst->d_fieldmask |= QC_INO_COUNT;
if (src->dqb_valid & QIF_BTIME)
- dst->d_fieldmask |= FS_DQ_BTIMER;
+ dst->d_fieldmask |= QC_SPC_TIMER;
if (src->dqb_valid & QIF_ITIME)
- dst->d_fieldmask |= FS_DQ_ITIMER;
+ dst->d_fieldmask |= QC_INO_TIMER;
}
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
- struct fs_disk_quota fdq;
+ struct qc_dqblk fdq;
struct if_dqblk idq;
struct kqid qid;
@@ -198,15 +230,26 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}
-static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
+static int quota_enable(struct super_block *sb, void __user *addr)
+{
+ __u32 flags;
+
+ if (copy_from_user(&flags, addr, sizeof(flags)))
+ return -EFAULT;
+ if (!sb->s_qcop->quota_enable)
+ return -ENOSYS;
+ return sb->s_qcop->quota_enable(sb, flags);
+}
+
+static int quota_disable(struct super_block *sb, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
- if (!sb->s_qcop->set_xstate)
+ if (!sb->s_qcop->quota_disable)
return -ENOSYS;
- return sb->s_qcop->set_xstate(sb, flags, cmd);
+ return sb->s_qcop->quota_disable(sb, flags);
}
static int quota_getxstate(struct super_block *sb, void __user *addr)
@@ -247,10 +290,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
return ret;
}
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+ return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+ return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+ dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+ dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_space = quota_bbtob(src->d_bcount);
+ dst->d_ino_count = src->d_icount;
+ dst->d_ino_timer = src->d_itimer;
+ dst->d_spc_timer = src->d_btimer;
+ dst->d_ino_warns = src->d_iwarns;
+ dst->d_spc_warns = src->d_bwarns;
+ dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+ dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+ dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+ dst->d_rt_spc_timer = src->d_rtbtimer;
+ dst->d_rt_spc_warns = src->d_rtbwarns;
+ dst->d_fieldmask = 0;
+ if (src->d_fieldmask & FS_DQ_ISOFT)
+ dst->d_fieldmask |= QC_INO_SOFT;
+ if (src->d_fieldmask & FS_DQ_IHARD)
+ dst->d_fieldmask |= QC_INO_HARD;
+ if (src->d_fieldmask & FS_DQ_BSOFT)
+ dst->d_fieldmask |= QC_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_BHARD)
+ dst->d_fieldmask |= QC_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_RTBSOFT)
+ dst->d_fieldmask |= QC_RT_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_RTBHARD)
+ dst->d_fieldmask |= QC_RT_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_BTIMER)
+ dst->d_fieldmask |= QC_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_ITIMER)
+ dst->d_fieldmask |= QC_INO_TIMER;
+ if (src->d_fieldmask & FS_DQ_RTBTIMER)
+ dst->d_fieldmask |= QC_RT_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_BWARNS)
+ dst->d_fieldmask |= QC_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_IWARNS)
+ dst->d_fieldmask |= QC_INO_WARNS;
+ if (src->d_fieldmask & FS_DQ_RTBWARNS)
+ dst->d_fieldmask |= QC_RT_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_BCOUNT)
+ dst->d_fieldmask |= QC_SPACE;
+ if (src->d_fieldmask & FS_DQ_ICOUNT)
+ dst->d_fieldmask |= QC_INO_COUNT;
+ if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+ dst->d_fieldmask |= QC_RT_SPACE;
+}
+
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
struct kqid qid;
if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +371,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
- return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+ copy_from_xfs_dqblk(&qdq, &fdq);
+ return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+ int type, qid_t id)
+{
+ memset(dst, 0, sizeof(*dst));
+ dst->d_version = FS_DQUOT_VERSION;
+ dst->d_id = id;
+ if (type == USRQUOTA)
+ dst->d_flags = FS_USER_QUOTA;
+ else if (type == PRJQUOTA)
+ dst->d_flags = FS_PROJ_QUOTA;
+ else
+ dst->d_flags = FS_GROUP_QUOTA;
+ dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+ dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_bcount = quota_btobb(src->d_space);
+ dst->d_icount = src->d_ino_count;
+ dst->d_itimer = src->d_ino_timer;
+ dst->d_btimer = src->d_spc_timer;
+ dst->d_iwarns = src->d_ino_warns;
+ dst->d_bwarns = src->d_spc_warns;
+ dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+ dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+ dst->d_rtbcount = quota_btobb(src->d_rt_space);
+ dst->d_rtbtimer = src->d_rt_spc_timer;
+ dst->d_rtbwarns = src->d_rt_spc_warns;
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
struct kqid qid;
int ret;
@@ -275,8 +417,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
- ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
- if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+ ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+ if (ret)
+ return ret;
+ copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+ if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
}
@@ -317,9 +462,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
case Q_QUOTAON:
return quota_quotaon(sb, type, cmd, id, path);
case Q_QUOTAOFF:
- if (!sb->s_qcop->quota_off)
- return -ENOSYS;
- return sb->s_qcop->quota_off(sb, type);
+ return quota_quotaoff(sb, type);
case Q_GETFMT:
return quota_getfmt(sb, type, addr);
case Q_GETINFO:
@@ -335,8 +478,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
return -ENOSYS;
return sb->s_qcop->quota_sync(sb, type);
case Q_XQUOTAON:
+ return quota_enable(sb, addr);
case Q_XQUOTAOFF:
- return quota_setxstate(sb, cmd, addr);
+ return quota_disable(sb, addr);
case Q_XQUOTARM:
return quota_rmxquota(sb, addr);
case Q_XGETQSTAT:
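The quota_bbtob()/quota_btobb() helpers introduced above duplicate XFS's BBTOB/BTOBB conversions (512-byte basic blocks to bytes and back, rounding up) because the XFS macros live under fs/xfs/ where xfsprogs expects them. A self-contained sketch of the same arithmetic, using local names rather than the kernel symbols:

#include <stdint.h>
#include <assert.h>

#define BB_SHIFT 9	/* 1 basic block = 512 bytes */

static uint64_t bbtob(uint64_t blocks)
{
	return blocks << BB_SHIFT;
}

static uint64_t btobb(uint64_t bytes)
{
	/* round up to a whole basic block, as the kernel helper does */
	return (bytes + (1ULL << BB_SHIFT) - 1) >> BB_SHIFT;
}

int main(void)
{
	assert(bbtob(3) == 1536);
	assert(btobb(1) == 1);		/* 1 byte still occupies one block */
	assert(btobb(1536) == 3);
	return 0;
}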
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 469c6848b322..8fe79beced5c 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
}
ret = 0;
/* limits are stored as unsigned 32-bit data */
- dqopt->info[type].dqi_maxblimit = 0xffffffff;
- dqopt->info[type].dqi_maxilimit = 0xffffffff;
+ dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
dqopt->info[type].dqi_igrace =
dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace =
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 02751ec695c5..9cb10d7197f7 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -117,16 +117,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
qinfo = info->dqi_priv;
if (version == 0) {
/* limits are stored as unsigned 32-bit data */
- info->dqi_maxblimit = 0xffffffff;
- info->dqi_maxilimit = 0xffffffff;
+ info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ info->dqi_max_ino_limit = 0xffffffff;
} else {
- /* used space is stored as unsigned 64-bit value */
- info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */
- info->dqi_maxilimit = 0xffffffffffffffffULL;
+ /* used space is stored as unsigned 64-bit value in bytes */
+ info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
+ info->dqi_max_ino_limit = 0xffffffffffffffffULL;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
- info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
+ /* No flags currently supported */
+ info->dqi_flags = 0;
qinfo->dqi_sb = sb;
qinfo->dqi_type = type;
qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
@@ -157,7 +158,8 @@ static int v2_write_file_info(struct super_block *sb, int type)
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
- dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+ /* No flags currently supported */
+ dinfo.dqi_flags = cpu_to_le32(0);
spin_unlock(&dq_data_lock);
dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
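With dqi_maxblimit (a block count) replaced by dqi_max_spc_limit (a byte count), the version-0 limit above becomes 0xffffffff quota blocks expressed in bytes. Assuming QUOTABLOCK_BITS is 10 (1 KiB quota blocks), which matches my reading of the quota headers, the resulting ceiling works out as follows:

#include <stdio.h>
#include <stdint.h>

#define QUOTABLOCK_BITS 10	/* assumed: 1 KiB quota blocks */

int main(void)
{
	/* 32-bit block-count limit, converted to bytes as the diff does */
	uint64_t max_spc = 0xffffffffULL << QUOTABLOCK_BITS;

	printf("max space limit: %llu bytes (~4 TiB)\n",
	       (unsigned long long)max_spc);
	return 0;
}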
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index bbafbde3471a..f6ab41b39612 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -34,7 +34,14 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
+static unsigned ramfs_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
+ NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
const struct file_operations ramfs_file_operations = {
+ .mmap_capabilities = ramfs_mmap_capabilities,
.mmap = ramfs_nommu_mmap,
.get_unmapped_area = ramfs_nommu_get_unmapped_area,
.read = new_sync_read,
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d365b1c4eb3c..889d558b4e05 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -50,14 +50,6 @@ static const struct address_space_operations ramfs_aops = {
.set_page_dirty = __set_page_dirty_no_writeback,
};
-static struct backing_dev_info ramfs_backing_dev_info = {
- .name = "ramfs",
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
- BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
-};
-
struct inode *ramfs_get_inode(struct super_block *sb,
const struct inode *dir, umode_t mode, dev_t dev)
{
@@ -67,7 +59,6 @@ struct inode *ramfs_get_inode(struct super_block *sb,
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_mapping->a_ops = &ramfs_aops;
- inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -267,19 +258,9 @@ static struct file_system_type ramfs_fs_type = {
int __init init_ramfs_fs(void)
{
static unsigned long once;
- int err;
if (test_and_set_bit(0, &once))
return 0;
-
- err = bdi_init(&ramfs_backing_dev_info);
- if (err)
- return err;
-
- err = register_filesystem(&ramfs_fs_type);
- if (err)
- bdi_destroy(&ramfs_backing_dev_info);
-
- return err;
+ return register_filesystem(&ramfs_fs_type);
}
fs_initcall(init_ramfs_fs);
diff --git a/fs/read_write.c b/fs/read_write.c
index c0805c93b6fa..4060691e78f7 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
return retval;
}
- if (unlikely(inode->i_flock && mandatory_lock(inode))) {
+ if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
retval = locks_mandatory_area(
read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
inode, file, pos, count);
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index ea06c7554860..7da9e2153953 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -70,6 +70,15 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
}
+static unsigned romfs_mmap_capabilities(struct file *file)
+{
+ struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd;
+
+ if (!mtd)
+ return NOMMU_MAP_COPY;
+ return mtd_mmap_capabilities(mtd);
+}
+
const struct file_operations romfs_ro_fops = {
.llseek = generic_file_llseek,
.read = new_sync_read,
@@ -77,4 +86,5 @@ const struct file_operations romfs_ro_fops = {
.splice_read = generic_file_splice_read,
.mmap = romfs_mmap,
.get_unmapped_area = romfs_get_unmapped_area,
+ .mmap_capabilities = romfs_mmap_capabilities,
};
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index e98dd88197d5..268733cda397 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -355,9 +355,6 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
case ROMFH_REG:
i->i_fop = &romfs_ro_fops;
i->i_data.a_ops = &romfs_aops;
- if (i->i_sb->s_mtd)
- i->i_data.backing_dev_info =
- i->i_sb->s_mtd->backing_dev_info;
if (nextfh & ROMFH_EXEC)
mode |= S_IXUGO;
break;
diff --git a/fs/select.c b/fs/select.c
index 467bb1cb3ea5..f684c750e08a 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -971,7 +971,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
if (ret == -EINTR) {
struct restart_block *restart_block;
- restart_block = &current_thread_info()->restart_block;
+ restart_block = &current->restart_block;
restart_block->fn = do_restart_poll;
restart_block->poll.ufds = ufds;
restart_block->poll.nfds = nfds;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index dbf3a59c86bb..555f82155be8 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -539,38 +539,6 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
return res;
}
-int seq_bitmap(struct seq_file *m, const unsigned long *bits,
- unsigned int nr_bits)
-{
- if (m->count < m->size) {
- int len = bitmap_scnprintf(m->buf + m->count,
- m->size - m->count, bits, nr_bits);
- if (m->count + len < m->size) {
- m->count += len;
- return 0;
- }
- }
- seq_set_overflow(m);
- return -1;
-}
-EXPORT_SYMBOL(seq_bitmap);
-
-int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
- unsigned int nr_bits)
-{
- if (m->count < m->size) {
- int len = bitmap_scnlistprintf(m->buf + m->count,
- m->size - m->count, bits, nr_bits);
- if (m->count + len < m->size) {
- m->count += len;
- return 0;
- }
- }
- seq_set_overflow(m);
- return -1;
-}
-EXPORT_SYMBOL(seq_bitmap_list);
-
static void *single_start(struct seq_file *p, loff_t *pos)
{
return NULL + (*pos == 0);
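The deleted seq_bitmap()/seq_bitmap_list() helpers shared one pattern: format into whatever space remains in the seq_file buffer and, if the output does not fit, call seq_set_overflow() and return -1 so the core retries with a bigger buffer. A hedged sketch of that append-or-flag-overflow pattern, using an illustrative struct rather than struct seq_file:

#include <stdio.h>
#include <string.h>

struct buf {
	char	data[32];
	size_t	count;		/* bytes already used */
	int	overflow;
};

static int buf_append(struct buf *b, const char *s)
{
	size_t len = strlen(s);

	if (b->count + len < sizeof(b->data)) {
		memcpy(b->data + b->count, s, len + 1);
		b->count += len;
		return 0;
	}
	b->overflow = 1;	/* analogous to seq_set_overflow() */
	return -1;
}

int main(void)
{
	struct buf b = { .count = 0, .overflow = 0 };

	buf_append(&b, "0-3,8");	/* a bitmap rendered as a range list */
	printf("%s (overflow=%d)\n", b.data, b.overflow);
	return 0;
}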
diff --git a/fs/super.c b/fs/super.c
index eae088f6aaae..1facd2c282e5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -36,8 +36,8 @@
#include "internal.h"
-LIST_HEAD(super_blocks);
-DEFINE_SPINLOCK(sb_lock);
+static LIST_HEAD(super_blocks);
+static DEFINE_SPINLOCK(sb_lock);
static char *sb_writers_name[SB_FREEZE_LEVELS] = {
"sb_writers",
@@ -75,10 +75,10 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
return SHRINK_STOP;
if (sb->s_op->nr_cached_objects)
- fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);
+ fs_objects = sb->s_op->nr_cached_objects(sb, sc);
- inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
- dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+ inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
+ dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
total_objects = dentries + inodes + fs_objects + 1;
if (!total_objects)
total_objects = 1;
@@ -86,19 +86,23 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
/* proportion the scan between the caches */
dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
+ fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
/*
* prune the dcache first as the icache is pinned by it, then
* prune the icache, followed by the filesystem specific caches
+ *
+ * Ensure that we always scan at least one object - memcg kmem
+ * accounting uses this to fully empty the caches.
*/
- freed = prune_dcache_sb(sb, dentries, sc->nid);
- freed += prune_icache_sb(sb, inodes, sc->nid);
+ sc->nr_to_scan = dentries + 1;
+ freed = prune_dcache_sb(sb, sc);
+ sc->nr_to_scan = inodes + 1;
+ freed += prune_icache_sb(sb, sc);
if (fs_objects) {
- fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
- total_objects);
- freed += sb->s_op->free_cached_objects(sb, fs_objects,
- sc->nid);
+ sc->nr_to_scan = fs_objects + 1;
+ freed += sb->s_op->free_cached_objects(sb, sc);
}
drop_super(sb);
@@ -118,17 +122,14 @@ static unsigned long super_cache_count(struct shrinker *shrink,
* scalability bottleneck. The counts could get updated
* between super_cache_count and super_cache_scan anyway.
* Call to super_cache_count with shrinker_rwsem held
- * ensures the safety of call to list_lru_count_node() and
+ * ensures the safety of call to list_lru_shrink_count() and
* s_op->nr_cached_objects().
*/
if (sb->s_op && sb->s_op->nr_cached_objects)
- total_objects = sb->s_op->nr_cached_objects(sb,
- sc->nid);
+ total_objects = sb->s_op->nr_cached_objects(sb, sc);
- total_objects += list_lru_count_node(&sb->s_dentry_lru,
- sc->nid);
- total_objects += list_lru_count_node(&sb->s_inode_lru,
- sc->nid);
+ total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
+ total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
total_objects = vfs_pressure_ratio(total_objects);
return total_objects;
@@ -185,15 +186,15 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
}
init_waitqueue_head(&s->s_writers.wait);
init_waitqueue_head(&s->s_writers.wait_unfrozen);
+ s->s_bdi = &noop_backing_dev_info;
s->s_flags = flags;
- s->s_bdi = &default_backing_dev_info;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
- if (list_lru_init(&s->s_dentry_lru))
+ if (list_lru_init_memcg(&s->s_dentry_lru))
goto fail;
- if (list_lru_init(&s->s_inode_lru))
+ if (list_lru_init_memcg(&s->s_inode_lru))
goto fail;
init_rwsem(&s->s_umount);
@@ -229,7 +230,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
s->s_shrink.scan_objects = super_cache_scan;
s->s_shrink.count_objects = super_cache_count;
s->s_shrink.batch = 1024;
- s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+ s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
return s;
fail:
@@ -284,6 +285,14 @@ void deactivate_locked_super(struct super_block *s)
unregister_shrinker(&s->s_shrink);
fs->kill_sb(s);
+ /*
+ * Since list_lru_destroy() may sleep, we cannot call it from
+ * put_super(), where we hold the sb_lock. Therefore we destroy
+ * the lru lists right now.
+ */
+ list_lru_destroy(&s->s_dentry_lru);
+ list_lru_destroy(&s->s_inode_lru);
+
put_filesystem(fs);
put_super(s);
} else {
@@ -863,10 +872,7 @@ EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
- int error = get_anon_bdev(&s->s_dev);
- if (!error)
- s->s_bdi = &noop_backing_dev_info;
- return error;
+ return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);
@@ -1111,7 +1117,6 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
sb = root->d_sb;
BUG_ON(!sb);
WARN_ON(!sb->s_bdi);
- WARN_ON(sb->s_bdi == &default_backing_dev_info);
sb->s_flags |= MS_BORN;
error = security_sb_kern_mount(sb, flags, secdata);
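super_cache_scan() above now proportions sc->nr_to_scan across the dentry, inode and filesystem-private caches and always scans at least one object so memcg accounting can fully drain a cache. The proportioning arithmetic, sketched with a plain reimplementation of mult_frac() (the real kernel macro is more careful about overflow), is:

#include <stdio.h>

static unsigned long mult_frac(unsigned long x, unsigned long num,
			       unsigned long den)
{
	/* good enough for a sketch; the kernel macro avoids overflow better */
	return x * num / den;
}

int main(void)
{
	unsigned long nr_to_scan = 128;
	unsigned long dentries = 600, inodes = 300, fs_objects = 100;
	unsigned long total = dentries + inodes + fs_objects + 1;

	/* each cache gets a proportional share, plus the "at least one" rule */
	printf("dentry scan: %lu\n", mult_frac(nr_to_scan, dentries, total) + 1);
	printf("inode scan:  %lu\n", mult_frac(nr_to_scan, inodes, total) + 1);
	printf("fs scan:     %lu\n", mult_frac(nr_to_scan, fs_objects, total) + 1);
	return 0;
}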
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index dfe928a9540f..7c2867b44141 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -295,7 +295,7 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
key = attr->key ?: (struct lock_class_key *)&attr->skey;
#endif
kn = __kernfs_create_file(parent, attr->name, mode & 0777, size, ops,
- (void *)attr, ns, true, key);
+ (void *)attr, ns, key);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(parent, attr->name);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 7d2a860ba788..2554d8835b48 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -99,7 +99,7 @@ static int internal_create_group(struct kobject *kobj, int update,
return -EINVAL;
if (!grp->attrs && !grp->bin_attrs) {
WARN(1, "sysfs: (bin_)attrs not set by subsystem for group: %s/%s\n",
- kobj->name, grp->name ? "" : grp->name);
+ kobj->name, grp->name ?: "");
return -EINVAL;
}
if (grp->name) {
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7ed13e1e216a..4cfb3e82c56f 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2032,6 +2032,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
long long blk_offs;
struct ubifs_data_node *dn = node;
+ ubifs_assert(zbr->len >= UBIFS_DATA_NODE_SZ);
+
/*
* Search the inode node this data node belongs to and insert
* it to the RB-tree of inodes.
@@ -2060,6 +2062,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
struct ubifs_dent_node *dent = node;
struct fsck_inode *fscki1;
+ ubifs_assert(zbr->len >= UBIFS_DENT_NODE_SZ);
+
err = ubifs_validate_entry(c, dent);
if (err)
goto out_dump;
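The new ubifs_assert() calls above state that a zbranch must describe at least a full data or dentry node before check_leaf() interprets it. The same guard, written as a standalone sketch with a stand-in structure rather than the UBIFS on-media layout:

#include <stdio.h>
#include <stdint.h>

struct data_node {
	uint32_t magic;
	uint32_t len;
	/* payload follows */
};

/* refuse to interpret a buffer shorter than the fixed node structure */
static const struct data_node *as_data_node(const void *buf, size_t buf_len)
{
	if (buf_len < sizeof(struct data_node))
		return NULL;	/* too short to be a valid node */
	return buf;
}

int main(void)
{
	unsigned char raw[sizeof(struct data_node)] = { 0 };

	printf("%s\n", as_data_node(raw, sizeof(raw)) ? "ok" : "short");
	return 0;
}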
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ea41649e4ca5..0fa6c803992e 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -108,8 +108,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
inode->i_mtime = inode->i_atime = inode->i_ctime =
ubifs_current_time(inode);
inode->i_mapping->nrpages = 0;
- /* Disable readahead */
- inode->i_mapping->backing_dev_info = &c->bdi;
switch (mode & S_IFMT) {
case S_IFREG:
@@ -272,6 +270,10 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out_budg;
}
+ err = ubifs_init_security(dir, inode, &dentry->d_name);
+ if (err)
+ goto out_cancel;
+
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
@@ -728,6 +730,10 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out_budg;
}
+ err = ubifs_init_security(dir, inode, &dentry->d_name);
+ if (err)
+ goto out_cancel;
+
mutex_lock(&dir_ui->ui_mutex);
insert_inode_hash(inode);
inc_nlink(inode);
@@ -808,6 +814,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
ui->data = dev;
ui->data_len = devlen;
+ err = ubifs_init_security(dir, inode, &dentry->d_name);
+ if (err)
+ goto out_cancel;
+
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
@@ -884,6 +894,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
ui->data_len = len;
inode->i_size = ubifs_inode(inode)->ui_size = len;
+ err = ubifs_init_security(dir, inode, &dentry->d_name);
+ if (err)
+ goto out_cancel;
+
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 538519ee37d9..e627c0acf626 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1536,7 +1536,6 @@ static const struct vm_operations_struct ubifs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = ubifs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1574,6 +1573,10 @@ const struct inode_operations ubifs_symlink_inode_operations = {
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+ .setxattr = ubifs_setxattr,
+ .getxattr = ubifs_getxattr,
+ .listxattr = ubifs_listxattr,
+ .removexattr = ubifs_removexattr,
};
const struct file_operations ubifs_file_operations = {
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 3187925e9879..9b40a1c5e160 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1028,9 +1028,22 @@ int ubifs_replay_journal(struct ubifs_info *c)
do {
err = replay_log_leb(c, lnum, 0, c->sbuf);
- if (err == 1)
- /* We hit the end of the log */
- break;
+ if (err == 1) {
+ if (lnum != c->lhead_lnum)
+ /* We hit the end of the log */
+ break;
+
+ /*
+ * The head of the log must always start with the
+ * "commit start" node on a properly formatted UBIFS.
+ * But we found no nodes at all, which means that
+ * something went wrong and we cannot proceed with mounting
+ * the file-system.
+ */
+ ubifs_err("no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
+ lnum, 0);
+ err = -EINVAL;
+ }
if (err)
goto out;
lnum = ubifs_next_log_lnum(c, lnum);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 106bf20629ce..93e946561c5c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -156,9 +156,6 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
if (err)
goto out_invalid;
- /* Disable read-ahead */
- inode->i_mapping->backing_dev_info = &c->bdi;
-
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_mapping->a_ops = &ubifs_file_address_operations;
@@ -2017,7 +2014,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
* Read-ahead will be disabled because @c->bdi.ra_pages is 0.
*/
c->bdi.name = "ubifs",
- c->bdi.capabilities = BDI_CAP_MAP_COPY;
+ c->bdi.capabilities = 0;
err = bdi_init(&c->bdi);
if (err)
goto out_close;
@@ -2039,6 +2036,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
if (c->max_inode_sz > MAX_LFS_FILESIZE)
sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
sb->s_op = &ubifs_super_operations;
+ sb->s_xattr = ubifs_xattr_handlers;
mutex_lock(&c->umount_mutex);
err = mount_ubifs(c);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c4fe900c67ab..bc04b9c69891 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -36,6 +36,7 @@
#include <linux/mtd/ubi.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
+#include <linux/security.h>
#include "ubifs-media.h"
/* Version of this UBIFS implementation */
@@ -1465,6 +1466,7 @@ extern spinlock_t ubifs_infos_lock;
extern atomic_long_t ubifs_clean_zn_cnt;
extern struct kmem_cache *ubifs_inode_slab;
extern const struct super_operations ubifs_super_operations;
+extern const struct xattr_handler *ubifs_xattr_handlers[];
extern const struct address_space_operations ubifs_file_address_operations;
extern const struct file_operations ubifs_file_operations;
extern const struct inode_operations ubifs_file_inode_operations;
@@ -1754,6 +1756,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
size_t size);
ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ubifs_removexattr(struct dentry *dentry, const char *name);
+int ubifs_init_security(struct inode *dentry, struct inode *inode,
+ const struct qstr *qstr);
/* super.c */
struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 5e0a63b1b0d5..a92be244a6fb 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -100,24 +100,30 @@ static const struct file_operations empty_fops;
static int create_xattr(struct ubifs_info *c, struct inode *host,
const struct qstr *nm, const void *value, int size)
{
- int err;
+ int err, names_len;
struct inode *inode;
struct ubifs_inode *ui, *host_ui = ubifs_inode(host);
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
.new_ino_d = ALIGN(size, 8), .dirtied_ino = 1,
.dirtied_ino_d = ALIGN(host_ui->data_len, 8) };
- if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE)
+ if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) {
+ ubifs_err("inode %lu already has too many xattrs (%d), cannot create more",
+ host->i_ino, host_ui->xattr_cnt);
return -ENOSPC;
+ }
/*
* Linux limits the maximum size of the extended attribute names list
* to %XATTR_LIST_MAX. This means we should not allow creating more
* extended attributes if the name list becomes larger. This limitation
* is artificial for UBIFS, though.
*/
- if (host_ui->xattr_names + host_ui->xattr_cnt +
- nm->len + 1 > XATTR_LIST_MAX)
+ names_len = host_ui->xattr_names + host_ui->xattr_cnt + nm->len + 1;
+ if (names_len > XATTR_LIST_MAX) {
+ ubifs_err("cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d",
+ host->i_ino, names_len, XATTR_LIST_MAX);
return -ENOSPC;
+ }
err = ubifs_budget_space(c, &req);
if (err)
@@ -293,18 +299,16 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum)
return ERR_PTR(-EINVAL);
}
-int ubifs_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+static int setxattr(struct inode *host, const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode, *host = dentry->d_inode;
+ struct inode *inode;
struct ubifs_info *c = host->i_sb->s_fs_info;
struct qstr nm = QSTR_INIT(name, strlen(name));
struct ubifs_dent_node *xent;
union ubifs_key key;
int err, type;
- dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name,
- host->i_ino, dentry, size);
ubifs_assert(mutex_is_locked(&host->i_mutex));
if (size > UBIFS_MAX_INO_DATA)
@@ -356,6 +360,15 @@ out_free:
return err;
}
+int ubifs_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
+ name, dentry->d_inode->i_ino, dentry, size);
+
+ return setxattr(dentry->d_inode, name, value, size, flags);
+}
+
ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
size_t size)
{
@@ -568,3 +581,84 @@ out_free:
kfree(xent);
return err;
}
+
+static size_t security_listxattr(struct dentry *d, char *list, size_t list_size,
+ const char *name, size_t name_len, int flags)
+{
+ const int prefix_len = XATTR_SECURITY_PREFIX_LEN;
+ const size_t total_len = prefix_len + name_len + 1;
+
+ if (list && total_len <= list_size) {
+ memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
+ memcpy(list + prefix_len, name, name_len);
+ list[prefix_len + name_len] = '\0';
+ }
+
+ return total_len;
+}
+
+static int security_getxattr(struct dentry *d, const char *name, void *buffer,
+ size_t size, int flags)
+{
+ return ubifs_getxattr(d, name, buffer, size);
+}
+
+static int security_setxattr(struct dentry *d, const char *name,
+ const void *value, size_t size, int flags,
+ int handler_flags)
+{
+ return ubifs_setxattr(d, name, value, size, flags);
+}
+
+static const struct xattr_handler ubifs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .list = security_listxattr,
+ .get = security_getxattr,
+ .set = security_setxattr,
+};
+
+const struct xattr_handler *ubifs_xattr_handlers[] = {
+ &ubifs_xattr_security_handler,
+ NULL,
+};
+
+static int init_xattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ char *name;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
+ strcpy(name, XATTR_SECURITY_PREFIX);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ err = setxattr(inode, name, xattr->value, xattr->value_len, 0);
+ kfree(name);
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+int ubifs_init_security(struct inode *dentry, struct inode *inode,
+ const struct qstr *qstr)
+{
+ int err;
+
+ mutex_lock(&inode->i_mutex);
+ err = security_inode_init_security(inode, dentry, qstr,
+ &init_xattrs, 0);
+ mutex_unlock(&inode->i_mutex);
+
+ if (err)
+ ubifs_err("cannot initialize security for inode %lu, error %d",
+ inode->i_ino, err);
+ return err;
+}
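init_xattrs() above receives bare attribute names from the LSM and prepends the "security." namespace before storing them via setxattr(). A self-contained sketch of that name construction (the constants and helper are local stand-ins, not UBIFS symbols):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SECURITY_PREFIX		"security."
#define SECURITY_PREFIX_LEN	(sizeof(SECURITY_PREFIX) - 1)

static char *build_xattr_name(const char *suffix)
{
	size_t len = SECURITY_PREFIX_LEN + strlen(suffix) + 1;
	char *name = malloc(len);

	if (!name)
		return NULL;
	strcpy(name, SECURITY_PREFIX);
	strcpy(name + SECURITY_PREFIX_LEN, suffix);
	return name;
}

int main(void)
{
	char *name = build_xattr_name("selinux");

	if (name) {
		printf("%s\n", name);	/* prints "security.selinux" */
		free(name);
	}
	return 0;
}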
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index 0e0e99bd6bce..c6e17a744c3b 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -2,10 +2,12 @@ config UDF_FS
tristate "UDF file system support"
select CRC_ITU_T
help
- This is the new file system used on some CD-ROMs and DVDs. Say Y if
- you intend to mount DVD discs or CDRW's written in packet mode, or
- if written to by other UDF utilities, such as DirectCD.
- Please read <file:Documentation/filesystems/udf.txt>.
+ This is a file system used on some CD-ROMs and DVDs. Since the
+ file system is supported by multiple operating systems and is more
+ compatible with standard unix file systems, it is also suitable for
+ removable USB disks. Say Y if you intend to mount DVD discs or CDRW's
+ written in packet mode, or if you want to use UDF for removable USB
+ disks. Please read <file:Documentation/filesystems/udf.txt>.
To compile this file system support as a module, choose M here: the
module will be called udf.
diff --git a/fs/udf/file.c b/fs/udf/file.c
index bb15771b92ae..08f3555fbeac 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -224,7 +224,7 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE &&
- atomic_read(&inode->i_writecount) > 1) {
+ atomic_read(&inode->i_writecount) == 1) {
/*
* Grab i_mutex to avoid races with writes changing i_size
* while we are running.
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5bc71d9a674a..a445d599098d 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -750,7 +750,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
/* Are we beyond EOF? */
if (etype == -1) {
int ret;
- isBeyondEOF = 1;
+ isBeyondEOF = true;
if (count) {
if (c)
laarr[0] = laarr[1];
@@ -792,7 +792,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
endnum = c + 1;
lastblock = 1;
} else {
- isBeyondEOF = 0;
+ isBeyondEOF = false;
endnum = startnum = ((count > 2) ? 2 : count);
/* if the current extent is in position 0,
@@ -1288,6 +1288,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
+ int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
reread:
@@ -1374,38 +1375,35 @@ reread:
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ ret = udf_alloc_i_data(inode, bs -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
- inode->i_sb->s_blocksize -
- sizeof(struct extendedFileEntry));
+ bs - sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct fileEntry));
+ ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
- inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+ bs - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ ret = udf_alloc_i_data(inode, bs -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
- inode->i_sb->s_blocksize -
- sizeof(struct unallocSpaceEntry));
+ bs - sizeof(struct unallocSpaceEntry));
return 0;
}
@@ -1489,6 +1487,15 @@ reread:
}
inode->i_generation = iinfo->i_unique;
+ /*
+ * Sanity check length of allocation descriptors and extended attrs to
+ * avoid integer overflows
+ */
+ if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
+ goto out;
+ /* Now do exact checks */
+ if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
+ goto out;
/* Sanity checks for files in ICB so that we don't get confused later */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/*
@@ -1498,8 +1505,7 @@ reread:
if (iinfo->i_lenAlloc != inode->i_size)
goto out;
/* File in ICB has to fit in there... */
- if (inode->i_size > inode->i_sb->s_blocksize -
- udf_file_entry_alloc_offset(inode))
+ if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
goto out;
}
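The new sanity checks in udf_read_inode() above reject an i_lenEAttr or i_lenAlloc larger than the block size before doing the exact offset comparison, so the subsequent addition operates on bounded values. A minimal sketch of that two-step validation with illustrative names and header size:

#include <stdio.h>
#include <stdint.h>

#define FE_HEADER_SIZE 176u	/* stand-in for the fixed fileEntry header */

static int validate_lengths(uint32_t len_eattr, uint32_t len_alloc, uint32_t bs)
{
	if (len_eattr > bs || len_alloc > bs)
		return -1;	/* coarse check: crafted huge fields */
	if (FE_HEADER_SIZE + len_eattr + len_alloc > bs)
		return -1;	/* exact check: must fit inside one block */
	return 0;
}

int main(void)
{
	/* a crafted inode advertising a huge allocation area is rejected */
	printf("%d\n", validate_lengths(0, 0xffffff00u, 2048));
	/* a sane inode passes both checks */
	printf("%d\n", validate_lengths(64, 512, 2048));
	return 0;
}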
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3ccb2f11fc76..f169411c4ea0 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1599,7 +1599,7 @@ static noinline int udf_process_sequence(
struct udf_vds_record *curr;
struct generic_desc *gd;
struct volDescPtr *vdp;
- int done = 0;
+ bool done = false;
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
@@ -1680,7 +1680,7 @@ static noinline int udf_process_sequence(
lastblock = next_e;
next_s = next_e = 0;
} else
- done = 1;
+ done = true;
break;
}
brelse(bh);
@@ -2300,6 +2300,7 @@ static void udf_put_super(struct super_block *sb)
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
udf_sb_free_partitions(sb);
+ mutex_destroy(&sbi->s_alloc_mutex);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 53e95b2a1369..a7a3a63bb360 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -91,16 +91,6 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
return ptr;
}
-void
-kmem_free(const void *ptr)
-{
- if (!is_vmalloc_addr(ptr)) {
- kfree(ptr);
- } else {
- vfree(ptr);
- }
-}
-
void *
kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
xfs_km_flags_t flags)
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 64db0e53edea..cc6b768fc068 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -63,7 +63,10 @@ kmem_flags_convert(xfs_km_flags_t flags)
extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
-extern void kmem_free(const void *);
+static inline void kmem_free(const void *ptr)
+{
+ kvfree(ptr);
+}
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 5d38e8b8a913..15105dbc9e28 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -403,7 +403,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
xfs_sb_version_addattr2(&mp->m_sb);
spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+ xfs_log_sb(tp);
} else
spin_unlock(&mp->m_sb_lock);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index b5eb4743f75a..61ec015dca16 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -973,7 +973,11 @@ xfs_bmap_local_to_extents(
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
- /* initialise the block and copy the data */
+ /*
+ * Initialise the block and copy the data
+ *
+ * Note: init_fn must set the buffer log item type correctly!
+ */
init_fn(tp, bp, ip, ifp);
/* account for the change in fork size and log everything */
@@ -1221,22 +1225,20 @@ xfs_bmap_add_attrfork(
goto bmap_cancel;
if (!xfs_sb_version_hasattr(&mp->m_sb) ||
(!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
- __int64_t sbfields = 0;
+ bool log_sb = false;
spin_lock(&mp->m_sb_lock);
if (!xfs_sb_version_hasattr(&mp->m_sb)) {
xfs_sb_version_addattr(&mp->m_sb);
- sbfields |= XFS_SB_VERSIONNUM;
+ log_sb = true;
}
if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
xfs_sb_version_addattr2(&mp->m_sb);
- sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+ log_sb = true;
}
- if (sbfields) {
- spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, sbfields);
- } else
- spin_unlock(&mp->m_sb_lock);
+ spin_unlock(&mp->m_sb_lock);
+ if (log_sb)
+ xfs_log_sb(tp);
}
error = xfs_bmap_finish(&tp, &flist, &committed);
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 44db6db86402..b9d8a499d2c4 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -28,6 +28,37 @@ struct xfs_trans;
extern kmem_zone_t *xfs_bmap_free_item_zone;
/*
+ * Argument structure for xfs_bmap_alloc.
+ */
+struct xfs_bmalloca {
+ xfs_fsblock_t *firstblock; /* i/o first block allocated */
+ struct xfs_bmap_free *flist; /* bmap freelist */
+ struct xfs_trans *tp; /* transaction pointer */
+ struct xfs_inode *ip; /* incore inode pointer */
+ struct xfs_bmbt_irec prev; /* extent before the new one */
+ struct xfs_bmbt_irec got; /* extent after, or delayed */
+
+ xfs_fileoff_t offset; /* offset in file filling in */
+ xfs_extlen_t length; /* i/o length asked/allocated */
+ xfs_fsblock_t blkno; /* starting block of new extent */
+
+ struct xfs_btree_cur *cur; /* btree cursor */
+ xfs_extnum_t idx; /* current extent index */
+ int nallocs;/* number of extents alloc'd */
+ int logflags;/* flags for transaction logging */
+
+ xfs_extlen_t total; /* total blocks needed for xaction */
+ xfs_extlen_t minlen; /* minimum allocation size (blocks) */
+ xfs_extlen_t minleft; /* amount must be left after alloc */
+ bool eof; /* set if allocating past last extent */
+ bool wasdel; /* replacing a delayed allocation */
+ bool userdata;/* set if is user data */
+ bool aeof; /* allocated space at eof */
+ bool conv; /* overwriting unwritten extents */
+ int flags;
+};
+
+/*
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
*/
@@ -149,6 +180,8 @@ void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
struct xfs_bmap_free *flist, struct xfs_mount *mp);
void xfs_bmap_cancel(struct xfs_bmap_free *flist);
+int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
+ int *committed);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index fbd6da263571..8eb718979383 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -151,10 +151,13 @@ typedef struct xfs_sb {
__uint32_t sb_features2; /* additional feature bits */
/*
- * bad features2 field as a result of failing to pad the sb
- * structure to 64 bits. Some machines will be using this field
- * for features2 bits. Easiest just to mark it bad and not use
- * it for anything else.
+ * bad features2 field as a result of failing to pad the sb structure to
+ * 64 bits. Some machines will be using this field for features2 bits.
+ * Easiest just to mark it bad and not use it for anything else.
+ *
+ * This is not kept up to date in memory; it is always overwritten by
+ * the value in sb_features2 when formatting the incore superblock to
+ * the disk buffer.
*/
__uint32_t sb_bad_features2;
@@ -304,8 +307,8 @@ typedef enum {
#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
-#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2)
-#define XFS_SB_BAD_FEATURES2 XFS_SB_MVAL(BAD_FEATURES2)
+#define XFS_SB_FEATURES2 (XFS_SB_MVAL(FEATURES2) | \
+ XFS_SB_MVAL(BAD_FEATURES2))
#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
@@ -319,9 +322,9 @@ typedef enum {
XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
- XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
- XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
- XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
+ XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
+ XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
+ XFS_SB_PQUOTINO)
/*
@@ -453,13 +456,11 @@ static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
{
sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
- sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
}
static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
{
sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
- sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
if (!sbp->sb_features2)
sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
}
@@ -475,7 +476,6 @@ static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
{
sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
- sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
}
/*
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 18dc721ca19f..18dc721ca19f 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 752915fa775a..b0a5fe95a3e2 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -40,69 +40,6 @@
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
*/
-static const struct {
- short offset;
- short type; /* 0 = integer
- * 1 = binary / string (no translation)
- */
-} xfs_sb_info[] = {
- { offsetof(xfs_sb_t, sb_magicnum), 0 },
- { offsetof(xfs_sb_t, sb_blocksize), 0 },
- { offsetof(xfs_sb_t, sb_dblocks), 0 },
- { offsetof(xfs_sb_t, sb_rblocks), 0 },
- { offsetof(xfs_sb_t, sb_rextents), 0 },
- { offsetof(xfs_sb_t, sb_uuid), 1 },
- { offsetof(xfs_sb_t, sb_logstart), 0 },
- { offsetof(xfs_sb_t, sb_rootino), 0 },
- { offsetof(xfs_sb_t, sb_rbmino), 0 },
- { offsetof(xfs_sb_t, sb_rsumino), 0 },
- { offsetof(xfs_sb_t, sb_rextsize), 0 },
- { offsetof(xfs_sb_t, sb_agblocks), 0 },
- { offsetof(xfs_sb_t, sb_agcount), 0 },
- { offsetof(xfs_sb_t, sb_rbmblocks), 0 },
- { offsetof(xfs_sb_t, sb_logblocks), 0 },
- { offsetof(xfs_sb_t, sb_versionnum), 0 },
- { offsetof(xfs_sb_t, sb_sectsize), 0 },
- { offsetof(xfs_sb_t, sb_inodesize), 0 },
- { offsetof(xfs_sb_t, sb_inopblock), 0 },
- { offsetof(xfs_sb_t, sb_fname[0]), 1 },
- { offsetof(xfs_sb_t, sb_blocklog), 0 },
- { offsetof(xfs_sb_t, sb_sectlog), 0 },
- { offsetof(xfs_sb_t, sb_inodelog), 0 },
- { offsetof(xfs_sb_t, sb_inopblog), 0 },
- { offsetof(xfs_sb_t, sb_agblklog), 0 },
- { offsetof(xfs_sb_t, sb_rextslog), 0 },
- { offsetof(xfs_sb_t, sb_inprogress), 0 },
- { offsetof(xfs_sb_t, sb_imax_pct), 0 },
- { offsetof(xfs_sb_t, sb_icount), 0 },
- { offsetof(xfs_sb_t, sb_ifree), 0 },
- { offsetof(xfs_sb_t, sb_fdblocks), 0 },
- { offsetof(xfs_sb_t, sb_frextents), 0 },
- { offsetof(xfs_sb_t, sb_uquotino), 0 },
- { offsetof(xfs_sb_t, sb_gquotino), 0 },
- { offsetof(xfs_sb_t, sb_qflags), 0 },
- { offsetof(xfs_sb_t, sb_flags), 0 },
- { offsetof(xfs_sb_t, sb_shared_vn), 0 },
- { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
- { offsetof(xfs_sb_t, sb_unit), 0 },
- { offsetof(xfs_sb_t, sb_width), 0 },
- { offsetof(xfs_sb_t, sb_dirblklog), 0 },
- { offsetof(xfs_sb_t, sb_logsectlog), 0 },
- { offsetof(xfs_sb_t, sb_logsectsize), 0 },
- { offsetof(xfs_sb_t, sb_logsunit), 0 },
- { offsetof(xfs_sb_t, sb_features2), 0 },
- { offsetof(xfs_sb_t, sb_bad_features2), 0 },
- { offsetof(xfs_sb_t, sb_features_compat), 0 },
- { offsetof(xfs_sb_t, sb_features_ro_compat), 0 },
- { offsetof(xfs_sb_t, sb_features_incompat), 0 },
- { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
- { offsetof(xfs_sb_t, sb_crc), 0 },
- { offsetof(xfs_sb_t, sb_pad), 0 },
- { offsetof(xfs_sb_t, sb_pquotino), 0 },
- { offsetof(xfs_sb_t, sb_lsn), 0 },
- { sizeof(xfs_sb_t), 0 }
-};
-
/*
* Reference counting access wrappers to the perag structures.
* Because we never free per-ag structures, the only thing we
@@ -461,58 +398,49 @@ xfs_sb_from_disk(
__xfs_sb_from_disk(to, from, true);
}
-static inline void
+static void
xfs_sb_quota_to_disk(
- xfs_dsb_t *to,
- xfs_sb_t *from,
- __int64_t *fields)
+ struct xfs_dsb *to,
+ struct xfs_sb *from)
{
__uint16_t qflags = from->sb_qflags;
+ to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
+ if (xfs_sb_version_has_pquotino(from)) {
+ to->sb_qflags = cpu_to_be16(from->sb_qflags);
+ to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
+ to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
+ return;
+ }
+
/*
- * We need to do these manipilations only if we are working
- * with an older version of on-disk superblock.
+ * The in-core version of sb_qflags does not have XFS_OQUOTA_*
+ * flags, whereas the on-disk version does. So, convert incore
+ * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags.
*/
- if (xfs_sb_version_has_pquotino(from))
- return;
+ qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
+ XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
- if (*fields & XFS_SB_QFLAGS) {
- /*
- * The in-core version of sb_qflags do not have
- * XFS_OQUOTA_* flags, whereas the on-disk version
- * does. So, convert incore XFS_{PG}QUOTA_* flags
- * to on-disk XFS_OQUOTA_* flags.
- */
- qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
- XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
-
- if (from->sb_qflags &
- (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
- qflags |= XFS_OQUOTA_ENFD;
- if (from->sb_qflags &
- (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
- qflags |= XFS_OQUOTA_CHKD;
- to->sb_qflags = cpu_to_be16(qflags);
- *fields &= ~XFS_SB_QFLAGS;
- }
+ if (from->sb_qflags &
+ (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
+ qflags |= XFS_OQUOTA_ENFD;
+ if (from->sb_qflags &
+ (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
+ qflags |= XFS_OQUOTA_CHKD;
+ to->sb_qflags = cpu_to_be16(qflags);
/*
- * GQUOTINO and PQUOTINO cannot be used together in versions of
- * superblock that do not have pquotino. from->sb_flags tells us which
- * quota is active and should be copied to disk. If neither are active,
- * make sure we write NULLFSINO to the sb_gquotino field as a quota
- * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
- * bit is set.
+ * GQUOTINO and PQUOTINO cannot be used together in versions
+ * of superblock that do not have pquotino. from->sb_flags
+ * tells us which quota is active and should be copied to
+ * disk. If neither are active, we should NULL the inode.
*
- * Note that we don't need to handle the sb_uquotino or sb_pquotino here
- * as they do not require any translation. Hence the main sb field loop
- * will write them appropriately from the in-core superblock.
+ * In all cases, the separate pquotino must remain 0 because it
+ * is beyond the "end" of the valid non-pquotino superblock.
*/
- if ((*fields & XFS_SB_GQUOTINO) &&
- (from->sb_qflags & XFS_GQUOTA_ACCT))
+ if (from->sb_qflags & XFS_GQUOTA_ACCT)
to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
- else if ((*fields & XFS_SB_PQUOTINO) &&
- (from->sb_qflags & XFS_PQUOTA_ACCT))
+ else if (from->sb_qflags & XFS_PQUOTA_ACCT)
to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
else {
/*
@@ -526,63 +454,78 @@ xfs_sb_quota_to_disk(
to->sb_gquotino = cpu_to_be64(NULLFSINO);
}
- *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
+ to->sb_pquotino = 0;
}
-/*
- * Copy in core superblock to ondisk one.
- *
- * The fields argument is mask of superblock fields to copy.
- */
void
xfs_sb_to_disk(
- xfs_dsb_t *to,
- xfs_sb_t *from,
- __int64_t fields)
+ struct xfs_dsb *to,
+ struct xfs_sb *from)
{
- xfs_caddr_t to_ptr = (xfs_caddr_t)to;
- xfs_caddr_t from_ptr = (xfs_caddr_t)from;
- xfs_sb_field_t f;
- int first;
- int size;
-
- ASSERT(fields);
- if (!fields)
- return;
+ xfs_sb_quota_to_disk(to, from);
- /* We should never write the crc here, it's updated in the IO path */
- fields &= ~XFS_SB_CRC;
-
- xfs_sb_quota_to_disk(to, from, &fields);
- while (fields) {
- f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
- first = xfs_sb_info[f].offset;
- size = xfs_sb_info[f + 1].offset - first;
-
- ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
-
- if (size == 1 || xfs_sb_info[f].type == 1) {
- memcpy(to_ptr + first, from_ptr + first, size);
- } else {
- switch (size) {
- case 2:
- *(__be16 *)(to_ptr + first) =
- cpu_to_be16(*(__u16 *)(from_ptr + first));
- break;
- case 4:
- *(__be32 *)(to_ptr + first) =
- cpu_to_be32(*(__u32 *)(from_ptr + first));
- break;
- case 8:
- *(__be64 *)(to_ptr + first) =
- cpu_to_be64(*(__u64 *)(from_ptr + first));
- break;
- default:
- ASSERT(0);
- }
- }
+ to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
+ to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
+ to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
+ to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
+ to->sb_rextents = cpu_to_be64(from->sb_rextents);
+ memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
+ to->sb_logstart = cpu_to_be64(from->sb_logstart);
+ to->sb_rootino = cpu_to_be64(from->sb_rootino);
+ to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
+ to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
+ to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
+ to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
+ to->sb_agcount = cpu_to_be32(from->sb_agcount);
+ to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
+ to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
+ to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
+ to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
+ to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
+ to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
+ memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
+ to->sb_blocklog = from->sb_blocklog;
+ to->sb_sectlog = from->sb_sectlog;
+ to->sb_inodelog = from->sb_inodelog;
+ to->sb_inopblog = from->sb_inopblog;
+ to->sb_agblklog = from->sb_agblklog;
+ to->sb_rextslog = from->sb_rextslog;
+ to->sb_inprogress = from->sb_inprogress;
+ to->sb_imax_pct = from->sb_imax_pct;
+ to->sb_icount = cpu_to_be64(from->sb_icount);
+ to->sb_ifree = cpu_to_be64(from->sb_ifree);
+ to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
+ to->sb_frextents = cpu_to_be64(from->sb_frextents);
- fields &= ~(1LL << f);
+ to->sb_flags = from->sb_flags;
+ to->sb_shared_vn = from->sb_shared_vn;
+ to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
+ to->sb_unit = cpu_to_be32(from->sb_unit);
+ to->sb_width = cpu_to_be32(from->sb_width);
+ to->sb_dirblklog = from->sb_dirblklog;
+ to->sb_logsectlog = from->sb_logsectlog;
+ to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
+ to->sb_logsunit = cpu_to_be32(from->sb_logsunit);
+
+ /*
+ * We need to ensure that bad_features2 always matches features2.
+ * Hence we enforce that here rather than having to remember to do it
+ * everywhere else that updates features2.
+ */
+ from->sb_bad_features2 = from->sb_features2;
+ to->sb_features2 = cpu_to_be32(from->sb_features2);
+ to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);
+
+ if (xfs_sb_version_hascrc(from)) {
+ to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
+ to->sb_features_ro_compat =
+ cpu_to_be32(from->sb_features_ro_compat);
+ to->sb_features_incompat =
+ cpu_to_be32(from->sb_features_incompat);
+ to->sb_features_log_incompat =
+ cpu_to_be32(from->sb_features_log_incompat);
+ to->sb_pad = 0;
+ to->sb_lsn = cpu_to_be64(from->sb_lsn);
}
}
@@ -816,42 +759,51 @@ xfs_initialize_perag_data(
}
/*
- * xfs_mod_sb() can be used to copy arbitrary changes to the
- * in-core superblock into the superblock buffer to be logged.
- * It does not provide the higher level of locking that is
- * needed to protect the in-core superblock from concurrent
- * access.
+ * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
+ * into the superblock buffer to be logged. It does not provide the higher
+ * level of locking that is needed to protect the in-core superblock from
+ * concurrent access.
*/
void
-xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+xfs_log_sb(
+ struct xfs_trans *tp)
{
- xfs_buf_t *bp;
- int first;
- int last;
- xfs_mount_t *mp;
- xfs_sb_field_t f;
-
- ASSERT(fields);
- if (!fields)
- return;
- mp = tp->t_mountp;
- bp = xfs_trans_getsb(tp, mp, 0);
- first = sizeof(xfs_sb_t);
- last = 0;
-
- /* translate/copy */
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0);
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
+ xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+ xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
+}
- /* find modified range */
- f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
- ASSERT((1LL << f) & XFS_SB_MOD_BITS);
- last = xfs_sb_info[f + 1].offset - 1;
+/*
+ * xfs_sync_sb
+ *
+ * Sync the superblock to disk.
+ *
+ * Note that the caller is responsible for checking the frozen state of the
+ * filesystem. This procedure uses the non-blocking transaction allocator and
+ * thus will allow modifications to a frozen fs. This is required because this
+ * code can be called during the process of freezing where use of the high-level
+ * allocator would deadlock.
+ */
+int
+xfs_sync_sb(
+ struct xfs_mount *mp,
+ bool wait)
+{
+ struct xfs_trans *tp;
+ int error;
- f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
- ASSERT((1LL << f) & XFS_SB_MOD_BITS);
- first = xfs_sb_info[f].offset;
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
- xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
- xfs_trans_log_buf(tp, bp, first, last);
+ xfs_log_sb(tp);
+ if (wait)
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp, 0);
}
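
[Editor's note] The reworked xfs_sb_quota_to_disk() above folds the incore per-type project/group quota flags into the single legacy on-disk OQUOTA bits when the superblock predates a separate pquotino. The following is a minimal user-space sketch of that folding, not the kernel code itself; the flag bit values and the qflags_to_disk() name are illustrative stand-ins, not the real XFS definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the incore and on-disk flag bits. */
	#define PQUOTA_ENFD  (1u << 0)
	#define PQUOTA_CHKD  (1u << 1)
	#define GQUOTA_ENFD  (1u << 2)
	#define GQUOTA_CHKD  (1u << 3)
	#define OQUOTA_ENFD  (1u << 4)   /* on-disk "other quota enforced" */
	#define OQUOTA_CHKD  (1u << 5)   /* on-disk "other quota checked"  */

	/* Fold the separate group/project flags into the shared OQUOTA bits. */
	static uint16_t qflags_to_disk(uint16_t incore)
	{
		uint16_t disk = incore & ~(PQUOTA_ENFD | PQUOTA_CHKD |
					   GQUOTA_ENFD | GQUOTA_CHKD);

		if (incore & (PQUOTA_ENFD | GQUOTA_ENFD))
			disk |= OQUOTA_ENFD;
		if (incore & (PQUOTA_CHKD | GQUOTA_CHKD))
			disk |= OQUOTA_CHKD;
		return disk;
	}

	int main(void)
	{
		/* project-enforced + group-checked collapses to both OQUOTA bits */
		printf("0x%x\n", qflags_to_disk(PQUOTA_ENFD | GQUOTA_CHKD));
		return 0;
	}
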
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 8eb1c54bafbf..b25bb9a343f3 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -27,11 +27,12 @@ extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
extern void xfs_perag_put(struct xfs_perag *pag);
extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
-extern void xfs_sb_calc_crc(struct xfs_buf *);
-extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
-extern void xfs_sb_mount_common(struct xfs_mount *, struct xfs_sb *);
-extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
-extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
+extern void xfs_sb_calc_crc(struct xfs_buf *bp);
+extern void xfs_log_sb(struct xfs_trans *tp);
+extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
+extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
+extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
+extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
#endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 82404da2ca67..8dda4b321343 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -82,7 +82,7 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
#define XFS_TRANS_ATTR_RM 23
#define XFS_TRANS_ATTR_FLAG 24
#define XFS_TRANS_CLEAR_AGI_BUCKET 25
-#define XFS_TRANS_QM_SBCHANGE 26
+#define XFS_TRANS_SB_CHANGE 26
/*
* Dummy entries since we use the transaction type to index into the
* trans_type[] in xlog_recover_print_trans_head()
@@ -95,17 +95,15 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
#define XFS_TRANS_QM_DQCLUSTER 32
#define XFS_TRANS_QM_QINOCREATE 33
#define XFS_TRANS_QM_QUOTAOFF_END 34
-#define XFS_TRANS_SB_UNIT 35
-#define XFS_TRANS_FSYNC_TS 36
-#define XFS_TRANS_GROWFSRT_ALLOC 37
-#define XFS_TRANS_GROWFSRT_ZERO 38
-#define XFS_TRANS_GROWFSRT_FREE 39
-#define XFS_TRANS_SWAPEXT 40
-#define XFS_TRANS_SB_COUNT 41
-#define XFS_TRANS_CHECKPOINT 42
-#define XFS_TRANS_ICREATE 43
-#define XFS_TRANS_CREATE_TMPFILE 44
-#define XFS_TRANS_TYPE_MAX 44
+#define XFS_TRANS_FSYNC_TS 35
+#define XFS_TRANS_GROWFSRT_ALLOC 36
+#define XFS_TRANS_GROWFSRT_ZERO 37
+#define XFS_TRANS_GROWFSRT_FREE 38
+#define XFS_TRANS_SWAPEXT 39
+#define XFS_TRANS_CHECKPOINT 40
+#define XFS_TRANS_ICREATE 41
+#define XFS_TRANS_CREATE_TMPFILE 42
+#define XFS_TRANS_TYPE_MAX 43
/* new transaction types need to be reflected in xfs_logprint(8) */
#define XFS_TRANS_TYPES \
@@ -113,7 +111,6 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
{ XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
{ XFS_TRANS_INACTIVE, "INACTIVE" }, \
{ XFS_TRANS_CREATE, "CREATE" }, \
- { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
{ XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
{ XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
{ XFS_TRANS_REMOVE, "REMOVE" }, \
@@ -134,23 +131,23 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
{ XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
{ XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
{ XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
- { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
+ { XFS_TRANS_SB_CHANGE, "SBCHANGE" }, \
+ { XFS_TRANS_DUMMY1, "DUMMY1" }, \
+ { XFS_TRANS_DUMMY2, "DUMMY2" }, \
{ XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
{ XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
{ XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
{ XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
{ XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
{ XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
- { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
{ XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
{ XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
{ XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
{ XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
{ XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
- { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
{ XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \
- { XFS_TRANS_DUMMY1, "DUMMY1" }, \
- { XFS_TRANS_DUMMY2, "DUMMY2" }, \
+ { XFS_TRANS_ICREATE, "ICREATE" }, \
+ { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
{ XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
/*
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index c80c5236c3da..e7e26bd6468f 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -178,6 +178,8 @@ xfs_symlink_local_to_remote(
struct xfs_mount *mp = ip->i_mount;
char *buf;
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
+
if (!xfs_sb_version_hascrc(&mp->m_sb)) {
bp->b_ops = NULL;
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 6c1330f29050..68cb1e7bf2bb 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -716,17 +716,6 @@ xfs_calc_clear_agi_bucket_reservation(
}
/*
- * Clearing the quotaflags in the superblock.
- * the super block for changing quota flags: sector size
- */
-STATIC uint
-xfs_calc_qm_sbchange_reservation(
- struct xfs_mount *mp)
-{
- return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
-/*
* Adjusting quota limits.
* the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
*/
@@ -864,9 +853,6 @@ xfs_trans_resv_calc(
* The following transactions are logged in logical format with
* a default log count.
*/
- resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp);
- resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT;
-
resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h
index 1097d14cd583..2d5bdfce6d8f 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.h
+++ b/fs/xfs/libxfs/xfs_trans_resv.h
@@ -56,7 +56,6 @@ struct xfs_trans_resv {
struct xfs_trans_res tr_growrtalloc; /* grow realtime allocations */
struct xfs_trans_res tr_growrtzero; /* grow realtime zeroing */
struct xfs_trans_res tr_growrtfree; /* grow realtime freeing */
- struct xfs_trans_res tr_qm_sbchange; /* change quota flags */
struct xfs_trans_res tr_qm_setqlim; /* adjust quota limits */
struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */
struct xfs_trans_res tr_qm_quotaoff; /* turn quota off */
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index b79dc66b2ecd..b79dc66b2ecd 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 18e2f3bbae5e..3a9b7a1b8704 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -135,30 +135,22 @@ xfs_setfilesize_trans_alloc(
*/
STATIC int
xfs_setfilesize(
- struct xfs_ioend *ioend)
+ struct xfs_inode *ip,
+ struct xfs_trans *tp,
+ xfs_off_t offset,
+ size_t size)
{
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_trans *tp = ioend->io_append_trans;
xfs_fsize_t isize;
- /*
- * The transaction may have been allocated in the I/O submission thread,
- * thus we need to mark ourselves as beeing in a transaction manually.
- * Similarly for freeze protection.
- */
- current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
- rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
- 0, 1, _THIS_IP_);
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
- isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
+ isize = xfs_new_eof(ip, offset + size);
if (!isize) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_trans_cancel(tp, 0);
return 0;
}
- trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+ trace_xfs_setfilesize(ip, offset, size);
ip->i_d.di_size = isize;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -167,6 +159,25 @@ xfs_setfilesize(
return xfs_trans_commit(tp, 0);
}
+STATIC int
+xfs_setfilesize_ioend(
+ struct xfs_ioend *ioend)
+{
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ struct xfs_trans *tp = ioend->io_append_trans;
+
+ /*
+ * The transaction may have been allocated in the I/O submission thread,
+ * thus we need to mark ourselves as being in a transaction manually.
+ * Similarly for freeze protection.
+ */
+ current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
+
+ return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+}
+
/*
* Schedule IO completion handling on the final put of an ioend.
*
@@ -182,8 +193,7 @@ xfs_finish_ioend(
if (ioend->io_type == XFS_IO_UNWRITTEN)
queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
- else if (ioend->io_append_trans ||
- (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
+ else if (ioend->io_append_trans)
queue_work(mp->m_data_workqueue, &ioend->io_work);
else
xfs_destroy_ioend(ioend);
@@ -215,22 +225,8 @@ xfs_end_io(
if (ioend->io_type == XFS_IO_UNWRITTEN) {
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
ioend->io_size);
- } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
- /*
- * For direct I/O we do not know if we need to allocate blocks
- * or not so we can't preallocate an append transaction as that
- * results in nested reservations and log space deadlocks. Hence
- * allocate the transaction here. While this is sub-optimal and
- * can block IO completion for some time, we're stuck with doing
- * it this way until we can pass the ioend to the direct IO
- * allocation callbacks and avoid nesting that way.
- */
- error = xfs_setfilesize_trans_alloc(ioend);
- if (error)
- goto done;
- error = xfs_setfilesize(ioend);
} else if (ioend->io_append_trans) {
- error = xfs_setfilesize(ioend);
+ error = xfs_setfilesize_ioend(ioend);
} else {
ASSERT(!xfs_ioend_is_append(ioend));
}
@@ -242,17 +238,6 @@ done:
}
/*
- * Call IO completion handling in caller context on the final put of an ioend.
- */
-STATIC void
-xfs_finish_ioend_sync(
- struct xfs_ioend *ioend)
-{
- if (atomic_dec_and_test(&ioend->io_remaining))
- xfs_end_io(&ioend->io_work);
-}
-
-/*
* Allocate and initialise an IO completion structure.
* We need to track unwritten extent write completion here initially.
* We'll need to extend this for updating the ondisk inode size later
@@ -273,7 +258,6 @@ xfs_alloc_ioend(
* all the I/O from calling the completion routine too early.
*/
atomic_set(&ioend->io_remaining, 1);
- ioend->io_isdirect = 0;
ioend->io_error = 0;
ioend->io_list = NULL;
ioend->io_type = type;
@@ -1459,11 +1443,7 @@ xfs_get_blocks_direct(
*
* If the private argument is non-NULL __xfs_get_blocks signals us that we
* need to issue a transaction to convert the range from unwritten to written
- * extents. In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done. But in case this was a successful AIO
- * request this handler is called from interrupt context, from which we
- * can't start transactions. In that case offload the I/O completion to
- * the workqueues we also use for buffered I/O completion.
+ * extents.
*/
STATIC void
xfs_end_io_direct_write(
@@ -1472,7 +1452,12 @@ xfs_end_io_direct_write(
ssize_t size,
void *private)
{
- struct xfs_ioend *ioend = iocb->private;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return;
/*
* While the generic direct I/O code updates the inode size, it does
@@ -1480,22 +1465,33 @@ xfs_end_io_direct_write(
* end_io handler thinks the on-disk size is outside the in-core
* size. To prevent this just update it a little bit earlier here.
*/
- if (offset + size > i_size_read(ioend->io_inode))
- i_size_write(ioend->io_inode, offset + size);
+ if (offset + size > i_size_read(inode))
+ i_size_write(inode, offset + size);
/*
- * blockdev_direct_IO can return an error even after the I/O
- * completion handler was called. Thus we need to protect
- * against double-freeing.
+ * For direct I/O we do not know if we need to allocate blocks or not,
+ * so we can't preallocate an append transaction, as that results in
+ * nested reservations and log space deadlocks. Hence allocate the
+ * transaction here. While this is sub-optimal and can block IO
+ * completion for some time, we're stuck with doing it this way until
+ * we can pass the ioend to the direct IO allocation callbacks and
+ * avoid nesting that way.
*/
- iocb->private = NULL;
-
- ioend->io_offset = offset;
- ioend->io_size = size;
- if (private && size > 0)
- ioend->io_type = XFS_IO_UNWRITTEN;
+ if (private && size > 0) {
+ xfs_iomap_write_unwritten(ip, offset, size);
+ } else if (offset + size > ip->i_d.di_size) {
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return;
+ }
- xfs_finish_ioend_sync(ioend);
+ xfs_setfilesize(ip, tp, offset, size);
+ }
}
STATIC ssize_t
@@ -1507,39 +1503,16 @@ xfs_vm_direct_IO(
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
- struct xfs_ioend *ioend = NULL;
- ssize_t ret;
if (rw & WRITE) {
- size_t size = iov_iter_count(iter);
-
- /*
- * We cannot preallocate a size update transaction here as we
- * don't know whether allocation is necessary or not. Hence we
- * can only tell IO completion that one is necessary if we are
- * not doing unwritten extent conversion.
- */
- iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
- if (offset + size > XFS_I(inode)->i_d.di_size)
- ioend->io_isdirect = 1;
-
- ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
offset, xfs_get_blocks_direct,
xfs_end_io_direct_write, NULL,
DIO_ASYNC_EXTEND);
- if (ret != -EIOCBQUEUED && iocb->private)
- goto out_destroy_ioend;
- } else {
- ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
- offset, xfs_get_blocks_direct,
- NULL, NULL, 0);
}
-
- return ret;
-
-out_destroy_ioend:
- xfs_destroy_ioend(ioend);
- return ret;
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+ offset, xfs_get_blocks_direct,
+ NULL, NULL, 0);
}
/*
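
[Editor's note] The xfs_aops.c hunks above move the direct I/O size-update decision to completion time: convert unwritten extents if the write covered preallocated space, otherwise allocate a transaction and update the on-disk size only when the write extended the file. A small user-space model of that decision follows; dio_complete() and the enum are hypothetical names used only for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	/* Model of the completion-time choice made in xfs_end_io_direct_write(). */
	enum dio_action { DIO_NOTHING, DIO_CONVERT_UNWRITTEN, DIO_UPDATE_SIZE };

	static enum dio_action dio_complete(long long offset, long long size,
					    bool unwritten, long long disk_size)
	{
		if (unwritten && size > 0)
			return DIO_CONVERT_UNWRITTEN;	/* covered a preallocated range */
		if (offset + size > disk_size)
			return DIO_UPDATE_SIZE;		/* write extended the file */
		return DIO_NOTHING;
	}

	int main(void)
	{
		printf("%d\n", dio_complete(4096, 4096, false, 4096)); /* extends EOF */
		return 0;
	}
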
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f94dd459dff9..ac644e0137a4 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -24,14 +24,12 @@ extern mempool_t *xfs_ioend_pool;
* Types of I/O for bmap clustering and I/O completion tracking.
*/
enum {
- XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
XFS_IO_DELALLOC, /* covers delalloc region */
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
};
#define XFS_IO_TYPES \
- { 0, "" }, \
{ XFS_IO_DELALLOC, "delalloc" }, \
{ XFS_IO_UNWRITTEN, "unwritten" }, \
{ XFS_IO_OVERWRITE, "overwrite" }
@@ -45,7 +43,6 @@ typedef struct xfs_ioend {
unsigned int io_type; /* delalloc / unwritten */
int io_error; /* I/O error code */
atomic_t io_remaining; /* hold count */
- unsigned int io_isdirect : 1;/* direct I/O */
struct inode *io_inode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
struct buffer_head *io_buffer_tail;/* buffer linked list tail */
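
[Editor's note] The XFS_IO_TYPES change above shows the value-to-name table used for tracing, which has to stay in sync with the enum once XFS_IO_DIRECT is gone. A self-contained sketch of that table pattern, with hypothetical names, is below.

	#include <stdio.h>

	/* Sketch of a { value, name } table that mirrors an enum for tracing. */
	enum io_type { IO_DELALLOC, IO_UNWRITTEN, IO_OVERWRITE };

	static const struct { enum io_type type; const char *name; } io_names[] = {
		{ IO_DELALLOC,  "delalloc"  },
		{ IO_UNWRITTEN, "unwritten" },
		{ IO_OVERWRITE, "overwrite" },
	};

	static const char *io_name(enum io_type t)
	{
		for (unsigned i = 0; i < sizeof(io_names) / sizeof(io_names[0]); i++)
			if (io_names[i].type == t)
				return io_names[i].name;
		return "unknown";
	}

	int main(void)
	{
		printf("%s\n", io_name(IO_UNWRITTEN));
		return 0;
	}
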
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 2fdb72d2c908..736429a72a12 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -26,43 +26,8 @@ struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
+struct xfs_bmalloca;
-/*
- * Argument structure for xfs_bmap_alloc.
- */
-struct xfs_bmalloca {
- xfs_fsblock_t *firstblock; /* i/o first block allocated */
- struct xfs_bmap_free *flist; /* bmap freelist */
- struct xfs_trans *tp; /* transaction pointer */
- struct xfs_inode *ip; /* incore inode pointer */
- struct xfs_bmbt_irec prev; /* extent before the new one */
- struct xfs_bmbt_irec got; /* extent after, or delayed */
-
- xfs_fileoff_t offset; /* offset in file filling in */
- xfs_extlen_t length; /* i/o length asked/allocated */
- xfs_fsblock_t blkno; /* starting block of new extent */
-
- struct xfs_btree_cur *cur; /* btree cursor */
- xfs_extnum_t idx; /* current extent index */
- int nallocs;/* number of extents alloc'd */
- int logflags;/* flags for transaction logging */
-
- xfs_extlen_t total; /* total blocks needed for xaction */
- xfs_extlen_t minlen; /* minimum allocation size (blocks) */
- xfs_extlen_t minleft; /* amount must be left after alloc */
- bool eof; /* set if allocating past last extent */
- bool wasdel; /* replacing a delayed allocation */
- bool userdata;/* set if is user data */
- bool aeof; /* allocated space at eof */
- bool conv; /* overwriting unwritten extents */
- int flags;
- struct completion *done;
- struct work_struct work;
- int result;
-};
-
-int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
- int *committed);
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index bb502a391792..1790b00bea7a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1488,6 +1488,7 @@ xfs_buf_iomove(
static enum lru_status
xfs_buftarg_wait_rele(
struct list_head *item,
+ struct list_lru_one *lru,
spinlock_t *lru_lock,
void *arg)
@@ -1509,7 +1510,7 @@ xfs_buftarg_wait_rele(
*/
atomic_set(&bp->b_lru_ref, 0);
bp->b_state |= XFS_BSTATE_DISPOSE;
- list_move(item, dispose);
+ list_lru_isolate_move(lru, item, dispose);
spin_unlock(&bp->b_lock);
return LRU_REMOVED;
}
@@ -1546,6 +1547,7 @@ xfs_wait_buftarg(
static enum lru_status
xfs_buftarg_isolate(
struct list_head *item,
+ struct list_lru_one *lru,
spinlock_t *lru_lock,
void *arg)
{
@@ -1569,7 +1571,7 @@ xfs_buftarg_isolate(
}
bp->b_state |= XFS_BSTATE_DISPOSE;
- list_move(item, dispose);
+ list_lru_isolate_move(lru, item, dispose);
spin_unlock(&bp->b_lock);
return LRU_REMOVED;
}
@@ -1583,10 +1585,9 @@ xfs_buftarg_shrink_scan(
struct xfs_buftarg, bt_shrinker);
LIST_HEAD(dispose);
unsigned long freed;
- unsigned long nr_to_scan = sc->nr_to_scan;
- freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
- &dispose, &nr_to_scan);
+ freed = list_lru_shrink_walk(&btp->bt_lru, sc,
+ xfs_buftarg_isolate, &dispose);
while (!list_empty(&dispose)) {
struct xfs_buf *bp;
@@ -1605,7 +1606,7 @@ xfs_buftarg_shrink_count(
{
struct xfs_buftarg *btp = container_of(shrink,
struct xfs_buftarg, bt_shrinker);
- return list_lru_count_node(&btp->bt_lru, sc->nid);
+ return list_lru_shrink_count(&btp->bt_lru, sc);
}
void
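
[Editor's note] The xfs_buf.c hunks above adapt the buffer-cache shrinker to the newer list_lru walk API: the isolate callback moves victims off the LRU into a private dispose list while the walk holds the lock, and the caller frees them afterwards. The following is a user-space model of that isolate-and-dispose pattern using a plain singly-linked list; it is illustrative only and does not use the kernel list_lru API.

	#include <stdio.h>

	struct node { struct node *next; int busy; };

	/* Returns the dispose list; *lru is updated to hold only the survivors. */
	static struct node *isolate_idle(struct node **lru)
	{
		struct node *dispose = NULL, **pp = lru;

		while (*pp) {
			struct node *n = *pp;
			if (!n->busy) {
				*pp = n->next;		/* unlink from the LRU  */
				n->next = dispose;	/* move to dispose list */
				dispose = n;
			} else {
				pp = &n->next;
			}
		}
		return dispose;
	}

	int main(void)
	{
		struct node a = { NULL, 1 }, b = { &a, 0 };
		struct node *lru = &b, *kill = isolate_idle(&lru);
		printf("disposed=%d survivors=%d\n", kill != NULL, lru != NULL);
		return 0;
	}
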
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 3f9bd58edec7..507d96a57ac7 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -319,6 +319,10 @@ xfs_buf_item_format(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
+ ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
+ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
+ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
+
/*
* If it is an inode buffer, transfer the in-memory state to the
@@ -535,7 +539,7 @@ xfs_buf_item_push(
if ((bp->b_flags & XBF_WRITE_FAIL) &&
___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
xfs_warn(bp->b_target->bt_mount,
-"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+"Detected failing async write on buffer block 0x%llx. Retrying async write.",
(long long)bp->b_bn);
}
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index c24c67e22a2a..2f536f33cd26 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -86,7 +86,7 @@ static inline void xfs_dqflock(xfs_dquot_t *dqp)
wait_for_completion(&dqp->q_flush);
}
-static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
{
return try_wait_for_completion(&dqp->q_flush);
}

diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 13e974e6a889..1cdba95c78cb 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -127,6 +127,42 @@ xfs_iozero(
return (-status);
}
+int
+xfs_update_prealloc_flags(
+ struct xfs_inode *ip,
+ enum xfs_prealloc_flags flags)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+ error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ if (!(flags & XFS_PREALLOC_INVISIBLE)) {
+ ip->i_d.di_mode &= ~S_ISUID;
+ if (ip->i_d.di_mode & S_IXGRP)
+ ip->i_d.di_mode &= ~S_ISGID;
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ }
+
+ if (flags & XFS_PREALLOC_SET)
+ ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+ if (flags & XFS_PREALLOC_CLEAR)
+ ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ if (flags & XFS_PREALLOC_SYNC)
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp, 0);
+}
+
/*
* Fsync operations on directories are much simpler than on regular files,
* as there is no file data to flush, and thus also no need for explicit
@@ -699,7 +735,7 @@ xfs_file_buffered_aio_write(
iov_iter_truncate(from, count);
/* We can write back this queue in page reclaim */
- current->backing_dev_info = mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
write_retry:
trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
@@ -784,8 +820,8 @@ xfs_file_fallocate(
{
struct inode *inode = file_inode(file);
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_trans *tp;
long error;
+ enum xfs_prealloc_flags flags = 0;
loff_t new_size = 0;
if (!S_ISREG(inode->i_mode))
@@ -822,6 +858,8 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else {
+ flags |= XFS_PREALLOC_SET;
+
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
@@ -839,28 +877,10 @@ xfs_file_fallocate(
goto out_unlock;
}
- tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
- error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto out_unlock;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- ip->i_d.di_mode &= ~S_ISUID;
- if (ip->i_d.di_mode & S_IXGRP)
- ip->i_d.di_mode &= ~S_ISGID;
-
- if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
- ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
if (file->f_flags & O_DSYNC)
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
+ flags |= XFS_PREALLOC_SYNC;
+
+ error = xfs_update_prealloc_flags(ip, flags);
if (error)
goto out_unlock;
@@ -1384,5 +1404,4 @@ static const struct vm_operations_struct xfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = xfs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
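
[Editor's note] The xfs_file.c hunks above consolidate the post-preallocation inode updates behind xfs_update_prealloc_flags() and a flags enum (SET/CLEAR/SYNC/INVISIBLE). A minimal user-space model of that flag handling follows; struct fake_inode, update_prealloc() and the constants are hypothetical stand-ins for illustration only.

	#include <stdio.h>

	enum prealloc_flags {
		PREALLOC_SET       = 1 << 1,
		PREALLOC_CLEAR     = 1 << 2,
		PREALLOC_SYNC      = 1 << 3,
		PREALLOC_INVISIBLE = 1 << 4,
	};

	struct fake_inode { unsigned mode; unsigned diflags; };
	#define DIFLAG_PREALLOC 0x1
	#define MODE_SUID       04000

	static void update_prealloc(struct fake_inode *ip, int flags)
	{
		if (!(flags & PREALLOC_INVISIBLE))
			ip->mode &= ~MODE_SUID;		/* visible change: drop setuid */
		if (flags & PREALLOC_SET)
			ip->diflags |= DIFLAG_PREALLOC;
		if (flags & PREALLOC_CLEAR)
			ip->diflags &= ~DIFLAG_PREALLOC;
		/* PREALLOC_SYNC would make the commit synchronous in the kernel. */
	}

	int main(void)
	{
		struct fake_inode ino = { MODE_SUID, 0 };
		update_prealloc(&ino, PREALLOC_SET | PREALLOC_SYNC);
		printf("mode=%o diflags=%x\n", ino.mode, ino.diflags);
		return 0;
	}
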
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index fdc64220fcb0..fba6532efba4 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -488,6 +488,7 @@ xfs_growfs_data_private(
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
if (dpct)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
+ xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
if (error)
return error;
@@ -541,7 +542,7 @@ xfs_growfs_data_private(
saved_error = error;
continue;
}
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
+ xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
@@ -756,37 +757,6 @@ out:
return 0;
}
-/*
- * Dump a transaction into the log that contains no real change. This is needed
- * to be able to make the log dirty or stamp the current tail LSN into the log
- * during the covering operation.
- *
- * We cannot use an inode here for this - that will push dirty state back up
- * into the VFS and then periodic inode flushing will prevent log covering from
- * making progress. Hence we log a field in the superblock instead and use a
- * synchronous transaction to ensure the superblock is immediately unpinned
- * and can be written back.
- */
-int
-xfs_fs_log_dummy(
- xfs_mount_t *mp)
-{
- xfs_trans_t *tp;
- int error;
-
- tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- /* log the UUID because it is an unchanging field */
- xfs_mod_sb(tp, XFS_SB_UUID);
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp, 0);
-}
-
int
xfs_fs_goingdown(
xfs_mount_t *mp,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 41f804e740d7..daafa1f6d260 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1995,6 +1995,7 @@ xfs_iunlink(
agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
return 0;
@@ -2086,6 +2087,7 @@ xfs_iunlink_remove(
agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
} else {
@@ -2656,6 +2658,124 @@ xfs_sort_for_rename(
}
/*
+ * xfs_cross_rename()
+ *
+ * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall
+ */
+STATIC int
+xfs_cross_rename(
+ struct xfs_trans *tp,
+ struct xfs_inode *dp1,
+ struct xfs_name *name1,
+ struct xfs_inode *ip1,
+ struct xfs_inode *dp2,
+ struct xfs_name *name2,
+ struct xfs_inode *ip2,
+ struct xfs_bmap_free *free_list,
+ xfs_fsblock_t *first_block,
+ int spaceres)
+{
+ int error = 0;
+ int ip1_flags = 0;
+ int ip2_flags = 0;
+ int dp2_flags = 0;
+
+ /* Swap inode number for dirent in first parent */
+ error = xfs_dir_replace(tp, dp1, name1,
+ ip2->i_ino,
+ first_block, free_list, spaceres);
+ if (error)
+ goto out;
+
+ /* Swap inode number for dirent in second parent */
+ error = xfs_dir_replace(tp, dp2, name2,
+ ip1->i_ino,
+ first_block, free_list, spaceres);
+ if (error)
+ goto out;
+
+ /*
+ * If we're renaming one or more directories across different parents,
+ * update the respective ".." entries (and link counts) to match the new
+ * parents.
+ */
+ if (dp1 != dp2) {
+ dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+
+ if (S_ISDIR(ip2->i_d.di_mode)) {
+ error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
+ dp1->i_ino, first_block,
+ free_list, spaceres);
+ if (error)
+ goto out;
+
+ /* transfer ip2 ".." reference to dp1 */
+ if (!S_ISDIR(ip1->i_d.di_mode)) {
+ error = xfs_droplink(tp, dp2);
+ if (error)
+ goto out;
+ error = xfs_bumplink(tp, dp1);
+ if (error)
+ goto out;
+ }
+
+ /*
+ * Although ip1 isn't changed here, userspace needs
+ * to be notified of the change, so that applications
+ * relying on it (such as backup tools) will properly
+ * pick up the change
+ */
+ ip1_flags |= XFS_ICHGTIME_CHG;
+ ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ }
+
+ if (S_ISDIR(ip1->i_d.di_mode)) {
+ error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
+ dp2->i_ino, first_block,
+ free_list, spaceres);
+ if (error)
+ goto out;
+
+ /* transfer ip1 ".." reference to dp2 */
+ if (!S_ISDIR(ip2->i_d.di_mode)) {
+ error = xfs_droplink(tp, dp1);
+ if (error)
+ goto out;
+ error = xfs_bumplink(tp, dp2);
+ if (error)
+ goto out;
+ }
+
+ /*
+ * Although ip2 isn't changed here, userspace needs
+ * to be notified of the change, so that applications
+ * relying on it (such as backup tools) will properly
+ * pick up the change
+ */
+ ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ ip2_flags |= XFS_ICHGTIME_CHG;
+ }
+ }
+
+ if (ip1_flags) {
+ xfs_trans_ichgtime(tp, ip1, ip1_flags);
+ xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
+ }
+ if (ip2_flags) {
+ xfs_trans_ichgtime(tp, ip2, ip2_flags);
+ xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
+ }
+ if (dp2_flags) {
+ xfs_trans_ichgtime(tp, dp2, dp2_flags);
+ xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
+ }
+ xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
+out:
+ return error;
+}
+
+/*
* xfs_rename
*/
int
@@ -2665,7 +2785,8 @@ xfs_rename(
xfs_inode_t *src_ip,
xfs_inode_t *target_dp,
struct xfs_name *target_name,
- xfs_inode_t *target_ip)
+ xfs_inode_t *target_ip,
+ unsigned int flags)
{
xfs_trans_t *tp = NULL;
xfs_mount_t *mp = src_dp->i_mount;
@@ -2743,6 +2864,18 @@ xfs_rename(
}
/*
+ * Handle RENAME_EXCHANGE flags
+ */
+ if (flags & RENAME_EXCHANGE) {
+ error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
+ target_dp, target_name, target_ip,
+ &free_list, &first_block, spaceres);
+ if (error)
+ goto abort_return;
+ goto finish_rename;
+ }
+
+ /*
* Set up the target.
*/
if (target_ip == NULL) {
@@ -2881,6 +3014,7 @@ xfs_rename(
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
+finish_rename:
/*
* If this is a synchronous mount, make sure that the
* rename transaction goes to disk before returning to
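
[Editor's note] xfs_cross_rename() above handles RENAME_EXCHANGE: when the two entries live in different parent directories and exactly one of them is a directory, that directory's ".." reference (and therefore the parents' link counts) must move from the old parent to the new one. A tiny user-space model of just that link-count bookkeeping follows; struct dir and cross_rename_links() are hypothetical names for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	struct dir { int nlink; };

	static void cross_rename_links(struct dir *dp1, bool ip1_is_dir,
				       struct dir *dp2, bool ip2_is_dir)
	{
		if (ip2_is_dir && !ip1_is_dir) {	/* ip2's ".." now points at dp1 */
			dp2->nlink--;
			dp1->nlink++;
		}
		if (ip1_is_dir && !ip2_is_dir) {	/* ip1's ".." now points at dp2 */
			dp1->nlink--;
			dp2->nlink++;
		}
	}

	int main(void)
	{
		struct dir a = { 3 }, b = { 2 };
		cross_rename_links(&a, true, &b, false);	/* directory moves a -> b */
		printf("a=%d b=%d\n", a.nlink, b.nlink);	/* a=2 b=3 */
		return 0;
	}
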
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4ed2ba9342dc..86cd6b39bed7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -338,7 +338,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
struct xfs_inode *src_ip, struct xfs_inode *target_dp,
struct xfs_name *target_name,
- struct xfs_inode *target_ip);
+ struct xfs_inode *target_ip, unsigned int flags);
void xfs_ilock(xfs_inode_t *, uint);
int xfs_ilock_nowait(xfs_inode_t *, uint);
@@ -377,6 +377,15 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *);
int xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
/* from xfs_file.c */
+enum xfs_prealloc_flags {
+ XFS_PREALLOC_SET = (1 << 1),
+ XFS_PREALLOC_CLEAR = (1 << 2),
+ XFS_PREALLOC_SYNC = (1 << 3),
+ XFS_PREALLOC_INVISIBLE = (1 << 4),
+};
+
+int xfs_update_prealloc_flags(struct xfs_inode *,
+ enum xfs_prealloc_flags);
int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
int xfs_iozero(struct xfs_inode *, loff_t, size_t);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a1831980a68e..f7afb86c9148 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -606,11 +606,8 @@ xfs_ioc_space(
unsigned int cmd,
xfs_flock64_t *bf)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
struct iattr iattr;
- bool setprealloc = false;
- bool clrprealloc = false;
+ enum xfs_prealloc_flags flags = 0;
int error;
/*
@@ -630,6 +627,11 @@ xfs_ioc_space(
if (!S_ISREG(inode->i_mode))
return -EINVAL;
+ if (filp->f_flags & O_DSYNC)
+ flags |= XFS_PREALLOC_SYNC;
+ if (ioflags & XFS_IO_INVIS)
+ flags |= XFS_PREALLOC_INVISIBLE;
+
error = mnt_want_write_file(filp);
if (error)
return error;
@@ -673,25 +675,23 @@ xfs_ioc_space(
}
if (bf->l_start < 0 ||
- bf->l_start > mp->m_super->s_maxbytes ||
+ bf->l_start > inode->i_sb->s_maxbytes ||
bf->l_start + bf->l_len < 0 ||
- bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+ bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
error = -EINVAL;
goto out_unlock;
}
switch (cmd) {
case XFS_IOC_ZERO_RANGE:
+ flags |= XFS_PREALLOC_SET;
error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
- if (!error)
- setprealloc = true;
break;
case XFS_IOC_RESVSP:
case XFS_IOC_RESVSP64:
+ flags |= XFS_PREALLOC_SET;
error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
XFS_BMAPI_PREALLOC);
- if (!error)
- setprealloc = true;
break;
case XFS_IOC_UNRESVSP:
case XFS_IOC_UNRESVSP64:
@@ -701,6 +701,7 @@ xfs_ioc_space(
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP:
case XFS_IOC_FREESP64:
+ flags |= XFS_PREALLOC_CLEAR;
if (bf->l_start > XFS_ISIZE(ip)) {
error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
bf->l_start - XFS_ISIZE(ip), 0);
@@ -712,8 +713,6 @@ xfs_ioc_space(
iattr.ia_size = bf->l_start;
error = xfs_setattr_size(ip, &iattr);
- if (!error)
- clrprealloc = true;
break;
default:
ASSERT(0);
@@ -723,32 +722,7 @@ xfs_ioc_space(
if (error)
goto out_unlock;
- tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto out_unlock;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- if (!(ioflags & XFS_IO_INVIS)) {
- ip->i_d.di_mode &= ~S_ISUID;
- if (ip->i_d.di_mode & S_IXGRP)
- ip->i_d.di_mode &= ~S_ISGID;
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
-
- if (setprealloc)
- ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
- else if (clrprealloc)
- ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (filp->f_flags & O_DSYNC)
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
+ error = xfs_update_prealloc_flags(ip, flags);
out_unlock:
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -1013,20 +987,182 @@ xfs_diflags_to_linux(
inode->i_flags &= ~S_NOATIME;
}
-#define FSX_PROJID 1
-#define FSX_EXTSIZE 2
-#define FSX_XFLAGS 4
-#define FSX_NONBLOCK 8
+static int
+xfs_ioctl_setattr_xflags(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct fsxattr *fa)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ /* Can't change realtime flag if any extents are allocated. */
+ if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
+ XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & XFS_XFLAG_REALTIME))
+ return -EINVAL;
+
+ /* If realtime flag is set then must have realtime device */
+ if (fa->fsx_xflags & XFS_XFLAG_REALTIME) {
+ if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
+ (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
+ return -EINVAL;
+ }
+
+ /*
+ * Can't modify an immutable/append-only file unless
+ * we have appropriate permission.
+ */
+ if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
+ (fa->fsx_xflags & (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ xfs_set_diflags(ip, fa->fsx_xflags);
+ xfs_diflags_to_linux(ip);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ XFS_STATS_INC(xs_ig_attrchg);
+ return 0;
+}
+
+/*
+ * Set up the transaction structure for the setattr operation, checking that we
+ * have permission to do so. On success, return a clean transaction and the
+ * inode locked exclusively, ready for further operation-specific checks. On
+ * failure, return an error without modifying or locking the inode.
+ */
+static struct xfs_trans *
+xfs_ioctl_setattr_get_trans(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return ERR_PTR(-EROFS);
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return ERR_PTR(-EIO);
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+ if (error)
+ goto out_cancel;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ /*
+ * CAP_FOWNER overrides the following restrictions:
+ *
+ * The user ID of the calling process must be equal to the file owner
+ * ID, except in cases where the CAP_FSETID capability is applicable.
+ */
+ if (!inode_owner_or_capable(VFS_I(ip))) {
+ error = -EPERM;
+ goto out_cancel;
+ }
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
+
+ return tp;
+
+out_cancel:
+ xfs_trans_cancel(tp, 0);
+ return ERR_PTR(error);
+}
+
+/*
+ * Extent size hint validation is somewhat cumbersome. The rules are:
+ *
+ * 1. extent size hint is only valid for directories and regular files
+ * 2. XFS_XFLAG_EXTSIZE is only valid for regular files
+ * 3. XFS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. can only be changed on regular files if no extents are allocated
+ * 5. can be changed on directories at any time
+ * 6. extsize hint of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ * 8. for non-realtime files, the extent size hint must be limited
+ * to half the AG size to avoid alignment extending the extent beyond the
+ * limits of the AG.
+ */
+static int
+xfs_ioctl_setattr_check_extsize(
+ struct xfs_inode *ip,
+ struct fsxattr *fa)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ if ((fa->fsx_xflags & XFS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode))
+ return -EINVAL;
+
+ if ((fa->fsx_xflags & XFS_XFLAG_EXTSZINHERIT) &&
+ !S_ISDIR(ip->i_d.di_mode))
+ return -EINVAL;
+
+ if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents &&
+ ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+ return -EINVAL;
+
+ if (fa->fsx_extsize != 0) {
+ xfs_extlen_t size;
+ xfs_fsblock_t extsize_fsb;
+
+ extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
+ if (extsize_fsb > MAXEXTLEN)
+ return -EINVAL;
+
+ if (XFS_IS_REALTIME_INODE(ip) ||
+ (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
+ size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
+ } else {
+ size = mp->m_sb.sb_blocksize;
+ if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+ return -EINVAL;
+ }
+
+ if (fa->fsx_extsize % size)
+ return -EINVAL;
+ } else
+ fa->fsx_xflags &= ~(XFS_XFLAG_EXTSIZE | XFS_XFLAG_EXTSZINHERIT);
+
+ return 0;
+}
+
+static int
+xfs_ioctl_setattr_check_projid(
+ struct xfs_inode *ip,
+ struct fsxattr *fa)
+{
+ /* Disallow 32bit project ids if projid32bit feature is not enabled. */
+ if (fa->fsx_projid > (__uint16_t)-1 &&
+ !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
+ return -EINVAL;
+
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (xfs_get_projid(ip) != fa->fsx_projid)
+ return -EINVAL;
+ if ((fa->fsx_xflags & XFS_XFLAG_PROJINHERIT) !=
+ (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
+ return -EINVAL;
+
+ return 0;
+}
STATIC int
xfs_ioctl_setattr(
xfs_inode_t *ip,
- struct fsxattr *fa,
- int mask)
+ struct fsxattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- unsigned int lock_flags = 0;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
@@ -1034,17 +1170,9 @@ xfs_ioctl_setattr(
trace_xfs_ioctl_setattr(ip);
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return -EROFS;
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- /*
- * Disallow 32bit project ids when projid32bit feature is not enabled.
- */
- if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
- !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
- return -EINVAL;
+ code = xfs_ioctl_setattr_check_projid(ip, fa);
+ if (code)
+ return code;
/*
* If disk quotas is on, we make sure that the dquots do exist on disk,
@@ -1054,7 +1182,7 @@ xfs_ioctl_setattr(
* If the IDs do change before we take the ilock, we're covered
* because the i_*dquot fields will get updated anyway.
*/
- if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
+ if (XFS_IS_QUOTA_ON(mp)) {
code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
ip->i_d.di_gid, fa->fsx_projid,
XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
@@ -1062,175 +1190,49 @@ xfs_ioctl_setattr(
return code;
}
- /*
- * For the other attributes, we acquire the inode lock and
- * first do an error checking pass.
- */
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- code = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
- if (code)
- goto error_return;
-
- lock_flags = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lock_flags);
-
- /*
- * CAP_FOWNER overrides the following restrictions:
- *
- * The user ID of the calling process must be equal
- * to the file owner ID, except in cases where the
- * CAP_FSETID capability is applicable.
- */
- if (!inode_owner_or_capable(VFS_I(ip))) {
- code = -EPERM;
- goto error_return;
- }
-
- /*
- * Do a quota reservation only if projid is actually going to change.
- * Only allow changing of projid from init_user_ns since it is a
- * non user namespace aware identifier.
- */
- if (mask & FSX_PROJID) {
- if (current_user_ns() != &init_user_ns) {
- code = -EINVAL;
- goto error_return;
- }
-
- if (XFS_IS_QUOTA_RUNNING(mp) &&
- XFS_IS_PQUOTA_ON(mp) &&
- xfs_get_projid(ip) != fa->fsx_projid) {
- ASSERT(tp);
- code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL,
- pdqp, capable(CAP_FOWNER) ?
- XFS_QMOPT_FORCE_RES : 0);
- if (code) /* out of quota */
- goto error_return;
- }
+ tp = xfs_ioctl_setattr_get_trans(ip);
+ if (IS_ERR(tp)) {
+ code = PTR_ERR(tp);
+ goto error_free_dquots;
}
- if (mask & FSX_EXTSIZE) {
- /*
- * Can't change extent size if any extents are allocated.
- */
- if (ip->i_d.di_nextents &&
- ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
- fa->fsx_extsize)) {
- code = -EINVAL; /* EFBIG? */
- goto error_return;
- }
- /*
- * Extent size must be a multiple of the appropriate block
- * size, if set at all. It must also be smaller than the
- * maximum extent size supported by the filesystem.
- *
- * Also, for non-realtime files, limit the extent size hint to
- * half the size of the AGs in the filesystem so alignment
- * doesn't result in extents larger than an AG.
- */
- if (fa->fsx_extsize != 0) {
- xfs_extlen_t size;
- xfs_fsblock_t extsize_fsb;
-
- extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
- if (extsize_fsb > MAXEXTLEN) {
- code = -EINVAL;
- goto error_return;
- }
-
- if (XFS_IS_REALTIME_INODE(ip) ||
- ((mask & FSX_XFLAGS) &&
- (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
- size = mp->m_sb.sb_rextsize <<
- mp->m_sb.sb_blocklog;
- } else {
- size = mp->m_sb.sb_blocksize;
- if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
- code = -EINVAL;
- goto error_return;
- }
- }
-
- if (fa->fsx_extsize % size) {
- code = -EINVAL;
- goto error_return;
- }
- }
+ if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
+ xfs_get_projid(ip) != fa->fsx_projid) {
+ code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
+ capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
+ if (code) /* out of quota */
+ goto error_trans_cancel;
}
+ code = xfs_ioctl_setattr_check_extsize(ip, fa);
+ if (code)
+ goto error_trans_cancel;
- if (mask & FSX_XFLAGS) {
- /*
- * Can't change realtime flag if any extents are allocated.
- */
- if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
- (XFS_IS_REALTIME_INODE(ip)) !=
- (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
- code = -EINVAL; /* EFBIG? */
- goto error_return;
- }
-
- /*
- * If realtime flag is set then must have realtime data.
- */
- if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
- if ((mp->m_sb.sb_rblocks == 0) ||
- (mp->m_sb.sb_rextsize == 0) ||
- (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
- code = -EINVAL;
- goto error_return;
- }
- }
-
- /*
- * Can't modify an immutable/append-only file unless
- * we have appropriate permission.
- */
- if ((ip->i_d.di_flags &
- (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
- (fa->fsx_xflags &
- (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
- !capable(CAP_LINUX_IMMUTABLE)) {
- code = -EPERM;
- goto error_return;
- }
- }
-
- xfs_trans_ijoin(tp, ip, 0);
+ code = xfs_ioctl_setattr_xflags(tp, ip, fa);
+ if (code)
+ goto error_trans_cancel;
/*
- * Change file ownership. Must be the owner or privileged.
+ * Change file ownership. Must be the owner or privileged. CAP_FSETID
+ * overrides the following restrictions:
+ *
+ * The set-user-ID and set-group-ID bits of a file will be cleared upon
+ * successful return from chown()
*/
- if (mask & FSX_PROJID) {
- /*
- * CAP_FSETID overrides the following restrictions:
- *
- * The set-user-ID and set-group-ID bits of a file will be
- * cleared upon successful return from chown()
- */
- if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
- !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
- ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
-
- /*
- * Change the ownerships and register quota modifications
- * in the transaction.
- */
- if (xfs_get_projid(ip) != fa->fsx_projid) {
- if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
- olddquot = xfs_qm_vop_chown(tp, ip,
- &ip->i_pdquot, pdqp);
- }
- ASSERT(ip->i_d.di_version > 1);
- xfs_set_projid(ip, fa->fsx_projid);
- }
- }
+ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+ !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
+ ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
- if (mask & FSX_XFLAGS) {
- xfs_set_diflags(ip, fa->fsx_xflags);
- xfs_diflags_to_linux(ip);
+ /* Change the ownerships and register project quota modifications */
+ if (xfs_get_projid(ip) != fa->fsx_projid) {
+ if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
+ olddquot = xfs_qm_vop_chown(tp, ip,
+ &ip->i_pdquot, pdqp);
+ }
+ ASSERT(ip->i_d.di_version > 1);
+ xfs_set_projid(ip, fa->fsx_projid);
}
/*
@@ -1238,34 +1240,12 @@ xfs_ioctl_setattr(
* extent size hint should be set on the inode. If no extent size flags
* are set on the inode then unconditionally clear the extent size hint.
*/
- if (mask & FSX_EXTSIZE) {
- int extsize = 0;
-
- if (ip->i_d.di_flags &
- (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
- extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
- ip->i_d.di_extsize = extsize;
- }
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- XFS_STATS_INC(xs_ig_attrchg);
+ if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
+ ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
+ else
+ ip->i_d.di_extsize = 0;
- /*
- * If this is a synchronous mount, make sure that the
- * transaction goes to disk before returning to the user.
- * This is slightly sub-optimal in that truncates require
- * two sync transactions instead of one for wsync filesystems.
- * One for the truncate and one for the timestamps since we
- * don't want to change the timestamps unless we're sure the
- * truncate worked. Truncates are less than 1% of the laddis
- * mix so this probably isn't worth the trouble to optimize.
- */
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(tp);
code = xfs_trans_commit(tp, 0);
- xfs_iunlock(ip, lock_flags);
/*
* Release any dquot(s) the inode had kept before chown.
@@ -1276,12 +1256,11 @@ xfs_ioctl_setattr(
return code;
- error_return:
+error_trans_cancel:
+ xfs_trans_cancel(tp, 0);
+error_free_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(pdqp);
- xfs_trans_cancel(tp, 0);
- if (lock_flags)
- xfs_iunlock(ip, lock_flags);
return code;
}
@@ -1292,20 +1271,15 @@ xfs_ioc_fssetxattr(
void __user *arg)
{
struct fsxattr fa;
- unsigned int mask;
int error;
if (copy_from_user(&fa, arg, sizeof(fa)))
return -EFAULT;
- mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID;
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- mask |= FSX_NONBLOCK;
-
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_ioctl_setattr(ip, &fa, mask);
+ error = xfs_ioctl_setattr(ip, &fa);
mnt_drop_write_file(filp);
return error;
}
@@ -1325,14 +1299,14 @@ xfs_ioc_getxflags(
STATIC int
xfs_ioc_setxflags(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
struct file *filp,
void __user *arg)
{
+ struct xfs_trans *tp;
struct fsxattr fa;
unsigned int flags;
- unsigned int mask;
- int error;
+ int error;
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
@@ -1342,15 +1316,26 @@ xfs_ioc_setxflags(
FS_SYNC_FL))
return -EOPNOTSUPP;
- mask = FSX_XFLAGS;
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- mask |= FSX_NONBLOCK;
fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_ioctl_setattr(ip, &fa, mask);
+
+ tp = xfs_ioctl_setattr_get_trans(ip);
+ if (IS_ERR(tp)) {
+ error = PTR_ERR(tp);
+ goto out_drop_write;
+ }
+
+ error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto out_drop_write;
+ }
+
+ error = xfs_trans_commit(tp, 0);
+out_drop_write:
mnt_drop_write_file(filp);
return error;
}
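
The FSX_* mask plumbing is gone from the kernel side, but the userspace entry point is unchanged: callers still pass a struct fsxattr through the FSGETXATTR/FSSETXATTR ioctls, and the refactored helpers above now validate every field unconditionally. A minimal, hedged sketch of the calling side, assuming the xfsprogs-installed <xfs/xfs_fs.h> header (newer kernels also expose the equivalent generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR names):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* XFS_IOC_FSGETXATTR, struct fsxattr */

/* Read the current attributes, change the project ID, write them back.
 * The kernel side of the second ioctl is xfs_ioc_fssetxattr() above. */
int main(int argc, char **argv)
{
	struct fsxattr fa;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <path> <projid>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, XFS_IOC_FSGETXATTR, &fa) < 0) {
		perror("XFS_IOC_FSGETXATTR");
		return 1;
	}

	fa.fsx_projid = (unsigned int)strtoul(argv[2], NULL, 0);

	if (ioctl(fd, XFS_IOC_FSSETXATTR, &fa) < 0) {
		perror("XFS_IOC_FSSETXATTR");
		return 1;
	}

	close(fd);
	return 0;
}
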
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index ec6772866f3d..bfc7c7c8a0c8 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -423,7 +423,7 @@ xfs_compat_attrmulti_by_handle(
ops = memdup_user(compat_ptr(am_hreq.ops), size);
if (IS_ERR(ops)) {
- error = -PTR_ERR(ops);
+ error = PTR_ERR(ops);
goto out_dput;
}
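
PTR_ERR() already returns the negative errno that ERR_PTR() encoded into the pointer, so the extra minus sign flipped the error positive and callers treating negative values as failure would miss it. A stand-alone sketch of the convention; the ERR_PTR/PTR_ERR/IS_ERR helpers are re-implemented here for illustration rather than taken from kernel headers:

#include <stdio.h>

/* Userspace re-implementation of the kernel's ERR_PTR()/PTR_ERR()
 * convention: errors are stored as pointer values in the top page. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *ERR_PTR(long error)	{ return (void *)error; }
static long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static int IS_ERR(const void *ptr)	{ return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	void *ops = ERR_PTR(-12);	/* e.g. -ENOMEM from memdup_user() */

	if (IS_ERR(ops)) {
		long right = PTR_ERR(ops);	/* -12: what callers expect */
		long wrong = -PTR_ERR(ops);	/* +12: the bug being fixed */

		printf("PTR_ERR  = %ld\n-PTR_ERR = %ld\n", right, wrong);
	}
	return 0;
}
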
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index c980e2a5086b..ccb1dd0d509e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -802,7 +802,7 @@ int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
xfs_off_t offset,
- size_t count)
+ xfs_off_t count)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 411fbb8919ef..8688e663d744 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -27,6 +27,6 @@ int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *);
int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
#endif /* __XFS_IOMAP_H__*/
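
Widening the count argument from size_t to xfs_off_t matters on 32-bit kernels, where size_t is only 32 bits while unwritten-extent conversions are described by 64-bit byte ranges. A small userspace sketch of the truncation hazard (illustrative only; compile with -D_FILE_OFFSET_BITS=64 so off_t is 64 bits everywhere):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	off_t  count64   = 5LL * 1024 * 1024 * 1024;	/* 5 GiB */
	size_t truncated = (size_t)count64;		/* lossy on ILP32 */

	printf("sizeof(size_t) = %zu, sizeof(off_t) = %zu\n",
	       sizeof(size_t), sizeof(off_t));
	printf("64-bit count = %lld, as size_t = %llu\n",
	       (long long)count64, (unsigned long long)truncated);
	return 0;
}
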
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c50311cae1b1..ce80eeb8faa4 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -380,18 +380,27 @@ xfs_vn_rename(
struct inode *odir,
struct dentry *odentry,
struct inode *ndir,
- struct dentry *ndentry)
+ struct dentry *ndentry,
+ unsigned int flags)
{
struct inode *new_inode = ndentry->d_inode;
+ int omode = 0;
struct xfs_name oname;
struct xfs_name nname;
- xfs_dentry_to_name(&oname, odentry, 0);
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ return -EINVAL;
+
+ /* if we are exchanging files, we need to set i_mode of both files */
+ if (flags & RENAME_EXCHANGE)
+ omode = ndentry->d_inode->i_mode;
+
+ xfs_dentry_to_name(&oname, odentry, omode);
xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode);
return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
- XFS_I(ndir), &nname, new_inode ?
- XFS_I(new_inode) : NULL);
+ XFS_I(ndir), &nname,
+ new_inode ? XFS_I(new_inode) : NULL, flags);
}
/*
@@ -1144,7 +1153,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
*/
.rmdir = xfs_vn_unlink,
.mknod = xfs_vn_mknod,
- .rename = xfs_vn_rename,
+ .rename2 = xfs_vn_rename,
.get_acl = xfs_get_acl,
.set_acl = xfs_set_acl,
.getattr = xfs_vn_getattr,
@@ -1172,7 +1181,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
*/
.rmdir = xfs_vn_unlink,
.mknod = xfs_vn_mknod,
- .rename = xfs_vn_rename,
+ .rename2 = xfs_vn_rename,
.get_acl = xfs_get_acl,
.set_acl = xfs_set_acl,
.getattr = xfs_vn_getattr,
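
Wiring up .rename2 lets XFS accept the RENAME_NOREPLACE and RENAME_EXCHANGE flags and reject anything else with -EINVAL; the exchange case additionally needs the target's i_mode so both directory entries are typed correctly. A hedged userspace sketch that exercises the exchange path through the raw renameat2 syscall (SYS_renameat2 and the RENAME_* flags need 3.15-or-later kernel headers, and older glibc has no wrapper):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>	/* SYS_renameat2 */
#include <linux/fs.h>		/* RENAME_NOREPLACE, RENAME_EXCHANGE */

/* Atomically swap two paths; in the kernel this reaches ->rename2()
 * (xfs_vn_rename above) with RENAME_EXCHANGE set. */
int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <path-a> <path-b>\n", argv[0]);
		return 1;
	}

	if (syscall(SYS_renameat2, AT_FDCWD, argv[1],
		    AT_FDCWD, argv[2], RENAME_EXCHANGE) < 0) {
		perror("renameat2(RENAME_EXCHANGE)");
		return 1;
	}
	return 0;
}
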
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e408bf5a3ff7..bcc7cfabb787 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -33,6 +33,7 @@
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"
+#include "xfs_sb.h"
kmem_zone_t *xfs_log_ticket_zone;
@@ -1290,9 +1291,20 @@ xfs_log_worker(
struct xfs_mount *mp = log->l_mp;
/* dgc: errors ignored - not fatal and nowhere to report them */
- if (xfs_log_need_covered(mp))
- xfs_fs_log_dummy(mp);
- else
+ if (xfs_log_need_covered(mp)) {
+ /*
+ * Dump a transaction into the log that contains no real change.
+ * This is needed to stamp the current tail LSN into the log
+ * during the covering operation.
+ *
+ * We cannot use an inode here for this - that will push dirty
+ * state back up into the VFS and then periodic inode flushing
+ * will prevent log covering from making progress. Hence we
+ * synchronously log the superblock instead to ensure the
+ * superblock is immediately unpinned and can be written back.
+ */
+ xfs_sync_sb(mp, true);
+ } else
xfs_log_force(mp, 0);
/* start pushing all the metadata that is currently dirty */
@@ -1395,6 +1407,8 @@ xlog_alloc_log(
ASSERT(xfs_buf_islocked(bp));
xfs_buf_unlock(bp);
+ /* use high priority wq for log I/O completion */
+ bp->b_ioend_wq = mp->m_log_workqueue;
bp->b_iodone = xlog_iodone;
log->l_xbuf = bp;
@@ -1427,6 +1441,8 @@ xlog_alloc_log(
ASSERT(xfs_buf_islocked(bp));
xfs_buf_unlock(bp);
+ /* use high priority wq for log I/O completion */
+ bp->b_ioend_wq = mp->m_log_workqueue;
bp->b_iodone = xlog_iodone;
iclog->ic_bp = bp;
iclog->ic_data = bp->b_addr;
@@ -1806,8 +1822,6 @@ xlog_sync(
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_SYNCIO;
- /* use high priority completion wq */
- bp->b_ioend_wq = log->l_mp->m_log_workqueue;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
bp->b_flags |= XBF_FUA;
@@ -1856,8 +1870,6 @@ xlog_sync(
bp->b_flags |= XBF_SYNCIO;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
bp->b_flags |= XBF_FUA;
- /* use high priority completion wq */
- bp->b_ioend_wq = log->l_mp->m_log_workqueue;
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -2027,7 +2039,7 @@ xlog_print_tic_res(
" total reg = %u bytes (o/flow = %u bytes)\n"
" ophdrs = %u (ophdr space = %u bytes)\n"
" ophdr + reg = %u bytes\n"
- " num regions = %u\n",
+ " num regions = %u",
((ticket->t_trans_type <= 0 ||
ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
"bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d3d38836f87f..4fa80e63eea2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -408,11 +408,11 @@ xfs_update_alignment(xfs_mount_t *mp)
if (xfs_sb_version_hasdalign(sbp)) {
if (sbp->sb_unit != mp->m_dalign) {
sbp->sb_unit = mp->m_dalign;
- mp->m_update_flags |= XFS_SB_UNIT;
+ mp->m_update_sb = true;
}
if (sbp->sb_width != mp->m_swidth) {
sbp->sb_width = mp->m_swidth;
- mp->m_update_flags |= XFS_SB_WIDTH;
+ mp->m_update_sb = true;
}
} else {
xfs_warn(mp,
@@ -583,38 +583,19 @@ int
xfs_mount_reset_sbqflags(
struct xfs_mount *mp)
{
- int error;
- struct xfs_trans *tp;
-
mp->m_qflags = 0;
- /*
- * It is OK to look at sb_qflags here in mount path,
- * without m_sb_lock.
- */
+ /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
if (mp->m_sb.sb_qflags == 0)
return 0;
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = 0;
spin_unlock(&mp->m_sb_lock);
- /*
- * If the fs is readonly, let the incore superblock run
- * with quotas off but don't flush the update out to disk
- */
- if (mp->m_flags & XFS_MOUNT_RDONLY)
+ if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
return 0;
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- xfs_alert(mp, "%s: Superblock update failed!", __func__);
- return error;
- }
-
- xfs_mod_sb(tp, XFS_SB_QFLAGS);
- return xfs_trans_commit(tp, 0);
+ return xfs_sync_sb(mp, false);
}
__uint64_t
@@ -659,26 +640,25 @@ xfs_mountfs(
xfs_sb_mount_common(mp, sbp);
/*
- * Check for a mismatched features2 values. Older kernels
- * read & wrote into the wrong sb offset for sb_features2
- * on some platforms due to xfs_sb_t not being 64bit size aligned
- * when sb_features2 was added, which made older superblock
- * reading/writing routines swap it as a 64-bit value.
+ * Check for a mismatched features2 values. Older kernels read & wrote
+ * into the wrong sb offset for sb_features2 on some platforms due to
+ * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
+ * which made older superblock reading/writing routines swap it as a
+ * 64-bit value.
*
* For backwards compatibility, we make both slots equal.
*
- * If we detect a mismatched field, we OR the set bits into the
- * existing features2 field in case it has already been modified; we
- * don't want to lose any features. We then update the bad location
- * with the ORed value so that older kernels will see any features2
- * flags, and mark the two fields as needing updates once the
- * transaction subsystem is online.
+ * If we detect a mismatched field, we OR the set bits into the existing
+ * features2 field in case it has already been modified; we don't want
+ * to lose any features. We then update the bad location with the ORed
+ * value so that older kernels will see any features2 flags. The
+ * superblock writeback code ensures the new sb_features2 is copied to
+ * sb_bad_features2 before it is logged or written to disk.
*/
if (xfs_sb_has_mismatched_features2(sbp)) {
xfs_warn(mp, "correcting sb_features alignment problem");
sbp->sb_features2 |= sbp->sb_bad_features2;
- sbp->sb_bad_features2 = sbp->sb_features2;
- mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
+ mp->m_update_sb = true;
/*
* Re-check for ATTR2 in case it was found in bad_features2
@@ -692,17 +672,17 @@ xfs_mountfs(
if (xfs_sb_version_hasattr2(&mp->m_sb) &&
(mp->m_flags & XFS_MOUNT_NOATTR2)) {
xfs_sb_version_removeattr2(&mp->m_sb);
- mp->m_update_flags |= XFS_SB_FEATURES2;
+ mp->m_update_sb = true;
/* update sb_versionnum for the clearing of the morebits */
if (!sbp->sb_features2)
- mp->m_update_flags |= XFS_SB_VERSIONNUM;
+ mp->m_update_sb = true;
}
/* always use v2 inodes by default now */
if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
- mp->m_update_flags |= XFS_SB_VERSIONNUM;
+ mp->m_update_sb = true;
}
/*
@@ -895,8 +875,8 @@ xfs_mountfs(
* the next remount into writeable mode. Otherwise we would never
* perform the update e.g. for the root filesystem.
*/
- if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
- error = xfs_mount_log_sb(mp, mp->m_update_flags);
+ if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ error = xfs_sync_sb(mp, false);
if (error) {
xfs_warn(mp, "failed to write sb changes");
goto out_rtunmount;
@@ -1103,9 +1083,6 @@ xfs_fs_writable(
int
xfs_log_sbcount(xfs_mount_t *mp)
{
- xfs_trans_t *tp;
- int error;
-
/* allow this to proceed during the freeze sequence... */
if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
return 0;
@@ -1119,17 +1096,7 @@ xfs_log_sbcount(xfs_mount_t *mp)
if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
return 0;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
- return error;
+ return xfs_sync_sb(mp, true);
}
/*
@@ -1423,34 +1390,6 @@ xfs_freesb(
}
/*
- * Used to log changes to the superblock unit and width fields which could
- * be altered by the mount options, as well as any potential sb_features2
- * fixup. Only the first superblock is updated.
- */
-int
-xfs_mount_log_sb(
- xfs_mount_t *mp,
- __int64_t fields)
-{
- xfs_trans_t *tp;
- int error;
-
- ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
- XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
- XFS_SB_VERSIONNUM));
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
- xfs_mod_sb(tp, fields);
- error = xfs_trans_commit(tp, 0);
- return error;
-}
-
-/*
* If the underlying (data/log/rt) device is readonly, there are some
* operations that cannot proceed.
*/
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 22ccf69d4d3c..a5b2ff822653 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -162,8 +162,7 @@ typedef struct xfs_mount {
struct delayed_work m_reclaim_work; /* background inode reclaim */
struct delayed_work m_eofblocks_work; /* background eof blocks
trimming */
- __int64_t m_update_flags; /* sb flags we need to update
- on the next remount,rw */
+ bool m_update_sb; /* sb needs update in mount */
int64_t m_low_space[XFS_LOWSP_MAX];
/* low free space thresholds */
struct xfs_kobj m_kobj;
@@ -378,7 +377,7 @@ extern void xfs_unmountfs(xfs_mount_t *);
extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
uint, int);
-extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
+extern int xfs_mount_log_sb(xfs_mount_t *);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int xfs_readsb(xfs_mount_t *, int);
extern void xfs_freesb(xfs_mount_t *);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 79fb19dd9c83..53cc2aaf8d2b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -430,6 +430,7 @@ struct xfs_qm_isolate {
static enum lru_status
xfs_qm_dquot_isolate(
struct list_head *item,
+ struct list_lru_one *lru,
spinlock_t *lru_lock,
void *arg)
__releases(lru_lock) __acquires(lru_lock)
@@ -450,7 +451,7 @@ xfs_qm_dquot_isolate(
XFS_STATS_INC(xs_qm_dqwants);
trace_xfs_dqreclaim_want(dqp);
- list_del_init(&dqp->q_lru);
+ list_lru_isolate(lru, &dqp->q_lru);
XFS_STATS_DEC(xs_qm_dquot_unused);
return LRU_REMOVED;
}
@@ -494,7 +495,7 @@ xfs_qm_dquot_isolate(
xfs_dqunlock(dqp);
ASSERT(dqp->q_nrefs == 0);
- list_move_tail(&dqp->q_lru, &isol->dispose);
+ list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
XFS_STATS_DEC(xs_qm_dquot_unused);
trace_xfs_dqreclaim_done(dqp);
XFS_STATS_INC(xs_qm_dqreclaims);
@@ -523,7 +524,6 @@ xfs_qm_shrink_scan(
struct xfs_qm_isolate isol;
unsigned long freed;
int error;
- unsigned long nr_to_scan = sc->nr_to_scan;
if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
return 0;
@@ -531,8 +531,8 @@ xfs_qm_shrink_scan(
INIT_LIST_HEAD(&isol.buffers);
INIT_LIST_HEAD(&isol.dispose);
- freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
- &nr_to_scan);
+ freed = list_lru_shrink_walk(&qi->qi_lru, sc,
+ xfs_qm_dquot_isolate, &isol);
error = xfs_buf_delwri_submit(&isol.buffers);
if (error)
@@ -557,7 +557,7 @@ xfs_qm_shrink_count(
struct xfs_quotainfo *qi = container_of(shrink,
struct xfs_quotainfo, qi_shrinker);
- return list_lru_count_node(&qi->qi_lru, sc->nid);
+ return list_lru_shrink_count(&qi->qi_lru, sc);
}
/*
@@ -714,7 +714,6 @@ STATIC int
xfs_qm_qino_alloc(
xfs_mount_t *mp,
xfs_inode_t **ip,
- __int64_t sbfields,
uint flags)
{
xfs_trans_t *tp;
@@ -777,11 +776,6 @@ xfs_qm_qino_alloc(
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
- ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
- (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
- XFS_SB_QFLAGS));
xfs_sb_version_addquota(&mp->m_sb);
mp->m_sb.sb_uquotino = NULLFSINO;
@@ -798,7 +792,7 @@ xfs_qm_qino_alloc(
else
mp->m_sb.sb_pquotino = (*ip)->i_ino;
spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, sbfields);
+ xfs_log_sb(tp);
if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
xfs_alert(mp, "%s failed (error %d)!", __func__, error);
@@ -1451,7 +1445,7 @@ xfs_qm_mount_quotas(
spin_unlock(&mp->m_sb_lock);
if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
- if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
+ if (xfs_sync_sb(mp, false)) {
/*
* We could only have been turning quotas off.
* We aren't in very good shape actually because
@@ -1482,7 +1476,6 @@ xfs_qm_init_quotainos(
struct xfs_inode *gip = NULL;
struct xfs_inode *pip = NULL;
int error;
- __int64_t sbflags = 0;
uint flags = 0;
ASSERT(mp->m_quotainfo);
@@ -1517,9 +1510,6 @@ xfs_qm_init_quotainos(
}
} else {
flags |= XFS_QMOPT_SBVERSION;
- sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
- XFS_SB_QFLAGS);
}
/*
@@ -1530,7 +1520,6 @@ xfs_qm_init_quotainos(
*/
if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
error = xfs_qm_qino_alloc(mp, &uip,
- sbflags | XFS_SB_UQUOTINO,
flags | XFS_QMOPT_UQUOTA);
if (error)
goto error_rele;
@@ -1539,7 +1528,6 @@ xfs_qm_init_quotainos(
}
if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
error = xfs_qm_qino_alloc(mp, &gip,
- sbflags | XFS_SB_GQUOTINO,
flags | XFS_QMOPT_GQUOTA);
if (error)
goto error_rele;
@@ -1548,7 +1536,6 @@ xfs_qm_init_quotainos(
}
if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
error = xfs_qm_qino_alloc(mp, &pip,
- sbflags | XFS_SB_PQUOTINO,
flags | XFS_QMOPT_PQUOTA);
if (error)
goto error_rele;
@@ -1587,32 +1574,6 @@ xfs_qm_dqfree_one(
xfs_qm_dqdestroy(dqp);
}
-/*
- * Start a transaction and write the incore superblock changes to
- * disk. flags parameter indicates which fields have changed.
- */
-int
-xfs_qm_write_sb_changes(
- xfs_mount_t *mp,
- __int64_t flags)
-{
- xfs_trans_t *tp;
- int error;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_mod_sb(tp, flags);
- error = xfs_trans_commit(tp, 0);
-
- return error;
-}
-
-
/* --------------- utility functions for vnodeops ---------------- */
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 3a07a937e232..0d4d3590cf85 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -157,7 +157,6 @@ struct xfs_dquot_acct {
#define XFS_QM_RTBWARNLIMIT 5
extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
-extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t);
/* dquot stuff */
extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
@@ -166,9 +165,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
/* quota ops */
extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
- uint, struct fs_disk_quota *);
+ uint, struct qc_dqblk *);
extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
- struct fs_disk_quota *);
+ struct qc_dqblk *);
extern int xfs_qm_scall_getqstat(struct xfs_mount *,
struct fs_quota_stat *);
extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 74fca68e43b6..9b965db45800 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
uint);
STATIC uint xfs_qm_export_flags(uint);
-STATIC uint xfs_qm_export_qtype_flags(uint);
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -92,8 +91,7 @@ xfs_qm_scall_quotaoff(
mutex_unlock(&q->qi_quotaofflock);
/* XXX what to do if error ? Revert back to old vals incore ? */
- error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
- return error;
+ return xfs_sync_sb(mp, false);
}
dqtype = 0;
@@ -314,7 +312,6 @@ xfs_qm_scall_quotaon(
{
int error;
uint qf;
- __int64_t sbflags;
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
@@ -322,30 +319,22 @@ xfs_qm_scall_quotaon(
*/
flags &= ~(XFS_ALL_QUOTA_ACCT);
- sbflags = 0;
-
if (flags == 0) {
xfs_debug(mp, "%s: zero flags, m_qflags=%x",
__func__, mp->m_qflags);
return -EINVAL;
}
- /* No fs can turn on quotas with a delayed effect */
- ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
-
/*
* Can't enforce without accounting. We check the superblock
* qflags here instead of m_qflags because rootfs can have
* quota acct on ondisk without m_qflags' knowing.
*/
- if (((flags & XFS_UQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
+ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
(flags & XFS_UQUOTA_ENFD)) ||
- ((flags & XFS_GQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
+ ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
(flags & XFS_GQUOTA_ENFD)) ||
- ((flags & XFS_PQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
+ ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
(flags & XFS_PQUOTA_ENFD))) {
xfs_debug(mp,
"%s: Can't enforce without acct, flags=%x sbflags=%x",
@@ -370,11 +359,11 @@ xfs_qm_scall_quotaon(
/*
* There's nothing to change if it's the same.
*/
- if ((qf & flags) == flags && sbflags == 0)
+ if ((qf & flags) == flags)
return -EEXIST;
- sbflags |= XFS_SB_QFLAGS;
- if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
+ error = xfs_sync_sb(mp, false);
+ if (error)
return error;
/*
* If we aren't trying to switch on quota enforcement, we are done.
@@ -384,8 +373,7 @@ xfs_qm_scall_quotaon(
((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
(mp->m_qflags & XFS_PQUOTA_ACCT)) ||
((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
- (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
- (flags & XFS_ALL_QUOTA_ENFD) == 0)
+ (mp->m_qflags & XFS_GQUOTA_ACCT)))
return 0;
if (! XFS_IS_QUOTA_RUNNING(mp))
@@ -422,20 +410,12 @@ xfs_qm_scall_getqstat(
memset(out, 0, sizeof(fs_quota_stat_t));
out->qs_version = FS_QSTAT_VERSION;
- if (!xfs_sb_version_hasquota(&mp->m_sb)) {
- out->qs_uquota.qfs_ino = NULLFSINO;
- out->qs_gquota.qfs_ino = NULLFSINO;
- return 0;
- }
-
out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
(XFS_ALL_QUOTA_ACCT|
XFS_ALL_QUOTA_ENFD));
- if (q) {
- uip = q->qi_uquotaip;
- gip = q->qi_gquotaip;
- pip = q->qi_pquotaip;
- }
+ uip = q->qi_uquotaip;
+ gip = q->qi_gquotaip;
+ pip = q->qi_pquotaip;
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, 0, &uip) == 0)
@@ -481,14 +461,13 @@ xfs_qm_scall_getqstat(
if (temppqip)
IRELE(pip);
}
- if (q) {
- out->qs_incoredqs = q->qi_dquots;
- out->qs_btimelimit = q->qi_btimelimit;
- out->qs_itimelimit = q->qi_itimelimit;
- out->qs_rtbtimelimit = q->qi_rtbtimelimit;
- out->qs_bwarnlimit = q->qi_bwarnlimit;
- out->qs_iwarnlimit = q->qi_iwarnlimit;
- }
+ out->qs_incoredqs = q->qi_dquots;
+ out->qs_btimelimit = q->qi_btimelimit;
+ out->qs_itimelimit = q->qi_itimelimit;
+ out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+ out->qs_bwarnlimit = q->qi_bwarnlimit;
+ out->qs_iwarnlimit = q->qi_iwarnlimit;
+
return 0;
}
@@ -509,13 +488,6 @@ xfs_qm_scall_getqstatv(
bool tempgqip = false;
bool temppqip = false;
- if (!xfs_sb_version_hasquota(&mp->m_sb)) {
- out->qs_uquota.qfs_ino = NULLFSINO;
- out->qs_gquota.qfs_ino = NULLFSINO;
- out->qs_pquota.qfs_ino = NULLFSINO;
- return 0;
- }
-
out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
(XFS_ALL_QUOTA_ACCT|
XFS_ALL_QUOTA_ENFD));
@@ -523,11 +495,9 @@ xfs_qm_scall_getqstatv(
out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
- if (q) {
- uip = q->qi_uquotaip;
- gip = q->qi_gquotaip;
- pip = q->qi_pquotaip;
- }
+ uip = q->qi_uquotaip;
+ gip = q->qi_gquotaip;
+ pip = q->qi_pquotaip;
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, 0, &uip) == 0)
@@ -562,19 +532,18 @@ xfs_qm_scall_getqstatv(
if (temppqip)
IRELE(pip);
}
- if (q) {
- out->qs_incoredqs = q->qi_dquots;
- out->qs_btimelimit = q->qi_btimelimit;
- out->qs_itimelimit = q->qi_itimelimit;
- out->qs_rtbtimelimit = q->qi_rtbtimelimit;
- out->qs_bwarnlimit = q->qi_bwarnlimit;
- out->qs_iwarnlimit = q->qi_iwarnlimit;
- }
+ out->qs_incoredqs = q->qi_dquots;
+ out->qs_btimelimit = q->qi_btimelimit;
+ out->qs_itimelimit = q->qi_itimelimit;
+ out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+ out->qs_bwarnlimit = q->qi_bwarnlimit;
+ out->qs_iwarnlimit = q->qi_iwarnlimit;
+
return 0;
}
-#define XFS_DQ_MASK \
- (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+ (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
/*
* Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +553,7 @@ xfs_qm_scall_setqlim(
struct xfs_mount *mp,
xfs_dqid_t id,
uint type,
- fs_disk_quota_t *newlim)
+ struct qc_dqblk *newlim)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_disk_dquot *ddq;
@@ -593,9 +562,9 @@ xfs_qm_scall_setqlim(
int error;
xfs_qcnt_t hard, soft;
- if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+ if (newlim->d_fieldmask & ~XFS_QC_MASK)
return -EINVAL;
- if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+ if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
return 0;
/*
@@ -633,11 +602,11 @@ xfs_qm_scall_setqlim(
/*
* Make sure that hardlimits are >= soft limits before changing.
*/
- hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+ hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
be64_to_cpu(ddq->d_blk_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+ soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
be64_to_cpu(ddq->d_blk_softlimit);
if (hard == 0 || hard >= soft) {
ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +619,11 @@ xfs_qm_scall_setqlim(
} else {
xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
}
- hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+ hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
be64_to_cpu(ddq->d_rtb_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+ soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
be64_to_cpu(ddq->d_rtb_softlimit);
if (hard == 0 || hard >= soft) {
ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +636,10 @@ xfs_qm_scall_setqlim(
xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
}
- hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+ hard = (newlim->d_fieldmask & QC_INO_HARD) ?
(xfs_qcnt_t) newlim->d_ino_hardlimit :
be64_to_cpu(ddq->d_ino_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+ soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
(xfs_qcnt_t) newlim->d_ino_softlimit :
be64_to_cpu(ddq->d_ino_softlimit);
if (hard == 0 || hard >= soft) {
@@ -687,12 +656,12 @@ xfs_qm_scall_setqlim(
/*
* Update warnings counter(s) if requested
*/
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+ if (newlim->d_fieldmask & QC_SPC_WARNS)
+ ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+ if (newlim->d_fieldmask & QC_INO_WARNS)
+ ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+ if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+ ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
if (id == 0) {
/*
@@ -702,24 +671,24 @@ xfs_qm_scall_setqlim(
* soft and hard limit values (already done, above), and
* for warnings.
*/
- if (newlim->d_fieldmask & FS_DQ_BTIMER) {
- q->qi_btimelimit = newlim->d_btimer;
- ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+ if (newlim->d_fieldmask & QC_SPC_TIMER) {
+ q->qi_btimelimit = newlim->d_spc_timer;
+ ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
}
- if (newlim->d_fieldmask & FS_DQ_ITIMER) {
- q->qi_itimelimit = newlim->d_itimer;
- ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+ if (newlim->d_fieldmask & QC_INO_TIMER) {
+ q->qi_itimelimit = newlim->d_ino_timer;
+ ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
}
- if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
- q->qi_rtbtimelimit = newlim->d_rtbtimer;
- ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+ if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+ q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+ ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
}
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- q->qi_bwarnlimit = newlim->d_bwarns;
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- q->qi_iwarnlimit = newlim->d_iwarns;
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+ if (newlim->d_fieldmask & QC_SPC_WARNS)
+ q->qi_bwarnlimit = newlim->d_spc_warns;
+ if (newlim->d_fieldmask & QC_INO_WARNS)
+ q->qi_iwarnlimit = newlim->d_ino_warns;
+ if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+ q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
} else {
/*
* If the user is now over quota, start the timelimit.
@@ -801,7 +770,7 @@ xfs_qm_log_quotaoff(
mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, XFS_SB_QFLAGS);
+ xfs_log_sb(tp);
/*
* We have to make sure that the transaction is secure on disk before we
@@ -824,7 +793,7 @@ xfs_qm_scall_getquota(
struct xfs_mount *mp,
xfs_dqid_t id,
uint type,
- struct fs_disk_quota *dst)
+ struct qc_dqblk *dst)
{
struct xfs_dquot *dqp;
int error;
@@ -848,28 +817,25 @@ xfs_qm_scall_getquota(
}
memset(dst, 0, sizeof(*dst));
- dst->d_version = FS_DQUOT_VERSION;
- dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
- dst->d_id = be32_to_cpu(dqp->q_core.d_id);
- dst->d_blk_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
- dst->d_blk_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+ dst->d_spc_hardlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+ dst->d_spc_softlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
- dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
- dst->d_icount = dqp->q_res_icount;
- dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
- dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
- dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
- dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
- dst->d_rtb_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
- dst->d_rtb_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
- dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
- dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
- dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+ dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+ dst->d_ino_count = dqp->q_res_icount;
+ dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+ dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+ dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+ dst->d_rt_spc_hardlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+ dst->d_rt_spc_softlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+ dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+ dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+ dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
/*
* Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +848,23 @@ xfs_qm_scall_getquota(
dqp->q_core.d_flags == XFS_DQ_GROUP) ||
(!XFS_IS_PQUOTA_ENFORCED(mp) &&
dqp->q_core.d_flags == XFS_DQ_PROJ)) {
- dst->d_btimer = 0;
- dst->d_itimer = 0;
- dst->d_rtbtimer = 0;
+ dst->d_spc_timer = 0;
+ dst->d_ino_timer = 0;
+ dst->d_rt_spc_timer = 0;
}
#ifdef DEBUG
- if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
- (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
- (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
- dst->d_id != 0) {
- if ((dst->d_bcount > dst->d_blk_softlimit) &&
- (dst->d_blk_softlimit > 0)) {
- ASSERT(dst->d_btimer != 0);
+ if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+ (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+ (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+ id != 0) {
+ if ((dst->d_space > dst->d_spc_softlimit) &&
+ (dst->d_spc_softlimit > 0)) {
+ ASSERT(dst->d_spc_timer != 0);
}
- if ((dst->d_icount > dst->d_ino_softlimit) &&
+ if ((dst->d_ino_count > dst->d_ino_softlimit) &&
(dst->d_ino_softlimit > 0)) {
- ASSERT(dst->d_itimer != 0);
+ ASSERT(dst->d_ino_timer != 0);
}
}
#endif
@@ -908,26 +874,6 @@ out_put:
}
STATIC uint
-xfs_qm_export_qtype_flags(
- uint flags)
-{
- /*
- * Can't be more than one, or none.
- */
- ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
- (FS_PROJ_QUOTA | FS_USER_QUOTA));
- ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
- (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
- ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
- (FS_USER_QUOTA | FS_GROUP_QUOTA));
- ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
- return (flags & XFS_DQ_USER) ?
- FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
- FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
-STATIC uint
xfs_qm_export_flags(
uint flags)
{
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 7542bbeca6a1..6923905ab33d 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -64,19 +64,10 @@ xfs_fs_get_xstatev(
return xfs_qm_scall_getqstatv(mp, fqs);
}
-STATIC int
-xfs_fs_set_xstate(
- struct super_block *sb,
- unsigned int uflags,
- int op)
+static unsigned int
+xfs_quota_flags(unsigned int uflags)
{
- struct xfs_mount *mp = XFS_M(sb);
- unsigned int flags = 0;
-
- if (sb->s_flags & MS_RDONLY)
- return -EROFS;
- if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
- return -ENOSYS;
+ unsigned int flags = 0;
if (uflags & FS_QUOTA_UDQ_ACCT)
flags |= XFS_UQUOTA_ACCT;
@@ -91,16 +82,39 @@ xfs_fs_set_xstate(
if (uflags & FS_QUOTA_PDQ_ENFD)
flags |= XFS_PQUOTA_ENFD;
- switch (op) {
- case Q_XQUOTAON:
- return xfs_qm_scall_quotaon(mp, flags);
- case Q_XQUOTAOFF:
- if (!XFS_IS_QUOTA_ON(mp))
- return -EINVAL;
- return xfs_qm_scall_quotaoff(mp, flags);
- }
+ return flags;
+}
+
+STATIC int
+xfs_quota_enable(
+ struct super_block *sb,
+ unsigned int uflags)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ if (sb->s_flags & MS_RDONLY)
+ return -EROFS;
+ if (!XFS_IS_QUOTA_RUNNING(mp))
+ return -ENOSYS;
+
+ return xfs_qm_scall_quotaon(mp, xfs_quota_flags(uflags));
+}
+
+STATIC int
+xfs_quota_disable(
+ struct super_block *sb,
+ unsigned int uflags)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ if (sb->s_flags & MS_RDONLY)
+ return -EROFS;
+ if (!XFS_IS_QUOTA_RUNNING(mp))
+ return -ENOSYS;
+ if (!XFS_IS_QUOTA_ON(mp))
+ return -EINVAL;
- return -EINVAL;
+ return xfs_qm_scall_quotaoff(mp, xfs_quota_flags(uflags));
}
STATIC int
@@ -131,7 +145,7 @@ STATIC int
xfs_fs_get_dqblk(
struct super_block *sb,
struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *qdq)
{
struct xfs_mount *mp = XFS_M(sb);
@@ -141,14 +155,14 @@ xfs_fs_get_dqblk(
return -ESRCH;
return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
- xfs_quota_type(qid.type), fdq);
+ xfs_quota_type(qid.type), qdq);
}
STATIC int
xfs_fs_set_dqblk(
struct super_block *sb,
struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *qdq)
{
struct xfs_mount *mp = XFS_M(sb);
@@ -160,13 +174,14 @@ xfs_fs_set_dqblk(
return -ESRCH;
return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
- xfs_quota_type(qid.type), fdq);
+ xfs_quota_type(qid.type), qdq);
}
const struct quotactl_ops xfs_quotactl_operations = {
.get_xstatev = xfs_fs_get_xstatev,
.get_xstate = xfs_fs_get_xstate,
- .set_xstate = xfs_fs_set_xstate,
+ .quota_enable = xfs_quota_enable,
+ .quota_disable = xfs_quota_disable,
.rm_xquota = xfs_fs_rm_xquota,
.get_dqblk = xfs_fs_get_dqblk,
.set_dqblk = xfs_fs_set_dqblk,
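
The fs_disk_quota/qc_dqblk split only changes the in-kernel interface: the VFS now hands XFS a struct qc_dqblk with space limits in bytes, while the quotactl ABI still speaks struct fs_disk_quota in 512-byte basic blocks. A hedged userspace sketch of the unchanged caller side (Q_XGETQUOTA and struct fs_disk_quota come from the uapi <linux/dqblk_xfs.h>; header availability varies by distribution):

#include <stdio.h>
#include <stdlib.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQUOTA, struct fs_disk_quota */

/* Query the XFS user quota for one uid; limits and counts are reported
 * in 512-byte basic blocks, exactly as before this change. */
int main(int argc, char **argv)
{
	struct fs_disk_quota d;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <blockdev> <uid>\n", argv[0]);
		return 1;
	}

	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), argv[1],
		     atoi(argv[2]), (void *)&d) < 0) {
		perror("quotactl(Q_XGETQUOTA)");
		return 1;
	}

	printf("bcount=%llu soft=%llu hard=%llu (512-byte blocks)\n",
	       (unsigned long long)d.d_bcount,
	       (unsigned long long)d.d_blk_softlimit,
	       (unsigned long long)d.d_blk_hardlimit);
	return 0;
}
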
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 19cbda196369..8fcc4ccc5c79 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -685,7 +685,7 @@ xfs_blkdev_get(
mp);
if (IS_ERR(*bdevp)) {
error = PTR_ERR(*bdevp);
- xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
+ xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
}
return error;
@@ -1111,6 +1111,11 @@ xfs_fs_statfs(
statp->f_files,
mp->m_maxicount);
+ /* If sb_icount overshot maxicount, report actual allocation */
+ statp->f_files = max_t(typeof(statp->f_files),
+ statp->f_files,
+ sbp->sb_icount);
+
/* make sure statp->f_ffree does not underflow */
ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
statp->f_ffree = max_t(__int64_t, ffree, 0);
@@ -1257,13 +1262,13 @@ xfs_fs_remount(
* If this is the first remount to writeable state we
* might have some superblock changes to update.
*/
- if (mp->m_update_flags) {
- error = xfs_mount_log_sb(mp, mp->m_update_flags);
+ if (mp->m_update_sb) {
+ error = xfs_sync_sb(mp, false);
if (error) {
xfs_warn(mp, "failed to write sb changes");
return error;
}
- mp->m_update_flags = 0;
+ mp->m_update_sb = false;
}
/*
@@ -1293,8 +1298,9 @@ xfs_fs_remount(
/*
* Second stage of a freeze. The data is already frozen so we only
- * need to take care of the metadata. Once that's done write a dummy
- * record to dirty the log in case of a crash while frozen.
+ * need to take care of the metadata. Once that's done sync the superblock
+ * to the log to dirty it in case of a crash while frozen. This ensures that we
+ * will recover the unlinked inode lists on the next mount.
*/
STATIC int
xfs_fs_freeze(
@@ -1304,7 +1310,7 @@ xfs_fs_freeze(
xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
- return xfs_fs_log_dummy(mp);
+ return xfs_sync_sb(mp, true);
}
STATIC int
@@ -1531,7 +1537,7 @@ xfs_fs_mount(
static long
xfs_fs_nr_cached_objects(
struct super_block *sb,
- int nid)
+ struct shrink_control *sc)
{
return xfs_reclaim_inodes_count(XFS_M(sb));
}
@@ -1539,10 +1545,9 @@ xfs_fs_nr_cached_objects(
static long
xfs_fs_free_cached_objects(
struct super_block *sb,
- long nr_to_scan,
- int nid)
+ struct shrink_control *sc)
{
- return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
+ return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index 1743b9f8e23d..a0c8067cea6f 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -149,24 +149,6 @@ static struct ctl_table xfs_table[] = {
.extra2 = &xfs_params.inherit_noatim.max
},
{
- .procname = "xfsbufd_centisecs",
- .data = &xfs_params.xfs_buf_timer.val,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &xfs_params.xfs_buf_timer.min,
- .extra2 = &xfs_params.xfs_buf_timer.max
- },
- {
- .procname = "age_buffer_centisecs",
- .data = &xfs_params.xfs_buf_age.val,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &xfs_params.xfs_buf_age.min,
- .extra2 = &xfs_params.xfs_buf_age.max
- },
- {
.procname = "inherit_nosymlinks",
.data = &xfs_params.inherit_nosym.val,
.maxlen = sizeof(int),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index fa3135b9bf04..eb90cd59a0ec 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -472,6 +472,7 @@ xfs_trans_apply_sb_deltas(
whole = 1;
}
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
if (whole)
/*
* Log the whole thing, the fields are noncontiguous.
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 0a4d4ab6d9a9..75798412859a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -327,9 +327,10 @@ xfs_trans_read_buf_map(
return -EIO;
}
- if (tp)
+ if (tp) {
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_read_buf(bp->b_fspriv);
+ trace_xfs_trans_read_buf(bp->b_fspriv);
+ }
*bpp = bp;
return 0;
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index d5ec6c87810f..6b040f4ddfab 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 5a0a3e5daf85..03aacfb3e98b 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 8b06e4c1dd5d..11c3a011dcbf 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 7461327e14e4..273de709495c 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 1baae6edda89..a8f344363e77 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -240,7 +240,7 @@
/*
* If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
 * define it now. This is the case where the compiler does not support
- * a __FUNCTION__ macro or equivalent.
+ * a __func__ macro or equivalent.
*/
#ifndef ACPI_GET_FUNCTION_NAME
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
@@ -249,12 +249,12 @@
* The Name parameter should be the procedure name as a quoted string.
* The function name is also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
- * and macros such as __FUNCTION__.
+ * and macros such as __func__.
*/
#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name;
#else
-/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
+/* Compiler supports __func__ (or equivalent) -- Ignore this macro */
#define ACPI_FUNCTION_NAME(name)
#endif /* ACPI_GET_FUNCTION_NAME */
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a08e55a263c9..b0bb30ebb807 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 03b3e6d405ff..0bc78df66d4b 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 5ba78464c1b1..d56f5d722138 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20141107
+#define ACPI_CA_VERSION 0x20150204
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -569,6 +569,14 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
address,
void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_gpe_raw_handler(acpi_handle
+ gpe_device,
+ u32 gpe_number,
+ u32 type,
+ acpi_gpe_handler
+ address,
+ void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
acpi_gpe_handler
@@ -891,12 +899,6 @@ ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_id(acpi_handle object,
- acpi_owner_id * out_type))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table_with_size(acpi_string signature,
u32 instance,
struct acpi_table_header
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index eb760ca0b2e0..ebe242638591 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -305,43 +305,51 @@ struct acpi_resource_source {
u8 max_address_fixed; \
union acpi_resource_attribute info;
-struct acpi_resource_address {
-ACPI_RESOURCE_ADDRESS_COMMON};
-
-struct acpi_resource_address16 {
- ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
+struct acpi_address16_attribute {
+ u16 granularity;
u16 minimum;
u16 maximum;
u16 translation_offset;
u16 address_length;
- struct acpi_resource_source resource_source;
};
-struct acpi_resource_address32 {
- ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
+struct acpi_address32_attribute {
+ u32 granularity;
u32 minimum;
u32 maximum;
u32 translation_offset;
u32 address_length;
- struct acpi_resource_source resource_source;
};
-struct acpi_resource_address64 {
- ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
+struct acpi_address64_attribute {
+ u64 granularity;
u64 minimum;
u64 maximum;
u64 translation_offset;
u64 address_length;
+};
+
+struct acpi_resource_address {
+ACPI_RESOURCE_ADDRESS_COMMON};
+
+struct acpi_resource_address16 {
+ ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address32 {
+ ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address64 {
+ ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address;
struct acpi_resource_source resource_source;
};
struct acpi_resource_extended_address64 {
ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
- u64 granularity;
- u64 minimum;
- u64 maximum;
- u64 translation_offset;
- u64 address_length;
+ struct acpi_address64_attribute address;
u64 type_specific;
};
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bee19d8170c5..d4081fef1095 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 29e79370641d..b80b0e6dabc5 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ecff62405f17..f06d75e5fa54 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 5480cb2236bf..440ca8104b43 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index bbef17368e49..b034f1068dfe 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ typedef u32 acpi_event_status;
/*
* GPE info flags - Per GPE
* +-------+-+-+---+
- * | 7:4 |3|2|1:0|
+ * | 7:5 |4|3|2:0|
* +-------+-+-+---+
* | | | |
* | | | +-- Type of dispatch:to method, handler, notify, or none
@@ -756,13 +756,15 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01
#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02
#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03
-#define ACPI_GPE_DISPATCH_MASK (u8) 0x03
+#define ACPI_GPE_DISPATCH_RAW_HANDLER (u8) 0x04
+#define ACPI_GPE_DISPATCH_MASK (u8) 0x07
+#define ACPI_GPE_DISPATCH_TYPE(flags) ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK))
-#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04
+#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x08
#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
-#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04
+#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
-#define ACPI_GPE_CAN_WAKE (u8) 0x08
+#define ACPI_GPE_CAN_WAKE (u8) 0x10
/*
* Flags for GPE and Lock interfaces
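
The dispatch-type field grows from two bits to three so the new raw-handler type has an encoding of its own, and ACPI_GPE_DISPATCH_TYPE() hides the wider mask from callers. A stand-alone sketch of the new layout, with the constants copied from the hunk above rather than pulled from a header:

#include <stdio.h>

/* Constants mirrored from the hunk above, for illustration only. */
#define ACPI_GPE_DISPATCH_METHOD	0x01
#define ACPI_GPE_DISPATCH_HANDLER	0x02
#define ACPI_GPE_DISPATCH_NOTIFY	0x03
#define ACPI_GPE_DISPATCH_RAW_HANDLER	0x04
#define ACPI_GPE_DISPATCH_MASK		0x07
#define ACPI_GPE_DISPATCH_TYPE(flags)	((unsigned char)((flags) & ACPI_GPE_DISPATCH_MASK))
#define ACPI_GPE_LEVEL_TRIGGERED	0x08
#define ACPI_GPE_CAN_WAKE		0x10

int main(void)
{
	unsigned char flags = ACPI_GPE_DISPATCH_RAW_HANDLER |
			      ACPI_GPE_LEVEL_TRIGGERED | ACPI_GPE_CAN_WAKE;

	/* With the old two-bit mask (0x03) the raw-handler type (0x04)
	 * would have masked down to 0 and been lost. */
	printf("dispatch type = 0x%02x\n", ACPI_GPE_DISPATCH_TYPE(flags));
	return 0;
}
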
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 5f8cc1fa3278..ad74dc51d5b7 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 2b612384c994..71e5ec5b07a3 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 384875da3713..f54de0a63558 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1ba7c190c2cc..74ba46c8157a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 568d4b886712..acedc3f026de 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 77ff547730af..5bdab6bffd23 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -4,6 +4,7 @@
#define __ARCH_HAS_4LEVEL_HACK
#define __PAGETABLE_PUD_FOLDED
+#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
#define PUD_MASK PGDIR_MASK
#define PTRS_PER_PUD 1
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 177d5973b132..4d46085c1b90 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -244,10 +244,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif
-#ifndef pte_present_nonuma
-#define pte_present_nonuma(pte) pte_present(pte)
-#endif
-
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif
@@ -474,21 +470,6 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return pte;
}
-
-static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pte_t pte_file_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline int pte_file_soft_dirty(pte_t pte)
-{
- return 0;
-}
#endif
#ifndef __HAVE_PFNMAP_TRACKING
@@ -688,155 +669,24 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#endif
}
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
- * is protected for PROT_NONE and a NUMA hinting fault entry. If the
- * architecture defines __PAGE_PROTNONE then it should take that into account
- * but those that do not can rely on the fact that the NUMA hinting scanner
- * skips inaccessible VMAs.
- *
- * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
- * fault triggers on those regions if pte/pmd_numa returns true
- * (because _PAGE_PRESENT is not set).
- */
-#ifndef pte_numa
-static inline int pte_numa(pte_t pte)
-{
- return ptenuma_flags(pte) == _PAGE_NUMA;
-}
-#endif
-
-#ifndef pmd_numa
-static inline int pmd_numa(pmd_t pmd)
-{
- return pmdnuma_flags(pmd) == _PAGE_NUMA;
-}
-#endif
-
+#ifndef CONFIG_NUMA_BALANCING
/*
- * pte/pmd_mknuma sets the _PAGE_ACCESSED bitflag automatically
- * because they're called by the NUMA hinting minor page fault. If we
- * wouldn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
- * would be forced to set it later while filling the TLB after we
- * return to userland. That would trigger a second write to memory
- * that we optimize away by setting _PAGE_ACCESSED here.
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and the flag is
+ * only ever set when the VMA is accessible. For PROT_NONE VMAs the PTEs are
+ * not marked _PAGE_PROTNONE, so by default implement the helper as "always no".
+ * It is the responsibility of the caller to distinguish between PROT_NONE
+ * protections and NUMA hinting fault protections.
*/
-#ifndef pte_mknonnuma
-static inline pte_t pte_mknonnuma(pte_t pte)
-{
- pteval_t val = pte_val(pte);
-
- val &= ~_PAGE_NUMA;
- val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
- return __pte(val);
-}
-#endif
-
-#ifndef pmd_mknonnuma
-static inline pmd_t pmd_mknonnuma(pmd_t pmd)
-{
- pmdval_t val = pmd_val(pmd);
-
- val &= ~_PAGE_NUMA;
- val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
-
- return __pmd(val);
-}
-#endif
-
-#ifndef pte_mknuma
-static inline pte_t pte_mknuma(pte_t pte)
-{
- pteval_t val = pte_val(pte);
-
- VM_BUG_ON(!(val & _PAGE_PRESENT));
-
- val &= ~_PAGE_PRESENT;
- val |= _PAGE_NUMA;
-
- return __pte(val);
-}
-#endif
-
-#ifndef ptep_set_numa
-static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- pte_t ptent = *ptep;
-
- ptent = pte_mknuma(ptent);
- set_pte_at(mm, addr, ptep, ptent);
- return;
-}
-#endif
-
-#ifndef pmd_mknuma
-static inline pmd_t pmd_mknuma(pmd_t pmd)
-{
- pmdval_t val = pmd_val(pmd);
-
- val &= ~_PAGE_PRESENT;
- val |= _PAGE_NUMA;
-
- return __pmd(val);
-}
-#endif
-
-#ifndef pmdp_set_numa
-static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
-
- pmd = pmd_mknuma(pmd);
- set_pmd_at(mm, addr, pmdp, pmd);
- return;
-}
-#endif
-#else
-static inline int pmd_numa(pmd_t pmd)
+static inline int pte_protnone(pte_t pte)
{
return 0;
}
-static inline int pte_numa(pte_t pte)
+static inline int pmd_protnone(pmd_t pmd)
{
return 0;
}
-
-static inline pte_t pte_mknonnuma(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_mknonnuma(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_mknuma(pte_t pte)
-{
- return pte;
-}
-
-static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- return;
-}
-
-
-static inline pmd_t pmd_mknuma(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp)
-{
- return ;
-}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_MMU */
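A minimal usage sketch of the new pte_protnone() helper, assuming a generic fault path; example_check_numa_hint() and handle_numa_hint() are hypothetical names, not part of this patch:

	/* Sketch only: distinguishing a NUMA hinting fault from plain PROT_NONE. */
	static int example_check_numa_hint(struct vm_area_struct *vma, pte_t pte)
	{
		/*
		 * pte_protnone() only means "NUMA hint" when the VMA itself is
		 * accessible; real PROT_NONE mappings are rejected earlier by the
		 * access checks in the fault handler.
		 */
		if (pte_protnone(pte) && (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			return handle_numa_hint(vma, pte);	/* hypothetical helper */

		return 0;
	}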
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 08848050922e..db284bff29dc 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
- tlb->start = TASK_SIZE;
- tlb->end = 0;
+ if (tlb->fullmm) {
+ tlb->start = tlb->end = ~0;
+ } else {
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+ }
}
/*
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bee5d683074d..ac78910d7416 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -478,6 +478,7 @@
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
+ *(SORT(.init_array.*)) \
*(.init_array) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 6d26b40cbf5d..9916d0e4eff5 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -16,7 +16,7 @@
#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
#define __CLKSOURCE_ARM_ARCH_TIMER_H
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/types.h>
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index cd62bf4289e9..178525e5f430 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -50,6 +50,7 @@ struct af_alg_type {
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
+ int (*setauthsize)(void *private, unsigned int authsize);
struct proto_ops *ops;
struct module *owner;
@@ -67,8 +68,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
int af_alg_accept(struct sock *sk, struct socket *newsock);
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
- int write);
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 7ef512f8631c..20e4226a2e14 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -33,21 +33,13 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
sg1[num - 1].page_link |= 0x01;
}
-static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
-{
- if (sg_is_last(sg))
- return NULL;
-
- return (++sg)->length ? sg : sg_chain_ptr(sg);
-}
-
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg,
int chain, int num)
{
if (chain) {
head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
if (sg)
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
new file mode 100644
index 000000000000..5a4f49005169
--- /dev/null
+++ b/include/drm/bridge/dw_hdmi.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DW_HDMI__
+#define __DW_HDMI__
+
+#include <drm/drmP.h>
+
+enum {
+ DW_HDMI_RES_8,
+ DW_HDMI_RES_10,
+ DW_HDMI_RES_12,
+ DW_HDMI_RES_MAX,
+};
+
+enum dw_hdmi_devtype {
+ IMX6Q_HDMI,
+ IMX6DL_HDMI,
+ RK3288_HDMI,
+};
+
+struct dw_hdmi_mpll_config {
+ unsigned long mpixelclock;
+ struct {
+ u16 cpce;
+ u16 gmp;
+ } res[DW_HDMI_RES_MAX];
+};
+
+struct dw_hdmi_curr_ctrl {
+ unsigned long mpixelclock;
+ u16 curr[DW_HDMI_RES_MAX];
+};
+
+struct dw_hdmi_sym_term {
+ unsigned long mpixelclock;
+ u16 sym_ctr; /* clock symbol and transmitter control */
+ u16 term; /* transmission termination value */
+};
+
+struct dw_hdmi_plat_data {
+ enum dw_hdmi_devtype dev_type;
+ const struct dw_hdmi_mpll_config *mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *cur_ctr;
+ const struct dw_hdmi_sym_term *sym_term;
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+};
+
+void dw_hdmi_unbind(struct device *dev, struct device *master, void *data);
+int dw_hdmi_bind(struct device *dev, struct device *master,
+ void *data, struct drm_encoder *encoder,
+ struct resource *iores, int irq,
+ const struct dw_hdmi_plat_data *plat_data);
+#endif /* __DW_HDMI__ */
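A hedged sketch of how platform glue might fill struct dw_hdmi_plat_data and call dw_hdmi_bind(); the table values and all example_* names are illustrative assumptions, not taken from a real driver:

	/* Sketch only: minimal platform data for the DesignWare HDMI bridge. */
	static const struct dw_hdmi_mpll_config example_mpll_cfg[] = {
		/* one {cpce, gmp} pair per color depth (DW_HDMI_RES_8/10/12) */
		{ 74250000, { { 0x0072, 0x0001 }, { 0x2142, 0x0001 }, { 0x40a2, 0x0001 } } },
		{ ~0UL,     { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 } } },
	};

	static const struct dw_hdmi_plat_data example_hdmi_data = {
		.dev_type = IMX6Q_HDMI,
		.mpll_cfg = example_mpll_cfg,
		/* .cur_ctr, .sym_term and .mode_valid omitted in this sketch */
	};

	static int example_hdmi_bind(struct device *dev, struct device *master,
				     void *data, struct drm_encoder *encoder,
				     struct resource *iores, int irq)
	{
		return dw_hdmi_bind(dev, master, data, encoder, iores, irq,
				    &example_hdmi_data);
	}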
diff --git a/include/drm/bridge/ptn3460.h b/include/drm/bridge/ptn3460.h
index ff62344fec6c..b11f8e17e72f 100644
--- a/include/drm/bridge/ptn3460.h
+++ b/include/drm/bridge/ptn3460.h
@@ -15,6 +15,7 @@
#define _DRM_BRIDGE_PTN3460_H_
struct drm_device;
+struct drm_bridge;
struct drm_encoder;
struct i2c_client;
struct device_node;
@@ -23,6 +24,9 @@ struct device_node;
int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
struct i2c_client *client, struct device_node *node);
+
+void ptn3460_destroy(struct drm_bridge *bridge);
+
#else
static inline int ptn3460_init(struct drm_device *dev,
@@ -32,6 +36,10 @@ static inline int ptn3460_init(struct drm_device *dev,
return 0;
}
+static inline void ptn3460_destroy(struct drm_bridge *bridge)
+{
+}
+
#endif
#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e1b2e8b98af7..e928625a9da0 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -143,6 +143,7 @@ void drm_err(const char *format, ...);
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
/***********************************************************************/
/** \name Macros to make printk easier */
@@ -283,6 +284,8 @@ struct drm_file {
* in the plane list
*/
unsigned universal_planes:1;
+ /* true if client understands atomic properties */
+ unsigned atomic:1;
struct pid *pid;
kuid_t uid;
@@ -744,8 +747,6 @@ struct drm_device {
/** \name Context support */
/*@{ */
- bool irq_enabled; /**< True if irq handler is enabled */
- int irq;
__volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */
@@ -753,6 +754,8 @@ struct drm_device {
/** \name VBLANK IRQ support */
/*@{ */
+ bool irq_enabled;
+ int irq;
/*
* At load time, disabling the vblank interrupt won't be allowed since
@@ -954,6 +957,7 @@ extern void drm_master_put(struct drm_master **master);
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
+extern bool drm_atomic;
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ad2229574dd9..51168a8b723a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -38,16 +38,25 @@ void drm_atomic_state_free(struct drm_atomic_state *state);
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
+int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
+int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
+int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val);
int __must_check
-drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
- struct drm_plane *plane, struct drm_crtc *crtc);
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
int __must_check
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index f956b413311e..8039d54a7441 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -30,6 +30,10 @@
#include <drm/drm_crtc.h>
+int drm_atomic_helper_check_modeset(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
@@ -78,6 +82,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
+void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode);
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -123,4 +129,41 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+/**
+ * drm_atomic_plane_disabling - check whether a plane is being disabled
+ * @plane: plane object
+ * @old_state: previous atomic state
+ *
+ * Checks the atomic state of a plane to determine whether it's being disabled
+ * or not. This also WARNs if it detects an invalid state (both CRTC and FB
+ * need to either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being disabled, false otherwise.
+ */
+static inline bool
+drm_atomic_plane_disabling(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) ||
+ (plane->state->crtc != NULL && plane->state->fb == NULL));
+
+ /*
+ * When using the transitional helpers, old_state may be NULL. If so,
+ * we know nothing about the current state and have to assume that it
+ * might be enabled.
+ *
+ * When using the atomic helpers, old_state won't be NULL. Therefore
+ * this check assumes that either the driver will have reconstructed
+ * the correct state in ->reset() or that the driver will have taken
+ * appropriate measures to disable all planes.
+ */
+ return (!old_state || old_state->crtc) && !plane->state->crtc;
+}
+
#endif /* DRM_ATOMIC_HELPER_H_ */
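A hedged usage sketch of drm_atomic_plane_disabling() inside a driver's plane update hook; the foo_plane_*() functions are hypothetical:

	/* Sketch only: deciding between update and disable in ->atomic_update(). */
	static void foo_plane_atomic_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
	{
		if (drm_atomic_plane_disabling(plane, old_state)) {
			/* CRTC and FB were dropped together: turn the plane off. */
			foo_plane_disable_hw(plane);		/* hypothetical */
			return;
		}

		/* Otherwise program the new framebuffer and coordinates. */
		foo_plane_program_hw(plane, plane->state);	/* hypothetical */
	}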
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b86329813ad3..920e21a8f3fd 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,6 +31,7 @@
#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
+#include <linux/media-bus-format.h>
#include <uapi/drm/drm_mode.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
@@ -63,8 +64,16 @@ struct drm_mode_object {
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
- int count;
- uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ int count, atomic_count;
+ /* NOTE: if we ever start dynamically destroying properties (ie.
+ * not at drm_mode_config_cleanup() time), then we'd have to do
+ * a better job of detaching property from mode objects to avoid
+ * dangling property pointers:
+ */
+ struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+ /* do not read/write values directly, but use drm_object_property_get_value()
+ * and drm_object_property_set_value():
+ */
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
@@ -131,6 +140,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
u32 color_formats;
+ const u32 *bus_formats;
+ unsigned int num_bus_formats;
+
/* Mask of supported hdmi deep color modes */
u8 edid_hdmi_dc_modes;
@@ -237,8 +249,11 @@ struct drm_atomic_state;
/**
* struct drm_crtc_state - mutable CRTC state
+ * @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
+ * @active: whether the CRTC is actively displaying (used for DPMS)
* @mode_changed: for use by helpers and drivers when computing state updates
+ * @active_changed: for use by helpers and drivers when computing state updates
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
@@ -248,13 +263,23 @@ struct drm_atomic_state;
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
+ *
+ * Note that the distinction between @enable and @active is rather subtle:
+ * Flipping @active while @enable is set, without changing anything else, must
+ * never fail in the ->atomic_check callback, since userspace assumes that a
+ * DPMS On will always succeed. In other words: @enable controls resource
+ * assignment, @active controls the actual hardware state.
*/
struct drm_crtc_state {
+ struct drm_crtc *crtc;
+
bool enable;
+ bool active;
/* computed state bits used by helpers and drivers */
bool planes_changed : 1;
bool mode_changed : 1;
+ bool active_changed : 1;
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
@@ -292,6 +317,9 @@ struct drm_crtc_state {
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
* @atomic_destroy_state: destroy an atomic state for this CRTC
* @atomic_set_property: set a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_set_property())
+ * @atomic_get_property: get a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_get_property())
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -351,6 +379,10 @@ struct drm_crtc_funcs {
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
/**
@@ -449,11 +481,14 @@ struct drm_crtc {
/**
* struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
* @crtc: CRTC to connect connector to, NULL if disabled
* @best_encoder: can be used by helpers and drivers to select the encoder
* @state: backpointer to global drm_atomic_state
*/
struct drm_connector_state {
+ struct drm_connector *connector;
+
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
struct drm_encoder *best_encoder;
@@ -463,7 +498,7 @@ struct drm_connector_state {
/**
* struct drm_connector_funcs - control connectors on a given device
- * @dpms: set power state (see drm_crtc_funcs above)
+ * @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
@@ -475,6 +510,9 @@ struct drm_connector_state {
* @atomic_duplicate_state: duplicate the atomic state for this connector
* @atomic_destroy_state: destroy an atomic state for this connector
* @atomic_set_property: set a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_set_property())
+ * @atomic_get_property: get a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_get_property())
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
@@ -508,6 +546,10 @@ struct drm_connector_funcs {
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
/**
@@ -693,6 +735,7 @@ struct drm_connector {
/**
* struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
* @crtc: currently bound CRTC, NULL if disabled
* @fb: currently bound framebuffer
* @fence: optional fence to wait for before scanning out @fb
@@ -709,6 +752,8 @@ struct drm_connector {
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
+ struct drm_plane *plane;
+
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
struct fence *fence;
@@ -721,6 +766,9 @@ struct drm_plane_state {
uint32_t src_x, src_y;
uint32_t src_h, src_w;
+ /* Plane rotation */
+ unsigned int rotation;
+
struct drm_atomic_state *state;
};
@@ -735,6 +783,9 @@ struct drm_plane_state {
* @atomic_duplicate_state: duplicate the atomic state for this plane
* @atomic_destroy_state: destroy an atomic state for this plane
* @atomic_set_property: set a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_set_property())
+ * @atomic_get_property: get a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_get_property())
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -758,6 +809,10 @@ struct drm_plane_funcs {
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
enum drm_plane_type {
@@ -813,15 +868,16 @@ struct drm_plane {
/**
* struct drm_bridge_funcs - drm_bridge control functions
+ * @attach: Called during drm_bridge_attach
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
* @mode_set: Set this mode to the bridge
* @pre_enable: Called right before encoder commit, for lockstepped commit
* @enable: Called right after encoder commit, enables the bridge
- * @destroy: make object go away
*/
struct drm_bridge_funcs {
+ int (*attach)(struct drm_bridge *bridge);
bool (*mode_fixup)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
@@ -832,22 +888,24 @@ struct drm_bridge_funcs {
struct drm_display_mode *adjusted_mode);
void (*pre_enable)(struct drm_bridge *bridge);
void (*enable)(struct drm_bridge *bridge);
- void (*destroy)(struct drm_bridge *bridge);
};
/**
* struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
- * @head: list management
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
* @base: base mode object
* @funcs: control functions
* @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
struct drm_device *dev;
- struct list_head head;
-
- struct drm_mode_object base;
+ struct drm_encoder *encoder;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
+ struct list_head list;
const struct drm_bridge_funcs *funcs;
void *driver_private;
@@ -856,7 +914,8 @@ struct drm_bridge {
/**
* struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
- * @flags: state flags like async update
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
@@ -868,7 +927,8 @@ struct drm_bridge {
*/
struct drm_atomic_state {
struct drm_device *dev;
- uint32_t flags;
+ bool allow_modeset : 1;
+ bool legacy_cursor_update : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
@@ -950,7 +1010,6 @@ struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
- uint32_t num_bridges;
/* list of object IDs for this group */
uint32_t *id_list;
@@ -969,8 +1028,6 @@ struct drm_mode_group {
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
* @connector_list: list of connector objects
- * @num_bridge: number of bridges on this device
- * @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_overlay_plane: number of overlay planes on this device
@@ -1015,8 +1072,6 @@ struct drm_mode_config {
int num_connector;
struct list_head connector_list;
- int num_bridge;
- struct list_head bridge_list;
int num_encoder;
struct list_head encoder_list;
@@ -1043,6 +1098,7 @@ struct drm_mode_config {
/* output poll support */
bool poll_enabled;
bool poll_running;
+ bool delayed_event;
struct delayed_work output_poll_work;
/* pointers to standard properties */
@@ -1053,6 +1109,17 @@ struct drm_mode_config {
struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
+ struct drm_property *prop_src_x;
+ struct drm_property *prop_src_y;
+ struct drm_property *prop_src_w;
+ struct drm_property *prop_src_h;
+ struct drm_property *prop_crtc_x;
+ struct drm_property *prop_crtc_y;
+ struct drm_property *prop_crtc_w;
+ struct drm_property *prop_crtc_h;
+ struct drm_property *prop_fb_id;
+ struct drm_property *prop_crtc_id;
+ struct drm_property *prop_active;
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
@@ -1153,9 +1220,10 @@ extern unsigned int drm_connector_index(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
-extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
- const struct drm_bridge_funcs *funcs);
-extern void drm_bridge_cleanup(struct drm_bridge *bridge);
+extern int drm_bridge_add(struct drm_bridge *bridge);
+extern void drm_bridge_remove(struct drm_bridge *bridge);
+extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
@@ -1191,6 +1259,8 @@ extern int drm_plane_init(struct drm_device *dev,
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
+extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
@@ -1224,6 +1294,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
+extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
+ const u32 *formats,
+ unsigned int num_formats);
+
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
{
@@ -1279,6 +1353,8 @@ struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
@@ -1290,6 +1366,10 @@ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+extern bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref);
+extern void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1381,6 +1461,8 @@ extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
+extern int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
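A minimal sketch of the @enable/@active split documented in struct drm_crtc_state above, assuming a driver's atomic check hook; the foo_*() names are hypothetical:

	/* Sketch only: @enable gates resource validation, while a pure @active
	 * toggle (DPMS On/Off) must never make ->atomic_check() fail.
	 */
	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
					 struct drm_crtc_state *state)
	{
		/* Validate shared resources only when the configuration changes. */
		if (state->mode_changed && !foo_crtc_resources_ok(crtc, state))
			return -EINVAL;	/* foo_crtc_resources_ok() is hypothetical */

		/* Deliberately no checks tied to state->active here. */
		return 0;
	}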
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 7adbb65ea8ae..c250a22b39ab 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,17 +39,38 @@
#include <linux/fb.h>
+#include <drm/drm_crtc.h>
+
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
};
/**
- * drm_crtc_helper_funcs - helper operations for CRTCs
- * @mode_fixup: try to fixup proposed mode for this connector
+ * struct drm_crtc_helper_funcs - helper operations for CRTCs
+ * @dpms: set power state
+ * @prepare: prepare the CRTC, called before @mode_set
+ * @commit: commit changes to CRTC, called after @mode_set
+ * @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
+ * @mode_set_nofb: set mode only (no scanout buffer attached)
+ * @mode_set_base: update the scanout buffer
+ * @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
+ * @load_lut: load color palette
+ * @disable: disable CRTC when no longer in use
+ * @enable: enable CRTC
+ * @atomic_check: check for validity of an atomic state
+ * @atomic_begin: begin atomic update
+ * @atomic_flush: flush atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
+ *
+ * Note that with atomic helpers the @dpms, @prepare and @commit hooks are
+ * deprecated. Use @enable and @disable exclusively instead.
+ *
+ * With legacy crtc helpers there's a big semantic difference between @disable
+ * and the other hooks: @disable also needs to release any resources acquired in
+ * @mode_set (like shared PLLs).
*/
struct drm_crtc_helper_funcs {
/*
@@ -80,8 +101,8 @@ struct drm_crtc_helper_funcs {
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
- /* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
+ void (*enable)(struct drm_crtc *crtc);
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
@@ -91,11 +112,28 @@ struct drm_crtc_helper_funcs {
};
/**
- * drm_encoder_helper_funcs - helper operations for encoders
+ * struct drm_encoder_helper_funcs - helper operations for encoders
+ * @dpms: set power state
+ * @save: save connector state
+ * @restore: restore connector state
* @mode_fixup: try to fixup proposed mode for this connector
+ * @prepare: part of the disable sequence, called before the CRTC modeset
+ * @commit: called after the CRTC modeset
* @mode_set: set this mode
+ * @get_crtc: return CRTC that the encoder is currently attached to
+ * @detect: connection status detection
+ * @disable: disable encoder when not in use (overrides DPMS off)
+ * @enable: enable encoder
+ * @atomic_check: check for validity of an atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
+ *
+ * Note that with atomic helpers the @dpms, @prepare and @commit hooks are
+ * deprecated. Use @enable and @disable exclusively instead.
+ *
+ * With legacy crtc helpers there's a big semantic difference between @disable
+ * and the other hooks: @disable also needs to release any resources acquired in
+ * @mode_set (like shared PLLs).
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
@@ -114,14 +152,21 @@ struct drm_encoder_helper_funcs {
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
- /* disable encoder when not in use - more explicit than dpms off */
void (*disable)(struct drm_encoder *encoder);
+
+ void (*enable)(struct drm_encoder *encoder);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
};
/**
- * drm_connector_helper_funcs - helper operations for connectors
+ * struct drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
- * @mode_valid (optional): is this mode valid on the given connector?
+ * @mode_valid: is this mode valid on the given connector? (optional)
+ * @best_encoder: return the preferred encoder for this connector
*
* The helper operations are called by the mid-layer CRTC helper.
*/
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 11f8c84f98ce..7e25030a6aa2 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -586,6 +586,7 @@ struct drm_dp_link {
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_aux_register(struct drm_dp_aux *aux);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index b597068103aa..21b944c456f6 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -125,7 +125,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
-bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 91d0582f924e..d92f6dd1fb11 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -90,6 +90,9 @@ enum drm_mode_status {
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
+#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
+#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
@@ -197,6 +200,8 @@ struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int GTF_K, int GTF_2J);
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
+void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
+ struct videomode *vm);
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
int index);
@@ -217,9 +222,9 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
/* for use by the crtc helper probe functions */
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY);
+enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
+enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
+ int maxX, int maxY);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index a185392cafeb..31c11d36fae6 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -52,7 +52,8 @@ extern int drm_crtc_init(struct drm_device *dev,
* @prepare_fb: prepare a framebuffer for use by the plane
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
* @atomic_check: check that a given atomic state is valid and can be applied
- * @atomic_update: apply an atomic state to the plane
+ * @atomic_update: apply an atomic state to the plane (mandatory)
+ * @atomic_disable: disable the plane
*
* The helper operations are called by the mid-layer CRTC helper.
*/
@@ -66,6 +67,8 @@ struct drm_plane_helper_funcs {
struct drm_plane_state *state);
void (*atomic_update)(struct drm_plane *plane,
struct drm_plane_state *old_state);
+ void (*atomic_disable)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
};
static inline void drm_plane_helper_add(struct drm_plane *plane,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
new file mode 100644
index 000000000000..3e2f22e5bf3c
--- /dev/null
+++ b/include/drm/i915_component.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _I915_COMPONENT_H_
+#define _I915_COMPONENT_H_
+
+struct i915_audio_component {
+ struct device *dev;
+
+ const struct i915_audio_component_ops {
+ struct module *owner;
+ void (*get_power)(struct device *);
+ void (*put_power)(struct device *);
+ int (*get_cdclk_freq)(struct device *);
+ } *ops;
+};
+
+#endif /* _I915_COMPONENT_H_ */
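A hedged sketch of how a consumer (for instance an HDA audio driver) might use the component ops above; the function name and error handling are illustrative:

	/* Sketch only: query the display clock via the bound audio component. */
	static int example_get_display_clock(struct i915_audio_component *acomp)
	{
		int freq;

		if (!acomp || !acomp->ops)
			return -ENODEV;

		acomp->ops->get_power(acomp->dev);	/* hold the display power well */
		freq = acomp->ops->get_cdclk_freq(acomp->dev);
		acomp->ops->put_power(acomp->dev);

		return freq;
	}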
diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
deleted file mode 100644
index baa6f11b1837..000000000000
--- a/include/drm/i915_powerwell.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2013 Intel Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-
-#ifndef _I915_POWERWELL_H_
-#define _I915_POWERWELL_H_
-
-/* For use by hda_i915 driver */
-extern int i915_request_power_well(void);
-extern int i915_release_power_well(void);
-extern int i915_get_cdclk_freq(void);
-
-#endif /* _I915_POWERWELL_H_ */
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index f60ce72a2b2c..1c34c24efe08 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -81,6 +81,9 @@
#define SCLK_SDIO1_SAMPLE 120
#define SCLK_EMMC_SAMPLE 121
+#define SCLK_MAC 151
+#define SCLK_MACREF_OUT 152
+
#define DCLK_VOP0 190
#define DCLK_VOP1 191
diff --git a/include/dt-bindings/gpio/meson8-gpio.h b/include/dt-bindings/gpio/meson8-gpio.h
new file mode 100644
index 000000000000..fdaeb5cbf5e1
--- /dev/null
+++ b/include/dt-bindings/gpio/meson8-gpio.h
@@ -0,0 +1,157 @@
+/*
+ * GPIO definitions for Amlogic Meson8 SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON8_GPIO_H
+#define _DT_BINDINGS_MESON8_GPIO_H
+
+/* First GPIO chip */
+#define GPIOX_0 0
+#define GPIOX_1 1
+#define GPIOX_2 2
+#define GPIOX_3 3
+#define GPIOX_4 4
+#define GPIOX_5 5
+#define GPIOX_6 6
+#define GPIOX_7 7
+#define GPIOX_8 8
+#define GPIOX_9 9
+#define GPIOX_10 10
+#define GPIOX_11 11
+#define GPIOX_12 12
+#define GPIOX_13 13
+#define GPIOX_14 14
+#define GPIOX_15 15
+#define GPIOX_16 16
+#define GPIOX_17 17
+#define GPIOX_18 18
+#define GPIOX_19 19
+#define GPIOX_20 20
+#define GPIOX_21 21
+#define GPIOY_0 22
+#define GPIOY_1 23
+#define GPIOY_2 24
+#define GPIOY_3 25
+#define GPIOY_4 26
+#define GPIOY_5 27
+#define GPIOY_6 28
+#define GPIOY_7 29
+#define GPIOY_8 30
+#define GPIOY_9 31
+#define GPIOY_10 32
+#define GPIOY_11 33
+#define GPIOY_12 34
+#define GPIOY_13 35
+#define GPIOY_14 36
+#define GPIOY_15 37
+#define GPIOY_16 38
+#define GPIODV_0 39
+#define GPIODV_1 40
+#define GPIODV_2 41
+#define GPIODV_3 42
+#define GPIODV_4 43
+#define GPIODV_5 44
+#define GPIODV_6 45
+#define GPIODV_7 46
+#define GPIODV_8 47
+#define GPIODV_9 48
+#define GPIODV_10 49
+#define GPIODV_11 50
+#define GPIODV_12 51
+#define GPIODV_13 52
+#define GPIODV_14 53
+#define GPIODV_15 54
+#define GPIODV_16 55
+#define GPIODV_17 56
+#define GPIODV_18 57
+#define GPIODV_19 58
+#define GPIODV_20 59
+#define GPIODV_21 60
+#define GPIODV_22 61
+#define GPIODV_23 62
+#define GPIODV_24 63
+#define GPIODV_25 64
+#define GPIODV_26 65
+#define GPIODV_27 66
+#define GPIODV_28 67
+#define GPIODV_29 68
+#define GPIOH_0 69
+#define GPIOH_1 70
+#define GPIOH_2 71
+#define GPIOH_3 72
+#define GPIOH_4 73
+#define GPIOH_5 74
+#define GPIOH_6 75
+#define GPIOH_7 76
+#define GPIOH_8 77
+#define GPIOH_9 78
+#define GPIOZ_0 79
+#define GPIOZ_1 80
+#define GPIOZ_2 81
+#define GPIOZ_3 82
+#define GPIOZ_4 83
+#define GPIOZ_5 84
+#define GPIOZ_6 85
+#define GPIOZ_7 86
+#define GPIOZ_8 87
+#define GPIOZ_9 88
+#define GPIOZ_10 89
+#define GPIOZ_11 90
+#define GPIOZ_12 91
+#define GPIOZ_13 92
+#define GPIOZ_14 93
+#define CARD_0 94
+#define CARD_1 95
+#define CARD_2 96
+#define CARD_3 97
+#define CARD_4 98
+#define CARD_5 99
+#define CARD_6 100
+#define BOOT_0 101
+#define BOOT_1 102
+#define BOOT_2 103
+#define BOOT_3 104
+#define BOOT_4 105
+#define BOOT_5 106
+#define BOOT_6 107
+#define BOOT_7 108
+#define BOOT_8 109
+#define BOOT_9 110
+#define BOOT_10 111
+#define BOOT_11 112
+#define BOOT_12 113
+#define BOOT_13 114
+#define BOOT_14 115
+#define BOOT_15 116
+#define BOOT_16 117
+#define BOOT_17 118
+#define BOOT_18 119
+
+/* Second GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+#define GPIO_BSD_EN 14
+#define GPIO_TEST_N 15
+
+#endif /* _DT_BINDINGS_MESON8_GPIO_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
new file mode 100644
index 000000000000..42121fa238fa
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_H
+
+/* Voltage ADC channels */
+#define VADC_USBIN 0x00
+#define VADC_DCIN 0x01
+#define VADC_VCHG_SNS 0x02
+#define VADC_SPARE1_03 0x03
+#define VADC_USB_ID_MV 0x04
+#define VADC_VCOIN 0x05
+#define VADC_VBAT_SNS 0x06
+#define VADC_VSYS 0x07
+#define VADC_DIE_TEMP 0x08
+#define VADC_REF_625MV 0x09
+#define VADC_REF_1250MV 0x0a
+#define VADC_CHG_TEMP 0x0b
+#define VADC_SPARE1 0x0c
+#define VADC_SPARE2 0x0d
+#define VADC_GND_REF 0x0e
+#define VADC_VDD_VADC 0x0f
+
+#define VADC_P_MUX1_1_1 0x10
+#define VADC_P_MUX2_1_1 0x11
+#define VADC_P_MUX3_1_1 0x12
+#define VADC_P_MUX4_1_1 0x13
+#define VADC_P_MUX5_1_1 0x14
+#define VADC_P_MUX6_1_1 0x15
+#define VADC_P_MUX7_1_1 0x16
+#define VADC_P_MUX8_1_1 0x17
+#define VADC_P_MUX9_1_1 0x18
+#define VADC_P_MUX10_1_1 0x19
+#define VADC_P_MUX11_1_1 0x1a
+#define VADC_P_MUX12_1_1 0x1b
+#define VADC_P_MUX13_1_1 0x1c
+#define VADC_P_MUX14_1_1 0x1d
+#define VADC_P_MUX15_1_1 0x1e
+#define VADC_P_MUX16_1_1 0x1f
+
+#define VADC_P_MUX1_1_3 0x20
+#define VADC_P_MUX2_1_3 0x21
+#define VADC_P_MUX3_1_3 0x22
+#define VADC_P_MUX4_1_3 0x23
+#define VADC_P_MUX5_1_3 0x24
+#define VADC_P_MUX6_1_3 0x25
+#define VADC_P_MUX7_1_3 0x26
+#define VADC_P_MUX8_1_3 0x27
+#define VADC_P_MUX9_1_3 0x28
+#define VADC_P_MUX10_1_3 0x29
+#define VADC_P_MUX11_1_3 0x2a
+#define VADC_P_MUX12_1_3 0x2b
+#define VADC_P_MUX13_1_3 0x2c
+#define VADC_P_MUX14_1_3 0x2d
+#define VADC_P_MUX15_1_3 0x2e
+#define VADC_P_MUX16_1_3 0x2f
+
+#define VADC_LR_MUX1_BAT_THERM 0x30
+#define VADC_LR_MUX2_BAT_ID 0x31
+#define VADC_LR_MUX3_XO_THERM 0x32
+#define VADC_LR_MUX4_AMUX_THM1 0x33
+#define VADC_LR_MUX5_AMUX_THM2 0x34
+#define VADC_LR_MUX6_AMUX_THM3 0x35
+#define VADC_LR_MUX7_HW_ID 0x36
+#define VADC_LR_MUX8_AMUX_THM4 0x37
+#define VADC_LR_MUX9_AMUX_THM5 0x38
+#define VADC_LR_MUX10_USB_ID 0x39
+#define VADC_AMUX_PU1 0x3a
+#define VADC_AMUX_PU2 0x3b
+#define VADC_LR_MUX3_BUF_XO_THERM 0x3c
+
+#define VADC_LR_MUX1_PU1_BAT_THERM 0x70
+#define VADC_LR_MUX2_PU1_BAT_ID 0x71
+#define VADC_LR_MUX3_PU1_XO_THERM 0x72
+#define VADC_LR_MUX4_PU1_AMUX_THM1 0x73
+#define VADC_LR_MUX5_PU1_AMUX_THM2 0x74
+#define VADC_LR_MUX6_PU1_AMUX_THM3 0x75
+#define VADC_LR_MUX7_PU1_AMUX_HW_ID 0x76
+#define VADC_LR_MUX8_PU1_AMUX_THM4 0x77
+#define VADC_LR_MUX9_PU1_AMUX_THM5 0x78
+#define VADC_LR_MUX10_PU1_AMUX_USB_ID 0x79
+#define VADC_LR_MUX3_BUF_PU1_XO_THERM 0x7c
+
+#define VADC_LR_MUX1_PU2_BAT_THERM 0xb0
+#define VADC_LR_MUX2_PU2_BAT_ID 0xb1
+#define VADC_LR_MUX3_PU2_XO_THERM 0xb2
+#define VADC_LR_MUX4_PU2_AMUX_THM1 0xb3
+#define VADC_LR_MUX5_PU2_AMUX_THM2 0xb4
+#define VADC_LR_MUX6_PU2_AMUX_THM3 0xb5
+#define VADC_LR_MUX7_PU2_AMUX_HW_ID 0xb6
+#define VADC_LR_MUX8_PU2_AMUX_THM4 0xb7
+#define VADC_LR_MUX9_PU2_AMUX_THM5 0xb8
+#define VADC_LR_MUX10_PU2_AMUX_USB_ID 0xb9
+#define VADC_LR_MUX3_BUF_PU2_XO_THERM 0xbc
+
+#define VADC_LR_MUX1_PU1_PU2_BAT_THERM 0xf0
+#define VADC_LR_MUX2_PU1_PU2_BAT_ID 0xf1
+#define VADC_LR_MUX3_PU1_PU2_XO_THERM 0xf2
+#define VADC_LR_MUX4_PU1_PU2_AMUX_THM1 0xf3
+#define VADC_LR_MUX5_PU1_PU2_AMUX_THM2 0xf4
+#define VADC_LR_MUX6_PU1_PU2_AMUX_THM3 0xf5
+#define VADC_LR_MUX7_PU1_PU2_AMUX_HW_ID 0xf6
+#define VADC_LR_MUX8_PU1_PU2_AMUX_THM4 0xf7
+#define VADC_LR_MUX9_PU1_PU2_AMUX_THM5 0xf8
+#define VADC_LR_MUX10_PU1_PU2_AMUX_USB_ID 0xf9
+#define VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM 0xfc
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 1ea1b702fec2..d4110d5caa3e 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -7,14 +7,14 @@
#include <dt-bindings/interrupt-controller/irq.h>
-/* interrupt specific cell 0 */
+/* interrupt specifier cell 0 */
#define GIC_SPI 0
#define GIC_PPI 1
/*
* Interrupt specifier cell 2.
- * The flaggs in irq.h are valid, plus those below.
+ * The flags in irq.h are valid, plus those below.
*/
#define GIC_CPU_MASK_RAW(x) ((x) << 8)
#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h
new file mode 100644
index 000000000000..0c69818d530c
--- /dev/null
+++ b/include/dt-bindings/sound/samsung-i2s.h
@@ -0,0 +1,8 @@
+#ifndef _DT_BINDINGS_SAMSUNG_I2S_H
+#define _DT_BINDINGS_SAMSUNG_I2S_H
+
+#define CLK_I2S_CDCLK 0
+#define CLK_I2S_RCLK_SRC 1
+#define CLK_I2S_RCLK_PSR 2
+
+#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ac4888dc86bc..7c55dd5dd2c9 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -33,10 +33,11 @@
#define VGIC_V2_MAX_LRS (1 << 6)
#define VGIC_V3_MAX_LRS 16
#define VGIC_MAX_IRQS 1024
+#define VGIC_V2_MAX_CPUS 8
/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 8)
-#error Invalid number of CPU interfaces
+#if (KVM_MAX_VCPUS > 255)
+#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
#endif
#if (VGIC_NR_IRQS_LEGACY & 31)
@@ -132,6 +133,18 @@ struct vgic_params {
unsigned int maint_irq;
/* Virtual control interface base address */
void __iomem *vctrl_base;
+ int max_gic_vcpus;
+ /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+ bool can_emulate_gicv2;
+};
+
+struct vgic_vm_ops {
+ bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
+ struct kvm_exit_mmio *);
+ bool (*queue_sgi)(struct kvm_vcpu *, int irq);
+ void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
+ int (*init_model)(struct kvm *);
+ int (*map_resources)(struct kvm *, const struct vgic_params *);
};
struct vgic_dist {
@@ -140,6 +153,9 @@ struct vgic_dist {
bool in_kernel;
bool ready;
+ /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
+ u32 vgic_model;
+
int nr_cpus;
int nr_irqs;
@@ -148,7 +164,11 @@ struct vgic_dist {
/* Distributor and vcpu interface mapping in the guest */
phys_addr_t vgic_dist_base;
- phys_addr_t vgic_cpu_base;
+ /* GICv2 and GICv3 use different mapped register blocks */
+ union {
+ phys_addr_t vgic_cpu_base;
+ phys_addr_t vgic_redist_base;
+ };
/* Distributor enabled */
u32 enabled;
@@ -210,8 +230,13 @@ struct vgic_dist {
*/
struct vgic_bitmap *irq_spi_target;
+ /* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
+ u32 *irq_spi_mpidr;
+
/* Bitmap indicating which CPU has something pending */
unsigned long *irq_pending_on_cpu;
+
+ struct vgic_vm_ops vm_ops;
#endif
};
@@ -229,6 +254,7 @@ struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
u32 vgic_hcr;
u32 vgic_vmcr;
+ u32 vgic_sre; /* Restored only, change ignored */
u32 vgic_misr; /* Saved only */
u32 vgic_eisr; /* Saved only */
u32 vgic_elrsr; /* Saved only */
@@ -275,13 +301,15 @@ struct kvm_exit_mmio;
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
-int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_get_max_vcpus(void);
+int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
@@ -327,7 +355,7 @@ static inline int kvm_vgic_map_resources(struct kvm *kvm)
return 0;
}
-static inline int kvm_vgic_create(struct kvm *kvm)
+static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
return 0;
}
@@ -379,6 +407,11 @@ static inline bool vgic_ready(struct kvm *kvm)
{
return true;
}
+
+static inline int kvm_vgic_get_max_vcpus(void)
+{
+ return KVM_MAX_VCPUS;
+}
#endif
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d459cd17b477..24c7aa8b1d20 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
+#include <linux/resource_ext.h>
#include <linux/device.h>
#include <linux/property.h>
@@ -151,6 +152,10 @@ int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
+#endif
+
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
@@ -288,22 +293,25 @@ extern int pnpacpi_disabled;
bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
bool acpi_dev_resource_address_space(struct acpi_resource *ares,
- struct resource *res);
+ struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
- struct resource *res);
+ struct resource_win *win);
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
-struct resource_list_entry {
- struct list_head node;
- struct resource res;
-};
-
void acpi_dev_free_resource_list(struct list_head *list);
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data);
+int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+ unsigned long types);
+
+static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
+ void *arg)
+{
+ return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
+}
int acpi_check_resource_conflict(const struct resource *res);
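A hedged sketch of the new filter callback used together with acpi_dev_get_resources(); the wrapper function and the choice of IORESOURCE_MEM are illustrative assumptions:

	/* Sketch only: collect just the memory-type resources of an ACPI device. */
	static int example_get_mem_resources(struct acpi_device *adev,
					     struct list_head *list)
	{
		int ret;

		INIT_LIST_HEAD(list);
		ret = acpi_dev_get_resources(adev, list,
					     acpi_dev_filter_resource_type_cb,
					     (void *)IORESOURCE_MEM);
		if (ret < 0)
			return ret;

		/* ... consume the entries ..., then release them. */
		acpi_dev_free_resource_list(list);
		return 0;
	}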
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 642d6ae4030c..a270f25ee7c7 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -21,16 +21,20 @@ struct device;
struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
+struct scsi_host_template;
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
struct ahci_host_priv *ahci_platform_get_resources(
struct platform_device *pdev);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
- const struct ata_port_info *pi_template);
+ const struct ata_port_info *pi_template,
+ struct scsi_host_template *sht);
int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 2afc618b15ce..50fc66868402 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -33,6 +33,7 @@ struct amba_device {
struct clk *pclk;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
+ char *driver_override;
};
struct amba_driver {
@@ -92,11 +93,15 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
-#define amba_pclk_enable(d) \
- (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk))
+static inline int amba_pclk_enable(struct amba_device *dev)
+{
+ return clk_enable(dev->pclk);
+}
-#define amba_pclk_disable(d) \
- do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
+static inline void amba_pclk_disable(struct amba_device *dev)
+{
+ clk_disable(dev->pclk);
+}
static inline int amba_pclk_prepare(struct amba_device *dev)
{
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f2f4d8da97c0..1648026e06b4 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -503,7 +503,7 @@ struct ata_bmdma_prd {
#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8))
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
-#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 5c618a084225..619d9e78e644 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -10,12 +10,15 @@ struct pata_platform_info {
unsigned int ioport_shift;
};
+struct scsi_host_template;
+
extern int __pata_platform_probe(struct device *dev,
struct resource *io_res,
struct resource *ctl_res,
struct resource *irq_res,
unsigned int ioport_shift,
- int __pio_mask);
+ int __pio_mask,
+ struct scsi_host_template *sht);
/*
* Marvell SATA private data
diff --git a/include/linux/audit.h b/include/linux/audit.h
index af84234e1f6e..599f3bd2d6c5 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -46,7 +46,6 @@ struct audit_tree;
struct sk_buff;
struct audit_krule {
- int vers_ops;
u32 pflags;
u32 flags;
u32 listnr;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5da6012b7a14..d94077fea1f8 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -106,6 +106,8 @@ struct backing_dev_info {
#endif
};
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
+
int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
@@ -114,7 +116,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
-int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
@@ -228,46 +230,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
* BDI_CAP_NO_WRITEBACK: Don't write pages back
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
- *
- * These flags let !MMU mmap() govern direct device mapping vs immediate
- * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
- *
- * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
- * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
- * BDI_CAP_READ_MAP: Can be mapped for reading
- * BDI_CAP_WRITE_MAP: Can be mapped for writing
- * BDI_CAP_EXEC_MAP: Can be mapped for execution
- *
- * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
- *
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_MAP_COPY 0x00000004
-#define BDI_CAP_MAP_DIRECT 0x00000008
-#define BDI_CAP_READ_MAP 0x00000010
-#define BDI_CAP_WRITE_MAP 0x00000020
-#define BDI_CAP_EXEC_MAP 0x00000040
-#define BDI_CAP_NO_ACCT_WB 0x00000080
-#define BDI_CAP_SWAP_BACKED 0x00000100
-#define BDI_CAP_STABLE_WRITES 0x00000200
-#define BDI_CAP_STRICTLIMIT 0x00000400
-
-#define BDI_CAP_VMFLAGS \
- (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
+#define BDI_CAP_NO_ACCT_WB 0x00000004
+#define BDI_CAP_STABLE_WRITES 0x00000008
+#define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
-#if defined(VM_MAYREAD) && \
- (BDI_CAP_READ_MAP != VM_MAYREAD || \
- BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
- BDI_CAP_EXEC_MAP != VM_MAYEXEC)
-#error please change backing_dev_info::capabilities flags
-#endif
-
-extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
int writeback_in_progress(struct backing_dev_info *bdi);
@@ -329,24 +302,14 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
BDI_CAP_NO_WRITEBACK));
}
-static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_SWAP_BACKED;
-}
-
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
- return bdi_cap_writeback_dirty(mapping->backing_dev_info);
+ return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}
static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
- return bdi_cap_account_dirty(mapping->backing_dev_info);
-}
-
-static inline bool mapping_cap_swap_backed(struct address_space *mapping)
-{
- return bdi_cap_swap_backed(mapping->backing_dev_info);
+ return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
static inline int bdi_sched_wait(void *word)
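Illustration (not part of the patch): with mapping->backing_dev_info going away, callers derive the bdi from the inode instead; a minimal sketch, assuming an `inode` pointer in filesystem code.

	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (bdi_cap_writeback_dirty(bdi))
		bdi_start_background_writeback(bdi);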
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index eb1c6a47b67f..994739da827f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -318,6 +318,7 @@ struct bcma_bus {
const struct bcma_host_ops *ops;
enum bcma_hosttype hosttype;
+ bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
union {
/* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
struct pci_dev *host_pci;
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 0333e605ea0d..3f809ae372c4 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -223,6 +223,7 @@ struct bcma_drv_pci_host {
struct bcma_drv_pci {
struct bcma_device *core;
+ u8 early_setup_done:1;
u8 setup_done:1;
u8 hostmode:1;
@@ -237,6 +238,7 @@ struct bcma_drv_pci {
#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
+extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index e64ae7bf80a1..ebd5c1fcdea4 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -64,6 +64,8 @@
#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
+#define BCMA_PCIE2_BAR0_WIN2 0x70
+
/* SiliconBackplane Address Map.
* All regions may not exist on all chips.
*/
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index f24d245f8394..1b5fc0c3b1b5 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -5,8 +5,6 @@
struct bcma_soc {
struct bcma_bus bus;
- struct bcma_device core_cc;
- struct bcma_device core_mips;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index efead0b532c4..da3a127c9958 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -428,13 +428,9 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
-extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
- unsigned long, unsigned int, int, gfp_t);
-struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
- struct block_device *,
- const struct sg_iovec *, int, int, gfp_t);
+ const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
@@ -462,12 +458,10 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
-extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
- unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *,
- const struct sg_iovec *,
- int, int, gfp_t);
+ const struct iov_iter *,
+ gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 202e4034fe26..dbfbf4990005 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -52,16 +52,13 @@
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
*/
/*
@@ -96,10 +93,10 @@ extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
-extern void __bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int shift, int bits);
-extern void __bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int shift, int bits);
+extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
@@ -147,31 +144,31 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-extern int bitmap_scnprintf(char *buf, unsigned int len,
- const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
-extern int bitmap_scnlistprintf(char *buf, unsigned int len,
- const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
- const unsigned long *old, const unsigned long *new, int bits);
+ const unsigned long *old, const unsigned long *new, unsigned int nbits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
- const unsigned long *relmap, int bits);
+ const unsigned long *relmap, unsigned int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
- int sz, int bits);
+ unsigned int sz, unsigned int nbits);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
-extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
+#ifdef __BIG_ENDIAN
+extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
+#else
+#define bitmap_copy_le bitmap_copy
+#endif
+extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
@@ -185,33 +182,33 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
-static inline void bitmap_zero(unsigned long *dst, int nbits)
+static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
-static inline void bitmap_fill(unsigned long *dst, int nbits)
+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- size_t nlongs = BITS_TO_LONGS(nbits);
+ unsigned int nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
- int len = (nlongs - 1) * sizeof(unsigned long);
+ unsigned int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- int nbits)
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
@@ -309,22 +306,22 @@ static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
return __bitmap_weight(src, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int n, int nbits)
+static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, int nbits)
{
if (small_const_nbits(nbits))
- *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
+ *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
else
- __bitmap_shift_right(dst, src, n, nbits);
+ __bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int n, int nbits)
+static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
- *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
+ *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
else
- __bitmap_shift_left(dst, src, n, nbits);
+ __bitmap_shift_left(dst, src, shift, nbits);
}
static inline int bitmap_parse(const char *buf, unsigned int buflen,
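Illustration (not part of the patch): the bitmap helpers keep their calling convention, only the nbits/shift parameters become unsigned; a minimal sketch of typical usage, with the %*pbl printk format standing in for the removed bitmap_scnlistprintf().

	/* Sketch: declare, populate, shift and print a 64-bit bitmap. */
	DECLARE_BITMAP(src, 64);
	DECLARE_BITMAP(dst, 64);

	bitmap_zero(src, 64);
	set_bit(3, src);
	bitmap_shift_left(dst, src, 2, 64);	/* bit 3 moves to bit 5 */
	pr_info("dst: %*pbl\n", 64, dst);	/* prints "5" */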
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index 7ffe03f4693d..fb790b8449c1 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -3,14 +3,83 @@
#include <linux/types.h>
-extern u8 const byte_rev_table[256];
+#ifdef CONFIG_HAVE_ARCH_BITREVERSE
+#include <asm/bitrev.h>
+
+#define __bitrev32 __arch_bitrev32
+#define __bitrev16 __arch_bitrev16
+#define __bitrev8 __arch_bitrev8
-static inline u8 bitrev8(u8 byte)
+#else
+extern u8 const byte_rev_table[256];
+static inline u8 __bitrev8(u8 byte)
{
return byte_rev_table[byte];
}
-extern u16 bitrev16(u16 in);
-extern u32 bitrev32(u32 in);
+static inline u16 __bitrev16(u16 x)
+{
+ return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8);
+}
+
+static inline u32 __bitrev32(u32 x)
+{
+ return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16);
+}
+
+#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+
+#define __constant_bitrev32(x) \
+({ \
+ u32 __x = x; \
+ __x = (__x >> 16) | (__x << 16); \
+ __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
+ __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
+ __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
+ __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
+ __x; \
+})
+
+#define __constant_bitrev16(x) \
+({ \
+ u16 __x = x; \
+ __x = (__x >> 8) | (__x << 8); \
+ __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
+ __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
+ __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
+ __x; \
+})
+
+#define __constant_bitrev8(x) \
+({ \
+ u8 __x = x; \
+ __x = (__x >> 4) | (__x << 4); \
+ __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
+ __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
+ __x; \
+})
+
+#define bitrev32(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev32(__x) : \
+ __bitrev32(__x); \
+})
+
+#define bitrev16(x) \
+({ \
+ u16 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev16(__x) : \
+ __bitrev16(__x); \
+ })
+#define bitrev8(x) \
+({ \
+ u8 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8(__x) : \
+ __bitrev8(__x) ; \
+ })
#endif /* _LINUX_BITREV_H */
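Illustration (not part of the patch): the constant-folded, table and arch paths all produce the same bit-reversed values; a few spot checks, assuming the caller includes <linux/bitrev.h>.

	u8  a = bitrev8(0x01);		/* 0x80 */
	u16 b = bitrev16(0x1234);	/* 0x2c48 */
	u32 c = bitrev32(0x00000001);	/* 0x80000000 */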
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..7aec86127335 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
unsigned long flags; /* BLK_MQ_F_* flags */
struct request_queue *queue;
- unsigned int queue_num;
struct blk_flush_queue *fq;
void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
unsigned int numa_node;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int queue_num;
atomic_t nr_active;
@@ -147,6 +146,8 @@ enum {
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_SYSFS_UP = 1 << 3,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
+ BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+ BLK_MQ_F_ALLOC_POLICY_BITS = 1,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
@@ -155,6 +156,12 @@ enum {
BLK_MQ_CPU_WORK_BATCH = 8,
};
+#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
+ ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
+ ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
+#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
+ ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
+ << BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
void blk_mq_finish_init(struct request_queue *q);
@@ -167,7 +174,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request *, bool, bool, bool);
-void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -195,13 +201,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +221,9 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
+void blk_mq_freeze_queue(struct request_queue *q);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
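Illustration (not part of the patch): how a blk-mq driver might request round-robin tag allocation with the new policy bits; `my_tag_set` is assumed, and BLK_TAG_ALLOC_RR comes from the blkdev.h hunk below.

	/* Sketch: encode the tag allocation policy into the tag set flags. */
	my_tag_set.flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
	/* The core later recovers it with BLK_MQ_FLAG_TO_ALLOC_POLICY(flags). */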
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_NO_TIMEOUT, /* requests may never expire */
__REQ_NR_BITS, /* stops here */
};
@@ -243,5 +244,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 92f4b4b288dd..7f9a516f24de 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -272,7 +272,11 @@ struct blk_queue_tag {
int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */
atomic_t refcnt; /* map can be shared */
+ int alloc_policy; /* tag allocation policy */
+ int next_tag; /* next tag */
};
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -516,6 +520,7 @@ struct request_queue {
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP))
static inline void queue_lockdep_assert_held(struct request_queue *q)
@@ -850,8 +855,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *,
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct sg_iovec *,
- int, unsigned int, gfp_t);
+ struct rq_map_data *, const struct iov_iter *,
+ gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -1044,8 +1049,6 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
-extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -1139,11 +1142,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int);
+extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);
static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
@@ -1162,7 +1165,7 @@ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask);
+ sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
@@ -1176,7 +1179,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
return blkdev_issue_zeroout(sb->s_bdev,
block << (sb->s_blocksize_bits - 9),
nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask);
+ gfp_mask, true);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1601,8 +1604,8 @@ struct block_device_operations {
int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*direct_access) (struct block_device *, sector_t,
- void **, unsigned long *);
+ long (*direct_access)(struct block_device *, sector_t,
+ void **, unsigned long *pfn, long size);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1620,6 +1623,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
+extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
+ unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */
struct block_device;
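Illustration (not part of the patch): a minimal sketch of the new bdev_direct_access() helper as a DAX-style caller might use it; `bdev`, `sector` and `size` are assumed.

	void *addr;
	unsigned long pfn;
	long avail;

	avail = bdev_direct_access(bdev, sector, &addr, &pfn, size);
	if (avail < 0)
		return avail;		/* no ->direct_access() or out of range */
	/* up to 'avail' bytes at 'addr' can be accessed without a page cache copy */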
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb4591977b03..f8763615a5f2 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -30,6 +30,4 @@ void cdev_del(struct cdev *);
void cd_forget(struct inode *);
-extern struct backing_dev_info directly_mappable_cdev_bdi;
-
#endif
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index da0dae0600e6..b9cb94c3102a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -943,6 +943,8 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
#else /* !CONFIG_CGROUPS */
+struct cgroup_subsys_state;
+
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
@@ -955,6 +957,8 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
return -EINVAL;
}
+static inline void css_put(struct cgroup_subsys_state *css) {}
+
/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 98c4f9b12b03..e4a96fb14403 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -15,6 +15,10 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
+#if IS_ENABLED(CONFIG_BLK_CGROUP)
+SUBSYS(blkio)
+#endif
+
#if IS_ENABLED(CONFIG_MEMCG)
SUBSYS(memory)
#endif
@@ -31,10 +35,6 @@ SUBSYS(freezer)
SUBSYS(net_cls)
#endif
-#if IS_ENABLED(CONFIG_BLK_CGROUP)
-SUBSYS(blkio)
-#endif
-
#if IS_ENABLED(CONFIG_CGROUP_PERF)
SUBSYS(perf_event)
#endif
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index abcafaa20b86..9c78d15d33e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -18,8 +18,6 @@
#include <asm/div64.h>
#include <asm/io.h>
-/* clocksource cycle base type */
-typedef u64 cycle_t;
struct clocksource;
struct module;
@@ -28,106 +26,6 @@ struct module;
#endif
/**
- * struct cyclecounter - hardware abstraction for a free running counter
- * Provides completely state-free accessors to the underlying hardware.
- * Depending on which hardware it reads, the cycle counter may wrap
- * around quickly. Locking rules (if necessary) have to be defined
- * by the implementor and user of specific instances of this API.
- *
- * @read: returns the current cycle value
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters,
- * see CLOCKSOURCE_MASK() helper macro
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- */
-struct cyclecounter {
- cycle_t (*read)(const struct cyclecounter *cc);
- cycle_t mask;
- u32 mult;
- u32 shift;
-};
-
-/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
- * Contains the state needed by timecounter_read() to detect
- * cycle counter wrap around. Initialize with
- * timecounter_init(). Also used to convert cycle counts into the
- * corresponding nanosecond counts with timecounter_cyc2time(). Users
- * of this code are responsible for initializing the underlying
- * cycle counter hardware, locking issues and reading the time
- * more often than the cycle counter wraps around. The nanosecond
- * counter will only wrap around after ~585 years.
- *
- * @cc: the cycle counter used by this instance
- * @cycle_last: most recent cycle counter value seen by
- * timecounter_read()
- * @nsec: continuously increasing count
- */
-struct timecounter {
- const struct cyclecounter *cc;
- cycle_t cycle_last;
- u64 nsec;
-};
-
-/**
- * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @cc: Pointer to cycle counter.
- * @cycles: Cycles
- *
- * XXX - This could use some mult_lxl_ll() asm optimization. Same code
- * as in cyc2ns, but with unsigned result.
- */
-static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
- cycle_t cycles)
-{
- u64 ret = (u64)cycles;
- ret = (ret * cc->mult) >> cc->shift;
- return ret;
-}
-
-/**
- * timecounter_init - initialize a time counter
- * @tc: Pointer to time counter which is to be initialized/reset
- * @cc: A cycle counter, ready to be used.
- * @start_tstamp: Arbitrary initial time stamp.
- *
- * After this call the current cycle register (roughly) corresponds to
- * the initial time stamp. Every call to timecounter_read() increments
- * the time stamp counter by the number of elapsed nanoseconds.
- */
-extern void timecounter_init(struct timecounter *tc,
- const struct cyclecounter *cc,
- u64 start_tstamp);
-
-/**
- * timecounter_read - return nanoseconds elapsed since timecounter_init()
- * plus the initial time stamp
- * @tc: Pointer to time counter.
- *
- * In other words, keeps track of time since the same epoch as
- * the function which generated the initial time stamp.
- */
-extern u64 timecounter_read(struct timecounter *tc);
-
-/**
- * timecounter_cyc2time - convert a cycle counter to same
- * time base as values returned by
- * timecounter_read()
- * @tc: Pointer to time counter.
- * @cycle_tstamp: a value returned by tc->cc->read()
- *
- * Cycle counts that are converted correctly as long as they
- * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
- * with "max cycle count" == cs->mask+1.
- *
- * This allows conversion of cycle counter values which were generated
- * in the past.
- */
-extern u64 timecounter_cyc2time(struct timecounter *tc,
- cycle_t cycle_tstamp);
-
-/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 3238ffa33f68..a014559e4a49 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -12,6 +12,10 @@
#define COMPACT_PARTIAL 3
/* The full zone was compacted */
#define COMPACT_COMPLETE 4
+/* For more detailed tracepoint output */
+#define COMPACT_NO_SUITABLE_PAGE 5
+#define COMPACT_NOT_SUITABLE_ZONE 6
+/* When adding new state, please change compaction_status_string, too */
/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
@@ -21,6 +25,8 @@
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK 2
+struct alloc_context; /* in mm/internal.h */
+
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -30,81 +36,25 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos);
extern int fragmentation_index(struct zone *zone, unsigned int order);
-extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *mask,
- enum migrate_mode mode, int *contended,
- int alloc_flags, int classzone_idx);
+extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+ int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx);
-/* Do not skip compaction more than 64 times */
-#define COMPACT_MAX_DEFER_SHIFT 6
-
-/*
- * Compaction is deferred when compaction fails to result in a page
- * allocation success. 1 << compact_defer_limit compactions are skipped up
- * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
- */
-static inline void defer_compaction(struct zone *zone, int order)
-{
- zone->compact_considered = 0;
- zone->compact_defer_shift++;
-
- if (order < zone->compact_order_failed)
- zone->compact_order_failed = order;
-
- if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
- zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
-}
-
-/* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
- unsigned long defer_limit = 1UL << zone->compact_defer_shift;
-
- if (order < zone->compact_order_failed)
- return false;
-
- /* Avoid possible overflow */
- if (++zone->compact_considered > defer_limit)
- zone->compact_considered = defer_limit;
-
- return zone->compact_considered < defer_limit;
-}
-
-/*
- * Update defer tracking counters after successful compaction of given order,
- * which means an allocation either succeeded (alloc_success == true) or is
- * expected to succeed.
- */
-static inline void compaction_defer_reset(struct zone *zone, int order,
- bool alloc_success)
-{
- if (alloc_success) {
- zone->compact_considered = 0;
- zone->compact_defer_shift = 0;
- }
- if (order >= zone->compact_order_failed)
- zone->compact_order_failed = order + 1;
-}
-
-/* Returns true if restarting compaction after many failures */
-static inline bool compaction_restarting(struct zone *zone, int order)
-{
- if (order < zone->compact_order_failed)
- return false;
-
- return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
- zone->compact_considered >= 1UL << zone->compact_defer_shift;
-}
+extern void defer_compaction(struct zone *zone, int order);
+extern bool compaction_deferred(struct zone *zone, int order);
+extern void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success);
+extern bool compaction_restarting(struct zone *zone, int order);
#else
-static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *nodemask,
- enum migrate_mode mode, int *contended,
- int alloc_flags, int classzone_idx)
+static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
+ unsigned int order, int alloc_flags,
+ const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended)
{
return COMPACT_CONTINUE;
}
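Illustration (not part of the patch): the defer helpers keep their semantics and are merely uninlined; a sketch of the usual bookkeeping around a compaction attempt, with `zone`, `order` and `succeeded` assumed.

	if (!compaction_deferred(zone, order)) {
		/* ... attempt compaction of 'zone' at 'order' ... */
		if (succeeded)
			compaction_defer_reset(zone, order, true);
		else
			defer_compaction(zone, order);
	}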
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7450ca2ac1fc..ab25814690bc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -689,6 +689,15 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr);
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
+ compat_old_sigset_t __user *oset);
+#endif
+
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define compat_save_altstack_ex(uss, sp) do { \
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 02ae99e8e6d3..cdf13ca7cac3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,6 +66,7 @@
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
+#define __alias(symbol) __attribute__((alias(#symbol)))
/*
* it doesn't make sense on ARM (currently the only user of __naked) to trace
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index d1a558239b1a..769e19864632 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -85,3 +85,7 @@
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index c8c565952548..efee493714eb 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -63,3 +63,5 @@
#define __HAVE_BUILTIN_BSWAP64__
#define __HAVE_BUILTIN_BSWAP16__
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..d1ec10a940ff 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -54,7 +54,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#include <linux/compiler-gcc.h>
#endif
+#ifdef CC_USING_HOTPATCH
+#define notrace __attribute__((hotpatch(0,0)))
+#else
#define notrace __attribute__((no_instrument_function))
+#endif
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
@@ -215,7 +219,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
}
}
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +239,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
@@ -257,8 +261,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
#define READ_ONCE(x) \
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
-#define ASSIGN_ONCE(val, x) \
- ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+ ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
#endif /* __KERNEL__ */
@@ -385,7 +389,7 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
/* Compile time object size, -1 for unknown */
@@ -447,12 +451,23 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time. Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define __ACCESS_ONCE(x) ({ \
+ __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+ (volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
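Illustration (not part of the patch): the renamed accessor pair in a lock-free flag handoff; `do_work()` is assumed, and CPU-level ordering still needs explicit barriers where required.

	static int ready;

	/* producer */
	WRITE_ONCE(ready, 1);

	/* consumer, e.g. in an interrupt handler on the same CPU */
	if (READ_ONCE(ready))
		do_work();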
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 5d3c54311f7a..3486b9082adb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -179,15 +179,6 @@ struct coresight_device {
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
-#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \
- __mode, __get, __set, __fmt) \
-DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \
-static const struct coresight_ops_entry __name ## _entry = { \
- .name = __entry_name, \
- .mode = __mode, \
- .ops = &__name ## _ops \
-}
-
/**
* struct coresight_ops_sink - basic operations for a sink
* Operations available for sinks
@@ -236,13 +227,8 @@ coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_is_bit_set(u32 val, int position, int value);
extern int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value);
-#ifdef CONFIG_OF
-extern struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, struct device_node *node);
-#endif
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -250,14 +236,16 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_is_bit_set(u32 val, int position, int value)
- { return 0; }
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
+#endif
+
#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node);
+#else
static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
-#endif
#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4d078cebafd2..2ee4888c1f47 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -66,8 +66,6 @@ struct cpufreq_policy {
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
unsigned int cpu; /* cpu nr of CPU managing this policy */
- unsigned int last_cpu; /* cpu nr of previous CPU that managed
- * this policy */
struct clk *clk;
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -113,6 +111,9 @@ struct cpufreq_policy {
wait_queue_head_t transition_wait;
struct task_struct *transition_task; /* Task which is doing the transition */
+ /* cpufreq-stats */
+ struct cpufreq_stats *stats;
+
/* For cpufreq driver's internal use */
void *driver_data;
};
@@ -367,9 +368,8 @@ static inline void cpufreq_resume(void) {}
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
-#define CPUFREQ_UPDATE_POLICY_CPU (4)
-#define CPUFREQ_CREATE_POLICY (5)
-#define CPUFREQ_REMOVE_POLICY (6)
+#define CPUFREQ_CREATE_POLICY (4)
+#define CPUFREQ_REMOVE_POLICY (5)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b950e9d6008b..086549a665e2 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -22,6 +22,14 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_bits(maskp) ((maskp)->bits)
+/**
+ * cpumask_pr_args - printf args to output a cpumask
+ * @maskp: cpumask to be printed
+ *
+ * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
+ */
+#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
+
#if NR_CPUS == 1
#define nr_cpu_ids 1
#else
@@ -539,21 +547,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_of(cpu) (get_cpu_mask(cpu))
/**
- * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
- * @buf: the buffer to sprintf into
- * @len: the length of the buffer
- * @srcp: the cpumask to print
- *
- * If len is zero, returns zero. Otherwise returns the length of the
- * (nul-terminated) @buf string.
- */
-static inline int cpumask_scnprintf(char *buf, int len,
- const struct cpumask *srcp)
-{
- return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
-}
-
-/**
* cpumask_parse_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
@@ -564,7 +557,7 @@ static inline int cpumask_scnprintf(char *buf, int len,
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
/**
@@ -579,23 +572,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
- nr_cpumask_bits);
-}
-
-/**
- * cpulist_scnprintf - print a cpumask into a string as comma-separated list
- * @buf: the buffer to sprintf into
- * @len: the length of the buffer
- * @srcp: the cpumask to print
- *
- * If len is zero, returns zero. Otherwise returns the length of the
- * (nul-terminated) @buf string.
- */
-static inline int cpulist_scnprintf(char *buf, int len,
- const struct cpumask *srcp)
-{
- return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
- nr_cpumask_bits);
+ nr_cpu_ids);
}
/**
@@ -610,7 +587,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
/**
@@ -622,7 +599,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
}
/**
@@ -817,7 +794,7 @@ static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
- nr_cpumask_bits);
+ nr_cpu_ids);
}
/*
@@ -905,13 +882,13 @@ static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
}
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -927,21 +904,21 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -949,40 +926,40 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+ const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
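Illustration (not part of the patch): users of the removed cpumask_scnprintf()/cpulist_scnprintf() switch to the %*pb/%*pbl printk formats via cpumask_pr_args().

	pr_info("online cpus: %*pbl\n", cpumask_pr_args(cpu_online_mask));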
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 9c8776d0ada8..fb5ef16d6a12 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -1147,7 +1147,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
* cipher operation completes.
*
* The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template:
+ * must comply with the following template
*
* void callback_function(struct crypto_async_request *req, int error)
*/
@@ -1174,7 +1174,7 @@ static inline void ablkcipher_request_set_callback(
*
* For encryption, the source is treated as the plaintext and the
* destination is the ciphertext. For a decryption operation, the use is
- * reversed: the source is the ciphertext and the destination is the plaintext.
+ * reversed - the source is the ciphertext and the destination is the plaintext.
*/
static inline void ablkcipher_request_set_crypt(
struct ablkcipher_request *req,
@@ -1412,6 +1412,9 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
*/
static inline int crypto_aead_decrypt(struct aead_request *req)
{
+ if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ return -EINVAL;
+
return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}
@@ -1506,7 +1509,7 @@ static inline void aead_request_free(struct aead_request *req)
* completes
*
* The callback function is registered with the aead_request handle and
- * must comply with the following template:
+ * must comply with the following template
*
* void callback_function(struct crypto_async_request *req, int error)
*/
@@ -1533,7 +1536,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
*
* For encryption, the source is treated as the plaintext and the
* destination is the ciphertext. For a decryption operation, the use is
- * reversed: the source is the ciphertext and the destination is the plaintext.
+ * reversed - the source is the ciphertext and the destination is the plaintext.
*
* IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
* the caller must concatenate the ciphertext followed by the
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 2cd9f1cf9fa3..f4754282c9c2 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -1,6 +1,8 @@
#ifndef __CRYPTOHASH_H
#define __CRYPTOHASH_H
+#include <uapi/linux/types.h>
+
#define SHA_DIGEST_WORDS 5
#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
#define SHA_WORKSPACE_WORDS 16
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644
index 000000000000..602fbbfcfeed
--- /dev/null
+++ b/include/linux/devfreq-event.h
@@ -0,0 +1,196 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_EVENT_H__
+#define __LINUX_DEVFREQ_EVENT_H__
+
+#include <linux/device.h>
+
+/**
+ * struct devfreq_event_dev - the devfreq-event device
+ *
+ * @node : list node for the devfreq-event devices that have been registered.
+ * @dev : the device registered by the devfreq-event class. dev.parent is
+ * the device using devfreq-event.
+ * @lock : a mutex to protect access to the devfreq-event device.
+ * @enable_count: the number of times the enable function has been called.
+ * @desc : the description for devfreq-event device.
+ *
+ * This structure contains devfreq-event device information.
+ */
+struct devfreq_event_dev {
+ struct list_head node;
+
+ struct device dev;
+ struct mutex lock;
+ u32 enable_count;
+
+ const struct devfreq_event_desc *desc;
+};
+
+/**
+ * struct devfreq_event_data - the devfreq-event data
+ *
+ * @load_count : load count of devfreq-event device for the given period.
+ * @total_count : total count of devfreq-event device for the given period.
+ * each count may represent a clock cycle, a time unit
+ * (ns/us/...), or anything the device driver wants.
+ * Generally, utilization is load_count / total_count.
+ *
+ * This structure contains the data of the devfreq-event device for a polling period.
+ */
+struct devfreq_event_data {
+ unsigned long load_count;
+ unsigned long total_count;
+};
+
+/**
+ * struct devfreq_event_ops - the operations of devfreq-event device
+ *
+ * @enable : Enable the devfreq-event device.
+ * @disable : Disable the devfreq-event device.
+ * @reset : Reset all settings of the devfreq-event device.
+ * @set_event : Set the specific event type for the devfreq-event device.
+ * @get_event : Get the result of the devfreq-event device with specific
+ * event type.
+ *
+ * This structure contains devfreq-event device operations which can be
+ * implemented by devfreq-event device drivers.
+ */
+struct devfreq_event_ops {
+ /* Optional functions */
+ int (*enable)(struct devfreq_event_dev *edev);
+ int (*disable)(struct devfreq_event_dev *edev);
+ int (*reset)(struct devfreq_event_dev *edev);
+
+ /* Mandatory functions */
+ int (*set_event)(struct devfreq_event_dev *edev);
+ int (*get_event)(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata);
+};
+
+/**
+ * struct devfreq_event_desc - the descriptor of devfreq-event device
+ *
+ * @name : the name of the devfreq-event device.
+ * @driver_data : the private data for the devfreq-event driver.
+ * @ops : the operations used to control the devfreq-event device.
+ *
+ * Each devfreq-event device is described by this structure.
+ * This structure contains the various data for the devfreq-event device.
+ */
+struct devfreq_event_desc {
+ const char *name;
+ void *driver_data;
+
+ struct devfreq_event_ops *ops;
+};
+
+#if defined(CONFIG_PM_DEVFREQ_EVENT)
+extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
+extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
+extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
+extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
+extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata);
+extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index);
+extern int devfreq_event_get_edev_count(struct device *dev);
+extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc);
+extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc);
+extern void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev);
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return edev->desc->driver_data;
+}
+#else
+static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+ return false;
+}
+
+static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_get_edev_count(struct device *dev)
+{
+ return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
+ struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev)
+{
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return NULL;
+}
+#endif /* CONFIG_PM_DEVFREQ_EVENT */
+
+#endif /* __LINUX_DEVFREQ_EVENT_H__ */
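As a quick, hypothetical illustration of the provider-side API defined above: a driver fills in a struct devfreq_event_desc (name, ops, private data) and registers it with devm_devfreq_event_add_edev(); set_event/get_event are mandatory, enable/disable/reset optional. The "foo-ppmu" device, its register layout and the counter values below are invented for this sketch and are not part of the patch.

#include <linux/devfreq-event.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_ppmu {
	void __iomem *base;		/* counter registers (assumed) */
};

static int foo_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct foo_ppmu *ppmu = devfreq_event_get_drvdata(edev);

	/* program and start the counters here, e.g. via ppmu->base */
	return ppmu ? 0 : -EINVAL;
}

static int foo_ppmu_get_event(struct devfreq_event_dev *edev,
			      struct devfreq_event_data *edata)
{
	/* read the counters back for the elapsed period (placeholders) */
	edata->load_count = 0;
	edata->total_count = 1;
	return 0;
}

static struct devfreq_event_ops foo_ppmu_ops = {
	/* enable/disable/reset are optional and omitted in this sketch */
	.set_event	= foo_ppmu_set_event,
	.get_event	= foo_ppmu_get_event,
};

static int foo_ppmu_probe(struct platform_device *pdev)
{
	struct foo_ppmu *ppmu;
	struct devfreq_event_desc *desc;
	struct devfreq_event_dev *edev;

	ppmu = devm_kzalloc(&pdev->dev, sizeof(*ppmu), GFP_KERNEL);
	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
	if (!ppmu || !desc)
		return -ENOMEM;

	desc->name = "foo-ppmu";
	desc->driver_data = ppmu;
	desc->ops = &foo_ppmu_ops;

	edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
	return PTR_ERR_OR_ZERO(edev);
}

static struct platform_driver foo_ppmu_driver = {
	.probe	= foo_ppmu_probe,
	.driver	= { .name = "foo-ppmu" },
};
module_platform_driver(foo_ppmu_driver);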
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ca6d2acc5eb7..2646aed1d3fe 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -48,6 +48,11 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
union map_info *map_context);
+typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
+ struct request *rq,
+ union map_info *map_context,
+ struct request **clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone);
/*
* Returns:
@@ -143,6 +148,8 @@ struct target_type {
dm_dtr_fn dtr;
dm_map_fn map;
dm_map_request_fn map_rq;
+ dm_clone_and_map_request_fn clone_and_map_rq;
+ dm_release_clone_request_fn release_clone_rq;
dm_endio_fn end_io;
dm_request_endio_fn rq_end_io;
dm_presuspend_fn presuspend;
@@ -600,9 +607,6 @@ static inline unsigned long to_bytes(sector_t n)
/*-----------------------------------------------------------------
* Helper for block layer and dm core operations
*---------------------------------------------------------------*/
-void dm_dispatch_request(struct request *rq);
-void dm_requeue_unmapped_request(struct request *rq);
-void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);
#endif /* _LINUX_DEVICE_MAPPER_H */
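A hedged sketch of how a request-based target might wire up the two hooks added to struct target_type above; the "foo" target, its stubbed handlers and the omitted ctr/dtr/map callbacks are placeholders for illustration only.

#include <linux/device-mapper.h>
#include <linux/module.h>

static int foo_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				union map_info *map_context,
				struct request **clone)
{
	/*
	 * A real target would allocate and prepare *clone against the
	 * underlying device here and return DM_MAPIO_REMAPPED; this stub
	 * simply asks the core to try again later.
	 */
	return DM_MAPIO_REQUEUE;
}

static void foo_release_clone_rq(struct request *clone)
{
	/* undo whatever foo_clone_and_map_rq() allocated for this clone */
}

static struct target_type foo_target = {
	.name			= "foo",
	.version		= {1, 0, 0},
	.module			= THIS_MODULE,
	/* .ctr, .dtr and the bio/request map methods are omitted here */
	.clone_and_map_rq	= foo_clone_and_map_rq,
	.release_clone_rq	= foo_release_clone_rq,
};

static int __init foo_init(void)
{
	return dm_register_target(&foo_target);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	dm_unregister_target(&foo_target);
}
module_exit(foo_exit);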
diff --git a/include/linux/device.h b/include/linux/device.h
index fb506738f7b7..0eb8ee2dc6d1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1038,22 +1038,22 @@ extern __printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
extern __printf(3, 4)
-int dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
extern __printf(2, 3)
-int dev_emerg(const struct device *dev, const char *fmt, ...);
+void dev_emerg(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_alert(const struct device *dev, const char *fmt, ...);
+void dev_alert(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_crit(const struct device *dev, const char *fmt, ...);
+void dev_crit(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_err(const struct device *dev, const char *fmt, ...);
+void dev_err(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_warn(const struct device *dev, const char *fmt, ...);
+void dev_warn(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_notice(const struct device *dev, const char *fmt, ...);
+void dev_notice(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int _dev_info(const struct device *dev, const char *fmt, ...);
+void _dev_info(const struct device *dev, const char *fmt, ...);
#else
@@ -1065,35 +1065,35 @@ static inline __printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{ return 0; }
-static inline int __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
-{ return 0; }
+static inline void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{}
static inline __printf(3, 4)
-int dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
-{ return 0; }
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_emerg(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_crit(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_crit(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_alert(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_alert(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_err(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_err(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_warn(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_warn(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_notice(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_notice(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int _dev_info(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
#endif
@@ -1119,7 +1119,6 @@ do { \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
- 0; \
})
#endif
@@ -1156,7 +1155,7 @@ do { \
#define dev_info_once(dev, fmt, ...) \
dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
#define dev_dbg_once(dev, fmt, ...) \
- dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
do { \
@@ -1215,7 +1214,6 @@ do { \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
- 0; \
})
#endif
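Since dev_printk() and its level wrappers now return void, callers cannot rely on a return value; logging and error propagation stay separate. A small hypothetical helper to illustrate the calling convention, including the corrected dev_dbg_once():

#include <linux/device.h>
#include <linux/errno.h>

static int foo_check_firmware(struct device *dev, unsigned int fw_ver)
{
	if (fw_ver < 2) {
		dev_err(dev, "firmware %u too old, need >= 2\n", fw_ver);
		return -ENODEV;		/* the error code is returned separately */
	}

	/* prints at most once over the lifetime of the system */
	dev_dbg_once(dev, "firmware %u accepted\n", fw_ver);
	return 0;
}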
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
index 3713a7232dd8..c0d4d1e2a45c 100644
--- a/include/linux/dqblk_v1.h
+++ b/include/linux/dqblk_v1.h
@@ -5,9 +5,6 @@
#ifndef _LINUX_DQBLK_V1_H
#define _LINUX_DQBLK_V1_H
-/* Root squash turned on */
-#define V1_DQF_RSQUASH 1
-
/* Numbers of blocks needed for updates */
#define V1_INIT_ALLOC 1
#define V1_INIT_REWRITE 1
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0238d612750e..cf7e431cbc73 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -848,7 +848,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
}
static inline char *
-efi_guid_unparse(efi_guid_t *guid, char *out)
+efi_guid_to_str(efi_guid_t *guid, char *out)
{
sprintf(out, "%pUl", guid->b);
return out;
@@ -875,6 +875,8 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int efi_config_init(efi_config_table_type_t *arch_tables);
+extern int efi_config_parse_tables(void *config_tables, int count, int sz,
+ efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 9a33c5f7e126..7be22da321f3 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -79,6 +79,12 @@ struct enclosure_component_callbacks {
int (*set_locate)(struct enclosure_device *,
struct enclosure_component *,
enum enclosure_component_setting);
+ void (*get_power_status)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_power_status)(struct enclosure_device *,
+ struct enclosure_component *,
+ int);
+ int (*show_id)(struct enclosure_device *, char *buf);
};
@@ -91,7 +97,9 @@ struct enclosure_component {
int fault;
int active;
int locate;
+ int slot;
enum enclosure_status status;
+ int power_status;
};
struct enclosure_device {
@@ -120,8 +128,9 @@ enclosure_register(struct device *, const char *, int,
struct enclosure_component_callbacks *);
void enclosure_unregister(struct enclosure_device *);
struct enclosure_component *
-enclosure_component_register(struct enclosure_device *, unsigned int,
- enum enclosure_component_type, const char *);
+enclosure_component_alloc(struct enclosure_device *, unsigned int,
+ enum enclosure_component_type, const char *);
+int enclosure_component_register(struct enclosure_component *);
int enclosure_add_device(struct enclosure_device *enclosure, int component,
struct device *dev);
int enclosure_remove_device(struct enclosure_device *, struct device *);
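Sketch of the two-step registration that replaces the old one-shot enclosure_component_register(): allocate the component, fill in fields such as the new ->slot, then register it. The caller context and error handling are simplified and hypothetical.

#include <linux/enclosure.h>
#include <linux/err.h>

static int foo_add_component(struct enclosure_device *edev, int num,
			     const char *name)
{
	struct enclosure_component *ecomp;

	ecomp = enclosure_component_alloc(edev, num,
					  ENCLOSURE_COMPONENT_DEVICE, name);
	if (IS_ERR(ecomp))
		return PTR_ERR(ecomp);

	ecomp->slot = num;		/* new field introduced above */

	return enclosure_component_register(ecomp);
}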
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 41c891d05f04..1d869d185a0d 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
+int eth_gro_complete(struct sk_buff *skb, int nhoff);
+
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 41b223a59a63..fa05e04c5531 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
struct dentry;
+struct iattr;
struct inode;
struct super_block;
struct vfsmount;
@@ -180,6 +181,21 @@ struct fid {
* get_name is not (which is possibly inconsistent)
*/
+/* types of block ranges for multipage write mappings. */
+#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
+#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
+#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+
+#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+
+struct iomap {
+ sector_t blkno; /* first sector of mapping */
+ loff_t offset; /* file offset of mapping, bytes */
+ u64 length; /* length of mapping, bytes */
+ int type; /* type of mapping */
+};
+
struct export_operations {
int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
struct inode *parent);
@@ -191,6 +207,13 @@ struct export_operations {
struct dentry *child);
struct dentry * (*get_parent)(struct dentry *child);
int (*commit_metadata)(struct inode *inode);
+
+ int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+ int (*map_blocks)(struct inode *inode, loff_t offset,
+ u64 len, struct iomap *iomap,
+ bool write, u32 *device_generation);
+ int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
+ int nr_iomaps, struct iattr *iattr);
};
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 87f14e90e984..a23556c32703 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,12 +19,16 @@
#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
+#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -87,6 +91,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
#define CP_COMPACT_SUM_FLAG 0x00000004
@@ -224,6 +229,8 @@ enum {
OFFSET_BIT_SHIFT
};
+#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
+
struct node_footer {
__le32 nid; /* node id */
__le32 ino; /* inode number */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 09bb7a18d287..043f3283b71c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -726,7 +726,9 @@ extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
/* drivers/video/modedb.c */
-#define VESA_MODEDB_SIZE 34
+#define VESA_MODEDB_SIZE 43
+#define DMT_SIZE 0x50
+
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
@@ -777,9 +779,17 @@ struct fb_videomode {
u32 flag;
};
+struct dmt_videomode {
+ u32 dmt_id;
+ u32 std_2byte_code;
+ u32 cvt_3byte_code;
+ const struct fb_videomode *mode;
+};
+
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
+extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
struct list_head list;
diff --git a/include/linux/fec.h b/include/linux/fec.h
index bcff455d1d53..1454a503622d 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -19,6 +19,7 @@
struct fec_platform_data {
phy_interface_t phy;
unsigned char mac[ETH_ALEN];
+ void (*sleep_mode_enable)(int enabled);
};
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 42efe13077b6..ed5a0900b94d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,6 +34,7 @@
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
+struct backing_dev_info;
struct export_operations;
struct hd_geometry;
struct iovec;
@@ -50,6 +51,7 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -360,8 +362,6 @@ struct address_space_operations {
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
- void **, unsigned long *);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -394,14 +394,12 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
-struct backing_dev_info;
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root page_tree; /* radix tree of all pages */
spinlock_t tree_lock; /* and lock protecting it */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root i_mmap; /* tree of private and shared mappings */
- struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
@@ -409,7 +407,6 @@ struct address_space {
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits/gfp mask */
- struct backing_dev_info *backing_dev_info; /* device readahead, etc */
spinlock_t private_lock; /* for use by the address_space */
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
@@ -493,8 +490,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
*/
static inline int mapping_mapped(struct address_space *mapping)
{
- return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
- !list_empty(&mapping->i_mmap_nonlinear);
+ return !RB_EMPTY_ROOT(&mapping->i_mmap);
}
/*
@@ -625,7 +621,7 @@ struct inode {
atomic_t i_readcount; /* struct files open RO */
#endif
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
- struct file_lock *i_flock;
+ struct file_lock_context *i_flctx;
struct address_space i_data;
struct list_head i_devices;
union {
@@ -875,6 +871,7 @@ static inline struct file *get_file(struct file *f)
#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
@@ -885,6 +882,8 @@ static inline struct file *get_file(struct file *f)
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
+struct file_lock;
+
struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
@@ -898,7 +897,7 @@ struct lock_manager_operations {
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock **, int, struct list_head *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
};
@@ -923,17 +922,17 @@ int locks_in_grace(struct net *);
* FIXME: should we create a separate "struct lock_request" to help distinguish
* these two uses?
*
- * The i_flock list is ordered by:
+ * The various i_flctx lists are ordered by:
*
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
*
* Obviously, the last two criteria only matter for POSIX locks.
*/
struct file_lock {
struct file_lock *fl_next; /* singly linked list for this inode */
+ struct list_head fl_list; /* link into file_lock_context */
struct hlist_node fl_link; /* node in global lists */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
@@ -964,6 +963,16 @@ struct file_lock {
} fl_u;
};
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+ int flc_flock_cnt;
+ int flc_posix_cnt;
+ int flc_lease_cnt;
+};
+
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
@@ -990,6 +999,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
/* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
@@ -1010,7 +1020,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock **, int, struct list_head *);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1047,6 +1057,11 @@ static inline int fcntl_getlease(struct file *filp)
return F_UNLCK;
}
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
static inline void locks_init_lock(struct file_lock *fl)
{
return;
@@ -1137,7 +1152,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
return -EINVAL;
}
-static inline int lease_modify(struct file_lock **before, int arg,
+static inline int lease_modify(struct file_lock *fl, int arg,
struct list_head *dispose)
{
return -EINVAL;
@@ -1184,8 +1199,6 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-extern struct list_head super_blocks;
-extern spinlock_t sb_lock;
/* Possible states of 'frozen' field */
enum {
@@ -1502,6 +1515,26 @@ struct block_device_operations;
#define HAVE_COMPAT_IOCTL 1
#define HAVE_UNLOCKED_IOCTL 1
+/*
+ * These flags let !MMU mmap() govern direct device mapping vs immediate
+ * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
+ *
+ * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
+ * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
+ * NOMMU_MAP_READ: Can be mapped for reading
+ * NOMMU_MAP_WRITE: Can be mapped for writing
+ * NOMMU_MAP_EXEC: Can be mapped for execution
+ */
+#define NOMMU_MAP_COPY 0x00000001
+#define NOMMU_MAP_DIRECT 0x00000008
+#define NOMMU_MAP_READ VM_MAYREAD
+#define NOMMU_MAP_WRITE VM_MAYWRITE
+#define NOMMU_MAP_EXEC VM_MAYEXEC
+
+#define NOMMU_VMFLAGS \
+ (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+
+
struct iov_iter;
struct file_operations {
@@ -1536,6 +1569,9 @@ struct file_operations {
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
+#ifndef CONFIG_MMU
+ unsigned (*mmap_capabilities)(struct file *);
+#endif
};
struct inode_operations {
@@ -1618,8 +1654,10 @@ struct super_operations {
struct dquot **(*get_dquots)(struct inode *);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- long (*nr_cached_objects)(struct super_block *, int);
- long (*free_cached_objects)(struct super_block *, long, int);
+ long (*nr_cached_objects)(struct super_block *,
+ struct shrink_control *);
+ long (*free_cached_objects)(struct super_block *,
+ struct shrink_control *);
};
/*
@@ -1638,6 +1676,11 @@ struct super_operations {
#define S_IMA 1024 /* Inode has an associated IMA struct */
#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#ifdef CONFIG_FS_DAX
+#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#else
+#define S_DAX 0 /* Make all the DAX code disappear */
+#endif
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1675,6 +1718,7 @@ struct super_operations {
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
+#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
@@ -1959,7 +2003,7 @@ static inline int locks_verify_truncate(struct inode *inode,
struct file *filp,
loff_t size)
{
- if (inode->i_flock && mandatory_lock(inode))
+ if (inode->i_flctx && mandatory_lock(inode))
return locks_mandatory_area(
FLOCK_VERIFY_WRITE, inode, filp,
size < inode->i_size ? size : inode->i_size,
@@ -1973,11 +2017,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
- if (inode->i_flock)
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, FL_LEASE);
return 0;
}
@@ -1986,11 +2031,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
- if (inode->i_flock)
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, FL_DELEG);
return 0;
}
@@ -2017,6 +2063,16 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
return ret;
}
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode,
+ wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
+ FL_LAYOUT);
+ return 0;
+}
+
#else /* !CONFIG_FILE_LOCKING */
static inline int locks_mandatory_locked(struct file *file)
{
@@ -2072,6 +2128,11 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
return 0;
}
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
#endif /* CONFIG_FILE_LOCKING */
/* fs/open.c */
@@ -2481,8 +2542,6 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
- unsigned long size, pgoff_t pgoff);
int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
@@ -2527,19 +2586,13 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-#ifdef CONFIG_FS_XIP
-extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
- loff_t *ppos);
-extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
-extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
- size_t len, loff_t *ppos);
-extern int xip_truncate_page(struct address_space *mapping, loff_t from);
-#else
-static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
-{
- return 0;
-}
-#endif
+ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
+ loff_t, get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
@@ -2696,6 +2749,11 @@ extern int generic_show_options(struct seq_file *m, struct dentry *root);
extern void save_mount_options(struct super_block *sb, char *options);
extern void replace_mount_options(struct super_block *sb, char *options);
+static inline bool io_is_direct(struct file *filp)
+{
+ return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+}
+
static inline ino_t parent_ino(struct dentry *dentry)
{
ino_t res;
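To show where the new DAX hooks plug in, here is a hedged sketch of a ->direct_IO method that routes persistent-memory backed inodes through dax_do_io() and everything else through the regular block path; "foofs", its get_block helper and the DIO_LOCKING choice are assumptions for illustration only.

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* hypothetical block-mapping helper; real mapping logic assumed elsewhere */
static int foofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	return -EIO;
}

static ssize_t foofs_direct_IO(int rw, struct kiocb *iocb,
			       struct iov_iter *iter, loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (IS_DAX(inode))
		/* S_DAX set: bypass the page cache, access the media directly */
		return dax_do_io(rw, iocb, inode, iter, offset,
				 foofs_get_block, NULL, DIO_LOCKING);

	return blockdev_direct_IO(rw, iocb, inode, iter, offset,
				  foofs_get_block);
}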
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 1c804b057fb1..7ee1774edee5 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
new_dir_mask |= FS_ISDIR;
}
- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
+ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
+ fs_cookie);
+ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
+ fs_cookie);
if (target)
fsnotify_link_count(target);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0bebb5c348b8..c674ee8f7fca 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -44,6 +44,10 @@ const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
+const char *ftrace_print_array_seq(struct trace_seq *p,
+ const void *buf, int buf_len,
+ size_t el_size);
+
struct trace_iterator;
struct trace_event;
@@ -595,7 +599,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
- struct pt_regs *regs, int *rctxp);
+ struct pt_regs **regs, int *rctxp);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 55b685719d52..09460d6d6682 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -11,6 +11,10 @@ extern void genl_unlock(void);
extern int lockdep_genl_is_held(void);
#endif
+/* for synchronisation between af_netlink and genetlink */
+extern atomic_t genl_sk_destructing_cnt;
+extern wait_queue_head_t genl_sk_destructing_waitq;
+
/**
* rcu_dereference_genl - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b840e3b2770d..51bd1e72a917 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -334,18 +334,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node);
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index fd85cb120ee0..45afc2dee560 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -340,31 +340,32 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
* etc.
*/
#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
+#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
#define __gpiod_get_index(dev, con_id, index, flags, ...) \
__gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
+#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
#define __gpiod_get_optional(dev, con_id, flags, ...) \
__gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
+#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
__gpiod_get_index_optional(dev, con_id, index, flags)
#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, 0)
+ __gpiod_get_index_optional(varargs, GPIOD_ASIS)
#define __devm_gpiod_get(dev, con_id, flags, ...) \
__devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
+#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
__devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
+#define devm_gpiod_get_index(varargs...) \
+ __devm_gpiod_get_index(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
__devm_gpiod_get_optional(dev, con_id, flags)
#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, 0)
+ __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
__devm_gpiod_get_index_optional(dev, con_id, index, flags)
#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, 0)
+ __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
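With the defaults above, leaving out the optional flags argument is now the same as passing GPIOD_ASIS explicitly, so the descriptor comes back unconfigured. A small illustrative consumer ("reset" is a hypothetical connection ID):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_get_reset_gpio(struct device *dev)
{
	struct gpio_desc *reset;

	/* equivalent to gpiod_get(dev, "reset", GPIOD_ASIS) */
	reset = gpiod_get(dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* GPIOD_ASIS leaves the line untouched, so set a direction here */
	return gpiod_direction_output(reset, 0);
}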
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index cbb5790a35cd..e9744202fa29 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -25,6 +25,7 @@
#define __LINUX_HDMI_H_
#include <linux/types.h>
+#include <linux/device.h>
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
@@ -52,12 +53,18 @@ enum hdmi_colorspace {
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
HDMI_COLORSPACE_YUV444,
+ HDMI_COLORSPACE_YUV420,
+ HDMI_COLORSPACE_RESERVED4,
+ HDMI_COLORSPACE_RESERVED5,
+ HDMI_COLORSPACE_RESERVED6,
+ HDMI_COLORSPACE_IDO_DEFINED,
};
enum hdmi_scan_mode {
HDMI_SCAN_MODE_NONE,
HDMI_SCAN_MODE_OVERSCAN,
HDMI_SCAN_MODE_UNDERSCAN,
+ HDMI_SCAN_MODE_RESERVED,
};
enum hdmi_colorimetry {
@@ -71,6 +78,7 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_RESERVED,
};
enum hdmi_active_aspect {
@@ -92,12 +100,18 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+
+ /* The following EC values are only defined in CEA-861-F. */
+ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
+ HDMI_EXTENDED_COLORIMETRY_BT2020,
+ HDMI_EXTENDED_COLORIMETRY_RESERVED,
};
enum hdmi_quantization_range {
HDMI_QUANTIZATION_RANGE_DEFAULT,
HDMI_QUANTIZATION_RANGE_LIMITED,
HDMI_QUANTIZATION_RANGE_FULL,
+ HDMI_QUANTIZATION_RANGE_RESERVED,
};
/* non-uniform picture scaling */
@@ -114,7 +128,7 @@ enum hdmi_ycc_quantization_range {
};
enum hdmi_content_type {
- HDMI_CONTENT_TYPE_NONE,
+ HDMI_CONTENT_TYPE_GRAPHICS,
HDMI_CONTENT_TYPE_PHOTO,
HDMI_CONTENT_TYPE_CINEMA,
HDMI_CONTENT_TYPE_GAME,
@@ -194,6 +208,7 @@ enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_MLP,
HDMI_AUDIO_CODING_TYPE_DST,
HDMI_AUDIO_CODING_TYPE_WMA_PRO,
+ HDMI_AUDIO_CODING_TYPE_CXT,
};
enum hdmi_audio_sample_size {
@@ -215,10 +230,25 @@ enum hdmi_audio_sample_frequency {
};
enum hdmi_audio_coding_type_ext {
- HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
+ /* Refer to Audio Coding Type (CT) field in Data Byte 1 */
+ HDMI_AUDIO_CODING_TYPE_EXT_CT,
+
+ /*
+ * The next three CXT values are defined in CEA-861-E only.
+ * They do not exist in older versions, and in CEA-861-F they are
+ * defined as 'Not in use'.
+ */
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
+
+ /* The following CXT values are only defined in CEA-861-F. */
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_EXT_DRA,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
};
struct hdmi_audio_infoframe {
@@ -299,5 +329,8 @@ union hdmi_infoframe {
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+void hdmi_infoframe_log(const char *level, struct device *dev,
+ union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
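A short sketch of the two helpers declared above: unpack a raw infoframe read back from hardware and log its contents. Where the raw buffer comes from is left open; only the calls themselves are taken from this header.

#include <linux/hdmi.h>

static void foo_dump_infoframe(struct device *dev, void *raw)
{
	union hdmi_infoframe frame;

	if (hdmi_infoframe_unpack(&frame, raw) < 0) {
		dev_err(dev, "failed to parse HDMI infoframe\n");
		return;
	}

	hdmi_infoframe_log(KERN_DEBUG, dev, &frame);
}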
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 06c4607744f6..efc7787a41a8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -574,7 +574,9 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
#define HID_GLOBAL_STACK_SIZE 4
#define HID_COLLECTION_STACK_SIZE 4
-#define HID_SCAN_FLAG_MT_WIN_8 0x00000001
+#define HID_SCAN_FLAG_MT_WIN_8 BIT(0)
+#define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1)
+#define HID_SCAN_FLAG_GD_POINTER BIT(2)
struct hid_parser {
struct hid_global global;
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index bb9840fd1e18..464f33814a94 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -250,17 +250,29 @@ void host1x_job_unpin(struct host1x_job *job);
struct host1x_device;
struct host1x_driver {
+ struct device_driver driver;
+
const struct of_device_id *subdevs;
struct list_head list;
- const char *name;
int (*probe)(struct host1x_device *device);
int (*remove)(struct host1x_device *device);
+ void (*shutdown)(struct host1x_device *device);
};
-int host1x_driver_register(struct host1x_driver *driver);
+static inline struct host1x_driver *
+to_host1x_driver(struct device_driver *driver)
+{
+ return container_of(driver, struct host1x_driver, driver);
+}
+
+int host1x_driver_register_full(struct host1x_driver *driver,
+ struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);
+#define host1x_driver_register(driver) \
+ host1x_driver_register_full(driver, THIS_MODULE)
+
struct host1x_device {
struct host1x_driver *driver;
struct list_head list;
@@ -272,6 +284,8 @@ struct host1x_device {
struct mutex clients_lock;
struct list_head clients;
+
+ bool registered;
};
static inline struct host1x_device *to_host1x_device(struct device *dev)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a036d058a249..05f6df1fdf5b 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -170,6 +170,7 @@ enum hrtimer_base_type {
* @clock_was_set: Indicates that clock was set from irq context.
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @hang_detected: The last hrtimer interrupt detected a hang
* @nr_events: Total number of hrtimer interrupt events
@@ -185,6 +186,7 @@ struct hrtimer_cpu_base {
unsigned int clock_was_set;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
+ int in_hrtirq;
int hres_active;
int hang_detected;
unsigned long nr_events;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ad9051bab267..f10b20f05159 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,6 +157,13 @@ static inline int hpage_nr_pages(struct page *page)
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+extern struct page *huge_zero_page;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+ return ACCESS_ONCE(huge_zero_page) == page;
+}
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -206,6 +213,11 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str
return 0;
}
+static inline bool is_huge_zero_page(struct page *page)
+{
+ return false;
+}
+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 431b7fc605c9..7b5785032049 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -86,7 +86,7 @@ void free_huge_page(struct page *page);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif
-extern unsigned long hugepages_treat_as_movable;
+extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
@@ -99,9 +99,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write);
+ pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write);
+ pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pmd);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -133,8 +133,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
-#define follow_huge_pmd(mm, addr, pmd, write) NULL
-#define follow_huge_pud(mm, addr, pud, write) NULL
+#define follow_huge_pmd(mm, addr, pmd, flags) NULL
+#define follow_huge_pud(mm, addr, pud, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 914bb08cd738..eb7b414d232b 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -12,8 +12,10 @@
#ifndef LINUX_HWRANDOM_H_
#define LINUX_HWRANDOM_H_
+#include <linux/completion.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/kref.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -44,6 +46,8 @@ struct hwrng {
/* internal. */
struct list_head list;
+ struct kref ref;
+ struct completion cleanup_done;
};
/** Register a new Hardware Random Number Generator driver. */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 476c685ca6f9..5a2ba674795e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -57,6 +57,18 @@ struct hv_multipage_buffer {
u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
+/*
+ * Multiple-page buffer array; the pfn array is variable size:
+ * The number of entries in the PFN array is determined by
+ * "len" and "offset".
+ */
+struct hv_mpb_array {
+ /* Length and Offset determines the # of pfns in the array */
+ u32 len;
+ u32 offset;
+ u64 pfn_array[];
+};
+
/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET (0x18 + \
(sizeof(struct hv_page_buffer) * \
@@ -722,7 +734,12 @@ struct vmbus_channel {
*/
void (*sc_creation_callback)(struct vmbus_channel *new_sc);
- spinlock_t sc_lock;
+ /*
+ * The spinlock protecting the structure. It is used to protect
+ * test-and-set access to various attributes of the structure as well
+ * as all sc_list operations.
+ */
+ spinlock_t lock;
/*
* All Sub-channels of a primary channel are linked here.
*/
@@ -814,6 +831,18 @@ struct vmbus_channel_packet_multipage_buffer {
struct hv_multipage_buffer range;
} __packed;
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_packet_mpb_array {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount; /* Always 1 in this case */
+ struct hv_mpb_array range;
+} __packed;
+
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -845,6 +874,13 @@ extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
+extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ struct vmbus_packet_mpb_array *mpb,
+ u32 desc_size,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e3a1721c8354..7c7695940ddd 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -228,7 +228,9 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
+#endif
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
/* I2C slave support */
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_REQ_READ_START,
I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
{
return client->slave_cb(client, event, val);
}
+#endif
/**
* struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
/* To determine what the adapter supports */
u32 (*functionality) (struct i2c_adapter *);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int (*reg_slave)(struct i2c_client *client);
int (*unreg_slave)(struct i2c_client *client);
+#endif
};
/**
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 4f4eea8a6288..b9c7897dc566 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1017,6 +1017,15 @@ struct ieee80211_mmie {
u8 mic[8];
} __packed;
+/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
+struct ieee80211_mmie_16 {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[16];
+} __packed;
+
struct ieee80211_vendor_ie {
u8 element_id;
u8 len;
@@ -1994,9 +2003,15 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
WLAN_KEY_LEN_WEP104 = 13,
WLAN_KEY_LEN_CCMP = 16,
+ WLAN_KEY_LEN_CCMP_256 = 32,
WLAN_KEY_LEN_TKIP = 32,
WLAN_KEY_LEN_AES_CMAC = 16,
WLAN_KEY_LEN_SMS4 = 32,
+ WLAN_KEY_LEN_GCMP = 16,
+ WLAN_KEY_LEN_GCMP_256 = 32,
+ WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+ WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+ WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
#define IEEE80211_WEP_IV_LEN 4
@@ -2004,9 +2019,16 @@ enum ieee80211_key_len {
#define IEEE80211_CCMP_HDR_LEN 8
#define IEEE80211_CCMP_MIC_LEN 8
#define IEEE80211_CCMP_PN_LEN 6
+#define IEEE80211_CCMP_256_HDR_LEN 8
+#define IEEE80211_CCMP_256_MIC_LEN 16
+#define IEEE80211_CCMP_256_PN_LEN 6
#define IEEE80211_TKIP_IV_LEN 8
#define IEEE80211_TKIP_ICV_LEN 4
#define IEEE80211_CMAC_PN_LEN 6
+#define IEEE80211_GMAC_PN_LEN 6
+#define IEEE80211_GCMP_HDR_LEN 8
+#define IEEE80211_GCMP_MIC_LEN 16
+#define IEEE80211_GCMP_PN_LEN 6
/* Public action codes */
enum ieee80211_pub_actioncode {
@@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
+#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
+#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
#define WLAN_CIPHER_SUITE_SMS4 0x00147201
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0a8ce762a47f..a57bca2ea97e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -50,24 +50,6 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use
typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;
-#if IS_ENABLED(CONFIG_BRIDGE)
-int br_fdb_external_learn_add(struct net_device *dev,
- const unsigned char *addr, u16 vid);
-int br_fdb_external_learn_del(struct net_device *dev,
- const unsigned char *addr, u16 vid);
-#else
-static inline int br_fdb_external_learn_add(struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return 0;
-}
-static inline int br_fdb_external_learn_del(struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return 0;
-}
-#endif
-
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 515a35e2a48a..b11b28a30b9e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
if (likely(skb))
skb->vlan_tci = 0;
return skb;
@@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
*/
static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
{
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
@@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
- if (vlan_tx_tag_present(skb)) {
- *vlan_tci = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ *vlan_tci = skb_vlan_tag_get(skb);
return 0;
} else {
*vlan_tci = 0;
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
*
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+ int *depth)
{
- __be16 protocol = 0;
-
- if (vlan_tx_tag_present(skb) ||
- skb->protocol != cpu_to_be16(ETH_P_8021Q))
- protocol = skb->protocol;
- else {
- __be16 proto, *protop;
- protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
- h_vlan_encapsulated_proto),
- sizeof(proto), &proto);
- if (likely(protop))
- protocol = *protop;
+ unsigned int vlan_depth = skb->mac_len;
+
+ /* if type is 802.1Q/AD then the header should already be
+ * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+ * ETH_HLEN otherwise
+ */
+ if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (vlan_depth) {
+ if (WARN_ON(vlan_depth < VLAN_HLEN))
+ return 0;
+ vlan_depth -= VLAN_HLEN;
+ } else {
+ vlan_depth = ETH_HLEN;
+ }
+ do {
+ struct vlan_hdr *vh;
+
+ if (unlikely(!pskb_may_pull(skb,
+ vlan_depth + VLAN_HLEN)))
+ return 0;
+
+ vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+ type = vh->h_vlan_encapsulated_proto;
+ vlan_depth += VLAN_HLEN;
+ } while (type == htons(ETH_P_8021Q) ||
+ type == htons(ETH_P_8021AD));
}
- return protocol;
+ if (depth)
+ *depth = vlan_depth;
+
+ return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+ return __vlan_get_protocol(skb, skb->protocol, NULL);
}
static inline void vlan_set_encap_proto(struct sk_buff *skb,
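Sketch of the reworked helper in use: walk past any number of stacked 802.1Q/802.1ad tags to find the real EtherType and the byte offset of the L3 header. The wrapper below and its assumption that skb->mac_len is already set are illustrative only.

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static bool foo_skb_is_ipv4(struct sk_buff *skb, int *l3_off)
{
	int depth = 0;
	__be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);

	if (proto != htons(ETH_P_IP))
		return false;

	*l3_off = depth;	/* Ethernet + VLAN header bytes */
	return true;
}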
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 519392763393..b65850a41127 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -25,9 +25,7 @@ struct iio_buffer;
* available.
* @request_update: if a parameter change has been marked, update underlying
* storage.
- * @get_bytes_per_datum:get current bytes per datum
* @set_bytes_per_datum:set number of bytes per datum
- * @get_length: get number of datums in buffer
* @set_length: set number of datums in buffer
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
@@ -49,9 +47,7 @@ struct iio_buffer_access_funcs {
int (*request_update)(struct iio_buffer *buffer);
- int (*get_bytes_per_datum)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*get_length)(struct iio_buffer *buffer);
int (*set_length)(struct iio_buffer *buffer, int length);
void (*release)(struct iio_buffer *buffer);
@@ -85,10 +81,11 @@ struct iio_buffer {
bool scan_timestamp;
const struct iio_buffer_access_funcs *access;
struct list_head scan_el_dev_attr_list;
+ struct attribute_group buffer_group;
struct attribute_group scan_el_group;
wait_queue_head_t pollq;
bool stufftoread;
- const struct attribute_group *attrs;
+ const struct attribute **attrs;
struct list_head demux_list;
void *demux_bounce;
struct list_head buffer_list;
@@ -117,15 +114,6 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit);
/**
- * iio_scan_mask_set() - set particular bit in the scan mask
- * @indio_dev IIO device structure
- * @buffer: the buffer whose scan mask we are interested in
- * @bit: the bit to be set.
- **/
-int iio_scan_mask_set(struct iio_dev *indio_dev,
- struct iio_buffer *buffer, int bit);
-
-/**
* iio_push_to_buffers() - push to a registered buffer.
* @indio_dev: iio_dev structure for device.
* @data: Full scan.
@@ -159,56 +147,6 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
int iio_update_demux(struct iio_dev *indio_dev);
-/**
- * iio_buffer_register() - register the buffer with IIO core
- * @indio_dev: device with the buffer to be registered
- * @channels: the channel descriptions used to construct buffer
- * @num_channels: the number of channels
- **/
-int iio_buffer_register(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels,
- int num_channels);
-
-/**
- * iio_buffer_unregister() - unregister the buffer from IIO core
- * @indio_dev: the device with the buffer to be unregistered
- **/
-void iio_buffer_unregister(struct iio_dev *indio_dev);
-
-/**
- * iio_buffer_read_length() - attr func to get number of datums in the buffer
- **/
-ssize_t iio_buffer_read_length(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
- * iio_buffer_write_length() - attr func to set number of datums in the buffer
- **/
-ssize_t iio_buffer_write_length(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len);
-/**
- * iio_buffer_store_enable() - attr to turn the buffer on
- **/
-ssize_t iio_buffer_store_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len);
-/**
- * iio_buffer_show_enable() - attr to see if the buffer is on
- **/
-ssize_t iio_buffer_show_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
- iio_buffer_read_length, \
- iio_buffer_write_length)
-
-#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
- iio_buffer_show_enable, \
- iio_buffer_store_enable)
-
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
@@ -232,16 +170,6 @@ static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
#else /* CONFIG_IIO_BUFFER */
-static inline int iio_buffer_register(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels,
- int num_channels)
-{
- return 0;
-}
-
-static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
-{}
-
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
static inline void iio_buffer_put(struct iio_buffer *buffer) {}
diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h
new file mode 100644
index 000000000000..f4d1b0edb432
--- /dev/null
+++ b/include/linux/iio/common/ssp_sensors.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _SSP_SENSORS_H_
+#define _SSP_SENSORS_H_
+
+#include <linux/iio/iio.h>
+
+#define SSP_TIME_SIZE 4
+#define SSP_ACCELEROMETER_SIZE 6
+#define SSP_GYROSCOPE_SIZE 6
+#define SSP_BIO_HRM_RAW_SIZE 8
+#define SSP_BIO_HRM_RAW_FAC_SIZE 36
+#define SSP_BIO_HRM_LIB_SIZE 8
+
+/**
+ * enum ssp_sensor_type - SSP sensor type
+ */
+enum ssp_sensor_type {
+ SSP_ACCELEROMETER_SENSOR = 0,
+ SSP_GYROSCOPE_SENSOR,
+ SSP_GEOMAGNETIC_UNCALIB_SENSOR,
+ SSP_GEOMAGNETIC_RAW,
+ SSP_GEOMAGNETIC_SENSOR,
+ SSP_PRESSURE_SENSOR,
+ SSP_GESTURE_SENSOR,
+ SSP_PROXIMITY_SENSOR,
+ SSP_TEMPERATURE_HUMIDITY_SENSOR,
+ SSP_LIGHT_SENSOR,
+ SSP_PROXIMITY_RAW,
+ SSP_ORIENTATION_SENSOR,
+ SSP_STEP_DETECTOR,
+ SSP_SIG_MOTION_SENSOR,
+ SSP_GYRO_UNCALIB_SENSOR,
+ SSP_GAME_ROTATION_VECTOR,
+ SSP_ROTATION_VECTOR,
+ SSP_STEP_COUNTER,
+ SSP_BIO_HRM_RAW,
+ SSP_BIO_HRM_RAW_FAC,
+ SSP_BIO_HRM_LIB,
+ SSP_SENSOR_MAX,
+};
+
+struct ssp_data;
+
+/**
+ * struct ssp_sensor_data - Sensor object
+ * @process_data: Callback to feed sensor data.
+ * @type: Used sensor type.
+ * @buffer: Received data buffer.
+ */
+struct ssp_sensor_data {
+ int (*process_data)(struct iio_dev *indio_dev, void *buf,
+ int64_t timestamp);
+ enum ssp_sensor_type type;
+ u8 *buffer;
+};
+
+void ssp_register_consumer(struct iio_dev *indio_dev,
+ enum ssp_sensor_type type);
+
+int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay);
+
+int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type);
+
+u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type);
+
+int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay);
+#endif /* _SSP_SENSORS_H_ */
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 651f9a0e2765..26fb8f6342bb 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -151,6 +151,16 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_write_channel_raw() - write to a given channel
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ *
+ * Note that raw writes to IIO channels are in DAC counts and hence
+ * the channel scale will need to be applied if standard units are required.
+ */
+int iio_write_channel_raw(struct iio_channel *chan, int val);
+
+/**
* iio_get_channel_type() - get the type of a channel
* @channel: The channel being queried.
* @type: The type of the channel.
@@ -191,7 +201,7 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
 * The scale factor allows increasing the precision of the returned value. For
* a scale factor of 1 the function will return the result in the normal IIO
* unit for the channel type. E.g. millivolt for voltage channels, if you want
- * nanovolts instead pass 1000 as the scale factor.
+ * nanovolts instead pass 1000000 as the scale factor.
*/
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale);
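
A trivial consumer-side sketch of the new write interface; the channel is assumed to have been looked up elsewhere (for example with iio_channel_get()) and the value is already in DAC counts:

static int example_set_dac_counts(struct iio_channel *chan, int counts)
{
	/* raw writes take DAC counts; apply the channel scale first
	 * if the value starts out in standard units */
	return iio_write_channel_raw(chan, counts);
}
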
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 3642ce7ef512..80d855061064 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -38,6 +38,11 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
};
enum iio_shared_by {
@@ -284,10 +289,11 @@ static inline s64 iio_get_time_ns(void)
/* Device operating modes */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
+#define INDIO_BUFFER_SOFTWARE 0x04
#define INDIO_BUFFER_HARDWARE 0x08
#define INDIO_ALL_BUFFER_MODES \
- (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
+ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
#define INDIO_MAX_RAW_ELEMENTS 4
@@ -591,7 +597,8 @@ void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
+ INDIO_BUFFER_SOFTWARE);
}
/**
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 25eeac762e84..1683bc710d14 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -5,7 +5,10 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
-struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
+struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
+struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
+void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
+
#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 4a2af8adf874..580ed5bdb3fa 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -30,6 +30,11 @@ enum iio_chan_type {
IIO_CCT,
IIO_PRESSURE,
IIO_HUMIDITYRELATIVE,
+ IIO_ACTIVITY,
+ IIO_STEPS,
+ IIO_ENERGY,
+ IIO_DISTANCE,
+ IIO_VELOCITY,
};
enum iio_modifier {
@@ -59,7 +64,12 @@ enum iio_modifier {
IIO_MOD_NORTH_MAGN,
IIO_MOD_NORTH_TRUE,
IIO_MOD_NORTH_MAGN_TILT_COMP,
- IIO_MOD_NORTH_TRUE_TILT_COMP
+ IIO_MOD_NORTH_TRUE_TILT_COMP,
+ IIO_MOD_RUNNING,
+ IIO_MOD_JOGGING,
+ IIO_MOD_WALKING,
+ IIO_MOD_STILL,
+ IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z,
};
enum iio_event_type {
@@ -68,6 +78,7 @@ enum iio_event_type {
IIO_EV_TYPE_ROC,
IIO_EV_TYPE_THRESH_ADAPTIVE,
IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_TYPE_CHANGE,
};
enum iio_event_info {
@@ -81,6 +92,7 @@ enum iio_event_direction {
IIO_EV_DIR_EITHER,
IIO_EV_DIR_RISING,
IIO_EV_DIR_FALLING,
+ IIO_EV_DIR_NONE,
};
#define IIO_VAL_INT 1
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3037fc085e8e..696d22312b31 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -175,6 +175,13 @@ extern struct task_group root_task_group;
# define INIT_NUMA_BALANCING(tsk)
#endif
+#ifdef CONFIG_KASAN
+# define INIT_KASAN(tsk) \
+ .kasan_depth = 1,
+#else
+# define INIT_KASAN(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -193,6 +200,9 @@ extern struct task_group root_task_group;
.nr_cpus_allowed= NR_CPUS, \
.mm = NULL, \
.active_mm = &init_mm, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
@@ -247,6 +257,7 @@ extern struct task_group root_task_group;
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
+ INIT_KASAN(tsk) \
}
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index f583ff639776..d7188de4db96 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -119,7 +119,8 @@ struct input_mt_pos {
};
int input_mt_assign_slots(struct input_dev *dev, int *slots,
- const struct input_mt_pos *pos, int num_pos);
+ const struct input_mt_pos *pos, int num_pos,
+ int dmax);
int input_mt_get_slot_by_key(struct input_dev *dev, int key);
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 000000000000..1c30014ed176
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_IOPOLL_H
+#define _LINUX_IOPOLL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
+ * be less than ~10us since udelay is used (see
+ * Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ for (;;) { \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ if (delay_us) \
+ udelay(delay_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+
+#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
+
+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
+
+#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
+
+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
+
+#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
+
+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
+
+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
+
+#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
+
+#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#endif /* _LINUX_IOPOLL_H */
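
An illustrative caller of the readl specialization; the status register layout and EXAMPLE_STATUS_READY bit are made up:

#define EXAMPLE_STATUS_READY	BIT(0)	/* hypothetical ready bit */

static int example_wait_ready(void __iomem *status_reg)
{
	u32 val;

	/* poll every ~10us, give up after 100ms; returns 0 or -ETIMEDOUT
	 * and leaves the last value read in val either way */
	return readl_poll_timeout(status_reg, val,
				  val & EXAMPLE_STATUS_READY, 10, 100000);
}
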
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 19e81d5ccb6d..3920a19d8194 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -16,9 +16,6 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-
/* iova structure */
struct iova {
struct rb_node node;
@@ -31,6 +28,8 @@ struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
+ unsigned long granule; /* pfn granularity for this domain */
+ unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
};
@@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova)
return iova->pfn_hi - iova->pfn_lo + 1;
}
+static inline unsigned long iova_shift(struct iova_domain *iovad)
+{
+ return __ffs(iovad->granule);
+}
+
+static inline unsigned long iova_mask(struct iova_domain *iovad)
+{
+ return iovad->granule - 1;
+}
+
+static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
+{
+ return iova & iova_mask(iovad);
+}
+
+static inline size_t iova_align(struct iova_domain *iovad, size_t size)
+{
+ return ALIGN(size, iovad->granule);
+}
+
+static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
+{
+ return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
+}
+
+static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
+{
+ return iova >> iova_shift(iovad);
+}
+
+int iommu_iova_cache_init(void);
+void iommu_iova_cache_destroy(void);
+
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
+void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ unsigned long start_pfn, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c694e7baa621..4d5169f5d7d1 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -52,6 +52,7 @@ struct ipv6_devconf {
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
+ __s32 accept_ra_mtu;
void *sysctl;
};
@@ -124,6 +125,12 @@ struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;
+struct inet6_cork {
+ struct ipv6_txoptions *opt;
+ u8 hop_limit;
+ u8 tclass;
+};
+
/**
* struct ipv6_pinfo - ipv6 private area
*
@@ -216,11 +223,7 @@ struct ipv6_pinfo {
struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
- struct {
- struct ipv6_txoptions *opt;
- u8 hop_limit;
- u8 tclass;
- } cork;
+ struct inet6_cork cork;
};
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1e8b0cf30792..800544bc7bfd 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -33,6 +33,7 @@
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
@@ -41,14 +42,37 @@
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
+/*
+ * Those registers are actually from GICv2, but the spec demands that they
+ * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
+ */
+#define GICD_ITARGETSR 0x0800
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+/*
+ * In systems with a single security state (what we emulate in KVM)
+ * the meaning of the interrupt group enable bits is slightly different
+ */
+#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
+#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
+
+#define GICD_TYPER_LPIS (1U << 17)
+#define GICD_TYPER_MBIS (1U << 16)
+
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_TYPER_LPIS (1U << 17)
@@ -60,6 +84,8 @@
#define GIC_PIDR2_ARCH_GICv3 0x30
#define GIC_PIDR2_ARCH_GICv4 0x40
+#define GIC_V3_DIST_SIZE 0x10000
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -78,6 +104,7 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
@@ -104,6 +131,7 @@
/*
* Re-Distributor registers, offsets from SGI_base
*/
+#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
@@ -112,11 +140,15 @@
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_IGRPMODR0 GICD_IGRPMODR
+#define GICR_NSACR GICD_NSACR
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define GIC_V3_REDIST_SIZE 0x20000
+
#define LPI_PROP_GROUP1 (1 << 1)
#define LPI_PROP_ENABLED (1 << 0)
@@ -248,6 +280,18 @@
#define ICC_SRE_EL2_SRE (1 << 0)
#define ICC_SRE_EL2_ENABLE (1 << 3)
+#define ICC_SGI1R_TARGET_LIST_SHIFT 0
+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
+#define ICC_SGI1R_AFFINITY_1_SHIFT 16
+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_SGI_ID_SHIFT 24
+#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_SHIFT 32
+#define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_AFFINITY_3_SHIFT 48
+#define ICC_SGI1R_AFFINITY_3_MASK	(0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+
/*
* System register definitions
*/
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index e06b370cfc0d..2e3d1afeb674 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -18,9 +18,7 @@
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
-void omap2_init_irq(void);
void omap3_init_irq(void);
-void ti81xx_init_irq(void);
int omap_irq_pending(void);
void omap_intc_save_context(void);
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 31229e0be90b..d32615280be9 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -956,15 +956,6 @@ void __log_wait_for_space(journal_t *journal);
extern void __journal_drop_transaction(journal_t *, transaction_t *);
extern int cleanup_journal_tail(journal_t *);
-/* Debugging code only: */
-
-#define jbd_ENOSYS() \
-do { \
- printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
- current->state = TASK_UNINTERRUPTIBLE; \
- schedule(); \
-} while (1)
-
/*
* is_journal_abort
*
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 704b9a599b26..20e7f78041c8 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1251,15 +1251,6 @@ void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
-/* Debugging code only: */
-
-#define jbd_ENOSYS() \
-do { \
- printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
- current->state = TASK_UNINTERRUPTIBLE; \
- schedule(); \
-} while (1)
-
/*
* is_journal_abort
*
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 000000000000..72ba725ddf9c
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_KASAN_H
+#define _LINUX_KASAN_H
+
+#include <linux/types.h>
+
+struct kmem_cache;
+struct page;
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#include <asm/kasan.h>
+#include <linux/sched.h>
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+}
+
+/* Enable reporting bugs after kasan_disable_current() */
+static inline void kasan_enable_current(void)
+{
+ current->kasan_depth++;
+}
+
+/* Disable reporting bugs for current task */
+static inline void kasan_disable_current(void)
+{
+ current->kasan_depth--;
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kfree_large(const void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
+void kasan_krealloc(const void *object, size_t new_size);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_free(struct kmem_cache *s, void *object);
+
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_module_free(void *addr);
+
+#else /* CONFIG_KASAN */
+
+#define MODULE_ALIGN 1
+
+static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+
+static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object) {}
+
+static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size) {}
+static inline void kasan_krealloc(const void *object, size_t new_size) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_module_free(void *addr) {}
+
+#endif /* CONFIG_KASAN */
+
+#endif /* LINUX_KASAN_H */
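
A sketch of the intended usage of the current-task helpers: code that knowingly touches poisoned memory brackets the access so the task does not report it; example_peek() is hypothetical:

static u8 example_peek(const u8 *addr)
{
	u8 val;

	kasan_disable_current();	/* suppress reports for this task */
	val = *addr;			/* deliberate out-of-object read */
	kasan_enable_current();		/* restore reporting */

	return val;
}
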
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5449d2f4a1ef..d6d630d31ef3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
+# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
@@ -471,6 +471,7 @@ extern enum system_states {
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
@@ -799,9 +800,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
-/* Trap pasters of __FUNCTION__ at compile-time */
-#define __FUNCTION__ (__func__)
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index d4e01b358341..71ecdab1671b 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -43,7 +43,6 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
- KERNFS_STATIC_NAME = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
};
@@ -291,7 +290,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
umode_t mode, loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
- bool name_is_static,
struct lock_class_key *key);
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
@@ -369,8 +367,7 @@ kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
umode_t mode, loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns, bool name_is_static,
- struct lock_class_key *key)
+ void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
@@ -439,7 +436,7 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
key = (struct lock_class_key *)&ops->lockdep_key;
#endif
return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- false, key);
+ key);
}
static inline struct kernfs_node *
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5297f9fa0ef2..1ab54754a86d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -308,7 +308,8 @@ struct optimized_kprobe {
/* Architecture dependent functions for direct jump optimization */
extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
-extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+ struct kprobe *orig);
extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index c9d645ad98ff..5fc3d1083071 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -166,7 +166,17 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
}
#if BITS_PER_LONG < 64
-extern u64 ktime_divns(const ktime_t kt, s64 div);
+extern u64 __ktime_divns(const ktime_t kt, s64 div);
+static inline u64 ktime_divns(const ktime_t kt, s64 div)
+{
+ if (__builtin_constant_p(div) && !(div >> 32)) {
+ u64 ns = kt.tv64;
+ do_div(ns, div);
+ return ns;
+ } else {
+ return __ktime_divns(kt, div);
+ }
+}
#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
#endif
@@ -186,6 +196,11 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
return ktime_to_us(ktime_sub(later, earlier));
}
+static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
+{
+ return ktime_to_ms(ktime_sub(later, earlier));
+}
+
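
A one-line usage sketch for the new millisecond helper, assuming ktime_get() from the timekeeping core:

static s64 example_elapsed_ms(ktime_t start)
{
	/* equivalent to ktime_to_ms(ktime_sub(ktime_get(), start)) */
	return ktime_ms_delta(ktime_get(), start);
}
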
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ktime_add_ns(kt, usec * NSEC_PER_USEC);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 26f106022c88..d12b2104d19b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -33,10 +33,6 @@
#include <asm/kvm_host.h>
-#ifndef KVM_MMIO_SIZE
-#define KVM_MMIO_SIZE 8
-#endif
-
/*
* The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
* in kvm, other bits are visible for userspace which are defined in
@@ -200,17 +196,6 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-/*
- * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
- * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
- * controls whether we retry the gup one more time to completion in that case.
- * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
- * handler.
- */
-int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, bool write_fault,
- struct page **pagep);
-
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -611,6 +596,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty);
+
+int kvm_get_dirty_log_protect(struct kvm *kvm,
+ struct kvm_dirty_log *log, bool *is_dirty);
+
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset,
+ unsigned long mask);
+
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log);
@@ -652,7 +646,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void);
@@ -1042,6 +1036,8 @@ void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
new file mode 100644
index 000000000000..5ba2facd7a51
--- /dev/null
+++ b/include/linux/led-class-flash.h
@@ -0,0 +1,207 @@
+/*
+ * LED Flash class interface
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_FLASH_LEDS_H_INCLUDED
+#define __LINUX_FLASH_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+#include <uapi/linux/v4l2-controls.h>
+
+struct device_node;
+struct led_classdev_flash;
+
+/*
+ * Supported led fault bits - must be kept in synch
+ * with V4L2_FLASH_FAULT bits.
+ */
+#define LED_FAULT_OVER_VOLTAGE (1 << 0)
+#define LED_FAULT_TIMEOUT (1 << 1)
+#define LED_FAULT_OVER_TEMPERATURE (1 << 2)
+#define LED_FAULT_SHORT_CIRCUIT (1 << 3)
+#define LED_FAULT_OVER_CURRENT (1 << 4)
+#define LED_FAULT_INDICATOR (1 << 5)
+#define LED_FAULT_UNDER_VOLTAGE (1 << 6)
+#define LED_FAULT_INPUT_VOLTAGE (1 << 7)
+#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
+#define LED_NUM_FLASH_FAULTS 9
+
+#define LED_FLASH_MAX_SYSFS_GROUPS 7
+
+struct led_flash_ops {
+ /* set flash brightness */
+ int (*flash_brightness_set)(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
+ /* get flash brightness */
+ int (*flash_brightness_get)(struct led_classdev_flash *fled_cdev,
+ u32 *brightness);
+ /* set flash strobe state */
+ int (*strobe_set)(struct led_classdev_flash *fled_cdev, bool state);
+ /* get flash strobe state */
+ int (*strobe_get)(struct led_classdev_flash *fled_cdev, bool *state);
+ /* set flash timeout */
+ int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
+ /* get the flash LED fault */
+ int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
+};
+
+/*
+ * Current value of a flash setting along
+ * with its constraints.
+ */
+struct led_flash_setting {
+	/* minimum allowed value */
+ u32 min;
+ /* maximum allowed value */
+ u32 max;
+ /* step value */
+ u32 step;
+ /* current value */
+ u32 val;
+};
+
+struct led_classdev_flash {
+ /* led class device */
+ struct led_classdev led_cdev;
+
+ /* flash led specific ops */
+ const struct led_flash_ops *ops;
+
+ /* flash brightness value in microamperes along with its constraints */
+ struct led_flash_setting brightness;
+
+ /* flash timeout value in microseconds along with its constraints */
+ struct led_flash_setting timeout;
+
+ /* LED Flash class sysfs groups */
+ const struct attribute_group *sysfs_groups[LED_FLASH_MAX_SYSFS_GROUPS];
+
+ /* LEDs available for flash strobe synchronization */
+ struct led_classdev_flash **sync_leds;
+
+ /* Number of LEDs available for flash strobe synchronization */
+ int num_sync_leds;
+
+ /*
+ * The identifier of the sub-led to synchronize the flash strobe with.
+ * Identifiers start from 1, which reflects the first element from the
+ * sync_leds array. 0 means that the flash strobe should not be
+ * synchronized.
+ */
+ u32 sync_led_id;
+};
+
+static inline struct led_classdev_flash *lcdev_to_flcdev(
+ struct led_classdev *lcdev)
+{
+ return container_of(lcdev, struct led_classdev_flash, led_cdev);
+}
+
+/**
+ * led_classdev_flash_register - register a new object of led_classdev class
+ * with support for flash LEDs
+ * @parent: the flash LED to register
+ * @fled_cdev: the led_classdev_flash structure for this device
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_classdev_flash_unregister - unregisters an object of led_classdev class
+ * with support for flash LEDs
+ * @fled_cdev: the flash LED to unregister
+ *
+ * Unregister an object previously registered via led_classdev_flash_register().
+ */
+extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_set_flash_strobe - setup flash strobe
+ * @fled_cdev: the flash LED to set strobe on
+ * @state: 1 - strobe flash, 0 - stop flash strobe
+ *
+ * Strobe the flash LED.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
+ bool state)
+{
+ return fled_cdev->ops->strobe_set(fled_cdev, state);
+}
+
+/**
+ * led_get_flash_strobe - get flash strobe status
+ * @fled_cdev: the flash LED to query
+ * @state: 1 - flash is strobing, 0 - flash is off
+ *
+ * Check whether the flash is strobing at the moment.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
+ bool *state)
+{
+ if (fled_cdev->ops->strobe_get)
+ return fled_cdev->ops->strobe_get(fled_cdev, state);
+
+ return -EINVAL;
+}
+
+/**
+ * led_set_flash_brightness - set flash LED brightness
+ * @fled_cdev: the flash LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set a flash LED's brightness.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
+
+/**
+ * led_update_flash_brightness - update flash LED brightness
+ * @fled_cdev: the flash LED to query
+ *
+ * Get a flash LED's current brightness and update the brightness
+ * member with the obtained value.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_set_flash_timeout - set flash LED timeout
+ * @fled_cdev: the flash LED to set
+ * @timeout: the flash timeout to set it to
+ *
+ * Set the flash strobe duration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
+ u32 timeout);
+
+/**
+ * led_get_flash_fault - get the flash LED fault
+ * @fled_cdev: the flash LED to query
+ * @fault: bitmask containing flash faults
+ *
+ * Get the flash LED fault.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
+ u32 *fault);
+
+#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
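
An illustrative caller of the flash ops; the numeric values are arbitrary and the device is assumed to have been registered with led_classdev_flash_register():

static int example_fire_flash(struct led_classdev_flash *fled_cdev)
{
	int ret;

	ret = led_set_flash_timeout(fled_cdev, 100000);		/* 100 ms in us */
	if (ret)
		return ret;

	ret = led_set_flash_brightness(fled_cdev, 500000);	/* 500 mA in uA */
	if (ret)
		return ret;

	return led_set_flash_strobe(fled_cdev, true);		/* fire */
}
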
diff --git a/include/linux/leds.h b/include/linux/leds.h
index cfceef32c9b3..f70f84f35674 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -46,6 +46,8 @@ struct led_classdev {
#define LED_SYSFS_DISABLE (1 << 20)
#define SET_BRIGHTNESS_ASYNC (1 << 21)
#define SET_BRIGHTNESS_SYNC (1 << 22)
+#define LED_DEV_CAP_FLASH (1 << 23)
+#define LED_DEV_CAP_SYNC_STROBE (1 << 24)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
@@ -81,6 +83,7 @@ struct led_classdev {
unsigned long blink_delay_on, blink_delay_off;
struct timer_list blink_timer;
int blink_brightness;
+ void (*flash_resume)(struct led_classdev *led_cdev);
struct work_struct set_brightness_work;
int delayed_set_value;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2d182413b1db..fc03efa64ffe 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -231,6 +231,7 @@ enum {
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
* led */
ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
+ ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
@@ -422,6 +423,7 @@ enum {
ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
+ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -821,10 +823,10 @@ struct ata_port {
unsigned int cbl; /* cable type; ATA_CBL_xxx */
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
- unsigned long qc_allocated;
+ unsigned long sas_tag_allocated; /* for sas tag allocation only */
unsigned int qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int last_tag; /* track next tag hw expects */
+ unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -1338,12 +1340,19 @@ extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
extern struct device_attribute *ata_common_sdev_attrs[];
+/*
+ * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
+ * by the edge drivers. Because the 'module' field of sht must be the
+ * edge driver's module reference, otherwise the driver can be unloaded
+ * even if the scsi_device is being accessed.
+ */
#define ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
.queuecommand = ata_scsi_queuecmd, \
.can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
.emulated = ATA_SHT_EMULATED, \
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index f3434533fbf8..2a6b9947aaa3 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -9,6 +9,9 @@
#include <linux/list.h>
#include <linux/nodemask.h>
+#include <linux/shrinker.h>
+
+struct mem_cgroup;
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
@@ -21,24 +24,45 @@ enum lru_status {
internally, but has to return locked. */
};
-struct list_lru_node {
- spinlock_t lock;
+struct list_lru_one {
struct list_head list;
- /* kept as signed so we can catch imbalance bugs */
+ /* may become negative during memcg reparenting */
long nr_items;
+};
+
+struct list_lru_memcg {
+ /* array of per cgroup lists, indexed by memcg_cache_id */
+ struct list_lru_one *lru[0];
+};
+
+struct list_lru_node {
+ /* protects all lists on the node, including per cgroup */
+ spinlock_t lock;
+ /* global list, used for the root cgroup in cgroup aware lrus */
+ struct list_lru_one lru;
+#ifdef CONFIG_MEMCG_KMEM
+ /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
+ struct list_lru_memcg *memcg_lrus;
+#endif
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
- nodemask_t active_nodes;
+#ifdef CONFIG_MEMCG_KMEM
+ struct list_head list;
+#endif
};
void list_lru_destroy(struct list_lru *lru);
-int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);
-static inline int list_lru_init(struct list_lru *lru)
-{
- return list_lru_init_key(lru, NULL);
-}
+int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+ struct lock_class_key *key);
+
+#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
+#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
+#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
+
+int memcg_update_all_list_lrus(int num_memcgs);
+void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
/**
* list_lru_add: add an element to the lru list's tail
@@ -72,32 +96,48 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item);
bool list_lru_del(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_count_node: return the number of objects currently held by @lru
+ * list_lru_count_one: return the number of objects currently held by @lru
* @lru: the lru pointer.
* @nid: the node id to count from.
+ * @memcg: the cgroup to count from.
*
* Always return a non-negative number, 0 for empty lists. There is no
* guarantee that the list is not updated while the count is being computed.
* Callers that want such a guarantee need to provide an outer lock.
*/
+unsigned long list_lru_count_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
+
+static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
+ struct shrink_control *sc)
+{
+ return list_lru_count_one(lru, sc->nid, sc->memcg);
+}
+
static inline unsigned long list_lru_count(struct list_lru *lru)
{
long count = 0;
int nid;
- for_each_node_mask(nid, lru->active_nodes)
+ for_each_node_state(nid, N_NORMAL_MEMORY)
count += list_lru_count_node(lru, nid);
return count;
}
-typedef enum lru_status
-(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
+void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
+void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
+ struct list_head *head);
+
+typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
+ struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+
/**
- * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
@@ -115,18 +155,30 @@ typedef enum lru_status
*
* Return value: the number of objects effectively removed from the LRU.
*/
+unsigned long list_lru_walk_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
static inline unsigned long
+list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
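
A sketch of a memcg-aware shrinker built on the new helpers; example_lru and the callbacks are hypothetical, and the lru is assumed to have been set up with list_lru_init_memcg():

static struct list_lru example_lru;

static enum lru_status example_isolate(struct list_head *item,
				       struct list_lru_one *list,
				       spinlock_t *lock, void *cb_arg)
{
	list_lru_isolate(list, item);	/* drop from the per-memcg list */
	return LRU_REMOVED;
}

static unsigned long example_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return list_lru_shrink_count(&example_lru, sc);
}

static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	return list_lru_shrink_walk(&example_lru, sc, example_isolate, NULL);
}
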
+
+static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
long isolated = 0;
int nid;
- for_each_node_mask(nid, lru->active_nodes) {
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
isolated += list_lru_walk_node(lru, nid, isolate,
cb_arg, &nr_to_walk);
if (nr_to_walk <= 0)
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 5d10ae364b5e..f266661d2666 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -1,6 +1,9 @@
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
+#include <linux/poison.h>
+#include <linux/const.h>
+
/*
* Special version of lists, where end of list is not a NULL pointer,
* but a 'nulls' marker, which can have many different values.
@@ -21,8 +24,9 @@ struct hlist_nulls_head {
struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
+#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
- ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
+ ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
/**
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
new file mode 100644
index 000000000000..95023fd8b00d
--- /dev/null
+++ b/include/linux/livepatch.h
@@ -0,0 +1,133 @@
+/*
+ * livepatch.h - Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_LIVEPATCH_H_
+#define _LINUX_LIVEPATCH_H_
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#if IS_ENABLED(CONFIG_LIVEPATCH)
+
+#include <asm/livepatch.h>
+
+enum klp_state {
+ KLP_DISABLED,
+ KLP_ENABLED
+};
+
+/**
+ * struct klp_func - function structure for live patching
+ * @old_name: name of the function to be patched
+ * @new_func: pointer to the patched function code
+ * @old_addr: a hint conveying at what address the old function
+ * can be found (optional, vmlinux patches only)
+ * @kobj: kobject for sysfs resources
+ * @state: tracks function-level patch application state
+ * @stack_node: list node for klp_ops func_stack list
+ */
+struct klp_func {
+ /* external */
+ const char *old_name;
+ void *new_func;
+ /*
+ * The old_addr field is optional and can be used to resolve
+ * duplicate symbol names in the vmlinux object. If this
+ * information is not present, the symbol is located by name
+ * with kallsyms. If the name is not unique and old_addr is
+ * not provided, the patch application fails as there is no
+ * way to resolve the ambiguity.
+ */
+ unsigned long old_addr;
+
+ /* internal */
+ struct kobject kobj;
+ enum klp_state state;
+ struct list_head stack_node;
+};
+
+/**
+ * struct klp_reloc - relocation structure for live patching
+ * @loc: address where the relocation will be written
+ * @val: address of the referenced symbol (optional,
+ * vmlinux patches only)
+ * @type: ELF relocation type
+ * @name: name of the referenced symbol (for lookup/verification)
+ * @addend: offset from the referenced symbol
+ * @external: symbol is either exported or within the live patch module itself
+ */
+struct klp_reloc {
+ unsigned long loc;
+ unsigned long val;
+ unsigned long type;
+ const char *name;
+ int addend;
+ int external;
+};
+
+/**
+ * struct klp_object - kernel object structure for live patching
+ * @name: module name (or NULL for vmlinux)
+ * @relocs: relocation entries to be applied at load time
+ * @funcs: function entries for functions to be patched in the object
+ * @kobj: kobject for sysfs resources
+ * @mod: kernel module associated with the patched object
+ * (NULL for vmlinux)
+ * @state: tracks object-level patch application state
+ */
+struct klp_object {
+ /* external */
+ const char *name;
+ struct klp_reloc *relocs;
+ struct klp_func *funcs;
+
+ /* internal */
+ struct kobject *kobj;
+ struct module *mod;
+ enum klp_state state;
+};
+
+/**
+ * struct klp_patch - patch structure for live patching
+ * @mod: reference to the live patch module
+ * @objs: object entries for kernel objects to be patched
+ * @list: list node for global list of registered patches
+ * @kobj: kobject for sysfs resources
+ * @state: tracks patch-level application state
+ */
+struct klp_patch {
+ /* external */
+ struct module *mod;
+ struct klp_object *objs;
+
+ /* internal */
+ struct list_head list;
+ struct kobject kobj;
+ enum klp_state state;
+};
+
+extern int klp_register_patch(struct klp_patch *);
+extern int klp_unregister_patch(struct klp_patch *);
+extern int klp_enable_patch(struct klp_patch *);
+extern int klp_disable_patch(struct klp_patch *);
+
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_H_ */
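
A minimal sketch of a live patch module using these structures, modelled on the sample module; the patched symbol and replacement function are placeholders and includes are elided:

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "this has been live patched\n");
	return 0;
}

static struct klp_func funcs[] = {
	{ .old_name = "cmdline_proc_show", .new_func = example_show, },
	{ }
};

static struct klp_object objs[] = {
	{ .name = NULL /* vmlinux */, .funcs = funcs, },
	{ }
};

static struct klp_patch patch = {
	.mod  = THIS_MODULE,
	.objs = objs,
};

static int __init example_init(void)
{
	int ret = klp_register_patch(&patch);

	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		klp_unregister_patch(&patch);
		return ret;
	}

	return 0;
}
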
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 164aad1f9f12..0819d36a3a74 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -25,8 +25,8 @@ int __mei_cl_driver_register(struct mei_cl_driver *driver,
void mei_cl_driver_unregister(struct mei_cl_driver *driver);
-int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
-int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
+ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
+ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
u32 events, void *context);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7c95af8d552c..72dff5fb0d0c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,7 +52,27 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
+enum mem_cgroup_events_index {
+ MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
+ MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
+ MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
+ MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
+ MEM_CGROUP_EVENTS_NSTATS,
+ /* default hierarchy events */
+ MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
+ MEMCG_HIGH,
+ MEMCG_MAX,
+ MEMCG_OOM,
+ MEMCG_NR_EVENTS,
+};
+
#ifdef CONFIG_MEMCG
+void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr);
+
+bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -102,6 +122,7 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
* For memory reclaim.
*/
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
@@ -138,12 +159,10 @@ static inline bool mem_cgroup_disabled(void)
return false;
}
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
- unsigned long *flags);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
- unsigned long *flags);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx, int val);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
@@ -176,6 +195,18 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#else /* CONFIG_MEMCG */
struct mem_cgroup;
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr)
+{
+}
+
+static inline bool mem_cgroup_low(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
+{
+ return false;
+}
+
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask,
struct mem_cgroup **memcgp)
@@ -268,6 +299,11 @@ mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
return 1;
}
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ return true;
+}
+
static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
@@ -285,14 +321,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
- bool *locked, unsigned long *flags)
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
return NULL;
}
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
- bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}
@@ -364,7 +398,9 @@ static inline void sock_release_memcg(struct sock *sk)
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;
-extern int memcg_limited_groups_array_size;
+extern int memcg_nr_cache_ids;
+extern void memcg_get_cache_ids(void);
+extern void memcg_put_cache_ids(void);
/*
* Helper macro to loop through all memcg-specific caches. Callers must still
@@ -372,13 +408,15 @@ extern int memcg_limited_groups_array_size;
* the slab_mutex must be held when looping through those caches
*/
#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
+ for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
static inline bool memcg_kmem_enabled(void)
{
return static_key_false(&memcg_kmem_enabled_key);
}
+bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+
/*
* In general, we'll do everything in our power not to incur any overhead
* for non-memcg users for the kmem functions. Not even a function call, if we
@@ -398,15 +436,14 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
-void memcg_update_array_size(int num_groups);
-
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
+struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
-int __memcg_cleanup_cache_params(struct kmem_cache *s);
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned long nr_pages);
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
/**
* memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -500,6 +537,13 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep);
}
+
+static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+ if (!memcg_kmem_enabled())
+ return NULL;
+ return __mem_cgroup_from_kmem(ptr);
+}
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -509,6 +553,11 @@ static inline bool memcg_kmem_enabled(void)
return false;
}
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return false;
+}
+
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
@@ -529,6 +578,14 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1;
}
+static inline void memcg_get_cache_ids(void)
+{
+}
+
+static inline void memcg_put_cache_ids(void)
+{
+}
+
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
@@ -538,6 +595,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
+
+static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+ return NULL;
+}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
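
A rough sketch of how callers are expected to use the reworked page-stat API above; the wrapper function is hypothetical and MEM_CGROUP_STAT_FILE_MAPPED is used purely as an example index from enum mem_cgroup_stat_index.

static void example_account_mapped(struct page *page)
{
        struct mem_cgroup *memcg;

        /* no irq-flags/locked state to carry around any more */
        memcg = mem_cgroup_begin_page_stat(page);
        mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
        mem_cgroup_end_page_stat(memcg);
}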
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index cc892a8d8d6e..12a5b396921e 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -461,7 +461,6 @@ struct ab8500_fg;
#ifdef CONFIG_AB8500_BM
extern struct abx500_bm_data ab8500_bm_data;
-void ab8500_fg_reinit(void);
void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
struct ab8500_btemp *ab8500_btemp_get(void);
int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 08dae01258b9..955dd990beaf 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -143,10 +143,118 @@ enum max77693_pmic_reg {
#define FLASH_INT_FLED1_SHORT BIT(3)
#define FLASH_INT_OVER_CURRENT BIT(4)
+/* Fast charge timer in hours */
+#define DEFAULT_FAST_CHARGE_TIMER 4
+/* microamps */
+#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
+/* minutes */
+#define DEFAULT_TOP_OFF_TIMER 30
+/* microvolts */
+#define DEFAULT_CONSTANT_VOLT 4200000
+/* microvolts */
+#define DEFAULT_MIN_SYSTEM_VOLT 3600000
+/* celsius */
+#define DEFAULT_THERMAL_REGULATION_TEMP 100
+/* microamps */
+#define DEFAULT_BATTERY_OVERCURRENT 3500000
+/* microvolts */
+#define DEFAULT_CHARGER_INPUT_THRESHOLD_VOLT 4300000
+
+/* MAX77693_CHG_REG_CHG_INT_OK register */
+#define CHG_INT_OK_BYP_SHIFT 0
+#define CHG_INT_OK_BAT_SHIFT 3
+#define CHG_INT_OK_CHG_SHIFT 4
+#define CHG_INT_OK_CHGIN_SHIFT 6
+#define CHG_INT_OK_DETBAT_SHIFT 7
+#define CHG_INT_OK_BYP_MASK BIT(CHG_INT_OK_BYP_SHIFT)
+#define CHG_INT_OK_BAT_MASK BIT(CHG_INT_OK_BAT_SHIFT)
+#define CHG_INT_OK_CHG_MASK BIT(CHG_INT_OK_CHG_SHIFT)
+#define CHG_INT_OK_CHGIN_MASK BIT(CHG_INT_OK_CHGIN_SHIFT)
+#define CHG_INT_OK_DETBAT_MASK BIT(CHG_INT_OK_DETBAT_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_00 register */
+#define CHG_DETAILS_00_CHGIN_SHIFT 5
+#define CHG_DETAILS_00_CHGIN_MASK (0x3 << CHG_DETAILS_00_CHGIN_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01 register */
+#define CHG_DETAILS_01_CHG_SHIFT 0
+#define CHG_DETAILS_01_BAT_SHIFT 4
+#define CHG_DETAILS_01_TREG_SHIFT 7
+#define CHG_DETAILS_01_CHG_MASK (0xf << CHG_DETAILS_01_CHG_SHIFT)
+#define CHG_DETAILS_01_BAT_MASK (0x7 << CHG_DETAILS_01_BAT_SHIFT)
+#define CHG_DETAILS_01_TREG_MASK BIT(7)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01/CHG field */
+enum max77693_charger_charging_state {
+ MAX77693_CHARGING_PREQUALIFICATION = 0x0,
+ MAX77693_CHARGING_FAST_CONST_CURRENT,
+ MAX77693_CHARGING_FAST_CONST_VOLTAGE,
+ MAX77693_CHARGING_TOP_OFF,
+ MAX77693_CHARGING_DONE,
+ MAX77693_CHARGING_HIGH_TEMP,
+ MAX77693_CHARGING_TIMER_EXPIRED,
+ MAX77693_CHARGING_THERMISTOR_SUSPEND,
+ MAX77693_CHARGING_OFF,
+ MAX77693_CHARGING_RESERVED,
+ MAX77693_CHARGING_OVER_TEMP,
+ MAX77693_CHARGING_WATCHDOG_EXPIRED,
+};
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01/BAT field */
+enum max77693_charger_battery_state {
+ MAX77693_BATTERY_NOBAT = 0x0,
+ /* Dead-battery or low-battery prequalification */
+ MAX77693_BATTERY_PREQUALIFICATION,
+ MAX77693_BATTERY_TIMER_EXPIRED,
+ MAX77693_BATTERY_GOOD,
+ MAX77693_BATTERY_LOWVOLTAGE,
+ MAX77693_BATTERY_OVERVOLTAGE,
+ MAX77693_BATTERY_OVERCURRENT,
+ MAX77693_BATTERY_RESERVED,
+};
+
+/* MAX77693_CHG_REG_CHG_DETAILS_02 register */
+#define CHG_DETAILS_02_BYP_SHIFT 0
+#define CHG_DETAILS_02_BYP_MASK (0xf << CHG_DETAILS_02_BYP_SHIFT)
+
/* MAX77693 CHG_CNFG_00 register */
#define CHG_CNFG_00_CHG_MASK 0x1
#define CHG_CNFG_00_BUCK_MASK 0x4
+/* MAX77693_CHG_REG_CHG_CNFG_01 register */
+#define CHG_CNFG_01_FCHGTIME_SHIFT 0
+#define CHG_CNFG_01_CHGRSTRT_SHIFT 4
+#define CHG_CNFG_01_PQEN_SHIFT 7
+#define CHG_CNFG_01_FCHGTIME_MASK (0x7 << CHG_CNFG_01_FCHGTIME_SHIFT)
+#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
+#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_03 register */
+#define CHG_CNFG_03_TOITH_SHIFT 0
+#define CHG_CNFG_03_TOTIME_SHIFT 3
+#define CHG_CNFG_03_TOITH_MASK (0x7 << CHG_CNFG_03_TOITH_SHIFT)
+#define CHG_CNFG_03_TOTIME_MASK (0x7 << CHG_CNFG_03_TOTIME_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_04 register */
+#define CHG_CNFG_04_CHGCVPRM_SHIFT 0
+#define CHG_CNFG_04_MINVSYS_SHIFT 5
+#define CHG_CNFG_04_CHGCVPRM_MASK (0x1f << CHG_CNFG_04_CHGCVPRM_SHIFT)
+#define CHG_CNFG_04_MINVSYS_MASK (0x7 << CHG_CNFG_04_MINVSYS_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_06 register */
+#define CHG_CNFG_06_CHGPROT_SHIFT 2
+#define CHG_CNFG_06_CHGPROT_MASK (0x3 << CHG_CNFG_06_CHGPROT_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_07 register */
+#define CHG_CNFG_07_REGTEMP_SHIFT 5
+#define CHG_CNFG_07_REGTEMP_MASK (0x3 << CHG_CNFG_07_REGTEMP_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_12 register */
+#define CHG_CNFG_12_B2SOVRC_SHIFT 0
+#define CHG_CNFG_12_VCHGINREG_SHIFT 3
+#define CHG_CNFG_12_B2SOVRC_MASK (0x7 << CHG_CNFG_12_B2SOVRC_SHIFT)
+#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
+
/* MAX77693 CHG_CNFG_09 Register */
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
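
To make the new register layout concrete, a minimal decoding sketch (the raw CHG_DETAILS_01 value is assumed to have been read elsewhere, e.g. via regmap, and the helper is hypothetical):

static enum max77693_charger_charging_state
example_charging_state(unsigned int details_01)
{
        /* extract the CHG field of CHG_DETAILS_01 */
        return (details_01 & CHG_DETAILS_01_CHG_MASK) >> CHG_DETAILS_01_CHG_SHIFT;
}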
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index ce5dda8958fe..b1fd675fa36f 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -59,6 +59,7 @@ enum s2mps13_reg {
S2MPS13_REG_B6CTRL,
S2MPS13_REG_B6OUT,
S2MPS13_REG_B7CTRL,
+ S2MPS13_REG_B7SW,
S2MPS13_REG_B7OUT,
S2MPS13_REG_B8CTRL,
S2MPS13_REG_B8OUT,
@@ -102,6 +103,7 @@ enum s2mps13_reg {
S2MPS13_REG_L26CTRL,
S2MPS13_REG_L27CTRL,
S2MPS13_REG_L28CTRL,
+ S2MPS13_REG_L29CTRL,
S2MPS13_REG_L30CTRL,
S2MPS13_REG_L31CTRL,
S2MPS13_REG_L32CTRL,
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index f742b6717d52..c9d869027300 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -118,20 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
- * struct stmpe_gpio_platform_data - STMPE GPIO platform data
- * @norequest_mask: bitmask specifying which GPIOs should _not_ be
- * requestable due to different usage (e.g. touch, keypad)
- * STMPE_GPIO_NOREQ_* macros can be used here.
- * @setup: board specific setup callback.
- * @remove: board specific remove callback
- */
-struct stmpe_gpio_platform_data {
- unsigned norequest_mask;
- void (*setup)(struct stmpe *stmpe, unsigned gpio_base);
- void (*remove)(struct stmpe *stmpe, unsigned gpio_base);
-};
-
-/**
* struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
* data
* @sample_time: ADC conversion time in number of clock cycles.
@@ -182,7 +168,6 @@ struct stmpe_ts_platform_data {
* @irq_over_gpio: true if gpio is used to get irq
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
- * @gpio: GPIO-specific platform data
* @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
@@ -194,7 +179,6 @@ struct stmpe_platform_data {
int irq_gpio;
int autosleep_timeout;
- struct stmpe_gpio_platform_data *gpio;
struct stmpe_ts_platform_data *ts;
};
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
new file mode 100644
index 000000000000..278b1b1549e9
--- /dev/null
+++ b/include/linux/mfd/syscon/exynos4-pmu.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
+#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
+
+/* Exynos4 PMU register definitions */
+
+/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
+#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
+#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
+#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+
+#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
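
A hedged usage sketch for the new header: a PHY driver would typically obtain the PMU as a syscon regmap and flip these bits with regmap_update_bits(); the device-tree property name and the helper below are only examples, not part of the patch.

static int example_mipi_phy_power_on(struct device *dev, int id)
{
        struct regmap *pmu;

        pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
                                              "samsung,pmu-syscon"); /* example property */
        if (IS_ERR(pmu))
                return PTR_ERR(pmu);

        /* enable MIPI PHY instance 'id' and de-assert both resets */
        return regmap_update_bits(pmu, EXYNOS4_MIPI_PHY_CONTROL(id),
                                  EXYNOS4_MIPI_PHY_ENABLE | EXYNOS4_MIPI_PHY_RESET_MASK,
                                  EXYNOS4_MIPI_PHY_ENABLE | EXYNOS4_MIPI_PHY_RESET_MASK);
}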
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index e1c12d84c26a..c203c9c56776 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -163,24 +163,12 @@ struct tc3589x_keypad_platform_data {
};
/**
- * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data
- * @setup: callback for board-specific initialization
- * @remove: callback for board-specific teardown
- */
-struct tc3589x_gpio_platform_data {
- void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base);
- void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base);
-};
-
-/**
* struct tc3589x_platform_data - TC3589x platform data
* @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
- * @gpio: GPIO-specific platform data
* @keypad: keypad-specific platform data
*/
struct tc3589x_platform_data {
unsigned int block;
- struct tc3589x_gpio_platform_data *gpio;
const struct tc3589x_keypad_platform_data *keypad;
};
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index e2e70053470e..3f4e994ace2b 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -52,6 +52,7 @@
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
+#define IRQENB_EOS BIT(1)
#define IRQENB_FIFO0THRES BIT(2)
#define IRQENB_FIFO0OVRRUN BIT(3)
#define IRQENB_FIFO0UNDRFLW BIT(4)
@@ -107,7 +108,7 @@
/* Charge delay */
#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
#define CHARGEDLY_OPEN(val) ((val) << 0)
-#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(1)
+#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
#define CNTRLREG_TSCSSENB BIT(0)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 57388171610d..605812820e48 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -96,11 +96,6 @@
#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
/*
- * Some controllers have DMA enable/disable register
- */
-#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
-
-/*
* Some controllers allow setting the SDx actual clock
*/
#define TMIO_MMC_CLK_ACTUAL (1 << 10)
@@ -112,18 +107,6 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
struct dma_chan;
-struct tmio_mmc_dma {
- void *chan_priv_tx;
- void *chan_priv_rx;
- int slave_id_tx;
- int slave_id_rx;
- int alignment_shift;
- dma_addr_t dma_rx_offset;
- bool (*filter)(struct dma_chan *chan, void *arg);
-};
-
-struct tmio_mmc_host;
-
/*
* data for the MMC controller
*/
@@ -132,19 +115,12 @@ struct tmio_mmc_data {
unsigned long capabilities;
unsigned long capabilities2;
unsigned long flags;
- unsigned long bus_shift;
u32 ocr_mask; /* available voltages */
- struct tmio_mmc_dma *dma;
- struct device *dev;
unsigned int cd_gpio;
+ int alignment_shift;
+ dma_addr_t dma_rx_offset;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
- int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- /* clock management callbacks */
- int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
- void (*clk_disable)(struct platform_device *pdev);
- int (*multi_io_quirk)(struct mmc_card *card,
- unsigned int direction, int blk_size);
};
/*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index fab9b32ace8e..78baed5f2952 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -67,7 +67,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
-extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
@@ -76,9 +75,6 @@ static inline bool pmd_trans_migrating(pmd_t pmd)
{
return false;
}
-static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
-{
-}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 64d25941b329..7b6d4e9ff603 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -71,6 +71,7 @@ enum {
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+ MLX4_CMD_VIRT_PORT_MAP = 0x5c,
MLX4_CMD_GET_OP_REQ = 0x59,
/* TPT commands */
@@ -165,9 +166,15 @@ enum {
};
enum {
- MLX4_CMD_TIME_CLASS_A = 10000,
- MLX4_CMD_TIME_CLASS_B = 10000,
- MLX4_CMD_TIME_CLASS_C = 10000,
+ MLX4_CMD_TIME_CLASS_A = 60000,
+ MLX4_CMD_TIME_CLASS_B = 60000,
+ MLX4_CMD_TIME_CLASS_C = 60000,
+};
+
+enum {
+ /* virtual to physical port mapping opcode modifiers */
+ MLX4_GET_PORT_VIRT2PHY = 0x0,
+ MLX4_SET_PORT_VIRT2PHY = 0x1,
};
enum {
@@ -279,6 +286,8 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
struct mlx4_config_dev_params *params);
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
/*
* mlx4_get_slave_default_vlan -
* return true if VST ( default vlan)
@@ -288,5 +297,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
u16 *vlan, u8 *qos);
#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
#endif /* MLX4_CMD_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 25c791e295fd..e4ebff7e9d02 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -42,7 +42,7 @@
#include <linux/atomic.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
@@ -70,6 +70,7 @@ enum {
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
+ MLX4_FLAG_BONDED = 1 << 7
};
enum {
@@ -97,7 +98,7 @@ enum {
MLX4_MAX_NUM_PF = 16,
MLX4_MAX_NUM_VF = 126,
MLX4_MAX_NUM_VF_P_PORT = 64,
- MLX4_MFUNC_MAX = 80,
+ MLX4_MFUNC_MAX = 128,
MLX4_MAX_EQ_NUM = 1024,
MLX4_MFUNC_EQ_NUM = 4,
MLX4_MFUNC_MAX_EQES = 8,
@@ -200,7 +201,9 @@ enum {
MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
- MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
+ MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
+ MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+ MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21
};
enum {
@@ -208,6 +211,10 @@ enum {
MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
};
+enum {
+ MLX4_VF_CAP_FLAG_RESET = 1 << 0
+};
+
/* bit enums for an 8-bit flags field indicating special use
* QPs which require special handling in qp_reserve_range.
* Currently, this only includes QPs used by the ETH interface,
@@ -248,9 +255,14 @@ enum {
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
+ MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};
+enum {
+ MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP
+};
+
enum mlx4_event {
MLX4_EVENT_TYPE_COMP = 0x00,
MLX4_EVENT_TYPE_PATH_MIG = 0x01,
@@ -276,6 +288,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+ MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
MLX4_EVENT_TYPE_NONE = 0xff,
};
@@ -285,6 +298,11 @@ enum {
};
enum {
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
+};
+
+enum {
MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};
@@ -411,6 +429,16 @@ enum {
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};
+enum {
+ MLX4_DEVICE_STATE_UP = 1 << 0,
+ MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
+};
+
+enum {
+ MLX4_INTERFACE_STATE_UP = 1 << 0,
+ MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+};
+
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
@@ -535,6 +563,7 @@ struct mlx4_caps {
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
+ u32 vf_caps;
};
struct mlx4_buf_list {
@@ -660,6 +689,8 @@ struct mlx4_cq {
void (*comp)(struct mlx4_cq *);
void *priv;
} tasklet_ctx;
+ int reset_notify_added;
+ struct list_head reset_notify;
};
struct mlx4_qp {
@@ -744,8 +775,23 @@ struct mlx4_vf_dev {
u8 n_ports;
};
-struct mlx4_dev {
+struct mlx4_dev_persistent {
struct pci_dev *pdev;
+ struct mlx4_dev *dev;
+ int nvfs[MLX4_MAX_PORTS + 1];
+ int num_vfs;
+ enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
+ enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
+ struct work_struct catas_work;
+ struct workqueue_struct *catas_wq;
+ struct mutex device_state_mutex; /* protect HW state */
+ u8 state;
+ struct mutex interface_state_mutex; /* protect SW state */
+ u8 interface_state;
+};
+
+struct mlx4_dev {
+ struct mlx4_dev_persistent *persist;
unsigned long flags;
unsigned long num_slaves;
struct mlx4_caps caps;
@@ -754,13 +800,11 @@ struct mlx4_dev {
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
- int num_vfs;
int numa_node;
int oper_log_mgm_entry_size;
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
struct mlx4_vf_dev *dev_vfs;
- int nvfs[MLX4_MAX_PORTS + 1];
};
struct mlx4_eqe {
@@ -832,6 +876,11 @@ struct mlx4_eqe {
} __packed tbl_change_info;
} params;
} __packed port_mgmt_change;
+ struct {
+ u8 reserved[3];
+ u8 port;
+ u32 reserved1[5];
+ } __packed bad_cable;
} event;
u8 slave_id;
u8 reserved3[2];
@@ -1338,6 +1387,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 022055c8fb26..9553a73d2049 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
};
+enum {
+ MLX4_INTFF_BONDING = 1 << 0
+};
+
struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
struct list_head list;
enum mlx4_protocol protocol;
+ int flags;
};
int mlx4_register_interface(struct mlx4_interface *intf);
void mlx4_unregister_interface(struct mlx4_interface *intf);
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+ u8 port1;
+ u8 port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
static inline u64 mlx4_mac_to_u64(u8 *addr)
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 467ccdf94c98..2bbc62aa818a 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -96,6 +96,7 @@ enum {
MLX4_QP_BIT_RRE = 1 << 15,
MLX4_QP_BIT_RWE = 1 << 14,
MLX4_QP_BIT_RAE = 1 << 13,
+ MLX4_QP_BIT_FPP = 1 << 3,
MLX4_QP_BIT_RIC = 1 << 4,
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80fc92a49649..47a93928b90f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -138,7 +138,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
-#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
#define VM_ARCH_2 0x02000000
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
@@ -206,27 +205,26 @@ extern unsigned int kobjsize(const void *objp);
extern pgprot_t protection_map[16];
#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
-#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
-#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED 0x40 /* second try */
-#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
+#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
+#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_TRIED 0x20 /* Second try */
+#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
- * pgoff should be used in favour of virtual_address, if possible. If pgoff
- * is used, one may implement ->remap_pages to get nonlinear mapping support.
+ * pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
unsigned int flags; /* FAULT_FLAG_xxx flags */
pgoff_t pgoff; /* Logical page offset based on vma */
void __user *virtual_address; /* Faulting virtual address */
+ struct page *cow_page; /* Handler may choose to COW */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
@@ -287,9 +285,13 @@ struct vm_operations_struct {
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
#endif
- /* called by sys_remap_file_pages() to populate non-linear mapping */
- int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
- unsigned long size, pgoff_t pgoff);
+ /*
+ * Called by vm_normal_page() for special PTEs to find the
+ * page for @addr. This is useful if the default behavior
+ * (using pte_page()) would not find the correct page.
+ */
+ struct page *(*find_special_page)(struct vm_area_struct *vma,
+ unsigned long addr);
};
struct mmu_gather;
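
To show how the new hook is meant to be wired up, a minimal sketch follows; foo_lookup_page() and the use of vm_private_data are hypothetical, only the callback signature comes from this header.

static struct page *foo_find_special_page(struct vm_area_struct *vma,
                                          unsigned long addr)
{
        /* hypothetical driver-private lookup, keyed by offset into the VMA */
        return foo_lookup_page(vma->vm_private_data, addr - vma->vm_start);
}

static const struct vm_operations_struct foo_vm_ops = {
        .find_special_page = foo_find_special_page,
};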
@@ -446,6 +448,12 @@ static inline struct page *compound_head_by_tail(struct page *tail)
return tail;
}
+/*
+ * Since a compound page could be dismantled asynchronously by THP, or we
+ * may asynchronously access an arbitrarily positioned struct page, there
+ * is a potential tail-flag race. To handle this race, smp_rmb() must be
+ * called before checking the tail flag; compound_head_by_tail() does that.
+ */
static inline struct page *compound_head(struct page *page)
{
if (unlikely(PageTail(page)))
@@ -454,6 +462,18 @@ static inline struct page *compound_head(struct page *page)
}
/*
+ * If we access a compound page synchronously, e.g. a freshly allocated
+ * page, there is no need to handle the tail-flag race, so we can check
+ * the tail flag directly without any synchronization primitive.
+ */
+static inline struct page *compound_head_fast(struct page *page)
+{
+ if (unlikely(PageTail(page)))
+ return page->first_page;
+ return page;
+}
+
+/*
* The atomic page->_mapcount, starts from -1: so that transitions
* both from it and to it can be tracked, using atomic_inc_and_test
* and atomic_add_negative(-1).
@@ -465,7 +485,8 @@ static inline void page_mapcount_reset(struct page *page)
static inline int page_mapcount(struct page *page)
{
- return atomic_read(&(page)->_mapcount) + 1;
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ return atomic_read(&page->_mapcount) + 1;
}
static inline int page_count(struct page *page)
@@ -531,7 +552,14 @@ static inline void get_page(struct page *page)
static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
- return compound_head(page);
+
+ /*
+ * We don't need to worry about synchronization of the tail flag
+ * when we call virt_to_head_page(), since it is only called for an
+ * already allocated page and that page won't be freed until this
+ * virt_to_head_page() has finished. So use the _fast variant.
+ */
+ return compound_head_fast(page);
}
/*
@@ -601,29 +629,28 @@ int split_free_page(struct page *page);
* prototype for that function and accessor functions.
* These are _only_ valid on the head of a PG_compound page.
*/
-typedef void compound_page_dtor(struct page *);
static inline void set_compound_page_dtor(struct page *page,
compound_page_dtor *dtor)
{
- page[1].lru.next = (void *)dtor;
+ page[1].compound_dtor = dtor;
}
static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
- return (compound_page_dtor *)page[1].lru.next;
+ return page[1].compound_dtor;
}
static inline int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
- return (unsigned long)page[1].lru.prev;
+ return page[1].compound_order;
}
static inline void set_compound_order(struct page *page, unsigned long order)
{
- page[1].lru.prev = (void *)order;
+ page[1].compound_order = order;
}
#ifdef CONFIG_MMU
@@ -1070,6 +1097,7 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
@@ -1078,8 +1106,9 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+ VM_FAULT_FALLBACK)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -1119,7 +1148,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
- struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
@@ -1137,8 +1165,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
/**
* mm_walk - callbacks for walk_page_range
- * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
@@ -1146,16 +1172,18 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
* @hugetlb_entry: if set, called for each hugetlb entry
- * *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
- * is used.
+ * @test_walk: caller-specific callback function to determine whether
+ * we walk over the current vma or not. Returning 0 means
+ * "do the page table walk over the current vma," returning 1
+ * means "skip the current vma," and a negative value means
+ * "abort the current page table walk right now."
+ * @mm: mm_struct representing the target process of page table walk
+ * @vma: vma currently walked (NULL if walking outside vmas)
+ * @private: private data for callbacks' usage
*
- * (see walk_page_range for more details)
+ * (see the comment on walk_page_range() for more details)
*/
struct mm_walk {
- int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pud_entry)(pud_t *pud, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1165,12 +1193,16 @@ struct mm_walk {
int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long next,
struct mm_walk *walk);
+ int (*test_walk)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
struct mm_struct *mm;
+ struct vm_area_struct *vma;
void *private;
};
int walk_page_range(unsigned long addr, unsigned long end,
struct mm_walk *walk);
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
@@ -1234,6 +1266,17 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas);
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked);
+long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ unsigned int gup_flags);
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
struct kvec;
@@ -1366,6 +1409,11 @@ static inline void update_hiwater_vm(struct mm_struct *mm)
mm->hiwater_vm = mm->total_vm;
}
+static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
+{
+ mm->hiwater_rss = get_mm_rss(mm);
+}
+
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{
@@ -1405,14 +1453,45 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
-#ifdef __PAGETABLE_PMD_FOLDED
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
unsigned long address)
{
return 0;
}
+
+static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+
+static inline void mm_nr_pmds_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->nr_pmds, 0);
+}
+
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->nr_pmds);
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+{
+ atomic_long_inc(&mm->nr_pmds);
+}
+
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+{
+ atomic_long_dec(&mm->nr_pmds);
+}
#endif
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1775,12 +1854,6 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
for (vma = vma_interval_tree_iter_first(root, start, last); \
vma; vma = vma_interval_tree_iter_next(vma, start, last))
-static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
- struct list_head *list)
-{
- list_add_tail(&vma->shared.nonlinear, list);
-}
-
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
@@ -2108,9 +2181,8 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
#endif
-unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
- unsigned long nr_scanned,
- unsigned long nr_eligible);
+void drop_slab(void);
+void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
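
Tying the reworked mm_walk API above together, a minimal, illustrative caller could look like the sketch below; the skip-VM_PFNMAP policy and the empty pmd handler are assumptions, only the callback signatures and walk_page_vma() come from this header.

static int example_test_walk(unsigned long addr, unsigned long next,
                             struct mm_walk *walk)
{
        /* 0: walk this vma, 1: skip it, negative: abort the whole walk */
        return (walk->vma->vm_flags & VM_PFNMAP) ? 1 : 0;
}

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        /* inspect the range [addr, next) covered by *pmd */
        return 0;
}

static int example_walk_vma(struct vm_area_struct *vma)
{
        struct mm_walk walk = {
                .pmd_entry = example_pmd_entry,
                .test_walk = example_test_walk,
                .mm        = vma->vm_mm,
        };

        return walk_page_vma(vma, &walk);
}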
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6d34aa266a8c..199a03aab8dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -28,6 +28,8 @@ struct mem_cgroup;
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
+typedef void compound_page_dtor(struct page *);
+
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -142,6 +144,12 @@ struct page {
struct rcu_head rcu_head; /* Used by SLAB
* when destroying via RCU
*/
+ /* First tail page of compound page */
+ struct {
+ compound_page_dtor *compound_dtor;
+ unsigned long compound_order;
+ };
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
@@ -273,15 +281,11 @@ struct vm_area_struct {
/*
* For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree, or
- * linkage of vma in the address_space->i_mmap_nonlinear list.
+ * linkage into the address_space->i_mmap interval tree.
*/
- union {
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } linear;
- struct list_head nonlinear;
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
} shared;
/*
@@ -359,7 +363,8 @@ struct mm_struct {
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
- atomic_long_t nr_ptes; /* Page table pages */
+ atomic_long_t nr_ptes; /* PTE page table pages */
+ atomic_long_t nr_pmds; /* PMD page table pages */
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some counters */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d69c00497bd..a6cf4c063e4e 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -83,7 +83,7 @@ struct mmc_ext_csd {
bool hpi; /* HPI support bit */
unsigned int hpi_cmd; /* cmd used as HPI */
bool bkops; /* background support bit */
- bool bkops_en; /* background enable bit */
+ bool man_bkops_en; /* manual bkops enable bit */
unsigned int data_sector_size; /* 512 bytes or 4KB */
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index cb2b0400d284..160448f920ac 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -182,7 +182,6 @@ extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
extern int mmc_hw_reset(struct mmc_host *host);
-extern int mmc_hw_reset_check(struct mmc_host *host);
extern int mmc_can_reset(struct mmc_card *card);
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 42b724e8d503..471fb3116dbe 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -106,6 +106,11 @@ struct mmc_data;
* @cur_slot, @mrq and @state. These must always be updated
* at the same time while holding @lock.
*
+ * @irq_lock is an irq-safe spinlock protecting the INTMASK register
+ * to allow the interrupt handler to modify it directly. It is held only
+ * long enough to read-modify-write INTMASK, and no other locks are taken
+ * while it is held.
+ *
* The @mrq field of struct dw_mci_slot is also protected by @lock,
* and must always be written at the same time as the slot is added to
* @queue.
@@ -125,6 +130,7 @@ struct mmc_data;
*/
struct dw_mci {
spinlock_t lock;
+ spinlock_t irq_lock;
void __iomem *regs;
struct scatterlist *sg;
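
A sketch of the only pattern @irq_lock is intended for; mci_readl()/mci_writel() and the interrupt bit used below are private to the dw_mmc driver and are assumed here for illustration.

        unsigned long irqflags;
        u32 mask;

        spin_lock_irqsave(&host->irq_lock, irqflags);
        mask = mci_readl(host, INTMASK);        /* driver-private accessor, assumed */
        mask |= SDMMC_INT_CD;                   /* driver-private bit, assumed */
        mci_writel(host, INTMASK, mask);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);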
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9f322706f7cb..0c8cbe5d1550 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -166,7 +166,6 @@ struct mmc_async_req {
* struct mmc_slot - MMC slot functions
*
* @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL
- * @lock: protect the @handler_priv pointer
* @handler_priv: MMC/SD-card slot context
*
* Some MMC/SD host controllers implement slot-functions like card and
@@ -176,7 +175,6 @@ struct mmc_async_req {
*/
struct mmc_slot {
int cd_irq;
- struct mutex lock;
void *handler_priv;
};
@@ -197,6 +195,7 @@ struct mmc_context_info {
};
struct regulator;
+struct mmc_pwrseq;
struct mmc_supply {
struct regulator *vmmc; /* Card power supply */
@@ -208,6 +207,7 @@ struct mmc_host {
struct device class_dev;
int index;
const struct mmc_host_ops *ops;
+ struct mmc_pwrseq *pwrseq;
unsigned int f_min;
unsigned int f_max;
unsigned int f_init;
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 49ad7a943638..124f562118b8 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,11 +53,6 @@
#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
-#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
-#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
-extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
-extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
-
/* class 3 */
#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
@@ -433,6 +428,11 @@ struct _mmc_csd {
#define EXT_CSD_BKOPS_LEVEL_2 0x2
/*
+ * BKOPS modes
+ */
+#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
+
+/*
* MMC_SWITCH access modes
*/
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 375af80bde7d..c3e3db196738 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -17,6 +17,11 @@
#include <linux/io.h>
#include <linux/mmc/host.h>
+struct sdhci_host_next {
+ unsigned int sg_count;
+ s32 cookie;
+};
+
struct sdhci_host {
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
@@ -106,6 +111,10 @@ struct sdhci_host {
#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
/* Capability register bit-63 indicates HS400 support */
#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
+/* forced tuned clock */
+#define SDHCI_QUIRK2_TUNING_WORK_AROUND (1<<12)
+/* disable the block count for single block transactions */
+#define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -137,6 +146,7 @@ struct sdhci_host {
#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
unsigned int version; /* SDHCI spec. version */
@@ -202,6 +212,7 @@ struct sdhci_host {
#define SDHCI_TUNING_MODE_1 0
struct timer_list tuning_timer; /* Timer for tuning */
+ struct sdhci_host_next next_data;
unsigned long private[0] ____cacheline_aligned;
};
#endif /* LINUX_MMC_SDHCI_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0f01fe065424..996807963716 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -24,13 +24,15 @@
* Vendors and devices. Sort key: vendor first, device next.
*/
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
+#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
-#define SDIO_DEVICE_ID_BROADCOM_43362 43362
+#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index 68927ae50845..da77e5e2041d 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -3,20 +3,10 @@
#include <linux/types.h>
-struct platform_device;
-
#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
-/**
- * struct sh_mobile_sdhi_ops - SDHI driver callbacks
- * @cd_wakeup: trigger a card-detection run
- */
-struct sh_mobile_sdhi_ops {
- void (*cd_wakeup)(const struct platform_device *pdev);
-};
-
struct sh_mobile_sdhi_info {
int dma_slave_tx;
int dma_slave_rx;
@@ -25,11 +15,6 @@ struct sh_mobile_sdhi_info {
unsigned long tmio_caps2;
u32 tmio_ocr_mask; /* available MMC voltages */
unsigned int cd_gpio;
-
- /* callbacks for board specific setup code */
- int (*init)(struct platform_device *pdev,
- const struct sh_mobile_sdhi_ops *ops);
- void (*cleanup)(struct platform_device *pdev);
};
#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index e56fa24c9322..3945a8c9d3cb 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -15,12 +15,10 @@ struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
-void mmc_gpio_free_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
unsigned int debounce);
-void mmc_gpio_free_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
@@ -28,7 +26,8 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce, bool *gpio_invert);
-void mmc_gpiod_free_cd(struct mmc_host *host);
+void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2f0856d14b21..f279d9c158cd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -426,7 +426,7 @@ struct zone {
const char *name;
/*
- * Number of MIGRATE_RESEVE page block. To maintain for just
+ * Number of MIGRATE_RESERVE page block. To maintain for just
* optimization. Protected by zone->lock.
*/
int nr_migrate_reserve_block;
@@ -970,7 +970,6 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
* @z - The cursor used as a starting point for the search
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
@@ -980,8 +979,7 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
*/
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
- nodemask_t *nodes,
- struct zone **zone);
+ nodemask_t *nodes);
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
@@ -1000,8 +998,10 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
nodemask_t *nodes,
struct zone **zone)
{
- return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
- zone);
+ struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
+ highest_zoneidx, nodes);
+ *zone = zonelist_zone(z);
+ return z;
}
/**
@@ -1018,7 +1018,8 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
- z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
+ z = next_zones_zonelist(++z, highidx, nodemask), \
+ zone = zonelist_zone(z)) \
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
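
For reference, callers normally consume these helpers through the iteration macro rather than calling next_zones_zonelist() directly; a minimal sketch, where zonelist, highidx and nodemask are assumed to come from the caller's allocation context:

        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
                if (!populated_zone(zone))
                        continue;
                /* consider this zone for allocation or reclaim */
        }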
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 745def862580..2e75ab00dbf2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -53,9 +53,9 @@ struct ieee1394_device_id {
/**
* struct usb_device_id - identifies USB devices for probing and hotplugging
- * @match_flags: Bit mask controlling of the other fields are used to match
- * against new devices. Any field except for driver_info may be used,
- * although some only make sense in conjunction with other fields.
+ * @match_flags: Bit mask controlling which of the other fields are used to
+ * match against new devices. Any field except for driver_info may be
+ * used, although some only make sense in conjunction with other fields.
* This is usually set by a USB_DEVICE_*() macro, which sets all
* other fields in this structure except for driver_info.
* @idVendor: USB vendor ID for a device; numbers are assigned
@@ -220,8 +220,7 @@ struct serio_device_id {
/*
* Struct used for matching a device
*/
-struct of_device_id
-{
+struct of_device_id {
char name[32];
char type[32];
char compatible[128];
diff --git a/include/linux/module.h b/include/linux/module.h
index ebfb0e153c6a..42999fe2dbd0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -135,7 +135,7 @@ void trim_init_extable(struct module *m);
#ifdef MODULE
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
- extern const struct type##_device_id __mod_##type##__##name##_device_table \
+extern const typeof(name) __mod_##type##__##name##_device_table \
__attribute__ ((unused, alias(__stringify(name))))
#else /* !MODULE */
#define MODULE_DEVICE_TABLE(type, name)
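
The typeof()-based form keeps the usual driver-side idiom unchanged; an example with made-up IDs:

static const struct usb_device_id example_id_table[] = {
        { USB_DEVICE(0x1234, 0x5678) },         /* hypothetical VID/PID */
        { }                                     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);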
@@ -444,7 +444,7 @@ extern void __module_put_and_exit(struct module *mod, long code)
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
-unsigned long module_refcount(struct module *mod);
+int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
void symbol_put_addr(void *addr);
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 7eeb9bbfb816..f7556261fe3c 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -26,7 +26,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
void *module_alloc(unsigned long size);
/* Free memory returned from module_alloc. */
-void module_free(struct module *mod, void *module_region);
+void module_memfree(void *module_region);
/*
* Apply the given relocation to the (simplified) ELF. Return -error
@@ -82,4 +82,6 @@ int module_finalize(const Elf_Ehdr *hdr,
/* Any cleanup needed when module leaves. */
void module_arch_cleanup(struct module *mod);
+/* Any cleanup before freeing mod->module_init */
+void module_arch_freeing_init(struct module *mod);
#endif
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 031ff3a9a0bd..3301c4c289d6 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -408,4 +408,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+
#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index c3918a0684fe..1e271cb559cd 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -23,22 +23,32 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#include <linux/scatterlist.h>
#include <mtd/ubi-user.h>
/* All volumes/LEBs */
#define UBI_ALL -1
/*
+ * Maximum number of scatter gather list entries,
+ * we use only 64 to have a lower memory foot print.
+ */
+#define UBI_MAX_SG_COUNT 64
+
+/*
* enum ubi_open_mode - UBI volume open mode constants.
*
* UBI_READONLY: read-only mode
* UBI_READWRITE: read-write mode
* UBI_EXCLUSIVE: exclusive mode
+ * UBI_METAONLY: modify only the volume meta-data,
+ * i.e. the data stored in the volume table, but not in any of the volume's LEBs.
*/
enum {
UBI_READONLY = 1,
UBI_READWRITE,
- UBI_EXCLUSIVE
+ UBI_EXCLUSIVE,
+ UBI_METAONLY
};
/**
@@ -116,6 +126,35 @@ struct ubi_volume_info {
};
/**
+ * struct ubi_sgl - UBI scatter gather list data structure.
+ * @list_pos: current position in @sg[]
+ * @page_pos: current position in @sg[@list_pos]
+ * @sg: the scatter gather list itself
+ *
+ * ubi_sgl is a wrapper around a scatter list which keeps track of the
+ * current position in the list and the current list item such that
+ * it can be used across multiple ubi_leb_read_sg() calls.
+ */
+struct ubi_sgl {
+ int list_pos;
+ int page_pos;
+ struct scatterlist sg[UBI_MAX_SG_COUNT];
+};
+
+/**
+ * ubi_sgl_init - initialize an UBI scatter gather list data structure.
+ * @usgl: the UBI scatter gather struct itself
+ *
+ * Please note that you still have to use sg_init_table() or an equivalent
+ * function to initialize the underlying struct scatterlist.
+ */
+static inline void ubi_sgl_init(struct ubi_sgl *usgl)
+{
+ usgl->list_pos = 0;
+ usgl->page_pos = 0;
+}
+
+/**
* struct ubi_device_info - UBI device description data structure.
* @ubi_num: ubi device number
* @leb_size: logical eraseblock size on this UBI device
@@ -210,6 +249,8 @@ int ubi_unregister_volume_notifier(struct notifier_block *nb);
void ubi_close_volume(struct ubi_volume_desc *desc);
int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int len, int check);
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check);
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
int offset, int len);
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
@@ -230,4 +271,14 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
{
return ubi_leb_read(desc, lnum, buf, offset, len, 0);
}
+
+/*
+ * This function is the same as the 'ubi_leb_read_sg()' function, but it does
+ * not provide the checking capability.
+ */
+static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
+ struct ubi_sgl *sgl, int offset, int len)
+{
+ return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
+}
#endif /* !__LINUX_UBI_H__ */
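
A short usage sketch for the new scatter-gather read path (not part of the patch); the volume descriptor, LEB number, length and the way destination pages are attached to the list are assumptions.

        struct ubi_sgl sgl;
        int err;

        sg_init_table(sgl.sg, UBI_MAX_SG_COUNT);
        ubi_sgl_init(&sgl);
        /* caller is assumed to have attached destination pages to sgl.sg[],
         * e.g. with sg_set_page(), before issuing the read
         */
        err = ubi_read_sg(desc, lnum, &sgl, 0, len);
        if (err)
                pr_err("LEB %d read failed: %d\n", lnum, err);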
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index cc31498fc526..2cb7531e7d7a 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -59,7 +59,6 @@ struct mutex {
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
- const char *name;
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 8e30685affeb..7d59dc6ab789 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,6 +66,7 @@ enum {
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+ NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
/*
* Add your fresh new feature above and remember to update
@@ -124,6 +125,7 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -159,7 +161,9 @@ enum {
*/
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_HW_SWITCH_OFFLOAD)
+
/*
* If one device doesn't support one of these features, then disable it
* for all in netdev_increment_features.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 679e6e90aa4c..d115256ed5a2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
struct netpoll_info;
struct device;
@@ -643,39 +644,40 @@ struct rps_dev_flow_table {
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32-bit value. The upper part holds the high-order bits
+ * of the flow hash, the lower part the CPU number.
+ * rps_cpu_mask is used to partition the space, depending on the number of
+ * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1.
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32 - 6 = 26 bits for the hash.
*/
struct rps_sock_flow_table {
- unsigned int mask;
- u16 ents[0];
+ u32 mask;
+
+ u32 ents[0] ____cacheline_aligned_in_smp;
};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
- ((_num) * sizeof(u16)))
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
#define RPS_NO_CPU 0xffff
+extern u32 rps_cpu_mask;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
if (table && hash) {
- unsigned int cpu, index = hash & table->mask;
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~rps_cpu_mask;
/* We only give a hint, preemption can change cpu under us */
- cpu = raw_smp_processor_id();
+ val |= raw_smp_processor_id();
- if (table->ents[index] != cpu)
- table->ents[index] = cpu;
+ if (table->ents[index] != val)
+ table->ents[index] = val;
}
}
-static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
-{
- if (table && hash)
- table->ents[hash & table->mask] = RPS_NO_CPU;
-}
-
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
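
The comment on struct rps_sock_flow_table above describes how each 32-bit entry is split between flow-hash bits and the CPU number by rps_cpu_mask. A standalone sketch of that arithmetic in plain C, with a stand-in roundup_pow_of_two() and made-up hash/CPU values:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's roundup_pow_of_two() */
static uint32_t roundup_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t nr_cpu_ids = 64;	/* assumed number of possible CPUs */
	uint32_t rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;	/* 0x3f */
	uint32_t hash = 0xdeadbeef;	/* example flow hash */
	uint32_t cpu = 17;		/* example CPU number */

	/* same packing as rps_record_sock_flow(): upper bits hash, lower bits cpu */
	uint32_t val = (hash & ~rps_cpu_mask) | cpu;

	printf("rps_cpu_mask = %#x\n", rps_cpu_mask);
	printf("entry = %#x (hash bits %#x, cpu %u)\n",
	       val, val & ~rps_cpu_mask, val & rps_cpu_mask);
	return 0;
}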
@@ -852,11 +854,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device support VLAN filtering this function is called when a
* VLAN id is registered.
*
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device support VLAN filtering this function is called when a
* VLAN id is unregistered.
*
@@ -1154,13 +1156,15 @@ struct net_device_ops {
int idx);
int (*ndo_bridge_setlink)(struct net_device *dev,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh,
+ u16 flags);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask);
int (*ndo_bridge_dellink)(struct net_device *dev,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh,
+ u16 flags);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
@@ -1514,6 +1518,8 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
struct {
struct list_head upper;
@@ -1969,7 +1975,7 @@ struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
@@ -1979,10 +1985,21 @@ struct packet_offload {
struct list_head list;
};
+struct udp_offload;
+
+struct udp_offload_callbacks {
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff);
+ int (*gro_complete)(struct sk_buff *skb,
+ int nhoff,
+ struct udp_offload *uoff);
+};
+
struct udp_offload {
__be16 port;
u8 ipproto;
- struct offload_callbacks callbacks;
+ struct udp_offload_callbacks callbacks;
};
/* often modified stats are per cpu, other are shared (netdev->stats) */
@@ -2041,6 +2058,7 @@ struct pcpu_sw_netstats {
#define NETDEV_RESEND_IGMP 0x0016
#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA 0x0018
+#define NETDEV_BONDING_INFO 0x0019
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2085,7 +2103,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
for_each_netdev_rcu(&init_net, slave) \
- if (netdev_master_upper_dev_get_rcu(slave) == bond)
+ if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev)
@@ -2303,6 +2321,21 @@ do { \
compute_pseudo(skb, proto)); \
} while (0)
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset)
+{
+ __wsum delta;
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+}
+
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -3464,6 +3497,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
netdev_features_t features);
+struct netdev_bonding_info {
+ ifslave slave;
+ ifbond master;
+};
+
+struct netdev_notifier_bonding_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct netdev_bonding_info bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+ struct netdev_bonding_info *bonding_info);
+
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
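
NETDEV_BONDING_INFO is delivered with a struct netdev_notifier_bonding_info whose generic info member comes first, so a netdevice notifier can recover the bonding data from the notifier pointer. A hedged sketch of such a listener; the foo_* names are invented and the ifbond field names are taken from uapi/linux/if_bonding.h:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	if (event == NETDEV_BONDING_INFO) {
		/* netdev_notifier_info is the first member, so this cast is safe */
		struct netdev_notifier_bonding_info *nbi = ptr;
		struct netdev_bonding_info *bi = &nbi->bonding_info;

		pr_info("bonding update: mode %d, %d slaves\n",
			bi->master.bond_mode, bi->master.num_slaves);
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_nb = {
	.notifier_call = foo_netdev_event,
};

/* module init would call register_netdevice_notifier(&foo_netdev_nb) */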
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 022b761dbf0a..ed43cb74b11d 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -411,6 +411,7 @@ enum lock_type4 {
#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
+#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
@@ -516,6 +517,8 @@ enum pnfs_layouttype {
LAYOUT_NFSV4_1_FILES = 1,
LAYOUT_OSD2_OBJECTS = 2,
LAYOUT_BLOCK_VOLUME = 3,
+ LAYOUT_FLEX_FILES = 4,
+ LAYOUT_TYPE_MAX
};
/* used for both layout return and recall */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1e37fbb78f7a..5e1273d4de14 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -74,10 +74,9 @@ struct nfs_client {
/* idmapper */
struct idmap * cl_idmap;
- /* Our own IP address, as a null-terminated string.
- * This is used to generate the mv0 callback address.
- */
- char cl_ipaddr[48];
+ /* Client owner identifier */
+ const char * cl_owner_id;
+
u32 cl_cb_ident; /* v4.0 callback identifier */
const struct nfs4_minor_version_ops *cl_mvops;
unsigned long cl_mig_gen;
@@ -105,6 +104,11 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#endif /* CONFIG_NFS_V4 */
+ /* Our own IP address, as a null-terminated string.
+ * This is used to generate the mv0 callback address.
+ */
+ char cl_ipaddr[48];
+
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index 0f4b79da6584..333844e38f66 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -73,5 +73,7 @@ int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t
int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t);
int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t);
+int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res);
+
extern unsigned int nfs_idmap_cache_timeout;
#endif /* NFS_IDMAP_H */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6c3e06ee2fb7..3eb072dbce83 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -58,6 +58,9 @@ struct nfs_pageio_ops {
size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *,
struct nfs_page *);
int (*pg_doio)(struct nfs_pageio_descriptor *);
+ unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
+ struct nfs_page *);
+ void (*pg_cleanup)(struct nfs_pageio_descriptor *);
};
struct nfs_rw_ops {
@@ -69,18 +72,21 @@ struct nfs_rw_ops {
struct inode *);
void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *,
+ const struct nfs_rpc_ops *,
struct rpc_task_setup *, int);
};
-struct nfs_pageio_descriptor {
+struct nfs_pgio_mirror {
struct list_head pg_list;
unsigned long pg_bytes_written;
size_t pg_count;
size_t pg_bsize;
unsigned int pg_base;
- unsigned char pg_moreio : 1,
- pg_recoalesce : 1;
+ unsigned char pg_recoalesce : 1;
+};
+struct nfs_pageio_descriptor {
+ unsigned char pg_moreio : 1;
struct inode *pg_inode;
const struct nfs_pageio_ops *pg_ops;
const struct nfs_rw_ops *pg_rw_ops;
@@ -91,8 +97,18 @@ struct nfs_pageio_descriptor {
struct pnfs_layout_segment *pg_lseg;
struct nfs_direct_req *pg_dreq;
void *pg_layout_private;
+ unsigned int pg_bsize; /* default bsize for mirrors */
+
+ u32 pg_mirror_count;
+ struct nfs_pgio_mirror *pg_mirrors;
+ struct nfs_pgio_mirror pg_mirrors_static[1];
+ struct nfs_pgio_mirror *pg_mirrors_dynamic;
+ u32 pg_mirror_idx; /* current mirror */
};
+/* arbitrarily selected limit to number of mirrors */
+#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
+
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 467c84efb596..38d96ba935c2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -285,6 +285,7 @@ struct nfs4_layoutcommit_data {
struct nfs_fattr fattr;
struct list_head lseg_list;
struct rpc_cred *cred;
+ struct inode *inode;
struct nfs4_layoutcommit_args args;
struct nfs4_layoutcommit_res res;
};
@@ -293,6 +294,7 @@ struct nfs4_layoutreturn_args {
struct nfs4_sequence_args seq_args;
struct pnfs_layout_hdr *layout;
struct inode *inode;
+ struct pnfs_layout_range range;
nfs4_stateid stateid;
__u32 layout_type;
};
@@ -308,6 +310,7 @@ struct nfs4_layoutreturn {
struct nfs4_layoutreturn_res res;
struct rpc_cred *cred;
struct nfs_client *clp;
+ struct inode *inode;
int rpc_status;
};
@@ -325,6 +328,7 @@ struct nfs_openargs {
struct nfs_seqid * seqid;
int open_flags;
fmode_t fmode;
+ u32 share_access;
u32 access;
__u64 clientid;
struct stateowner_id id;
@@ -389,9 +393,10 @@ struct nfs_open_confirmres {
struct nfs_closeargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
- nfs4_stateid * stateid;
+ nfs4_stateid stateid;
struct nfs_seqid * seqid;
fmode_t fmode;
+ u32 share_access;
const u32 * bitmask;
};
@@ -416,12 +421,13 @@ struct nfs_lock_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * lock_seqid;
- nfs4_stateid * lock_stateid;
+ nfs4_stateid lock_stateid;
struct nfs_seqid * open_seqid;
- nfs4_stateid * open_stateid;
+ nfs4_stateid open_stateid;
struct nfs_lowner lock_owner;
unsigned char block : 1;
unsigned char reclaim : 1;
+ unsigned char new_lock : 1;
unsigned char new_lock_owner : 1;
};
@@ -437,7 +443,7 @@ struct nfs_locku_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * seqid;
- nfs4_stateid * stateid;
+ nfs4_stateid stateid;
};
struct nfs_locku_res {
@@ -513,6 +519,7 @@ struct nfs_pgio_res {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
__u32 count;
+ __u32 op_status;
int eof; /* used by read */
struct nfs_writeverf * verf; /* used by write */
const struct nfs_server *server; /* used by write */
@@ -532,6 +539,7 @@ struct nfs_commitargs {
struct nfs_commitres {
struct nfs4_sequence_res seq_res;
+ __u32 op_status;
struct nfs_fattr *fattr;
struct nfs_writeverf *verf;
const struct nfs_server *server;
@@ -1325,7 +1333,8 @@ struct nfs_pgio_header {
__u64 mds_offset; /* Filelayout dense stripe */
struct nfs_page_array page_array;
struct nfs_client *ds_clp; /* pNFS data server */
- int ds_idx; /* ds index if ds_clp is set */
+ int ds_commit_idx; /* ds index if ds_clp is set */
+ int pgio_mirror_idx;/* mirror index in pgio layer */
};
struct nfs_mds_commit_info {
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 83a6aeda899d..6e85889cf9ab 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -8,14 +8,13 @@
* See detailed comments in the file linux/bitmap.h describing the
* data type on which these nodemasks are based.
*
- * For details of nodemask_scnprintf() and nodemask_parse_user(),
- * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
- * For details of nodelist_scnprintf() and nodelist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
- * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
- * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c.
- * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
+ * For details of nodemask_parse_user(), see bitmap_parse_user() in
+ * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(),
+ * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in
+ * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in
+ * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in
+ * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in
+ * lib/bitmap.c.
*
* The available nodemask operations are:
*
@@ -52,9 +51,7 @@
* NODE_MASK_NONE Initializer - no bits set
* unsigned long *nodes_addr(mask) Array of unsigned long's in mask
*
- * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
* int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask
- * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
* int nodelist_parse(buf, map) Parse ascii string as nodelist
* int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
* void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
@@ -98,6 +95,14 @@
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
+/**
+ * nodemask_pr_args - printf args to output a nodemask
+ * @maskp: nodemask to be printed
+ *
+ * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
+ */
+#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
+
/*
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
@@ -120,13 +125,13 @@ static inline void __node_clear(int node, volatile nodemask_t *dstp)
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, int nbits)
+static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, int nbits)
+static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -144,7 +149,7 @@ static inline int __node_test_and_set(int node, nodemask_t *addr)
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -152,7 +157,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -160,7 +165,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -168,7 +173,7 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -176,7 +181,7 @@ static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static inline void __nodes_complement(nodemask_t *dstp,
- const nodemask_t *srcp, int nbits)
+ const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
@@ -184,7 +189,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_equal(const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
@@ -192,7 +197,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_intersects(const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
@@ -200,25 +205,25 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_subset(const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, int nbits)
+static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, int nbits)
+static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, int nbits)
+static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
@@ -304,14 +309,6 @@ static inline int __first_unset_node(const nodemask_t *maskp)
#define nodes_addr(src) ((src).bits)
-#define nodemask_scnprintf(buf, len, src) \
- __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES)
-static inline int __nodemask_scnprintf(char *buf, int len,
- const nodemask_t *srcp, int nbits)
-{
- return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static inline int __nodemask_parse_user(const char __user *buf, int len,
@@ -320,14 +317,6 @@ static inline int __nodemask_parse_user(const char __user *buf, int len,
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
-#define nodelist_scnprintf(buf, len, src) \
- __nodelist_scnprintf((buf), (len), &(src), MAX_NUMNODES)
-static inline int __nodelist_scnprintf(char *buf, int len,
- const nodemask_t *srcp, int nbits)
-{
- return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 258945fcabf1..19a5d4b23209 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -132,13 +132,12 @@ struct nvme_ns {
* allocated to store the PRP list.
*/
struct nvme_iod {
- void *private; /* For the use of the submitter of the I/O */
+ unsigned long private; /* For the use of the submitter of the I/O */
int npages; /* In the PRP list. 0 means small pool in use */
int offset; /* Of PRP list */
int nents; /* Used in scatterlist */
int length; /* Of data, in bytes */
dma_addr_t first_dma;
- struct list_head node;
struct scatterlist sg[0];
};
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 38fc05036015..69dbe312b11b 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -52,6 +52,7 @@ extern int of_get_named_gpio_flags(struct device_node *np,
extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
+extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
extern void of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 853698c721f7..d5771bed59c9 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -47,6 +47,10 @@ static inline bool oom_task_origin(const struct task_struct *p)
return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}
+extern void mark_tsk_oom_victim(struct task_struct *tsk);
+
+extern void unmark_oom_victim(void);
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
@@ -68,27 +72,14 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
bool force_kill);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *mask, bool force_kill);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
extern bool oom_killer_disabled;
-
-static inline void oom_killer_disable(void)
-{
- oom_killer_disabled = true;
-}
-
-static inline void oom_killer_enable(void)
-{
- oom_killer_disabled = false;
-}
-
-static inline bool oom_gfp_allowed(gfp_t gfp_mask)
-{
- return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
-}
+extern bool oom_killer_disable(void);
+extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 90230d5811c5..3a6490e81b28 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -5,8 +5,11 @@
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
*/
-
-#define OSQ_UNLOCKED_VAL (0)
+struct optimistic_spin_node {
+ struct optimistic_spin_node *next, *prev;
+ int locked; /* 1 if lock acquired */
+ int cpu; /* encoded CPU # + 1 value */
+};
struct optimistic_spin_queue {
/*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
atomic_t tail;
};
+#define OSQ_UNLOCKED_VAL (0)
+
/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
@@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
+
#endif
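
osq_lock() and osq_unlock() are now declared here for sleeping-lock implementations that embed an optimistic_spin_queue. A hedged sketch of the pattern, with an invented foo_lock wrapper and the actual spinning elided:

#include <linux/osq_lock.h>
#include <linux/types.h>

struct foo_lock {
	struct optimistic_spin_queue osq;
	/* the actual sleeping-lock state would live here */
};

static void foo_lock_init(struct foo_lock *lock)
{
	osq_lock_init(&lock->osq);
}

static bool foo_try_optimistic_spin(struct foo_lock *lock)
{
	/* queue as an MCS spinner; false means give up and sleep instead */
	if (!osq_lock(&lock->osq))
		return false;

	/* spinning on the current lock owner would happen here */

	osq_unlock(&lock->osq);
	return true;
}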
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e1f5fcd79792..5ed7bdaf22d5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -121,8 +121,12 @@ enum pageflags {
PG_fscache = PG_private_2, /* page backed by cache */
/* XEN */
+ /* Pinned in Xen as a read-only pagetable page. */
PG_pinned = PG_owner_priv_1,
+ /* Pinned as part of domain save (see xen_mm_pin_all()). */
PG_savepinned = PG_dirty,
+ /* Has a grant mapping of another (foreign) domain's page. */
+ PG_foreign = PG_owner_priv_1,
/* SLOB */
PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked) /* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
PAGEFLAG(SavePinned, savepinned); /* Xen */
+PAGEFLAG(Foreign, foreign); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__SETPAGEFLAG(SwapBacked, swapbacked)
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 955421575d16..17fa4f8de3a6 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -41,7 +41,8 @@ int page_counter_try_charge(struct page_counter *counter,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
int page_counter_limit(struct page_counter *counter, unsigned long limit);
-int page_counter_memparse(const char *buf, unsigned long *nr_pages);
+int page_counter_memparse(const char *buf, const char *max,
+ unsigned long *nr_pages);
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
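
page_counter_memparse() now takes the keyword that should be treated as "no limit" (for example "max" in cgroup2-style interfaces). A hedged sketch of a limit-setting helper built on it; the foo_* name is invented:

#include <linux/page_counter.h>

/* cgroup-style limit writer: the literal "max" means "no limit" */
static int foo_set_limit(struct page_counter *counter, const char *buf)
{
	unsigned long nr_pages;
	int err;

	err = page_counter_memparse(buf, "max", &nr_pages);
	if (err)
		return err;

	return page_counter_limit(counter, nr_pages);
}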
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index d2a2c84c72d0..c42981cd99aa 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -40,7 +40,7 @@ struct page_ext {
#ifdef CONFIG_PAGE_OWNER
unsigned int order;
gfp_t gfp_mask;
- struct stack_trace trace;
+ unsigned int nr_entries;
unsigned long trace_entries[8];
#endif
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 360a966a97a5..211e9da8a7d7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -29,6 +29,7 @@
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
@@ -175,6 +176,10 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
+ /* Do not use bus resets for device */
+ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+ /* Do not use PM reset even if device advertises NoSoftRst- */
+ PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
};
enum pci_irq_reroute_variant {
@@ -395,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
-struct pci_host_bridge_window {
- struct list_head list;
- struct resource *res; /* host bridge aperture (CPU address) */
- resource_size_t offset; /* bus address + offset = CPU address */
-};
-
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* root bus */
- struct list_head windows; /* pci_host_bridge_windows */
+ struct list_head windows; /* resource_entry */
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
};
@@ -560,6 +559,7 @@ static inline int pcibios_err_to_errno(int err)
/* Low-level architecture-dependent routines */
struct pci_ops {
+ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
@@ -857,6 +857,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
@@ -1065,6 +1075,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
void pci_bus_assign_resources(const struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
@@ -1847,6 +1858,8 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
#endif /* CONFIG_OF */
#ifdef CONFIG_EEH
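
struct pci_ops gains an optional map_bus() hook, and the new pci_generic_config_read()/pci_generic_config_write() helpers implement config accessors on top of it. A hedged sketch of a host-controller driver wiring them together; foo_cfg_base and the ECAM-style offset math are assumptions, not part of this patch:

#include <linux/pci.h>

static void __iomem *foo_cfg_base;	/* ioremap()ed config window (assumed) */

static void __iomem *foo_map_bus(struct pci_bus *bus, unsigned int devfn,
				 int where)
{
	/* hypothetical ECAM-style layout: 1 MiB per bus, 4 KiB per function */
	return foo_cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops foo_pci_ops = {
	.map_bus = foo_map_bus,
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};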
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b4337646388b..12c9b485beb7 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
static inline bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long __percpu **percpu_countp)
{
- /* paired with smp_store_release() in percpu_ref_reinit() */
- unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+ unsigned long percpu_ptr;
+
+ /*
+ * The value of @ref->percpu_count_ptr is tested for
+ * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
+ * used as a pointer. If the compiler generates a separate fetch
+ * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+ * between, contaminating the pointer value, meaning that
+ * ACCESS_ONCE() is required when fetching it.
+ *
+ * Also, we need a data dependency barrier to be paired with
+ * smp_store_release() in __percpu_ref_switch_to_percpu().
+ *
+ * Use lockless deref which contains both.
+ */
+ percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
/*
* Theoretically, the following could test just ATOMIC; however,
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
ret = true;
- } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
ret = atomic_long_inc_not_zero(&ref->count);
}
@@ -281,6 +295,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
}
/**
+ * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref is dying or dead.
+ *
+ * This function is safe to call as long as @ref is between init and exit
+ * and the caller is responsible for synchronizing against state changes.
+ */
+static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
+{
+ return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
+}
+
+/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*
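
percpu_ref_is_dying() is a cheap test for __PERCPU_REF_DEAD, usable between init and exit to refuse new work once percpu_ref_kill() has run. A minimal sketch with an invented submission path:

#include <linux/errno.h>
#include <linux/percpu-refcount.h>

static int foo_submit(struct percpu_ref *ref)
{
	/* cheap early-out once percpu_ref_kill() has been called */
	if (percpu_ref_is_dying(ref))
		return -ENODEV;

	if (!percpu_ref_tryget_live(ref))
		return -ENODEV;

	/* the actual work would go here */

	percpu_ref_put(ref);
	return 0;
}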
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f7a61ca4b39..2b621982938d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -202,6 +202,13 @@ struct pmu {
*/
int (*event_init) (struct perf_event *event);
+ /*
+ * Notification that the event was mapped or unmapped. Called
+ * in the context of the mapping task.
+ */
+ void (*event_mapped) (struct perf_event *event); /*optional*/
+ void (*event_unmapped) (struct perf_event *event); /*optional*/
+
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
@@ -450,11 +457,6 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
-enum perf_event_context_type {
- task_context,
- cpu_context,
-};
-
/**
* struct perf_event_context - event context structure
*
@@ -462,7 +464,6 @@ enum perf_event_context_type {
*/
struct perf_event_context {
struct pmu *pmu;
- enum perf_event_context_type type;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -475,6 +476,7 @@ struct perf_event_context {
*/
struct mutex mutex;
+ struct list_head active_ctx_list;
struct list_head pinned_groups;
struct list_head flexible_groups;
struct list_head event_list;
@@ -525,7 +527,6 @@ struct perf_cpu_context {
int exclusive;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
- struct list_head rotation_list;
struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
@@ -665,6 +666,7 @@ static inline int is_software_event(struct perf_event *event)
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
@@ -689,14 +691,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
- struct pt_regs hot_regs;
+ if (static_key_false(&perf_swevent_enabled[event_id]))
+ __perf_sw_event(event_id, nr, regs, addr);
+}
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
if (static_key_false(&perf_swevent_enabled[event_id])) {
- if (!regs) {
- perf_fetch_caller_regs(&hot_regs);
- regs = &hot_regs;
- }
- __perf_sw_event(event_id, nr, regs, addr);
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(event_id, nr, regs, addr);
}
}
@@ -712,7 +725,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
- perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+ perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
@@ -823,6 +836,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
+static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline int perf_register_guest_info_callbacks
@@ -899,12 +914,22 @@ struct perf_pmu_events_attr {
const char *event_str;
};
+ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page);
+
#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
static struct perf_pmu_events_attr _var = { \
.attr = __ATTR(_name, 0444, _show, NULL), \
.id = _id, \
};
+#define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
+static struct perf_pmu_events_attr _var = { \
+ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = _str, \
+};
+
#define PMU_FORMAT_ATTR(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
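
PMU_EVENT_ATTR_STRING() builds a sysfs event attribute whose show routine is perf_event_sysfs_show(), printing a fixed event string. A hedged sketch of how a PMU driver might expose one event this way; the event name and encoding are made up:

#include <linux/perf_event.h>
#include <linux/sysfs.h>

/* defines 'foo_attr_cycles' whose sysfs show routine prints "event=0x11" */
PMU_EVENT_ATTR_STRING(cycles, foo_attr_cycles, "event=0x11");

static struct attribute *foo_pmu_events[] = {
	&foo_attr_cycles.attr.attr,
	NULL,
};

static struct attribute_group foo_pmu_events_group = {
	.name = "events",
	.attrs = foo_pmu_events,
};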
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 22af8f8f5802..685809835b5c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,6 +327,8 @@ struct phy_c45_device_ids {
* c45_ids: 802.3-c45 Device Identifers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * has_fixups: Set to true if this phy has fixups/quirks.
+ * suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* addr: Bus address of PHY
@@ -364,6 +366,7 @@ struct phy_device {
bool is_c45;
bool is_internal;
bool has_fixups;
+ bool suspended;
enum phy_state state;
@@ -565,6 +568,15 @@ struct phy_driver {
void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
int devnum, int regnum, u32 val);
+ /* Get the size and type of the eeprom contained within a plug-in
+ * module */
+ int (*module_info)(struct phy_device *dev,
+ struct ethtool_modinfo *modinfo);
+
+ /* Get the eeprom information from the plug-in module */
+ int (*module_eeprom)(struct phy_device *dev,
+ struct ethtool_eeprom *ee, u8 *data);
+
struct device_driver driver;
};
#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
index e9e6cfbfbb58..eb7d4a135a9e 100644
--- a/include/linux/phy/omap_control_phy.h
+++ b/include/linux/phy/omap_control_phy.h
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0
#define OMAP_CTRL_PCIE_PCS_MASK 0xff
-#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8
+#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16
#define OMAP_CTRL_USB2_PHY_PD BIT(28)
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
void omap_control_phy_power(struct device *dev, int on);
void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode);
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
#else
static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
{
}
-static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
{
}
#endif
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
new file mode 100644
index 000000000000..9d18e9f948e9
--- /dev/null
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef PHY_QCOM_UFS_H_
+#define PHY_QCOM_UFS_H_
+
+#include "phy.h"
+
+/**
+ * ufs_qcom_phy_enable_ref_clk() - Enable the phy
+ * ref clock.
+ * @phy: reference to a generic phy
+ *
+ * returns 0 for success, and non-zero for error.
+ */
+int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_ref_clk() - Disable the phy
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
+
+int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
+void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
+int ufs_qcom_phy_start_serdes(struct phy *phy);
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
+int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
+void ufs_qcom_phy_save_controller_version(struct phy *phy,
+ u8 major, u16 minor, u16 step);
+
+#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 18eccefea06e..72c0415d6c21 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline void pinctrl_put(struct pinctrl *p)
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
struct pinctrl *p,
const char *name)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline int pinctrl_select_state(struct pinctrl *p,
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p,
static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline void devm_pinctrl_put(struct pinctrl *p)
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d578a60eff23..fe65962b264f 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -115,6 +115,18 @@ enum pin_config_param {
PIN_CONFIG_END = 0x7FFF,
};
+#ifdef CONFIG_DEBUG_FS
+#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
+ .has_arg = d }
+
+struct pin_config_item {
+ const enum pin_config_param param;
+ const char * const display;
+ const char * const format;
+ bool has_arg;
+};
+#endif /* CONFIG_DEBUG_FS */
+
/*
* Helpful configuration macro to be used in tables etc.
*/
@@ -150,6 +162,12 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
struct pinctrl_dev;
struct pinctrl_map;
+struct pinconf_generic_params {
+ const char * const property;
+ enum pin_config_param param;
+ u32 default_value;
+};
+
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
unsigned *reserved_maps, unsigned *num_maps,
@@ -174,6 +192,17 @@ static inline int pinconf_generic_dt_node_to_map_pin(
PIN_MAP_TYPE_CONFIGS_PIN);
}
+static inline int pinconf_generic_dt_node_to_map_all(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ /*
+ * passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
+ * to infer the map type from the DT properties used.
+ */
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+}
#endif
#endif /* CONFIG_GENERIC_PINCONF */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index cc8e1aff0e28..66e4697516de 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -24,6 +24,7 @@ struct pinctrl_dev;
struct pinctrl_map;
struct pinmux_ops;
struct pinconf_ops;
+struct pin_config_item;
struct gpio_chip;
struct device_node;
@@ -117,6 +118,12 @@ struct pinctrl_ops {
* @confops: pin config operations vtable, if you support pin configuration in
* your driver
* @owner: module providing the pin controller, used for refcounting
+ * @num_custom_params: Number of driver-specific custom parameters to be parsed
+ * from the hardware description
+ * @custom_params: List of driver-specific custom parameters to be parsed from
+ * the hardware description
+ * @custom_conf_items: Information on how to print @custom_params in debugfs;
+ * must be the same size as @custom_params, i.e. @num_custom_params entries
*/
struct pinctrl_desc {
const char *name;
@@ -126,6 +133,11 @@ struct pinctrl_desc {
const struct pinmux_ops *pmxops;
const struct pinconf_ops *confops;
struct module *owner;
+#ifdef CONFIG_GENERIC_PINCONF
+ unsigned int num_custom_params;
+ const struct pinconf_generic_params *custom_params;
+ const struct pin_config_item *custom_conf_items;
+#endif
};
/* External interface to pin controller */
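
Generic pinconf can now parse driver-specific DT properties (custom_params) and pretty-print them in debugfs (custom_conf_items, built with PCONFDUMP) via the new pinctrl_desc fields. A hedged sketch of a driver registering one vendor parameter; the property name, parameter value and units are invented:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>

/* driver-private parameter number above the generic PIN_CONFIG_END range */
#define FOO_PIN_CONFIG_GLITCH_FILTER (PIN_CONFIG_END + 1)

static const struct pinconf_generic_params foo_custom_params[] = {
	{ "vendor,glitch-filter-ns", FOO_PIN_CONFIG_GLITCH_FILTER, 0 },
};

#ifdef CONFIG_DEBUG_FS
static const struct pin_config_item foo_conf_items[] = {
	PCONFDUMP(FOO_PIN_CONFIG_GLITCH_FILTER, "glitch filter", "ns", true),
};
#endif

static struct pinctrl_desc foo_pinctrl_desc = {
	.name = "foo-pinctrl",
	.owner = THIS_MODULE,
#ifdef CONFIG_GENERIC_PINCONF
	.num_custom_params = ARRAY_SIZE(foo_custom_params),
	.custom_params = foo_custom_params,
#ifdef CONFIG_DEBUG_FS
	.custom_conf_items = foo_conf_items,
#endif
#endif
};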
diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h
deleted file mode 100644
index 5275b3ac6d37..000000000000
--- a/include/linux/platform_data/ipmmu-vmsa.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * IPMMU VMSA Platform Data
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- */
-
-#ifndef __IPMMU_VMSA_H__
-#define __IPMMU_VMSA_H__
-
-struct ipmmu_vmsa_master {
- const char *name;
- unsigned int utlb;
-};
-
-struct ipmmu_vmsa_platform_data {
- const struct ipmmu_vmsa_master *masters;
- unsigned int num_masters;
-};
-
-#endif /* __IPMMU_VMSA_H__ */
diff --git a/arch/arm/include/asm/mach/irda.h b/include/linux/platform_data/irda-sa11x0.h
index 38f77b5e56cf..38f77b5e56cf 100644
--- a/arch/arm/include/asm/mach/irda.h
+++ b/include/linux/platform_data/irda-sa11x0.h
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 5c188f4e9bec..929469291406 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -31,10 +31,6 @@ struct omap_mmc_platform_data {
void (*cleanup)(struct device *dev);
void (*shutdown)(struct device *dev);
- /* To handle board related suspend/resume functionality for MMC */
- int (*suspend)(struct device *dev, int slot);
- int (*resume)(struct device *dev, int slot);
-
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h
new file mode 100644
index 000000000000..5658e58e0738
--- /dev/null
+++ b/include/linux/platform_data/regulator-haptic.h
@@ -0,0 +1,29 @@
+/*
+ * Regulator Haptic Platform Data
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Hyunhee Kim <hyunhee.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGULATOR_HAPTIC_H
+#define _REGULATOR_HAPTIC_H
+
+/*
+ * struct regulator_haptic_data - Platform device data
+ *
+ * @max_volt: maximum voltage value supplied to the haptic motor
+ * (in microvolts)
+ * @min_volt: minimum voltage value supplied to the haptic motor
+ * (in microvolts)
+ */
+struct regulator_haptic_data {
+ unsigned int max_volt;
+ unsigned int min_volt;
+};
+
+#endif /* _REGULATOR_HAPTIC_H */
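
regulator_haptic_data simply carries the voltage window the driver programs into the haptic regulator, in microvolts. A hedged sketch of board code providing it; the values and the foo_ prefix are made up:

#include <linux/platform_data/regulator-haptic.h>

/* assumed board values: drive the motor between 1.1 V and 2.7 V */
static struct regulator_haptic_data foo_haptic_pdata = {
	.min_volt = 1100000,	/* uV */
	.max_volt = 2700000,	/* uV */
};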
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 5087fff96d86..cc2bdafb0c69 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -26,6 +26,8 @@
struct st21nfca_nfc_platform_data {
unsigned int gpio_ena;
unsigned int irq_polarity;
+ bool is_ese_present;
+ bool is_uicc_present;
};
#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index c3b432f5b63e..b023373d9874 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -19,8 +19,6 @@
#ifndef _ST21NFCB_NCI_H_
#define _ST21NFCB_NCI_H_
-#include <linux/i2c.h>
-
#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
struct st21nfcb_nfc_platform_data {
@@ -28,4 +26,4 @@ struct st21nfcb_nfc_platform_data {
unsigned int irq_polarity;
};
-#endif /* _ST21NFCA_HCI_H_ */
+#endif /* _ST21NFCB_NCI_H_ */
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.h b/include/linux/platform_data/tpm_stm_st33.h
index 439a43249aa6..ff75310c0f47 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.h
+++ b/include/linux/platform_data/tpm_stm_st33.h
@@ -12,9 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* STMicroelectronics version 1.2.0, Copyright (C) 2010
* STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
@@ -23,39 +22,18 @@
*
* @Author: Christophe RICARD tpmsupport@st.com
*
- * @File: stm_st33_tpm_i2c.h
+ * @File: stm_st33_tpm.h
*
* @Date: 09/15/2010
*/
-#ifndef __STM_ST33_TPM_I2C_MAIN_H__
-#define __STM_ST33_TPM_I2C_MAIN_H__
+#ifndef __STM_ST33_TPM_H__
+#define __STM_ST33_TPM_H__
-#define TPM_ACCESS (0x0)
-#define TPM_STS (0x18)
-#define TPM_HASH_END (0x20)
-#define TPM_DATA_FIFO (0x24)
-#define TPM_HASH_DATA (0x24)
-#define TPM_HASH_START (0x28)
-#define TPM_INTF_CAPABILITY (0x14)
-#define TPM_INT_STATUS (0x10)
-#define TPM_INT_ENABLE (0x08)
-
-#define TPM_DUMMY_BYTE 0xAA
-#define TPM_WRITE_DIRECTION 0x80
-#define TPM_HEADER_SIZE 10
-#define TPM_BUFSIZE 2048
-
-#define LOCALITY0 0
-
-#define TPM_ST33_I2C "st33zp24_i2c"
+#define TPM_ST33_I2C "st33zp24-i2c"
+#define TPM_ST33_SPI "st33zp24-spi"
struct st33zp24_platform_data {
- int io_serirq;
int io_lpcpd;
- struct i2c_client *client;
- u8 *tpm_i2c_buffer[2]; /* 0 Request 1 Response */
- struct completion irq_detection;
- struct mutex lock;
};
-#endif /* __STM_ST33_TPM_I2C_MAIN_H__ */
+#endif /* __STM_ST33_TPM_H__ */
diff --git a/include/linux/platform_data/vsp1.h b/include/linux/platform_data/vsp1.h
deleted file mode 100644
index 63170e2614b3..000000000000
--- a/include/linux/platform_data/vsp1.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * vsp1.h -- R-Car VSP1 Platform Data
- *
- * Copyright (C) 2013 Renesas Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __PLATFORM_VSP1_H__
-#define __PLATFORM_VSP1_H__
-
-#define VSP1_HAS_LIF (1 << 0)
-#define VSP1_HAS_LUT (1 << 1)
-#define VSP1_HAS_SRU (1 << 2)
-
-struct vsp1_platform_data {
- unsigned int features;
- unsigned int rpf_count;
- unsigned int uds_count;
- unsigned int wpf_count;
-};
-
-#endif /* __PLATFORM_VSP1_H__ */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8b5976364619..e2f1be6dd9dd 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -597,7 +597,7 @@ struct dev_pm_info {
extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
-extern int dev_pm_put_subsys_data(struct device *dev);
+extern void dev_pm_put_subsys_data(struct device *dev);
/*
* Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a9edab2c787a..080e778118ba 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -113,8 +113,6 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
- struct mutex lock;
- unsigned int refcount;
int need_restore;
};
@@ -140,7 +138,6 @@ extern int __pm_genpd_name_add_device(const char *domain_name,
extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev);
-extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain);
extern int pm_genpd_add_subdomain_names(const char *master_name,
@@ -187,7 +184,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_sd)
{
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 195aafc6cd07..6512e9cbc6d5 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
+#include <linux/console.h>
#define PNP_NAME_LEN 50
@@ -309,15 +310,22 @@ struct pnp_fixup {
#define PNP_DISABLE 0x0004
#define PNP_CONFIGURABLE 0x0008
#define PNP_REMOVABLE 0x0010
+#define PNP_CONSOLE 0x0020
#define pnp_can_read(dev) (((dev)->protocol->get) && \
((dev)->capabilities & PNP_READ))
#define pnp_can_write(dev) (((dev)->protocol->set) && \
((dev)->capabilities & PNP_WRITE))
-#define pnp_can_disable(dev) (((dev)->protocol->disable) && \
- ((dev)->capabilities & PNP_DISABLE))
+#define pnp_can_disable(dev) (((dev)->protocol->disable) && \
+ ((dev)->capabilities & PNP_DISABLE) && \
+ (!((dev)->capabilities & PNP_CONSOLE) || \
+ console_suspend_enabled))
#define pnp_can_configure(dev) ((!(dev)->active) && \
((dev)->capabilities & PNP_CONFIGURABLE))
+#define pnp_can_suspend(dev) (((dev)->protocol->suspend) && \
+ (!((dev)->capabilities & PNP_CONSOLE) || \
+ console_suspend_enabled))
+
#ifdef CONFIG_ISAPNP
extern struct pnp_protocol isapnp_protocol;
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index e97fc656a058..416ebeb6ee1e 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -17,6 +17,7 @@
#include <linux/power_supply.h>
#include <linux/extcon.h>
+#include <linux/alarmtimer.h>
enum data_source {
CM_BATTERY_PRESENT,
@@ -45,29 +46,6 @@ enum cm_event_types {
};
/**
- * struct charger_global_desc
- * @rtc_name: the name of RTC used to wake up the system from suspend.
- * @rtc_only_wakeup:
- * If the system is woken up by waekup-sources other than the RTC or
- * callbacks, Charger Manager should recognize with
- * rtc_only_wakeup() returning false.
- * If the RTC given to CM is the only wakeup reason,
- * rtc_only_wakeup should return true.
- * @assume_timer_stops_in_suspend:
- * Assume that the jiffy timer stops in suspend-to-RAM.
- * When enabled, CM does not rely on jiffies value in
- * suspend_again and assumes that jiffies value does not
- * change during suspend.
- */
-struct charger_global_desc {
- char *rtc_name;
-
- bool (*rtc_only_wakeup)(void);
-
- bool assume_timer_stops_in_suspend;
-};
-
-/**
* struct charger_cable
* @extcon_name: the name of extcon device.
* @name: the name of charger cable(external connector).
@@ -266,22 +244,14 @@ struct charger_manager {
char psy_name_buf[PSY_NAME_MAX + 1];
struct power_supply charger_psy;
- bool status_save_ext_pwr_inserted;
- bool status_save_batt;
-
u64 charging_start_time;
u64 charging_end_time;
};
#ifdef CONFIG_CHARGER_MANAGER
-extern int setup_charger_manager(struct charger_global_desc *gd);
-extern bool cm_suspend_again(void);
extern void cm_notify_event(struct power_supply *psy,
enum cm_event_types type, char *msg);
#else
-static inline int setup_charger_manager(struct charger_global_desc *gd)
-{ return 0; }
-static inline bool cm_suspend_again(void) { return false; }
static inline void cm_notify_event(struct power_supply *psy,
enum cm_event_types type, char *msg) { }
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c8f170324e64..baa3f97d8ce8 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -10,9 +10,6 @@
extern const char linux_banner[];
extern const char linux_proc_banner[];
-extern char *log_buf_addr_get(void);
-extern u32 log_buf_len_get(void);
-
static inline int printk_get_level(const char *buffer)
{
if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
extern void wake_up_klogd(void);
+char *log_buf_addr_get(void);
+u32 log_buf_len_get(void);
void log_buf_kexec_setup(void);
void __init setup_log_buf(int early);
void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
{
}
+static inline char *log_buf_addr_get(void)
+{
+ return NULL;
+}
+
+static inline u32 log_buf_len_get(void)
+{
+ return 0;
+}
+
static inline void log_buf_kexec_setup(void)
{
}
@@ -408,9 +417,9 @@ enum {
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
-extern void hex_dump_to_buffer(const void *buf, size_t len,
- int rowsize, int groupsize,
- char *linebuf, size_t linebuflen, bool ascii);
+extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, char *linebuf, size_t linebuflen,
+ bool ascii);
#ifdef CONFIG_PRINTK
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ece0c6bbfcc5..8884f6e507f7 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -39,6 +39,7 @@ enum pstore_type_id {
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
+ PSTORE_TYPE_PMSG = 7,
PSTORE_TYPE_UNKNOWN = 255
};
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4af3fdc85b01..9c9d6c154c8e 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -81,6 +81,7 @@ struct ramoops_platform_data {
unsigned long record_size;
unsigned long console_size;
unsigned long ftrace_size;
+ unsigned long pmsg_size;
int dump_oops;
struct persistent_ram_ecc_info ecc_info;
};
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 77aed9ea1d26..dab545bb66b3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -37,6 +37,7 @@
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
+#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 50978b781a19..d534e8ed308a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -216,19 +216,21 @@ struct mem_dqinfo {
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
- qsize_t dqi_maxblimit;
- qsize_t dqi_maxilimit;
+ qsize_t dqi_max_spc_limit;
+ qsize_t dqi_max_ino_limit;
void *dqi_priv;
};
struct super_block;
-#define DQF_MASK 0xffff /* Mask for format specific flags */
-#define DQF_GETINFO_MASK 0x1ffff /* Mask for flags passed to userspace */
-#define DQF_SETINFO_MASK 0xffff /* Mask for flags modifiable from userspace */
-#define DQF_SYS_FILE_B 16
-#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B) /* Quota file stored as system file */
-#define DQF_INFO_DIRTY_B 31
+/* Mask for flags passed to userspace */
+#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE)
+/* Mask for flags modifiable from userspace */
+#define DQF_SETINFO_MASK DQF_ROOT_SQUASH
+
+enum {
+ DQF_INFO_DIRTY_B = DQF_PRIVATE,
+};
#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
extern void mark_info_dirty(struct super_block *sb, int type);
@@ -321,18 +323,61 @@ struct dquot_operations {
struct path;
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+ int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
+ u64 d_spc_hardlimit; /* absolute limit on used space */
+ u64 d_spc_softlimit; /* preferred limit on used space */
+ u64 d_ino_hardlimit; /* maximum # allocated inodes */
+ u64 d_ino_softlimit; /* preferred inode limit */
+ u64 d_space; /* Space owned by the user */
+ u64 d_ino_count; /* # inodes owned by the user */
+ s64 d_ino_timer; /* zero if within inode limits */
+ /* if not, we refuse service */
+ s64 d_spc_timer; /* similar to above; for space */
+ int d_ino_warns; /* # warnings issued wrt num inodes */
+ int d_spc_warns; /* # warnings issued wrt used space */
+ u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+ u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+ u64 d_rt_space; /* realtime space owned */
+ s64 d_rt_spc_timer; /* similar to above; for RT space */
+ int d_rt_spc_warns; /* # warnings issued wrt RT space */
+};
+
+/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+#define QC_INO_SOFT (1<<0)
+#define QC_INO_HARD (1<<1)
+#define QC_SPC_SOFT (1<<2)
+#define QC_SPC_HARD (1<<3)
+#define QC_RT_SPC_SOFT (1<<4)
+#define QC_RT_SPC_HARD (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define QC_SPC_TIMER (1<<6)
+#define QC_INO_TIMER (1<<7)
+#define QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define QC_SPC_WARNS (1<<9)
+#define QC_INO_WARNS (1<<10)
+#define QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define QC_SPACE (1<<12)
+#define QC_INO_COUNT (1<<13)
+#define QC_RT_SPACE (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+
/* Operations handling requests from userspace */
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, struct path *);
- int (*quota_on_meta)(struct super_block *, int, int);
int (*quota_off)(struct super_block *, int);
+ int (*quota_enable)(struct super_block *, unsigned int);
+ int (*quota_disable)(struct super_block *, unsigned int);
int (*quota_sync)(struct super_block *, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
- int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
- int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
- int (*set_xstate)(struct super_block *, unsigned int, int);
int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
int (*rm_xquota)(struct super_block *, unsigned int);
};
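For illustration only: a minimal sketch of driving the new interface from generic code, filling struct qc_dqblk and calling ->set_dqblk to change space limits. example_set_space_limits() and its error handling are invented here, not part of the patch.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/quota.h>

/* Invented helper: set soft/hard space limits (in bytes) for one id. */
static int example_set_space_limits(struct super_block *sb, struct kqid qid,
				    u64 soft_bytes, u64 hard_bytes)
{
	struct qc_dqblk dq = {
		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
		.d_spc_softlimit = soft_bytes,
		.d_spc_hardlimit = hard_bytes,
	};

	if (!sb->s_qcop || !sb->s_qcop->set_dqblk)
		return -ENOSYS;

	return sb->s_qcop->set_dqblk(sb, qid, &dq);
}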
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f23538a6e411..df73258cca47 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
- struct fs_disk_quota *di);
+ struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
- struct fs_disk_quota *di);
+ struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
int dquot_transfer(struct inode *inode, struct iattr *iattr);
@@ -166,6 +166,7 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
*/
extern const struct dquot_operations dquot_operations;
extern const struct quotactl_ops dquot_quotactl_ops;
+extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
#else
@@ -386,4 +387,6 @@ static inline void dquot_release_reservation_block(struct inode *inode,
__dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
}
+unsigned int qtype_enforce_flag(int type);
+
#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc946f450..a18b16f1dc0e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu(pos, member) \
- for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member); \
+ for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member); \
pos; \
- pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
- for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
- typeof(*(pos)), member); \
+ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member); \
pos; \
- pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
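A small usage sketch for the reworked macro; struct item and its hash chain are invented. Starting from a known element, the walk continues down the rest of the chain under rcu_read_lock():

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	int			val;
	struct hlist_node	node;
};

/* Sum the values of all items that follow 'start' on its hash chain. */
static int sum_after(struct item *start)
{
	struct item *pos = start;
	int sum = 0;

	rcu_read_lock();
	hlist_for_each_entry_continue_rcu(pos, node)
		sum += pos->val;
	rcu_read_unlock();

	return sum;
}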
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f5939a452..78097491cd99 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
+ rcu_all_qs(); \
if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
})
#define __rcu_dereference_check(p, c, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ /* Dependency order vs. p above. */ \
+ typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- ((typeof(*p) __force __kernel *)(_________p1)); \
+ ((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
})
#define __rcu_dereference_index_check(p, c) \
({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
+ /* Dependency order vs. p above. */ \
+ typeof(p) _________p1 = lockless_dereference(p); \
rcu_lockdep_assert(c, \
"suspicious rcu_dereference_index_check() usage"); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e5366200154..937edaeb150d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
}
/*
- * Return the number of grace periods.
+ * Return the number of grace periods started.
*/
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_started(void)
{
return 0;
}
/*
- * Return the number of bottom-half grace periods.
+ * Return the number of bottom-half grace periods started.
*/
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_started_bh(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
{
return 0;
}
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
return true;
}
-
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+static inline void rcu_all_qs(void)
+{
+}
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 52953790dcca..d2e583a6aaca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
-long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
-long rcu_batches_completed_sched(void);
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
bool rcu_is_watching(void);
+void rcu_all_qs(void);
+
#endif /* __LINUX_RCUTREE_H */
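In the spirit of how rcutorture consumes these counters, a sketch that reads rcu_batches_completed() around synchronize_rcu() to confirm a grace period elapsed (meaningful for tree RCU; the Tiny RCU stubs earlier always return 0, and counter wrap is ignored here):

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

static void check_grace_period(void)
{
	unsigned long before = rcu_batches_completed();

	synchronize_rcu();

	/* At least one grace period must have completed meanwhile. */
	WARN_ON_ONCE(rcu_batches_completed() == before);
}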
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4419b99d8d6e..116655d92269 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
*
* @reg: Offset of the register within the regmap bank
* @lsb: lsb of the register field.
- * @reg: msb of the register field.
+ * @msb: msb of the register field.
* @id_size: port size if it has some ports
* @id_offset: address offset for each ports
*/
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5479394fefce..5dd65acc2a69 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -32,6 +32,8 @@ struct da9211_pdata {
* 2 : 2 phase 2 buck
*/
int num_buck;
+ int gpio_ren[DA9211_MAX_REGULATORS];
+ struct device_node *reg_node[DA9211_MAX_REGULATORS];
struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
};
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 5f1e9ca47417..d4ad5b5a02bb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -21,6 +21,7 @@
struct regmap;
struct regulator_dev;
+struct regulator_config;
struct regulator_init_data;
struct regulator_enable_gpio;
@@ -205,6 +206,15 @@ enum regulator_type {
* @supply_name: Identifying the regulator supply
* @of_match: Name used to identify regulator in DT.
* @regulators_node: Name of node containing regulator definitions in DT.
+ * @of_parse_cb: Optional callback called only if of_match is present.
+ * Will be called for each regulator parsed from DT, during
+ * init_data parsing.
+ * The regulator_config passed as argument to the callback will
+ * be a copy of config passed to regulator_register, valid only
+ * for this particular call. Callback may freely change the
+ * config but it cannot store it for later usage.
+ * Callback should return 0 on success or negative ERRNO
+ * indicating failure.
* @id: Numerical identifier for the regulator.
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
const char *supply_name;
const char *of_match;
const char *regulators_node;
+ int (*of_parse_cb)(struct device_node *,
+ const struct regulator_desc *,
+ struct regulator_config *);
int id;
bool continuous_voltage_range;
unsigned n_voltages;
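A hypothetical of_parse_cb implementation to go with the documentation above. The "vendor,enable-boost" property and struct my_chip are invented; only the callback signature comes from this header.

#include <linux/of.h>
#include <linux/regulator/driver.h>

struct my_chip {
	bool boost;
};

static int my_of_parse_cb(struct device_node *np,
			  const struct regulator_desc *desc,
			  struct regulator_config *config)
{
	struct my_chip *chip = config->driver_data;

	/* Per-regulator DT tweak, read while init_data is being parsed. */
	chip->boost = of_property_read_bool(np, "vendor,enable-boost");

	/* A negative return would abort registration of this regulator. */
	return 0;
}

A driver would then set .of_parse_cb = my_of_parse_cb in its struct regulator_desc, next to .of_match and .regulators_node.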
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05d470b..b07562e082c4 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -191,15 +191,22 @@ struct regulator_init_data {
void *driver_data; /* core does not touch this */
};
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
-
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
+int regulator_suspend_prepare(suspend_state_t state);
+int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+static inline int regulator_suspend_prepare(suspend_state_t state)
+{
+ return 0;
+}
+static inline int regulator_suspend_finish(void)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 000000000000..30cc5963e265
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6397_H
+#define __LINUX_REGULATOR_MT6397_H
+
+enum {
+ MT6397_ID_VPCA15 = 0,
+ MT6397_ID_VPCA7,
+ MT6397_ID_VSRAMCA15,
+ MT6397_ID_VSRAMCA7,
+ MT6397_ID_VCORE,
+ MT6397_ID_VGPU,
+ MT6397_ID_VDRM,
+ MT6397_ID_VIO18 = 7,
+ MT6397_ID_VTCXO,
+ MT6397_ID_VA28,
+ MT6397_ID_VCAMA,
+ MT6397_ID_VIO28,
+ MT6397_ID_VUSB,
+ MT6397_ID_VMC,
+ MT6397_ID_VMCH,
+ MT6397_ID_VEMC3V3,
+ MT6397_ID_VGP1,
+ MT6397_ID_VGP2,
+ MT6397_ID_VGP3,
+ MT6397_ID_VGP4,
+ MT6397_ID_VGP5,
+ MT6397_ID_VGP6,
+ MT6397_ID_VIBR,
+ MT6397_ID_RG_MAX,
+};
+
+#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
+#define MT6397_REGULATOR_ID97 0x97
+#define MT6397_REGULATOR_ID91 0x91
+
+#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 364f7a7c43db..70c6c66c5bcf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -49,6 +49,20 @@
#define PFUZE200_VGEN5 11
#define PFUZE200_VGEN6 12
+#define PFUZE3000_SW1A 0
+#define PFUZE3000_SW1B 1
+#define PFUZE3000_SW2 2
+#define PFUZE3000_SW3 3
+#define PFUZE3000_SWBST 4
+#define PFUZE3000_VSNVS 5
+#define PFUZE3000_VREFDDR 6
+#define PFUZE3000_VLDO1 7
+#define PFUZE3000_VLDO2 8
+#define PFUZE3000_VCCSD 9
+#define PFUZE3000_V33 10
+#define PFUZE3000_VLDO3 11
+#define PFUZE3000_VLDO4 12
+
struct regulator_init_data;
struct pfuze_regulator_platform_data {
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644
index 000000000000..e2bf63d881d4
--- /dev/null
+++ b/include/linux/resource_ext.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Jiang Liu <jiang.liu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_RESOURCE_EXT_H
+#define _LINUX_RESOURCE_EXT_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+/* Represent resource window for bridge devices */
+struct resource_win {
+ struct resource res; /* In master (CPU) address space */
+ resource_size_t offset; /* Translation offset for bridge */
+};
+
+/*
+ * Common resource list management data structure and interfaces to support
+ * ACPI, PNP and PCI host bridge etc.
+ */
+struct resource_entry {
+ struct list_head node;
+ struct resource *res; /* In master (CPU) address space */
+ resource_size_t offset; /* Translation offset for bridge */
+ struct resource __res; /* Default storage for res */
+};
+
+extern struct resource_entry *
+resource_list_create_entry(struct resource *res, size_t extra_size);
+extern void resource_list_free(struct list_head *head);
+
+static inline void resource_list_add(struct resource_entry *entry,
+ struct list_head *head)
+{
+ list_add(&entry->node, head);
+}
+
+static inline void resource_list_add_tail(struct resource_entry *entry,
+ struct list_head *head)
+{
+ list_add_tail(&entry->node, head);
+}
+
+static inline void resource_list_del(struct resource_entry *entry)
+{
+ list_del(&entry->node);
+}
+
+static inline void resource_list_free_entry(struct resource_entry *entry)
+{
+ kfree(entry);
+}
+
+static inline void
+resource_list_destroy_entry(struct resource_entry *entry)
+{
+ resource_list_del(entry);
+ resource_list_free_entry(entry);
+}
+
+#define resource_list_for_each_entry(entry, list) \
+ list_for_each_entry((entry), (list), node)
+
+#define resource_list_for_each_entry_safe(entry, tmp, list) \
+ list_for_each_entry_safe((entry), (tmp), (list), node)
+
+#endif /* _LINUX_RESOURCE_EXT_H */
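A self-contained sketch of the helpers above: build a one-window list, walk it, free it. The window values are arbitrary, and it assumes resource_list_create_entry() falls back to the embedded __res storage when passed a NULL resource.

#include <linux/ioport.h>
#include <linux/printk.h>
#include <linux/resource_ext.h>

static void example_resource_list(void)
{
	LIST_HEAD(resources);
	struct resource_entry *entry;

	/* Assumed: a NULL resource makes the entry use its embedded __res. */
	entry = resource_list_create_entry(NULL, 0);
	if (!entry)
		return;

	entry->res->start = 0x80000000;
	entry->res->end   = 0x8fffffff;
	entry->res->flags = IORESOURCE_MEM;
	entry->offset     = 0x10000000;	/* CPU-to-bus translation */
	resource_list_add_tail(entry, &resources);

	resource_list_for_each_entry(entry, &resources)
		pr_info("window %pR, offset %pa\n", entry->res, &entry->offset);

	resource_list_free(&resources);
}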
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b93fd89b2e5e..58851275fed9 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -18,16 +18,45 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/rculist.h>
+#include <linux/compiler.h>
+#include <linux/list_nulls.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+/*
+ * The end of the chain is marked with a special nulls marker which has
+ * the following format:
+ *
+ * +-------+-----------------------------------------------------+-+
+ * | Base | Hash |1|
+ * +-------+-----------------------------------------------------+-+
+ *
+ * Base (4 bits) : Reserved to distinguish between multiple tables.
+ * Specified via &struct rhashtable_params.nulls_base.
+ * Hash (27 bits): Full hash (unmasked) of first element added to bucket
+ * 1 (1 bit) : Nulls marker (always set)
+ *
+ * The remaining bits of the next pointer remain unused for now.
+ */
+#define RHT_BASE_BITS 4
+#define RHT_HASH_BITS 27
+#define RHT_BASE_SHIFT RHT_HASH_BITS
struct rhash_head {
struct rhash_head __rcu *next;
};
-#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
-
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @buckets: size * hash buckets
+ */
struct bucket_table {
size_t size;
+ unsigned int locks_mask;
+ spinlock_t *locks;
struct rhash_head __rcu *buckets[];
};
@@ -45,11 +74,16 @@ struct rhashtable;
* @hash_rnd: Seed to use while hashing
* @max_shift: Maximum number of shifts while expanding
* @min_shift: Minimum number of shifts while shrinking
+ * @nulls_base: Base value to generate nulls marker
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Function to hash key
* @obj_hashfn: Function to hash object
* @grow_decision: If defined, may return true if table should expand
* @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
+ *
+ * Note: when implementing the grow and shrink decision functions, min/max
+ * shift must be enforced; otherwise the resizing watermarks they set may
+ * be useless.
*/
struct rhashtable_params {
size_t nelem_hint;
@@ -59,36 +93,95 @@ struct rhashtable_params {
u32 hash_rnd;
size_t max_shift;
size_t min_shift;
+ u32 nulls_base;
+ size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
bool (*grow_decision)(const struct rhashtable *ht,
size_t new_size);
bool (*shrink_decision)(const struct rhashtable *ht,
size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
- int (*mutex_is_held)(void *parent);
- void *parent;
-#endif
};
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
+ * @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
* @shift: Current size (1 << shift)
* @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @walkers: List of active walkers
+ * @being_destroyed: True if table is set up for destruction
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- size_t nelems;
- size_t shift;
+ struct bucket_table __rcu *future_tbl;
+ atomic_t nelems;
+ atomic_t shift;
struct rhashtable_params p;
+ struct work_struct run_work;
+ struct mutex mutex;
+ struct list_head walkers;
+ bool being_destroyed;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @resize: Resize event occurred
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ bool resize;
};
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhashtable_walker *walker;
+ unsigned int slot;
+ unsigned int skip;
+};
+
+static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
+{
+ return NULLS_MARKER(ht->p.nulls_base + hash);
+}
+
+#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+ ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+
+static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr & 1);
+}
+
+static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr) >> 1;
+}
+
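A tiny illustration of the nulls helpers just defined (the hash value is arbitrary): the chain terminator is an odd encoded value rather than a NULL pointer.

#include <linux/printk.h>
#include <linux/rhashtable.h>

static void nulls_demo(const struct rhashtable *ht)
{
	struct rhash_head *end;

	INIT_RHT_NULLS_HEAD(end, ht, 0x1234);

	/* The marker is odd, so it can never be a real node pointer. */
	if (rht_is_a_nulls(end))
		pr_info("chain terminator encodes %lx\n",
			rht_get_nulls_value(end));
}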
#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+ return 1;
+}
+
+static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
+ u32 hash)
{
return 1;
}
@@ -96,13 +189,8 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev);
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
@@ -110,11 +198,23 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup(struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
bool (*compare)(void *, void *), void *arg);
-void rhashtable_destroy(const struct rhashtable *ht);
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+ struct rhash_head *obj,
+ bool (*compare)(void *, void *),
+ void *arg);
+
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+
+void rhashtable_destroy(struct rhashtable *ht);
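A sketch of the walker API declared above; struct obj is invented and the -EAGAIN (resize) cases are merely logged or skipped rather than handled fully.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/rhashtable.h>

struct obj {
	int			key;
	struct rhash_head	node;
};

static void walk_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct obj *obj;

	if (rhashtable_walk_init(ht, &iter))
		return;

	if (rhashtable_walk_start(&iter) == -EAGAIN)
		pr_info("table resized before the walk; entries may repeat\n");

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize hit mid-walk */
			break;
		}
		pr_info("key=%d\n", obj->key);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}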
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -122,92 +222,146 @@ void rhashtable_destroy(const struct rhashtable *ht);
#define rht_dereference_rcu(p, ht) \
rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
-#define rht_entry(ptr, type, member) container_of(ptr, type, member)
-#define rht_entry_safe(ptr, type, member) \
-({ \
- typeof(ptr) __ptr = (ptr); \
- __ptr ? rht_entry(__ptr, type, member) : NULL; \
-})
+#define rht_dereference_bucket(p, tbl, hash) \
+ rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_dereference_bucket_rcu(p, tbl, hash) \
+ rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
-#define rht_next_entry_safe(pos, ht, member) \
-({ \
- pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
- typeof(*(pos)), member) : NULL; \
-})
+#define rht_entry(tpos, pos, member) \
+ ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+
+/**
+ * rht_for_each_continue - continue iterating over hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ */
+#define rht_for_each_continue(pos, head, tbl, hash) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each - iterate over hash chain
- * @pos: &struct rhash_head to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
*/
-#define rht_for_each(pos, head, ht) \
- for (pos = rht_dereference(head, ht); \
- pos; \
- pos = rht_dereference((pos)->next, ht))
+#define rht_for_each(pos, tbl, hash) \
+ rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_continue - continue iterating over hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each_entry - iterate over hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*/
-#define rht_for_each_entry(pos, head, ht, member) \
- for (pos = rht_entry_safe(rht_dereference(head, ht), \
- typeof(*(pos)), member); \
- pos; \
- pos = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ tbl, hash, member)
/**
* rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @n: type * to use for temporary next object storage
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @next: the &struct rhash_head to use as next in loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
-#define rht_for_each_entry_safe(pos, n, head, ht, member) \
- for (pos = rht_entry_safe(rht_dereference(head, ht), \
- typeof(*(pos)), member), \
- n = rht_next_entry_safe(pos, ht, member); \
- pos; \
- pos = n, \
- n = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL)
+
+/**
+ * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rcu_dereference_raw(pos->next))
/**
* rht_for_each_rcu - iterate over rcu hash chain
- * @pos: &struct rhash_head to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu(pos, tbl, hash) \
+ rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu(pos, head, ht) \
- for (pos = rht_dereference_rcu(head, ht); \
- pos; \
- pos = rht_dereference_rcu((pos)->next, ht))
+#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
* rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu(pos, head, member) \
- for (pos = rht_entry_safe(rcu_dereference_raw(head), \
- typeof(*(pos)), member); \
- pos; \
- pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \
- typeof(*(pos)), member))
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+ tbl, hash, member)
#endif /* _LINUX_RHASHTABLE_H */
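Finally, a sketch of a single-bucket lookup with the reworked iteration macros; struct test_obj is invented, the hash is assumed to be already masked to the table size, and object lifetime handling is omitted.

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

struct test_obj {
	int			key;
	struct rhash_head	node;
};

static struct test_obj *find_in_bucket(struct rhashtable *ht, u32 hash, int key)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct test_obj *obj;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
		if (obj->key == key) {
			rcu_read_unlock();
			return obj;	/* lifetime management left out */
		}
	}
	rcu_read_unlock();

	return NULL;
}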
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d9d7e7e56352..c4c559a45dc8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -198,7 +198,7 @@ int page_referenced(struct page *, int is_locked,
int try_to_unmap(struct page *, enum ttu_flags flags);
/*
- * Called from mm/filemap_xip.c to unmap empty zero page
+ * Used by uprobes to replace a userspace page safely
*/
pte_t *__page_check_address(struct page *, struct mm_struct *,
unsigned long, spinlock_t **, int);
@@ -246,7 +246,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
* arg: passed to rmap_one() and invalid_vma()
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
- * file_nonlinear: for handling file nonlinear mapping
* anon_lock: for getting anon_lock by optimized way rather than default
* invalid_vma: for skipping uninterested vma
*/
@@ -255,7 +254,6 @@ struct rmap_walk_control {
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
- int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6d6be09a2fe5..dcad7ee0d746 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -161,7 +161,7 @@ extern void devm_rtc_device_unregister(struct device *dev,
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
-extern int rtc_set_ntp_time(struct timespec now);
+extern int rtc_set_ntp_time(struct timespec64 now);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
new file mode 100644
index 000000000000..e6337a56d741
--- /dev/null
+++ b/include/linux/rtc/ds1685.h
@@ -0,0 +1,375 @@
+/*
+ * Definitions for the registers, addresses, and platform data of the
+ * DS1685/DS1687-series RTC chips.
+ *
+ * This driver also works for the DS17X85/DS17X87 RTC chips. Functionally
+ * similar to the DS1685/DS1687, they support a few extra features which
+ * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC
+ * write counter.
+ *
+ * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
+ *
+ * References:
+ * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10.
+ * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10.
+ * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105.
+ * Application Note 90, Using the Multiplex Bus RTC Extended Features.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RTC_DS1685_H_
+#define _LINUX_RTC_DS1685_H_
+
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct ds1685_priv - DS1685 private data structure.
+ * @dev: pointer to the rtc_device structure.
+ * @regs: iomapped base address pointer of the RTC registers.
+ * @regstep: padding/step size between registers (optional).
+ * @baseaddr: base address of the RTC device.
+ * @size: resource size.
+ * @lock: private lock variable for spin locking/unlocking.
+ * @work: private workqueue.
+ * @irq: IRQ number assigned to the RTC device.
+ * @prepare_poweroff: pointer to platform pre-poweroff function.
+ * @wake_alarm: pointer to platform wake alarm function.
+ * @post_ram_clear: pointer to platform post ram-clear function.
+ */
+struct ds1685_priv {
+ struct rtc_device *dev;
+ void __iomem *regs;
+ u32 regstep;
+ resource_size_t baseaddr;
+ size_t size;
+ spinlock_t lock;
+ struct work_struct work;
+ int irq_num;
+ bool bcd_mode;
+ bool no_irq;
+ bool uie_unsupported;
+ bool alloc_io_resources;
+ u8 (*read)(struct ds1685_priv *, int);
+ void (*write)(struct ds1685_priv *, int, u8);
+ void (*prepare_poweroff)(void);
+ void (*wake_alarm)(void);
+ void (*post_ram_clear)(void);
+};
+
+
+/**
+ * struct ds1685_rtc_platform_data - platform data structure.
+ * @plat_prepare_poweroff: platform-specific pre-poweroff function.
+ * @plat_wake_alarm: platform-specific wake alarm function.
+ * @plat_post_ram_clear: platform-specific post ram-clear function.
+ *
+ * If your platform needs to use a custom padding/step size between
+ * registers, or uses one or more of the extended interrupts and needs special
+ * handling, then include this header file in your platform definition and
+ * set regstep and the plat_* pointers as appropriate.
+ */
+struct ds1685_rtc_platform_data {
+ const u32 regstep;
+ const bool bcd_mode;
+ const bool no_irq;
+ const bool uie_unsupported;
+ const bool alloc_io_resources;
+ u8 (*plat_read)(struct ds1685_priv *, int);
+ void (*plat_write)(struct ds1685_priv *, int, u8);
+ void (*plat_prepare_poweroff)(void);
+ void (*plat_wake_alarm)(void);
+ void (*plat_post_ram_clear)(void);
+};
+
+
+/*
+ * Time Registers.
+ */
+#define RTC_SECS 0x00 /* Seconds 00-59 */
+#define RTC_SECS_ALARM 0x01 /* Alarm Seconds 00-59 */
+#define RTC_MINS 0x02 /* Minutes 00-59 */
+#define RTC_MINS_ALARM 0x03 /* Alarm Minutes 00-59 */
+#define RTC_HRS 0x04 /* Hours 01-12 AM/PM || 00-23 */
+#define RTC_HRS_ALARM 0x05 /* Alarm Hours 01-12 AM/PM || 00-23 */
+#define RTC_WDAY 0x06 /* Day of Week 01-07 */
+#define RTC_MDAY 0x07 /* Day of Month 01-31 */
+#define RTC_MONTH 0x08 /* Month 01-12 */
+#define RTC_YEAR 0x09 /* Year 00-99 */
+#define RTC_CENTURY 0x48 /* Century 00-99 */
+#define RTC_MDAY_ALARM 0x49 /* Alarm Day of Month 01-31 */
+
+
+/*
+ * Bit masks for the Time registers in BCD Mode (DM = 0).
+ */
+#define RTC_SECS_BCD_MASK 0x7f /* - x x x x x x x */
+#define RTC_MINS_BCD_MASK 0x7f /* - x x x x x x x */
+#define RTC_HRS_12_BCD_MASK 0x1f /* - - - x x x x x */
+#define RTC_HRS_24_BCD_MASK 0x3f /* - - x x x x x x */
+#define RTC_MDAY_BCD_MASK 0x3f /* - - x x x x x x */
+#define RTC_MONTH_BCD_MASK 0x1f /* - - - x x x x x */
+#define RTC_YEAR_BCD_MASK 0xff /* x x x x x x x x */
+
+/*
+ * Bit masks for the Time registers in BIN Mode (DM = 1).
+ */
+#define RTC_SECS_BIN_MASK 0x3f /* - - x x x x x x */
+#define RTC_MINS_BIN_MASK 0x3f /* - - x x x x x x */
+#define RTC_HRS_12_BIN_MASK 0x0f /* - - - - x x x x */
+#define RTC_HRS_24_BIN_MASK 0x1f /* - - - x x x x x */
+#define RTC_MDAY_BIN_MASK 0x1f /* - - - x x x x x */
+#define RTC_MONTH_BIN_MASK 0x0f /* - - - - x x x x */
+#define RTC_YEAR_BIN_MASK 0x7f /* - x x x x x x x */
+
+/*
+ * Bit masks common for the Time registers in BCD or BIN Mode.
+ */
+#define RTC_WDAY_MASK 0x07 /* - - - - - x x x */
+#define RTC_CENTURY_MASK 0xff /* x x x x x x x x */
+#define RTC_MDAY_ALARM_MASK 0xff /* x x x x x x x x */
+#define RTC_HRS_AMPM_MASK BIT(7) /* Mask for the AM/PM bit */
+
+
+
+/*
+ * Control Registers.
+ */
+#define RTC_CTRL_A 0x0a /* Control Register A */
+#define RTC_CTRL_B 0x0b /* Control Register B */
+#define RTC_CTRL_C 0x0c /* Control Register C */
+#define RTC_CTRL_D 0x0d /* Control Register D */
+#define RTC_EXT_CTRL_4A 0x4a /* Extended Control Register 4A */
+#define RTC_EXT_CTRL_4B 0x4b /* Extended Control Register 4B */
+
+
+/*
+ * Bit names in Control Register A.
+ */
+#define RTC_CTRL_A_UIP BIT(7) /* Update In Progress */
+#define RTC_CTRL_A_DV2 BIT(6) /* Countdown Chain */
+#define RTC_CTRL_A_DV1 BIT(5) /* Oscillator Enable */
+#define RTC_CTRL_A_DV0 BIT(4) /* Bank Select */
+#define RTC_CTRL_A_RS2 BIT(2) /* Rate-Selection Bit 2 */
+#define RTC_CTRL_A_RS3 BIT(3) /* Rate-Selection Bit 3 */
+#define RTC_CTRL_A_RS1 BIT(1) /* Rate-Selection Bit 1 */
+#define RTC_CTRL_A_RS0 BIT(0) /* Rate-Selection Bit 0 */
+#define RTC_CTRL_A_RS_MASK 0x0f /* RS3 + RS2 + RS1 + RS0 */
+
+/*
+ * Bit names in Control Register B.
+ */
+#define RTC_CTRL_B_SET BIT(7) /* SET Bit */
+#define RTC_CTRL_B_PIE BIT(6) /* Periodic-Interrupt Enable */
+#define RTC_CTRL_B_AIE BIT(5) /* Alarm-Interrupt Enable */
+#define RTC_CTRL_B_UIE BIT(4) /* Update-Ended Interrupt-Enable */
+#define RTC_CTRL_B_SQWE BIT(3) /* Square-Wave Enable */
+#define RTC_CTRL_B_DM BIT(2) /* Data Mode */
+#define RTC_CTRL_B_2412 BIT(1) /* 12-Hr/24-Hr Mode */
+#define RTC_CTRL_B_DSE BIT(0) /* Daylight Savings Enable */
+#define RTC_CTRL_B_PAU_MASK 0x70 /* PIE + AIE + UIE */
+
+
+/*
+ * Bit names in Control Register C.
+ *
+ * BIT(0), BIT(1), BIT(2), & BIT(3) are unused, always return 0, and cannot
+ * be written to.
+ */
+#define RTC_CTRL_C_IRQF BIT(7) /* Interrupt-Request Flag */
+#define RTC_CTRL_C_PF BIT(6) /* Periodic-Interrupt Flag */
+#define RTC_CTRL_C_AF BIT(5) /* Alarm-Interrupt Flag */
+#define RTC_CTRL_C_UF BIT(4) /* Update-Ended Interrupt Flag */
+#define RTC_CTRL_C_PAU_MASK 0x70 /* PF + AF + UF */
+
+
+/*
+ * Bit names in Control Register D.
+ *
+ * BIT(0) through BIT(6) are unused, always return 0, and cannot
+ * be written to.
+ */
+#define RTC_CTRL_D_VRT BIT(7) /* Valid RAM and Time */
+
+
+/*
+ * Bit names in Extended Control Register 4A.
+ *
+ * On the DS1685/DS1687/DS1689/DS1693, BIT(4) and BIT(5) are reserved for
+ * future use. They can be read from and written to, but have no effect
+ * on the RTC's operation.
+ *
+ * On the DS17x85/DS17x87, BIT(5) is Burst-Mode Enable (BME), and allows
+ * access to the extended NV-SRAM by automatically incrementing the address
+ * register when they are read from or written to.
+ */
+#define RTC_CTRL_4A_VRT2 BIT(7) /* Auxiliary Battery Status */
+#define RTC_CTRL_4A_INCR BIT(6) /* Increment-in-Progress Status */
+#define RTC_CTRL_4A_PAB BIT(3) /* Power-Active Bar Control */
+#define RTC_CTRL_4A_RF BIT(2) /* RAM-Clear Flag */
+#define RTC_CTRL_4A_WF BIT(1) /* Wake-Up Alarm Flag */
+#define RTC_CTRL_4A_KF BIT(0) /* Kickstart Flag */
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+#define RTC_CTRL_4A_BME BIT(5) /* Burst-Mode Enable */
+#endif
+#define RTC_CTRL_4A_RWK_MASK 0x07 /* RF + WF + KF */
+
+
+/*
+ * Bit names in Extended Control Register 4B.
+ */
+#define RTC_CTRL_4B_ABE BIT(7) /* Auxiliary Battery Enable */
+#define RTC_CTRL_4B_E32K BIT(6) /* Enable 32.768Hz on SQW Pin */
+#define RTC_CTRL_4B_CS BIT(5) /* Crystal Select */
+#define RTC_CTRL_4B_RCE BIT(4) /* RAM Clear-Enable */
+#define RTC_CTRL_4B_PRS BIT(3) /* PAB Reset-Select */
+#define RTC_CTRL_4B_RIE BIT(2) /* RAM Clear-Interrupt Enable */
+#define RTC_CTRL_4B_WIE BIT(1) /* Wake-Up Alarm-Interrupt Enable */
+#define RTC_CTRL_4B_KSE BIT(0) /* Kickstart Interrupt-Enable */
+#define RTC_CTRL_4B_RWK_MASK 0x07 /* RIE + WIE + KSE */
+
+
+/*
+ * Misc register names in Bank 1.
+ *
+ * The DV0 bit in Control Register A must be set to 1 for these registers
+ * to become available, including Extended Control Registers 4A & 4B.
+ */
+#define RTC_BANK1_SSN_MODEL 0x40 /* Model Number */
+#define RTC_BANK1_SSN_BYTE_1 0x41 /* 1st Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_2 0x42 /* 2nd Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_3 0x43 /* 3rd Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_4 0x44 /* 4th Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_5 0x45 /* 5th Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_6 0x46 /* 6th Byte of Serial Number */
+#define RTC_BANK1_SSN_CRC 0x47 /* Serial CRC Byte */
+#define RTC_BANK1_RAM_DATA_PORT 0x53 /* Extended RAM Data Port */
+
+
+/*
+ * Model-specific registers in Bank 1.
+ *
+ * The addresses below differ depending on the model of the RTC chip
+ * selected in the kernel configuration. Not all of these features are
+ * supported in the main driver at present.
+ *
+ * DS1685/DS1687 - Extended NV-SRAM address (LSB only).
+ * DS1689/DS1693 - Vcc, Vbat, Pwr Cycle Counters & Customer-specific S/N.
+ * DS17x85/DS17x87 - Extended NV-SRAM addresses (MSB & LSB) & Write counter.
+ */
+#if defined(CONFIG_RTC_DRV_DS1685)
+#define RTC_BANK1_RAM_ADDR 0x50 /* NV-SRAM Addr */
+#elif defined(CONFIG_RTC_DRV_DS1689)
+#define RTC_BANK1_VCC_CTR_LSB 0x54 /* Vcc Counter Addr (LSB) */
+#define RTC_BANK1_VCC_CTR_MSB 0x57 /* Vcc Counter Addr (MSB) */
+#define RTC_BANK1_VBAT_CTR_LSB 0x58 /* Vbat Counter Addr (LSB) */
+#define RTC_BANK1_VBAT_CTR_MSB 0x5b /* Vbat Counter Addr (MSB) */
+#define RTC_BANK1_PWR_CTR_LSB 0x5c /* Pwr Cycle Counter Addr (LSB) */
+#define RTC_BANK1_PWR_CTR_MSB 0x5d /* Pwr Cycle Counter Addr (MSB) */
+#define RTC_BANK1_UNIQ_SN 0x60 /* Customer-specific S/N */
+#else /* DS17x85/DS17x87 */
+#define RTC_BANK1_RAM_ADDR_LSB 0x50 /* NV-SRAM Addr (LSB) */
+#define RTC_BANK1_RAM_ADDR_MSB 0x51 /* NV-SRAM Addr (MSB) */
+#define RTC_BANK1_WRITE_CTR 0x5e /* RTC Write Counter */
+#endif
+
+
+/*
+ * Model numbers.
+ *
+ * The DS1688/DS1691 and DS1689/DS1693 chips share the same model number
+ * and the manual doesn't indicate any major differences. As such, they
+ * are regarded as the same chip in this driver.
+ */
+#define RTC_MODEL_DS1685 0x71 /* DS1685/DS1687 */
+#define RTC_MODEL_DS17285 0x72 /* DS17285/DS17287 */
+#define RTC_MODEL_DS1689 0x73 /* DS1688/DS1691/DS1689/DS1693 */
+#define RTC_MODEL_DS17485 0x74 /* DS17485/DS17487 */
+#define RTC_MODEL_DS17885 0x78 /* DS17885/DS17887 */
+
+
+/*
+ * Periodic Interrupt Rates / Square-Wave Output Frequency
+ *
+ * Periodic rates are selected by setting the RS3-RS0 bits in Control
+ * Register A and enabled via either the E32K bit in Extended Control
+ * Register 4B or the SQWE bit in Control Register B.
+ *
+ * E32K overrides the settings of RS3-RS0 and outputs a frequency of 32768Hz
+ * on the SQW pin of the RTC chip. While there are 16 possible selections,
+ * the 1-of-16 decoder is only able to divide the base 32768Hz signal into 13
+ * smaller frequencies. The values 0x01 and 0x02 are not used and are
+ * synonymous with 0x08 and 0x09, respectively.
+ *
+ * When E32K is set to a logic 1, periodic interrupts are disabled and reading
+ * /dev/rtc will return -EINVAL. This also applies if the periodic interrupt
+ * frequency is set to 0Hz.
+ *
+ * Not currently used by the rtc-ds1685 driver because the RTC core removed
+ * support for hardware-generated periodic-interrupts in favour of
+ * hrtimer-generated interrupts. But these defines are kept around for use
+ * in userland, as documentation of the hardware, and possible future use if
+ * hardware-generated periodic interrupts are ever added back.
+ */
+ /* E32K RS3 RS2 RS1 RS0 */
+#define RTC_SQW_8192HZ 0x03 /* 0 0 0 1 1 */
+#define RTC_SQW_4096HZ 0x04 /* 0 0 1 0 0 */
+#define RTC_SQW_2048HZ 0x05 /* 0 0 1 0 1 */
+#define RTC_SQW_1024HZ 0x06 /* 0 0 1 1 0 */
+#define RTC_SQW_512HZ 0x07 /* 0 0 1 1 1 */
+#define RTC_SQW_256HZ 0x08 /* 0 1 0 0 0 */
+#define RTC_SQW_128HZ 0x09 /* 0 1 0 0 1 */
+#define RTC_SQW_64HZ 0x0a /* 0 1 0 1 0 */
+#define RTC_SQW_32HZ 0x0b /* 0 1 0 1 1 */
+#define RTC_SQW_16HZ 0x0c /* 0 1 1 0 0 */
+#define RTC_SQW_8HZ 0x0d /* 0 1 1 0 1 */
+#define RTC_SQW_4HZ 0x0e /* 0 1 1 1 0 */
+#define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */
+#define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */
+#define RTC_SQW_32768HZ 32768 /* 1 - - - - */
+#define RTC_MAX_USER_FREQ 8192
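A sketch of how platform code might program one of these rates (1024Hz here) through the read/write hooks in struct ds1685_priv; enabling SQWE or E32K and any locking are left out.

#include <linux/rtc/ds1685.h>

static void ds1685_demo_set_1024hz(struct ds1685_priv *rtc)
{
	u8 ctrla;

	/* Rewrite only the RS3-RS0 field of Control Register A. */
	ctrla = rtc->read(rtc, RTC_CTRL_A);
	ctrla &= ~RTC_CTRL_A_RS_MASK;
	ctrla |= RTC_SQW_1024HZ & RTC_CTRL_A_RS_MASK;
	rtc->write(rtc, RTC_CTRL_A, ctrla);
}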
+
+
+/*
+ * NVRAM data & addresses:
+ * - 50 bytes of NVRAM are available just past the clock registers.
+ * - 64 additional bytes are available in Bank0.
+ *
+ * Extended, battery-backed NV-SRAM:
+ * - DS1685/DS1687 - 128 bytes.
+ * - DS1689/DS1693 - 0 bytes.
+ * - DS17285/DS17287 - 2048 bytes.
+ * - DS17485/DS17487 - 4096 bytes.
+ * - DS17885/DS17887 - 8192 bytes.
+ */
+#define NVRAM_TIME_BASE 0x0e /* NVRAM Addr in Time regs */
+#define NVRAM_BANK0_BASE 0x40 /* NVRAM Addr in Bank0 regs */
+#define NVRAM_SZ_TIME 50
+#define NVRAM_SZ_BANK0 64
+#if defined(CONFIG_RTC_DRV_DS1685)
+# define NVRAM_SZ_EXTND 128
+#elif defined(CONFIG_RTC_DRV_DS1689)
+# define NVRAM_SZ_EXTND 0
+#elif defined(CONFIG_RTC_DRV_DS17285)
+# define NVRAM_SZ_EXTND 2048
+#elif defined(CONFIG_RTC_DRV_DS17485)
+# define NVRAM_SZ_EXTND 4096
+#elif defined(CONFIG_RTC_DRV_DS17885)
+# define NVRAM_SZ_EXTND 8192
+#endif
+#define NVRAM_TOTAL_SZ_BANK0 (NVRAM_SZ_TIME + NVRAM_SZ_BANK0)
+#define NVRAM_TOTAL_SZ (NVRAM_TOTAL_SZ_BANK0 + NVRAM_SZ_EXTND)
+
+
+/*
+ * Function Prototypes.
+ */
+extern void __noreturn
+ds1685_rtc_poweroff(struct platform_device *pdev);
+
+#endif /* _LINUX_RTC_DS1685_H_ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8db31ef98d2f..41c60e5302d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1370,6 +1370,8 @@ struct task_struct {
unsigned long atomic_flags; /* Flags needing atomic access. */
+ struct restart_block restart_block;
+
pid_t pid;
pid_t tgid;
@@ -1662,6 +1664,9 @@ struct task_struct {
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
+#ifdef CONFIG_KASAN
+ unsigned int kasan_depth;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
@@ -2145,6 +2150,7 @@ extern unsigned long long notrace sched_clock(void);
*/
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
+extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
diff --git a/include/linux/security.h b/include/linux/security.h
index ba96471c11ba..a1b7dbd127ff 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1281,6 +1281,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @alter contains the flag indicating whether changes are to be made.
* Return 0 if permission is granted.
*
+ * @binder_set_context_mgr
+ * Check whether @mgr is allowed to be the binder context manager.
+ * @mgr contains the task_struct for the task being registered.
+ * Return 0 if permission is granted.
+ * @binder_transaction
+ * Check whether @from is allowed to invoke a binder transaction call
+ * to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_binder
+ * Check whether @from is allowed to transfer a binder reference to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_file
+ * Check whether @from is allowed to transfer @file to @to.
+ * @from contains the task_struct for the sending task.
+ * @file contains the struct file being transferred.
+ * @to contains the task_struct for the receiving task.
+ *
* @ptrace_access_check:
* Check permission before allowing the current process to trace the
* @child process.
@@ -1441,6 +1460,14 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
+ int (*binder_set_context_mgr) (struct task_struct *mgr);
+ int (*binder_transaction) (struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_binder) (struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_file) (struct task_struct *from,
+ struct task_struct *to, struct file *file);
+
int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
int (*ptrace_traceme) (struct task_struct *parent);
int (*capget) (struct task_struct *target,
@@ -1739,6 +1766,13 @@ extern void __init security_fixup_ops(struct security_operations *ops);
/* Security operations */
+int security_binder_set_context_mgr(struct task_struct *mgr);
+int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to);
+int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to);
+int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to, struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
@@ -1927,6 +1961,30 @@ static inline int security_init(void)
return 0;
}
+static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+ return 0;
+}
+
+static inline int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static inline int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static inline int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file)
+{
+ return 0;
+}
+
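As an illustration of the hooks documented above, an IPC path could gate a transaction as below; the surrounding code is invented and only the security_binder_*() calls come from this header.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/security.h>

/* Invented IPC path: refuse the transaction if any LSM objects. */
static int example_binder_transaction(struct task_struct *from,
				      struct task_struct *to,
				      struct file *file)
{
	int ret;

	ret = security_binder_transaction(from, to);
	if (ret)
		return ret;

	if (file) {
		ret = security_binder_transfer_file(from, to, file);
		if (ret)
			return ret;
	}

	/* ... queue the work for the receiving task ... */
	return 0;
}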
static inline int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 9aafe0e24c68..fb7eb9ccb1cd 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -125,9 +125,6 @@ extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len);
extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
-extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
- int nmaskbits);
-
#ifdef CONFIG_BINARY_PRINTF
extern int
seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index cf6a9daaaf6d..afbb1fd77c77 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -126,31 +126,6 @@ int seq_path(struct seq_file *, const struct path *, const char *);
int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
-int seq_bitmap(struct seq_file *m, const unsigned long *bits,
- unsigned int nr_bits);
-static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
-{
- return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
-}
-
-static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
-{
- return seq_bitmap(m, mask->bits, MAX_NUMNODES);
-}
-
-int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
- unsigned int nr_bits);
-
-static inline int seq_cpumask_list(struct seq_file *m,
- const struct cpumask *mask)
-{
- return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
-}
-
-static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
-{
- return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
-}
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index e02acf0a0ec9..a8efa235b7c1 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -85,6 +85,9 @@ struct uart_8250_port {
unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
+ unsigned char canary; /* non-zero during system sleep
+ * if no_console_suspend
+ */
/*
* Some bits in registers are cleared on a read, so they must
@@ -126,6 +129,7 @@ extern int serial8250_do_startup(struct uart_port *port);
extern void serial8250_do_shutdown(struct uart_port *port);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
+extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 057038cf2788..baf3e1d08416 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -123,6 +123,7 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ void (*set_mctrl)(struct uart_port *, unsigned int);
int (*startup)(struct uart_port *port);
void (*shutdown)(struct uart_port *port);
void (*throttle)(struct uart_port *port);
@@ -190,8 +191,10 @@ struct uart_port {
#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */
-#define UPF_HARD_FLOW ((__force upf_t) (1 << 21))
+/* Port has hardware-assisted h/w flow control */
+#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
+#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
@@ -213,11 +216,17 @@ struct uart_port {
#error Change mask not equivalent to userspace-visible bit defines
#endif
- /* status must be updated while holding port lock */
+ /*
+ * Must hold termios_rwsem, port mutex and port lock to change;
+ * can hold any one lock to read.
+ */
upstat_t status;
#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
+#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
+#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
+#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
@@ -391,6 +400,13 @@ static inline bool uart_cts_enabled(struct uart_port *uport)
return !!(uport->status & UPSTAT_CTS_ENABLE);
}
+static inline bool uart_softcts_mode(struct uart_port *uport)
+{
+ upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS;
+
+ return ((uport->status & mask) == UPSTAT_CTS_ENABLE);
+}
+
/*
* The following are helper functions for the low level drivers.
*/
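
A hedged sketch of how a driver might consume the new flags and helper (the foo_* names and the hardware hook are hypothetical): the set_termios() path latches the auto-flow state into port->status, and the core can then tell hardware-offloaded CTS handling apart from the software-assisted case via uart_softcts_mode().

	static void foo_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    struct ktermios *old)
	{
		port->status &= ~(UPSTAT_AUTORTS | UPSTAT_AUTOCTS);
		if ((termios->c_cflag & CRTSCTS) && (port->flags & UPF_HARD_FLOW)) {
			foo_enable_hw_flow_control(port);	/* hypothetical hw hook */
			port->status |= UPSTAT_AUTORTS | UPSTAT_AUTOCTS;
		}
		/* uart_softcts_mode(port) is now true only when CTS handling
		 * is enabled but not offloaded to the hardware. */
	}
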
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index e6fc9567690b..a7f004a3c177 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -104,6 +104,31 @@
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
+#define S3C64XX_UCON_TXBURST_1 (0<<20)
+#define S3C64XX_UCON_TXBURST_4 (1<<20)
+#define S3C64XX_UCON_TXBURST_8 (2<<20)
+#define S3C64XX_UCON_TXBURST_16 (3<<20)
+#define S3C64XX_UCON_TXBURST_MASK (0xf<<20)
+#define S3C64XX_UCON_RXBURST_1 (0<<16)
+#define S3C64XX_UCON_RXBURST_4 (1<<16)
+#define S3C64XX_UCON_RXBURST_8 (2<<16)
+#define S3C64XX_UCON_RXBURST_16 (3<<16)
+#define S3C64XX_UCON_RXBURST_MASK (0xf<<16)
+#define S3C64XX_UCON_TIMEOUT_SHIFT (12)
+#define S3C64XX_UCON_TIMEOUT_MASK (0xf<<12)
+#define S3C64XX_UCON_EMPTYINT_EN (1<<11)
+#define S3C64XX_UCON_DMASUS_EN (1<<10)
+#define S3C64XX_UCON_TXINT_LEVEL (1<<9)
+#define S3C64XX_UCON_RXINT_LEVEL (1<<8)
+#define S3C64XX_UCON_TIMEOUT_EN (1<<7)
+#define S3C64XX_UCON_ERRINT_EN (1<<6)
+#define S3C64XX_UCON_TXMODE_DMA (2<<2)
+#define S3C64XX_UCON_TXMODE_CPU (1<<2)
+#define S3C64XX_UCON_TXMODE_MASK (3<<2)
+#define S3C64XX_UCON_RXMODE_DMA (2<<0)
+#define S3C64XX_UCON_RXMODE_CPU (1<<0)
+#define S3C64XX_UCON_RXMODE_MASK (3<<0)
+
#define S3C2410_UFCON_FIFOMODE (1<<0)
#define S3C2410_UFCON_TXTRIG0 (0<<6)
#define S3C2410_UFCON_RXTRIG8 (1<<4)
@@ -155,6 +180,7 @@
#define S3C2440_UFSTAT_TXMASK (63<<8)
#define S3C2440_UFSTAT_RXMASK (63)
+#define S3C2410_UTRSTAT_TIMEOUT (1<<3)
#define S3C2410_UTRSTAT_TXE (1<<2)
#define S3C2410_UTRSTAT_TXFE (1<<1)
#define S3C2410_UTRSTAT_RXDR (1<<0)
@@ -179,8 +205,10 @@
#define S3C64XX_UINTM 0x38
#define S3C64XX_UINTM_RXD (0)
+#define S3C64XX_UINTM_ERROR (1)
#define S3C64XX_UINTM_TXD (2)
#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
+#define S3C64XX_UINTM_ERR_MSK (1 << S3C64XX_UINTM_ERROR)
#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
/* Following are specific to S5PV210 */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index f4aee75f00b1..4fcacd915d45 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -20,6 +20,9 @@ struct shrink_control {
/* current node being shrunk (for NUMA aware shrinkers) */
int nid;
+
+ /* current memcg being shrunk (for memcg aware shrinkers) */
+ struct mem_cgroup *memcg;
};
#define SHRINK_STOP (~0UL)
@@ -61,7 +64,8 @@ struct shrinker {
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
/* Flags */
-#define SHRINKER_NUMA_AWARE (1 << 0)
+#define SHRINKER_NUMA_AWARE (1 << 0)
+#define SHRINKER_MEMCG_AWARE (1 << 1)
extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
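
As a sketch of how the new flag is consumed (hypothetical foo_* callbacks; a real shrinker would count and reclaim objects charged to sc->memcg rather than returning placeholders):

	static unsigned long foo_count_objects(struct shrinker *shrink,
					       struct shrink_control *sc)
	{
		/* sc->nid and the new sc->memcg select which node/memcg to scan */
		return 0;
	}

	static unsigned long foo_scan_objects(struct shrinker *shrink,
					      struct shrink_control *sc)
	{
		return SHRINK_STOP;
	}

	static struct shrinker foo_shrinker = {
		.count_objects	= foo_count_objects,
		.scan_objects	= foo_scan_objects,
		.seeks		= DEFAULT_SEEKS,
		.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
	};

	/* register_shrinker(&foo_shrinker) at init; unregister_shrinker() on exit. */
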
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 85ab7d72b54c..1bb36edb66b9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -626,8 +626,11 @@ struct sk_buff {
__u32 hash;
__be16 vlan_proto;
__u16 vlan_tci;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int napi_id;
+#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
+ union {
+ unsigned int napi_id;
+ unsigned int sender_cpu;
+ };
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
@@ -2484,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
}
static inline int skb_add_data(struct sk_buff *skb,
- char __user *from, int copy)
+ struct iov_iter *from, int copy)
{
const int off = skb->len;
if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
- copy, 0, &err);
- if (!err) {
+ __wsum csum = 0;
+ if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+ &csum, from) == copy) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
- } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+ } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
return 0;
__skb_trim(skb, off);
@@ -2693,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
- /* XXX: stripping const */
- return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+ return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
@@ -3071,7 +3072,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
#define skb_checksum_validate_zero_check(skb, proto, check, \
compute_pseudo) \
- __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo)
+ __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
#define skb_checksum_simple_validate(skb) \
__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
@@ -3096,6 +3097,27 @@ do { \
compute_pseudo(skb, proto)); \
} while (0)
+/* Update skb and packet to reflect the remote checksum offload operation.
+ * When called, ptr indicates the starting point for skb->csum when
+ * ip_summed is CHECKSUM_COMPLETE. If we need to create a checksum-complete
+ * value here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
+ */
+static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset)
+{
+ __wsum delta;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ __skb_checksum_complete(skb);
+ skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
+ }
+
+ delta = remcsum_adjust(ptr, skb->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+}
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
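
A brief, hedged illustration of the iov_iter-based helpers above: a protocol sendmsg() path copying len bytes of user data into a freshly reserved skb area (foo_fill_skb is hypothetical, error handling condensed).

	static int foo_fill_skb(struct sk_buff *skb, struct msghdr *msg, int len)
	{
		/* consumes len bytes from msg->msg_iter; -EFAULT on a short copy */
		return memcpy_from_msg(skb_put(skb, len), msg, len);
	}
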
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9a139b637069..76f1feeabd38 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -104,6 +104,7 @@
(unsigned long)ZERO_SIZE_PTR)
#include <linux/kmemleak.h>
+#include <linux/kasan.h>
struct mem_cgroup;
/*
@@ -115,14 +116,12 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
-#ifdef CONFIG_MEMCG_KMEM
-struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
- struct kmem_cache *,
- const char *);
-#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
-void kmem_cache_free(struct kmem_cache *, void *);
+
+void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *);
+void memcg_destroy_kmem_caches(struct mem_cgroup *);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -289,6 +288,7 @@ static __always_inline int kmalloc_index(size_t size)
void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void kmem_cache_free(struct kmem_cache *, void *);
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -326,7 +326,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
gfp_t flags, size_t size)
{
- return kmem_cache_alloc(s, flags);
+ void *ret = kmem_cache_alloc(s, flags);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
}
static __always_inline void *
@@ -334,7 +337,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node, size_t size)
{
- return kmem_cache_alloc_node(s, gfpflags, node);
+ void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
}
#endif /* CONFIG_TRACING */
@@ -474,14 +480,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
+
+struct memcg_cache_array {
+ struct rcu_head rcu;
+ struct kmem_cache *entries[0];
+};
+
/*
* This is the main placeholder for memcg-related information in kmem caches.
- * struct kmem_cache will hold a pointer to it, so the memory cost while
- * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
- * would otherwise be if that would be bundled in kmem_cache: we'll need an
- * extra pointer chase. But the trade off clearly lays in favor of not
- * penalizing non-users.
- *
* Both the root cache and the child caches will have it. For the root cache,
* this will hold a dynamically allocated array large enough to hold
* information about the currently limited memcgs in the system. To allow the
@@ -491,19 +497,18 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* Child caches will hold extra metadata needed for its operation. Fields are:
*
* @memcg: pointer to the memcg this cache belongs to
- * @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
+ *
+ * Both root and child caches of the same kind are linked into a list chained
+ * through @list.
*/
struct memcg_cache_params {
bool is_root_cache;
+ struct list_head list;
union {
- struct {
- struct rcu_head rcu_head;
- struct kmem_cache *memcg_caches[0];
- };
+ struct memcg_cache_array __rcu *memcg_caches;
struct {
struct mem_cgroup *memcg;
- struct list_head list;
struct kmem_cache *root_cache;
};
};
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index b869d1662ba3..33d049066c3d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -70,7 +70,7 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
- struct memcg_cache_params *memcg_params;
+ struct memcg_cache_params memcg_params;
#endif
struct kmem_cache_node *node[MAX_NUMNODES];
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d82abd40a3c0..33885118523c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
struct kobject kobj; /* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
- struct memcg_cache_params *memcg_params;
+ struct memcg_cache_params memcg_params;
int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
struct kset *memcg_kset;
@@ -110,4 +110,23 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
}
#endif
+
+/**
+ * virt_to_obj - returns address of the beginning of object.
+ * @s: object's kmem_cache
+ * @slab_page: address of slab page
+ * @x: address within object memory range
+ *
+ * Returns address of the beginning of object
+ */
+static inline void *virt_to_obj(struct kmem_cache *s,
+ const void *slab_page,
+ const void *x)
+{
+ return (void *)x - ((x - slab_page) % s->size);
+}
+
+void object_err(struct kmem_cache *s, struct page *page,
+ u8 *object, char *reason);
+
#endif /* _LINUX_SLUB_DEF_H */
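
A two-line, hedged example of the new helper (cachep, page and addr are hypothetical locals), in the spirit of how an error report might use it: recover the base of the object that contains an arbitrary address, then describe it.

	void *object = virt_to_obj(cachep, page_address(page), addr);

	object_err(cachep, page, object, "example: address falls inside this object");
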
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 93dff5fff524..be91db2a7017 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -151,6 +151,13 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
+#ifdef CONFIG_UP_LATE_INIT
+extern void __init up_late_init(void);
+static inline void smp_init(void) { up_late_init(); }
+#else
+static inline void smp_init(void) { }
+#endif
+
#endif /* !SMP */
/*
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6e49a14365dc..5c19cba34dce 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -318,13 +318,6 @@ struct ucred {
/* IPX options */
#define IPX_TYPE 1
-extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
- struct iovec *iov,
- int offset,
- unsigned int len, __wsum *csump);
-extern unsigned long iov_pages(const struct iovec *iov, int offset,
- unsigned long nr_segs);
-
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index b2b1afbb3202..cd519a11c2c6 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
*/
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
index bc8677c8eba9..e69e9b51b21a 100644
--- a/include/linux/spi/l4f00242t03.h
+++ b/include/linux/spi/l4f00242t03.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
index 555d254e6606..fdd1d1d51da5 100644
--- a/include/linux/spi/lms283gf05.h
+++ b/include/linux/spi/lms283gf05.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
index 4835486f58e5..381d368b91b4 100644
--- a/include/linux/spi/mxs-spi.h
+++ b/include/linux/spi/mxs-spi.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __LINUX_SPI_MXS_SPI_H__
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d5a316550177..6d36dacec4ba 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __linux_pxa2xx_spi_h
#define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
#include <linux/clk.h>
-#include <mach/dma.h>
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index e546b2ceb623..a693188cc08b 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_SPI_RENESAS_SPI_H__
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
index a1121f872ac1..aa0d440ab4f0 100644
--- a/include/linux/spi/sh_hspi.h
+++ b/include/linux/spi/sh_hspi.h
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SH_HSPI_H
#define SH_HSPI_H
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index 88a14d81c49e..b087a85f5f72 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
u16 num_chipselect;
unsigned int dma_tx_id;
unsigned int dma_rx_id;
+ u32 dtdl;
+ u32 syncdl;
};
#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8e6de4..ed9489d893a4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @pump_messages: work struct for scheduling work to the message pump
 * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
+ * @idling: the device is entering idle state
* @cur_msg: the currently in-flight message
* @cur_msg_prepared: spi_prepare_message was called for the currently
* in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
+ bool idling;
bool busy;
bool running;
bool rt;
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
index 60b59187e590..414c6fddfcf0 100644
--- a/include/linux/spi/tle62x0.h
+++ b/include/linux/spi/tle62x0.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
struct tle62x0_pdata {
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index 8f721e465e05..563b3b1799a8 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _LINUX_SPI_TSC2005_H
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 262ba4ef9a8e..3e18379dfa6f 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+ _raw_spin_lock_bh_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
@@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -324,6 +327,11 @@ do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
+#define spin_lock_bh_nested(lock, subclass) \
+do { \
+ raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
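
A hedged sketch of the new BH-safe nested variant: when two locks of the same lock class must be held at once in softirq context (say, two hash-bucket locks during a rehash), the second acquisition uses a lockdep subclass so it is not reported as a self-deadlock. The bucket locks are illustrative.

	spin_lock_bh(&old_bucket->lock);
	spin_lock_bh_nested(&new_bucket->lock, SINGLE_DEPTH_NESTING);
	/* ... move entries from old_bucket to new_bucket ... */
	spin_unlock_bh(&new_bucket->lock);
	spin_unlock_bh(&old_bucket->lock);
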
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..5344268e6e62 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..d3afef9d8dbe 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,6 +57,7 @@
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb5d275..9cfd9623fb03 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
struct srcu_struct {
- unsigned completed;
+ unsigned long completed;
struct srcu_struct_array __percpu *per_cpu_ref;
spinlock_t queue_lock; /* protect ->batch_queue, ->running */
bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
* define and init a srcu struct at build time.
 * don't call init_srcu_struct() or cleanup_srcu_struct() on it.
*/
-#define DEFINE_SRCU(name) \
+#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-
-#define DEFINE_STATIC_SRCU(name) \
- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
-long srcu_batches_completed(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
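
Usage is unchanged by the macro consolidation above; both DEFINE_SRCU() and DEFINE_STATIC_SRCU() still expand to a per-CPU array plus the srcu_struct itself. A minimal read-side sketch:

	DEFINE_STATIC_SRCU(foo_srcu);

	static void foo_reader(void)
	{
		int idx = srcu_read_lock(&foo_srcu);
		/* ... dereference data protected by foo_srcu ... */
		srcu_read_unlock(&foo_srcu, idx);
	}
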
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index f7b9100686c3..c0f707ac192b 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -173,6 +173,7 @@
#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
#define SSB_SPROMSIZE_WORDS_R10 230
+#define SSB_SPROMSIZE_WORDS_R11 234
#define SSB_SPROM_BASE1 0x1000
#define SSB_SPROM_BASE31 0x0800
#define SSB_SPROM_REVISION 0x007E
diff --git a/include/linux/string.h b/include/linux/string.h
index 2e22a2e58f3a..e40099e585c9 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -40,9 +40,6 @@ extern int strcmp(const char *,const char *);
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRNICMP
-#define strnicmp strncasecmp
-#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
@@ -115,7 +112,10 @@ extern void * memchr(const void *,int,__kernel_size_t);
#endif
void *memchr_inv(const void *s, int c, size_t n);
+extern void kfree_const(const void *x);
+
extern char *kstrdup(const char *s, gfp_t gfp);
+extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
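
A hedged sketch of the new *_const pair (struct foo and foo_set_name() are hypothetical): kstrdup_const() avoids duplicating strings that already live in .rodata, and kfree_const() frees only real copies, so the two are always used together.

	static int foo_set_name(struct foo *f, const char *name)
	{
		const char *copy = kstrdup_const(name, GFP_KERNEL);

		if (!copy)
			return -ENOMEM;
		kfree_const(f->name);	/* safe whether f->name was .rodata or a copy */
		f->name = copy;
		return 0;
	}
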
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 6eb567ac56bc..657571817260 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -10,8 +10,8 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-int string_get_size(u64 size, enum string_size_units units,
- char *buf, int len);
+void string_get_size(u64 size, enum string_size_units units,
+ char *buf, int len);
#define UNESCAPE_SPACE 0x01
#define UNESCAPE_OCTAL 0x02
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index d86acc63b25f..598ba80ec30c 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -57,7 +57,7 @@ struct rpc_clnt {
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
int cl_nodelen; /* nodename length */
- char cl_nodename[UNX_MAXNODENAME];
+ char cl_nodename[UNX_MAXNODENAME+1];
struct rpc_pipe_dir_head cl_pipedir_objects;
struct rpc_clnt * cl_parent; /* Points to parent of clones */
struct rpc_rtt cl_rtt_default;
@@ -112,6 +112,7 @@ struct rpc_create_args {
struct sockaddr *saddress;
const struct rpc_timeout *timeout;
const char *servername;
+ const char *nodename;
const struct rpc_program *program;
u32 prognumber; /* overrides program->number */
u32 version;
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index eecb5a71e6c0..7e61a17030a4 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -79,6 +79,8 @@ struct rpc_clnt;
struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *);
void rpc_count_iostats(const struct rpc_task *,
struct rpc_iostats *);
+void rpc_count_iostats_metrics(const struct rpc_task *,
+ struct rpc_iostats *);
void rpc_print_iostats(struct seq_file *, struct rpc_clnt *);
void rpc_free_iostats(struct rpc_iostats *);
@@ -87,6 +89,8 @@ void rpc_free_iostats(struct rpc_iostats *);
static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
static inline void rpc_count_iostats(const struct rpc_task *task,
struct rpc_iostats *stats) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *,
+ struct rpc_iostats *) {}
static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index b78f16b1dea3..f33c5a4d6fe4 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -42,6 +42,9 @@
#include <linux/types.h>
+#define RPCRDMA_VERSION 1
+#define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION)
+
struct rpcrdma_segment {
__be32 rs_handle; /* Registered memory handle */
__be32 rs_length; /* Length of the chunk in bytes */
@@ -95,7 +98,10 @@ struct rpcrdma_msg {
} rm_body;
};
-#define RPCRDMA_HDRLEN_MIN 28
+/*
+ * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
+ */
+#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
enum rpcrdma_errcode {
ERR_VERS = 1,
@@ -115,4 +121,10 @@ enum rpcrdma_proc {
RDMA_ERROR = 4 /* An RPC RDMA encoding error */
};
+#define rdma_msg cpu_to_be32(RDMA_MSG)
+#define rdma_nomsg cpu_to_be32(RDMA_NOMSG)
+#define rdma_msgp cpu_to_be32(RDMA_MSGP)
+#define rdma_done cpu_to_be32(RDMA_DONE)
+#define rdma_error cpu_to_be32(RDMA_ERROR)
+
#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 6f22cfeef5e3..fae6fb947fc8 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -110,7 +110,7 @@ struct svc_serv {
* We use sv_nrthreads as a reference count. svc_destroy() drops
* this refcount, so we need to bump it up around operations that
* change the number of threads. Horrible, but there it is.
- * Should be called with the BKL held.
+ * Should be called with the "service mutex" held.
*/
static inline void svc_get(struct svc_serv *serv)
{
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 975da754c778..df8edf8ec914 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -63,8 +63,6 @@ extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;
-#define RPCRDMA_VERSION 1
-
/*
* Contexts are built when an RDMA request is created and are a
* record of the resources that can be recovered when the request
@@ -79,6 +77,7 @@ struct svc_rdma_op_ctxt {
enum ib_wr_opcode wr_op;
enum ib_wc_status wc_status;
u32 byte_len;
+ u32 position;
struct svcxprt_rdma *xprt;
unsigned long flags;
enum dma_data_direction direction;
@@ -150,6 +149,10 @@ struct svcxprt_rdma {
struct ib_cq *sc_rq_cq;
struct ib_cq *sc_sq_cq;
struct ib_mr *sc_phys_mr; /* MR for server memory */
+ int (*sc_reader)(struct svcxprt_rdma *,
+ struct svc_rqst *,
+ struct svc_rdma_op_ctxt *,
+ int *, u32 *, u32, u32, u64, bool);
u32 sc_dev_caps; /* distilled device caps */
u32 sc_dma_lkey; /* local dma key */
unsigned int sc_frmr_pg_list_len;
@@ -178,8 +181,6 @@ struct svcxprt_rdma {
#define RPCRDMA_MAX_REQ_SIZE 4096
/* svc_rdma_marshal.c */
-extern void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *,
- int *, int *);
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -197,6 +198,12 @@ extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
+extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
+ struct svc_rdma_op_ctxt *, int *, u32 *,
+ u32, u32, u64, bool);
+extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
+ struct svc_rdma_op_ctxt *, int *, u32 *,
+ u32, u32, u64, bool);
/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 9d27ac45b909..8b93ef53df3c 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -347,6 +347,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
int xs_swapper(struct rpc_xprt *xprt, int enable);
+bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
+void xprt_unlock_connect(struct rpc_xprt *, void *);
+
/*
* Reserved bit positions in xprt->state
*/
@@ -357,10 +360,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
-#define XPRT_CONNECTION_ABORT (7)
-#define XPRT_CONNECTION_CLOSE (8)
#define XPRT_CONGESTED (9)
-#define XPRT_CONNECTION_REUSE (10)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 34e8b60ab973..7067eca501e2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -437,16 +437,6 @@ extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-#ifdef CONFIG_MEMCG
-extern void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
-#else
-static inline void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
-{
-}
-#endif
-
#else /* CONFIG_SWAP */
#define swap_address_space(entry) (NULL)
@@ -547,11 +537,6 @@ static inline swp_entry_t get_swap_page(void)
return entry;
}
-static inline void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
-{
-}
-
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6adfb7bfbf44..cedf3d3c373f 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
- return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte);
+ return !pte_none(pte) && !pte_present(pte);
}
#endif
@@ -66,7 +66,6 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- BUG_ON(pte_file(pte));
if (pte_swp_soft_dirty(pte))
pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
@@ -82,7 +81,6 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
swp_entry_t arch_entry;
arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
- BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
return __swp_entry_to_pte(arch_entry);
}
@@ -137,6 +135,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}
+extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
@@ -150,6 +150,8 @@ static inline int is_migration_entry(swp_entry_t swp)
}
#define migration_entry_to_page(swp) NULL
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 85893d744901..76d1e38aabe1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -410,12 +410,16 @@ asmlinkage long sys_newlstat(const char __user *filename,
struct stat __user *statbuf);
asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf);
asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
-#if BITS_PER_LONG == 32
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
asmlinkage long sys_stat64(const char __user *filename,
struct stat64 __user *statbuf);
asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
asmlinkage long sys_lstat64(const char __user *filename,
struct stat64 __user *statbuf);
+asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
+ struct stat64 __user *statbuf, int flag);
+#endif
+#if BITS_PER_LONG == 32
asmlinkage long sys_truncate64(const char __user *path, loff_t length);
asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
@@ -771,8 +775,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
struct stat __user *statbuf, int flag);
-asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
- struct stat64 __user *statbuf, int flag);
asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
int bufsiz);
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 67309ece0772..1a7adb411647 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -115,6 +115,7 @@ struct tcp_request_sock {
u32 rcv_isn;
u32 snt_isn;
u32 snt_synack; /* synack sent time */
+ u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
* after data-in-SYN.
@@ -152,6 +153,7 @@ struct tcp_sock {
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
+ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 tsoffset; /* timestamp offset */
@@ -340,6 +342,10 @@ struct tcp_timewait_sock {
u32 tw_rcv_wnd;
u32 tw_ts_offset;
u32 tw_ts_recent;
+
+ /* The time we sent the last out-of-window ACK: */
+ u32 tw_last_oow_ack_time;
+
long tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 884d6263e962..c78dcfeaf25f 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,6 +86,7 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
+extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
@@ -261,7 +262,7 @@ struct kim_data_s {
struct completion kim_rcvd, ldisc_installed;
char resp_buffer[30];
const struct firmware *fw_entry;
- long nshutdown;
+ unsigned nshutdown;
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
@@ -269,8 +270,8 @@ struct kim_data_s {
struct chip_version version;
unsigned char ldisc_install;
unsigned char dev_name[UART_DEV_NAME_LEN + 1];
- unsigned char flow_cntrl;
- unsigned long baud_rate;
+ unsigned flow_cntrl;
+ unsigned baud_rate;
};
/**
@@ -436,10 +437,10 @@ struct gps_event_hdr {
*
*/
struct ti_st_plat_data {
- long nshutdown_gpio;
+ u32 nshutdown_gpio;
unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */
- unsigned char flow_cntrl; /* flow control flag */
- unsigned long baud_rate;
+ u32 flow_cntrl; /* flow control flag */
+ u32 baud_rate;
int (*suspend)(struct platform_device *, pm_message_t);
int (*resume)(struct platform_device *);
int (*chip_enable) (struct kim_data_s *);
diff --git a/include/linux/time.h b/include/linux/time.h
index 203c2ad40d71..beebe3a02d43 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
return true;
}
+static inline bool timeval_valid(const struct timeval *tv)
+{
+ /* Dates before 1970 are bogus */
+ if (tv->tv_sec < 0)
+ return false;
+
+	/* Can't have more microseconds than a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+
+ return true;
+}
+
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
#define CURRENT_TIME (current_kernel_time())
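
A short, hedged example of the new helper in a setitimer()-style path (utv is a hypothetical __user pointer): validate a timeval copied from user space before using it.

	struct timeval tv;

	if (copy_from_user(&tv, utv, sizeof(tv)))
		return -EFAULT;
	if (!timeval_valid(&tv))
		return -EINVAL;
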
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
new file mode 100644
index 000000000000..4382035a75bb
--- /dev/null
+++ b/include/linux/timecounter.h
@@ -0,0 +1,139 @@
+/*
+ * linux/include/linux/timecounter.h
+ *
+ * based on code that migrated away from
+ * linux/include/linux/clocksource.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_TIMECOUNTER_H
+#define _LINUX_TIMECOUNTER_H
+
+#include <linux/types.h>
+
+/* simplify initialization of mask field */
+#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+
+/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ * Provides completely state-free accessors to the underlying hardware.
+ * Depending on which hardware it reads, the cycle counter may wrap
+ * around quickly. Locking rules (if necessary) have to be defined
+ * by the implementor and user of specific instances of this API.
+ *
+ * @read: returns the current cycle value
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters,
+ * see CYCLECOUNTER_MASK() helper macro
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+ cycle_t (*read)(const struct cyclecounter *cc);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * Contains the state needed by timecounter_read() to detect
+ * cycle counter wrap around. Initialize with
+ * timecounter_init(). Also used to convert cycle counts into the
+ * corresponding nanosecond counts with timecounter_cyc2time(). Users
+ * of this code are responsible for initializing the underlying
+ * cycle counter hardware, locking issues and reading the time
+ * more often than the cycle counter wraps around. The nanosecond
+ * counter will only wrap around after ~585 years.
+ *
+ * @cc: the cycle counter used by this instance
+ * @cycle_last: most recent cycle counter value seen by
+ * timecounter_read()
+ * @nsec: continuously increasing count
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: accumulated fractional nanoseconds
+ */
+struct timecounter {
+ const struct cyclecounter *cc;
+ cycle_t cycle_last;
+ u64 nsec;
+ u64 mask;
+ u64 frac;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc: Pointer to cycle counter.
+ * @cycles: Cycles
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: pointer to storage for the fractional nanoseconds.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+ cycle_t cycles, u64 mask, u64 *frac)
+{
+ u64 ns = (u64) cycles;
+
+ ns = (ns * cc->mult) + *frac;
+ *frac = ns & mask;
+ return ns >> cc->shift;
+}
+
+/**
+ * timecounter_adjtime - Shifts the time of the clock.
+ * @delta: Desired change in nanoseconds.
+ */
+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+ tc->nsec += delta;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc: Pointer to time counter which is to be initialized/reset
+ * @cc: A cycle counter, ready to be used.
+ * @start_tstamp: Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ * plus the initial time stamp
+ * @tc: Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ * time base as values returned by
+ * timecounter_read()
+ * @tc: Pointer to time counter.
+ * @cycle_tstamp: a value returned by tc->cc->read()
+ *
+ * Cycle counts are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cc->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp);
+
+#endif
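
A hedged sketch of how a driver is expected to use the API declared above: wrap the free-running hardware counter in a cyclecounter, then let the timecounter accumulate nanoseconds across wraps. foo_read_hw_counter() and the mult/shift pair are illustrative only; real drivers derive mult/shift from the counter frequency.

	static cycle_t foo_cc_read(const struct cyclecounter *cc)
	{
		return foo_read_hw_counter();		/* hypothetical register read */
	}

	static const struct cyclecounter foo_cc = {
		.read	= foo_cc_read,
		.mask	= CYCLECOUNTER_MASK(48),	/* 48-bit hardware counter */
		.mult	= 1 << 10,			/* ns = (cycles * mult) >> shift */
		.shift	= 10,
	};

	static struct timecounter foo_tc;

	static void foo_timecounter_start(void)
	{
		timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
	}

	static u64 foo_timecounter_now(void)
	{
		/* wrap-safe as long as this is called more often than the
		 * hardware counter wraps around */
		return timecounter_read(&foo_tc);
	}
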
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9b63d13ba82b..3eaae4754275 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,6 +33,7 @@ extern time64_t ktime_get_real_seconds(void);
extern int __getnstimeofday64(struct timespec64 *tv);
extern void getnstimeofday64(struct timespec64 *tv);
+extern void getboottime64(struct timespec64 *ts);
#if BITS_PER_LONG == 64
/**
@@ -72,6 +73,11 @@ static inline struct timespec get_monotonic_coarse(void)
{
return get_monotonic_coarse64();
}
+
+static inline void getboottime(struct timespec *ts)
+{
+ return getboottime64(ts);
+}
#else
/**
* Deprecated. Use do_settimeofday64().
@@ -129,9 +135,15 @@ static inline struct timespec get_monotonic_coarse(void)
{
return timespec64_to_timespec(get_monotonic_coarse64());
}
-#endif
-extern void getboottime(struct timespec *ts);
+static inline void getboottime(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getboottime64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+#endif
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
@@ -217,6 +229,11 @@ static inline void get_monotonic_boottime(struct timespec *ts)
*ts = ktime_to_timespec(ktime_get_boottime());
}
+static inline void get_monotonic_boottime64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_boottime());
+}
+
static inline void timekeeping_clocktai(struct timespec *ts)
{
*ts = ktime_to_timespec(ktime_get_clocktai());
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e08e21e5f601..c72851328ca9 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -173,7 +173,7 @@ extern void syscall_unregfunc(void);
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond),,); \
- if (IS_ENABLED(CONFIG_LOCKDEP)) { \
+ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
rcu_read_lock_sched_notrace(); \
rcu_dereference_sched(__tracepoint_##name.funcs);\
rcu_read_unlock_sched_notrace(); \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7d66ae508e5c..358a337af598 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -14,6 +14,29 @@
#include <linux/llist.h>
+/*
+ * Lock subclasses for tty locks
+ *
+ * TTY_LOCK_NORMAL is for normal ttys and master ptys.
+ * TTY_LOCK_SLAVE is for slave ptys only.
+ *
+ * Lock subclasses are necessary for handling nested locking with pty pairs.
+ * tty locks which use nested locking:
+ *
+ * legacy_mutex - Nested tty locks are necessary for releasing pty pairs.
+ * The stable lock order is master pty first, then slave pty.
+ * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem.
+ * Subclassing this lock enables the slave pty to hold its
+ * termios_rwsem when claiming the master tty_buffer lock.
+ * tty_buffer lock - slave ptys can claim nested buffer lock when handling
+ * signal chars. The stable lock order is slave pty, then
+ * master.
+ */
+
+enum {
+ TTY_LOCK_NORMAL = 0,
+ TTY_LOCK_SLAVE,
+};
/*
* (Note: the *_driver.minor_start values 1, 64, 128, 192 are
@@ -443,6 +466,7 @@ extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
+extern void tty_buffer_set_lock_subclass(struct tty_port *port);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
extern void tty_termios_encode_baud_rate(struct ktermios *termios,
@@ -467,7 +491,6 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
-extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
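
A hedged sketch of the subclass usage documented above, taking a pty pair's legacy_mutex in the stable master-then-slave order (master and slave are hypothetical struct tty_struct pointers):

	mutex_lock_nested(&master->legacy_mutex, TTY_LOCK_NORMAL);
	mutex_lock_nested(&slave->legacy_mutex, TTY_LOCK_SLAVE);
	/* ... tear down the pty pair ... */
	mutex_unlock(&slave->legacy_mutex);
	mutex_unlock(&master->legacy_mutex);
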
diff --git a/include/linux/types.h b/include/linux/types.h
index a0bb7048687f..6747247e3f9f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,12 +135,9 @@ typedef unsigned long blkcnt_t;
#endif
/*
- * The type of an index into the pagecache. Use a #define so asm/types.h
- * can override it.
+ * The type of an index into the pagecache.
*/
-#ifndef pgoff_t
#define pgoff_t unsigned long
-#endif
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
@@ -213,5 +210,8 @@ struct callback_head {
};
#define rcu_head callback_head
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ee3277593222..247cfdcc4b08 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,11 +49,7 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- convert_csum:1;/* On receive, convert checksum
- * unnecessary to checksum complete
- * if possible.
- */
+ no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
-static inline void udp_set_convert_csum(struct sock *sk, bool val)
-{
- udp_sk(sk)->convert_csum = val;
-}
-
-static inline bool udp_get_convert_csum(struct sock *sk)
-{
- return udp_sk(sk)->convert_csum;
-}
-
#define udp_portaddr_for_each_entry(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1c5e453f7ea9..3e0cb4ea3905 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
- int offset, int len);
-int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
- int offset, int len);
-
#endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f89c24a03bd9..7ee1b5c3b4cb 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -82,7 +82,7 @@ struct usb_host_interface {
int extralen;
unsigned char *extra; /* Extra descriptors */
- /* array of desc.bNumEndpoint endpoints associated with this
+ /* array of desc.bNumEndpoints endpoints associated with this
* interface setting. these will be in no particular order.
*/
struct usb_host_endpoint *endpoint;
@@ -127,10 +127,6 @@ enum usb_interface_condition {
* to the sysfs representation for that device.
* @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
- * @reset_running: set to 1 if the interface is currently running a
- * queued reset so that usb_cancel_queued_reset() doesn't try to
- * remove from the workqueue when running inside the worker
- * thread. See __usb_queue_reset_device().
* @resetting_device: USB core reset the device, so use alt setting 0 as
* current; needs bandwidth alloc after reset.
*
@@ -181,7 +177,6 @@ struct usb_interface {
unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
unsigned needs_binding:1; /* needs delayed unbind/rebind */
- unsigned reset_running:1;
unsigned resetting_device:1; /* true: bandwidth alloc after reset */
struct device dev; /* interface specific device info */
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index 7eb4dcd0d386..db0431b39a63 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -34,6 +34,8 @@ struct usb_hcd;
* after initialization.
* @no_io_watchdog: set to 1 if the controller does not need the I/O
* watchdog to run.
+ * @reset_on_resume: set to 1 if the controller needs to be reset after
+ * a suspend / resume cycle (but can't detect that itself).
*
* These are general configuration options for the EHCI controller. All of
* these options are activating more or less workarounds for some hardware.
@@ -45,6 +47,8 @@ struct usb_ehci_pdata {
unsigned big_endian_desc:1;
unsigned big_endian_mmio:1;
unsigned no_io_watchdog:1;
+ unsigned reset_on_resume:1;
+ unsigned dma_mask_64:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);
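A board-file sketch (names hypothetical) showing how a platform might request the new reset_on_resume behaviour from the generic EHCI platform driver:

	#include <linux/usb/ehci_pdriver.h>

	static struct usb_ehci_pdata my_board_ehci_pdata = {
		/* Controller loses state over suspend; force a reset on resume. */
		.reset_on_resume = 1,
		/* Set .dma_mask_64 = 1 as well if the controller can do 64-bit DMA. */
	};

my_board_ehci_pdata would then be attached to the ehci-platform device as platform data in the usual way.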
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 70ddb3943b62..e2f00fd8cd47 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -523,6 +523,7 @@ struct usb_gadget_ops {
* enabled HNP support.
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
+ * @is_selfpowered: if the gadget is self-powered.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -563,6 +564,7 @@ struct usb_gadget {
unsigned a_hnp_support:1;
unsigned a_alt_hnp_support:1;
unsigned quirk_ep_out_aligned_size:1;
+ unsigned is_selfpowered:1;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 086bf13307e6..68b1e836dff1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -146,6 +146,8 @@ struct usb_hcd {
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
unsigned tpl_support:1; /* OTG & EH TPL support */
+ unsigned cant_recv_wakeups:1;
+ /* wakeup requests from downstream aren't received */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
@@ -453,6 +455,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
+void usb_init_pool_max(void);
int hcd_buffer_create(struct usb_hcd *hcd);
void hcd_buffer_destroy(struct usb_hcd *hcd);
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index f499c23e6342..bc91b5d380fd 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -1,5 +1,5 @@
-/* USB OTG (On The Go) defines */
/*
+ * USB PHY defines
*
* These APIs may be used between USB controllers. USB device drivers
* (for either host or peripheral roles) don't use these calls; they
@@ -106,7 +106,7 @@ struct usb_phy {
int (*set_power)(struct usb_phy *x,
unsigned mA);
- /* for non-OTG B devices: set transceiver into suspend mode */
+ /* Set transceiver into suspend mode */
int (*set_suspend)(struct usb_phy *x,
int suspend);
diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h
index 68adae83affc..c13632d5292e 100644
--- a/include/linux/usb/usb_phy_generic.h
+++ b/include/linux/usb/usb_phy_generic.h
@@ -2,6 +2,7 @@
#define __LINUX_USB_NOP_XCEIV_H
#include <linux/usb/otg.h>
+#include <linux/gpio/consumer.h>
struct usb_phy_generic_platform_data {
enum usb_phy_type type;
@@ -11,6 +12,7 @@ struct usb_phy_generic_platform_data {
unsigned int needs_vcc:1;
unsigned int needs_reset:1; /* deprecated */
int gpio_reset;
+ struct gpio_desc *gpiod_vbus;
};
#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b87696fdf06a..7d7acb35603d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -16,6 +16,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
+#define VM_NO_GUARD 0x00000040 /* don't add guard page */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -75,7 +76,9 @@ extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, const void *caller);
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller);
+
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
@@ -96,8 +99,12 @@ void vmalloc_sync_all(void);
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
- /* return actual size without guard page */
- return area->size - PAGE_SIZE;
+ if (!(area->flags & VM_NO_GUARD))
+ /* return actual size without guard page */
+ return area->size - PAGE_SIZE;
+ else
+ return area->size;
+
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
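A sketch of how the new vm_flags argument might be used to allocate a mapping without the trailing guard page; the parameters are illustrative, not taken from any in-tree caller:

	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	static void *alloc_noguard(unsigned long size)
	{
		/* VM_NO_GUARD skips the guard page, so get_vm_area_size()
		 * reports the full allocation size for this area.
		 */
		return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
					    GFP_KERNEL, PAGE_KERNEL, VM_NO_GUARD,
					    NUMA_NO_NODE,
					    __builtin_return_address(0));
	}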
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 5691f752ce8f..63df3a2a8ce5 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
int mode);
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
- void *iov, size_t iov_size, int mode);
+ struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 057db7d2f448..f38c10ba3ff5 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -21,10 +21,6 @@
#ifndef VT_BUF_HAVE_RW
#define scr_writew(val, addr) (*(addr) = (val))
#define scr_readw(addr) (*(addr))
-#define scr_memcpyw(d, s, c) memcpy(d, s, c)
-#define scr_memmovew(d, s, c) memmove(d, s, c)
-#define VT_BUF_HAVE_MEMCPYW
-#define VT_BUF_HAVE_MEMMOVEW
#endif
#ifndef VT_BUF_HAVE_MEMSETW
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2232ed16635a..2db83349865b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -267,6 +267,21 @@ do { \
__wait_event(wq, condition); \
} while (0)
+#define __io_wait_event(wq, condition) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ io_schedule())
+
+/*
+ * io_wait_event() -- like wait_event() but with io_schedule()
+ */
+#define io_wait_event(wq, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __io_wait_event(wq, condition); \
+} while (0)
+
#define __wait_event_freezable(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule(); try_to_freeze())
@@ -363,7 +378,6 @@ do { \
*/
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
- might_sleep(); \
if (condition) \
break; \
__wait_event_cmd(wq, condition, cmd1, cmd2); \
@@ -991,6 +1005,32 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
}
/**
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ * @timeout: timeout, in jiffies
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), except also takes a
+ * timeout parameter.
+ *
+ * Returned value will be zero if the bit was cleared before the
+ * @timeout elapsed, or non-zero if the @timeout elapsed or process
+ * received a signal and the mode permitted wakeup on that signal.
+ */
+static inline int
+wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_timeout(word, bit,
+ bit_wait_timeout,
+ mode, timeout);
+}
+
+/**
* wait_on_bit_action - wait for a bit to be cleared
* @word: the word being waited on, a kernel virtual address
* @bit: the bit of the word being waited on
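A usage sketch for the wait_on_bit_timeout() helper added above, assuming a hypothetical driver flag word and bit:

	#include <linux/wait.h>
	#include <linux/jiffies.h>

	#define MY_BUSY_BIT	0	/* hypothetical flag bit */

	static int my_wait_idle(unsigned long *flags)
	{
		/* Returns 0 if the bit cleared within 2 seconds, non-zero on
		 * timeout (or on a signal, for interruptible modes).
		 */
		return wait_on_bit_timeout(flags, MY_BUSY_BIT,
					   TASK_UNINTERRUPTIBLE, 2 * HZ);
	}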
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b996e6cde6bb..74db135f9957 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -220,14 +220,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
#define INIT_WORK(_work, _func) \
- do { \
- __INIT_WORK((_work), (_func), 0); \
- } while (0)
+ __INIT_WORK((_work), (_func), 0)
#define INIT_WORK_ONSTACK(_work, _func) \
- do { \
- __INIT_WORK((_work), (_func), 1); \
- } while (0)
+ __INIT_WORK((_work), (_func), 1)
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index f14bd75f08b3..56529b34dc63 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,7 +36,8 @@ enum zpool_mapmode {
ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};
-struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops);
+struct zpool *zpool_create_pool(char *type, char *name,
+ gfp_t gfp, struct zpool_ops *ops);
char *zpool_get_type(struct zpool *pool);
@@ -80,7 +81,7 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(gfp_t gfp, struct zpool_ops *ops);
+ void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops);
void (*destroy)(void *pool);
int (*malloc)(void *pool, size_t size, gfp_t gfp,
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 05c214760977..3283c6a55425 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -36,7 +36,7 @@ enum zs_mapmode {
struct zs_pool;
-struct zs_pool *zs_create_pool(gfp_t flags);
+struct zs_pool *zs_create_pool(char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size);
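The extra name argument lets each pool be identified in diagnostics and statistics. A minimal sketch of the updated creation call (the pool name is arbitrary here):

	#include <linux/zsmalloc.h>

	static struct zs_pool *my_pool_setup(void)
	{
		/* The name is now required; it identifies this pool. */
		return zs_create_pool("my_pool", GFP_KERNEL);
	}

zpool users pass the same kind of name through the new second argument to zpool_create_pool().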
diff --git a/include/media/smiapp.h b/include/media/smiapp.h
index 0b8f124a630c..268a3cdbf6cb 100644
--- a/include/media/smiapp.h
+++ b/include/media/smiapp.h
@@ -65,19 +65,19 @@ struct smiapp_platform_data {
unsigned short i2c_addr_dfl; /* Default i2c addr */
unsigned short i2c_addr_alt; /* Alternate i2c addr */
- unsigned int nvm_size; /* bytes */
- unsigned int ext_clk; /* sensor external clk */
+ uint32_t nvm_size; /* bytes */
+ uint32_t ext_clk; /* sensor external clk */
unsigned int lanes; /* Number of CSI-2 lanes */
- u8 csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
- const s64 *op_sys_clock;
+ uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
+ uint64_t *op_sys_clock;
enum smiapp_module_board_orient module_board_orient;
struct smiapp_flash_strobe_parms *strobe_setup;
int (*set_xclk)(struct v4l2_subdev *sd, int hz);
- int xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
+ int32_t xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
};
#endif /* __SMIAPP_H_ */
diff --git a/include/media/tea575x.h b/include/media/tea575x.h
index 2d4fa59db902..5d096578b736 100644
--- a/include/media/tea575x.h
+++ b/include/media/tea575x.h
@@ -71,6 +71,11 @@ struct snd_tea575x {
int (*ext_init)(struct snd_tea575x *tea);
};
+int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea,
+ struct v4l2_frequency_band *band);
+int snd_tea575x_g_tuner(struct snd_tea575x *tea, struct v4l2_tuner *v);
+int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
+ const struct v4l2_hw_freq_seek *a);
int snd_tea575x_hw_init(struct snd_tea575x *tea);
int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
void snd_tea575x_exit(struct snd_tea575x *tea);
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index eb76cfd47189..3e4fddfc840c 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -124,7 +124,8 @@ struct video_device
spinlock_t fh_lock; /* Lock for all v4l2_fhs */
struct list_head fh_list; /* List of struct v4l2_fh */
- int debug; /* Activates debug level*/
+ /* Internal device debug flags, not for use by drivers */
+ int dev_debug;
/* Video standard vars */
v4l2_std_id tvnorms; /* Supported tv norms */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 53605f0f9903..8537983b9b22 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -291,9 +291,18 @@ struct v4l2_ioctl_ops {
/* v4l debugging and diagnostics */
-/* Debug bitmask flags to be used on V4L2 */
-#define V4L2_DEBUG_IOCTL 0x01
-#define V4L2_DEBUG_IOCTL_ARG 0x02
+/* Device debug flags to be used with the video device debug attribute */
+
+/* Just log the ioctl name + error code */
+#define V4L2_DEV_DEBUG_IOCTL 0x01
+/* Log the ioctl name arguments + error code */
+#define V4L2_DEV_DEBUG_IOCTL_ARG 0x02
+/* Log the file operations open, release, mmap and get_unmapped_area */
+#define V4L2_DEV_DEBUG_FOP 0x04
+/* Log the read and write file operations and the VIDIOC_(D)QBUF ioctls */
+#define V4L2_DEV_DEBUG_STREAMING 0x08
+/* Log poll() */
+#define V4L2_DEV_DEBUG_POLL 0x10
/* Video standard functions */
extern const char *v4l2_norm_to_name(v4l2_std_id id);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5860292d42eb..5beeb8744fd1 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -342,8 +342,6 @@ struct v4l2_subdev_video_ops {
struct v4l2_dv_timings *timings);
int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index,
u32 *code);
- int (*enum_mbus_fsizes)(struct v4l2_subdev *sd,
- struct v4l2_frmsizeenum *fsize);
int (*g_mbus_fmt)(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt);
int (*try_mbus_fmt)(struct v4l2_subdev *sd,
@@ -503,10 +501,6 @@ struct v4l2_subdev_pad_ops {
struct v4l2_subdev_format *format);
int (*set_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *format);
- int (*set_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop);
- int (*get_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop);
int (*get_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_selection *sel);
int (*set_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index fb6fd4d8f4ed..d8b27854e3bf 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -84,16 +84,8 @@ struct videobuf_dma_sg_memory {
* Despite the name, this is totally unrelated to videobuf, except that
* videobuf-dma-sg uses the same API internally.
*/
-void videobuf_dma_init(struct videobuf_dmabuf *dma);
-int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
- unsigned long data, unsigned long size);
-int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
- int nr_pages);
-int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
- dma_addr_t addr, int nr_pages);
int videobuf_dma_free(struct videobuf_dmabuf *dma);
-int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma);
int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index d63965a1faaf..c3bfa473c3aa 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -56,9 +56,3 @@ struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_fro
int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
#endif /* _VIDEOBUF_DVB_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index d13573bb879e..80456f72d70a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -62,6 +62,9 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg);
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict,
+ u32 banned_flags);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 58695ffeb138..e00455aab18c 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -273,7 +273,7 @@ struct l2cap_ctrl {
struct hci_dev;
-typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
struct hci_req_ctrl {
bool start;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 40129b3838b2..8e54f825153c 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -102,6 +102,28 @@ enum {
*/
HCI_QUIRK_FIXUP_BUFFER_SIZE,
+ /* When this quirk is set, then a controller that does not
+ * indicate support for Inquiry Result with RSSI is assumed to
+ * support it anyway. Some early Bluetooth 1.2 controllers had
+ * wrongly configured local features that require forcing
+ * them to enable this mode. Getting RSSI information with the
+ * inquiry responses is preferred since it allows for a better
+ * user experience.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_FIXUP_INQUIRY_MODE,
+
+ /* When this quirk is set, then the HCI Read Local Supported
+ * Commands command is not supported. In general Bluetooth 1.2
+ * and later controllers should support this command. However
+ * some controllers indicate Bluetooth 1.2 support, but do
+ * not support this command.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_LOCAL_COMMANDS,
+
/* When this quirk is set, then no stored link key handling
* is performed. This is mainly due to the fact that the
* HCI Delete Stored Link Key command is advertised, but
@@ -162,8 +184,7 @@ enum {
*/
enum {
HCI_DUT_MODE,
- HCI_FORCE_SC,
- HCI_FORCE_LESC,
+ HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
};
@@ -343,6 +364,7 @@ enum {
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
#define HCI_LE_PING 0x10
+#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_EXT_SCAN_POLICY 0x80
/* Connection modes */
@@ -833,11 +855,26 @@ struct hci_cp_set_event_flt {
#define HCI_CONN_SETUP_AUTO_OFF 0x01
#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
+struct hci_cp_read_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 read_all;
+} __packed;
+struct hci_rp_read_stored_link_key {
+ __u8 status;
+ __u8 max_keys;
+ __u8 num_keys;
+} __packed;
+
#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
struct hci_cp_delete_stored_link_key {
bdaddr_t bdaddr;
__u8 delete_all;
} __packed;
+struct hci_rp_delete_stored_link_key {
+ __u8 status;
+ __u8 num_keys;
+} __packed;
#define HCI_MAX_NAME_LENGTH 248
@@ -1371,6 +1408,39 @@ struct hci_cp_le_conn_param_req_neg_reply {
__u8 reason;
} __packed;
+#define HCI_OP_LE_SET_DATA_LEN 0x2022
+struct hci_cp_le_set_data_len {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+struct hci_rp_le_set_data_len {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_DEF_DATA_LEN 0x2023
+struct hci_rp_le_read_def_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_WRITE_DEF_DATA_LEN 0x2024
+struct hci_cp_le_write_def_data_len {
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
+struct hci_rp_le_read_max_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1796,6 +1866,15 @@ struct hci_ev_le_remote_conn_param_req {
__le16 timeout;
} __packed;
+#define HCI_EV_LE_DATA_LEN_CHANGE 0x07
+struct hci_ev_le_data_len_change {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
struct hci_ev_le_direct_adv_info {
__u8 evt_type;
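Transport drivers opt into the new quirks before registration; a sketch, with the hardware-detection condition left hypothetical:

	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	/* Hypothetical helper called from a transport driver's probe(),
	 * before hci_register_dev(hdev).
	 */
	static void my_apply_quirks(struct hci_dev *hdev, bool broken_local_cmds)
	{
		if (broken_local_cmds)
			set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
		/* Controllers that misreport Inquiry Result with RSSI support: */
		set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
	}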
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3c7827005c25..52863c3e0b13 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -79,6 +79,8 @@ struct discovery_state {
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
+ unsigned long scan_start;
+ unsigned long scan_duration;
};
struct hci_conn_hash {
@@ -145,6 +147,7 @@ struct oob_data {
struct list_head list;
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 present;
u8 hash192[16];
u8 rand192[16];
u8 hash256[16];
@@ -205,6 +208,8 @@ struct hci_dev {
__u16 lmp_subver;
__u16 voice_setting;
__u8 num_iac;
+ __u8 stored_max_keys;
+ __u8 stored_num_keys;
__u8 io_capability;
__s8 inq_tx_power;
__u16 page_scan_interval;
@@ -220,10 +225,17 @@ struct hci_dev {
__u16 le_conn_max_interval;
__u16 le_conn_latency;
__u16 le_supv_timeout;
+ __u16 le_def_tx_len;
+ __u16 le_def_tx_time;
+ __u16 le_max_tx_len;
+ __u16 le_max_tx_time;
+ __u16 le_max_rx_len;
+ __u16 le_max_rx_time;
__u16 discov_interleaved_timeout;
__u16 conn_info_min_age;
__u16 conn_info_max_age;
__u8 ssp_debug_mode;
+ __u8 hw_error_code;
__u32 clock;
__u16 devid_source;
@@ -285,6 +297,7 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
+ struct work_struct error_reset;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -343,6 +356,7 @@ struct hci_dev {
unsigned long dev_flags;
struct delayed_work le_scan_disable;
+ struct delayed_work le_scan_restart;
__s8 adv_tx_power;
__u8 adv_data[HCI_MAX_AD_LENGTH];
@@ -361,6 +375,7 @@ struct hci_dev {
int (*setup)(struct hci_dev *hdev);
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
+ void (*hw_error)(struct hci_dev *hdev, u8 code);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
};
@@ -434,6 +449,7 @@ struct hci_conn {
struct delayed_work le_conn_timeout;
struct device dev;
+ struct dentry *debugfs;
struct hci_dev *hdev;
void *l2cap_data;
@@ -518,6 +534,8 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.uuid_count = 0;
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
+ hdev->discovery.scan_start = 0;
+ hdev->discovery.scan_duration = 0;
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -772,7 +790,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
bool initiator);
-int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
@@ -920,8 +937,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type);
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type);
-int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
- u8 auto_connect);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_all(struct hci_dev *hdev);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);
@@ -930,8 +945,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
-void hci_update_background_scan(struct hci_dev *hdev);
-
void hci_uuids_clear(struct hci_dev *hdev);
void hci_link_keys_clear(struct hci_dev *hdev);
@@ -1014,8 +1027,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \
- test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
/* ----- HCI protocols ----- */
@@ -1284,30 +1296,8 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-struct hci_request {
- struct hci_dev *hdev;
- struct sk_buff_head cmd_q;
-
- /* If something goes wrong when building the HCI request, the error
- * value is stored in this field.
- */
- int err;
-};
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param);
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
bool hci_req_pending(struct hci_dev *hdev);
-void hci_req_add_le_scan_disable(struct hci_request *req);
-void hci_req_add_le_passive_scan(struct hci_request *req);
-
-void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req);
-
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1344,6 +1334,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
#define DISCOV_BREDR_INQUIRY_LEN 0x08
+#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_new_settings(struct hci_dev *hdev);
@@ -1388,7 +1379,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
@@ -1417,8 +1407,6 @@ u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
__u8 ltk[16]);
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
- u8 *own_addr_type);
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index d1bb342d083f..2239a3753092 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -248,6 +248,7 @@ struct l2cap_conn_rsp {
#define L2CAP_PSM_SDP 0x0001
#define L2CAP_PSM_RFCOMM 0x0003
#define L2CAP_PSM_3DSP 0x0021
+#define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 95c34d5180fa..e218a30f2061 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -301,10 +301,6 @@ struct mgmt_cp_user_passkey_neg_reply {
#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
struct mgmt_rp_read_local_oob_data {
- __u8 hash[16];
- __u8 rand[16];
-} __packed;
-struct mgmt_rp_read_local_oob_ext_data {
__u8 hash192[16];
__u8 rand192[16];
__u8 hash256[16];
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 578b83127af1..4190af53a46a 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -24,8 +24,6 @@
#ifndef __RFCOMM_H
#define __RFCOMM_H
-#define RFCOMM_PSM 3
-
#define RFCOMM_CONN_TIMEOUT (HZ * 30)
#define RFCOMM_DISC_TIMEOUT (HZ * 20)
#define RFCOMM_AUTH_TIMEOUT (HZ * 25)
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index e01d903633ef..f04cdbb7848e 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -274,7 +274,6 @@ void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct ad_info *ad_info);
-int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 983a94b86b95..fda6feeb6c1f 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -150,6 +150,12 @@ struct bond_parm_tbl {
int mode;
};
+struct netdev_notify_work {
+ struct delayed_work work;
+ struct net_device *dev;
+ struct netdev_bonding_info bonding_info;
+};
+
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -243,6 +249,8 @@ struct bonding {
#define bond_slave_get_rtnl(dev) \
((struct slave *) rtnl_dereference(dev->rx_handler_data))
+void bond_queue_slave_event(struct slave *slave);
+
struct bond_vlan_tag {
__be16 vlan_proto;
unsigned short vlan_id;
@@ -315,6 +323,7 @@ static inline void bond_set_active_slave(struct slave *slave)
{
if (slave->backup) {
slave->backup = 0;
+ bond_queue_slave_event(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -323,6 +332,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
{
if (!slave->backup) {
slave->backup = 1;
+ bond_queue_slave_event(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -336,6 +346,7 @@ static inline void bond_set_slave_state(struct slave *slave,
slave->backup = slave_state;
if (notify) {
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+ bond_queue_slave_event(slave);
slave->should_notify = 0;
} else {
if (slave->should_notify)
@@ -490,6 +501,12 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+static inline void bond_set_slave_link_state(struct slave *slave, int state)
+{
+ slave->link = state;
+ bond_queue_slave_event(slave);
+}
+
static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
{
struct in_device *in_dev;
@@ -525,6 +542,7 @@ void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
+int bond_set_carrier(struct bonding *bond);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 4ebb816241fa..64e09e1e8099 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -520,37 +520,41 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
* @SURVEY_INFO_IN_USE: channel is currently being used
- * @SURVEY_INFO_CHANNEL_TIME: channel active time (in ms) was filled in
- * @SURVEY_INFO_CHANNEL_TIME_BUSY: channel busy time was filled in
- * @SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: extension channel busy time was filled in
- * @SURVEY_INFO_CHANNEL_TIME_RX: channel receive time was filled in
- * @SURVEY_INFO_CHANNEL_TIME_TX: channel transmit time was filled in
+ * @SURVEY_INFO_TIME: active time (in ms) was filled in
+ * @SURVEY_INFO_TIME_BUSY: busy time was filled in
+ * @SURVEY_INFO_TIME_EXT_BUSY: extension channel busy time was filled in
+ * @SURVEY_INFO_TIME_RX: receive time was filled in
+ * @SURVEY_INFO_TIME_TX: transmit time was filled in
+ * @SURVEY_INFO_TIME_SCAN: scan time was filled in
*
* Used by the driver to indicate which info in &struct survey_info
* it has filled in during the get_survey().
*/
enum survey_info_flags {
- SURVEY_INFO_NOISE_DBM = 1<<0,
- SURVEY_INFO_IN_USE = 1<<1,
- SURVEY_INFO_CHANNEL_TIME = 1<<2,
- SURVEY_INFO_CHANNEL_TIME_BUSY = 1<<3,
- SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 1<<4,
- SURVEY_INFO_CHANNEL_TIME_RX = 1<<5,
- SURVEY_INFO_CHANNEL_TIME_TX = 1<<6,
+ SURVEY_INFO_NOISE_DBM = BIT(0),
+ SURVEY_INFO_IN_USE = BIT(1),
+ SURVEY_INFO_TIME = BIT(2),
+ SURVEY_INFO_TIME_BUSY = BIT(3),
+ SURVEY_INFO_TIME_EXT_BUSY = BIT(4),
+ SURVEY_INFO_TIME_RX = BIT(5),
+ SURVEY_INFO_TIME_TX = BIT(6),
+ SURVEY_INFO_TIME_SCAN = BIT(7),
};
/**
* struct survey_info - channel survey response
*
- * @channel: the channel this survey record reports, mandatory
+ * @channel: the channel this survey record reports, may be %NULL for a single
+ * record to report global statistics
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
* optional
- * @channel_time: amount of time in ms the radio spent on the channel
- * @channel_time_busy: amount of time the primary channel was sensed busy
- * @channel_time_ext_busy: amount of time the extension channel was sensed busy
- * @channel_time_rx: amount of time the radio spent receiving data
- * @channel_time_tx: amount of time the radio spent transmitting data
+ * @time: amount of time in ms the radio was turned on (on the channel)
+ * @time_busy: amount of time the primary channel was sensed busy
+ * @time_ext_busy: amount of time the extension channel was sensed busy
+ * @time_rx: amount of time the radio spent receiving data
+ * @time_tx: amount of time the radio spent transmitting data
+ * @time_scan: amount of time the radio spent scanning
*
* Used by dump_survey() to report back per-channel survey information.
*
@@ -559,11 +563,12 @@ enum survey_info_flags {
*/
struct survey_info {
struct ieee80211_channel *channel;
- u64 channel_time;
- u64 channel_time_busy;
- u64 channel_time_ext_busy;
- u64 channel_time_rx;
- u64 channel_time_tx;
+ u64 time;
+ u64 time_busy;
+ u64 time_ext_busy;
+ u64 time_rx;
+ u64 time_tx;
+ u64 time_scan;
u32 filled;
s8 noise;
};
@@ -861,75 +866,6 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
enum cfg80211_station_type statype);
/**
- * enum station_info_flags - station information flags
- *
- * Used by the driver to indicate which info in &struct station_info
- * it has filled in during get_station() or dump_station().
- *
- * @STATION_INFO_INACTIVE_TIME: @inactive_time filled
- * @STATION_INFO_RX_BYTES: @rx_bytes filled
- * @STATION_INFO_TX_BYTES: @tx_bytes filled
- * @STATION_INFO_RX_BYTES64: @rx_bytes filled with 64-bit value
- * @STATION_INFO_TX_BYTES64: @tx_bytes filled with 64-bit value
- * @STATION_INFO_LLID: @llid filled
- * @STATION_INFO_PLID: @plid filled
- * @STATION_INFO_PLINK_STATE: @plink_state filled
- * @STATION_INFO_SIGNAL: @signal filled
- * @STATION_INFO_TX_BITRATE: @txrate fields are filled
- * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
- * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
- * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
- * @STATION_INFO_TX_RETRIES: @tx_retries filled
- * @STATION_INFO_TX_FAILED: @tx_failed filled
- * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
- * @STATION_INFO_SIGNAL_AVG: @signal_avg filled
- * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
- * @STATION_INFO_BSS_PARAM: @bss_param filled
- * @STATION_INFO_CONNECTED_TIME: @connected_time filled
- * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
- * @STATION_INFO_STA_FLAGS: @sta_flags filled
- * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
- * @STATION_INFO_T_OFFSET: @t_offset filled
- * @STATION_INFO_LOCAL_PM: @local_pm filled
- * @STATION_INFO_PEER_PM: @peer_pm filled
- * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled
- * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled
- * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled
- * @STATION_INFO_EXPECTED_THROUGHPUT: @expected_throughput filled
- */
-enum station_info_flags {
- STATION_INFO_INACTIVE_TIME = BIT(0),
- STATION_INFO_RX_BYTES = BIT(1),
- STATION_INFO_TX_BYTES = BIT(2),
- STATION_INFO_LLID = BIT(3),
- STATION_INFO_PLID = BIT(4),
- STATION_INFO_PLINK_STATE = BIT(5),
- STATION_INFO_SIGNAL = BIT(6),
- STATION_INFO_TX_BITRATE = BIT(7),
- STATION_INFO_RX_PACKETS = BIT(8),
- STATION_INFO_TX_PACKETS = BIT(9),
- STATION_INFO_TX_RETRIES = BIT(10),
- STATION_INFO_TX_FAILED = BIT(11),
- STATION_INFO_RX_DROP_MISC = BIT(12),
- STATION_INFO_SIGNAL_AVG = BIT(13),
- STATION_INFO_RX_BITRATE = BIT(14),
- STATION_INFO_BSS_PARAM = BIT(15),
- STATION_INFO_CONNECTED_TIME = BIT(16),
- STATION_INFO_ASSOC_REQ_IES = BIT(17),
- STATION_INFO_STA_FLAGS = BIT(18),
- STATION_INFO_BEACON_LOSS_COUNT = BIT(19),
- STATION_INFO_T_OFFSET = BIT(20),
- STATION_INFO_LOCAL_PM = BIT(21),
- STATION_INFO_PEER_PM = BIT(22),
- STATION_INFO_NONPEER_PM = BIT(23),
- STATION_INFO_RX_BYTES64 = BIT(24),
- STATION_INFO_TX_BYTES64 = BIT(25),
- STATION_INFO_CHAIN_SIGNAL = BIT(26),
- STATION_INFO_CHAIN_SIGNAL_AVG = BIT(27),
- STATION_INFO_EXPECTED_THROUGHPUT = BIT(28),
-};
-
-/**
* enum station_info_rate_flags - bitrate info flags
*
* Used by the driver to indicate the specific rate transmission
@@ -937,22 +873,35 @@ enum station_info_flags {
*
* @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
- * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 MHz width transmission
- * @RATE_INFO_FLAGS_80_MHZ_WIDTH: 80 MHz width transmission
- * @RATE_INFO_FLAGS_80P80_MHZ_WIDTH: 80+80 MHz width transmission
- * @RATE_INFO_FLAGS_160_MHZ_WIDTH: 160 MHz width transmission
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
* @RATE_INFO_FLAGS_60G: 60GHz MCS
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
- RATE_INFO_FLAGS_40_MHZ_WIDTH = BIT(2),
- RATE_INFO_FLAGS_80_MHZ_WIDTH = BIT(3),
- RATE_INFO_FLAGS_80P80_MHZ_WIDTH = BIT(4),
- RATE_INFO_FLAGS_160_MHZ_WIDTH = BIT(5),
- RATE_INFO_FLAGS_SHORT_GI = BIT(6),
- RATE_INFO_FLAGS_60G = BIT(7),
+ RATE_INFO_FLAGS_SHORT_GI = BIT(2),
+ RATE_INFO_FLAGS_60G = BIT(3),
+};
+
+/**
+ * enum rate_info_bw - rate bandwidth information
+ *
+ * Used by the driver to indicate the rate bandwidth.
+ *
+ * @RATE_INFO_BW_5: 5 MHz bandwidth
+ * @RATE_INFO_BW_10: 10 MHz bandwidth
+ * @RATE_INFO_BW_20: 20 MHz bandwidth
+ * @RATE_INFO_BW_40: 40 MHz bandwidth
+ * @RATE_INFO_BW_80: 80 MHz bandwidth
+ * @RATE_INFO_BW_160: 160 MHz bandwidth
+ */
+enum rate_info_bw {
+ RATE_INFO_BW_5,
+ RATE_INFO_BW_10,
+ RATE_INFO_BW_20,
+ RATE_INFO_BW_40,
+ RATE_INFO_BW_80,
+ RATE_INFO_BW_160,
};
/**
@@ -964,12 +913,14 @@ enum rate_info_flags {
* @mcs: mcs index if struct describes a 802.11n bitrate
* @legacy: bitrate in 100kbit/s for 802.11abg
* @nss: number of streams (VHT only)
+ * @bw: bandwidth (from &enum rate_info_bw)
*/
struct rate_info {
u8 flags;
u8 mcs;
u16 legacy;
u8 nss;
+ u8 bw;
};
/**
@@ -1003,6 +954,24 @@ struct sta_bss_parameters {
u16 beacon_interval;
};
+/**
+ * struct cfg80211_tid_stats - per-TID statistics
+ * @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to
+ * indicate the relevant values in this struct are filled
+ * @rx_msdu: number of received MSDUs
+ * @tx_msdu: number of (attempted) transmitted MSDUs
+ * @tx_msdu_retries: number of retries (not counting the first) for
+ * transmitted MSDUs
+ * @tx_msdu_failed: number of failed transmitted MSDUs
+ */
+struct cfg80211_tid_stats {
+ u32 filled;
+ u64 rx_msdu;
+ u64 tx_msdu;
+ u64 tx_msdu_retries;
+ u64 tx_msdu_failed;
+};
+
#define IEEE80211_MAX_CHAINS 4
/**
@@ -1010,11 +979,12 @@ struct sta_bss_parameters {
*
* Station information filled by driver for get_station() and dump_station.
*
- * @filled: bitflag of flags from &enum station_info_flags
+ * @filled: bitflag of flags using the bits of &enum nl80211_sta_info to
+ * indicate the relevant values in this struct for them
* @connected_time: time(in secs) since a station is last connected
* @inactive_time: time since last station activity (tx/rx) in milliseconds
- * @rx_bytes: bytes received from this station
- * @tx_bytes: bytes transmitted to this station
+ * @rx_bytes: bytes (size of MPDUs) received from this station
+ * @tx_bytes: bytes (size of MPDUs) transmitted to this station
* @llid: mesh local link id
* @plid: mesh peer link id
* @plink_state: mesh peer link state
@@ -1027,10 +997,10 @@ struct sta_bss_parameters {
* @chain_signal_avg: per-chain signal strength average in dBm
* @txrate: current unicast bitrate from this station
* @rxrate: current unicast bitrate to this station
- * @rx_packets: packets received from this station
- * @tx_packets: packets transmitted to this station
- * @tx_retries: cumulative retry counts
- * @tx_failed: number of failed transmissions (retries exceeded, no ACK)
+ * @rx_packets: packets (MSDUs & MMPDUs) received from this station
+ * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this station
+ * @tx_retries: cumulative retry counts (MPDUs)
+ * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
* @rx_dropped_misc: Dropped for un-specified reason.
* @bss_param: current BSS parameters
* @generation: generation number for nl80211 dumps.
@@ -1050,6 +1020,11 @@ struct sta_bss_parameters {
* @nonpeer_pm: non-peer mesh STA power save mode
* @expected_throughput: expected throughput in kbps (including 802.11 headers)
* towards this station.
+ * @rx_beacon: number of beacons received from this peer
+ * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
+ * from this peer
+ * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
+ * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
*/
struct station_info {
u32 filled;
@@ -1090,10 +1065,9 @@ struct station_info {
u32 expected_throughput;
- /*
- * Note: Add a new enum station_info_flags value for each new field and
- * use it to check which fields are initialized.
- */
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
+ struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
/**
@@ -1516,6 +1490,13 @@ struct cfg80211_match_set {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @rcu_head: RCU callback used to free the struct
+ * @owner_nlportid: netlink portid of owner (if this is a request
+ * owned by a particular socket)
+ * @delay: delay in seconds to use before starting the first scan
+ * cycle. The driver may ignore this parameter and start
+ * immediately (or at any other time), if this feature is not
+ * supported.
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1529,6 +1510,7 @@ struct cfg80211_sched_scan_request {
struct cfg80211_match_set *match_sets;
int n_match_sets;
s32 min_rssi_thold;
+ u32 delay;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
@@ -1537,6 +1519,8 @@ struct cfg80211_sched_scan_request {
struct wiphy *wiphy;
struct net_device *dev;
unsigned long scan_start;
+ struct rcu_head rcu_head;
+ u32 owner_nlportid;
/* keep last */
struct ieee80211_channel *channels[0];
@@ -3011,6 +2995,8 @@ struct wiphy_vendor_command {
* @regulatory_flags: wiphy regulatory flags, see
* &enum ieee80211_regulatory_flags
* @features: features advertised to nl80211, see &enum nl80211_feature_flags.
+ * @ext_features: extended features advertised to nl80211, see
+ * &enum nl80211_ext_feature_index.
* @bss_priv_size: each BSS struct has private data allocated with it,
* this variable determines its size
* @max_scan_ssids: maximum number of SSIDs the device can scan for in
@@ -3120,6 +3106,7 @@ struct wiphy {
u16 max_acl_mac_addrs;
u32 flags, regulatory_flags, features;
+ u8 ext_features[DIV_ROUND_UP(NUM_NL80211_EXT_FEATURES, 8)];
u32 ap_sme_capa;
@@ -3808,6 +3795,34 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
+ * regulatory_set_wiphy_regd - set regdom info for self managed drivers
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @rd: the regulatory domain information to use for this wiphy
+ *
+ * Set the regulatory domain information for self-managed wiphys; only they
+ * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
+ * information.
+ *
+ * Return: 0 on success. -EINVAL, -EPERM
+ */
+int regulatory_set_wiphy_regd(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
+
+/**
+ * regulatory_set_wiphy_regd_sync_rtnl - set regdom for self-managed drivers
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @rd: the regulatory domain information to use for this wiphy
+ *
+ * This function requires the RTNL to be held and applies the new regdomain
+ * synchronously to this wiphy. For more details see
+ * regulatory_set_wiphy_regd().
+ *
+ * Return: 0 on success. -EINVAL, -EPERM
+ */
+int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
+
+/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
* @wiphy: the wireless device we want to process the regulatory domain on
* @regd: the custom regulatory domain to use for this wiphy
@@ -4565,13 +4580,27 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
+ * cfg80211_del_sta_sinfo - notify userspace about deletion of a station
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @sinfo: the station information/statistics
+ * @gfp: allocation flags
+ */
+void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
+
+/**
* cfg80211_del_sta - notify userspace about deletion of a station
*
* @dev: the netdev
* @mac_addr: the station's address
* @gfp: allocation flags
*/
-void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
+static inline void cfg80211_del_sta(struct net_device *dev,
+ const u8 *mac_addr, gfp_t gfp)
+{
+ cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp);
+}
/**
* cfg80211_conn_failed - connection request failed notification
@@ -5033,6 +5062,42 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
*/
void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
+/**
+ * wiphy_ext_feature_set - set the extended feature flag
+ *
+ * @wiphy: the wiphy to modify.
+ * @ftidx: extended feature bit index.
+ *
+ * The extended features are flagged in multiple bytes (see
+ * &struct wiphy.@ext_features)
+ */
+static inline void wiphy_ext_feature_set(struct wiphy *wiphy,
+ enum nl80211_ext_feature_index ftidx)
+{
+ u8 *ft_byte;
+
+ ft_byte = &wiphy->ext_features[ftidx / 8];
+ *ft_byte |= BIT(ftidx % 8);
+}
+
+/**
+ * wiphy_ext_feature_isset - check the extended feature flag
+ *
+ * @wiphy: the wiphy to check.
+ * @ftidx: extended feature bit index.
+ *
+ * The extended features are flagged in multiple bytes (see
+ * &struct wiphy.@ext_features)
+ */
+static inline bool
+wiphy_ext_feature_isset(struct wiphy *wiphy,
+ enum nl80211_ext_feature_index ftidx)
+{
+ u8 ft_byte;
+
+ ft_byte = wiphy->ext_features[ftidx / 8];
+ return (ft_byte & BIT(ftidx % 8)) != 0;
+}
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
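A sketch of how a driver might advertise an extended feature and fill the renamed survey time fields; the feature index used here is a placeholder, not a real enum nl80211_ext_feature_index value:

	#include <net/cfg80211.h>

	static void my_setup_wiphy(struct wiphy *wiphy)
	{
		/* 0 stands in for a real nl80211_ext_feature_index value. */
		wiphy_ext_feature_set(wiphy, 0);
	}

	static void my_fill_survey(struct survey_info *survey, u64 active_ms,
				   u64 busy_ms)
	{
		survey->time = active_ms;		/* was channel_time */
		survey->time_busy = busy_ms;		/* was channel_time_busy */
		survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	}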
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 7f713acfa106..eeda67652766 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -25,6 +25,7 @@
#include <net/nl802154.h>
struct wpan_phy;
+struct wpan_phy_cca;
struct cfg802154_ops {
struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
@@ -39,6 +40,8 @@ struct cfg802154_ops {
int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev);
int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
+ int (*set_cca_mode)(struct wpan_phy *wpan_phy,
+ const struct wpan_phy_cca *cca);
int (*set_pan_id)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, __le16 pan_id);
int (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -56,6 +59,11 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
};
+struct wpan_phy_cca {
+ enum nl802154_cca_modes mode;
+ enum nl802154_cca_opts opt;
+};
+
struct wpan_phy {
struct mutex pib_lock;
@@ -76,7 +84,7 @@ struct wpan_phy {
u8 current_page;
u32 channels_supported[IEEE802154_MAX_PAGE + 1];
s8 transmit_power;
- u8 cca_mode;
+ struct wpan_phy_cca cca;
__le64 perm_extended_addr;
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a6fd939f202d..3ebb168b9afc 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -121,13 +121,6 @@ extern int cipso_v4_rbm_strictvalid;
#endif
/*
- * Helper Functions
- */
-
-#define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0)
-#define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso)
-
-/*
* DOI List Functions
*/
@@ -190,7 +183,7 @@ static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
#ifdef CONFIG_NETLABEL
void cipso_v4_cache_invalidate(void);
-int cipso_v4_cache_add(const struct sk_buff *skb,
+int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr);
#else
static inline void cipso_v4_cache_invalidate(void)
@@ -198,7 +191,7 @@ static inline void cipso_v4_cache_invalidate(void)
return;
}
-static inline int cipso_v4_cache_add(const struct sk_buff *skb,
+static inline int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr)
{
return 0;
@@ -211,6 +204,8 @@ static inline int cipso_v4_cache_add(const struct sk_buff *skb,
#ifdef CONFIG_NETLABEL
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway);
+int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr);
int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr);
@@ -226,6 +221,7 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
int cipso_v4_skbuff_delattr(struct sk_buff *skb);
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
struct netlbl_lsm_secattr *secattr);
+unsigned char *cipso_v4_optptr(const struct sk_buff *skb);
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option);
#else
static inline void cipso_v4_error(struct sk_buff *skb,
@@ -235,6 +231,12 @@ static inline void cipso_v4_error(struct sk_buff *skb,
return;
}
+static inline int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
static inline int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
@@ -282,6 +284,11 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
return -ENOSYS;
}
+static inline unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+{
+ return NULL;
+}
+
static inline int cipso_v4_validate(const struct sk_buff *skb,
unsigned char **option)
{
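With CIPSO_V4_OPTEXIST()/CIPSO_V4_OPTPTR() gone, callers locate the option and fetch attributes through the two new helpers; a sketch with a hypothetical wrapper:

	#include <net/cipso_ipv4.h>
	#include <net/netlabel.h>

	static int my_skb_getattr(const struct sk_buff *skb,
				  struct netlbl_lsm_secattr *secattr)
	{
		unsigned char *cipso_ptr;

		/* Replaces the CIPSO_V4_OPTEXIST()/CIPSO_V4_OPTPTR() pair. */
		cipso_ptr = cipso_v4_optptr(skb);
		if (!cipso_ptr)
			return -ENOMSG;
		return cipso_v4_getattr(cipso_ptr, secattr);
	}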
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 7ee2df083542..dc8fd81412bf 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -22,9 +22,9 @@ struct flow_keys {
__be32 ports;
__be16 port16[2];
};
- u16 thoff;
- u16 n_proto;
- u8 ip_proto;
+ u16 thoff;
+ __be16 n_proto;
+ u8 ip_proto;
};
bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 84125088c309..0574abd3db86 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -27,13 +27,18 @@ struct genl_info;
* @maxattr: maximum number of attributes supported
* @netnsok: set to true if the family can handle network
* namespaces and should be presented in all of them
+ * @parallel_ops: operations can be called in parallel and aren't
+ * synchronized by the core genetlink code
* @pre_doit: called before an operation's doit callback, it may
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
* @mcast_bind: a socket bound to the given multicast group (which
* is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group
+ * @mcast_unbind: a socket was unbound from the given multicast group.
+ * Note that unbind() will not be called symmetrically if the
+ * generic netlink family is removed while there are still open
+ * sockets.
* @attrbuf: buffer to store parsed attributes
* @family_list: family list
* @mcgrps: multicast groups used by this family (private)
@@ -206,6 +211,23 @@ static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr,
}
/**
+ * genlmsg_parse - parse attributes of a genetlink message
+ * @nlh: netlink message header
+ * @family: genetlink message family
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ */
+static inline int genlmsg_parse(const struct nlmsghdr *nlh,
+ const struct genl_family *family,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy)
+{
+ return nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+ policy);
+}
+
+/**
* genl_dump_check_consistent - check if sequence is consistent and advertise if not
* @cb: netlink callback structure that stores the sequence number
* @user_hdr: user header as returned from genlmsg_put()
@@ -245,9 +267,9 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
* @skb: socket buffer the message is stored in
* @hdr: user specific header
*/
-static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
+static inline void genlmsg_end(struct sk_buff *skb, void *hdr)
{
- return nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+ nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**
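genlmsg_end() no longer returns a length, and genlmsg_parse() gives dump handlers an easy way to re-parse request attributes. A sketch with hypothetical family, policy and attribute names:

	#include <net/genetlink.h>

	/* my_family, my_policy and MY_ATTR_MAX are hypothetical. */
	static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		struct nlattr *attrs[MY_ATTR_MAX + 1];
		int err;

		err = genlmsg_parse(cb->nlh, &my_family, attrs, MY_ATTR_MAX,
				    my_policy);
		if (err)
			return err;
		/* ... build the reply; finish each message with
		 * genlmsg_end(skb, hdr), which now returns void.
		 */
		return skb->len;
	}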
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 112132cf8e2e..14fb8d3390b4 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -68,13 +68,12 @@ struct geneve_sock;
typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
struct geneve_sock {
- struct hlist_node hlist;
+ struct list_head list;
geneve_rcv_t *rcv;
void *rcv_data;
- struct work_struct del_work;
struct socket *sock;
struct rcu_head rcu;
- atomic_t refcnt;
+ int refcnt;
struct udp_offload udp_offloads;
};
@@ -91,7 +90,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
__u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
__be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool xnet);
+ bool csum, bool xnet);
#endif /*ifdef CONFIG_INET */
#endif /*ifdef__NET_GENEVE_H */
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 734d9b5f577a..0f712c0bc0bf 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -8,25 +8,23 @@
struct gro_cell {
struct sk_buff_head napi_skbs;
struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
+};
struct gro_cells {
- unsigned int gro_cells_mask;
- struct gro_cell *cells;
+ struct gro_cell __percpu *cells;
};
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
- struct gro_cell *cell = gcells->cells;
+ struct gro_cell *cell;
struct net_device *dev = skb->dev;
- if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+ if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
netif_rx(skb);
return;
}
- if (skb_rx_queue_recorded(skb))
- cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+ cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
atomic_long_inc(&dev->rx_dropped);
@@ -72,15 +70,12 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
{
int i;
- gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
- gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
- sizeof(struct gro_cell),
- GFP_KERNEL);
+ gcells->cells = alloc_percpu(struct gro_cell);
if (!gcells->cells)
return -ENOMEM;
- for (i = 0; i <= gcells->gro_cells_mask; i++) {
- struct gro_cell *cell = gcells->cells + i;
+ for_each_possible_cpu(i) {
+ struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
skb_queue_head_init(&cell->napi_skbs);
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
@@ -91,16 +86,16 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
static inline void gro_cells_destroy(struct gro_cells *gcells)
{
- struct gro_cell *cell = gcells->cells;
int i;
- if (!cell)
+ if (!gcells->cells)
return;
- for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
+ for_each_possible_cpu(i) {
+ struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
netif_napi_del(&cell->napi);
skb_queue_purge(&cell->napi_skbs);
}
- kfree(gcells->cells);
+ free_percpu(gcells->cells);
gcells->cells = NULL;
}
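The GRO cells now live in per-CPU storage rather than a per-RX-queue array; usage from a tunnel driver is unchanged. A sketch, with the device private struct hypothetical:

	#include <net/gro_cells.h>

	struct my_tunnel_priv {
		struct gro_cells gro_cells;	/* hypothetical tunnel private data */
	};

	static int my_tunnel_init(struct net_device *dev,
				  struct my_tunnel_priv *priv)
	{
		return gro_cells_init(&priv->gro_cells, dev);	/* percpu alloc */
	}

	static void my_tunnel_rx(struct my_tunnel_priv *priv, struct sk_buff *skb)
	{
		gro_cells_receive(&priv->gro_cells, skb);	/* this-CPU cell */
	}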
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 83bb8a73d23c..94a297052442 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -28,6 +28,8 @@
#include <linux/skbuff.h>
#include <linux/ieee802154.h>
+#include <net/cfg802154.h>
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -337,7 +339,7 @@ struct ieee802154_mac_params {
s8 frame_retries;
bool lbt;
- u8 cca_mode;
+ struct wpan_phy_cca cca;
s32 cca_ed_level;
};
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 848e85cb5c61..5976bdecf58b 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -98,7 +98,8 @@ struct inet_connection_sock {
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state;
+ __u8 icsk_ca_state:7,
+ icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
__u8 icsk_pending;
__u8 icsk_backoff;
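
The inet_connection_sock hunk above squeezes a one-bit icsk_ca_dst_locked flag into the byte that previously held only icsk_ca_state, by turning the state into a 7-bit bitfield. A small userspace sketch of that packing, assuming a compiler that accepts char-sized bitfields; the struct and field names here are invented:

#include <stdio.h>

struct demo_csk {
	unsigned char ca_state:7,	/* room for 127 congestion states */
		      ca_dst_locked:1;	/* new single-bit flag, same byte */
	unsigned char retransmits;
};

int main(void)
{
	struct demo_csk c = { .ca_state = 4, .ca_dst_locked = 1 };

	printf("sizeof(struct demo_csk) = %zu\n", sizeof(struct demo_csk));
	printf("state=%u dst_locked=%u\n", c.ca_state, c.ca_dst_locked);
	return 0;
}
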
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a829b77523cf..eb16c7beed1e 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -16,7 +16,7 @@
#ifndef _INET_SOCK_H
#define _INET_SOCK_H
-
+#include <linux/bitops.h>
#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -184,6 +184,7 @@ struct inet_sock {
mc_all:1,
nodefrag:1;
__u8 rcv_tos;
+ __u8 convert_csum;
int uc_index;
int mc_index;
__be32 mc_addr;
@@ -194,6 +195,16 @@ struct inet_sock {
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+/* cmsg flags for inet */
+#define IP_CMSG_PKTINFO BIT(0)
+#define IP_CMSG_TTL BIT(1)
+#define IP_CMSG_TOS BIT(2)
+#define IP_CMSG_RECVOPTS BIT(3)
+#define IP_CMSG_RETOPTS BIT(4)
+#define IP_CMSG_PASSSEC BIT(5)
+#define IP_CMSG_ORIGDSTADDR BIT(6)
+#define IP_CMSG_CHECKSUM BIT(7)
+
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
@@ -250,4 +261,20 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
return flags;
}
+static inline void inet_inc_convert_csum(struct sock *sk)
+{
+ inet_sk(sk)->convert_csum++;
+}
+
+static inline void inet_dec_convert_csum(struct sock *sk)
+{
+ if (inet_sk(sk)->convert_csum > 0)
+ inet_sk(sk)->convert_csum--;
+}
+
+static inline bool inet_get_convert_csum(struct sock *sk)
+{
+ return !!inet_sk(sk)->convert_csum;
+}
+
#endif /* _INET_SOCK_H */
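
Two small idioms appear in the inet_sock.h hunks above: cmsg flags expressed through BIT(), and a convert_csum counter with inc/dec/get helpers that never underflows. A hedged userspace sketch of both; the DEMO_* macros and the struct are invented for the example:

#include <stdio.h>

#define BIT(n)			(1UL << (n))

#define DEMO_CMSG_PKTINFO	BIT(0)
#define DEMO_CMSG_TTL		BIT(1)
#define DEMO_CMSG_CHECKSUM	BIT(7)

struct demo_sock {
	unsigned char convert_csum;	/* a counter, not a boolean */
};

static void demo_inc_convert_csum(struct demo_sock *sk)
{
	sk->convert_csum++;
}

static void demo_dec_convert_csum(struct demo_sock *sk)
{
	if (sk->convert_csum > 0)	/* guard against underflow */
		sk->convert_csum--;
}

static int demo_get_convert_csum(const struct demo_sock *sk)
{
	return sk->convert_csum != 0;
}

int main(void)
{
	unsigned long flags = DEMO_CMSG_PKTINFO | DEMO_CMSG_CHECKSUM;
	struct demo_sock sk = { 0 };

	if (flags & DEMO_CMSG_CHECKSUM)
		printf("checksum conversion cmsg requested\n");

	demo_inc_convert_csum(&sk);
	demo_inc_convert_csum(&sk);
	demo_dec_convert_csum(&sk);
	printf("conversion still active: %d\n", demo_get_convert_csum(&sk));
	return 0;
}
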
diff --git a/include/net/ip.h b/include/net/ip.h
index 0bb620702929..025c61c0dffb 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -39,11 +39,12 @@ struct inet_skb_parm {
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
-#define IPSKB_FORWARDED 1
-#define IPSKB_XFRM_TUNNEL_SIZE 2
-#define IPSKB_XFRM_TRANSFORMED 4
-#define IPSKB_FRAG_COMPLETE 8
-#define IPSKB_REROUTED 16
+#define IPSKB_FORWARDED BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE BIT(3)
+#define IPSKB_REROUTED BIT(4)
+#define IPSKB_DOREDIRECT BIT(5)
u16 frag_max_size;
};
@@ -180,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -537,7 +538,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
int ip_cmsg_send(struct net *net, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -557,6 +558,11 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
+static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+{
+ ip_cmsg_recv_offset(msg, skb, 0);
+}
+
bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;
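
In the ip.h hunks above, ip_cmsg_recv() keeps its old signature by becoming a static inline wrapper around the new ip_cmsg_recv_offset() with an offset of 0. The same compatibility pattern in a tiny standalone form; the function names below are illustrative only:

#include <stdio.h>

static void demo_recv_offset(const char *payload, int offset)
{
	printf("parsing \"%s\" starting at offset %d\n", payload, offset);
}

/* Old entry point: existing callers are untouched, new callers can pass
 * a real offset through demo_recv_offset() directly.
 */
static inline void demo_recv(const char *payload)
{
	demo_recv_offset(payload, 0);
}

int main(void)
{
	demo_recv("plain datagram");
	demo_recv_offset("tunnelled datagram", 8);
	return 0;
}
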
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 8eea35d32a75..20e80fa7bbdd 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -74,6 +74,11 @@ struct fib6_node {
#define FIB6_SUBTREE(fn) ((fn)->subtree)
#endif
+struct mx6_config {
+ const u32 *mx;
+ DECLARE_BITMAP(mx_valid, RTAX_MAX);
+};
+
/*
* routing information
*
@@ -291,9 +296,8 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
void *arg);
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
- struct nlattr *mx, int mx_len);
-
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ struct nl_info *info, struct mx6_config *mxc);
int fib6_del(struct rt6_info *rt, struct nl_info *info);
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 9326c41c2d7f..76c091b53dae 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -70,6 +70,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
+struct net *ip6_tnl_get_link_net(const struct net_device *dev);
static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 09a819ee2151..5bd120e4bc0a 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -222,16 +222,19 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
struct fib_result *res)
{
- struct fib_table *table;
+ int err = -ENETUNREACH;
+
+ rcu_read_lock();
+
+ if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
+ FIB_LOOKUP_NOREF) ||
+ !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
+ FIB_LOOKUP_NOREF))
+ err = 0;
- table = fib_get_table(net, RT_TABLE_LOCAL);
- if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
- return 0;
+ rcu_read_unlock();
- table = fib_get_table(net, RT_TABLE_MAIN);
- if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
- return 0;
- return -ENETUNREACH;
+ return err;
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
@@ -247,20 +250,25 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
struct fib_result *res)
{
if (!net->ipv4.fib_has_custom_rules) {
+ int err = -ENETUNREACH;
+
+ rcu_read_lock();
+
res->tclassid = 0;
- if (net->ipv4.fib_local &&
- !fib_table_lookup(net->ipv4.fib_local, flp, res,
- FIB_LOOKUP_NOREF))
- return 0;
- if (net->ipv4.fib_main &&
- !fib_table_lookup(net->ipv4.fib_main, flp, res,
- FIB_LOOKUP_NOREF))
- return 0;
- if (net->ipv4.fib_default &&
- !fib_table_lookup(net->ipv4.fib_default, flp, res,
- FIB_LOOKUP_NOREF))
- return 0;
- return -ENETUNREACH;
+ if ((net->ipv4.fib_local &&
+ !fib_table_lookup(net->ipv4.fib_local, flp, res,
+ FIB_LOOKUP_NOREF)) ||
+ (net->ipv4.fib_main &&
+ !fib_table_lookup(net->ipv4.fib_main, flp, res,
+ FIB_LOOKUP_NOREF)) ||
+ (net->ipv4.fib_default &&
+ !fib_table_lookup(net->ipv4.fib_default, flp, res,
+ FIB_LOOKUP_NOREF)))
+ err = 0;
+
+ rcu_read_unlock();
+
+ return err;
}
return __fib_lookup(net, flp, res);
}
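
The fib_lookup() rewrites above take the read-side lock once, try each table through a short-circuit ||, and fall back to a single unreachable error. A userspace sketch of that control flow, with a pthread rwlock standing in for RCU and hard-coded lookup stubs; none of this is kernel API:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Return 0 on a hit, nonzero on a miss, like fib_table_lookup(). */
static int lookup_local(int dst) { return dst == 1 ? 0 : 1; }
static int lookup_main(int dst)  { return dst == 2 ? 0 : 1; }

static int demo_fib_lookup(int dst)
{
	int err = -ENETUNREACH;

	pthread_rwlock_rdlock(&demo_lock);	/* rcu_read_lock() analogue */
	if (!lookup_local(dst) || !lookup_main(dst))
		err = 0;
	pthread_rwlock_unlock(&demo_lock);

	return err;
}

int main(void)
{
	printf("dst=2 -> %d (hit in the second table)\n", demo_fib_lookup(2));
	printf("dst=9 -> %d (no table matched)\n", demo_fib_lookup(9));
	return 0;
}
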
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 25a59eb388a6..2c47061a6954 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -97,7 +97,10 @@ struct ip_tunnel {
#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
#define TUNNEL_OAM __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
-#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
+#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+
+#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
struct tnl_ptk_info {
__be16 flags;
@@ -138,6 +141,7 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
+struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
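
The ip_tunnels.h hunk above splits the single TUNNEL_OPTIONS_PRESENT bit into per-type Geneve and VXLAN bits and redefines the aggregate as their OR, so existing "any options?" checks keep working. A small sketch of that aggregate-mask idiom, using htons() in place of __cpu_to_be16(); the DEMO_* names are invented:

#include <arpa/inet.h>
#include <stdio.h>

#define DEMO_GENEVE_OPT		htons(0x0800)
#define DEMO_VXLAN_OPT		htons(0x1000)
#define DEMO_OPTIONS_PRESENT	(DEMO_GENEVE_OPT | DEMO_VXLAN_OPT)

int main(void)
{
	unsigned short flags = DEMO_VXLAN_OPT;

	if (flags & DEMO_OPTIONS_PRESENT)
		printf("tunnel metadata options are attached\n");
	if (!(flags & DEMO_GENEVE_OPT))
		printf("...but they are not Geneve options\n");
	return 0;
}
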
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4292929392b0..4c9fe224d73b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,6 +671,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
void ipv6_proxy_select_ident(struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +709,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
__be32 flowlabel, bool autolabel)
{
if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
- __be32 hash;
+ u32 hash;
hash = skb_get_hash(skb);
@@ -718,7 +719,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
*/
hash ^= hash >> 12;
- flowlabel = hash & IPV6_FLOWLABEL_MASK;
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
}
return flowlabel;
@@ -788,6 +789,25 @@ int ip6_push_pending_frames(struct sock *sk);
void ip6_flush_pending_frames(struct sock *sk);
+int ip6_send_skb(struct sk_buff *skb);
+
+struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
+ struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork);
+struct sk_buff *ip6_make_skb(struct sock *sk,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ int hlimit, int tclass, struct ipv6_txoptions *opt,
+ struct flowi6 *fl6, struct rt6_info *rt,
+ unsigned int flags, int dontfrag);
+
+static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
+{
+ return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
+ &inet6_sk(sk)->cork);
+}
+
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
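
The ip6_make_flowlabel() hunks above keep the skb hash as a plain u32, fold the high bits down with hash ^= hash >> 12, and only cast to __be32 when masking to the 20-bit flow label. A simplified host-order sketch of that derivation; the kernel works on big-endian values, this example deliberately does not:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_make_flowlabel(uint32_t hash)
{
	hash ^= hash >> 12;		/* mix the high bits into the low 20 */
	return hash & 0x000FFFFF;	/* the flow label field is 20 bits */
}

int main(void)
{
	printf("flowlabel = 0x%05x\n", demo_make_flowlabel(0xdeadbeefu));
	return 0;
}
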
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 29c7be8808d5..d52914b75331 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -376,6 +376,12 @@ enum ieee80211_rssi_event {
* @ssid_len: Length of SSID given in @ssid.
* @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
* @txpower: TX power in dBm
+ * @txpower_type: TX power adjustment used to control per packet Transmit
+ * Power Control (TPC) in lower driver for the current vif. In particular
+ * TPC is enabled if value passed in %txpower_type is
+ * NL80211_TX_POWER_LIMITED (allow using less than specified from
+ * userspace), whereas TPC is disabled if %txpower_type is set to
+ * NL80211_TX_POWER_FIXED (use value configured from userspace)
* @p2p_noa_attr: P2P NoA attribute for P2P powersave
*/
struct ieee80211_bss_conf {
@@ -411,6 +417,7 @@ struct ieee80211_bss_conf {
size_t ssid_len;
bool hidden_ssid;
int txpower;
+ enum nl80211_tx_power_setting txpower_type;
struct ieee80211_p2p_noa_attr p2p_noa_attr;
};
@@ -505,8 +512,11 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it
* would be fragmented by size (this is optional, only used for
* monitor injection).
- * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll
- * frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_STAT_NOACK_TRANSMITTED: A frame that was marked with
+ * IEEE80211_TX_CTL_NO_ACK has been successfully transmitted without
+ * any errors (like issues specific to the driver/HW).
+ * This flag must not be set for frames that don't request no-ack
+ * behaviour with IEEE80211_TX_CTL_NO_ACK.
*
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -542,7 +552,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STATUS_EOSP = BIT(28),
IEEE80211_TX_CTL_USE_MINRATE = BIT(29),
IEEE80211_TX_CTL_DONTFRAG = BIT(30),
- IEEE80211_TX_CTL_PS_RESPONSE = BIT(31),
+ IEEE80211_TX_STAT_NOACK_TRANSMITTED = BIT(31),
};
#define IEEE80211_TX_CTL_STBC_SHIFT 23
@@ -552,11 +562,14 @@ enum mac80211_tx_info_flags {
*
* @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
* protocol frame (e.g. EAP)
+ * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD).
*
* These flags are used in tx_info->control.flags.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+ IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
};
/*
@@ -925,15 +938,13 @@ enum mac80211_rx_flags {
* These flags are used with the @vht_flag member of
* &struct ieee80211_rx_status.
* @RX_VHT_FLAG_80MHZ: 80 MHz was used
- * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used
* @RX_VHT_FLAG_160MHZ: 160 MHz was used
* @RX_VHT_FLAG_BF: packet was beamformed
*/
enum mac80211_rx_vht_flags {
RX_VHT_FLAG_80MHZ = BIT(0),
- RX_VHT_FLAG_80P80MHZ = BIT(1),
- RX_VHT_FLAG_160MHZ = BIT(2),
- RX_VHT_FLAG_BF = BIT(3),
+ RX_VHT_FLAG_160MHZ = BIT(1),
+ RX_VHT_FLAG_BF = BIT(2),
};
/**
@@ -1181,10 +1192,15 @@ struct ieee80211_channel_switch {
* monitoring on this virtual interface -- i.e. it can monitor
* connection quality related parameters, such as the RSSI level and
* provide notifications if configured trigger levels are reached.
+ * @IEEE80211_VIF_SUPPORTS_UAPSD: The device can do U-APSD for this
+ * interface. This flag should be set during interface addition,
+ * but may be set/cleared as late as authentication to an AP. It is
+ * only valid for managed/station mode interfaces.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
+ IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
};
/**
@@ -1270,19 +1286,22 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key.
+ * particular key. Setting this flag does not necessarily mean that SKBs
+ * will have sufficient tailroom for ICV or MIC.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
* @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
* that the key is pairwise rather then a shared key.
* @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
- * CCMP key if it requires CCMP encryption of management frames (MFP) to
- * be done in software.
+ * CCMP/GCMP key if it requires CCMP/GCMP encryption of management frames
+ * (MFP) to be done in software.
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
+ * not necessarily mean that SKBs will have sufficient tailroom for ICV or
+ * MIC.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
@@ -1291,8 +1310,11 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* RX, if your crypto engine can't deal with TX you can also set the
* %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
* @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
- * driver for a CCMP key to indicate that is requires IV generation
+ * driver for a CCMP/GCMP key to indicate that is requires IV generation
* only for managment frames (MFP).
+ * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
+ * driver for a key to indicate that sufficient tailroom must always
+ * be reserved for ICV or MIC, even when HW encryption is enabled.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1302,6 +1324,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(4),
IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
};
/**
@@ -1580,11 +1603,6 @@ struct ieee80211_tx_control {
* @IEEE80211_HW_MFP_CAPABLE:
* Hardware supports management frame protection (MFP, IEEE 802.11w).
*
- * @IEEE80211_HW_SUPPORTS_UAPSD:
- * Hardware supports Unscheduled Automatic Power Save Delivery
- * (U-APSD) in managed mode. The mode is configured with
- * conf_tx() operation.
- *
* @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
* Hardware can provide ack status reports of Tx frames to
* the stack.
@@ -1623,6 +1641,12 @@ struct ieee80211_tx_control {
* be created. It is expected user-space will create vifs as
* desired (and thus have them named as desired).
*
+ * @IEEE80211_HW_SW_CRYPTO_CONTROL: The driver wants to control which of the
+ * crypto algorithms can be done in software - so don't automatically
+ * try to fall back to it if hardware crypto fails, but do so only if
+ * the driver returns 1. This also forces the driver to advertise its
+ * supported cipher suites.
+ *
* @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
* queue mapping in order to use different queues (not just one per AC)
* for different virtual interfaces. See the doc section on HW queue
@@ -1670,8 +1694,8 @@ enum ieee80211_hw_flags {
IEEE80211_HW_MFP_CAPABLE = 1<<13,
IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
IEEE80211_HW_NO_AUTO_VIF = 1<<15,
- /* free slot */
- IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
+ IEEE80211_HW_SW_CRYPTO_CONTROL = 1<<16,
+ /* free slots */
IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
IEEE80211_HW_QUEUE_CONTROL = 1<<20,
@@ -1945,6 +1969,11 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* added; if you return 0 then hw_key_idx must be assigned to the
* hardware key index, you are free to use the full u8 range.
*
+ * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
+ * set, mac80211 will not automatically fall back to software crypto if
+ * enabling hardware crypto failed. The set_key() call may also return the
+ * value 1 to permit this specific key/algorithm to be done in software.
+ *
* When the cmd is %DISABLE_KEY then it must succeed.
*
* Note that it is permissible to not decrypt a frame even if a key
@@ -2023,7 +2052,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* enabled whenever user has enabled powersave.
*
* Driver informs U-APSD client support by enabling
- * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
+ * %IEEE80211_VIF_SUPPORTS_UAPSD flag. The mode is configured through the
* uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
* Nullfunc frames and stay awake until the service period has ended. To
* utilize U-APSD, dynamic powersave is disabled for voip AC and all frames
@@ -2696,6 +2725,14 @@ enum ieee80211_reconfig_type {
* is only used if the configured rate control algorithm actually uses
* the new rate table API, and is therefore optional. Must be atomic.
*
+ * @sta_statistics: Get statistics for this station. For example with beacon
+ * filtering, the statistics kept by mac80211 might not be accurate, so
+ * let the driver pre-fill the statistics. The driver can fill most of
+ * the values (indicating which by setting the filled bitmap), but not
+ * all of them make sense - see the source for which ones are possible.
+ * Statistics that the driver doesn't fill will be filled by mac80211.
+ * The callback can sleep.
+ *
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
@@ -2856,9 +2893,6 @@ enum ieee80211_reconfig_type {
* @get_et_strings: Ethtool API to get a set of strings to describe stats
* and perhaps other supported types of ethtool data-sets.
*
- * @get_rssi: Get current signal strength in dBm, the function is optional
- * and can sleep.
- *
* @mgd_prepare_tx: Prepare for transmitting a management frame for association
* before associated. In multi-channel scenarios, a virtual interface is
* bound to a channel before it is associated, but as it isn't associated
@@ -3059,6 +3093,10 @@ struct ieee80211_ops {
void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+ void (*sta_statistics)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
int (*conf_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params);
@@ -3126,8 +3164,6 @@ struct ieee80211_ops {
void (*get_et_strings)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data);
- int (*get_rssi)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, s8 *rssi_dbm);
void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -4062,6 +4098,10 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
* reverse order than in packet)
* @aes_cmac: PN data, most significant byte first (big endian,
* reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
*/
struct ieee80211_key_seq {
union {
@@ -4075,6 +4115,12 @@ struct ieee80211_key_seq {
struct {
u8 pn[6];
} aes_cmac;
+ struct {
+ u8 pn[6];
+ } aes_gmac;
+ struct {
+ u8 pn[6];
+ } gcmp;
};
};
@@ -4099,7 +4145,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_get_key_rx_seq - get key RX sequence counter
*
* @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
* the value on TID 0 is also used for non-QoS frames. For
* CMAC, only TID 0 is valid.
* @seq: buffer to receive the sequence data
@@ -4135,7 +4181,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_set_key_rx_seq - set key RX sequence counter
*
* @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
* the value on TID 0 is also used for non-QoS frames. For
* CMAC, only TID 0 is valid.
* @seq: new sequence data
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index c823d910b46c..850647811749 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -20,6 +20,8 @@
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
+#include <net/cfg802154.h>
+
/* General MAC frame format:
* 2 bytes: Frame Control
* 1 byte: Sequence Number
@@ -212,7 +214,8 @@ struct ieee802154_ops {
unsigned long changed);
int (*set_txpower)(struct ieee802154_hw *hw, int db);
int (*set_lbt)(struct ieee802154_hw *hw, bool on);
- int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode);
+ int (*set_cca_mode)(struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca);
int (*set_cca_ed_level)(struct ieee802154_hw *hw,
s32 level);
int (*set_csma_params)(struct ieee802154_hw *hw,
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2e8756b8c775..36faf4990c4b 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -60,6 +60,7 @@ struct net {
struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */
+ struct idr netns_ids;
struct ns_common ns;
@@ -290,6 +291,9 @@ static inline struct net *read_pnet(struct net * const *pnet)
#define __net_initconst __initconst
#endif
+int peernet2id(struct net *net, struct net *peer);
+struct net *get_net_ns_by_id(struct net *net, int id);
+
struct pernet_operations {
struct list_head list;
int (*init)(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f0daed2b54d1..74f271a172dd 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -191,8 +191,6 @@ __nf_conntrack_find(struct net *net, u16 zone,
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
-void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
-
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num, struct nf_conntrack_tuple *tuple);
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3ae969e3acf0..9eaaa7884586 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -530,6 +530,8 @@ enum nft_chain_type {
int nft_chain_validate_dependency(const struct nft_chain *chain,
enum nft_chain_type type);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+ unsigned int hook_flags);
struct nft_stats {
u64 bytes;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 64158353ecb2..e010ee8da41d 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -490,14 +490,10 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
* Corrects the netlink message header to include the appeneded
* attributes. Only necessary if attributes have been added to
* the message.
- *
- * Returns the total data length of the skb.
*/
-static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
-
- return skb->len;
}
/**
@@ -520,8 +516,10 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
*/
static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
{
- if (mark)
+ if (mark) {
+ WARN_ON((unsigned char *) mark < skb->data);
skb_trim(skb, (unsigned char *) mark - skb->data);
+ }
}
/**
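
The netlink.h hunks above make nlmsg_end() return void, since the length is simply the tail pointer minus the header pointer, and add a WARN_ON() to nlmsg_trim() so a mark before the start of the buffer is caught. A userspace sketch of both operations on a flat buffer, with assert() playing the role of WARN_ON(); the types and names are invented:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_hdr {
	uint32_t len;			/* filled in by demo_msg_end() */
	uint16_t type;
};

struct demo_buf {
	unsigned char data[256];
	unsigned char *tail;
};

static void demo_msg_end(struct demo_buf *b, struct demo_hdr *h)
{
	/* everything appended since the header belongs to this message */
	h->len = (uint32_t)(b->tail - (unsigned char *)h);
}

static void demo_msg_trim(struct demo_buf *b, unsigned char *mark)
{
	assert(mark >= b->data);	/* analogue of the new WARN_ON() */
	b->tail = mark;
}

int main(void)
{
	struct demo_buf b;
	struct demo_hdr *h;

	b.tail = b.data;
	h = (struct demo_hdr *)b.tail;		/* "put" a message header */
	h->type = 1;
	b.tail += sizeof(*h);
	memcpy(b.tail, "attr", 4);		/* append an attribute */
	b.tail += 4;

	demo_msg_end(&b, h);
	printf("message length = %u\n", h->len);

	demo_msg_trim(&b, (unsigned char *)h);	/* roll the message back */
	return 0;
}
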
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 24945cefc4fd..dbe225478adb 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -48,10 +48,12 @@ struct netns_ipv4 {
struct hlist_head *fib_table_hash;
struct sock *fibnl;
- struct sock **icmp_sk;
+ struct sock * __percpu *icmp_sk;
+
struct inet_peer_base *peers;
struct tcpm_hash_bucket *tcp_metrics_hash;
unsigned int tcp_metrics_hash_log;
+ struct sock * __percpu *tcp_sk;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *iptable_filter;
@@ -80,6 +82,8 @@ struct netns_ipv4 {
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_base_mss;
struct ping_group_range ping_group_range;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 14bd0e1c47fa..ab672b537dd4 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -51,8 +51,10 @@ struct nfc_hci_ops {
int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
int (*check_presence)(struct nfc_hci_dev *hdev,
struct nfc_target *target);
- int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ int (*event_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
struct sk_buff *skb);
+ void (*cmd_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
@@ -63,8 +65,10 @@ struct nfc_hci_ops {
};
/* Pipes */
-#define NFC_HCI_INVALID_PIPE 0x80
#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81
+#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_INVALID_GATE 0xFF
+#define NFC_HCI_INVALID_HOST 0x80
#define NFC_HCI_LINK_MGMT_PIPE 0x00
#define NFC_HCI_ADMIN_PIPE 0x01
@@ -73,7 +77,13 @@ struct nfc_hci_gate {
u8 pipe;
};
+struct nfc_hci_pipe {
+ u8 gate;
+ u8 dest_host;
+};
+
#define NFC_HCI_MAX_CUSTOM_GATES 50
+#define NFC_HCI_MAX_PIPES 127
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
@@ -125,6 +135,7 @@ struct nfc_hci_dev {
void *clientdata;
u8 gate2pipe[NFC_HCI_MAX_GATES];
+ struct nfc_hci_pipe pipes[NFC_HCI_MAX_PIPES];
u8 sw_romlib;
u8 sw_patch;
@@ -167,6 +178,8 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
int nfc_hci_result_to_errno(u8 result);
+void nfc_hci_reset_pipes(struct nfc_hci_dev *dev);
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host);
/* Host IDs */
#define NFC_HCI_HOST_CONTROLLER_ID 0x00
@@ -219,6 +232,12 @@ int nfc_hci_result_to_errno(u8 result);
#define NFC_HCI_EVT_POST_DATA 0x02
#define NFC_HCI_EVT_HOT_PLUG 0x03
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER 0x01
+#define NFC_HCI_ANY_GET_PARAMETER 0x02
+#define NFC_HCI_ANY_OPEN_PIPE 0x03
+#define NFC_HCI_ANY_CLOSE_PIPE 0x04
+
/* Reader RF gates events */
#define NFC_HCI_EVT_READER_REQUESTED 0x10
#define NFC_HCI_EVT_END_OPERATION 0x11
@@ -249,8 +268,6 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
const u8 *param, size_t param_len,
data_exchange_cb_t cb, void *cb_context);
-int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
- const u8 *param, size_t param_len);
int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index e7257a4653b4..a2f2f3d3196d 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -62,6 +62,25 @@
#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2
#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3
+/* NFCEE Interface/Protocols */
+#define NCI_NFCEE_INTERFACE_APDU 0x00
+#define NCI_NFCEE_INTERFACE_HCI_ACCESS 0x01
+#define NCI_NFCEE_INTERFACE_TYPE3_CMD_SET 0x02
+#define NCI_NFCEE_INTERFACE_TRANSPARENT 0x03
+
+/* Destination type */
+#define NCI_DESTINATION_NFCC_LOOPBACK 0x01
+#define NCI_DESTINATION_REMOTE_NFC_ENDPOINT 0x02
+#define NCI_DESTINATION_NFCEE 0x03
+
+/* Destination-specific parameters type */
+#define NCI_DESTINATION_SPECIFIC_PARAM_RF_TYPE 0x00
+#define NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE 0x01
+
+/* NFCEE Discovery Action */
+#define NCI_NFCEE_DISCOVERY_ACTION_DISABLE 0x00
+#define NCI_NFCEE_DISCOVERY_ACTION_ENABLE 0x01
+
/* NCI RF Technology and Mode */
#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
#define NCI_NFC_B_PASSIVE_POLL_MODE 0x01
@@ -224,6 +243,28 @@ struct nci_core_set_config_cmd {
struct set_config_param param; /* support 1 param per cmd is enough */
} __packed;
+#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
+#define DEST_SPEC_PARAMS_ID_INDEX 0
+#define DEST_SPEC_PARAMS_PROTOCOL_INDEX 1
+struct dest_spec_params {
+ __u8 id;
+ __u8 protocol;
+} __packed;
+
+struct core_conn_create_dest_spec_params {
+ __u8 type;
+ __u8 length;
+ __u8 value[0];
+} __packed;
+
+struct nci_core_conn_create_cmd {
+ __u8 destination_type;
+ __u8 number_destination_params;
+ struct core_conn_create_dest_spec_params params[0];
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05)
+
#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
struct disc_map_config {
__u8 rf_protocol;
@@ -260,6 +301,19 @@ struct nci_rf_deactivate_cmd {
__u8 type;
} __packed;
+#define NCI_OP_NFCEE_DISCOVER_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_cmd {
+ __u8 discovery_action;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
+#define NCI_NFCEE_DISABLE 0x00
+#define NCI_NFCEE_ENABLE 0x01
+struct nci_nfcee_mode_set_cmd {
+ __u8 nfcee_id;
+ __u8 nfcee_mode;
+} __packed;
+
/* ----------------------- */
/* ---- NCI Responses ---- */
/* ----------------------- */
@@ -295,6 +349,16 @@ struct nci_core_set_config_rsp {
__u8 params_id[0]; /* variable size array */
} __packed;
+#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
+struct nci_core_conn_create_rsp {
+ __u8 status;
+ __u8 max_ctrl_pkt_payload_len;
+ __u8 credits_cnt;
+ __u8 conn_id;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x05)
+
#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -303,6 +367,13 @@ struct nci_core_set_config_rsp {
#define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+#define NCI_OP_NFCEE_DISCOVER_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_rsp {
+ __u8 status;
+ __u8 num_nfcee;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
/* --------------------------- */
/* ---- NCI Notifications ---- */
/* --------------------------- */
@@ -430,4 +501,30 @@ struct nci_rf_deactivate_ntf {
__u8 reason;
} __packed;
+#define NCI_OP_RF_NFCEE_ACTION_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x09)
+struct nci_rf_nfcee_action_ntf {
+ __u8 nfcee_id;
+ __u8 trigger;
+ __u8 supported_data_length;
+ __u8 supported_data[0];
+} __packed;
+
+#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_supported_protocol {
+ __u8 num_protocol;
+ __u8 supported_protocol[0];
+} __packed;
+
+struct nci_nfcee_information_tlv {
+ __u8 num_tlv;
+ __u8 information_tlv[0];
+} __packed;
+
+struct nci_nfcee_discover_ntf {
+ __u8 nfcee_id;
+ __u8 nfcee_status;
+ struct nci_nfcee_supported_protocol supported_protocols;
+ struct nci_nfcee_information_tlv information_tlv;
+} __packed;
+
#endif /* __NCI_H */
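
Several of the new NCI structures above end in a zero-length array inside a __packed struct, so a command is laid out as a fixed header followed immediately by its variable payload. A minimal userspace sketch of building such a message with a C99 flexible array member; the field values are made up and are not real NCI parameters:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct demo_tlv {
	uint8_t type;
	uint8_t length;
	uint8_t value[];	/* payload follows the header directly */
} __attribute__((packed));

int main(void)
{
	const uint8_t payload[] = { 0x01, 0x81 };	/* illustrative bytes */
	struct demo_tlv *tlv = malloc(sizeof(*tlv) + sizeof(payload));

	if (!tlv)
		return 1;
	tlv->type = 0x01;
	tlv->length = sizeof(payload);
	memcpy(tlv->value, payload, sizeof(payload));

	printf("wire size = %zu bytes\n", sizeof(*tlv) + tlv->length);
	free(tlv);
	return 0;
}
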
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 9e51bb4d841e..ff87f8611fa3 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -78,15 +78,107 @@ struct nci_ops {
int (*se_io)(struct nci_dev *ndev, u32 se_idx,
u8 *apdu, size_t apdu_length,
se_io_cb_t cb, void *cb_context);
+ int (*hci_load_session)(struct nci_dev *ndev);
+ void (*hci_event_received)(struct nci_dev *ndev, u8 pipe, u8 event,
+ struct sk_buff *skb);
+ void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
};
#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
#define NCI_MAX_DISCOVERED_TARGETS 10
+#define NCI_MAX_NUM_NFCEE 255
+#define NCI_MAX_CONN_ID 7
+
+struct nci_conn_info {
+ struct list_head list;
+ __u8 id; /* can be an RF Discovery ID or an NFCEE ID */
+ __u8 conn_id;
+ __u8 max_pkt_payload_len;
+
+ atomic_t credits_cnt;
+ __u8 initial_num_credits;
+
+ data_exchange_cb_t data_exchange_cb;
+ void *data_exchange_cb_context;
+
+ struct sk_buff *rx_skb;
+};
+
+#define NCI_INVALID_CONN_ID 0x80
+
+#define NCI_HCI_ANY_OPEN_PIPE 0x03
+
+/* Gates */
+#define NCI_HCI_ADMIN_GATE 0x00
+#define NCI_HCI_LINK_MGMT_GATE 0x06
+
+/* Pipes */
+#define NCI_HCI_LINK_MGMT_PIPE 0x00
+#define NCI_HCI_ADMIN_PIPE 0x01
+
+/* Generic responses */
+#define NCI_HCI_ANY_OK 0x00
+#define NCI_HCI_ANY_E_NOT_CONNECTED 0x01
+#define NCI_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
+#define NCI_HCI_ANY_E_NOK 0x03
+#define NCI_HCI_ANY_E_PIPES_FULL 0x04
+#define NCI_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
+#define NCI_HCI_ANY_E_PIPE_NOT_OPENED 0x06
+#define NCI_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
+#define NCI_HCI_ANY_E_INHIBITED 0x08
+#define NCI_HCI_ANY_E_TIMEOUT 0x09
+#define NCI_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
+#define NCI_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
+
+#define NCI_HCI_DO_NOT_OPEN_PIPE 0x81
+#define NCI_HCI_INVALID_PIPE 0x80
+#define NCI_HCI_INVALID_GATE 0xFF
+#define NCI_HCI_INVALID_HOST 0x80
+
+#define NCI_HCI_MAX_CUSTOM_GATES 50
+#define NCI_HCI_MAX_PIPES 127
+
+struct nci_hci_gate {
+ u8 gate;
+ u8 pipe;
+ u8 dest_host;
+} __packed;
+
+struct nci_hci_pipe {
+ u8 gate;
+ u8 host;
+} __packed;
+
+struct nci_hci_init_data {
+ u8 gate_count;
+ struct nci_hci_gate gates[NCI_HCI_MAX_CUSTOM_GATES];
+ char session_id[9];
+};
+
+#define NCI_HCI_MAX_GATES 256
+
+struct nci_hci_dev {
+ u8 nfcee_id;
+ struct nci_dev *ndev;
+ struct nci_conn_info *conn_info;
+
+ struct nci_hci_init_data init_data;
+ struct nci_hci_pipe pipes[NCI_HCI_MAX_PIPES];
+ u8 gate2pipe[NCI_HCI_MAX_GATES];
+ int expected_pipes;
+ int count_pipes;
+
+ struct sk_buff_head rx_hcp_frags;
+ struct work_struct msg_rx_work;
+ struct sk_buff_head msg_rx_queue;
+};
/* NCI Core structures */
struct nci_dev {
struct nfc_dev *nfc_dev;
struct nci_ops *ops;
+ struct nci_hci_dev *hci_dev;
int tx_headroom;
int tx_tailroom;
@@ -95,7 +187,10 @@ struct nci_dev {
unsigned long flags;
atomic_t cmd_cnt;
- atomic_t credits_cnt;
+ __u8 cur_conn_id;
+
+ struct list_head conn_info_list;
+ struct nci_conn_info *rf_conn_info;
struct timer_list cmd_timer;
struct timer_list data_timer;
@@ -141,13 +236,10 @@ struct nci_dev {
__u8 manufact_id;
__u32 manufact_specific_info;
- /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
- __u8 max_data_pkt_payload_size;
- __u8 initial_num_credits;
+ /* Save RF Discovery ID or NFCEE ID under conn_create */
+ __u8 cur_id;
/* stored during nci_data_exchange */
- data_exchange_cb_t data_exchange_cb;
- void *data_exchange_cb_context;
struct sk_buff *rx_data_reassembly;
/* stored during intf_activated_ntf */
@@ -163,9 +255,36 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
void nci_free_device(struct nci_dev *ndev);
int nci_register_device(struct nci_dev *ndev);
void nci_unregister_device(struct nci_dev *ndev);
+int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout);
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
+int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
+int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode);
+int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
+ u8 number_destination_params,
+ size_t params_len,
+ struct core_conn_create_dest_spec_params *params);
+int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+
+struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+ const u8 *param, size_t param_len);
+int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
+ u8 cmd, const u8 *param, size_t param_len,
+ struct sk_buff **skb);
+int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe);
+int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host,
+ u8 dest_gate, u8 pipe);
+int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len);
+int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ struct sk_buff **skb);
+int nci_hci_dev_session_init(struct nci_dev *ndev);
+
static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
unsigned int len,
gfp_t how)
@@ -200,7 +319,9 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
- int err);
+ __u8 conn_id, int err);
+void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
+
void nci_clear_target_list(struct nci_dev *ndev);
/* ----- NCI requests ----- */
@@ -209,6 +330,8 @@ void nci_clear_target_list(struct nci_dev *ndev);
#define NCI_REQ_CANCELED 2
void nci_req_complete(struct nci_dev *ndev, int result);
+struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
+ int conn_id);
/* ----- NCI status code ----- */
int nci_to_errno(__u8 code);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 12adb817c27a..73190e65d5c1 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -135,6 +135,31 @@ struct nfc_se {
u16 state;
};
+/**
+ * nfc_evt_transaction - A struct for NFC secure element event transaction.
+ *
+ * @aid: The application identifier triggering the event
+ *
+ * @aid_len: The application identifier length [5:16]
+ *
+ * @params: The application parameters transmitted during the transaction
+ *
+ * @params_len: The applications parameters length [0:255]
+ *
+ */
+#define NFC_MIN_AID_LENGTH 5
+#define NFC_MAX_AID_LENGTH 16
+#define NFC_MAX_PARAMS_LENGTH 255
+
+#define NFC_EVT_TRANSACTION_AID_TAG 0x81
+#define NFC_EVT_TRANSACTION_PARAMS_TAG 0x82
+struct nfc_evt_transaction {
+ u32 aid_len;
+ u8 aid[NFC_MAX_AID_LENGTH];
+ u8 params_len;
+ u8 params[NFC_MAX_PARAMS_LENGTH];
+} __packed;
+
struct nfc_genl_data {
u32 poll_req_portid;
struct mutex genl_data_mutex;
@@ -262,6 +287,8 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
void nfc_driver_failure(struct nfc_dev *dev, int err);
+int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx,
+ struct nfc_evt_transaction *evt_transaction);
int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 6dbd406ca41b..f8b5bc997959 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -82,7 +82,7 @@ enum nl802154_attrs {
NL802154_ATTR_TX_POWER,
NL802154_ATTR_CCA_MODE,
- NL802154_ATTR_CCA_MODE3_AND,
+ NL802154_ATTR_CCA_OPT,
NL802154_ATTR_CCA_ED_LEVEL,
NL802154_ATTR_MAX_FRAME_RETRIES,
@@ -119,4 +119,47 @@ enum nl802154_iftype {
NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
};
+/**
+ * enum nl802154_cca_modes - cca modes
+ *
+ * @__NL802154_CCA_INVALID: cca mode number 0 is reserved
+ * @NL802154_CCA_ENERGY: Energy above threshold
+ * @NL802154_CCA_CARRIER: Carrier sense only
+ * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
+ * @NL802154_CCA_ALOHA: CCA shall always report an idle medium
+ * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
+ * @NL802154_CCA_UWB_MULTIPEXED: UWB preamble sense based on the packet with
+ * the multiplexed preamble
+ * @__NL802154_CCA_ATTR_AFTER_LAST: Internal
+ * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
+ */
+enum nl802154_cca_modes {
+ __NL802154_CCA_INVALID,
+ NL802154_CCA_ENERGY,
+ NL802154_CCA_CARRIER,
+ NL802154_CCA_ENERGY_CARRIER,
+ NL802154_CCA_ALOHA,
+ NL802154_CCA_UWB_SHR,
+ NL802154_CCA_UWB_MULTIPEXED,
+
+ /* keep last */
+ __NL802154_CCA_ATTR_AFTER_LAST,
+ NL802154_CCA_ATTR_MAX = __NL802154_CCA_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_cca_opts - additional options for cca modes
+ *
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_OR: NL802154_CCA_ENERGY_CARRIER with OR
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_AND: NL802154_CCA_ENERGY_CARRIER with AND
+ */
+enum nl802154_cca_opts {
+ NL802154_CCA_OPT_ENERGY_CARRIER_AND,
+ NL802154_CCA_OPT_ENERGY_CARRIER_OR,
+
+ /* keep last */
+ __NL802154_CCA_OPT_ATTR_AFTER_LAST,
+ NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
+};
+
#endif /* __NL802154_H */
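
The two new nl802154 enums above use the netlink "keep last" idiom: a private sentinel closes the list and the public *_MAX is derived from it, so adding a value before the sentinel updates the maximum automatically. The same idiom in isolation, with DEMO_* names only:

#include <stdio.h>

enum demo_cca_modes {
	__DEMO_CCA_INVALID,	/* value 0 is reserved */
	DEMO_CCA_ENERGY,
	DEMO_CCA_CARRIER,
	DEMO_CCA_ENERGY_CARRIER,

	/* keep last */
	__DEMO_CCA_AFTER_LAST,
	DEMO_CCA_MAX = __DEMO_CCA_AFTER_LAST - 1
};

int main(void)
{
	/* Adding a new mode before the sentinel bumps DEMO_CCA_MAX for free. */
	printf("highest valid cca mode = %d\n", DEMO_CCA_MAX);
	return 0;
}
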
diff --git a/include/net/ping.h b/include/net/ping.h
index f074060bc5de..cc16d413f681 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -59,7 +59,7 @@ extern struct pingv6_ops pingv6_ops;
struct pingfakehdr {
struct icmphdr icmph;
- struct iovec *iov;
+ struct msghdr *msg;
sa_family_t family;
__wsum wcheck;
};
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 27a33833ff4a..2342bf12cb78 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -3,6 +3,7 @@
#include <linux/jiffies.h>
#include <linux/ktime.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
struct qdisc_walker {
@@ -114,6 +115,17 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res);
+static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
+{
+ /* We need to take extra care in case the skb came via
+ * vlan accelerated path. In that case, use skb->vlan_proto
+ * as the original vlan header was already stripped.
+ */
+ if (skb_vlan_tag_present(skb))
+ return skb->vlan_proto;
+ return skb->protocol;
+}
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
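
tc_skb_protocol(), added above, exists because the VLAN-accelerated receive path strips the tag and leaves the original EtherType in skb->vlan_proto rather than skb->protocol. A userspace sketch of that selection logic with a stand-in struct; none of these fields are the real sk_buff layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_skb {
	uint16_t protocol;	/* what the payload now starts with, e.g. 0x0800 */
	uint16_t vlan_proto;	/* original EtherType, e.g. 0x8100 */
	bool vlan_tag_present;	/* set by the accelerated receive path */
};

static uint16_t demo_skb_protocol(const struct demo_skb *skb)
{
	if (skb->vlan_tag_present)
		return skb->vlan_proto;
	return skb->protocol;
}

int main(void)
{
	struct demo_skb plain = { .protocol = 0x0800 };
	struct demo_skb tagged = { .protocol = 0x0800, .vlan_proto = 0x8100,
				   .vlan_tag_present = true };

	printf("plain:  0x%04x\n", demo_skb_protocol(&plain));
	printf("tagged: 0x%04x\n", demo_skb_protocol(&tagged));
	return 0;
}
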
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index b776d72d84be..ebc5a2ed8631 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -147,6 +147,24 @@ struct regulatory_request {
* NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
* NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
* includes any modes unsupported for enforcement checking.
+ * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
+ * regdom management. These devices will ignore all regdom changes not
+ * originating from their own wiphy.
+ * A self-managed wiphys only employs regulatory information obtained from
+ * the FW and driver and does not use other cfg80211 sources like
+ * beacon-hints, country-code IEs and hints from other devices on the same
+ * system. Conversely, a self-managed wiphy does not share its regulatory
+ * hints with other devices in the system. If a system contains several
+ * devices, one or more of which are self-managed, there might be
+ * contradictory regulatory settings between them. Usage of flag is
+ * generally discouraged. Only use it if the FW/driver is incompatible
+ * with non-locally originated hints.
+ * This flag is incompatible with the flags: %REGULATORY_CUSTOM_REG,
+ * %REGULATORY_STRICT_REG, %REGULATORY_COUNTRY_IE_FOLLOW_POWER,
+ * %REGULATORY_COUNTRY_IE_IGNORE and %REGULATORY_DISABLE_BEACON_HINTS.
+ * Mixing any of the above flags with this flag will result in a failure
+ * to register the wiphy. This flag implies
+ * %REGULATORY_DISABLE_BEACON_HINTS and %REGULATORY_COUNTRY_IE_IGNORE.
*/
enum ieee80211_regulatory_flags {
REGULATORY_CUSTOM_REG = BIT(0),
@@ -156,6 +174,7 @@ enum ieee80211_regulatory_flags {
REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
+ REGULATORY_WIPHY_SELF_MANAGED = BIT(7),
};
struct ieee80211_freq_range {
diff --git a/include/net/route.h b/include/net/route.h
index b17cf28f996e..fe22d03afb6a 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -46,6 +46,7 @@
struct fib_nh;
struct fib_info;
+struct uncached_list;
struct rtable {
struct dst_entry dst;
@@ -64,6 +65,7 @@ struct rtable {
u32 rt_pmtu;
struct list_head rt_uncached;
+ struct uncached_list *rt_uncached_list;
};
static inline bool rt_is_input_route(const struct rtable *rt)
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e21b9f9653c0..6c6d5393fc34 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -46,6 +46,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* to create when creating a new device.
* @get_num_rx_queues: Function to determine number of receive queues
* to create when creating a new device.
+ * @get_link_net: Function to get the i/o netns of the device
*/
struct rtnl_link_ops {
struct list_head list;
@@ -93,6 +94,7 @@ struct rtnl_link_ops {
int (*fill_slave_info)(struct sk_buff *skb,
const struct net_device *dev,
const struct net_device *slave_dev);
+ struct net *(*get_link_net)(const struct net_device *dev);
};
int __rtnl_link_register(struct rtnl_link_ops *ops);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d282cbb66bf..c605d305c577 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct gnet_stats_rate_est64 rate_est;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+
struct Qdisc *next_sched;
struct sk_buff *gso_skb;
/*
@@ -86,15 +89,9 @@ struct Qdisc {
*/
unsigned long state;
struct sk_buff_head q;
- union {
- struct gnet_stats_basic_packed bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- } __packed;
+ struct gnet_stats_basic_packed bstats;
unsigned int __state;
- union {
- struct gnet_stats_queue qstats;
- struct gnet_stats_queue __percpu *cpu_qstats;
- } __packed;
+ struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
diff --git a/include/net/sock.h b/include/net/sock.h
index 2210fec65669..ab186b1d31ff 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -857,18 +857,6 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
#endif
}
-static inline void sock_rps_reset_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
- struct rps_sock_flow_table *sock_flow_table;
-
- rcu_read_lock();
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_reset_sock_flow(sock_flow_table, hash);
- rcu_read_unlock();
-#endif
-}
-
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
@@ -876,28 +864,18 @@ static inline void sock_rps_record_flow(const struct sock *sk)
#endif
}
-static inline void sock_rps_reset_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
- sock_rps_reset_flow_hash(sk->sk_rxhash);
-#endif
-}
-
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != skb->hash)) {
- sock_rps_reset_flow(sk);
+ if (unlikely(sk->sk_rxhash != skb->hash))
sk->sk_rxhash = skb->hash;
- }
#endif
}
static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
- sock_rps_reset_flow(sk);
sk->sk_rxhash = 0;
#endif
}
@@ -1099,11 +1077,6 @@ static inline bool memcg_proto_active(struct cg_proto *cg_proto)
return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}
-static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
-{
- return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
-}
-
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
@@ -1374,29 +1347,6 @@ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
#define SOCK_BINDADDR_LOCK 4
#define SOCK_BINDPORT_LOCK 8
-/* sock_iocb: used to kick off async processing of socket ios */
-struct sock_iocb {
- struct list_head list;
-
- int flags;
- int size;
- struct socket *sock;
- struct sock *sk;
- struct scm_cookie *scm;
- struct msghdr *msg, async_msg;
- struct kiocb *kiocb;
-};
-
-static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
-{
- return (struct sock_iocb *)iocb->private;
-}
-
-static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
-{
- return si->kiocb;
-}
-
struct socket_alloc {
struct socket socket;
struct inode vfs_inode;
@@ -1826,27 +1776,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
}
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
- char __user *from, char *to,
+ struct iov_iter *from, char *to,
int copy, int offset)
{
if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
- if (err)
- return err;
+ __wsum csum = 0;
+ if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+ return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, offset);
} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
- if (!access_ok(VERIFY_READ, from, copy) ||
- __copy_from_user_nocache(to, from, copy))
+ if (copy_from_iter_nocache(to, copy, from) != copy)
return -EFAULT;
- } else if (copy_from_user(to, from, copy))
+ } else if (copy_from_iter(to, copy, from) != copy)
return -EFAULT;
return 0;
}
static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
- char __user *from, int copy)
+ struct iov_iter *from, int copy)
{
int err, offset = skb->len;
@@ -1858,7 +1806,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
return err;
}
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
struct sk_buff *skb,
struct page *page,
int off, int copy)
@@ -2262,6 +2210,7 @@ bool sk_net_capable(const struct sock *sk, int cap);
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
+extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
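
The sock.h copy helpers above switch from char __user pointers to struct iov_iter and adopt the convention that a copy returning fewer bytes than requested means -EFAULT. A self-contained userspace sketch of that convention over a plain struct iovec walk; demo_iter is an invented stand-in for iov_iter:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct demo_iter {
	const struct iovec *iov;
	int nr_segs;
	size_t seg_off;
};

static size_t demo_copy_from_iter(void *to, size_t len, struct demo_iter *it)
{
	size_t copied = 0;

	while (copied < len && it->nr_segs) {
		size_t avail = it->iov->iov_len - it->seg_off;
		size_t chunk = avail < len - copied ? avail : len - copied;

		memcpy((char *)to + copied,
		       (char *)it->iov->iov_base + it->seg_off, chunk);
		copied += chunk;
		it->seg_off += chunk;
		if (it->seg_off == it->iov->iov_len) {
			it->iov++;
			it->nr_segs--;
			it->seg_off = 0;
		}
	}
	return copied;
}

static int demo_add_data(char *to, struct demo_iter *it, size_t copy)
{
	if (demo_copy_from_iter(to, copy, it) != copy)
		return -EFAULT;		/* short copy, caller bails out */
	return 0;
}

int main(void)
{
	char a[] = "hel", b[] = "lo";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = 3 },
		{ .iov_base = b, .iov_len = 2 },
	};
	struct demo_iter it = { .iov = iov, .nr_segs = 2 };
	char out[8] = { 0 };

	printf("ret=%d out=%s\n", demo_add_data(out, &it, 5), out);
	printf("ret=%d (iterator exhausted)\n", demo_add_data(out, &it, 1));
	return 0;
}
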
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 8a6d1641fd9b..cfcdac2e5d25 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -11,13 +11,46 @@
#define _LINUX_SWITCHDEV_H_
#include <linux/netdevice.h>
+#include <linux/notifier.h>
+
+enum netdev_switch_notifier_type {
+ NETDEV_SWITCH_FDB_ADD = 1,
+ NETDEV_SWITCH_FDB_DEL,
+};
+
+struct netdev_switch_notifier_info {
+ struct net_device *dev;
+};
+
+struct netdev_switch_notifier_fdb_info {
+ struct netdev_switch_notifier_info info; /* must be first */
+ const unsigned char *addr;
+ u16 vid;
+};
+
+static inline struct net_device *
+netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+{
+ return info->dev;
+}
#ifdef CONFIG_NET_SWITCHDEV
int netdev_switch_parent_id_get(struct net_device *dev,
struct netdev_phys_item_id *psid);
int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-
+int register_netdev_switch_notifier(struct notifier_block *nb);
+int unregister_netdev_switch_notifier(struct notifier_block *nb);
+int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+ struct netdev_switch_notifier_info *info);
+int netdev_switch_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int netdev_switch_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
#else
static inline int netdev_switch_parent_id_get(struct net_device *dev,
@@ -32,6 +65,50 @@ static inline int netdev_switch_port_stp_update(struct net_device *dev,
return -EOPNOTSUPP;
}
+static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+ struct netdev_switch_notifier_info *info)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return 0;
+}
+
+static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return 0;
+}
+
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
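
The new switchdev notifier structs above rely on the embedded-first-member pattern: netdev_switch_notifier_fdb_info starts with the generic info struct, so the notifier passes a generic pointer and the handler converts it back. A compact userspace sketch of that pattern; the struct names and the fake callback are illustrative:

#include <stdio.h>

struct demo_notifier_info {
	const char *dev_name;		/* stands in for struct net_device * */
};

struct demo_fdb_info {
	struct demo_notifier_info info;	/* must be first */
	const unsigned char *addr;
	unsigned short vid;
};

static void demo_notifier_cb(unsigned long event,
			     struct demo_notifier_info *info)
{
	/* Safe only because info is the first member of demo_fdb_info. */
	struct demo_fdb_info *fdb = (struct demo_fdb_info *)info;

	printf("event %lu on %s: vid %u, addr starts %02x\n",
	       event, info->dev_name, (unsigned int)fdb->vid,
	       (unsigned int)fdb->addr[0]);
}

int main(void)
{
	static const unsigned char mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
	struct demo_fdb_info fdb = {
		.info.dev_name = "sw0p1",
		.addr = mac,
		.vid = 100,
	};

	demo_notifier_cb(1 /* FDB_ADD */, &fdb.info);
	return 0;
}
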
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
new file mode 100644
index 000000000000..86a070ffc930
--- /dev/null
+++ b/include/net/tc_act/tc_bpf.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_BPF_H
+#define __NET_TC_BPF_H
+
+#include <linux/filter.h>
+#include <net/act_api.h>
+
+struct tcf_bpf {
+ struct tcf_common common;
+ struct bpf_prog *filter;
+ struct sock_filter *bpf_ops;
+ u16 bpf_num_ops;
+};
+#define to_bpf(a) \
+ container_of(a->priv, struct tcf_bpf, common)
+
+#endif /* __NET_TC_BPF_H */
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
new file mode 100644
index 000000000000..5c1104c2e24f
--- /dev/null
+++ b/include/net/tc_act/tc_connmark.h
@@ -0,0 +1,14 @@
+#ifndef __NET_TC_CONNMARK_H
+#define __NET_TC_CONNMARK_H
+
+#include <net/act_api.h>
+
+struct tcf_connmark_info {
+ struct tcf_common common;
+ u16 zone;
+};
+
+#define to_connmark(a) \
+ container_of(a->priv, struct tcf_connmark_info, common)
+
+#endif /* __NET_TC_CONNMARK_H */
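+
Both to_bpf() above and to_connmark() here follow the same pattern: the action core hands out a struct tc_action whose ->priv points at the embedded tcf_common, and container_of() recovers the action-specific state. A hedged sketch of an .act callback using it; the tc_action/tcf_result types come from <net/act_api.h> and TC_ACT_PIPE from the pkt_cls UAPI header.

#include <net/tc_act/tc_connmark.h>

/* Sketch only: the real action would restore the conntrack mark for
 * zone ci->zone onto skb->mark before letting the packet continue. */
static int example_connmark_act(struct sk_buff *skb, const struct tc_action *a,
				struct tcf_result *res)
{
	struct tcf_connmark_info *ci = to_connmark(a);

	(void)ci;
	return TC_ACT_PIPE;
}
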
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f50f29faf76f..8d6b983d5099 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -262,8 +262,6 @@ extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_mtu_probing;
-extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
@@ -274,6 +272,7 @@ extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_autocorking;
+extern int sysctl_tcp_invalid_ratelimit;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -448,6 +447,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(struct sock *sk,
struct request_sock *req,
struct sk_buff *skb);
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
@@ -636,6 +636,11 @@ static inline u32 tcp_rto_min_us(struct sock *sk)
return jiffies_to_usecs(tcp_rto_min(sk));
}
+static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
+{
+ return dst_metric_locked(dst, RTAX_CC_ALGO);
+}
+
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
@@ -787,6 +792,8 @@ enum tcp_ca_ack_event_flags {
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+#define TCP_CA_UNSPEC 0
+
/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
@@ -794,7 +801,8 @@ enum tcp_ca_ack_event_flags {
struct tcp_congestion_ops {
struct list_head list;
- unsigned long flags;
+ u32 key;
+ u32 flags;
/* initialize private data (optional) */
void (*init)(struct sock *sk);
@@ -834,13 +842,24 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
-void tcp_slow_start(struct tcp_sock *tp, u32 acked);
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+u32 tcp_ca_get_key_by_name(const char *name);
+#ifdef CONFIG_INET
+char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+#else
+static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+ return NULL;
+}
+#endif
+
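+
The re-typed helpers compose as in Reno: tcp_slow_start() now returns the ACKs left over once cwnd reaches ssthresh, and tcp_cong_avoid_ai() consumes that acked count in the additive-increase phase. A sketch of a congestion module's .cong_avoid using the new signatures, modelled on tcp_reno_cong_avoid():

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		/* Slow start: consume ACKs up to ssthresh, keep the rest */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* Congestion avoidance: one MSS per window of snd_cwnd ACKs */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
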
static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1124,6 +1143,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
tcp_rsk(req)->snt_synack = tcp_time_stamp;
+ tcp_rsk(req)->last_oow_ack_time = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1216,6 +1236,37 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
return true;
}
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+static inline bool tcp_oow_rate_limited(struct net *net,
+ const struct sk_buff *skb,
+ int mib_idx, u32 *last_oow_ack_time)
+{
+ /* Data packets without SYNs are not likely part of an ACK loop. */
+ if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+ !tcp_hdr(skb)->syn)
+ goto not_rate_limited;
+
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+ NET_INC_STATS_BH(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+ return false; /* not rate-limited: go ahead, send dupack now! */
+}
+
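+
Callers keep a per-connection last_oow_ack_time and consult the helper before emitting a dupack; tcp_openreq_init() above seeds it for request sockets, and the companion tcp_sock change in this series is assumed to add the same field to full sockets. A hedged usage sketch, where mib_idx stands for whichever LINUX_MIB_TCPACKSKIPPED* counter the call site wants bumped on suppression:

static bool example_dupack_suppressed(struct net *net, struct sock *sk,
				      const struct sk_buff *skb, int mib_idx)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Returns true (and bumps mib_idx) when the dupack must be skipped */
	return tcp_oow_rate_limited(net, skb, mib_idx,
				    &tp->last_oow_ack_time);
}
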
static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
@@ -1693,4 +1744,19 @@ static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
return dopt;
}
+/* Locally generated TCP pure ACKs have skb->truesize == 2
+ * (check tcp_send_ack() in net/ipv4/tcp_output.c)
+ * This is much faster than dissecting the packet to find out.
+ * (Think of GRE encapsulations, IPv4, IPv6, ...)
+ */
+static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
+{
+ return skb->truesize == 2;
+}
+
+static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
+{
+ skb->truesize = 2;
+}
+
#endif /* _TCP_H */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 2a50a70ef587..1a20d33d56bc 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -77,17 +77,17 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct udp_tunnel_sock_cfg *sock_cfg);
/* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst,
- __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
- __be16 dst_port, bool xnet);
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+ __be16 df, __be16 src_port, __be16 dst_port,
+ bool xnet, bool nocheck);
#if IS_ENABLED(CONFIG_IPV6)
-int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
- struct sk_buff *skb, struct net_device *dev,
- struct in6_addr *saddr, struct in6_addr *daddr,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be16 src_port,
- __be16 dst_port);
+ __be16 dst_port, bool nocheck);
#endif
void udp_tunnel_sock_release(struct socket *sock);
diff --git a/include/net/udplite.h b/include/net/udplite.h
index ae7c8d1fbcad..80761938b9a7 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -20,8 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
struct msghdr *msg = from;
- /* XXX: stripping const */
- return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
+ return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
}
/* Designate sk as UDP-Lite socket */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 903461aa5644..2927d6244481 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -11,14 +11,96 @@
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
-/* VXLAN protocol header */
+/*
+ * VXLAN Group Based Policy Extension:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * D = Don't Learn bit. When set, this bit indicates that the egress
+ * VTEP MUST NOT learn the source address of the encapsulated frame.
+ *
+ * A = Indicates that the group policy has already been applied to
+ * this packet. Policies MUST NOT be applied by devices when the
+ * A bit is set.
+ *
+ * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ */
+struct vxlanhdr_gbp {
+ __u8 vx_flags;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ __u8 reserved_flags1:3,
+ policy_applied:1,
+ reserved_flags2:2,
+ dont_learn:1,
+ reserved_flags3:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 reserved_flags1:1,
+ dont_learn:1,
+ reserved_flags2:2,
+ policy_applied:1,
+ reserved_flags3:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 policy_id;
+ __be32 vx_vni;
+};
+
+#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF)
+
+/* skb->mark mapping
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|R|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define VXLAN_GBP_DONT_LEARN (BIT(6) << 16)
+#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
+#define VXLAN_GBP_ID_MASK (0xFFFF)
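+
The skb->mark layout above is deliberately bit-compatible with the GBP header, so translating between the two needs only the three masks. A minimal sketch of the conversion the vxlan driver's GBP build/parse helpers are expected to perform:

/* skb->mark -> GBP extension header (sketch) */
static void example_mark_to_gbp(struct vxlanhdr_gbp *gbp, u32 mark)
{
	gbp->dont_learn     = !!(mark & VXLAN_GBP_DONT_LEARN);
	gbp->policy_applied = !!(mark & VXLAN_GBP_POLICY_APPLIED);
	gbp->policy_id      = htons(mark & VXLAN_GBP_ID_MASK);
}

/* GBP extension header -> skb->mark (sketch) */
static u32 example_gbp_to_mark(const struct vxlanhdr_gbp *gbp)
{
	u32 mark = ntohs(gbp->policy_id) & VXLAN_GBP_ID_MASK;

	if (gbp->dont_learn)
		mark |= VXLAN_GBP_DONT_LEARN;
	if (gbp->policy_applied)
		mark |= VXLAN_GBP_POLICY_APPLIED;
	return mark;
}
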
+
+/* VXLAN protocol header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |G|R|R|R|I|R|R|C| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * G = 1 Group Policy (VXLAN-GBP)
+ * I = 1 VXLAN Network Identifier (VNI) present
+ * C = 1 Remote checksum offload (RCO)
+ */
struct vxlanhdr {
__be32 vx_flags;
__be32 vx_vni;
};
+/* VXLAN header flags. */
+#define VXLAN_HF_RCO BIT(24)
+#define VXLAN_HF_VNI BIT(27)
+#define VXLAN_HF_GBP BIT(31)
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK 0x7f /* Last byte of vni field */
+#define VXLAN_RCO_UDP 0x80 /* Indicate UDP RCO (TCP when not set) */
+#define VXLAN_RCO_SHIFT 1 /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
+struct vxlan_metadata {
+ __be32 vni;
+ u32 gbp;
+};
+
struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
+ struct vxlan_metadata *md);
/* per UDP socket information */
struct vxlan_sock {
@@ -31,6 +113,7 @@ struct vxlan_sock {
struct hlist_head vni_list[VNI_HASH_SIZE];
atomic_t refcnt;
struct udp_offload udp_offloads;
+ u32 flags;
};
#define VXLAN_F_LEARN 0x01
@@ -42,6 +125,16 @@ struct vxlan_sock {
#define VXLAN_F_UDP_CSUM 0x40
#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
+#define VXLAN_F_REMCSUM_TX 0x200
+#define VXLAN_F_REMCSUM_RX 0x400
+#define VXLAN_F_GBP 0x800
+
+/* Flags that are used in the receive path. These flags must match in
+ * order for a socket to be shareable.
+ */
+#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_REMCSUM_RX)
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
vxlan_rcv_t *rcv, void *data,
@@ -49,10 +142,10 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
void vxlan_sock_release(struct vxlan_sock *vs);
-int vxlan_xmit_skb(struct vxlan_sock *vs,
- struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
+ __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
+ bool xnet, u32 vxflags);
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0d74f1de99aa..65994a19e840 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
- size_t copy_sz;
-
- copy_sz = min_t(size_t, len, udata->outlen);
- return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
+ return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
/**
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8a7f8ad58aac..d0a66aa1868d 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -195,6 +195,9 @@ enum scsi_timeouts {
#define ATA_16 0x85 /* 16-byte pass-thru */
#define ATA_12 0xa1 /* 12-byte pass-thru */
+/* Vendor specific CDBs start here */
+#define VENDOR_SPECIFIC_CDB 0xc0
+
/*
* SCSI command lengths
*/
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 7982795df595..f8170e90b49d 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -5,8 +5,11 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_sense_hdr;
+#define SCSI_LOG_BUFSIZE 128
+
extern void scsi_print_command(struct scsi_cmnd *);
-extern void __scsi_print_command(const unsigned char *, size_t);
+extern size_t __scsi_format_command(char *, size_t,
+ const unsigned char *, size_t);
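+
__scsi_print_command() gives way to a formatter that writes into a caller-supplied buffer, with SCSI_LOG_BUFSIZE above as the size the logging paths are expected to use. A hedged usage sketch from a driver's logging path:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>

static void example_log_cdb(struct scsi_cmnd *cmd)
{
	char logbuf[SCSI_LOG_BUFSIZE];

	/* Format the CDB into logbuf instead of printing it directly */
	__scsi_format_command(logbuf, sizeof(logbuf), cmd->cmnd, cmd->cmd_len);
	scmd_printk(KERN_INFO, cmd, "CDB: %s\n", logbuf);
}
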
extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
unsigned char, unsigned char);
extern void scsi_show_sense_hdr(const struct scsi_device *, const char *,
@@ -17,12 +20,73 @@ extern void scsi_print_sense(const struct scsi_cmnd *);
extern void __scsi_print_sense(const struct scsi_device *, const char *name,
const unsigned char *sense_buffer,
int sense_len);
-extern void scsi_print_result(struct scsi_cmnd *, const char *, int);
-extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
-extern const char *scsi_mlreturn_string(int);
+extern void scsi_print_result(const struct scsi_cmnd *, const char *, int);
+
+#ifdef CONFIG_SCSI_CONSTANTS
+extern bool scsi_opcode_sa_name(int, int, const char **, const char **);
extern const char *scsi_sense_key_string(unsigned char);
extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
const char **);
+extern const char *scsi_mlreturn_string(int);
+extern const char *scsi_hostbyte_string(int);
+extern const char *scsi_driverbyte_string(int);
+#else
+static inline bool
+scsi_opcode_sa_name(int cmd, int sa,
+ const char **cdb_name, const char **sa_name)
+{
+ *cdb_name = NULL;
+ switch (cmd) {
+ case VARIABLE_LENGTH_CMD:
+ case MAINTENANCE_IN:
+ case MAINTENANCE_OUT:
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ case SERVICE_ACTION_IN_12:
+ case SERVICE_ACTION_OUT_12:
+ case SERVICE_ACTION_BIDIRECTIONAL:
+ case SERVICE_ACTION_IN_16:
+ case SERVICE_ACTION_OUT_16:
+ case EXTENDED_COPY:
+ case RECEIVE_COPY_RESULTS:
+ *sa_name = NULL;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline const char *
+scsi_sense_key_string(unsigned char key)
+{
+ return NULL;
+}
+
+static inline const char *
+scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
+{
+ *fmt = NULL;
+ return NULL;
+}
+
+static inline const char *
+scsi_mlreturn_string(int result)
+{
+ return NULL;
+}
+
+static inline const char *
+scsi_hostbyte_string(int result)
+{
+ return NULL;
+}
+
+static inline const char *
+scsi_driverbyte_string(int result)
+{
+ return NULL;
+}
+
+#endif
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3a4edd1f7dbb..a4c9336811d1 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -230,9 +230,6 @@ struct scsi_dh_data {
#define transport_class_to_sdev(class_dev) \
to_scsi_device(class_dev->parent)
-#define sdev_printk(prefix, sdev, fmt, a...) \
- dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
-
#define sdev_dbg(sdev, fmt, a...) \
dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)
@@ -240,16 +237,15 @@ struct scsi_dh_data {
* like scmd_printk, but the device name is passed in
* as a string pointer
*/
-#define sdev_prefix_printk(l, sdev, p, fmt, a...) \
- (p) ? \
- sdev_printk(l, sdev, "[%s] " fmt, p, ##a) : \
- sdev_printk(l, sdev, fmt, ##a)
-
-#define scmd_printk(prefix, scmd, fmt, a...) \
- (scmd)->request->rq_disk ? \
- sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \
- (scmd)->request->rq_disk->disk_name, ##a) : \
- sdev_printk(prefix, (scmd)->device, fmt, ##a)
+__printf(4, 5) void
+sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
+ const char *, ...);
+
+#define sdev_printk(l, sdev, fmt, a...) \
+ sdev_prefix_printk(l, sdev, NULL, fmt, ##a)
+
+__printf(3, 4) void
+scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
#define scmd_dbg(scmd, fmt, a...) \
do { \
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 019e66858ce6..e113c757d555 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -402,6 +402,9 @@ struct scsi_host_template {
*/
unsigned char present;
+ /* If the block layer is used to manage tags, this is the tag allocation policy */
+ int tag_alloc_policy;
+
/*
 * Let the block layer assign tags to all commands.
*/
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 9708b28bd2aa..b27977e8aaed 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -66,7 +66,8 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
* devices on the shared host (for libata)
*/
if (!shost->bqt) {
- shost->bqt = blk_init_tags(depth);
+ shost->bqt = blk_init_tags(depth,
+ shost->hostt->tag_alloc_policy);
if (!shost->bqt)
return -ENOMEM;
}
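
Drivers that keep using block-layer tagging now advertise the allocation policy in their host template, and blk_init_tags() picks it up from there. A hedged sketch, assuming the BLK_TAG_ALLOC_FIFO/BLK_TAG_ALLOC_RR values exported by the block-layer side of this series:

#include <scsi/scsi_host.h>

static struct scsi_host_template example_sht = {
	.name			= "example",
	.can_queue		= 32,
	.this_id		= -1,
	/* Round-robin tag allocation; BLK_TAG_ALLOC_FIFO keeps the old
	 * lowest-free-tag behaviour. */
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
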
diff --git a/include/sound/ad1816a.h b/include/sound/ad1816a.h
index abdf609c5918..f2d3a6d07210 100644
--- a/include/sound/ad1816a.h
+++ b/include/sound/ad1816a.h
@@ -170,10 +170,9 @@ extern int snd_ad1816a_create(struct snd_card *card, unsigned long port,
int irq, int dma1, int dma2,
struct snd_ad1816a *chip);
-extern int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm);
+extern int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device);
extern int snd_ad1816a_mixer(struct snd_ad1816a *chip);
-extern int snd_ad1816a_timer(struct snd_ad1816a *chip, int device,
- struct snd_timer **rtimer);
+extern int snd_ad1816a_timer(struct snd_ad1816a *chip, int device);
#ifdef CONFIG_PM
extern void snd_ad1816a_suspend(struct snd_ad1816a *chip);
extern void snd_ad1816a_resume(struct snd_ad1816a *chip);
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
index 2609048c1d44..58c145620c3c 100644
--- a/include/sound/ak4113.h
+++ b/include/sound/ak4113.h
@@ -286,7 +286,8 @@ struct ak4113 {
ak4113_write_t *write;
ak4113_read_t *read;
void *private_data;
- unsigned int init:1;
+ atomic_t wq_processing;
+ struct mutex reinit_mutex;
spinlock_t lock;
unsigned char regmap[AK4113_WRITABLE_REGS];
struct snd_kcontrol *kctls[AK4113_CONTROLS];
@@ -317,5 +318,13 @@ int snd_ak4113_build(struct ak4113 *ak4113,
int snd_ak4113_external_rate(struct ak4113 *ak4113);
int snd_ak4113_check_rate_and_errors(struct ak4113 *ak4113, unsigned int flags);
+#ifdef CONFIG_PM
+void snd_ak4113_suspend(struct ak4113 *chip);
+void snd_ak4113_resume(struct ak4113 *chip);
+#else
+static inline void snd_ak4113_suspend(struct ak4113 *chip) {}
+static inline void snd_ak4113_resume(struct ak4113 *chip) {}
+#endif
+
#endif /* __SOUND_AK4113_H */
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 52f02a60dba7..b6feb7e225f2 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -168,7 +168,8 @@ struct ak4114 {
ak4114_write_t * write;
ak4114_read_t * read;
void * private_data;
- unsigned int init: 1;
+ atomic_t wq_processing;
+ struct mutex reinit_mutex;
spinlock_t lock;
unsigned char regmap[6];
unsigned char txcsb[5];
@@ -199,5 +200,13 @@ int snd_ak4114_build(struct ak4114 *ak4114,
int snd_ak4114_external_rate(struct ak4114 *ak4114);
int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags);
+#ifdef CONFIG_PM
+void snd_ak4114_suspend(struct ak4114 *chip);
+void snd_ak4114_resume(struct ak4114 *chip);
+#else
+static inline void snd_ak4114_suspend(struct ak4114 *chip) {}
+static inline void snd_ak4114_resume(struct ak4114 *chip) {}
+#endif
+
#endif /* __SOUND_AK4114_H */
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 396e8f73670a..f48089d364c5 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/sched.h>
+#include <sound/core.h>
#include <sound/compress_offload.h>
#include <sound/asound.h>
#include <sound/pcm.h>
@@ -134,7 +135,7 @@ struct snd_compr_ops {
/**
* struct snd_compr: Compressed device
* @name: DSP device name
- * @dev: Device pointer
+ * @dev: associated device instance
* @ops: pointer to DSP callbacks
* @private_data: pointer to DSP pvt data
* @card: sound card pointer
@@ -144,7 +145,7 @@ struct snd_compr_ops {
*/
struct snd_compr {
const char *name;
- struct device *dev;
+ struct device dev;
struct snd_compr_ops *ops;
void *private_data;
struct snd_card *card;
diff --git a/include/sound/control.h b/include/sound/control.h
index 042613938a1d..75f3054023f7 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -93,12 +93,17 @@ struct snd_kctl_event {
struct pid;
+enum {
+ SND_CTL_SUBDEV_PCM,
+ SND_CTL_SUBDEV_RAWMIDI,
+ SND_CTL_SUBDEV_ITEMS,
+};
+
struct snd_ctl_file {
struct list_head list; /* list of all control files */
struct snd_card *card;
struct pid *pid;
- int prefer_pcm_subdevice;
- int prefer_rawmidi_subdevice;
+ int preferred_subdevice[SND_CTL_SUBDEV_ITEMS];
wait_queue_head_t change_sleep;
spinlock_t read_lock;
struct fasync_struct *fasync;
@@ -138,6 +143,8 @@ int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn);
#define snd_ctl_unregister_ioctl_compat(fcn)
#endif
+int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
+
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
return id->numid - kctl->id.numid;
diff --git a/include/sound/core.h b/include/sound/core.h
index 1df3f2fe5350..da5748289968 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -109,6 +109,7 @@ struct snd_card {
private data */
struct list_head devices; /* devices */
+ struct device ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
struct rw_semaphore controls_rwsem; /* controls list lock */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
@@ -131,6 +132,7 @@ struct snd_card {
struct completion *release_completion;
struct device *dev; /* device assigned to this card */
struct device card_dev; /* cardX object for sysfs */
+ const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
#ifdef CONFIG_PM
@@ -206,43 +208,13 @@ extern struct class *sound_class;
void snd_request_card(int card);
-int snd_register_device_for_dev(int type, struct snd_card *card,
- int dev,
- const struct file_operations *f_ops,
- void *private_data,
- const char *name,
- struct device *device);
+void snd_device_initialize(struct device *dev, struct snd_card *card);
-/**
- * snd_register_device - Register the ALSA device file for the card
- * @type: the device type, SNDRV_DEVICE_TYPE_XXX
- * @card: the card instance
- * @dev: the device index
- * @f_ops: the file operations
- * @private_data: user pointer for f_ops->open()
- * @name: the device file name
- *
- * Registers an ALSA device file for the given card.
- * The operators have to be set in reg parameter.
- *
- * This function uses the card's device pointer to link to the
- * correct &struct device.
- *
- * Return: Zero if successful, or a negative error code on failure.
- */
-static inline int snd_register_device(int type, struct snd_card *card, int dev,
- const struct file_operations *f_ops,
- void *private_data,
- const char *name)
-{
- return snd_register_device_for_dev(type, card, dev, f_ops,
- private_data, name,
- snd_card_get_device_link(card));
-}
-
-int snd_unregister_device(int type, struct snd_card *card, int dev);
+int snd_register_device(int type, struct snd_card *card, int dev,
+ const struct file_operations *f_ops,
+ void *private_data, struct device *device);
+int snd_unregister_device(struct device *dev);
void *snd_lookup_minor_data(unsigned int minor, int type);
-struct device *snd_get_device(int type, struct snd_card *card, int dev);
#ifdef CONFIG_SND_OSSEMUL
int snd_register_oss_device(int type, struct snd_card *card, int dev,
@@ -291,6 +263,8 @@ void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
int snd_card_info_init(void);
int snd_card_info_done(void);
+int snd_card_add_dev_attr(struct snd_card *card,
+ const struct attribute_group *group);
int snd_component_add(struct snd_card *card, const char *component);
int snd_card_file_add(struct snd_card *card, struct file *file);
int snd_card_file_remove(struct snd_card *card, struct file *file);
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index c46908c1bb3f..0de95ccb92cf 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -33,8 +33,8 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <uapi/sound/emu10k1.h>
/* ------------------- DEFINES -------------------- */
@@ -1809,17 +1809,17 @@ int snd_emu10k1_create(struct snd_card *card,
uint subsystem,
struct snd_emu10k1 ** remu);
-int snd_emu10k1_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_emu10k1_pcm_mic(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_emu10k1_pcm_efx(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_p16v_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
+int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device);
+int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device);
+int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device);
+int snd_p16v_pcm(struct snd_emu10k1 *emu, int device);
int snd_p16v_free(struct snd_emu10k1 * emu);
int snd_p16v_mixer(struct snd_emu10k1 * emu);
-int snd_emu10k1_pcm_multi(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
+int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device);
+int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_mixer(struct snd_emu10k1 * emu, int pcm_device, int multi_device);
int snd_emu10k1_timer(struct snd_emu10k1 * emu, int device);
-int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device, struct snd_hwdep ** rhwdep);
+int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device);
irqreturn_t snd_emu10k1_interrupt(int irq, void *dev_id);
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 1d636a2d8896..b34f23a5bb74 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -115,8 +115,7 @@ int snd_es1688_create(struct snd_card *card,
int mpu_irq,
int dma8,
unsigned short hardware);
-int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device,
- struct snd_pcm **rpcm);
+int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device);
int snd_es1688_mixer(struct snd_card *card, struct snd_es1688 *chip);
int snd_es1688_reset(struct snd_es1688 *chip);
diff --git a/include/sound/gus.h b/include/sound/gus.h
index 42905d811da7..07c116fe78c8 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -27,7 +27,7 @@
#include <sound/timer.h>
#include <sound/seq_midi_emul.h>
#include <sound/seq_device.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* IO ports */
@@ -591,7 +591,7 @@ int snd_gf1_new_mixer(struct snd_gus_card * gus);
/* gus_pcm.c */
-int snd_gf1_pcm_new(struct snd_gus_card * gus, int pcm_dev, int control_index, struct snd_pcm ** rpcm);
+int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index);
#ifdef CONFIG_SND_DEBUG
extern void snd_gf1_print_voice_registers(struct snd_gus_card * gus);
@@ -620,7 +620,7 @@ void snd_gus_irq_profile_init(struct snd_gus_card *gus);
/* gus_uart.c */
-int snd_gf1_rawmidi_new(struct snd_gus_card * gus, int device, struct snd_rawmidi **rrawmidi);
+int snd_gf1_rawmidi_new(struct snd_gus_card *gus, int device);
/* gus_dram.c */
int snd_gus_dram_write(struct snd_gus_card *gus, char __user *ptr,
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ae04a3ec9c77..ab9fcb2f97f0 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -68,8 +68,7 @@ struct snd_hwdep {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_hwdep *hwdep);
- struct device *dev;
- const struct attribute_group **groups;
+ struct device dev;
struct mutex open_mutex;
int used; /* reference counter */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index b429b73e875e..c0ddb7e69c28 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -94,9 +94,6 @@ struct snd_pcm_ops {
#define SNDRV_PCM_DEVICES 8
#endif
-#define SNDRV_PCM_IOCTL1_FALSE ((void *)0)
-#define SNDRV_PCM_IOCTL1_TRUE ((void *)1)
-
#define SNDRV_PCM_IOCTL1_RESET 0
#define SNDRV_PCM_IOCTL1_INFO 1
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
@@ -109,6 +106,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 4
#define SNDRV_PCM_TRIGGER_SUSPEND 5
#define SNDRV_PCM_TRIGGER_RESUME 6
+#define SNDRV_PCM_TRIGGER_DRAIN 7
#define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
@@ -275,12 +273,19 @@ struct snd_pcm_hw_constraint_list {
unsigned int mask;
};
+struct snd_pcm_hw_constraint_ranges {
+ unsigned int count;
+ const struct snd_interval *ranges;
+ unsigned int mask;
+};
+
struct snd_pcm_hwptr_log;
struct snd_pcm_runtime {
/* -- Status -- */
struct snd_pcm_substream *trigger_master;
struct timespec trigger_tstamp; /* trigger timestamp */
+ bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */
int overrange;
snd_pcm_uframes_t avail_max;
snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
@@ -449,6 +454,7 @@ struct snd_pcm_str {
#endif
#endif
struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
+ struct device dev;
};
struct snd_pcm {
@@ -465,7 +471,6 @@ struct snd_pcm {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_pcm *pcm);
- struct device *dev; /* actual hw device this belongs to */
bool internal; /* pcm is for internal use only */
bool nonatomic; /* whole PCM operations are in non-atomic context */
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
@@ -520,7 +525,6 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream);
int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, struct file *file,
struct snd_pcm_substream **rsubstream);
void snd_pcm_detach_substream(struct snd_pcm_substream *substream);
-void snd_pcm_vma_notify_data(void *client, void *data);
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area);
@@ -910,6 +914,8 @@ void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
const struct snd_interval *b, struct snd_interval *c);
int snd_interval_list(struct snd_interval *i, unsigned int count,
const unsigned int *list, unsigned int mask);
+int snd_interval_ranges(struct snd_interval *i, unsigned int count,
+ const struct snd_interval *list, unsigned int mask);
int snd_interval_ratnum(struct snd_interval *i,
unsigned int rats_count, struct snd_ratnum *rats,
unsigned int *nump, unsigned int *denp);
@@ -934,6 +940,10 @@ int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_list *l);
+int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
+ unsigned int cond,
+ snd_pcm_hw_param_t var,
+ const struct snd_pcm_hw_constraint_ranges *r);
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
@@ -986,21 +996,15 @@ int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */
ssize_t snd_pcm_format_size(snd_pcm_format_t format, size_t samples);
const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format);
int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int frames);
-snd_pcm_format_t snd_pcm_build_linear_format(int width, int unsigned, int big_endian);
void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
const struct snd_pcm_ops *ops);
void snd_pcm_set_sync(struct snd_pcm_substream *substream);
-int snd_pcm_lib_interleave_len(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
int snd_pcm_update_state(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime);
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream);
-int snd_pcm_playback_xrun_check(struct snd_pcm_substream *substream);
-int snd_pcm_capture_xrun_check(struct snd_pcm_substream *substream);
-int snd_pcm_playback_xrun_asap(struct snd_pcm_substream *substream);
-int snd_pcm_capture_xrun_asap(struct snd_pcm_substream *substream);
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr);
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream,
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 6b1c78f05fab..3c45f3924ba7 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -38,31 +38,6 @@ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
#define MASK_OFS(i) ((i) >> 5)
#define MASK_BIT(i) (1U << ((i) & 31))
-static inline unsigned int ld2(u_int32_t v)
-{
- unsigned r = 0;
-
- if (v >= 0x10000) {
- v >>= 16;
- r += 16;
- }
- if (v >= 0x100) {
- v >>= 8;
- r += 8;
- }
- if (v >= 0x10) {
- v >>= 4;
- r += 4;
- }
- if (v >= 4) {
- v >>= 2;
- r += 2;
- }
- if (v >= 2)
- r++;
- return r;
-}
-
static inline size_t snd_mask_sizeof(void)
{
return sizeof(struct snd_mask);
@@ -92,7 +67,7 @@ static inline unsigned int snd_mask_min(const struct snd_mask *mask)
int i;
for (i = 0; i < SNDRV_MASK_SIZE; i++) {
if (mask->bits[i])
- return ffs(mask->bits[i]) - 1 + (i << 5);
+ return __ffs(mask->bits[i]) + (i << 5);
}
return 0;
}
@@ -102,7 +77,7 @@ static inline unsigned int snd_mask_max(const struct snd_mask *mask)
int i;
for (i = SNDRV_MASK_SIZE - 1; i >= 0; i--) {
if (mask->bits[i])
- return ld2(mask->bits[i]) + (i << 5);
+ return __fls(mask->bits[i]) + (i << 5);
}
return 0;
}
@@ -325,43 +300,68 @@ static inline int snd_interval_eq(const struct snd_interval *i1, const struct sn
i1->max == i2->max && i1->openmax == i2->openmax;
}
-static inline unsigned int add(unsigned int a, unsigned int b)
+/**
+ * params_access - get the access type from the hw params
+ * @p: hw params
+ */
+static inline snd_pcm_access_t params_access(const struct snd_pcm_hw_params *p)
{
- if (a >= UINT_MAX - b)
- return UINT_MAX;
- return a + b;
+ return (__force snd_pcm_access_t)snd_mask_min(hw_param_mask_c(p,
+ SNDRV_PCM_HW_PARAM_ACCESS));
}
-static inline unsigned int sub(unsigned int a, unsigned int b)
+/**
+ * params_format - get the sample format from the hw params
+ * @p: hw params
+ */
+static inline snd_pcm_format_t params_format(const struct snd_pcm_hw_params *p)
{
- if (a > b)
- return a - b;
- return 0;
+ return (__force snd_pcm_format_t)snd_mask_min(hw_param_mask_c(p,
+ SNDRV_PCM_HW_PARAM_FORMAT));
}
-#define params_access(p) ((__force snd_pcm_access_t)\
- snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_ACCESS)))
-#define params_format(p) ((__force snd_pcm_format_t)\
- snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_FORMAT)))
-#define params_subformat(p) \
- snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_SUBFORMAT))
+/**
+ * params_subformat - get the sample subformat from the hw params
+ * @p: hw params
+ */
+static inline snd_pcm_subformat_t
+params_subformat(const struct snd_pcm_hw_params *p)
+{
+ return (__force snd_pcm_subformat_t)snd_mask_min(hw_param_mask_c(p,
+ SNDRV_PCM_HW_PARAM_SUBFORMAT));
+}
+/**
+ * params_period_bytes - get the period size (in bytes) from the hw params
+ * @p: hw params
+ */
static inline unsigned int
params_period_bytes(const struct snd_pcm_hw_params *p)
{
- return (params_period_size(p) *
- snd_pcm_format_physical_width(params_format(p)) *
- params_channels(p)) / 8;
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_BYTES)->min;
}
-static inline int
-params_width(const struct snd_pcm_hw_params *p)
+/**
+ * params_width - get the number of bits of the sample format from the hw params
+ * @p: hw params
+ *
+ * This function returns the number of bits per sample that the selected sample
+ * format of the hw params has.
+ */
+static inline int params_width(const struct snd_pcm_hw_params *p)
{
return snd_pcm_format_width(params_format(p));
}
-static inline int
-params_physical_width(const struct snd_pcm_hw_params *p)
+/*
+ * params_physical_width - get the storage size of the sample format from the hw params
+ * @p: hw params
+ *
+ * This function returns the number of bits per sample that the selected sample
+ * format of the hw params takes up in memory. This will be equal to or larger than
+ * params_width().
+ */
+static inline int params_physical_width(const struct snd_pcm_hw_params *p)
{
return snd_pcm_format_physical_width(params_format(p));
}
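
With the macros turned into typed inline helpers, a driver's hw_params callback reads the negotiated configuration the same way but now gets proper type checking. A minimal sketch:

static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	/* e.g. SNDRV_PCM_FORMAT_S24_LE: width 24 in a 32-bit slot */
	int bits  = params_width(params);
	int slots = params_physical_width(params);

	if (bits > 24)
		return -EINVAL;	/* hypothetical hardware limit */

	pr_debug("%d valid bits in %d-bit slots at %u Hz\n",
		 bits, slots, params_rate(params));
	return 0;
}
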
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 311dafe6cc4b..f6cbef78db62 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -28,6 +28,7 @@
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
#include <sound/seq_device.h>
@@ -139,7 +140,8 @@ struct snd_rawmidi {
struct mutex open_mutex;
wait_queue_head_t open_wait;
- struct snd_info_entry *dev;
+ struct device dev;
+
struct snd_info_entry *proc_entry;
#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 83284cae464c..4cecd0c175f6 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -55,6 +55,7 @@ struct rsnd_ssi_platform_info {
struct rsnd_src_platform_info {
u32 convert_rate; /* sampling rate convert */
int dma_id; /* for Gen2 SCU */
+ int irq;
};
/*
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index d9eb7d861cd0..a6207043ac3c 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -37,6 +37,9 @@ struct rt5677_platform_data {
OFF, GPIO4, GPIO5 and GPIO6 respectively */
unsigned int jd2_gpio;
unsigned int jd3_gpio;
+
+ /* Set MICBIAS1 VDD 1v8 or 3v3 */
+ bool micbias1_vdd_3v3;
};
#endif
diff --git a/include/sound/sb.h b/include/sound/sb.h
index ba3960329646..bacefaee411a 100644
--- a/include/sound/sb.h
+++ b/include/sound/sb.h
@@ -25,7 +25,7 @@
#include <sound/pcm.h>
#include <sound/rawmidi.h>
#include <linux/interrupt.h>
-#include <asm/io.h>
+#include <linux/io.h>
enum sb_hw_type {
SB_HW_AUTO,
@@ -308,7 +308,7 @@ void snd_sbmixer_resume(struct snd_sb *chip);
#endif
/* sb8_init.c */
-int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm);
+int snd_sb8dsp_pcm(struct snd_sb *chip, int device);
/* sb8.c */
irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip);
int snd_sb8_playback_open(struct snd_pcm_substream *substream);
@@ -317,10 +317,10 @@ int snd_sb8_playback_close(struct snd_pcm_substream *substream);
int snd_sb8_capture_close(struct snd_pcm_substream *substream);
/* midi8.c */
irqreturn_t snd_sb8dsp_midi_interrupt(struct snd_sb *chip);
-int snd_sb8dsp_midi(struct snd_sb *chip, int device, struct snd_rawmidi ** rrawmidi);
+int snd_sb8dsp_midi(struct snd_sb *chip, int device);
/* sb16_init.c */
-int snd_sb16dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm);
+int snd_sb16dsp_pcm(struct snd_sb *chip, int device);
const struct snd_pcm_ops *snd_sb16dsp_get_pcm_ops(int direction);
int snd_sb16dsp_configure(struct snd_sb *chip);
/* sb16.c */
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index eea5400fe373..18a2ac58b88f 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -27,11 +27,8 @@
typedef struct snd_seq_real_time snd_seq_real_time_t;
typedef union snd_seq_timestamp snd_seq_timestamp_t;
-/* maximum number of events dequeued per schedule interval */
-#define SNDRV_SEQ_MAX_DEQUEUE 50
-
/* maximum number of queues */
-#define SNDRV_SEQ_MAX_QUEUES 8
+#define SNDRV_SEQ_MAX_QUEUES 32
/* max number of concurrent clients */
#define SNDRV_SEQ_MAX_CLIENTS 192
@@ -42,9 +39,6 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
/* max number of events in memory pool */
#define SNDRV_SEQ_MAX_EVENTS 2000
-/* default number of events in memory chunk */
-#define SNDRV_SEQ_DEFAULT_CHUNK_EVENTS 64
-
/* default number of events in memory pool */
#define SNDRV_SEQ_DEFAULT_EVENTS 500
@@ -70,7 +64,6 @@ struct snd_seq_port_callback {
int (*unuse)(void *private_data, struct snd_seq_port_subscribe *info);
int (*event_input)(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop);
void (*private_free)(void *private_data);
- unsigned int callback_all; /* call subscribe callbacks at each connection/disconnection */
/*...*/
};
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index 9b0ac77177b6..1255ddb1d3e2 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -20,6 +20,7 @@ struct asoc_simple_dai {
unsigned int sysclk;
int slots;
int slot_width;
+ struct clk *clk;
};
struct asoc_simple_card_info {
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 89823cfe6f04..8d7416e46861 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -405,7 +405,7 @@ int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_update *update);
/* dapm sys fs - used by the core */
-int snd_soc_dapm_sys_add(struct device *dev);
+extern struct attribute *soc_dapm_dev_attrs[];
void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
struct dentry *parent);
@@ -431,7 +431,6 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
const char *pin);
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
-void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
/* Mostly internal - should not normally be used */
@@ -526,7 +525,6 @@ struct snd_soc_dapm_widget {
enum snd_soc_dapm_type id;
const char *name; /* widget name */
const char *sname; /* stream name */
- struct snd_soc_codec *codec;
struct list_head list;
struct snd_soc_dapm_context *dapm;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b4fca9aed2a2..0d1ade195628 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -429,6 +429,9 @@ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd);
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
+int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
+ unsigned int dai_fmt);
+
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
@@ -498,6 +501,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
unsigned int mask, unsigned int value);
#ifdef CONFIG_SND_SOC_AC97_BUS
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
diff --git a/include/sound/sta32x.h b/include/sound/sta32x.h
index 8d93b0357a14..a894f7d17b1a 100644
--- a/include/sound/sta32x.h
+++ b/include/sound/sta32x.h
@@ -24,12 +24,20 @@
#define STA32X_THERMAL_RECOVERY_ENABLE 2
struct sta32x_platform_data {
- int output_conf;
- int ch1_output_mapping;
- int ch2_output_mapping;
- int ch3_output_mapping;
- int thermal_conf;
+ u8 output_conf;
+ u8 ch1_output_mapping;
+ u8 ch2_output_mapping;
+ u8 ch3_output_mapping;
int needs_esd_watchdog;
+ u8 drop_compensation_ns;
+ unsigned int thermal_warning_recovery:1;
+ unsigned int thermal_warning_adjustment:1;
+ unsigned int fault_detect_recovery:1;
+ unsigned int max_power_use_mpcc:1;
+ unsigned int max_power_correction:1;
+ unsigned int am_reduction_mode:1;
+ unsigned int odd_pwm_speed_mode:1;
+ unsigned int invalid_input_detect_mute:1;
};
#endif /* __LINUX_SND__STA32X_H */
diff --git a/include/sound/wss.h b/include/sound/wss.h
index 0c7f034f1e86..1823e3a964e2 100644
--- a/include/sound/wss.h
+++ b/include/sound/wss.h
@@ -154,8 +154,8 @@ int snd_wss_create(struct snd_card *card,
unsigned short hardware,
unsigned short hwshare,
struct snd_wss **rchip);
-int snd_wss_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm);
-int snd_wss_timer(struct snd_wss *chip, int device, struct snd_timer **rtimer);
+int snd_wss_pcm(struct snd_wss *chip, int device);
+int snd_wss_timer(struct snd_wss *chip, int device);
int snd_wss_mixer(struct snd_wss *chip);
const struct snd_pcm_ops *snd_wss_get_pcm_ops(int direction);
@@ -167,7 +167,7 @@ int snd_cs4236_create(struct snd_card *card,
unsigned short hardware,
unsigned short hwshare,
struct snd_wss **rchip);
-int snd_cs4236_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm);
+int snd_cs4236_pcm(struct snd_wss *chip, int device);
int snd_cs4236_mixer(struct snd_wss *chip);
/*
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 430cfaf92285..db81c65b8f48 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
index 3247d7530107..186f7a923570 100644
--- a/include/target/target_core_backend_configfs.h
+++ b/include/target/target_core_backend_configfs.h
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
- DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
- TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 397fb635766a..4a8795a87b9e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -77,8 +77,6 @@
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Default max_write_same_len, disabled by default */
#define DA_MAX_WRITE_SAME_LEN 0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS 8192
/* Use a model alias based on the configfs backend device name */
#define DA_EMULATE_MODEL_ALIAS 0
/* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
- u32 fabric_max_sectors;
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c6814b917bdf..9a6a3fe0fb51 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -11,39 +11,55 @@
DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
- TP_PROTO(unsigned long nr_scanned,
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
unsigned long nr_taken),
- TP_ARGS(nr_scanned, nr_taken),
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
TP_STRUCT__entry(
+ __field(unsigned long, start_pfn)
+ __field(unsigned long, end_pfn)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
),
TP_fast_assign(
+ __entry->start_pfn = start_pfn;
+ __entry->end_pfn = end_pfn;
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
),
- TP_printk("nr_scanned=%lu nr_taken=%lu",
+ TP_printk("range=(0x%lx ~ 0x%lx) nr_scanned=%lu nr_taken=%lu",
+ __entry->start_pfn,
+ __entry->end_pfn,
__entry->nr_scanned,
__entry->nr_taken)
);
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
- TP_PROTO(unsigned long nr_scanned,
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
unsigned long nr_taken),
- TP_ARGS(nr_scanned, nr_taken)
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
- TP_PROTO(unsigned long nr_scanned,
+
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
unsigned long nr_taken),
- TP_ARGS(nr_scanned, nr_taken)
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
TRACE_EVENT(mm_compaction_migratepages,
@@ -85,47 +101,198 @@ TRACE_EVENT(mm_compaction_migratepages,
);
TRACE_EVENT(mm_compaction_begin,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
- unsigned long free_start, unsigned long zone_end),
+ TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
+ unsigned long free_pfn, unsigned long zone_end, bool sync),
- TP_ARGS(zone_start, migrate_start, free_start, zone_end),
+ TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
- __field(unsigned long, migrate_start)
- __field(unsigned long, free_start)
+ __field(unsigned long, migrate_pfn)
+ __field(unsigned long, free_pfn)
__field(unsigned long, zone_end)
+ __field(bool, sync)
),
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_start = migrate_start;
- __entry->free_start = free_start;
+ __entry->migrate_pfn = migrate_pfn;
+ __entry->free_pfn = free_pfn;
__entry->zone_end = zone_end;
+ __entry->sync = sync;
),
- TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu",
+ TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s",
__entry->zone_start,
- __entry->migrate_start,
- __entry->free_start,
- __entry->zone_end)
+ __entry->migrate_pfn,
+ __entry->free_pfn,
+ __entry->zone_end,
+ __entry->sync ? "sync" : "async")
);
TRACE_EVENT(mm_compaction_end,
- TP_PROTO(int status),
+ TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
+ unsigned long free_pfn, unsigned long zone_end, bool sync,
+ int status),
- TP_ARGS(status),
+ TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
TP_STRUCT__entry(
+ __field(unsigned long, zone_start)
+ __field(unsigned long, migrate_pfn)
+ __field(unsigned long, free_pfn)
+ __field(unsigned long, zone_end)
+ __field(bool, sync)
__field(int, status)
),
TP_fast_assign(
+ __entry->zone_start = zone_start;
+ __entry->migrate_pfn = migrate_pfn;
+ __entry->free_pfn = free_pfn;
+ __entry->zone_end = zone_end;
+ __entry->sync = sync;
__entry->status = status;
),
- TP_printk("status=%d", __entry->status)
+ TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s status=%s",
+ __entry->zone_start,
+ __entry->migrate_pfn,
+ __entry->free_pfn,
+ __entry->zone_end,
+ __entry->sync ? "sync" : "async",
+ compaction_status_string[__entry->status])
+);
+
+TRACE_EVENT(mm_compaction_try_to_compact_pages,
+
+ TP_PROTO(
+ int order,
+ gfp_t gfp_mask,
+ enum migrate_mode mode),
+
+ TP_ARGS(order, gfp_mask, mode),
+
+ TP_STRUCT__entry(
+ __field(int, order)
+ __field(gfp_t, gfp_mask)
+ __field(enum migrate_mode, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->gfp_mask = gfp_mask;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("order=%d gfp_mask=0x%x mode=%d",
+ __entry->order,
+ __entry->gfp_mask,
+ (int)__entry->mode)
+);
+
+DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
+
+ TP_PROTO(struct zone *zone,
+ int order,
+ int ret),
+
+ TP_ARGS(zone, order, ret),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(char *, name)
+ __field(int, order)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = zone_to_nid(zone);
+ __entry->name = (char *)zone->name;
+ __entry->order = order;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("node=%d zone=%-8s order=%d ret=%s",
+ __entry->nid,
+ __entry->name,
+ __entry->order,
+ compaction_status_string[__entry->ret])
+);
+
+DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished,
+
+ TP_PROTO(struct zone *zone,
+ int order,
+ int ret),
+
+ TP_ARGS(zone, order, ret)
+);
+
+DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable,
+
+ TP_PROTO(struct zone *zone,
+ int order,
+ int ret),
+
+ TP_ARGS(zone, order, ret)
+);
+
+#ifdef CONFIG_COMPACTION
+DECLARE_EVENT_CLASS(mm_compaction_defer_template,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(char *, name)
+ __field(int, order)
+ __field(unsigned int, considered)
+ __field(unsigned int, defer_shift)
+ __field(int, order_failed)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = zone_to_nid(zone);
+ __entry->name = (char *)zone->name;
+ __entry->order = order;
+ __entry->considered = zone->compact_considered;
+ __entry->defer_shift = zone->compact_defer_shift;
+ __entry->order_failed = zone->compact_order_failed;
+ ),
+
+ TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
+ __entry->nid,
+ __entry->name,
+ __entry->order,
+ __entry->order_failed,
+ __entry->considered,
+ 1UL << __entry->defer_shift)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order)
);
+#endif
#endif /* _TRACE_COMPACTION_H */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index bbc4de9baef7..5422dbfaf97d 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -72,6 +72,7 @@
#define show_cpreason(type) \
__print_symbolic(type, \
{ CP_UMOUNT, "Umount" }, \
+ { CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
{ CP_DISCARD, "Discard" })
@@ -148,14 +149,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
TRACE_EVENT(f2fs_sync_file_exit,
- TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+ TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
TP_ARGS(inode, need_cp, datasync, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(bool, need_cp)
+ __field(int, need_cp)
__field(int, datasync)
__field(int, ret)
),
@@ -190,7 +191,7 @@ TRACE_EVENT(f2fs_sync_fs,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->dirty = F2FS_SB(sb)->s_dirty;
+ __entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
__entry->wait = wait;
),
@@ -440,38 +441,6 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
__entry->err)
);
-TRACE_EVENT_CONDITION(f2fs_submit_page_bio,
-
- TP_PROTO(struct page *page, sector_t blkaddr, int type),
-
- TP_ARGS(page, blkaddr, type),
-
- TP_CONDITION(page->mapping),
-
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(pgoff_t, index)
- __field(sector_t, blkaddr)
- __field(int, type)
- ),
-
- TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
- __entry->blkaddr = blkaddr;
- __entry->type = type;
- ),
-
- TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "blkaddr = 0x%llx, bio_type = %s%s",
- show_dev_ino(__entry),
- (unsigned long)__entry->index,
- (unsigned long long)__entry->blkaddr,
- show_bio_type(__entry->type))
-);
-
TRACE_EVENT(f2fs_get_data_block,
TP_PROTO(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int ret),
@@ -680,11 +649,63 @@ TRACE_EVENT(f2fs_reserve_new_block,
__entry->ofs_in_node)
);
+DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+
+ TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+ TP_ARGS(page, fio),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(block_t, blkaddr)
+ __field(int, rw)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->index = page->index;
+ __entry->blkaddr = fio->blk_addr;
+ __entry->rw = fio->rw;
+ __entry->type = fio->type;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+ "blkaddr = 0x%llx, rw = %s%s, type = %s",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->blkaddr,
+ show_bio_type(__entry->rw),
+ show_block_type(__entry->type))
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+
+ TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+ TP_ARGS(page, fio),
+
+ TP_CONDITION(page->mapping)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
+
+ TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+ TP_ARGS(page, fio),
+
+ TP_CONDITION(page->mapping)
+);
+
DECLARE_EVENT_CLASS(f2fs__submit_bio,
- TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+ TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+ struct bio *bio),
- TP_ARGS(sb, rw, type, bio),
+ TP_ARGS(sb, fio, bio),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -696,8 +717,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->rw = rw;
- __entry->type = type;
+ __entry->rw = fio->rw;
+ __entry->type = fio->type;
__entry->sector = bio->bi_iter.bi_sector;
__entry->size = bio->bi_iter.bi_size;
),
@@ -712,18 +733,20 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
- TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+ TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+ struct bio *bio),
- TP_ARGS(sb, rw, type, bio),
+ TP_ARGS(sb, fio, bio),
TP_CONDITION(bio)
);
DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
- TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+ TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+ struct bio *bio),
- TP_ARGS(sb, rw, type, bio),
+ TP_ARGS(sb, fio, bio),
TP_CONDITION(bio)
);
@@ -916,38 +939,6 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_sync)
);
-TRACE_EVENT(f2fs_submit_page_mbio,
-
- TP_PROTO(struct page *page, int rw, int type, block_t blk_addr),
-
- TP_ARGS(page, rw, type, blk_addr),
-
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(int, rw)
- __field(int, type)
- __field(pgoff_t, index)
- __field(block_t, block)
- ),
-
- TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->rw = rw;
- __entry->type = type;
- __entry->index = page->index;
- __entry->block = blk_addr;
- ),
-
- TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx",
- show_dev_ino(__entry),
- show_bio_type(__entry->rw),
- show_block_type(__entry->type),
- (unsigned long)__entry->index,
- (unsigned long long)__entry->block)
-);
-
TRACE_EVENT(f2fs_write_checkpoint,
TP_PROTO(struct super_block *sb, int reason, char *msg),
@@ -998,14 +989,15 @@ TRACE_EVENT(f2fs_issue_discard,
TRACE_EVENT(f2fs_issue_flush,
- TP_PROTO(struct super_block *sb, bool nobarrier, bool flush_merge),
+ TP_PROTO(struct super_block *sb, unsigned int nobarrier,
+ unsigned int flush_merge),
TP_ARGS(sb, nobarrier, flush_merge),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(bool, nobarrier)
- __field(bool, flush_merge)
+ __field(unsigned int, nobarrier)
+ __field(unsigned int, flush_merge)
),
TP_fast_assign(
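A hedged sketch of what an f2fs call site looks like after this change: the separate rw/type/blkaddr arguments of the removed events are folded into the single f2fs_io_info descriptor (field names as used in the f2fs__submit_page_bio class above; the wrapper function is made up, DATA and WRITE_SYNC are the usual f2fs/block constants):

static void submit_one_data_page(struct page *page, block_t blkaddr)
{
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC,
		.blk_addr = blkaddr,
	};

	/* Fires only when page->mapping is set, per the TP_CONDITION() above. */
	trace_f2fs_submit_page_bio(page, &fio);
}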
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index a8f5c32d174b..2c7befb10f13 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
TP_ARGS(dev)
);
-DECLARE_EVENT_CLASS(iommu_map_unmap,
+TRACE_EVENT(map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
TP_STRUCT__entry(
__field(u64, iova)
__field(u64, paddr)
- __field(int, size)
+ __field(size_t, size)
),
TP_fast_assign(
@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
__entry->size = size;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
+ TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
__entry->iova, __entry->paddr, __entry->size
)
);
-DEFINE_EVENT(iommu_map_unmap, map,
+TRACE_EVENT(unmap,
- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
-
- TP_ARGS(iova, paddr, size)
-);
+ TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
-DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
+ TP_ARGS(iova, size, unmapped_size),
- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+ TP_STRUCT__entry(
+ __field(u64, iova)
+ __field(size_t, size)
+ __field(size_t, unmapped_size)
+ ),
- TP_ARGS(iova, paddr, size),
+ TP_fast_assign(
+ __entry->iova = iova;
+ __entry->size = size;
+ __entry->unmapped_size = unmapped_size;
+ ),
- TP_printk("IOMMU: iova=0x%016llx size=0x%x",
- __entry->iova, __entry->size
+ TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
+ __entry->iova, __entry->size, __entry->unmapped_size
)
);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index aece1346ceb7..4ad10baecd4d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
TP_PROTO(struct page *page,
int alloc_order, int fallback_order,
- int alloc_migratetype, int fallback_migratetype, int new_migratetype),
+ int alloc_migratetype, int fallback_migratetype),
TP_ARGS(page,
alloc_order, fallback_order,
- alloc_migratetype, fallback_migratetype, new_migratetype),
+ alloc_migratetype, fallback_migratetype),
TP_STRUCT__entry(
__field( struct page *, page )
@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->fallback_order = fallback_order;
__entry->alloc_migratetype = alloc_migratetype;
__entry->fallback_migratetype = fallback_migratetype;
- __entry->change_ownership = (new_migratetype == alloc_migratetype);
+ __entry->change_ownership = (alloc_migratetype ==
+ get_pageblock_migratetype(page));
),
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6edf1f2028cd..a44062da684b 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -37,6 +37,25 @@ TRACE_EVENT(kvm_userspace_exit,
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
+TRACE_EVENT(kvm_vcpu_wakeup,
+ TP_PROTO(__u64 ns, bool waited),
+ TP_ARGS(ns, waited),
+
+ TP_STRUCT__entry(
+ __field( __u64, ns )
+ __field( bool, waited )
+ ),
+
+ TP_fast_assign(
+ __entry->ns = ns;
+ __entry->waited = waited;
+ ),
+
+ TP_printk("%s time %lld ns",
+ __entry->waited ? "wait" : "poll",
+ __entry->ns)
+);
+
#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -146,6 +165,14 @@ TRACE_EVENT(kvm_msi_set_irq,
#if defined(CONFIG_HAVE_KVM_IRQFD)
+#ifdef kvm_irqchips
+#define kvm_ack_irq_string "irqchip %s pin %u"
+#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
+#else
+#define kvm_ack_irq_string "irqchip %d pin %u"
+#define kvm_ack_irq_parm __entry->irqchip, __entry->pin
+#endif
+
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
TP_ARGS(irqchip, pin),
@@ -160,13 +187,7 @@ TRACE_EVENT(kvm_ack_irq,
__entry->pin = pin;
),
-#ifdef kvm_irqchips
- TP_printk("irqchip %s pin %u",
- __print_symbolic(__entry->irqchip, kvm_irqchips),
- __entry->pin)
-#else
- TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
-#endif
+ TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 1de256b35807..49cc7c3de252 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -40,9 +40,9 @@ TRACE_EVENT(net_dev_start_xmit,
__assign_str(name, dev->name);
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
- __entry->vlan_tagged = vlan_tx_tag_present(skb);
+ __entry->vlan_tagged = skb_vlan_tag_present(skb);
__entry->vlan_proto = ntohs(skb->vlan_proto);
- __entry->vlan_tci = vlan_tx_tag_get(skb);
+ __entry->vlan_tci = skb_vlan_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
__entry->len = skb->len;
@@ -174,9 +174,9 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
#endif
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
- __entry->vlan_tagged = vlan_tx_tag_present(skb);
+ __entry->vlan_tagged = skb_vlan_tag_present(skb);
__entry->vlan_proto = ntohs(skb->vlan_proto);
- __entry->vlan_tci = vlan_tx_tag_get(skb);
+ __entry->vlan_tci = skb_vlan_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
__entry->hash = skb->hash;
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 13391d288107..0e7635765153 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -13,11 +13,13 @@
{ TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
{ TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
-TRACE_EVENT(tlb_flush,
+TRACE_EVENT_CONDITION(tlb_flush,
TP_PROTO(int reason, unsigned long pages),
TP_ARGS(reason, pages),
+ TP_CONDITION(cpu_online(smp_processor_id())),
+
TP_STRUCT__entry(
__field( int, reason)
__field(unsigned long, pages)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index cee02d65ab3f..0e9310905413 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -47,7 +47,7 @@ TRACE_EVENT(writeback_dirty_page,
TP_fast_assign(
strncpy(__entry->name,
- mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+ mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
),
TP_fast_assign(
- struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
strncpy(__entry->name,
@@ -116,7 +116,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_fast_assign(
strncpy(__entry->name,
- dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
),
@@ -156,10 +156,8 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, reason)
),
TP_fast_assign(
- struct device *dev = bdi->dev;
- if (!dev)
- dev = default_backing_dev_info.dev;
- strncpy(__entry->name, dev_name(dev), 32);
+ strncpy(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 139b5067345b..41bf65f04dd9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -263,6 +263,14 @@
#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+#undef __print_array
+#define __print_array(array, count, el_size) \
+ ({ \
+ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
+ el_size != 4 && el_size != 8); \
+ ftrace_print_array_seq(p, array, count, el_size); \
+ })
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
@@ -674,6 +682,7 @@ static inline void ftrace_test_probe_##call(void) \
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
+#undef __print_array
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
@@ -763,7 +772,7 @@ perf_trace_##call(void *__data, proto) \
struct ftrace_event_call *event_call = __data; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_raw_##call *entry; \
- struct pt_regs __regs; \
+ struct pt_regs *__regs; \
u64 __addr = 0, __count = 1; \
struct task_struct *__task = NULL; \
struct hlist_head *head; \
@@ -782,18 +791,19 @@ perf_trace_##call(void *__data, proto) \
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- perf_fetch_caller_regs(&__regs); \
entry = perf_trace_buf_prepare(__entry_size, \
event_call->event.type, &__regs, &rctx); \
if (!entry) \
return; \
\
+ perf_fetch_caller_regs(__regs); \
+ \
tstruct \
\
{ assign; } \
\
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, &__regs, head, __task); \
+ __count, __regs, head, __task); \
}
/*
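The new __print_array() helper lets TP_printk() render a dynamic array of fixed-size elements. A hypothetical event (not part of this patch; the event name and fields are illustrative) would use it roughly like this, with the BUILD_BUG_ON() above rejecting element sizes other than 1, 2, 4 or 8 bytes at build time:

TRACE_EVENT(sample_values,

	TP_PROTO(const u32 *values, int count),

	TP_ARGS(values, count),

	TP_STRUCT__entry(
		__field(int, count)
		__dynamic_array(u32, values, count)
	),

	TP_fast_assign(
		__entry->count = count;
		memcpy(__get_dynamic_array(values), values,
		       count * sizeof(u32));
	),

	TP_printk("count=%d values=%s", __entry->count,
		  __print_array(__get_dynamic_array(values), __entry->count,
				sizeof(u32)))
);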
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b0b855613641..01b2d6d0e355 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -654,6 +654,13 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will expose atomic properties to userspace
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -777,6 +784,7 @@ struct drm_prime_handle {
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
/**
* Device specific ioctls should only be in their respective headers
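Userspace has to opt in before the atomic machinery becomes visible. A hedged sketch using the plain ioctl interface declared in this header (error handling omitted; libdrm's drmSetClientCap() wraps the same call):

#include <sys/ioctl.h>
#include <drm/drm.h>

static int enable_atomic(int drm_fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_ATOMIC,
		.value = 1,	/* expose atomic properties to this client */
	};

	return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}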
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 646ae5f39f42..a284f11a8ef5 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -109,9 +109,6 @@
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
-/* special NV12 tiled format */
-#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
-
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 86574b0005ff..ca788e01dab2 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -272,6 +272,13 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+/* the PROP_ATOMIC flag is used to hide properties from userspace that
+ * is not aware of atomic properties. This is mostly to work around
+ * older userspace (DDX drivers) that read/write each prop they find,
+ * without being aware that this could be triggering a lengthy modeset.

+ */
+#define DRM_MODE_PROP_ATOMIC 0x80000000
+
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
@@ -338,7 +345,7 @@ struct drm_mode_fb_cmd2 {
/*
* In case of planar formats, this ioctl allows up to 4
- * buffer objects with offets and pitches per plane.
+ * buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
@@ -346,9 +353,9 @@ struct drm_mode_fb_cmd2 {
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
- * So it would consist of Y as offset[0] and UV as
- * offeset[1]. Note that offset[0] will generally
- * be 0.
+ * So it would consist of Y as offsets[0] and UV as
+ * offsets[1]. Note that offsets[0] will generally
+ * be 0 (but this is not required).
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
@@ -519,4 +526,27 @@ struct drm_mode_destroy_dumb {
uint32_t handle;
};
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+#define DRM_MODE_ATOMIC_FLAGS (\
+ DRM_MODE_PAGE_FLIP_EVENT |\
+ DRM_MODE_PAGE_FLIP_ASYNC |\
+ DRM_MODE_ATOMIC_TEST_ONLY |\
+ DRM_MODE_ATOMIC_NONBLOCK |\
+ DRM_MODE_ATOMIC_ALLOW_MODESET)
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
+
#endif
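A hedged sketch of how the new ioctl is expected to be driven from userspace: objs_ptr points at count_objs object IDs, count_props_ptr at one property count per object, and props_ptr/prop_values_ptr at the concatenated property IDs and values. The object and property IDs below are placeholders, and error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int test_one_plane(int drm_fd, uint32_t plane_id,
			  uint32_t fb_prop_id, uint32_t crtc_prop_id,
			  uint64_t fb_id, uint64_t crtc_id)
{
	uint32_t objs[]        = { plane_id };
	uint32_t count_props[] = { 2 };		/* two properties on objs[0] */
	uint32_t props[]       = { fb_prop_id, crtc_prop_id };
	uint64_t values[]      = { fb_id, crtc_id };

	struct drm_mode_atomic req = {
		.flags           = DRM_MODE_ATOMIC_TEST_ONLY, /* validate only */
		.count_objs      = 1,
		.objs_ptr        = (uint64_t)(uintptr_t)objs,
		.count_props_ptr = (uint64_t)(uintptr_t)count_props,
		.props_ptr       = (uint64_t)(uintptr_t)props,
		.prop_values_ptr = (uint64_t)(uintptr_t)values,
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_ATOMIC, &req);
}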
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 250262265ee3..6eed16b92a24 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -224,6 +224,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
+#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -275,6 +277,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -341,6 +345,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION 30
+#define I915_PARAM_HAS_BSD2 31
typedef struct drm_i915_getparam {
int param;
@@ -488,6 +494,14 @@ struct drm_i915_gem_mmap {
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * Added in version 2.
+ */
+ __u64 flags;
+#define I915_MMAP_WC 0x1
};
struct drm_i915_gem_mmap_gtt {
@@ -737,7 +751,13 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_HANDLE_LUT (1<<12)
-#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_MASK (3<<13)
+#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
+#define I915_EXEC_BSD_RING1 (1<<13)
+#define I915_EXEC_BSD_RING2 (2<<13)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1073,4 +1093,12 @@ struct drm_i915_gem_userptr {
__u32 handle;
};
+struct drm_i915_gem_context_param {
+ __u32 ctx_id;
+ __u32 size;
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+ __u64 value;
+};
+
#endif /* _UAPI_I915_DRM_H_ */
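A hedged userspace sketch of the two new context-parameter ioctls, reading and then doubling a context's ban period (structure and parameter names are taken from the header above; error handling is minimal):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int double_ban_period(int drm_fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param  = I915_CONTEXT_PARAM_BAN_PERIOD,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -1;

	p.value *= 2;	/* double whatever value the driver reported */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}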
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 00b100023c47..7b8141bf59a7 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -35,6 +35,7 @@ header-y += adfs_fs.h
header-y += affs_hardblocks.h
header-y += agpgart.h
header-y += aio_abi.h
+header-y += am437x-vpfe.h
header-y += apm_bios.h
header-y += arcfb.h
header-y += atalk.h
@@ -283,6 +284,7 @@ header-y += net.h
header-y += netlink_diag.h
header-y += netlink.h
header-y += netrom.h
+header-y += net_namespace.h
header-y += net_tstamp.h
header-y += nfc.h
header-y += nfs2.h
diff --git a/include/uapi/linux/am437x-vpfe.h b/include/uapi/linux/am437x-vpfe.h
new file mode 100644
index 000000000000..9b03033f9cd6
--- /dev/null
+++ b/include/uapi/linux/am437x-vpfe.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AM437X_VPFE_USER_H
+#define AM437X_VPFE_USER_H
+
+enum vpfe_ccdc_data_size {
+ VPFE_CCDC_DATA_16BITS = 0,
+ VPFE_CCDC_DATA_15BITS,
+ VPFE_CCDC_DATA_14BITS,
+ VPFE_CCDC_DATA_13BITS,
+ VPFE_CCDC_DATA_12BITS,
+ VPFE_CCDC_DATA_11BITS,
+ VPFE_CCDC_DATA_10BITS,
+ VPFE_CCDC_DATA_8BITS,
+};
+
+/* enum for number of pixels per line to be averaged in Black Clamping */
+enum vpfe_ccdc_sample_length {
+ VPFE_CCDC_SAMPLE_1PIXELS = 0,
+ VPFE_CCDC_SAMPLE_2PIXELS,
+ VPFE_CCDC_SAMPLE_4PIXELS,
+ VPFE_CCDC_SAMPLE_8PIXELS,
+ VPFE_CCDC_SAMPLE_16PIXELS,
+};
+
+/* enum for No of lines in Black Clamping */
+enum vpfe_ccdc_sample_line {
+ VPFE_CCDC_SAMPLE_1LINES = 0,
+ VPFE_CCDC_SAMPLE_2LINES,
+ VPFE_CCDC_SAMPLE_4LINES,
+ VPFE_CCDC_SAMPLE_8LINES,
+ VPFE_CCDC_SAMPLE_16LINES,
+};
+
+/* enum for Alaw gamma width */
+enum vpfe_ccdc_gamma_width {
+ VPFE_CCDC_GAMMA_BITS_15_6 = 0, /* use bits 15-6 for gamma */
+ VPFE_CCDC_GAMMA_BITS_14_5,
+ VPFE_CCDC_GAMMA_BITS_13_4,
+ VPFE_CCDC_GAMMA_BITS_12_3,
+ VPFE_CCDC_GAMMA_BITS_11_2,
+ VPFE_CCDC_GAMMA_BITS_10_1,
+ VPFE_CCDC_GAMMA_BITS_09_0, /* use bits 9-0 for gamma */
+};
+
+/* structure for ALaw */
+struct vpfe_ccdc_a_law {
+ /* Enable/disable A-Law */
+ unsigned char enable;
+ /* Gamma Width Input */
+ enum vpfe_ccdc_gamma_width gamma_wd;
+};
+
+/* structure for Black Clamping */
+struct vpfe_ccdc_black_clamp {
+ unsigned char enable;
+ /* only if bClampEnable is TRUE */
+ enum vpfe_ccdc_sample_length sample_pixel;
+ /* only if bClampEnable is TRUE */
+ enum vpfe_ccdc_sample_line sample_ln;
+ /* only if bClampEnable is TRUE */
+ unsigned short start_pixel;
+ /* only if bClampEnable is TRUE */
+ unsigned short sgain;
+ /* only if bClampEnable is FALSE */
+ unsigned short dc_sub;
+};
+
+/* structure for Black Level Compensation */
+struct vpfe_ccdc_black_compensation {
+ /* Constant value to subtract from Red component */
+ char r;
+ /* Constant value to subtract from Gr component */
+ char gr;
+ /* Constant value to subtract from Blue component */
+ char b;
+ /* Constant value to subtract from Gb component */
+ char gb;
+};
+
+/* Structure for CCDC configuration parameters for raw capture mode passed
+ * by application
+ */
+struct vpfe_ccdc_config_params_raw {
+ /* data size value from 8 to 16 bits */
+ enum vpfe_ccdc_data_size data_sz;
+ /* Structure for Optional A-Law */
+ struct vpfe_ccdc_a_law alaw;
+ /* Structure for Optical Black Clamp */
+ struct vpfe_ccdc_black_clamp blk_clamp;
+ /* Structure for Black Compensation */
+ struct vpfe_ccdc_black_compensation blk_comp;
+};
+
+/*
+ * Private IOCTL
+ * VIDIOC_AM437X_CCDC_CFG - Set CCDC configuration for raw capture
+ * This is an experimental ioctl that will change in future kernels. So use
+ * this ioctl with care!
+ */
+#define VIDIOC_AM437X_CCDC_CFG \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 1, void *)
+
+#endif /* AM437X_VPFE_USER_H */
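A hedged userspace sketch of the experimental ioctl declared above, assuming an open V4L2 video node for the VPFE and that the driver takes a pointer to the raw-capture parameter block:

#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/am437x-vpfe.h>

static int configure_ccdc(int video_fd)
{
	struct vpfe_ccdc_config_params_raw cfg = {
		.data_sz = VPFE_CCDC_DATA_10BITS,
		.alaw = {
			.enable   = 1,
			.gamma_wd = VPFE_CCDC_GAMMA_BITS_09_0,
		},
	};

	return ioctl(video_fd, VIDIOC_AM437X_CCDC_CFG, &cfg);
}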
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 3e4323a3918d..94ffe0c83ce7 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -98,6 +98,7 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */
#define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
+#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
/*
* CAN device statistics
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index a570d7b5796c..889f3a5b7b18 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 29
+#define DM_VERSION_MINOR 30
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2014-10-28)"
+#define DM_VERSION_EXTRA "-ioctl (2014-12-22)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 5f66d9c2889d..2e49fc880d29 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -139,6 +139,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
#define ETHTOOL_FWVERS_LEN 32
#define ETHTOOL_BUSINFO_LEN 32
+#define ETHTOOL_EROMVERS_LEN 32
/**
* struct ethtool_drvinfo - general driver and device information
@@ -148,6 +149,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
* not be an empty string.
* @version: Driver version string; may be an empty string
* @fw_version: Firmware version string; may be an empty string
+ * @erom_version: Expansion ROM version string; may be an empty string
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
@@ -176,7 +178,7 @@ struct ethtool_drvinfo {
char version[32];
char fw_version[ETHTOOL_FWVERS_LEN];
char bus_info[ETHTOOL_BUSINFO_LEN];
- char reserved1[32];
+ char erom_version[ETHTOOL_EROMVERS_LEN];
char reserved2[12];
__u32 n_priv_flags;
__u32 n_stats;
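A driver fills the renamed field from its .get_drvinfo() ethtool hook; a hedged sketch with placeholder version strings:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void sample_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "sample", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	/* Expansion ROM version now has a dedicated field. */
	strlcpy(info->erom_version, "2.4", sizeof(info->erom_version));
}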
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index b03ee8f62d3c..eaaea6208b42 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -125,6 +125,8 @@ enum {
#define BRIDGE_VLAN_INFO_MASTER (1<<0) /* Operate on Bridge device as well */
#define BRIDGE_VLAN_INFO_PVID (1<<1) /* VLAN is PVID, ingress untagged */
#define BRIDGE_VLAN_INFO_UNTAGGED (1<<2) /* VLAN egresses untagged */
+#define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */
+#define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */
struct bridge_vlan_info {
__u16 flags;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index f7d0d2d7173a..0deee3eeddbf 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -146,6 +146,7 @@ enum {
IFLA_PHYS_PORT_ID,
IFLA_CARRIER_CHANGES,
IFLA_PHYS_SWITCH_ID,
+ IFLA_LINK_NETNSID,
__IFLA_MAX
};
@@ -370,6 +371,9 @@ enum {
IFLA_VXLAN_UDP_CSUM,
IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ IFLA_VXLAN_REMCSUM_TX,
+ IFLA_VXLAN_REMCSUM_RX,
+ IFLA_VXLAN_GBP,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index c33a65e3d62c..589ced069e8a 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -109,6 +109,7 @@ struct in_addr {
#define IP_MINTTL 21
#define IP_NODEFRAG 22
+#define IP_CHECKSUM 23
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index a1d7e931ab72..b0a813079852 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -166,6 +166,7 @@ struct input_keymap_entry {
#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
+#define INPUT_PROP_ACCELEROMETER 0x06 /* has accelerometer */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index e863d088b9a5..437a6a4b125a 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -1,6 +1,7 @@
#ifndef _UAPI_IPV6_H
#define _UAPI_IPV6_H
+#include <linux/libc-compat.h>
#include <linux/types.h>
#include <linux/in6.h>
#include <asm/byteorder.h>
@@ -15,16 +16,19 @@
* *under construction*
*/
-
+#if __UAPI_DEF_IN6_PKTINFO
struct in6_pktinfo {
struct in6_addr ipi6_addr;
int ipi6_ifindex;
};
+#endif
+#if __UAPI_DEF_IP6_MTUINFO
struct ip6_mtuinfo {
struct sockaddr_in6 ip6m_addr;
__u32 ip6m_mtu;
};
+#endif
struct in6_ifreq {
struct in6_addr ifr6_addr;
@@ -165,6 +169,7 @@ enum {
DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_ACCEPT_RA_FROM_LOCAL,
DEVCONF_USE_OPTIMISTIC,
+ DEVCONF_ACCEPT_RA_MTU,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 2f96d233c980..a6c4962e5d46 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -32,6 +32,7 @@
#define KPF_KSM 21
#define KPF_THP 22
#define KPF_BALLOON 23
+#define KPF_ZERO_PAGE 24
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a37fd1224f36..805570650062 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -491,6 +491,11 @@ struct kvm_s390_emerg_info {
__u16 code;
};
+#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
+struct kvm_s390_stop_info {
+ __u32 flags;
+};
+
struct kvm_s390_mchk_info {
__u64 cr14;
__u64 mcic;
@@ -509,6 +514,7 @@ struct kvm_s390_irq {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
char reserved[64];
} u;
@@ -753,6 +759,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_FIXUP_HCALL 103
#define KVM_CAP_PPC_ENABLE_HCALL 104
#define KVM_CAP_CHECK_EXTENSION_VM 105
+#define KVM_CAP_S390_USER_SIGP 106
#ifdef KVM_CAP_IRQ_ROUTING
@@ -952,6 +959,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
KVM_DEV_TYPE_FLIC,
#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
+ KVM_DEV_TYPE_ARM_VGIC_V3,
+#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
KVM_DEV_TYPE_MAX,
};
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 21caa2631c20..347ef22a964e 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -178,5 +178,6 @@ enum l2tp_seqmode {
*/
#define L2TP_GENL_NAME "l2tp"
#define L2TP_GENL_VERSION 0x1
+#define L2TP_GENL_MCGROUP "l2tp"
#endif /* _UAPI_LINUX_L2TP_H_ */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index e28807ad17fa..fa673e9cc040 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -70,6 +70,8 @@
#define __UAPI_DEF_IPV6_MREQ 0
#define __UAPI_DEF_IPPROTO_V6 0
#define __UAPI_DEF_IPV6_OPTIONS 0
+#define __UAPI_DEF_IN6_PKTINFO 0
+#define __UAPI_DEF_IP6_MTUINFO 0
#else
@@ -84,6 +86,8 @@
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
#define __UAPI_DEF_IPV6_OPTIONS 1
+#define __UAPI_DEF_IN6_PKTINFO 1
+#define __UAPI_DEF_IP6_MTUINFO 1
#endif /* _NETINET_IN_H */
@@ -106,6 +110,8 @@
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
#define __UAPI_DEF_IPV6_OPTIONS 1
+#define __UAPI_DEF_IN6_PKTINFO 1
+#define __UAPI_DEF_IP6_MTUINFO 1
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 0d11c3dcd3a1..9cd8b21dddbe 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -67,7 +67,7 @@ enum mpol_rebind_step {
#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
-#define MPOL_F_MORON (1 << 4) /* Migrate On pte_numa Reference On Node */
+#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
#endif /* _UAPI_LINUX_MEMPOLICY_H */
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index e284ff919d6e..e956704f5fb1 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -134,7 +134,7 @@ struct fat_boot_sector {
__u8 vol_id[4]; /* volume ID */
__u8 vol_label[11]; /* volume label */
__u8 fs_type[8]; /* file system type */
- /* other fiealds are not added here */
+ /* other fields are not added here */
} fat16;
struct {
@@ -157,7 +157,7 @@ struct fat_boot_sector {
__u8 vol_id[4]; /* volume ID */
__u8 vol_label[11]; /* volume label */
__u8 fs_type[8]; /* file system type */
- /* other fiealds are not added here */
+ /* other fields are not added here */
} fat32;
};
};
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index f3d77f9f1e0b..3873a35509aa 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -25,6 +25,7 @@ enum {
NDA_VNI,
NDA_IFINDEX,
NDA_MASTER,
+ NDA_LINK_NETNSID,
__NDA_MAX
};
diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h
new file mode 100644
index 000000000000..778cd2c3ebf4
--- /dev/null
+++ b/include/uapi/linux/net_namespace.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2015 6WIND S.A.
+ * Author: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+#ifndef _UAPI_LINUX_NET_NAMESPACE_H_
+#define _UAPI_LINUX_NET_NAMESPACE_H_
+
+/* Attributes of RTM_NEWNSID/RTM_GETNSID messages */
+enum {
+ NETNSA_NONE,
+#define NETNSA_NSID_NOT_ASSIGNED -1
+ NETNSA_NSID,
+ NETNSA_PID,
+ NETNSA_FD,
+ __NETNSA_MAX,
+};
+
+#define NETNSA_MAX (__NETNSA_MAX - 1)
+
+#endif /* _UAPI_LINUX_NET_NAMESPACE_H_ */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index edbc888ceb51..6d1abea9746e 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -24,8 +24,9 @@ enum {
SOF_TIMESTAMPING_TX_SCHED = (1<<8),
SOF_TIMESTAMPING_TX_ACK = (1<<9),
SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
+ SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TSONLY,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 8119255feae4..c1e2e63cf9b5 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -183,6 +183,7 @@ enum nfc_attrs {
NFC_ATTR_SE_APDU,
NFC_ATTR_TARGET_ISO15693_DSFID,
NFC_ATTR_TARGET_ISO15693_UID,
+ NFC_ATTR_SE_PARAMS,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
index 1fdc95bb2375..0bf130a1c58d 100644
--- a/include/uapi/linux/nfsd/debug.h
+++ b/include/uapi/linux/nfsd/debug.h
@@ -32,6 +32,7 @@
#define NFSDDBG_REPCACHE 0x0080
#define NFSDDBG_XDR 0x0100
#define NFSDDBG_LOCKD 0x0200
+#define NFSDDBG_PNFS 0x0400
#define NFSDDBG_ALL 0x7FFF
#define NFSDDBG_NOCHANGE 0xFFFF
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index 584b6ef3a5e8..4742f2cb42f2 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -47,8 +47,10 @@
* exported filesystem.
*/
#define NFSEXP_V4ROOT 0x10000
+#define NFSEXP_NOPNFS 0x20000
+
/* All flags that we claim to support. (Note we don't support NOACL.) */
-#define NFSEXP_ALLFLAGS 0x1FE7F
+#define NFSEXP_ALLFLAGS 0x3FE7F
/* The flags that may vary depending on security flavor: */
#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b37bd5a1cb82..68b294e83944 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -29,6 +29,13 @@
#define NL80211_GENL_NAME "nl80211"
+#define NL80211_MULTICAST_GROUP_CONFIG "config"
+#define NL80211_MULTICAST_GROUP_SCAN "scan"
+#define NL80211_MULTICAST_GROUP_REG "regulatory"
+#define NL80211_MULTICAST_GROUP_MLME "mlme"
+#define NL80211_MULTICAST_GROUP_VENDOR "vendor"
+#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
+
/**
* DOC: Station handling
*
@@ -173,8 +180,8 @@
* %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME.
*
* @NL80211_CMD_GET_INTERFACE: Request an interface's configuration;
- * either a dump request on a %NL80211_ATTR_WIPHY or a specific get
- * on an %NL80211_ATTR_IFINDEX is supported.
+ * either a dump request for all interfaces or a specific get with a
+ * single %NL80211_ATTR_IFINDEX is supported.
* @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE.
* @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response
@@ -252,7 +259,18 @@
* %NL80211_ATTR_IFINDEX.
*
* @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set
- * regulatory domain.
+ * regulatory domain. If %NL80211_ATTR_WIPHY is specified and the device
+ * has a private regulatory domain, it will be returned. Otherwise, the
+ * global regdomain will be returned.
+ * A device will have a private regulatory domain if it uses the
+ * regulatory_hint() API. Even when a private regdomain is used the channel
+ * information will still be mended according to further hints from
+ * the regulatory core to help with compliance. A dump version of this API
+ * is now available which will return the global regdomain as well as
+ * all private regdomains of present wiphys (for those that have it).
+ * If a wiphy is self-managed (%NL80211_ATTR_WIPHY_SELF_MANAGED_REG), then
+ * its private regdomain is the only valid one for it. The regulatory
+ * core is not used to help with compliance in this case.
* @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
* after being queried by the kernel. CRDA replies by sending a regulatory
* domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
@@ -306,7 +324,9 @@
* if passed, define which channels should be scanned; if not
* passed, all channels allowed for the current regulatory domain
* are used. Extra IEs can also be passed from the userspace by
- * using the %NL80211_ATTR_IE attribute.
+ * using the %NL80211_ATTR_IE attribute. The first cycle of the
+ * scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY
+ * if it is supplied.
* @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
* scheduled scan is not running. The caller may assume that as soon
* as the call returns, it is safe to start a new scheduled scan again.
@@ -774,6 +794,10 @@
* peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel
* when this command completes.
*
+ * @NL80211_CMD_WIPHY_REG_CHANGE: Similar to %NL80211_CMD_REG_CHANGE, but used
+ * as an event to indicate changes for devices with wiphy-specific regdom
+ * management.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -958,6 +982,8 @@ enum nl80211_commands {
NL80211_CMD_TDLS_CHANNEL_SWITCH,
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
+ NL80211_CMD_WIPHY_REG_CHANGE,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1655,6 +1681,9 @@ enum nl80211_commands {
* @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface
* creation then the new interface will be owned by the netlink socket
* that created it and will be destroyed when the socket is closed.
+ * If set during scheduled scan start then the new scan req will be
+ * owned by the netlink socket that created it and the scheduled scan will
+ * be stopped when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1688,6 +1717,29 @@ enum nl80211_commands {
*
* @NL80211_ATTR_MAC_MASK: MAC address mask
*
+ * @NL80211_ATTR_WIPHY_SELF_MANAGED_REG: flag attribute indicating this device
+ * is self-managing its regulatory information and any regulatory domain
+ * obtained from it is coming from the device's wiphy and not the global
+ * cfg80211 regdomain.
+ *
+ * @NL80211_ATTR_EXT_FEATURES: extended feature flags contained in a byte
+ * array. The feature flags are identified by their bit index (see &enum
+ * nl80211_ext_feature_index). The bit index is ordered starting at the
+ * least-significant bit of the first byte in the array, ie. bit index 0
+ * is located at bit 0 of byte 0. bit index 25 would be located at bit 1
+ * of byte 3 (u8 array).
+ *
+ * @NL80211_ATTR_SURVEY_RADIO_STATS: Request overall radio statistics to be
+ * returned along with other survey data. If set, @NL80211_CMD_GET_SURVEY
+ * may return a survey entry without a channel indicating global radio
+ * statistics (only some values are valid and make sense.)
+ * For devices that don't return such an entry even then, the information
+ * should be contained in the result as the sum of the respective counters
+ * over all channels.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a
+ * WoWLAN net-detect scan) is started, u32 in seconds.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2045,6 +2097,16 @@ enum nl80211_attrs {
NL80211_ATTR_MAC_MASK,
+ NL80211_ATTR_WIPHY_SELF_MANAGED_REG,
+
+ NL80211_ATTR_EXT_FEATURES,
+
+ NL80211_ATTR_SURVEY_RADIO_STATS,
+
+ NL80211_ATTR_NETNS_FD,
+
+ NL80211_ATTR_SCHED_SCAN_DELAY,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2085,7 +2147,7 @@ enum nl80211_attrs {
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77
-#define NL80211_MAX_SUPP_REG_RULES 32
+#define NL80211_MAX_SUPP_REG_RULES 64
#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
@@ -2225,8 +2287,15 @@ struct nl80211_sta_flag_update {
* @NL80211_RATE_INFO_VHT_MCS: MCS index for VHT (u8)
* @NL80211_RATE_INFO_VHT_NSS: number of streams in VHT (u8)
* @NL80211_RATE_INFO_80_MHZ_WIDTH: 80 MHz VHT rate
- * @NL80211_RATE_INFO_80P80_MHZ_WIDTH: 80+80 MHz VHT rate
+ * @NL80211_RATE_INFO_80P80_MHZ_WIDTH: unused - 80+80 is treated the
+ * same as 160 for purposes of the bitrates
* @NL80211_RATE_INFO_160_MHZ_WIDTH: 160 MHz VHT rate
+ * @NL80211_RATE_INFO_10_MHZ_WIDTH: 10 MHz width - note that this is
+ * a legacy rate and will be reported as the actual bitrate, i.e.
+ * half the base (20 MHz) rate
+ * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
+ * a legacy rate and will be reported as the actual bitrate, i.e.
+ * a quarter of the base (20 MHz) rate
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -2241,6 +2310,8 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_80_MHZ_WIDTH,
NL80211_RATE_INFO_80P80_MHZ_WIDTH,
NL80211_RATE_INFO_160_MHZ_WIDTH,
+ NL80211_RATE_INFO_10_MHZ_WIDTH,
+ NL80211_RATE_INFO_5_MHZ_WIDTH,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -2285,18 +2356,24 @@ enum nl80211_sta_bss_param {
*
* @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved
* @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
- * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
- * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
- * @NL80211_STA_INFO_RX_BYTES64: total received bytes (u64, from this station)
- * @NL80211_STA_INFO_TX_BYTES64: total transmitted bytes (u64, to this station)
+ * @NL80211_STA_INFO_RX_BYTES: total received bytes (MPDU length)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (MPDU length)
+ * (u32, to this station)
+ * @NL80211_STA_INFO_RX_BYTES64: total received bytes (MPDU length)
+ * (u64, from this station)
+ * @NL80211_STA_INFO_TX_BYTES64: total transmitted bytes (MPDU length)
+ * (u64, to this station)
* @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
* @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
* containing info as possible, see &enum nl80211_rate_info
- * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station)
- * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this
- * station)
- * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station)
- * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station)
+ * @NL80211_STA_INFO_RX_PACKETS: total received packet (MSDUs and MMPDUs)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (MSDUs and MMPDUs)
+ * (u32, to this station)
+ * @NL80211_STA_INFO_TX_RETRIES: total retries (MPDUs) (u32, to this station)
+ * @NL80211_STA_INFO_TX_FAILED: total failed packets (MPDUs)
+ * (u32, to this station)
* @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm)
* @NL80211_STA_INFO_LLID: the station's mesh LLID
* @NL80211_STA_INFO_PLID: the station's mesh PLID
@@ -2320,6 +2397,16 @@ enum nl80211_sta_bss_param {
* Same format as NL80211_STA_INFO_CHAIN_SIGNAL.
* @NL80211_STA_EXPECTED_THROUGHPUT: expected throughput considering also the
* 802.11 header (u32, kbps)
+ * @NL80211_STA_INFO_RX_DROP_MISC: RX packets dropped for unspecified reasons
+ * (u64)
+ * @NL80211_STA_INFO_BEACON_RX: number of beacons received from this peer (u64)
+ * @NL80211_STA_INFO_BEACON_SIGNAL_AVG: signal strength average
+ * for beacons only (u8, dBm)
+ * @NL80211_STA_INFO_TID_STATS: per-TID statistics (see &enum nl80211_tid_stats)
+ * This is a nested attribute where the inner attribute number is the
+ * TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames;
+ * each one of those is again nested with &enum nl80211_tid_stats
+ * attributes carrying the actual values.
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -2352,6 +2439,10 @@ enum nl80211_sta_info {
NL80211_STA_INFO_CHAIN_SIGNAL,
NL80211_STA_INFO_CHAIN_SIGNAL_AVG,
NL80211_STA_INFO_EXPECTED_THROUGHPUT,
+ NL80211_STA_INFO_RX_DROP_MISC,
+ NL80211_STA_INFO_BEACON_RX,
+ NL80211_STA_INFO_BEACON_SIGNAL_AVG,
+ NL80211_STA_INFO_TID_STATS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -2359,6 +2450,31 @@ enum nl80211_sta_info {
};
/**
+ * enum nl80211_tid_stats - per TID statistics attributes
+ * @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved
+ * @NL80211_TID_STATS_RX_MSDU: number of MSDUs received (u64)
+ * @NL80211_TID_STATS_TX_MSDU: number of MSDUs transmitted (or
+ * attempted to transmit; u64)
+ * @NL80211_TID_STATS_TX_MSDU_RETRIES: number of retries for
+ * transmitted MSDUs (not counting the first attempt; u64)
+ * @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
+ * MSDUs (u64)
+ * @NUM_NL80211_TID_STATS: number of attributes here
+ * @NL80211_TID_STATS_MAX: highest numbered attribute here
+ */
+enum nl80211_tid_stats {
+ __NL80211_TID_STATS_INVALID,
+ NL80211_TID_STATS_RX_MSDU,
+ NL80211_TID_STATS_TX_MSDU,
+ NL80211_TID_STATS_TX_MSDU_RETRIES,
+ NL80211_TID_STATS_TX_MSDU_FAILED,
+
+ /* keep last */
+ NUM_NL80211_TID_STATS,
+ NL80211_TID_STATS_MAX = NUM_NL80211_TID_STATS - 1
+};
+
+/**
* enum nl80211_mpath_flags - nl80211 mesh path flags
*
* @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active
@@ -2772,16 +2888,18 @@ enum nl80211_user_reg_hint_type {
* @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel
* @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm)
* @NL80211_SURVEY_INFO_IN_USE: channel is currently being used
- * @NL80211_SURVEY_INFO_CHANNEL_TIME: amount of time (in ms) that the radio
- * spent on this channel
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY: amount of the time the primary
+ * @NL80211_SURVEY_INFO_TIME: amount of time (in ms) that the radio
+ * was turned on (on channel or globally)
+ * @NL80211_SURVEY_INFO_TIME_BUSY: amount of the time the primary
* channel was sensed busy (either due to activity or energy detect)
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: amount of time the extension
+ * @NL80211_SURVEY_INFO_TIME_EXT_BUSY: amount of time the extension
* channel was sensed busy
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_RX: amount of time the radio spent
- * receiving data
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_TX: amount of time the radio spent
- * transmitting data
+ * @NL80211_SURVEY_INFO_TIME_RX: amount of time the radio spent
+ * receiving data (on channel or globally)
+ * @NL80211_SURVEY_INFO_TIME_TX: amount of time the radio spent
+ * transmitting data (on channel or globally)
+ * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan
+ * (on this channel or globally)
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
@@ -2791,17 +2909,25 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_FREQUENCY,
NL80211_SURVEY_INFO_NOISE,
NL80211_SURVEY_INFO_IN_USE,
- NL80211_SURVEY_INFO_CHANNEL_TIME,
- NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
- NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
- NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
- NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+ NL80211_SURVEY_INFO_TIME,
+ NL80211_SURVEY_INFO_TIME_BUSY,
+ NL80211_SURVEY_INFO_TIME_EXT_BUSY,
+ NL80211_SURVEY_INFO_TIME_RX,
+ NL80211_SURVEY_INFO_TIME_TX,
+ NL80211_SURVEY_INFO_TIME_SCAN,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
NL80211_SURVEY_INFO_MAX = __NL80211_SURVEY_INFO_AFTER_LAST - 1
};
+/* keep old names for compatibility */
+#define NL80211_SURVEY_INFO_CHANNEL_TIME NL80211_SURVEY_INFO_TIME
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY NL80211_SURVEY_INFO_TIME_BUSY
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY NL80211_SURVEY_INFO_TIME_EXT_BUSY
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_RX NL80211_SURVEY_INFO_TIME_RX
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_TX NL80211_SURVEY_INFO_TIME_TX
+
/**
* enum nl80211_mntr_flags - monitor configuration flags
*
@@ -3238,6 +3364,9 @@ enum nl80211_bss {
/**
* enum nl80211_bss_status - BSS "status"
* @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS.
+ * Note that this is no longer used since cfg80211 no longer
+ * keeps track of whether or not authentication was done with
+ * a given BSS.
* @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS.
* @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS.
*
@@ -3621,9 +3750,12 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network
* is detected. This is a nested attribute that contains the
* same attributes used with @NL80211_CMD_START_SCHED_SCAN. It
- * specifies how the scan is performed (e.g. the interval and the
- * channels to scan) as well as the scan results that will
- * trigger a wake (i.e. the matchsets).
+ * specifies how the scan is performed (e.g. the interval, the
+ * channels to scan and the initial delay) as well as the scan
+ * results that will trigger a wake (i.e. the matchsets). This
+ * attribute is also sent in a response to
+ * @NL80211_CMD_GET_WIPHY, indicating the number of match sets
+ * supported by the driver (u32).
* @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute
* containing an array with information about what triggered the
* wake up. If no elements are present in the array, it means
@@ -4194,6 +4326,19 @@ enum nl80211_feature_flags {
};
/**
+ * enum nl80211_ext_feature_index - bit index of extended features.
+ *
+ * @NUM_NL80211_EXT_FEATURES: number of extended features.
+ * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
+ */
+enum nl80211_ext_feature_index {
+
+ /* add new features before the definition below */
+ NUM_NL80211_EXT_FEATURES,
+ MAX_NL80211_EXT_FEATURES = NUM_NL80211_EXT_FEATURES - 1
+};
+
+/**
* enum nl80211_probe_resp_offload_support_attr - optional supported
* protocols for probe-response offloading by the driver/FW.
* To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3a6dcaa359b7..bbd49a0c46c7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
attributes. */
+ OVS_PACKET_ATTR_UNUSED1,
+ OVS_PACKET_ATTR_UNUSED2,
+ OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
+ error logging should be suppressed. */
__OVS_PACKET_ATTR_MAX
};
@@ -248,11 +252,21 @@ enum ovs_vport_attr {
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+enum {
+ OVS_VXLAN_EXT_UNSPEC,
+ OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
+ __OVS_VXLAN_EXT_MAX,
+};
+
+#define OVS_VXLAN_EXT_MAX (__OVS_VXLAN_EXT_MAX - 1)
+
+
/* OVS_VPORT_ATTR_OPTIONS attributes for tunnels.
*/
enum {
OVS_TUNNEL_ATTR_UNSPEC,
OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */
+ OVS_TUNNEL_ATTR_EXTENSION,
__OVS_TUNNEL_ATTR_MAX
};
@@ -324,6 +338,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */
OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */
+ OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -444,6 +459,14 @@ struct ovs_key_nd {
* a wildcarded match. Omitting attribute is treated as wildcarding all
* corresponding fields. Optional for all requests. If not present,
* all flow key bits are exact match bits.
+ * @OVS_FLOW_ATTR_UFID: A value between 1-16 octets specifying a unique
+ * identifier for the flow. Causes the flow to be indexed by this value rather
+ * than the value of the %OVS_FLOW_ATTR_KEY attribute. Optional for all
+ * requests. Present in notifications if the flow was created with this
+ * attribute.
+ * @OVS_FLOW_ATTR_UFID_FLAGS: A 32-bit value of OR'd %OVS_UFID_F_*
+ * flags that provide alternative semantics for flow installation and
+ * retrieval. Optional for all requests.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_FLOW_* commands.
@@ -459,12 +482,24 @@ enum ovs_flow_attr {
OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */
OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error
* logging should be suppressed. */
+ OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
+ OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
__OVS_FLOW_ATTR_MAX
};
#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
/**
+ * Omit attributes for notifications.
+ *
+ * If a datapath request contains an %OVS_UFID_F_OMIT_* flag, then the datapath
+ * may omit the corresponding %OVS_FLOW_ATTR_* from the response.
+ */
+#define OVS_UFID_F_OMIT_KEY (1 << 0)
+#define OVS_UFID_F_OMIT_MASK (1 << 1)
+#define OVS_UFID_F_OMIT_ACTIONS (1 << 2)
+
+/**
* enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
* @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
* @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of
@@ -564,6 +599,12 @@ struct ovs_action_hash {
* @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The
* single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
* value.
+ * @OVS_ACTION_ATTR_SET_MASKED: Replaces the contents of an existing header. A
+ * nested %OVS_KEY_ATTR_* attribute specifies a header to modify, its value,
+ * and a mask. For every bit set in the mask, the corresponding bit value
+ * is copied from the value to the packet header field, rest of the bits are
+ * left unchanged. The non-masked value bits must be passed in as zeroes.
+ * Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
* @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
* packet.
* @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
@@ -582,6 +623,9 @@ struct ovs_action_hash {
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
* type may not be changed.
+ *
+ * @OVS_ACTION_ATTR_SET_TO_MASKED: Kernel internal masked set action translated
+ * from the @OVS_ACTION_ATTR_SET.
*/
enum ovs_action_attr {
@@ -596,8 +640,19 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */
OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */
+ OVS_ACTION_ATTR_SET_MASKED, /* One nested OVS_KEY_ATTR_* including
+ * data immediately followed by a mask.
+ * The data must be zero for the unmasked
+ * bits. */
- __OVS_ACTION_ATTR_MAX
+ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
+ * from userspace. */
+
+#ifdef __KERNEL__
+ OVS_ACTION_ATTR_SET_TO_MASKED, /* Kernel module internal masked
+ * set action converted from
+ * OVS_ACTION_ATTR_SET. */
+#endif
};
#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
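Not part of the patch, but a minimal sketch of the masked-set semantics documented above for a single 32-bit field; the helper name and types here are invented for illustration:

/* Masked set as described for OVS_ACTION_ATTR_SET_MASKED: bits set in
 * 'mask' are taken from 'value', the rest keep the field's old contents.
 * Userspace must pass the value with (value & ~mask) == 0. */
static inline __u32 ovs_masked_set32(__u32 field, __u32 value, __u32 mask)
{
	return (field & ~mask) | (value & mask);
}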
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a1d0cc38ff2..efe3443572ba 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -451,6 +451,10 @@
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
+#define PCI_EXP_DEVCTL_READRQ_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
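As a rough illustration (not part of the patch), the four new names match the existing encoding of Max_Read_Request_Size in bits 14:12 of Device Control; the helper below is hypothetical:

/* Decode Max_Read_Request_Size from a PCI_EXP_DEVCTL value read with
 * pcie_capability_read_word() or pci_read_config_word(). */
static inline unsigned int pcie_readrq_bytes(u16 devctl)
{
	/* 0x0000 -> 128B, 0x1000 -> 256B, 0x2000 -> 512B, 0x3000 -> 1024B */
	return 128U << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}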
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index d62316baae94..534b84710745 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -774,6 +774,8 @@ enum {
TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
+ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+
__TCA_FQ_MAX
};
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 3b6cfbeb086d..1f49b8341c99 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -126,10 +126,22 @@ struct if_dqblk {
#define IIF_FLAGS 4
#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS)
+enum {
+ DQF_ROOT_SQUASH_B = 0,
+ DQF_SYS_FILE_B = 16,
+ /* Kernel internal flags invisible to userspace */
+ DQF_PRIVATE
+};
+
+/* Root squash enabled (for v1 quota format) */
+#define DQF_ROOT_SQUASH (1 << DQF_ROOT_SQUASH_B)
+/* Quota stored in a system file */
+#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B)
+
struct if_dqinfo {
__u64 dqi_bgrace;
__u64 dqi_igrace;
- __u32 dqi_flags;
+ __u32 dqi_flags; /* DQF_* */
__u32 dqi_valid;
};
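A minimal sketch of how the new bits might be tested once quotactl(QCMD(Q_GETINFO, ...)) has filled a struct if_dqinfo; the helper names are made up:

#include <linux/quota.h>

static int quota_in_system_file(const struct if_dqinfo *info)
{
	return (info->dqi_flags & DQF_SYS_FILE) != 0;
}

static int quota_root_squash_enabled(const struct if_dqinfo *info)
{
	return (info->dqi_flags & DQF_ROOT_SQUASH) != 0;
}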
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 9c9b8b4480cd..5cc5d66bf519 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -132,6 +132,11 @@ enum {
RTM_GETMDB = 86,
#define RTM_GETMDB RTM_GETMDB
+ RTM_NEWNSID = 88,
+#define RTM_NEWNSID RTM_NEWNSID
+ RTM_GETNSID = 90,
+#define RTM_GETNSID RTM_GETNSID
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -389,6 +394,8 @@ enum {
#define RTAX_INITRWND RTAX_INITRWND
RTAX_QUICKACK,
#define RTAX_QUICKACK RTAX_QUICKACK
+ RTAX_CC_ALGO,
+#define RTAX_CC_ALGO RTAX_CC_ALGO
__RTAX_MAX
};
@@ -634,6 +641,7 @@ struct tcamsg {
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)
#define RTEXT_FILTER_BRVLAN (1 << 1)
+#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
/* End of information exported to user level */
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index c17218094f18..b2122813f18a 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -55,7 +55,8 @@
#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */
#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
#define PORT_RT2880 29 /* Ralink RT2880 internal UART */
-#define PORT_MAX_8250 29 /* max port ID */
+#define PORT_16550A_FSL64 30 /* Freescale 16550 UART with 64 FIFOs */
+#define PORT_MAX_8250 30 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -248,4 +249,13 @@
/* MESON */
#define PORT_MESON 109
+/* Conexant Digicolor */
+#define PORT_DIGICOLOR 110
+
+/* SPRD SERIAL */
+#define PORT_SPRD 111
+
+/* Cris v10 / v32 SoC */
+#define PORT_CRIS 112
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 53af3b790129..00adb01fa5f3 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -86,7 +86,8 @@
#define UART_FCR6_T_TRIGGER_8 0x10 /* Mask for transmit trigger set at 8 */
#define UART_FCR6_T_TRIGGER_24 0x20 /* Mask for transmit trigger set at 24 */
#define UART_FCR6_T_TRIGGER_30 0x30 /* Mask for transmit trigger set at 30 */
-#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750) */
+#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750 and
+ some Freescale UARTs) */
#define UART_FCR_R_TRIG_SHIFT 6
#define UART_FCR_R_TRIG_BITS(x) \
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index b22224100011..6a6fb747c78d 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -270,6 +270,12 @@ enum
LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */
LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */
LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV, /* TCPACKSkippedSynRecv */
+ LINUX_MIB_TCPACKSKIPPEDPAWS, /* TCPACKSkippedPAWS */
+ LINUX_MIB_TCPACKSKIPPEDSEQ, /* TCPACKSkippedSeq */
+ LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
+ LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index b057da2b87a4..19d5219b0b99 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -8,3 +8,4 @@ header-y += tc_nat.h
header-y += tc_pedit.h
header-y += tc_skbedit.h
header-y += tc_vlan.h
+header-y += tc_bpf.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644
index 000000000000..5288bd77e63b
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_BPF_H
+#define __LINUX_TC_BPF_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_BPF 13
+
+struct tc_act_bpf {
+ tc_gen;
+};
+
+enum {
+ TCA_ACT_BPF_UNSPEC,
+ TCA_ACT_BPF_TM,
+ TCA_ACT_BPF_PARMS,
+ TCA_ACT_BPF_OPS_LEN,
+ TCA_ACT_BPF_OPS,
+ __TCA_ACT_BPF_MAX,
+};
+#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
new file mode 100644
index 000000000000..994b0971bce2
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_connmark.h
@@ -0,0 +1,22 @@
+#ifndef __UAPI_TC_CONNMARK_H
+#define __UAPI_TC_CONNMARK_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_CONNMARK 14
+
+struct tc_connmark {
+ tc_gen;
+ __u16 zone;
+};
+
+enum {
+ TCA_CONNMARK_UNSPEC,
+ TCA_CONNMARK_PARMS,
+ TCA_CONNMARK_TM,
+ __TCA_CONNMARK_MAX
+};
+#define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 876d0a14863c..087b0ef82c07 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -272,6 +272,26 @@ static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
(ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
}
+static inline int TLV_GET_LEN(struct tlv_desc *tlv)
+{
+ return ntohs(tlv->tlv_len);
+}
+
+static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len)
+{
+ tlv->tlv_len = htons(len);
+}
+
+static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type)
+{
+ return (ntohs(tlv->tlv_type) == type);
+}
+
+static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type)
+{
+ tlv->tlv_type = htons(type);
+}
+
static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
{
struct tlv_desc *tlv_ptr;
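A small usage sketch for the new accessors, assuming the TLV_LENGTH() macro from the same header; the function names are illustrative only:

static inline void tlv_example_init(struct tlv_desc *tlv, __u16 type,
				    __u16 payload_len)
{
	TLV_SET_TYPE(tlv, type);
	TLV_SET_LEN(tlv, TLV_LENGTH(payload_len));
}

static inline int tlv_example_matches(struct tlv_desc *tlv, __u16 type)
{
	return TLV_CHECK_TYPE(tlv, type) &&
	       TLV_GET_LEN(tlv) >= (int)TLV_LENGTH(0);
}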
diff --git a/include/uapi/linux/uinput.h b/include/uapi/linux/uinput.h
index baeab83deb64..013c9d8db372 100644
--- a/include/uapi/linux/uinput.h
+++ b/include/uapi/linux/uinput.h
@@ -82,7 +82,7 @@ struct uinput_ff_erase {
* The complete sysfs path is then /sys/devices/virtual/input/--NAME--
* Usually, it is in the form "inputN"
*/
-#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 300, len)
+#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 44, len)
/**
* UI_GET_VERSION - Return version of uinput protocol
@@ -91,7 +91,7 @@ struct uinput_ff_erase {
* the integer pointed to by the ioctl argument. The protocol version
* is hard-coded in the kernel and is independent of the uinput device.
*/
-#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 301, unsigned int)
+#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 45, unsigned int)
/*
* To write a force-feedback-capable driver, the upload_effect
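With the updated ioctl numbers, a userspace query might look like the sketch below; the device node path is the conventional one but still an assumption here:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/uinput.h>

static unsigned int uinput_protocol_version(void)
{
	unsigned int version = 0;
	int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK);

	if (fd >= 0) {
		ioctl(fd, UI_GET_VERSION, &version);
		close(fd);
	}
	return version;
}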
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 295ba299e7bd..108dd7997014 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -20,6 +20,7 @@ enum functionfs_flags {
FUNCTIONFS_HAS_SS_DESC = 4,
FUNCTIONFS_HAS_MS_OS_DESC = 8,
FUNCTIONFS_VIRTUAL_ADDR = 16,
+ FUNCTIONFS_EVENTFD = 32,
};
/* Descriptor of a non-audio endpoint */
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index abe5f4bd4d82..019ba1e0799a 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -128,11 +128,12 @@ struct usbdevfs_hub_portinfo {
char port [127]; /* e.g. port 3 connects to device 27 */
};
-/* Device capability flags */
+/* System and bus capability flags */
#define USBDEVFS_CAP_ZERO_PACKET 0x01
#define USBDEVFS_CAP_BULK_CONTINUATION 0x02
#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04
#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08
+#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 661f119a51b8..9f6e108ff4a0 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -170,6 +170,10 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_SAA7134_BASE (V4L2_CID_USER_BASE + 0x1060)
+/* The base for the adv7180 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index d279c1b75cf7..fbdc3602ee27 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -463,10 +463,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
-#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */
-#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
-#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
+ /* 10bit raw bayer packed, 5 bytes for every 4 pixels */
+#define V4L2_PIX_FMT_SBGGR10P v4l2_fourcc('p', 'B', 'A', 'A')
+#define V4L2_PIX_FMT_SGBRG10P v4l2_fourcc('p', 'G', 'A', 'A')
+#define V4L2_PIX_FMT_SGRBG10P v4l2_fourcc('p', 'g', 'A', 'A')
+#define V4L2_PIX_FMT_SRGGB10P v4l2_fourcc('p', 'R', 'A', 'A')
/* 10bit raw bayer a-law compressed to 8 bits */
#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8')
#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8')
@@ -477,10 +478,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8')
- /*
- * 10bit raw bayer, expanded to 16 bits
- * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
- */
+#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
/* compressed formats */
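For the packed 10-bit formats added above (5 bytes per 4 pixels), the minimum line stride works out as in this illustrative helper, assuming a width that is a multiple of 4:

static inline unsigned int v4l2_10p_bytesperline(unsigned int width)
{
	/* 4 pixels -> 4 bytes of high bits + 1 byte of packed 2-bit LSBs */
	return width * 5 / 4;
}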
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 4275b961bf60..867cc5084afb 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,7 +90,6 @@ enum {
};
enum {
- IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
@@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
-enum {
- IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
-};
-
-struct ib_uverbs_ex_query_device {
- __u32 comp_mask;
- __u32 reserved;
-};
-
-struct ib_uverbs_odp_caps {
- __u64 general_caps;
- struct {
- __u32 rc_odp_caps;
- __u32 uc_odp_caps;
- __u32 ud_odp_caps;
- } per_transport_caps;
- __u32 reserved;
-};
-
-struct ib_uverbs_ex_query_device_resp {
- struct ib_uverbs_query_device_resp base;
- __u32 comp_mask;
- __u32 reserved;
- struct ib_uverbs_odp_caps odp_caps;
-};
-
struct ib_uverbs_query_port {
__u64 response;
__u8 port_num;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 1f23cd635957..0e88e7a0f0eb 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -268,6 +268,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */
#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
typedef int __bitwise snd_pcm_state_t;
diff --git a/include/uapi/sound/usb_stream.h b/include/uapi/sound/usb_stream.h
new file mode 100644
index 000000000000..cfe8fba00714
--- /dev/null
+++ b/include/uapi/sound/usb_stream.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _UAPI__SOUND_USB_STREAM_H
+#define _UAPI__SOUND_USB_STREAM_H
+
+#define USB_STREAM_INTERFACE_VERSION 2
+
+#define SNDRV_USB_STREAM_IOCTL_SET_PARAMS \
+ _IOW('H', 0x90, struct usb_stream_config)
+
+struct usb_stream_packet {
+ unsigned offset;
+ unsigned length;
+};
+
+
+struct usb_stream_config {
+ unsigned version;
+ unsigned sample_rate;
+ unsigned period_frames;
+ unsigned frame_size;
+};
+
+struct usb_stream {
+ struct usb_stream_config cfg;
+ unsigned read_size;
+ unsigned write_size;
+
+ int period_size;
+
+ unsigned state;
+
+ int idle_insize;
+ int idle_outsize;
+ int sync_packet;
+ unsigned insize_done;
+ unsigned periods_done;
+ unsigned periods_polled;
+
+ struct usb_stream_packet outpacket[2];
+ unsigned inpackets;
+ unsigned inpacket_head;
+ unsigned inpacket_split;
+ unsigned inpacket_split_at;
+ unsigned next_inpacket_split;
+ unsigned next_inpacket_split_at;
+ struct usb_stream_packet inpacket[0];
+};
+
+enum usb_stream_state {
+ usb_stream_invalid,
+ usb_stream_stopped,
+ usb_stream_sync0,
+ usb_stream_sync1,
+ usb_stream_ready,
+ usb_stream_running,
+ usb_stream_xrun,
+};
+
+#endif /* _UAPI__SOUND_USB_STREAM_H */
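A hedged userspace sketch of setting the stream parameters through the new ioctl; the hwdep device path, sample rate and sizes are placeholders, not values required by the driver:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/usb_stream.h>

static int usb_stream_set_params(const char *hwdep_path)
{
	struct usb_stream_config cfg = {
		.version       = USB_STREAM_INTERFACE_VERSION,
		.sample_rate   = 48000,
		.period_frames = 256,
		.frame_size    = 6,	/* e.g. 2 channels x 24 bit */
	};
	int ret = -1;
	int fd = open(hwdep_path, O_RDWR);

	if (fd >= 0) {
		ret = ioctl(fd, SNDRV_USB_STREAM_IOCTL_SET_PARAMS, &cfg);
		close(fd);
	}
	return ret;
}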
diff --git a/include/video/exynos7_decon.h b/include/video/exynos7_decon.h
new file mode 100644
index 000000000000..a62b11b613f6
--- /dev/null
+++ b/include/video/exynos7_decon.h
@@ -0,0 +1,349 @@
+/* include/video/exynos7_decon.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Ajay Kumar <ajaykumar.rs@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* VIDCON0 */
+#define VIDCON0 0x00
+
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_DECON_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUTCON0 0x4
+
+#define VIDOUTCON0_DUAL_MASK (0x3 << 24)
+#define VIDOUTCON0_DUAL_ON (0x3 << 24)
+#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24)
+#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24)
+#define VIDOUTCON0_DUAL_OFF (0x0 << 24)
+#define VIDOUTCON0_IF_SHIFT 23
+#define VIDOUTCON0_IF_MASK (0x1 << 23)
+#define VIDOUTCON0_RGBIF (0x0 << 23)
+#define VIDOUTCON0_I80IF (0x1 << 23)
+
+/* VIDCON3 */
+#define VIDCON3 0x8
+
+/* VIDCON4 */
+#define VIDCON4 0xC
+#define VIDCON4_FIFOCNT_START_EN (1 << 0)
+
+/* VCLKCON0 */
+#define VCLKCON0 0x10
+#define VCLKCON0_CLKVALUP (1 << 8)
+#define VCLKCON0_VCLKFREE (1 << 0)
+
+/* VCLKCON */
+#define VCLKCON1 0x14
+#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0)
+#define VCLKCON2 0x18
+
+/* SHADOWCON */
+#define SHADOWCON 0x30
+
+#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
+
+/* WINCONx */
+#define WINCON(_win) (0x50 + ((_win) * 4))
+
+#define WINCONx_BUFSTATUS (0x3 << 30)
+#define WINCONx_BUFSEL_MASK (0x3 << 28)
+#define WINCONx_BUFSEL_SHIFT 28
+#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
+#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
+#define WINCONx_BURSTLEN_MASK (0x1 << 11)
+#define WINCONx_BURSTLEN_SHIFT 11
+#define WINCONx_BLD_PLANE (0 << 8)
+#define WINCONx_BLD_PIX (1 << 8)
+#define WINCONx_ALPHA_MUL (1 << 7)
+
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_SHIFT 2
+#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2)
+#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2)
+#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2)
+#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2)
+#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2)
+#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2)
+#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2)
+#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2)
+#define WINCONx_ALPHA_SEL (1 << 1)
+#define WINCONx_ENWIN (1 << 0)
+
+#define WINCON1_ALPHA_MUL_F (1 << 7)
+#define WINCON2_ALPHA_MUL_F (1 << 7)
+#define WINCON3_ALPHA_MUL_F (1 << 7)
+#define WINCON4_ALPHA_MUL_F (1 << 7)
+
+/* VIDOSDxH: The height for the OSD image (READ ONLY) */
+#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
+
+/* Frame buffer start addresses: VIDWxxADD0n */
+#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
+#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
+#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
+
+#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
+#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
+#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8))
+#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8))
+#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4))
+#define VIDW_BLKSIZE(_win) (0x0200 + ((_win) * 4))
+
+/* Interrupt controls register */
+#define VIDINTCON2 0x228
+
+#define VIDINTCON1_INTEXTRA1_EN (1 << 1)
+#define VIDINTCON1_INTEXTRA0_EN (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON3 0x22C
+
+#define VIDINTCON1_INTEXTRA1_PEND (1 << 1)
+#define VIDINTCON1_INTEXTRA0_PEND (1 << 0)
+
+/* VIDOSDxA ~ VIDOSDxE */
+#define VIDOSD_BASE 0x230
+
+#define OSD_STRIDE 0x20
+
+#define VIDOSD_A(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x00)
+#define VIDOSD_B(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x04)
+#define VIDOSD_C(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x08)
+#define VIDOSD_D(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x0C)
+#define VIDOSD_E(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x10)
+
+#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13)
+#define VIDOSDxA_TOPLEFT_X_SHIFT 13
+#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0)
+#define VIDOSDxA_TOPLEFT_Y_SHIFT 0
+#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13)
+#define VIDOSDxB_BOTRIGHT_X_SHIFT 13
+#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0)
+#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0
+#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0)
+
+#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) << 0)
+
+/* Window MAP (Color map) */
+#define WINxMAP(_win) (0x340 + ((_win) * 4))
+
+#define WINxMAP_MAP (1 << 24)
+#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
+#define WINxMAP_MAP_COLOUR_SHIFT 0
+#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff
+#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
+
+/* Window colour-key control registers */
+#define WKEYCON 0x370
+
+#define WKEYCON0 0x00
+#define WKEYCON1 0x04
+#define WxKEYCON0_KEYBL_EN (1 << 26)
+#define WxKEYCON0_KEYEN_F (1 << 25)
+#define WxKEYCON0_DIRCON (1 << 24)
+#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0)
+#define WxKEYCON0_COMPKEY_SHIFT 0
+#define WxKEYCON0_COMPKEY_LIMIT 0xffffff
+#define WxKEYCON0_COMPKEY(_x) ((_x) << 0)
+#define WxKEYCON1_COLVAL_MASK (0xffffff << 0)
+#define WxKEYCON1_COLVAL_SHIFT 0
+#define WxKEYCON1_COLVAL_LIMIT 0xffffff
+#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8))
+
+/* Window KEY Alpha value */
+#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4))
+
+#define Wx_KEYALPHA_R_F_SHIFT 16
+#define Wx_KEYALPHA_G_F_SHIFT 8
+#define Wx_KEYALPHA_B_F_SHIFT 0
+
+/* Blending equation */
+#define BLENDE(_win) (0x03C0 + ((_win) * 4))
+#define BLENDE_COEF_ZERO 0x0
+#define BLENDE_COEF_ONE 0x1
+#define BLENDE_COEF_ALPHA_A 0x2
+#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3
+#define BLENDE_COEF_ALPHA_B 0x4
+#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5
+#define BLENDE_COEF_ALPHA0 0x6
+#define BLENDE_COEF_A 0xA
+#define BLENDE_COEF_ONE_MINUS_A 0xB
+#define BLENDE_COEF_B 0xC
+#define BLENDE_COEF_ONE_MINUS_B 0xD
+#define BLENDE_Q_FUNC(_v) ((_v) << 18)
+#define BLENDE_P_FUNC(_v) ((_v) << 12)
+#define BLENDE_B_FUNC(_v) ((_v) << 6)
+#define BLENDE_A_FUNC(_v) ((_v) << 0)
+
+/* Blending equation control */
+#define BLENDCON 0x3D8
+#define BLENDCON_NEW_MASK (1 << 0)
+#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
+#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+
+/* Interrupt control register */
+#define VIDINTCON0 0x500
+
+#define VIDINTCON0_WAKEUP_MASK (0x3f << 26)
+#define VIDINTCON0_INTEXTRAEN (1 << 21)
+
+#define VIDINTCON0_FRAMESEL0_SHIFT 15
+#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15)
+#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15)
+#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15)
+#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15)
+#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15)
+
+#define VIDINTCON0_INT_FRAME (1 << 11)
+
+#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3)
+#define VIDINTCON0_FIFOLEVEL_SHIFT 3
+#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3)
+#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3)
+
+#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1)
+#define VIDINTCON0_INT_FIFO (1 << 1)
+
+#define VIDINTCON0_INT_ENABLE (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON1 0x504
+
+#define VIDINTCON1_INT_EXTRA (1 << 3)
+#define VIDINTCON1_INT_I80 (1 << 2)
+#define VIDINTCON1_INT_FRAME (1 << 1)
+#define VIDINTCON1_INT_FIFO (1 << 0)
+
+/* VIDCON1 */
+#define VIDCON1(_x) (0x0600 + ((_x) * 0x50))
+#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff)
+#define VIDCON1_VCLK_MASK (0x3 << 9)
+#define VIDCON1_VCLK_HOLD (0x0 << 9)
+#define VIDCON1_VCLK_RUN (0x1 << 9)
+#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
+#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4)
+#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4)
+#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4)
+#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4)
+#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4)
+#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4)
+#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4)
+
+/* VIDTCON0 */
+#define VIDTCON0 0x610
+
+#define VIDTCON0_VBPD_MASK (0xffff << 16)
+#define VIDTCON0_VBPD_SHIFT 16
+#define VIDTCON0_VBPD_LIMIT 0xffff
+#define VIDTCON0_VBPD(_x) ((_x) << 16)
+
+#define VIDTCON0_VFPD_MASK (0xffff << 0)
+#define VIDTCON0_VFPD_SHIFT 0
+#define VIDTCON0_VFPD_LIMIT 0xffff
+#define VIDTCON0_VFPD(_x) ((_x) << 0)
+
+/* VIDTCON1 */
+#define VIDTCON1 0x614
+
+#define VIDTCON1_VSPW_MASK (0xffff << 16)
+#define VIDTCON1_VSPW_SHIFT 16
+#define VIDTCON1_VSPW_LIMIT 0xffff
+#define VIDTCON1_VSPW(_x) ((_x) << 16)
+
+/* VIDTCON2 */
+#define VIDTCON2 0x618
+
+#define VIDTCON2_HBPD_MASK (0xffff << 16)
+#define VIDTCON2_HBPD_SHIFT 16
+#define VIDTCON2_HBPD_LIMIT 0xffff
+#define VIDTCON2_HBPD(_x) ((_x) << 16)
+
+#define VIDTCON2_HFPD_MASK (0xffff << 0)
+#define VIDTCON2_HFPD_SHIFT 0
+#define VIDTCON2_HFPD_LIMIT 0xffff
+#define VIDTCON2_HFPD(_x) ((_x) << 0)
+
+/* VIDTCON3 */
+#define VIDTCON3 0x61C
+
+#define VIDTCON3_HSPW_MASK (0xffff << 16)
+#define VIDTCON3_HSPW_SHIFT 16
+#define VIDTCON3_HSPW_LIMIT 0xffff
+#define VIDTCON3_HSPW(_x) ((_x) << 16)
+
+/* VIDTCON4 */
+#define VIDTCON4 0x620
+
+#define VIDTCON4_LINEVAL_MASK (0xfff << 16)
+#define VIDTCON4_LINEVAL_SHIFT 16
+#define VIDTCON4_LINEVAL_LIMIT 0xfff
+#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16)
+
+#define VIDTCON4_HOZVAL_MASK (0xfff << 0)
+#define VIDTCON4_HOZVAL_SHIFT 0
+#define VIDTCON4_HOZVAL_LIMIT 0xfff
+#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0)
+
+/* LINECNT OP THRESHOLD */
+#define LINECNT_OP_THRESHOLD 0x630
+
+/* CRCCTRL */
+#define CRCCTRL 0x6C8
+#define CRCCTRL_CRCCLKEN (0x1 << 2)
+#define CRCCTRL_CRCSTART_F (0x1 << 1)
+#define CRCCTRL_CRCEN (0x1 << 0)
+
+/* DECON_CMU */
+#define DECON_CMU 0x704
+
+#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3
+#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2)
+#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1)
+#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0)
+
+/* DECON_UPDATE */
+#define DECON_UPDATE 0x710
+
+#define DECON_UPDATE_SLAVE_SYNC (1 << 4)
+#define DECON_UPDATE_STANDALONE_F (1 << 0)
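As an illustration only, window geometry could be programmed with the new macros roughly as below; 'regs' is assumed to be the ioremapped DECON register base:

static void decon_set_win0_geometry(void __iomem *regs,
				    u32 x0, u32 y0, u32 x1, u32 y1)
{
	writel(VIDOSDxA_TOPLEFT_X(x0) | VIDOSDxA_TOPLEFT_Y(y0),
	       regs + VIDOSD_A(0));
	writel(VIDOSDxB_BOTRIGHT_X(x1) | VIDOSDxB_BOTRIGHT_Y(y1),
	       regs + VIDOSD_B(0));
}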
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index c74bf4a0520e..73390c120cad 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/fb.h>
#include <media/v4l2-mediabus.h>
+#include <video/videomode.h>
struct ipu_soc;
@@ -32,28 +33,15 @@ enum ipuv3_type {
* Bitfield of Display Interface signal polarities.
*/
struct ipu_di_signal_cfg {
- unsigned datamask_en:1;
- unsigned interlaced:1;
- unsigned odd_field_first:1;
- unsigned clksel_en:1;
- unsigned clkidle_en:1;
unsigned data_pol:1; /* true = inverted */
unsigned clk_pol:1; /* true = rising edge */
unsigned enable_pol:1;
- unsigned Hsync_pol:1; /* true = active high */
- unsigned Vsync_pol:1;
- u16 width;
- u16 height;
+ struct videomode mode;
+
u32 pixel_fmt;
- u16 h_start_width;
- u16 h_sync_width;
- u16 h_end_width;
- u16 v_start_width;
- u16 v_sync_width;
- u16 v_end_width;
u32 v_to_h_sync;
- unsigned long pixelclock;
+
#define IPU_DI_CLKMODE_SYNC (1 << 0)
#define IPU_DI_CLKMODE_EXT (1 << 1)
unsigned long clkflags;
@@ -236,6 +224,7 @@ void ipu_di_put(struct ipu_di *);
int ipu_di_disable(struct ipu_di *);
int ipu_di_enable(struct ipu_di *);
int ipu_di_get_num(struct ipu_di *);
+int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode);
int ipu_di_init_sync_panel(struct ipu_di *, struct ipu_di_signal_cfg *sig);
/*
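With the timing fields folded into struct videomode, a caller might fill the signal config along these lines (a sketch; the polarity and clock choices are arbitrary examples):

static void ipu_fill_sig_cfg(struct ipu_di_signal_cfg *sig,
			     const struct videomode *vm, u32 pixel_fmt)
{
	sig->mode       = *vm;
	sig->pixel_fmt  = pixel_fmt;
	sig->clk_pol    = 0;	/* falling edge */
	sig->enable_pol = 1;
	sig->clkflags   = IPU_DI_CLKMODE_SYNC;
}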
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 6a84498ea513..60de61fea8e3 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -314,6 +314,7 @@ enum omapdss_version {
OMAPDSS_VER_OMAP4, /* All other OMAP4s */
OMAPDSS_VER_OMAP5,
OMAPDSS_VER_AM43xx,
+ OMAPDSS_VER_DRA7xx,
};
/* Board specific data */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 3387465b9caa..143ca5ffab7a 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -45,6 +45,8 @@
#include <asm/xen/hypervisor.h>
#include <xen/features.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
#define GNTTAB_RESERVED_XENSTORE 1
@@ -58,6 +60,22 @@ struct gnttab_free_callback {
u16 count;
};
+struct gntab_unmap_queue_data;
+
+typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
+
+struct gntab_unmap_queue_data
+{
+ struct delayed_work gnttab_work;
+ void *data;
+ gnttab_unmap_refs_done done;
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_unmap_grant_ref *kunmap_ops;
+ struct page **pages;
+ unsigned int count;
+ unsigned int age;
+};
+
int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);
@@ -163,12 +181,17 @@ void gnttab_free_auto_xlat_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
+int gnttab_alloc_pages(int nr_pages, struct page **pages);
+void gnttab_free_pages(int nr_pages, struct page **pages);
+
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kunmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
/* Perform a batch of grant map/copy operations. Retry every batch slot
* for which the hypervisor returns GNTST_eagain. This is typically due
@@ -182,4 +205,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
+
+struct xen_page_foreign {
+ domid_t domid;
+ grant_ref_t gref;
+};
+
+static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
+{
+ if (!PageForeign(page))
+ return NULL;
+#if BITS_PER_LONG < 64
+ return (struct xen_page_foreign *)page->private;
+#else
+ BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
+ return (struct xen_page_foreign *)&page->private;
+#endif
+}
+
#endif /* __ASM_GNTTAB_H__ */
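A rough sketch of the new asynchronous unmap path; the callback and the ownership of the queue data shown here are hypothetical:

static void example_unmap_done(int result, struct gntab_unmap_queue_data *data)
{
	if (result)
		pr_err("grant unmap failed: %d\n", result);
	gnttab_free_pages(data->count, data->pages);
}

static void example_start_unmap(struct gntab_unmap_queue_data *d,
				struct gnttab_unmap_grant_ref *unmap_ops,
				struct page **pages, unsigned int count)
{
	d->done       = example_unmap_done;
	d->unmap_ops  = unmap_ops;
	d->kunmap_ops = NULL;
	d->pages      = pages;
	d->count      = count;
	gnttab_unmap_refs_async(d);
}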
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..6ad3d110bb81 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -41,6 +41,12 @@
/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
#define XENFEAT_mmu_pt_update_preserve_ad 5
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
/* x86: Does this Xen host support the HVM callback vector type? */
#define XENFEAT_hvm_callback_vector 8
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index bcce56439d64..56806bc90c2f 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -526,6 +526,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
+/*
* Values for error status returns. All errors are -ve.
*/
#define GNTST_okay (0) /* Normal return. */
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644
index 000000000000..b47d9d06fade
--- /dev/null
+++ b/include/xen/interface/nmi.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error 0
+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr 1
+#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown 2
+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will receive EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback 0
+struct xennmi_callback {
+ unsigned long handler_address;
+ unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
diff --git a/include/xen/page.h b/include/xen/page.h
index 12765b6f9517..c5ed20bb3fe9 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,11 @@
#include <asm/xen/page.h>
+static inline unsigned long page_to_mfn(struct page *page)
+{
+ return pfn_to_mfn(page_to_pfn(page));
+}
+
struct xen_memory_region {
phys_addr_t start;
phys_addr_t size;
diff --git a/init/Kconfig b/init/Kconfig
index 9afb971497f4..058e3671fa11 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -470,7 +470,6 @@ choice
config TREE_RCU
bool "Tree-based hierarchical RCU"
depends on !PREEMPT && SMP
- select IRQ_WORK
help
This option selects the RCU implementation that is
 designed for very large SMP systems with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
config PREEMPT_RCU
bool "Preemptible tree-based hierarchical RCU"
depends on PREEMPT
- select IRQ_WORK
help
This option selects the RCU implementation that is
designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
endchoice
+config SRCU
+ bool
+ help
+ This option selects the sleepable version of RCU. This version
+ permits arbitrary sleeping or blocking within RCU read-side critical
+ sections.
+
config TASKS_RCU
bool "Task_based RCU implementation using voluntary context switch"
default n
+ select SRCU
help
This option enables a task-based RCU implementation that uses
only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
config RCU_KTHREAD_PRIO
int "Real-time priority to use for RCU worker threads"
- range 1 99
- depends on RCU_BOOST
- default 1
+ range 1 99 if RCU_BOOST
+ range 0 99 if !RCU_BOOST
+ default 1 if RCU_BOOST
+ default 0 if !RCU_BOOST
help
This option specifies the SCHED_FIFO priority value that will be
assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1280,22 +1287,6 @@ source "usr/Kconfig"
endif
-config INIT_FALLBACK
- bool "Fall back to defaults if init= parameter is bad"
- default y
- help
- If enabled, the kernel will try the default init binaries if an
- explicit request from the init= parameter fails.
-
- This can have unexpected effects. For example, booting
- with init=/sbin/kiosk_app will run /sbin/init or even /bin/sh
- if /sbin/kiosk_app cannot be executed.
-
- The default value of Y is consistent with historical behavior.
- Selecting N is likely to be more appropriate for most uses,
- especially on kiosks and on kernels that are intended to be
- run under the control of a script.
-
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size"
help
@@ -1595,6 +1586,7 @@ config PERF_EVENTS
depends on HAVE_PERF_EVENTS
select ANON_INODES
select IRQ_WORK
+ select SRCU
help
Enable kernel support for various performance events provided
by software and hardware.
diff --git a/init/main.c b/init/main.c
index 61b993767db5..6f0f1c5ff8cc 100644
--- a/init/main.c
+++ b/init/main.c
@@ -87,10 +87,6 @@
#include <asm/sections.h>
#include <asm/cacheflush.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/smp.h>
-#endif
-
static int kernel_init(void *);
extern void init_IRQ(void);
@@ -351,15 +347,6 @@ __setup("rdinit=", rdinit_setup);
#ifndef CONFIG_SMP
static const unsigned int setup_max_cpus = NR_CPUS;
-#ifdef CONFIG_X86_LOCAL_APIC
-static void __init smp_init(void)
-{
- APIC_init_uniprocessor();
-}
-#else
-#define smp_init() do { } while (0)
-#endif
-
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#endif
@@ -966,13 +953,8 @@ static int __ref kernel_init(void *unused)
ret = run_init_process(execute_command);
if (!ret)
return 0;
-#ifndef CONFIG_INIT_FALLBACK
panic("Requested init %s failed (error %d).",
execute_command, ret);
-#else
- pr_err("Failed to execute %s (error %d). Attempting defaults...\n",
- execute_command, ret);
-#endif
}
if (!try_to_run_init_process("/sbin/init") ||
!try_to_run_init_process("/etc/init") ||
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 76768ee812b2..08561f1acd13 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -231,6 +231,10 @@ config RWSEM_SPIN_ON_OWNER
def_bool y
depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
+config LOCK_SPIN_ON_OWNER
+ def_bool y
+ depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
+
config ARCH_USE_QUEUE_RWLOCK
bool
diff --git a/kernel/Makefile b/kernel/Makefile
index a59481a3fa6c..1408b3353a3c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -13,8 +13,8 @@ obj-y = fork.o exec_domain.o panic.o \
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
-CFLAGS_REMOVE_cgroup-debug.o = -pg
-CFLAGS_REMOVE_irq_work.o = -pg
+CFLAGS_REMOVE_cgroup-debug.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_irq_work.o = $(CC_FLAGS_FTRACE)
endif
# cond_syscall is currently not LTO compatible
@@ -26,6 +26,7 @@ obj-y += power/
obj-y += printk/
obj-y += irq/
obj-y += rcu/
+obj-y += livepatch/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
obj-$(CONFIG_FREEZER) += freezer.o
@@ -142,7 +143,7 @@ endif
kernel/system_certificates.o: $(obj)/x509_certificate_list
quiet_cmd_x509certs = CERTS $@
- cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; echo " - Including cert $(X509)")
+ cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; $(kecho) " - Including cert $(X509)")
targets += $(obj)/x509_certificate_list
$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 4f68a326d92e..72e1660a79a3 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -425,7 +425,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
goto exit_nofree;
bufp = data->buf;
- entry->rule.vers_ops = 2;
for (i = 0; i < data->field_count; i++) {
struct audit_field *f = &entry->rule.fields[i];
@@ -758,7 +757,6 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
return ERR_PTR(-ENOMEM);
new = &entry->rule;
- new->vers_ops = old->vers_ops;
new->flags = old->flags;
new->pflags = old->pflags;
new->listnr = old->listnr;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6594e457a25..a64e7a207d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
- module_free(NULL, hdr);
+ module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
- void *key, *value;
+ void *key, *value, *ptr;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
if (copy_from_user(key, ukey, map->key_size) != 0)
goto free_key;
- err = -ENOENT;
- rcu_read_lock();
- value = map->ops->map_lookup_elem(map, key);
+ err = -ENOMEM;
+ value = kmalloc(map->value_size, GFP_USER);
if (!value)
- goto err_unlock;
+ goto free_key;
+
+ rcu_read_lock();
+ ptr = map->ops->map_lookup_elem(map, key);
+ if (ptr)
+ memcpy(value, ptr, map->value_size);
+ rcu_read_unlock();
+
+ err = -ENOENT;
+ if (!ptr)
+ goto free_value;
err = -EFAULT;
if (copy_to_user(uvalue, value, map->value_size) != 0)
- goto err_unlock;
+ goto free_value;
err = 0;
-err_unlock:
- rcu_read_unlock();
+free_value:
+ kfree(value);
free_key:
kfree(key);
err_put:
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb263d0caab3..29a7b2cc593e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
*
* And don't kill the default root.
*/
- if (css_has_online_children(&root->cgrp.self) ||
+ if (!list_empty(&root->cgrp.self.children) ||
root == &cgrp_dfl_root)
cgroup_put(&root->cgrp);
else
@@ -3077,7 +3077,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
#endif
kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
cgroup_file_mode(cft), 0, cft->kf_ops, cft,
- NULL, false, key);
+ NULL, key);
if (IS_ERR(kn))
return PTR_ERR(kn);
@@ -4373,16 +4373,20 @@ static void css_free_work_fn(struct work_struct *work)
{
struct cgroup_subsys_state *css =
container_of(work, struct cgroup_subsys_state, destroy_work);
+ struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
percpu_ref_exit(&css->refcnt);
- if (css->ss) {
+ if (ss) {
/* css free path */
+ int id = css->id;
+
if (css->parent)
css_put(css->parent);
- css->ss->css_free(css);
+ ss->css_free(css);
+ cgroup_idr_remove(&ss->css_idr, id);
cgroup_put(cgrp);
} else {
/* cgroup free path */
@@ -4434,7 +4438,7 @@ static void css_release_work_fn(struct work_struct *work)
if (ss) {
/* css release path */
- cgroup_idr_remove(&ss->css_idr, css->id);
+ cgroup_idr_replace(&ss->css_idr, NULL, css->id);
if (ss->css_released)
ss->css_released(css);
} else {
diff --git a/kernel/compat.c b/kernel/compat.c
index ebb3c369d03d..24f00610c575 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -276,8 +276,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
* core implementation decides to return random nonsense.
*/
if (ret == -ERESTART_RESTARTBLOCK) {
- struct restart_block *restart
- = &current_thread_info()->restart_block;
+ struct restart_block *restart = &current->restart_block;
restart->fn = compat_nanosleep_restart;
restart->nanosleep.compat_rmtp = rmtp;
@@ -860,7 +859,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
return -EFAULT;
if (err == -ERESTART_RESTARTBLOCK) {
- restart = &current_thread_info()->restart_block;
+ restart = &current->restart_block;
restart->fn = compat_clock_nanosleep_restart;
restart->nanosleep.compat_rmtp = rmtp;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5d220234b3ca..1972b161c61e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
- struct mutex lock; /* Synchronizes accesses to refcount, */
+ /* wait queue to wake up the active_writer */
+ wait_queue_head_t wq;
+ /* verifies that no writer will get active while readers are active */
+ struct mutex lock;
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
*/
- int refcount;
- /* And allows lockless put_online_cpus(). */
- atomic_t puts_pending;
+ atomic_t refcount;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
.active_writer = NULL,
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
- .refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
.dep_map = {.name = "cpu_hotplug.lock" },
#endif
@@ -86,15 +87,6 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
-static void apply_puts_pending(int max)
-{
- int delta;
-
- if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
- delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
- cpu_hotplug.refcount -= delta;
- }
-}
void get_online_cpus(void)
{
@@ -103,8 +95,7 @@ void get_online_cpus(void)
return;
cpuhp_lock_acquire_read();
mutex_lock(&cpu_hotplug.lock);
- apply_puts_pending(65536);
- cpu_hotplug.refcount++;
+ atomic_inc(&cpu_hotplug.refcount);
mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
if (!mutex_trylock(&cpu_hotplug.lock))
return false;
cpuhp_lock_acquire_tryread();
- apply_puts_pending(65536);
- cpu_hotplug.refcount++;
+ atomic_inc(&cpu_hotplug.refcount);
mutex_unlock(&cpu_hotplug.lock);
return true;
}
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
void put_online_cpus(void)
{
+ int refcount;
+
if (cpu_hotplug.active_writer == current)
return;
- if (!mutex_trylock(&cpu_hotplug.lock)) {
- atomic_inc(&cpu_hotplug.puts_pending);
- cpuhp_lock_release();
- return;
- }
- if (WARN_ON(!cpu_hotplug.refcount))
- cpu_hotplug.refcount++; /* try to fix things up */
+ refcount = atomic_dec_return(&cpu_hotplug.refcount);
+ if (WARN_ON(refcount < 0)) /* try to fix things up */
+ atomic_inc(&cpu_hotplug.refcount);
+
+ if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+ wake_up(&cpu_hotplug.wq);
- if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
- wake_up_process(cpu_hotplug.active_writer);
- mutex_unlock(&cpu_hotplug.lock);
cpuhp_lock_release();
}
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
*/
void cpu_hotplug_begin(void)
{
- cpu_hotplug.active_writer = current;
+ DEFINE_WAIT(wait);
+ cpu_hotplug.active_writer = current;
cpuhp_lock_acquire();
+
for (;;) {
mutex_lock(&cpu_hotplug.lock);
- apply_puts_pending(1);
- if (likely(!cpu_hotplug.refcount))
- break;
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (likely(!atomic_read(&cpu_hotplug.refcount)))
+ break;
mutex_unlock(&cpu_hotplug.lock);
schedule();
}
+ finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 64b257f6bca2..1d1fe9361d29 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1707,40 +1707,27 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
{
struct cpuset *cs = css_cs(seq_css(sf));
cpuset_filetype_t type = seq_cft(sf)->private;
- ssize_t count;
- char *buf, *s;
int ret = 0;
- count = seq_get_buf(sf, &buf);
- s = buf;
-
spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
- s += cpulist_scnprintf(s, count, cs->cpus_allowed);
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
break;
case FILE_MEMLIST:
- s += nodelist_scnprintf(s, count, cs->mems_allowed);
+ seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
break;
case FILE_EFFECTIVE_CPULIST:
- s += cpulist_scnprintf(s, count, cs->effective_cpus);
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
break;
case FILE_EFFECTIVE_MEMLIST:
- s += nodelist_scnprintf(s, count, cs->effective_mems);
+ seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
break;
default:
ret = -EINVAL;
- goto out_unlock;
}
- if (s < buf + count - 1) {
- *s++ = '\n';
- seq_commit(sf, s - buf);
- } else {
- seq_commit(sf, -1);
- }
-out_unlock:
spin_unlock_irq(&callback_lock);
return ret;
}
@@ -2400,7 +2387,7 @@ void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
*/
}
-void cpuset_init_current_mems_allowed(void)
+void __init cpuset_init_current_mems_allowed(void)
{
nodes_setall(current->mems_allowed);
}
@@ -2610,8 +2597,6 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
-#define CPUSET_NODELIST_LEN (256)
-
/**
* cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
* @tsk: pointer to task_struct of some task.
@@ -2621,23 +2606,16 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
*/
void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
- /* Statically allocated to prevent using excess stack. */
- static char cpuset_nodelist[CPUSET_NODELIST_LEN];
- static DEFINE_SPINLOCK(cpuset_buffer_lock);
struct cgroup *cgrp;
- spin_lock(&cpuset_buffer_lock);
rcu_read_lock();
cgrp = task_cs(tsk)->css.cgroup;
- nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
- tsk->mems_allowed);
pr_info("%s cpuset=", tsk->comm);
pr_cont_cgroup_name(cgrp);
- pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
+ pr_cont(" mems_allowed=%*pbl\n", nodemask_pr_args(&tsk->mems_allowed));
rcu_read_unlock();
- spin_unlock(&cpuset_buffer_lock);
}
/*
@@ -2715,10 +2693,8 @@ out:
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
- seq_puts(m, "Mems_allowed:\t");
- seq_nodemask(m, &task->mems_allowed);
- seq_puts(m, "\n");
- seq_puts(m, "Mems_allowed_list:\t");
- seq_nodemask_list(m, &task->mems_allowed);
- seq_puts(m, "\n");
+ seq_printf(m, "Mems_allowed:\t%*pb\n",
+ nodemask_pr_args(&task->mems_allowed));
+ seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
+ nodemask_pr_args(&task->mems_allowed));
}
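The conversion above relies on the %*pb/%*pbl printf extension; outside this file the same pattern would look like the one-liner below (illustrative only):

pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));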
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index f191bddf64b8..7b40c5f07dce 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2023,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
- kdb_printf("%4ld ", module_refcount(mod));
+ kdb_printf("%4d ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 103f5d147b2f..2925188f50ea 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -1,5 +1,5 @@
ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_core.o = -pg
+CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
endif
obj-y := core.o ring_buffer.o callchain.o
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882f835a0d85..f04daabfd1cf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -872,22 +872,32 @@ void perf_pmu_enable(struct pmu *pmu)
pmu->pmu_enable(pmu);
}
-static DEFINE_PER_CPU(struct list_head, rotation_list);
+static DEFINE_PER_CPU(struct list_head, active_ctx_list);
/*
- * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
- * because they're strictly cpu affine and rotate_start is called with IRQs
- * disabled, while rotate_context is called from IRQ context.
+ * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
+ * perf_event_task_tick() are fully serialized because they're strictly cpu
+ * affine and perf_event_ctx{activate,deactivate} are called with IRQs
+ * disabled, while perf_event_task_tick is called from IRQ context.
*/
-static void perf_pmu_rotate_start(struct pmu *pmu)
+static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
- struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- struct list_head *head = this_cpu_ptr(&rotation_list);
+ struct list_head *head = this_cpu_ptr(&active_ctx_list);
WARN_ON(!irqs_disabled());
- if (list_empty(&cpuctx->rotation_list))
- list_add(&cpuctx->rotation_list, head);
+ WARN_ON(!list_empty(&ctx->active_ctx_list));
+
+ list_add(&ctx->active_ctx_list, head);
+}
+
+static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
+{
+ WARN_ON(!irqs_disabled());
+
+ WARN_ON(list_empty(&ctx->active_ctx_list));
+
+ list_del_init(&ctx->active_ctx_list);
}
static void get_ctx(struct perf_event_context *ctx)
@@ -907,6 +917,84 @@ static void put_ctx(struct perf_event_context *ctx)
}
/*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
+ * perf_pmu_migrate_context() we need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
+ * concerned with cpuctx and that doesn't have children.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ * perf_remove_from_context();
+ * synchronize_rcu();
+ * perf_install_in_context();
+ *
+ * to effect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However; because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ * task_struct::perf_event_mutex
+ * perf_event_context::mutex
+ * perf_event_context::lock
+ * perf_event::child_mutex;
+ * perf_event::mmap_mutex
+ * mmap_sem
+ */
+static struct perf_event_context *
+perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
+{
+ struct perf_event_context *ctx;
+
+again:
+ rcu_read_lock();
+ ctx = ACCESS_ONCE(event->ctx);
+ if (!atomic_inc_not_zero(&ctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_nested(&ctx->mutex, nesting);
+ if (event->ctx != ctx) {
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+ goto again;
+ }
+
+ return ctx;
+}
+
+static inline struct perf_event_context *
+perf_event_ctx_lock(struct perf_event *event)
+{
+ return perf_event_ctx_lock_nested(event, 0);
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+}
+
+/*
* This must be done under the ctx->lock, such as to serialize against
* context_equiv(), therefore we cannot call put_ctx() since that might end up
* calling scheduler related locks and ctx->lock nests inside those.
@@ -1155,8 +1243,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
ctx->nr_branch_stack++;
list_add_rcu(&event->event_entry, &ctx->event_list);
- if (!ctx->nr_events)
- perf_pmu_rotate_start(ctx->pmu);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
@@ -1275,6 +1361,8 @@ static void perf_group_attach(struct perf_event *event)
if (group_leader == event)
return;
+ WARN_ON_ONCE(group_leader->ctx != event->ctx);
+
if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
!is_software_event(event))
group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
@@ -1296,6 +1384,10 @@ static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
+
+ WARN_ON_ONCE(event->ctx != ctx);
+ lockdep_assert_held(&ctx->lock);
+
/*
* We can have double detach due to exit/hot-unplug + close.
*/
@@ -1380,6 +1472,8 @@ static void perf_group_detach(struct perf_event *event)
/* Inherit group flags from the previous leader */
sibling->group_flags = event->group_flags;
+
+ WARN_ON_ONCE(sibling->ctx != event->ctx);
}
out:
@@ -1442,6 +1536,10 @@ event_sched_out(struct perf_event *event,
{
u64 tstamp = perf_event_time(event);
u64 delta;
+
+ WARN_ON_ONCE(event->ctx != ctx);
+ lockdep_assert_held(&ctx->lock);
+
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
@@ -1471,7 +1569,8 @@ event_sched_out(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu--;
- ctx->nr_active--;
+ if (!--ctx->nr_active)
+ perf_event_ctx_deactivate(ctx);
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
@@ -1654,7 +1753,7 @@ int __perf_event_disable(void *info)
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1695,6 +1794,19 @@ retry:
}
raw_spin_unlock_irq(&ctx->lock);
}
+
+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_disable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
@@ -1782,7 +1894,8 @@ event_sched_in(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu++;
- ctx->nr_active++;
+ if (!ctx->nr_active++)
+ perf_event_ctx_activate(ctx);
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
@@ -2158,7 +2271,7 @@ unlock:
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -2214,9 +2327,21 @@ retry:
out:
raw_spin_unlock_irq(&ctx->lock);
}
+
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_enable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
EXPORT_SYMBOL_GPL(perf_event_enable);
-int perf_event_refresh(struct perf_event *event, int refresh)
+static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
@@ -2225,10 +2350,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
return -EINVAL;
atomic_add(refresh, &event->event_limit);
- perf_event_enable(event);
+ _perf_event_enable(event);
return 0;
}
+
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_event_refresh(event, refresh);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
@@ -2612,12 +2752,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
-
- /*
- * Since these rotations are per-cpu, we need to ensure the
- * cpu-context we got scheduled on is actually rotating.
- */
- perf_pmu_rotate_start(ctx->pmu);
}
/*
@@ -2905,25 +3039,18 @@ static void rotate_ctx(struct perf_event_context *ctx)
list_rotate_left(&ctx->flexible_groups);
}
-/*
- * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
- * because they're strictly cpu affine and rotate_start is called with IRQs
- * disabled, while rotate_context is called from IRQ context.
- */
static int perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
- int rotate = 0, remove = 1;
+ int rotate = 0;
if (cpuctx->ctx.nr_events) {
- remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
}
ctx = cpuctx->task_ctx;
if (ctx && ctx->nr_events) {
- remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
}
@@ -2947,8 +3074,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
- if (remove)
- list_del_init(&cpuctx->rotation_list);
return rotate;
}
@@ -2966,9 +3091,8 @@ bool perf_event_can_stop_tick(void)
void perf_event_task_tick(void)
{
- struct list_head *head = this_cpu_ptr(&rotation_list);
- struct perf_cpu_context *cpuctx, *tmp;
- struct perf_event_context *ctx;
+ struct list_head *head = this_cpu_ptr(&active_ctx_list);
+ struct perf_event_context *ctx, *tmp;
int throttled;
WARN_ON(!irqs_disabled());
@@ -2976,14 +3100,8 @@ void perf_event_task_tick(void)
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
- list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
- ctx = &cpuctx->ctx;
+ list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
perf_adjust_freq_unthr_context(ctx, throttled);
-
- ctx = cpuctx->task_ctx;
- if (ctx)
- perf_adjust_freq_unthr_context(ctx, throttled);
- }
}
static int event_enable_on_exec(struct perf_event *event,
@@ -3142,6 +3260,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
+ INIT_LIST_HEAD(&ctx->active_ctx_list);
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
@@ -3421,7 +3540,16 @@ static void perf_remove_from_owner(struct perf_event *event)
rcu_read_unlock();
if (owner) {
- mutex_lock(&owner->perf_event_mutex);
+ /*
+ * If we're here through perf_event_exit_task() we're already
+ * holding ctx->mutex which would be an inversion wrt. the
+ * normal lock order.
+ *
+ * However, we can safely take this lock because it's the child
+ * ctx->mutex.
+ */
+ mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
@@ -3440,7 +3568,7 @@ static void perf_remove_from_owner(struct perf_event *event)
*/
static void put_event(struct perf_event *event)
{
- struct perf_event_context *ctx = event->ctx;
+ struct perf_event_context *ctx;
if (!atomic_long_dec_and_test(&event->refcount))
return;
@@ -3448,7 +3576,6 @@ static void put_event(struct perf_event *event)
if (!is_kernel_event(event))
perf_remove_from_owner(event);
- WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
*
@@ -3461,7 +3588,8 @@ static void put_event(struct perf_event *event)
* the last filedesc died, so there is no possibility
* to trigger the AB-BA case.
*/
- mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+ ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
+ WARN_ON_ONCE(ctx->parent_ctx);
perf_remove_from_context(event, true);
mutex_unlock(&ctx->mutex);
@@ -3547,12 +3675,13 @@ static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
- int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
- u64 values[5];
+ int n = 0, size = 0, ret;
u64 count, enabled, running;
+ u64 values[5];
+
+ lockdep_assert_held(&ctx->mutex);
- mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);
values[n++] = 1 + leader->nr_siblings;
@@ -3567,7 +3696,7 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
- goto unlock;
+ return -EFAULT;
ret = size;
@@ -3581,14 +3710,11 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);
if (copy_to_user(buf + ret, values, size)) {
- ret = -EFAULT;
- goto unlock;
+ return -EFAULT;
}
ret += size;
}
-unlock:
- mutex_unlock(&ctx->mutex);
return ret;
}
@@ -3660,8 +3786,14 @@ static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = perf_read_hw(event, buf, count);
+ perf_event_ctx_unlock(event, ctx);
- return perf_read_hw(event, buf, count);
+ return ret;
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3687,7 +3819,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
return events;
}
-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
@@ -3706,6 +3838,7 @@ static void perf_event_for_each_child(struct perf_event *event,
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
+
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
@@ -3719,14 +3852,13 @@ static void perf_event_for_each(struct perf_event *event,
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
- WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
+ lockdep_assert_held(&ctx->mutex);
+
event = event->group_leader;
perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(sibling, func);
- mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3796,25 +3928,24 @@ static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
- struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
- func = perf_event_enable;
+ func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
- func = perf_event_disable;
+ func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
- func = perf_event_reset;
+ func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
- return perf_event_refresh(event, arg);
+ return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
@@ -3861,6 +3992,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ long ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_ioctl(event, cmd, arg);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -3883,11 +4027,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
int perf_event_task_enable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
- list_for_each_entry(event, &current->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_enable);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_enable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(&current->perf_event_mutex);
return 0;
@@ -3895,11 +4043,15 @@ int perf_event_task_enable(void)
int perf_event_task_disable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
- list_for_each_entry(event, &current->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_disable);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_disable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(&current->perf_event_mutex);
return 0;
@@ -3949,7 +4101,8 @@ unlock:
rcu_read_unlock();
}
-void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(
+ struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
{
}
@@ -3999,7 +4152,7 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
- arch_perf_update_userpage(userpg, now);
+ arch_perf_update_userpage(event, userpg, now);
barrier();
++userpg->lock;
@@ -4141,6 +4294,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
atomic_inc(&event->mmap_count);
atomic_inc(&event->rb->mmap_count);
+
+ if (event->pmu->event_mapped)
+ event->pmu->event_mapped(event);
}
/*
@@ -4160,6 +4316,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
+ if (event->pmu->event_unmapped)
+ event->pmu->event_unmapped(event);
+
atomic_dec(&rb->mmap_count);
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4361,6 +4520,9 @@ unlock:
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
+ if (event->pmu->event_mapped)
+ event->pmu->event_mapped(event);
+
return ret;
}
@@ -5889,6 +6051,8 @@ end:
rcu_read_unlock();
}
+DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
int perf_swevent_get_recursion_context(void)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
@@ -5904,21 +6068,30 @@ inline void perf_swevent_put_recursion_context(int rctx)
put_recursion_context(swhash->recursion, rctx);
}
-void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data;
- int rctx;
- preempt_disable_notrace();
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
+ if (WARN_ON_ONCE(!regs))
return;
perf_sample_data_init(&data, addr, 0);
-
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
+}
+
+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+{
+ int rctx;
+
+ preempt_disable_notrace();
+ rctx = perf_swevent_get_recursion_context();
+ if (unlikely(rctx < 0))
+ goto fail;
+
+ ___perf_sw_event(event_id, nr, regs, addr);
perf_swevent_put_recursion_context(rctx);
+fail:
preempt_enable_notrace();
}
@@ -6776,12 +6949,10 @@ skip_type:
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
- cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
__perf_cpu_hrtimer_init(cpuctx, cpu);
- INIT_LIST_HEAD(&cpuctx->rotation_list);
cpuctx->unique_pmu = pmu;
}
@@ -6854,6 +7025,20 @@ void perf_pmu_unregister(struct pmu *pmu)
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
+static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
+{
+ int ret;
+
+ if (!try_module_get(pmu->module))
+ return -ENODEV;
+ event->pmu = pmu;
+ ret = pmu->event_init(event);
+ if (ret)
+ module_put(pmu->module);
+
+ return ret;
+}
+
struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
@@ -6866,24 +7051,14 @@ struct pmu *perf_init_event(struct perf_event *event)
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
- if (!try_module_get(pmu->module)) {
- pmu = ERR_PTR(-ENODEV);
- goto unlock;
- }
- event->pmu = pmu;
- ret = pmu->event_init(event);
+ ret = perf_try_init_event(pmu, event);
if (ret)
pmu = ERR_PTR(ret);
goto unlock;
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
- if (!try_module_get(pmu->module)) {
- pmu = ERR_PTR(-ENODEV);
- goto unlock;
- }
- event->pmu = pmu;
- ret = pmu->event_init(event);
+ ret = perf_try_init_event(pmu, event);
if (!ret)
goto unlock;
@@ -7247,6 +7422,15 @@ out:
return ret;
}
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+ if (b < a)
+ swap(a, b);
+
+ mutex_lock(a);
+ mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
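A minimal sketch of why ordering by mutex address avoids ABBA deadlock when two contexts are locked from different call sites (illustrative helpers; the real users appear below and in perf_pmu_migrate_context()):

/*
 * Illustrative only: both callers end up acquiring the lower-addressed
 * mutex first, so they cannot deadlock against each other.  The unlock
 * order is irrelevant once both mutexes are held.
 */
static void example_swizzle_a_to_b(struct perf_event_context *a,
                                   struct perf_event_context *b)
{
        mutex_lock_double(&a->mutex, &b->mutex);
        /* ... move events from a to b ... */
        mutex_unlock(&b->mutex);
        mutex_unlock(&a->mutex);
}

static void example_swizzle_b_to_a(struct perf_event_context *a,
                                   struct perf_event_context *b)
{
        mutex_lock_double(&b->mutex, &a->mutex);        /* same effective lock order */
        /* ... move events from b to a ... */
        mutex_unlock(&a->mutex);
        mutex_unlock(&b->mutex);
}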
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -7262,7 +7446,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx, *uninitialized_var(gctx);
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
@@ -7420,7 +7604,19 @@ SYSCALL_DEFINE5(perf_event_open,
* task or CPU context:
*/
if (move_group) {
- if (group_leader->ctx->type != ctx->type)
+ /*
+ * Make sure we're both on the same task, or both
+ * per-cpu events.
+ */
+ if (group_leader->ctx->task != ctx->task)
+ goto err_context;
+
+ /*
+ * Make sure we're both events for the same CPU;
+ * grouping events for different CPUs is broken, since
+ * you can never concurrently schedule them anyhow.
+ */
+ if (group_leader->cpu != event->cpu)
goto err_context;
} else {
if (group_leader->ctx != ctx)
@@ -7448,43 +7644,68 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (move_group) {
- struct perf_event_context *gctx = group_leader->ctx;
-
- mutex_lock(&gctx->mutex);
- perf_remove_from_context(group_leader, false);
+ gctx = group_leader->ctx;
/*
- * Removing from the context ends up with disabled
- * event. What we want here is event in the initial
- * startup state, ready to be add into new context.
+ * See perf_event_ctx_lock() for comments on the details
+ * of swizzling perf_event::ctx.
*/
- perf_event__state_init(group_leader);
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ perf_remove_from_context(group_leader, false);
+
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling, false);
- perf_event__state_init(sibling);
put_ctx(gctx);
}
- mutex_unlock(&gctx->mutex);
- put_ctx(gctx);
+ } else {
+ mutex_lock(&ctx->mutex);
}
WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
if (move_group) {
+ /*
+ * Wait for everybody to stop referencing the events through
+ * the old lists, before installing them on the new lists.
+ */
synchronize_rcu();
- perf_install_in_context(ctx, group_leader, group_leader->cpu);
- get_ctx(ctx);
+
+ /*
+ * Install the group siblings before the group leader.
+ *
+ * Because a group leader will try to install the entire group
+ * (through the sibling list, which is still intact), we can
+ * end up with siblings installed in the wrong context.
+ *
+ * By installing siblings first we NO-OP because they're not
+ * reachable through the group lists.
+ */
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
+ perf_event__state_init(sibling);
perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx);
}
+
+ /*
+ * Removing from the context ends up with a disabled
+ * event. What we want here is an event in the initial
+ * startup state, ready to be added into the new context.
+ */
+ perf_event__state_init(group_leader);
+ perf_install_in_context(ctx, group_leader, group_leader->cpu);
+ get_ctx(ctx);
}
perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
+
+ if (move_group) {
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ }
mutex_unlock(&ctx->mutex);
put_online_cpus();
@@ -7592,7 +7813,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
- mutex_lock(&src_ctx->mutex);
+ /*
+ * See perf_event_ctx_lock() for comments on the details
+ * of swizzling perf_event::ctx.
+ */
+ mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event, false);
@@ -7600,11 +7825,36 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
put_ctx(src_ctx);
list_add(&event->migrate_entry, &events);
}
- mutex_unlock(&src_ctx->mutex);
+ /*
+ * Wait for the events to quiesce before re-instating them.
+ */
synchronize_rcu();
- mutex_lock(&dst_ctx->mutex);
+ /*
+ * Re-instate events in 2 passes.
+ *
+ * Skip over group leaders and only install siblings on this first
+ * pass; siblings will not get enabled without a leader, but a
+ * leader will enable its siblings, even if those are still on the old
+ * context.
+ */
+ list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+ if (event->group_leader == event)
+ continue;
+
+ list_del(&event->migrate_entry);
+ if (event->state >= PERF_EVENT_STATE_OFF)
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ account_event_cpu(event, dst_cpu);
+ perf_install_in_context(dst_ctx, event, dst_cpu);
+ get_ctx(dst_ctx);
+ }
+
+ /*
+ * Once all the siblings are setup properly, install the group leaders
+ * to make it go.
+ */
list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
list_del(&event->migrate_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
@@ -7614,6 +7864,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
+ mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
@@ -7800,14 +8051,19 @@ static void perf_free_event(struct perf_event *event,
put_event(parent);
+ raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
list_del_event(event, ctx);
+ raw_spin_unlock_irq(&ctx->lock);
free_event(event);
}
/*
- * free an unexposed, unused context as created by inheritance by
+ * Free an unexposed, unused context as created by inheritance by
* perf_event_init_task below, used by fork() in case of fail.
+ *
+ * Not all locks are strictly required, but take them anyway to be nice and
+ * help out with the lockdep assertions.
*/
void perf_event_free_task(struct task_struct *task)
{
@@ -8126,7 +8382,7 @@ static void __init perf_event_init_all_cpus(void)
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
mutex_init(&swhash->hlist_mutex);
- INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
+ INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
}
}
@@ -8147,22 +8403,11 @@ static void perf_event_init_cpu(int cpu)
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
-static void perf_pmu_rotate_stop(struct pmu *pmu)
-{
- struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
- WARN_ON(!irqs_disabled());
-
- list_del_init(&cpuctx->rotation_list);
-}
-
static void __perf_event_exit_context(void *__info)
{
struct remove_event re = { .detach_group = true };
struct perf_event_context *ctx = __info;
- perf_pmu_rotate_stop(ctx->pmu);
-
rcu_read_lock();
list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
__perf_remove_from_context(&re);
@@ -8273,6 +8518,18 @@ void __init perf_event_init(void)
!= 1024);
}
+ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ if (pmu_attr->event_str)
+ return sprintf(page, "%s\n", pmu_attr->event_str);
+
+ return 0;
+}
+
static int __init perf_event_sysfs_init(void)
{
struct pmu *pmu;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 146a5792b1d2..eadb95ce7aac 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -13,12 +13,13 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
+#include <linux/poll.h>
#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
- atomic_set(&handle->rb->poll, POLL_IN);
+ atomic_set(&handle->rb->poll, POLLIN);
handle->event->pending_wakeup = 1;
irq_work_queue(&handle->event->pending);
diff --git a/kernel/exit.c b/kernel/exit.c
index 6806c55475ee..feff10bbb307 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -435,7 +435,8 @@ static void exit_mm(struct task_struct *tsk)
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
- clear_thread_flag(TIF_MEMDIE);
+ if (test_thread_flag(TIF_MEMDIE))
+ unmark_oom_victim();
}
static struct task_struct *find_alive_thread(struct task_struct *p)
diff --git a/kernel/fork.c b/kernel/fork.c
index 4dc2ddade9f1..cf65139615a0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -438,12 +438,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
atomic_inc(&mapping->i_mmap_writable);
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
- vma_nonlinear_insert(tmp,
- &mapping->i_mmap_nonlinear);
- else
- vma_interval_tree_insert_after(tmp, mpnt,
- &mapping->i_mmap);
+ vma_interval_tree_insert_after(tmp, mpnt,
+ &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
i_mmap_unlock_write(mapping);
}
@@ -559,6 +555,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
atomic_long_set(&mm->nr_ptes, 0);
+ mm_nr_pmds_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
mm->pinned_vm = 0;
@@ -607,6 +604,14 @@ static void check_mm(struct mm_struct *mm)
printk(KERN_ALERT "BUG: Bad rss-counter state "
"mm:%p idx:%d val:%ld\n", mm, i, x);
}
+
+ if (atomic_long_read(&mm->nr_ptes))
+ pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
+ atomic_long_read(&mm->nr_ptes));
+ if (mm_nr_pmds(mm))
+ pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
+ mm_nr_pmds(mm));
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index 63678b573d61..2a5e3830e953 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2217,7 +2217,7 @@ retry:
if (!abs_time)
goto out;
- restart = &current_thread_info()->restart_block;
+ restart = &current->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = uaddr;
restart->futex.val = val;
@@ -2258,7 +2258,7 @@ static long futex_wait_restart(struct restart_block *restart)
* if there are waiters then it will block, it does PI, etc. (Due to
* races the kernel might see a 0 value of the futex too.)
*/
-static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
+static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
@@ -2953,11 +2953,11 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
- return futex_lock_pi(uaddr, flags, val, timeout, 0);
+ return futex_lock_pi(uaddr, flags, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
case FUTEX_TRYLOCK_PI:
- return futex_lock_pi(uaddr, flags, 0, timeout, 1);
+ return futex_lock_pi(uaddr, flags, NULL, 1);
case FUTEX_WAIT_REQUEUE_PI:
val3 = FUTEX_BITSET_MATCH_ANY;
return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 80692373abd6..196a06fbc122 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -243,6 +243,9 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
return -EINVAL;
desc->affinity_hint = m;
irq_put_desc_unlock(desc, flags);
+ /* set the initial affinity to prevent every interrupt being on CPU0 */
+ if (m)
+ __irq_set_affinity(irq, m, false);
return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 9dc9bfd8a678..df2f4642d1e7 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -46,10 +46,9 @@ static int show_irq_affinity(int type, struct seq_file *m, void *v)
mask = desc->pending_mask;
#endif
if (type)
- seq_cpumask_list(m, mask);
+ seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
else
- seq_cpumask(m, mask);
- seq_putc(m, '\n');
+ seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
return 0;
}
@@ -67,8 +66,7 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
cpumask_copy(mask, desc->affinity_hint);
raw_spin_unlock_irqrestore(&desc->lock, flags);
- seq_cpumask(m, mask);
- seq_putc(m, '\n');
+ seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
free_cpumask_var(mask);
return 0;
@@ -186,8 +184,7 @@ static const struct file_operations irq_affinity_list_proc_fops = {
static int default_affinity_show(struct seq_file *m, void *v)
{
- seq_cpumask(m, irq_default_affinity);
- seq_putc(m, '\n');
+ seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
return 0;
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 9a8a01abbaed..c85277639b34 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -2512,7 +2512,7 @@ static int kexec_apply_relocations(struct kimage *image)
continue;
/*
- * Respective archicture needs to provide support for applying
+ * Respective architecture needs to provide support for applying
* relocations of type SHT_RELA/SHT_REL.
*/
if (sechdrs[i].sh_type == SHT_RELA)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 06f58309fed2..c90e417bb963 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
static void free_insn_page(void *page)
{
- module_free(NULL, page);
+ module_memfree(page);
}
struct kprobe_insn_cache kprobe_insn_slots = {
@@ -717,7 +717,7 @@ static void prepare_optimized_kprobe(struct kprobe *p)
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
- arch_prepare_optimized_kprobe(op);
+ arch_prepare_optimized_kprobe(op, p);
}
/* Allocate new optimized_kprobe and try to prepare optimized instructions */
@@ -731,7 +731,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
INIT_LIST_HEAD(&op->list);
op->kp.addr = p->addr;
- arch_prepare_optimized_kprobe(op);
+ arch_prepare_optimized_kprobe(op, p);
return &op->kp;
}
@@ -869,7 +869,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
struct kprobe *_p;
- unoptimize_kprobe(p, false); /* Try to unoptimize */
+ /* Try to unoptimize */
+ unoptimize_kprobe(p, kprobes_all_disarmed);
if (!kprobe_queued(p)) {
arch_disarm_kprobe(p);
@@ -1571,7 +1572,13 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
- disarm_kprobe(orig_p, true);
+ /*
+ * If kprobes_all_disarmed is set, orig_p
+ * should have already been disarmed, so
+ * skip the unneeded disarming process.
+ */
+ if (!kprobes_all_disarmed)
+ disarm_kprobe(orig_p, true);
orig_p->flags |= KPROBE_FLAG_DISABLED;
}
}
@@ -2320,6 +2327,12 @@ static void arm_all_kprobes(void)
if (!kprobes_all_disarmed)
goto already_enabled;
+ /*
+ * optimize_kprobe() called by arm_kprobe() checks
+ * kprobes_all_disarmed, so set kprobes_all_disarmed before
+ * calling arm_kprobe().
+ */
+ kprobes_all_disarmed = false;
/* Arming kprobes doesn't optimize kprobe itself */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
@@ -2328,7 +2341,6 @@ static void arm_all_kprobes(void)
arm_kprobe(p);
}
- kprobes_all_disarmed = false;
printk(KERN_INFO "Kprobes globally enabled\n");
already_enabled:
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
new file mode 100644
index 000000000000..045022557936
--- /dev/null
+++ b/kernel/livepatch/Kconfig
@@ -0,0 +1,18 @@
+config HAVE_LIVEPATCH
+ bool
+ help
+ Arch supports kernel live patching
+
+config LIVEPATCH
+ bool "Kernel Live Patching"
+ depends on DYNAMIC_FTRACE_WITH_REGS
+ depends on MODULES
+ depends on SYSFS
+ depends on KALLSYMS_ALL
+ depends on HAVE_LIVEPATCH
+ help
+ Say Y here if you want to support kernel live patching.
+ This option has no runtime impact until a kernel "patch"
+ module uses the interface provided by this option to register
+ a patch, causing calls to patched functions to be redirected
+ to new function code contained in the patch module.
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
new file mode 100644
index 000000000000..e8780c0901d9
--- /dev/null
+++ b/kernel/livepatch/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
+
+livepatch-objs := core.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
new file mode 100644
index 000000000000..ff7f47d026ac
--- /dev/null
+++ b/kernel/livepatch/core.c
@@ -0,0 +1,1015 @@
+/*
+ * core.c - Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ftrace.h>
+#include <linux/list.h>
+#include <linux/kallsyms.h>
+#include <linux/livepatch.h>
+
+/**
+ * struct klp_ops - structure for tracking registered ftrace ops structs
+ *
+ * A single ftrace_ops is shared between all enabled replacement functions
+ * (klp_func structs) which have the same old_addr. This allows the switch
+ * between function versions to happen instantaneously by updating the klp_ops
+ * struct's func_stack list. The winner is the klp_func at the top of the
+ * func_stack (front of the list).
+ *
+ * @node: node for the global klp_ops list
+ * @func_stack: list head for the stack of klp_func's (active func is on top)
+ * @fops: registered ftrace ops struct
+ */
+struct klp_ops {
+ struct list_head node;
+ struct list_head func_stack;
+ struct ftrace_ops fops;
+};
+
+/*
+ * The klp_mutex protects the global lists and state transitions of any
+ * structure reachable from them. References to any structure must be obtained
+ * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
+ * ensure it gets consistent data).
+ */
+static DEFINE_MUTEX(klp_mutex);
+
+static LIST_HEAD(klp_patches);
+static LIST_HEAD(klp_ops);
+
+static struct kobject *klp_root_kobj;
+
+static struct klp_ops *klp_find_ops(unsigned long old_addr)
+{
+ struct klp_ops *ops;
+ struct klp_func *func;
+
+ list_for_each_entry(ops, &klp_ops, node) {
+ func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ if (func->old_addr == old_addr)
+ return ops;
+ }
+
+ return NULL;
+}
+
+static bool klp_is_module(struct klp_object *obj)
+{
+ return obj->name;
+}
+
+static bool klp_is_object_loaded(struct klp_object *obj)
+{
+ return !obj->name || obj->mod;
+}
+
+/* sets obj->mod if object is not vmlinux and module is found */
+static void klp_find_object_module(struct klp_object *obj)
+{
+ if (!klp_is_module(obj))
+ return;
+
+ mutex_lock(&module_mutex);
+ /*
+ * We don't need to take a reference on the module here because we have
+ * the klp_mutex, which is also taken by the module notifier. This
+ * prevents any module from unloading until we release the klp_mutex.
+ */
+ obj->mod = find_module(obj->name);
+ mutex_unlock(&module_mutex);
+}
+
+/* klp_mutex must be held by caller */
+static bool klp_is_patch_registered(struct klp_patch *patch)
+{
+ struct klp_patch *mypatch;
+
+ list_for_each_entry(mypatch, &klp_patches, list)
+ if (mypatch == patch)
+ return true;
+
+ return false;
+}
+
+static bool klp_initialized(void)
+{
+ return klp_root_kobj;
+}
+
+struct klp_find_arg {
+ const char *objname;
+ const char *name;
+ unsigned long addr;
+ /*
+ * If count == 0, the symbol was not found. If count == 1, a unique
+ * match was found and addr is set. If count > 1, there is
+ * unresolvable ambiguity among "count" number of symbols with the same
+ * name in the same object.
+ */
+ unsigned long count;
+};
+
+static int klp_find_callback(void *data, const char *name,
+ struct module *mod, unsigned long addr)
+{
+ struct klp_find_arg *args = data;
+
+ if ((mod && !args->objname) || (!mod && args->objname))
+ return 0;
+
+ if (strcmp(args->name, name))
+ return 0;
+
+ if (args->objname && strcmp(args->objname, mod->name))
+ return 0;
+
+ /*
+ * args->addr might be overwritten if another match is found
+ * but klp_find_object_symbol() handles this and only returns the
+ * addr if count == 1.
+ */
+ args->addr = addr;
+ args->count++;
+
+ return 0;
+}
+
+static int klp_find_object_symbol(const char *objname, const char *name,
+ unsigned long *addr)
+{
+ struct klp_find_arg args = {
+ .objname = objname,
+ .name = name,
+ .addr = 0,
+ .count = 0
+ };
+
+ kallsyms_on_each_symbol(klp_find_callback, &args);
+
+ if (args.count == 0)
+ pr_err("symbol '%s' not found in symbol table\n", name);
+ else if (args.count > 1)
+ pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
+ args.count, name, objname);
+ else {
+ *addr = args.addr;
+ return 0;
+ }
+
+ *addr = 0;
+ return -EINVAL;
+}
+
+struct klp_verify_args {
+ const char *name;
+ const unsigned long addr;
+};
+
+static int klp_verify_callback(void *data, const char *name,
+ struct module *mod, unsigned long addr)
+{
+ struct klp_verify_args *args = data;
+
+ if (!mod &&
+ !strcmp(args->name, name) &&
+ args->addr == addr)
+ return 1;
+
+ return 0;
+}
+
+static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
+{
+ struct klp_verify_args args = {
+ .name = name,
+ .addr = addr,
+ };
+
+ if (kallsyms_on_each_symbol(klp_verify_callback, &args))
+ return 0;
+
+ pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
+ name, addr);
+ return -EINVAL;
+}
+
+static int klp_find_verify_func_addr(struct klp_object *obj,
+ struct klp_func *func)
+{
+ int ret;
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+ /* KASLR is enabled, disregard old_addr from user */
+ func->old_addr = 0;
+#endif
+
+ if (!func->old_addr || klp_is_module(obj))
+ ret = klp_find_object_symbol(obj->name, func->old_name,
+ &func->old_addr);
+ else
+ ret = klp_verify_vmlinux_symbol(func->old_name,
+ func->old_addr);
+
+ return ret;
+}
+
+/*
+ * external symbols are located outside the parent object (where the parent
+ * object is either vmlinux or the kmod being patched).
+ */
+static int klp_find_external_symbol(struct module *pmod, const char *name,
+ unsigned long *addr)
+{
+ const struct kernel_symbol *sym;
+
+ /* first, check if it's an exported symbol */
+ preempt_disable();
+ sym = find_symbol(name, NULL, NULL, true, true);
+ preempt_enable();
+ if (sym) {
+ *addr = sym->value;
+ return 0;
+ }
+
+ /* otherwise check if it's in another .o within the patch module */
+ return klp_find_object_symbol(pmod->name, name, addr);
+}
+
+static int klp_write_object_relocations(struct module *pmod,
+ struct klp_object *obj)
+{
+ int ret;
+ struct klp_reloc *reloc;
+
+ if (WARN_ON(!klp_is_object_loaded(obj)))
+ return -EINVAL;
+
+ if (WARN_ON(!obj->relocs))
+ return -EINVAL;
+
+ for (reloc = obj->relocs; reloc->name; reloc++) {
+ if (!klp_is_module(obj)) {
+ ret = klp_verify_vmlinux_symbol(reloc->name,
+ reloc->val);
+ if (ret)
+ return ret;
+ } else {
+ /* module, reloc->val needs to be discovered */
+ if (reloc->external)
+ ret = klp_find_external_symbol(pmod,
+ reloc->name,
+ &reloc->val);
+ else
+ ret = klp_find_object_symbol(obj->mod->name,
+ reloc->name,
+ &reloc->val);
+ if (ret)
+ return ret;
+ }
+ ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
+ reloc->val + reloc->addend);
+ if (ret) {
+ pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
+ reloc->name, reloc->val, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void notrace klp_ftrace_handler(unsigned long ip,
+ unsigned long parent_ip,
+ struct ftrace_ops *fops,
+ struct pt_regs *regs)
+{
+ struct klp_ops *ops;
+ struct klp_func *func;
+
+ ops = container_of(fops, struct klp_ops, fops);
+
+ rcu_read_lock();
+ func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
+ stack_node);
+ rcu_read_unlock();
+
+ if (WARN_ON_ONCE(!func))
+ return;
+
+ klp_arch_set_pc(regs, (unsigned long)func->new_func);
+}
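The redirection itself is delegated to the architecture. As a point of reference, a sketch of the expected arch hook (assumed here to be the x86 flavour added alongside this core, not defined in this file): klp_arch_set_pc() only has to rewrite the saved instruction pointer, and ftrace's SAVE_REGS/IPMODIFY support then makes the handler return into the new function:

/* Sketch of the x86 arch hook this handler relies on (assumption, for illustration). */
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
        regs->ip = ip;
}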
+
+static int klp_disable_func(struct klp_func *func)
+{
+ struct klp_ops *ops;
+ int ret;
+
+ if (WARN_ON(func->state != KLP_ENABLED))
+ return -EINVAL;
+
+ if (WARN_ON(!func->old_addr))
+ return -EINVAL;
+
+ ops = klp_find_ops(func->old_addr);
+ if (WARN_ON(!ops))
+ return -EINVAL;
+
+ if (list_is_singular(&ops->func_stack)) {
+ ret = unregister_ftrace_function(&ops->fops);
+ if (ret) {
+ pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
+ func->old_name, ret);
+ return ret;
+ }
+
+ ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ if (ret)
+ pr_warn("function unregister succeeded but failed to clear the filter\n");
+
+ list_del_rcu(&func->stack_node);
+ list_del(&ops->node);
+ kfree(ops);
+ } else {
+ list_del_rcu(&func->stack_node);
+ }
+
+ func->state = KLP_DISABLED;
+
+ return 0;
+}
+
+static int klp_enable_func(struct klp_func *func)
+{
+ struct klp_ops *ops;
+ int ret;
+
+ if (WARN_ON(!func->old_addr))
+ return -EINVAL;
+
+ if (WARN_ON(func->state != KLP_DISABLED))
+ return -EINVAL;
+
+ ops = klp_find_ops(func->old_addr);
+ if (!ops) {
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->fops.func = klp_ftrace_handler;
+ ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
+ FTRACE_OPS_FL_DYNAMIC |
+ FTRACE_OPS_FL_IPMODIFY;
+
+ list_add(&ops->node, &klp_ops);
+
+ INIT_LIST_HEAD(&ops->func_stack);
+ list_add_rcu(&func->stack_node, &ops->func_stack);
+
+ ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+ if (ret) {
+ pr_err("failed to set ftrace filter for function '%s' (%d)\n",
+ func->old_name, ret);
+ goto err;
+ }
+
+ ret = register_ftrace_function(&ops->fops);
+ if (ret) {
+ pr_err("failed to register ftrace handler for function '%s' (%d)\n",
+ func->old_name, ret);
+ ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ goto err;
+ }
+
+ } else {
+ list_add_rcu(&func->stack_node, &ops->func_stack);
+ }
+
+ func->state = KLP_ENABLED;
+
+ return 0;
+
+err:
+ list_del_rcu(&func->stack_node);
+ list_del(&ops->node);
+ kfree(ops);
+ return ret;
+}
+
+static int klp_disable_object(struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+
+ for (func = obj->funcs; func->old_name; func++) {
+ if (func->state != KLP_ENABLED)
+ continue;
+
+ ret = klp_disable_func(func);
+ if (ret)
+ return ret;
+ }
+
+ obj->state = KLP_DISABLED;
+
+ return 0;
+}
+
+static int klp_enable_object(struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+
+ if (WARN_ON(obj->state != KLP_DISABLED))
+ return -EINVAL;
+
+ if (WARN_ON(!klp_is_object_loaded(obj)))
+ return -EINVAL;
+
+ for (func = obj->funcs; func->old_name; func++) {
+ ret = klp_enable_func(func);
+ if (ret)
+ goto unregister;
+ }
+ obj->state = KLP_ENABLED;
+
+ return 0;
+
+unregister:
+ WARN_ON(klp_disable_object(obj));
+ return ret;
+}
+
+static int __klp_disable_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ /* enforce stacking: only the last enabled patch can be disabled */
+ if (!list_is_last(&patch->list, &klp_patches) &&
+ list_next_entry(patch, list)->state == KLP_ENABLED)
+ return -EBUSY;
+
+ pr_notice("disabling patch '%s'\n", patch->mod->name);
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ if (obj->state != KLP_ENABLED)
+ continue;
+
+ ret = klp_disable_object(obj);
+ if (ret)
+ return ret;
+ }
+
+ patch->state = KLP_DISABLED;
+
+ return 0;
+}
+
+/**
+ * klp_disable_patch() - disables a registered patch
+ * @patch: The registered, enabled patch to be disabled
+ *
+ * Unregisters the patched functions from ftrace.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_disable_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (patch->state == KLP_DISABLED) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = __klp_disable_patch(patch);
+
+err:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_disable_patch);
+
+static int __klp_enable_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ if (WARN_ON(patch->state != KLP_DISABLED))
+ return -EINVAL;
+
+ /* enforce stacking: only the first disabled patch can be enabled */
+ if (patch->list.prev != &klp_patches &&
+ list_prev_entry(patch, list)->state == KLP_DISABLED)
+ return -EBUSY;
+
+ pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
+ add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+
+ pr_notice("enabling patch '%s'\n", patch->mod->name);
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ klp_find_object_module(obj);
+
+ if (!klp_is_object_loaded(obj))
+ continue;
+
+ ret = klp_enable_object(obj);
+ if (ret)
+ goto unregister;
+ }
+
+ patch->state = KLP_ENABLED;
+
+ return 0;
+
+unregister:
+ WARN_ON(__klp_disable_patch(patch));
+ return ret;
+}
+
+/**
+ * klp_enable_patch() - enables a registered patch
+ * @patch: The registered, disabled patch to be enabled
+ *
+ * Performs the needed symbol lookups and code relocations,
+ * then registers the patched functions with ftrace.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_enable_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = __klp_enable_patch(patch);
+
+err:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_enable_patch);
+
+/*
+ * Sysfs Interface
+ *
+ * /sys/kernel/livepatch
+ * /sys/kernel/livepatch/<patch>
+ * /sys/kernel/livepatch/<patch>/enabled
+ * /sys/kernel/livepatch/<patch>/<object>
+ * /sys/kernel/livepatch/<patch>/<object>/<func>
+ */
+
+static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+
+ if (val != KLP_DISABLED && val != KLP_ENABLED)
+ return -EINVAL;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+
+ mutex_lock(&klp_mutex);
+
+ if (val == patch->state) {
+ /* already in requested state */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (val == KLP_ENABLED) {
+ ret = __klp_enable_patch(patch);
+ if (ret)
+ goto err;
+ } else {
+ ret = __klp_disable_patch(patch);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&klp_mutex);
+
+ return count;
+
+err:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+
+static ssize_t enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_patch *patch;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
+}
+
+static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
+static struct attribute *klp_patch_attrs[] = {
+ &enabled_kobj_attr.attr,
+ NULL
+};
+
+static void klp_kobj_release_patch(struct kobject *kobj)
+{
+ /*
+ * Once we have a consistency model we'll need to module_put() the
+ * patch module here. See klp_register_patch() for more details.
+ */
+}
+
+static struct kobj_type klp_ktype_patch = {
+ .release = klp_kobj_release_patch,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = klp_patch_attrs,
+};
+
+static void klp_kobj_release_func(struct kobject *kobj)
+{
+}
+
+static struct kobj_type klp_ktype_func = {
+ .release = klp_kobj_release_func,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/*
+ * Free all functions' kobjects in the array up to some limit. When limit is
+ * NULL, all kobjects are freed.
+ */
+static void klp_free_funcs_limited(struct klp_object *obj,
+ struct klp_func *limit)
+{
+ struct klp_func *func;
+
+ for (func = obj->funcs; func->old_name && func != limit; func++)
+ kobject_put(&func->kobj);
+}
+
+/* Clean up when a patched object is unloaded */
+static void klp_free_object_loaded(struct klp_object *obj)
+{
+ struct klp_func *func;
+
+ obj->mod = NULL;
+
+ for (func = obj->funcs; func->old_name; func++)
+ func->old_addr = 0;
+}
+
+/*
+ * Free all objects' kobjects in the array up to some limit. When limit is
+ * NULL, all kobjects are freed.
+ */
+static void klp_free_objects_limited(struct klp_patch *patch,
+ struct klp_object *limit)
+{
+ struct klp_object *obj;
+
+ for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
+ klp_free_funcs_limited(obj, NULL);
+ kobject_put(obj->kobj);
+ }
+}
+
+static void klp_free_patch(struct klp_patch *patch)
+{
+ klp_free_objects_limited(patch, NULL);
+ if (!list_empty(&patch->list))
+ list_del(&patch->list);
+ kobject_put(&patch->kobj);
+}
+
+static int klp_init_func(struct klp_object *obj, struct klp_func *func)
+{
+ INIT_LIST_HEAD(&func->stack_node);
+ func->state = KLP_DISABLED;
+
+ return kobject_init_and_add(&func->kobj, &klp_ktype_func,
+ obj->kobj, func->old_name);
+}
+
+/* parts of the initialization that are done only when the object is loaded */
+static int klp_init_object_loaded(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+
+ if (obj->relocs) {
+ ret = klp_write_object_relocations(patch->mod, obj);
+ if (ret)
+ return ret;
+ }
+
+ for (func = obj->funcs; func->old_name; func++) {
+ ret = klp_find_verify_func_addr(obj, func);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+ const char *name;
+
+ if (!obj->funcs)
+ return -EINVAL;
+
+ obj->state = KLP_DISABLED;
+
+ klp_find_object_module(obj);
+
+ name = klp_is_module(obj) ? obj->name : "vmlinux";
+ obj->kobj = kobject_create_and_add(name, &patch->kobj);
+ if (!obj->kobj)
+ return -ENOMEM;
+
+ for (func = obj->funcs; func->old_name; func++) {
+ ret = klp_init_func(obj, func);
+ if (ret)
+ goto free;
+ }
+
+ if (klp_is_object_loaded(obj)) {
+ ret = klp_init_object_loaded(patch, obj);
+ if (ret)
+ goto free;
+ }
+
+ return 0;
+
+free:
+ klp_free_funcs_limited(obj, func);
+ kobject_put(obj->kobj);
+ return ret;
+}
+
+static int klp_init_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ if (!patch->objs)
+ return -EINVAL;
+
+ mutex_lock(&klp_mutex);
+
+ patch->state = KLP_DISABLED;
+
+ ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
+ klp_root_kobj, patch->mod->name);
+ if (ret)
+ goto unlock;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ ret = klp_init_object(patch, obj);
+ if (ret)
+ goto free;
+ }
+
+ list_add_tail(&patch->list, &klp_patches);
+
+ mutex_unlock(&klp_mutex);
+
+ return 0;
+
+free:
+ klp_free_objects_limited(patch, obj);
+ kobject_put(&patch->kobj);
+unlock:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+
+/**
+ * klp_unregister_patch() - unregisters a patch
+ * @patch: Disabled patch to be unregistered
+ *
+ * Frees the data structures and removes the sysfs interface.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_unregister_patch(struct klp_patch *patch)
+{
+ int ret = 0;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (patch->state == KLP_ENABLED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ klp_free_patch(patch);
+
+out:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_unregister_patch);
+
+/**
+ * klp_register_patch() - registers a patch
+ * @patch: Patch to be registered
+ *
+ * Initializes the data structure associated with the patch and
+ * creates the sysfs interface.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_register_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ if (!klp_initialized())
+ return -ENODEV;
+
+ if (!patch || !patch->mod)
+ return -EINVAL;
+
+ /*
+ * A reference is taken on the patch module to prevent it from being
+ * unloaded. Right now, we don't allow patch modules to unload since
+ * there is currently no method to determine if a thread is still
+ * running in the patched code contained in the patch module once
+ * the ftrace registration is successful.
+ */
+ if (!try_module_get(patch->mod))
+ return -ENODEV;
+
+ ret = klp_init_patch(patch);
+ if (ret)
+ module_put(patch->mod);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_register_patch);
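Putting the registration and enable paths together, a patch module needs very little boilerplate. A minimal sketch of a consumer follows (the replacement function and the patched symbol are purely illustrative; the klp_func/klp_object/klp_patch initializers follow the structures this API expects, with each array terminated by an empty element):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Illustrative replacement; it must match the signature of the patched symbol. */
static int livepatch_example_show(struct seq_file *m, void *v)
{
        seq_puts(m, "this message comes from the patch module\n");
        return 0;
}

static struct klp_func funcs[] = {
        {
                .old_name = "cmdline_proc_show",        /* example vmlinux symbol */
                .new_func = livepatch_example_show,
        }, { }
};

static struct klp_object objs[] = {
        {
                /* name == NULL means the patched object is vmlinux itself */
                .funcs = funcs,
        }, { }
};

static struct klp_patch patch = {
        .mod = THIS_MODULE,
        .objs = objs,
};

static int livepatch_example_init(void)
{
        int ret;

        ret = klp_register_patch(&patch);
        if (ret)
                return ret;

        ret = klp_enable_patch(&patch);
        if (ret) {
                WARN_ON(klp_unregister_patch(&patch));
                return ret;
        }

        return 0;
}

static void livepatch_example_exit(void)
{
        WARN_ON(klp_disable_patch(&patch));
        WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_example_init);
module_exit(livepatch_example_exit);
MODULE_LICENSE("GPL");

Once registered and enabled, the same transitions are also reachable from user space through /sys/kernel/livepatch/<patch>/enabled, which funnels into __klp_enable_patch()/__klp_disable_patch() via enabled_store() above.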
+
+static void klp_module_notify_coming(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct module *pmod = patch->mod;
+ struct module *mod = obj->mod;
+ int ret;
+
+ ret = klp_init_object_loaded(patch, obj);
+ if (ret)
+ goto err;
+
+ if (patch->state == KLP_DISABLED)
+ return;
+
+ pr_notice("applying patch '%s' to loading module '%s'\n",
+ pmod->name, mod->name);
+
+ ret = klp_enable_object(obj);
+ if (!ret)
+ return;
+
+err:
+ pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+ pmod->name, mod->name, ret);
+}
+
+static void klp_module_notify_going(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct module *pmod = patch->mod;
+ struct module *mod = obj->mod;
+ int ret;
+
+ if (patch->state == KLP_DISABLED)
+ goto disabled;
+
+ pr_notice("reverting patch '%s' on unloading module '%s'\n",
+ pmod->name, mod->name);
+
+ ret = klp_disable_object(obj);
+ if (ret)
+ pr_warn("failed to revert patch '%s' on module '%s' (%d)\n",
+ pmod->name, mod->name, ret);
+
+disabled:
+ klp_free_object_loaded(obj);
+}
+
+static int klp_module_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct module *mod = data;
+ struct klp_patch *patch;
+ struct klp_object *obj;
+
+ if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
+ return 0;
+
+ mutex_lock(&klp_mutex);
+
+ list_for_each_entry(patch, &klp_patches, list) {
+ for (obj = patch->objs; obj->funcs; obj++) {
+ if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+ continue;
+
+ if (action == MODULE_STATE_COMING) {
+ obj->mod = mod;
+ klp_module_notify_coming(patch, obj);
+ } else /* MODULE_STATE_GOING */
+ klp_module_notify_going(patch, obj);
+
+ break;
+ }
+ }
+
+ mutex_unlock(&klp_mutex);
+
+ return 0;
+}
+
+static struct notifier_block klp_module_nb = {
+ .notifier_call = klp_module_notify,
+ .priority = INT_MIN+1, /* called late but before ftrace notifier */
+};
+
+static int klp_init(void)
+{
+ int ret;
+
+ ret = klp_check_compiler_support();
+ if (ret) {
+ pr_info("Your compiler is too old; turning off.\n");
+ return -EINVAL;
+ }
+
+ ret = register_module_notifier(&klp_module_nb);
+ if (ret)
+ return ret;
+
+ klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
+ if (!klp_root_kobj) {
+ ret = -ENOMEM;
+ goto unregister;
+ }
+
+ return 0;
+
+unregister:
+ unregister_module_notifier(&klp_module_nb);
+ return ret;
+}
+
+module_init(klp_init);
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 8541bfdfd232..de7a416cca2a 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,11 +1,11 @@
-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
+obj-y += mutex.o semaphore.o rwsem.o
ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_lockdep.o = -pg
-CFLAGS_REMOVE_lockdep_proc.o = -pg
-CFLAGS_REMOVE_mutex-debug.o = -pg
-CFLAGS_REMOVE_rtmutex-debug.o = -pg
+CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
@@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
endif
obj-$(CONFIG_SMP) += spinlock.o
+obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
obj-$(CONFIG_SMP) += lglock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 4d60986fcbee..d1fe2ba5bac9 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
arch_mcs_spin_unlock_contended(&next->locked);
}
-/*
- * Cancellable version of the MCS lock above.
- *
- * Intended for adaptive spinning of sleeping locks:
- * mutex_lock()/rwsem_down_{read,write}() etc.
- */
-
-struct optimistic_spin_node {
- struct optimistic_spin_node *next, *prev;
- int locked; /* 1 if lock acquired */
- int cpu; /* encoded CPU # value */
-};
-
-extern bool osq_lock(struct optimistic_spin_queue *lock);
-extern void osq_unlock(struct optimistic_spin_queue *lock);
-
#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 454195194d4a..94674e5919cb 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -81,7 +81,7 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
* The mutex must later on be released by the same task that
* acquired it. Recursive locking is not allowed. The task
* may not exit without first unlocking the mutex. Also, kernel
- * memory where the mutex resides mutex must not be freed with
+ * memory where the mutex resides must not be freed with
* the mutex still locked. The mutex must first be initialized
* (or statically defined) before it can be locked. memset()-ing
* the mutex to 0 is not allowed.
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
}
/*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
* slowpath, set ctx and wake up any waiters so they can recheck.
*
* This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,19 +191,32 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
spin_unlock_mutex(&lock->base.wait_lock, flags);
}
-
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
- * In order to avoid a stampede of mutex spinners from acquiring the mutex
- * more or less simultaneously, the spinners need to acquire a MCS lock
- * first before spinning on the owner field.
+ * After acquiring lock in the slowpath, set ctx and wake up any
+ * waiters so they can recheck.
*
+ * Callers must hold the mutex wait_lock.
*/
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx)
+{
+ struct mutex_waiter *cur;
-/*
- * Mutex spinning code migrated from kernel/sched/core.c
- */
+ ww_mutex_lock_acquired(lock, ctx);
+ lock->ctx = ctx;
+
+ /*
+ * Give any possible sleeping processes the chance to wake up,
+ * so they can recheck if they have to back off.
+ */
+ list_for_each_entry(cur, &lock->base.wait_list, list) {
+ debug_mutex_wake_waiter(&lock->base, cur);
+ wake_up_process(cur->task);
+ }
+}
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
if (lock->owner != owner)
@@ -307,6 +320,11 @@ static bool mutex_optimistic_spin(struct mutex *lock,
if (!mutex_can_spin_on_owner(lock))
goto done;
+ /*
+ * In order to avoid a stampede of mutex spinners trying to
+ * acquire the mutex all at once, the spinners need to take an
+ * MCS (queued) lock first before spinning on the owner field.
+ */
if (!osq_lock(&lock->osq))
goto done;
@@ -469,7 +487,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
EXPORT_SYMBOL(ww_mutex_unlock);
static inline int __sched
-__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
@@ -557,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
}
if (use_ww_ctx && ww_ctx->acquired > 0) {
- ret = __mutex_lock_check_stamp(lock, ww_ctx);
+ ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
if (ret)
goto err;
}
@@ -569,6 +587,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
schedule_preempt_disabled();
spin_lock_mutex(&lock->wait_lock, flags);
}
+ __set_task_state(task, TASK_RUNNING);
+
mutex_remove_waiter(lock, &waiter, current_thread_info());
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
@@ -582,23 +602,7 @@ skip_wait:
if (use_ww_ctx) {
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
- struct mutex_waiter *cur;
-
- /*
- * This branch gets optimized out for the common case,
- * and is only important for ww_mutex_lock.
- */
- ww_mutex_lock_acquired(ww, ww_ctx);
- ww->ctx = ww_ctx;
-
- /*
- * Give any possible sleeping processes the chance to wake up,
- * so they can recheck if they have to back off.
- */
- list_for_each_entry(cur, &lock->wait_list, list) {
- debug_mutex_wake_waiter(lock, cur);
- wake_up_process(cur->task);
- }
+ ww_mutex_set_context_slowpath(ww, ww_ctx);
}
spin_unlock_mutex(&lock->wait_lock, flags);
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/osq_lock.c
index 9887a905a762..c112d00341b0 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/osq_lock.c
@@ -1,8 +1,6 @@
#include <linux/percpu.h>
#include <linux/sched.h>
-#include "mcs_spinlock.h"
-
-#ifdef CONFIG_SMP
+#include <linux/osq_lock.h>
/*
* An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -111,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* cmpxchg in an attempt to undo our queueing.
*/
- while (!smp_load_acquire(&node->locked)) {
+ while (!ACCESS_ONCE(node->locked)) {
/*
* If we need to reschedule bail... so we can block.
*/
@@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock)
if (next)
ACCESS_ONCE(next->locked) = 1;
}
-
-#endif
-
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7c98873a3077..3059bc2f022d 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1130,6 +1130,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
set_current_state(state);
}
+ __set_current_state(TASK_RUNNING);
return ret;
}
@@ -1188,10 +1189,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
if (likely(!ret))
+ /* sleep on the mutex */
ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
- set_current_state(TASK_RUNNING);
-
if (unlikely(ret)) {
remove_waiter(lock, &waiter);
rt_mutex_handle_deadlock(ret, chwalk, &waiter);
@@ -1626,10 +1626,9 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
set_current_state(TASK_INTERRUPTIBLE);
+ /* sleep on the mutex */
ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
- set_current_state(TASK_RUNNING);
-
if (unlikely(ret))
remove_waiter(lock, waiter);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2c93571162cb..2555ae15ec14 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -154,7 +154,7 @@ void __sched __down_read(struct rw_semaphore *sem)
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
- tsk->state = TASK_RUNNING;
+ __set_task_state(tsk, TASK_RUNNING);
out:
;
}
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 7628c3fc37ca..2f7cc4076f50 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -242,8 +242,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
schedule();
}
- tsk->state = TASK_RUNNING;
-
+ __set_task_state(tsk, TASK_RUNNING);
return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 4b082b5cac9e..db3ccb1dd614 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -363,6 +363,14 @@ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+{
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh_nested);
+
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
int subclass)
{
diff --git a/kernel/module.c b/kernel/module.c
index 3965511ae133..8426ad48362c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,6 +56,7 @@
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
+#include <linux/kasan.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
@@ -772,9 +773,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
return 0;
}
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod: the module we're checking
+ *
+ * Returns:
+ * -1 if the module is in the process of unloading
+ * otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
{
- return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+ return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
@@ -856,7 +866,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
struct module_use *use;
int printed_something = 0;
- seq_printf(m, " %lu ", module_refcount(mod));
+ seq_printf(m, " %i ", module_refcount(mod));
/*
* Always include a trailing , so userspace can differentiate
@@ -908,7 +918,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
- return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+ return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}
static struct module_attribute modinfo_refcnt =
@@ -1216,6 +1226,12 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
const unsigned long *crc;
int err;
+ /*
+ * The module_mutex should not be a heavily contended lock;
+ * if we get the occasional sleep here, we'll go an extra iteration
+ * in the wait_event_interruptible(), which is harmless.
+ */
+ sched_annotate_sleep();
mutex_lock(&module_mutex);
sym = find_symbol(name, &owner, &crc,
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
@@ -1795,15 +1811,20 @@ static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
{
vfree(module_region);
+ kasan_module_free(module_region);
}
void __weak module_arch_cleanup(struct module *mod)
{
}
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
@@ -1841,7 +1862,8 @@ static void free_module(struct module *mod)
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
+ module_arch_freeing_init(mod);
+ module_memfree(mod->module_init);
kfree(mod->args);
percpu_modfree(mod);
@@ -1850,7 +1872,7 @@ static void free_module(struct module *mod)
/* Finally, free the core (containing the module structure) */
unset_module_core_ro_nx(mod);
- module_free(mod, mod->module_core);
+ module_memfree(mod->module_core);
#ifdef CONFIG_MPU
update_protections(current->mm);
@@ -2785,7 +2807,7 @@ static int move_module(struct module *mod, struct load_info *info)
*/
kmemleak_ignore(ptr);
if (!ptr) {
- module_free(mod, mod->module_core);
+ module_memfree(mod->module_core);
return -ENOMEM;
}
memset(ptr, 0, mod->init_size);
@@ -2930,8 +2952,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
- module_free(mod, mod->module_init);
- module_free(mod, mod->module_core);
+ module_arch_freeing_init(mod);
+ module_memfree(mod->module_init);
+ module_memfree(mod->module_core);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2963,6 +2986,12 @@ static bool finished_loading(const char *name)
struct module *mod;
bool ret;
+ /*
+ * The module_mutex should not be a heavily contended lock;
+ * if we get the occasional sleep here, we'll go an extra iteration
+ * in the wait_event_interruptible(), which is harmless.
+ */
+ sched_annotate_sleep();
mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true);
ret = !mod || mod->state == MODULE_STATE_LIVE
@@ -2983,10 +3012,31 @@ static void do_mod_ctors(struct module *mod)
#endif
}
+/* For freeing module_init on success, in case kallsyms is traversing it */
+struct mod_initfree {
+ struct rcu_head rcu;
+ void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+ struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+ module_memfree(m->module_init);
+ kfree(m);
+}
+
/* This is where the real work happens */
static int do_init_module(struct module *mod)
{
int ret = 0;
+ struct mod_initfree *freeinit;
+
+ freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+ if (!freeinit) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ freeinit->module_init = mod->module_init;
/*
* We want to find out whether @mod uses async during init. Clear
@@ -2999,18 +3049,7 @@ static int do_init_module(struct module *mod)
if (mod->init != NULL)
ret = do_one_initcall(mod->init);
if (ret < 0) {
- /*
- * Init routine failed: abort. Try to protect us from
- * buggy refcounters.
- */
- mod->state = MODULE_STATE_GOING;
- synchronize_sched();
- module_put(mod);
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_GOING, mod);
- free_module(mod);
- wake_up_all(&module_wq);
- return ret;
+ goto fail_free_freeinit;
}
if (ret > 0) {
pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3094,35 @@ static int do_init_module(struct module *mod)
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
+ module_arch_freeing_init(mod);
mod->module_init = NULL;
mod->init_size = 0;
mod->init_ro_size = 0;
mod->init_text_size = 0;
+ /*
+ * We want to free module_init, but be aware that kallsyms may be
+ * walking this with preempt disabled. In all the failure paths,
+ * we call synchronize_rcu/synchronize_sched, but we don't want
+ * to slow down the success path, so use actual RCU here.
+ */
+ call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
return 0;
+
+fail_free_freeinit:
+ kfree(freeinit);
+fail:
+ /* Try to protect us from buggy refcounters. */
+ mod->state = MODULE_STATE_GOING;
+ synchronize_sched();
+ module_put(mod);
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_GOING, mod);
+ free_module(mod);
+ wake_up_all(&module_wq);
+ return ret;
}
static int may_init_module(void)
@@ -3075,32 +3134,6 @@ static int may_init_module(void)
}
/*
- * Can't use wait_event_interruptible() because our condition
- * 'finished_loading()' contains a blocking primitive itself (mutex_lock).
- */
-static int wait_finished_loading(struct module *mod)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int ret = 0;
-
- add_wait_queue(&module_wq, &wait);
- for (;;) {
- if (finished_loading(mod->name))
- break;
-
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
- }
- remove_wait_queue(&module_wq, &wait);
-
- return ret;
-}
-
-/*
* We try to place it in the list now to make sure it's unique before
* we dedicate too many resources. In particular, temporary percpu
* memory exhaustion.
@@ -3120,8 +3153,8 @@ again:
|| old->state == MODULE_STATE_UNFORMED) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
-
- err = wait_finished_loading(mod);
+ err = wait_event_interruptible(module_wq,
+ finished_loading(mod->name));
if (err)
goto out_unlocked;
goto again;
@@ -3220,7 +3253,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
mod->sig_ok = info->sig_ok;
if (!mod->sig_ok) {
pr_notice_once("%s: module verification failed: signature "
- "and/or required key missing - tainting "
+ "and/or required key missing - tainting "
"kernel\n", mod->name);
add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
}
@@ -3311,6 +3344,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
module_bug_cleanup(mod);
mutex_unlock(&module_mutex);
+ /* Free lock-classes: */
+ lockdep_free_key_range(mod->module_core, mod->core_size);
+
/* we can't deallocate the module until we clear memory protection */
unset_module_init_ro_nx(mod);
unset_module_core_ro_nx(mod);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4803da6eab62..ae9fc7cc360e 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
+#ifdef CONFIG_SRCU
/*
* SRCU notifier chain routines. Registration and unregistration
* use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
}
EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
+#endif /* CONFIG_SRCU */
+
static ATOMIC_NOTIFIER_HEAD(die_chain);
int notrace notify_die(enum die_val val, const char *str,
diff --git a/kernel/padata.c b/kernel/padata.c
index 161402f0b517..b38bea9c466a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -917,15 +917,10 @@ static ssize_t show_cpumask(struct padata_instance *pinst,
else
cpumask = pinst->cpumask.pcpu;
- len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
- nr_cpu_ids);
- if (PAGE_SIZE - len < 2)
- len = -EINVAL;
- else
- len += sprintf(buf + len, "\n");
-
+ len = snprintf(buf, PAGE_SIZE, "%*pb\n",
+ nr_cpu_ids, cpumask_bits(cpumask));
mutex_unlock(&pinst->lock);
- return len;
+ return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
diff --git a/kernel/panic.c b/kernel/panic.c
index 4d8d6f906dec..8136ad76e5fd 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -226,6 +226,7 @@ static const struct tnt tnts[] = {
{ TAINT_OOT_MODULE, 'O', ' ' },
{ TAINT_UNSIGNED_MODULE, 'E', ' ' },
{ TAINT_SOFTLOCKUP, 'L', ' ' },
+ { TAINT_LIVEPATCH, 'K', ' ' },
};
/**
@@ -246,6 +247,7 @@ static const struct tnt tnts[] = {
* 'O' - Out-of-tree module has been loaded.
* 'E' - Unsigned module has been loaded.
* 'L' - A soft lockup has previously occurred.
+ * 'K' - Kernel has been live patched.
*
* The string is overwritten by the next call to print_tainted().
*/
diff --git a/kernel/params.c b/kernel/params.c
index 0af9b2c4e56c..728e05b167de 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
mk->mp->grp.attrs = new_attrs;
/* Tack new one on the end. */
+ memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
mk->mp->attrs[mk->mp->num].param = kp;
mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
/* Do not allow runtime DAC changes to make param writable. */
if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+ else
+ mk->mp->attrs[mk->mp->num].mattr.store = NULL;
mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
mk->mp->num++;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 48b28d387c7f..7e01f78f0417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -251,6 +251,7 @@ config APM_EMULATION
config PM_OPP
bool
+ select SRCU
---help---
SOCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. This
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5a6ec8678b9a..564f786df470 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -84,8 +84,8 @@ static int try_to_freeze_tasks(bool user_only)
elapsed_msecs = elapsed_msecs64;
if (todo) {
- printk("\n");
- printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
+ pr_cont("\n");
+ pr_err("Freezing of tasks %s after %d.%03d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
wakeup ? "aborted" : "failed",
elapsed_msecs / 1000, elapsed_msecs % 1000,
@@ -101,37 +101,13 @@ static int try_to_freeze_tasks(bool user_only)
read_unlock(&tasklist_lock);
}
} else {
- printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+ pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
elapsed_msecs % 1000);
}
return todo ? -EBUSY : 0;
}
-static bool __check_frozen_processes(void)
-{
- struct task_struct *g, *p;
-
- for_each_process_thread(g, p)
- if (p != current && !freezer_should_skip(p) && !frozen(p))
- return false;
-
- return true;
-}
-
-/*
- * Returns true if all freezable tasks (except for current) are frozen already
- */
-static bool check_frozen_processes(void)
-{
- bool ret;
-
- read_lock(&tasklist_lock);
- ret = __check_frozen_processes();
- read_unlock(&tasklist_lock);
- return ret;
-}
-
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
* The current thread will not be frozen. The same process that calls
@@ -142,7 +118,6 @@ static bool check_frozen_processes(void)
int freeze_processes(void)
{
int error;
- int oom_kills_saved;
error = __usermodehelper_disable(UMH_FREEZING);
if (error)
@@ -155,31 +130,24 @@ int freeze_processes(void)
atomic_inc(&system_freezing_cnt);
pm_wakeup_clear();
- printk("Freezing user space processes ... ");
+ pr_info("Freezing user space processes ... ");
pm_freezing = true;
- oom_kills_saved = oom_kills_count();
error = try_to_freeze_tasks(true);
if (!error) {
__usermodehelper_set_disable_depth(UMH_DISABLED);
- oom_killer_disable();
-
- /*
- * There might have been an OOM kill while we were
- * freezing tasks and the killed task might be still
- * on the way out so we have to double check for race.
- */
- if (oom_kills_count() != oom_kills_saved &&
- !check_frozen_processes()) {
- __usermodehelper_set_disable_depth(UMH_ENABLED);
- printk("OOM in progress.");
- error = -EBUSY;
- } else {
- printk("done.");
- }
+ pr_cont("done.");
}
- printk("\n");
+ pr_cont("\n");
BUG_ON(in_atomic());
+ /*
+ * Now that the whole userspace is frozen we need to disable
+ * the OOM killer to disallow any further interference with
+ * killable tasks.
+ */
+ if (!error && !oom_killer_disable())
+ error = -EBUSY;
+
if (error)
thaw_processes();
return error;
@@ -197,13 +165,14 @@ int freeze_kernel_threads(void)
{
int error;
- printk("Freezing remaining freezable tasks ... ");
+ pr_info("Freezing remaining freezable tasks ... ");
+
pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
- printk("done.");
+ pr_cont("done.");
- printk("\n");
+ pr_cont("\n");
BUG_ON(in_atomic());
if (error)
@@ -224,7 +193,7 @@ void thaw_processes(void)
oom_killer_enable();
- printk("Restarting tasks ... ");
+ pr_info("Restarting tasks ... ");
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
@@ -243,7 +212,7 @@ void thaw_processes(void)
usermodehelper_enable();
schedule();
- printk("done.\n");
+ pr_cont("done.\n");
trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
@@ -252,7 +221,7 @@ void thaw_kernel_threads(void)
struct task_struct *g, *p;
pm_nosig_freezing = false;
- printk("Restarting kernel threads ... ");
+ pr_info("Restarting kernel threads ... ");
thaw_workqueues();
@@ -264,5 +233,5 @@ void thaw_kernel_threads(void)
read_unlock(&tasklist_lock);
schedule();
- printk("done.\n");
+ pr_cont("done.\n");
}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 5f4c006c4b1e..97b0df71303e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -41,6 +41,8 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/export.h>
@@ -182,6 +184,81 @@ static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
c->target_value = value;
}
+static inline int pm_qos_get_value(struct pm_qos_constraints *c);
+static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
+{
+ struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
+ struct pm_qos_constraints *c;
+ struct pm_qos_request *req;
+ char *type;
+ unsigned long flags;
+ int tot_reqs = 0;
+ int active_reqs = 0;
+
+ if (IS_ERR_OR_NULL(qos)) {
+ pr_err("%s: bad qos param!\n", __func__);
+ return -EINVAL;
+ }
+ c = qos->constraints;
+ if (IS_ERR_OR_NULL(c)) {
+ pr_err("%s: Bad constraints on qos?\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Lock to ensure we have a snapshot */
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ if (plist_head_empty(&c->list)) {
+ seq_puts(s, "Empty!\n");
+ goto out;
+ }
+
+ switch (c->type) {
+ case PM_QOS_MIN:
+ type = "Minimum";
+ break;
+ case PM_QOS_MAX:
+ type = "Maximum";
+ break;
+ case PM_QOS_SUM:
+ type = "Sum";
+ break;
+ default:
+ type = "Unknown";
+ }
+
+ plist_for_each_entry(req, &c->list, node) {
+ char *state = "Default";
+
+ if ((req->node).prio != c->default_value) {
+ active_reqs++;
+ state = "Active";
+ }
+ tot_reqs++;
+ seq_printf(s, "%d: %d: %s\n", tot_reqs,
+ (req->node).prio, state);
+ }
+
+ seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
+ type, pm_qos_get_value(c), active_reqs, tot_reqs);
+
+out:
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ return 0;
+}
+
+static int pm_qos_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pm_qos_dbg_show_requests,
+ inode->i_private);
+}
+
+static const struct file_operations pm_qos_debug_fops = {
+ .open = pm_qos_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
@@ -509,12 +586,17 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
/* User space interface to PM QoS classes via misc devices */
-static int register_pm_qos_misc(struct pm_qos_object *qos)
+static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
{
qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+ if (d) {
+ (void)debugfs_create_file(qos->name, S_IRUGO, d,
+ (void *)qos, &pm_qos_debug_fops);
+ }
+
return misc_register(&qos->pm_qos_power_miscdev);
}
@@ -608,11 +690,16 @@ static int __init pm_qos_power_init(void)
{
int ret = 0;
int i;
+ struct dentry *d;
BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
+ d = debugfs_create_dir("pm_qos", NULL);
+ if (IS_ERR_OR_NULL(d))
+ d = NULL;
+
for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
- ret = register_pm_qos_misc(pm_qos_array[i]);
+ ret = register_pm_qos_misc(pm_qos_array[i], d);
if (ret < 0) {
printk(KERN_ERR "pm_qos_param: %s setup failed\n",
pm_qos_array[i]->name);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0c40c16174b4..c24d5a23bf93 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1472,9 +1472,9 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
/**
* free_unnecessary_pages - Release preallocated pages not needed for the image
*/
-static void free_unnecessary_pages(void)
+static unsigned long free_unnecessary_pages(void)
{
- unsigned long save, to_free_normal, to_free_highmem;
+ unsigned long save, to_free_normal, to_free_highmem, free;
save = count_data_pages();
if (alloc_normal >= save) {
@@ -1495,6 +1495,7 @@ static void free_unnecessary_pages(void)
else
to_free_normal = 0;
}
+ free = to_free_normal + to_free_highmem;
memory_bm_position_reset(&copy_bm);
@@ -1518,6 +1519,8 @@ static void free_unnecessary_pages(void)
swsusp_unset_page_free(page);
__free_page(page);
}
+
+ return free;
}
/**
@@ -1707,7 +1710,7 @@ int hibernate_preallocate_memory(void)
* pages in memory, but we have allocated more. Release the excessive
* ones now.
*/
- free_unnecessary_pages();
+ pages -= free_unnecessary_pages();
out:
stop = ktime_get();
@@ -2310,8 +2313,6 @@ static inline void free_highmem_data(void)
free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
-static inline int get_safe_write_buffer(void) { return 0; }
-
static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 02d6b6d28796..c06df7de0963 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -935,8 +935,8 @@ static int __init ignore_loglevel_setup(char *str)
early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
- "print all kernel messages to the console.");
+MODULE_PARM_DESC(ignore_loglevel,
+ "ignore loglevel setting (prints all kernel messages to the console)");
#ifdef CONFIG_BOOT_PRINTK_DELAY
@@ -1419,16 +1419,16 @@ static void call_console_drivers(int level, const char *text, size_t len)
}
/*
- * Zap console related locks when oopsing. Only zap at most once
- * every 10 seconds, to leave time for slow consoles to print a
- * full oops.
+ * Zap console related locks when oopsing.
+ * To leave time for slow consoles to print a full oops,
+ * only zap at most once every 30 seconds.
*/
static void zap_locks(void)
{
static unsigned long oops_timestamp;
if (time_after_eq(jiffies, oops_timestamp) &&
- !time_after(jiffies, oops_timestamp + 30 * HZ))
+ !time_after(jiffies, oops_timestamp + 30 * HZ))
return;
oops_timestamp = jiffies;
diff --git a/kernel/profile.c b/kernel/profile.c
index 54bf5ba26420..a7bcd28d6e9f 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -422,8 +422,7 @@ void profile_tick(int type)
static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
- seq_cpumask(m, prof_cpu_mask);
- seq_putc(m, '\n');
+ seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
return 0;
}
diff --git a/kernel/range.c b/kernel/range.c
index 322ea8e93e4b..82cfc285b046 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
const struct range *r2 = x2;
- s64 start1, start2;
- start1 = r1->start;
- start2 = r2->start;
-
- return start1 - start2;
+ if (r1->start < r2->start)
+ return -1;
+ if (r1->start > r2->start)
+ return 1;
+ return 0;
}
int clean_sort_range(struct range *range, int az)
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index e6fae503d1bc..50a808424b06 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,4 +1,5 @@
-obj-y += update.o srcu.o
+obj-y += update.o
+obj-$(CONFIG_SRCU) += srcu.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += tree.o
obj-$(CONFIG_PREEMPT_RCU) += tree.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 07bb02eda844..80adef7d4c3d 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
void rcu_early_boot_tests(void);
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 4d559baf06e0..30d42aa55d83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
int (*readlock)(void);
void (*read_delay)(struct torture_random_state *rrsp);
void (*readunlock)(int idx);
- int (*completed)(void);
+ unsigned long (*started)(void);
+ unsigned long (*completed)(void);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
rcu_read_unlock();
}
-static int rcu_torture_completed(void)
-{
- return rcu_batches_completed();
-}
-
/*
* Update callback in the pipe. This should be invoked after a grace period.
*/
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
cur_ops->deferred_free(rp);
}
-static int rcu_no_completed(void)
+static unsigned long rcu_no_completed(void)
{
return 0;
}
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
+ .started = rcu_batches_started,
+ .completed = rcu_batches_completed,
.deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
rcu_read_unlock_bh();
}
-static int rcu_bh_torture_completed(void)
-{
- return rcu_batches_completed_bh();
-}
-
static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
+ .started = rcu_batches_started_bh,
+ .completed = rcu_batches_completed_bh,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
.exp_sync = synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
+ .started = rcu_no_completed,
.completed = rcu_no_completed,
.deferred_free = rcu_busted_torture_deferred_free,
.sync = synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
srcu_read_unlock(&srcu_ctl, idx);
}
-static int srcu_torture_completed(void)
+static unsigned long srcu_torture_completed(void)
{
return srcu_batches_completed(&srcu_ctl);
}
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
+ .started = NULL,
.completed = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
- .completed = rcu_no_completed,
+ .started = rcu_batches_started_sched,
+ .completed = rcu_batches_completed_sched,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
.exp_sync = synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
.readlock = tasks_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = tasks_torture_read_unlock,
+ .started = rcu_no_completed,
.completed = rcu_no_completed,
.deferred_free = rcu_tasks_torture_deferred_free,
.sync = synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
static void rcu_torture_timer(unsigned long unused)
{
int idx;
- int completed;
- int completed_end;
+ unsigned long started;
+ unsigned long completed;
static DEFINE_TORTURE_RANDOM(rand);
static DEFINE_SPINLOCK(rand_lock);
struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
unsigned long long ts;
idx = cur_ops->readlock();
- completed = cur_ops->completed();
+ if (cur_ops->started)
+ started = cur_ops->started();
+ else
+ started = cur_ops->completed();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- completed_end = cur_ops->completed();
+ completed = cur_ops->completed();
if (pipe_count > 1) {
do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
- completed, completed_end);
+ started, completed);
rcutorture_trace_dump();
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed_end - completed;
+ completed = completed - started;
+ if (cur_ops->started)
+ completed++;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
static int
rcu_torture_reader(void *arg)
{
- int completed;
- int completed_end;
+ unsigned long started;
+ unsigned long completed;
int idx;
DEFINE_TORTURE_RANDOM(rand);
struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
mod_timer(&t, jiffies + 1);
}
idx = cur_ops->readlock();
- completed = cur_ops->completed();
+ if (cur_ops->started)
+ started = cur_ops->started();
+ else
+ started = cur_ops->completed();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- completed_end = cur_ops->completed();
+ completed = cur_ops->completed();
if (pipe_count > 1) {
do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
- ts, completed, completed_end);
+ ts, started, completed);
rcutorture_trace_dump();
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed_end - completed;
+ completed = completed - started;
+ if (cur_ops->started)
+ completed++;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
n_rcu_torture_barrier_error++;
+ pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
+ atomic_read(&barrier_cbs_invoked),
+ n_barrier_cbs);
WARN_ON_ONCE(1);
}
n_barrier_successes++;
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index e037f3eb2f7b..445bf8ffe3fb 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
* Report the number of batches, correlated with, but not necessarily
* precisely the same as, the number of grace periods that have elapsed.
*/
-long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
return sp->completed;
}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0db5649f8817..cc9ceca7bde1 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp);
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
#include "tiny_plugin.h"
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
- if (newval) {
- RCU_TRACE(trace_rcu_dyntick(TPS("--="),
- rcu_dynticks_nesting, newval));
- rcu_dynticks_nesting = newval;
- return;
- }
- RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
- rcu_dynticks_nesting, newval));
- if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
- struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
- RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
- rcu_dynticks_nesting, newval));
- ftrace_dump(DUMP_ALL);
- WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
- current->pid, current->comm,
- idle->pid, idle->comm); /* must be idle task! */
- }
- rcu_sched_qs(); /* implies rcu_bh_inc() */
- barrier();
- rcu_dynticks_nesting = newval;
-}
-
/*
* Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
*/
void rcu_idle_enter(void)
{
- unsigned long flags;
- long long newval;
-
- local_irq_save(flags);
- WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
- if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
- DYNTICK_TASK_NEST_VALUE)
- newval = 0;
- else
- newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
- rcu_idle_enter_common(newval);
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
*/
void rcu_irq_exit(void)
{
- unsigned long flags;
- long long newval;
-
- local_irq_save(flags);
- newval = rcu_dynticks_nesting - 1;
- WARN_ON_ONCE(newval < 0);
- rcu_idle_enter_common(newval);
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
- if (oldval) {
- RCU_TRACE(trace_rcu_dyntick(TPS("++="),
- oldval, rcu_dynticks_nesting));
- return;
- }
- RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
- if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
- struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
- RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
- oldval, rcu_dynticks_nesting));
- ftrace_dump(DUMP_ALL);
- WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
- current->pid, current->comm,
- idle->pid, idle->comm); /* must be idle task! */
- }
-}
-
/*
* Exit idle, so that we are no longer in an extended quiescent state.
*/
void rcu_idle_exit(void)
{
- unsigned long flags;
- long long oldval;
-
- local_irq_save(flags);
- oldval = rcu_dynticks_nesting;
- WARN_ON_ONCE(rcu_dynticks_nesting < 0);
- if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
- rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
- else
- rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
- rcu_idle_exit_common(oldval);
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
*/
void rcu_irq_enter(void)
{
- unsigned long flags;
- long long oldval;
-
- local_irq_save(flags);
- oldval = rcu_dynticks_nesting;
- rcu_dynticks_nesting++;
- WARN_ON_ONCE(rcu_dynticks_nesting == 0);
- rcu_idle_exit_common(oldval);
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
@@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
*/
bool notrace __rcu_is_watching(void)
{
- return rcu_dynticks_nesting;
+ return true;
}
EXPORT_SYMBOL(__rcu_is_watching);
#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
/*
- * Test whether the current CPU was interrupted from idle. Nested
- * interrupts don't count, we must be running at the first interrupt
- * level.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
- return rcu_dynticks_nesting <= 1;
-}
-
-/*
* Helper function for rcu_sched_qs() and rcu_bh_qs().
* Also irqs are disabled to avoid confusion due to interrupt handlers
* invoking call_rcu().
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
void rcu_check_callbacks(int user)
{
RCU_TRACE(check_cpu_stalls());
- if (user || rcu_is_cpu_rrupt_from_idle())
+ if (user)
rcu_sched_qs();
else if (!in_softirq())
rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
rcp->curtail = &head->next;
RCU_TRACE(rcp->qlen++);
local_irq_restore(flags);
+
+ if (unlikely(is_idle_task(current))) {
+ /* force scheduling for rcu_sched_qs() */
+ resched_cpu(0);
+ }
}
/*
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+ RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
+ RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
rcu_early_boot_tests();
}
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 858c56569127..f94e209a10d6 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
rcp->ticks_this_gp++;
j = jiffies;
js = ACCESS_ONCE(rcp->jiffies_stall);
- if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
+ if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
- rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
+ rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
jiffies - rcp->gp_start, rcp->qlen);
dump_stack();
- }
- if (*rcp->curtail && ULONG_CMP_GE(j, js))
ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
3 * rcu_jiffies_till_stall_check() + 3;
- else if (ULONG_CMP_GE(j, js))
+ } else if (ULONG_CMP_GE(j, js)) {
ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+ }
}
static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7680fc275036..48d640ca1a05 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
/*
* Track the rcutorture test sequence number and the update version
* number within a given test. The rcutorture_testseq is incremented
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};
+DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
+
/*
* Let the RCU core know that this CPU has gone through the scheduler,
* which is a quiescent state. This is called when the need for a
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
+/*
+ * Register a quiescent state for all RCU flavors. If there is an
+ * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * dyntick-idle quiescent state visible to other CPUs (but only for those
+ * RCU flavors in desperate need of a quiescent state, which will normally
+ * be none of them). Either way, do a lightweight quiescent state for
+ * all RCU flavors.
+ */
+void rcu_all_qs(void)
+{
+ if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ rcu_momentary_dyntick_idle();
+ this_cpu_inc(rcu_qs_ctr);
+}
+EXPORT_SYMBOL_GPL(rcu_all_qs);
+
static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000; /* If this many pending, ignore blimit. */
static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);
/*
- * Return the number of RCU-sched batches processed thus far for debug & stats.
+ * Return the number of RCU batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started(void)
+{
+ return rcu_state_p->gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started);
+
+/*
+ * Return the number of RCU-sched batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_sched(void)
+{
+ return rcu_sched_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+
+/*
+ * Return the number of RCU BH batches started thus far for debug & stats.
*/
-long rcu_batches_completed_sched(void)
+unsigned long rcu_batches_started_bh(void)
+{
+ return rcu_bh_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+
+/*
+ * Return the number of RCU batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed(void)
+{
+ return rcu_state_p->completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU-sched batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed_sched(void)
{
return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
/*
- * Return the number of RCU BH batches processed thus far for debug & stats.
+ * Return the number of RCU BH batches completed thus far for debug & stats.
*/
-long rcu_batches_completed_bh(void)
+unsigned long rcu_batches_completed_bh(void)
{
return rcu_bh_state.completed;
}
@@ -759,39 +818,71 @@ void rcu_irq_enter(void)
/**
* rcu_nmi_enter - inform RCU of entry to NMI context
*
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is active.
+ * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
+ * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * that the CPU is active. This implementation permits nested NMIs, as
+ * long as the nesting level does not overflow an int. (You will probably
+ * run out of stack space first.)
*/
void rcu_nmi_enter(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ int incby = 2;
- if (rdtp->dynticks_nmi_nesting == 0 &&
- (atomic_read(&rdtp->dynticks) & 0x1))
- return;
- rdtp->dynticks_nmi_nesting++;
- smp_mb__before_atomic(); /* Force delay from prior write. */
- atomic_inc(&rdtp->dynticks);
- /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic(); /* See above. */
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ /* Complain about underflow. */
+ WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+
+ /*
+ * If idle from RCU viewpoint, atomically increment ->dynticks
+ * to mark non-idle and increment ->dynticks_nmi_nesting by one.
+ * Otherwise, increment ->dynticks_nmi_nesting by two. This means
+ * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+ * to be in the outermost NMI handler that interrupted an RCU-idle
+ * period (observation due to Andy Lutomirski).
+ */
+ if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+ smp_mb__before_atomic(); /* Force delay from prior write. */
+ atomic_inc(&rdtp->dynticks);
+ /* atomic_inc() before later RCU read-side crit sects */
+ smp_mb__after_atomic(); /* See above. */
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ incby = 1;
+ }
+ rdtp->dynticks_nmi_nesting += incby;
+ barrier();
}
/**
* rcu_nmi_exit - inform RCU of exit from NMI context
*
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is no longer active.
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
*/
void rcu_nmi_exit(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- if (rdtp->dynticks_nmi_nesting == 0 ||
- --rdtp->dynticks_nmi_nesting != 0)
+ /*
+ * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+ * (We are exiting an NMI handler, so RCU better be paying attention
+ * to us!)
+ */
+ WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+
+ /*
+ * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+ * leave it in non-RCU-idle state.
+ */
+ if (rdtp->dynticks_nmi_nesting != 1) {
+ rdtp->dynticks_nmi_nesting -= 2;
return;
+ }
+
+ /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+ rdtp->dynticks_nmi_nesting = 0;
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic(); /* See above. */
atomic_inc(&rdtp->dynticks);
@@ -898,17 +989,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
return 1;
} else {
+ if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+ rdp->mynode->gpnum))
+ ACCESS_ONCE(rdp->gpwrap) = true;
return 0;
}
}
/*
- * This function really isn't for public consumption, but RCU is special in
- * that context switches can allow the state machine to make progress.
- */
-extern void resched_cpu(int cpu);
-
-/*
* Return true if the specified CPU has passed through a quiescent
* state by virtue of being in or having passed through an dynticks
* idle state since the last call to dyntick_save_progress_counter()
@@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
j1 = rcu_jiffies_till_stall_check();
ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
rsp->jiffies_resched = j + j1 / 2;
+ rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+}
+
+/*
+ * Complain about starvation of grace-period kthread.
+ */
+static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+{
+ unsigned long gpa;
+ unsigned long j;
+
+ j = jiffies;
+ gpa = ACCESS_ONCE(rsp->gp_activity);
+ if (j - gpa > 2 * HZ)
+ pr_err("%s kthread starved for %ld jiffies!\n",
+ rsp->name, j - gpa);
}
/*
@@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
}
}
-static void print_other_cpu_stall(struct rcu_state *rsp)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
int cpu;
long delta;
unsigned long flags;
+ unsigned long gpa;
+ unsigned long j;
int ndetected = 0;
struct rcu_node *rnp = rcu_get_root(rsp);
long totqlen = 0;
@@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
- /*
- * Now rat on any tasks that got kicked up to the root rcu_node
- * due to CPU offlining.
- */
- rnp = rcu_get_root(rsp);
- raw_spin_lock_irqsave(&rnp->lock, flags);
- ndetected += rcu_print_task_stall(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start),
(long)rsp->gpnum, (long)rsp->completed, totqlen);
- if (ndetected == 0)
- pr_err("INFO: Stall ended before state dump start\n");
- else
+ if (ndetected) {
rcu_dump_cpu_stacks(rsp);
+ } else {
+ if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
+ ACCESS_ONCE(rsp->completed) == gpnum) {
+ pr_err("INFO: Stall ended before state dump start\n");
+ } else {
+ j = jiffies;
+ gpa = ACCESS_ONCE(rsp->gp_activity);
+ pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+ rsp->name, j - gpa, j, gpa,
+ jiffies_till_next_fqs);
+ /* In this case, the current CPU might be at fault. */
+ sched_show_task(current);
+ }
+ }
/* Complain about tasks blocking the grace period. */
-
rcu_print_detail_task_stall(rsp);
+ rcu_check_gp_kthread_starvation(rsp);
+
force_quiescent_state(rsp); /* Kick them all. */
}
@@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
jiffies - rsp->gp_start,
(long)rsp->gpnum, (long)rsp->completed, totqlen);
+
+ rcu_check_gp_kthread_starvation(rsp);
+
rcu_dump_cpu_stacks(rsp);
raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
/* They had a few time units to dump stack, so complain. */
- print_other_cpu_stall(rsp);
+ print_other_cpu_stall(rsp, gpnum);
}
}
@@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
bool ret;
/* Handle the ends of any preceding grace periods first. */
- if (rdp->completed == rnp->completed) {
+ if (rdp->completed == rnp->completed &&
+ !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
/* No grace period end, so just accelerate recent callbacks. */
ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
}
- if (rdp->gpnum != rnp->gpnum) {
+ if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
/*
* If the current grace period is waiting for this CPU,
* set up to detect a quiescent state, otherwise don't
@@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
rdp->gpnum = rnp->gpnum;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
rdp->passed_quiesce = 0;
+ rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
zero_cpu_stall_ticks(rdp);
+ ACCESS_ONCE(rdp->gpwrap) = false;
}
return ret;
}
@@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_save(flags);
rnp = rdp->mynode;
if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
- rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
+ rdp->completed == ACCESS_ONCE(rnp->completed) &&
+ !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
!raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
local_irq_restore(flags);
return;
@@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
rcu_bind_gp_kthread();
raw_spin_lock_irq(&rnp->lock);
smp_mb__after_unlock_lock();
@@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
rnp->grphi, rnp->qsmask);
raw_spin_unlock_irq(&rnp->lock);
cond_resched_rcu_qs();
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
}
mutex_unlock(&rsp->onoff_mutex);
@@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
unsigned long maxj;
struct rcu_node *rnp = rcu_get_root(rsp);
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
rsp->n_force_qs++;
if (fqs_state == RCU_SAVE_DYNTICK) {
/* Collect dyntick-idle snapshots. */
@@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
raw_spin_lock_irq(&rnp->lock);
smp_mb__after_unlock_lock();
gp_duration = jiffies - rsp->gp_start;
@@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
nocb += rcu_future_gp_cleanup(rsp, rnp);
raw_spin_unlock_irq(&rnp->lock);
cond_resched_rcu_qs();
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
}
rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
@@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
if (rcu_gp_init(rsp))
break;
cond_resched_rcu_qs();
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
ACCESS_ONCE(rsp->gpnum),
@@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
ACCESS_ONCE(rsp->gpnum),
TPS("fqsend"));
cond_resched_rcu_qs();
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
} else {
/* Deal with stray signal. */
cond_resched_rcu_qs();
+ ACCESS_ONCE(rsp->gp_activity) = jiffies;
WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
ACCESS_ONCE(rsp->gpnum),
@@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
- if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
- rnp->completed == rnp->gpnum) {
+ if ((rdp->passed_quiesce == 0 &&
+ rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
+ rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
+ rdp->gpwrap) {
/*
* The grace period in which this quiescent state was
@@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
* within the current grace period.
*/
rdp->passed_quiesce = 0; /* need qs for new gp. */
+ rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
@@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
* Was there a quiescent state since the beginning of the grace
* period? If no, then exit and wait for the next call.
*/
- if (!rdp->passed_quiesce)
+ if (!rdp->passed_quiesce &&
+ rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
return;
/*
@@ -2195,6 +2324,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
}
/*
+ * All CPUs for the specified rcu_node structure have gone offline,
+ * and all tasks that were preempted within an RCU read-side critical
+ * section while running on one of those CPUs have since exited their RCU
+ * read-side critical section. Some other CPU is reporting this fact with
+ * the specified rcu_node structure's ->lock held and interrupts disabled.
+ * This function therefore goes up the tree of rcu_node structures,
+ * clearing the corresponding bits in the ->qsmaskinit fields. Note that
+ * the leaf rcu_node structure's ->qsmaskinit field has already been
+ * updated.
+ *
+ * This function does check that the specified rcu_node structure has
+ * all CPUs offline and no blocked tasks, so it is OK to invoke it
+ * prematurely. That said, invoking it after the fact will cost you
+ * a needless lock acquisition. So once it has done its work, don't
+ * invoke it again.
+ */
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+ long mask;
+ struct rcu_node *rnp = rnp_leaf;
+
+ if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+ return;
+ for (;;) {
+ mask = rnp->grpmask;
+ rnp = rnp->parent;
+ if (!rnp)
+ break;
+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock(); /* GP memory ordering. */
+ rnp->qsmaskinit &= ~mask;
+ if (rnp->qsmaskinit) {
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ return;
+ }
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ }
+}
+
+/*
* The CPU has been completely removed, and some other CPU is reporting
* this fact from process context. Do the remainder of the cleanup,
* including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
- unsigned long mask;
- int need_report = 0;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
@@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
rcu_adopt_orphan_cbs(rsp, flags);
+ raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
- /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
- mask = rdp->grpmask; /* rnp->grplo is constant. */
- do {
- raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- smp_mb__after_unlock_lock();
- rnp->qsmaskinit &= ~mask;
- if (rnp->qsmaskinit != 0) {
- if (rnp != rdp->mynode)
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- break;
- }
- if (rnp == rdp->mynode)
- need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
- else
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- mask = rnp->grpmask;
- rnp = rnp->parent;
- } while (rnp != NULL);
-
- /*
- * We still hold the leaf rcu_node structure lock here, and
- * irqs are still disabled. The reason for this subterfuge is
- * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
- * held leads to deadlock.
- */
- raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
- rnp = rdp->mynode;
- if (need_report & RCU_OFL_TASKS_NORM_GP)
- rcu_report_unblock_qs_rnp(rnp, flags);
- else
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- if (need_report & RCU_OFL_TASKS_EXP_GP)
- rcu_report_exp_rnp(rsp, rnp, true);
+ /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
+ rnp->qsmaskinit &= ~rdp->grpmask;
+ if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
+ rcu_cleanup_dead_rnp(rnp);
+ rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
cpu, rdp->qlen, rdp->nxtlist);
@@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
}
+static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+}
+
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
}
@@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
- rnp = rcu_get_root(rsp);
- if (rnp->qsmask == 0) {
- raw_spin_lock_irqsave(&rnp->lock, flags);
- smp_mb__after_unlock_lock();
- rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
- }
}
/*
@@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
* otherwise wake up the per-CPU kernel kthread. Note that because we
- * are running on the current CPU with interrupts disabled, the
+ * are running on the current CPU with softirqs disabled, the
* rcu_cpu_kthread_task cannot disappear out from under us.
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
- rdp->qs_pending && !rdp->passed_quiesce) {
+ rdp->qs_pending && !rdp->passed_quiesce &&
+ rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
rdp->n_rp_qs_pending++;
- } else if (rdp->qs_pending && rdp->passed_quiesce) {
+ } else if (rdp->qs_pending &&
+ (rdp->passed_quiesce ||
+ rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
rdp->n_rp_report_qs++;
return 1;
}
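
The rcu_qs_ctr_snap comparisons added in this and the earlier hunks give the grace-period machinery a second way to notice a quiescent state: the per-CPU rcu_qs_ctr counter is snapshotted when the CPU starts waiting, and any later change (the tree.h comment below ties the counter to rcu_all_qs() invocations) counts as a quiescent state even if ->passed_quiesce was never set. A stripped-down, plain-C11 analogue of the snapshot-and-compare pattern (illustration only, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* One counter per CPU in the kernel; a single one suffices here. */
	static _Atomic unsigned long qs_ctr;
	static unsigned long qs_ctr_snap;	/* taken when waiting starts */

	static void note_quiescent_point(void)	/* analogue of rcu_all_qs() */
	{
		atomic_fetch_add_explicit(&qs_ctr, 1, memory_order_relaxed);
	}

	static void start_waiting(void)		/* analogue of taking the snapshot */
	{
		qs_ctr_snap = atomic_load_explicit(&qs_ctr, memory_order_relaxed);
	}

	static bool saw_quiescent_state(void)	/* analogue of the new != checks */
	{
		return atomic_load_explicit(&qs_ctr, memory_order_relaxed) != qs_ctr_snap;
	}
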
@@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
}
/* Has a new RCU grace period started? */
- if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
+ if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
+ unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
rdp->n_rp_gp_started++;
return 1;
}
@@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
} else {
_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
rsp->n_barrier_done);
+ smp_mb__before_atomic();
atomic_inc(&rsp->barrier_cpu_count);
__call_rcu(&rdp->barrier_head,
rcu_barrier_callback, rsp, cpu, 0);
@@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave(&rnp->lock, flags);
rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
- init_callback_list(rdp);
- rdp->qlen_lazy = 0;
- ACCESS_ONCE(rdp->qlen) = 0;
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->gpnum = rnp->completed;
rdp->completed = rnp->completed;
rdp->passed_quiesce = 0;
+ rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
rdp->qs_pending = 0;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
}
@@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
static int __init rcu_spawn_gp_kthread(void)
{
unsigned long flags;
+ int kthread_prio_in = kthread_prio;
struct rcu_node *rnp;
struct rcu_state *rsp;
+ struct sched_param sp;
struct task_struct *t;
+ /* Force priority into range. */
+ if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+ kthread_prio = 1;
+ else if (kthread_prio < 0)
+ kthread_prio = 0;
+ else if (kthread_prio > 99)
+ kthread_prio = 99;
+ if (kthread_prio != kthread_prio_in)
+ pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
+ kthread_prio, kthread_prio_in);
+
rcu_scheduler_fully_active = 1;
for_each_rcu_flavor(rsp) {
- t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
+ t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
BUG_ON(IS_ERR(t));
rnp = rcu_get_root(rsp);
raw_spin_lock_irqsave(&rnp->lock, flags);
rsp->gp_kthread = t;
+ if (kthread_prio) {
+ sp.sched_priority = kthread_prio;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ }
+ wake_up_process(t);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
rcu_spawn_nocb_kthreads();
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e7b1843896e..119de399eb2f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,7 +27,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
-#include <linux/irq_work.h>
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
/* queued on this rcu_node structure that */
/* are blocking the current grace period, */
/* there can be no such task. */
- struct completion boost_completion;
- /* Used to ensure that the rt_mutex used */
- /* to carry out the boosting is fully */
- /* released with no future boostee accesses */
- /* before that rt_mutex is re-initialized. */
struct rt_mutex boost_mtx;
/* Used only for the priority-boosting */
/* side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
/* in order to detect GP end. */
unsigned long gpnum; /* Highest gp number that this CPU */
/* is aware of having started. */
+ unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
+ /* for rcu_all_qs() invocations. */
bool passed_quiesce; /* User-mode/idle loop etc. */
bool qs_pending; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
+ bool gpwrap; /* Possible gpnum/completed wrap. */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
#ifdef CONFIG_RCU_NOCB_CPU
struct rcu_head *nocb_head; /* CBs waiting for kthread. */
struct rcu_head **nocb_tail;
- atomic_long_t nocb_q_count; /* # CBs waiting for kthread */
- atomic_long_t nocb_q_count_lazy; /* (approximate). */
+ atomic_long_t nocb_q_count; /* # CBs waiting for nocb */
+ atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
struct rcu_head **nocb_follower_tail;
- atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
- atomic_long_t nocb_follower_count_lazy; /* (approximate). */
- int nocb_p_count; /* # CBs being invoked by kthread */
- int nocb_p_count_lazy; /* (approximate). */
wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
struct task_struct *nocb_kthread;
int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
/* CBs waiting for GP. */
struct rcu_head **nocb_gp_tail;
- long nocb_gp_count;
- long nocb_gp_count_lazy;
bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */
struct rcu_data *nocb_next_follower;
/* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
/* due to no GP active. */
unsigned long gp_start; /* Time at which GP started, */
/* but in jiffies. */
+ unsigned long gp_activity; /* Time of last GP kthread */
+ /* activity in jiffies. */
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
unsigned long jiffies_resched; /* Time at which to resched */
/* a reluctant CPU. */
+ unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
+ /* GP start. */
unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */
const char *name; /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
#define for_each_rcu_flavor(rsp) \
list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
-/* Return values for rcu_preempt_offline_tasks(). */
-
-#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
- /* GP were moved to root. */
-#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
- /* GP were moved to root. */
-
/*
* RCU implementation internal declarations:
*/
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
-long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
- unsigned long flags);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
-#ifdef CONFIG_HOTPLUG_CPU
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
- bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
#endif /* #ifndef RCU_TREE_NONCORE */
#ifdef CONFIG_RCU_TRACE
-#ifdef CONFIG_RCU_NOCB_CPU
-/* Sum up queue lengths for tracing. */
+/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
- *ql = atomic_long_read(&rdp->nocb_q_count) +
- rdp->nocb_p_count +
- atomic_long_read(&rdp->nocb_follower_count) +
- rdp->nocb_p_count + rdp->nocb_gp_count;
- *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
- rdp->nocb_p_count_lazy +
- atomic_long_read(&rdp->nocb_follower_count_lazy) +
- rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
-}
+#ifdef CONFIG_RCU_NOCB_CPU
+ *ql = atomic_long_read(&rdp->nocb_q_count);
+ *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
*ql = 0;
*qll = 0;
-}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+}
#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3ec85cb5d544..0d7bbe3095ad 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -34,10 +34,6 @@
#include "../locking/rtmutex_common.h"
-/* rcuc/rcub kthread realtime priority */
-static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
-module_param(kthread_prio, int, 0644);
-
/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
@@ -53,7 +49,6 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
-static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
/*
@@ -103,6 +98,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state_p = &rcu_preempt_state;
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake);
/*
* Tell them what RCU they are running.
@@ -114,25 +111,6 @@ static void __init rcu_bootup_announce(void)
}
/*
- * Return the number of RCU-preempt batches processed thus far
- * for debug and statistics.
- */
-static long rcu_batches_completed_preempt(void)
-{
- return rcu_preempt_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
-
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
- return rcu_batches_completed_preempt();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
* Record a preemptible-RCU quiescent state for the specified CPU. Note
* that this just means that the task currently running on the CPU is
* not in a quiescent state. There might be any number of tasks blocked
@@ -307,15 +285,25 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
}
/*
+ * Return true if the specified rcu_node structure has tasks that were
+ * preempted within an RCU read-side critical section.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
+{
+ return !list_empty(&rnp->blkd_tasks);
+}
+
+/*
* Handle special cases during rcu_read_unlock(), such as needing to
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
void rcu_read_unlock_special(struct task_struct *t)
{
- int empty;
- int empty_exp;
- int empty_exp_now;
+ bool empty;
+ bool empty_exp;
+ bool empty_norm;
+ bool empty_exp_now;
unsigned long flags;
struct list_head *np;
#ifdef CONFIG_RCU_BOOST
@@ -367,7 +355,8 @@ void rcu_read_unlock_special(struct task_struct *t)
break;
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
- empty = !rcu_preempt_blocked_readers_cgp(rnp);
+ empty = !rcu_preempt_has_tasks(rnp);
+ empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
empty_exp = !rcu_preempted_readers_exp(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
np = rcu_next_node_entry(t, rnp);
@@ -387,13 +376,21 @@ void rcu_read_unlock_special(struct task_struct *t)
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
+ * If this was the last task on the list, go see if we
+ * need to propagate ->qsmaskinit bit clearing up the
+ * rcu_node tree.
+ */
+ if (!empty && !rcu_preempt_has_tasks(rnp))
+ rcu_cleanup_dead_rnp(rnp);
+
+ /*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
* Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
* so we must take a snapshot of the expedited state.
*/
empty_exp_now = !rcu_preempted_readers_exp(rnp);
- if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+ if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
rnp->gpnum,
0, rnp->qsmask,
@@ -408,10 +405,8 @@ void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
- if (drop_boost_mutex) {
+ if (drop_boost_mutex)
rt_mutex_unlock(&rnp->boost_mtx);
- complete(&rnp->boost_completion);
- }
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
@@ -519,99 +514,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
- if (!list_empty(&rnp->blkd_tasks))
+ if (rcu_preempt_has_tasks(rnp))
rnp->gp_tasks = rnp->blkd_tasks.next;
WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Handle tasklist migration for case in which all CPUs covered by the
- * specified rcu_node have gone offline. Move them up to the root
- * rcu_node. The reason for not just moving them to the immediate
- * parent is to remove the need for rcu_read_unlock_special() to
- * make more than two attempts to acquire the target rcu_node's lock.
- * Returns true if there were tasks blocking the current RCU grace
- * period.
- *
- * Returns 1 if there was previously a task blocking the current grace
- * period on the specified rcu_node structure.
- *
- * The caller must hold rnp->lock with irqs disabled.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp)
-{
- struct list_head *lp;
- struct list_head *lp_root;
- int retval = 0;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
- struct task_struct *t;
-
- if (rnp == rnp_root) {
- WARN_ONCE(1, "Last CPU thought to be offlined?");
- return 0; /* Shouldn't happen: at least one CPU online. */
- }
-
- /* If we are on an internal node, complain bitterly. */
- WARN_ON_ONCE(rnp != rdp->mynode);
-
- /*
- * Move tasks up to root rcu_node. Don't try to get fancy for
- * this corner-case operation -- just put this node's tasks
- * at the head of the root node's list, and update the root node's
- * ->gp_tasks and ->exp_tasks pointers to those of this node's,
- * if non-NULL. This might result in waiting for more tasks than
- * absolutely necessary, but this is a good performance/complexity
- * tradeoff.
- */
- if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
- retval |= RCU_OFL_TASKS_NORM_GP;
- if (rcu_preempted_readers_exp(rnp))
- retval |= RCU_OFL_TASKS_EXP_GP;
- lp = &rnp->blkd_tasks;
- lp_root = &rnp_root->blkd_tasks;
- while (!list_empty(lp)) {
- t = list_entry(lp->next, typeof(*t), rcu_node_entry);
- raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
- smp_mb__after_unlock_lock();
- list_del(&t->rcu_node_entry);
- t->rcu_blocked_node = rnp_root;
- list_add(&t->rcu_node_entry, lp_root);
- if (&t->rcu_node_entry == rnp->gp_tasks)
- rnp_root->gp_tasks = rnp->gp_tasks;
- if (&t->rcu_node_entry == rnp->exp_tasks)
- rnp_root->exp_tasks = rnp->exp_tasks;
-#ifdef CONFIG_RCU_BOOST
- if (&t->rcu_node_entry == rnp->boost_tasks)
- rnp_root->boost_tasks = rnp->boost_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
- raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
- }
-
- rnp->gp_tasks = NULL;
- rnp->exp_tasks = NULL;
-#ifdef CONFIG_RCU_BOOST
- rnp->boost_tasks = NULL;
- /*
- * In case root is being boosted and leaf was not. Make sure
- * that we boost the tasks blocking the current grace period
- * in this case.
- */
- raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
- smp_mb__after_unlock_lock();
- if (rnp_root->boost_tasks != NULL &&
- rnp_root->boost_tasks != rnp_root->gp_tasks &&
- rnp_root->boost_tasks != rnp_root->exp_tasks)
- rnp_root->boost_tasks = rnp_root->gp_tasks;
- raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
- return retval;
-}
-
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
@@ -771,7 +680,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
- if (list_empty(&rnp->blkd_tasks)) {
+ if (!rcu_preempt_has_tasks(rnp)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
} else {
rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -933,15 +842,6 @@ static void __init rcu_bootup_announce(void)
}
/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
- return rcu_batches_completed_sched();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
* Because preemptible RCU does not exist, we never have to check for
* CPUs being in quiescent states.
*/
@@ -960,11 +860,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
#ifdef CONFIG_HOTPLUG_CPU
-/* Because preemptible RCU does not exist, no quieting of tasks. */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
- __releases(rnp->lock)
+/*
+ * Because there is no preemptible RCU, there can be no readers blocked.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ return false;
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +897,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
WARN_ON_ONCE(rnp->qsmask);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections, and
- * such non-existent tasks cannot possibly have been blocking the current
- * grace period.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp)
-{
- return 0;
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
/*
* Because preemptible RCU does not exist, it never has any callbacks
* to check.
@@ -1031,20 +915,6 @@ void synchronize_rcu_expedited(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, there is never any need to
- * report on tasks preempted in RCU read-side critical sections during
- * expedited RCU grace periods.
- */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
- bool wake)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
/*
* Because preemptible RCU does not exist, rcu_barrier() is just
* another name for rcu_barrier_sched().
@@ -1080,7 +950,7 @@ void exit_rcu(void)
static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
- if (list_empty(&rnp->blkd_tasks))
+ if (!rcu_preempt_has_tasks(rnp))
rnp->n_balk_blkd_tasks++;
else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +997,8 @@ static int rcu_boost(struct rcu_node *rnp)
struct task_struct *t;
struct list_head *tb;
- if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
+ if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
+ ACCESS_ONCE(rnp->boost_tasks) == NULL)
return 0; /* Nothing left to boost. */
raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1046,11 @@ static int rcu_boost(struct rcu_node *rnp)
*/
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
- init_completion(&rnp->boost_completion);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
/* Lock only for side effect: boosts task t's priority. */
rt_mutex_lock(&rnp->boost_mtx);
rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
- /* Wait for boostee to be done w/boost_mtx before reinitializing. */
- wait_for_completion(&rnp->boost_completion);
-
return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
@@ -1416,12 +1283,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
if ((mask & 0x1) && cpu != outgoingcpu)
cpumask_set_cpu(cpu, cm);
- if (cpumask_weight(cm) == 0) {
+ if (cpumask_weight(cm) == 0)
cpumask_setall(cm);
- for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
- cpumask_clear_cpu(cpu, cm);
- WARN_ON_ONCE(cpumask_weight(cm) == 0);
- }
set_cpus_allowed_ptr(t, cm);
free_cpumask_var(cm);
}
@@ -1446,12 +1309,8 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- rnp = rcu_get_root(rcu_state_p);
- (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
- if (NUM_RCU_NODES > 1) {
- rcu_for_each_leaf_node(rcu_state_p, rnp)
- (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
- }
+ rcu_for_each_leaf_node(rcu_state_p, rnp)
+ (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1464,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
* completed since we last checked and there are
* callbacks not yet ready to invoke.
*/
- if (rdp->completed != rnp->completed &&
+ if ((rdp->completed != rnp->completed ||
+ unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
note_gp_changes(rsp, rdp);
@@ -1898,11 +1758,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
ticks_value = rsp->gpnum - rdp->gpnum;
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
- pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
+ pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
cpu, ticks_value, ticks_title,
atomic_read(&rdtp->dynticks) & 0xfff,
rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+ ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
fast_no_hz);
}
@@ -2056,9 +1917,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ unsigned long ret;
+#ifdef CONFIG_PROVE_RCU
struct rcu_head *rhp;
+#endif /* #ifdef CONFIG_PROVE_RCU */
- /* No-CBs CPUs might have callbacks on any of three lists. */
+ /*
+ * Check count of all no-CBs callbacks awaiting invocation.
+ * There needs to be a barrier before this function is called,
+ * but associated with a prior determination that no more
+ * callbacks would be posted. In the worst case, the first
+ * barrier in _rcu_barrier() suffices (but the caller cannot
+ * necessarily rely on this; it is not a substitute for the caller
+ * getting the concurrency design right!). There must also be
+ * a barrier between the following load and posting of a callback
+ * (if a callback is in fact needed). This is associated with an
+ * atomic_inc() in the caller.
+ */
+ ret = atomic_long_read(&rdp->nocb_q_count);
+
+#ifdef CONFIG_PROVE_RCU
rhp = ACCESS_ONCE(rdp->nocb_head);
if (!rhp)
rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1950,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
cpu, rhp->func);
WARN_ON_ONCE(1);
}
+#endif /* #ifdef CONFIG_PROVE_RCU */
- return !!rhp;
+ return !!ret;
}
/*
@@ -2095,9 +1974,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
struct task_struct *t;
/* Enqueue the callback on the nocb list and update counts. */
+ atomic_long_add(rhcount, &rdp->nocb_q_count);
+ /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
old_rhpp = xchg(&rdp->nocb_tail, rhtp);
ACCESS_ONCE(*old_rhpp) = rhp;
- atomic_long_add(rhcount, &rdp->nocb_q_count);
atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
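
The comment added just above ("rcu_barrier() relies on ->nocb_q_count add before xchg") is the whole point of moving the atomic_long_add() ahead of the list update: the count must become visible no later than the callback itself. A rough plain-C11 analogue of the pattern (not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic long pending_cbs;	/* analogue of ->nocb_q_count */
	static void *_Atomic cb_list;		/* analogue of the published callback list */

	static void enqueue_cb(void *cb)
	{
		/* Count first, publish second: a thread that observes the
		 * callback via an acquire load is then guaranteed to also
		 * observe a nonzero count. */
		atomic_fetch_add_explicit(&pending_cbs, 1, memory_order_relaxed);
		atomic_store_explicit(&cb_list, cb, memory_order_release);
	}

	static bool cbs_pending(void)
	{
		/* A single counter read replaces the old sum over per-stage
		 * counters, as in the reworked rcu_nocb_cpu_needs_barrier(). */
		return atomic_load_explicit(&pending_cbs, memory_order_relaxed) != 0;
	}
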
@@ -2288,9 +2168,6 @@ wait_again:
/* Move callbacks to wait-for-GP list, which is empty. */
ACCESS_ONCE(rdp->nocb_head) = NULL;
rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
- rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
- rdp->nocb_gp_count_lazy =
- atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
gotcbs = true;
}
@@ -2338,9 +2215,6 @@ wait_again:
/* Append callbacks to follower's "done" list. */
tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
*tail = rdp->nocb_gp_head;
- atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
- atomic_long_add(rdp->nocb_gp_count_lazy,
- &rdp->nocb_follower_count_lazy);
smp_mb__after_atomic(); /* Store *tail before wakeup. */
if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
/*
@@ -2415,13 +2289,11 @@ static int rcu_nocb_kthread(void *arg)
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
- c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
- cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
- rdp->nocb_p_count += c;
- rdp->nocb_p_count_lazy += cl;
/* Each pass through the following loop invokes a callback. */
- trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
+ trace_rcu_batch_start(rdp->rsp->name,
+ atomic_long_read(&rdp->nocb_q_count_lazy),
+ atomic_long_read(&rdp->nocb_q_count), -1);
c = cl = 0;
while (list) {
next = list->next;
@@ -2443,9 +2315,9 @@ static int rcu_nocb_kthread(void *arg)
list = next;
}
trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
- rdp->nocb_p_count_lazy - cl;
+ smp_mb__before_atomic(); /* _add after CB invocation. */
+ atomic_long_add(-c, &rdp->nocb_q_count);
+ atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
rdp->n_nocbs_invoked += c;
}
return 0;
@@ -2513,8 +2385,8 @@ void __init rcu_init_nohz(void)
cpumask_and(rcu_nocb_mask, cpu_possible_mask,
rcu_nocb_mask);
}
- cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
- pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+ pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
+ cpumask_pr_args(rcu_nocb_mask));
if (rcu_nocb_poll)
pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 5cdc62e1beeb..fbb6240509ea 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -46,6 +46,8 @@
#define RCU_TREE_NONCORE
#include "tree.h"
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+
static int r_open(struct inode *inode, struct file *file,
const struct seq_operations *op)
{
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
if (!rdp->beenonline)
return;
- seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d",
+ seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
ulong2long(rdp->completed), ulong2long(rdp->gpnum),
- rdp->passed_quiesce, rdp->qs_pending);
+ rdp->passed_quiesce,
+ rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
+ rdp->qs_pending);
seq_printf(m, " dt=%d/%llx/%d df=%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
diff --git a/kernel/resource.c b/kernel/resource.c
index 0bcebffc4e77..19f2357dfda3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
+#include <linux/resource_ext.h>
#include <asm/io.h>
@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr)
return err;
}
+struct resource_entry *resource_list_create_entry(struct resource *res,
+ size_t extra_size)
+{
+ struct resource_entry *entry;
+
+ entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
+ if (entry) {
+ INIT_LIST_HEAD(&entry->node);
+ entry->res = res ? res : &entry->__res;
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL(resource_list_create_entry);
+
+void resource_list_free(struct list_head *head)
+{
+ struct resource_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, head, node)
+ resource_list_destroy_entry(entry);
+}
+EXPORT_SYMBOL(resource_list_free);
+
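
resource_list_create_entry() and resource_list_free() give drivers a generic way to build and tear down lists of struct resource_entry (the type comes from the newly included resource_ext.h). A usage sketch, assuming the inline list helpers declared in that header (resource_list_add_tail(), resource_list_destroy_entry()); the address values are invented for illustration:

	static int build_window_list(struct list_head *resources)
	{
		struct resource_entry *entry;

		/* A NULL resource makes the entry use its embedded __res. */
		entry = resource_list_create_entry(NULL, 0);
		if (!entry)
			return -ENOMEM;

		entry->res->start = 0xfed00000;
		entry->res->end   = 0xfed00fff;
		entry->res->flags = IORESOURCE_MEM;
		resource_list_add_tail(entry, resources);
		return 0;
	}

	/* ...and later, drop every entry in one call: */
	/*	resource_list_free(resources);	*/
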
static int __init strict_iomem(char *str)
{
if (strstr(str, "relaxed"))
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index ab32b7b0db5c..46be87024875 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -1,5 +1,5 @@
ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_clock.o = -pg
+CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
endif
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c27e4f8f4879..c0a205101c23 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -420,3 +420,16 @@ u64 local_clock(void)
EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);
+
+/*
+ * Running clock - returns the time that has elapsed while a guest has been
+ * running.
+ * On a guest this value should be local_clock minus the time the guest was
+ * suspended by the hypervisor (for any reason).
+ * On bare metal this function should return the same as local_clock.
+ * Architectures and sub-architectures can override this.
+ */
+u64 __weak running_clock(void)
+{
+ return local_clock();
+}
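
A hypothetical override of the new __weak hook might look as follows; hv_suspended_ns() is a made-up accessor for the time the hypervisor kept the guest suspended, not a real API, and the sketch only shows the intended division of labour between local_clock() and an architecture's running_clock():

	extern u64 hv_suspended_ns(void);	/* placeholder, not a real interface */

	u64 running_clock(void)
	{
		/* Report only the time the guest was actually running. */
		return local_clock() - hv_suspended_ns();
	}
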
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 607f852b4d04..7052d3fd4e7b 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -268,6 +268,15 @@ bool try_wait_for_completion(struct completion *x)
unsigned long flags;
int ret = 1;
+ /*
+ * Since x->done will need to be locked only
+ * in the non-blocking case, we check x->done
+ * first without taking the lock so we can
+ * return early in the blocking case.
+ */
+ if (!ACCESS_ONCE(x->done))
+ return 0;
+
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
@@ -288,13 +297,6 @@ EXPORT_SYMBOL(try_wait_for_completion);
*/
bool completion_done(struct completion *x)
{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&x->wait.lock, flags);
- if (!x->done)
- ret = 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return ret;
+ return !!ACCESS_ONCE(x->done);
}
EXPORT_SYMBOL(completion_done);
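
Both changes implement the same idea: completion_done() and the new fast path in try_wait_for_completion() peek at x->done without the wait-queue lock and only fall back to the locked path when they actually have to modify state. A rough userspace analogue of the pattern (plain C11/pthreads, not the kernel implementation):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <pthread.h>

	struct completion_like {
		atomic_uint done;
		pthread_mutex_t lock;	/* protects consuming ->done */
	};

	static bool completion_like_done(struct completion_like *x)
	{
		/* A snapshot is all the caller gets, lock or no lock. */
		return atomic_load(&x->done) != 0;
	}

	static bool try_wait_like(struct completion_like *x)
	{
		bool ret = true;

		if (!atomic_load(&x->done))	/* lockless early exit */
			return false;

		pthread_mutex_lock(&x->lock);
		if (!atomic_load(&x->done))
			ret = false;
		else
			atomic_fetch_sub(&x->done, 1);
		pthread_mutex_unlock(&x->lock);
		return ret;
	}
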
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c0accc00566e..13049aac05a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
{
s64 delta;
- if (rq->skip_clock_update > 0)
+ lockdep_assert_held(&rq->lock);
+
+ if (rq->clock_skip_update & RQCF_ACT_SKIP)
return;
delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -490,6 +492,11 @@ static __init void init_hrtick(void)
*/
void hrtick_start(struct rq *rq, u64 delay)
{
+ /*
+	 * Don't schedule slices shorter than 10000 ns; that just
+ * doesn't make sense. Rely on vruntime for fairness.
+ */
+ delay = max_t(u64, delay, 10000LL);
__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
@@ -1046,7 +1053,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
* this case, we can save a useless back to back clock update.
*/
if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
- rq->skip_clock_update = 1;
+ rq_clock_skip_update(rq, true);
}
#ifdef CONFIG_SMP
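
The old tri-state rq->skip_clock_update (1 = skip the next update, -1 = force one) becomes a small request/active bitmask: callers such as check_preempt_curr() above only request a skip, and __schedule() promotes the request to an active skip (the "rq->clock_skip_update <<= 1" further down) before clearing the field once the next task has been picked. The flag values and the rq_clock_skip_update() helper live in sched.h, which is not part of the hunks shown; they are presumably along these lines (sketch, not taken from this diff):

	#define RQCF_REQ_SKIP	0x01	/* a clock-update skip was requested */
	#define RQCF_ACT_SKIP	0x02	/* the skip is active for this update */

	static inline void rq_clock_skip_update(struct rq *rq, bool skip)
	{
		lockdep_assert_held(&rq->lock);
		if (skip)
			rq->clock_skip_update |= RQCF_REQ_SKIP;
		else
			rq->clock_skip_update &= ~RQCF_REQ_SKIP;
	}
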
@@ -1082,7 +1089,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
- perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+ perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
}
__set_task_cpu(p, new_cpu);
@@ -1814,6 +1821,10 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_period = 0;
dl_se->flags = 0;
dl_se->dl_bw = 0;
+
+ dl_se->dl_throttled = 0;
+ dl_se->dl_new = 1;
+ dl_se->dl_yielded = 0;
}
/*
@@ -1832,6 +1843,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+#ifdef CONFIG_SMP
+ p->se.avg.decay_count = 0;
+#endif
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_SCHEDSTATS
@@ -1839,7 +1853,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
#endif
RB_CLEAR_NODE(&p->dl.rb_node);
- hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ init_dl_task_timer(&p->dl);
__dl_clear_params(p);
INIT_LIST_HEAD(&p->rt.run_list);
@@ -2049,6 +2063,9 @@ static inline int dl_bw_cpus(int i)
* allocated bandwidth to reflect the new situation.
*
* This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
*/
static int dl_overflow(struct task_struct *p, int policy,
const struct sched_attr *attr)
@@ -2748,6 +2765,10 @@ again:
* - explicit schedule() call
* - return from syscall or exception to user-space
* - return from interrupt-handler to user-space
+ *
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
*/
static void __sched __schedule(void)
{
@@ -2756,7 +2777,6 @@ static void __sched __schedule(void)
struct rq *rq;
int cpu;
-need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -2776,6 +2796,8 @@ need_resched:
smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
+ rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2800,13 +2822,13 @@ need_resched:
switch_count = &prev->nvcsw;
}
- if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
+ if (task_on_rq_queued(prev))
update_rq_clock(rq);
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
- rq->skip_clock_update = 0;
+ rq->clock_skip_update = 0;
if (likely(prev != next)) {
rq->nr_switches++;
@@ -2821,8 +2843,6 @@ need_resched:
post_schedule(rq);
sched_preempt_enable_no_resched();
- if (need_resched())
- goto need_resched;
}
static inline void sched_submit_work(struct task_struct *tsk)
@@ -2842,7 +2862,9 @@ asmlinkage __visible void __sched schedule(void)
struct task_struct *tsk = current;
sched_submit_work(tsk);
- __schedule();
+ do {
+ __schedule();
+ } while (need_resched());
}
EXPORT_SYMBOL(schedule);
@@ -2877,6 +2899,21 @@ void __sched schedule_preempt_disabled(void)
preempt_disable();
}
+static void preempt_schedule_common(void)
+{
+ do {
+ __preempt_count_add(PREEMPT_ACTIVE);
+ __schedule();
+ __preempt_count_sub(PREEMPT_ACTIVE);
+
+ /*
+ * Check again in case we missed a preemption opportunity
+ * between schedule and now.
+ */
+ barrier();
+ } while (need_resched());
+}
+
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
@@ -2892,17 +2929,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
if (likely(!preemptible()))
return;
- do {
- __preempt_count_add(PREEMPT_ACTIVE);
- __schedule();
- __preempt_count_sub(PREEMPT_ACTIVE);
-
- /*
- * Check again in case we missed a preemption opportunity
- * between schedule and now.
- */
- barrier();
- } while (need_resched());
+ preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
@@ -3251,15 +3278,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
- init_dl_task_timer(dl_se);
dl_se->dl_runtime = attr->sched_runtime;
dl_se->dl_deadline = attr->sched_deadline;
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
- dl_se->dl_throttled = 0;
- dl_se->dl_new = 1;
- dl_se->dl_yielded = 0;
+
+ /*
+ * Changing the parameters of a task is 'tricky' and we're not doing
+ * the correct thing -- also see task_dead_dl() and switched_from_dl().
+ *
+ * What we SHOULD do is delay the bandwidth release until the 0-lag
+ * point. This would include retaining the task_struct until that time
+	 * and changing dl_overflow() to not immediately decrement the current
+ * amount.
+ *
+ * Instead we retain the current runtime/deadline and let the new
+ * parameters take effect after the current reservation period lapses.
+ * This is safe (albeit pessimistic) because the 0-lag point is always
+ * before the current scheduling deadline.
+ *
+ * We can still have temporary overloads because we do not delay the
+ * change in bandwidth until that time; so admission control is
+ * not on the safe side. It does however guarantee tasks will never
+ * consume more than promised.
+ */
}
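
For reference, the 0-lag point this comment (and the XXX notes in deadline.c below) keeps referring to is the instant at which the task's lag reaches zero. With remaining runtime r, current deadline d and reserved bandwidth Q/P (dl_runtime over dl_period), it works out to

	t0 = d - r * P / Q

which can never be later than d itself, since r >= 0; that is the "0-lag point is always before the current scheduling deadline" property the comment leans on. (The formula is standard CBS background, not something introduced by this patch.)
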
/*
@@ -3382,6 +3425,20 @@ static bool check_same_owner(struct task_struct *p)
return match;
}
+static bool dl_param_changed(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ struct sched_dl_entity *dl_se = &p->dl;
+
+ if (dl_se->dl_runtime != attr->sched_runtime ||
+ dl_se->dl_deadline != attr->sched_deadline ||
+ dl_se->dl_period != attr->sched_period ||
+ dl_se->flags != attr->sched_flags)
+ return true;
+
+ return false;
+}
+
static int __sched_setscheduler(struct task_struct *p,
const struct sched_attr *attr,
bool user)
@@ -3510,7 +3567,7 @@ recheck:
goto change;
if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
goto change;
- if (dl_policy(policy))
+ if (dl_policy(policy) && dl_param_changed(p, attr))
goto change;
p->sched_reset_on_fork = reset_on_fork;
@@ -4202,17 +4259,10 @@ SYSCALL_DEFINE0(sched_yield)
return 0;
}
-static void __cond_resched(void)
-{
- __preempt_count_add(PREEMPT_ACTIVE);
- __schedule();
- __preempt_count_sub(PREEMPT_ACTIVE);
-}
-
int __sched _cond_resched(void)
{
if (should_resched()) {
- __cond_resched();
+ preempt_schedule_common();
return 1;
}
return 0;
@@ -4237,7 +4287,7 @@ int __cond_resched_lock(spinlock_t *lock)
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
- __cond_resched();
+ preempt_schedule_common();
else
cpu_relax();
ret = 1;
@@ -4253,7 +4303,7 @@ int __sched __cond_resched_softirq(void)
if (should_resched()) {
local_bh_enable();
- __cond_resched();
+ preempt_schedule_common();
local_bh_disable();
return 1;
}
@@ -4508,9 +4558,10 @@ void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
int ppid;
- unsigned state;
+ unsigned long state = p->state;
- state = p->state ? __ffs(p->state) + 1 : 0;
+ if (state)
+ state = __ffs(state) + 1;
printk(KERN_INFO "%-15.15s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
@@ -4642,6 +4693,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
struct dl_bw *cur_dl_b;
unsigned long flags;
+ if (!cpumask_weight(cur))
+ return ret;
+
rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur));
trial_cpus = cpumask_weight(trial);
@@ -4740,7 +4794,7 @@ static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
- if (p->sched_class && p->sched_class->set_cpus_allowed)
+ if (p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
cpumask_copy(&p->cpus_allowed, new_mask);
@@ -5408,9 +5462,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
struct cpumask *groupmask)
{
struct sched_group *group = sd->groups;
- char str[256];
- cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
cpumask_clear(groupmask);
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -5423,7 +5475,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
return -1;
}
- printk(KERN_CONT "span %s level %s\n", str, sd->name);
+ printk(KERN_CONT "span %*pbl level %s\n",
+ cpumask_pr_args(sched_domain_span(sd)), sd->name);
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -5468,9 +5521,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpumask_or(groupmask, groupmask, sched_group_cpus(group));
- cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-
- printk(KERN_CONT " %s", str);
+ printk(KERN_CONT " %*pbl",
+ cpumask_pr_args(sched_group_cpus(group)));
if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
printk(KERN_CONT " (cpu_capacity = %d)",
group->sgc->capacity);
@@ -7250,6 +7302,11 @@ void __init sched_init(void)
enter_lazy_tlb(&init_mm, current);
/*
+ * During early bootup we pretend to be a normal task:
+ */
+ current->sched_class = &fair_sched_class;
+
+ /*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread, however somewhere below it might be,
* but because we are the idle thread, we just pick up running again
@@ -7259,11 +7316,6 @@ void __init sched_init(void)
calc_load_update = jiffies + LOAD_FREQ;
- /*
- * During early bootup we pretend to be a normal task:
- */
- current->sched_class = &fair_sched_class;
-
#ifdef CONFIG_SMP
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
/* May be allocated at isolcpus cmdline parse time */
@@ -7292,13 +7344,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
* since we will exit with TASK_RUNNING make sure we enter with it,
* otherwise we will destroy state.
*/
- if (WARN_ONCE(current->state != TASK_RUNNING,
+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
"do not call blocking ops when !TASK_RUNNING; "
"state=%lx set at [<%p>] %pS\n",
current->state,
(void *)current->task_state_change,
- (void *)current->task_state_change))
- __set_current_state(TASK_RUNNING);
+ (void *)current->task_state_change);
___might_sleep(file, line, preempt_offset);
}
@@ -7325,6 +7376,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
in_atomic(), irqs_disabled(),
current->pid, current->comm);
+ if (task_stack_end_corrupted(current))
+ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 539ca3ce071b..c6acb07466bb 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -107,7 +107,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
int best_cpu = -1;
const struct sched_dl_entity *dl_se = &p->dl;
- if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
+ if (later_mask &&
+ cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
best_cpu = cpumask_any(later_mask);
goto out;
} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
@@ -186,6 +187,26 @@ out:
}
/*
+ * cpudl_set_freecpu - Set the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_set_freecpu(struct cpudl *cp, int cpu)
+{
+ cpumask_set_cpu(cpu, cp->free_cpus);
+}
+
+/*
+ * cpudl_clear_freecpu - Clear the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
+{
+ cpumask_clear_cpu(cpu, cp->free_cpus);
+}
+
+/*
* cpudl_init - initialize the cpudl structure
* @cp: the cpudl max-heap context
*/
@@ -203,7 +224,7 @@ int cpudl_init(struct cpudl *cp)
if (!cp->elements)
return -ENOMEM;
- if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
kfree(cp->elements);
return -ENOMEM;
}
@@ -211,8 +232,6 @@ int cpudl_init(struct cpudl *cp)
for_each_possible_cpu(i)
cp->elements[i].idx = IDX_INVALID;
- cpumask_setall(cp->free_cpus);
-
return 0;
}
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index 020039bd1326..1a0a6ef2fbe1 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -24,6 +24,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
struct cpumask *later_mask);
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
int cpudl_init(struct cpudl *cp);
+void cpudl_set_freecpu(struct cpudl *cp, int cpu);
+void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
void cpudl_cleanup(struct cpudl *cp);
#endif /* CONFIG_SMP */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b52092f2636d..a027799ae130 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
+
+ if (dl_se->dl_yielded)
+ dl_se->dl_yielded = 0;
+ if (dl_se->dl_throttled)
+ dl_se->dl_throttled = 0;
}
/*
@@ -536,23 +541,19 @@ again:
sched_clock_tick();
update_rq_clock(rq);
- dl_se->dl_throttled = 0;
- dl_se->dl_yielded = 0;
- if (task_on_rq_queued(p)) {
- enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
- if (dl_task(rq->curr))
- check_preempt_curr_dl(rq, p, 0);
- else
- resched_curr(rq);
+ enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+ if (dl_task(rq->curr))
+ check_preempt_curr_dl(rq, p, 0);
+ else
+ resched_curr(rq);
#ifdef CONFIG_SMP
- /*
- * Queueing this task back might have overloaded rq,
- * check if we need to kick someone away.
- */
- if (has_pushable_dl_tasks(rq))
- push_dl_task(rq);
+ /*
+ * Queueing this task back might have overloaded rq,
+ * check if we need to kick someone away.
+ */
+ if (has_pushable_dl_tasks(rq))
+ push_dl_task(rq);
#endif
- }
unlock:
raw_spin_unlock(&rq->lock);
@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
if (dl_runtime_exceeded(rq, dl_se)) {
+ dl_se->dl_throttled = 1;
__dequeue_task_dl(rq, curr, 0);
- if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
- dl_se->dl_throttled = 1;
- else
+ if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl))
@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* its rq, the bandwidth timer callback (which clearly has not
* run yet) will take care of this.
*/
- if (p->dl.dl_throttled)
+ if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
return;
enqueue_dl_entity(&p->dl, pi_se, flags);
@@ -1073,7 +1073,13 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
- if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
+ /*
+ * Even when we have runtime, update_curr_dl() might have resulted in us
+ * not being the leftmost task anymore. In that case NEED_RESCHED will
+ * be set and schedule() will start a new hrtick for the next task.
+ */
+ if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+ is_leftmost(p, &rq->dl))
start_hrtick_dl(rq, p);
}
@@ -1094,6 +1100,7 @@ static void task_dead_dl(struct task_struct *p)
* Since we are TASK_DEAD we won't slip out of the domain!
*/
raw_spin_lock_irq(&dl_b->lock);
+ /* XXX we should retain the bw until 0-lag */
dl_b->total_bw -= p->dl.dl_bw;
raw_spin_unlock_irq(&dl_b->lock);
@@ -1165,9 +1172,6 @@ static int find_later_rq(struct task_struct *task)
* We have to consider system topology and task affinity
* first, then we can look for a suitable cpu.
*/
- cpumask_copy(later_mask, task_rq(task)->rd->span);
- cpumask_and(later_mask, later_mask, cpu_active_mask);
- cpumask_and(later_mask, later_mask, &task->cpus_allowed);
best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
task, later_mask);
if (best_cpu == -1)
@@ -1562,6 +1566,7 @@ static void rq_online_dl(struct rq *rq)
if (rq->dl.overloaded)
dl_set_overload(rq);
+ cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
if (rq->dl.dl_nr_running > 0)
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}
@@ -1573,6 +1578,7 @@ static void rq_offline_dl(struct rq *rq)
dl_clear_overload(rq);
cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+ cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void init_sched_dl_class(void)
@@ -1614,8 +1620,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
+ /* XXX we should retain the bw until 0-lag */
cancel_dl_timer(rq, p);
-
__dl_clear_params(p);
/*
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 92cc52001e74..8baaf858d25c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -305,6 +305,7 @@ do { \
PN(next_balance);
SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
PN(clock);
+ PN(clock_task);
P(cpu_load[0]);
P(cpu_load[1]);
P(cpu_load[2]);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40667cbf371b..7ce18f3c097a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p)
{
u32 slice;
- p->se.avg.decay_count = 0;
slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
p->se.avg.runnable_avg_sum = slice;
p->se.avg.runnable_avg_period = slice;
@@ -1730,7 +1729,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
nodes = node_online_map;
for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
unsigned long max_faults = 0;
- nodemask_t max_group;
+ nodemask_t max_group = NODE_MASK_NONE;
int a, b;
/* Are there nodes at this distance from each other? */
@@ -2574,11 +2573,11 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
u64 decays = atomic64_read(&cfs_rq->decay_counter);
decays -= se->avg.decay_count;
+ se->avg.decay_count = 0;
if (!decays)
return 0;
se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
- se->avg.decay_count = 0;
return decays;
}
@@ -5157,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
- rq->skip_clock_update = 1;
+ rq_clock_skip_update(rq, true);
}
set_skip_buddy(se);
@@ -5949,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
*/
age_stamp = ACCESS_ONCE(rq->age_stamp);
avg = ACCESS_ONCE(rq->rt_avg);
+ delta = __rq_clock_broken(rq) - age_stamp;
- delta = rq_clock(rq) - age_stamp;
if (unlikely(delta < 0))
delta = 0;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c47fce75e666..aaf1c1d5cf5d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -47,7 +47,8 @@ static inline int cpu_idle_poll(void)
rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
- while (!tif_need_resched())
+ while (!tif_need_resched() &&
+ (cpu_idle_force_poll || tick_check_broadcast_expired()))
cpu_relax();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
rcu_idle_exit();
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ee15f5a0d1c1..f4d4b077eba0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
enqueue = 1;
/*
- * Force a clock update if the CPU was idle,
- * lest wakeup -> unthrottle time accumulate.
+ * When we're idle and a woken (rt) task is
+ * throttled check_preempt_curr() will set
+ * skip_update and the time between the wakeup
+ * and this unthrottle will get accounted as
+ * 'runtime'.
*/
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
- rq->skip_clock_update = -1;
+ rq_clock_skip_update(rq, false);
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
@@ -1337,7 +1340,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
curr->prio <= p->prio)) {
int target = find_lowest_rq(p);
- if (target != -1)
+ /*
+ * Don't bother moving it if the destination CPU is
+ * not running a lower priority task.
+ */
+ if (target != -1 &&
+ p->prio < cpu_rq(target)->rt.highest_prio.curr)
cpu = target;
}
rcu_read_unlock();
@@ -1614,6 +1622,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
lowest_rq = cpu_rq(cpu);
+ if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+ /*
+ * Target rq has tasks of equal or higher priority,
+ * retrying does not release any lock and is unlikely
+ * to yield a different result.
+ */
+ lowest_rq = NULL;
+ break;
+ }
+
/* if the prio of this runqueue changed, try again */
if (double_lock_balance(rq, lowest_rq)) {
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c970e7..0870db23d79c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -558,8 +558,6 @@ struct rq {
#ifdef CONFIG_NO_HZ_FULL
unsigned long last_sched_tick;
#endif
- int skip_clock_update;
-
/* capture load from *all* tasks on this cpu: */
struct load_weight load;
unsigned long nr_load_updates;
@@ -588,6 +586,7 @@ struct rq {
unsigned long next_balance;
struct mm_struct *prev_mm;
+ unsigned int clock_skip_update;
u64 clock;
u64 clock_task;
@@ -687,16 +686,35 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+ return ACCESS_ONCE(rq->clock);
+}
+
static inline u64 rq_clock(struct rq *rq)
{
+ lockdep_assert_held(&rq->lock);
return rq->clock;
}
static inline u64 rq_clock_task(struct rq *rq)
{
+ lockdep_assert_held(&rq->lock);
return rq->clock_task;
}
+#define RQCF_REQ_SKIP 0x01
+#define RQCF_ACT_SKIP 0x02
+
+static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+{
+ lockdep_assert_held(&rq->lock);
+ if (skip)
+ rq->clock_skip_update |= RQCF_REQ_SKIP;
+ else
+ rq->clock_skip_update &= ~RQCF_REQ_SKIP;
+}
+
#ifdef CONFIG_NUMA
enum numa_topology_type {
NUMA_DIRECT,
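The sched.h hunk above replaces the old rq->skip_clock_update integer with a two-bit clock_skip_update mask (RQCF_REQ_SKIP / RQCF_ACT_SKIP) plus the rq_clock_skip_update() helper that yield_task_fair() and do_sched_rt_period_timer() now call. A minimal sketch of the caller side, using only the helper defined above (the function name here is illustrative, not part of the patch); rq->lock must be held:

static void example_toggle_clock_skip(struct rq *rq, bool fastpath)
{
	/*
	 * Sketch only. rq_clock_skip_update() itself asserts rq->lock,
	 * so this must run under the runqueue lock.
	 */
	if (fastpath)
		rq_clock_skip_update(rq, true);   /* set RQCF_REQ_SKIP: next clock update may be skipped */
	else
		rq_clock_skip_update(rq, false);  /* clear RQCF_REQ_SKIP: force a real clock update */
}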
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index a476bea17fbc..87e2c9f0c33e 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -15,11 +15,6 @@
static int show_schedstat(struct seq_file *seq, void *v)
{
int cpu;
- int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
- char *mask_str = kmalloc(mask_len, GFP_KERNEL);
-
- if (mask_str == NULL)
- return -ENOMEM;
if (v == (void *)1) {
seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
@@ -50,9 +45,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
for_each_domain(cpu, sd) {
enum cpu_idle_type itype;
- cpumask_scnprintf(mask_str, mask_len,
- sched_domain_span(sd));
- seq_printf(seq, "domain%d %s", dcount++, mask_str);
+ seq_printf(seq, "domain%d %*pb", dcount++,
+ cpumask_pr_args(sched_domain_span(sd)));
for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
itype++) {
seq_printf(seq, " %u %u %u %u %u %u %u %u",
@@ -76,7 +70,6 @@ static int show_schedstat(struct seq_file *seq, void *v)
rcu_read_unlock();
#endif
}
- kfree(mask_str);
return 0;
}
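show_schedstat() above drops its temporary kmalloc'd string in favour of the %*pb printf extension paired with cpumask_pr_args(); the same idiom, with the "l" suffix for a CPU list, replaces cpulist_scnprintf() in the NO_HZ code further down. A small illustration, assuming an arbitrary mask supplied by the caller:

#include <linux/cpumask.h>
#include <linux/seq_file.h>

/* Illustrative only: print a cpumask with no intermediate buffer. */
static void example_show_mask(struct seq_file *m, const struct cpumask *mask)
{
	seq_printf(m, "bitmap: %*pb\n",  cpumask_pr_args(mask));  /* hex bitmap, e.g. ff  */
	seq_printf(m, "list:   %*pbl\n", cpumask_pr_args(mask));  /* CPU list,   e.g. 0-7 */
}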
diff --git a/kernel/signal.c b/kernel/signal.c
index 16a305295256..33a52759cc0e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2501,7 +2501,7 @@ EXPORT_SYMBOL(unblock_all_signals);
*/
SYSCALL_DEFINE0(restart_syscall)
{
- struct restart_block *restart = &current_thread_info()->restart_block;
+ struct restart_block *restart = &current->restart_block;
return restart->fn(restart);
}
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index f032fb5284e3..40190f28db35 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
unsigned int cpu;
int ret = 0;
+ get_online_cpus();
mutex_lock(&smpboot_threads_lock);
for_each_online_cpu(cpu) {
ret = __smpboot_create_thread(plug_thread, cpu);
@@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
list_add(&plug_thread->list, &hotplug_threads);
out:
mutex_unlock(&smpboot_threads_lock);
+ put_online_cpus();
return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 501baa9ac1be..479e4436f787 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -114,8 +114,12 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
- if (preempt_count() == cnt)
+ if (preempt_count() == cnt) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
+#endif
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -656,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu)
* in the task stack here.
*/
__do_softirq();
- rcu_note_context_switch();
local_irq_enable();
- cond_resched();
+ cond_resched_rcu_qs();
return;
}
local_irq_enable();
diff --git a/kernel/sys.c b/kernel/sys.c
index a8c9f5a7dda6..ea9c88109894 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
up_write(&me->mm->mmap_sem);
break;
case PR_MPX_ENABLE_MANAGEMENT:
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
error = MPX_ENABLE_MANAGEMENT(me);
break;
case PR_MPX_DISABLE_MANAGEMENT:
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
error = MPX_DISABLE_MANAGEMENT(me);
break;
default:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 137c7f69b264..88ea2d6e0031 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1248,7 +1248,6 @@ static struct ctl_table vm_table[] = {
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = hugetlb_sysctl_handler,
- .extra1 = &zero,
},
#ifdef CONFIG_NUMA
{
@@ -1257,7 +1256,6 @@ static struct ctl_table vm_table[] = {
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = &hugetlb_mempolicy_sysctl_handler,
- .extra1 = &zero,
},
#endif
{
@@ -1280,7 +1278,6 @@ static struct ctl_table vm_table[] = {
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = hugetlb_overcommit_handler,
- .extra1 = &zero,
},
#endif
{
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 670fff88a961..21f82c29c914 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -111,13 +111,8 @@ static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
void *reply = genlmsg_data(genlhdr);
- int rc;
- rc = genlmsg_end(skb, reply);
- if (rc < 0) {
- nlmsg_free(skb);
- return rc;
- }
+ genlmsg_end(skb, reply);
return genlmsg_reply(skb, info);
}
@@ -134,11 +129,7 @@ static void send_cpu_listeners(struct sk_buff *skb,
void *reply = genlmsg_data(genlhdr);
int rc, delcount = 0;
- rc = genlmsg_end(skb, reply);
- if (rc < 0) {
- nlmsg_free(skb);
- return;
- }
+ genlmsg_end(skb, reply);
rc = 0;
down_read(&listeners->sem);
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index f622cf28628a..c09c07817d7a 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,6 +1,6 @@
obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index a7077d3ae52f..1b001ed1edb9 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -788,7 +788,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
goto out;
}
- restart = &current_thread_info()->restart_block;
+ restart = &current->restart_block;
restart->fn = alarm_timer_nsleep_restart;
restart->nanosleep.clockid = type;
restart->nanosleep.expires = exp.tv64;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index b79f39bda7e1..4892352f0e49 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -34,82 +34,6 @@
#include "tick-internal.h"
#include "timekeeping_internal.h"
-void timecounter_init(struct timecounter *tc,
- const struct cyclecounter *cc,
- u64 start_tstamp)
-{
- tc->cc = cc;
- tc->cycle_last = cc->read(cc);
- tc->nsec = start_tstamp;
-}
-EXPORT_SYMBOL_GPL(timecounter_init);
-
-/**
- * timecounter_read_delta - get nanoseconds since last call of this function
- * @tc: Pointer to time counter
- *
- * When the underlying cycle counter runs over, this will be handled
- * correctly as long as it does not run over more than once between
- * calls.
- *
- * The first call to this function for a new time counter initializes
- * the time tracking and returns an undefined result.
- */
-static u64 timecounter_read_delta(struct timecounter *tc)
-{
- cycle_t cycle_now, cycle_delta;
- u64 ns_offset;
-
- /* read cycle counter: */
- cycle_now = tc->cc->read(tc->cc);
-
- /* calculate the delta since the last timecounter_read_delta(): */
- cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
-
- /* convert to nanoseconds: */
- ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
-
- /* update time stamp of timecounter_read_delta() call: */
- tc->cycle_last = cycle_now;
-
- return ns_offset;
-}
-
-u64 timecounter_read(struct timecounter *tc)
-{
- u64 nsec;
-
- /* increment time by nanoseconds since last call */
- nsec = timecounter_read_delta(tc);
- nsec += tc->nsec;
- tc->nsec = nsec;
-
- return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_read);
-
-u64 timecounter_cyc2time(struct timecounter *tc,
- cycle_t cycle_tstamp)
-{
- u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
- u64 nsec;
-
- /*
- * Instead of always treating cycle_tstamp as more recent
- * than tc->cycle_last, detect when it is too far in the
- * future and treat it as old time stamp instead.
- */
- if (cycle_delta > tc->cc->mask / 2) {
- cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
- nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
- } else {
- nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
- }
-
- return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_cyc2time);
-
/**
* clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
* @mult: pointer to mult variable
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 37e50aadd471..bee0c1f78091 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
boot = ktime_add(mono, off_boot);
xtim = ktime_add(mono, off_real);
- tai = ktime_add(xtim, off_tai);
+ tai = ktime_add(mono, off_tai);
base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
@@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
/*
* Divide a ktime value by a nanosecond value
*/
-u64 ktime_divns(const ktime_t kt, s64 div)
+u64 __ktime_divns(const ktime_t kt, s64 div)
{
u64 dclc;
int sft = 0;
@@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
return dclc;
}
-EXPORT_SYMBOL_GPL(ktime_divns);
+EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG >= 64 */
/*
@@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer)
trace_hrtimer_cancel(timer);
}
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+ struct hrtimer_clock_base *base = cpu_base->clock_base;
+ ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+ int i;
+
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+ struct timerqueue_node *next;
+ struct hrtimer *timer;
+
+ next = timerqueue_getnext(&base->active);
+ if (!next)
+ continue;
+
+ timer = container_of(next, struct hrtimer, node);
+ expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+ if (expires.tv64 < expires_next.tv64)
+ expires_next = expires;
+ }
+ /*
+ * clock_was_set() might have changed base->offset of any of
+ * the clock bases so the result might be negative. Fix it up
+ * to prevent a false positive in clockevents_program_event().
+ */
+ if (expires_next.tv64 < 0)
+ expires_next.tv64 = 0;
+ return expires_next;
+}
+#endif
+
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void)
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
- int i;
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- ktime_t expires, expires_next;
-
- expires_next.tv64 = KTIME_MAX;
-
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
- struct hrtimer *timer;
- struct timerqueue_node *next;
-
- next = timerqueue_getnext(&base->active);
- if (!next)
- continue;
- timer = container_of(next, struct hrtimer, node);
-
- expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
- /*
- * clock_was_set() has changed base->offset so the
- * result might be negative. Fix it up to prevent a
- * false positive in clockevents_program_event()
- */
- if (expires.tv64 < 0)
- expires.tv64 = 0;
- if (expires.tv64 < expires_next.tv64)
- expires_next = expires;
- }
+ ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
return;
@@ -587,6 +593,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
return 0;
/*
+ * When the target cpu of the timer is currently executing
+ * hrtimer_interrupt(), then we do not touch the clock event
+ * device. hrtimer_interrupt() will reevaluate all clock bases
+ * before reprogramming the device.
+ */
+ if (cpu_base->in_hrtirq)
+ return 0;
+
+ /*
* If a hang was detected in the last timer interrupt then we
* do not schedule a timer which is earlier than the expiry
* which we enforced in the hang detection. We want the system
@@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
ktime_t hrtimer_get_next_event(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+ ktime_t mindelta = { .tv64 = KTIME_MAX };
unsigned long flags;
- int i;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
- if (!hrtimer_hres_active()) {
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
- struct hrtimer *timer;
- struct timerqueue_node *next;
-
- next = timerqueue_getnext(&base->active);
- if (!next)
- continue;
-
- timer = container_of(next, struct hrtimer, node);
- delta.tv64 = hrtimer_get_expires_tv64(timer);
- delta = ktime_sub(delta, base->get_time());
- if (delta.tv64 < mindelta.tv64)
- mindelta.tv64 = delta.tv64;
- }
- }
+ if (!hrtimer_hres_active())
+ mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
+ ktime_get());
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
raw_spin_lock(&cpu_base->lock);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
- expires_next.tv64 = KTIME_MAX;
+ cpu_base->in_hrtirq = 1;
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1291,28 +1291,20 @@ retry:
* are right-of a not yet expired timer, because that
* timer will have to trigger a wakeup anyway.
*/
-
- if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
- ktime_t expires;
-
- expires = ktime_sub(hrtimer_get_expires(timer),
- base->offset);
- if (expires.tv64 < 0)
- expires.tv64 = KTIME_MAX;
- if (expires.tv64 < expires_next.tv64)
- expires_next = expires;
+ if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
- }
__run_hrtimer(timer, &basenow);
}
}
-
+ /* Reevaluate the clock bases for the next expiry */
+ expires_next = __hrtimer_get_next_event(cpu_base);
/*
* Store the new expiry value so the migration code can verify
* against it.
*/
cpu_base->expires_next = expires_next;
+ cpu_base->in_hrtirq = 0;
raw_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
@@ -1591,7 +1583,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
goto out;
}
- restart = &current_thread_info()->restart_block;
+ restart = &current->restart_block;
restart->fn = hrtimer_nanosleep_restart;
restart->nanosleep.clockid = t.timer.base->clockid;
restart->nanosleep.rmtp = rmtp;
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 87a346fd6d61..4b585e0fdd22 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -488,13 +488,13 @@ static void sync_cmos_clock(struct work_struct *work)
getnstimeofday64(&now);
if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
- struct timespec adjust = timespec64_to_timespec(now);
+ struct timespec64 adjust = now;
fail = -ENODEV;
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
#ifdef CONFIG_GENERIC_CMOS_UPDATE
- fail = update_persistent_clock(adjust);
+ fail = update_persistent_clock(timespec64_to_timespec(adjust));
#endif
#ifdef CONFIG_RTC_SYSTOHC
if (fail == -ENODEV)
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
return -EPERM;
+ if (txc->modes & ADJ_FREQUENCY) {
+ if (LONG_MIN / PPM_SCALE > txc->freq)
+ return -EINVAL;
+ if (LONG_MAX / PPM_SCALE < txc->freq)
+ return -EINVAL;
+ }
+
return 0;
}
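The ntp_validate_timex() hunk above rejects ADJ_FREQUENCY values that would overflow a long once multiplied by PPM_SCALE. The shape of the guard is the usual division-based pre-check before a signed multiplication; a self-contained illustration under the assumption that the scale factor is positive (the names here are hypothetical, not the kernel's):

#include <limits.h>

/* Return -1 if freq * scale would overflow a long; otherwise store the product. */
static int scale_checked(long freq, long scale, long *out)
{
	/* scale is assumed > 0, as PPM_SCALE is in the kernel check above */
	if (LONG_MIN / scale > freq || LONG_MAX / scale < freq)
		return -1;

	*out = freq * scale;	/* provably in range now */
	return 0;
}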
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index a16b67859e2a..0075da74abf0 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1334,8 +1334,7 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
- struct restart_block *restart_block =
- &current_thread_info()->restart_block;
+ struct restart_block *restart_block = &current->restart_block;
struct itimerspec it;
int error;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1363d58f07e9..a4c4edac4528 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -326,13 +326,6 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-/*
- * Worst case string length in chunks of CPU range seems 2 steps
- * separations: 0,2,4,6,...
- * This is NR_CPUS + sizeof('\0')
- */
-static char __initdata nohz_full_buf[NR_CPUS + 1];
-
static int tick_nohz_init_all(void)
{
int err = -1;
@@ -393,8 +386,8 @@ void __init tick_nohz_init(void)
context_tracking_cpu_set(cpu);
cpu_notifier(tick_nohz_cpu_down_callback, 0);
- cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask);
- pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
+ pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
+ cpumask_pr_args(tick_nohz_full_mask));
}
#endif
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6390517e77d4..2c85b7724af4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
if (tv) {
if (copy_from_user(&user_tv, tv, sizeof(*tv)))
return -EFAULT;
+
+ if (!timeval_valid(&user_tv))
+ return -EINVAL;
+
new_ts.tv_sec = user_tv.tv_sec;
new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
}
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
new file mode 100644
index 000000000000..4687b3104bae
--- /dev/null
+++ b/kernel/time/timecounter.c
@@ -0,0 +1,112 @@
+/*
+ * linux/kernel/time/timecounter.c
+ *
+ * based on code that migrated away from
+ * linux/kernel/time/clocksource.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/timecounter.h>
+
+void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp)
+{
+ tc->cc = cc;
+ tc->cycle_last = cc->read(cc);
+ tc->nsec = start_tstamp;
+ tc->mask = (1ULL << cc->shift) - 1;
+ tc->frac = 0;
+}
+EXPORT_SYMBOL_GPL(timecounter_init);
+
+/**
+ * timecounter_read_delta - get nanoseconds since last call of this function
+ * @tc: Pointer to time counter
+ *
+ * When the underlying cycle counter runs over, this will be handled
+ * correctly as long as it does not run over more than once between
+ * calls.
+ *
+ * The first call to this function for a new time counter initializes
+ * the time tracking and returns an undefined result.
+ */
+static u64 timecounter_read_delta(struct timecounter *tc)
+{
+ cycle_t cycle_now, cycle_delta;
+ u64 ns_offset;
+
+ /* read cycle counter: */
+ cycle_now = tc->cc->read(tc->cc);
+
+ /* calculate the delta since the last timecounter_read_delta(): */
+ cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
+
+ /* convert to nanoseconds: */
+ ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta,
+ tc->mask, &tc->frac);
+
+ /* update time stamp of timecounter_read_delta() call: */
+ tc->cycle_last = cycle_now;
+
+ return ns_offset;
+}
+
+u64 timecounter_read(struct timecounter *tc)
+{
+ u64 nsec;
+
+ /* increment time by nanoseconds since last call */
+ nsec = timecounter_read_delta(tc);
+ nsec += tc->nsec;
+ tc->nsec = nsec;
+
+ return nsec;
+}
+EXPORT_SYMBOL_GPL(timecounter_read);
+
+/*
+ * This is like cyclecounter_cyc2ns(), but it is used for computing a
+ * time previous to the time stored in the cycle counter.
+ */
+static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
+ cycle_t cycles, u64 mask, u64 frac)
+{
+ u64 ns = (u64) cycles;
+
+ ns = ((ns * cc->mult) - frac) >> cc->shift;
+
+ return ns;
+}
+
+u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp)
+{
+ u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
+ u64 nsec = tc->nsec, frac = tc->frac;
+
+ /*
+ * Instead of always treating cycle_tstamp as more recent
+ * than tc->cycle_last, detect when it is too far in the
+ * future and treat it as old time stamp instead.
+ */
+ if (delta > tc->cc->mask / 2) {
+ delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
+ nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
+ } else {
+ nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
+ }
+
+ return nsec;
+}
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
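timecounter_init() above now also seeds tc->mask and tc->frac, so repeated conversions carry the sub-nanosecond remainder forward instead of dropping it on every call. A minimal sketch of how a driver might use the relocated API; the hardware accessor, counter width and mult/shift values are assumptions, not taken from this patch:

#include <linux/timecounter.h>
#include <linux/ktime.h>

extern u64 example_hw_read_counter(void);	/* hypothetical device register read */

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	return example_hw_read_counter();
}

static struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= (cycle_t)((1ULL << 48) - 1),	/* assumed 48-bit free-running counter */
	.mult	= 1 << 21,			/* placeholder; use clocks_calc_mult_shift() */
	.shift	= 21,
};

static struct timecounter example_tc;

static void example_tc_setup(void)
{
	timecounter_init(&example_tc, &example_cc, ktime_to_ns(ktime_get_real()));
}

static u64 example_tc_now_ns(void)
{
	return timecounter_read(&example_tc);		/* wrap-safe, monotonically increasing ns */
}

static u64 example_tc_stamp_ns(cycle_t hw_stamp)
{
	return timecounter_cyc2time(&example_tc, hw_stamp);
}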
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6a931852082f..b124af259800 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1659,24 +1659,24 @@ out:
}
/**
- * getboottime - Return the real time of system boot.
- * @ts: pointer to the timespec to be set
+ * getboottime64 - Return the real time of system boot.
+ * @ts: pointer to the timespec64 to be set
*
- * Returns the wall-time of boot in a timespec.
+ * Returns the wall-time of boot in a timespec64.
*
* This is based on the wall_to_monotonic offset and the total suspend
* time. Calls to settimeofday will affect the value returned (which
* basically means that however wrong your real time clock is at boot time,
* you get the right time here).
*/
-void getboottime(struct timespec *ts)
+void getboottime64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
- *ts = ktime_to_timespec(t);
+ *ts = ktime_to_timespec64(t);
}
-EXPORT_SYMBOL_GPL(getboottime);
+EXPORT_SYMBOL_GPL(getboottime64);
unsigned long get_seconds(void)
{
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 979ccde26720..98f26588255e 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -3,11 +3,11 @@
ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
ifdef CONFIG_FTRACE_SELFTEST
# selftest needs instrumentation
-CFLAGS_trace_selftest_dynamic.o = -pg
+CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)
obj-y += trace_selftest_dynamic.o
endif
endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 929a733d302e..45e5cb143d17 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
- struct ftrace_hash *old_hash)
+ struct ftrace_ops_hash *old_hash)
{
ops->flags |= FTRACE_OPS_FL_MODIFYING;
- ops->old_hash.filter_hash = old_hash;
+ ops->old_hash.filter_hash = old_hash->filter_hash;
+ ops->old_hash.notrace_hash = old_hash->notrace_hash;
ftrace_run_update_code(command);
ops->old_hash.filter_hash = NULL;
+ ops->old_hash.notrace_hash = NULL;
ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
static int ftrace_probe_registered;
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
{
int ret;
int i;
@@ -3637,6 +3639,7 @@ int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_probe *entry;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
if (!hash) {
count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
- __enable_ftrace_function_probe(old_hash);
+ __enable_ftrace_function_probe(&old_hash_ops);
if (!ret)
free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
}
static void ftrace_ops_update_code(struct ftrace_ops *ops,
- struct ftrace_hash *old_hash)
+ struct ftrace_ops_hash *old_hash)
{
- if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ struct ftrace_ops *op;
+
+ if (!ftrace_enabled)
+ return;
+
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+ return;
+ }
+
+ /*
+ * If this is the shared global_ops filter, then we need to
+ * check if there is another ops that shares it, is enabled.
+ * If so, we still need to run the modify code.
+ */
+ if (ops->func_hash != &global_ops.local_hash)
+ return;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->func_hash == &global_ops.local_hash &&
+ op->flags & FTRACE_OPS_FL_ENABLED) {
+ ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+ /* Only need to do this once */
+ return;
+ }
+ } while_for_each_ftrace_op(op);
}
static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+ old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+ old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
if (!ret) {
- ftrace_ops_update_code(ops, old_hash);
+ ftrace_ops_update_code(ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
int ftrace_regex_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+ old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+ old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
if (!ret) {
- ftrace_ops_update_code(iter->ops, old_hash);
+ ftrace_ops_update_code(iter->ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
@@ -5419,7 +5456,7 @@ static __init int ftrace_init_debugfs(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
ftrace_init_dyn_debugfs(d_tracer);
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 1c71382b283d..eb4220a132ec 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -13,5 +13,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/power.h>
+EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7a4104cb95cb..5040d44fe5a3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -9,7 +9,6 @@
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h> /* for self test */
@@ -23,7 +22,6 @@
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
-#include <linux/fs.h>
#include <asm/local.h>
@@ -447,7 +445,10 @@ int ring_buffer_print_page_header(struct trace_seq *s)
struct rb_irq_work {
struct irq_work work;
wait_queue_head_t waiters;
+ wait_queue_head_t full_waiters;
bool waiters_pending;
+ bool full_waiters_pending;
+ bool wakeup_full;
};
/*
@@ -529,6 +530,10 @@ static void rb_wake_up_waiters(struct irq_work *work)
struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
wake_up_all(&rbwork->waiters);
+ if (rbwork->wakeup_full) {
+ rbwork->wakeup_full = false;
+ wake_up_all(&rbwork->full_waiters);
+ }
}
/**
@@ -553,9 +558,11 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
* data in any cpu buffer, or a specific buffer, put the
* caller on the appropriate wait queue.
*/
- if (cpu == RING_BUFFER_ALL_CPUS)
+ if (cpu == RING_BUFFER_ALL_CPUS) {
work = &buffer->irq_work;
- else {
+ /* Full only makes sense on per cpu reads */
+ full = false;
+ } else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -ENODEV;
cpu_buffer = buffer->buffers[cpu];
@@ -564,7 +571,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
while (true) {
- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+ if (full)
+ prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+ else
+ prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
/*
* The events can happen in critical sections where
@@ -586,7 +596,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
* that is necessary is that the wake up happens after
* a task has been queued. It's OK for spurious wake ups.
*/
- work->waiters_pending = true;
+ if (full)
+ work->full_waiters_pending = true;
+ else
+ work->waiters_pending = true;
if (signal_pending(current)) {
ret = -EINTR;
@@ -615,7 +628,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
schedule();
}
- finish_wait(&work->waiters, &wait);
+ if (full)
+ finish_wait(&work->full_waiters, &wait);
+ else
+ finish_wait(&work->waiters, &wait);
return ret;
}
@@ -1230,6 +1246,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
init_completion(&cpu_buffer->update_done);
init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&cpu_buffer->irq_work.waiters);
+ init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
@@ -2801,6 +2818,8 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
+ bool pagebusy;
+
if (buffer->irq_work.waiters_pending) {
buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies it's own memory barriers */
@@ -2812,6 +2831,15 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
/* irq_work_queue() supplies it's own memory barriers */
irq_work_queue(&cpu_buffer->irq_work.work);
}
+
+ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+
+ if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+ cpu_buffer->irq_work.wakeup_full = true;
+ cpu_buffer->irq_work.full_waiters_pending = false;
+ /* irq_work_queue() supplies it's own memory barriers */
+ irq_work_queue(&cpu_buffer->irq_work.work);
+ }
}
/**
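The ring-buffer changes above add a second wait queue so a per-CPU reader can sleep until a full page is ready instead of waking on the first event; rb_wakeups() only fires that wake-up once the writer has moved off the reader page, and ring_buffer_wait() silently downgrades full to false for RING_BUFFER_ALL_CPUS. A sketch of the consumer side under those rules (the event handler is an assumed helper, and error handling is trimmed):

#include <linux/ring_buffer.h>

static void example_handle_event(struct ring_buffer_event *event, u64 ts);	/* assumed helper */

/* Illustrative only: block until at least one full page is available on @cpu, then drain it. */
static int example_consume_full_pages(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;
	int ret;

	for (;;) {
		ret = ring_buffer_wait(buffer, cpu, true);	/* full = true */
		if (ret)					/* e.g. -EINTR  */
			return ret;

		while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
			example_handle_event(event, ts);
	}
}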
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 3f9e328c30b5..13d945c0d03f 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -7,7 +7,7 @@
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <asm/local.h>
struct rb_page {
@@ -17,7 +17,7 @@ struct rb_page {
};
/* run time and sleep time in seconds */
-#define RUN_TIME 10
+#define RUN_TIME 10ULL
#define SLEEP_TIME 10
/* number of events for writer to wake up the reader */
@@ -212,8 +212,7 @@ static void ring_buffer_consumer(void)
static void ring_buffer_producer(void)
{
- struct timeval start_tv;
- struct timeval end_tv;
+ ktime_t start_time, end_time, timeout;
unsigned long long time;
unsigned long long entries;
unsigned long long overruns;
@@ -227,7 +226,8 @@ static void ring_buffer_producer(void)
* make the system stall)
*/
trace_printk("Starting ring buffer hammer\n");
- do_gettimeofday(&start_tv);
+ start_time = ktime_get();
+ timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
do {
struct ring_buffer_event *event;
int *entry;
@@ -244,7 +244,7 @@ static void ring_buffer_producer(void)
ring_buffer_unlock_commit(buffer, event);
}
}
- do_gettimeofday(&end_tv);
+ end_time = ktime_get();
cnt++;
if (consumer && !(cnt % wakeup_interval))
@@ -264,7 +264,7 @@ static void ring_buffer_producer(void)
cond_resched();
#endif
- } while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
+ } while (ktime_before(end_time, timeout) && !kill_test);
trace_printk("End ring buffer hammer\n");
if (consumer) {
@@ -280,9 +280,7 @@ static void ring_buffer_producer(void)
wait_for_completion(&read_done);
}
- time = end_tv.tv_sec - start_tv.tv_sec;
- time *= USEC_PER_SEC;
- time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);
+ time = ktime_us_delta(end_time, start_time);
entries = ring_buffer_entries(buffer);
overruns = ring_buffer_overruns(buffer);
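The benchmark above switches from do_gettimeofday()/struct timeval arithmetic to ktime_get(), ktime_add_ns(), ktime_before() and ktime_us_delta(), which removes the manual second/microsecond bookkeeping. A tiny sketch of the same measurement pattern in isolation (the workload callback is hypothetical):

#include <linux/ktime.h>

/* Illustrative only: time a workload in microseconds the way the benchmark now does. */
static s64 example_measure_us(void (*workload)(void))
{
	ktime_t start = ktime_get();

	workload();

	return ktime_us_delta(ktime_get(), start);
}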
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2e767972e99c..62c6506d663f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2036,7 +2036,8 @@ void trace_printk_init_buffers(void)
/* trace_printk() is for debug use only. Don't use it in production. */
- pr_warning("\n**********************************************************\n");
+ pr_warning("\n");
+ pr_warning("**********************************************************\n");
pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warning("** **\n");
pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
@@ -3352,12 +3353,12 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
mutex_lock(&tracing_cpumask_update_lock);
- len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
- if (count - len < 2) {
+ len = snprintf(mask_str, count, "%*pb\n",
+ cpumask_pr_args(tr->tracing_cpumask));
+ if (len >= count) {
count = -EINVAL;
goto out_err;
}
- len += sprintf(mask_str + len, "\n");
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
out_err:
@@ -4140,6 +4141,12 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
goto out;
}
+ /* If trace pipe files are being read, we can't change the tracer */
+ if (tr->current_trace->ref) {
+ ret = -EBUSY;
+ goto out;
+ }
+
trace_branch_disable();
tr->current_trace->enabled--;
@@ -4326,17 +4333,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
}
trace_seq_init(&iter->seq);
-
- /*
- * We make a copy of the current tracer to avoid concurrent
- * changes on it while we are reading.
- */
- iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
- if (!iter->trace) {
- ret = -ENOMEM;
- goto fail;
- }
- *iter->trace = *tr->current_trace;
+ iter->trace = tr->current_trace;
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
ret = -ENOMEM;
@@ -4363,6 +4360,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter->trace->pipe_open(iter);
nonseekable_open(inode, filp);
+
+ tr->current_trace->ref++;
out:
mutex_unlock(&trace_types_lock);
return ret;
@@ -4382,6 +4381,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_lock(&trace_types_lock);
+ tr->current_trace->ref--;
+
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
@@ -4389,7 +4390,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
free_cpumask_var(iter->started);
mutex_destroy(&iter->mutex);
- kfree(iter->trace);
kfree(iter);
trace_array_put(tr);
@@ -4422,7 +4422,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
return trace_poll(iter, filp, poll_table);
}
-/* Must be called with trace_types_lock mutex held. */
+/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
struct trace_iterator *iter = filp->private_data;
@@ -4467,7 +4467,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_iterator *iter = filp->private_data;
- struct trace_array *tr = iter->tr;
ssize_t sret;
/* return any leftover data */
@@ -4477,12 +4476,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
trace_seq_init(&iter->seq);
- /* copy the tracer to avoid using a global lock all around */
- mutex_lock(&trace_types_lock);
- if (unlikely(iter->trace->name != tr->current_trace->name))
- *iter->trace = *tr->current_trace;
- mutex_unlock(&trace_types_lock);
-
/*
* Avoid more than one consumer on a single file descriptor
* This is just a matter of traces coherency, the ring buffer itself
@@ -4642,7 +4635,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
};
- struct trace_array *tr = iter->tr;
ssize_t ret;
size_t rem;
unsigned int i;
@@ -4650,12 +4642,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
- /* copy the tracer to avoid using a global lock all around */
- mutex_lock(&trace_types_lock);
- if (unlikely(iter->trace->name != tr->current_trace->name))
- *iter->trace = *tr->current_trace;
- mutex_unlock(&trace_types_lock);
-
mutex_lock(&iter->mutex);
if (iter->trace->splice_read) {
@@ -4942,7 +4928,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
*fpos += written;
out_unlock:
- for (i = 0; i < nr_pages; i++){
+ for (i = nr_pages - 1; i >= 0; i--) {
kunmap_atomic(map_page[i]);
put_page(pages[i]);
}
@@ -5331,6 +5317,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
filp->private_data = info;
+ tr->current_trace->ref++;
+
mutex_unlock(&trace_types_lock);
ret = nonseekable_open(inode, filp);
@@ -5361,21 +5349,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (!count)
return 0;
- mutex_lock(&trace_types_lock);
-
#ifdef CONFIG_TRACER_MAX_TRACE
- if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
- size = -EBUSY;
- goto out_unlock;
- }
+ if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+ return -EBUSY;
#endif
if (!info->spare)
info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
iter->cpu_file);
- size = -ENOMEM;
if (!info->spare)
- goto out_unlock;
+ return -ENOMEM;
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
@@ -5391,21 +5374,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (ret < 0) {
if (trace_empty(iter)) {
- if ((filp->f_flags & O_NONBLOCK)) {
- size = -EAGAIN;
- goto out_unlock;
- }
- mutex_unlock(&trace_types_lock);
+ if ((filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
ret = wait_on_pipe(iter, false);
- mutex_lock(&trace_types_lock);
- if (ret) {
- size = ret;
- goto out_unlock;
- }
+ if (ret)
+ return ret;
+
goto again;
}
- size = 0;
- goto out_unlock;
+ return 0;
}
info->read = 0;
@@ -5415,18 +5393,14 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size);
- if (ret == size) {
- size = -EFAULT;
- goto out_unlock;
- }
+ if (ret == size)
+ return -EFAULT;
+
size -= ret;
*ppos += size;
info->read += size;
- out_unlock:
- mutex_unlock(&trace_types_lock);
-
return size;
}
@@ -5437,6 +5411,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
mutex_lock(&trace_types_lock);
+ iter->tr->current_trace->ref--;
+
__trace_array_put(iter->tr);
if (info->spare)
@@ -5522,30 +5498,20 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
int entries, size, i;
ssize_t ret = 0;
- mutex_lock(&trace_types_lock);
-
#ifdef CONFIG_TRACER_MAX_TRACE
- if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
- ret = -EBUSY;
- goto out;
- }
+ if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+ return -EBUSY;
#endif
- if (splice_grow_spd(pipe, &spd)) {
- ret = -ENOMEM;
- goto out;
- }
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
- if (*ppos & (PAGE_SIZE - 1)) {
- ret = -EINVAL;
- goto out;
- }
+ if (*ppos & (PAGE_SIZE - 1))
+ return -EINVAL;
if (len & (PAGE_SIZE - 1)) {
- if (len < PAGE_SIZE) {
- ret = -EINVAL;
- goto out;
- }
+ if (len < PAGE_SIZE)
+ return -EINVAL;
len &= PAGE_MASK;
}
@@ -5606,25 +5572,20 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
/* did we read anything? */
if (!spd.nr_pages) {
if (ret)
- goto out;
+ return ret;
+
+ if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+ return -EAGAIN;
- if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
- ret = -EAGAIN;
- goto out;
- }
- mutex_unlock(&trace_types_lock);
ret = wait_on_pipe(iter, true);
- mutex_lock(&trace_types_lock);
if (ret)
- goto out;
+ return ret;
goto again;
}
ret = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
-out:
- mutex_unlock(&trace_types_lock);
return ret;
}
@@ -5854,28 +5815,11 @@ static __init int register_snapshot_cmd(void)
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
-struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
+static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
- if (tr->dir)
- return tr->dir;
-
- if (!debugfs_initialized())
- return NULL;
-
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- tr->dir = debugfs_create_dir("tracing", NULL);
-
- if (!tr->dir)
- pr_warn_once("Could not create debugfs directory 'tracing'\n");
-
return tr->dir;
}
-struct dentry *tracing_init_dentry(void)
-{
- return tracing_init_dentry_tr(&global_trace);
-}
-
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
struct dentry *d_tracer;
@@ -5883,8 +5827,8 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
if (tr->percpu_dir)
return tr->percpu_dir;
- d_tracer = tracing_init_dentry_tr(tr);
- if (!d_tracer)
+ d_tracer = tracing_get_dentry(tr);
+ if (IS_ERR(d_tracer))
return NULL;
tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
@@ -6086,8 +6030,8 @@ static struct dentry *trace_options_init_dentry(struct trace_array *tr)
if (tr->options)
return tr->options;
- d_tracer = tracing_init_dentry_tr(tr);
- if (!d_tracer)
+ d_tracer = tracing_get_dentry(tr);
+ if (IS_ERR(d_tracer))
return NULL;
tr->options = debugfs_create_dir("options", d_tracer);
@@ -6416,7 +6360,7 @@ static int instance_delete(const char *name)
goto out_unlock;
ret = -EBUSY;
- if (tr->ref)
+ if (tr->ref || (tr->current_trace && tr->current_trace->ref))
goto out_unlock;
list_del(&tr->list);
@@ -6571,6 +6515,33 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
}
+/**
+ * tracing_init_dentry - initialize top level trace array
+ *
+ * This is called when creating files or directories in the tracing
+ * directory. It is called via fs_initcall() by any of the boot up code
+ * and expects to return the dentry of the top level tracing directory.
+ */
+struct dentry *tracing_init_dentry(void)
+{
+ struct trace_array *tr = &global_trace;
+
+ if (tr->dir)
+ return tr->dir;
+
+ if (WARN_ON(!debugfs_initialized()))
+ return ERR_PTR(-ENODEV);
+
+ tr->dir = debugfs_create_dir("tracing", NULL);
+
+ if (!tr->dir) {
+ pr_warn_once("Could not create debugfs directory 'tracing'\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return tr->dir;
+}
+
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
@@ -6578,7 +6549,7 @@ static __init int tracer_init_debugfs(void)
trace_access_lock_init();
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
init_tracer_debugfs(&global_trace, d_tracer);
@@ -6811,7 +6782,6 @@ __init static int tracer_alloc_buffers(void)
int ring_buf_size;
int ret = -ENOMEM;
-
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
@@ -6918,7 +6888,6 @@ void __init trace_init(void)
tracepoint_printk = 0;
}
tracer_alloc_buffers();
- init_ftrace_syscalls();
trace_event_init();
}
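Because tracing_init_dentry() above now reports failure with ERR_PTR() instead of NULL, every caller in this series is converted from "if (!d_tracer)" to "if (IS_ERR(d_tracer))". The updated caller idiom, sketched with a hypothetical file name and fops:

#include <linux/err.h>
#include <linux/debugfs.h>

static const struct file_operations example_fops;	/* hypothetical */

static __init int example_init_trace_file(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))		/* failure is no longer signalled by NULL */
		return 0;

	trace_create_file("example_file", 0444, d_tracer, NULL, &example_fops);
	return 0;
}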
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8de48bac1ce2..dd8205a35760 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -388,6 +388,7 @@ struct tracer {
struct tracer *next;
struct tracer_flags *flags;
int enabled;
+ int ref;
bool print_max;
bool allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -541,7 +542,6 @@ struct dentry *trace_create_file(const char *name,
void *data,
const struct file_operations *fops);
-struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7d6e2afde669..57cbf1efdd44 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -7,7 +7,6 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 4b9c114ee9de..6fa484de2ba1 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags)
}
void *perf_trace_buf_prepare(int size, unsigned short type,
- struct pt_regs *regs, int *rctxp)
+ struct pt_regs **regs, int *rctxp)
{
struct trace_entry *entry;
unsigned long flags;
@@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type,
if (*rctxp < 0)
return NULL;
+ if (regs)
+ *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
/* zero the dead bytes from align to not leak stack to user */
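perf_trace_buf_prepare() above now takes struct pt_regs ** and, when asked, hands back the per-recursion-context pt_regs it maintains; callers that already have a regs pointer (the kprobe paths below) pass NULL instead. A hedged sketch of both calling styles under the new signature (function names and the assumption that size is already aligned are illustrative):

#include <linux/ftrace_event.h>

static void example_emit_without_regs(int size, unsigned short type, u64 addr,
				      u64 count, struct hlist_head *head)
{
	struct pt_regs *regs;
	void *entry;
	int rctx;

	/* No pt_regs of our own: let the helper supply the per-context buffer. */
	entry = perf_trace_buf_prepare(size, type, &regs, &rctx);
	if (!entry)
		return;
	/* ... fill *entry ... */
	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head, NULL);
}

static void example_emit_with_regs(int size, unsigned short type, struct pt_regs *regs,
				   u64 addr, u64 count, struct hlist_head *head)
{
	void *entry;
	int rctx;

	/* Caller already has regs (like the kprobe paths below): don't request a copy. */
	entry = perf_trace_buf_prepare(size, type, NULL, &rctx);
	if (!entry)
		return;
	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head, NULL);
}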
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 366a78a3e61e..db54dda10ccc 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
return 0;
}
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+ char *buf = bootup_event_buf;
+ char *token;
+ int ret;
+
+ while (true) {
+ token = strsep(&buf, ",");
+
+ if (!token)
+ break;
+ if (!*token)
+ continue;
+
+ /* Restarting syscalls requires that we stop them first */
+ if (disable_first)
+ ftrace_set_clr_event(tr, token, 0);
+
+ ret = ftrace_set_clr_event(tr, token, 1);
+ if (ret)
+ pr_warn("Failed to enable trace event: %s\n", token);
+
+ /* Put back the comma to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ',';
+ }
+}
+
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
struct ftrace_event_call **iter, *call;
- char *buf = bootup_event_buf;
- char *token;
int ret;
if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
*/
__trace_early_add_events(tr);
- while (true) {
- token = strsep(&buf, ",");
-
- if (!token)
- break;
- if (!*token)
- continue;
-
- ret = ftrace_set_clr_event(tr, token, 1);
- if (ret)
- pr_warn("Failed to enable trace event: %s\n", token);
- }
+ early_enable_events(tr, false);
trace_printk_start_comm();
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
return 0;
}
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, making
+ * the syscall tracepoint never get reached, but the event is enabled
+ * regardless (and not doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+ struct trace_array *tr;
+
+ tr = top_trace_array();
+ if (!tr)
+ return -ENODEV;
+
+ early_enable_events(tr, true);
+
+ return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
static __init int event_trace_init(void)
{
struct trace_array *tr;
@@ -2490,7 +2531,7 @@ static __init int event_trace_init(void)
return -ENODEV;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
entry = debugfs_create_file("available_events", 0444, d_tracer,
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d4ddde28a81a..12e2b99be862 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -6,12 +6,10 @@
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/fs.h>
#include "trace_output.h"
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index ba476009e5de..2d25ad1526bb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1437,7 +1437,7 @@ static __init int init_graph_debugfs(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
trace_create_file("max_graph_depth", 0644, d_tracer,
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 9bb104f748d0..8523ea345f2b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -10,11 +10,9 @@
* Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
-#include <linux/fs.h>
#include "trace.h"
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5edb518be345..d73f565b4e06 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1148,7 +1148,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+ entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
if (!entry)
return;
@@ -1179,7 +1179,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+ entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
if (!entry)
return;
@@ -1320,7 +1320,7 @@ static __init int init_kprobe_trace(void)
return -EINVAL;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index fcf0a9e48916..8bb2071474dd 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -6,8 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include "trace.h"
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b77b9a697619..692bf7184c8c 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -177,6 +177,50 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
}
EXPORT_SYMBOL(ftrace_print_hex_seq);
+const char *
+ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
+ size_t el_size)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ const char *prefix = "";
+ void *ptr = (void *)buf;
+
+ trace_seq_putc(p, '{');
+
+ while (ptr < buf + buf_len) {
+ switch (el_size) {
+ case 1:
+ trace_seq_printf(p, "%s0x%x", prefix,
+ *(u8 *)ptr);
+ break;
+ case 2:
+ trace_seq_printf(p, "%s0x%x", prefix,
+ *(u16 *)ptr);
+ break;
+ case 4:
+ trace_seq_printf(p, "%s0x%x", prefix,
+ *(u32 *)ptr);
+ break;
+ case 8:
+ trace_seq_printf(p, "%s0x%llx", prefix,
+ *(u64 *)ptr);
+ break;
+ default:
+ trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
+ *(u8 *)ptr);
+ el_size = 1;
+ }
+ prefix = ",";
+ ptr += el_size;
+ }
+
+ trace_seq_putc(p, '}');
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(ftrace_print_array_seq);
+
int ftrace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *trace_event)
{
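ftrace_print_array_seq() renders a fixed-size array as a brace-wrapped, comma-separated list of hex values. The following is a minimal userspace model of that output format, assuming u16 elements; the kernel helper additionally handles 1/2/4/8-byte element sizes and writes into a trace_seq rather than stdout:

    /* Userspace model of the "{0x..,0x..}" rendering produced by
     * ftrace_print_array_seq(); illustrative only. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void print_array_u16(const uint16_t *buf, size_t count)
    {
            const char *prefix = "";
            size_t i;

            putchar('{');
            for (i = 0; i < count; i++) {
                    printf("%s0x%x", prefix, (unsigned)buf[i]);
                    prefix = ",";
            }
            puts("}");
    }

    int main(void)
    {
            uint16_t regs[] = { 0x1, 0x20, 0x300 };

            print_array_u16(regs, 3);   /* prints {0x1,0x20,0x300} */
            return 0;
    }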
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index c4e70b6bd7fa..36c1455b7567 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -5,7 +5,6 @@
*
*/
#include <linux/seq_file.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
@@ -15,7 +14,6 @@
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/fs.h>
#include "trace.h"
@@ -349,7 +347,7 @@ static __init int init_trace_printk_function_export(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
trace_create_file("printk_formats", 0444, d_tracer,
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 2e293beb186e..419ca37e72c9 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -5,8 +5,6 @@
*
*/
#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 8fb84b362816..d6e1003724e9 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -10,8 +10,6 @@
* Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index f8b45d8792f9..e694c9f9efa4 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -120,7 +120,7 @@ void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
__trace_seq_init(s);
- seq_buf_bitmask(&s->seq, maskp, nmaskbits);
+ seq_buf_printf(&s->seq, "%*pb", nmaskbits, maskp);
if (unlikely(seq_buf_has_overflowed(&s->seq))) {
s->seq.len = save_len;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 16eddb308c33..c3e4fcfddd45 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -7,12 +7,10 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
-#include <linux/fs.h>
#include <asm/setup.h>
@@ -462,7 +460,7 @@ static __init int stack_trace_init(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
trace_create_file("stack_max_size", 0644, d_tracer,
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 7af67360b330..75e19e86c954 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -276,7 +276,7 @@ static int tracing_stat_init(void)
struct dentry *d_tracing;
d_tracing = tracing_init_dentry();
- if (!d_tracing)
+ if (IS_ERR(d_tracing))
return 0;
stat_dir = debugfs_create_dir("trace_stat", d_tracing);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c6ee36fcbf90..f97f6e3a676c 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -574,7 +574,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
size -= sizeof(u32);
rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
- sys_data->enter_event->event.type, regs, &rctx);
+ sys_data->enter_event->event.type, NULL, &rctx);
if (!rec)
return;
@@ -647,7 +647,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
size -= sizeof(u32);
rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
- sys_data->exit_event->event.type, regs, &rctx);
+ sys_data->exit_event->event.type, NULL, &rctx);
if (!rec)
return;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 8520acc34b18..7dc1c8abecd6 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1111,7 +1111,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
if (hlist_empty(head))
goto out;
- entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+ entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
if (!entry)
goto out;
@@ -1321,7 +1321,7 @@ static __init int init_uprobe_trace(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
- if (!d_tracer)
+ if (IS_ERR(d_tracer))
return 0;
trace_create_file("uprobe_events", 0644, d_tracer,
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 70bf11815f84..3174bf8e3538 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -154,7 +154,7 @@ static int get_softlockup_thresh(void)
*/
static unsigned long get_timestamp(void)
{
- return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
+ return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
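The >> 30 in get_timestamp() is a cheap approximation of dividing nanoseconds by 10^9 (2^30 ≈ 1.07e9), so the watchdog works in rough seconds. A quick userspace check of how rough the approximation is:

    /* Check of the ns >> 30 ~= seconds approximation used by the watchdog. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long ns = 3600ULL * 1000000000ULL;  /* one hour in ns */

            printf("exact: %llu s, shifted: %llu\n",
                   ns / 1000000000ULL, ns >> 30);             /* 3600 vs 3352 */
            return 0;
    }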
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6202b08f1933..f28849394791 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
- *
- * Return:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
*/
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
- if (!need_to_create_worker(pool))
- return false;
restart:
spin_unlock_irq(&pool->lock);
@@ -1877,7 +1871,6 @@ restart:
*/
if (need_to_create_worker(pool))
goto restart;
- return true;
}
/**
@@ -1897,16 +1890,14 @@ restart:
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
- * %false if the pool don't need management and the caller can safely start
- * processing works, %true indicates that the function released pool->lock
- * and reacquired it to perform some management function and that the
- * conditions that the caller verified while holding the lock before
- * calling the function might no longer be true.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if a management function was performed and

+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- bool ret = false;
/*
* Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
* actual management, the pool may stall indefinitely.
*/
if (!mutex_trylock(&pool->manager_arb))
- return ret;
+ return false;
- ret |= maybe_create_worker(pool);
+ maybe_create_worker(pool);
mutex_unlock(&pool->manager_arb);
- return ret;
+ return true;
}
/**
@@ -3092,10 +3083,9 @@ static ssize_t wq_cpumask_show(struct device *dev,
int written;
mutex_lock(&wq->mutex);
- written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
+ written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
+ cpumask_pr_args(wq->unbound_attrs->cpumask));
mutex_unlock(&wq->mutex);
-
- written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
return written;
}
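wq_cpumask_show() now uses the %*pb vsprintf extension together with cpumask_pr_args() instead of the removed cpumask_scnprintf(); the same pattern appears elsewhere in this series (trace_seq_bitmask(), bitmap_print_to_pagebuf()). A hedged kernel-style sketch, in which the surrounding function and the mask argument are hypothetical:

    /* Sketch only: show_mask() and its argument are hypothetical;
     * scnprintf(), cpumask_pr_args() and %*pb/%*pbl are real. */
    #include <linux/kernel.h>
    #include <linux/cpumask.h>

    static void show_mask(const struct cpumask *mask)
    {
            char buf[128];

            /* hex mask form, e.g. "f" when CPUs 0-3 are set */
            scnprintf(buf, sizeof(buf), "%*pb\n", cpumask_pr_args(mask));

            /* list form, e.g. "0-3" */
            scnprintf(buf, sizeof(buf), "%*pbl\n", cpumask_pr_args(mask));
    }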
diff --git a/lib/Kconfig b/lib/Kconfig
index 54cf309a92a5..cb9758e0ba0c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -13,6 +13,15 @@ config RAID6_PQ
config BITREVERSE
tristate
+config HAVE_ARCH_BITREVERSE
+ bool
+ default n
+ depends on BITREVERSE
+ help
+ This option is selected by architectures that provide a bit-reverse
+ instruction, in which case the hardware instruction is used instead of
+ the generic table-based implementation.
+
config RATIONAL
boolean
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5f2ce616c046..ecb3516f6546 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -636,7 +636,7 @@ config DEBUG_STACKOVERFLOW
depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW
---help---
Say Y here if you want to check for overflows of kernel, IRQ
- and exception stacks (if your archicture uses them). This
+ and exception stacks (if your architecture uses them). This
option will show detailed messages if free stack space drops
below a certain limit.
@@ -651,6 +651,8 @@ config DEBUG_STACKOVERFLOW
source "lib/Kconfig.kmemcheck"
+source "lib/Kconfig.kasan"
+
endmenu # "Memory Debugging"
config DEBUG_SHIRQ
@@ -1215,6 +1217,7 @@ config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
select TORTURE_TEST
+ select SRCU
default n
help
This option provides a kernel module that runs torture tests
@@ -1257,7 +1260,7 @@ config RCU_CPU_STALL_TIMEOUT
config RCU_CPU_STALL_INFO
bool "Print additional diagnostics on RCU CPU stall"
depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
- default n
+ default y
help
For each stalled CPU that is aware of the current RCU grace
period, print out additional per-CPU diagnostic information
@@ -1579,6 +1582,9 @@ config ASYNC_RAID6_TEST
If unsure, say N.
+config TEST_HEXDUMP
+ tristate "Test functions located in the hexdump module at runtime"
+
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
@@ -1586,7 +1592,7 @@ config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
config TEST_RHASHTABLE
- bool "Perform selftest on resizable hash table"
+ tristate "Perform selftest on resizable hash table"
default n
help
Enable this option to test the rhashtable functions at boot.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
new file mode 100644
index 000000000000..4fecaedc80a2
--- /dev/null
+++ b/lib/Kconfig.kasan
@@ -0,0 +1,54 @@
+config HAVE_ARCH_KASAN
+ bool
+
+if HAVE_ARCH_KASAN
+
+config KASAN
+ bool "KASan: runtime memory debugger"
+ depends on SLUB_DEBUG
+ select CONSTRUCTORS
+ help
+ Enables kernel address sanitizer - runtime memory debugger,
+ designed to find out-of-bounds accesses and use-after-free bugs.
+ This is strictly a debugging feature. It consumes about 1/8
+ of available memory and causes roughly a 3x performance slowdown.
+ For better error detection enable CONFIG_STACKTRACE,
+ and add slub_debug=U to the boot cmdline.
+
+config KASAN_SHADOW_OFFSET
+ hex
+ default 0xdffffc0000000000 if X86_64
+
+choice
+ prompt "Instrumentation type"
+ depends on KASAN
+ default KASAN_OUTLINE
+
+config KASAN_OUTLINE
+ bool "Outline instrumentation"
+ help
+ Before every memory access the compiler inserts a call to
+ __asan_load*/__asan_store*, which checks the shadow memory.
+ This is slower than inline instrumentation, but it does not
+ bloat the kernel's .text section as much as inline does.
+
+config KASAN_INLINE
+ bool "Inline instrumentation"
+ help
+ The compiler directly inserts the shadow-memory checks before
+ memory accesses. This is faster than outline (in some workloads
+ it gives about a 2x boost over outline instrumentation), but
+ it makes the kernel's .text size much bigger.
+
+endchoice
+
+config TEST_KASAN
+ tristate "Module for testing kasan for bug detection"
+ depends on m && KASAN
+ help
+ This is a test module doing various nasty things such as
+ out-of-bounds accesses and use-after-free. It is useful for testing
+ kernel debugging features like the kernel address sanitizer.
+
+endif
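The TEST_KASAN module exercises exactly the class of bugs KASan is built to report. A hedged sketch of that class of bug follows; the function is hypothetical, kmalloc()/kfree() are the ordinary slab API, and the accesses are intentionally invalid:

    /* Illustrative only: bugs of this shape are what KASan reports. */
    #include <linux/slab.h>

    static void kasan_demo(void)
    {
            char *p = kmalloc(16, GFP_KERNEL);

            if (!p)
                    return;
            p[16] = 'x';    /* out-of-bounds write, one byte past the object */
            kfree(p);
            p[0] = 'y';     /* use-after-free write */
    }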
diff --git a/lib/Makefile b/lib/Makefile
index 3c3b30b9e020..87eb3bffc283 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,7 +4,7 @@
ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
@@ -23,18 +23,22 @@ lib-y += kobject.o klist.o
obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
- gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
+ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
+ gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
+obj-y += hexdump.o
+obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o
obj-y += kstrtox.o
+obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
-obj-$(CONFIG_TEST_BPF) += test_bpf.o
-obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 324ea9eab8c1..d456f4c15a9f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -104,18 +104,18 @@ EXPORT_SYMBOL(__bitmap_complement);
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
- * @bits : bitmap size, in bits
+ * @nbits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
-void __bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int shift, int bits)
+void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned shift, unsigned nbits)
{
- int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
- int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
- unsigned long mask = (1UL << left) - 1;
+ unsigned k, lim = BITS_TO_LONGS(nbits);
+ unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+ unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
@@ -127,17 +127,15 @@ void __bitmap_shift_right(unsigned long *dst,
upper = 0;
else {
upper = src[off + k + 1];
- if (off + k + 1 == lim - 1 && left)
+ if (off + k + 1 == lim - 1)
upper &= mask;
+ upper <<= (BITS_PER_LONG - rem);
}
lower = src[off + k];
- if (left && off + k == lim - 1)
+ if (off + k == lim - 1)
lower &= mask;
- dst[k] = lower >> rem;
- if (rem)
- dst[k] |= upper << (BITS_PER_LONG - rem);
- if (left && k == lim - 1)
- dst[k] &= mask;
+ lower >>= rem;
+ dst[k] = lower | upper;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
@@ -150,18 +148,19 @@ EXPORT_SYMBOL(__bitmap_shift_right);
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
- * @bits : bitmap size, in bits
+ * @nbits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
-void __bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int shift, int bits)
+void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
- int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
- int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+ int k;
+ unsigned int lim = BITS_TO_LONGS(nbits);
+ unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
@@ -170,17 +169,11 @@ void __bitmap_shift_left(unsigned long *dst,
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
- lower = src[k - 1];
+ lower = src[k - 1] >> (BITS_PER_LONG - rem);
else
lower = 0;
- upper = src[k];
- if (left && k == lim - 1)
- upper &= (1UL << left) - 1;
- dst[k + off] = upper << rem;
- if (rem)
- dst[k + off] |= lower >> (BITS_PER_LONG - rem);
- if (left && k + off == lim - 1)
- dst[k + off] &= (1UL << left) - 1;
+ upper = src[k] << rem;
+ dst[k + off] = lower | upper;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
@@ -377,45 +370,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
- * bitmap_scnprintf - convert bitmap to an ASCII hex string.
- * @buf: byte buffer into which string is placed
- * @buflen: reserved size of @buf, in bytes
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- *
- * Exactly @nmaskbits bits are displayed. Hex digits are grouped into
- * comma-separated sets of eight digits per set. Returns the number of
- * characters which were written to *buf, excluding the trailing \0.
- */
-int bitmap_scnprintf(char *buf, unsigned int buflen,
- const unsigned long *maskp, int nmaskbits)
-{
- int i, word, bit, len = 0;
- unsigned long val;
- const char *sep = "";
- int chunksz;
- u32 chunkmask;
-
- chunksz = nmaskbits & (CHUNKSZ - 1);
- if (chunksz == 0)
- chunksz = CHUNKSZ;
-
- i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
- for (; i >= 0; i -= CHUNKSZ) {
- chunkmask = ((1ULL << chunksz) - 1);
- word = i / BITS_PER_LONG;
- bit = i % BITS_PER_LONG;
- val = (maskp[word] >> bit) & chunkmask;
- len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
- (chunksz+3)/4, val);
- chunksz = CHUNKSZ;
- sep = ",";
- }
- return len;
-}
-EXPORT_SYMBOL(bitmap_scnprintf);
-
-/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
@@ -528,65 +482,6 @@ int bitmap_parse_user(const char __user *ubuf,
}
EXPORT_SYMBOL(bitmap_parse_user);
-/*
- * bscnl_emit(buf, buflen, rbot, rtop, bp)
- *
- * Helper routine for bitmap_scnlistprintf(). Write decimal number
- * or range to buf, suppressing output past buf+buflen, with optional
- * comma-prefix. Return len of what was written to *buf, excluding the
- * trailing \0.
- */
-static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
-{
- if (len > 0)
- len += scnprintf(buf + len, buflen - len, ",");
- if (rbot == rtop)
- len += scnprintf(buf + len, buflen - len, "%d", rbot);
- else
- len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
- return len;
-}
-
-/**
- * bitmap_scnlistprintf - convert bitmap to list format ASCII string
- * @buf: byte buffer into which string is placed
- * @buflen: reserved size of @buf, in bytes
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- *
- * Output format is a comma-separated list of decimal numbers and
- * ranges. Consecutively set bits are shown as two hyphen-separated
- * decimal numbers, the smallest and largest bit numbers set in
- * the range. Output format is compatible with the format
- * accepted as input by bitmap_parselist().
- *
- * The return value is the number of characters which were written to *buf
- * excluding the trailing '\0', as per ISO C99's scnprintf.
- */
-int bitmap_scnlistprintf(char *buf, unsigned int buflen,
- const unsigned long *maskp, int nmaskbits)
-{
- int len = 0;
- /* current bit is 'cur', most recently seen range is [rbot, rtop] */
- int cur, rbot, rtop;
-
- if (buflen == 0)
- return 0;
- buf[0] = 0;
-
- rbot = cur = find_first_bit(maskp, nmaskbits);
- while (cur < nmaskbits) {
- rtop = cur;
- cur = find_next_bit(maskp, nmaskbits, cur+1);
- if (cur >= nmaskbits || cur > rtop + 1) {
- len = bscnl_emit(buf, buflen, rbot, rtop, len);
- rbot = cur;
- }
- }
- return len;
-}
-EXPORT_SYMBOL(bitmap_scnlistprintf);
-
/**
* bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
* @list: indicates whether the bitmap must be list
@@ -605,8 +500,8 @@ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int n = 0;
if (len > 1) {
- n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
- bitmap_scnprintf(buf, len, maskp, nmaskbits);
+ n = list ? scnprintf(buf, len, "%*pbl", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb", nmaskbits, maskp);
buf[n++] = '\n';
buf[n] = '\0';
}
@@ -744,10 +639,10 @@ EXPORT_SYMBOL(bitmap_parselist_user);
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
- * @pos: a bit position in @buf (0 <= @pos < @bits)
- * @bits: number of valid bit positions in @buf
+ * @pos: a bit position in @buf (0 <= @pos < @nbits)
+ * @nbits: number of valid bit positions in @buf
*
- * Map the bit at position @pos in @buf (of length @bits) to the
+ * Map the bit at position @pos in @buf (of length @nbits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
@@ -759,56 +654,40 @@ EXPORT_SYMBOL(bitmap_parselist_user);
*
* The bit positions 0 through @bits are valid positions in @buf.
*/
-static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
+static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
{
- int i, ord;
-
- if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+ if (pos >= nbits || !test_bit(pos, buf))
return -1;
- i = find_first_bit(buf, bits);
- ord = 0;
- while (i < pos) {
- i = find_next_bit(buf, bits, i + 1);
- ord++;
- }
- BUG_ON(i != pos);
-
- return ord;
+ return __bitmap_weight(buf, pos);
}
/**
* bitmap_ord_to_pos - find position of n-th set bit in bitmap
* @buf: pointer to bitmap
* @ord: ordinal bit position (n-th set bit, n >= 0)
- * @bits: number of valid bit positions in @buf
+ * @nbits: number of valid bit positions in @buf
*
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * Value of @ord should be in range 0 <= @ord < weight(buf), else
- * results are undefined.
+ * Value of @ord should be in range 0 <= @ord < weight(buf). If @ord
+ * >= weight(buf), returns @nbits.
*
* If for example, just bits 4 through 7 are set in @buf, then @ord
* values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord values return undefined values. When @ord value 3
+ * and all other @ord values return @nbits. When @ord value 3
* gets mapped to (returns) @pos value 7 in this example, that means
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
*
- * The bit positions 0 through @bits are valid positions in @buf.
+ * The bit positions 0 through @nbits-1 are valid positions in @buf.
*/
-int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits)
{
- int pos = 0;
-
- if (ord >= 0 && ord < bits) {
- int i;
+ unsigned int pos;
- for (i = find_first_bit(buf, bits);
- i < bits && ord > 0;
- i = find_next_bit(buf, bits, i + 1))
- ord--;
- if (i < bits && ord == 0)
- pos = i;
- }
+ for (pos = find_first_bit(buf, nbits);
+ pos < nbits && ord;
+ pos = find_next_bit(buf, nbits, pos + 1))
+ ord--;
return pos;
}
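The rewritten helpers boil down to two observations: the ordinal of a set bit is the weight (population count) of the bits below it, and the position of the n-th set bit is found by walking set bits until n reaches zero. A standalone single-word model of both, not the kernel implementation:

    /* Userspace model of bitmap_pos_to_ord()/bitmap_ord_to_pos()
     * for a single 32-bit word; illustrative only. */
    #include <stdio.h>
    #include <stdint.h>

    static int pos_to_ord(uint32_t w, unsigned pos)
    {
            if (pos >= 32 || !(w & (1u << pos)))
                    return -1;
            /* number of set bits below pos (GCC/Clang builtin) */
            return __builtin_popcount(w & ((1u << pos) - 1));
    }

    static unsigned ord_to_pos(uint32_t w, unsigned ord)
    {
            unsigned pos;

            for (pos = 0; pos < 32; pos++)
                    if ((w & (1u << pos)) && ord-- == 0)
                            return pos;
            return 32;                       /* like returning nbits */
    }

    int main(void)
    {
            uint32_t w = 0xf0;               /* bits 4..7 set */

            printf("%d %u\n", pos_to_ord(w, 6), ord_to_pos(w, 3)); /* 2 7 */
            return 0;
    }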
@@ -819,7 +698,7 @@ int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
- * @bits: number of bits in each of these bitmaps
+ * @nbits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
@@ -847,22 +726,22 @@ int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
- int bits)
+ unsigned int nbits)
{
- int oldbit, w;
+ unsigned int oldbit, w;
if (dst == src) /* following doesn't handle inplace remaps */
return;
- bitmap_zero(dst, bits);
+ bitmap_zero(dst, nbits);
- w = bitmap_weight(new, bits);
- for_each_set_bit(oldbit, src, bits) {
- int n = bitmap_pos_to_ord(old, oldbit, bits);
+ w = bitmap_weight(new, nbits);
+ for_each_set_bit(oldbit, src, nbits) {
+ int n = bitmap_pos_to_ord(old, oldbit, nbits);
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
- set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
+ set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
@@ -1006,9 +885,9 @@ EXPORT_SYMBOL(bitmap_bitremap);
* All bits in @dst not set by the above rule are cleared.
*/
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
- const unsigned long *relmap, int bits)
+ const unsigned long *relmap, unsigned int bits)
{
- int n, m; /* same meaning as in above comment */
+ unsigned int n, m; /* same meaning as in above comment */
if (dst == orig) /* following doesn't handle inplace mappings */
return;
@@ -1039,22 +918,22 @@ EXPORT_SYMBOL(bitmap_onto);
* @dst: resulting smaller bitmap
* @orig: original larger bitmap
* @sz: specified size
- * @bits: number of bits in each of these bitmaps
+ * @nbits: number of bits in each of these bitmaps
*
* For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
* Clear all other bits in @dst. See further the comment and
* Example [2] for bitmap_onto() for why and how to use this.
*/
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
- int sz, int bits)
+ unsigned int sz, unsigned int nbits)
{
- int oldbit;
+ unsigned int oldbit;
if (dst == orig) /* following doesn't handle inplace mappings */
return;
- bitmap_zero(dst, bits);
+ bitmap_zero(dst, nbits);
- for_each_set_bit(oldbit, orig, bits)
+ for_each_set_bit(oldbit, orig, nbits)
set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);
@@ -1207,16 +1086,17 @@ EXPORT_SYMBOL(bitmap_allocate_region);
*
* Require nbits % BITS_PER_LONG == 0.
*/
-void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
+#ifdef __BIG_ENDIAN
+void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
- unsigned long *d = dst;
- int i;
+ unsigned int i;
for (i = 0; i < nbits/BITS_PER_LONG; i++) {
if (BITS_PER_LONG == 64)
- d[i] = cpu_to_le64(src[i]);
+ dst[i] = cpu_to_le64(src[i]);
else
- d[i] = cpu_to_le32(src[i]);
+ dst[i] = cpu_to_le32(src[i]);
}
}
EXPORT_SYMBOL(bitmap_copy_le);
+#endif
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 3956203456d4..40ffda94cc5d 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -1,3 +1,4 @@
+#ifndef CONFIG_HAVE_ARCH_BITREVERSE
#include <linux/types.h>
#include <linux/module.h>
#include <linux/bitrev.h>
@@ -42,18 +43,4 @@ const u8 byte_rev_table[256] = {
};
EXPORT_SYMBOL_GPL(byte_rev_table);
-u16 bitrev16(u16 x)
-{
- return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
-}
-EXPORT_SYMBOL(bitrev16);
-
-/**
- * bitrev32 - reverse the order of bits in a u32 value
- * @x: value to be bit-reversed
- */
-u32 bitrev32(u32 x)
-{
- return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16);
-}
-EXPORT_SYMBOL(bitrev32);
+#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
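The removed bitrev16()/bitrev32() are just compositions of the byte-reversal primitive; with HAVE_ARCH_BITREVERSE an architecture can supply a single hardware instruction (for example ARM's rbit) instead of the table lookup. A standalone model of the generic composition:

    /* Userspace model of bit reversal built from a byte-reversal
     * primitive, mirroring the removed bitrev16()/bitrev32(). */
    #include <stdio.h>
    #include <stdint.h>

    static uint8_t bitrev8(uint8_t x)
    {
            uint8_t r = 0;
            int i;

            for (i = 0; i < 8; i++)          /* a table lookup in the kernel */
                    r |= ((x >> i) & 1) << (7 - i);
            return r;
    }

    static uint16_t bitrev16(uint16_t x)
    {
            return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
    }

    static uint32_t bitrev32(uint32_t x)
    {
            return ((uint32_t)bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16);
    }

    int main(void)
    {
            printf("%08x\n", bitrev32(0x00000001));  /* prints 80000000 */
            return 0;
    }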
diff --git a/lib/checksum.c b/lib/checksum.c
index 129775eb6de6..8b39e86dbab5 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
EXPORT_SYMBOL(csum_partial_copy);
#ifndef csum_tcpudp_nofold
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
#else
s += (proto + len) << 8;
#endif
- s += (s >> 32);
- return (__force __wsum)s;
+ return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
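from64to32() folds a 64-bit accumulator down to 32 bits with an end-around carry, which the ones'-complement checksum requires; the previous single `s += (s >> 32)` could leave a carry above bit 31 that the final truncation then dropped. A standalone model with a value that shows the difference:

    /* Userspace model of from64to32(): fold a 64-bit sum to 32 bits
     * while keeping the end-around carry. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t from64to32(uint64_t x)
    {
            x = (x & 0xffffffff) + (x >> 32);   /* 32 bits plus carry */
            x = (x & 0xffffffff) + (x >> 32);   /* absorb the final carry */
            return (uint32_t)x;
    }

    int main(void)
    {
            uint64_t s = 0xffffffff00000001ULL;

            /* a single fold truncates to 0x00000000 and loses the carry;
             * the double fold yields 0x00000001 */
            printf("%08x\n", from64to32(s));
            return 0;
    }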
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 527799d44476..d8f3d3150603 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -641,7 +641,7 @@ static __init int ddebug_setup_query(char *str)
__setup("ddebug_query=", ddebug_setup_query);
/*
- * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the
+ * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
* command text from userspace, parses and executes it.
*/
#define USER_BUF_PAGE 4096
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index 0777c5a45fa0..f346715e2255 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -3,12 +3,12 @@
*
* Copyright (c) 2011, Tom Herbert <therbert@google.com>
*/
-#include <linux/module.h>
#include <linux/types.h>
-#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/dynamic_queue_limits.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 71fcfcd96410..d83a372fa76f 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
if (CRC_LE_BITS > 1) {
crc32init_le();
- printf("static u32 __cacheline_aligned "
+ printf("static const u32 ____cacheline_aligned "
"crc32table_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
if (CRC_BE_BITS > 1) {
crc32init_be();
- printf("static u32 __cacheline_aligned "
+ printf("static const u32 ____cacheline_aligned "
"crc32table_be[%d][%d] = {",
BE_TABLE_ROWS, BE_TABLE_SIZE);
output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
}
if (CRC_LE_BITS > 1) {
crc32cinit_le();
- printf("static u32 __cacheline_aligned "
+ printf("static const u32 ____cacheline_aligned "
"crc32ctable_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 2e65d206b01c..d214866eeea2 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -34,7 +34,6 @@
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
@@ -415,7 +414,7 @@ bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size)
{
bool found = false;
- unsigned long end = start + size;
+ unsigned long end = start + size - 1;
struct gen_pool_chunk *chunk;
rcu_read_lock();
@@ -587,6 +586,8 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
struct gen_pool **ptr, *pool;
ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
pool = gen_pool_create(min_alloc_order, nid);
if (pool) {
diff --git a/lib/halfmd4.c b/lib/halfmd4.c
index 66d0ee8b7776..a8fe6274a13c 100644
--- a/lib/halfmd4.c
+++ b/lib/halfmd4.c
@@ -1,4 +1,4 @@
-#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/cryptohash.h>
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 270773b91923..7ea09699855d 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -97,63 +97,79 @@ EXPORT_SYMBOL(bin2hex);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ *
+ * Return:
+ * The number of bytes placed in the buffer, not counting the terminating NUL. If the
+ * output was truncated, then the return value is the number of bytes
+ * (excluding the terminating NUL) which would have been written to the final
+ * string if enough space had been available.
*/
-void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
- int groupsize, char *linebuf, size_t linebuflen,
- bool ascii)
+int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii)
{
const u8 *ptr = buf;
+ int ngroups;
u8 ch;
int j, lx = 0;
int ascii_column;
+ int ret;
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
- if (!len)
- goto nil;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
+ if (!is_power_of_2(groupsize) || groupsize > 8)
+ groupsize = 1;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
- switch (groupsize) {
- case 8: {
- const u64 *ptr8 = buf;
- int ngroups = len / groupsize;
+ ngroups = len / groupsize;
+ ascii_column = rowsize * 2 + rowsize / groupsize + 1;
- for (j = 0; j < ngroups; j++)
- lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%s%16.16llx", j ? " " : "",
- (unsigned long long)*(ptr8 + j));
- ascii_column = 17 * ngroups + 2;
- break;
- }
+ if (!linebuflen)
+ goto overflow1;
- case 4: {
- const u32 *ptr4 = buf;
- int ngroups = len / groupsize;
+ if (!len)
+ goto nil;
- for (j = 0; j < ngroups; j++)
- lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%s%8.8x", j ? " " : "", *(ptr4 + j));
- ascii_column = 9 * ngroups + 2;
- break;
- }
+ if (groupsize == 8) {
+ const u64 *ptr8 = buf;
- case 2: {
- const u16 *ptr2 = buf;
- int ngroups = len / groupsize;
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ (unsigned long long)*(ptr8 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 4) {
+ const u32 *ptr4 = buf;
- for (j = 0; j < ngroups; j++)
- lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%s%4.4x", j ? " " : "", *(ptr2 + j));
- ascii_column = 5 * ngroups + 2;
- break;
- }
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%8.8x", j ? " " : "",
+ *(ptr4 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 2) {
+ const u16 *ptr2 = buf;
- default:
- for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%4.4x", j ? " " : "",
+ *(ptr2 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else {
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 3)
+ goto overflow2;
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
@@ -161,21 +177,28 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
}
if (j)
lx--;
-
- ascii_column = 3 * rowsize + 2;
- break;
}
if (!ascii)
goto nil;
- while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+ while (lx < ascii_column) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
linebuf[lx++] = ' ';
- for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
+ }
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
+ linebuf[lx] = '\0';
+ return lx;
+overflow2:
linebuf[lx++] = '\0';
+overflow1:
+ return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
EXPORT_SYMBOL(hex_dump_to_buffer);
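hex_dump_to_buffer() now reports truncation the way snprintf() does: on overflow it returns the length the full line would have needed rather than silently stopping. A hedged kernel-style sketch of a caller checking for that; the wrapper function and its arguments are hypothetical:

    /* Sketch only: dump_one_line() and its arguments are hypothetical;
     * hex_dump_to_buffer() and pr_warn() are real. */
    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void dump_one_line(const void *data, size_t len)
    {
            char line[32];
            int needed;

            needed = hex_dump_to_buffer(data, len, 16, 1,
                                        line, sizeof(line), true);
            if (needed >= (int)sizeof(line))
                    pr_warn("hex dump truncated (%d bytes needed)\n", needed);
    }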
diff --git a/lib/idr.c b/lib/idr.c
index e654aebd5f80..5335c43adf46 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -30,7 +30,6 @@
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
-#include <linux/hardirq.h>
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index f367f9ad544c..c85f6600a5f8 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -1,7 +1,7 @@
-#include <linux/init.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
diff --git a/lib/iovec.c b/lib/iovec.c
deleted file mode 100644
index 2d99cb4a5006..000000000000
--- a/lib/iovec.c
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-
-/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
- while (len > 0) {
- if (iov->iov_len) {
- int copy = min_t(unsigned int, len, iov->iov_len);
- if (copy_from_user(kdata, iov->iov_base, copy))
- return -EFAULT;
- len -= copy;
- kdata += copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
-/*
- * Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
- int offset, int len)
-{
- int copy;
- for (; len > 0; ++iov) {
- /* Skip over the finished iovecs */
- if (unlikely(offset >= iov->iov_len)) {
- offset -= iov->iov_len;
- continue;
- }
- copy = min_t(unsigned int, iov->iov_len - offset, len);
- if (copy_to_user(iov->iov_base + offset, kdata, copy))
- return -EFAULT;
- offset = 0;
- kdata += copy;
- len -= copy;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
- int offset, int len)
-{
- /* No data? Done! */
- if (len == 0)
- return 0;
-
- /* Skip over the finished iovecs */
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- iov++;
- }
-
- while (len > 0) {
- u8 __user *base = iov->iov_base + offset;
- int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
- offset = 0;
- if (copy_from_user(kdata, base, copy))
- return -EFAULT;
- len -= copy;
- kdata += copy;
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9ebf9e20de53..f6c2c1e7779c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -20,7 +20,6 @@
#include <linux/export.h>
#include <linux/kmod.h>
#include <linux/slab.h>
-#include <linux/user_namespace.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
diff --git a/lib/lcm.c b/lib/lcm.c
index 51cc6b13cd52..e97dbd51e756 100644
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -1,4 +1,4 @@
-#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <linux/gcd.h>
#include <linux/export.h>
#include <linux/lcm.h>
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 12bcba1c8612..b29015102698 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -2,9 +2,11 @@
#define pr_fmt(fmt) "list_sort_test: " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/string.h>
#include <linux/list_sort.h>
-#include <linux/slab.h>
#include <linux/list.h>
#define MAX_LIST_LENGTH_BITS 20
@@ -146,6 +148,7 @@ EXPORT_SYMBOL(list_sort);
#ifdef CONFIG_TEST_LIST_SORT
+#include <linux/slab.h>
#include <linux/random.h>
/*
diff --git a/lib/llist.c b/lib/llist.c
index f76196d07409..0b0e9779d675 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -24,7 +24,6 @@
*/
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/interrupt.h>
#include <linux/llist.h>
diff --git a/lib/md5.c b/lib/md5.c
index 958a3c15923c..bb0cd01d356d 100644
--- a/lib/md5.c
+++ b/lib/md5.c
@@ -1,4 +1,4 @@
-#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/cryptohash.h>
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
index 1871e7b61ca0..d25e9e96c310 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/mpi/mpi-cmp.c
@@ -57,14 +57,12 @@ int mpi_cmp(MPI u, MPI v)
if (usize != vsize && !u->sign && !v->sign)
return usize - vsize;
if (usize != vsize && u->sign && v->sign)
- return vsize + usize;
+ return vsize - usize;
if (!usize)
return 0;
cmp = mpihelp_cmp(u->d, v->d, usize);
- if (!cmp)
- return 0;
- if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
- return 1;
- return -1;
+ if (u->sign)
+ return -cmp;
+ return cmp;
}
EXPORT_SYMBOL_GPL(mpi_cmp);
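The fixed mpi_cmp() is a plain sign-magnitude comparison: when both operands are negative, the larger magnitude is the smaller number, so both the size difference and the limb comparison get negated. A standalone model on ordinary integers, assuming same-sign operands (the case the hunk above deals with):

    /* Userspace model of sign-magnitude comparison, mirroring the
     * fixed mpi_cmp() logic; illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>

    /* returns <0, 0, >0 like mpi_cmp(); assumes u and v have the same sign */
    static int sm_cmp(int u, int v)
    {
            int cmp = abs(u) - abs(v);         /* magnitude compare (mpihelp_cmp) */

            return (u < 0) ? -cmp : cmp;       /* negate for negative numbers */
    }

    int main(void)
    {
            printf("%d\n", sm_cmp(-5, -3));    /* negative result: -5 < -3 */
            printf("%d\n", sm_cmp(5, 3));      /* positive result:  5 > 3  */
            return 0;
    }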
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 60cf765628e9..c65dd1bff45a 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -84,7 +84,7 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
do { \
mpi_size_t _i; \
for (_i = 0; _i < (n); _i++) \
- (d)[_i] = (d)[_i]; \
+ (d)[_i] = (s)[_i]; \
} while (0)
#define MPN_COPY_DECR(d, s, n) \
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 9c3e85ff0a6c..76a1b59523ab 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 93d145e5539c..f75715131f20 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -19,13 +19,10 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/hardirq.h>
-#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>
diff --git a/lib/plist.c b/lib/plist.c
index d408e774b746..3a30c53db061 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -25,7 +25,6 @@
#include <linux/bug.h>
#include <linux/plist.h>
-#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_PI_LIST
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3291a8e37490..3d2aa27b845b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
-#include <linux/hardirq.h> /* in_interrupt() */
+#include <linux/preempt_mask.h> /* in_interrupt() */
/*
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 7d0e5cd7b570..dbef2314901e 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -89,10 +89,10 @@ void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX2
&raid6_recov_avx2,
#endif
+#ifdef CONFIG_AS_SSSE3
&raid6_recov_ssse3,
#endif
&raid6_recov_intx1,
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
index e1eea433a493..53fe3d7bdfb3 100644
--- a/lib/raid6/recov_avx2.c
+++ b/lib/raid6/recov_avx2.c
@@ -8,7 +8,7 @@
* of the License.
*/
-#if CONFIG_AS_AVX2
+#ifdef CONFIG_AS_AVX2
#include <linux/raid/pq.h>
#include "x86.h"
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index a9168328f03b..cda33e56a5e3 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -7,6 +7,8 @@
* of the License.
*/
+#ifdef CONFIG_AS_SSSE3
+
#include <linux/raid/pq.h>
#include "x86.h"
@@ -330,3 +332,7 @@ const struct raid6_recov_calls raid6_recov_ssse3 = {
#endif
.priority = 1,
};
+
+#else
+#warning "your version of binutils lacks SSSE3 support"
+#endif
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c3c723e902b..9cc4c4a90d00 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,7 +1,7 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
* Based on the following paper:
@@ -23,94 +23,203 @@
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
+#include <linux/err.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL
+#define BUCKET_LOCKS_PER_CPU 128UL
-#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+/* Base bits plus 1 bit for nulls marker */
+#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
-#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+enum {
+ RHT_LOCK_NORMAL,
+ RHT_LOCK_NESTED,
+};
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contain
+ * entries which link to the same bucket of the old table during resizing.
+ * This simplifies the locking, as taking the bucket lock in both
+ * tables during a resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
- return ht->p.mutex_is_held(ht->p.parent);
+ return &tbl->locks[hash & tbl->locks_mask];
}
-EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
-#endif
static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
return (void *) he - ht->p.head_offset;
}
-static u32 __hashfn(const struct rhashtable *ht, const void *key,
- u32 len, u32 hsize)
+static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
+{
+ return hash & (tbl->size - 1);
+}
+
+static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
- u32 h;
+ u32 hash;
- h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+ if (unlikely(!ht->p.key_len))
+ hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+ else
+ hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
+ ht->p.hash_rnd);
- return h & (hsize - 1);
+ return hash >> HASH_RESERVED_SPACE;
}
-/**
- * rhashtable_hashfn - compute hash for key of given length
- * @ht: hash table to compute for
- * @key: pointer to key
- * @len: length of key
- *
- * Computes the hash value using the hash function provided in the 'hashfn'
- * of struct rhashtable_params. The returned value is guaranteed to be
- * smaller than the number of buckets in the hash table.
- */
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
- struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
+}
- return __hashfn(ht, key, len, tbl->size);
+static u32 head_hashfn(const struct rhashtable *ht,
+ const struct bucket_table *tbl,
+ const struct rhash_head *he)
+{
+ return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}
-EXPORT_SYMBOL_GPL(rhashtable_hashfn);
-static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+#ifdef CONFIG_PROVE_LOCKING
+static void debug_dump_buckets(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
{
- if (unlikely(!ht->p.key_len)) {
- u32 h;
+ struct rhash_head *he;
+ unsigned int i, hash;
- h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+ for (i = 0; i < tbl->size; i++) {
+ pr_warn(" [Bucket %d] ", i);
+ rht_for_each_rcu(he, tbl, i) {
+ hash = head_hashfn(ht, tbl, he);
+ pr_cont("[hash = %#x, lock = %p] ",
+ hash, bucket_lock(tbl, hash));
+ }
+ pr_cont("\n");
+ }
+
+}
+
+static void debug_dump_table(struct rhashtable *ht,
+ const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ struct bucket_table *old_tbl, *future_tbl;
+
+ pr_emerg("BUG: lock for hash %#x in table %p not held\n",
+ hash, tbl);
- return h & (hsize - 1);
+ rcu_read_lock();
+ future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ old_tbl = rht_dereference_rcu(ht->tbl, ht);
+ if (future_tbl != old_tbl) {
+ pr_warn("Future table %p (size: %zd)\n",
+ future_tbl, future_tbl->size);
+ debug_dump_buckets(ht, future_tbl);
}
- return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+ pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
+ debug_dump_buckets(ht, old_tbl);
+
+ rcu_read_unlock();
}
-/**
- * rhashtable_obj_hashfn - compute hash for hashed object
- * @ht: hash table to compute for
- * @ptr: pointer to hashed object
- *
- * Computes the hash value using the hash function `hashfn` respectively
- * 'obj_hashfn' depending on whether the hash table is set up to work with
- * a fixed length key. The returned value is guaranteed to be smaller than
- * the number of buckets in the hash table.
- */
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \
+ do { \
+ if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
+ debug_dump_table(HT, TBL, HASH); \
+ BUG(); \
+ } \
+ } while (0)
+
+int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
- struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
- return obj_hashfn(ht, ptr, tbl->size);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+ spinlock_t *lock = bucket_lock(tbl, hash);
+
+ return (debug_locks) ? lockdep_is_held(lock) : 1;
}
-EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
+#else
+#define ASSERT_RHT_MUTEX(HT)
+#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
+#endif
-static u32 head_hashfn(const struct rhashtable *ht,
- const struct rhash_head *he, u32 hsize)
+
+static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
- return obj_hashfn(ht, rht_obj(ht, he), hsize);
+ struct rhash_head __rcu **pprev;
+
+ for (pprev = &tbl->buckets[n];
+ !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
+ pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
+ ;
+
+ return pprev;
}
-static struct bucket_table *bucket_table_alloc(size_t nbuckets)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+{
+ unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+ unsigned int nr_pcpus = 2;
+#else
+ unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+ nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+ size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
+
+ /* Never allocate more than 0.5 locks per bucket */
+ size = min_t(unsigned int, size, tbl->size >> 1);
+
+ if (sizeof(spinlock_t) != 0) {
+#ifdef CONFIG_NUMA
+ if (size * sizeof(spinlock_t) > PAGE_SIZE)
+ tbl->locks = vmalloc(size * sizeof(spinlock_t));
+ else
+#endif
+ tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+ GFP_KERNEL);
+ if (!tbl->locks)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ spin_lock_init(&tbl->locks[i]);
+ }
+ tbl->locks_mask = size - 1;
+
+ return 0;
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+ if (tbl)
+ kvfree(tbl->locks);
+
+ kvfree(tbl);
+}
+
+static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
+ size_t nbuckets)
{
struct bucket_table *tbl;
size_t size;
+ int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
@@ -122,12 +231,15 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets)
tbl->size = nbuckets;
- return tbl;
-}
+ if (alloc_bucket_locks(ht, tbl) < 0) {
+ bucket_table_free(tbl);
+ return NULL;
+ }
-static void bucket_table_free(const struct bucket_table *tbl)
-{
- kvfree(tbl);
+ for (i = 0; i < nbuckets; i++)
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+
+ return tbl;
}
/**
@@ -138,7 +250,8 @@ static void bucket_table_free(const struct bucket_table *tbl)
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
/* Expand table when exceeding 75% load */
- return ht->nelems > (new_size / 4 * 3);
+ return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
+ (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);
@@ -150,41 +263,75 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
/* Shrink table beneath 30% load */
- return ht->nelems < (new_size * 3 / 10);
+ return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
+ (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
-static void hashtable_chain_unzip(const struct rhashtable *ht,
+static void lock_buckets(struct bucket_table *new_tbl,
+ struct bucket_table *old_tbl, unsigned int hash)
+ __acquires(old_bucket_lock)
+{
+ spin_lock_bh(bucket_lock(old_tbl, hash));
+ if (new_tbl != old_tbl)
+ spin_lock_bh_nested(bucket_lock(new_tbl, hash),
+ RHT_LOCK_NESTED);
+}
+
+static void unlock_buckets(struct bucket_table *new_tbl,
+ struct bucket_table *old_tbl, unsigned int hash)
+ __releases(old_bucket_lock)
+{
+ if (new_tbl != old_tbl)
+ spin_unlock_bh(bucket_lock(new_tbl, hash));
+ spin_unlock_bh(bucket_lock(old_tbl, hash));
+}
+
+/**
+ * Unlink entries on a bucket which hash to a different bucket.
+ *
+ * Returns true if no more work needs to be performed on the bucket.
+ */
+static bool hashtable_chain_unzip(struct rhashtable *ht,
const struct bucket_table *new_tbl,
- struct bucket_table *old_tbl, size_t n)
+ struct bucket_table *old_tbl,
+ size_t old_hash)
{
struct rhash_head *he, *p, *next;
- unsigned int h;
+ unsigned int new_hash, new_hash2;
+
+ ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
/* Old bucket empty, no work needed. */
- p = rht_dereference(old_tbl->buckets[n], ht);
- if (!p)
- return;
+ p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
+ old_hash);
+ if (rht_is_a_nulls(p))
+ return false;
+
+ new_hash = head_hashfn(ht, new_tbl, p);
+ ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
/* Advance the old bucket pointer one or more times until it
* reaches a node that doesn't hash to the same bucket as the
* previous node p. Call the previous node p;
*/
- h = head_hashfn(ht, p, new_tbl->size);
- rht_for_each(he, p->next, ht) {
- if (head_hashfn(ht, he, new_tbl->size) != h)
+ rht_for_each_continue(he, p->next, old_tbl, old_hash) {
+ new_hash2 = head_hashfn(ht, new_tbl, he);
+ ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
+
+ if (new_hash != new_hash2)
break;
p = he;
}
- RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+ rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
/* Find the subsequent node which does hash to the same
* bucket as node P, or NULL if no such node exists.
*/
- next = NULL;
- if (he) {
- rht_for_each(he, he->next, ht) {
- if (head_hashfn(ht, he, new_tbl->size) == h) {
+ INIT_RHT_NULLS_HEAD(next, ht, old_hash);
+ if (!rht_is_a_nulls(he)) {
+ rht_for_each_continue(he, he->next, old_tbl, old_hash) {
+ if (head_hashfn(ht, new_tbl, he) == new_hash) {
next = he;
break;
}
@@ -194,7 +341,20 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
/* Set p's next pointer to that subsequent node pointer,
* bypassing the nodes which do not hash to p's bucket
*/
- RCU_INIT_POINTER(p->next, next);
+ rcu_assign_pointer(p->next, next);
+
+ p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
+ old_hash);
+
+ return !rht_is_a_nulls(p);
+}
+
+static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
+ unsigned int new_hash, struct rhash_head *entry)
+{
+ ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+
+ rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}
/**
@@ -207,53 +367,57 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
*
- * The caller must ensure that no concurrent table mutations take place.
- * It is however valid to have concurrent lookups if they are RCU protected.
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
*/
int rhashtable_expand(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_head *he;
- unsigned int i, h;
- bool complete;
+ unsigned int new_hash, old_hash;
+ bool complete = false;
ASSERT_RHT_MUTEX(ht);
- if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
- return 0;
-
- new_tbl = bucket_table_alloc(old_tbl->size * 2);
+ new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
if (new_tbl == NULL)
return -ENOMEM;
- ht->shift++;
+ atomic_inc(&ht->shift);
+
+ /* Make insertions go into the new, empty table right away. Deletions
+ * and lookups will be attempted in both tables until we synchronize.
+ * The synchronize_rcu() guarantees that the new table has been picked up,
+ * so no new additions go into the old table while we relink.
+ */
+ rcu_assign_pointer(ht->future_tbl, new_tbl);
+ synchronize_rcu();
- /* For each new bucket, search the corresponding old bucket
- * for the first entry that hashes to the new bucket, and
- * link the new bucket to that entry. Since all the entries
- * which will end up in the new bucket appear in the same
- * old bucket, this constructs an entirely valid new hash
- * table, but with multiple buckets "zipped" together into a
- * single imprecise chain.
+ /* For each new bucket, search the corresponding old bucket for the
+ * first entry that hashes to the new bucket, and link the end of
+ * the newly formed bucket chain (containing entries added to the future
+ * table) to that entry. Since all the entries which will end up in
+ * the new bucket appear in the same old bucket, this constructs an
+ * entirely valid new hash table, but with multiple buckets
+ * "zipped" together into a single imprecise chain.
*/
- for (i = 0; i < new_tbl->size; i++) {
- h = i & (old_tbl->size - 1);
- rht_for_each(he, old_tbl->buckets[h], ht) {
- if (head_hashfn(ht, he, new_tbl->size) == i) {
- RCU_INIT_POINTER(new_tbl->buckets[i], he);
+ for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
+ old_hash = rht_bucket_index(old_tbl, new_hash);
+ lock_buckets(new_tbl, old_tbl, new_hash);
+ rht_for_each(he, old_tbl, old_hash) {
+ if (head_hashfn(ht, new_tbl, he) == new_hash) {
+ link_old_to_new(ht, new_tbl, new_hash, he);
break;
}
}
+ unlock_buckets(new_tbl, old_tbl, new_hash);
}
- /* Publish the new table pointer. Lookups may now traverse
- * the new table, but they will not benefit from any
- * additional efficiency until later steps unzip the buckets.
- */
- rcu_assign_pointer(ht->tbl, new_tbl);
-
/* Unzip interleaved hash chains */
- do {
+ while (!complete && !ht->being_destroyed) {
/* Wait for readers. All new readers will see the new
* table, and thus no references to the old table will
* remain.
@@ -265,12 +429,19 @@ int rhashtable_expand(struct rhashtable *ht)
* table): ...
*/
complete = true;
- for (i = 0; i < old_tbl->size; i++) {
- hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
- if (old_tbl->buckets[i] != NULL)
+ for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+ lock_buckets(new_tbl, old_tbl, old_hash);
+
+ if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
+ old_hash))
complete = false;
+
+ unlock_buckets(new_tbl, old_tbl, old_hash);
}
- } while (!complete);
+ }
+
+ rcu_assign_pointer(ht->tbl, new_tbl);
+ synchronize_rcu();
bucket_table_free(old_tbl);
return 0;
@@ -284,45 +455,51 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
*
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
*/
int rhashtable_shrink(struct rhashtable *ht)
{
- struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
- struct rhash_head __rcu **pprev;
- unsigned int i;
+ struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+ unsigned int new_hash;
ASSERT_RHT_MUTEX(ht);
- if (ht->shift <= ht->p.min_shift)
- return 0;
-
- ntbl = bucket_table_alloc(tbl->size / 2);
- if (ntbl == NULL)
+ new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+ if (new_tbl == NULL)
return -ENOMEM;
- ht->shift--;
+ rcu_assign_pointer(ht->future_tbl, new_tbl);
+ synchronize_rcu();
- /* Link each bucket in the new table to the first bucket
- * in the old table that contains entries which will hash
- * to the new bucket.
+ /* Link the first entry in the old bucket to the end of the
+ * bucket in the new table. As entries are concurrently being
+ * added to the new table, lock down the new bucket. As we
+ * always divide the size in half when shrinking, each bucket
+ * in the new table maps to exactly two buckets in the old
+ * table.
*/
- for (i = 0; i < ntbl->size; i++) {
- ntbl->buckets[i] = tbl->buckets[i];
+ for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
+ lock_buckets(new_tbl, tbl, new_hash);
- /* Link each bucket in the new table to the first bucket
- * in the old table that contains entries which will hash
- * to the new bucket.
- */
- for (pprev = &ntbl->buckets[i]; *pprev != NULL;
- pprev = &rht_dereference(*pprev, ht)->next)
- ;
- RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+ rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
+ tbl->buckets[new_hash]);
+ ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
+ rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
+ tbl->buckets[new_hash + new_tbl->size]);
+
+ unlock_buckets(new_tbl, tbl, new_hash);
}
/* Publish the new, valid hash table */
- rcu_assign_pointer(ht->tbl, ntbl);
+ rcu_assign_pointer(ht->tbl, new_tbl);
+ atomic_dec(&ht->shift);
/* Wait for readers. No new readers will have references to the
* old hash table.
@@ -335,59 +512,99 @@ int rhashtable_shrink(struct rhashtable *ht)
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
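
As the updated kernel-doc for rhashtable_expand()/rhashtable_shrink() notes, resizes are serialized by ht->mutex. A minimal sketch of a caller forcing a grow under that lock, the same pattern the self-test added later in this patch uses; force_expand() and my_ht are illustrative names, not part of the patch:

static int force_expand(struct rhashtable *my_ht)
{
        int err;

        mutex_lock(&my_ht->mutex);              /* exclude the deferred worker */
        err = rhashtable_expand(my_ht);         /* may return -ENOMEM */
        mutex_unlock(&my_ht->mutex);

        return err;
}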
-/**
- * rhashtable_insert - insert object into hash hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Will automatically grow the table via rhashtable_expand() if the the
- * grow_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
+static void rht_deferred_worker(struct work_struct *work)
{
- struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
- u32 hash;
+ struct rhashtable *ht;
+ struct bucket_table *tbl;
+ struct rhashtable_walker *walker;
- ASSERT_RHT_MUTEX(ht);
+ ht = container_of(work, struct rhashtable, run_work);
+ mutex_lock(&ht->mutex);
+ if (ht->being_destroyed)
+ goto unlock;
- hash = head_hashfn(ht, obj, tbl->size);
- RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
- rcu_assign_pointer(tbl->buckets[hash], obj);
- ht->nelems++;
+ tbl = rht_dereference(ht->tbl, ht);
+
+ list_for_each_entry(walker, &ht->walkers, list)
+ walker->resize = true;
if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
rhashtable_expand(ht);
+ else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+ rhashtable_shrink(ht);
+
+unlock:
+ mutex_unlock(&ht->mutex);
+}
+
+static void rhashtable_wakeup_worker(struct rhashtable *ht)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ size_t size = tbl->size;
+
+ /* Only adjust the table if no resizing is currently in progress. */
+ if (tbl == new_tbl &&
+ ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
+ (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
+ schedule_work(&ht->run_work);
+}
+
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+ struct bucket_table *tbl, u32 hash)
+{
+ struct rhash_head *head;
+
+ hash = rht_bucket_index(tbl, hash);
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+ ASSERT_BUCKET_LOCK(ht, tbl, hash);
+
+ if (rht_is_a_nulls(head))
+ INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+ else
+ RCU_INIT_POINTER(obj->next, head);
+
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+
+ atomic_inc(&ht->nelems);
+
+ rhashtable_wakeup_worker(ht);
}
-EXPORT_SYMBOL_GPL(rhashtable_insert);
/**
- * rhashtable_remove_pprev - remove object from hash table given previous element
+ * rhashtable_insert - insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
- * @pprev: pointer to previous element
*
- * Identical to rhashtable_remove() but caller is alreayd aware of the element
- * in front of the element to be deleted. This is in particular useful for
- * deletion when combined with walking or lookup.
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
*/
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
- struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *tbl, *old_tbl;
+ unsigned hash;
- ASSERT_RHT_MUTEX(ht);
+ rcu_read_lock();
- RCU_INIT_POINTER(*pprev, obj->next);
- ht->nelems--;
+ tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ old_tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
- if (ht->p.shrink_decision &&
- ht->p.shrink_decision(ht, tbl->size))
- rhashtable_shrink(ht);
+ lock_buckets(tbl, old_tbl, hash);
+ __rhashtable_insert(ht, obj, tbl, hash);
+ unlock_buckets(tbl, old_tbl, hash);
+
+ rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+EXPORT_SYMBOL_GPL(rhashtable_insert);
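
To illustrate the kernel-doc above, here is a minimal insertion sketch. struct my_obj and my_add() are hypothetical; the key and node offsets must match what was handed to rhashtable_init(). GFP_ATOMIC is used only because the comment states insertion is safe from atomic context:

struct my_obj {
        u32 key;
        struct rhash_head node;
};

static int my_add(struct rhashtable *ht, u32 key)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

        if (!obj)
                return -ENOMEM;

        obj->key = key;
        rhashtable_insert(ht, &obj->node);      /* takes the per bucket lock */
        return 0;
}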
/**
* rhashtable_remove - remove object from hash table
@@ -398,7 +615,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
* walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
*
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_shrink() if the
* shrink_decision function specified at rhashtable_init() returns true.
*
* The caller must ensure that no concurrent table mutations occur. It is
@@ -406,30 +623,87 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
*/
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
- struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *tbl, *new_tbl, *old_tbl;
struct rhash_head __rcu **pprev;
- struct rhash_head *he;
- u32 h;
+ struct rhash_head *he, *he2;
+ unsigned int hash, new_hash;
+ bool ret = false;
- ASSERT_RHT_MUTEX(ht);
-
- h = head_hashfn(ht, obj, tbl->size);
-
- pprev = &tbl->buckets[h];
- rht_for_each(he, tbl->buckets[h], ht) {
+ rcu_read_lock();
+ old_tbl = rht_dereference_rcu(ht->tbl, ht);
+ tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+
+ lock_buckets(new_tbl, old_tbl, new_hash);
+restart:
+ hash = rht_bucket_index(tbl, new_hash);
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
if (he != obj) {
pprev = &he->next;
continue;
}
- rhashtable_remove_pprev(ht, he, pprev);
- return true;
+ ASSERT_BUCKET_LOCK(ht, tbl, hash);
+
+ if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
+ !rht_is_a_nulls(obj->next) &&
+ head_hashfn(ht, tbl, obj->next) != hash) {
+ rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
+ } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
+ rht_for_each_continue(he2, obj->next, tbl, hash) {
+ if (head_hashfn(ht, tbl, he2) == hash) {
+ rcu_assign_pointer(*pprev, he2);
+ goto found;
+ }
+ }
+
+ rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
+ } else {
+ rcu_assign_pointer(*pprev, obj->next);
+ }
+
+found:
+ ret = true;
+ break;
+ }
+
+ /* The entry may be linked in either 'tbl', 'future_tbl', or both.
+ * 'future_tbl' only exists for a short period of time during
+ * resizing. Thus traversing both is fine and the added cost is
+ * very rare.
+ */
+ if (tbl != old_tbl) {
+ tbl = old_tbl;
+ goto restart;
+ }
+
+ unlock_buckets(new_tbl, old_tbl, new_hash);
+
+ if (ret) {
+ atomic_dec(&ht->nelems);
+ rhashtable_wakeup_worker(ht);
}
- return false;
+ rcu_read_unlock();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
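
A matching removal sketch, again using the hypothetical my_obj. Because lookups are RCU protected, the memory may only be released after a grace period; synchronize_rcu() is used here for simplicity (it sleeps), while kfree_rcu() with an embedded rcu_head would be the non-blocking alternative:

static bool my_del(struct rhashtable *ht, struct my_obj *obj)
{
        if (!rhashtable_remove(ht, &obj->node))
                return false;

        /* Lookups are RCU protected, so only free after a grace period;
         * kfree_rcu() would need a struct rcu_head added to my_obj.
         */
        synchronize_rcu();
        kfree(obj);
        return true;
}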
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+static bool rhashtable_compare(void *ptr, void *arg)
+{
+ struct rhashtable_compare_arg *x = arg;
+ struct rhashtable *ht = x->ht;
+
+ return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
+}
+
/**
* rhashtable_lookup - lookup key in hash table
* @ht: hash table
@@ -439,65 +713,313 @@ EXPORT_SYMBOL_GPL(rhashtable_remove);
 * for an entry with an identical key. The first matching entry is returned.
*
* This lookup function may only be used for fixed key hash table (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
*
- * Lookups may occur in parallel with hash mutations as long as the lookup is
- * guarded by rcu_read_lock(). The caller must take care of this.
+ * Lookups may occur in parallel with hashtable mutations and resizing.
*/
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
- const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
- struct rhash_head *he;
- u32 h;
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
BUG_ON(!ht->p.key_len);
- h = __hashfn(ht, key, ht->p.key_len, tbl->size);
- rht_for_each_rcu(he, tbl->buckets[h], ht) {
- if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
- ht->p.key_len))
- continue;
- return (void *) he - ht->p.head_offset;
- }
-
- return NULL;
+ return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
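
A fixed-key lookup sketch to go with the kernel-doc above. The key pointer must cover key_len bytes as configured in rhashtable_init(); holding rcu_read_lock() across the lookup and any use of the result keeps a concurrently removed object from being freed underneath the caller. my_key_present() is an invented helper:

static bool my_key_present(struct rhashtable *ht, u32 key)
{
        struct my_obj *obj;
        bool found;

        rcu_read_lock();
        obj = rhashtable_lookup(ht, &key);
        found = obj != NULL;
        rcu_read_unlock();

        return found;
}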
/**
* rhashtable_lookup_compare - search hash table with compare function
* @ht: hash table
- * @hash: hash value of desired entry
+ * @key: the pointer to the key
* @compare: compare function, must return true on match
* @arg: argument passed on to compare function
*
* Traverses the bucket chain behind the provided hash value and calls the
* specified compare function for each entry.
*
- * Lookups may occur in parallel with hash mutations as long as the lookup is
- * guarded by rcu_read_lock(). The caller must take care of this.
+ * Lookups may occur in parallel with hashtable mutations and resizing.
*
* Returns the first entry on which the compare function returned true.
*/
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
bool (*compare)(void *, void *), void *arg)
{
- const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ const struct bucket_table *tbl, *old_tbl;
struct rhash_head *he;
+ u32 hash;
- if (unlikely(hash >= tbl->size))
- return NULL;
+ rcu_read_lock();
- rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+ old_tbl = rht_dereference_rcu(ht->tbl, ht);
+ tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ hash = key_hashfn(ht, key, ht->p.key_len);
+restart:
+ rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
if (!compare(rht_obj(ht, he), arg))
continue;
- return (void *) he - ht->p.head_offset;
+ rcu_read_unlock();
+ return rht_obj(ht, he);
+ }
+
+ if (unlikely(tbl != old_tbl)) {
+ tbl = old_tbl;
+ goto restart;
}
+ rcu_read_unlock();
return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
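
For keys that need more than a memcmp(), a caller can supply its own compare callback, mirroring the rhashtable_compare() helper above. The callback and object layout below are illustrative only:

static bool my_cmp(void *ptr, void *arg)
{
        const struct my_obj *obj = ptr;
        const u32 *key = arg;

        return obj->key == *key;
}

static struct my_obj *my_find(struct rhashtable *ht, u32 key)
{
        /* 'key' is hashed with key_hashfn(), my_cmp() decides on a match */
        return rhashtable_lookup_compare(ht, &key, my_cmp, &key);
}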
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = rht_obj(ht, obj) + ht->p.key_offset,
+ };
+
+ BUG_ON(!ht->p.key_len);
+
+ return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
+ &arg);
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+
+/**
+ * rhashtable_lookup_compare_insert - search and insert object to hash table
+ * with compare function
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @compare: compare function, must return true on match
+ * @arg: argument passed on to compare function
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+ struct rhash_head *obj,
+ bool (*compare)(void *, void *),
+ void *arg)
+{
+ struct bucket_table *new_tbl, *old_tbl;
+ u32 new_hash;
+ bool success = true;
+
+ BUG_ON(!ht->p.key_len);
+
+ rcu_read_lock();
+ old_tbl = rht_dereference_rcu(ht->tbl, ht);
+ new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+
+ lock_buckets(new_tbl, old_tbl, new_hash);
+
+ if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
+ compare, arg)) {
+ success = false;
+ goto exit;
+ }
+
+ __rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+ unlock_buckets(new_tbl, old_tbl, new_hash);
+ rcu_read_unlock();
+
+ return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
+
+/**
+ * rhashtable_walk_init - Initialise an iterator
+ * @ht: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit if this function returns
+ * successfully.
+ */
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+ iter->ht = ht;
+ iter->p = NULL;
+ iter->slot = 0;
+ iter->skip = 0;
+
+ iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+ if (!iter->walker)
+ return -ENOMEM;
+
+ mutex_lock(&ht->mutex);
+ list_add(&iter->walker->list, &ht->walkers);
+ mutex_unlock(&ht->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter: Hash table Iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_init.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+ mutex_lock(&iter->ht->mutex);
+ list_del(&iter->walker->list);
+ mutex_unlock(&iter->ht->mutex);
+ kfree(iter->walker);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+
+/**
+ * rhashtable_walk_start - Start a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Start a hash table walk. Note that we take the RCU lock in all
+ * cases including when we return an error. So you must always call
+ * rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if a resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ */
+int rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+ rcu_read_lock();
+
+ if (iter->walker->resize) {
+ iter->slot = 0;
+ iter->skip = 0;
+ iter->walker->resize = false;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if a resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+ const struct bucket_table *tbl;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+ void *obj = NULL;
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ if (p) {
+ p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+ goto next;
+ }
+
+ for (; iter->slot < tbl->size; iter->slot++) {
+ int skip = iter->skip;
+
+ rht_for_each_rcu(p, tbl, iter->slot) {
+ if (!skip)
+ break;
+ skip--;
+ }
+
+next:
+ if (!rht_is_a_nulls(p)) {
+ iter->skip++;
+ iter->p = p;
+ obj = rht_obj(ht, p);
+ goto out;
+ }
+
+ iter->skip = 0;
+ }
+
+ iter->p = NULL;
+
+out:
+ if (iter->walker->resize) {
+ iter->p = NULL;
+ iter->slot = 0;
+ iter->skip = 0;
+ iter->walker->resize = false;
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_next);
+
+/**
+ * rhashtable_walk_stop - Finish a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Finish a hash table walk.
+ */
+void rhashtable_walk_stop(struct rhashtable_iter *iter)
+{
+ rcu_read_unlock();
+ iter->p = NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
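
Putting the iterator API together, a typical walk might look like the sketch below. Per the comments above, -EAGAIN only means the iterator was rewound by a resize and the walk may simply continue; my_obj and the printed field are again hypothetical:

static void my_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;
        int err;

        if (rhashtable_walk_init(ht, &iter))
                return;

        err = rhashtable_walk_start(&iter);     /* takes rcu_read_lock() */
        if (err && err != -EAGAIN)
                goto stop;

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* rewound after a resize */
                        break;
                }
                pr_info("visited key %u\n", obj->key);
        }

stop:
        rhashtable_walk_stop(&iter);            /* drops rcu_read_lock() */
        rhashtable_walk_exit(&iter);
}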
+
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
@@ -525,9 +1047,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = jhash,
- * #ifdef CONFIG_PROVE_LOCKING
- * .mutex_is_held = &my_mutex_is_held,
- * #endif
+ * .nulls_base = (1U << RHT_BASE_SHIFT),
* };
*
* Configuration Example 2: Variable length keys
@@ -547,9 +1067,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .head_offset = offsetof(struct test_obj, node),
* .hashfn = jhash,
* .obj_hashfn = my_hash_fn,
- * #ifdef CONFIG_PROVE_LOCKING
- * .mutex_is_held = &my_mutex_is_held,
- * #endif
* };
*/
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -563,24 +1080,40 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
(!params->key_len && !params->obj_hashfn))
return -EINVAL;
+ if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
+ return -EINVAL;
+
params->min_shift = max_t(size_t, params->min_shift,
ilog2(HASH_MIN_SIZE));
if (params->nelem_hint)
size = rounded_hashtable_size(params);
- tbl = bucket_table_alloc(size);
+ memset(ht, 0, sizeof(*ht));
+ mutex_init(&ht->mutex);
+ memcpy(&ht->p, params, sizeof(*params));
+ INIT_LIST_HEAD(&ht->walkers);
+
+ if (params->locks_mul)
+ ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
+ else
+ ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
+
+ tbl = bucket_table_alloc(ht, size);
if (tbl == NULL)
return -ENOMEM;
- memset(ht, 0, sizeof(*ht));
- ht->shift = ilog2(tbl->size);
- memcpy(&ht->p, params, sizeof(*params));
+ atomic_set(&ht->nelems, 0);
+ atomic_set(&ht->shift, ilog2(tbl->size));
RCU_INIT_POINTER(ht->tbl, tbl);
+ RCU_INIT_POINTER(ht->future_tbl, tbl);
if (!ht->p.hash_rnd)
get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+ if (ht->p.grow_decision || ht->p.shrink_decision)
+ INIT_WORK(&ht->run_work, rht_deferred_worker);
+
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
@@ -593,216 +1126,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
* has to make sure that no resizing may happen by unpublishing the hashtable
* and waiting for the quiescent cycle before releasing the bucket array.
*/
-void rhashtable_destroy(const struct rhashtable *ht)
+void rhashtable_destroy(struct rhashtable *ht)
{
- bucket_table_free(ht->tbl);
-}
-EXPORT_SYMBOL_GPL(rhashtable_destroy);
-
-/**************************************************************************
- * Self Test
- **************************************************************************/
-
-#ifdef CONFIG_TEST_RHASHTABLE
+ ht->being_destroyed = true;
-#define TEST_HT_SIZE 8
-#define TEST_ENTRIES 2048
-#define TEST_PTR ((void *) 0xdeadbeef)
-#define TEST_NEXPANDS 4
+ if (ht->p.grow_decision || ht->p.shrink_decision)
+ cancel_work_sync(&ht->run_work);
-#ifdef CONFIG_PROVE_LOCKING
-static int test_mutex_is_held(void *parent)
-{
- return 1;
+ mutex_lock(&ht->mutex);
+ bucket_table_free(rht_dereference(ht->tbl, ht));
+ mutex_unlock(&ht->mutex);
}
-#endif
-
-struct test_obj {
- void *ptr;
- int value;
- struct rhash_head node;
-};
-
-static int __init test_rht_lookup(struct rhashtable *ht)
-{
- unsigned int i;
-
- for (i = 0; i < TEST_ENTRIES * 2; i++) {
- struct test_obj *obj;
- bool expected = !(i % 2);
- u32 key = i;
-
- obj = rhashtable_lookup(ht, &key);
-
- if (expected && !obj) {
- pr_warn("Test failed: Could not find key %u\n", key);
- return -ENOENT;
- } else if (!expected && obj) {
- pr_warn("Test failed: Unexpected entry found for key %u\n",
- key);
- return -EEXIST;
- } else if (expected && obj) {
- if (obj->ptr != TEST_PTR || obj->value != i) {
- pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
- obj->ptr, TEST_PTR, obj->value, i);
- return -EINVAL;
- }
- }
- }
-
- return 0;
-}
-
-static void test_bucket_stats(struct rhashtable *ht, bool quiet)
-{
- unsigned int cnt, rcu_cnt, i, total = 0;
- struct test_obj *obj;
- struct bucket_table *tbl;
-
- tbl = rht_dereference_rcu(ht->tbl, ht);
- for (i = 0; i < tbl->size; i++) {
- rcu_cnt = cnt = 0;
-
- if (!quiet)
- pr_info(" [%#4x/%zu]", i, tbl->size);
-
- rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
- cnt++;
- total++;
- if (!quiet)
- pr_cont(" [%p],", obj);
- }
-
- rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
- rcu_cnt++;
-
- if (rcu_cnt != cnt)
- pr_warn("Test failed: Chain count mismach %d != %d",
- cnt, rcu_cnt);
-
- if (!quiet)
- pr_cont("\n [%#x] first element: %p, chain length: %u\n",
- i, tbl->buckets[i], cnt);
- }
-
- pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
- total, ht->nelems, TEST_ENTRIES);
-
- if (total != ht->nelems || total != TEST_ENTRIES)
- pr_warn("Test failed: Total count mismatch ^^^");
-}
-
-static int __init test_rhashtable(struct rhashtable *ht)
-{
- struct bucket_table *tbl;
- struct test_obj *obj, *next;
- int err;
- unsigned int i;
-
- /*
- * Insertion Test:
- * Insert TEST_ENTRIES into table with all keys even numbers
- */
- pr_info(" Adding %d keys\n", TEST_ENTRIES);
- for (i = 0; i < TEST_ENTRIES; i++) {
- struct test_obj *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj) {
- err = -ENOMEM;
- goto error;
- }
-
- obj->ptr = TEST_PTR;
- obj->value = i * 2;
-
- rhashtable_insert(ht, &obj->node);
- }
-
- rcu_read_lock();
- test_bucket_stats(ht, true);
- test_rht_lookup(ht);
- rcu_read_unlock();
-
- for (i = 0; i < TEST_NEXPANDS; i++) {
- pr_info(" Table expansion iteration %u...\n", i);
- rhashtable_expand(ht);
-
- rcu_read_lock();
- pr_info(" Verifying lookups...\n");
- test_rht_lookup(ht);
- rcu_read_unlock();
- }
-
- for (i = 0; i < TEST_NEXPANDS; i++) {
- pr_info(" Table shrinkage iteration %u...\n", i);
- rhashtable_shrink(ht);
-
- rcu_read_lock();
- pr_info(" Verifying lookups...\n");
- test_rht_lookup(ht);
- rcu_read_unlock();
- }
-
- rcu_read_lock();
- test_bucket_stats(ht, true);
- rcu_read_unlock();
-
- pr_info(" Deleting %d keys\n", TEST_ENTRIES);
- for (i = 0; i < TEST_ENTRIES; i++) {
- u32 key = i * 2;
-
- obj = rhashtable_lookup(ht, &key);
- BUG_ON(!obj);
-
- rhashtable_remove(ht, &obj->node);
- kfree(obj);
- }
-
- return 0;
-
-error:
- tbl = rht_dereference_rcu(ht->tbl, ht);
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
- kfree(obj);
-
- return err;
-}
-
-static int __init test_rht_init(void)
-{
- struct rhashtable ht;
- struct rhashtable_params params = {
- .nelem_hint = TEST_HT_SIZE,
- .head_offset = offsetof(struct test_obj, node),
- .key_offset = offsetof(struct test_obj, value),
- .key_len = sizeof(int),
- .hashfn = jhash,
-#ifdef CONFIG_PROVE_LOCKING
- .mutex_is_held = &test_mutex_is_held,
-#endif
- .grow_decision = rht_grow_above_75,
- .shrink_decision = rht_shrink_below_30,
- };
- int err;
-
- pr_info("Running resizable hashtable tests...\n");
-
- err = rhashtable_init(&ht, &params);
- if (err < 0) {
- pr_warn("Test failed: Unable to initialize hashtable: %d\n",
- err);
- return err;
- }
-
- err = test_rhashtable(&ht);
-
- rhashtable_destroy(&ht);
-
- return err;
-}
-
-subsys_initcall(test_rht_init);
-
-#endif /* CONFIG_TEST_RHASHTABLE */
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 4eedfedb9e31..88c0854bd752 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -91,42 +91,6 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
return ret;
}
-/**
- * seq_buf_bitmask - write a bitmask array in its ASCII representation
- * @s: seq_buf descriptor
- * @maskp: points to an array of unsigned longs that represent a bitmask
- * @nmaskbits: The number of bits that are valid in @maskp
- *
- * Writes a ASCII representation of a bitmask string into @s.
- *
- * Returns zero on success, -1 on overflow.
- */
-int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
- int nmaskbits)
-{
- unsigned int len = seq_buf_buffer_left(s);
- int ret;
-
- WARN_ON(s->size == 0);
-
- /*
- * Note, because bitmap_scnprintf() only returns the number of bytes
- * written and not the number that would be written, we use the last
- * byte of the buffer to let us know if we overflowed. There's a small
- * chance that the bitmap could have fit exactly inside the buffer, but
- * it's not that critical if that does happen.
- */
- if (len > 1) {
- ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
- if (ret < len) {
- s->len += ret;
- return 0;
- }
- }
- seq_buf_set_overflow(s);
- return -1;
-}
-
#ifdef CONFIG_BINARY_PRINTF
/**
* seq_buf_bprintf - Write the printf string from binary arguments
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 7de89f4a36cf..adc98e1825ba 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -6,7 +6,6 @@
*/
#include <linux/mm.h>
-#include <linux/nmi.h>
#include <linux/quicklist.h>
#include <linux/cma.h>
diff --git a/lib/sort.c b/lib/sort.c
index 926d00429ed2..43c9fe73ae2e 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -4,10 +4,9 @@
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/export.h>
#include <linux/sort.h>
-#include <linux/slab.h>
static void u32_swap(void *a, void *b, int size)
{
@@ -85,6 +84,7 @@ void sort(void *base, size_t num, size_t size,
EXPORT_SYMBOL(sort);
#if 0
+#include <linux/slab.h>
/* a simple boot-time regression test */
int cmpint(const void *a, const void *b)
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
index 8ac9bcc4289a..a904656f4fd7 100644
--- a/lib/stmp_device.c
+++ b/lib/stmp_device.c
@@ -15,7 +15,8 @@
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
#include <linux/stmp_device.h>
#define STMP_MODULE_CLKGATE (1 << 30)
diff --git a/lib/string.c b/lib/string.c
index 10063300b830..ce81aaec3839 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -58,14 +58,6 @@ int strncasecmp(const char *s1, const char *s2, size_t len)
}
EXPORT_SYMBOL(strncasecmp);
#endif
-#ifndef __HAVE_ARCH_STRNICMP
-#undef strnicmp
-int strnicmp(const char *s1, const char *s2, size_t len)
-{
- return strncasecmp(s1, s2, len);
-}
-EXPORT_SYMBOL(strnicmp);
-#endif
#ifndef __HAVE_ARCH_STRCASECMP
int strcasecmp(const char *s1, const char *s2)
@@ -321,12 +313,12 @@ EXPORT_SYMBOL(strchrnul);
*/
char *strrchr(const char *s, int c)
{
- const char *p = s + strlen(s);
- do {
- if (*p == (char)c)
- return (char *)p;
- } while (--p >= s);
- return NULL;
+ const char *last = NULL;
+ do {
+ if (*s == (char)c)
+ last = s;
+ } while (*s++);
+ return (char *)last;
}
EXPORT_SYMBOL(strrchr);
#endif
@@ -604,6 +596,11 @@ EXPORT_SYMBOL(memset);
* @s: Pointer to the start of the area.
* @count: The size of the area.
*
+ * Note: usually using memset() is just fine (!), but in cases
+ * where clearing out _local_ data at the end of a scope is
+ * necessary, memzero_explicit() should be used instead in
+ * order to prevent the compiler from optimising away zeroing.
+ *
* memzero_explicit() doesn't need an arch-specific version as
* it just invokes the one of memset() implicitly.
*/
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 58b78ba57439..8f8c4417f228 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -20,19 +20,18 @@
* @len: length of buffer
*
* This function returns a string formatted to 3 significant figures
- * giving the size in the required units. Returns 0 on success or
- * error on failure. @buf is always zero terminated.
+ * giving the size in the required units. @buf should have room for
+ * at least 9 bytes and will always be zero terminated.
*
*/
-int string_get_size(u64 size, const enum string_size_units units,
- char *buf, int len)
+void string_get_size(u64 size, const enum string_size_units units,
+ char *buf, int len)
{
static const char *const units_10[] = {
- "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL
+ "B", "kB", "MB", "GB", "TB", "PB", "EB"
};
static const char *const units_2[] = {
- "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
- NULL
+ "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
@@ -43,13 +42,13 @@ int string_get_size(u64 size, const enum string_size_units units,
[STRING_UNITS_2] = 1024,
};
int i, j;
- u64 remainder = 0, sf_cap;
+ u32 remainder = 0, sf_cap;
char tmp[8];
tmp[0] = '\0';
i = 0;
if (size >= divisor[units]) {
- while (size >= divisor[units] && units_str[units][i]) {
+ while (size >= divisor[units]) {
remainder = do_div(size, divisor[units]);
i++;
}
@@ -60,17 +59,14 @@ int string_get_size(u64 size, const enum string_size_units units,
if (j) {
remainder *= 1000;
- do_div(remainder, divisor[units]);
- snprintf(tmp, sizeof(tmp), ".%03lld",
- (unsigned long long)remainder);
+ remainder /= divisor[units];
+ snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
}
- snprintf(buf, len, "%lld%s %s", (unsigned long long)size,
+ snprintf(buf, len, "%u%s %s", (u32)size,
tmp, units_str[units][i]);
-
- return 0;
}
EXPORT_SYMBOL(string_get_size);
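
With the return value gone, callers only need to size the buffer as the updated comment asks (at least 9 bytes). A usage sketch; report_capacity() is an invented helper:

static void report_capacity(u64 bytes)
{
        char buf[10];   /* comment above asks for >= 9 bytes */

        string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
        pr_info("capacity: %s\n", buf);         /* e.g. "1.00 GiB" */
}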
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index bb2b201d6ad0..e0af6ff73d14 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
new file mode 100644
index 000000000000..daf29a390a89
--- /dev/null
+++ b/lib/test-hexdump.c
@@ -0,0 +1,180 @@
+/*
+ * Test cases for lib/hexdump.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const unsigned char data_b[] = {
+ '\xbe', '\x32', '\xdb', '\x7b', '\x0a', '\x18', '\x93', '\xb2', /* 00 - 07 */
+ '\x70', '\xba', '\xc4', '\x24', '\x7d', '\x83', '\x34', '\x9b', /* 08 - 0f */
+ '\xa6', '\x9c', '\x31', '\xad', '\x9c', '\x0f', '\xac', '\xe9', /* 10 - 17 */
+ '\x4c', '\xd1', '\x19', '\x99', '\x43', '\xb1', '\xaf', '\x0c', /* 18 - 1f */
+};
+
+static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
+
+static const char *test_data_1_le[] __initconst = {
+ "be", "32", "db", "7b", "0a", "18", "93", "b2",
+ "70", "ba", "c4", "24", "7d", "83", "34", "9b",
+ "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
+ "4c", "d1", "19", "99", "43", "b1", "af", "0c",
+};
+
+static const char *test_data_2_le[] __initconst = {
+ "32be", "7bdb", "180a", "b293",
+ "ba70", "24c4", "837d", "9b34",
+ "9ca6", "ad31", "0f9c", "e9ac",
+ "d14c", "9919", "b143", "0caf",
+};
+
+static const char *test_data_4_le[] __initconst = {
+ "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
+ "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
+};
+
+static const char *test_data_8_le[] __initconst = {
+ "b293180a7bdb32be", "9b34837d24c4ba70",
+ "e9ac0f9cad319ca6", "0cafb1439919d14c",
+};
+
+static void __init test_hexdump(size_t len, int rowsize, int groupsize,
+ bool ascii)
+{
+ char test[32 * 3 + 2 + 32 + 1];
+ char real[32 * 3 + 2 + 32 + 1];
+ char *p;
+ const char **result;
+ size_t l = len;
+ int gs = groupsize, rs = rowsize;
+ unsigned int i;
+
+ hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii);
+
+ if (rs != 16 && rs != 32)
+ rs = 16;
+
+ if (l > rs)
+ l = rs;
+
+ if (!is_power_of_2(gs) || gs > 8 || (len % gs != 0))
+ gs = 1;
+
+ if (gs == 8)
+ result = test_data_8_le;
+ else if (gs == 4)
+ result = test_data_4_le;
+ else if (gs == 2)
+ result = test_data_2_le;
+ else
+ result = test_data_1_le;
+
+ memset(test, ' ', sizeof(test));
+
+ /* hex dump */
+ p = test;
+ for (i = 0; i < l / gs; i++) {
+ const char *q = *result++;
+ size_t amount = strlen(q);
+
+ strncpy(p, q, amount);
+ p += amount + 1;
+ }
+ if (i)
+ p--;
+
+ /* ASCII part */
+ if (ascii) {
+ p = test + rs * 2 + rs / gs + 1;
+ strncpy(p, data_a, l);
+ p += l;
+ }
+
+ *p = '\0';
+
+ if (strcmp(test, real)) {
+ pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize);
+ pr_err("Result: '%s'\n", real);
+ pr_err("Expect: '%s'\n", test);
+ }
+}
+
+static void __init test_hexdump_set(int rowsize, bool ascii)
+{
+ size_t d = min_t(size_t, sizeof(data_b), rowsize);
+ size_t len = get_random_int() % d + 1;
+
+ test_hexdump(len, rowsize, 4, ascii);
+ test_hexdump(len, rowsize, 2, ascii);
+ test_hexdump(len, rowsize, 8, ascii);
+ test_hexdump(len, rowsize, 1, ascii);
+}
+
+static void __init test_hexdump_overflow(bool ascii)
+{
+ char buf[56];
+ const char *t = test_data_1_le[0];
+ size_t l = get_random_int() % sizeof(buf);
+ bool a;
+ int e, r;
+
+ memset(buf, ' ', sizeof(buf));
+
+ r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii);
+
+ if (ascii)
+ e = 50;
+ else
+ e = 2;
+ buf[e + 2] = '\0';
+
+ if (!l) {
+ a = r == e && buf[0] == ' ';
+ } else if (l < 3) {
+ a = r == e && buf[0] == '\0';
+ } else if (l < 4) {
+ a = r == e && !strcmp(buf, t);
+ } else if (ascii) {
+ if (l < 51)
+ a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' ';
+ else
+ a = r == e && buf[50] == '\0' && buf[49] == '.';
+ } else {
+ a = r == e && buf[e] == '\0';
+ }
+
+ if (!a) {
+ pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf));
+ pr_err("Result: '%s'\n", buf);
+ }
+}
+
+static int __init test_hexdump_init(void)
+{
+ unsigned int i;
+ int rowsize;
+
+ pr_info("Running tests...\n");
+
+ rowsize = (get_random_int() % 2 + 1) * 16;
+ for (i = 0; i < 16; i++)
+ test_hexdump_set(rowsize, false);
+
+ rowsize = (get_random_int() % 2 + 1) * 16;
+ for (i = 0; i < 16; i++)
+ test_hexdump_set(rowsize, true);
+
+ for (i = 0; i < 16; i++)
+ test_hexdump_overflow(false);
+
+ for (i = 0; i < 16; i++)
+ test_hexdump_overflow(true);
+
+ return -EINVAL;
+}
+module_init(test_hexdump_init);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
new file mode 100644
index 000000000000..098c08eddfab
--- /dev/null
+++ b/lib/test_kasan.c
@@ -0,0 +1,277 @@
+/*
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+static noinline void __init kmalloc_oob_right(void)
+{
+ char *ptr;
+ size_t size = 123;
+
+ pr_info("out-of-bounds to right\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ ptr[size] = 'x';
+ kfree(ptr);
+}
+
+static noinline void __init kmalloc_oob_left(void)
+{
+ char *ptr;
+ size_t size = 15;
+
+ pr_info("out-of-bounds to left\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ *ptr = *(ptr - 1);
+ kfree(ptr);
+}
+
+static noinline void __init kmalloc_node_oob_right(void)
+{
+ char *ptr;
+ size_t size = 4096;
+
+ pr_info("kmalloc_node(): out-of-bounds to right\n");
+ ptr = kmalloc_node(size, GFP_KERNEL, 0);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ ptr[size] = 0;
+ kfree(ptr);
+}
+
+static noinline void __init kmalloc_large_oob_rigth(void)
+{
+ char *ptr;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+
+ pr_info("kmalloc large allocation: out-of-bounds to right\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ ptr[size] = 0;
+ kfree(ptr);
+}
+
+static noinline void __init kmalloc_oob_krealloc_more(void)
+{
+ char *ptr1, *ptr2;
+ size_t size1 = 17;
+ size_t size2 = 19;
+
+ pr_info("out-of-bounds after krealloc more\n");
+ ptr1 = kmalloc(size1, GFP_KERNEL);
+ ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
+ if (!ptr1 || !ptr2) {
+ pr_err("Allocation failed\n");
+ kfree(ptr1);
+ return;
+ }
+
+ ptr2[size2] = 'x';
+ kfree(ptr2);
+}
+
+static noinline void __init kmalloc_oob_krealloc_less(void)
+{
+ char *ptr1, *ptr2;
+ size_t size1 = 17;
+ size_t size2 = 15;
+
+ pr_info("out-of-bounds after krealloc less\n");
+ ptr1 = kmalloc(size1, GFP_KERNEL);
+ ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
+ if (!ptr1 || !ptr2) {
+ pr_err("Allocation failed\n");
+ kfree(ptr1);
+ return;
+ }
+ ptr2[size1] = 'x';
+ kfree(ptr2);
+}
+
+static noinline void __init kmalloc_oob_16(void)
+{
+ struct {
+ u64 words[2];
+ } *ptr1, *ptr2;
+
+ pr_info("kmalloc out-of-bounds for 16-bytes access\n");
+ ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
+ ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+ if (!ptr1 || !ptr2) {
+ pr_err("Allocation failed\n");
+ kfree(ptr1);
+ kfree(ptr2);
+ return;
+ }
+ *ptr1 = *ptr2;
+ kfree(ptr1);
+ kfree(ptr2);
+}
+
+static noinline void __init kmalloc_oob_in_memset(void)
+{
+ char *ptr;
+ size_t size = 666;
+
+ pr_info("out-of-bounds in memset\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ memset(ptr, 0, size+5);
+ kfree(ptr);
+}
+
+static noinline void __init kmalloc_uaf(void)
+{
+ char *ptr;
+ size_t size = 10;
+
+ pr_info("use-after-free\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kfree(ptr);
+ *(ptr + 8) = 'x';
+}
+
+static noinline void __init kmalloc_uaf_memset(void)
+{
+ char *ptr;
+ size_t size = 33;
+
+ pr_info("use-after-free in memset\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kfree(ptr);
+ memset(ptr, 0, size);
+}
+
+static noinline void __init kmalloc_uaf2(void)
+{
+ char *ptr1, *ptr2;
+ size_t size = 43;
+
+ pr_info("use-after-free after another kmalloc\n");
+ ptr1 = kmalloc(size, GFP_KERNEL);
+ if (!ptr1) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kfree(ptr1);
+ ptr2 = kmalloc(size, GFP_KERNEL);
+ if (!ptr2) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ ptr1[40] = 'x';
+ kfree(ptr2);
+}
+
+static noinline void __init kmem_cache_oob(void)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache = kmem_cache_create("test_cache",
+ size, 0,
+ 0, NULL);
+ if (!cache) {
+ pr_err("Cache allocation failed\n");
+ return;
+ }
+ pr_info("out-of-bounds in kmem_cache_alloc\n");
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ pr_err("Allocation failed\n");
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ *p = p[size];
+ kmem_cache_free(cache, p);
+ kmem_cache_destroy(cache);
+}
+
+static char global_array[10];
+
+static noinline void __init kasan_global_oob(void)
+{
+ volatile int i = 3;
+ char *p = &global_array[ARRAY_SIZE(global_array) + i];
+
+ pr_info("out-of-bounds global variable\n");
+ *(volatile char *)p;
+}
+
+static noinline void __init kasan_stack_oob(void)
+{
+ char stack_array[10];
+ volatile int i = 0;
+ char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
+
+ pr_info("out-of-bounds on stack\n");
+ *(volatile char *)p;
+}
+
+static int __init kmalloc_tests_init(void)
+{
+ kmalloc_oob_right();
+ kmalloc_oob_left();
+ kmalloc_node_oob_right();
+ kmalloc_large_oob_rigth();
+ kmalloc_oob_krealloc_more();
+ kmalloc_oob_krealloc_less();
+ kmalloc_oob_16();
+ kmalloc_oob_in_memset();
+ kmalloc_uaf();
+ kmalloc_uaf_memset();
+ kmalloc_uaf2();
+ kmem_cache_oob();
+ kasan_stack_oob();
+ kasan_global_oob();
+ return -EAGAIN;
+}
+
+module_init(kmalloc_tests_init);
+MODULE_LICENSE("GPL");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
new file mode 100644
index 000000000000..1dfeba73fc74
--- /dev/null
+++ b/lib/test_rhashtable.c
@@ -0,0 +1,227 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+#include <linux/rhashtable.h>
+#include <linux/slab.h>
+
+
+#define TEST_HT_SIZE 8
+#define TEST_ENTRIES 2048
+#define TEST_PTR ((void *) 0xdeadbeef)
+#define TEST_NEXPANDS 4
+
+struct test_obj {
+ void *ptr;
+ int value;
+ struct rhash_head node;
+};
+
+static int __init test_rht_lookup(struct rhashtable *ht)
+{
+ unsigned int i;
+
+ for (i = 0; i < TEST_ENTRIES * 2; i++) {
+ struct test_obj *obj;
+ bool expected = !(i % 2);
+ u32 key = i;
+
+ obj = rhashtable_lookup(ht, &key);
+
+ if (expected && !obj) {
+ pr_warn("Test failed: Could not find key %u\n", key);
+ return -ENOENT;
+ } else if (!expected && obj) {
+ pr_warn("Test failed: Unexpected entry found for key %u\n",
+ key);
+ return -EEXIST;
+ } else if (expected && obj) {
+ if (obj->ptr != TEST_PTR || obj->value != i) {
+ pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
+ obj->ptr, TEST_PTR, obj->value, i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht, bool quiet)
+{
+ unsigned int cnt, rcu_cnt, i, total = 0;
+ struct rhash_head *pos;
+ struct test_obj *obj;
+ struct bucket_table *tbl;
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++) {
+ rcu_cnt = cnt = 0;
+
+ if (!quiet)
+ pr_info(" [%#4x/%zu]", i, tbl->size);
+
+ rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
+ cnt++;
+ total++;
+ if (!quiet)
+ pr_cont(" [%p],", obj);
+ }
+
+ rht_for_each_entry_rcu(obj, pos, tbl, i, node)
+ rcu_cnt++;
+
+ if (rcu_cnt != cnt)
+ pr_warn("Test failed: Chain count mismach %d != %d",
+ cnt, rcu_cnt);
+
+ if (!quiet)
+ pr_cont("\n [%#x] first element: %p, chain length: %u\n",
+ i, tbl->buckets[i], cnt);
+ }
+
+ pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n",
+ total, atomic_read(&ht->nelems), TEST_ENTRIES);
+
+ if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
+ pr_warn("Test failed: Total count mismatch ^^^");
+}
+
+static int __init test_rhashtable(struct rhashtable *ht)
+{
+ struct bucket_table *tbl;
+ struct test_obj *obj;
+ struct rhash_head *pos, *next;
+ int err;
+ unsigned int i;
+
+ /*
+ * Insertion Test:
+ * Insert TEST_ENTRIES into table with all keys even numbers
+ */
+ pr_info(" Adding %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ struct test_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ obj->ptr = TEST_PTR;
+ obj->value = i * 2;
+
+ rhashtable_insert(ht, &obj->node);
+ }
+
+ rcu_read_lock();
+ test_bucket_stats(ht, true);
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table expansion iteration %u...\n", i);
+ mutex_lock(&ht->mutex);
+ rhashtable_expand(ht);
+ mutex_unlock(&ht->mutex);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table shrinkage iteration %u...\n", i);
+ mutex_lock(&ht->mutex);
+ rhashtable_shrink(ht);
+ mutex_unlock(&ht->mutex);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ rcu_read_lock();
+ test_bucket_stats(ht, true);
+ rcu_read_unlock();
+
+ pr_info(" Deleting %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ u32 key = i * 2;
+
+ obj = rhashtable_lookup(ht, &key);
+ BUG_ON(!obj);
+
+ rhashtable_remove(ht, &obj->node);
+ kfree(obj);
+ }
+
+ return 0;
+
+error:
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
+ kfree(obj);
+
+ return err;
+}
+
+static int __init test_rht_init(void)
+{
+ struct rhashtable ht;
+ struct rhashtable_params params = {
+ .nelem_hint = TEST_HT_SIZE,
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(int),
+ .hashfn = jhash,
+ .nulls_base = (3U << RHT_BASE_SHIFT),
+ .grow_decision = rht_grow_above_75,
+ .shrink_decision = rht_shrink_below_30,
+ };
+ int err;
+
+ pr_info("Running resizable hashtable tests...\n");
+
+ err = rhashtable_init(&ht, &params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ return err;
+ }
+
+ err = test_rhashtable(&ht);
+
+ rhashtable_destroy(&ht);
+
+ return err;
+}
+
+module_init(test_rht_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index ec337f64f52d..b235c96167d3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -114,8 +114,9 @@ int skip_atoi(const char **s)
{
int i = 0;
- while (isdigit(**s))
+ do {
i = i*10 + *((*s)++) - '0';
+ } while (isdigit(**s));
return i;
}
@@ -793,6 +794,87 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
}
static noinline_for_stack
+char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
+ struct printf_spec spec, const char *fmt)
+{
+ const int CHUNKSZ = 32;
+ int nr_bits = max_t(int, spec.field_width, 0);
+ int i, chunksz;
+ bool first = true;
+
+ /* reused to print numbers */
+ spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
+
+ chunksz = nr_bits & (CHUNKSZ - 1);
+ if (chunksz == 0)
+ chunksz = CHUNKSZ;
+
+ i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ;
+ for (; i >= 0; i -= CHUNKSZ) {
+ u32 chunkmask, val;
+ int word, bit;
+
+ chunkmask = ((1ULL << chunksz) - 1);
+ word = i / BITS_PER_LONG;
+ bit = i % BITS_PER_LONG;
+ val = (bitmap[word] >> bit) & chunkmask;
+
+ if (!first) {
+ if (buf < end)
+ *buf = ',';
+ buf++;
+ }
+ first = false;
+
+ spec.field_width = DIV_ROUND_UP(chunksz, 4);
+ buf = number(buf, end, val, spec);
+
+ chunksz = CHUNKSZ;
+ }
+ return buf;
+}
+
+static noinline_for_stack
+char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
+ struct printf_spec spec, const char *fmt)
+{
+ int nr_bits = max_t(int, spec.field_width, 0);
+ /* current bit is 'cur', most recently seen range is [rbot, rtop] */
+ int cur, rbot, rtop;
+ bool first = true;
+
+ /* reused to print numbers */
+ spec = (struct printf_spec){ .base = 10 };
+
+ rbot = cur = find_first_bit(bitmap, nr_bits);
+ while (cur < nr_bits) {
+ rtop = cur;
+ cur = find_next_bit(bitmap, nr_bits, cur + 1);
+ if (cur < nr_bits && cur <= rtop + 1)
+ continue;
+
+ if (!first) {
+ if (buf < end)
+ *buf = ',';
+ buf++;
+ }
+ first = false;
+
+ buf = number(buf, end, rbot, spec);
+ if (rbot < rtop) {
+ if (buf < end)
+ *buf = '-';
+ buf++;
+
+ buf = number(buf, end, rtop, spec);
+ }
+
+ rbot = cur;
+ }
+ return buf;
+}
+
+static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
struct printf_spec spec, const char *fmt)
{
@@ -1257,6 +1339,10 @@ int kptr_restrict __read_mostly;
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
+ * - 'b[l]' For a bitmap, the number of bits is determined by the field
+ * width which must be explicitly specified either as part of the
+ * format string '%32b[l]' or through '%*b[l]', [l] selects
+ * range-list format instead of hex format
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
@@ -1353,6 +1439,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return resource_string(buf, end, ptr, spec, fmt);
case 'h':
return hex_string(buf, end, ptr, spec, fmt);
+ case 'b':
+ switch (fmt[1]) {
+ case 'l':
+ return bitmap_list_string(buf, end, ptr, spec, fmt);
+ default:
+ return bitmap_string(buf, end, ptr, spec, fmt);
+ }
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
/* [mM]F (FDDI) */
@@ -1604,8 +1697,7 @@ qualifier:
case 'p':
spec->type = FORMAT_TYPE_PTR;
- return fmt - start;
- /* skip alnum */
+ return ++fmt - start;
case '%':
spec->type = FORMAT_TYPE_PERCENT_CHAR;
@@ -1689,6 +1781,8 @@ qualifier:
* %pB output the name of a backtrace symbol with its offset
* %pR output the address range in a struct resource with decoded flags
* %pr output the address range in a struct resource with raw flags
+ * %pb output the bitmap with field width as the number of bits
+ * %pbl output the bitmap as range list with field width as the number of bits
* %pM output a 6-byte MAC address with colons
* %pMR output a 6-byte MAC address with colons in reversed order
* %pMF output a 6-byte MAC address with dashes
@@ -1728,7 +1822,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
- if (WARN_ON_ONCE((int) size < 0))
+ if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
@@ -1794,7 +1888,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
break;
case FORMAT_TYPE_PTR:
- str = pointer(fmt+1, str, end, va_arg(args, void *),
+ str = pointer(fmt, str, end, va_arg(args, void *),
spec);
while (isalnum(*fmt))
fmt++;
@@ -2232,7 +2326,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
case FORMAT_TYPE_PTR:
- str = pointer(fmt+1, str, end, get_arg(void *), spec);
+ str = pointer(fmt, str, end, get_arg(void *), spec);
while (isalnum(*fmt))
fmt++;
break;
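
A small sketch of the new %pb/%pbl extensions documented above, with the number of valid bits passed through the '*' field width; dump_mask() is illustrative:

static void dump_mask(void)
{
        DECLARE_BITMAP(mask, 64);

        bitmap_zero(mask, 64);
        bitmap_set(mask, 0, 4);                 /* bits 0-3 */
        bitmap_set(mask, 16, 2);                /* bits 16-17 */

        pr_info("mask: %*pb\n", 64, mask);      /* hex chunks */
        pr_info("mask: %*pbl\n", 64, mask);     /* "0-3,16-17" */
}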
diff --git a/mm/Kconfig b/mm/Kconfig
index 1d1ae6b078fd..de5239c152f9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -325,6 +325,7 @@ config VIRT_TO_BUS
config MMU_NOTIFIER
bool
+ select SRCU
config KSM
bool "Enable KSM for page merging"
@@ -601,6 +602,16 @@ config PGTABLE_MAPPING
You can check speed with zsmalloc benchmark:
https://github.com/spartacus06/zsmapbench
+config ZSMALLOC_STAT
+ bool "Export zsmalloc statistics"
+ depends on ZSMALLOC
+ select DEBUG_FS
+ help
+ This option enables code in zsmalloc to collect various
+ statistics about what's happening in zsmalloc and exports that
+ information to userspace via debugfs.
+ If unsure, say N.
+
config GENERIC_EARLY_IOREMAP
bool
diff --git a/mm/Makefile b/mm/Makefile
index 4bf586e66378..3c1caa2693bd 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -2,8 +2,11 @@
# Makefile for the linux memory manager.
#
+KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slub.o := n
+
mmu-y := nommu.o
-mmu-$(CONFIG_MMU) := fremap.o gup.o highmem.o memory.o mincore.o \
+mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
vmalloc.o pagewalk.o pgtable-generic.o
@@ -49,9 +52,9 @@ obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
+obj-$(CONFIG_KASAN) += kasan/
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
-obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0ae0df55000b..7690ec77c722 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -14,19 +14,10 @@
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
-struct backing_dev_info default_backing_dev_info = {
- .name = "default",
- .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
- .state = 0,
- .capabilities = BDI_CAP_MAP_COPY,
-};
-EXPORT_SYMBOL_GPL(default_backing_dev_info);
-
struct backing_dev_info noop_backing_dev_info = {
.name = "noop",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
-EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class;
@@ -40,17 +31,6 @@ LIST_HEAD(bdi_list);
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;
-static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
-{
- if (wb1 < wb2) {
- spin_lock(&wb1->list_lock);
- spin_lock_nested(&wb2->list_lock, 1);
- } else {
- spin_lock(&wb2->list_lock);
- spin_lock_nested(&wb1->list_lock, 1);
- }
-}
-
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -264,9 +244,6 @@ static int __init default_bdi_init(void)
if (!bdi_wq)
return -ENOMEM;
- err = bdi_init(&default_backing_dev_info);
- if (!err)
- bdi_register(&default_backing_dev_info, NULL, "default");
err = bdi_init(&noop_backing_dev_info);
return err;
@@ -355,19 +332,19 @@ EXPORT_SYMBOL(bdi_register_dev);
*/
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
- if (!bdi_cap_writeback_dirty(bdi))
+ /* Make sure nobody queues further work */
+ spin_lock_bh(&bdi->wb_lock);
+ if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
+ spin_unlock_bh(&bdi->wb_lock);
return;
+ }
+ spin_unlock_bh(&bdi->wb_lock);
/*
* Make sure nobody finds us on the bdi_list anymore
*/
bdi_remove_from_list(bdi);
- /* Make sure nobody queues further work */
- spin_lock_bh(&bdi->wb_lock);
- clear_bit(BDI_registered, &bdi->state);
- spin_unlock_bh(&bdi->wb_lock);
-
/*
* Drain work list and shutdown the delayed_work. At this point,
* @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
@@ -375,37 +352,22 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
*/
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
flush_delayed_work(&bdi->wb.dwork);
- WARN_ON(!list_empty(&bdi->work_list));
- WARN_ON(delayed_work_pending(&bdi->wb.dwork));
}
/*
- * This bdi is going away now, make sure that no super_blocks point to it
+ * Called when the device behind @bdi has been removed or ejected.
+ *
+ * We can't really do much here except for reducing the dirty ratio at
+ * the moment. In the future we should be able to set a flag so that
+ * the filesystem can handle errors at mark_inode_dirty time instead
+ * of only at writeback time.
*/
-static void bdi_prune_sb(struct backing_dev_info *bdi)
-{
- struct super_block *sb;
-
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (sb->s_bdi == bdi)
- sb->s_bdi = &default_backing_dev_info;
- }
- spin_unlock(&sb_lock);
-}
-
void bdi_unregister(struct backing_dev_info *bdi)
{
- if (bdi->dev) {
- bdi_set_min_ratio(bdi, 0);
- trace_writeback_bdi_unregister(bdi);
- bdi_prune_sb(bdi);
+ if (WARN_ON_ONCE(!bdi->dev))
+ return;
- bdi_wb_shutdown(bdi);
- bdi_debug_unregister(bdi);
- device_unregister(bdi->dev);
- bdi->dev = NULL;
- }
+ bdi_set_min_ratio(bdi, 0);
}
EXPORT_SYMBOL(bdi_unregister);
@@ -474,37 +436,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
{
int i;
- /*
- * Splice our entries to the default_backing_dev_info. This
- * condition shouldn't happen. @wb must be empty at this point and
- * dirty inodes on it might cause other issues. This workaround is
- * added by ce5f8e779519 ("writeback: splice dirty inode entries to
- * default bdi on bdi_destroy()") without root-causing the issue.
- *
- * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
- * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
- *
- * We should probably add WARN_ON() to find out whether it still
- * happens and track it down if so.
- */
- if (bdi_has_dirty_io(bdi)) {
- struct bdi_writeback *dst = &default_backing_dev_info.wb;
-
- bdi_lock_two(&bdi->wb, dst);
- list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
- list_splice(&bdi->wb.b_io, &dst->b_io);
- list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
- spin_unlock(&bdi->wb.list_lock);
- spin_unlock(&dst->list_lock);
- }
-
- bdi_unregister(bdi);
+ bdi_wb_shutdown(bdi);
+ WARN_ON(!list_empty(&bdi->work_list));
WARN_ON(delayed_work_pending(&bdi->wb.dwork));
+ if (bdi->dev) {
+ bdi_debug_unregister(bdi);
+ device_unregister(bdi->dev);
+ bdi->dev = NULL;
+ }
+
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
percpu_counter_destroy(&bdi->bdi_stat[i]);
-
fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
@@ -513,13 +457,12 @@ EXPORT_SYMBOL(bdi_destroy);
* For use from filesystems to quickly init and register a bdi associated
* with dirty writeback
*/
-int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
- unsigned int cap)
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
int err;
bdi->name = name;
- bdi->capabilities = cap;
+ bdi->capabilities = 0;
err = bdi_init(bdi);
if (err)
return err;
diff --git a/mm/cleancache.c b/mm/cleancache.c
index d0eac4350403..053bcd8f12fb 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -25,7 +25,7 @@
static struct cleancache_ops *cleancache_ops __read_mostly;
/*
- * Counters available via /sys/kernel/debug/frontswap (if debugfs is
+ * Counters available via /sys/kernel/debug/cleancache (if debugfs is
* properly configured). These are for information only so are not protected
* against increment races.
*/
diff --git a/mm/cma.c b/mm/cma.c
index a85ae28709a3..75016fd1de90 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -199,6 +199,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
cma->order_per_bit = order_per_bit;
*res_cma = cma;
cma_area_count++;
+ totalcma_pages += (size / PAGE_SIZE);
return 0;
}
@@ -337,7 +338,6 @@ int __init cma_declare_contiguous(phys_addr_t base,
if (ret)
goto err;
- totalcma_pages += (size / PAGE_SIZE);
pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
&base);
return 0;
diff --git a/mm/compaction.c b/mm/compaction.c
index 546e571e9d60..8c0d9459b54a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,7 @@
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
+#include <linux/kasan.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -34,6 +35,17 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+#ifdef CONFIG_TRACEPOINTS
+static const char *const compaction_status_string[] = {
+ "deferred",
+ "skipped",
+ "continue",
+ "partial",
+ "complete",
+ "no_suitable_page",
+ "not_suitable_zone",
+};
+#endif
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
@@ -61,6 +73,7 @@ static void map_pages(struct list_head *list)
list_for_each_entry(page, list, lru) {
arch_alloc_page(page, 0);
kernel_map_pages(page, 1, 1);
+ kasan_alloc_pages(page, 0);
}
}
@@ -113,6 +126,77 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
}
#ifdef CONFIG_COMPACTION
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+void defer_compaction(struct zone *zone, int order)
+{
+ zone->compact_considered = 0;
+ zone->compact_defer_shift++;
+
+ if (order < zone->compact_order_failed)
+ zone->compact_order_failed = order;
+
+ if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+ zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+
+ trace_mm_compaction_defer_compaction(zone, order);
+}
+
+/* Returns true if compaction should be skipped this time */
+bool compaction_deferred(struct zone *zone, int order)
+{
+ unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+ if (order < zone->compact_order_failed)
+ return false;
+
+ /* Avoid possible overflow */
+ if (++zone->compact_considered > defer_limit)
+ zone->compact_considered = defer_limit;
+
+ if (zone->compact_considered >= defer_limit)
+ return false;
+
+ trace_mm_compaction_deferred(zone, order);
+
+ return true;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success)
+{
+ if (alloc_success) {
+ zone->compact_considered = 0;
+ zone->compact_defer_shift = 0;
+ }
+ if (order >= zone->compact_order_failed)
+ zone->compact_order_failed = order + 1;
+
+ trace_mm_compaction_defer_reset(zone, order);
+}
+
+/* Returns true if restarting compaction after many failures */
+bool compaction_restarting(struct zone *zone, int order)
+{
+ if (order < zone->compact_order_failed)
+ return false;
+
+ return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+ zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
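[Editor's sketch, not kernel code: the defer logic above is a simple exponential backoff. A tiny userspace simulation, assuming COMPACT_MAX_DEFER_SHIFT == 6 as defined above, shows how the skip window grows and saturates at 64 attempts.]

	#include <stdio.h>

	#define COMPACT_MAX_DEFER_SHIFT 6	/* matches the definition above */

	int main(void)
	{
		unsigned int defer_shift = 0;
		int failure;

		for (failure = 1; failure <= 8; failure++) {
			if (defer_shift < COMPACT_MAX_DEFER_SHIFT)
				defer_shift++;
			/* after each failure, skip the next 2^defer_shift attempts */
			printf("failure %d: skip next %lu attempts\n",
			       failure, 1UL << defer_shift);
		}
		return 0;	/* prints 2, 4, 8, 16, 32, 64, 64, 64 */
	}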
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
struct page *page)
@@ -408,6 +492,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* If a page was split, advance to the end of it */
if (isolated) {
+ cc->nr_freepages += isolated;
+ if (!strict &&
+ cc->nr_migratepages <= cc->nr_freepages) {
+ blockpfn += isolated;
+ break;
+ }
+
blockpfn += isolated - 1;
cursor += isolated - 1;
continue;
@@ -421,11 +512,12 @@ isolate_fail:
}
+ trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
+ nr_scanned, total_isolated);
+
/* Record how far we have got within the block */
*start_pfn = blockpfn;
- trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
-
/*
* If strict isolation is requested by CMA then check that all the
* pages requested were isolated. If there were any failures, 0 is
@@ -581,6 +673,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
unsigned long flags = 0;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
+ unsigned long start_pfn = low_pfn;
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -741,7 +834,8 @@ isolate_success:
if (low_pfn == end_pfn)
update_pageblock_skip(cc, valid_page, nr_isolated, true);
- trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+ trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
+ nr_scanned, nr_isolated);
count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
if (nr_isolated)
@@ -814,7 +908,6 @@ static void isolate_freepages(struct compact_control *cc)
unsigned long isolate_start_pfn; /* exact pfn we start at */
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
- int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
/*
@@ -839,11 +932,11 @@ static void isolate_freepages(struct compact_control *cc)
* pages on cc->migratepages. We stop searching if the migrate
* and free page scanners meet or enough free pages are isolated.
*/
- for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+ for (; block_start_pfn >= low_pfn &&
+ cc->nr_migratepages > cc->nr_freepages;
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
- unsigned long isolated;
/*
* This can iterate a massively long zone without finding any
@@ -868,9 +961,8 @@ static void isolate_freepages(struct compact_control *cc)
continue;
/* Found a block suitable for isolating free pages from. */
- isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+ isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, freelist, false);
- nr_freepages += isolated;
/*
* Remember where the free scanner should restart next time,
@@ -902,8 +994,6 @@ static void isolate_freepages(struct compact_control *cc)
*/
if (block_start_pfn < low_pfn)
cc->free_pfn = cc->migrate_pfn;
-
- cc->nr_freepages = nr_freepages;
}
/*
@@ -1015,8 +1105,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
isolate_mode);
- if (!low_pfn || cc->contended)
+ if (!low_pfn || cc->contended) {
+ acct_isolated(zone, cc);
return ISOLATE_ABORT;
+ }
/*
* Either we isolated something and proceed with migration. Or
@@ -1037,7 +1129,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
-static int compact_finished(struct zone *zone, struct compact_control *cc,
+static int __compact_finished(struct zone *zone, struct compact_control *cc,
const int migratetype)
{
unsigned int order;
@@ -1088,11 +1180,24 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
return COMPACT_PARTIAL;
/* Job done if allocation would set block type */
- if (cc->order >= pageblock_order && area->nr_free)
+ if (order >= pageblock_order && area->nr_free)
return COMPACT_PARTIAL;
}
- return COMPACT_CONTINUE;
+ return COMPACT_NO_SUITABLE_PAGE;
+}
+
+static int compact_finished(struct zone *zone, struct compact_control *cc,
+ const int migratetype)
+{
+ int ret;
+
+ ret = __compact_finished(zone, cc, migratetype);
+ trace_mm_compaction_finished(zone, cc->order, ret);
+ if (ret == COMPACT_NO_SUITABLE_PAGE)
+ ret = COMPACT_CONTINUE;
+
+ return ret;
}
/*
@@ -1102,7 +1207,7 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
* COMPACT_PARTIAL - If the allocation would succeed without compaction
* COMPACT_CONTINUE - If compaction should run now
*/
-unsigned long compaction_suitable(struct zone *zone, int order,
+static unsigned long __compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx)
{
int fragindex;
@@ -1146,11 +1251,24 @@ unsigned long compaction_suitable(struct zone *zone, int order,
*/
fragindex = fragmentation_index(zone, order);
if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
- return COMPACT_SKIPPED;
+ return COMPACT_NOT_SUITABLE_ZONE;
return COMPACT_CONTINUE;
}
+unsigned long compaction_suitable(struct zone *zone, int order,
+ int alloc_flags, int classzone_idx)
+{
+ unsigned long ret;
+
+ ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
+ trace_mm_compaction_suitable(zone, order, ret);
+ if (ret == COMPACT_NOT_SUITABLE_ZONE)
+ ret = COMPACT_SKIPPED;
+
+ return ret;
+}
+
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;
@@ -1197,7 +1315,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
}
- trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
+ trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
+ cc->free_pfn, end_pfn, sync);
migrate_prep_local();
@@ -1299,7 +1418,8 @@ out:
zone->compact_cached_free_pfn = free_pfn;
}
- trace_mm_compaction_end(ret);
+ trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
+ cc->free_pfn, end_pfn, sync, ret);
return ret;
}
@@ -1335,22 +1455,20 @@ int sysctl_extfrag_threshold = 500;
/**
* try_to_compact_pages - Direct compact to satisfy a high-order allocation
- * @zonelist: The zonelist used for the current allocation
- * @order: The order of the current allocation
* @gfp_mask: The GFP mask of the current allocation
- * @nodemask: The allowed nodes to allocate from
+ * @order: The order of the current allocation
+ * @alloc_flags: The allocation flags of the current allocation
+ * @ac: The context of current allocation
* @mode: The migration mode for async, sync light, or sync migration
* @contended: Return value that determines if compaction was aborted due to
* need_resched() or lock contention
*
* This is the main entry point for direct page compaction.
*/
-unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *nodemask,
- enum migrate_mode mode, int *contended,
- int alloc_flags, int classzone_idx)
+unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+ int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended)
{
- enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
int may_perform_io = gfp_mask & __GFP_IO;
struct zoneref *z;
@@ -1364,9 +1482,11 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
if (!order || !may_enter_fs || !may_perform_io)
return COMPACT_SKIPPED;
+ trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
+
/* Compact each zone in the list */
- for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
- nodemask) {
+ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
+ ac->nodemask) {
int status;
int zone_contended;
@@ -1374,7 +1494,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
continue;
status = compact_zone_order(zone, order, gfp_mask, mode,
- &zone_contended, alloc_flags, classzone_idx);
+ &zone_contended, alloc_flags,
+ ac->classzone_idx);
rc = max(status, rc);
/*
* It takes at least one zone that wasn't lock contended
@@ -1384,7 +1505,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
- classzone_idx, alloc_flags)) {
+ ac->classzone_idx, alloc_flags)) {
/*
* We think the allocation will succeed in this zone,
* but it is not certain, hence the false. The caller
diff --git a/mm/debug.c b/mm/debug.c
index 0e58f3211f89..3eb3ac2fcee7 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -130,7 +130,6 @@ static const struct trace_print_flags vmaflags_names[] = {
{VM_ACCOUNT, "account" },
{VM_NORESERVE, "noreserve" },
{VM_HUGETLB, "hugetlb" },
- {VM_NONLINEAR, "nonlinear" },
#if defined(CONFIG_X86)
{VM_PAT, "pat" },
#elif defined(CONFIG_PPC)
@@ -174,7 +173,7 @@ void dump_mm(const struct mm_struct *mm)
"get_unmapped_area %p\n"
#endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
- "pgd %p mm_users %d mm_count %d nr_ptes %lu map_count %d\n"
+ "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
@@ -207,6 +206,7 @@ void dump_mm(const struct mm_struct *mm)
mm->pgd, atomic_read(&mm->mm_users),
atomic_read(&mm->mm_count),
atomic_long_read((atomic_long_t *)&mm->nr_ptes),
+ mm_nr_pmds((struct mm_struct *)mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 2ad7adf4f0a4..4a3907cf79f8 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -28,6 +28,7 @@
SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
{
struct fd f = fdget(fd);
+ struct inode *inode;
struct address_space *mapping;
struct backing_dev_info *bdi;
loff_t endbyte; /* inclusive */
@@ -39,7 +40,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
if (!f.file)
return -EBADF;
- if (S_ISFIFO(file_inode(f.file)->i_mode)) {
+ inode = file_inode(f.file);
+ if (S_ISFIFO(inode->i_mode)) {
ret = -ESPIPE;
goto out;
}
@@ -50,7 +52,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
goto out;
}
- if (mapping->a_ops->get_xip_mem) {
+ if (IS_DAX(inode)) {
switch (advice) {
case POSIX_FADV_NORMAL:
case POSIX_FADV_RANDOM:
@@ -73,7 +75,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
else
endbyte--; /* inclusive */
- bdi = mapping->backing_dev_info;
+ bdi = inode_to_bdi(mapping->host);
switch (advice) {
case POSIX_FADV_NORMAL:
@@ -113,7 +115,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
case POSIX_FADV_NOREUSE:
break;
case POSIX_FADV_DONTNEED:
- if (!bdi_write_congested(mapping->backing_dev_info))
+ if (!bdi_write_congested(bdi))
__filemap_fdatawrite_range(mapping, offset, endbyte,
WB_SYNC_NONE);
diff --git a/mm/filemap.c b/mm/filemap.c
index 673e4581a2e5..ad7242043bdb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -211,7 +211,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
*/
if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
- dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
}
}
@@ -1695,8 +1695,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
loff_t *ppos = &iocb->ki_pos;
loff_t pos = *ppos;
- /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
- if (file->f_flags & O_DIRECT) {
+ if (io_is_direct(file)) {
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
@@ -1723,9 +1722,11 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* we've already read everything we wanted to, or if
* there was a short read because we hit EOF, go ahead
* and return. Otherwise fallthrough to buffered io for
- * the rest of the read.
+ * the rest of the read. Buffered reads will not work for
+ * DAX files, so don't bother trying.
*/
- if (retval < 0 || !iov_iter_count(iter) || *ppos >= size) {
+ if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
+ IS_DAX(inode)) {
file_accessed(file);
goto out;
}
@@ -2087,7 +2088,6 @@ const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = filemap_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
/* This is used for a general mmap of a disk file */
@@ -2565,7 +2565,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
size_t count = iov_iter_count(from);
/* We can write back this queue in page reclaim */
- current->backing_dev_info = mapping->backing_dev_info;
+ current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
goto out;
@@ -2583,18 +2583,20 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
- /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
- if (unlikely(file->f_flags & O_DIRECT)) {
+ if (io_is_direct(file)) {
loff_t endbyte;
written = generic_file_direct_write(iocb, from, pos);
- if (written < 0 || written == count)
- goto out;
-
/*
- * direct-io write to a hole: fall through to buffered I/O
- * for completing the rest of the request.
+ * If the write stopped short of completing, fall back to
+ * buffered writes. Some filesystems do this for writes to
+ * holes, for example. For DAX files, a buffered write will
+ * not succeed (even if it did, DAX does not handle dirty
+ * page-cache pages correctly).
*/
+ if (written < 0 || written == count || IS_DAX(inode))
+ goto out;
+
pos += written;
count -= written;
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
deleted file mode 100644
index 0d105aeff82f..000000000000
--- a/mm/filemap_xip.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * linux/mm/filemap_xip.c
- *
- * Copyright (C) 2005 IBM Corporation
- * Author: Carsten Otte <cotte@de.ibm.com>
- *
- * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
- *
- */
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-#include <linux/rmap.h>
-#include <linux/mmu_notifier.h>
-#include <linux/sched.h>
-#include <linux/seqlock.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-#include <asm/tlbflush.h>
-#include <asm/io.h>
-
-/*
- * We do use our own empty page to avoid interference with other users
- * of ZERO_PAGE(), such as /dev/zero
- */
-static DEFINE_MUTEX(xip_sparse_mutex);
-static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
-static struct page *__xip_sparse_page;
-
-/* called under xip_sparse_mutex */
-static struct page *xip_sparse_page(void)
-{
- if (!__xip_sparse_page) {
- struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
-
- if (page)
- __xip_sparse_page = page;
- }
- return __xip_sparse_page;
-}
-
-/*
- * This is a file read routine for execute in place files, and uses
- * the mapping->a_ops->get_xip_mem() function for the actual low-level
- * stuff.
- *
- * Note the struct file* is not used at all. It may be NULL.
- */
-static ssize_t
-do_xip_mapping_read(struct address_space *mapping,
- struct file_ra_state *_ra,
- struct file *filp,
- char __user *buf,
- size_t len,
- loff_t *ppos)
-{
- struct inode *inode = mapping->host;
- pgoff_t index, end_index;
- unsigned long offset;
- loff_t isize, pos;
- size_t copied = 0, error = 0;
-
- BUG_ON(!mapping->a_ops->get_xip_mem);
-
- pos = *ppos;
- index = pos >> PAGE_CACHE_SHIFT;
- offset = pos & ~PAGE_CACHE_MASK;
-
- isize = i_size_read(inode);
- if (!isize)
- goto out;
-
- end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
- do {
- unsigned long nr, left;
- void *xip_mem;
- unsigned long xip_pfn;
- int zero = 0;
-
- /* nr is the maximum number of bytes to copy from this page */
- nr = PAGE_CACHE_SIZE;
- if (index >= end_index) {
- if (index > end_index)
- goto out;
- nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
- if (nr <= offset) {
- goto out;
- }
- }
- nr = nr - offset;
- if (nr > len - copied)
- nr = len - copied;
-
- error = mapping->a_ops->get_xip_mem(mapping, index, 0,
- &xip_mem, &xip_pfn);
- if (unlikely(error)) {
- if (error == -ENODATA) {
- /* sparse */
- zero = 1;
- } else
- goto out;
- }
-
- /* If users can be writing to this page using arbitrary
- * virtual addresses, take care about potential aliasing
- * before reading the page on the kernel side.
- */
- if (mapping_writably_mapped(mapping))
- /* address based flush */ ;
-
- /*
- * Ok, we have the mem, so now we can copy it to user space...
- *
- * The actor routine returns how many bytes were actually used..
- * NOTE! This may not be the same as how much of a user buffer
- * we filled up (we may be padding etc), so we can only update
- * "pos" here (the actor routine has to update the user buffer
- * pointers and the remaining count).
- */
- if (!zero)
- left = __copy_to_user(buf+copied, xip_mem+offset, nr);
- else
- left = __clear_user(buf + copied, nr);
-
- if (left) {
- error = -EFAULT;
- goto out;
- }
-
- copied += (nr - left);
- offset += (nr - left);
- index += offset >> PAGE_CACHE_SHIFT;
- offset &= ~PAGE_CACHE_MASK;
- } while (copied < len);
-
-out:
- *ppos = pos + copied;
- if (filp)
- file_accessed(filp);
-
- return (copied ? copied : error);
-}
-
-ssize_t
-xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
-{
- if (!access_ok(VERIFY_WRITE, buf, len))
- return -EFAULT;
-
- return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
- buf, len, ppos);
-}
-EXPORT_SYMBOL_GPL(xip_file_read);
-
-/*
- * __xip_unmap is invoked from xip_unmap and xip_write
- *
- * This function walks all vmas of the address_space and unmaps the
- * __xip_sparse_page when found at pgoff.
- */
-static void __xip_unmap(struct address_space * mapping, unsigned long pgoff)
-{
- struct vm_area_struct *vma;
- struct page *page;
- unsigned count;
- int locked = 0;
-
- count = read_seqcount_begin(&xip_sparse_seq);
-
- page = __xip_sparse_page;
- if (!page)
- return;
-
-retry:
- i_mmap_lock_read(mapping);
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- pte_t *pte, pteval;
- spinlock_t *ptl;
- struct mm_struct *mm = vma->vm_mm;
- unsigned long address = vma->vm_start +
- ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- pte = page_check_address(page, mm, address, &ptl, 1);
- if (pte) {
- /* Nuke the page table entry. */
- flush_cache_page(vma, address, pte_pfn(*pte));
- pteval = ptep_clear_flush(vma, address, pte);
- page_remove_rmap(page);
- dec_mm_counter(mm, MM_FILEPAGES);
- BUG_ON(pte_dirty(pteval));
- pte_unmap_unlock(pte, ptl);
- /* must invalidate_page _before_ freeing the page */
- mmu_notifier_invalidate_page(mm, address);
- page_cache_release(page);
- }
- }
- i_mmap_unlock_read(mapping);
-
- if (locked) {
- mutex_unlock(&xip_sparse_mutex);
- } else if (read_seqcount_retry(&xip_sparse_seq, count)) {
- mutex_lock(&xip_sparse_mutex);
- locked = 1;
- goto retry;
- }
-}
-
-/*
- * xip_fault() is invoked via the vma operations vector for a
- * mapped memory region to read in file data during a page fault.
- *
- * This function is derived from filemap_fault, but used for execute in place
- */
-static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct file *file = vma->vm_file;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- pgoff_t size;
- void *xip_mem;
- unsigned long xip_pfn;
- struct page *page;
- int error;
-
- /* XXX: are VM_FAULT_ codes OK? */
-again:
- size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (vmf->pgoff >= size)
- return VM_FAULT_SIGBUS;
-
- error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
- &xip_mem, &xip_pfn);
- if (likely(!error))
- goto found;
- if (error != -ENODATA)
- return VM_FAULT_OOM;
-
- /* sparse block */
- if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
- (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
- (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
- int err;
-
- /* maybe shared writable, allocate new block */
- mutex_lock(&xip_sparse_mutex);
- error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
- &xip_mem, &xip_pfn);
- mutex_unlock(&xip_sparse_mutex);
- if (error)
- return VM_FAULT_SIGBUS;
- /* unmap sparse mappings at pgoff from all other vmas */
- __xip_unmap(mapping, vmf->pgoff);
-
-found:
- err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- xip_pfn);
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- /*
- * err == -EBUSY is fine, we've raced against another thread
- * that faulted-in the same page
- */
- if (err != -EBUSY)
- BUG_ON(err);
- return VM_FAULT_NOPAGE;
- } else {
- int err, ret = VM_FAULT_OOM;
-
- mutex_lock(&xip_sparse_mutex);
- write_seqcount_begin(&xip_sparse_seq);
- error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
- &xip_mem, &xip_pfn);
- if (unlikely(!error)) {
- write_seqcount_end(&xip_sparse_seq);
- mutex_unlock(&xip_sparse_mutex);
- goto again;
- }
- if (error != -ENODATA)
- goto out;
- /* not shared and writable, use xip_sparse_page() */
- page = xip_sparse_page();
- if (!page)
- goto out;
- err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- page);
- if (err == -ENOMEM)
- goto out;
-
- ret = VM_FAULT_NOPAGE;
-out:
- write_seqcount_end(&xip_sparse_seq);
- mutex_unlock(&xip_sparse_mutex);
-
- return ret;
- }
-}
-
-static const struct vm_operations_struct xip_file_vm_ops = {
- .fault = xip_file_fault,
- .page_mkwrite = filemap_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
-};
-
-int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
-{
- BUG_ON(!file->f_mapping->a_ops->get_xip_mem);
-
- file_accessed(file);
- vma->vm_ops = &xip_file_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP;
- return 0;
-}
-EXPORT_SYMBOL_GPL(xip_file_mmap);
-
-static ssize_t
-__xip_file_write(struct file *filp, const char __user *buf,
- size_t count, loff_t pos, loff_t *ppos)
-{
- struct address_space * mapping = filp->f_mapping;
- const struct address_space_operations *a_ops = mapping->a_ops;
- struct inode *inode = mapping->host;
- long status = 0;
- size_t bytes;
- ssize_t written = 0;
-
- BUG_ON(!mapping->a_ops->get_xip_mem);
-
- do {
- unsigned long index;
- unsigned long offset;
- size_t copied;
- void *xip_mem;
- unsigned long xip_pfn;
-
- offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
- index = pos >> PAGE_CACHE_SHIFT;
- bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
-
- status = a_ops->get_xip_mem(mapping, index, 0,
- &xip_mem, &xip_pfn);
- if (status == -ENODATA) {
- /* we allocate a new page unmap it */
- mutex_lock(&xip_sparse_mutex);
- status = a_ops->get_xip_mem(mapping, index, 1,
- &xip_mem, &xip_pfn);
- mutex_unlock(&xip_sparse_mutex);
- if (!status)
- /* unmap page at pgoff from all other vmas */
- __xip_unmap(mapping, index);
- }
-
- if (status)
- break;
-
- copied = bytes -
- __copy_from_user_nocache(xip_mem + offset, buf, bytes);
-
- if (likely(copied > 0)) {
- status = copied;
-
- if (status >= 0) {
- written += status;
- count -= status;
- pos += status;
- buf += status;
- }
- }
- if (unlikely(copied != bytes))
- if (status >= 0)
- status = -EFAULT;
- if (status < 0)
- break;
- } while (count);
- *ppos = pos;
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_mutex.
- */
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
- mark_inode_dirty(inode);
- }
-
- return written ? written : status;
-}
-
-ssize_t
-xip_file_write(struct file *filp, const char __user *buf, size_t len,
- loff_t *ppos)
-{
- struct address_space *mapping = filp->f_mapping;
- struct inode *inode = mapping->host;
- size_t count;
- loff_t pos;
- ssize_t ret;
-
- mutex_lock(&inode->i_mutex);
-
- if (!access_ok(VERIFY_READ, buf, len)) {
- ret=-EFAULT;
- goto out_up;
- }
-
- pos = *ppos;
- count = len;
-
- /* We can write back this queue in page reclaim */
- current->backing_dev_info = mapping->backing_dev_info;
-
- ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
- if (ret)
- goto out_backing;
- if (count == 0)
- goto out_backing;
-
- ret = file_remove_suid(filp);
- if (ret)
- goto out_backing;
-
- ret = file_update_time(filp);
- if (ret)
- goto out_backing;
-
- ret = __xip_file_write (filp, buf, count, pos, ppos);
-
- out_backing:
- current->backing_dev_info = NULL;
- out_up:
- mutex_unlock(&inode->i_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(xip_file_write);
-
-/*
- * truncate a page used for execute in place
- * functionality is analog to block_truncate_page but does use get_xip_mem
- * to get the page instead of page cache
- */
-int
-xip_truncate_page(struct address_space *mapping, loff_t from)
-{
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned blocksize;
- unsigned length;
- void *xip_mem;
- unsigned long xip_pfn;
- int err;
-
- BUG_ON(!mapping->a_ops->get_xip_mem);
-
- blocksize = 1 << mapping->host->i_blkbits;
- length = offset & (blocksize - 1);
-
- /* Block boundary? Nothing to do */
- if (!length)
- return 0;
-
- length = blocksize - length;
-
- err = mapping->a_ops->get_xip_mem(mapping, index, 0,
- &xip_mem, &xip_pfn);
- if (unlikely(err)) {
- if (err == -ENODATA)
- /* Hole? No need to truncate */
- return 0;
- else
- return err;
- }
- memset(xip_mem + offset, 0, length);
- return 0;
-}
-EXPORT_SYMBOL_GPL(xip_truncate_page);
diff --git a/mm/fremap.c b/mm/fremap.c
deleted file mode 100644
index 2805d71cf476..000000000000
--- a/mm/fremap.c
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * linux/mm/fremap.c
- *
- * Explicit pagetable population and nonlinear (random) mappings support.
- *
- * started by Ingo Molnar, Copyright (C) 2002, 2003
- */
-#include <linux/export.h>
-#include <linux/backing-dev.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/file.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/swapops.h>
-#include <linux/rmap.h>
-#include <linux/syscalls.h>
-#include <linux/mmu_notifier.h>
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-#include "internal.h"
-
-static int mm_counter(struct page *page)
-{
- return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
-}
-
-static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte = *ptep;
- struct page *page;
- swp_entry_t entry;
-
- if (pte_present(pte)) {
- flush_cache_page(vma, addr, pte_pfn(pte));
- pte = ptep_clear_flush_notify(vma, addr, ptep);
- page = vm_normal_page(vma, addr, pte);
- if (page) {
- if (pte_dirty(pte))
- set_page_dirty(page);
- update_hiwater_rss(mm);
- dec_mm_counter(mm, mm_counter(page));
- page_remove_rmap(page);
- page_cache_release(page);
- }
- } else { /* zap_pte() is not called when pte_none() */
- if (!pte_file(pte)) {
- update_hiwater_rss(mm);
- entry = pte_to_swp_entry(pte);
- if (non_swap_entry(entry)) {
- if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
- dec_mm_counter(mm, mm_counter(page));
- }
- } else {
- free_swap_and_cache(entry);
- dec_mm_counter(mm, MM_SWAPENTS);
- }
- }
- pte_clear_not_present_full(mm, addr, ptep, 0);
- }
-}
-
-/*
- * Install a file pte to a given virtual memory address, release any
- * previously existing mapping.
- */
-static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, unsigned long pgoff, pgprot_t prot)
-{
- int err = -ENOMEM;
- pte_t *pte, ptfile;
- spinlock_t *ptl;
-
- pte = get_locked_pte(mm, addr, &ptl);
- if (!pte)
- goto out;
-
- ptfile = pgoff_to_pte(pgoff);
-
- if (!pte_none(*pte))
- zap_pte(mm, vma, addr, pte);
-
- set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
- /*
- * We don't need to run update_mmu_cache() here because the "file pte"
- * being installed by install_file_pte() is not a real pte - it's a
- * non-present entry (like a swap entry), noting what file offset should
- * be mapped there when there's a fault (in a non-linear vma where
- * that's not obvious).
- */
- pte_unmap_unlock(pte, ptl);
- err = 0;
-out:
- return err;
-}
-
-int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
- unsigned long size, pgoff_t pgoff)
-{
- struct mm_struct *mm = vma->vm_mm;
- int err;
-
- do {
- err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
- if (err)
- return err;
-
- size -= PAGE_SIZE;
- addr += PAGE_SIZE;
- pgoff++;
- } while (size);
-
- return 0;
-}
-EXPORT_SYMBOL(generic_file_remap_pages);
-
-/**
- * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
- * @start: start of the remapped virtual memory range
- * @size: size of the remapped virtual memory range
- * @prot: new protection bits of the range (see NOTE)
- * @pgoff: to-be-mapped page of the backing store file
- * @flags: 0 or MAP_NONBLOCKED - the later will cause no IO.
- *
- * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
- * (shared backing store file).
- *
- * This syscall works purely via pagetables, so it's the most efficient
- * way to map the same (large) file into a given virtual window. Unlike
- * mmap()/mremap() it does not create any new vmas. The new mappings are
- * also safe across swapout.
- *
- * NOTE: the @prot parameter right now is ignored (but must be zero),
- * and the vma's default protection is used. Arbitrary protections
- * might be implemented in the future.
- */
-SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
- unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
-{
- struct mm_struct *mm = current->mm;
- struct address_space *mapping;
- struct vm_area_struct *vma;
- int err = -EINVAL;
- int has_write_lock = 0;
- vm_flags_t vm_flags = 0;
-
- pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
- "See Documentation/vm/remap_file_pages.txt.\n",
- current->comm, current->pid);
-
- if (prot)
- return err;
- /*
- * Sanitize the syscall parameters:
- */
- start = start & PAGE_MASK;
- size = size & PAGE_MASK;
-
- /* Does the address range wrap, or is the span zero-sized? */
- if (start + size <= start)
- return err;
-
- /* Does pgoff wrap? */
- if (pgoff + (size >> PAGE_SHIFT) < pgoff)
- return err;
-
- /* Can we represent this offset inside this architecture's pte's? */
-#if PTE_FILE_MAX_BITS < BITS_PER_LONG
- if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
- return err;
-#endif
-
- /* We need down_write() to change vma->vm_flags. */
- down_read(&mm->mmap_sem);
- retry:
- vma = find_vma(mm, start);
-
- /*
- * Make sure the vma is shared, that it supports prefaulting,
- * and that the remapped range is valid and fully within
- * the single existing vma.
- */
- if (!vma || !(vma->vm_flags & VM_SHARED))
- goto out;
-
- if (!vma->vm_ops || !vma->vm_ops->remap_pages)
- goto out;
-
- if (start < vma->vm_start || start + size > vma->vm_end)
- goto out;
-
- /* Must set VM_NONLINEAR before any pages are populated. */
- if (!(vma->vm_flags & VM_NONLINEAR)) {
- /*
- * vm_private_data is used as a swapout cursor
- * in a VM_NONLINEAR vma.
- */
- if (vma->vm_private_data)
- goto out;
-
- /* Don't need a nonlinear mapping, exit success */
- if (pgoff == linear_page_index(vma, start)) {
- err = 0;
- goto out;
- }
-
- if (!has_write_lock) {
-get_write_lock:
- up_read(&mm->mmap_sem);
- down_write(&mm->mmap_sem);
- has_write_lock = 1;
- goto retry;
- }
- mapping = vma->vm_file->f_mapping;
- /*
- * page_mkclean doesn't work on nonlinear vmas, so if
- * dirty pages need to be accounted, emulate with linear
- * vmas.
- */
- if (mapping_cap_account_dirty(mapping)) {
- unsigned long addr;
- struct file *file = get_file(vma->vm_file);
- /* mmap_region may free vma; grab the info now */
- vm_flags = vma->vm_flags;
-
- addr = mmap_region(file, start, size, vm_flags, pgoff);
- fput(file);
- if (IS_ERR_VALUE(addr)) {
- err = addr;
- } else {
- BUG_ON(addr != start);
- err = 0;
- }
- goto out_freed;
- }
- i_mmap_lock_write(mapping);
- flush_dcache_mmap_lock(mapping);
- vma->vm_flags |= VM_NONLINEAR;
- vma_interval_tree_remove(vma, &mapping->i_mmap);
- vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
- flush_dcache_mmap_unlock(mapping);
- i_mmap_unlock_write(mapping);
- }
-
- if (vma->vm_flags & VM_LOCKED) {
- /*
- * drop PG_Mlocked flag for over-mapped range
- */
- if (!has_write_lock)
- goto get_write_lock;
- vm_flags = vma->vm_flags;
- munlock_vma_pages_range(vma, start, start + size);
- vma->vm_flags = vm_flags;
- }
-
- mmu_notifier_invalidate_range_start(mm, start, start + size);
- err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
- mmu_notifier_invalidate_range_end(mm, start, start + size);
-
- /*
- * We can't clear VM_NONLINEAR because we'd have to do
- * it after ->populate completes, and that would prevent
- * downgrading the lock. (Locks can't be upgraded).
- */
-
-out:
- if (vma)
- vm_flags = vma->vm_flags;
-out_freed:
- if (likely(!has_write_lock))
- up_read(&mm->mmap_sem);
- else
- up_write(&mm->mmap_sem);
- if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
- mm_populate(start, size);
-
- return err;
-}
diff --git a/mm/gup.c b/mm/gup.c
index a900759cc807..a6e24e246f86 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -55,7 +55,7 @@ retry:
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
- if (pte_none(pte) || pte_file(pte))
+ if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
@@ -64,7 +64,7 @@ retry:
migration_entry_wait(mm, pmd, address);
goto retry;
}
- if ((flags & FOLL_NUMA) && pte_numa(pte))
+ if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte)) {
pte_unmap_unlock(ptep, ptl);
@@ -167,10 +167,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pud_none(*pud))
return no_page_table(vma, flags);
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
- if (flags & FOLL_GET)
- return NULL;
- page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
- return page;
+ page = follow_huge_pud(mm, address, pud, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
}
if (unlikely(pud_bad(*pud)))
return no_page_table(vma, flags);
@@ -179,21 +179,12 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pmd_none(*pmd))
return no_page_table(vma, flags);
if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
- page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
- if (flags & FOLL_GET) {
- /*
- * Refcount on tail pages are not well-defined and
- * shouldn't be taken. The caller should handle a NULL
- * return when trying to follow tail pages.
- */
- if (PageHead(page))
- get_page(page);
- else
- page = NULL;
- }
- return page;
+ page = follow_huge_pmd(mm, address, pmd, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
}
- if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
return no_page_table(vma, flags);
if (pmd_trans_huge(*pmd)) {
if (flags & FOLL_SPLIT) {
@@ -296,7 +287,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
BUG();
}
@@ -571,7 +562,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return -EHWPOISON;
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
BUG();
}
@@ -584,6 +575,185 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
return 0;
}
+static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long nr_pages,
+ int write, int force,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ int *locked, bool notify_drop,
+ unsigned int flags)
+{
+ long ret, pages_done;
+ bool lock_dropped;
+
+ if (locked) {
+ /* if VM_FAULT_RETRY can be returned, vmas become invalid */
+ BUG_ON(vmas);
+ /* check caller initialized locked */
+ BUG_ON(*locked != 1);
+ }
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ pages_done = 0;
+ lock_dropped = false;
+ for (;;) {
+ ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
+ vmas, locked);
+ if (!locked)
+ /* VM_FAULT_RETRY couldn't trigger, bypass */
+ return ret;
+
+ /* VM_FAULT_RETRY cannot return errors */
+ if (!*locked) {
+ BUG_ON(ret < 0);
+ BUG_ON(ret >= nr_pages);
+ }
+
+ if (!pages)
+ /* If it's a prefault don't insist harder */
+ return ret;
+
+ if (ret > 0) {
+ nr_pages -= ret;
+ pages_done += ret;
+ if (!nr_pages)
+ break;
+ }
+ if (*locked) {
+ /* VM_FAULT_RETRY didn't trigger */
+ if (!pages_done)
+ pages_done = ret;
+ break;
+ }
+ /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
+ pages += ret;
+ start += ret << PAGE_SHIFT;
+
+ /*
+ * Repeat on the address that fired VM_FAULT_RETRY
+ * without FAULT_FLAG_ALLOW_RETRY but with
+ * FAULT_FLAG_TRIED.
+ */
+ *locked = 1;
+ lock_dropped = true;
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
+ pages, NULL, NULL);
+ if (ret != 1) {
+ BUG_ON(ret > 1);
+ if (!pages_done)
+ pages_done = ret;
+ break;
+ }
+ nr_pages--;
+ pages_done++;
+ if (!nr_pages)
+ break;
+ pages++;
+ start += PAGE_SIZE;
+ }
+ if (notify_drop && lock_dropped && *locked) {
+ /*
+ * We must let the caller know we temporarily dropped the lock
+ * and so the critical section protected by it was lost.
+ */
+ up_read(&mm->mmap_sem);
+ *locked = 0;
+ }
+ return pages_done;
+}
+
+/*
+ * We can leverage the VM_FAULT_RETRY functionality in the page fault
+ * paths better by using either get_user_pages_locked() or
+ * get_user_pages_unlocked().
+ *
+ * get_user_pages_locked() is suitable to replace the form:
+ *
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * to:
+ *
+ * int locked = 1;
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages_locked(tsk, mm, ..., pages, &locked);
+ * if (locked)
+ * up_read(&mm->mmap_sem);
+ */
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked)
+{
+ return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+ pages, NULL, locked, true, FOLL_TOUCH);
+}
+EXPORT_SYMBOL(get_user_pages_locked);
+
+/*
+ * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows the
+ * caller to pass additional gup_flags as the last parameter (such as
+ * FOLL_HWPOISON).
+ *
+ * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
+ * caller if required (just like with __get_user_pages). "FOLL_GET",
+ * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
+ * according to the parameters "pages", "write", "force"
+ * respectively.
+ */
+__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ unsigned int gup_flags)
+{
+ long ret;
+ int locked = 1;
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+ pages, NULL, &locked, false, gup_flags);
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret;
+}
+EXPORT_SYMBOL(__get_user_pages_unlocked);
+
+/*
+ * get_user_pages_unlocked() is suitable to replace the form:
+ *
+ * down_read(&mm->mmap_sem);
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * with:
+ *
+ * get_user_pages_unlocked(tsk, mm, ..., pages);
+ *
+ * It is functionally equivalent to get_user_pages_fast, so
+ * get_user_pages_fast should be used instead if the two parameters
+ * "tsk" and "mm" are respectively equal to current and current->mm,
+ * except when "force" needs to be set to 1 (get_user_pages_fast lacks
+ * a "force" parameter).
+ */
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages)
+{
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
+ force, pages, FOLL_TOUCH);
+}
+EXPORT_SYMBOL(get_user_pages_unlocked);
+
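[Editor's illustration of a hypothetical caller, not code from this patch; only the helper's signature is taken from the additions above. A driver that used to wrap get_user_pages() in down_read()/up_read() of mmap_sem can now call the new helper and let it manage the lock, including dropping it temporarily to service faults with VM_FAULT_RETRY.]

	/* Hypothetical example only. */
	static long pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
				    struct page **pages)
	{
		/*
		 * mmap_sem is acquired and released inside the helper, and may
		 * be dropped and re-taken internally while faults are retried.
		 */
		return get_user_pages_unlocked(current, current->mm, uaddr,
					       nr_pages, 1 /* write */,
					       0 /* force */, pages);
	}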
/*
* get_user_pages() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
@@ -633,22 +803,18 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
* use the correct cache flushing APIs.
*
* See also get_user_pages_fast, for performance critical applications.
+ *
+ * get_user_pages should be phased out in favor of
+ * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
+ * should use get_user_pages because it cannot pass
+ * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, int write,
int force, struct page **pages, struct vm_area_struct **vmas)
{
- int flags = FOLL_TOUCH;
-
- if (pages)
- flags |= FOLL_GET;
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
- return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
- NULL);
+ return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+ pages, vmas, NULL, false, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
@@ -740,10 +906,10 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
/*
* Similar to the PMD case below, NUMA hinting must take slow
- * path
+ * path using the pte_protnone check.
*/
if (!pte_present(pte) || pte_special(pte) ||
- pte_numa(pte) || (write && !pte_write(pte)))
+ pte_protnone(pte) || (write && !pte_write(pte)))
goto pte_unmap;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -926,7 +1092,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
pmdp = pmd_offset(&pud, addr);
do {
- pmd_t pmd = ACCESS_ONCE(*pmdp);
+ pmd_t pmd = READ_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
@@ -938,7 +1104,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
- if (pmd_numa(pmd))
+ if (pmd_protnone(pmd))
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
@@ -1077,10 +1243,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- nr_pages - nr, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ nr_pages - nr, write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 817a875f2b8c..fc00c8cb5a82 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -171,12 +171,7 @@ static int start_khugepaged(void)
}
static atomic_t huge_zero_refcount;
-static struct page *huge_zero_page __read_mostly;
-
-static inline bool is_huge_zero_page(struct page *page)
-{
- return ACCESS_ONCE(huge_zero_page) == page;
-}
+struct page *huge_zero_page __read_mostly;
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
@@ -766,15 +761,6 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
-static inline struct page *alloc_hugepage_vma(int defrag,
- struct vm_area_struct *vma,
- unsigned long haddr, int nd,
- gfp_t extra_gfp)
-{
- return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
- HPAGE_PMD_ORDER, vma, haddr, nd);
-}
-
/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
@@ -795,6 +781,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
unsigned int flags)
{
+ gfp_t gfp;
struct page *page;
unsigned long haddr = address & HPAGE_PMD_MASK;
@@ -829,8 +816,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
return 0;
}
- page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
- vma, haddr, numa_node_id(), 0);
+ gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+ page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1118,10 +1105,12 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_unlock(ptl);
alloc:
if (transparent_hugepage_enabled(vma) &&
- !transparent_hugepage_debug_cow())
- new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
- vma, haddr, numa_node_id(), 0);
- else
+ !transparent_hugepage_debug_cow()) {
+ gfp_t gfp;
+
+ gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+ new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+ } else
new_page = NULL;
if (unlikely(!new_page)) {
@@ -1222,7 +1211,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
return ERR_PTR(-EFAULT);
/* Full NUMA hinting faults to serialise migration in fault paths */
- if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
goto out;
page = pmd_page(*pmd);
@@ -1273,6 +1262,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
bool migrated = false;
int flags = 0;
+ /* A PROT_NONE fault should not end up here */
+ BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+
ptl = pmd_lock(mm, pmdp);
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
@@ -1283,8 +1275,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
* check_same as the page may no longer be mapped.
*/
if (unlikely(pmd_trans_migrating(*pmdp))) {
+ page = pmd_page(*pmdp);
spin_unlock(ptl);
- wait_migrate_huge_page(vma->anon_vma, pmdp);
+ wait_on_page_locked(page);
goto out;
}
@@ -1352,7 +1345,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/*
* Migrate the THP to the requested node, returns with page unlocked
- * and pmd_numa cleared.
+ * and access rights restored.
*/
spin_unlock(ptl);
migrated = migrate_misplaced_transhuge_page(mm, vma,
@@ -1365,9 +1358,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
clear_pmdnuma:
BUG_ON(!PageLocked(page));
- pmd = pmd_mknonnuma(pmd);
+ pmd = pmd_modify(pmd, vma->vm_page_prot);
set_pmd_at(mm, haddr, pmdp, pmd);
- VM_BUG_ON(pmd_numa(*pmdp));
update_mmu_cache_pmd(vma, addr, pmdp);
unlock_page(page);
out_unlock:
@@ -1423,26 +1415,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
return ret;
}
-int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
-{
- spinlock_t *ptl;
- int ret = 0;
-
- if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
- /*
- * All logical pages in the range are present
- * if backed by a huge page.
- */
- spin_unlock(ptl);
- memset(vec, 1, (end - addr) >> PAGE_SHIFT);
- ret = 1;
- }
-
- return ret;
-}
-
int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
@@ -1510,29 +1482,24 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
pmd_t entry;
- ret = 1;
- if (!prot_numa) {
+
+ /*
+ * Avoid trapping faults against the zero page. The read-only
+ * data is likely to be read-cached on the local CPU and
+ * local/remote hits to the zero page are not interesting.
+ */
+ if (prot_numa && is_huge_zero_pmd(*pmd)) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ if (!prot_numa || !pmd_protnone(*pmd)) {
+ ret = 1;
entry = pmdp_get_and_clear_notify(mm, addr, pmd);
- if (pmd_numa(entry))
- entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(pmd_write(entry));
- } else {
- struct page *page = pmd_page(*pmd);
-
- /*
- * Do not trap faults against the zero page. The
- * read-only data is likely to be read-cached on the
- * local CPU cache and it is less useful to know about
- * local vs remote hits on the zero page.
- */
- if (!is_huge_zero_page(page) &&
- !pmd_numa(*pmd)) {
- pmdp_set_numa(mm, addr, pmd);
- ret = HPAGE_PMD_NR;
- }
}
spin_unlock(ptl);
}
@@ -1797,9 +1764,9 @@ static int __split_huge_page_map(struct page *page,
pte_t *pte, entry;
BUG_ON(PageCompound(page+i));
/*
- * Note that pmd_numa is not transferred deliberately
- * to avoid any possibility that pte_numa leaks to
- * a PROT_NONE VMA by accident.
+ * Note that NUMA hinting access restrictions are not
+ * transferred to avoid any possibility of altering
+ * permissions across VMAs.
*/
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2148,7 +2115,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
{
struct page *page;
pte_t *_pte;
- int referenced = 0, none = 0;
+ int none = 0;
+ bool referenced = false, writable = false;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte;
@@ -2158,7 +2126,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
else
goto out;
}
- if (!pte_present(pteval) || !pte_write(pteval))
+ if (!pte_present(pteval))
goto out;
page = vm_normal_page(vma, address, pteval);
if (unlikely(!page))
@@ -2168,9 +2136,6 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
- /* cannot use mapcount: can't collapse if there's a gup pin */
- if (page_count(page) != 1)
- goto out;
/*
* We can do it before isolate_lru_page because the
* page can't be freed from under us. NOTE: PG_lock
@@ -2179,6 +2144,29 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
*/
if (!trylock_page(page))
goto out;
+
+ /*
+ * cannot use mapcount: can't collapse if there's a gup pin.
+ * The page must only be referenced by the scanned process
+ * and page swap cache.
+ */
+ if (page_count(page) != 1 + !!PageSwapCache(page)) {
+ unlock_page(page);
+ goto out;
+ }
+ if (pte_write(pteval)) {
+ writable = true;
+ } else {
+ if (PageSwapCache(page) && !reuse_swap_page(page)) {
+ unlock_page(page);
+ goto out;
+ }
+ /*
+ * Page is not in the swap cache. It can be collapsed
+ * into a THP.
+ */
+ }
+
/*
* Isolate the page to avoid collapsing a hugepage
* currently in use by the VM.
@@ -2195,9 +2183,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
/* If there is no mapped pte young don't collapse the page */
if (pte_young(pteval) || PageReferenced(page) ||
mmu_notifier_test_young(vma->vm_mm, address))
- referenced = 1;
+ referenced = true;
}
- if (likely(referenced))
+ if (likely(referenced && writable))
return 1;
out:
release_pte_pages(pte, _pte);
@@ -2550,11 +2538,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
{
pmd_t *pmd;
pte_t *pte, *_pte;
- int ret = 0, referenced = 0, none = 0;
+ int ret = 0, none = 0;
struct page *page;
unsigned long _address;
spinlock_t *ptl;
int node = NUMA_NO_NODE;
+ bool writable = false, referenced = false;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
@@ -2573,8 +2562,11 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
else
goto out_unmap;
}
- if (!pte_present(pteval) || !pte_write(pteval))
+ if (!pte_present(pteval))
goto out_unmap;
+ if (pte_write(pteval))
+ writable = true;
+
page = vm_normal_page(vma, _address, pteval);
if (unlikely(!page))
goto out_unmap;
@@ -2591,14 +2583,18 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
VM_BUG_ON_PAGE(PageCompound(page), page);
if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
goto out_unmap;
- /* cannot use mapcount: can't collapse if there's a gup pin */
- if (page_count(page) != 1)
+ /*
+ * cannot use mapcount: can't collapse if there's a gup pin.
+ * The page must only be referenced by the scanned process
+ * and page swap cache.
+ */
+ if (page_count(page) != 1 + !!PageSwapCache(page))
goto out_unmap;
if (pte_young(pteval) || PageReferenced(page) ||
mmu_notifier_test_young(vma->vm_mm, address))
- referenced = 1;
+ referenced = true;
}
- if (referenced)
+ if (referenced && writable)
ret = 1;
out_unmap:
pte_unmap_unlock(pte, ptl);
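
The reworked pin checks in both the isolate and scan paths above rely on a small refcount identity: a collapsible anonymous page is referenced once by the PTE mapping in the scanned mm, plus once more if it also sits in the swap cache, so any extra reference (typically a get_user_pages pin) disqualifies it. A minimal sketch of that predicate, using a hypothetical helper name rather than anything added by this patch:

	/* Hypothetical helper, for illustration only; not added by this patch. */
	static inline bool thp_candidate_unpinned(struct page *page)
	{
		/* one reference from the mapping, plus one if the page is in swap cache */
		return page_count(page) == 1 + !!PageSwapCache(page);
	}
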
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 85032de5e20f..0a9ac6c26832 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -35,7 +35,7 @@
#include <linux/node.h>
#include "internal.h"
-unsigned long hugepages_treat_as_movable;
+int hugepages_treat_as_movable;
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
@@ -2657,9 +2657,10 @@ again:
goto unlock;
/*
- * HWPoisoned hugepage is already unmapped and dropped reference
+ * Migrating hugepage or HWPoisoned hugepage is already
+ * unmapped and its refcount is dropped, so just clear pte here.
*/
- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+ if (unlikely(!pte_present(pte))) {
huge_pte_clear(mm, address, ptep);
goto unlock;
}
@@ -3134,6 +3135,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *pagecache_page = NULL;
struct hstate *h = hstate_vma(vma);
struct address_space *mapping;
+ int need_wait_lock = 0;
address &= huge_page_mask(h);
@@ -3172,6 +3174,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = 0;
/*
+ * entry could be a migration/hwpoison entry at this point, so this
+ * check prevents the kernel from going further below assuming that we
+ * have an active hugepage in pagecache. This goto expects the 2nd page
+ * fault, and the is_hugetlb_entry_(migration|hwpoisoned) check will
+ * properly handle it.
+ */
+ if (!pte_present(entry))
+ goto out_mutex;
+
+ /*
* If we are going to COW the mapping later, we examine the pending
* reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
@@ -3190,30 +3202,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vma, address);
}
+ ptl = huge_pte_lock(h, mm, ptep);
+
+ /* Check for a racing update before calling hugetlb_cow */
+ if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+ goto out_ptl;
+
/*
* hugetlb_cow() requires page locks of pte_page(entry) and
* pagecache_page, so here we need take the former one
* when page != pagecache_page or !pagecache_page.
- * Note that locking order is always pagecache_page -> page,
- * so no worry about deadlock.
*/
page = pte_page(entry);
- get_page(page);
if (page != pagecache_page)
- lock_page(page);
-
- ptl = huge_pte_lockptr(h, mm, ptep);
- spin_lock(ptl);
- /* Check for a racing update before calling hugetlb_cow */
- if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
- goto out_ptl;
+ if (!trylock_page(page)) {
+ need_wait_lock = 1;
+ goto out_ptl;
+ }
+ get_page(page);
if (flags & FAULT_FLAG_WRITE) {
if (!huge_pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page, ptl);
- goto out_ptl;
+ goto out_put_page;
}
entry = huge_pte_mkdirty(entry);
}
@@ -3221,7 +3234,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, ptep);
-
+out_put_page:
+ if (page != pagecache_page)
+ unlock_page(page);
+ put_page(page);
out_ptl:
spin_unlock(ptl);
@@ -3229,12 +3245,17 @@ out_ptl:
unlock_page(pagecache_page);
put_page(pagecache_page);
}
- if (page != pagecache_page)
- unlock_page(page);
- put_page(page);
-
out_mutex:
mutex_unlock(&htlb_fault_mutex_table[hash]);
+ /*
+ * Generally it's safe to hold a refcount while waiting for the page
+ * lock. But here we just wait to defer the next page fault and avoid a
+ * busy loop; the page is not used after being unlocked before the
+ * current page fault returns. So we are safe from accessing a freed
+ * page even if we wait here without taking a refcount.
+ */
+ if (need_wait_lock)
+ wait_on_page_locked(page);
return ret;
}
@@ -3364,7 +3385,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
spin_unlock(ptl);
continue;
}
- if (!huge_pte_none(huge_ptep_get(ptep))) {
+ pte = huge_ptep_get(ptep);
+ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+ spin_unlock(ptl);
+ continue;
+ }
+ if (unlikely(is_hugetlb_entry_migration(pte))) {
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ if (is_write_migration_entry(entry)) {
+ pte_t newpte;
+
+ make_migration_entry_read(&entry);
+ newpte = swp_entry_to_pte(entry);
+ set_huge_pte_at(mm, address, ptep, newpte);
+ pages++;
+ }
+ spin_unlock(ptl);
+ continue;
+ }
+ if (!huge_pte_none(pte)) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(huge_pte_modify(pte, newprot));
pte = arch_make_huge_pte(pte, vma, NULL, 0);
@@ -3558,6 +3598,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (saddr) {
spte = huge_pte_offset(svma->vm_mm, saddr);
if (spte) {
+ mm_inc_nr_pmds(mm);
get_page(virt_to_page(spte));
break;
}
@@ -3569,11 +3610,13 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
spin_lock(ptl);
- if (pud_none(*pud))
+ if (pud_none(*pud)) {
pud_populate(mm, pud,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
- else
+ } else {
put_page(virt_to_page(spte));
+ mm_inc_nr_pmds(mm);
+ }
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -3604,6 +3647,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
pud_clear(pud);
put_page(virt_to_page(ptep));
+ mm_dec_nr_pmds(mm);
*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
return 1;
}
@@ -3660,42 +3704,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmd;
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
- return page;
+/*
+ * These functions are overridable if your architecture needs its own
+ * behavior.
+ */
+struct page * __weak
+follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write)
+{
+ return ERR_PTR(-EINVAL);
}
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
+struct page * __weak
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int flags)
{
- struct page *page;
-
- page = pte_page(*(pte_t *)pud);
- if (page)
- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+ struct page *page = NULL;
+ spinlock_t *ptl;
+retry:
+ ptl = pmd_lockptr(mm, pmd);
+ spin_lock(ptl);
+ /*
+ * make sure that the address range covered by this pmd is not
+ * unmapped by other threads.
+ */
+ if (!pmd_huge(*pmd))
+ goto out;
+ if (pmd_present(*pmd)) {
+ page = pte_page(*(pte_t *)pmd) +
+ ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+ if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, (pte_t *)pmd, ptl);
+ goto retry;
+ }
+ /*
+ * hwpoisoned entry is treated as no_page_table in
+ * follow_page_mask().
+ */
+ }
+out:
+ spin_unlock(ptl);
return page;
}
-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-
-/* Can be overriden by architectures */
struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
+ pud_t *pud, int flags)
{
- BUG();
- return NULL;
-}
+ if (flags & FOLL_GET)
+ return NULL;
-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+ return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+}
#ifdef CONFIG_MEMORY_FAILURE
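
The follow_huge_* helpers are now plain __weak symbols, so an architecture only has to provide a non-weak definition with the same prototype and the linker will pick it over the generic one. A skeletal example of such an override (entirely illustrative; no architecture code from this series is shown):

	/* Entirely illustrative: an arch overriding the __weak generic helper. */
	struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
				      int write)
	{
		/* arch-specific huge page lookup would go here */
		return ERR_PTR(-EINVAL);
	}
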
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 037e1c00a5b7..6e0057439a46 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -279,7 +279,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
return -EINVAL;
buf = strstrip(buf);
- ret = page_counter_memparse(buf, &nr_pages);
+ ret = page_counter_memparse(buf, "-1", &nr_pages);
if (ret)
return ret;
diff --git a/mm/internal.h b/mm/internal.h
index efad241f7014..a96da5b0029d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -110,6 +110,28 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
*/
/*
+ * Structure for holding the mostly immutable allocation parameters passed
+ * between functions involved in allocations, including the alloc_pages*
+ * family of functions.
+ *
+ * nodemask, migratetype and high_zoneidx are initialized only once in
+ * __alloc_pages_nodemask() and then never change.
+ *
+ * zonelist, preferred_zone and classzone_idx are set first in
+ * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * in __alloc_pages_slowpath(). All other functions pass the whole structure
+ * by a const pointer.
+ */
+struct alloc_context {
+ struct zonelist *zonelist;
+ nodemask_t *nodemask;
+ struct zone *preferred_zone;
+ int classzone_idx;
+ int migratetype;
+ enum zone_type high_zoneidx;
+};
+
+/*
* Locate the struct page for both the matching buddy in our
* pair (buddy1) and the combined O(n+1) page they form (page).
*
@@ -329,8 +351,10 @@ extern int mminit_loglevel;
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
if (level < mminit_loglevel) { \
- printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
- printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
+ if (level <= MMINIT_WARNING) \
+ printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \
+ else \
+ printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
} \
} while (0)
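
The new struct alloc_context is meant to be filled exactly once per allocation request and then handed down read-only; only the zonelist/preferred_zone/classzone_idx trio may be refreshed in the slowpath. A rough sketch of that usage shape, with an illustrative value where the real code derives the migratetype from the gfp mask (this is not the actual __alloc_pages_nodemask() body):

	/* Sketch of the usage pattern only; not the real allocator entry point. */
	static struct page *sketch_alloc_pages(gfp_t gfp_mask, unsigned int order,
					       struct zonelist *zonelist,
					       nodemask_t *nodemask)
	{
		struct alloc_context ac = {
			.zonelist	= zonelist,
			.nodemask	= nodemask,
			.high_zoneidx	= gfp_zone(gfp_mask),
			.migratetype	= MIGRATE_MOVABLE,	/* really derived from gfp_mask */
		};

		/*
		 * preferred_zone and classzone_idx would be derived from the
		 * zonelist before the first attempt and possibly refreshed in
		 * the slowpath; everything below that point only ever sees a
		 * const struct alloc_context *.
		 */
		return NULL;	/* placeholder for the fast/slow paths */
	}
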
diff --git a/mm/interval_tree.c b/mm/interval_tree.c
index 8da581fa9060..f2c2492681bf 100644
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -21,8 +21,8 @@ static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}
-INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.linear.rb,
- unsigned long, shared.linear.rb_subtree_last,
+INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
+ unsigned long, shared.rb_subtree_last,
vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
/* Insert node immediately after prev in the interval tree */
@@ -36,26 +36,26 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node,
VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);
- if (!prev->shared.linear.rb.rb_right) {
+ if (!prev->shared.rb.rb_right) {
parent = prev;
- link = &prev->shared.linear.rb.rb_right;
+ link = &prev->shared.rb.rb_right;
} else {
- parent = rb_entry(prev->shared.linear.rb.rb_right,
- struct vm_area_struct, shared.linear.rb);
- if (parent->shared.linear.rb_subtree_last < last)
- parent->shared.linear.rb_subtree_last = last;
- while (parent->shared.linear.rb.rb_left) {
- parent = rb_entry(parent->shared.linear.rb.rb_left,
- struct vm_area_struct, shared.linear.rb);
- if (parent->shared.linear.rb_subtree_last < last)
- parent->shared.linear.rb_subtree_last = last;
+ parent = rb_entry(prev->shared.rb.rb_right,
+ struct vm_area_struct, shared.rb);
+ if (parent->shared.rb_subtree_last < last)
+ parent->shared.rb_subtree_last = last;
+ while (parent->shared.rb.rb_left) {
+ parent = rb_entry(parent->shared.rb.rb_left,
+ struct vm_area_struct, shared.rb);
+ if (parent->shared.rb_subtree_last < last)
+ parent->shared.rb_subtree_last = last;
}
- link = &parent->shared.linear.rb.rb_left;
+ link = &parent->shared.rb.rb_left;
}
- node->shared.linear.rb_subtree_last = last;
- rb_link_node(&node->shared.linear.rb, &parent->shared.linear.rb, link);
- rb_insert_augmented(&node->shared.linear.rb, root,
+ node->shared.rb_subtree_last = last;
+ rb_link_node(&node->shared.rb, &parent->shared.rb, link);
+ rb_insert_augmented(&node->shared.rb, root,
&vma_interval_tree_augment);
}
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
new file mode 100644
index 000000000000..bd837b8c2f41
--- /dev/null
+++ b/mm/kasan/Makefile
@@ -0,0 +1,8 @@
+KASAN_SANITIZE := n
+
+CFLAGS_REMOVE_kasan.o = -pg
+# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
+# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
+CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+
+obj-y := kasan.o report.o
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
new file mode 100644
index 000000000000..78fee632a7ee
--- /dev/null
+++ b/mm/kasan/kasan.c
@@ -0,0 +1,516 @@
+/*
+ * This file contains shadow memory manipulation code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ *
+ * Some of code borrowed from https://github.com/xairy/linux by
+ * Andrey Konovalov <adech.fo@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kasan.h>
+
+#include "kasan.h"
+#include "../slab.h"
+
+/*
+ * Poisons the shadow memory for 'size' bytes starting from 'addr'.
+ * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ */
+static void kasan_poison_shadow(const void *address, size_t size, u8 value)
+{
+ void *shadow_start, *shadow_end;
+
+ shadow_start = kasan_mem_to_shadow(address);
+ shadow_end = kasan_mem_to_shadow(address + size);
+
+ memset(shadow_start, value, shadow_end - shadow_start);
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size)
+{
+ kasan_poison_shadow(address, size, 0);
+
+ if (size & KASAN_SHADOW_MASK) {
+ u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+ *shadow = size & KASAN_SHADOW_MASK;
+ }
+}
+
+
+/*
+ * All functions below are always inlined so the compiler can
+ * perform better optimizations in each of __asan_loadX/__asan_storeX
+ * depending on memory access size X.
+ */
+
+static __always_inline bool memory_is_poisoned_1(unsigned long addr)
+{
+ s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(shadow_value)) {
+ s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ return unlikely(last_accessible_byte >= shadow_value);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ if (memory_is_poisoned_1(addr + 1))
+ return true;
+
+ if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
+ return false;
+
+ return unlikely(*(u8 *)shadow_addr);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_4(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ if (memory_is_poisoned_1(addr + 3))
+ return true;
+
+ if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
+ return false;
+
+ return unlikely(*(u8 *)shadow_addr);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_8(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ if (memory_is_poisoned_1(addr + 7))
+ return true;
+
+ if (likely(((addr + 7) & KASAN_SHADOW_MASK) >= 7))
+ return false;
+
+ return unlikely(*(u8 *)shadow_addr);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_16(unsigned long addr)
+{
+ u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ u16 shadow_first_bytes = *(u16 *)shadow_addr;
+ s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
+
+ if (unlikely(shadow_first_bytes))
+ return true;
+
+ if (likely(!last_byte))
+ return false;
+
+ return memory_is_poisoned_1(addr + 15);
+ }
+
+ return false;
+}
+
+static __always_inline unsigned long bytes_is_zero(const u8 *start,
+ size_t size)
+{
+ while (size) {
+ if (unlikely(*start))
+ return (unsigned long)start;
+ start++;
+ size--;
+ }
+
+ return 0;
+}
+
+static __always_inline unsigned long memory_is_zero(const void *start,
+ const void *end)
+{
+ unsigned int words;
+ unsigned long ret;
+ unsigned int prefix = (unsigned long)start % 8;
+
+ if (end - start <= 16)
+ return bytes_is_zero(start, end - start);
+
+ if (prefix) {
+ prefix = 8 - prefix;
+ ret = bytes_is_zero(start, prefix);
+ if (unlikely(ret))
+ return ret;
+ start += prefix;
+ }
+
+ words = (end - start) / 8;
+ while (words) {
+ if (unlikely(*(u64 *)start))
+ return bytes_is_zero(start, 8);
+ start += 8;
+ words--;
+ }
+
+ return bytes_is_zero(start, (end - start) % 8);
+}
+
+static __always_inline bool memory_is_poisoned_n(unsigned long addr,
+ size_t size)
+{
+ unsigned long ret;
+
+ ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
+ kasan_mem_to_shadow((void *)addr + size - 1) + 1);
+
+ if (unlikely(ret)) {
+ unsigned long last_byte = addr + size - 1;
+ s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
+
+ if (unlikely(ret != (unsigned long)last_shadow ||
+ ((last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
+{
+ if (__builtin_constant_p(size)) {
+ switch (size) {
+ case 1:
+ return memory_is_poisoned_1(addr);
+ case 2:
+ return memory_is_poisoned_2(addr);
+ case 4:
+ return memory_is_poisoned_4(addr);
+ case 8:
+ return memory_is_poisoned_8(addr);
+ case 16:
+ return memory_is_poisoned_16(addr);
+ default:
+ BUILD_BUG();
+ }
+ }
+
+ return memory_is_poisoned_n(addr, size);
+}
+
+
+static __always_inline void check_memory_region(unsigned long addr,
+ size_t size, bool write)
+{
+ struct kasan_access_info info;
+
+ if (unlikely(size == 0))
+ return;
+
+ if (unlikely((void *)addr <
+ kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+ info.access_addr = (void *)addr;
+ info.access_size = size;
+ info.is_write = write;
+ info.ip = _RET_IP_;
+ kasan_report_user_access(&info);
+ return;
+ }
+
+ if (likely(!memory_is_poisoned(addr, size)))
+ return;
+
+ kasan_report(addr, size, write, _RET_IP_);
+}
+
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+
+#undef memset
+void *memset(void *addr, int c, size_t len)
+{
+ __asan_storeN((unsigned long)addr, len);
+
+ return __memset(addr, c, len);
+}
+
+#undef memmove
+void *memmove(void *dest, const void *src, size_t len)
+{
+ __asan_loadN((unsigned long)src, len);
+ __asan_storeN((unsigned long)dest, len);
+
+ return __memmove(dest, src, len);
+}
+
+#undef memcpy
+void *memcpy(void *dest, const void *src, size_t len)
+{
+ __asan_loadN((unsigned long)src, len);
+ __asan_storeN((unsigned long)dest, len);
+
+ return __memcpy(dest, src, len);
+}
+
+void kasan_alloc_pages(struct page *page, unsigned int order)
+{
+ if (likely(!PageHighMem(page)))
+ kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+ if (likely(!PageHighMem(page)))
+ kasan_poison_shadow(page_address(page),
+ PAGE_SIZE << order,
+ KASAN_FREE_PAGE);
+}
+
+void kasan_poison_slab(struct page *page)
+{
+ kasan_poison_shadow(page_address(page),
+ PAGE_SIZE << compound_order(page),
+ KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+{
+ kasan_unpoison_shadow(object, cache->object_size);
+}
+
+void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+{
+ kasan_poison_shadow(object,
+ round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+ KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+{
+ kasan_kmalloc(cache, object, cache->object_size);
+}
+
+void kasan_slab_free(struct kmem_cache *cache, void *object)
+{
+ unsigned long size = cache->object_size;
+ unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+
+ /* RCU slabs could be legally used after free within the RCU period */
+ if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+ return;
+
+ kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+{
+ unsigned long redzone_start;
+ unsigned long redzone_end;
+
+ if (unlikely(object == NULL))
+ return;
+
+ redzone_start = round_up((unsigned long)(object + size),
+ KASAN_SHADOW_SCALE_SIZE);
+ redzone_end = round_up((unsigned long)object + cache->object_size,
+ KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(object, size);
+ kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ KASAN_KMALLOC_REDZONE);
+}
+EXPORT_SYMBOL(kasan_kmalloc);
+
+void kasan_kmalloc_large(const void *ptr, size_t size)
+{
+ struct page *page;
+ unsigned long redzone_start;
+ unsigned long redzone_end;
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ page = virt_to_page(ptr);
+ redzone_start = round_up((unsigned long)(ptr + size),
+ KASAN_SHADOW_SCALE_SIZE);
+ redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+
+ kasan_unpoison_shadow(ptr, size);
+ kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ KASAN_PAGE_REDZONE);
+}
+
+void kasan_krealloc(const void *object, size_t size)
+{
+ struct page *page;
+
+ if (unlikely(object == ZERO_SIZE_PTR))
+ return;
+
+ page = virt_to_head_page(object);
+
+ if (unlikely(!PageSlab(page)))
+ kasan_kmalloc_large(object, size);
+ else
+ kasan_kmalloc(page->slab_cache, object, size);
+}
+
+void kasan_kfree_large(const void *ptr)
+{
+ struct page *page = virt_to_page(ptr);
+
+ kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+ KASAN_FREE_PAGE);
+}
+
+int kasan_module_alloc(void *addr, size_t size)
+{
+ void *ret;
+ size_t shadow_size;
+ unsigned long shadow_start;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+ shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
+ PAGE_SIZE);
+
+ if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+ return -EINVAL;
+
+ ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+ shadow_start + shadow_size,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ return ret ? 0 : -ENOMEM;
+}
+
+void kasan_module_free(void *addr)
+{
+ vfree(kasan_mem_to_shadow(addr));
+}
+
+static void register_global(struct kasan_global *global)
+{
+ size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(global->beg, global->size);
+
+ kasan_poison_shadow(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
+}
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
+#define DEFINE_ASAN_LOAD_STORE(size) \
+ void __asan_load##size(unsigned long addr) \
+ { \
+ check_memory_region(addr, size, false); \
+ } \
+ EXPORT_SYMBOL(__asan_load##size); \
+ __alias(__asan_load##size) \
+ void __asan_load##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_load##size##_noabort); \
+ void __asan_store##size(unsigned long addr) \
+ { \
+ check_memory_region(addr, size, true); \
+ } \
+ EXPORT_SYMBOL(__asan_store##size); \
+ __alias(__asan_store##size) \
+ void __asan_store##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_store##size##_noabort)
+
+DEFINE_ASAN_LOAD_STORE(1);
+DEFINE_ASAN_LOAD_STORE(2);
+DEFINE_ASAN_LOAD_STORE(4);
+DEFINE_ASAN_LOAD_STORE(8);
+DEFINE_ASAN_LOAD_STORE(16);
+
+void __asan_loadN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, false);
+}
+EXPORT_SYMBOL(__asan_loadN);
+
+__alias(__asan_loadN)
+void __asan_loadN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_loadN_noabort);
+
+void __asan_storeN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, true);
+}
+EXPORT_SYMBOL(__asan_storeN);
+
+__alias(__asan_storeN)
+void __asan_storeN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_storeN_noabort);
+
+/* to shut up compiler complaints */
+void __asan_handle_no_return(void) {}
+EXPORT_SYMBOL(__asan_handle_no_return);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int kasan_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static int __init kasan_memhotplug_init(void)
+{
+ pr_err("WARNING: KASan doesn't support memory hot-add\n");
+ pr_err("Memory hot-add will be disabled\n");
+
+ hotplug_memory_notifier(kasan_mem_notifier, 0);
+
+ return 0;
+}
+
+module_init(kasan_memhotplug_init);
+#endif
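
The shadow encoding handled above is compact: each shadow byte covers one granule of memory (assuming the usual 8-byte granule), 0 means the whole granule is accessible, a value of 1..7 means only that many leading bytes are valid, and the large poison values such as KASAN_KMALLOC_REDZONE mark the granule as entirely off limits. A small user-space style demonstration (not kernel code) of what kasan_unpoison_shadow() leaves behind for a 13-byte object inside a redzoned region:

	#include <stdio.h>
	#include <string.h>

	#define SHADOW_SCALE_SIZE 8UL
	#define SHADOW_MASK (SHADOW_SCALE_SIZE - 1)
	#define KMALLOC_REDZONE 0xFC	/* redzone marker, as in kasan.h above */

	int main(void)
	{
		unsigned char shadow[4];
		size_t size = 13;	/* bytes being unpoisoned */

		/* start from a fully poisoned (redzoned) region */
		memset(shadow, KMALLOC_REDZONE, sizeof(shadow));

		/* whole granules become 0 ... */
		memset(shadow, 0, size / SHADOW_SCALE_SIZE);
		/* ... and a trailing partial granule records how many bytes are valid */
		if (size & SHADOW_MASK)
			shadow[size / SHADOW_SCALE_SIZE] = size & SHADOW_MASK;

		for (size_t i = 0; i < sizeof(shadow); i++)
			printf("shadow[%zu] = 0x%02x\n", i, shadow[i]);
		/* prints 0x00, 0x05, 0xfc, 0xfc: bytes 0-7 valid, bytes 8-12 valid,
		 * everything past the object still redzoned */
		return 0;
	}
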
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
new file mode 100644
index 000000000000..4986b0acab21
--- /dev/null
+++ b/mm/kasan/kasan.h
@@ -0,0 +1,75 @@
+#ifndef __MM_KASAN_KASAN_H
+#define __MM_KASAN_KASAN_H
+
+#include <linux/kasan.h>
+
+#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_FREE_PAGE 0xFF /* page was freed */
+#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
+
+/*
+ * Stack redzone shadow values
+ * (Those are compiler's ABI, don't change them)
+ */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
+
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
+
+struct kasan_access_info {
+ const void *access_addr;
+ const void *first_bad_addr;
+ size_t access_size;
+ bool is_write;
+ unsigned long ip;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_global {
+ const void *beg; /* Address of the beginning of the global variable. */
+ size_t size; /* Size of the global variable. */
+ size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */
+ const void *name;
+ const void *module_name; /* Name of the module where the global variable is declared. */
+ unsigned long has_dynamic_init; /* This is needed for C++ */
+#if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+#endif
+};
+
+void kasan_report_error(struct kasan_access_info *info);
+void kasan_report_user_access(struct kasan_access_info *info);
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
+ << KASAN_SHADOW_SCALE_SHIFT);
+}
+
+static inline bool kasan_enabled(void)
+{
+ return !current->kasan_depth;
+}
+
+void kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+
+#endif
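
kasan_shadow_to_mem() above subtracts KASAN_SHADOW_OFFSET and shifts left by KASAN_SHADOW_SCALE_SHIFT, so the forward mem-to-shadow mapping used throughout kasan.c must be the mirror image: shift right, then add the offset. A sketch of that inverse (the real kasan_mem_to_shadow() helper is declared outside this file, presumably in include/linux/kasan.h):

	/* Sketch: the forward mapping implied by inverting kasan_shadow_to_mem(). */
	static inline void *sketch_mem_to_shadow(const void *addr)
	{
		return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
				+ KASAN_SHADOW_OFFSET);
	}
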
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
new file mode 100644
index 000000000000..680ceedf810a
--- /dev/null
+++ b/mm/kasan/report.c
@@ -0,0 +1,269 @@
+/*
+ * This file contains error reporting code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ *
+ * Some of code borrowed from https://github.com/xairy/linux by
+ * Andrey Konovalov <adech.fo@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kasan.h>
+
+#include <asm/sections.h>
+
+#include "kasan.h"
+#include "../slab.h"
+
+/* Shadow layout customization. */
+#define SHADOW_BYTES_PER_BLOCK 1
+#define SHADOW_BLOCKS_PER_ROW 16
+#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
+#define SHADOW_ROWS_AROUND_ADDR 2
+
+static const void *find_first_bad_addr(const void *addr, size_t size)
+{
+ u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr);
+ const void *first_bad_addr = addr;
+
+ while (!shadow_val && first_bad_addr < addr + size) {
+ first_bad_addr += KASAN_SHADOW_SCALE_SIZE;
+ shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr);
+ }
+ return first_bad_addr;
+}
+
+static void print_error_description(struct kasan_access_info *info)
+{
+ const char *bug_type = "unknown crash";
+ u8 shadow_val;
+
+ info->first_bad_addr = find_first_bad_addr(info->access_addr,
+ info->access_size);
+
+ shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr);
+
+ switch (shadow_val) {
+ case KASAN_FREE_PAGE:
+ case KASAN_KMALLOC_FREE:
+ bug_type = "use after free";
+ break;
+ case KASAN_PAGE_REDZONE:
+ case KASAN_KMALLOC_REDZONE:
+ case KASAN_GLOBAL_REDZONE:
+ case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+ bug_type = "out of bounds access";
+ break;
+ case KASAN_STACK_LEFT:
+ case KASAN_STACK_MID:
+ case KASAN_STACK_RIGHT:
+ case KASAN_STACK_PARTIAL:
+ bug_type = "out of bounds on stack";
+ break;
+ }
+
+ pr_err("BUG: KASan: %s in %pS at addr %p\n",
+ bug_type, (void *)info->ip,
+ info->access_addr);
+ pr_err("%s of size %zu by task %s/%d\n",
+ info->is_write ? "Write" : "Read",
+ info->access_size, current->comm, task_pid_nr(current));
+}
+
+static inline bool kernel_or_module_addr(const void *addr)
+{
+ return (addr >= (void *)_stext && addr < (void *)_end)
+ || (addr >= (void *)MODULES_VADDR
+ && addr < (void *)MODULES_END);
+}
+
+static inline bool init_task_stack_addr(const void *addr)
+{
+ return addr >= (void *)&init_thread_union.stack &&
+ (addr <= (void *)&init_thread_union.stack +
+ sizeof(init_thread_union.stack));
+}
+
+static void print_address_description(struct kasan_access_info *info)
+{
+ const void *addr = info->access_addr;
+
+ if ((addr >= (void *)PAGE_OFFSET) &&
+ (addr < high_memory)) {
+ struct page *page = virt_to_head_page(addr);
+
+ if (PageSlab(page)) {
+ void *object;
+ struct kmem_cache *cache = page->slab_cache;
+ void *last_object;
+
+ object = virt_to_obj(cache, page_address(page), addr);
+ last_object = page_address(page) +
+ page->objects * cache->size;
+
+ if (unlikely(object > last_object))
+ object = last_object; /* we hit into padding */
+
+ object_err(cache, page, object,
+ "kasan: bad access detected");
+ return;
+ }
+ dump_page(page, "kasan: bad access detected");
+ }
+
+ if (kernel_or_module_addr(addr)) {
+ if (!init_task_stack_addr(addr))
+ pr_err("Address belongs to variable %pS\n", addr);
+ }
+
+ dump_stack();
+}
+
+static bool row_is_guilty(const void *row, const void *guilty)
+{
+ return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
+}
+
+static int shadow_pointer_offset(const void *row, const void *shadow)
+{
+ /* The length of ">ff00ff00ff00ff00: " is
+ * 3 + (BITS_PER_LONG/8)*2 chars.
+ */
+ return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
+ (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
+}
+
+static void print_shadow_for_address(const void *addr)
+{
+ int i;
+ const void *shadow = kasan_mem_to_shadow(addr);
+ const void *shadow_row;
+
+ shadow_row = (void *)round_down((unsigned long)shadow,
+ SHADOW_BYTES_PER_ROW)
+ - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
+
+ pr_err("Memory state around the buggy address:\n");
+
+ for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
+ const void *kaddr = kasan_shadow_to_mem(shadow_row);
+ char buffer[4 + (BITS_PER_LONG/8)*2];
+
+ snprintf(buffer, sizeof(buffer),
+ (i == 0) ? ">%p: " : " %p: ", kaddr);
+
+ kasan_disable_current();
+ print_hex_dump(KERN_ERR, buffer,
+ DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
+ shadow_row, SHADOW_BYTES_PER_ROW, 0);
+ kasan_enable_current();
+
+ if (row_is_guilty(shadow_row, shadow))
+ pr_err("%*c\n",
+ shadow_pointer_offset(shadow_row, shadow),
+ '^');
+
+ shadow_row += SHADOW_BYTES_PER_ROW;
+ }
+}
+
+static DEFINE_SPINLOCK(report_lock);
+
+void kasan_report_error(struct kasan_access_info *info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&report_lock, flags);
+ pr_err("================================="
+ "=================================\n");
+ print_error_description(info);
+ print_address_description(info);
+ print_shadow_for_address(info->first_bad_addr);
+ pr_err("================================="
+ "=================================\n");
+ spin_unlock_irqrestore(&report_lock, flags);
+}
+
+void kasan_report_user_access(struct kasan_access_info *info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&report_lock, flags);
+ pr_err("================================="
+ "=================================\n");
+ pr_err("BUG: KASan: user-memory-access on address %p\n",
+ info->access_addr);
+ pr_err("%s of size %zu by task %s/%d\n",
+ info->is_write ? "Write" : "Read",
+ info->access_size, current->comm, task_pid_nr(current));
+ dump_stack();
+ pr_err("================================="
+ "=================================\n");
+ spin_unlock_irqrestore(&report_lock, flags);
+}
+
+void kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip)
+{
+ struct kasan_access_info info;
+
+ if (likely(!kasan_enabled()))
+ return;
+
+ info.access_addr = (void *)addr;
+ info.access_size = size;
+ info.is_write = is_write;
+ info.ip = ip;
+ kasan_report_error(&info);
+}
+
+
+#define DEFINE_ASAN_REPORT_LOAD(size) \
+void __asan_report_load##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, false, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_load##size##_noabort)
+
+#define DEFINE_ASAN_REPORT_STORE(size) \
+void __asan_report_store##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, true, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_store##size##_noabort)
+
+DEFINE_ASAN_REPORT_LOAD(1);
+DEFINE_ASAN_REPORT_LOAD(2);
+DEFINE_ASAN_REPORT_LOAD(4);
+DEFINE_ASAN_REPORT_LOAD(8);
+DEFINE_ASAN_REPORT_LOAD(16);
+DEFINE_ASAN_REPORT_STORE(1);
+DEFINE_ASAN_REPORT_STORE(2);
+DEFINE_ASAN_REPORT_STORE(4);
+DEFINE_ASAN_REPORT_STORE(8);
+DEFINE_ASAN_REPORT_STORE(16);
+
+void __asan_report_load_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_load_n_noabort);
+
+void __asan_report_store_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_store_n_noabort);
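
The caret placement computed by shadow_pointer_offset() above is easiest to check with concrete numbers. On a 64-bit kernel a row prefix such as ">ffff880012345678: " (address illustrative only) occupies 3 + (BITS_PER_LONG/8)*2 = 3 + 16 = 19 columns, and print_hex_dump() with a one-byte group size emits each shadow byte as two hex digits plus one separator, i.e. three columns per byte. The byte at index k within the row therefore starts at column 19 + 3*k + 1, which is exactly 3 + (BITS_PER_LONG/8)*2 + k*2 + k/SHADOW_BYTES_PER_BLOCK + 1 with SHADOW_BYTES_PER_BLOCK equal to 1.
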
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3cda50c1e394..5405aff5a590 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -98,6 +98,7 @@
#include <asm/processor.h>
#include <linux/atomic.h>
+#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>
@@ -1113,7 +1114,10 @@ static bool update_checksum(struct kmemleak_object *object)
if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
return false;
+ kasan_disable_current();
object->checksum = crc32(0, (void *)object->pointer, object->size);
+ kasan_enable_current();
+
return object->checksum != old_csum;
}
@@ -1164,7 +1168,9 @@ static void scan_block(void *_start, void *_end,
BYTES_PER_POINTER))
continue;
+ kasan_disable_current();
pointer = *ptr;
+ kasan_enable_current();
object = find_and_get_object(pointer, 1);
if (!object)
diff --git a/mm/ksm.c b/mm/ksm.c
index d247efab5073..4162dce2eb44 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
else
ret = VM_FAULT_WRITE;
put_page(page);
- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
/*
* We must loop because handle_mm_fault() may back out if there's
* any difficulty e.g. if pte accessed bit gets updated concurrently.
@@ -1748,7 +1748,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
*/
if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
VM_PFNMAP | VM_IO | VM_DONTEXPAND |
- VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
+ VM_HUGETLB | VM_MIXEDMAP))
return 0; /* just ignore the advice */
#ifdef VM_SAO
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f1a0db194173..909eca2c820e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -9,18 +9,100 @@
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/memcontrol.h>
+
+#ifdef CONFIG_MEMCG_KMEM
+static LIST_HEAD(list_lrus);
+static DEFINE_MUTEX(list_lrus_mutex);
+
+static void list_lru_register(struct list_lru *lru)
+{
+ mutex_lock(&list_lrus_mutex);
+ list_add(&lru->list, &list_lrus);
+ mutex_unlock(&list_lrus_mutex);
+}
+
+static void list_lru_unregister(struct list_lru *lru)
+{
+ mutex_lock(&list_lrus_mutex);
+ list_del(&lru->list);
+ mutex_unlock(&list_lrus_mutex);
+}
+#else
+static void list_lru_register(struct list_lru *lru)
+{
+}
+
+static void list_lru_unregister(struct list_lru *lru)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
+#ifdef CONFIG_MEMCG_KMEM
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+ return !!lru->node[0].memcg_lrus;
+}
+
+static inline struct list_lru_one *
+list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
+{
+ /*
+ * The lock protects the array of per cgroup lists from relocation
+ * (see memcg_update_list_lru_node).
+ */
+ lockdep_assert_held(&nlru->lock);
+ if (nlru->memcg_lrus && idx >= 0)
+ return nlru->memcg_lrus->lru[idx];
+
+ return &nlru->lru;
+}
+
+static inline struct list_lru_one *
+list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
+{
+ struct mem_cgroup *memcg;
+
+ if (!nlru->memcg_lrus)
+ return &nlru->lru;
+
+ memcg = mem_cgroup_from_kmem(ptr);
+ if (!memcg)
+ return &nlru->lru;
+
+ return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
+}
+#else
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+ return false;
+}
+
+static inline struct list_lru_one *
+list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
+{
+ return &nlru->lru;
+}
+
+static inline struct list_lru_one *
+list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
+{
+ return &nlru->lru;
+}
+#endif /* CONFIG_MEMCG_KMEM */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
int nid = page_to_nid(virt_to_page(item));
struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_one *l;
spin_lock(&nlru->lock);
- WARN_ON_ONCE(nlru->nr_items < 0);
+ l = list_lru_from_kmem(nlru, item);
if (list_empty(item)) {
- list_add_tail(item, &nlru->list);
- if (nlru->nr_items++ == 0)
- node_set(nid, lru->active_nodes);
+ list_add_tail(item, &l->list);
+ l->nr_items++;
spin_unlock(&nlru->lock);
return true;
}
@@ -33,13 +115,13 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
int nid = page_to_nid(virt_to_page(item));
struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_one *l;
spin_lock(&nlru->lock);
+ l = list_lru_from_kmem(nlru, item);
if (!list_empty(item)) {
list_del_init(item);
- if (--nlru->nr_items == 0)
- node_clear(nid, lru->active_nodes);
- WARN_ON_ONCE(nlru->nr_items < 0);
+ l->nr_items--;
spin_unlock(&nlru->lock);
return true;
}
@@ -48,33 +130,72 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
}
EXPORT_SYMBOL_GPL(list_lru_del);
-unsigned long
-list_lru_count_node(struct list_lru *lru, int nid)
+void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
+{
+ list_del_init(item);
+ list->nr_items--;
+}
+EXPORT_SYMBOL_GPL(list_lru_isolate);
+
+void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
+ struct list_head *head)
+{
+ list_move(item, head);
+ list->nr_items--;
+}
+EXPORT_SYMBOL_GPL(list_lru_isolate_move);
+
+static unsigned long __list_lru_count_one(struct list_lru *lru,
+ int nid, int memcg_idx)
{
- unsigned long count = 0;
struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_one *l;
+ unsigned long count;
spin_lock(&nlru->lock);
- WARN_ON_ONCE(nlru->nr_items < 0);
- count += nlru->nr_items;
+ l = list_lru_from_memcg_idx(nlru, memcg_idx);
+ count = l->nr_items;
spin_unlock(&nlru->lock);
return count;
}
+
+unsigned long list_lru_count_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg)
+{
+ return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
+}
+EXPORT_SYMBOL_GPL(list_lru_count_one);
+
+unsigned long list_lru_count_node(struct list_lru *lru, int nid)
+{
+ long count = 0;
+ int memcg_idx;
+
+ count += __list_lru_count_one(lru, nid, -1);
+ if (list_lru_memcg_aware(lru)) {
+ for_each_memcg_cache_index(memcg_idx)
+ count += __list_lru_count_one(lru, nid, memcg_idx);
+ }
+ return count;
+}
EXPORT_SYMBOL_GPL(list_lru_count_node);
-unsigned long
-list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
- void *cb_arg, unsigned long *nr_to_walk)
+static unsigned long
+__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk)
{
- struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_one *l;
struct list_head *item, *n;
unsigned long isolated = 0;
spin_lock(&nlru->lock);
+ l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
- list_for_each_safe(item, n, &nlru->list) {
+ list_for_each_safe(item, n, &l->list) {
enum lru_status ret;
/*
@@ -85,14 +206,11 @@ restart:
break;
--*nr_to_walk;
- ret = isolate(item, &nlru->lock, cb_arg);
+ ret = isolate(item, l, &nlru->lock, cb_arg);
switch (ret) {
case LRU_REMOVED_RETRY:
assert_spin_locked(&nlru->lock);
case LRU_REMOVED:
- if (--nlru->nr_items == 0)
- node_clear(nid, lru->active_nodes);
- WARN_ON_ONCE(nlru->nr_items < 0);
isolated++;
/*
* If the lru lock has been dropped, our list
@@ -103,7 +221,7 @@ restart:
goto restart;
break;
case LRU_ROTATE:
- list_move_tail(item, &nlru->list);
+ list_move_tail(item, &l->list);
break;
case LRU_SKIP:
break;
@@ -122,31 +240,322 @@ restart:
spin_unlock(&nlru->lock);
return isolated;
}
+
+unsigned long
+list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk)
+{
+ return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
+ isolate, cb_arg, nr_to_walk);
+}
+EXPORT_SYMBOL_GPL(list_lru_walk_one);
+
+unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk)
+{
+ long isolated = 0;
+ int memcg_idx;
+
+ isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
+ nr_to_walk);
+ if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
+ for_each_memcg_cache_index(memcg_idx) {
+ isolated += __list_lru_walk_one(lru, nid, memcg_idx,
+ isolate, cb_arg, nr_to_walk);
+ if (*nr_to_walk <= 0)
+ break;
+ }
+ }
+ return isolated;
+}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
-int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
+static void init_one_lru(struct list_lru_one *l)
+{
+ INIT_LIST_HEAD(&l->list);
+ l->nr_items = 0;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
+ int begin, int end)
+{
+ int i;
+
+ for (i = begin; i < end; i++)
+ kfree(memcg_lrus->lru[i]);
+}
+
+static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
+ int begin, int end)
+{
+ int i;
+
+ for (i = begin; i < end; i++) {
+ struct list_lru_one *l;
+
+ l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
+ if (!l)
+ goto fail;
+
+ init_one_lru(l);
+ memcg_lrus->lru[i] = l;
+ }
+ return 0;
+fail:
+ __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+ return -ENOMEM;
+}
+
+static int memcg_init_list_lru_node(struct list_lru_node *nlru)
+{
+ int size = memcg_nr_cache_ids;
+
+ nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
+ if (!nlru->memcg_lrus)
+ return -ENOMEM;
+
+ if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
+ kfree(nlru->memcg_lrus);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
+{
+ __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
+ kfree(nlru->memcg_lrus);
+}
+
+static int memcg_update_list_lru_node(struct list_lru_node *nlru,
+ int old_size, int new_size)
+{
+ struct list_lru_memcg *old, *new;
+
+ BUG_ON(old_size > new_size);
+
+ old = nlru->memcg_lrus;
+ new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ if (__memcg_init_list_lru_node(new, old_size, new_size)) {
+ kfree(new);
+ return -ENOMEM;
+ }
+
+ memcpy(new, old, old_size * sizeof(void *));
+
+ /*
+ * The lock guarantees that we won't race with a reader
+ * (see list_lru_from_memcg_idx).
+ *
+ * Since list_lru_{add,del} may be called under an IRQ-safe lock,
+ * we have to use IRQ-safe primitives here to avoid deadlock.
+ */
+ spin_lock_irq(&nlru->lock);
+ nlru->memcg_lrus = new;
+ spin_unlock_irq(&nlru->lock);
+
+ kfree(old);
+ return 0;
+}
+
+static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
+ int old_size, int new_size)
+{
+ /* do not bother shrinking the array back to the old size, because we
+ * cannot handle allocation failures here */
+ __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
+}
+
+static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
+{
+ int i;
+
+ for (i = 0; i < nr_node_ids; i++) {
+ if (!memcg_aware)
+ lru->node[i].memcg_lrus = NULL;
+ else if (memcg_init_list_lru_node(&lru->node[i]))
+ goto fail;
+ }
+ return 0;
+fail:
+ for (i = i - 1; i >= 0; i--)
+ memcg_destroy_list_lru_node(&lru->node[i]);
+ return -ENOMEM;
+}
+
+static void memcg_destroy_list_lru(struct list_lru *lru)
+{
+ int i;
+
+ if (!list_lru_memcg_aware(lru))
+ return;
+
+ for (i = 0; i < nr_node_ids; i++)
+ memcg_destroy_list_lru_node(&lru->node[i]);
+}
+
+static int memcg_update_list_lru(struct list_lru *lru,
+ int old_size, int new_size)
+{
+ int i;
+
+ if (!list_lru_memcg_aware(lru))
+ return 0;
+
+ for (i = 0; i < nr_node_ids; i++) {
+ if (memcg_update_list_lru_node(&lru->node[i],
+ old_size, new_size))
+ goto fail;
+ }
+ return 0;
+fail:
+ for (i = i - 1; i >= 0; i--)
+ memcg_cancel_update_list_lru_node(&lru->node[i],
+ old_size, new_size);
+ return -ENOMEM;
+}
+
+static void memcg_cancel_update_list_lru(struct list_lru *lru,
+ int old_size, int new_size)
+{
+ int i;
+
+ if (!list_lru_memcg_aware(lru))
+ return;
+
+ for (i = 0; i < nr_node_ids; i++)
+ memcg_cancel_update_list_lru_node(&lru->node[i],
+ old_size, new_size);
+}
+
+int memcg_update_all_list_lrus(int new_size)
+{
+ int ret = 0;
+ struct list_lru *lru;
+ int old_size = memcg_nr_cache_ids;
+
+ mutex_lock(&list_lrus_mutex);
+ list_for_each_entry(lru, &list_lrus, list) {
+ ret = memcg_update_list_lru(lru, old_size, new_size);
+ if (ret)
+ goto fail;
+ }
+out:
+ mutex_unlock(&list_lrus_mutex);
+ return ret;
+fail:
+ list_for_each_entry_continue_reverse(lru, &list_lrus, list)
+ memcg_cancel_update_list_lru(lru, old_size, new_size);
+ goto out;
+}
+
+static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
+ int src_idx, int dst_idx)
+{
+ struct list_lru_one *src, *dst;
+
+ /*
+ * Since list_lru_{add,del} may be called under an IRQ-safe lock,
+ * we have to use IRQ-safe primitives here to avoid deadlock.
+ */
+ spin_lock_irq(&nlru->lock);
+
+ src = list_lru_from_memcg_idx(nlru, src_idx);
+ dst = list_lru_from_memcg_idx(nlru, dst_idx);
+
+ list_splice_init(&src->list, &dst->list);
+ dst->nr_items += src->nr_items;
+ src->nr_items = 0;
+
+ spin_unlock_irq(&nlru->lock);
+}
+
+static void memcg_drain_list_lru(struct list_lru *lru,
+ int src_idx, int dst_idx)
+{
+ int i;
+
+ if (!list_lru_memcg_aware(lru))
+ return;
+
+ for (i = 0; i < nr_node_ids; i++)
+ memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
+}
+
+void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
+{
+ struct list_lru *lru;
+
+ mutex_lock(&list_lrus_mutex);
+ list_for_each_entry(lru, &list_lrus, list)
+ memcg_drain_list_lru(lru, src_idx, dst_idx);
+ mutex_unlock(&list_lrus_mutex);
+}
+#else
+static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
+{
+ return 0;
+}
+
+static void memcg_destroy_list_lru(struct list_lru *lru)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
+int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+ struct lock_class_key *key)
{
int i;
size_t size = sizeof(*lru->node) * nr_node_ids;
+ int err = -ENOMEM;
+
+ memcg_get_cache_ids();
lru->node = kzalloc(size, GFP_KERNEL);
if (!lru->node)
- return -ENOMEM;
+ goto out;
- nodes_clear(lru->active_nodes);
for (i = 0; i < nr_node_ids; i++) {
spin_lock_init(&lru->node[i].lock);
if (key)
lockdep_set_class(&lru->node[i].lock, key);
- INIT_LIST_HEAD(&lru->node[i].list);
- lru->node[i].nr_items = 0;
+ init_one_lru(&lru->node[i].lru);
}
- return 0;
+
+ err = memcg_init_list_lru(lru, memcg_aware);
+ if (err) {
+ kfree(lru->node);
+ goto out;
+ }
+
+ list_lru_register(lru);
+out:
+ memcg_put_cache_ids();
+ return err;
}
-EXPORT_SYMBOL_GPL(list_lru_init_key);
+EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
+ /* Already destroyed or not yet initialized? */
+ if (!lru->node)
+ return;
+
+ memcg_get_cache_ids();
+
+ list_lru_unregister(lru);
+
+ memcg_destroy_list_lru(lru);
kfree(lru->node);
+ lru->node = NULL;
+
+ memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
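
With the per-memcg lists, walk callbacks now receive the struct list_lru_one they are iterating and are expected to use list_lru_isolate()/list_lru_isolate_move() instead of touching the counters themselves. A hedged sketch of what such a callback looks like against this API (the prototype is inferred from the __list_lru_walk_one() call site above; the names here are illustrative):

	/* Illustrative walk callback; not part of this patch. */
	static enum lru_status demo_isolate(struct list_head *item,
					    struct list_lru_one *list,
					    spinlock_t *lru_lock, void *cb_arg)
	{
		struct list_head *freeable = cb_arg;

		/*
		 * Move the item off its per-node/per-memcg list onto a private
		 * list; list_lru_isolate_move() also decrements list->nr_items.
		 */
		list_lru_isolate_move(list, item, freeable);
		return LRU_REMOVED;
	}

A caller would hand demo_isolate and a private "freeable" list to list_lru_walk_one() and dispose of the isolated objects once the walk returns.
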
diff --git a/mm/madvise.c b/mm/madvise.c
index a271adc93289..d551475517bf 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -155,7 +155,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
pte = *(orig_pte + ((index - start) / PAGE_SIZE));
pte_unmap_unlock(orig_pte, ptl);
- if (pte_present(pte) || pte_none(pte) || pte_file(pte))
+ if (pte_present(pte) || pte_none(pte))
continue;
entry = pte_to_swp_entry(pte);
if (unlikely(non_swap_entry(entry)))
@@ -222,21 +222,24 @@ static long madvise_willneed(struct vm_area_struct *vma,
struct file *file = vma->vm_file;
#ifdef CONFIG_SWAP
- if (!file || mapping_cap_swap_backed(file->f_mapping)) {
+ if (!file) {
*prev = vma;
- if (!file)
- force_swapin_readahead(vma, start, end);
- else
- force_shm_swapin_readahead(vma, start, end,
- file->f_mapping);
+ force_swapin_readahead(vma, start, end);
return 0;
}
-#endif
+ if (shmem_mapping(file->f_mapping)) {
+ *prev = vma;
+ force_shm_swapin_readahead(vma, start, end,
+ file->f_mapping);
+ return 0;
+ }
+#else
if (!file)
return -EBADF;
+#endif
- if (file->f_mapping->a_ops->get_xip_mem) {
+ if (IS_DAX(file_inode(file))) {
/* no bad return value, but ignore advice */
return 0;
}
@@ -278,14 +281,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
return -EINVAL;
- if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
- struct zap_details details = {
- .nonlinear_vma = vma,
- .last_index = ULONG_MAX,
- };
- zap_page_range(vma, start, end - start, &details);
- } else
- zap_page_range(vma, start, end - start, NULL);
+ zap_page_range(vma, start, end - start, NULL);
return 0;
}
@@ -303,7 +299,7 @@ static long madvise_remove(struct vm_area_struct *vma,
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
- if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
+ if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB))
return -EINVAL;
f = vma->vm_file;
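
From userspace, the net effect of the madvise() changes above is that MADV_WILLNEED readahead is now limited to anonymous memory and shmem-backed mappings, the nonlinear special case is gone from MADV_DONTNEED, and DAX-backed files accept the advice but ignore it. A hedged userspace sketch (mapping and sizes are illustrative):

#include <stddef.h>
#include <sys/mman.h>

/*
 * Hint that a region will be needed soon.  With CONFIG_SWAP the kernel
 * may start swap readahead for anonymous or shmem mappings; on DAX files
 * the call succeeds but the advice is ignored; without swap support an
 * anonymous mapping gets EBADF, as in the #else branch above.
 */
static int prefault_hint(void *addr, size_t len)
{
        return madvise(addr, len, MADV_WILLNEED);
}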
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 851924fa5170..d18d3a6e7337 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -72,22 +72,13 @@ EXPORT_SYMBOL(memory_cgrp_subsys);
#define MEM_CGROUP_RECLAIM_RETRIES 5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
+/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
-
-/* for remember boot option*/
-#ifdef CONFIG_MEMCG_SWAP_ENABLED
-static int really_do_swap_account __initdata = 1;
-#else
-static int really_do_swap_account __initdata;
-#endif
-
#else
#define do_swap_account 0
#endif
-
static const char * const mem_cgroup_stat_names[] = {
"cache",
"rss",
@@ -97,14 +88,6 @@ static const char * const mem_cgroup_stat_names[] = {
"swap",
};
-enum mem_cgroup_events_index {
- MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
- MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
- MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
- MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
- MEM_CGROUP_EVENTS_NSTATS,
-};
-
static const char * const mem_cgroup_events_names[] = {
"pgpgin",
"pgpgout",
@@ -138,7 +121,7 @@ enum mem_cgroup_events_target {
struct mem_cgroup_stat_cpu {
long count[MEM_CGROUP_STAT_NSTATS];
- unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+ unsigned long events[MEMCG_NR_EVENTS];
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
};
@@ -284,6 +267,10 @@ struct mem_cgroup {
struct page_counter memsw;
struct page_counter kmem;
+ /* Normal memory consumption range */
+ unsigned long low;
+ unsigned long high;
+
unsigned long soft_limit;
/* vmpressure notifications */
@@ -325,9 +312,11 @@ struct mem_cgroup {
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
- atomic_t moving_account;
+ atomic_t moving_account;
/* taken only while moving_account > 0 */
- spinlock_t move_lock;
+ spinlock_t move_lock;
+ struct task_struct *move_lock_task;
+ unsigned long move_lock_flags;
/*
* percpu counter.
*/
@@ -343,11 +332,10 @@ struct mem_cgroup {
struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
- /* analogous to slab_common's slab_caches list, but per-memcg;
- * protected by memcg_slab_mutex */
- struct list_head memcg_slab_caches;
- /* Index in the kmem_cache->memcg_params->memcg_caches array */
+ /* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
+ bool kmem_acct_activated;
+ bool kmem_acct_active;
#endif
int last_scanned_node;
@@ -366,29 +354,26 @@ struct mem_cgroup {
};
#ifdef CONFIG_MEMCG_KMEM
-static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
- return memcg->kmemcg_id >= 0;
+ return memcg->kmem_acct_active;
}
#endif
/* Stuffs for move charges at task migration. */
/*
- * Types of charges to be moved. "move_charge_at_immitgrate" and
- * "immigrate_flags" are treated as a left-shifted bitmap of these types.
+ * Types of charges to be moved.
*/
-enum move_type {
- MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
- MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
- NR_MOVE_TYPE,
-};
+#define MOVE_ANON 0x1U
+#define MOVE_FILE 0x2U
+#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
spinlock_t lock; /* for from, to */
struct mem_cgroup *from;
struct mem_cgroup *to;
- unsigned long immigrate_flags;
+ unsigned long flags;
unsigned long precharge;
unsigned long moved_charge;
unsigned long moved_swap;
@@ -399,16 +384,6 @@ static struct move_charge_struct {
.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
-static bool move_anon(void)
-{
- return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
-}
-
-static bool move_file(void)
-{
- return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
-}
-
/*
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur.
@@ -544,33 +519,35 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(tcp_proto_cgroup);
-static void disarm_sock_keys(struct mem_cgroup *memcg)
-{
- if (!memcg_proto_activated(&memcg->tcp_mem))
- return;
- static_key_slow_dec(&memcg_socket_limit_enabled);
-}
-#else
-static void disarm_sock_keys(struct mem_cgroup *memcg)
-{
-}
#endif
#ifdef CONFIG_MEMCG_KMEM
/*
- * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
+ * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
* The main reason for not using cgroup id for this:
* this works better in sparse environments, where we have a lot of memcgs,
* but only a few kmem-limited. Or also, if we have, for instance, 200
* memcgs, and none but the 200th is kmem-limited, we'd have to have a
* 200 entry array for that.
*
- * The current size of the caches array is stored in
- * memcg_limited_groups_array_size. It will double each time we have to
- * increase it.
+ * The current size of the caches array is stored in memcg_nr_cache_ids. It
+ * will double each time we have to increase it.
*/
-static DEFINE_IDA(kmem_limited_groups);
-int memcg_limited_groups_array_size;
+static DEFINE_IDA(memcg_cache_ida);
+int memcg_nr_cache_ids;
+
+/* Protects memcg_nr_cache_ids */
+static DECLARE_RWSEM(memcg_cache_ids_sem);
+
+void memcg_get_cache_ids(void)
+{
+ down_read(&memcg_cache_ids_sem);
+}
+
+void memcg_put_cache_ids(void)
+{
+ up_read(&memcg_cache_ids_sem);
+}
/*
* MIN_SIZE is different than 1, because we would like to avoid going through
@@ -596,32 +573,8 @@ int memcg_limited_groups_array_size;
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);
-static void memcg_free_cache_id(int id);
-
-static void disarm_kmem_keys(struct mem_cgroup *memcg)
-{
- if (memcg_kmem_is_active(memcg)) {
- static_key_slow_dec(&memcg_kmem_enabled_key);
- memcg_free_cache_id(memcg->kmemcg_id);
- }
- /*
- * This check can't live in kmem destruction function,
- * since the charges will outlive the cgroup
- */
- WARN_ON(page_counter_read(&memcg->kmem));
-}
-#else
-static void disarm_kmem_keys(struct mem_cgroup *memcg)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
-static void disarm_static_keys(struct mem_cgroup *memcg)
-{
- disarm_sock_keys(memcg);
- disarm_kmem_keys(memcg);
-}
-
static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
@@ -1368,6 +1321,20 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
return inactive * inactive_ratio < active;
}
+bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return true;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ memcg = mz->memcg;
+
+ return !!(memcg->css.flags & CSS_ONLINE);
+}
+
#define mem_cgroup_from_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -1477,9 +1444,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
pr_info("Task in ");
pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
- pr_info(" killed as a result of limit of ");
+ pr_cont(" killed as a result of limit of ");
pr_cont_cgroup_path(memcg->css.cgroup);
- pr_info("\n");
+ pr_cont("\n");
rcu_read_unlock();
@@ -1560,7 +1527,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
* quickly exit and free its memory.
*/
if (fatal_signal_pending(current) || task_will_free_mem(current)) {
- set_thread_flag(TIF_MEMDIE);
+ mark_tsk_oom_victim(current);
return;
}
@@ -1934,7 +1901,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
if (!memcg)
return false;
- if (!handle)
+ if (!handle || oom_killer_disabled)
goto cleanup;
owait.memcg = memcg;
@@ -1980,34 +1947,33 @@ cleanup:
/**
* mem_cgroup_begin_page_stat - begin a page state statistics transaction
* @page: page that is going to change accounted state
- * @locked: &memcg->move_lock slowpath was taken
- * @flags: IRQ-state flags for &memcg->move_lock
*
* This function must mark the beginning of an accounted page state
* change to prevent double accounting when the page is concurrently
* being moved to another memcg:
*
- * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ * memcg = mem_cgroup_begin_page_stat(page);
* if (TestClearPageState(page))
* mem_cgroup_update_page_stat(memcg, state, -1);
- * mem_cgroup_end_page_stat(memcg, locked, flags);
- *
- * The RCU lock is held throughout the transaction. The fast path can
- * get away without acquiring the memcg->move_lock (@locked is false)
- * because page moving starts with an RCU grace period.
- *
- * The RCU lock also protects the memcg from being freed when the page
- * state that is going to change is the only thing preventing the page
- * from being uncharged. E.g. end-writeback clearing PageWriteback(),
- * which allows migration to go ahead and uncharge the page before the
- * account transaction might be complete.
+ * mem_cgroup_end_page_stat(memcg);
*/
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
- bool *locked,
- unsigned long *flags)
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
struct mem_cgroup *memcg;
+ unsigned long flags;
+ /*
+ * The RCU lock is held throughout the transaction. The fast
+ * path can get away without acquiring the memcg->move_lock
+ * because page moving starts with an RCU grace period.
+ *
+ * The RCU lock also protects the memcg from being freed when
+ * the page state that is going to change is the only thing
+ * preventing the page from being uncharged.
+ * E.g. end-writeback clearing PageWriteback(), which allows
+ * migration to go ahead and uncharge the page before the
+ * account transaction might be complete.
+ */
rcu_read_lock();
if (mem_cgroup_disabled())
@@ -2017,16 +1983,22 @@ again:
if (unlikely(!memcg))
return NULL;
- *locked = false;
if (atomic_read(&memcg->moving_account) <= 0)
return memcg;
- spin_lock_irqsave(&memcg->move_lock, *flags);
+ spin_lock_irqsave(&memcg->move_lock, flags);
if (memcg != page->mem_cgroup) {
- spin_unlock_irqrestore(&memcg->move_lock, *flags);
+ spin_unlock_irqrestore(&memcg->move_lock, flags);
goto again;
}
- *locked = true;
+
+ /*
+ * When charge migration first begins, we can have locked and
+ * unlocked page stat updates happening concurrently. Track
+ * the task who has the lock for mem_cgroup_end_page_stat().
+ */
+ memcg->move_lock_task = current;
+ memcg->move_lock_flags = flags;
return memcg;
}
@@ -2034,14 +2006,17 @@ again:
/**
* mem_cgroup_end_page_stat - finish a page state statistics transaction
* @memcg: the memcg that was accounted against
- * @locked: value received from mem_cgroup_begin_page_stat()
- * @flags: value received from mem_cgroup_begin_page_stat()
*/
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
- unsigned long *flags)
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
- if (memcg && *locked)
- spin_unlock_irqrestore(&memcg->move_lock, *flags);
+ if (memcg && memcg->move_lock_task == current) {
+ unsigned long flags = memcg->move_lock_flags;
+
+ memcg->move_lock_task = NULL;
+ memcg->move_lock_flags = 0;
+
+ spin_unlock_irqrestore(&memcg->move_lock, flags);
+ }
rcu_read_unlock();
}
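
The transaction API is now stateless for the caller: the IRQ flags and the locked/unlocked distinction live in the memcg itself (move_lock_task/move_lock_flags), so the begin/end pair takes and returns only the memcg. The caller pattern from the kernel-doc above, written out as a sketch (TestClearPageState() and the stat index are placeholders, not real symbols):

static void clear_and_account(struct page *page)
{
        struct mem_cgroup *memcg;

        memcg = mem_cgroup_begin_page_stat(page);       /* takes rcu_read_lock() */
        if (TestClearPageState(page))                   /* placeholder predicate */
                mem_cgroup_update_page_stat(memcg, MEMCG_STAT_IDX, -1);
        mem_cgroup_end_page_stat(memcg);                /* drops move_lock + RCU */
}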
@@ -2134,17 +2109,6 @@ static void drain_local_stock(struct work_struct *dummy)
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}
-static void __init memcg_stock_init(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct memcg_stock_pcp *stock =
- &per_cpu(memcg_stock, cpu);
- INIT_WORK(&stock->work, drain_local_stock);
- }
-}
-
/*
* Cache charges(val) to local per_cpu area.
* This will be consumed by consume_stock() function, later.
@@ -2294,6 +2258,8 @@ retry:
if (!(gfp_mask & __GFP_WAIT))
goto nomem;
+ mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
+
nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
gfp_mask, may_swap);
@@ -2335,6 +2301,8 @@ retry:
if (fatal_signal_pending(current))
goto bypass;
+ mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
+
mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
nomem:
if (!(gfp_mask & __GFP_NOFAIL))
@@ -2346,6 +2314,16 @@ done_restock:
css_get_many(&memcg->css, batch);
if (batch > nr_pages)
refill_stock(memcg, batch - nr_pages);
+ /*
+ * If the hierarchy is above the normal consumption range,
+ * make the charging task trim their excess contribution.
+ */
+ do {
+ if (page_counter_read(&memcg->memory) <= memcg->high)
+ continue;
+ mem_cgroup_events(memcg, MEMCG_HIGH, 1);
+ try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+ } while ((memcg = parent_mem_cgroup(memcg)));
done:
return ret;
}
@@ -2476,27 +2454,8 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
}
#ifdef CONFIG_MEMCG_KMEM
-/*
- * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
- * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
- */
-static DEFINE_MUTEX(memcg_slab_mutex);
-
-/*
- * This is a bit cumbersome, but it is rarely used and avoids a backpointer
- * in the memcg_cache_params struct.
- */
-static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
-{
- struct kmem_cache *cachep;
-
- VM_BUG_ON(p->is_root_cache);
- cachep = p->root_cache;
- return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
-}
-
-static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
- unsigned long nr_pages)
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned long nr_pages)
{
struct page_counter *counter;
int ret = 0;
@@ -2533,8 +2492,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
return ret;
}
-static void memcg_uncharge_kmem(struct mem_cgroup *memcg,
- unsigned long nr_pages)
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
{
page_counter_uncharge(&memcg->memory, nr_pages);
if (do_swap_account)
@@ -2560,18 +2518,19 @@ static int memcg_alloc_cache_id(void)
int id, size;
int err;
- id = ida_simple_get(&kmem_limited_groups,
+ id = ida_simple_get(&memcg_cache_ida,
0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
if (id < 0)
return id;
- if (id < memcg_limited_groups_array_size)
+ if (id < memcg_nr_cache_ids)
return id;
/*
* There's no space for the new id in memcg_caches arrays,
* so we have to grow them.
*/
+ down_write(&memcg_cache_ids_sem);
size = 2 * (id + 1);
if (size < MEMCG_CACHES_MIN_SIZE)
@@ -2579,12 +2538,16 @@ static int memcg_alloc_cache_id(void)
else if (size > MEMCG_CACHES_MAX_SIZE)
size = MEMCG_CACHES_MAX_SIZE;
- mutex_lock(&memcg_slab_mutex);
err = memcg_update_all_caches(size);
- mutex_unlock(&memcg_slab_mutex);
+ if (!err)
+ err = memcg_update_all_list_lrus(size);
+ if (!err)
+ memcg_nr_cache_ids = size;
+
+ up_write(&memcg_cache_ids_sem);
if (err) {
- ida_simple_remove(&kmem_limited_groups, id);
+ ida_simple_remove(&memcg_cache_ida, id);
return err;
}
return id;
@@ -2592,136 +2555,23 @@ static int memcg_alloc_cache_id(void)
static void memcg_free_cache_id(int id)
{
- ida_simple_remove(&kmem_limited_groups, id);
+ ida_simple_remove(&memcg_cache_ida, id);
}
-/*
- * We should update the current array size iff all caches updates succeed. This
- * can only be done from the slab side. The slab mutex needs to be held when
- * calling this.
- */
-void memcg_update_array_size(int num)
-{
- memcg_limited_groups_array_size = num;
-}
-
-static void memcg_register_cache(struct mem_cgroup *memcg,
- struct kmem_cache *root_cache)
-{
- static char memcg_name_buf[NAME_MAX + 1]; /* protected by
- memcg_slab_mutex */
- struct kmem_cache *cachep;
- int id;
-
- lockdep_assert_held(&memcg_slab_mutex);
-
- id = memcg_cache_id(memcg);
-
- /*
- * Since per-memcg caches are created asynchronously on first
- * allocation (see memcg_kmem_get_cache()), several threads can try to
- * create the same cache, but only one of them may succeed.
- */
- if (cache_from_memcg_idx(root_cache, id))
- return;
-
- cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
- cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
- /*
- * If we could not create a memcg cache, do not complain, because
- * that's not critical at all as we can always proceed with the root
- * cache.
- */
- if (!cachep)
- return;
-
- list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
-
- /*
- * Since readers won't lock (see cache_from_memcg_idx()), we need a
- * barrier here to ensure nobody will see the kmem_cache partially
- * initialized.
- */
- smp_wmb();
-
- BUG_ON(root_cache->memcg_params->memcg_caches[id]);
- root_cache->memcg_params->memcg_caches[id] = cachep;
-}
-
-static void memcg_unregister_cache(struct kmem_cache *cachep)
-{
- struct kmem_cache *root_cache;
- struct mem_cgroup *memcg;
- int id;
-
- lockdep_assert_held(&memcg_slab_mutex);
-
- BUG_ON(is_root_cache(cachep));
-
- root_cache = cachep->memcg_params->root_cache;
- memcg = cachep->memcg_params->memcg;
- id = memcg_cache_id(memcg);
-
- BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
- root_cache->memcg_params->memcg_caches[id] = NULL;
-
- list_del(&cachep->memcg_params->list);
-
- kmem_cache_destroy(cachep);
-}
-
-int __memcg_cleanup_cache_params(struct kmem_cache *s)
-{
- struct kmem_cache *c;
- int i, failed = 0;
-
- mutex_lock(&memcg_slab_mutex);
- for_each_memcg_cache_index(i) {
- c = cache_from_memcg_idx(s, i);
- if (!c)
- continue;
-
- memcg_unregister_cache(c);
-
- if (cache_from_memcg_idx(s, i))
- failed++;
- }
- mutex_unlock(&memcg_slab_mutex);
- return failed;
-}
-
-static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
-{
- struct kmem_cache *cachep;
- struct memcg_cache_params *params, *tmp;
-
- if (!memcg_kmem_is_active(memcg))
- return;
-
- mutex_lock(&memcg_slab_mutex);
- list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
- cachep = memcg_params_to_cache(params);
- memcg_unregister_cache(cachep);
- }
- mutex_unlock(&memcg_slab_mutex);
-}
-
-struct memcg_register_cache_work {
+struct memcg_kmem_cache_create_work {
struct mem_cgroup *memcg;
struct kmem_cache *cachep;
struct work_struct work;
};
-static void memcg_register_cache_func(struct work_struct *w)
+static void memcg_kmem_cache_create_func(struct work_struct *w)
{
- struct memcg_register_cache_work *cw =
- container_of(w, struct memcg_register_cache_work, work);
+ struct memcg_kmem_cache_create_work *cw =
+ container_of(w, struct memcg_kmem_cache_create_work, work);
struct mem_cgroup *memcg = cw->memcg;
struct kmem_cache *cachep = cw->cachep;
- mutex_lock(&memcg_slab_mutex);
- memcg_register_cache(memcg, cachep);
- mutex_unlock(&memcg_slab_mutex);
+ memcg_create_kmem_cache(memcg, cachep);
css_put(&memcg->css);
kfree(cw);
@@ -2730,10 +2580,10 @@ static void memcg_register_cache_func(struct work_struct *w)
/*
* Enqueue the creation of a per-memcg kmem_cache.
*/
-static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
- struct kmem_cache *cachep)
+static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+ struct kmem_cache *cachep)
{
- struct memcg_register_cache_work *cw;
+ struct memcg_kmem_cache_create_work *cw;
cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
if (!cw)
@@ -2743,18 +2593,18 @@ static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
cw->memcg = memcg;
cw->cachep = cachep;
+ INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
- INIT_WORK(&cw->work, memcg_register_cache_func);
schedule_work(&cw->work);
}
-static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
- struct kmem_cache *cachep)
+static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+ struct kmem_cache *cachep)
{
/*
* We need to stop accounting when we kmalloc, because if the
* corresponding kmalloc cache is not yet created, the first allocation
- * in __memcg_schedule_register_cache will recurse.
+ * in __memcg_schedule_kmem_cache_create will recurse.
*
* However, it is better to enclose the whole function. Depending on
* the debugging options enabled, INIT_WORK(), for instance, can
@@ -2763,24 +2613,10 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
* the safest choice is to do it like this, wrapping the whole function.
*/
current->memcg_kmem_skip_account = 1;
- __memcg_schedule_register_cache(memcg, cachep);
+ __memcg_schedule_kmem_cache_create(memcg, cachep);
current->memcg_kmem_skip_account = 0;
}
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
-{
- unsigned int nr_pages = 1 << order;
-
- return memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
-}
-
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
-{
- unsigned int nr_pages = 1 << order;
-
- memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages);
-}
-
/*
* Return the kmem_cache we're supposed to use for a slab allocation.
* We try to use the current memcg's version of the cache.
@@ -2798,18 +2634,19 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
{
struct mem_cgroup *memcg;
struct kmem_cache *memcg_cachep;
+ int kmemcg_id;
- VM_BUG_ON(!cachep->memcg_params);
- VM_BUG_ON(!cachep->memcg_params->is_root_cache);
+ VM_BUG_ON(!is_root_cache(cachep));
if (current->memcg_kmem_skip_account)
return cachep;
memcg = get_mem_cgroup_from_mm(current->mm);
- if (!memcg_kmem_is_active(memcg))
+ kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+ if (kmemcg_id < 0)
goto out;
- memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
+ memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
if (likely(memcg_cachep))
return memcg_cachep;
@@ -2825,7 +2662,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
* could happen with the slab_mutex held. So it's better to
* defer everything.
*/
- memcg_schedule_register_cache(memcg, cachep);
+ memcg_schedule_kmem_cache_create(memcg, cachep);
out:
css_put(&memcg->css);
return cachep;
@@ -2834,7 +2671,7 @@ out:
void __memcg_kmem_put_cache(struct kmem_cache *cachep)
{
if (!is_root_cache(cachep))
- css_put(&cachep->memcg_params->memcg->css);
+ css_put(&cachep->memcg_params.memcg->css);
}
/*
@@ -2899,6 +2736,24 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
memcg_uncharge_kmem(memcg, 1 << order);
page->mem_cgroup = NULL;
}
+
+struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
+{
+ struct mem_cgroup *memcg = NULL;
+ struct kmem_cache *cachep;
+ struct page *page;
+
+ page = virt_to_head_page(ptr);
+ if (PageSlab(page)) {
+ cachep = page->slab_cache;
+ if (!is_root_cache(cachep))
+ memcg = cachep->memcg_params.memcg;
+ } else
+ /* page allocated by alloc_kmem_pages */
+ memcg = page->mem_cgroup;
+
+ return memcg;
+}
#endif /* CONFIG_MEMCG_KMEM */
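
__mem_cgroup_from_kmem() resolves the owning cgroup of an arbitrary kernel pointer: for slab objects it goes through page->slab_cache to the per-memcg cache's memcg_params.memcg, and for alloc_kmem_pages() allocations it reads page->mem_cgroup directly. A hedged sketch of a caller under CONFIG_MEMCG_KMEM (the helper name is illustrative, and the caller is assumed to hold whatever reference keeps the object alive):

/* Returns true if @ptr was charged to a non-root memory cgroup. */
static bool kmem_object_is_charged(void *ptr)
{
        return __mem_cgroup_from_kmem(ptr) != NULL;
}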
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -3433,8 +3288,9 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
int err = 0;
int memcg_id;
- if (memcg_kmem_is_active(memcg))
- return 0;
+ BUG_ON(memcg->kmemcg_id >= 0);
+ BUG_ON(memcg->kmem_acct_activated);
+ BUG_ON(memcg->kmem_acct_active);
/*
* For simplicity, we won't allow this to be disabled. It also can't
@@ -3477,6 +3333,8 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
* patched.
*/
memcg->kmemcg_id = memcg_id;
+ memcg->kmem_acct_activated = true;
+ memcg->kmem_acct_active = true;
out:
return err;
}
@@ -3533,7 +3391,7 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
int ret;
buf = strstrip(buf);
- ret = page_counter_memparse(buf, &nr_pages);
+ ret = page_counter_memparse(buf, "-1", &nr_pages);
if (ret)
return ret;
@@ -3609,7 +3467,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
- if (val >= (1 << NR_MOVE_TYPE))
+ if (val & ~MOVE_MASK)
return -EINVAL;
/*
@@ -3687,6 +3545,10 @@ static int memcg_stat_show(struct seq_file *m, void *v)
struct mem_cgroup *mi;
unsigned int i;
+ BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
+ MEM_CGROUP_STAT_NSTATS);
+ BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
+ MEM_CGROUP_EVENTS_NSTATS);
BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3901,7 +3763,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
unsigned long usage;
int i, size, ret;
- ret = page_counter_memparse(args, &threshold);
+ ret = page_counter_memparse(args, "-1", &threshold);
if (ret)
return ret;
@@ -4152,9 +4014,59 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
return mem_cgroup_sockets_init(memcg, ss);
}
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *parent, *child;
+ int kmemcg_id;
+
+ if (!memcg->kmem_acct_active)
+ return;
+
+ /*
+ * Clear the 'active' flag before clearing memcg_caches arrays entries.
+ * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
+ * guarantees no cache will be created for this cgroup after we are
+ * done (see memcg_create_kmem_cache()).
+ */
+ memcg->kmem_acct_active = false;
+
+ memcg_deactivate_kmem_caches(memcg);
+
+ kmemcg_id = memcg->kmemcg_id;
+ BUG_ON(kmemcg_id < 0);
+
+ parent = parent_mem_cgroup(memcg);
+ if (!parent)
+ parent = root_mem_cgroup;
+
+ /*
+ * Change kmemcg_id of this cgroup and all its descendants to the
+ * parent's id, and then move all entries from this cgroup's list_lrus
+ * to ones of the parent. After we have finished, all list_lrus
+ * corresponding to this cgroup are guaranteed to remain empty. The
+ * ordering is imposed by list_lru_node->lock taken by
+ * memcg_drain_all_list_lrus().
+ */
+ css_for_each_descendant_pre(css, &memcg->css) {
+ child = mem_cgroup_from_css(css);
+ BUG_ON(child->kmemcg_id != kmemcg_id);
+ child->kmemcg_id = parent->kmemcg_id;
+ if (!memcg->use_hierarchy)
+ break;
+ }
+ memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
+
+ memcg_free_cache_id(kmemcg_id);
+}
+
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
- memcg_unregister_all_caches(memcg);
+ if (memcg->kmem_acct_activated) {
+ memcg_destroy_kmem_caches(memcg);
+ static_key_slow_dec(&memcg_kmem_enabled_key);
+ WARN_ON(page_counter_read(&memcg->kmem));
+ }
mem_cgroup_sockets_destroy(memcg);
}
#else
@@ -4163,6 +4075,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
return 0;
}
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+}
+
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
}
@@ -4391,7 +4307,7 @@ out_kfree:
return ret;
}
-static struct cftype mem_cgroup_files[] = {
+static struct cftype mem_cgroup_legacy_files[] = {
{
.name = "usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
@@ -4502,34 +4418,6 @@ static struct cftype mem_cgroup_files[] = {
{ }, /* terminate */
};
-#ifdef CONFIG_MEMCG_SWAP
-static struct cftype memsw_cgroup_files[] = {
- {
- .name = "memsw.usage_in_bytes",
- .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
- .read_u64 = mem_cgroup_read_u64,
- },
- {
- .name = "memsw.max_usage_in_bytes",
- .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
- .write = mem_cgroup_reset,
- .read_u64 = mem_cgroup_read_u64,
- },
- {
- .name = "memsw.limit_in_bytes",
- .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
- .write = mem_cgroup_write,
- .read_u64 = mem_cgroup_read_u64,
- },
- {
- .name = "memsw.failcnt",
- .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
- .write = mem_cgroup_reset,
- .read_u64 = mem_cgroup_read_u64,
- },
- { }, /* terminate */
-};
-#endif
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
@@ -4609,8 +4497,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
-
- disarm_static_keys(memcg);
kfree(memcg);
}
@@ -4625,29 +4511,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(parent_mem_cgroup);
-static void __init mem_cgroup_soft_limit_tree_init(void)
-{
- struct mem_cgroup_tree_per_node *rtpn;
- struct mem_cgroup_tree_per_zone *rtpz;
- int tmp, node, zone;
-
- for_each_node(node) {
- tmp = node;
- if (!node_state(node, N_NORMAL_MEMORY))
- tmp = -1;
- rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
- BUG_ON(!rtpn);
-
- soft_limit_tree.rb_tree_per_node[node] = rtpn;
-
- for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- rtpz = &rtpn->rb_tree_per_zone[zone];
- rtpz->rb_root = RB_ROOT;
- spin_lock_init(&rtpz->lock);
- }
- }
-}
-
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
@@ -4667,6 +4530,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent_css == NULL) {
root_mem_cgroup = memcg;
page_counter_init(&memcg->memory, NULL);
+ memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
@@ -4682,7 +4546,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
spin_lock_init(&memcg->event_list_lock);
#ifdef CONFIG_MEMCG_KMEM
memcg->kmemcg_id = -1;
- INIT_LIST_HEAD(&memcg->memcg_slab_caches);
#endif
return &memcg->css;
@@ -4713,6 +4576,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
if (parent->use_hierarchy) {
page_counter_init(&memcg->memory, &parent->memory);
+ memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, &parent->memsw);
page_counter_init(&memcg->kmem, &parent->kmem);
@@ -4723,6 +4587,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
*/
} else {
page_counter_init(&memcg->memory, NULL);
+ memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
@@ -4768,6 +4633,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
spin_unlock(&memcg->event_list_lock);
vmpressure_cleanup(&memcg->vmpressure);
+
+ memcg_deactivate_kmem(memcg);
}
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
@@ -4798,6 +4665,8 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
+ memcg->low = 0;
+ memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX;
}
@@ -4874,12 +4743,12 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
if (!page || !page_mapped(page))
return NULL;
if (PageAnon(page)) {
- /* we don't move shared anon */
- if (!move_anon())
+ if (!(mc.flags & MOVE_ANON))
return NULL;
- } else if (!move_file())
- /* we ignore mapcount for file pages */
- return NULL;
+ } else {
+ if (!(mc.flags & MOVE_FILE))
+ return NULL;
+ }
if (!get_page_unless_zero(page))
return NULL;
@@ -4893,7 +4762,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
- if (!move_anon() || non_swap_entry(ent))
+ if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
return NULL;
/*
* Because lookup_swap_cache() updates some statistics counter,
@@ -4922,14 +4791,11 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
if (!vma->vm_file) /* anonymous vma */
return NULL;
- if (!move_file())
+ if (!(mc.flags & MOVE_FILE))
return NULL;
mapping = vma->vm_file->f_mapping;
- if (pte_none(ptent))
- pgoff = linear_page_index(vma, addr);
- else /* pte_file(ptent) is true */
- pgoff = pte_to_pgoff(ptent);
+ pgoff = linear_page_index(vma, addr);
/* page is moved even if it's not RSS of this task(page-faulted). */
#ifdef CONFIG_SWAP
@@ -4961,7 +4827,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
page = mc_handle_present_pte(vma, addr, ptent);
else if (is_swap_pte(ptent))
page = mc_handle_swap_pte(vma, addr, ptent, &ent);
- else if (pte_none(ptent) || pte_file(ptent))
+ else if (pte_none(ptent))
page = mc_handle_file_pte(vma, addr, ptent, &ent);
if (!page && !ent.val)
@@ -5004,7 +4870,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
page = pmd_page(pmd);
VM_BUG_ON_PAGE(!page || !PageHead(page), page);
- if (!move_anon())
+ if (!(mc.flags & MOVE_ANON))
return ret;
if (page->mem_cgroup == mc.from) {
ret = MC_TARGET_PAGE;
@@ -5027,7 +4893,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->private;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte;
spinlock_t *ptl;
@@ -5053,20 +4919,13 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
unsigned long precharge;
- struct vm_area_struct *vma;
+ struct mm_walk mem_cgroup_count_precharge_walk = {
+ .pmd_entry = mem_cgroup_count_precharge_pte_range,
+ .mm = mm,
+ };
down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- struct mm_walk mem_cgroup_count_precharge_walk = {
- .pmd_entry = mem_cgroup_count_precharge_pte_range,
- .mm = mm,
- .private = vma,
- };
- if (is_vm_hugetlb_page(vma))
- continue;
- walk_page_range(vma->vm_start, vma->vm_end,
- &mem_cgroup_count_precharge_walk);
- }
+ walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
up_read(&mm->mmap_sem);
precharge = mc.precharge;
@@ -5146,15 +5005,15 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
- unsigned long move_charge_at_immigrate;
+ unsigned long move_flags;
/*
* We are now committed to this value whatever it is. Changes in this
* tunable will only affect upcoming migrations, not the current one.
* So we need to save it, and keep it going.
*/
- move_charge_at_immigrate = memcg->move_charge_at_immigrate;
- if (move_charge_at_immigrate) {
+ move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+ if (move_flags) {
struct mm_struct *mm;
struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5174,7 +5033,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
spin_lock(&mc.lock);
mc.from = from;
mc.to = memcg;
- mc.immigrate_flags = move_charge_at_immigrate;
+ mc.flags = move_flags;
spin_unlock(&mc.lock);
/* We set mc.moving_task later */
@@ -5199,7 +5058,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
struct mm_walk *walk)
{
int ret = 0;
- struct vm_area_struct *vma = walk->private;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte;
spinlock_t *ptl;
enum mc_target_type target_type;
@@ -5295,7 +5154,10 @@ put: /* get_mctgt_type() gets the page */
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
- struct vm_area_struct *vma;
+ struct mm_walk mem_cgroup_move_charge_walk = {
+ .pmd_entry = mem_cgroup_move_charge_pte_range,
+ .mm = mm,
+ };
lru_add_drain_all();
/*
@@ -5318,24 +5180,11 @@ retry:
cond_resched();
goto retry;
}
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- int ret;
- struct mm_walk mem_cgroup_move_charge_walk = {
- .pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mm,
- .private = vma,
- };
- if (is_vm_hugetlb_page(vma))
- continue;
- ret = walk_page_range(vma->vm_start, vma->vm_end,
- &mem_cgroup_move_charge_walk);
- if (ret)
- /*
- * means we have consumed all precharges and failed in
- * doing additional charge. Just abandon here.
- */
- break;
- }
+ /*
+ * When we have consumed all precharges and failed in doing
+ * additional charge, the page walk just aborts.
+ */
+ walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
up_read(&mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
@@ -5386,118 +5235,211 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
mem_cgroup_from_css(root_css)->use_hierarchy = true;
}
-struct cgroup_subsys memory_cgrp_subsys = {
- .css_alloc = mem_cgroup_css_alloc,
- .css_online = mem_cgroup_css_online,
- .css_offline = mem_cgroup_css_offline,
- .css_free = mem_cgroup_css_free,
- .css_reset = mem_cgroup_css_reset,
- .can_attach = mem_cgroup_can_attach,
- .cancel_attach = mem_cgroup_cancel_attach,
- .attach = mem_cgroup_move_task,
- .bind = mem_cgroup_bind,
- .legacy_cftypes = mem_cgroup_files,
- .early_init = 0,
-};
+static u64 memory_current_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ return mem_cgroup_usage(mem_cgroup_from_css(css), false);
+}
-#ifdef CONFIG_MEMCG_SWAP
-static int __init enable_swap_account(char *s)
+static int memory_low_show(struct seq_file *m, void *v)
{
- if (!strcmp(s, "1"))
- really_do_swap_account = 1;
- else if (!strcmp(s, "0"))
- really_do_swap_account = 0;
- return 1;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ unsigned long low = ACCESS_ONCE(memcg->low);
+
+ if (low == PAGE_COUNTER_MAX)
+ seq_puts(m, "infinity\n");
+ else
+ seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
+
+ return 0;
}
-__setup("swapaccount=", enable_swap_account);
-static void __init memsw_file_init(void)
+static ssize_t memory_low_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
{
- WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
- memsw_cgroup_files));
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned long low;
+ int err;
+
+ buf = strstrip(buf);
+ err = page_counter_memparse(buf, "infinity", &low);
+ if (err)
+ return err;
+
+ memcg->low = low;
+
+ return nbytes;
}
-static void __init enable_swap_cgroup(void)
+static int memory_high_show(struct seq_file *m, void *v)
{
- if (!mem_cgroup_disabled() && really_do_swap_account) {
- do_swap_account = 1;
- memsw_file_init();
- }
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ unsigned long high = ACCESS_ONCE(memcg->high);
+
+ if (high == PAGE_COUNTER_MAX)
+ seq_puts(m, "infinity\n");
+ else
+ seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
+
+ return 0;
}
-#else
-static void __init enable_swap_cgroup(void)
+static ssize_t memory_high_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned long high;
+ int err;
+
+ buf = strstrip(buf);
+ err = page_counter_memparse(buf, "infinity", &high);
+ if (err)
+ return err;
+
+ memcg->high = high;
+
+ return nbytes;
}
-#endif
-#ifdef CONFIG_MEMCG_SWAP
-/**
- * mem_cgroup_swapout - transfer a memsw charge to swap
- * @page: page whose memsw charge to transfer
- * @entry: swap entry to move the charge to
- *
- * Transfer the memsw charge of @page to @entry.
- */
-void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+static int memory_max_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg;
- unsigned short oldid;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ unsigned long max = ACCESS_ONCE(memcg->memory.limit);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- VM_BUG_ON_PAGE(page_count(page), page);
+ if (max == PAGE_COUNTER_MAX)
+ seq_puts(m, "infinity\n");
+ else
+ seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
- if (!do_swap_account)
- return;
+ return 0;
+}
- memcg = page->mem_cgroup;
+static ssize_t memory_max_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned long max;
+ int err;
- /* Readahead page, never charged */
- if (!memcg)
- return;
+ buf = strstrip(buf);
+ err = page_counter_memparse(buf, "infinity", &max);
+ if (err)
+ return err;
- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
- VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, true);
+ err = mem_cgroup_resize_limit(memcg, max);
+ if (err)
+ return err;
- page->mem_cgroup = NULL;
+ return nbytes;
+}
- if (!mem_cgroup_is_root(memcg))
- page_counter_uncharge(&memcg->memory, 1);
+static int memory_events_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- /* XXX: caller holds IRQ-safe mapping->tree_lock */
- VM_BUG_ON(!irqs_disabled());
+ seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
+ seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
+ seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
+ seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
- mem_cgroup_charge_statistics(memcg, page, -1);
- memcg_check_events(memcg, page);
+ return 0;
}
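
Given the four seq_printf() calls above, reading memory.events is expected to yield fixed-key lines such as:

  low 0
  high 37
  max 4
  oom 0

(the counts are illustrative; the keys correspond to the MEMCG_LOW/HIGH/MAX/OOM counters incremented via mem_cgroup_events()).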
+static struct cftype memory_files[] = {
+ {
+ .name = "current",
+ .read_u64 = memory_current_read,
+ },
+ {
+ .name = "low",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = memory_low_show,
+ .write = memory_low_write,
+ },
+ {
+ .name = "high",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = memory_high_show,
+ .write = memory_high_write,
+ },
+ {
+ .name = "max",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = memory_max_show,
+ .write = memory_max_write,
+ },
+ {
+ .name = "events",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = memory_events_show,
+ },
+ { } /* terminate */
+};
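
Together these cftypes form the default-hierarchy interface: memory.current is a read-only byte count, memory.low/high/max accept byte values (with the K/M/G suffixes understood by memparse()) or the literal string "infinity", because the kernel side parses them with page_counter_memparse(buf, "infinity", ...). A hedged userspace sketch for writing the knobs (the cgroup path is an example, not mandated by the patch):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int cg_write(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        ssize_t ret;

        if (fd < 0)
                return -1;
        ret = write(fd, val, strlen(val));
        close(fd);
        return ret < 0 ? -1 : 0;
}

/* Example calls (paths illustrative):
 *   cg_write("/sys/fs/cgroup/grp/memory.high", "512M");
 *   cg_write("/sys/fs/cgroup/grp/memory.max",  "infinity");
 */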
+
+struct cgroup_subsys memory_cgrp_subsys = {
+ .css_alloc = mem_cgroup_css_alloc,
+ .css_online = mem_cgroup_css_online,
+ .css_offline = mem_cgroup_css_offline,
+ .css_free = mem_cgroup_css_free,
+ .css_reset = mem_cgroup_css_reset,
+ .can_attach = mem_cgroup_can_attach,
+ .cancel_attach = mem_cgroup_cancel_attach,
+ .attach = mem_cgroup_move_task,
+ .bind = mem_cgroup_bind,
+ .dfl_cftypes = memory_files,
+ .legacy_cftypes = mem_cgroup_legacy_files,
+ .early_init = 0,
+};
+
/**
- * mem_cgroup_uncharge_swap - uncharge a swap entry
- * @entry: swap entry to uncharge
+ * mem_cgroup_events - count memory events against a cgroup
+ * @memcg: the memory cgroup
+ * @idx: the event index
+ * @nr: the number of events to account for
+ */
+void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr)
+{
+ this_cpu_add(memcg->stat->events[idx], nr);
+}
+
+/**
+ * mem_cgroup_low - check if memory consumption is below the normal range
+ * @root: the highest ancestor to consider
+ * @memcg: the memory cgroup to check
*
- * Drop the memsw charge associated with @entry.
+ * Returns %true if memory consumption of @memcg, and that of all
+ * configurable ancestors up to @root, is below the normal range.
*/
-void mem_cgroup_uncharge_swap(swp_entry_t entry)
+bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
- struct mem_cgroup *memcg;
- unsigned short id;
+ if (mem_cgroup_disabled())
+ return false;
- if (!do_swap_account)
- return;
+ /*
+ * The toplevel group doesn't have a configurable range, so
+ * it's never low when looked at directly, and it is not
+ * considered an ancestor when assessing the hierarchy.
+ */
- id = swap_cgroup_record(entry, 0);
- rcu_read_lock();
- memcg = mem_cgroup_lookup(id);
- if (memcg) {
- if (!mem_cgroup_is_root(memcg))
- page_counter_uncharge(&memcg->memsw, 1);
- mem_cgroup_swap_statistics(memcg, false);
- css_put(&memcg->css);
+ if (memcg == root_mem_cgroup)
+ return false;
+
+ if (page_counter_read(&memcg->memory) > memcg->low)
+ return false;
+
+ while (memcg != root) {
+ memcg = parent_mem_cgroup(memcg);
+
+ if (memcg == root_mem_cgroup)
+ break;
+
+ if (page_counter_read(&memcg->memory) > memcg->low)
+ return false;
}
- rcu_read_unlock();
+ return true;
}
-#endif
/**
* mem_cgroup_try_charge - try charging a page
@@ -5773,7 +5715,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
* mem_cgroup_migrate - migrate a charge to another page
* @oldpage: currently charged page
* @newpage: page to transfer the charge to
- * @lrucare: both pages might be on the LRU already
+ * @lrucare: either or both pages might be on the LRU already
*
* Migrate the charge from @oldpage to @newpage.
*
@@ -5831,10 +5773,155 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
*/
static int __init mem_cgroup_init(void)
{
+ int cpu, node;
+
hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
- enable_swap_cgroup();
- mem_cgroup_soft_limit_tree_init();
- memcg_stock_init();
+
+ for_each_possible_cpu(cpu)
+ INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+ drain_local_stock);
+
+ for_each_node(node) {
+ struct mem_cgroup_tree_per_node *rtpn;
+ int zone;
+
+ rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
+ node_online(node) ? node : NUMA_NO_NODE);
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ struct mem_cgroup_tree_per_zone *rtpz;
+
+ rtpz = &rtpn->rb_tree_per_zone[zone];
+ rtpz->rb_root = RB_ROOT;
+ spin_lock_init(&rtpz->lock);
+ }
+ soft_limit_tree.rb_tree_per_node[node] = rtpn;
+ }
+
return 0;
}
subsys_initcall(mem_cgroup_init);
+
+#ifdef CONFIG_MEMCG_SWAP
+/**
+ * mem_cgroup_swapout - transfer a memsw charge to swap
+ * @page: page whose memsw charge to transfer
+ * @entry: swap entry to move the charge to
+ *
+ * Transfer the memsw charge of @page to @entry.
+ */
+void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+{
+ struct mem_cgroup *memcg;
+ unsigned short oldid;
+
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_PAGE(page_count(page), page);
+
+ if (!do_swap_account)
+ return;
+
+ memcg = page->mem_cgroup;
+
+ /* Readahead page, never charged */
+ if (!memcg)
+ return;
+
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+ VM_BUG_ON_PAGE(oldid, page);
+ mem_cgroup_swap_statistics(memcg, true);
+
+ page->mem_cgroup = NULL;
+
+ if (!mem_cgroup_is_root(memcg))
+ page_counter_uncharge(&memcg->memory, 1);
+
+ /* XXX: caller holds IRQ-safe mapping->tree_lock */
+ VM_BUG_ON(!irqs_disabled());
+
+ mem_cgroup_charge_statistics(memcg, page, -1);
+ memcg_check_events(memcg, page);
+}
+
+/**
+ * mem_cgroup_uncharge_swap - uncharge a swap entry
+ * @entry: swap entry to uncharge
+ *
+ * Drop the memsw charge associated with @entry.
+ */
+void mem_cgroup_uncharge_swap(swp_entry_t entry)
+{
+ struct mem_cgroup *memcg;
+ unsigned short id;
+
+ if (!do_swap_account)
+ return;
+
+ id = swap_cgroup_record(entry, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
+ if (memcg) {
+ if (!mem_cgroup_is_root(memcg))
+ page_counter_uncharge(&memcg->memsw, 1);
+ mem_cgroup_swap_statistics(memcg, false);
+ css_put(&memcg->css);
+ }
+ rcu_read_unlock();
+}
+
+/* for remembering the boot option */
+#ifdef CONFIG_MEMCG_SWAP_ENABLED
+static int really_do_swap_account __initdata = 1;
+#else
+static int really_do_swap_account __initdata;
+#endif
+
+static int __init enable_swap_account(char *s)
+{
+ if (!strcmp(s, "1"))
+ really_do_swap_account = 1;
+ else if (!strcmp(s, "0"))
+ really_do_swap_account = 0;
+ return 1;
+}
+__setup("swapaccount=", enable_swap_account);
+
+static struct cftype memsw_cgroup_files[] = {
+ {
+ .name = "memsw.usage_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
+ .read_u64 = mem_cgroup_read_u64,
+ },
+ {
+ .name = "memsw.max_usage_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
+ .write = mem_cgroup_reset,
+ .read_u64 = mem_cgroup_read_u64,
+ },
+ {
+ .name = "memsw.limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
+ .write = mem_cgroup_write,
+ .read_u64 = mem_cgroup_read_u64,
+ },
+ {
+ .name = "memsw.failcnt",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
+ .write = mem_cgroup_reset,
+ .read_u64 = mem_cgroup_read_u64,
+ },
+ { }, /* terminate */
+};
+
+static int __init mem_cgroup_swap_init(void)
+{
+ if (!mem_cgroup_disabled() && really_do_swap_account) {
+ do_swap_account = 1;
+ WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
+ memsw_cgroup_files));
+ }
+ return 0;
+}
+subsys_initcall(mem_cgroup_swap_init);
+
+#endif /* CONFIG_MEMCG_SWAP */
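
Note that moving the legacy swap-accounting setup into its own subsys_initcall does not change the user interface: the swapaccount= boot parameter still works (for example, booting with swapaccount=0 keeps do_swap_account off even when CONFIG_MEMCG_SWAP_ENABLED selects it by default), and the memsw.* legacy files are only registered when accounting is actually enabled.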
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index feb803bf3443..d487f8dc6d39 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -242,15 +242,8 @@ void shake_page(struct page *p, int access)
* Only call shrink_node_slabs here (which would also shrink
* other caches) if access is not potentially fatal.
*/
- if (access) {
- int nr;
- int nid = page_to_nid(p);
- do {
- nr = shrink_node_slabs(GFP_KERNEL, nid, 1000, 1000);
- if (page_count(p) == 1)
- break;
- } while (nr > 10);
- }
+ if (access)
+ drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);
@@ -1654,8 +1647,6 @@ static int __soft_offline_page(struct page *page, int flags)
* setting PG_hwpoison.
*/
if (!is_free_buddy_page(page))
- lru_add_drain_all();
- if (!is_free_buddy_page(page))
drain_all_pages(page_zone(page));
SetPageHWPoison(page);
if (!is_free_buddy_page(page))
diff --git a/mm/memory.c b/mm/memory.c
index c6565f00fb38..8068893697bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
+ if (!tlb->end)
+ return;
+
tlb_flush(tlb);
mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
- for (batch = &tlb->local; batch; batch = batch->next) {
+ for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
void tlb_flush_mmu(struct mmu_gather *tlb)
{
- if (!tlb->end)
- return;
-
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
+ mm_dec_nr_pmds(tlb->mm);
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -754,6 +755,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
if (HAVE_PTE_SPECIAL) {
if (likely(!pte_special(pte)))
goto check_pfn;
+ if (vma->vm_ops && vma->vm_ops->find_special_page)
+ return vma->vm_ops->find_special_page(vma, addr);
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
return NULL;
if (!is_zero_pfn(pfn))
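
The new vm_ops->find_special_page() hook lets a driver that installs special PTEs still hand back a struct page to vm_normal_page() callers. A hedged sketch of a driver-side implementation (the names and the lookup are illustrative; the hook is assumed to take the VMA and the faulting address and return the backing page):

/* Illustrative driver state: an array of pre-allocated pages. */
struct my_priv {
        struct page **pages;
};

static struct page *my_find_special_page(struct vm_area_struct *vma,
                                         unsigned long addr)
{
        struct my_priv *priv = vma->vm_private_data;

        return priv->pages[(addr - vma->vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct my_vm_ops = {
        .find_special_page      = my_find_special_page,
};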
@@ -811,42 +814,40 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
- if (!pte_file(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
- if (likely(!non_swap_entry(entry))) {
- if (swap_duplicate(entry) < 0)
- return entry.val;
-
- /* make sure dst_mm is on swapoff's mmlist. */
- if (unlikely(list_empty(&dst_mm->mmlist))) {
- spin_lock(&mmlist_lock);
- if (list_empty(&dst_mm->mmlist))
- list_add(&dst_mm->mmlist,
- &src_mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
- rss[MM_SWAPENTS]++;
- } else if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
-
- if (PageAnon(page))
- rss[MM_ANONPAGES]++;
- else
- rss[MM_FILEPAGES]++;
-
- if (is_write_migration_entry(entry) &&
- is_cow_mapping(vm_flags)) {
- /*
- * COW mappings require pages in both
- * parent and child to be set to read.
- */
- make_migration_entry_read(&entry);
- pte = swp_entry_to_pte(entry);
- if (pte_swp_soft_dirty(*src_pte))
- pte = pte_swp_mksoft_dirty(pte);
- set_pte_at(src_mm, addr, src_pte, pte);
- }
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ if (likely(!non_swap_entry(entry))) {
+ if (swap_duplicate(entry) < 0)
+ return entry.val;
+
+ /* make sure dst_mm is on swapoff's mmlist. */
+ if (unlikely(list_empty(&dst_mm->mmlist))) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&dst_mm->mmlist))
+ list_add(&dst_mm->mmlist,
+ &src_mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ rss[MM_SWAPENTS]++;
+ } else if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+
+ if (PageAnon(page))
+ rss[MM_ANONPAGES]++;
+ else
+ rss[MM_FILEPAGES]++;
+
+ if (is_write_migration_entry(entry) &&
+ is_cow_mapping(vm_flags)) {
+ /*
+ * COW mappings require pages in both
+ * parent and child to be set to read.
+ */
+ make_migration_entry_read(&entry);
+ pte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(*src_pte))
+ pte = pte_swp_mksoft_dirty(pte);
+ set_pte_at(src_mm, addr, src_pte, pte);
}
}
goto out_set_pte;
@@ -1020,11 +1021,9 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
- if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
- VM_PFNMAP | VM_MIXEDMAP))) {
- if (!vma->anon_vma)
- return 0;
- }
+ if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
+ !vma->anon_vma)
+ return 0;
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
@@ -1082,6 +1081,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
+ swp_entry_t entry;
again:
init_rss_vec(rss);
@@ -1107,28 +1107,12 @@ again:
if (details->check_mapping &&
details->check_mapping != page->mapping)
continue;
- /*
- * Each page->index must be checked when
- * invalidating or truncating nonlinear.
- */
- if (details->nonlinear_vma &&
- (page->index < details->first_index ||
- page->index > details->last_index))
- continue;
}
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
- if (unlikely(details) && details->nonlinear_vma
- && linear_page_index(details->nonlinear_vma,
- addr) != page->index) {
- pte_t ptfile = pgoff_to_pte(page->index);
- if (pte_soft_dirty(ptent))
- ptfile = pte_file_mksoft_dirty(ptfile);
- set_pte_at(mm, addr, pte, ptfile);
- }
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
@@ -1151,33 +1135,25 @@ again:
}
continue;
}
- /*
- * If details->check_mapping, we leave swap entries;
- * if details->nonlinear_vma, we leave file entries.
- */
+ /* If details->check_mapping, we leave swap entries. */
if (unlikely(details))
continue;
- if (pte_file(ptent)) {
- if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
- print_bad_pte(vma, addr, ptent, NULL);
- } else {
- swp_entry_t entry = pte_to_swp_entry(ptent);
- if (!non_swap_entry(entry))
- rss[MM_SWAPENTS]--;
- else if (is_migration_entry(entry)) {
- struct page *page;
+ entry = pte_to_swp_entry(ptent);
+ if (!non_swap_entry(entry))
+ rss[MM_SWAPENTS]--;
+ else if (is_migration_entry(entry)) {
+ struct page *page;
- page = migration_entry_to_page(entry);
+ page = migration_entry_to_page(entry);
- if (PageAnon(page))
- rss[MM_ANONPAGES]--;
- else
- rss[MM_FILEPAGES]--;
- }
- if (unlikely(!free_swap_and_cache(entry)))
- print_bad_pte(vma, addr, ptent, NULL);
+ if (PageAnon(page))
+ rss[MM_ANONPAGES]--;
+ else
+ rss[MM_FILEPAGES]--;
}
+ if (unlikely(!free_swap_and_cache(entry)))
+ print_bad_pte(vma, addr, ptent, NULL);
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -1277,7 +1253,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
pgd_t *pgd;
unsigned long next;
- if (details && !details->check_mapping && !details->nonlinear_vma)
+ if (details && !details->check_mapping)
details = NULL;
BUG_ON(addr >= end);
@@ -1371,7 +1347,7 @@ void unmap_vmas(struct mmu_gather *tlb,
* @vma: vm_area_struct holding the applicable pages
* @start: starting address of pages to zap
* @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
*
* Caller must protect the VMA list
*/
@@ -1397,7 +1373,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
*
* The range must fit into one VMA.
*/
@@ -1922,12 +1898,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
EXPORT_SYMBOL_GPL(apply_to_page_range);
/*
- * handle_pte_fault chooses page fault handler according to an entry
- * which was read non-atomically. Before making any commitment, on
- * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
- * must check under lock before unmapping the pte and proceeding
- * (but do_wp_page is only called after already making such a check;
+ * handle_pte_fault chooses page fault handler according to an entry which was
+ * read non-atomically. Before making any commitment, on those architectures
+ * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
+ * parts, do_swap_page must check under lock before unmapping the pte and
+ * proceeding (but do_wp_page is only called after already making such a check;
* and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
@@ -1990,6 +1965,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
vmf.pgoff = page->index;
vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
vmf.page = page;
+ vmf.cow_page = NULL;
ret = vma->vm_ops->page_mkwrite(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
@@ -2033,7 +2009,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t entry;
int ret = 0;
int page_mkwrite = 0;
- struct page *dirty_page = NULL;
+ bool dirty_shared = false;
unsigned long mmun_start = 0; /* For mmu_notifiers */
unsigned long mmun_end = 0; /* For mmu_notifiers */
struct mem_cgroup *memcg;
@@ -2084,6 +2060,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unlock_page(old_page);
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
+ page_cache_get(old_page);
/*
* Only catch write-faults on shared writable pages,
* read-only shared pages can get COWed by
@@ -2091,7 +2068,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
int tmp;
- page_cache_get(old_page);
+
pte_unmap_unlock(page_table, ptl);
tmp = do_page_mkwrite(vma, old_page, address);
if (unlikely(!tmp || (tmp &
@@ -2111,11 +2088,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unlock_page(old_page);
goto unlock;
}
-
page_mkwrite = 1;
}
- dirty_page = old_page;
- get_page(dirty_page);
+
+ dirty_shared = true;
reuse:
/*
@@ -2134,20 +2110,20 @@ reuse:
pte_unmap_unlock(page_table, ptl);
ret |= VM_FAULT_WRITE;
- if (!dirty_page)
- return ret;
-
- if (!page_mkwrite) {
+ if (dirty_shared) {
struct address_space *mapping;
int dirtied;
- lock_page(dirty_page);
- dirtied = set_page_dirty(dirty_page);
- VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
- mapping = dirty_page->mapping;
- unlock_page(dirty_page);
+ if (!page_mkwrite)
+ lock_page(old_page);
- if (dirtied && mapping) {
+ dirtied = set_page_dirty(old_page);
+ VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
+ mapping = old_page->mapping;
+ unlock_page(old_page);
+ page_cache_release(old_page);
+
+ if ((dirtied || page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping
* but still dirty their pages
@@ -2155,25 +2131,9 @@ reuse:
balance_dirty_pages_ratelimited(mapping);
}
- /* file_update_time outside page_lock */
- if (vma->vm_file)
+ if (!page_mkwrite)
file_update_time(vma->vm_file);
}
- put_page(dirty_page);
- if (page_mkwrite) {
- struct address_space *mapping = dirty_page->mapping;
-
- set_page_dirty(dirty_page);
- unlock_page(dirty_page);
- page_cache_release(dirty_page);
- if (mapping) {
- /*
- * Some device drivers do not set page.mapping
- * but still dirty their pages
- */
- balance_dirty_pages_ratelimited(mapping);
- }
- }
return ret;
}
@@ -2331,25 +2291,11 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
}
}
-static inline void unmap_mapping_range_list(struct list_head *head,
- struct zap_details *details)
-{
- struct vm_area_struct *vma;
-
- /*
- * In nonlinear VMAs there is no correspondence between virtual address
- * offset and file offset. So we must perform an exhaustive search
- * across *all* the pages in each nonlinear VMA, not just the pages
- * whose virtual address lies outside the file truncation point.
- */
- list_for_each_entry(vma, head, shared.nonlinear) {
- details->nonlinear_vma = vma;
- unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
- }
-}
-
/**
- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified
+ * address_space corresponding to the specified page range in the underlying
+ * file.
+ *
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
@@ -2378,18 +2324,16 @@ void unmap_mapping_range(struct address_space *mapping,
}
details.check_mapping = even_cows? NULL: mapping;
- details.nonlinear_vma = NULL;
details.first_index = hba;
details.last_index = hba + hlen - 1;
if (details.last_index < details.first_index)
details.last_index = ULONG_MAX;
+ /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
- if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
- unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
i_mmap_unlock_write(mapping);
}
EXPORT_SYMBOL(unmap_mapping_range);
@@ -2632,7 +2576,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_SIGSEGV;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
@@ -2696,7 +2640,8 @@ oom:
* See filemap_fault() and __lock_page_retry().
*/
static int __do_fault(struct vm_area_struct *vma, unsigned long address,
- pgoff_t pgoff, unsigned int flags, struct page **page)
+ pgoff_t pgoff, unsigned int flags,
+ struct page *cow_page, struct page **page)
{
struct vm_fault vmf;
int ret;
@@ -2705,10 +2650,13 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.page = NULL;
+ vmf.cow_page = cow_page;
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
+ if (!vmf.page)
+ goto out;
if (unlikely(PageHWPoison(vmf.page))) {
if (ret & VM_FAULT_LOCKED)
@@ -2722,6 +2670,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
else
VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
+ out:
*page = vmf.page;
return ret;
}
@@ -2750,8 +2699,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
entry = mk_pte(page, vma->vm_page_prot);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
- entry = pte_mksoft_dirty(entry);
if (anon) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
@@ -2886,8 +2833,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* if page by the offset is not ready to be mapped (cold cache or
* something).
*/
- if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
- fault_around_bytes >> PAGE_SHIFT > 1) {
+ if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
do_fault_around(vma, address, pte, pgoff, flags);
if (!pte_same(*pte, orig_pte))
@@ -2895,7 +2841,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap_unlock(pte, ptl);
}
- ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+ ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
@@ -2935,26 +2881,43 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
}
- ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+ ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
- copy_user_highpage(new_page, fault_page, address, vma);
+ if (fault_page)
+ copy_user_highpage(new_page, fault_page, address, vma);
__SetPageUptodate(new_page);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
- unlock_page(fault_page);
- page_cache_release(fault_page);
+ if (fault_page) {
+ unlock_page(fault_page);
+ page_cache_release(fault_page);
+ } else {
+ /*
+ * The fault handler has no page to lock, so it holds
+ * i_mmap_lock for read to protect against truncate.
+ */
+ i_mmap_unlock_read(vma->vm_file->f_mapping);
+ }
goto uncharge_out;
}
do_set_pte(vma, address, new_page, pte, true, true);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
- unlock_page(fault_page);
- page_cache_release(fault_page);
+ if (fault_page) {
+ unlock_page(fault_page);
+ page_cache_release(fault_page);
+ } else {
+ /*
+ * The fault handler has no page to lock, so it holds
+ * i_mmap_lock for read to protect against truncate.
+ */
+ i_mmap_unlock_read(vma->vm_file->f_mapping);
+ }
return ret;
uncharge_out:
mem_cgroup_cancel_charge(new_page, memcg);
@@ -2973,7 +2936,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int dirtied = 0;
int ret, tmp;
- ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+ ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
@@ -3019,8 +2982,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
balance_dirty_pages_ratelimited(mapping);
}
- /* file_update_time outside page_lock */
- if (vma->vm_file && !vma->vm_ops->page_mkwrite)
+ if (!vma->vm_ops->page_mkwrite)
file_update_time(vma->vm_file);
return ret;
@@ -3032,7 +2994,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
-static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
@@ -3049,46 +3011,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
-/*
- * Fault of a previously existing named mapping. Repopulate the pte
- * from the encoded file_pte if possible. This enables swappable
- * nonlinear vmas.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with pte unmapped and unlocked.
- * The mmap_sem may have been released depending on flags and our
- * return value. See filemap_fault() and __lock_page_or_retry().
- */
-static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *page_table, pmd_t *pmd,
- unsigned int flags, pte_t orig_pte)
-{
- pgoff_t pgoff;
-
- flags |= FAULT_FLAG_NONLINEAR;
-
- if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
- return 0;
-
- if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
- /*
- * Page table corrupted: show pte and kill process.
- */
- print_bad_pte(vma, address, orig_pte, NULL);
- return VM_FAULT_SIGBUS;
- }
-
- pgoff = pte_to_pgoff(orig_pte);
- if (!(flags & FAULT_FLAG_WRITE))
- return do_read_fault(mm, vma, address, pmd, pgoff, flags,
- orig_pte);
- if (!(vma->vm_flags & VM_SHARED))
- return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
- orig_pte);
- return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
-}
-
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int page_nid,
int *flags)
@@ -3115,14 +3037,17 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
bool migrated = false;
int flags = 0;
+ /* A PROT_NONE fault should not end up here */
+ BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+
/*
* The "pte" at this point cannot be used safely without
* validation through pte_unmap_same(). It's of NUMA type but
* the pfn may be screwed if the read is non atomic.
*
- * ptep_modify_prot_start is not called as this is clearing
- * the _PAGE_NUMA bit and it is not really expected that there
- * would be concurrent hardware modifications to the PTE.
+ * We can safely just do a "set_pte_at()", because the old
+ * page table entry is not accessible, so there would be no
+ * concurrent hardware modifications to the PTE.
*/
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
@@ -3131,7 +3056,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
}
- pte = pte_mknonnuma(pte);
+ /* Make it present again */
+ pte = pte_modify(pte, vma->vm_page_prot);
+ pte = pte_mkyoung(pte);
set_pte_at(mm, addr, ptep, pte);
update_mmu_cache(vma, addr, ptep);
@@ -3140,7 +3067,6 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap_unlock(ptep, ptl);
return 0;
}
- BUG_ON(is_zero_pfn(page_to_pfn(page)));
/*
* Avoid grouping on DSO/COW pages in specific and RO pages
@@ -3216,20 +3142,17 @@ static int handle_pte_fault(struct mm_struct *mm,
if (pte_none(entry)) {
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
- return do_linear_fault(mm, vma, address,
- pte, pmd, flags, entry);
+ return do_fault(mm, vma, address, pte,
+ pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
}
- if (pte_file(entry))
- return do_nonlinear_fault(mm, vma, address,
- pte, pmd, flags, entry);
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}
- if (pte_numa(entry))
+ if (pte_protnone(entry))
return do_numa_page(mm, vma, address, entry, pte, pmd);
ptl = pte_lockptr(mm, pmd);
@@ -3307,7 +3230,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (pmd_trans_splitting(orig_pmd))
return 0;
- if (pmd_numa(orig_pmd))
+ if (pmd_protnone(orig_pmd))
return do_huge_pmd_numa_page(mm, vma, address,
orig_pmd, pmd);
@@ -3428,15 +3351,17 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
- if (pud_present(*pud)) /* Another has populated it */
- pmd_free(mm, new);
- else
+ if (!pud_present(*pud)) {
+ mm_inc_nr_pmds(mm);
pud_populate(mm, pud, new);
-#else
- if (pgd_present(*pud)) /* Another has populated it */
+ } else /* Another has populated it */
pmd_free(mm, new);
- else
+#else
+ if (!pgd_present(*pud)) {
+ mm_inc_nr_pmds(mm);
pgd_populate(mm, pud, new);
+ } else /* Another has populated it */
+ pmd_free(mm, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
@@ -3561,7 +3486,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
if (follow_phys(vma, addr, write, &prot, &phys_addr))
return -EINVAL;
- maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+ maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (write)
memcpy_toio(maddr + offset, buf, len);
else
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0e0961b8c39c..4721046a134a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -471,24 +471,34 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
+struct queue_pages {
+ struct list_head *pagelist;
+ unsigned long flags;
+ nodemask_t *nmask;
+ struct vm_area_struct *prev;
+};
+
/*
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
*/
-static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- const nodemask_t *nodes, unsigned long flags,
- void *private)
+static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
{
- pte_t *orig_pte;
+ struct vm_area_struct *vma = walk->vma;
+ struct page *page;
+ struct queue_pages *qp = walk->private;
+ unsigned long flags = qp->flags;
+ int nid;
pte_t *pte;
spinlock_t *ptl;
- orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- do {
- struct page *page;
- int nid;
+ split_huge_page_pmd(vma, addr, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, addr, *pte);
@@ -501,114 +511,46 @@ static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (PageReserved(page))
continue;
nid = page_to_nid(page);
- if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
continue;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
- migrate_page_add(page, private, flags);
- else
- break;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap_unlock(orig_pte, ptl);
- return addr != end;
+ migrate_page_add(page, qp->pagelist, flags);
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+ return 0;
}
-static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
- pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
- void *private)
+static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
+ struct queue_pages *qp = walk->private;
+ unsigned long flags = qp->flags;
int nid;
struct page *page;
spinlock_t *ptl;
pte_t entry;
- ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
- entry = huge_ptep_get((pte_t *)pmd);
+ ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
+ entry = huge_ptep_get(pte);
if (!pte_present(entry))
goto unlock;
page = pte_page(entry);
nid = page_to_nid(page);
- if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
goto unlock;
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
if (flags & (MPOL_MF_MOVE_ALL) ||
(flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
- isolate_huge_page(page, private);
+ isolate_huge_page(page, qp->pagelist);
unlock:
spin_unlock(ptl);
#else
BUG();
#endif
-}
-
-static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
- unsigned long addr, unsigned long end,
- const nodemask_t *nodes, unsigned long flags,
- void *private)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (!pmd_present(*pmd))
- continue;
- if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
- queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
- flags, private);
- continue;
- }
- split_huge_page_pmd(vma, addr, pmd);
- if (pmd_none_or_trans_huge_or_clear_bad(pmd))
- continue;
- if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
- flags, private))
- return -EIO;
- } while (pmd++, addr = next, addr != end);
- return 0;
-}
-
-static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- const nodemask_t *nodes, unsigned long flags,
- void *private)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
- continue;
- if (pud_none_or_clear_bad(pud))
- continue;
- if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
- flags, private))
- return -EIO;
- } while (pud++, addr = next, addr != end);
- return 0;
-}
-
-static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
- const nodemask_t *nodes, unsigned long flags,
- void *private)
-{
- pgd_t *pgd;
- unsigned long next;
-
- pgd = pgd_offset(vma->vm_mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
- flags, private))
- return -EIO;
- } while (pgd++, addr = next, addr != end);
return 0;
}
@@ -627,7 +569,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
{
int nr_updated;
- nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
+ nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
if (nr_updated)
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
@@ -641,6 +583,49 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
}
#endif /* CONFIG_NUMA_BALANCING */
+static int queue_pages_test_walk(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct vm_area_struct *vma = walk->vma;
+ struct queue_pages *qp = walk->private;
+ unsigned long endvma = vma->vm_end;
+ unsigned long flags = qp->flags;
+
+ if (vma->vm_flags & VM_PFNMAP)
+ return 1;
+
+ if (endvma > end)
+ endvma = end;
+ if (vma->vm_start > start)
+ start = vma->vm_start;
+
+ if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+ if (!vma->vm_next && vma->vm_end < end)
+ return -EFAULT;
+ if (qp->prev && qp->prev->vm_end < vma->vm_start)
+ return -EFAULT;
+ }
+
+ qp->prev = vma;
+
+ if (vma->vm_flags & VM_PFNMAP)
+ return 1;
+
+ if (flags & MPOL_MF_LAZY) {
+ /* Similar to task_numa_work, skip inaccessible VMAs */
+ if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+ change_prot_numa(vma, start, endvma);
+ return 1;
+ }
+
+ if ((flags & MPOL_MF_STRICT) ||
+ ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
+ vma_migratable(vma)))
+ /* queue pages from current vma */
+ return 0;
+ return 1;
+}
+
/*
* Walk through page tables and collect pages to be migrated.
*
@@ -650,50 +635,24 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
- const nodemask_t *nodes, unsigned long flags, void *private)
-{
- int err = 0;
- struct vm_area_struct *vma, *prev;
-
- vma = find_vma(mm, start);
- if (!vma)
- return -EFAULT;
- prev = NULL;
- for (; vma && vma->vm_start < end; vma = vma->vm_next) {
- unsigned long endvma = vma->vm_end;
-
- if (endvma > end)
- endvma = end;
- if (vma->vm_start > start)
- start = vma->vm_start;
-
- if (!(flags & MPOL_MF_DISCONTIG_OK)) {
- if (!vma->vm_next && vma->vm_end < end)
- return -EFAULT;
- if (prev && prev->vm_end < vma->vm_start)
- return -EFAULT;
- }
-
- if (flags & MPOL_MF_LAZY) {
- /* Similar to task_numa_work, skip inaccessible VMAs */
- if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
- change_prot_numa(vma, start, endvma);
- goto next;
- }
-
- if ((flags & MPOL_MF_STRICT) ||
- ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
- vma_migratable(vma))) {
-
- err = queue_pages_pgd_range(vma, start, endvma, nodes,
- flags, private);
- if (err)
- break;
- }
-next:
- prev = vma;
- }
- return err;
+ nodemask_t *nodes, unsigned long flags,
+ struct list_head *pagelist)
+{
+ struct queue_pages qp = {
+ .pagelist = pagelist,
+ .flags = flags,
+ .nmask = nodes,
+ .prev = NULL,
+ };
+ struct mm_walk queue_pages_walk = {
+ .hugetlb_entry = queue_pages_hugetlb,
+ .pmd_entry = queue_pages_pte_range,
+ .test_walk = queue_pages_test_walk,
+ .mm = mm,
+ .private = &qp,
+ };
+
+ return walk_page_range(start, end, &queue_pages_walk);
}
/*
@@ -1988,43 +1947,63 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
* @order:Order of the GFP allocation.
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
+ * @node: Which node to prefer for allocation (modulo policy).
+ * @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
* When VMA is not NULL caller must hold down_read on the mmap_sem of the
* mm_struct of the VMA to prevent it from going away. Should be used for
- * all allocations for pages that will be mapped into
- * user space. Returns NULL when no page can be allocated.
- *
- * Should be called with the mm_sem of the vma hold.
+ * all allocations for pages that will be mapped into user space. Returns
+ * NULL when no page can be allocated.
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, int node)
+ unsigned long addr, int node, bool hugepage)
{
struct mempolicy *pol;
struct page *page;
unsigned int cpuset_mems_cookie;
+ struct zonelist *zl;
+ nodemask_t *nmask;
retry_cpuset:
pol = get_vma_policy(vma, addr);
cpuset_mems_cookie = read_mems_allowed_begin();
- if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
+ if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage &&
+ pol->mode != MPOL_INTERLEAVE)) {
+ /*
+ * For hugepage allocation and non-interleave policy which
+ * allows the current node, we only try to allocate from the
+ * current node and don't fall back to other nodes, as the
+ * cost of remote accesses would likely offset THP benefits.
+ *
+ * If the policy is interleave, or does not allow the current
+ * node in its nodemask, we allocate the standard way.
+ */
+ nmask = policy_nodemask(gfp, pol);
+ if (!nmask || node_isset(node, *nmask)) {
+ mpol_cond_put(pol);
+ page = alloc_pages_exact_node(node, gfp, order);
+ goto out;
+ }
+ }
+
+ if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
- if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
- goto retry_cpuset;
-
- return page;
+ goto out;
}
- page = __alloc_pages_nodemask(gfp, order,
- policy_zonelist(gfp, pol, node),
- policy_nodemask(gfp, pol));
+
+ nmask = policy_nodemask(gfp, pol);
+ zl = policy_zonelist(gfp, pol, node);
mpol_cond_put(pol);
+ page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+out:
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
@@ -2838,8 +2817,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
p += snprintf(p, buffer + maxlen - p, "relative");
}
- if (!nodes_empty(nodes)) {
- p += snprintf(p, buffer + maxlen - p, ":");
- p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
- }
+ if (!nodes_empty(nodes))
+ p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
+ nodemask_pr_args(&nodes));
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 344cdf692fc8..85e042686031 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -179,37 +179,6 @@ out:
}
/*
- * Congratulations to trinity for discovering this bug.
- * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
- * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
- * replace the specified range by file ptes throughout (maybe populated after).
- * If page migration finds a page within that range, while it's still located
- * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
- * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
- * But if the migrating page is in a part of the vma outside the range to be
- * remapped, then it will not be cleared, and remove_migration_ptes() needs to
- * deal with it. Fortunately, this part of the vma is of course still linear,
- * so we just need to use linear location on the nonlinear list.
- */
-static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
- struct address_space *mapping, void *arg)
-{
- struct vm_area_struct *vma;
- /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- unsigned long addr;
-
- list_for_each_entry(vma,
- &mapping->i_mmap_nonlinear, shared.nonlinear) {
-
- addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
- if (addr >= vma->vm_start && addr < vma->vm_end)
- remove_migration_pte(page, vma, addr, arg);
- }
- return SWAP_AGAIN;
-}
-
-/*
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
@@ -218,7 +187,6 @@ static void remove_migration_ptes(struct page *old, struct page *new)
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
.arg = old,
- .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
};
rmap_walk(new, &rwc);
@@ -229,7 +197,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
* get to the page and wait until migration is finished.
* When we return from this function the fault will be retried.
*/
-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl)
{
pte_t pte;
@@ -1268,7 +1236,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto put_and_set;
if (PageHuge(page)) {
- isolate_huge_page(page, &pagelist);
+ if (PageHead(page))
+ isolate_huge_page(page, &pagelist);
goto put_and_set;
}
@@ -1685,12 +1654,6 @@ bool pmd_trans_migrating(pmd_t pmd)
return PageLocked(page);
}
-void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
-{
- struct page *page = pmd_page(*pmd);
- wait_on_page_locked(page);
-}
-
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
@@ -1884,7 +1847,7 @@ out_fail:
out_dropref:
ptl = pmd_lock(mm, pmd);
if (pmd_same(*pmd, entry)) {
- entry = pmd_mknonnuma(entry);
+ entry = pmd_modify(entry, vma->vm_page_prot);
set_pmd_at(mm, mmun_start, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry);
}
diff --git a/mm/mincore.c b/mm/mincore.c
index c8c528b36641..be25efde64a4 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -19,38 +19,25 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
+static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
- struct hstate *h;
+ unsigned char present;
+ unsigned char *vec = walk->private;
- h = hstate_vma(vma);
- while (1) {
- unsigned char present;
- pte_t *ptep;
- /*
- * Huge pages are always in RAM for now, but
- * theoretically it needs to be checked.
- */
- ptep = huge_pte_offset(current->mm,
- addr & huge_page_mask(h));
- present = ptep && !huge_pte_none(huge_ptep_get(ptep));
- while (1) {
- *vec = present;
- vec++;
- addr += PAGE_SIZE;
- if (addr == end)
- return;
- /* check hugepage border */
- if (!(addr & ~huge_page_mask(h)))
- break;
- }
- }
+ /*
+ * Hugepages under user process are always in RAM and never
+ * swapped out, but theoretically it needs to be checked.
+ */
+ present = pte && !huge_pte_none(huge_ptep_get(pte));
+ for (; addr != end; vec++, addr += PAGE_SIZE)
+ *vec = present;
+ walk->private = vec;
#else
BUG();
#endif
+ return 0;
}
/*
@@ -94,9 +81,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
return present;
}
-static void mincore_unmapped_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+ struct vm_area_struct *vma, unsigned char *vec)
{
unsigned long nr = (end - addr) >> PAGE_SHIFT;
int i;
@@ -111,30 +97,47 @@ static void mincore_unmapped_range(struct vm_area_struct *vma,
for (i = 0; i < nr; i++)
vec[i] = 0;
}
+ return nr;
+}
+
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ walk->private += __mincore_unmapped_range(addr, end,
+ walk->vma, walk->private);
+ return 0;
}
-static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
+static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
{
- unsigned long next;
spinlock_t *ptl;
+ struct vm_area_struct *vma = walk->vma;
pte_t *ptep;
+ unsigned char *vec = walk->private;
+ int nr = (end - addr) >> PAGE_SHIFT;
+
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ memset(vec, 1, nr);
+ spin_unlock(ptl);
+ goto out;
+ }
+
+ if (pmd_trans_unstable(pmd)) {
+ __mincore_unmapped_range(addr, end, vma, vec);
+ goto out;
+ }
- ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- do {
+ ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ for (; addr != end; ptep++, addr += PAGE_SIZE) {
pte_t pte = *ptep;
- pgoff_t pgoff;
- next = addr + PAGE_SIZE;
if (pte_none(pte))
- mincore_unmapped_range(vma, addr, next, vec);
+ __mincore_unmapped_range(addr, addr + PAGE_SIZE,
+ vma, vec);
else if (pte_present(pte))
*vec = 1;
- else if (pte_file(pte)) {
- pgoff = pte_to_pgoff(pte);
- *vec = mincore_page(vma->vm_file->f_mapping, pgoff);
- } else { /* pte is a swap entry */
+ else { /* pte is a swap entry */
swp_entry_t entry = pte_to_swp_entry(pte);
if (non_swap_entry(entry)) {
@@ -145,9 +148,8 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
*vec = 1;
} else {
#ifdef CONFIG_SWAP
- pgoff = entry.val;
*vec = mincore_page(swap_address_space(entry),
- pgoff);
+ entry.val);
#else
WARN_ON(1);
*vec = 1;
@@ -155,69 +157,12 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
}
}
vec++;
- } while (ptep++, addr = next, addr != end);
+ }
pte_unmap_unlock(ptep - 1, ptl);
-}
-
-static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
-{
- unsigned long next;
- pmd_t *pmd;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_trans_huge(*pmd)) {
- if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
- vec += (next - addr) >> PAGE_SHIFT;
- continue;
- }
- /* fall through */
- }
- if (pmd_none_or_trans_huge_or_clear_bad(pmd))
- mincore_unmapped_range(vma, addr, next, vec);
- else
- mincore_pte_range(vma, pmd, addr, next, vec);
- vec += (next - addr) >> PAGE_SHIFT;
- } while (pmd++, addr = next, addr != end);
-}
-
-static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
-{
- unsigned long next;
- pud_t *pud;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- mincore_unmapped_range(vma, addr, next, vec);
- else
- mincore_pmd_range(vma, pud, addr, next, vec);
- vec += (next - addr) >> PAGE_SHIFT;
- } while (pud++, addr = next, addr != end);
-}
-
-static void mincore_page_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
- unsigned char *vec)
-{
- unsigned long next;
- pgd_t *pgd;
-
- pgd = pgd_offset(vma->vm_mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- mincore_unmapped_range(vma, addr, next, vec);
- else
- mincore_pud_range(vma, pgd, addr, next, vec);
- vec += (next - addr) >> PAGE_SHIFT;
- } while (pgd++, addr = next, addr != end);
+out:
+ walk->private += nr;
+ cond_resched();
+ return 0;
}
/*
@@ -229,18 +174,22 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
{
struct vm_area_struct *vma;
unsigned long end;
+ int err;
+ struct mm_walk mincore_walk = {
+ .pmd_entry = mincore_pte_range,
+ .pte_hole = mincore_unmapped_range,
+ .hugetlb_entry = mincore_hugetlb,
+ .private = vec,
+ };
vma = find_vma(current->mm, addr);
if (!vma || addr < vma->vm_start)
return -ENOMEM;
-
+ mincore_walk.mm = vma->vm_mm;
end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
-
- if (is_vm_hugetlb_page(vma))
- mincore_hugetlb_page_range(vma, addr, end, vec);
- else
- mincore_page_range(vma, addr, end, vec);
-
+ err = walk_page_range(addr, end, &mincore_walk);
+ if (err < 0)
+ return err;
return (end - addr) >> PAGE_SHIFT;
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 4074caf9936b..5f420f7fafa1 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -14,14 +14,14 @@
#include "internal.h"
#ifdef CONFIG_DEBUG_MEMORY_INIT
-int mminit_loglevel;
+int __meminitdata mminit_loglevel;
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT 0
#endif
/* The zonelists are simply reported, validation is manual. */
-void mminit_verify_zonelist(void)
+void __init mminit_verify_zonelist(void)
{
int nid;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f684d5a8087..da9990acc08b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
*/
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
- unsigned long free, allowed, reserve;
+ long free, allowed, reserve;
VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
-(s64)vm_committed_as_batch * num_online_cpus(),
@@ -220,7 +220,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
*/
if (mm) {
reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
- allowed -= min(mm->total_vm / 32, reserve);
+ allowed -= min_t(long, mm->total_vm / 32, reserve);
}
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
@@ -243,10 +243,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
mapping_unmap_writable(mapping);
flush_dcache_mmap_lock(mapping);
- if (unlikely(vma->vm_flags & VM_NONLINEAR))
- list_del_init(&vma->shared.nonlinear);
- else
- vma_interval_tree_remove(vma, &mapping->i_mmap);
+ vma_interval_tree_remove(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
@@ -649,10 +646,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
atomic_inc(&mapping->i_mmap_writable);
flush_dcache_mmap_lock(mapping);
- if (unlikely(vma->vm_flags & VM_NONLINEAR))
- vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
- else
- vma_interval_tree_insert(vma, &mapping->i_mmap);
+ vma_interval_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
}
@@ -789,14 +783,11 @@ again: remove_next = 1 + (end > next->vm_end);
if (file) {
mapping = file->f_mapping;
- if (!(vma->vm_flags & VM_NONLINEAR)) {
- root = &mapping->i_mmap;
- uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+ root = &mapping->i_mmap;
+ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
- if (adjust_next)
- uprobe_munmap(next, next->vm_start,
- next->vm_end);
- }
+ if (adjust_next)
+ uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
if (insert) {
@@ -2634,6 +2625,75 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
+
+/*
+ * Emulation of deprecated remap_file_pages() syscall.
+ */
+SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
+{
+
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long populate = 0;
+ unsigned long ret = -EINVAL;
+ struct file *file;
+
+ pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
+ "See Documentation/vm/remap_file_pages.txt.\n",
+ current->comm, current->pid);
+
+ if (prot)
+ return ret;
+ start = start & PAGE_MASK;
+ size = size & PAGE_MASK;
+
+ if (start + size <= start)
+ return ret;
+
+ /* Does pgoff wrap? */
+ if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+ return ret;
+
+ down_write(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+
+ if (!vma || !(vma->vm_flags & VM_SHARED))
+ goto out;
+
+ if (start < vma->vm_start || start + size > vma->vm_end)
+ goto out;
+
+ if (pgoff == linear_page_index(vma, start)) {
+ ret = 0;
+ goto out;
+ }
+
+ prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
+ prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
+ prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
+
+ flags &= MAP_NONBLOCK;
+ flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
+ if (vma->vm_flags & VM_LOCKED) {
+ flags |= MAP_LOCKED;
+ /* drop PG_Mlocked flag for over-mapped range */
+ munlock_vma_pages_range(vma, start, start + size);
+ }
+
+ file = get_file(vma->vm_file);
+ ret = do_mmap_pgoff(vma->vm_file, start, size,
+ prot, flags, pgoff, &populate);
+ fput(file);
+out:
+ up_write(&mm->mmap_sem);
+ if (populate)
+ mm_populate(ret, populate);
+ if (!IS_ERR_VALUE(ret))
+ ret = 0;
+ return ret;
+}
+
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
@@ -2791,9 +2851,6 @@ void exit_mmap(struct mm_struct *mm)
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-
- WARN_ON(atomic_long_read(&mm->nr_ptes) >
- (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
/* Insert vm structure into process list sorted by address
@@ -3108,8 +3165,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
*
* mmap_sem in write mode is required in order to block all operations
* that could modify pagetables and free pages without need of
- * altering the vma layout (for example populate_range() with
- * nonlinear vmas). It's also needed in write mode to avoid new
+ * altering the vma layout. It's also needed in write mode to avoid new
* anon_vmas to be associated with existing vmas.
*
* A single task can't take more than one mm_take_all_locks() in a row
diff --git a/mm/mmzone.c b/mm/mmzone.c
index bf34fb8556db..7d87ebb0d632 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -54,8 +54,7 @@ static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
- nodemask_t *nodes,
- struct zone **zone)
+ nodemask_t *nodes)
{
/*
* Find the next suitable zone to use for the allocation.
@@ -69,7 +68,6 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
(z->zone && !zref_in_nodemask(z, nodes)))
z++;
- *zone = zonelist_zone(z);
return z;
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ace93454ce8e..44727811bf4c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -75,37 +75,35 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
oldpte = *pte;
if (pte_present(oldpte)) {
pte_t ptent;
- bool updated = false;
- if (!prot_numa) {
- ptent = ptep_modify_prot_start(mm, addr, pte);
- if (pte_numa(ptent))
- ptent = pte_mknonnuma(ptent);
- ptent = pte_modify(ptent, newprot);
- /*
- * Avoid taking write faults for pages we
- * know to be dirty.
- */
- if (dirty_accountable && pte_dirty(ptent) &&
- (pte_soft_dirty(ptent) ||
- !(vma->vm_flags & VM_SOFTDIRTY)))
- ptent = pte_mkwrite(ptent);
- ptep_modify_prot_commit(mm, addr, pte, ptent);
- updated = true;
- } else {
+ /*
+ * Avoid trapping faults against the zero or KSM
+ * pages. See similar comment in change_huge_pmd.
+ */
+ if (prot_numa) {
struct page *page;
page = vm_normal_page(vma, addr, oldpte);
- if (page && !PageKsm(page)) {
- if (!pte_numa(oldpte)) {
- ptep_set_numa(mm, addr, pte);
- updated = true;
- }
- }
+ if (!page || PageKsm(page))
+ continue;
+
+ /* Avoid TLB flush if possible */
+ if (pte_protnone(oldpte))
+ continue;
}
- if (updated)
- pages++;
- } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
+
+ ptent = ptep_modify_prot_start(mm, addr, pte);
+ ptent = pte_modify(ptent, newprot);
+
+ /* Avoid taking write faults for known dirty pages */
+ if (dirty_accountable && pte_dirty(ptent) &&
+ (pte_soft_dirty(ptent) ||
+ !(vma->vm_flags & VM_SOFTDIRTY))) {
+ ptent = pte_mkwrite(ptent);
+ }
+ ptep_modify_prot_commit(mm, addr, pte, ptent);
+ pages++;
+ } else if (IS_ENABLED(CONFIG_MIGRATION)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
if (is_write_migration_entry(entry)) {
diff --git a/mm/mremap.c b/mm/mremap.c
index 17fa018f5f39..57dadc025c64 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -81,8 +81,6 @@ static pte_t move_soft_dirty_pte(pte_t pte)
pte = pte_mksoft_dirty(pte);
else if (is_swap_pte(pte))
pte = pte_swp_mksoft_dirty(pte);
- else if (pte_file(pte))
- pte = pte_file_mksoft_dirty(pte);
#endif
return pte;
}
diff --git a/mm/msync.c b/mm/msync.c
index 992a1673d488..bb04d53ae852 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -86,10 +86,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
(vma->vm_flags & VM_SHARED)) {
get_file(file);
up_read(&mm->mmap_sem);
- if (vma->vm_flags & VM_NONLINEAR)
- error = vfs_fsync(file, 1);
- else
- error = vfs_fsync_range(file, fstart, fend, 1);
+ error = vfs_fsync_range(file, fstart, fend, 1);
fput(file);
if (error || start >= end)
goto out;
diff --git a/mm/nommu.c b/mm/nommu.c
index b51eadf6d952..7296360fc057 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -59,6 +59,7 @@
#endif
void *high_memory;
+EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
@@ -213,6 +214,39 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked)
+{
+ return get_user_pages(tsk, mm, start, nr_pages, write, force,
+ pages, NULL);
+}
+EXPORT_SYMBOL(get_user_pages_locked);
+
+long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ unsigned int gup_flags)
+{
+ long ret;
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
+ pages, NULL);
+ up_read(&mm->mmap_sem);
+ return ret;
+}
+EXPORT_SYMBOL(__get_user_pages_unlocked);
+
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages)
+{
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
+ force, pages, 0);
+}
+EXPORT_SYMBOL(get_user_pages_unlocked);
+
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
@@ -946,9 +980,6 @@ static int validate_mmap_request(struct file *file,
return -EOVERFLOW;
if (file) {
- /* validate file mapping requests */
- struct address_space *mapping;
-
/* files must support mmap */
if (!file->f_op->mmap)
return -ENODEV;
@@ -957,28 +988,22 @@ static int validate_mmap_request(struct file *file,
* - we support chardevs that provide their own "memory"
* - we support files/blockdevs that are memory backed
*/
- mapping = file->f_mapping;
- if (!mapping)
- mapping = file_inode(file)->i_mapping;
-
- capabilities = 0;
- if (mapping && mapping->backing_dev_info)
- capabilities = mapping->backing_dev_info->capabilities;
-
- if (!capabilities) {
+ if (file->f_op->mmap_capabilities) {
+ capabilities = file->f_op->mmap_capabilities(file);
+ } else {
/* no explicit capabilities set, so assume some
* defaults */
switch (file_inode(file)->i_mode & S_IFMT) {
case S_IFREG:
case S_IFBLK:
- capabilities = BDI_CAP_MAP_COPY;
+ capabilities = NOMMU_MAP_COPY;
break;
case S_IFCHR:
capabilities =
- BDI_CAP_MAP_DIRECT |
- BDI_CAP_READ_MAP |
- BDI_CAP_WRITE_MAP;
+ NOMMU_MAP_DIRECT |
+ NOMMU_MAP_READ |
+ NOMMU_MAP_WRITE;
break;
default:
@@ -989,9 +1014,9 @@ static int validate_mmap_request(struct file *file,
/* eliminate any capabilities that we can't support on this
* device */
if (!file->f_op->get_unmapped_area)
- capabilities &= ~BDI_CAP_MAP_DIRECT;
+ capabilities &= ~NOMMU_MAP_DIRECT;
if (!file->f_op->read)
- capabilities &= ~BDI_CAP_MAP_COPY;
+ capabilities &= ~NOMMU_MAP_COPY;
/* The file shall have been opened with read permission. */
if (!(file->f_mode & FMODE_READ))
@@ -1010,29 +1035,29 @@ static int validate_mmap_request(struct file *file,
if (locks_verify_locked(file))
return -EAGAIN;
- if (!(capabilities & BDI_CAP_MAP_DIRECT))
+ if (!(capabilities & NOMMU_MAP_DIRECT))
return -ENODEV;
/* we mustn't privatise shared mappings */
- capabilities &= ~BDI_CAP_MAP_COPY;
+ capabilities &= ~NOMMU_MAP_COPY;
} else {
/* we're going to read the file into private memory we
* allocate */
- if (!(capabilities & BDI_CAP_MAP_COPY))
+ if (!(capabilities & NOMMU_MAP_COPY))
return -ENODEV;
/* we don't permit a private writable mapping to be
* shared with the backing device */
if (prot & PROT_WRITE)
- capabilities &= ~BDI_CAP_MAP_DIRECT;
+ capabilities &= ~NOMMU_MAP_DIRECT;
}
- if (capabilities & BDI_CAP_MAP_DIRECT) {
- if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) ||
- ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
- ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP))
+ if (capabilities & NOMMU_MAP_DIRECT) {
+ if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) ||
+ ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
+ ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC))
) {
- capabilities &= ~BDI_CAP_MAP_DIRECT;
+ capabilities &= ~NOMMU_MAP_DIRECT;
if (flags & MAP_SHARED) {
printk(KERN_WARNING
"MAP_SHARED not completely supported on !MMU\n");
@@ -1049,21 +1074,21 @@ static int validate_mmap_request(struct file *file,
} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
/* handle implication of PROT_EXEC by PROT_READ */
if (current->personality & READ_IMPLIES_EXEC) {
- if (capabilities & BDI_CAP_EXEC_MAP)
+ if (capabilities & NOMMU_MAP_EXEC)
prot |= PROT_EXEC;
}
} else if ((prot & PROT_READ) &&
(prot & PROT_EXEC) &&
- !(capabilities & BDI_CAP_EXEC_MAP)
+ !(capabilities & NOMMU_MAP_EXEC)
) {
/* backing file is not executable, try to copy */
- capabilities &= ~BDI_CAP_MAP_DIRECT;
+ capabilities &= ~NOMMU_MAP_DIRECT;
}
} else {
/* anonymous mappings are always memory backed and can be
* privately mapped
*/
- capabilities = BDI_CAP_MAP_COPY;
+ capabilities = NOMMU_MAP_COPY;
/* handle PROT_EXEC implication by PROT_READ */
if ((prot & PROT_READ) &&
@@ -1095,7 +1120,7 @@ static unsigned long determine_vm_flags(struct file *file,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
/* vm_flags |= mm->def_flags; */
- if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
+ if (!(capabilities & NOMMU_MAP_DIRECT)) {
/* attempt to share read-only copies of mapped file chunks */
vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (file && !(prot & PROT_WRITE))
@@ -1104,7 +1129,7 @@ static unsigned long determine_vm_flags(struct file *file,
/* overlay a shareable mapping on the backing device or inode
* if possible - used for chardevs, ramfs/tmpfs/shmfs and
* romfs/cramfs */
- vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
+ vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
if (flags & MAP_SHARED)
vm_flags |= VM_SHARED;
}
@@ -1157,7 +1182,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
* shared mappings on devices or memory
* - VM_MAYSHARE will be set if it may attempt to share
*/
- if (capabilities & BDI_CAP_MAP_DIRECT) {
+ if (capabilities & NOMMU_MAP_DIRECT) {
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
if (ret == 0) {
/* shouldn't return success if we're not sharing */
@@ -1346,7 +1371,7 @@ unsigned long do_mmap_pgoff(struct file *file,
if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
!(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
/* new mapping is not a subset of the region */
- if (!(capabilities & BDI_CAP_MAP_DIRECT))
+ if (!(capabilities & NOMMU_MAP_DIRECT))
goto sharing_violation;
continue;
}
@@ -1385,7 +1410,7 @@ unsigned long do_mmap_pgoff(struct file *file,
* - this is the hook for quasi-memory character devices to
* tell us the location of a shared mapping
*/
- if (capabilities & BDI_CAP_MAP_DIRECT) {
+ if (capabilities & NOMMU_MAP_DIRECT) {
addr = file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
if (IS_ERR_VALUE(addr)) {
@@ -1397,10 +1422,10 @@ unsigned long do_mmap_pgoff(struct file *file,
* the mapping so we'll have to attempt to copy
* it */
ret = -ENODEV;
- if (!(capabilities & BDI_CAP_MAP_COPY))
+ if (!(capabilities & NOMMU_MAP_COPY))
goto error_just_free;
- capabilities &= ~BDI_CAP_MAP_DIRECT;
+ capabilities &= ~NOMMU_MAP_DIRECT;
} else {
vma->vm_start = region->vm_start = addr;
vma->vm_end = region->vm_end = addr + len;
@@ -1411,7 +1436,7 @@ unsigned long do_mmap_pgoff(struct file *file,
vma->vm_region = region;
/* set up the mapping
- * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
+ * - the region is filled in if NOMMU_MAP_DIRECT is still set
*/
if (file && vma->vm_flags & VM_SHARED)
ret = do_mmap_shared_file(vma);
@@ -1894,7 +1919,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
*/
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
- unsigned long free, allowed, reserve;
+ long free, allowed, reserve;
vm_acct_memory(pages);
@@ -1958,7 +1983,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
*/
if (mm) {
reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
- allowed -= min(mm->total_vm / 32, reserve);
+ allowed -= min_t(long, mm->total_vm / 32, reserve);
}
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
@@ -1983,14 +2008,6 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
}
EXPORT_SYMBOL(filemap_map_pages);
-int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
- unsigned long size, pgoff_t pgoff)
-{
- BUG();
- return 0;
-}
-EXPORT_SYMBOL(generic_file_remap_pages);
-
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, int write)
{
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d503e9ce1c7b..642f38cb175a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -169,8 +169,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
- get_mm_counter(p->mm, MM_SWAPENTS);
+ points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
+ atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
task_unlock(p);
/*
@@ -266,8 +266,6 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
* Don't allow any other task to have access to the reserves.
*/
if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
- if (unlikely(frozen(task)))
- __thaw_task(task);
if (!force_kill)
return OOM_SCAN_ABORT;
}
@@ -353,7 +351,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
struct task_struct *p;
struct task_struct *task;
- pr_info("[ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
rcu_read_lock();
for_each_process(p) {
if (oom_unkillable_task(p, memcg, nodemask))
@@ -369,10 +367,11 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
continue;
}
- pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu %5hd %s\n",
+ pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
atomic_long_read(&task->mm->nr_ptes),
+ mm_nr_pmds(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
@@ -400,20 +399,98 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
}
/*
- * Number of OOM killer invocations (including memcg OOM killer).
- * Primarily used by PM freezer to check for potential races with
- * OOM killed frozen task.
+ * Number of OOM victims in flight
*/
-static atomic_t oom_kills = ATOMIC_INIT(0);
+static atomic_t oom_victims = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
-int oom_kills_count(void)
+bool oom_killer_disabled __read_mostly;
+static DECLARE_RWSEM(oom_sem);
+
+/**
+ * mark_tsk_oom_victim - marks the given task as OOM victim.
+ * @tsk: task to mark
+ *
+ * Has to be called with oom_sem taken for read and never after
+ * oom has been disabled already.
+ */
+void mark_tsk_oom_victim(struct task_struct *tsk)
{
- return atomic_read(&oom_kills);
+ WARN_ON(oom_killer_disabled);
+ /* OOM killer might race with memcg OOM */
+ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
+ return;
+ /*
+ * Make sure that the task is woken up from uninterruptible sleep
+ * if it is frozen because OOM killer wouldn't be able to free
+ * any memory and livelock. freezing_slow_path will tell the freezer
+ * that TIF_MEMDIE tasks should be ignored.
+ */
+ __thaw_task(tsk);
+ atomic_inc(&oom_victims);
+}
+
+/**
+ * unmark_oom_victim - unmarks the current task as OOM victim.
+ *
+ * Wakes up all waiters in oom_killer_disable()
+ */
+void unmark_oom_victim(void)
+{
+ if (!test_and_clear_thread_flag(TIF_MEMDIE))
+ return;
+
+ down_read(&oom_sem);
+ /*
+ * There is no need to signal the last oom_victim if there
+ * is nobody who cares.
+ */
+ if (!atomic_dec_return(&oom_victims) && oom_killer_disabled)
+ wake_up_all(&oom_victims_wait);
+ up_read(&oom_sem);
+}
+
+/**
+ * oom_killer_disable - disable OOM killer
+ *
+ * Forces all page allocations to fail rather than trigger OOM killer.
+ * Will block and wait until all OOM victims are killed.
+ *
+ * The function cannot be called when there are runnable user tasks because
+ * the userspace would see unexpected allocation failures as a result. Any
+ * new usage of this function should be consulted with MM people.
+ *
+ * Returns true if successful and false if the OOM killer cannot be
+ * disabled.
+ */
+bool oom_killer_disable(void)
+{
+ /*
+ * Make sure to not race with an ongoing OOM killer
+ * and that the current is not the victim.
+ */
+ down_write(&oom_sem);
+ if (test_thread_flag(TIF_MEMDIE)) {
+ up_write(&oom_sem);
+ return false;
+ }
+
+ oom_killer_disabled = true;
+ up_write(&oom_sem);
+
+ wait_event(oom_victims_wait, !atomic_read(&oom_victims));
+
+ return true;
}
-void note_oom_kill(void)
+/**
+ * oom_killer_enable - enable OOM killer
+ */
+void oom_killer_enable(void)
{
- atomic_inc(&oom_kills);
+ down_write(&oom_sem);
+ oom_killer_disabled = false;
+ up_write(&oom_sem);
}
#define K(x) ((x) << (PAGE_SHIFT-10))
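The disable/enable pair above is meant to be driven by the PM freezer once all
user space has been frozen. A minimal sketch of such a caller, assuming the
declarations land in <linux/oom.h> as in this series (the example_* names are
invented for illustration):

#include <linux/errno.h>
#include <linux/oom.h>

static int example_freeze_processes(void)
{
        /* ... freeze all user space tasks first ... */

        /*
         * With no runnable user tasks left it is safe to fail allocations
         * instead of OOM killing; back out if the current task is itself
         * an OOM victim (oom_killer_disable() returns false in that case).
         */
        if (!oom_killer_disable())
                return -EBUSY;

        return 0;
}

static void example_thaw_processes(void)
{
        /* re-arm the OOM killer before user space runs again */
        oom_killer_enable();
        /* ... thaw user space tasks ... */
}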
@@ -438,11 +515,14 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just set TIF_MEMDIE so it can die quickly
*/
- if (task_will_free_mem(p)) {
- set_tsk_thread_flag(p, TIF_MEMDIE);
+ task_lock(p);
+ if (p->mm && task_will_free_mem(p)) {
+ mark_tsk_oom_victim(p);
+ task_unlock(p);
put_task_struct(p);
return;
}
+ task_unlock(p);
if (__ratelimit(&oom_rs))
dump_header(p, gfp_mask, order, memcg, nodemask);
@@ -492,6 +572,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
/* mm cannot safely be dereferenced after task_unlock(victim) */
mm = victim->mm;
+ mark_tsk_oom_victim(victim);
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
@@ -522,7 +603,6 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
}
rcu_read_unlock();
- set_tsk_thread_flag(victim, TIF_MEMDIE);
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
put_task_struct(victim);
}
@@ -611,7 +691,7 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
}
/**
- * out_of_memory - kill the "best" process when we run out of memory
+ * __out_of_memory - kill the "best" process when we run out of memory
* @zonelist: zonelist pointer
* @gfp_mask: memory allocation flags
* @order: amount of memory being requested as a power of 2
@@ -623,7 +703,7 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *nodemask, bool force_kill)
{
const nodemask_t *mpol_mask;
@@ -643,9 +723,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
* If current has a pending SIGKILL or is exiting, then automatically
* select it. The goal is to allow it to allocate so that it may
* quickly exit and free its memory.
+ *
+ * But don't select it if current has already released its mm and cleared
+ * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
*/
- if (fatal_signal_pending(current) || task_will_free_mem(current)) {
- set_thread_flag(TIF_MEMDIE);
+ if (current->mm &&
+ (fatal_signal_pending(current) || task_will_free_mem(current))) {
+ mark_tsk_oom_victim(current);
return;
}
@@ -688,6 +772,32 @@ out:
schedule_timeout_killable(1);
}
+/**
+ * out_of_memory - tries to invoke the OOM killer.
+ * @zonelist: zonelist pointer
+ * @gfp_mask: memory allocation flags
+ * @order: amount of memory being requested as a power of 2
+ * @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
+ *
+ * Invokes __out_of_memory() and returns true, unless the OOM killer has
+ * been disabled by oom_killer_disable(), in which case it returns false.
+ */
+bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *nodemask, bool force_kill)
+{
+ bool ret = false;
+
+ down_read(&oom_sem);
+ if (!oom_killer_disabled) {
+ __out_of_memory(zonelist, gfp_mask, order, nodemask, force_kill);
+ ret = true;
+ }
+ up_read(&oom_sem);
+
+ return ret;
+}
+
/*
* The pagefault handler calls here because it is out of memory, so kill a
* memory-hogging task. If any populated zone has ZONE_OOM_LOCKED set, a
@@ -697,12 +807,25 @@ void pagefault_out_of_memory(void)
{
struct zonelist *zonelist;
+ down_read(&oom_sem);
if (mem_cgroup_oom_synchronize(true))
- return;
+ goto unlock;
zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
- out_of_memory(NULL, 0, 0, NULL, false);
+ if (!oom_killer_disabled)
+ __out_of_memory(NULL, 0, 0, NULL, false);
+ else
+ /*
+ * There shouldn't be any user tasks runnable while the
+ * OOM killer is disabled, so the current task has to
+ * be a racing OOM victim that oom_killer_disable()
+ * is waiting for.
+ */
+ WARN_ON(test_thread_flag(TIF_MEMDIE));
+
oom_zonelist_unlock(zonelist, GFP_KERNEL);
}
+unlock:
+ up_read(&oom_sem);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6f4335238e33..45e187b2d971 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1351,7 +1351,7 @@ static void balance_dirty_pages(struct address_space *mapping,
unsigned long task_ratelimit;
unsigned long dirty_ratelimit;
unsigned long pos_ratio;
- struct backing_dev_info *bdi = mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
unsigned long start_time = jiffies;
@@ -1574,7 +1574,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
*/
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
int ratelimit;
int *p;
@@ -1929,7 +1929,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- trace_wbc_writepage(wbc, mapping->backing_dev_info);
+ trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
ret = (*writepage)(page, wbc, data);
if (unlikely(ret)) {
if (ret == AOP_WRITEPAGE_ACTIVATE) {
@@ -2094,10 +2094,12 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
trace_writeback_dirty_page(page, mapping);
if (mapping_cap_account_dirty(mapping)) {
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+
__inc_zone_page_state(page, NR_FILE_DIRTY);
__inc_zone_page_state(page, NR_DIRTIED);
- __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
- __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+ __inc_bdi_stat(bdi, BDI_RECLAIMABLE);
+ __inc_bdi_stat(bdi, BDI_DIRTIED);
task_io_account_write(PAGE_CACHE_SIZE);
current->nr_dirtied++;
this_cpu_inc(bdp_ratelimits);
@@ -2156,7 +2158,7 @@ void account_page_redirty(struct page *page)
if (mapping && mapping_cap_account_dirty(mapping)) {
current->nr_dirtied--;
dec_zone_page_state(page, NR_DIRTIED);
- dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+ dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
}
}
EXPORT_SYMBOL(account_page_redirty);
@@ -2168,9 +2170,12 @@ EXPORT_SYMBOL(account_page_redirty);
*/
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
+ int ret;
+
wbc->pages_skipped++;
+ ret = __set_page_dirty_nobuffers(page);
account_page_redirty(page);
- return __set_page_dirty_nobuffers(page);
+ return ret;
}
EXPORT_SYMBOL(redirty_page_for_writepage);
@@ -2295,7 +2300,7 @@ int clear_page_dirty_for_io(struct page *page)
*/
if (TestClearPageDirty(page)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
- dec_bdi_stat(mapping->backing_dev_info,
+ dec_bdi_stat(inode_to_bdi(mapping->host),
BDI_RECLAIMABLE);
return 1;
}
@@ -2308,14 +2313,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
int test_clear_page_writeback(struct page *page)
{
struct address_space *mapping = page_mapping(page);
- unsigned long memcg_flags;
struct mem_cgroup *memcg;
- bool locked;
int ret;
- memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+ memcg = mem_cgroup_begin_page_stat(page);
if (mapping) {
- struct backing_dev_info *bdi = mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long flags;
spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2338,21 +2341,19 @@ int test_clear_page_writeback(struct page *page)
dec_zone_page_state(page, NR_WRITEBACK);
inc_zone_page_state(page, NR_WRITTEN);
}
- mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags);
+ mem_cgroup_end_page_stat(memcg);
return ret;
}
int __test_set_page_writeback(struct page *page, bool keep_write)
{
struct address_space *mapping = page_mapping(page);
- unsigned long memcg_flags;
struct mem_cgroup *memcg;
- bool locked;
int ret;
- memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+ memcg = mem_cgroup_begin_page_stat(page);
if (mapping) {
- struct backing_dev_info *bdi = mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long flags;
spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2380,7 +2381,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
inc_zone_page_state(page, NR_WRITEBACK);
}
- mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags);
+ mem_cgroup_end_page_stat(memcg);
return ret;
}
@@ -2406,12 +2407,7 @@ EXPORT_SYMBOL(mapping_tagged);
*/
void wait_for_stable_page(struct page *page)
{
- struct address_space *mapping = page_mapping(page);
- struct backing_dev_info *bdi = mapping->backing_dev_info;
-
- if (!bdi_cap_stable_pages_required(bdi))
- return;
-
- wait_on_page_writeback(page);
+ if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+ wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
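With the backing_dev_info now derived via inode_to_bdi(mapping->host),
wait_for_stable_page() only blocks when the underlying device actually asks
for stable pages. A hypothetical ->page_mkwrite() fragment using it could look
like the sketch below (illustrative only; example_page_mkwrite() is not part
of this patch and the 3.19-era vm_operations signature is assumed):

#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_page_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct page *page = vmf->page;

        lock_page(page);
        /* no-op unless the BDI requires stable pages (e.g. checksumming) */
        wait_for_stable_page(page);
        /* ... map buffers and mark the page dirty here ... */
        return VM_FAULT_LOCKED;
}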
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7633c503a116..a47f0b229a1a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -25,6 +25,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
+#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -172,7 +173,7 @@ static void __free_pages_ok(struct page *page, unsigned int order);
* 1G machine -> (16M dma, 784M normal, 224M high)
* NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
* HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
- * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
+ * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
*
* TBD: should special case ZONE_DMA32 machines here - in those we normally
* don't need any ZONE_NORMAL reservation
@@ -244,8 +245,6 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
PB_migrate, PB_migrate_end);
}
-bool oom_killer_disabled __read_mostly;
-
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
@@ -381,36 +380,6 @@ void prep_compound_page(struct page *page, unsigned long order)
}
}
-/* update __split_huge_page_refcount if you change this function */
-static int destroy_compound_page(struct page *page, unsigned long order)
-{
- int i;
- int nr_pages = 1 << order;
- int bad = 0;
-
- if (unlikely(compound_order(page) != order)) {
- bad_page(page, "wrong compound order", 0);
- bad++;
- }
-
- __ClearPageHead(page);
-
- for (i = 1; i < nr_pages; i++) {
- struct page *p = page + i;
-
- if (unlikely(!PageTail(p))) {
- bad_page(page, "PageTail not set", 0);
- bad++;
- } else if (unlikely(p->first_page != page)) {
- bad_page(page, "first_page not consistent", 0);
- bad++;
- }
- __ClearPageTail(p);
- }
-
- return bad;
-}
-
static inline void prep_zero_page(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
@@ -552,17 +521,15 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
return 0;
if (page_is_guard(buddy) && page_order(buddy) == order) {
- VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
-
if (page_zone_id(page) != page_zone_id(buddy))
return 0;
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+
return 1;
}
if (PageBuddy(buddy) && page_order(buddy) == order) {
- VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
-
/*
* zone check is done late to avoid uselessly
* calculating zone/node ids for pages that could
@@ -571,6 +538,8 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
if (page_zone_id(page) != page_zone_id(buddy))
return 0;
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+
return 1;
}
return 0;
@@ -613,10 +582,7 @@ static inline void __free_one_page(struct page *page,
int max_order = MAX_ORDER;
VM_BUG_ON(!zone_is_initialized(zone));
-
- if (unlikely(PageCompound(page)))
- if (unlikely(destroy_compound_page(page, order)))
- return;
+ VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
VM_BUG_ON(migratetype == -1);
if (is_migrate_isolate(migratetype)) {
@@ -797,21 +763,41 @@ static void free_one_page(struct zone *zone,
spin_unlock(&zone->lock);
}
+static int free_tail_pages_check(struct page *head_page, struct page *page)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_VM))
+ return 0;
+ if (unlikely(!PageTail(page))) {
+ bad_page(page, "PageTail not set", 0);
+ return 1;
+ }
+ if (unlikely(page->first_page != head_page)) {
+ bad_page(page, "first_page not consistent", 0);
+ return 1;
+ }
+ return 0;
+}
+
static bool free_pages_prepare(struct page *page, unsigned int order)
{
- int i;
- int bad = 0;
+ bool compound = PageCompound(page);
+ int i, bad = 0;
VM_BUG_ON_PAGE(PageTail(page), page);
- VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
+ VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
+ kasan_free_pages(page, order);
if (PageAnon(page))
page->mapping = NULL;
- for (i = 0; i < (1 << order); i++)
+ bad += free_pages_check(page);
+ for (i = 1; i < (1 << order); i++) {
+ if (compound)
+ bad += free_tail_pages_check(page, page + i);
bad += free_pages_check(page + i);
+ }
if (bad)
return false;
@@ -970,7 +956,8 @@ static inline int check_new_page(struct page *page)
return 0;
}
-static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+ int alloc_flags)
{
int i;
@@ -985,6 +972,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
+ kasan_alloc_pages(page, order);
if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags);
@@ -994,6 +982,14 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
set_page_owner(page, order, gfp_flags);
+ /*
+ * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+ * allocate the page. The expectation is that the caller is taking
+ * steps that will free more memory. The caller should avoid the page
+ * being used for !PFMEMALLOC purposes.
+ */
+ page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
return 0;
}
@@ -1130,39 +1126,34 @@ static void change_pageblock_range(struct page *pageblock_page,
}
/*
- * If breaking a large block of pages, move all free pages to the preferred
- * allocation list. If falling back for a reclaimable kernel allocation, be
- * more aggressive about taking ownership of free pages.
+ * When we are falling back to another migratetype during allocation, try to
+ * steal extra free pages from the same pageblocks to satisfy further
+ * allocations, instead of polluting multiple pageblocks.
*
- * On the other hand, never change migration type of MIGRATE_CMA pageblocks
- * nor move CMA pages to different free lists. We don't want unmovable pages
- * to be allocated from MIGRATE_CMA areas.
+ * If we are stealing a relatively large buddy page, it is likely there will
+ * be more free pages in the pageblock, so try to steal them all. For
+ * reclaimable and unmovable allocations, we steal regardless of page size,
+ * as fragmentation caused by those allocations polluting movable pageblocks
+ * is worse than movable allocations stealing from unmovable and reclaimable
+ * pageblocks.
*
- * Returns the new migratetype of the pageblock (or the same old migratetype
- * if it was unchanged).
+ * If we claim more than half of the pageblock, change pageblock's migratetype
+ * as well.
*/
-static int try_to_steal_freepages(struct zone *zone, struct page *page,
+static void try_to_steal_freepages(struct zone *zone, struct page *page,
int start_type, int fallback_type)
{
int current_order = page_order(page);
- /*
- * When borrowing from MIGRATE_CMA, we need to release the excess
- * buddy pages to CMA itself. We also ensure the freepage_migratetype
- * is set to CMA so it is returned to the correct freelist in case
- * the page ends up being not actually allocated from the pcp lists.
- */
- if (is_migrate_cma(fallback_type))
- return fallback_type;
-
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
change_pageblock_range(page, current_order, start_type);
- return start_type;
+ return;
}
if (current_order >= pageblock_order / 2 ||
start_type == MIGRATE_RECLAIMABLE ||
+ start_type == MIGRATE_UNMOVABLE ||
page_group_by_mobility_disabled) {
int pages;
@@ -1170,15 +1161,9 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
/* Claim the whole block if over half of it is free */
if (pages >= (1 << (pageblock_order-1)) ||
- page_group_by_mobility_disabled) {
-
+ page_group_by_mobility_disabled)
set_pageblock_migratetype(page, start_type);
- return start_type;
- }
-
}
-
- return fallback_type;
}
/* Remove an element from the buddy allocator from the fallback list */
@@ -1188,14 +1173,15 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
struct free_area *area;
unsigned int current_order;
struct page *page;
- int migratetype, new_type, i;
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1;
current_order >= order && current_order <= MAX_ORDER-1;
--current_order) {
+ int i;
for (i = 0;; i++) {
- migratetype = fallbacks[start_migratetype][i];
+ int migratetype = fallbacks[start_migratetype][i];
+ int buddy_type = start_migratetype;
/* MIGRATE_RESERVE handled later if necessary */
if (migratetype == MIGRATE_RESERVE)
@@ -1209,25 +1195,39 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
struct page, lru);
area->nr_free--;
- new_type = try_to_steal_freepages(zone, page,
- start_migratetype,
- migratetype);
+ if (!is_migrate_cma(migratetype)) {
+ try_to_steal_freepages(zone, page,
+ start_migratetype,
+ migratetype);
+ } else {
+ /*
+ * When borrowing from MIGRATE_CMA, we need to
+ * release the excess buddy pages to CMA
+ * itself, and we do not try to steal extra
+ * free pages.
+ */
+ buddy_type = migratetype;
+ }
/* Remove the page from the freelists */
list_del(&page->lru);
rmv_page_order(page);
expand(zone, page, order, current_order, area,
- new_type);
- /* The freepage_migratetype may differ from pageblock's
+ buddy_type);
+
+ /*
+ * The freepage_migratetype may differ from pageblock's
* migratetype depending on the decisions in
- * try_to_steal_freepages. This is OK as long as it does
- * not differ for MIGRATE_CMA type.
+ * try_to_steal_freepages(). This is OK as long as it
+ * does not differ for MIGRATE_CMA pageblocks. For CMA
+ * we need to make sure unallocated pages flushed from
+ * pcp lists are returned to the correct freelist.
*/
- set_freepage_migratetype(page, new_type);
+ set_freepage_migratetype(page, buddy_type);
trace_mm_page_alloc_extfrag(page, order, current_order,
- start_migratetype, migratetype, new_type);
+ start_migratetype, migratetype);
return page;
}
@@ -1642,9 +1642,7 @@ int split_free_page(struct page *page)
}
/*
- * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
- * we cheat by calling it from here, in the order > 0 path. Saves a branch
- * or two.
+ * Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
@@ -1655,7 +1653,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
struct page *page;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
-again:
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -1711,8 +1708,6 @@ again:
local_irq_restore(flags);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
- if (prep_new_page(page, order, gfp_flags))
- goto again;
return page;
failed:
@@ -2033,10 +2028,10 @@ static void reset_alloc_batches(struct zone *preferred_zone)
* a page.
*/
static struct page *
-get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
- struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
- struct zone *preferred_zone, int classzone_idx, int migratetype)
+get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
+ const struct alloc_context *ac)
{
+ struct zonelist *zonelist = ac->zonelist;
struct zoneref *z;
struct page *page = NULL;
struct zone *zone;
@@ -2055,8 +2050,8 @@ zonelist_scan:
* Scan zonelist, looking for a zone with enough free.
* See also __cpuset_node_allowed() comment in kernel/cpuset.c.
*/
- for_each_zone_zonelist_nodemask(zone, z, zonelist,
- high_zoneidx, nodemask) {
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
+ ac->nodemask) {
unsigned long mark;
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
@@ -2073,7 +2068,7 @@ zonelist_scan:
* time the page has in memory before being reclaimed.
*/
if (alloc_flags & ALLOC_FAIR) {
- if (!zone_local(preferred_zone, zone))
+ if (!zone_local(ac->preferred_zone, zone))
break;
if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
nr_fair_skipped++;
@@ -2111,7 +2106,7 @@ zonelist_scan:
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
if (!zone_watermark_ok(zone, order, mark,
- classzone_idx, alloc_flags)) {
+ ac->classzone_idx, alloc_flags)) {
int ret;
/* Checked here to keep the fast path fast */
@@ -2132,7 +2127,7 @@ zonelist_scan:
}
if (zone_reclaim_mode == 0 ||
- !zone_allows_reclaim(preferred_zone, zone))
+ !zone_allows_reclaim(ac->preferred_zone, zone))
goto this_zone_full;
/*
@@ -2154,7 +2149,7 @@ zonelist_scan:
default:
/* did we reclaim enough */
if (zone_watermark_ok(zone, order, mark,
- classzone_idx, alloc_flags))
+ ac->classzone_idx, alloc_flags))
goto try_this_zone;
/*
@@ -2175,27 +2170,18 @@ zonelist_scan:
}
try_this_zone:
- page = buffered_rmqueue(preferred_zone, zone, order,
- gfp_mask, migratetype);
- if (page)
- break;
+ page = buffered_rmqueue(ac->preferred_zone, zone, order,
+ gfp_mask, ac->migratetype);
+ if (page) {
+ if (prep_new_page(page, order, gfp_mask, alloc_flags))
+ goto try_this_zone;
+ return page;
+ }
this_zone_full:
if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
zlc_mark_zone_full(zonelist, z);
}
- if (page) {
- /*
- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
- * necessary to allocate the page. The expectation is
- * that the caller is taking steps that will free more
- * memory. The caller should avoid the page being used
- * for !PFMEMALLOC purposes.
- */
- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
- return page;
- }
-
/*
* The first pass makes sure allocations are spread fairly within the
* local node. However, the local node might have free pages left
@@ -2208,7 +2194,7 @@ this_zone_full:
alloc_flags &= ~ALLOC_FAIR;
if (nr_fair_skipped) {
zonelist_rescan = true;
- reset_alloc_batches(preferred_zone);
+ reset_alloc_batches(ac->preferred_zone);
}
if (nr_online_nodes > 1)
zonelist_rescan = true;
@@ -2330,44 +2316,44 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, struct zone *preferred_zone,
- int classzone_idx, int migratetype)
+ const struct alloc_context *ac, unsigned long *did_some_progress)
{
struct page *page;
- /* Acquire the per-zone oom lock for each zone */
- if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
- schedule_timeout_uninterruptible(1);
- return NULL;
- }
+ *did_some_progress = 0;
/*
- * PM-freezer should be notified that there might be an OOM killer on
- * its way to kill and wake somebody up. This is too early and we might
- * end up not killing anything but false positives are acceptable.
- * See freeze_processes.
+ * Acquire the per-zone oom lock for each zone. If that
+ * fails, somebody else is making progress for us.
*/
- note_oom_kill();
+ if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) {
+ *did_some_progress = 1;
+ schedule_timeout_uninterruptible(1);
+ return NULL;
+ }
/*
* Go through the zonelist yet one more time, keep very high watermark
* here, this is only to catch a parallel oom killing, we must fail if
* we're still under heavy pressure.
*/
- page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
- order, zonelist, high_zoneidx,
- ALLOC_WMARK_HIGH|ALLOC_CPUSET,
- preferred_zone, classzone_idx, migratetype);
+ page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
+ ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
if (page)
goto out;
if (!(gfp_mask & __GFP_NOFAIL)) {
+ /* Coredumps can quickly deplete all memory reserves */
+ if (current->flags & PF_DUMPCORE)
+ goto out;
/* The OOM killer will not help higher order allocs */
if (order > PAGE_ALLOC_COSTLY_ORDER)
goto out;
/* The OOM killer does not needlessly kill tasks for lowmem */
- if (high_zoneidx < ZONE_NORMAL)
+ if (ac->high_zoneidx < ZONE_NORMAL)
+ goto out;
+ /* The OOM killer does not compensate for light reclaim */
+ if (!(gfp_mask & __GFP_FS))
goto out;
/*
* GFP_THISNODE contains __GFP_NORETRY and we never hit this.
@@ -2380,10 +2366,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
goto out;
}
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order, nodemask, false);
-
+ if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false))
+ *did_some_progress = 1;
out:
- oom_zonelist_unlock(zonelist, gfp_mask);
+ oom_zonelist_unlock(ac->zonelist, gfp_mask);
return page;
}
@@ -2391,10 +2377,9 @@ out:
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int classzone_idx, int migratetype, enum migrate_mode mode,
- int *contended_compaction, bool *deferred_compaction)
+ int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended_compaction,
+ bool *deferred_compaction)
{
unsigned long compact_result;
struct page *page;
@@ -2403,10 +2388,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
current->flags |= PF_MEMALLOC;
- compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
- nodemask, mode,
- contended_compaction,
- alloc_flags, classzone_idx);
+ compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
+ mode, contended_compaction);
current->flags &= ~PF_MEMALLOC;
switch (compact_result) {
@@ -2425,10 +2408,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
*/
count_vm_event(COMPACTSTALL);
- page = get_page_from_freelist(gfp_mask, nodemask,
- order, zonelist, high_zoneidx,
- alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, classzone_idx, migratetype);
+ page = get_page_from_freelist(gfp_mask, order,
+ alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
if (page) {
struct zone *zone = page_zone(page);
@@ -2452,10 +2433,9 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int classzone_idx, int migratetype, enum migrate_mode mode,
- int *contended_compaction, bool *deferred_compaction)
+ int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended_compaction,
+ bool *deferred_compaction)
{
return NULL;
}
@@ -2463,8 +2443,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
/* Perform direct synchronous page reclaim */
static int
-__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
- nodemask_t *nodemask)
+__perform_reclaim(gfp_t gfp_mask, unsigned int order,
+ const struct alloc_context *ac)
{
struct reclaim_state reclaim_state;
int progress;
@@ -2478,7 +2458,8 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
- progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+ progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
+ ac->nodemask);
current->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
@@ -2492,28 +2473,23 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int classzone_idx, int migratetype, unsigned long *did_some_progress)
+ int alloc_flags, const struct alloc_context *ac,
+ unsigned long *did_some_progress)
{
struct page *page = NULL;
bool drained = false;
- *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
- nodemask);
+ *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
if (unlikely(!(*did_some_progress)))
return NULL;
/* After successful reclaim, reconsider all zones for allocation */
if (IS_ENABLED(CONFIG_NUMA))
- zlc_clear_zones_full(zonelist);
+ zlc_clear_zones_full(ac->zonelist);
retry:
- page = get_page_from_freelist(gfp_mask, nodemask, order,
- zonelist, high_zoneidx,
- alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, classzone_idx,
- migratetype);
+ page = get_page_from_freelist(gfp_mask, order,
+ alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
/*
* If an allocation failed after direct reclaim, it could be because
@@ -2534,36 +2510,30 @@ retry:
*/
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, struct zone *preferred_zone,
- int classzone_idx, int migratetype)
+ const struct alloc_context *ac)
{
struct page *page;
do {
- page = get_page_from_freelist(gfp_mask, nodemask, order,
- zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
- preferred_zone, classzone_idx, migratetype);
+ page = get_page_from_freelist(gfp_mask, order,
+ ALLOC_NO_WATERMARKS, ac);
if (!page && gfp_mask & __GFP_NOFAIL)
- wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+ wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
+ HZ/50);
} while (!page && (gfp_mask & __GFP_NOFAIL));
return page;
}
-static void wake_all_kswapds(unsigned int order,
- struct zonelist *zonelist,
- enum zone_type high_zoneidx,
- struct zone *preferred_zone,
- nodemask_t *nodemask)
+static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
{
struct zoneref *z;
struct zone *zone;
- for_each_zone_zonelist_nodemask(zone, z, zonelist,
- high_zoneidx, nodemask)
- wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
+ ac->high_zoneidx, ac->nodemask)
+ wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
}
static inline int
@@ -2622,9 +2592,7 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, struct zone *preferred_zone,
- int classzone_idx, int migratetype)
+ struct alloc_context *ac)
{
const gfp_t wait = gfp_mask & __GFP_WAIT;
struct page *page = NULL;
@@ -2658,10 +2626,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
-restart:
+retry:
if (!(gfp_mask & __GFP_NO_KSWAPD))
- wake_all_kswapds(order, zonelist, high_zoneidx,
- preferred_zone, nodemask);
+ wake_all_kswapds(order, ac);
/*
* OK, we're below the kswapd watermark and have kicked background
@@ -2674,18 +2641,16 @@ restart:
* Find the true preferred zone if the allocation is unconstrained by
* cpusets.
*/
- if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
+ if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
struct zoneref *preferred_zoneref;
- preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
- NULL, &preferred_zone);
- classzone_idx = zonelist_zone_idx(preferred_zoneref);
+ preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, NULL, &ac->preferred_zone);
+ ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
}
-rebalance:
/* This is the last chance, in general, before the goto nopage. */
- page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
- high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, classzone_idx, migratetype);
+ page = get_page_from_freelist(gfp_mask, order,
+ alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
if (page)
goto got_pg;
@@ -2696,11 +2661,10 @@ rebalance:
* the allocation is high priority and these type of
* allocations are system rather than user orientated
*/
- zonelist = node_zonelist(numa_node_id(), gfp_mask);
+ ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
+ page = __alloc_pages_high_priority(gfp_mask, order, ac);
- page = __alloc_pages_high_priority(gfp_mask, order,
- zonelist, high_zoneidx, nodemask,
- preferred_zone, classzone_idx, migratetype);
if (page) {
goto got_pg;
}
@@ -2729,11 +2693,9 @@ rebalance:
* Try direct compaction. The first pass is asynchronous. Subsequent
* attempts after direct reclaim are synchronous
*/
- page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
- high_zoneidx, nodemask, alloc_flags,
- preferred_zone,
- classzone_idx, migratetype,
- migration_mode, &contended_compaction,
+ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
+ migration_mode,
+ &contended_compaction,
&deferred_compaction);
if (page)
goto got_pg;
@@ -2779,74 +2741,40 @@ rebalance:
migration_mode = MIGRATE_SYNC_LIGHT;
/* Try direct reclaim and then allocating */
- page = __alloc_pages_direct_reclaim(gfp_mask, order,
- zonelist, high_zoneidx,
- nodemask,
- alloc_flags, preferred_zone,
- classzone_idx, migratetype,
- &did_some_progress);
+ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
+ &did_some_progress);
if (page)
goto got_pg;
- /*
- * If we failed to make any progress reclaiming, then we are
- * running out of options and have to consider going OOM
- */
- if (!did_some_progress) {
- if (oom_gfp_allowed(gfp_mask)) {
- if (oom_killer_disabled)
- goto nopage;
- /* Coredumps can quickly deplete all memory reserves */
- if ((current->flags & PF_DUMPCORE) &&
- !(gfp_mask & __GFP_NOFAIL))
- goto nopage;
- page = __alloc_pages_may_oom(gfp_mask, order,
- zonelist, high_zoneidx,
- nodemask, preferred_zone,
- classzone_idx, migratetype);
- if (page)
- goto got_pg;
-
- if (!(gfp_mask & __GFP_NOFAIL)) {
- /*
- * The oom killer is not called for high-order
- * allocations that may fail, so if no progress
- * is being made, there are no other options and
- * retrying is unlikely to help.
- */
- if (order > PAGE_ALLOC_COSTLY_ORDER)
- goto nopage;
- /*
- * The oom killer is not called for lowmem
- * allocations to prevent needlessly killing
- * innocent tasks.
- */
- if (high_zoneidx < ZONE_NORMAL)
- goto nopage;
- }
-
- goto restart;
- }
- }
-
/* Check if we should retry the allocation */
pages_reclaimed += did_some_progress;
if (should_alloc_retry(gfp_mask, order, did_some_progress,
pages_reclaimed)) {
+ /*
+ * If we fail to make progress by freeing individual
+ * pages, but the allocation wants us to keep going,
+ * start OOM killing tasks.
+ */
+ if (!did_some_progress) {
+ page = __alloc_pages_may_oom(gfp_mask, order, ac,
+ &did_some_progress);
+ if (page)
+ goto got_pg;
+ if (!did_some_progress)
+ goto nopage;
+ }
/* Wait for some write requests to complete then retry */
- wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
- goto rebalance;
+ wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
+ goto retry;
} else {
/*
* High-order allocations do not necessarily loop after
* direct reclaim and reclaim/compaction depends on compaction
* being called after reclaim so call directly if necessary
*/
- page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
- high_zoneidx, nodemask, alloc_flags,
- preferred_zone,
- classzone_idx, migratetype,
- migration_mode, &contended_compaction,
+ page = __alloc_pages_direct_compact(gfp_mask, order,
+ alloc_flags, ac, migration_mode,
+ &contended_compaction,
&deferred_compaction);
if (page)
goto got_pg;
@@ -2854,11 +2782,7 @@ rebalance:
nopage:
warn_alloc_failed(gfp_mask, order, NULL);
- return page;
got_pg:
- if (kmemcheck_enabled)
- kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
return page;
}
@@ -2869,14 +2793,16 @@ struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
- enum zone_type high_zoneidx = gfp_zone(gfp_mask);
- struct zone *preferred_zone;
struct zoneref *preferred_zoneref;
struct page *page = NULL;
- int migratetype = gfpflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
- int classzone_idx;
+ gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
+ struct alloc_context ac = {
+ .high_zoneidx = gfp_zone(gfp_mask),
+ .nodemask = nodemask,
+ .migratetype = gfpflags_to_migratetype(gfp_mask),
+ };
gfp_mask &= gfp_allowed_mask;
@@ -2895,37 +2821,40 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
- if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
+ if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
+ /* We set it here, as __alloc_pages_slowpath might have changed it */
+ ac.zonelist = zonelist;
/* The preferred zone is used for statistics later */
- preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
- nodemask ? : &cpuset_current_mems_allowed,
- &preferred_zone);
- if (!preferred_zone)
+ preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
+ ac.nodemask ? : &cpuset_current_mems_allowed,
+ &ac.preferred_zone);
+ if (!ac.preferred_zone)
goto out;
- classzone_idx = zonelist_zone_idx(preferred_zoneref);
+ ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
/* First allocation attempt */
- page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
- zonelist, high_zoneidx, alloc_flags,
- preferred_zone, classzone_idx, migratetype);
+ alloc_mask = gfp_mask|__GFP_HARDWALL;
+ page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
if (unlikely(!page)) {
/*
* Runtime PM, block IO and its error handling path
* can deadlock because I/O on the device might not
* complete.
*/
- gfp_mask = memalloc_noio_flags(gfp_mask);
- page = __alloc_pages_slowpath(gfp_mask, order,
- zonelist, high_zoneidx, nodemask,
- preferred_zone, classzone_idx, migratetype);
+ alloc_mask = memalloc_noio_flags(gfp_mask);
+
+ page = __alloc_pages_slowpath(alloc_mask, order, &ac);
}
- trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+ if (kmemcheck_enabled && page)
+ kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
+ trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
out:
/*
@@ -3945,18 +3874,29 @@ static int __build_all_zonelists(void *data)
return 0;
}
+static noinline void __init
+build_all_zonelists_init(void)
+{
+ __build_all_zonelists(NULL);
+ mminit_verify_zonelist();
+ cpuset_init_current_mems_allowed();
+}
+
/*
* Called with zonelists_mutex held always
* unless system_state == SYSTEM_BOOTING.
+ *
+ * __ref due to (1) call of __meminit annotated setup_zone_pageset
+ * [we're only called with non-NULL zone through __meminit paths] and
+ * (2) call of __init annotated helper build_all_zonelists_init
+ * [protected by SYSTEM_BOOTING].
*/
void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
{
set_zonelist_order();
if (system_state == SYSTEM_BOOTING) {
- __build_all_zonelists(NULL);
- mminit_verify_zonelist();
- cpuset_init_current_mems_allowed();
+ build_all_zonelists_init();
} else {
#ifdef CONFIG_MEMORY_HOTPLUG
if (zone)
@@ -5059,8 +4999,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
pgdat->node_start_pfn = node_start_pfn;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
- printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
- (u64) start_pfn << PAGE_SHIFT, (u64) (end_pfn << PAGE_SHIFT) - 1);
+ pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
+ (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
#endif
calculate_node_totalpages(pgdat, start_pfn, end_pfn,
zones_size, zholes_size);
@@ -5432,9 +5372,10 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
arch_zone_highest_possible_pfn[i])
pr_cont("empty\n");
else
- pr_cont("[mem %0#10lx-%0#10lx]\n",
- arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
- (arch_zone_highest_possible_pfn[i]
+ pr_cont("[mem %#018Lx-%#018Lx]\n",
+ (u64)arch_zone_lowest_possible_pfn[i]
+ << PAGE_SHIFT,
+ ((u64)arch_zone_highest_possible_pfn[i]
<< PAGE_SHIFT) - 1);
}
@@ -5442,15 +5383,16 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
pr_info("Movable zone start for each node\n");
for (i = 0; i < MAX_NUMNODES; i++) {
if (zone_movable_pfn[i])
- pr_info(" Node %d: %#010lx\n", i,
- zone_movable_pfn[i] << PAGE_SHIFT);
+ pr_info(" Node %d: %#018Lx\n", i,
+ (u64)zone_movable_pfn[i] << PAGE_SHIFT);
}
/* Print out the early node map */
pr_info("Early memory node ranges\n");
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
- pr_info(" node %3d: [mem %#010lx-%#010lx]\n", nid,
- start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
+ (u64)start_pfn << PAGE_SHIFT,
+ ((u64)end_pfn << PAGE_SHIFT) - 1);
/* Initialise every node */
mminit_verify_pageflags_layout();
diff --git a/mm/page_counter.c b/mm/page_counter.c
index a009574fbba9..11b4beda14ba 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -166,18 +166,19 @@ int page_counter_limit(struct page_counter *counter, unsigned long limit)
/**
* page_counter_memparse - memparse() for page counter limits
* @buf: string to parse
+ * @max: string meaning maximum possible value
* @nr_pages: returns the result in number of pages
*
* Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
* limited to %PAGE_COUNTER_MAX.
*/
-int page_counter_memparse(const char *buf, unsigned long *nr_pages)
+int page_counter_memparse(const char *buf, const char *max,
+ unsigned long *nr_pages)
{
- char unlimited[] = "-1";
char *end;
u64 bytes;
- if (!strncmp(buf, unlimited, sizeof(unlimited))) {
+ if (!strcmp(buf, max)) {
*nr_pages = PAGE_COUNTER_MAX;
return 0;
}
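A minimal usage sketch for the new @max argument: legacy memory controller
files can keep accepting "-1" for "no limit", while unified-hierarchy files
accept the literal string "max" (the wrapper below is invented for
illustration and is not part of this patch):

#include <linux/page_counter.h>
#include <linux/types.h>

static int example_parse_limit(const char *buf, bool legacy_interface,
                               unsigned long *nr_pages)
{
        /* both spellings end up clamped to PAGE_COUNTER_MAX */
        return page_counter_memparse(buf, legacy_interface ? "-1" : "max",
                                     nr_pages);
}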
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9ab4a9b5bc09..0993f5f36b01 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -59,20 +59,19 @@ void __reset_page_owner(struct page *page, unsigned int order)
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
- struct page_ext *page_ext;
- struct stack_trace *trace;
-
- page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(page);
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .max_entries = ARRAY_SIZE(page_ext->trace_entries),
+ .entries = &page_ext->trace_entries[0],
+ .skip = 3,
+ };
- trace = &page_ext->trace;
- trace->nr_entries = 0;
- trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
- trace->entries = &page_ext->trace_entries[0];
- trace->skip = 3;
- save_stack_trace(&page_ext->trace);
+ save_stack_trace(&trace);
page_ext->order = order;
page_ext->gfp_mask = gfp_mask;
+ page_ext->nr_entries = trace.nr_entries;
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
@@ -84,6 +83,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
int ret;
int pageblock_mt, page_mt;
char *kbuf;
+ struct stack_trace trace = {
+ .nr_entries = page_ext->nr_entries,
+ .entries = &page_ext->trace_entries[0],
+ };
kbuf = kmalloc(count, GFP_KERNEL);
if (!kbuf)
@@ -121,8 +124,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
if (ret >= count)
goto err;
- ret += snprint_stack_trace(kbuf + ret, count - ret,
- &page_ext->trace, 0);
+ ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
if (ret >= count)
goto err;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index ad83195521f2..75c1f2878519 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -35,7 +35,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
do {
again:
next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd)) {
+ if (pmd_none(*pmd) || !walk->vma) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
@@ -59,7 +59,7 @@ again:
continue;
split_huge_page_pmd_mm(walk->mm, addr, pmd);
- if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ if (pmd_trans_unstable(pmd))
goto again;
err = walk_pte_range(pmd, addr, next, walk);
if (err)
@@ -86,9 +86,7 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
break;
continue;
}
- if (walk->pud_entry)
- err = walk->pud_entry(pud, addr, next, walk);
- if (!err && (walk->pmd_entry || walk->pte_entry))
+ if (walk->pmd_entry || walk->pte_entry)
err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
@@ -97,6 +95,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
return err;
}
+static int walk_pgd_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ pgd_t *pgd;
+ unsigned long next;
+ int err = 0;
+
+ pgd = pgd_offset(walk->mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd)) {
+ if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, walk);
+ if (err)
+ break;
+ continue;
+ }
+ if (walk->pmd_entry || walk->pte_entry)
+ err = walk_pud_range(pgd, addr, next, walk);
+ if (err)
+ break;
+ } while (pgd++, addr = next, addr != end);
+
+ return err;
+}
+
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
unsigned long end)
@@ -105,10 +129,10 @@ static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
return boundary < end ? boundary : end;
}
-static int walk_hugetlb_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
+static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
+ struct vm_area_struct *vma = walk->vma;
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
@@ -121,15 +145,14 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
if (pte && walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
if (err)
- return err;
+ break;
} while (addr = next, addr != end);
- return 0;
+ return err;
}
#else /* CONFIG_HUGETLB_PAGE */
-static int walk_hugetlb_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
+static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
return 0;
@@ -137,112 +160,138 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
#endif /* CONFIG_HUGETLB_PAGE */
+/*
+ * Decide whether we really walk over the current vma on [@start, @end)
+ * or skip it via the returned value. Return 0 if we do walk over the
+ * current vma, and return 1 if we skip the vma. A negative value means an
+ * error, in which case we abort the current walk.
+ */
+static int walk_page_test(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct vm_area_struct *vma = walk->vma;
+
+ if (walk->test_walk)
+ return walk->test_walk(start, end, walk);
+
+ /*
+ * A VM_PFNMAP vma has no valid struct pages behind its range, so we do
+ * not walk over it as we do for normal vmas. However, some callers are
+ * interested in handling hole ranges and do not want to simply ignore any
+ * address range. Such users are expected to define a ->pte_hole() callback,
+ * so delegate VM_PFNMAP vmas to that callback.
+ */
+ if (vma->vm_flags & VM_PFNMAP) {
+ int err = 1;
+ if (walk->pte_hole)
+ err = walk->pte_hole(start, end, walk);
+ return err ? err : 1;
+ }
+ return 0;
+}
+
+static int __walk_page_range(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ int err = 0;
+ struct vm_area_struct *vma = walk->vma;
+
+ if (vma && is_vm_hugetlb_page(vma)) {
+ if (walk->hugetlb_entry)
+ err = walk_hugetlb_range(start, end, walk);
+ } else
+ err = walk_pgd_range(start, end, walk);
+ return err;
+}
/**
- * walk_page_range - walk a memory map's page tables with a callback
- * @addr: starting address
- * @end: ending address
- * @walk: set of callbacks to invoke for each level of the tree
+ * walk_page_range - walk page table with caller specific callbacks
*
- * Recursively walk the page table for the memory area in a VMA,
- * calling supplied callbacks. Callbacks are called in-order (first
- * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
- * etc.). If lower-level callbacks are omitted, walking depth is reduced.
+ * Recursively walk the page table tree of the process represented by @walk->mm
+ * within the virtual address range [@start, @end). During walking, we can do
+ * some caller-specific work for each entry by setting up pmd_entry(),
+ * pte_entry(), and/or hugetlb_entry(). If any of these callbacks is not
+ * set up, the associated entries/pages are simply ignored.
+ * The return values of these callbacks are commonly defined as follows:
+ * - 0 : the current entry was handled; if the end address has not been
+ * reached yet, continue the walk.
+ * - >0 : the current entry was handled; return to the caller with a
+ * caller-specific value.
+ * - <0 : handling the current entry failed; return to the caller with an
+ * error code.
*
- * Each callback receives an entry pointer and the start and end of the
- * associated range, and a copy of the original mm_walk for access to
- * the ->private or ->mm fields.
+ * Before starting to walk a page table, some callers want to check whether
+ * they really want to walk over the current vma, typically by checking
+ * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
+ * purpose.
*
- * Usually no locks are taken, but splitting transparent huge page may
- * take page table lock. And the bottom level iterator will map PTE
- * directories from highmem if necessary.
+ * struct mm_walk keeps current values of some common data like vma and pmd,
+ * which are useful for access from the callbacks. If you want to pass some
+ * caller-specific data to callbacks, @walk->private should be helpful.
*
- * If any callback returns a non-zero value, the walk is aborted and
- * the return value is propagated back to the caller. Otherwise 0 is returned.
- *
- * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
- * is !NULL.
+ * Locking:
+ * Callers of walk_page_range() and walk_page_vma() should hold
+ * @walk->mm->mmap_sem, because these functions traverse the vma list and/or
+ * access the vma's data.
*/
-int walk_page_range(unsigned long addr, unsigned long end,
+int walk_page_range(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
- pgd_t *pgd;
- unsigned long next;
int err = 0;
+ unsigned long next;
+ struct vm_area_struct *vma;
- if (addr >= end)
- return err;
+ if (start >= end)
+ return -EINVAL;
if (!walk->mm)
return -EINVAL;
VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
- pgd = pgd_offset(walk->mm, addr);
+ vma = find_vma(walk->mm, start);
do {
- struct vm_area_struct *vma = NULL;
-
- next = pgd_addr_end(addr, end);
+ if (!vma) { /* after the last vma */
+ walk->vma = NULL;
+ next = end;
+ } else if (start < vma->vm_start) { /* outside vma */
+ walk->vma = NULL;
+ next = min(end, vma->vm_start);
+ } else { /* inside vma */
+ walk->vma = vma;
+ next = min(end, vma->vm_end);
+ vma = vma->vm_next;
- /*
- * This function was not intended to be vma based.
- * But there are vma special cases to be handled:
- * - hugetlb vma's
- * - VM_PFNMAP vma's
- */
- vma = find_vma(walk->mm, addr);
- if (vma) {
- /*
- * There are no page structures backing a VM_PFNMAP
- * range, so do not allow split_huge_page_pmd().
- */
- if ((vma->vm_start <= addr) &&
- (vma->vm_flags & VM_PFNMAP)) {
- next = vma->vm_end;
- pgd = pgd_offset(walk->mm, next);
- continue;
- }
- /*
- * Handle hugetlb vma individually because pagetable
- * walk for the hugetlb page is dependent on the
- * architecture and we can't handled it in the same
- * manner as non-huge pages.
- */
- if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
- is_vm_hugetlb_page(vma)) {
- if (vma->vm_end < next)
- next = vma->vm_end;
- /*
- * Hugepage is very tightly coupled with vma,
- * so walk through hugetlb entries within a
- * given vma.
- */
- err = walk_hugetlb_range(vma, addr, next, walk);
- if (err)
- break;
- pgd = pgd_offset(walk->mm, next);
+ err = walk_page_test(start, next, walk);
+ if (err > 0)
continue;
- }
- }
-
- if (pgd_none_or_clear_bad(pgd)) {
- if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
- if (err)
+ if (err < 0)
break;
- pgd++;
- continue;
}
- if (walk->pgd_entry)
- err = walk->pgd_entry(pgd, addr, next, walk);
- if (!err &&
- (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
- err = walk_pud_range(pgd, addr, next, walk);
+ if (walk->vma || walk->pte_hole)
+ err = __walk_page_range(start, next, walk);
if (err)
break;
- pgd++;
- } while (addr = next, addr < end);
-
+ } while (start = next, start < end);
return err;
}
+
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
+{
+ int err;
+
+ if (!walk->mm)
+ return -EINVAL;
+
+ VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+ VM_BUG_ON(!vma);
+ walk->vma = vma;
+ err = walk_page_test(vma->vm_start, vma->vm_end, walk);
+ if (err > 0)
+ return 0;
+ if (err < 0)
+ return err;
+ return __walk_page_range(vma->vm_start, vma->vm_end, walk);
+}
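To illustrate the reworked, vma-based walker API, here is a minimal sketch of
a caller (not from this patch; count_pte(), skip_special() and
count_present_ptes() are invented names), assuming the 3.19-era struct mm_walk
from <linux/mm.h> with the new ->vma and ->test_walk members:

#include <linux/mm.h>

/* count present PTEs in one vma; walk->private carries the result */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
                     struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;               /* 0: keep walking */
}

/* 1: skip this vma, 0: walk it, <0: abort the whole walk */
static int skip_special(unsigned long start, unsigned long end,
                        struct mm_walk *walk)
{
        return (walk->vma->vm_flags & VM_SPECIAL) ? 1 : 0;
}

static unsigned long count_present_ptes(struct vm_area_struct *vma)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_pte,
                .test_walk      = skip_special,
                .mm             = vma->vm_mm,
                .private        = &count,
        };

        /* the caller must hold down_read(&vma->vm_mm->mmap_sem) */
        walk_page_vma(vma, &walk);
        return count;
}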
diff --git a/mm/percpu.c b/mm/percpu.c
index d39e2f4e335c..73c97a5f4495 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1528,7 +1528,6 @@ static void pcpu_dump_alloc_info(const char *lvl,
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr)
{
- static char cpus_buf[4096] __initdata;
static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
size_t dyn_size = ai->dyn_size;
@@ -1541,12 +1540,11 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
int *unit_map;
int group, unit, i;
- cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
-
#define PCPU_SETUP_BUG_ON(cond) do { \
if (unlikely(cond)) { \
pr_emerg("PERCPU: failed to initialize, %s", #cond); \
- pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
+ pr_emerg("PERCPU: cpu_possible_mask=%*pb\n", \
+ cpumask_pr_args(cpu_possible_mask)); \
pcpu_dump_alloc_info(KERN_EMERG, ai); \
BUG(); \
} \
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index dfb79e028ecb..c25f94b33811 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -193,8 +193,6 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_t entry = *pmdp;
- if (pmd_numa(entry))
- entry = pmd_mknonnuma(entry);
set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 5077afcd9e11..b1597690530c 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -99,11 +99,8 @@ static int process_vm_rw_single_vec(unsigned long addr,
size_t bytes;
/* Get the pages we're interested in */
- down_read(&mm->mmap_sem);
- pages = get_user_pages(task, mm, pa, pages,
- vm_write, 0, process_pages, NULL);
- up_read(&mm->mmap_sem);
-
+ pages = get_user_pages_unlocked(task, mm, pa, pages,
+ vm_write, 0, process_pages);
if (pages <= 0)
return -EFAULT;
diff --git a/mm/readahead.c b/mm/readahead.c
index 17b9172ec37f..935675844b2e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -27,7 +27,7 @@
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
- ra->ra_pages = mapping->backing_dev_info->ra_pages;
+ ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping,
/*
* Defer asynchronous read-ahead on IO congestion.
*/
- if (bdi_read_congested(mapping->backing_dev_info))
+ if (bdi_read_congested(inode_to_bdi(mapping->host)))
return;
/* do read-ahead */
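
Both readahead call sites now derive the backing device from the inode rather than from the soon-to-be-removed mapping->backing_dev_info pointer. The conversion is mechanical; a hedged one-function sketch with an illustrative helper name:

    #include <linux/fs.h>
    #include <linux/backing-dev.h>

    static bool mapping_read_congested(struct address_space *mapping)
    {
            /* Was: bdi_read_congested(mapping->backing_dev_info) */
            return bdi_read_congested(inode_to_bdi(mapping->host));
    }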
diff --git a/mm/rmap.c b/mm/rmap.c
index 71cd5bd0c17d..5e3e09081164 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -590,9 +590,8 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
if (!vma->anon_vma || !page__anon_vma ||
vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
- } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
- if (!vma->vm_file ||
- vma->vm_file->f_mapping != page->mapping)
+ } else if (page->mapping) {
+ if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
return -EFAULT;
} else
return -EFAULT;
@@ -1086,24 +1085,20 @@ void page_add_new_anon_rmap(struct page *page,
void page_add_file_rmap(struct page *page)
{
struct mem_cgroup *memcg;
- unsigned long flags;
- bool locked;
- memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ memcg = mem_cgroup_begin_page_stat(page);
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
}
- mem_cgroup_end_page_stat(memcg, &locked, &flags);
+ mem_cgroup_end_page_stat(memcg);
}
static void page_remove_file_rmap(struct page *page)
{
struct mem_cgroup *memcg;
- unsigned long flags;
- bool locked;
- memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ memcg = mem_cgroup_begin_page_stat(page);
/* page still mapped by someone else? */
if (!atomic_add_negative(-1, &page->_mapcount))
@@ -1124,7 +1119,7 @@ static void page_remove_file_rmap(struct page *page)
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
out:
- mem_cgroup_end_page_stat(memcg, &locked, &flags);
+ mem_cgroup_end_page_stat(memcg);
}
/**
@@ -1274,7 +1269,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, address, pte, swp_pte);
- BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(flags & TTU_MIGRATION)) {
/* Establish migration entry for a file page */
@@ -1316,211 +1310,6 @@ out_mlock:
return ret;
}
-/*
- * objrmap doesn't work for nonlinear VMAs because the assumption that
- * offset-into-file correlates with offset-into-virtual-addresses does not hold.
- * Consequently, given a particular page and its ->index, we cannot locate the
- * ptes which are mapping that page without an exhaustive linear search.
- *
- * So what this code does is a mini "virtual scan" of each nonlinear VMA which
- * maps the file to which the target page belongs. The ->vm_private_data field
- * holds the current cursor into that scan. Successive searches will circulate
- * around the vma's virtual address space.
- *
- * So as more replacement pressure is applied to the pages in a nonlinear VMA,
- * more scanning pressure is placed against them as well. Eventually pages
- * will become fully unmapped and are eligible for eviction.
- *
- * For very sparsely populated VMAs this is a little inefficient - chances are
- * there there won't be many ptes located within the scan cluster. In this case
- * maybe we could scan further - to the end of the pte page, perhaps.
- *
- * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
- * acquire it without blocking. If vma locked, mlock the pages in the cluster,
- * rather than unmapping them. If we encounter the "check_page" that vmscan is
- * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
- */
-#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
-#define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
-
-static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
- struct vm_area_struct *vma, struct page *check_page)
-{
- struct mm_struct *mm = vma->vm_mm;
- pmd_t *pmd;
- pte_t *pte;
- pte_t pteval;
- spinlock_t *ptl;
- struct page *page;
- unsigned long address;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
- unsigned long end;
- int ret = SWAP_AGAIN;
- int locked_vma = 0;
-
- address = (vma->vm_start + cursor) & CLUSTER_MASK;
- end = address + CLUSTER_SIZE;
- if (address < vma->vm_start)
- address = vma->vm_start;
- if (end > vma->vm_end)
- end = vma->vm_end;
-
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- return ret;
-
- mmun_start = address;
- mmun_end = end;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-
- /*
- * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
- * keep the sem while scanning the cluster for mlocking pages.
- */
- if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
- locked_vma = (vma->vm_flags & VM_LOCKED);
- if (!locked_vma)
- up_read(&vma->vm_mm->mmap_sem); /* don't need it */
- }
-
- pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-
- /* Update high watermark before we lower rss */
- update_hiwater_rss(mm);
-
- for (; address < end; pte++, address += PAGE_SIZE) {
- if (!pte_present(*pte))
- continue;
- page = vm_normal_page(vma, address, *pte);
- BUG_ON(!page || PageAnon(page));
-
- if (locked_vma) {
- if (page == check_page) {
- /* we know we have check_page locked */
- mlock_vma_page(page);
- ret = SWAP_MLOCK;
- } else if (trylock_page(page)) {
- /*
- * If we can lock the page, perform mlock.
- * Otherwise leave the page alone, it will be
- * eventually encountered again later.
- */
- mlock_vma_page(page);
- unlock_page(page);
- }
- continue; /* don't unmap */
- }
-
- /*
- * No need for _notify because we're within an
- * mmu_notifier_invalidate_range_ {start|end} scope.
- */
- if (ptep_clear_flush_young(vma, address, pte))
- continue;
-
- /* Nuke the page table entry. */
- flush_cache_page(vma, address, pte_pfn(*pte));
- pteval = ptep_clear_flush_notify(vma, address, pte);
-
- /* If nonlinear, store the file page offset in the pte. */
- if (page->index != linear_page_index(vma, address)) {
- pte_t ptfile = pgoff_to_pte(page->index);
- if (pte_soft_dirty(pteval))
- ptfile = pte_file_mksoft_dirty(ptfile);
- set_pte_at(mm, address, pte, ptfile);
- }
-
- /* Move the dirty bit to the physical page now the pte is gone. */
- if (pte_dirty(pteval))
- set_page_dirty(page);
-
- page_remove_rmap(page);
- page_cache_release(page);
- dec_mm_counter(mm, MM_FILEPAGES);
- (*mapcount)--;
- }
- pte_unmap_unlock(pte - 1, ptl);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- if (locked_vma)
- up_read(&vma->vm_mm->mmap_sem);
- return ret;
-}
-
-static int try_to_unmap_nonlinear(struct page *page,
- struct address_space *mapping, void *arg)
-{
- struct vm_area_struct *vma;
- int ret = SWAP_AGAIN;
- unsigned long cursor;
- unsigned long max_nl_cursor = 0;
- unsigned long max_nl_size = 0;
- unsigned int mapcount;
-
- list_for_each_entry(vma,
- &mapping->i_mmap_nonlinear, shared.nonlinear) {
-
- cursor = (unsigned long) vma->vm_private_data;
- if (cursor > max_nl_cursor)
- max_nl_cursor = cursor;
- cursor = vma->vm_end - vma->vm_start;
- if (cursor > max_nl_size)
- max_nl_size = cursor;
- }
-
- if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
- return SWAP_FAIL;
- }
-
- /*
- * We don't try to search for this page in the nonlinear vmas,
- * and page_referenced wouldn't have found it anyway. Instead
- * just walk the nonlinear vmas trying to age and unmap some.
- * The mapcount of the page we came in with is irrelevant,
- * but even so use it as a guide to how hard we should try?
- */
- mapcount = page_mapcount(page);
- if (!mapcount)
- return ret;
-
- cond_resched();
-
- max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
- if (max_nl_cursor == 0)
- max_nl_cursor = CLUSTER_SIZE;
-
- do {
- list_for_each_entry(vma,
- &mapping->i_mmap_nonlinear, shared.nonlinear) {
-
- cursor = (unsigned long) vma->vm_private_data;
- while (cursor < max_nl_cursor &&
- cursor < vma->vm_end - vma->vm_start) {
- if (try_to_unmap_cluster(cursor, &mapcount,
- vma, page) == SWAP_MLOCK)
- ret = SWAP_MLOCK;
- cursor += CLUSTER_SIZE;
- vma->vm_private_data = (void *) cursor;
- if ((int)mapcount <= 0)
- return ret;
- }
- vma->vm_private_data = (void *) max_nl_cursor;
- }
- cond_resched();
- max_nl_cursor += CLUSTER_SIZE;
- } while (max_nl_cursor <= max_nl_size);
-
- /*
- * Don't loop forever (perhaps all the remaining pages are
- * in locked vmas). Reset cursor on all unreserved nonlinear
- * vmas, now forgetting on which ones it had fallen behind.
- */
- list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
- vma->vm_private_data = NULL;
-
- return ret;
-}
-
bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -1566,7 +1355,6 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
.done = page_not_mapped,
- .file_nonlinear = try_to_unmap_nonlinear,
.anon_lock = page_lock_anon_vma_read,
};
@@ -1612,12 +1400,6 @@ int try_to_munlock(struct page *page)
.rmap_one = try_to_unmap_one,
.arg = (void *)TTU_MUNLOCK,
.done = page_not_mapped,
- /*
- * We don't bother to try to find the munlocked page in
- * nonlinears. It's costly. Instead, later, page reclaim logic
- * may call try_to_unmap() and recover PG_mlocked lazily.
- */
- .file_nonlinear = NULL,
.anon_lock = page_lock_anon_vma_read,
};
@@ -1748,13 +1530,6 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
goto done;
}
- if (!rwc->file_nonlinear)
- goto done;
-
- if (list_empty(&mapping->i_mmap_nonlinear))
- goto done;
-
- ret = rwc->file_nonlinear(page, mapping, rwc->arg);
done:
i_mmap_unlock_read(mapping);
return ret;
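
The simplified mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat() pair used above no longer returns lock state through locked/flags out-parameters. A hedged sketch of the updated bracket around a page-state update, mirroring page_add_file_rmap(); the helper name account_file_mapped() is illustrative:

    #include <linux/memcontrol.h>
    #include <linux/vmstat.h>

    static void account_file_mapped(struct page *page)
    {
            struct mem_cgroup *memcg;

            memcg = mem_cgroup_begin_page_stat(page);
            __inc_zone_page_state(page, NR_FILE_MAPPED);
            mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
            mem_cgroup_end_page_stat(memcg);
    }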
diff --git a/mm/shmem.c b/mm/shmem.c
index 73ba1df7c8ba..a63031fa3e0c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -191,11 +191,6 @@ static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
-static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-};
-
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
@@ -765,11 +760,11 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
goto redirty;
/*
- * shmem_backing_dev_info's capabilities prevent regular writeback or
- * sync from ever calling shmem_writepage; but a stacking filesystem
- * might use ->writepage of its underlying filesystem, in which case
- * tmpfs should write out to swap only in response to memory pressure,
- * and not for the writeback threads or sync.
+ * Our capabilities prevent regular writeback or sync from ever calling
+ * shmem_writepage; but a stacking filesystem might use ->writepage of
+ * its underlying filesystem, in which case tmpfs should write out to
+ * swap only in response to memory pressure, and not for the writeback
+ * threads or sync.
*/
if (!wbc->for_reclaim) {
WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
@@ -1013,7 +1008,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
*/
oldpage = newpage;
} else {
- mem_cgroup_migrate(oldpage, newpage, false);
+ mem_cgroup_migrate(oldpage, newpage, true);
lru_cache_add_anon(newpage);
*pagep = newpage;
}
@@ -1131,7 +1126,7 @@ repeat:
* truncated or holepunched since swap was confirmed.
* shmem_undo_range() will have done some of the
* unaccounting, now delete_from_swap_cache() will do
- * the rest (including mem_cgroup_uncharge_swapcache).
+ * the rest.
* Reset swap.val? No, leave it so "failed" goes back to
* "repeat": reading a hole and writing should succeed.
*/
@@ -1415,7 +1410,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
- inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_generation = get_seconds();
info = SHMEM_I(inode);
@@ -1461,7 +1455,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
bool shmem_mapping(struct address_space *mapping)
{
- return mapping->backing_dev_info == &shmem_backing_dev_info;
+ return mapping->host->i_sb->s_op == &shmem_ops;
}
#ifdef CONFIG_TMPFS
@@ -3201,7 +3195,6 @@ static const struct vm_operations_struct shmem_vm_ops = {
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
- .remap_pages = generic_file_remap_pages,
};
static struct dentry *shmem_mount(struct file_system_type *fs_type,
@@ -3226,10 +3219,6 @@ int __init shmem_init(void)
if (shmem_inode_cachep)
return 0;
- error = bdi_init(&shmem_backing_dev_info);
- if (error)
- goto out4;
-
error = shmem_init_inodecache();
if (error)
goto out3;
@@ -3253,8 +3242,6 @@ out1:
out2:
shmem_destroy_inodecache();
out3:
- bdi_destroy(&shmem_backing_dev_info);
-out4:
shm_mnt = ERR_PTR(error);
return error;
}
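
With the per-filesystem backing_dev_info gone, shmem_mapping() now identifies tmpfs mappings by super_block operations instead of by bdi pointer, so callers are unchanged. A hedged sketch; is_tmpfs_page() is an illustrative name:

    #include <linux/mm.h>
    #include <linux/shmem_fs.h>

    static bool is_tmpfs_page(struct page *page)
    {
            struct address_space *mapping = page_mapping(page);

            /* Behaves the same before and after the bdi removal. */
            return mapping && shmem_mapping(mapping);
    }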
diff --git a/mm/slab.c b/mm/slab.c
index 65b5dcb6f671..c4b89eaf4c96 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2382,7 +2382,7 @@ out:
return nr_freed;
}
-int __kmem_cache_shrink(struct kmem_cache *cachep)
+int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
{
int ret = 0;
int node;
@@ -2404,7 +2404,7 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
int i;
struct kmem_cache_node *n;
- int rc = __kmem_cache_shrink(cachep);
+ int rc = __kmem_cache_shrink(cachep, false);
if (rc)
return rc;
@@ -3708,8 +3708,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
int ret;
- struct kmem_cache *c = NULL;
- int i = 0;
+ struct kmem_cache *c;
ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
@@ -3719,12 +3718,10 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
if ((ret < 0) || !is_root_cache(cachep))
return ret;
- VM_BUG_ON(!mutex_is_locked(&slab_mutex));
- for_each_memcg_cache_index(i) {
- c = cache_from_memcg_idx(cachep, i);
- if (c)
- /* return value determined by the parent cache only */
- __do_tune_cpucache(c, limit, batchcount, shared, gfp);
+ lockdep_assert_held(&slab_mutex);
+ for_each_memcg_cache(c, cachep) {
+ /* return value determined by the root cache only */
+ __do_tune_cpucache(c, limit, batchcount, shared, gfp);
}
return ret;
diff --git a/mm/slab.h b/mm/slab.h
index 1cf4005482dd..4c3ac12dd644 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -86,8 +86,6 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
extern void create_boot_cache(struct kmem_cache *, const char *name,
size_t size, unsigned long flags);
-struct mem_cgroup;
-
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
unsigned long flags, const char *name, void (*ctor)(void *));
@@ -140,7 +138,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
int __kmem_cache_shutdown(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *);
+int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
@@ -165,16 +163,27 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
#ifdef CONFIG_MEMCG_KMEM
+/*
+ * Iterate over all memcg caches of the given root cache. The caller must hold
+ * slab_mutex.
+ */
+#define for_each_memcg_cache(iter, root) \
+ list_for_each_entry(iter, &(root)->memcg_params.list, \
+ memcg_params.list)
+
+#define for_each_memcg_cache_safe(iter, tmp, root) \
+ list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
+ memcg_params.list)
+
static inline bool is_root_cache(struct kmem_cache *s)
{
- return !s->memcg_params || s->memcg_params->is_root_cache;
+ return s->memcg_params.is_root_cache;
}
static inline bool slab_equal_or_root(struct kmem_cache *s,
- struct kmem_cache *p)
+ struct kmem_cache *p)
{
- return (p == s) ||
- (s->memcg_params && (p == s->memcg_params->root_cache));
+ return p == s || p == s->memcg_params.root_cache;
}
/*
@@ -185,37 +194,30 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
static inline const char *cache_name(struct kmem_cache *s)
{
if (!is_root_cache(s))
- return s->memcg_params->root_cache->name;
+ s = s->memcg_params.root_cache;
return s->name;
}
/*
* Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away. Since once
- * created a memcg's cache is destroyed only along with the root cache, it is
- * true if we are going to allocate from the cache or hold a reference to the
- * root cache by other means. Otherwise, we should hold either the slab_mutex
- * or the memcg's slab_caches_mutex while calling this function and accessing
- * the returned value.
+ * That said the caller must assure the memcg's cache won't go away by either
+ * taking a css reference to the owner cgroup, or holding the slab_mutex.
*/
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
struct kmem_cache *cachep;
- struct memcg_cache_params *params;
-
- if (!s->memcg_params)
- return NULL;
+ struct memcg_cache_array *arr;
rcu_read_lock();
- params = rcu_dereference(s->memcg_params);
+ arr = rcu_dereference(s->memcg_params.memcg_caches);
/*
* Make sure we will access the up-to-date value. The code updating
* memcg_caches issues a write barrier to match this (see
- * memcg_register_cache()).
+ * memcg_create_kmem_cache()).
*/
- cachep = lockless_dereference(params->memcg_caches[idx]);
+ cachep = lockless_dereference(arr->entries[idx]);
rcu_read_unlock();
return cachep;
@@ -225,7 +227,7 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
if (is_root_cache(s))
return s;
- return s->memcg_params->root_cache;
+ return s->memcg_params.root_cache;
}
static __always_inline int memcg_charge_slab(struct kmem_cache *s,
@@ -235,7 +237,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
return 0;
if (is_root_cache(s))
return 0;
- return __memcg_charge_slab(s, gfp, order);
+ return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}
static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -244,9 +246,18 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
return;
if (is_root_cache(s))
return;
- __memcg_uncharge_slab(s, order);
+ memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}
-#else
+
+extern void slab_init_memcg_params(struct kmem_cache *);
+
+#else /* !CONFIG_MEMCG_KMEM */
+
+#define for_each_memcg_cache(iter, root) \
+ for ((void)(iter), (void)(root); 0; )
+#define for_each_memcg_cache_safe(iter, tmp, root) \
+ for ((void)(iter), (void)(tmp), (void)(root); 0; )
+
static inline bool is_root_cache(struct kmem_cache *s)
{
return true;
@@ -282,7 +293,11 @@ static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
-#endif
+
+static inline void slab_init_memcg_params(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
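
The new for_each_memcg_cache() iterator above replaces the index-based for_each_memcg_cache_index()/cache_from_memcg_idx() loops and must run under slab_mutex. A hedged usage sketch mirroring the do_tune_cpucache() conversion; the function name and printout are illustrative, and the code would have to live under mm/ since the iterator and slab_mutex are internal to mm/slab.h:

    #include <linux/printk.h>
    #include "slab.h"	/* mm-internal: for_each_memcg_cache(), slab_mutex */

    static void dump_memcg_children(struct kmem_cache *root)
    {
            struct kmem_cache *c;

            lockdep_assert_held(&slab_mutex);
            for_each_memcg_cache(c, root)
                    pr_info("child of %s: %s\n", root->name, c->name);
    }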
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e03dd6f2a272..999bb3424d44 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -106,62 +106,67 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
#endif
#ifdef CONFIG_MEMCG_KMEM
-static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
- struct kmem_cache *s, struct kmem_cache *root_cache)
+void slab_init_memcg_params(struct kmem_cache *s)
{
- size_t size;
+ s->memcg_params.is_root_cache = true;
+ INIT_LIST_HEAD(&s->memcg_params.list);
+ RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
+}
+
+static int init_memcg_params(struct kmem_cache *s,
+ struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+ struct memcg_cache_array *arr;
- if (!memcg_kmem_enabled())
+ if (memcg) {
+ s->memcg_params.is_root_cache = false;
+ s->memcg_params.memcg = memcg;
+ s->memcg_params.root_cache = root_cache;
return 0;
+ }
- if (!memcg) {
- size = offsetof(struct memcg_cache_params, memcg_caches);
- size += memcg_limited_groups_array_size * sizeof(void *);
- } else
- size = sizeof(struct memcg_cache_params);
+ slab_init_memcg_params(s);
- s->memcg_params = kzalloc(size, GFP_KERNEL);
- if (!s->memcg_params)
- return -ENOMEM;
+ if (!memcg_nr_cache_ids)
+ return 0;
- if (memcg) {
- s->memcg_params->memcg = memcg;
- s->memcg_params->root_cache = root_cache;
- } else
- s->memcg_params->is_root_cache = true;
+ arr = kzalloc(sizeof(struct memcg_cache_array) +
+ memcg_nr_cache_ids * sizeof(void *),
+ GFP_KERNEL);
+ if (!arr)
+ return -ENOMEM;
+ RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
return 0;
}
-static void memcg_free_cache_params(struct kmem_cache *s)
+static void destroy_memcg_params(struct kmem_cache *s)
{
- kfree(s->memcg_params);
+ if (is_root_cache(s))
+ kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}
-static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
+static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
- int size;
- struct memcg_cache_params *new_params, *cur_params;
-
- BUG_ON(!is_root_cache(s));
+ struct memcg_cache_array *old, *new;
- size = offsetof(struct memcg_cache_params, memcg_caches);
- size += num_memcgs * sizeof(void *);
+ if (!is_root_cache(s))
+ return 0;
- new_params = kzalloc(size, GFP_KERNEL);
- if (!new_params)
+ new = kzalloc(sizeof(struct memcg_cache_array) +
+ new_array_size * sizeof(void *), GFP_KERNEL);
+ if (!new)
return -ENOMEM;
- cur_params = s->memcg_params;
- memcpy(new_params->memcg_caches, cur_params->memcg_caches,
- memcg_limited_groups_array_size * sizeof(void *));
-
- new_params->is_root_cache = true;
-
- rcu_assign_pointer(s->memcg_params, new_params);
- if (cur_params)
- kfree_rcu(cur_params, rcu_head);
+ old = rcu_dereference_protected(s->memcg_params.memcg_caches,
+ lockdep_is_held(&slab_mutex));
+ if (old)
+ memcpy(new->entries, old->entries,
+ memcg_nr_cache_ids * sizeof(void *));
+ rcu_assign_pointer(s->memcg_params.memcg_caches, new);
+ if (old)
+ kfree_rcu(old, rcu);
return 0;
}
@@ -169,34 +174,28 @@ int memcg_update_all_caches(int num_memcgs)
{
struct kmem_cache *s;
int ret = 0;
- mutex_lock(&slab_mutex);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
- if (!is_root_cache(s))
- continue;
-
- ret = memcg_update_cache_params(s, num_memcgs);
+ ret = update_memcg_params(s, num_memcgs);
/*
* Instead of freeing the memory, we'll just leave the caches
* up to this point in an updated state.
*/
if (ret)
- goto out;
+ break;
}
-
- memcg_update_array_size(num_memcgs);
-out:
mutex_unlock(&slab_mutex);
return ret;
}
#else
-static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
- struct kmem_cache *s, struct kmem_cache *root_cache)
+static inline int init_memcg_params(struct kmem_cache *s,
+ struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
return 0;
}
-static inline void memcg_free_cache_params(struct kmem_cache *s)
+static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
@@ -296,8 +295,8 @@ unsigned long calculate_alignment(unsigned long flags,
}
static struct kmem_cache *
-do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *),
+do_kmem_cache_create(const char *name, size_t object_size, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *),
struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
struct kmem_cache *s;
@@ -314,7 +313,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
s->align = align;
s->ctor = ctor;
- err = memcg_alloc_cache_params(memcg, s, root_cache);
+ err = init_memcg_params(s, memcg, root_cache);
if (err)
goto out_free_cache;
@@ -330,8 +329,8 @@ out:
return s;
out_free_cache:
- memcg_free_cache_params(s);
- kfree(s);
+ destroy_memcg_params(s);
+ kmem_cache_free(kmem_cache, s);
goto out;
}
@@ -364,11 +363,12 @@ kmem_cache_create(const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
- char *cache_name;
+ const char *cache_name;
int err;
get_online_cpus();
get_online_mems();
+ memcg_get_cache_ids();
mutex_lock(&slab_mutex);
@@ -390,7 +390,7 @@ kmem_cache_create(const char *name, size_t size, size_t align,
if (s)
goto out_unlock;
- cache_name = kstrdup(name, GFP_KERNEL);
+ cache_name = kstrdup_const(name, GFP_KERNEL);
if (!cache_name) {
err = -ENOMEM;
goto out_unlock;
@@ -401,12 +401,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
flags, ctor, NULL, NULL);
if (IS_ERR(s)) {
err = PTR_ERR(s);
- kfree(cache_name);
+ kfree_const(cache_name);
}
out_unlock:
mutex_unlock(&slab_mutex);
+ memcg_put_cache_ids();
put_online_mems();
put_online_cpus();
@@ -425,31 +426,91 @@ out_unlock:
}
EXPORT_SYMBOL(kmem_cache_create);
+static int do_kmem_cache_shutdown(struct kmem_cache *s,
+ struct list_head *release, bool *need_rcu_barrier)
+{
+ if (__kmem_cache_shutdown(s) != 0) {
+ printk(KERN_ERR "kmem_cache_destroy %s: "
+ "Slab cache still has objects\n", s->name);
+ dump_stack();
+ return -EBUSY;
+ }
+
+ if (s->flags & SLAB_DESTROY_BY_RCU)
+ *need_rcu_barrier = true;
+
+#ifdef CONFIG_MEMCG_KMEM
+ if (!is_root_cache(s))
+ list_del(&s->memcg_params.list);
+#endif
+ list_move(&s->list, release);
+ return 0;
+}
+
+static void do_kmem_cache_release(struct list_head *release,
+ bool need_rcu_barrier)
+{
+ struct kmem_cache *s, *s2;
+
+ if (need_rcu_barrier)
+ rcu_barrier();
+
+ list_for_each_entry_safe(s, s2, release, list) {
+#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_remove(s);
+#else
+ slab_kmem_cache_release(s);
+#endif
+ }
+}
+
#ifdef CONFIG_MEMCG_KMEM
/*
* memcg_create_kmem_cache - Create a cache for a memory cgroup.
* @memcg: The memory cgroup the new cache is for.
* @root_cache: The parent of the new cache.
- * @memcg_name: The name of the memory cgroup (used for naming the new cache).
*
* This function attempts to create a kmem cache that will serve allocation
* requests going from @memcg to @root_cache. The new cache inherits properties
* from its parent.
*/
-struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
- struct kmem_cache *root_cache,
- const char *memcg_name)
+void memcg_create_kmem_cache(struct mem_cgroup *memcg,
+ struct kmem_cache *root_cache)
{
+ static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
+ struct cgroup_subsys_state *css = mem_cgroup_css(memcg);
+ struct memcg_cache_array *arr;
struct kmem_cache *s = NULL;
char *cache_name;
+ int idx;
get_online_cpus();
get_online_mems();
mutex_lock(&slab_mutex);
+ /*
+ * The memory cgroup could have been deactivated while the cache
+ * creation work was pending.
+ */
+ if (!memcg_kmem_is_active(memcg))
+ goto out_unlock;
+
+ idx = memcg_cache_id(memcg);
+ arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
+ lockdep_is_held(&slab_mutex));
+
+ /*
+ * Since per-memcg caches are created asynchronously on first
+ * allocation (see memcg_kmem_get_cache()), several threads can try to
+ * create the same cache, but only one of them may succeed.
+ */
+ if (arr->entries[idx])
+ goto out_unlock;
+
+ cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
- memcg_cache_id(memcg), memcg_name);
+ css->id, memcg_name_buf);
if (!cache_name)
goto out_unlock;
@@ -457,49 +518,108 @@ struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
root_cache->size, root_cache->align,
root_cache->flags, root_cache->ctor,
memcg, root_cache);
+ /*
+ * If we could not create a memcg cache, do not complain, because
+ * that's not critical at all as we can always proceed with the root
+ * cache.
+ */
if (IS_ERR(s)) {
kfree(cache_name);
- s = NULL;
+ goto out_unlock;
}
+ list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
+
+ /*
+ * Since readers won't lock (see cache_from_memcg_idx()), we need a
+ * barrier here to ensure nobody will see the kmem_cache partially
+ * initialized.
+ */
+ smp_wmb();
+ arr->entries[idx] = s;
+
out_unlock:
mutex_unlock(&slab_mutex);
put_online_mems();
put_online_cpus();
-
- return s;
}
-static int memcg_cleanup_cache_params(struct kmem_cache *s)
+void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
- int rc;
+ int idx;
+ struct memcg_cache_array *arr;
+ struct kmem_cache *s, *c;
- if (!s->memcg_params ||
- !s->memcg_params->is_root_cache)
- return 0;
+ idx = memcg_cache_id(memcg);
+
+ get_online_cpus();
+ get_online_mems();
- mutex_unlock(&slab_mutex);
- rc = __memcg_cleanup_cache_params(s);
mutex_lock(&slab_mutex);
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!is_root_cache(s))
+ continue;
+
+ arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
+ lockdep_is_held(&slab_mutex));
+ c = arr->entries[idx];
+ if (!c)
+ continue;
+
+ __kmem_cache_shrink(c, true);
+ arr->entries[idx] = NULL;
+ }
+ mutex_unlock(&slab_mutex);
- return rc;
+ put_online_mems();
+ put_online_cpus();
}
-#else
-static int memcg_cleanup_cache_params(struct kmem_cache *s)
+
+void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
- return 0;
+ LIST_HEAD(release);
+ bool need_rcu_barrier = false;
+ struct kmem_cache *s, *s2;
+
+ get_online_cpus();
+ get_online_mems();
+
+ mutex_lock(&slab_mutex);
+ list_for_each_entry_safe(s, s2, &slab_caches, list) {
+ if (is_root_cache(s) || s->memcg_params.memcg != memcg)
+ continue;
+ /*
+ * The cgroup is about to be freed and therefore has no charges
+ * left. Hence, all its caches must be empty by now.
+ */
+ BUG_ON(do_kmem_cache_shutdown(s, &release, &need_rcu_barrier));
+ }
+ mutex_unlock(&slab_mutex);
+
+ put_online_mems();
+ put_online_cpus();
+
+ do_kmem_cache_release(&release, need_rcu_barrier);
}
#endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s)
{
- kfree(s->name);
+ destroy_memcg_params(s);
+ kfree_const(s->name);
kmem_cache_free(kmem_cache, s);
}
void kmem_cache_destroy(struct kmem_cache *s)
{
+ struct kmem_cache *c, *c2;
+ LIST_HEAD(release);
+ bool need_rcu_barrier = false;
+ bool busy = false;
+
+ BUG_ON(!is_root_cache(s));
+
get_online_cpus();
get_online_mems();
@@ -509,35 +629,21 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->refcount)
goto out_unlock;
- if (memcg_cleanup_cache_params(s) != 0)
- goto out_unlock;
-
- if (__kmem_cache_shutdown(s) != 0) {
- printk(KERN_ERR "kmem_cache_destroy %s: "
- "Slab cache still has objects\n", s->name);
- dump_stack();
- goto out_unlock;
+ for_each_memcg_cache_safe(c, c2, s) {
+ if (do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
+ busy = true;
}
- list_del(&s->list);
-
- mutex_unlock(&slab_mutex);
- if (s->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
-
- memcg_free_cache_params(s);
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_remove(s);
-#else
- slab_kmem_cache_release(s);
-#endif
- goto out;
+ if (!busy)
+ do_kmem_cache_shutdown(s, &release, &need_rcu_barrier);
out_unlock:
mutex_unlock(&slab_mutex);
-out:
+
put_online_mems();
put_online_cpus();
+
+ do_kmem_cache_release(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -554,7 +660,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
get_online_cpus();
get_online_mems();
- ret = __kmem_cache_shrink(cachep);
+ ret = __kmem_cache_shrink(cachep, false);
put_online_mems();
put_online_cpus();
return ret;
@@ -576,6 +682,9 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
s->name = name;
s->size = s->object_size = size;
s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+
+ slab_init_memcg_params(s);
+
err = __kmem_cache_create(s, flags);
if (err)
@@ -789,6 +898,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
page = alloc_kmem_pages(flags, order);
ret = page ? page_address(page) : NULL;
kmemleak_alloc(ret, size, 1, flags);
+ kasan_kmalloc_large(ret, size);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
@@ -855,16 +965,11 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
struct kmem_cache *c;
struct slabinfo sinfo;
- int i;
if (!is_root_cache(s))
return;
- for_each_memcg_cache_index(i) {
- c = cache_from_memcg_idx(s, i);
- if (!c)
- continue;
-
+ for_each_memcg_cache(c, s) {
memset(&sinfo, 0, sizeof(sinfo));
get_slabinfo(c, &sinfo);
@@ -916,7 +1021,7 @@ int memcg_slab_show(struct seq_file *m, void *p)
if (p == slab_caches.next)
print_slabinfo_header(m);
- if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
+ if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
cache_show(s, m);
return 0;
}
@@ -973,8 +1078,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
if (p)
ks = ksize(p);
- if (ks >= new_size)
+ if (ks >= new_size) {
+ kasan_krealloc((void *)p, new_size);
return (void *)p;
+ }
ret = kmalloc_track_caller(new_size, flags);
if (ret && p)
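
With kmem_cache_create() now duplicating the cache name via kstrdup_const(), a string-literal name costs no allocation, and slab_kmem_cache_release() frees it with kfree_const(); nothing changes for callers. A hedged sketch of ordinary cache creation and destruction, with struct foo and the cache name purely illustrative:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct foo { int a, b; };		/* illustrative payload */
    static struct kmem_cache *foo_cache;

    static int foo_cache_init(void)
    {
            /* The literal name is not copied, thanks to kstrdup_const(). */
            foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
            return foo_cache ? 0 : -ENOMEM;
    }

    static void foo_cache_exit(void)
    {
            kmem_cache_destroy(foo_cache);
    }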
diff --git a/mm/slob.c b/mm/slob.c
index 96a86206a26b..94a7fede6d48 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -618,7 +618,7 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
return 0;
}
-int __kmem_cache_shrink(struct kmem_cache *d)
+int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
{
return 0;
}
diff --git a/mm/slub.c b/mm/slub.c
index fe376fe1f4fe..6832c4eab104 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
+#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
@@ -468,12 +469,30 @@ static char *slub_debug_slabs;
static int disable_higher_order_debug;
/*
+ * slub is about to manipulate internal object metadata. This memory lies
+ * outside the range of the allocated object, so accessing it would normally
+ * be reported by kasan as a bounds error. metadata_access_enable() is used
+ * to tell kasan that these accesses are OK.
+ */
+static inline void metadata_access_enable(void)
+{
+ kasan_disable_current();
+}
+
+static inline void metadata_access_disable(void)
+{
+ kasan_enable_current();
+}
+
+/*
* Object debugging
*/
static void print_section(char *text, u8 *addr, unsigned int length)
{
+ metadata_access_enable();
print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
length, 1);
+ metadata_access_disable();
}
static struct track *get_track(struct kmem_cache *s, void *object,
@@ -503,7 +522,9 @@ static void set_track(struct kmem_cache *s, void *object,
trace.max_entries = TRACK_ADDRS_COUNT;
trace.entries = p->addrs;
trace.skip = 3;
+ metadata_access_enable();
save_stack_trace(&trace);
+ metadata_access_disable();
/* See rant in lockdep.c */
if (trace.nr_entries != 0 &&
@@ -629,7 +650,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
dump_stack();
}
-static void object_err(struct kmem_cache *s, struct page *page,
+void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
slab_bug(s, "%s", reason);
@@ -677,7 +698,9 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
u8 *fault;
u8 *end;
+ metadata_access_enable();
fault = memchr_inv(start, value, bytes);
+ metadata_access_disable();
if (!fault)
return 1;
@@ -770,7 +793,9 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
if (!remainder)
return 1;
+ metadata_access_enable();
fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
+ metadata_access_disable();
if (!fault)
return 1;
while (end > fault && end[-1] == POISON_INUSE)
@@ -1226,11 +1251,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
kmemleak_alloc(ptr, size, 1, flags);
+ kasan_kmalloc_large(ptr, size);
}
static inline void kfree_hook(const void *x)
{
kmemleak_free(x);
+ kasan_kfree_large(x);
}
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
@@ -1253,6 +1280,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
memcg_kmem_put_cache(s);
+ kasan_slab_alloc(s, object);
}
static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1276,6 +1304,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
#endif
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(x, s->object_size);
+
+ kasan_slab_free(s, x);
}
/*
@@ -1370,8 +1400,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
- if (unlikely(s->ctor))
+ if (unlikely(s->ctor)) {
+ kasan_unpoison_object_data(s, object);
s->ctor(object);
+ kasan_poison_object_data(s, object);
+ }
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1404,6 +1437,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << order);
+ kasan_poison_slab(page);
+
for_each_object_idx(p, idx, s, start, page->objects) {
setup_object(s, page, p);
if (likely(idx < page->objects))
@@ -2007,6 +2042,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
int pages;
int pobjects;
+ preempt_disable();
do {
pages = 0;
pobjects = 0;
@@ -2040,6 +2076,14 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
!= oldpage);
+ if (unlikely(!s->cpu_partial)) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+ local_irq_restore(flags);
+ }
+ preempt_enable();
#endif
}
@@ -2398,13 +2442,24 @@ redo:
* reading from one cpu area. That does not matter as long
* as we end up on the original cpu again when doing the cmpxchg.
*
- * Preemption is disabled for the retrieval of the tid because that
- * must occur from the current processor. We cannot allow rescheduling
- * on a different processor between the determination of the pointer
- * and the retrieval of the tid.
+ * We should guarantee that tid and kmem_cache are retrieved on
+ * the same cpu. It could be different if CONFIG_PREEMPT so we need
+ * to check if it is matched or not.
*/
- preempt_disable();
- c = this_cpu_ptr(s->cpu_slab);
+ do {
+ tid = this_cpu_read(s->cpu_slab->tid);
+ c = raw_cpu_ptr(s->cpu_slab);
+ } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+
+ /*
+ * Irqless object alloc/free algorithm used here depends on sequence
+ * of fetching cpu_slab's data. tid should be fetched before anything
+ * on c to guarantee that object and page associated with previous tid
+ * won't be used with current tid. If we fetch tid first, object and
+ * page could be the one associated with the next tid and our alloc/free
+ * request would fail. In this case we simply retry, so there is no problem.
+ */
+ barrier();
/*
* The transaction ids are globally unique per cpu and per operation on
@@ -2412,8 +2467,6 @@ redo:
* occurs on the right processor and that there was no operation on the
* linked list in between.
*/
- tid = c->tid;
- preempt_enable();
object = c->freelist;
page = c->page;
@@ -2479,6 +2532,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+ kasan_kmalloc(s, ret, size);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2505,6 +2559,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
+
+ kasan_kmalloc(s, ret, size);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2512,7 +2568,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
/*
- * Slow patch handling. This may still be called frequently since objects
+ * Slow path handling. This may still be called frequently since objects
* have a longer lifetime than the cpu slabs in most processing loads.
*
* So we still attempt to reduce cache line usage. Just take the slab
@@ -2659,11 +2715,13 @@ redo:
* data is retrieved via this pointer. If we are on the same cpu
* during the cmpxchg then the free will succeed.
*/
- preempt_disable();
- c = this_cpu_ptr(s->cpu_slab);
+ do {
+ tid = this_cpu_read(s->cpu_slab->tid);
+ c = raw_cpu_ptr(s->cpu_slab);
+ } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
- tid = c->tid;
- preempt_enable();
+ /* Same with comment on barrier() in slab_alloc_node() */
+ barrier();
if (likely(page == c->page)) {
set_freepointer(s, object, c->freelist);
@@ -2888,6 +2946,7 @@ static void early_kmem_cache_node_alloc(int node)
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
+ kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
@@ -3260,6 +3319,8 @@ void *__kmalloc(size_t size, gfp_t flags)
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+ kasan_kmalloc(s, ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc);
@@ -3303,12 +3364,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+ kasan_kmalloc(s, ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
-size_t ksize(const void *object)
+static size_t __ksize(const void *object)
{
struct page *page;
@@ -3324,6 +3387,15 @@ size_t ksize(const void *object)
return slab_ksize(page->slab_cache);
}
+
+size_t ksize(const void *object)
+{
+ size_t size = __ksize(object);
+ /* We assume that ksize callers could use the whole allocated area,
+ * so we need to unpoison this area. */
+ kasan_krealloc(object, size);
+ return size;
+}
EXPORT_SYMBOL(ksize);
void kfree(const void *x)
@@ -3347,69 +3419,92 @@ void kfree(const void *x)
}
EXPORT_SYMBOL(kfree);
+#define SHRINK_PROMOTE_MAX 32
+
/*
- * kmem_cache_shrink removes empty slabs from the partial lists and sorts
- * the remaining slabs by the number of items in use. The slabs with the
- * most items in use come first. New allocations will then fill those up
- * and thus they can be removed from the partial lists.
+ * kmem_cache_shrink discards empty slabs and promotes the slabs filled
+ * up most to the head of the partial lists. New allocations will then
+ * fill those up and thus they can be removed from the partial lists.
*
* The slabs with the least items are placed last. This results in them
* being allocated from last increasing the chance that the last objects
* are freed in them.
*/
-int __kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
{
int node;
int i;
struct kmem_cache_node *n;
struct page *page;
struct page *t;
- int objects = oo_objects(s->max);
- struct list_head *slabs_by_inuse =
- kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
+ struct list_head discard;
+ struct list_head promote[SHRINK_PROMOTE_MAX];
unsigned long flags;
+ int ret = 0;
- if (!slabs_by_inuse)
- return -ENOMEM;
+ if (deactivate) {
+ /*
+ * Disable empty slabs caching. Used to avoid pinning offline
+ * memory cgroups by kmem pages that can be freed.
+ */
+ s->cpu_partial = 0;
+ s->min_partial = 0;
+
+ /*
+ * s->cpu_partial is checked locklessly (see put_cpu_partial),
+ * so we have to make sure the change is visible.
+ */
+ kick_all_cpus_sync();
+ }
flush_all(s);
for_each_kmem_cache_node(s, node, n) {
- if (!n->nr_partial)
- continue;
-
- for (i = 0; i < objects; i++)
- INIT_LIST_HEAD(slabs_by_inuse + i);
+ INIT_LIST_HEAD(&discard);
+ for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
+ INIT_LIST_HEAD(promote + i);
spin_lock_irqsave(&n->list_lock, flags);
/*
- * Build lists indexed by the items in use in each slab.
+ * Build lists of slabs to discard or promote.
*
* Note that concurrent frees may occur while we hold the
* list_lock. page->inuse here is the upper limit.
*/
list_for_each_entry_safe(page, t, &n->partial, lru) {
- list_move(&page->lru, slabs_by_inuse + page->inuse);
- if (!page->inuse)
+ int free = page->objects - page->inuse;
+
+ /* Do not reread page->inuse */
+ barrier();
+
+ /* We do not keep full slabs on the list */
+ BUG_ON(free <= 0);
+
+ if (free == page->objects) {
+ list_move(&page->lru, &discard);
n->nr_partial--;
+ } else if (free <= SHRINK_PROMOTE_MAX)
+ list_move(&page->lru, promote + free - 1);
}
/*
- * Rebuild the partial list with the slabs filled up most
- * first and the least used slabs at the end.
+ * Promote the slabs filled up most to the head of the
+ * partial list.
*/
- for (i = objects - 1; i > 0; i--)
- list_splice(slabs_by_inuse + i, n->partial.prev);
+ for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
+ list_splice(promote + i, &n->partial);
spin_unlock_irqrestore(&n->list_lock, flags);
/* Release empty slabs */
- list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+ list_for_each_entry_safe(page, t, &discard, lru)
discard_slab(s, page);
+
+ if (slabs_node(s, node))
+ ret = 1;
}
- kfree(slabs_by_inuse);
- return 0;
+ return ret;
}
static int slab_mem_going_offline_callback(void *arg)
@@ -3418,7 +3513,7 @@ static int slab_mem_going_offline_callback(void *arg)
mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list)
- __kmem_cache_shrink(s);
+ __kmem_cache_shrink(s, false);
mutex_unlock(&slab_mutex);
return 0;
@@ -3566,6 +3661,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
p->slab_cache = s;
#endif
}
+ slab_init_memcg_params(s);
list_add(&s->list, &slab_caches);
return s;
}
@@ -3624,13 +3720,10 @@ struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))
{
- struct kmem_cache *s;
+ struct kmem_cache *s, *c;
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
- int i;
- struct kmem_cache *c;
-
s->refcount++;
/*
@@ -3640,10 +3733,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
s->object_size = max(s->object_size, (int)size);
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
- for_each_memcg_cache_index(i) {
- c = cache_from_memcg_idx(s, i);
- if (!c)
- continue;
+ for_each_memcg_cache(c, s) {
c->object_size = s->object_size;
c->inuse = max_t(int, c->inuse,
ALIGN(size, sizeof(void *)));
@@ -4070,20 +4160,16 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (num_online_cpus() > 1 &&
!cpumask_empty(to_cpumask(l->cpus)) &&
- len < PAGE_SIZE - 60) {
- len += sprintf(buf + len, " cpus=");
- len += cpulist_scnprintf(buf + len,
- PAGE_SIZE - len - 50,
- to_cpumask(l->cpus));
- }
+ len < PAGE_SIZE - 60)
+ len += scnprintf(buf + len, PAGE_SIZE - len - 50,
+ " cpus=%*pbl",
+ cpumask_pr_args(to_cpumask(l->cpus)));
if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
- len < PAGE_SIZE - 60) {
- len += sprintf(buf + len, " nodes=");
- len += nodelist_scnprintf(buf + len,
- PAGE_SIZE - len - 50,
- l->nodes);
- }
+ len < PAGE_SIZE - 60)
+ len += scnprintf(buf + len, PAGE_SIZE - len - 50,
+ " nodes=%*pbl",
+ nodemask_pr_args(&l->nodes));
len += sprintf(buf + len, "\n");
}
@@ -4680,12 +4766,9 @@ static ssize_t shrink_show(struct kmem_cache *s, char *buf)
static ssize_t shrink_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- if (buf[0] == '1') {
- int rc = kmem_cache_shrink(s);
-
- if (rc)
- return rc;
- } else
+ if (buf[0] == '1')
+ kmem_cache_shrink(s);
+ else
return -EINVAL;
return length;
}
@@ -4909,7 +4992,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG_KMEM
if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
- int i;
+ struct kmem_cache *c;
mutex_lock(&slab_mutex);
if (s->max_attr_size < len)
@@ -4932,11 +5015,8 @@ static ssize_t slab_attr_store(struct kobject *kobj,
* directly either failed or succeeded, in which case we loop
* through the descendants with best-effort propagation.
*/
- for_each_memcg_cache_index(i) {
- struct kmem_cache *c = cache_from_memcg_idx(s, i);
- if (c)
- attribute->store(c, buf, len);
- }
+ for_each_memcg_cache(c, s)
+ attribute->store(c, buf, len);
mutex_unlock(&slab_mutex);
}
#endif
@@ -4953,7 +5033,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
if (is_root_cache(s))
return;
- root_cache = s->memcg_params->root_cache;
+ root_cache = s->memcg_params.root_cache;
/*
* This means this cache had no attribute written. Therefore, no point
@@ -5033,7 +5113,7 @@ static inline struct kset *cache_kset(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
if (!is_root_cache(s))
- return s->memcg_params->root_cache->memcg_kset;
+ return s->memcg_params.root_cache->memcg_kset;
#endif
return slab_kset;
}
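
The slub.c changes above thread KASAN through the allocator: new slabs are poisoned wholesale, objects are temporarily unpoisoned around the constructor, alloc/free paths call kasan_slab_alloc()/kasan_slab_free(), and metadata_access_enable()/disable() brackets slub's own redzone and tracking reads. A hedged sketch of the constructor bracket, assuming only the kasan calls that appear in this patch; init_one_object() is an illustrative name mirroring setup_object():

    #include <linux/slab.h>
    #include <linux/kasan.h>

    static void init_one_object(struct kmem_cache *s, void *object,
                                void (*ctor)(void *))
    {
            if (ctor) {
                    /* Let the constructor touch the object, then re-poison it
                     * until it is actually handed out by the allocator. */
                    kasan_unpoison_object_data(s, object);
                    ctor(object);
                    kasan_poison_object_data(s, object);
            }
    }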
diff --git a/mm/swap.c b/mm/swap.c
index 8a12b33936b4..cd3a5e64cea9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1138,12 +1138,8 @@ void __init swap_setup(void)
#ifdef CONFIG_SWAP
int i;
- if (bdi_init(swapper_spaces[0].backing_dev_info))
- panic("Failed to init swap bdi");
- for (i = 0; i < MAX_SWAPFILES; i++) {
+ for (i = 0; i < MAX_SWAPFILES; i++)
spin_lock_init(&swapper_spaces[i].tree_lock);
- INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
- }
#endif
/* Use a smaller cluster for small-memory machines */
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9711342987a0..405923f77334 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -32,17 +32,11 @@ static const struct address_space_operations swap_aops = {
#endif
};
-static struct backing_dev_info swap_backing_dev_info = {
- .name = "swap",
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-};
-
struct address_space swapper_spaces[MAX_SWAPFILES] = {
[0 ... MAX_SWAPFILES - 1] = {
.page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
.i_mmap_writable = ATOMIC_INIT(0),
.a_ops = &swap_aops,
- .backing_dev_info = &swap_backing_dev_info,
}
};
diff --git a/mm/truncate.c b/mm/truncate.c
index f1e4d6052369..ddec5a5966d7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -112,7 +112,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
struct address_space *mapping = page->mapping;
if (mapping && mapping_cap_account_dirty(mapping)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
- dec_bdi_stat(mapping->backing_dev_info,
+ dec_bdi_stat(inode_to_bdi(mapping->host),
BDI_RECLAIMABLE);
if (account_size)
task_io_account_cancelled_write(account_size);
diff --git a/mm/util.c b/mm/util.c
index fec39d4509a9..3981ae9d1b15 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -12,10 +12,30 @@
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
+#include <asm/sections.h>
#include <asm/uaccess.h>
#include "internal.h"
+static inline int is_kernel_rodata(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_rodata &&
+ addr < (unsigned long)__end_rodata;
+}
+
+/**
+ * kfree_const - conditionally free memory
+ * @x: pointer to the memory
+ *
+ * Function calls kfree only if @x is not in .rodata section.
+ */
+void kfree_const(const void *x)
+{
+ if (!is_kernel_rodata((unsigned long)x))
+ kfree(x);
+}
+EXPORT_SYMBOL(kfree_const);
+
/**
* kstrdup - allocate space for and copy an existing string
* @s: the string to duplicate
@@ -38,6 +58,24 @@ char *kstrdup(const char *s, gfp_t gfp)
EXPORT_SYMBOL(kstrdup);
/**
+ * kstrdup_const - conditionally duplicate an existing const string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Function returns source string if it is in .rodata section otherwise it
+ * fallbacks to kstrdup.
+ * Strings allocated by kstrdup_const should be freed by kfree_const.
+ */
+const char *kstrdup_const(const char *s, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)s))
+ return s;
+
+ return kstrdup(s, gfp);
+}
+EXPORT_SYMBOL(kstrdup_const);
+
+/**
* kstrndup - allocate space for and copy an existing string
* @s: the string to duplicate
* @max: read at most @max chars from @s
@@ -240,14 +278,8 @@ int __weak get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
{
struct mm_struct *mm = current->mm;
- int ret;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start, nr_pages,
- write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- return ret;
+ return get_user_pages_unlocked(current, mm, start, nr_pages,
+ write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
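
kstrdup_const()/kfree_const() above avoid copying strings that already live in the kernel's .rodata section: a literal is returned as-is, and only heap strings are duplicated and later freed. A hedged sketch of the intended pairing; set_label() and the label variable are illustrative:

    #include <linux/string.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static const char *label;

    static int set_label(const char *name)
    {
            const char *new = kstrdup_const(name, GFP_KERNEL);

            if (!new)
                    return -ENOMEM;
            kfree_const(label);	/* safe on .rodata strings and on NULL */
            label = new;
            return 0;
    }

Strings obtained from kstrdup_const() must always go back through kfree_const(), which is what the slab_kmem_cache_release() change earlier relies on.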
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 39c338896416..35b25e1340ca 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1324,10 +1324,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
if (unlikely(!area))
return NULL;
- /*
- * We always allocate a guard page.
- */
- size += PAGE_SIZE;
+ if (!(flags & VM_NO_GUARD))
+ size += PAGE_SIZE;
va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
if (IS_ERR(va)) {
@@ -1621,6 +1619,7 @@ fail:
* @end: vm area range end
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
+ * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
* @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
@@ -1630,7 +1629,8 @@ fail:
*/
void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, const void *caller)
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller)
{
struct vm_struct *area;
void *addr;
@@ -1640,8 +1640,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
- area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
- start, end, node, gfp_mask, caller);
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+ vm_flags, start, end, node, gfp_mask, caller);
if (!area)
goto fail;
@@ -1690,7 +1690,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
int node, const void *caller)
{
return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
- gfp_mask, prot, node, caller);
+ gfp_mask, prot, 0, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
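
__vmalloc_node_range() gains a vm_flags argument so that callers can pass VM_NO_GUARD and skip the trailing guard page, while existing callers such as __vmalloc_node() simply pass 0. A hedged sketch of a call using the new parameter; the wrapper name vmalloc_no_guard() and the gfp/prot choices are illustrative:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *vmalloc_no_guard(unsigned long size)
    {
            return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                        GFP_KERNEL | __GFP_HIGHMEM,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        NUMA_NO_NODE,
                                        __builtin_return_address(0));
    }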
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ab2505c3ef54..5e8eadd71bac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -91,6 +91,9 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
unsigned int may_swap:1;
+ /* Can cgroups be reclaimed below their normal consumption range? */
+ unsigned int may_thrash:1;
+
unsigned int hibernation_mode:1;
/* One of the zones is ready for compaction */
@@ -229,10 +232,10 @@ EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
-static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
- struct shrinker *shrinker,
- unsigned long nr_scanned,
- unsigned long nr_eligible)
+static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+ struct shrinker *shrinker,
+ unsigned long nr_scanned,
+ unsigned long nr_eligible)
{
unsigned long freed = 0;
unsigned long long delta;
@@ -341,9 +344,10 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
}
/**
- * shrink_node_slabs - shrink slab caches of a given node
+ * shrink_slab - shrink slab caches
* @gfp_mask: allocation context
* @nid: node whose slab caches to target
+ * @memcg: memory cgroup whose slab caches to target
* @nr_scanned: pressure numerator
* @nr_eligible: pressure denominator
*
@@ -352,6 +356,12 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
* @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
* unaware shrinkers will receive a node id of 0 instead.
*
+ * @memcg specifies the memory cgroup to target. If it is not NULL,
+ * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
+ * objects from the memory cgroup specified. Otherwise all shrinkers
+ * are called, and memcg aware shrinkers are supposed to scan the
+ * global list then.
+ *
* @nr_scanned and @nr_eligible form a ratio that indicate how much of
* the available objects should be scanned. Page reclaim for example
* passes the number of pages scanned and the number of pages on the
@@ -362,13 +372,17 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
*
* Returns the number of reclaimed slab objects.
*/
-unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
- unsigned long nr_scanned,
- unsigned long nr_eligible)
+static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
+ struct mem_cgroup *memcg,
+ unsigned long nr_scanned,
+ unsigned long nr_eligible)
{
struct shrinker *shrinker;
unsigned long freed = 0;
+ if (memcg && !memcg_kmem_is_active(memcg))
+ return 0;
+
if (nr_scanned == 0)
nr_scanned = SWAP_CLUSTER_MAX;
@@ -387,12 +401,16 @@ unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
+ .memcg = memcg,
};
+ if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))
+ continue;
+
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
sc.nid = 0;
- freed += shrink_slabs(&sc, shrinker, nr_scanned, nr_eligible);
+ freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
}
up_read(&shrinker_rwsem);
@@ -401,6 +419,29 @@ out:
return freed;
}
+void drop_slab_node(int nid)
+{
+ unsigned long freed;
+
+ do {
+ struct mem_cgroup *memcg = NULL;
+
+ freed = 0;
+ do {
+ freed += shrink_slab(GFP_KERNEL, nid, memcg,
+ 1000, 1000);
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+ } while (freed > 10);
+}
+
+void drop_slab(void)
+{
+ int nid;
+
+ for_each_online_node(nid)
+ drop_slab_node(nid);
+}
+
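/* Illustrative sketch (not part of the kernel patch above): the new
 * drop_slab_node() keeps sweeping every memory cgroup on a node until one
 * full sweep frees no more than 10 objects, and drop_slab() repeats that
 * per online node. A simplified, runnable userspace model of that control
 * flow; the cache[][] table and fake_shrink() are invented stand-ins for
 * mem_cgroup_iter()/shrink_slab().
 */
#include <stdio.h>

#define NNODES  2
#define NGROUPS 3

static unsigned long cache[NNODES][NGROUPS] = {
        { 100, 40,  7 },
        {  30,  0, 12 },
};

/* free up to 25 objects from one group on one node, return how many */
static unsigned long fake_shrink(int nid, int group)
{
        unsigned long freed = cache[nid][group] < 25 ? cache[nid][group] : 25;

        cache[nid][group] -= freed;
        return freed;
}

static void drop_node(int nid)
{
        unsigned long freed;

        do {
                freed = 0;
                for (int g = 0; g < NGROUPS; g++)   /* "iterate memcgs" */
                        freed += fake_shrink(nid, g);
                printf("node %d: sweep freed %lu\n", nid, freed);
        } while (freed > 10);   /* same stop condition as drop_slab_node() */
}

int main(void)
{
        for (int nid = 0; nid < NNODES; nid++)      /* "for_each_online_node" */
                drop_node(nid);
        return 0;
}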
static inline int is_page_cache_freeable(struct page *page)
{
/*
@@ -497,7 +538,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
}
if (mapping->a_ops->writepage == NULL)
return PAGE_ACTIVATE;
- if (!may_write_to_queue(mapping->backing_dev_info, sc))
+ if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
return PAGE_KEEP;
if (clear_page_dirty_for_io(page)) {
@@ -876,7 +917,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
mapping = page_mapping(page);
if (((dirty || writeback) && mapping &&
- bdi_write_congested(mapping->backing_dev_info)) ||
+ bdi_write_congested(inode_to_bdi(mapping->host))) ||
(writeback && PageReclaim(page)))
nr_congested++;
@@ -1903,8 +1944,12 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd() && !zone_reclaimable(zone))
- force_scan = true;
+ if (current_is_kswapd()) {
+ if (!zone_reclaimable(zone))
+ force_scan = true;
+ if (!mem_cgroup_lruvec_online(lruvec))
+ force_scan = true;
+ }
if (!global_reclaim(sc))
force_scan = true;
@@ -2269,6 +2314,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
static bool shrink_zone(struct zone *zone, struct scan_control *sc,
bool is_classzone)
{
+ struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_reclaimed, nr_scanned;
bool reclaimable = false;
@@ -2287,15 +2333,28 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
unsigned long lru_pages;
+ unsigned long scanned;
struct lruvec *lruvec;
int swappiness;
+ if (mem_cgroup_low(root, memcg)) {
+ if (!sc->may_thrash)
+ continue;
+ mem_cgroup_events(memcg, MEMCG_LOW, 1);
+ }
+
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
swappiness = mem_cgroup_swappiness(memcg);
+ scanned = sc->nr_scanned;
shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
zone_lru_pages += lru_pages;
+ if (memcg && is_classzone)
+ shrink_slab(sc->gfp_mask, zone_to_nid(zone),
+ memcg, sc->nr_scanned - scanned,
+ lru_pages);
+
/*
* Direct reclaim and kswapd have to scan all memory
* cgroups to fulfill the overall scan target for the
@@ -2311,26 +2370,20 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
mem_cgroup_iter_break(root, memcg);
break;
}
- memcg = mem_cgroup_iter(root, memcg, &reclaim);
- } while (memcg);
+ } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
/*
* Shrink the slab caches in the same proportion that
* the eligible LRU pages were scanned.
*/
- if (global_reclaim(sc) && is_classzone) {
- struct reclaim_state *reclaim_state;
-
- shrink_node_slabs(sc->gfp_mask, zone_to_nid(zone),
- sc->nr_scanned - nr_scanned,
- zone_lru_pages);
-
- reclaim_state = current->reclaim_state;
- if (reclaim_state) {
- sc->nr_reclaimed +=
- reclaim_state->reclaimed_slab;
- reclaim_state->reclaimed_slab = 0;
- }
+ if (global_reclaim(sc) && is_classzone)
+ shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
+ sc->nr_scanned - nr_scanned,
+ zone_lru_pages);
+
+ if (reclaim_state) {
+ sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+ reclaim_state->reclaimed_slab = 0;
}
vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
@@ -2515,10 +2568,11 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc)
{
+ int initial_priority = sc->priority;
unsigned long total_scanned = 0;
unsigned long writeback_threshold;
bool zones_reclaimable;
-
+retry:
delayacct_freepages_start();
if (global_reclaim(sc))
@@ -2568,6 +2622,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
if (sc->compaction_ready)
return 1;
+ /* Untapped cgroup reserves? Don't OOM, retry. */
+ if (!sc->may_thrash) {
+ sc->priority = initial_priority;
+ sc->may_thrash = 1;
+ goto retry;
+ }
+
/* Any of the zones still reclaimable? Don't OOM. */
if (zones_reclaimable)
return 1;
@@ -2656,7 +2717,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
* should make reasonable progress.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- gfp_mask, nodemask) {
+ gfp_zone(gfp_mask), nodemask) {
if (zone_idx(zone) > ZONE_NORMAL)
continue;
@@ -3175,7 +3236,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
*/
if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
pfmemalloc_watermark_ok(pgdat))
- wake_up(&pgdat->pfmemalloc_wait);
+ wake_up_all(&pgdat->pfmemalloc_wait);
/*
* Fragmentation may mean that the system cannot be rebalanced
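/* Illustrative sketch (not part of the kernel patch above): the
 * do_try_to_free_pages() change first runs reclaim while honouring
 * memory.low protection (may_thrash == 0) and, only if that pass reclaims
 * nothing, restarts once at the initial priority with may_thrash set so
 * protected groups can be scanned instead of declaring OOM. A stripped
 * userspace model of that retry shape; reclaim_pass() is an invented
 * stand-in for the real scan loop.
 */
#include <stdio.h>
#include <stdbool.h>

struct scan_ctl {
        int priority;
        bool may_thrash;
};

/* pretend only low-protected groups hold reclaimable pages */
static unsigned long reclaim_pass(const struct scan_ctl *sc)
{
        return sc->may_thrash ? 32 : 0;
}

static unsigned long try_to_free(struct scan_ctl *sc)
{
        int initial_priority = sc->priority;
        unsigned long reclaimed;

retry:
        reclaimed = reclaim_pass(sc);
        if (reclaimed)
                return reclaimed;

        /* untapped cgroup reserves? don't OOM, retry exactly once */
        if (!sc->may_thrash) {
                sc->priority = initial_priority;
                sc->may_thrash = true;
                goto retry;
        }
        return 0;
}

int main(void)
{
        struct scan_ctl sc = { .priority = 12, .may_thrash = false };

        printf("reclaimed %lu pages\n", try_to_free(&sc));
        return 0;
}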
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1284f89fca08..4f5cd974e11a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -17,6 +17,9 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
@@ -670,66 +673,6 @@ int fragmentation_index(struct zone *zone, unsigned int order)
}
#endif
-#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-static char * const migratetype_names[MIGRATE_TYPES] = {
- "Unmovable",
- "Reclaimable",
- "Movable",
- "Reserve",
-#ifdef CONFIG_CMA
- "CMA",
-#endif
-#ifdef CONFIG_MEMORY_ISOLATION
- "Isolate",
-#endif
-};
-
-static void *frag_start(struct seq_file *m, loff_t *pos)
-{
- pg_data_t *pgdat;
- loff_t node = *pos;
- for (pgdat = first_online_pgdat();
- pgdat && node;
- pgdat = next_online_pgdat(pgdat))
- --node;
-
- return pgdat;
-}
-
-static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
-{
- pg_data_t *pgdat = (pg_data_t *)arg;
-
- (*pos)++;
- return next_online_pgdat(pgdat);
-}
-
-static void frag_stop(struct seq_file *m, void *arg)
-{
-}
-
-/* Walk all the zones in a node and print using a callback */
-static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
- void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
-{
- struct zone *zone;
- struct zone *node_zones = pgdat->node_zones;
- unsigned long flags;
-
- for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
- if (!populated_zone(zone))
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- print(m, pgdat, zone);
- spin_unlock_irqrestore(&zone->lock, flags);
- }
-}
-#endif
-
#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
@@ -907,7 +850,66 @@ const char * const vmstat_text[] = {
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
+#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
+ defined(CONFIG_PROC_FS)
+static void *frag_start(struct seq_file *m, loff_t *pos)
+{
+ pg_data_t *pgdat;
+ loff_t node = *pos;
+
+ for (pgdat = first_online_pgdat();
+ pgdat && node;
+ pgdat = next_online_pgdat(pgdat))
+ --node;
+
+ return pgdat;
+}
+
+static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+ pg_data_t *pgdat = (pg_data_t *)arg;
+
+ (*pos)++;
+ return next_online_pgdat(pgdat);
+}
+
+static void frag_stop(struct seq_file *m, void *arg)
+{
+}
+
+/* Walk all the zones in a node and print using a callback */
+static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
+ void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
+{
+ struct zone *zone;
+ struct zone *node_zones = pgdat->node_zones;
+ unsigned long flags;
+
+ for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+ if (!populated_zone(zone))
+ continue;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ print(m, pgdat, zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+}
+#endif
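/* Illustrative sketch (not part of the kernel patch above): the relocated
 * walk_zones_in_node() is a plain callback walk - skip unpopulated zones,
 * take the zone lock, and hand each remaining zone to the printer the
 * caller supplied. Runnable userspace model of that pattern with invented
 * zone data and no locking.
 */
#include <stdio.h>

#define MAX_ZONES 4

struct zone { const char *name; unsigned long present_pages; };

static void walk_zones(struct zone *zones, int nr,
                       void (*print)(struct zone *zone))
{
        for (int i = 0; i < nr; i++) {
                if (!zones[i].present_pages)    /* models !populated_zone() */
                        continue;
                /* the real walker holds zone->lock around the callback */
                print(&zones[i]);
        }
}

static void show_zone(struct zone *zone)
{
        printf("%-8s %lu pages\n", zone->name, zone->present_pages);
}

int main(void)
{
        struct zone zones[MAX_ZONES] = {
                { "DMA", 4096 }, { "DMA32", 0 },
                { "Normal", 262144 }, { "Movable", 0 },
        };

        walk_zones(zones, MAX_ZONES, show_zone);
        return 0;
}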
+
#ifdef CONFIG_PROC_FS
+static char * const migratetype_names[MIGRATE_TYPES] = {
+ "Unmovable",
+ "Reclaimable",
+ "Movable",
+ "Reserve",
+#ifdef CONFIG_CMA
+ "CMA",
+#endif
+#ifdef CONFIG_MEMORY_ISOLATION
+ "Isolate",
+#endif
+};
+
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
@@ -1435,8 +1437,8 @@ static void vmstat_shepherd(struct work_struct *w)
if (need_update(cpu) &&
cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
- schedule_delayed_work_on(cpu, &per_cpu(vmstat_work, cpu),
- __round_jiffies_relative(sysctl_stat_interval, cpu));
+ schedule_delayed_work_on(cpu,
+ &per_cpu(vmstat_work, cpu), 0);
put_online_cpus();
@@ -1450,7 +1452,7 @@ static void __init start_shepherd_timer(void)
int cpu;
for_each_possible_cpu(cpu)
- INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
+ INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
vmstat_update);
if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
@@ -1536,8 +1538,6 @@ static int __init setup_vmstat(void)
module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
-#include <linux/debugfs.h>
-
/*
* Return an index indicating how much of the available free memory is
diff --git a/mm/workingset.c b/mm/workingset.c
index f7216fa7da27..aa017133744b 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -275,7 +275,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
local_irq_disable();
- shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
+ shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
local_irq_enable();
pages = node_present_pages(sc->nid);
@@ -302,6 +302,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
}
static enum lru_status shadow_lru_isolate(struct list_head *item,
+ struct list_lru_one *lru,
spinlock_t *lru_lock,
void *arg)
{
@@ -332,7 +333,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
goto out;
}
- list_del_init(item);
+ list_lru_isolate(lru, item);
spin_unlock(lru_lock);
/*
@@ -376,8 +377,8 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
local_irq_disable();
- ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
- shadow_lru_isolate, NULL, &sc->nr_to_scan);
+ ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
+ shadow_lru_isolate, NULL);
local_irq_enable();
return ret;
}
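/* Illustrative sketch (not part of the kernel patch above): the workingset
 * hunks replace list_lru_count_node()/list_lru_walk_node(), which took a
 * bare node id, with list_lru_shrink_count()/list_lru_shrink_walk(), which
 * take the whole shrink_control, and the isolate callback now removes
 * items via list_lru_isolate() on the list it was handed. A simplified
 * userspace model of that calling convention; all names below are
 * invented for illustration.
 */
#include <stdio.h>

#define NR_NODES 2

struct shrink_ctl { int nid; unsigned long nr_to_scan; };

static unsigned long per_node_count[NR_NODES] = { 5, 3 };

static unsigned long lru_shrink_count(const struct shrink_ctl *sc)
{
        /* the helper picks the right per-node (and per-memcg) list itself */
        return per_node_count[sc->nid];
}

static int isolate_one(int nid)
{
        per_node_count[nid]--;          /* stands in for list_lru_isolate() */
        return 1;
}

static unsigned long lru_shrink_walk(struct shrink_ctl *sc,
                                     int (*isolate)(int nid))
{
        unsigned long done = 0;

        while (sc->nr_to_scan && per_node_count[sc->nid]) {
                done += isolate(sc->nid);
                sc->nr_to_scan--;
        }
        return done;
}

int main(void)
{
        struct shrink_ctl sc = { .nid = 0, .nr_to_scan = 4 };

        printf("count=%lu\n", lru_shrink_count(&sc));
        printf("isolated=%lu\n", lru_shrink_walk(&sc, isolate_one));
        return 0;
}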
diff --git a/mm/zbud.c b/mm/zbud.c
index 4e387bea702e..2ee4e4520493 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -130,7 +130,8 @@ static struct zbud_ops zbud_zpool_ops = {
.evict = zbud_zpool_evict
};
-static void *zbud_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
+static void *zbud_zpool_create(char *name, gfp_t gfp,
+ struct zpool_ops *zpool_ops)
{
return zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
}
diff --git a/mm/zpool.c b/mm/zpool.c
index 739cdf0d183a..bacdab6e47de 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -129,6 +129,7 @@ static void zpool_put_driver(struct zpool_driver *driver)
/**
* zpool_create_pool() - Create a new zpool
* @type The type of the zpool to create (e.g. zbud, zsmalloc)
+ * @name The name of the zpool (e.g. zram0, zswap)
* @gfp The GFP flags to use when allocating the pool.
* @ops The optional ops callback.
*
@@ -140,7 +141,8 @@ static void zpool_put_driver(struct zpool_driver *driver)
*
* Returns: New zpool on success, NULL on failure.
*/
-struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops)
+struct zpool *zpool_create_pool(char *type, char *name, gfp_t gfp,
+ struct zpool_ops *ops)
{
struct zpool_driver *driver;
struct zpool *zpool;
@@ -168,7 +170,7 @@ struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops)
zpool->type = driver->type;
zpool->driver = driver;
- zpool->pool = driver->create(gfp, ops);
+ zpool->pool = driver->create(name, gfp, ops);
zpool->ops = ops;
if (!zpool->pool) {
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b72403927aa4..0dec1fa5f656 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -91,6 +91,7 @@
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
@@ -168,6 +169,22 @@ enum fullness_group {
ZS_FULL
};
+enum zs_stat_type {
+ OBJ_ALLOCATED,
+ OBJ_USED,
+ NR_ZS_STAT_TYPE,
+};
+
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static struct dentry *zs_stat_root;
+
+struct zs_size_stat {
+ unsigned long objs[NR_ZS_STAT_TYPE];
+};
+
+#endif
+
/*
* number of size_classes
*/
@@ -200,6 +217,10 @@ struct size_class {
/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
int pages_per_zspage;
+#ifdef CONFIG_ZSMALLOC_STAT
+ struct zs_size_stat stats;
+#endif
+
spinlock_t lock;
struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
@@ -217,10 +238,16 @@ struct link_free {
};
struct zs_pool {
+ char *name;
+
struct size_class **size_class;
gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;
+
+#ifdef CONFIG_ZSMALLOC_STAT
+ struct dentry *stat_dentry;
+#endif
};
/*
@@ -246,9 +273,9 @@ struct mapping_area {
#ifdef CONFIG_ZPOOL
-static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
+static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops)
{
- return zs_create_pool(gfp);
+ return zs_create_pool(name, gfp);
}
static void zs_zpool_destroy(void *pool)
@@ -942,6 +969,166 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static inline void zs_stat_inc(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+ class->stats.objs[type] += cnt;
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+ class->stats.objs[type] -= cnt;
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+ enum zs_stat_type type)
+{
+ return class->stats.objs[type];
+}
+
+static int __init zs_stat_init(void)
+{
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
+ if (!zs_stat_root)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+ debugfs_remove_recursive(zs_stat_root);
+}
+
+static int zs_stats_size_show(struct seq_file *s, void *v)
+{
+ int i;
+ struct zs_pool *pool = s->private;
+ struct size_class *class;
+ int objs_per_zspage;
+ unsigned long obj_allocated, obj_used, pages_used;
+ unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+
+ seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
+ "obj_allocated", "obj_used", "pages_used");
+
+ for (i = 0; i < zs_size_classes; i++) {
+ class = pool->size_class[i];
+
+ if (class->index != i)
+ continue;
+
+ spin_lock(&class->lock);
+ obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+ obj_used = zs_stat_get(class, OBJ_USED);
+ spin_unlock(&class->lock);
+
+ objs_per_zspage = get_maxobj_per_zspage(class->size,
+ class->pages_per_zspage);
+ pages_used = obj_allocated / objs_per_zspage *
+ class->pages_per_zspage;
+
+ seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
+ class->size, obj_allocated, obj_used, pages_used);
+
+ total_objs += obj_allocated;
+ total_used_objs += obj_used;
+ total_pages += pages_used;
+ }
+
+ seq_puts(s, "\n");
+ seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
+ total_objs, total_used_objs, total_pages);
+
+ return 0;
+}
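/* Illustrative note (not part of the kernel patch above): the pages_used
 * column printed by zs_stats_size_show() is derived from the counters
 * alone, assuming fully packed zspages:
 *
 *     pages_used = (obj_allocated / objs_per_zspage) * pages_per_zspage
 *
 * Tiny runnable example with made-up class parameters.
 */
#include <stdio.h>

int main(void)
{
        unsigned long obj_allocated = 700;  /* OBJ_ALLOCATED counter       */
        int objs_per_zspage         = 128;  /* objects one zspage can hold */
        int pages_per_zspage        = 4;    /* pages forming one zspage    */

        unsigned long pages_used = obj_allocated / objs_per_zspage *
                                   pages_per_zspage;

        /* 700 / 128 = 5 (integer division), 5 * 4 = 20 pages */
        printf("pages_used = %lu\n", pages_used);
        return 0;
}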
+
+static int zs_stats_size_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zs_stats_size_show, inode->i_private);
+}
+
+static const struct file_operations zs_stat_size_ops = {
+ .open = zs_stats_size_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+ struct dentry *entry;
+
+ if (!zs_stat_root)
+ return -ENODEV;
+
+ entry = debugfs_create_dir(name, zs_stat_root);
+ if (!entry) {
+ pr_warn("debugfs dir <%s> creation failed\n", name);
+ return -ENOMEM;
+ }
+ pool->stat_dentry = entry;
+
+ entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
+ pool->stat_dentry, pool, &zs_stat_size_ops);
+ if (!entry) {
+ pr_warn("%s: debugfs file entry <%s> creation failed\n",
+ name, "obj_in_classes");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void zs_pool_stat_destroy(struct zs_pool *pool)
+{
+ debugfs_remove_recursive(pool->stat_dentry);
+}
+
+#else /* CONFIG_ZSMALLOC_STAT */
+
+static inline void zs_stat_inc(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+ enum zs_stat_type type)
+{
+ return 0;
+}
+
+static int __init zs_stat_init(void)
+{
+ return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+}
+
+static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+ return 0;
+}
+
+static inline void zs_pool_stat_destroy(struct zs_pool *pool)
+{
+}
+
+#endif
+
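/* Illustrative sketch (not part of the kernel patch above): the #else
 * branch keeps every zs_stat_*() caller free of #ifdefs by providing
 * empty static inline stubs when CONFIG_ZSMALLOC_STAT is off. The same
 * pattern in a self-contained form; toggle WITH_STATS to see both
 * configurations compile unchanged at the call site.
 */
#include <stdio.h>

#define WITH_STATS 1

#if WITH_STATS
static unsigned long counter;
static inline void stat_inc(unsigned long n) { counter += n; }
static inline unsigned long stat_get(void)   { return counter; }
#else
/* stubs: callers stay unchanged, the compiler discards the calls */
static inline void stat_inc(unsigned long n) { (void)n; }
static inline unsigned long stat_get(void)   { return 0; }
#endif

int main(void)
{
        stat_inc(3);            /* no #ifdef needed here */
        printf("stat = %lu\n", stat_get());
        return 0;
}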
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
return atomic_long_read(&pool->pages_allocated);
@@ -1074,7 +1261,10 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
set_zspage_mapping(first_page, class->index, ZS_EMPTY);
atomic_long_add(class->pages_per_zspage,
&pool->pages_allocated);
+
spin_lock(&class->lock);
+ zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
}
obj = (unsigned long)first_page->freelist;
@@ -1088,6 +1278,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
kunmap_atomic(vaddr);
first_page->inuse++;
+ zs_stat_inc(class, OBJ_USED, 1);
/* Now move the zspage to another fullness group, if required */
fix_fullness_group(pool, first_page);
spin_unlock(&class->lock);
@@ -1128,6 +1319,12 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
first_page->inuse--;
fullness = fix_fullness_group(pool, first_page);
+
+ zs_stat_dec(class, OBJ_USED, 1);
+ if (fullness == ZS_EMPTY)
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+
spin_unlock(&class->lock);
if (fullness == ZS_EMPTY) {
@@ -1148,7 +1345,7 @@ EXPORT_SYMBOL_GPL(zs_free);
* On success, a pointer to the newly created pool is returned,
* otherwise NULL.
*/
-struct zs_pool *zs_create_pool(gfp_t flags)
+struct zs_pool *zs_create_pool(char *name, gfp_t flags)
{
int i;
struct zs_pool *pool;
@@ -1158,9 +1355,16 @@ struct zs_pool *zs_create_pool(gfp_t flags)
if (!pool)
return NULL;
+ pool->name = kstrdup(name, GFP_KERNEL);
+ if (!pool->name) {
+ kfree(pool);
+ return NULL;
+ }
+
pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
GFP_KERNEL);
if (!pool->size_class) {
+ kfree(pool->name);
kfree(pool);
return NULL;
}
@@ -1210,6 +1414,9 @@ struct zs_pool *zs_create_pool(gfp_t flags)
pool->flags = flags;
+ if (zs_pool_stat_create(name, pool))
+ goto err;
+
return pool;
err:
@@ -1222,6 +1429,8 @@ void zs_destroy_pool(struct zs_pool *pool)
{
int i;
+ zs_pool_stat_destroy(pool);
+
for (i = 0; i < zs_size_classes; i++) {
int fg;
struct size_class *class = pool->size_class[i];
@@ -1242,6 +1451,7 @@ void zs_destroy_pool(struct zs_pool *pool)
}
kfree(pool->size_class);
+ kfree(pool->name);
kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
@@ -1250,17 +1460,30 @@ static int __init zs_init(void)
{
int ret = zs_register_cpu_notifier();
- if (ret) {
- zs_unregister_cpu_notifier();
- return ret;
- }
+ if (ret)
+ goto notifier_fail;
init_zs_size_classes();
#ifdef CONFIG_ZPOOL
zpool_register_driver(&zs_zpool_driver);
#endif
+
+ ret = zs_stat_init();
+ if (ret) {
+ pr_err("zs stat initialization failed\n");
+ goto stat_fail;
+ }
return 0;
+
+stat_fail:
+#ifdef CONFIG_ZPOOL
+ zpool_unregister_driver(&zs_zpool_driver);
+#endif
+notifier_fail:
+ zs_unregister_cpu_notifier();
+
+ return ret;
}
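/* Illustrative sketch (not part of the kernel patch above): the reworked
 * zs_init() uses the usual goto-unwind style - set things up in order and,
 * on a late failure, tear down the earlier steps in reverse before
 * returning the error. Standalone model of that shape with invented step
 * names.
 */
#include <stdio.h>

static int  setup_a(void) { puts("A up");   return 0;  }
static void undo_a(void)  { puts("A down");             }
static int  setup_b(void) { puts("B up");   return 0;  }
static void undo_b(void)  { puts("B down");             }
static int  setup_c(void) { puts("C fails"); return -1; }

static int init_all(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                goto a_fail;

        ret = setup_b();
        if (ret)
                goto b_fail;

        ret = setup_c();
        if (ret)
                goto c_fail;

        return 0;

c_fail:
        undo_b();
b_fail:
        undo_a();
a_fail:
        return ret;
}

int main(void)
{
        printf("init_all() = %d\n", init_all());
        return 0;
}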
static void __exit zs_exit(void)
@@ -1269,6 +1492,8 @@ static void __exit zs_exit(void)
zpool_unregister_driver(&zs_zpool_driver);
#endif
zs_unregister_cpu_notifier();
+
+ zs_stat_exit();
}
module_init(zs_init);
diff --git a/mm/zswap.c b/mm/zswap.c
index 0cfce9bc51e4..4249e82ff934 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -906,11 +906,12 @@ static int __init init_zswap(void)
pr_info("loading zswap\n");
- zswap_pool = zpool_create_pool(zswap_zpool_type, gfp, &zswap_zpool_ops);
+ zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp,
+ &zswap_zpool_ops);
if (!zswap_pool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
pr_info("%s zpool not available\n", zswap_zpool_type);
zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
- zswap_pool = zpool_create_pool(zswap_zpool_type, gfp,
+ zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp,
&zswap_zpool_ops);
}
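/* Illustrative sketch (not part of the kernel patch above): init_zswap()
 * now passes the "zswap" name through zpool_create_pool() and, if the
 * configured zpool type cannot be created, falls back to the compiled-in
 * default type and tries once more. The same fallback shape in plain C;
 * make_pool() is an invented stand-in for zpool_create_pool().
 */
#include <stdio.h>
#include <string.h>

#define DEFAULT_TYPE "zbud"

static void *make_pool(const char *type, const char *name)
{
        /* pretend only the default type is available */
        return strcmp(type, DEFAULT_TYPE) ? NULL : (void *)name;
}

int main(void)
{
        const char *type = "zsmalloc";          /* configured type */
        void *pool = make_pool(type, "zswap");

        if (!pool && strcmp(type, DEFAULT_TYPE)) {
                printf("%s zpool not available, falling back\n", type);
                type = DEFAULT_TYPE;
                pool = make_pool(type, "zswap");
        }
        printf("pool %s created: %s\n", type, pool ? "yes" : "no");
        return 0;
}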
if (!zswap_pool) {
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 90cc2bdd4064..61bf2a06e85d 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
__be16 vlan_proto = skb->vlan_proto;
- u16 vlan_id = vlan_tx_tag_get_id(skb);
+ u16 vlan_id = skb_vlan_tag_get_id(skb);
struct net_device *vlan_dev;
struct vlan_pcpu_stats *rx_stats;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 8ac8a5cc2143..c92b52f37d38 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -238,6 +238,13 @@ nla_put_failure:
return -EMSGSIZE;
}
+static struct net *vlan_get_link_net(const struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ return dev_net(real_dev);
+}
+
struct rtnl_link_ops vlan_link_ops __read_mostly = {
.kind = "vlan",
.maxtype = IFLA_VLAN_MAX,
@@ -250,6 +257,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
.dellink = unregister_vlan_dev,
.get_size = vlan_get_size,
.fill_info = vlan_fill_info,
+ .get_link_net = vlan_get_link_net,
};
int __init vlan_netlink_init(void)
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 11660a3aab5a..c6fc8f756c9a 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -62,6 +62,7 @@ config BATMAN_ADV_MCAST
config BATMAN_ADV_DEBUG
bool "B.A.T.M.A.N. debugging"
depends on BATMAN_ADV
+ depends on DEBUG_FS
help
This is an option for use by developers; most people should
say N here. This enables compilation of support for
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 1e8053976e83..00e00e09b000 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -26,9 +26,8 @@
#include "bat_algo.h"
#include "network-coding.h"
-
/**
- * batadv_dup_status - duplicate status
+ * enum batadv_dup_status - duplicate status
* @BATADV_NO_DUP: the packet is a duplicate
* @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
* neighbor)
@@ -517,7 +516,7 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @packet_len: (total) length of the OGM
* @send_time: timestamp (jiffies) when the packet is to be sent
- * @direktlink: true if this is a direct link packet
+ * @directlink: true if this is a direct link packet
* @if_incoming: interface where the packet was received
* @if_outgoing: interface for which the retransmission should be considered
* @forw_packet: the forwarded packet which should be checked
@@ -879,7 +878,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
word_index = hard_iface->if_num * BATADV_NUM_WORDS;
- word = &(orig_node->bat_iv.bcast_own[word_index]);
+ word = &orig_node->bat_iv.bcast_own[word_index];
batadv_bit_get_packet(bat_priv, word, 1, 0);
if_num = hard_iface->if_num;
@@ -1362,10 +1361,10 @@ out:
return ret;
}
-
/**
* batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
* @skb: the skb containing the OGM
+ * @ogm_offset: offset from skb->data to start of ogm header
* @orig_node: the (cached) orig node for the originator of this OGM
* @if_incoming: the interface where this packet was received
* @if_outgoing: the interface for which the packet should be considered
@@ -1664,7 +1663,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
offset = if_num * BATADV_NUM_WORDS;
spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
- word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
+ word = &orig_neigh_node->bat_iv.bcast_own[offset];
bit_pos = if_incoming_seqno - 2;
bit_pos -= ntohl(ogm_packet->seqno);
batadv_set_bit(word, bit_pos);
@@ -1902,10 +1901,10 @@ out:
* batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
* neigh2 from the metric prospective
* @neigh1: the first neighbor object of the comparison
- * @if_outgoing: outgoing interface for the first neighbor
+ * @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
* @if_outgoing2: outgoing interface for the second neighbor
-
+ *
* Returns true if the metric via neigh1 is equally good or better than
* the metric via neigh2, false otherwise.
*/
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 9586750022f5..e3da07a64026 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -29,7 +29,6 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
}
-
/* receive and process one packet within the sequence number window.
*
* returns:
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index cc2407351d36..2acaafe60188 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -29,8 +29,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits,
diff = last_seqno - curr_seqno;
if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return 0;
- else
- return test_bit(diff, seq_bits) != 0;
+ return test_bit(diff, seq_bits) != 0;
}
/* turn corresponding bit on, so we can remember that we got the packet */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index a957c8140721..ac4b96eccade 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -69,7 +69,6 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data,
return hash % size;
}
-
/* compares address and vid of two backbone gws */
static int batadv_compare_backbone_gw(const struct hlist_node *node,
const void *data2)
@@ -245,14 +244,14 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
spin_unlock_bh(list_lock);
}
- /* all claims gone, intialize CRC */
+ /* all claims gone, initialize CRC */
backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
/**
* batadv_bla_send_claim - sends a claim frame according to the provided info
* @bat_priv: the bat priv with all the soft interface information
- * @orig: the mac address to be announced within the claim
+ * @mac: the mac address to be announced within the claim
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
@@ -364,6 +363,7 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address of the originator
* @vid: the VLAN ID
+ * @own_backbone: set if the requested backbone is local
*
* searches for the backbone gw or creates a new one if it could not
* be found.
@@ -454,6 +454,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
/**
* batadv_bla_answer_request - answer a bla request by sending own claims
* @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: interface where the request came on
* @vid: the vid where the request came on
*
* Repeat all of our own claims, and finally send an ANNOUNCE frame
@@ -660,7 +661,6 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
if (unlikely(!backbone_gw))
return 1;
-
/* handle as ANNOUNCE frame */
backbone_gw->lasttime = jiffies;
crc = ntohs(*((__be16 *)(&an_addr[4])));
@@ -775,6 +775,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
/**
* batadv_check_claim_group
* @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary interface of this batman interface
* @hw_src: the Hardware source in the ARP Header
* @hw_dst: the Hardware destination in the ARP Header
* @ethhdr: pointer to the Ethernet header of the claim frame
@@ -846,10 +847,10 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
return 2;
}
-
/**
* batadv_bla_process_claim
* @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
* @skb: the frame to be checked
*
* Check if this is a claim frame, and process it accordingly.
@@ -1327,7 +1328,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
goto out;
}
/* not found, add a new entry (overwrite the oldest entry)
- * and allow it, its the first occurence.
+ * and allow it, its the first occurrence.
*/
curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr %= BATADV_DUPLIST_SIZE;
@@ -1343,8 +1344,6 @@ out:
return ret;
}
-
-
/**
* batadv_bla_is_backbone_gw_orig
* @bat_priv: the bat priv with all the soft interface information
@@ -1386,7 +1385,6 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
return false;
}
-
/**
* batadv_bla_is_backbone_gw
* @skb: the frame to be checked
@@ -1476,7 +1474,6 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto allow;
-
if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
/* don't allow broadcasts while requests are in flight */
if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index a12e25efaf6f..a4972874c056 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -233,7 +233,6 @@ static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
{
- return;
}
#endif
@@ -405,6 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
.release = single_release, \
}, \
}
+
static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
batadv_originators_hardif_open);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index b5981113c9a7..aad022dd15df 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1100,6 +1100,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
}
+
/**
* batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
* DAT storage only
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index d76e1d06c5b5..2fe0764c64be 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -25,9 +25,7 @@
#include <linux/if_arp.h>
-/**
- * BATADV_DAT_ADDR_MAX - maximum address value in the DHT space
- */
+/* BATADV_DAT_ADDR_MAX - maximum address value in the DHT space */
#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
void batadv_dat_status_update(struct net_device *net_dev);
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 00f9e144cc97..3d1dcaa3e8b5 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -23,7 +23,6 @@
#include "hard-interface.h"
#include "soft-interface.h"
-
/**
* batadv_frag_clear_chain - delete entries in the fragment buffer chain
* @head: head of chain with entries.
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index 5d7a0e66a22b..d848cf6676a2 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -41,8 +41,7 @@ batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
if (!hlist_empty(&frags_entry->head) &&
batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
return true;
- else
- return false;
+ return false;
}
#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e0bcf9e84273..27649e85f3f6 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -775,6 +775,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
return ret;
}
+
/**
* batadv_gw_out_of_range - check if the dhcp request destination is the best gw
* @bat_priv: the bat priv with all the soft interface information
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index d1183e882167..12fc77bef23f 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -41,7 +41,6 @@
#include "network-coding.h"
#include "fragmentation.h"
-
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked
*/
@@ -403,6 +402,9 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
goto err_free;
}
+ /* reset control block to avoid left overs from previous users */
+ memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
+
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
@@ -651,7 +653,7 @@ static struct batadv_tvlv_handler
/**
* batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
* possibly free it
- * @tvlv_handler: the tvlv container to free
+ * @tvlv: the tvlv container to free
*/
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
@@ -796,11 +798,11 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accomodate
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
* requested packet size
* @packet_buff: packet buffer
* @packet_buff_len: packet buffer size
- * @packet_min_len: requested packet minimum size
+ * @min_packet_len: requested packet minimum size
* @additional_packet_len: requested additional packet size on top of minimum
* size
*
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a1fcd884f0b1..4d2318829a34 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.4.0"
+#define BATADV_SOURCE_VERSION "2015.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -92,9 +92,8 @@
/* numbers of originator to contact for any PUT/GET DHT operation */
#define BATADV_DAT_CANDIDATES_NUM 3
-/**
- * BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
- * at most from the primary one in order to be still considered acceptable
+/* BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
+ * at most from the primary one in order to be still considered acceptable
*/
#define BATADV_TQ_SIMILARITY_THRESHOLD 50
@@ -313,10 +312,10 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
* - when adding 128 - it is neither a predecessor nor a successor,
* - after adding more than 127 to the starting value - it is a successor
*/
-#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
- typeof(y) _d2 = (y); \
- typeof(x) _dummy = (_d1 - _d2); \
- (void) (&_d1 == &_d2); \
+#define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
+ typeof(y)_d2 = (y); \
+ typeof(x)_dummy = (_d1 - _d2); \
+ (void)(&_d1 == &_d2); \
_dummy > batadv_smallest_signed_int(_dummy); })
#define batadv_seq_after(x, y) batadv_seq_before(y, x)
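/* Illustrative note (not part of the kernel patch above): batadv_seq_before()
 * does wrap-safe sequence-number comparison - x is "before" y when the
 * unsigned difference x - y lies strictly in the upper half of the value
 * range, so a difference of exactly half the range is neither predecessor
 * nor successor, matching the comment above. Runnable 8-bit illustration
 * of the same idea (the kernel macro additionally uses the
 * (void)(&_d1 == &_d2) trick only to force matching operand types).
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool seq8_before(uint8_t x, uint8_t y)
{
        uint8_t diff = (uint8_t)(x - y);

        return diff > 128;      /* exactly 128 apart: neither before nor after */
}

int main(void)
{
        /* 250 wrapped around to 2, so 250 still counts as "before" 2 */
        printf("seq8_before(250, 2) = %d\n", seq8_before(250, 2));
        /* going from 2 to 250 would take more than 127 steps, so it is not */
        printf("seq8_before(2, 250) = %d\n", seq8_before(2, 250));
        printf("seq8_before(10, 10) = %d\n", seq8_before(10, 10));
        return 0;
}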
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 73b5d45819c1..3a44ebdb43cb 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -50,7 +50,6 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
static inline void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
- return;
}
static inline enum batadv_forw_mode
@@ -67,12 +66,10 @@ static inline int batadv_mcast_init(struct batadv_priv *bat_priv)
static inline void batadv_mcast_free(struct batadv_priv *bat_priv)
{
- return;
}
static inline void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node)
{
- return;
}
#endif /* CONFIG_BATMAN_ADV_MCAST */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index fab47f1f3ef9..127cc4d7380a 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1212,8 +1212,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
{
if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
return false;
- else
- return true;
+ return true;
}
/**
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index bea8198d0198..90e805aba379 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -797,7 +797,6 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
return ifinfo_purged;
}
-
/**
* batadv_purge_orig_neighbors - purges neighbors from originator
* @bat_priv: the bat priv with all the soft interface information
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index db3a9ed734cb..aa4a43696295 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -70,7 +70,6 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
unsigned short vid);
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
-
/* hashfunction to choose an entry in a hash table of given size
* hash algorithm from http://en.wikipedia.org/wiki/Hash_table
*/
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 34e096d2dce1..b81fbbf21a63 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -198,6 +198,7 @@ struct batadv_bla_claim_dst {
uint8_t type; /* bla_claimframe */
__be16 group; /* group id */
};
+
#pragma pack()
/**
@@ -376,7 +377,7 @@ struct batadv_frag_packet {
uint8_t reserved:4;
uint8_t no:4;
#else
-#error "unknown bitfield endianess"
+#error "unknown bitfield endianness"
#endif
uint8_t dest[ETH_ALEN];
uint8_t orig[ETH_ALEN];
@@ -452,7 +453,7 @@ struct batadv_coded_packet {
* @src: address of the source
* @dst: address of the destination
* @tvlv_len: length of tvlv data following the unicast tvlv header
- * @align: 2 bytes to align the header to a 4 byte boundry
+ * @align: 2 bytes to align the header to a 4 byte boundary
*/
struct batadv_unicast_tvlv_packet {
uint8_t packet_type;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 6648f321864d..da83982bf974 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -292,7 +292,6 @@ out:
return ret;
}
-
int batadv_recv_icmp_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
@@ -457,7 +456,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
* the last chosen bonding candidate (next_candidate). If no such
* router is found, use the first candidate found (the previously
* chosen bonding candidate might have been the last one in the list).
- * If this can't be found either, return the previously choosen
+ * If this can't be found either, return the previously chosen
* router - obviously there are no other candidates.
*/
rcu_read_lock();
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5467955eb27c..5ec31d7de24f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -36,7 +36,6 @@
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
-
static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static void batadv_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info);
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index f40cb0436eba..a75dc12f96f8 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -151,7 +151,6 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
batadv_store_##_name)
-
#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
ssize_t batadv_store_##_name(struct kobject *kobj, \
struct attribute *attr, char *buff, \
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5f59e7f899a0..07b263a437d1 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1780,7 +1780,6 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
orig_node, message);
-
out:
if (tt_global_entry)
batadv_tt_global_entry_free_ref(tt_global_entry);
@@ -2769,9 +2768,8 @@ static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
{
if (batadv_is_my_mac(bat_priv, req_dst))
return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
- else
- return batadv_send_other_tt_response(bat_priv, tt_data,
- req_src, req_dst);
+ return batadv_send_other_tt_response(bat_priv, tt_data, req_src,
+ req_dst);
}
static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
@@ -2854,7 +2852,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
/**
* batadv_is_my_client - check if a client is served by the local node
* @bat_priv: the bat priv with all the soft interface information
- * @addr: the mac adress of the client to check
+ * @addr: the mac address of the client to check
* @vid: VLAN identifier
*
* Returns true if the client is served by this node, false otherwise.
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 8854c05622a9..9398c3fb4174 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -199,7 +199,6 @@ struct batadv_orig_bat_iv {
/**
* struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
* @orig: originator ethernet address
- * @primary_addr: hosts primary interface address
* @ifinfo_list: list for routers per outgoing interface
* @last_bonding_candidate: pointer to last ifinfo of last used router
* @batadv_dat_addr_t: address of the orig node in the distributed hash
@@ -244,7 +243,6 @@ struct batadv_orig_bat_iv {
*/
struct batadv_orig_node {
uint8_t orig[ETH_ALEN];
- uint8_t primary_addr[ETH_ALEN];
struct hlist_head ifinfo_list;
struct batadv_orig_ifinfo *last_bonding_candidate;
#ifdef CONFIG_BATMAN_ADV_DAT
@@ -970,7 +968,7 @@ struct batadv_tt_orig_list_entry {
};
/**
- * struct batadv_tt_change_node - structure for tt changes occured
+ * struct batadv_tt_change_node - structure for tt changes occurred
* @list: list node for batadv_priv_tt::changes_list
* @change: holds the actual translation table diff data
*/
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index c989253737f0..1742b849fcff 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -31,7 +31,7 @@
#define VERSION "0.1"
-static struct dentry *lowpan_psm_debugfs;
+static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;
#define IFACE_NAME_TEMPLATE "bt%d"
@@ -55,11 +55,7 @@ struct skb_cb {
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);
-/* If psm is set to 0 (default value), then 6lowpan is disabled.
- * Other values are used to indicate a Protocol Service Multiplexer
- * value for 6lowpan.
- */
-static u16 psm_6lowpan;
+static bool enable_6lowpan;
/* We are listening incoming connections via this channel
*/
@@ -761,7 +757,7 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
if (hcon->type != LE_LINK)
return false;
- if (!psm_6lowpan)
+ if (!enable_6lowpan)
return false;
return true;
@@ -1085,7 +1081,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
if (!pchan)
return -EINVAL;
- err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
+ err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
addr, dst_type);
BT_DBG("chan %p err %d", pchan, err);
@@ -1118,7 +1114,7 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
struct l2cap_chan *pchan;
int err;
- if (psm_6lowpan == 0)
+ if (!enable_6lowpan)
return NULL;
pchan = chan_get();
@@ -1130,10 +1126,9 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
- BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
- pchan->src_type);
+ BT_DBG("chan %p src type %d", pchan, pchan->src_type);
- err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
+ err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
if (err) {
l2cap_chan_put(pchan);
BT_ERR("psm cannot be added err %d", err);
@@ -1219,22 +1214,23 @@ static void disconnect_all_peers(void)
spin_unlock(&devices_lock);
}
-struct set_psm {
+struct set_enable {
struct work_struct work;
- u16 psm;
+ bool flag;
};
-static void do_psm_set(struct work_struct *work)
+static void do_enable_set(struct work_struct *work)
{
- struct set_psm *set_psm = container_of(work, struct set_psm, work);
+ struct set_enable *set_enable = container_of(work,
+ struct set_enable, work);
- if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm)
+ if (!set_enable->flag || enable_6lowpan != set_enable->flag)
/* Disconnect existing connections if 6lowpan is
- * disabled (psm = 0), or if psm changes.
+ * disabled
*/
disconnect_all_peers();
- psm_6lowpan = set_psm->psm;
+ enable_6lowpan = set_enable->flag;
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
@@ -1243,33 +1239,33 @@ static void do_psm_set(struct work_struct *work)
listen_chan = bt_6lowpan_listen();
- kfree(set_psm);
+ kfree(set_enable);
}
-static int lowpan_psm_set(void *data, u64 val)
+static int lowpan_enable_set(void *data, u64 val)
{
- struct set_psm *set_psm;
+ struct set_enable *set_enable;
- set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL);
- if (!set_psm)
+ set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
+ if (!set_enable)
return -ENOMEM;
- set_psm->psm = val;
- INIT_WORK(&set_psm->work, do_psm_set);
+ set_enable->flag = !!val;
+ INIT_WORK(&set_enable->work, do_enable_set);
- schedule_work(&set_psm->work);
+ schedule_work(&set_enable->work);
return 0;
}
-static int lowpan_psm_get(void *data, u64 *val)
+static int lowpan_enable_get(void *data, u64 *val)
{
- *val = psm_6lowpan;
+ *val = enable_6lowpan;
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
- lowpan_psm_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
+ lowpan_enable_set, "%llu\n");
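/* Illustrative sketch (not part of the kernel patch above): the renamed
 * lowpan_enable_set() never flips enable_6lowpan directly from the debugfs
 * write - it allocates a small request and hands it to a work item, and
 * do_enable_set() applies the flag (disconnecting existing peers when the
 * state changes) from the worker. A stripped-down userspace model of that
 * hand-off, with a one-slot "pending" pointer standing in for
 * schedule_work(); all names below are invented.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static bool enabled;

struct set_request {
        bool flag;
        void (*fn)(struct set_request *req);
};

static struct set_request *pending;     /* the "work queue" */

static void do_set(struct set_request *req)
{
        if (!req->flag || enabled != req->flag)
                puts("disconnect existing peers");
        enabled = req->flag;
        free(req);
}

static int request_set(bool val)
{
        struct set_request *req = malloc(sizeof(*req));

        if (!req)
                return -1;
        req->flag = val;
        req->fn = do_set;
        pending = req;                  /* schedule_work() in the real code */
        return 0;
}

static void run_pending(void)           /* the worker's job */
{
        if (pending) {
                pending->fn(pending);
                pending = NULL;
        }
}

int main(void)
{
        request_set(true);
        run_pending();
        request_set(false);
        run_pending();
        printf("enabled = %d\n", enabled);
        return 0;
}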
static ssize_t lowpan_control_write(struct file *fp,
const char __user *user_buffer,
@@ -1439,9 +1435,9 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
static int __init bt_6lowpan_init(void)
{
- lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
- bt_debugfs, NULL,
- &lowpan_psm_fops);
+ lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
+ bt_debugfs, NULL,
+ &lowpan_enable_fops);
lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
bt_debugfs, NULL,
&lowpan_control_fops);
@@ -1451,7 +1447,7 @@ static int __init bt_6lowpan_init(void)
static void __exit bt_6lowpan_exit(void)
{
- debugfs_remove(lowpan_psm_debugfs);
+ debugfs_remove(lowpan_enable_debugfs);
debugfs_remove(lowpan_control_debugfs);
if (listen_chan) {
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 29bcafc41adf..7de74635a110 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -64,4 +64,31 @@ config BT_6LOWPAN
help
IPv6 compression over Bluetooth Low Energy.
+config BT_SELFTEST
+ bool "Bluetooth self testing support"
+ depends on BT && DEBUG_KERNEL
+ help
+ Run self tests when initializing the Bluetooth subsystem. This
+ is a developer option and can cause significant delay when booting
+ the system.
+
+ When the Bluetooth subsystem is built as module, then the test
+ cases are run first thing at module load time. When the Bluetooth
+ subsystem is compiled into the kernel image, then the test cases
+ are run late in the initcall hierarchy.
+
+config BT_SELFTEST_ECDH
+ bool "ECDH test cases"
+ depends on BT_LE && BT_SELFTEST
+ help
+ Run test cases for ECDH cryptographic functionality used by the
+ Bluetooth Low Energy Secure Connections feature.
+
+config BT_SELFTEST_SMP
+ bool "SMP test cases"
+ depends on BT_LE && BT_SELFTEST
+ help
+ Run test cases for SMP cryptographic functionality, including both
+ legacy SMP as well as the Secure Connections features.
+
source "drivers/bluetooth/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index a5432a6a0ae6..8e96e3072266 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,6 +13,8 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
- a2mp.o amp.o ecc.o
+ a2mp.o amp.o ecc.o hci_request.o hci_debugfs.o
+
+bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 012e3b03589d..ce22e0cfa923 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -31,6 +31,8 @@
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
+#include "selftest.h"
+
#define VERSION "2.20"
/* Bluetooth sockets */
@@ -716,6 +718,10 @@ static int __init bt_init(void)
BT_INFO("Core ver %s", VERSION);
+ err = bt_selftest();
+ if (err < 0)
+ return err;
+
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
err = bt_sysfs_init();
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ce82722d049b..05f57e491ccb 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -511,13 +511,12 @@ static int bnep_session(void *arg)
static struct device *bnep_get_device(struct bnep_session *session)
{
- struct hci_conn *conn;
+ struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn;
- conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
- if (!conn)
+ if (!conn || !conn->hcon)
return NULL;
- return &conn->dev;
+ return &conn->hcon->dev;
}
static struct device_type bnep_type = {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 1ca8a87a0787..75bd2c42e3e7 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -253,8 +253,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
if (skb->len < CAPI_MSG_BASELEN + 15)
break;
- controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10);
-
if (!info && ctrl) {
int len = min_t(uint, CAPI_MANUFACTURER_LEN,
skb->data[CAPI_MSG_BASELEN + 14]);
@@ -270,8 +268,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
if (skb->len < CAPI_MSG_BASELEN + 32)
break;
- controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
-
if (!info && ctrl) {
ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16);
ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20);
@@ -285,8 +281,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
if (skb->len < CAPI_MSG_BASELEN + 17)
break;
- controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
-
if (!info && ctrl) {
int len = min_t(uint, CAPI_SERIAL_LEN,
skb->data[CAPI_MSG_BASELEN + 16]);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fe18825cc8a4..c9b8fa544785 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -25,11 +25,13 @@
/* Bluetooth HCI connection handling. */
#include <linux/export.h>
+#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
+#include "hci_request.h"
#include "smp.h"
#include "a2mp.h"
@@ -546,6 +548,8 @@ int hci_conn_del(struct hci_conn *conn)
hci_conn_del_sysfs(conn);
+ debugfs_remove_recursive(conn->debugfs);
+
if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
@@ -629,7 +633,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
mgmt_reenable_advertising(hdev);
}
-static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct hci_conn *conn;
@@ -1080,21 +1084,6 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
}
EXPORT_SYMBOL(hci_conn_check_secure);
-/* Change link key */
-int hci_conn_change_link_key(struct hci_conn *conn)
-{
- BT_DBG("hcon %p", conn);
-
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
- struct hci_cp_change_conn_link_key cp;
- cp.handle = cpu_to_le16(conn->handle);
- hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
- sizeof(cp), &cp);
- }
-
- return 0;
-}
-
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 5dcacf9607e4..3322d3f4c85a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -37,6 +37,8 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
+#include "hci_request.h"
+#include "hci_debugfs.h"
#include "smp.h"
static void hci_rx_work(struct work_struct *work);
@@ -137,941 +139,9 @@ static const struct file_operations dut_mode_fops = {
.llseek = default_llseek,
};
-static int features_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
- u8 p;
-
- hci_dev_lock(hdev);
- for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
- seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
- "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
- hdev->features[p][0], hdev->features[p][1],
- hdev->features[p][2], hdev->features[p][3],
- hdev->features[p][4], hdev->features[p][5],
- hdev->features[p][6], hdev->features[p][7]);
- }
- if (lmp_le_capable(hdev))
- seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
- "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
- hdev->le_features[0], hdev->le_features[1],
- hdev->le_features[2], hdev->le_features[3],
- hdev->le_features[4], hdev->le_features[5],
- hdev->le_features[6], hdev->le_features[7]);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int features_open(struct inode *inode, struct file *file)
-{
- return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
- .open = features_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int blacklist_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct bdaddr_list *b;
-
- hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->blacklist, list)
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int blacklist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
- .open = blacklist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int uuids_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct bt_uuid *uuid;
-
- hci_dev_lock(hdev);
- list_for_each_entry(uuid, &hdev->uuids, list) {
- u8 i, val[16];
-
- /* The Bluetooth UUID values are stored in big endian,
- * but with reversed byte order. So convert them into
- * the right order for the %pUb modifier.
- */
- for (i = 0; i < 16; i++)
- val[i] = uuid->uuid[15 - i];
-
- seq_printf(f, "%pUb\n", val);
- }
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int uuids_open(struct inode *inode, struct file *file)
-{
- return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
- .open = uuids_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int inquiry_cache_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct discovery_state *cache = &hdev->discovery;
- struct inquiry_entry *e;
-
- hci_dev_lock(hdev);
-
- list_for_each_entry(e, &cache->all, all) {
- struct inquiry_data *data = &e->data;
- seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- &data->bdaddr,
- data->pscan_rep_mode, data->pscan_period_mode,
- data->pscan_mode, data->dev_class[2],
- data->dev_class[1], data->dev_class[0],
- __le16_to_cpu(data->clock_offset),
- data->rssi, data->ssp_mode, e->timestamp);
- }
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int link_keys_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
- struct link_key *key;
-
- rcu_read_lock();
- list_for_each_entry_rcu(key, &hdev->link_keys, list)
- seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
- HCI_LINK_KEY_SIZE, key->val, key->pin_len);
- rcu_read_unlock();
-
- return 0;
-}
-
-static int link_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
- .open = link_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int dev_class_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
-
- hci_dev_lock(hdev);
- seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
- hdev->dev_class[1], hdev->dev_class[0]);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int dev_class_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
- .open = dev_class_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int voice_setting_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->voice_setting;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
- NULL, "0x%4.4llx\n");
-
-static int auto_accept_delay_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- hdev->auto_accept_delay = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->auto_accept_delay;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
- auto_accept_delay_set, "%llu\n");
-
-static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_sc_support_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[32];
- size_t buf_size = min(count, (sizeof(buf)-1));
- bool enable;
-
- if (test_bit(HCI_UP, &hdev->flags))
- return -EBUSY;
-
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
- if (strtobool(buf, &enable))
- return -EINVAL;
-
- if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
- return -EALREADY;
-
- change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
-
- return count;
-}
-
-static const struct file_operations force_sc_support_fops = {
- .open = simple_open,
- .read = force_sc_support_read,
- .write = force_sc_support_write,
- .llseek = default_llseek,
-};
-
-static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_lesc_support_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[32];
- size_t buf_size = min(count, (sizeof(buf)-1));
- bool enable;
-
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
- if (strtobool(buf, &enable))
- return -EINVAL;
-
- if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
- return -EALREADY;
-
- change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
-
- return count;
-}
-
-static const struct file_operations force_lesc_support_fops = {
- .open = simple_open,
- .read = force_lesc_support_read,
- .write = force_lesc_support_write,
- .llseek = default_llseek,
-};
-
-static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static const struct file_operations sc_only_mode_fops = {
- .open = simple_open,
- .read = sc_only_mode_read,
- .llseek = default_llseek,
-};
-
-static int idle_timeout_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val != 0 && (val < 500 || val > 3600000))
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->idle_timeout = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int idle_timeout_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->idle_timeout;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
- idle_timeout_set, "%llu\n");
-
-static int rpa_timeout_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- /* Require the RPA timeout to be at least 30 seconds and at most
- * 24 hours.
- */
- if (val < 30 || val > (60 * 60 * 24))
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->rpa_timeout = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int rpa_timeout_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->rpa_timeout;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
- rpa_timeout_set, "%llu\n");
-
-static int sniff_min_interval_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->sniff_min_interval = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int sniff_min_interval_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->sniff_min_interval;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
- sniff_min_interval_set, "%llu\n");
-
-static int sniff_max_interval_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->sniff_max_interval = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int sniff_max_interval_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->sniff_max_interval;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
- sniff_max_interval_set, "%llu\n");
-
-static int conn_info_min_age_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val == 0 || val > hdev->conn_info_max_age)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->conn_info_min_age = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int conn_info_min_age_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->conn_info_min_age;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
- conn_info_min_age_set, "%llu\n");
-
-static int conn_info_max_age_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val == 0 || val < hdev->conn_info_min_age)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->conn_info_max_age = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int conn_info_max_age_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->conn_info_max_age;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
- conn_info_max_age_set, "%llu\n");
-
-static int identity_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- bdaddr_t addr;
- u8 addr_type;
-
- hci_dev_lock(hdev);
-
- hci_copy_identity_address(hdev, &addr, &addr_type);
-
- seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
- 16, hdev->irk, &hdev->rpa);
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int identity_open(struct inode *inode, struct file *file)
-{
- return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
- .open = identity_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int random_address_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
-
- hci_dev_lock(hdev);
- seq_printf(f, "%pMR\n", &hdev->random_addr);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int random_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
- .open = random_address_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int static_address_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
-
- hci_dev_lock(hdev);
- seq_printf(f, "%pMR\n", &hdev->static_addr);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int static_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
- .open = static_address_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static ssize_t force_static_address_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_static_address_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[32];
- size_t buf_size = min(count, (sizeof(buf)-1));
- bool enable;
-
- if (test_bit(HCI_UP, &hdev->flags))
- return -EBUSY;
-
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
- if (strtobool(buf, &enable))
- return -EINVAL;
-
- if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
- return -EALREADY;
-
- change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
-
- return count;
-}
-
-static const struct file_operations force_static_address_fops = {
- .open = simple_open,
- .read = force_static_address_read,
- .write = force_static_address_write,
- .llseek = default_llseek,
-};
-
-static int white_list_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
- struct bdaddr_list *b;
-
- hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->le_white_list, list)
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int white_list_open(struct inode *inode, struct file *file)
-{
- return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
- .open = white_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
- struct smp_irk *irk;
-
- rcu_read_lock();
- list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
- seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
- &irk->bdaddr, irk->addr_type,
- 16, irk->val, &irk->rpa);
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, identity_resolving_keys_show,
- inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
- .open = identity_resolving_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int long_term_keys_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
- struct smp_ltk *ltk;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
- seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
- &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
- ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
- __le64_to_cpu(ltk->rand), 16, ltk->val);
- rcu_read_unlock();
-
- return 0;
-}
-
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
- .open = long_term_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int conn_min_interval_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_conn_min_interval = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int conn_min_interval_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_conn_min_interval;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
- conn_min_interval_set, "%llu\n");
-
-static int conn_max_interval_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_conn_max_interval = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int conn_max_interval_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_conn_max_interval;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
- conn_max_interval_set, "%llu\n");
-
-static int conn_latency_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val > 0x01f3)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_conn_latency = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int conn_latency_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_conn_latency;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
- conn_latency_set, "%llu\n");
-
-static int supervision_timeout_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val < 0x000a || val > 0x0c80)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_supv_timeout = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int supervision_timeout_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_supv_timeout;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
- supervision_timeout_set, "%llu\n");
-
-static int adv_channel_map_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val < 0x01 || val > 0x07)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_adv_channel_map = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int adv_channel_map_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_adv_channel_map;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
- adv_channel_map_set, "%llu\n");
-
-static int adv_min_interval_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_adv_min_interval = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int adv_min_interval_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_adv_min_interval;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
- adv_min_interval_set, "%llu\n");
-
-static int adv_max_interval_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
- return -EINVAL;
-
- hci_dev_lock(hdev);
- hdev->le_adv_max_interval = val;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int adv_max_interval_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
- *val = hdev->le_adv_max_interval;
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
- adv_max_interval_set, "%llu\n");
-
-static int device_list_show(struct seq_file *f, void *ptr)
-{
- struct hci_dev *hdev = f->private;
- struct hci_conn_params *p;
- struct bdaddr_list *b;
-
- hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->whitelist, list)
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
- list_for_each_entry(p, &hdev->le_conn_params, list) {
- seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
- p->auto_connect);
- }
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int device_list_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
- .open = device_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
/* ---- HCI requests ---- */
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
BT_DBG("%s result 0x%2.2x", hdev->name, result);
@@ -1427,43 +497,6 @@ static void le_setup(struct hci_request *req)
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
-static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
-{
- if (lmp_ext_inq_capable(hdev))
- return 0x02;
-
- if (lmp_inq_rssi_capable(hdev))
- return 0x01;
-
- if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
- hdev->lmp_subver == 0x0757)
- return 0x01;
-
- if (hdev->manufacturer == 15) {
- if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
- return 0x01;
- if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
- return 0x01;
- if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
- return 0x01;
- }
-
- if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
- hdev->lmp_subver == 0x1805)
- return 0x01;
-
- return 0x00;
-}
-
-static void hci_setup_inquiry_mode(struct hci_request *req)
-{
- u8 mode;
-
- mode = hci_get_inquiry_mode(req->hdev);
-
- hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
-}
-
static void hci_setup_event_mask(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
@@ -1553,10 +586,16 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
if (lmp_le_capable(hdev))
le_setup(req);
- /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
- * local supported commands HCI command.
+ /* All Bluetooth 1.2 and later controllers should support the
+ * HCI command for reading the local supported commands.
+ *
+ * Unfortunately some controllers indicate Bluetooth 1.2 support,
+ * but do not have support for this command. If that is the case,
+ * the driver can quirk the behavior and skip reading the local
+ * supported commands.
*/
- if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+ !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (lmp_ssp_capable(hdev)) {
@@ -1570,6 +609,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
u8 mode = 0x01;
+
hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
sizeof(mode), &mode);
} else {
@@ -1582,8 +622,18 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
}
}
- if (lmp_inq_rssi_capable(hdev))
- hci_setup_inquiry_mode(req);
+ if (lmp_inq_rssi_capable(hdev) ||
+ test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
+ u8 mode;
+
+ /* If Extended Inquiry Result events are supported, then
+ * they are clearly preferred over Inquiry Result with RSSI
+ * events.
+ */
+ mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
+
+ hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+ }
if (lmp_inq_tx_pwr_capable(hdev))
hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
@@ -1682,27 +732,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_setup_event_mask(req);
- /* Some Broadcom based Bluetooth controllers do not support the
- * Delete Stored Link Key command. They are clearly indicating its
- * absence in the bit mask of supported commands.
- *
- * Check the supported commands and only if the the command is marked
- * as supported send it. If not supported assume that the controller
- * does not have actual support for stored link keys which makes this
- * command redundant anyway.
- *
- * Some controllers indicate that they support handling deleting
- * stored link keys, but they don't. The quirk lets a driver
- * just disable this command.
- */
- if (hdev->commands[6] & 0x80 &&
- !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
- struct hci_cp_delete_stored_link_key cp;
+ if (hdev->commands[6] & 0x20) {
+ struct hci_cp_read_stored_link_key cp;
bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 0x01;
- hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
- sizeof(cp), &cp);
+ cp.read_all = 0x01;
+ hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
}
if (hdev->commands[5] & 0x10)
@@ -1735,6 +770,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* Parameter Request
*/
+ /* If the controller supports the Data Length Extension
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
+ events[0] |= 0x40; /* LE Data Length Change */
+
/* If the controller supports Extended Scanner Filter
* Policies, enable the corresponding event.
*/
@@ -1765,6 +806,14 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
}
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+ /* Read LE Maximum Data Length */
+ hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
+
+ /* Read LE Suggested Default Data Length */
+ hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
+ }
+
hci_set_le_support(req);
}
@@ -1782,6 +831,29 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
+ /* Some Broadcom based Bluetooth controllers do not support the
+ * Delete Stored Link Key command. They are clearly indicating its
+ * absence in the bit mask of supported commands.
+ *
+ * Check the supported commands and only if the command is marked
+ * as supported send it. If not supported assume that the controller
+ * does not have actual support for stored link keys which makes this
+ * command redundant anyway.
+ *
+ * Some controllers indicate that they support handling deleting
+ * stored link keys, but they don't. The quirk lets a driver
+ * just disable this command.
+ */
+ if (hdev->commands[6] & 0x80 &&
+ !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
+ struct hci_cp_delete_stored_link_key cp;
+
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 0x01;
+ hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp);
+ }
+
/* Set event mask page 2 if the HCI command for it is supported */
if (hdev->commands[22] & 0x04)
hci_set_event_mask_page_2(req);
@@ -1799,8 +871,10 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Enable Secure Connections if supported and configured */
- if (bredr_sc_enabled(hdev)) {
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ bredr_sc_enabled(hdev)) {
u8 support = 0x01;
+
hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
sizeof(support), &support);
}
@@ -1841,110 +915,29 @@ static int __hci_init(struct hci_dev *hdev)
if (err < 0)
return err;
- /* Only create debugfs entries during the initial setup
- * phase and not every time the controller gets powered on.
+ /* This function is only called when the controller is actually in
+ * configured state. When the controller is marked as unconfigured,
+ * this initialization procedure is not run.
+ *
+ * It means that it is possible that a controller runs through its
+ * setup phase and then discovers missing settings. If that is the
+ * case, then this function will not be called. It then will only
+ * be called during the config phase.
+ *
+ * So only when in setup phase or config phase, create the debugfs
+ * entries and register the SMP channels.
*/
- if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+ !test_bit(HCI_CONFIG, &hdev->dev_flags))
return 0;
- debugfs_create_file("features", 0444, hdev->debugfs, hdev,
- &features_fops);
- debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
- &hdev->manufacturer);
- debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
- debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
- debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
- &device_list_fops);
- debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
- &blacklist_fops);
- debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
-
- debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
- &conn_info_min_age_fops);
- debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
- &conn_info_max_age_fops);
-
- if (lmp_bredr_capable(hdev)) {
- debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
- debugfs_create_file("link_keys", 0400, hdev->debugfs,
- hdev, &link_keys_fops);
- debugfs_create_file("dev_class", 0444, hdev->debugfs,
- hdev, &dev_class_fops);
- debugfs_create_file("voice_setting", 0444, hdev->debugfs,
- hdev, &voice_setting_fops);
- }
+ hci_debugfs_create_common(hdev);
- if (lmp_ssp_capable(hdev)) {
- debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
- hdev, &auto_accept_delay_fops);
- debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
- hdev, &force_sc_support_fops);
- debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
- hdev, &sc_only_mode_fops);
- if (lmp_le_capable(hdev))
- debugfs_create_file("force_lesc_support", 0644,
- hdev->debugfs, hdev,
- &force_lesc_support_fops);
- }
-
- if (lmp_sniff_capable(hdev)) {
- debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
- hdev, &idle_timeout_fops);
- debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
- hdev, &sniff_min_interval_fops);
- debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
- hdev, &sniff_max_interval_fops);
- }
+ if (lmp_bredr_capable(hdev))
+ hci_debugfs_create_bredr(hdev);
- if (lmp_le_capable(hdev)) {
- debugfs_create_file("identity", 0400, hdev->debugfs,
- hdev, &identity_fops);
- debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
- hdev, &rpa_timeout_fops);
- debugfs_create_file("random_address", 0444, hdev->debugfs,
- hdev, &random_address_fops);
- debugfs_create_file("static_address", 0444, hdev->debugfs,
- hdev, &static_address_fops);
-
- /* For controllers with a public address, provide a debug
- * option to force the usage of the configured static
- * address. By default the public address is used.
- */
- if (bacmp(&hdev->bdaddr, BDADDR_ANY))
- debugfs_create_file("force_static_address", 0644,
- hdev->debugfs, hdev,
- &force_static_address_fops);
-
- debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
- &hdev->le_white_list_size);
- debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
- &white_list_fops);
- debugfs_create_file("identity_resolving_keys", 0400,
- hdev->debugfs, hdev,
- &identity_resolving_keys_fops);
- debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
- hdev, &long_term_keys_fops);
- debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
- hdev, &conn_min_interval_fops);
- debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
- hdev, &conn_max_interval_fops);
- debugfs_create_file("conn_latency", 0644, hdev->debugfs,
- hdev, &conn_latency_fops);
- debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
- hdev, &supervision_timeout_fops);
- debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
- hdev, &adv_channel_map_fops);
- debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
- hdev, &adv_min_interval_fops);
- debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
- hdev, &adv_max_interval_fops);
- debugfs_create_u16("discov_interleaved_timeout", 0644,
- hdev->debugfs,
- &hdev->discov_interleaved_timeout);
-
- smp_register(hdev);
- }
+ if (lmp_le_capable(hdev))
+ hci_debugfs_create_le(hdev);
return 0;
}
@@ -2624,6 +1617,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
cancel_delayed_work(&hdev->service_cache);
cancel_delayed_work_sync(&hdev->le_scan_disable);
+ cancel_delayed_work_sync(&hdev->le_scan_restart);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
cancel_delayed_work_sync(&hdev->rpa_expired);
@@ -2635,6 +1629,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
if (hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
@@ -2645,6 +1641,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hci_conn_hash_flush(hdev);
hci_dev_unlock(hdev);
+ smp_unregister(hdev);
+
hci_notify(hdev, HCI_DEV_DOWN);
if (hdev->flush)
@@ -2724,32 +1722,14 @@ done:
return err;
}
-int hci_dev_reset(__u16 dev)
+static int hci_dev_do_reset(struct hci_dev *hdev)
{
- struct hci_dev *hdev;
- int ret = 0;
+ int ret;
- hdev = hci_dev_get(dev);
- if (!hdev)
- return -ENODEV;
+ BT_DBG("%s %p", hdev->name, hdev);
hci_req_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- ret = -ENETDOWN;
- goto done;
- }
-
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
- ret = -EBUSY;
- goto done;
- }
-
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
- ret = -EOPNOTSUPP;
- goto done;
- }
-
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
@@ -2772,12 +1752,41 @@ int hci_dev_reset(__u16 dev)
ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
-done:
hci_req_unlock(hdev);
- hci_dev_put(hdev);
return ret;
}
+int hci_dev_reset(__u16 dev)
+{
+ struct hci_dev *hdev;
+ int err;
+
+ hdev = hci_dev_get(dev);
+ if (!hdev)
+ return -ENODEV;
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = -ENETDOWN;
+ goto done;
+ }
+
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ err = hci_dev_do_reset(hdev);
+
+done:
+ hci_dev_put(hdev);
+ return err;
+}
+
int hci_dev_reset_stat(__u16 dev)
{
struct hci_dev *hdev;
@@ -3143,6 +2152,24 @@ static void hci_power_off(struct work_struct *work)
hci_dev_do_close(hdev);
}
+static void hci_error_reset(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+
+ BT_DBG("%s", hdev->name);
+
+ if (hdev->hw_error)
+ hdev->hw_error(hdev, hdev->hw_error_code);
+ else
+ BT_ERR("%s hardware error 0x%2.2x", hdev->name,
+ hdev->hw_error_code);
+
+ if (hci_dev_do_close(hdev))
+ return;
+
+ hci_dev_do_open(hdev);
+}
+
static void hci_discov_off(struct work_struct *work)
{
struct hci_dev *hdev;
@@ -3555,9 +2582,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (hash192 && rand192) {
memcpy(data->hash192, hash192, sizeof(data->hash192));
memcpy(data->rand192, rand192, sizeof(data->rand192));
+ if (hash256 && rand256)
+ data->present = 0x03;
} else {
memset(data->hash192, 0, sizeof(data->hash192));
memset(data->rand192, 0, sizeof(data->rand192));
+ if (hash256 && rand256)
+ data->present = 0x02;
+ else
+ data->present = 0x00;
}
if (hash256 && rand256) {
@@ -3566,6 +2599,8 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
} else {
memset(data->hash256, 0, sizeof(data->hash256));
memset(data->rand256, 0, sizeof(data->rand256));
+ if (hash192 && rand192)
+ data->present = 0x01;
}
BT_DBG("%s for %pMR", hdev->name, bdaddr);
@@ -3659,23 +2694,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
return NULL;
}
-static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
-{
- struct hci_conn *conn;
-
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
- if (!conn)
- return false;
-
- if (conn->dst_type != type)
- return false;
-
- if (conn->state != BT_CONNECTED)
- return false;
-
- return true;
-}
-
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr, u8 addr_type)
@@ -3731,47 +2749,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
return params;
}
-/* This function requires the caller holds hdev->lock */
-int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
- u8 auto_connect)
-{
- struct hci_conn_params *params;
-
- params = hci_conn_params_add(hdev, addr, addr_type);
- if (!params)
- return -EIO;
-
- if (params->auto_connect == auto_connect)
- return 0;
-
- list_del_init(&params->action);
-
- switch (auto_connect) {
- case HCI_AUTO_CONN_DISABLED:
- case HCI_AUTO_CONN_LINK_LOSS:
- hci_update_background_scan(hdev);
- break;
- case HCI_AUTO_CONN_REPORT:
- list_add(&params->action, &hdev->pend_le_reports);
- hci_update_background_scan(hdev);
- break;
- case HCI_AUTO_CONN_DIRECT:
- case HCI_AUTO_CONN_ALWAYS:
- if (!is_connected(hdev, addr, addr_type)) {
- list_add(&params->action, &hdev->pend_le_conns);
- hci_update_background_scan(hdev);
- }
- break;
- }
-
- params->auto_connect = auto_connect;
-
- BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
- auto_connect);
-
- return 0;
-}
-
static void hci_conn_params_free(struct hci_conn_params *params)
{
if (params->conn) {
@@ -3828,7 +2805,7 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
BT_DBG("All LE connection parameters were removed");
}
-static void inquiry_complete(struct hci_dev *hdev, u8 status)
+static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
if (status) {
BT_ERR("Failed to start inquiry: status %d", status);
@@ -3840,7 +2817,8 @@ static void inquiry_complete(struct hci_dev *hdev, u8 status)
}
}
-static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
+static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
@@ -3853,6 +2831,8 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
return;
}
+ hdev->discovery.scan_start = 0;
+
switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
hci_dev_lock(hdev);
@@ -3892,6 +2872,8 @@ static void le_scan_disable_work(struct work_struct *work)
BT_DBG("%s", hdev->name);
+ cancel_delayed_work_sync(&hdev->le_scan_restart);
+
hci_req_init(&req, hdev);
hci_req_add_le_scan_disable(&req);
@@ -3901,110 +2883,72 @@ static void le_scan_disable_work(struct work_struct *work)
BT_ERR("Disable LE scanning request failed: err %d", err);
}
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
- struct hci_dev *hdev = req->hdev;
+ unsigned long timeout, duration, scan_start, now;
- /* If we're advertising or initiating an LE connection we can't
- * go ahead and change the random address at this time. This is
- * because the eventual initiator address used for the
- * subsequently created connection will be undefined (some
- * controllers use the new address and others the one we had
- * when the operation started).
- *
- * In this kind of scenario skip the update and let the random
- * address be updated at the next cycle.
- */
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
- hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
- BT_DBG("Deferring random address update");
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ BT_DBG("%s", hdev->name);
+
+ if (status) {
+ BT_ERR("Failed to restart LE scan: status %d", status);
return;
}
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
-}
-
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
- u8 *own_addr_type)
-{
- struct hci_dev *hdev = req->hdev;
- int err;
+ if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+ !hdev->discovery.scan_start)
+ return;
- /* If privacy is enabled use a resolvable private address. If
- * current RPA has expired or there is something else than
- * the current RPA in use, then generate a new one.
+ /* When the scan was started, hdev->le_scan_disable has been queued
+ * after duration from scan_start. During scan restart this job
+ * has been canceled, and we need to queue it again after proper
+ * timeout, to make sure that scan does not run indefinitely.
*/
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
- int to;
-
- *own_addr_type = ADDR_LE_DEV_RANDOM;
-
- if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
- !bacmp(&hdev->random_addr, &hdev->rpa))
- return 0;
-
- err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
- if (err < 0) {
- BT_ERR("%s failed to generate new RPA", hdev->name);
- return err;
- }
-
- set_random_addr(req, &hdev->rpa);
-
- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
- queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
+ duration = hdev->discovery.scan_duration;
+ scan_start = hdev->discovery.scan_start;
+ now = jiffies;
+ if (now - scan_start <= duration) {
+ int elapsed;
+
+ if (now >= scan_start)
+ elapsed = now - scan_start;
+ else
+ elapsed = ULONG_MAX - scan_start + now;
- return 0;
+ timeout = duration - elapsed;
+ } else {
+ timeout = 0;
}
+ queue_delayed_work(hdev->workqueue,
+ &hdev->le_scan_disable, timeout);
+}
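The comment above amounts to: requeue le_scan_disable with whatever remains of the original scan window, guarding against the tick counter having wrapped between scan_start and now. A minimal user-space sketch of that arithmetic, with plain unsigned longs standing in for jiffies and an illustrative function name:

#include <limits.h>
#include <stdio.h>

static unsigned long remaining_timeout(unsigned long now,
				       unsigned long scan_start,
				       unsigned long duration)
{
	unsigned long elapsed;

	/* Unsigned subtraction handles a wrap between scan_start and now,
	 * just like "now - scan_start" does on jiffies.
	 */
	if (now - scan_start > duration)
		return 0;		/* the scan window is already over */

	if (now >= scan_start)
		elapsed = now - scan_start;
	else
		elapsed = ULONG_MAX - scan_start + now;

	return duration - elapsed;
}

int main(void)
{
	/* Hypothetical numbers: a 10240-tick window, 4000 ticks elapsed. */
	printf("%lu\n", remaining_timeout(14000, 10000, 10240));	/* 6240 */
	return 0;
}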
- /* In case of required privacy without resolvable private address,
- * use an non-resolvable private address. This is useful for active
- * scanning and non-connectable advertising.
- */
- if (require_privacy) {
- bdaddr_t nrpa;
+static void le_scan_restart_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_restart.work);
+ struct hci_request req;
+ struct hci_cp_le_set_scan_enable cp;
+ int err;
- while (true) {
- /* The non-resolvable private address is generated
- * from random six bytes with the two most significant
- * bits cleared.
- */
- get_random_bytes(&nrpa, 6);
- nrpa.b[5] &= 0x3f;
+ BT_DBG("%s", hdev->name);
- /* The non-resolvable private address shall not be
- * equal to the public address.
- */
- if (bacmp(&hdev->bdaddr, &nrpa))
- break;
- }
+ /* If controller is not scanning we are done. */
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return;
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &nrpa);
- return 0;
- }
+ hci_req_init(&req, hdev);
- /* If forcing static address is in use or there is no public
- * address use the static address as random address (but skip
- * the HCI command if the current random address is already the
- * static one.
- */
- if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- if (bacmp(&hdev->static_addr, &hdev->random_addr))
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
- &hdev->static_addr);
- return 0;
- }
+ hci_req_add_le_scan_disable(&req);
- /* Neither privacy nor static address is being used so use a
- * public address.
- */
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_ENABLE;
+ cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
- return 0;
+ err = hci_req_run(&req, le_scan_restart_work_complete);
+ if (err)
+ BT_ERR("Restart LE scan request failed: err %d", err);
}
/* Copy the Identity Address of the controller.
@@ -4015,12 +2959,18 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
*
* For debugging purposes it is possible to force controllers with a
* public address to use the static random address instead.
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller and
+ * userspace has configured a static address, then that address
+ * becomes the identity address instead of the public BR/EDR address.
*/
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type)
{
if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
bacpy(bdaddr, &hdev->static_addr);
*bdaddr_type = ADDR_LE_DEV_RANDOM;
} else {
@@ -4059,6 +3009,12 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_conn_max_interval = 0x0038;
hdev->le_conn_latency = 0x0000;
hdev->le_supv_timeout = 0x002a;
+ hdev->le_def_tx_len = 0x001b;
+ hdev->le_def_tx_time = 0x0148;
+ hdev->le_max_tx_len = 0x001b;
+ hdev->le_max_tx_time = 0x0148;
+ hdev->le_max_rx_len = 0x001b;
+ hdev->le_max_rx_time = 0x0148;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -4086,10 +3042,12 @@ struct hci_dev *hci_alloc_dev(void)
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->error_reset, hci_error_reset);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
@@ -4259,8 +3217,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- smp_unregister(hdev);
-
device_del(&hdev->dev);
debugfs_remove_recursive(hdev->debugfs);
@@ -4539,76 +3495,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
-{
- skb_queue_head_init(&req->cmd_q);
- req->hdev = hdev;
- req->err = 0;
-}
-
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
-{
- struct hci_dev *hdev = req->hdev;
- struct sk_buff *skb;
- unsigned long flags;
-
- BT_DBG("length %u", skb_queue_len(&req->cmd_q));
-
- /* If an error occurred during request building, remove all HCI
- * commands queued on the HCI request queue.
- */
- if (req->err) {
- skb_queue_purge(&req->cmd_q);
- return req->err;
- }
-
- /* Do not allow empty requests */
- if (skb_queue_empty(&req->cmd_q))
- return -ENODATA;
-
- skb = skb_peek_tail(&req->cmd_q);
- bt_cb(skb)->req.complete = complete;
-
- spin_lock_irqsave(&hdev->cmd_q.lock, flags);
- skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
- spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
- queue_work(hdev->workqueue, &hdev->cmd_work);
-
- return 0;
-}
-
bool hci_req_pending(struct hci_dev *hdev)
{
return (hdev->req_status == HCI_REQ_PEND);
}
-static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
- u32 plen, const void *param)
-{
- int len = HCI_COMMAND_HDR_SIZE + plen;
- struct hci_command_hdr *hdr;
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
- hdr->opcode = cpu_to_le16(opcode);
- hdr->plen = plen;
-
- if (plen)
- memcpy(skb_put(skb, plen), param, plen);
-
- BT_DBG("skb len %d", skb->len);
-
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- bt_cb(skb)->opcode = opcode;
-
- return skb;
-}
-
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
const void *param)
@@ -4634,43 +3525,6 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
return 0;
}
-/* Queue a command to an asynchronous HCI request */
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event)
-{
- struct hci_dev *hdev = req->hdev;
- struct sk_buff *skb;
-
- BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
-
- /* If an error occurred during request building, there is no point in
- * queueing the HCI command. We can simply return.
- */
- if (req->err)
- return;
-
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
- if (!skb) {
- BT_ERR("%s no memory for command (opcode 0x%4.4x)",
- hdev->name, opcode);
- req->err = -ENOMEM;
- return;
- }
-
- if (skb_queue_empty(&req->cmd_q))
- bt_cb(skb)->req.start = true;
-
- bt_cb(skb)->req.event = event;
-
- skb_queue_tail(&req->cmd_q, skb);
-}
-
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param)
-{
- hci_req_add_ev(req, opcode, plen, param, 0);
-}
-
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
@@ -5429,7 +4283,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
call_complete:
if (req_complete)
- req_complete(hdev, status);
+ req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}
static void hci_rx_work(struct work_struct *work)
@@ -5518,302 +4372,3 @@ static void hci_cmd_work(struct work_struct *work)
}
}
}
-
-void hci_req_add_le_scan_disable(struct hci_request *req)
-{
- struct hci_cp_le_set_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_DISABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static void add_to_white_list(struct hci_request *req,
- struct hci_conn_params *params)
-{
- struct hci_cp_le_add_to_white_list cp;
-
- cp.bdaddr_type = params->addr_type;
- bacpy(&cp.bdaddr, &params->addr);
-
- hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
-}
-
-static u8 update_white_list(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_conn_params *params;
- struct bdaddr_list *b;
- uint8_t white_list_entries = 0;
-
- /* Go through the current white list programmed into the
- * controller one by one and check if that address is still
- * in the list of pending connections or list of devices to
- * report. If not present in either list, then queue the
- * command to remove it from the controller.
- */
- list_for_each_entry(b, &hdev->le_white_list, list) {
- struct hci_cp_le_del_from_white_list cp;
-
- if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
- &b->bdaddr, b->bdaddr_type) ||
- hci_pend_le_action_lookup(&hdev->pend_le_reports,
- &b->bdaddr, b->bdaddr_type)) {
- white_list_entries++;
- continue;
- }
-
- cp.bdaddr_type = b->bdaddr_type;
- bacpy(&cp.bdaddr, &b->bdaddr);
-
- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
- sizeof(cp), &cp);
- }
-
- /* Since all no longer valid white list entries have been
- * removed, walk through the list of pending connections
- * and ensure that any new device gets programmed into
- * the controller.
- *
- * If the list of the devices is larger than the list of
- * available white list entries in the controller, then
- * just abort and return filer policy value to not use the
- * white list.
- */
- list_for_each_entry(params, &hdev->pend_le_conns, action) {
- if (hci_bdaddr_list_lookup(&hdev->le_white_list,
- &params->addr, params->addr_type))
- continue;
-
- if (white_list_entries >= hdev->le_white_list_size) {
- /* Select filter policy to accept all advertising */
- return 0x00;
- }
-
- if (hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type)) {
- /* White list can not be used with RPAs */
- return 0x00;
- }
-
- white_list_entries++;
- add_to_white_list(req, params);
- }
-
- /* After adding all new pending connections, walk through
- * the list of pending reports and also add these to the
- * white list if there is still space.
- */
- list_for_each_entry(params, &hdev->pend_le_reports, action) {
- if (hci_bdaddr_list_lookup(&hdev->le_white_list,
- &params->addr, params->addr_type))
- continue;
-
- if (white_list_entries >= hdev->le_white_list_size) {
- /* Select filter policy to accept all advertising */
- return 0x00;
- }
-
- if (hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type)) {
- /* White list can not be used with RPAs */
- return 0x00;
- }
-
- white_list_entries++;
- add_to_white_list(req, params);
- }
-
- /* Select filter policy to use white list */
- return 0x01;
-}
-
-void hci_req_add_le_passive_scan(struct hci_request *req)
-{
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
- struct hci_dev *hdev = req->hdev;
- u8 own_addr_type;
- u8 filter_policy;
-
- /* Set require_privacy to false since no SCAN_REQ are send
- * during passive scanning. Not using an non-resolvable address
- * here is important so that peer devices using direct
- * advertising with our address will be correctly reported
- * by the controller.
- */
- if (hci_update_random_address(req, false, &own_addr_type))
- return;
-
- /* Adding or removing entries from the white list must
- * happen before enabling scanning. The controller does
- * not allow white list modification while scanning.
- */
- filter_policy = update_white_list(req);
-
- /* When the controller is using random resolvable addresses and
- * with that having LE privacy enabled, then controllers with
- * Extended Scanner Filter Policies support can now enable support
- * for handling directed advertising.
- *
- * So instead of using filter polices 0x00 (no whitelist)
- * and 0x01 (whitelist enabled) use the new filter policies
- * 0x02 (no whitelist) and 0x03 (whitelist enabled).
- */
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
- (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
- filter_policy |= 0x02;
-
- memset(&param_cp, 0, sizeof(param_cp));
- param_cp.type = LE_SCAN_PASSIVE;
- param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
- param_cp.window = cpu_to_le16(hdev->le_scan_window);
- param_cp.own_address_type = own_addr_type;
- param_cp.filter_policy = filter_policy;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
-}
-
-static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
-{
- if (status)
- BT_DBG("HCI request failed to update background scanning: "
- "status 0x%2.2x", status);
-}
-
-/* This function controls the background scanning based on hdev->pend_le_conns
- * list. If there are pending LE connection we start the background scanning,
- * otherwise we stop it.
- *
- * This function requires the caller holds hdev->lock.
- */
-void hci_update_background_scan(struct hci_dev *hdev)
-{
- struct hci_request req;
- struct hci_conn *conn;
- int err;
-
- if (!test_bit(HCI_UP, &hdev->flags) ||
- test_bit(HCI_INIT, &hdev->flags) ||
- test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags) ||
- test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
- test_bit(HCI_UNREGISTER, &hdev->dev_flags))
- return;
-
- /* No point in doing scanning if LE support hasn't been enabled */
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- return;
-
- /* If discovery is active don't interfere with it */
- if (hdev->discovery.state != DISCOVERY_STOPPED)
- return;
-
- /* Reset RSSI and UUID filters when starting background scanning
- * since these filters are meant for service discovery only.
- *
- * The Start Discovery and Start Service Discovery operations
- * ensure to set proper values for RSSI threshold and UUID
- * filter list. So it is safe to just reset them here.
- */
- hci_discovery_filter_clear(hdev);
-
- hci_req_init(&req, hdev);
-
- if (list_empty(&hdev->pend_le_conns) &&
- list_empty(&hdev->pend_le_reports)) {
- /* If there is no pending LE connections or devices
- * to be scanned for, we should stop the background
- * scanning.
- */
-
- /* If controller is not scanning we are done. */
- if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
- return;
-
- hci_req_add_le_scan_disable(&req);
-
- BT_DBG("%s stopping background scanning", hdev->name);
- } else {
- /* If there is at least one pending LE connection, we should
- * keep the background scan running.
- */
-
- /* If controller is connecting, we should not start scanning
- * since some controllers are not able to scan and connect at
- * the same time.
- */
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
- return;
-
- /* If controller is currently scanning, we stop it to ensure we
- * don't miss any advertising (due to duplicates filter).
- */
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
- hci_req_add_le_scan_disable(&req);
-
- hci_req_add_le_passive_scan(&req);
-
- BT_DBG("%s starting background scanning", hdev->name);
- }
-
- err = hci_req_run(&req, update_background_scan_complete);
- if (err)
- BT_ERR("Failed to run HCI request: err %d", err);
-}
-
-static bool disconnected_whitelist_entries(struct hci_dev *hdev)
-{
- struct bdaddr_list *b;
-
- list_for_each_entry(b, &hdev->whitelist, list) {
- struct hci_conn *conn;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
- if (!conn)
- return true;
-
- if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
- return true;
- }
-
- return false;
-}
-
-void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
-{
- u8 scan;
-
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- return;
-
- if (!hdev_is_powered(hdev))
- return;
-
- if (mgmt_powering_down(hdev))
- return;
-
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
- disconnected_whitelist_entries(hdev))
- scan = SCAN_PAGE;
- else
- scan = SCAN_DISABLED;
-
- if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
- return;
-
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- scan |= SCAN_INQUIRY;
-
- if (req)
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- else
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-}
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
new file mode 100644
index 000000000000..65261e5d4b84
--- /dev/null
+++ b/net/bluetooth/hci_debugfs.c
@@ -0,0 +1,1056 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_debugfs.h"
+
+static int features_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ u8 p;
+
+ hci_dev_lock(hdev);
+ for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+ seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+ "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
+ hdev->features[p][0], hdev->features[p][1],
+ hdev->features[p][2], hdev->features[p][3],
+ hdev->features[p][4], hdev->features[p][5],
+ hdev->features[p][6], hdev->features[p][7]);
+ }
+ if (lmp_le_capable(hdev))
+ seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+ "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
+ hdev->le_features[0], hdev->le_features[1],
+ hdev->le_features[2], hdev->le_features[3],
+ hdev->le_features[4], hdev->le_features[5],
+ hdev->le_features[6], hdev->le_features[7]);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int features_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, features_show, inode->i_private);
+}
+
+static const struct file_operations features_fops = {
+ .open = features_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int device_list_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct hci_conn_params *p;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->whitelist, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
+ p->auto_connect);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int device_list_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, device_list_show, inode->i_private);
+}
+
+static const struct file_operations device_list_fops = {
+ .open = device_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int blacklist_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->blacklist, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int blacklist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, blacklist_show, inode->i_private);
+}
+
+static const struct file_operations blacklist_fops = {
+ .open = blacklist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct bt_uuid *uuid;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u8 i, val[16];
+
+ /* The Bluetooth UUID values are stored with the byte order
+ * reversed, so convert them into the big-endian order
+ * expected by the %pUb modifier.
+ */
+ for (i = 0; i < 16; i++)
+ val[i] = uuid->uuid[15 - i];
+
+ seq_printf(f, "%pUb\n", val);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+ .open = uuids_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int remote_oob_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct oob_data *data;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(data, &hdev->remote_oob_data, list) {
+ seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n",
+ &data->bdaddr, data->bdaddr_type, data->present,
+ 16, data->hash192, 16, data->rand192,
+ 16, data->hash256, 16, data->rand256);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int remote_oob_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, remote_oob_show, inode->i_private);
+}
+
+static const struct file_operations remote_oob_fops = {
+ .open = remote_oob_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int conn_info_min_age_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val > hdev->conn_info_max_age)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->conn_info_min_age = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_info_min_age_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->conn_info_min_age;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
+ conn_info_min_age_set, "%llu\n");
+
+static int conn_info_max_age_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val < hdev->conn_info_min_age)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->conn_info_max_age = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_info_max_age_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->conn_info_max_age;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
+ conn_info_max_age_set, "%llu\n");
+
+static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations use_debug_keys_fops = {
+ .open = simple_open,
+ .read = use_debug_keys_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations sc_only_mode_fops = {
+ .open = simple_open,
+ .read = sc_only_mode_read,
+ .llseek = default_llseek,
+};
+
+void hci_debugfs_create_common(struct hci_dev *hdev)
+{
+ debugfs_create_file("features", 0444, hdev->debugfs, hdev,
+ &features_fops);
+ debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
+ &hdev->manufacturer);
+ debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
+ debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+ debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
+ &hdev->hw_error_code);
+
+ debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
+ &device_list_fops);
+ debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
+ &blacklist_fops);
+ debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+ debugfs_create_file("remote_oob", 0400, hdev->debugfs, hdev,
+ &remote_oob_fops);
+
+ debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
+ &conn_info_min_age_fops);
+ debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
+ &conn_info_max_age_fops);
+
+ if (lmp_ssp_capable(hdev) || lmp_le_capable(hdev))
+ debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
+ hdev, &use_debug_keys_fops);
+
+ if (lmp_sc_capable(hdev) || lmp_le_capable(hdev))
+ debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
+ hdev, &sc_only_mode_fops);
+}
+
+static int inquiry_cache_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ hci_dev_lock(hdev);
+
+ list_for_each_entry(e, &cache->all, all) {
+ struct inquiry_data *data = &e->data;
+ seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ &data->bdaddr,
+ data->pscan_rep_mode, data->pscan_period_mode,
+ data->pscan_mode, data->dev_class[2],
+ data->dev_class[1], data->dev_class[0],
+ __le16_to_cpu(data->clock_offset),
+ data->rssi, data->ssp_mode, e->timestamp);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, inquiry_cache_show, inode->i_private);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+ .open = inquiry_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int link_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct link_key *key;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(key, &hdev->link_keys, list)
+ seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
+ HCI_LINK_KEY_SIZE, key->val, key->pin_len);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int link_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, link_keys_show, inode->i_private);
+}
+
+static const struct file_operations link_keys_fops = {
+ .open = link_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dev_class_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int dev_class_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dev_class_show, inode->i_private);
+}
+
+static const struct file_operations dev_class_fops = {
+ .open = dev_class_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int voice_setting_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->voice_setting;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
+ NULL, "0x%4.4llx\n");
+
+static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = hdev->ssp_debug_mode ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations ssp_debug_mode_fops = {
+ .open = simple_open,
+ .read = ssp_debug_mode_read,
+ .llseek = default_llseek,
+};
+
+static int auto_accept_delay_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ hdev->auto_accept_delay = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->auto_accept_delay;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+ auto_accept_delay_set, "%llu\n");
+
+static int idle_timeout_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val != 0 && (val < 500 || val > 3600000))
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->idle_timeout = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int idle_timeout_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->idle_timeout;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
+ idle_timeout_set, "%llu\n");
+
+static int sniff_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->sniff_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int sniff_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->sniff_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
+ sniff_min_interval_set, "%llu\n");
+
+static int sniff_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->sniff_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int sniff_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->sniff_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
+ sniff_max_interval_set, "%llu\n");
+
+void hci_debugfs_create_bredr(struct hci_dev *hdev)
+{
+ debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, hdev,
+ &inquiry_cache_fops);
+ debugfs_create_file("link_keys", 0400, hdev->debugfs, hdev,
+ &link_keys_fops);
+ debugfs_create_file("dev_class", 0444, hdev->debugfs, hdev,
+ &dev_class_fops);
+ debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev,
+ &voice_setting_fops);
+
+ if (lmp_ssp_capable(hdev)) {
+ debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
+ hdev, &ssp_debug_mode_fops);
+ debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
+ hdev, &auto_accept_delay_fops);
+ }
+
+ if (lmp_sniff_capable(hdev)) {
+ debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
+ hdev, &idle_timeout_fops);
+ debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
+ hdev, &sniff_min_interval_fops);
+ debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
+ hdev, &sniff_max_interval_fops);
+ }
+}
+
+static int identity_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ bdaddr_t addr;
+ u8 addr_type;
+
+ hci_dev_lock(hdev);
+
+ hci_copy_identity_address(hdev, &addr, &addr_type);
+
+ seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
+ 16, hdev->irk, &hdev->rpa);
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int identity_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, identity_show, inode->i_private);
+}
+
+static const struct file_operations identity_fops = {
+ .open = identity_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int rpa_timeout_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ /* Require the RPA timeout to be at least 30 seconds and at most
+ * 24 hours.
+ */
+ if (val < 30 || val > (60 * 60 * 24))
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->rpa_timeout = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int rpa_timeout_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->rpa_timeout;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
+ rpa_timeout_set, "%llu\n");
+
+static int random_address_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "%pMR\n", &hdev->random_addr);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int random_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, random_address_show, inode->i_private);
+}
+
+static const struct file_operations random_address_fops = {
+ .open = random_address_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int static_address_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "%pMR\n", &hdev->static_addr);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int static_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, static_address_show, inode->i_private);
+}
+
+static const struct file_operations static_address_fops = {
+ .open = static_address_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t force_static_address_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_static_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+
+ if (test_bit(HCI_UP, &hdev->flags))
+ return -EBUSY;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
+ return -EALREADY;
+
+ change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
+
+ return count;
+}
+
+static const struct file_operations force_static_address_fops = {
+ .open = simple_open,
+ .read = force_static_address_read,
+ .write = force_static_address_write,
+ .llseek = default_llseek,
+};
+
+static int white_list_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->le_white_list, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int white_list_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, white_list_show, inode->i_private);
+}
+
+static const struct file_operations white_list_fops = {
+ .open = white_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct smp_irk *irk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
+ seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
+ &irk->bdaddr, irk->addr_type,
+ 16, irk->val, &irk->rpa);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int identity_resolving_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, identity_resolving_keys_show,
+ inode->i_private);
+}
+
+static const struct file_operations identity_resolving_keys_fops = {
+ .open = identity_resolving_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct smp_ltk *ltk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
+ seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
+ &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+ ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+ __le64_to_cpu(ltk->rand), 16, ltk->val);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int long_term_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, long_term_keys_show, inode->i_private);
+}
+
+static const struct file_operations long_term_keys_fops = {
+ .open = long_term_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+ conn_min_interval_set, "%llu\n");
+
+static int conn_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
+ conn_max_interval_set, "%llu\n");
+
+static int conn_latency_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val > 0x01f3)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_latency = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_latency_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_latency;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
+ conn_latency_set, "%llu\n");
+
+static int supervision_timeout_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x000a || val > 0x0c80)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_supv_timeout = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int supervision_timeout_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_supv_timeout;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
+ supervision_timeout_set, "%llu\n");
+
+static int adv_channel_map_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x01 || val > 0x07)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_adv_channel_map = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int adv_channel_map_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_adv_channel_map;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
+ adv_channel_map_set, "%llu\n");
+
+static int adv_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_adv_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int adv_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_adv_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
+ adv_min_interval_set, "%llu\n");
+
+static int adv_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_adv_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int adv_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_adv_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
+ adv_max_interval_set, "%llu\n");
+
+void hci_debugfs_create_le(struct hci_dev *hdev)
+{
+ debugfs_create_file("identity", 0400, hdev->debugfs, hdev,
+ &identity_fops);
+ debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, hdev,
+ &rpa_timeout_fops);
+ debugfs_create_file("random_address", 0444, hdev->debugfs, hdev,
+ &random_address_fops);
+ debugfs_create_file("static_address", 0444, hdev->debugfs, hdev,
+ &static_address_fops);
+
+ /* For controllers with a public address, provide a debug
+ * option to force the usage of the configured static
+ * address. By default the public address is used.
+ */
+ if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+ debugfs_create_file("force_static_address", 0644,
+ hdev->debugfs, hdev,
+ &force_static_address_fops);
+
+ debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
+ &hdev->le_white_list_size);
+ debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
+ &white_list_fops);
+ debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs,
+ hdev, &identity_resolving_keys_fops);
+ debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev,
+ &long_term_keys_fops);
+ debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, hdev,
+ &conn_min_interval_fops);
+ debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, hdev,
+ &conn_max_interval_fops);
+ debugfs_create_file("conn_latency", 0644, hdev->debugfs, hdev,
+ &conn_latency_fops);
+ debugfs_create_file("supervision_timeout", 0644, hdev->debugfs, hdev,
+ &supervision_timeout_fops);
+ debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, hdev,
+ &adv_channel_map_fops);
+ debugfs_create_file("adv_min_interval", 0644, hdev->debugfs, hdev,
+ &adv_min_interval_fops);
+ debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, hdev,
+ &adv_max_interval_fops);
+ debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs,
+ &hdev->discov_interleaved_timeout);
+}
+
+void hci_debugfs_create_conn(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ char name[6];
+
+ if (IS_ERR_OR_NULL(hdev->debugfs))
+ return;
+
+ snprintf(name, sizeof(name), "%u", conn->handle);
+ conn->debugfs = debugfs_create_dir(name, hdev->debugfs);
+}
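
Most of the read-only entries added in hci_debugfs.c above share one seq_file pattern: a *_show() dump guarded by hci_dev_lock(), a single_open() wrapper, and a const file_operations registered via debugfs_create_file(). A condensed sketch of that pattern follows; the "foo" names are placeholders and not part of the patch.

    static int foo_show(struct seq_file *f, void *ptr)
    {
            struct hci_dev *hdev = f->private;

            hci_dev_lock(hdev);
            seq_printf(f, "%pMR\n", &hdev->bdaddr);  /* dump some hdev state */
            hci_dev_unlock(hdev);

            return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, foo_show, inode->i_private);
    }

    static const struct file_operations foo_fops = {
            .open           = foo_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = single_release,
    };

    /* and registered from one of the hci_debugfs_create_*() helpers with:
     * debugfs_create_file("foo", 0444, hdev->debugfs, hdev, &foo_fops);
     */
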
diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h
new file mode 100644
index 000000000000..fb68efe083c5
--- /dev/null
+++ b/net/bluetooth/hci_debugfs.h
@@ -0,0 +1,26 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+void hci_debugfs_create_common(struct hci_dev *hdev);
+void hci_debugfs_create_bredr(struct hci_dev *hdev);
+void hci_debugfs_create_le(struct hci_dev *hdev);
+void hci_debugfs_create_conn(struct hci_conn *conn);
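
The four hooks declared here split the debugfs setup by feature set. hci_debugfs_create_conn() is wired up in the connection-complete handlers in hci_event.c below; the common/BR-EDR/LE hooks are presumably called from the controller init path in hci_core.c, whose hunks are not part of this excerpt. A hedged sketch of such a caller:

    /* Sketch only: the function name and call site are assumptions, not
     * taken from this patch; it just shows the intended split of the hooks.
     */
    static void example_setup_debugfs(struct hci_dev *hdev)
    {
            hci_debugfs_create_common(hdev);

            if (lmp_bredr_capable(hdev))
                    hci_debugfs_create_bredr(hdev);

            if (lmp_le_capable(hdev))
                    hci_debugfs_create_le(hdev);
    }
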
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 3f2e8b830cbd..a3fb094822b6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -30,10 +30,15 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
+#include "hci_request.h"
+#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
+#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+
/* Handle HCI Event packets */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -195,7 +200,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
/* Reset all non-persistent flags */
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- hdev->discovery.state = DISCOVERY_STOPPED;
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
hdev->inq_tx_power = HCI_TX_POWER_INVALID;
hdev->adv_tx_power = HCI_TX_POWER_INVALID;
@@ -212,6 +218,40 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_bdaddr_list_clear(&hdev->le_white_list);
}
+static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
+ struct hci_cp_read_stored_link_key *sent;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
+ if (!sent)
+ return;
+
+ if (!rp->status && sent->read_all == 0x01) {
+ hdev->stored_max_keys = rp->max_keys;
+ hdev->stored_num_keys = rp->num_keys;
+ }
+}
+
+static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ if (rp->num_keys <= hdev->stored_num_keys)
+ hdev->stored_num_keys -= rp->num_keys;
+ else
+ hdev->stored_num_keys = 0;
+}
+
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -489,9 +529,7 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SC;
}
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
- mgmt_sc_enable_complete(hdev, sent->support, status);
- else if (!status) {
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
if (sent->support)
set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
else
@@ -1282,6 +1320,55 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
memcpy(hdev->le_states, rp->le_states, 8);
}
+static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
+ hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
+}
+
+static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_write_def_data_len *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
+ if (!sent)
+ return;
+
+ hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
+ hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
+}
+
+static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
+ hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
+ hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
+ hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
+}
+
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -1402,6 +1489,21 @@ unlock:
hci_dev_unlock(hdev);
}
+static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 status = *((u8 *) skb->data);
+ u8 *mode;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
+ if (mode)
+ hdev->ssp_debug_mode = *mode;
+}
+
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -2115,6 +2217,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
} else
conn->state = BT_CONNECTED;
+ hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
@@ -2130,7 +2233,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
sizeof(cp), &cp);
- hci_update_page_scan(hdev, NULL);
+ hci_update_page_scan(hdev);
}
/* Set packet type for incoming connection */
@@ -2316,7 +2419,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
hci_remove_link_key(hdev, &conn->dst);
- hci_update_page_scan(hdev, NULL);
+ hci_update_page_scan(hdev);
}
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
@@ -2583,7 +2686,8 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
if (conn->state != BT_CONFIG)
goto unlock;
- if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
+ if (!ev->status && lmp_ext_feat_capable(hdev) &&
+ lmp_ext_feat_capable(conn)) {
struct hci_cp_read_remote_ext_features cp;
cp.handle = ev->handle;
cp.page = 0x01;
@@ -2662,6 +2766,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cc_reset(hdev, skb);
break;
+ case HCI_OP_READ_STORED_LINK_KEY:
+ hci_cc_read_stored_link_key(hdev, skb);
+ break;
+
+ case HCI_OP_DELETE_STORED_LINK_KEY:
+ hci_cc_delete_stored_link_key(hdev, skb);
+ break;
+
case HCI_OP_WRITE_LOCAL_NAME:
hci_cc_write_local_name(hdev, skb);
break;
@@ -2854,6 +2966,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cc_le_read_supported_states(hdev, skb);
break;
+ case HCI_OP_LE_READ_DEF_DATA_LEN:
+ hci_cc_le_read_def_data_len(hdev, skb);
+ break;
+
+ case HCI_OP_LE_WRITE_DEF_DATA_LEN:
+ hci_cc_le_write_def_data_len(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_MAX_DATA_LEN:
+ hci_cc_le_read_max_data_len(hdev, skb);
+ break;
+
case HCI_OP_WRITE_LE_HOST_SUPPORTED:
hci_cc_write_le_host_supported(hdev, skb);
break;
@@ -2874,6 +2998,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cc_read_tx_power(hdev, skb);
break;
+ case HCI_OP_WRITE_SSP_DEBUG_MODE:
+ hci_cc_write_ssp_debug_mode(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
@@ -2992,7 +3120,9 @@ static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_hardware_error *ev = (void *) skb->data;
- BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
+ hdev->hw_error_code = ev->code;
+
+ queue_work(hdev->req_workqueue, &hdev->error_reset);
}
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3584,6 +3714,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
+ hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
break;
@@ -3750,6 +3881,52 @@ static u8 hci_get_auth_req(struct hci_conn *conn)
return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}
+static u8 bredr_oob_data_present(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct oob_data *data;
+
+ data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
+ if (!data)
+ return 0x00;
+
+ if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
+ if (bredr_sc_enabled(hdev)) {
+ /* When Secure Connections is enabled, then just
+ * return the present value stored with the OOB
+ * data. The stored value contains the right present
+ * information. However it can only be trusted when
+ * not in Secure Connection Only mode.
+ */
+ if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
+ return data->present;
+
+ /* When Secure Connections Only mode is enabled, then
+ * the P-256 values are required. If they are not
+ * available, then do not declare that OOB data is
+ * present.
+ */
+ if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+ !memcmp(data->hash256, ZERO_KEY, 16))
+ return 0x00;
+
+ return 0x02;
+ }
+
+ /* When Secure Connections is not enabled or actually
+ * not supported by the hardware, then check if the
+ * P-192 data values are present.
+ */
+ if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+ !memcmp(data->hash192, ZERO_KEY, 16))
+ return 0x00;
+
+ return 0x01;
+ }
+
+ return 0x00;
+}
+
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -3801,12 +3978,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->auth_type &= HCI_AT_NO_BONDING_MITM;
cp.authentication = conn->auth_type;
-
- if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
- (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
- cp.oob_data = 0x01;
- else
- cp.oob_data = 0x00;
+ cp.oob_data = bredr_oob_data_present(conn);
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
sizeof(cp), &cp);
@@ -4058,33 +4230,39 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
- if (data) {
- if (bredr_sc_enabled(hdev)) {
- struct hci_cp_remote_oob_ext_data_reply cp;
-
- bacpy(&cp.bdaddr, &ev->bdaddr);
- memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
- memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
- memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
- memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
+ if (!data) {
+ struct hci_cp_remote_oob_data_neg_reply cp;
- hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
- sizeof(cp), &cp);
- } else {
- struct hci_cp_remote_oob_data_reply cp;
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+ sizeof(cp), &cp);
+ goto unlock;
+ }
- bacpy(&cp.bdaddr, &ev->bdaddr);
- memcpy(cp.hash, data->hash192, sizeof(cp.hash));
- memcpy(cp.rand, data->rand192, sizeof(cp.rand));
+ if (bredr_sc_enabled(hdev)) {
+ struct hci_cp_remote_oob_ext_data_reply cp;
- hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
- sizeof(cp), &cp);
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+ memset(cp.hash192, 0, sizeof(cp.hash192));
+ memset(cp.rand192, 0, sizeof(cp.rand192));
+ } else {
+ memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
+ memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
}
+ memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
+ memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
+
+ hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
+ sizeof(cp), &cp);
} else {
- struct hci_cp_remote_oob_data_neg_reply cp;
+ struct hci_cp_remote_oob_data_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+ memcpy(cp.hash, data->hash192, sizeof(cp.hash));
+ memcpy(cp.rand, data->rand192, sizeof(cp.rand));
+
+ hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
sizeof(cp), &cp);
}
@@ -4124,6 +4302,7 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_drop(hcon);
+ hci_debugfs_create_conn(hcon);
hci_conn_add_sysfs(hcon);
amp_physical_cfm(bredr_hcon, hcon);
@@ -4330,6 +4509,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->le_conn_latency = le16_to_cpu(ev->latency);
conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
+ hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
hci_proto_connect_cfm(conn, ev->status);
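
As a reading aid for the new bredr_oob_data_present() helper above: its return value is copied into the OOB data present field of the IO Capability Request Reply, where 0x00 means no OOB data, 0x01 means the P-192 hash/randomizer values are available, and 0x02 means the P-256 values are available. An illustrative enum (these constants are not defined by the patch, which uses the raw values directly):

    enum example_oob_data_present {
            EXAMPLE_OOB_NONE = 0x00,        /* no usable OOB data stored   */
            EXAMPLE_OOB_P192 = 0x01,        /* hash192/rand192 usable      */
            EXAMPLE_OOB_P256 = 0x02,        /* hash256/rand256 usable (SC) */
    };
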
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
new file mode 100644
index 000000000000..b59f92c6df0c
--- /dev/null
+++ b/net/bluetooth/hci_request.c
@@ -0,0 +1,556 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "smp.h"
+#include "hci_request.h"
+
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
+{
+ skb_queue_head_init(&req->cmd_q);
+ req->hdev = hdev;
+ req->err = 0;
+}
+
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+
+ /* If an error occurred during request building, remove all HCI
+ * commands queued on the HCI request queue.
+ */
+ if (req->err) {
+ skb_queue_purge(&req->cmd_q);
+ return req->err;
+ }
+
+ /* Do not allow empty requests */
+ if (skb_queue_empty(&req->cmd_q))
+ return -ENODATA;
+
+ skb = skb_peek_tail(&req->cmd_q);
+ bt_cb(skb)->req.complete = complete;
+
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+
+ return 0;
+}
+
+struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param)
+{
+ int len = HCI_COMMAND_HDR_SIZE + plen;
+ struct hci_command_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(opcode);
+ hdr->plen = plen;
+
+ if (plen)
+ memcpy(skb_put(skb, plen), param, plen);
+
+ BT_DBG("skb len %d", skb->len);
+
+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+ bt_cb(skb)->opcode = opcode;
+
+ return skb;
+}
+
+/* Queue a command to an asynchronous HCI request */
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+ /* If an error occurred during request building, there is no point in
+ * queueing the HCI command. We can simply return.
+ */
+ if (req->err)
+ return;
+
+ skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ if (!skb) {
+ BT_ERR("%s no memory for command (opcode 0x%4.4x)",
+ hdev->name, opcode);
+ req->err = -ENOMEM;
+ return;
+ }
+
+ if (skb_queue_empty(&req->cmd_q))
+ bt_cb(skb)->req.start = true;
+
+ bt_cb(skb)->req.event = event;
+
+ skb_queue_tail(&req->cmd_q, skb);
+}
+
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param)
+{
+ hci_req_add_ev(req, opcode, plen, param, 0);
+}
+
+void hci_req_add_le_scan_disable(struct hci_request *req)
+{
+ struct hci_cp_le_set_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_DISABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static void add_to_white_list(struct hci_request *req,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_add_to_white_list cp;
+
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+
+ hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+}
+
+static u8 update_white_list(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_conn_params *params;
+ struct bdaddr_list *b;
+ uint8_t white_list_entries = 0;
+
+ /* Go through the current white list programmed into the
+ * controller one by one and check if that address is still
+ * in the list of pending connections or list of devices to
+ * report. If not present in either list, then queue the
+ * command to remove it from the controller.
+ */
+ list_for_each_entry(b, &hdev->le_white_list, list) {
+ struct hci_cp_le_del_from_white_list cp;
+
+ if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr, b->bdaddr_type) ||
+ hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr, b->bdaddr_type)) {
+ white_list_entries++;
+ continue;
+ }
+
+ cp.bdaddr_type = b->bdaddr_type;
+ bacpy(&cp.bdaddr, &b->bdaddr);
+
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+ sizeof(cp), &cp);
+ }
+
+ /* Since all white list entries that are no longer valid have
+ * been removed, walk through the list of pending connections
+ * and ensure that any new device gets programmed into
+ * the controller.
+ *
+ * If the list of devices is larger than the list of
+ * available white list entries in the controller, then
+ * just abort and return the filter policy value to not
+ * use the white list.
+ */
+ list_for_each_entry(params, &hdev->pend_le_conns, action) {
+ if (hci_bdaddr_list_lookup(&hdev->le_white_list,
+ &params->addr, params->addr_type))
+ continue;
+
+ if (white_list_entries >= hdev->le_white_list_size) {
+ /* Select filter policy to accept all advertising */
+ return 0x00;
+ }
+
+ if (hci_find_irk_by_addr(hdev, &params->addr,
+ params->addr_type)) {
+ /* White list can not be used with RPAs */
+ return 0x00;
+ }
+
+ white_list_entries++;
+ add_to_white_list(req, params);
+ }
+
+ /* After adding all new pending connections, walk through
+ * the list of pending reports and also add these to the
+ * white list if there is still space.
+ */
+ list_for_each_entry(params, &hdev->pend_le_reports, action) {
+ if (hci_bdaddr_list_lookup(&hdev->le_white_list,
+ &params->addr, params->addr_type))
+ continue;
+
+ if (white_list_entries >= hdev->le_white_list_size) {
+ /* Select filter policy to accept all advertising */
+ return 0x00;
+ }
+
+ if (hci_find_irk_by_addr(hdev, &params->addr,
+ params->addr_type)) {
+ /* White list can not be used with RPAs */
+ return 0x00;
+ }
+
+ white_list_entries++;
+ add_to_white_list(req, params);
+ }
+
+ /* Select filter policy to use white list */
+ return 0x01;
+}
+
+void hci_req_add_le_passive_scan(struct hci_request *req)
+{
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
+ struct hci_dev *hdev = req->hdev;
+ u8 own_addr_type;
+ u8 filter_policy;
+
+ /* Set require_privacy to false since no SCAN_REQ is sent
+ * during passive scanning. Not using a non-resolvable address
+ * here is important so that peer devices using direct
+ * advertising with our address will be correctly reported
+ * by the controller.
+ */
+ if (hci_update_random_address(req, false, &own_addr_type))
+ return;
+
+ /* Adding or removing entries from the white list must
+ * happen before enabling scanning. The controller does
+ * not allow white list modification while scanning.
+ */
+ filter_policy = update_white_list(req);
+
+ /* When the controller is using random resolvable addresses, and
+ * thus has LE privacy enabled, controllers that support Extended
+ * Scanner Filter Policies can also handle directed advertising.
+ *
+ * So instead of using filter policies 0x00 (no whitelist)
+ * and 0x01 (whitelist enabled), use the new filter policies
+ * 0x02 (no whitelist) and 0x03 (whitelist enabled).
+ */
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
+ (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+ filter_policy |= 0x02;
+
+ memset(&param_cp, 0, sizeof(param_cp));
+ param_cp.type = LE_SCAN_PASSIVE;
+ param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
+ param_cp.window = cpu_to_le16(hdev->le_scan_window);
+ param_cp.own_address_type = own_addr_type;
+ param_cp.filter_policy = filter_policy;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+}
+
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* If we're advertising or initiating an LE connection we can't
+ * go ahead and change the random address at this time. This is
+ * because the eventual initiator address used for the
+ * subsequently created connection will be undefined (some
+ * controllers use the new address and others the one we had
+ * when the operation started).
+ *
+ * In this kind of scenario skip the update and let the random
+ * address be updated at the next cycle.
+ */
+ if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
+ hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ BT_DBG("Deferring random address update");
+ set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ return;
+ }
+
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+ u8 *own_addr_type)
+{
+ struct hci_dev *hdev = req->hdev;
+ int err;
+
+ /* If privacy is enabled, use a resolvable private address. If
+ * the current RPA has expired or something other than the
+ * current RPA is in use, then generate a new one.
+ */
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+ int to;
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+ !bacmp(&hdev->random_addr, &hdev->rpa))
+ return 0;
+
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ BT_ERR("%s failed to generate new RPA", hdev->name);
+ return err;
+ }
+
+ set_random_addr(req, &hdev->rpa);
+
+ to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
+
+ return 0;
+ }
+
+ /* In case of required privacy without resolvable private address,
+ * use a non-resolvable private address. This is useful for active
+ * scanning and non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from six random bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ set_random_addr(req, &nrpa);
+ return 0;
+ }
+
+ /* If forcing the static address is in use or there is no public
+ * address, use the static address as the random address (but skip
+ * the HCI command if the current random address is already the
+ * static one).
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller
+ * and a static address has been configured, then use that
+ * address instead of the public BR/EDR address.
+ */
+ if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ if (bacmp(&hdev->static_addr, &hdev->random_addr))
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ &hdev->static_addr);
+ return 0;
+ }
+
+ /* Neither privacy nor static address is being used so use a
+ * public address.
+ */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
+
+static bool disconnected_whitelist_entries(struct hci_dev *hdev)
+{
+ struct bdaddr_list *b;
+
+ list_for_each_entry(b, &hdev->whitelist, list) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
+ if (!conn)
+ return true;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ return true;
+ }
+
+ return false;
+}
+
+void __hci_update_page_scan(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 scan;
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return;
+
+ if (!hdev_is_powered(hdev))
+ return;
+
+ if (mgmt_powering_down(hdev))
+ return;
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+ disconnected_whitelist_entries(hdev))
+ scan = SCAN_PAGE;
+ else
+ scan = SCAN_DISABLED;
+
+ if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
+ return;
+
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ scan |= SCAN_INQUIRY;
+
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+void hci_update_page_scan(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+ __hci_update_page_scan(&req);
+ hci_req_run(&req, NULL);
+}
+
+/* This function controls the background scanning based on the
+ * hdev->pend_le_conns list. If there are pending LE connections we start
+ * the background scanning, otherwise we stop it.
+ *
+ * This function requires that the caller holds hdev->lock.
+ */
+void __hci_update_background_scan(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_conn *conn;
+
+ if (!test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags) ||
+ test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
+ test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+ return;
+
+ /* No point in doing scanning if LE support hasn't been enabled */
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return;
+
+ /* If discovery is active don't interfere with it */
+ if (hdev->discovery.state != DISCOVERY_STOPPED)
+ return;
+
+ /* Reset RSSI and UUID filters when starting background scanning
+ * since these filters are meant for service discovery only.
+ *
+ * The Start Discovery and Start Service Discovery operations
+ * ensure that proper values are set for the RSSI threshold and UUID
+ * filter list. So it is safe to just reset them here.
+ */
+ hci_discovery_filter_clear(hdev);
+
+ if (list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports)) {
+ /* If there are no pending LE connections or devices
+ * to be scanned for, we should stop the background
+ * scanning.
+ */
+
+ /* If controller is not scanning we are done. */
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return;
+
+ hci_req_add_le_scan_disable(req);
+
+ BT_DBG("%s stopping background scanning", hdev->name);
+ } else {
+ /* If there is at least one pending LE connection, we should
+ * keep the background scan running.
+ */
+
+ /* If controller is connecting, we should not start scanning
+ * since some controllers are not able to scan and connect at
+ * the same time.
+ */
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ return;
+
+ /* If controller is currently scanning, we stop it to ensure we
+ * don't miss any advertising (due to duplicates filter).
+ */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ hci_req_add_le_scan_disable(req);
+
+ hci_req_add_le_passive_scan(req);
+
+ BT_DBG("%s starting background scanning", hdev->name);
+ }
+}
+
+static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ if (status)
+ BT_DBG("HCI request failed to update background scanning: "
+ "status 0x%2.2x", status);
+}
+
+void hci_update_background_scan(struct hci_dev *hdev)
+{
+ int err;
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+
+ __hci_update_background_scan(&req);
+
+ err = hci_req_run(&req, update_background_scan_complete);
+ if (err && err != -ENODATA)
+ BT_ERR("Failed to run HCI request: err %d", err);
+}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
new file mode 100644
index 000000000000..adf074d33544
--- /dev/null
+++ b/net/bluetooth/hci_request.h
@@ -0,0 +1,54 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+struct hci_request {
+ struct hci_dev *hdev;
+ struct sk_buff_head cmd_q;
+
+ /* If something goes wrong when building the HCI request, the error
+ * value is stored in this field.
+ */
+ int err;
+};
+
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param);
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+
+struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param);
+
+void hci_req_add_le_scan_disable(struct hci_request *req);
+void hci_req_add_le_passive_scan(struct hci_request *req);
+
+void hci_update_page_scan(struct hci_dev *hdev);
+void __hci_update_page_scan(struct hci_request *req);
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+ u8 *own_addr_type);
+
+void hci_update_background_scan(struct hci_dev *hdev);
+void __hci_update_background_scan(struct hci_request *req);
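
The header above is the public surface of the new hci_request helper. The usual flow is hci_req_init() to set up the command queue, one or more hci_req_add*() calls to queue commands (build errors are latched in req->err and make hci_req_run() bail out without sending anything), and finally hci_req_run() with an optional completion callback. A minimal sketch, with hypothetical "example_*" names not taken from the patch; the completion signature mirrors update_background_scan_complete() in hci_request.c above.

    static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
    {
            BT_DBG("%s status 0x%2.2x opcode 0x%4.4x", hdev->name, status,
                   opcode);
    }

    static void example_refresh_passive_scan(struct hci_dev *hdev)
    {
            struct hci_request req;
            int err;

            hci_req_init(&req, hdev);

            /* Queue a scan disable followed by a fresh passive scan setup;
             * done unconditionally here just to show command chaining.
             */
            hci_req_add_le_scan_disable(&req);
            hci_req_add_le_passive_scan(&req);

            err = hci_req_run(&req, example_complete);
            if (err && err != -ENODATA)
                    BT_ERR("Failed to run HCI request: err %d", err);
    }
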
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 2c245fdf319a..1d65c5be7c82 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -216,11 +216,39 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
read_unlock(&hci_sk_list.lock);
}
+static void queue_monitor_skb(struct sk_buff *skb)
+{
+ struct sock *sk;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct sock *sk;
struct sk_buff *skb_copy = NULL;
+ struct hci_mon_hdr *hdr;
__le16 opcode;
if (!atomic_read(&monitor_promisc))
@@ -251,74 +279,21 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- read_lock(&hci_sk_list.lock);
-
- sk_for_each(sk, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
- if (sk->sk_state != BT_BOUND)
- continue;
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
- continue;
-
- if (!skb_copy) {
- struct hci_mon_hdr *hdr;
-
- /* Create a private copy with headroom */
- skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
- GFP_ATOMIC, true);
- if (!skb_copy)
- continue;
-
- /* Put header before the data */
- hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
- hdr->opcode = opcode;
- hdr->index = cpu_to_le16(hdev->id);
- hdr->len = cpu_to_le16(skb->len);
- }
-
- nskb = skb_clone(skb_copy, GFP_ATOMIC);
- if (!nskb)
- continue;
-
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
+ if (!skb_copy)
+ return;
- read_unlock(&hci_sk_list.lock);
+ /* Put header before the data */
+ hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len);
+ queue_monitor_skb(skb_copy);
kfree_skb(skb_copy);
}
-static void send_monitor_event(struct sk_buff *skb)
-{
- struct sock *sk;
-
- BT_DBG("len %d", skb->len);
-
- read_lock(&hci_sk_list.lock);
-
- sk_for_each(sk, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
- if (sk->sk_state != BT_BOUND)
- continue;
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
- continue;
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- continue;
-
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
-
- read_unlock(&hci_sk_list.lock);
-}
-
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
struct hci_mon_hdr *hdr;
@@ -422,7 +397,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
skb = create_monitor_event(hdev, event);
if (skb) {
- send_monitor_event(skb);
+ queue_monitor_skb(skb);
kfree_skb(skb);
}
}
@@ -1230,6 +1205,8 @@ int __init hci_sock_init(void)
{
int err;
+ BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
+
err = proto_register(&hci_sk_proto, 0);
if (err < 0)
return err;
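The BUILD_BUG_ON() added to hci_sock_init() above (and repeated in the L2CAP, RFCOMM and SCO socket init paths later in this diff) is a compile-time size check. As an illustration of the pattern only, with a hypothetical address structure, it boils down to:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/socket.h>

/* Hypothetical address type, used purely for illustration. */
struct sockaddr_example {
	__kernel_sa_family_t example_family;
	unsigned char        example_dev[6];
};

static int __init example_sock_init(void)
{
	/* Break the build if the protocol-specific address ever grows
	 * beyond the generic struct sockaddr it gets passed around as.
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_example) > sizeof(struct sockaddr));

	return 0;
}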
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d04dc0095736..6ba33f9631e8 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -63,10 +63,10 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
-static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
- if (hcon->type == LE_LINK) {
- if (type == ADDR_LE_DEV_PUBLIC)
+ if (link_type == LE_LINK) {
+ if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
return BDADDR_LE_PUBLIC;
else
return BDADDR_LE_RANDOM;
@@ -75,6 +75,16 @@ static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
return BDADDR_BREDR;
}
+static inline u8 bdaddr_src_type(struct hci_conn *hcon)
+{
+ return bdaddr_type(hcon->type, hcon->src_type);
+}
+
+static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
+{
+ return bdaddr_type(hcon->type, hcon->dst_type);
+}
+
/* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
@@ -646,7 +656,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
bacpy(&chan->dst, &hcon->dst);
- chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
+ chan->dst_type = bdaddr_dst_type(hcon);
l2cap_chan_unlock(chan);
}
@@ -3790,8 +3800,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
bacpy(&chan->src, &conn->hcon->src);
bacpy(&chan->dst, &conn->hcon->dst);
- chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
- chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
+ chan->src_type = bdaddr_src_type(conn->hcon);
+ chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
chan->local_amp_id = amp_id;
@@ -5441,8 +5451,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
bacpy(&chan->src, &conn->hcon->src);
bacpy(&chan->dst, &conn->hcon->dst);
- chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
- chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
+ chan->src_type = bdaddr_src_type(conn->hcon);
+ chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
chan->omtu = mtu;
@@ -6881,7 +6891,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
*/
if (hcon->type == LE_LINK &&
hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
- bdaddr_type(hcon, hcon->dst_type))) {
+ bdaddr_dst_type(hcon))) {
kfree_skb(skb);
return;
}
@@ -6968,7 +6978,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
(bredr_sc_enabled(hcon->hdev) ||
- test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
+ test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
@@ -7123,7 +7133,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
/* Update source addr of the socket */
bacpy(&chan->src, &hcon->src);
- chan->src_type = bdaddr_type(hcon, hcon->src_type);
+ chan->src_type = bdaddr_src_type(hcon);
__l2cap_chan_add(conn, chan);
@@ -7197,8 +7207,10 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
* global list (by passing NULL as first parameter).
*/
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
- bdaddr_t *src, u8 link_type)
+ struct hci_conn *hcon)
{
+ u8 src_type = bdaddr_src_type(hcon);
+
read_lock(&chan_list_lock);
if (c)
@@ -7211,11 +7223,9 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
continue;
if (c->state != BT_LISTEN)
continue;
- if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
+ if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
continue;
- if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
- continue;
- if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
+ if (src_type != c->src_type)
continue;
l2cap_chan_hold(c);
@@ -7246,7 +7256,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
if (!conn)
return;
- dst_type = bdaddr_type(hcon, hcon->dst_type);
+ dst_type = bdaddr_dst_type(hcon);
/* If device is blocked, do not create channels for it */
if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
@@ -7257,7 +7267,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
* we left off, because the list lock would prevent calling the
* potentially sleeping l2cap_chan_lock() function.
*/
- pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
+ pchan = l2cap_global_fixed_chan(NULL, hcon);
while (pchan) {
struct l2cap_chan *chan, *next;
@@ -7270,7 +7280,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
if (chan) {
bacpy(&chan->src, &hcon->src);
bacpy(&chan->dst, &hcon->dst);
- chan->src_type = bdaddr_type(hcon, hcon->src_type);
+ chan->src_type = bdaddr_src_type(hcon);
chan->dst_type = dst_type;
__l2cap_chan_add(conn, chan);
@@ -7278,8 +7288,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
l2cap_chan_unlock(pchan);
next:
- next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
- hcon->type);
+ next = l2cap_global_fixed_chan(pchan, hcon);
l2cap_chan_put(pchan);
pchan = next;
}
@@ -7527,8 +7536,8 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
- seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
- &c->src, &c->dst,
+ seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
+ &c->src, c->src_type, &c->dst, c->dst_type,
c->state, __le16_to_cpu(c->psm),
c->scid, c->dcid, c->imtu, c->omtu,
c->sec_level, c->mode);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f65caf41953f..60694f0f4c73 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -302,7 +302,7 @@ done:
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
int flags)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
@@ -316,8 +316,6 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
@@ -338,10 +336,11 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
}
release_sock(sk);
- timeo = schedule_timeout(timeo);
+
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+
lock_sock_nested(sk, L2CAP_NESTING_PARENT);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -1614,6 +1613,8 @@ int __init l2cap_init_sockets(void)
{
int err;
+ BUILD_BUG_ON(sizeof(struct sockaddr_l2) > sizeof(struct sockaddr));
+
err = proto_register(&l2cap_proto, 0);
if (err < 0)
return err;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 693ce8bcd06e..9ec5390c85eb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -32,6 +32,7 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
+#include "hci_request.h"
#include "smp.h"
#define MGMT_VERSION 1
@@ -130,6 +131,9 @@ static const u16 mgmt_events[] = {
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
+#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+
struct pending_cmd {
struct list_head list;
u16 opcode;
@@ -138,7 +142,7 @@ struct pending_cmd {
size_t param_len;
struct sock *sk;
void *user_data;
- void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
+ int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
/* HCI to MGMT error code conversion table */
@@ -569,8 +573,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_HS;
}
- if (lmp_sc_capable(hdev) ||
- test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
+ if (lmp_sc_capable(hdev))
settings |= MGMT_SETTING_SECURE_CONN;
}
@@ -1251,7 +1254,7 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
sizeof(settings));
}
-static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
+static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
BT_DBG("%s status 0x%02x", hdev->name, status);
@@ -1486,16 +1489,16 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
cmd_status_rsp(cmd, data);
}
-static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
- cmd->param_len);
+ return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+ cmd->param, cmd->param_len);
}
-static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
- sizeof(struct mgmt_addr_info));
+ return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
+ sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
@@ -1518,7 +1521,8 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
return MGMT_STATUS_SUCCESS;
}
-static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
+static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
struct pending_cmd *cmd;
struct mgmt_mode *cp;
@@ -1566,7 +1570,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
* entries.
*/
hci_req_init(&req, hdev);
- hci_update_page_scan(hdev, &req);
+ __hci_update_page_scan(&req);
update_class(&req);
hci_req_run(&req, NULL);
@@ -1777,7 +1781,8 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
-static void set_connectable_complete(struct hci_dev *hdev, u8 status)
+static void set_connectable_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
struct pending_cmd *cmd;
struct mgmt_mode *cp;
@@ -1813,7 +1818,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
if (conn_changed || discov_changed) {
new_settings(hdev, cmd->sk);
- hci_update_page_scan(hdev, NULL);
+ hci_update_page_scan(hdev);
if (discov_changed)
mgmt_update_adv_data(hdev);
hci_update_background_scan(hdev);
@@ -1847,7 +1852,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
return err;
if (changed) {
- hci_update_page_scan(hdev, NULL);
+ hci_update_page_scan(hdev);
hci_update_background_scan(hdev);
return new_settings(hdev, sk);
}
@@ -2195,7 +2200,7 @@ unlock:
return err;
}
-static void le_enable_complete(struct hci_dev *hdev, u8 status)
+static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct cmd_lookup match = { NULL, hdev };
@@ -2227,9 +2232,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
+ __hci_update_background_scan(&req);
hci_req_run(&req, NULL);
-
- hci_update_background_scan(hdev);
}
unlock:
@@ -2386,7 +2390,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static void add_uuid_complete(struct hci_dev *hdev, u8 status)
+static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
BT_DBG("status 0x%02x", status);
@@ -2465,7 +2469,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
return false;
}
-static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
+static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
BT_DBG("status 0x%02x", status);
@@ -2550,7 +2554,7 @@ unlock:
return err;
}
-static void set_class_complete(struct hci_dev *hdev, u8 status)
+static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
BT_DBG("status 0x%02x", status);
@@ -3098,16 +3102,17 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn)
return NULL;
}
-static void pairing_complete(struct pending_cmd *cmd, u8 status)
+static int pairing_complete(struct pending_cmd *cmd, u8 status)
{
struct mgmt_rp_pair_device rp;
struct hci_conn *conn = cmd->user_data;
+ int err;
bacpy(&rp.addr.bdaddr, &conn->dst);
rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
- &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
+ &rp, sizeof(rp));
/* So we don't get further callbacks for this connection */
conn->connect_cfm_cb = NULL;
@@ -3122,6 +3127,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
hci_conn_put(conn);
+
+ return err;
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@@ -3481,7 +3488,7 @@ static void update_name(struct hci_request *req)
hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
-static void set_name_complete(struct hci_dev *hdev, u8 status)
+static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct mgmt_cp_set_local_name *cp;
struct pending_cmd *cmd;
@@ -3629,10 +3636,16 @@ unlock:
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
+ struct mgmt_addr_info *addr = data;
int err;
BT_DBG("%s ", hdev->name);
+ if (!bdaddr_type_is_valid(addr->type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS, addr,
+ sizeof(*addr));
+
hci_dev_lock(hdev);
if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
@@ -3659,28 +3672,53 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
status, &cp->addr, sizeof(cp->addr));
} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
struct mgmt_cp_add_remote_oob_ext_data *cp = data;
- u8 *rand192, *hash192;
+ u8 *rand192, *hash192, *rand256, *hash256;
u8 status;
- if (cp->addr.type != BDADDR_BREDR) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
- goto unlock;
- }
-
if (bdaddr_type_is_le(cp->addr.type)) {
+ /* Enforce zero-valued 192-bit parameters as
+ * long as legacy SMP OOB isn't implemented.
+ */
+ if (memcmp(cp->rand192, ZERO_KEY, 16) ||
+ memcmp(cp->hash192, ZERO_KEY, 16)) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ addr, sizeof(*addr));
+ goto unlock;
+ }
+
rand192 = NULL;
hash192 = NULL;
} else {
- rand192 = cp->rand192;
- hash192 = cp->hash192;
+ /* In case one of the P-192 values is set to zero,
+ * then just disable OOB data for P-192.
+ */
+ if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
+ !memcmp(cp->hash192, ZERO_KEY, 16)) {
+ rand192 = NULL;
+ hash192 = NULL;
+ } else {
+ rand192 = cp->rand192;
+ hash192 = cp->hash192;
+ }
+ }
+
+ /* In case one of the P-256 values is set to zero, then just
+ * disable OOB data for P-256.
+ */
+ if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
+ !memcmp(cp->hash256, ZERO_KEY, 16)) {
+ rand256 = NULL;
+ hash256 = NULL;
+ } else {
+ rand256 = cp->rand256;
+ hash256 = cp->hash256;
}
err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
cp->addr.type, hash192, rand192,
- cp->hash256, cp->rand256);
+ hash256, rand256);
if (err < 0)
status = MGMT_STATUS_FAILED;
else
@@ -3832,7 +3870,8 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
return true;
}
-static void start_discovery_complete(struct hci_dev *hdev, u8 status)
+static void start_discovery_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
struct pending_cmd *cmd;
unsigned long timeout;
@@ -3857,6 +3896,9 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ /* If the scan involves LE scanning, pick a proper timeout to schedule
+ * hdev->le_scan_disable that will stop it.
+ */
switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
@@ -3873,9 +3915,23 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
break;
}
- if (timeout)
+ if (timeout) {
+ /* When service discovery is used and the controller has
+ * a strict duplicate filter, it is important to remember
+ * the start and duration of the scan. This is required
+ * for restarting scanning during the discovery phase.
+ */
+ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+ &hdev->quirks) &&
+ (hdev->discovery.uuid_count > 0 ||
+ hdev->discovery.rssi != HCI_RSSI_INVALID)) {
+ hdev->discovery.scan_start = jiffies;
+ hdev->discovery.scan_duration = timeout;
+ }
+
queue_delayed_work(hdev->workqueue,
&hdev->le_scan_disable, timeout);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -3947,9 +4003,10 @@ failed:
return err;
}
-static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
{
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
+ return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+ cmd->param, 1);
}
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4060,7 +4117,7 @@ failed:
return err;
}
-static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
+static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct pending_cmd *cmd;
@@ -4286,7 +4343,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
-static void set_advertising_complete(struct hci_dev *hdev, u8 status)
+static void set_advertising_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
struct cmd_lookup match = { NULL, hdev };
@@ -4493,7 +4551,8 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
+static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
{
struct pending_cmd *cmd;
@@ -4591,7 +4650,7 @@ unlock:
return err;
}
-static void set_bredr_complete(struct hci_dev *hdev, u8 status)
+static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct pending_cmd *cmd;
@@ -4675,6 +4734,28 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
MGMT_STATUS_REJECTED);
goto unlock;
+ } else {
+ /* When configuring a dual-mode controller to operate
+ * with LE only and using a static address, then switching
+ * BR/EDR back on is not allowed.
+ *
+ * Dual-mode controllers shall operate with the public
+ * address as their identity address for BR/EDR and LE. So
+ * reject the attempt to create an invalid configuration.
+ *
+ * The same restriction applies when secure connections
+ * has been enabled. For BR/EDR this is a controller feature
+ * while for LE it is a host stack feature. This means that
+ * switching BR/EDR back on when secure connections has been
+ * enabled is not a supported transaction.
+ */
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ (bacmp(&hdev->static_addr, BDADDR_ANY) ||
+ test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
}
if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
@@ -4697,7 +4778,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_req_init(&req, hdev);
write_fast_connectable(&req, false);
- hci_update_page_scan(hdev, &req);
+ __hci_update_page_scan(&req);
/* Since only the advertising data flags will change, there
* is no need to update the scan response data.
@@ -4713,30 +4794,80 @@ unlock:
return err;
}
+static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_mode *cp;
+
+ BT_DBG("%s status %u", hdev->name, status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status));
+ goto remove;
+ }
+
+ cp = cmd->param;
+
+ switch (cp->val) {
+ case 0x00:
+ clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ break;
+ case 0x01:
+ set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ break;
+ case 0x02:
+ set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ break;
+ }
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
+ new_settings(hdev, cmd->sk);
+
+remove:
+ mgmt_pending_remove(cmd);
+unlock:
+ hci_dev_unlock(hdev);
+}
+
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_mode *cp = data;
struct pending_cmd *cmd;
+ struct hci_request req;
u8 val;
int err;
BT_DBG("request for %s", hdev->name);
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
- !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
+ if (!lmp_sc_capable(hdev) &&
+ !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
MGMT_STATUS_NOT_SUPPORTED);
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ lmp_sc_capable(hdev) &&
+ !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_REJECTED);
+
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (!hdev_is_powered(hdev) ||
- (!lmp_sc_capable(hdev) &&
- !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
+ if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
bool changed;
@@ -4783,17 +4914,14 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+ err = hci_req_run(&req, sc_enable_complete);
if (err < 0) {
mgmt_pending_remove(cmd);
goto failed;
}
- if (cp->val == 0x02)
- set_bit(HCI_SC_ONLY, &hdev->dev_flags);
- else
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-
failed:
hci_dev_unlock(hdev);
return err;
@@ -5091,10 +5219,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
struct hci_conn *conn = cmd->user_data;
struct mgmt_rp_get_conn_info rp;
+ int err;
memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
@@ -5108,14 +5237,17 @@ static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
rp.max_tx_power = HCI_TX_POWER_INVALID;
}
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
- &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
+ &rp, sizeof(rp));
hci_conn_drop(conn);
hci_conn_put(conn);
+
+ return err;
}
-static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
+static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
+ u16 opcode)
{
struct hci_cp_read_rssi *cp;
struct pending_cmd *cmd;
@@ -5286,11 +5418,12 @@ unlock:
return err;
}
-static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
struct hci_conn *conn = cmd->user_data;
struct mgmt_rp_get_clock_info rp;
struct hci_dev *hdev;
+ int err;
memset(&rp, 0, sizeof(rp));
memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
@@ -5310,15 +5443,18 @@ static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
}
complete:
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
+ sizeof(rp));
if (conn) {
hci_conn_drop(conn);
hci_conn_put(conn);
}
+
+ return err;
}
-static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
+static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct hci_cp_read_clock *hci_cp;
struct pending_cmd *cmd;
@@ -5425,6 +5561,65 @@ unlock:
return err;
}
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+ if (!conn)
+ return false;
+
+ if (conn->dst_type != type)
+ return false;
+
+ if (conn->state != BT_CONNECTED)
+ return false;
+
+ return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
+ u8 addr_type, u8 auto_connect)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_conn_params *params;
+
+ params = hci_conn_params_add(hdev, addr, addr_type);
+ if (!params)
+ return -EIO;
+
+ if (params->auto_connect == auto_connect)
+ return 0;
+
+ list_del_init(&params->action);
+
+ switch (auto_connect) {
+ case HCI_AUTO_CONN_DISABLED:
+ case HCI_AUTO_CONN_LINK_LOSS:
+ __hci_update_background_scan(req);
+ break;
+ case HCI_AUTO_CONN_REPORT:
+ list_add(&params->action, &hdev->pend_le_reports);
+ __hci_update_background_scan(req);
+ break;
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ if (!is_connected(hdev, addr, addr_type)) {
+ list_add(&params->action, &hdev->pend_le_conns);
+ __hci_update_background_scan(req);
+ }
+ break;
+ }
+
+ params->auto_connect = auto_connect;
+
+ BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+ auto_connect);
+
+ return 0;
+}
+
static void device_added(struct sock *sk, struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 type, u8 action)
{
@@ -5437,10 +5632,31 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
+static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_add_device *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
u8 auto_conn, addr_type;
int err;
@@ -5457,14 +5673,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
+ hci_req_init(&req, hdev);
+
hci_dev_lock(hdev);
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ cmd->cmd_complete = addr_cmd_complete;
+
if (cp->addr.type == BDADDR_BREDR) {
/* Only incoming connections action is supported for now */
if (cp->action != 0x01) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
goto unlock;
}
@@ -5473,7 +5699,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
if (err)
goto unlock;
- hci_update_page_scan(hdev, NULL);
+ __hci_update_page_scan(&req);
goto added;
}
@@ -5493,19 +5719,25 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
/* If the connection parameters don't exist for this device,
* they will be created and configured with defaults.
*/
- if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
+ if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
auto_conn) < 0) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
- MGMT_STATUS_FAILED,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
+ mgmt_pending_remove(cmd);
goto unlock;
}
added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
- MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
+ err = hci_req_run(&req, add_device_complete);
+ if (err < 0) {
+ /* ENODATA means no HCI commands were needed (e.g. if
+ * the adapter is powered off).
+ */
+ if (err == -ENODATA)
+ err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
+ mgmt_pending_remove(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -5523,24 +5755,55 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
+static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
static int remove_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_remove_device *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
int err;
BT_DBG("%s", hdev->name);
+ hci_req_init(&req, hdev);
+
hci_dev_lock(hdev);
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ cmd->cmd_complete = addr_cmd_complete;
+
if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
struct hci_conn_params *params;
u8 addr_type;
if (!bdaddr_type_is_valid(cp->addr.type)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
goto unlock;
}
@@ -5549,14 +5812,13 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
&cp->addr.bdaddr,
cp->addr.type);
if (err) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_REMOVE_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
goto unlock;
}
- hci_update_page_scan(hdev, NULL);
+ __hci_update_page_scan(&req);
device_removed(sk, hdev, &cp->addr.bdaddr,
cp->addr.type);
@@ -5571,23 +5833,23 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type);
if (!params) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
goto unlock;
}
if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
goto unlock;
}
list_del(&params->action);
list_del(&params->list);
kfree(params);
- hci_update_background_scan(hdev);
+ __hci_update_background_scan(&req);
device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
} else {
@@ -5595,9 +5857,9 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
struct bdaddr_list *b, *btmp;
if (cp->addr.type) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
goto unlock;
}
@@ -5607,7 +5869,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
kfree(b);
}
- hci_update_page_scan(hdev, NULL);
+ __hci_update_page_scan(&req);
list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
@@ -5620,12 +5882,19 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
BT_DBG("All LE connection parameters were removed");
- hci_update_background_scan(hdev);
+ __hci_update_background_scan(&req);
}
complete:
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
- MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
+ err = hci_req_run(&req, remove_device_complete);
+ if (err < 0) {
+ /* ENODATA means no HCI commands were needed (e.g. if
+ * the adapter is powered off).
+ */
+ if (err == -ENODATA)
+ err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
+ mgmt_pending_remove(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -6037,8 +6306,9 @@ void mgmt_index_removed(struct hci_dev *hdev)
}
/* This function requires the caller holds hdev->lock */
-static void restart_le_actions(struct hci_dev *hdev)
+static void restart_le_actions(struct hci_request *req)
{
+ struct hci_dev *hdev = req->hdev;
struct hci_conn_params *p;
list_for_each_entry(p, &hdev->le_conn_params, list) {
@@ -6060,18 +6330,25 @@ static void restart_le_actions(struct hci_dev *hdev)
}
}
- hci_update_background_scan(hdev);
+ __hci_update_background_scan(req);
}
-static void powered_complete(struct hci_dev *hdev, u8 status)
+static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct cmd_lookup match = { NULL, hdev };
BT_DBG("status 0x%02x", status);
- hci_dev_lock(hdev);
+ if (!status) {
+ /* Register the available SMP channels (BR/EDR and LE) only
+ * when successfully powering on the controller. This late
+ * registration is required so that LE SMP can clearly
+ * decide if the public address or static address is used.
+ */
+ smp_register(hdev);
+ }
- restart_le_actions(hdev);
+ hci_dev_lock(hdev);
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
@@ -6092,14 +6369,16 @@ static int powered_update_hci(struct hci_dev *hdev)
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
!lmp_host_ssp_capable(hdev)) {
- u8 ssp = 1;
+ u8 mode = 0x01;
- hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
- }
+ hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
+
+ if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
+ u8 support = 0x01;
- if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
- u8 sc = 0x01;
- hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
+ hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
+ sizeof(support), &support);
+ }
}
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
@@ -6130,6 +6409,8 @@ static int powered_update_hci(struct hci_dev *hdev)
if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
enable_advertising(&req);
+
+ restart_le_actions(&req);
}
link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
@@ -6139,7 +6420,7 @@ static int powered_update_hci(struct hci_dev *hdev)
if (lmp_bredr_capable(hdev)) {
write_fast_connectable(&req, false);
- hci_update_page_scan(hdev, &req);
+ __hci_update_page_scan(&req);
update_class(&req);
update_name(&req);
update_eir(&req);
@@ -6817,43 +7098,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
hci_req_run(&req, NULL);
}
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
-
- if (enable) {
- if (test_and_clear_bit(HCI_SC_ENABLED,
- &hdev->dev_flags))
- new_settings(hdev, NULL);
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
- cmd_status_rsp, &mgmt_err);
- return;
- }
-
- if (enable) {
- changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
- } else {
- changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
- settings_rsp, &match);
-
- if (changed)
- new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-}
-
static void sk_lookup(struct pending_cmd *cmd, void *data)
{
struct cmd_lookup *match = data;
@@ -6924,28 +7168,21 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
mgmt_status(status));
} else {
- if (bredr_sc_enabled(hdev) && hash256 && rand256) {
- struct mgmt_rp_read_local_oob_ext_data rp;
+ struct mgmt_rp_read_local_oob_data rp;
+ size_t rp_size = sizeof(rp);
- memcpy(rp.hash192, hash192, sizeof(rp.hash192));
- memcpy(rp.rand192, rand192, sizeof(rp.rand192));
+ memcpy(rp.hash192, hash192, sizeof(rp.hash192));
+ memcpy(rp.rand192, rand192, sizeof(rp.rand192));
+ if (bredr_sc_enabled(hdev) && hash256 && rand256) {
memcpy(rp.hash256, hash256, sizeof(rp.hash256));
memcpy(rp.rand256, rand256, sizeof(rp.rand256));
-
- cmd_complete(cmd->sk, hdev->id,
- MGMT_OP_READ_LOCAL_OOB_DATA, 0,
- &rp, sizeof(rp));
} else {
- struct mgmt_rp_read_local_oob_data rp;
-
- memcpy(rp.hash, hash192, sizeof(rp.hash));
- memcpy(rp.rand, rand192, sizeof(rp.rand));
-
- cmd_complete(cmd->sk, hdev->id,
- MGMT_OP_READ_LOCAL_OOB_DATA, 0,
- &rp, sizeof(rp));
+ rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
}
+
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+ &rp, rp_size);
}
mgmt_pending_remove(cmd);
@@ -7018,6 +7255,21 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
return false;
}
+static void restart_le_scan(struct hci_dev *hdev)
+{
+ /* If controller is not scanning we are done. */
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return;
+
+ if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
+ hdev->discovery.scan_start +
+ hdev->discovery.scan_duration))
+ return;
+
+ queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
+ DISCOV_LE_RESTART_DELAY);
+}
+
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
@@ -7040,14 +7292,18 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
/* When using service discovery with a RSSI threshold, then check
* if such a RSSI threshold is specified. If a RSSI threshold has
- * been specified, then all results with a RSSI smaller than the
- * RSSI threshold will be dropped.
+ * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
+ * then all results with a RSSI smaller than the RSSI threshold will be
+ * dropped. If the quirk is set, let it through for further processing,
+ * as we might need to restart the scan.
*
* For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
* the results are also dropped.
*/
if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
- (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
+ (rssi == HCI_RSSI_INVALID ||
+ (rssi < hdev->discovery.rssi &&
+ !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
return;
/* Make sure that the buffer is big enough. The 5 extra bytes
@@ -7066,7 +7322,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* However when using service discovery, the value 127 will be
* returned when the RSSI is not available.
*/
- if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
+ if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
+ link_type == ACL_LINK)
rssi = 0;
bacpy(&ev->addr.bdaddr, bdaddr);
@@ -7081,12 +7338,20 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* kept and checking possible scan response data
* will be skipped.
*/
- if (hdev->discovery.uuid_count > 0)
+ if (hdev->discovery.uuid_count > 0) {
match = eir_has_uuids(eir, eir_len,
hdev->discovery.uuid_count,
hdev->discovery.uuids);
- else
+ /* If duplicate filtering does not report RSSI changes,
+ * then restart scanning to ensure updated result with
+ * updated RSSI values.
+ */
+ if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+ &hdev->quirks))
+ restart_le_scan(hdev);
+ } else {
match = true;
+ }
if (!match && !scan_rsp_len)
return;
@@ -7119,6 +7384,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
hdev->discovery.uuid_count,
hdev->discovery.uuids))
return;
+
+ /* If duplicate filtering does not report RSSI changes,
+ * then restart scanning to ensure updated result with
+ * updated RSSI values.
+ */
+ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+ &hdev->quirks))
+ restart_le_scan(hdev);
}
/* Append scan response data to event */
@@ -7132,6 +7405,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
return;
}
+ /* Validate the reported RSSI value against the RSSI threshold once more
+ * in case HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
+ * scanning.
+ */
+ if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+ rssi < hdev->discovery.rssi)
+ return;
+
ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
@@ -7174,7 +7455,7 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
-static void adv_enable_complete(struct hci_dev *hdev, u8 status)
+static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
BT_DBG("%s status %u", hdev->name, status);
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 73f8c75abe6e..4fea24275b17 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -771,7 +771,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
bacpy(&addr.l2_bdaddr, dst);
addr.l2_family = AF_BLUETOOTH;
- addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
+ addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM);
addr.l2_cid = 0;
addr.l2_bdaddr_type = BDADDR_BREDR;
*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
@@ -2038,7 +2038,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
/* Bind socket */
bacpy(&addr.l2_bdaddr, ba);
addr.l2_family = AF_BLUETOOTH;
- addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
+ addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM);
addr.l2_cid = 0;
addr.l2_bdaddr_type = BDADDR_BREDR;
err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 2348176401a0..3c6d2c8ac1a4 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -468,7 +468,7 @@ done:
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
@@ -487,8 +487,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
@@ -509,10 +507,11 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
}
release_sock(sk);
- timeo = schedule_timeout(timeo);
+
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -1058,6 +1057,8 @@ int __init rfcomm_init_sockets(void)
{
int err;
+ BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr));
+
err = proto_register(&rfcomm_proto, 0);
if (err < 0)
return err;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 30e5ea3f1ad3..76321b546e84 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -618,7 +618,7 @@ done:
static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
@@ -632,8 +632,6 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
@@ -654,10 +652,10 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
}
release_sock(sk);
- timeo = schedule_timeout(timeo);
+
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -1184,6 +1182,8 @@ int __init sco_init(void)
{
int err;
+ BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr));
+
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
new file mode 100644
index 000000000000..378f4064952c
--- /dev/null
+++ b/net/bluetooth/selftest.c
@@ -0,0 +1,244 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "ecc.h"
+#include "smp.h"
+#include "selftest.h"
+
+#if IS_ENABLED(CONFIG_BT_SELFTEST_ECDH)
+
+static const u8 priv_a_1[32] __initconst = {
+ 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
+ 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
+ 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
+ 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
+};
+static const u8 priv_b_1[32] __initconst = {
+ 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
+ 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
+ 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
+ 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55,
+};
+static const u8 pub_a_1[64] __initconst = {
+ 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+ 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+ 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+ 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
+
+ 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
+ 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
+ 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
+ 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
+};
+static const u8 pub_b_1[64] __initconst = {
+ 0x90, 0xa1, 0xaa, 0x2f, 0xb2, 0x77, 0x90, 0x55,
+ 0x9f, 0xa6, 0x15, 0x86, 0xfd, 0x8a, 0xb5, 0x47,
+ 0x00, 0x4c, 0x9e, 0xf1, 0x84, 0x22, 0x59, 0x09,
+ 0x96, 0x1d, 0xaf, 0x1f, 0xf0, 0xf0, 0xa1, 0x1e,
+
+ 0x4a, 0x21, 0xb1, 0x15, 0xf9, 0xaf, 0x89, 0x5f,
+ 0x76, 0x36, 0x8e, 0xe2, 0x30, 0x11, 0x2d, 0x47,
+ 0x60, 0x51, 0xb8, 0x9a, 0x3a, 0x70, 0x56, 0x73,
+ 0x37, 0xad, 0x9d, 0x42, 0x3e, 0xf3, 0x55, 0x4c,
+};
+static const u8 dhkey_1[32] __initconst = {
+ 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
+ 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,
+ 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+ 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec,
+};
+
+static const u8 priv_a_2[32] __initconst = {
+ 0x63, 0x76, 0x45, 0xd0, 0xf7, 0x73, 0xac, 0xb7,
+ 0xff, 0xdd, 0x03, 0x72, 0xb9, 0x72, 0x85, 0xb4,
+ 0x41, 0xb6, 0x5d, 0x0c, 0x5d, 0x54, 0x84, 0x60,
+ 0x1a, 0xa3, 0x9a, 0x3c, 0x69, 0x16, 0xa5, 0x06,
+};
+static const u8 priv_b_2[32] __initconst = {
+ 0xba, 0x30, 0x55, 0x50, 0x19, 0xa2, 0xca, 0xa3,
+ 0xa5, 0x29, 0x08, 0xc6, 0xb5, 0x03, 0x88, 0x7e,
+ 0x03, 0x2b, 0x50, 0x73, 0xd4, 0x2e, 0x50, 0x97,
+ 0x64, 0xcd, 0x72, 0x0d, 0x67, 0xa0, 0x9a, 0x52,
+};
+static const u8 pub_a_2[64] __initconst = {
+ 0xdd, 0x78, 0x5c, 0x74, 0x03, 0x9b, 0x7e, 0x98,
+ 0xcb, 0x94, 0x87, 0x4a, 0xad, 0xfa, 0xf8, 0xd5,
+ 0x43, 0x3e, 0x5c, 0xaf, 0xea, 0xb5, 0x4c, 0xf4,
+ 0x9e, 0x80, 0x79, 0x57, 0x7b, 0xa4, 0x31, 0x2c,
+
+ 0x4f, 0x5d, 0x71, 0x43, 0x77, 0x43, 0xf8, 0xea,
+ 0xd4, 0x3e, 0xbd, 0x17, 0x91, 0x10, 0x21, 0xd0,
+ 0x1f, 0x87, 0x43, 0x8e, 0x40, 0xe2, 0x52, 0xcd,
+ 0xbe, 0xdf, 0x98, 0x38, 0x18, 0x12, 0x95, 0x91,
+};
+static const u8 pub_b_2[64] __initconst = {
+ 0xcc, 0x00, 0x65, 0xe1, 0xf5, 0x6c, 0x0d, 0xcf,
+ 0xec, 0x96, 0x47, 0x20, 0x66, 0xc9, 0xdb, 0x84,
+ 0x81, 0x75, 0xa8, 0x4d, 0xc0, 0xdf, 0xc7, 0x9d,
+ 0x1b, 0x3f, 0x3d, 0xf2, 0x3f, 0xe4, 0x65, 0xf4,
+
+ 0x79, 0xb2, 0xec, 0xd8, 0xca, 0x55, 0xa1, 0xa8,
+ 0x43, 0x4d, 0x6b, 0xca, 0x10, 0xb0, 0xc2, 0x01,
+ 0xc2, 0x33, 0x4e, 0x16, 0x24, 0xc4, 0xef, 0xee,
+ 0x99, 0xd8, 0xbb, 0xbc, 0x48, 0xd0, 0x01, 0x02,
+};
+static const u8 dhkey_2[32] __initconst = {
+ 0x69, 0xeb, 0x21, 0x32, 0xf2, 0xc6, 0x05, 0x41,
+ 0x60, 0x19, 0xcd, 0x5e, 0x94, 0xe1, 0xe6, 0x5f,
+ 0x33, 0x07, 0xe3, 0x38, 0x4b, 0x68, 0xe5, 0x62,
+ 0x3f, 0x88, 0x6d, 0x2f, 0x3a, 0x84, 0x85, 0xab,
+};
+
+static const u8 priv_a_3[32] __initconst = {
+ 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
+ 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
+ 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
+ 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
+};
+static const u8 pub_a_3[64] __initconst = {
+ 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+ 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+ 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+ 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
+
+ 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
+ 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
+ 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
+ 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
+};
+static const u8 dhkey_3[32] __initconst = {
+ 0x2d, 0xab, 0x00, 0x48, 0xcb, 0xb3, 0x7b, 0xda,
+ 0x55, 0x7b, 0x8b, 0x72, 0xa8, 0x57, 0x87, 0xc3,
+ 0x87, 0x27, 0x99, 0x32, 0xfc, 0x79, 0x5f, 0xae,
+ 0x7c, 0x1c, 0xf9, 0x49, 0xe6, 0xd7, 0xaa, 0x70,
+};
+
+static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
+ const u8 pub_a[64], const u8 pub_b[64],
+ const u8 dhkey[32])
+{
+ u8 dhkey_a[32], dhkey_b[32];
+
+ ecdh_shared_secret(pub_b, priv_a, dhkey_a);
+ ecdh_shared_secret(pub_a, priv_b, dhkey_b);
+
+ if (memcmp(dhkey_a, dhkey, 32))
+ return -EINVAL;
+
+ if (memcmp(dhkey_b, dhkey, 32))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_ecdh(void)
+{
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ calltime = ktime_get();
+
+ err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
+ if (err) {
+ BT_ERR("ECDH sample 1 failed");
+ return err;
+ }
+
+ err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
+ if (err) {
+ BT_ERR("ECDH sample 2 failed");
+ return err;
+ }
+
+ err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
+ if (err) {
+ BT_ERR("ECDH sample 3 failed");
+ return err;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("ECDH test passed in %llu usecs", duration);
+
+ return 0;
+}
+
+#else
+
+static inline int test_ecdh(void)
+{
+ return 0;
+}
+
+#endif
+
+static int __init run_selftest(void)
+{
+ int err;
+
+ BT_INFO("Starting self testing");
+
+ err = test_ecdh();
+ if (err)
+ goto done;
+
+ err = bt_selftest_smp();
+
+done:
+ BT_INFO("Finished self testing");
+
+ return err;
+}
+
+#if IS_MODULE(CONFIG_BT)
+
+/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=m and is just a
+ * wrapper to allow running this at module init.
+ *
+ * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
+ */
+int __init bt_selftest(void)
+{
+ return run_selftest();
+}
+
+#else
+
+/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=y and is run
+ * via late_initcall() as last item in the initialization sequence.
+ *
+ * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
+ */
+static int __init bt_selftest_init(void)
+{
+ return run_selftest();
+}
+late_initcall(bt_selftest_init);
+
+#endif
diff --git a/net/bluetooth/selftest.h b/net/bluetooth/selftest.h
new file mode 100644
index 000000000000..2aa0a346a913
--- /dev/null
+++ b/net/bluetooth/selftest.h
@@ -0,0 +1,45 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#if IS_ENABLED(CONFIG_BT_SELFTEST) && IS_MODULE(CONFIG_BT)
+
+/* When CONFIG_BT_SELFTEST=y and the CONFIG_BT=m, then the self testing
+ * is run at module loading time.
+ */
+int bt_selftest(void);
+
+#else
+
+/* When CONFIG_BT_SELFTEST=y and CONFIG_BT=y, then the self testing
+ * is run via late_initcall() to make sure that subsys_initcall() of
+ * the Bluetooth subsystem and device_initcall() of the Crypto subsystem
+ * do not clash.
+ *
+ * When CONFIG_BT_SELFTEST=n, then this turns into an empty call that
+ * has no impact.
+ */
+static inline int bt_selftest(void)
+{
+ return 0;
+}
+
+#endif
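The caller of bt_selftest() is not part of this diff; as a sketch under that assumption only, the module build (CONFIG_BT=m) would be expected to invoke it once during module init, while the built-in case relies on the late_initcall() in selftest.c:

#include <linux/init.h>
#include <linux/module.h>

#include "selftest.h"

/* Hypothetical caller sketch; the real init function lives outside
 * this diff. With CONFIG_BT=y the inline stub above turns the call
 * into a no-op and the tests run via late_initcall() instead.
 */
static int __init bt_init_sketch(void)
{
	int err;

	err = bt_selftest();
	if (err < 0)
		return err;

	/* ... remaining Bluetooth core initialization ... */

	return 0;
}
module_init(bt_init_sketch);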
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b67749bb55bf..c09a821f381d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,6 +20,7 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/b128ops.h>
@@ -223,8 +224,9 @@ static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
return err;
}
-static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16],
- u8 a1[7], u8 a2[7], u8 mackey[16], u8 ltk[16])
+static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
+ const u8 n1[16], const u8 n2[16], const u8 a1[7],
+ const u8 a2[7], u8 mackey[16], u8 ltk[16])
{
/* The btle, salt and length "magic" values are as defined in
* the SMP section of the Bluetooth core specification. In ASCII
@@ -276,7 +278,7 @@ static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16],
}
static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
- const u8 n1[16], u8 n2[16], const u8 r[16],
+ const u8 n1[16], const u8 n2[16], const u8 r[16],
const u8 io_cap[3], const u8 a1[7], const u8 a2[7],
u8 res[16])
{
@@ -298,7 +300,7 @@ static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
if (err)
return err;
- BT_DBG("res %16phN", res);
+ SMP_DBG("res %16phN", res);
return err;
}
@@ -618,7 +620,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
bdaddr_type);
- if (oob_data) {
+ if (oob_data && oob_data->present) {
set_bit(SMP_FLAG_OOB, &smp->flags);
oob_flag = SMP_OOB_PRESENT;
memcpy(smp->rr, oob_data->rand256, 16);
@@ -1674,7 +1676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (conn->hcon->type == ACL_LINK) {
/* We must have a BR/EDR SC link */
if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
- !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
return SMP_CROSS_TRANSP_NOT_ALLOWED;
set_bit(SMP_FLAG_SC, &smp->flags);
@@ -2303,8 +2305,12 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
* implementations are not known of and in order to not over
* complicate our implementation, simply pretend that we never
* received an IRK for such a device.
+ *
+ * The Identity Address must also be a Static Random or Public
+ * Address, which hci_is_identity_address() checks for.
*/
- if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&info->bdaddr, BDADDR_ANY) ||
+ !hci_is_identity_address(&info->bdaddr, info->addr_type)) {
BT_ERR("Ignoring IRK with no identity address");
goto distribute;
}
@@ -2737,7 +2743,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
/* BR/EDR must use Secure Connections for SMP */
if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
- !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
return;
/* If our LE support is not enabled don't do anything */
@@ -2944,11 +2950,30 @@ create_chan:
l2cap_chan_set_defaults(chan);
- bacpy(&chan->src, &hdev->bdaddr);
- if (cid == L2CAP_CID_SMP)
- chan->src_type = BDADDR_LE_PUBLIC;
- else
+ if (cid == L2CAP_CID_SMP) {
+ /* If usage of static address is forced or if the device
+ * does not have a public address, then listen on the static
+ * address.
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller
+ * and a static address has been configured, then listen on
+ * the static address instead.
+ */
+ if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ bacpy(&chan->src, &hdev->static_addr);
+ chan->src_type = BDADDR_LE_RANDOM;
+ } else {
+ bacpy(&chan->src, &hdev->bdaddr);
+ chan->src_type = BDADDR_LE_PUBLIC;
+ }
+ } else {
+ bacpy(&chan->src, &hdev->bdaddr);
chan->src_type = BDADDR_BREDR;
+ }
+
chan->state = BT_LISTEN;
chan->mode = L2CAP_MODE_BASIC;
chan->imtu = L2CAP_DEFAULT_MTU;
@@ -2975,21 +3000,108 @@ static void smp_del_chan(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
+static ssize_t force_bredr_smp_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_bredr_smp_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+ return -EALREADY;
+
+ if (enable) {
+ struct l2cap_chan *chan;
+
+ chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ hdev->smp_bredr_data = chan;
+ } else {
+ struct l2cap_chan *chan;
+
+ chan = hdev->smp_bredr_data;
+ hdev->smp_bredr_data = NULL;
+ smp_del_chan(chan);
+ }
+
+ change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);
+
+ return count;
+}
+
+static const struct file_operations force_bredr_smp_fops = {
+ .open = simple_open,
+ .read = force_bredr_smp_read,
+ .write = force_bredr_smp_write,
+ .llseek = default_llseek,
+};
+
int smp_register(struct hci_dev *hdev)
{
struct l2cap_chan *chan;
BT_DBG("%s", hdev->name);
+ /* If the controller does not support Low Energy operation, then
+ * there is also no need to register any SMP channel.
+ */
+ if (!lmp_le_capable(hdev))
+ return 0;
+
+ if (WARN_ON(hdev->smp_data)) {
+ chan = hdev->smp_data;
+ hdev->smp_data = NULL;
+ smp_del_chan(chan);
+ }
+
chan = smp_add_cid(hdev, L2CAP_CID_SMP);
if (IS_ERR(chan))
return PTR_ERR(chan);
hdev->smp_data = chan;
- if (!lmp_sc_capable(hdev) &&
- !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ /* If the controller does not support BR/EDR Secure Connections
+ * feature, then the BR/EDR SMP channel shall not be present.
+ *
+ * To test this with Bluetooth 4.0 controllers, create a debugfs
+ * switch that allows forcing BR/EDR SMP support and accepting
+ * cross-transport pairing on non-AES encrypted connections.
+ */
+ if (!lmp_sc_capable(hdev)) {
+ debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
+ hdev, &force_bredr_smp_fops);
return 0;
+ }
+
+ if (WARN_ON(hdev->smp_bredr_data)) {
+ chan = hdev->smp_bredr_data;
+ hdev->smp_bredr_data = NULL;
+ smp_del_chan(chan);
+ }
chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
if (IS_ERR(chan)) {
@@ -3021,3 +3133,331 @@ void smp_unregister(struct hci_dev *hdev)
smp_del_chan(chan);
}
}
+
+#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
+
+static int __init test_ah(struct crypto_blkcipher *tfm_aes)
+{
+ const u8 irk[16] = {
+ 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+ 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
+ const u8 r[3] = { 0x94, 0x81, 0x70 };
+ const u8 exp[3] = { 0xaa, 0xfb, 0x0d };
+ u8 res[3];
+ int err;
+
+ err = smp_ah(tfm_aes, irk, r, res);
+ if (err)
+ return err;
+
+ if (memcmp(res, exp, 3))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_c1(struct crypto_blkcipher *tfm_aes)
+{
+ const u8 k[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const u8 r[16] = {
+ 0xe0, 0x2e, 0x70, 0xc6, 0x4e, 0x27, 0x88, 0x63,
+ 0x0e, 0x6f, 0xad, 0x56, 0x21, 0xd5, 0x83, 0x57 };
+ const u8 preq[7] = { 0x01, 0x01, 0x00, 0x00, 0x10, 0x07, 0x07 };
+ const u8 pres[7] = { 0x02, 0x03, 0x00, 0x00, 0x08, 0x00, 0x05 };
+ const u8 _iat = 0x01;
+ const u8 _rat = 0x00;
+ const bdaddr_t ra = { { 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1 } };
+ const bdaddr_t ia = { { 0xa6, 0xa5, 0xa4, 0xa3, 0xa2, 0xa1 } };
+ const u8 exp[16] = {
+ 0x86, 0x3b, 0xf1, 0xbe, 0xc5, 0x4d, 0xa7, 0xd2,
+ 0xea, 0x88, 0x89, 0x87, 0xef, 0x3f, 0x1e, 0x1e };
+ u8 res[16];
+ int err;
+
+ err = smp_c1(tfm_aes, k, r, preq, pres, _iat, &ia, _rat, &ra, res);
+ if (err)
+ return err;
+
+ if (memcmp(res, exp, 16))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_s1(struct crypto_blkcipher *tfm_aes)
+{
+ const u8 k[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const u8 r1[16] = {
+ 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
+ const u8 r2[16] = {
+ 0x00, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99 };
+ const u8 exp[16] = {
+ 0x62, 0xa0, 0x6d, 0x79, 0xae, 0x16, 0x42, 0x5b,
+ 0x9b, 0xf4, 0xb0, 0xe8, 0xf0, 0xe1, 0x1f, 0x9a };
+ u8 res[16];
+ int err;
+
+ err = smp_s1(tfm_aes, k, r1, r2, res);
+ if (err)
+ return err;
+
+ if (memcmp(res, exp, 16))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_f4(struct crypto_hash *tfm_cmac)
+{
+ const u8 u[32] = {
+ 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+ 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+ 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+ 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 };
+ const u8 v[32] = {
+ 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
+ 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
+ 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
+ 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 };
+ const u8 x[16] = {
+ 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+ 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+ const u8 z = 0x00;
+ const u8 exp[16] = {
+ 0x2d, 0x87, 0x74, 0xa9, 0xbe, 0xa1, 0xed, 0xf1,
+ 0x1c, 0xbd, 0xa9, 0x07, 0xf1, 0x16, 0xc9, 0xf2 };
+ u8 res[16];
+ int err;
+
+ err = smp_f4(tfm_cmac, u, v, x, z, res);
+ if (err)
+ return err;
+
+ if (memcmp(res, exp, 16))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_f5(struct crypto_hash *tfm_cmac)
+{
+ const u8 w[32] = {
+ 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
+ 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,
+ 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+ 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
+ const u8 n1[16] = {
+ 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+ 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+ const u8 n2[16] = {
+ 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
+ 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
+ const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };
+ const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };
+ const u8 exp_ltk[16] = {
+ 0x38, 0x0a, 0x75, 0x94, 0xb5, 0x22, 0x05, 0x98,
+ 0x23, 0xcd, 0xd7, 0x69, 0x11, 0x79, 0x86, 0x69 };
+ const u8 exp_mackey[16] = {
+ 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
+ 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };
+ u8 mackey[16], ltk[16];
+ int err;
+
+ err = smp_f5(tfm_cmac, w, n1, n2, a1, a2, mackey, ltk);
+ if (err)
+ return err;
+
+ if (memcmp(mackey, exp_mackey, 16))
+ return -EINVAL;
+
+ if (memcmp(ltk, exp_ltk, 16))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_f6(struct crypto_hash *tfm_cmac)
+{
+ const u8 w[16] = {
+ 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
+ 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };
+ const u8 n1[16] = {
+ 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+ 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+ const u8 n2[16] = {
+ 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
+ 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
+ const u8 r[16] = {
+ 0xc8, 0x0f, 0x2d, 0x0c, 0xd2, 0x42, 0xda, 0x08,
+ 0x54, 0xbb, 0x53, 0xb4, 0x3b, 0x34, 0xa3, 0x12 };
+ const u8 io_cap[3] = { 0x02, 0x01, 0x01 };
+ const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };
+ const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };
+ const u8 exp[16] = {
+ 0x61, 0x8f, 0x95, 0xda, 0x09, 0x0b, 0x6c, 0xd2,
+ 0xc5, 0xe8, 0xd0, 0x9c, 0x98, 0x73, 0xc4, 0xe3 };
+ u8 res[16];
+ int err;
+
+ err = smp_f6(tfm_cmac, w, n1, n2, r, io_cap, a1, a2, res);
+ if (err)
+ return err;
+
+ if (memcmp(res, exp, 16))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_g2(struct crypto_hash *tfm_cmac)
+{
+ const u8 u[32] = {
+ 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+ 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+ 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+ 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 };
+ const u8 v[32] = {
+ 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
+ 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
+ 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
+ 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 };
+ const u8 x[16] = {
+ 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
+ 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
+ const u8 y[16] = {
+ 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
+ 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
+ const u32 exp_val = 0x2f9ed5ba % 1000000;
+ u32 val;
+ int err;
+
+ err = smp_g2(tfm_cmac, u, v, x, y, &val);
+ if (err)
+ return err;
+
+ if (val != exp_val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init test_h6(struct crypto_hash *tfm_cmac)
+{
+ const u8 w[16] = {
+ 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
+ 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
+ const u8 key_id[4] = { 0x72, 0x62, 0x65, 0x6c };
+ const u8 exp[16] = {
+ 0x99, 0x63, 0xb1, 0x80, 0xe2, 0xa9, 0xd3, 0xe8,
+ 0x1c, 0xc9, 0x6d, 0xe7, 0x02, 0xe1, 0x9a, 0x2d };
+ u8 res[16];
+ int err;
+
+ err = smp_h6(tfm_cmac, w, key_id, res);
+ if (err)
+ return err;
+
+ if (memcmp(res, exp, 16))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
+ struct crypto_hash *tfm_cmac)
+{
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ calltime = ktime_get();
+
+ err = test_ah(tfm_aes);
+ if (err) {
+ BT_ERR("smp_ah test failed");
+ return err;
+ }
+
+ err = test_c1(tfm_aes);
+ if (err) {
+ BT_ERR("smp_c1 test failed");
+ return err;
+ }
+
+ err = test_s1(tfm_aes);
+ if (err) {
+ BT_ERR("smp_s1 test failed");
+ return err;
+ }
+
+ err = test_f4(tfm_cmac);
+ if (err) {
+ BT_ERR("smp_f4 test failed");
+ return err;
+ }
+
+ err = test_f5(tfm_cmac);
+ if (err) {
+ BT_ERR("smp_f5 test failed");
+ return err;
+ }
+
+ err = test_f6(tfm_cmac);
+ if (err) {
+ BT_ERR("smp_f6 test failed");
+ return err;
+ }
+
+ err = test_g2(tfm_cmac);
+ if (err) {
+ BT_ERR("smp_g2 test failed");
+ return err;
+ }
+
+ err = test_h6(tfm_cmac);
+ if (err) {
+ BT_ERR("smp_h6 test failed");
+ return err;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("SMP test passed in %llu usecs", duration);
+
+ return 0;
+}
+
+int __init bt_selftest_smp(void)
+{
+ struct crypto_blkcipher *tfm_aes;
+ struct crypto_hash *tfm_cmac;
+ int err;
+
+ tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm_aes)) {
+ BT_ERR("Unable to create ECB crypto context");
+ return PTR_ERR(tfm_aes);
+ }
+
+ tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm_cmac)) {
+ BT_ERR("Unable to create CMAC crypto context");
+ crypto_free_blkcipher(tfm_aes);
+ return PTR_ERR(tfm_cmac);
+ }
+
+ err = run_selftests(tfm_aes, tfm_cmac);
+
+ crypto_free_hash(tfm_cmac);
+ crypto_free_blkcipher(tfm_aes);
+
+ return err;
+}
+
+#endif
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 3296bf42ae80..60c5b73fcb4b 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -192,4 +192,17 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
int smp_register(struct hci_dev *hdev);
void smp_unregister(struct hci_dev *hdev);
+#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
+
+int bt_selftest_smp(void);
+
+#else
+
+static inline int bt_selftest_smp(void)
+{
+ return 0;
+}
+
+#endif
+
#endif /* __SMP_H */
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 44425aff7cba..fb57ab6b24f9 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -19,6 +19,7 @@
#include <linux/llc.h>
#include <net/llc.h>
#include <net/stp.h>
+#include <net/switchdev.h>
#include "br_private.h"
@@ -120,6 +121,48 @@ static struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
+static int br_netdev_switch_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_switch_notifier_info_to_dev(ptr);
+ struct net_bridge_port *p;
+ struct net_bridge *br;
+ struct netdev_switch_notifier_fdb_info *fdb_info;
+ int err = NOTIFY_DONE;
+
+ rtnl_lock();
+ p = br_port_get_rtnl(dev);
+ if (!p)
+ goto out;
+
+ br = p->br;
+
+ switch (event) {
+ case NETDEV_SWITCH_FDB_ADD:
+ fdb_info = ptr;
+ err = br_fdb_external_learn_add(br, p, fdb_info->addr,
+ fdb_info->vid);
+ if (err)
+ err = notifier_from_errno(err);
+ break;
+ case NETDEV_SWITCH_FDB_DEL:
+ fdb_info = ptr;
+ err = br_fdb_external_learn_del(br, p, fdb_info->addr,
+ fdb_info->vid);
+ if (err)
+ err = notifier_from_errno(err);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ return err;
+}
+
+static struct notifier_block br_netdev_switch_notifier = {
+ .notifier_call = br_netdev_switch_event,
+};
+
static void __net_exit br_net_exit(struct net *net)
{
struct net_device *dev;
@@ -169,10 +212,14 @@ static int __init br_init(void)
if (err)
goto err_out3;
- err = br_netlink_init();
+ err = register_netdev_switch_notifier(&br_netdev_switch_notifier);
if (err)
goto err_out4;
+ err = br_netlink_init();
+ if (err)
+ goto err_out5;
+
brioctl_set(br_ioctl_deviceless_stub);
#if IS_ENABLED(CONFIG_ATM_LANE)
@@ -185,6 +232,8 @@ static int __init br_init(void)
return 0;
+err_out5:
+ unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
err_out4:
unregister_netdevice_notifier(&br_device_notifier);
err_out3:
@@ -202,6 +251,7 @@ static void __exit br_deinit(void)
{
stp_proto_unregister(&br_stp_proto);
br_netlink_fini();
+ unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
unregister_netdevice_notifier(&br_device_notifier);
brioctl_set(NULL);
unregister_pernet_subsys(&br_net_ops);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index cc36e59db7d7..e0670d7054f9 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -633,7 +633,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -686,6 +687,9 @@ int br_fdb_dump(struct sk_buff *skb,
if (!(dev->priv_flags & IFF_EBRIDGE))
goto out;
+ if (!filter_dev)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
@@ -697,7 +701,7 @@ int br_fdb_dump(struct sk_buff *skb,
(!f->dst || f->dst->dev != filter_dev)) {
if (filter_dev != dev)
goto skip;
- /* !f->dst is a speacial case for bridge
+ /* !f->dst is a special case for bridge
* It means the MAC belongs to the bridge
* Therefore need a little more filtering
* we only want to dump the !f->dst case
@@ -705,6 +709,8 @@ int br_fdb_dump(struct sk_buff *skb,
if (f->dst)
goto skip;
}
+ if (!filter_dev && f->dst)
+ goto skip;
if (fdb_fill_info(skb, br, f,
NETLINK_CB(cb->skb).portid,
@@ -840,10 +846,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
- err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+ err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+ if (err || !pv)
goto out;
- }
/* We have vlans configured on this port and user didn't
* specify a VLAN. To be nice, add/update entry for every
@@ -911,16 +916,15 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
err = __br_fdb_delete(p, addr, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
- err = __br_fdb_delete(p, addr, 0);
+ err = -ENOENT;
+ err &= __br_fdb_delete(p, addr, 0);
+ if (!pv)
goto out;
- }
/* We have vlans configured on this port and user didn't
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
- err = -ENOENT;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err &= __br_fdb_delete(p, addr, vid);
}
@@ -985,26 +989,14 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
}
}
-int br_fdb_external_learn_add(struct net_device *dev,
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
- struct net_bridge_port *p;
- struct net_bridge *br;
struct hlist_head *head;
struct net_bridge_fdb_entry *fdb;
int err = 0;
- rtnl_lock();
-
- p = br_port_get_rtnl(dev);
- if (!p) {
- pr_info("bridge: %s not a bridge port\n", dev->name);
- err = -EINVAL;
- goto err_rtnl_unlock;
- }
-
- br = p->br;
-
+ ASSERT_RTNL();
spin_lock_bh(&br->hash_lock);
head = &br->hash[br_mac_hash(addr, vid)];
@@ -1029,33 +1021,18 @@ int br_fdb_external_learn_add(struct net_device *dev,
err_unlock:
spin_unlock_bh(&br->hash_lock);
-err_rtnl_unlock:
- rtnl_unlock();
return err;
}
-EXPORT_SYMBOL(br_fdb_external_learn_add);
-int br_fdb_external_learn_del(struct net_device *dev,
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
- struct net_bridge_port *p;
- struct net_bridge *br;
struct hlist_head *head;
struct net_bridge_fdb_entry *fdb;
int err = 0;
- rtnl_lock();
-
- p = br_port_get_rtnl(dev);
- if (!p) {
- pr_info("bridge: %s not a bridge port\n", dev->name);
- err = -EINVAL;
- goto err_rtnl_unlock;
- }
-
- br = p->br;
-
+ ASSERT_RTNL();
spin_lock_bh(&br->hash_lock);
head = &br->hash[br_mac_hash(addr, vid)];
@@ -1066,9 +1043,6 @@ int br_fdb_external_learn_del(struct net_device *dev,
err = -ENOENT;
spin_unlock_bh(&br->hash_lock);
-err_rtnl_unlock:
- rtnl_unlock();
return err;
}
-EXPORT_SYMBOL(br_fdb_external_learn_del);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ed307db7a12b..b087d278c679 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -424,6 +424,7 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
features = netdev_increment_features(features,
p->dev->features, mask);
}
+ features = netdev_add_tso_features(features, mask);
return features;
}
@@ -435,10 +436,16 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
int err = 0;
bool changed_addr;
- /* Don't allow bridging non-ethernet like devices */
+ /* Don't allow bridging non-ethernet like devices, or DSA-enabled
+ * master network devices, since the bridge layer rx_handler prevents
+ * the DSA fake ethertype handler from being invoked, so we do not
+ * strip off the DSA switch tag protocol header and the bridge layer
+ * just returns RX_HANDLER_CONSUMED, stopping RX processing for these
+ * frames.
+ */
if ((dev->flags & IFF_LOOPBACK) ||
dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
- !is_valid_ether_addr(dev->dev_addr))
+ !is_valid_ether_addr(dev->dev_addr) ||
+ netdev_uses_dsa(dev))
return -EINVAL;
/* No bridging of bridges */
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1f1de715197c..e2aa7be3a847 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
if (is_broadcast_ether_addr(dest)) {
- if (p->flags & BR_PROXYARP &&
+ if (IS_ENABLED(CONFIG_INET) &&
+ p->flags & BR_PROXYARP &&
skb->protocol == htons(ETH_P_ARP))
br_do_proxy_arp(skb, br, vid);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 5df05269d17a..409608960899 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -190,7 +190,8 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
nla_nest_end(skb, nest2);
nla_nest_end(skb, nest);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
end:
nla_nest_end(skb, nest);
@@ -276,7 +277,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device *dev;
int err;
- err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
+ err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
if (err < 0)
return err;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index c190d22b6b3d..65728e0dc4ff 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -66,17 +66,17 @@ static int brnf_pass_vlan_indev __read_mostly = 0;
#endif
#define IS_IP(skb) \
- (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+ (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
#define IS_IPV6(skb) \
- (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+ (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
#define IS_ARP(skb) \
- (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+ (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
return skb->protocol;
else if (skb->protocol == htons(ETH_P_8021Q))
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -436,11 +436,11 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
struct net_device *vlan, *br;
br = bridge_parent(dev);
- if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
+ if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
return br;
vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
- vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+ skb_vlan_tag_get(skb) & VLAN_VID_MASK);
return vlan ? vlan : br;
}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 9f5eb55a4d3a..4fbcea0e7ecb 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -16,6 +16,7 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
@@ -67,6 +68,120 @@ static int br_port_fill_attrs(struct sk_buff *skb,
return 0;
}
+static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
+ u16 vid_end, u16 flags)
+{
+ struct bridge_vlan_info vinfo;
+
+ if ((vid_end - vid_start) > 0) {
+ /* add range to skb */
+ vinfo.vid = vid_start;
+ vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
+ if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+ sizeof(vinfo), &vinfo))
+ goto nla_put_failure;
+
+ vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
+
+ vinfo.vid = vid_end;
+ vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
+ if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+ sizeof(vinfo), &vinfo))
+ goto nla_put_failure;
+ } else {
+ vinfo.vid = vid_start;
+ vinfo.flags = flags;
+ if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+ sizeof(vinfo), &vinfo))
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
+ const struct net_port_vlans *pv)
+{
+ u16 vid_range_start = 0, vid_range_end = 0;
+ u16 vid_range_flags = 0;
+ u16 pvid, vid, flags;
+ int err = 0;
+
+ /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
+ * and mark vlan info with begin and end flags
+ * if vlaninfo represents a range
+ */
+ pvid = br_get_pvid(pv);
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ flags = 0;
+ if (vid == pvid)
+ flags |= BRIDGE_VLAN_INFO_PVID;
+
+ if (test_bit(vid, pv->untagged_bitmap))
+ flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (vid_range_start == 0) {
+ goto initvars;
+ } else if ((vid - vid_range_end) == 1 &&
+ flags == vid_range_flags) {
+ vid_range_end = vid;
+ continue;
+ } else {
+ err = br_fill_ifvlaninfo_range(skb, vid_range_start,
+ vid_range_end,
+ vid_range_flags);
+ if (err)
+ return err;
+ }
+
+initvars:
+ vid_range_start = vid;
+ vid_range_end = vid;
+ vid_range_flags = flags;
+ }
+
+ if (vid_range_start != 0) {
+ /* Call it once more to send any left over vlans */
+ err = br_fill_ifvlaninfo_range(skb, vid_range_start,
+ vid_range_end,
+ vid_range_flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int br_fill_ifvlaninfo(struct sk_buff *skb,
+ const struct net_port_vlans *pv)
+{
+ struct bridge_vlan_info vinfo;
+ u16 pvid, vid;
+
+ pvid = br_get_pvid(pv);
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ vinfo.vid = vid;
+ vinfo.flags = 0;
+ if (vid == pvid)
+ vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
+
+ if (test_bit(vid, pv->untagged_bitmap))
+ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+ sizeof(vinfo), &vinfo))
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
/*
* Create one netlink message for one interface
* Contains port and master info as well as carrier and bridge state.
@@ -121,12 +236,11 @@ static int br_fill_ifinfo(struct sk_buff *skb,
}
/* Check if the VID information is requested */
- if (filter_mask & RTEXT_FILTER_BRVLAN) {
- struct nlattr *af;
+ if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
+ (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
const struct net_port_vlans *pv;
- struct bridge_vlan_info vinfo;
- u16 vid;
- u16 pvid;
+ struct nlattr *af;
+ int err;
if (port)
pv = nbp_get_vlan_info(port);
@@ -140,26 +254,18 @@ static int br_fill_ifinfo(struct sk_buff *skb,
if (!af)
goto nla_put_failure;
- pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- vinfo.vid = vid;
- vinfo.flags = 0;
- if (vid == pvid)
- vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
-
- if (test_bit(vid, pv->untagged_bitmap))
- vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
-
- if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
- sizeof(vinfo), &vinfo))
- goto nla_put_failure;
- }
-
+ if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
+ err = br_fill_ifvlaninfo_compressed(skb, pv);
+ else
+ err = br_fill_ifvlaninfo(skb, pv);
+ if (err)
+ goto nla_put_failure;
nla_nest_end(skb, af);
}
done:
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -206,69 +312,99 @@ errout:
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask)
{
- int err = 0;
struct net_bridge_port *port = br_port_get_rtnl(dev);
- if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
- goto out;
+ if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
+ !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
+ return 0;
- err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
- filter_mask, dev);
-out:
- return err;
+ return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+ filter_mask, dev);
}
-static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
- [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
- [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
- [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
- .len = sizeof(struct bridge_vlan_info), },
-};
+static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
+ int cmd, struct bridge_vlan_info *vinfo)
+{
+ int err = 0;
+
+ switch (cmd) {
+ case RTM_SETLINK:
+ if (p) {
+ err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+ if (err)
+ break;
+
+ if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+ err = br_vlan_add(p->br, vinfo->vid,
+ vinfo->flags);
+ } else {
+ err = br_vlan_add(br, vinfo->vid, vinfo->flags);
+ }
+ break;
+
+ case RTM_DELLINK:
+ if (p) {
+ nbp_vlan_delete(p, vinfo->vid);
+ if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+ br_vlan_delete(p->br, vinfo->vid);
+ } else {
+ br_vlan_delete(br, vinfo->vid);
+ }
+ break;
+ }
+
+ return err;
+}
static int br_afspec(struct net_bridge *br,
struct net_bridge_port *p,
struct nlattr *af_spec,
int cmd)
{
- struct nlattr *tb[IFLA_BRIDGE_MAX+1];
+ struct bridge_vlan_info *vinfo_start = NULL;
+ struct bridge_vlan_info *vinfo = NULL;
+ struct nlattr *attr;
int err = 0;
+ int rem;
- err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
- if (err)
- return err;
+ nla_for_each_nested(attr, af_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
+ continue;
+ if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+ return -EINVAL;
+ vinfo = nla_data(attr);
+ if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+ if (vinfo_start)
+ return -EINVAL;
+ vinfo_start = vinfo;
+ continue;
+ }
- if (tb[IFLA_BRIDGE_VLAN_INFO]) {
- struct bridge_vlan_info *vinfo;
+ if (vinfo_start) {
+ struct bridge_vlan_info tmp_vinfo;
+ int v;
- vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+ if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
+ return -EINVAL;
- if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
- return -EINVAL;
+ if (vinfo->vid <= vinfo_start->vid)
+ return -EINVAL;
- switch (cmd) {
- case RTM_SETLINK:
- if (p) {
- err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+ memcpy(&tmp_vinfo, vinfo_start,
+ sizeof(struct bridge_vlan_info));
+
+ for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
+ tmp_vinfo.vid = v;
+ err = br_vlan_info(br, p, cmd, &tmp_vinfo);
if (err)
break;
-
- if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
- err = br_vlan_add(p->br, vinfo->vid,
- vinfo->flags);
- } else
- err = br_vlan_add(br, vinfo->vid, vinfo->flags);
-
- break;
-
- case RTM_DELLINK:
- if (p) {
- nbp_vlan_delete(p, vinfo->vid);
- if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
- br_vlan_delete(p->br, vinfo->vid);
- } else
- br_vlan_delete(br, vinfo->vid);
- break;
+ }
+ vinfo_start = NULL;
+ } else {
+ err = br_vlan_info(br, p, cmd, vinfo);
}
+ if (err)
+ break;
}
return err;
@@ -359,13 +495,13 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
}
/* Change state and parameters on port. */
-int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
struct nlattr *protinfo;
struct nlattr *afspec;
struct net_bridge_port *p;
struct nlattr *tb[IFLA_BRPORT_MAX + 1];
- int err = 0;
+ int err = 0, ret_offload = 0;
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
@@ -407,19 +543,28 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
afspec, RTM_SETLINK);
}
+ if (p && !(flags & BRIDGE_FLAGS_SELF)) {
+ /* set bridge attributes in hardware if supported
+ */
+ ret_offload = netdev_switch_port_bridge_setlink(dev, nlh,
+ flags);
+ if (ret_offload && ret_offload != -EOPNOTSUPP)
+ br_warn(p->br, "error setting attrs on port %u(%s)\n",
+ (unsigned int)p->port_no, p->dev->name);
+ }
+
if (err == 0)
br_ifinfo_notify(RTM_NEWLINK, p);
-
out:
return err;
}
/* Delete port information */
-int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
struct nlattr *afspec;
struct net_bridge_port *p;
- int err;
+ int err = 0, ret_offload = 0;
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!afspec)
@@ -432,6 +577,21 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
afspec, RTM_DELLINK);
+ if (err == 0)
+ /* Send RTM_NEWLINK because userspace
+ * expects RTM_NEWLINK for vlan dels
+ */
+ br_ifinfo_notify(RTM_NEWLINK, p);
+
+ if (p && !(flags & BRIDGE_FLAGS_SELF)) {
+ /* del bridge attributes in hardware
+ */
+ ret_offload = netdev_switch_port_bridge_dellink(dev, nlh,
+ flags);
+ if (ret_offload && ret_offload != -EOPNOTSUPP)
+ br_warn(p->br, "error deleting attrs on port %u (%s)\n",
+ (unsigned int)p->port_no, p->dev->name);
+ }
return err;
}
@@ -561,7 +721,7 @@ static size_t br_get_link_af_size(const struct net_device *dev)
return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
}
-static struct rtnl_af_ops br_af_ops = {
+static struct rtnl_af_ops br_af_ops __read_mostly = {
.family = AF_BRIDGE,
.get_link_af_size = br_get_link_af_size,
};
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index aea3d1339b3f..de0919975a25 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -402,6 +402,10 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int idx);
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid);
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid);
/* br_forward.c */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -628,8 +632,8 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
{
int err = 0;
- if (vlan_tx_tag_present(skb))
- *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
+ if (skb_vlan_tag_present(skb))
+ *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
else {
*vid = 0;
err = -EINVAL;
@@ -815,8 +819,8 @@ extern struct rtnl_link_ops br_link_ops;
int br_netlink_init(void);
void br_netlink_fini(void);
void br_ifinfo_notify(int event, struct net_bridge_port *port);
-int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
-int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
u32 filter_mask);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 97b8ddf57363..13013fe8db24 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -187,7 +187,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
* sent from vlan device on the bridge device, it does not have
* HW accelerated vlan tag.
*/
- if (unlikely(!vlan_tx_tag_present(skb) &&
+ if (unlikely(!skb_vlan_tag_present(skb) &&
skb->protocol == proto)) {
skb = skb_vlan_untag(skb);
if (unlikely(!skb))
@@ -200,7 +200,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
/* Protocol-mismatch, empty out vlan_tci for new tag */
skb_push(skb, ETH_HLEN);
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
if (unlikely(!skb))
return false;
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 8d3f8c7651f0..618568888128 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -45,8 +45,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
/* VLAN encapsulated Type/Length field, given from orig frame */
__be16 encap;
- if (vlan_tx_tag_present(skb)) {
- TCI = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ TCI = skb_vlan_tag_get(skb);
encap = skb->protocol;
} else {
const struct vlan_hdr *fp;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index d9a8c05d995d..91180a7fc943 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -133,7 +133,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
__be16 ethproto;
int verdict, i;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
ethproto = htons(ETH_P_8021Q);
else
ethproto = h->h_proto;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index b0330aecbf97..3244aead0926 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -265,22 +265,12 @@ out:
data[NFT_REG_VERDICT].verdict = NF_DROP;
}
-static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
- struct nft_base_chain *basechain;
-
- if (chain->flags & NFT_BASE_CHAIN) {
- basechain = nft_base_chain(chain);
-
- switch (basechain->ops[0].hooknum) {
- case NF_BR_PRE_ROUTING:
- case NF_BR_LOCAL_IN:
- break;
- default:
- return -EOPNOTSUPP;
- }
- }
- return 0;
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
+ (1 << NF_BR_LOCAL_IN));
}
static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code, err;
- err = nft_reject_bridge_validate_hooks(ctx->chain);
+ err = nft_reject_bridge_validate(ctx, expr, NULL);
if (err < 0)
return err;
@@ -341,13 +331,6 @@ nla_put_failure:
return -1;
}
-static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
-{
- return nft_reject_bridge_validate_hooks(ctx->chain);
-}
-
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4589ff67bfa9..67a4a36febd1 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
ASSERT_RTNL();
caifdev = netdev_priv(dev);
caif_netlink_parms(data, &caifdev->conn_req);
- dev_net_set(caifdev->netdev, src_net);
ret = register_netdevice(dev);
if (ret)
diff --git a/net/can/gw.c b/net/can/gw.c
index 295f62e62eb3..a6f448e18ea8 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -575,7 +575,8 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
goto cancel;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
cancel:
nlmsg_cancel(skb, nlh);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 555013034f7a..096d91447e06 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -23,17 +23,15 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
if (!pages)
return ERR_PTR(-ENOMEM);
- down_read(&current->mm->mmap_sem);
while (got < num_pages) {
- rc = get_user_pages(current, current->mm,
+ rc = get_user_pages_unlocked(current, current->mm,
(unsigned long)data + ((unsigned long)got * PAGE_SIZE),
- num_pages - got, write_page, 0, pages + got, NULL);
+ num_pages - got, write_page, 0, pages + got);
if (rc < 0)
break;
BUG_ON(rc == 0);
got += rc;
}
- up_read(&current->mm->mmap_sem);
if (rc < 0)
goto fail;
return pages;
diff --git a/net/core/Makefile b/net/core/Makefile
index 235e6c50708d..fec0856dd6c0 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux networking core.
#
-obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 683d493aa1bf..d030575532a2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -371,9 +371,10 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
- return &ptype_all;
+ return pt->dev ? &pt->dev->ptype_all : &ptype_all;
else
- return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+ return pt->dev ? &pt->dev->ptype_specific :
+ &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
@@ -1734,6 +1735,23 @@ static inline int deliver_skb(struct sk_buff *skb,
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
+static inline void deliver_ptype_list_skb(struct sk_buff *skb,
+ struct packet_type **pt,
+ struct net_device *dev, __be16 type,
+ struct list_head *ptype_list)
+{
+ struct packet_type *ptype, *pt_prev = *pt;
+
+ list_for_each_entry_rcu(ptype, ptype_list, list) {
+ if (ptype->type != type)
+ continue;
+ if (pt_prev)
+ deliver_skb(skb, pt_prev, dev);
+ pt_prev = ptype;
+ }
+ *pt = pt_prev;
+}
+
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
if (!ptype->af_packet_priv || !skb->sk)
@@ -1757,45 +1775,54 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
struct packet_type *ptype;
struct sk_buff *skb2 = NULL;
struct packet_type *pt_prev = NULL;
+ struct list_head *ptype_list = &ptype_all;
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
+again:
+ list_for_each_entry_rcu(ptype, ptype_list, list) {
/* Never send packets back to the socket
* they originated from - MvS (miquels@drinkel.ow.org)
*/
- if ((ptype->dev == dev || !ptype->dev) &&
- (!skb_loop_sk(ptype, skb))) {
- if (pt_prev) {
- deliver_skb(skb2, pt_prev, skb->dev);
- pt_prev = ptype;
- continue;
- }
+ if (skb_loop_sk(ptype, skb))
+ continue;
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (!skb2)
- break;
+ if (pt_prev) {
+ deliver_skb(skb2, pt_prev, skb->dev);
+ pt_prev = ptype;
+ continue;
+ }
- net_timestamp_set(skb2);
+ /* need to clone skb, done only once */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ goto out_unlock;
- /* skb->nh should be correctly
- set by sender, so that the second statement is
- just protection against buggy protocols.
- */
- skb_reset_mac_header(skb2);
-
- if (skb_network_header(skb2) < skb2->data ||
- skb_network_header(skb2) > skb_tail_pointer(skb2)) {
- net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
- ntohs(skb2->protocol),
- dev->name);
- skb_reset_network_header(skb2);
- }
+ net_timestamp_set(skb2);
- skb2->transport_header = skb2->network_header;
- skb2->pkt_type = PACKET_OUTGOING;
- pt_prev = ptype;
+ /* skb->nh should be correctly
+ * set by sender, so that the second statement is
+ * just protection against buggy protocols.
+ */
+ skb_reset_mac_header(skb2);
+
+ if (skb_network_header(skb2) < skb2->data ||
+ skb_network_header(skb2) > skb_tail_pointer(skb2)) {
+ net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
+ ntohs(skb2->protocol),
+ dev->name);
+ skb_reset_network_header(skb2);
}
+
+ skb2->transport_header = skb2->network_header;
+ skb2->pkt_type = PACKET_OUTGOING;
+ pt_prev = ptype;
}
+
+ if (ptype_list == &ptype_all) {
+ ptype_list = &dev->ptype_all;
+ goto again;
+ }
+out_unlock:
if (pt_prev)
pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
rcu_read_unlock();
@@ -2352,7 +2379,6 @@ EXPORT_SYMBOL(skb_checksum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
- unsigned int vlan_depth = skb->mac_len;
__be16 type = skb->protocol;
/* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2392,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
type = eth->h_proto;
}
- /* if skb->protocol is 802.1Q/AD then the header should already be
- * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
- * ETH_HLEN otherwise
- */
- if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
- if (vlan_depth) {
- if (WARN_ON(vlan_depth < VLAN_HLEN))
- return 0;
- vlan_depth -= VLAN_HLEN;
- } else {
- vlan_depth = ETH_HLEN;
- }
- do {
- struct vlan_hdr *vh;
-
- if (unlikely(!pskb_may_pull(skb,
- vlan_depth + VLAN_HLEN)))
- return 0;
-
- vh = (struct vlan_hdr *)(skb->data + vlan_depth);
- type = vh->h_vlan_encapsulated_proto;
- vlan_depth += VLAN_HLEN;
- } while (type == htons(ETH_P_8021Q) ||
- type == htons(ETH_P_8021AD));
- }
-
- *depth = vlan_depth;
-
- return type;
+ return __vlan_get_protocol(skb, type, depth);
}
/**
@@ -2578,7 +2576,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
if (skb->encapsulation)
features &= dev->hw_enc_features;
- if (!vlan_tx_tag_present(skb)) {
+ if (!skb_vlan_tag_present(skb)) {
if (unlikely(protocol == htons(ETH_P_8021Q) ||
protocol == htons(ETH_P_8021AD))) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2617,7 +2615,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
unsigned int len;
int rc;
- if (!list_empty(&ptype_all))
+ if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
dev_queue_xmit_nit(skb, dev);
len = skb->len;
@@ -2659,7 +2657,7 @@ out:
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
netdev_features_t features)
{
- if (vlan_tx_tag_present(skb) &&
+ if (skb_vlan_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
@@ -3032,6 +3030,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
+u32 rps_cpu_mask __read_mostly;
+EXPORT_SYMBOL(rps_cpu_mask);
struct static_key rps_needed __read_mostly;
@@ -3088,16 +3088,17 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
- struct netdev_rx_queue *rxqueue;
- struct rps_map *map;
+ const struct rps_sock_flow_table *sock_flow_table;
+ struct netdev_rx_queue *rxqueue = dev->_rx;
struct rps_dev_flow_table *flow_table;
- struct rps_sock_flow_table *sock_flow_table;
+ struct rps_map *map;
int cpu = -1;
- u16 tcpu;
+ u32 tcpu;
u32 hash;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
+
if (unlikely(index >= dev->real_num_rx_queues)) {
WARN_ONCE(dev->real_num_rx_queues > 1,
"%s received packet on queue %u, but number "
@@ -3105,39 +3106,40 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
dev->name, index, dev->real_num_rx_queues);
goto done;
}
- rxqueue = dev->_rx + index;
- } else
- rxqueue = dev->_rx;
+ rxqueue += index;
+ }
+ /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
+
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
map = rcu_dereference(rxqueue->rps_map);
- if (map) {
- if (map->len == 1 &&
- !rcu_access_pointer(rxqueue->rps_flow_table)) {
- tcpu = map->cpus[0];
- if (cpu_online(tcpu))
- cpu = tcpu;
- goto done;
- }
- } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
+ if (!flow_table && !map)
goto done;
- }
skb_reset_network_header(skb);
hash = skb_get_hash(skb);
if (!hash)
goto done;
- flow_table = rcu_dereference(rxqueue->rps_flow_table);
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
- u16 next_cpu;
struct rps_dev_flow *rflow;
+ u32 next_cpu;
+ u32 ident;
+ /* First check into global flow table if there is a match */
+ ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+ if ((ident ^ hash) & ~rps_cpu_mask)
+ goto try_rps;
+
+ next_cpu = ident & rps_cpu_mask;
+
+ /* OK, now we know there is a match,
+ * we can look at the local (per receive queue) flow table
+ */
rflow = &flow_table->flows[hash & flow_table->mask];
tcpu = rflow->cpu;
- next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
-
/*
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
@@ -3164,6 +3166,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
}
}
+try_rps:
+
if (map) {
tcpu = map->cpus[reciprocal_scale(hash, map->len)];
if (cpu_online(tcpu)) {
@@ -3615,7 +3619,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
- struct net_device *null_or_dev;
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
@@ -3658,11 +3661,15 @@ another_round:
goto skip_taps;
list_for_each_entry_rcu(ptype, &ptype_all, list) {
- if (!ptype->dev || ptype->dev == skb->dev) {
- if (pt_prev)
- ret = deliver_skb(skb, pt_prev, orig_dev);
- pt_prev = ptype;
- }
+ if (pt_prev)
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = ptype;
+ }
+
+ list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
+ if (pt_prev)
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = ptype;
}
skip_taps:
@@ -3676,7 +3683,7 @@ ncls:
if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
goto drop;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
@@ -3708,8 +3715,8 @@ ncls:
}
}
- if (unlikely(vlan_tx_tag_present(skb))) {
- if (vlan_tx_tag_get_id(skb))
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ if (skb_vlan_tag_get_id(skb))
skb->pkt_type = PACKET_OTHERHOST;
/* Note: we might in the future use prio bits
* and set skb->priority like in vlan_do_receive()
@@ -3718,19 +3725,21 @@ ncls:
skb->vlan_tci = 0;
}
+ type = skb->protocol;
+
/* deliver only exact match when indicated */
- null_or_dev = deliver_exact ? skb->dev : NULL;
+ if (likely(!deliver_exact)) {
+ deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+ &ptype_base[ntohs(type) &
+ PTYPE_HASH_MASK]);
+ }
- type = skb->protocol;
- list_for_each_entry_rcu(ptype,
- &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
- if (ptype->type == type &&
- (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
- ptype->dev == orig_dev)) {
- if (pt_prev)
- ret = deliver_skb(skb, pt_prev, orig_dev);
- pt_prev = ptype;
- }
+ deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+ &orig_dev->ptype_specific);
+
+ if (unlikely(skb->dev != orig_dev)) {
+ deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+ &skb->dev->ptype_specific);
}
if (pt_prev) {
@@ -5323,7 +5332,27 @@ void netdev_upper_dev_unlink(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
-void netdev_adjacent_add_links(struct net_device *dev)
+/**
+ * netdev_bonding_info_change - Dispatch event about slave change
+ * @dev: device
+ * @netdev_bonding_info: info to dispatch
+ *
+ * Send NETDEV_BONDING_INFO to netdev notifiers with info.
+ * The caller must hold the RTNL lock.
+ */
+void netdev_bonding_info_change(struct net_device *dev,
+ struct netdev_bonding_info *bonding_info)
+{
+ struct netdev_notifier_bonding_info info;
+
+ memcpy(&info.bonding_info, bonding_info,
+ sizeof(struct netdev_bonding_info));
+ call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
+ &info.info);
+}
+EXPORT_SYMBOL(netdev_bonding_info_change);
+
+static void netdev_adjacent_add_links(struct net_device *dev)
{
struct netdev_adjacent *iter;
@@ -5348,7 +5377,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
}
}
-void netdev_adjacent_del_links(struct net_device *dev)
+static void netdev_adjacent_del_links(struct net_device *dev)
{
struct netdev_adjacent *iter;
@@ -6172,13 +6201,16 @@ static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
+ size_t sz = count * sizeof(*rx);
BUG_ON(count < 1);
- rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
- if (!rx)
- return -ENOMEM;
-
+ rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!rx) {
+ rx = vzalloc(sz);
+ if (!rx)
+ return -ENOMEM;
+ }
dev->_rx = rx;
for (i = 0; i < count; i++)
@@ -6576,6 +6608,8 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
+ BUG_ON(!list_empty(&dev->ptype_all));
+ BUG_ON(!list_empty(&dev->ptype_specific));
WARN_ON(rcu_access_pointer(dev->ip_ptr));
WARN_ON(rcu_access_pointer(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
@@ -6656,7 +6690,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
if (!queue)
return NULL;
netdev_init_one_queue(dev, queue, NULL);
- queue->qdisc = &noop_qdisc;
+ RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
queue->qdisc_sleeping = &noop_qdisc;
rcu_assign_pointer(dev->ingress_queue, queue);
#endif
@@ -6758,6 +6792,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->adj_list.lower);
INIT_LIST_HEAD(&dev->all_adj_list.upper);
INIT_LIST_HEAD(&dev->all_adj_list.lower);
+ INIT_LIST_HEAD(&dev->ptype_all);
+ INIT_LIST_HEAD(&dev->ptype_specific);
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
@@ -6808,7 +6844,7 @@ void free_netdev(struct net_device *dev)
netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
- kfree(dev->_rx);
+ kvfree(dev->_rx);
#endif
kfree(rcu_dereference_protected(dev->ingress_queue, 1));
@@ -7072,10 +7108,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
- /* Append NAPI poll list from offline CPU. */
- if (!list_empty(&oldsd->poll_list)) {
- list_splice_init(&oldsd->poll_list, &sd->poll_list);
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ /* Append NAPI poll list from offline CPU, with one exception :
+ * process_backlog() must be called by cpu owning percpu backlog.
+ * We properly handle process_queue & input_pkt_queue later.
+ */
+ while (!list_empty(&oldsd->poll_list)) {
+ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+ struct napi_struct,
+ poll_list);
+
+ list_del_init(&napi->poll_list);
+ if (napi->poll == process_backlog)
+ napi->state = 0;
+ else
+ ____napi_schedule(sd, napi);
}
raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7083,11 +7129,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
- netif_rx_internal(skb);
+ netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
- netif_rx_internal(skb);
+ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 550892cd6b3f..91f74f3eb204 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1597,20 +1597,31 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
return err;
}
+static int __ethtool_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
+ if (phydev && phydev->drv && phydev->drv->module_info)
+ return phydev->drv->module_info(phydev, modinfo);
+
+ if (ops->get_module_info)
+ return ops->get_module_info(dev, modinfo);
+
+ return -EOPNOTSUPP;
+}
+
static int ethtool_get_module_info(struct net_device *dev,
void __user *useraddr)
{
int ret;
struct ethtool_modinfo modinfo;
- const struct ethtool_ops *ops = dev->ethtool_ops;
-
- if (!ops->get_module_info)
- return -EOPNOTSUPP;
if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
return -EFAULT;
- ret = ops->get_module_info(dev, &modinfo);
+ ret = __ethtool_get_module_info(dev, &modinfo);
if (ret)
return ret;
@@ -1620,21 +1631,33 @@ static int ethtool_get_module_info(struct net_device *dev,
return 0;
}
+static int __ethtool_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
+ if (phydev && phydev->drv && phydev->drv->module_eeprom)
+ return phydev->drv->module_eeprom(phydev, ee, data);
+
+ if (ops->get_module_eeprom)
+ return ops->get_module_eeprom(dev, ee, data);
+
+ return -EOPNOTSUPP;
+}
+
static int ethtool_get_module_eeprom(struct net_device *dev,
void __user *useraddr)
{
int ret;
struct ethtool_modinfo modinfo;
- const struct ethtool_ops *ops = dev->ethtool_ops;
-
- if (!ops->get_module_info || !ops->get_module_eeprom)
- return -EOPNOTSUPP;
- ret = ops->get_module_info(dev, &modinfo);
+ ret = __ethtool_get_module_info(dev, &modinfo);
if (ret)
return ret;
- return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
+ return ethtool_get_any_eeprom(dev, useraddr,
+ __ethtool_get_module_eeprom,
modinfo.eeprom_len);
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 185c341fafbd..44706e81b2e0 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -609,7 +609,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
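The fib_rules.c hunk above is one instance of a tree-wide conversion in this diff: netlink fill functions no longer propagate the return value of nlmsg_end() and instead return 0 on success, so dump callers now test for "< 0" rather than "<= 0" (see the neighbour and rtnetlink hunks below). A hedged sketch of the resulting fill-function shape; the function name and attribute number are made up for illustration:

#include <net/netlink.h>

static int example_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
			     int type, u32 value)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, type, 0, NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 1 /* hypothetical attribute */, value))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);	/* patch in the final message length */
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* drop the partially built message */
	return -EMSGSIZE;
}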
diff --git a/net/core/flow.c b/net/core/flow.c
index a0348fde1fdf..1033725be40b 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -379,7 +379,7 @@ done:
static void flow_cache_flush_task(struct work_struct *work)
{
struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
- flow_cache_gc_work);
+ flow_cache_flush_work);
struct net *net = container_of(xfrm, struct net, xfrm);
flow_cache_flush(net);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 45084938c403..2c35c02a931e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -178,6 +178,20 @@ ipv6:
return false;
}
}
+ case htons(ETH_P_TIPC): {
+ struct {
+ __be32 pre[3];
+ __be32 srcnode;
+ } *hdr, _hdr;
+ hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
+ if (!hdr)
+ return false;
+ flow->src = hdr->srcnode;
+ flow->dst = 0;
+ flow->n_proto = proto;
+ flow->thoff = (u16)nhoff;
+ return true;
+ }
case htons(ETH_P_FCOE):
flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
/* fall through */
@@ -408,7 +422,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
dev_maps = rcu_dereference(dev->xps_maps);
if (dev_maps) {
map = rcu_dereference(
- dev_maps->cpu_map[raw_smp_processor_id()]);
+ dev_maps->cpu_map[skb->sender_cpu - 1]);
if (map) {
if (map->len == 1)
queue_index = map->queues[0];
@@ -454,6 +468,11 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
{
int queue_index = 0;
+#ifdef CONFIG_XPS
+ if (skb->sender_cpu == 0)
+ skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
diff --git a/net/core/iovec.c b/net/core/iovec.c
deleted file mode 100644
index dcbe98b3726a..000000000000
--- a/net/core/iovec.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * iovec manipulation routines.
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Fixes:
- * Andrew Lunn : Errors in iovec copying.
- * Pedro Roque : Added memcpy_fromiovecend and
- * csum_..._fromiovecend.
- * Andi Kleen : fixed error handling for 2.1
- * Alexey Kuznetsov: 2.1 optimisations
- * Andi Kleen : Fix csum*fromiovecend for IPv6.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/in6.h>
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <net/checksum.h>
-#include <net/sock.h>
-
-/*
- * And now for the all-in-one: copy and checksum from a user iovec
- * directly to a datagram
- * Calls to csum_partial but the last must be in 32 bit chunks
- *
- * ip_build_xmit must ensure that when fragmenting only the last
- * call to this function will be unaligned also.
- */
-int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
- int offset, unsigned int len, __wsum *csump)
-{
- __wsum csum = *csump;
- int partial_cnt = 0, err = 0;
-
- /* Skip over the finished iovecs */
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- iov++;
- }
-
- while (len > 0) {
- u8 __user *base = iov->iov_base + offset;
- int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
- offset = 0;
-
- /* There is a remnant from previous iov. */
- if (partial_cnt) {
- int par_len = 4 - partial_cnt;
-
- /* iov component is too short ... */
- if (par_len > copy) {
- if (copy_from_user(kdata, base, copy))
- goto out_fault;
- kdata += copy;
- base += copy;
- partial_cnt += copy;
- len -= copy;
- iov++;
- if (len)
- continue;
- *csump = csum_partial(kdata - partial_cnt,
- partial_cnt, csum);
- goto out;
- }
- if (copy_from_user(kdata, base, par_len))
- goto out_fault;
- csum = csum_partial(kdata - partial_cnt, 4, csum);
- kdata += par_len;
- base += par_len;
- copy -= par_len;
- len -= par_len;
- partial_cnt = 0;
- }
-
- if (len > copy) {
- partial_cnt = copy % 4;
- if (partial_cnt) {
- copy -= partial_cnt;
- if (copy_from_user(kdata + copy, base + copy,
- partial_cnt))
- goto out_fault;
- }
- }
-
- if (copy) {
- csum = csum_and_copy_from_user(base, kdata, copy,
- csum, &err);
- if (err)
- goto out;
- }
- len -= copy + partial_cnt;
- kdata += copy + partial_cnt;
- iov++;
- }
- *csump = csum;
-out:
- return err;
-
-out_fault:
- err = -EFAULT;
- goto out;
-}
-EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
-
-unsigned long iov_pages(const struct iovec *iov, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iov->iov_len)) {
- offset -= iov->iov_len;
- ++iov;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iov[seg].iov_base + offset;
- len = iov[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-EXPORT_SYMBOL(iov_pages);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8e38f17288d3..70fe9e10ac86 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1884,7 +1884,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
goto nla_put_failure;
read_unlock_bh(&tbl->lock);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
read_unlock_bh(&tbl->lock);
@@ -1917,7 +1918,8 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
goto errout;
read_unlock_bh(&tbl->lock);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
errout:
read_unlock_bh(&tbl->lock);
nlmsg_cancel(skb, nlh);
@@ -2043,6 +2045,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
case NDTPA_BASE_REACHABLE_TIME:
NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
nla_get_msecs(tbp[i]));
+ /* update reachable_time as well, otherwise, the change will
+ * only be effective after the next time neigh_periodic_work
+ * decides to recompute it (can be multiple minutes)
+ */
+ p->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
break;
case NDTPA_GC_STALETIME:
NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2120,7 +2128,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
- NLM_F_MULTI) <= 0)
+ NLM_F_MULTI) < 0)
break;
nidx = 0;
@@ -2136,7 +2144,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGHTBL,
- NLM_F_MULTI) <= 0)
+ NLM_F_MULTI) < 0)
goto out;
next:
nidx++;
@@ -2196,7 +2204,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -2226,7 +2235,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -2264,7 +2274,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
- NLM_F_MULTI) <= 0) {
+ NLM_F_MULTI) < 0) {
rc = -1;
goto out;
}
@@ -2301,7 +2311,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
- NLM_F_MULTI, tbl) <= 0) {
+ NLM_F_MULTI, tbl) < 0) {
read_unlock_bh(&tbl->lock);
rc = -1;
goto out;
@@ -2921,6 +2931,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct neigh_parms *p = ctl->extra2;
+ int ret;
+
+ if (strcmp(ctl->procname, "base_reachable_time") == 0)
+ ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+ else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+ ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+ else
+ ret = -1;
+
+ if (write && ret == 0) {
+ /* update reachable_time as well, otherwise, the change will
+ * only be effective after the next time neigh_periodic_work
+ * decides to recompute it
+ */
+ p->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+ }
+ return ret;
+}
+
#define NEIGH_PARMS_DATA_OFFSET(index) \
(&((struct neigh_parms *) 0)->data[index])
@@ -3047,6 +3082,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
/* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+ } else {
+ /* Those handlers will update p->reachable_time after
+ * base_reachable_time(_ms) is set to ensure the new timer starts being
+ * applied after the next neighbour update instead of waiting for
+ * neigh_periodic_work to update its value (can be multiple minutes).
+ * Any handler that replaces them should do this as well.
+ */
+ /* ReachableTime */
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+ neigh_proc_base_reachable_time;
+ /* ReachableTime (in milliseconds) */
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+ neigh_proc_base_reachable_time;
}
/* Don't export sysctls to unprivileged users */
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 999341244434..f2aa73bfb0e4 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -614,8 +614,7 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue,
{
struct rps_map *map;
cpumask_var_t mask;
- size_t len = 0;
- int i;
+ int i, len;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
@@ -626,17 +625,11 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue,
for (i = 0; i < map->len; i++)
cpumask_set_cpu(map->cpus[i], mask);
- len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
- if (PAGE_SIZE - len < 3) {
- rcu_read_unlock();
- free_cpumask_var(mask);
- return -EINVAL;
- }
+ len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
rcu_read_unlock();
-
free_cpumask_var(mask);
- len += sprintf(buf + len, "\n");
- return len;
+
+ return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
@@ -1090,8 +1083,7 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
struct xps_dev_maps *dev_maps;
cpumask_var_t mask;
unsigned long index;
- size_t len = 0;
- int i;
+ int i, len;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
@@ -1117,15 +1109,9 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
}
rcu_read_unlock();
- len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
- if (PAGE_SIZE - len < 3) {
- free_cpumask_var(mask);
- return -EINVAL;
- }
-
+ len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
free_cpumask_var(mask);
- len += sprintf(buf + len, "\n");
- return len;
+ return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_xps_map(struct netdev_queue *queue,
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ce780c722e48..cb5290b8c428 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -15,6 +15,10 @@
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
+#include <linux/net_namespace.h>
+#include <linux/rtnetlink.h>
+#include <net/sock.h>
+#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -144,6 +148,78 @@ static void ops_free_list(const struct pernet_operations *ops,
}
}
+static int alloc_netid(struct net *net, struct net *peer, int reqid)
+{
+ int min = 0, max = 0;
+
+ ASSERT_RTNL();
+
+ if (reqid >= 0) {
+ min = reqid;
+ max = reqid + 1;
+ }
+
+ return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
+}
+
+/* This function is used by idr_for_each(). If net is equal to peer, the
+ * function returns the id so that idr_for_each() stops. Because we cannot
+ * return the id 0 (idr_for_each() will not stop), we return the magic value
+ * NET_ID_ZERO (-1) for it.
+ */
+#define NET_ID_ZERO -1
+static int net_eq_idr(int id, void *net, void *peer)
+{
+ if (net_eq(net, peer))
+ return id ? : NET_ID_ZERO;
+ return 0;
+}
+
+static int __peernet2id(struct net *net, struct net *peer, bool alloc)
+{
+ int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
+
+ ASSERT_RTNL();
+
+ /* Magic value for id 0. */
+ if (id == NET_ID_ZERO)
+ return 0;
+ if (id > 0)
+ return id;
+
+ if (alloc)
+ return alloc_netid(net, peer, -1);
+
+ return -ENOENT;
+}
+
+/* This function returns the id of a peer netns. If no id is assigned, one will
+ * be allocated and returned.
+ */
+int peernet2id(struct net *net, struct net *peer)
+{
+ int id = __peernet2id(net, peer, true);
+
+ return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+}
+EXPORT_SYMBOL(peernet2id);
+
+struct net *get_net_ns_by_id(struct net *net, int id)
+{
+ struct net *peer;
+
+ if (id < 0)
+ return NULL;
+
+ rcu_read_lock();
+ peer = idr_find(&net->netns_ids, id);
+ if (peer)
+ get_net(peer);
+ rcu_read_unlock();
+
+ return peer;
+}
+
/*
* setup_net runs the initializers for the network namespace object.
*/
@@ -158,6 +234,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
atomic_set(&net->passive, 1);
net->dev_base_seq = 1;
net->user_ns = user_ns;
+ idr_init(&net->netns_ids);
#ifdef NETNS_REFCNT_DEBUG
atomic_set(&net->use_count, 0);
@@ -288,6 +365,14 @@ static void cleanup_net(struct work_struct *work)
list_for_each_entry(net, &net_kill_list, cleanup_list) {
list_del_rcu(&net->list);
list_add_tail(&net->exit_list, &net_exit_list);
+ for_each_net(tmp) {
+ int id = __peernet2id(tmp, net, false);
+
+ if (id >= 0)
+ idr_remove(&tmp->netns_ids, id);
+ }
+ idr_destroy(&net->netns_ids);
+
}
rtnl_unlock();
@@ -361,6 +446,7 @@ struct net *get_net_ns_by_fd(int fd)
return ERR_PTR(-EINVAL);
}
#endif
+EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
struct net *get_net_ns_by_pid(pid_t pid)
{
@@ -402,6 +488,130 @@ static struct pernet_operations __net_initdata net_ns_ops = {
.exit = net_ns_net_exit,
};
+static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
+ [NETNSA_NONE] = { .type = NLA_UNSPEC },
+ [NETNSA_NSID] = { .type = NLA_S32 },
+ [NETNSA_PID] = { .type = NLA_U32 },
+ [NETNSA_FD] = { .type = NLA_U32 },
+};
+
+static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[NETNSA_MAX + 1];
+ struct net *peer;
+ int nsid, err;
+
+ err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy);
+ if (err < 0)
+ return err;
+ if (!tb[NETNSA_NSID])
+ return -EINVAL;
+ nsid = nla_get_s32(tb[NETNSA_NSID]);
+
+ if (tb[NETNSA_PID])
+ peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
+ else if (tb[NETNSA_FD])
+ peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
+ else
+ return -EINVAL;
+ if (IS_ERR(peer))
+ return PTR_ERR(peer);
+
+ if (__peernet2id(net, peer, false) >= 0) {
+ err = -EEXIST;
+ goto out;
+ }
+
+ err = alloc_netid(net, peer, nsid);
+ if (err > 0)
+ err = 0;
+out:
+ put_net(peer);
+ return err;
+}
+
+static int rtnl_net_get_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
+ ;
+}
+
+static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
+ int cmd, struct net *net, struct net *peer)
+{
+ struct nlmsghdr *nlh;
+ struct rtgenmsg *rth;
+ int id;
+
+ ASSERT_RTNL();
+
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ rth = nlmsg_data(nlh);
+ rth->rtgen_family = AF_UNSPEC;
+
+ id = __peernet2id(net, peer, false);
+ if (id < 0)
+ id = NETNSA_NSID_NOT_ASSIGNED;
+ if (nla_put_s32(skb, NETNSA_NSID, id))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[NETNSA_MAX + 1];
+ struct sk_buff *msg;
+ int err = -ENOBUFS;
+ struct net *peer;
+
+ err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy);
+ if (err < 0)
+ return err;
+ if (tb[NETNSA_PID])
+ peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
+ else if (tb[NETNSA_FD])
+ peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
+ else
+ return -EINVAL;
+
+ if (IS_ERR(peer))
+ return PTR_ERR(peer);
+
+ msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
+ RTM_GETNSID, net, peer);
+ if (err < 0)
+ goto err_out;
+
+ err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
+ goto out;
+
+err_out:
+ nlmsg_free(msg);
+out:
+ put_net(peer);
+ return err;
+}
+
static int __init net_ns_init(void)
{
struct net_generic *ng;
@@ -435,6 +645,9 @@ static int __init net_ns_init(void)
register_pernet_subsys(&net_ns_ops);
+ rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, NULL, NULL);
+
return 0;
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e0ad5d16c9c5..c126a878c47c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -77,7 +77,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
features = netif_skb_features(skb);
- if (vlan_tx_tag_present(skb) &&
+ if (skb_vlan_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb = __vlan_hwaccel_push_inside(skb);
if (unlikely(!skb)) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index da934fc3faa8..9fa25b0ea145 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2842,25 +2842,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
+
if (!(pkt_dev->flags & F_UDPCSUM)) {
skb->ip_summed = CHECKSUM_NONE;
} else if (odev->features & NETIF_F_V4_CSUM) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- udp4_hwcsum(skb, udph->source, udph->dest);
+ udp4_hwcsum(skb, iph->saddr, iph->daddr);
} else {
- __wsum csum = udp_csum(skb);
+ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
/* add protocol-dependent pseudo-header */
- udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
datalen + 8, IPPROTO_UDP, csum);
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
}
- pktgen_finalize_skb(pkt_dev, skb, datalen);
-
#ifdef CONFIG_XFRM
if (!process_ipsec(pkt_dev, skb, protocol))
return NULL;
@@ -2976,6 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
+
if (!(pkt_dev->flags & F_UDPCSUM)) {
skb->ip_summed = CHECKSUM_NONE;
} else if (odev->features & NETIF_F_V6_CSUM) {
@@ -2984,7 +2986,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->csum_offset = offsetof(struct udphdr, check);
udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
} else {
- __wsum csum = udp_csum(skb);
+ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
/* add protocol-dependent pseudo-header */
udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
@@ -2993,8 +2995,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
udph->check = CSUM_MANGLED_0;
}
- pktgen_finalize_skb(pkt_dev, skb, datalen);
-
return skb;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9cf6fe9ddc0c..5be499b6a2d2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -50,6 +50,7 @@
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
+#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
@@ -669,9 +670,19 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
for (i = 0; i < RTAX_MAX; i++) {
if (metrics[i]) {
+ if (i == RTAX_CC_ALGO - 1) {
+ char tmp[TCP_CA_NAME_MAX], *name;
+
+ name = tcp_ca_get_name_by_key(metrics[i], tmp);
+ if (!name)
+ continue;
+ if (nla_put_string(skb, i + 1, name))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u32(skb, i + 1, metrics[i]))
+ goto nla_put_failure;
+ }
valid++;
- if (nla_put_u32(skb, i+1, metrics[i]))
- goto nla_put_failure;
}
}
@@ -864,6 +875,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+ + nla_total_size(4) /* IFLA_LINK_NETNSID */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1158,6 +1170,18 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
}
+ if (dev->rtnl_link_ops &&
+ dev->rtnl_link_ops->get_link_net) {
+ struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
+
+ if (!net_eq(dev_net(dev), link_net)) {
+ int id = peernet2id(dev_net(dev), link_net);
+
+ if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
+ goto nla_put_failure;
+ }
+ }
+
if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
goto nla_put_failure;
@@ -1188,7 +1212,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nla_nest_end(skb, af_spec);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -1223,6 +1248,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
+ [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1237,18 +1263,12 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
};
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
- [IFLA_VF_MAC] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_mac) },
- [IFLA_VF_VLAN] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_vlan) },
- [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_tx_rate) },
- [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_spoofchk) },
- [IFLA_VF_RATE] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_rate) },
- [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_link_state) },
+ [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
+ [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
+ [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
+ [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
+ [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -1315,7 +1335,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
*/
WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
- if (err <= 0)
+ if (err < 0)
goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1996,7 +2016,7 @@ replay:
struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
struct nlattr **data = NULL;
struct nlattr **slave_data = NULL;
- struct net *dest_net;
+ struct net *dest_net, *link_net = NULL;
if (ops) {
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
@@ -2102,7 +2122,18 @@ replay:
if (IS_ERR(dest_net))
return PTR_ERR(dest_net);
- dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
+ if (tb[IFLA_LINK_NETNSID]) {
+ int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
+
+ link_net = get_net_ns_by_id(dest_net, id);
+ if (!link_net) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ dev = rtnl_create_link(link_net ? : dest_net, ifname,
+ name_assign_type, ops, tb);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out;
@@ -2111,7 +2142,7 @@ replay:
dev->ifindex = ifm->ifi_index;
if (ops->newlink) {
- err = ops->newlink(net, dev, tb, data);
+ err = ops->newlink(link_net ? : net, dev, tb, data);
/* Drivers should call free_netdev() in ->destructor
* and unregister it on failure after registration
* so that device could be finally freed in rtnl_unlock.
@@ -2130,9 +2161,19 @@ replay:
}
}
err = rtnl_configure_link(dev, ifm);
- if (err < 0)
+ if (err < 0) {
unregister_netdevice(dev);
+ goto out;
+ }
+
+ if (link_net) {
+ err = dev_change_net_namespace(dev, dest_net, ifname);
+ if (err < 0)
+ unregister_netdevice(dev);
+ }
out:
+ if (link_net)
+ put_net(link_net);
put_net(dest_net);
return err;
}
@@ -2315,7 +2356,8 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -2698,10 +2740,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
idx);
}
- idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
if (dev->netdev_ops->ndo_fdb_dump)
- idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, bdev, dev,
+ idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
idx);
+ else
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
cops = NULL;
}
@@ -2797,7 +2840,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
nla_nest_end(skb, protinfo);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -2868,39 +2912,35 @@ static inline size_t bridge_nlmsg_size(void)
+ nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
}
-static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
+static int rtnl_bridge_notify(struct net_device *dev)
{
struct net *net = dev_net(dev);
- struct net_device *br_dev = netdev_master_upper_dev_get(dev);
struct sk_buff *skb;
int err = -EOPNOTSUPP;
+ if (!dev->netdev_ops->ndo_bridge_getlink)
+ return 0;
+
skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
if (!skb) {
err = -ENOMEM;
goto errout;
}
- if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
- br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
- err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
- if (err < 0)
- goto errout;
- }
+ err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
+ if (err < 0)
+ goto errout;
- if ((flags & BRIDGE_FLAGS_SELF) &&
- dev->netdev_ops->ndo_bridge_getlink) {
- err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
- if (err < 0)
- goto errout;
- }
+ if (!skb->len)
+ goto errout;
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return 0;
errout:
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ if (err)
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
return err;
}
@@ -2911,7 +2951,7 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
struct net_device *dev;
struct nlattr *br_spec, *attr = NULL;
int rem, err = -EOPNOTSUPP;
- u16 oflags, flags = 0;
+ u16 flags = 0;
bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
@@ -2941,8 +2981,6 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
}
}
- oflags = flags;
-
if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
@@ -2951,7 +2989,7 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
goto out;
}
- err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
+ err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
if (err)
goto out;
@@ -2962,17 +3000,20 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!dev->netdev_ops->ndo_bridge_setlink)
err = -EOPNOTSUPP;
else
- err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
-
- if (!err)
+ err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
+ flags);
+ if (!err) {
flags &= ~BRIDGE_FLAGS_SELF;
+
+ /* Generate event to notify upper layer of bridge
+ * change
+ */
+ err = rtnl_bridge_notify(dev);
+ }
}
if (have_flags)
memcpy(nla_data(attr), &flags, sizeof(flags));
- /* Generate event to notify upper layer of bridge change */
- if (!err)
- err = rtnl_bridge_notify(dev, oflags);
out:
return err;
}
@@ -2984,7 +3025,7 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
struct net_device *dev;
struct nlattr *br_spec, *attr = NULL;
int rem, err = -EOPNOTSUPP;
- u16 oflags, flags = 0;
+ u16 flags = 0;
bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
@@ -3014,8 +3055,6 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
}
}
- oflags = flags;
-
if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
@@ -3024,7 +3063,7 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
goto out;
}
- err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+ err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
if (err)
goto out;
@@ -3035,17 +3074,21 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!dev->netdev_ops->ndo_bridge_dellink)
err = -EOPNOTSUPP;
else
- err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+ err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
+ flags);
- if (!err)
+ if (!err) {
flags &= ~BRIDGE_FLAGS_SELF;
+
+ /* Generate event to notify upper layer of bridge
+ * change
+ */
+ err = rtnl_bridge_notify(dev);
+ }
}
if (have_flags)
memcpy(nla_data(attr), &flags, sizeof(flags));
- /* Generate event to notify upper layer of bridge change */
- if (!err)
- err = rtnl_bridge_notify(dev, oflags);
out:
return err;
}
@@ -3135,6 +3178,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_UNREGISTER_FINAL:
case NETDEV_RELEASE:
case NETDEV_JOIN:
+ case NETDEV_BONDING_INFO:
break;
default:
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 395c15b82087..88c613eab142 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -74,6 +74,8 @@
#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
+#include <linux/capability.h>
+#include <linux/user_namespace.h>
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -677,13 +679,6 @@ static void skb_release_head_state(struct sk_buff *skb)
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
#endif
-/* XXX: IS this still necessary? - JHS */
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
-#ifdef CONFIG_NET_CLS_ACT
- skb->tc_verd = 0;
-#endif
-#endif
}
/* Free everything but the sk_buff shell. */
@@ -830,6 +825,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#ifdef CONFIG_NET_RX_BUSY_POLL
CHECK_SKB_FIELD(napi_id);
#endif
+#ifdef CONFIG_XPS
+ CHECK_SKB_FIELD(sender_cpu);
+#endif
#ifdef CONFIG_NET_SCHED
CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
@@ -3697,11 +3695,28 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
kfree_skb(skb);
}
+static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
+{
+ bool ret;
+
+ if (likely(sysctl_tstamp_allow_data || tsonly))
+ return true;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ ret = sk->sk_socket && sk->sk_socket->file &&
+ file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
+ read_unlock_bh(&sk->sk_callback_lock);
+ return ret;
+}
+
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps)
{
struct sock *sk = skb->sk;
+ if (!skb_may_tx_timestamp(sk, false))
+ return;
+
/* take a reference to prevent skb_orphan() from freeing the socket */
sock_hold(sk);
@@ -3717,19 +3732,28 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
struct sock *sk, int tstype)
{
struct sk_buff *skb;
+ bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
- if (!sk)
+ if (!sk || !skb_may_tx_timestamp(sk, tsonly))
return;
- if (hwtstamps)
- *skb_hwtstamps(orig_skb) = *hwtstamps;
+ if (tsonly)
+ skb = alloc_skb(0, GFP_ATOMIC);
else
- orig_skb->tstamp = ktime_get_real();
-
- skb = skb_clone(orig_skb, GFP_ATOMIC);
+ skb = skb_clone(orig_skb, GFP_ATOMIC);
if (!skb)
return;
+ if (tsonly) {
+ skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+ skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
+ }
+
+ if (hwtstamps)
+ *skb_hwtstamps(skb) = *hwtstamps;
+ else
+ skb->tstamp = ktime_get_real();
+
__skb_complete_tx_timestamp(skb, sk, tstype);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
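With SOF_TIMESTAMPING_OPT_TSONLY handled above, an empty skb is looped back on the socket error queue instead of a clone of the full packet (subject to the tstamp_allow_data sysctl added later in this diff). A hedged userspace sketch of requesting such timestamps — the flag names come from linux/net_tstamp.h, error handling is omitted:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Illustrative only: enable software TX timestamps that are reported
 * without the original payload attached.
 */
static int enable_tx_tsonly(int fd)
{
	int val = SOF_TIMESTAMPING_TX_SOFTWARE |
		  SOF_TIMESTAMPING_SOFTWARE |
		  SOF_TIMESTAMPING_OPT_TSONLY;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}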
@@ -4148,6 +4172,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->ignore_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
+ skb->sender_cpu = 0;
skb_init_secmark(skb);
secpath_reset(skb);
nf_reset(skb);
@@ -4204,7 +4229,7 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
struct vlan_hdr *vhdr;
u16 vlan_tci;
- if (unlikely(vlan_tx_tag_present(skb))) {
+ if (unlikely(skb_vlan_tag_present(skb))) {
/* vlan_tci is already set-up so leave this for another time */
return skb;
}
@@ -4290,7 +4315,7 @@ int skb_vlan_pop(struct sk_buff *skb)
__be16 vlan_proto;
int err;
- if (likely(vlan_tx_tag_present(skb))) {
+ if (likely(skb_vlan_tag_present(skb))) {
skb->vlan_tci = 0;
} else {
if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
@@ -4320,7 +4345,7 @@ EXPORT_SYMBOL(skb_vlan_pop);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
unsigned int offset = skb->data - skb_mac_header(skb);
int err;
@@ -4330,7 +4355,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
*/
__skb_push(skb, offset);
err = __vlan_insert_tag(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
if (err)
return err;
skb->protocol = skb->vlan_proto;
diff --git a/net/core/sock.c b/net/core/sock.c
index 1c7a33db1314..93c8b20c91e4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -325,6 +325,8 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
+int sysctl_tstamp_allow_data __read_mostly = 1;
+
struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);
@@ -840,6 +842,7 @@ set_rcvbuf:
ret = -EINVAL;
break;
}
+
if (val & SOF_TIMESTAMPING_OPT_ID &&
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
if (sk->sk_protocol == IPPROTO_TCP) {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 31baba2a71ce..433424804284 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -52,7 +52,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
if (write) {
if (size) {
- if (size > 1<<30) {
+ if (size > 1<<29) {
/* Enforce limit to prevent overflow */
mutex_unlock(&sock_flow_mutex);
return -EINVAL;
@@ -65,7 +65,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -ENOMEM;
}
-
+ rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
@@ -155,7 +155,7 @@ write_unlock:
rcu_read_unlock();
len = min(sizeof(kbuf) - 1, *lenp);
- len = cpumask_scnprintf(kbuf, len, mask);
+ len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
if (!len) {
*lenp = 0;
goto done;
@@ -321,6 +321,15 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "tstamp_allow_data",
+ .data = &sysctl_tstamp_allow_data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one
+ },
#ifdef CONFIG_RPS
{
.procname = "rps_sock_flow_entries",
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 4400da7739da..b2c26b081134 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -702,7 +702,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
nla_put_u32(skb, IFA_FLAGS, ifa_flags))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index d332aefb0846..df4803437888 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -298,7 +298,8 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *att
int type = nla_type(attr);
if (type) {
- if (type > RTAX_MAX || nla_len(attr) < 4)
+ if (type > RTAX_MAX || type == RTAX_CC_ALGO ||
+ nla_len(attr) < 4)
goto err_inval;
fi->fib_metrics[type-1] = nla_get_u32(attr);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index daccc4a36d80..1d7c1256e845 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1616,7 +1616,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
goto errout;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
errout:
nlmsg_cancel(skb, nlh);
@@ -1709,9 +1710,6 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
rt->rt_flags |= RTCF_NOTIFY;
err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
-
- if (err == 0)
- goto out_free;
if (err < 0) {
err = -EMSGSIZE;
goto out_free;
@@ -1762,7 +1760,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb_dst_set(skb, dst_clone(&rt->dst));
if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
- 1, NLM_F_MULTI) <= 0) {
+ 1, NLM_F_MULTI) < 0) {
skb_dst_drop(skb);
rcu_read_unlock_bh();
goto done;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 86e3807052e9..1540b506e3e0 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -29,6 +29,7 @@
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
#include <net/netlink.h>
+#include <net/tcp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
@@ -273,7 +274,8 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(2) /* RTA_DST */
- + nla_total_size(4); /* RTA_PRIORITY */
+ + nla_total_size(4) /* RTA_PRIORITY */
+ + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
/* space for nested metrics */
payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
@@ -365,7 +367,8 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_nest_end(skb, mp_head);
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
errout:
nlmsg_cancel(skb, nlh);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 37317149f918..2173402d87e0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -603,7 +603,7 @@ static int dsa_of_probe(struct platform_device *pdev)
pdev->dev.platform_data = pd;
pd->netdev = &ethernet_dev->dev;
- pd->nr_chips = of_get_child_count(np);
+ pd->nr_chips = of_get_available_child_count(np);
if (pd->nr_chips > DSA_MAX_SWITCHES)
pd->nr_chips = DSA_MAX_SWITCHES;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 515569ffde8a..d104ae15836f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
ds->index, ds->pd->sw_addr);
ds->slave_mii_bus->parent = ds->master_dev;
+ ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
@@ -675,18 +676,5 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
netif_carrier_off(slave_dev);
- if (p->phy != NULL) {
- if (ds->drv->get_phy_flags)
- p->phy->dev_flags |= ds->drv->get_phy_flags(ds, port);
-
- phy_attach(slave_dev, dev_name(&p->phy->dev),
- PHY_INTERFACE_MODE_GMII);
-
- p->phy->autoneg = AUTONEG_ENABLE;
- p->phy->speed = 0;
- p->phy->duplex = 0;
- p->phy->advertising = p->phy->supported | ADVERTISED_Autoneg;
- }
-
return slave_dev;
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 33a140e15834..238f38d21641 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -424,3 +424,95 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);
+
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct ethhdr *eh, *eh2;
+ unsigned int hlen, off_eth;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_eth = skb_gro_offset(skb);
+ hlen = off_eth + sizeof(*eh);
+ eh = skb_gro_header_fast(skb, off_eth);
+ if (skb_gro_header_hard(skb, hlen)) {
+ eh = skb_gro_header_slow(skb, hlen, off_eth);
+ if (unlikely(!eh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ eh2 = (struct ethhdr *)(p->data + off_eth);
+ if (compare_ether_header(eh, eh2)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = eh->h_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, sizeof(*eh));
+ skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+EXPORT_SYMBOL(eth_gro_receive);
+
+int eth_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
+ __be16 type = eh->h_proto;
+ struct packet_offload *ptype;
+ int err = -ENOSYS;
+
+ if (skb->encapsulation)
+ skb_set_inner_mac_header(skb, nhoff);
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff +
+ sizeof(struct ethhdr));
+
+ rcu_read_unlock();
+ return err;
+}
+EXPORT_SYMBOL(eth_gro_complete);
+
+static struct packet_offload eth_packet_offload __read_mostly = {
+ .type = cpu_to_be16(ETH_P_TEB),
+ .callbacks = {
+ .gro_receive = eth_gro_receive,
+ .gro_complete = eth_gro_complete,
+ },
+};
+
+static int __init eth_offload_init(void)
+{
+ dev_add_offload(&eth_packet_offload);
+
+ return 0;
+}
+
+fs_initcall(eth_offload_init);
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
new file mode 100644
index 000000000000..e50f69da78eb
--- /dev/null
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -0,0 +1,72 @@
+#ifndef __IEEE802154_6LOWPAN_I_H__
+#define __IEEE802154_6LOWPAN_I_H__
+
+#include <linux/list.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/inet_frag.h>
+
+struct lowpan_create_arg {
+ u16 tag;
+ u16 d_size;
+ const struct ieee802154_addr *src;
+ const struct ieee802154_addr *dst;
+};
+
+/* Equivalent of ipv4 struct ip
+ */
+struct lowpan_frag_queue {
+ struct inet_frag_queue q;
+
+ u16 tag;
+ u16 d_size;
+ struct ieee802154_addr saddr;
+ struct ieee802154_addr daddr;
+};
+
+static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
+{
+ switch (a->mode) {
+ case IEEE802154_ADDR_LONG:
+ return (((__force u64)a->extended_addr) >> 32) ^
+ (((__force u64)a->extended_addr) & 0xffffffff);
+ case IEEE802154_ADDR_SHORT:
+ return (__force u32)(a->short_addr);
+ default:
+ return 0;
+ }
+}
+
+struct lowpan_dev_record {
+ struct net_device *ldev;
+ struct list_head list;
+};
+
+/* private device info */
+struct lowpan_dev_info {
+ struct net_device *real_dev; /* real WPAN device ptr */
+ struct mutex dev_list_mtx; /* mutex for list ops */
+ u16 fragment_tag;
+};
+
+static inline struct
+lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+extern struct list_head lowpan_devices;
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+void lowpan_net_frag_exit(void);
+int lowpan_net_frag_init(void);
+
+void lowpan_rx_init(void);
+void lowpan_rx_exit(void);
+
+int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len);
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
+
+#endif /* __IEEE802154_6LOWPAN_I_H__ */
diff --git a/net/ieee802154/6lowpan/Kconfig b/net/ieee802154/6lowpan/Kconfig
new file mode 100644
index 000000000000..d24f985b0bfd
--- /dev/null
+++ b/net/ieee802154/6lowpan/Kconfig
@@ -0,0 +1,5 @@
+config IEEE802154_6LOWPAN
+ tristate "6lowpan support over IEEE 802.15.4"
+ depends on 6LOWPAN
+ ---help---
+ IPv6 compression over IEEE 802.15.4.
diff --git a/net/ieee802154/6lowpan/Makefile b/net/ieee802154/6lowpan/Makefile
new file mode 100644
index 000000000000..6bfb270a81a6
--- /dev/null
+++ b/net/ieee802154/6lowpan/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o
+
+ieee802154_6lowpan-y := core.o rx.o reassembly.o tx.o
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
new file mode 100644
index 000000000000..055fbb71ba6f
--- /dev/null
+++ b/net/ieee802154/6lowpan/core.c
@@ -0,0 +1,304 @@
+/* Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ieee802154.h>
+
+#include <net/ipv6.h>
+
+#include "6lowpan_i.h"
+
+LIST_HEAD(lowpan_devices);
+static int lowpan_open_count;
+
+static __le16 lowpan_get_pan_id(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
+ return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static __le16 lowpan_get_short_addr(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
+ return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
+static u8 lowpan_get_dsn(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
+ return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
+}
+
+static struct header_ops lowpan_header_ops = {
+ .create = lowpan_header_create,
+};
+
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &lowpan_netdev_xmit_lock_key);
+}
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+ dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+ return 0;
+}
+
+static const struct net_device_ops lowpan_netdev_ops = {
+ .ndo_init = lowpan_dev_init,
+ .ndo_start_xmit = lowpan_xmit,
+};
+
+static struct ieee802154_mlme_ops lowpan_mlme = {
+ .get_pan_id = lowpan_get_pan_id,
+ .get_short_addr = lowpan_get_short_addr,
+ .get_dsn = lowpan_get_dsn,
+};
+
+static void lowpan_setup(struct net_device *dev)
+{
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+ dev->type = ARPHRD_IEEE802154;
+ /* Frame Control + Sequence Number + Address fields + Security Header */
+ dev->hard_header_len = 2 + 1 + 20 + 14;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = IPV6_MIN_MTU;
+ dev->tx_queue_len = 0;
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+ dev->watchdog_timeo = 0;
+
+ dev->netdev_ops = &lowpan_netdev_ops;
+ dev->header_ops = &lowpan_header_ops;
+ dev->ml_priv = &lowpan_mlme;
+ dev->destructor = free_netdev;
+}
+
+static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_device *real_dev;
+ struct lowpan_dev_record *entry;
+ int ret;
+
+ ASSERT_RTNL();
+
+ pr_debug("adding new link\n");
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+ /* find and hold real wpan device */
+ real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
+ if (real_dev->type != ARPHRD_IEEE802154) {
+ dev_put(real_dev);
+ return -EINVAL;
+ }
+
+ lowpan_dev_info(dev)->real_dev = real_dev;
+ mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ dev_put(real_dev);
+ lowpan_dev_info(dev)->real_dev = NULL;
+ return -ENOMEM;
+ }
+
+ entry->ldev = dev;
+
+ /* Set the lowpan hardware address to the wpan hardware address. */
+ memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
+ mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, &lowpan_devices);
+ mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ ret = register_netdevice(dev);
+ if (ret >= 0) {
+ if (!lowpan_open_count)
+ lowpan_rx_init();
+ lowpan_open_count++;
+ }
+
+ return ret;
+}
+
+static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
+ struct net_device *real_dev = lowpan_dev->real_dev;
+ struct lowpan_dev_record *entry, *tmp;
+
+ ASSERT_RTNL();
+
+ lowpan_open_count--;
+ if (!lowpan_open_count)
+ lowpan_rx_exit();
+
+ mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+ list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+ if (entry->ldev == dev) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+ mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ unregister_netdevice_queue(dev, head);
+
+ dev_put(real_dev);
+}
+
+static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
+ .kind = "lowpan",
+ .priv_size = sizeof(struct lowpan_dev_info),
+ .setup = lowpan_setup,
+ .newlink = lowpan_newlink,
+ .dellink = lowpan_dellink,
+ .validate = lowpan_validate,
+};
+
+static inline int __init lowpan_netlink_init(void)
+{
+ return rtnl_link_register(&lowpan_link_ops);
+}
+
+static inline void lowpan_netlink_fini(void)
+{
+ rtnl_link_unregister(&lowpan_link_ops);
+}
+
+static int lowpan_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ LIST_HEAD(del_list);
+ struct lowpan_dev_record *entry, *tmp;
+
+ if (dev->type != ARPHRD_IEEE802154)
+ goto out;
+
+ if (event == NETDEV_UNREGISTER) {
+ list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+ if (lowpan_dev_info(entry->ldev)->real_dev == dev)
+ lowpan_dellink(entry->ldev, &del_list);
+ }
+
+ unregister_netdevice_many(&del_list);
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lowpan_dev_notifier = {
+ .notifier_call = lowpan_device_event,
+};
+
+static int __init lowpan_init_module(void)
+{
+ int err = 0;
+
+ err = lowpan_net_frag_init();
+ if (err < 0)
+ goto out;
+
+ err = lowpan_netlink_init();
+ if (err < 0)
+ goto out_frag;
+
+ err = register_netdevice_notifier(&lowpan_dev_notifier);
+ if (err < 0)
+ goto out_pack;
+
+ return 0;
+
+out_pack:
+ lowpan_netlink_fini();
+out_frag:
+ lowpan_net_frag_exit();
+out:
+ return err;
+}
+
+static void __exit lowpan_cleanup_module(void)
+{
+ lowpan_netlink_fini();
+
+ lowpan_net_frag_exit();
+
+ unregister_netdevice_notifier(&lowpan_dev_notifier);
+}
+
+module_init(lowpan_init_module);
+module_exit(lowpan_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 9d980ed3ffe2..f46e4d1306f2 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -28,7 +28,7 @@
#include <net/ipv6.h>
#include <net/inet_frag.h>
-#include "reassembly.h"
+#include "6lowpan_i.h"
static const char lowpan_frags_cache_name[] = "lowpan-frags";
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
new file mode 100644
index 000000000000..4be1d289ab2d
--- /dev/null
+++ b/net/ieee802154/6lowpan/rx.c
@@ -0,0 +1,171 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/if_arp.h>
+
+#include <net/6lowpan.h>
+#include <net/ieee802154_netdev.h>
+
+#include "6lowpan_i.h"
+
+static int lowpan_give_skb_to_devices(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lowpan_dev_record *entry;
+ struct sk_buff *skb_cp;
+ int stat = NET_RX_SUCCESS;
+
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->pkt_type = PACKET_HOST;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &lowpan_devices, list)
+ if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
+ skb_cp = skb_copy(skb, GFP_ATOMIC);
+ if (!skb_cp) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return NET_RX_DROP;
+ }
+
+ skb_cp->dev = entry->ldev;
+ stat = netif_rx(skb_cp);
+ if (stat == NET_RX_DROP)
+ break;
+ }
+ rcu_read_unlock();
+
+ consume_skb(skb);
+
+ return stat;
+}
+
+static int
+iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+{
+ u8 iphc0, iphc1;
+ struct ieee802154_addr_sa sa, da;
+ void *sap, *dap;
+
+ raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
+ /* at least two bytes will be used for the encoding */
+ if (skb->len < 2)
+ return -EINVAL;
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ return -EINVAL;
+
+ if (lowpan_fetch_skb_u8(skb, &iphc1))
+ return -EINVAL;
+
+ ieee802154_addr_to_sa(&sa, &hdr->source);
+ ieee802154_addr_to_sa(&da, &hdr->dest);
+
+ if (sa.addr_type == IEEE802154_ADDR_SHORT)
+ sap = &sa.short_addr;
+ else
+ sap = &sa.hwaddr;
+
+ if (da.addr_type == IEEE802154_ADDR_SHORT)
+ dap = &da.short_addr;
+ else
+ dap = &da.hwaddr;
+
+ return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
+ IEEE802154_ADDR_LEN, dap, da.addr_type,
+ IEEE802154_ADDR_LEN, iphc0, iphc1);
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct ieee802154_hdr hdr;
+ int ret;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto drop;
+
+ if (!netif_running(dev))
+ goto drop_skb;
+
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ goto drop_skb;
+
+ if (dev->type != ARPHRD_IEEE802154)
+ goto drop_skb;
+
+ if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+ goto drop_skb;
+
+ /* check that it's our buffer */
+ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+ /* Pull off the 1-byte 6lowpan header. */
+ skb_pull(skb, 1);
+ return lowpan_give_skb_to_devices(skb, NULL);
+ } else {
+ switch (skb->data[0] & 0xe0) {
+ case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+ ret = iphc_decompress(skb, &hdr);
+ if (ret < 0)
+ goto drop_skb;
+
+ return lowpan_give_skb_to_devices(skb, NULL);
+ case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
+ ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
+ if (ret == 1) {
+ ret = iphc_decompress(skb, &hdr);
+ if (ret < 0)
+ goto drop_skb;
+
+ return lowpan_give_skb_to_devices(skb, NULL);
+ } else if (ret == -1) {
+ return NET_RX_DROP;
+ } else {
+ return NET_RX_SUCCESS;
+ }
+ case LOWPAN_DISPATCH_FRAGN: /* subsequent fragment headers */
+ ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
+ if (ret == 1) {
+ ret = iphc_decompress(skb, &hdr);
+ if (ret < 0)
+ goto drop_skb;
+
+ return lowpan_give_skb_to_devices(skb, NULL);
+ } else if (ret == -1) {
+ return NET_RX_DROP;
+ } else {
+ return NET_RX_SUCCESS;
+ }
+ default:
+ break;
+ }
+ }
+
+drop_skb:
+ kfree_skb(skb);
+drop:
+ return NET_RX_DROP;
+}
+
+static struct packet_type lowpan_packet_type = {
+ .type = htons(ETH_P_IEEE802154),
+ .func = lowpan_rcv,
+};
+
+void lowpan_rx_init(void)
+{
+ dev_add_pack(&lowpan_packet_type);
+}
+
+void lowpan_rx_exit(void)
+{
+ dev_remove_pack(&lowpan_packet_type);
+}
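
For reference, the dispatch handling in lowpan_rcv() above can be followed with a small standalone sketch. The numeric values below are the RFC 4944 dispatch codes that the kernel's LOWPAN_DISPATCH_* macros (net/6lowpan.h, not shown in this diff) are assumed to encode; only the classification logic mirrors the receive path above.

#include <stdint.h>
#include <stdio.h>

/* RFC 4944 dispatch values; assumed to match LOWPAN_DISPATCH_* in net/6lowpan.h */
#define DISPATCH_IPV6  0x41 /* 01000001: uncompressed IPv6 follows   */
#define DISPATCH_IPHC  0x60 /* 011xxxxx: IPHC-compressed IPv6 header */
#define DISPATCH_FRAG1 0xc0 /* 11000xxx: first fragment              */
#define DISPATCH_FRAGN 0xe0 /* 11100xxx: subsequent fragment         */

/* Classify the first byte of a 6LoWPAN payload the same way lowpan_rcv() does. */
static const char *classify_dispatch(uint8_t first_byte)
{
	if (first_byte == DISPATCH_IPV6)
		return "uncompressed IPv6";

	switch (first_byte & 0xe0) {
	case DISPATCH_IPHC:
		return "IPHC compressed datagram";
	case DISPATCH_FRAG1:
		return "FRAG1 (first fragment)";
	case DISPATCH_FRAGN:
		return "FRAGN (subsequent fragment)";
	default:
		return "not handled by this receive path";
	}
}

int main(void)
{
	uint8_t samples[] = { 0x41, 0x78, 0xc3, 0xe5, 0x02 };

	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %s\n", samples[i], classify_dispatch(samples[i]));
	return 0;
}
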
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
new file mode 100644
index 000000000000..2349070bd534
--- /dev/null
+++ b/net/ieee802154/6lowpan/tx.c
@@ -0,0 +1,271 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/6lowpan.h>
+#include <net/ieee802154_netdev.h>
+
+#include "6lowpan_i.h"
+
+/* don't save pan id, it's intra pan */
+struct lowpan_addr {
+ u8 mode;
+ union {
+ /* IPv6 needs big endian here */
+ __be64 extended_addr;
+ __be16 short_addr;
+ } u;
+};
+
+struct lowpan_addr_info {
+ struct lowpan_addr daddr;
+ struct lowpan_addr saddr;
+};
+
+static inline struct
+lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
+{
+ WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
+ return (struct lowpan_addr_info *)(skb->data -
+ sizeof(struct lowpan_addr_info));
+}
+
+int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len)
+{
+ const u8 *saddr = _saddr;
+ const u8 *daddr = _daddr;
+ struct lowpan_addr_info *info;
+
+ /* TODO:
+ * if this packet isn't an IPv6 one, where should it be routed?
+ */
+ if (type != ETH_P_IPV6)
+ return 0;
+
+ if (!saddr)
+ saddr = dev->dev_addr;
+
+ raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
+ raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
+
+ info = lowpan_skb_priv(skb);
+
+ /* TODO: Currently we only support extended_addr */
+ info->daddr.mode = IEEE802154_ADDR_LONG;
+ memcpy(&info->daddr.u.extended_addr, daddr,
+ sizeof(info->daddr.u.extended_addr));
+ info->saddr.mode = IEEE802154_ADDR_LONG;
+ memcpy(&info->saddr.u.extended_addr, saddr,
+ sizeof(info->daddr.u.extended_addr));
+
+ return 0;
+}
+
+static struct sk_buff*
+lowpan_alloc_frag(struct sk_buff *skb, int size,
+ const struct ieee802154_hdr *master_hdr)
+{
+ struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
+ struct sk_buff *frag;
+ int rc;
+
+ frag = alloc_skb(real_dev->hard_header_len +
+ real_dev->needed_tailroom + size,
+ GFP_ATOMIC);
+
+ if (likely(frag)) {
+ frag->dev = real_dev;
+ frag->priority = skb->priority;
+ skb_reserve(frag, real_dev->hard_header_len);
+ skb_reset_network_header(frag);
+ *mac_cb(frag) = *mac_cb(skb);
+
+ rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
+ &master_hdr->source, size);
+ if (rc < 0) {
+ kfree_skb(frag);
+ return ERR_PTR(rc);
+ }
+ } else {
+ frag = ERR_PTR(-ENOMEM);
+ }
+
+ return frag;
+}
+
+static int
+lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
+ u8 *frag_hdr, int frag_hdrlen,
+ int offset, int len)
+{
+ struct sk_buff *frag;
+
+ raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
+
+ frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+ if (IS_ERR(frag))
+ return -PTR_ERR(frag);
+
+ memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
+ memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
+
+ raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
+
+ return dev_queue_xmit(frag);
+}
+
+static int
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
+ const struct ieee802154_hdr *wpan_hdr)
+{
+ u16 dgram_size, dgram_offset;
+ __be16 frag_tag;
+ u8 frag_hdr[5];
+ int frag_cap, frag_len, payload_cap, rc;
+ int skb_unprocessed, skb_offset;
+
+ dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
+ skb->mac_len;
+ frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
+ lowpan_dev_info(dev)->fragment_tag++;
+
+ frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
+ frag_hdr[1] = dgram_size & 0xff;
+ memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
+
+ payload_cap = ieee802154_max_payload(wpan_hdr);
+
+ frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
+ skb_network_header_len(skb), 8);
+
+ skb_offset = skb_network_header_len(skb);
+ skb_unprocessed = skb->len - skb->mac_len - skb_offset;
+
+ rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+ LOWPAN_FRAG1_HEAD_SIZE, 0,
+ frag_len + skb_network_header_len(skb));
+ if (rc) {
+ pr_debug("%s unable to send FRAG1 packet (tag: %d)",
+ __func__, ntohs(frag_tag));
+ goto err;
+ }
+
+ frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
+ frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
+ frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
+
+ do {
+ dgram_offset += frag_len;
+ skb_offset += frag_len;
+ skb_unprocessed -= frag_len;
+ frag_len = min(frag_cap, skb_unprocessed);
+
+ frag_hdr[4] = dgram_offset >> 3;
+
+ rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+ LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
+ frag_len);
+ if (rc) {
+ pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
+ __func__, ntohs(frag_tag), skb_offset);
+ goto err;
+ }
+ } while (skb_unprocessed > frag_cap);
+
+ consume_skb(skb);
+ return NET_XMIT_SUCCESS;
+
+err:
+ kfree_skb(skb);
+ return rc;
+}
+
+static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ieee802154_addr sa, da;
+ struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+ struct lowpan_addr_info info;
+ void *daddr, *saddr;
+
+ memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
+
+ /* TODO: Currently we only support extended_addr */
+ daddr = &info.daddr.u.extended_addr;
+ saddr = &info.saddr.u.extended_addr;
+
+ lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+
+ cb->type = IEEE802154_FC_TYPE_DATA;
+
+ /* prepare wpan address data */
+ sa.mode = IEEE802154_ADDR_LONG;
+ sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
+
+ /* intra-PAN communications */
+ da.pan_id = sa.pan_id;
+
+ /* if the destination address is the broadcast address, use the
+ * corresponding short address
+ */
+ if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+ da.mode = IEEE802154_ADDR_SHORT;
+ da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+ cb->ackreq = false;
+ } else {
+ da.mode = IEEE802154_ADDR_LONG;
+ da.extended_addr = ieee802154_devaddr_from_raw(daddr);
+ cb->ackreq = true;
+ }
+
+ return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+ ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+}
+
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ieee802154_hdr wpan_hdr;
+ int max_single, ret;
+
+ pr_debug("package xmit\n");
+
+ /* We must take a copy of the skb before we modify/replace the ipv6
+ * header as the header could be used elsewhere
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+
+ ret = lowpan_header(skb, dev);
+ if (ret < 0) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ max_single = ieee802154_max_payload(&wpan_hdr);
+
+ if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
+ skb->dev = lowpan_dev_info(dev)->real_dev;
+ return dev_queue_xmit(skb);
+ } else {
+ netdev_tx_t rc;
+
+ pr_debug("frame is too big, fragmentation is needed\n");
+ rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
+
+ return rc < 0 ? NET_XMIT_DROP : rc;
+ }
+}
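
The 5-byte scratch header built in lowpan_xmit_fragmented() follows the RFC 4944 fragmentation format: an 11-bit datagram size spread over the first two bytes, a 16-bit big-endian tag, and (for FRAGN only) a datagram offset expressed in 8-byte units. A standalone sketch of that packing, assuming the FRAG1/FRAGN dispatch values and the 4-/5-byte header sizes used by the code above:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h> /* htons() */

#define DISPATCH_FRAG1  0xc0 /* assumed value of LOWPAN_DISPATCH_FRAG1  */
#define DISPATCH_FRAGN  0xe0 /* assumed value of LOWPAN_DISPATCH_FRAGN  */
#define FRAG1_HEAD_SIZE 4    /* assumed LOWPAN_FRAG1_HEAD_SIZE          */
#define FRAGN_HEAD_SIZE 5    /* assumed LOWPAN_FRAGN_HEAD_SIZE          */

/* Pack a FRAG1 header: dispatch plus 11-bit datagram size, then the 16-bit tag. */
static int pack_frag1(uint8_t *hdr, uint16_t dgram_size, uint16_t tag)
{
	uint16_t be_tag = htons(tag);

	hdr[0] = DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	hdr[1] = dgram_size & 0xff;
	memcpy(hdr + 2, &be_tag, sizeof(be_tag));
	return FRAG1_HEAD_SIZE;
}

/* Pack a FRAGN header: same first four bytes plus the offset in 8-byte units. */
static int pack_fragn(uint8_t *hdr, uint16_t dgram_size, uint16_t tag,
		      uint16_t dgram_offset)
{
	pack_frag1(hdr, dgram_size, tag);
	hdr[0] &= ~DISPATCH_FRAG1;
	hdr[0] |= DISPATCH_FRAGN;
	hdr[4] = dgram_offset >> 3; /* offset must be a multiple of 8 */
	return FRAGN_HEAD_SIZE;
}

int main(void)
{
	uint8_t hdr[FRAGN_HEAD_SIZE];

	pack_frag1(hdr, 1280, 0x1234);     /* first fragment of a 1280-byte datagram */
	pack_fragn(hdr, 1280, 0x1234, 96); /* later fragment starting at offset 96   */
	return 0;
}
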
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
deleted file mode 100644
index 27eaa65e88e1..000000000000
--- a/net/ieee802154/6lowpan_rtnl.c
+++ /dev/null
@@ -1,729 +0,0 @@
-/* Copyright 2011, Siemens AG
- * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/* Jon's code is based on 6lowpan implementation for Contiki which is:
- * Copyright (c) 2008, Swedish Institute of Computer Science.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Institute nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <linux/bitops.h>
-#include <linux/if_arp.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/ieee802154.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/6lowpan.h>
-#include <net/ipv6.h>
-
-#include "reassembly.h"
-
-static LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
-
-/* private device info */
-struct lowpan_dev_info {
- struct net_device *real_dev; /* real WPAN device ptr */
- struct mutex dev_list_mtx; /* mutex for list ops */
- u16 fragment_tag;
-};
-
-struct lowpan_dev_record {
- struct net_device *ldev;
- struct list_head list;
-};
-
-/* don't save pan id, it's intra pan */
-struct lowpan_addr {
- u8 mode;
- union {
- /* IPv6 needs big endian here */
- __be64 extended_addr;
- __be16 short_addr;
- } u;
-};
-
-struct lowpan_addr_info {
- struct lowpan_addr daddr;
- struct lowpan_addr saddr;
-};
-
-static inline struct
-lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
-{
- return netdev_priv(dev);
-}
-
-static inline struct
-lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
-{
- WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
- return (struct lowpan_addr_info *)(skb->data -
- sizeof(struct lowpan_addr_info));
-}
-
-static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *_daddr,
- const void *_saddr, unsigned int len)
-{
- const u8 *saddr = _saddr;
- const u8 *daddr = _daddr;
- struct lowpan_addr_info *info;
-
- /* TODO:
- * if this package isn't ipv6 one, where should it be routed?
- */
- if (type != ETH_P_IPV6)
- return 0;
-
- if (!saddr)
- saddr = dev->dev_addr;
-
- raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
- raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
-
- info = lowpan_skb_priv(skb);
-
- /* TODO: Currently we only support extended_addr */
- info->daddr.mode = IEEE802154_ADDR_LONG;
- memcpy(&info->daddr.u.extended_addr, daddr,
- sizeof(info->daddr.u.extended_addr));
- info->saddr.mode = IEEE802154_ADDR_LONG;
- memcpy(&info->saddr.u.extended_addr, saddr,
- sizeof(info->daddr.u.extended_addr));
-
- return 0;
-}
-
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct lowpan_dev_record *entry;
- struct sk_buff *skb_cp;
- int stat = NET_RX_SUCCESS;
-
- skb->protocol = htons(ETH_P_IPV6);
- skb->pkt_type = PACKET_HOST;
-
- rcu_read_lock();
- list_for_each_entry_rcu(entry, &lowpan_devices, list)
- if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
- skb_cp = skb_copy(skb, GFP_ATOMIC);
- if (!skb_cp) {
- kfree_skb(skb);
- rcu_read_unlock();
- return NET_RX_DROP;
- }
-
- skb_cp->dev = entry->ldev;
- stat = netif_rx(skb_cp);
- if (stat == NET_RX_DROP)
- break;
- }
- rcu_read_unlock();
-
- consume_skb(skb);
-
- return stat;
-}
-
-static int
-iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
-{
- u8 iphc0, iphc1;
- struct ieee802154_addr_sa sa, da;
- void *sap, *dap;
-
- raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
- /* at least two bytes will be used for the encoding */
- if (skb->len < 2)
- return -EINVAL;
-
- if (lowpan_fetch_skb_u8(skb, &iphc0))
- return -EINVAL;
-
- if (lowpan_fetch_skb_u8(skb, &iphc1))
- return -EINVAL;
-
- ieee802154_addr_to_sa(&sa, &hdr->source);
- ieee802154_addr_to_sa(&da, &hdr->dest);
-
- if (sa.addr_type == IEEE802154_ADDR_SHORT)
- sap = &sa.short_addr;
- else
- sap = &sa.hwaddr;
-
- if (da.addr_type == IEEE802154_ADDR_SHORT)
- dap = &da.short_addr;
- else
- dap = &da.hwaddr;
-
- return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
- IEEE802154_ADDR_LEN, dap, da.addr_type,
- IEEE802154_ADDR_LEN, iphc0, iphc1);
-}
-
-static struct sk_buff*
-lowpan_alloc_frag(struct sk_buff *skb, int size,
- const struct ieee802154_hdr *master_hdr)
-{
- struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
- struct sk_buff *frag;
- int rc;
-
- frag = alloc_skb(real_dev->hard_header_len +
- real_dev->needed_tailroom + size,
- GFP_ATOMIC);
-
- if (likely(frag)) {
- frag->dev = real_dev;
- frag->priority = skb->priority;
- skb_reserve(frag, real_dev->hard_header_len);
- skb_reset_network_header(frag);
- *mac_cb(frag) = *mac_cb(skb);
-
- rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
- &master_hdr->source, size);
- if (rc < 0) {
- kfree_skb(frag);
- return ERR_PTR(rc);
- }
- } else {
- frag = ERR_PTR(-ENOMEM);
- }
-
- return frag;
-}
-
-static int
-lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
- u8 *frag_hdr, int frag_hdrlen,
- int offset, int len)
-{
- struct sk_buff *frag;
-
- raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
-
- frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
- if (IS_ERR(frag))
- return -PTR_ERR(frag);
-
- memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
- memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
-
- raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
-
- return dev_queue_xmit(frag);
-}
-
-static int
-lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
- const struct ieee802154_hdr *wpan_hdr)
-{
- u16 dgram_size, dgram_offset;
- __be16 frag_tag;
- u8 frag_hdr[5];
- int frag_cap, frag_len, payload_cap, rc;
- int skb_unprocessed, skb_offset;
-
- dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
- skb->mac_len;
- frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
- lowpan_dev_info(dev)->fragment_tag++;
-
- frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
- frag_hdr[1] = dgram_size & 0xff;
- memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
-
- payload_cap = ieee802154_max_payload(wpan_hdr);
-
- frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
- skb_network_header_len(skb), 8);
-
- skb_offset = skb_network_header_len(skb);
- skb_unprocessed = skb->len - skb->mac_len - skb_offset;
-
- rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
- LOWPAN_FRAG1_HEAD_SIZE, 0,
- frag_len + skb_network_header_len(skb));
- if (rc) {
- pr_debug("%s unable to send FRAG1 packet (tag: %d)",
- __func__, ntohs(frag_tag));
- goto err;
- }
-
- frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
- frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
- frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
-
- do {
- dgram_offset += frag_len;
- skb_offset += frag_len;
- skb_unprocessed -= frag_len;
- frag_len = min(frag_cap, skb_unprocessed);
-
- frag_hdr[4] = dgram_offset >> 3;
-
- rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
- LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
- frag_len);
- if (rc) {
- pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
- __func__, ntohs(frag_tag), skb_offset);
- goto err;
- }
- } while (skb_unprocessed > frag_cap);
-
- consume_skb(skb);
- return NET_XMIT_SUCCESS;
-
-err:
- kfree_skb(skb);
- return rc;
-}
-
-static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
-{
- struct ieee802154_addr sa, da;
- struct ieee802154_mac_cb *cb = mac_cb_init(skb);
- struct lowpan_addr_info info;
- void *daddr, *saddr;
-
- memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
-
- /* TODO: Currently we only support extended_addr */
- daddr = &info.daddr.u.extended_addr;
- saddr = &info.saddr.u.extended_addr;
-
- lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
-
- cb->type = IEEE802154_FC_TYPE_DATA;
-
- /* prepare wpan address data */
- sa.mode = IEEE802154_ADDR_LONG;
- sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
-
- /* intra-PAN communications */
- da.pan_id = sa.pan_id;
-
- /* if the destination address is the broadcast address, use the
- * corresponding short address
- */
- if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
- da.mode = IEEE802154_ADDR_SHORT;
- da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
- cb->ackreq = false;
- } else {
- da.mode = IEEE802154_ADDR_LONG;
- da.extended_addr = ieee802154_devaddr_from_raw(daddr);
- cb->ackreq = true;
- }
-
- return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
- ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
-}
-
-static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ieee802154_hdr wpan_hdr;
- int max_single, ret;
-
- pr_debug("package xmit\n");
-
- /* We must take a copy of the skb before we modify/replace the ipv6
- * header as the header could be used elsewhere
- */
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb)
- return NET_XMIT_DROP;
-
- ret = lowpan_header(skb, dev);
- if (ret < 0) {
- kfree_skb(skb);
- return NET_XMIT_DROP;
- }
-
- if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
- kfree_skb(skb);
- return NET_XMIT_DROP;
- }
-
- max_single = ieee802154_max_payload(&wpan_hdr);
-
- if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
- skb->dev = lowpan_dev_info(dev)->real_dev;
- return dev_queue_xmit(skb);
- } else {
- netdev_tx_t rc;
-
- pr_debug("frame is too big, fragmentation is needed\n");
- rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
-
- return rc < 0 ? NET_XMIT_DROP : rc;
- }
-}
-
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
-static struct header_ops lowpan_header_ops = {
- .create = lowpan_header_create,
-};
-
-static struct lock_class_key lowpan_tx_busylock;
-static struct lock_class_key lowpan_netdev_xmit_lock_key;
-
-static void lowpan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &lowpan_netdev_xmit_lock_key);
-}
-
-static int lowpan_dev_init(struct net_device *dev)
-{
- netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &lowpan_tx_busylock;
- return 0;
-}
-
-static const struct net_device_ops lowpan_netdev_ops = {
- .ndo_init = lowpan_dev_init,
- .ndo_start_xmit = lowpan_xmit,
-};
-
-static struct ieee802154_mlme_ops lowpan_mlme = {
- .get_pan_id = lowpan_get_pan_id,
- .get_short_addr = lowpan_get_short_addr,
- .get_dsn = lowpan_get_dsn,
-};
-
-static void lowpan_setup(struct net_device *dev)
-{
- dev->addr_len = IEEE802154_ADDR_LEN;
- memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->type = ARPHRD_IEEE802154;
- /* Frame Control + Sequence Number + Address fields + Security Header */
- dev->hard_header_len = 2 + 1 + 20 + 14;
- dev->needed_tailroom = 2; /* FCS */
- dev->mtu = IPV6_MIN_MTU;
- dev->tx_queue_len = 0;
- dev->flags = IFF_BROADCAST | IFF_MULTICAST;
- dev->watchdog_timeo = 0;
-
- dev->netdev_ops = &lowpan_netdev_ops;
- dev->header_ops = &lowpan_header_ops;
- dev->ml_priv = &lowpan_mlme;
- dev->destructor = free_netdev;
-}
-
-static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
-{
- if (tb[IFLA_ADDRESS]) {
- if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
- return -EINVAL;
- }
- return 0;
-}
-
-static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- struct ieee802154_hdr hdr;
- int ret;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto drop;
-
- if (!netif_running(dev))
- goto drop_skb;
-
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto drop_skb;
-
- if (dev->type != ARPHRD_IEEE802154)
- goto drop_skb;
-
- if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
- goto drop_skb;
-
- /* check that it's our buffer */
- if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
- /* Pull off the 1-byte of 6lowpan header. */
- skb_pull(skb, 1);
- return lowpan_give_skb_to_devices(skb, NULL);
- } else {
- switch (skb->data[0] & 0xe0) {
- case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
- ret = iphc_decompress(skb, &hdr);
- if (ret < 0)
- goto drop_skb;
-
- return lowpan_give_skb_to_devices(skb, NULL);
- case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
- ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
- if (ret == 1) {
- ret = iphc_decompress(skb, &hdr);
- if (ret < 0)
- goto drop_skb;
-
- return lowpan_give_skb_to_devices(skb, NULL);
- } else if (ret == -1) {
- return NET_RX_DROP;
- } else {
- return NET_RX_SUCCESS;
- }
- case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
- ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
- if (ret == 1) {
- ret = iphc_decompress(skb, &hdr);
- if (ret < 0)
- goto drop_skb;
-
- return lowpan_give_skb_to_devices(skb, NULL);
- } else if (ret == -1) {
- return NET_RX_DROP;
- } else {
- return NET_RX_SUCCESS;
- }
- default:
- break;
- }
- }
-
-drop_skb:
- kfree_skb(skb);
-drop:
- return NET_RX_DROP;
-}
-
-static struct packet_type lowpan_packet_type = {
- .type = htons(ETH_P_IEEE802154),
- .func = lowpan_rcv,
-};
-
-static int lowpan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
-{
- struct net_device *real_dev;
- struct lowpan_dev_record *entry;
- int ret;
-
- ASSERT_RTNL();
-
- pr_debug("adding new link\n");
-
- if (!tb[IFLA_LINK])
- return -EINVAL;
- /* find and hold real wpan device */
- real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev)
- return -ENODEV;
- if (real_dev->type != ARPHRD_IEEE802154) {
- dev_put(real_dev);
- return -EINVAL;
- }
-
- lowpan_dev_info(dev)->real_dev = real_dev;
- mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- dev_put(real_dev);
- lowpan_dev_info(dev)->real_dev = NULL;
- return -ENOMEM;
- }
-
- entry->ldev = dev;
-
- /* Set the lowpan hardware address to the wpan hardware address. */
- memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
-
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, &lowpan_devices);
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
- ret = register_netdevice(dev);
- if (ret >= 0) {
- if (!lowpan_open_count)
- dev_add_pack(&lowpan_packet_type);
- lowpan_open_count++;
- }
-
- return ret;
-}
-
-static void lowpan_dellink(struct net_device *dev, struct list_head *head)
-{
- struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
- struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry, *tmp;
-
- ASSERT_RTNL();
-
- lowpan_open_count--;
- if (!lowpan_open_count)
- dev_remove_pack(&lowpan_packet_type);
-
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (entry->ldev == dev) {
- list_del(&entry->list);
- kfree(entry);
- }
- }
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
- mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
- unregister_netdevice_queue(dev, head);
-
- dev_put(real_dev);
-}
-
-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
- .kind = "lowpan",
- .priv_size = sizeof(struct lowpan_dev_info),
- .setup = lowpan_setup,
- .newlink = lowpan_newlink,
- .dellink = lowpan_dellink,
- .validate = lowpan_validate,
-};
-
-static inline int __init lowpan_netlink_init(void)
-{
- return rtnl_link_register(&lowpan_link_ops);
-}
-
-static inline void lowpan_netlink_fini(void)
-{
- rtnl_link_unregister(&lowpan_link_ops);
-}
-
-static int lowpan_device_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- LIST_HEAD(del_list);
- struct lowpan_dev_record *entry, *tmp;
-
- if (dev->type != ARPHRD_IEEE802154)
- goto out;
-
- if (event == NETDEV_UNREGISTER) {
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (lowpan_dev_info(entry->ldev)->real_dev == dev)
- lowpan_dellink(entry->ldev, &del_list);
- }
-
- unregister_netdevice_many(&del_list);
- }
-
-out:
- return NOTIFY_DONE;
-}
-
-static struct notifier_block lowpan_dev_notifier = {
- .notifier_call = lowpan_device_event,
-};
-
-static int __init lowpan_init_module(void)
-{
- int err = 0;
-
- err = lowpan_net_frag_init();
- if (err < 0)
- goto out;
-
- err = lowpan_netlink_init();
- if (err < 0)
- goto out_frag;
-
- err = register_netdevice_notifier(&lowpan_dev_notifier);
- if (err < 0)
- goto out_pack;
-
- return 0;
-
-out_pack:
- lowpan_netlink_fini();
-out_frag:
- lowpan_net_frag_exit();
-out:
- return err;
-}
-
-static void __exit lowpan_cleanup_module(void)
-{
- lowpan_netlink_fini();
-
- lowpan_net_frag_exit();
-
- unregister_netdevice_notifier(&lowpan_dev_notifier);
-}
-
-module_init(lowpan_init_module);
-module_exit(lowpan_cleanup_module);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index c0d4154d144f..1370d5b0041b 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -1,4 +1,4 @@
-config IEEE802154
+menuconfig IEEE802154
tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support"
---help---
IEEE Std 802.15.4 defines a low data rate, low power and low
@@ -10,8 +10,16 @@ config IEEE802154
Say Y here to compile LR-WPAN support into the kernel or say M to
compile it as modules.
-config IEEE802154_6LOWPAN
- tristate "6lowpan support over IEEE 802.15.4"
- depends on IEEE802154 && 6LOWPAN
+if IEEE802154
+
+config IEEE802154_SOCKET
+ tristate "IEEE 802.15.4 socket interface"
+ default y
---help---
- IPv6 compression over IEEE 802.15.4.
+ Socket interface for IEEE 802.15.4. Provides a DGRAM socket
+ interface for 802.15.4 data frames and a RAW socket interface
+ for building the MAC header from userspace.
+
+source "net/ieee802154/6lowpan/Kconfig"
+
+endif
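
As a rough illustration of the help text above: userspace reaches this socket interface through the PF_IEEE802154 address family. The sketch below only opens the two socket types; it assumes a libc that defines AF_IEEE802154 (value 36 in the kernel's include/linux/socket.h) and omits addressing, which uses struct sockaddr_ieee802154 from the kernel headers.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IEEE802154
#define AF_IEEE802154 36 /* value from the kernel's include/linux/socket.h */
#endif

int main(void)
{
	/* DGRAM socket: payload only, MAC header built by the kernel. */
	int dgram = socket(AF_IEEE802154, SOCK_DGRAM, 0);
	/* RAW socket: the full MAC frame is supplied by userspace. */
	int raw = socket(AF_IEEE802154, SOCK_RAW, 0);

	if (dgram < 0 || raw < 0)
		perror("socket(AF_IEEE802154)");

	if (dgram >= 0)
		close(dgram);
	if (raw >= 0)
		close(raw);
	return 0;
}
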
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 9f6970f2a28b..05dab2957cd4 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,9 +1,9 @@
-obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o
+obj-$(CONFIG_IEEE802154) += ieee802154.o
+obj-$(CONFIG_IEEE802154_SOCKET) += ieee802154_socket.o
+obj-y += 6lowpan/
-ieee802154_6lowpan-y := 6lowpan_rtnl.o reassembly.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
header_ops.o sysfs.o nl802154.o
-af_802154-y := af_ieee802154.o raw.o dgram.o
+ieee802154_socket-y := socket.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h
deleted file mode 100644
index 343b63e6f953..000000000000
--- a/net/ieee802154/af802154.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Internal interfaces for ieee 802.15.4 address family.
- *
- * Copyright 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#ifndef AF802154_H
-#define AF802154_H
-
-struct sk_buff;
-struct net_device;
-struct ieee802154_addr;
-extern struct proto ieee802154_raw_prot;
-extern struct proto ieee802154_dgram_prot;
-void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb);
-int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb);
-struct net_device *ieee802154_get_dev(struct net *net,
- const struct ieee802154_addr *addr);
-
-#endif
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
deleted file mode 100644
index d0a1282cdf43..000000000000
--- a/net/ieee802154/af_ieee802154.c
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * IEEE802154.4 socket interface
- *
- * Copyright 2007, 2008 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
- */
-
-#include <linux/net.h>
-#include <linux/capability.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-#include <linux/if.h>
-#include <linux/termios.h> /* For TIOCOUTQ/INQ */
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/datalink.h>
-#include <net/psnap.h>
-#include <net/sock.h>
-#include <net/tcp_states.h>
-#include <net/route.h>
-
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-
-#include "af802154.h"
-
-/* Utility function for families */
-struct net_device*
-ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
-{
- struct net_device *dev = NULL;
- struct net_device *tmp;
- __le16 pan_id, short_addr;
- u8 hwaddr[IEEE802154_ADDR_LEN];
-
- switch (addr->mode) {
- case IEEE802154_ADDR_LONG:
- ieee802154_devaddr_to_raw(hwaddr, addr->extended_addr);
- rcu_read_lock();
- dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, hwaddr);
- if (dev)
- dev_hold(dev);
- rcu_read_unlock();
- break;
- case IEEE802154_ADDR_SHORT:
- if (addr->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST) ||
- addr->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
- addr->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
- break;
-
- rtnl_lock();
-
- for_each_netdev(net, tmp) {
- if (tmp->type != ARPHRD_IEEE802154)
- continue;
-
- pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
- short_addr =
- ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
- if (pan_id == addr->pan_id &&
- short_addr == addr->short_addr) {
- dev = tmp;
- dev_hold(dev);
- break;
- }
- }
-
- rtnl_unlock();
- break;
- default:
- pr_warn("Unsupported ieee802154 address type: %d\n",
- addr->mode);
- break;
- }
-
- return dev;
-}
-
-static int ieee802154_sock_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (sk) {
- sock->sk = NULL;
- sk->sk_prot->close(sk, 0);
- }
- return 0;
-}
-
-static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
-{
- struct sock *sk = sock->sk;
-
- return sk->sk_prot->sendmsg(iocb, sk, msg, len);
-}
-
-static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len)
-{
- struct sock *sk = sock->sk;
-
- if (sk->sk_prot->bind)
- return sk->sk_prot->bind(sk, uaddr, addr_len);
-
- return sock_no_bind(sock, uaddr, addr_len);
-}
-
-static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags)
-{
- struct sock *sk = sock->sk;
-
- if (addr_len < sizeof(uaddr->sa_family))
- return -EINVAL;
-
- if (uaddr->sa_family == AF_UNSPEC)
- return sk->sk_prot->disconnect(sk, flags);
-
- return sk->sk_prot->connect(sk, uaddr, addr_len);
-}
-
-static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
- unsigned int cmd)
-{
- struct ifreq ifr;
- int ret = -ENOIOCTLCMD;
- struct net_device *dev;
-
- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
- return -EFAULT;
-
- ifr.ifr_name[IFNAMSIZ-1] = 0;
-
- dev_load(sock_net(sk), ifr.ifr_name);
- dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
-
- if (!dev)
- return -ENODEV;
-
- if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
- ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
-
- if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
- ret = -EFAULT;
- dev_put(dev);
-
- return ret;
-}
-
-static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
-
- switch (cmd) {
- case SIOCGSTAMP:
- return sock_get_timestamp(sk, (struct timeval __user *)arg);
- case SIOCGSTAMPNS:
- return sock_get_timestampns(sk, (struct timespec __user *)arg);
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
- cmd);
- default:
- if (!sk->sk_prot->ioctl)
- return -ENOIOCTLCMD;
- return sk->sk_prot->ioctl(sk, cmd, arg);
- }
-}
-
-static const struct proto_ops ieee802154_raw_ops = {
- .family = PF_IEEE802154,
- .owner = THIS_MODULE,
- .release = ieee802154_sock_release,
- .bind = ieee802154_sock_bind,
- .connect = ieee802154_sock_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = sock_no_getname,
- .poll = datagram_poll,
- .ioctl = ieee802154_sock_ioctl,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = sock_common_setsockopt,
- .getsockopt = sock_common_getsockopt,
- .sendmsg = ieee802154_sock_sendmsg,
- .recvmsg = sock_common_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
- .compat_setsockopt = compat_sock_common_setsockopt,
- .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
-};
-
-static const struct proto_ops ieee802154_dgram_ops = {
- .family = PF_IEEE802154,
- .owner = THIS_MODULE,
- .release = ieee802154_sock_release,
- .bind = ieee802154_sock_bind,
- .connect = ieee802154_sock_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = sock_no_getname,
- .poll = datagram_poll,
- .ioctl = ieee802154_sock_ioctl,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = sock_common_setsockopt,
- .getsockopt = sock_common_getsockopt,
- .sendmsg = ieee802154_sock_sendmsg,
- .recvmsg = sock_common_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
- .compat_setsockopt = compat_sock_common_setsockopt,
- .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
-};
-
-/* Create a socket. Initialise the socket, blank the addresses
- * set the state.
- */
-static int ieee802154_create(struct net *net, struct socket *sock,
- int protocol, int kern)
-{
- struct sock *sk;
- int rc;
- struct proto *proto;
- const struct proto_ops *ops;
-
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
- switch (sock->type) {
- case SOCK_RAW:
- proto = &ieee802154_raw_prot;
- ops = &ieee802154_raw_ops;
- break;
- case SOCK_DGRAM:
- proto = &ieee802154_dgram_prot;
- ops = &ieee802154_dgram_ops;
- break;
- default:
- rc = -ESOCKTNOSUPPORT;
- goto out;
- }
-
- rc = -ENOMEM;
- sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
- if (!sk)
- goto out;
- rc = 0;
-
- sock->ops = ops;
-
- sock_init_data(sock, sk);
- /* FIXME: sk->sk_destruct */
- sk->sk_family = PF_IEEE802154;
-
- /* Checksums on by default */
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (sk->sk_prot->hash)
- sk->sk_prot->hash(sk);
-
- if (sk->sk_prot->init) {
- rc = sk->sk_prot->init(sk);
- if (rc)
- sk_common_release(sk);
- }
-out:
- return rc;
-}
-
-static const struct net_proto_family ieee802154_family_ops = {
- .family = PF_IEEE802154,
- .create = ieee802154_create,
- .owner = THIS_MODULE,
-};
-
-static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- if (!netif_running(dev))
- goto drop;
- pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
-#ifdef DEBUG
- print_hex_dump_bytes("ieee802154_rcv ",
- DUMP_PREFIX_NONE, skb->data, skb->len);
-#endif
-
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
- ieee802154_raw_deliver(dev, skb);
-
- if (dev->type != ARPHRD_IEEE802154)
- goto drop;
-
- if (skb->pkt_type != PACKET_OTHERHOST)
- return ieee802154_dgram_deliver(dev, skb);
-
-drop:
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-static struct packet_type ieee802154_packet_type = {
- .type = htons(ETH_P_IEEE802154),
- .func = ieee802154_rcv,
-};
-
-static int __init af_ieee802154_init(void)
-{
- int rc = -EINVAL;
-
- rc = proto_register(&ieee802154_raw_prot, 1);
- if (rc)
- goto out;
-
- rc = proto_register(&ieee802154_dgram_prot, 1);
- if (rc)
- goto err_dgram;
-
- /* Tell SOCKET that we are alive */
- rc = sock_register(&ieee802154_family_ops);
- if (rc)
- goto err_sock;
- dev_add_pack(&ieee802154_packet_type);
-
- rc = 0;
- goto out;
-
-err_sock:
- proto_unregister(&ieee802154_dgram_prot);
-err_dgram:
- proto_unregister(&ieee802154_raw_prot);
-out:
- return rc;
-}
-
-static void __exit af_ieee802154_remove(void)
-{
- dev_remove_pack(&ieee802154_packet_type);
- sock_unregister(PF_IEEE802154);
- proto_unregister(&ieee802154_dgram_prot);
- proto_unregister(&ieee802154_raw_prot);
-}
-
-module_init(af_ieee802154_init);
-module_exit(af_ieee802154_remove);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_IEEE802154);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
deleted file mode 100644
index d1930b70c4aa..000000000000
--- a/net/ieee802154/dgram.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * IEEE 802.15.4 dgram socket interface
- *
- * Copyright 2007, 2008 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#include <linux/capability.h>
-#include <linux/net.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/ieee802154.h>
-#include <net/sock.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-
-#include <asm/ioctls.h>
-
-#include "af802154.h"
-
-static HLIST_HEAD(dgram_head);
-static DEFINE_RWLOCK(dgram_lock);
-
-struct dgram_sock {
- struct sock sk;
-
- struct ieee802154_addr src_addr;
- struct ieee802154_addr dst_addr;
-
- unsigned int bound:1;
- unsigned int connected:1;
- unsigned int want_ack:1;
- unsigned int secen:1;
- unsigned int secen_override:1;
- unsigned int seclevel:3;
- unsigned int seclevel_override:1;
-};
-
-static inline struct dgram_sock *dgram_sk(const struct sock *sk)
-{
- return container_of(sk, struct dgram_sock, sk);
-}
-
-static void dgram_hash(struct sock *sk)
-{
- write_lock_bh(&dgram_lock);
- sk_add_node(sk, &dgram_head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- write_unlock_bh(&dgram_lock);
-}
-
-static void dgram_unhash(struct sock *sk)
-{
- write_lock_bh(&dgram_lock);
- if (sk_del_node_init(sk))
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&dgram_lock);
-}
-
-static int dgram_init(struct sock *sk)
-{
- struct dgram_sock *ro = dgram_sk(sk);
-
- ro->want_ack = 1;
- return 0;
-}
-
-static void dgram_close(struct sock *sk, long timeout)
-{
- sk_common_release(sk);
-}
-
-static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
-{
- struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
- struct ieee802154_addr haddr;
- struct dgram_sock *ro = dgram_sk(sk);
- int err = -EINVAL;
- struct net_device *dev;
-
- lock_sock(sk);
-
- ro->bound = 0;
-
- if (len < sizeof(*addr))
- goto out;
-
- if (addr->family != AF_IEEE802154)
- goto out;
-
- ieee802154_addr_from_sa(&haddr, &addr->addr);
- dev = ieee802154_get_dev(sock_net(sk), &haddr);
- if (!dev) {
- err = -ENODEV;
- goto out;
- }
-
- if (dev->type != ARPHRD_IEEE802154) {
- err = -ENODEV;
- goto out_put;
- }
-
- ro->src_addr = haddr;
-
- ro->bound = 1;
- err = 0;
-out_put:
- dev_put(dev);
-out:
- release_sock(sk);
-
- return err;
-}
-
-static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCOUTQ:
- {
- int amount = sk_wmem_alloc_get(sk);
-
- return put_user(amount, (int __user *)arg);
- }
-
- case SIOCINQ:
- {
- struct sk_buff *skb;
- unsigned long amount;
-
- amount = 0;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- skb = skb_peek(&sk->sk_receive_queue);
- if (skb != NULL) {
- /* We will only return the amount
- * of this packet since that is all
- * that will be read.
- */
- amount = skb->len - ieee802154_hdr_length(skb);
- }
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- return put_user(amount, (int __user *)arg);
- }
- }
-
- return -ENOIOCTLCMD;
-}
-
-/* FIXME: autobind */
-static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
- int len)
-{
- struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
- struct dgram_sock *ro = dgram_sk(sk);
- int err = 0;
-
- if (len < sizeof(*addr))
- return -EINVAL;
-
- if (addr->family != AF_IEEE802154)
- return -EINVAL;
-
- lock_sock(sk);
-
- if (!ro->bound) {
- err = -ENETUNREACH;
- goto out;
- }
-
- ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
- ro->connected = 1;
-
-out:
- release_sock(sk);
- return err;
-}
-
-static int dgram_disconnect(struct sock *sk, int flags)
-{
- struct dgram_sock *ro = dgram_sk(sk);
-
- lock_sock(sk);
- ro->connected = 0;
- release_sock(sk);
-
- return 0;
-}
-
-static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t size)
-{
- struct net_device *dev;
- unsigned int mtu;
- struct sk_buff *skb;
- struct ieee802154_mac_cb *cb;
- struct dgram_sock *ro = dgram_sk(sk);
- struct ieee802154_addr dst_addr;
- int hlen, tlen;
- int err;
-
- if (msg->msg_flags & MSG_OOB) {
- pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
- return -EOPNOTSUPP;
- }
-
- if (!ro->connected && !msg->msg_name)
- return -EDESTADDRREQ;
- else if (ro->connected && msg->msg_name)
- return -EISCONN;
-
- if (!ro->bound)
- dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
- else
- dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
-
- if (!dev) {
- pr_debug("no dev\n");
- err = -ENXIO;
- goto out;
- }
- mtu = dev->mtu;
- pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
-
- if (size > mtu) {
- pr_debug("size = %Zu, mtu = %u\n", size, mtu);
- err = -EMSGSIZE;
- goto out_dev;
- }
-
- hlen = LL_RESERVED_SPACE(dev);
- tlen = dev->needed_tailroom;
- skb = sock_alloc_send_skb(sk, hlen + tlen + size,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
- if (!skb)
- goto out_dev;
-
- skb_reserve(skb, hlen);
-
- skb_reset_network_header(skb);
-
- cb = mac_cb_init(skb);
- cb->type = IEEE802154_FC_TYPE_DATA;
- cb->ackreq = ro->want_ack;
-
- if (msg->msg_name) {
- DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
- daddr, msg->msg_name);
-
- ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
- } else {
- dst_addr = ro->dst_addr;
- }
-
- cb->secen = ro->secen;
- cb->secen_override = ro->secen_override;
- cb->seclevel = ro->seclevel;
- cb->seclevel_override = ro->seclevel_override;
-
- err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
- ro->bound ? &ro->src_addr : NULL, size);
- if (err < 0)
- goto out_skb;
-
- err = memcpy_from_msg(skb_put(skb, size), msg, size);
- if (err < 0)
- goto out_skb;
-
- skb->dev = dev;
- skb->sk = sk;
- skb->protocol = htons(ETH_P_IEEE802154);
-
- dev_put(dev);
-
- err = dev_queue_xmit(skb);
- if (err > 0)
- err = net_xmit_errno(err);
-
- return err ?: size;
-
-out_skb:
- kfree_skb(skb);
-out_dev:
- dev_put(dev);
-out:
- return err;
-}
-
-static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
-{
- size_t copied = 0;
- int err = -EOPNOTSUPP;
- struct sk_buff *skb;
- DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
-
- skb = skb_recv_datagram(sk, flags, noblock, &err);
- if (!skb)
- goto out;
-
- copied = skb->len;
- if (len < copied) {
- msg->msg_flags |= MSG_TRUNC;
- copied = len;
- }
-
- /* FIXME: skip headers if necessary ?! */
- err = skb_copy_datagram_msg(skb, 0, msg, copied);
- if (err)
- goto done;
-
- sock_recv_ts_and_drops(msg, sk, skb);
-
- if (saddr) {
- saddr->family = AF_IEEE802154;
- ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
- *addr_len = sizeof(*saddr);
- }
-
- if (flags & MSG_TRUNC)
- copied = skb->len;
-done:
- skb_free_datagram(sk, skb);
-out:
- if (err)
- return err;
- return copied;
-}
-
-static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
-
- if (sock_queue_rcv_skb(sk, skb) < 0) {
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-
- return NET_RX_SUCCESS;
-}
-
-static inline bool
-ieee802154_match_sock(__le64 hw_addr, __le16 pan_id, __le16 short_addr,
- struct dgram_sock *ro)
-{
- if (!ro->bound)
- return true;
-
- if (ro->src_addr.mode == IEEE802154_ADDR_LONG &&
- hw_addr == ro->src_addr.extended_addr)
- return true;
-
- if (ro->src_addr.mode == IEEE802154_ADDR_SHORT &&
- pan_id == ro->src_addr.pan_id &&
- short_addr == ro->src_addr.short_addr)
- return true;
-
- return false;
-}
-
-int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
-{
- struct sock *sk, *prev = NULL;
- int ret = NET_RX_SUCCESS;
- __le16 pan_id, short_addr;
- __le64 hw_addr;
-
- /* Data frame processing */
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
- hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
-
- read_lock(&dgram_lock);
- sk_for_each(sk, &dgram_head) {
- if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
- dgram_sk(sk))) {
- if (prev) {
- struct sk_buff *clone;
-
- clone = skb_clone(skb, GFP_ATOMIC);
- if (clone)
- dgram_rcv_skb(prev, clone);
- }
-
- prev = sk;
- }
- }
-
- if (prev) {
- dgram_rcv_skb(prev, skb);
- } else {
- kfree_skb(skb);
- ret = NET_RX_DROP;
- }
- read_unlock(&dgram_lock);
-
- return ret;
-}
-
-static int dgram_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- struct dgram_sock *ro = dgram_sk(sk);
-
- int val, len;
-
- if (level != SOL_IEEE802154)
- return -EOPNOTSUPP;
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- len = min_t(unsigned int, len, sizeof(int));
-
- switch (optname) {
- case WPAN_WANTACK:
- val = ro->want_ack;
- break;
- case WPAN_SECURITY:
- if (!ro->secen_override)
- val = WPAN_SECURITY_DEFAULT;
- else if (ro->secen)
- val = WPAN_SECURITY_ON;
- else
- val = WPAN_SECURITY_OFF;
- break;
- case WPAN_SECURITY_LEVEL:
- if (!ro->seclevel_override)
- val = WPAN_SECURITY_LEVEL_DEFAULT;
- else
- val = ro->seclevel;
- break;
- default:
- return -ENOPROTOOPT;
- }
-
- if (put_user(len, optlen))
- return -EFAULT;
- if (copy_to_user(optval, &val, len))
- return -EFAULT;
- return 0;
-}
-
-static int dgram_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- struct dgram_sock *ro = dgram_sk(sk);
- struct net *net = sock_net(sk);
- int val;
- int err = 0;
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (get_user(val, (int __user *)optval))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case WPAN_WANTACK:
- ro->want_ack = !!val;
- break;
- case WPAN_SECURITY:
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
- !ns_capable(net->user_ns, CAP_NET_RAW)) {
- err = -EPERM;
- break;
- }
-
- switch (val) {
- case WPAN_SECURITY_DEFAULT:
- ro->secen_override = 0;
- break;
- case WPAN_SECURITY_ON:
- ro->secen_override = 1;
- ro->secen = 1;
- break;
- case WPAN_SECURITY_OFF:
- ro->secen_override = 1;
- ro->secen = 0;
- break;
- default:
- err = -EINVAL;
- break;
- }
- break;
- case WPAN_SECURITY_LEVEL:
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
- !ns_capable(net->user_ns, CAP_NET_RAW)) {
- err = -EPERM;
- break;
- }
-
- if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
- val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
- err = -EINVAL;
- } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
- ro->seclevel_override = 0;
- } else {
- ro->seclevel_override = 1;
- ro->seclevel = val;
- }
- break;
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-struct proto ieee802154_dgram_prot = {
- .name = "IEEE-802.15.4-MAC",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct dgram_sock),
- .init = dgram_init,
- .close = dgram_close,
- .bind = dgram_bind,
- .sendmsg = dgram_sendmsg,
- .recvmsg = dgram_recvmsg,
- .hash = dgram_hash,
- .unhash = dgram_unhash,
- .connect = dgram_connect,
- .disconnect = dgram_disconnect,
- .ioctl = dgram_ioctl,
- .getsockopt = dgram_getsockopt,
- .setsockopt = dgram_setsockopt,
-};
-
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index fa1464762d0d..c8133c07ceee 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -63,13 +63,9 @@ int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
struct nlmsghdr *nlh = nlmsg_hdr(msg);
void *hdr = genlmsg_data(nlmsg_data(nlh));
- if (genlmsg_end(msg, hdr) < 0)
- goto out;
+ genlmsg_end(msg, hdr);
return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC);
-out:
- nlmsg_free(msg);
- return -ENOBUFS;
}
struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
@@ -96,13 +92,9 @@ int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
struct nlmsghdr *nlh = nlmsg_hdr(msg);
void *hdr = genlmsg_data(nlmsg_data(nlh));
- if (genlmsg_end(msg, hdr) < 0)
- goto out;
+ genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
-out:
- nlmsg_free(msg);
- return -ENOBUFS;
}
static const struct genl_ops ieee8021154_ops[] = {
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index cd919493c976..9105265920fe 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -121,7 +121,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
params.transmit_power) ||
nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
- params.cca_mode) ||
+ params.cca.mode) ||
nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
params.cca_ed_level) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
@@ -136,7 +136,8 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
}
wpan_phy_put(phy);
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
wpan_phy_put(phy);
@@ -516,7 +517,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
if (info->attrs[IEEE802154_ATTR_CCA_MODE])
- params.cca_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
+ params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 7baf98b14611..1b9d25f6e898 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -65,7 +65,8 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
goto nla_put_failure;
mutex_unlock(&phy->pib_lock);
kfree(buf);
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
mutex_unlock(&phy->pib_lock);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 889647744697..a4daf91b8d0a 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -209,7 +209,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
- [NL802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
+ [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
+ [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
[NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
@@ -290,16 +291,23 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
goto nla_put_failure;
/* cca mode */
- if (nla_put_u8(msg, NL802154_ATTR_CCA_MODE,
- rdev->wpan_phy.cca_mode))
+ if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+ rdev->wpan_phy.cca.mode))
goto nla_put_failure;
+ if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+ if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+ rdev->wpan_phy.cca.opt))
+ goto nla_put_failure;
+ }
+
if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
rdev->wpan_phy.transmit_power))
goto nla_put_failure;
finish:
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -482,7 +490,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
goto nla_put_failure;
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -622,6 +631,31 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
return rdev_set_channel(rdev, page, channel);
}
+static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct wpan_phy_cca cca;
+
+ if (!info->attrs[NL802154_ATTR_CCA_MODE])
+ return -EINVAL;
+
+ cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
+ /* checking 802.15.4 constraints */
+ if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+ return -EINVAL;
+
+ if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+ if (!info->attrs[NL802154_ATTR_CCA_OPT])
+ return -EINVAL;
+
+ cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
+ if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+ return -EINVAL;
+ }
+
+ return rdev_set_cca_mode(rdev, &cca);
+}
+
static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -895,6 +929,14 @@ static const struct genl_ops nl802154_ops[] = {
NL802154_FLAG_NEED_RTNL,
},
{
+ .cmd = NL802154_CMD_SET_CCA_MODE,
+ .doit = nl802154_set_cca_mode,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL802154_CMD_SET_PAN_ID,
.doit = nl802154_set_pan_id,
.policy = nl802154_policy,
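[Editor's note, not part of the patch] The new NL802154_CMD_SET_CCA_MODE op above expects NL802154_ATTR_CCA_MODE as a u32 and, only for the combined energy-and-carrier mode, NL802154_ATTR_CCA_OPT. The following libnl-3 sketch is purely illustrative of how userspace could issue that command; the NL802154_ATTR_WPAN_PHY attribute, the userspace copy of the nl802154 definitions and the set_cca_energy() helper name are assumptions, and error handling is omitted for brevity.

/* Illustrative sketch only -- not part of this patch. */
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "nl802154.h"	/* assumed userspace copy of the kernel's nl802154 definitions */

static int set_cca_energy(uint32_t wpan_phy_idx)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl802154");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL802154_CMD_SET_CCA_MODE, 0);
	/* NL802154_ATTR_WPAN_PHY is an assumption here: the op is flagged
	 * NL802154_FLAG_NEED_WPAN_PHY, so the phy must be identified. */
	nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, wpan_phy_idx);
	/* plain energy detection needs no NL802154_ATTR_CCA_OPT */
	nla_put_u32(msg, NL802154_ATTR_CCA_MODE, NL802154_CCA_ENERGY);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}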
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
deleted file mode 100644
index 1674b115c891..000000000000
--- a/net/ieee802154/raw.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Raw IEEE 802.15.4 sockets
- *
- * Copyright 2007, 2008 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#include <linux/net.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-
-#include "af802154.h"
-
-static HLIST_HEAD(raw_head);
-static DEFINE_RWLOCK(raw_lock);
-
-static void raw_hash(struct sock *sk)
-{
- write_lock_bh(&raw_lock);
- sk_add_node(sk, &raw_head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- write_unlock_bh(&raw_lock);
-}
-
-static void raw_unhash(struct sock *sk)
-{
- write_lock_bh(&raw_lock);
- if (sk_del_node_init(sk))
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&raw_lock);
-}
-
-static void raw_close(struct sock *sk, long timeout)
-{
- sk_common_release(sk);
-}
-
-static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
-{
- struct ieee802154_addr addr;
- struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr;
- int err = 0;
- struct net_device *dev = NULL;
-
- if (len < sizeof(*uaddr))
- return -EINVAL;
-
- uaddr = (struct sockaddr_ieee802154 *)_uaddr;
- if (uaddr->family != AF_IEEE802154)
- return -EINVAL;
-
- lock_sock(sk);
-
- ieee802154_addr_from_sa(&addr, &uaddr->addr);
- dev = ieee802154_get_dev(sock_net(sk), &addr);
- if (!dev) {
- err = -ENODEV;
- goto out;
- }
-
- if (dev->type != ARPHRD_IEEE802154) {
- err = -ENODEV;
- goto out_put;
- }
-
- sk->sk_bound_dev_if = dev->ifindex;
- sk_dst_reset(sk);
-
-out_put:
- dev_put(dev);
-out:
- release_sock(sk);
-
- return err;
-}
-
-static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
- int addr_len)
-{
- return -ENOTSUPP;
-}
-
-static int raw_disconnect(struct sock *sk, int flags)
-{
- return 0;
-}
-
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t size)
-{
- struct net_device *dev;
- unsigned int mtu;
- struct sk_buff *skb;
- int hlen, tlen;
- int err;
-
- if (msg->msg_flags & MSG_OOB) {
- pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
- return -EOPNOTSUPP;
- }
-
- lock_sock(sk);
- if (!sk->sk_bound_dev_if)
- dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
- else
- dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
- release_sock(sk);
-
- if (!dev) {
- pr_debug("no dev\n");
- err = -ENXIO;
- goto out;
- }
-
- mtu = dev->mtu;
- pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
-
- if (size > mtu) {
- pr_debug("size = %Zu, mtu = %u\n", size, mtu);
- err = -EINVAL;
- goto out_dev;
- }
-
- hlen = LL_RESERVED_SPACE(dev);
- tlen = dev->needed_tailroom;
- skb = sock_alloc_send_skb(sk, hlen + tlen + size,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- goto out_dev;
-
- skb_reserve(skb, hlen);
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
-
- err = memcpy_from_msg(skb_put(skb, size), msg, size);
- if (err < 0)
- goto out_skb;
-
- skb->dev = dev;
- skb->sk = sk;
- skb->protocol = htons(ETH_P_IEEE802154);
-
- dev_put(dev);
-
- err = dev_queue_xmit(skb);
- if (err > 0)
- err = net_xmit_errno(err);
-
- return err ?: size;
-
-out_skb:
- kfree_skb(skb);
-out_dev:
- dev_put(dev);
-out:
- return err;
-}
-
-static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
-{
- size_t copied = 0;
- int err = -EOPNOTSUPP;
- struct sk_buff *skb;
-
- skb = skb_recv_datagram(sk, flags, noblock, &err);
- if (!skb)
- goto out;
-
- copied = skb->len;
- if (len < copied) {
- msg->msg_flags |= MSG_TRUNC;
- copied = len;
- }
-
- err = skb_copy_datagram_msg(skb, 0, msg, copied);
- if (err)
- goto done;
-
- sock_recv_ts_and_drops(msg, sk, skb);
-
- if (flags & MSG_TRUNC)
- copied = skb->len;
-done:
- skb_free_datagram(sk, skb);
-out:
- if (err)
- return err;
- return copied;
-}
-
-static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
-
- if (sock_queue_rcv_skb(sk, skb) < 0) {
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-
- return NET_RX_SUCCESS;
-}
-
-void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
-{
- struct sock *sk;
-
- read_lock(&raw_lock);
- sk_for_each(sk, &raw_head) {
- bh_lock_sock(sk);
- if (!sk->sk_bound_dev_if ||
- sk->sk_bound_dev_if == dev->ifindex) {
- struct sk_buff *clone;
-
- clone = skb_clone(skb, GFP_ATOMIC);
- if (clone)
- raw_rcv_skb(sk, clone);
- }
- bh_unlock_sock(sk);
- }
- read_unlock(&raw_lock);
-}
-
-static int raw_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- return -EOPNOTSUPP;
-}
-
-static int raw_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- return -EOPNOTSUPP;
-}
-
-struct proto ieee802154_raw_prot = {
- .name = "IEEE-802.15.4-RAW",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct sock),
- .close = raw_close,
- .bind = raw_bind,
- .sendmsg = raw_sendmsg,
- .recvmsg = raw_recvmsg,
- .hash = raw_hash,
- .unhash = raw_unhash,
- .connect = raw_connect,
- .disconnect = raw_disconnect,
- .getsockopt = raw_getsockopt,
- .setsockopt = raw_setsockopt,
-};
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index aff54fbd9264..7c46732fad2b 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -42,6 +42,13 @@ rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
}
static inline int
+rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
+ const struct wpan_phy_cca *cca)
+{
+ return rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
+}
+
+static inline int
rdev_set_pan_id(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le16 pan_id)
{
diff --git a/net/ieee802154/reassembly.h b/net/ieee802154/reassembly.h
deleted file mode 100644
index 836b16fa001f..000000000000
--- a/net/ieee802154/reassembly.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __IEEE802154_6LOWPAN_REASSEMBLY_H__
-#define __IEEE802154_6LOWPAN_REASSEMBLY_H__
-
-#include <net/inet_frag.h>
-
-struct lowpan_create_arg {
- u16 tag;
- u16 d_size;
- const struct ieee802154_addr *src;
- const struct ieee802154_addr *dst;
-};
-
-/* Equivalent of ipv4 struct ip
- */
-struct lowpan_frag_queue {
- struct inet_frag_queue q;
-
- u16 tag;
- u16 d_size;
- struct ieee802154_addr saddr;
- struct ieee802154_addr daddr;
-};
-
-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
-{
- switch (a->mode) {
- case IEEE802154_ADDR_LONG:
- return (((__force u64)a->extended_addr) >> 32) ^
- (((__force u64)a->extended_addr) & 0xffffffff);
- case IEEE802154_ADDR_SHORT:
- return (__force u32)(a->short_addr);
- default:
- return 0;
- }
-}
-
-int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
-void lowpan_net_frag_exit(void);
-int lowpan_net_frag_init(void);
-
-#endif /* __IEEE802154_6LOWPAN_REASSEMBLY_H__ */
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
new file mode 100644
index 000000000000..2878d8ca6d3b
--- /dev/null
+++ b/net/ieee802154/socket.c
@@ -0,0 +1,1125 @@
+/*
+ * IEEE 802.15.4 socket interface
+ *
+ * Copyright 2007, 2008 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ */
+
+#include <linux/net.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/if.h>
+#include <linux/termios.h> /* For TIOCOUTQ/INQ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/datalink.h>
+#include <net/psnap.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/route.h>
+
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+
+/* Utility function for families */
+static struct net_device*
+ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
+{
+ struct net_device *dev = NULL;
+ struct net_device *tmp;
+ __le16 pan_id, short_addr;
+ u8 hwaddr[IEEE802154_ADDR_LEN];
+
+ switch (addr->mode) {
+ case IEEE802154_ADDR_LONG:
+ ieee802154_devaddr_to_raw(hwaddr, addr->extended_addr);
+ rcu_read_lock();
+ dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, hwaddr);
+ if (dev)
+ dev_hold(dev);
+ rcu_read_unlock();
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (addr->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST) ||
+ addr->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
+ addr->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
+ break;
+
+ rtnl_lock();
+
+ for_each_netdev(net, tmp) {
+ if (tmp->type != ARPHRD_IEEE802154)
+ continue;
+
+ pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
+ short_addr =
+ ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
+
+ if (pan_id == addr->pan_id &&
+ short_addr == addr->short_addr) {
+ dev = tmp;
+ dev_hold(dev);
+ break;
+ }
+ }
+
+ rtnl_unlock();
+ break;
+ default:
+ pr_warn("Unsupported ieee802154 address type: %d\n",
+ addr->mode);
+ break;
+ }
+
+ return dev;
+}
+
+static int ieee802154_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk) {
+ sock->sk = NULL;
+ sk->sk_prot->close(sk, 0);
+ }
+ return 0;
+}
+
+static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->sendmsg(iocb, sk, msg, len);
+}
+
+static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->bind)
+ return sk->sk_prot->bind(sk, uaddr, addr_len);
+
+ return sock_no_bind(sock, uaddr, addr_len);
+}
+
+static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+
+ if (addr_len < sizeof(uaddr->sa_family))
+ return -EINVAL;
+
+ if (uaddr->sa_family == AF_UNSPEC)
+ return sk->sk_prot->disconnect(sk, flags);
+
+ return sk->sk_prot->connect(sk, uaddr, addr_len);
+}
+
+static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
+ unsigned int cmd)
+{
+ struct ifreq ifr;
+ int ret = -ENOIOCTLCMD;
+ struct net_device *dev;
+
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ return -EFAULT;
+
+ ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+ dev_load(sock_net(sk), ifr.ifr_name);
+ dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
+
+ if (!dev)
+ return -ENODEV;
+
+ if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
+
+ if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+ ret = -EFAULT;
+ dev_put(dev);
+
+ return ret;
+}
+
+static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+
+ switch (cmd) {
+ case SIOCGSTAMP:
+ return sock_get_timestamp(sk, (struct timeval __user *)arg);
+ case SIOCGSTAMPNS:
+ return sock_get_timestampns(sk, (struct timespec __user *)arg);
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
+ cmd);
+ default:
+ if (!sk->sk_prot->ioctl)
+ return -ENOIOCTLCMD;
+ return sk->sk_prot->ioctl(sk, cmd, arg);
+ }
+}
+
+/* RAW Sockets (802.15.4 created in userspace) */
+static HLIST_HEAD(raw_head);
+static DEFINE_RWLOCK(raw_lock);
+
+static void raw_hash(struct sock *sk)
+{
+ write_lock_bh(&raw_lock);
+ sk_add_node(sk, &raw_head);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ write_unlock_bh(&raw_lock);
+}
+
+static void raw_unhash(struct sock *sk)
+{
+ write_lock_bh(&raw_lock);
+ if (sk_del_node_init(sk))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ write_unlock_bh(&raw_lock);
+}
+
+static void raw_close(struct sock *sk, long timeout)
+{
+ sk_common_release(sk);
+}
+
+static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
+{
+ struct ieee802154_addr addr;
+ struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr;
+ int err = 0;
+ struct net_device *dev = NULL;
+
+ if (len < sizeof(*uaddr))
+ return -EINVAL;
+
+ uaddr = (struct sockaddr_ieee802154 *)_uaddr;
+ if (uaddr->family != AF_IEEE802154)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ ieee802154_addr_from_sa(&addr, &uaddr->addr);
+ dev = ieee802154_get_dev(sock_net(sk), &addr);
+ if (!dev) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (dev->type != ARPHRD_IEEE802154) {
+ err = -ENODEV;
+ goto out_put;
+ }
+
+ sk->sk_bound_dev_if = dev->ifindex;
+ sk_dst_reset(sk);
+
+out_put:
+ dev_put(dev);
+out:
+ release_sock(sk);
+
+ return err;
+}
+
+static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
+{
+ return -ENOTSUPP;
+}
+
+static int raw_disconnect(struct sock *sk, int flags)
+{
+ return 0;
+}
+
+static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t size)
+{
+ struct net_device *dev;
+ unsigned int mtu;
+ struct sk_buff *skb;
+ int hlen, tlen;
+ int err;
+
+ if (msg->msg_flags & MSG_OOB) {
+ pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
+ return -EOPNOTSUPP;
+ }
+
+ lock_sock(sk);
+ if (!sk->sk_bound_dev_if)
+ dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
+ else
+ dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
+ release_sock(sk);
+
+ if (!dev) {
+ pr_debug("no dev\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ mtu = dev->mtu;
+ pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+
+ if (size > mtu) {
+ pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+ err = -EINVAL;
+ goto out_dev;
+ }
+
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, hlen + tlen + size,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ goto out_dev;
+
+ skb_reserve(skb, hlen);
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
+ if (err < 0)
+ goto out_skb;
+
+ skb->dev = dev;
+ skb->sk = sk;
+ skb->protocol = htons(ETH_P_IEEE802154);
+
+ dev_put(dev);
+
+ err = dev_queue_xmit(skb);
+ if (err > 0)
+ err = net_xmit_errno(err);
+
+ return err ?: size;
+
+out_skb:
+ kfree_skb(skb);
+out_dev:
+ dev_put(dev);
+out:
+ return err;
+}
+
+static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len)
+{
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ struct sk_buff *skb;
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (err)
+ goto done;
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ if (err)
+ return err;
+ return copied;
+}
+
+static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+static void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
+{
+ struct sock *sk;
+
+ read_lock(&raw_lock);
+ sk_for_each(sk, &raw_head) {
+ bh_lock_sock(sk);
+ if (!sk->sk_bound_dev_if ||
+ sk->sk_bound_dev_if == dev->ifindex) {
+ struct sk_buff *clone;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (clone)
+ raw_rcv_skb(sk, clone);
+ }
+ bh_unlock_sock(sk);
+ }
+ read_unlock(&raw_lock);
+}
+
+static int raw_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int raw_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct proto ieee802154_raw_prot = {
+ .name = "IEEE-802.15.4-RAW",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+ .close = raw_close,
+ .bind = raw_bind,
+ .sendmsg = raw_sendmsg,
+ .recvmsg = raw_recvmsg,
+ .hash = raw_hash,
+ .unhash = raw_unhash,
+ .connect = raw_connect,
+ .disconnect = raw_disconnect,
+ .getsockopt = raw_getsockopt,
+ .setsockopt = raw_setsockopt,
+};
+
+static const struct proto_ops ieee802154_raw_ops = {
+ .family = PF_IEEE802154,
+ .owner = THIS_MODULE,
+ .release = ieee802154_sock_release,
+ .bind = ieee802154_sock_bind,
+ .connect = ieee802154_sock_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = datagram_poll,
+ .ioctl = ieee802154_sock_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = ieee802154_sock_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_sock_common_setsockopt,
+ .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+/* DGRAM Sockets (802.15.4 dataframes) */
+static HLIST_HEAD(dgram_head);
+static DEFINE_RWLOCK(dgram_lock);
+
+struct dgram_sock {
+ struct sock sk;
+
+ struct ieee802154_addr src_addr;
+ struct ieee802154_addr dst_addr;
+
+ unsigned int bound:1;
+ unsigned int connected:1;
+ unsigned int want_ack:1;
+ unsigned int secen:1;
+ unsigned int secen_override:1;
+ unsigned int seclevel:3;
+ unsigned int seclevel_override:1;
+};
+
+static inline struct dgram_sock *dgram_sk(const struct sock *sk)
+{
+ return container_of(sk, struct dgram_sock, sk);
+}
+
+static void dgram_hash(struct sock *sk)
+{
+ write_lock_bh(&dgram_lock);
+ sk_add_node(sk, &dgram_head);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ write_unlock_bh(&dgram_lock);
+}
+
+static void dgram_unhash(struct sock *sk)
+{
+ write_lock_bh(&dgram_lock);
+ if (sk_del_node_init(sk))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ write_unlock_bh(&dgram_lock);
+}
+
+static int dgram_init(struct sock *sk)
+{
+ struct dgram_sock *ro = dgram_sk(sk);
+
+ ro->want_ack = 1;
+ return 0;
+}
+
+static void dgram_close(struct sock *sk, long timeout)
+{
+ sk_common_release(sk);
+}
+
+static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
+{
+ struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
+ struct ieee802154_addr haddr;
+ struct dgram_sock *ro = dgram_sk(sk);
+ int err = -EINVAL;
+ struct net_device *dev;
+
+ lock_sock(sk);
+
+ ro->bound = 0;
+
+ if (len < sizeof(*addr))
+ goto out;
+
+ if (addr->family != AF_IEEE802154)
+ goto out;
+
+ ieee802154_addr_from_sa(&haddr, &addr->addr);
+ dev = ieee802154_get_dev(sock_net(sk), &haddr);
+ if (!dev) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (dev->type != ARPHRD_IEEE802154) {
+ err = -ENODEV;
+ goto out_put;
+ }
+
+ ro->src_addr = haddr;
+
+ ro->bound = 1;
+ err = 0;
+out_put:
+ dev_put(dev);
+out:
+ release_sock(sk);
+
+ return err;
+}
+
+static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case SIOCOUTQ:
+ {
+ int amount = sk_wmem_alloc_get(sk);
+
+ return put_user(amount, (int __user *)arg);
+ }
+
+ case SIOCINQ:
+ {
+ struct sk_buff *skb;
+ unsigned long amount;
+
+ amount = 0;
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb) {
+ /* We will only return the amount
+ * of this packet since that is all
+ * that will be read.
+ */
+ amount = skb->len - ieee802154_hdr_length(skb);
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ return put_user(amount, (int __user *)arg);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+/* FIXME: autobind */
+static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
+ int len)
+{
+ struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
+ struct dgram_sock *ro = dgram_sk(sk);
+ int err = 0;
+
+ if (len < sizeof(*addr))
+ return -EINVAL;
+
+ if (addr->family != AF_IEEE802154)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (!ro->bound) {
+ err = -ENETUNREACH;
+ goto out;
+ }
+
+ ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
+ ro->connected = 1;
+
+out:
+ release_sock(sk);
+ return err;
+}
+
+static int dgram_disconnect(struct sock *sk, int flags)
+{
+ struct dgram_sock *ro = dgram_sk(sk);
+
+ lock_sock(sk);
+ ro->connected = 0;
+ release_sock(sk);
+
+ return 0;
+}
+
+static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t size)
+{
+ struct net_device *dev;
+ unsigned int mtu;
+ struct sk_buff *skb;
+ struct ieee802154_mac_cb *cb;
+ struct dgram_sock *ro = dgram_sk(sk);
+ struct ieee802154_addr dst_addr;
+ int hlen, tlen;
+ int err;
+
+ if (msg->msg_flags & MSG_OOB) {
+ pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ro->connected && !msg->msg_name)
+ return -EDESTADDRREQ;
+ else if (ro->connected && msg->msg_name)
+ return -EISCONN;
+
+ if (!ro->bound)
+ dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
+ else
+ dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
+
+ if (!dev) {
+ pr_debug("no dev\n");
+ err = -ENXIO;
+ goto out;
+ }
+ mtu = dev->mtu;
+ pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+
+ if (size > mtu) {
+ pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+ err = -EMSGSIZE;
+ goto out_dev;
+ }
+
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, hlen + tlen + size,
+ msg->msg_flags & MSG_DONTWAIT,
+ &err);
+ if (!skb)
+ goto out_dev;
+
+ skb_reserve(skb, hlen);
+
+ skb_reset_network_header(skb);
+
+ cb = mac_cb_init(skb);
+ cb->type = IEEE802154_FC_TYPE_DATA;
+ cb->ackreq = ro->want_ack;
+
+ if (msg->msg_name) {
+ DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
+ daddr, msg->msg_name);
+
+ ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+ } else {
+ dst_addr = ro->dst_addr;
+ }
+
+ cb->secen = ro->secen;
+ cb->secen_override = ro->secen_override;
+ cb->seclevel = ro->seclevel;
+ cb->seclevel_override = ro->seclevel_override;
+
+ err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
+ ro->bound ? &ro->src_addr : NULL, size);
+ if (err < 0)
+ goto out_skb;
+
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
+ if (err < 0)
+ goto out_skb;
+
+ skb->dev = dev;
+ skb->sk = sk;
+ skb->protocol = htons(ETH_P_IEEE802154);
+
+ dev_put(dev);
+
+ err = dev_queue_xmit(skb);
+ if (err > 0)
+ err = net_xmit_errno(err);
+
+ return err ?: size;
+
+out_skb:
+ kfree_skb(skb);
+out_dev:
+ dev_put(dev);
+out:
+ return err;
+}
+
+static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len)
+{
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ struct sk_buff *skb;
+ DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ /* FIXME: skip headers if necessary ?! */
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (err)
+ goto done;
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+ if (saddr) {
+ saddr->family = AF_IEEE802154;
+ ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
+ *addr_len = sizeof(*saddr);
+ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ if (err)
+ return err;
+ return copied;
+}
+
+static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+static inline bool
+ieee802154_match_sock(__le64 hw_addr, __le16 pan_id, __le16 short_addr,
+ struct dgram_sock *ro)
+{
+ if (!ro->bound)
+ return true;
+
+ if (ro->src_addr.mode == IEEE802154_ADDR_LONG &&
+ hw_addr == ro->src_addr.extended_addr)
+ return true;
+
+ if (ro->src_addr.mode == IEEE802154_ADDR_SHORT &&
+ pan_id == ro->src_addr.pan_id &&
+ short_addr == ro->src_addr.short_addr)
+ return true;
+
+ return false;
+}
+
+static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
+{
+ struct sock *sk, *prev = NULL;
+ int ret = NET_RX_SUCCESS;
+ __le16 pan_id, short_addr;
+ __le64 hw_addr;
+
+ /* Data frame processing */
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
+ hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+
+ read_lock(&dgram_lock);
+ sk_for_each(sk, &dgram_head) {
+ if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
+ dgram_sk(sk))) {
+ if (prev) {
+ struct sk_buff *clone;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (clone)
+ dgram_rcv_skb(prev, clone);
+ }
+
+ prev = sk;
+ }
+ }
+
+ if (prev) {
+ dgram_rcv_skb(prev, skb);
+ } else {
+ kfree_skb(skb);
+ ret = NET_RX_DROP;
+ }
+ read_unlock(&dgram_lock);
+
+ return ret;
+}
+
+static int dgram_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct dgram_sock *ro = dgram_sk(sk);
+
+ int val, len;
+
+ if (level != SOL_IEEE802154)
+ return -EOPNOTSUPP;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ switch (optname) {
+ case WPAN_WANTACK:
+ val = ro->want_ack;
+ break;
+ case WPAN_SECURITY:
+ if (!ro->secen_override)
+ val = WPAN_SECURITY_DEFAULT;
+ else if (ro->secen)
+ val = WPAN_SECURITY_ON;
+ else
+ val = WPAN_SECURITY_OFF;
+ break;
+ case WPAN_SECURITY_LEVEL:
+ if (!ro->seclevel_override)
+ val = WPAN_SECURITY_LEVEL_DEFAULT;
+ else
+ val = ro->seclevel;
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
+
+static int dgram_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct dgram_sock *ro = dgram_sk(sk);
+ struct net *net = sock_net(sk);
+ int val;
+ int err = 0;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case WPAN_WANTACK:
+ ro->want_ack = !!val;
+ break;
+ case WPAN_SECURITY:
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+ !ns_capable(net->user_ns, CAP_NET_RAW)) {
+ err = -EPERM;
+ break;
+ }
+
+ switch (val) {
+ case WPAN_SECURITY_DEFAULT:
+ ro->secen_override = 0;
+ break;
+ case WPAN_SECURITY_ON:
+ ro->secen_override = 1;
+ ro->secen = 1;
+ break;
+ case WPAN_SECURITY_OFF:
+ ro->secen_override = 1;
+ ro->secen = 0;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ break;
+ case WPAN_SECURITY_LEVEL:
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+ !ns_capable(net->user_ns, CAP_NET_RAW)) {
+ err = -EPERM;
+ break;
+ }
+
+ if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
+ val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
+ err = -EINVAL;
+ } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
+ ro->seclevel_override = 0;
+ } else {
+ ro->seclevel_override = 1;
+ ro->seclevel = val;
+ }
+ break;
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static struct proto ieee802154_dgram_prot = {
+ .name = "IEEE-802.15.4-MAC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct dgram_sock),
+ .init = dgram_init,
+ .close = dgram_close,
+ .bind = dgram_bind,
+ .sendmsg = dgram_sendmsg,
+ .recvmsg = dgram_recvmsg,
+ .hash = dgram_hash,
+ .unhash = dgram_unhash,
+ .connect = dgram_connect,
+ .disconnect = dgram_disconnect,
+ .ioctl = dgram_ioctl,
+ .getsockopt = dgram_getsockopt,
+ .setsockopt = dgram_setsockopt,
+};
+
+static const struct proto_ops ieee802154_dgram_ops = {
+ .family = PF_IEEE802154,
+ .owner = THIS_MODULE,
+ .release = ieee802154_sock_release,
+ .bind = ieee802154_sock_bind,
+ .connect = ieee802154_sock_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = datagram_poll,
+ .ioctl = ieee802154_sock_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = ieee802154_sock_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_sock_common_setsockopt,
+ .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+/* Create a socket. Initialise the socket, blank the addresses and
+ * set the state.
+ */
+static int ieee802154_create(struct net *net, struct socket *sock,
+ int protocol, int kern)
+{
+ struct sock *sk;
+ int rc;
+ struct proto *proto;
+ const struct proto_ops *ops;
+
+ if (!net_eq(net, &init_net))
+ return -EAFNOSUPPORT;
+
+ switch (sock->type) {
+ case SOCK_RAW:
+ proto = &ieee802154_raw_prot;
+ ops = &ieee802154_raw_ops;
+ break;
+ case SOCK_DGRAM:
+ proto = &ieee802154_dgram_prot;
+ ops = &ieee802154_dgram_ops;
+ break;
+ default:
+ rc = -ESOCKTNOSUPPORT;
+ goto out;
+ }
+
+ rc = -ENOMEM;
+ sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
+ if (!sk)
+ goto out;
+ rc = 0;
+
+ sock->ops = ops;
+
+ sock_init_data(sock, sk);
+ /* FIXME: sk->sk_destruct */
+ sk->sk_family = PF_IEEE802154;
+
+ /* Checksums on by default */
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ if (sk->sk_prot->hash)
+ sk->sk_prot->hash(sk);
+
+ if (sk->sk_prot->init) {
+ rc = sk->sk_prot->init(sk);
+ if (rc)
+ sk_common_release(sk);
+ }
+out:
+ return rc;
+}
+
+static const struct net_proto_family ieee802154_family_ops = {
+ .family = PF_IEEE802154,
+ .create = ieee802154_create,
+ .owner = THIS_MODULE,
+};
+
+static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ if (!netif_running(dev))
+ goto drop;
+ pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+ print_hex_dump_bytes("ieee802154_rcv ",
+ DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
+
+ if (!net_eq(dev_net(dev), &init_net))
+ goto drop;
+
+ ieee802154_raw_deliver(dev, skb);
+
+ if (dev->type != ARPHRD_IEEE802154)
+ goto drop;
+
+ if (skb->pkt_type != PACKET_OTHERHOST)
+ return ieee802154_dgram_deliver(dev, skb);
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static struct packet_type ieee802154_packet_type = {
+ .type = htons(ETH_P_IEEE802154),
+ .func = ieee802154_rcv,
+};
+
+static int __init af_ieee802154_init(void)
+{
+ int rc = -EINVAL;
+
+ rc = proto_register(&ieee802154_raw_prot, 1);
+ if (rc)
+ goto out;
+
+ rc = proto_register(&ieee802154_dgram_prot, 1);
+ if (rc)
+ goto err_dgram;
+
+ /* Tell SOCKET that we are alive */
+ rc = sock_register(&ieee802154_family_ops);
+ if (rc)
+ goto err_sock;
+ dev_add_pack(&ieee802154_packet_type);
+
+ rc = 0;
+ goto out;
+
+err_sock:
+ proto_unregister(&ieee802154_dgram_prot);
+err_dgram:
+ proto_unregister(&ieee802154_raw_prot);
+out:
+ return rc;
+}
+
+static void __exit af_ieee802154_remove(void)
+{
+ dev_remove_pack(&ieee802154_packet_type);
+ sock_unregister(PF_IEEE802154);
+ proto_unregister(&ieee802154_dgram_prot);
+ proto_unregister(&ieee802154_raw_prot);
+}
+
+module_init(af_ieee802154_init);
+module_exit(af_ieee802154_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_IEEE802154);
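[Editor's note, not part of the patch] The merged socket.c above keeps the existing AF_IEEE802154 userspace ABI: SOCK_RAW and SOCK_DGRAM sockets, with datagram options set through setsockopt() at level SOL_IEEE802154 as implemented by dgram_setsockopt(). The sketch below is a minimal illustration only; the SOL_IEEE802154/WPAN_WANTACK/PF_IEEE802154 fallback values are assumptions taken from the kernel headers and should be verified locally, since af_ieee802154.h is not exported as UAPI.

/* Illustrative sketch only -- not part of this patch. */
#include <stdio.h>
#include <sys/socket.h>

#ifndef PF_IEEE802154
#define PF_IEEE802154	36	/* assumed; see linux/socket.h */
#endif
#ifndef SOL_IEEE802154
#define SOL_IEEE802154	0	/* assumed; see af_ieee802154.h */
#endif
#ifndef WPAN_WANTACK
#define WPAN_WANTACK	0	/* assumed; see af_ieee802154.h */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(PF_IEEE802154, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket(PF_IEEE802154)");
		return 1;
	}
	/* dgram_setsockopt() above accepts WPAN_WANTACK from any user;
	 * the security options additionally need CAP_NET_ADMIN or CAP_NET_RAW. */
	if (setsockopt(fd, SOL_IEEE802154, WPAN_WANTACK, &one, sizeof(one)) < 0)
		perror("setsockopt(WPAN_WANTACK)");
	return 0;
}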
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index 1613b9c65dfa..dff55c2d87f3 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -68,7 +68,7 @@ static DEVICE_ATTR_RO(name)
MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
MASTER_SHOW(transmit_power, "%d +- 1 dB");
-MASTER_SHOW(cca_mode, "%d");
+MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode);
static ssize_t channels_supported_show(struct device *dev,
struct device_attribute *attr,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index a44773c8346c..d2e49baaff63 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -395,8 +395,6 @@ int inet_release(struct socket *sock)
if (sk) {
long timeout;
- sock_rps_reset_flow(sk);
-
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 5160c710f2eb..e361ea6f3fc8 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -378,20 +378,18 @@ static int cipso_v4_cache_check(const unsigned char *key,
* negative values on failure.
*
*/
-int cipso_v4_cache_add(const struct sk_buff *skb,
+int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val = -EPERM;
u32 bkt;
struct cipso_v4_map_cache_entry *entry = NULL;
struct cipso_v4_map_cache_entry *old_entry = NULL;
- unsigned char *cipso_ptr;
u32 cipso_ptr_len;
if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
return 0;
- cipso_ptr = CIPSO_V4_OPTPTR(skb);
cipso_ptr_len = cipso_ptr[1];
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
@@ -1579,6 +1577,33 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
}
/**
+ * cipso_v4_optptr - Find the CIPSO option in the packet
+ * @skb: the packet
+ *
+ * Description:
+ * Parse the packet's IP header looking for a CIPSO option. Returns a pointer
+ * to the start of the CIPSO option on success, NULL if one is not found.
+ *
+ */
+unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
+ int optlen;
+ int taglen;
+
+ for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
+ if (optptr[0] == IPOPT_CIPSO)
+ return optptr;
+ taglen = optptr[1];
+ optlen -= taglen;
+ optptr += taglen;
+ }
+
+ return NULL;
+}
+
+/**
* cipso_v4_validate - Validate a CIPSO option
* @option: the start of the option, on error it is set to point to the error
*
@@ -2119,8 +2144,8 @@ void cipso_v4_req_delattr(struct request_sock *req)
* on success and negative values on failure.
*
*/
-static int cipso_v4_getattr(const unsigned char *cipso,
- struct netlbl_lsm_secattr *secattr)
+int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr)
{
int ret_val = -ENOMSG;
u32 doi;
@@ -2305,22 +2330,6 @@ int cipso_v4_skbuff_delattr(struct sk_buff *skb)
return 0;
}
-/**
- * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option
- * @skb: the packet
- * @secattr: the security attributes
- *
- * Description:
- * Parse the given packet's CIPSO option and return the security attributes.
- * Returns zero on success and negative values on failure.
- *
- */
-int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
- struct netlbl_lsm_secattr *secattr)
-{
- return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
-}
-
/*
* Setup Functions
*/
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 214882e7d6de..f0b4a31d7bd6 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1522,7 +1522,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
preferred, valid))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -1566,7 +1567,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
if (inet_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- RTM_NEWADDR, NLM_F_MULTI) <= 0) {
+ RTM_NEWADDR, NLM_F_MULTI) < 0) {
rcu_read_unlock();
goto done;
}
@@ -1749,7 +1750,8 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -1881,7 +1883,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
- -1) <= 0) {
+ -1) < 0) {
rcu_read_unlock();
goto done;
}
@@ -1897,7 +1899,7 @@ cont:
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
- -1) <= 0)
+ -1) < 0)
goto done;
else
h++;
@@ -1908,7 +1910,7 @@ cont:
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
- -1) <= 0)
+ -1) < 0)
goto done;
else
h++;
@@ -2320,7 +2322,7 @@ static __net_initdata struct pernet_operations devinet_ops = {
.exit = devinet_exit_net,
};
-static struct rtnl_af_ops inet_af_ops = {
+static struct rtnl_af_ops inet_af_ops __read_mostly = {
.family = AF_INET,
.fill_link_af = inet_fill_link_af,
.get_link_af_size = inet_get_link_af_size,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 23104a3f2924..57be71dd6a9e 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -67,7 +67,7 @@ static int __net_init fib4_rules_init(struct net *net)
return 0;
fail:
- kfree(local_table);
+ fib_free_table(local_table);
return -ENOMEM;
}
#else
@@ -109,6 +109,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
return tb;
}
+/* caller must hold either rtnl or rcu read lock */
struct fib_table *fib_get_table(struct net *net, u32 id)
{
struct fib_table *tb;
@@ -119,15 +120,11 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
id = RT_TABLE_MAIN;
h = id & (FIB_TABLE_HASHSZ - 1);
- rcu_read_lock();
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
- if (tb->tb_id == id) {
- rcu_read_unlock();
+ if (tb->tb_id == id)
return tb;
- }
}
- rcu_read_unlock();
return NULL;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
@@ -167,16 +164,18 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
if (ipv4_is_multicast(addr))
return RTN_MULTICAST;
+ rcu_read_lock();
+
local_table = fib_get_table(net, RT_TABLE_LOCAL);
if (local_table) {
ret = RTN_UNICAST;
- rcu_read_lock();
if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
if (!dev || dev == res.fi->fib_dev)
ret = res.type;
}
- rcu_read_unlock();
}
+
+ rcu_read_unlock();
return ret;
}
@@ -919,7 +918,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
#undef BRD1_OK
}
-static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
+static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
{
struct fib_result res;
@@ -929,6 +928,11 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
.flowi4_tos = frn->fl_tos,
.flowi4_scope = frn->fl_scope,
};
+ struct fib_table *tb;
+
+ rcu_read_lock();
+
+ tb = fib_get_table(net, frn->tb_id_in);
frn->err = -ENOENT;
if (tb) {
@@ -945,6 +949,8 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
}
local_bh_enable();
}
+
+ rcu_read_unlock();
}
static void nl_fib_input(struct sk_buff *skb)
@@ -952,7 +958,6 @@ static void nl_fib_input(struct sk_buff *skb)
struct net *net;
struct fib_result_nl *frn;
struct nlmsghdr *nlh;
- struct fib_table *tb;
u32 portid;
net = sock_net(skb->sk);
@@ -967,9 +972,7 @@ static void nl_fib_input(struct sk_buff *skb)
nlh = nlmsg_hdr(skb);
frn = (struct fib_result_nl *) nlmsg_data(nlh);
- tb = fib_get_table(net, frn->tb_id_in);
-
- nl_fib_lookup(frn, tb);
+ nl_fib_lookup(net, frn);
portid = NETLINK_CB(skb).portid; /* netlink portid */
NETLINK_CB(skb).portid = 0; /* from kernel */
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 1e4f6600b31d..825981b1049a 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -32,7 +32,6 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
unsigned int);
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
-struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
static inline void fib_result_assign(struct fib_result *res,
struct fib_info *fi)
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 8f7bd56955b0..d3db718be51d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -81,27 +81,25 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
break;
case FR_ACT_UNREACHABLE:
- err = -ENETUNREACH;
- goto errout;
+ return -ENETUNREACH;
case FR_ACT_PROHIBIT:
- err = -EACCES;
- goto errout;
+ return -EACCES;
case FR_ACT_BLACKHOLE:
default:
- err = -EINVAL;
- goto errout;
+ return -EINVAL;
}
+ rcu_read_lock();
+
tbl = fib_get_table(rule->fr_net, rule->table);
- if (!tbl)
- goto errout;
+ if (tbl)
+ err = fib_table_lookup(tbl, &flp->u.ip4,
+ (struct fib_result *)arg->result,
+ arg->flags);
- err = fib_table_lookup(tbl, &flp->u.ip4, (struct fib_result *) arg->result, arg->flags);
- if (err > 0)
- err = -EAGAIN;
-errout:
+ rcu_read_unlock();
return err;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f99f41bd15b8..1e2090ea663e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -360,7 +360,8 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(4) /* RTA_DST */
+ nla_total_size(4) /* RTA_PRIORITY */
- + nla_total_size(4); /* RTA_PREFSRC */
+ + nla_total_size(4) /* RTA_PREFSRC */
+ + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
/* space for nested metrics */
payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
@@ -410,24 +411,6 @@ errout:
rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
-/* Return the first fib alias matching TOS with
- * priority less than or equal to PRIO.
- */
-struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
-{
- if (fah) {
- struct fib_alias *fa;
- list_for_each_entry(fa, fah, fa_list) {
- if (fa->fa_tos > tos)
- continue;
- if (fa->fa_info->fib_priority >= prio ||
- fa->fa_tos < tos)
- return fa;
- }
- }
- return NULL;
-}
-
static int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort, int *last_idx,
int dflt)
@@ -859,7 +842,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (type > RTAX_MAX)
goto err_inval;
- val = nla_get_u32(nla);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+
+ nla_strlcpy(tmp, nla, sizeof(tmp));
+ val = tcp_ca_get_key_by_name(tmp);
+ if (val == TCP_CA_UNSPEC)
+ goto err_inval;
+ } else {
+ val = nla_get_u32(nla);
+ }
if (type == RTAX_ADVMSS && val > 65535 - 40)
val = 65535 - 40;
if (type == RTAX_MTU && val > 65535 - 15)
@@ -1081,7 +1073,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_nest_end(skb, mp);
}
#endif
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 18bcaf2ff2fd..3daf0224ff2e 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -83,28 +83,33 @@
#define MAX_STAT_DEPTH 32
-#define KEYLENGTH (8*sizeof(t_key))
+#define KEYLENGTH (8*sizeof(t_key))
+#define KEY_MAX ((t_key)~0)
typedef unsigned int t_key;
-#define T_TNODE 0
-#define T_LEAF 1
-#define NODE_TYPE_MASK 0x1UL
-#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
+#define IS_TNODE(n) ((n)->bits)
+#define IS_LEAF(n) (!(n)->bits)
-#define IS_TNODE(n) (!(n->parent & T_LEAF))
-#define IS_LEAF(n) (n->parent & T_LEAF)
+#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
-struct rt_trie_node {
- unsigned long parent;
- t_key key;
-};
-
-struct leaf {
- unsigned long parent;
+struct tnode {
t_key key;
- struct hlist_head list;
+ unsigned char bits; /* 2log(KEYLENGTH) bits needed */
+ unsigned char pos; /* 2log(KEYLENGTH) bits needed */
+ unsigned char slen;
+ struct tnode __rcu *parent;
struct rcu_head rcu;
+ union {
+ /* The fields in this struct are valid if bits > 0 (TNODE) */
+ struct {
+ t_key empty_children; /* KEYLENGTH bits needed */
+ t_key full_children; /* KEYLENGTH bits needed */
+ struct tnode __rcu *child[0];
+ };
+ /* This list pointer is valid if bits == 0 (LEAF) */
+ struct hlist_head list;
+ };
};
struct leaf_info {
@@ -115,20 +120,6 @@ struct leaf_info {
struct rcu_head rcu;
};
-struct tnode {
- unsigned long parent;
- t_key key;
- unsigned char pos; /* 2log(KEYLENGTH) bits needed */
- unsigned char bits; /* 2log(KEYLENGTH) bits needed */
- unsigned int full_children; /* KEYLENGTH bits needed */
- unsigned int empty_children; /* KEYLENGTH bits needed */
- union {
- struct rcu_head rcu;
- struct tnode *tnode_free;
- };
- struct rt_trie_node __rcu *child[0];
-};
-
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
unsigned int gets;
@@ -151,19 +142,13 @@ struct trie_stat {
};
struct trie {
- struct rt_trie_node __rcu *trie;
+ struct tnode __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
- struct trie_use_stats stats;
+ struct trie_use_stats __percpu *stats;
#endif
};
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
- int wasfull);
-static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
-static struct tnode *inflate(struct trie *t, struct tnode *tn);
-static struct tnode *halve(struct trie *t, struct tnode *tn);
-/* tnodes to free after resize(); protected by RTNL */
-static struct tnode *tnode_free_head;
+static void resize(struct trie *t, struct tnode *tn);
static size_t tnode_free_size;
/*
@@ -176,170 +161,101 @@ static const int sync_pages = 128;
static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
-/*
- * caller must hold RTNL
- */
-static inline struct tnode *node_parent(const struct rt_trie_node *node)
-{
- unsigned long parent;
-
- parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
+/* caller must hold RTNL */
+#define node_parent(n) rtnl_dereference((n)->parent)
- return (struct tnode *)(parent & ~NODE_TYPE_MASK);
-}
+/* caller must hold RCU read lock or RTNL */
+#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
-/*
- * caller must hold RCU read lock or RTNL
- */
-static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
+/* wrapper for rcu_assign_pointer */
+static inline void node_set_parent(struct tnode *n, struct tnode *tp)
{
- unsigned long parent;
-
- parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
-
- return (struct tnode *)(parent & ~NODE_TYPE_MASK);
+ if (n)
+ rcu_assign_pointer(n->parent, tp);
}
-/* Same as rcu_assign_pointer
- * but that macro() assumes that value is a pointer.
+#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
+
+/* This provides us with the number of children in this node; in the case of a
+ * leaf this will return 0, meaning none of the children are accessible.
*/
-static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
+static inline unsigned long tnode_child_length(const struct tnode *tn)
{
- smp_wmb();
- node->parent = (unsigned long)ptr | NODE_TYPE(node);
+ return (1ul << tn->bits) & ~(1ul);
}
-/*
- * caller must hold RTNL
- */
-static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
+/* caller must hold RTNL */
+static inline struct tnode *tnode_get_child(const struct tnode *tn,
+ unsigned long i)
{
- BUG_ON(i >= 1U << tn->bits);
-
return rtnl_dereference(tn->child[i]);
}
-/*
- * caller must hold RCU read lock or RTNL
- */
-static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
+/* caller must hold RCU read lock or RTNL */
+static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
+ unsigned long i)
{
- BUG_ON(i >= 1U << tn->bits);
-
return rcu_dereference_rtnl(tn->child[i]);
}
-static inline int tnode_child_length(const struct tnode *tn)
-{
- return 1 << tn->bits;
-}
-
-static inline t_key mask_pfx(t_key k, unsigned int l)
-{
- return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
-}
-
-static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
-{
- if (offset < KEYLENGTH)
- return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
- else
- return 0;
-}
-
-static inline int tkey_equals(t_key a, t_key b)
-{
- return a == b;
-}
-
-static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
-{
- if (bits == 0 || offset >= KEYLENGTH)
- return 1;
- bits = bits > KEYLENGTH ? KEYLENGTH : bits;
- return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
-}
-
-static inline int tkey_mismatch(t_key a, int offset, t_key b)
-{
- t_key diff = a ^ b;
- int i = offset;
-
- if (!diff)
- return 0;
- while ((diff << i) >> (KEYLENGTH-1) == 0)
- i++;
- return i;
-}
-
-/*
- To understand this stuff, an understanding of keys and all their bits is
- necessary. Every node in the trie has a key associated with it, but not
- all of the bits in that key are significant.
-
- Consider a node 'n' and its parent 'tp'.
-
- If n is a leaf, every bit in its key is significant. Its presence is
- necessitated by path compression, since during a tree traversal (when
- searching for a leaf - unless we are doing an insertion) we will completely
- ignore all skipped bits we encounter. Thus we need to verify, at the end of
- a potentially successful search, that we have indeed been walking the
- correct key path.
-
- Note that we can never "miss" the correct key in the tree if present by
- following the wrong path. Path compression ensures that segments of the key
- that are the same for all keys with a given prefix are skipped, but the
- skipped part *is* identical for each node in the subtrie below the skipped
- bit! trie_insert() in this implementation takes care of that - note the
- call to tkey_sub_equals() in trie_insert().
-
- if n is an internal node - a 'tnode' here, the various parts of its key
- have many different meanings.
-
- Example:
- _________________________________________________________________
- | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
- -----------------------------------------------------------------
- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-
- _________________________________________________________________
- | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
- -----------------------------------------------------------------
- 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
-
- tp->pos = 7
- tp->bits = 3
- n->pos = 15
- n->bits = 4
-
- First, let's just ignore the bits that come before the parent tp, that is
- the bits from 0 to (tp->pos-1). They are *known* but at this point we do
- not use them for anything.
-
- The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
- index into the parent's child array. That is, they will be used to find
- 'n' among tp's children.
-
- The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
- for the node n.
-
- All the bits we have seen so far are significant to the node n. The rest
- of the bits are really not needed or indeed known in n->key.
-
- The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
- n's child array, and will of course be different for each child.
-
-
- The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
- at this point.
-
-*/
-
-static inline void check_tnode(const struct tnode *tn)
-{
- WARN_ON(tn && tn->pos+tn->bits > 32);
-}
+/* To understand this stuff, an understanding of keys and all their bits is
+ * necessary. Every node in the trie has a key associated with it, but not
+ * all of the bits in that key are significant.
+ *
+ * Consider a node 'n' and its parent 'tp'.
+ *
+ * If n is a leaf, every bit in its key is significant. Its presence is
+ * necessitated by path compression, since during a tree traversal (when
+ * searching for a leaf - unless we are doing an insertion) we will completely
+ * ignore all skipped bits we encounter. Thus we need to verify, at the end of
+ * a potentially successful search, that we have indeed been walking the
+ * correct key path.
+ *
+ * Note that we can never "miss" the correct key in the tree if present by
+ * following the wrong path. Path compression ensures that segments of the key
+ * that are the same for all keys with a given prefix are skipped, but the
+ * skipped part *is* identical for each node in the subtrie below the skipped
+ * bit! trie_insert() in this implementation takes care of that.
+ *
+ * if n is an internal node - a 'tnode' here, the various parts of its key
+ * have many different meanings.
+ *
+ * Example:
+ * _________________________________________________________________
+ * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
+ * -----------------------------------------------------------------
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *
+ * _________________________________________________________________
+ * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
+ * -----------------------------------------------------------------
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ *
+ * tp->pos = 22
+ * tp->bits = 3
+ * n->pos = 13
+ * n->bits = 4
+ *
+ * First, let's just ignore the bits that come before the parent tp, that is
+ * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
+ * point we do not use them for anything.
+ *
+ * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
+ * index into the parent's child array. That is, they will be used to find
+ * 'n' among tp's children.
+ *
+ * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
+ * for the node n.
+ *
+ * All the bits we have seen so far are significant to the node n. The rest
+ * of the bits are really not needed or indeed known in n->key.
+ *
+ * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
+ * n's child array, and will of course be different for each child.
+ *
+ * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
+ * at this point.
+ */
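A quick way to sanity-check the layout above is to run the index arithmetic by hand. The sketch below is a standalone userspace program, not kernel code: get_index() is defined elsewhere in fib_trie.c and is not visible in this hunk, so it is re-declared here with the behaviour the callers in this patch rely on (xor away the node key, shift down by pos), and the key value is made up while pos/bits come from the example table.

        #include <stdio.h>
        #include <stdint.h>

        typedef uint32_t t_key;

        struct node { t_key key; unsigned char pos, bits; };

        /* assumed behaviour of the kernel's get_index() helper */
        static unsigned long get_index(t_key key, const struct node *n)
        {
                return (unsigned long)(key ^ n->key) >> n->pos;
        }

        int main(void)
        {
                /* the parent from the example: tp->pos = 22, tp->bits = 3 */
                struct node tp = { .key = 0, .pos = 22, .bits = 3 };
                t_key key = 0x01400000;         /* bits 24 and 22 set -> "N" = 101 */
                unsigned long index = get_index(key, &tp);

                if (index & (~0ul << tp.bits))
                        printf("mismatch above the index bits (skipped/prefix bits differ)\n");
                else
                        printf("child index in tp = %lu\n", index);      /* prints 5 */
                return 0;
        }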
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
@@ -357,17 +273,23 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
call_rcu(&fa->rcu, __alias_free_mem);
}
-static void __leaf_free_rcu(struct rcu_head *head)
-{
- struct leaf *l = container_of(head, struct leaf, rcu);
- kmem_cache_free(trie_leaf_kmem, l);
-}
+#define TNODE_KMALLOC_MAX \
+ ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
-static inline void free_leaf(struct leaf *l)
+static void __node_free_rcu(struct rcu_head *head)
{
- call_rcu(&l->rcu, __leaf_free_rcu);
+ struct tnode *n = container_of(head, struct tnode, rcu);
+
+ if (IS_LEAF(n))
+ kmem_cache_free(trie_leaf_kmem, n);
+ else if (n->bits <= TNODE_KMALLOC_MAX)
+ kfree(n);
+ else
+ vfree(n);
}
+#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
+
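TNODE_KMALLOC_MAX above is simply the largest 'bits' value whose child array still fits, together with the tnode header, in a single page; __node_free_rcu() uses it to choose kfree() over vfree(). A rough userspace sketch of that arithmetic, with hypothetical sizes standing in for PAGE_SIZE, sizeof(struct tnode) and the pointer size:

        #include <stdio.h>

        /* stand-in values; the real ones depend on the architecture/config */
        #define PAGE_SIZE_SKETCH   4096ul
        #define TNODE_SIZE_SKETCH  40ul     /* hypothetical sizeof(struct tnode) */
        #define PTR_SIZE_SKETCH    8ul

        int main(void)
        {
                unsigned long slots = (PAGE_SIZE_SKETCH - TNODE_SIZE_SKETCH) / PTR_SIZE_SKETCH;
                unsigned int max_bits = 0;

                /* ilog2(slots): largest value such that (1 << max_bits) <= slots */
                while ((2ul << max_bits) <= slots)
                        max_bits++;

                printf("bits <= %u -> kfree(), bits > %u -> vfree()\n",
                       max_bits, max_bits);
                return 0;
        }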
static inline void free_leaf_info(struct leaf_info *leaf)
{
kfree_rcu(leaf, rcu);
@@ -381,56 +303,31 @@ static struct tnode *tnode_alloc(size_t size)
return vzalloc(size);
}
-static void __tnode_free_rcu(struct rcu_head *head)
-{
- struct tnode *tn = container_of(head, struct tnode, rcu);
- size_t size = sizeof(struct tnode) +
- (sizeof(struct rt_trie_node *) << tn->bits);
-
- if (size <= PAGE_SIZE)
- kfree(tn);
- else
- vfree(tn);
-}
-
-static inline void tnode_free(struct tnode *tn)
-{
- if (IS_LEAF(tn))
- free_leaf((struct leaf *) tn);
- else
- call_rcu(&tn->rcu, __tnode_free_rcu);
-}
-
-static void tnode_free_safe(struct tnode *tn)
+static inline void empty_child_inc(struct tnode *n)
{
- BUG_ON(IS_LEAF(tn));
- tn->tnode_free = tnode_free_head;
- tnode_free_head = tn;
- tnode_free_size += sizeof(struct tnode) +
- (sizeof(struct rt_trie_node *) << tn->bits);
+ ++n->empty_children ? : ++n->full_children;
}
-static void tnode_free_flush(void)
+static inline void empty_child_dec(struct tnode *n)
{
- struct tnode *tn;
-
- while ((tn = tnode_free_head)) {
- tnode_free_head = tn->tnode_free;
- tn->tnode_free = NULL;
- tnode_free(tn);
- }
-
- if (tnode_free_size >= PAGE_SIZE * sync_pages) {
- tnode_free_size = 0;
- synchronize_rcu();
- }
+ n->empty_children-- ? : n->full_children--;
}
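The empty middle operand of ?: above is the GNU conditional extension the kernel already uses, and the pairing of the two counters is deliberate: when a node has bits == KEYLENGTH its true child count (1 << 32) no longer fits in the counter, so tnode_new() stores it as empty_children = 0 with full_children = 1 acting as the carry, and empty_child_inc()/empty_child_dec() keep that carry consistent. A toy userspace version, shrunk to 8-bit counters (an illustrative struct, not the real one) so the wrap is easy to see:

        #include <stdio.h>
        #include <stdint.h>

        /* toy model: keys are 8 bits wide, so a node covering the whole key has
         * 256 children and the count wraps an 8-bit counter, just as a 32-bit
         * counter wraps when bits == KEYLENGTH in the real structure
         */
        struct toy_tnode {
                uint8_t empty_children;         /* wraps at 256 */
                uint8_t full_children;          /* absorbs the carry */
        };

        static void toy_empty_child_inc(struct toy_tnode *n)
        {
                ++n->empty_children ? : ++n->full_children;
        }

        static void toy_empty_child_dec(struct toy_tnode *n)
        {
                n->empty_children-- ? : n->full_children--;
        }

        int main(void)
        {
                /* all 256 children empty: stored as empty = 0, full = 1 (carry) */
                struct toy_tnode n = { .empty_children = 0, .full_children = 1 };

                toy_empty_child_dec(&n);        /* 256 -> 255 empty children */
                printf("empty=%u full=%u\n", n.empty_children, n.full_children);

                toy_empty_child_inc(&n);        /* 255 -> 256 empty children */
                printf("empty=%u full=%u\n", n.empty_children, n.full_children);
                return 0;
        }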
-static struct leaf *leaf_new(void)
+static struct tnode *leaf_new(t_key key)
{
- struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
+ struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
if (l) {
- l->parent = T_LEAF;
+ l->parent = NULL;
+ /* set key and pos to reflect full key value
+ * any trailing zeros in the key should be ignored
+ * as the nodes are searched
+ */
+ l->key = key;
+ l->slen = 0;
+ l->pos = 0;
+ /* set bits to 0 indicating we are not a tnode */
+ l->bits = 0;
+
INIT_HLIST_HEAD(&l->list);
}
return l;
@@ -449,462 +346,530 @@ static struct leaf_info *leaf_info_new(int plen)
static struct tnode *tnode_new(t_key key, int pos, int bits)
{
- size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
+ size_t sz = offsetof(struct tnode, child[1ul << bits]);
struct tnode *tn = tnode_alloc(sz);
+ unsigned int shift = pos + bits;
+
+ /* verify bits is nonzero and pos + bits does not exceed the key length */
+ BUG_ON(!bits || (shift > KEYLENGTH));
if (tn) {
- tn->parent = T_TNODE;
+ tn->parent = NULL;
+ tn->slen = pos;
tn->pos = pos;
tn->bits = bits;
- tn->key = key;
- tn->full_children = 0;
- tn->empty_children = 1<<bits;
+ tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
+ if (bits == KEYLENGTH)
+ tn->full_children = 1;
+ else
+ tn->empty_children = 1ul << bits;
}
pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct rt_trie_node *) << bits);
+ sizeof(struct tnode *) << bits);
return tn;
}
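The key stored in a new tnode is the search key with everything at or below the index bits cleared, so the node only carries the prefix it actually covers; the (shift < KEYLENGTH) guard avoids an undefined 32-bit shift when the node spans the whole key. A small userspace illustration of the same masking expression, reusing the pos/bits values from the comment block near the top of the file (the address is arbitrary):

        #include <stdio.h>
        #include <stdint.h>

        #define KEYLENGTH 32

        int main(void)
        {
                uint32_t key = 0xc0a80101;        /* arbitrary example: 192.168.1.1 */
                unsigned int pos = 13, bits = 4;  /* n->pos / n->bits from the example */
                unsigned int shift = pos + bits;

                /* same expression as in tnode_new() above */
                uint32_t tkey = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;

                printf("key %08x -> tnode key %08x (low %u bits cleared)\n",
                       (unsigned)key, (unsigned)tkey, shift);
                return 0;
        }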
-/*
- * Check whether a tnode 'n' is "full", i.e. it is an internal node
+/* Check whether a tnode 'n' is "full", i.e. it is an internal node
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
-
-static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
+static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
{
- if (n == NULL || IS_LEAF(n))
- return 0;
-
- return ((struct tnode *) n)->pos == tn->pos + tn->bits;
+ return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}
-static inline void put_child(struct tnode *tn, int i,
- struct rt_trie_node *n)
-{
- tnode_put_child_reorg(tn, i, n, -1);
-}
-
- /*
- * Add a child at position i overwriting the old value.
- * Update the value of full_children and empty_children.
- */
-
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
- int wasfull)
+/* Add a child at position i overwriting the old value.
+ * Update the value of full_children and empty_children.
+ */
+static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
{
- struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
- int isfull;
+ struct tnode *chi = tnode_get_child(tn, i);
+ int isfull, wasfull;
- BUG_ON(i >= 1<<tn->bits);
+ BUG_ON(i >= tnode_child_length(tn));
- /* update emptyChildren */
+ /* update emptyChildren, overflow into fullChildren */
if (n == NULL && chi != NULL)
- tn->empty_children++;
- else if (n != NULL && chi == NULL)
- tn->empty_children--;
+ empty_child_inc(tn);
+ if (n != NULL && chi == NULL)
+ empty_child_dec(tn);
/* update fullChildren */
- if (wasfull == -1)
- wasfull = tnode_full(tn, chi);
-
+ wasfull = tnode_full(tn, chi);
isfull = tnode_full(tn, n);
+
if (wasfull && !isfull)
tn->full_children--;
else if (!wasfull && isfull)
tn->full_children++;
- if (n)
- node_set_parent(n, tn);
+ if (n && (tn->slen < n->slen))
+ tn->slen = n->slen;
rcu_assign_pointer(tn->child[i], n);
}
-#define MAX_WORK 10
-static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
+static void update_children(struct tnode *tn)
{
- int i;
- struct tnode *old_tn;
- int inflate_threshold_use;
- int halve_threshold_use;
- int max_work;
+ unsigned long i;
- if (!tn)
- return NULL;
+ /* update all of the child parent pointers */
+ for (i = tnode_child_length(tn); i;) {
+ struct tnode *inode = tnode_get_child(tn, --i);
- pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
- tn, inflate_threshold, halve_threshold);
+ if (!inode)
+ continue;
- /* No children */
- if (tn->empty_children == tnode_child_length(tn)) {
- tnode_free_safe(tn);
- return NULL;
+ /* Either update the children of a tnode that
+ * already belongs to us or update the child
+ * to point to ourselves.
+ */
+ if (node_parent(inode) == tn)
+ update_children(inode);
+ else
+ node_set_parent(inode, tn);
}
- /* One child */
- if (tn->empty_children == tnode_child_length(tn) - 1)
- goto one_child;
- /*
- * Double as long as the resulting node has a number of
- * nonempty nodes that are above the threshold.
- */
-
- /*
- * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
- * the Helsinki University of Technology and Matti Tikkanen of Nokia
- * Telecommunications, page 6:
- * "A node is doubled if the ratio of non-empty children to all
- * children in the *doubled* node is at least 'high'."
- *
- * 'high' in this instance is the variable 'inflate_threshold'. It
- * is expressed as a percentage, so we multiply it with
- * tnode_child_length() and instead of multiplying by 2 (since the
- * child array will be doubled by inflate()) and multiplying
- * the left-hand side by 100 (to handle the percentage thing) we
- * multiply the left-hand side by 50.
- *
- * The left-hand side may look a bit weird: tnode_child_length(tn)
- * - tn->empty_children is of course the number of non-null children
- * in the current node. tn->full_children is the number of "full"
- * children, that is non-null tnodes with a skip value of 0.
- * All of those will be doubled in the resulting inflated tnode, so
- * we just count them one extra time here.
- *
- * A clearer way to write this would be:
- *
- * to_be_doubled = tn->full_children;
- * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
- * tn->full_children;
- *
- * new_child_length = tnode_child_length(tn) * 2;
- *
- * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
- * new_child_length;
- * if (new_fill_factor >= inflate_threshold)
- *
- * ...and so on, tho it would mess up the while () loop.
- *
- * anyway,
- * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
- * inflate_threshold
- *
- * avoid a division:
- * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
- * inflate_threshold * new_child_length
- *
- * expand not_to_be_doubled and to_be_doubled, and shorten:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
- * tn->full_children) >= inflate_threshold * new_child_length
- *
- * expand new_child_length:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
- * tn->full_children) >=
- * inflate_threshold * tnode_child_length(tn) * 2
- *
- * shorten again:
- * 50 * (tn->full_children + tnode_child_length(tn) -
- * tn->empty_children) >= inflate_threshold *
- * tnode_child_length(tn)
- *
- */
+}
- check_tnode(tn);
+static inline void put_child_root(struct tnode *tp, struct trie *t,
+ t_key key, struct tnode *n)
+{
+ if (tp)
+ put_child(tp, get_index(key, tp), n);
+ else
+ rcu_assign_pointer(t->trie, n);
+}
- /* Keep root node larger */
+static inline void tnode_free_init(struct tnode *tn)
+{
+ tn->rcu.next = NULL;
+}
- if (!node_parent((struct rt_trie_node *)tn)) {
- inflate_threshold_use = inflate_threshold_root;
- halve_threshold_use = halve_threshold_root;
- } else {
- inflate_threshold_use = inflate_threshold;
- halve_threshold_use = halve_threshold;
- }
+static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
+{
+ n->rcu.next = tn->rcu.next;
+ tn->rcu.next = &n->rcu;
+}
- max_work = MAX_WORK;
- while ((tn->full_children > 0 && max_work-- &&
- 50 * (tn->full_children + tnode_child_length(tn)
- - tn->empty_children)
- >= inflate_threshold_use * tnode_child_length(tn))) {
+static void tnode_free(struct tnode *tn)
+{
+ struct callback_head *head = &tn->rcu;
- old_tn = tn;
- tn = inflate(t, tn);
+ while (head) {
+ head = head->next;
+ tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
+ node_free(tn);
- if (IS_ERR(tn)) {
- tn = old_tn;
-#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.resize_node_skipped++;
-#endif
- break;
- }
+ tn = container_of(head, struct tnode, rcu);
}
- check_tnode(tn);
-
- /* Return if at least one inflate is run */
- if (max_work != MAX_WORK)
- return (struct rt_trie_node *) tn;
-
- /*
- * Halve as long as the number of empty children in this
- * node is above threshold.
- */
-
- max_work = MAX_WORK;
- while (tn->bits > 1 && max_work-- &&
- 100 * (tnode_child_length(tn) - tn->empty_children) <
- halve_threshold_use * tnode_child_length(tn)) {
-
- old_tn = tn;
- tn = halve(t, tn);
- if (IS_ERR(tn)) {
- tn = old_tn;
-#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.resize_node_skipped++;
-#endif
- break;
- }
+ if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+ tnode_free_size = 0;
+ synchronize_rcu();
}
+}
+static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
+{
+ struct tnode *tp = node_parent(oldtnode);
+ unsigned long i;
- /* Only one child remains */
- if (tn->empty_children == tnode_child_length(tn) - 1) {
-one_child:
- for (i = 0; i < tnode_child_length(tn); i++) {
- struct rt_trie_node *n;
-
- n = rtnl_dereference(tn->child[i]);
- if (!n)
- continue;
-
- /* compress one level */
+ /* setup the parent pointer out of and back into this node */
+ NODE_INIT_PARENT(tn, tp);
+ put_child_root(tp, t, tn->key, tn);
- node_set_parent(n, NULL);
- tnode_free_safe(tn);
- return n;
- }
- }
- return (struct rt_trie_node *) tn;
-}
+ /* update all of the child parent pointers */
+ update_children(tn);
+ /* all pointers should be clean so we are done */
+ tnode_free(oldtnode);
-static void tnode_clean_free(struct tnode *tn)
-{
- int i;
- struct tnode *tofree;
+ /* resize children now that oldtnode is freed */
+ for (i = tnode_child_length(tn); i;) {
+ struct tnode *inode = tnode_get_child(tn, --i);
- for (i = 0; i < tnode_child_length(tn); i++) {
- tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
- if (tofree)
- tnode_free(tofree);
+ /* resize child node */
+ if (tnode_full(tn, inode))
+ resize(t, inode);
}
- tnode_free(tn);
}
-static struct tnode *inflate(struct trie *t, struct tnode *tn)
+static int inflate(struct trie *t, struct tnode *oldtnode)
{
- struct tnode *oldtnode = tn;
- int olen = tnode_child_length(tn);
- int i;
+ struct tnode *tn;
+ unsigned long i;
+ t_key m;
pr_debug("In inflate\n");
- tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
-
+ tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
if (!tn)
- return ERR_PTR(-ENOMEM);
-
- /*
- * Preallocate and store tnodes before the actual work so we
- * don't get into an inconsistent state if memory allocation
- * fails. In case of failure we return the oldnode and inflate
- * of tnode is ignored.
- */
-
- for (i = 0; i < olen; i++) {
- struct tnode *inode;
-
- inode = (struct tnode *) tnode_get_child(oldtnode, i);
- if (inode &&
- IS_TNODE(inode) &&
- inode->pos == oldtnode->pos + oldtnode->bits &&
- inode->bits > 1) {
- struct tnode *left, *right;
- t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
-
- left = tnode_new(inode->key&(~m), inode->pos + 1,
- inode->bits - 1);
- if (!left)
- goto nomem;
-
- right = tnode_new(inode->key|m, inode->pos + 1,
- inode->bits - 1);
-
- if (!right) {
- tnode_free(left);
- goto nomem;
- }
+ return -ENOMEM;
- put_child(tn, 2*i, (struct rt_trie_node *) left);
- put_child(tn, 2*i+1, (struct rt_trie_node *) right);
- }
- }
+ /* prepare oldtnode to be freed */
+ tnode_free_init(oldtnode);
- for (i = 0; i < olen; i++) {
- struct tnode *inode;
- struct rt_trie_node *node = tnode_get_child(oldtnode, i);
- struct tnode *left, *right;
- int size, j;
+ /* Assemble all of the pointers in our cluster; in this case that
+ * means all of the pointers out of our allocated nodes that point
+ * to existing tnodes, plus the links between our allocated nodes.
+ */
+ for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
+ struct tnode *inode = tnode_get_child(oldtnode, --i);
+ struct tnode *node0, *node1;
+ unsigned long j, k;
/* An empty child */
- if (node == NULL)
+ if (inode == NULL)
continue;
/* A leaf or an internal node with skipped bits */
-
- if (IS_LEAF(node) || ((struct tnode *) node)->pos >
- tn->pos + tn->bits - 1) {
- put_child(tn,
- tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
- node);
+ if (!tnode_full(oldtnode, inode)) {
+ put_child(tn, get_index(inode->key, tn), inode);
continue;
}
- /* An internal node with two children */
- inode = (struct tnode *) node;
+ /* drop the node in the old tnode free list */
+ tnode_free_append(oldtnode, inode);
+ /* An internal node with two children */
if (inode->bits == 1) {
- put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
- put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));
-
- tnode_free_safe(inode);
+ put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
+ put_child(tn, 2 * i, tnode_get_child(inode, 0));
continue;
}
- /* An internal node with more than two children */
-
/* We will replace this node 'inode' with two new
- * ones, 'left' and 'right', each with half of the
+ * ones, 'node0' and 'node1', each with half of the
* original children. The two new nodes will have
* a position one bit further down the key and this
* means that the "significant" part of their keys
* (see the discussion near the top of this file)
* will differ by one bit, which will be "0" in
- * left's key and "1" in right's key. Since we are
+ * node0's key and "1" in node1's key. Since we are
* moving the key position by one step, the bit that
* we are moving away from - the bit at position
- * (inode->pos) - is the one that will differ between
- * left and right. So... we synthesize that bit in the
- * two new keys.
- * The mask 'm' below will be a single "one" bit at
- * the position (inode->pos)
+ * (tn->pos) - is the one that will differ between
+ * node0 and node1. So... we synthesize that bit in the
+ * two new keys.
*/
+ node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
+ if (!node1)
+ goto nomem;
+ node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);
+
+ tnode_free_append(tn, node1);
+ if (!node0)
+ goto nomem;
+ tnode_free_append(tn, node0);
+
+ /* populate child pointers in new nodes */
+ for (k = tnode_child_length(inode), j = k / 2; j;) {
+ put_child(node1, --j, tnode_get_child(inode, --k));
+ put_child(node0, j, tnode_get_child(inode, j));
+ put_child(node1, --j, tnode_get_child(inode, --k));
+ put_child(node0, j, tnode_get_child(inode, j));
+ }
- /* Use the old key, but set the new significant
- * bit to zero.
- */
+ /* link new nodes to parent */
+ NODE_INIT_PARENT(node1, tn);
+ NODE_INIT_PARENT(node0, tn);
+
+ /* link parent to nodes */
+ put_child(tn, 2 * i + 1, node1);
+ put_child(tn, 2 * i, node0);
+ }
+
+ /* setup the parent pointers into and out of this node */
+ replace(t, oldtnode, tn);
+
+ return 0;
+nomem:
+ /* all pointers should be clean so we are done */
+ tnode_free(tn);
+ return -ENOMEM;
+}
+
+static int halve(struct trie *t, struct tnode *oldtnode)
+{
+ struct tnode *tn;
+ unsigned long i;
+
+ pr_debug("In halve\n");
- left = (struct tnode *) tnode_get_child(tn, 2*i);
- put_child(tn, 2*i, NULL);
+ tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
+ if (!tn)
+ return -ENOMEM;
- BUG_ON(!left);
+ /* prepare oldtnode to be freed */
+ tnode_free_init(oldtnode);
- right = (struct tnode *) tnode_get_child(tn, 2*i+1);
- put_child(tn, 2*i+1, NULL);
+ /* Assemble all of the pointers in our cluster; in this case that
+ * means all of the pointers out of our allocated nodes that point
+ * to existing tnodes, plus the links between our allocated nodes.
+ */
+ for (i = tnode_child_length(oldtnode); i;) {
+ struct tnode *node1 = tnode_get_child(oldtnode, --i);
+ struct tnode *node0 = tnode_get_child(oldtnode, --i);
+ struct tnode *inode;
- BUG_ON(!right);
+ /* At least one of the children is empty */
+ if (!node1 || !node0) {
+ put_child(tn, i / 2, node1 ? : node0);
+ continue;
+ }
- size = tnode_child_length(left);
- for (j = 0; j < size; j++) {
- put_child(left, j, rtnl_dereference(inode->child[j]));
- put_child(right, j, rtnl_dereference(inode->child[j + size]));
+ /* Two nonempty children */
+ inode = tnode_new(node0->key, oldtnode->pos, 1);
+ if (!inode) {
+ tnode_free(tn);
+ return -ENOMEM;
}
- put_child(tn, 2*i, resize(t, left));
- put_child(tn, 2*i+1, resize(t, right));
+ tnode_free_append(tn, inode);
+
+ /* initialize pointers out of node */
+ put_child(inode, 1, node1);
+ put_child(inode, 0, node0);
+ NODE_INIT_PARENT(inode, tn);
- tnode_free_safe(inode);
+ /* link parent to node */
+ put_child(tn, i / 2, inode);
}
- tnode_free_safe(oldtnode);
- return tn;
-nomem:
- tnode_clean_free(tn);
- return ERR_PTR(-ENOMEM);
+
+ /* setup the parent pointers into and out of this node */
+ replace(t, oldtnode, tn);
+
+ return 0;
}
-static struct tnode *halve(struct trie *t, struct tnode *tn)
+static void collapse(struct trie *t, struct tnode *oldtnode)
{
- struct tnode *oldtnode = tn;
- struct rt_trie_node *left, *right;
- int i;
- int olen = tnode_child_length(tn);
+ struct tnode *n, *tp;
+ unsigned long i;
- pr_debug("In halve\n");
+ /* scan the tnode looking for that one child that might still exist */
+ for (n = NULL, i = tnode_child_length(oldtnode); !n && i;)
+ n = tnode_get_child(oldtnode, --i);
- tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
+ /* compress one level */
+ tp = node_parent(oldtnode);
+ put_child_root(tp, t, oldtnode->key, n);
+ node_set_parent(n, tp);
- if (!tn)
- return ERR_PTR(-ENOMEM);
+ /* drop dead node */
+ node_free(oldtnode);
+}
- /*
- * Preallocate and store tnodes before the actual work so we
- * don't get into an inconsistent state if memory allocation
- * fails. In case of failure we return the oldnode and halve
- * of tnode is ignored.
+static unsigned char update_suffix(struct tnode *tn)
+{
+ unsigned char slen = tn->pos;
+ unsigned long stride, i;
+
+ /* search through the list of children looking for nodes that might
+ * have a suffix greater than the one we currently have. This is
+ * why we start with a stride of 2 since a stride of 1 would
+ * represent the nodes with suffix length equal to tn->pos
*/
+ for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
+ struct tnode *n = tnode_get_child(tn, i);
- for (i = 0; i < olen; i += 2) {
- left = tnode_get_child(oldtnode, i);
- right = tnode_get_child(oldtnode, i+1);
+ if (!n || (n->slen <= slen))
+ continue;
- /* Two nonempty children */
- if (left && right) {
- struct tnode *newn;
+ /* update stride and slen based on new value */
+ stride <<= (n->slen - slen);
+ slen = n->slen;
+ i &= ~(stride - 1);
- newn = tnode_new(left->key, tn->pos + tn->bits, 1);
+ /* if slen covers all but the last bit we can stop here
+ * there will be nothing longer than that since only node
+ * 0 and 1 << (bits - 1) could have that as their suffix
+ * length.
+ */
+ if ((slen + 1) >= (tn->pos + tn->bits))
+ break;
+ }
- if (!newn)
- goto nomem;
+ tn->slen = slen;
- put_child(tn, i/2, (struct rt_trie_node *)newn);
- }
+ return slen;
+}
- }
+/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
+ * the Helsinki University of Technology and Matti Tikkanen of Nokia
+ * Telecommunications, page 6:
+ * "A node is doubled if the ratio of non-empty children to all
+ * children in the *doubled* node is at least 'high'."
+ *
+ * 'high' in this instance is the variable 'inflate_threshold'. It
+ * is expressed as a percentage, so we multiply it with
+ * tnode_child_length() and instead of multiplying by 2 (since the
+ * child array will be doubled by inflate()) and multiplying
+ * the left-hand side by 100 (to handle the percentage thing) we
+ * multiply the left-hand side by 50.
+ *
+ * The left-hand side may look a bit weird: tnode_child_length(tn)
+ * - tn->empty_children is of course the number of non-null children
+ * in the current node. tn->full_children is the number of "full"
+ * children, that is non-null tnodes with a skip value of 0.
+ * All of those will be doubled in the resulting inflated tnode, so
+ * we just count them one extra time here.
+ *
+ * A clearer way to write this would be:
+ *
+ * to_be_doubled = tn->full_children;
+ * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
+ * tn->full_children;
+ *
+ * new_child_length = tnode_child_length(tn) * 2;
+ *
+ * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
+ * new_child_length;
+ * if (new_fill_factor >= inflate_threshold)
+ *
+ * ...and so on, tho it would mess up the while () loop.
+ *
+ * anyway,
+ * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
+ * inflate_threshold
+ *
+ * avoid a division:
+ * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
+ * inflate_threshold * new_child_length
+ *
+ * expand not_to_be_doubled and to_be_doubled, and shorten:
+ * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * tn->full_children) >= inflate_threshold * new_child_length
+ *
+ * expand new_child_length:
+ * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * tn->full_children) >=
+ * inflate_threshold * tnode_child_length(tn) * 2
+ *
+ * shorten again:
+ * 50 * (tn->full_children + tnode_child_length(tn) -
+ * tn->empty_children) >= inflate_threshold *
+ * tnode_child_length(tn)
+ *
+ */
+static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
+{
+ unsigned long used = tnode_child_length(tn);
+ unsigned long threshold = used;
- for (i = 0; i < olen; i += 2) {
- struct tnode *newBinNode;
+ /* Keep root node larger */
+ threshold *= tp ? inflate_threshold : inflate_threshold_root;
+ used -= tn->empty_children;
+ used += tn->full_children;
- left = tnode_get_child(oldtnode, i);
- right = tnode_get_child(oldtnode, i+1);
+ /* if bits == KEYLENGTH then pos = 0, and will fail below */
- /* At least one of the children is empty */
- if (left == NULL) {
- if (right == NULL) /* Both are empty */
- continue;
- put_child(tn, i/2, right);
- continue;
+ return (used > 1) && tn->pos && ((50 * used) >= threshold);
+}
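To make the comment above concrete, here is the inflate decision run on made-up numbers in plain userspace C. The root thresholds are defined elsewhere in fib_trie.c and are not visible in this hunk, so the value below is only a stand-in; the sketch also leaves out the tn->pos check that the real should_inflate() applies for the bits == KEYLENGTH case.

        #include <stdio.h>

        static const int inflate_threshold = 50;
        static const int inflate_threshold_root = 30;   /* stand-in value */

        /* same arithmetic as should_inflate(), minus the tn->pos check */
        static int toy_should_inflate(unsigned long child_length,
                                      unsigned long empty, unsigned long full,
                                      int is_root)
        {
                unsigned long used = child_length;
                unsigned long threshold = child_length *
                        (is_root ? inflate_threshold_root : inflate_threshold);

                used -= empty;          /* non-empty children */
                used += full;           /* full children are counted twice */

                return (used > 1) && ((50 * used) >= threshold);
        }

        int main(void)
        {
                /* 16-slot node, 6 empty slots, 6 full children:
                 * used = 16 - 6 + 6 = 16, threshold = 16 * 50 = 800,
                 * 50 * 16 = 800 >= 800 -> double the node
                 */
                printf("inflate? %d\n", toy_should_inflate(16, 6, 6, 0));

                /* same fill but only 2 full children: 50 * 12 = 600 < 800 -> keep */
                printf("inflate? %d\n", toy_should_inflate(16, 6, 2, 0));
                return 0;
        }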
+
+static bool should_halve(const struct tnode *tp, const struct tnode *tn)
+{
+ unsigned long used = tnode_child_length(tn);
+ unsigned long threshold = used;
+
+ /* Keep root node larger */
+ threshold *= tp ? halve_threshold : halve_threshold_root;
+ used -= tn->empty_children;
+
+ /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
+
+ return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
+}
+
+static bool should_collapse(const struct tnode *tn)
+{
+ unsigned long used = tnode_child_length(tn);
+
+ used -= tn->empty_children;
+
+ /* account for bits == KEYLENGTH case */
+ if ((tn->bits == KEYLENGTH) && tn->full_children)
+ used -= KEY_MAX;
+
+ /* One child or none, time to drop us from the trie */
+ return used < 2;
+}
+
+#define MAX_WORK 10
+static void resize(struct trie *t, struct tnode *tn)
+{
+ struct tnode *tp = node_parent(tn);
+ struct tnode __rcu **cptr;
+ int max_work = MAX_WORK;
+
+ pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
+ tn, inflate_threshold, halve_threshold);
+
+ /* track the tnode via the pointer from the parent instead of
+ * doing it ourselves. This way we can let RCU fully do its
+ * thing without us interfering
+ */
+ cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
+ BUG_ON(tn != rtnl_dereference(*cptr));
+
+ /* Double as long as the resulting node has a number of
+ * nonempty nodes that are above the threshold.
+ */
+ while (should_inflate(tp, tn) && max_work) {
+ if (inflate(t, tn)) {
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ this_cpu_inc(t->stats->resize_node_skipped);
+#endif
+ break;
}
- if (right == NULL) {
- put_child(tn, i/2, left);
- continue;
+ max_work--;
+ tn = rtnl_dereference(*cptr);
+ }
+
+ /* Return if at least one inflate is run */
+ if (max_work != MAX_WORK)
+ return;
+
+ /* Halve as long as the number of empty children in this
+ * node is above threshold.
+ */
+ while (should_halve(tp, tn) && max_work) {
+ if (halve(t, tn)) {
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ this_cpu_inc(t->stats->resize_node_skipped);
+#endif
+ break;
}
- /* Two nonempty children */
- newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
- put_child(tn, i/2, NULL);
- put_child(newBinNode, 0, left);
- put_child(newBinNode, 1, right);
- put_child(tn, i/2, resize(t, newBinNode));
+ max_work--;
+ tn = rtnl_dereference(*cptr);
+ }
+
+ /* Only one child remains */
+ if (should_collapse(tn)) {
+ collapse(t, tn);
+ return;
+ }
+
+ /* Return if at least one deflate was run */
+ if (max_work != MAX_WORK)
+ return;
+
+ /* push the suffix length to the parent node */
+ if (tn->slen > tn->pos) {
+ unsigned char slen = update_suffix(tn);
+
+ if (tp && (slen > tp->slen))
+ tp->slen = slen;
}
- tnode_free_safe(oldtnode);
- return tn;
-nomem:
- tnode_clean_free(tn);
- return ERR_PTR(-ENOMEM);
}
/* readside must use rcu_read_lock currently dump routines
via get_fa_head and dump */
-static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
+static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
{
struct hlist_head *head = &l->list;
struct leaf_info *li;
@@ -916,7 +881,7 @@ static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
return NULL;
}
-static inline struct list_head *get_fa_head(struct leaf *l, int plen)
+static inline struct list_head *get_fa_head(struct tnode *l, int plen)
{
struct leaf_info *li = find_leaf_info(l, plen);
@@ -926,8 +891,51 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
return &li->falh;
}
-static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
+static void leaf_pull_suffix(struct tnode *l)
+{
+ struct tnode *tp = node_parent(l);
+
+ while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
+ if (update_suffix(tp) > l->slen)
+ break;
+ tp = node_parent(tp);
+ }
+}
+
+static void leaf_push_suffix(struct tnode *l)
+{
+ struct tnode *tn = node_parent(l);
+
+ /* if this is a new leaf then tn will be NULL and we can sort
+ * out parent suffix lengths as a part of trie_rebalance
+ */
+ while (tn && (tn->slen < l->slen)) {
+ tn->slen = l->slen;
+ tn = node_parent(tn);
+ }
+}
+
+static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
{
+ /* record the location of the previous leaf_info entry */
+ struct hlist_node **pprev = old->hlist.pprev;
+ struct leaf_info *li = hlist_entry(pprev, typeof(*li), hlist.next);
+
+ /* remove the leaf info from the list */
+ hlist_del_rcu(&old->hlist);
+
+ /* only access li if it is pointing at the last valid hlist_node */
+ if (hlist_empty(&l->list) || (*pprev))
+ return;
+
+ /* update the trie with the latest suffix length */
+ l->slen = KEYLENGTH - li->plen;
+ leaf_pull_suffix(l);
+}
+
+static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
+{
+ struct hlist_head *head = &l->list;
struct leaf_info *li = NULL, *last = NULL;
if (hlist_empty(head)) {
@@ -944,218 +952,174 @@ static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
else
hlist_add_before_rcu(&new->hlist, &li->hlist);
}
+
+ /* if we added to the tail node then we need to update slen */
+ if (l->slen < (KEYLENGTH - new->plen)) {
+ l->slen = KEYLENGTH - new->plen;
+ leaf_push_suffix(l);
+ }
}
 /* rcu_read_lock needs to be held by the caller from the read side */
+static struct tnode *fib_find_node(struct trie *t, u32 key)
+{
+ struct tnode *n = rcu_dereference_rtnl(t->trie);
+
+ while (n) {
+ unsigned long index = get_index(key, n);
+
+ /* This bit of code is a bit tricky but it combines multiple
+ * checks into a single check. The prefix consists of the
+ * prefix plus zeros for the "bits" in the prefix. The index
+ * is the difference between the key and this value. From
+ * this we can actually derive several pieces of data.
+ * if (index & (~0ul << bits))
+ * we have a mismatch in skip bits and failed
+ * else
+ * we know the value is cindex
+ */
+ if (index & (~0ul << n->bits))
+ return NULL;
-static struct leaf *
-fib_find_node(struct trie *t, u32 key)
-{
- int pos;
- struct tnode *tn;
- struct rt_trie_node *n;
+ /* we have found a leaf. Prefixes have already been compared */
+ if (IS_LEAF(n))
+ break;
- pos = 0;
- n = rcu_dereference_rtnl(t->trie);
+ n = tnode_get_child_rcu(n, index);
+ }
- while (n != NULL && NODE_TYPE(n) == T_TNODE) {
- tn = (struct tnode *) n;
+ return n;
+}
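The comment above folds two tests into one: any bit of (key ^ n->key) at or above pos + bits means the prefix, including the skipped bits, does not match, while the bits between pos and pos + bits are simply the child index. A standalone userspace check with made-up keys (the node values are hypothetical):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* hypothetical tnode: prefix 192.168.0.0, pos = 13, bits = 4 */
                uint32_t nkey = 0xc0a80000;
                unsigned int pos = 13, bits = 4;

                uint32_t keys[] = { 0xc0a88101, 0xc0b80101 };   /* match / mismatch */

                for (int i = 0; i < 2; i++) {
                        unsigned long index = (unsigned long)(keys[i] ^ nkey) >> pos;

                        if (index & (~0ul << bits))
                                printf("%08x: skipped/prefix bits differ, not in this subtrie\n",
                                       (unsigned)keys[i]);
                        else
                                printf("%08x: descend into child %lu\n",
                                       (unsigned)keys[i], index);
                }
                return 0;
        }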
- check_tnode(tn);
+/* Return the first fib alias matching TOS with
+ * priority less than or equal to PRIO.
+ */
+static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
+{
+ struct fib_alias *fa;
- if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
- pos = tn->pos + tn->bits;
- n = tnode_get_child_rcu(tn,
- tkey_extract_bits(key,
- tn->pos,
- tn->bits));
- } else
- break;
- }
- /* Case we have found a leaf. Compare prefixes */
+ if (!fah)
+ return NULL;
- if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
- return (struct leaf *)n;
+ list_for_each_entry(fa, fah, fa_list) {
+ if (fa->fa_tos > tos)
+ continue;
+ if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
+ return fa;
+ }
return NULL;
}
static void trie_rebalance(struct trie *t, struct tnode *tn)
{
- int wasfull;
- t_key cindex, key;
struct tnode *tp;
- key = tn->key;
-
- while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
- cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
- tn = (struct tnode *)resize(t, tn);
-
- tnode_put_child_reorg(tp, cindex,
- (struct rt_trie_node *)tn, wasfull);
-
- tp = node_parent((struct rt_trie_node *) tn);
- if (!tp)
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
-
- tnode_free_flush();
- if (!tp)
- break;
+ while ((tp = node_parent(tn)) != NULL) {
+ resize(t, tn);
tn = tp;
}
/* Handle last (top) tnode */
if (IS_TNODE(tn))
- tn = (struct tnode *)resize(t, tn);
-
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
- tnode_free_flush();
+ resize(t, tn);
}
/* only used from updater-side */
static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
- int pos, newpos;
- struct tnode *tp = NULL, *tn = NULL;
- struct rt_trie_node *n;
- struct leaf *l;
- int missbit;
struct list_head *fa_head = NULL;
+ struct tnode *l, *n, *tp = NULL;
struct leaf_info *li;
- t_key cindex;
- pos = 0;
+ li = leaf_info_new(plen);
+ if (!li)
+ return NULL;
+ fa_head = &li->falh;
+
n = rtnl_dereference(t->trie);
/* If we point to NULL, stop. Either the tree is empty and we should
 * just put a new leaf in it, or we have reached an empty child slot,
* and we should just put our new leaf in that.
- * If we point to a T_TNODE, check if it matches our key. Note that
- * a T_TNODE might be skipping any number of bits - its 'pos' need
- * not be the parent's 'pos'+'bits'!
- *
- * If it does match the current key, get pos/bits from it, extract
- * the index from our key, push the T_TNODE and walk the tree.
- *
- * If it doesn't, we have to replace it with a new T_TNODE.
*
- * If we point to a T_LEAF, it might or might not have the same key
- * as we do. If it does, just change the value, update the T_LEAF's
- * value, and return it.
- * If it doesn't, we need to replace it with a T_TNODE.
+ * If we hit a node with a key that doesn't match, then we should stop
+ * and create a new tnode to replace that node and insert ourselves
+ * and the other node into the new tnode.
*/
-
- while (n != NULL && NODE_TYPE(n) == T_TNODE) {
- tn = (struct tnode *) n;
-
- check_tnode(tn);
-
- if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
- tp = tn;
- pos = tn->pos + tn->bits;
- n = tnode_get_child(tn,
- tkey_extract_bits(key,
- tn->pos,
- tn->bits));
-
- BUG_ON(n && node_parent(n) != tn);
- } else
+ while (n) {
+ unsigned long index = get_index(key, n);
+
+ /* This bit of code is a bit tricky but it combines multiple
+ * checks into a single check. The prefix consists of the
+ * prefix plus zeros for the "bits" in the prefix. The index
+ * is the difference between the key and this value. From
+ * this we can actually derive several pieces of data.
+ * if !(index >> bits)
+ * we know the value is child index
+ * else
+ * we have a mismatch in skip bits and failed
+ */
+ if (index >> n->bits)
break;
- }
- /*
- * n ----> NULL, LEAF or TNODE
- *
- * tp is n's (parent) ----> NULL or TNODE
- */
-
- BUG_ON(tp && IS_LEAF(tp));
-
- /* Case 1: n is a leaf. Compare prefixes */
-
- if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
- l = (struct leaf *) n;
- li = leaf_info_new(plen);
-
- if (!li)
- return NULL;
+ /* we have found a leaf. Prefixes have already been compared */
+ if (IS_LEAF(n)) {
+ /* Case 1: n is a leaf, and prefixes match */
+ insert_leaf_info(n, li);
+ return fa_head;
+ }
- fa_head = &li->falh;
- insert_leaf_info(&l->list, li);
- goto done;
+ tp = n;
+ n = tnode_get_child_rcu(n, index);
}
- l = leaf_new();
- if (!l)
- return NULL;
-
- l->key = key;
- li = leaf_info_new(plen);
-
- if (!li) {
- free_leaf(l);
+ l = leaf_new(key);
+ if (!l) {
+ free_leaf_info(li);
return NULL;
}
- fa_head = &li->falh;
- insert_leaf_info(&l->list, li);
-
- if (t->trie && n == NULL) {
- /* Case 2: n is NULL, and will just insert a new leaf */
+ insert_leaf_info(l, li);
- node_set_parent((struct rt_trie_node *)l, tp);
-
- cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(tp, cindex, (struct rt_trie_node *)l);
- } else {
- /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
- /*
- * Add a new tnode here
- * first tnode need some special handling
- */
-
- if (n) {
- pos = tp ? tp->pos+tp->bits : 0;
- newpos = tkey_mismatch(key, pos, n->key);
- tn = tnode_new(n->key, newpos, 1);
- } else {
- newpos = 0;
- tn = tnode_new(key, newpos, 1); /* First tnode */
- }
+ /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
+ *
+ * Add a new tnode here; the first tnode needs some special
+ * handling. This leaves us in position for handling as case 3.
+ */
+ if (n) {
+ struct tnode *tn;
+ tn = tnode_new(key, __fls(key ^ n->key), 1);
if (!tn) {
free_leaf_info(li);
- free_leaf(l);
+ node_free(l);
return NULL;
}
- node_set_parent((struct rt_trie_node *)tn, tp);
+ /* initialize routes out of node */
+ NODE_INIT_PARENT(tn, tp);
+ put_child(tn, get_index(key, tn) ^ 1, n);
- missbit = tkey_extract_bits(key, newpos, 1);
- put_child(tn, missbit, (struct rt_trie_node *)l);
- put_child(tn, 1-missbit, n);
-
- if (tp) {
- cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(tp, cindex, (struct rt_trie_node *)tn);
- } else {
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
- }
+ /* start adding routes into the node */
+ put_child_root(tp, t, key, tn);
+ node_set_parent(n, tn);
+ /* parent now has a NULL spot where the leaf can go */
tp = tn;
}
- if (tp && tp->pos + tp->bits > 32)
- pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
- tp, tp->pos, tp->bits, key, plen);
-
- /* Rebalance the trie */
+ /* Case 3: n is NULL, and will just insert a new leaf */
+ if (tp) {
+ NODE_INIT_PARENT(l, tp);
+ put_child(tp, get_index(key, tp), l);
+ trie_rebalance(t, tp);
+ } else {
+ rcu_assign_pointer(t->trie, l);
+ }
- trie_rebalance(t, tp);
-done:
return fa_head;
}
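When the walk above stops on a key mismatch, the new 1-bit tnode is placed at the most significant bit where the two keys differ (the __fls(key ^ n->key) in the code), so the new leaf and the existing node end up on opposite sides of that bit. A userspace sketch with arbitrary addresses; __builtin_clz() stands in for the kernel's __fls():

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t key = 0xc0a80105;      /* 192.168.1.5, being inserted */
                uint32_t old = 0xc0a80138;      /* 192.168.1.56, already in the trie */
                uint32_t diff = key ^ old;

                /* highest differing bit; equivalent to __fls(diff) for nonzero diff */
                unsigned int pos = 31 - (unsigned int)__builtin_clz(diff);

                printf("new tnode: pos=%u bits=1\n", pos);
                printf("new key -> child %u, existing node -> child %u\n",
                       (key >> pos) & 1, (old >> pos) & 1);
                return 0;
        }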
@@ -1172,7 +1136,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
u8 tos = cfg->fc_tos;
u32 key, mask;
int err;
- struct leaf *l;
+ struct tnode *l;
if (plen > 32)
return -EINVAL;
@@ -1329,18 +1293,130 @@ err:
return err;
}
+static inline t_key prefix_mismatch(t_key key, struct tnode *n)
+{
+ t_key prefix = n->key;
+
+ return (key ^ prefix) & (prefix | -prefix);
+}
+
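(prefix | -prefix) is all-ones from the prefix's lowest set bit upward, so the function returns nonzero exactly when the key disagrees with the prefix somewhere in that significant region; bits below the prefix's lowest set bit are ignored. A quick userspace check of the same expression (the addresses are arbitrary):

        #include <stdio.h>
        #include <stdint.h>

        typedef uint32_t t_key;

        /* same expression as prefix_mismatch() above */
        static t_key prefix_mismatch(t_key key, t_key prefix)
        {
                return (key ^ prefix) & (prefix | -prefix);
        }

        int main(void)
        {
                t_key prefix = 0xc0a80000;      /* 192.168.0.0; lowest set bit is bit 19 */

                /* differs only below bit 19 -> 0, i.e. covered by this prefix */
                printf("%08x\n", (unsigned)prefix_mismatch(0xc0a80101, prefix));

                /* differs at bit 20, inside the significant region -> nonzero */
                printf("%08x\n", (unsigned)prefix_mismatch(0xc0b80101, prefix));
                return 0;
        }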
/* should be called with rcu_read_lock */
-static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
- t_key key, const struct flowi4 *flp,
- struct fib_result *res, int fib_flags)
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+ struct fib_result *res, int fib_flags)
{
+ struct trie *t = (struct trie *)tb->tb_data;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ struct trie_use_stats __percpu *stats = t->stats;
+#endif
+ const t_key key = ntohl(flp->daddr);
+ struct tnode *n, *pn;
struct leaf_info *li;
- struct hlist_head *hhead = &l->list;
+ t_key cindex;
+
+ n = rcu_dereference(t->trie);
+ if (!n)
+ return -EAGAIN;
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ this_cpu_inc(stats->gets);
+#endif
+
+ pn = n;
+ cindex = 0;
+
+ /* Step 1: Travel to the longest prefix match in the trie */
+ for (;;) {
+ unsigned long index = get_index(key, n);
+
+ /* This bit of code is a bit tricky but it combines multiple
+ * checks into a single check. The prefix consists of the
+ * prefix plus zeros for the "bits" in the prefix. The index
+ * is the difference between the key and this value. From
+ * this we can actually derive several pieces of data.
+ * if (index & (~0ul << bits))
+ * we have a mismatch in skip bits and failed
+ * else
+ * we know the value is cindex
+ */
+ if (index & (~0ul << n->bits))
+ break;
+
+ /* we have found a leaf. Prefixes have already been compared */
+ if (IS_LEAF(n))
+ goto found;
+
+ /* only record pn and cindex if we are going to be chopping
+ * bits later. Otherwise we are just wasting cycles.
+ */
+ if (n->slen > n->pos) {
+ pn = n;
+ cindex = index;
+ }
+
+ n = tnode_get_child_rcu(n, index);
+ if (unlikely(!n))
+ goto backtrace;
+ }
+
+ /* Step 2: Sort out leaves and begin backtracing for longest prefix */
+ for (;;) {
+ /* record the pointer where our next node pointer is stored */
+ struct tnode __rcu **cptr = n->child;
+
+ /* This test verifies that none of the bits that differ
+ * between the key and the prefix exist in the region of
+ * the lsb and higher in the prefix.
+ */
+ if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
+ goto backtrace;
+
+ /* exit out and process leaf */
+ if (unlikely(IS_LEAF(n)))
+ break;
+
+ /* Don't bother recording parent info. Since we are in
+ * prefix match mode we will have to come back to wherever
+ * we started this traversal anyway
+ */
+
+ while ((n = rcu_dereference(*cptr)) == NULL) {
+backtrace:
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ if (!n)
+ this_cpu_inc(stats->null_node_hit);
+#endif
+ /* If we are at cindex 0 there are no more bits for
+ * us to strip at this level so we must ascend back
+ * up one level to see if there are any more bits to
+ * be stripped there.
+ */
+ while (!cindex) {
+ t_key pkey = pn->key;
+
+ pn = node_parent_rcu(pn);
+ if (unlikely(!pn))
+ return -EAGAIN;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ this_cpu_inc(stats->backtrack);
+#endif
+ /* Get Child's index */
+ cindex = get_index(pkey, pn);
+ }
+
+ /* strip the least significant bit from the cindex */
+ cindex &= cindex - 1;
+
+ /* grab pointer for next child node */
+ cptr = &pn->child[cindex];
+ }
+ }
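The backtrace path above never re-reads the trie to decide where to go next: clearing the least significant set bit of cindex steps to the next shorter-prefix candidate under pn, and a cindex of zero means this level is exhausted and the search climbs to the parent. The index arithmetic in isolation, as a userspace sketch (the starting index is arbitrary, and the real loop interleaves these steps with child lookups):

        #include <stdio.h>

        int main(void)
        {
                unsigned long cindex = 0x16;    /* 0b10110, index where the walk failed */

                do {
                        cindex &= cindex - 1;   /* drop the least significant set bit */
                        printf("next candidate index: %#lx\n", cindex);
                } while (cindex);

                printf("index exhausted at this level, ascend to the parent tnode\n");
                return 0;
        }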
- hlist_for_each_entry_rcu(li, hhead, hlist) {
+found:
+ /* Step 3: Process the leaf, if that fails fall back to backtracing */
+ hlist_for_each_entry_rcu(li, &n->list, hlist) {
struct fib_alias *fa;
- if (l->key != (key & li->mask_plen))
+ if ((key ^ n->key) & li->mask_plen)
continue;
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -1355,9 +1431,9 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
continue;
fib_alias_accessed(fa);
err = fib_props[fa->fa_type].error;
- if (err) {
+ if (unlikely(err < 0)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.semantic_match_passed++;
+ this_cpu_inc(stats->semantic_match_passed);
#endif
return err;
}
@@ -1371,241 +1447,48 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
continue;
-#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.semantic_match_passed++;
-#endif
+ if (!(fib_flags & FIB_LOOKUP_NOREF))
+ atomic_inc(&fi->fib_clntref);
+
res->prefixlen = li->plen;
res->nh_sel = nhsel;
res->type = fa->fa_type;
- res->scope = fa->fa_info->fib_scope;
+ res->scope = fi->fib_scope;
res->fi = fi;
res->table = tb;
res->fa_head = &li->falh;
- if (!(fib_flags & FIB_LOOKUP_NOREF))
- atomic_inc(&fi->fib_clntref);
- return 0;
- }
- }
-
-#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.semantic_match_miss++;
-#endif
- }
-
- return 1;
-}
-
-int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
- struct fib_result *res, int fib_flags)
-{
- struct trie *t = (struct trie *) tb->tb_data;
- int ret;
- struct rt_trie_node *n;
- struct tnode *pn;
- unsigned int pos, bits;
- t_key key = ntohl(flp->daddr);
- unsigned int chopped_off;
- t_key cindex = 0;
- unsigned int current_prefix_length = KEYLENGTH;
- struct tnode *cn;
- t_key pref_mismatch;
-
- rcu_read_lock();
-
- n = rcu_dereference(t->trie);
- if (!n)
- goto failed;
-
#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.gets++;
+ this_cpu_inc(stats->semantic_match_passed);
#endif
-
- /* Just a leaf? */
- if (IS_LEAF(n)) {
- ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
- goto found;
- }
-
- pn = (struct tnode *) n;
- chopped_off = 0;
-
- while (pn) {
- pos = pn->pos;
- bits = pn->bits;
-
- if (!chopped_off)
- cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
- pos, bits);
-
- n = tnode_get_child_rcu(pn, cindex);
-
- if (n == NULL) {
-#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.null_node_hit++;
-#endif
- goto backtrace;
- }
-
- if (IS_LEAF(n)) {
- ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
- if (ret > 0)
- goto backtrace;
- goto found;
- }
-
- cn = (struct tnode *)n;
-
- /*
- * It's a tnode, and we can do some extra checks here if we
- * like, to avoid descending into a dead-end branch.
- * This tnode is in the parent's child array at index
- * key[p_pos..p_pos+p_bits] but potentially with some bits
- * chopped off, so in reality the index may be just a
- * subprefix, padded with zero at the end.
- * We can also take a look at any skipped bits in this
- * tnode - everything up to p_pos is supposed to be ok,
- * and the non-chopped bits of the index (se previous
- * paragraph) are also guaranteed ok, but the rest is
- * considered unknown.
- *
- * The skipped bits are key[pos+bits..cn->pos].
- */
-
- /* If current_prefix_length < pos+bits, we are already doing
- * actual prefix matching, which means everything from
- * pos+(bits-chopped_off) onward must be zero along some
- * branch of this subtree - otherwise there is *no* valid
- * prefix present. Here we can only check the skipped
- * bits. Remember, since we have already indexed into the
- * parent's child array, we know that the bits we chopped of
- * *are* zero.
- */
-
- /* NOTA BENE: Checking only skipped bits
- for the new node here */
-
- if (current_prefix_length < pos+bits) {
- if (tkey_extract_bits(cn->key, current_prefix_length,
- cn->pos - current_prefix_length)
- || !(cn->child[0]))
- goto backtrace;
- }
-
- /*
- * If chopped_off=0, the index is fully validated and we
- * only need to look at the skipped bits for this, the new,
- * tnode. What we actually want to do is to find out if
- * these skipped bits match our key perfectly, or if we will
- * have to count on finding a matching prefix further down,
- * because if we do, we would like to have some way of
- * verifying the existence of such a prefix at this point.
- */
-
- /* The only thing we can do at this point is to verify that
- * any such matching prefix can indeed be a prefix to our
- * key, and if the bits in the node we are inspecting that
- * do not match our key are not ZERO, this cannot be true.
- * Thus, find out where there is a mismatch (before cn->pos)
- * and verify that all the mismatching bits are zero in the
- * new tnode's key.
- */
-
- /*
- * Note: We aren't very concerned about the piece of
- * the key that precede pn->pos+pn->bits, since these
- * have already been checked. The bits after cn->pos
- * aren't checked since these are by definition
- * "unknown" at this point. Thus, what we want to see
- * is if we are about to enter the "prefix matching"
- * state, and in that case verify that the skipped
- * bits that will prevail throughout this subtree are
- * zero, as they have to be if we are to find a
- * matching prefix.
- */
-
- pref_mismatch = mask_pfx(cn->key ^ key, cn->pos);
-
- /*
- * In short: If skipped bits in this node do not match
- * the search key, enter the "prefix matching"
- * state.directly.
- */
- if (pref_mismatch) {
- /* fls(x) = __fls(x) + 1 */
- int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
-
- if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
- goto backtrace;
-
- if (current_prefix_length >= cn->pos)
- current_prefix_length = mp;
+ return err;
+ }
}
- pn = (struct tnode *)n; /* Descend */
- chopped_off = 0;
- continue;
-
-backtrace:
- chopped_off++;
-
- /* As zero don't change the child key (cindex) */
- while ((chopped_off <= pn->bits)
- && !(cindex & (1<<(chopped_off-1))))
- chopped_off++;
-
- /* Decrease current_... with bits chopped off */
- if (current_prefix_length > pn->pos + pn->bits - chopped_off)
- current_prefix_length = pn->pos + pn->bits
- - chopped_off;
-
- /*
- * Either we do the actual chop off according or if we have
- * chopped off all bits in this tnode walk up to our parent.
- */
-
- if (chopped_off <= pn->bits) {
- cindex &= ~(1 << (chopped_off-1));
- } else {
- struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
- if (!parent)
- goto failed;
-
- /* Get Child's index */
- cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
- pn = parent;
- chopped_off = 0;
-
#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.backtrack++;
+ this_cpu_inc(stats->semantic_match_miss);
#endif
- goto backtrace;
- }
}
-failed:
- ret = 1;
-found:
- rcu_read_unlock();
- return ret;
+ goto backtrace;
}
EXPORT_SYMBOL_GPL(fib_table_lookup);
/*
* Remove the leaf and return parent.
*/
-static void trie_leaf_remove(struct trie *t, struct leaf *l)
+static void trie_leaf_remove(struct trie *t, struct tnode *l)
{
- struct tnode *tp = node_parent((struct rt_trie_node *) l);
+ struct tnode *tp = node_parent(l);
pr_debug("entering trie_leaf_remove(%p)\n", l);
if (tp) {
- t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
- put_child(tp, cindex, NULL);
+ put_child(tp, get_index(l->key, tp), NULL);
trie_rebalance(t, tp);
- } else
+ } else {
RCU_INIT_POINTER(t->trie, NULL);
+ }
- free_leaf(l);
+ node_free(l);
}
/*
@@ -1619,7 +1502,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
u8 tos = cfg->fc_tos;
struct fib_alias *fa, *fa_to_delete;
struct list_head *fa_head;
- struct leaf *l;
+ struct tnode *l;
struct leaf_info *li;
if (plen > 32)
@@ -1684,7 +1567,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
tb->tb_num_default--;
if (list_empty(fa_head)) {
- hlist_del_rcu(&li->hlist);
+ remove_leaf_info(l, li);
free_leaf_info(li);
}
@@ -1717,12 +1600,13 @@ static int trie_flush_list(struct list_head *head)
return found;
}
-static int trie_flush_leaf(struct leaf *l)
+static int trie_flush_leaf(struct tnode *l)
{
int found = 0;
struct hlist_head *lih = &l->list;
struct hlist_node *tmp;
struct leaf_info *li = NULL;
+ unsigned char plen = KEYLENGTH;
hlist_for_each_entry_safe(li, tmp, lih, hlist) {
found += trie_flush_list(&li->falh);
@@ -1730,8 +1614,14 @@ static int trie_flush_leaf(struct leaf *l)
if (list_empty(&li->falh)) {
hlist_del_rcu(&li->hlist);
free_leaf_info(li);
+ continue;
}
+
+ plen = li->plen;
}
+
+ l->slen = KEYLENGTH - plen;
+
return found;
}
@@ -1739,63 +1629,57 @@ static int trie_flush_leaf(struct leaf *l)
* Scan for the next right leaf starting at node p->child[idx]
* Since we have back pointer, no recursion necessary.
*/
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
+static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
{
do {
- t_key idx;
+ unsigned long idx = c ? get_index(c->key, p) + 1 : 0;
- if (c)
- idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
- else
- idx = 0;
-
- while (idx < 1u << p->bits) {
+ while (idx < tnode_child_length(p)) {
c = tnode_get_child_rcu(p, idx++);
if (!c)
continue;
if (IS_LEAF(c))
- return (struct leaf *) c;
+ return c;
/* Rescan start scanning in new node */
- p = (struct tnode *) c;
+ p = c;
idx = 0;
}
/* Node empty, walk back up to parent */
- c = (struct rt_trie_node *) p;
+ c = p;
} while ((p = node_parent_rcu(c)) != NULL);
return NULL; /* Root of trie */
}
-static struct leaf *trie_firstleaf(struct trie *t)
+static struct tnode *trie_firstleaf(struct trie *t)
{
- struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
+ struct tnode *n = rcu_dereference_rtnl(t->trie);
if (!n)
return NULL;
if (IS_LEAF(n)) /* trie is just a leaf */
- return (struct leaf *) n;
+ return n;
return leaf_walk_rcu(n, NULL);
}
-static struct leaf *trie_nextleaf(struct leaf *l)
+static struct tnode *trie_nextleaf(struct tnode *l)
{
- struct rt_trie_node *c = (struct rt_trie_node *) l;
- struct tnode *p = node_parent_rcu(c);
+ struct tnode *p = node_parent_rcu(l);
if (!p)
return NULL; /* trie with just one leaf */
- return leaf_walk_rcu(p, c);
+ return leaf_walk_rcu(p, l);
}
-static struct leaf *trie_leafindex(struct trie *t, int index)
+static struct tnode *trie_leafindex(struct trie *t, int index)
{
- struct leaf *l = trie_firstleaf(t);
+ struct tnode *l = trie_firstleaf(t);
while (l && index-- > 0)
l = trie_nextleaf(l);
@@ -1810,19 +1694,28 @@ static struct leaf *trie_leafindex(struct trie *t, int index)
int fib_table_flush(struct fib_table *tb)
{
struct trie *t = (struct trie *) tb->tb_data;
- struct leaf *l, *ll = NULL;
+ struct tnode *l, *ll = NULL;
int found = 0;
for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
found += trie_flush_leaf(l);
- if (ll && hlist_empty(&ll->list))
- trie_leaf_remove(t, ll);
+ if (ll) {
+ if (hlist_empty(&ll->list))
+ trie_leaf_remove(t, ll);
+ else
+ leaf_pull_suffix(ll);
+ }
+
ll = l;
}
- if (ll && hlist_empty(&ll->list))
- trie_leaf_remove(t, ll);
+ if (ll) {
+ if (hlist_empty(&ll->list))
+ trie_leaf_remove(t, ll);
+ else
+ leaf_pull_suffix(ll);
+ }
pr_debug("trie_flush found=%d\n", found);
return found;
@@ -1830,6 +1723,11 @@ int fib_table_flush(struct fib_table *tb)
void fib_free_table(struct fib_table *tb)
{
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ struct trie *t = (struct trie *)tb->tb_data;
+
+ free_percpu(t->stats);
+#endif /* CONFIG_IP_FIB_TRIE_STATS */
kfree(tb);
}
@@ -1870,7 +1768,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
return skb->len;
}
-static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
+static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb)
{
struct leaf_info *li;
@@ -1906,7 +1804,7 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct leaf *l;
+ struct tnode *l;
struct trie *t = (struct trie *) tb->tb_data;
t_key key = cb->args[2];
int count = cb->args[3];
@@ -1952,7 +1850,7 @@ void __init fib_trie_init(void)
0, SLAB_PANIC, NULL);
trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
- max(sizeof(struct leaf),
+ max(sizeof(struct tnode),
sizeof(struct leaf_info)),
0, SLAB_PANIC, NULL);
}
@@ -1973,7 +1871,14 @@ struct fib_table *fib_trie_table(u32 id)
tb->tb_num_default = 0;
t = (struct trie *) tb->tb_data;
- memset(t, 0, sizeof(*t));
+ RCU_INIT_POINTER(t->trie, NULL);
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ t->stats = alloc_percpu(struct trie_use_stats);
+ if (!t->stats) {
+ kfree(tb);
+ tb = NULL;
+ }
+#endif
return tb;
}
@@ -1988,10 +1893,10 @@ struct fib_trie_iter {
unsigned int depth;
};
-static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
{
+ unsigned long cindex = iter->index;
struct tnode *tn = iter->tnode;
- unsigned int cindex = iter->index;
struct tnode *p;
/* A single entry routing table */
@@ -2001,8 +1906,8 @@ static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
iter->tnode, iter->index, iter->depth);
rescan:
- while (cindex < (1<<tn->bits)) {
- struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
+ while (cindex < tnode_child_length(tn)) {
+ struct tnode *n = tnode_get_child_rcu(tn, cindex);
if (n) {
if (IS_LEAF(n)) {
@@ -2010,7 +1915,7 @@ rescan:
iter->index = cindex + 1;
} else {
/* push down one level */
- iter->tnode = (struct tnode *) n;
+ iter->tnode = n;
iter->index = 0;
++iter->depth;
}
@@ -2021,9 +1926,9 @@ rescan:
}
/* Current node exhausted, pop back up */
- p = node_parent_rcu((struct rt_trie_node *)tn);
+ p = node_parent_rcu(tn);
if (p) {
- cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
+ cindex = get_index(tn->key, p) + 1;
tn = p;
--iter->depth;
goto rescan;
@@ -2033,10 +1938,10 @@ rescan:
return NULL;
}
-static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
- struct rt_trie_node *n;
+ struct tnode *n;
if (!t)
return NULL;
@@ -2046,7 +1951,7 @@ static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
return NULL;
if (IS_TNODE(n)) {
- iter->tnode = (struct tnode *) n;
+ iter->tnode = n;
iter->index = 0;
iter->depth = 1;
} else {
@@ -2060,7 +1965,7 @@ static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
- struct rt_trie_node *n;
+ struct tnode *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
@@ -2068,7 +1973,6 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
rcu_read_lock();
for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
if (IS_LEAF(n)) {
- struct leaf *l = (struct leaf *)n;
struct leaf_info *li;
s->leaves++;
@@ -2076,19 +1980,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
if (iter.depth > s->maxdepth)
s->maxdepth = iter.depth;
- hlist_for_each_entry_rcu(li, &l->list, hlist)
+ hlist_for_each_entry_rcu(li, &n->list, hlist)
++s->prefixes;
} else {
- const struct tnode *tn = (const struct tnode *) n;
- int i;
-
s->tnodes++;
- if (tn->bits < MAX_STAT_DEPTH)
- s->nodesizes[tn->bits]++;
-
- for (i = 0; i < (1<<tn->bits); i++)
- if (!tn->child[i])
- s->nullpointers++;
+ if (n->bits < MAX_STAT_DEPTH)
+ s->nodesizes[n->bits]++;
+ s->nullpointers += n->empty_children;
}
}
rcu_read_unlock();
@@ -2111,7 +2009,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
- bytes = sizeof(struct leaf) * stat->leaves;
+ bytes = sizeof(struct tnode) * stat->leaves;
seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
bytes += sizeof(struct leaf_info) * stat->prefixes;
@@ -2132,25 +2030,38 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
- bytes += sizeof(struct rt_trie_node *) * pointers;
+ bytes += sizeof(struct tnode *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
- const struct trie_use_stats *stats)
+ const struct trie_use_stats __percpu *stats)
{
+ struct trie_use_stats s = { 0 };
+ int cpu;
+
+ /* loop through all of the CPUs and gather up the stats */
+ for_each_possible_cpu(cpu) {
+ const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
+
+ s.gets += pcpu->gets;
+ s.backtrack += pcpu->backtrack;
+ s.semantic_match_passed += pcpu->semantic_match_passed;
+ s.semantic_match_miss += pcpu->semantic_match_miss;
+ s.null_node_hit += pcpu->null_node_hit;
+ s.resize_node_skipped += pcpu->resize_node_skipped;
+ }
+
seq_printf(seq, "\nCounters:\n---------\n");
- seq_printf(seq, "gets = %u\n", stats->gets);
- seq_printf(seq, "backtracks = %u\n", stats->backtrack);
+ seq_printf(seq, "gets = %u\n", s.gets);
+ seq_printf(seq, "backtracks = %u\n", s.backtrack);
seq_printf(seq, "semantic match passed = %u\n",
- stats->semantic_match_passed);
- seq_printf(seq, "semantic match miss = %u\n",
- stats->semantic_match_miss);
- seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
- seq_printf(seq, "skipped node resize = %u\n\n",
- stats->resize_node_skipped);
+ s.semantic_match_passed);
+ seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
+ seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
+ seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
}
#endif /* CONFIG_IP_FIB_TRIE_STATS */
@@ -2173,7 +2084,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"Basic info: size of leaf:"
" %Zd bytes, size of tnode: %Zd bytes.\n",
- sizeof(struct leaf), sizeof(struct tnode));
+ sizeof(struct tnode), sizeof(struct tnode));
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
@@ -2191,7 +2102,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
trie_collect_stats(t, &stat);
trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
- trie_show_usage(seq, &t->stats);
+ trie_show_usage(seq, t->stats);
#endif
}
}
@@ -2212,7 +2123,7 @@ static const struct file_operations fib_triestat_fops = {
.release = single_release_net,
};
-static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -2224,7 +2135,7 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
- struct rt_trie_node *n;
+ struct tnode *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
@@ -2253,7 +2164,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
- struct rt_trie_node *n;
+ struct tnode *n;
++*pos;
/* next node in same table */
@@ -2339,29 +2250,26 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
- struct rt_trie_node *n = v;
+ struct tnode *n = v;
if (!node_parent_rcu(n))
fib_table_print(seq, iter->tb);
if (IS_TNODE(n)) {
- struct tnode *tn = (struct tnode *) n;
- __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
+ __be32 prf = htonl(n->key);
seq_indent(seq, iter->depth-1);
- seq_printf(seq, " +-- %pI4/%d %d %d %d\n",
- &prf, tn->pos, tn->bits, tn->full_children,
- tn->empty_children);
-
+ seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
+ &prf, KEYLENGTH - n->pos - n->bits, n->bits,
+ n->full_children, n->empty_children);
} else {
- struct leaf *l = (struct leaf *) n;
struct leaf_info *li;
- __be32 val = htonl(l->key);
+ __be32 val = htonl(n->key);
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %pI4\n", &val);
- hlist_for_each_entry_rcu(li, &l->list, hlist) {
+ hlist_for_each_entry_rcu(li, &n->list, hlist) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2411,9 +2319,9 @@ struct fib_route_iter {
t_key key;
};
-static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
+static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
- struct leaf *l = NULL;
+ struct tnode *l = NULL;
struct trie *t = iter->main_trie;
/* use cache location of last found key */
@@ -2458,7 +2366,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
- struct leaf *l = v;
+ struct tnode *l = v;
++*pos;
if (v == SEQ_START_TOKEN) {
@@ -2504,7 +2412,7 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
- struct leaf *l = v;
+ struct tnode *l = v;
struct leaf_info *li;
if (v == SEQ_START_TOKEN) {
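The fib_trie changes above fold struct leaf into struct tnode and move the CONFIG_IP_FIB_TRIE_STATS counters from one shared structure into alloc_percpu() storage, with trie_show_usage() summing every CPU's copy at read time. The short userspace sketch below only illustrates that read-side aggregation pattern; the counter_set type, the NCPUS constant and the plain array standing in for per-CPU memory are inventions for the example, not kernel API.

#include <stdio.h>

/* Stand-in for one CPU's private counter block (cf. struct trie_use_stats). */
struct counter_set {
        unsigned int gets;
        unsigned int backtrack;
};

#define NCPUS 4

/* Writers only ever touch their own slot; the reader sums all slots on
 * demand, mirroring the for_each_possible_cpu() loop in trie_show_usage(). */
static struct counter_set per_cpu_stats[NCPUS];

static struct counter_set aggregate(void)
{
        struct counter_set sum = { 0, 0 };
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++) {
                sum.gets += per_cpu_stats[cpu].gets;
                sum.backtrack += per_cpu_stats[cpu].backtrack;
        }
        return sum;
}

int main(void)
{
        per_cpu_stats[0].gets = 3;
        per_cpu_stats[2].gets = 5;
        per_cpu_stats[2].backtrack = 1;

        struct counter_set s = aggregate();
        printf("gets = %u, backtracks = %u\n", s.gets, s.backtrack);
        return 0;
}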
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index b986298a7ba3..92ddea1e6457 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -70,7 +70,6 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
- __wsum delta;
if (skb->remcsum_offload) {
/* Already processed in GRO path */
@@ -82,14 +81,7 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
return NULL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
- if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
- __skb_checksum_complete(skb);
-
- delta = remcsum_adjust((void *)guehdr + hdrlen,
- skb->csum, start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- skb->csum = csum_add(skb->csum, delta);
+ skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
return guehdr;
}
@@ -174,7 +166,8 @@ drop:
}
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
{
const struct net_offload *ops;
struct sk_buff **pp = NULL;
@@ -195,7 +188,8 @@ out_unlock:
return pp;
}
-static int fou_gro_complete(struct sk_buff *skb, int nhoff)
+static int fou_gro_complete(struct sk_buff *skb, int nhoff,
+ struct udp_offload *uoff)
{
const struct net_offload *ops;
u8 proto = NAPI_GRO_CB(skb)->proto;
@@ -226,7 +220,6 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
- __wsum delta;
if (skb->remcsum_offload)
return guehdr;
@@ -241,12 +234,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
return NULL;
}
- delta = remcsum_adjust((void *)guehdr + hdrlen,
- NAPI_GRO_CB(skb)->csum, start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- skb->csum = csum_add(skb->csum, delta);
- NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+ skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
skb->remcsum_offload = 1;
@@ -254,7 +242,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
}
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
{
const struct net_offload **offloads;
const struct net_offload *ops;
@@ -360,7 +349,8 @@ out:
return pp;
}
-static int gue_gro_complete(struct sk_buff *skb, int nhoff)
+static int gue_gro_complete(struct sk_buff *skb, int nhoff,
+ struct udp_offload *uoff)
{
const struct net_offload **offloads;
struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
@@ -490,7 +480,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
sk->sk_user_data = fou;
fou->sock = sock;
- udp_set_convert_csum(sk, true);
+ inet_inc_convert_csum(sk);
sk->sk_allocation = GFP_ATOMIC;
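Both gue_remcsum() and gue_gro_remcsum() above stop open-coding the checksum fixup (remcsum_adjust() plus csum_add()) and call the shared skb_remcsum_process()/skb_gro_remcsum_process() helpers instead. The underlying trick is ordinary one's-complement arithmetic: a change to the packet can be folded into an existing sum as a delta rather than recomputed from scratch. The sketch below is a generic userspace illustration of that incremental update, not the kernel helpers; csum16(), csum_add16() and the sample bytes are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Plain 16-bit one's-complement sum over a buffer (RFC 1071 style). */
static uint16_t csum16(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)data[i] << 8 | data[i + 1];
        if (len & 1)
                sum += (uint32_t)data[len - 1] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* One's-complement addition: fold the carry back in. */
static uint16_t csum_add16(uint16_t a, uint16_t b)
{
        uint32_t sum = (uint32_t)a + b;

        return (uint16_t)((sum & 0xffff) + (sum >> 16));
}

int main(void)
{
        uint8_t pkt[8] = { 0x45, 0x00, 0x00, 0x28, 0x1c, 0x46, 0x40, 0x00 };
        uint16_t before = csum16(pkt, sizeof(pkt));

        /* Modify one 16-bit word and patch the sum with a delta:
         * delta = new_word + ~old_word in one's-complement arithmetic. */
        uint16_t old_word = (uint16_t)(pkt[4] << 8 | pkt[5]);
        pkt[4] = 0xab;
        pkt[5] = 0xcd;
        uint16_t new_word = 0xabcd;
        uint16_t delta = csum_add16(new_word, (uint16_t)~old_word);
        uint16_t patched = csum_add16(before, delta);

        printf("recomputed=%04x patched=%04x\n", csum16(pkt, sizeof(pkt)), patched);
        return 0;
}

Recomputing from scratch and patching with the delta print the same value, which is the property the remote checksum offload path relies on.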
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 394a200f93c1..5a4828ba05ad 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
-#include <linux/rculist.h>
+#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -26,8 +26,8 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <linux/hash.h>
#include <linux/ethtool.h>
+#include <linux/mutex.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
@@ -50,38 +50,30 @@
#include <net/ip6_checksum.h>
#endif
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
+/* Protects sock_list and refcounts. */
+static DEFINE_MUTEX(geneve_mutex);
/* per-network namespace private data for this module */
struct geneve_net {
- struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock; /* Protects sock_list */
+ struct list_head sock_list;
};
static int geneve_net_id;
-static struct workqueue_struct *geneve_wq;
-
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
return (struct genevehdr *)(udp_hdr(skb) + 1);
}
-static struct hlist_head *gs_head(struct net *net, __be16 port)
+static struct geneve_sock *geneve_find_sock(struct net *net,
+ sa_family_t family, __be16 port)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
-
- return &gn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* Find geneve socket based on network namespace and UDP port */
-static struct geneve_sock *geneve_find_sock(struct net *net, __be16 port)
-{
struct geneve_sock *gs;
- hlist_for_each_entry_rcu(gs, gs_head(net, port), hlist) {
- if (inet_sk(gs->sock->sk)->inet_sport == port)
+ list_for_each_entry(gs, &gn->sock_list, list) {
+ if (inet_sk(gs->sock->sk)->inet_sport == port &&
+ inet_sk(gs->sock->sk)->sk.sk_family == family)
return gs;
}
@@ -115,19 +107,19 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
__u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
__be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool xnet)
+ bool csum, bool xnet)
{
struct genevehdr *gnvh;
int min_headroom;
int err;
- skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+ skb = udp_tunnel_handle_offloads(skb, csum);
if (IS_ERR(skb))
return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err)) {
@@ -144,11 +136,107 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
- tos, ttl, df, src_port, dst_port, xnet);
+ return udp_tunnel_xmit_skb(rt, skb, src, dst,
+ tos, ttl, df, src_port, dst_port, xnet,
+ !csum);
}
EXPORT_SYMBOL_GPL(geneve_xmit_skb);
+static int geneve_hlen(struct genevehdr *gh)
+{
+ return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct genevehdr *gh, *gh2;
+ unsigned int hlen, gh_len, off_gnv;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_gnv = skb_gro_offset(skb);
+ hlen = off_gnv + sizeof(*gh);
+ gh = skb_gro_header_fast(skb, off_gnv);
+ if (skb_gro_header_hard(skb, hlen)) {
+ gh = skb_gro_header_slow(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+ }
+
+ if (gh->ver != GENEVE_VER || gh->oam)
+ goto out;
+ gh_len = geneve_hlen(gh);
+
+ hlen = off_gnv + gh_len;
+ if (skb_gro_header_hard(skb, hlen)) {
+ gh = skb_gro_header_slow(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ gh2 = (struct genevehdr *)(p->data + off_gnv);
+ if (gh->opt_len != gh2->opt_len ||
+ memcmp(gh, gh2, gh_len)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = gh->proto_type;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+ struct udp_offload *uoff)
+{
+ struct genevehdr *gh;
+ struct packet_offload *ptype;
+ __be16 type;
+ int gh_len;
+ int err = -ENOSYS;
+
+ udp_tunnel_gro_complete(skb, nhoff);
+
+ gh = (struct genevehdr *)(skb->data + nhoff);
+ gh_len = geneve_hlen(gh);
+ type = gh->proto_type;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+ rcu_read_unlock();
+ return err;
+}
+
static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
struct sock *sk = gs->sock->sk;
@@ -214,15 +302,6 @@ error:
return 1;
}
-static void geneve_del_work(struct work_struct *work)
-{
- struct geneve_sock *gs = container_of(work, struct geneve_sock,
- del_work);
-
- udp_tunnel_sock_release(gs->sock);
- kfree_rcu(gs, rcu);
-}
-
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
__be16 port)
{
@@ -263,8 +342,6 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
if (!gs)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&gs->del_work, geneve_del_work);
-
sock = geneve_create_sock(net, ipv6, port);
if (IS_ERR(sock)) {
kfree(gs);
@@ -272,19 +349,15 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
}
gs->sock = sock;
- atomic_set(&gs->refcnt, 1);
+ gs->refcnt = 1;
gs->rcv = rcv;
gs->rcv_data = data;
/* Initialize the geneve udp offloads structure */
gs->udp_offloads.port = port;
- gs->udp_offloads.callbacks.gro_receive = NULL;
- gs->udp_offloads.callbacks.gro_complete = NULL;
-
- spin_lock(&gn->sock_lock);
- hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
+ gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
+ gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
geneve_notify_add_rx_port(gs);
- spin_unlock(&gn->sock_lock);
/* Mark socket as an encapsulation socket */
tunnel_cfg.sk_user_data = gs;
@@ -293,6 +366,8 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+ list_add(&gs->list, &gn->sock_list);
+
return gs;
}
@@ -300,25 +375,21 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
geneve_rcv_t *rcv, void *data,
bool no_share, bool ipv6)
{
- struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
- gs = geneve_socket_create(net, port, rcv, data, ipv6);
- if (!IS_ERR(gs))
- return gs;
-
- if (no_share) /* Return error if sharing is not allowed. */
- return ERR_PTR(-EINVAL);
+ mutex_lock(&geneve_mutex);
- spin_lock(&gn->sock_lock);
- gs = geneve_find_sock(net, port);
- if (gs && ((gs->rcv != rcv) ||
- !atomic_add_unless(&gs->refcnt, 1, 0)))
+ gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+ if (gs) {
+ if (!no_share && gs->rcv == rcv)
+ gs->refcnt++;
+ else
gs = ERR_PTR(-EBUSY);
- spin_unlock(&gn->sock_lock);
+ } else {
+ gs = geneve_socket_create(net, port, rcv, data, ipv6);
+ }
- if (!gs)
- gs = ERR_PTR(-EINVAL);
+ mutex_unlock(&geneve_mutex);
return gs;
}
@@ -326,37 +397,32 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
void geneve_sock_release(struct geneve_sock *gs)
{
- struct net *net = sock_net(gs->sock->sk);
- struct geneve_net *gn = net_generic(net, geneve_net_id);
+ mutex_lock(&geneve_mutex);
- if (!atomic_dec_and_test(&gs->refcnt))
- return;
+ if (--gs->refcnt)
+ goto unlock;
- spin_lock(&gn->sock_lock);
- hlist_del_rcu(&gs->hlist);
+ list_del(&gs->list);
geneve_notify_del_rx_port(gs);
- spin_unlock(&gn->sock_lock);
+ udp_tunnel_sock_release(gs->sock);
+ kfree_rcu(gs, rcu);
- queue_work(geneve_wq, &gs->del_work);
+unlock:
+ mutex_unlock(&geneve_mutex);
}
EXPORT_SYMBOL_GPL(geneve_sock_release);
static __net_init int geneve_init_net(struct net *net)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
- unsigned int h;
- spin_lock_init(&gn->sock_lock);
-
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- INIT_HLIST_HEAD(&gn->sock_list[h]);
+ INIT_LIST_HEAD(&gn->sock_list);
return 0;
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit = NULL,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
@@ -365,10 +431,6 @@ static int __init geneve_init_module(void)
{
int rc;
- geneve_wq = alloc_workqueue("geneve", 0, 0);
- if (!geneve_wq)
- return -ENOMEM;
-
rc = register_pernet_subsys(&geneve_net_ops);
if (rc)
return rc;
@@ -377,11 +439,10 @@ static int __init geneve_init_module(void)
return 0;
}
-late_initcall(geneve_init_module);
+module_init(geneve_init_module);
static void __exit geneve_cleanup_module(void)
{
- destroy_workqueue(geneve_wq);
unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);
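The geneve rework above drops the per-namespace RCU hash table, the spinlock and the deferred-free workqueue in favour of a plain list walked under a single mutex, with an integer refcount taken and dropped under that same mutex. A hedged pthread sketch of that shape follows; tunnel_sock, the single global list and the missing namespace/family arguments are simplifications made only for illustration.

#include <pthread.h>
#include <stdlib.h>

struct tunnel_sock {
        unsigned short port;
        int refcnt;                     /* manipulated only under list_lock */
        struct tunnel_sock *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tunnel_sock *sock_list;

/* Reuse an existing socket for the port or create one, like geneve_sock_add(). */
struct tunnel_sock *tunnel_sock_add(unsigned short port)
{
        struct tunnel_sock *ts;

        pthread_mutex_lock(&list_lock);
        for (ts = sock_list; ts; ts = ts->next) {
                if (ts->port == port) {
                        ts->refcnt++;
                        goto out;
                }
        }
        ts = calloc(1, sizeof(*ts));
        if (ts) {
                ts->port = port;
                ts->refcnt = 1;
                ts->next = sock_list;
                sock_list = ts;
        }
out:
        pthread_mutex_unlock(&list_lock);
        return ts;
}

/* Drop a reference; unlink and free when the last user goes away. */
void tunnel_sock_release(struct tunnel_sock *ts)
{
        struct tunnel_sock **pp;

        pthread_mutex_lock(&list_lock);
        if (--ts->refcnt == 0) {
                for (pp = &sock_list; *pp; pp = &(*pp)->next) {
                        if (*pp == ts) {
                                *pp = ts->next;
                                break;
                        }
                }
                free(ts);
        }
        pthread_mutex_unlock(&list_lock);
}

tunnel_sock_add() and tunnel_sock_release() can be called from any thread; serializing everything on one mutex is affordable here because, as in the patch, these are control-path operations (tunnel setup and teardown), not per-packet work.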
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 36f5584d93c5..5e564014a0b7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -205,7 +205,7 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
*/
static struct sock *icmp_sk(struct net *net)
{
- return net->ipv4.icmp_sk[smp_processor_id()];
+ return *this_cpu_ptr(net->ipv4.icmp_sk);
}
static inline struct sock *icmp_xmit_lock(struct net *net)
@@ -1140,8 +1140,8 @@ static void __net_exit icmp_sk_exit(struct net *net)
int i;
for_each_possible_cpu(i)
- inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
- kfree(net->ipv4.icmp_sk);
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+ free_percpu(net->ipv4.icmp_sk);
net->ipv4.icmp_sk = NULL;
}
@@ -1149,9 +1149,8 @@ static int __net_init icmp_sk_init(struct net *net)
{
int i, err;
- net->ipv4.icmp_sk =
- kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
- if (net->ipv4.icmp_sk == NULL)
+ net->ipv4.icmp_sk = alloc_percpu(struct sock *);
+ if (!net->ipv4.icmp_sk)
return -ENOMEM;
for_each_possible_cpu(i) {
@@ -1162,7 +1161,7 @@ static int __net_init icmp_sk_init(struct net *net)
if (err < 0)
goto fail;
- net->ipv4.icmp_sk[i] = sk;
+ *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
/* Enough space for 2 64K ICMP packets, including
* sk_buff/skb_shared_info struct overhead.
@@ -1203,8 +1202,8 @@ static int __net_init icmp_sk_init(struct net *net)
fail:
for_each_possible_cpu(i)
- inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
- kfree(net->ipv4.icmp_sk);
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+ free_percpu(net->ipv4.icmp_sk);
return err;
}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e34dccbc4d70..81751f12645f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -203,7 +203,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
icsk->icsk_ca_ops->get_info(sk, ext, skb);
out:
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
errout:
nlmsg_cancel(skb, nlh);
@@ -271,7 +272,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
}
#endif
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -758,7 +760,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
}
#endif
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3a83ce5efa80..787b3c294ce6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
* We now generate an ICMP HOST REDIRECT giving the route
* we calculated.
*/
- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+ !skb_sec_path(skb))
ip_rt_send_redirect(skb);
skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4f4bf5b99686..6207275fc749 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -659,12 +659,12 @@ static bool ipgre_netlink_encap_parms(struct nlattr *data[],
if (data[IFLA_GRE_ENCAP_SPORT]) {
ret = true;
- ipencap->sport = nla_get_u16(data[IFLA_GRE_ENCAP_SPORT]);
+ ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
}
if (data[IFLA_GRE_ENCAP_DPORT]) {
ret = true;
- ipencap->dport = nla_get_u16(data[IFLA_GRE_ENCAP_DPORT]);
+ ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
}
return ret;
@@ -673,6 +673,7 @@ static bool ipgre_netlink_encap_parms(struct nlattr *data[],
static int gre_tap_init(struct net_device *dev)
{
__gre_tunnel_init(dev);
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
return ip_tunnel_init(dev);
}
@@ -785,10 +786,10 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
t->encap.type) ||
- nla_put_u16(skb, IFLA_GRE_ENCAP_SPORT,
- t->encap.sport) ||
- nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT,
- t->encap.dport) ||
+ nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
+ t->encap.sport) ||
+ nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
+ t->encap.dport) ||
nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
t->encap.flags))
goto nla_put_failure;
@@ -828,6 +829,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
.dellink = ip_tunnel_dellink,
.get_size = ipgre_get_size,
.fill_info = ipgre_fill_info,
+ .get_link_net = ip_tunnel_get_link_net,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
@@ -842,6 +844,7 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
.dellink = ip_tunnel_dellink,
.get_size = ipgre_get_size,
.fill_info = ipgre_fill_info,
+ .get_link_net = ip_tunnel_get_link_net,
};
static int __net_init ipgre_tap_init_net(struct net *net)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b50861b22b6b..d68199d9b2b0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -755,13 +755,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
struct msghdr *msg = from;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- /* XXX: stripping const */
- if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0)
+ if (copy_from_iter(to, len, &msg->msg_iter) != len)
return -EFAULT;
} else {
__wsum csum = 0;
- /* XXX: stripping const */
- if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0)
+ if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, odd);
}
@@ -1506,23 +1504,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
/*
* Generic function to send a packet as reply to another packet.
* Used to send some TCP resets/acks so far.
- *
- * Use a fake percpu inet socket to avoid false sharing and contention.
*/
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
- .sk = {
- .__sk_common = {
- .skc_refcnt = ATOMIC_INIT(1),
- },
- .sk_wmem_alloc = ATOMIC_INIT(1),
- .sk_allocation = GFP_ATOMIC,
- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
- },
- .pmtudisc = IP_PMTUDISC_WANT,
- .uc_ttl = -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -1532,9 +1515,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
struct ipcm_cookie ipc;
struct flowi4 fl4;
struct rtable *rt = skb_rtable(skb);
+ struct net *net = sock_net(sk);
struct sk_buff *nskb;
- struct sock *sk;
- struct inet_sock *inet;
int err;
if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1547,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
if (IS_ERR(rt))
return;
- inet = &get_cpu_var(unicast_sock);
+ inet_sk(sk)->tos = arg->tos;
- inet->tos = arg->tos;
- sk = &inet->sk;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
- sock_net_set(sk, net);
- __skb_queue_head_init(&sk->sk_write_queue);
sk->sk_sndbuf = sysctl_wmem_default;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1567,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
- skb_orphan(nskb);
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
out:
- put_cpu_var(unicast_sock);
-
ip_rt_put(rt);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8a89c738b7a3..31d8c71986b4 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -37,6 +37,7 @@
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
+#include <net/checksum.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
@@ -45,14 +46,6 @@
#include <linux/errqueue.h>
#include <asm/uaccess.h>
-#define IP_CMSG_PKTINFO 1
-#define IP_CMSG_TTL 2
-#define IP_CMSG_TOS 4
-#define IP_CMSG_RECVOPTS 8
-#define IP_CMSG_RETOPTS 16
-#define IP_CMSG_PASSSEC 32
-#define IP_CMSG_ORIGDSTADDR 64
-
/*
* SOL_IP control messages.
*/
@@ -104,6 +97,20 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
+static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+ int offset)
+{
+ __wsum csum = skb->csum;
+
+ if (skb->ip_summed != CHECKSUM_COMPLETE)
+ return;
+
+ if (offset != 0)
+ csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+
+ put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
+}
+
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
char *secdata;
@@ -144,47 +151,73 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+ int offset)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned int flags = inet->cmsg_flags;
/* Ordered by supposed usage frequency */
- if (flags & 1)
+ if (flags & IP_CMSG_PKTINFO) {
ip_cmsg_recv_pktinfo(msg, skb);
- if ((flags >>= 1) == 0)
- return;
- if (flags & 1)
+ flags &= ~IP_CMSG_PKTINFO;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_TTL) {
ip_cmsg_recv_ttl(msg, skb);
- if ((flags >>= 1) == 0)
- return;
- if (flags & 1)
+ flags &= ~IP_CMSG_TTL;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_TOS) {
ip_cmsg_recv_tos(msg, skb);
- if ((flags >>= 1) == 0)
- return;
- if (flags & 1)
+ flags &= ~IP_CMSG_TOS;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_RECVOPTS) {
ip_cmsg_recv_opts(msg, skb);
- if ((flags >>= 1) == 0)
- return;
- if (flags & 1)
+ flags &= ~IP_CMSG_RECVOPTS;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_RETOPTS) {
ip_cmsg_recv_retopts(msg, skb);
- if ((flags >>= 1) == 0)
- return;
- if (flags & 1)
+ flags &= ~IP_CMSG_RETOPTS;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_PASSSEC) {
ip_cmsg_recv_security(msg, skb);
- if ((flags >>= 1) == 0)
- return;
- if (flags & 1)
+ flags &= ~IP_CMSG_PASSSEC;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_ORIGDSTADDR) {
ip_cmsg_recv_dstaddr(msg, skb);
+ flags &= ~IP_CMSG_ORIGDSTADDR;
+ if (!flags)
+ return;
+ }
+
+ if (flags & IP_CMSG_CHECKSUM)
+ ip_cmsg_recv_checksum(msg, skb, offset);
}
-EXPORT_SYMBOL(ip_cmsg_recv);
+EXPORT_SYMBOL(ip_cmsg_recv_offset);
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
bool allow_ipv6)
@@ -450,7 +483,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
serr = SKB_EXT_ERR(skb);
- if (sin) {
+ if (sin && skb->len) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
serr->addr_offset);
@@ -461,17 +494,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin_family = AF_UNSPEC;
-
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
- ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) {
- struct inet_sock *inet = inet_sk(sk);
+ memset(sin, 0, sizeof(*sin));
+ if (skb->len &&
+ (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+ ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- sin->sin_port = 0;
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
@@ -522,6 +552,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
case IP_MULTICAST_ALL:
case IP_MULTICAST_LOOP:
case IP_RECVORIGDSTADDR:
+ case IP_CHECKSUM:
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
@@ -619,6 +650,19 @@ static int do_ip_setsockopt(struct sock *sk, int level,
else
inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
break;
+ case IP_CHECKSUM:
+ if (val) {
+ if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
+ inet_inc_convert_csum(sk);
+ inet->cmsg_flags |= IP_CMSG_CHECKSUM;
+ }
+ } else {
+ if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
+ inet_dec_convert_csum(sk);
+ inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
+ }
+ }
+ break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
val &= ~INET_ECN_MASK;
@@ -1222,6 +1266,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_RECVORIGDSTADDR:
val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
break;
+ case IP_CHECKSUM:
+ val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
+ break;
case IP_TOS:
val = inet->tos;
break;
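ip_cmsg_recv_offset() above replaces the old "shift flags right and test bit 0" chain with named IP_CMSG_* mask tests, clearing each bit once it has been handled and returning as soon as no requested flags remain; IP_CHECKSUM support then slots in as just another bit. The minimal sketch below mirrors only that dispatch shape; the MSG_FLAG_* names and the emit() stub are invented for the example.

#include <stdio.h>

#define MSG_FLAG_PKTINFO  0x01
#define MSG_FLAG_TTL      0x02
#define MSG_FLAG_CHECKSUM 0x04

static void emit(const char *what)
{
        printf("emit %s\n", what);
}

/* Handle each requested ancillary item, drop its bit, and stop as soon as
 * nothing else is requested, like ip_cmsg_recv_offset() does. */
static void cmsg_dispatch(unsigned int flags)
{
        if (flags & MSG_FLAG_PKTINFO) {
                emit("pktinfo");
                flags &= ~MSG_FLAG_PKTINFO;
                if (!flags)
                        return;
        }
        if (flags & MSG_FLAG_TTL) {
                emit("ttl");
                flags &= ~MSG_FLAG_TTL;
                if (!flags)
                        return;
        }
        if (flags & MSG_FLAG_CHECKSUM)
                emit("checksum");
}

int main(void)
{
        cmsg_dispatch(MSG_FLAG_PKTINFO | MSG_FLAG_CHECKSUM);
        return 0;
}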
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index d3e447936720..2cd08280c77b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -972,6 +972,14 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
+struct net *ip_tunnel_get_link_net(const struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ return tunnel->net;
+}
+EXPORT_SYMBOL(ip_tunnel_get_link_net);
+
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 1a7e979e80ba..94efe148181c 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -531,6 +531,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
.dellink = ip_tunnel_dellink,
.get_size = vti_get_size,
.fill_info = vti_fill_info,
+ .get_link_net = ip_tunnel_get_link_net,
};
static int __init vti_init(void)
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 7fa18bc7e47f..b26376ef87f6 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -209,9 +209,9 @@ static int __init ic_open_devs(void)
last = &ic_first_dev;
rtnl_lock();
- /* bring loopback device up first */
+ /* bring loopback and DSA master network devices up first */
for_each_netdev(&init_net, dev) {
- if (!(dev->flags & IFF_LOOPBACK))
+ if (!(dev->flags & IFF_LOOPBACK) && !netdev_uses_dsa(dev))
continue;
if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
pr_err("IP-Config: Failed to open %s\n", dev->name);
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
while ((d = next)) {
next = d->next;
dev = d->dev;
- if (dev != ic_dev) {
+ if (dev != ic_dev && !netdev_uses_dsa(dev)) {
DBG(("IP-Config: Downing %s\n", dev->name));
dev_change_flags(dev, d->flags);
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 40403114f00a..915d215a7d14 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -366,12 +366,12 @@ static bool ipip_netlink_encap_parms(struct nlattr *data[],
if (data[IFLA_IPTUN_ENCAP_SPORT]) {
ret = true;
- ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+ ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
}
if (data[IFLA_IPTUN_ENCAP_DPORT]) {
ret = true;
- ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+ ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
}
return ret;
@@ -460,10 +460,10 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
tunnel->encap.type) ||
- nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
- tunnel->encap.sport) ||
- nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
- tunnel->encap.dport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
+ tunnel->encap.sport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
+ tunnel->encap.dport) ||
nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
tunnel->encap.flags))
goto nla_put_failure;
@@ -498,6 +498,7 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly = {
.dellink = ip_tunnel_dellink,
.get_size = ipip_get_size,
.fill_info = ipip_fill_info,
+ .get_link_net = ip_tunnel_get_link_net,
};
static struct xfrm_tunnel ipip_handler __read_mostly = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c8034587859d..9d78427652d2 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2290,7 +2290,8 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
if (err < 0 && err != -ENOENT)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index ff2d23d8c87a..6ecfce63201a 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
- mr.range[0].min.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- mr.range[0].max.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ mr.range[0].min.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ mr.range[0].max.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c0d82f78d364..e9f66e1cda50 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -599,18 +599,18 @@ int ping_getfrag(void *from, char *to,
struct pingfakehdr *pfh = (struct pingfakehdr *)from;
if (offset == 0) {
- if (fraglen < sizeof(struct icmphdr))
+ fraglen -= sizeof(struct icmphdr);
+ if (fraglen < 0)
BUG();
- if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
- pfh->iov, 0, fraglen - sizeof(struct icmphdr),
- &pfh->wcheck))
+ if (csum_and_copy_from_iter(to + sizeof(struct icmphdr),
+ fraglen, &pfh->wcheck,
+ &pfh->msg->msg_iter) != fraglen)
return -EFAULT;
} else if (offset < sizeof(struct icmphdr)) {
BUG();
} else {
- if (csum_partial_copy_fromiovecend
- (to, pfh->iov, offset - sizeof(struct icmphdr),
- fraglen, &pfh->wcheck))
+ if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck,
+ &pfh->msg->msg_iter) != fraglen)
return -EFAULT;
}
@@ -811,8 +811,7 @@ back_from_confirm:
pfh.icmph.checksum = 0;
pfh.icmph.un.echo.id = inet->inet_sport;
pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
- /* XXX: stripping const */
- pfh.iov = (struct iovec *)msg->msg_iter.iov;
+ pfh.msg = msg;
pfh.wcheck = 0;
pfh.family = AF_INET;
@@ -966,8 +965,11 @@ bool ping_rcv(struct sk_buff *skb)
sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
if (sk != NULL) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
pr_debug("rcv on socket %p\n", sk);
- ping_queue_rcv_skb(sk, skb_get(skb));
+ if (skb2)
+ ping_queue_rcv_skb(sk, skb2);
sock_put(sk);
return true;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8f9cd200ce20..d8953ef0770c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -292,6 +292,12 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
+ SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV),
+ SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS),
+ SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ),
+ SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
+ SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
+ SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 0bb68df5055d..f027a708b7e0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -337,7 +337,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
}
static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
- void *from, size_t length,
+ struct msghdr *msg, size_t length,
struct rtable **rtp,
unsigned int flags)
{
@@ -382,7 +382,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->transport_header = skb->network_header;
err = -EFAULT;
- if (memcpy_fromiovecend((void *)iph, from, 0, length))
+ if (memcpy_from_msg(iph, msg, length))
goto error_free;
iphlen = iph->ihl * 4;
@@ -625,8 +625,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
back_from_confirm:
if (inet->hdrincl)
- /* XXX: stripping const */
- err = raw_send_hdrinc(sk, &fl4, (struct iovec *)msg->msg_iter.iov, len,
+ err = raw_send_hdrinc(sk, &fl4, msg, len,
&rt, msg->msg_flags);
else {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..ad5064362c5c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (dst->dev->mtu < mtu)
return;
+ if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+ return;
+
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
@@ -1325,14 +1328,22 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
return ret;
}
-static DEFINE_SPINLOCK(rt_uncached_lock);
-static LIST_HEAD(rt_uncached_list);
+struct uncached_list {
+ spinlock_t lock;
+ struct list_head head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
static void rt_add_uncached_list(struct rtable *rt)
{
- spin_lock_bh(&rt_uncached_lock);
- list_add_tail(&rt->rt_uncached, &rt_uncached_list);
- spin_unlock_bh(&rt_uncached_lock);
+ struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
+
+ rt->rt_uncached_list = ul;
+
+ spin_lock_bh(&ul->lock);
+ list_add_tail(&rt->rt_uncached, &ul->head);
+ spin_unlock_bh(&ul->lock);
}
static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1340,27 +1351,32 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
struct rtable *rt = (struct rtable *) dst;
if (!list_empty(&rt->rt_uncached)) {
- spin_lock_bh(&rt_uncached_lock);
+ struct uncached_list *ul = rt->rt_uncached_list;
+
+ spin_lock_bh(&ul->lock);
list_del(&rt->rt_uncached);
- spin_unlock_bh(&rt_uncached_lock);
+ spin_unlock_bh(&ul->lock);
}
}
void rt_flush_dev(struct net_device *dev)
{
- if (!list_empty(&rt_uncached_list)) {
- struct net *net = dev_net(dev);
- struct rtable *rt;
+ struct net *net = dev_net(dev);
+ struct rtable *rt;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
- spin_lock_bh(&rt_uncached_lock);
- list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
+ spin_lock_bh(&ul->lock);
+ list_for_each_entry(rt, &ul->head, rt_uncached) {
if (rt->dst.dev != dev)
continue;
rt->dst.dev = net->loopback_dev;
dev_hold(rt->dst.dev);
dev_put(dev);
}
- spin_unlock_bh(&rt_uncached_lock);
+ spin_unlock_bh(&ul->lock);
}
}
@@ -1554,11 +1570,10 @@ static int __mkroute_input(struct sk_buff *skb,
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+ skb->protocol == htons(ETH_P_IP) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
- flags |= RTCF_DOREDIRECT;
- do_cache = false;
- }
+ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+ IPCB(skb)->flags |= IPSKB_DOREDIRECT;
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2318,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+ r->rtm_flags |= RTCF_DOREDIRECT;
if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;
@@ -2377,7 +2394,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -2469,7 +2487,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
err = rt_fill_info(net, dst, src, &fl4, skb,
NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
RTM_NEWROUTE, 0, 0);
- if (err <= 0)
+ if (err < 0)
goto errout_free;
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
@@ -2717,6 +2735,7 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
int __init ip_rt_init(void)
{
int rc = 0;
+ int cpu;
ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
if (!ip_idents)
@@ -2724,6 +2743,12 @@ int __init ip_rt_init(void)
prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+ for_each_possible_cpu(cpu) {
+ struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
+
+ INIT_LIST_HEAD(&ul->head);
+ spin_lock_init(&ul->lock);
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
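rt_add_uncached_list() and rt_flush_dev() above replace the single global rt_uncached_list and its spinlock with one list-and-lock pair per CPU: the hot add path touches only the local CPU's entry, and the rare device-flush path sweeps every per-CPU list. The pthread sketch below shows the same lock-sharding idea in userspace; NR_BUCKETS, struct node and the retarget-on-flush behaviour are assumptions standing in for the per-CPU machinery and dst/device refcount bookkeeping.

#include <pthread.h>
#include <stddef.h>

#define NR_BUCKETS 4    /* stands in for the number of possible CPUs */

struct node {
        struct node *next;
        int dev_id;
};

/* One list and one lock per bucket: writers on different buckets never
 * contend, mirroring the per-CPU rt_uncached_list. */
static struct {
        pthread_mutex_t lock;
        struct node *head;
} buckets[NR_BUCKETS];

void buckets_init(void)
{
        int i;

        for (i = 0; i < NR_BUCKETS; i++) {
                pthread_mutex_init(&buckets[i].lock, NULL);
                buckets[i].head = NULL;
        }
}

/* Fast path: add to the caller's own bucket only. */
void add_node(int bucket, struct node *n)
{
        pthread_mutex_lock(&buckets[bucket].lock);
        n->next = buckets[bucket].head;
        buckets[bucket].head = n;
        pthread_mutex_unlock(&buckets[bucket].lock);
}

/* Slow path: walk every bucket, like rt_flush_dev() looping over all CPUs;
 * here matching entries are simply retargeted to another device id. */
void flush_dev(int dev_id, int replacement_dev)
{
        int i;
        struct node *n;

        for (i = 0; i < NR_BUCKETS; i++) {
                pthread_mutex_lock(&buckets[i].lock);
                for (n = buckets[i].head; n; n = n->next) {
                        if (n->dev_id == dev_id)
                                n->dev_id = replacement_dev;
                }
                pthread_mutex_unlock(&buckets[i].lock);
        }
}

add_node() would be called with the caller's own bucket index on the fast path, while flush_dev() pays the cost of taking every lock precisely because it runs rarely.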
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index e0ee384a448f..d151539da8e6 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -604,20 +604,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_tcp_congestion_control,
},
{
- .procname = "tcp_mtu_probing",
- .data = &sysctl_tcp_mtu_probing,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "tcp_base_mss",
- .data = &sysctl_tcp_base_mss,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "tcp_workaround_signed_windows",
.data = &sysctl_tcp_workaround_signed_windows,
.maxlen = sizeof(int),
@@ -729,6 +715,13 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &one,
},
{
+ .procname = "tcp_invalid_ratelimit",
+ .data = &sysctl_tcp_invalid_ratelimit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
.procname = "icmp_msgs_per_sec",
.data = &sysctl_icmp_msgs_per_sec,
.maxlen = sizeof(int),
@@ -876,6 +869,20 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "tcp_mtu_probing",
+ .data = &init_net.ipv4.sysctl_tcp_mtu_probing,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "tcp_base_mss",
+ .data = &init_net.ipv4.sysctl_tcp_base_mss,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3075723c729b..9d72a0fcd928 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1067,11 +1067,10 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
- const struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int iovlen, flags, err, copied = 0;
- int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
+ int flags, err, copied = 0;
+ int mss_now = 0, size_goal, copied_syn = 0;
bool sg;
long timeo;
@@ -1084,7 +1083,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto out;
else if (err)
goto out_err;
- offset = copied_syn;
}
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1118,8 +1116,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
- iovlen = msg->msg_iter.nr_segs;
- iov = msg->msg_iter.iov;
copied = 0;
err = -EPIPE;
@@ -1128,151 +1124,134 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sg = !!(sk->sk_route_caps & NETIF_F_SG);
- while (--iovlen >= 0) {
- size_t seglen = iov->iov_len;
- unsigned char __user *from = iov->iov_base;
+ while (iov_iter_count(&msg->msg_iter)) {
+ int copy = 0;
+ int max = size_goal;
- iov++;
- if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
- if (offset >= seglen) {
- offset -= seglen;
- continue;
- }
- seglen -= offset;
- from += offset;
- offset = 0;
+ skb = tcp_write_queue_tail(sk);
+ if (tcp_send_head(sk)) {
+ if (skb->ip_summed == CHECKSUM_NONE)
+ max = mss_now;
+ copy = max - skb->len;
}
- while (seglen > 0) {
- int copy = 0;
- int max = size_goal;
-
- skb = tcp_write_queue_tail(sk);
- if (tcp_send_head(sk)) {
- if (skb->ip_summed == CHECKSUM_NONE)
- max = mss_now;
- copy = max - skb->len;
- }
-
- if (copy <= 0) {
+ if (copy <= 0) {
new_segment:
- /* Allocate new segment. If the interface is SG,
- * allocate skb fitting to single page.
- */
- if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
+ /* Allocate new segment. If the interface is SG,
+ * allocate skb fitting to single page.
+ */
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
- skb = sk_stream_alloc_skb(sk,
- select_size(sk, sg),
- sk->sk_allocation);
- if (!skb)
- goto wait_for_memory;
+ skb = sk_stream_alloc_skb(sk,
+ select_size(sk, sg),
+ sk->sk_allocation);
+ if (!skb)
+ goto wait_for_memory;
- /*
- * Check whether we can use HW checksum.
- */
- if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
- skb->ip_summed = CHECKSUM_PARTIAL;
+ /*
+ * Check whether we can use HW checksum.
+ */
+ if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
+ skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
- copy = size_goal;
- max = size_goal;
+ skb_entail(sk, skb);
+ copy = size_goal;
+ max = size_goal;
- /* All packets are restored as if they have
- * already been sent. skb_mstamp isn't set to
- * avoid wrong rtt estimation.
- */
- if (tp->repair)
- TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
- }
+ /* All packets are restored as if they have
+ * already been sent. skb_mstamp isn't set to
+ * avoid wrong rtt estimation.
+ */
+ if (tp->repair)
+ TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
+ }
- /* Try to append data to the end of skb. */
- if (copy > seglen)
- copy = seglen;
-
- /* Where to copy to? */
- if (skb_availroom(skb) > 0) {
- /* We have some space in skb head. Superb! */
- copy = min_t(int, copy, skb_availroom(skb));
- err = skb_add_data_nocache(sk, skb, from, copy);
- if (err)
- goto do_fault;
- } else {
- bool merge = true;
- int i = skb_shinfo(skb)->nr_frags;
- struct page_frag *pfrag = sk_page_frag(sk);
-
- if (!sk_page_frag_refill(sk, pfrag))
- goto wait_for_memory;
-
- if (!skb_can_coalesce(skb, i, pfrag->page,
- pfrag->offset)) {
- if (i == MAX_SKB_FRAGS || !sg) {
- tcp_mark_push(tp, skb);
- goto new_segment;
- }
- merge = false;
- }
+ /* Try to append data to the end of skb. */
+ if (copy > iov_iter_count(&msg->msg_iter))
+ copy = iov_iter_count(&msg->msg_iter);
+
+ /* Where to copy to? */
+ if (skb_availroom(skb) > 0) {
+ /* We have some space in skb head. Superb! */
+ copy = min_t(int, copy, skb_availroom(skb));
+ err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
+ if (err)
+ goto do_fault;
+ } else {
+ bool merge = true;
+ int i = skb_shinfo(skb)->nr_frags;
+ struct page_frag *pfrag = sk_page_frag(sk);
+
+ if (!sk_page_frag_refill(sk, pfrag))
+ goto wait_for_memory;
- copy = min_t(int, copy, pfrag->size - pfrag->offset);
-
- if (!sk_wmem_schedule(sk, copy))
- goto wait_for_memory;
-
- err = skb_copy_to_page_nocache(sk, from, skb,
- pfrag->page,
- pfrag->offset,
- copy);
- if (err)
- goto do_error;
-
- /* Update the skb. */
- if (merge) {
- skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
- } else {
- skb_fill_page_desc(skb, i, pfrag->page,
- pfrag->offset, copy);
- get_page(pfrag->page);
+ if (!skb_can_coalesce(skb, i, pfrag->page,
+ pfrag->offset)) {
+ if (i == MAX_SKB_FRAGS || !sg) {
+ tcp_mark_push(tp, skb);
+ goto new_segment;
}
- pfrag->offset += copy;
+ merge = false;
}
- if (!copied)
- TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+ copy = min_t(int, copy, pfrag->size - pfrag->offset);
- tp->write_seq += copy;
- TCP_SKB_CB(skb)->end_seq += copy;
- tcp_skb_pcount_set(skb, 0);
+ if (!sk_wmem_schedule(sk, copy))
+ goto wait_for_memory;
- from += copy;
- copied += copy;
- if ((seglen -= copy) == 0 && iovlen == 0) {
- tcp_tx_timestamp(sk, skb);
- goto out;
+ err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
+ pfrag->page,
+ pfrag->offset,
+ copy);
+ if (err)
+ goto do_error;
+
+ /* Update the skb. */
+ if (merge) {
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+ } else {
+ skb_fill_page_desc(skb, i, pfrag->page,
+ pfrag->offset, copy);
+ get_page(pfrag->page);
}
+ pfrag->offset += copy;
+ }
- if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
- continue;
+ if (!copied)
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+
+ tp->write_seq += copy;
+ TCP_SKB_CB(skb)->end_seq += copy;
+ tcp_skb_pcount_set(skb, 0);
+
+ copied += copy;
+ if (!iov_iter_count(&msg->msg_iter)) {
+ tcp_tx_timestamp(sk, skb);
+ goto out;
+ }
- if (forced_push(tp)) {
- tcp_mark_push(tp, skb);
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == tcp_send_head(sk))
- tcp_push_one(sk, mss_now);
+ if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
continue;
+ if (forced_push(tp)) {
+ tcp_mark_push(tp, skb);
+ __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+ } else if (skb == tcp_send_head(sk))
+ tcp_push_one(sk, mss_now);
+ continue;
+
wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- if (copied)
- tcp_push(sk, flags & ~MSG_MORE, mss_now,
- TCP_NAGLE_PUSH, size_goal);
+ if (copied)
+ tcp_push(sk, flags & ~MSG_MORE, mss_now,
+ TCP_NAGLE_PUSH, size_goal);
- if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
- goto do_error;
+ if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+ goto do_error;
- mss_now = tcp_send_mss(sk, &size_goal, flags);
- }
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
}
out:
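The tcp_sendmsg() rewrite above drops the hand-rolled iovlen/seglen bookkeeping (including the "skip bytes copied in SYN" offset fixup) and drives a single loop from iov_iter_count(), letting msg->msg_iter remember how far the copy has progressed. The userspace sketch below models that cursor with a tiny byte iterator over a struct iovec array; byte_iter and its helpers are invented stand-ins and deliberately much simpler than the kernel's iov_iter API.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* A minimal cursor over an iovec array: it remembers which segment and
 * which offset inside it we are at, so callers never track that themselves. */
struct byte_iter {
        const struct iovec *iov;
        size_t nr_segs;
        size_t seg;             /* current segment */
        size_t off;             /* offset within current segment */
        size_t count;           /* total bytes remaining */
};

static size_t iter_count(const struct byte_iter *it)
{
        return it->count;
}

/* Copy up to len bytes out of the iterator, advancing it; returns bytes copied. */
static size_t iter_copy(struct byte_iter *it, char *to, size_t len)
{
        size_t done = 0;

        while (done < len && it->count) {
                const struct iovec *v = &it->iov[it->seg];
                size_t avail = v->iov_len - it->off;
                size_t chunk = len - done < avail ? len - done : avail;

                memcpy(to + done, (char *)v->iov_base + it->off, chunk);
                done += chunk;
                it->off += chunk;
                it->count -= chunk;
                if (it->off == v->iov_len) {    /* segment exhausted, move on */
                        it->seg++;
                        it->off = 0;
                }
        }
        return done;
}

int main(void)
{
        char a[] = "hello ", b[] = "world";
        struct iovec iov[2] = {
                { a, strlen(a) },
                { b, strlen(b) },
        };
        struct byte_iter it = { iov, 2, 0, 0, strlen(a) + strlen(b) };
        char out[16];

        /* Single flat loop, like the reworked tcp_sendmsg(): take what fits. */
        while (iter_count(&it)) {
                size_t n = iter_copy(&it, out, 4);      /* pretend 4 bytes fit per skb */
                out[n] = '\0';
                printf("chunk: '%s'\n", out);
        }
        return 0;
}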
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index bb395d46a389..c037644eafb7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tcp_slow_start(tp, acked);
else {
bictcp_update(ca, tp->snd_cwnd);
- tcp_cong_avoid_ai(tp, ca->cnt);
+ tcp_cong_avoid_ai(tp, ca->cnt, 1);
}
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 27ead0dd16bc..d694088214cd 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
+#include <linux/jhash.h>
#include <net/tcp.h>
static DEFINE_SPINLOCK(tcp_cong_list_lock);
@@ -31,6 +32,34 @@ static struct tcp_congestion_ops *tcp_ca_find(const char *name)
return NULL;
}
+/* Must be called with rcu lock held */
+static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
+{
+ const struct tcp_congestion_ops *ca = tcp_ca_find(name);
+#ifdef CONFIG_MODULES
+ if (!ca && capable(CAP_NET_ADMIN)) {
+ rcu_read_unlock();
+ request_module("tcp_%s", name);
+ rcu_read_lock();
+ ca = tcp_ca_find(name);
+ }
+#endif
+ return ca;
+}
+
+/* Simple linear search, not much in here. */
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
+{
+ struct tcp_congestion_ops *e;
+
+ list_for_each_entry_rcu(e, &tcp_cong_list, list) {
+ if (e->key == key)
+ return e;
+ }
+
+ return NULL;
+}
+
/*
* Attach new congestion control algorithm to the list
* of available options.
@@ -45,9 +74,12 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
return -EINVAL;
}
+ ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
+
spin_lock(&tcp_cong_list_lock);
- if (tcp_ca_find(ca->name)) {
- pr_notice("%s already registered\n", ca->name);
+ if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
+ pr_notice("%s already registered or non-unique key\n",
+ ca->name);
ret = -EEXIST;
} else {
list_add_tail_rcu(&ca->list, &tcp_cong_list);
@@ -70,9 +102,50 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
spin_lock(&tcp_cong_list_lock);
list_del_rcu(&ca->list);
spin_unlock(&tcp_cong_list_lock);
+
+ /* Wait for outstanding readers to complete before the
+ * module gets removed entirely.
+ *
+ * A try_module_get() should fail by now as our module is
+ * in "going" state since no refs are held anymore and
+ * module_exit() handler being called.
+ */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
+u32 tcp_ca_get_key_by_name(const char *name)
+{
+ const struct tcp_congestion_ops *ca;
+ u32 key;
+
+ might_sleep();
+
+ rcu_read_lock();
+ ca = __tcp_ca_find_autoload(name);
+ key = ca ? ca->key : TCP_CA_UNSPEC;
+ rcu_read_unlock();
+
+ return key;
+}
+EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);
+
+char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+ const struct tcp_congestion_ops *ca;
+ char *ret = NULL;
+
+ rcu_read_lock();
+ ca = tcp_ca_find_key(key);
+ if (ca)
+ ret = strncpy(buffer, ca->name,
+ TCP_CA_NAME_MAX);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);
+
/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
@@ -107,6 +180,18 @@ void tcp_init_congestion_control(struct sock *sk)
icsk->icsk_ca_ops->init(sk);
}
+static void tcp_reinit_congestion_control(struct sock *sk,
+ const struct tcp_congestion_ops *ca)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ tcp_cleanup_congestion_control(sk);
+ icsk->icsk_ca_ops = ca;
+
+ if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
+ icsk->icsk_ca_ops->init(sk);
+}
+
/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
@@ -241,42 +326,26 @@ out:
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_congestion_ops *ca;
+ const struct tcp_congestion_ops *ca;
int err = 0;
- rcu_read_lock();
- ca = tcp_ca_find(name);
+ if (icsk->icsk_ca_dst_locked)
+ return -EPERM;
- /* no change asking for existing value */
+ rcu_read_lock();
+ ca = __tcp_ca_find_autoload(name);
+ /* No change asking for existing value */
if (ca == icsk->icsk_ca_ops)
goto out;
-
-#ifdef CONFIG_MODULES
- /* not found attempt to autoload module */
- if (!ca && capable(CAP_NET_ADMIN)) {
- rcu_read_unlock();
- request_module("tcp_%s", name);
- rcu_read_lock();
- ca = tcp_ca_find(name);
- }
-#endif
if (!ca)
err = -ENOENT;
-
else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
err = -EPERM;
-
else if (!try_module_get(ca->owner))
err = -EBUSY;
-
- else {
- tcp_cleanup_congestion_control(sk);
- icsk->icsk_ca_ops = ca;
-
- if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
- icsk->icsk_ca_ops->init(sk);
- }
+ else
+ tcp_reinit_congestion_control(sk, ca);
out:
rcu_read_unlock();
return err;
@@ -291,26 +360,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
* ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
* returns the leftover acks to adjust cwnd in congestion avoidance mode.
*/
-void tcp_slow_start(struct tcp_sock *tp, u32 acked)
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = tp->snd_cwnd + acked;
if (cwnd > tp->snd_ssthresh)
cwnd = tp->snd_ssthresh + 1;
+ acked -= cwnd - tp->snd_cwnd;
tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+ return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
-/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
+ * for every packet that was ACKed.
+ */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
+ tp->snd_cwnd_cnt += acked;
if (tp->snd_cwnd_cnt >= w) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- } else {
- tp->snd_cwnd_cnt++;
+ u32 delta = tp->snd_cwnd_cnt / w;
+
+ tp->snd_cwnd_cnt -= delta * w;
+ tp->snd_cwnd += delta;
}
+ tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
@@ -329,11 +404,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
/* In "safe" area, increase. */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
- tcp_slow_start(tp, acked);
+ if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ acked = tcp_slow_start(tp, acked);
+ if (!acked)
+ return;
+ }
/* In dangerous area, increase slowly. */
- else
- tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
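For readers following the congestion-control hunks above, here is a small standalone userspace C sketch (not kernel code) of how the reworked tcp_slow_start() hands leftover ACKs to tcp_cong_avoid_ai() so additive increase consumes them in the same call; the toy_* names and the main() scenario are invented for illustration only.

/* Illustrative userspace sketch, assuming the carry-over behaviour shown in
 * the hunks above; field names loosely mirror the patch.
 */
#include <stdio.h>

struct toy_tp {
	unsigned int snd_cwnd;
	unsigned int snd_cwnd_cnt;
	unsigned int snd_ssthresh;
	unsigned int snd_cwnd_clamp;
};

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Grow cwnd by the ACKed packets, capped at ssthresh + 1; return leftovers. */
static unsigned int toy_slow_start(struct toy_tp *tp, unsigned int acked)
{
	unsigned int cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min_u32(cwnd, tp->snd_cwnd_clamp);
	return acked;
}

/* Additive increase: roughly cwnd += acked / w, carrying the remainder. */
static void toy_cong_avoid_ai(struct toy_tp *tp, unsigned int w, unsigned int acked)
{
	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		unsigned int delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min_u32(tp->snd_cwnd, tp->snd_cwnd_clamp);
}

int main(void)
{
	struct toy_tp tp = { .snd_cwnd = 8, .snd_ssthresh = 10,
			     .snd_cwnd_clamp = 1000 };
	unsigned int acked = 5;

	acked = toy_slow_start(&tp, acked);	/* cwnd 8 -> 11, 2 acks left */
	if (acked)
		toy_cong_avoid_ai(&tp, tp.snd_cwnd, acked);
	printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
	return 0;
}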
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 6b6002416a73..4b276d1ed980 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,9 +93,7 @@ struct bictcp {
u32 epoch_start; /* beginning of an epoch */
u32 ack_cnt; /* number of acks */
u32 tcp_cwnd; /* estimated tcp cwnd */
-#define ACK_RATIO_SHIFT 4
-#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
- u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
+ u16 unused;
u8 sample_cnt; /* number of samples to decide curr_rtt */
u8 found; /* the exit point is found? */
u32 round_start; /* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
ca->bic_K = 0;
ca->delay_min = 0;
ca->epoch_start = 0;
- ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
ca->ack_cnt = 0;
ca->tcp_cwnd = 0;
ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
/*
* Compute congestion window to use.
*/
-static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
{
u32 delta, bic_target, max_cnt;
u64 offs, t;
- ca->ack_cnt++; /* count the number of ACKs */
+ ca->ack_cnt += acked; /* count the number of ACKed packets */
if (ca->last_cwnd == cwnd &&
(s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
return;
+ /* The CUBIC function can update ca->cnt at most once per jiffy.
+ * On all cwnd reduction events, ca->epoch_start is set to 0,
+ * which will force a recalculation of ca->cnt.
+ */
+ if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+ goto tcp_friendliness;
+
ca->last_cwnd = cwnd;
ca->last_time = tcp_time_stamp;
if (ca->epoch_start == 0) {
ca->epoch_start = tcp_time_stamp; /* record beginning */
- ca->ack_cnt = 1; /* start counting */
+ ca->ack_cnt = acked; /* start counting */
ca->tcp_cwnd = cwnd; /* syn with cubic */
if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
if (ca->last_max_cwnd == 0 && ca->cnt > 20)
ca->cnt = 20; /* increase cwnd 5% per RTT */
+tcp_friendliness:
/* TCP Friendly */
if (tcp_friendliness) {
u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
}
}
- ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
if (ca->cnt == 0) /* cannot be zero */
ca->cnt = 1;
}
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (tp->snd_cwnd <= tp->snd_ssthresh) {
if (hystart && after(ack, ca->end_seq))
bictcp_hystart_reset(sk);
- tcp_slow_start(tp, acked);
- } else {
- bictcp_update(ca, tp->snd_cwnd);
- tcp_cong_avoid_ai(tp, ca->cnt);
+ acked = tcp_slow_start(tp, acked);
+ if (!acked)
+ return;
}
+ bictcp_update(ca, tp->snd_cwnd, acked);
+ tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
*/
static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
u32 delay;
- if (icsk->icsk_ca_state == TCP_CA_Open) {
- u32 ratio = ca->delayed_ack;
-
- ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
- ratio += cnt;
-
- ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
- }
-
/* Some calls are for duplicates without timestamps */
if (rtt_us < 0)
return;
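The CUBIC hunk above limits how often ca->cnt is recomputed to once per tick unless a cwnd reduction resets the epoch; the userspace sketch below mirrors only that guard (toy_* names and values are invented), not the cubic-root math itself.

/* Illustrative sketch, assuming only the shape of the per-tick guard above. */
#include <stdio.h>

struct toy_cubic {
	unsigned int epoch_start;	/* 0 means "epoch not started" */
	unsigned int last_time;		/* tick of the last recomputation */
	unsigned int cnt;		/* increase cwnd by 1 every cnt ACKs */
};

static void toy_update(struct toy_cubic *ca, unsigned int now)
{
	/* Skip the expensive math if we already ran this tick and the epoch
	 * was not reset by a cwnd reduction event.
	 */
	if (ca->epoch_start && now == ca->last_time)
		return;

	if (ca->epoch_start == 0)
		ca->epoch_start = now;
	ca->last_time = now;
	ca->cnt = ca->cnt ? ca->cnt : 1;	/* placeholder for the real math */
	printf("recomputed at tick %u\n", now);
}

int main(void)
{
	struct toy_cubic ca = { 0 };

	toy_update(&ca, 100);	/* recomputes */
	toy_update(&ca, 100);	/* skipped: same tick, epoch running */
	toy_update(&ca, 101);	/* recomputes */
	return 0;
}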
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 815c85e3b1e0..53db2c309572 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -255,6 +255,9 @@ bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct tcp_fastopen_cookie valid_foc = { .len = -1 };
bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+ if (foc->len == 0) /* Client requests a cookie */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+
if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
(syn_data || foc->len >= 0) &&
tcp_fastopen_queue_check(sk))) {
@@ -265,7 +268,8 @@ bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
goto fastopen;
- if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+ if (foc->len >= 0 && /* Client presents or requests a cookie */
+ tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
foc->len == valid_foc.len &&
!memcmp(foc->val, valid_foc.val, foc->len)) {
@@ -284,11 +288,10 @@ fastopen:
LINUX_MIB_TCPFASTOPENPASSIVE);
return true;
}
- }
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+ } else if (foc->len > 0) /* Client presents an invalid cookie */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
- NET_INC_STATS_BH(sock_net(sk), foc->len ?
- LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
- LINUX_MIB_TCPFASTOPENCOOKIEREQD);
*foc = valid_foc;
return false;
}
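As a rough model of the Fast Open accounting split above, the sketch below classifies a cookie option by length and content: an empty option counts as a cookie request, a non-empty one that fails validation as a passive failure. The function name, COOKIE_SIZE constant, and returned strings are illustrative stand-ins, not kernel APIs.

/* Illustrative sketch of the decision structure in the tcp_fastopen.c hunks. */
#include <stdio.h>
#include <string.h>

#define COOKIE_SIZE 8

static const char *classify_foc(int foc_len, const char *foc,
				const char *valid, int valid_len)
{
	if (foc_len == 0)
		return "TCPFastOpenCookieReqd";	/* client asked for a cookie */
	if (foc_len == COOKIE_SIZE && foc_len == valid_len &&
	    memcmp(foc, valid, foc_len) == 0)
		return "accept SYN data";	/* cookie matched */
	if (foc_len > 0)
		return "TCPFastOpenPassiveFail";	/* bad cookie presented */
	return "no cookie option";
}

int main(void)
{
	const char *good = "01234567";
	const char *bad  = "xxxxxxxx";

	printf("%s\n", classify_foc(0, NULL, good, COOKIE_SIZE));
	printf("%s\n", classify_foc(COOKIE_SIZE, good, good, COOKIE_SIZE));
	printf("%s\n", classify_foc(COOKIE_SIZE, bad, good, COOKIE_SIZE));
	return 0;
}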
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 075ab4d5af5e..8fdd27b17306 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -100,6 +100,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -3183,8 +3184,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
- if (ca_ops->pkts_acked)
- ca_ops->pkts_acked(sk, pkts_acked, ca_seq_rtt_us);
+ if (ca_ops->pkts_acked) {
+ long rtt_us = min_t(ulong, ca_seq_rtt_us, sack_rtt_us);
+ ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+ }
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
@@ -3319,13 +3322,22 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
}
/* RFC 5961 7 [ACK Throttling] */
-static void tcp_send_challenge_ack(struct sock *sk)
+static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
{
/* unprotected vars, we don't care about overwrites */
static u32 challenge_timestamp;
static unsigned int challenge_count;
- u32 now = jiffies / HZ;
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 now;
+
+ /* First check our per-socket dupack rate limit. */
+ if (tcp_oow_rate_limited(sock_net(sk), skb,
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+ &tp->last_oow_ack_time))
+ return;
+ /* Then check the host-wide RFC 5961 rate limit. */
+ now = jiffies / HZ;
if (now != challenge_timestamp) {
challenge_timestamp = now;
challenge_count = 0;
@@ -3358,34 +3370,34 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
}
/* This routine deals with acks during a TLP episode.
+ * We mark the end of a TLP episode on receiving TLP dupack or when
+ * ack is after tlp_high_seq.
* Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
*/
static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
- bool is_tlp_dupack = (ack == tp->tlp_high_seq) &&
- !(flag & (FLAG_SND_UNA_ADVANCED |
- FLAG_NOT_DUP | FLAG_DATA_SACKED));
- /* Mark the end of TLP episode on receiving TLP dupack or when
- * ack is after tlp_high_seq.
- */
- if (is_tlp_dupack) {
- tp->tlp_high_seq = 0;
+ if (before(ack, tp->tlp_high_seq))
return;
- }
- if (after(ack, tp->tlp_high_seq)) {
+ if (flag & FLAG_DSACKING_ACK) {
+ /* This DSACK means original and TLP probe arrived; no loss */
+ tp->tlp_high_seq = 0;
+ } else if (after(ack, tp->tlp_high_seq)) {
+ /* ACK advances: there was a loss, so reduce cwnd. Reset
+ * tlp_high_seq in tcp_init_cwnd_reduction()
+ */
+ tcp_init_cwnd_reduction(sk);
+ tcp_set_ca_state(sk, TCP_CA_CWR);
+ tcp_end_cwnd_reduction(sk);
+ tcp_try_keep_open(sk);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPLOSSPROBERECOVERY);
+ } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
+ FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
+ /* Pure dupack: original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
- /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
- if (!(flag & FLAG_DSACKING_ACK)) {
- tcp_init_cwnd_reduction(sk);
- tcp_set_ca_state(sk, TCP_CA_CWR);
- tcp_end_cwnd_reduction(sk);
- tcp_try_keep_open(sk);
- NET_INC_STATS_BH(sock_net(sk),
- LINUX_MIB_TCPLOSSPROBERECOVERY);
- }
}
}
@@ -3421,7 +3433,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una)) {
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - tp->max_window)) {
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, skb);
return -1;
}
goto old_ack;
@@ -4990,7 +5002,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
tcp_paws_discard(sk, skb)) {
if (!th->rst) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
- tcp_send_dupack(sk, skb);
+ if (!tcp_oow_rate_limited(sock_net(sk), skb,
+ LINUX_MIB_TCPACKSKIPPEDPAWS,
+ &tp->last_oow_ack_time))
+ tcp_send_dupack(sk, skb);
goto discard;
}
/* Reset is accepted even if it did not pass PAWS. */
@@ -5007,7 +5022,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
if (!th->rst) {
if (th->syn)
goto syn_challenge;
- tcp_send_dupack(sk, skb);
+ if (!tcp_oow_rate_limited(sock_net(sk), skb,
+ LINUX_MIB_TCPACKSKIPPEDSEQ,
+ &tp->last_oow_ack_time))
+ tcp_send_dupack(sk, skb);
}
goto discard;
}
@@ -5023,7 +5041,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
tcp_reset(sk);
else
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, skb);
goto discard;
}
@@ -5037,7 +5055,7 @@ syn_challenge:
if (syn_inerr)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, skb);
goto discard;
}
@@ -5870,10 +5888,9 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
* TCP ECN negotiation.
*
* Exception: tcp_ca wants ECN. This is required for DCTCP
- * congestion control; it requires setting ECT on all packets,
- * including SYN. We inverse the test in this case: If our
- * local socket wants ECN, but peer only set ece/cwr (but not
- * ECT in IP header) its probably a non-DCTCP aware sender.
+ * congestion control: Linux DCTCP asserts ECT on all packets,
+ * including SYN, which is the optimal solution; however,
+ * others, such as FreeBSD, do not.
*/
static void tcp_ecn_create_request(struct request_sock *req,
const struct sk_buff *skb,
@@ -5883,18 +5900,15 @@ static void tcp_ecn_create_request(struct request_sock *req,
const struct tcphdr *th = tcp_hdr(skb);
const struct net *net = sock_net(listen_sk);
bool th_ecn = th->ece && th->cwr;
- bool ect, need_ecn, ecn_ok;
+ bool ect, ecn_ok;
if (!th_ecn)
return;
ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
- need_ecn = tcp_ca_needs_ecn(listen_sk);
ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
- if (!ect && !need_ecn && ecn_ok)
- inet_rsk(req)->ecn_ok = 1;
- else if (ect && need_ecn)
+ if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk))
inet_rsk(req)->ecn_ok = 1;
}
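The tcp_input.c hunks lean on a per-socket out-of-window rate limiter; the sketch below captures only the timestamp-comparison idea in plain userspace C (seconds instead of jiffies, invented names) and is not the kernel's tcp_oow_rate_limited().

/* Illustrative sketch: at most one dup-ACK/challenge-ACK reply per interval,
 * tracked by a "last replied" timestamp stored with the flow.
 */
#include <stdio.h>
#include <time.h>
#include <stdbool.h>

#define INVALID_RATELIMIT 1	/* analogue of sysctl_tcp_invalid_ratelimit */

/* Returns true if the reply should be suppressed (rate limited). */
static bool oow_rate_limited(time_t now, time_t *last_oow_ack_time)
{
	if (*last_oow_ack_time &&
	    now - *last_oow_ack_time < INVALID_RATELIMIT)
		return true;		/* too soon: drop silently */

	*last_oow_ack_time = now;	/* remember when we last replied */
	return false;
}

int main(void)
{
	time_t last = 0;
	time_t t0 = time(NULL);

	printf("first  suppressed=%d\n", oow_rate_limited(t0, &last));
	printf("second suppressed=%d\n", oow_rate_limited(t0, &last));
	printf("later  suppressed=%d\n", oow_rate_limited(t0 + 2, &last));
	return 0;
}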
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a3f72d7fc06c..5a2dfed4783b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.bound_dev_if = sk->sk_bound_dev_if;
arg.tos = ip_hdr(skb)->tos;
- ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
- ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);
@@ -1340,6 +1342,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
sk_setup_caps(newsk, dst);
+ tcp_ca_openreq_child(newsk, dst);
+
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
@@ -2428,14 +2432,40 @@ struct proto tcp_prot = {
};
EXPORT_SYMBOL(tcp_prot);
+static void __net_exit tcp_sk_exit(struct net *net)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+ free_percpu(net->ipv4.tcp_sk);
+}
+
static int __net_init tcp_sk_init(struct net *net)
{
+ int res, cpu;
+
+ net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+ if (!net->ipv4.tcp_sk)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ struct sock *sk;
+
+ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+ IPPROTO_TCP, net);
+ if (res)
+ goto fail;
+ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+ }
net->ipv4.sysctl_tcp_ecn = 2;
+ net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
return 0;
-}
-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+ tcp_sk_exit(net);
+
+ return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
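The per-netns control-socket change above follows an allocate-per-CPU, reuse-the-exit-path-on-failure pattern; below is a minimal userspace analogue, with malloc standing in for inet_ctl_sock_create() and an invented NCPUS constant.

/* Illustrative sketch of the init/teardown pattern in the tcp_ipv4.c hunk. */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct toy_net {
	int **per_cpu_sk;	/* analogue of net->ipv4.tcp_sk */
};

static void toy_net_exit(struct toy_net *net)
{
	int cpu;

	if (!net->per_cpu_sk)
		return;
	for (cpu = 0; cpu < NCPUS; cpu++)
		free(net->per_cpu_sk[cpu]);	/* free(NULL) is a no-op */
	free(net->per_cpu_sk);
	net->per_cpu_sk = NULL;
}

static int toy_net_init(struct toy_net *net)
{
	int cpu;

	net->per_cpu_sk = calloc(NCPUS, sizeof(*net->per_cpu_sk));
	if (!net->per_cpu_sk)
		return -1;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		net->per_cpu_sk[cpu] = malloc(sizeof(int));
		if (!net->per_cpu_sk[cpu]) {
			toy_net_exit(net);	/* shared failure path */
			return -1;
		}
		*net->per_cpu_sk[cpu] = cpu;	/* "control socket" per CPU */
	}
	return 0;
}

int main(void)
{
	struct toy_net net = { NULL };

	if (toy_net_init(&net) == 0)
		puts("per-cpu control sockets ready");
	toy_net_exit(&net);
	return 0;
}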
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 272327134a1b..2379c1b4efb2 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -47,6 +47,10 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
return;
percpu_counter_destroy(&cg_proto->sockets_allocated);
+
+ if (test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+ static_key_slow_dec(&memcg_socket_limit_enabled);
+
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
@@ -120,7 +124,7 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
switch (of_cft(of)->private) {
case RES_LIMIT:
/* see memcontrol.c */
- ret = page_counter_memparse(buf, &nr_pages);
+ ret = page_counter_memparse(buf, "-1", &nr_pages);
if (ret)
break;
mutex_lock(&tcp_limit_mutex);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index ed9c9a91851c..e5f41bd5ec1b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -886,7 +886,8 @@ static int tcp_metrics_dump_info(struct sk_buff *skb,
if (tcp_metrics_fill_info(skb, tm) < 0)
goto nla_put_failure;
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 63d2680b65db..dd11ac7798c6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -58,6 +58,25 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
return seq == e_win && seq == end_seq;
}
+static enum tcp_tw_status
+tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
+ const struct sk_buff *skb, int mib_idx)
+{
+ struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+
+ if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
+ &tcptw->tw_last_oow_ack_time)) {
+ /* Send ACK. Note, we do not put the bucket,
+ * it will be released by caller.
+ */
+ return TCP_TW_ACK;
+ }
+
+ /* We are rate-limiting, so just release the tw sock and drop skb. */
+ inet_twsk_put(tw);
+ return TCP_TW_SUCCESS;
+}
+
/*
* Main purpose of TIME-WAIT state is to close connection gracefully,
* when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -116,7 +135,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tcptw->tw_rcv_nxt,
tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
- return TCP_TW_ACK;
+ return tcp_timewait_check_oow_rate_limit(
+ tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
if (th->rst)
goto kill;
@@ -250,10 +270,8 @@ kill:
inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
TCP_TIMEWAIT_LEN);
- /* Send ACK. Note, we do not put the bucket,
- * it will be released by caller.
- */
- return TCP_TW_ACK;
+ return tcp_timewait_check_oow_rate_limit(
+ tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
}
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
@@ -289,6 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
tcptw->tw_ts_offset = tp->tsoffset;
+ tcptw->tw_last_oow_ack_time = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
@@ -399,6 +418,32 @@ static void tcp_ecn_openreq_child(struct tcp_sock *tp,
tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+ bool ca_got_dst = false;
+
+ if (ca_key != TCP_CA_UNSPEC) {
+ const struct tcp_congestion_ops *ca;
+
+ rcu_read_lock();
+ ca = tcp_ca_find_key(ca_key);
+ if (likely(ca && try_module_get(ca->owner))) {
+ icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+ icsk->icsk_ca_ops = ca;
+ ca_got_dst = true;
+ }
+ rcu_read_unlock();
+ }
+
+ if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+ tcp_assign_congestion_control(sk);
+
+ tcp_set_ca_state(sk, TCP_CA_Open);
+}
+EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
+
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
@@ -441,6 +486,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_enable_early_retrans(newtp);
newtp->tlp_high_seq = 0;
newtp->lsndtime = treq->snt_synack;
+ newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
/* So many TCP implementations out there (incorrectly) count the
@@ -451,10 +497,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->snd_cwnd = TCP_INIT_CWND;
newtp->snd_cwnd_cnt = 0;
- if (!try_module_get(newicsk->icsk_ca_ops->owner))
- tcp_assign_congestion_control(newsk);
-
- tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
__skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
@@ -583,7 +625,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* Reset timer after retransmitting SYNACK, similar to
* the idea of fast retransmit in recovery.
*/
- if (!inet_rtx_syn_ack(sk, req))
+ if (!tcp_oow_rate_limited(sock_net(sk), skb,
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV,
+ &tcp_rsk(req)->last_oow_ack_time) &&
+ !inet_rtx_syn_ack(sk, req))
req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
TCP_RTO_MAX) + jiffies;
return NULL;
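To show the idea behind keying congestion-control modules by a hash of their name (tcp_ca_find_key() and tcp_ca_openreq_child() above), here is a hedged userspace sketch: a route carries a 32-bit key and the child socket resolves it back to an ops structure, falling back to the default when the key is unknown. The FNV-style hash, toy_* names, and fallback string are assumptions, not the kernel implementation.

/* Illustrative sketch only; jhash() is replaced by a trivial stand-in. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CA_UNSPEC 0

struct toy_ca_ops {
	const char *name;
	uint32_t key;
};

static uint32_t toy_hash(const char *name)
{
	uint32_t h = 2166136261u;	/* FNV-1a, stand-in for jhash */

	while (*name)
		h = (h ^ (uint8_t)*name++) * 16777619u;
	return h ? h : 1;		/* never collide with CA_UNSPEC */
}

static struct toy_ca_ops ca_list[] = {
	{ "reno" }, { "cubic" }, { "dctcp" },
};

static struct toy_ca_ops *toy_ca_find_key(uint32_t key)
{
	size_t i;

	for (i = 0; i < sizeof(ca_list) / sizeof(ca_list[0]); i++)
		if (ca_list[i].key == key)
			return &ca_list[i];
	return NULL;
}

int main(void)
{
	size_t i;
	uint32_t dst_key;
	struct toy_ca_ops *ca;

	for (i = 0; i < sizeof(ca_list) / sizeof(ca_list[0]); i++)
		ca_list[i].key = toy_hash(ca_list[i].name);

	dst_key = toy_hash("dctcp");		/* key stored on the route */
	ca = toy_ca_find_key(dst_key);
	printf("child uses %s\n", ca ? ca->name : "default");

	ca = toy_ca_find_key(CA_UNSPEC);	/* unknown key -> fallback */
	printf("child uses %s\n", ca ? ca->name : "default");
	return 0;
}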
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 65caf8b95e17..a2a796c5536b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -59,9 +59,6 @@ int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
*/
int sysctl_tcp_tso_win_divisor __read_mostly = 3;
-int sysctl_tcp_mtu_probing __read_mostly = 0;
-int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
-
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@ -948,7 +945,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb_orphan(skb);
skb->sk = sk;
- skb->destructor = tcp_wfree;
+ skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree;
skb_set_hash_from_sk(skb, sk);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
@@ -1350,11 +1347,12 @@ void tcp_mtup_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct net *net = sock_net(sk);
- icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
+ icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
icsk->icsk_af_ops->net_header_len;
- icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);
@@ -2939,6 +2937,25 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
}
EXPORT_SYMBOL(tcp_make_synack);
+static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_congestion_ops *ca;
+ u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+
+ if (ca_key == TCP_CA_UNSPEC)
+ return;
+
+ rcu_read_lock();
+ ca = tcp_ca_find_key(ca_key);
+ if (likely(ca && try_module_get(ca->owner))) {
+ module_put(icsk->icsk_ca_ops->owner);
+ icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+ icsk->icsk_ca_ops = ca;
+ }
+ rcu_read_unlock();
+}
+
/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
{
@@ -2964,6 +2981,8 @@ static void tcp_connect_init(struct sock *sk)
tcp_mtup_init(sk);
tcp_sync_mss(sk, dst_mtu(dst));
+ tcp_ca_dst_init(sk, dst);
+
if (!tp->window_clamp)
tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
tp->advmss = dst_metric_advmss(dst);
@@ -3034,7 +3053,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_request *fo = tp->fastopen_req;
- int syn_loss = 0, space, err = 0;
+ int syn_loss = 0, space, err = 0, copied;
unsigned long last_syn_loss = 0;
struct sk_buff *syn_data;
@@ -3072,11 +3091,16 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
goto fallback;
syn_data->ip_summed = CHECKSUM_PARTIAL;
memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
- if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
- fo->data->msg_iter.iov, 0, space))) {
+ copied = copy_from_iter(skb_put(syn_data, space), space,
+ &fo->data->msg_iter);
+ if (unlikely(!copied)) {
kfree_skb(syn_data);
goto fallback;
}
+ if (copied != space) {
+ skb_trim(syn_data, copied);
+ space = copied;
+ }
/* No more data pending in inet_wait_for_connect() */
if (space == fo->size)
@@ -3244,6 +3268,14 @@ void tcp_send_ack(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
+ /* We do not want pure acks influencing TCP Small Queues or fq/pacing
+ * too much.
+ * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
+ * We also avoid tcp_wfree() overhead (cache line miss accessing
+ * tp->tsq_flags) by using regular sock_wfree()
+ */
+ skb_set_tcp_pure_ack(buff);
+
/* Send it off, this clears delayed acks for us. */
skb_mstamp_get(&buff->skb_mstamp);
tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
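The Fast Open client-side hunk above now tolerates a short copy from the iterator and trims the SYN payload instead of giving up; the sketch below reproduces only that control flow with a plain buffer, and toy_copy_from_iter() is an invented stand-in for copy_from_iter().

/* Illustrative sketch of the partial-copy handling in tcp_send_syn_data(). */
#include <stdio.h>
#include <string.h>

struct toy_iter {
	const char *data;
	size_t len;
};

static size_t toy_copy_from_iter(char *dst, size_t want, struct toy_iter *it)
{
	size_t n = want < it->len ? want : it->len;

	memcpy(dst, it->data, n);
	it->data += n;
	it->len -= n;
	return n;	/* may be short, like copy_from_iter() */
}

int main(void)
{
	char syn_data[16];
	size_t space = sizeof(syn_data);
	struct toy_iter it = { "hello", 5 };	/* only 5 bytes pending */
	size_t copied = toy_copy_from_iter(syn_data, space, &it);

	if (copied == 0) {
		puts("fallback: send plain SYN");
		return 0;
	}
	if (copied != space)
		space = copied;		/* trim, as skb_trim() does */
	printf("SYN carries %zu byte(s) of data\n", space);
	return 0;
}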
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 6824afb65d93..333bcb2415ff 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp, acked);
else
- tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
+ tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+ 1);
}
static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 1829c7fbc77e..0732b787904e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -101,17 +101,20 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
+ struct net *net = sock_net(sk);
+
/* Black hole detection */
- if (sysctl_tcp_mtu_probing) {
+ if (net->ipv4.sysctl_tcp_mtu_probing) {
if (!icsk->icsk_mtup.enabled) {
icsk->icsk_mtup.enabled = 1;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
+ struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
int mss;
mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
- mss = min(sysctl_tcp_base_mss, mss);
+ mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
mss = max(mss, 68 - tp->tcp_header_len);
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index a4d2d2d88dca..112151eeee45 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/* In the "non-congestive state", increase cwnd
* every rtt.
*/
- tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
} else {
/* In the "congestive state", increase cwnd
* every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index cd7273218598..17d35662930d 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
} else {
/* Reno */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
}
/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 13b4dcf86ef6..97ef1f8b7be8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1329,7 +1329,7 @@ try_again:
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
+ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
err = copied;
if (flags & MSG_TRUNC)
@@ -1806,7 +1806,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
- if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
inet_compute_pseudo);
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0a9279..4a000f1dd757 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
s_slot = cb->args[0];
num = s_num = cb->args[1];
- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];
+ num = 0;
+
if (hlist_nulls_empty(&hslot->head))
continue;
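The udp_diag.c fix above is about resuming a hash-table dump correctly: the per-slot counter must restart at 0 for every slot, while the saved resume position only applies to the first slot visited. The small sketch below shows that corrected cursor handling with an invented bucket layout.

/* Illustrative sketch of the (slot, num) dump cursor after the fix. */
#include <stdio.h>

#define SLOTS 3

static const int bucket_len[SLOTS] = { 2, 3, 1 };

static void dump_from(int s_slot, int s_num)
{
	int slot, num;

	for (slot = s_slot; slot < SLOTS; s_num = 0, slot++) {
		num = 0;	/* the fix: reset for every slot */
		for (; num < bucket_len[slot]; num++) {
			if (num < s_num)
				continue;	/* skip entries already dumped */
			printf("slot %d entry %d\n", slot, num);
		}
	}
}

int main(void)
{
	dump_from(1, 1);	/* resume in slot 1 after its first entry */
	return 0;
}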
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index d3e537ef6b7f..d10f6f4ead27 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -339,7 +339,8 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
- pp = uo_priv->offload->callbacks.gro_receive(head, skb);
+ pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+ uo_priv->offload);
out_unlock:
rcu_read_unlock();
@@ -395,7 +396,9 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
if (uo_priv != NULL) {
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
- err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+ err = uo_priv->offload->callbacks.gro_complete(skb,
+ nhoff + sizeof(struct udphdr),
+ uo_priv->offload);
}
rcu_read_unlock();
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 1671263e5fa0..c83b35485056 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -63,7 +63,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
inet_sk(sk)->mc_loop = 0;
/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
- udp_set_convert_csum(sk, true);
+ inet_inc_convert_csum(sk);
rcu_assign_sk_user_data(sk, cfg->sk_user_data);
@@ -75,10 +75,10 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
-int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst,
- __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
- __be16 dst_port, bool xnet)
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+ __be16 df, __be16 src_port, __be16 dst_port,
+ bool xnet, bool nocheck)
{
struct udphdr *uh;
@@ -90,9 +90,9 @@ int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
uh->source = src_port;
uh->len = htons(skb->len);
- udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len);
+ udp_set_csum(nocheck, skb, src, dst, skb->len);
- return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+ return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP,
tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f7c8bbeb27b7..98e4a63d72bb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -201,6 +201,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.disable_ipv6 = 0,
.accept_dad = 1,
.suppress_frag_ndisc = 1,
+ .accept_ra_mtu = 1,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -238,6 +239,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.disable_ipv6 = 0,
.accept_dad = 1,
.suppress_frag_ndisc = 1,
+ .accept_ra_mtu = 1,
};
/* Check if a valid qdisc is available */
@@ -489,7 +491,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -619,7 +622,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
- -1) <= 0) {
+ -1) < 0) {
rcu_read_unlock();
goto done;
}
@@ -635,7 +638,7 @@ cont:
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
- -1) <= 0)
+ -1) < 0)
goto done;
else
h++;
@@ -646,7 +649,7 @@ cont:
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
- -1) <= 0)
+ -1) < 0)
goto done;
else
h++;
@@ -1519,15 +1522,30 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict)
{
+ return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
+}
+EXPORT_SYMBOL(ipv6_chk_addr);
+
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict,
+ u32 banned_flags)
+{
struct inet6_ifaddr *ifp;
unsigned int hash = inet6_addr_hash(addr);
+ u32 ifp_flags;
rcu_read_lock_bh();
hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
+ /* Decouple optimistic from tentative for evaluation here.
+ * Ban optimistic addresses explicitly, when required.
+ */
+ ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
+ ? (ifp->flags&~IFA_F_TENTATIVE)
+ : ifp->flags;
if (ipv6_addr_equal(&ifp->addr, addr) &&
- !(ifp->flags&IFA_F_TENTATIVE) &&
+ !(ifp_flags&banned_flags) &&
(dev == NULL || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
rcu_read_unlock_bh();
@@ -1538,7 +1556,7 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
rcu_read_unlock_bh();
return 0;
}
-EXPORT_SYMBOL(ipv6_chk_addr);
+EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
struct net_device *dev)
@@ -4047,7 +4065,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
goto error;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
error:
nlmsg_cancel(skb, nlh);
@@ -4076,7 +4095,8 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
return -EMSGSIZE;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
@@ -4101,7 +4121,8 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
return -EMSGSIZE;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
enum addr_type_t {
@@ -4134,7 +4155,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_NEWADDR,
NLM_F_MULTI);
- if (err <= 0)
+ if (err < 0)
break;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
}
@@ -4151,7 +4172,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_GETMULTICAST,
NLM_F_MULTI);
- if (err <= 0)
+ if (err < 0)
break;
}
break;
@@ -4166,7 +4187,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_GETANYCAST,
NLM_F_MULTI);
- if (err <= 0)
+ if (err < 0)
break;
}
break;
@@ -4209,7 +4230,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
goto cont;
if (in6_dump_addrs(idev, skb, cb, type,
- s_ip_idx, &ip_idx) <= 0)
+ s_ip_idx, &ip_idx) < 0)
goto done;
cont:
idx++;
@@ -4376,6 +4397,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
+ array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
}
static inline size_t inet6_ifla6_size(void)
@@ -4572,6 +4594,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
return 0;
}
+static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
+ [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
+ [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
+};
+
+static int inet6_validate_link_af(const struct net_device *dev,
+ const struct nlattr *nla)
+{
+ struct nlattr *tb[IFLA_INET6_MAX + 1];
+
+ if (dev && !__in6_dev_get(dev))
+ return -EAFNOSUPPORT;
+
+ return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
+}
+
static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
{
int err = -EINVAL;
@@ -4638,7 +4676,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
goto nla_put_failure;
nla_nest_end(skb, protoinfo);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -4670,7 +4709,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
if (inet6_fill_ifinfo(skb, idev,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- RTM_NEWLINK, NLM_F_MULTI) <= 0)
+ RTM_NEWLINK, NLM_F_MULTI) < 0)
goto out;
cont:
idx++;
@@ -4747,7 +4786,8 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
ci.valid_time = ntohl(pinfo->valid);
if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -5253,6 +5293,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec,
},
{
+ .procname = "accept_ra_mtu",
+ .data = &ipv6_devconf.accept_ra_mtu,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
/* sentinel */
}
},
@@ -5389,10 +5436,11 @@ static struct pernet_operations addrconf_ops = {
.exit = addrconf_exit_net,
};
-static struct rtnl_af_ops inet6_ops = {
+static struct rtnl_af_ops inet6_ops __read_mostly = {
.family = AF_INET6,
.fill_link_af = inet6_fill_link_af,
.get_link_af_size = inet6_get_link_af_size,
+ .validate_link_af = inet6_validate_link_af,
.set_link_af = inet6_set_link_af,
};
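A compact sketch of the banned_flags logic introduced by ipv6_chk_addr_and_flags() above: an optimistic address sheds the tentative bit before the caller's ban mask is applied, so default callers still reject tentative addresses while optimistic ones pass unless explicitly banned. The flag values and addr_usable() helper are illustrative assumptions.

/* Illustrative sketch only; real IFA_F_* values differ. */
#include <stdio.h>
#include <stdbool.h>

#define IFA_F_TENTATIVE  0x01
#define IFA_F_OPTIMISTIC 0x02

static bool addr_usable(unsigned int ifp_flags, unsigned int banned_flags)
{
	unsigned int flags = (ifp_flags & IFA_F_OPTIMISTIC)
			     ? (ifp_flags & ~IFA_F_TENTATIVE)
			     : ifp_flags;

	return !(flags & banned_flags);
}

int main(void)
{
	unsigned int optimistic = IFA_F_OPTIMISTIC | IFA_F_TENTATIVE;
	unsigned int tentative  = IFA_F_TENTATIVE;

	/* Default callers still ban tentative addresses... */
	printf("tentative banned:  %d\n", !addr_usable(tentative, IFA_F_TENTATIVE));
	/* ...but an optimistic address passes unless explicitly banned. */
	printf("optimistic usable: %d\n", addr_usable(optimistic, IFA_F_TENTATIVE));
	printf("optimistic banned: %d\n",
	       !addr_usable(optimistic, IFA_F_OPTIMISTIC));
	return 0;
}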
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index fd0dc47f471d..e43e79d0a612 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -490,7 +490,8 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
return -EMSGSIZE;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -510,7 +511,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq,
RTM_NEWADDRLABEL,
NLM_F_MULTI);
- if (err <= 0)
+ if (err < 0)
break;
}
idx++;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 100c589a2a6c..c215be70cac0 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -369,7 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
serr = SKB_EXT_ERR(skb);
- if (sin) {
+ if (sin && skb->len) {
const unsigned char *nh = skb_network_header(skb);
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
@@ -393,11 +393,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin6_family = AF_UNSPEC;
- if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ memset(sin, 0, sizeof(*sin));
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) {
sin->sin6_family = AF_INET6;
- sin->sin6_flowinfo = 0;
- sin->sin6_port = 0;
if (np->rxopt.all) {
if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
@@ -412,12 +410,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
ipv6_iface_scope_id(&sin->sin6_addr,
IP6CB(skb)->iif);
} else {
- struct inet_sock *inet = inet_sk(sk);
-
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin->sin6_addr);
- sin->sin6_scope_id = 0;
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index d674152b6ede..a5e95199585e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -427,7 +427,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
* Dest addr check
*/
- if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
+ if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
if (type != ICMPV6_PKT_TOOBIG &&
!(type == ICMPV6_PARAMPROB &&
code == ICMPV6_UNK_OPTION &&
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b2d1838897c9..263ef4143bff 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -277,7 +277,6 @@ static int fib6_dump_node(struct fib6_walker *w)
w->leaf = rt;
return 1;
}
- WARN_ON(res == 0);
}
w->leaf = NULL;
return 0;
@@ -630,33 +629,59 @@ static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
RTF_GATEWAY;
}
-static int fib6_commit_metrics(struct dst_entry *dst,
- struct nlattr *mx, int mx_len)
+static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
{
- struct nlattr *nla;
- int remaining;
- u32 *mp;
+ int i;
+
+ for (i = 0; i < RTAX_MAX; i++) {
+ if (test_bit(i, mxc->mx_valid))
+ mp[i] = mxc->mx[i];
+ }
+}
+
+static int fib6_commit_metrics(struct dst_entry *dst, struct mx6_config *mxc)
+{
+ if (!mxc->mx)
+ return 0;
if (dst->flags & DST_HOST) {
- mp = dst_metrics_write_ptr(dst);
- } else {
- mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
- if (!mp)
+ u32 *mp = dst_metrics_write_ptr(dst);
+
+ if (unlikely(!mp))
return -ENOMEM;
- dst_init_metrics(dst, mp, 0);
- }
- nla_for_each_attr(nla, mx, mx_len, remaining) {
- int type = nla_type(nla);
+ fib6_copy_metrics(mp, mxc);
+ } else {
+ dst_init_metrics(dst, mxc->mx, false);
- if (type) {
- if (type > RTAX_MAX)
- return -EINVAL;
+ /* We've stolen mx now. */
+ mxc->mx = NULL;
+ }
- mp[type - 1] = nla_get_u32(nla);
+ return 0;
+}
+
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+ struct net *net)
+{
+ if (atomic_read(&rt->rt6i_ref) != 1) {
+ /* This route is used as dummy address holder in some split
+ * nodes. It is not leaked, but it still holds other resources,
+ * which must be released in time. So, scan ascendant nodes
+ * and replace dummy references to this route with references
+ * to still alive ones.
+ */
+ while (fn) {
+ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+ fn->leaf = fib6_find_prefix(net, fn);
+ atomic_inc(&fn->leaf->rt6i_ref);
+ rt6_release(rt);
+ }
+ fn = fn->parent;
}
+ /* No more references are possible at this point. */
+ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
}
- return 0;
}
/*
@@ -664,7 +689,7 @@ static int fib6_commit_metrics(struct dst_entry *dst,
*/
static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
- struct nl_info *info, struct nlattr *mx, int mx_len)
+ struct nl_info *info, struct mx6_config *mxc)
{
struct rt6_info *iter = NULL;
struct rt6_info **ins;
@@ -773,11 +798,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
- if (mx) {
- err = fib6_commit_metrics(&rt->dst, mx, mx_len);
- if (err)
- return err;
- }
+ err = fib6_commit_metrics(&rt->dst, mxc);
+ if (err)
+ return err;
+
rt->dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
@@ -797,21 +821,22 @@ add:
pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
return -ENOENT;
}
- if (mx) {
- err = fib6_commit_metrics(&rt->dst, mx, mx_len);
- if (err)
- return err;
- }
+
+ err = fib6_commit_metrics(&rt->dst, mxc);
+ if (err)
+ return err;
+
*ins = rt;
rt->rt6i_node = fn;
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info);
- rt6_release(iter);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
+ fib6_purge_rt(iter, fn, info->nl_net);
+ rt6_release(iter);
}
return 0;
@@ -838,8 +863,8 @@ void fib6_force_start_gc(struct net *net)
* with source addr info in sub-trees
*/
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
- struct nlattr *mx, int mx_len)
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ struct nl_info *info, struct mx6_config *mxc)
{
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
@@ -934,7 +959,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
}
#endif
- err = fib6_add_rt2node(fn, rt, info, mx, mx_len);
+ err = fib6_add_rt2node(fn, rt, info, mxc);
if (!err) {
fib6_start_gc(info->nl_net, rt);
if (!(rt->rt6i_flags & RTF_CACHE))
@@ -1322,24 +1347,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fn = fib6_repair_tree(net, fn);
}
- if (atomic_read(&rt->rt6i_ref) != 1) {
- /* This route is used as dummy address holder in some split
- * nodes. It is not leaked, but it still holds other resources,
- * which must be released in time. So, scan ascendant nodes
- * and replace dummy references to this route with references
- * to still alive ones.
- */
- while (fn) {
- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
- fn->leaf = fib6_find_prefix(net, fn);
- atomic_inc(&fn->leaf->rt6i_ref);
- rt6_release(rt);
- }
- fn = fn->parent;
- }
- /* No more references are possible at this point. */
- BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
- }
+ fib6_purge_rt(rt, fn, net);
inet6_rt_notify(RTM_DELROUTE, rt, info);
rt6_release(rt);
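The mx6_config change above pre-parses route metrics into a fixed array plus a validity bitmap so fib6_copy_metrics() copies only the slots whose bit is set; the sketch below shows that idea with made-up TOY_RTAX_* indices and an 8-bit mask.

/* Illustrative sketch of copy-only-valid-metric-slots; not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TOY_RTAX_MAX 8
#define TOY_RTAX_MTU 2
#define TOY_RTAX_HOPLIMIT 5

struct toy_mx6_config {
	uint32_t mx[TOY_RTAX_MAX];
	uint8_t mx_valid;		/* one bit per metric slot */
};

static bool toy_test_bit(int bit, uint8_t mask)
{
	return (mask >> bit) & 1;
}

static void toy_copy_metrics(uint32_t *mp, const struct toy_mx6_config *mxc)
{
	int i;

	for (i = 0; i < TOY_RTAX_MAX; i++)
		if (toy_test_bit(i, mxc->mx_valid))
			mp[i] = mxc->mx[i];	/* untouched slots keep defaults */
}

int main(void)
{
	uint32_t metrics[TOY_RTAX_MAX] = { 0 };
	struct toy_mx6_config mxc = { .mx_valid = 0 };

	mxc.mx[TOY_RTAX_MTU] = 1280;
	mxc.mx_valid |= 1 << TOY_RTAX_MTU;
	mxc.mx[TOY_RTAX_HOPLIMIT] = 64;
	mxc.mx_valid |= 1 << TOY_RTAX_HOPLIMIT;

	toy_copy_metrics(metrics, &mxc);
	printf("mtu=%u hoplimit=%u\n",
	       metrics[TOY_RTAX_MTU], metrics[TOY_RTAX_HOPLIMIT]);
	return 0;
}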
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 13cda4c6313b..bc28b7d42a6d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (code == ICMPV6_HDR_FIELD)
teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
- if (teli && teli == info - 2) {
+ if (teli && teli == be32_to_cpu(info) - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
break;
case ICMPV6_PKT_TOOBIG:
- mtu = info - offset;
+ mtu = be32_to_cpu(info) - offset;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
@@ -1662,6 +1662,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
.dellink = ip6gre_dellink,
.get_size = ip6gre_get_size,
.fill_info = ip6gre_fill_info,
+ .get_link_net = ip6_tnl_get_link_net,
};
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
@@ -1675,6 +1676,7 @@ static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
.changelink = ip6gre_changelink,
.get_size = ip6gre_get_size,
.fill_info = ip6gre_fill_info,
+ .get_link_net = ip6_tnl_get_link_net,
};
/*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ce69a12ae48c..d33df4cbd872 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
skb_copy_secmark(to, from);
}
-static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
- static u32 ip6_idents_hashrnd __read_mostly;
- u32 hash, id;
-
- net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
- hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
- hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
-
- id = ip_idents_reserve(hash, 1);
- fhdr->identification = htonl(id);
-}
-
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct sk_buff *frag;
@@ -1041,6 +1027,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline int ip6_ufo_append_data(struct sock *sk,
+ struct sk_buff_head *queue,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
@@ -1056,7 +1043,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
* device, so create one single skb packet containing complete
* udp datagram
*/
- skb = skb_peek_tail(&sk->sk_write_queue);
+ skb = skb_peek_tail(queue);
if (skb == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
@@ -1079,7 +1066,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb->protocol = htons(ETH_P_IPV6);
skb->csum = 0;
- __skb_queue_tail(&sk->sk_write_queue, skb);
+ __skb_queue_tail(queue, skb);
} else if (skb_is_gso(skb)) {
goto append;
}
@@ -1135,99 +1122,106 @@ static void ip6_append_data_mtu(unsigned int *mtu,
}
}
-int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
- int offset, int len, int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
- int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags, int dontfrag)
+static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork,
+ int hlimit, int tclass, struct ipv6_txoptions *opt,
+ struct rt6_info *rt, struct flowi6 *fl6)
{
- struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet_cork *cork;
+ unsigned int mtu;
+
+ /*
+ * setup for corking
+ */
+ if (opt) {
+ if (WARN_ON(v6_cork->opt))
+ return -EINVAL;
+
+ v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+ if (unlikely(v6_cork->opt == NULL))
+ return -ENOBUFS;
+
+ v6_cork->opt->tot_len = opt->tot_len;
+ v6_cork->opt->opt_flen = opt->opt_flen;
+ v6_cork->opt->opt_nflen = opt->opt_nflen;
+
+ v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
+ sk->sk_allocation);
+ if (opt->dst0opt && !v6_cork->opt->dst0opt)
+ return -ENOBUFS;
+
+ v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
+ sk->sk_allocation);
+ if (opt->dst1opt && !v6_cork->opt->dst1opt)
+ return -ENOBUFS;
+
+ v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
+ sk->sk_allocation);
+ if (opt->hopopt && !v6_cork->opt->hopopt)
+ return -ENOBUFS;
+
+ v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
+ sk->sk_allocation);
+ if (opt->srcrt && !v6_cork->opt->srcrt)
+ return -ENOBUFS;
+
+ /* need source address above miyazawa*/
+ }
+ dst_hold(&rt->dst);
+ cork->base.dst = &rt->dst;
+ cork->fl.u.ip6 = *fl6;
+ v6_cork->hop_limit = hlimit;
+ v6_cork->tclass = tclass;
+ if (rt->dst.flags & DST_XFRM_TUNNEL)
+ mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ else
+ mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ if (np->frag_size < mtu) {
+ if (np->frag_size)
+ mtu = np->frag_size;
+ }
+ cork->base.fragsize = mtu;
+ if (dst_allfrag(rt->dst.path))
+ cork->base.flags |= IPCORK_ALLFRAG;
+ cork->base.length = 0;
+
+ return 0;
+}
+
+static int __ip6_append_data(struct sock *sk,
+ struct flowi6 *fl6,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork,
+ struct inet6_cork *v6_cork,
+ struct page_frag *pfrag,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ unsigned int flags, int dontfrag)
+{
struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
- int exthdrlen;
- int dst_exthdrlen;
+ int exthdrlen = 0;
+ int dst_exthdrlen = 0;
int hh_len;
int copy;
int err;
int offset = 0;
__u8 tx_flags = 0;
u32 tskey = 0;
+ struct rt6_info *rt = (struct rt6_info *)cork->dst;
+ struct ipv6_txoptions *opt = v6_cork->opt;
+ int csummode = CHECKSUM_NONE;
- if (flags&MSG_PROBE)
- return 0;
- cork = &inet->cork.base;
- if (skb_queue_empty(&sk->sk_write_queue)) {
- /*
- * setup for corking
- */
- if (opt) {
- if (WARN_ON(np->cork.opt))
- return -EINVAL;
-
- np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
- if (unlikely(np->cork.opt == NULL))
- return -ENOBUFS;
-
- np->cork.opt->tot_len = opt->tot_len;
- np->cork.opt->opt_flen = opt->opt_flen;
- np->cork.opt->opt_nflen = opt->opt_nflen;
-
- np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
- sk->sk_allocation);
- if (opt->dst0opt && !np->cork.opt->dst0opt)
- return -ENOBUFS;
-
- np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
- sk->sk_allocation);
- if (opt->dst1opt && !np->cork.opt->dst1opt)
- return -ENOBUFS;
-
- np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
- sk->sk_allocation);
- if (opt->hopopt && !np->cork.opt->hopopt)
- return -ENOBUFS;
-
- np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
- sk->sk_allocation);
- if (opt->srcrt && !np->cork.opt->srcrt)
- return -ENOBUFS;
-
- /* need source address above miyazawa*/
- }
- dst_hold(&rt->dst);
- cork->dst = &rt->dst;
- inet->cork.fl.u.ip6 = *fl6;
- np->cork.hop_limit = hlimit;
- np->cork.tclass = tclass;
- if (rt->dst.flags & DST_XFRM_TUNNEL)
- mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(&rt->dst);
- else
- mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
- if (np->frag_size < mtu) {
- if (np->frag_size)
- mtu = np->frag_size;
- }
- cork->fragsize = mtu;
- if (dst_allfrag(rt->dst.path))
- cork->flags |= IPCORK_ALLFRAG;
- cork->length = 0;
- exthdrlen = (opt ? opt->opt_flen : 0);
- length += exthdrlen;
- transhdrlen += exthdrlen;
+ skb = skb_peek_tail(queue);
+ if (!skb) {
+ exthdrlen = opt ? opt->opt_flen : 0;
dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
- } else {
- rt = (struct rt6_info *)cork->dst;
- fl6 = &inet->cork.fl.u.ip6;
- opt = np->cork.opt;
- transhdrlen = 0;
- exthdrlen = 0;
- dst_exthdrlen = 0;
- mtu = cork->fragsize;
}
+
+ mtu = cork->fragsize;
orig_mtu = mtu;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1276,6 +1270,14 @@ emsgsize:
tskey = sk->sk_tskey++;
}
+ /* If this is the first and only packet and device
+ * supports checksum offloading, let's use it.
+ */
+ if (!skb &&
+ length + fragheaderlen < mtu &&
+ rt->dst.dev->features & NETIF_F_V6_CSUM &&
+ !exthdrlen)
+ csummode = CHECKSUM_PARTIAL;
/*
* Let's try using as much space as possible.
* Use MTU if total length of the message fits into the MTU.
@@ -1292,13 +1294,12 @@ emsgsize:
* --yoshfuji
*/
- skb = skb_peek_tail(&sk->sk_write_queue);
cork->length += length;
if (((length > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
- err = ip6_ufo_append_data(sk, getfrag, from, length,
+ err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen,
transhdrlen, mtu, flags, rt);
if (err)
@@ -1389,7 +1390,7 @@ alloc_new_skb:
* Fill in the control structures
*/
skb->protocol = htons(ETH_P_IPV6);
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = csummode;
skb->csum = 0;
/* reserve for fragmentation and ipsec header */
skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
@@ -1439,7 +1440,7 @@ alloc_new_skb:
/*
* Put the packet on the pending queue
*/
- __skb_queue_tail(&sk->sk_write_queue, skb);
+ __skb_queue_tail(queue, skb);
continue;
}
@@ -1458,7 +1459,6 @@ alloc_new_skb:
}
} else {
int i = skb_shinfo(skb)->nr_frags;
- struct page_frag *pfrag = sk_page_frag(sk);
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
@@ -1501,43 +1501,81 @@ error:
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
return err;
}
+
+int ip6_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen, int hlimit,
+ int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags, int dontfrag)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ int exthdrlen;
+ int err;
+
+ if (flags&MSG_PROBE)
+ return 0;
+ if (skb_queue_empty(&sk->sk_write_queue)) {
+ /*
+ * setup for corking
+ */
+ err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
+ tclass, opt, rt, fl6);
+ if (err)
+ return err;
+
+ exthdrlen = (opt ? opt->opt_flen : 0);
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
+ } else {
+ fl6 = &inet->cork.fl.u.ip6;
+ transhdrlen = 0;
+ }
+
+ return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
+ &np->cork, sk_page_frag(sk), getfrag,
+ from, length, transhdrlen, flags, dontfrag);
+}
EXPORT_SYMBOL_GPL(ip6_append_data);
-static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
+static void ip6_cork_release(struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork)
{
- if (np->cork.opt) {
- kfree(np->cork.opt->dst0opt);
- kfree(np->cork.opt->dst1opt);
- kfree(np->cork.opt->hopopt);
- kfree(np->cork.opt->srcrt);
- kfree(np->cork.opt);
- np->cork.opt = NULL;
+ if (v6_cork->opt) {
+ kfree(v6_cork->opt->dst0opt);
+ kfree(v6_cork->opt->dst1opt);
+ kfree(v6_cork->opt->hopopt);
+ kfree(v6_cork->opt->srcrt);
+ kfree(v6_cork->opt);
+ v6_cork->opt = NULL;
}
- if (inet->cork.base.dst) {
- dst_release(inet->cork.base.dst);
- inet->cork.base.dst = NULL;
- inet->cork.base.flags &= ~IPCORK_ALLFRAG;
+ if (cork->base.dst) {
+ dst_release(cork->base.dst);
+ cork->base.dst = NULL;
+ cork->base.flags &= ~IPCORK_ALLFRAG;
}
- memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
+ memset(&cork->fl, 0, sizeof(cork->fl));
}
-int ip6_push_pending_frames(struct sock *sk)
+struct sk_buff *__ip6_make_skb(struct sock *sk,
+ struct sk_buff_head *queue,
+ struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
- struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
struct ipv6hdr *hdr;
- struct ipv6_txoptions *opt = np->cork.opt;
- struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
+ struct ipv6_txoptions *opt = v6_cork->opt;
+ struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
+ struct flowi6 *fl6 = &cork->fl.u.ip6;
unsigned char proto = fl6->flowi6_proto;
- int err = 0;
- skb = __skb_dequeue(&sk->sk_write_queue);
+ skb = __skb_dequeue(queue);
if (skb == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
@@ -1545,7 +1583,7 @@ int ip6_push_pending_frames(struct sock *sk)
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
- while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
+ while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
@@ -1570,10 +1608,10 @@ int ip6_push_pending_frames(struct sock *sk)
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
- ip6_flow_hdr(hdr, np->cork.tclass,
+ ip6_flow_hdr(hdr, v6_cork->tclass,
ip6_make_flowlabel(net, skb, fl6->flowlabel,
np->autoflowlabel));
- hdr->hop_limit = np->cork.hop_limit;
+ hdr->hop_limit = v6_cork->hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
hdr->daddr = *final_dst;
@@ -1590,34 +1628,104 @@ int ip6_push_pending_frames(struct sock *sk)
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
+ ip6_cork_release(cork, v6_cork);
+out:
+ return skb;
+}
+
+int ip6_send_skb(struct sk_buff *skb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ int err;
+
err = ip6_local_out(skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
- goto error;
+ IP6_INC_STATS(net, rt->rt6i_idev,
+ IPSTATS_MIB_OUTDISCARDS);
}
-out:
- ip6_cork_release(inet, np);
return err;
-error:
- IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
- goto out;
+}
+
+int ip6_push_pending_frames(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ skb = ip6_finish_skb(sk);
+ if (!skb)
+ return 0;
+
+ return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
-void ip6_flush_pending_frames(struct sock *sk)
+static void __ip6_flush_pending_frames(struct sock *sk,
+ struct sk_buff_head *queue,
+ struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork)
{
struct sk_buff *skb;
- while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
+ while ((skb = __skb_dequeue_tail(queue)) != NULL) {
if (skb_dst(skb))
IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
- ip6_cork_release(inet_sk(sk), inet6_sk(sk));
+ ip6_cork_release(cork, v6_cork);
+}
+
+void ip6_flush_pending_frames(struct sock *sk)
+{
+ __ip6_flush_pending_frames(sk, &sk->sk_write_queue,
+ &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
+
+struct sk_buff *ip6_make_skb(struct sock *sk,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ int hlimit, int tclass,
+ struct ipv6_txoptions *opt, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags,
+ int dontfrag)
+{
+ struct inet_cork_full cork;
+ struct inet6_cork v6_cork;
+ struct sk_buff_head queue;
+ int exthdrlen = (opt ? opt->opt_flen : 0);
+ int err;
+
+ if (flags & MSG_PROBE)
+ return NULL;
+
+ __skb_queue_head_init(&queue);
+
+ cork.base.flags = 0;
+ cork.base.addr = 0;
+ cork.base.opt = NULL;
+ v6_cork.opt = NULL;
+ err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
+ if (err)
+ return ERR_PTR(err);
+
+ if (dontfrag < 0)
+ dontfrag = inet6_sk(sk)->dontfrag;
+
+ err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
+ &current->task_frag, getfrag, from,
+ length + exthdrlen, transhdrlen + exthdrlen,
+ flags, dontfrag);
+ if (err) {
+ __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
+ return ERR_PTR(err);
+ }
+
+ return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
+}
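
Taken together, the ip6_output.c changes above split corking into queue- and cork-explicit helpers (ip6_setup_cork(), __ip6_append_data(), __ip6_make_skb(), __ip6_flush_pending_frames()), so a single datagram can be assembled on a private queue without lock_sock(); the legacy ip6_append_data()/ip6_push_pending_frames() pair becomes a thin wrapper over the same code. A minimal sketch of the lockless pattern the new ip6_make_skb()/ip6_send_skb() pair enables (kernel context assumed; the wrapper function and its parameter list are illustrative, not an existing kernel symbol; error handling trimmed, transhdrlen of 0 assumed):

	/* Illustrative only: how a protocol might use the new entry points. */
	static int send_one_datagram_lockless(struct sock *sk, struct flowi6 *fl6,
					      struct rt6_info *rt, struct msghdr *msg,
					      int len, int hlimit, int tclass,
					      struct ipv6_txoptions *opt,
					      unsigned int flags, int dontfrag)
	{
		struct sk_buff *skb;

		/* Builds the datagram on a private queue and releases the
		 * temporary cork before returning; no lock_sock() needed. */
		skb = ip6_make_skb(sk, ip_generic_getfrag, msg, len, 0,
				   hlimit, tclass, opt, fl6, rt, flags, dontfrag);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
		if (!skb)
			return 0;		/* MSG_PROBE */

		return ip6_send_skb(skb);	/* stats accounting + ip6_local_out() */
	}

The corked path is unchanged for callers: ip6_append_data() still appends onto sk->sk_write_queue and ip6_push_pending_frames() is now simply ip6_finish_skb() followed by ip6_send_skb().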
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 92b3da571980..266a264ec212 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1760,6 +1760,14 @@ nla_put_failure:
return -EMSGSIZE;
}
+struct net *ip6_tnl_get_link_net(const struct net_device *dev)
+{
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+
+ return tunnel->net;
+}
+EXPORT_SYMBOL(ip6_tnl_get_link_net);
+
static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
@@ -1783,6 +1791,7 @@ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
.dellink = ip6_tnl_dellink,
.get_size = ip6_tnl_get_size,
.fill_info = ip6_tnl_fill_info,
+ .get_link_net = ip6_tnl_get_link_net,
};
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 8db6c98fe218..32d9b268e7d8 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -62,14 +62,14 @@ error:
}
EXPORT_SYMBOL_GPL(udp_sock_create6);
-int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
- struct sk_buff *skb, struct net_device *dev,
- struct in6_addr *saddr, struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port)
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be16 src_port,
+ __be16 dst_port, bool nocheck)
{
struct udphdr *uh;
struct ipv6hdr *ip6h;
- struct sock *sk = sock->sk;
__skb_push(skb, sizeof(*uh));
skb_reset_transport_header(skb);
@@ -85,7 +85,7 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
| IPSKB_REROUTED);
skb_dst_set(skb, dst);
- udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
+ udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
__skb_push(skb, sizeof(*ip6h));
skb_reset_network_header(skb);
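
udp_tunnel6_xmit_skb() now takes the zero-checksum preference as an explicit nocheck argument instead of deriving it from a UDP socket, so an encapsulation driver can choose the policy without having a socket in hand at transmit time. A hedged caller sketch (all variables are the caller's own; no_udp_csum stands in for whatever policy flag the driver keeps):

	err = udp_tunnel6_xmit_skb(dst, skb, dev, &saddr, &daddr,
				   prio, ttl, src_port, dst_port,
				   no_udp_csum /* nocheck */);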
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ace10d0b3aac..5fb9e212eca8 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1016,6 +1016,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
.changelink = vti6_changelink,
.get_size = vti6_get_size,
.fill_info = vti6_fill_info,
+ .get_link_net = ip6_tnl_get_link_net,
};
static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 722669754bbf..34b682617f50 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2388,7 +2388,8 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
if (err < 0 && err != -ENOENT)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
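
ip6mr_fill_mroute() is one of several fill routines in this series converted to return 0 on success now that nlmsg_end() no longer reports a message length; dump callbacks are adjusted from "<= 0" to "< 0" checks accordingly (see the l2tp changes further down). The resulting epilogue shape, as a sketch:

	/* Success and failure tails of a netlink fill function after this change. */
		nlmsg_end(skb, nlh);	/* finalize the header; no length returned */
		return 0;

	nla_put_failure:
		nlmsg_cancel(skb, nlh);
		return -EMSGSIZE;	/* the usual error for a full message */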
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 66980d8d98d1..8d766d9100cb 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -996,13 +996,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
lock_sock(sk);
skb = np->pktoptions;
if (skb)
- atomic_inc(&skb->users);
- release_sock(sk);
-
- if (skb) {
ip6_datagram_recv_ctl(sk, &msg, skb);
- kfree_skb(skb);
- } else {
+ release_sock(sk);
+ if (!skb) {
if (np->rxopt.bits.rxinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
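
The sockglue change keeps the stored pktoptions skb under the socket lock while ip6_datagram_recv_ctl() runs, rather than pinning it with an extra reference and processing it after release_sock(); the skb stays owned by the socket throughout, so the old hold/free pair goes away. In outline:

	lock_sock(sk);
	skb = np->pktoptions;
	if (skb)
		ip6_datagram_recv_ctl(sk, &msg, skb);	/* still under the lock */
	release_sock(sk);
	if (!skb) {
		/* no stored packet: synthesize minimal cmsgs from socket state */
	}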
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 682866777d53..471ed24aabae 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -655,7 +655,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
int probes = atomic_read(&neigh->probes);
- if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1))
+ if (skb && ipv6_chk_addr_and_flags(dev_net(dev), &ipv6_hdr(skb)->saddr,
+ dev, 1,
+ IFA_F_TENTATIVE|IFA_F_OPTIMISTIC))
saddr = &ipv6_hdr(skb)->saddr;
probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
if (probes < 0) {
@@ -1348,7 +1350,7 @@ skip_routeinfo:
}
}
- if (ndopts.nd_opts_mtu) {
+ if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
__be32 n;
u32 mtu;
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index 2433a6bfb191..11820b6b3613 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- range.max_proto.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ range.min_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ range.max_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 97f41a3e68d9..74581f706c4d 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,6 +9,25 @@
#include <net/addrconf.h>
#include <net/secure_seq.h>
+static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+ struct in6_addr *src)
+{
+ u32 hash, id;
+
+ hash = __ipv6_addr_jhash(dst, hashrnd);
+ hash = __ipv6_addr_jhash(src, hash);
+
+ /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+ * set the high order bit instead, thus minimizing possible future
+ * collisions.
+ */
+ id = ip_idents_reserve(hash, 1);
+ if (unlikely(!id))
+ id = 1 << 31;
+
+ return id;
+}
+
/* This function exists only for tap drivers that must support broken
* clients requesting UFO without specifying an IPv6 fragment ID.
*
@@ -22,7 +41,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
static u32 ip6_proxy_idents_hashrnd __read_mostly;
struct in6_addr buf[2];
struct in6_addr *addrs;
- u32 hash, id;
+ u32 id;
addrs = skb_header_pointer(skb,
skb_network_offset(skb) +
@@ -34,14 +53,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
net_get_random_once(&ip6_proxy_idents_hashrnd,
sizeof(ip6_proxy_idents_hashrnd));
- hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
- hash = __ipv6_addr_jhash(&addrs[0], hash);
-
- id = ip_idents_reserve(hash, 1);
+ id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+ &addrs[1], &addrs[0]);
skb_shinfo(skb)->ip6_frag_id = htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+ static u32 ip6_idents_hashrnd __read_mostly;
+ u32 id;
+
+ net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+ id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+ &rt->rt6i_src.addr);
+ fhdr->identification = htonl(id);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
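
__ipv6_select_ident() above hashes both addresses into a bucket and reserves an ID, with 0 reserved to mean "no fragment ID assigned yet"; if the reservation happens to return 0, the high-order bit is set instead. A userspace analogue of that convention (the hash and reservation helpers here are toy stand-ins, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	/* Toy stand-ins for __ipv6_addr_jhash()/ip_idents_reserve(). */
	static uint32_t toy_reserve(uint32_t hash)
	{
		static uint32_t counter;

		counter += hash;
		return counter;		/* may legitimately wrap to 0 */
	}

	static uint32_t select_ident(uint32_t hash)
	{
		uint32_t id = toy_reserve(hash);

		if (id == 0)		/* 0 means "unset" to the UFO path */
			id = 1u << 31;	/* pick the high-order bit instead */
		return id;
	}

	int main(void)
	{
		printf("id = %#x\n", select_ident(0xdeadbeef));
		printf("id = %#x\n", select_ident(0x21524111));	/* wraps to 0 -> 1<<31 */
		return 0;
	}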
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 2d3148378a1f..bd46f736f61d 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -163,8 +163,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
pfh.icmph.checksum = 0;
pfh.icmph.un.echo.id = inet->inet_sport;
pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
- /* XXX: stripping const */
- pfh.iov = (struct iovec *)msg->msg_iter.iov;
+ pfh.msg = msg;
pfh.wcheck = 0;
pfh.family = AF_INET6;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ee25631f8c29..dae7f1a1e464 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -609,7 +609,7 @@ out:
return err;
}
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags)
{
@@ -648,7 +648,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
- err = memcpy_fromiovecend((void *)iph, from, 0, length);
+ err = memcpy_from_msg(iph, msg, length);
if (err)
goto error_fault;
@@ -886,8 +886,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
back_from_confirm:
if (inet->hdrincl)
- /* XXX: stripping const */
- err = rawv6_send_hdrinc(sk, (struct iovec *)msg->msg_iter.iov, len, &fl6, &dst, msg->msg_flags);
+ err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c91083156edb..98565ce0ebcd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -499,7 +499,7 @@ static void rt6_probe_deferred(struct work_struct *w)
addrconf_addr_solict_mult(&work->target, &mcaddr);
ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
dev_put(work->dev);
- kfree(w);
+ kfree(work);
}
static void rt6_probe(struct rt6_info *rt)
@@ -853,14 +853,14 @@ EXPORT_SYMBOL(rt6_lookup);
*/
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
- struct nlattr *mx, int mx_len)
+ struct mx6_config *mxc)
{
int err;
struct fib6_table *table;
table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
- err = fib6_add(&table->tb6_root, rt, info, mx, mx_len);
+ err = fib6_add(&table->tb6_root, rt, info, mxc);
write_unlock_bh(&table->tb6_lock);
return err;
@@ -868,10 +868,10 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
int ip6_ins_rt(struct rt6_info *rt)
{
- struct nl_info info = {
- .nl_net = dev_net(rt->dst.dev),
- };
- return __ip6_ins_rt(rt, &info, NULL, 0);
+ struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
+ struct mx6_config mxc = { .mx = NULL, };
+
+ return __ip6_ins_rt(rt, &info, &mxc);
}
static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct net *net = dev_net(dst->dev);
rt6->rt6i_flags |= RTF_MODIFIED;
- if (mtu < IPV6_MIN_MTU) {
- u32 features = dst_metric(dst, RTAX_FEATURES);
+ if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(dst, RTAX_FEATURES, features);
- }
+
dst_metric_set(dst, RTAX_MTU, mtu);
rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
@@ -1245,12 +1242,16 @@ restart:
rt = net->ipv6.ip6_null_entry;
else if (rt->dst.error) {
rt = net->ipv6.ip6_null_entry;
- } else if (rt == net->ipv6.ip6_null_entry) {
+ goto out;
+ }
+
+ if (rt == net->ipv6.ip6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto restart;
}
+out:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
@@ -1470,9 +1471,51 @@ out:
return entries > rt_max_size;
}
-/*
- *
- */
+static int ip6_convert_metrics(struct mx6_config *mxc,
+ const struct fib6_config *cfg)
+{
+ struct nlattr *nla;
+ int remaining;
+ u32 *mp;
+
+ if (cfg->fc_mx == NULL)
+ return 0;
+
+ mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (unlikely(!mp))
+ return -ENOMEM;
+
+ nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
+ int type = nla_type(nla);
+
+ if (type) {
+ u32 val;
+
+ if (unlikely(type > RTAX_MAX))
+ goto err;
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+
+ nla_strlcpy(tmp, nla, sizeof(tmp));
+ val = tcp_ca_get_key_by_name(tmp);
+ if (val == TCP_CA_UNSPEC)
+ goto err;
+ } else {
+ val = nla_get_u32(nla);
+ }
+
+ mp[type - 1] = val;
+ __set_bit(type - 1, mxc->mx_valid);
+ }
+ }
+
+ mxc->mx = mp;
+
+ return 0;
+ err:
+ kfree(mp);
+ return -EINVAL;
+}
int ip6_route_add(struct fib6_config *cfg)
{
@@ -1482,6 +1525,7 @@ int ip6_route_add(struct fib6_config *cfg)
struct net_device *dev = NULL;
struct inet6_dev *idev = NULL;
struct fib6_table *table;
+ struct mx6_config mxc = { .mx = NULL, };
int addr_type;
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
@@ -1677,8 +1721,14 @@ install_route:
cfg->fc_nlinfo.nl_net = dev_net(dev);
- return __ip6_ins_rt(rt, &cfg->fc_nlinfo, cfg->fc_mx, cfg->fc_mx_len);
+ err = ip6_convert_metrics(&mxc, cfg);
+ if (err)
+ goto out;
+
+ err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
+ kfree(mxc.mx);
+ return err;
out:
if (dev)
dev_put(dev);
@@ -2534,7 +2584,8 @@ static inline size_t rt6_nlmsg_size(void)
+ nla_total_size(4) /* RTA_OIF */
+ nla_total_size(4) /* RTA_PRIORITY */
+ RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
- + nla_total_size(sizeof(struct rta_cacheinfo));
+ + nla_total_size(sizeof(struct rta_cacheinfo))
+ + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
}
static int rt6_fill_node(struct net *net,
@@ -2675,7 +2726,8 @@ static int rt6_fill_node(struct net *net,
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
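
ip6_route_add() now converts the RTA_METRICS attributes once into a struct mx6_config, a flat u32 array plus a validity bitmap, before inserting the route, and RTAX_CC_ALGO names are mapped to congestion-control keys at the same point. A userspace-sized sketch of the array-plus-bitmap shape (names and the 32-bit bitmap are illustrative, not kernel API):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define METRIC_MAX 16			/* plays the role of RTAX_MAX */

	struct metric_cfg {
		uint32_t mx[METRIC_MAX];	/* values indexed by (type - 1) */
		uint32_t mx_valid;		/* bit (type - 1) set when mx[] holds a value */
	};

	static void metric_set(struct metric_cfg *cfg, int type, uint32_t val)
	{
		cfg->mx[type - 1] = val;
		cfg->mx_valid |= 1u << (type - 1);
	}

	int main(void)
	{
		struct metric_cfg cfg;

		memset(&cfg, 0, sizeof(cfg));
		metric_set(&cfg, 2, 1280);	/* e.g. an MTU-style metric */

		for (int type = 1; type <= METRIC_MAX; type++)
			if (cfg.mx_valid & (1u << (type - 1)))
				printf("metric %d = %u\n", type, cfg.mx[type - 1]);
		return 0;
	}

The caller frees the converted array after insertion, which is what the kfree(mxc.mx) in the install_route path above does.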
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 213546bd6d5d..e4cbd5798eba 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
if (data[IFLA_IPTUN_ENCAP_SPORT]) {
ret = true;
- ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+ ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
}
if (data[IFLA_IPTUN_ENCAP_DPORT]) {
ret = true;
- ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+ ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
}
return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
tunnel->encap.type) ||
- nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
tunnel->encap.sport) ||
- nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
tunnel->encap.dport) ||
nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
tunnel->encap.flags))
@@ -1763,6 +1763,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = {
.get_size = ipip6_get_size,
.fill_info = ipip6_fill_info,
.dellink = ipip6_dellink,
+ .get_link_net = ip_tunnel_get_link_net,
};
static struct xfrm_tunnel sit_handler __read_mostly = {
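
The sit changes switch the encapsulation port attributes to the __be16-typed netlink accessors: the ports are carried in network byte order, and nla_get_u16()/nla_put_u16() are only correct for host-order values (sparse also flags the mismatch). A small userspace illustration of why the distinction matters on a little-endian host:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t port_be = htons(4789);		/* value as it travels on the wire */
		uint8_t *b = (uint8_t *)&port_be;

		/* A be16-aware accessor stores/loads these two bytes untouched;
		 * reading the same storage as a host-order u16 on little-endian
		 * would yield 46354 (0xb512) instead of 4789 (0x12b5). */
		printf("wire bytes: %02x %02x, ntohs() = %u\n", b[0], b[1], ntohs(port_be));
		return 0;
	}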
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9c0b54e87b47..5d46832c6f72 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1199,6 +1199,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
+ tcp_ca_openreq_child(newsk, dst);
+
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 189dc4ae3eca..d048d46779fc 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -909,7 +909,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
}
- if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
ip6_compute_pseudo);
@@ -990,9 +990,10 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
+ struct sk_buff *frags = skb_shinfo(skb)->frag_list;
__wsum csum = 0;
- if (skb_queue_len(&sk->sk_write_queue) == 1) {
+ if (!frags) {
/* Only one fragment on the socket. */
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
@@ -1008,9 +1009,9 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
skb->ip_summed = CHECKSUM_NONE;
- skb_queue_walk(&sk->sk_write_queue, skb) {
- csum = csum_add(csum, skb->csum);
- }
+ do {
+ csum = csum_add(csum, frags->csum);
+ } while ((frags = frags->next));
uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
csum);
@@ -1023,26 +1024,15 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
* Sending
*/
-static int udp_v6_push_pending_frames(struct sock *sk)
+static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
{
- struct sk_buff *skb;
+ struct sock *sk = skb->sk;
struct udphdr *uh;
- struct udp_sock *up = udp_sk(sk);
- struct inet_sock *inet = inet_sk(sk);
- struct flowi6 *fl6;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
-
- if (up->pending == AF_INET)
- return udp_push_pending_frames(sk);
-
- fl6 = &inet->cork.fl.u.ip6;
-
- /* Grab the skbuff where UDP header space exists. */
- skb = skb_peek(&sk->sk_write_queue);
- if (skb == NULL)
- goto out;
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
/*
* Create a UDP header
@@ -1050,29 +1040,28 @@ static int udp_v6_push_pending_frames(struct sock *sk)
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
- uh->len = htons(up->len);
+ uh->len = htons(len);
uh->check = 0;
if (is_udplite)
- csum = udplite_csum_outgoing(sk, skb);
- else if (up->no_check6_tx) { /* UDP csum disabled */
+ csum = udplite_csum(skb);
+ else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
- udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
- up->len);
+ udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
goto send;
} else
- csum = udp_csum_outgoing(sk, skb);
+ csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
- up->len, fl6->flowi6_proto, csum);
+ len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
- err = ip6_push_pending_frames(sk);
+ err = ip6_send_skb(skb);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
@@ -1082,6 +1071,30 @@ send:
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
+ return err;
+}
+
+static int udp_v6_push_pending_frames(struct sock *sk)
+{
+ struct sk_buff *skb;
+ struct udp_sock *up = udp_sk(sk);
+ struct flowi6 fl6;
+ int err = 0;
+
+ if (up->pending == AF_INET)
+ return udp_push_pending_frames(sk);
+
+ /* ip6_finish_skb will release the cork, so make a copy of
+ * fl6 here.
+ */
+ fl6 = inet_sk(sk)->cork.fl.u.ip6;
+
+ skb = ip6_finish_skb(sk);
+ if (!skb)
+ goto out;
+
+ err = udp_v6_send_skb(skb, &fl6);
+
out:
up->len = 0;
up->pending = 0;
@@ -1164,6 +1177,7 @@ do_udp_sendmsg:
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
+ getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
if (up->pending) {
/*
* There are pending frames.
@@ -1294,6 +1308,20 @@ do_udp_sendmsg:
goto do_confirm;
back_from_confirm:
+ /* Lockless fast path for the non-corking case */
+ if (!corkreq) {
+ struct sk_buff *skb;
+
+ skb = ip6_make_skb(sk, getfrag, msg, ulen,
+ sizeof(struct udphdr), hlimit, tclass, opt,
+ &fl6, (struct rt6_info *)dst,
+ msg->msg_flags, dontfrag);
+ err = PTR_ERR(skb);
+ if (!IS_ERR_OR_NULL(skb))
+ err = udp_v6_send_skb(skb, &fl6);
+ goto release_dst;
+ }
+
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
@@ -1311,7 +1339,6 @@ do_append_data:
if (dontfrag < 0)
dontfrag = np->dontfrag;
up->len += ulen;
- getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip6_append_data(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info *)dst,
@@ -1323,6 +1350,11 @@ do_append_data:
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
+ if (err > 0)
+ err = np->recverr ? net_xmit_errno(err) : 0;
+ release_sock(sk);
+
+release_dst:
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
@@ -1339,9 +1371,6 @@ do_append_data:
dst = NULL;
}
- if (err > 0)
- err = np->recverr ? net_xmit_errno(err) : 0;
- release_sock(sk);
out:
dst_release(dst);
fl6_sock_release(flowlabel);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b6aa8ed18257..a56276996b72 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
+ /* Set the IPv6 fragment id if not set yet */
+ if (!skb_shinfo(skb)->ip6_frag_id)
+ ipv6_proxy_select_ident(skb);
+
segs = NULL;
goto out;
}
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
- fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+ if (skb_shinfo(skb)->ip6_frag_id)
+ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+ else
+ ipv6_select_ident(fptr,
+ (struct rt6_info *)skb_dst(skb));
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 5f983644373a..48bf5a06847b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
{
struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0;
- u16 offset = skb_network_header_len(skb);
const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ u16 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
- u8 nexthdr = nh[IP6CB(skb)->nhoff];
+ u16 nhoff = IP6CB(skb)->nhoff;
int oif = 0;
+ u8 nexthdr;
+
+ if (!nhoff)
+ nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+ nexthdr = nh[nhoff];
if (skb_dst(skb))
oif = skb_dst(skb)->dev->ifindex;
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 7f2cafddfb6e..1cde711bcab5 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -533,7 +533,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
info.discovery = discovery;
/* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
- self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
+ self->slot_timeout = msecs_to_jiffies(sysctl_slot_timeout);
irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
}
@@ -1015,13 +1015,15 @@ void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
* Or, this is how much we can keep the pf bit in primary mode.
* Therefore, it must be lower or equal than our *OWN* max turn around.
* Jean II */
- self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
+ self->poll_timeout = msecs_to_jiffies(
+ self->qos_tx.max_turn_time.value);
/* The Final timeout applies only to the primary station.
* It defines the maximum time the primary wait (mostly in RECV mode)
* for an answer from the secondary station before polling it again.
* Therefore, it must be greater or equal than our *PARTNER*
* max turn around time - Jean II */
- self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
+ self->final_timeout = msecs_to_jiffies(
+ self->qos_rx.max_turn_time.value);
/* The Watchdog Bit timeout applies only to the secondary station.
* It defines the maximum time the secondary wait (mostly in RECV mode)
* for poll from the primary station before getting annoyed.
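
The IrDA timeouts above move from the open-coded "msecs * HZ / 1000" to msecs_to_jiffies(), which rounds up (and clamps very large values) instead of truncating, besides reading better. A quick userspace illustration of the truncation the helper avoids (HZ value assumed for the example):

	#include <stdio.h>

	#define HZ 250				/* assumed tick rate for the example */

	static unsigned int naive(unsigned int ms)    { return ms * HZ / 1000; }
	static unsigned int round_up(unsigned int ms) { return (ms * HZ + 999) / 1000; }

	int main(void)
	{
		/* A 3 ms timeout truncates to 0 jiffies with the naive formula,
		 * i.e. "no timeout at all"; rounding up keeps it at 1 jiffy. */
		printf("3 ms -> naive %u, rounded %u jiffies\n", naive(3), round_up(3));
		return 0;
	}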
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 0ac907adb2f4..b4e923f77954 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -40,6 +40,18 @@ static struct genl_family l2tp_nl_family = {
.netnsok = true,
};
+static const struct genl_multicast_group l2tp_multicast_group[] = {
+ {
+ .name = L2TP_GENL_MCGROUP,
+ },
+};
+
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, struct l2tp_tunnel *tunnel, u8 cmd);
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, struct l2tp_session *session,
+ u8 cmd);
+
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
@@ -97,6 +109,52 @@ out:
return ret;
}
+static int l2tp_tunnel_notify(struct genl_family *family,
+ struct genl_info *info,
+ struct l2tp_tunnel *tunnel,
+ u8 cmd)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, tunnel, cmd);
+
+ if (ret >= 0)
+ return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+
+ nlmsg_free(msg);
+
+ return ret;
+}
+
+static int l2tp_session_notify(struct genl_family *family,
+ struct genl_info *info,
+ struct l2tp_session *session,
+ u8 cmd)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, session, cmd);
+
+ if (ret >= 0)
+ return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+
+ nlmsg_free(msg);
+
+ return ret;
+}
+
static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
{
u32 tunnel_id;
@@ -188,6 +246,9 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
break;
}
+ if (ret >= 0)
+ ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
+ tunnel, L2TP_CMD_TUNNEL_CREATE);
out:
return ret;
}
@@ -211,6 +272,9 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
goto out;
}
+ l2tp_tunnel_notify(&l2tp_nl_family, info,
+ tunnel, L2TP_CMD_TUNNEL_DELETE);
+
(void) l2tp_tunnel_delete(tunnel);
out:
@@ -239,12 +303,15 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
if (info->attrs[L2TP_ATTR_DEBUG])
tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+ ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
+ tunnel, L2TP_CMD_TUNNEL_MODIFY);
+
out:
return ret;
}
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
- struct l2tp_tunnel *tunnel)
+ struct l2tp_tunnel *tunnel, u8 cmd)
{
void *hdr;
struct nlattr *nest;
@@ -254,8 +321,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
struct ipv6_pinfo *np = NULL;
#endif
- hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
- L2TP_CMD_TUNNEL_GET);
+ hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
if (!hdr)
return -EMSGSIZE;
@@ -324,7 +390,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
}
out:
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
@@ -359,7 +426,7 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
}
ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
- NLM_F_ACK, tunnel);
+ NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
if (ret < 0)
goto err_out;
@@ -385,7 +452,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- tunnel) <= 0)
+ tunnel, L2TP_CMD_TUNNEL_GET) < 0)
goto out;
ti++;
@@ -539,6 +606,13 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
session_id, peer_session_id, &cfg);
+ if (ret >= 0) {
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session)
+ ret = l2tp_session_notify(&l2tp_nl_family, info, session,
+ L2TP_CMD_SESSION_CREATE);
+ }
+
out:
return ret;
}
@@ -555,6 +629,9 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
goto out;
}
+ l2tp_session_notify(&l2tp_nl_family, info,
+ session, L2TP_CMD_SESSION_DELETE);
+
pw_type = session->pwtype;
if (pw_type < __L2TP_PWTYPE_MAX)
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
@@ -601,12 +678,15 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
if (info->attrs[L2TP_ATTR_MRU])
session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
+ ret = l2tp_session_notify(&l2tp_nl_family, info,
+ session, L2TP_CMD_SESSION_MODIFY);
+
out:
return ret;
}
static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
- struct l2tp_session *session)
+ struct l2tp_session *session, u8 cmd)
{
void *hdr;
struct nlattr *nest;
@@ -615,7 +695,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
sk = tunnel->sock;
- hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+ hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
if (!hdr)
return -EMSGSIZE;
@@ -673,7 +753,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
goto nla_put_failure;
nla_nest_end(skb, nest);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
@@ -699,7 +780,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
}
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
- 0, session);
+ 0, session, L2TP_CMD_SESSION_GET);
if (ret < 0)
goto err_out;
@@ -737,7 +818,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session) <= 0)
+ session, L2TP_CMD_SESSION_GET) < 0)
break;
si++;
@@ -896,7 +977,9 @@ EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
static int l2tp_nl_init(void)
{
pr_info("L2TP netlink interface\n");
- return genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops);
+ return genl_register_family_with_ops_groups(&l2tp_nl_family,
+ l2tp_nl_ops,
+ l2tp_multicast_group);
}
static void l2tp_nl_cleanup(void)
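
The l2tp netlink changes register a multicast group and reuse the existing fill routines (now taking an explicit command) to broadcast tunnel and session lifecycle events. The flow, in outline:

	/*
	 * l2tp_nl_cmd_tunnel_create()                   (likewise for the session
	 *     -> l2tp_tunnel_notify(family, info,        and modify/delete paths)
	 *                           tunnel, L2TP_CMD_TUNNEL_CREATE)
	 *         -> nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL)
	 *         -> l2tp_nl_tunnel_send(msg, portid, seq, NLM_F_ACK, tunnel, cmd)
	 *         -> genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC)
	 *
	 * Userspace monitors join the L2TP_GENL_MCGROUP multicast group to see
	 * tunnels and sessions come and go across network namespaces.
	 */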
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 612a5ddaf93b..799bafc2af39 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
.data = &sysctl_llc2_ack_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_ack_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "busy",
.data = &sysctl_llc2_busy_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_busy_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "p",
.data = &sysctl_llc2_p_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_p_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "rej",
.data = &sysctl_llc2_rej_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_rej_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
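
The llc2 timeout sysctls are handled by proc_dointvec_jiffies, which operates on int-sized values, so advertising sizeof(long) as .maxlen over-reports the object size on 64-bit (LP64) builds; using sizeof(variable) keeps the table entry and the variable type in lock-step. A trivial userspace reminder of the mismatch being fixed (the variable is declared int here to mirror what the handler expects):

	#include <stdio.h>

	static int sysctl_llc2_ack_timeout;	/* stand-in for the kernel variable */

	int main(void)
	{
		/* On LP64 the old table claimed 8 bytes for a 4-byte object. */
		printf("sizeof(long) = %zu, sizeof(variable) = %zu\n",
		       sizeof(long), sizeof(sysctl_llc2_ack_timeout));
		return 0;
	}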
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 75cc6801a431..64a012a0c6e5 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -5,6 +5,7 @@ config MAC80211
select CRYPTO_ARC4
select CRYPTO_AES
select CRYPTO_CCM
+ select CRYPTO_GCM
select CRC32
select AVERAGE
---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index e53671b1105e..3275f01881be 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -15,7 +15,9 @@ mac80211-y := \
michael.o \
tkip.o \
aes_ccm.o \
+ aes_gcm.o \
aes_cmac.o \
+ aes_gmac.o \
cfg.o \
ethtool.o \
rx.o \
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 09d9caaec591..7869bb40acaa 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -20,7 +20,8 @@
#include "aes_ccm.h"
void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic)
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len)
{
struct scatterlist assoc, pt, ct[2];
@@ -35,7 +36,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
sg_init_table(ct, 2);
sg_set_buf(&ct[0], data, data_len);
- sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
+ sg_set_buf(&ct[1], mic, mic_len);
aead_request_set_tfm(aead_req, tfm);
aead_request_set_assoc(aead_req, &assoc, assoc.length);
@@ -45,7 +46,8 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
}
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic)
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len)
{
struct scatterlist assoc, pt, ct[2];
char aead_req_data[sizeof(struct aead_request) +
@@ -62,17 +64,18 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
sg_init_table(ct, 2);
sg_set_buf(&ct[0], data, data_len);
- sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
+ sg_set_buf(&ct[1], mic, mic_len);
aead_request_set_tfm(aead_req, tfm);
aead_request_set_assoc(aead_req, &assoc, assoc.length);
- aead_request_set_crypt(aead_req, ct, &pt,
- data_len + IEEE80211_CCMP_MIC_LEN, b_0);
+ aead_request_set_crypt(aead_req, ct, &pt, data_len + mic_len, b_0);
return crypto_aead_decrypt(aead_req);
}
-struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+ size_t key_len,
+ size_t mic_len)
{
struct crypto_aead *tfm;
int err;
@@ -81,9 +84,9 @@ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
if (IS_ERR(tfm))
return tfm;
- err = crypto_aead_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
+ err = crypto_aead_setkey(tfm, key, key_len);
if (!err)
- err = crypto_aead_setauthsize(tfm, IEEE80211_CCMP_MIC_LEN);
+ err = crypto_aead_setauthsize(tfm, mic_len);
if (!err)
return tfm;
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 2c7ab1948a2e..6a73d1e4d186 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,11 +12,15 @@
#include <linux/crypto.h>
-struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[]);
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+ size_t key_len,
+ size_t mic_len);
void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic);
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len);
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic);
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len);
void ieee80211_aes_key_free(struct crypto_aead *tfm);
#endif /* AES_CCM_H */
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 9b9009f99551..4192806be3d3 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -18,8 +18,8 @@
#include "key.h"
#include "aes_cmac.h"
-#define AES_CMAC_KEY_LEN 16
#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
+#define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */
#define AAD_LEN 20
@@ -35,9 +35,9 @@ static void gf_mulx(u8 *pad)
pad[AES_BLOCK_SIZE - 1] ^= 0x87;
}
-
-static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
- const u8 *addr[], const size_t *len, u8 *mac)
+static void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
+ const u8 *addr[], const size_t *len, u8 *mac,
+ size_t mac_len)
{
u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
const u8 *pos, *end;
@@ -88,7 +88,7 @@ static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
for (i = 0; i < AES_BLOCK_SIZE; i++)
pad[i] ^= cbc[i];
crypto_cipher_encrypt_one(tfm, pad, pad);
- memcpy(mac, pad, CMAC_TLEN);
+ memcpy(mac, pad, mac_len);
}
@@ -107,17 +107,35 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
addr[2] = zero;
len[2] = CMAC_TLEN;
- aes_128_cmac_vector(tfm, 3, addr, len, mic);
+ aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN);
}
+void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+ const u8 *data, size_t data_len, u8 *mic)
+{
+ const u8 *addr[3];
+ size_t len[3];
+ u8 zero[CMAC_TLEN_256];
+
+ memset(zero, 0, CMAC_TLEN_256);
+ addr[0] = aad;
+ len[0] = AAD_LEN;
+ addr[1] = data;
+ len[1] = data_len - CMAC_TLEN_256;
+ addr[2] = zero;
+ len[2] = CMAC_TLEN_256;
+
+ aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN_256);
+}
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[])
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
+ size_t key_len)
{
struct crypto_cipher *tfm;
tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (!IS_ERR(tfm))
- crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
+ crypto_cipher_setkey(tfm, key, key_len);
return tfm;
}
diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h
index 0ce6487af795..3702041f44fd 100644
--- a/net/mac80211/aes_cmac.h
+++ b/net/mac80211/aes_cmac.h
@@ -11,9 +11,12 @@
#include <linux/crypto.h>
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]);
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
+ size_t key_len);
void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+ const u8 *data, size_t data_len, u8 *mic);
void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
#endif /* AES_CMAC_H */
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
new file mode 100644
index 000000000000..c2bf6698d738
--- /dev/null
+++ b/net/mac80211/aes_gcm.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2014-2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/aes.h>
+
+#include <net/mac80211.h>
+#include "key.h"
+#include "aes_gcm.h"
+
+void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic)
+{
+ struct scatterlist assoc, pt, ct[2];
+
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(tfm)]
+ __aligned(__alignof__(struct aead_request));
+ struct aead_request *aead_req = (void *)aead_req_data;
+
+ memset(aead_req, 0, sizeof(aead_req_data));
+
+ sg_init_one(&pt, data, data_len);
+ sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_init_table(ct, 2);
+ sg_set_buf(&ct[0], data, data_len);
+ sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);
+
+ aead_request_set_tfm(aead_req, tfm);
+ aead_request_set_assoc(aead_req, &assoc, assoc.length);
+ aead_request_set_crypt(aead_req, &pt, ct, data_len, j_0);
+
+ crypto_aead_encrypt(aead_req);
+}
+
+int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic)
+{
+ struct scatterlist assoc, pt, ct[2];
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(tfm)]
+ __aligned(__alignof__(struct aead_request));
+ struct aead_request *aead_req = (void *)aead_req_data;
+
+ if (data_len == 0)
+ return -EINVAL;
+
+ memset(aead_req, 0, sizeof(aead_req_data));
+
+ sg_init_one(&pt, data, data_len);
+ sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_init_table(ct, 2);
+ sg_set_buf(&ct[0], data, data_len);
+ sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);
+
+ aead_request_set_tfm(aead_req, tfm);
+ aead_request_set_assoc(aead_req, &assoc, assoc.length);
+ aead_request_set_crypt(aead_req, ct, &pt,
+ data_len + IEEE80211_GCMP_MIC_LEN, j_0);
+
+ return crypto_aead_decrypt(aead_req);
+}
+
+struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+ size_t key_len)
+{
+ struct crypto_aead *tfm;
+ int err;
+
+ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return tfm;
+
+ err = crypto_aead_setkey(tfm, key, key_len);
+ if (!err)
+ err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
+ if (!err)
+ return tfm;
+
+ crypto_free_aead(tfm);
+ return ERR_PTR(err);
+}
+
+void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm)
+{
+ crypto_free_aead(tfm);
+}
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
new file mode 100644
index 000000000000..1347fda6b76a
--- /dev/null
+++ b/net/mac80211/aes_gcm.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2014-2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AES_GCM_H
+#define AES_GCM_H
+
+#include <linux/crypto.h>
+
+void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic);
+int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic);
+struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+ size_t key_len);
+void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm);
+
+#endif /* AES_GCM_H */
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
new file mode 100644
index 000000000000..1c72edcb0083
--- /dev/null
+++ b/net/mac80211/aes_gmac.c
@@ -0,0 +1,84 @@
+/*
+ * AES-GMAC for IEEE 802.11 BIP-GMAC-128 and BIP-GMAC-256
+ * Copyright 2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/aes.h>
+
+#include <net/mac80211.h>
+#include "key.h"
+#include "aes_gmac.h"
+
+#define GMAC_MIC_LEN 16
+#define GMAC_NONCE_LEN 12
+#define AAD_LEN 20
+
+int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+ const u8 *data, size_t data_len, u8 *mic)
+{
+ struct scatterlist sg[3], ct[1];
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(tfm)]
+ __aligned(__alignof__(struct aead_request));
+ struct aead_request *aead_req = (void *)aead_req_data;
+ u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+
+ if (data_len < GMAC_MIC_LEN)
+ return -EINVAL;
+
+ memset(aead_req, 0, sizeof(aead_req_data));
+
+ memset(zero, 0, GMAC_MIC_LEN);
+ sg_init_table(sg, 3);
+ sg_set_buf(&sg[0], aad, AAD_LEN);
+ sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
+ sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
+
+ memcpy(iv, nonce, GMAC_NONCE_LEN);
+ memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN);
+ iv[AES_BLOCK_SIZE - 1] = 0x01;
+
+ sg_init_table(ct, 1);
+ sg_set_buf(&ct[0], mic, GMAC_MIC_LEN);
+
+ aead_request_set_tfm(aead_req, tfm);
+ aead_request_set_assoc(aead_req, sg, AAD_LEN + data_len);
+ aead_request_set_crypt(aead_req, NULL, ct, 0, iv);
+
+ crypto_aead_encrypt(aead_req);
+
+ return 0;
+}
+
+struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+ size_t key_len)
+{
+ struct crypto_aead *tfm;
+ int err;
+
+ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return tfm;
+
+ err = crypto_aead_setkey(tfm, key, key_len);
+ if (!err)
+ err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN);
+ if (!err)
+ return tfm;
+
+ crypto_free_aead(tfm);
+ return ERR_PTR(err);
+}
+
+void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm)
+{
+ crypto_free_aead(tfm);
+}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
new file mode 100644
index 000000000000..d328204d73a8
--- /dev/null
+++ b/net/mac80211/aes_gmac.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AES_GMAC_H
+#define AES_GMAC_H
+
+#include <linux/crypto.h>
+
+struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+ size_t key_len);
+int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+ const u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm);
+
+#endif /* AES_GMAC_H */
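
The new GMAC helper builds BIP-GMAC on top of the same "gcm(aes)" AEAD transform used for GCMP: the AAD, the frame body and a zeroed MIC placeholder are all fed as associated data, no plaintext is encrypted, and the 12-byte nonce is expanded to a 16-byte IV ending in 0x01, so the GCM authentication tag that comes back is exactly the GMAC MIC. In outline:

	/*
	 * ieee80211_aes_gmac(tfm, aad, nonce, data, data_len, mic)
	 *     sg  = { aad[20], data[data_len - 16], zero[16] }   all associated data
	 *     iv  = nonce[12] || 00 00 00 01                      GCM counter block J_0
	 *     aead_request_set_crypt(req, NULL, ct, 0, iv)        zero-length "plaintext"
	 *     crypto_aead_encrypt(req)                            tag lands in mic[16]
	 */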
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e75d5c53e97b..dd4ff36c557a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -162,8 +162,13 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
break;
default:
cs = ieee80211_cs_get(local, params->cipher, sdata->vif.type);
@@ -348,6 +353,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.seq_len = 6;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
pn64 = atomic64_read(&key->u.ccmp.tx_pn);
seq[0] = pn64;
seq[1] = pn64 >> 8;
@@ -359,6 +365,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.seq_len = 6;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
seq[0] = pn64;
seq[1] = pn64 >> 8;
@@ -369,6 +376,30 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.seq = seq;
params.seq_len = 6;
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+ seq[0] = pn64;
+ seq[1] = pn64 >> 8;
+ seq[2] = pn64 >> 16;
+ seq[3] = pn64 >> 24;
+ seq[4] = pn64 >> 32;
+ seq[5] = pn64 >> 40;
+ params.seq = seq;
+ params.seq_len = 6;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+ seq[0] = pn64;
+ seq[1] = pn64 >> 8;
+ seq[2] = pn64 >> 16;
+ seq[3] = pn64 >> 24;
+ seq[4] = pn64 >> 32;
+ seq[5] = pn64 >> 40;
+ params.seq = seq;
+ params.seq_len = 6;
+ break;
}
params.key = key->conf.key;
@@ -428,11 +459,13 @@ void sta_set_rate_info_tx(struct sta_info *sta,
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
- rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
+ rinfo->bw = RATE_INFO_BW_40;
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ rinfo->bw = RATE_INFO_BW_80;
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ rinfo->bw = RATE_INFO_BW_160;
+ else
+ rinfo->bw = RATE_INFO_BW_20;
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
@@ -459,16 +492,21 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
- if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
- rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
- if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
- rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80P80MHZ)
- rinfo->flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
- if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
- rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
+
+ if (sta->last_rx_rate_flag & RX_FLAG_5MHZ)
+ rinfo->bw = RATE_INFO_BW_5;
+ else if (sta->last_rx_rate_flag & RX_FLAG_10MHZ)
+ rinfo->bw = RATE_INFO_BW_10;
+ else if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
+ rinfo->bw = RATE_INFO_BW_40;
+ else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
+ rinfo->bw = RATE_INFO_BW_80;
+ else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
+ rinfo->bw = RATE_INFO_BW_160;
+ else
+ rinfo->bw = RATE_INFO_BW_20;
}
static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
@@ -678,7 +716,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON |
BSS_CHANGED_SSID |
- BSS_CHANGED_P2P_PS;
+ BSS_CHANGED_P2P_PS |
+ BSS_CHANGED_TXPOWER;
int err;
old = sdata_dereference(sdata->u.ap.beacon, sdata);
@@ -2102,6 +2141,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
+ enum nl80211_tx_power_setting txp_type = type;
+ bool update_txp_type = false;
if (wdev) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -2109,6 +2150,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ txp_type = NL80211_TX_POWER_LIMITED;
break;
case NL80211_TX_POWER_LIMITED:
case NL80211_TX_POWER_FIXED:
@@ -2118,7 +2160,12 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
break;
}
- ieee80211_recalc_txpower(sdata);
+ if (txp_type != sdata->vif.bss_conf.txpower_type) {
+ update_txp_type = true;
+ sdata->vif.bss_conf.txpower_type = txp_type;
+ }
+
+ ieee80211_recalc_txpower(sdata, update_txp_type);
return 0;
}
@@ -2126,6 +2173,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ txp_type = NL80211_TX_POWER_LIMITED;
break;
case NL80211_TX_POWER_LIMITED:
case NL80211_TX_POWER_FIXED:
@@ -2136,10 +2184,14 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
}
mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
sdata->user_power_level = local->user_power_level;
+ if (txp_type != sdata->vif.bss_conf.txpower_type)
+ update_txp_type = true;
+ sdata->vif.bss_conf.txpower_type = txp_type;
+ }
list_for_each_entry(sdata, &local->interfaces, list)
- ieee80211_recalc_txpower(sdata);
+ ieee80211_recalc_txpower(sdata, update_txp_type);
mutex_unlock(&local->iflist_mtx);
return 0;
@@ -2556,7 +2608,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* if there's one pending or we're scanning, queue this one */
if (!list_empty(&local->roc_list) ||
- local->scanning || local->radar_detect_enabled)
+ local->scanning || ieee80211_is_radar_required(local))
goto out_check_combine;
/* if not HW assist, just queue & schedule work */
@@ -3664,7 +3716,7 @@ static int ieee80211_del_tx_ts(struct wiphy *wiphy, struct net_device *dev,
* queues.
*/
synchronize_net();
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, false);
/* restore the normal QoS parameters
* (unconditionally to avoid races)
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index da1c12c34487..ff0d2db09df9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -388,7 +388,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
return NULL;
}
-static bool ieee80211_is_radar_required(struct ieee80211_local *local)
+bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
@@ -406,6 +406,34 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
return false;
}
+static bool
+ieee80211_chanctx_radar_required(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
+{
+ struct ieee80211_chanctx_conf *conf = &ctx->conf;
+ struct ieee80211_sub_if_data *sdata;
+ bool required = false;
+
+ lockdep_assert_held(&local->chanctx_mtx);
+ lockdep_assert_held(&local->mtx);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
+ continue;
+ if (!sdata->radar_required)
+ continue;
+
+ required = true;
+ break;
+ }
+ rcu_read_unlock();
+
+ return required;
+}
+
static struct ieee80211_chanctx *
ieee80211_alloc_chanctx(struct ieee80211_local *local,
const struct cfg80211_chan_def *chandef,
@@ -425,7 +453,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
ctx->conf.rx_chains_static = 1;
ctx->conf.rx_chains_dynamic = 1;
ctx->mode = mode;
- ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
+ ctx->conf.radar_enabled = false;
ieee80211_recalc_chanctx_min_def(local, ctx);
return ctx;
@@ -567,16 +595,15 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
bool radar_enabled;
lockdep_assert_held(&local->chanctx_mtx);
- /* for setting local->radar_detect_enabled */
+ /* for ieee80211_is_radar_required */
lockdep_assert_held(&local->mtx);
- radar_enabled = ieee80211_is_radar_required(local);
+ radar_enabled = ieee80211_chanctx_radar_required(local, chanctx);
if (radar_enabled == chanctx->conf.radar_enabled)
return;
chanctx->conf.radar_enabled = radar_enabled;
- local->radar_detect_enabled = chanctx->conf.radar_enabled;
if (!local->use_chanctx) {
local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
@@ -628,7 +655,7 @@ out:
}
if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
- ieee80211_recalc_txpower(sdata);
+ ieee80211_recalc_txpower(sdata, false);
ieee80211_recalc_chanctx_min_def(local, new_ctx);
}
@@ -1360,7 +1387,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
ieee80211_bss_info_change_notify(sdata,
changed);
- ieee80211_recalc_txpower(sdata);
+ ieee80211_recalc_txpower(sdata, false);
}
ieee80211_recalc_chanctx_chantype(local, ctx);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 54a189f0393e..eeb0bbd69d98 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -303,8 +303,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
- sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
sf += scnprintf(buf + sf, mxln - sf,
"REPORTS_TX_ACK_STATUS\n");
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 5523b94c7c90..71ac1b5f4da5 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -94,17 +94,33 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
key->u.tkip.tx.iv16);
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
pn = atomic64_read(&key->u.ccmp.tx_pn);
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
pn = atomic64_read(&key->u.aes_cmac.tx_pn);
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ pn = atomic64_read(&key->u.aes_gmac.tx_pn);
+ len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+ (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+ (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ pn = atomic64_read(&key->u.gcmp.tx_pn);
+ len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+ (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+ (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
+ break;
default:
return 0;
}
@@ -134,6 +150,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
len = p - buf;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
rpn = key->u.ccmp.rx_pn[i];
p += scnprintf(p, sizeof(buf)+buf-p,
@@ -144,6 +161,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
len = p - buf;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
rpn = key->u.aes_cmac.rx_pn;
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
@@ -151,6 +169,26 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
rpn[3], rpn[4], rpn[5]);
len = p - buf;
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ rpn = key->u.aes_gmac.rx_pn;
+ p += scnprintf(p, sizeof(buf)+buf-p,
+ "%02x%02x%02x%02x%02x%02x\n",
+ rpn[0], rpn[1], rpn[2],
+ rpn[3], rpn[4], rpn[5]);
+ len = p - buf;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+ rpn = key->u.gcmp.rx_pn[i];
+ p += scnprintf(p, sizeof(buf)+buf-p,
+ "%02x%02x%02x%02x%02x%02x\n",
+ rpn[0], rpn[1], rpn[2],
+ rpn[3], rpn[4], rpn[5]);
+ }
+ len = p - buf;
+ break;
default:
return 0;
}
@@ -167,12 +205,23 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_cmac.replays);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ len = scnprintf(buf, sizeof(buf), "%u\n",
+ key->u.aes_gmac.replays);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays);
+ break;
default:
return 0;
}
@@ -189,9 +238,15 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_cmac.icverrors);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ len = scnprintf(buf, sizeof(buf), "%u\n",
+ key->u.aes_gmac.icverrors);
+ break;
default:
return 0;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 2ebc9ead9695..fdeda17b8dd2 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -639,6 +639,21 @@ static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline void drv_sta_statistics(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_sta_statistics(local, sdata, sta);
+ if (local->ops->sta_statistics)
+ local->ops->sta_statistics(&local->hw, &sdata->vif, sta, sinfo);
+ trace_drv_return_void(local);
+}
+
static inline int drv_conf_tx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -966,21 +981,6 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline int drv_get_rssi(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta,
- s8 *rssi_dbm)
-{
- int ret;
-
- might_sleep();
-
- ret = local->ops->get_rssi(&local->hw, &sdata->vif, sta, rssi_dbm);
- trace_drv_get_rssi(local, sta, *rssi_dbm, ret);
-
- return ret;
-}
-
static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index ebfc8091557b..52bcea6ad9e8 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -117,16 +117,16 @@ static void ieee80211_get_stats(struct net_device *dev,
data[i++] = sta->sta_state;
- if (sinfo.filled & STATION_INFO_TX_BITRATE)
+ if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
data[i] = 100000 *
cfg80211_calculate_bitrate(&sinfo.txrate);
i++;
- if (sinfo.filled & STATION_INFO_RX_BITRATE)
+ if (sinfo.filled & BIT(NL80211_STA_INFO_RX_BITRATE))
data[i] = 100000 *
cfg80211_calculate_bitrate(&sinfo.rxrate);
i++;
- if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
+ if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))
data[i] = (u8)sinfo.signal_avg;
i++;
} else {
@@ -175,24 +175,24 @@ do_survey:
data[i++] = (u8)survey.noise;
else
data[i++] = -1LL;
- if (survey.filled & SURVEY_INFO_CHANNEL_TIME)
- data[i++] = survey.channel_time;
+ if (survey.filled & SURVEY_INFO_TIME)
+ data[i++] = survey.time;
else
data[i++] = -1LL;
- if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
- data[i++] = survey.channel_time_busy;
+ if (survey.filled & SURVEY_INFO_TIME_BUSY)
+ data[i++] = survey.time_busy;
else
data[i++] = -1LL;
- if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
- data[i++] = survey.channel_time_ext_busy;
+ if (survey.filled & SURVEY_INFO_TIME_EXT_BUSY)
+ data[i++] = survey.time_ext_busy;
else
data[i++] = -1LL;
- if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX)
- data[i++] = survey.channel_time_rx;
+ if (survey.filled & SURVEY_INFO_TIME_RX)
+ data[i++] = survey.time_rx;
else
data[i++] = -1LL;
- if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX)
- data[i++] = survey.channel_time_tx;
+ if (survey.filled & SURVEY_INFO_TIME_TX)
+ data[i++] = survey.time_tx;
else
data[i++] = -1LL;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 509bc157ce55..b606b53a49a7 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1069,9 +1069,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (sta && rates_updated) {
- drv_sta_rc_update(local, sdata, &sta->sta,
- IEEE80211_RC_SUPP_RATES_CHANGED);
+ u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+ u8 rx_nss = sta->sta.rx_nss;
+
+ /* Force rx_nss recalculation */
+ sta->sta.rx_nss = 0;
rate_control_rate_init(sta);
+ if (sta->sta.rx_nss != rx_nss)
+ changed |= IEEE80211_RC_NSS_CHANGED;
+
+ drv_sta_rc_update(local, sdata, &sta->sta, changed);
}
rcu_read_unlock();
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index cc6e964d9837..3afe36824703 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1168,8 +1168,6 @@ struct ieee80211_local {
/* wowlan is enabled -- don't reconfig on resume */
bool wowlan;
- /* DFS/radar detection is enabled */
- bool radar_detect_enabled;
struct work_struct radar_detected_work;
/* number of RX chains the hardware has */
@@ -1623,7 +1621,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local);
void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+ bool update_bss);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -1704,6 +1703,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -1752,7 +1752,8 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+ WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+ !test_bit(SCAN_COMPLETED, &local->scanning),
"%s: resume with hardware scan still in progress\n",
wiphy_name(hw->wiphy));
@@ -1881,10 +1882,40 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
struct sk_buff_head *skbs);
void ieee80211_flush_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata);
+ struct ieee80211_sub_if_data *sdata, bool drop);
void __ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- unsigned int queues);
+ unsigned int queues, bool drop);
+
+static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
+{
+ /*
+ * If quiescing is set, we are racing with __ieee80211_suspend.
+ * __ieee80211_suspend flushes the workers after setting quiescing,
+ * and we check quiescing / suspended before enqueuing new workers.
+ * We should abort the worker to avoid the races below.
+ */
+ if (local->quiescing)
+ return false;
+
+ /*
+ * We might already be suspended if the following scenario occurs:
+ * __ieee80211_suspend Control path
+ *
+ * if (local->quiescing)
+ * return;
+ * local->quiescing = true;
+ * flush_workqueue();
+ * queue_work(...);
+ * local->suspended = true;
+ * local->quiescing = false;
+ * worker starts running...
+ */
+ if (local->suspended)
+ return false;
+
+ return true;
+}
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
@@ -1981,6 +2012,7 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx);
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx);
+bool ieee80211_is_radar_required(struct ieee80211_local *local);
void ieee80211_dfs_cac_timer(unsigned long data);
void ieee80211_dfs_cac_timer_work(struct work_struct *work);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 417355390873..81a27516813e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -73,9 +73,10 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
return false;
}
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+ bool update_bss)
{
- if (__ieee80211_recalc_txpower(sdata))
+ if (__ieee80211_recalc_txpower(sdata) || update_bss)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
}
@@ -93,7 +94,7 @@ static u32 __ieee80211_idle_on(struct ieee80211_local *local)
if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
return 0;
- ieee80211_flush_queues(local, NULL);
+ ieee80211_flush_queues(local, NULL, false);
local->hw.conf.flags |= IEEE80211_CONF_IDLE;
return IEEE80211_CONF_CHANGE_IDLE;
@@ -1169,12 +1170,7 @@ static void ieee80211_iface_work(struct work_struct *work)
if (local->scanning)
return;
- /*
- * ieee80211_queue_work() should have picked up most cases,
- * here we'll pick the rest.
- */
- if (WARN(local->suspended,
- "interface work scheduled while going to suspend\n"))
+ if (!ieee80211_can_run_worker(local))
return;
/* first process frames */
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index bd4e46ec32bd..0825d76edcfc 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -24,6 +24,8 @@
#include "debugfs_key.h"
#include "aes_ccm.h"
#include "aes_cmac.h"
+#include "aes_gmac.h"
+#include "aes_gcm.h"
/**
@@ -90,7 +92,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
- int ret;
+ int ret = -EOPNOTSUPP;
might_sleep();
@@ -141,8 +143,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
sdata->crypto_tx_tailroom_needed_cnt--;
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -151,7 +152,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
return 0;
}
- if (ret != -ENOSPC && ret != -EOPNOTSUPP)
+ if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1)
sdata_err(sdata,
"failed to set key (%d, %pM) to hardware (%d)\n",
key->conf.keyidx,
@@ -163,8 +164,18 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
- /* all of these we can do in software */
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* all of these we can do in software - if driver can */
+ if (ret == 1)
+ return 0;
+ if (key->local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL)
+ return -EINVAL;
return 0;
default:
return -EINVAL;
@@ -191,8 +202,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sdata = key->sdata;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(sdata);
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -384,7 +394,26 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
- key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
+ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
+ key_data, key_len, IEEE80211_CCMP_MIC_LEN);
+ if (IS_ERR(key->u.ccmp.tfm)) {
+ err = PTR_ERR(key->u.ccmp.tfm);
+ kfree(key);
+ return ERR_PTR(err);
+ }
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN;
+ key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN;
+ for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
+ for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++)
+ key->u.ccmp.rx_pn[i][j] =
+ seq[IEEE80211_CCMP_256_PN_LEN - j - 1];
+ /* Initialize AES key state here as an optimization so that
+ * it does not need to be initialized for every packet.
+ */
+ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
+ key_data, key_len, IEEE80211_CCMP_256_MIC_LEN);
if (IS_ERR(key->u.ccmp.tfm)) {
err = PTR_ERR(key->u.ccmp.tfm);
kfree(key);
@@ -392,8 +421,12 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
}
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
key->conf.iv_len = 0;
- key->conf.icv_len = sizeof(struct ieee80211_mmie);
+ if (cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ key->conf.icv_len = sizeof(struct ieee80211_mmie);
+ else
+ key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
if (seq)
for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++)
key->u.aes_cmac.rx_pn[j] =
@@ -403,13 +436,51 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
* it does not need to be initialized for every packet.
*/
key->u.aes_cmac.tfm =
- ieee80211_aes_cmac_key_setup(key_data);
+ ieee80211_aes_cmac_key_setup(key_data, key_len);
if (IS_ERR(key->u.aes_cmac.tfm)) {
err = PTR_ERR(key->u.aes_cmac.tfm);
kfree(key);
return ERR_PTR(err);
}
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ key->conf.iv_len = 0;
+ key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
+ if (seq)
+ for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++)
+ key->u.aes_gmac.rx_pn[j] =
+ seq[IEEE80211_GMAC_PN_LEN - j - 1];
+ /* Initialize AES key state here as an optimization so that
+ * it does not need to be initialized for every packet.
+ */
+ key->u.aes_gmac.tfm =
+ ieee80211_aes_gmac_key_setup(key_data, key_len);
+ if (IS_ERR(key->u.aes_gmac.tfm)) {
+ err = PTR_ERR(key->u.aes_gmac.tfm);
+ kfree(key);
+ return ERR_PTR(err);
+ }
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key->conf.iv_len = IEEE80211_GCMP_HDR_LEN;
+ key->conf.icv_len = IEEE80211_GCMP_MIC_LEN;
+ for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
+ for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++)
+ key->u.gcmp.rx_pn[i][j] =
+ seq[IEEE80211_GCMP_PN_LEN - j - 1];
+ /* Initialize AES key state here as an optimization so that
+ * it does not need to be initialized for every packet.
+ */
+ key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data,
+ key_len);
+ if (IS_ERR(key->u.gcmp.tfm)) {
+ err = PTR_ERR(key->u.gcmp.tfm);
+ kfree(key);
+ return ERR_PTR(err);
+ }
+ break;
default:
if (cs) {
size_t len = (seq_len > MAX_PN_LEN) ?
@@ -431,10 +502,24 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
static void ieee80211_key_free_common(struct ieee80211_key *key)
{
- if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
ieee80211_aes_key_free(key->u.ccmp.tfm);
- if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ ieee80211_aes_gcm_key_free(key->u.gcmp.tfm);
+ break;
+ }
kzfree(key);
}
@@ -741,6 +826,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
seq->tkip.iv16 = key->u.tkip.tx.iv16;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
pn64 = atomic64_read(&key->u.ccmp.tx_pn);
seq->ccmp.pn[5] = pn64;
seq->ccmp.pn[4] = pn64 >> 8;
@@ -750,6 +836,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
seq->ccmp.pn[0] = pn64 >> 40;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
seq->ccmp.pn[5] = pn64;
seq->ccmp.pn[4] = pn64 >> 8;
@@ -758,6 +845,26 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
seq->ccmp.pn[1] = pn64 >> 32;
seq->ccmp.pn[0] = pn64 >> 40;
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+ seq->ccmp.pn[5] = pn64;
+ seq->ccmp.pn[4] = pn64 >> 8;
+ seq->ccmp.pn[3] = pn64 >> 16;
+ seq->ccmp.pn[2] = pn64 >> 24;
+ seq->ccmp.pn[1] = pn64 >> 32;
+ seq->ccmp.pn[0] = pn64 >> 40;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+ seq->gcmp.pn[5] = pn64;
+ seq->gcmp.pn[4] = pn64 >> 8;
+ seq->gcmp.pn[3] = pn64 >> 16;
+ seq->gcmp.pn[2] = pn64 >> 24;
+ seq->gcmp.pn[1] = pn64 >> 32;
+ seq->gcmp.pn[0] = pn64 >> 40;
+ break;
default:
WARN_ON(1);
}
@@ -780,6 +887,7 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
return;
if (tid < 0)
@@ -789,11 +897,29 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
if (WARN_ON(tid != 0))
return;
pn = key->u.aes_cmac.rx_pn;
memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (WARN_ON(tid != 0))
+ return;
+ pn = key->u.aes_gmac.rx_pn;
+ memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+ return;
+ if (tid < 0)
+ pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
+ else
+ pn = key->u.gcmp.rx_pn[tid];
+ memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN);
+ break;
}
}
EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
@@ -812,6 +938,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
key->u.tkip.tx.iv16 = seq->tkip.iv16;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
pn64 = (u64)seq->ccmp.pn[5] |
((u64)seq->ccmp.pn[4] << 8) |
((u64)seq->ccmp.pn[3] << 16) |
@@ -821,6 +948,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
atomic64_set(&key->u.ccmp.tx_pn, pn64);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
pn64 = (u64)seq->aes_cmac.pn[5] |
((u64)seq->aes_cmac.pn[4] << 8) |
((u64)seq->aes_cmac.pn[3] << 16) |
@@ -829,6 +957,26 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
((u64)seq->aes_cmac.pn[0] << 40);
atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ pn64 = (u64)seq->aes_gmac.pn[5] |
+ ((u64)seq->aes_gmac.pn[4] << 8) |
+ ((u64)seq->aes_gmac.pn[3] << 16) |
+ ((u64)seq->aes_gmac.pn[2] << 24) |
+ ((u64)seq->aes_gmac.pn[1] << 32) |
+ ((u64)seq->aes_gmac.pn[0] << 40);
+ atomic64_set(&key->u.aes_gmac.tx_pn, pn64);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ pn64 = (u64)seq->gcmp.pn[5] |
+ ((u64)seq->gcmp.pn[4] << 8) |
+ ((u64)seq->gcmp.pn[3] << 16) |
+ ((u64)seq->gcmp.pn[2] << 24) |
+ ((u64)seq->gcmp.pn[1] << 32) |
+ ((u64)seq->gcmp.pn[0] << 40);
+ atomic64_set(&key->u.gcmp.tx_pn, pn64);
+ break;
default:
WARN_ON(1);
break;
@@ -852,6 +1000,7 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
return;
if (tid < 0)
@@ -861,11 +1010,29 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
if (WARN_ON(tid != 0))
return;
pn = key->u.aes_cmac.rx_pn;
memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (WARN_ON(tid != 0))
+ return;
+ pn = key->u.aes_gmac.rx_pn;
+ memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+ return;
+ if (tid < 0)
+ pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
+ else
+ pn = key->u.gcmp.rx_pn[tid];
+ memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN);
+ break;
default:
WARN_ON(1);
break;
@@ -889,8 +1056,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(key->sdata);
}
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 19db68663d75..d57a9915494f 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -95,6 +95,24 @@ struct ieee80211_key {
u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
} aes_cmac;
struct {
+ atomic64_t tx_pn;
+ u8 rx_pn[IEEE80211_GMAC_PN_LEN];
+ struct crypto_aead *tfm;
+ u32 replays; /* dot11RSNAStatsCMACReplays */
+ u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
+ } aes_gmac;
+ struct {
+ atomic64_t tx_pn;
+ /* Last received packet number. The first
+ * IEEE80211_NUM_TIDS counters are used with Data
+ * frames and the last counter is used with Robust
+ * Management frames.
+ */
+ u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_GCMP_PN_LEN];
+ struct crypto_aead *tfm;
+ u32 replays; /* dot11RSNAStatsGCMPReplays */
+ } gcmp;
+ struct {
/* generic cipher scheme */
u8 rx_pn[IEEE80211_NUM_TIDS + 1][MAX_PN_LEN];
} gen;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 6ab99da38db9..5e09d354c5a5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -658,7 +658,6 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
bool have_wep = !(IS_ERR(local->wep_tx_tfm) ||
IS_ERR(local->wep_rx_tfm));
bool have_mfp = local->hw.flags & IEEE80211_HW_MFP_CAPABLE;
- const struct ieee80211_cipher_scheme *cs = local->hw.cipher_schemes;
int n_suites = 0, r = 0, w = 0;
u32 *suites;
static const u32 cipher_suites[] = {
@@ -667,79 +666,109 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
/* keep last -- depends on hw flags! */
- WLAN_CIPHER_SUITE_AES_CMAC
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
};
- /* Driver specifies the ciphers, we have nothing to do... */
- if (local->hw.wiphy->cipher_suites && have_wep)
- return 0;
+ if (local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL ||
+ local->hw.wiphy->cipher_suites) {
+ /* If the driver advertises its own cipher suites, or doesn't
+ * support SW crypto, we only need to remove WEP if necessary.
+ */
+ if (have_wep)
+ return 0;
+
+ /* well if it has _no_ ciphers ... fine */
+ if (!local->hw.wiphy->n_cipher_suites)
+ return 0;
+
+ /* Driver provides cipher suites, but we need to exclude WEP */
+ suites = kmemdup(local->hw.wiphy->cipher_suites,
+ sizeof(u32) * local->hw.wiphy->n_cipher_suites,
+ GFP_KERNEL);
+ if (!suites)
+ return -ENOMEM;
+
+ for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
+ u32 suite = local->hw.wiphy->cipher_suites[r];
- /* Set up cipher suites if driver relies on mac80211 cipher defs */
- if (!local->hw.wiphy->cipher_suites && !cs) {
+ if (suite == WLAN_CIPHER_SUITE_WEP40 ||
+ suite == WLAN_CIPHER_SUITE_WEP104)
+ continue;
+ suites[w++] = suite;
+ }
+ } else if (!local->hw.cipher_schemes) {
+ /* If the driver doesn't have cipher schemes, there's nothing
+ * else to do other than assign the (software supported and
+ * perhaps offloaded) cipher suites.
+ */
local->hw.wiphy->cipher_suites = cipher_suites;
local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
if (!have_mfp)
- local->hw.wiphy->n_cipher_suites--;
+ local->hw.wiphy->n_cipher_suites -= 4;
if (!have_wep) {
local->hw.wiphy->cipher_suites += 2;
local->hw.wiphy->n_cipher_suites -= 2;
}
+ /* not dynamically allocated, so just return */
return 0;
- }
+ } else {
+ const struct ieee80211_cipher_scheme *cs;
- if (!local->hw.wiphy->cipher_suites) {
- /*
- * Driver specifies cipher schemes only
- * We start counting ciphers defined by schemes, TKIP and CCMP
+ cs = local->hw.cipher_schemes;
+
+ /* Driver specifies cipher schemes only (but not cipher suites
+ * including the schemes)
+ *
+ * We start counting ciphers defined by schemes, TKIP, CCMP,
+ * CCMP-256, GCMP, and GCMP-256
*/
- n_suites = local->hw.n_cipher_schemes + 2;
+ n_suites = local->hw.n_cipher_schemes + 5;
/* check if we have WEP40 and WEP104 */
if (have_wep)
n_suites += 2;
- /* check if we have AES_CMAC */
+ /* check if we have AES_CMAC, BIP-CMAC-256, BIP-GMAC-128,
+ * BIP-GMAC-256
+ */
if (have_mfp)
- n_suites++;
+ n_suites += 4;
suites = kmalloc(sizeof(u32) * n_suites, GFP_KERNEL);
if (!suites)
return -ENOMEM;
suites[w++] = WLAN_CIPHER_SUITE_CCMP;
+ suites[w++] = WLAN_CIPHER_SUITE_CCMP_256;
suites[w++] = WLAN_CIPHER_SUITE_TKIP;
+ suites[w++] = WLAN_CIPHER_SUITE_GCMP;
+ suites[w++] = WLAN_CIPHER_SUITE_GCMP_256;
if (have_wep) {
suites[w++] = WLAN_CIPHER_SUITE_WEP40;
suites[w++] = WLAN_CIPHER_SUITE_WEP104;
}
- if (have_mfp)
+ if (have_mfp) {
suites[w++] = WLAN_CIPHER_SUITE_AES_CMAC;
+ suites[w++] = WLAN_CIPHER_SUITE_BIP_CMAC_256;
+ suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_128;
+ suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
+ }
for (r = 0; r < local->hw.n_cipher_schemes; r++)
suites[w++] = cs[r].cipher;
- } else {
- /* Driver provides cipher suites, but we need to exclude WEP */
- suites = kmemdup(local->hw.wiphy->cipher_suites,
- sizeof(u32) * local->hw.wiphy->n_cipher_suites,
- GFP_KERNEL);
- if (!suites)
- return -ENOMEM;
-
- for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
- u32 suite = local->hw.wiphy->cipher_suites[r];
-
- if (suite == WLAN_CIPHER_SUITE_WEP40 ||
- suite == WLAN_CIPHER_SUITE_WEP104)
- continue;
- suites[w++] = suite;
- }
}
local->hw.wiphy->cipher_suites = suites;
@@ -916,10 +945,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
}
- WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
- && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
- "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
-
/*
* Calculate scan IE length -- we need this to alloc
* memory and to subtract from the driver limit. It
@@ -1045,10 +1070,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_max_network_latency;
result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
&local->network_latency_notifier);
- if (result) {
- rtnl_lock();
+ if (result)
goto fail_pm_qos;
- }
#ifdef CONFIG_INET
local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
@@ -1076,15 +1099,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
fail_ifa:
pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
&local->network_latency_notifier);
- rtnl_lock();
#endif
fail_pm_qos:
- ieee80211_led_exit(local);
+ rtnl_lock();
+ rate_control_deinitialize(local);
ieee80211_remove_interfaces(local);
fail_rate:
rtnl_unlock();
+ ieee80211_led_exit(local);
ieee80211_wep_free(local);
- sta_info_stop(local);
destroy_workqueue(local->workqueue);
fail_workqueue:
wiphy_unregister(local->hw.wiphy);
@@ -1180,6 +1203,8 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
kfree(rcu_access_pointer(local->tx_latency));
+ sta_info_stop(local);
+
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2c36c4765f47..10ac6324c1d0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -157,14 +157,18 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_chan_def vht_chandef;
+ struct ieee80211_sta_ht_cap sta_ht_cap;
u32 ht_cfreq, ret;
+ memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
chandef->chan = channel;
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
chandef->center_freq1 = channel->center_freq;
chandef->center_freq2 = 0;
- if (!ht_cap || !ht_oper || !sband->ht_cap.ht_supported) {
+ if (!ht_cap || !ht_oper || !sta_ht_cap.ht_supported) {
ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -198,7 +202,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
/* check 40 MHz support, if we have it */
- if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
chandef->width = NL80211_CHAN_WIDTH_40;
@@ -1054,8 +1058,6 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
sdata->csa_block_tx = false;
}
- cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef);
-
sdata->vif.csa_active = false;
ifmgd->csa_waiting_bcn = false;
@@ -1067,6 +1069,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
&ifmgd->csa_connection_drop_work);
return;
}
+
+ cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
@@ -1284,8 +1288,11 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
country_ie_len -= 3;
}
- if (have_chan_pwr)
+ if (have_chan_pwr && pwr_constr_elem)
*pwr_reduction = *pwr_constr_elem;
+ else
+ *pwr_reduction = 0;
+
return have_chan_pwr;
}
@@ -1314,10 +1321,11 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
int chan_pwr = 0, pwr_reduction_80211h = 0;
int pwr_level_cisco, pwr_level_80211h;
int new_ap_level;
+ __le16 capab = mgmt->u.probe_resp.capab_info;
- if (country_ie && pwr_constr_ie &&
- mgmt->u.probe_resp.capab_info &
- cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) {
+ if (country_ie &&
+ (capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) ||
+ capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) {
has_80211h_pwr = ieee80211_find_80211h_pwr_constr(
sdata, channel, country_ie, country_ie_len,
pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h);
@@ -1596,7 +1604,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
} else {
ieee80211_send_nullfunc(local, sdata, 1);
/* Flush to get the tx status of nullfunc frame */
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, false);
}
}
@@ -1643,7 +1651,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- bool ret;
+ bool ret = false;
int ac;
if (local->hw.queues < IEEE80211_NUM_ACS)
@@ -2003,18 +2011,26 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* disable per-vif ps */
ieee80211_recalc_ps_vif(sdata);
- /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
+ /* make sure ongoing transmission finishes */
+ synchronize_net();
+
+ /*
+ * drop any frames before deauth/disassoc; these can be data or
+ * management frames. Since we are disconnecting, we should not
+ * insist on sending these frames, which can take time and delay
+ * the disconnection and possibly the roaming.
+ */
if (tx)
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, true);
/* deauthenticate/disassociate now */
if (tx || frame_buf)
ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
reason, tx, frame_buf);
- /* flush out frame */
+ /* flush out frame - make sure the deauth was actually sent */
if (tx)
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, false);
/* clear bssid only after building the needed mgmt frames */
memset(ifmgd->bssid, 0, ETH_ALEN);
@@ -2440,6 +2456,12 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
sdata_assert_lock(sdata);
if (!assoc) {
+ /*
+ * we are not authenticated yet, the only timer that could be
+ * running is the timeout for the authentication response, which
+ * is not relevant anymore.
+ */
+ del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, auth_data->bss->bssid);
memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
@@ -2747,6 +2769,12 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
sdata_assert_lock(sdata);
if (!assoc) {
+ /*
+ * we are not associated yet, the only timer that could be
+ * running is the timeout for the association response, which
+ * is not relevant anymore.
+ */
+ del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
@@ -4197,9 +4225,13 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss *bss = (void *)cbss->priv;
struct sta_info *new_sta = NULL;
- bool have_sta = false;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_sta_ht_cap sta_ht_cap;
+ bool have_sta = false, is_override = false;
int err;
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
return -EINVAL;
@@ -4214,25 +4246,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (!new_sta)
return -ENOMEM;
}
+
+ memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+ is_override = (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) !=
+ (sband->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+
+ if (new_sta || is_override) {
+ err = ieee80211_prep_channel(sdata, cbss);
+ if (err) {
+ if (new_sta)
+ sta_info_free(local, new_sta);
+ return -EINVAL;
+ }
+ }
+
if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_supported_band *sband;
const struct cfg80211_bss_ies *ies;
- int shift;
+ int shift = ieee80211_vif_get_shift(&sdata->vif);
u32 rate_flags;
- sband = local->hw.wiphy->bands[cbss->channel->band];
-
- err = ieee80211_prep_channel(sdata, cbss);
- if (err) {
- sta_info_free(local, new_sta);
- return -EINVAL;
- }
- shift = ieee80211_vif_get_shift(&sdata->vif);
-
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
@@ -4668,8 +4707,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
rcu_read_unlock();
+ if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
+ (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
+ "U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
+ sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+
if (bss->wmm_used && bss->uapsd_supported &&
- (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
+ (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
} else {
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index ff20b2ebdb30..683f0e3cb124 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -121,7 +121,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
false);
- ieee80211_flush_queues(local, NULL);
+ ieee80211_flush_queues(local, NULL, false);
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -398,7 +398,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
ieee80211_roc_notify_destroy(roc, !roc->abort);
if (started && !on_channel) {
- ieee80211_flush_queues(local, NULL);
+ ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
ieee80211_hw_config(local, 0);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 4c5192e0d66c..ca405b6b686d 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -41,7 +41,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* flush out all packets */
synchronize_net();
- ieee80211_flush_queues(local, NULL);
+ ieee80211_flush_queues(local, NULL, true);
local->quiescing = true;
/* make quiescing visible to timers everywhere */
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
}
}
- /* tear down aggregation sessions and remove STAs */
- mutex_lock(&local->sta_mtx);
- list_for_each_entry(sta, &local->sta_list, list) {
- if (sta->uploaded) {
- enum ieee80211_sta_state state;
-
- state = sta->sta_state;
- for (; state > IEEE80211_STA_NOTEXIST; state--)
- WARN_ON(drv_sta_state(local, sta->sdata, sta,
- state, state - 1));
- }
- }
- mutex_unlock(&local->sta_mtx);
-
/* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
case NL80211_IFTYPE_STATION:
ieee80211_mgd_quiesce(sdata);
break;
+ case NL80211_IFTYPE_WDS:
+ /* tear down aggregation sessions and remove STAs */
+ mutex_lock(&local->sta_mtx);
+ sta = sdata->u.wds.sta;
+ if (sta && sta->uploaded) {
+ enum ieee80211_sta_state state;
+
+ state = sta->sta_state;
+ for (; state > IEEE80211_STA_NOTEXIST; state--)
+ WARN_ON(drv_sta_state(local, sta->sdata,
+ sta, state,
+ state - 1));
+ }
+ mutex_unlock(&local->sta_mtx);
+ break;
default:
break;
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index d51f6b1c549b..7c86a002df95 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -263,12 +263,12 @@ static inline unsigned int
minstrel_get_retry_count(struct minstrel_rate *mr,
struct ieee80211_tx_info *info)
{
- unsigned int retry = mr->adjusted_retry_count;
+ u8 retry = mr->adjusted_retry_count;
if (info->control.use_rts)
- retry = max(2U, min(mr->stats.retry_count_rtscts, retry));
+ retry = max_t(u8, 2, min(mr->stats.retry_count_rtscts, retry));
else if (info->control.use_cts_prot)
- retry = max(2U, min(mr->retry_count_cts, retry));
+ retry = max_t(u8, 2, min(mr->retry_count_cts, retry));
return retry;
}
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 97eca86a4af0..410efe620c57 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -33,8 +33,8 @@ minstrel_ewma(int old, int new, int weight)
struct minstrel_rate_stats {
/* current / last sampling period attempts/success counters */
- unsigned int attempts, last_attempts;
- unsigned int success, last_success;
+ u16 attempts, last_attempts;
+ u16 success, last_success;
/* total attempts/success counters */
u64 att_hist, succ_hist;
@@ -46,8 +46,8 @@ struct minstrel_rate_stats {
unsigned int cur_prob, probability;
/* maximum retry counts */
- unsigned int retry_count;
- unsigned int retry_count_rtscts;
+ u8 retry_count;
+ u8 retry_count_rtscts;
u8 sample_skipped;
bool retry_updated;
@@ -55,14 +55,15 @@ struct minstrel_rate_stats {
struct minstrel_rate {
int bitrate;
- int rix;
+
+ s8 rix;
+ u8 retry_count_cts;
+ u8 adjusted_retry_count;
unsigned int perfect_tx_time;
unsigned int ack_time;
int sample_limit;
- unsigned int retry_count_cts;
- unsigned int adjusted_retry_count;
struct minstrel_rate_stats stats;
};
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 683b10f46505..1101563357ea 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
else if (rate)
- channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
else
channel_flags |= IEEE80211_CHAN_2GHZ;
put_unaligned_le16(channel_flags, pos);
@@ -361,9 +361,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
u16 known = local->hw.radiotap_vht_details;
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
- /* known field - how to handle 80+80? */
- if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
- known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
put_unaligned_le16(known, pos);
pos += 2;
/* flags */
@@ -378,8 +375,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* bandwidth */
if (status->vht_flag & RX_VHT_FLAG_80MHZ)
*pos++ = 4;
- else if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
- *pos++ = 0; /* marked not known above */
else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
*pos++ = 11;
else if (status->flag & RX_FLAG_40MHZ)
@@ -652,6 +647,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
struct ieee80211_mmie *mmie;
+ struct ieee80211_mmie_16 *mmie16;
if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
return -1;
@@ -661,11 +657,18 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
mmie = (struct ieee80211_mmie *)
(skb->data + skb->len - sizeof(*mmie));
- if (mmie->element_id != WLAN_EID_MMIE ||
- mmie->length != sizeof(*mmie) - 2)
- return -1;
-
- return le16_to_cpu(mmie->key_id);
+ if (mmie->element_id == WLAN_EID_MMIE &&
+ mmie->length == sizeof(*mmie) - 2)
+ return le16_to_cpu(mmie->key_id);
+
+ mmie16 = (struct ieee80211_mmie_16 *)
+ (skb->data + skb->len - sizeof(*mmie16));
+ if (skb->len >= 24 + sizeof(*mmie16) &&
+ mmie16->element_id == WLAN_EID_MMIE &&
+ mmie16->length == sizeof(*mmie16) - 2)
+ return le16_to_cpu(mmie16->key_id);
+
+ return -1;
}
static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
@@ -1655,11 +1658,27 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
result = ieee80211_crypto_tkip_decrypt(rx);
break;
case WLAN_CIPHER_SUITE_CCMP:
- result = ieee80211_crypto_ccmp_decrypt(rx);
+ result = ieee80211_crypto_ccmp_decrypt(
+ rx, IEEE80211_CCMP_MIC_LEN);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ result = ieee80211_crypto_ccmp_decrypt(
+ rx, IEEE80211_CCMP_256_MIC_LEN);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
result = ieee80211_crypto_aes_cmac_decrypt(rx);
break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ result = ieee80211_crypto_aes_gmac_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ result = ieee80211_crypto_gcmp_decrypt(rx);
+ break;
default:
result = ieee80211_crypto_hw_decrypt(rx);
}
@@ -1786,7 +1805,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is the first fragment of a new frame. */
entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
rx->seqno_idx, &(rx->skb));
- if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
+ if (rx->key &&
+ (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
ieee80211_has_protected(fc)) {
int queue = rx->security_idx;
/* Store CCMP PN so that we can verify that the next
@@ -1815,7 +1836,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
int queue;
- if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
+ if (!rx->key ||
+ (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -2314,6 +2337,15 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
return RX_DROP_MONITOR;
+ if (rx->sta) {
+ /* The seqno index has the same property as needed
+ * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
+ * for non-QoS-data frames. Here we know it's a data
+ * frame, so count MSDUs.
+ */
+ rx->sta->rx_msdu[rx->seqno_idx]++;
+ }
+
/*
* Send unexpected-4addr-frame event to hostapd. For older versions,
* also drop the frame to cooked monitor interfaces.
@@ -2598,7 +2630,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
struct ieee80211_supported_band *sband;
u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
- enum ieee80211_sta_rx_bandwidth new_bw;
+ enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
/* If it doesn't support 40 MHz it can't change ... */
if (!(rx->sta->sta.ht_cap.cap &
@@ -2606,13 +2638,18 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
- new_bw = IEEE80211_STA_RX_BW_20;
+ max_bw = IEEE80211_STA_RX_BW_20;
else
- new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
+ max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
+
+ /* set cur_max_bandwidth and recalc sta bw */
+ rx->sta->cur_max_bandwidth = max_bw;
+ new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
if (rx->sta->sta.bandwidth == new_bw)
goto handled;
+ rx->sta->sta.bandwidth = new_bw;
sband = rx->local->hw.wiphy->bands[status->band];
rate_control_rate_update(local, sband, rx->sta,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index ae842678b629..05f0d711b6d8 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -416,7 +416,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
ieee80211_offchannel_stop_vifs(local);
/* ensure nullfunc is transmitted before leaving operating channel */
- ieee80211_flush_queues(local, NULL);
+ ieee80211_flush_queues(local, NULL, false);
ieee80211_configure_filter(local);
@@ -432,7 +432,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
static bool ieee80211_can_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- if (local->radar_detect_enabled)
+ if (ieee80211_is_radar_required(local))
return false;
if (!list_empty(&local->roc_list))
@@ -505,7 +505,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
lockdep_assert_held(&local->mtx);
- if (local->scan_req)
+ if (local->scan_req || ieee80211_is_radar_required(local))
return -EBUSY;
if (!ieee80211_can_scan(local, sdata)) {
@@ -805,7 +805,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
ieee80211_offchannel_stop_vifs(local);
if (local->ops->flush) {
- ieee80211_flush_queues(local, NULL);
+ ieee80211_flush_queues(local, NULL, false);
*next_delay = 0;
} else
*next_delay = HZ / 10;
@@ -828,6 +828,11 @@ void ieee80211_scan_work(struct work_struct *work)
mutex_lock(&local->mtx);
+ if (!ieee80211_can_run_worker(local)) {
+ aborted = true;
+ goto out_complete;
+ }
+
sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
scan_req = rcu_dereference_protected(local->scan_req,
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index efeba56c913b..06e6ac8cc693 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -34,19 +34,15 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def new_vht_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
- const struct ieee80211_ht_operation *ht_oper;
int secondary_channel_offset = -1;
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
- ht_oper = elems->ht_operation;
if (sta_flags & (IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_40MHZ)) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
- /* only used for bandwidth here */
- ht_oper = NULL;
}
if (sta_flags & IEEE80211_STA_DISABLE_VHT)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a42f5b2b024d..00ca8dcc2bcf 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -116,7 +116,6 @@ static void __cleanup_single_sta(struct sta_info *sta)
clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
atomic_dec(&ps->num_sta_ps);
- sta_info_recalc_tim(sta);
}
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
@@ -625,7 +624,7 @@ static unsigned long ieee80211_tids_for_ac(int ac)
}
}
-void sta_info_recalc_tim(struct sta_info *sta)
+static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
{
struct ieee80211_local *local = sta->local;
struct ps_data *ps;
@@ -667,6 +666,9 @@ void sta_info_recalc_tim(struct sta_info *sta)
if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
ignore_for_tim = 0;
+ if (ignore_pending)
+ ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
unsigned long tids;
@@ -695,7 +697,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
else
__bss_tim_clear(ps->tim, id);
- if (local->ops->set_tim) {
+ if (local->ops->set_tim && !WARN_ON(sta->dead)) {
local->tim_in_locked_section = true;
drv_set_tim(local, &sta->sta, indicate_tim);
local->tim_in_locked_section = false;
@@ -705,6 +707,11 @@ out_unlock:
spin_unlock_bh(&local->tim_lock);
}
+void sta_info_recalc_tim(struct sta_info *sta)
+{
+ __sta_info_recalc_tim(sta, false);
+}
+
static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
@@ -874,6 +881,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct station_info sinfo = {};
int ret;
/*
@@ -887,6 +895,9 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
/* now keys can no longer be reached */
ieee80211_free_sta_keys(local, sta);
+ /* disable TIM bit - last chance to tell driver */
+ __sta_info_recalc_tim(sta, true);
+
sta->dead = true;
local->num_sta--;
@@ -908,7 +919,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
- cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
+ sta_set_sinfo(sta, &sinfo);
+ cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
rate_control_remove_sta_debugfs(sta);
ieee80211_sta_debugfs_remove(sta);
@@ -1243,10 +1255,11 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
* ends the poll/service period.
*/
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
- IEEE80211_TX_CTL_PS_RESPONSE |
IEEE80211_TX_STATUS_EOSP |
IEEE80211_TX_CTL_REQ_TX_STATUS;
+ info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+
if (call_driver)
drv_allow_buffered_frames(local, sta, BIT(tid), 1,
reason, false);
@@ -1395,8 +1408,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
* STA may still remain in PS mode after this frame
* exchange.
*/
- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
- IEEE80211_TX_CTL_PS_RESPONSE;
+ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+ info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
/*
* Use MoreData flag to indicate whether there are
@@ -1743,7 +1756,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
struct ieee80211_local *local = sdata->local;
struct rate_control_ref *ref = NULL;
struct timespec uptime;
- u64 packets = 0;
u32 thr = 0;
int i, ac;
@@ -1752,49 +1764,90 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->generation = sdata->local->sta_generation;
- sinfo->filled = STATION_INFO_INACTIVE_TIME |
- STATION_INFO_RX_BYTES64 |
- STATION_INFO_TX_BYTES64 |
- STATION_INFO_RX_PACKETS |
- STATION_INFO_TX_PACKETS |
- STATION_INFO_TX_RETRIES |
- STATION_INFO_TX_FAILED |
- STATION_INFO_TX_BITRATE |
- STATION_INFO_RX_BITRATE |
- STATION_INFO_RX_DROP_MISC |
- STATION_INFO_BSS_PARAM |
- STATION_INFO_CONNECTED_TIME |
- STATION_INFO_STA_FLAGS |
- STATION_INFO_BEACON_LOSS_COUNT;
+ /* do before driver, so beacon filtering drivers have a
+ * chance to e.g. just add the number of filtered beacons
+ * (or just modify the value entirely, of course)
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
+
+ drv_sta_statistics(local, sdata, &sta->sta, sinfo);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT(NL80211_STA_INFO_STA_FLAGS) |
+ BIT(NL80211_STA_INFO_BSS_PARAM) |
+ BIT(NL80211_STA_INFO_CONNECTED_TIME) |
+ BIT(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT(NL80211_STA_INFO_BEACON_LOSS);
ktime_get_ts(&uptime);
sinfo->connected_time = uptime.tv_sec - sta->last_connected;
-
sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
- sinfo->tx_bytes = 0;
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- sinfo->tx_bytes += sta->tx_bytes[ac];
- packets += sta->tx_packets[ac];
+
+ if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
+ BIT(NL80211_STA_INFO_TX_BYTES)))) {
+ sinfo->tx_bytes = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ sinfo->tx_bytes += sta->tx_bytes[ac];
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ }
+
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) {
+ sinfo->tx_packets = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ sinfo->tx_packets += sta->tx_packets[ac];
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
+ BIT(NL80211_STA_INFO_RX_BYTES)))) {
+ sinfo->rx_bytes = sta->rx_bytes;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ }
+
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
+ sinfo->rx_packets = sta->rx_packets;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ }
+
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) {
+ sinfo->tx_retries = sta->tx_retry_count;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
}
- sinfo->tx_packets = packets;
- sinfo->rx_bytes = sta->rx_bytes;
- sinfo->rx_packets = sta->rx_packets;
- sinfo->tx_retries = sta->tx_retry_count;
- sinfo->tx_failed = sta->tx_retry_failed;
+
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) {
+ sinfo->tx_failed = sta->tx_retry_failed;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ }
+
sinfo->rx_dropped_misc = sta->rx_dropped;
sinfo->beacon_loss_count = sta->beacon_loss_count;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) |
+ BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
+ }
+
if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
(sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
- sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
- if (!local->ops->get_rssi ||
- drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
sinfo->signal = (s8)sta->last_signal;
- sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ }
+
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
+ sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ }
}
- if (sta->chains) {
- sinfo->filled |= STATION_INFO_CHAIN_SIGNAL |
- STATION_INFO_CHAIN_SIGNAL_AVG;
+
+ if (sta->chains &&
+ !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
+ BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
+ BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
sinfo->chains = sta->chains;
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
@@ -1804,23 +1857,61 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
}
}
- sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
- sta_set_rate_info_rx(sta, &sinfo->rxrate);
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) {
+ sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE))) {
+ sta_set_rate_info_rx(sta, &sinfo->rxrate);
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_TID_STATS);
+ for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+ struct cfg80211_tid_stats *tidstats = &sinfo->pertid[i];
+
+ if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
+ tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
+ tidstats->rx_msdu = sta->rx_msdu[i];
+ }
+
+ if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
+ tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
+ tidstats->tx_msdu = sta->tx_msdu[i];
+ }
+
+ if (!(tidstats->filled &
+ BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
+ local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ tidstats->filled |=
+ BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
+ tidstats->tx_msdu_retries = sta->tx_msdu_retries[i];
+ }
+
+ if (!(tidstats->filled &
+ BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
+ local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ tidstats->filled |=
+ BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
+ tidstats->tx_msdu_failed = sta->tx_msdu_failed[i];
+ }
+ }
if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
- sinfo->filled |= STATION_INFO_LLID |
- STATION_INFO_PLID |
- STATION_INFO_PLINK_STATE |
- STATION_INFO_LOCAL_PM |
- STATION_INFO_PEER_PM |
- STATION_INFO_NONPEER_PM;
+ sinfo->filled |= BIT(NL80211_STA_INFO_LLID) |
+ BIT(NL80211_STA_INFO_PLID) |
+ BIT(NL80211_STA_INFO_PLINK_STATE) |
+ BIT(NL80211_STA_INFO_LOCAL_PM) |
+ BIT(NL80211_STA_INFO_PEER_PM) |
+ BIT(NL80211_STA_INFO_NONPEER_PM);
sinfo->llid = sta->llid;
sinfo->plid = sta->plid;
sinfo->plink_state = sta->plink_state;
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
- sinfo->filled |= STATION_INFO_T_OFFSET;
+ sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
sinfo->t_offset = sta->t_offset;
}
sinfo->local_pm = sta->local_pm;
@@ -1869,7 +1960,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
thr = drv_get_expected_throughput(local, &sta->sta);
if (thr != 0) {
- sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
+ sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
sinfo->expected_throughput = thr;
}
}
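
The reworked sta_set_sinfo() above calls the driver's sta_statistics hook first and then fills only the counters the driver did not claim, recording ownership through BIT(NL80211_STA_INFO_*) flags in sinfo->filled. A minimal userspace sketch of that claim-before-fill idiom, using made-up field and flag names rather than the kernel structures:

#include <stdint.h>
#include <stdio.h>

#define STAT_BIT(x) (1ULL << (x))
enum { STAT_TX_BYTES, STAT_RX_BYTES };

struct stats {
    uint64_t filled;    /* which fields are already valid */
    uint64_t tx_bytes;
};

/* fill tx_bytes from a software counter only if nothing else
 * (e.g. a driver callback) has claimed it already */
static void fill_tx_bytes(struct stats *s, uint64_t sw_counter)
{
    if (s->filled & STAT_BIT(STAT_TX_BYTES))
        return;
    s->tx_bytes = sw_counter;
    s->filled |= STAT_BIT(STAT_TX_BYTES);
}

int main(void)
{
    struct stats s = { 0 };

    fill_tx_bytes(&s, 4096);    /* first caller wins */
    fill_tx_bytes(&s, 9999);    /* ignored: already filled */
    printf("tx_bytes=%llu\n", (unsigned long long)s.tx_bytes);
    return 0;
}
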
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 4f052bb2a5ad..925e68fe64c7 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -346,6 +346,14 @@ struct ieee80211_tx_latency_stat {
* @cipher_scheme: optional cipher scheme for this station
* @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
+ * @tx_msdu: MSDUs transmitted to this station, using the IEEE80211_NUM_TIDS
+ * entry for non-QoS frames
+ * @tx_msdu_retries: MSDU retries for transmissions to this station,
+ * using the IEEE80211_NUM_TIDS entry for non-QoS frames
+ * @tx_msdu_failed: MSDU failures for transmissions to this station,
+ * using the IEEE80211_NUM_TIDS entry for non-QoS frames
+ * @rx_msdu: MSDUs received from this station, using the IEEE80211_NUM_TIDS
+ * entry for non-QoS frames
*/
struct sta_info {
/* General information, mostly static */
@@ -416,6 +424,10 @@ struct sta_info {
u32 last_rx_rate_vht_flag;
u8 last_rx_rate_vht_nss;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
+ u64 tx_msdu[IEEE80211_NUM_TIDS + 1];
+ u64 tx_msdu_retries[IEEE80211_NUM_TIDS + 1];
+ u64 tx_msdu_failed[IEEE80211_NUM_TIDS + 1];
+ u64 rx_msdu[IEEE80211_NUM_TIDS + 1];
/*
* Aggregation information, locked with lock.
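
The new per-TID MSDU counters above are sized IEEE80211_NUM_TIDS + 1 so the extra final slot can account for non-QoS frames. A small standalone sketch of that indexing convention (16 TIDs assumed, as in 802.11; the names are illustrative only):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_TIDS 16                     /* 802.11 QoS TIDs 0..15 */

static uint64_t tx_msdu[NUM_TIDS + 1];  /* index NUM_TIDS = non-QoS */

static void account_tx_msdu(bool qos_frame, uint8_t tid)
{
    /* non-QoS data frames share the extra bucket at index NUM_TIDS */
    tx_msdu[qos_frame ? (tid & 0xf) : NUM_TIDS]++;
}

int main(void)
{
    account_tx_msdu(true, 5);   /* QoS data on TID 5 */
    account_tx_msdu(false, 0);  /* non-QoS data frame */
    printf("tid5=%llu non-qos=%llu\n",
           (unsigned long long)tx_msdu[5],
           (unsigned long long)tx_msdu[NUM_TIDS]);
    return 0;
}
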
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index bb146f377ee4..e679b7c9b160 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -664,13 +664,15 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
struct ieee80211_supported_band *sband;
int retry_count;
int rates_idx;
- bool acked;
+ bool acked, noack_success;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
sband = hw->wiphy->bands[info->band];
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
+
if (pubsta) {
struct sta_info *sta;
@@ -696,7 +698,7 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
rate_control_tx_status_noskb(local, sband, sta, info);
}
- if (acked) {
+ if (acked || noack_success) {
local->dot11TransmittedFrameCount++;
if (!pubsta)
local->dot11MulticastTransmittedFrameCount++;
@@ -728,6 +730,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_bar *bar;
int rtap_len;
int shift = 0;
+ int tid = IEEE80211_NUM_TIDS;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
@@ -771,7 +774,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
- u16 tid, ssn;
+ u16 ssn;
u8 *qc;
qc = ieee80211_get_qos_ctl(hdr);
@@ -780,10 +783,14 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
& IEEE80211_SCTL_SEQ);
ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
tid, ssn);
+ } else if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ tid = qc[0] & 0xf;
}
if (!acked && ieee80211_is_back_req(fc)) {
- u16 tid, control;
+ u16 control;
/*
* BAR failed, store the last SSN and retry sending
@@ -811,6 +818,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!acked)
sta->tx_retry_failed++;
sta->tx_retry_count += retry_count;
+
+ if (ieee80211_is_data_present(fc)) {
+ if (!acked)
+ sta->tx_msdu_failed[tid]++;
+ sta->tx_msdu_retries[tid] += retry_count;
+ }
}
rate_control_tx_status(local, sband, sta, skb);
@@ -856,10 +869,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
* Fragments are passed to low-level drivers as separate skbs, so these
* are actually fragments, not frames. Update frame counters only for
* the first fragment of the frame. */
- if (info->flags & IEEE80211_TX_STAT_ACK) {
+ if ((info->flags & IEEE80211_TX_STAT_ACK) ||
+ (info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) {
if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
local->dot11TransmittedFrameCount++;
- if (is_multicast_ether_addr(hdr->addr1))
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
local->dot11MulticastTransmittedFrameCount++;
if (retry_count > 0)
local->dot11RetryCount++;
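
In the status path above, the accounting bucket defaults to IEEE80211_NUM_TIDS (the non-QoS slot) and is replaced by the low nibble of the QoS Control field only for QoS data frames. A standalone sketch of that selection (mask value per the 802.11 QoS Control layout; the helper name is made up):

#include <stdint.h>
#include <stdio.h>

#define NUM_TIDS         16
#define QOS_CTL_TID_MASK 0x000f         /* low 4 bits of QoS Control */

/* returns the accounting bucket: the TID for QoS data, NUM_TIDS otherwise */
static unsigned int frame_tid(int is_qos_data, uint16_t qos_ctl)
{
    if (!is_qos_data)
        return NUM_TIDS;
    return qos_ctl & QOS_CTL_TID_MASK;
}

int main(void)
{
    printf("%u %u\n", frame_tid(1, 0x0006), frame_tid(0, 0));
    /* prints: 6 16 */
    return 0;
}
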
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 55ddd77b865d..c9f9752217ac 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -68,17 +68,24 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
ch = ieee80211_get_channel(sdata->local->hw.wiphy, i);
if (ch) {
/* we will be active on the channel */
- u32 flags = IEEE80211_CHAN_DISABLED |
- IEEE80211_CHAN_NO_IR;
cfg80211_chandef_create(&chandef, ch,
- NL80211_CHAN_HT20);
- if (cfg80211_chandef_usable(sdata->local->hw.wiphy,
- &chandef, flags)) {
+ NL80211_CHAN_NO_HT);
+ if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+ &chandef,
+ sdata->wdev.iftype)) {
ch_cnt++;
+ /*
+ * check if the next channel is also part of
+ * this allowed range
+ */
continue;
}
}
+ /*
+ * we've reached the end of a range, with allowed channels
+ * found
+ */
if (ch_cnt) {
u8 *pos = skb_put(skb, 2);
*pos++ = ieee80211_frequency_to_channel(subband_start);
@@ -89,6 +96,15 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
}
}
+ /* all channels in the requested range are allowed - add them here */
+ if (ch_cnt) {
+ u8 *pos = skb_put(skb, 2);
+ *pos++ = ieee80211_frequency_to_channel(subband_start);
+ *pos++ = ch_cnt;
+
+ subband_cnt++;
+ }
+
return subband_cnt;
}
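
ieee80211_tdls_add_subband() now emits one (first channel, count) pair per contiguous run of usable channels, with the trailing block above flushing a run that reaches the end of the requested range. A hedged, self-contained sketch of that run-length encoding, where usable() merely stands in for the cfg80211_reg_can_beacon() check:

#include <stdio.h>

/* stand-in for the cfg80211_reg_can_beacon() check in the hunk above */
static int usable(int chan)
{
    return chan != 5 && chan != 9;
}

static void emit_subbands(int first_chan, int n_chans)
{
    int start = first_chan, cnt = 0;

    for (int ch = first_chan; ch < first_chan + n_chans; ch++) {
        if (usable(ch)) {
            if (!cnt)
                start = ch;
            cnt++;
            continue;
        }
        if (cnt)        /* a run just ended: emit it */
            printf("subband: first=%d count=%d\n", start, cnt);
        cnt = 0;
    }
    if (cnt)            /* the final run reached the end of the range */
        printf("subband: first=%d count=%d\n", start, cnt);
}

int main(void)
{
    emit_subbands(1, 11);   /* prints (1,4), (6,3), (10,2) */
    return 0;
}
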
@@ -329,24 +345,24 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
*/
sband = local->hw.wiphy->bands[band];
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
- if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE) &&
- ht_cap.ht_supported && (!sta || sta->sta.ht_cap.ht_supported)) {
- if (action_code == WLAN_TDLS_SETUP_REQUEST) {
- ieee80211_apply_htcap_overrides(sdata, &ht_cap);
-
- /* disable SMPS in TDLS initiator */
- ht_cap.cap |= (WLAN_HT_CAP_SM_PS_DISABLED
- << IEEE80211_HT_CAP_SM_PS_SHIFT);
- } else {
- /* disable SMPS in TDLS responder */
- sta->sta.ht_cap.cap |=
- (WLAN_HT_CAP_SM_PS_DISABLED
- << IEEE80211_HT_CAP_SM_PS_SHIFT);
-
- /* the peer caps are already intersected with our own */
- memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
- }
+
+ if (action_code == WLAN_TDLS_SETUP_REQUEST && ht_cap.ht_supported) {
+ ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+ /* disable SMPS in TDLS initiator */
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
+ << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
+ } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
+ ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
+ /* disable SMPS in TDLS responder */
+ sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
+ << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ /* the peer caps are already intersected with our own */
+ memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
@@ -836,7 +852,6 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
*/
if ((action_code == WLAN_TDLS_TEARDOWN) &&
(sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
- struct sta_info *sta = NULL;
bool try_resend; /* Should we keep skb for possible resend */
/* If not sending directly to peer - no point in keeping skb */
@@ -912,7 +927,7 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
rcu_read_unlock();
}
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, false);
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
dialog_token, status_code,
@@ -952,7 +967,7 @@ ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev,
*/
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN);
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, false);
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
dialog_token, status_code,
@@ -1098,7 +1113,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
*/
tasklet_kill(&local->tx_pending_tasklet);
/* flush a potentially queued teardown packet */
- ieee80211_flush_queues(local, sdata);
+ ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
break;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 8e461a02c6a8..263a9561eb26 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -825,6 +825,13 @@ DECLARE_EVENT_CLASS(sta_event,
)
);
+DEFINE_EVENT(sta_event, drv_sta_statistics,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+ TP_ARGS(local, sdata, sta)
+);
+
DEFINE_EVENT(sta_event, drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -1329,32 +1336,6 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
TP_ARGS(local, sta, tids, num_frames, reason, more_data)
);
-TRACE_EVENT(drv_get_rssi,
- TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta,
- s8 rssi, int ret),
-
- TP_ARGS(local, sta, rssi, ret),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- STA_ENTRY
- __field(s8, rssi)
- __field(int, ret)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- STA_ASSIGN;
- __entry->rssi = rssi;
- __entry->ret = ret;
- ),
-
- TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " rssi:%d ret:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->rssi, __entry->ret
- )
-);
-
DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 058686a721a1..88a18ffe2975 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -626,6 +626,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = NULL;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
if (!ieee80211_is_data_present(hdr->frame_control) &&
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
tx->skb))
@@ -636,6 +639,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
ieee80211_is_mgmt(hdr->frame_control);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (!ieee80211_is_mgmt(hdr->frame_control))
tx->key = NULL;
break;
@@ -815,6 +821,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
/* for pure STA mode without beacons, we can do it */
hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
tx->sdata->sequence_number += 0x10;
+ if (tx->sta)
+ tx->sta->tx_msdu[IEEE80211_NUM_TIDS]++;
return TX_CONTINUE;
}
@@ -831,6 +839,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
seq = &tx->sta->tid_seq[tid];
+ tx->sta->tx_msdu[tid]++;
hdr->seq_ctrl = cpu_to_le16(*seq);
@@ -1008,9 +1017,21 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
case WLAN_CIPHER_SUITE_TKIP:
return ieee80211_crypto_tkip_encrypt(tx);
case WLAN_CIPHER_SUITE_CCMP:
- return ieee80211_crypto_ccmp_encrypt(tx);
+ return ieee80211_crypto_ccmp_encrypt(
+ tx, IEEE80211_CCMP_MIC_LEN);
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return ieee80211_crypto_ccmp_encrypt(
+ tx, IEEE80211_CCMP_256_MIC_LEN);
case WLAN_CIPHER_SUITE_AES_CMAC:
return ieee80211_crypto_aes_cmac_encrypt(tx);
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ return ieee80211_crypto_aes_cmac_256_encrypt(tx);
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ return ieee80211_crypto_aes_gmac_encrypt(tx);
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return ieee80211_crypto_gcmp_encrypt(tx);
default:
return ieee80211_crypto_hw_encrypt(tx);
}
@@ -3152,7 +3173,7 @@ int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
}
queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
- __ieee80211_flush_queues(local, sdata, queues);
+ __ieee80211_flush_queues(local, sdata, queues, false);
sta->reserved_tid = tid;
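
The dispatch above passes the MIC length into the shared CCMP routine instead of duplicating it, since CCMP-128 and CCMP-256 differ only in key size and MIC size (8 vs. 16 bytes), and GCMP always uses a 16-byte MIC. A small standalone mapping to illustrate (abbreviated suite names, not the kernel's WLAN_CIPHER_SUITE_* values):

#include <stdio.h>

enum cipher { CCMP_128, CCMP_256, GCMP_128, GCMP_256 };

/* MIC lengths from IEEE 802.11: CCMP-128 uses an 8-byte MIC,
 * CCMP-256 and both GCMP variants use 16 bytes */
static unsigned int mic_len(enum cipher c)
{
    switch (c) {
    case CCMP_128:
        return 8;
    case CCMP_256:
    case GCMP_128:
    case GCMP_256:
        return 16;
    }
    return 0;
}

int main(void)
{
    printf("CCMP-128 MIC %u, CCMP-256 MIC %u\n",
           mic_len(CCMP_128), mic_len(CCMP_256));
    return 0;
}
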
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 974ebe70f5b0..8428f4a95479 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -578,7 +578,7 @@ ieee80211_get_vif_queues(struct ieee80211_local *local,
void __ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- unsigned int queues)
+ unsigned int queues, bool drop)
{
if (!local->ops->flush)
return;
@@ -594,7 +594,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
IEEE80211_QUEUE_STOP_REASON_FLUSH,
false);
- drv_flush(local, sdata, queues, false);
+ drv_flush(local, sdata, queues, drop);
ieee80211_wake_queues_by_reason(&local->hw, queues,
IEEE80211_QUEUE_STOP_REASON_FLUSH,
@@ -602,9 +602,9 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
}
void ieee80211_flush_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata, bool drop)
{
- __ieee80211_flush_queues(local, sdata, 0);
+ __ieee80211_flush_queues(local, sdata, 0, drop);
}
void ieee80211_stop_vif_queues(struct ieee80211_local *local,
@@ -744,16 +744,19 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
/*
* Nothing should have been stuffed into the workqueue during
- * the suspend->resume cycle. If this WARN is seen then there
- * is a bug with either the driver suspend or something in
- * mac80211 stuffing into the workqueue which we haven't yet
- * cleared during mac80211's suspend cycle.
+ * the suspend->resume cycle. Since we can't check at each caller
+ * of this function whether we are already quiescing / suspended,
+ * check here and don't WARN, since this can actually happen when
+ * the rx path (for example) races against __ieee80211_suspend
+ * and suspending / quiescing were set after the rx path checked
+ * them.
*/
static bool ieee80211_can_queue_work(struct ieee80211_local *local)
{
- if (WARN(local->suspended && !local->resuming,
- "queueing ieee80211 work while going to suspend\n"))
+ if (local->quiescing || (local->suspended && !local->resuming)) {
+ pr_warn("queueing ieee80211 work while going to suspend\n");
return false;
+ }
return true;
}
@@ -1470,10 +1473,12 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
/* Check if any channel in this sband supports at least 80 MHz */
for (i = 0; i < sband->n_channels; i++) {
- if (!(sband->channels[i].flags & IEEE80211_CHAN_NO_80MHZ)) {
- have_80mhz = true;
- break;
- }
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
}
if (sband->vht_cap.vht_supported && have_80mhz) {
@@ -1735,6 +1740,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct cfg80211_sched_scan_request *sched_scan_req;
bool sched_scan_stopped = false;
+ /* nothing to do if HW shouldn't run */
+ if (!local->open_count)
+ goto wake_up;
+
#ifdef CONFIG_PM
if (local->suspended)
local->resuming = true;
@@ -1756,9 +1765,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
reconfig_due_to_wowlan = true;
}
#endif
- /* everything else happens only if HW was up & running */
- if (!local->open_count)
- goto wake_up;
/*
* Upon resume hardware can sometimes be goofy due to
@@ -2042,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
- if (!local->suspended || reconfig_due_to_wowlan)
+ if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
if (!local->suspended)
@@ -2054,7 +2060,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mb();
local->resuming = false;
- if (!reconfig_due_to_wowlan)
+ /* It's possible that we don't handle the scan completion in
+ * time during suspend, so if it's still marked as completed
+ * here, queue the work and flush it to clean things up.
+ * Instead of calling the worker function directly here, we
+ * really queue it to avoid potential races with other flows
+ * scheduling the same work.
+ */
+ if (test_bit(SCAN_COMPLETED, &local->scanning)) {
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+ flush_delayed_work(&local->scan_work);
+ }
+
+ if (local->open_count && !reconfig_due_to_wowlan)
drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -2538,7 +2556,9 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.mcs = status->rate_idx;
ri.flags |= RATE_INFO_FLAGS_MCS;
if (status->flag & RX_FLAG_40MHZ)
- ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ ri.bw = RATE_INFO_BW_40;
+ else
+ ri.bw = RATE_INFO_BW_20;
if (status->flag & RX_FLAG_SHORT_GI)
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (status->flag & RX_FLAG_VHT) {
@@ -2546,13 +2566,13 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.mcs = status->rate_idx;
ri.nss = status->vht_nss;
if (status->flag & RX_FLAG_40MHZ)
- ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- if (status->vht_flag & RX_VHT_FLAG_80MHZ)
- ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
- ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
- if (status->vht_flag & RX_VHT_FLAG_160MHZ)
- ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
+ ri.bw = RATE_INFO_BW_40;
+ else if (status->vht_flag & RX_VHT_FLAG_80MHZ)
+ ri.bw = RATE_INFO_BW_80;
+ else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
+ ri.bw = RATE_INFO_BW_160;
+ else
+ ri.bw = RATE_INFO_BW_20;
if (status->flag & RX_FLAG_SHORT_GI)
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
@@ -2560,10 +2580,15 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
int shift = 0;
int bitrate;
- if (status->flag & RX_FLAG_10MHZ)
+ if (status->flag & RX_FLAG_10MHZ) {
shift = 1;
- if (status->flag & RX_FLAG_5MHZ)
+ ri.bw = RATE_INFO_BW_10;
+ } else if (status->flag & RX_FLAG_5MHZ) {
shift = 2;
+ ri.bw = RATE_INFO_BW_5;
+ } else {
+ ri.bw = RATE_INFO_BW_20;
+ }
sband = local->hw.wiphy->bands[status->band];
bitrate = sband->bitrates[status->rate_idx].bitrate;
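
The 5/10 MHz branches above keep mac80211's shift convention: the rate table stores 20 MHz bitrates, and narrow channels divide them by 1 << shift (half for 10 MHz, a quarter for 5 MHz). A quick standalone illustration of that scaling, assuming the round-up division used for legacy rates:

#include <stdio.h>

/* bitrates are in 100 kbit/s units, as in the mac80211 rate tables */
static int scaled_bitrate(int bitrate, int shift)
{
    /* round up, e.g. 5.5 Mbit/s (55) on a 10 MHz channel -> 28 */
    return (bitrate + (1 << shift) - 1) >> shift;
}

int main(void)
{
    printf("54 Mbit/s: 20MHz=%d 10MHz=%d 5MHz=%d (x100 kbit/s)\n",
           scaled_bitrate(540, 0), scaled_bitrate(540, 1),
           scaled_bitrate(540, 2));
    return 0;
}
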
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index bc9e8fc48785..85f9596da07b 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -269,51 +269,54 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
}
-enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
+enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 cap = sta->sta.vht_cap.cap;
- enum ieee80211_sta_rx_bandwidth bw;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ u32 cap_width;
- if (!sta->sta.vht_cap.vht_supported) {
- bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
- IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
- goto check_max;
- }
+ if (!vht_cap->vht_supported)
+ return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ IEEE80211_STA_RX_BW_40 :
+ IEEE80211_STA_RX_BW_20;
- switch (sdata->vif.bss_conf.chandef.width) {
- default:
- WARN_ON_ONCE(1);
- /* fall through */
+ cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+ if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
+ cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ return IEEE80211_STA_RX_BW_160;
+
+ return IEEE80211_STA_RX_BW_80;
+}
+
+static enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
+{
+ switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
- bw = IEEE80211_STA_RX_BW_20;
- break;
+ return IEEE80211_STA_RX_BW_20;
case NL80211_CHAN_WIDTH_40:
- bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
- IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
- break;
+ return IEEE80211_STA_RX_BW_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_STA_RX_BW_80;
case NL80211_CHAN_WIDTH_160:
- if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) {
- bw = IEEE80211_STA_RX_BW_160;
- break;
- }
- /* fall through */
case NL80211_CHAN_WIDTH_80P80:
- if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) {
- bw = IEEE80211_STA_RX_BW_160;
- break;
- }
- /* fall through */
- case NL80211_CHAN_WIDTH_80:
- bw = IEEE80211_STA_RX_BW_80;
+ return IEEE80211_STA_RX_BW_160;
+ default:
+ WARN_ON_ONCE(1);
+ return IEEE80211_STA_RX_BW_20;
}
+}
+
+enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ enum ieee80211_sta_rx_bandwidth bw;
+
+ bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
+ bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+ bw = min(bw, sta->cur_max_bandwidth);
- check_max:
- if (bw > sta->cur_max_bandwidth)
- bw = sta->cur_max_bandwidth;
return bw;
}
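
After the refactor, ieee80211_sta_cur_vht_bw() is just the minimum of three limits: the bandwidth implied by the BSS channel width, the peer's capability-derived bandwidth, and the station's current maximum (e.g. from an operating-mode notification). A tiny worked example, assuming the bandwidth enum is ordered by increasing width as in mac80211:

#include <stdio.h>

enum rx_bw { BW_20, BW_40, BW_80, BW_160 };     /* ordered by width */

static enum rx_bw min_bw(enum rx_bw a, enum rx_bw b)
{
    return a < b ? a : b;
}

int main(void)
{
    enum rx_bw chan = BW_80;    /* from the BSS chandef */
    enum rx_bw cap = BW_160;    /* from the peer's VHT capabilities */
    enum rx_bw opmode = BW_40;  /* e.g. operating mode notification */
    enum rx_bw cur = min_bw(min_bw(chan, cap), opmode);

    printf("current rx bandwidth index: %d (BW_40)\n", cur);
    return 0;
}
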
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 12398fde02e8..75de6fac40d1 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -22,6 +22,8 @@
#include "tkip.h"
#include "aes_ccm.h"
#include "aes_cmac.h"
+#include "aes_gmac.h"
+#include "aes_gcm.h"
#include "wpa.h"
ieee80211_tx_result
@@ -393,7 +395,8 @@ static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr)
}
-static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
+ unsigned int mic_len)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_key *key = tx->key;
@@ -424,7 +427,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
if (info->control.hw_key)
tail = 0;
else
- tail = IEEE80211_CCMP_MIC_LEN;
+ tail = mic_len;
if (WARN_ON(skb_tailroom(skb) < tail ||
skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
@@ -459,21 +462,22 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos += IEEE80211_CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, b_0, aad);
ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
- skb_put(skb, IEEE80211_CCMP_MIC_LEN));
+ skb_put(skb, mic_len), mic_len);
return 0;
}
ieee80211_tx_result
-ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
+ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
+ unsigned int mic_len)
{
struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
skb_queue_walk(&tx->skbs, skb) {
- if (ccmp_encrypt_skb(tx, skb) < 0)
+ if (ccmp_encrypt_skb(tx, skb, mic_len) < 0)
return TX_DROP;
}
@@ -482,7 +486,8 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
ieee80211_rx_result
-ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
+ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+ unsigned int mic_len)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
int hdrlen;
@@ -499,8 +504,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
- data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
- IEEE80211_CCMP_MIC_LEN;
+ data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
@@ -531,14 +535,14 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
key->u.ccmp.tfm, b_0, aad,
skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
data_len,
- skb->data + skb->len - IEEE80211_CCMP_MIC_LEN))
+ skb->data + skb->len - mic_len, mic_len))
return RX_DROP_UNUSABLE;
}
memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
/* Remove CCMP header and MIC */
- if (pskb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN))
+ if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
@@ -546,6 +550,229 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+{
+ __le16 mask_fc;
+ u8 qos_tid;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ memcpy(j_0, hdr->addr2, ETH_ALEN);
+ memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
+ j_0[13] = 0;
+ j_0[14] = 0;
+ j_0[AES_BLOCK_SIZE - 1] = 0x01;
+
+ /* AAD (extra authenticate-only data) / masked 802.11 header
+ * FC | A1 | A2 | A3 | SC | [A4] | [QC]
+ */
+ put_unaligned_be16(ieee80211_hdrlen(hdr->frame_control) - 2, &aad[0]);
+ /* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
+ * Retry, PwrMgt, MoreData; set Protected
+ */
+ mask_fc = hdr->frame_control;
+ mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
+ IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ mask_fc &= ~cpu_to_le16(0x0070);
+ mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+ put_unaligned(mask_fc, (__le16 *)&aad[2]);
+ memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
+
+ /* Mask Seq#, leave Frag# */
+ aad[22] = *((u8 *)&hdr->seq_ctrl) & 0x0f;
+ aad[23] = 0;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ qos_tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TID_MASK;
+ else
+ qos_tid = 0;
+
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ memcpy(&aad[24], hdr->addr4, ETH_ALEN);
+ aad[30] = qos_tid;
+ aad[31] = 0;
+ } else {
+ memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
+ aad[24] = qos_tid;
+ }
+}
+
+static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
+{
+ hdr[0] = pn[5];
+ hdr[1] = pn[4];
+ hdr[2] = 0;
+ hdr[3] = 0x20 | (key_id << 6);
+ hdr[4] = pn[3];
+ hdr[5] = pn[2];
+ hdr[6] = pn[1];
+ hdr[7] = pn[0];
+}
+
+static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr)
+{
+ pn[0] = hdr[7];
+ pn[1] = hdr[6];
+ pn[2] = hdr[5];
+ pn[3] = hdr[4];
+ pn[4] = hdr[1];
+ pn[5] = hdr[0];
+}
+
+static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key *key = tx->key;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int hdrlen, len, tail;
+ u8 *pos;
+ u8 pn[6];
+ u64 pn64;
+ u8 aad[2 * AES_BLOCK_SIZE];
+ u8 j_0[AES_BLOCK_SIZE];
+
+ if (info->control.hw_key &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+ !((info->control.hw_key->flags &
+ IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
+ ieee80211_is_mgmt(hdr->frame_control))) {
+ /* hwaccel has no need for preallocated room for GCMP
+ * header or MIC fields
+ */
+ return 0;
+ }
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ len = skb->len - hdrlen;
+
+ if (info->control.hw_key)
+ tail = 0;
+ else
+ tail = IEEE80211_GCMP_MIC_LEN;
+
+ if (WARN_ON(skb_tailroom(skb) < tail ||
+ skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN))
+ return -1;
+
+ pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN);
+ memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) +
+ IEEE80211_GCMP_HDR_LEN);
+
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)pos;
+ pos += hdrlen;
+
+ pn64 = atomic64_inc_return(&key->u.gcmp.tx_pn);
+
+ pn[5] = pn64;
+ pn[4] = pn64 >> 8;
+ pn[3] = pn64 >> 16;
+ pn[2] = pn64 >> 24;
+ pn[1] = pn64 >> 32;
+ pn[0] = pn64 >> 40;
+
+ gcmp_pn2hdr(pos, pn, key->conf.keyidx);
+
+ /* hwaccel - with software GCMP header */
+ if (info->control.hw_key)
+ return 0;
+
+ pos += IEEE80211_GCMP_HDR_LEN;
+ gcmp_special_blocks(skb, pn, j_0, aad);
+ ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+ skb_put(skb, IEEE80211_GCMP_MIC_LEN));
+
+ return 0;
+}
+
+ieee80211_tx_result
+ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx)
+{
+ struct sk_buff *skb;
+
+ ieee80211_tx_set_protected(tx);
+
+ skb_queue_walk(&tx->skbs, skb) {
+ if (gcmp_encrypt_skb(tx, skb) < 0)
+ return TX_DROP;
+ }
+
+ return TX_CONTINUE;
+}
+
+ieee80211_rx_result
+ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ int hdrlen;
+ struct ieee80211_key *key = rx->key;
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ u8 pn[IEEE80211_GCMP_PN_LEN];
+ int data_len;
+ int queue;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_is_robust_mgmt_frame(skb))
+ return RX_CONTINUE;
+
+ data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
+ IEEE80211_GCMP_MIC_LEN;
+ if (!rx->sta || data_len < 0)
+ return RX_DROP_UNUSABLE;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
+ return RX_DROP_UNUSABLE;
+ } else {
+ if (skb_linearize(rx->skb))
+ return RX_DROP_UNUSABLE;
+ }
+
+ gcmp_hdr2pn(pn, skb->data + hdrlen);
+
+ queue = rx->security_idx;
+
+ if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
+ key->u.gcmp.replays++;
+ return RX_DROP_UNUSABLE;
+ }
+
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ u8 aad[2 * AES_BLOCK_SIZE];
+ u8 j_0[AES_BLOCK_SIZE];
+ /* hardware didn't decrypt/verify MIC */
+ gcmp_special_blocks(skb, pn, j_0, aad);
+
+ if (ieee80211_aes_gcm_decrypt(
+ key->u.gcmp.tfm, j_0, aad,
+ skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+ data_len,
+ skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
+ return RX_DROP_UNUSABLE;
+ }
+
+ memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+
+ /* Remove GCMP header and MIC */
+ if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
+ return RX_DROP_UNUSABLE;
+ memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
+ skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
+
+ return RX_CONTINUE;
+}
+
static ieee80211_tx_result
ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
struct sk_buff *skb)
@@ -729,6 +956,48 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
+ieee80211_tx_result
+ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_key *key = tx->key;
+ struct ieee80211_mmie_16 *mmie;
+ u8 aad[20];
+ u64 pn64;
+
+ if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+ return TX_DROP;
+
+ skb = skb_peek(&tx->skbs);
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (info->control.hw_key)
+ return TX_CONTINUE;
+
+ if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
+ return TX_DROP;
+
+ mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+ mmie->element_id = WLAN_EID_MMIE;
+ mmie->length = sizeof(*mmie) - 2;
+ mmie->key_id = cpu_to_le16(key->conf.keyidx);
+
+ /* PN = PN + 1 */
+ pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+
+ bip_ipn_set64(mmie->sequence_number, pn64);
+
+ bip_aad(skb, aad);
+
+ /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128)
+ */
+ ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+ skb->data + 24, skb->len - 24, mmie->mic);
+
+ return TX_CONTINUE;
+}
ieee80211_rx_result
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
@@ -780,6 +1049,160 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
+ieee80211_rx_result
+ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
+{
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_key *key = rx->key;
+ struct ieee80211_mmie_16 *mmie;
+ u8 aad[20], mic[16], ipn[6];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return RX_CONTINUE;
+
+ /* management frames are already linear */
+
+ if (skb->len < 24 + sizeof(*mmie))
+ return RX_DROP_UNUSABLE;
+
+ mmie = (struct ieee80211_mmie_16 *)
+ (skb->data + skb->len - sizeof(*mmie));
+ if (mmie->element_id != WLAN_EID_MMIE ||
+ mmie->length != sizeof(*mmie) - 2)
+ return RX_DROP_UNUSABLE; /* Invalid MMIE */
+
+ bip_ipn_swap(ipn, mmie->sequence_number);
+
+ if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
+ key->u.aes_cmac.replays++;
+ return RX_DROP_UNUSABLE;
+ }
+
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ /* hardware didn't decrypt/verify MIC */
+ bip_aad(skb, aad);
+ ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+ skb->data + 24, skb->len - 24, mic);
+ if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ key->u.aes_cmac.icverrors++;
+ return RX_DROP_UNUSABLE;
+ }
+ }
+
+ memcpy(key->u.aes_cmac.rx_pn, ipn, 6);
+
+ /* Remove MMIE */
+ skb_trim(skb, skb->len - sizeof(*mmie));
+
+ return RX_CONTINUE;
+}
+
+ieee80211_tx_result
+ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_key *key = tx->key;
+ struct ieee80211_mmie_16 *mmie;
+ struct ieee80211_hdr *hdr;
+ u8 aad[20];
+ u64 pn64;
+ u8 nonce[12];
+
+ if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+ return TX_DROP;
+
+ skb = skb_peek(&tx->skbs);
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (info->control.hw_key)
+ return TX_CONTINUE;
+
+ if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
+ return TX_DROP;
+
+ mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+ mmie->element_id = WLAN_EID_MMIE;
+ mmie->length = sizeof(*mmie) - 2;
+ mmie->key_id = cpu_to_le16(key->conf.keyidx);
+
+ /* PN = PN + 1 */
+ pn64 = atomic64_inc_return(&key->u.aes_gmac.tx_pn);
+
+ bip_ipn_set64(mmie->sequence_number, pn64);
+
+ bip_aad(skb, aad);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ memcpy(nonce, hdr->addr2, ETH_ALEN);
+ bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number);
+
+ /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */
+ if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+ skb->data + 24, skb->len - 24, mmie->mic) < 0)
+ return TX_DROP;
+
+ return TX_CONTINUE;
+}
+
+ieee80211_rx_result
+ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
+{
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_key *key = rx->key;
+ struct ieee80211_mmie_16 *mmie;
+ u8 aad[20], mic[16], ipn[6], nonce[12];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return RX_CONTINUE;
+
+ /* management frames are already linear */
+
+ if (skb->len < 24 + sizeof(*mmie))
+ return RX_DROP_UNUSABLE;
+
+ mmie = (struct ieee80211_mmie_16 *)
+ (skb->data + skb->len - sizeof(*mmie));
+ if (mmie->element_id != WLAN_EID_MMIE ||
+ mmie->length != sizeof(*mmie) - 2)
+ return RX_DROP_UNUSABLE; /* Invalid MMIE */
+
+ bip_ipn_swap(ipn, mmie->sequence_number);
+
+ if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) {
+ key->u.aes_gmac.replays++;
+ return RX_DROP_UNUSABLE;
+ }
+
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ /* hardware didn't decrypt/verify MIC */
+ bip_aad(skb, aad);
+
+ memcpy(nonce, hdr->addr2, ETH_ALEN);
+ memcpy(nonce + ETH_ALEN, ipn, 6);
+
+ if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+ skb->data + 24, skb->len - 24,
+ mic) < 0 ||
+ memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ key->u.aes_gmac.icverrors++;
+ return RX_DROP_UNUSABLE;
+ }
+ }
+
+ memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
+
+ /* Remove MMIE */
+ skb_trim(skb, skb->len - sizeof(*mmie));
+
+ return RX_CONTINUE;
+}
+
ieee80211_tx_result
ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
{
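
The gcmp_pn2hdr()/gcmp_hdr2pn() helpers added above pack the 48-bit packet number into the 8-byte GCMP header, with byte 3 carrying the ExtIV bit (0x20) and the key ID, mirroring the CCMP layout. A standalone round-trip check of that byte ordering (the ordering is copied from the hunk; everything else is illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void pn2hdr(uint8_t *hdr, const uint8_t *pn, int key_id)
{
    hdr[0] = pn[5];
    hdr[1] = pn[4];
    hdr[2] = 0;
    hdr[3] = 0x20 | (key_id << 6);  /* ExtIV set, key ID in bits 6..7 */
    hdr[4] = pn[3];
    hdr[5] = pn[2];
    hdr[6] = pn[1];
    hdr[7] = pn[0];
}

static void hdr2pn(uint8_t *pn, const uint8_t *hdr)
{
    pn[0] = hdr[7];
    pn[1] = hdr[6];
    pn[2] = hdr[5];
    pn[3] = hdr[4];
    pn[4] = hdr[1];
    pn[5] = hdr[0];
}

int main(void)
{
    uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x12, 0x34 };
    uint8_t hdr[8], out[6];

    pn2hdr(hdr, pn, 1);
    hdr2pn(out, hdr);
    printf("roundtrip %s, header byte 3 = 0x%02x\n",
           memcmp(pn, out, 6) ? "FAILED" : "ok", hdr[3]);
    return 0;
}
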
diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h
index 62e5a12dfe0a..d98011ee8f55 100644
--- a/net/mac80211/wpa.h
+++ b/net/mac80211/wpa.h
@@ -24,17 +24,32 @@ ieee80211_rx_result
ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx);
ieee80211_tx_result
-ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
+ unsigned int mic_len);
ieee80211_rx_result
-ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+ unsigned int mic_len);
ieee80211_tx_result
ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_tx_result
+ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx);
ieee80211_rx_result
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_rx_result
+ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_tx_result
+ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_rx_result
+ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx);
ieee80211_tx_result
ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx);
ieee80211_rx_result
ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_tx_result
+ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_rx_result
+ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx);
+
#endif /* WPA_H */
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index c035708ada16..5d9f68c75e5f 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -51,10 +51,7 @@ ieee802154_add_iface(struct wpan_phy *phy, const char *name,
struct net_device *err;
err = ieee802154_if_add(local, name, type, extended_addr);
- if (IS_ERR(err))
- return PTR_ERR(err);
-
- return 0;
+ return PTR_ERR_OR_ZERO(err);
}
static int
@@ -87,6 +84,26 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
}
static int
+ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
+ const struct wpan_phy_cca *cca)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ int ret;
+
+ ASSERT_RTNL();
+
+ /* check if phy support this setting */
+ if (!(local->hw.flags & IEEE802154_HW_CCA_MODE))
+ return -EOPNOTSUPP;
+
+ ret = drv_set_cca_mode(local, cca);
+ if (!ret)
+ wpan_phy->cca = *cca;
+
+ return ret;
+}
+
+static int
ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 pan_id)
{
@@ -201,6 +218,7 @@ const struct cfg802154_ops mac802154_config_ops = {
.add_virtual_intf = ieee802154_add_iface,
.del_virtual_intf = ieee802154_del_iface,
.set_channel = ieee802154_set_channel,
+ .set_cca_mode = ieee802154_set_cca_mode,
.set_pan_id = ieee802154_set_pan_id,
.set_short_addr = ieee802154_set_short_addr,
.set_backoff_exponent = ieee802154_set_backoff_exponent,
diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h
index f21e864613d0..98180a9fff4a 100644
--- a/net/mac802154/driver-ops.h
+++ b/net/mac802154/driver-ops.h
@@ -70,7 +70,8 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
return local->ops->set_txpower(&local->hw, dbm);
}
-static inline int drv_set_cca_mode(struct ieee802154_local *local, u8 cca_mode)
+static inline int drv_set_cca_mode(struct ieee802154_local *local,
+ const struct wpan_phy_cca *cca)
{
might_sleep();
@@ -79,7 +80,7 @@ static inline int drv_set_cca_mode(struct ieee802154_local *local, u8 cca_mode)
return -EOPNOTSUPP;
}
- return local->ops->set_cca_mode(&local->hw, cca_mode);
+ return local->ops->set_cca_mode(&local->hw, cca);
}
static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 9ae893057dd7..6fb6bdf9868c 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -137,25 +137,11 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
static int mac802154_slave_open(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
- struct ieee802154_sub_if_data *subif;
struct ieee802154_local *local = sdata->local;
int res = 0;
ASSERT_RTNL();
- if (sdata->vif.type == NL802154_IFTYPE_NODE) {
- mutex_lock(&sdata->local->iflist_mtx);
- list_for_each_entry(subif, &sdata->local->interfaces, list) {
- if (subif != sdata &&
- subif->vif.type == sdata->vif.type &&
- ieee802154_sdata_running(subif)) {
- mutex_unlock(&sdata->local->iflist_mtx);
- return -EBUSY;
- }
- }
- mutex_unlock(&sdata->local->iflist_mtx);
- }
-
set_bit(SDATA_STATE_RUNNING, &sdata->state);
if (!local->open_count) {
@@ -175,6 +161,88 @@ err:
return res;
}
+static int
+ieee802154_check_mac_settings(struct ieee802154_local *local,
+ struct wpan_dev *wpan_dev,
+ struct wpan_dev *nwpan_dev)
+{
+ ASSERT_RTNL();
+
+ if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
+ if (wpan_dev->promiscuous_mode != nwpan_dev->promiscuous_mode)
+ return -EBUSY;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_AFILT) {
+ if (wpan_dev->pan_id != nwpan_dev->pan_id)
+ return -EBUSY;
+
+ if (wpan_dev->short_addr != nwpan_dev->short_addr)
+ return -EBUSY;
+
+ if (wpan_dev->extended_addr != nwpan_dev->extended_addr)
+ return -EBUSY;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
+ if (wpan_dev->min_be != nwpan_dev->min_be)
+ return -EBUSY;
+
+ if (wpan_dev->max_be != nwpan_dev->max_be)
+ return -EBUSY;
+
+ if (wpan_dev->csma_retries != nwpan_dev->csma_retries)
+ return -EBUSY;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
+ if (wpan_dev->frame_retries != nwpan_dev->frame_retries)
+ return -EBUSY;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_LBT) {
+ if (wpan_dev->lbt != nwpan_dev->lbt)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata,
+ enum nl802154_iftype iftype)
+{
+ struct ieee802154_local *local = sdata->local;
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct ieee802154_sub_if_data *nsdata;
+
+ /* we hold the RTNL here so we can safely walk the list */
+ list_for_each_entry(nsdata, &local->interfaces, list) {
+ if (nsdata != sdata && ieee802154_sdata_running(nsdata)) {
+ int ret;
+
+ /* TODO: we currently don't support multiple node types;
+ * we would need to run skb_clone in the rx path. Check
+ * whether there really is a use case for supporting
+ * multiple node types at the same time.
+ */
+ if (sdata->vif.type == NL802154_IFTYPE_NODE &&
+ nsdata->vif.type == NL802154_IFTYPE_NODE)
+ return -EBUSY;
+
+ /* check that all phy MAC sublayer settings are the same.
+ * We have only one phy; different values cause trouble.
+ */
+ ret = ieee802154_check_mac_settings(local, wpan_dev,
+ &nsdata->wpan_dev);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int mac802154_wpan_open(struct net_device *dev)
{
int rc;
@@ -183,6 +251,10 @@ static int mac802154_wpan_open(struct net_device *dev)
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct wpan_phy *phy = sdata->local->phy;
+ rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type);
+ if (rc < 0)
+ return rc;
+
rc = mac802154_slave_open(dev);
if (rc < 0)
return rc;
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index 6aacb1816889..bdccb4ecd30f 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -81,7 +81,7 @@ static int mac802154_set_mac_params(struct net_device *dev,
/* PHY */
wpan_dev->wpan_phy->transmit_power = params->transmit_power;
- wpan_dev->wpan_phy->cca_mode = params->cca_mode;
+ wpan_dev->wpan_phy->cca = params->cca;
wpan_dev->wpan_phy->cca_ed_level = params->cca_ed_level;
/* MAC */
@@ -98,7 +98,7 @@ static int mac802154_set_mac_params(struct net_device *dev,
}
if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
- ret = drv_set_cca_mode(local, params->cca_mode);
+ ret = drv_set_cca_mode(local, &params->cca);
if (ret < 0)
return ret;
}
@@ -122,7 +122,7 @@ static void mac802154_get_mac_params(struct net_device *dev,
/* PHY */
params->transmit_power = wpan_dev->wpan_phy->transmit_power;
- params->cca_mode = wpan_dev->wpan_phy->cca_mode;
+ params->cca = wpan_dev->wpan_phy->cca;
params->cca_ed_level = wpan_dev->wpan_phy->cca_ed_level;
/* MAC */
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 349295d21946..809df534a720 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -60,14 +60,14 @@ out:
return segs;
}
-static struct packet_offload mpls_mc_offload = {
+static struct packet_offload mpls_mc_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_MC),
.callbacks = {
.gso_segment = mpls_gso_segment,
},
};
-static struct packet_offload mpls_uc_offload = {
+static struct packet_offload mpls_uc_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_UC),
.callbacks = {
.gso_segment = mpls_gso_segment,
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 990decba1fe4..b87ca32efa0b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
return err;
}
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
+ unsigned int hooknum)
{
+ if (!sysctl_snat_reroute(skb))
+ return 0;
+ /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
+ if (NF_INET_LOCAL_IN == hooknum)
+ return 0;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+ ip6_route_me_harder(skb) != 0)
return 1;
} else
#endif
- if ((sysctl_snat_reroute(skb) ||
- skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+ if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
return 1;
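
ip_vs_route_me_harder() now receives the hook number so that replies handled in LOCAL_IN are never rerouted; only traffic seen in FORWARD or LOCAL_OUT is re-examined, and only when the snat_reroute sysctl allows it. A small standalone decision sketch (hook names abbreviated, not the netfilter enum values):

#include <stdio.h>

enum hook { HOOK_LOCAL_IN, HOOK_FORWARD, HOOK_LOCAL_OUT };

/* reroute SNATed replies only when they are leaving this box */
static int needs_reroute(int snat_reroute_sysctl, enum hook h)
{
    if (!snat_reroute_sysctl)
        return 0;
    if (h == HOOK_LOCAL_IN)     /* reply delivered locally: nothing to do */
        return 0;
    return 1;
}

int main(void)
{
    printf("LOCAL_IN=%d FORWARD=%d LOCAL_OUT=%d\n",
           needs_reroute(1, HOOK_LOCAL_IN),
           needs_reroute(1, HOOK_FORWARD),
           needs_reroute(1, HOOK_LOCAL_OUT));
    return 0;
}
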
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
union nf_inet_addr *snet,
__u8 protocol, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp,
- unsigned int offset, unsigned int ihl)
+ unsigned int offset, unsigned int ihl,
+ unsigned int hooknum)
{
unsigned int verdict = NF_DROP;
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
#endif
ip_vs_nat_icmp(skb, pp, cp, 1);
- if (ip_vs_route_me_harder(af, skb))
+ if (ip_vs_route_me_harder(af, skb, hooknum))
goto out;
/* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
snet.ip = iph->saddr;
return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
- pp, ciph.len, ihl);
+ pp, ciph.len, ihl, hooknum);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
snet.in6 = ciph.saddr.in6;
writable = ciph.len;
return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
- pp, writable, sizeof(struct ipv6hdr));
+ pp, writable, sizeof(struct ipv6hdr),
+ hooknum);
}
#endif
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
*/
static unsigned int
handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
- struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
+ unsigned int hooknum)
{
struct ip_vs_protocol *pp = pd->pp;
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
* if it came from this machine itself. So re-compute
* the routing information.
*/
- if (ip_vs_route_me_harder(af, skb))
+ if (ip_vs_route_me_harder(af, skb, hooknum))
goto drop;
IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
cp = pp->conn_out_get(af, skb, &iph, 0);
if (likely(cp))
- return handle_response(af, skb, pd, cp, &iph);
+ return handle_response(af, skb, pd, cp, &iph, hooknum);
if (sysctl_nat_icmp_send(net) &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b8295a430a56..e55759056361 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2887,7 +2887,8 @@ static int ip_vs_genl_dump_service(struct sk_buff *skb,
if (ip_vs_genl_fill_service(skb, svc) < 0)
goto nla_put_failure;
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
@@ -3079,7 +3080,8 @@ static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
if (ip_vs_genl_fill_dest(skb, dest) < 0)
goto nla_put_failure;
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
@@ -3215,7 +3217,8 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
goto nla_put_failure;
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 1d5341f3761d..5d3daae98bf0 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct nf_conn *ct;
struct net *net;
+ *diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1;
#endif
- *diff = 0;
-
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct ip_vs_conn *n_cp;
struct net *net;
+ /* no diff required for incoming packets */
+ *diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1;
#endif
- /* no diff required for incoming packets */
- *diff = 0;
-
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a11674806707..13fad8668f83 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
*/
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
pr_debug("Confirming conntrack %p\n", ct);
- /* We have to check the DYING flag inside the lock to prevent
- a race against nf_ct_get_next_corpse() possibly called from
- user context, else we insert an already 'dead' hash, blocking
- further use of that particular connection -JM */
+ /* We have to check the DYING flag after unlink to prevent
+ * a race against nf_ct_get_next_corpse() possibly called from
+ * user context, else we insert an already 'dead' hash, blocking
+ * further use of that particular connection -JM.
+ */
+ nf_ct_del_from_dying_or_unconfirmed_list(ct);
- if (unlikely(nf_ct_is_dying(ct))) {
- nf_conntrack_double_unlock(hash, reply_hash);
- local_bh_enable();
- return NF_ACCEPT;
- }
+ if (unlikely(nf_ct_is_dying(ct)))
+ goto out;
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT;
out:
+ nf_ct_add_to_dying_list(ct);
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert_failed);
local_bh_enable();
@@ -1426,12 +1424,6 @@ void nf_ct_free_hashtable(void *hash, unsigned int size)
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
-void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
-{
- nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
-
static int untrack_refs(void)
{
int cnt = 0, cpu;
@@ -1624,13 +1616,18 @@ int nf_conntrack_init_start(void)
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_conntrack_locks[i]);
- /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
- * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
if (!nf_conntrack_htable_size) {
+ /* Idea from tcp.c: use 1/16384 of memory.
+ * On i386: 32MB machine has 512 buckets.
+ * >= 1GB machines have 16384 buckets.
+ * >= 4GB machines have 65536 buckets.
+ */
nf_conntrack_htable_size
= (((totalram_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
- if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+ nf_conntrack_htable_size = 65536;
+ else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 16384;
if (nf_conntrack_htable_size < 32)
nf_conntrack_htable_size = 32;
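
The sizing hunk above adds a 65536-bucket tier for machines with more than 4 GB of RAM on top of the existing 1/16384-of-memory heuristic. A rough userspace C sketch of that arithmetic, assuming 4 KiB pages, 8-byte list heads and a 64-bit unsigned long (the real code uses totalram_pages, PAGE_SHIFT and sizeof(struct hlist_head)):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long htable_size(unsigned long totalram_pages)
{
    /* 1/16384 of memory, expressed in list heads; assumes 64-bit longs */
    unsigned long size = ((totalram_pages << PAGE_SHIFT) / 16384)
                         / sizeof(void *);

    if (totalram_pages > 4UL * (1024 * 1024 * 1024 / PAGE_SIZE))
        size = 65536;
    else if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
        size = 16384;
    if (size < 32)
        size = 32;
    return size;
}

int main(void)
{
    unsigned long gib_pages = 1024UL * 1024 * 1024 / PAGE_SIZE;

    printf("512MiB -> %lu buckets\n", htable_size(gib_pages / 2));
    printf("2GiB   -> %lu buckets\n", htable_size(2 * gib_pages));
    printf("8GiB   -> %lu buckets\n", htable_size(8 * gib_pages));
    return 0;
}
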
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1bd9ed9e62f6..d1c23940a86a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -749,13 +749,47 @@ static int ctnetlink_done(struct netlink_callback *cb)
return 0;
}
-struct ctnetlink_dump_filter {
+struct ctnetlink_filter {
struct {
u_int32_t val;
u_int32_t mask;
} mark;
};
+static struct ctnetlink_filter *
+ctnetlink_alloc_filter(const struct nlattr * const cda[])
+{
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ struct ctnetlink_filter *filter;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
+ filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+
+ return filter;
+#else
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+}
+
+static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
+{
+ struct ctnetlink_filter *filter = data;
+
+ if (filter == NULL)
+ return 1;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if ((ct->mark & filter->mark.mask) == filter->mark.val)
+ return 1;
+#endif
+
+ return 0;
+}
+
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -768,10 +802,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
int res;
spinlock_t *lockp;
-#ifdef CONFIG_NF_CONNTRACK_MARK
- const struct ctnetlink_dump_filter *filter = cb->data;
-#endif
-
last = (struct nf_conn *)cb->args[1];
local_bh_disable();
@@ -798,12 +828,9 @@ restart:
continue;
cb->args[1] = 0;
}
-#ifdef CONFIG_NF_CONNTRACK_MARK
- if (filter && !((ct->mark & filter->mark.mask) ==
- filter->mark.val)) {
+ if (!ctnetlink_filter_match(ct, cb->data))
continue;
- }
-#endif
+
rcu_read_lock();
res =
ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
@@ -1001,6 +1028,25 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
.len = NF_CT_LABELS_MAX_SIZE },
};
+static int ctnetlink_flush_conntrack(struct net *net,
+ const struct nlattr * const cda[],
+ u32 portid, int report)
+{
+ struct ctnetlink_filter *filter = NULL;
+
+ if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+ filter = ctnetlink_alloc_filter(cda);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+ }
+
+ nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
+ portid, report);
+ kfree(filter);
+
+ return 0;
+}
+
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
@@ -1024,11 +1070,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
else {
- /* Flush the whole table */
- nf_conntrack_flush_report(net,
- NETLINK_CB(skb).portid,
- nlmsg_report(nlh));
- return 0;
+ return ctnetlink_flush_conntrack(net, cda,
+ NETLINK_CB(skb).portid,
+ nlmsg_report(nlh));
}
if (err < 0)
@@ -1076,21 +1120,16 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
.dump = ctnetlink_dump_table,
.done = ctnetlink_done,
};
-#ifdef CONFIG_NF_CONNTRACK_MARK
+
if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
- struct ctnetlink_dump_filter *filter;
+ struct ctnetlink_filter *filter;
- filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
- GFP_ATOMIC);
- if (filter == NULL)
- return -ENOMEM;
+ filter = ctnetlink_alloc_filter(cda);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
- filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
- filter->mark.mask =
- ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
c.data = filter;
}
-#endif
return netlink_dump_start(ctnl, skb, nlh, &c);
}
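
The nf_conntrack_netlink.c change above factors the mark comparison into a single match callback that both the dump path and the new flush path hand to the iterator, with a NULL filter meaning "match everything". A small userspace C sketch of that callback shape, with made-up struct and function names:

#include <stdio.h>
#include <stdint.h>

struct mark_filter {
    uint32_t val;
    uint32_t mask;
};

/* NULL filter matches everything; otherwise match on (mark & mask) == val */
static int filter_match(uint32_t mark, const struct mark_filter *f)
{
    if (f == NULL)
        return 1;
    return (mark & f->mask) == f->val;
}

int main(void)
{
    struct mark_filter f = { .val = 0x10, .mask = 0xf0 };
    uint32_t marks[] = { 0x10, 0x1f, 0x20, 0x115 };

    for (size_t i = 0; i < sizeof(marks) / sizeof(marks[0]); i++)
        printf("mark 0x%x: %s\n", marks[i],
               filter_match(marks[i], &f) ? "match" : "skip");
    return 0;
}
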
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index f6e2ae91a80b..ce3e840c8704 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -98,9 +98,9 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
new_end_seq = htonl(ntohl(sack->end_seq) -
seq->offset_before);
- pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
- ntohl(sack->start_seq), new_start_seq,
- ntohl(sack->end_seq), new_end_seq);
+ pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",
+ ntohl(sack->start_seq), ntohl(new_start_seq),
+ ntohl(sack->end_seq), ntohl(new_end_seq));
inet_proto_csum_replace4(&tcph->check, skb,
sack->start_seq, new_start_seq, 0);
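
The seqadj hunk above fixes the debug format: sequence numbers are unsigned 32-bit values carried in network byte order, so they need ntohl() plus a %u conversion. A tiny standalone C illustration of why the old form printed garbage (values are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    uint32_t seq_host = 3000000000u;        /* deliberately > INT32_MAX */
    uint32_t seq_be = htonl(seq_host);

    printf("wrong: %d\n", (int)seq_be);     /* byte-swapped and signed */
    printf("right: %u\n", ntohl(seq_be));   /* prints 3000000000 */
    return 0;
}
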
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 43c926cae9c0..0d8448f19dfe 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -425,8 +425,7 @@ static int netfilter_log_sysctl_init(struct net *net)
nf_log_sysctl_table[i].procname =
nf_log_sysctl_fnames[i];
nf_log_sysctl_table[i].data = NULL;
- nf_log_sysctl_table[i].maxlen =
- NFLOGGER_NAME_LEN * sizeof(char);
+ nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN;
nf_log_sysctl_table[i].mode = 0644;
nf_log_sysctl_table[i].proc_handler =
nf_log_proc_dostring;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 129a8daa4abf..199fd0f27b0e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -427,7 +427,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
@@ -713,16 +714,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
struct nft_chain *chain, *nc;
struct nft_set *set, *ns;
- list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ list_for_each_entry(chain, &ctx->table->chains, list) {
ctx->chain = chain;
err = nft_delrule_by_chain(ctx);
if (err < 0)
goto out;
-
- err = nft_delchain(ctx);
- if (err < 0)
- goto out;
}
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +732,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
goto out;
}
+ list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ ctx->chain = chain;
+
+ err = nft_delchain(ctx);
+ if (err < 0)
+ goto out;
+ }
+
err = nft_deltable(ctx);
out:
return err;
@@ -967,7 +972,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
@@ -1130,9 +1136,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
/* Restore old counters on this cpu, no problem. Per-cpu statistics
* are not exposed to userspace.
*/
+ preempt_disable();
stats = this_cpu_ptr(newstats);
stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+ preempt_enable();
return newstats;
}
@@ -1258,8 +1266,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
sizeof(struct nft_trans_chain));
- if (trans == NULL)
+ if (trans == NULL) {
+ free_percpu(stats);
return -ENOMEM;
+ }
nft_trans_chain_stats(trans) = stats;
nft_trans_chain_update(trans) = true;
@@ -1315,8 +1325,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
hookfn = type->hooks[hooknum];
basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
- if (basechain == NULL)
+ if (basechain == NULL) {
+ module_put(type->owner);
return -ENOMEM;
+ }
if (nla[NFTA_CHAIN_COUNTERS]) {
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -1703,7 +1715,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
@@ -2357,7 +2370,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
nla_nest_end(skb, desc);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
@@ -3031,7 +3045,8 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
nla_nest_end(skb, nest);
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
@@ -3320,7 +3335,8 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
@@ -3749,6 +3765,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
}
EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+ unsigned int hook_flags)
+{
+ struct nft_base_chain *basechain;
+
+ if (chain->flags & NFT_BASE_CHAIN) {
+ basechain = nft_base_chain(chain);
+
+ if ((1 << basechain->ops[0].hooknum) & hook_flags)
+ return 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
/*
* Loop detection - walk through the ruleset beginning at the destination chain
* of a new jump until either the source chain is reached (loop) or all
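
Among the nf_tables_api.c changes above, the two nf_tables_newchain() hunks plug leaks on failure paths: when the transaction allocation fails the per-chain stats are freed, and when the basechain allocation fails the module reference is dropped. A minimal userspace C sketch of that "undo the earlier step before returning" pattern, with placeholder structures rather than kernel objects:

#include <stdio.h>
#include <stdlib.h>

struct stats { long bytes, pkts; };
struct trans { struct stats *stats; };

/* fail_trans simulates the second allocation failing */
static int new_chain(int fail_trans)
{
    struct stats *stats;
    struct trans *trans;

    stats = calloc(1, sizeof(*stats));
    if (!stats)
        return -1;

    trans = fail_trans ? NULL : calloc(1, sizeof(*trans));
    if (!trans) {
        free(stats);            /* release what the earlier step acquired */
        return -1;
    }

    trans->stats = stats;       /* ownership moves into the transaction */
    free(stats);
    free(trans);
    return 0;
}

int main(void)
{
    printf("success path: %d\n", new_chain(0));
    printf("failure path: %d\n", new_chain(1));
    return 0;
}
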
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index cde4a6702fa3..8b117c90ecd7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -272,7 +272,7 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
u_int16_t subsys_id)
{
- struct sk_buff *nskb, *oskb = skb;
+ struct sk_buff *oskb = skb;
struct net *net = sock_net(skb->sk);
const struct nfnetlink_subsystem *ss;
const struct nfnl_callback *nc;
@@ -283,12 +283,11 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
if (subsys_id >= NFNL_SUBSYS_COUNT)
return netlink_ack(skb, nlh, -EINVAL);
replay:
- nskb = netlink_skb_clone(oskb, GFP_KERNEL);
- if (!nskb)
+ skb = netlink_skb_clone(oskb, GFP_KERNEL);
+ if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM);
- nskb->sk = oskb->sk;
- skb = nskb;
+ skb->sk = oskb->sk;
nfnl_lock(subsys_id);
ss = rcu_dereference_protected(table[subsys_id].subsys,
@@ -305,7 +304,7 @@ replay:
{
nfnl_unlock(subsys_id);
netlink_ack(skb, nlh, -EOPNOTSUPP);
- return kfree_skb(nskb);
+ return kfree_skb(skb);
}
}
@@ -321,7 +320,8 @@ replay:
nlh = nlmsg_hdr(skb);
err = 0;
- if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+ skb->len < nlh->nlmsg_len) {
err = -EINVAL;
goto ack;
}
@@ -385,7 +385,7 @@ replay:
nfnl_err_reset(&err_list);
ss->abort(oskb);
nfnl_unlock(subsys_id);
- kfree_skb(nskb);
+ kfree_skb(skb);
goto replay;
}
}
@@ -426,7 +426,7 @@ done:
nfnl_err_deliver(&err_list, oskb);
nfnl_unlock(subsys_id);
- kfree_skb(nskb);
+ kfree_skb(skb);
}
static void nfnetlink_rcv(struct sk_buff *skb)
@@ -469,7 +469,7 @@ static int nfnetlink_bind(struct net *net, int group)
int type;
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
- return -EINVAL;
+ return 0;
type = nfnl_group2type[group];
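
The batch-parsing hunk above tightens the per-message check: a message must be large enough to carry its nfgenmsg payload header and must not claim more bytes than the skb actually holds. A standalone C sketch of that two-sided length check, with simplified stand-in headers rather than the real netlink structures:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct msg_hdr     { uint32_t msg_len; uint16_t type; uint16_t flags; };
struct payload_hdr { uint8_t family; uint8_t version; uint16_t res_id; };

static int msg_ok(const unsigned char *buf, size_t buflen)
{
    struct msg_hdr hdr;

    if (buflen < sizeof(hdr))
        return 0;
    memcpy(&hdr, buf, sizeof(hdr));
    /* must be able to carry its own payload header ... */
    if (hdr.msg_len < sizeof(hdr) + sizeof(struct payload_hdr))
        return 0;
    /* ... and must not claim more bytes than the buffer holds */
    if (buflen < hdr.msg_len)
        return 0;
    return 1;
}

int main(void)
{
    unsigned char buf[64] = { 0 };
    struct msg_hdr hdr = { .msg_len = sizeof(hdr) + sizeof(struct payload_hdr) };

    memcpy(buf, &hdr, sizeof(hdr));
    printf("well-formed message:  %d\n", msg_ok(buf, sizeof(buf)));

    hdr.msg_len = 200;                  /* claims more than we were handed */
    memcpy(buf, &hdr, sizeof(hdr));
    printf("overclaiming message: %d\n", msg_ok(buf, sizeof(buf)));
    return 0;
}
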
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 9e287cb56a04..a5599fc51a6f 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -86,7 +86,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
static int
nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
{
- const struct nf_conn_help *help = nfct_help(ct);
+ struct nf_conn_help *help = nfct_help(ct);
if (attr == NULL)
return -EINVAL;
@@ -94,7 +94,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
if (help->helper->data_len == 0)
return -EINVAL;
- memcpy(&help->data, nla_data(attr), help->helper->data_len);
+ memcpy(help->data, nla_data(attr), help->helper->data_len);
return 0;
}
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 1e316ce4cb5d..61e6c407476a 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -33,7 +33,7 @@ static bool nft_hash_lookup(const struct nft_set *set,
const struct nft_data *key,
struct nft_data *data)
{
- const struct rhashtable *priv = nft_set_priv(set);
+ struct rhashtable *priv = nft_set_priv(set);
const struct nft_hash_elem *he;
he = rhashtable_lookup(priv, key);
@@ -83,69 +83,97 @@ static void nft_hash_remove(const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct rhashtable *priv = nft_set_priv(set);
- struct rhash_head *he, __rcu **pprev;
- pprev = elem->cookie;
- he = rht_dereference((*pprev), priv);
+ rhashtable_remove(priv, elem->cookie);
+ synchronize_rcu();
+ kfree(elem->cookie);
+}
- rhashtable_remove_pprev(priv, he, pprev);
+struct nft_compare_arg {
+ const struct nft_set *set;
+ struct nft_set_elem *elem;
+};
- synchronize_rcu();
- kfree(he);
+static bool nft_hash_compare(void *ptr, void *arg)
+{
+ struct nft_hash_elem *he = ptr;
+ struct nft_compare_arg *x = arg;
+
+ if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) {
+ x->elem->cookie = he;
+ x->elem->flags = 0;
+ if (x->set->flags & NFT_SET_MAP)
+ nft_data_copy(&x->elem->data, he->data);
+
+ return true;
+ }
+
+ return false;
}
static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
{
- const struct rhashtable *priv = nft_set_priv(set);
- const struct bucket_table *tbl = rht_dereference_rcu(priv->tbl, priv);
- struct rhash_head __rcu * const *pprev;
- struct nft_hash_elem *he;
- u32 h;
-
- h = rhashtable_hashfn(priv, &elem->key, set->klen);
- pprev = &tbl->buckets[h];
- rht_for_each_entry_rcu(he, tbl->buckets[h], node) {
- if (nft_data_cmp(&he->key, &elem->key, set->klen)) {
- pprev = &he->node.next;
- continue;
- }
+ struct rhashtable *priv = nft_set_priv(set);
+ struct nft_compare_arg arg = {
+ .set = set,
+ .elem = elem,
+ };
- elem->cookie = (void *)pprev;
- elem->flags = 0;
- if (set->flags & NFT_SET_MAP)
- nft_data_copy(&elem->data, he->data);
+ if (rhashtable_lookup_compare(priv, &elem->key,
+ &nft_hash_compare, &arg))
return 0;
- }
+
return -ENOENT;
}
static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
struct nft_set_iter *iter)
{
- const struct rhashtable *priv = nft_set_priv(set);
- const struct bucket_table *tbl;
+ struct rhashtable *priv = nft_set_priv(set);
const struct nft_hash_elem *he;
+ struct rhashtable_iter hti;
struct nft_set_elem elem;
- unsigned int i;
+ int err;
- tbl = rht_dereference_rcu(priv->tbl, priv);
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_rcu(he, tbl->buckets[i], node) {
- if (iter->count < iter->skip)
- goto cont;
-
- memcpy(&elem.key, &he->key, sizeof(elem.key));
- if (set->flags & NFT_SET_MAP)
- memcpy(&elem.data, he->data, sizeof(elem.data));
- elem.flags = 0;
-
- iter->err = iter->fn(ctx, set, iter, &elem);
- if (iter->err < 0)
- return;
-cont:
- iter->count++;
+ err = rhashtable_walk_init(priv, &hti);
+ iter->err = err;
+ if (err)
+ return;
+
+ err = rhashtable_walk_start(&hti);
+ if (err && err != -EAGAIN) {
+ iter->err = err;
+ goto out;
+ }
+
+ while ((he = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(he)) {
+ err = PTR_ERR(he);
+ if (err != -EAGAIN) {
+ iter->err = err;
+ goto out;
+ }
}
+
+ if (iter->count < iter->skip)
+ goto cont;
+
+ memcpy(&elem.key, &he->key, sizeof(elem.key));
+ if (set->flags & NFT_SET_MAP)
+ memcpy(&elem.data, he->data, sizeof(elem.data));
+ elem.flags = 0;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+ if (iter->err < 0)
+ goto out;
+
+cont:
+ iter->count++;
}
+
+out:
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
}
static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
@@ -153,13 +181,6 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
return sizeof(struct rhashtable);
}
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nfnl_lock_is_held(void *parent)
-{
- return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
-}
-#endif
-
static int nft_hash_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const tb[])
@@ -173,9 +194,6 @@ static int nft_hash_init(const struct nft_set *set,
.hashfn = jhash,
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
- .mutex_is_held = lockdep_nfnl_lock_is_held,
-#endif
};
return rhashtable_init(priv, &params);
@@ -183,18 +201,23 @@ static int nft_hash_init(const struct nft_set *set,
static void nft_hash_destroy(const struct nft_set *set)
{
- const struct rhashtable *priv = nft_set_priv(set);
- const struct bucket_table *tbl = priv->tbl;
- struct nft_hash_elem *he, *next;
+ struct rhashtable *priv = nft_set_priv(set);
+ const struct bucket_table *tbl;
+ struct nft_hash_elem *he;
+ struct rhash_head *pos, *next;
unsigned int i;
+ /* Stop an eventual async resizing */
+ priv->being_destroyed = true;
+ mutex_lock(&priv->mutex);
+
+ tbl = rht_dereference(priv->tbl, priv);
for (i = 0; i < tbl->size; i++) {
- for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
- he != NULL; he = next) {
- next = rht_entry(he->node.next, struct nft_hash_elem, node);
+ rht_for_each_entry_safe(he, pos, next, tbl, i, node)
nft_hash_elem_destroy(set, he);
- }
}
+ mutex_unlock(&priv->mutex);
+
rhashtable_destroy(priv);
}
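
The nft_hash.c rework above replaces open-coded bucket walking with a compare-callback lookup: the table iterates candidates and a caller-supplied callback decides on a match and copies out what it needs. A toy userspace C version of that shape (a linear scan stands in for the hash table; none of these names are the rhashtable API):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct elem { char key[8]; int data; };

struct cmp_arg { const char *key; int *data_out; };

static bool elem_compare(const struct elem *e, void *arg)
{
    struct cmp_arg *x = arg;

    if (strcmp(e->key, x->key) != 0)
        return false;
    *x->data_out = e->data;             /* copy out on match */
    return true;
}

static bool lookup_compare(const struct elem *tbl, size_t n,
                           bool (*cmp)(const struct elem *, void *), void *arg)
{
    for (size_t i = 0; i < n; i++)
        if (cmp(&tbl[i], arg))
            return true;
    return false;
}

int main(void)
{
    struct elem tbl[] = { { "tcp", 6 }, { "udp", 17 } };
    int data = -1;
    struct cmp_arg arg = { .key = "udp", .data_out = &data };

    if (lookup_compare(tbl, 2, elem_compare, &arg))
        printf("found, data=%d\n", data);
    return 0;
}
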
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index d1ffd5eb3a9b..9aea747b43ea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
};
EXPORT_SYMBOL_GPL(nft_masq_policy);
+int nft_masq_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ int err;
+
+ err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ if (err < 0)
+ return err;
+
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_POST_ROUTING));
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
int nft_masq_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
struct nft_masq *priv = nft_expr_priv(expr);
int err;
- err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
- if (err < 0)
+ err = nft_masq_validate(ctx, expr, NULL);
+ if (err)
return err;
if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
}
EXPORT_SYMBOL_GPL(nft_masq_dump);
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data)
-{
- return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
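
The nft_masq.c change above (and the matching nft_nat.c and nft_redir.c changes below) restricts each expression to the hooks it makes sense on by testing the base chain's hook bit against an allowed-hooks mask. A short C sketch of that bitmask test; the hook numbering follows the usual netfilter order but is otherwise illustrative.

#include <stdio.h>

enum { PRE_ROUTING, LOCAL_IN, FORWARD, LOCAL_OUT, POST_ROUTING };

static int validate_hooks(unsigned int chain_hook, unsigned int allowed_mask)
{
    if ((1u << chain_hook) & allowed_mask)
        return 0;
    return -1;                  /* -EOPNOTSUPP in the kernel */
}

int main(void)
{
    unsigned int masq_hooks = 1u << POST_ROUTING;

    printf("masquerade in postrouting: %d\n",
           validate_hooks(POST_ROUTING, masq_hooks));
    printf("masquerade in prerouting:  %d\n",
           validate_hooks(PRE_ROUTING, masq_hooks));
    return 0;
}
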
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index afe2b0b45ec4..a0837c6c9283 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
}
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- range.max_proto.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ range.min_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ range.max_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
[NFTA_NAT_FLAGS] = { .type = NLA_U32 },
};
-static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_nat_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
struct nft_nat *priv = nft_expr_priv(expr);
- u32 family;
int err;
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
+ switch (priv->type) {
+ case NFT_NAT_SNAT:
+ err = nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_IN));
+ break;
+ case NFT_NAT_DNAT:
+ err = nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT));
+ break;
+ }
+
+ return err;
+}
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_nat *priv = nft_expr_priv(expr);
+ u32 family;
+ int err;
+
if (tb[NFTA_NAT_TYPE] == NULL ||
(tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return -EINVAL;
}
+ err = nft_nat_validate(ctx, expr, NULL);
+ if (err < 0)
+ return err;
+
if (tb[NFTA_NAT_FAMILY] == NULL)
return -EINVAL;
@@ -219,13 +246,6 @@ nla_put_failure:
return -1;
}
-static int nft_nat_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
-{
- return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-
static struct nft_expr_type nft_nat_type;
static const struct nft_expr_ops nft_nat_ops = {
.type = &nft_nat_type,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 9e8093f28311..d7e9e93a4e90 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
};
EXPORT_SYMBOL_GPL(nft_redir_policy);
+int nft_redir_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ int err;
+
+ err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ if (err < 0)
+ return err;
+
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
int nft_redir_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
struct nft_redir *priv = nft_expr_priv(expr);
int err;
- err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ err = nft_redir_validate(ctx, expr, NULL);
if (err < 0)
return err;
@@ -88,12 +104,5 @@ nla_put_failure:
}
EXPORT_SYMBOL_GPL(nft_redir_dump);
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data)
-{
- return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index c529161cdbf8..0778855ea5e7 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -225,6 +225,8 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
rcu_read_lock();
list_for_each_entry_rcu(kf, &xt_osf_fingers[df], finger_entry) {
+ int foptsize, optnum;
+
f = &kf->finger;
if (!(info->flags & XT_OSF_LOG) && strcmp(info->genre, f->genre))
@@ -233,110 +235,109 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
optp = _optp;
fmatch = FMATCH_WRONG;
- if (totlen == f->ss && xt_osf_ttl(skb, info, f->ttl)) {
- int foptsize, optnum;
+ if (totlen != f->ss || !xt_osf_ttl(skb, info, f->ttl))
+ continue;
- /*
- * Should not happen if userspace parser was written correctly.
- */
- if (f->wss.wc >= OSF_WSS_MAX)
- continue;
+ /*
+ * Should not happen if userspace parser was written correctly.
+ */
+ if (f->wss.wc >= OSF_WSS_MAX)
+ continue;
- /* Check options */
+ /* Check options */
- foptsize = 0;
- for (optnum = 0; optnum < f->opt_num; ++optnum)
- foptsize += f->opt[optnum].length;
+ foptsize = 0;
+ for (optnum = 0; optnum < f->opt_num; ++optnum)
+ foptsize += f->opt[optnum].length;
- if (foptsize > MAX_IPOPTLEN ||
- optsize > MAX_IPOPTLEN ||
- optsize != foptsize)
- continue;
+ if (foptsize > MAX_IPOPTLEN ||
+ optsize > MAX_IPOPTLEN ||
+ optsize != foptsize)
+ continue;
- check_WSS = f->wss.wc;
+ check_WSS = f->wss.wc;
- for (optnum = 0; optnum < f->opt_num; ++optnum) {
- if (f->opt[optnum].kind == (*optp)) {
- __u32 len = f->opt[optnum].length;
- const __u8 *optend = optp + len;
- int loop_cont = 0;
+ for (optnum = 0; optnum < f->opt_num; ++optnum) {
+ if (f->opt[optnum].kind == (*optp)) {
+ __u32 len = f->opt[optnum].length;
+ const __u8 *optend = optp + len;
+ int loop_cont = 0;
- fmatch = FMATCH_OK;
+ fmatch = FMATCH_OK;
- switch (*optp) {
- case OSFOPT_MSS:
- mss = optp[3];
- mss <<= 8;
- mss |= optp[2];
+ switch (*optp) {
+ case OSFOPT_MSS:
+ mss = optp[3];
+ mss <<= 8;
+ mss |= optp[2];
- mss = ntohs((__force __be16)mss);
- break;
- case OSFOPT_TS:
- loop_cont = 1;
- break;
- }
+ mss = ntohs((__force __be16)mss);
+ break;
+ case OSFOPT_TS:
+ loop_cont = 1;
+ break;
+ }
- optp = optend;
- } else
- fmatch = FMATCH_OPT_WRONG;
+ optp = optend;
+ } else
+ fmatch = FMATCH_OPT_WRONG;
- if (fmatch != FMATCH_OK)
- break;
- }
+ if (fmatch != FMATCH_OK)
+ break;
+ }
- if (fmatch != FMATCH_OPT_WRONG) {
- fmatch = FMATCH_WRONG;
+ if (fmatch != FMATCH_OPT_WRONG) {
+ fmatch = FMATCH_WRONG;
- switch (check_WSS) {
- case OSF_WSS_PLAIN:
- if (f->wss.val == 0 || window == f->wss.val)
- fmatch = FMATCH_OK;
- break;
- case OSF_WSS_MSS:
- /*
- * Some smart modems decrease mangle MSS to
- * SMART_MSS_2, so we check standard, decreased
- * and the one provided in the fingerprint MSS
- * values.
- */
+ switch (check_WSS) {
+ case OSF_WSS_PLAIN:
+ if (f->wss.val == 0 || window == f->wss.val)
+ fmatch = FMATCH_OK;
+ break;
+ case OSF_WSS_MSS:
+ /*
+ * Some smart modems decrease mangle MSS to
+ * SMART_MSS_2, so we check standard, decreased
+ * and the one provided in the fingerprint MSS
+ * values.
+ */
#define SMART_MSS_1 1460
#define SMART_MSS_2 1448
- if (window == f->wss.val * mss ||
- window == f->wss.val * SMART_MSS_1 ||
- window == f->wss.val * SMART_MSS_2)
- fmatch = FMATCH_OK;
- break;
- case OSF_WSS_MTU:
- if (window == f->wss.val * (mss + 40) ||
- window == f->wss.val * (SMART_MSS_1 + 40) ||
- window == f->wss.val * (SMART_MSS_2 + 40))
- fmatch = FMATCH_OK;
- break;
- case OSF_WSS_MODULO:
- if ((window % f->wss.val) == 0)
- fmatch = FMATCH_OK;
- break;
- }
+ if (window == f->wss.val * mss ||
+ window == f->wss.val * SMART_MSS_1 ||
+ window == f->wss.val * SMART_MSS_2)
+ fmatch = FMATCH_OK;
+ break;
+ case OSF_WSS_MTU:
+ if (window == f->wss.val * (mss + 40) ||
+ window == f->wss.val * (SMART_MSS_1 + 40) ||
+ window == f->wss.val * (SMART_MSS_2 + 40))
+ fmatch = FMATCH_OK;
+ break;
+ case OSF_WSS_MODULO:
+ if ((window % f->wss.val) == 0)
+ fmatch = FMATCH_OK;
+ break;
}
+ }
- if (fmatch != FMATCH_OK)
- continue;
+ if (fmatch != FMATCH_OK)
+ continue;
- fcount++;
+ fcount++;
- if (info->flags & XT_OSF_LOG)
- nf_log_packet(net, p->family, p->hooknum, skb,
- p->in, p->out, NULL,
- "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
- f->genre, f->version, f->subtype,
- &ip->saddr, ntohs(tcp->source),
- &ip->daddr, ntohs(tcp->dest),
- f->ttl - ip->ttl);
+ if (info->flags & XT_OSF_LOG)
+ nf_log_packet(net, p->family, p->hooknum, skb,
+ p->in, p->out, NULL,
+ "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
+ f->genre, f->version, f->subtype,
+ &ip->saddr, ntohs(tcp->source),
+ &ip->daddr, ntohs(tcp->dest),
+ f->ttl - ip->ttl);
- if ((info->flags & XT_OSF_LOG) &&
- info->loglevel == XT_OSF_LOGLEVEL_FIRST)
- break;
- }
+ if ((info->flags & XT_OSF_LOG) &&
+ info->loglevel == XT_OSF_LOGLEVEL_FIRST)
+ break;
}
rcu_read_unlock();
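
The xt_osf.c hunk above is purely structural: the guarding condition is inverted into an early `continue`, which drops one indentation level from the whole match body. A tiny C illustration of the same de-nesting move; the fingerprint fields and values are invented for the example.

#include <stdio.h>

struct finger { int ss; int ttl; const char *genre; };

int main(void)
{
    struct finger fingers[] = {
        { 60, 64, "Linux" }, { 44, 128, "Windows" }, { 60, 255, "Solaris" },
    };
    int totlen = 60, ttl = 61;

    for (size_t i = 0; i < sizeof(fingers) / sizeof(fingers[0]); i++) {
        const struct finger *f = &fingers[i];

        /* early continue replaces one level of `if (...) { ... }` nesting */
        if (totlen != f->ss || ttl > f->ttl)
            continue;

        printf("candidate: %s\n", f->genre);
    }
    return 0;
}
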
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index c2f2a53a4879..7fd1104ba900 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -324,8 +324,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
return 0;
add_std_failure:
- if (doi_def)
- cipso_v4_doi_free(doi_def);
+ cipso_v4_doi_free(doi_def);
return ret_val;
}
@@ -641,7 +640,8 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg)
if (ret_val != 0)
goto listall_cb_failure;
- return genlmsg_end(cb_arg->skb, data);
+ genlmsg_end(cb_arg->skb, data);
+ return 0;
listall_cb_failure:
genlmsg_cancel(cb_arg->skb, data);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index a845cd4cf21e..28cddc85b700 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -1065,10 +1065,12 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
u16 family,
struct netlbl_lsm_secattr *secattr)
{
+ unsigned char *ptr;
+
switch (family) {
case AF_INET:
- if (CIPSO_V4_OPTEXIST(skb) &&
- cipso_v4_skbuff_getattr(skb, secattr) == 0)
+ ptr = cipso_v4_optptr(skb);
+ if (ptr && cipso_v4_getattr(ptr, secattr) == 0)
return 0;
break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -1094,7 +1096,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
*/
void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway)
{
- if (CIPSO_V4_OPTEXIST(skb))
+ if (cipso_v4_optptr(skb))
cipso_v4_error(skb, error, gateway);
}
@@ -1126,11 +1128,14 @@ void netlbl_cache_invalidate(void)
int netlbl_cache_add(const struct sk_buff *skb,
const struct netlbl_lsm_secattr *secattr)
{
+ unsigned char *ptr;
+
if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0)
return -ENOMSG;
- if (CIPSO_V4_OPTEXIST(skb))
- return cipso_v4_cache_add(skb, secattr);
+ ptr = cipso_v4_optptr(skb);
+ if (ptr)
+ return cipso_v4_cache_add(ptr, secattr);
return -ENOMSG;
}
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index e66e977ef2fa..70440748fe5c 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -93,23 +93,20 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
struct netlbl_audit *audit_info)
{
int ret_val = -EINVAL;
- struct netlbl_dom_map *entry = NULL;
struct netlbl_domaddr_map *addrmap = NULL;
struct cipso_v4_doi *cipsov4 = NULL;
u32 tmp_val;
+ struct netlbl_dom_map *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (entry == NULL) {
- ret_val = -ENOMEM;
- goto add_failure;
- }
+ if (!entry)
+ return -ENOMEM;
entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
if (info->attrs[NLBL_MGMT_A_DOMAIN]) {
size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]);
entry->domain = kmalloc(tmp_size, GFP_KERNEL);
if (entry->domain == NULL) {
ret_val = -ENOMEM;
- goto add_failure;
+ goto add_free_entry;
}
nla_strlcpy(entry->domain,
info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
@@ -125,16 +122,16 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
break;
case NETLBL_NLTYPE_CIPSOV4:
if (!info->attrs[NLBL_MGMT_A_CV4DOI])
- goto add_failure;
+ goto add_free_domain;
tmp_val = nla_get_u32(info->attrs[NLBL_MGMT_A_CV4DOI]);
cipsov4 = cipso_v4_doi_getdef(tmp_val);
if (cipsov4 == NULL)
- goto add_failure;
+ goto add_free_domain;
entry->def.cipso = cipsov4;
break;
default:
- goto add_failure;
+ goto add_free_domain;
}
if (info->attrs[NLBL_MGMT_A_IPV4ADDR]) {
@@ -145,7 +142,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
addrmap = kzalloc(sizeof(*addrmap), GFP_KERNEL);
if (addrmap == NULL) {
ret_val = -ENOMEM;
- goto add_failure;
+ goto add_doi_put_def;
}
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
@@ -153,12 +150,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
if (nla_len(info->attrs[NLBL_MGMT_A_IPV4ADDR]) !=
sizeof(struct in_addr)) {
ret_val = -EINVAL;
- goto add_failure;
+ goto add_free_addrmap;
}
if (nla_len(info->attrs[NLBL_MGMT_A_IPV4MASK]) !=
sizeof(struct in_addr)) {
ret_val = -EINVAL;
- goto add_failure;
+ goto add_free_addrmap;
}
addr = nla_data(info->attrs[NLBL_MGMT_A_IPV4ADDR]);
mask = nla_data(info->attrs[NLBL_MGMT_A_IPV4MASK]);
@@ -166,7 +163,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
ret_val = -ENOMEM;
- goto add_failure;
+ goto add_free_addrmap;
}
map->list.addr = addr->s_addr & mask->s_addr;
map->list.mask = mask->s_addr;
@@ -178,7 +175,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
if (ret_val != 0) {
kfree(map);
- goto add_failure;
+ goto add_free_addrmap;
}
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
@@ -192,7 +189,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
addrmap = kzalloc(sizeof(*addrmap), GFP_KERNEL);
if (addrmap == NULL) {
ret_val = -ENOMEM;
- goto add_failure;
+ goto add_doi_put_def;
}
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
@@ -200,12 +197,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
if (nla_len(info->attrs[NLBL_MGMT_A_IPV6ADDR]) !=
sizeof(struct in6_addr)) {
ret_val = -EINVAL;
- goto add_failure;
+ goto add_free_addrmap;
}
if (nla_len(info->attrs[NLBL_MGMT_A_IPV6MASK]) !=
sizeof(struct in6_addr)) {
ret_val = -EINVAL;
- goto add_failure;
+ goto add_free_addrmap;
}
addr = nla_data(info->attrs[NLBL_MGMT_A_IPV6ADDR]);
mask = nla_data(info->attrs[NLBL_MGMT_A_IPV6MASK]);
@@ -213,7 +210,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
ret_val = -ENOMEM;
- goto add_failure;
+ goto add_free_addrmap;
}
map->list.addr = *addr;
map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
@@ -227,7 +224,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
if (ret_val != 0) {
kfree(map);
- goto add_failure;
+ goto add_free_addrmap;
}
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
@@ -237,16 +234,17 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = netlbl_domhsh_add(entry, audit_info);
if (ret_val != 0)
- goto add_failure;
+ goto add_free_addrmap;
return 0;
-add_failure:
- if (cipsov4)
- cipso_v4_doi_putdef(cipsov4);
- if (entry)
- kfree(entry->domain);
+add_free_addrmap:
kfree(addrmap);
+add_doi_put_def:
+ cipso_v4_doi_putdef(cipsov4);
+add_free_domain:
+ kfree(entry->domain);
+add_free_entry:
kfree(entry);
return ret_val;
}
@@ -456,7 +454,8 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg)
goto listall_cb_failure;
cb_arg->seq++;
- return genlmsg_end(cb_arg->skb, data);
+ genlmsg_end(cb_arg->skb, data);
+ return 0;
listall_cb_failure:
genlmsg_cancel(cb_arg->skb, data);
@@ -620,7 +619,8 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb,
if (ret_val != 0)
goto protocols_cb_failure;
- return genlmsg_end(skb, data);
+ genlmsg_end(skb, data);
+ return 0;
protocols_cb_failure:
genlmsg_cancel(skb, data);
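
The netlabel_mgmt.c change above replaces the single catch-all `add_failure:` label with staged labels, so each failure path releases exactly what has been acquired so far and the "free only if non-NULL" guards disappear. A compact userspace C sketch of that staged-label unwind, using plain heap buffers instead of kernel objects:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Each label releases only what can have been acquired by the time control
 * reaches it, so no path needs NULL checks before freeing. */
static int add_entry(int fail_at)
{
    char *entry, *domain, *addrmap;

    entry = malloc(16);
    if (!entry)
        return -1;

    domain = (fail_at == 1) ? NULL : strdup("example");
    if (!domain)
        goto free_entry;

    addrmap = (fail_at == 2) ? NULL : malloc(32);
    if (!addrmap)
        goto free_domain;

    if (fail_at == 3)                   /* e.g. hash insertion failed */
        goto free_addrmap;

    printf("entry added\n");            /* ownership handed off; the toy
                                         * deliberately stops here */
    return 0;

free_addrmap:
    free(addrmap);
free_domain:
    free(domain);
free_entry:
    free(entry);
    return -1;
}

int main(void)
{
    printf("fail early: %d\n", add_entry(1));
    printf("fail late:  %d\n", add_entry(3));
    printf("success:    %d\n", add_entry(0));
    return 0;
}
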
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 78a63c18779e..aec7994f78cf 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1163,7 +1163,8 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
goto list_cb_failure;
cb_arg->seq++;
- return genlmsg_end(cb_arg->skb, data);
+ genlmsg_end(cb_arg->skb, data);
+ return 0;
list_cb_failure:
genlmsg_cancel(cb_arg->skb, data);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 84ea76ca3f1f..2702673f0f23 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -61,6 +61,7 @@
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
+#include <linux/genetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -97,12 +98,12 @@ static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
/* nl_table locking explained:
- * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
- * combined with an RCU read-side lock. Insertion and removal are protected
- * with nl_sk_hash_lock while using RCU list modification primitives and may
- * run in parallel to nl_table_lock protected lookups. Destruction of the
- * Netlink socket may only occur *after* nl_table_lock has been acquired
- * either during or after the socket has been removed from the list.
+ * Lookup and traversal are protected with an RCU read-side lock. Insertion
+ * and removal are protected with per bucket lock while using RCU list
+ * modification primitives and may run in parallel to RCU protected lookups.
+ * Destruction of the Netlink socket may only occur *after* nl_table_lock has
+ * been acquired * either during or after the socket has been removed from
+ * the list and after an RCU grace period.
*/
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
@@ -110,19 +111,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
-/* Protects netlink socket hash table mutations */
-DEFINE_MUTEX(nl_sk_hash_lock);
-EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
-
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nl_sk_hash_is_held(void *parent)
-{
- if (debug_locks)
- return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
- return 1;
-}
-#endif
-
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static DEFINE_SPINLOCK(netlink_tap_lock);
@@ -707,7 +695,7 @@ static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
u32 dst_portid, u32 dst_group,
- struct sock_iocb *siocb)
+ struct scm_cookie *scm)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ring *ring;
@@ -753,7 +741,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
NETLINK_CB(skb).portid = nlk->portid;
NETLINK_CB(skb).dst_group = dst_group;
- NETLINK_CB(skb).creds = siocb->scm->creds;
+ NETLINK_CB(skb).creds = scm->creds;
err = security_netlink_send(sk, skb);
if (err) {
@@ -832,7 +820,7 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
#define netlink_tx_is_mmaped(sk) false
#define netlink_mmap sock_no_mmap
#define netlink_poll datagram_poll
-#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
+#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm) 0
#endif /* CONFIG_NETLINK_MMAP */
static void netlink_skb_destructor(struct sk_buff *skb)
@@ -1002,26 +990,33 @@ static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
.net = net,
.portid = portid,
};
- u32 hash;
-
- hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));
- return rhashtable_lookup_compare(&table->hash, hash,
+ return rhashtable_lookup_compare(&table->hash, &portid,
&netlink_compare, &arg);
}
+static bool __netlink_insert(struct netlink_table *table, struct sock *sk)
+{
+ struct netlink_compare_arg arg = {
+ .net = sock_net(sk),
+ .portid = nlk_sk(sk)->portid,
+ };
+
+ return rhashtable_lookup_compare_insert(&table->hash,
+ &nlk_sk(sk)->node,
+ &netlink_compare, &arg);
+}
+
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
struct netlink_table *table = &nl_table[protocol];
struct sock *sk;
- read_lock(&nl_table_lock);
rcu_read_lock();
sk = __netlink_lookup(table, portid, net);
if (sk)
sock_hold(sk);
rcu_read_unlock();
- read_unlock(&nl_table_lock);
return sk;
}
@@ -1052,29 +1047,33 @@ netlink_update_listeners(struct sock *sk)
* makes sure updates are visible before bind or setsockopt return. */
}
-static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
+static int netlink_insert(struct sock *sk, u32 portid)
{
struct netlink_table *table = &nl_table[sk->sk_protocol];
- int err = -EADDRINUSE;
+ int err;
- mutex_lock(&nl_sk_hash_lock);
- if (__netlink_lookup(table, portid, net))
- goto err;
+ lock_sock(sk);
err = -EBUSY;
if (nlk_sk(sk)->portid)
goto err;
err = -ENOMEM;
- if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
+ if (BITS_PER_LONG > 32 &&
+ unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
goto err;
nlk_sk(sk)->portid = portid;
sock_hold(sk);
- rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
+
err = 0;
+ if (!__netlink_insert(table, sk)) {
+ err = -EADDRINUSE;
+ sock_put(sk);
+ }
+
err:
- mutex_unlock(&nl_sk_hash_lock);
+ release_sock(sk);
return err;
}
@@ -1082,19 +1081,19 @@ static void netlink_remove(struct sock *sk)
{
struct netlink_table *table;
- mutex_lock(&nl_sk_hash_lock);
table = &nl_table[sk->sk_protocol];
if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
- mutex_unlock(&nl_sk_hash_lock);
netlink_table_grab();
if (nlk_sk(sk)->subscriptions) {
__sk_del_bind_node(sk);
netlink_update_listeners(sk);
}
+ if (sk->sk_protocol == NETLINK_GENERIC)
+ atomic_inc(&genl_sk_destructing_cnt);
netlink_table_ungrab();
}
@@ -1194,6 +1193,13 @@ out_module:
goto out;
}
+static void deferred_put_nlk_sk(struct rcu_head *head)
+{
+ struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+
+ sock_put(&nlk->sk);
+}
+
static int netlink_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -1211,6 +1217,20 @@ static int netlink_release(struct socket *sock)
* will be purged.
*/
+ /* must not acquire netlink_table_lock in any way again before unbind
+ * and notifying genetlink is done as otherwise it might deadlock
+ */
+ if (nlk->netlink_unbind) {
+ int i;
+
+ for (i = 0; i < nlk->ngroups; i++)
+ if (test_bit(i, nlk->groups))
+ nlk->netlink_unbind(sock_net(sk), i + 1);
+ }
+ if (sk->sk_protocol == NETLINK_GENERIC &&
+ atomic_dec_return(&genl_sk_destructing_cnt) == 0)
+ wake_up(&genl_sk_destructing_waitq);
+
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
@@ -1246,20 +1266,13 @@ static int netlink_release(struct socket *sock)
netlink_table_ungrab();
}
- if (nlk->netlink_unbind) {
- int i;
-
- for (i = 0; i < nlk->ngroups; i++)
- if (test_bit(i, nlk->groups))
- nlk->netlink_unbind(sock_net(sk), i + 1);
- }
kfree(nlk->groups);
nlk->groups = NULL;
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
local_bh_enable();
- sock_put(sk);
+ call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
@@ -1274,7 +1287,6 @@ static int netlink_autobind(struct socket *sock)
retry:
cond_resched();
- netlink_table_grab();
rcu_read_lock();
if (__netlink_lookup(table, portid, net)) {
/* Bind collision, search negative portid values. */
@@ -1282,13 +1294,11 @@ retry:
if (rover > -4097)
rover = -4097;
rcu_read_unlock();
- netlink_table_ungrab();
goto retry;
}
rcu_read_unlock();
- netlink_table_ungrab();
- err = netlink_insert(sk, net, portid);
+ err = netlink_insert(sk, portid);
if (err == -EADDRINUSE)
goto retry;
@@ -1428,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
for (undo = 0; undo < group; undo++)
if (test_bit(undo, &groups))
- nlk->netlink_unbind(sock_net(sk), undo);
+ nlk->netlink_unbind(sock_net(sk), undo + 1);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1466,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
for (group = 0; group < nlk->ngroups; group++) {
if (!test_bit(group, &groups))
continue;
- err = nlk->netlink_bind(net, group);
+ err = nlk->netlink_bind(net, group + 1);
if (!err)
continue;
netlink_undo_bind(group, groups, sk);
@@ -1476,7 +1486,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (!nlk->portid) {
err = nladdr->nl_pid ?
- netlink_insert(sk, net, nladdr->nl_pid) :
+ netlink_insert(sk, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
netlink_undo_bind(nlk->ngroups, groups, sk);
@@ -2249,7 +2259,6 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
@@ -2263,10 +2272,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
- if (NULL == siocb->scm)
- siocb->scm = &scm;
-
- err = scm_send(sock, msg, siocb->scm, true);
+ err = scm_send(sock, msg, &scm, true);
if (err < 0)
return err;
@@ -2292,10 +2298,15 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
}
+ /* It's a really convoluted way for userland to ask for mmaped
+ * sendmsg(), but that's what we've got...
+ */
if (netlink_tx_is_mmaped(sk) &&
+ msg->msg_iter.type == ITER_IOVEC &&
+ msg->msg_iter.nr_segs == 1 &&
msg->msg_iter.iov->iov_base == NULL) {
err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
- siocb);
+ &scm);
goto out;
}
@@ -2309,7 +2320,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
NETLINK_CB(skb).portid = nlk->portid;
NETLINK_CB(skb).dst_group = dst_group;
- NETLINK_CB(skb).creds = siocb->scm->creds;
+ NETLINK_CB(skb).creds = scm.creds;
NETLINK_CB(skb).flags = netlink_skb_flags;
err = -EFAULT;
@@ -2331,7 +2342,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
out:
- scm_destroy(siocb->scm);
+ scm_destroy(&scm);
return err;
}
@@ -2339,7 +2350,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len,
int flags)
{
- struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
@@ -2402,11 +2412,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
if (nlk->flags & NETLINK_RECV_PKTINFO)
netlink_cmsg_recv_pktinfo(msg, skb);
- if (NULL == siocb->scm) {
- memset(&scm, 0, sizeof(scm));
- siocb->scm = &scm;
- }
- siocb->scm->creds = *NETLINK_CREDS(skb);
+ memset(&scm, 0, sizeof(scm));
+ scm.creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
copied = data_skb->len;
@@ -2421,7 +2428,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
}
}
- scm_recv(sock, msg, siocb->scm, flags);
+ scm_recv(sock, msg, &scm, flags);
out:
netlink_rcv_wake(sk);
return err ? : copied;
@@ -2482,7 +2489,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
if (cfg && cfg->input)
nlk_sk(sk)->netlink_rcv = cfg->input;
- if (netlink_insert(sk, net, 0))
+ if (netlink_insert(sk, 0))
goto out_sock_release;
nlk = nlk_sk(sk);
@@ -2884,97 +2891,97 @@ EXPORT_SYMBOL(nlmsg_notify);
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
struct seq_net_private p;
+ struct rhashtable_iter hti;
int link;
- int hash_idx;
};
-static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
+static int netlink_walk_start(struct nl_seq_iter *iter)
{
- struct nl_seq_iter *iter = seq->private;
- int i, j;
- struct netlink_sock *nlk;
- struct sock *s;
- loff_t off = 0;
-
- for (i = 0; i < MAX_LINKS; i++) {
- struct rhashtable *ht = &nl_table[i].hash;
- const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
- for (j = 0; j < tbl->size; j++) {
- rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
- s = (struct sock *)nlk;
+ int err;
- if (sock_net(s) != seq_file_net(seq))
- continue;
- if (off == pos) {
- iter->link = i;
- iter->hash_idx = j;
- return s;
- }
- ++off;
- }
- }
+ err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
+ if (err) {
+ iter->link = MAX_LINKS;
+ return err;
}
- return NULL;
+
+ err = rhashtable_walk_start(&iter->hti);
+ return err == -EAGAIN ? 0 : err;
}
-static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(nl_table_lock) __acquires(RCU)
+static void netlink_walk_stop(struct nl_seq_iter *iter)
{
- read_lock(&nl_table_lock);
- rcu_read_lock();
- return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ rhashtable_walk_stop(&iter->hti);
+ rhashtable_walk_exit(&iter->hti);
}
-static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *__netlink_seq_next(struct seq_file *seq)
{
- struct rhashtable *ht;
+ struct nl_seq_iter *iter = seq->private;
struct netlink_sock *nlk;
- struct nl_seq_iter *iter;
- struct net *net;
- int i, j;
- ++*pos;
+ do {
+ for (;;) {
+ int err;
- if (v == SEQ_START_TOKEN)
- return netlink_seq_socket_idx(seq, 0);
+ nlk = rhashtable_walk_next(&iter->hti);
- net = seq_file_net(seq);
- iter = seq->private;
- nlk = v;
+ if (IS_ERR(nlk)) {
+ if (PTR_ERR(nlk) == -EAGAIN)
+ continue;
- i = iter->link;
- ht = &nl_table[i].hash;
- rht_for_each_entry(nlk, nlk->node.next, ht, node)
- if (net_eq(sock_net((struct sock *)nlk), net))
- return nlk;
+ return nlk;
+ }
- j = iter->hash_idx + 1;
+ if (nlk)
+ break;
- do {
- const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
- for (; j < tbl->size; j++) {
- rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
- if (net_eq(sock_net((struct sock *)nlk), net)) {
- iter->link = i;
- iter->hash_idx = j;
- return nlk;
- }
- }
+ netlink_walk_stop(iter);
+ if (++iter->link >= MAX_LINKS)
+ return NULL;
+
+ err = netlink_walk_start(iter);
+ if (err)
+ return ERR_PTR(err);
}
+ } while (sock_net(&nlk->sk) != seq_file_net(seq));
- j = 0;
- } while (++i < MAX_LINKS);
+ return nlk;
+}
- return NULL;
+static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
+{
+ struct nl_seq_iter *iter = seq->private;
+ void *obj = SEQ_START_TOKEN;
+ loff_t pos;
+ int err;
+
+ iter->link = 0;
+
+ err = netlink_walk_start(iter);
+ if (err)
+ return ERR_PTR(err);
+
+ for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
+ obj = __netlink_seq_next(seq);
+
+ return obj;
+}
+
+static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return __netlink_seq_next(seq);
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU) __releases(nl_table_lock)
{
- rcu_read_unlock();
- read_unlock(&nl_table_lock);
+ struct nl_seq_iter *iter = seq->private;
+
+ if (iter->link >= MAX_LINKS)
+ return;
+
+ netlink_walk_stop(iter);
}
@@ -3121,9 +3128,6 @@ static int __init netlink_proto_init(void)
.max_shift = 16, /* 64K */
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
- .mutex_is_held = lockdep_nl_sk_hash_is_held,
-#endif
};
if (err != 0)
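
Among the af_netlink.c changes above, the bind/unbind hunks fix an off-by-one: multicast groups are 1-based on the wire while the membership bitmap is 0-based, so every callback must be handed `bit + 1`. A minimal C sketch of that translation; the bitmap value and callback are placeholders.

#include <stdio.h>

static void unbind(int group)
{
    printf("unbind group %d\n", group);
}

int main(void)
{
    unsigned long groups = 0x5;         /* bits 0 and 2 set */

    for (int bit = 0; bit < 3; bit++)
        if (groups & (1ul << bit))
            unbind(bit + 1);            /* was: unbind(bit) -- off by one */
    return 0;
}
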
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index f123a88496f8..89008405d6b4 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -2,6 +2,7 @@
#define _AF_NETLINK_H
#include <linux/rhashtable.h>
+#include <linux/atomic.h>
#include <net/sock.h>
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -50,6 +51,7 @@ struct netlink_sock {
#endif /* CONFIG_NETLINK_MMAP */
struct rhash_head node;
+ struct rcu_head rcu;
};
static inline struct netlink_sock *nlk_sk(struct sock *sk)
@@ -73,6 +75,5 @@ struct netlink_table {
extern struct netlink_table *nl_table;
extern rwlock_t nl_table_lock;
-extern struct mutex nl_sk_hash_lock;
#endif
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index de8c74a3c061..3ee63a3cff30 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -91,7 +91,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
sk_diag_put_rings_cfg(sk, skb))
goto out_nlmsg_trim;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
out_nlmsg_trim:
nlmsg_cancel(skb, nlh);
@@ -103,7 +104,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
{
struct netlink_table *tbl = &nl_table[protocol];
struct rhashtable *ht = &tbl->hash;
- const struct bucket_table *htbl = rht_dereference(ht->tbl, ht);
+ const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht);
struct net *net = sock_net(skb->sk);
struct netlink_diag_req *req;
struct netlink_sock *nlsk;
@@ -113,7 +114,9 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
req = nlmsg_data(cb->nlh);
for (i = 0; i < htbl->size; i++) {
- rht_for_each_entry(nlsk, htbl->buckets[i], ht, node) {
+ struct rhash_head *pos;
+
+ rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) {
sk = (struct sock *)nlsk;
if (!net_eq(sock_net(sk), net))
@@ -170,7 +173,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
req = nlmsg_data(cb->nlh);
- mutex_lock(&nl_sk_hash_lock);
+ rcu_read_lock();
read_lock(&nl_table_lock);
if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
@@ -184,7 +187,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
} else {
if (req->sdiag_protocol >= MAX_LINKS) {
read_unlock(&nl_table_lock);
- mutex_unlock(&nl_sk_hash_lock);
+ rcu_read_unlock();
return -ENOENT;
}
@@ -192,7 +195,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
read_unlock(&nl_table_lock);
- mutex_unlock(&nl_sk_hash_lock);
+ rcu_read_unlock();
return skb->len;
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2e11061ef885..2ed5f964772e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -23,6 +23,9 @@
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);
+atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
+
void genl_lock(void)
{
mutex_lock(&genl_mutex);
@@ -435,15 +438,18 @@ int genl_unregister_family(struct genl_family *family)
genl_lock_all();
- genl_unregister_mc_groups(family);
-
list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
if (family->id != rc->id || strcmp(rc->name, family->name))
continue;
+ genl_unregister_mc_groups(family);
+
list_del(&rc->family_list);
family->n_ops = 0;
- genl_unlock_all();
+ up_write(&cb_lock);
+ wait_event(genl_sk_destructing_waitq,
+ atomic_read(&genl_sk_destructing_cnt) == 0);
+ genl_unlock();
kfree(family->attrbuf);
genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
@@ -756,7 +762,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
nla_nest_end(skb, nla_grps);
}
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
@@ -796,7 +803,8 @@ static int ctrl_fill_mcgrp_info(struct genl_family *family,
nla_nest_end(skb, nest);
nla_nest_end(skb, nla_grps);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
@@ -985,7 +993,7 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
static int genl_bind(struct net *net, int group)
{
- int i, err = 0;
+ int i, err = -ENOENT;
down_read(&cb_lock);
for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
@@ -1014,7 +1022,6 @@ static int genl_bind(struct net *net, int group)
static void genl_unbind(struct net *net, int group)
{
int i;
- bool found = false;
down_read(&cb_lock);
for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
@@ -1027,14 +1034,11 @@ static void genl_unbind(struct net *net, int group)
if (f->mcast_unbind)
f->mcast_unbind(net, fam_grp);
- found = true;
break;
}
}
}
up_read(&cb_lock);
-
- WARN_ON(!found);
}
static int __net_init genl_pernet_init(struct net *net)
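
The genetlink.c change above makes family unregistration drop the write lock and then sleep until the count of sockets still being destructed reaches zero, so a late destructor can never touch a family that has already been freed. A rough userspace analogue using pthreads in place of the kernel's atomic counter and wait queue (compile with -lpthread); the sleep and thread count are arbitrary.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int destructing_cnt = 2;

static void *sock_destructor(void *arg)
{
    (void)arg;
    usleep(10000);                      /* pretend to tear down a socket */
    pthread_mutex_lock(&lock);
    if (--destructing_cnt == 0)
        pthread_cond_signal(&waitq);    /* wake_up(...) in the kernel */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t[2];

    for (int i = 0; i < 2; i++)
        pthread_create(&t[i], NULL, sock_destructor, NULL);

    pthread_mutex_lock(&lock);
    while (destructing_cnt != 0)        /* wait_event(..., cnt == 0) */
        pthread_cond_wait(&waitq, &lock);
    pthread_mutex_unlock(&lock);

    printf("all sockets destructed, safe to free the family\n");
    for (int i = 0; i < 2; i++)
        pthread_join(t[i], NULL);
    return 0;
}
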
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 819b87702b70..cff3f1614ad4 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -555,7 +555,6 @@ EXPORT_SYMBOL(nfc_find_se);
int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
{
-
struct nfc_se *se;
int rc;
@@ -605,7 +604,6 @@ error:
int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
{
-
struct nfc_se *se;
int rc;
@@ -934,6 +932,27 @@ int nfc_remove_se(struct nfc_dev *dev, u32 se_idx)
}
EXPORT_SYMBOL(nfc_remove_se);
+int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx,
+ struct nfc_evt_transaction *evt_transaction)
+{
+ int rc;
+
+ pr_debug("transaction: %x\n", se_idx);
+
+ device_lock(&dev->dev);
+
+ if (!evt_transaction) {
+ rc = -EPROTO;
+ goto out;
+ }
+
+ rc = nfc_genl_se_transaction(dev, se_idx, evt_transaction);
+out:
+ device_unlock(&dev->dev);
+ return rc;
+}
+EXPORT_SYMBOL(nfc_se_transaction);
+
static void nfc_release(struct device *d)
{
struct nfc_dev *dev = to_nfc_dev(d);
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 91df487aa0a9..844673cb7c18 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -116,23 +116,6 @@ int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
}
EXPORT_SYMBOL(nfc_hci_send_event);
-int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
- const u8 *param, size_t param_len)
-{
- u8 pipe;
-
- pr_debug("\n");
-
- pipe = hdev->gate2pipe[gate];
- if (pipe == NFC_HCI_INVALID_PIPE)
- return -EADDRNOTAVAIL;
-
- return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
- response, param, param_len, NULL, NULL,
- 0);
-}
-EXPORT_SYMBOL(nfc_hci_send_response);
-
/*
* Execute an hci command sent to gate.
* skb will contain response data if success. skb can be NULL if you are not
@@ -331,7 +314,7 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
if (r < 0)
return r;
- memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+ nfc_hci_reset_pipes(hdev);
return 0;
}
@@ -345,7 +328,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
pr_debug("\n");
- if (hdev->gate2pipe[dest_gate] == NFC_HCI_DO_NOT_CREATE_PIPE)
+ if (pipe == NFC_HCI_DO_NOT_CREATE_PIPE)
return 0;
if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
@@ -380,6 +363,8 @@ open_pipe:
return r;
}
+ hdev->pipes[pipe].gate = dest_gate;
+ hdev->pipes[pipe].dest_host = dest_host;
hdev->gate2pipe[dest_gate] = pipe;
return 0;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ef50e7716c4a..6e061da2258a 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -46,6 +46,32 @@ int nfc_hci_result_to_errno(u8 result)
}
EXPORT_SYMBOL(nfc_hci_result_to_errno);
+void nfc_hci_reset_pipes(struct nfc_hci_dev *hdev)
+{
+ int i = 0;
+
+ for (i = 0; i < NFC_HCI_MAX_PIPES; i++) {
+ hdev->pipes[i].gate = NFC_HCI_INVALID_GATE;
+ hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST;
+ }
+ memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+}
+EXPORT_SYMBOL(nfc_hci_reset_pipes);
+
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host)
+{
+ int i = 0;
+
+ for (i = 0; i < NFC_HCI_MAX_PIPES; i++) {
+ if (hdev->pipes[i].dest_host != host)
+ continue;
+
+ hdev->pipes[i].gate = NFC_HCI_INVALID_GATE;
+ hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST;
+ }
+}
+EXPORT_SYMBOL(nfc_hci_reset_pipes_per_host);
+
static void nfc_hci_msg_tx_work(struct work_struct *work)
{
struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
@@ -167,48 +193,69 @@ exit:
void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
struct sk_buff *skb)
{
- int r = 0;
- u8 gate = nfc_hci_pipe2gate(hdev, pipe);
- u8 local_gate, new_pipe;
- u8 gate_opened = 0x00;
+ u8 gate = hdev->pipes[pipe].gate;
+ u8 status = NFC_HCI_ANY_OK;
+ struct hci_create_pipe_resp *create_info;
+ struct hci_delete_pipe_noti *delete_info;
+ struct hci_all_pipe_cleared_noti *cleared_info;
pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
switch (cmd) {
case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
if (skb->len != 5) {
- r = -EPROTO;
- break;
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
}
+ create_info = (struct hci_create_pipe_resp *)skb->data;
- local_gate = skb->data[3];
- new_pipe = skb->data[4];
- nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0);
-
- /* save the new created pipe and bind with local gate,
+ /* Save the newly created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
* are the destination and it is our local gate
*/
- hdev->gate2pipe[local_gate] = new_pipe;
+ hdev->gate2pipe[create_info->dest_gate] = create_info->pipe;
+ hdev->pipes[create_info->pipe].gate = create_info->dest_gate;
+ hdev->pipes[create_info->pipe].dest_host =
+ create_info->src_host;
break;
case NFC_HCI_ANY_OPEN_PIPE:
- /* if the pipe is already created, we allow remote host to
- * open it
- */
- if (gate != 0xff)
- nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK,
- &gate_opened, 1);
+ if (gate == NFC_HCI_INVALID_GATE) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ break;
+ case NFC_HCI_ADM_NOTIFY_PIPE_DELETED:
+ if (skb->len != 1) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ delete_info = (struct hci_delete_pipe_noti *)skb->data;
+
+ hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
+ hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
case NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED:
- nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0);
+ if (skb->len != 1) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ cleared_info = (struct hci_all_pipe_cleared_noti *)skb->data;
+
+ nfc_hci_reset_pipes_per_host(hdev, cleared_info->host);
break;
default:
pr_info("Discarded unknown cmd %x to gate %x\n", cmd, gate);
- r = -EINVAL;
break;
}
+ if (hdev->ops->cmd_received)
+ hdev->ops->cmd_received(hdev, pipe, cmd, skb);
+
+exit:
+ nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
+ status, NULL, 0, NULL, NULL, 0);
+
kfree_skb(skb);
}
@@ -330,15 +377,15 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
struct sk_buff *skb)
{
int r = 0;
- u8 gate = nfc_hci_pipe2gate(hdev, pipe);
+ u8 gate = hdev->pipes[pipe].gate;
- if (gate == 0xff) {
+ if (gate == NFC_HCI_INVALID_GATE) {
pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
goto exit;
}
if (hdev->ops->event_received) {
- r = hdev->ops->event_received(hdev, gate, event, skb);
+ r = hdev->ops->event_received(hdev, pipe, event, skb);
if (r <= 0)
goto exit_noskb;
}
@@ -573,7 +620,7 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
if (hdev->ops->close)
hdev->ops->close(hdev);
- memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+ nfc_hci_reset_pipes(hdev);
return 0;
}
@@ -932,7 +979,7 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
nfc_set_drvdata(hdev->ndev, hdev);
- memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+ nfc_hci_reset_pipes(hdev);
hdev->quirks = quirks;
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
index c3d2e2c1394c..ab4c8e80b1ad 100644
--- a/net/nfc/hci/hci.h
+++ b/net/nfc/hci/hci.h
@@ -65,6 +65,14 @@ struct hci_create_pipe_resp {
u8 pipe;
} __packed;
+struct hci_delete_pipe_noti {
+ u8 pipe;
+} __packed;
+
+struct hci_all_pipe_cleared_noti {
+ u8 host;
+} __packed;
+
#define NFC_HCI_FRAGMENT 0x7f
#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f))
@@ -77,8 +85,6 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
data_exchange_cb_t cb, void *cb_context,
unsigned long completion_delay);
-u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
-
void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
u8 instruction, struct sk_buff *skb);
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index e9de1514656e..1fe725d66085 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -124,17 +124,6 @@ out_skb_err:
return err;
}
-u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe)
-{
- int gate;
-
- for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++)
- if (hdev->gate2pipe[gate] == pipe)
- return gate;
-
- return 0xff;
-}
-
/*
* Receive hcp message for pipe, with type and cmd.
* skb contains optional message data only.
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
index 7aeedc43187d..7ed8949266cc 100644
--- a/net/nfc/nci/Makefile
+++ b/net/nfc/nci/Makefile
@@ -4,6 +4,6 @@
obj-$(CONFIG_NFC_NCI) += nci.o
-nci-objs := core.o data.o lib.o ntf.o rsp.o
+nci-objs := core.o data.o lib.o ntf.o rsp.o hci.o
nci-$(CONFIG_NFC_NCI_SPI) += spi.o
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 51feb5e63008..9575a1892607 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -41,10 +41,28 @@
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>
+struct core_conn_create_data {
+ int length;
+ struct nci_core_conn_create_cmd *cmd;
+};
+
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);
+struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
+ int conn_id)
+{
+ struct nci_conn_info *conn_info;
+
+ list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
+ if (conn_info->conn_id == conn_id)
+ return conn_info;
+ }
+
+ return NULL;
+}
+
/* ---- NCI requests ---- */
void nci_req_complete(struct nci_dev *ndev, int result)
@@ -109,10 +127,10 @@ static int __nci_request(struct nci_dev *ndev,
return rc;
}
-static inline int nci_request(struct nci_dev *ndev,
- void (*req)(struct nci_dev *ndev,
- unsigned long opt),
- unsigned long opt, __u32 timeout)
+inline int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int rc;
@@ -456,6 +474,95 @@ int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val)
}
EXPORT_SYMBOL(nci_set_config);
+static void nci_nfcee_discover_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_nfcee_discover_cmd cmd;
+ __u8 action = opt;
+
+ cmd.discovery_action = action;
+
+ nci_send_cmd(ndev, NCI_OP_NFCEE_DISCOVER_CMD, 1, &cmd);
+}
+
+int nci_nfcee_discover(struct nci_dev *ndev, u8 action)
+{
+ return nci_request(ndev, nci_nfcee_discover_req, action,
+ msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_nfcee_discover);
+
+static void nci_nfcee_mode_set_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_nfcee_mode_set_cmd *cmd =
+ (struct nci_nfcee_mode_set_cmd *)opt;
+
+ nci_send_cmd(ndev, NCI_OP_NFCEE_MODE_SET_CMD,
+ sizeof(struct nci_nfcee_mode_set_cmd), cmd);
+}
+
+int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode)
+{
+ struct nci_nfcee_mode_set_cmd cmd;
+
+ cmd.nfcee_id = nfcee_id;
+ cmd.nfcee_mode = nfcee_mode;
+
+ return nci_request(ndev, nci_nfcee_mode_set_req, (unsigned long)&cmd,
+ msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_nfcee_mode_set);
+
+static void nci_core_conn_create_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct core_conn_create_data *data =
+ (struct core_conn_create_data *)opt;
+
+ nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, data->length, data->cmd);
+}
+
+int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
+ u8 number_destination_params,
+ size_t params_len,
+ struct core_conn_create_dest_spec_params *params)
+{
+ int r;
+ struct nci_core_conn_create_cmd *cmd;
+ struct core_conn_create_data data;
+
+ data.length = params_len + sizeof(struct nci_core_conn_create_cmd);
+ cmd = kzalloc(data.length, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->destination_type = destination_type;
+ cmd->number_destination_params = number_destination_params;
+ memcpy(cmd->params, params, params_len);
+
+ data.cmd = cmd;
+ ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX];
+
+ r = __nci_request(ndev, nci_core_conn_create_req,
+ (unsigned long)&data,
+ msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ kfree(cmd);
+ return r;
+}
+EXPORT_SYMBOL(nci_core_conn_create);
+
+static void nci_core_conn_close_req(struct nci_dev *ndev, unsigned long opt)
+{
+ __u8 conn_id = opt;
+
+ nci_send_cmd(ndev, NCI_OP_CORE_CONN_CLOSE_CMD, 1, &conn_id);
+}
+
+int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id)
+{
+ return nci_request(ndev, nci_core_conn_close_req, conn_id,
+ msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_conn_close);
+
static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
@@ -712,6 +819,11 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+ if (!conn_info)
+ return -EPROTO;
pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
@@ -724,8 +836,8 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
return -EBUSY;
/* store cb and context to be used on receiving data */
- ndev->data_exchange_cb = cb;
- ndev->data_exchange_cb_context = cb_context;
+ conn_info->data_exchange_cb = cb;
+ conn_info->data_exchange_cb_context = cb_context;
rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
if (rc)
@@ -768,10 +880,16 @@ static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
static int nci_discover_se(struct nfc_dev *nfc_dev)
{
+ int r;
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- if (ndev->ops->discover_se)
+ if (ndev->ops->discover_se) {
+ r = nci_nfcee_discover(ndev, NCI_NFCEE_DISCOVERY_ACTION_ENABLE);
+ if (r != NCI_STATUS_OK)
+ return -EPROTO;
+
return ndev->ops->discover_se(ndev);
+ }
return 0;
}
@@ -807,7 +925,6 @@ static struct nfc_ops nci_nfc_ops = {
};
/* ---- Interface to NCI drivers ---- */
-
/**
* nci_allocate_device - allocate a new nci device
*
@@ -842,13 +959,20 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
tx_headroom + NCI_DATA_HDR_SIZE,
tx_tailroom);
if (!ndev->nfc_dev)
- goto free_exit;
+ goto free_nci;
+
+ ndev->hci_dev = nci_hci_allocate(ndev);
+ if (!ndev->hci_dev)
+ goto free_nfc;
nfc_set_drvdata(ndev->nfc_dev, ndev);
return ndev;
-free_exit:
+free_nfc:
+ kfree(ndev->nfc_dev);
+
+free_nci:
kfree(ndev);
return NULL;
}
@@ -913,6 +1037,7 @@ int nci_register_device(struct nci_dev *ndev)
(unsigned long) ndev);
mutex_init(&ndev->req_lock);
+ INIT_LIST_HEAD(&ndev->conn_info_list);
rc = nfc_register_device(ndev->nfc_dev);
if (rc)
@@ -938,12 +1063,19 @@ EXPORT_SYMBOL(nci_register_device);
*/
void nci_unregister_device(struct nci_dev *ndev)
{
+ struct nci_conn_info *conn_info, *n;
+
nci_close_device(ndev);
destroy_workqueue(ndev->cmd_wq);
destroy_workqueue(ndev->rx_wq);
destroy_workqueue(ndev->tx_wq);
+ list_for_each_entry_safe(conn_info, n, &ndev->conn_info_list, list) {
+ list_del(&conn_info->list);
+ /* conn_info is allocated with devm_kzalloc */
+ }
+
nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);
@@ -1027,20 +1159,25 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
static void nci_tx_work(struct work_struct *work)
{
struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
+ struct nci_conn_info *conn_info;
struct sk_buff *skb;
- pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
+ conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id);
+ if (!conn_info)
+ return;
+
+ pr_debug("credits_cnt %d\n", atomic_read(&conn_info->credits_cnt));
/* Send queued tx data */
- while (atomic_read(&ndev->credits_cnt)) {
+ while (atomic_read(&conn_info->credits_cnt)) {
skb = skb_dequeue(&ndev->tx_q);
if (!skb)
return;
/* Check if data flow control is used */
- if (atomic_read(&ndev->credits_cnt) !=
+ if (atomic_read(&conn_info->credits_cnt) !=
NCI_DATA_FLOW_CONTROL_NOT_USED)
- atomic_dec(&ndev->credits_cnt);
+ atomic_dec(&conn_info->credits_cnt);
pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
nci_pbf(skb->data),
@@ -1092,7 +1229,9 @@ static void nci_rx_work(struct work_struct *work)
if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
- nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
+ nci_data_exchange_complete(ndev, NULL,
+ ndev->cur_conn_id,
+ -ETIMEDOUT);
clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
}
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index a2de2a8cb00e..566466d90048 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -36,10 +36,20 @@
/* Complete data exchange transaction and forward skb to nfc core */
void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
- int err)
+ __u8 conn_id, int err)
{
- data_exchange_cb_t cb = ndev->data_exchange_cb;
- void *cb_context = ndev->data_exchange_cb_context;
+ struct nci_conn_info *conn_info;
+ data_exchange_cb_t cb;
+ void *cb_context;
+
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ cb = conn_info->data_exchange_cb;
+ cb_context = conn_info->data_exchange_cb_context;
pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
@@ -48,9 +58,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
if (cb) {
- ndev->data_exchange_cb = NULL;
- ndev->data_exchange_cb_context = NULL;
-
/* forward skb to nfc core */
cb(cb_context, skb, err);
} else if (skb) {
@@ -60,6 +67,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
kfree_skb(skb);
}
+exit:
clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
}
@@ -85,6 +93,7 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev,
static int nci_queue_tx_data_frags(struct nci_dev *ndev,
__u8 conn_id,
struct sk_buff *skb) {
+ struct nci_conn_info *conn_info;
int total_len = skb->len;
unsigned char *data = skb->data;
unsigned long flags;
@@ -95,11 +104,17 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ rc = -EPROTO;
+ goto free_exit;
+ }
+
__skb_queue_head_init(&frags_q);
while (total_len) {
frag_len =
- min_t(int, total_len, ndev->max_data_pkt_payload_size);
+ min_t(int, total_len, conn_info->max_pkt_payload_len);
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
@@ -151,12 +166,19 @@ exit:
/* Send NCI data */
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
{
+ struct nci_conn_info *conn_info;
int rc = 0;
pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ rc = -EPROTO;
+ goto free_exit;
+ }
+
/* check if the packet need to be fragmented */
- if (skb->len <= ndev->max_data_pkt_payload_size) {
+ if (skb->len <= conn_info->max_pkt_payload_len) {
/* no need to fragment packet */
nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
@@ -170,6 +192,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
}
}
+ ndev->cur_conn_id = conn_id;
queue_work(ndev->tx_wq, &ndev->tx_work);
goto exit;
@@ -185,7 +208,7 @@ exit:
static void nci_add_rx_data_frag(struct nci_dev *ndev,
struct sk_buff *skb,
- __u8 pbf, __u8 status)
+ __u8 pbf, __u8 conn_id, __u8 status)
{
int reassembly_len;
int err = 0;
@@ -229,16 +252,13 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
}
exit:
- if (ndev->nfc_dev->rf_mode == NFC_RF_INITIATOR) {
- nci_data_exchange_complete(ndev, skb, err);
- } else if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
+ if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
/* Data received in Target mode, forward to nfc core */
err = nfc_tm_data_received(ndev->nfc_dev, skb);
if (err)
pr_err("unable to handle received data\n");
} else {
- pr_err("rf mode unknown\n");
- kfree_skb(skb);
+ nci_data_exchange_complete(ndev, skb, conn_id, err);
}
}
@@ -247,6 +267,8 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u8 pbf = nci_pbf(skb->data);
__u8 status = 0;
+ __u8 conn_id = nci_conn_id(skb->data);
+ struct nci_conn_info *conn_info;
pr_debug("len %d\n", skb->len);
@@ -255,6 +277,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_conn_id(skb->data),
nci_plen(skb->data));
+ conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
+ if (!conn_info)
+ return;
+
/* strip the nci data header */
skb_pull(skb, NCI_DATA_HDR_SIZE);
@@ -268,5 +294,5 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb_trim(skb, (skb->len - 1));
}
- nci_add_rx_data_frag(ndev, skb, pbf, nci_to_errno(status));
+ nci_add_rx_data_frag(ndev, skb, pbf, conn_id, nci_to_errno(status));
}
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
new file mode 100644
index 000000000000..ed54ec533836
--- /dev/null
+++ b/net/nfc/nci/hci.c
@@ -0,0 +1,694 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ * This is the HCI over NCI implementation, as specified in section 10.2
+ * of the NCI 1.1 specification.
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+struct nci_data {
+ u8 conn_id;
+ u8 pipe;
+ u8 cmd;
+ const u8 *data;
+ u32 data_len;
+} __packed;
+
+struct nci_hci_create_pipe_params {
+ u8 src_gate;
+ u8 dest_host;
+ u8 dest_gate;
+} __packed;
+
+struct nci_hci_create_pipe_resp {
+ u8 src_host;
+ u8 src_gate;
+ u8 dest_host;
+ u8 dest_gate;
+ u8 pipe;
+} __packed;
+
+struct nci_hci_delete_pipe_noti {
+ u8 pipe;
+} __packed;
+
+struct nci_hci_all_pipe_cleared_noti {
+ u8 host;
+} __packed;
+
+struct nci_hcp_message {
+ u8 header; /* type -cmd,evt,rsp- + instruction */
+ u8 data[];
+} __packed;
+
+struct nci_hcp_packet {
+ u8 header; /* cbit+pipe */
+ struct nci_hcp_message message;
+} __packed;
+
+#define NCI_HCI_ANY_SET_PARAMETER 0x01
+#define NCI_HCI_ANY_GET_PARAMETER 0x02
+#define NCI_HCI_ANY_CLOSE_PIPE 0x04
+
+#define NCI_HFP_NO_CHAINING 0x80
+
+#define NCI_NFCEE_ID_HCI 0x80
+
+#define NCI_EVT_HOT_PLUG 0x03
+
+#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01
+
+/* HCP headers */
+#define NCI_HCI_HCP_PACKET_HEADER_LEN 1
+#define NCI_HCI_HCP_MESSAGE_HEADER_LEN 1
+#define NCI_HCI_HCP_HEADER_LEN 2
+
+/* HCP types */
+#define NCI_HCI_HCP_COMMAND 0x00
+#define NCI_HCI_HCP_EVENT 0x01
+#define NCI_HCI_HCP_RESPONSE 0x02
+
+#define NCI_HCI_ADM_NOTIFY_PIPE_CREATED 0x12
+#define NCI_HCI_ADM_NOTIFY_PIPE_DELETED 0x13
+#define NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15
+
+#define NCI_HCI_FRAGMENT 0x7f
+#define NCI_HCP_HEADER(type, instr) ((((type) & 0x03) << 6) |\
+ ((instr) & 0x3f))
+
+#define NCI_HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6)
+#define NCI_HCP_MSG_GET_CMD(header) (header & 0x3f)
+#define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f)
+
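/* For reference, a minimal standalone sketch (illustrative only, not part of
 * the patch) of how the HCP header macros above pack and unpack a message
 * header: the top two bits carry the HCP type, the low six bits the
 * instruction. Values below reuse NCI_HCI_HCP_RESPONSE (0x02) and
 * NCI_HCI_ANY_SET_PARAMETER (0x01) from this file.
 */
#include <stdio.h>

int main(void)
{
	/* Pack a response (type 0x02) carrying ANY_SET_PARAMETER (0x01). */
	unsigned char hdr = ((0x02 & 0x03) << 6) | (0x01 & 0x3f);	/* 0x81 */

	/* Unpack: prints "type=2 cmd=1" */
	printf("type=%d cmd=%d\n", (hdr & 0xc0) >> 6, hdr & 0x3f);
	return 0;
}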
+/* HCI core */
+static void nci_hci_reset_pipes(struct nci_hci_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < NCI_HCI_MAX_PIPES; i++) {
+ hdev->pipes[i].gate = NCI_HCI_INVALID_GATE;
+ hdev->pipes[i].host = NCI_HCI_INVALID_HOST;
+ }
+ memset(hdev->gate2pipe, NCI_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+}
+
+static void nci_hci_reset_pipes_per_host(struct nci_dev *ndev, u8 host)
+{
+ int i;
+
+ for (i = 0; i < NCI_HCI_MAX_PIPES; i++) {
+ if (ndev->hci_dev->pipes[i].host == host) {
+ ndev->hci_dev->pipes[i].gate = NCI_HCI_INVALID_GATE;
+ ndev->hci_dev->pipes[i].host = NCI_HCI_INVALID_HOST;
+ }
+ }
+}
+
+/* Fragment HCI data over NCI packet.
+ * NFC Forum NCI 10.2.2 Data Exchange:
+ * The payload of the Data Packets sent on the Logical Connection SHALL be
+ * valid HCP packets, as defined within [ETSI_102622]. Each Data Packet SHALL
+ * contain a single HCP packet. NCI Segmentation and Reassembly SHALL NOT be
+ * applied to Data Messages in either direction. The HCI fragmentation mechanism
+ * is used if required.
+ */
+static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+ const u8 data_type, const u8 *data,
+ size_t data_len)
+{
+ struct nci_conn_info *conn_info;
+ struct sk_buff *skb;
+ int len, i, r;
+ u8 cb = pipe;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ return -EPROTO;
+
+ skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len +
+ NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE);
+ *skb_push(skb, 1) = data_type;
+
+ i = 0;
+ len = conn_info->max_pkt_payload_len;
+
+ do {
+ /* If last packet add NCI_HFP_NO_CHAINING */
+ if (i + conn_info->max_pkt_payload_len -
+ (skb->len + 1) >= data_len) {
+ cb |= NCI_HFP_NO_CHAINING;
+ len = data_len - i;
+ } else {
+ len = conn_info->max_pkt_payload_len - skb->len - 1;
+ }
+
+ *skb_push(skb, 1) = cb;
+
+ if (len > 0)
+ memcpy(skb_put(skb, len), data + i, len);
+
+ r = nci_send_data(ndev, conn_info->conn_id, skb);
+ if (r < 0)
+ return r;
+
+ i += len;
+ if (i < data_len) {
+ skb_trim(skb, 0);
+ skb_pull(skb, len);
+ }
+ } while (i < data_len);
+
+ return i;
+}
+
+static void nci_hci_send_data_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_data *data = (struct nci_data *)opt;
+
+ nci_hci_send_data(ndev, data->pipe, data->cmd,
+ data->data, data->data_len);
+}
+
+int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+ const u8 *param, size_t param_len)
+{
+ u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+ if (pipe == NCI_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nci_hci_send_data(ndev, pipe,
+ NCI_HCP_HEADER(NCI_HCI_HCP_EVENT, event),
+ param, param_len);
+}
+EXPORT_SYMBOL(nci_hci_send_event);
+
+int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len,
+ struct sk_buff **skb)
+{
+ struct nci_conn_info *conn_info;
+ struct nci_data data;
+ int r;
+ u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+ if (pipe == NCI_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ return -EPROTO;
+
+ data.conn_id = conn_info->conn_id;
+ data.pipe = pipe;
+ data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, cmd);
+ data.data = param;
+ data.data_len = param_len;
+
+ r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ msecs_to_jiffies(NCI_DATA_TIMEOUT));
+
+ if (r == NCI_STATUS_OK)
+ *skb = conn_info->rx_skb;
+
+ return r;
+}
+EXPORT_SYMBOL(nci_hci_send_cmd);
+
+static void nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
+ u8 event, struct sk_buff *skb)
+{
+ if (ndev->ops->hci_event_received)
+ ndev->ops->hci_event_received(ndev, pipe, event, skb);
+}
+
+static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
+ u8 cmd, struct sk_buff *skb)
+{
+ u8 gate = ndev->hci_dev->pipes[pipe].gate;
+ u8 status = NCI_HCI_ANY_OK | ~NCI_HCI_FRAGMENT;
+ u8 dest_gate, new_pipe;
+ struct nci_hci_create_pipe_resp *create_info;
+ struct nci_hci_delete_pipe_noti *delete_info;
+ struct nci_hci_all_pipe_cleared_noti *cleared_info;
+
+ pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+
+ switch (cmd) {
+ case NCI_HCI_ADM_NOTIFY_PIPE_CREATED:
+ if (skb->len != 5) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ create_info = (struct nci_hci_create_pipe_resp *)skb->data;
+ dest_gate = create_info->dest_gate;
+ new_pipe = create_info->pipe;
+
+ /* Save the newly created pipe and bind with local gate,
+ * the description for skb->data[3] is destination gate id
+ * but since we received this cmd from host controller, we
+ * are the destination and it is our local gate
+ */
+ ndev->hci_dev->gate2pipe[dest_gate] = new_pipe;
+ ndev->hci_dev->pipes[new_pipe].gate = dest_gate;
+ ndev->hci_dev->pipes[new_pipe].host =
+ create_info->src_host;
+ break;
+ case NCI_HCI_ANY_OPEN_PIPE:
+ /* If the pipe is not created report an error */
+ if (gate == NCI_HCI_INVALID_GATE) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ break;
+ case NCI_HCI_ADM_NOTIFY_PIPE_DELETED:
+ if (skb->len != 1) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+
+ ndev->hci_dev->pipes[delete_info->pipe].gate =
+ NCI_HCI_INVALID_GATE;
+ ndev->hci_dev->pipes[delete_info->pipe].host =
+ NCI_HCI_INVALID_HOST;
+ break;
+ case NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED:
+ if (skb->len != 1) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
+ cleared_info =
+ (struct nci_hci_all_pipe_cleared_noti *)skb->data;
+ nci_hci_reset_pipes_per_host(ndev, cleared_info->host);
+ break;
+ default:
+ pr_debug("Discarded unknown cmd %x to gate %x\n", cmd, gate);
+ break;
+ }
+
+ if (ndev->ops->hci_cmd_received)
+ ndev->ops->hci_cmd_received(ndev, pipe, cmd, skb);
+
+exit:
+ nci_hci_send_data(ndev, pipe, status, NULL, 0);
+
+ kfree_skb(skb);
+}
+
+static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
+ u8 result, struct sk_buff *skb)
+{
+ struct nci_conn_info *conn_info;
+ u8 status = result;
+
+ if (result != NCI_HCI_ANY_OK)
+ goto exit;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info) {
+ status = NCI_STATUS_REJECTED;
+ goto exit;
+ }
+
+ conn_info->rx_skb = skb;
+
+exit:
+ nci_req_complete(ndev, status);
+}
+
+/* Receive hcp message for pipe, with type and cmd.
+ * skb contains optional message data only.
+ */
+static void nci_hci_hcp_message_rx(struct nci_dev *ndev, u8 pipe,
+ u8 type, u8 instruction, struct sk_buff *skb)
+{
+ switch (type) {
+ case NCI_HCI_HCP_RESPONSE:
+ nci_hci_resp_received(ndev, pipe, instruction, skb);
+ break;
+ case NCI_HCI_HCP_COMMAND:
+ nci_hci_cmd_received(ndev, pipe, instruction, skb);
+ break;
+ case NCI_HCI_HCP_EVENT:
+ nci_hci_event_received(ndev, pipe, instruction, skb);
+ break;
+ default:
+ pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
+ type, instruction);
+ kfree_skb(skb);
+ break;
+ }
+
+ nci_req_complete(ndev, 0);
+}
+
+static void nci_hci_msg_rx_work(struct work_struct *work)
+{
+ struct nci_hci_dev *hdev =
+ container_of(work, struct nci_hci_dev, msg_rx_work);
+ struct sk_buff *skb;
+ struct nci_hcp_message *message;
+ u8 pipe, type, instruction;
+
+ while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+ pipe = skb->data[0];
+ skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
+ message = (struct nci_hcp_message *)skb->data;
+ type = NCI_HCP_MSG_GET_TYPE(message->header);
+ instruction = NCI_HCP_MSG_GET_CMD(message->header);
+ skb_pull(skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+
+ nci_hci_hcp_message_rx(hdev->ndev, pipe,
+ type, instruction, skb);
+ }
+}
+
+void nci_hci_data_received_cb(void *context,
+ struct sk_buff *skb, int err)
+{
+ struct nci_dev *ndev = (struct nci_dev *)context;
+ struct nci_hcp_packet *packet;
+ u8 pipe, type, instruction;
+ struct sk_buff *hcp_skb;
+ struct sk_buff *frag_skb;
+ int msg_len;
+
+ pr_debug("\n");
+
+ if (err) {
+ nci_req_complete(ndev, err);
+ return;
+ }
+
+ packet = (struct nci_hcp_packet *)skb->data;
+ if ((packet->header & ~NCI_HCI_FRAGMENT) == 0) {
+ skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
+ return;
+ }
+
+ /* it's the last fragment. Does it need re-aggregation? */
+ if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) {
+ pipe = packet->header & NCI_HCI_FRAGMENT;
+ skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
+
+ msg_len = 0;
+ skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
+ msg_len += (frag_skb->len -
+ NCI_HCI_HCP_PACKET_HEADER_LEN);
+ }
+
+ hcp_skb = nfc_alloc_recv_skb(NCI_HCI_HCP_PACKET_HEADER_LEN +
+ msg_len, GFP_KERNEL);
+ if (!hcp_skb) {
+ nci_req_complete(ndev, -ENOMEM);
+ return;
+ }
+
+ *skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+
+ skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
+ msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
+ memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
+ NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
+ }
+
+ skb_queue_purge(&ndev->hci_dev->rx_hcp_frags);
+ } else {
+ packet->header &= NCI_HCI_FRAGMENT;
+ hcp_skb = skb;
+ }
+
+ /* if this is a response, dispatch immediately to
+ * unblock waiting cmd context. Otherwise, enqueue to dispatch
+ * in separate context where handler can also execute command.
+ */
+ packet = (struct nci_hcp_packet *)hcp_skb->data;
+ type = NCI_HCP_MSG_GET_TYPE(packet->message.header);
+ if (type == NCI_HCI_HCP_RESPONSE) {
+ pipe = packet->header;
+ instruction = NCI_HCP_MSG_GET_CMD(packet->message.header);
+ skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN +
+ NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+ nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb);
+ } else {
+ skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb);
+ schedule_work(&ndev->hci_dev->msg_rx_work);
+ }
+}
+
+int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe)
+{
+ struct nci_data data;
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ return -EPROTO;
+
+ data.conn_id = conn_info->conn_id;
+ data.pipe = pipe;
+ data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND,
+ NCI_HCI_ANY_OPEN_PIPE);
+ data.data = NULL;
+ data.data_len = 0;
+
+ return nci_request(ndev, nci_hci_send_data_req,
+ (unsigned long)&data,
+ msecs_to_jiffies(NCI_DATA_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_hci_open_pipe);
+
+int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len)
+{
+ struct nci_conn_info *conn_info;
+ struct nci_data data;
+ int r;
+ u8 *tmp;
+ u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+ pr_debug("idx=%d to gate %d\n", idx, gate);
+
+ if (pipe == NCI_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ return -EPROTO;
+
+ tmp = kmalloc(1 + param_len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ *tmp = idx;
+ memcpy(tmp + 1, param, param_len);
+
+ data.conn_id = conn_info->conn_id;
+ data.pipe = pipe;
+ data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND,
+ NCI_HCI_ANY_SET_PARAMETER);
+ data.data = tmp;
+ data.data_len = param_len + 1;
+
+ r = nci_request(ndev, nci_hci_send_data_req,
+ (unsigned long)&data,
+ msecs_to_jiffies(NCI_DATA_TIMEOUT));
+
+ kfree(tmp);
+ return r;
+}
+EXPORT_SYMBOL(nci_hci_set_param);
+
+int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ struct sk_buff **skb)
+{
+ struct nci_conn_info *conn_info;
+ struct nci_data data;
+ int r;
+ u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+ pr_debug("idx=%d to gate %d\n", idx, gate);
+
+ if (pipe == NCI_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ return -EPROTO;
+
+ data.conn_id = conn_info->conn_id;
+ data.pipe = pipe;
+ data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND,
+ NCI_HCI_ANY_GET_PARAMETER);
+ data.data = &idx;
+ data.data_len = 1;
+
+ r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ msecs_to_jiffies(NCI_DATA_TIMEOUT));
+
+ if (r == NCI_STATUS_OK)
+ *skb = conn_info->rx_skb;
+
+ return r;
+}
+EXPORT_SYMBOL(nci_hci_get_param);
+
+int nci_hci_connect_gate(struct nci_dev *ndev,
+ u8 dest_host, u8 dest_gate, u8 pipe)
+{
+ int r;
+
+ if (pipe == NCI_HCI_DO_NOT_OPEN_PIPE)
+ return 0;
+
+ if (ndev->hci_dev->gate2pipe[dest_gate] != NCI_HCI_INVALID_PIPE)
+ return -EADDRINUSE;
+
+ if (pipe != NCI_HCI_INVALID_PIPE)
+ goto open_pipe;
+
+ switch (dest_gate) {
+ case NCI_HCI_LINK_MGMT_GATE:
+ pipe = NCI_HCI_LINK_MGMT_PIPE;
+ break;
+ case NCI_HCI_ADMIN_GATE:
+ pipe = NCI_HCI_ADMIN_PIPE;
+ break;
+ }
+
+open_pipe:
+ r = nci_hci_open_pipe(ndev, pipe);
+ if (r < 0)
+ return r;
+
+ ndev->hci_dev->pipes[pipe].gate = dest_gate;
+ ndev->hci_dev->pipes[pipe].host = dest_host;
+ ndev->hci_dev->gate2pipe[dest_gate] = pipe;
+
+ return 0;
+}
+EXPORT_SYMBOL(nci_hci_connect_gate);
+
+static int nci_hci_dev_connect_gates(struct nci_dev *ndev,
+ u8 gate_count,
+ struct nci_hci_gate *gates)
+{
+ int r;
+
+ while (gate_count--) {
+ r = nci_hci_connect_gate(ndev, gates->dest_host,
+ gates->gate, gates->pipe);
+ if (r < 0)
+ return r;
+ gates++;
+ }
+
+ return 0;
+}
+
+int nci_hci_dev_session_init(struct nci_dev *ndev)
+{
+ struct nci_conn_info *conn_info;
+ struct sk_buff *skb;
+ int r;
+
+ ndev->hci_dev->count_pipes = 0;
+ ndev->hci_dev->expected_pipes = 0;
+
+ conn_info = ndev->hci_dev->conn_info;
+ if (!conn_info)
+ return -EPROTO;
+
+ conn_info->data_exchange_cb = nci_hci_data_received_cb;
+ conn_info->data_exchange_cb_context = ndev;
+
+ nci_hci_reset_pipes(ndev->hci_dev);
+
+ if (ndev->hci_dev->init_data.gates[0].gate != NCI_HCI_ADMIN_GATE)
+ return -EPROTO;
+
+ r = nci_hci_connect_gate(ndev,
+ ndev->hci_dev->init_data.gates[0].dest_host,
+ ndev->hci_dev->init_data.gates[0].gate,
+ ndev->hci_dev->init_data.gates[0].pipe);
+ if (r < 0)
+ goto exit;
+
+ r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
+ NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb);
+ if (r < 0)
+ goto exit;
+
+ if (skb->len &&
+ skb->len == strlen(ndev->hci_dev->init_data.session_id) &&
+ memcmp(ndev->hci_dev->init_data.session_id,
+ skb->data, skb->len) == 0 &&
+ ndev->ops->hci_load_session) {
+ /* Restore gate<->pipe table from some proprietary location. */
+ r = ndev->ops->hci_load_session(ndev);
+ if (r < 0)
+ goto exit;
+ } else {
+ r = nci_hci_dev_connect_gates(ndev,
+ ndev->hci_dev->init_data.gate_count,
+ ndev->hci_dev->init_data.gates);
+ if (r < 0)
+ goto exit;
+
+ r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
+ NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY,
+ ndev->hci_dev->init_data.session_id,
+ strlen(ndev->hci_dev->init_data.session_id));
+ }
+ if (r == 0)
+ goto exit;
+
+exit:
+ kfree_skb(skb);
+
+ return r;
+}
+EXPORT_SYMBOL(nci_hci_dev_session_init);
+
+struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
+{
+ struct nci_hci_dev *hdev;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
+
+ skb_queue_head_init(&hdev->rx_hcp_frags);
+ INIT_WORK(&hdev->msg_rx_work, nci_hci_msg_rx_work);
+ skb_queue_head_init(&hdev->msg_rx_queue);
+ hdev->ndev = ndev;
+
+ return hdev;
+}
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 22e453cb787d..3218071072ac 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -43,6 +43,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+ struct nci_conn_info *conn_info;
int i;
pr_debug("num_entries %d\n", ntf->num_entries);
@@ -59,11 +60,13 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
i, ntf->conn_entries[i].conn_id,
ntf->conn_entries[i].credits);
- if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
- /* found static rf connection */
- atomic_add(ntf->conn_entries[i].credits,
- &ndev->credits_cnt);
- }
+ conn_info = nci_get_conn_info_by_conn_id(ndev,
+ ntf->conn_entries[i].conn_id);
+ if (!conn_info)
+ return;
+
+ atomic_add(ntf->conn_entries[i].credits,
+ &conn_info->credits_cnt);
}
/* trigger the next tx */
@@ -96,7 +99,7 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
- nci_data_exchange_complete(ndev, NULL, -EIO);
+ nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
}
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
@@ -513,6 +516,7 @@ static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev,
static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
+ struct nci_conn_info *conn_info;
struct nci_rf_intf_activated_ntf ntf;
__u8 *data = skb->data;
int err = NCI_STATUS_OK;
@@ -537,6 +541,13 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
pr_debug("rf_tech_specific_params_len %d\n",
ntf.rf_tech_specific_params_len);
+ /* If this contains a value of 0x00 (NFCEE Direct RF
+ * Interface) then all following parameters SHALL contain a
+ * value of 0 and SHALL be ignored.
+ */
+ if (ntf.rf_interface == NCI_RF_INTERFACE_NFCEE_DIRECT)
+ goto listen;
+
if (ntf.rf_tech_specific_params_len > 0) {
switch (ntf.activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
@@ -614,11 +625,16 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
exit:
if (err == NCI_STATUS_OK) {
- ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size;
- ndev->initial_num_credits = ntf.initial_num_credits;
+ conn_info = ndev->rf_conn_info;
+ if (!conn_info)
+ return;
+
+ conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
+ conn_info->initial_num_credits = ntf.initial_num_credits;
/* set the available credits to initial value */
- atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+ atomic_set(&conn_info->credits_cnt,
+ conn_info->initial_num_credits);
/* store general bytes to be reported later in dep_link_up */
if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) {
@@ -643,6 +659,7 @@ exit:
nci_req_complete(ndev, err);
}
} else {
+listen:
/* Listen mode */
atomic_set(&ndev->state, NCI_LISTEN_ACTIVE);
if (err == NCI_STATUS_OK &&
@@ -661,10 +678,15 @@ exit:
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
+ struct nci_conn_info *conn_info;
struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
+ conn_info = ndev->rf_conn_info;
+ if (!conn_info)
+ return;
+
/* drop tx data queue */
skb_queue_purge(&ndev->tx_q);
@@ -676,7 +698,8 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
- nci_data_exchange_complete(ndev, NULL, -EIO);
+ nci_data_exchange_complete(ndev, NULL, NCI_STATIC_RF_CONN_ID,
+ -EIO);
switch (ntf->type) {
case NCI_DEACTIVATE_TYPE_IDLE_MODE:
@@ -696,6 +719,32 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
nci_req_complete(ndev, NCI_STATUS_OK);
}
+static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ u8 status = NCI_STATUS_OK;
+ struct nci_nfcee_discover_ntf *nfcee_ntf =
+ (struct nci_nfcee_discover_ntf *)skb->data;
+
+ pr_debug("\n");
+
+ /* NFC Forum NCI 9.2.1 HCI Network Specific Handling
+ * If the NFCC supports the HCI Network, it SHALL return one,
+ * and only one, NFCEE_DISCOVER_NTF with a Protocol type of
+ * "HCI Access", even if the HCI Network contains multiple NFCEEs.
+ */
+ ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id;
+ ndev->cur_id = nfcee_ntf->nfcee_id;
+
+ nci_req_complete(ndev, status);
+}
+
+static void nci_nfcee_action_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ pr_debug("\n");
+}
+
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u16 ntf_opcode = nci_opcode(skb->data);
@@ -734,6 +783,14 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_rf_deactivate_ntf_packet(ndev, skb);
break;
+ case NCI_OP_NFCEE_DISCOVER_NTF:
+ nci_nfcee_discover_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_NFCEE_ACTION_NTF:
+ nci_nfcee_action_ntf_packet(ndev, skb);
+ break;
+
default:
pr_err("unknown ntf opcode 0x%x\n", ntf_opcode);
break;
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 041de51ccdbe..02486bc2ceea 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -140,13 +140,31 @@ static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
+ struct nci_conn_info *conn_info;
__u8 status = skb->data[0];
pr_debug("status 0x%x\n", status);
- if (status == NCI_STATUS_OK)
+ if (status == NCI_STATUS_OK) {
atomic_set(&ndev->state, NCI_DISCOVERY);
+ conn_info = ndev->rf_conn_info;
+ if (!conn_info) {
+ conn_info = devm_kzalloc(&ndev->nfc_dev->dev,
+ sizeof(struct nci_conn_info),
+ GFP_KERNEL);
+ if (!conn_info) {
+ status = NCI_STATUS_REJECTED;
+ goto exit;
+ }
+ conn_info->conn_id = NCI_STATIC_RF_CONN_ID;
+ INIT_LIST_HEAD(&conn_info->list);
+ list_add(&conn_info->list, &ndev->conn_info_list);
+ ndev->rf_conn_info = conn_info;
+ }
+ }
+
+exit:
nci_req_complete(ndev, status);
}
@@ -178,6 +196,90 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
}
}
+static void nci_nfcee_discover_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_nfcee_discover_rsp *discover_rsp;
+
+ if (skb->len != 2) {
+ nci_req_complete(ndev, NCI_STATUS_NFCEE_PROTOCOL_ERROR);
+ return;
+ }
+
+ discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data;
+
+ if (discover_rsp->status != NCI_STATUS_OK ||
+ discover_rsp->num_nfcee == 0)
+ nci_req_complete(ndev, discover_rsp->status);
+}
+
+static void nci_nfcee_mode_set_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ pr_debug("status 0x%x\n", status);
+ nci_req_complete(ndev, status);
+}
+
+static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+ struct nci_conn_info *conn_info;
+ struct nci_core_conn_create_rsp *rsp;
+
+ pr_debug("status 0x%x\n", status);
+
+ if (status == NCI_STATUS_OK) {
+ rsp = (struct nci_core_conn_create_rsp *)skb->data;
+
+ conn_info = devm_kzalloc(&ndev->nfc_dev->dev,
+ sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ status = NCI_STATUS_REJECTED;
+ goto exit;
+ }
+
+ conn_info->id = ndev->cur_id;
+ conn_info->conn_id = rsp->conn_id;
+
+ /* Note: data_exchange_cb and data_exchange_cb_context need to
+ * be specified outside of nci_core_conn_create_rsp_packet
+ */
+
+ INIT_LIST_HEAD(&conn_info->list);
+ list_add(&conn_info->list, &ndev->conn_info_list);
+
+ if (ndev->cur_id == ndev->hci_dev->nfcee_id)
+ ndev->hci_dev->conn_info = conn_info;
+
+ conn_info->conn_id = rsp->conn_id;
+ conn_info->max_pkt_payload_len = rsp->max_ctrl_pkt_payload_len;
+ atomic_set(&conn_info->credits_cnt, rsp->credits_cnt);
+ }
+
+exit:
+ nci_req_complete(ndev, status);
+}
+
+static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_conn_info *conn_info;
+ __u8 status = skb->data[0];
+
+ pr_debug("status 0x%x\n", status);
+ if (status == NCI_STATUS_OK) {
+ conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id);
+ if (conn_info) {
+ list_del(&conn_info->list);
+ devm_kfree(&ndev->nfc_dev->dev, conn_info);
+ }
+ }
+ nci_req_complete(ndev, status);
+}
+
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u16 rsp_opcode = nci_opcode(skb->data);
@@ -207,6 +309,14 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_set_config_rsp_packet(ndev, skb);
break;
+ case NCI_OP_CORE_CONN_CREATE_RSP:
+ nci_core_conn_create_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_CORE_CONN_CLOSE_RSP:
+ nci_core_conn_close_rsp_packet(ndev, skb);
+ break;
+
case NCI_OP_RF_DISCOVER_MAP_RSP:
nci_rf_disc_map_rsp_packet(ndev, skb);
break;
@@ -223,6 +333,14 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_rf_deactivate_rsp_packet(ndev, skb);
break;
+ case NCI_OP_NFCEE_DISCOVER_RSP:
+ nci_nfcee_discover_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_NFCEE_MODE_SET_RSP:
+ nci_nfcee_mode_set_rsp_packet(ndev, skb);
+ break;
+
default:
pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
break;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 44989fc8cddf..14a2d11581da 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -102,7 +102,8 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
goto nla_put_failure;
}
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -496,6 +497,53 @@ free_msg:
return -EMSGSIZE;
}
+int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx,
+ struct nfc_evt_transaction *evt_transaction)
+{
+ struct nfc_se *se;
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_EVENT_SE_TRANSACTION);
+ if (!hdr)
+ goto free_msg;
+
+ se = nfc_find_se(dev, se_idx);
+ if (!se)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+ nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) ||
+ nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) ||
+ nla_put(msg, NFC_ATTR_SE_AID, evt_transaction->aid_len,
+ evt_transaction->aid) ||
+ nla_put(msg, NFC_ATTR_SE_PARAMS, evt_transaction->params_len,
+ evt_transaction->params))
+ goto nla_put_failure;
+
+ /* evt_transaction is no longer used */
+ devm_kfree(&dev->dev, evt_transaction);
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ /* evt_transaction is no longer used */
+ devm_kfree(&dev->dev, evt_transaction);
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
u32 portid, u32 seq,
struct netlink_callback *cb,
@@ -518,7 +566,8 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode))
goto nla_put_failure;
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -908,7 +957,8 @@ static int nfc_genl_send_params(struct sk_buff *msg,
nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux)))
goto nla_put_failure;
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
@@ -1247,8 +1297,7 @@ static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
goto nla_put_failure;
- if (genlmsg_end(msg, hdr) < 0)
- goto nla_put_failure;
+ genlmsg_end(msg, hdr);
}
return 0;
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 88d60064890e..a8ce80b47720 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -100,6 +100,8 @@ int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list);
int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type);
int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx);
+int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx,
+ struct nfc_evt_transaction *evt_transaction);
struct nfc_dev *nfc_get_device(unsigned int idx);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 770064c83711..b491c1c296fe 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -185,10 +185,15 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
return 0;
}
-static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
- const __be32 *mpls_lse)
+/* 'KEY' must not have any bits set outside of the 'MASK' */
+#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
+
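/* A minimal standalone sketch (illustrative only, not part of the patch) of
 * the masked-set helpers defined above: bits selected by MASK come from KEY,
 * all other bits keep their old value, which is why KEY must not set bits
 * outside of MASK.
 */
#include <stdio.h>

#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))

int main(void)
{
	unsigned int old = 0x12345678;

	/* Replace only the low 16 bits: prints 0x1234abcd */
	printf("0x%08x\n", MASKED(old, 0x0000abcdu, 0x0000ffffu));
	return 0;
}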
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const __be32 *mpls_lse, const __be32 *mask)
{
__be32 *stack;
+ __be32 lse;
int err;
err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
@@ -196,14 +201,16 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
return err;
stack = (__be32 *)skb_mpls_header(skb);
+ lse = MASKED(*stack, *mpls_lse, *mask);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
- __be32 diff[] = { ~(*stack), *mpls_lse };
+ __be32 diff[] = { ~(*stack), lse };
+
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
~skb->csum);
}
- *stack = *mpls_lse;
- key->mpls.top_lse = *mpls_lse;
+ *stack = lse;
+ flow_key->mpls.top_lse = lse;
return 0;
}
@@ -212,7 +219,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
int err;
err = skb_vlan_pop(skb);
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
invalidate_flow_key(key);
else
key->eth.tci = 0;
@@ -222,7 +229,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_vlan *vlan)
{
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
invalidate_flow_key(key);
else
key->eth.tci = vlan->vlan_tci;
@@ -230,23 +237,39 @@ static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
-static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
- const struct ovs_key_ethernet *eth_key)
+/* 'src' is already properly masked. */
+static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
+{
+ u16 *dst = (u16 *)dst_;
+ const u16 *src = (const u16 *)src_;
+ const u16 *mask = (const u16 *)mask_;
+
+ SET_MASKED(dst[0], src[0], mask[0]);
+ SET_MASKED(dst[1], src[1], mask[1]);
+ SET_MASKED(dst[2], src[2], mask[2]);
+}
+
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_ethernet *key,
+ const struct ovs_key_ethernet *mask)
{
int err;
+
err = skb_ensure_writable(skb, ETH_HLEN);
if (unlikely(err))
return err;
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
- ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
- ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
+ ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
+ mask->eth_src);
+ ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
+ mask->eth_dst);
ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
- ether_addr_copy(key->eth.src, eth_key->eth_src);
- ether_addr_copy(key->eth.dst, eth_key->eth_dst);
+ ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
+ ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
return 0;
}
@@ -304,6 +327,15 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
}
}
+static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
+ const __be32 mask[4], __be32 masked[4])
+{
+ masked[0] = MASKED(old[0], addr[0], mask[0]);
+ masked[1] = MASKED(old[1], addr[1], mask[1]);
+ masked[2] = MASKED(old[2], addr[2], mask[2]);
+ masked[3] = MASKED(old[3], addr[3], mask[3]);
+}
+
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
__be32 addr[4], const __be32 new_addr[4],
bool recalculate_csum)
@@ -315,29 +347,29 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
memcpy(addr, new_addr, sizeof(__be32[4]));
}
-static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
+static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
- nh->priority = tc >> 4;
- nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
+ /* Bits 21-24 are always unmasked, so this retains their values. */
+ SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+ SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+ SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
+ u8 mask)
{
- nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
- nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
- nh->flow_lbl[2] = fl & 0x000000FF;
-}
+ new_ttl = MASKED(nh->ttl, new_ttl, mask);
-static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
-{
csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
nh->ttl = new_ttl;
}
-static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
- const struct ovs_key_ipv4 *ipv4_key)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_ipv4 *key,
+ const struct ovs_key_ipv4 *mask)
{
struct iphdr *nh;
+ __be32 new_addr;
int err;
err = skb_ensure_writable(skb, skb_network_offset(skb) +
@@ -347,36 +379,49 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
nh = ip_hdr(skb);
- if (ipv4_key->ipv4_src != nh->saddr) {
- set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
- key->ipv4.addr.src = ipv4_key->ipv4_src;
- }
+ /* Setting IP addresses is typically only a side effect of
+ * matching on them in the current userspace implementation, so it
+ * makes sense to check if the value actually changed.
+ */
+ if (mask->ipv4_src) {
+ new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
- if (ipv4_key->ipv4_dst != nh->daddr) {
- set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
- key->ipv4.addr.dst = ipv4_key->ipv4_dst;
+ if (unlikely(new_addr != nh->saddr)) {
+ set_ip_addr(skb, nh, &nh->saddr, new_addr);
+ flow_key->ipv4.addr.src = new_addr;
+ }
}
+ if (mask->ipv4_dst) {
+ new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
- if (ipv4_key->ipv4_tos != nh->tos) {
- ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
- key->ip.tos = nh->tos;
+ if (unlikely(new_addr != nh->daddr)) {
+ set_ip_addr(skb, nh, &nh->daddr, new_addr);
+ flow_key->ipv4.addr.dst = new_addr;
+ }
}
-
- if (ipv4_key->ipv4_ttl != nh->ttl) {
- set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
- key->ip.ttl = ipv4_key->ipv4_ttl;
+ if (mask->ipv4_tos) {
+ ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
+ flow_key->ip.tos = nh->tos;
+ }
+ if (mask->ipv4_ttl) {
+ set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
+ flow_key->ip.ttl = nh->ttl;
}
return 0;
}
-static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
- const struct ovs_key_ipv6 *ipv6_key)
+static bool is_ipv6_mask_nonzero(const __be32 addr[4])
+{
+ return !!(addr[0] | addr[1] | addr[2] | addr[3]);
+}
+
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_ipv6 *key,
+ const struct ovs_key_ipv6 *mask)
{
struct ipv6hdr *nh;
int err;
- __be32 *saddr;
- __be32 *daddr;
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(struct ipv6hdr));
@@ -384,71 +429,77 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
return err;
nh = ipv6_hdr(skb);
- saddr = (__be32 *)&nh->saddr;
- daddr = (__be32 *)&nh->daddr;
-
- if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
- set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
- ipv6_key->ipv6_src, true);
- memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
- sizeof(ipv6_key->ipv6_src));
- }
- if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
+ /* Setting IP addresses is typically only a side effect of
+ * matching on them in the current userspace implementation, so it
+ * makes sense to check if the value actually changed.
+ */
+ if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
+ __be32 *saddr = (__be32 *)&nh->saddr;
+ __be32 masked[4];
+
+ mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+
+ if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+ set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+ true);
+ memcpy(&flow_key->ipv6.addr.src, masked,
+ sizeof(flow_key->ipv6.addr.src));
+ }
+ }
+ if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
unsigned int offset = 0;
int flags = IP6_FH_F_SKIP_RH;
bool recalc_csum = true;
-
- if (ipv6_ext_hdr(nh->nexthdr))
- recalc_csum = ipv6_find_hdr(skb, &offset,
- NEXTHDR_ROUTING, NULL,
- &flags) != NEXTHDR_ROUTING;
-
- set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
- ipv6_key->ipv6_dst, recalc_csum);
- memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
- sizeof(ipv6_key->ipv6_dst));
+ __be32 *daddr = (__be32 *)&nh->daddr;
+ __be32 masked[4];
+
+ mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
+
+ if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
+ if (ipv6_ext_hdr(nh->nexthdr))
+ recalc_csum = (ipv6_find_hdr(skb, &offset,
+ NEXTHDR_ROUTING,
+ NULL, &flags)
+ != NEXTHDR_ROUTING);
+
+ set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+ recalc_csum);
+ memcpy(&flow_key->ipv6.addr.dst, masked,
+ sizeof(flow_key->ipv6.addr.dst));
+ }
+ }
+ if (mask->ipv6_tclass) {
+ ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+ flow_key->ip.tos = ipv6_get_dsfield(nh);
+ }
+ if (mask->ipv6_label) {
+ set_ipv6_fl(nh, ntohl(key->ipv6_label),
+ ntohl(mask->ipv6_label));
+ flow_key->ipv6.label =
+ *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+ }
+ if (mask->ipv6_hlimit) {
+ SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+ flow_key->ip.ttl = nh->hop_limit;
}
-
- set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
- key->ip.tos = ipv6_get_dsfield(nh);
-
- set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
- key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
-
- nh->hop_limit = ipv6_key->ipv6_hlimit;
- key->ip.ttl = ipv6_key->ipv6_hlimit;
return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
- __be16 new_port, __sum16 *check)
+ __be16 new_port, __sum16 *check)
{
inet_proto_csum_replace2(check, skb, *port, new_port, 0);
*port = new_port;
- skb_clear_hash(skb);
-}
-
-static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
-{
- struct udphdr *uh = udp_hdr(skb);
-
- if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
- set_tp_port(skb, port, new_port, &uh->check);
-
- if (!uh->check)
- uh->check = CSUM_MANGLED_0;
- } else {
- *port = new_port;
- skb_clear_hash(skb);
- }
}
-static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
- const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_udp *key,
+ const struct ovs_key_udp *mask)
{
struct udphdr *uh;
+ __be16 src, dst;
int err;
err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -457,23 +508,40 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
return err;
uh = udp_hdr(skb);
- if (udp_port_key->udp_src != uh->source) {
- set_udp_port(skb, &uh->source, udp_port_key->udp_src);
- key->tp.src = udp_port_key->udp_src;
- }
+ /* At least one of the masks is non-zero, so do not bother checking them. */
+ src = MASKED(uh->source, key->udp_src, mask->udp_src);
+ dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
- if (udp_port_key->udp_dst != uh->dest) {
- set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
- key->tp.dst = udp_port_key->udp_dst;
+ if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (likely(src != uh->source)) {
+ set_tp_port(skb, &uh->source, src, &uh->check);
+ flow_key->tp.src = src;
+ }
+ if (likely(dst != uh->dest)) {
+ set_tp_port(skb, &uh->dest, dst, &uh->check);
+ flow_key->tp.dst = dst;
+ }
+
+ if (unlikely(!uh->check))
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ uh->source = src;
+ uh->dest = dst;
+ flow_key->tp.src = src;
+ flow_key->tp.dst = dst;
}
+ skb_clear_hash(skb);
+
return 0;
}
-static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
- const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_tcp *key,
+ const struct ovs_key_tcp *mask)
{
struct tcphdr *th;
+ __be16 src, dst;
int err;
err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -482,50 +550,49 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
return err;
th = tcp_hdr(skb);
- if (tcp_port_key->tcp_src != th->source) {
- set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
- key->tp.src = tcp_port_key->tcp_src;
+ src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+ if (likely(src != th->source)) {
+ set_tp_port(skb, &th->source, src, &th->check);
+ flow_key->tp.src = src;
}
-
- if (tcp_port_key->tcp_dst != th->dest) {
- set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
- key->tp.dst = tcp_port_key->tcp_dst;
+ dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+ if (likely(dst != th->dest)) {
+ set_tp_port(skb, &th->dest, dst, &th->check);
+ flow_key->tp.dst = dst;
}
+ skb_clear_hash(skb);
return 0;
}
-static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
- const struct ovs_key_sctp *sctp_port_key)
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_sctp *key,
+ const struct ovs_key_sctp *mask)
{
+ unsigned int sctphoff = skb_transport_offset(skb);
struct sctphdr *sh;
+ __le32 old_correct_csum, new_csum, old_csum;
int err;
- unsigned int sctphoff = skb_transport_offset(skb);
err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
if (unlikely(err))
return err;
sh = sctp_hdr(skb);
- if (sctp_port_key->sctp_src != sh->source ||
- sctp_port_key->sctp_dst != sh->dest) {
- __le32 old_correct_csum, new_csum, old_csum;
+ old_csum = sh->checksum;
+ old_correct_csum = sctp_compute_cksum(skb, sctphoff);
- old_csum = sh->checksum;
- old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+ sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
+ sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
- sh->source = sctp_port_key->sctp_src;
- sh->dest = sctp_port_key->sctp_dst;
+ new_csum = sctp_compute_cksum(skb, sctphoff);
- new_csum = sctp_compute_cksum(skb, sctphoff);
+ /* Carry any checksum errors through. */
+ sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
- /* Carry any checksum errors through. */
- sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
-
- skb_clear_hash(skb);
- key->tp.src = sctp_port_key->sctp_src;
- key->tp.dst = sctp_port_key->sctp_dst;
- }
+ skb_clear_hash(skb);
+ flow_key->tp.src = sh->source;
+ flow_key->tp.dst = sh->dest;
return 0;
}
@@ -653,52 +720,77 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
key->ovs_flow_hash = hash;
}
-static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
- const struct nlattr *nested_attr)
+static int execute_set_action(struct sk_buff *skb,
+ struct sw_flow_key *flow_key,
+ const struct nlattr *a)
+{
+ /* Only tunnel set execution is supported without a mask. */
+ if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
+ OVS_CB(skb)->egress_tun_info = nla_data(a);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Mask is at the midpoint of the data. */
+#define get_mask(a, type) ((const type)nla_data(a) + 1)
+
+static int execute_masked_set_action(struct sk_buff *skb,
+ struct sw_flow_key *flow_key,
+ const struct nlattr *a)
{
int err = 0;
- switch (nla_type(nested_attr)) {
+ switch (nla_type(a)) {
case OVS_KEY_ATTR_PRIORITY:
- skb->priority = nla_get_u32(nested_attr);
- key->phy.priority = skb->priority;
+ SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+ flow_key->phy.priority = skb->priority;
break;
case OVS_KEY_ATTR_SKB_MARK:
- skb->mark = nla_get_u32(nested_attr);
- key->phy.skb_mark = skb->mark;
+ SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+ flow_key->phy.skb_mark = skb->mark;
break;
case OVS_KEY_ATTR_TUNNEL_INFO:
- OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
+ /* Masked data not supported for tunnel. */
+ err = -EINVAL;
break;
case OVS_KEY_ATTR_ETHERNET:
- err = set_eth_addr(skb, key, nla_data(nested_attr));
+ err = set_eth_addr(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_ethernet *));
break;
case OVS_KEY_ATTR_IPV4:
- err = set_ipv4(skb, key, nla_data(nested_attr));
+ err = set_ipv4(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_ipv4 *));
break;
case OVS_KEY_ATTR_IPV6:
- err = set_ipv6(skb, key, nla_data(nested_attr));
+ err = set_ipv6(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_ipv6 *));
break;
case OVS_KEY_ATTR_TCP:
- err = set_tcp(skb, key, nla_data(nested_attr));
+ err = set_tcp(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_tcp *));
break;
case OVS_KEY_ATTR_UDP:
- err = set_udp(skb, key, nla_data(nested_attr));
+ err = set_udp(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_udp *));
break;
case OVS_KEY_ATTR_SCTP:
- err = set_sctp(skb, key, nla_data(nested_attr));
+ err = set_sctp(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_sctp *));
break;
case OVS_KEY_ATTR_MPLS:
- err = set_mpls(skb, key, nla_data(nested_attr));
+ err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
+ __be32 *));
break;
}
@@ -818,6 +910,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
err = execute_set_action(skb, key, nla_data(a));
break;
+ case OVS_ACTION_ATTR_SET_MASKED:
+ case OVS_ACTION_ATTR_SET_TO_MASKED:
+ err = execute_masked_set_action(skb, key, nla_data(a));
+ break;
+
case OVS_ACTION_ATTR_SAMPLE:
err = sample(dp, skb, key, a);
break;
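Editor's note: the masked set helpers above (set_ipv4(), set_udp(), set_tcp(), execute_masked_set_action()) all reduce to the same combine step: bits covered by the mask come from the action's key, the remaining bits are kept from the packet header. The MASKED()/SET_MASKED() macros themselves are defined earlier in actions.c and are not visible in this hunk, so the standalone C sketch below only illustrates the assumed semantics; it is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of the kernel's MASKED() helper: take the bits
 * selected by 'mask' from 'new' and keep the remaining bits of 'old'. */
#define MASKED(old, new, mask) (((new) & (mask)) | ((old) & ~(mask)))

int main(void)
{
	uint8_t old_ttl = 64, key_ttl = 1;

	/* Full mask: the field is replaced outright (plain "set"). */
	printf("ttl=%u\n", MASKED(old_ttl, key_ttl, (uint8_t)0xff));

	/* Zero mask: the field is left untouched. */
	printf("ttl=%u\n", MASKED(old_ttl, key_ttl, (uint8_t)0x00));

	/* Partial mask: only the low nibble is rewritten (0x40 -> 0x41). */
	printf("ttl=0x%02x\n", MASKED(old_ttl, key_ttl, (uint8_t)0x0f));

	return 0;
}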
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4e9a5f035cbc..ae5e77cdc0ca 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -65,6 +65,8 @@ static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;
+static const struct nla_policy flow_policy[];
+
static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP,
};
@@ -419,7 +421,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
if (!dp_ifindex)
return -ENODEV;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
@@ -461,10 +463,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
0, upcall_info->cmd);
upcall->dp_ifindex = dp_ifindex;
- nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- err = ovs_nla_put_flow(key, key, user_skb);
+ err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
BUG_ON(err);
- nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct vport *input_vport;
int len;
int err;
- bool log = !a[OVS_FLOW_ATTR_PROBE];
+ bool log = !a[OVS_PACKET_ATTR_PROBE];
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+ [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
};
static const struct genl_ops dp_packet_genl_ops[] = {
@@ -663,46 +664,48 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
}
}
-static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
+static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
- return NLMSG_ALIGN(sizeof(struct ovs_header))
- + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
- + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
- + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
- + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
- + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
- + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
+ return ovs_identifier_is_ufid(sfid) &&
+ !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}
-/* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
- struct sk_buff *skb)
+static bool should_fill_mask(uint32_t ufid_flags)
{
- struct nlattr *nla;
- int err;
+ return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
+}
- /* Fill flow key. */
- nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
- if (!nla)
- return -EMSGSIZE;
+static bool should_fill_actions(uint32_t ufid_flags)
+{
+ return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
+}
- err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
- if (err)
- return err;
+static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
+ const struct sw_flow_id *sfid,
+ uint32_t ufid_flags)
+{
+ size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
- nla_nest_end(skb, nla);
+ /* OVS_FLOW_ATTR_UFID */
+ if (sfid && ovs_identifier_is_ufid(sfid))
+ len += nla_total_size(sfid->ufid_len);
- /* Fill flow mask. */
- nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
- if (!nla)
- return -EMSGSIZE;
+ /* OVS_FLOW_ATTR_KEY */
+ if (!sfid || should_fill_key(sfid, ufid_flags))
+ len += nla_total_size(ovs_key_attr_size());
- err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
- if (err)
- return err;
+ /* OVS_FLOW_ATTR_MASK */
+ if (should_fill_mask(ufid_flags))
+ len += nla_total_size(ovs_key_attr_size());
- nla_nest_end(skb, nla);
- return 0;
+ /* OVS_FLOW_ATTR_ACTIONS */
+ if (should_fill_actions(ufid_flags))
+ len += nla_total_size(acts->actions_len);
+
+ return len
+ + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+ + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+ + nla_total_size(8); /* OVS_FLOW_ATTR_USED */
}
/* Called with ovs_mutex or RCU read lock. */
@@ -773,7 +776,7 @@ static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
struct sk_buff *skb, u32 portid,
- u32 seq, u32 flags, u8 cmd)
+ u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
const int skb_orig_len = skb->len;
struct ovs_header *ovs_header;
@@ -786,19 +789,34 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
ovs_header->dp_ifindex = dp_ifindex;
- err = ovs_flow_cmd_fill_match(flow, skb);
+ err = ovs_nla_put_identifier(flow, skb);
if (err)
goto error;
+ if (should_fill_key(&flow->id, ufid_flags)) {
+ err = ovs_nla_put_masked_key(flow, skb);
+ if (err)
+ goto error;
+ }
+
+ if (should_fill_mask(ufid_flags)) {
+ err = ovs_nla_put_mask(flow, skb);
+ if (err)
+ goto error;
+ }
+
err = ovs_flow_cmd_fill_stats(flow, skb);
if (err)
goto error;
- err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
- if (err)
- goto error;
+ if (should_fill_actions(ufid_flags)) {
+ err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
+ if (err)
+ goto error;
+ }
- return genlmsg_end(skb, ovs_header);
+ genlmsg_end(skb, ovs_header);
+ return 0;
error:
genlmsg_cancel(skb, ovs_header);
@@ -807,15 +825,19 @@ error:
/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
+ const struct sw_flow_id *sfid,
struct genl_info *info,
- bool always)
+ bool always,
+ uint32_t ufid_flags)
{
struct sk_buff *skb;
+ size_t len;
if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
return NULL;
- skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
+ len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
+ skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -826,19 +848,19 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
int dp_ifindex,
struct genl_info *info, u8 cmd,
- bool always)
+ bool always, u32 ufid_flags)
{
struct sk_buff *skb;
int retval;
- skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
- always);
+ skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
+ &flow->id, info, always, ufid_flags);
if (IS_ERR_OR_NULL(skb))
return skb;
retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
info->snd_portid, info->snd_seq, 0,
- cmd);
+ cmd, ufid_flags);
BUG_ON(retval < 0);
return skb;
}
@@ -847,12 +869,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
- struct sw_flow *flow, *new_flow;
+ struct sw_flow *flow = NULL, *new_flow;
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
+ struct sw_flow_key key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int error;
bool log = !a[OVS_FLOW_ATTR_PROBE];
@@ -877,13 +901,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* Extract key. */
- ovs_match_init(&match, &new_flow->unmasked_key, &mask);
+ ovs_match_init(&match, &key, &mask);
error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
- ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
+ ovs_flow_mask_key(&new_flow->key, &key, &mask);
+
+ /* Extract flow identifier. */
+ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+ &key, log);
+ if (error)
+ goto err_kfree_flow;
/* Validate actions. */
error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
@@ -893,7 +923,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_kfree_flow;
}
- reply = ovs_flow_cmd_alloc_info(acts, info, false);
+ reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
+ ufid_flags);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
goto err_kfree_acts;
@@ -905,8 +936,12 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
error = -ENODEV;
goto err_unlock_ovs;
}
+
/* Check if this is a duplicate flow */
- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
+ if (ovs_identifier_is_ufid(&new_flow->id))
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
+ if (!flow)
+ flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
@@ -922,7 +957,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
- OVS_FLOW_CMD_NEW);
+ OVS_FLOW_CMD_NEW,
+ ufid_flags);
BUG_ON(error < 0);
}
ovs_unlock();
@@ -940,10 +976,15 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
error = -EEXIST;
goto err_unlock_ovs;
}
- /* The unmasked key has to be the same for flow updates. */
- if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
- /* Look for any overlapping flow. */
- flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ /* The flow identifier has to be the same for flow updates.
+ * Look for any overlapping flow.
+ */
+ if (unlikely(!ovs_flow_cmp(flow, &match))) {
+ if (ovs_identifier_is_key(&flow->id))
+ flow = ovs_flow_tbl_lookup_exact(&dp->table,
+ &match);
+ else /* UFID matches but key is different */
+ flow = NULL;
if (!flow) {
error = -ENOENT;
goto err_unlock_ovs;
@@ -958,7 +999,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
- OVS_FLOW_CMD_NEW);
+ OVS_FLOW_CMD_NEW,
+ ufid_flags);
BUG_ON(error < 0);
}
ovs_unlock();
@@ -1014,8 +1056,11 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct sw_flow_actions *old_acts = NULL, *acts = NULL;
struct sw_flow_match match;
+ struct sw_flow_id sfid;
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int error;
bool log = !a[OVS_FLOW_ATTR_PROBE];
+ bool ufid_present;
/* Extract key. */
error = -EINVAL;
@@ -1024,6 +1069,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
goto error;
}
+ ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
ovs_match_init(&match, &key, &mask);
error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
@@ -1040,7 +1086,8 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
}
/* Can allocate before locking if have acts. */
- reply = ovs_flow_cmd_alloc_info(acts, info, false);
+ reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
+ ufid_flags);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
goto err_kfree_acts;
@@ -1054,7 +1101,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
}
/* Check that the flow exists. */
- flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (ufid_present)
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
+ else
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
error = -ENOENT;
goto err_unlock_ovs;
@@ -1070,13 +1120,16 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
- OVS_FLOW_CMD_NEW);
+ OVS_FLOW_CMD_NEW,
+ ufid_flags);
BUG_ON(error < 0);
}
} else {
/* Could not alloc without acts before locking. */
reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
- info, OVS_FLOW_CMD_NEW, false);
+ info, OVS_FLOW_CMD_NEW, false,
+ ufid_flags);
+
if (unlikely(IS_ERR(reply))) {
error = PTR_ERR(reply);
goto err_unlock_ovs;
@@ -1113,17 +1166,22 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
struct sw_flow *flow;
struct datapath *dp;
struct sw_flow_match match;
- int err;
+ struct sw_flow_id ufid;
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
+ int err = 0;
bool log = !a[OVS_FLOW_ATTR_PROBE];
+ bool ufid_present;
- if (!a[OVS_FLOW_ATTR_KEY]) {
+ ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
+ if (a[OVS_FLOW_ATTR_KEY]) {
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+ log);
+ } else if (!ufid_present) {
OVS_NLERR(log,
"Flow get message rejected, Key attribute missing.");
- return -EINVAL;
+ err = -EINVAL;
}
-
- ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log);
if (err)
return err;
@@ -1134,14 +1192,17 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (ufid_present)
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
+ else
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
}
reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
- OVS_FLOW_CMD_NEW, true);
+ OVS_FLOW_CMD_NEW, true, ufid_flags);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
goto unlock;
@@ -1160,13 +1221,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
struct sk_buff *reply;
- struct sw_flow *flow;
+ struct sw_flow *flow = NULL;
struct datapath *dp;
struct sw_flow_match match;
+ struct sw_flow_id ufid;
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
int err;
bool log = !a[OVS_FLOW_ATTR_PROBE];
+ bool ufid_present;
- if (likely(a[OVS_FLOW_ATTR_KEY])) {
+ ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
+ if (a[OVS_FLOW_ATTR_KEY]) {
ovs_match_init(&match, &key, NULL);
err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
log);
@@ -1181,12 +1246,15 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
+ if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
err = ovs_flow_tbl_flush(&dp->table);
goto unlock;
}
- flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (ufid_present)
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
+ else
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
err = -ENOENT;
goto unlock;
@@ -1196,14 +1264,15 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
ovs_unlock();
reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
- info, false);
+ &flow->id, info, false, ufid_flags);
if (likely(reply)) {
if (likely(!IS_ERR(reply))) {
rcu_read_lock(); /*To keep RCU checker happy. */
err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
- OVS_FLOW_CMD_DEL);
+ OVS_FLOW_CMD_DEL,
+ ufid_flags);
rcu_read_unlock();
BUG_ON(err < 0);
@@ -1222,9 +1291,18 @@ unlock:
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct nlattr *a[__OVS_FLOW_ATTR_MAX];
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
struct table_instance *ti;
struct datapath *dp;
+ u32 ufid_flags;
+ int err;
+
+ err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
+ OVS_FLOW_ATTR_MAX, flow_policy);
+ if (err)
+ return err;
+ ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
rcu_read_lock();
dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1247,7 +1325,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- OVS_FLOW_CMD_NEW) < 0)
+ OVS_FLOW_CMD_NEW, ufid_flags) < 0)
break;
cb->args[0] = bucket;
@@ -1263,6 +1341,8 @@ static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
+ [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
+ [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};
static const struct genl_ops dp_flow_genl_ops[] = {
@@ -1348,7 +1428,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
goto nla_put_failure;
- return genlmsg_end(skb, ovs_header);
+ genlmsg_end(skb, ovs_header);
+ return 0;
nla_put_failure:
genlmsg_cancel(skb, ovs_header);
@@ -1722,7 +1803,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
if (err == -EMSGSIZE)
goto error;
- return genlmsg_end(skb, ovs_header);
+ genlmsg_end(skb, ovs_header);
+ return 0;
nla_put_failure:
err = -EMSGSIZE;
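Editor's note: the should_fill_key()/should_fill_mask()/should_fill_actions() checks above are driven by a flags word that userspace may attach as OVS_FLOW_ATTR_UFID_FLAGS. A rough userspace-side sketch of composing that word follows; the OVS_UFID_F_OMIT_* values are assumed to mirror the uapi additions made elsewhere in this series and are redefined locally only so the sketch compiles on its own.

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the uapi flags introduced by this series. */
#define OVS_UFID_F_OMIT_KEY	(1 << 0)
#define OVS_UFID_F_OMIT_MASK	(1 << 1)
#define OVS_UFID_F_OMIT_ACTIONS	(1 << 2)

int main(void)
{
	/* Ask the kernel to skip key, mask and actions in flow replies,
	 * leaving essentially the UFID plus statistics. */
	uint32_t ufid_flags = OVS_UFID_F_OMIT_KEY |
			      OVS_UFID_F_OMIT_MASK |
			      OVS_UFID_F_OMIT_ACTIONS;

	/* This value would go into an OVS_FLOW_ATTR_UFID_FLAGS (NLA_U32)
	 * attribute of the request; the netlink plumbing is omitted here. */
	printf("ufid_flags=0x%x\n", ufid_flags);
	return 0;
}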
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index da2fae0873a5..e2c348b8baca 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -70,7 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
{
struct flow_stats *stats;
int node = numa_node_id();
- int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
stats = rcu_dereference(flow->stats[node]);
@@ -472,7 +472,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
*/
key->eth.tci = 0;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
key->eth.tci = htons(skb->vlan_tci);
else if (eth->h_proto == htons(ETH_P_8021Q))
if (unlikely(parse_vlan(skb, key)))
@@ -691,7 +691,7 @@ int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
8)) - 1
> sizeof(key->tun_opts));
- memcpy(GENEVE_OPTS(key, tun_info->options_len),
+ memcpy(TUN_METADATA_OPTS(key, tun_info->options_len),
tun_info->options, tun_info->options_len);
key->tun_opts_len = tun_info->options_len;
} else {
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a8b30f334388..a076e445ccc2 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -53,7 +53,7 @@ struct ovs_key_ipv4_tunnel {
struct ovs_tunnel_info {
struct ovs_key_ipv4_tunnel tunnel;
- const struct geneve_opt *options;
+ const void *options;
u8 options_len;
};
@@ -61,10 +61,10 @@ struct ovs_tunnel_info {
* maximum size. This allows us to get the benefits of variable length
* matching for small options.
*/
-#define GENEVE_OPTS(flow_key, opt_len) \
- ((struct geneve_opt *)((flow_key)->tun_opts + \
- FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
- opt_len))
+#define TUN_METADATA_OFFSET(opt_len) \
+ (FIELD_SIZEOF(struct sw_flow_key, tun_opts) - opt_len)
+#define TUN_METADATA_OPTS(flow_key, opt_len) \
+ ((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
__be32 saddr, __be32 daddr,
@@ -73,7 +73,7 @@ static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
__be16 tp_dst,
__be64 tun_id,
__be16 tun_flags,
- const struct geneve_opt *opts,
+ const void *opts,
u8 opts_len)
{
tun_info->tunnel.tun_id = tun_id;
@@ -105,7 +105,7 @@ static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
__be16 tp_dst,
__be64 tun_id,
__be16 tun_flags,
- const struct geneve_opt *opts,
+ const void *opts,
u8 opts_len)
{
__ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
@@ -197,6 +197,16 @@ struct sw_flow_match {
struct sw_flow_mask *mask;
};
+#define MAX_UFID_LENGTH 16 /* 128 bits */
+
+struct sw_flow_id {
+ u32 ufid_len;
+ union {
+ u32 ufid[MAX_UFID_LENGTH / 4];
+ struct sw_flow_key *unmasked_key;
+ };
+};
+
struct sw_flow_actions {
struct rcu_head rcu;
u32 actions_len;
@@ -213,13 +223,15 @@ struct flow_stats {
struct sw_flow {
struct rcu_head rcu;
- struct hlist_node hash_node[2];
- u32 hash;
+ struct {
+ struct hlist_node node[2];
+ u32 hash;
+ } flow_table, ufid_table;
int stats_last_writer; /* NUMA-node id of the last writer on
* 'stats[0]'.
*/
struct sw_flow_key key;
- struct sw_flow_key unmasked_key;
+ struct sw_flow_id id;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one
@@ -243,6 +255,16 @@ struct arp_eth_header {
unsigned char ar_tip[4]; /* target IP address */
} __packed;
+static inline bool ovs_identifier_is_ufid(const struct sw_flow_id *sfid)
+{
+ return sfid->ufid_len;
+}
+
+static inline bool ovs_identifier_is_key(const struct sw_flow_id *sfid)
+{
+ return !ovs_identifier_is_ufid(sfid);
+}
+
void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
const struct sk_buff *);
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
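Editor's note: the sw_flow_id added above is effectively a tagged union. A non-zero ufid_len means the flow is identified by a caller-supplied UFID of up to MAX_UFID_LENGTH bytes; a zero length means the union instead carries a pointer to a separately allocated unmasked key, preserving the pre-UFID behaviour. The minimal standalone sketch below mirrors that convention with stand-in types rather than the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_UFID_LENGTH 16	/* 128 bits, as in the kernel definition */

struct example_flow_id {
	uint32_t ufid_len;
	union {
		uint32_t ufid[MAX_UFID_LENGTH / 4];
		void *unmasked_key;	/* stands in for struct sw_flow_key * */
	};
};

/* Mirrors ovs_identifier_is_ufid()/ovs_identifier_is_key() above. */
static bool id_is_ufid(const struct example_flow_id *id)
{
	return id->ufid_len != 0;
}

int main(void)
{
	struct example_flow_id id = { 0 };
	const uint8_t user_ufid[MAX_UFID_LENGTH] = { 0xde, 0xad, 0xbe, 0xef };

	/* Userspace supplied a UFID: copy it in and record its length,
	 * which flips the identifier into "UFID mode". */
	memcpy(id.ufid, user_ufid, sizeof(user_ufid));
	id.ufid_len = sizeof(user_ufid);

	return id_is_ufid(&id) ? 0 : 1;
}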
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d1eecf707613..993281e6278d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -49,6 +49,14 @@
#include <net/mpls.h>
#include "flow_netlink.h"
+#include "vport-vxlan.h"
+
+struct ovs_len_tbl {
+ int len;
+ const struct ovs_len_tbl *next;
+};
+
+#define OVS_ATTR_NESTED -1
static void update_range(struct sw_flow_match *match,
size_t offset, size_t size, bool is_mask)
@@ -261,6 +269,9 @@ size_t ovs_tun_key_attr_size(void)
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
+ nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
+ /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
+ * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
+ */
+ nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
+ nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}
@@ -289,29 +300,45 @@ size_t ovs_key_attr_size(void)
+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
+static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
+ [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
+ [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
+ [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = sizeof(u32) },
+ [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
+ [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
+ [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
+ [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
+ [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
+ [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
+ [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
+ [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED },
+ [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED },
+};
+
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
-static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
- [OVS_KEY_ATTR_ENCAP] = -1,
- [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
- [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
- [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
- [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
- [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
- [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
- [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
- [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
- [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
- [OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
- [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
- [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
- [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
- [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
- [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
- [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
- [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
- [OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
- [OVS_KEY_ATTR_TUNNEL] = -1,
- [OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls),
+static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+ [OVS_KEY_ATTR_ENCAP] = { .len = OVS_ATTR_NESTED },
+ [OVS_KEY_ATTR_PRIORITY] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_IN_PORT] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_SKB_MARK] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
+ [OVS_KEY_ATTR_VLAN] = { .len = sizeof(__be16) },
+ [OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
+ [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
+ [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
+ [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
+ [OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
+ [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
+ [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
+ [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
+ [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
+ [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
+ [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
+ [OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
+ .next = ovs_tunnel_key_lens, },
+ [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
};
static bool is_all_zero(const u8 *fp, size_t size)
@@ -352,8 +379,8 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
return -EINVAL;
}
- expected_len = ovs_key_lens[type];
- if (nla_len(nla) != expected_len && expected_len != -1) {
+ expected_len = ovs_key_lens[type].len;
+ if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) {
OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
type, nla_len(nla), expected_len);
return -EINVAL;
@@ -432,13 +459,47 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a,
SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
}
- opt_key_offset = (unsigned long)GENEVE_OPTS((struct sw_flow_key *)0,
- nla_len(a));
+ opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
nla_len(a), is_mask);
return 0;
}
+static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = {
+ [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 },
+};
+
+static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
+ struct sw_flow_match *match, bool is_mask,
+ bool log)
+{
+ struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
+ unsigned long opt_key_offset;
+ struct ovs_vxlan_opts opts;
+ int err;
+
+ BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
+
+ err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
+ if (err < 0)
+ return err;
+
+ memset(&opts, 0, sizeof(opts));
+
+ if (tb[OVS_VXLAN_EXT_GBP])
+ opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]);
+
+ if (!is_mask)
+ SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
+ else
+ SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
+
+ opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
+ SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
+ is_mask);
+ return 0;
+}
+
static int ipv4_tun_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
@@ -447,35 +508,22 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
int rem;
bool ttl = false;
__be16 tun_flags = 0;
+ int opts_type = 0;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
int err;
- static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
- [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
- [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
- [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
- [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
- [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
- [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
- [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
- [OVS_TUNNEL_KEY_ATTR_TP_SRC] = sizeof(u16),
- [OVS_TUNNEL_KEY_ATTR_TP_DST] = sizeof(u16),
- [OVS_TUNNEL_KEY_ATTR_OAM] = 0,
- [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1,
- };
-
if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
OVS_NLERR(log, "Tunnel attr %d out of range max %d",
type, OVS_TUNNEL_KEY_ATTR_MAX);
return -EINVAL;
}
- if (ovs_tunnel_key_lens[type] != nla_len(a) &&
- ovs_tunnel_key_lens[type] != -1) {
+ if (ovs_tunnel_key_lens[type].len != nla_len(a) &&
+ ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) {
OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
- type, nla_len(a), ovs_tunnel_key_lens[type]);
+ type, nla_len(a), ovs_tunnel_key_lens[type].len);
return -EINVAL;
}
@@ -520,11 +568,30 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
tun_flags |= TUNNEL_OAM;
break;
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
+ if (opts_type) {
+ OVS_NLERR(log, "Multiple metadata blocks provided");
+ return -EINVAL;
+ }
+
err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
if (err)
return err;
- tun_flags |= TUNNEL_OPTIONS_PRESENT;
+ tun_flags |= TUNNEL_GENEVE_OPT;
+ opts_type = type;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
+ if (opts_type) {
+ OVS_NLERR(log, "Multiple metadata blocks provided");
+ return -EINVAL;
+ }
+
+ err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
+ if (err)
+ return err;
+
+ tun_flags |= TUNNEL_VXLAN_OPT;
+ opts_type = type;
break;
default:
OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
@@ -553,13 +620,29 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
}
}
+ return opts_type;
+}
+
+static int vxlan_opt_to_nlattr(struct sk_buff *skb,
+ const void *tun_opts, int swkey_tun_opts_len)
+{
+ const struct ovs_vxlan_opts *opts = tun_opts;
+ struct nlattr *nla;
+
+ nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
+ if (!nla)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, nla);
return 0;
}
static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
const struct ovs_key_ipv4_tunnel *output,
- const struct geneve_opt *tun_opts,
- int swkey_tun_opts_len)
+ const void *tun_opts, int swkey_tun_opts_len)
{
if (output->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
@@ -590,18 +673,22 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
- if (tun_opts &&
- nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
- swkey_tun_opts_len, tun_opts))
- return -EMSGSIZE;
+ if (tun_opts) {
+ if (output->tun_flags & TUNNEL_GENEVE_OPT &&
+ nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
+ swkey_tun_opts_len, tun_opts))
+ return -EMSGSIZE;
+ else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
+ vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
+ return -EMSGSIZE;
+ }
return 0;
}
static int ipv4_tun_to_nlattr(struct sk_buff *skb,
const struct ovs_key_ipv4_tunnel *output,
- const struct geneve_opt *tun_opts,
- int swkey_tun_opts_len)
+ const void *tun_opts, int swkey_tun_opts_len)
{
struct nlattr *nla;
int err;
@@ -675,7 +762,7 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
}
if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
- is_mask, log))
+ is_mask, log) < 0)
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
@@ -915,18 +1002,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
return 0;
}
-static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
+static void nlattr_set(struct nlattr *attr, u8 val,
+ const struct ovs_len_tbl *tbl)
{
struct nlattr *nla;
int rem;
/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
- /* We assume that ovs_key_lens[type] == -1 means that type is a
- * nested attribute
- */
- if (is_attr_mask_key && ovs_key_lens[nla_type(nla)] == -1)
- nlattr_set(nla, val, false);
+ if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+ nlattr_set(nla, val, tbl[nla_type(nla)].next);
else
memset(nla_data(nla), val, nla_len(nla));
}
@@ -934,7 +1019,7 @@ static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
- nlattr_set(attr, val, true);
+ nlattr_set(attr, val, ovs_key_lens);
}
/**
@@ -1095,6 +1180,59 @@ free_newmask:
return err;
}
+static size_t get_ufid_len(const struct nlattr *attr, bool log)
+{
+ size_t len;
+
+ if (!attr)
+ return 0;
+
+ len = nla_len(attr);
+ if (len < 1 || len > MAX_UFID_LENGTH) {
+ OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
+ nla_len(attr), MAX_UFID_LENGTH);
+ return 0;
+ }
+
+ return len;
+}
+
+/* Initializes 'sfid', returning true if 'attr' contains a valid UFID,
+ * or false otherwise.
+ */
+bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
+ bool log)
+{
+ sfid->ufid_len = get_ufid_len(attr, log);
+ if (sfid->ufid_len)
+ memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
+
+ return sfid->ufid_len;
+}
+
+int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
+ const struct sw_flow_key *key, bool log)
+{
+ struct sw_flow_key *new_key;
+
+ if (ovs_nla_get_ufid(sfid, ufid, log))
+ return 0;
+
+ /* If UFID was not provided, use unmasked key. */
+ new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
+ if (!new_key)
+ return -ENOMEM;
+ memcpy(new_key, key, sizeof(*key));
+ sfid->unmasked_key = new_key;
+
+ return 0;
+}
+
+u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
+{
+ return attr ? nla_get_u32(attr) : 0;
+}
+
/**
* ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
* @key: Receives extracted in_port, priority, tun_key and skb_mark.
@@ -1131,12 +1269,12 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr,
return metadata_from_nlattrs(&match, &attrs, a, false, log);
}
-int ovs_nla_put_flow(const struct sw_flow_key *swkey,
- const struct sw_flow_key *output, struct sk_buff *skb)
+static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
+ const struct sw_flow_key *output, bool is_mask,
+ struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
- bool is_mask = (swkey != output);
if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
goto nla_put_failure;
@@ -1148,10 +1286,10 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
goto nla_put_failure;
if ((swkey->tun_key.ipv4_dst || is_mask)) {
- const struct geneve_opt *opts = NULL;
+ const void *opts = NULL;
if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
- opts = GENEVE_OPTS(output, swkey->tun_opts_len);
+ opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
swkey->tun_opts_len))
@@ -1346,6 +1484,49 @@ nla_put_failure:
return -EMSGSIZE;
}
+int ovs_nla_put_key(const struct sw_flow_key *swkey,
+ const struct sw_flow_key *output, int attr, bool is_mask,
+ struct sk_buff *skb)
+{
+ int err;
+ struct nlattr *nla;
+
+ nla = nla_nest_start(skb, attr);
+ if (!nla)
+ return -EMSGSIZE;
+ err = __ovs_nla_put_key(swkey, output, is_mask, skb);
+ if (err)
+ return err;
+ nla_nest_end(skb, nla);
+
+ return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
+{
+ if (ovs_identifier_is_ufid(&flow->id))
+ return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
+ flow->id.ufid);
+
+ return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
+ OVS_FLOW_ATTR_KEY, false, skb);
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
+{
+ return ovs_nla_put_key(&flow->mask->key, &flow->key,
+ OVS_FLOW_ATTR_KEY, false, skb);
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+{
+ return ovs_nla_put_key(&flow->key, &flow->mask->key,
+ OVS_FLOW_ATTR_MASK, true, skb);
+}
+
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
@@ -1514,16 +1695,6 @@ static int validate_and_copy_sample(const struct nlattr *attr,
return 0;
}
-static int validate_tp_port(const struct sw_flow_key *flow_key,
- __be16 eth_type)
-{
- if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) &&
- (flow_key->tp.src || flow_key->tp.dst))
- return 0;
-
- return -EINVAL;
-}
-
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
struct sw_flow_mask *mask)
@@ -1540,6 +1711,34 @@ void ovs_match_init(struct sw_flow_match *match,
}
}
+static int validate_geneve_opts(struct sw_flow_key *key)
+{
+ struct geneve_opt *option;
+ int opts_len = key->tun_opts_len;
+ bool crit_opt = false;
+
+ option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
+ while (opts_len > 0) {
+ int len;
+
+ if (opts_len < sizeof(*option))
+ return -EINVAL;
+
+ len = sizeof(*option) + option->length * 4;
+ if (len > opts_len)
+ return -EINVAL;
+
+ crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
+
+ option = (struct geneve_opt *)((u8 *)option + len);
+ opts_len -= len;
+ };
+
+ key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
+
+ return 0;
+}
+
static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_actions **sfa, bool log)
{
@@ -1547,36 +1746,23 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_key key;
struct ovs_tunnel_info *tun_info;
struct nlattr *a;
- int err, start;
+ int err, start, opts_type;
ovs_match_init(&match, &key, NULL);
- err = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
- if (err)
- return err;
+ opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
+ if (opts_type < 0)
+ return opts_type;
if (key.tun_opts_len) {
- struct geneve_opt *option = GENEVE_OPTS(&key,
- key.tun_opts_len);
- int opts_len = key.tun_opts_len;
- bool crit_opt = false;
-
- while (opts_len > 0) {
- int len;
-
- if (opts_len < sizeof(*option))
- return -EINVAL;
-
- len = sizeof(*option) + option->length * 4;
- if (len > opts_len)
- return -EINVAL;
-
- crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
-
- option = (struct geneve_opt *)((u8 *)option + len);
- opts_len -= len;
- };
-
- key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
+ switch (opts_type) {
+ case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
+ err = validate_geneve_opts(&key);
+ if (err < 0)
+ return err;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
+ break;
+ }
};
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
@@ -1597,9 +1783,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
* everything else will go away after flow setup. We can append
* it to tun_info and then point there.
*/
- memcpy((tun_info + 1), GENEVE_OPTS(&key, key.tun_opts_len),
- key.tun_opts_len);
- tun_info->options = (struct geneve_opt *)(tun_info + 1);
+ memcpy((tun_info + 1),
+ TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
+ tun_info->options = (tun_info + 1);
} else {
tun_info->options = NULL;
}
@@ -1609,21 +1795,43 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
return err;
}
+/* Return false if there are any non-masked bits set.
+ * Mask follows data immediately, before any netlink padding.
+ */
+static bool validate_masked(u8 *data, int len)
+{
+ u8 *mask = data + len;
+
+ while (len--)
+ if (*data++ & ~*mask++)
+ return false;
+
+ return true;
+}
+
static int validate_set(const struct nlattr *a,
const struct sw_flow_key *flow_key,
struct sw_flow_actions **sfa,
- bool *set_tun, __be16 eth_type, bool log)
+ bool *skip_copy, __be16 eth_type, bool masked, bool log)
{
const struct nlattr *ovs_key = nla_data(a);
int key_type = nla_type(ovs_key);
+ size_t key_len;
/* There can be only one key in a action */
if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
return -EINVAL;
+ key_len = nla_len(ovs_key);
+ if (masked)
+ key_len /= 2;
+
if (key_type > OVS_KEY_ATTR_MAX ||
- (ovs_key_lens[key_type] != nla_len(ovs_key) &&
- ovs_key_lens[key_type] != -1))
+ (ovs_key_lens[key_type].len != key_len &&
+ ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
+ return -EINVAL;
+
+ if (masked && !validate_masked(nla_data(ovs_key), key_len))
return -EINVAL;
switch (key_type) {
@@ -1640,7 +1848,10 @@ static int validate_set(const struct nlattr *a,
if (eth_p_mpls(eth_type))
return -EINVAL;
- *set_tun = true;
+ if (masked)
+ return -EINVAL; /* Masked tunnel set not supported. */
+
+ *skip_copy = true;
err = validate_and_copy_set_tun(a, sfa, log);
if (err)
return err;
@@ -1650,48 +1861,66 @@ static int validate_set(const struct nlattr *a,
if (eth_type != htons(ETH_P_IP))
return -EINVAL;
- if (!flow_key->ip.proto)
- return -EINVAL;
-
ipv4_key = nla_data(ovs_key);
- if (ipv4_key->ipv4_proto != flow_key->ip.proto)
- return -EINVAL;
- if (ipv4_key->ipv4_frag != flow_key->ip.frag)
- return -EINVAL;
+ if (masked) {
+ const struct ovs_key_ipv4 *mask = ipv4_key + 1;
+
+ /* Non-writeable fields. */
+ if (mask->ipv4_proto || mask->ipv4_frag)
+ return -EINVAL;
+ } else {
+ if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+ return -EINVAL;
+ if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+ return -EINVAL;
+ }
break;
case OVS_KEY_ATTR_IPV6:
if (eth_type != htons(ETH_P_IPV6))
return -EINVAL;
- if (!flow_key->ip.proto)
- return -EINVAL;
-
ipv6_key = nla_data(ovs_key);
- if (ipv6_key->ipv6_proto != flow_key->ip.proto)
- return -EINVAL;
- if (ipv6_key->ipv6_frag != flow_key->ip.frag)
- return -EINVAL;
+ if (masked) {
+ const struct ovs_key_ipv6 *mask = ipv6_key + 1;
+ /* Non-writeable fields. */
+ if (mask->ipv6_proto || mask->ipv6_frag)
+ return -EINVAL;
+
+ /* Invalid bits in the flow label mask? */
+ if (ntohl(mask->ipv6_label) & 0xFFF00000)
+ return -EINVAL;
+ } else {
+ if (ipv6_key->ipv6_proto != flow_key->ip.proto)
+ return -EINVAL;
+
+ if (ipv6_key->ipv6_frag != flow_key->ip.frag)
+ return -EINVAL;
+ }
if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
return -EINVAL;
break;
case OVS_KEY_ATTR_TCP:
- if (flow_key->ip.proto != IPPROTO_TCP)
+ if ((eth_type != htons(ETH_P_IP) &&
+ eth_type != htons(ETH_P_IPV6)) ||
+ flow_key->ip.proto != IPPROTO_TCP)
return -EINVAL;
- return validate_tp_port(flow_key, eth_type);
+ break;
case OVS_KEY_ATTR_UDP:
- if (flow_key->ip.proto != IPPROTO_UDP)
+ if ((eth_type != htons(ETH_P_IP) &&
+ eth_type != htons(ETH_P_IPV6)) ||
+ flow_key->ip.proto != IPPROTO_UDP)
return -EINVAL;
- return validate_tp_port(flow_key, eth_type);
+ break;
case OVS_KEY_ATTR_MPLS:
if (!eth_p_mpls(eth_type))
@@ -1699,15 +1928,45 @@ static int validate_set(const struct nlattr *a,
break;
case OVS_KEY_ATTR_SCTP:
- if (flow_key->ip.proto != IPPROTO_SCTP)
+ if ((eth_type != htons(ETH_P_IP) &&
+ eth_type != htons(ETH_P_IPV6)) ||
+ flow_key->ip.proto != IPPROTO_SCTP)
return -EINVAL;
- return validate_tp_port(flow_key, eth_type);
+ break;
default:
return -EINVAL;
}
+ /* Convert non-masked non-tunnel set actions to masked set actions. */
+ if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
+ int start, len = key_len * 2;
+ struct nlattr *at;
+
+ *skip_copy = true;
+
+ start = add_nested_action_start(sfa,
+ OVS_ACTION_ATTR_SET_TO_MASKED,
+ log);
+ if (start < 0)
+ return start;
+
+ at = __add_action(sfa, key_type, NULL, len, log);
+ if (IS_ERR(at))
+ return PTR_ERR(at);
+
+ memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
+ memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */
+ /* Clear non-writeable bits from otherwise writeable fields. */
+ if (key_type == OVS_KEY_ATTR_IPV6) {
+ struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
+
+ mask->ipv6_label &= htonl(0x000FFFFF);
+ }
+ add_nested_action_end(*sfa, start);
+ }
+
return 0;
}
@@ -1769,6 +2028,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
[OVS_ACTION_ATTR_POP_VLAN] = 0,
[OVS_ACTION_ATTR_SET] = (u32)-1,
+ [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
};
@@ -1864,7 +2124,14 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
- &skip_copy, eth_type, log);
+ &skip_copy, eth_type, false, log);
+ if (err)
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_SET_MASKED:
+ err = validate_set(a, key, sfa,
+ &skip_copy, eth_type, true, log);
if (err)
return err;
break;
@@ -1894,6 +2161,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
return 0;
}
+/* 'key' must be the masked key. */
int ovs_nla_copy_actions(const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa, bool log)
@@ -1981,6 +2249,21 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
return 0;
}
+static int masked_set_action_to_set_action_attr(const struct nlattr *a,
+ struct sk_buff *skb)
+{
+ const struct nlattr *ovs_key = nla_data(a);
+ size_t key_len = nla_len(ovs_key) / 2;
+
+ /* Revert the conversion we did from a non-masked set action to
+ * a masked set action.
+ */
+ if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
{
const struct nlattr *a;
@@ -1996,6 +2279,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
return err;
break;
+ case OVS_ACTION_ATTR_SET_TO_MASKED:
+ err = masked_set_action_to_set_action_attr(a, skb);
+ if (err)
+ return err;
+ break;
+
case OVS_ACTION_ATTR_SAMPLE:
err = sample_action_to_attr(a, skb);
if (err)
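Editor's note: several of the flow_netlink.c helpers above (get_mask(), validate_masked(), masked_set_action_to_set_action_attr()) rely on the same payload convention: a masked set action attribute carries the key structure followed immediately by a mask structure of the same type, and every bit set in the key must also be set in the mask. The standalone sketch below mimics that layout with a hypothetical key type; it is illustrative only and not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_key {		/* stands in for e.g. struct ovs_key_ipv4 */
	uint32_t src;
	uint32_t dst;
	uint8_t  ttl;
};

/* Same check as validate_masked(): no key bit may fall outside the mask. */
static bool masked_bits_ok(const uint8_t *data, const uint8_t *mask, size_t len)
{
	while (len--)
		if (*data++ & ~*mask++)
			return false;
	return true;
}

int main(void)
{
	/* Key and mask packed back to back, as in the netlink payload;
	 * "payload + 1" is exactly what get_mask() computes. */
	struct example_key payload[2];

	memset(payload, 0, sizeof(payload));
	payload[0].ttl = 1;	/* key: new TTL value        */
	payload[1].ttl = 0xff;	/* mask: only the TTL is set */

	printf("valid=%d\n",
	       masked_bits_ok((const uint8_t *)&payload[0],
			      (const uint8_t *)&payload[1],
			      sizeof(payload[0])));
	return 0;
}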
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 577f12be3459..5c3d75bff310 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -43,16 +43,25 @@ size_t ovs_key_attr_size(void);
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key, struct sw_flow_mask *mask);
-int ovs_nla_put_flow(const struct sw_flow_key *,
- const struct sw_flow_key *, struct sk_buff *);
+int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
+ int attr, bool is_mask, struct sk_buff *);
int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *,
bool log);
+int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
+int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
+int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
+
int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
const struct nlattr *mask, bool log);
int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
const struct ovs_tunnel_info *);
+bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
+int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
+ const struct sw_flow_key *key, bool log);
+u32 ovs_nla_get_ufid_flags(const struct nlattr *attr);
+
int ovs_nla_copy_actions(const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa, bool log);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 5899bf161c61..4613df8c8290 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -85,6 +85,8 @@ struct sw_flow *ovs_flow_alloc(void)
flow->sf_acts = NULL;
flow->mask = NULL;
+ flow->id.unmasked_key = NULL;
+ flow->id.ufid_len = 0;
flow->stats_last_writer = NUMA_NO_NODE;
/* Initialize the default stat node. */
@@ -139,6 +141,8 @@ static void flow_free(struct sw_flow *flow)
{
int node;
+ if (ovs_identifier_is_key(&flow->id))
+ kfree(flow->id.unmasked_key);
kfree((struct sw_flow_actions __force *)flow->sf_acts);
for_each_node(node)
if (flow->stats[node])
@@ -200,18 +204,28 @@ static struct table_instance *table_instance_alloc(int new_size)
int ovs_flow_tbl_init(struct flow_table *table)
{
- struct table_instance *ti;
+ struct table_instance *ti, *ufid_ti;
ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
return -ENOMEM;
+ ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!ufid_ti)
+ goto free_ti;
+
rcu_assign_pointer(table->ti, ti);
+ rcu_assign_pointer(table->ufid_ti, ufid_ti);
INIT_LIST_HEAD(&table->mask_list);
table->last_rehash = jiffies;
table->count = 0;
+ table->ufid_count = 0;
return 0;
+
+free_ti:
+ __table_instance_destroy(ti);
+ return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
@@ -221,13 +235,16 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
__table_instance_destroy(ti);
}
-static void table_instance_destroy(struct table_instance *ti, bool deferred)
+static void table_instance_destroy(struct table_instance *ti,
+ struct table_instance *ufid_ti,
+ bool deferred)
{
int i;
if (!ti)
return;
+ BUG_ON(!ufid_ti);
if (ti->keep_flows)
goto skip_flows;
@@ -236,18 +253,24 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
struct hlist_head *head = flex_array_get(ti->buckets, i);
struct hlist_node *n;
int ver = ti->node_ver;
+ int ufid_ver = ufid_ti->node_ver;
- hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del_rcu(&flow->hash_node[ver]);
+ hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
+ hlist_del_rcu(&flow->flow_table.node[ver]);
+ if (ovs_identifier_is_ufid(&flow->id))
+ hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
ovs_flow_free(flow, deferred);
}
}
skip_flows:
- if (deferred)
+ if (deferred) {
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
- else
+ call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
+ } else {
__table_instance_destroy(ti);
+ __table_instance_destroy(ufid_ti);
+ }
}
/* No need for locking this function is called from RCU callback or
@@ -256,8 +279,9 @@ skip_flows:
void ovs_flow_tbl_destroy(struct flow_table *table)
{
struct table_instance *ti = rcu_dereference_raw(table->ti);
+ struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
- table_instance_destroy(ti, false);
+ table_instance_destroy(ti, ufid_ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -272,7 +296,7 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
while (*bucket < ti->n_buckets) {
i = 0;
head = flex_array_get(ti->buckets, *bucket);
- hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
+ hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
if (i < *last) {
i++;
continue;
@@ -294,16 +318,26 @@ static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
(hash & (ti->n_buckets - 1)));
}
-static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
+static void table_instance_insert(struct table_instance *ti,
+ struct sw_flow *flow)
+{
+ struct hlist_head *head;
+
+ head = find_bucket(ti, flow->flow_table.hash);
+ hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
+}
+
+static void ufid_table_instance_insert(struct table_instance *ti,
+ struct sw_flow *flow)
{
struct hlist_head *head;
- head = find_bucket(ti, flow->hash);
- hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
+ head = find_bucket(ti, flow->ufid_table.hash);
+ hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
static void flow_table_copy_flows(struct table_instance *old,
- struct table_instance *new)
+ struct table_instance *new, bool ufid)
{
int old_ver;
int i;
@@ -318,15 +352,21 @@ static void flow_table_copy_flows(struct table_instance *old,
head = flex_array_get(old->buckets, i);
- hlist_for_each_entry(flow, head, hash_node[old_ver])
- table_instance_insert(new, flow);
+ if (ufid)
+ hlist_for_each_entry(flow, head,
+ ufid_table.node[old_ver])
+ ufid_table_instance_insert(new, flow);
+ else
+ hlist_for_each_entry(flow, head,
+ flow_table.node[old_ver])
+ table_instance_insert(new, flow);
}
old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
- int n_buckets)
+ int n_buckets, bool ufid)
{
struct table_instance *new_ti;
@@ -334,32 +374,45 @@ static struct table_instance *table_instance_rehash(struct table_instance *ti,
if (!new_ti)
return NULL;
- flow_table_copy_flows(ti, new_ti);
+ flow_table_copy_flows(ti, new_ti, ufid);
return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
- struct table_instance *old_ti;
- struct table_instance *new_ti;
+ struct table_instance *old_ti, *new_ti;
+ struct table_instance *old_ufid_ti, *new_ufid_ti;
- old_ti = ovsl_dereference(flow_table->ti);
new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!new_ti)
return -ENOMEM;
+ new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!new_ufid_ti)
+ goto err_free_ti;
+
+ old_ti = ovsl_dereference(flow_table->ti);
+ old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
rcu_assign_pointer(flow_table->ti, new_ti);
+ rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
flow_table->last_rehash = jiffies;
flow_table->count = 0;
+ flow_table->ufid_count = 0;
- table_instance_destroy(old_ti, true);
+ table_instance_destroy(old_ti, old_ufid_ti, true);
return 0;
+
+err_free_ti:
+ __table_instance_destroy(new_ti);
+ return -ENOMEM;
}
-static u32 flow_hash(const struct sw_flow_key *key, int key_start,
- int key_end)
+static u32 flow_hash(const struct sw_flow_key *key,
+ const struct sw_flow_key_range *range)
{
+ int key_start = range->start;
+ int key_end = range->end;
const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
int hash_u32s = (key_end - key_start) >> 2;
@@ -395,19 +448,20 @@ static bool cmp_key(const struct sw_flow_key *key1,
static bool flow_cmp_masked_key(const struct sw_flow *flow,
const struct sw_flow_key *key,
- int key_start, int key_end)
+ const struct sw_flow_key_range *range)
{
- return cmp_key(&flow->key, key, key_start, key_end);
+ return cmp_key(&flow->key, key, range->start, range->end);
}
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- const struct sw_flow_match *match)
+static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_match *match)
{
struct sw_flow_key *key = match->key;
int key_start = flow_key_start(key);
int key_end = match->range.end;
- return cmp_key(&flow->unmasked_key, key, key_start, key_end);
+ BUG_ON(ovs_identifier_is_ufid(&flow->id));
+ return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
@@ -416,18 +470,15 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
{
struct sw_flow *flow;
struct hlist_head *head;
- int key_start = mask->range.start;
- int key_end = mask->range.end;
u32 hash;
struct sw_flow_key masked_key;
ovs_flow_mask_key(&masked_key, unmasked, mask);
- hash = flow_hash(&masked_key, key_start, key_end);
+ hash = flow_hash(&masked_key, &mask->range);
head = find_bucket(ti, hash);
- hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
- if (flow->mask == mask && flow->hash == hash &&
- flow_cmp_masked_key(flow, &masked_key,
- key_start, key_end))
+ hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
+ if (flow->mask == mask && flow->flow_table.hash == hash &&
+ flow_cmp_masked_key(flow, &masked_key, &mask->range))
return flow;
}
return NULL;
@@ -469,7 +520,48 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
/* Always called under ovs-mutex. */
list_for_each_entry(mask, &tbl->mask_list, list) {
flow = masked_flow_lookup(ti, match->key, mask);
- if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
+ if (flow && ovs_identifier_is_key(&flow->id) &&
+ ovs_flow_cmp_unmasked_key(flow, match))
+ return flow;
+ }
+ return NULL;
+}
+
+static u32 ufid_hash(const struct sw_flow_id *sfid)
+{
+ return jhash(sfid->ufid, sfid->ufid_len, 0);
+}
+
+static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
+ const struct sw_flow_id *sfid)
+{
+ if (flow->id.ufid_len != sfid->ufid_len)
+ return false;
+
+ return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
+}
+
+bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
+{
+ if (ovs_identifier_is_ufid(&flow->id))
+ return flow_cmp_masked_key(flow, match->key, &match->range);
+
+ return ovs_flow_cmp_unmasked_key(flow, match);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
+ const struct sw_flow_id *ufid)
+{
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ u32 hash;
+
+ hash = ufid_hash(ufid);
+ head = find_bucket(ti, hash);
+ hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
+ if (flow->ufid_table.hash == hash &&
+ ovs_flow_cmp_ufid(flow, ufid))
return flow;
}
return NULL;
@@ -486,9 +578,10 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
return num;
}
-static struct table_instance *table_instance_expand(struct table_instance *ti)
+static struct table_instance *table_instance_expand(struct table_instance *ti,
+ bool ufid)
{
- return table_instance_rehash(ti, ti->n_buckets * 2);
+ return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
@@ -513,10 +606,15 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *ti = ovsl_dereference(table->ti);
+ struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);
- hlist_del_rcu(&flow->hash_node[ti->node_ver]);
+ hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
table->count--;
+ if (ovs_identifier_is_ufid(&flow->id)) {
+ hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
+ table->ufid_count--;
+ }
/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
* accessible as long as the RCU read lock is held.
@@ -585,34 +683,64 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
}
/* Must be called with OVS mutex held. */
-int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- const struct sw_flow_mask *mask)
+static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *new_ti = NULL;
struct table_instance *ti;
- int err;
-
- err = flow_mask_insert(table, flow, mask);
- if (err)
- return err;
- flow->hash = flow_hash(&flow->key, flow->mask->range.start,
- flow->mask->range.end);
+ flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
ti = ovsl_dereference(table->ti);
table_instance_insert(ti, flow);
table->count++;
/* Expand table, if necessary, to make room. */
if (table->count > ti->n_buckets)
- new_ti = table_instance_expand(ti);
+ new_ti = table_instance_expand(ti, false);
else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
- new_ti = table_instance_rehash(ti, ti->n_buckets);
+ new_ti = table_instance_rehash(ti, ti->n_buckets, false);
if (new_ti) {
rcu_assign_pointer(table->ti, new_ti);
- table_instance_destroy(ti, true);
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
table->last_rehash = jiffies;
}
+}
+
+/* Must be called with OVS mutex held. */
+static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
+{
+ struct table_instance *ti;
+
+ flow->ufid_table.hash = ufid_hash(&flow->id);
+ ti = ovsl_dereference(table->ufid_ti);
+ ufid_table_instance_insert(ti, flow);
+ table->ufid_count++;
+
+ /* Expand table, if necessary, to make room. */
+ if (table->ufid_count > ti->n_buckets) {
+ struct table_instance *new_ti;
+
+ new_ti = table_instance_expand(ti, true);
+ if (new_ti) {
+ rcu_assign_pointer(table->ufid_ti, new_ti);
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ }
+ }
+}
+
+/* Must be called with OVS mutex held. */
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+ const struct sw_flow_mask *mask)
+{
+ int err;
+
+ err = flow_mask_insert(table, flow, mask);
+ if (err)
+ return err;
+ flow_key_insert(table, flow);
+ if (ovs_identifier_is_ufid(&flow->id))
+ flow_ufid_insert(table, flow);
+
return 0;
}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 309fa6415689..616eda10d955 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -47,9 +47,11 @@ struct table_instance {
struct flow_table {
struct table_instance __rcu *ti;
+ struct table_instance __rcu *ufid_ti;
struct list_head mask_list;
unsigned long last_rehash;
unsigned int count;
+ unsigned int ufid_count;
};
extern struct kmem_cache *flow_stats_cache;
@@ -78,8 +80,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
const struct sw_flow_match *match);
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- const struct sw_flow_match *match);
+struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
+ const struct sw_flow_id *);
+
+bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
const struct sw_flow_mask *mask);
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 484864dd0e68..bf02fd5808c9 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -9,8 +9,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/version.h>
-
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
@@ -90,7 +88,7 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
opts_len = geneveh->opt_len * 4;
- flags = TUNNEL_KEY | TUNNEL_OPTIONS_PRESENT |
+ flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
(udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
(geneveh->oam ? TUNNEL_OAM : 0) |
(geneveh->critical ? TUNNEL_CRIT_OPT : 0);
@@ -172,7 +170,7 @@ error:
static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
{
- struct ovs_key_ipv4_tunnel *tun_key;
+ const struct ovs_key_ipv4_tunnel *tun_key;
struct ovs_tunnel_info *tun_info;
struct net *net = ovs_dp_get_net(vport->dp);
struct geneve_port *geneve_port = geneve_vport(vport);
@@ -180,7 +178,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
__be16 sport;
struct rtable *rt;
struct flowi4 fl;
- u8 vni[3];
+ u8 vni[3], opts_len, *opts;
__be16 df;
int err;
@@ -191,16 +189,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
}
tun_key = &tun_info->tunnel;
-
- /* Route lookup */
- memset(&fl, 0, sizeof(fl));
- fl.daddr = tun_key->ipv4_dst;
- fl.saddr = tun_key->ipv4_src;
- fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
- fl.flowi4_mark = skb->mark;
- fl.flowi4_proto = IPPROTO_UDP;
-
- rt = ip_route_output_key(net, &fl);
+ rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto error;
@@ -211,12 +200,19 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
tunnel_id_to_vni(tun_key->tun_id, vni);
skb->ignore_df = 1;
+ if (tun_key->tun_flags & TUNNEL_GENEVE_OPT) {
+ opts = (u8 *)tun_info->options;
+ opts_len = tun_info->options_len;
+ } else {
+ opts = NULL;
+ opts_len = 0;
+ }
+
err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
tun_key->ipv4_dst, tun_key->ipv4_tos,
tun_key->ipv4_ttl, df, sport, dport,
- tun_key->tun_flags, vni,
- tun_info->options_len, (u8 *)tun_info->options,
- false);
+ tun_key->tun_flags, vni, opts_len, opts,
+ !!(tun_key->tun_flags & TUNNEL_CSUM), false);
if (err < 0)
ip_rt_put(rt);
return err;
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index d4168c442db5..f17ac9642f4e 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -134,7 +134,7 @@ static int gre_err(struct sk_buff *skb, u32 info,
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
- struct ovs_key_ipv4_tunnel *tun_key;
+ const struct ovs_key_ipv4_tunnel *tun_key;
struct flowi4 fl;
struct rtable *rt;
int min_headroom;
@@ -148,15 +148,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
}
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
- /* Route lookup */
- memset(&fl, 0, sizeof(fl));
- fl.daddr = tun_key->ipv4_dst;
- fl.saddr = tun_key->ipv4_src;
- fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
- fl.flowi4_mark = skb->mark;
- fl.flowi4_proto = IPPROTO_GRE;
-
- rt = ip_route_output_key(net, &fl);
+ rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto err_free_skb;
@@ -166,7 +158,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ tunnel_hlen + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
int head_delta = SKB_DATA_ALIGN(min_headroom -
skb_headroom(skb) +
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index d7c46b301024..3277a7520e31 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -40,6 +40,7 @@
#include "datapath.h"
#include "vport.h"
+#include "vport-vxlan.h"
/**
* struct vxlan_port - Keeps track of open UDP ports
@@ -49,6 +50,7 @@
struct vxlan_port {
struct vxlan_sock *vs;
char name[IFNAMSIZ];
+ u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
};
static struct vport_ops ovs_vxlan_vport_ops;
@@ -59,19 +61,30 @@ static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
}
/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+ struct vxlan_metadata *md)
{
struct ovs_tunnel_info tun_info;
+ struct vxlan_port *vxlan_port;
struct vport *vport = vs->data;
struct iphdr *iph;
+ struct ovs_vxlan_opts opts = {
+ .gbp = md->gbp,
+ };
__be64 key;
+ __be16 flags;
+
+ flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
+ vxlan_port = vxlan_vport(vport);
+ if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
+ flags |= TUNNEL_VXLAN_OPT;
/* Save outer tunnel values */
iph = ip_hdr(skb);
- key = cpu_to_be64(ntohl(vx_vni) >> 8);
+ key = cpu_to_be64(ntohl(md->vni) >> 8);
ovs_flow_tun_info_init(&tun_info, iph,
udp_hdr(skb)->source, udp_hdr(skb)->dest,
- key, TUNNEL_KEY, NULL, 0);
+ key, flags, &opts, sizeof(opts));
ovs_vport_receive(vport, skb, &tun_info);
}
@@ -83,6 +96,21 @@ static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
return -EMSGSIZE;
+
+ if (vxlan_port->exts) {
+ struct nlattr *exts;
+
+ exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
+ if (!exts)
+ return -EMSGSIZE;
+
+ if (vxlan_port->exts & VXLAN_F_GBP &&
+ nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, exts);
+ }
+
return 0;
}
@@ -95,6 +123,31 @@ static void vxlan_tnl_destroy(struct vport *vport)
ovs_vport_deferred_free(vport);
}
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+ [OVS_VXLAN_EXT_GBP] = { .type = NLA_FLAG, },
+};
+
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+{
+ struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
+ struct vxlan_port *vxlan_port;
+ int err;
+
+ if (nla_len(attr) < sizeof(struct nlattr))
+ return -EINVAL;
+
+ err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
+ if (err < 0)
+ return err;
+
+ vxlan_port = vxlan_vport(vport);
+
+ if (exts[OVS_VXLAN_EXT_GBP])
+ vxlan_port->exts |= VXLAN_F_GBP;
+
+ return 0;
+}
+
static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
@@ -127,7 +180,17 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
vxlan_port = vxlan_vport(vport);
strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
- vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, 0);
+ a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
+ if (a) {
+ err = vxlan_configure_exts(vport, a);
+ if (err) {
+ ovs_vport_free(vport);
+ goto error;
+ }
+ }
+
+ vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
+ vxlan_port->exts);
if (IS_ERR(vs)) {
ovs_vport_free(vport);
return (void *)vs;
@@ -140,17 +203,34 @@ error:
return ERR_PTR(err);
}
+static int vxlan_ext_gbp(struct sk_buff *skb)
+{
+ const struct ovs_tunnel_info *tun_info;
+ const struct ovs_vxlan_opts *opts;
+
+ tun_info = OVS_CB(skb)->egress_tun_info;
+ opts = tun_info->options;
+
+ if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
+ tun_info->options_len >= sizeof(*opts))
+ return opts->gbp;
+ else
+ return 0;
+}
+
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
struct vxlan_port *vxlan_port = vxlan_vport(vport);
__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
- struct ovs_key_ipv4_tunnel *tun_key;
+ const struct ovs_key_ipv4_tunnel *tun_key;
+ struct vxlan_metadata md = {0};
struct rtable *rt;
struct flowi4 fl;
__be16 src_port;
__be16 df;
int err;
+ u32 vxflags;
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
@@ -158,15 +238,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
}
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
- /* Route lookup */
- memset(&fl, 0, sizeof(fl));
- fl.daddr = tun_key->ipv4_dst;
- fl.saddr = tun_key->ipv4_src;
- fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
- fl.flowi4_mark = skb->mark;
- fl.flowi4_proto = IPPROTO_UDP;
-
- rt = ip_route_output_key(net, &fl);
+ rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto error;
@@ -178,13 +250,15 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
skb->ignore_df = 1;
src_port = udp_flow_src_port(net, skb, 0, 0, true);
+ md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
+ md.gbp = vxlan_ext_gbp(skb);
+ vxflags = vxlan_port->exts |
+ (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
- err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
- fl.saddr, tun_key->ipv4_dst,
+ err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst,
tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
src_port, dst_port,
- htonl(be64_to_cpu(tun_key->tun_id) << 8),
- false);
+ &md, false, vxflags);
if (err < 0)
ip_rt_put(rt);
return err;
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
new file mode 100644
index 000000000000..4b08233e73d5
--- /dev/null
+++ b/net/openvswitch/vport-vxlan.h
@@ -0,0 +1,11 @@
+#ifndef VPORT_VXLAN_H
+#define VPORT_VXLAN_H 1
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct ovs_vxlan_opts {
+ __u32 gbp;
+};
+
+#endif
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 2034c6d9cb5a..ec2954ffc690 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -480,7 +480,8 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
- stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ stats->rx_bytes += skb->len +
+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
u64_stats_update_end(&stats->syncp);
OVS_CB(skb)->input_vport = vport;
@@ -594,14 +595,7 @@ int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
* The process may need to be changed if the corresponding process
* in vports ops changed.
*/
- memset(&fl, 0, sizeof(fl));
- fl.daddr = tun_key->ipv4_dst;
- fl.saddr = tun_key->ipv4_src;
- fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
- fl.flowi4_mark = skb_mark;
- fl.flowi4_proto = ipproto;
-
- rt = ip_route_output_key(net, &fl);
+ rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 99c8e71d9e6c..f8ae295fb001 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -236,4 +236,22 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
int ovs_vport_ops_register(struct vport_ops *ops);
void ovs_vport_ops_unregister(struct vport_ops *ops);
+static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
+ const struct ovs_key_ipv4_tunnel *key,
+ u32 mark,
+ struct flowi4 *fl,
+ u8 protocol)
+{
+ struct rtable *rt;
+
+ memset(fl, 0, sizeof(*fl));
+ fl->daddr = key->ipv4_dst;
+ fl->saddr = key->ipv4_src;
+ fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+ fl->flowi4_mark = mark;
+ fl->flowi4_proto = protocol;
+
+ rt = ip_route_output_key(net, fl);
+ return rt;
+}
#endif /* vport.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6880f34a529a..9c28cec1a083 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -986,8 +986,8 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
- if (vlan_tx_tag_present(pkc->skb)) {
- ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
+ if (skb_vlan_tag_present(pkc->skb)) {
+ ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
@@ -2000,8 +2000,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_net = netoff;
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
- if (vlan_tx_tag_present(skb)) {
- h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
@@ -2102,7 +2102,7 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
{
/* net device doesn't like empty head */
if (unlikely(len <= dev->hard_header_len)) {
- net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
+ net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
current->comm, len, dev->hard_header_len);
return true;
}
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = -EINVAL;
if (sock->type == SOCK_DGRAM) {
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
- if (unlikely(offset) < 0)
+ if (unlikely(offset < 0))
goto out_free;
} else {
if (ll_header_truncated(dev, len))
@@ -3010,8 +3010,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
- if (vlan_tx_tag_present(skb)) {
- aux.tp_vlan_tci = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ aux.tp_vlan_tci = skb_vlan_tag_get(skb);
aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 92f2c7107eec..0ed68f0238bf 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -177,7 +177,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
PACKET_DIAG_FILTER))
goto out_nlmsg_trim;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
out_nlmsg_trim:
nlmsg_cancel(skb, nlh);
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index b64151ade6b3..bc5ee5fbe6ae 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -121,7 +121,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
ifm->ifa_index = dev->ifindex;
if (nla_put_u8(skb, IFA_LOCAL, addr))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -190,7 +191,8 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
if (nla_put_u8(skb, RTA_DST, dst) ||
nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
@@ -270,27 +272,23 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- u8 addr, addr_idx = 0, addr_start_idx = cb->args[0];
+ u8 addr;
rcu_read_lock();
- for (addr = 0; addr < 64; addr++) {
- struct net_device *dev;
+ for (addr = cb->args[0]; addr < 64; addr++) {
+ struct net_device *dev = phonet_route_get_rcu(net, addr << 2);
- dev = phonet_route_get_rcu(net, addr << 2);
if (!dev)
continue;
- if (addr_idx++ < addr_start_idx)
- continue;
if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE))
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE) < 0)
goto out;
}
out:
rcu_read_unlock();
- cb->args[0] = addr_idx;
- cb->args[1] = 0;
+ cb->args[0] = addr;
return skb->len;
}
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 1dde91e3dc70..bd3825d38abc 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -409,7 +409,7 @@ try_again:
posted = IB_GET_POST_CREDITS(oldval);
avail = IB_GET_SEND_CREDITS(oldval);
- rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
+ rdsdebug("wanted=%u credits=%u posted=%u\n",
wanted, avail, posted);
/* The last credit must be used to send a credit update. */
@@ -453,7 +453,7 @@ void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
if (credits == 0)
return;
- rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
+ rdsdebug("credits=%u current=%u%s\n",
credits,
IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a91e1db62ee6..a6c2bea9f8f9 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -590,8 +590,8 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
/* Actually this may happen quite frequently, when
* an outgoing connect raced with an incoming connect.
*/
- rdsdebug("rds_iw_conn_shutdown: failed to disconnect,"
- " cm: %p err %d\n", ic->i_cm_id, err);
+ rdsdebug("failed to disconnect, cm: %p err %d\n",
+ ic->i_cm_id, err);
}
if (ic->i_cm_id->qp) {
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 9105ea03aec5..13834780a308 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -361,7 +361,7 @@ try_again:
posted = IB_GET_POST_CREDITS(oldval);
avail = IB_GET_SEND_CREDITS(oldval);
- rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n",
+ rdsdebug("wanted=%u credits=%u posted=%u\n",
wanted, avail, posted);
/* The last credit must be used to send a credit update. */
@@ -405,7 +405,7 @@ void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
if (credits == 0)
return;
- rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n",
+ rdsdebug("credits=%u current=%u%s\n",
credits,
IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
diff --git a/net/rds/message.c b/net/rds/message.c
index 5a21e6f5986f..756c73729126 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -266,7 +266,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
{
- unsigned long to_copy;
+ unsigned long to_copy, nbytes;
unsigned long sg_off;
struct scatterlist *sg;
int ret = 0;
@@ -293,9 +293,9 @@ int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
sg->length - sg_off);
rds_stats_add(s_copy_from_user, to_copy);
- ret = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
- to_copy, from);
- if (ret != to_copy)
+ nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
+ to_copy, from);
+ if (nbytes != to_copy)
return -EFAULT;
sg_off += to_copy;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c3b0cd43eb56..c173f69e1479 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
{
.procname = "max_unacked_packets",
.data = &rds_sysctl_max_unacked_packets,
- .maxlen = sizeof(unsigned long),
+ .maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "max_unacked_bytes",
.data = &rds_sysctl_max_unacked_bytes,
- .maxlen = sizeof(unsigned long),
+ .maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 3f4a0bbeed3d..d978f2f46ff3 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -170,6 +170,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+ { "BCM2E40", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E64", RFKILL_TYPE_BLUETOOTH },
{ "BCM4752", RFKILL_TYPE_GPS },
{ "LNV4752", RFKILL_TYPE_GPS },
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 74c0fcd36838..5394b6be46ec 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -42,6 +42,11 @@ void rxrpc_UDP_error_report(struct sock *sk)
_leave("UDP socket errqueue empty");
return;
}
+ if (!skb->len) {
+ _leave("UDP empty message");
+ kfree_skb(skb);
+ return;
+ }
rxrpc_new_skb(skb);
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index e1a9373e5979..8331c95e1522 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -232,10 +232,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
ret = -EPROTO; /* request phase complete for this client call */
} else {
- mm_segment_t oldfs = get_fs();
- set_fs(KERNEL_DS);
ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
- set_fs(oldfs);
}
release_sock(&call->socket->sk);
@@ -529,13 +526,11 @@ static int rxrpc_send_data(struct kiocb *iocb,
struct msghdr *msg, size_t len)
{
struct rxrpc_skb_priv *sp;
- unsigned char __user *from;
struct sk_buff *skb;
- const struct iovec *iov;
struct sock *sk = &rx->sk;
long timeo;
bool more;
- int ret, ioc, segment, copied;
+ int ret, copied;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -545,25 +540,17 @@ static int rxrpc_send_data(struct kiocb *iocb,
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
return -EPIPE;
- iov = msg->msg_iter.iov;
- ioc = msg->msg_iter.nr_segs - 1;
- from = iov->iov_base;
- segment = iov->iov_len;
- iov++;
more = msg->msg_flags & MSG_MORE;
skb = call->tx_pending;
call->tx_pending = NULL;
copied = 0;
- do {
+ if (len > iov_iter_count(&msg->msg_iter))
+ len = iov_iter_count(&msg->msg_iter);
+ while (len) {
int copy;
- if (segment > len)
- segment = len;
-
- _debug("SEGMENT %d @%p", segment, from);
-
if (!skb) {
size_t size, chunk, max, space;
@@ -631,13 +618,13 @@ static int rxrpc_send_data(struct kiocb *iocb,
/* append next segment of data to the current buffer */
copy = skb_tailroom(skb);
ASSERTCMP(copy, >, 0);
- if (copy > segment)
- copy = segment;
+ if (copy > len)
+ copy = len;
if (copy > sp->remain)
copy = sp->remain;
_debug("add");
- ret = skb_add_data(skb, from, copy);
+ ret = skb_add_data(skb, &msg->msg_iter, copy);
_debug("added");
if (ret < 0)
goto efault;
@@ -646,18 +633,6 @@ static int rxrpc_send_data(struct kiocb *iocb,
copied += copy;
len -= copy;
- segment -= copy;
- from += copy;
- while (segment == 0 && ioc > 0) {
- from = iov->iov_base;
- segment = iov->iov_len;
- iov++;
- ioc--;
- }
- if (len == 0) {
- segment = 0;
- ioc = 0;
- }
/* check for the far side aborting the call or a network error
* occurring */
@@ -665,7 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
goto call_aborted;
/* add the packet to the send queue if it's now full */
- if (sp->remain <= 0 || (segment == 0 && !more)) {
+ if (sp->remain <= 0 || (!len && !more)) {
struct rxrpc_connection *conn = call->conn;
uint32_t seq;
size_t pad;
@@ -711,11 +686,10 @@ static int rxrpc_send_data(struct kiocb *iocb,
memcpy(skb->head, &sp->hdr,
sizeof(struct rxrpc_header));
- rxrpc_queue_packet(call, skb, segment == 0 && !more);
+ rxrpc_queue_packet(call, skb, !iov_iter_count(&msg->msg_iter) && !more);
skb = NULL;
}
-
- } while (segment > 0);
+ }
success:
ret = copied;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index c54c9d9d1ffb..899d0319f2b2 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -698,6 +698,30 @@ config NET_ACT_VLAN
To compile this code as a module, choose M here: the
module will be called act_vlan.
+config NET_ACT_BPF
+ tristate "BPF based action"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to execute BPF code on packets. The BPF code will decide
+ if the packet should be dropped or not.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_bpf.
+
+config NET_ACT_CONNMARK
+ tristate "Netfilter Connection Mark Retriever"
+ depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NF_CONNTRACK && NF_CONNTRACK_MARK
+ ---help---
+ Say Y here to allow retrieving of conn mark
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_connmark.
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 679f24ae7f93..7ca7f4c1b8c2 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -17,6 +17,8 @@ obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o
+obj-$(CONFIG_NET_ACT_BPF) += act_bpf.o
+obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
new file mode 100644
index 000000000000..82c5d7fc1988
--- /dev/null
+++ b/net/sched/act_bpf.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/filter.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_bpf.h>
+#include <net/tc_act/tc_bpf.h>
+
+#define BPF_TAB_MASK 15
+
+static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_bpf *b = a->priv;
+ int action;
+ int filter_res;
+
+ spin_lock(&b->tcf_lock);
+ b->tcf_tm.lastuse = jiffies;
+ bstats_update(&b->tcf_bstats, skb);
+ action = b->tcf_action;
+
+ filter_res = BPF_PROG_RUN(b->filter, skb);
+ if (filter_res == 0) {
+ /* Return code 0 from the BPF program
+ * is being interpreted as a drop here.
+ */
+ action = TC_ACT_SHOT;
+ b->tcf_qstats.drops++;
+ }
+
+ spin_unlock(&b->tcf_lock);
+ return action;
+}
+
+static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *tp = skb_tail_pointer(skb);
+ struct tcf_bpf *b = a->priv;
+ struct tc_act_bpf opt = {
+ .index = b->tcf_index,
+ .refcnt = b->tcf_refcnt - ref,
+ .bindcnt = b->tcf_bindcnt - bind,
+ .action = b->tcf_action,
+ };
+ struct tcf_t t;
+ struct nlattr *nla;
+
+ if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, b->bpf_num_ops))
+ goto nla_put_failure;
+
+ nla = nla_reserve(skb, TCA_ACT_BPF_OPS, b->bpf_num_ops *
+ sizeof(struct sock_filter));
+ if (!nla)
+ goto nla_put_failure;
+
+ memcpy(nla_data(nla), b->bpf_ops, nla_len(nla));
+
+ t.install = jiffies_to_clock_t(jiffies - b->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - b->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(b->tcf_tm.expires);
+ if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(t), &t))
+ goto nla_put_failure;
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, tp);
+ return -1;
+}
+
+static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
+ [TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) },
+ [TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
+ [TCA_ACT_BPF_OPS] = { .type = NLA_BINARY,
+ .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
+};
+
+static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
+{
+ struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+ struct tc_act_bpf *parm;
+ struct tcf_bpf *b;
+ u16 bpf_size, bpf_num_ops;
+ struct sock_filter *bpf_ops;
+ struct sock_fprog_kern tmp;
+ struct bpf_prog *fp;
+ int ret;
+
+ if (!nla)
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
+ if (ret < 0)
+ return ret;
+
+ if (!tb[TCA_ACT_BPF_PARMS] ||
+ !tb[TCA_ACT_BPF_OPS_LEN] || !tb[TCA_ACT_BPF_OPS])
+ return -EINVAL;
+ parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
+
+ bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
+ if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
+ return -EINVAL;
+
+ bpf_size = bpf_num_ops * sizeof(*bpf_ops);
+ if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
+ return -EINVAL;
+
+ bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
+ if (!bpf_ops)
+ return -ENOMEM;
+
+ memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);
+
+ tmp.len = bpf_num_ops;
+ tmp.filter = bpf_ops;
+
+ ret = bpf_prog_create(&fp, &tmp);
+ if (ret)
+ goto free_bpf_ops;
+
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*b), bind);
+ if (ret)
+ goto destroy_fp;
+
+ ret = ACT_P_CREATED;
+ } else {
+ if (bind)
+ goto destroy_fp;
+ tcf_hash_release(a, bind);
+ if (!ovr) {
+ ret = -EEXIST;
+ goto destroy_fp;
+ }
+ }
+
+ b = to_bpf(a);
+ spin_lock_bh(&b->tcf_lock);
+ b->tcf_action = parm->action;
+ b->bpf_num_ops = bpf_num_ops;
+ b->bpf_ops = bpf_ops;
+ b->filter = fp;
+ spin_unlock_bh(&b->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(a);
+ return ret;
+
+destroy_fp:
+ bpf_prog_destroy(fp);
+free_bpf_ops:
+ kfree(bpf_ops);
+ return ret;
+}
+
+static void tcf_bpf_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_bpf *b = a->priv;
+
+ bpf_prog_destroy(b->filter);
+}
+
+static struct tc_action_ops act_bpf_ops = {
+ .kind = "bpf",
+ .type = TCA_ACT_BPF,
+ .owner = THIS_MODULE,
+ .act = tcf_bpf,
+ .dump = tcf_bpf_dump,
+ .cleanup = tcf_bpf_cleanup,
+ .init = tcf_bpf_init,
+};
+
+static int __init bpf_init_module(void)
+{
+ return tcf_register_action(&act_bpf_ops, BPF_TAB_MASK);
+}
+
+static void __exit bpf_cleanup_module(void)
+{
+ tcf_unregister_action(&act_bpf_ops);
+}
+
+module_init(bpf_init_module);
+module_exit(bpf_cleanup_module);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("TC BPF based action");
+MODULE_LICENSE("GPL v2");
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
new file mode 100644
index 000000000000..8e472518f9f6
--- /dev/null
+++ b/net/sched/act_connmark.c
@@ -0,0 +1,192 @@
+/*
+ * net/sched/act_connmark.c netfilter connmark retriever action
+ * skb mark is over-written
+ *
+ * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/act_api.h>
+#include <uapi/linux/tc_act/tc_connmark.h>
+#include <net/tc_act/tc_connmark.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+#define CONNMARK_TAB_MASK 3
+
+static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ const struct nf_conntrack_tuple_hash *thash;
+ struct nf_conntrack_tuple tuple;
+ enum ip_conntrack_info ctinfo;
+ struct tcf_connmark_info *ca = a->priv;
+ struct nf_conn *c;
+ int proto;
+
+ spin_lock(&ca->tcf_lock);
+ ca->tcf_tm.lastuse = jiffies;
+ bstats_update(&ca->tcf_bstats, skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->len < sizeof(struct iphdr))
+ goto out;
+
+ proto = NFPROTO_IPV4;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (skb->len < sizeof(struct ipv6hdr))
+ goto out;
+
+ proto = NFPROTO_IPV6;
+ } else {
+ goto out;
+ }
+
+ c = nf_ct_get(skb, &ctinfo);
+ if (c) {
+ skb->mark = c->mark;
+ /* using overlimits stats to count how many packets marked */
+ ca->tcf_qstats.overlimits++;
+ nf_ct_put(c);
+ goto out;
+ }
+
+ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ proto, &tuple))
+ goto out;
+
+ thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+ if (!thash)
+ goto out;
+
+ c = nf_ct_tuplehash_to_ctrack(thash);
+ /* using overlimits stats to count how many packets marked */
+ ca->tcf_qstats.overlimits++;
+ skb->mark = c->mark;
+ nf_ct_put(c);
+
+out:
+ skb->nfct = NULL;
+ spin_unlock(&ca->tcf_lock);
+ return ca->tcf_action;
+}
+
+static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
+ [TCA_CONNMARK_PARMS] = { .len = sizeof(struct tc_connmark) },
+};
+
+static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
+{
+ struct nlattr *tb[TCA_CONNMARK_MAX + 1];
+ struct tcf_connmark_info *ci;
+ struct tc_connmark *parm;
+ int ret = 0;
+
+ if (!nla)
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy);
+ if (ret < 0)
+ return ret;
+
+ parm = nla_data(tb[TCA_CONNMARK_PARMS]);
+
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+ if (ret)
+ return ret;
+
+ ci = to_connmark(a);
+ ci->tcf_action = parm->action;
+ ci->zone = parm->zone;
+
+ tcf_hash_insert(a);
+ ret = ACT_P_CREATED;
+ } else {
+ ci = to_connmark(a);
+ if (bind)
+ return 0;
+ tcf_hash_release(a, bind);
+ if (!ovr)
+ return -EEXIST;
+ /* replacing action and zone */
+ ci->tcf_action = parm->action;
+ ci->zone = parm->zone;
+ }
+
+ return ret;
+}
+
+static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_connmark_info *ci = a->priv;
+
+ struct tc_connmark opt = {
+ .index = ci->tcf_index,
+ .refcnt = ci->tcf_refcnt - ref,
+ .bindcnt = ci->tcf_bindcnt - bind,
+ .action = ci->tcf_action,
+ .zone = ci->zone,
+ };
+ struct tcf_t t;
+
+ if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
+ if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t))
+ goto nla_put_failure;
+
+ return skb->len;
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct tc_action_ops act_connmark_ops = {
+ .kind = "connmark",
+ .type = TCA_ACT_CONNMARK,
+ .owner = THIS_MODULE,
+ .act = tcf_connmark,
+ .dump = tcf_connmark_dump,
+ .init = tcf_connmark_init,
+};
+
+static int __init connmark_init_module(void)
+{
+ return tcf_register_action(&act_connmark_ops, CONNMARK_TAB_MASK);
+}
+
+static void __exit connmark_cleanup_module(void)
+{
+ tcf_unregister_action(&act_connmark_ops);
+}
+
+module_init(connmark_init_module);
+module_exit(connmark_cleanup_module);
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_DESCRIPTION("Connection tracking mark restoring");
+MODULE_LICENSE("GPL");
+
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index edbf40dac709..4cd5cf1aedf8 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -509,7 +509,7 @@ static int tcf_csum(struct sk_buff *skb,
if (unlikely(action == TC_ACT_SHOT))
goto drop;
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case cpu_to_be16(ETH_P_IP):
if (!tcf_csum_ipv4(skb, update_flags))
goto drop;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index aad6a679fb13..baef987fe2c0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
}
EXPORT_SYMBOL(tcf_exts_change);
-#define tcf_exts_first_act(ext) \
- list_first_entry(&(exts)->actions, struct tc_action, list)
+#define tcf_exts_first_act(ext) \
+ list_first_entry_or_null(&(exts)->actions, \
+ struct tc_action, list)
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
struct tc_action *a = tcf_exts_first_act(exts);
- if (tcf_action_copy_stats(skb, a, 1) < 0)
+ if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
return -1;
#endif
return 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 5aed341406c2..fc399db86f11 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -65,9 +65,12 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
if (head == NULL)
return 0UL;
- list_for_each_entry(f, &head->flist, link)
- if (f->handle == handle)
+ list_for_each_entry(f, &head->flist, link) {
+ if (f->handle == handle) {
l = (unsigned long) f;
+ break;
+ }
+ }
return l;
}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 84c8219c3e1c..5f3ee9e4b5bf 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -37,7 +37,7 @@ struct cls_bpf_prog {
struct tcf_result res;
struct list_head link;
u32 handle;
- u16 bpf_len;
+ u16 bpf_num_ops;
struct tcf_proto *tp;
struct rcu_head rcu;
};
@@ -160,7 +160,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
struct tcf_exts exts;
struct sock_fprog_kern tmp;
struct bpf_prog *fp;
- u16 bpf_size, bpf_len;
+ u16 bpf_size, bpf_num_ops;
u32 classid;
int ret;
@@ -173,13 +173,18 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
return ret;
classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
- bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
- if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
+ bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
+ if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) {
+ ret = -EINVAL;
+ goto errout;
+ }
+
+ bpf_size = bpf_num_ops * sizeof(*bpf_ops);
+ if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
ret = -EINVAL;
goto errout;
}
- bpf_size = bpf_len * sizeof(*bpf_ops);
bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
if (bpf_ops == NULL) {
ret = -ENOMEM;
@@ -188,14 +193,14 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
- tmp.len = bpf_len;
+ tmp.len = bpf_num_ops;
tmp.filter = bpf_ops;
ret = bpf_prog_create(&fp, &tmp);
if (ret)
goto errout_free;
- prog->bpf_len = bpf_len;
+ prog->bpf_num_ops = bpf_num_ops;
prog->bpf_ops = bpf_ops;
prog->filter = fp;
prog->res.classid = classid;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
struct cls_bpf_head *head)
{
unsigned int i = 0x80000000;
+ u32 handle;
do {
if (++head->hgen == 0x7FFFFFFF)
head->hgen = 1;
} while (--i > 0 && cls_bpf_get(tp, head->hgen));
- if (i == 0)
+
+ if (unlikely(i == 0)) {
pr_err("Insufficient number of handles\n");
+ handle = 0;
+ } else {
+ handle = head->hgen;
+ }
- return i;
+ return handle;
}
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
@@ -303,10 +314,10 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
goto nla_put_failure;
- if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
+ if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
goto nla_put_failure;
- nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
+ nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
sizeof(struct sock_filter));
if (nla == NULL)
goto nla_put_failure;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 15d68f24a521..461410394d08 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -77,7 +77,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
if (flow->dst)
return ntohl(flow->dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+ return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
@@ -98,7 +98,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys
if (flow->ports)
return ntohs(flow->port16[1]);
- return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+ return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_iif(const struct sk_buff *skb)
@@ -144,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, src.u3.ip));
case htons(ETH_P_IPV6):
@@ -156,7 +156,7 @@ fallback:
static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, dst.u3.ip));
case htons(ETH_P_IPV6):
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index 5b4a4efe468c..a3d79c8bf3b8 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
struct net_device *dev, *indev = NULL;
int ret, network_offset;
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
acpar.family = NFPROTO_IPV4;
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index c8f8c399b99a..b5294ce20cd4 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -176,7 +176,7 @@ META_COLLECTOR(int_vlan_tag)
{
unsigned short tag;
- tag = vlan_tx_tag_get(skb);
+ tag = skb_vlan_tag_get(skb);
if (!tag && __vlan_get_tag(skb, &tag))
*err = -1;
else
@@ -197,7 +197,7 @@ META_COLLECTOR(int_priority)
META_COLLECTOR(int_protocol)
{
/* Let userspace take care of the byte ordering */
- dst->value = skb->protocol;
+ dst->value = tc_skb_protocol(skb);
}
META_COLLECTOR(int_pkttype)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 76f402e05bd6..243b7d169d61 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1807,7 +1807,7 @@ done:
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- __be16 protocol = skb->protocol;
+ __be16 protocol = tc_skb_protocol(skb);
int err;
for (; tp; tp = rcu_dereference_bh(tp->next)) {
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 227114f27f94..66700a6116aa 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -203,7 +203,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
if (p->set_tc_index) {
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
if (skb_cow_head(skb, sizeof(struct iphdr)))
goto drop;
@@ -289,7 +289,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
index = skb->tc_index & (p->indices - 1);
pr_debug("index %d->%d\n", skb->tc_index, index);
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
p->value[index]);
@@ -306,7 +306,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
*/
if (p->mask[index] != 0xff || p->value[index])
pr_warn("%s: unsupported protocol %d\n",
- __func__, ntohs(skb->protocol));
+ __func__, ntohs(tc_skb_protocol(skb)));
break;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 9b05924cc386..dfcea20e3171 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1,7 +1,7 @@
/*
* net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
*
- * Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -52,6 +52,7 @@
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
+#include <net/tcp.h>
/*
* Per flow structure, dynamically allocated
@@ -92,6 +93,7 @@ struct fq_sched_data {
u32 flow_refill_delay;
u32 flow_max_rate; /* optional max rate per flow */
u32 flow_plimit; /* max packets per flow */
+ u32 orphan_mask; /* mask for orphaned skb */
struct rb_root *fq_root;
u8 rate_enable;
u8 fq_trees_log;
@@ -222,11 +224,20 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
return &q->internal;
- if (unlikely(!sk)) {
+ /* SYNACK messages are attached to a listener socket.
+ * 1) They are not part of a 'flow' yet
+ * 2) We do not want to rate limit them (eg SYNFLOOD attack),
+ * especially if the listener set SO_MAX_PACING_RATE
+ * 3) We pretend they are orphaned
+ */
+ if (!sk || sk->sk_state == TCP_LISTEN) {
+ unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
+
/* By forcing low order bit to 1, we make sure to not
* collide with a local flow (socket pointers are word aligned)
*/
- sk = (struct sock *)(skb_get_hash(skb) | 1L);
+ sk = (struct sock *)((hash << 1) | 1UL);
+ skb_orphan(skb);
}
root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
@@ -445,7 +456,9 @@ begin:
goto begin;
}
- if (unlikely(f->head && now < f->time_next_packet)) {
+ skb = f->head;
+ if (unlikely(skb && now < f->time_next_packet &&
+ !skb_is_tcp_pure_ack(skb))) {
head->first = f->next;
fq_flow_set_throttled(q, f);
goto begin;
@@ -464,14 +477,17 @@ begin:
goto begin;
}
prefetch(&skb->end);
- f->time_next_packet = now;
f->credit -= qdisc_pkt_len(skb);
if (f->credit > 0 || !q->rate_enable)
goto out;
+ /* Do not pace locally generated ack packets */
+ if (skb_is_tcp_pure_ack(skb))
+ goto out;
+
rate = q->flow_max_rate;
- if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+ if (skb->sk)
rate = min(skb->sk->sk_pacing_rate, rate);
if (rate != ~0U) {
@@ -670,8 +686,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_FQ_FLOW_PLIMIT])
q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
- if (tb[TCA_FQ_QUANTUM])
- q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+ if (tb[TCA_FQ_QUANTUM]) {
+ u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+ if (quantum > 0)
+ q->quantum = quantum;
+ else
+ err = -EINVAL;
+ }
if (tb[TCA_FQ_INITIAL_QUANTUM])
q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
@@ -698,6 +720,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
+ if (tb[TCA_FQ_ORPHAN_MASK])
+ q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+
if (!err) {
sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
@@ -743,6 +768,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
q->delayed = RB_ROOT;
q->fq_root = NULL;
q->fq_trees_log = ilog2(1024);
+ q->orphan_mask = 1024 - 1;
qdisc_watchdog_init(&q->watchdog, sch);
if (opt)
@@ -772,6 +798,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
jiffies_to_usecs(q->flow_refill_delay)) ||
+ nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
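Aside on the fq_classify() change above: packets without an attached socket (and now SYNACKs from listener sockets) are hashed into one of orphan_mask + 1 synthetic flows, and the low bit of the key is forced to 1 so it can never alias a real, word-aligned struct sock pointer. A small standalone demonstration of that arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t orphan_mask = 1024 - 1;	/* default set in fq_init() */
	uint32_t skb_hash = 0xdeadbeefu;	/* stand-in for skb_get_hash() */
	unsigned long hash = skb_hash & orphan_mask;
	unsigned long key = (hash << 1) | 1UL;	/* low bit always set */

	/* real struct sock pointers are word aligned, so their low bit
	 * is 0; synthetic keys therefore occupy a disjoint key space */
	printf("bucket key %#lx, low bit %lu\n", key, key & 1UL);
	return 0;
}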
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 6ada42396a24..e02687185a59 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -122,13 +122,6 @@ teql_peek(struct Qdisc *sch)
return NULL;
}
-static inline void
-teql_neigh_release(struct neighbour *n)
-{
- if (n)
- neigh_release(n);
-}
-
static void
teql_reset(struct Qdisc *sch)
{
@@ -249,8 +242,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
char haddr[MAX_ADDR_LEN];
neigh_ha_snapshot(haddr, n, dev);
- err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
- NULL, skb->len);
+ err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+ haddr, NULL, skb->len);
if (err < 0)
err = -EINVAL;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f791edd64d6c..197c3f59ecbf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -391,8 +391,7 @@ void sctp_association_free(struct sctp_association *asoc)
sctp_asconf_queue_teardown(asoc);
/* Free pending address space being deleted */
- if (asoc->asconf_addr_del_pending != NULL)
- kfree(asoc->asconf_addr_del_pending);
+ kfree(asoc->asconf_addr_del_pending);
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1182,7 +1181,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
- sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e49e231cef52..06320c8c1c86 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:
addr_param = param.v + sizeof(sctp_addip_param_t);
- af = sctp_get_af_specific(param_type2af(param.p->type));
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
if (af == NULL)
break;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2625eccb77d5..aafe94bf292e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1603,7 +1603,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_assoc_t associd = 0;
sctp_cmsgs_t cmsgs = { NULL };
sctp_scope_t scope;
- bool fill_sinfo_ttl = false;
+ bool fill_sinfo_ttl = false, wait_connect = false;
struct sctp_datamsg *datamsg;
int msg_flags = msg->msg_flags;
__u16 sinfo_flags = 0;
@@ -1943,6 +1943,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
if (err < 0)
goto out_free;
+ wait_connect = true;
pr_debug("%s: we associated primitively\n", __func__);
}
@@ -1980,6 +1981,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_datamsg_put(datamsg);
err = msg_len;
+ if (unlikely(wait_connect)) {
+ timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
+ sctp_wait_for_connect(asoc, &timeo);
+ }
+
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
*/
diff --git a/net/socket.c b/net/socket.c
index a2c33a4dc7ba..bbedbfcb42c2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -113,10 +113,8 @@ unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
@@ -142,8 +140,10 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .aio_read = sock_aio_read,
- .aio_write = sock_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = sock_read_iter,
+ .write_iter = sock_write_iter,
.poll = sock_poll,
.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
@@ -613,13 +613,6 @@ EXPORT_SYMBOL(__sock_tx_timestamp);
static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size)
{
- struct sock_iocb *si = kiocb_to_siocb(iocb);
-
- si->sock = sock;
- si->scm = NULL;
- si->msg = msg;
- si->size = size;
-
return sock->ops->sendmsg(iocb, sock, msg, size);
}
@@ -635,11 +628,9 @@ static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size, bool nosec)
{
struct kiocb iocb;
- struct sock_iocb siocb;
int ret;
init_sync_kiocb(&iocb, NULL);
- iocb.private = &siocb;
ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) :
__sock_sendmsg(&iocb, sock, msg, size);
if (-EIOCBQUEUED == ret)
@@ -756,14 +747,6 @@ EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
- struct sock_iocb *si = kiocb_to_siocb(iocb);
-
- si->sock = sock;
- si->scm = NULL;
- si->msg = msg;
- si->size = size;
- si->flags = flags;
-
return sock->ops->recvmsg(iocb, sock, msg, size, flags);
}
@@ -779,11 +762,9 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct kiocb iocb;
- struct sock_iocb siocb;
int ret;
init_sync_kiocb(&iocb, NULL);
- iocb.private = &siocb;
ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
@@ -795,11 +776,9 @@ static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct kiocb iocb;
- struct sock_iocb siocb;
int ret;
init_sync_kiocb(&iocb, NULL);
- iocb.private = &siocb;
ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
@@ -866,92 +845,47 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}
-static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
- struct sock_iocb *siocb)
-{
- if (!is_sync_kiocb(iocb))
- BUG();
-
- siocb->kiocb = iocb;
- iocb->private = siocb;
- return siocb;
-}
-
-static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
- struct file *file, const struct iovec *iov,
- unsigned long nr_segs)
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct socket *sock = file->private_data;
- size_t size = 0;
- int i;
-
- for (i = 0; i < nr_segs; i++)
- size += iov[i].iov_len;
+ struct msghdr msg = {.msg_iter = *to};
+ ssize_t res;
- msg->msg_name = NULL;
- msg->msg_namelen = 0;
- msg->msg_control = NULL;
- msg->msg_controllen = 0;
- iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, size);
- msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+ if (file->f_flags & O_NONBLOCK)
+ msg.msg_flags = MSG_DONTWAIT;
- return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
-}
-
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-{
- struct sock_iocb siocb, *x;
-
- if (pos != 0)
+ if (iocb->ki_pos != 0)
return -ESPIPE;
if (iocb->ki_nbytes == 0) /* Match SYS5 behaviour */
return 0;
-
- x = alloc_sock_iocb(iocb, &siocb);
- if (!x)
- return -ENOMEM;
- return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+ res = __sock_recvmsg(iocb, sock, &msg,
+ iocb->ki_nbytes, msg.msg_flags);
+ *to = msg.msg_iter;
+ return res;
}
-static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
- struct file *file, const struct iovec *iov,
- unsigned long nr_segs)
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct socket *sock = file->private_data;
- size_t size = 0;
- int i;
-
- for (i = 0; i < nr_segs; i++)
- size += iov[i].iov_len;
-
- msg->msg_name = NULL;
- msg->msg_namelen = 0;
- msg->msg_control = NULL;
- msg->msg_controllen = 0;
- iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, size);
- msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
- if (sock->type == SOCK_SEQPACKET)
- msg->msg_flags |= MSG_EOR;
-
- return __sock_sendmsg(iocb, sock, msg, size);
-}
+ struct msghdr msg = {.msg_iter = *from};
+ ssize_t res;
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-{
- struct sock_iocb siocb, *x;
-
- if (pos != 0)
+ if (iocb->ki_pos != 0)
return -ESPIPE;
- x = alloc_sock_iocb(iocb, &siocb);
- if (!x)
- return -ENOMEM;
+ if (file->f_flags & O_NONBLOCK)
+ msg.msg_flags = MSG_DONTWAIT;
+
+ if (sock->type == SOCK_SEQPACKET)
+ msg.msg_flags |= MSG_EOR;
- return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+ res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes);
+ *from = msg.msg_iter;
+ return res;
}
/*
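The socket_file_ops conversion above follows the generic aio_read/aio_write to read_iter/write_iter migration of this cycle. A minimal sketch of the same pattern for a hypothetical character device; the foo_* names are illustrative only and not part of this series:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uio.h>

/* Hypothetical device state, only to keep the sketch self-contained */
struct foo_dev {
	char	buf[64];
	size_t	len;
};

static ssize_t foo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct foo_dev *dev = iocb->ki_filp->private_data;
	size_t want = iov_iter_count(to);

	/* copy_to_iter() advances *to, so the sync wrapper can see how
	 * much was actually consumed, just as sock_read_iter() copies
	 * msg.msg_iter back into *to above */
	return copy_to_iter(dev->buf, min_t(size_t, want, dev->len), to);
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= new_sync_read,	/* sync path wraps ->read_iter */
	.read_iter	= foo_read_iter,
};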
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 05da12a33945..612aa73bbc60 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -286,10 +286,8 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
- clnt->cl_nodelen = strlen(nodename);
- if (clnt->cl_nodelen > UNX_MAXNODENAME)
- clnt->cl_nodelen = UNX_MAXNODENAME;
- memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+ clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
+ nodename, sizeof(clnt->cl_nodename));
}
static int rpc_client_register(struct rpc_clnt *clnt,
@@ -365,6 +363,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
const struct rpc_version *version;
struct rpc_clnt *clnt = NULL;
const struct rpc_timeout *timeout;
+ const char *nodename = args->nodename;
int err;
/* sanity check the name before trying to print it */
@@ -420,8 +419,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
atomic_set(&clnt->cl_count, 1);
+ if (nodename == NULL)
+ nodename = utsname()->nodename;
/* save the nodename */
- rpc_clnt_set_nodename(clnt, utsname()->nodename);
+ rpc_clnt_set_nodename(clnt, nodename);
err = rpc_client_register(clnt, args->authflavor, args->client_name);
if (err)
@@ -576,6 +577,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
if (xprt == NULL)
goto out_err;
args->servername = xprt->servername;
+ args->nodename = clnt->cl_nodename;
new = rpc_new_client(args, xprt, clnt);
if (IS_ERR(new)) {
@@ -1824,6 +1826,7 @@ call_connect_status(struct rpc_task *task)
case -ECONNABORTED:
case -ENETUNREACH:
case -EHOSTUNREACH:
+ case -EADDRINUSE:
case -ENOBUFS:
case -EPIPE:
if (RPC_IS_SOFTCONN(task))
@@ -1932,6 +1935,7 @@ call_transmit_status(struct rpc_task *task)
}
case -ECONNRESET:
case -ECONNABORTED:
+ case -EADDRINUSE:
case -ENOTCONN:
case -ENOBUFS:
case -EPIPE:
@@ -2051,6 +2055,7 @@ call_status(struct rpc_task *task)
case -ECONNRESET:
case -ECONNABORTED:
rpc_force_rebind(clnt);
+ case -EADDRINUSE:
case -ENOBUFS:
rpc_delay(task, 3*HZ);
case -EPIPE:
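rpc_clnt_set_nodename() now relies on the strlcpy() contract: the destination is always NUL-terminated within its size, and the return value is the length of the source string. A quick userspace illustration with a stand-in implementation, since strlcpy() is a kernel/BSD routine rather than glibc:

#include <stdio.h>
#include <string.h>

/* minimal stand-in with the same contract as the kernel's strlcpy() */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;	/* length of the source, not of what was stored */
}

int main(void)
{
	char nodename[8];
	size_t ret = my_strlcpy(nodename, "very-long-hostname",
				sizeof(nodename));

	printf("stored \"%s\", returned %zu\n", nodename, ret);
	return 0;
}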
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 05202012bcfc..cf5770d8f49a 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -355,7 +355,8 @@ out:
return result;
}
-static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
+static struct rpc_clnt *rpcb_create(struct net *net, const char *nodename,
+ const char *hostname,
struct sockaddr *srvaddr, size_t salen,
int proto, u32 version)
{
@@ -365,6 +366,7 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
.address = srvaddr,
.addrsize = salen,
.servername = hostname,
+ .nodename = nodename,
.program = &rpcb_program,
.version = version,
.authflavor = RPC_AUTH_UNIX,
@@ -740,7 +742,9 @@ void rpcb_getport_async(struct rpc_task *task)
dprintk("RPC: %5u %s: trying rpcbind version %u\n",
task->tk_pid, __func__, bind_version);
- rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen,
+ rpcb_clnt = rpcb_create(xprt->xprt_net,
+ clnt->cl_nodename,
+ xprt->servername, sap, salen,
xprt->prot, bind_version);
if (IS_ERR(rpcb_clnt)) {
status = PTR_ERR(rpcb_clnt);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d20f2329eea3..b91fd9c597b4 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -844,10 +844,10 @@ static void rpc_async_schedule(struct work_struct *work)
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
- gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
+ gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
if (RPC_IS_SWAPPER(task))
- gfp |= __GFP_MEMALLOC;
+ gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
@@ -1069,7 +1069,8 @@ static int rpciod_start(void)
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1);
+ /* Note: highpri because network receive is latency sensitive */
+ wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 9711a155bc50..2ecb994314c1 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -140,22 +140,20 @@ void rpc_free_iostats(struct rpc_iostats *stats)
EXPORT_SYMBOL_GPL(rpc_free_iostats);
/**
- * rpc_count_iostats - tally up per-task stats
+ * rpc_count_iostats_metrics - tally up per-task stats
* @task: completed rpc_task
- * @stats: array of stat structures
+ * @op_metrics: stat structure for OP that will accumulate stats from @task
*/
-void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
+void rpc_count_iostats_metrics(const struct rpc_task *task,
+ struct rpc_iostats *op_metrics)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_iostats *op_metrics;
ktime_t delta, now;
- if (!stats || !req)
+ if (!op_metrics || !req)
return;
now = ktime_get();
- op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
-
spin_lock(&op_metrics->om_lock);
op_metrics->om_ops++;
@@ -175,6 +173,20 @@ void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
spin_unlock(&op_metrics->om_lock);
}
+EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);
+
+/**
+ * rpc_count_iostats - tally up per-task stats
+ * @task: completed rpc_task
+ * @stats: array of stat structures
+ *
+ * Uses the statidx from @task
+ */
+void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
+{
+ rpc_count_iostats_metrics(task,
+ &stats[task->tk_msg.rpc_proc->p_statidx]);
+}
EXPORT_SYMBOL_GPL(rpc_count_iostats);
static void _print_name(struct seq_file *seq, unsigned int op,
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 91eaef1844c8..78974e4d9ad2 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -768,8 +768,8 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
EXPORT_SYMBOL_GPL(svc_set_num_threads);
/*
- * Called from a server thread as it's exiting. Caller must hold the BKL or
- * the "service mutex", whichever is appropriate for the service.
+ * Called from a server thread as it's exiting. Caller must hold the "service
+ * mutex" for the service.
*/
void
svc_exit_thread(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c69358b3cf7f..163ac45c3639 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -42,7 +42,7 @@ static LIST_HEAD(svc_xprt_class_list);
* svc_pool->sp_lock protects most of the fields of that pool.
* svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
* when both need to be taken (rare), svc_serv->sv_lock is first.
- * BKL protects svc_serv->sv_nrthread.
+ * The "service mutex" protects svc_serv->sv_nrthread.
* svc_sock->sk_lock protects the svc_sock->sk_deferred list
* and the ->sk_info_authunix cache.
*
@@ -67,7 +67,6 @@ static LIST_HEAD(svc_xprt_class_list);
* that no other thread will be using the transport or will
* try to set XPT_DEAD.
*/
-
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
struct svc_xprt_class *cl;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ebbefad21a37..e3015aede0d9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -683,13 +683,43 @@ xprt_init_autodisconnect(unsigned long data)
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
goto out_abort;
spin_unlock(&xprt->transport_lock);
- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
queue_work(rpciod_workqueue, &xprt->task_cleanup);
return;
out_abort:
spin_unlock(&xprt->transport_lock);
}
+bool xprt_lock_connect(struct rpc_xprt *xprt,
+ struct rpc_task *task,
+ void *cookie)
+{
+ bool ret = false;
+
+ spin_lock_bh(&xprt->transport_lock);
+ if (!test_bit(XPRT_LOCKED, &xprt->state))
+ goto out;
+ if (xprt->snd_task != task)
+ goto out;
+ xprt->snd_task = cookie;
+ ret = true;
+out:
+ spin_unlock_bh(&xprt->transport_lock);
+ return ret;
+}
+
+void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
+{
+ spin_lock_bh(&xprt->transport_lock);
+ if (xprt->snd_task != cookie)
+ goto out;
+ if (!test_bit(XPRT_LOCKED, &xprt->state))
+ goto out;
+ xprt->snd_task = NULL;
+ xprt->ops->release_xprt(xprt, NULL);
+out:
+ spin_unlock_bh(&xprt->transport_lock);
+}
+
/**
* xprt_connect - schedule a transport connect operation
* @task: RPC task that is requesting the connect
@@ -712,9 +742,7 @@ void xprt_connect(struct rpc_task *task)
if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
xprt->ops->close(xprt);
- if (xprt_connected(xprt))
- xprt_release_write(xprt, task);
- else {
+ if (!xprt_connected(xprt)) {
task->tk_rqstp->rq_bytes_sent = 0;
task->tk_timeout = task->tk_rqstp->rq_timeout;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
@@ -726,6 +754,7 @@ void xprt_connect(struct rpc_task *task)
xprt->stat.connect_start = jiffies;
xprt->ops->connect(xprt, task);
}
+ xprt_release_write(xprt, task);
}
static void xprt_connect_status(struct rpc_task *task)
@@ -758,7 +787,6 @@ static void xprt_connect_status(struct rpc_task *task)
dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
"server %s\n", task->tk_pid, -task->tk_status,
xprt->servername);
- xprt_release_write(xprt, task);
task->tk_status = -EIO;
}
}
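The new xprt_lock_connect()/xprt_unlock_connect() helpers let a connect path keep XPRT_LOCKED on behalf of a task by swapping an opaque cookie into xprt->snd_task. The real callers live in the transport connect code outside this excerpt; a hypothetical caller shape, for orientation only:

static void example_connect_worker(struct rpc_xprt *xprt,
				   struct rpc_task *task, void *cookie)
{
	/* claim the transport from the requesting task */
	if (!xprt_lock_connect(xprt, task, cookie))
		return;		/* someone else owns the transport */

	/* ... kick off the (possibly asynchronous) connect here ... */

	/* hand XPRT_LOCKED back once the connect attempt is settled */
	xprt_unlock_connect(xprt, cookie);
}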
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index df01d124936c..7e9acd9361c5 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -209,9 +209,11 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
if (cur_rchunk) { /* read */
cur_rchunk->rc_discrim = xdr_one;
/* all read chunks have the same "position" */
- cur_rchunk->rc_position = htonl(pos);
- cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
- cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
+ cur_rchunk->rc_position = cpu_to_be32(pos);
+ cur_rchunk->rc_target.rs_handle =
+ cpu_to_be32(seg->mr_rkey);
+ cur_rchunk->rc_target.rs_length =
+ cpu_to_be32(seg->mr_len);
xdr_encode_hyper(
(__be32 *)&cur_rchunk->rc_target.rs_offset,
seg->mr_base);
@@ -222,8 +224,10 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
cur_rchunk++;
r_xprt->rx_stats.read_chunk_count++;
} else { /* write/reply */
- cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
- cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
+ cur_wchunk->wc_target.rs_handle =
+ cpu_to_be32(seg->mr_rkey);
+ cur_wchunk->wc_target.rs_length =
+ cpu_to_be32(seg->mr_len);
xdr_encode_hyper(
(__be32 *)&cur_wchunk->wc_target.rs_offset,
seg->mr_base);
@@ -257,7 +261,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
*iptr++ = xdr_zero; /* encode a NULL reply chunk */
} else {
warray->wc_discrim = xdr_one;
- warray->wc_nchunks = htonl(nchunks);
+ warray->wc_nchunks = cpu_to_be32(nchunks);
iptr = (__be32 *) cur_wchunk;
if (type == rpcrdma_writech) {
*iptr++ = xdr_zero; /* finish the write chunk list */
@@ -290,7 +294,7 @@ ssize_t
rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
{
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+ struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);
if (req->rl_rtype != rpcrdma_noch)
result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
@@ -402,13 +406,12 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
base = rqst->rq_svec[0].iov_base;
rpclen = rqst->rq_svec[0].iov_len;
- /* build RDMA header in private area at front */
- headerp = (struct rpcrdma_msg *) req->rl_base;
- /* don't htonl XID, it's already done in request */
+ headerp = rdmab_to_msg(req->rl_rdmabuf);
+ /* don't byte-swap XID, it's already done in request */
headerp->rm_xid = rqst->rq_xid;
- headerp->rm_vers = xdr_one;
- headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
- headerp->rm_type = htonl(RDMA_MSG);
+ headerp->rm_vers = rpcrdma_version;
+ headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
+ headerp->rm_type = rdma_msg;
/*
* Chunks needed for results?
@@ -468,7 +471,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
return -EIO;
}
- hdrlen = 28; /*sizeof *headerp;*/
+ hdrlen = RPCRDMA_HDRLEN_MIN;
padlen = 0;
/*
@@ -482,11 +485,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
RPCRDMA_INLINE_PAD_VALUE(rqst));
if (padlen) {
- headerp->rm_type = htonl(RDMA_MSGP);
+ headerp->rm_type = rdma_msgp;
headerp->rm_body.rm_padded.rm_align =
- htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
+ cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
headerp->rm_body.rm_padded.rm_thresh =
- htonl(RPCRDMA_INLINE_PAD_THRESH);
+ cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
@@ -524,7 +527,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
" headerp 0x%p base 0x%p lkey 0x%x\n",
__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
- headerp, base, req->rl_iov.lkey);
+ headerp, base, rdmab_lkey(req->rl_rdmabuf));
/*
* initialize send_iov's - normally only two: rdma chunk header and
@@ -533,26 +536,26 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* header and any write data. In all non-rdma cases, any following
* data has been copied into the RPC header buffer.
*/
- req->rl_send_iov[0].addr = req->rl_iov.addr;
+ req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
req->rl_send_iov[0].length = hdrlen;
- req->rl_send_iov[0].lkey = req->rl_iov.lkey;
+ req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
- req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
+ req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
req->rl_send_iov[1].length = rpclen;
- req->rl_send_iov[1].lkey = req->rl_iov.lkey;
+ req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
req->rl_niovs = 2;
if (padlen) {
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
- req->rl_send_iov[2].addr = ep->rep_pad.addr;
+ req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
req->rl_send_iov[2].length = padlen;
- req->rl_send_iov[2].lkey = ep->rep_pad.lkey;
+ req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);
req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
- req->rl_send_iov[3].lkey = req->rl_iov.lkey;
+ req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf);
req->rl_niovs = 4;
}
@@ -569,8 +572,9 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
{
unsigned int i, total_len;
struct rpcrdma_write_chunk *cur_wchunk;
+ char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
- i = ntohl(**iptrp); /* get array count */
+ i = be32_to_cpu(**iptrp);
if (i > max)
return -1;
cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
@@ -582,11 +586,11 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
__func__,
- ntohl(seg->rs_length),
+ be32_to_cpu(seg->rs_length),
(unsigned long long)off,
- ntohl(seg->rs_handle));
+ be32_to_cpu(seg->rs_handle));
}
- total_len += ntohl(seg->rs_length);
+ total_len += be32_to_cpu(seg->rs_length);
++cur_wchunk;
}
/* check and adjust for properly terminated write chunk */
@@ -596,7 +600,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
return -1;
cur_wchunk = (struct rpcrdma_write_chunk *) w;
}
- if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
+ if ((char *)cur_wchunk > base + rep->rr_len)
return -1;
*iptrp = (__be32 *) cur_wchunk;
@@ -691,7 +695,9 @@ rpcrdma_connect_worker(struct work_struct *work)
{
struct rpcrdma_ep *ep =
container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
- struct rpc_xprt *xprt = ep->rep_xprt;
+ struct rpcrdma_xprt *r_xprt =
+ container_of(ep, struct rpcrdma_xprt, rx_ep);
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
spin_lock_bh(&xprt->transport_lock);
if (++xprt->connect_cookie == 0) /* maintain a reserved value */
@@ -732,7 +738,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
struct rpc_xprt *xprt = rep->rr_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
__be32 *iptr;
- int rdmalen, status;
+ int credits, rdmalen, status;
unsigned long cwnd;
/* Check status. If bad, signal disconnect and return rep to pool */
@@ -744,14 +750,14 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
}
return;
}
- if (rep->rr_len < 28) {
+ if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
dprintk("RPC: %s: short/invalid reply\n", __func__);
goto repost;
}
- headerp = (struct rpcrdma_msg *) rep->rr_base;
- if (headerp->rm_vers != xdr_one) {
+ headerp = rdmab_to_msg(rep->rr_rdmabuf);
+ if (headerp->rm_vers != rpcrdma_version) {
dprintk("RPC: %s: invalid version %d\n",
- __func__, ntohl(headerp->rm_vers));
+ __func__, be32_to_cpu(headerp->rm_vers));
goto repost;
}
@@ -762,7 +768,8 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
spin_unlock(&xprt->transport_lock);
dprintk("RPC: %s: reply 0x%p failed "
"to match any request xid 0x%08x len %d\n",
- __func__, rep, headerp->rm_xid, rep->rr_len);
+ __func__, rep, be32_to_cpu(headerp->rm_xid),
+ rep->rr_len);
repost:
r_xprt->rx_stats.bad_reply_count++;
rep->rr_func = rpcrdma_reply_handler;
@@ -778,13 +785,14 @@ repost:
spin_unlock(&xprt->transport_lock);
dprintk("RPC: %s: duplicate reply 0x%p to RPC "
"request 0x%p: xid 0x%08x\n", __func__, rep, req,
- headerp->rm_xid);
+ be32_to_cpu(headerp->rm_xid));
goto repost;
}
dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
" RPC request 0x%p xid 0x%08x\n",
- __func__, rep, req, rqst, headerp->rm_xid);
+ __func__, rep, req, rqst,
+ be32_to_cpu(headerp->rm_xid));
/* from here on, the reply is no longer an orphan */
req->rl_reply = rep;
@@ -793,7 +801,7 @@ repost:
/* check for expected message types */
/* The order of some of these tests is important. */
switch (headerp->rm_type) {
- case htonl(RDMA_MSG):
+ case rdma_msg:
/* never expect read chunks */
/* never expect reply chunks (two ways to check) */
/* never expect write chunks without having offered RDMA */
@@ -824,22 +832,24 @@ repost:
} else {
/* else ordinary inline */
rdmalen = 0;
- iptr = (__be32 *)((unsigned char *)headerp + 28);
- rep->rr_len -= 28; /*sizeof *headerp;*/
+ iptr = (__be32 *)((unsigned char *)headerp +
+ RPCRDMA_HDRLEN_MIN);
+ rep->rr_len -= RPCRDMA_HDRLEN_MIN;
status = rep->rr_len;
}
/* Fix up the rpc results for upper layer */
rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
break;
- case htonl(RDMA_NOMSG):
+ case rdma_nomsg:
/* never expect read or write chunks, always reply chunks */
if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
headerp->rm_body.rm_chunks[1] != xdr_zero ||
headerp->rm_body.rm_chunks[2] != xdr_one ||
req->rl_nchunks == 0)
goto badheader;
- iptr = (__be32 *)((unsigned char *)headerp + 28);
+ iptr = (__be32 *)((unsigned char *)headerp +
+ RPCRDMA_HDRLEN_MIN);
rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
if (rdmalen < 0)
goto badheader;
@@ -853,7 +863,7 @@ badheader:
dprintk("%s: invalid rpcrdma reply header (type %d):"
" chunks[012] == %d %d %d"
" expected chunks <= %d\n",
- __func__, ntohl(headerp->rm_type),
+ __func__, be32_to_cpu(headerp->rm_type),
headerp->rm_body.rm_chunks[0],
headerp->rm_body.rm_chunks[1],
headerp->rm_body.rm_chunks[2],
@@ -863,8 +873,14 @@ badheader:
break;
}
+ credits = be32_to_cpu(headerp->rm_credit);
+ if (credits == 0)
+ credits = 1; /* don't deadlock */
+ else if (credits > r_xprt->rx_buf.rb_max_requests)
+ credits = r_xprt->rx_buf.rb_max_requests;
+
cwnd = xprt->cwnd;
- xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
+ xprt->cwnd = credits << RPC_CWNDSHIFT;
if (xprt->cwnd > cwnd)
xprt_release_rqst_cong(rqst->rq_task);
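The htonl()/ntohl() to cpu_to_be32()/be32_to_cpu() conversion throughout rpc_rdma.c does not change the bytes on the wire; it attaches __be32 typing so sparse can check the RPC-over-RDMA header fields. A trivial userspace analogue of the equivalence:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t host = 0x12345678u;
	uint32_t wire = htonl(host);	/* kernel: cpu_to_be32(host) */

	/* round-trips to the same value; only the type annotation differs
	 * in kernel code */
	printf("host %#x -> wire %#x -> host %#x\n", host, wire, ntohl(wire));
	return 0;
}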
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 65b146297f5a..b681855cf970 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -71,22 +71,6 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
}
/*
- * Determine number of chunks and total bytes in chunk list. The chunk
- * list has already been verified to fit within the RPCRDMA header.
- */
-void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
- int *ch_count, int *byte_count)
-{
- /* compute the number of bytes represented by read chunks */
- *byte_count = 0;
- *ch_count = 0;
- for (; ch->rc_discrim != 0; ch++) {
- *byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
- *ch_count = *ch_count + 1;
- }
-}
-
-/*
* Decodes a write chunk list. The expected format is as follows:
* descrim : xdr_one
* nchunks : <count>
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index e0110270d650..f9f13a32ddb8 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -43,7 +43,6 @@
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
-#include <linux/highmem.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -60,6 +59,7 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *ctxt,
u32 byte_count)
{
+ struct rpcrdma_msg *rmsgp;
struct page *page;
u32 bc;
int sge_no;
@@ -82,7 +82,14 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
/* If data remains, store it in the pagelist */
rqstp->rq_arg.page_len = bc;
rqstp->rq_arg.page_base = 0;
- rqstp->rq_arg.pages = &rqstp->rq_pages[1];
+
+ /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+ rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
+ if (be32_to_cpu(rmsgp->rm_type) == RDMA_NOMSG)
+ rqstp->rq_arg.pages = &rqstp->rq_pages[0];
+ else
+ rqstp->rq_arg.pages = &rqstp->rq_pages[1];
+
sge_no = 1;
while (bc && sge_no < ctxt->count) {
page = ctxt->pages[sge_no];
@@ -95,14 +102,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
rqstp->rq_respages = &rqstp->rq_pages[sge_no];
rqstp->rq_next_page = rqstp->rq_respages + 1;
- /* We should never run out of SGE because the limit is defined to
- * support the max allowed RPC data length
- */
- BUG_ON(bc && (sge_no == ctxt->count));
- BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
- != byte_count);
- BUG_ON(rqstp->rq_arg.len != byte_count);
-
/* If not all pages were used from the SGL, free the remaining ones */
bc = sge_no;
while (sge_no < ctxt->count) {
@@ -125,26 +124,16 @@ static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
return min_t(int, sge_count, xprt->sc_max_sge);
}
-typedef int (*rdma_reader_fn)(struct svcxprt_rdma *xprt,
- struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head,
- int *page_no,
- u32 *page_offset,
- u32 rs_handle,
- u32 rs_length,
- u64 rs_offset,
- int last);
-
/* Issue an RDMA_READ using the local lkey to map the data sink */
-static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
- struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head,
- int *page_no,
- u32 *page_offset,
- u32 rs_handle,
- u32 rs_length,
- u64 rs_offset,
- int last)
+int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_op_ctxt *head,
+ int *page_no,
+ u32 *page_offset,
+ u32 rs_handle,
+ u32 rs_length,
+ u64 rs_offset,
+ bool last)
{
struct ib_send_wr read_wr;
int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
@@ -229,15 +218,15 @@ static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
}
/* Issue an RDMA_READ using an FRMR to map the data sink */
-static int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
- struct svc_rqst *rqstp,
- struct svc_rdma_op_ctxt *head,
- int *page_no,
- u32 *page_offset,
- u32 rs_handle,
- u32 rs_length,
- u64 rs_offset,
- int last)
+int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_op_ctxt *head,
+ int *page_no,
+ u32 *page_offset,
+ u32 rs_handle,
+ u32 rs_length,
+ u64 rs_offset,
+ bool last)
{
struct ib_send_wr read_wr;
struct ib_send_wr inv_wr;
@@ -365,24 +354,84 @@ static int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
return ret;
}
+static unsigned int
+rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
+{
+ unsigned int count;
+
+ for (count = 0; ch->rc_discrim != xdr_zero; ch++)
+ count++;
+ return count;
+}
+
+/* If there was additional inline content, append it to the end of arg.pages.
+ * Tail copy has to be done after the reader function has determined how many
+ * pages are needed for RDMA READ.
+ */
+static int
+rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
+ u32 position, u32 byte_count, u32 page_offset, int page_no)
+{
+ char *srcp, *destp;
+ int ret;
+
+ ret = 0;
+ srcp = head->arg.head[0].iov_base + position;
+ byte_count = head->arg.head[0].iov_len - position;
+ if (byte_count > PAGE_SIZE) {
+ dprintk("svcrdma: large tail unsupported\n");
+ return 0;
+ }
+
+ /* Fit as much of the tail on the current page as possible */
+ if (page_offset != PAGE_SIZE) {
+ destp = page_address(rqstp->rq_arg.pages[page_no]);
+ destp += page_offset;
+ while (byte_count--) {
+ *destp++ = *srcp++;
+ page_offset++;
+ if (page_offset == PAGE_SIZE && byte_count)
+ goto more;
+ }
+ goto done;
+ }
+
+more:
+ /* Fit the rest on the next page */
+ page_no++;
+ destp = page_address(rqstp->rq_arg.pages[page_no]);
+ while (byte_count--)
+ *destp++ = *srcp++;
+
+ rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
+done:
+ byte_count = head->arg.head[0].iov_len - position;
+ head->arg.page_len += byte_count;
+ head->arg.len += byte_count;
+ head->arg.buflen += byte_count;
+ return 1;
+}
+
static int rdma_read_chunks(struct svcxprt_rdma *xprt,
struct rpcrdma_msg *rmsgp,
struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *head)
{
- int page_no, ch_count, ret;
+ int page_no, ret;
struct rpcrdma_read_chunk *ch;
- u32 page_offset, byte_count;
+ u32 handle, page_offset, byte_count;
+ u32 position;
u64 rs_offset;
- rdma_reader_fn reader;
+ bool last;
/* If no read list is present, return 0 */
ch = svc_rdma_get_read_chunk(rmsgp);
if (!ch)
return 0;
- svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
- if (ch_count > RPCSVC_MAXPAGES)
+ if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
return -EINVAL;
/* The request is completed when the RDMA_READs complete. The
@@ -391,34 +440,41 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
*/
head->arg.head[0] = rqstp->rq_arg.head[0];
head->arg.tail[0] = rqstp->rq_arg.tail[0];
- head->arg.pages = &head->pages[head->count];
head->hdr_count = head->count;
head->arg.page_base = 0;
head->arg.page_len = 0;
head->arg.len = rqstp->rq_arg.len;
head->arg.buflen = rqstp->rq_arg.buflen;
- /* Use FRMR if supported */
- if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)
- reader = rdma_read_chunk_frmr;
- else
- reader = rdma_read_chunk_lcl;
+ ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+ position = be32_to_cpu(ch->rc_position);
+
+ /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+ if (position == 0) {
+ head->arg.pages = &head->pages[0];
+ page_offset = head->byte_len;
+ } else {
+ head->arg.pages = &head->pages[head->count];
+ page_offset = 0;
+ }
- page_no = 0; page_offset = 0;
- for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
- ch->rc_discrim != 0; ch++) {
+ ret = 0;
+ page_no = 0;
+ for (; ch->rc_discrim != xdr_zero; ch++) {
+ if (be32_to_cpu(ch->rc_position) != position)
+ goto err;
+ handle = be32_to_cpu(ch->rc_target.rs_handle);
+ byte_count = be32_to_cpu(ch->rc_target.rs_length);
xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
&rs_offset);
- byte_count = ntohl(ch->rc_target.rs_length);
while (byte_count > 0) {
- ret = reader(xprt, rqstp, head,
- &page_no, &page_offset,
- ntohl(ch->rc_target.rs_handle),
- byte_count, rs_offset,
- ((ch+1)->rc_discrim == 0) /* last */
- );
+ last = (ch + 1)->rc_discrim == xdr_zero;
+ ret = xprt->sc_reader(xprt, rqstp, head,
+ &page_no, &page_offset,
+ handle, byte_count,
+ rs_offset, last);
if (ret < 0)
goto err;
byte_count -= ret;
@@ -426,7 +482,24 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
head->arg.buflen += ret;
}
}
+
+ /* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
+ if (page_offset & 3) {
+ u32 pad = 4 - (page_offset & 3);
+
+ head->arg.page_len += pad;
+ head->arg.len += pad;
+ head->arg.buflen += pad;
+ page_offset += pad;
+ }
+
ret = 1;
+ if (position && position < head->arg.head[0].iov_len)
+ ret = rdma_copy_tail(rqstp, head, position,
+ byte_count, page_offset, page_no);
+ head->arg.head[0].iov_len = position;
+ head->position = position;
+
err:
/* Detach arg pages. svc_recv will replenish them */
for (page_no = 0;
@@ -436,47 +509,33 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
return ret;
}
-/*
- * To avoid a separate RDMA READ just for a handful of zero bytes,
- * RFC 5666 section 3.7 allows the client to omit the XDR zero pad
- * in chunk lists.
- */
-static void
-rdma_fix_xdr_pad(struct xdr_buf *buf)
-{
- unsigned int page_len = buf->page_len;
- unsigned int size = (XDR_QUADLEN(page_len) << 2) - page_len;
- unsigned int offset, pg_no;
- char *p;
-
- if (size == 0)
- return;
-
- pg_no = page_len >> PAGE_SHIFT;
- offset = page_len & ~PAGE_MASK;
- p = page_address(buf->pages[pg_no]);
- memset(p + offset, 0, size);
-
- buf->page_len += size;
- buf->buflen += size;
- buf->len += size;
-}
-
static int rdma_read_complete(struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *head)
{
int page_no;
int ret;
- BUG_ON(!head);
-
/* Copy RPC pages */
for (page_no = 0; page_no < head->count; page_no++) {
put_page(rqstp->rq_pages[page_no]);
rqstp->rq_pages[page_no] = head->pages[page_no];
}
+
+ /* Adjustments made for RDMA_NOMSG type requests */
+ if (head->position == 0) {
+ if (head->arg.len <= head->sge[0].length) {
+ head->arg.head[0].iov_len = head->arg.len -
+ head->byte_len;
+ head->arg.page_len = 0;
+ } else {
+ head->arg.head[0].iov_len = head->sge[0].length -
+ head->byte_len;
+ head->arg.page_len = head->arg.len -
+ head->sge[0].length;
+ }
+ }
+
/* Point rq_arg.pages past header */
- rdma_fix_xdr_pad(&head->arg);
rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
rqstp->rq_arg.page_len = head->arg.page_len;
rqstp->rq_arg.page_base = head->arg.page_base;
@@ -501,8 +560,8 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
ret = rqstp->rq_arg.head[0].iov_len
+ rqstp->rq_arg.page_len
+ rqstp->rq_arg.tail[0].iov_len;
- dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
- "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
+ dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
+ "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
rqstp->rq_arg.head[0].iov_len);
@@ -558,7 +617,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
}
dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
ctxt, rdma_xprt, rqstp, ctxt->wc_status);
- BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
atomic_inc(&rdma_stat_recv);
/* Build up the XDR from the receive buffers. */
@@ -591,8 +649,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ rqstp->rq_arg.tail[0].iov_len;
svc_rdma_put_context(ctxt, 0);
out:
- dprintk("svcrdma: ret = %d, rq_arg.len =%d, "
- "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
+ dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
+ "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
ret, rqstp->rq_arg.len,
rqstp->rq_arg.head[0].iov_base,
rqstp->rq_arg.head[0].iov_len);
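The round-up block added to rdma_read_chunks() re-applies the XDR 4-byte pad that RFC 5666 section 3.7 allows the sender to omit from the read list. The padding arithmetic in isolation, as a runnable demo:

#include <stdio.h>
#include <stdint.h>

/* pad needed to bring a length up to the next XDR quad boundary */
static uint32_t xdr_pad(uint32_t len)
{
	return (len & 3) ? 4 - (len & 3) : 0;
}

int main(void)
{
	for (uint32_t len = 0; len < 8; len++)
		printf("len %u -> pad %u -> padded %u\n",
		       len, xdr_pad(len), len + xdr_pad(len));
	return 0;
}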
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 9f1b50689c0f..7de33d1af9b6 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -60,8 +60,11 @@ static int map_xdr(struct svcxprt_rdma *xprt,
u32 page_off;
int page_no;
- BUG_ON(xdr->len !=
- (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));
+ if (xdr->len !=
+ (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
+ pr_err("svcrdma: map_xdr: XDR buffer length error\n");
+ return -EIO;
+ }
/* Skip the first sge, this is for the RPCRDMA header */
sge_no = 1;
@@ -150,7 +153,11 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
int bc;
struct svc_rdma_op_ctxt *ctxt;
- BUG_ON(vec->count > RPCSVC_MAXPAGES);
+ if (vec->count > RPCSVC_MAXPAGES) {
+ pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
+ return -EIO;
+ }
+
dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
"write_len=%d, vec->sge=%p, vec->count=%lu\n",
rmr, (unsigned long long)to, xdr_off,
@@ -190,7 +197,10 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
sge_off = 0;
sge_no++;
xdr_sge_no++;
- BUG_ON(xdr_sge_no > vec->count);
+ if (xdr_sge_no > vec->count) {
+ pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
+ goto err;
+ }
bc -= sge_bytes;
if (sge_no == xprt->sc_max_sge)
break;
@@ -421,7 +431,10 @@ static int send_reply(struct svcxprt_rdma *rdma,
ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
ctxt->sge[sge_no].length = sge_bytes;
}
- BUG_ON(byte_count != 0);
+ if (byte_count != 0) {
+ pr_err("svcrdma: Could not map %d bytes\n", byte_count);
+ goto err;
+ }
/* Save all respages in the ctxt and remove them from the
* respages array. They are our pages until the I/O
@@ -442,7 +455,10 @@ static int send_reply(struct svcxprt_rdma *rdma,
}
rqstp->rq_next_page = rqstp->rq_respages + 1;
- BUG_ON(sge_no > rdma->sc_max_sge);
+ if (sge_no > rdma->sc_max_sge) {
+ pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ goto err;
+ }
memset(&send_wr, 0, sizeof send_wr);
ctxt->wr_op = IB_WR_SEND;
send_wr.wr_id = (unsigned long)ctxt;
@@ -467,18 +483,6 @@ void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
-/*
- * Return the start of an xdr buffer.
- */
-static void *xdr_start(struct xdr_buf *xdr)
-{
- return xdr->head[0].iov_base -
- (xdr->len -
- xdr->page_len -
- xdr->tail[0].iov_len -
- xdr->head[0].iov_len);
-}
-
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
struct svc_xprt *xprt = rqstp->rq_xprt;
@@ -496,8 +500,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
- /* Get the RDMA request header. */
- rdma_argp = xdr_start(&rqstp->rq_arg);
+ /* Get the RDMA request header. The receive logic always
+ * places this at the start of page 0.
+ */
+ rdma_argp = page_address(rqstp->rq_pages[0]);
/* Build an req vec for the XDR */
ctxt = svc_rdma_get_context(rdma);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4e618808bc98..f609c1c2d38d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -139,7 +139,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
struct svcxprt_rdma *xprt;
int i;
- BUG_ON(!ctxt);
xprt = ctxt->xprt;
if (free_pages)
for (i = 0; i < ctxt->count; i++)
@@ -339,12 +338,14 @@ static void process_context(struct svcxprt_rdma *xprt,
switch (ctxt->wr_op) {
case IB_WR_SEND:
- BUG_ON(ctxt->frmr);
+ if (ctxt->frmr)
+ pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
svc_rdma_put_context(ctxt, 1);
break;
case IB_WR_RDMA_WRITE:
- BUG_ON(ctxt->frmr);
+ if (ctxt->frmr)
+ pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
svc_rdma_put_context(ctxt, 0);
break;
@@ -353,19 +354,21 @@ static void process_context(struct svcxprt_rdma *xprt,
svc_rdma_put_frmr(xprt, ctxt->frmr);
if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
- BUG_ON(!read_hdr);
- spin_lock_bh(&xprt->sc_rq_dto_lock);
- set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
- list_add_tail(&read_hdr->dto_q,
- &xprt->sc_read_complete_q);
- spin_unlock_bh(&xprt->sc_rq_dto_lock);
+ if (read_hdr) {
+ spin_lock_bh(&xprt->sc_rq_dto_lock);
+ set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+ list_add_tail(&read_hdr->dto_q,
+ &xprt->sc_read_complete_q);
+ spin_unlock_bh(&xprt->sc_rq_dto_lock);
+ } else {
+ pr_err("svcrdma: ctxt->read_hdr == NULL\n");
+ }
svc_xprt_enqueue(&xprt->sc_xprt);
}
svc_rdma_put_context(ctxt, 0);
break;
default:
- BUG_ON(1);
printk(KERN_ERR "svcrdma: unexpected completion type, "
"opcode=%d\n",
ctxt->wr_op);
@@ -513,7 +516,10 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
buflen = 0;
ctxt->direction = DMA_FROM_DEVICE;
for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
- BUG_ON(sge_no >= xprt->sc_max_sge);
+ if (sge_no >= xprt->sc_max_sge) {
+ pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ goto err_put_ctxt;
+ }
page = svc_rdma_get_page();
ctxt->pages[sge_no] = page;
pa = ib_dma_map_page(xprt->sc_cm_id->device,
@@ -687,7 +693,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
{
struct rdma_cm_id *listen_id;
struct svcxprt_rdma *cma_xprt;
- struct svc_xprt *xprt;
int ret;
dprintk("svcrdma: Creating RDMA socket\n");
@@ -698,7 +703,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
cma_xprt = rdma_create_xprt(serv, 1);
if (!cma_xprt)
return ERR_PTR(-ENOMEM);
- xprt = &cma_xprt->sc_xprt;
listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
IB_QPT_RC);
@@ -822,7 +826,7 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
if (frmr) {
frmr_unmap_dma(rdma, frmr);
spin_lock_bh(&rdma->sc_frmr_q_lock);
- BUG_ON(!list_empty(&frmr->frmr_list));
+ WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
spin_unlock_bh(&rdma->sc_frmr_q_lock);
}
@@ -970,10 +974,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
* NB: iWARP requires remote write access for the data sink
* of an RDMA_READ. IB does not.
*/
+ newxprt->sc_reader = rdma_read_chunk_lcl;
if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
newxprt->sc_frmr_pg_list_len =
devattr.max_fast_reg_page_list_len;
newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
+ newxprt->sc_reader = rdma_read_chunk_frmr;
}
/*
@@ -1125,7 +1131,9 @@ static void __svc_rdma_free(struct work_struct *work)
dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
/* We should only be called from kref_put */
- BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+ if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
+ pr_err("svcrdma: sc_xprt still in use? (%d)\n",
+ atomic_read(&rdma->sc_xprt.xpt_ref.refcount));
/*
* Destroy queued, but not processed read completions. Note
@@ -1153,8 +1161,12 @@ static void __svc_rdma_free(struct work_struct *work)
}
/* Warn if we leaked a resource or under-referenced */
- WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
- WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
+ if (atomic_read(&rdma->sc_ctxt_used) != 0)
+ pr_err("svcrdma: ctxt still in use? (%d)\n",
+ atomic_read(&rdma->sc_ctxt_used));
+ if (atomic_read(&rdma->sc_dma_used) != 0)
+ pr_err("svcrdma: dma still in use? (%d)\n",
+ atomic_read(&rdma->sc_dma_used));
/* De-allocate fastreg mr */
rdma_dealloc_frmr_q(rdma);
@@ -1254,7 +1266,6 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
return -ENOTCONN;
- BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
wr_count = 1;
for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
wr_count++;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index bbd6155d3e34..2e192baa59f3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,9 +200,9 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
- struct rpcrdma_xprt *r_xprt =
- container_of(work, struct rpcrdma_xprt, rdma_connect.work);
- struct rpc_xprt *xprt = &r_xprt->xprt;
+ struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
+ rx_connect_worker.work);
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
int rc = 0;
xprt_clear_connected(xprt);
@@ -235,7 +235,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
dprintk("RPC: %s: called\n", __func__);
- cancel_delayed_work_sync(&r_xprt->rdma_connect);
+ cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
xprt_clear_connected(xprt);
@@ -364,8 +364,7 @@ xprt_setup_rdma(struct xprt_create *args)
* any inline data. Also specify any padding which will be provided
* from a preregistered zero buffer.
*/
- rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
- &new_xprt->rx_data);
+ rc = rpcrdma_buffer_create(new_xprt);
if (rc)
goto out3;
@@ -374,9 +373,8 @@ xprt_setup_rdma(struct xprt_create *args)
* connection loss notification is async. We also catch connection loss
* when reaping receives.
*/
- INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
- new_ep->rep_func = rpcrdma_conn_func;
- new_ep->rep_xprt = xprt;
+ INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
+ xprt_rdma_connect_worker);
xprt_rdma_format_addresses(xprt);
xprt->max_payload = rpcrdma_max_payload(new_xprt);
@@ -434,94 +432,101 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
if (r_xprt->rx_ep.rep_connected != 0) {
/* Reconnect */
- schedule_delayed_work(&r_xprt->rdma_connect,
- xprt->reestablish_timeout);
+ schedule_delayed_work(&r_xprt->rx_connect_worker,
+ xprt->reestablish_timeout);
xprt->reestablish_timeout <<= 1;
if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
} else {
- schedule_delayed_work(&r_xprt->rdma_connect, 0);
+ schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
if (!RPC_IS_ASYNC(task))
- flush_delayed_work(&r_xprt->rdma_connect);
+ flush_delayed_work(&r_xprt->rx_connect_worker);
}
}
/*
* The RDMA allocate/free functions need the task structure as a place
* to hide the struct rpcrdma_req, which is necessary for the actual send/recv
- * sequence. For this reason, the recv buffers are attached to send
- * buffers for portions of the RPC. Note that the RPC layer allocates
- * both send and receive buffers in the same call. We may register
- * the receive buffer portion when using reply chunks.
+ * sequence.
+ *
+ * The RPC layer allocates both send and receive buffers in the same call
+ * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
+ * We may register rq_rcv_buf when using reply chunks.
*/
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
- struct rpcrdma_req *req, *nreq;
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+ struct rpcrdma_regbuf *rb;
+ struct rpcrdma_req *req;
+ size_t min_size;
+ gfp_t flags;
- req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
+ req = rpcrdma_buffer_get(&r_xprt->rx_buf);
if (req == NULL)
return NULL;
- if (size > req->rl_size) {
- dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
- "prog %d vers %d proc %d\n",
- __func__, size, req->rl_size,
- task->tk_client->cl_prog, task->tk_client->cl_vers,
- task->tk_msg.rpc_proc->p_proc);
- /*
- * Outgoing length shortage. Our inline write max must have
- * been configured to perform direct i/o.
- *
- * This is therefore a large metadata operation, and the
- * allocate call was made on the maximum possible message,
- * e.g. containing long filename(s) or symlink data. In
- * fact, while these metadata operations *might* carry
- * large outgoing payloads, they rarely *do*. However, we
- * have to commit to the request here, so reallocate and
- * register it now. The data path will never require this
- * reallocation.
- *
- * If the allocation or registration fails, the RPC framework
- * will (doggedly) retry.
- */
- if (task->tk_flags & RPC_TASK_SWAPPER)
- nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
- else
- nreq = kmalloc(sizeof *req + size, GFP_NOFS);
- if (nreq == NULL)
- goto outfail;
-
- if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
- nreq->rl_base, size + sizeof(struct rpcrdma_req)
- - offsetof(struct rpcrdma_req, rl_base),
- &nreq->rl_handle, &nreq->rl_iov)) {
- kfree(nreq);
- goto outfail;
- }
- rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
- nreq->rl_size = size;
- nreq->rl_niovs = 0;
- nreq->rl_nchunks = 0;
- nreq->rl_buffer = (struct rpcrdma_buffer *)req;
- nreq->rl_reply = req->rl_reply;
- memcpy(nreq->rl_segments,
- req->rl_segments, sizeof nreq->rl_segments);
- /* flag the swap with an unused field */
- nreq->rl_iov.length = 0;
- req->rl_reply = NULL;
- req = nreq;
- }
+ flags = GFP_NOIO | __GFP_NOWARN;
+ if (RPC_IS_SWAPPER(task))
+ flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
+
+ if (req->rl_rdmabuf == NULL)
+ goto out_rdmabuf;
+ if (req->rl_sendbuf == NULL)
+ goto out_sendbuf;
+ if (size > req->rl_sendbuf->rg_size)
+ goto out_sendbuf;
+
+out:
dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
req->rl_connect_cookie = 0; /* our reserved value */
- return req->rl_xdr_buf;
-
-outfail:
+ return req->rl_sendbuf->rg_base;
+
+out_rdmabuf:
+ min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+ rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
+ if (IS_ERR(rb))
+ goto out_fail;
+ req->rl_rdmabuf = rb;
+
+out_sendbuf:
+ /* XDR encoding and RPC/RDMA marshaling of this request has not
+ * yet occurred. Thus a lower bound is needed to prevent buffer
+ * overrun during marshaling.
+ *
+ * RPC/RDMA marshaling may choose to send payload bearing ops
+ * inline, if the result is smaller than the inline threshold.
+ * The value of the "size" argument accounts for header
+ * requirements but not for the payload in these cases.
+ *
+ * Likewise, allocate enough space to receive a reply up to the
+ * size of the inline threshold.
+ *
+ * It's unlikely that both the send header and the received
+ * reply will be large, but slush is provided here to allow
+ * flexibility when marshaling.
+ */
+ min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
+ min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+ if (size < min_size)
+ size = min_size;
+
+ rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
+ if (IS_ERR(rb))
+ goto out_fail;
+ rb->rg_owner = req;
+
+ r_xprt->rx_stats.hardway_register_count += size;
+ rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
+ req->rl_sendbuf = rb;
+ goto out;
+
+out_fail:
rpcrdma_buffer_put(req);
- rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
+ r_xprt->rx_stats.failed_marshal_count++;
return NULL;
}
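For illustration only, here is a minimal standalone sketch of the lower-bound sizing rule described in the comment above; the 1024-byte inline thresholds are made-up stand-ins for RPCRDMA_INLINE_READ_THRESHOLD()/RPCRDMA_INLINE_WRITE_THRESHOLD(), which in the real code depend on the mount's create data. It is not part of this patch.

/* Standalone sketch (not kernel code): how xprt_rdma_allocate() clamps
 * the requested size to a safe lower bound before allocating rl_sendbuf.
 * The threshold values used below are assumptions for the example only.
 */
#include <stdio.h>
#include <stddef.h>

static size_t rdma_send_alloc_size(size_t size,
				   size_t inline_read, size_t inline_write)
{
	/* Room for an inline reply plus the inline send header, so
	 * RPC/RDMA marshaling cannot overrun the buffer later. */
	size_t min_size = inline_read + inline_write;

	return size < min_size ? min_size : size;
}

int main(void)
{
	printf("%zu\n", rdma_send_alloc_size(512, 1024, 1024));   /* 2048 */
	printf("%zu\n", rdma_send_alloc_size(8192, 1024, 1024));  /* 8192 */
	return 0;
}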
@@ -533,47 +538,24 @@ xprt_rdma_free(void *buffer)
{
struct rpcrdma_req *req;
struct rpcrdma_xprt *r_xprt;
- struct rpcrdma_rep *rep;
+ struct rpcrdma_regbuf *rb;
int i;
if (buffer == NULL)
return;
- req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
- if (req->rl_iov.length == 0) { /* see allocate above */
- r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
- struct rpcrdma_xprt, rx_buf);
- } else
- r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
- rep = req->rl_reply;
+ rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
+ req = rb->rg_owner;
+ r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
- dprintk("RPC: %s: called on 0x%p%s\n",
- __func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");
+ dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
- /*
- * Finish the deregistration. The process is considered
- * complete when the rr_func vector becomes NULL - this
- * was put in place during rpcrdma_reply_handler() - the wait
- * call below will not block if the dereg is "done". If
- * interrupted, our framework will clean up.
- */
for (i = 0; req->rl_nchunks;) {
--req->rl_nchunks;
i += rpcrdma_deregister_external(
&req->rl_segments[i], r_xprt);
}
- if (req->rl_iov.length == 0) { /* see allocate above */
- struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
- oreq->rl_reply = req->rl_reply;
- (void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
- req->rl_handle,
- &req->rl_iov);
- kfree(req);
- req = oreq;
- }
-
- /* Put back request+reply buffers */
rpcrdma_buffer_put(req);
}
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c98e40643910..124676c13780 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -49,6 +49,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include <asm/bitops.h>
#include "xprt_rdma.h"
@@ -153,7 +154,7 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
event->device->name, context);
if (ep->rep_connected == 1) {
ep->rep_connected = -EIO;
- ep->rep_func(ep);
+ rpcrdma_conn_func(ep);
wake_up_all(&ep->rep_connect_wait);
}
}
@@ -168,23 +169,59 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
event->device->name, context);
if (ep->rep_connected == 1) {
ep->rep_connected = -EIO;
- ep->rep_func(ep);
+ rpcrdma_conn_func(ep);
wake_up_all(&ep->rep_connect_wait);
}
}
+static const char * const wc_status[] = {
+ "success",
+ "local length error",
+ "local QP operation error",
+ "local EE context operation error",
+ "local protection error",
+ "WR flushed",
+ "memory management operation error",
+ "bad response error",
+ "local access error",
+ "remote invalid request error",
+ "remote access error",
+ "remote operation error",
+ "transport retry counter exceeded",
+ "RNR retrycounter exceeded",
+ "local RDD violation error",
+ "remove invalid RD request",
+ "operation aborted",
+ "invalid EE context number",
+ "invalid EE context state",
+ "fatal error",
+ "response timeout error",
+ "general error",
+};
+
+#define COMPLETION_MSG(status) \
+ ((status) < ARRAY_SIZE(wc_status) ? \
+ wc_status[(status)] : "unexpected completion error")
+
static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
- struct rpcrdma_mw *frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+ if (likely(wc->status == IB_WC_SUCCESS))
+ return;
- dprintk("RPC: %s: frmr %p status %X opcode %d\n",
- __func__, frmr, wc->status, wc->opcode);
+ /* WARNING: Only wr_id and status are reliable at this point */
+ if (wc->wr_id == 0ULL) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ pr_err("RPC: %s: SEND: %s\n",
+ __func__, COMPLETION_MSG(wc->status));
+ } else {
+ struct rpcrdma_mw *r;
- if (wc->wr_id == 0ULL)
- return;
- if (wc->status != IB_WC_SUCCESS)
- frmr->r.frmr.fr_state = FRMR_IS_STALE;
+ r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+ r->r.frmr.fr_state = FRMR_IS_STALE;
+ pr_err("RPC: %s: frmr %p (stale): %s\n",
+ __func__, r, COMPLETION_MSG(wc->status));
+ }
}
static int
@@ -248,33 +285,32 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
struct rpcrdma_rep *rep =
(struct rpcrdma_rep *)(unsigned long)wc->wr_id;
- dprintk("RPC: %s: rep %p status %X opcode %X length %u\n",
- __func__, rep, wc->status, wc->opcode, wc->byte_len);
+ /* WARNING: Only wr_id and status are reliable at this point */
+ if (wc->status != IB_WC_SUCCESS)
+ goto out_fail;
- if (wc->status != IB_WC_SUCCESS) {
- rep->rr_len = ~0U;
- goto out_schedule;
- }
+ /* status == SUCCESS means all fields in wc are trustworthy */
if (wc->opcode != IB_WC_RECV)
return;
+ dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
+ __func__, rep, wc->byte_len);
+
rep->rr_len = wc->byte_len;
ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
- rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);
-
- if (rep->rr_len >= 16) {
- struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base;
- unsigned int credits = ntohl(p->rm_credit);
-
- if (credits == 0)
- credits = 1; /* don't deadlock */
- else if (credits > rep->rr_buffer->rb_max_requests)
- credits = rep->rr_buffer->rb_max_requests;
- atomic_set(&rep->rr_buffer->rb_credits, credits);
- }
+ rdmab_addr(rep->rr_rdmabuf),
+ rep->rr_len, DMA_FROM_DEVICE);
+ prefetch(rdmab_to_msg(rep->rr_rdmabuf));
out_schedule:
list_add_tail(&rep->rr_list, sched_list);
+ return;
+out_fail:
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ pr_err("RPC: %s: rep %p: %s\n",
+ __func__, rep, COMPLETION_MSG(wc->status));
+ rep->rr_len = ~0U;
+ goto out_schedule;
}
static int
@@ -390,8 +426,8 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
#endif
- struct ib_qp_attr attr;
- struct ib_qp_init_attr iattr;
+ struct ib_qp_attr *attr = &ia->ri_qp_attr;
+ struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
int connstate = 0;
switch (event->event) {
@@ -414,12 +450,13 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_ESTABLISHED:
connstate = 1;
- ib_query_qp(ia->ri_id->qp, &attr,
- IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
- &iattr);
+ ib_query_qp(ia->ri_id->qp, attr,
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
+ iattr);
dprintk("RPC: %s: %d responder resources"
" (%d initiator)\n",
- __func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
+ __func__, attr->max_dest_rd_atomic,
+ attr->max_rd_atomic);
goto connected;
case RDMA_CM_EVENT_CONNECT_ERROR:
connstate = -ENOTCONN;
@@ -436,11 +473,10 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DEVICE_REMOVAL:
connstate = -ENODEV;
connected:
- atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
dprintk("RPC: %s: %sconnected\n",
__func__, connstate > 0 ? "" : "dis");
ep->rep_connected = connstate;
- ep->rep_func(ep);
+ rpcrdma_conn_func(ep);
wake_up_all(&ep->rep_connect_wait);
/*FALLTHROUGH*/
default:
@@ -453,7 +489,7 @@ connected:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (connstate == 1) {
- int ird = attr.max_dest_rd_atomic;
+ int ird = attr->max_dest_rd_atomic;
int tird = ep->rep_remote_cma.responder_resources;
printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
"on %s, memreg %d slots %d ird %d%s\n",
@@ -554,8 +590,8 @@ int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
int rc, mem_priv;
- struct ib_device_attr devattr;
struct rpcrdma_ia *ia = &xprt->rx_ia;
+ struct ib_device_attr *devattr = &ia->ri_devattr;
ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
if (IS_ERR(ia->ri_id)) {
@@ -571,26 +607,21 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
goto out2;
}
- /*
- * Query the device to determine if the requested memory
- * registration strategy is supported. If it isn't, set the
- * strategy to a globally supported model.
- */
- rc = ib_query_device(ia->ri_id->device, &devattr);
+ rc = ib_query_device(ia->ri_id->device, devattr);
if (rc) {
dprintk("RPC: %s: ib_query_device failed %d\n",
__func__, rc);
- goto out2;
+ goto out3;
}
- if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
+ if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
ia->ri_have_dma_lkey = 1;
ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
}
if (memreg == RPCRDMA_FRMR) {
/* Requires both frmr reg and local dma lkey */
- if ((devattr.device_cap_flags &
+ if ((devattr->device_cap_flags &
(IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
(IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
dprintk("RPC: %s: FRMR registration "
@@ -600,7 +631,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
/* Mind the ia limit on FRMR page list depth */
ia->ri_max_frmr_depth = min_t(unsigned int,
RPCRDMA_MAX_DATA_SEGS,
- devattr.max_fast_reg_page_list_len);
+ devattr->max_fast_reg_page_list_len);
}
}
if (memreg == RPCRDMA_MTHCAFMR) {
@@ -638,14 +669,14 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
"phys register failed with %lX\n",
__func__, PTR_ERR(ia->ri_bind_mem));
rc = -ENOMEM;
- goto out2;
+ goto out3;
}
break;
default:
printk(KERN_ERR "RPC: Unsupported memory "
"registration mode: %d\n", memreg);
rc = -ENOMEM;
- goto out2;
+ goto out3;
}
dprintk("RPC: %s: memory registration strategy is %d\n",
__func__, memreg);
@@ -655,6 +686,10 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
rwlock_init(&ia->ri_qplock);
return 0;
+
+out3:
+ ib_dealloc_pd(ia->ri_pd);
+ ia->ri_pd = NULL;
out2:
rdma_destroy_id(ia->ri_id);
ia->ri_id = NULL;
@@ -698,20 +733,13 @@ int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
struct rpcrdma_create_data_internal *cdata)
{
- struct ib_device_attr devattr;
+ struct ib_device_attr *devattr = &ia->ri_devattr;
struct ib_cq *sendcq, *recvcq;
int rc, err;
- rc = ib_query_device(ia->ri_id->device, &devattr);
- if (rc) {
- dprintk("RPC: %s: ib_query_device failed %d\n",
- __func__, rc);
- return rc;
- }
-
/* check provider's send/recv wr limits */
- if (cdata->max_requests > devattr.max_qp_wr)
- cdata->max_requests = devattr.max_qp_wr;
+ if (cdata->max_requests > devattr->max_qp_wr)
+ cdata->max_requests = devattr->max_qp_wr;
ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
ep->rep_attr.qp_context = ep;
@@ -746,8 +774,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
}
ep->rep_attr.cap.max_send_wr *= depth;
- if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
- cdata->max_requests = devattr.max_qp_wr / depth;
+ if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
+ cdata->max_requests = devattr->max_qp_wr / depth;
if (!cdata->max_requests)
return -EINVAL;
ep->rep_attr.cap.max_send_wr = cdata->max_requests *
@@ -766,6 +794,14 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_attr.qp_type = IB_QPT_RC;
ep->rep_attr.port_num = ~0;
+ if (cdata->padding) {
+ ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
+ GFP_KERNEL);
+ if (IS_ERR(ep->rep_padbuf))
+ return PTR_ERR(ep->rep_padbuf);
+ } else
+ ep->rep_padbuf = NULL;
+
dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
"iovs: send %d recv %d\n",
__func__,
@@ -781,7 +817,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
else if (ep->rep_cqinit <= 2)
ep->rep_cqinit = 0;
INIT_CQCOUNT(ep);
- ep->rep_ia = ia;
init_waitqueue_head(&ep->rep_connect_wait);
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
@@ -831,10 +866,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
/* Client offers RDMA Read but does not initiate */
ep->rep_remote_cma.initiator_depth = 0;
- if (devattr.max_qp_rd_atom > 32) /* arbitrary but <= 255 */
+ if (devattr->max_qp_rd_atom > 32) /* arbitrary but <= 255 */
ep->rep_remote_cma.responder_resources = 32;
else
- ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;
+ ep->rep_remote_cma.responder_resources =
+ devattr->max_qp_rd_atom;
ep->rep_remote_cma.retry_count = 7;
ep->rep_remote_cma.flow_control = 0;
@@ -848,6 +884,7 @@ out2:
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, err);
out1:
+ rpcrdma_free_regbuf(ia, ep->rep_padbuf);
return rc;
}
@@ -874,11 +911,7 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
ia->ri_id->qp = NULL;
}
- /* padding - could be done in rpcrdma_buffer_destroy... */
- if (ep->rep_pad_mr) {
- rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
- ep->rep_pad_mr = NULL;
- }
+ rpcrdma_free_regbuf(ia, ep->rep_padbuf);
rpcrdma_clean_cq(ep->rep_attr.recv_cq);
rc = ib_destroy_cq(ep->rep_attr.recv_cq);
@@ -1048,6 +1081,48 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
}
}
+static struct rpcrdma_req *
+rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_req *req;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ req->rl_buffer = &r_xprt->rx_buf;
+ return req;
+}
+
+static struct rpcrdma_rep *
+rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_rep *rep;
+ int rc;
+
+ rc = -ENOMEM;
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (rep == NULL)
+ goto out;
+
+ rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
+ GFP_KERNEL);
+ if (IS_ERR(rep->rr_rdmabuf)) {
+ rc = PTR_ERR(rep->rr_rdmabuf);
+ goto out_free;
+ }
+
+ rep->rr_buffer = &r_xprt->rx_buf;
+ return rep;
+
+out_free:
+ kfree(rep);
+out:
+ return ERR_PTR(rc);
+}
+
static int
rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
@@ -1134,27 +1209,26 @@ out_free:
}
int
-rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
- struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
+rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
char *p;
- size_t len, rlen, wlen;
+ size_t len;
int i, rc;
buf->rb_max_requests = cdata->max_requests;
spin_lock_init(&buf->rb_lock);
- atomic_set(&buf->rb_credits, 1);
/* Need to allocate:
* 1. arrays for send and recv pointers
* 2. arrays of struct rpcrdma_req to fill in pointers
* 3. array of struct rpcrdma_rep for replies
- * 4. padding, if any
* Send/recv buffers in req/rep need to be registered
*/
len = buf->rb_max_requests *
(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
- len += cdata->padding;
p = kzalloc(len, GFP_KERNEL);
if (p == NULL) {
@@ -1170,17 +1244,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
- /*
- * Register the zeroed pad buffer, if any.
- */
- if (cdata->padding) {
- rc = rpcrdma_register_internal(ia, p, cdata->padding,
- &ep->rep_pad_mr, &ep->rep_pad);
- if (rc)
- goto out;
- }
- p += cdata->padding;
-
INIT_LIST_HEAD(&buf->rb_mws);
INIT_LIST_HEAD(&buf->rb_all);
switch (ia->ri_memreg_strategy) {
@@ -1198,62 +1261,29 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
break;
}
- /*
- * Allocate/init the request/reply buffers. Doing this
- * using kmalloc for now -- one for each buf.
- */
- wlen = 1 << fls(cdata->inline_wsize + sizeof(struct rpcrdma_req));
- rlen = 1 << fls(cdata->inline_rsize + sizeof(struct rpcrdma_rep));
- dprintk("RPC: %s: wlen = %zu, rlen = %zu\n",
- __func__, wlen, rlen);
-
for (i = 0; i < buf->rb_max_requests; i++) {
struct rpcrdma_req *req;
struct rpcrdma_rep *rep;
- req = kmalloc(wlen, GFP_KERNEL);
- if (req == NULL) {
+ req = rpcrdma_create_req(r_xprt);
+ if (IS_ERR(req)) {
dprintk("RPC: %s: request buffer %d alloc"
" failed\n", __func__, i);
- rc = -ENOMEM;
+ rc = PTR_ERR(req);
goto out;
}
- memset(req, 0, sizeof(struct rpcrdma_req));
buf->rb_send_bufs[i] = req;
- buf->rb_send_bufs[i]->rl_buffer = buf;
- rc = rpcrdma_register_internal(ia, req->rl_base,
- wlen - offsetof(struct rpcrdma_req, rl_base),
- &buf->rb_send_bufs[i]->rl_handle,
- &buf->rb_send_bufs[i]->rl_iov);
- if (rc)
- goto out;
-
- buf->rb_send_bufs[i]->rl_size = wlen -
- sizeof(struct rpcrdma_req);
-
- rep = kmalloc(rlen, GFP_KERNEL);
- if (rep == NULL) {
+ rep = rpcrdma_create_rep(r_xprt);
+ if (IS_ERR(rep)) {
dprintk("RPC: %s: reply buffer %d alloc failed\n",
__func__, i);
- rc = -ENOMEM;
+ rc = PTR_ERR(rep);
goto out;
}
- memset(rep, 0, sizeof(struct rpcrdma_rep));
buf->rb_recv_bufs[i] = rep;
- buf->rb_recv_bufs[i]->rr_buffer = buf;
-
- rc = rpcrdma_register_internal(ia, rep->rr_base,
- rlen - offsetof(struct rpcrdma_rep, rr_base),
- &buf->rb_recv_bufs[i]->rr_handle,
- &buf->rb_recv_bufs[i]->rr_iov);
- if (rc)
- goto out;
-
}
- dprintk("RPC: %s: max_requests %d\n",
- __func__, buf->rb_max_requests);
- /* done */
+
return 0;
out:
rpcrdma_buffer_destroy(buf);
@@ -1261,6 +1291,27 @@ out:
}
static void
+rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
+{
+ if (!rep)
+ return;
+
+ rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
+ kfree(rep);
+}
+
+static void
+rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+{
+ if (!req)
+ return;
+
+ rpcrdma_free_regbuf(ia, req->rl_sendbuf);
+ rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
+ kfree(req);
+}
+
+static void
rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf)
{
struct rpcrdma_mw *r;
@@ -1315,18 +1366,10 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
dprintk("RPC: %s: entering\n", __func__);
for (i = 0; i < buf->rb_max_requests; i++) {
- if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) {
- rpcrdma_deregister_internal(ia,
- buf->rb_recv_bufs[i]->rr_handle,
- &buf->rb_recv_bufs[i]->rr_iov);
- kfree(buf->rb_recv_bufs[i]);
- }
- if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
- rpcrdma_deregister_internal(ia,
- buf->rb_send_bufs[i]->rl_handle,
- &buf->rb_send_bufs[i]->rl_iov);
- kfree(buf->rb_send_bufs[i]);
- }
+ if (buf->rb_recv_bufs)
+ rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
+ if (buf->rb_send_bufs)
+ rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
}
switch (ia->ri_memreg_strategy) {
@@ -1450,8 +1493,8 @@ rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
int i;
for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
- rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf);
- rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf);
+ rpcrdma_buffer_put_mr(&seg->rl_mw, buf);
+ rpcrdma_buffer_put_mr(&seg1->rl_mw, buf);
}
static void
@@ -1537,7 +1580,7 @@ rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
list_add(&r->mw_list, stale);
continue;
}
- req->rl_segments[i].mr_chunk.rl_mw = r;
+ req->rl_segments[i].rl_mw = r;
if (unlikely(i-- == 0))
return req; /* Success */
}
@@ -1559,7 +1602,7 @@ rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
r = list_entry(buf->rb_mws.next,
struct rpcrdma_mw, mw_list);
list_del(&r->mw_list);
- req->rl_segments[i].mr_chunk.rl_mw = r;
+ req->rl_segments[i].rl_mw = r;
if (unlikely(i-- == 0))
return req; /* Success */
}
@@ -1658,8 +1701,6 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
struct rpcrdma_buffer *buffers = req->rl_buffer;
unsigned long flags;
- if (req->rl_iov.length == 0) /* special case xprt_rdma_allocate() */
- buffers = ((struct rpcrdma_req *) buffers)->rl_buffer;
spin_lock_irqsave(&buffers->rb_lock, flags);
if (buffers->rb_recv_index < buffers->rb_max_requests) {
req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
@@ -1688,7 +1729,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
* Wrappers for internal-use kmalloc memory registration, used by buffer code.
*/
-int
+static int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
struct ib_mr **mrp, struct ib_sge *iov)
{
@@ -1739,7 +1780,7 @@ rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
return rc;
}
-int
+static int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
struct ib_mr *mr, struct ib_sge *iov)
{
@@ -1757,6 +1798,61 @@ rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
return rc;
}
+/**
+ * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
+ * @ia: controlling rpcrdma_ia
+ * @size: size of buffer to be allocated, in bytes
+ * @flags: GFP flags
+ *
+ * Returns pointer to private header of an area of internally
+ * registered memory, or an ERR_PTR. The registered buffer follows
+ * the end of the private header.
+ *
+ * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
+ * receiving the payload of RDMA RECV operations. regbufs are not
+ * used for RDMA READ/WRITE operations, thus are registered only for
+ * LOCAL access.
+ */
+struct rpcrdma_regbuf *
+rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
+{
+ struct rpcrdma_regbuf *rb;
+ int rc;
+
+ rc = -ENOMEM;
+ rb = kmalloc(sizeof(*rb) + size, flags);
+ if (rb == NULL)
+ goto out;
+
+ rb->rg_size = size;
+ rb->rg_owner = NULL;
+ rc = rpcrdma_register_internal(ia, rb->rg_base, size,
+ &rb->rg_mr, &rb->rg_iov);
+ if (rc)
+ goto out_free;
+
+ return rb;
+
+out_free:
+ kfree(rb);
+out:
+ return ERR_PTR(rc);
+}
+
+/**
+ * rpcrdma_free_regbuf - deregister and free registered buffer
+ * @ia: controlling rpcrdma_ia
+ * @rb: regbuf to be deregistered and freed
+ */
+void
+rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+{
+ if (rb) {
+ rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
+ kfree(rb);
+ }
+}
+
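As a hedged illustration of how the two helpers above are meant to be paired, the sketch below allocates and releases a scratch regbuf the same way rpcrdma_create_rep() does earlier in this patch. The function name rpcrdma_regbuf_demo is hypothetical, and the snippet assumes the xprt_rdma.h declarations plus an already-initialized rpcrdma_ia; it is not part of the patch.

/* Hypothetical usage sketch for the regbuf API (requires "xprt_rdma.h"). */
static int rpcrdma_regbuf_demo(struct rpcrdma_ia *ia)
{
	struct rpcrdma_regbuf *rb;

	rb = rpcrdma_alloc_regbuf(ia, 1024, GFP_KERNEL);
	if (IS_ERR(rb))
		return PTR_ERR(rb);

	/* rb->rg_base is the usable, locally registered memory;
	 * rdmab_addr()/rdmab_length()/rdmab_lkey() describe it when
	 * posting SEND or RECV work requests. */

	rpcrdma_free_regbuf(ia, rb);
	return 0;
}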
/*
* Wrappers for chunk registration, shared by read/write chunk code.
*/
@@ -1799,7 +1895,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_mr_seg *seg1 = seg;
- struct rpcrdma_mw *mw = seg1->mr_chunk.rl_mw;
+ struct rpcrdma_mw *mw = seg1->rl_mw;
struct rpcrdma_frmr *frmr = &mw->r.frmr;
struct ib_mr *mr = frmr->fr_mr;
struct ib_send_wr fastreg_wr, *bad_wr;
@@ -1888,12 +1984,12 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
struct ib_send_wr invalidate_wr, *bad_wr;
int rc;
- seg1->mr_chunk.rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
+ seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
memset(&invalidate_wr, 0, sizeof invalidate_wr);
- invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
+ invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
invalidate_wr.opcode = IB_WR_LOCAL_INV;
- invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+ invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
DECR_CQCOUNT(&r_xprt->rx_ep);
read_lock(&ia->ri_qplock);
@@ -1903,7 +1999,7 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
read_unlock(&ia->ri_qplock);
if (rc) {
/* Force rpcrdma_buffer_get() to retry */
- seg1->mr_chunk.rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
+ seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
dprintk("RPC: %s: failed ib_post_send for invalidate,"
" status %i\n", __func__, rc);
}
@@ -1935,8 +2031,7 @@ rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr,
- physaddrs, i, seg1->mr_dma);
+ rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma);
if (rc) {
dprintk("RPC: %s: failed ib_map_phys_fmr "
"%u@0x%llx+%i (%d)... status %i\n", __func__,
@@ -1945,7 +2040,7 @@ rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
while (i--)
rpcrdma_unmap_one(ia, --seg);
} else {
- seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey;
+ seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey;
seg1->mr_base = seg1->mr_dma + pageoff;
seg1->mr_nsegs = i;
seg1->mr_len = len;
@@ -1962,7 +2057,7 @@ rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
LIST_HEAD(l);
int rc;
- list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l);
+ list_add(&seg1->rl_mw->r.fmr->list, &l);
rc = ib_unmap_fmr(&l);
read_lock(&ia->ri_qplock);
while (seg1->mr_nsegs--)
@@ -2104,11 +2199,13 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
recv_wr.next = NULL;
recv_wr.wr_id = (u64) (unsigned long) rep;
- recv_wr.sg_list = &rep->rr_iov;
+ recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
recv_wr.num_sge = 1;
ib_dma_sync_single_for_cpu(ia->ri_id->device,
- rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);
+ rdmab_addr(rep->rr_rdmabuf),
+ rdmab_length(rep->rr_rdmabuf),
+ DMA_BIDIRECTIONAL);
rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index b799041b75bf..d1b70397c60f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -70,6 +70,9 @@ struct rpcrdma_ia {
int ri_async_rc;
enum rpcrdma_memreg ri_memreg_strategy;
unsigned int ri_max_frmr_depth;
+ struct ib_device_attr ri_devattr;
+ struct ib_qp_attr ri_qp_attr;
+ struct ib_qp_init_attr ri_qp_init_attr;
};
/*
@@ -83,13 +86,9 @@ struct rpcrdma_ep {
atomic_t rep_cqcount;
int rep_cqinit;
int rep_connected;
- struct rpcrdma_ia *rep_ia;
struct ib_qp_init_attr rep_attr;
wait_queue_head_t rep_connect_wait;
- struct ib_sge rep_pad; /* holds zeroed pad */
- struct ib_mr *rep_pad_mr; /* holds zeroed pad */
- void (*rep_func)(struct rpcrdma_ep *);
- struct rpc_xprt *rep_xprt; /* for rep_func */
+ struct rpcrdma_regbuf *rep_padbuf;
struct rdma_conn_param rep_remote_cma;
struct sockaddr_storage rep_remote_addr;
struct delayed_work rep_connect_worker;
@@ -106,6 +105,44 @@ struct rpcrdma_ep {
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
+/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
+ *
+ * The below structure appears at the front of a large region of kmalloc'd
+ * memory, which always starts on a good alignment boundary.
+ */
+
+struct rpcrdma_regbuf {
+ size_t rg_size;
+ struct rpcrdma_req *rg_owner;
+ struct ib_mr *rg_mr;
+ struct ib_sge rg_iov;
+ __be32 rg_base[0] __attribute__ ((aligned(256)));
+};
+
+static inline u64
+rdmab_addr(struct rpcrdma_regbuf *rb)
+{
+ return rb->rg_iov.addr;
+}
+
+static inline u32
+rdmab_length(struct rpcrdma_regbuf *rb)
+{
+ return rb->rg_iov.length;
+}
+
+static inline u32
+rdmab_lkey(struct rpcrdma_regbuf *rb)
+{
+ return rb->rg_iov.lkey;
+}
+
+static inline struct rpcrdma_msg *
+rdmab_to_msg(struct rpcrdma_regbuf *rb)
+{
+ return (struct rpcrdma_msg *)rb->rg_base;
+}
+
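For illustration, a small sketch of how these accessors are consumed when building a scatter/gather element; it produces the same addr/length/lkey triple that rpcrdma_ep_post_recv() takes directly from rg_iov elsewhere in this patch. The helper name is a stand-in, not part of the patch.

/* Stand-in helper: describe a regbuf to the HCA as a single SGE. */
static inline void rpcrdma_regbuf_to_sge(struct rpcrdma_regbuf *rb,
					 struct ib_sge *sge)
{
	sge->addr   = rdmab_addr(rb);
	sge->length = rdmab_length(rb);
	sge->lkey   = rdmab_lkey(rb);
}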
enum rpcrdma_chunktype {
rpcrdma_noch = 0,
rpcrdma_readch,
@@ -134,22 +171,16 @@ enum rpcrdma_chunktype {
/* temporary static scatter/gather max */
#define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */
#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
-#define MAX_RPCRDMAHDR (\
- /* max supported RPC/RDMA header */ \
- sizeof(struct rpcrdma_msg) + (2 * sizeof(u32)) + \
- (sizeof(struct rpcrdma_read_chunk) * RPCRDMA_MAX_SEGS) + sizeof(u32))
struct rpcrdma_buffer;
struct rpcrdma_rep {
- unsigned int rr_len; /* actual received reply length */
- struct rpcrdma_buffer *rr_buffer; /* home base for this structure */
- struct rpc_xprt *rr_xprt; /* needed for request/reply matching */
- void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */
- struct list_head rr_list; /* tasklet list */
- struct ib_sge rr_iov; /* for posting */
- struct ib_mr *rr_handle; /* handle for mem in rr_iov */
- char rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
+ unsigned int rr_len;
+ struct rpcrdma_buffer *rr_buffer;
+ struct rpc_xprt *rr_xprt;
+ void (*rr_func)(struct rpcrdma_rep *);
+ struct list_head rr_list;
+ struct rpcrdma_regbuf *rr_rdmabuf;
};
/*
@@ -211,10 +242,7 @@ struct rpcrdma_mw {
*/
struct rpcrdma_mr_seg { /* chunk descriptors */
- union { /* chunk memory handles */
- struct ib_mr *rl_mr; /* if registered directly */
- struct rpcrdma_mw *rl_mw; /* if registered from region */
- } mr_chunk;
+ struct rpcrdma_mw *rl_mw; /* registered MR */
u64 mr_base; /* registration result */
u32 mr_rkey; /* registration result */
u32 mr_len; /* length of chunk or segment */
@@ -227,22 +255,27 @@ struct rpcrdma_mr_seg { /* chunk descriptors */
};
struct rpcrdma_req {
- size_t rl_size; /* actual length of buffer */
unsigned int rl_niovs; /* 0, 2 or 4 */
unsigned int rl_nchunks; /* non-zero if chunks */
unsigned int rl_connect_cookie; /* retry detection */
enum rpcrdma_chunktype rl_rtype, rl_wtype;
struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
- struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
struct ib_sge rl_send_iov[4]; /* for active requests */
- struct ib_sge rl_iov; /* for posting */
- struct ib_mr *rl_handle; /* handle for mem in rl_iov */
- char rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
- __u32 rl_xdr_buf[0]; /* start of returned rpc rq_buffer */
+ struct rpcrdma_regbuf *rl_rdmabuf;
+ struct rpcrdma_regbuf *rl_sendbuf;
+ struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
-#define rpcr_to_rdmar(r) \
- container_of((r)->rq_buffer, struct rpcrdma_req, rl_xdr_buf[0])
+
+static inline struct rpcrdma_req *
+rpcr_to_rdmar(struct rpc_rqst *rqst)
+{
+ void *buffer = rqst->rq_buffer;
+ struct rpcrdma_regbuf *rb;
+
+ rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
+ return rb->rg_owner;
+}
/*
* struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
@@ -252,7 +285,6 @@ struct rpcrdma_req {
*/
struct rpcrdma_buffer {
spinlock_t rb_lock; /* protects indexes */
- atomic_t rb_credits; /* most recent server credits */
int rb_max_requests;/* client max requests */
struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */
struct list_head rb_all;
@@ -318,16 +350,16 @@ struct rpcrdma_stats {
* during unmount.
*/
struct rpcrdma_xprt {
- struct rpc_xprt xprt;
+ struct rpc_xprt rx_xprt;
struct rpcrdma_ia rx_ia;
struct rpcrdma_ep rx_ep;
struct rpcrdma_buffer rx_buf;
struct rpcrdma_create_data_internal rx_data;
- struct delayed_work rdma_connect;
+ struct delayed_work rx_connect_worker;
struct rpcrdma_stats rx_stats;
};
-#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, xprt)
+#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
/* Setting this to 0 ensures interoperability with early servers.
@@ -358,9 +390,7 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
/*
* Buffer calls - xprtrdma/verbs.c
*/
-int rpcrdma_buffer_create(struct rpcrdma_buffer *, struct rpcrdma_ep *,
- struct rpcrdma_ia *,
- struct rpcrdma_create_data_internal *);
+int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
@@ -368,16 +398,16 @@ void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
-int rpcrdma_register_internal(struct rpcrdma_ia *, void *, int,
- struct ib_mr **, struct ib_sge *);
-int rpcrdma_deregister_internal(struct rpcrdma_ia *,
- struct ib_mr *, struct ib_sge *);
-
int rpcrdma_register_external(struct rpcrdma_mr_seg *,
int, int, struct rpcrdma_xprt *);
int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
struct rpcrdma_xprt *);
+struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
+ size_t, gfp_t);
+void rpcrdma_free_regbuf(struct rpcrdma_ia *,
+ struct rpcrdma_regbuf *);
+
/*
* RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
*/
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 87ce7e8bb8dc..66891e32c5e3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -63,6 +63,8 @@ static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+
#define XS_TCP_LINGER_TO (15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
@@ -75,8 +77,6 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
* someone else's file names!
*/
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-
static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
@@ -627,7 +627,7 @@ process_status:
* @xprt: transport
*
* Initiates a graceful shutdown of the TCP socket by calling the
- * equivalent of shutdown(SHUT_WR);
+ * equivalent of shutdown(SHUT_RDWR);
*/
static void xs_tcp_shutdown(struct rpc_xprt *xprt)
{
@@ -635,7 +635,7 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
struct socket *sock = transport->sock;
if (sock != NULL) {
- kernel_sock_shutdown(sock, SHUT_WR);
+ kernel_sock_shutdown(sock, SHUT_RDWR);
trace_rpc_socket_shutdown(xprt, sock);
}
}
@@ -718,9 +718,9 @@ static int xs_tcp_send_request(struct rpc_task *task)
dprintk("RPC: sendmsg returned unrecognized error %d\n",
-status);
case -ECONNRESET:
- xs_tcp_shutdown(xprt);
case -ECONNREFUSED:
case -ENOTCONN:
+ case -EADDRINUSE:
case -EPIPE:
clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}
@@ -773,6 +773,21 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
sk->sk_error_report = transport->old_error_report;
}
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
+{
+ smp_mb__before_atomic();
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_atomic();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+ xs_sock_reset_connection_flags(xprt);
+ /* Mark transport as closed and wake up all pending tasks */
+ xprt_disconnect_done(xprt);
+}
+
/**
* xs_error_report - callback to handle TCP socket state errors
* @sk: socket
@@ -792,11 +807,12 @@ static void xs_error_report(struct sock *sk)
err = -sk->sk_err;
if (err == 0)
goto out;
+ /* Is this a reset event? */
+ if (sk->sk_state == TCP_CLOSE)
+ xs_sock_mark_closed(xprt);
dprintk("RPC: xs_error_report client %p, error=%d...\n",
xprt, -err);
trace_rpc_socket_error(xprt, sk->sk_socket, err);
- if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state))
- goto out;
xprt_wake_pending_tasks(xprt, err);
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -806,12 +822,11 @@ static void xs_reset_transport(struct sock_xprt *transport)
{
struct socket *sock = transport->sock;
struct sock *sk = transport->inet;
+ struct rpc_xprt *xprt = &transport->xprt;
if (sk == NULL)
return;
- transport->srcport = 0;
-
write_lock_bh(&sk->sk_callback_lock);
transport->inet = NULL;
transport->sock = NULL;
@@ -820,8 +835,9 @@ static void xs_reset_transport(struct sock_xprt *transport)
xs_restore_old_callbacks(transport, sk);
write_unlock_bh(&sk->sk_callback_lock);
+ xs_sock_reset_connection_flags(xprt);
- trace_rpc_socket_close(&transport->xprt, sock);
+ trace_rpc_socket_close(xprt, sock);
sock_release(sock);
}
@@ -841,27 +857,12 @@ static void xs_close(struct rpc_xprt *xprt)
dprintk("RPC: xs_close xprt %p\n", xprt);
- cancel_delayed_work_sync(&transport->connect_worker);
-
xs_reset_transport(transport);
xprt->reestablish_timeout = 0;
- smp_mb__before_atomic();
- clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
- clear_bit(XPRT_CLOSING, &xprt->state);
- smp_mb__after_atomic();
xprt_disconnect_done(xprt);
}
-static void xs_tcp_close(struct rpc_xprt *xprt)
-{
- if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
- xs_close(xprt);
- else
- xs_tcp_shutdown(xprt);
-}
-
static void xs_xprt_free(struct rpc_xprt *xprt)
{
xs_free_peer_addresses(xprt);
@@ -1032,7 +1033,6 @@ static void xs_udp_data_ready(struct sock *sk)
*/
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
xprt_force_disconnect(xprt);
}
@@ -1425,54 +1425,6 @@ out:
read_unlock_bh(&sk->sk_callback_lock);
}
-/*
- * Do the equivalent of linger/linger2 handling for dealing with
- * broken servers that don't close the socket in a timely
- * fashion
- */
-static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
- unsigned long timeout)
-{
- struct sock_xprt *transport;
-
- if (xprt_test_and_set_connecting(xprt))
- return;
- set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- transport = container_of(xprt, struct sock_xprt, xprt);
- queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
- timeout);
-}
-
-static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
-{
- struct sock_xprt *transport;
-
- transport = container_of(xprt, struct sock_xprt, xprt);
-
- if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
- !cancel_delayed_work(&transport->connect_worker))
- return;
- clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- xprt_clear_connecting(xprt);
-}
-
-static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
-{
- smp_mb__before_atomic();
- clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
- clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
- clear_bit(XPRT_CLOSING, &xprt->state);
- smp_mb__after_atomic();
-}
-
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
-{
- xs_sock_reset_connection_flags(xprt);
- /* Mark transport as closed and wake up all pending tasks */
- xprt_disconnect_done(xprt);
-}
-
/**
* xs_tcp_state_change - callback to handle TCP socket state changes
* @sk: socket whose state has changed
@@ -1521,7 +1473,6 @@ static void xs_tcp_state_change(struct sock *sk)
clear_bit(XPRT_CONNECTED, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
smp_mb__after_atomic();
- xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
break;
case TCP_CLOSE_WAIT:
/* The server initiated a shutdown of the socket */
@@ -1538,13 +1489,11 @@ static void xs_tcp_state_change(struct sock *sk)
break;
case TCP_LAST_ACK:
set_bit(XPRT_CLOSING, &xprt->state);
- xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
smp_mb__before_atomic();
clear_bit(XPRT_CONNECTED, &xprt->state);
smp_mb__after_atomic();
break;
case TCP_CLOSE:
- xs_tcp_cancel_linger_timeout(xprt);
xs_sock_mark_closed(xprt);
}
out:
@@ -1667,6 +1616,40 @@ static unsigned short xs_get_random_port(void)
}
/**
+ * xs_sock_set_reuseport - set the socket's port and address reuse options
+ * @sock: socket
+ *
+ * Note that this function has to be called on all sockets that share the
+ * same port, and it must be called before binding.
+ */
+static void xs_sock_set_reuseport(struct socket *sock)
+{
+ int opt = 1;
+
+ kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
+ (char *)&opt, sizeof(opt));
+}
+
+static unsigned short xs_sock_getport(struct socket *sock)
+{
+ struct sockaddr_storage buf;
+ int buflen;
+ unsigned short port = 0;
+
+ if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
+ goto out;
+ switch (buf.ss_family) {
+ case AF_INET6:
+ port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
+ break;
+ case AF_INET:
+ port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
+ }
+out:
+ return port;
+}
+
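A brief userspace sketch (not kernel code) of the same pattern the two helpers above implement: enable SO_REUSEPORT before bind(), let the kernel pick an ephemeral source port, then read the chosen port back with getsockname(), as xs_sock_getport() does via kernel_getsockname(). The address and names in it are illustrative only.

/* Userspace illustration of the reuseport-then-getsockname pattern. */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);
	int opt = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* Must be set before bind() on every socket sharing the port. */
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = 0;	/* let the kernel pick a source port */
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		return 1;

	getsockname(fd, (struct sockaddr *)&sin, &len);
	printf("bound to port %u\n", ntohs(sin.sin_port));
	close(fd);
	return 0;
}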
+/**
* xs_set_port - reset the port number in the remote endpoint address
* @xprt: generic transport
* @port: new port number
@@ -1680,6 +1663,12 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
xs_update_peer_port(xprt);
}
+static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
+{
+ if (transport->srcport == 0)
+ transport->srcport = xs_sock_getport(sock);
+}
+
static unsigned short xs_get_srcport(struct sock_xprt *transport)
{
unsigned short port = transport->srcport;
@@ -1833,7 +1822,8 @@ static void xs_dummy_setup_socket(struct work_struct *work)
}
static struct socket *xs_create_sock(struct rpc_xprt *xprt,
- struct sock_xprt *transport, int family, int type, int protocol)
+ struct sock_xprt *transport, int family, int type,
+ int protocol, bool reuseport)
{
struct socket *sock;
int err;
@@ -1846,6 +1836,9 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
}
xs_reclassify_socket(family, sock);
+ if (reuseport)
+ xs_sock_set_reuseport(sock);
+
err = xs_bind(transport, sock);
if (err) {
sock_release(sock);
@@ -1903,7 +1896,6 @@ static int xs_local_setup_socket(struct sock_xprt *transport)
struct socket *sock;
int status = -EIO;
- clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
status = __sock_create(xprt->xprt_net, AF_LOCAL,
SOCK_STREAM, 0, &sock, 1);
if (status < 0) {
@@ -2044,10 +2036,9 @@ static void xs_udp_setup_socket(struct work_struct *work)
struct socket *sock = transport->sock;
int status = -EIO;
- /* Start by resetting any existing state */
- xs_reset_transport(transport);
sock = xs_create_sock(xprt, transport,
- xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
+ xs_addr(xprt)->sa_family, SOCK_DGRAM,
+ IPPROTO_UDP, false);
if (IS_ERR(sock))
goto out;
@@ -2061,61 +2052,11 @@ static void xs_udp_setup_socket(struct work_struct *work)
trace_rpc_socket_connect(xprt, sock, 0);
status = 0;
out:
+ xprt_unlock_connect(xprt, transport);
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
}
-/*
- * We need to preserve the port number so the reply cache on the server can
- * find our cached RPC replies when we get around to reconnecting.
- */
-static void xs_abort_connection(struct sock_xprt *transport)
-{
- int result;
- struct sockaddr any;
-
- dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);
-
- /*
- * Disconnect the transport socket by doing a connect operation
- * with AF_UNSPEC. This should return immediately...
- */
- memset(&any, 0, sizeof(any));
- any.sa_family = AF_UNSPEC;
- result = kernel_connect(transport->sock, &any, sizeof(any), 0);
- trace_rpc_socket_reset_connection(&transport->xprt,
- transport->sock, result);
- if (!result)
- xs_sock_reset_connection_flags(&transport->xprt);
- dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
-}
-
-static void xs_tcp_reuse_connection(struct sock_xprt *transport)
-{
- unsigned int state = transport->inet->sk_state;
-
- if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
- /* we don't need to abort the connection if the socket
- * hasn't undergone a shutdown
- */
- if (transport->inet->sk_shutdown == 0)
- return;
- dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
- __func__, transport->inet->sk_shutdown);
- }
- if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
- /* we don't need to abort the connection if the socket
- * hasn't undergone a shutdown
- */
- if (transport->inet->sk_shutdown == 0)
- return;
- dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
- "sk_shutdown set to %d\n",
- __func__, transport->inet->sk_shutdown);
- }
- xs_abort_connection(transport);
-}
-
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2149,9 +2090,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_allocation = GFP_ATOMIC;
/* socket options */
- sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
sock_reset_flag(sk, SOCK_LINGER);
- tcp_sk(sk)->linger2 = 0;
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
xprt_clear_connected(xprt);
@@ -2174,6 +2113,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
switch (ret) {
case 0:
+ xs_set_srcport(transport, sock);
case -EINPROGRESS:
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
@@ -2200,25 +2140,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
int status = -EIO;
if (!sock) {
- clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
sock = xs_create_sock(xprt, transport,
- xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
+ xs_addr(xprt)->sa_family, SOCK_STREAM,
+ IPPROTO_TCP, true);
if (IS_ERR(sock)) {
status = PTR_ERR(sock);
goto out;
}
- } else {
- int abort_and_exit;
-
- abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
- &xprt->state);
- /* "close" the socket, preserving the local port */
- set_bit(XPRT_CONNECTION_REUSE, &xprt->state);
- xs_tcp_reuse_connection(transport);
- clear_bit(XPRT_CONNECTION_REUSE, &xprt->state);
-
- if (abort_and_exit)
- goto out_eagain;
}
dprintk("RPC: worker connecting xprt %p via %s to "
@@ -2245,6 +2173,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case 0:
case -EINPROGRESS:
case -EALREADY:
+ xprt_unlock_connect(xprt, transport);
xprt_clear_connecting(xprt);
return;
case -EINVAL:
@@ -2254,13 +2183,15 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
+ case -EADDRINUSE:
case -ENOBUFS:
/* retry with existing socket, after a delay */
+ xs_tcp_force_close(xprt);
goto out;
}
-out_eagain:
status = -EAGAIN;
out:
+ xprt_unlock_connect(xprt, transport);
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
}
@@ -2283,6 +2214,11 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
+
+ /* Start by resetting any existing state */
+ xs_reset_transport(transport);
+
if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
@@ -2559,7 +2495,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
.buf_free = rpc_free,
.send_request = xs_tcp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
- .close = xs_tcp_close,
+ .close = xs_tcp_shutdown,
.destroy = xs_destroy,
.print_stats = xs_tcp_print_stats,
};
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index d162b21b14bd..8c1e558db118 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
@@ -50,3 +52,176 @@ int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
return ops->ndo_switch_port_stp_update(dev, state);
}
EXPORT_SYMBOL(netdev_switch_port_stp_update);
+
+static DEFINE_MUTEX(netdev_switch_mutex);
+static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
+
+/**
+ * register_netdev_switch_notifier - Register notifier
+ * @nb: notifier_block
+ *
+ * Register switch device notifier. This should be used by code
+ * which needs to monitor events happening in a particular device.
+ * Return values are the same as for atomic_notifier_chain_register().
+ */
+int register_netdev_switch_notifier(struct notifier_block *nb)
+{
+ int err;
+
+ mutex_lock(&netdev_switch_mutex);
+ err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
+ mutex_unlock(&netdev_switch_mutex);
+ return err;
+}
+EXPORT_SYMBOL(register_netdev_switch_notifier);
+
+/**
+ * unregister_netdev_switch_notifier - Unregister notifier
+ * @nb: notifier_block
+ *
+ * Unregister switch device notifier.
+ * Return values are the same as for atomic_notifier_chain_unregister().
+ */
+int unregister_netdev_switch_notifier(struct notifier_block *nb)
+{
+ int err;
+
+ mutex_lock(&netdev_switch_mutex);
+ err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
+ mutex_unlock(&netdev_switch_mutex);
+ return err;
+}
+EXPORT_SYMBOL(unregister_netdev_switch_notifier);
+
+/**
+ * call_netdev_switch_notifiers - Call notifiers
+ * @val: value passed unmodified to notifier function
+ * @dev: port device
+ * @info: notifier information data
+ *
+ * Call all network notifier blocks. This should be called by a driver
+ * when it needs to propagate a hardware event.
+ * Return values are the same as for atomic_notifier_call_chain().
+ */
+int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+ struct netdev_switch_notifier_info *info)
+{
+ int err;
+
+ info->dev = dev;
+ mutex_lock(&netdev_switch_mutex);
+ err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
+ mutex_unlock(&netdev_switch_mutex);
+ return err;
+}
+EXPORT_SYMBOL(call_netdev_switch_notifiers);
+
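As a hedged example of how a switch driver might hook into this chain, the sketch below registers a notifier whose handler inspects the netdev_switch_notifier_info passed by call_netdev_switch_notifiers(); the handler and notifier_block names are hypothetical and not part of this patch.

/* Hypothetical consumer of the switchdev notifier chain
 * (needs <linux/netdevice.h>, <linux/notifier.h>, <net/switchdev.h>). */
static int example_switch_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct netdev_switch_notifier_info *info = ptr;

	/* info->dev is the originating port device, filled in by
	 * call_netdev_switch_notifiers() above. */
	netdev_dbg(info->dev, "switchdev event %lu\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block example_switch_nb = {
	.notifier_call = example_switch_event,
};

/* Driver init/exit paths would then call:
 *	register_netdev_switch_notifier(&example_switch_nb);
 *	unregister_netdev_switch_notifier(&example_switch_nb);
 */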
+/**
+ * netdev_switch_port_bridge_setlink - Notify switch device port of bridge
+ * port attributes
+ *
+ * @dev: port device
+ * @nlh: netlink msg with bridge port attributes
+ * @flags: bridge setlink flags
+ *
+ * Notify switch device port of bridge port attributes
+ */
+int netdev_switch_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+ return 0;
+
+ if (!ops->ndo_bridge_setlink)
+ return -EOPNOTSUPP;
+
+ return ops->ndo_bridge_setlink(dev, nlh, flags);
+}
+EXPORT_SYMBOL(netdev_switch_port_bridge_setlink);
+
+/**
+ * netdev_switch_port_bridge_dellink - Notify switch device port of bridge
+ * port attribute delete
+ *
+ * @dev: port device
+ * @nlh: netlink msg with bridge port attributes
+ * @flags: bridge setlink flags
+ *
+ * Notify switch device port of bridge port attribute delete
+ */
+int netdev_switch_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+ return 0;
+
+ if (!ops->ndo_bridge_dellink)
+ return -EOPNOTSUPP;
+
+ return ops->ndo_bridge_dellink(dev, nlh, flags);
+}
+EXPORT_SYMBOL(netdev_switch_port_bridge_dellink);
+
+/**
+ * ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
+ * op for master devices
+ *
+ * @dev: port device
+ * @nlh: netlink msg with bridge port attributes
+ * @flags: bridge setlink flags
+ *
+ * Notify master device slaves of bridge port attributes
+ */
+int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int ret = 0, err = 0;
+
+ if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+ return ret;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = netdev_switch_port_bridge_setlink(lower_dev, nlh, flags);
+ if (err && err != -EOPNOTSUPP)
+ ret = err;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_setlink);
+
+/**
+ * ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
+ * op for master devices
+ *
+ * @dev: port device
+ * @nlh: netlink msg with bridge port attributes
+ * @flags: bridge dellink flags
+ *
+ * Notify master device slaves of bridge port attribute deletes
+ */
+int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int ret = 0, err = 0;
+
+ if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+ return ret;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = netdev_switch_port_bridge_dellink(lower_dev, nlh, flags);
+ if (err && err != -EOPNOTSUPP)
+ ret = err;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_dellink);
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index c890848f9d56..91c8a8e031db 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -20,18 +20,6 @@ menuconfig TIPC
If in doubt, say N.
-config TIPC_PORTS
- int "Maximum number of ports in a node"
- depends on TIPC
- range 127 65535
- default "8191"
- help
- Specifies how many ports can be supported by a node.
- Can range from 127 to 65535 ports; default is 8191.
-
- Setting this to a smaller value saves some memory,
- setting it to higher allows for more ports.
-
config TIPC_MEDIA_IB
bool "InfiniBand media type support"
depends on TIPC && INFINIBAND_IPOIB
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 333e4592772c..599b1a540d2b 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -4,11 +4,11 @@
obj-$(CONFIG_TIPC) := tipc.o
-tipc-y += addr.o bcast.o bearer.o config.o \
+tipc-y += addr.o bcast.o bearer.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o name_table.o net.o \
- netlink.o node.o socket.o log.o eth_media.o \
- server.o
+ netlink.o netlink_compat.o node.o socket.o eth_media.o \
+ server.o socket.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 357b74b26f9e..48fd3b5a73fb 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -34,8 +34,51 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "core.h"
+#include <linux/kernel.h>
#include "addr.h"
+#include "core.h"
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+int in_own_cluster(struct net *net, u32 addr)
+{
+ return in_own_cluster_exact(net, addr) || !addr;
+}
+
+int in_own_cluster_exact(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return !((addr ^ tn->own_addr) >> 12);
+}
+
+/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+int in_own_node(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return (addr == tn->own_addr) || !addr;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *
+ * Needed when address of a named message must be looked up a second time
+ * after a network hop.
+ */
+u32 addr_domain(struct net *net, u32 sc)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ if (likely(sc == TIPC_NODE_SCOPE))
+ return tn->own_addr;
+ if (sc == TIPC_CLUSTER_SCOPE)
+ return tipc_cluster_mask(tn->own_addr);
+ return tipc_zone_mask(tn->own_addr);
+}
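To make the scope-to-domain mapping concrete, here is a tiny standalone sketch using the TIPC_ZONE_MASK/TIPC_CLUSTER_MASK values from addr.h later in this patch; the sample address <1.1.10> is invented for the example and the program is not part of the patch.

/* Standalone illustration of the masking done by addr_domain().
 * Masks are copied from net/tipc/addr.h; the own_addr value is a
 * made-up example. */
#include <stdio.h>
#include <stdint.h>

#define TIPC_ZONE_MASK    0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u

int main(void)
{
	uint32_t own_addr = (1u << 24) | (1u << 12) | 10;	/* <1.1.10> */

	printf("node scope    -> %#x\n", (unsigned)own_addr);
	printf("cluster scope -> %#x\n", (unsigned)(own_addr & TIPC_CLUSTER_MASK));
	printf("zone scope    -> %#x\n", (unsigned)(own_addr & TIPC_ZONE_MASK));
	return 0;
}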
/**
* tipc_addr_domain_valid - validates a network domain address
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index a74acf9ee804..c700c2d28e09 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,7 +37,10 @@
#ifndef _TIPC_ADDR_H
#define _TIPC_ADDR_H
-#include "core.h"
+#include <linux/types.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#define TIPC_ZONE_MASK 0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u
@@ -52,42 +55,10 @@ static inline u32 tipc_cluster_mask(u32 addr)
return addr & TIPC_CLUSTER_MASK;
}
-static inline int in_own_cluster_exact(u32 addr)
-{
- return !((addr ^ tipc_own_addr) >> 12);
-}
-
-/**
- * in_own_node - test for node inclusion; <0.0.0> always matches
- */
-static inline int in_own_node(u32 addr)
-{
- return (addr == tipc_own_addr) || !addr;
-}
-
-/**
- * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
- */
-static inline int in_own_cluster(u32 addr)
-{
- return in_own_cluster_exact(addr) || !addr;
-}
-
-/**
- * addr_domain - convert 2-bit scope value to equivalent message lookup domain
- *
- * Needed when address of a named message must be looked up a second time
- * after a network hop.
- */
-static inline u32 addr_domain(u32 sc)
-{
- if (likely(sc == TIPC_NODE_SCOPE))
- return tipc_own_addr;
- if (sc == TIPC_CLUSTER_SCOPE)
- return tipc_cluster_mask(tipc_own_addr);
- return tipc_zone_mask(tipc_own_addr);
-}
-
+int in_own_cluster(struct net *net, u32 addr);
+int in_own_cluster_exact(struct net *net, u32 addr);
+int in_own_node(struct net *net, u32 addr);
+u32 addr_domain(struct net *net, u32 sc);
int tipc_addr_domain_valid(u32);
int tipc_addr_node_valid(u32 addr);
int tipc_in_scope(u32 domain, u32 addr);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 96ceefeb9daf..3e41704832de 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
/*
* net/tipc/bcast.c: TIPC broadcast code
*
- * Copyright (c) 2004-2006, 2014, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
@@ -35,77 +35,14 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "core.h"
-#include "link.h"
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
+#include "core.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
-#define BCBEARER MAX_BEARERS
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
- struct tipc_bearer *primary;
- struct tipc_bearer *secondary;
-};
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines. Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
- struct tipc_bearer bearer;
- struct tipc_media media;
- struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
- struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
- struct tipc_node_map remains;
- struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
- spinlock_t lock;
- struct tipc_link link;
- struct tipc_node node;
- unsigned int flags;
- struct tipc_node_map bcast_nodes;
- struct tipc_node *retransmit_to;
-};
-
-static struct tipc_bcbearer *bcbearer;
-static struct tipc_bclink *bclink;
-static struct tipc_link *bcl;
const char tipc_bclink_name[] = "broadcast-link";
@@ -115,38 +52,50 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-static void tipc_bclink_lock(void)
+static void tipc_bclink_lock(struct net *net)
{
- spin_lock_bh(&bclink->lock);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ spin_lock_bh(&tn->bclink->lock);
}
-static void tipc_bclink_unlock(void)
+static void tipc_bclink_unlock(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node = NULL;
- if (likely(!bclink->flags)) {
- spin_unlock_bh(&bclink->lock);
+ if (likely(!tn->bclink->flags)) {
+ spin_unlock_bh(&tn->bclink->lock);
return;
}
- if (bclink->flags & TIPC_BCLINK_RESET) {
- bclink->flags &= ~TIPC_BCLINK_RESET;
- node = tipc_bclink_retransmit_to();
+ if (tn->bclink->flags & TIPC_BCLINK_RESET) {
+ tn->bclink->flags &= ~TIPC_BCLINK_RESET;
+ node = tipc_bclink_retransmit_to(net);
}
- spin_unlock_bh(&bclink->lock);
+ spin_unlock_bh(&tn->bclink->lock);
if (node)
tipc_link_reset_all(node);
}
+void tipc_bclink_input(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
+}
+
uint tipc_bclink_get_mtu(void)
{
return MAX_PKT_DEFAULT_MCAST;
}
-void tipc_bclink_set_flags(unsigned int flags)
+void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
- bclink->flags |= flags;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tn->bclink->flags |= flags;
}
static u32 bcbuf_acks(struct sk_buff *buf)
@@ -164,31 +113,40 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
-void tipc_bclink_add_node(u32 addr)
+void tipc_bclink_add_node(struct net *net, u32 addr)
{
- tipc_bclink_lock();
- tipc_nmap_add(&bclink->bcast_nodes, addr);
- tipc_bclink_unlock();
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_bclink_lock(net);
+ tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
+ tipc_bclink_unlock(net);
}
-void tipc_bclink_remove_node(u32 addr)
+void tipc_bclink_remove_node(struct net *net, u32 addr)
{
- tipc_bclink_lock();
- tipc_nmap_remove(&bclink->bcast_nodes, addr);
- tipc_bclink_unlock();
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_bclink_lock(net);
+ tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
+ tipc_bclink_unlock(net);
}
-static void bclink_set_last_sent(void)
+static void bclink_set_last_sent(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+
if (bcl->next_out)
bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
else
bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}
-u32 tipc_bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(struct net *net)
{
- return bcl->fsm_msg_cnt;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return tn->bcl->fsm_msg_cnt;
}
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -203,9 +161,11 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
*
* Called with bclink_lock locked
*/
-struct tipc_node *tipc_bclink_retransmit_to(void)
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
- return bclink->retransmit_to;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return tn->bclink->retransmit_to;
}
/**
@@ -215,15 +175,17 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
*
* Called with bclink_lock locked
*/
-static void bclink_retransmit_pkt(u32 after, u32 to)
+static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
struct sk_buff *skb;
+ struct tipc_link *bcl = tn->bcl;
skb_queue_walk(&bcl->outqueue, skb) {
- if (more(buf_seqno(skb), after))
+ if (more(buf_seqno(skb), after)) {
+ tipc_link_retransmit(bcl, skb, mod(to - after));
break;
+ }
}
- tipc_link_retransmit(bcl, skb, mod(to - after));
}
/**
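Besides gaining the tipc_net argument, bclink_retransmit_pkt() moves the tipc_link_retransmit() call inside the skb_queue_walk() loop, so it now fires only for the first buffer whose sequence number lies beyond 'after'; the previous version broke out of the walk and used the cursor unconditionally, which appears safe only when such a buffer actually exists. A toy list walk showing the shape of the guarded call (plain C list, not sk_buff queues):

#include <stdio.h>

struct pkt { unsigned int seqno; struct pkt *next; };

/* Retransmit everything after 'after': act on the first match inside
 * the walk, mirroring the reordered loop above. */
static void retransmit_after(struct pkt *head, unsigned int after)
{
	for (struct pkt *p = head; p; p = p->next) {
		if (p->seqno > after) {
			printf("retransmit from seqno %u\n", p->seqno);
			return;		/* nothing to do if no packet qualifies */
		}
	}
}

int main(void)
{
	struct pkt c = { 12, NULL }, b = { 11, &c }, a = { 10, &b };

	retransmit_after(&a, 10);	/* prints: retransmit from seqno 11 */
	retransmit_after(&a, 99);	/* prints nothing */
	return 0;
}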
@@ -231,13 +193,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
*
* Called with no locks taken
*/
-void tipc_bclink_wakeup_users(void)
+void tipc_bclink_wakeup_users(struct net *net)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
- tipc_sk_rcv(skb);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}
/**
@@ -252,10 +212,12 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
struct sk_buff *skb, *tmp;
struct sk_buff *next;
unsigned int released = 0;
+ struct net *net = n_ptr->net;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
/* Bail out if tx queue is empty (no clean up is required) */
- skb = skb_peek(&bcl->outqueue);
+ skb = skb_peek(&tn->bcl->outqueue);
if (!skb)
goto exit;
@@ -266,43 +228,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
* acknowledge sent messages only (if other nodes still exist)
* or both sent and unsent messages (otherwise)
*/
- if (bclink->bcast_nodes.count)
- acked = bcl->fsm_msg_cnt;
+ if (tn->bclink->bcast_nodes.count)
+ acked = tn->bcl->fsm_msg_cnt;
else
- acked = bcl->next_out_no;
+ acked = tn->bcl->next_out_no;
} else {
/*
* Bail out if specified sequence number does not correspond
* to a message that has been sent and not yet acknowledged
*/
if (less(acked, buf_seqno(skb)) ||
- less(bcl->fsm_msg_cnt, acked) ||
+ less(tn->bcl->fsm_msg_cnt, acked) ||
less_eq(acked, n_ptr->bclink.acked))
goto exit;
}
/* Skip over packets that node has previously acknowledged */
- skb_queue_walk(&bcl->outqueue, skb) {
+ skb_queue_walk(&tn->bcl->outqueue, skb) {
if (more(buf_seqno(skb), n_ptr->bclink.acked))
break;
}
/* Update packets that node is now acknowledging */
- skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+ skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
if (more(buf_seqno(skb), acked))
break;
- next = tipc_skb_queue_next(&bcl->outqueue, skb);
- if (skb != bcl->next_out) {
+ next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
+ if (skb != tn->bcl->next_out) {
bcbuf_decr_acks(skb);
} else {
bcbuf_set_acks(skb, 0);
- bcl->next_out = next;
- bclink_set_last_sent();
+ tn->bcl->next_out = next;
+ bclink_set_last_sent(net);
}
if (bcbuf_acks(skb) == 0) {
- __skb_unlink(skb, &bcl->outqueue);
+ __skb_unlink(skb, &tn->bcl->outqueue);
kfree_skb(skb);
released = 1;
}
@@ -310,15 +272,14 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
n_ptr->bclink.acked = acked;
/* Try resolving broadcast link congestion, if necessary */
- if (unlikely(bcl->next_out)) {
- tipc_link_push_packets(bcl);
- bclink_set_last_sent();
+ if (unlikely(tn->bcl->next_out)) {
+ tipc_link_push_packets(tn->bcl);
+ bclink_set_last_sent(net);
}
- if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+ if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
-
exit:
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
}
/**
@@ -326,9 +287,12 @@ exit:
*
* RCU and node lock set
*/
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
+ u32 last_sent)
{
struct sk_buff *buf;
+ struct net *net = n_ptr->net;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
/* Ignore "stale" link state info */
if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -358,18 +322,18 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
- tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+ tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, n_ptr->addr);
msg_set_non_seq(msg, 1);
- msg_set_mc_netid(msg, tipc_net_id);
+ msg_set_mc_netid(msg, tn->net_id);
msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
msg_set_bcgap_to(msg, to);
- tipc_bclink_lock();
- tipc_bearer_send(MAX_BEARERS, buf, NULL);
- bcl->stats.sent_nacks++;
- tipc_bclink_unlock();
+ tipc_bclink_lock(net);
+ tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
+ tn->bcl->stats.sent_nacks++;
+ tipc_bclink_unlock(net);
kfree_skb(buf);
n_ptr->bclink.oos_state++;
@@ -382,9 +346,9 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
* Delay any upcoming NACK by this node if another node has already
* requested the first message this node is going to ask for.
*/
-static void bclink_peek_nack(struct tipc_msg *msg)
+static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
- struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
+ struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
if (unlikely(!n_ptr))
return;
@@ -399,17 +363,23 @@ static void bclink_peek_nack(struct tipc_msg *msg)
tipc_node_unlock(n_ptr);
}
-/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
+/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
* and to identified node local sockets
+ * @net: the applicable net namespace
* @list: chain of buffers containing message
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_bclink_xmit(struct sk_buff_head *list)
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+ struct tipc_bclink *bclink = tn->bclink;
int rc = 0;
int bc = 0;
struct sk_buff *skb;
+ struct sk_buff_head arrvq;
+ struct sk_buff_head inputq;
/* Prepare clone of message for local node */
skb = tipc_msg_reassemble(list);
@@ -418,32 +388,35 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
return -EHOSTUNREACH;
}
- /* Broadcast to all other nodes */
+ /* Broadcast to all nodes */
if (likely(bclink)) {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
if (likely(bclink->bcast_nodes.count)) {
- rc = __tipc_link_xmit(bcl, list);
+ rc = __tipc_link_xmit(net, bcl, list);
if (likely(!rc)) {
u32 len = skb_queue_len(&bcl->outqueue);
- bclink_set_last_sent();
+ bclink_set_last_sent(net);
bcl->stats.queue_sz_counts++;
bcl->stats.accu_queue_sz += len;
}
bc = 1;
}
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
}
if (unlikely(!bc))
__skb_queue_purge(list);
- /* Deliver message clone */
- if (likely(!rc))
- tipc_sk_mcast_rcv(skb);
- else
+ if (unlikely(rc)) {
kfree_skb(skb);
-
+ return rc;
+ }
+ /* Deliver message clone */
+ __skb_queue_head_init(&arrvq);
+ skb_queue_head_init(&inputq);
+ __skb_queue_tail(&arrvq, skb);
+ tipc_sk_mcast_rcv(net, &arrvq, &inputq);
return rc;
}
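tipc_bclink_xmit() no longer hands the local clone straight to tipc_sk_mcast_rcv(skb); it sets up a private arrival queue and input queue on the stack, appends the clone to the arrival queue and lets tipc_sk_mcast_rcv(net, &arrvq, &inputq) drain them. A small self-contained hand-off in the same two-stage spirit (toy ring buffers standing in for sk_buff_head):

#include <stdio.h>

#define QLEN 8

struct toyq { int buf[QLEN]; int head, tail; };

static void q_push(struct toyq *q, int v) { q->buf[q->tail++ % QLEN] = v; }
static int  q_empty(const struct toyq *q) { return q->head == q->tail; }
static int  q_pop(struct toyq *q) { return q->buf[q->head++ % QLEN]; }

/* Move everything that has "arrived" onto the input queue, then let the
 * consumer drain the input queue -- the two-stage pattern used above. */
static void mcast_rcv(struct toyq *arrvq, struct toyq *inputq)
{
	while (!q_empty(arrvq))
		q_push(inputq, q_pop(arrvq));
	while (!q_empty(inputq))
		printf("deliver message %d\n", q_pop(inputq));
}

int main(void)
{
	struct toyq arrvq = { {0}, 0, 0 }, inputq = { {0}, 0, 0 };

	q_push(&arrvq, 42);		/* the "local clone" of the broadcast */
	mcast_rcv(&arrvq, &inputq);
	return 0;
}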
@@ -454,19 +427,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
*/
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
+ struct tipc_net *tn = net_generic(node->net, tipc_net_id);
+
bclink_update_last_sent(node, seqno);
node->bclink.last_in = seqno;
node->bclink.oos_state = 0;
- bcl->stats.recv_info++;
+ tn->bcl->stats.recv_info++;
/*
* Unicast an ACK periodically, ensuring that
* all nodes in the cluster don't ACK at the same time
*/
- if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+ if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
tipc_link_proto_xmit(node->active_links[node->addr & 1],
STATE_MSG, 0, 0, 0, 0, 0);
- bcl->stats.sent_acks++;
+ tn->bcl->stats.sent_acks++;
}
}
@@ -475,19 +450,24 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
*
* RCU is locked, no other locks set
*/
-void tipc_bclink_rcv(struct sk_buff *buf)
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
struct tipc_msg *msg = buf_msg(buf);
struct tipc_node *node;
u32 next_in;
u32 seqno;
int deferred = 0;
+ int pos = 0;
+ struct sk_buff *iskb;
+ struct sk_buff_head *arrvq, *inputq;
/* Screen out unwanted broadcast messages */
- if (msg_mc_netid(msg) != tipc_net_id)
+ if (msg_mc_netid(msg) != tn->net_id)
goto exit;
- node = tipc_node_find(msg_prevnode(msg));
+ node = tipc_node_find(net, msg_prevnode(msg));
if (unlikely(!node))
goto exit;
@@ -499,18 +479,18 @@ void tipc_bclink_rcv(struct sk_buff *buf)
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
if (msg_type(msg) != STATE_MSG)
goto unlock;
- if (msg_destnode(msg) == tipc_own_addr) {
+ if (msg_destnode(msg) == tn->own_addr) {
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bcl->stats.recv_nacks++;
- bclink->retransmit_to = node;
- bclink_retransmit_pkt(msg_bcgap_after(msg),
+ tn->bclink->retransmit_to = node;
+ bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
msg_bcgap_to(msg));
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
} else {
tipc_node_unlock(node);
- bclink_peek_nack(msg);
+ bclink_peek_nack(net, msg);
}
goto exit;
}
@@ -518,52 +498,54 @@ void tipc_bclink_rcv(struct sk_buff *buf)
/* Handle in-sequence broadcast message */
seqno = msg_seqno(msg);
next_in = mod(node->bclink.last_in + 1);
+ arrvq = &tn->bclink->arrvq;
+ inputq = &tn->bclink->inputq;
if (likely(seqno == next_in)) {
receive:
/* Deliver message to destination */
if (likely(msg_isdata(msg))) {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock();
+ spin_lock_bh(&inputq->lock);
+ __skb_queue_tail(arrvq, buf);
+ spin_unlock_bh(&inputq->lock);
+ node->action_flags |= TIPC_BCAST_MSG_EVT;
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
- if (likely(msg_mcast(msg)))
- tipc_sk_mcast_rcv(buf);
- else
- kfree_skb(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
- tipc_bclink_unlock();
+ pos = 0;
+ while (tipc_msg_extract(buf, &iskb, &pos)) {
+ spin_lock_bh(&inputq->lock);
+ __skb_queue_tail(arrvq, iskb);
+ spin_unlock_bh(&inputq->lock);
+ }
+ node->action_flags |= TIPC_BCAST_MSG_EVT;
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
- tipc_link_bundle_rcv(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
tipc_buf_append(&node->bclink.reasm_buf, &buf);
if (unlikely(!buf && !node->bclink.reasm_buf))
goto unlock;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
bcl->stats.recv_fragments++;
if (buf) {
bcl->stats.recv_fragmented++;
msg = buf_msg(buf);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
goto receive;
}
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
- } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
- tipc_bclink_lock();
- bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock();
- tipc_node_unlock(node);
- tipc_named_rcv(buf);
} else {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
kfree_skb(buf);
}
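The receive path above stops calling tipc_sk_mcast_rcv(), tipc_link_bundle_rcv() and tipc_named_rcv() directly while holding the node lock; accepted data buffers, and every sub-buffer that tipc_msg_extract() pulls out of a MSG_BUNDLER message, are instead appended to the shared arrvq under inputq->lock, and the node is flagged TIPC_BCAST_MSG_EVT so tipc_bclink_input() can deliver them later. A toy unbundler for length-prefixed records, just to illustrate the extract-in-a-loop shape (the record format is invented, not the TIPC one):

#include <stdio.h>
#include <string.h>

/* Pull the next length-prefixed record out of buf, advancing *pos.
 * Returns 1 and copies the payload while records remain, else 0. */
static int extract(const unsigned char *buf, int buflen, int *pos,
		   unsigned char *out, int *outlen)
{
	if (*pos + 1 > buflen)
		return 0;
	*outlen = buf[*pos];
	if (*pos + 1 + *outlen > buflen)
		return 0;
	memcpy(out, buf + *pos + 1, *outlen);
	*pos += 1 + *outlen;
	return 1;
}

int main(void)
{
	const unsigned char bundle[] = { 3, 'o', 'n', 'e', 3, 't', 'w', 'o' };
	unsigned char msg[16];
	int pos = 0, len;

	while (extract(bundle, sizeof(bundle), &pos, msg, &len))
		printf("queued sub-message of %d bytes: %.*s\n", len, len, msg);
	return 0;
}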
@@ -601,14 +583,14 @@ receive:
buf = NULL;
}
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
if (deferred)
bcl->stats.deferred_recv++;
else
bcl->stats.duplicates++;
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
unlock:
tipc_node_unlock(node);
@@ -619,7 +601,7 @@ exit:
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
return (n_ptr->bclink.recv_permitted &&
- (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
+ (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
@@ -632,11 +614,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
* Returns 0 (packet sent successfully) under all circumstances,
* since the broadcast link's pseudo-bearer never blocks
*/
-static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *unused1,
struct tipc_media_addr *unused2)
{
int bp_index;
struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bcbearer *bcbearer = tn->bcbearer;
+ struct tipc_bclink *bclink = tn->bclink;
/* Prepare broadcast link message for reliable transmission,
* if first time trying to send it;
@@ -646,8 +632,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
if (likely(!msg_non_seq(buf_msg(buf)))) {
bcbuf_set_acks(buf, bclink->bcast_nodes.count);
msg_set_non_seq(msg, 1);
- msg_set_mc_netid(msg, tipc_net_id);
- bcl->stats.sent_info++;
+ msg_set_mc_netid(msg, tn->net_id);
+ tn->bcl->stats.sent_info++;
if (WARN_ON(!bclink->bcast_nodes.count)) {
dump_stack();
@@ -676,13 +662,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
if (bp_index == 0) {
/* Use original buffer for first bearer */
- tipc_bearer_send(b->identity, buf, &b->bcast_addr);
+ tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
} else {
/* Avoid concurrent buffer access */
tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
if (!tbuf)
break;
- tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
+ tipc_bearer_send(net, b->identity, tbuf,
+ &b->bcast_addr);
kfree_skb(tbuf); /* Bearer keeps a clone */
}
if (bcbearer->remains_new.count == 0)
@@ -697,15 +684,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
/**
* tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
*/
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+ u32 node, bool action)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bcbearer *bcbearer = tn->bcbearer;
struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
struct tipc_bcbearer_pair *bp_curr;
struct tipc_bearer *b;
int b_index;
int pri;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
if (action)
tipc_nmap_add(nm_ptr, node);
@@ -717,7 +707,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
rcu_read_lock();
for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
- b = rcu_dereference_rtnl(bearer_list[b_index]);
+ b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
if (!b || !b->nodes.count)
continue;
@@ -752,7 +742,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
bp_curr++;
}
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -806,19 +796,21 @@ msg_full:
return -EMSGSIZE;
}
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
if (!bcl)
return 0;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
@@ -851,7 +843,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
if (err)
goto attr_msg_full;
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
@@ -862,79 +854,49 @@ prop_msg_full:
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
-int tipc_bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_reset_stats(struct net *net)
{
- int ret;
- struct tipc_stats *s;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
if (!bcl)
- return 0;
-
- tipc_bclink_lock();
-
- s = &bcl->stats;
-
- ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
- " Window:%u packets\n",
- bcl->name, bcl->queue_limit[0]);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- s->recv_info, s->recv_fragments,
- s->recv_fragmented, s->recv_bundles,
- s->recv_bundled);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- s->sent_info, s->sent_fragments,
- s->sent_fragmented, s->sent_bundles,
- s->sent_bundled);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " RX naks:%u defs:%u dups:%u\n",
- s->recv_nacks, s->deferred_recv, s->duplicates);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX naks:%u acks:%u dups:%u\n",
- s->sent_nacks, s->sent_acks, s->retransmitted);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " Congestion link:%u Send queue max:%u avg:%u\n",
- s->link_congs, s->max_queue_sz,
- s->queue_sz_counts ?
- (s->accu_queue_sz / s->queue_sz_counts) : 0);
-
- tipc_bclink_unlock();
- return ret;
-}
-
-int tipc_bclink_reset_stats(void)
-{
- if (!bcl)
return -ENOPROTOOPT;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
memset(&bcl->stats, 0, sizeof(bcl->stats));
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
return 0;
}
-int tipc_bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+
if (!bcl)
return -ENOPROTOOPT;
if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
return -EINVAL;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
tipc_link_set_queue_limits(bcl, limit);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
return 0;
}
-int tipc_bclink_init(void)
+int tipc_bclink_init(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bcbearer *bcbearer;
+ struct tipc_bclink *bclink;
+ struct tipc_link *bcl;
+
bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
if (!bcbearer)
return -ENOMEM;
@@ -953,30 +915,39 @@ int tipc_bclink_init(void)
spin_lock_init(&bclink->lock);
__skb_queue_head_init(&bcl->outqueue);
__skb_queue_head_init(&bcl->deferred_queue);
- skb_queue_head_init(&bcl->waiting_sks);
+ skb_queue_head_init(&bcl->wakeupq);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
- __skb_queue_head_init(&bclink->node.waiting_sks);
+ __skb_queue_head_init(&bclink->arrvq);
+ skb_queue_head_init(&bclink->inputq);
bcl->owner = &bclink->node;
+ bcl->owner->net = net;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->bearer_id = MAX_BEARERS;
- rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
+ rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
bcl->state = WORKING_WORKING;
+ bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
+ msg_set_prevnode(bcl->pmsg, tn->own_addr);
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+ tn->bcbearer = bcbearer;
+ tn->bclink = bclink;
+ tn->bcl = bcl;
return 0;
}
-void tipc_bclink_stop(void)
+void tipc_bclink_stop(struct net *net)
{
- tipc_bclink_lock();
- tipc_link_purge_queues(bcl);
- tipc_bclink_unlock();
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_bclink_lock(net);
+ tipc_link_purge_queues(tn->bcl);
+ tipc_bclink_unlock(net);
- RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+ RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
synchronize_net();
- kfree(bcbearer);
- kfree(bclink);
+ kfree(tn->bcbearer);
+ kfree(tn->bclink);
}
/**
@@ -1036,50 +1007,3 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
}
}
}
-
-/**
- * tipc_port_list_add - add a port to a port list, ensuring no duplicates
- */
-void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
-{
- struct tipc_port_list *item = pl_ptr;
- int i;
- int item_sz = PLSIZE;
- int cnt = pl_ptr->count;
-
- for (; ; cnt -= item_sz, item = item->next) {
- if (cnt < PLSIZE)
- item_sz = cnt;
- for (i = 0; i < item_sz; i++)
- if (item->ports[i] == port)
- return;
- if (i < PLSIZE) {
- item->ports[i] = port;
- pl_ptr->count++;
- return;
- }
- if (!item->next) {
- item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
- if (!item->next) {
- pr_warn("Incomplete multicast delivery, no memory\n");
- return;
- }
- item->next->next = NULL;
- }
- }
-}
-
-/**
- * tipc_port_list_free - free dynamically created entries in port_list chain
- *
- */
-void tipc_port_list_free(struct tipc_port_list *pl_ptr)
-{
- struct tipc_port_list *item;
- struct tipc_port_list *next;
-
- for (item = pl_ptr->next; item; item = next) {
- next = item->next;
- kfree(item);
- }
-}
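The tipc_port_list helpers deleted above kept a duplicate-free set of local destination port numbers in chained chunks of PLSIZE entries, allocating a fresh chunk whenever the existing ones filled up; the matching struct and declarations disappear from bcast.h in the next file. For reference, a userspace rendition of that chunked, duplicate-free add, with the chunk size shrunk so the growth path is exercised:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK 4			/* stand-in for PLSIZE */

struct port_list {
	int count;		/* total ports, valid in the first chunk only */
	struct port_list *next;
	unsigned int ports[CHUNK];
};

/* Add a port unless it is already present, growing by whole chunks. */
static void port_list_add(struct port_list *pl, unsigned int port)
{
	struct port_list *item = pl;
	int cnt = pl->count;

	for (;; cnt -= CHUNK, item = item->next) {
		int used = cnt < CHUNK ? cnt : CHUNK;
		int i;

		for (i = 0; i < used; i++)
			if (item->ports[i] == port)
				return;			/* duplicate */
		if (used < CHUNK) {
			item->ports[used] = port;
			pl->count++;
			return;
		}
		if (!item->next) {
			item->next = calloc(1, sizeof(*item));
			if (!item->next)
				return;			/* give up on OOM */
		}
	}
}

int main(void)
{
	struct port_list pl = { 0, NULL, {0} };

	for (unsigned int p = 1; p <= 6; p++)
		port_list_add(&pl, p);
	port_list_add(&pl, 3);			/* ignored: already present */
	printf("ports stored: %d\n", pl.count);	/* 6 */
	free(pl.next);
	return 0;
}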
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 644d79129fba..43f397fbac55 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -1,7 +1,7 @@
/*
* net/tipc/bcast.h: Include file for TIPC broadcast code
*
- * Copyright (c) 2003-2006, 2014, Ericsson AB
+ * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -37,39 +37,73 @@
#ifndef _TIPC_BCAST_H
#define _TIPC_BCAST_H
-#include "netlink.h"
-
-#define MAX_NODES 4096
-#define WSIZE 32
-#define TIPC_BCLINK_RESET 1
+#include <linux/tipc_config.h>
+#include "link.h"
+#include "node.h"
/**
- * struct tipc_node_map - set of node identifiers
- * @count: # of nodes in set
- * @map: bitmap of node identifiers that are in the set
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ *
+ * Bearers must have same priority and same set of reachable destinations
+ * to be paired.
*/
-struct tipc_node_map {
- u32 count;
- u32 map[MAX_NODES / WSIZE];
+
+struct tipc_bcbearer_pair {
+ struct tipc_bearer *primary;
+ struct tipc_bearer *secondary;
};
-#define PLSIZE 32
+#define TIPC_BCLINK_RESET 1
+#define BCBEARER MAX_BEARERS
/**
- * struct tipc_port_list - set of node local destination ports
- * @count: # of ports in set (only valid for first entry in list)
- * @next: pointer to next entry in list
- * @ports: array of port references
+ * struct tipc_bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used by tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines. Concurrent access is
+ * prevented through use of the spinlock "bclink_lock".
*/
-struct tipc_port_list {
- int count;
- struct tipc_port_list *next;
- u32 ports[PLSIZE];
+struct tipc_bcbearer {
+ struct tipc_bearer bearer;
+ struct tipc_media media;
+ struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+ struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+ struct tipc_node_map remains;
+ struct tipc_node_map remains_new;
};
+/**
+ * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
+ * @bcast_nodes: map of broadcast-capable nodes
+ * @retransmit_to: node that most recently requested a retransmit
+ *
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+struct tipc_bclink {
+ spinlock_t lock;
+ struct tipc_link link;
+ struct tipc_node node;
+ unsigned int flags;
+ struct sk_buff_head arrvq;
+ struct sk_buff_head inputq;
+ struct tipc_node_map bcast_nodes;
+ struct tipc_node *retransmit_to;
+};
struct tipc_node;
-
extern const char tipc_bclink_name[];
/**
@@ -81,27 +115,26 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}
-void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
-void tipc_port_list_free(struct tipc_port_list *pl_ptr);
-
-int tipc_bclink_init(void);
-void tipc_bclink_stop(void);
-void tipc_bclink_set_flags(unsigned int flags);
-void tipc_bclink_add_node(u32 addr);
-void tipc_bclink_remove_node(u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(void);
+int tipc_bclink_init(struct net *net);
+void tipc_bclink_stop(struct net *net);
+void tipc_bclink_set_flags(struct net *tn, unsigned int flags);
+void tipc_bclink_add_node(struct net *net, u32 addr);
+void tipc_bclink_remove_node(struct net *net, u32 addr);
+struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-void tipc_bclink_rcv(struct sk_buff *buf);
-u32 tipc_bclink_get_last_sent(void);
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
+u32 tipc_bclink_get_last_sent(struct net *net);
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
-int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
-int tipc_bclink_reset_stats(void);
-int tipc_bclink_set_queue_limits(u32 limit);
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
+void tipc_bclink_update_link_state(struct tipc_node *node,
+ u32 last_sent);
+int tipc_bclink_reset_stats(struct net *net);
+int tipc_bclink_set_queue_limits(struct net *net, u32 limit);
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+ u32 node, bool action);
uint tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff_head *list);
-void tipc_bclink_wakeup_users(void);
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
+void tipc_bclink_wakeup_users(struct net *net);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+void tipc_bclink_input(struct net *net);
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 463db5b15b8b..48852c2dcc03 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -34,11 +34,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <net/sock.h>
#include "core.h"
-#include "config.h"
#include "bearer.h"
#include "link.h"
#include "discover.h"
+#include "bcast.h"
#define MAX_ADDR_STR 60
@@ -67,9 +68,8 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
[TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
};
-struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
-
-static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
+ bool shutting_down);
/**
* tipc_media_find - locates specified media object by name
@@ -111,38 +111,18 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
m_ptr = media_find_id(a->media_id);
if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
- ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
+ ret = scnprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
else {
u32 i;
- ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id);
+ ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
for (i = 0; i < sizeof(a->value); i++)
- ret += tipc_snprintf(buf - ret, len + ret,
+ ret += scnprintf(buf - ret, len + ret,
"-%02x", a->value[i]);
}
}
/**
- * tipc_media_get_names - record names of registered media in buffer
- */
-struct sk_buff *tipc_media_get_names(void)
-{
- struct sk_buff *buf;
- int i;
-
- buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
- if (!buf)
- return NULL;
-
- for (i = 0; media_info_array[i] != NULL; i++) {
- tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
- media_info_array[i]->name,
- strlen(media_info_array[i]->name) + 1);
- }
- return buf;
-}
-
-/**
* bearer_name_validate - validate & (optionally) deconstruct bearer name
* @name: ptr to bearer name string
* @name_parts: ptr to area for bearer name components (or NULL if not needed)
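tipc_media_addr_printf() switches from the removed tipc_snprintf() wrapper to the stock scnprintf(), and the TLV reply builder tipc_media_get_names() goes away with the rest of the legacy configuration interface (its role is presumably covered by the netlink media dump code elsewhere in this file). When chaining several such formatted writes into one buffer, each call is normally handed the running offset and the space still left; a userspace sketch of that accounting using plain snprintf() on a generously sized buffer:

#include <stdio.h>

int main(void)
{
	char buf[64];			/* large enough for this example */
	unsigned char value[4] = { 0xde, 0xad, 0xbe, 0xef };
	int len = sizeof(buf);
	int ret;

	/* Write the prefix, then append one "-%02x" per byte, always
	 * passing the current offset and the space that is left. */
	ret = snprintf(buf, len, "UNKNOWN(%u)", 99u);
	for (unsigned int i = 0; i < sizeof(value); i++)
		ret += snprintf(buf + ret, len - ret, "-%02x", value[i]);

	printf("%s\n", buf);		/* UNKNOWN(99)-de-ad-be-ef */
	return 0;
}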
@@ -190,68 +170,43 @@ static int bearer_name_validate(const char *name,
/**
* tipc_bearer_find - locates bearer object with matching bearer name
*/
-struct tipc_bearer *tipc_bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
u32 i;
for (i = 0; i < MAX_BEARERS; i++) {
- b_ptr = rtnl_dereference(bearer_list[i]);
+ b_ptr = rtnl_dereference(tn->bearer_list[i]);
if (b_ptr && (!strcmp(b_ptr->name, name)))
return b_ptr;
}
return NULL;
}
-/**
- * tipc_bearer_get_names - record names of bearers in buffer
- */
-struct sk_buff *tipc_bearer_get_names(void)
-{
- struct sk_buff *buf;
- struct tipc_bearer *b;
- int i, j;
-
- buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
- if (!buf)
- return NULL;
-
- for (i = 0; media_info_array[i] != NULL; i++) {
- for (j = 0; j < MAX_BEARERS; j++) {
- b = rtnl_dereference(bearer_list[j]);
- if (!b)
- continue;
- if (b->media == media_info_array[i]) {
- tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
- b->name,
- strlen(b->name) + 1);
- }
- }
- }
- return buf;
-}
-
-void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+ b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
if (b_ptr) {
- tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
+ tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
tipc_disc_add_dest(b_ptr->link_req);
}
rcu_read_unlock();
}
-void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+ b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
if (b_ptr) {
- tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
+ tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
tipc_disc_remove_dest(b_ptr->link_req);
}
rcu_read_unlock();
@@ -260,8 +215,10 @@ void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
/**
* tipc_enable_bearer - enable bearer with the given name
*/
-int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
+static int tipc_enable_bearer(struct net *net, const char *name,
+ u32 disc_domain, u32 priority)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
struct tipc_media *m_ptr;
struct tipc_bearer_names b_names;
@@ -271,7 +228,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
u32 i;
int res = -EINVAL;
- if (!tipc_own_addr) {
+ if (!tn->own_addr) {
pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
name);
return -ENOPROTOOPT;
@@ -281,11 +238,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
return -EINVAL;
}
if (tipc_addr_domain_valid(disc_domain) &&
- (disc_domain != tipc_own_addr)) {
- if (tipc_in_scope(disc_domain, tipc_own_addr)) {
- disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+ (disc_domain != tn->own_addr)) {
+ if (tipc_in_scope(disc_domain, tn->own_addr)) {
+ disc_domain = tn->own_addr & TIPC_CLUSTER_MASK;
res = 0; /* accept any node in own cluster */
- } else if (in_own_cluster_exact(disc_domain))
+ } else if (in_own_cluster_exact(net, disc_domain))
res = 0; /* accept specified node in own cluster */
}
if (res) {
@@ -313,7 +270,7 @@ restart:
bearer_id = MAX_BEARERS;
with_this_prio = 1;
for (i = MAX_BEARERS; i-- != 0; ) {
- b_ptr = rtnl_dereference(bearer_list[i]);
+ b_ptr = rtnl_dereference(tn->bearer_list[i]);
if (!b_ptr) {
bearer_id = i;
continue;
@@ -347,7 +304,7 @@ restart:
strcpy(b_ptr->name, name);
b_ptr->media = m_ptr;
- res = m_ptr->enable_media(b_ptr);
+ res = m_ptr->enable_media(net, b_ptr);
if (res) {
pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
name, -res);
@@ -361,15 +318,15 @@ restart:
b_ptr->net_plane = bearer_id + 'A';
b_ptr->priority = priority;
- res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
+ res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
if (res) {
- bearer_disable(b_ptr, false);
+ bearer_disable(net, b_ptr, false);
pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
name);
return -EINVAL;
}
- rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
+ rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr);
pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
name,
@@ -380,11 +337,11 @@ restart:
/**
* tipc_reset_bearer - Reset all links established over this bearer
*/
-static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
+static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
{
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- tipc_link_reset_list(b_ptr->identity);
- tipc_disc_reset(b_ptr);
+ tipc_link_reset_list(net, b_ptr->identity);
+ tipc_disc_reset(net, b_ptr);
return 0;
}
@@ -393,49 +350,35 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
*
* Note: This routine assumes caller holds RTNL lock.
*/
-static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
+ bool shutting_down)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 i;
pr_info("Disabling bearer <%s>\n", b_ptr->name);
b_ptr->media->disable_media(b_ptr);
- tipc_link_delete_list(b_ptr->identity, shutting_down);
+ tipc_link_delete_list(net, b_ptr->identity, shutting_down);
if (b_ptr->link_req)
tipc_disc_delete(b_ptr->link_req);
for (i = 0; i < MAX_BEARERS; i++) {
- if (b_ptr == rtnl_dereference(bearer_list[i])) {
- RCU_INIT_POINTER(bearer_list[i], NULL);
+ if (b_ptr == rtnl_dereference(tn->bearer_list[i])) {
+ RCU_INIT_POINTER(tn->bearer_list[i], NULL);
break;
}
}
kfree_rcu(b_ptr, rcu);
}
-int tipc_disable_bearer(const char *name)
-{
- struct tipc_bearer *b_ptr;
- int res;
-
- b_ptr = tipc_bearer_find(name);
- if (b_ptr == NULL) {
- pr_warn("Attempt to disable unknown bearer <%s>\n", name);
- res = -EINVAL;
- } else {
- bearer_disable(b_ptr, false);
- res = 0;
- }
- return res;
-}
-
-int tipc_enable_l2_media(struct tipc_bearer *b)
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
{
struct net_device *dev;
char *driver_name = strchr((const char *)b->name, ':') + 1;
/* Find device with specified name */
- dev = dev_get_by_name(&init_net, driver_name);
+ dev = dev_get_by_name(net, driver_name);
if (!dev)
return -ENODEV;
@@ -474,8 +417,8 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
* @b_ptr: the bearer through which the packet is to be sent
* @dest: peer destination address
*/
-int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
- struct tipc_media_addr *dest)
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *b, struct tipc_media_addr *dest)
{
struct sk_buff *clone;
struct net_device *dev;
@@ -511,15 +454,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
* The media send routine must not alter the buffer being passed in
* as it may be needed for later retransmission!
*/
-void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
struct tipc_media_addr *dest)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+ b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
if (likely(b_ptr))
- b_ptr->media->send_msg(buf, b_ptr, dest);
+ b_ptr->media->send_msg(net, buf, b_ptr, dest);
rcu_read_unlock();
}
@@ -539,17 +483,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
{
struct tipc_bearer *b_ptr;
- if (!net_eq(dev_net(dev), &init_net)) {
- kfree_skb(buf);
- return NET_RX_DROP;
- }
-
rcu_read_lock();
b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
if (likely(b_ptr)) {
if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
buf->next = NULL;
- tipc_rcv(buf, b_ptr);
+ tipc_rcv(dev_net(dev), buf, b_ptr);
rcu_read_unlock();
return NET_RX_SUCCESS;
}
@@ -572,11 +511,9 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
void *ptr)
{
- struct tipc_bearer *b_ptr;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
+ struct net *net = dev_net(dev);
+ struct tipc_bearer *b_ptr;
b_ptr = rtnl_dereference(dev->tipc_ptr);
if (!b_ptr)
@@ -590,16 +527,16 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
break;
case NETDEV_DOWN:
case NETDEV_CHANGEMTU:
- tipc_reset_bearer(b_ptr);
+ tipc_reset_bearer(net, b_ptr);
break;
case NETDEV_CHANGEADDR:
b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
(char *)dev->dev_addr);
- tipc_reset_bearer(b_ptr);
+ tipc_reset_bearer(net, b_ptr);
break;
case NETDEV_UNREGISTER:
case NETDEV_CHANGENAME:
- bearer_disable(b_ptr, false);
+ bearer_disable(dev_net(dev), b_ptr, false);
break;
}
return NOTIFY_OK;
@@ -632,16 +569,17 @@ void tipc_bearer_cleanup(void)
dev_remove_pack(&tipc_packet_type);
}
-void tipc_bearer_stop(void)
+void tipc_bearer_stop(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
u32 i;
for (i = 0; i < MAX_BEARERS; i++) {
- b_ptr = rtnl_dereference(bearer_list[i]);
+ b_ptr = rtnl_dereference(tn->bearer_list[i]);
if (b_ptr) {
- bearer_disable(b_ptr, true);
- bearer_list[i] = NULL;
+ bearer_disable(net, b_ptr, true);
+ tn->bearer_list[i] = NULL;
}
}
}
@@ -654,7 +592,7 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
struct nlattr *attrs;
struct nlattr *prop;
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_BEARER_GET);
if (!hdr)
return -EMSGSIZE;
@@ -698,6 +636,8 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
int i = cb->args[0];
struct tipc_bearer *bearer;
struct tipc_nl_msg msg;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
if (i == MAX_BEARERS)
return 0;
@@ -708,7 +648,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
for (i = 0; i < MAX_BEARERS; i++) {
- bearer = rtnl_dereference(bearer_list[i]);
+ bearer = rtnl_dereference(tn->bearer_list[i]);
if (!bearer)
continue;
@@ -730,6 +670,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
struct tipc_bearer *bearer;
struct tipc_nl_msg msg;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = genl_info_net(info);
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
@@ -753,7 +694,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
msg.seq = info->snd_seq;
rtnl_lock();
- bearer = tipc_bearer_find(name);
+ bearer = tipc_bearer_find(net, name);
if (!bearer) {
err = -EINVAL;
goto err_out;
@@ -778,6 +719,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
char *name;
struct tipc_bearer *bearer;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
@@ -794,13 +736,13 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
rtnl_lock();
- bearer = tipc_bearer_find(name);
+ bearer = tipc_bearer_find(net, name);
if (!bearer) {
rtnl_unlock();
return -EINVAL;
}
- bearer_disable(bearer, false);
+ bearer_disable(net, bearer, false);
rtnl_unlock();
return 0;
@@ -811,11 +753,13 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
int err;
char *bearer;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 domain;
u32 prio;
prio = TIPC_MEDIA_LINK_PRI;
- domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+ domain = tn->own_addr & TIPC_CLUSTER_MASK;
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
@@ -847,7 +791,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
}
rtnl_lock();
- err = tipc_enable_bearer(bearer, domain, prio);
+ err = tipc_enable_bearer(net, bearer, domain, prio);
if (err) {
rtnl_unlock();
return err;
@@ -863,6 +807,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
char *name;
struct tipc_bearer *b;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = genl_info_net(info);
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
@@ -878,7 +823,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
rtnl_lock();
- b = tipc_bearer_find(name);
+ b = tipc_bearer_find(net, name);
if (!b) {
rtnl_unlock();
return -EINVAL;
@@ -913,7 +858,7 @@ static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
struct nlattr *attrs;
struct nlattr *prop;
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_MEDIA_GET);
if (!hdr)
return -EMSGSIZE;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 2c1230ac5dfe..6b17795ff8bc 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -37,12 +37,13 @@
#ifndef _TIPC_BEARER_H
#define _TIPC_BEARER_H
-#include "bcast.h"
#include "netlink.h"
#include <net/genetlink.h>
#define MAX_BEARERS 2
#define MAX_MEDIA 2
+#define MAX_NODES 4096
+#define WSIZE 32
/* Identifiers associated with TIPC message header media address info
* - address info field is 32 bytes long
@@ -59,6 +60,16 @@
#define TIPC_MEDIA_TYPE_IB 2
/**
+ * struct tipc_node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+struct tipc_node_map {
+ u32 count;
+ u32 map[MAX_NODES / WSIZE];
+};
+
+/**
* struct tipc_media_addr - destination address used by TIPC bearers
* @value: address info (format defined by media)
* @media_id: TIPC media type identifier
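struct tipc_node_map, relocated here from bcast.h, is a plain bitmap: @count records how many node identifiers are set and @map packs MAX_NODES bits into 32-bit words of WSIZE bits each. A standalone model of the add/remove/test operations such a map implies (the helper names below are invented; the kernel's tipc_nmap_add()/tipc_nmap_remove() remain in bcast.c):

#include <stdio.h>

#define MAX_NODES 4096
#define WSIZE 32

struct node_map {
	unsigned int count;
	unsigned int map[MAX_NODES / WSIZE];
};

static int nmap_test(const struct node_map *nm, unsigned int node)
{
	return (nm->map[node / WSIZE] >> (node % WSIZE)) & 1u;
}

static void nmap_add(struct node_map *nm, unsigned int node)
{
	if (!nmap_test(nm, node)) {
		nm->map[node / WSIZE] |= 1u << (node % WSIZE);
		nm->count++;
	}
}

static void nmap_remove(struct node_map *nm, unsigned int node)
{
	if (nmap_test(nm, node)) {
		nm->map[node / WSIZE] &= ~(1u << (node % WSIZE));
		nm->count--;
	}
}

int main(void)
{
	static struct node_map nm;	/* zero-initialised */

	nmap_add(&nm, 33);
	nmap_add(&nm, 33);		/* no double count */
	nmap_add(&nm, 4095);
	nmap_remove(&nm, 33);
	printf("count=%u, has 4095: %d\n", nm.count, nmap_test(&nm, 4095));
	return 0;
}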
@@ -89,10 +100,10 @@ struct tipc_bearer;
* @name: media name
*/
struct tipc_media {
- int (*send_msg)(struct sk_buff *buf,
+ int (*send_msg)(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest);
- int (*enable_media)(struct tipc_bearer *b_ptr);
+ int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
void (*disable_media)(struct tipc_bearer *b_ptr);
int (*addr2str)(struct tipc_media_addr *addr,
char *strbuf,
@@ -157,17 +168,11 @@ struct tipc_bearer_names {
char if_name[TIPC_MAX_IF_NAME];
};
-struct tipc_link;
-
-extern struct tipc_bearer __rcu *bearer_list[];
-
/*
* TIPC routines available to supported media types
*/
-void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
-int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
-int tipc_disable_bearer(const char *name);
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr);
/*
* Routines made available to TIPC by supported media types
@@ -191,21 +196,19 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
-struct sk_buff *tipc_media_get_names(void);
-int tipc_enable_l2_media(struct tipc_bearer *b);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
void tipc_disable_l2_media(struct tipc_bearer *b);
-int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
- struct tipc_media_addr *dest);
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *b, struct tipc_media_addr *dest);
-struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
-void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
-struct tipc_bearer *tipc_bearer_find(const char *name);
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
struct tipc_media *tipc_media_find(const char *name);
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
-void tipc_bearer_stop(void);
-void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+void tipc_bearer_stop(struct net *net);
+void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
struct tipc_media_addr *dest);
#endif /* _TIPC_BEARER_H */
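The next file removes net/tipc/config.c wholesale: its TIPC_CMD_* handlers assembled replies from hand-rolled TLVs via tipc_cfg_reply_alloc() and tipc_cfg_append_tlv(), a job the netlink interface now performs with nlattr helpers. For reference, a userspace sketch of that append-a-TLV bookkeeping, with a 16-bit type and a 16-bit length in front of each payload (field order, padding and buffer sizes are simplified for the toy, not copied from the TLV_* macros):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {		/* toy: host byte order, real code uses htons() */
	uint16_t type;
	uint16_t len;		/* header + payload, before padding */
};

/* Append one TLV to buf at *used, padding the total to 4 bytes.
 * Returns 1 on success, 0 if it would not fit (as the kernel helper did). */
static int tlv_append(unsigned char *buf, size_t size, size_t *used,
		      uint16_t type, const void *data, uint16_t dlen)
{
	size_t need = (sizeof(struct tlv_hdr) + dlen + 3) & ~(size_t)3;
	struct tlv_hdr h = { type, (uint16_t)(sizeof(struct tlv_hdr) + dlen) };

	if (size - *used < need)
		return 0;
	memcpy(buf + *used, &h, sizeof(h));
	memcpy(buf + *used + sizeof(h), data, dlen);
	*used += need;
	return 1;
}

int main(void)
{
	unsigned char reply[64];
	size_t used = 0;
	const char name[] = "eth:eth0";

	tlv_append(reply, sizeof(reply), &used, 1 /* toy "bearer name" type */,
		   name, sizeof(name));
	printf("reply now %zu bytes\n", used);	/* 4 + 9, padded to 16 */
	return 0;
}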
diff --git a/net/tipc/config.c b/net/tipc/config.c
deleted file mode 100644
index 876f4c6a2631..000000000000
--- a/net/tipc/config.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * net/tipc/config.c: TIPC configuration management code
- *
- * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "socket.h"
-#include "name_table.h"
-#include "config.h"
-#include "server.h"
-
-#define REPLY_TRUNCATED "<truncated>\n"
-
-static const void *req_tlv_area; /* request message TLV area */
-static int req_tlv_space; /* request message TLV area size */
-static int rep_headroom; /* reply message headroom to use */
-
-struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
-{
- struct sk_buff *buf;
-
- buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
- if (buf)
- skb_reserve(buf, rep_headroom);
- return buf;
-}
-
-int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
- void *tlv_data, int tlv_data_size)
-{
- struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf);
- int new_tlv_space = TLV_SPACE(tlv_data_size);
-
- if (skb_tailroom(buf) < new_tlv_space)
- return 0;
- skb_put(buf, new_tlv_space);
- tlv->tlv_type = htons(tlv_type);
- tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
- if (tlv_data_size && tlv_data)
- memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
- return 1;
-}
-
-static struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
-{
- struct sk_buff *buf;
- __be32 value_net;
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
- if (buf) {
- value_net = htonl(value);
- tipc_cfg_append_tlv(buf, tlv_type, &value_net,
- sizeof(value_net));
- }
- return buf;
-}
-
-static struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
-{
- return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
-}
-
-struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
-{
- struct sk_buff *buf;
- int string_len = strlen(string) + 1;
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
- if (buf)
- tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
- return buf;
-}
-
-static struct sk_buff *tipc_show_stats(void)
-{
- struct sk_buff *buf;
- struct tlv_desc *rep_tlv;
- char *pb;
- int pb_len;
- int str_len;
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(u32 *)TLV_DATA(req_tlv_area));
- if (value != 0)
- return tipc_cfg_reply_error_string("unsupported argument");
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
- if (buf == NULL)
- return NULL;
-
- rep_tlv = (struct tlv_desc *)buf->data;
- pb = TLV_DATA(rep_tlv);
- pb_len = ULTRA_STRING_MAX_LEN;
-
- str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n");
- str_len += 1; /* for "\0" */
- skb_put(buf, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
-
- return buf;
-}
-
-static struct sk_buff *cfg_enable_bearer(void)
-{
- struct tipc_bearer_config *args;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
- if (tipc_enable_bearer(args->name,
- ntohl(args->disc_domain),
- ntohl(args->priority)))
- return tipc_cfg_reply_error_string("unable to enable bearer");
-
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_disable_bearer(void)
-{
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
- return tipc_cfg_reply_error_string("unable to disable bearer");
-
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_own_addr(void)
-{
- u32 addr;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (addr == tipc_own_addr)
- return tipc_cfg_reply_none();
- if (!tipc_addr_node_valid(addr))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (node address)");
- if (tipc_own_addr)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change node address once assigned)");
- if (!tipc_net_start(addr))
- return tipc_cfg_reply_none();
-
- return tipc_cfg_reply_error_string("cannot change to network mode");
-}
-
-static struct sk_buff *cfg_set_max_ports(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value == tipc_max_ports)
- return tipc_cfg_reply_none();
- if (value < 127 || value > 65535)
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max ports must be 127-65535)");
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change max ports while TIPC is active)");
-}
-
-static struct sk_buff *cfg_set_netid(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value == tipc_net_id)
- return tipc_cfg_reply_none();
- if (value < 1 || value > 9999)
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (network id must be 1-9999)");
- if (tipc_own_addr)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change network id once TIPC has joined a network)");
- tipc_net_id = value;
- return tipc_cfg_reply_none();
-}
-
-struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
- int request_space, int reply_headroom)
-{
- struct sk_buff *rep_tlv_buf;
-
- rtnl_lock();
-
- /* Save request and reply details in a well-known location */
- req_tlv_area = request_area;
- req_tlv_space = request_space;
- rep_headroom = reply_headroom;
-
- /* Check command authorization */
- if (likely(in_own_node(orig_node))) {
- /* command is permitted */
- } else {
- rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot be done remotely)");
- goto exit;
- }
-
- /* Call appropriate processing routine */
- switch (cmd) {
- case TIPC_CMD_NOOP:
- rep_tlv_buf = tipc_cfg_reply_none();
- break;
- case TIPC_CMD_GET_NODES:
- rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_GET_LINKS:
- rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_SHOW_LINK_STATS:
- rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_RESET_LINK_STATS:
- rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_SHOW_NAME_TABLE:
- rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_GET_BEARER_NAMES:
- rep_tlv_buf = tipc_bearer_get_names();
- break;
- case TIPC_CMD_GET_MEDIA_NAMES:
- rep_tlv_buf = tipc_media_get_names();
- break;
- case TIPC_CMD_SHOW_PORTS:
- rep_tlv_buf = tipc_sk_socks_show();
- break;
- case TIPC_CMD_SHOW_STATS:
- rep_tlv_buf = tipc_show_stats();
- break;
- case TIPC_CMD_SET_LINK_TOL:
- case TIPC_CMD_SET_LINK_PRI:
- case TIPC_CMD_SET_LINK_WINDOW:
- rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
- break;
- case TIPC_CMD_ENABLE_BEARER:
- rep_tlv_buf = cfg_enable_bearer();
- break;
- case TIPC_CMD_DISABLE_BEARER:
- rep_tlv_buf = cfg_disable_bearer();
- break;
- case TIPC_CMD_SET_NODE_ADDR:
- rep_tlv_buf = cfg_set_own_addr();
- break;
- case TIPC_CMD_SET_MAX_PORTS:
- rep_tlv_buf = cfg_set_max_ports();
- break;
- case TIPC_CMD_SET_NETID:
- rep_tlv_buf = cfg_set_netid();
- break;
- case TIPC_CMD_GET_MAX_PORTS:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
- break;
- case TIPC_CMD_GET_NETID:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
- break;
- case TIPC_CMD_NOT_NET_ADMIN:
- rep_tlv_buf =
- tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
- break;
- case TIPC_CMD_SET_MAX_ZONES:
- case TIPC_CMD_GET_MAX_ZONES:
- case TIPC_CMD_SET_MAX_SLAVES:
- case TIPC_CMD_GET_MAX_SLAVES:
- case TIPC_CMD_SET_MAX_CLUSTERS:
- case TIPC_CMD_GET_MAX_CLUSTERS:
- case TIPC_CMD_SET_MAX_NODES:
- case TIPC_CMD_GET_MAX_NODES:
- case TIPC_CMD_SET_MAX_SUBSCR:
- case TIPC_CMD_GET_MAX_SUBSCR:
- case TIPC_CMD_SET_MAX_PUBL:
- case TIPC_CMD_GET_MAX_PUBL:
- case TIPC_CMD_SET_LOG_SIZE:
- case TIPC_CMD_SET_REMOTE_MNG:
- case TIPC_CMD_GET_REMOTE_MNG:
- case TIPC_CMD_DUMP_LOG:
- rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (obsolete command)");
- break;
- default:
- rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (unknown command)");
- break;
- }
-
- WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN));
-
- /* Append an error message if we cannot return all requested data */
- if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) {
- if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0')
- sprintf(rep_tlv_buf->data + rep_tlv_buf->len -
- sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED);
- }
-
- /* Return reply buffer */
-exit:
- rtnl_unlock();
- return rep_tlv_buf;
-}
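
For context, a minimal sketch (not part of this patch) of how a handler in the deleted config.c built a TLV-encoded reply with the helpers removed above; it simply mirrors the removed tipc_cfg_reply_unsigned_type() and assumes the signatures declared in the deleted config.h shown further down.

/* Sketch only: assumes the deleted "config.h" helpers and the TLV_SPACE()
 * macro from <linux/tipc_config.h> are in scope.
 */
static struct sk_buff *example_cfg_reply_u32(u16 tlv_type, u32 value)
{
        struct sk_buff *buf;
        __be32 value_net = htonl(value);            /* TLV payload is network order */

        buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value_net)));
        if (!buf)
                return NULL;                        /* out of memory */
        if (!tipc_cfg_append_tlv(buf, tlv_type, &value_net,
                                 sizeof(value_net))) {
                kfree_skb(buf);                     /* no tailroom for the TLV */
                return NULL;
        }
        return buf;                                 /* reply ready to hand back */
}
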
diff --git a/net/tipc/config.h b/net/tipc/config.h
deleted file mode 100644
index 47b1bf181612..000000000000
--- a/net/tipc/config.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * net/tipc/config.h: Include file for TIPC configuration service code
- *
- * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_CONFIG_H
-#define _TIPC_CONFIG_H
-
-/* ---------------------------------------------------------------------- */
-
-#include "link.h"
-
-struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
-int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
- void *tlv_data, int tlv_data_size);
-struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
-
-static inline struct sk_buff *tipc_cfg_reply_none(void)
-{
- return tipc_cfg_reply_alloc(0);
-}
-
-static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
-{
- return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
-}
-
-static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
-{
- return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
-}
-
-struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
- const void *req_tlv_area, int req_tlv_space,
- int headroom);
-#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index a5737b8407dd..935205e6bcfe 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,82 +34,88 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "core.h"
#include "name_table.h"
#include "subscr.h"
-#include "config.h"
+#include "bearer.h"
+#include "net.h"
#include "socket.h"
#include <linux/module.h>
-/* global variables used by multiple sub-systems within TIPC */
-int tipc_random __read_mostly;
-
/* configurable TIPC parameters */
-u32 tipc_own_addr __read_mostly;
-int tipc_max_ports __read_mostly;
int tipc_net_id __read_mostly;
int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
-/**
- * tipc_buf_acquire - creates a TIPC message buffer
- * @size: message size (including TIPC header)
- *
- * Returns a new buffer with data pointers set to the specified size.
- *
- * NOTE: Headroom is reserved to allow prepending of a data link header.
- * There may also be unrequested tailroom present at the buffer's end.
- */
-struct sk_buff *tipc_buf_acquire(u32 size)
+static int __net_init tipc_init_net(struct net *net)
{
- struct sk_buff *skb;
- unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
-
- skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
- if (skb) {
- skb_reserve(skb, BUF_HEADROOM);
- skb_put(skb, size);
- skb->next = NULL;
- }
- return skb;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ int err;
+
+ tn->net_id = 4711;
+ tn->own_addr = 0;
+ get_random_bytes(&tn->random, sizeof(int));
+ INIT_LIST_HEAD(&tn->node_list);
+ spin_lock_init(&tn->node_list_lock);
+
+ err = tipc_sk_rht_init(net);
+ if (err)
+ goto out_sk_rht;
+
+ err = tipc_nametbl_init(net);
+ if (err)
+ goto out_nametbl;
+
+ err = tipc_subscr_start(net);
+ if (err)
+ goto out_subscr;
+ return 0;
+
+out_subscr:
+ tipc_nametbl_stop(net);
+out_nametbl:
+ tipc_sk_rht_destroy(net);
+out_sk_rht:
+ return err;
}
-/**
- * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
- */
-static void tipc_core_stop(void)
+static void __net_exit tipc_exit_net(struct net *net)
{
- tipc_net_stop();
- tipc_bearer_cleanup();
- tipc_netlink_stop();
- tipc_subscr_stop();
- tipc_nametbl_stop();
- tipc_sk_ref_table_stop();
- tipc_socket_stop();
- tipc_unregister_sysctl();
+ tipc_subscr_stop(net);
+ tipc_net_stop(net);
+ tipc_nametbl_stop(net);
+ tipc_sk_rht_destroy(net);
}
-/**
- * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
- */
-static int tipc_core_start(void)
+static struct pernet_operations tipc_net_ops = {
+ .init = tipc_init_net,
+ .exit = tipc_exit_net,
+ .id = &tipc_net_id,
+ .size = sizeof(struct tipc_net),
+};
+
+static int __init tipc_init(void)
{
int err;
- get_random_bytes(&tipc_random, sizeof(tipc_random));
-
- err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
- if (err)
- goto out_reftbl;
+ pr_info("Activated (version " TIPC_MOD_VER ")\n");
- err = tipc_nametbl_init();
- if (err)
- goto out_nametbl;
+ sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+ TIPC_LOW_IMPORTANCE;
+ sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+ TIPC_CRITICAL_IMPORTANCE;
+ sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
err = tipc_netlink_start();
if (err)
goto out_netlink;
+ err = tipc_netlink_compat_start();
+ if (err)
+ goto out_netlink_compat;
+
err = tipc_socket_init();
if (err)
goto out_socket;
@@ -118,58 +124,40 @@ static int tipc_core_start(void)
if (err)
goto out_sysctl;
- err = tipc_subscr_start();
+ err = register_pernet_subsys(&tipc_net_ops);
if (err)
- goto out_subscr;
+ goto out_pernet;
err = tipc_bearer_setup();
if (err)
goto out_bearer;
+ pr_info("Started in single node mode\n");
return 0;
out_bearer:
- tipc_subscr_stop();
-out_subscr:
+ unregister_pernet_subsys(&tipc_net_ops);
+out_pernet:
tipc_unregister_sysctl();
out_sysctl:
tipc_socket_stop();
out_socket:
+ tipc_netlink_compat_stop();
+out_netlink_compat:
tipc_netlink_stop();
out_netlink:
- tipc_nametbl_stop();
-out_nametbl:
- tipc_sk_ref_table_stop();
-out_reftbl:
+ pr_err("Unable to start in single node mode\n");
return err;
}
-static int __init tipc_init(void)
-{
- int res;
-
- pr_info("Activated (version " TIPC_MOD_VER ")\n");
-
- tipc_own_addr = 0;
- tipc_max_ports = CONFIG_TIPC_PORTS;
- tipc_net_id = 4711;
-
- sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
- TIPC_LOW_IMPORTANCE;
- sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
- TIPC_CRITICAL_IMPORTANCE;
- sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
-
- res = tipc_core_start();
- if (res)
- pr_err("Unable to start in single node mode\n");
- else
- pr_info("Started in single node mode\n");
- return res;
-}
-
static void __exit tipc_exit(void)
{
- tipc_core_stop();
+ tipc_bearer_cleanup();
+ tipc_netlink_stop();
+ tipc_netlink_compat_stop();
+ tipc_socket_stop();
+ tipc_unregister_sysctl();
+ unregister_pernet_subsys(&tipc_net_ops);
+
pr_info("Deactivated\n");
}
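
For readers unfamiliar with the conversion above: letting the core allocate one private state block per network namespace is the standard pernet pattern, driven by the .id/.size fields that tipc_net_ops now sets. A minimal sketch under hypothetical example_* names (nothing here is part of the patch):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_pernet {
        int counter;                        /* per-namespace state */
};

static int example_net_id __read_mostly;   /* slot index assigned by the core */

static int __net_init example_init_net(struct net *net)
{
        struct example_pernet *pn = net_generic(net, example_net_id);

        pn->counter = 0;                    /* memory arrives zeroed; shown for clarity */
        return 0;
}

static void __net_exit example_exit_net(struct net *net)
{
        /* release anything example_init_net() allocated (nothing in this sketch) */
}

static struct pernet_operations example_net_ops = {
        .init = example_init_net,
        .exit = example_exit_net,
        .id   = &example_net_id,                 /* core fills in the generic slot id */
        .size = sizeof(struct example_pernet),   /* core allocates this per namespace */
};

/* Registered once from module init with register_pernet_subsys(&example_net_ops)
 * and torn down with unregister_pernet_subsys(&example_net_ops), exactly as
 * tipc_init()/tipc_exit() do above.
 */
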
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 84602137ce20..3dc68c7a966d 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -37,8 +37,6 @@
#ifndef _TIPC_CORE_H
#define _TIPC_CORE_H
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/tipc.h>
#include <linux/tipc_config.h>
#include <linux/tipc_netlink.h>
@@ -59,47 +57,54 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
+#include <net/netns/generic.h>
+#include <linux/rhashtable.h>
+
+#include "node.h"
+#include "bearer.h"
+#include "bcast.h"
+#include "netlink.h"
+#include "link.h"
+#include "node.h"
+#include "msg.h"
#define TIPC_MOD_VER "2.0.0"
-#define ULTRA_STRING_MAX_LEN 32768
-#define TIPC_MAX_SUBSCRIPTIONS 65535
-#define TIPC_MAX_PUBLICATIONS 65535
+extern int tipc_net_id __read_mostly;
+extern int sysctl_tipc_rmem[3] __read_mostly;
+extern int sysctl_tipc_named_timeout __read_mostly;
-struct tipc_msg; /* msg.h */
+struct tipc_net {
+ u32 own_addr;
+ int net_id;
+ int random;
-int tipc_snprintf(char *buf, int len, const char *fmt, ...);
+ /* Node table and node list */
+ spinlock_t node_list_lock;
+ struct hlist_head node_htable[NODE_HTABLE_SIZE];
+ struct list_head node_list;
+ u32 num_nodes;
+ u32 num_links;
-/*
- * TIPC-specific error codes
- */
-#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
+ /* Bearer list */
+ struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
-/*
- * Global configuration variables
- */
-extern u32 tipc_own_addr __read_mostly;
-extern int tipc_max_ports __read_mostly;
-extern int tipc_net_id __read_mostly;
-extern int sysctl_tipc_rmem[3] __read_mostly;
-extern int sysctl_tipc_named_timeout __read_mostly;
+ /* Broadcast link */
+ struct tipc_bcbearer *bcbearer;
+ struct tipc_bclink *bclink;
+ struct tipc_link *bcl;
-/*
- * Other global variables
- */
-extern int tipc_random __read_mostly;
+ /* Socket hash table */
+ struct rhashtable sk_rht;
-/*
- * Routines available to privileged subsystems
- */
-int tipc_netlink_start(void);
-void tipc_netlink_stop(void);
-int tipc_socket_init(void);
-void tipc_socket_stop(void);
-int tipc_sock_create_local(int type, struct socket **res);
-void tipc_sock_release_local(struct socket *sock);
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
- int flags);
+ /* Name table */
+ spinlock_t nametbl_lock;
+ struct name_table *nametbl;
+
+ /* Topology subscription server */
+ struct tipc_server *topsrv;
+ atomic_t subscription_count;
+};
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
@@ -108,102 +113,4 @@ void tipc_unregister_sysctl(void);
#define tipc_register_sysctl() 0
#define tipc_unregister_sysctl()
#endif
-
-/*
- * TIPC timer code
- */
-typedef void (*Handler) (unsigned long);
-
-/**
- * k_init_timer - initialize a timer
- * @timer: pointer to timer structure
- * @routine: pointer to routine to invoke when timer expires
- * @argument: value to pass to routine when timer expires
- *
- * Timer must be initialized before use (and terminated when no longer needed).
- */
-static inline void k_init_timer(struct timer_list *timer, Handler routine,
- unsigned long argument)
-{
- setup_timer(timer, routine, argument);
-}
-
-/**
- * k_start_timer - start a timer
- * @timer: pointer to timer structure
- * @msec: time to delay (in ms)
- *
- * Schedules a previously initialized timer for later execution.
- * If timer is already running, the new timeout overrides the previous request.
- *
- * To ensure the timer doesn't expire before the specified delay elapses,
- * the amount of delay is rounded up when converting to the jiffies
- * then an additional jiffy is added to account for the fact that
- * the starting time may be in the middle of the current jiffy.
- */
-static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
-{
- mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
-}
-
-/**
- * k_cancel_timer - cancel a timer
- * @timer: pointer to timer structure
- *
- * Cancels a previously initialized timer.
- * Can be called safely even if the timer is already inactive.
- *
- * WARNING: Must not be called when holding locks required by the timer's
- * timeout routine, otherwise deadlock can occur on SMP systems!
- */
-static inline void k_cancel_timer(struct timer_list *timer)
-{
- del_timer_sync(timer);
-}
-
-/**
- * k_term_timer - terminate a timer
- * @timer: pointer to timer structure
- *
- * Prevents further use of a previously initialized timer.
- *
- * WARNING: Caller must ensure timer isn't currently running.
- *
- * (Do not "enhance" this routine to automatically cancel an active timer,
- * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
- */
-static inline void k_term_timer(struct timer_list *timer)
-{
-}
-
-/*
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- * are word aligned for quicker access
- */
-#define BUF_HEADROOM LL_MAX_HEADER
-
-struct tipc_skb_cb {
- void *handle;
- struct sk_buff *tail;
- bool deferred;
- bool wakeup_pending;
- bool bundling;
- u16 chain_sz;
- u16 chain_imp;
-};
-
-#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
-
-static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
-{
- return (struct tipc_msg *)skb->data;
-}
-
-struct sk_buff *tipc_buf_acquire(u32 size);
-
#endif
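
The k_*_timer() wrappers deleted above were thin veneers over the kernel timer API of this era, whose callback takes an unsigned long cookie. A hedged sketch of the direct calls that the rest of the patch switches to, with hypothetical example_* names (the old k_start_timer() also added one extra jiffy of slack, omitted here):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_timeout(unsigned long data)
{
        /* 'data' is the cookie passed to setup_timer() below */
}

static void example_timer_usage(struct timer_list *t, unsigned long cookie,
                                unsigned long msec)
{
        setup_timer(t, example_timeout, cookie);        /* replaces k_init_timer()   */
        mod_timer(t, jiffies + msecs_to_jiffies(msec)); /* replaces k_start_timer()  */
        del_timer_sync(t);                              /* replaces k_cancel_timer() */
}
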
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index aa722a42ef8b..feef3753615d 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,7 +1,7 @@
/*
* net/tipc/discover.c
*
- * Copyright (c) 2003-2006, 2014, Ericsson AB
+ * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
* Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -38,15 +38,19 @@
#include "link.h"
#include "discover.h"
-#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
-#define TIPC_LINK_REQ_FAST 1000 /* max delay if bearer has no links */
-#define TIPC_LINK_REQ_SLOW 60000 /* max delay if bearer has links */
-#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */
-
+/* min delay during bearer start up */
+#define TIPC_LINK_REQ_INIT msecs_to_jiffies(125)
+/* max delay if bearer has no links */
+#define TIPC_LINK_REQ_FAST msecs_to_jiffies(1000)
+/* max delay if bearer has links */
+#define TIPC_LINK_REQ_SLOW msecs_to_jiffies(60000)
+/* indicates no timer in use */
+#define TIPC_LINK_REQ_INACTIVE 0xffffffff
/**
* struct tipc_link_req - information about an ongoing link setup request
* @bearer_id: identity of bearer issuing requests
+ * @net: network namespace instance
* @dest: destination address for request messages
* @domain: network domain to which links can be established
* @num_nodes: number of nodes currently discovered (i.e. with an active link)
@@ -58,31 +62,35 @@
struct tipc_link_req {
u32 bearer_id;
struct tipc_media_addr dest;
+ struct net *net;
u32 domain;
int num_nodes;
spinlock_t lock;
struct sk_buff *buf;
struct timer_list timer;
- unsigned int timer_intv;
+ unsigned long timer_intv;
};
/**
* tipc_disc_init_msg - initialize a link setup message
+ * @net: the applicable net namespace
* @type: message type (request or response)
* @b_ptr: ptr to bearer issuing message
*/
-static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
+static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
struct tipc_bearer *b_ptr)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_msg *msg;
u32 dest_domain = b_ptr->domain;
msg = buf_msg(buf);
- tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+ tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
+ INT_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
- msg_set_node_sig(msg, tipc_random);
+ msg_set_node_sig(msg, tn->random);
msg_set_dest_domain(msg, dest_domain);
- msg_set_bc_netid(msg, tipc_net_id);
+ msg_set_bc_netid(msg, tn->net_id);
b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
}
@@ -107,11 +115,14 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
/**
* tipc_disc_rcv - handle incoming discovery message (request or response)
+ * @net: the applicable net namespace
* @buf: buffer containing message
* @bearer: bearer that message arrived on
*/
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *bearer)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node;
struct tipc_link *link;
struct tipc_media_addr maddr;
@@ -133,7 +144,7 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
kfree_skb(buf);
/* Ensure message from node is valid and communication is permitted */
- if (net_id != tipc_net_id)
+ if (net_id != tn->net_id)
return;
if (maddr.broadcast)
return;
@@ -142,23 +153,19 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
if (!tipc_addr_node_valid(onode))
return;
- if (in_own_node(onode)) {
+ if (in_own_node(net, onode)) {
if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
- disc_dupl_alert(bearer, tipc_own_addr, &maddr);
+ disc_dupl_alert(bearer, tn->own_addr, &maddr);
return;
}
- if (!tipc_in_scope(ddom, tipc_own_addr))
+ if (!tipc_in_scope(ddom, tn->own_addr))
return;
if (!tipc_in_scope(bearer->domain, onode))
return;
- /* Locate, or if necessary, create, node: */
- node = tipc_node_find(onode);
- if (!node)
- node = tipc_node_create(onode);
+ node = tipc_node_create(net, onode);
if (!node)
return;
-
tipc_node_lock(node);
link = node->links[bearer->identity];
@@ -244,8 +251,8 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
if (respond && (mtyp == DSC_REQ_MSG)) {
rbuf = tipc_buf_acquire(INT_H_SIZE);
if (rbuf) {
- tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
- tipc_bearer_send(bearer->identity, rbuf, &maddr);
+ tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
+ tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
kfree_skb(rbuf);
}
}
@@ -265,7 +272,7 @@ static void disc_update(struct tipc_link_req *req)
if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
(req->timer_intv > TIPC_LINK_REQ_FAST)) {
req->timer_intv = TIPC_LINK_REQ_INIT;
- k_start_timer(&req->timer, req->timer_intv);
+ mod_timer(&req->timer, jiffies + req->timer_intv);
}
}
}
@@ -295,12 +302,13 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
/**
* disc_timeout - send a periodic link setup request
- * @req: ptr to link request structure
+ * @data: ptr to link request structure
*
* Called whenever a link setup request timer associated with a bearer expires.
*/
-static void disc_timeout(struct tipc_link_req *req)
+static void disc_timeout(unsigned long data)
{
+ struct tipc_link_req *req = (struct tipc_link_req *)data;
int max_delay;
spin_lock_bh(&req->lock);
@@ -318,7 +326,7 @@ static void disc_timeout(struct tipc_link_req *req)
* hold at fast polling rate if don't have any associated nodes,
* otherwise hold at slow polling rate
*/
- tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+ tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
req->timer_intv *= 2;
@@ -329,20 +337,22 @@ static void disc_timeout(struct tipc_link_req *req)
if (req->timer_intv > max_delay)
req->timer_intv = max_delay;
- k_start_timer(&req->timer, req->timer_intv);
+ mod_timer(&req->timer, jiffies + req->timer_intv);
exit:
spin_unlock_bh(&req->lock);
}
/**
* tipc_disc_create - create object to send periodic link setup requests
+ * @net: the applicable net namespace
* @b_ptr: ptr to bearer issuing requests
* @dest: destination address for request messages
* @dest_domain: network domain to which links can be established
*
* Returns 0 if successful, otherwise -errno.
*/
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+ struct tipc_media_addr *dest)
{
struct tipc_link_req *req;
@@ -356,17 +366,18 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
return -ENOMEM;
}
- tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+ tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
memcpy(&req->dest, dest, sizeof(*dest));
+ req->net = net;
req->bearer_id = b_ptr->identity;
req->domain = b_ptr->domain;
req->num_nodes = 0;
req->timer_intv = TIPC_LINK_REQ_INIT;
spin_lock_init(&req->lock);
- k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
- k_start_timer(&req->timer, req->timer_intv);
+ setup_timer(&req->timer, disc_timeout, (unsigned long)req);
+ mod_timer(&req->timer, jiffies + req->timer_intv);
b_ptr->link_req = req;
- tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+ tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
return 0;
}
@@ -376,28 +387,29 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
*/
void tipc_disc_delete(struct tipc_link_req *req)
{
- k_cancel_timer(&req->timer);
- k_term_timer(&req->timer);
+ del_timer_sync(&req->timer);
kfree_skb(req->buf);
kfree(req);
}
/**
* tipc_disc_reset - reset object to send periodic link setup requests
+ * @net: the applicable net namespace
* @b_ptr: ptr to bearer issuing requests
* @dest_domain: network domain to which links can be established
*/
-void tipc_disc_reset(struct tipc_bearer *b_ptr)
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
{
struct tipc_link_req *req = b_ptr->link_req;
spin_lock_bh(&req->lock);
- tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+ tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
+ req->net = net;
req->bearer_id = b_ptr->identity;
req->domain = b_ptr->domain;
req->num_nodes = 0;
req->timer_intv = TIPC_LINK_REQ_INIT;
- k_start_timer(&req->timer, req->timer_intv);
- tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+ mod_timer(&req->timer, jiffies + req->timer_intv);
+ tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
spin_unlock_bh(&req->lock);
}
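
As a side note, the discovery timer above implements a simple exponential backoff, now expressed entirely in jiffies because the TIPC_LINK_REQ_* constants are msecs_to_jiffies() values. A sketch of the interval update performed in disc_timeout() (illustrative helper, not in the patch):

static unsigned long example_next_disc_interval(unsigned long cur,
                                                unsigned long max_delay)
{
        unsigned long next = cur * 2;                /* double after every request sent */

        return next > max_delay ? max_delay : next;  /* clamp at the FAST or SLOW rate */
}
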
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 515b57392f4d..c9b12770c5ed 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -39,11 +39,13 @@
struct tipc_link_req;
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+ struct tipc_media_addr *dest);
void tipc_disc_delete(struct tipc_link_req *req);
-void tipc_disc_reset(struct tipc_bearer *b_ptr);
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
void tipc_disc_add_dest(struct tipc_link_req *req);
void tipc_disc_remove_dest(struct tipc_link_req *req);
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *b_ptr);
#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 23bcc1132365..a4cf364316de 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -40,7 +40,6 @@
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
-#include "config.h"
#include "netlink.h"
#include <linux/pkt_sched.h>
@@ -101,19 +100,20 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
*/
#define START_CHANGEOVER 100000u
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
- struct sk_buff *buf);
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
- struct sk_buff **buf);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
+static void link_handle_out_of_seq_msg(struct tipc_link *link,
+ struct sk_buff *skb);
+static void tipc_link_proto_rcv(struct tipc_link *link,
+ struct sk_buff *skb);
+static int tipc_link_tunnel_rcv(struct tipc_node *node,
+ struct sk_buff **skb);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
-static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
+static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
+static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
/*
* Simple link routines
@@ -123,13 +123,30 @@ static unsigned int align(unsigned int i)
return (i + 3) & ~3u;
}
+static void tipc_link_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct tipc_link, ref));
+}
+
+static void tipc_link_get(struct tipc_link *l_ptr)
+{
+ kref_get(&l_ptr->ref);
+}
+
+static void tipc_link_put(struct tipc_link *l_ptr)
+{
+ kref_put(&l_ptr->ref, tipc_link_release);
+}
+
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
+ struct tipc_node *node = l_ptr->owner;
+ struct tipc_net *tn = net_generic(node->net, tipc_net_id);
struct tipc_bearer *b_ptr;
u32 max_pkt;
rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+ b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
if (!b_ptr) {
rcu_read_unlock();
return;
@@ -169,8 +186,9 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
* link_timeout - handle expiration of link timer
* @l_ptr: pointer to link
*/
-static void link_timeout(struct tipc_link *l_ptr)
+static void link_timeout(unsigned long data)
{
+ struct tipc_link *l_ptr = (struct tipc_link *)data;
struct sk_buff *skb;
tipc_node_lock(l_ptr->owner);
@@ -215,11 +233,13 @@ static void link_timeout(struct tipc_link *l_ptr)
tipc_link_push_packets(l_ptr);
tipc_node_unlock(l_ptr->owner);
+ tipc_link_put(l_ptr);
}
-static void link_set_timer(struct tipc_link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *link, unsigned long time)
{
- k_start_timer(&l_ptr->timer, time);
+ if (!mod_timer(&link->timer, jiffies + time))
+ tipc_link_get(link);
}
/**
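
The kref added in the hunk above ties the link's lifetime to its timer: arming an idle timer takes a reference, and the expiry handler (or a successful del_timer(), as in tipc_link_delete_list() below) drops it. A minimal sketch of that discipline, reusing the static helpers introduced above (illustration only, not part of the patch):

static void example_link_arm_timer(struct tipc_link *l, unsigned long delay)
{
        /* mod_timer() returns 0 when the timer was not already pending,
         * i.e. this call is what makes the timer start pinning the link
         */
        if (!mod_timer(&l->timer, jiffies + delay))
                tipc_link_get(l);
}

static void example_link_cancel_timer(struct tipc_link *l)
{
        /* del_timer() returns nonzero only if a pending timer was stopped,
         * so the timer's reference must be dropped here instead of in
         * link_timeout()
         */
        if (del_timer(&l->timer))
                tipc_link_put(l);
}
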
@@ -234,6 +254,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr)
{
+ struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
struct tipc_link *l_ptr;
struct tipc_msg *msg;
char *if_name;
@@ -259,12 +280,12 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
pr_warn("Link creation failed, no memory\n");
return NULL;
}
-
+ kref_init(&l_ptr->ref);
l_ptr->addr = peer;
if_name = strchr(b_ptr->name, ':') + 1;
sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
- tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr),
+ tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
+ tipc_node(tn->own_addr),
if_name,
tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
/* note: peer i/f name is updated by reset/activate message */
@@ -278,9 +299,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
msg = l_ptr->pmsg;
- tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
+ tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
+ l_ptr->addr);
msg_set_size(msg, sizeof(l_ptr->proto_msg));
- msg_set_session(msg, (tipc_random & 0xffff));
+ msg_set_session(msg, (tn->random & 0xffff));
msg_set_bearer_id(msg, b_ptr->identity);
strcpy((char *)msg_data(msg), if_name);
@@ -293,48 +315,52 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
l_ptr->next_out_no = 1;
__skb_queue_head_init(&l_ptr->outqueue);
__skb_queue_head_init(&l_ptr->deferred_queue);
- skb_queue_head_init(&l_ptr->waiting_sks);
-
+ skb_queue_head_init(&l_ptr->wakeupq);
+ skb_queue_head_init(&l_ptr->inputq);
+ skb_queue_head_init(&l_ptr->namedq);
link_reset_statistics(l_ptr);
-
tipc_node_attach_link(n_ptr, l_ptr);
-
- k_init_timer(&l_ptr->timer, (Handler)link_timeout,
- (unsigned long)l_ptr);
-
+ setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
link_state_event(l_ptr, STARTING_EVT);
return l_ptr;
}
-void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
+/**
+ * link_delete - Conditional deletion of link.
+ * If timer still running, real delete is done when it expires
+ * @link: link to be deleted
+ */
+void tipc_link_delete(struct tipc_link *link)
+{
+ tipc_link_reset_fragments(link);
+ tipc_node_detach_link(link->owner, link);
+ tipc_link_put(link);
+}
+
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
+ bool shutting_down)
{
- struct tipc_link *l_ptr;
- struct tipc_node *n_ptr;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *link;
+ struct tipc_node *node;
rcu_read_lock();
- list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
- tipc_node_lock(n_ptr);
- l_ptr = n_ptr->links[bearer_id];
- if (l_ptr) {
- tipc_link_reset(l_ptr);
- if (shutting_down || !tipc_node_is_up(n_ptr)) {
- tipc_node_detach_link(l_ptr->owner, l_ptr);
- tipc_link_reset_fragments(l_ptr);
- tipc_node_unlock(n_ptr);
-
- /* Nobody else can access this link now: */
- del_timer_sync(&l_ptr->timer);
- kfree(l_ptr);
- } else {
- /* Detach/delete when failover is finished: */
- l_ptr->flags |= LINK_STOPPED;
- tipc_node_unlock(n_ptr);
- del_timer_sync(&l_ptr->timer);
- }
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
+ tipc_node_lock(node);
+ link = node->links[bearer_id];
+ if (!link) {
+ tipc_node_unlock(node);
continue;
}
- tipc_node_unlock(n_ptr);
+ tipc_link_reset(link);
+ if (del_timer(&link->timer))
+ tipc_link_put(link);
+ link->flags |= LINK_STOPPED;
+ /* Delete link now, or when failover is finished: */
+ if (shutting_down || !tipc_node_is_up(node))
+ tipc_link_delete(link);
+ tipc_node_unlock(node);
}
rcu_read_unlock();
}
@@ -352,13 +378,14 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
{
struct sk_buff *buf;
- buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
- tipc_own_addr, oport, 0, 0);
+ buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+ link_own_addr(link), link_own_addr(link),
+ oport, 0, 0);
if (!buf)
return false;
TIPC_SKB_CB(buf)->chain_sz = chain_sz;
TIPC_SKB_CB(buf)->chain_imp = imp;
- skb_queue_tail(&link->waiting_sks, buf);
+ skb_queue_tail(&link->wakeupq, buf);
link->stats.link_congs++;
return true;
}
@@ -369,17 +396,19 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
* Move a number of waiting users, as permitted by available space in
* the send queue, from link wait queue to node wait queue for wakeup
*/
-static void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *link)
{
uint pend_qsz = skb_queue_len(&link->outqueue);
struct sk_buff *skb, *tmp;
- skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
+ skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
break;
pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
- skb_unlink(skb, &link->waiting_sks);
- skb_queue_tail(&link->owner->waiting_sks, skb);
+ skb_unlink(skb, &link->wakeupq);
+ skb_queue_tail(&link->inputq, skb);
+ link->owner->inputq = &link->inputq;
+ link->owner->action_flags |= TIPC_MSG_EVT;
}
}
@@ -425,20 +454,20 @@ void tipc_link_reset(struct tipc_link *l_ptr)
return;
tipc_node_link_down(l_ptr->owner, l_ptr);
- tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
+ tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
l_ptr->reset_checkpoint = checkpoint;
l_ptr->exp_msg_count = START_CHANGEOVER;
}
- /* Clean up all queues: */
+ /* Clean up all queues, except inputq: */
__skb_queue_purge(&l_ptr->outqueue);
__skb_queue_purge(&l_ptr->deferred_queue);
- if (!skb_queue_empty(&l_ptr->waiting_sks)) {
- skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
- owner->action_flags |= TIPC_WAKEUP_USERS;
- }
+ skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
+ if (!skb_queue_empty(&l_ptr->inputq))
+ owner->action_flags |= TIPC_MSG_EVT;
+ owner->inputq = &l_ptr->inputq;
l_ptr->next_out = NULL;
l_ptr->unacked_window = 0;
l_ptr->checkpoint = 1;
@@ -448,13 +477,14 @@ void tipc_link_reset(struct tipc_link *l_ptr)
link_reset_statistics(l_ptr);
}
-void tipc_link_reset_list(unsigned int bearer_id)
+void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
rcu_read_lock();
- list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+ list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->links[bearer_id];
if (l_ptr)
@@ -464,11 +494,14 @@ void tipc_link_reset_list(unsigned int bearer_id)
rcu_read_unlock();
}
-static void link_activate(struct tipc_link *l_ptr)
+static void link_activate(struct tipc_link *link)
{
- l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
- tipc_node_link_up(l_ptr->owner, l_ptr);
- tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
+ struct tipc_node *node = link->owner;
+
+ link->next_in_no = 1;
+ link->stats.recv_info = 1;
+ tipc_node_link_up(node, link);
+ tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}
/**
@@ -479,7 +512,7 @@ static void link_activate(struct tipc_link *l_ptr)
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
struct tipc_link *other;
- u32 cont_intv = l_ptr->continuity_interval;
+ unsigned long cont_intv = l_ptr->cont_intv;
if (l_ptr->flags & LINK_STOPPED)
return;
@@ -522,8 +555,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv / 4);
break;
case RESET_MSG:
- pr_info("%s<%s>, requested by peer\n", link_rst_msg,
- l_ptr->name);
+ pr_debug("%s<%s>, requested by peer\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -533,7 +566,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- pr_err("%s%u in WW state\n", link_unk_evt, event);
+ pr_debug("%s%u in WW state\n", link_unk_evt, event);
}
break;
case WORKING_UNKNOWN:
@@ -545,8 +578,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- pr_info("%s<%s>, requested by peer while probing\n",
- link_rst_msg, l_ptr->name);
+ pr_debug("%s<%s>, requested by peer while probing\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -572,8 +605,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
- pr_warn("%s<%s>, peer not responding\n",
- link_rst_msg, l_ptr->name);
+ pr_debug("%s<%s>, peer not responding\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
@@ -614,7 +647,9 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
break;
case STARTING_EVT:
l_ptr->flags |= LINK_STARTED;
- /* fall through */
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
case TIMEOUT_EVT:
tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
@@ -700,7 +735,8 @@ drop:
* Only the socket functions tipc_send_stream() and tipc_send_packet() need
* to act on the return value, since they may need to do more send attempts.
*/
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
+int __tipc_link_xmit(struct net *net, struct tipc_link *link,
+ struct sk_buff_head *list)
{
struct tipc_msg *msg = buf_msg(skb_peek(list));
uint psz = msg_size(msg);
@@ -733,7 +769,8 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
if (skb_queue_len(outqueue) < sndlim) {
__skb_queue_tail(outqueue, skb);
- tipc_bearer_send(link->bearer_id, skb, addr);
+ tipc_bearer_send(net, link->bearer_id,
+ skb, addr);
link->next_out = NULL;
link->unacked_window = 0;
} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
@@ -758,7 +795,7 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
- __skb_queue_head_init(list);
+ skb_queue_head_init(list);
__skb_queue_tail(list, skb);
}
@@ -767,19 +804,21 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
struct sk_buff_head head;
skb2list(skb, &head);
- return __tipc_link_xmit(link, &head);
+ return __tipc_link_xmit(link->owner->net, link, &head);
}
-int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
+int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+ u32 selector)
{
struct sk_buff_head head;
skb2list(skb, &head);
- return tipc_link_xmit(&head, dnode, selector);
+ return tipc_link_xmit(net, &head, dnode, selector);
}
/**
* tipc_link_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
* @list: chain of buffers containing message
* @dsz: amount of user data to be sent
* @dnode: address of destination node
@@ -787,33 +826,28 @@ int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
+int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+ u32 selector)
{
struct tipc_link *link = NULL;
struct tipc_node *node;
int rc = -EHOSTUNREACH;
- node = tipc_node_find(dnode);
+ node = tipc_node_find(net, dnode);
if (node) {
tipc_node_lock(node);
link = node->active_links[selector & 1];
if (link)
- rc = __tipc_link_xmit(link, list);
+ rc = __tipc_link_xmit(net, link, list);
tipc_node_unlock(node);
}
-
if (link)
return rc;
- if (likely(in_own_node(dnode))) {
- /* As a node local message chain never contains more than one
- * buffer, we just need to dequeue one SKB buffer from the
- * head list.
- */
- return tipc_sk_rcv(__skb_dequeue(list));
- }
- __skb_queue_purge(list);
+ if (likely(in_own_node(net, dnode)))
+ return tipc_sk_rcv(net, list);
+ __skb_queue_purge(list);
return rc;
}
@@ -835,7 +869,8 @@ static void tipc_link_sync_xmit(struct tipc_link *link)
return;
msg = buf_msg(skb);
- tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
+ tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
+ INT_H_SIZE, link->addr);
msg_set_last_bcast(msg, link->owner->bclink.acked);
__tipc_link_xmit_skb(link, skb);
}
@@ -890,7 +925,8 @@ void tipc_link_push_packets(struct tipc_link *l_ptr)
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
if (msg_user(msg) == MSG_BUNDLER)
TIPC_SKB_CB(skb)->bundling = false;
- tipc_bearer_send(l_ptr->bearer_id, skb,
+ tipc_bearer_send(l_ptr->owner->net,
+ l_ptr->bearer_id, skb,
&l_ptr->media_addr);
l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
} else {
@@ -923,6 +959,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
+ struct net *net = l_ptr->owner->net;
pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
@@ -940,7 +977,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
pr_cont("Outstanding acks: %lu\n",
(unsigned long) TIPC_SKB_CB(buf)->handle);
- n_ptr = tipc_bclink_retransmit_to();
+ n_ptr = tipc_bclink_retransmit_to(net);
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -955,7 +992,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
tipc_node_unlock(n_ptr);
- tipc_bclink_set_flags(TIPC_BCLINK_RESET);
+ tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
l_ptr->stale_count = 0;
}
}
@@ -987,7 +1024,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
msg = buf_msg(skb);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
+ tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
+ &l_ptr->media_addr);
retransmits--;
l_ptr->stats.retransmitted++;
}
@@ -1063,14 +1101,16 @@ static int link_recv_buf_validate(struct sk_buff *buf)
/**
* tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
* @skb: TIPC packet
* @b_ptr: pointer to bearer message arrived on
*
* Invoked with no locks held. Bearer pointer must point to a valid bearer
* structure (i.e. cannot be NULL), but bearer can be inactive.
*/
-void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff_head head;
struct tipc_node *n_ptr;
struct tipc_link *l_ptr;
@@ -1096,19 +1136,19 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
if (unlikely(msg_non_seq(msg))) {
if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_rcv(skb, b_ptr);
+ tipc_disc_rcv(net, skb, b_ptr);
else
- tipc_bclink_rcv(skb);
+ tipc_bclink_rcv(net, skb);
continue;
}
/* Discard unicast link messages destined for another node */
if (unlikely(!msg_short(msg) &&
- (msg_destnode(msg) != tipc_own_addr)))
+ (msg_destnode(msg) != tn->own_addr)))
goto discard;
/* Locate neighboring node that sent message */
- n_ptr = tipc_node_find(msg_prevnode(msg));
+ n_ptr = tipc_node_find(net, msg_prevnode(msg));
if (unlikely(!n_ptr))
goto discard;
tipc_node_lock(n_ptr);
@@ -1116,7 +1156,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
/* Locate unicast link endpoint that should handle message */
l_ptr = n_ptr->links[b_ptr->identity];
if (unlikely(!l_ptr))
- goto unlock_discard;
+ goto unlock;
/* Verify that communication with node is currently allowed */
if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
@@ -1127,7 +1167,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
if (tipc_node_blocked(n_ptr))
- goto unlock_discard;
+ goto unlock;
/* Validate message sequence number info */
seq_no = msg_seqno(msg);
@@ -1151,18 +1191,16 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
if (unlikely(l_ptr->next_out))
tipc_link_push_packets(l_ptr);
- if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
+ if (released && !skb_queue_empty(&l_ptr->wakeupq))
link_prepare_wakeup(l_ptr);
- l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
- }
/* Process the incoming packet */
if (unlikely(!link_working_working(l_ptr))) {
if (msg_user(msg) == LINK_PROTOCOL) {
tipc_link_proto_rcv(l_ptr, skb);
link_retrieve_defq(l_ptr, &head);
- tipc_node_unlock(n_ptr);
- continue;
+ skb = NULL;
+ goto unlock;
}
/* Traffic message. Conditionally activate link */
@@ -1171,18 +1209,18 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
if (link_working_working(l_ptr)) {
/* Re-insert buffer in front of queue */
__skb_queue_head(&head, skb);
- tipc_node_unlock(n_ptr);
- continue;
+ skb = NULL;
+ goto unlock;
}
- goto unlock_discard;
+ goto unlock;
}
/* Link is now in state WORKING_WORKING */
if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
link_handle_out_of_seq_msg(l_ptr, skb);
link_retrieve_defq(l_ptr, &head);
- tipc_node_unlock(n_ptr);
- continue;
+ skb = NULL;
+ goto unlock;
}
l_ptr->next_in_no++;
if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
@@ -1192,95 +1230,102 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
l_ptr->stats.sent_acks++;
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
-
- if (tipc_link_prepare_input(l_ptr, &skb)) {
- tipc_node_unlock(n_ptr);
- continue;
- }
- tipc_node_unlock(n_ptr);
-
- if (tipc_link_input(l_ptr, skb) != 0)
- goto discard;
- continue;
-unlock_discard:
+ tipc_link_input(l_ptr, skb);
+ skb = NULL;
+unlock:
tipc_node_unlock(n_ptr);
discard:
- kfree_skb(skb);
+ if (unlikely(skb))
+ kfree_skb(skb);
}
}
-/**
- * tipc_link_prepare_input - process TIPC link messages
- *
- * returns nonzero if the message was consumed
+/* tipc_data_input - deliver data and name distr msgs to upper layer
*
+ * Consumes buffer if message is of right type
* Node lock must be held
*/
-static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
- struct tipc_node *n;
- struct tipc_msg *msg;
- int res = -EINVAL;
+ struct tipc_node *node = link->owner;
+ struct tipc_msg *msg = buf_msg(skb);
+ u32 dport = msg_destport(msg);
- n = l->owner;
- msg = buf_msg(*buf);
switch (msg_user(msg)) {
- case CHANGEOVER_PROTOCOL:
- if (tipc_link_tunnel_rcv(n, buf))
- res = 0;
- break;
- case MSG_FRAGMENTER:
- l->stats.recv_fragments++;
- if (tipc_buf_append(&l->reasm_buf, buf)) {
- l->stats.recv_fragmented++;
- res = 0;
- } else if (!l->reasm_buf) {
- tipc_link_reset(l);
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ case CONN_MANAGER:
+ if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
+ node->inputq = &link->inputq;
+ node->action_flags |= TIPC_MSG_EVT;
}
- break;
- case MSG_BUNDLER:
- l->stats.recv_bundles++;
- l->stats.recv_bundled += msg_msgcnt(msg);
- res = 0;
- break;
+ return true;
case NAME_DISTRIBUTOR:
- n->bclink.recv_permitted = true;
- res = 0;
- break;
+ node->bclink.recv_permitted = true;
+ node->namedq = &link->namedq;
+ skb_queue_tail(&link->namedq, skb);
+ if (skb_queue_len(&link->namedq) == 1)
+ node->action_flags |= TIPC_NAMED_MSG_EVT;
+ return true;
+ case MSG_BUNDLER:
+ case CHANGEOVER_PROTOCOL:
+ case MSG_FRAGMENTER:
case BCAST_PROTOCOL:
- tipc_link_sync_rcv(n, *buf);
- break;
+ return false;
default:
- res = 0;
- }
- return res;
+ pr_warn("Dropping received illegal msg type\n");
+ kfree_skb(skb);
+ return false;
+ };
}
-/**
- * tipc_link_input - Deliver message too higher layers
+
+/* tipc_link_input - process packet that has passed link protocol check
+ *
+ * Consumes buffer
+ * Node lock must be held
*/
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
+static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
- struct tipc_msg *msg = buf_msg(buf);
- int res = 0;
+ struct tipc_node *node = link->owner;
+ struct tipc_msg *msg = buf_msg(skb);
+ struct sk_buff *iskb;
+ int pos = 0;
+
+ if (likely(tipc_data_input(link, skb)))
+ return;
switch (msg_user(msg)) {
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- case CONN_MANAGER:
- tipc_sk_rcv(buf);
+ case CHANGEOVER_PROTOCOL:
+ if (!tipc_link_tunnel_rcv(node, &skb))
+ break;
+ if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
+ tipc_data_input(link, skb);
+ break;
+ }
+ case MSG_BUNDLER:
+ link->stats.recv_bundles++;
+ link->stats.recv_bundled += msg_msgcnt(msg);
+
+ while (tipc_msg_extract(skb, &iskb, &pos))
+ tipc_data_input(link, iskb);
break;
- case NAME_DISTRIBUTOR:
- tipc_named_rcv(buf);
+ case MSG_FRAGMENTER:
+ link->stats.recv_fragments++;
+ if (tipc_buf_append(&link->reasm_buf, &skb)) {
+ link->stats.recv_fragmented++;
+ tipc_data_input(link, skb);
+ } else if (!link->reasm_buf) {
+ tipc_link_reset(link);
+ }
break;
- case MSG_BUNDLER:
- tipc_link_bundle_rcv(buf);
+ case BCAST_PROTOCOL:
+ tipc_link_sync_rcv(node, skb);
break;
default:
- res = -EINVAL;
- }
- return res;
+ break;
+ };
}
/**
@@ -1381,7 +1426,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_type(msg, msg_typ);
msg_set_net_plane(msg, l_ptr->net_plane);
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
+ msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
if (msg_typ == STATE_MSG) {
u32 next_sent = mod(l_ptr->next_out_no);
@@ -1445,7 +1490,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
buf->priority = TC_PRIO_CONTROL;
- tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+ tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
+ &l_ptr->media_addr);
l_ptr->unacked_window = 0;
kfree_skb(buf);
}
@@ -1455,7 +1501,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest address rules
*/
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *buf)
{
u32 rec_gap = 0;
u32 max_pkt_info;
@@ -1468,7 +1515,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
goto exit;
if (l_ptr->net_plane != msg_net_plane(msg))
- if (tipc_own_addr > msg_prevnode(msg))
+ if (link_own_addr(l_ptr) > msg_prevnode(msg))
l_ptr->net_plane = msg_net_plane(msg);
switch (msg_type(msg)) {
@@ -1535,9 +1582,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
if (msg_linkprio(msg) &&
(msg_linkprio(msg) != l_ptr->priority)) {
- pr_warn("%s<%s>, priority change %u->%u\n",
- link_rst_msg, l_ptr->name, l_ptr->priority,
- msg_linkprio(msg));
+ pr_debug("%s<%s>, priority change %u->%u\n",
+ link_rst_msg, l_ptr->name,
+ l_ptr->priority, msg_linkprio(msg));
l_ptr->priority = msg_linkprio(msg);
tipc_link_reset(l_ptr); /* Enforce change to take effect */
break;
@@ -1636,8 +1683,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
if (!tunnel)
return;
- tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
- ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
+ tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
+ ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
@@ -1694,8 +1741,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
struct sk_buff *skb;
struct tipc_msg tunnel_hdr;
- tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
- DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
+ tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
+ DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
skb_queue_walk(&l_ptr->outqueue, skb) {
@@ -1729,7 +1776,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
* @from_pos: offset to extract from
*
* Returns a new message buffer containing an embedded message. The
- * encapsulating message itself is left unchanged.
+ * encapsulating buffer is left unchanged.
*/
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
@@ -1743,8 +1790,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
return eb;
}
-
-
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
* Owner node is locked.
*/
@@ -1804,10 +1849,8 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
}
}
exit:
- if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
- tipc_node_detach_link(l_ptr->owner, l_ptr);
- kfree(l_ptr);
- }
+ if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
+ tipc_link_delete(l_ptr);
return buf;
}
@@ -1845,50 +1888,16 @@ exit:
return *buf != NULL;
}
-/*
- * Bundler functionality:
- */
-void tipc_link_bundle_rcv(struct sk_buff *buf)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
- u32 msgcount = msg_msgcnt(buf_msg(buf));
- u32 pos = INT_H_SIZE;
- struct sk_buff *obuf;
- struct tipc_msg *omsg;
-
- while (msgcount--) {
- obuf = buf_extract(buf, pos);
- if (obuf == NULL) {
- pr_warn("Link unable to unbundle message(s)\n");
- break;
- }
- omsg = buf_msg(obuf);
- pos += align(msg_size(omsg));
- if (msg_isdata(omsg)) {
- if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
- tipc_sk_mcast_rcv(obuf);
- else
- tipc_sk_rcv(obuf);
- } else if (msg_user(omsg) == CONN_MANAGER) {
- tipc_sk_rcv(obuf);
- } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
- tipc_named_rcv(obuf);
- } else {
- pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
- kfree_skb(obuf);
- }
- }
- kfree_skb(buf);
-}
+ unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
-{
- if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
+ if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
return;
- l_ptr->tolerance = tolerance;
- l_ptr->continuity_interval =
- ((tolerance / 4) > 500) ? 500 : tolerance / 4;
- l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
+ l_ptr->tolerance = tol;
+ l_ptr->cont_intv = msecs_to_jiffies(intv);
+ l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
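/* Illustrative userspace sketch, not part of this patch: the supervision
 * arithmetic above reduces to a probe interval of a quarter of the
 * configured tolerance, capped at 500 ms, while the abort limit counts how
 * many quarters of that probe interval fit into the tolerance. Plain
 * milliseconds stand in for the msecs_to_jiffies()/jiffies_to_msecs()
 * round trip done by link_set_supervision_props().
 */
#include <stdio.h>

static void supervision_props(unsigned int tol_ms)
{
	unsigned int intv_ms = (tol_ms / 4 > 500) ? 500 : tol_ms / 4;
	unsigned int abort_limit = tol_ms / (intv_ms / 4);

	printf("tolerance %u ms -> probe every %u ms, reset after %u misses\n",
	       tol_ms, intv_ms, abort_limit);
}

int main(void)
{
	supervision_props(1500);	/* e.g. a 1.5 s tolerance */
	supervision_props(30000);	/* a large tolerance hits the 500 ms cap */
	return 0;
}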
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
@@ -1911,22 +1920,25 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
}
/* tipc_link_find_owner - locate owner node of link by link's name
+ * @net: the applicable net namespace
* @name: pointer to link name string
* @bearer_id: pointer to index in 'node->links' array where the link was found.
*
* Returns pointer to node owning the link, or 0 if no matching link is found.
*/
-static struct tipc_node *tipc_link_find_owner(const char *link_name,
+static struct tipc_node *tipc_link_find_owner(struct net *net,
+ const char *link_name,
unsigned int *bearer_id)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
- struct tipc_node *found_node = 0;
+ struct tipc_node *found_node = NULL;
int i;
*bearer_id = 0;
rcu_read_lock();
- list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+ list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
l_ptr = n_ptr->links[i];
@@ -1946,148 +1958,6 @@ static struct tipc_node *tipc_link_find_owner(const char *link_name,
}
/**
- * link_value_is_valid -- validate proposed link tolerance/priority/window
- *
- * @cmd: value type (TIPC_CMD_SET_LINK_*)
- * @new_value: the new value
- *
- * Returns 1 if value is within range, 0 if not.
- */
-static int link_value_is_valid(u16 cmd, u32 new_value)
-{
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- return (new_value >= TIPC_MIN_LINK_TOL) &&
- (new_value <= TIPC_MAX_LINK_TOL);
- case TIPC_CMD_SET_LINK_PRI:
- return (new_value <= TIPC_MAX_LINK_PRI);
- case TIPC_CMD_SET_LINK_WINDOW:
- return (new_value >= TIPC_MIN_LINK_WIN) &&
- (new_value <= TIPC_MAX_LINK_WIN);
- }
- return 0;
-}
-
-/**
- * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
- * @name: ptr to link, bearer, or media name
- * @new_value: new value of link, bearer, or media setting
- * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
- *
- * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
- *
- * Returns 0 if value updated and negative value on error.
- */
-static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
-{
- struct tipc_node *node;
- struct tipc_link *l_ptr;
- struct tipc_bearer *b_ptr;
- struct tipc_media *m_ptr;
- int bearer_id;
- int res = 0;
-
- node = tipc_link_find_owner(name, &bearer_id);
- if (node) {
- tipc_node_lock(node);
- l_ptr = node->links[bearer_id];
-
- if (l_ptr) {
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- link_set_supervision_props(l_ptr, new_value);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
- new_value, 0, 0);
- break;
- case TIPC_CMD_SET_LINK_PRI:
- l_ptr->priority = new_value;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
- 0, new_value, 0);
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- tipc_link_set_queue_limits(l_ptr, new_value);
- break;
- default:
- res = -EINVAL;
- break;
- }
- }
- tipc_node_unlock(node);
- return res;
- }
-
- b_ptr = tipc_bearer_find(name);
- if (b_ptr) {
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- b_ptr->tolerance = new_value;
- break;
- case TIPC_CMD_SET_LINK_PRI:
- b_ptr->priority = new_value;
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- b_ptr->window = new_value;
- break;
- default:
- res = -EINVAL;
- break;
- }
- return res;
- }
-
- m_ptr = tipc_media_find(name);
- if (!m_ptr)
- return -ENODEV;
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- m_ptr->tolerance = new_value;
- break;
- case TIPC_CMD_SET_LINK_PRI:
- m_ptr->priority = new_value;
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- m_ptr->window = new_value;
- break;
- default:
- res = -EINVAL;
- break;
- }
- return res;
-}
-
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
- u16 cmd)
-{
- struct tipc_link_config *args;
- u32 new_value;
- int res;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
- new_value = ntohl(args->value);
-
- if (!link_value_is_valid(cmd, new_value))
- return tipc_cfg_reply_error_string(
- "cannot change, value invalid");
-
- if (!strcmp(args->name, tipc_bclink_name)) {
- if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
- (tipc_bclink_set_queue_limits(new_value) == 0))
- return tipc_cfg_reply_none();
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change setting on broadcast link)");
- }
-
- res = link_cmd_set_value(args->name, new_value, cmd);
- if (res)
- return tipc_cfg_reply_error_string("cannot change link setting");
-
- return tipc_cfg_reply_none();
-}
-
-/**
* link_reset_statistics - reset link statistics
* @l_ptr: pointer to link
*/
@@ -2098,207 +1968,13 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
l_ptr->stats.recv_info = l_ptr->next_in_no;
}
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
-{
- char *link_name;
- struct tipc_link *l_ptr;
- struct tipc_node *node;
- unsigned int bearer_id;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- link_name = (char *)TLV_DATA(req_tlv_area);
- if (!strcmp(link_name, tipc_bclink_name)) {
- if (tipc_bclink_reset_stats())
- return tipc_cfg_reply_error_string("link not found");
- return tipc_cfg_reply_none();
- }
- node = tipc_link_find_owner(link_name, &bearer_id);
- if (!node)
- return tipc_cfg_reply_error_string("link not found");
-
- tipc_node_lock(node);
- l_ptr = node->links[bearer_id];
- if (!l_ptr) {
- tipc_node_unlock(node);
- return tipc_cfg_reply_error_string("link not found");
- }
- link_reset_statistics(l_ptr);
- tipc_node_unlock(node);
- return tipc_cfg_reply_none();
-}
-
-/**
- * percent - convert count to a percentage of total (rounding up or down)
- */
-static u32 percent(u32 count, u32 total)
-{
- return (count * 100 + (total / 2)) / total;
-}
-
-/**
- * tipc_link_stats - print link statistics
- * @name: link name
- * @buf: print buffer area
- * @buf_size: size of print buffer area
- *
- * Returns length of print buffer data string (or 0 if error)
- */
-static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
-{
- struct tipc_link *l;
- struct tipc_stats *s;
- struct tipc_node *node;
- char *status;
- u32 profile_total = 0;
- unsigned int bearer_id;
- int ret;
-
- if (!strcmp(name, tipc_bclink_name))
- return tipc_bclink_stats(buf, buf_size);
-
- node = tipc_link_find_owner(name, &bearer_id);
- if (!node)
- return 0;
-
- tipc_node_lock(node);
-
- l = node->links[bearer_id];
- if (!l) {
- tipc_node_unlock(node);
- return 0;
- }
-
- s = &l->stats;
-
- if (tipc_link_is_active(l))
- status = "ACTIVE";
- else if (tipc_link_is_up(l))
- status = "STANDBY";
- else
- status = "DEFUNCT";
-
- ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
- " %s MTU:%u Priority:%u Tolerance:%u ms"
- " Window:%u packets\n",
- l->name, status, l->max_pkt, l->priority,
- l->tolerance, l->queue_limit[0]);
-
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l->next_in_no - s->recv_info, s->recv_fragments,
- s->recv_fragmented, s->recv_bundles,
- s->recv_bundled);
-
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l->next_out_no - s->sent_info, s->sent_fragments,
- s->sent_fragmented, s->sent_bundles,
- s->sent_bundled);
-
- profile_total = s->msg_length_counts;
- if (!profile_total)
- profile_total = 1;
-
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX profile sample:%u packets average:%u octets\n"
- " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
- "-16384:%u%% -32768:%u%% -66000:%u%%\n",
- s->msg_length_counts,
- s->msg_lengths_total / profile_total,
- percent(s->msg_length_profile[0], profile_total),
- percent(s->msg_length_profile[1], profile_total),
- percent(s->msg_length_profile[2], profile_total),
- percent(s->msg_length_profile[3], profile_total),
- percent(s->msg_length_profile[4], profile_total),
- percent(s->msg_length_profile[5], profile_total),
- percent(s->msg_length_profile[6], profile_total));
-
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " RX states:%u probes:%u naks:%u defs:%u"
- " dups:%u\n", s->recv_states, s->recv_probes,
- s->recv_nacks, s->deferred_recv, s->duplicates);
-
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX states:%u probes:%u naks:%u acks:%u"
- " dups:%u\n", s->sent_states, s->sent_probes,
- s->sent_nacks, s->sent_acks, s->retransmitted);
-
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " Congestion link:%u Send queue"
- " max:%u avg:%u\n", s->link_congs,
- s->max_queue_sz, s->queue_sz_counts ?
- (s->accu_queue_sz / s->queue_sz_counts) : 0);
-
- tipc_node_unlock(node);
- return ret;
-}
-
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
-{
- struct sk_buff *buf;
- struct tlv_desc *rep_tlv;
- int str_len;
- int pb_len;
- char *pb;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
- if (!buf)
- return NULL;
-
- rep_tlv = (struct tlv_desc *)buf->data;
- pb = TLV_DATA(rep_tlv);
- pb_len = ULTRA_STRING_MAX_LEN;
- str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
- pb, pb_len);
- if (!str_len) {
- kfree_skb(buf);
- return tipc_cfg_reply_error_string("link not found");
- }
- str_len += 1; /* for "\0" */
- skb_put(buf, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
-
- return buf;
-}
-
-/**
- * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
- * @dest: network address of destination node
- * @selector: used to select from set of active links
- *
- * If no active link can be found, uses default maximum packet size.
- */
-u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
-{
- struct tipc_node *n_ptr;
- struct tipc_link *l_ptr;
- u32 res = MAX_PKT_DEFAULT;
-
- if (dest == tipc_own_addr)
- return MAX_MSG_SIZE;
-
- n_ptr = tipc_node_find(dest);
- if (n_ptr) {
- tipc_node_lock(n_ptr);
- l_ptr = n_ptr->active_links[selector & 1];
- if (l_ptr)
- res = l_ptr->max_pkt;
- tipc_node_unlock(n_ptr);
- }
- return res;
-}
-
static void link_print(struct tipc_link *l_ptr, const char *str)
{
+ struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
struct tipc_bearer *b_ptr;
rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+ b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
if (b_ptr)
pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
rcu_read_unlock();
@@ -2362,6 +2038,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
struct tipc_link *link;
struct tipc_node *node;
struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
@@ -2377,7 +2054,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
- node = tipc_link_find_owner(name, &bearer_id);
+ node = tipc_link_find_owner(net, name, &bearer_id);
if (!node)
return -EINVAL;
@@ -2493,14 +2170,16 @@ msg_full:
}
/* Caller should hold appropriate locks to protect the link */
-static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
+static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *link)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2512,7 +2191,7 @@ static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
- tipc_cluster_mask(tipc_own_addr)))
+ tipc_cluster_mask(tn->own_addr)))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
goto attr_msg_full;
@@ -2562,9 +2241,8 @@ msg_full:
}
/* Caller should hold node lock */
-static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
- struct tipc_node *node,
- u32 *prev_link)
+static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_node *node, u32 *prev_link)
{
u32 i;
int err;
@@ -2575,7 +2253,7 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
if (!node->links[i])
continue;
- err = __tipc_nl_add_link(msg, node->links[i]);
+ err = __tipc_nl_add_link(net, msg, node->links[i]);
if (err)
return err;
}
@@ -2586,6 +2264,8 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node;
struct tipc_nl_msg msg;
u32 prev_node = cb->args[0];
@@ -2603,7 +2283,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
if (prev_node) {
- node = tipc_node_find(prev_node);
+ node = tipc_node_find(net, prev_node);
if (!node) {
/* We never set seq or call nl_dump_check_consistent()
* this means that setting prev_seq here will cause the
@@ -2615,9 +2295,11 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto out;
}
- list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
+ list_for_each_entry_continue_rcu(node, &tn->node_list,
+ list) {
tipc_node_lock(node);
- err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+ err = __tipc_nl_add_node_links(net, &msg, node,
+ &prev_link);
tipc_node_unlock(node);
if (err)
goto out;
@@ -2625,13 +2307,14 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
prev_node = node->addr;
}
} else {
- err = tipc_nl_add_bc_link(&msg);
+ err = tipc_nl_add_bc_link(net, &msg);
if (err)
goto out;
- list_for_each_entry_rcu(node, &tipc_node_list, list) {
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
tipc_node_lock(node);
- err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+ err = __tipc_nl_add_node_links(net, &msg, node,
+ &prev_link);
tipc_node_unlock(node);
if (err)
goto out;
@@ -2652,6 +2335,7 @@ out:
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
+ struct net *net = genl_info_net(info);
struct sk_buff *ans_skb;
struct tipc_nl_msg msg;
struct tipc_link *link;
@@ -2664,7 +2348,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
- node = tipc_link_find_owner(name, &bearer_id);
+ node = tipc_link_find_owner(net, name, &bearer_id);
if (!node)
return -EINVAL;
@@ -2683,7 +2367,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_link(&msg, link);
+ err = __tipc_nl_add_link(net, &msg, link);
if (err)
goto err_out;
@@ -2706,6 +2390,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
struct tipc_link *link;
struct tipc_node *node;
struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
@@ -2722,13 +2407,13 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
if (strcmp(link_name, tipc_bclink_name) == 0) {
- err = tipc_bclink_reset_stats();
+ err = tipc_bclink_reset_stats(net);
if (err)
return err;
return 0;
}
- node = tipc_link_find_owner(link_name, &bearer_id);
+ node = tipc_link_find_owner(net, link_name, &bearer_id);
if (!node)
return -EINVAL;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 55812e87ca1e..7aeb52092bf3 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -41,6 +41,10 @@
#include "msg.h"
#include "node.h"
+/* TIPC-specific error codes
+*/
+#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
+
/* Out-of-range value for link sequence numbers
*/
#define INVALID_LINK_SEQ 0x10000
@@ -99,13 +103,14 @@ struct tipc_stats {
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @owner: pointer to peer node
+ * @ref: reference counter for permanent references (owner node & timer)
* @flags: execution state flags for link endpoint instance
* @checkpoint: reference point for triggering link continuity checking
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @continuity_interval: link continuity testing interval [in ms]
+ * @cont_intv: link continuity testing interval
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
* @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
@@ -126,8 +131,10 @@ struct tipc_stats {
* @next_in_no: next sequence number to expect for inbound messages
* @deferred_queue: deferred queue saved OOS b'cast message received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
+ * @inputq: buffer queue for messages to be delivered upwards
+ * @namedq: buffer queue for name table messages to be delivered upwards
* @next_out: ptr to first unsent outbound message in queue
- * @waiting_sks: linked list of sockets waiting for link congestion to abate
+ * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
* @stats: collects statistics regarding link activity
@@ -138,6 +145,7 @@ struct tipc_link {
struct tipc_media_addr media_addr;
struct timer_list timer;
struct tipc_node *owner;
+ struct kref ref;
/* Management and link supervision data */
unsigned int flags;
@@ -146,7 +154,7 @@ struct tipc_link {
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
- u32 continuity_interval;
+ unsigned long cont_intv;
u32 abort_limit;
int state;
u32 fsm_msg_cnt;
@@ -178,10 +186,12 @@ struct tipc_link {
u32 next_in_no;
struct sk_buff_head deferred_queue;
u32 unacked_window;
+ struct sk_buff_head inputq;
+ struct sk_buff_head namedq;
/* Congestion handling */
struct sk_buff *next_out;
- struct sk_buff_head waiting_sks;
+ struct sk_buff_head wakeupq;
/* Fragmentation/reassembly */
u32 long_msg_seq_no;
@@ -196,28 +206,24 @@ struct tipc_port;
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
-void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
+void tipc_link_delete(struct tipc_link *link);
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
+ bool shutting_down);
void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
int tipc_link_is_up(struct tipc_link *l_ptr);
int tipc_link_is_active(struct tipc_link *l_ptr);
void tipc_link_purge_queues(struct tipc_link *l_ptr);
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area,
- int req_tlv_space,
- u16 cmd);
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
- int req_tlv_space);
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
- int req_tlv_space);
void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr);
-void tipc_link_reset_list(unsigned int bearer_id);
-int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
-int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
-u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-void tipc_link_bundle_rcv(struct sk_buff *buf);
+void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
+int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+ u32 selector);
+int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
+ u32 selector);
+int __tipc_link_xmit(struct net *net, struct tipc_link *link,
+ struct sk_buff_head *list);
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -233,6 +239,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
+void link_prepare_wakeup(struct tipc_link *l);
/*
* Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
@@ -267,6 +274,10 @@ static inline u32 lesser(u32 left, u32 right)
return less_eq(left, right) ? left : right;
}
+static inline u32 link_own_addr(struct tipc_link *l)
+{
+ return msg_prevnode(l->pmsg);
+}
/*
* Link status checking routines
diff --git a/net/tipc/log.c b/net/tipc/log.c
deleted file mode 100644
index abef644f27d8..000000000000
--- a/net/tipc/log.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * net/tipc/log.c: TIPC print buffer routines for debugging
- *
- * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "config.h"
-
-/**
- * tipc_snprintf - append formatted output to print buffer
- * @buf: pointer to print buffer
- * @len: buffer length
- * @fmt: formatted info to be printed
- */
-int tipc_snprintf(char *buf, int len, const char *fmt, ...)
-{
- int i;
- va_list args;
-
- va_start(args, fmt);
- i = vscnprintf(buf, len, fmt, args);
- va_end(args);
- return i;
-}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index a687b30a699c..b6eb90cd3ef7 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -34,6 +34,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
@@ -46,25 +47,48 @@ static unsigned int align(unsigned int i)
return (i + 3) & ~3u;
}
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
- u32 destnode)
+/**
+ * tipc_buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer with data pointers set to the specified size.
+ *
+ * NOTE: Headroom is reserved to allow prepending of a data link header.
+ * There may also be unrequested tailroom present at the buffer's end.
+ */
+struct sk_buff *tipc_buf_acquire(u32 size)
+{
+ struct sk_buff *skb;
+ unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+ skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+ if (skb) {
+ skb_reserve(skb, BUF_HEADROOM);
+ skb_put(skb, size);
+ skb->next = NULL;
+ }
+ return skb;
+}
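/* Illustrative userspace sketch, not part of this patch: the allocation
 * size requested by tipc_buf_acquire() is the message size plus link-level
 * headroom, rounded up to a 4-byte boundary so header fields stay word
 * aligned. The 80-byte headroom is only an assumed stand-in for
 * LL_MAX_HEADER, which depends on kernel configuration.
 */
#include <stdio.h>

#define EXAMPLE_HEADROOM 80	/* assumption: the real value is LL_MAX_HEADER */

static unsigned int alloc_size(unsigned int msg_size)
{
	return (EXAMPLE_HEADROOM + msg_size + 3) & ~3u;
}

int main(void)
{
	printf("24-byte message   -> %u-byte buffer\n", alloc_size(24));
	printf("1461-byte message -> %u-byte buffer\n", alloc_size(1461));
	return 0;
}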
+
+void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 dnode)
{
memset(m, 0, hsize);
msg_set_version(m);
msg_set_user(m, user);
msg_set_hdr_sz(m, hsize);
msg_set_size(m, hsize);
- msg_set_prevnode(m, tipc_own_addr);
+ msg_set_prevnode(m, own_node);
msg_set_type(m, type);
if (hsize > SHORT_H_SIZE) {
- msg_set_orignode(m, tipc_own_addr);
- msg_set_destnode(m, destnode);
+ msg_set_orignode(m, own_node);
+ msg_set_destnode(m, dnode);
}
}
-struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
- uint data_sz, u32 dnode, u32 onode,
- u32 dport, u32 oport, int errcode)
+struct sk_buff *tipc_msg_create(uint user, uint type,
+ uint hdr_sz, uint data_sz, u32 dnode,
+ u32 onode, u32 dport, u32 oport, int errcode)
{
struct tipc_msg *msg;
struct sk_buff *buf;
@@ -74,9 +98,8 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
return NULL;
msg = buf_msg(buf);
- tipc_msg_init(msg, user, type, hdr_sz, dnode);
+ tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
msg_set_size(msg, hdr_sz + data_sz);
- msg_set_prevnode(msg, onode);
msg_set_origport(msg, oport);
msg_set_destport(msg, dport);
msg_set_errcode(msg, errcode);
@@ -163,15 +186,14 @@ err:
* tipc_msg_build - create buffer chain containing specified header and data
* @mhdr: Message header, to be prepended to data
* @m: User message
- * @offset: Posision in iov to start copying from
* @dsz: Total length of user data
* @pktmax: Max packet size that can be used
* @list: Buffer or chain of buffers to be returned to caller
*
* Returns message data size or errno: -ENOMEM, -EFAULT
*/
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
- int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
+ int offset, int dsz, int pktmax, struct sk_buff_head *list)
{
int mhsz = msg_hdr_sz(mhdr);
int msz = mhsz + dsz;
@@ -191,19 +213,19 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
skb = tipc_buf_acquire(msz);
if (unlikely(!skb))
return -ENOMEM;
+ skb_orphan(skb);
__skb_queue_tail(list, skb);
skb_copy_to_linear_data(skb, mhdr, mhsz);
pktpos = skb->data + mhsz;
- if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
- dsz))
+ if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
return dsz;
rc = -EFAULT;
goto error;
}
/* Prepare reusable fragment header */
- tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
- INT_H_SIZE, msg_destnode(mhdr));
+ tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
+ FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
msg_set_size(&pkthdr, pktmax);
msg_set_fragm_no(&pkthdr, pktno);
@@ -211,6 +233,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
skb = tipc_buf_acquire(pktmax);
if (!skb)
return -ENOMEM;
+ skb_orphan(skb);
__skb_queue_tail(list, skb);
pktpos = skb->data;
skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
@@ -224,12 +247,11 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
if (drem < pktrem)
pktrem = drem;
- if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
+ if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
rc = -EFAULT;
goto error;
}
drem -= pktrem;
- offset += pktrem;
if (!drem)
break;
@@ -244,6 +266,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
rc = -ENOMEM;
goto error;
}
+ skb_orphan(skb);
__skb_queue_tail(list, skb);
msg_set_type(&pkthdr, FRAGMENT);
msg_set_size(&pkthdr, pktsz);
@@ -304,6 +327,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
}
/**
+ * tipc_msg_extract(): extract bundled inner packet from buffer
+ * @skb: linear outer buffer, to be extracted from.
+ * @iskb: extracted inner buffer, to be returned
+ * @pos: position of msg to be extracted. Returns with position of next msg
+ * Consumes outer buffer when last packet extracted
+ * Returns true when there is an extracted buffer, otherwise false
+ */
+bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
+{
+ struct tipc_msg *msg = buf_msg(skb);
+ int imsz;
+ struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos);
+
+ /* Is there space left for shortest possible message? */
+ if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE))
+ goto none;
+ imsz = msg_size(imsg);
+
+ /* Is there space left for current message ? */
+ if ((*pos + imsz) > msg_data_sz(msg))
+ goto none;
+ *iskb = tipc_buf_acquire(imsz);
+ if (!*iskb)
+ goto none;
+ skb_copy_to_linear_data(*iskb, imsg, imsz);
+ *pos += align(imsz);
+ return true;
+none:
+ kfree_skb(skb);
+ *iskb = NULL;
+ return false;
+}
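/* Illustrative userspace sketch, not part of this patch: walking a bundle
 * the way tipc_msg_extract() does - each inner message declares its own
 * size, and the read position advances by that size rounded up to a 4-byte
 * boundary. A bare uint32_t length prefix stands in for the real TIPC
 * header, and the two bounds checks mirror the "space left" tests above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned int align4(unsigned int i)
{
	return (i + 3) & ~3u;
}

static void walk_bundle(const uint8_t *data, unsigned int data_sz)
{
	unsigned int pos = 0;

	while (pos + sizeof(uint32_t) <= data_sz) {
		uint32_t imsz;

		memcpy(&imsz, data + pos, sizeof(imsz));
		if (imsz < sizeof(uint32_t) || pos + imsz > data_sz)
			break;			/* truncated or corrupt bundle */
		printf("inner message at offset %u, %u bytes\n",
		       pos, (unsigned int)imsz);
		pos += align4(imsz);		/* next message starts word aligned */
	}
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	uint32_t len1 = 6, len2 = 9;	/* padded to 8 and 12 bytes in the bundle */

	memcpy(buf, &len1, sizeof(len1));
	memcpy(buf + 8, &len2, sizeof(len2));
	walk_bundle(buf, 20);
	return 0;
}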
+
+/**
* tipc_msg_make_bundle(): Create bundle buf and append message to its tail
* @list: the buffer chain
* @skb: buffer to be appended and replaced
@@ -312,8 +369,8 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
* Replaces buffer if successful
* Returns true if success, otherwise false
*/
-bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
- u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff_head *list,
+ struct sk_buff *skb, u32 mtu, u32 dnode)
{
struct sk_buff *bskb;
struct tipc_msg *bmsg;
@@ -336,7 +393,8 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
skb_trim(bskb, INT_H_SIZE);
bmsg = buf_msg(bskb);
- tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
+ tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+ INT_H_SIZE, dnode);
msg_set_seqno(bmsg, msg_seqno(msg));
msg_set_ack(bmsg, msg_ack(msg));
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
@@ -353,7 +411,8 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
* Consumes buffer if failure
* Returns true if success, otherwise false
*/
-bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
+ int err)
{
struct tipc_msg *msg = buf_msg(buf);
uint imp = msg_importance(msg);
@@ -374,7 +433,7 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
msg_set_errcode(msg, err);
msg_set_origport(msg, msg_destport(&ohdr));
msg_set_destport(msg, msg_origport(&ohdr));
- msg_set_prevnode(msg, tipc_own_addr);
+ msg_set_prevnode(msg, own_addr);
if (!msg_short(msg)) {
msg_set_orignode(msg, msg_destnode(&ohdr));
msg_set_destnode(msg, msg_orignode(&ohdr));
@@ -386,43 +445,43 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
return true;
exit:
kfree_skb(buf);
+ *dnode = 0;
return false;
}
/**
- * tipc_msg_eval: determine fate of message that found no destination
- * @buf: the buffer containing the message.
- * @dnode: return value: next-hop node, if message to be forwarded
- * @err: error code to use, if message to be rejected
- *
+ * tipc_msg_lookup_dest(): try to find new destination for named message
+ * @skb: the buffer containing the message.
+ * @dnode: return value: next-hop node, if destination found
+ * @err: return value: error code to use, if message to be rejected
* Does not consume buffer
- * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
- * code if message to be rejected
+ * Returns true if a destination is found, false otherwise
*/
-int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
+ u32 *dnode, int *err)
{
- struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_msg *msg = buf_msg(skb);
u32 dport;
- if (msg_type(msg) != TIPC_NAMED_MSG)
- return -TIPC_ERR_NO_PORT;
- if (skb_linearize(buf))
- return -TIPC_ERR_NO_NAME;
- if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
- return -TIPC_ERR_NO_NAME;
+ if (!msg_isdata(msg))
+ return false;
+ if (!msg_named(msg))
+ return false;
+ *err = -TIPC_ERR_NO_NAME;
+ if (skb_linearize(skb))
+ return false;
if (msg_reroute_cnt(msg) > 0)
- return -TIPC_ERR_NO_NAME;
-
- *dnode = addr_domain(msg_lookup_scope(msg));
- dport = tipc_nametbl_translate(msg_nametype(msg),
- msg_nameinst(msg),
- dnode);
+ return false;
+ *dnode = addr_domain(net, msg_lookup_scope(msg));
+ dport = tipc_nametbl_translate(net, msg_nametype(msg),
+ msg_nameinst(msg), dnode);
if (!dport)
- return -TIPC_ERR_NO_NAME;
+ return false;
msg_incr_reroute_cnt(msg);
msg_set_destnode(msg, *dnode);
msg_set_destport(msg, dport);
- return TIPC_OK;
+ *err = TIPC_OK;
+ return true;
}
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index d5c83d7ecb47..9ace47f44a69 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -37,7 +37,7 @@
#ifndef _TIPC_MSG_H
#define _TIPC_MSG_H
-#include "bearer.h"
+#include <linux/tipc.h>
/*
* Constants and routines used to read and write TIPC payload message headers
@@ -45,6 +45,7 @@
* Note: Some items are also used with TIPC internal message headers
*/
#define TIPC_VERSION 2
+struct plist;
/*
* Payload message users are defined in TIPC's public API:
@@ -77,11 +78,37 @@
#define TIPC_MEDIA_ADDR_OFFSET 5
+/**
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom reserves space for the worst-case
+ * link-level device header (in case the message is sent off-node).
+ *
+ * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
+ * are word aligned for quicker access
+ */
+#define BUF_HEADROOM LL_MAX_HEADER
+
+struct tipc_skb_cb {
+ void *handle;
+ struct sk_buff *tail;
+ bool deferred;
+ bool wakeup_pending;
+ bool bundling;
+ u16 chain_sz;
+ u16 chain_imp;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
struct tipc_msg {
__be32 hdr[15];
};
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+ return (struct tipc_msg *)skb->data;
+}
static inline u32 msg_word(struct tipc_msg *m, u32 pos)
{
@@ -721,27 +748,111 @@ static inline u32 msg_tot_origport(struct tipc_msg *m)
return msg_origport(m);
}
-bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err);
-
-int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
-
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
- u32 destnode);
-
+struct sk_buff *tipc_buf_acquire(u32 size);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
+ int err);
+void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 destnode);
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
uint data_sz, u32 dnode, u32 onode,
u32 dport, u32 oport, int errcode);
-
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
+bool tipc_msg_make_bundle(struct sk_buff_head *list,
+ struct sk_buff *skb, u32 mtu, u32 dnode);
+bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
+ int offset, int dsz, int mtu, struct sk_buff_head *list);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
+ int *err);
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
-bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
- u32 mtu, u32 dnode);
-
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
- int dsz, int mtu, struct sk_buff_head *list);
+/* tipc_skb_peek(): peek and reserve first buffer in list
+ * @list: list to be peeked in
+ * Returns pointer to first buffer in list, if any
+ */
+static inline struct sk_buff *tipc_skb_peek(struct sk_buff_head *list,
+ spinlock_t *lock)
+{
+ struct sk_buff *skb;
+
+ spin_lock_bh(lock);
+ skb = skb_peek(list);
+ if (skb)
+ skb_get(skb);
+ spin_unlock_bh(lock);
+ return skb;
+}
+
+/* tipc_skb_peek_port(): find a destination port, ignoring all destinations
+ * up to and including 'filter'.
+ * Note: ignoring previously tried destinations minimizes the risk of
+ * contention on the socket lock
+ * @list: list to be peeked in
+ * @filter: last destination to be ignored from search
+ * Returns a destination port number, if applicable.
+ */
+static inline u32 tipc_skb_peek_port(struct sk_buff_head *list, u32 filter)
+{
+ struct sk_buff *skb;
+ u32 dport = 0;
+ bool ignore = true;
+
+ spin_lock_bh(&list->lock);
+ skb_queue_walk(list, skb) {
+ dport = msg_destport(buf_msg(skb));
+ if (!filter || skb_queue_is_last(list, skb))
+ break;
+ if (dport == filter)
+ ignore = false;
+ else if (!ignore)
+ break;
+ }
+ spin_unlock_bh(&list->lock);
+ return dport;
+}
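/* Illustrative userspace sketch, not part of this patch: the scan in
 * tipc_skb_peek_port() keeps skipping destinations until it has passed
 * 'filter' (the port tried on the previous pass), then reports the first
 * different one, so repeated delivery attempts do not keep contending for
 * the same socket lock. An array of port numbers stands in for the buffer
 * queue.
 */
#include <stdio.h>

static unsigned int peek_port(const unsigned int *dports, int n,
			      unsigned int filter)
{
	unsigned int dport = 0;
	int ignore = 1;	/* skip entries until 'filter' has been seen */
	int i;

	for (i = 0; i < n; i++) {
		dport = dports[i];
		if (!filter || i == n - 1)
			break;
		if (dport == filter)
			ignore = 0;
		else if (!ignore)
			break;
	}
	return dport;
}

int main(void)
{
	unsigned int q[] = { 11, 11, 42, 42, 77 };

	printf("no filter      -> port %u\n", peek_port(q, 5, 0));	/* 11 */
	printf("filter port 11 -> port %u\n", peek_port(q, 5, 11));	/* 42 */
	printf("filter port 42 -> port %u\n", peek_port(q, 5, 42));	/* 77 */
	return 0;
}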
+
+/* tipc_skb_dequeue(): unlink first buffer with dest 'dport' from list
+ * @list: list to be unlinked from
+ * @dport: selection criteria for buffer to unlink
+ */
+static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
+ u32 dport)
+{
+ struct sk_buff *_skb, *tmp, *skb = NULL;
+
+ spin_lock_bh(&list->lock);
+ skb_queue_walk_safe(list, _skb, tmp) {
+ if (msg_destport(buf_msg(_skb)) == dport) {
+ __skb_unlink(_skb, list);
+ skb = _skb;
+ break;
+ }
+ }
+ spin_unlock_bh(&list->lock);
+ return skb;
+}
+
+/* tipc_skb_queue_tail(): add buffer to tail of list;
+ * @list: list to be appended to
+ * @skb: buffer to append. Always appended
+ * @dport: the destination port of the buffer
+ * returns true if dport differs from previous destination
+ */
+static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
+ struct sk_buff *skb, u32 dport)
+{
+ struct sk_buff *_skb = NULL;
+ bool rv = false;
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
+ spin_lock_bh(&list->lock);
+ _skb = skb_peek_tail(list);
+ if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
+ (skb_queue_len(list) > 32))
+ rv = true;
+ __skb_queue_tail(list, skb);
+ spin_unlock_bh(&list->lock);
+ return rv;
+}
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ba6083dca95b..fcb07915aaac 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -68,29 +68,33 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
/**
* named_prepare_buf - allocate & initialize a publication message
*/
-static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
+static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
+ u32 dest)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
- tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
+ tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type,
+ INT_H_SIZE, dest);
msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
}
-void named_cluster_distribute(struct sk_buff *skb)
+void named_cluster_distribute(struct net *net, struct sk_buff *skb)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *oskb;
struct tipc_node *node;
u32 dnode;
rcu_read_lock();
- list_for_each_entry_rcu(node, &tipc_node_list, list) {
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
dnode = node->addr;
- if (in_own_node(dnode))
+ if (in_own_node(net, dnode))
continue;
if (!tipc_node_active_links(node))
continue;
@@ -98,7 +102,7 @@ void named_cluster_distribute(struct sk_buff *skb)
if (!oskb)
break;
msg_set_destnode(buf_msg(oskb), dnode);
- tipc_link_xmit_skb(oskb, dnode, dnode);
+ tipc_link_xmit_skb(net, oskb, dnode, dnode);
}
rcu_read_unlock();
@@ -108,18 +112,19 @@ void named_cluster_distribute(struct sk_buff *skb)
/**
* tipc_named_publish - tell other nodes about a new publication by this node
*/
-struct sk_buff *tipc_named_publish(struct publication *publ)
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *buf;
struct distr_item *item;
list_add_tail_rcu(&publ->local_list,
- &tipc_nametbl->publ_list[publ->scope]);
+ &tn->nametbl->publ_list[publ->scope]);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
- buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
+ buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
pr_warn("Publication distribution failure\n");
return NULL;
@@ -133,7 +138,7 @@ struct sk_buff *tipc_named_publish(struct publication *publ)
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
*/
-struct sk_buff *tipc_named_withdraw(struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
@@ -143,7 +148,7 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
- buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
+ buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
pr_warn("Withdrawal distribution failure\n");
return NULL;
@@ -160,19 +165,21 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
*/
-static void named_distribute(struct sk_buff_head *list, u32 dnode,
- struct list_head *pls)
+static void named_distribute(struct net *net, struct sk_buff_head *list,
+ u32 dnode, struct list_head *pls)
{
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
+ uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
+ ITEM_SIZE;
uint msg_rem = msg_dsz;
list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */
if (!skb) {
- skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
+ skb = named_prepare_buf(net, PUBLICATION, msg_rem,
+ dnode);
if (!skb) {
pr_warn("Bulk publication failure\n");
return;
@@ -202,30 +209,32 @@ static void named_distribute(struct sk_buff_head *list, u32 dnode,
/**
* tipc_named_node_up - tell specified node about all publications by this node
*/
-void tipc_named_node_up(u32 dnode)
+void tipc_named_node_up(struct net *net, u32 dnode)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff_head head;
__skb_queue_head_init(&head);
rcu_read_lock();
- named_distribute(&head, dnode,
- &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
- named_distribute(&head, dnode,
- &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
+ named_distribute(net, &head, dnode,
+ &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
+ named_distribute(net, &head, dnode,
+ &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
rcu_read_unlock();
- tipc_link_xmit(&head, dnode, dnode);
+ tipc_link_xmit(net, &head, dnode, dnode);
}
-static void tipc_publ_subscribe(struct publication *publ, u32 addr)
+static void tipc_publ_subscribe(struct net *net, struct publication *publ,
+ u32 addr)
{
struct tipc_node *node;
- if (in_own_node(addr))
+ if (in_own_node(net, addr))
return;
- node = tipc_node_find(addr);
+ node = tipc_node_find(net, addr);
if (!node) {
pr_warn("Node subscription rejected, unknown node 0x%x\n",
addr);
@@ -237,11 +246,12 @@ static void tipc_publ_subscribe(struct publication *publ, u32 addr)
tipc_node_unlock(node);
}
-static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
+static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
+ u32 addr)
{
struct tipc_node *node;
- node = tipc_node_find(addr);
+ node = tipc_node_find(net, addr);
if (!node)
return;
@@ -256,16 +266,17 @@ static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
-static void tipc_publ_purge(struct publication *publ, u32 addr)
+static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct publication *p;
- spin_lock_bh(&tipc_nametbl_lock);
- p = tipc_nametbl_remove_publ(publ->type, publ->lower,
+ spin_lock_bh(&tn->nametbl_lock);
+ p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
publ->node, publ->ref, publ->key);
if (p)
- tipc_publ_unsubscribe(p, addr);
- spin_unlock_bh(&tipc_nametbl_lock);
+ tipc_publ_unsubscribe(net, p, addr);
+ spin_unlock_bh(&tn->nametbl_lock);
if (p != publ) {
pr_err("Unable to remove publication from failed node\n"
@@ -277,12 +288,12 @@ static void tipc_publ_purge(struct publication *publ, u32 addr)
kfree_rcu(p, rcu);
}
-void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
{
struct publication *publ, *tmp;
list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
- tipc_publ_purge(publ, addr);
+ tipc_publ_purge(net, publ, addr);
}
/**
@@ -292,25 +303,28 @@ void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
* tipc_nametbl_lock must be held.
* Returns the publication item if successful, otherwise NULL.
*/
-static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
+static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
+ u32 node, u32 dtype)
{
struct publication *publ = NULL;
if (dtype == PUBLICATION) {
- publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
+ publ = tipc_nametbl_insert_publ(net, ntohl(i->type),
+ ntohl(i->lower),
ntohl(i->upper),
TIPC_CLUSTER_SCOPE, node,
ntohl(i->ref), ntohl(i->key));
if (publ) {
- tipc_publ_subscribe(publ, node);
+ tipc_publ_subscribe(net, publ, node);
return true;
}
} else if (dtype == WITHDRAWAL) {
- publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
+ publ = tipc_nametbl_remove_publ(net, ntohl(i->type),
+ ntohl(i->lower),
node, ntohl(i->ref),
ntohl(i->key));
if (publ) {
- tipc_publ_unsubscribe(publ, node);
+ tipc_publ_unsubscribe(net, publ, node);
kfree_rcu(publ, rcu);
return true;
}
@@ -343,7 +357,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
* tipc_named_process_backlog - try to process any pending name table updates
* from the network.
*/
-void tipc_named_process_backlog(void)
+void tipc_named_process_backlog(struct net *net)
{
struct distr_queue_item *e, *tmp;
char addr[16];
@@ -351,7 +365,7 @@ void tipc_named_process_backlog(void)
list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
if (time_after(e->expires, now)) {
- if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
+ if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
continue;
} else {
tipc_addr_string_fill(addr, e->node);
@@ -367,24 +381,34 @@ void tipc_named_process_backlog(void)
}
/**
- * tipc_named_rcv - process name table update message sent by another node
+ * tipc_named_rcv - process name table update messages sent by another node
*/
-void tipc_named_rcv(struct sk_buff *buf)
+void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
{
- struct tipc_msg *msg = buf_msg(buf);
- struct distr_item *item = (struct distr_item *)msg_data(msg);
- u32 count = msg_data_sz(msg) / ITEM_SIZE;
- u32 node = msg_orignode(msg);
-
- spin_lock_bh(&tipc_nametbl_lock);
- while (count--) {
- if (!tipc_update_nametbl(item, node, msg_type(msg)))
- tipc_named_add_backlog(item, msg_type(msg), node);
- item++;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_msg *msg;
+ struct distr_item *item;
+ uint count;
+ u32 node;
+ struct sk_buff *skb;
+ int mtype;
+
+ spin_lock_bh(&tn->nametbl_lock);
+ for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
+ msg = buf_msg(skb);
+ mtype = msg_type(msg);
+ item = (struct distr_item *)msg_data(msg);
+ count = msg_data_sz(msg) / ITEM_SIZE;
+ node = msg_orignode(msg);
+ while (count--) {
+ if (!tipc_update_nametbl(net, item, node, mtype))
+ tipc_named_add_backlog(item, mtype, node);
+ item++;
+ }
+ kfree_skb(skb);
+ tipc_named_process_backlog(net);
}
- tipc_named_process_backlog();
- spin_unlock_bh(&tipc_nametbl_lock);
- kfree_skb(buf);
+ spin_unlock_bh(&tn->nametbl_lock);
}
/**
@@ -394,17 +418,18 @@ void tipc_named_rcv(struct sk_buff *buf)
* All name table entries published by this node are updated to reflect
* the node's new network address.
*/
-void tipc_named_reinit(void)
+void tipc_named_reinit(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct publication *publ;
int scope;
- spin_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tn->nametbl_lock);
for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
- list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope],
+ list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope],
local_list)
- publ->node = tipc_own_addr;
+ publ->node = tn->own_addr;
- spin_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tn->nametbl_lock);
}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index cef55cedcfb2..dd2d9fd80da2 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -67,13 +67,13 @@ struct distr_item {
__be32 key;
};
-struct sk_buff *tipc_named_publish(struct publication *publ);
-struct sk_buff *tipc_named_withdraw(struct publication *publ);
-void named_cluster_distribute(struct sk_buff *buf);
-void tipc_named_node_up(u32 dnode);
-void tipc_named_rcv(struct sk_buff *buf);
-void tipc_named_reinit(void);
-void tipc_named_process_backlog(void);
-void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
+void named_cluster_distribute(struct net *net, struct sk_buff *buf);
+void tipc_named_node_up(struct net *net, u32 dnode);
+void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue);
+void tipc_named_reinit(struct net *net);
+void tipc_named_process_backlog(struct net *net);
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr);
#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index c8df0223371a..105ba7adf06f 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1,7 +1,7 @@
/*
* net/tipc/name_table.c: TIPC name table code
*
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
* Copyright (c) 2004-2008, 2010-2014, Wind River Systems
* All rights reserved.
*
@@ -34,11 +34,15 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <net/sock.h>
#include "core.h"
-#include "config.h"
+#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
+#include "bcast.h"
+#include "addr.h"
+#include <net/genetlink.h>
#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
@@ -105,9 +109,6 @@ struct name_seq {
struct rcu_head rcu;
};
-struct name_table *tipc_nametbl;
-DEFINE_SPINLOCK(tipc_nametbl_lock);
-
static int hash(int x)
{
return x & (TIPC_NAMETBL_SIZE - 1);
@@ -228,9 +229,11 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
/**
* tipc_nameseq_insert_publ
*/
-static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
- u32 type, u32 lower, u32 upper,
- u32 scope, u32 node, u32 port, u32 key)
+static struct publication *tipc_nameseq_insert_publ(struct net *net,
+ struct name_seq *nseq,
+ u32 type, u32 lower,
+ u32 upper, u32 scope,
+ u32 node, u32 port, u32 key)
{
struct tipc_subscription *s;
struct tipc_subscription *st;
@@ -315,12 +318,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
list_add(&publ->zone_list, &info->zone_list);
info->zone_list_size++;
- if (in_own_cluster(node)) {
+ if (in_own_cluster(net, node)) {
list_add(&publ->cluster_list, &info->cluster_list);
info->cluster_list_size++;
}
- if (in_own_node(node)) {
+ if (in_own_node(net, node)) {
list_add(&publ->node_list, &info->node_list);
info->node_list_size++;
}
@@ -349,8 +352,10 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
* A failed withdraw request simply returns a failure indication and lets the
* caller issue any error or warning messages associated with such a problem.
*/
-static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
- u32 node, u32 ref, u32 key)
+static struct publication *tipc_nameseq_remove_publ(struct net *net,
+ struct name_seq *nseq,
+ u32 inst, u32 node,
+ u32 ref, u32 key)
{
struct publication *publ;
struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
@@ -378,13 +383,13 @@ found:
info->zone_list_size--;
/* Remove publication from cluster scope list, if present */
- if (in_own_cluster(node)) {
+ if (in_own_cluster(net, node)) {
list_del(&publ->cluster_list);
info->cluster_list_size--;
}
/* Remove publication from node scope list, if present */
- if (in_own_node(node)) {
+ if (in_own_node(net, node)) {
list_del(&publ->node_list);
info->node_list_size--;
}
@@ -447,12 +452,13 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
}
}
-static struct name_seq *nametbl_find_seq(u32 type)
+static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct hlist_head *seq_head;
struct name_seq *ns;
- seq_head = &tipc_nametbl->seq_hlist[hash(type)];
+ seq_head = &tn->nametbl->seq_hlist[hash(type)];
hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
if (ns->type == type)
return ns;
@@ -461,11 +467,13 @@ static struct name_seq *nametbl_find_seq(u32 type)
return NULL;
};
-struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
- u32 scope, u32 node, u32 port, u32 key)
+struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
+ u32 lower, u32 upper, u32 scope,
+ u32 node, u32 port, u32 key)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct publication *publ;
- struct name_seq *seq = nametbl_find_seq(type);
+ struct name_seq *seq = nametbl_find_seq(net, type);
int index = hash(type);
if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
@@ -476,29 +484,29 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
}
if (!seq)
- seq = tipc_nameseq_create(type,
- &tipc_nametbl->seq_hlist[index]);
+ seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
if (!seq)
return NULL;
spin_lock_bh(&seq->lock);
- publ = tipc_nameseq_insert_publ(seq, type, lower, upper,
+ publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
scope, node, port, key);
spin_unlock_bh(&seq->lock);
return publ;
}
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
- u32 node, u32 ref, u32 key)
+struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
+ u32 lower, u32 node, u32 ref,
+ u32 key)
{
struct publication *publ;
- struct name_seq *seq = nametbl_find_seq(type);
+ struct name_seq *seq = nametbl_find_seq(net, type);
if (!seq)
return NULL;
spin_lock_bh(&seq->lock);
- publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
+ publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
if (!seq->first_free && list_empty(&seq->subscriptions)) {
hlist_del_init_rcu(&seq->ns_list);
kfree(seq->sseqs);
@@ -523,8 +531,10 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
* - if name translation is attempted and fails, sets 'destnode' to 0
* and returns 0
*/
-u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
+u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
+ u32 *destnode)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sub_seq *sseq;
struct name_info *info;
struct publication *publ;
@@ -532,11 +542,11 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
u32 ref = 0;
u32 node = 0;
- if (!tipc_in_scope(*destnode, tipc_own_addr))
+ if (!tipc_in_scope(*destnode, tn->own_addr))
return 0;
rcu_read_lock();
- seq = nametbl_find_seq(type);
+ seq = nametbl_find_seq(net, type);
if (unlikely(!seq))
goto not_found;
spin_lock_bh(&seq->lock);
@@ -569,13 +579,13 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
}
/* Round-Robin Algorithm */
- else if (*destnode == tipc_own_addr) {
+ else if (*destnode == tn->own_addr) {
if (list_empty(&info->node_list))
goto no_match;
publ = list_first_entry(&info->node_list, struct publication,
node_list);
list_move_tail(&publ->node_list, &info->node_list);
- } else if (in_own_cluster_exact(*destnode)) {
+ } else if (in_own_cluster_exact(net, *destnode)) {
if (list_empty(&info->cluster_list))
goto no_match;
publ = list_first_entry(&info->cluster_list, struct publication,
@@ -609,8 +619,8 @@ not_found:
*
* Returns non-zero if any off-node ports overlap
*/
-int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
- struct tipc_port_list *dports)
+int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
+ u32 limit, struct tipc_plist *dports)
{
struct name_seq *seq;
struct sub_seq *sseq;
@@ -619,7 +629,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
int res = 0;
rcu_read_lock();
- seq = nametbl_find_seq(type);
+ seq = nametbl_find_seq(net, type);
if (!seq)
goto exit;
@@ -635,7 +645,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
info = sseq->info;
list_for_each_entry(publ, &info->node_list, node_list) {
if (publ->scope <= limit)
- tipc_port_list_add(dports, publ->ref);
+ tipc_plist_push(dports, publ->ref);
}
if (info->cluster_list_size != info->node_list_size)
@@ -650,50 +660,55 @@ exit:
/*
* tipc_nametbl_publish - add name publication to network name tables
*/
-struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
- u32 scope, u32 port_ref, u32 key)
+struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
+ u32 upper, u32 scope, u32 port_ref,
+ u32 key)
{
struct publication *publ;
struct sk_buff *buf = NULL;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- spin_lock_bh(&tipc_nametbl_lock);
- if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
+ spin_lock_bh(&tn->nametbl_lock);
+ if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
pr_warn("Publication failed, local publication limit reached (%u)\n",
TIPC_MAX_PUBLICATIONS);
- spin_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tn->nametbl_lock);
return NULL;
}
- publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
- tipc_own_addr, port_ref, key);
+ publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
+ tn->own_addr, port_ref, key);
if (likely(publ)) {
- tipc_nametbl->local_publ_count++;
- buf = tipc_named_publish(publ);
+ tn->nametbl->local_publ_count++;
+ buf = tipc_named_publish(net, publ);
/* Any pending external events? */
- tipc_named_process_backlog();
+ tipc_named_process_backlog(net);
}
- spin_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tn->nametbl_lock);
if (buf)
- named_cluster_distribute(buf);
+ named_cluster_distribute(net, buf);
return publ;
}
/**
* tipc_nametbl_withdraw - withdraw name publication from network name tables
*/
-int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
+ u32 key)
{
struct publication *publ;
struct sk_buff *skb = NULL;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- spin_lock_bh(&tipc_nametbl_lock);
- publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+ spin_lock_bh(&tn->nametbl_lock);
+ publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
+ ref, key);
if (likely(publ)) {
- tipc_nametbl->local_publ_count--;
- skb = tipc_named_withdraw(publ);
+ tn->nametbl->local_publ_count--;
+ skb = tipc_named_withdraw(net, publ);
/* Any pending external events? */
- tipc_named_process_backlog();
+ tipc_named_process_backlog(net);
list_del_init(&publ->pport_list);
kfree_rcu(publ, rcu);
} else {
@@ -701,10 +716,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
"(type=%u, lower=%u, ref=%u, key=%u)\n",
type, lower, ref, key);
}
- spin_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tn->nametbl_lock);
if (skb) {
- named_cluster_distribute(skb);
+ named_cluster_distribute(net, skb);
return 1;
}
return 0;
@@ -715,15 +730,15 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
*/
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
+ struct tipc_net *tn = net_generic(s->net, tipc_net_id);
u32 type = s->seq.type;
int index = hash(type);
struct name_seq *seq;
- spin_lock_bh(&tipc_nametbl_lock);
- seq = nametbl_find_seq(type);
+ spin_lock_bh(&tn->nametbl_lock);
+ seq = nametbl_find_seq(s->net, type);
if (!seq)
- seq = tipc_nameseq_create(type,
- &tipc_nametbl->seq_hlist[index]);
+ seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
if (seq) {
spin_lock_bh(&seq->lock);
tipc_nameseq_subscribe(seq, s);
@@ -732,7 +747,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
pr_warn("Failed to create subscription for {%u,%u,%u}\n",
s->seq.type, s->seq.lower, s->seq.upper);
}
- spin_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tn->nametbl_lock);
}
/**
@@ -740,10 +755,11 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
*/
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
+ struct tipc_net *tn = net_generic(s->net, tipc_net_id);
struct name_seq *seq;
- spin_lock_bh(&tipc_nametbl_lock);
- seq = nametbl_find_seq(s->seq.type);
+ spin_lock_bh(&tn->nametbl_lock);
+ seq = nametbl_find_seq(s->net, s->seq.type);
if (seq != NULL) {
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
@@ -756,193 +772,13 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
spin_unlock_bh(&seq->lock);
}
}
- spin_unlock_bh(&tipc_nametbl_lock);
-}
-
-/**
- * subseq_list - print specified sub-sequence contents into the given buffer
- */
-static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
- u32 index)
-{
- char portIdStr[27];
- const char *scope_str[] = {"", " zone", " cluster", " node"};
- struct publication *publ;
- struct name_info *info;
- int ret;
-
- ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper);
-
- if (depth == 2) {
- ret += tipc_snprintf(buf - ret, len + ret, "\n");
- return ret;
- }
-
- info = sseq->info;
-
- list_for_each_entry(publ, &info->zone_list, zone_list) {
- sprintf(portIdStr, "<%u.%u.%u:%u>",
- tipc_zone(publ->node), tipc_cluster(publ->node),
- tipc_node(publ->node), publ->ref);
- ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr);
- if (depth > 3) {
- ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s",
- publ->key, scope_str[publ->scope]);
- }
- if (!list_is_last(&publ->zone_list, &info->zone_list))
- ret += tipc_snprintf(buf + ret, len - ret,
- "\n%33s", " ");
- }
-
- ret += tipc_snprintf(buf + ret, len - ret, "\n");
- return ret;
-}
-
-/**
- * nameseq_list - print specified name sequence contents into the given buffer
- */
-static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
- u32 type, u32 lowbound, u32 upbound, u32 index)
-{
- struct sub_seq *sseq;
- char typearea[11];
- int ret = 0;
-
- if (seq->first_free == 0)
- return 0;
-
- sprintf(typearea, "%-10u", seq->type);
-
- if (depth == 1) {
- ret += tipc_snprintf(buf, len, "%s\n", typearea);
- return ret;
- }
-
- for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
- if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
- ret += tipc_snprintf(buf + ret, len - ret, "%s ",
- typearea);
- spin_lock_bh(&seq->lock);
- ret += subseq_list(sseq, buf + ret, len - ret,
- depth, index);
- spin_unlock_bh(&seq->lock);
- sprintf(typearea, "%10s", " ");
- }
- }
- return ret;
-}
-
-/**
- * nametbl_header - print name table header into the given buffer
- */
-static int nametbl_header(char *buf, int len, u32 depth)
-{
- const char *header[] = {
- "Type ",
- "Lower Upper ",
- "Port Identity ",
- "Publication Scope"
- };
-
- int i;
- int ret = 0;
-
- if (depth > 4)
- depth = 4;
- for (i = 0; i < depth; i++)
- ret += tipc_snprintf(buf + ret, len - ret, header[i]);
- ret += tipc_snprintf(buf + ret, len - ret, "\n");
- return ret;
-}
-
-/**
- * nametbl_list - print specified name table contents into the given buffer
- */
-static int nametbl_list(char *buf, int len, u32 depth_info,
- u32 type, u32 lowbound, u32 upbound)
-{
- struct hlist_head *seq_head;
- struct name_seq *seq;
- int all_types;
- int ret = 0;
- u32 depth;
- u32 i;
-
- all_types = (depth_info & TIPC_NTQ_ALLTYPES);
- depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
-
- if (depth == 0)
- return 0;
-
- if (all_types) {
- /* display all entries in name table to specified depth */
- ret += nametbl_header(buf, len, depth);
- lowbound = 0;
- upbound = ~0;
- for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
- seq_head = &tipc_nametbl->seq_hlist[i];
- hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
- ret += nameseq_list(seq, buf + ret, len - ret,
- depth, seq->type,
- lowbound, upbound, i);
- }
- }
- } else {
- /* display only the sequence that matches the specified type */
- if (upbound < lowbound) {
- ret += tipc_snprintf(buf + ret, len - ret,
- "invalid name sequence specified\n");
- return ret;
- }
- ret += nametbl_header(buf + ret, len - ret, depth);
- i = hash(type);
- seq_head = &tipc_nametbl->seq_hlist[i];
- hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
- if (seq->type == type) {
- ret += nameseq_list(seq, buf + ret, len - ret,
- depth, type,
- lowbound, upbound, i);
- break;
- }
- }
- }
- return ret;
+ spin_unlock_bh(&tn->nametbl_lock);
}
-struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
-{
- struct sk_buff *buf;
- struct tipc_name_table_query *argv;
- struct tlv_desc *rep_tlv;
- char *pb;
- int pb_len;
- int str_len;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
- if (!buf)
- return NULL;
-
- rep_tlv = (struct tlv_desc *)buf->data;
- pb = TLV_DATA(rep_tlv);
- pb_len = ULTRA_STRING_MAX_LEN;
- argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
- rcu_read_lock();
- str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
- ntohl(argv->type),
- ntohl(argv->lowbound), ntohl(argv->upbound));
- rcu_read_unlock();
- str_len += 1; /* for "\0" */
- skb_put(buf, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
-
- return buf;
-}
-
-int tipc_nametbl_init(void)
+int tipc_nametbl_init(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct name_table *tipc_nametbl;
int i;
tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
@@ -955,6 +791,8 @@ int tipc_nametbl_init(void)
INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
+ tn->nametbl = tipc_nametbl;
+ spin_lock_init(&tn->nametbl_lock);
return 0;
}
@@ -963,7 +801,7 @@ int tipc_nametbl_init(void)
*
* tipc_nametbl_lock must be held when calling this function
*/
-static void tipc_purge_publications(struct name_seq *seq)
+static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
struct publication *publ, *safe;
struct sub_seq *sseq;
@@ -973,8 +811,8 @@ static void tipc_purge_publications(struct name_seq *seq)
sseq = seq->sseqs;
info = sseq->info;
list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
- tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
- publ->ref, publ->key);
+ tipc_nametbl_remove_publ(net, publ->type, publ->lower,
+ publ->node, publ->ref, publ->key);
kfree_rcu(publ, rcu);
}
hlist_del_init_rcu(&seq->ns_list);
@@ -984,25 +822,27 @@ static void tipc_purge_publications(struct name_seq *seq)
kfree_rcu(seq, rcu);
}
-void tipc_nametbl_stop(void)
+void tipc_nametbl_stop(struct net *net)
{
u32 i;
struct name_seq *seq;
struct hlist_head *seq_head;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct name_table *tipc_nametbl = tn->nametbl;
/* Verify name table is empty and purge any lingering
* publications, then release the name table
*/
- spin_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tn->nametbl_lock);
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
continue;
seq_head = &tipc_nametbl->seq_hlist[i];
hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
- tipc_purge_publications(seq);
+ tipc_purge_publications(net, seq);
}
}
- spin_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tn->nametbl_lock);
synchronize_net();
kfree(tipc_nametbl);
@@ -1033,7 +873,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
*last_publ = p->key;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
- &tipc_genl_v2_family, NLM_F_MULTI,
+ &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_NAME_TABLE_GET);
if (!hdr)
return -EMSGSIZE;
@@ -1106,9 +946,10 @@ static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
return 0;
}
-static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
- u32 *last_lower, u32 *last_publ)
+static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
+ u32 *last_type, u32 *last_lower, u32 *last_publ)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct hlist_head *seq_head;
struct name_seq *seq = NULL;
int err;
@@ -1120,10 +961,10 @@ static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
i = 0;
for (; i < TIPC_NAMETBL_SIZE; i++) {
- seq_head = &tipc_nametbl->seq_hlist[i];
+ seq_head = &tn->nametbl->seq_hlist[i];
if (*last_type) {
- seq = nametbl_find_seq(*last_type);
+ seq = nametbl_find_seq(net, *last_type);
if (!seq)
return -EPIPE;
} else {
@@ -1157,6 +998,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
u32 last_type = cb->args[0];
u32 last_lower = cb->args[1];
u32 last_publ = cb->args[2];
+ struct net *net = sock_net(skb->sk);
struct tipc_nl_msg msg;
if (done)
@@ -1167,7 +1009,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
msg.seq = cb->nlh->nlmsg_seq;
rcu_read_lock();
- err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ);
+ err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
if (!err) {
done = 1;
} else if (err != -EMSGSIZE) {
@@ -1188,3 +1030,41 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+
+void tipc_plist_push(struct tipc_plist *pl, u32 port)
+{
+ struct tipc_plist *nl;
+
+ if (likely(!pl->port)) {
+ pl->port = port;
+ return;
+ }
+ if (pl->port == port)
+ return;
+ list_for_each_entry(nl, &pl->list, list) {
+ if (nl->port == port)
+ return;
+ }
+ nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
+ if (nl) {
+ nl->port = port;
+ list_add(&nl->list, &pl->list);
+ }
+}
+
+u32 tipc_plist_pop(struct tipc_plist *pl)
+{
+ struct tipc_plist *nl;
+ u32 port = 0;
+
+ if (likely(list_empty(&pl->list))) {
+ port = pl->port;
+ pl->port = 0;
+ return port;
+ }
+ nl = list_first_entry(&pl->list, typeof(*nl), list);
+ port = nl->port;
+ list_del(&nl->list);
+ kfree(nl);
+ return port;
+}
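
The change repeated throughout this file is the core conversion of the series: references to the file-scope globals tipc_nametbl, tipc_nametbl_lock and tipc_own_addr become lookups of the per-namespace tipc_net instance obtained with net_generic(). A minimal sketch of that pattern, using a hypothetical helper name purely for illustration (it is not part of this patch):

/* Illustration only: per-netns lookup replacing the old globals. */
static u32 tipc_nametbl_local_publ_count(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 count;

	spin_lock_bh(&tn->nametbl_lock);	/* was: tipc_nametbl_lock */
	count = tn->nametbl->local_publ_count;	/* was: tipc_nametbl->local_publ_count */
	spin_unlock_bh(&tn->nametbl_lock);

	return count;
}
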
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 5f0dee92010d..1524a73830f7 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -1,7 +1,7 @@
/*
* net/tipc/name_table.h: Include file for TIPC name table code
*
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -38,7 +38,7 @@
#define _TIPC_NAME_TABLE_H
struct tipc_subscription;
-struct tipc_port_list;
+struct tipc_plist;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
@@ -95,26 +95,39 @@ struct name_table {
u32 local_publ_count;
};
-extern spinlock_t tipc_nametbl_lock;
-extern struct name_table *tipc_nametbl;
-
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
-struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
-u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
- struct tipc_port_list *dports);
-struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
- u32 scope, u32 port_ref, u32 key);
-int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
-struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
- u32 scope, u32 node, u32 ref,
+u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
+int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
+ u32 limit, struct tipc_plist *dports);
+struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
+ u32 upper, u32 scope, u32 port_ref,
+ u32 key);
+int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
+ u32 key);
+struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
+ u32 lower, u32 upper, u32 scope,
+ u32 node, u32 ref, u32 key);
+struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
+ u32 lower, u32 node, u32 ref,
u32 key);
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node,
- u32 ref, u32 key);
void tipc_nametbl_subscribe(struct tipc_subscription *s);
void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
-int tipc_nametbl_init(void);
-void tipc_nametbl_stop(void);
+int tipc_nametbl_init(struct net *net);
+void tipc_nametbl_stop(struct net *net);
+
+struct tipc_plist {
+ struct list_head list;
+ u32 port;
+};
+
+static inline void tipc_plist_init(struct tipc_plist *pl)
+{
+ INIT_LIST_HEAD(&pl->list);
+ pl->port = 0;
+}
+
+void tipc_plist_push(struct tipc_plist *pl, u32 port);
+u32 tipc_plist_pop(struct tipc_plist *pl);
#endif
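
tipc_plist is the open-coded replacement for the old tipc_port_list: the first port is kept inline in the head element, further ports become kmalloc'ed entries, pushes de-duplicate, and pops free each extra entry as the list drains. A minimal caller sketch, assuming TIPC_CLUSTER_SCOPE as the scope limit (the real consumer sits in the socket receive path, outside this hunk):

/* Illustration only: gather and drain multicast destination ports. */
static void tipc_plist_usage_sketch(struct net *net, u32 type, u32 lower,
				    u32 upper)
{
	struct tipc_plist dports;
	u32 port;

	tipc_plist_init(&dports);
	tipc_nametbl_mc_translate(net, type, lower, upper,
				  TIPC_CLUSTER_SCOPE, &dports);

	/* Drain the list; tipc_plist_pop() frees the extra entries. */
	while ((port = tipc_plist_pop(&dports)))
		pr_info("matching port: %u\n", port); /* real callers hand this to the socket layer */
}
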
diff --git a/net/tipc/net.c b/net/tipc/net.c
index cf13df3cde8f..a54f3cbe2246 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -40,7 +40,6 @@
#include "subscr.h"
#include "socket.h"
#include "node.h"
-#include "config.h"
static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
@@ -108,48 +107,54 @@ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
* - A local spin_lock protecting the queue of subscriber events.
*/
-int tipc_net_start(u32 addr)
+int tipc_net_start(struct net *net, u32 addr)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
char addr_string[16];
int res;
- tipc_own_addr = addr;
- tipc_named_reinit();
- tipc_sk_reinit();
- res = tipc_bclink_init();
+ tn->own_addr = addr;
+ tipc_named_reinit(net);
+ tipc_sk_reinit(net);
+ res = tipc_bclink_init(net);
if (res)
return res;
- tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
- TIPC_ZONE_SCOPE, 0, tipc_own_addr);
+ tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
+ TIPC_ZONE_SCOPE, 0, tn->own_addr);
pr_info("Started in network mode\n");
pr_info("Own node address %s, network identity %u\n",
- tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+ tipc_addr_string_fill(addr_string, tn->own_addr),
+ tn->net_id);
return 0;
}
-void tipc_net_stop(void)
+void tipc_net_stop(struct net *net)
{
- if (!tipc_own_addr)
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ if (!tn->own_addr)
return;
- tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
+ tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
+ tn->own_addr);
rtnl_lock();
- tipc_bearer_stop();
- tipc_bclink_stop();
- tipc_node_stop();
+ tipc_bearer_stop(net);
+ tipc_bclink_stop(net);
+ tipc_node_stop(net);
rtnl_unlock();
pr_info("Left network mode\n");
}
-static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
+static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
void *hdr;
struct nlattr *attrs;
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_NET_GET);
if (!hdr)
return -EMSGSIZE;
@@ -158,7 +163,7 @@ static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
if (!attrs)
goto msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id))
+ if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
goto attr_msg_full;
nla_nest_end(msg->skb, attrs);
@@ -176,6 +181,7 @@ msg_full:
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
int err;
int done = cb->args[0];
struct tipc_nl_msg msg;
@@ -187,7 +193,7 @@ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
msg.portid = NETLINK_CB(cb->skb).portid;
msg.seq = cb->nlh->nlmsg_seq;
- err = __tipc_nl_add_net(&msg);
+ err = __tipc_nl_add_net(net, &msg);
if (err)
goto out;
@@ -200,8 +206,10 @@ out:
int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
- int err;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+ int err;
if (!info->attrs[TIPC_NLA_NET])
return -EINVAL;
@@ -216,21 +224,21 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
u32 val;
/* Can't change net id once TIPC has joined a network */
- if (tipc_own_addr)
+ if (tn->own_addr)
return -EPERM;
val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
if (val < 1 || val > 9999)
return -EINVAL;
- tipc_net_id = val;
+ tn->net_id = val;
}
if (attrs[TIPC_NLA_NET_ADDR]) {
u32 addr;
/* Can't change net addr once TIPC has joined a network */
- if (tipc_own_addr)
+ if (tn->own_addr)
return -EPERM;
addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
@@ -238,7 +246,7 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
rtnl_lock();
- tipc_net_start(addr);
+ tipc_net_start(net, addr);
rtnl_unlock();
}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index a81c1b9eb150..77a7a118911d 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -39,9 +39,9 @@
#include <net/genetlink.h>
-int tipc_net_start(u32 addr);
+int tipc_net_start(struct net *net, u32 addr);
-void tipc_net_stop(void);
+void tipc_net_stop(struct net *net);
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index b891e3905bc4..7f6475efc984 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -35,7 +35,6 @@
*/
#include "core.h"
-#include "config.h"
#include "socket.h"
#include "name_table.h"
#include "bearer.h"
@@ -44,36 +43,6 @@
#include "net.h"
#include <net/genetlink.h>
-static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
-{
- struct sk_buff *rep_buf;
- struct nlmsghdr *rep_nlh;
- struct nlmsghdr *req_nlh = info->nlhdr;
- struct tipc_genlmsghdr *req_userhdr = info->userhdr;
- int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
- u16 cmd;
-
- if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
- cmd = TIPC_CMD_NOT_NET_ADMIN;
- else
- cmd = req_userhdr->cmd;
-
- rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
- nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
- nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
- hdr_space);
-
- if (rep_buf) {
- skb_push(rep_buf, hdr_space);
- rep_nlh = nlmsg_hdr(rep_buf);
- memcpy(rep_nlh, req_nlh, hdr_space);
- rep_nlh->nlmsg_len = rep_buf->len;
- genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
- }
-
- return 0;
-}
-
static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
[TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, },
[TIPC_NLA_BEARER] = { .type = NLA_NESTED, },
@@ -86,32 +55,16 @@ static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
[TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, }
};
-/* Legacy ASCII API */
-static struct genl_family tipc_genl_family = {
- .id = GENL_ID_GENERATE,
- .name = TIPC_GENL_NAME,
- .version = TIPC_GENL_VERSION,
- .hdrsize = TIPC_GENL_HDRLEN,
- .maxattr = 0,
-};
-
-/* Legacy ASCII API */
-static struct genl_ops tipc_genl_ops[] = {
- {
- .cmd = TIPC_GENL_CMD,
- .doit = handle_cmd,
- },
-};
-
/* Users of the legacy API (tipc-config) can't handle that we add operations,
* so we have a separate genl handling for the new API.
*/
-struct genl_family tipc_genl_v2_family = {
+struct genl_family tipc_genl_family = {
.id = GENL_ID_GENERATE,
.name = TIPC_GENL_V2_NAME,
.version = TIPC_GENL_V2_VERSION,
.hdrsize = 0,
.maxattr = TIPC_NLA_MAX,
+ .netnsok = true,
};
static const struct genl_ops tipc_genl_v2_ops[] = {
@@ -197,9 +150,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
{
- u32 maxattr = tipc_genl_v2_family.maxattr;
+ u32 maxattr = tipc_genl_family.maxattr;
- *attr = tipc_genl_v2_family.attrbuf;
+ *attr = tipc_genl_family.attrbuf;
if (!*attr)
return -EOPNOTSUPP;
@@ -210,13 +163,7 @@ int tipc_netlink_start(void)
{
int res;
- res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops);
- if (res) {
- pr_err("Failed to register legacy interface\n");
- return res;
- }
-
- res = genl_register_family_with_ops(&tipc_genl_v2_family,
+ res = genl_register_family_with_ops(&tipc_genl_family,
tipc_genl_v2_ops);
if (res) {
pr_err("Failed to register netlink interface\n");
@@ -228,5 +175,4 @@ int tipc_netlink_start(void)
void tipc_netlink_stop(void)
{
genl_unregister_family(&tipc_genl_family);
- genl_unregister_family(&tipc_genl_v2_family);
}
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h
index 1425c6869de0..08a1db67b927 100644
--- a/net/tipc/netlink.h
+++ b/net/tipc/netlink.h
@@ -36,7 +36,7 @@
#ifndef _TIPC_NETLINK_H
#define _TIPC_NETLINK_H
-extern struct genl_family tipc_genl_v2_family;
+extern struct genl_family tipc_genl_family;
int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
struct tipc_nl_msg {
@@ -45,4 +45,9 @@ struct tipc_nl_msg {
u32 seq;
};
+int tipc_netlink_start(void);
+int tipc_netlink_compat_start(void);
+void tipc_netlink_stop(void);
+void tipc_netlink_compat_stop(void);
+
#endif
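
With the legacy handler moved out of netlink.c, the compat genl family gets its own start/stop pair alongside the existing one. A sketch of how a caller could wire both registrations together, assuming it runs from the module init path (the actual call site is outside this hunk):

/* Illustration only; the real ordering lives in the module init code. */
static int tipc_netlink_families_start(void)
{
	int err;

	err = tipc_netlink_start();
	if (err)
		return err;

	err = tipc_netlink_compat_start();
	if (err)
		tipc_netlink_stop();

	return err;
}
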
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
new file mode 100644
index 000000000000..ce9121e8e990
--- /dev/null
+++ b/net/tipc/netlink_compat.c
@@ -0,0 +1,1084 @@
+/*
+ * Copyright (c) 2014, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "bearer.h"
+#include "link.h"
+#include "name_table.h"
+#include "socket.h"
+#include "node.h"
+#include "net.h"
+#include <net/genetlink.h>
+#include <linux/tipc_config.h>
+
+/* The legacy API had an artificial message length limit called
+ * ULTRA_STRING_MAX_LEN.
+ */
+#define ULTRA_STRING_MAX_LEN 32768
+
+#define TIPC_SKB_MAX TLV_SPACE(ULTRA_STRING_MAX_LEN)
+
+#define REPLY_TRUNCATED "<truncated>\n"
+
+struct tipc_nl_compat_msg {
+ u16 cmd;
+ int rep_type;
+ int rep_size;
+ int req_type;
+ struct sk_buff *rep;
+ struct tlv_desc *req;
+ struct sock *dst_sk;
+};
+
+struct tipc_nl_compat_cmd_dump {
+ int (*header)(struct tipc_nl_compat_msg *);
+ int (*dumpit)(struct sk_buff *, struct netlink_callback *);
+ int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs);
+};
+
+struct tipc_nl_compat_cmd_doit {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
+};
+
+static int tipc_skb_tailroom(struct sk_buff *skb)
+{
+ int tailroom;
+ int limit;
+
+ tailroom = skb_tailroom(skb);
+ limit = TIPC_SKB_MAX - skb->len;
+
+ if (tailroom < limit)
+ return tailroom;
+
+ return limit;
+}
+
+static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+{
+ struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
+
+ if (tipc_skb_tailroom(skb) < TLV_SPACE(len))
+ return -EMSGSIZE;
+
+ skb_put(skb, TLV_SPACE(len));
+ tlv->tlv_type = htons(type);
+ tlv->tlv_len = htons(TLV_LENGTH(len));
+ if (len && data)
+ memcpy(TLV_DATA(tlv), data, len);
+
+ return 0;
+}
+
+static void tipc_tlv_init(struct sk_buff *skb, u16 type)
+{
+ struct tlv_desc *tlv = (struct tlv_desc *)skb->data;
+
+ TLV_SET_LEN(tlv, 0);
+ TLV_SET_TYPE(tlv, type);
+ skb_put(skb, sizeof(struct tlv_desc));
+}
+
+static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...)
+{
+ int n;
+ u16 len;
+ u32 rem;
+ char *buf;
+ struct tlv_desc *tlv;
+ va_list args;
+
+ rem = tipc_skb_tailroom(skb);
+
+ tlv = (struct tlv_desc *)skb->data;
+ len = TLV_GET_LEN(tlv);
+ buf = TLV_DATA(tlv) + len;
+
+ va_start(args, fmt);
+ n = vscnprintf(buf, rem, fmt, args);
+ va_end(args);
+
+ TLV_SET_LEN(tlv, n + len);
+ skb_put(skb, n);
+
+ return n;
+}
+
+static struct sk_buff *tipc_tlv_alloc(int size)
+{
+ int hdr_len;
+ struct sk_buff *buf;
+
+ size = TLV_SPACE(size);
+ hdr_len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+
+ buf = alloc_skb(hdr_len + size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ skb_reserve(buf, hdr_len);
+
+ return buf;
+}
+
+static struct sk_buff *tipc_get_err_tlv(char *str)
+{
+ int str_len = strlen(str) + 1;
+ struct sk_buff *buf;
+
+ buf = tipc_tlv_alloc(TLV_SPACE(str_len));
+ if (buf)
+ tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len);
+
+ return buf;
+}
+
+static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg,
+ struct sk_buff *arg)
+{
+ int len = 0;
+ int err;
+ struct sk_buff *buf;
+ struct nlmsghdr *nlmsg;
+ struct netlink_callback cb;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.nlh = (struct nlmsghdr *)arg->data;
+ cb.skb = arg;
+
+ buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->sk = msg->dst_sk;
+
+ do {
+ int rem;
+
+ len = (*cmd->dumpit)(buf, &cb);
+
+ nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
+ struct nlattr **attrs;
+
+ err = tipc_nlmsg_parse(nlmsg, &attrs);
+ if (err)
+ goto err_out;
+
+ err = (*cmd->format)(msg, attrs);
+ if (err)
+ goto err_out;
+
+ if (tipc_skb_tailroom(msg->rep) <= 1) {
+ err = -EMSGSIZE;
+ goto err_out;
+ }
+ }
+
+ skb_reset_tail_pointer(buf);
+ buf->len = 0;
+
+ } while (len);
+
+ err = 0;
+
+err_out:
+ kfree_skb(buf);
+
+ if (err == -EMSGSIZE) {
+ /* The legacy API only considered messages filling
+ * "ULTRA_STRING_MAX_LEN" to be truncated.
+ */
+ if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {
+ char *tail = skb_tail_pointer(msg->rep);
+
+ if (*tail != '\0')
+ sprintf(tail - sizeof(REPLY_TRUNCATED) - 1,
+ REPLY_TRUNCATED);
+ }
+
+ return 0;
+ }
+
+ return err;
+}
+
+static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg)
+{
+ int err;
+ struct sk_buff *arg;
+
+ if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ return -EINVAL;
+
+ msg->rep = tipc_tlv_alloc(msg->rep_size);
+ if (!msg->rep)
+ return -ENOMEM;
+
+ if (msg->rep_type)
+ tipc_tlv_init(msg->rep, msg->rep_type);
+
+ if (cmd->header)
+ (*cmd->header)(msg);
+
+ arg = nlmsg_new(0, GFP_KERNEL);
+ if (!arg) {
+ kfree_skb(msg->rep);
+ return -ENOMEM;
+ }
+
+ err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+ if (err)
+ kfree_skb(msg->rep);
+
+ kfree_skb(arg);
+
+ return err;
+}
+
+static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
+ struct tipc_nl_compat_msg *msg)
+{
+ int err;
+ struct sk_buff *doit_buf;
+ struct sk_buff *trans_buf;
+ struct nlattr **attrbuf;
+ struct genl_info info;
+
+ trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!trans_buf)
+ return -ENOMEM;
+
+ err = (*cmd->transcode)(trans_buf, msg);
+ if (err)
+ goto trans_out;
+
+ attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf) {
+ err = -ENOMEM;
+ goto trans_out;
+ }
+
+ err = nla_parse(attrbuf, tipc_genl_family.maxattr,
+ (const struct nlattr *)trans_buf->data,
+ trans_buf->len, NULL);
+ if (err)
+ goto parse_out;
+
+ doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!doit_buf) {
+ err = -ENOMEM;
+ goto parse_out;
+ }
+
+ doit_buf->sk = msg->dst_sk;
+
+ memset(&info, 0, sizeof(info));
+ info.attrs = attrbuf;
+
+ err = (*cmd->doit)(doit_buf, &info);
+
+ kfree_skb(doit_buf);
+parse_out:
+ kfree(attrbuf);
+trans_out:
+ kfree_skb(trans_buf);
+
+ return err;
+}
+
+static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
+ struct tipc_nl_compat_msg *msg)
+{
+ int err;
+
+ if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ return -EINVAL;
+
+ err = __tipc_nl_compat_doit(cmd, msg);
+ if (err)
+ return err;
+
+ /* The legacy API considered an empty message a success message */
+ msg->rep = tipc_tlv_alloc(0);
+ if (!msg->rep)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+
+ nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
+ NULL);
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
+ nla_data(bearer[TIPC_NLA_BEARER_NAME]),
+ nla_len(bearer[TIPC_NLA_BEARER_NAME]));
+}
+
+static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_bearer_config *b;
+
+ b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
+
+ bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+ if (!bearer)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain)))
+ return -EMSGSIZE;
+
+ if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) {
+ prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+ if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority)))
+ return -EMSGSIZE;
+ nla_nest_end(skb, prop);
+ }
+ nla_nest_end(skb, bearer);
+
+ return 0;
+}
+
+static int tipc_nl_compat_bearer_disable(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ char *name;
+ struct nlattr *bearer;
+
+ name = (char *)TLV_DATA(msg->req);
+
+ bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+ if (!bearer)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, bearer);
+
+ return 0;
+}
+
+static inline u32 perc(u32 count, u32 total)
+{
+ return (count * 100 + (total / 2)) / total;
+}
+
+static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg,
+ struct nlattr *prop[], struct nlattr *stats[])
+{
+ tipc_tlv_sprintf(msg->rep, " Window:%u packets\n",
+ nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
+
+ tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " Congestion link:%u Send queue max:%u avg:%u",
+ nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
+ nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
+}
+
+static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ char *name;
+ struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
+ struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
+ struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+
+ nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+
+ nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
+ NULL);
+
+ nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
+ NULL);
+
+ name = (char *)TLV_DATA(msg->req);
+ if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
+ return 0;
+
+ tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
+ nla_data(link[TIPC_NLA_LINK_NAME]));
+
+ if (link[TIPC_NLA_LINK_BROADCAST]) {
+ __fill_bc_link_stat(msg, prop, stats);
+ return 0;
+ }
+
+ if (link[TIPC_NLA_LINK_ACTIVE])
+ tipc_tlv_sprintf(msg->rep, " ACTIVE");
+ else if (link[TIPC_NLA_LINK_UP])
+ tipc_tlv_sprintf(msg->rep, " STANDBY");
+ else
+ tipc_tlv_sprintf(msg->rep, " DEFUNCT");
+
+ tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u",
+ nla_get_u32(link[TIPC_NLA_LINK_MTU]),
+ nla_get_u32(prop[TIPC_NLA_PROP_PRIO]));
+
+ tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n",
+ nla_get_u32(prop[TIPC_NLA_PROP_TOL]),
+ nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(link[TIPC_NLA_LINK_RX]) -
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(link[TIPC_NLA_LINK_TX]) -
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX profile sample:%u packets average:%u octets\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) /
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ",
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
+
+ tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
+
+ tipc_tlv_sprintf(msg->rep,
+ " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " Congestion link:%u Send queue max:%u avg:%u",
+ nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
+ nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
+
+ return 0;
+}
+
+static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
+ struct tipc_link_info link_info;
+
+ nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+
+ link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+ link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
+ strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
+ &link_info, sizeof(link_info));
+}
+
+static int tipc_nl_compat_link_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *link;
+ struct nlattr *prop;
+ struct tipc_link_config *lc;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ link = nla_nest_start(skb, TIPC_NLA_LINK);
+ if (!link)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
+ return -EMSGSIZE;
+
+ prop = nla_nest_start(skb, TIPC_NLA_LINK_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+
+ if (msg->cmd == TIPC_CMD_SET_LINK_PRI) {
+ if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value)))
+ return -EMSGSIZE;
+ } else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) {
+ if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)))
+ return -EMSGSIZE;
+ } else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) {
+ if (nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value)))
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, prop);
+ nla_nest_end(skb, link);
+
+ return 0;
+}
+
+static int tipc_nl_compat_link_reset_stats(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ char *name;
+ struct nlattr *link;
+
+ name = (char *)TLV_DATA(msg->req);
+
+ link = nla_nest_start(skb, TIPC_NLA_LINK);
+ if (!link)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, link);
+
+ return 0;
+}
+
+static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
+{
+ int i;
+ u32 depth;
+ struct tipc_name_table_query *ntq;
+ static const char * const header[] = {
+ "Type ",
+ "Lower Upper ",
+ "Port Identity ",
+ "Publication Scope"
+ };
+
+ ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+
+ depth = ntohl(ntq->depth);
+
+ if (depth > 4)
+ depth = 4;
+ for (i = 0; i < depth; i++)
+ tipc_tlv_sprintf(msg->rep, header[i]);
+ tipc_tlv_sprintf(msg->rep, "\n");
+
+ return 0;
+}
+
+static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ char port_str[27];
+ struct tipc_name_table_query *ntq;
+ struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1];
+ struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+ u32 node, depth, type, lowbound, upbound;
+ static const char * const scope_str[] = {"", " zone", " cluster",
+ " node"};
+
+ nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
+ attrs[TIPC_NLA_NAME_TABLE], NULL);
+
+ nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
+ NULL);
+
+ ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+
+ depth = ntohl(ntq->depth);
+ type = ntohl(ntq->type);
+ lowbound = ntohl(ntq->lowbound);
+ upbound = ntohl(ntq->upbound);
+
+ if (!(depth & TIPC_NTQ_ALLTYPES) &&
+ (type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])))
+ return 0;
+ if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])))
+ return 0;
+ if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER])))
+ return 0;
+
+ tipc_tlv_sprintf(msg->rep, "%-10u ",
+ nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]));
+
+ if (depth == 1)
+ goto out;
+
+ tipc_tlv_sprintf(msg->rep, "%-10u %-10u ",
+ nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]),
+ nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]));
+
+ if (depth == 2)
+ goto out;
+
+ node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]);
+ sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node),
+ tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF]));
+ tipc_tlv_sprintf(msg->rep, "%-26s ", port_str);
+
+ if (depth == 3)
+ goto out;
+
+ tipc_tlv_sprintf(msg->rep, "%-10u %s",
+ nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
+ scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
+out:
+ tipc_tlv_sprintf(msg->rep, "\n");
+
+ return 0;
+}
+
+static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ u32 type, lower, upper;
+ struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+
+ nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
+
+ type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
+ lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
+ upper = nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]);
+
+ if (lower == upper)
+ tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower);
+ else
+ tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper);
+
+ return 0;
+}
+
+static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
+{
+ int err;
+ void *hdr;
+ struct nlattr *nest;
+ struct sk_buff *args;
+ struct tipc_nl_compat_cmd_dump dump;
+
+ args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
+ TIPC_NL_PUBL_GET);
+
+ nest = nla_nest_start(args, TIPC_NLA_SOCK);
+ if (!nest) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
+
+ if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(args, nest);
+ genlmsg_end(args, hdr);
+
+ dump.dumpit = tipc_nl_publ_dump;
+ dump.format = __tipc_nl_compat_publ_dump;
+
+ err = __tipc_nl_compat_dumpit(&dump, msg, args);
+
+ kfree_skb(args);
+
+ return err;
+}
+
+static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ int err;
+ u32 sock_ref;
+ struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
+
+ nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
+
+ sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+ tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
+
+ if (sock[TIPC_NLA_SOCK_CON]) {
+ u32 node;
+ struct nlattr *con[TIPC_NLA_CON_MAX + 1];
+
+ nla_parse_nested(con, TIPC_NLA_CON_MAX, sock[TIPC_NLA_SOCK_CON],
+ NULL);
+
+ node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
+ tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
+ tipc_zone(node),
+ tipc_cluster(node),
+ tipc_node(node),
+ nla_get_u32(con[TIPC_NLA_CON_SOCK]));
+
+ if (con[TIPC_NLA_CON_FLAG])
+ tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n",
+ nla_get_u32(con[TIPC_NLA_CON_TYPE]),
+ nla_get_u32(con[TIPC_NLA_CON_INST]));
+ else
+ tipc_tlv_sprintf(msg->rep, "\n");
+ } else if (sock[TIPC_NLA_SOCK_HAS_PUBL]) {
+ tipc_tlv_sprintf(msg->rep, " bound to");
+
+ err = tipc_nl_compat_publ_dump(msg, sock_ref);
+ if (err)
+ return err;
+ }
+ tipc_tlv_sprintf(msg->rep, "\n");
+
+ return 0;
+}
+
+static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+
+ nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
+ NULL);
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
+ nla_data(media[TIPC_NLA_MEDIA_NAME]),
+ nla_len(media[TIPC_NLA_MEDIA_NAME]));
+}
+
+static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct tipc_node_info node_info;
+ struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+
+ nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
+
+ node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
+ node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info,
+ sizeof(node_info));
+}
+
+static int tipc_nl_compat_net_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ u32 val;
+ struct nlattr *net;
+
+ val = ntohl(*(__be32 *)TLV_DATA(msg->req));
+
+ net = nla_nest_start(skb, TIPC_NLA_NET);
+ if (!net)
+ return -EMSGSIZE;
+
+ if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) {
+ if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val))
+ return -EMSGSIZE;
+ } else if (msg->cmd == TIPC_CMD_SET_NETID) {
+ if (nla_put_u32(skb, TIPC_NLA_NET_ID, val))
+ return -EMSGSIZE;
+ }
+ nla_nest_end(skb, net);
+
+ return 0;
+}
+
+static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ __be32 id;
+ struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+
+ nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
+ id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
+}
+
+static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg)
+{
+ msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN);
+ if (!msg->rep)
+ return -ENOMEM;
+
+ tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING);
+ tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n");
+
+ return 0;
+}
+
+static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
+{
+ struct tipc_nl_compat_cmd_dump dump;
+ struct tipc_nl_compat_cmd_doit doit;
+
+ memset(&dump, 0, sizeof(dump));
+ memset(&doit, 0, sizeof(doit));
+
+ switch (msg->cmd) {
+ case TIPC_CMD_NOOP:
+ msg->rep = tipc_tlv_alloc(0);
+ if (!msg->rep)
+ return -ENOMEM;
+ return 0;
+ case TIPC_CMD_GET_BEARER_NAMES:
+ msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME);
+ dump.dumpit = tipc_nl_bearer_dump;
+ dump.format = tipc_nl_compat_bearer_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_ENABLE_BEARER:
+ msg->req_type = TIPC_TLV_BEARER_CONFIG;
+ doit.doit = tipc_nl_bearer_enable;
+ doit.transcode = tipc_nl_compat_bearer_enable;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_DISABLE_BEARER:
+ msg->req_type = TIPC_TLV_BEARER_NAME;
+ doit.doit = tipc_nl_bearer_disable;
+ doit.transcode = tipc_nl_compat_bearer_disable;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_SHOW_LINK_STATS:
+ msg->req_type = TIPC_TLV_LINK_NAME;
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ msg->rep_type = TIPC_TLV_ULTRA_STRING;
+ dump.dumpit = tipc_nl_link_dump;
+ dump.format = tipc_nl_compat_link_stat_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_GET_LINKS:
+ msg->req_type = TIPC_TLV_NET_ADDR;
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ dump.dumpit = tipc_nl_link_dump;
+ dump.format = tipc_nl_compat_link_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SET_LINK_TOL:
+ case TIPC_CMD_SET_LINK_PRI:
+ case TIPC_CMD_SET_LINK_WINDOW:
+ msg->req_type = TIPC_TLV_LINK_CONFIG;
+ doit.doit = tipc_nl_link_set;
+ doit.transcode = tipc_nl_compat_link_set;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_RESET_LINK_STATS:
+ msg->req_type = TIPC_TLV_LINK_NAME;
+ doit.doit = tipc_nl_link_reset_stats;
+ doit.transcode = tipc_nl_compat_link_reset_stats;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_SHOW_NAME_TABLE:
+ msg->req_type = TIPC_TLV_NAME_TBL_QUERY;
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ msg->rep_type = TIPC_TLV_ULTRA_STRING;
+ dump.header = tipc_nl_compat_name_table_dump_header;
+ dump.dumpit = tipc_nl_name_table_dump;
+ dump.format = tipc_nl_compat_name_table_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SHOW_PORTS:
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ msg->rep_type = TIPC_TLV_ULTRA_STRING;
+ dump.dumpit = tipc_nl_sk_dump;
+ dump.format = tipc_nl_compat_sk_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_GET_MEDIA_NAMES:
+ msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME);
+ dump.dumpit = tipc_nl_media_dump;
+ dump.format = tipc_nl_compat_media_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_GET_NODES:
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ dump.dumpit = tipc_nl_node_dump;
+ dump.format = tipc_nl_compat_node_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SET_NODE_ADDR:
+ msg->req_type = TIPC_TLV_NET_ADDR;
+ doit.doit = tipc_nl_net_set;
+ doit.transcode = tipc_nl_compat_net_set;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_SET_NETID:
+ msg->req_type = TIPC_TLV_UNSIGNED;
+ doit.doit = tipc_nl_net_set;
+ doit.transcode = tipc_nl_compat_net_set;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_GET_NETID:
+ msg->rep_size = sizeof(u32);
+ dump.dumpit = tipc_nl_net_dump;
+ dump.format = tipc_nl_compat_net_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SHOW_STATS:
+ return tipc_cmd_show_stats_compat(msg);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ int len;
+ struct tipc_nl_compat_msg msg;
+ struct nlmsghdr *req_nlh;
+ struct nlmsghdr *rep_nlh;
+ struct tipc_genlmsghdr *req_userhdr = info->userhdr;
+ struct net *net = genl_info_net(info);
+
+ memset(&msg, 0, sizeof(msg));
+
+ req_nlh = (struct nlmsghdr *)skb->data;
+ msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
+ msg.cmd = req_userhdr->cmd;
+ msg.dst_sk = info->dst_sk;
+
+ if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
+ err = -EACCES;
+ goto send;
+ }
+
+ len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ err = -EOPNOTSUPP;
+ goto send;
+ }
+
+ err = tipc_nl_compat_handle(&msg);
+ if (err == -EOPNOTSUPP)
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ else if (err == -EINVAL)
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
+send:
+ if (!msg.rep)
+ return err;
+
+ len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ skb_push(msg.rep, len);
+ rep_nlh = nlmsg_hdr(msg.rep);
+ memcpy(rep_nlh, info->nlhdr, len);
+ rep_nlh->nlmsg_len = msg.rep->len;
+ genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid);
+
+ return err;
+}
+
+static struct genl_family tipc_genl_compat_family = {
+ .id = GENL_ID_GENERATE,
+ .name = TIPC_GENL_NAME,
+ .version = TIPC_GENL_VERSION,
+ .hdrsize = TIPC_GENL_HDRLEN,
+ .maxattr = 0,
+ .netnsok = true,
+};
+
+static struct genl_ops tipc_genl_compat_ops[] = {
+ {
+ .cmd = TIPC_GENL_CMD,
+ .doit = tipc_nl_compat_recv,
+ },
+};
+
+int tipc_netlink_compat_start(void)
+{
+ int res;
+
+ res = genl_register_family_with_ops(&tipc_genl_compat_family,
+ tipc_genl_compat_ops);
+ if (res) {
+ pr_err("Failed to register legacy compat interface\n");
+ return res;
+ }
+
+ return 0;
+}
+
+void tipc_netlink_compat_stop(void)
+{
+ genl_unregister_family(&tipc_genl_compat_family);
+}
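
Each legacy command above is described either by a tipc_nl_compat_cmd_dump (header/dumpit/format) or a tipc_nl_compat_cmd_doit (doit/transcode) descriptor, so the compat layer is extended by filling in one of the two structs and dispatching it from tipc_nl_compat_handle(). A sketch of the dump-style wiring, mirroring the TIPC_CMD_GET_NETID case; the wrapper function name is hypothetical:

/* Illustration only: dump-style descriptor for one legacy command. */
static int tipc_nl_compat_get_netid(struct tipc_nl_compat_msg *msg)
{
	struct tipc_nl_compat_cmd_dump dump;

	memset(&dump, 0, sizeof(dump));
	msg->rep_size = sizeof(u32);		/* reply is one TIPC_TLV_UNSIGNED */
	dump.dumpit = tipc_nl_net_dump;		/* new-API dump callback */
	dump.format = tipc_nl_compat_net_dump;	/* nlattr -> legacy TLV */

	return tipc_nl_compat_dumpit(&dump, msg);
}
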
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 8d353ec77a66..86152de8248d 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -35,22 +35,14 @@
*/
#include "core.h"
-#include "config.h"
+#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
-#define NODE_HTABLE_SIZE 512
-
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
-static struct hlist_head node_htable[NODE_HTABLE_SIZE];
-LIST_HEAD(tipc_node_list);
-static u32 tipc_num_nodes;
-static u32 tipc_num_links;
-static DEFINE_SPINLOCK(node_list_lock);
-
struct tipc_sock_conn {
u32 port;
u32 peer_port;
@@ -78,15 +70,17 @@ static unsigned int tipc_hashfn(u32 addr)
/*
* tipc_node_find - locate specified node object, if it exists
*/
-struct tipc_node *tipc_node_find(u32 addr)
+struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node;
- if (unlikely(!in_own_cluster_exact(addr)))
+ if (unlikely(!in_own_cluster_exact(net, addr)))
return NULL;
rcu_read_lock();
- hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+ hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
+ hash) {
if (node->addr == addr) {
rcu_read_unlock();
return node;
@@ -96,72 +90,68 @@ struct tipc_node *tipc_node_find(u32 addr)
return NULL;
}
-struct tipc_node *tipc_node_create(u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n_ptr, *temp_node;
- spin_lock_bh(&node_list_lock);
-
+ spin_lock_bh(&tn->node_list_lock);
+ n_ptr = tipc_node_find(net, addr);
+ if (n_ptr)
+ goto exit;
n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
if (!n_ptr) {
- spin_unlock_bh(&node_list_lock);
pr_warn("Node creation failed, no memory\n");
- return NULL;
+ goto exit;
}
-
n_ptr->addr = addr;
+ n_ptr->net = net;
spin_lock_init(&n_ptr->lock);
INIT_HLIST_NODE(&n_ptr->hash);
INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->publ_list);
INIT_LIST_HEAD(&n_ptr->conn_sks);
- skb_queue_head_init(&n_ptr->waiting_sks);
__skb_queue_head_init(&n_ptr->bclink.deferred_queue);
-
- hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
-
- list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
+ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
if (n_ptr->addr < temp_node->addr)
break;
}
list_add_tail_rcu(&n_ptr->list, &temp_node->list);
n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
n_ptr->signature = INVALID_NODE_SIG;
-
- tipc_num_nodes++;
-
- spin_unlock_bh(&node_list_lock);
+exit:
+ spin_unlock_bh(&tn->node_list_lock);
return n_ptr;
}
-static void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
{
list_del_rcu(&n_ptr->list);
hlist_del_rcu(&n_ptr->hash);
kfree_rcu(n_ptr, rcu);
-
- tipc_num_nodes--;
}
-void tipc_node_stop(void)
+void tipc_node_stop(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node, *t_node;
- spin_lock_bh(&node_list_lock);
- list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
- tipc_node_delete(node);
- spin_unlock_bh(&node_list_lock);
+ spin_lock_bh(&tn->node_list_lock);
+ list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+ tipc_node_delete(tn, node);
+ spin_unlock_bh(&tn->node_list_lock);
}
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
struct tipc_node *node;
struct tipc_sock_conn *conn;
- if (in_own_node(dnode))
+ if (in_own_node(net, dnode))
return 0;
- node = tipc_node_find(dnode);
+ node = tipc_node_find(net, dnode);
if (!node) {
pr_warn("Connecting sock to node 0x%x failed\n", dnode);
return -EHOSTUNREACH;
@@ -179,15 +169,15 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
return 0;
}
-void tipc_node_remove_conn(u32 dnode, u32 port)
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
struct tipc_node *node;
struct tipc_sock_conn *conn, *safe;
- if (in_own_node(dnode))
+ if (in_own_node(net, dnode))
return;
- node = tipc_node_find(dnode);
+ node = tipc_node_find(net, dnode);
if (!node)
return;
@@ -201,23 +191,6 @@ void tipc_node_remove_conn(u32 dnode, u32 port)
tipc_node_unlock(node);
}
-void tipc_node_abort_sock_conns(struct list_head *conns)
-{
- struct tipc_sock_conn *conn, *safe;
- struct sk_buff *buf;
-
- list_for_each_entry_safe(conn, safe, conns, list) {
- buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
- SHORT_H_SIZE, 0, tipc_own_addr,
- conn->peer_node, conn->port,
- conn->peer_port, TIPC_ERR_NO_NODE);
- if (likely(buf))
- tipc_sk_rcv(buf);
- list_del(&conn->list);
- kfree(conn);
- }
-}
-
/**
* tipc_node_link_up - handle addition of link
*
@@ -231,8 +204,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
- pr_info("Established link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
+ pr_debug("Established link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->net_plane);
if (!active[0]) {
active[0] = active[1] = l_ptr;
@@ -240,7 +213,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
goto exit;
}
if (l_ptr->priority < active[0]->priority) {
- pr_info("New link <%s> becomes standby\n", l_ptr->name);
+ pr_debug("New link <%s> becomes standby\n", l_ptr->name);
goto exit;
}
tipc_link_dup_queue_xmit(active[0], l_ptr);
@@ -248,9 +221,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
active[0] = l_ptr;
goto exit;
}
- pr_info("Old link <%s> becomes standby\n", active[0]->name);
+ pr_debug("Old link <%s> becomes standby\n", active[0]->name);
if (active[1] != active[0])
- pr_info("Old link <%s> becomes standby\n", active[1]->name);
+ pr_debug("Old link <%s> becomes standby\n", active[1]->name);
active[0] = active[1] = l_ptr;
exit:
/* Leave room for changeover header when returning 'mtu' to users: */
@@ -290,6 +263,7 @@ static void node_select_active_links(struct tipc_node *n_ptr)
*/
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
+ struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
struct tipc_link **active;
n_ptr->working_links--;
@@ -297,12 +271,12 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
if (!tipc_link_is_active(l_ptr)) {
- pr_info("Lost standby link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
+ pr_debug("Lost standby link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->net_plane);
return;
}
- pr_info("Lost link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
+ pr_debug("Lost link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->net_plane);
active = &n_ptr->active_links[0];
if (active[0] == l_ptr)
@@ -324,7 +298,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
}
/* Loopback link went down? No fragmentation needed from now on. */
- if (n_ptr->addr == tipc_own_addr) {
+ if (n_ptr->addr == tn->own_addr) {
n_ptr->act_mtus[0] = MAX_MSG_SIZE;
n_ptr->act_mtus[1] = MAX_MSG_SIZE;
}
@@ -343,9 +317,6 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
n_ptr->links[l_ptr->bearer_id] = l_ptr;
- spin_lock_bh(&node_list_lock);
- tipc_num_links++;
- spin_unlock_bh(&node_list_lock);
n_ptr->link_cnt++;
}
@@ -357,9 +328,6 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
if (l_ptr != n_ptr->links[i])
continue;
n_ptr->links[i] = NULL;
- spin_lock_bh(&node_list_lock);
- tipc_num_links--;
- spin_unlock_bh(&node_list_lock);
n_ptr->link_cnt--;
}
}
@@ -368,17 +336,21 @@ static void node_established_contact(struct tipc_node *n_ptr)
{
n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
n_ptr->bclink.oos_state = 0;
- n_ptr->bclink.acked = tipc_bclink_get_last_sent();
- tipc_bclink_add_node(n_ptr->addr);
+ n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
+ tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}
static void node_lost_contact(struct tipc_node *n_ptr)
{
char addr_string[16];
- u32 i;
+ struct tipc_sock_conn *conn, *safe;
+ struct list_head *conns = &n_ptr->conn_sks;
+ struct sk_buff *skb;
+ struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
+ uint i;
- pr_info("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ pr_debug("Lost contact with %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.recv_permitted) {
@@ -389,7 +361,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
n_ptr->bclink.reasm_buf = NULL;
}
- tipc_bclink_remove_node(n_ptr->addr);
+ tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
n_ptr->bclink.recv_permitted = false;
@@ -403,126 +375,33 @@ static void node_lost_contact(struct tipc_node *n_ptr)
l_ptr->reset_checkpoint = l_ptr->next_in_no;
l_ptr->exp_msg_count = 0;
tipc_link_reset_fragments(l_ptr);
- }
-
- n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
- /* Notify subscribers and prevent re-contact with node until
- * cleanup is done.
- */
- n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
- TIPC_NOTIFY_NODE_DOWN;
-}
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
-{
- u32 domain;
- struct sk_buff *buf;
- struct tipc_node *n_ptr;
- struct tipc_node_info node_info;
- u32 payload_size;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (!tipc_addr_domain_valid(domain))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (network address)");
-
- spin_lock_bh(&node_list_lock);
- if (!tipc_num_nodes) {
- spin_unlock_bh(&node_list_lock);
- return tipc_cfg_reply_none();
+ /* Link marked for deletion after failover? => do it now */
+ if (l_ptr->flags & LINK_STOPPED)
+ tipc_link_delete(l_ptr);
}
- /* For now, get space for all other nodes */
- payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
- if (payload_size > 32768u) {
- spin_unlock_bh(&node_list_lock);
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (too many nodes)");
- }
- spin_unlock_bh(&node_list_lock);
-
- buf = tipc_cfg_reply_alloc(payload_size);
- if (!buf)
- return NULL;
-
- /* Add TLVs for all nodes in scope */
- rcu_read_lock();
- list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
- if (!tipc_in_scope(domain, n_ptr->addr))
- continue;
- node_info.addr = htonl(n_ptr->addr);
- node_info.up = htonl(tipc_node_is_up(n_ptr));
- tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
- &node_info, sizeof(node_info));
- }
- rcu_read_unlock();
- return buf;
-}
-
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
-{
- u32 domain;
- struct sk_buff *buf;
- struct tipc_node *n_ptr;
- struct tipc_link_info link_info;
- u32 payload_size;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (!tipc_addr_domain_valid(domain))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (network address)");
-
- if (!tipc_own_addr)
- return tipc_cfg_reply_none();
-
- spin_lock_bh(&node_list_lock);
- /* Get space for all unicast links + broadcast link */
- payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
- if (payload_size > 32768u) {
- spin_unlock_bh(&node_list_lock);
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (too many links)");
- }
- spin_unlock_bh(&node_list_lock);
-
- buf = tipc_cfg_reply_alloc(payload_size);
- if (!buf)
- return NULL;
+ n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
- /* Add TLV for broadcast link */
- link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
- link_info.up = htonl(1);
- strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
- tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
+ /* Prevent re-contact with node until cleanup is done */
+ n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
- /* Add TLVs for any other links in scope */
- rcu_read_lock();
- list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
- u32 i;
+ /* Notify publications from this node */
+ n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
- if (!tipc_in_scope(domain, n_ptr->addr))
- continue;
- tipc_node_lock(n_ptr);
- for (i = 0; i < MAX_BEARERS; i++) {
- if (!n_ptr->links[i])
- continue;
- link_info.dest = htonl(n_ptr->addr);
- link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
- strcpy(link_info.str, n_ptr->links[i]->name);
- tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
- &link_info, sizeof(link_info));
+ /* Notify sockets connected to node */
+ list_for_each_entry_safe(conn, safe, conns, list) {
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ SHORT_H_SIZE, 0, tn->own_addr,
+ conn->peer_node, conn->port,
+ conn->peer_port, TIPC_ERR_NO_NODE);
+ if (likely(skb)) {
+ skb_queue_tail(n_ptr->inputq, skb);
+ n_ptr->action_flags |= TIPC_MSG_EVT;
}
- tipc_node_unlock(n_ptr);
+ list_del(&conn->list);
+ kfree(conn);
}
- rcu_read_unlock();
- return buf;
}
/**
@@ -534,10 +413,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
*
* Returns 0 on success
*/
-int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
+ char *linkname, size_t len)
{
struct tipc_link *link;
- struct tipc_node *node = tipc_node_find(addr);
+ struct tipc_node *node = tipc_node_find(net, addr);
if ((bearer_id >= MAX_BEARERS) || !node)
return -EINVAL;
@@ -554,58 +434,60 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
void tipc_node_unlock(struct tipc_node *node)
{
- LIST_HEAD(nsub_list);
- LIST_HEAD(conn_sks);
- struct sk_buff_head waiting_sks;
+ struct net *net = node->net;
u32 addr = 0;
- int flags = node->action_flags;
+ u32 flags = node->action_flags;
u32 link_id = 0;
+ struct list_head *publ_list;
+ struct sk_buff_head *inputq = node->inputq;
+ struct sk_buff_head *namedq;
- if (likely(!flags)) {
+ if (likely(!flags || (flags == TIPC_MSG_EVT))) {
+ node->action_flags = 0;
spin_unlock_bh(&node->lock);
+ if (flags == TIPC_MSG_EVT)
+ tipc_sk_rcv(net, inputq);
return;
}
addr = node->addr;
link_id = node->link_id;
- __skb_queue_head_init(&waiting_sks);
+ namedq = node->namedq;
+ publ_list = &node->publ_list;
- if (flags & TIPC_WAKEUP_USERS)
- skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
-
- if (flags & TIPC_NOTIFY_NODE_DOWN) {
- list_replace_init(&node->publ_list, &nsub_list);
- list_replace_init(&node->conn_sks, &conn_sks);
- }
- node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
- TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
- TIPC_NOTIFY_LINK_DOWN |
- TIPC_WAKEUP_BCAST_USERS);
+ node->action_flags &= ~(TIPC_MSG_EVT |
+ TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+ TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
+ TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
+ TIPC_NAMED_MSG_EVT);
spin_unlock_bh(&node->lock);
- while (!skb_queue_empty(&waiting_sks))
- tipc_sk_rcv(__skb_dequeue(&waiting_sks));
-
- if (!list_empty(&conn_sks))
- tipc_node_abort_sock_conns(&conn_sks);
-
- if (!list_empty(&nsub_list))
- tipc_publ_notify(&nsub_list, addr);
+ if (flags & TIPC_NOTIFY_NODE_DOWN)
+ tipc_publ_notify(net, publ_list, addr);
if (flags & TIPC_WAKEUP_BCAST_USERS)
- tipc_bclink_wakeup_users();
+ tipc_bclink_wakeup_users(net);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(addr);
+ tipc_named_node_up(net, addr);
if (flags & TIPC_NOTIFY_LINK_UP)
- tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
+ tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
TIPC_NODE_SCOPE, link_id, addr);
if (flags & TIPC_NOTIFY_LINK_DOWN)
- tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
+ tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
link_id, addr);
+
+ if (flags & TIPC_MSG_EVT)
+ tipc_sk_rcv(net, inputq);
+
+ if (flags & TIPC_NAMED_MSG_EVT)
+ tipc_named_rcv(net, namedq);
+
+ if (flags & TIPC_BCAST_MSG_EVT)
+ tipc_bclink_input(net);
}
/* Caller should hold node lock for the passed node */
@@ -614,7 +496,7 @@ static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
void *hdr;
struct nlattr *attrs;
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_NODE_GET);
if (!hdr)
return -EMSGSIZE;
@@ -645,6 +527,8 @@ msg_full:
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
int done = cb->args[0];
int last_addr = cb->args[1];
struct tipc_node *node;
@@ -659,7 +543,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
- if (last_addr && !tipc_node_find(last_addr)) {
+ if (last_addr && !tipc_node_find(net, last_addr)) {
rcu_read_unlock();
/* We never set seq or call nl_dump_check_consistent(); this
 * means that setting prev_seq here will cause the consistency
@@ -671,7 +555,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
return -EPIPE;
}
- list_for_each_entry_rcu(node, &tipc_node_list, list) {
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
if (last_addr) {
if (node->addr == last_addr)
last_addr = 0;
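
The node.c changes above move the file-scope node hash table, node list and list lock into per-namespace state reached through net_generic(net, tipc_net_id). Below is a minimal sketch of that per-namespace pattern for readers unfamiliar with it; the example_* names are hypothetical and the real tipc_net registration lives in core.c, outside this excerpt:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static int example_net_id __read_mostly;

	struct example_net {
		spinlock_t node_list_lock;	/* protects node_list */
		struct list_head node_list;
	};

	static int __net_init example_init_net(struct net *net)
	{
		/* net_generic() returns the per-namespace area sized below */
		struct example_net *en = net_generic(net, example_net_id);

		spin_lock_init(&en->node_list_lock);
		INIT_LIST_HEAD(&en->node_list);
		return 0;
	}

	static struct pernet_operations example_net_ops = {
		.init = example_init_net,
		.id   = &example_net_id,
		.size = sizeof(struct example_net),
	};

	/* register_pernet_subsys(&example_net_ops) from the module init path */

With this layout, every lookup in the diff of the form net_generic(net, tipc_net_id) resolves to the namespace-local replacement for what used to be global TIPC state.
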
diff --git a/net/tipc/node.h b/net/tipc/node.h
index cbe0e950f1cc..3d18c66b7f78 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -1,7 +1,7 @@
/*
* net/tipc/node.h: Include file for TIPC node management routines
*
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
* Copyright (c) 2005, 2010-2014, Wind River Systems
* All rights reserved.
*
@@ -42,10 +42,10 @@
#include "bearer.h"
#include "msg.h"
-/*
- * Out-of-range value for node signature
- */
-#define INVALID_NODE_SIG 0x10000
+/* Out-of-range value for node signature */
+#define INVALID_NODE_SIG 0x10000
+
+#define NODE_HTABLE_SIZE 512
/* Flags used to take different actions according to flag type
* TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
@@ -55,14 +55,16 @@
* TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
*/
enum {
+ TIPC_MSG_EVT = 1,
TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
TIPC_NOTIFY_NODE_DOWN = (1 << 3),
TIPC_NOTIFY_NODE_UP = (1 << 4),
- TIPC_WAKEUP_USERS = (1 << 5),
- TIPC_WAKEUP_BCAST_USERS = (1 << 6),
- TIPC_NOTIFY_LINK_UP = (1 << 7),
- TIPC_NOTIFY_LINK_DOWN = (1 << 8)
+ TIPC_WAKEUP_BCAST_USERS = (1 << 5),
+ TIPC_NOTIFY_LINK_UP = (1 << 6),
+ TIPC_NOTIFY_LINK_DOWN = (1 << 7),
+ TIPC_NAMED_MSG_EVT = (1 << 8),
+ TIPC_BCAST_MSG_EVT = (1 << 9)
};
/**
@@ -73,6 +75,7 @@ enum {
* @oos_state: state tracker for handling OOS b'cast messages
* @deferred_queue: deferred queue saved OOS b'cast message received from node
* @reasm_buf: broadcast reassembly queue head from node
+ * @inputq_map: bitmap indicating which input queues should be kicked
* @recv_permitted: true if node is allowed to receive b'cast messages
*/
struct tipc_node_bclink {
@@ -83,6 +86,7 @@ struct tipc_node_bclink {
u32 deferred_size;
struct sk_buff_head deferred_queue;
struct sk_buff *reasm_buf;
+ int inputq_map;
bool recv_permitted;
};
@@ -90,7 +94,11 @@ struct tipc_node_bclink {
* struct tipc_node - TIPC node structure
* @addr: network address of node
* @lock: spinlock governing access to structure
+ * @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
+ * @inputq: pointer to input queue containing messages for msg event
+ * @namedq: pointer to name table input queue with name table messages
+ * @curr_link: the link holding the node lock, if any
* @active_links: pointers to active links to node
* @links: pointers to all links to node
* @action_flags: bit mask of different types of node actions
@@ -106,11 +114,14 @@ struct tipc_node_bclink {
struct tipc_node {
u32 addr;
spinlock_t lock;
+ struct net *net;
struct hlist_node hash;
+ struct sk_buff_head *inputq;
+ struct sk_buff_head *namedq;
struct tipc_link *active_links[2];
u32 act_mtus[2];
struct tipc_link *links[MAX_BEARERS];
- unsigned int action_flags;
+ int action_flags;
struct tipc_node_bclink bclink;
struct list_head list;
int link_cnt;
@@ -118,28 +129,24 @@ struct tipc_node {
u32 signature;
u32 link_id;
struct list_head publ_list;
- struct sk_buff_head waiting_sks;
struct list_head conn_sks;
struct rcu_head rcu;
};
-extern struct list_head tipc_node_list;
-
-struct tipc_node *tipc_node_find(u32 addr);
-struct tipc_node *tipc_node_create(u32 addr);
-void tipc_node_stop(void);
+struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr);
+void tipc_node_stop(struct net *net);
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
int tipc_node_active_links(struct tipc_node *n_ptr);
int tipc_node_is_up(struct tipc_node *n_ptr);
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
-int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
+ char *linkname, size_t len);
void tipc_node_unlock(struct tipc_node *node);
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port);
-void tipc_node_remove_conn(u32 dnode, u32 port);
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
@@ -154,12 +161,12 @@ static inline bool tipc_node_blocked(struct tipc_node *node)
TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
}
-static inline uint tipc_node_get_mtu(u32 addr, u32 selector)
+static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
{
struct tipc_node *node;
u32 mtu;
- node = tipc_node_find(addr);
+ node = tipc_node_find(net, addr);
if (likely(node))
mtu = node->act_mtus[selector & 1];
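
The new TIPC_MSG_EVT, TIPC_NAMED_MSG_EVT and TIPC_BCAST_MSG_EVT bits defined in node.h are consumed by tipc_node_unlock() in the node.c hunk above: work is only flagged while the node spinlock is held and performed after it is released. The schematic below restates that deferred-action pattern with hypothetical example_* names; it is a sketch, not TIPC code:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_node {
		spinlock_t lock;
		u32 action_flags;
		struct sk_buff_head *inputq;
	};

	#define EXAMPLE_MSG_EVT		(1 << 0)
	#define EXAMPLE_NODE_DOWN	(1 << 1)

	void example_deliver(struct sk_buff_head *inputq);	/* hypothetical */
	void example_notify_down(struct example_node *n);	/* hypothetical */

	static void example_node_unlock(struct example_node *n)
	{
		u32 flags;

		spin_lock_bh(&n->lock);
		flags = n->action_flags;	/* snapshot the pending work bits */
		n->action_flags = 0;
		spin_unlock_bh(&n->lock);

		/* act only after the lock is dropped; the handlers may take
		 * socket or name-table locks of their own
		 */
		if (flags & EXAMPLE_MSG_EVT)
			example_deliver(n->inputq);
		if (flags & EXAMPLE_NODE_DOWN)
			example_notify_down(n);
	}
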
diff --git a/net/tipc/server.c b/net/tipc/server.c
index a538a02f869b..eadd4ed45905 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -35,6 +35,7 @@
#include "server.h"
#include "core.h"
+#include "socket.h"
#include <net/sock.h>
/* Number of messages to send before rescheduling */
@@ -255,7 +256,8 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
goto out_close;
}
- s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret);
+ s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
+ con->usr_data, buf, ret);
kmem_cache_free(s->rcvbuf_cache, buf);
@@ -307,7 +309,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
struct socket *sock = NULL;
int ret;
- ret = tipc_sock_create_local(s->type, &sock);
+ ret = tipc_sock_create_local(s->net, s->type, &sock);
if (ret < 0)
return NULL;
ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
diff --git a/net/tipc/server.h b/net/tipc/server.h
index be817b0b547e..9015faedb1b0 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -36,7 +36,9 @@
#ifndef _TIPC_SERVER_H
#define _TIPC_SERVER_H
-#include "core.h"
+#include <linux/idr.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
#define TIPC_SERVER_NAME_LEN 32
@@ -45,6 +47,7 @@
* @conn_idr: identifier set of connection
* @idr_lock: protect the connection identifier set
* @idr_in_use: amount of allocated identifier entry
+ * @net: network namespace instance
* @rcvbuf_cache: memory cache of server receive buffer
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
@@ -61,16 +64,18 @@ struct tipc_server {
struct idr conn_idr;
spinlock_t idr_lock;
int idr_in_use;
+ struct net *net;
struct kmem_cache *rcvbuf_cache;
struct workqueue_struct *rcv_wq;
struct workqueue_struct *send_wq;
int max_rcvbuf_size;
- void *(*tipc_conn_new) (int conid);
- void (*tipc_conn_shutdown) (int conid, void *usr_data);
- void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr,
- void *usr_data, void *buf, size_t len);
+ void *(*tipc_conn_new)(int conid);
+ void (*tipc_conn_shutdown)(int conid, void *usr_data);
+ void (*tipc_conn_recvmsg)(struct net *net, int conid,
+ struct sockaddr_tipc *addr, void *usr_data,
+ void *buf, size_t len);
struct sockaddr_tipc *saddr;
- const char name[TIPC_SERVER_NAME_LEN];
+ char name[TIPC_SERVER_NAME_LEN];
int imp;
int type;
};
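
The server.h hunk above threads a struct net * through struct tipc_server: a new .net field plus a net-aware tipc_conn_recvmsg callback. The hedged usage sketch below shows how a subsystem might populate the updated structure; the example_* names are hypothetical and do not correspond to TIPC's real topology-server callbacks:

	#include <linux/net.h>
	#include <linux/tipc.h>
	#include <net/net_namespace.h>
	#include "server.h"

	static void example_conn_recvmsg(struct net *net, int conid,
					 struct sockaddr_tipc *addr,
					 void *usr_data, void *buf, size_t len)
	{
		/* handle 'len' bytes arriving on connection 'conid' inside 'net' */
	}

	static struct tipc_server example_server = {
		.net			= &init_net,	/* or the owning namespace */
		.name			= "EXAMPLE_SRV",
		.imp			= TIPC_CRITICAL_IMPORTANCE,
		.type			= SOCK_SEQPACKET,
		.max_rcvbuf_size	= 8192,
		.tipc_conn_recvmsg	= example_conn_recvmsg,
	};
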
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 4731cad99d1c..f73e975af80b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,7 +1,7 @@
/*
* net/tipc/socket.c: TIPC socket API
*
- * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
* All rights reserved.
*
@@ -34,22 +34,25 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/rhashtable.h>
+#include <linux/jhash.h>
#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
-#include <linux/export.h>
-#include "config.h"
+#include "name_distr.h"
#include "socket.h"
-#define SS_LISTENING -1 /* socket is listening */
-#define SS_READY -2 /* socket is connectionless */
+#define SS_LISTENING -1 /* socket is listening */
+#define SS_READY -2 /* socket is connectionless */
-#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
-#define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */
-#define TIPC_FWD_MSG 1
-#define TIPC_CONN_OK 0
-#define TIPC_CONN_PROBING 1
+#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
+#define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
+#define TIPC_FWD_MSG 1
+#define TIPC_CONN_OK 0
+#define TIPC_CONN_PROBING 1
+#define TIPC_MAX_PORT 0xffffffff
+#define TIPC_MIN_PORT 1
/**
* struct tipc_sock - TIPC socket structure
@@ -59,21 +62,20 @@
* @conn_instance: TIPC instance used when connection was established
* @published: non-zero if port has one or more associated names
* @max_pkt: maximum packet size "hint" used when building messages sent by port
- * @ref: unique reference to port in TIPC object registry
+ * @portid: unique port identity in TIPC socket hash table
* @phdr: preformatted message header used when sending messages
* @port_list: adjacent ports in TIPC's global list of ports
* @publications: list of publications for port
* @pub_count: total # of publications port has made during its lifetime
* @probing_state:
- * @probing_interval:
- * @timer:
- * @port: port - interacts with 'sk' and with the rest of the TIPC stack
- * @peer_name: the peer of the connection, if any
+ * @probing_intv: connection probing interval, in jiffies
* @conn_timeout: the time we can wait for an unresponded setup request
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @link_cong: non-zero if owner must sleep because of link congestion
* @sent_unacked: # messages sent by socket, and not yet acked by peer
* @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @node: hash table node
+ * @rcu: rcu struct for tipc_sock
*/
struct tipc_sock {
struct sock sk;
@@ -82,19 +84,20 @@ struct tipc_sock {
u32 conn_instance;
int published;
u32 max_pkt;
- u32 ref;
+ u32 portid;
struct tipc_msg phdr;
struct list_head sock_list;
struct list_head publications;
u32 pub_count;
u32 probing_state;
- u32 probing_interval;
- struct timer_list timer;
+ unsigned long probing_intv;
uint conn_timeout;
atomic_t dupl_rcvcnt;
bool link_cong;
uint sent_unacked;
uint rcv_unacked;
+ struct rhash_head node;
+ struct rcu_head rcu;
};
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -103,16 +106,14 @@ static void tipc_write_space(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
-static void tipc_sk_timeout(unsigned long ref);
+static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
struct tipc_name_seq const *seq);
-static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk);
-static void tipc_sk_ref_discard(u32 ref);
-static struct tipc_sock *tipc_sk_get(u32 ref);
-static struct tipc_sock *tipc_sk_get_next(u32 *ref);
-static void tipc_sk_put(struct tipc_sock *tsk);
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
+static int tipc_sk_insert(struct tipc_sock *tsk);
+static void tipc_sk_remove(struct tipc_sock *tsk);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -174,6 +175,11 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
* - port reference
*/
+static u32 tsk_own_node(struct tipc_sock *tsk)
+{
+ return msg_prevnode(&tsk->phdr);
+}
+
static u32 tsk_peer_node(struct tipc_sock *tsk)
{
return msg_destnode(&tsk->phdr);
@@ -246,10 +252,11 @@ static void tsk_rej_rx_queue(struct sock *sk)
{
struct sk_buff *skb;
u32 dnode;
+ u32 own_node = tsk_own_node(tipc_sk(sk));
while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
- if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(skb, dnode, 0);
+ if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
+ tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
}
}
@@ -260,6 +267,7 @@ static void tsk_rej_rx_queue(struct sock *sk)
*/
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
+ struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
u32 peer_port = tsk_peer_port(tsk);
u32 orig_node;
u32 peer_node;
@@ -276,10 +284,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
if (likely(orig_node == peer_node))
return true;
- if (!orig_node && (peer_node == tipc_own_addr))
+ if (!orig_node && (peer_node == tn->own_addr))
return true;
- if (!peer_node && (orig_node == tipc_own_addr))
+ if (!peer_node && (orig_node == tn->own_addr))
return true;
return false;
@@ -300,12 +308,12 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
+ struct tipc_net *tn;
const struct proto_ops *ops;
socket_state state;
struct sock *sk;
struct tipc_sock *tsk;
struct tipc_msg *msg;
- u32 ref;
/* Validate arguments */
if (unlikely(protocol != 0))
@@ -339,24 +347,23 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
return -ENOMEM;
tsk = tipc_sk(sk);
- ref = tipc_sk_ref_acquire(tsk);
- if (!ref) {
- pr_warn("Socket create failed; reference table exhausted\n");
- return -ENOMEM;
- }
tsk->max_pkt = MAX_PKT_DEFAULT;
- tsk->ref = ref;
INIT_LIST_HEAD(&tsk->publications);
msg = &tsk->phdr;
- tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+ tn = net_generic(sock_net(sk), tipc_net_id);
+ tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
NAMED_H_SIZE, 0);
- msg_set_origport(msg, ref);
/* Finish initializing socket data structures */
sock->ops = ops;
sock->state = state;
sock_init_data(sock, sk);
- k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref);
+ if (tipc_sk_insert(tsk)) {
+ pr_warn("Socket create failed; port numbrer exhausted\n");
+ return -EINVAL;
+ }
+ msg_set_origport(msg, tsk->portid);
+ setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
sk->sk_backlog_rcv = tipc_backlog_rcv;
sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk->sk_data_ready = tipc_data_ready;
@@ -384,7 +391,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
*
* Returns 0 on success, errno otherwise
*/
-int tipc_sock_create_local(int type, struct socket **res)
+int tipc_sock_create_local(struct net *net, int type, struct socket **res)
{
int rc;
@@ -393,7 +400,7 @@ int tipc_sock_create_local(int type, struct socket **res)
pr_err("Failed to create kernel socket\n");
return rc;
}
- tipc_sk_create(&init_net, *res, 0, 1);
+ tipc_sk_create(net, *res, 0, 1);
return 0;
}
@@ -442,6 +449,13 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
return ret;
}
+static void tipc_sk_callback(struct rcu_head *head)
+{
+ struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
+
+ sock_put(&tsk->sk);
+}
+
/**
* tipc_release - destroy a TIPC socket
* @sock: socket to destroy
@@ -461,9 +475,10 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
+ struct net *net;
struct tipc_sock *tsk;
struct sk_buff *skb;
- u32 dnode;
+ u32 dnode, probing_state;
/*
* Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -472,6 +487,7 @@ static int tipc_release(struct socket *sock)
if (sk == NULL)
return 0;
+ net = sock_net(sk);
tsk = tipc_sk(sk);
lock_sock(sk);
@@ -491,26 +507,29 @@ static int tipc_release(struct socket *sock)
(sock->state == SS_CONNECTED)) {
sock->state = SS_DISCONNECTING;
tsk->connected = 0;
- tipc_node_remove_conn(dnode, tsk->ref);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
}
- if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(skb, dnode, 0);
+ if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
+ TIPC_ERR_NO_PORT))
+ tipc_link_xmit_skb(net, skb, dnode, 0);
}
}
tipc_sk_withdraw(tsk, 0, NULL);
- tipc_sk_ref_discard(tsk->ref);
- k_cancel_timer(&tsk->timer);
+ probing_state = tsk->probing_state;
+ if (del_timer_sync(&sk->sk_timer) &&
+ probing_state != TIPC_CONN_PROBING)
+ sock_put(sk);
+ tipc_sk_remove(tsk);
if (tsk->connected) {
- skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
- SHORT_H_SIZE, 0, dnode, tipc_own_addr,
- tsk_peer_port(tsk),
- tsk->ref, TIPC_ERR_NO_PORT);
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+ TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
+ tsk_own_node(tsk), tsk_peer_port(tsk),
+ tsk->portid, TIPC_ERR_NO_PORT);
if (skb)
- tipc_link_xmit_skb(skb, dnode, tsk->ref);
- tipc_node_remove_conn(dnode, tsk->ref);
+ tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
}
- k_term_timer(&tsk->timer);
/* Discard any remaining (connection-based) messages in receive queue */
__skb_queue_purge(&sk->sk_receive_queue);
@@ -518,7 +537,8 @@ static int tipc_release(struct socket *sock)
/* Reject any messages that accumulated in backlog queue */
sock->state = SS_DISCONNECTING;
release_sock(sk);
- sock_put(sk);
+
+ call_rcu(&tsk->rcu, tipc_sk_callback);
sock->sk = NULL;
return 0;
@@ -602,6 +622,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct tipc_sock *tsk = tipc_sk(sock->sk);
+ struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
memset(addr, 0, sizeof(*addr));
if (peer) {
@@ -611,8 +632,8 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
addr->addr.id.ref = tsk_peer_port(tsk);
addr->addr.id.node = tsk_peer_node(tsk);
} else {
- addr->addr.id.ref = tsk->ref;
- addr->addr.id.node = tipc_own_addr;
+ addr->addr.id.ref = tsk->portid;
+ addr->addr.id.node = tn->own_addr;
}
*uaddr_len = sizeof(*addr);
@@ -711,8 +732,11 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct msghdr *msg, size_t dsz, long timeo)
{
struct sock *sk = sock->sk;
- struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
- struct sk_buff_head head;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
+ struct tipc_msg *mhdr = &tsk->phdr;
+ struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct iov_iter save = msg->msg_iter;
uint mtu;
int rc;
@@ -727,83 +751,97 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
new_mtu:
mtu = tipc_bclink_get_mtu();
- __skb_queue_head_init(&head);
- rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
+ rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
if (unlikely(rc < 0))
return rc;
do {
- rc = tipc_bclink_xmit(&head);
+ rc = tipc_bclink_xmit(net, pktchain);
if (likely(rc >= 0)) {
rc = dsz;
break;
}
- if (rc == -EMSGSIZE)
+ if (rc == -EMSGSIZE) {
+ msg->msg_iter = save;
goto new_mtu;
+ }
if (rc != -ELINKCONG)
break;
tipc_sk(sk)->link_cong = 1;
rc = tipc_wait_for_sndmsg(sock, &timeo);
if (rc)
- __skb_queue_purge(&head);
+ __skb_queue_purge(pktchain);
} while (!rc);
return rc;
}
-/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
+/**
+ * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
+ * @arrvq: queue with arriving messages, to be cloned after destination lookup
+ * @inputq: queue with cloned messages, delivered to socket after dest lookup
+ *
+ * Multi-threaded: parallel calls with reference to same queues may occur
*/
-void tipc_sk_mcast_rcv(struct sk_buff *buf)
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ struct sk_buff_head *inputq)
{
- struct tipc_msg *msg = buf_msg(buf);
- struct tipc_port_list dports = {0, NULL, };
- struct tipc_port_list *item;
- struct sk_buff *b;
- uint i, last, dst = 0;
+ struct tipc_msg *msg;
+ struct tipc_plist dports;
+ u32 portid;
u32 scope = TIPC_CLUSTER_SCOPE;
-
- if (in_own_node(msg_orignode(msg)))
- scope = TIPC_NODE_SCOPE;
-
- /* Create destination port list: */
- tipc_nametbl_mc_translate(msg_nametype(msg),
- msg_namelower(msg),
- msg_nameupper(msg),
- scope,
- &dports);
- last = dports.count;
- if (!last) {
- kfree_skb(buf);
- return;
- }
-
- for (item = &dports; item; item = item->next) {
- for (i = 0; i < PLSIZE && ++dst <= last; i++) {
- b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
- if (!b) {
- pr_warn("Failed do clone mcast rcv buffer\n");
+ struct sk_buff_head tmpq;
+ uint hsz;
+ struct sk_buff *skb, *_skb;
+
+ __skb_queue_head_init(&tmpq);
+ tipc_plist_init(&dports);
+
+ skb = tipc_skb_peek(arrvq, &inputq->lock);
+ for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
+ msg = buf_msg(skb);
+ hsz = skb_headroom(skb) + msg_hdr_sz(msg);
+
+ if (in_own_node(net, msg_orignode(msg)))
+ scope = TIPC_NODE_SCOPE;
+
+ /* Create destination port list and message clones: */
+ tipc_nametbl_mc_translate(net,
+ msg_nametype(msg), msg_namelower(msg),
+ msg_nameupper(msg), scope, &dports);
+ portid = tipc_plist_pop(&dports);
+ for (; portid; portid = tipc_plist_pop(&dports)) {
+ _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
+ if (_skb) {
+ msg_set_destport(buf_msg(_skb), portid);
+ __skb_queue_tail(&tmpq, _skb);
continue;
}
- msg_set_destport(msg, item->ports[i]);
- tipc_sk_rcv(b);
+ pr_warn("Failed to clone mcast rcv buffer\n");
}
+ /* Append to inputq if not already done by other thread */
+ spin_lock_bh(&inputq->lock);
+ if (skb_peek(arrvq) == skb) {
+ skb_queue_splice_tail_init(&tmpq, inputq);
+ kfree_skb(__skb_dequeue(arrvq));
+ }
+ spin_unlock_bh(&inputq->lock);
+ __skb_queue_purge(&tmpq);
+ kfree_skb(skb);
}
- tipc_port_list_free(&dports);
+ tipc_sk_rcv(net, inputq);
}
/**
* tipc_sk_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
- * @dnode: node to send response message to, if any
- * @buf: buffer containing protocol message
- * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
- * (CONN_PROBE_REPLY) message should be forwarded.
+ * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
*/
-static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
- struct sk_buff *buf)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
{
- struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_msg *msg = buf_msg(*skb);
int conn_cong;
-
+ u32 dnode;
+ u32 own_node = tsk_own_node(tsk);
/* Ignore if connection cannot be validated: */
if (!tsk_peer_msg(tsk, msg))
goto exit;
@@ -816,15 +854,15 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
if (conn_cong)
tsk->sk.sk_write_space(&tsk->sk);
} else if (msg_type(msg) == CONN_PROBE) {
- if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
- return TIPC_OK;
- msg_set_type(msg, CONN_PROBE_REPLY);
- return TIPC_FWD_MSG;
+ if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
+ msg_set_type(msg, CONN_PROBE_REPLY);
+ return;
+ }
}
/* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
- kfree_skb(buf);
- return TIPC_OK;
+ kfree_skb(*skb);
+ *skb = NULL;
}
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -872,11 +910,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
struct tipc_msg *mhdr = &tsk->phdr;
u32 dnode, dport;
- struct sk_buff_head head;
+ struct sk_buff_head *pktchain = &sk->sk_write_queue;
struct sk_buff *skb;
struct tipc_name_seq *seq = &dest->addr.nameseq;
+ struct iov_iter save;
u32 mtu;
long timeo;
int rc;
@@ -929,7 +969,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
msg_set_nametype(mhdr, type);
msg_set_nameinst(mhdr, inst);
msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
- dport = tipc_nametbl_translate(type, inst, &dnode);
+ dport = tipc_nametbl_translate(net, type, inst, &dnode);
msg_set_destnode(mhdr, dnode);
msg_set_destport(mhdr, dport);
if (unlikely(!dport && !dnode)) {
@@ -945,31 +985,33 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
}
+ save = m->msg_iter;
new_mtu:
- mtu = tipc_node_get_mtu(dnode, tsk->ref);
- __skb_queue_head_init(&head);
- rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+ rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
if (rc < 0)
goto exit;
do {
- skb = skb_peek(&head);
+ skb = skb_peek(pktchain);
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
- rc = tipc_link_xmit(&head, dnode, tsk->ref);
+ rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
if (likely(rc >= 0)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
rc = dsz;
break;
}
- if (rc == -EMSGSIZE)
+ if (rc == -EMSGSIZE) {
+ m->msg_iter = save;
goto new_mtu;
+ }
if (rc != -ELINKCONG)
break;
tsk->link_cong = 1;
rc = tipc_wait_for_sndmsg(sock, &timeo);
if (rc)
- __skb_queue_purge(&head);
+ __skb_queue_purge(pktchain);
} while (!rc);
exit:
if (iocb)
@@ -1024,15 +1066,17 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t dsz)
{
struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct sk_buff_head head;
+ struct sk_buff_head *pktchain = &sk->sk_write_queue;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
- u32 ref = tsk->ref;
+ u32 portid = tsk->portid;
int rc = -EINVAL;
long timeo;
u32 dnode;
uint mtu, send, sent = 0;
+ struct iov_iter save;
/* Handle implied connection establishment */
if (unlikely(dest)) {
@@ -1059,15 +1103,15 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
dnode = tsk_peer_node(tsk);
next:
+ save = m->msg_iter;
mtu = tsk->max_pkt;
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
- __skb_queue_head_init(&head);
- rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
+ rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
if (unlikely(rc < 0))
goto exit;
do {
if (likely(!tsk_conn_cong(tsk))) {
- rc = tipc_link_xmit(&head, dnode, ref);
+ rc = tipc_link_xmit(net, pktchain, dnode, portid);
if (likely(!rc)) {
tsk->sent_unacked++;
sent += send;
@@ -1076,7 +1120,9 @@ next:
goto next;
}
if (rc == -EMSGSIZE) {
- tsk->max_pkt = tipc_node_get_mtu(dnode, ref);
+ tsk->max_pkt = tipc_node_get_mtu(net, dnode,
+ portid);
+ m->msg_iter = save;
goto next;
}
if (rc != -ELINKCONG)
@@ -1085,7 +1131,7 @@ next:
}
rc = tipc_wait_for_sndpkt(sock, &timeo);
if (rc)
- __skb_queue_purge(&head);
+ __skb_queue_purge(pktchain);
} while (!rc);
exit:
if (iocb)
@@ -1118,6 +1164,8 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
u32 peer_node)
{
+ struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
struct tipc_msg *msg = &tsk->phdr;
msg_set_destnode(msg, peer_node);
@@ -1126,12 +1174,12 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
msg_set_lookup_scope(msg, 0);
msg_set_hdr_sz(msg, SHORT_H_SIZE);
- tsk->probing_interval = CONN_PROBING_INTERVAL;
+ tsk->probing_intv = CONN_PROBING_INTERVAL;
tsk->probing_state = TIPC_CONN_OK;
tsk->connected = 1;
- k_start_timer(&tsk->timer, tsk->probing_interval);
- tipc_node_add_conn(peer_node, tsk->ref, peer_port);
- tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
+ tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
+ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
}
/**
@@ -1230,6 +1278,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
{
+ struct net *net = sock_net(&tsk->sk);
struct sk_buff *skb = NULL;
struct tipc_msg *msg;
u32 peer_port = tsk_peer_port(tsk);
@@ -1237,13 +1286,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
if (!tsk->connected)
return;
- skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
- tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
+ skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
+ dnode, tsk_own_node(tsk), peer_port,
+ tsk->portid, TIPC_OK);
if (!skb)
return;
msg = buf_msg(skb);
msg_set_msgcnt(msg, ack);
- tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
+ tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1529,15 +1579,16 @@ static void tipc_data_ready(struct sock *sk)
/**
* filter_connect - Handle all incoming messages for a connection-based socket
* @tsk: TIPC socket
- * @msg: message
+ * @skb: pointer to message buffer. Set to NULL if buffer is consumed
*
* Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
*/
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
+static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
{
struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
struct socket *sock = sk->sk_socket;
- struct tipc_msg *msg = buf_msg(*buf);
+ struct tipc_msg *msg = buf_msg(*skb);
int retval = -TIPC_ERR_NO_PORT;
if (msg_mcast(msg))
@@ -1551,8 +1602,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
sock->state = SS_DISCONNECTING;
tsk->connected = 0;
/* let timer expire on its own */
- tipc_node_remove_conn(tsk_peer_node(tsk),
- tsk->ref);
+ tipc_node_remove_conn(net, tsk_peer_node(tsk),
+ tsk->portid);
}
retval = TIPC_OK;
}
@@ -1587,8 +1638,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
* connect() routine if sleeping.
*/
if (msg_data_sz(msg) == 0) {
- kfree_skb(*buf);
- *buf = NULL;
+ kfree_skb(*skb);
+ *skb = NULL;
if (waitqueue_active(sk_sleep(sk)))
wake_up_interruptible(sk_sleep(sk));
}
@@ -1640,32 +1691,33 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
/**
* filter_rcv - validate incoming message
* @sk: socket
- * @buf: message
+ * @skb: pointer to message. Set to NULL if buffer is consumed.
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
- * Called with socket lock already taken; port lock may also be taken.
+ * Called with socket lock already taken
*
- * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
- * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
+ * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
*/
-static int filter_rcv(struct sock *sk, struct sk_buff *buf)
+static int filter_rcv(struct sock *sk, struct sk_buff **skb)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_msg *msg = buf_msg(buf);
- unsigned int limit = rcvbuf_limit(sk, buf);
- u32 onode;
+ struct tipc_msg *msg = buf_msg(*skb);
+ unsigned int limit = rcvbuf_limit(sk, *skb);
int rc = TIPC_OK;
- if (unlikely(msg_user(msg) == CONN_MANAGER))
- return tipc_sk_proto_rcv(tsk, &onode, buf);
+ if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+ tipc_sk_proto_rcv(tsk, skb);
+ return TIPC_OK;
+ }
if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
- kfree_skb(buf);
+ kfree_skb(*skb);
tsk->link_cong = 0;
sk->sk_write_space(sk);
+ *skb = NULL;
return TIPC_OK;
}
@@ -1677,21 +1729,22 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
if (msg_connected(msg))
return -TIPC_ERR_NO_PORT;
} else {
- rc = filter_connect(tsk, &buf);
- if (rc != TIPC_OK || buf == NULL)
+ rc = filter_connect(tsk, skb);
+ if (rc != TIPC_OK || !*skb)
return rc;
}
/* Reject message if there isn't room to queue it */
- if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
+ if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
return -TIPC_ERR_OVERLOAD;
/* Enqueue message */
- TIPC_SKB_CB(buf)->handle = NULL;
- __skb_queue_tail(&sk->sk_receive_queue, buf);
- skb_set_owner_r(buf, sk);
+ TIPC_SKB_CB(*skb)->handle = NULL;
+ __skb_queue_tail(&sk->sk_receive_queue, *skb);
+ skb_set_owner_r(*skb, sk);
sk->sk_data_ready(sk);
+ *skb = NULL;
return TIPC_OK;
}
@@ -1700,78 +1753,125 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
* @sk: socket
* @skb: message
*
- * Caller must hold socket lock, but not port lock.
+ * Caller must hold socket lock
*
* Returns 0
*/
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
- int rc;
- u32 onode;
+ int err;
+ atomic_t *dcnt;
+ u32 dnode;
struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
uint truesize = skb->truesize;
- rc = filter_rcv(sk, skb);
-
- if (likely(!rc)) {
- if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
- atomic_add(truesize, &tsk->dupl_rcvcnt);
+ err = filter_rcv(sk, &skb);
+ if (likely(!skb)) {
+ dcnt = &tsk->dupl_rcvcnt;
+ if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+ atomic_add(truesize, dcnt);
return 0;
}
+ if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
+ tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ return 0;
+}
- if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
- return 0;
-
- tipc_link_xmit_skb(skb, onode, 0);
+/**
+ * tipc_sk_enqueue - extract all buffers with destination 'dport' from
+ * inputq and try adding them to socket or backlog queue
+ * @inputq: list of incoming buffers with potentially different destinations
+ * @sk: socket where the buffers should be enqueued
+ * @dport: port number for the socket
+ * @_skb: returned buffer to be forwarded or rejected, if applicable
+ *
+ * Caller must hold socket lock
+ *
+ * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
+ * or -TIPC_ERR_NO_PORT
+ */
+static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+ u32 dport, struct sk_buff **_skb)
+{
+ unsigned int lim;
+ atomic_t *dcnt;
+ int err;
+ struct sk_buff *skb;
+ unsigned long time_limit = jiffies + 2;
- return 0;
+ while (skb_queue_len(inputq)) {
+ if (unlikely(time_after_eq(jiffies, time_limit)))
+ return TIPC_OK;
+ skb = tipc_skb_dequeue(inputq, dport);
+ if (unlikely(!skb))
+ return TIPC_OK;
+ if (!sock_owned_by_user(sk)) {
+ err = filter_rcv(sk, &skb);
+ if (likely(!skb))
+ continue;
+ *_skb = skb;
+ return err;
+ }
+ dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+ if (sk->sk_backlog.len)
+ atomic_set(dcnt, 0);
+ lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+ if (likely(!sk_add_backlog(sk, skb, lim)))
+ continue;
+ *_skb = skb;
+ return -TIPC_ERR_OVERLOAD;
+ }
+ return TIPC_OK;
}
/**
- * tipc_sk_rcv - handle incoming message
- * @skb: buffer containing arriving message
- * Consumes buffer
- * Returns 0 if success, or errno: -EHOSTUNREACH
+ * tipc_sk_rcv - handle a chain of incoming buffers
+ * @inputq: buffer list containing the buffers
+ * Consumes all buffers in list until inputq is empty
+ * Note: may be called in multiple threads referring to the same queue
+ * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
+ * Only node-local callers, which send single-buffer queues, check the return value
*/
-int tipc_sk_rcv(struct sk_buff *skb)
+int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
+ u32 dnode, dport = 0;
+ int err = -TIPC_ERR_NO_PORT;
+ struct sk_buff *skb;
struct tipc_sock *tsk;
+ struct tipc_net *tn;
struct sock *sk;
- u32 dport = msg_destport(buf_msg(skb));
- int rc = TIPC_OK;
- uint limit;
- u32 dnode;
- /* Validate destination and message */
- tsk = tipc_sk_get(dport);
- if (unlikely(!tsk)) {
- rc = tipc_msg_eval(skb, &dnode);
- goto exit;
+ while (skb_queue_len(inputq)) {
+ skb = NULL;
+ dport = tipc_skb_peek_port(inputq, dport);
+ tsk = tipc_sk_lookup(net, dport);
+ if (likely(tsk)) {
+ sk = &tsk->sk;
+ if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
+ err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+ spin_unlock_bh(&sk->sk_lock.slock);
+ dport = 0;
+ }
+ sock_put(sk);
+ } else {
+ skb = tipc_skb_dequeue(inputq, dport);
+ }
+ if (likely(!skb))
+ continue;
+ if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
+ goto xmit;
+ if (!err) {
+ dnode = msg_destnode(buf_msg(skb));
+ goto xmit;
+ }
+ tn = net_generic(net, tipc_net_id);
+ if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+ continue;
+xmit:
+ tipc_link_xmit_skb(net, skb, dnode, dport);
}
- sk = &tsk->sk;
-
- /* Queue message */
- spin_lock_bh(&sk->sk_lock.slock);
-
- if (!sock_owned_by_user(sk)) {
- rc = filter_rcv(sk, skb);
- } else {
- if (sk->sk_backlog.len == 0)
- atomic_set(&tsk->dupl_rcvcnt, 0);
- limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
- if (sk_add_backlog(sk, skb, limit))
- rc = -TIPC_ERR_OVERLOAD;
- }
- spin_unlock_bh(&sk->sk_lock.slock);
- tipc_sk_put(tsk);
- if (likely(!rc))
- return 0;
-exit:
- if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
- return -EHOSTUNREACH;
-
- tipc_link_xmit_skb(skb, dnode, 0);
- return (rc < 0) ? -EHOSTUNREACH : 0;
+ return err ? -EHOSTUNREACH : 0;
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2027,6 +2127,7 @@ exit:
static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct sk_buff *skb;
u32 dnode;
@@ -2049,21 +2150,24 @@ restart:
kfree_skb(skb);
goto restart;
}
- if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
- tipc_link_xmit_skb(skb, dnode, tsk->ref);
- tipc_node_remove_conn(dnode, tsk->ref);
+ if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
+ TIPC_CONN_SHUTDOWN))
+ tipc_link_xmit_skb(net, skb, dnode,
+ tsk->portid);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
} else {
dnode = tsk_peer_node(tsk);
+
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE,
- 0, dnode, tipc_own_addr,
+ 0, dnode, tsk_own_node(tsk),
tsk_peer_port(tsk),
- tsk->ref, TIPC_CONN_SHUTDOWN);
- tipc_link_xmit_skb(skb, dnode, tsk->ref);
+ tsk->portid, TIPC_CONN_SHUTDOWN);
+ tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
}
tsk->connected = 0;
sock->state = SS_DISCONNECTING;
- tipc_node_remove_conn(dnode, tsk->ref);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
/* fall through */
case SS_DISCONNECTING:
@@ -2084,18 +2188,14 @@ restart:
return res;
}
-static void tipc_sk_timeout(unsigned long ref)
+static void tipc_sk_timeout(unsigned long data)
{
- struct tipc_sock *tsk;
- struct sock *sk;
+ struct tipc_sock *tsk = (struct tipc_sock *)data;
+ struct sock *sk = &tsk->sk;
struct sk_buff *skb = NULL;
u32 peer_port, peer_node;
+ u32 own_node = tsk_own_node(tsk);
- tsk = tipc_sk_get(ref);
- if (!tsk)
- return;
-
- sk = &tsk->sk;
bh_lock_sock(sk);
if (!tsk->connected) {
bh_unlock_sock(sk);
@@ -2106,38 +2206,39 @@ static void tipc_sk_timeout(unsigned long ref)
if (tsk->probing_state == TIPC_CONN_PROBING) {
/* Previous probe not answered -> self abort */
- skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
- SHORT_H_SIZE, 0, tipc_own_addr,
- peer_node, ref, peer_port,
- TIPC_ERR_NO_PORT);
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+ TIPC_CONN_MSG, SHORT_H_SIZE, 0,
+ own_node, peer_node, tsk->portid,
+ peer_port, TIPC_ERR_NO_PORT);
} else {
- skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
- 0, peer_node, tipc_own_addr,
- peer_port, ref, TIPC_OK);
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
+ INT_H_SIZE, 0, peer_node, own_node,
+ peer_port, tsk->portid, TIPC_OK);
tsk->probing_state = TIPC_CONN_PROBING;
- k_start_timer(&tsk->timer, tsk->probing_interval);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
}
bh_unlock_sock(sk);
if (skb)
- tipc_link_xmit_skb(skb, peer_node, ref);
+ tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
- tipc_sk_put(tsk);
+ sock_put(sk);
}
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
struct tipc_name_seq const *seq)
{
+ struct net *net = sock_net(&tsk->sk);
struct publication *publ;
u32 key;
if (tsk->connected)
return -EINVAL;
- key = tsk->ref + tsk->pub_count + 1;
- if (key == tsk->ref)
+ key = tsk->portid + tsk->pub_count + 1;
+ if (key == tsk->portid)
return -EADDRINUSE;
- publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
- scope, tsk->ref, key);
+ publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
+ scope, tsk->portid, key);
if (unlikely(!publ))
return -EINVAL;
@@ -2150,6 +2251,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
struct tipc_name_seq const *seq)
{
+ struct net *net = sock_net(&tsk->sk);
struct publication *publ;
struct publication *safe;
int rc = -EINVAL;
@@ -2164,12 +2266,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
continue;
if (publ->upper != seq->upper)
break;
- tipc_nametbl_withdraw(publ->type, publ->lower,
+ tipc_nametbl_withdraw(net, publ->type, publ->lower,
publ->ref, publ->key);
rc = 0;
break;
}
- tipc_nametbl_withdraw(publ->type, publ->lower,
+ tipc_nametbl_withdraw(net, publ->type, publ->lower,
publ->ref, publ->key);
rc = 0;
}
@@ -2178,336 +2280,105 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
return rc;
}
-static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
- int len, int full_id)
-{
- struct publication *publ;
- int ret;
-
- if (full_id)
- ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
- tipc_zone(tipc_own_addr),
- tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr), tsk->ref);
- else
- ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref);
-
- if (tsk->connected) {
- u32 dport = tsk_peer_port(tsk);
- u32 destnode = tsk_peer_node(tsk);
-
- ret += tipc_snprintf(buf + ret, len - ret,
- " connected to <%u.%u.%u:%u>",
- tipc_zone(destnode),
- tipc_cluster(destnode),
- tipc_node(destnode), dport);
- if (tsk->conn_type != 0)
- ret += tipc_snprintf(buf + ret, len - ret,
- " via {%u,%u}", tsk->conn_type,
- tsk->conn_instance);
- } else if (tsk->published) {
- ret += tipc_snprintf(buf + ret, len - ret, " bound to");
- list_for_each_entry(publ, &tsk->publications, pport_list) {
- if (publ->lower == publ->upper)
- ret += tipc_snprintf(buf + ret, len - ret,
- " {%u,%u}", publ->type,
- publ->lower);
- else
- ret += tipc_snprintf(buf + ret, len - ret,
- " {%u,%u,%u}", publ->type,
- publ->lower, publ->upper);
- }
- }
- ret += tipc_snprintf(buf + ret, len - ret, "\n");
- return ret;
-}
-
-struct sk_buff *tipc_sk_socks_show(void)
-{
- struct sk_buff *buf;
- struct tlv_desc *rep_tlv;
- char *pb;
- int pb_len;
- struct tipc_sock *tsk;
- int str_len = 0;
- u32 ref = 0;
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
- if (!buf)
- return NULL;
- rep_tlv = (struct tlv_desc *)buf->data;
- pb = TLV_DATA(rep_tlv);
- pb_len = ULTRA_STRING_MAX_LEN;
-
- tsk = tipc_sk_get_next(&ref);
- for (; tsk; tsk = tipc_sk_get_next(&ref)) {
- lock_sock(&tsk->sk);
- str_len += tipc_sk_show(tsk, pb + str_len,
- pb_len - str_len, 0);
- release_sock(&tsk->sk);
- tipc_sk_put(tsk);
- }
- str_len += 1; /* for "\0" */
- skb_put(buf, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
-
- return buf;
-}
-
/* tipc_sk_reinit: set non-zero address in all existing sockets
* when we go from standalone to network mode.
*/
-void tipc_sk_reinit(void)
+void tipc_sk_reinit(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ const struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct tipc_sock *tsk;
struct tipc_msg *msg;
- u32 ref = 0;
- struct tipc_sock *tsk = tipc_sk_get_next(&ref);
+ int i;
- for (; tsk; tsk = tipc_sk_get_next(&ref)) {
- lock_sock(&tsk->sk);
- msg = &tsk->phdr;
- msg_set_prevnode(msg, tipc_own_addr);
- msg_set_orignode(msg, tipc_own_addr);
- release_sock(&tsk->sk);
- tipc_sk_put(tsk);
+ rcu_read_lock();
+ tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+ for (i = 0; i < tbl->size; i++) {
+ rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+ spin_lock_bh(&tsk->sk.sk_lock.slock);
+ msg = &tsk->phdr;
+ msg_set_prevnode(msg, tn->own_addr);
+ msg_set_orignode(msg, tn->own_addr);
+ spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ }
}
+ rcu_read_unlock();
}
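
tipc_sk_reinit() now walks every bucket of the per-namespace rhashtable under rcu_read_lock() and rewrites the prevnode/orignode fields of each socket's header template with the namespace's own address. A userspace sketch of the same bucket walk over a plain chained hash table (no RCU, bucket count chosen arbitrarily):

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 8

struct entry {
        unsigned int portid;
        unsigned int own_addr;        /* field rewritten by the walk */
        struct entry *next;
};

static struct entry *table[NBUCKETS]; /* stand-in for tn->sk_rht */

/* Visit every bucket and every chained entry, updating the per-entry
 * address, as tipc_sk_reinit() does for the message header template. */
static void reinit_all(unsigned int new_addr)
{
        for (int i = 0; i < NBUCKETS; i++)
                for (struct entry *e = table[i]; e; e = e->next)
                        e->own_addr = new_addr;
}

int main(void)
{
        for (unsigned int p = 1; p <= 4; p++) {
                struct entry *e = calloc(1, sizeof(*e));
                e->portid = p;
                e->next = table[p % NBUCKETS];
                table[p % NBUCKETS] = e;
        }
        reinit_all(0x01001001u);
        for (int i = 0; i < NBUCKETS; i++)
                for (struct entry *e = table[i]; e; e = e->next)
                        printf("port %u -> addr 0x%x\n", e->portid, e->own_addr);
        return 0;
}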
-/**
- * struct reference - TIPC socket reference entry
- * @tsk: pointer to socket associated with reference entry
- * @ref: reference value for socket (combines instance & array index info)
- */
-struct reference {
- struct tipc_sock *tsk;
- u32 ref;
-};
-
-/**
- * struct tipc_ref_table - table of TIPC socket reference entries
- * @entries: pointer to array of reference entries
- * @capacity: array index of first unusable entry
- * @init_point: array index of first uninitialized entry
- * @first_free: array index of first unused socket reference entry
- * @last_free: array index of last unused socket reference entry
- * @index_mask: bitmask for array index portion of reference values
- * @start_mask: initial value for instance value portion of reference values
- */
-struct ref_table {
- struct reference *entries;
- u32 capacity;
- u32 init_point;
- u32 first_free;
- u32 last_free;
- u32 index_mask;
- u32 start_mask;
-};
-
-/* Socket reference table consists of 2**N entries.
- *
- * State Socket ptr Reference
- * ----- ---------- ---------
- * In use non-NULL XXXX|own index
- * (XXXX changes each time entry is acquired)
- * Free NULL YYYY|next free index
- * (YYYY is one more than last used XXXX)
- * Uninitialized NULL 0
- *
- * Entry 0 is not used; this allows index 0 to denote the end of the free list.
- *
- * Note that a reference value of 0 does not necessarily indicate that an
- * entry is uninitialized, since the last entry in the free list could also
- * have a reference value of 0 (although this is unlikely).
- */
-
-static struct ref_table tipc_ref_table;
-
-static DEFINE_RWLOCK(ref_table_lock);
-
-/**
- * tipc_ref_table_init - create reference table for sockets
- */
-int tipc_sk_ref_table_init(u32 req_sz, u32 start)
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
- struct reference *table;
- u32 actual_sz;
-
- /* account for unused entry, then round up size to a power of 2 */
-
- req_sz++;
- for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) {
- /* do nothing */
- };
-
- /* allocate table & mark all entries as uninitialized */
- table = vzalloc(actual_sz * sizeof(struct reference));
- if (table == NULL)
- return -ENOMEM;
-
- tipc_ref_table.entries = table;
- tipc_ref_table.capacity = req_sz;
- tipc_ref_table.init_point = 1;
- tipc_ref_table.first_free = 0;
- tipc_ref_table.last_free = 0;
- tipc_ref_table.index_mask = actual_sz - 1;
- tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_sock *tsk;
- return 0;
-}
+ rcu_read_lock();
+ tsk = rhashtable_lookup(&tn->sk_rht, &portid);
+ if (tsk)
+ sock_hold(&tsk->sk);
+ rcu_read_unlock();
-/**
- * tipc_ref_table_stop - destroy reference table for sockets
- */
-void tipc_sk_ref_table_stop(void)
-{
- if (!tipc_ref_table.entries)
- return;
- vfree(tipc_ref_table.entries);
- tipc_ref_table.entries = NULL;
+ return tsk;
}
-/* tipc_ref_acquire - create reference to a socket
- *
- * Register an socket pointer in the reference table.
- * Returns a unique reference value that is used from then on to retrieve the
- * socket pointer, or to determine if the socket has been deregistered.
- */
-u32 tipc_sk_ref_acquire(struct tipc_sock *tsk)
+static int tipc_sk_insert(struct tipc_sock *tsk)
{
- u32 index;
- u32 index_mask;
- u32 next_plus_upper;
- u32 ref = 0;
- struct reference *entry;
-
- if (unlikely(!tsk)) {
- pr_err("Attempt to acquire ref. to non-existent obj\n");
- return 0;
- }
- if (unlikely(!tipc_ref_table.entries)) {
- pr_err("Ref. table not found in acquisition attempt\n");
- return 0;
- }
-
- /* Take a free entry, if available; otherwise initialize a new one */
- write_lock_bh(&ref_table_lock);
- index = tipc_ref_table.first_free;
- entry = &tipc_ref_table.entries[index];
-
- if (likely(index)) {
- index = tipc_ref_table.first_free;
- entry = &tipc_ref_table.entries[index];
- index_mask = tipc_ref_table.index_mask;
- next_plus_upper = entry->ref;
- tipc_ref_table.first_free = next_plus_upper & index_mask;
- ref = (next_plus_upper & ~index_mask) + index;
- entry->tsk = tsk;
- } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
- index = tipc_ref_table.init_point++;
- entry = &tipc_ref_table.entries[index];
- ref = tipc_ref_table.start_mask + index;
+ struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
+ u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+
+ while (remaining--) {
+ portid++;
+ if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
+ portid = TIPC_MIN_PORT;
+ tsk->portid = portid;
+ sock_hold(&tsk->sk);
+ if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
+ return 0;
+ sock_put(&tsk->sk);
}
- if (ref) {
- entry->ref = ref;
- entry->tsk = tsk;
- }
- write_unlock_bh(&ref_table_lock);
- return ref;
+ return -1;
}
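
The reference-table allocator is gone; tipc_sk_insert() instead draws a random port id from [TIPC_MIN_PORT, TIPC_MAX_PORT] and probes sequentially, wrapping at the top of the range, until the rhashtable insert succeeds or the whole range has been tried. A minimal userspace sketch of that allocation strategy; an array of flags stands in for the hash table and the range bounds are invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_PORT 1024u
#define MAX_PORT 2047u
#define RANGE    (MAX_PORT - MIN_PORT + 1)

static bool in_use[RANGE];            /* stand-in for the socket rhashtable */

/* Try to claim a free port id; returns 0 on failure, the port otherwise. */
static unsigned int alloc_portid(void)
{
        unsigned int remaining = RANGE;
        unsigned int portid = (unsigned int)rand() % RANGE + MIN_PORT;

        while (remaining--) {
                portid++;
                if (portid < MIN_PORT || portid > MAX_PORT)
                        portid = MIN_PORT;          /* wrap inside the range */
                if (!in_use[portid - MIN_PORT]) {
                        in_use[portid - MIN_PORT] = true;  /* insert succeeded */
                        return portid;
                }
                /* collision: try the next candidate */
        }
        return 0;                     /* range exhausted */
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("allocated port %u\n", alloc_portid());
        return 0;
}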
-/* tipc_sk_ref_discard - invalidate reference to an socket
- *
- * Disallow future references to an socket and free up the entry for re-use.
- */
-void tipc_sk_ref_discard(u32 ref)
+static void tipc_sk_remove(struct tipc_sock *tsk)
{
- struct reference *entry;
- u32 index;
- u32 index_mask;
-
- if (unlikely(!tipc_ref_table.entries)) {
- pr_err("Ref. table not found during discard attempt\n");
- return;
- }
-
- index_mask = tipc_ref_table.index_mask;
- index = ref & index_mask;
- entry = &tipc_ref_table.entries[index];
-
- write_lock_bh(&ref_table_lock);
+ struct sock *sk = &tsk->sk;
+ struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
- if (unlikely(!entry->tsk)) {
- pr_err("Attempt to discard ref. to non-existent socket\n");
- goto exit;
+ if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
+ WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+ __sock_put(sk);
}
- if (unlikely(entry->ref != ref)) {
- pr_err("Attempt to discard non-existent reference\n");
- goto exit;
- }
-
- /* Mark entry as unused; increment instance part of entry's
- * reference to invalidate any subsequent references
- */
-
- entry->tsk = NULL;
- entry->ref = (ref & ~index_mask) + (index_mask + 1);
-
- /* Append entry to free entry list */
- if (unlikely(tipc_ref_table.first_free == 0))
- tipc_ref_table.first_free = index;
- else
- tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
- tipc_ref_table.last_free = index;
-exit:
- write_unlock_bh(&ref_table_lock);
}
-/* tipc_sk_get - find referenced socket and return pointer to it
- */
-struct tipc_sock *tipc_sk_get(u32 ref)
+int tipc_sk_rht_init(struct net *net)
{
- struct reference *entry;
- struct tipc_sock *tsk;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct rhashtable_params rht_params = {
+ .nelem_hint = 192,
+ .head_offset = offsetof(struct tipc_sock, node),
+ .key_offset = offsetof(struct tipc_sock, portid),
+ .key_len = sizeof(u32), /* portid */
+ .hashfn = jhash,
+ .max_shift = 20, /* 1M */
+ .min_shift = 8, /* 256 */
+ .grow_decision = rht_grow_above_75,
+ .shrink_decision = rht_shrink_below_30,
+ };
- if (unlikely(!tipc_ref_table.entries))
- return NULL;
- read_lock_bh(&ref_table_lock);
- entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
- tsk = entry->tsk;
- if (likely(tsk && (entry->ref == ref)))
- sock_hold(&tsk->sk);
- else
- tsk = NULL;
- read_unlock_bh(&ref_table_lock);
- return tsk;
+ return rhashtable_init(&tn->sk_rht, &rht_params);
}
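
tipc_sk_rht_init() describes the table to the rhashtable core purely in terms of offsets: head_offset locates the embedded rhash_head, key_offset/key_len locate the u32 portid used as the hash key (hashed with jhash), and the grow/shrink callbacks bound the table between 2^8 and 2^20 buckets. A sketch of how such offset-based keying recovers the key from a linked node; this is generic C, not the rhashtable API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct node { struct node *next; };

struct sock_like {
        uint32_t portid;              /* key_offset points here */
        struct node node;             /* head_offset points here */
};

struct params {
        size_t head_offset;
        size_t key_offset;
        size_t key_len;
};

/* Recover the key from a linked node the way a table keyed by
 * (head_offset, key_offset, key_len) would. */
static const void *key_of(const struct node *n, const struct params *p)
{
        const char *obj = (const char *)n - p->head_offset;
        return obj + p->key_offset;
}

int main(void)
{
        struct params p = {
                .head_offset = offsetof(struct sock_like, node),
                .key_offset  = offsetof(struct sock_like, portid),
                .key_len     = sizeof(uint32_t),
        };
        struct sock_like s = { .portid = 4242 };

        printf("key = %u (len %zu)\n",
               (unsigned)*(const uint32_t *)key_of(&s.node, &p), p.key_len);
        return 0;
}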
-/* tipc_sk_get_next - lock & return next socket after referenced one
-*/
-struct tipc_sock *tipc_sk_get_next(u32 *ref)
+void tipc_sk_rht_destroy(struct net *net)
{
- struct reference *entry;
- struct tipc_sock *tsk = NULL;
- uint index = *ref & tipc_ref_table.index_mask;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- read_lock_bh(&ref_table_lock);
- while (++index < tipc_ref_table.capacity) {
- entry = &tipc_ref_table.entries[index];
- if (!entry->tsk)
- continue;
- tsk = entry->tsk;
- sock_hold(&tsk->sk);
- *ref = entry->ref;
- break;
- }
- read_unlock_bh(&ref_table_lock);
- return tsk;
-}
+ /* Wait for socket readers to complete */
+ synchronize_net();
-static void tipc_sk_put(struct tipc_sock *tsk)
-{
- sock_put(&tsk->sk);
+ rhashtable_destroy(&tn->sk_rht);
}
/**
@@ -2639,8 +2510,9 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
return put_user(sizeof(value), ol);
}
-static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ struct sock *sk = sock->sk;
struct tipc_sioc_ln_req lnr;
void __user *argp = (void __user *)arg;
@@ -2648,7 +2520,8 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
case SIOCGETLINKNAME:
if (copy_from_user(&lnr, argp, sizeof(lnr)))
return -EFAULT;
- if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
+ if (!tipc_node_get_linkname(sock_net(sk),
+ lnr.bearer_id & 0xffff, lnr.peer,
lnr.linkname, TIPC_MAX_LINK_NAME)) {
if (copy_to_user(argp, &lnr, sizeof(lnr)))
return -EFAULT;
@@ -2820,18 +2693,20 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
int err;
void *hdr;
struct nlattr *attrs;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
+ &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
if (!hdr)
goto msg_cancel;
attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
if (!attrs)
goto genlmsg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref))
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
goto attr_msg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
goto attr_msg_cancel;
if (tsk->connected) {
@@ -2859,22 +2734,37 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
struct tipc_sock *tsk;
- u32 prev_ref = cb->args[0];
- u32 ref = prev_ref;
-
- tsk = tipc_sk_get_next(&ref);
- for (; tsk; tsk = tipc_sk_get_next(&ref)) {
- lock_sock(&tsk->sk);
- err = __tipc_nl_add_sk(skb, cb, tsk);
- release_sock(&tsk->sk);
- tipc_sk_put(tsk);
- if (err)
- break;
+ const struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ u32 tbl_id = cb->args[0];
+ u32 prev_portid = cb->args[1];
- prev_ref = ref;
- }
+ rcu_read_lock();
+ tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+ for (; tbl_id < tbl->size; tbl_id++) {
+ rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
+ spin_lock_bh(&tsk->sk.sk_lock.slock);
+ if (prev_portid && prev_portid != tsk->portid) {
+ spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ continue;
+ }
- cb->args[0] = prev_ref;
+ err = __tipc_nl_add_sk(skb, cb, tsk);
+ if (err) {
+ prev_portid = tsk->portid;
+ spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ goto out;
+ }
+ prev_portid = 0;
+ spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ }
+ }
+out:
+ rcu_read_unlock();
+ cb->args[0] = tbl_id;
+ cb->args[1] = prev_portid;
return skb->len;
}
@@ -2888,7 +2778,7 @@ static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
struct nlattr *attrs;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
+ &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
if (!hdr)
goto msg_cancel;
@@ -2962,12 +2852,13 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
- u32 tsk_ref = cb->args[0];
+ u32 tsk_portid = cb->args[0];
u32 last_publ = cb->args[1];
u32 done = cb->args[2];
+ struct net *net = sock_net(skb->sk);
struct tipc_sock *tsk;
- if (!tsk_ref) {
+ if (!tsk_portid) {
struct nlattr **attrs;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
@@ -2984,13 +2875,13 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!sock[TIPC_NLA_SOCK_REF])
return -EINVAL;
- tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+ tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
}
if (done)
return 0;
- tsk = tipc_sk_get(tsk_ref);
+ tsk = tipc_sk_lookup(net, tsk_portid);
if (!tsk)
return -EINVAL;
@@ -2999,9 +2890,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!err)
done = 1;
release_sock(&tsk->sk);
- tipc_sk_put(tsk);
+ sock_put(&tsk->sk);
- cb->args[0] = tsk_ref;
+ cb->args[0] = tsk_portid;
cb->args[1] = last_publ;
cb->args[2] = done;
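
Both dump callbacks above are resumable: tipc_nl_sk_dump() stashes the bucket index and the last portid in cb->args[0]/[1], and tipc_nl_publ_dump() keeps portid, last_publ and done in cb->args[0..2], so the next invocation continues where the previous netlink message filled up. A tiny sketch of that cursor-in-callback-args pattern:

#include <stdio.h>

#define NITEMS   10
#define PER_CALL 3

/* One "dump" pass: emit at most PER_CALL items, remember where we stopped
 * in *cursor so the next call resumes there; this is the role cb->args[]
 * plays for the netlink dump callbacks above. */
static int dump(unsigned int *cursor)
{
        unsigned int emitted = 0;

        while (*cursor < NITEMS && emitted < PER_CALL) {
                printf("item %u\n", *cursor);
                (*cursor)++;
                emitted++;
        }
        return emitted;               /* 0 means the dump is complete */
}

int main(void)
{
        unsigned int cursor = 0;      /* persists across calls, like cb->args[0] */

        while (dump(&cursor))
                printf("-- end of one netlink message --\n");
        return 0;
}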
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index d34089387006..238f1b7bd9bd 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -1,6 +1,6 @@
/* net/tipc/socket.h: Include file for TIPC socket code
*
- * Copyright (c) 2014, Ericsson AB
+ * Copyright (c) 2014-2015, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,18 @@
#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
-int tipc_sk_rcv(struct sk_buff *buf);
-struct sk_buff *tipc_sk_socks_show(void);
-void tipc_sk_mcast_rcv(struct sk_buff *buf);
-void tipc_sk_reinit(void);
-int tipc_sk_ref_table_init(u32 requested_size, u32 start);
-void tipc_sk_ref_table_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(struct net *net, int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+ int flags);
+int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ struct sk_buff_head *inputq);
+void tipc_sk_reinit(struct net *net);
+int tipc_sk_rht_init(struct net *net);
+void tipc_sk_rht_destroy(struct net *net);
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0344206b984f..72c339e432aa 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -50,33 +50,6 @@ struct tipc_subscriber {
struct list_head subscription_list;
};
-static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
- void *usr_data, void *buf, size_t len);
-static void *subscr_named_msg_event(int conid);
-static void subscr_conn_shutdown_event(int conid, void *usr_data);
-
-static atomic_t subscription_count = ATOMIC_INIT(0);
-
-static struct sockaddr_tipc topsrv_addr __read_mostly = {
- .family = AF_TIPC,
- .addrtype = TIPC_ADDR_NAMESEQ,
- .addr.nameseq.type = TIPC_TOP_SRV,
- .addr.nameseq.lower = TIPC_TOP_SRV,
- .addr.nameseq.upper = TIPC_TOP_SRV,
- .scope = TIPC_NODE_SCOPE
-};
-
-static struct tipc_server topsrv __read_mostly = {
- .saddr = &topsrv_addr,
- .imp = TIPC_CRITICAL_IMPORTANCE,
- .type = SOCK_SEQPACKET,
- .max_rcvbuf_size = sizeof(struct tipc_subscr),
- .name = "topology_server",
- .tipc_conn_recvmsg = subscr_conn_msg_event,
- .tipc_conn_new = subscr_named_msg_event,
- .tipc_conn_shutdown = subscr_conn_shutdown_event,
-};
-
/**
* htohl - convert value to endianness used by destination
* @in: value to convert
@@ -93,6 +66,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
u32 found_upper, u32 event, u32 port_ref,
u32 node)
{
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
struct tipc_subscriber *subscriber = sub->subscriber;
struct kvec msg_sect;
@@ -103,8 +77,8 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
sub->evt.found_upper = htohl(found_upper, sub->swap);
sub->evt.port.ref = htohl(port_ref, sub->swap);
sub->evt.port.node = htohl(node, sub->swap);
- tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
- msg_sect.iov_len);
+ tipc_conn_sendmsg(tn->topsrv, subscriber->conid, NULL,
+ msg_sect.iov_base, msg_sect.iov_len);
}
/**
@@ -141,9 +115,11 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
}
-static void subscr_timeout(struct tipc_subscription *sub)
+static void subscr_timeout(unsigned long data)
{
+ struct tipc_subscription *sub = (struct tipc_subscription *)data;
struct tipc_subscriber *subscriber = sub->subscriber;
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
/* The spin lock per subscriber is used to protect its members */
spin_lock_bh(&subscriber->lock);
@@ -167,9 +143,8 @@ static void subscr_timeout(struct tipc_subscription *sub)
TIPC_SUBSCR_TIMEOUT, 0, 0);
/* Now destroy subscription */
- k_term_timer(&sub->timer);
kfree(sub);
- atomic_dec(&subscription_count);
+ atomic_dec(&tn->subscription_count);
}
/**
@@ -179,10 +154,12 @@ static void subscr_timeout(struct tipc_subscription *sub)
*/
static void subscr_del(struct tipc_subscription *sub)
{
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
tipc_nametbl_unsubscribe(sub);
list_del(&sub->subscription_list);
kfree(sub);
- atomic_dec(&subscription_count);
+ atomic_dec(&tn->subscription_count);
}
/**
@@ -190,9 +167,12 @@ static void subscr_del(struct tipc_subscription *sub)
*
* Note: Must call it in process context since it might sleep.
*/
-static void subscr_terminate(struct tipc_subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscription *sub)
{
- tipc_conn_terminate(&topsrv, subscriber->conid);
+ struct tipc_subscriber *subscriber = sub->subscriber;
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
+ tipc_conn_terminate(tn->topsrv, subscriber->conid);
}
static void subscr_release(struct tipc_subscriber *subscriber)
@@ -207,8 +187,7 @@ static void subscr_release(struct tipc_subscriber *subscriber)
subscription_list) {
if (sub->timeout != TIPC_WAIT_FOREVER) {
spin_unlock_bh(&subscriber->lock);
- k_cancel_timer(&sub->timer);
- k_term_timer(&sub->timer);
+ del_timer_sync(&sub->timer);
spin_lock_bh(&subscriber->lock);
}
subscr_del(sub);
@@ -250,8 +229,7 @@ static void subscr_cancel(struct tipc_subscr *s,
if (sub->timeout != TIPC_WAIT_FOREVER) {
sub->timeout = TIPC_WAIT_FOREVER;
spin_unlock_bh(&subscriber->lock);
- k_cancel_timer(&sub->timer);
- k_term_timer(&sub->timer);
+ del_timer_sync(&sub->timer);
spin_lock_bh(&subscriber->lock);
}
subscr_del(sub);
@@ -262,9 +240,11 @@ static void subscr_cancel(struct tipc_subscr *s,
*
* Called with subscriber lock held.
*/
-static int subscr_subscribe(struct tipc_subscr *s,
+static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
struct tipc_subscriber *subscriber,
- struct tipc_subscription **sub_p) {
+ struct tipc_subscription **sub_p)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_subscription *sub;
int swap;
@@ -279,7 +259,7 @@ static int subscr_subscribe(struct tipc_subscr *s,
}
/* Refuse subscription if global limit exceeded */
- if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+ if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
pr_warn("Subscription rejected, limit reached (%u)\n",
TIPC_MAX_SUBSCRIPTIONS);
return -EINVAL;
@@ -293,10 +273,11 @@ static int subscr_subscribe(struct tipc_subscr *s,
}
/* Initialize subscription object */
+ sub->net = net;
sub->seq.type = htohl(s->seq.type, swap);
sub->seq.lower = htohl(s->seq.lower, swap);
sub->seq.upper = htohl(s->seq.upper, swap);
- sub->timeout = htohl(s->timeout, swap);
+ sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
sub->filter = htohl(s->filter, swap);
if ((!(sub->filter & TIPC_SUB_PORTS) ==
!(sub->filter & TIPC_SUB_SERVICE)) ||
@@ -309,11 +290,10 @@ static int subscr_subscribe(struct tipc_subscr *s,
sub->subscriber = subscriber;
sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
- atomic_inc(&subscription_count);
+ atomic_inc(&tn->subscription_count);
if (sub->timeout != TIPC_WAIT_FOREVER) {
- k_init_timer(&sub->timer,
- (Handler)subscr_timeout, (unsigned long)sub);
- k_start_timer(&sub->timer, sub->timeout);
+ setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
+ mod_timer(&sub->timer, jiffies + sub->timeout);
}
*sub_p = sub;
return 0;
@@ -326,16 +306,18 @@ static void subscr_conn_shutdown_event(int conid, void *usr_data)
}
/* Handle one request to create a new subscription for the subscriber */
-static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
- void *usr_data, void *buf, size_t len)
+static void subscr_conn_msg_event(struct net *net, int conid,
+ struct sockaddr_tipc *addr, void *usr_data,
+ void *buf, size_t len)
{
struct tipc_subscriber *subscriber = usr_data;
struct tipc_subscription *sub = NULL;
spin_lock_bh(&subscriber->lock);
- if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+ if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber,
+ &sub) < 0) {
spin_unlock_bh(&subscriber->lock);
- subscr_terminate(subscriber);
+ subscr_terminate(sub);
return;
}
if (sub)
@@ -343,7 +325,6 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
spin_unlock_bh(&subscriber->lock);
}
-
/* Handle one request to establish a new subscriber */
static void *subscr_named_msg_event(int conid)
{
@@ -362,12 +343,50 @@ static void *subscr_named_msg_event(int conid)
return (void *)subscriber;
}
-int tipc_subscr_start(void)
+int tipc_subscr_start(struct net *net)
{
- return tipc_server_start(&topsrv);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ const char name[] = "topology_server";
+ struct tipc_server *topsrv;
+ struct sockaddr_tipc *saddr;
+
+ saddr = kzalloc(sizeof(*saddr), GFP_ATOMIC);
+ if (!saddr)
+ return -ENOMEM;
+ saddr->family = AF_TIPC;
+ saddr->addrtype = TIPC_ADDR_NAMESEQ;
+ saddr->addr.nameseq.type = TIPC_TOP_SRV;
+ saddr->addr.nameseq.lower = TIPC_TOP_SRV;
+ saddr->addr.nameseq.upper = TIPC_TOP_SRV;
+ saddr->scope = TIPC_NODE_SCOPE;
+
+ topsrv = kzalloc(sizeof(*topsrv), GFP_ATOMIC);
+ if (!topsrv) {
+ kfree(saddr);
+ return -ENOMEM;
+ }
+ topsrv->net = net;
+ topsrv->saddr = saddr;
+ topsrv->imp = TIPC_CRITICAL_IMPORTANCE;
+ topsrv->type = SOCK_SEQPACKET;
+ topsrv->max_rcvbuf_size = sizeof(struct tipc_subscr);
+ topsrv->tipc_conn_recvmsg = subscr_conn_msg_event;
+ topsrv->tipc_conn_new = subscr_named_msg_event;
+ topsrv->tipc_conn_shutdown = subscr_conn_shutdown_event;
+
+ strncpy(topsrv->name, name, strlen(name) + 1);
+ tn->topsrv = topsrv;
+ atomic_set(&tn->subscription_count, 0);
+
+ return tipc_server_start(topsrv);
}
-void tipc_subscr_stop(void)
+void tipc_subscr_stop(struct net *net)
{
- tipc_server_stop(&topsrv);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_server *topsrv = tn->topsrv;
+
+ tipc_server_stop(topsrv);
+ kfree(topsrv->saddr);
+ kfree(topsrv);
}
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 393e417bee3f..33488bd9fe3c 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -39,6 +39,9 @@
#include "server.h"
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS 65535
+
struct tipc_subscription;
struct tipc_subscriber;
@@ -46,6 +49,7 @@ struct tipc_subscriber;
* struct tipc_subscription - TIPC network topology subscription object
* @subscriber: pointer to its subscriber
* @seq: name sequence associated with subscription
+ * @net: pointer to network namespace
* @timeout: duration of subscription (in ms)
* @filter: event filtering to be done for subscription
* @timer: timer governing subscription duration (optional)
@@ -58,7 +62,8 @@ struct tipc_subscriber;
struct tipc_subscription {
struct tipc_subscriber *subscriber;
struct tipc_name_seq seq;
- u32 timeout;
+ struct net *net;
+ unsigned long timeout;
u32 filter;
struct timer_list timer;
struct list_head nameseq_list;
@@ -69,13 +74,10 @@ struct tipc_subscription {
int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
u32 found_upper);
-
void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
u32 found_upper, u32 event, u32 port_ref,
u32 node, int must);
-
-int tipc_subscr_start(void);
-
-void tipc_subscr_stop(void);
+int tipc_subscr_start(struct net *net);
+void tipc_subscr_stop(struct net *net);
#endif
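
The subscr.c/subscr.h changes continue the series' theme: the file-scope topology server and the global subscription_count atomic move into per-namespace storage (tn->topsrv, tn->subscription_count), allocated in tipc_subscr_start(net) and released in tipc_subscr_stop(net). A stripped-down sketch of that globals-to-per-namespace move, with invented type names:

#include <stdio.h>
#include <stdlib.h>

struct topsrv { const char *name; };

/* Per-namespace state in the spirit of struct tipc_net after this series:
 * what used to be file-scope globals now lives in a structure owned by
 * each network namespace. */
struct tipc_net_like {
        struct topsrv *topsrv;
        int subscription_count;
};

static int subscr_start(struct tipc_net_like *tn)
{
        tn->topsrv = calloc(1, sizeof(*tn->topsrv));
        if (!tn->topsrv)
                return -1;
        tn->topsrv->name = "topology_server";
        tn->subscription_count = 0;
        return 0;
}

static void subscr_stop(struct tipc_net_like *tn)
{
        free(tn->topsrv);
        tn->topsrv = NULL;
}

int main(void)
{
        struct tipc_net_like ns_a = { 0 }, ns_b = { 0 };

        subscr_start(&ns_a);
        subscr_start(&ns_b);
        ns_a.subscription_count = 3;  /* independent of ns_b */
        printf("ns_a: %d subs, ns_b: %d subs\n",
               ns_a.subscription_count, ns_b.subscription_count);
        subscr_stop(&ns_a);
        subscr_stop(&ns_b);
        return 0;
}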
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8e1b10274b02..526b6edab018 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1445,7 +1445,6 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
@@ -1456,14 +1455,12 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
unsigned int hash;
struct sk_buff *skb;
long timeo;
- struct scm_cookie tmp_scm;
+ struct scm_cookie scm;
int max_level;
int data_len = 0;
- if (NULL == siocb->scm)
- siocb->scm = &tmp_scm;
wait_for_unix_gc();
- err = scm_send(sock, msg, siocb->scm, false);
+ err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
@@ -1507,11 +1504,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (skb == NULL)
goto out;
- err = unix_scm_to_skb(siocb->scm, skb, true);
+ err = unix_scm_to_skb(&scm, skb, true);
if (err < 0)
goto out_free;
max_level = err + 1;
- unix_get_secdata(siocb->scm, skb);
+ unix_get_secdata(&scm, skb);
skb_put(skb, len - data_len);
skb->data_len = data_len;
@@ -1606,7 +1603,7 @@ restart:
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
- scm_destroy(siocb->scm);
+ scm_destroy(&scm);
return len;
out_unlock:
@@ -1616,7 +1613,7 @@ out_free:
out:
if (other)
sock_put(other);
- scm_destroy(siocb->scm);
+ scm_destroy(&scm);
return err;
}
@@ -1628,21 +1625,18 @@ out:
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct sock *other = NULL;
int err, size;
struct sk_buff *skb;
int sent = 0;
- struct scm_cookie tmp_scm;
+ struct scm_cookie scm;
bool fds_sent = false;
int max_level;
int data_len;
- if (NULL == siocb->scm)
- siocb->scm = &tmp_scm;
wait_for_unix_gc();
- err = scm_send(sock, msg, siocb->scm, false);
+ err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
@@ -1683,7 +1677,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out_err;
/* Only send the fds in the first buffer */
- err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+ err = unix_scm_to_skb(&scm, skb, !fds_sent);
if (err < 0) {
kfree_skb(skb);
goto out_err;
@@ -1715,8 +1709,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
sent += size;
}
- scm_destroy(siocb->scm);
- siocb->scm = NULL;
+ scm_destroy(&scm);
return sent;
@@ -1728,8 +1721,7 @@ pipe_err:
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
out_err:
- scm_destroy(siocb->scm);
- siocb->scm = NULL;
+ scm_destroy(&scm);
return sent ? : err;
}
@@ -1778,8 +1770,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
- struct sock_iocb *siocb = kiocb_to_siocb(iocb);
- struct scm_cookie tmp_scm;
+ struct scm_cookie scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int noblock = flags & MSG_DONTWAIT;
@@ -1831,16 +1822,14 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sock_flag(sk, SOCK_RCVTSTAMP))
__sock_recv_timestamp(msg, sk, skb);
- if (!siocb->scm) {
- siocb->scm = &tmp_scm;
- memset(&tmp_scm, 0, sizeof(tmp_scm));
- }
- scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
- unix_set_secdata(siocb->scm, skb);
+ memset(&scm, 0, sizeof(scm));
+
+ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
+ unix_set_secdata(&scm, skb);
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
- unix_detach_fds(siocb->scm, skb);
+ unix_detach_fds(&scm, skb);
sk_peek_offset_bwd(sk, skb->len);
} else {
@@ -1860,11 +1849,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
- siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+ scm.fp = scm_fp_dup(UNIXCB(skb).fp);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
- scm_recv(sock, msg, siocb->scm, flags);
+ scm_recv(sock, msg, &scm, flags);
out_free:
skb_free_datagram(sk, skb);
@@ -1915,8 +1904,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
- struct sock_iocb *siocb = kiocb_to_siocb(iocb);
- struct scm_cookie tmp_scm;
+ struct scm_cookie scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
@@ -1943,10 +1931,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
* while sleeps in memcpy_tomsg
*/
- if (!siocb->scm) {
- siocb->scm = &tmp_scm;
- memset(&tmp_scm, 0, sizeof(tmp_scm));
- }
+ memset(&scm, 0, sizeof(scm));
err = mutex_lock_interruptible(&u->readlock);
if (unlikely(err)) {
@@ -2012,13 +1997,13 @@ again:
if (check_creds) {
/* Never glue messages from different writers */
- if ((UNIXCB(skb).pid != siocb->scm->pid) ||
- !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
- !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
+ if ((UNIXCB(skb).pid != scm.pid) ||
+ !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
+ !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
- scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
+ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
check_creds = 1;
}
@@ -2045,7 +2030,7 @@ again:
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
- unix_detach_fds(siocb->scm, skb);
+ unix_detach_fds(&scm, skb);
if (unix_skb_len(skb))
break;
@@ -2053,13 +2038,13 @@ again:
skb_unlink(skb, &sk->sk_receive_queue);
consume_skb(skb);
- if (siocb->scm->fp)
+ if (scm.fp)
break;
} else {
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
- siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+ scm.fp = scm_fp_dup(UNIXCB(skb).fp);
sk_peek_offset_fwd(sk, chunk);
@@ -2068,7 +2053,7 @@ again:
} while (size);
mutex_unlock(&u->readlock);
- scm_recv(sock, msg, siocb->scm, flags);
+ scm_recv(sock, msg, &scm, flags);
out:
return copied ? : err;
}
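
In af_unix the per-kiocb scm pointer (siocb->scm, sometimes pointed at a temporary) is replaced by a struct scm_cookie living directly on each send/recv path's stack, always zero-initialized and passed by address. A trivial sketch of the shape of that change; the names here are invented, not the af_unix types:

#include <stdio.h>
#include <string.h>

struct cookie { int pid; int nfds; };

/* Old shape: a nullable pointer hanging off a per-request object that every
 * helper had to check and possibly point at a temporary. */
struct request_old { struct cookie *scm; };

/* New shape, as in the af_unix change above: each send/recv path keeps the
 * cookie on its own stack and passes &scm down. */
static int sendmsg_like(const char *msg)
{
        struct cookie scm;

        memset(&scm, 0, sizeof(scm)); /* always initialized, never NULL */
        scm.pid = 1234;
        printf("sending \"%s\" with pid %d\n", msg, scm.pid);
        return (int)strlen(msg);
}

int main(void)
{
        return sendmsg_like("hello") > 0 ? 0 : 1;
}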
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 86fa0f3b2caf..ef542fbca9fe 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -155,7 +155,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
goto out_nlmsg_trim;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
out_nlmsg_trim:
nlmsg_cancel(skb, nlh);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 02d2e5229240..7f3255084a6c 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1850,8 +1850,7 @@ static ssize_t vmci_transport_stream_enqueue(
struct msghdr *msg,
size_t len)
{
- /* XXX: stripping const */
- return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0);
+ return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
}
static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 53dda7728f86..3af0ecf1cc16 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -320,6 +320,20 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
rtnl_unlock();
}
+static void cfg80211_sched_scan_stop_wk(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(work, struct cfg80211_registered_device,
+ sched_scan_stop_wk);
+
+ rtnl_lock();
+
+ __cfg80211_stop_sched_scan(rdev, false);
+
+ rtnl_unlock();
+}
+
/* exported functions */
struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
@@ -406,6 +420,7 @@ use_default_name:
INIT_LIST_HEAD(&rdev->destroy_list);
spin_lock_init(&rdev->destroy_list_lock);
INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
+ INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk);
#ifdef CONFIG_CFG80211_DEFAULT_PS
rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -560,6 +575,14 @@ int wiphy_register(struct wiphy *wiphy)
BIT(NL80211_IFTYPE_MONITOR)))
wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
+ if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) &&
+ (wiphy->regulatory_flags &
+ (REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG |
+ REGULATORY_COUNTRY_IE_FOLLOW_POWER |
+ REGULATORY_COUNTRY_IE_IGNORE))))
+ return -EINVAL;
+
if (WARN_ON(wiphy->coalesce &&
(!wiphy->coalesce->n_rules ||
!wiphy->coalesce->n_patterns) &&
@@ -778,6 +801,7 @@ void wiphy_unregister(struct wiphy *wiphy)
flush_work(&rdev->event_work);
cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
flush_work(&rdev->destroy_work);
+ flush_work(&rdev->sched_scan_stop_wk);
#ifdef CONFIG_PM
if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -858,6 +882,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev)
{
struct net_device *dev = wdev->netdev;
+ struct cfg80211_sched_scan_request *sched_scan_req;
ASSERT_RTNL();
ASSERT_WDEV_LOCK(wdev);
@@ -868,7 +893,8 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- if (rdev->sched_scan_req && dev == rdev->sched_scan_req->dev)
+ sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
+ if (sched_scan_req && dev == sched_scan_req->dev)
__cfg80211_stop_sched_scan(rdev, false);
#ifdef CONFIG_CFG80211_WEXT
@@ -943,6 +969,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev;
+ struct cfg80211_sched_scan_request *sched_scan_req;
if (!wdev)
return NOTIFY_DONE;
@@ -1007,8 +1034,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
___cfg80211_scan_done(rdev, false);
}
- if (WARN_ON(rdev->sched_scan_req &&
- rdev->sched_scan_req->dev == wdev->netdev)) {
+ sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
+ if (WARN_ON(sched_scan_req &&
+ sched_scan_req->dev == wdev->netdev)) {
__cfg80211_stop_sched_scan(rdev, false);
}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index faa5b1609aae..801cd49c5a0c 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -36,6 +36,13 @@ struct cfg80211_registered_device {
* the country on the country IE changed. */
char country_ie_alpha2[2];
+ /*
+ * the driver requests the regulatory core to set this regulatory
+ * domain as the wiphy's. Only used for %REGULATORY_WIPHY_SELF_MANAGED
+ * devices using the regulatory_set_wiphy_regd() API
+ */
+ const struct ieee80211_regdomain *requested_regd;
+
/* If a Country IE has been received this tells us the environment
* which its telling us its in. This defaults to ENVIRON_ANY */
enum environment_cap env;
@@ -63,7 +70,7 @@ struct cfg80211_registered_device {
u32 bss_generation;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
struct sk_buff *scan_msg;
- struct cfg80211_sched_scan_request *sched_scan_req;
+ struct cfg80211_sched_scan_request __rcu *sched_scan_req;
unsigned long suspend_at;
struct work_struct scan_done_wk;
struct work_struct sched_scan_results_wk;
@@ -84,6 +91,8 @@ struct cfg80211_registered_device {
struct list_head destroy_list;
struct work_struct destroy_work;
+ struct work_struct sched_scan_stop_wk;
+
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __aligned(NETDEV_ALIGN);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7ca4b5133123..d78fd8b54515 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -59,13 +59,13 @@ enum nl80211_multicast_groups {
};
static const struct genl_multicast_group nl80211_mcgrps[] = {
- [NL80211_MCGRP_CONFIG] = { .name = "config", },
- [NL80211_MCGRP_SCAN] = { .name = "scan", },
- [NL80211_MCGRP_REGULATORY] = { .name = "regulatory", },
- [NL80211_MCGRP_MLME] = { .name = "mlme", },
- [NL80211_MCGRP_VENDOR] = { .name = "vendor", },
+ [NL80211_MCGRP_CONFIG] = { .name = NL80211_MULTICAST_GROUP_CONFIG },
+ [NL80211_MCGRP_SCAN] = { .name = NL80211_MULTICAST_GROUP_SCAN },
+ [NL80211_MCGRP_REGULATORY] = { .name = NL80211_MULTICAST_GROUP_REG },
+ [NL80211_MCGRP_MLME] = { .name = NL80211_MULTICAST_GROUP_MLME },
+ [NL80211_MCGRP_VENDOR] = { .name = NL80211_MULTICAST_GROUP_VENDOR },
#ifdef CONFIG_NL80211_TESTMODE
- [NL80211_MCGRP_TESTMODE] = { .name = "testmode", }
+ [NL80211_MCGRP_TESTMODE] = { .name = NL80211_MULTICAST_GROUP_TESTMODE }
#endif
};
@@ -396,6 +396,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
[NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
+ [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
+ [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
+ [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -1087,6 +1090,11 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
return -ENOBUFS;
}
+ if ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_NET_DETECT) &&
+ nla_put_u32(msg, NL80211_WOWLAN_TRIG_NET_DETECT,
+ rdev->wiphy.wowlan->max_nd_match_sets))
+ return -ENOBUFS;
+
if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
return -ENOBUFS;
@@ -1701,12 +1709,22 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.max_num_csa_counters))
goto nla_put_failure;
+ if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
+ nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG))
+ goto nla_put_failure;
+
+ if (nla_put(msg, NL80211_ATTR_EXT_FEATURES,
+ sizeof(rdev->wiphy.ext_features),
+ rdev->wiphy.ext_features))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
}
finish:
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -2389,7 +2407,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
goto nla_put_failure;
}
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -2854,6 +2873,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->get_key)
return -EOPNOTSUPP;
+ if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+ return -ENOENT;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -2873,10 +2895,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
goto nla_put_failure;
- if (pairwise && mac_addr &&
- !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
- return -ENOENT;
-
err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
get_key_callback);
@@ -3047,7 +3065,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
wdev_lock(dev->ieee80211_ptr);
err = nl80211_key_allowed(dev->ieee80211_ptr);
- if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+ if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
err = -ENOENT;
@@ -3563,6 +3581,7 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
struct nlattr *rate;
u32 bitrate;
u16 bitrate_compat;
+ enum nl80211_attrs rate_flg;
rate = nla_nest_start(msg, attr);
if (!rate)
@@ -3579,12 +3598,36 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat))
return false;
+ switch (info->bw) {
+ case RATE_INFO_BW_5:
+ rate_flg = NL80211_RATE_INFO_5_MHZ_WIDTH;
+ break;
+ case RATE_INFO_BW_10:
+ rate_flg = NL80211_RATE_INFO_10_MHZ_WIDTH;
+ break;
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case RATE_INFO_BW_20:
+ rate_flg = 0;
+ break;
+ case RATE_INFO_BW_40:
+ rate_flg = NL80211_RATE_INFO_40_MHZ_WIDTH;
+ break;
+ case RATE_INFO_BW_80:
+ rate_flg = NL80211_RATE_INFO_80_MHZ_WIDTH;
+ break;
+ case RATE_INFO_BW_160:
+ rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH;
+ break;
+ }
+
+ if (rate_flg && nla_put_flag(msg, rate_flg))
+ return false;
+
if (info->flags & RATE_INFO_FLAGS_MCS) {
if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs))
return false;
- if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH &&
- nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH))
- return false;
if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
return false;
@@ -3593,18 +3636,6 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
return false;
if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss))
return false;
- if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH &&
- nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH))
- return false;
- if (info->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH &&
- nla_put_flag(msg, NL80211_RATE_INFO_80_MHZ_WIDTH))
- return false;
- if (info->flags & RATE_INFO_FLAGS_80P80_MHZ_WIDTH &&
- nla_put_flag(msg, NL80211_RATE_INFO_80P80_MHZ_WIDTH))
- return false;
- if (info->flags & RATE_INFO_FLAGS_160_MHZ_WIDTH &&
- nla_put_flag(msg, NL80211_RATE_INFO_160_MHZ_WIDTH))
- return false;
if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
return false;
@@ -3640,8 +3671,8 @@ static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal,
return true;
}
-static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
- int flags,
+static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
+ u32 seq, int flags,
struct cfg80211_registered_device *rdev,
struct net_device *dev,
const u8 *mac_addr, struct station_info *sinfo)
@@ -3649,7 +3680,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
void *hdr;
struct nlattr *sinfoattr, *bss_param;
- hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
+ hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -1;
@@ -3661,115 +3692,77 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
if (!sinfoattr)
goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
- nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
- sinfo->connected_time))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
- nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
- sinfo->inactive_time))
- goto nla_put_failure;
- if ((sinfo->filled & (STATION_INFO_RX_BYTES |
- STATION_INFO_RX_BYTES64)) &&
+
+#define PUT_SINFO(attr, memb, type) do { \
+ if (sinfo->filled & BIT(NL80211_STA_INFO_ ## attr) && \
+ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \
+ sinfo->memb)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ PUT_SINFO(CONNECTED_TIME, connected_time, u32);
+ PUT_SINFO(INACTIVE_TIME, inactive_time, u32);
+
+ if (sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES) |
+ BIT(NL80211_STA_INFO_RX_BYTES64)) &&
nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
(u32)sinfo->rx_bytes))
goto nla_put_failure;
- if ((sinfo->filled & (STATION_INFO_TX_BYTES |
- STATION_INFO_TX_BYTES64)) &&
+
+ if (sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES) |
+ BIT(NL80211_STA_INFO_TX_BYTES64)) &&
nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
(u32)sinfo->tx_bytes))
goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_RX_BYTES64) &&
- nla_put_u64(msg, NL80211_STA_INFO_RX_BYTES64,
- sinfo->rx_bytes))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_TX_BYTES64) &&
- nla_put_u64(msg, NL80211_STA_INFO_TX_BYTES64,
- sinfo->tx_bytes))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_LLID) &&
- nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_PLID) &&
- nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
- nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
- sinfo->plink_state))
- goto nla_put_failure;
+
+ PUT_SINFO(RX_BYTES64, rx_bytes, u64);
+ PUT_SINFO(TX_BYTES64, tx_bytes, u64);
+ PUT_SINFO(LLID, llid, u16);
+ PUT_SINFO(PLID, plid, u16);
+ PUT_SINFO(PLINK_STATE, plink_state, u8);
+
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
- if ((sinfo->filled & STATION_INFO_SIGNAL) &&
- nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
- sinfo->signal))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
- nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
- sinfo->signal_avg))
- goto nla_put_failure;
+ PUT_SINFO(SIGNAL, signal, u8);
+ PUT_SINFO(SIGNAL_AVG, signal_avg, u8);
break;
default:
break;
}
- if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL) {
+ if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL)) {
if (!nl80211_put_signal(msg, sinfo->chains,
sinfo->chain_signal,
NL80211_STA_INFO_CHAIN_SIGNAL))
goto nla_put_failure;
}
- if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL_AVG) {
+ if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
if (!nl80211_put_signal(msg, sinfo->chains,
sinfo->chain_signal_avg,
NL80211_STA_INFO_CHAIN_SIGNAL_AVG))
goto nla_put_failure;
}
- if (sinfo->filled & STATION_INFO_TX_BITRATE) {
+ if (sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
NL80211_STA_INFO_TX_BITRATE))
goto nla_put_failure;
}
- if (sinfo->filled & STATION_INFO_RX_BITRATE) {
+ if (sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE)) {
if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
NL80211_STA_INFO_RX_BITRATE))
goto nla_put_failure;
}
- if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
- nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
- sinfo->rx_packets))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
- nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
- sinfo->tx_packets))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
- nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
- sinfo->tx_retries))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
- nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
- sinfo->tx_failed))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_EXPECTED_THROUGHPUT) &&
- nla_put_u32(msg, NL80211_STA_INFO_EXPECTED_THROUGHPUT,
- sinfo->expected_throughput))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
- nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
- sinfo->beacon_loss_count))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_LOCAL_PM) &&
- nla_put_u32(msg, NL80211_STA_INFO_LOCAL_PM,
- sinfo->local_pm))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_PEER_PM) &&
- nla_put_u32(msg, NL80211_STA_INFO_PEER_PM,
- sinfo->peer_pm))
- goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_NONPEER_PM) &&
- nla_put_u32(msg, NL80211_STA_INFO_NONPEER_PM,
- sinfo->nonpeer_pm))
- goto nla_put_failure;
- if (sinfo->filled & STATION_INFO_BSS_PARAM) {
+
+ PUT_SINFO(RX_PACKETS, rx_packets, u32);
+ PUT_SINFO(TX_PACKETS, tx_packets, u32);
+ PUT_SINFO(TX_RETRIES, tx_retries, u32);
+ PUT_SINFO(TX_FAILED, tx_failed, u32);
+ PUT_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32);
+ PUT_SINFO(BEACON_LOSS, beacon_loss_count, u32);
+ PUT_SINFO(LOCAL_PM, local_pm, u32);
+ PUT_SINFO(PEER_PM, peer_pm, u32);
+ PUT_SINFO(NONPEER_PM, nonpeer_pm, u32);
+
+ if (sinfo->filled & BIT(NL80211_STA_INFO_BSS_PARAM)) {
bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
if (!bss_param)
goto nla_put_failure;
@@ -3788,23 +3781,68 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
nla_nest_end(msg, bss_param);
}
- if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
+ if ((sinfo->filled & BIT(NL80211_STA_INFO_STA_FLAGS)) &&
nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
sizeof(struct nl80211_sta_flag_update),
&sinfo->sta_flags))
goto nla_put_failure;
- if ((sinfo->filled & STATION_INFO_T_OFFSET) &&
- nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET,
- sinfo->t_offset))
- goto nla_put_failure;
+
+ PUT_SINFO(T_OFFSET, t_offset, u64);
+ PUT_SINFO(RX_DROP_MISC, rx_dropped_misc, u64);
+ PUT_SINFO(BEACON_RX, rx_beacon, u64);
+ PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
+
+#undef PUT_SINFO
+
+ if (sinfo->filled & BIT(NL80211_STA_INFO_TID_STATS)) {
+ struct nlattr *tidsattr;
+ int tid;
+
+ tidsattr = nla_nest_start(msg, NL80211_STA_INFO_TID_STATS);
+ if (!tidsattr)
+ goto nla_put_failure;
+
+ for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
+ struct cfg80211_tid_stats *tidstats;
+ struct nlattr *tidattr;
+
+ tidstats = &sinfo->pertid[tid];
+
+ if (!tidstats->filled)
+ continue;
+
+ tidattr = nla_nest_start(msg, tid + 1);
+ if (!tidattr)
+ goto nla_put_failure;
+
+#define PUT_TIDVAL(attr, memb, type) do { \
+ if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \
+ nla_put_ ## type(msg, NL80211_TID_STATS_ ## attr, \
+ tidstats->memb)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ PUT_TIDVAL(RX_MSDU, rx_msdu, u64);
+ PUT_TIDVAL(TX_MSDU, tx_msdu, u64);
+ PUT_TIDVAL(TX_MSDU_RETRIES, tx_msdu_retries, u64);
+ PUT_TIDVAL(TX_MSDU_FAILED, tx_msdu_failed, u64);
+
+#undef PUT_TIDVAL
+ nla_nest_end(msg, tidattr);
+ }
+
+ nla_nest_end(msg, tidsattr);
+ }
+
nla_nest_end(msg, sinfoattr);
- if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
+ if (sinfo->assoc_req_ies_len &&
nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
sinfo->assoc_req_ies))
goto nla_put_failure;
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
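
nl80211_send_station() collapses dozens of near-identical "if this bit in sinfo->filled is set, nla_put the matching attribute" blocks into the PUT_SINFO()/PUT_TIDVAL() token-pasting macros. A self-contained sketch of the same macro trick, with invented field names and plain printf in place of nla_put_*():

#include <stdint.h>
#include <stdio.h>

/* Field identifiers, mirroring the NL80211_STA_INFO_* style. */
enum { INFO_RX_PACKETS, INFO_TX_PACKETS, INFO_SIGNAL };

struct sinfo {
        uint32_t filled;
        uint32_t rx_packets;
        uint32_t tx_packets;
        uint8_t  signal;
};

static void put_u32(const char *name, uint32_t v) { printf("%s=%u\n", name, (unsigned)v); }
static void put_u8(const char *name, uint8_t v)   { printf("%s=%u\n", name, (unsigned)v); }

/* Token-pasting helper in the spirit of PUT_SINFO(): emit a field only when
 * its "filled" bit is set, building both the attribute name and the emit
 * function from the macro arguments. */
#define PUT_FIELD(attr, memb, type) do {                        \
                if (si->filled & (1u << INFO_ ## attr))         \
                        put_ ## type(#attr, si->memb);          \
        } while (0)

static void dump_station(const struct sinfo *si)
{
        PUT_FIELD(RX_PACKETS, rx_packets, u32);
        PUT_FIELD(TX_PACKETS, tx_packets, u32);
        PUT_FIELD(SIGNAL, signal, u8);
}

int main(void)
{
        struct sinfo si = {
                .filled = (1u << INFO_RX_PACKETS) | (1u << INFO_SIGNAL),
                .rx_packets = 42,
                .signal = 200,
        };
        dump_station(&si);
        return 0;
}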
@@ -3844,7 +3882,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
if (err)
goto out_err;
- if (nl80211_send_station(skb,
+ if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wdev->netdev, mac_addr,
@@ -3891,7 +3929,8 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- if (nl80211_send_station(msg, info->snd_portid, info->snd_seq, 0,
+ if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION,
+ info->snd_portid, info->snd_seq, 0,
rdev, dev, mac_addr, &sinfo) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
@@ -4533,7 +4572,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
nla_nest_end(msg, pinfoattr);
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -5327,42 +5367,20 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
return err;
}
-static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
+static int nl80211_put_regdom(const struct ieee80211_regdomain *regdom,
+ struct sk_buff *msg)
{
- const struct ieee80211_regdomain *regdom;
- struct sk_buff *msg;
- void *hdr = NULL;
struct nlattr *nl_reg_rules;
unsigned int i;
- if (!cfg80211_regdomain)
- return -EINVAL;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOBUFS;
-
- hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
- NL80211_CMD_GET_REG);
- if (!hdr)
- goto put_failure;
-
- if (reg_last_request_cell_base() &&
- nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
- NL80211_USER_REG_HINT_CELL_BASE))
- goto nla_put_failure;
-
- rcu_read_lock();
- regdom = rcu_dereference(cfg80211_regdomain);
-
if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, regdom->alpha2) ||
(regdom->dfs_region &&
nla_put_u8(msg, NL80211_ATTR_DFS_REGION, regdom->dfs_region)))
- goto nla_put_failure_rcu;
+ goto nla_put_failure;
nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
if (!nl_reg_rules)
- goto nla_put_failure_rcu;
+ goto nla_put_failure;
for (i = 0; i < regdom->n_reg_rules; i++) {
struct nlattr *nl_reg_rule;
@@ -5377,7 +5395,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
nl_reg_rule = nla_nest_start(msg, i);
if (!nl_reg_rule)
- goto nla_put_failure_rcu;
+ goto nla_put_failure;
max_bandwidth_khz = freq_range->max_bandwidth_khz;
if (!max_bandwidth_khz)
@@ -5398,13 +5416,74 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
power_rule->max_eirp) ||
nla_put_u32(msg, NL80211_ATTR_DFS_CAC_TIME,
reg_rule->dfs_cac_ms))
- goto nla_put_failure_rcu;
+ goto nla_put_failure;
nla_nest_end(msg, nl_reg_rule);
}
- rcu_read_unlock();
nla_nest_end(msg, nl_reg_rules);
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int nl80211_get_reg_do(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct ieee80211_regdomain *regdom = NULL;
+ struct cfg80211_registered_device *rdev;
+ struct wiphy *wiphy = NULL;
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOBUFS;
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_GET_REG);
+ if (!hdr)
+ goto put_failure;
+
+ if (info->attrs[NL80211_ATTR_WIPHY]) {
+ bool self_managed;
+
+ rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
+ if (IS_ERR(rdev)) {
+ nlmsg_free(msg);
+ return PTR_ERR(rdev);
+ }
+
+ wiphy = &rdev->wiphy;
+ self_managed = wiphy->regulatory_flags &
+ REGULATORY_WIPHY_SELF_MANAGED;
+ regdom = get_wiphy_regdom(wiphy);
+
+ /* a self-managed-reg device must have a private regdom */
+ if (WARN_ON(!regdom && self_managed)) {
+ nlmsg_free(msg);
+ return -EINVAL;
+ }
+
+ if (regdom &&
+ nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
+ goto nla_put_failure;
+ }
+
+ if (!wiphy && reg_last_request_cell_base() &&
+ nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
+ NL80211_USER_REG_HINT_CELL_BASE))
+ goto nla_put_failure;
+
+ rcu_read_lock();
+
+ if (!regdom)
+ regdom = rcu_dereference(cfg80211_regdomain);
+
+ if (nl80211_put_regdom(regdom, msg))
+ goto nla_put_failure_rcu;
+
+ rcu_read_unlock();
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -5418,6 +5497,84 @@ put_failure:
return -EMSGSIZE;
}
+static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb,
+ u32 seq, int flags, struct wiphy *wiphy,
+ const struct ieee80211_regdomain *regdom)
+{
+ void *hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
+ NL80211_CMD_GET_REG);
+
+ if (!hdr)
+ return -1;
+
+ genl_dump_check_consistent(cb, hdr, &nl80211_fam);
+
+ if (nl80211_put_regdom(regdom, msg))
+ goto nla_put_failure;
+
+ if (!wiphy && reg_last_request_cell_base() &&
+ nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
+ NL80211_USER_REG_HINT_CELL_BASE))
+ goto nla_put_failure;
+
+ if (wiphy &&
+ nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
+ goto nla_put_failure;
+
+ if (wiphy && wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
+ nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int nl80211_get_reg_dump(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct ieee80211_regdomain *regdom = NULL;
+ struct cfg80211_registered_device *rdev;
+ int err, reg_idx, start = cb->args[2];
+
+ rtnl_lock();
+
+ if (cfg80211_regdomain && start == 0) {
+ err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, NULL,
+ rtnl_dereference(cfg80211_regdomain));
+ if (err < 0)
+ goto out_err;
+ }
+
+ /* the global regdom is idx 0 */
+ reg_idx = 1;
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ regdom = get_wiphy_regdom(&rdev->wiphy);
+ if (!regdom)
+ continue;
+
+ if (++reg_idx <= start)
+ continue;
+
+ err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, &rdev->wiphy, regdom);
+ if (err < 0) {
+ reg_idx--;
+ break;
+ }
+ }
+
+ cb->args[2] = reg_idx;
+ err = skb->len;
+out_err:
+ rtnl_unlock();
+ return err;
+}
+
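
As an illustration (not part of the patch), the dump handler above follows the standard genetlink resume pattern: the position reached is stored in cb->args[] and the next invocation skips what was already sent. Condensed, with invented item names:

        /* Generic shape of a resumable netlink dump; cb->args[] persists
         * between calls for the same dump.
         */
        static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
        {
                struct item *it;                /* hypothetical item type */
                int idx = 0, start = cb->args[0];

                list_for_each_entry(it, &item_list, list) {
                        if (idx++ < start)
                                continue;       /* sent by an earlier call */
                        if (example_put_item(skb, cb, it) < 0) {
                                idx--;          /* retry this one next time */
                                break;          /* skb is full */
                        }
                }

                cb->args[0] = idx;
                return skb->len;                /* non-zero: call again */
        }
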
static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
@@ -5623,7 +5780,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->ssids = (void *)&request->channels[n_channels];
request->n_ssids = n_ssids;
if (ie_len) {
- if (request->ssids)
+ if (n_ssids)
request->ie = (void *)(request->ssids + n_ssids);
else
request->ie = (void *)(request->channels + n_channels);
@@ -5679,7 +5836,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->n_channels = i;
i = 0;
- if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
+ if (n_ssids) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
@@ -5877,7 +6034,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request->ssids = (void *)&request->channels[n_channels];
request->n_ssids = n_ssids;
if (ie_len) {
- if (request->ssids)
+ if (n_ssids)
request->ie = (void *)(request->ssids + n_ssids);
else
request->ie = (void *)(request->channels + n_channels);
@@ -5886,7 +6043,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (n_match_sets) {
if (request->ie)
request->match_sets = (void *)(request->ie + ie_len);
- else if (request->ssids)
+ else if (n_ssids)
request->match_sets =
(void *)(request->ssids + n_ssids);
else
@@ -5945,7 +6102,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request->n_channels = i;
i = 0;
- if (attrs[NL80211_ATTR_SCAN_SSIDS]) {
+ if (n_ssids) {
nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
@@ -6053,6 +6210,10 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
}
}
+ if (attrs[NL80211_ATTR_SCHED_SCAN_DELAY])
+ request->delay =
+ nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
+
request->interval = interval;
request->scan_start = jiffies;
@@ -6069,6 +6230,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_sched_scan_request *sched_scan_req;
int err;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
@@ -6078,27 +6240,32 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (rdev->sched_scan_req)
return -EINPROGRESS;
- rdev->sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev,
- info->attrs);
- err = PTR_ERR_OR_ZERO(rdev->sched_scan_req);
+ sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev,
+ info->attrs);
+
+ err = PTR_ERR_OR_ZERO(sched_scan_req);
if (err)
goto out_err;
- err = rdev_sched_scan_start(rdev, dev, rdev->sched_scan_req);
+ err = rdev_sched_scan_start(rdev, dev, sched_scan_req);
if (err)
goto out_free;
- rdev->sched_scan_req->dev = dev;
- rdev->sched_scan_req->wiphy = &rdev->wiphy;
+ sched_scan_req->dev = dev;
+ sched_scan_req->wiphy = &rdev->wiphy;
+
+ if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
+ sched_scan_req->owner_nlportid = info->snd_portid;
+
+ rcu_assign_pointer(rdev->sched_scan_req, sched_scan_req);
nl80211_send_sched_scan(rdev, dev,
NL80211_CMD_START_SCHED_SCAN);
return 0;
out_free:
- kfree(rdev->sched_scan_req);
+ kfree(sched_scan_req);
out_err:
- rdev->sched_scan_req = NULL;
return err;
}
@@ -6433,7 +6600,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
nla_nest_end(msg, bss);
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
fail_unlock_rcu:
rcu_read_unlock();
@@ -6481,12 +6649,17 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
}
static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
- int flags, struct net_device *dev,
- struct survey_info *survey)
+ int flags, struct net_device *dev,
+ bool allow_radio_stats,
+ struct survey_info *survey)
{
void *hdr;
struct nlattr *infoattr;
+ /* skip radio stats if userspace didn't request them */
+ if (!survey->channel && !allow_radio_stats)
+ return 0;
+
hdr = nl80211hdr_put(msg, portid, seq, flags,
NL80211_CMD_NEW_SURVEY_RESULTS);
if (!hdr)
@@ -6499,7 +6672,8 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
if (!infoattr)
goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
+ if (survey->channel &&
+ nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
survey->channel->center_freq))
goto nla_put_failure;
@@ -6509,49 +6683,57 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
if ((survey->filled & SURVEY_INFO_IN_USE) &&
nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
goto nla_put_failure;
- if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
- nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
- survey->channel_time))
+ if ((survey->filled & SURVEY_INFO_TIME) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_TIME,
+ survey->time))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_TIME_BUSY) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_BUSY,
+ survey->time_busy))
goto nla_put_failure;
- if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
- nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
- survey->channel_time_busy))
+ if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
+ survey->time_ext_busy))
goto nla_put_failure;
- if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
- nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
- survey->channel_time_ext_busy))
+ if ((survey->filled & SURVEY_INFO_TIME_RX) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_RX,
+ survey->time_rx))
goto nla_put_failure;
- if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
- nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
- survey->channel_time_rx))
+ if ((survey->filled & SURVEY_INFO_TIME_TX) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_TX,
+ survey->time_tx))
goto nla_put_failure;
- if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
- nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
- survey->channel_time_tx))
+ if ((survey->filled & SURVEY_INFO_TIME_SCAN) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_SCAN,
+ survey->time_scan))
goto nla_put_failure;
nla_nest_end(msg, infoattr);
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
-static int nl80211_dump_survey(struct sk_buff *skb,
- struct netlink_callback *cb)
+static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
{
struct survey_info survey;
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
int survey_idx = cb->args[2];
int res;
+ bool radio_stats;
res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (res)
return res;
+ /* prepare_wdev_dump parsed the attributes */
+ radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
+
if (!wdev->netdev) {
res = -EINVAL;
goto out_err;
@@ -6569,13 +6751,9 @@ static int nl80211_dump_survey(struct sk_buff *skb,
if (res)
goto out_err;
- /* Survey without a channel doesn't make sense */
- if (!survey.channel) {
- res = -EINVAL;
- goto out;
- }
-
- if (survey.channel->flags & IEEE80211_CHAN_DISABLED) {
+ /* don't send disabled channels, but do send non-channel data */
+ if (survey.channel &&
+ survey.channel->flags & IEEE80211_CHAN_DISABLED) {
survey_idx++;
continue;
}
@@ -6583,7 +6761,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
if (nl80211_send_survey(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- wdev->netdev, &survey) < 0)
+ wdev->netdev, radio_stats, &survey) < 0)
goto out;
survey_idx++;
}
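
As an illustration (not part of the patch), a driver survey callback reporting the renamed time counters, including a radio-wide entry with no channel as now permitted above, could look roughly like this; the dev_read_*() helpers are invented:

        /* Sketch of a cfg80211 dump_survey op using the new field names. */
        static int drv_dump_survey(struct wiphy *wiphy, struct net_device *dev,
                                   int idx, struct survey_info *survey)
        {
                if (idx != 0)
                        return -ENOENT;

                memset(survey, 0, sizeof(*survey));
                survey->channel = NULL;         /* radio-wide statistics */
                survey->filled  = SURVEY_INFO_TIME |
                                  SURVEY_INFO_TIME_BUSY |
                                  SURVEY_INFO_TIME_SCAN;
                survey->time      = dev_read_active_ms();
                survey->time_busy = dev_read_busy_ms();
                survey->time_scan = dev_read_scan_ms();

                return 0;
        }
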
@@ -7596,14 +7774,19 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net *net;
int err;
- u32 pid;
- if (!info->attrs[NL80211_ATTR_PID])
- return -EINVAL;
+ if (info->attrs[NL80211_ATTR_PID]) {
+ u32 pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
- pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
+ net = get_net_ns_by_pid(pid);
+ } else if (info->attrs[NL80211_ATTR_NETNS_FD]) {
+ u32 fd = nla_get_u32(info->attrs[NL80211_ATTR_NETNS_FD]);
+
+ net = get_net_ns_by_fd(fd);
+ } else {
+ return -EINVAL;
+ }
- net = get_net_ns_by_pid(pid);
if (IS_ERR(net))
return PTR_ERR(net);
@@ -8599,6 +8782,48 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
return 0;
}
+static int nl80211_send_wowlan_nd(struct sk_buff *msg,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct nlattr *nd, *freqs, *matches, *match;
+ int i;
+
+ if (!req)
+ return 0;
+
+ nd = nla_nest_start(msg, NL80211_WOWLAN_TRIG_NET_DETECT);
+ if (!nd)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval))
+ return -ENOBUFS;
+
+ freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
+ if (!freqs)
+ return -ENOBUFS;
+
+ for (i = 0; i < req->n_channels; i++)
+ nla_put_u32(msg, i, req->channels[i]->center_freq);
+
+ nla_nest_end(msg, freqs);
+
+ if (req->n_match_sets) {
+ matches = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_MATCH);
+ for (i = 0; i < req->n_match_sets; i++) {
+ match = nla_nest_start(msg, i);
+ nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
+ req->match_sets[i].ssid.ssid_len,
+ req->match_sets[i].ssid.ssid);
+ nla_nest_end(msg, match);
+ }
+ nla_nest_end(msg, matches);
+ }
+
+ nla_nest_end(msg, nd);
+
+ return 0;
+}
+
static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8656,6 +8881,11 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.wowlan_config->tcp))
goto nla_put_failure;
+ if (nl80211_send_wowlan_nd(
+ msg,
+ rdev->wiphy.wowlan_config->nd_config))
+ goto nla_put_failure;
+
nla_nest_end(msg, nl_wowlan);
}
@@ -10225,7 +10455,8 @@ static const struct genl_ops nl80211_ops[] = {
},
{
.cmd = NL80211_CMD_GET_REG,
- .doit = nl80211_get_reg,
+ .doit = nl80211_get_reg_do,
+ .dumpit = nl80211_get_reg_dump,
.policy = nl80211_policy,
.internal_flags = NL80211_FLAG_NEED_RTNL,
/* can be retrieved by unprivileged users */
@@ -10824,7 +11055,8 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
/* ignore errors and send incomplete event anyway */
nl80211_add_scan_req(msg, rdev);
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -10847,7 +11079,8 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
goto nla_put_failure;
- return genlmsg_end(msg, hdr);
+ genlmsg_end(msg, hdr);
+ return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -10939,25 +11172,9 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
-/*
- * This can happen on global regulatory changes or device specific settings
- * based on custom world regulatory domains.
- */
-void nl80211_send_reg_change_event(struct regulatory_request *request)
+static bool nl80211_reg_change_event_fill(struct sk_buff *msg,
+ struct regulatory_request *request)
{
- struct sk_buff *msg;
- void *hdr;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_CHANGE);
- if (!hdr) {
- nlmsg_free(msg);
- return;
- }
-
/* Userspace can always count this one always being set */
if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
goto nla_put_failure;
@@ -10983,8 +11200,46 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
goto nla_put_failure;
}
- if (request->wiphy_idx != WIPHY_IDX_INVALID &&
- nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
+ if (request->wiphy_idx != WIPHY_IDX_INVALID) {
+ struct wiphy *wiphy = wiphy_idx_to_wiphy(request->wiphy_idx);
+
+ if (wiphy &&
+ nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
+ goto nla_put_failure;
+
+ if (wiphy &&
+ wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
+ nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG))
+ goto nla_put_failure;
+ }
+
+ return true;
+
+nla_put_failure:
+ return false;
+}
+
+/*
+ * This can happen on global regulatory changes or device specific settings
+ * based on custom regulatory domains.
+ */
+void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
+ struct regulatory_request *request)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd_id);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nl80211_reg_change_event_fill(msg, request) == false)
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -11523,7 +11778,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
if (!msg)
return;
- if (nl80211_send_station(msg, 0, 0, 0,
+ if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, 0, 0, 0,
rdev, dev, mac_addr, sinfo) < 0) {
nlmsg_free(msg);
return;
@@ -11534,12 +11789,16 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
}
EXPORT_SYMBOL(cfg80211_new_sta);
-void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
+void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp)
{
struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct sk_buff *msg;
- void *hdr;
+ struct station_info empty_sinfo = {};
+
+ if (!sinfo)
+ sinfo = &empty_sinfo;
trace_cfg80211_del_sta(dev, mac_addr);
@@ -11547,27 +11806,16 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
if (!msg)
return;
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_STATION);
- if (!hdr) {
+ if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0,
+ rdev, dev, mac_addr, sinfo) < 0) {
nlmsg_free(msg);
return;
}
- if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
-
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_MLME, gfp);
- return;
-
- nla_put_failure:
- genlmsg_cancel(msg, hdr);
- nlmsg_free(msg);
}
-EXPORT_SYMBOL(cfg80211_del_sta);
+EXPORT_SYMBOL(cfg80211_del_sta_sinfo);
void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
enum nl80211_connect_failed_reason reason,
@@ -12471,6 +12719,13 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
bool schedule_destroy_work = false;
+ bool schedule_scan_stop = false;
+ struct cfg80211_sched_scan_request *sched_scan_req =
+ rcu_dereference(rdev->sched_scan_req);
+
+ if (sched_scan_req && notify->portid &&
+ sched_scan_req->owner_nlportid == notify->portid)
+ schedule_scan_stop = true;
list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -12501,6 +12756,12 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
spin_unlock(&rdev->destroy_list_lock);
schedule_work(&rdev->destroy_work);
}
+ } else if (schedule_scan_stop) {
+ sched_scan_req->owner_nlportid = 0;
+
+ if (rdev->ops->sched_scan_stop &&
+ rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+ schedule_work(&rdev->sched_scan_stop_wk);
}
}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 7ad70d6f0cc6..84d4edf1d545 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -17,7 +17,21 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 cmd);
void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
struct net_device *netdev);
-void nl80211_send_reg_change_event(struct regulatory_request *request);
+void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
+ struct regulatory_request *request);
+
+static inline void
+nl80211_send_reg_change_event(struct regulatory_request *request)
+{
+ nl80211_common_reg_change_event(NL80211_CMD_REG_CHANGE, request);
+}
+
+static inline void
+nl80211_send_wiphy_reg_change_event(struct regulatory_request *request)
+{
+ nl80211_common_reg_change_event(NL80211_CMD_WIPHY_REG_CHANGE, request);
+}
+
void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len, gfp_t gfp);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7b8309840d4e..b586d0dcb09e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -109,7 +109,7 @@ static struct regulatory_request core_request_world = {
* protected by RTNL (and can be accessed with RCU protection)
*/
static struct regulatory_request __rcu *last_request =
- (void __rcu *)&core_request_world;
+ (void __force __rcu *)&core_request_world;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
@@ -142,7 +142,7 @@ static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
return rtnl_dereference(cfg80211_regdomain);
}
-static const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
+const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
{
return rtnl_dereference(wiphy->regd);
}
@@ -1307,6 +1307,9 @@ static bool ignore_reg_update(struct wiphy *wiphy,
{
struct regulatory_request *lr = get_last_request();
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ return true;
+
if (!lr) {
REG_DBG_PRINT("Ignoring regulatory request set by %s "
"since last_request is not set\n",
@@ -1530,45 +1533,40 @@ static void reg_call_notifier(struct wiphy *wiphy,
static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- struct ieee80211_channel *ch;
struct cfg80211_chan_def chandef;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- bool ret = true;
+ enum nl80211_iftype iftype;
wdev_lock(wdev);
+ iftype = wdev->iftype;
+ /* make sure the interface is active */
if (!wdev->netdev || !netif_running(wdev->netdev))
- goto out;
+ goto wdev_inactive_unlock;
- switch (wdev->iftype) {
+ switch (iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (!wdev->beacon_interval)
- goto out;
-
- ret = cfg80211_reg_can_beacon(wiphy,
- &wdev->chandef, wdev->iftype);
+ goto wdev_inactive_unlock;
+ chandef = wdev->chandef;
break;
case NL80211_IFTYPE_ADHOC:
if (!wdev->ssid_len)
- goto out;
-
- ret = cfg80211_reg_can_beacon(wiphy,
- &wdev->chandef, wdev->iftype);
+ goto wdev_inactive_unlock;
+ chandef = wdev->chandef;
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
if (!wdev->current_bss ||
!wdev->current_bss->pub.channel)
- goto out;
+ goto wdev_inactive_unlock;
- ch = wdev->current_bss->pub.channel;
- if (rdev->ops->get_channel &&
- !rdev_get_channel(rdev, wdev, &chandef))
- ret = cfg80211_chandef_usable(wiphy, &chandef,
- IEEE80211_CHAN_DISABLED);
- else
- ret = !(ch->flags & IEEE80211_CHAN_DISABLED);
+ if (!rdev->ops->get_channel ||
+ rdev_get_channel(rdev, wdev, &chandef))
+ cfg80211_chandef_create(&chandef,
+ wdev->current_bss->pub.channel,
+ NL80211_CHAN_NO_HT);
break;
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
@@ -1581,9 +1579,26 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
break;
}
-out:
wdev_unlock(wdev);
- return ret;
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return cfg80211_chandef_usable(wiphy, &chandef,
+ IEEE80211_CHAN_DISABLED);
+ default:
+ break;
+ }
+
+ return true;
+
+wdev_inactive_unlock:
+ wdev_unlock(wdev);
+ return true;
}
static void reg_leave_invalid_chans(struct wiphy *wiphy)
@@ -1683,8 +1698,12 @@ static void handle_channel_custom(struct wiphy *wiphy,
if (IS_ERR(reg_rule)) {
REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
chan->center_freq);
- chan->orig_flags |= IEEE80211_CHAN_DISABLED;
- chan->flags = chan->orig_flags;
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ } else {
+ chan->orig_flags |= IEEE80211_CHAN_DISABLED;
+ chan->flags = chan->orig_flags;
+ }
return;
}
@@ -1709,7 +1728,13 @@ static void handle_channel_custom(struct wiphy *wiphy,
chan->dfs_state = NL80211_DFS_USABLE;
chan->beacon_found = false;
- chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
+
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ chan->flags = chan->orig_flags | bw_flags |
+ map_regdom_flags(reg_rule->flags);
+ else
+ chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
+
chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
chan->max_reg_power = chan->max_power =
(int) MBM_TO_DBM(power_rule->max_eirp);
@@ -2095,6 +2120,26 @@ out_free:
reg_free_request(reg_request);
}
+static bool reg_only_self_managed_wiphys(void)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wiphy *wiphy;
+ bool self_managed_found = false;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ wiphy = &rdev->wiphy;
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ self_managed_found = true;
+ else
+ return false;
+ }
+
+ /* make sure at least one self-managed wiphy exists */
+ return self_managed_found;
+}
+
/*
* Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
* Regulatory hints come on a first come first serve basis and we
@@ -2126,6 +2171,11 @@ static void reg_process_pending_hints(void)
spin_unlock(&reg_requests_lock);
+ if (reg_only_self_managed_wiphys()) {
+ reg_free_request(reg_request);
+ return;
+ }
+
reg_process_hint(reg_request);
}
@@ -2153,11 +2203,52 @@ static void reg_process_pending_beacon_hints(void)
spin_unlock_bh(&reg_pending_beacons_lock);
}
+static void reg_process_self_managed_hints(void)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wiphy *wiphy;
+ const struct ieee80211_regdomain *tmp;
+ const struct ieee80211_regdomain *regd;
+ enum ieee80211_band band;
+ struct regulatory_request request = {};
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ wiphy = &rdev->wiphy;
+
+ spin_lock(&reg_requests_lock);
+ regd = rdev->requested_regd;
+ rdev->requested_regd = NULL;
+ spin_unlock(&reg_requests_lock);
+
+ if (regd == NULL)
+ continue;
+
+ tmp = get_wiphy_regdom(wiphy);
+ rcu_assign_pointer(wiphy->regd, regd);
+ rcu_free_regdom(tmp);
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+ handle_band_custom(wiphy, wiphy->bands[band], regd);
+
+ reg_process_ht_flags(wiphy);
+
+ request.wiphy_idx = get_wiphy_idx(wiphy);
+ request.alpha2[0] = regd->alpha2[0];
+ request.alpha2[1] = regd->alpha2[1];
+ request.initiator = NL80211_REGDOM_SET_BY_DRIVER;
+
+ nl80211_send_wiphy_reg_change_event(&request);
+ }
+
+ reg_check_channels();
+}
+
static void reg_todo(struct work_struct *work)
{
rtnl_lock();
reg_process_pending_hints();
reg_process_pending_beacon_hints();
+ reg_process_self_managed_hints();
rtnl_unlock();
}
@@ -2438,6 +2529,8 @@ static void restore_regulatory_settings(bool reset_user)
world_alpha2[1] = cfg80211_world_regdom->alpha2[1];
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ continue;
if (rdev->wiphy.regulatory_flags & REGULATORY_CUSTOM_REG)
restore_custom_reg_settings(&rdev->wiphy);
}
@@ -2841,10 +2934,79 @@ int set_regdom(const struct ieee80211_regdomain *rd)
return 0;
}
+static int __regulatory_set_wiphy_regd(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd)
+{
+ const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain *prev_regd;
+ struct cfg80211_registered_device *rdev;
+
+ if (WARN_ON(!wiphy || !rd))
+ return -EINVAL;
+
+ if (WARN(!(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED),
+ "wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n"))
+ return -EPERM;
+
+ if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected\n")) {
+ print_regdomain_info(rd);
+ return -EINVAL;
+ }
+
+ regd = reg_copy_regd(rd);
+ if (IS_ERR(regd))
+ return PTR_ERR(regd);
+
+ rdev = wiphy_to_rdev(wiphy);
+
+ spin_lock(&reg_requests_lock);
+ prev_regd = rdev->requested_regd;
+ rdev->requested_regd = regd;
+ spin_unlock(&reg_requests_lock);
+
+ kfree(prev_regd);
+ return 0;
+}
+
+int regulatory_set_wiphy_regd(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd)
+{
+ int ret = __regulatory_set_wiphy_regd(wiphy, rd);
+
+ if (ret)
+ return ret;
+
+ schedule_work(&reg_work);
+ return 0;
+}
+EXPORT_SYMBOL(regulatory_set_wiphy_regd);
+
+int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd)
+{
+ int ret;
+
+ ASSERT_RTNL();
+
+ ret = __regulatory_set_wiphy_regd(wiphy, rd);
+ if (ret)
+ return ret;
+
+ /* process the request immediately */
+ reg_process_self_managed_hints();
+ return 0;
+}
+EXPORT_SYMBOL(regulatory_set_wiphy_regd_sync_rtnl);
+
void wiphy_regulatory_register(struct wiphy *wiphy)
{
struct regulatory_request *lr;
+ /* self-managed devices ignore external hints */
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
+ REGULATORY_COUNTRY_IE_IGNORE;
+
if (!reg_dev_ignore_cell_hint(wiphy))
reg_num_devs_support_basehint++;
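
As an illustration (not part of the patch), a driver whose firmware dictates regulatory rules would mark its wiphy self-managed and push the firmware-derived domain through the new API; the rule values below are placeholders:

        /* Sketch of a self-managed driver using regulatory_set_wiphy_regd(). */
        static struct ieee80211_regdomain drv_fw_regd = {
                .n_reg_rules = 1,
                .alpha2 = "99",                 /* unknown/driver-specific */
                .reg_rules = {
                        /* 2.4 GHz band, 40 MHz max, 20 dBm EIRP (placeholder) */
                        REG_RULE(2402, 2482, 40, 0, 20, 0),
                },
        };

        static int drv_reg_init(struct wiphy *wiphy)
        {
                /* set before wiphy_register(); beacon/country-IE hints are
                 * then ignored for this wiphy, per wiphy_regulatory_register()
                 */
                wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;

                /* queued and applied by reg_process_self_managed_hints() */
                return regulatory_set_wiphy_regd(wiphy, &drv_fw_regd);
        }
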
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 5e48031ccb9a..4b45d6e61d24 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -38,6 +38,7 @@ unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
const struct ieee80211_reg_rule *rule);
bool reg_last_request_cell_base(void);
+const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy);
/**
* regulatory_hint_found_beacon - hints a beacon was found on a channel
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index bda39f149810..c705c3e2b751 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -257,7 +257,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
rtnl_lock();
- request = rdev->sched_scan_req;
+ request = rtnl_dereference(rdev->sched_scan_req);
/* we don't have sched_scan_req anymore if the scan is stopping */
if (request) {
@@ -279,7 +279,8 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
{
trace_cfg80211_sched_scan_results(wiphy);
/* ignore if we're not scanning */
- if (wiphy_to_rdev(wiphy)->sched_scan_req)
+
+ if (rcu_access_pointer(wiphy_to_rdev(wiphy)->sched_scan_req))
queue_work(cfg80211_wq,
&wiphy_to_rdev(wiphy)->sched_scan_results_wk);
}
@@ -308,6 +309,7 @@ EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
bool driver_initiated)
{
+ struct cfg80211_sched_scan_request *sched_scan_req;
struct net_device *dev;
ASSERT_RTNL();
@@ -315,7 +317,8 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
if (!rdev->sched_scan_req)
return -ENOENT;
- dev = rdev->sched_scan_req->dev;
+ sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
+ dev = sched_scan_req->dev;
if (!driver_initiated) {
int err = rdev_sched_scan_stop(rdev, dev);
@@ -325,8 +328,8 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
nl80211_send_sched_scan(rdev, dev, NL80211_CMD_SCHED_SCAN_STOPPED);
- kfree(rdev->sched_scan_req);
- rdev->sched_scan_req = NULL;
+ RCU_INIT_POINTER(rdev->sched_scan_req, NULL);
+ kfree_rcu(sched_scan_req, rcu_head);
return 0;
}
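
As an illustration (not part of the patch), the sched_scan_req handling above is the usual RCU publish/read/reclaim sequence, condensed:

        /* Publish (under RTNL): make the new request visible. */
        rcu_assign_pointer(rdev->sched_scan_req, req);

        /* Lockless check from the results path: peek without dereferencing. */
        if (rcu_access_pointer(rdev->sched_scan_req))
                queue_work(cfg80211_wq, &rdev->sched_scan_results_wk);

        /* Tear down (under RTNL): unpublish, then free after a grace period. */
        req = rtnl_dereference(rdev->sched_scan_req);
        RCU_INIT_POINTER(rdev->sched_scan_req, NULL);
        kfree_rcu(req, rcu_head);
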
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index ad38910f7036..b17b3692f8c2 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1604,11 +1604,12 @@ TRACE_EVENT(rdev_return_int_survey_info,
WIPHY_ENTRY
CHAN_ENTRY
__field(int, ret)
- __field(u64, channel_time)
- __field(u64, channel_time_busy)
- __field(u64, channel_time_ext_busy)
- __field(u64, channel_time_rx)
- __field(u64, channel_time_tx)
+ __field(u64, time)
+ __field(u64, time_busy)
+ __field(u64, time_ext_busy)
+ __field(u64, time_rx)
+ __field(u64, time_tx)
+ __field(u64, time_scan)
__field(u32, filled)
__field(s8, noise)
),
@@ -1616,22 +1617,24 @@ TRACE_EVENT(rdev_return_int_survey_info,
WIPHY_ASSIGN;
CHAN_ASSIGN(info->channel);
__entry->ret = ret;
- __entry->channel_time = info->channel_time;
- __entry->channel_time_busy = info->channel_time_busy;
- __entry->channel_time_ext_busy = info->channel_time_ext_busy;
- __entry->channel_time_rx = info->channel_time_rx;
- __entry->channel_time_tx = info->channel_time_tx;
+ __entry->time = info->time;
+ __entry->time_busy = info->time_busy;
+ __entry->time_ext_busy = info->time_ext_busy;
+ __entry->time_rx = info->time_rx;
+ __entry->time_tx = info->time_tx;
+ __entry->time_scan = info->time_scan;
__entry->filled = info->filled;
__entry->noise = info->noise;
),
TP_printk(WIPHY_PR_FMT ", returned: %d, " CHAN_PR_FMT
", channel time: %llu, channel time busy: %llu, "
"channel time extension busy: %llu, channel time rx: %llu, "
- "channel time tx: %llu, filled: %u, noise: %d",
+ "channel time tx: %llu, scan time: %llu, filled: %u, noise: %d",
WIPHY_PR_ARG, __entry->ret, CHAN_PR_ARG,
- __entry->channel_time, __entry->channel_time_busy,
- __entry->channel_time_ext_busy, __entry->channel_time_rx,
- __entry->channel_time_tx, __entry->filled, __entry->noise)
+ __entry->time, __entry->time_busy,
+ __entry->time_ext_busy, __entry->time_rx,
+ __entry->time_tx, __entry->time_scan,
+ __entry->filled, __entry->noise)
);
TRACE_EVENT(rdev_tdls_oper,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0ac795445b7..6903dbdcb8c1 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -227,18 +227,32 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
if (pairwise && !mac_addr)
return -EINVAL;
- /*
- * Disallow pairwise keys with non-zero index unless it's WEP
- * or a vendor specific cipher (because current deployments use
- * pairwise WEP keys with non-zero indices and for vendor specific
- * ciphers this should be validated in the driver or hardware level
- * - but 802.11i clearly specifies to use zero)
- */
- if (pairwise && key_idx &&
- ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
- (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
- (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
- return -EINVAL;
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* Disallow pairwise keys with non-zero index unless it's WEP
+ * or a vendor specific cipher (because current deployments use
+ * pairwise WEP keys with non-zero indices and for vendor
+ * specific ciphers this should be validated in the driver or
+ * hardware level - but 802.11i clearly specifies to use zero)
+ */
+ if (pairwise && key_idx)
+ return -EINVAL;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ /* Disallow BIP (group-only) cipher as pairwise cipher */
+ if (pairwise)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -253,6 +267,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
if (params->key_len != WLAN_KEY_LEN_CCMP)
return -EINVAL;
break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ if (params->key_len != WLAN_KEY_LEN_CCMP_256)
+ return -EINVAL;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ if (params->key_len != WLAN_KEY_LEN_GCMP)
+ return -EINVAL;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (params->key_len != WLAN_KEY_LEN_GCMP_256)
+ return -EINVAL;
+ break;
case WLAN_CIPHER_SUITE_WEP104:
if (params->key_len != WLAN_KEY_LEN_WEP104)
return -EINVAL;
@@ -261,6 +287,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
if (params->key_len != WLAN_KEY_LEN_AES_CMAC)
return -EINVAL;
break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (params->key_len != WLAN_KEY_LEN_BIP_CMAC_256)
+ return -EINVAL;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_128)
+ return -EINVAL;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_256)
+ return -EINVAL;
+ break;
default:
/*
* We don't know anything about this algorithm,
@@ -280,7 +318,13 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (params->seq_len != 6)
return -EINVAL;
break;
@@ -308,6 +352,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
goto out;
}
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_has_order(fc))
+ hdrlen += IEEE80211_HT_CTL_LEN;
+ goto out;
+ }
+
if (ieee80211_is_ctl(fc)) {
/*
* ACK and CTS are 10 bytes, all others 16. To see how
@@ -708,8 +758,8 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
if (skb->priority >= 256 && skb->priority <= 263)
return skb->priority - 256;
- if (vlan_tx_tag_present(skb)) {
- vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK)
+ if (skb_vlan_tag_present(skb)) {
+ vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
>> VLAN_PRIO_SHIFT;
if (vlan_priority > 0)
return vlan_priority;
@@ -1073,10 +1123,24 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
if (WARN_ON_ONCE(rate->mcs > 9))
return 0;
- idx = rate->flags & (RATE_INFO_FLAGS_160_MHZ_WIDTH |
- RATE_INFO_FLAGS_80P80_MHZ_WIDTH) ? 3 :
- rate->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH ? 2 :
- rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH ? 1 : 0;
+ switch (rate->bw) {
+ case RATE_INFO_BW_160:
+ idx = 3;
+ break;
+ case RATE_INFO_BW_80:
+ idx = 2;
+ break;
+ case RATE_INFO_BW_40:
+ idx = 1;
+ break;
+ case RATE_INFO_BW_5:
+ case RATE_INFO_BW_10:
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case RATE_INFO_BW_20:
+ idx = 0;
+ }
bitrate = base[idx][rate->mcs];
bitrate *= rate->nss;
@@ -1107,8 +1171,7 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
modulation = rate->mcs & 7;
streams = (rate->mcs >> 3) + 1;
- bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
- 13500000 : 6500000;
+ bitrate = (rate->bw == RATE_INFO_BW_40) ? 13500000 : 6500000;
if (modulation < 4)
bitrate *= (modulation + 1);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0f47948c572f..5b24d39d7903 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1300,7 +1300,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
if (err)
return err;
- if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
+ if (!(sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)))
return -EOPNOTSUPP;
rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
@@ -1340,7 +1340,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
- if (sinfo.filled & STATION_INFO_SIGNAL) {
+ if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) {
int sig = sinfo.signal;
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
@@ -1354,7 +1354,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
break;
}
case CFG80211_SIGNAL_TYPE_UNSPEC:
- if (sinfo.filled & STATION_INFO_SIGNAL) {
+ if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) {
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
wstats.qual.level = sinfo.signal;
@@ -1367,9 +1367,9 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
}
wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
- if (sinfo.filled & STATION_INFO_RX_DROP_MISC)
+ if (sinfo.filled & BIT(NL80211_STA_INFO_RX_DROP_MISC))
wstats.discard.misc = sinfo.rx_dropped_misc;
- if (sinfo.filled & STATION_INFO_TX_FAILED)
+ if (sinfo.filled & BIT(NL80211_STA_INFO_TX_FAILED))
wstats.discard.retries = sinfo.tx_failed;
return &wstats;
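
As an illustration (not part of the patch), the driver side that feeds the checks above now sets BIT(NL80211_STA_INFO_*) flags rather than the old STATION_INFO_* names; the signal and rate sources are invented:

        /* Sketch: fill the station_info fields the wext path consumes. */
        static void drv_fill_sinfo(struct station_info *sinfo,
                                   s8 rssi_dbm, u16 rate_100kbps)
        {
                sinfo->filled = BIT(NL80211_STA_INFO_SIGNAL) |
                                BIT(NL80211_STA_INFO_TX_BITRATE);
                sinfo->signal = rssi_dbm;               /* dBm (SIGNAL_TYPE_MBM) */
                sinfo->txrate.legacy = rate_100kbps;    /* units of 100 kbit/s */
        }
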
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index debe733386f8..12e82a5e4ad5 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -561,11 +561,6 @@ static struct xfrm_algo_desc calg_list[] = {
},
};
-static inline int aead_entries(void)
-{
- return ARRAY_SIZE(aead_list);
-}
-
static inline int aalg_entries(void)
{
return ARRAY_SIZE(aalg_list);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8128594ab379..7de2ed9ec46d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1019,7 +1019,8 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
return err;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1121,7 +1122,8 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
return err;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1842,7 +1844,8 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
if (err)
goto out_cancel;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
out_cancel:
nlmsg_cancel(skb, nlh);
@@ -2282,7 +2285,8 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
goto out_cancel;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
out_cancel:
nlmsg_cancel(skb, nlh);
@@ -2490,7 +2494,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
if (err)
return err;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2712,7 +2717,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
return err;
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
@@ -2827,7 +2833,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
}
upe->hard = !!hard;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2986,7 +2993,8 @@ static int build_report(struct sk_buff *skb, u8 proto,
return err;
}
}
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_send_report(struct net *net, u8 proto,
@@ -3031,7 +3039,8 @@ static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
um->old_sport = x->encap->encap_sport;
um->reqid = x->props.reqid;
- return nlmsg_end(skb, nlh);
+ nlmsg_end(skb, nlh);
+ return 0;
}
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
diff --git a/samples/Kconfig b/samples/Kconfig
index 6181c2cc9ca0..224ebb46bed5 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -63,4 +63,11 @@ config SAMPLE_RPMSG_CLIENT
to communicate with an AMP-configured remote processor over
the rpmsg bus.
+config SAMPLE_LIVEPATCH
+ tristate "Build live patching sample -- loadable modules only"
+ depends on LIVEPATCH && m
+ help
+ Builds a sample live patch that replaces the procfs handler
+ for /proc/cmdline to print "this has been live patched".
+
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 1a60c62e2045..f00257bcc5a7 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -1,4 +1,4 @@
# Makefile for Linux samples code
-obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ \
+obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \
hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
index e286b42307f3..6299ee95cd11 100644
--- a/samples/bpf/test_maps.c
+++ b/samples/bpf/test_maps.c
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
/* iterate over two elements */
assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
- next_key == 2);
+ (next_key == 1 || next_key == 2));
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
- next_key == 1);
+ (next_key == 1 || next_key == 2));
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
errno == ENOENT);
diff --git a/samples/livepatch/Makefile b/samples/livepatch/Makefile
new file mode 100644
index 000000000000..10319d7ea0b1
--- /dev/null
+++ b/samples/livepatch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
new file mode 100644
index 000000000000..fb8c8614e728
--- /dev/null
+++ b/samples/livepatch/livepatch-sample.c
@@ -0,0 +1,91 @@
+/*
+ * livepatch-sample.c - Kernel Live Patching Sample Module
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+/*
+ * This (dumb) live patch overrides the function that prints the
+ * kernel boot cmdline when /proc/cmdline is read.
+ *
+ * Example:
+ *
+ * $ cat /proc/cmdline
+ * <your cmdline>
+ *
+ * $ insmod livepatch-sample.ko
+ * $ cat /proc/cmdline
+ * this has been live patched
+ *
+ * $ echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
+ * $ cat /proc/cmdline
+ * <your cmdline>
+ */
+
+#include <linux/seq_file.h>
+static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s\n", "this has been live patched");
+ return 0;
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "cmdline_proc_show",
+ .new_func = livepatch_cmdline_proc_show,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ /* name being NULL means vmlinux */
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_init(void)
+{
+ int ret;
+
+ ret = klp_register_patch(&patch);
+ if (ret)
+ return ret;
+ ret = klp_enable_patch(&patch);
+ if (ret) {
+ WARN_ON(klp_unregister_patch(&patch));
+ return ret;
+ }
+ return 0;
+}
+
+static void livepatch_exit(void)
+{
+ WARN_ON(klp_disable_patch(&patch));
+ WARN_ON(klp_unregister_patch(&patch));
+}
+
+module_init(livepatch_init);
+module_exit(livepatch_exit);
+MODULE_LICENSE("GPL");
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index aabc4e970911..880a7d1d27d2 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -10,12 +10,38 @@
#define CREATE_TRACE_POINTS
#include "trace-events-sample.h"
+static const char *random_strings[] = {
+ "Mother Goose",
+ "Snoopy",
+ "Gandalf",
+ "Frodo",
+ "One ring to rule them all"
+};
static void simple_thread_func(int cnt)
{
+ int array[6];
+ int len = cnt % 5;
+ int i;
+
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
- trace_foo_bar("hello", cnt);
+
+ for (i = 0; i < len; i++)
+ array[i] = i + 1;
+ array[i] = 0;
+
+ /* Silly tracepoints */
+ trace_foo_bar("hello", cnt, array, random_strings[len],
+ tsk_cpus_allowed(current));
+
+ trace_foo_with_template_simple("HELLO", cnt);
+
+ trace_foo_bar_with_cond("Some times print", cnt);
+
+ trace_foo_with_template_cond("prints other times", cnt);
+
+ trace_foo_with_template_print("I have to be different", cnt);
}
static int simple_thread(void *arg)
@@ -29,6 +55,53 @@ static int simple_thread(void *arg)
}
static struct task_struct *simple_tsk;
+static struct task_struct *simple_tsk_fn;
+
+static void simple_thread_func_fn(int cnt)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ /* More silly tracepoints */
+ trace_foo_bar_with_fn("Look at me", cnt);
+ trace_foo_with_template_fn("Look at me too", cnt);
+}
+
+static int simple_thread_fn(void *arg)
+{
+ int cnt = 0;
+
+ while (!kthread_should_stop())
+ simple_thread_func_fn(cnt++);
+
+ return 0;
+}
+
+static DEFINE_MUTEX(thread_mutex);
+
+void foo_bar_reg(void)
+{
+ pr_info("Starting thread for foo_bar_fn\n");
+ /*
+ * We shouldn't be able to start a trace when the module is
+ * unloading (there are other locks to prevent that). But
+ * for consistency's sake, we still take the thread_mutex.
+ */
+ mutex_lock(&thread_mutex);
+ simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ mutex_unlock(&thread_mutex);
+}
+
+void foo_bar_unreg(void)
+{
+ pr_info("Killing thread for foo_bar_fn\n");
+ /* protect against module unloading */
+ mutex_lock(&thread_mutex);
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
+ mutex_unlock(&thread_mutex);
+}
static int __init trace_event_init(void)
{
@@ -42,6 +115,11 @@ static int __init trace_event_init(void)
static void __exit trace_event_exit(void)
{
kthread_stop(simple_tsk);
+ mutex_lock(&thread_mutex);
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
+ mutex_unlock(&thread_mutex);
}
module_init(trace_event_init);
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 476429281389..a2c8b02b6359 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -1,6 +1,6 @@
/*
* If TRACE_SYSTEM is defined, that will be the directory created
- * in the ftrace directory under /sys/kernel/debug/tracing/events/<system>
+ * in the ftrace directory under /sys/kernel/tracing/events/<system>
*
* The define_trace.h below will also look for a file name of
* TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
@@ -54,45 +54,347 @@
* Here it is simply "foo, bar".
*
* struct: This defines the way the data will be stored in the ring buffer.
- * There are currently two types of elements. __field and __array.
- * a __field is broken up into (type, name). Where type can be any
- * primitive type (integer, long or pointer). __field_struct() can
- * be any static complex data value (struct, union, but not an array).
- * For an array. there are three fields. (type, name, size). The
- * type of elements in the array, the name of the field and the size
- * of the array.
+ * The items declared here become part of a special structure
+ * called "__entry", which can be used in the fast_assign part of the
+ * TRACE_EVENT macro.
+ *
+ * Here are the currently defined types you can use:
+ *
+ * __field : Is broken up into type and name. Where type can be any
+ * primitive type (integer, long or pointer).
+ *
+ * __field(int, foo)
+ *
+ * __entry->foo = 5;
+ *
+ * __field_struct : This can be any static complex data type (struct, union
+ * but not an array). Be careful using complex types, as each
+ * event is limited in size, and copying large amounts of data
+ * into the ring buffer can slow things down.
+ *
+ * __field_struct(struct bar, foo)
+ *
+ * __entry->bar.x = y;
+ *
+ * __array: There are three fields (type, name, size). The type is the
+ * type of elements in the array, the name is the name of the array.
+ * size is the number of items in the array (not the total size).
+ *
+ * __array( char, foo, 10) is the same as saying: char foo[10];
+ *
+ * Assigning arrays can be done like any array:
+ *
+ * __entry->foo[0] = 'a';
+ *
+ * memcpy(__entry->foo, bar, 10);
+ *
+ * __dynamic_array: This is similar to array, but can vary in size from
+ * instance to instance of the tracepoint being called.
+ * Like __array, this too has three elements (type, name, size);
+ * type is the type of the element, name is the name of the array.
+ * The size is different than __array. It is not a static number,
+ * but the algorithm to figure out the length of the array for the
+ * specific instance of the tracepoint. Again, size is the number of
+ * items in the array, not the total length in bytes.
+ *
+ * __dynamic_array( int, foo, bar) is similar to: int foo[bar];
+ *
+ * Note, unlike arrays, you must use the __get_dynamic_array() macro
+ * to access the array.
+ *
+ * memcpy(__get_dynamic_array(foo), bar, 10);
+ *
+ * Notice that "__entry" is not needed here.
+ *
+ * __string: This is a special kind of __dynamic_array. It expects to
+ * have a nul terminated character array passed to it (it allows
+ * for NULL too, which would be converted into "(null)"). __string
+ * takes two parameters (name, src), where name is the name of
+ * the string saved, and src is the string to copy into the
+ * ring buffer.
+ *
+ * __string(foo, bar) is similar to: strcpy(foo, bar)
+ *
+ * To assign a string, use the helper macro __assign_str().
+ *
+ * __assign_str(foo, bar);
+ *
+ * In most cases, the __assign_str() macro will take the same
+ * parameters as the __string() macro had to declare the string.
+ *
+ * __bitmask: This is another kind of __dynamic_array, but it expects
+ * an array of longs, and the number of bits to parse. It takes
+ * two parameters (name, nr_bits), where name is the name of the
+ * bitmask to save, and the nr_bits is the number of bits to record.
+ *
+ * __bitmask(target_cpu, nr_cpumask_bits)
+ *
+ * To assign a bitmask, use the __assign_bitmask() helper macro.
+ *
+ * __assign_bitmask(target_cpus, cpumask_bits(bar), nr_cpumask_bits);
*
- * __array( char, foo, 10) is the same as saying char foo[10].
*
* fast_assign: This is a C like function that is used to store the items
- * into the ring buffer.
+ * into the ring buffer. A special variable called "__entry" will be the
+ * structure that points into the ring buffer and has the same fields as
+ * described by the struct part of TRACE_EVENT above.
*
* printk: This is a way to print out the data in pretty print. This is
* useful if the system crashes and you are logging via a serial line,
* the data can be printed to the console using this "printk" method.
+ * This is also used to print out the data from the trace files.
+ * Again, the __entry macro is used to access the data from the ring buffer.
+ *
+ * Note, __dynamic_array, __string, and __bitmask require special helpers
+ * to access the data.
+ *
+ * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
+ * Use __get_dynamic_array_len(foo) to get the length of the array
+ * saved.
+ *
+ * For __string(foo, bar) use __get_str(foo)
+ *
+ * For __bitmask(target_cpus, nr_cpumask_bits) use __get_bitmask(target_cpus)
+ *
*
* Note, that for both the assign and the printk, __entry is the handler
* to the data structure in the ring buffer, and is defined by the
* TP_STRUCT__entry.
*/
+
+/*
+ * It is OK to have helper functions in the file, but they need to be protected
+ * from being defined more than once. Remember, this file gets included more
+ * than once.
+ */
+#ifndef __TRACE_EVENT_SAMPLE_HELPER_FUNCTIONS
+#define __TRACE_EVENT_SAMPLE_HELPER_FUNCTIONS
+static inline int __length_of(const int *list)
+{
+ int i;
+
+ if (!list)
+ return 0;
+
+ for (i = 0; list[i]; i++)
+ ;
+ return i;
+}
+#endif
+
TRACE_EVENT(foo_bar,
- TP_PROTO(char *foo, int bar),
+ TP_PROTO(const char *foo, int bar, const int *lst,
+ const char *string, const struct cpumask *mask),
- TP_ARGS(foo, bar),
+ TP_ARGS(foo, bar, lst, string, mask),
TP_STRUCT__entry(
__array( char, foo, 10 )
__field( int, bar )
+ __dynamic_array(int, list, __length_of(lst))
+ __string( str, string )
+ __bitmask( cpus, num_possible_cpus() )
),
TP_fast_assign(
strlcpy(__entry->foo, foo, 10);
__entry->bar = bar;
+ memcpy(__get_dynamic_array(list), lst,
+ __length_of(lst) * sizeof(int));
+ __assign_str(str, string);
+ __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
+ ),
+
+ TP_printk("foo %s %d %s %s (%s)", __entry->foo, __entry->bar,
+ __print_array(__get_dynamic_array(list),
+ __get_dynamic_array_len(list),
+ sizeof(int)),
+ __get_str(str), __get_bitmask(cpus))
+);
+
+/*
+ * There may be a case where a tracepoint should only be called if
+ * some condition is set. Otherwise the tracepoint should not be called.
+ * But to do something like:
+ *
+ * if (cond)
+ * trace_foo();
+ *
+ * Would cause a little overhead when tracing is not enabled, and that
+ * overhead, even if small, is not something we want. As tracepoints
+ * use static branch (aka jump_labels), where no branch is taken to
+ * skip the tracepoint when not enabled, and a jmp is placed to jump
+ * to the tracepoint code when it is enabled, having a if statement
+ * nullifies that optimization. It would be nice to place that
+ * condition within the static branch. This is where TRACE_EVENT_CONDITION
+ * comes in.
+ *
+ * TRACE_EVENT_CONDITION() is just like TRACE_EVENT, except it adds another
+ * parameter just after args. Where TRACE_EVENT has:
+ *
+ * TRACE_EVENT(name, proto, args, struct, assign, printk)
+ *
+ * the CONDITION version has:
+ *
+ * TRACE_EVENT_CONDITION(name, proto, args, cond, struct, assign, printk)
+ *
+ * Everything is the same as TRACE_EVENT except for the new cond. Think
+ * of the cond variable as:
+ *
+ * if (cond)
+ * trace_foo_bar_with_cond();
+ *
+ * Except that the logic for the if branch is placed after the static branch.
+ * That is, the if statement that processes the condition will not be
+ * executed unless the tracepoint is enabled. Otherwise the call site
+ * still remains a nop.
+ */
+TRACE_EVENT_CONDITION(foo_bar_with_cond,
+
+ TP_PROTO(const char *foo, int bar),
+
+ TP_ARGS(foo, bar),
+
+ TP_CONDITION(!(bar % 10)),
+
+ TP_STRUCT__entry(
+ __string( foo, foo )
+ __field( int, bar )
+ ),
+
+ TP_fast_assign(
+ __assign_str(foo, foo);
+ __entry->bar = bar;
+ ),
+
+ TP_printk("foo %s %d", __get_str(foo), __entry->bar)
+);
+
+void foo_bar_reg(void);
+void foo_bar_unreg(void);
+
+/*
+ * Now, in the case that some function needs to be called when the
+ * tracepoint is enabled and/or when it is disabled, TRACE_EVENT_FN()
+ * serves this purpose. It is just like TRACE_EVENT() but adds two
+ * more parameters at the end:
+ *
+ * TRACE_EVENT_FN( name, proto, args, struct, assign, printk, reg, unreg)
+ *
+ * reg and unreg are functions with the prototype of:
+ *
+ * void reg(void)
+ *
+ * The reg function gets called before the tracepoint is enabled, and
+ * the unreg function gets called after the tracepoint is disabled.
+ *
+ * Note, reg and unreg are allowed to be NULL. If you only need to
+ * call a function before enabling, or after disabling, just set one
+ * function and pass in NULL for the other parameter.
+ */
+TRACE_EVENT_FN(foo_bar_with_fn,
+
+ TP_PROTO(const char *foo, int bar),
+
+ TP_ARGS(foo, bar),
+
+ TP_STRUCT__entry(
+ __string( foo, foo )
+ __field( int, bar )
+ ),
+
+ TP_fast_assign(
+ __assign_str(foo, foo);
+ __entry->bar = bar;
+ ),
+
+ TP_printk("foo %s %d", __get_str(foo), __entry->bar),
+
+ foo_bar_reg, foo_bar_unreg
+);
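
For reference, a minimal sketch of what the matching sample .c file could
provide for the two functions declared above. The function names come from
the declarations in this header; the bodies and log messages below are
illustrative only and are not the sample module's actual code:

    #include <linux/printk.h>

    /* Called just before the foo_bar_with_fn tracepoint is enabled. */
    void foo_bar_reg(void)
    {
            pr_info("foo_bar_with_fn tracing is being enabled\n");
    }

    /* Called just after the foo_bar_with_fn tracepoint is disabled. */
    void foo_bar_unreg(void)
    {
            pr_info("foo_bar_with_fn tracing is being disabled\n");
    }
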
+
+/*
+ * Each TRACE_EVENT macro creates several helper functions to produce
+ * the code to add the tracepoint, create the files in the trace
+ * directory, hook it to perf, assign the values and to print out
+ * the raw data from the ring buffer. To prevent too much bloat,
+ * if more than one tracepoint uses the same format for the proto,
+ * args, struct, assign and printk, and only the name is different,
+ * it is highly recommended to use the DECLARE_EVENT_CLASS() macro.
+ *
+ * The DECLARE_EVENT_CLASS() macro creates most of the functions for the
+ * tracepoint. Then DEFINE_EVENT() is used to hook a tracepoint to those
+ * functions. Each DEFINE_EVENT() is an instance of the class and can
+ * be enabled and disabled separately from other events (either TRACE_EVENT
+ * or other DEFINE_EVENT()s).
+ *
+ * Note, TRACE_EVENT() itself is simply defined as:
+ *
+ * #define TRACE_EVENT(name, proto, args, tstruct, assign, printk) \
+ *   DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, printk); \
+ * DEFINE_EVENT(name, name, proto, args)
+ *
+ * A DEFINE_EVENT() can also be declared with conditions and reg functions:
+ *
+ * DEFINE_EVENT_CONDITION(template, name, proto, args, cond);
+ * DEFINE_EVENT_FN(template, name, proto, args, reg, unreg);
+ */
+DECLARE_EVENT_CLASS(foo_template,
+
+ TP_PROTO(const char *foo, int bar),
+
+ TP_ARGS(foo, bar),
+
+ TP_STRUCT__entry(
+ __string( foo, foo )
+ __field( int, bar )
+ ),
+
+ TP_fast_assign(
+ __assign_str(foo, foo);
+ __entry->bar = bar;
),
- TP_printk("foo %s %d", __entry->foo, __entry->bar)
+ TP_printk("foo %s %d", __get_str(foo), __entry->bar)
);
+
+/*
+ * Here's a better way to write the previous samples (except that the
+ * first example had more fields and could not share this class).
+ */
+DEFINE_EVENT(foo_template, foo_with_template_simple,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar));
+
+DEFINE_EVENT_CONDITION(foo_template, foo_with_template_cond,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar),
+ TP_CONDITION(!(bar % 8)));
+
+
+DEFINE_EVENT_FN(foo_template, foo_with_template_fn,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar),
+ foo_bar_reg, foo_bar_unreg);
+
+/*
+ * Whenever two events share basically the same values and have
+ * the same output, use DECLARE_EVENT_CLASS() and DEFINE_EVENT()
+ * rather than separate TRACE_EVENT()s.
+ */
+
+/*
+ * If the event is similar to the DECLARE_EVENT_CLASS, but you need
+ * to have a different output, then use DEFINE_EVENT_PRINT() which
+ * lets you override the TP_printk() of the class.
+ */
+
+DEFINE_EVENT_PRINT(foo_template, foo_with_template_print,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar),
+ TP_printk("bar %s %d", __get_str(foo), __entry->bar));
+
#endif
/***** NOTICE! The #if protection ends here. *****/
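
For reference, a sketch of the consumer side of this sample header, in the
usual pattern for trace headers: exactly one .c file defines
CREATE_TRACE_POINTS before including the header so that the tracepoint
bodies get instantiated, and every other user just includes the header.
The file name, function name and loop context below are hypothetical; only
the trace_* call names follow from the event definitions above.

    #define CREATE_TRACE_POINTS
    #include "trace-events-sample.h"

    static void do_sample_work(int cnt)
    {
            /* Fires whenever the foo_with_template_simple event is enabled. */
            trace_foo_with_template_simple("hello", cnt);

            /*
             * Fires only when the foo_bar_with_cond event is enabled AND
             * cnt is a multiple of 10 (the TP_CONDITION above). The
             * condition check sits behind the static branch, so a disabled
             * event still costs nothing at this call site.
             */
            trace_foo_bar_with_cond("conditional hello", cnt);
    }
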
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 649ce6844033..01df30af4d4a 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -234,8 +234,9 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH
"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
endif
-cmd_record_mcount = \
- if [ "$(findstring -pg,$(_c_flags))" = "-pg" ]; then \
+cmd_record_mcount = \
+ if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \
+ "$(CC_FLAGS_FTRACE)" ]; then \
$(sub_cmd_record_mcount) \
fi;
endif
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
new file mode 100644
index 000000000000..631619b2b118
--- /dev/null
+++ b/scripts/Makefile.kasan
@@ -0,0 +1,25 @@
+ifdef CONFIG_KASAN
+ifdef CONFIG_KASAN_INLINE
+ call_threshold := 10000
+else
+ call_threshold := 0
+endif
+
+CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
+
+CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
+ -fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \
+ --param asan-stack=1 --param asan-globals=1 \
+ --param asan-instrumentation-with-call-threshold=$(call_threshold))
+
+ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)
+ $(warning Cannot use CONFIG_KASAN: \
+ -fsanitize=kernel-address is not supported by compiler)
+else
+ ifeq ($(CFLAGS_KASAN),)
+ $(warning CONFIG_KASAN: compiler does not support all options.\
+ Trying minimal configuration)
+ CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
+ endif
+endif
+endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 511755200634..044eb4f89a91 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -119,6 +119,16 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_GCOV))
endif
+#
+# Enable address sanitizer flags for the kernel, except for files or
+# directories we don't want to check (controlled by the KASAN_SANITIZE_obj.o
+# and KASAN_SANITIZE variables)
+#
+ifeq ($(CONFIG_KASAN),y)
+_c_flags += $(if $(patsubst n%,, \
+ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
+ $(CFLAGS_KASAN))
+endif
+
# If building the kernel in a separate objtree expand all occurrences
# of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
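
For context, the two variables named in the new Makefile.lib comment are
meant to be set from an object's own Makefile: a hypothetical line such as
KASAN_SANITIZE_foo.o := n excludes that single object from instrumentation,
and KASAN_SANITIZE := n excludes every object built by that Makefile. Either
setting makes the $(patsubst n%,, ...) test above drop CFLAGS_KASAN for the
matching objects; the file name foo.o is illustrative only.
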
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index 91c4117637ae..7750e9c31483 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -311,6 +311,9 @@ struct token {
static struct token *token_list;
static unsigned nr_tokens;
+static _Bool verbose;
+
+#define debug(fmt, ...) do { if (verbose) printf(fmt, ## __VA_ARGS__); } while (0)
static int directive_compare(const void *_key, const void *_pdir)
{
@@ -322,21 +325,21 @@ static int directive_compare(const void *_key, const void *_pdir)
dlen = strlen(dir);
clen = (dlen < token->size) ? dlen : token->size;
- //printf("cmp(%*.*s,%s) = ",
+ //debug("cmp(%*.*s,%s) = ",
// (int)token->size, (int)token->size, token->value,
// dir);
val = memcmp(token->value, dir, clen);
if (val != 0) {
- //printf("%d [cmp]\n", val);
+ //debug("%d [cmp]\n", val);
return val;
}
if (dlen == token->size) {
- //printf("0\n");
+ //debug("0\n");
return 0;
}
- //printf("%d\n", (int)dlen - (int)token->size);
+ //debug("%d\n", (int)dlen - (int)token->size);
return dlen - token->size; /* shorter -> negative */
}
@@ -515,13 +518,13 @@ static void tokenise(char *buffer, char *end)
}
nr_tokens = tix;
- printf("Extracted %u tokens\n", nr_tokens);
+ debug("Extracted %u tokens\n", nr_tokens);
#if 0
{
int n;
for (n = 0; n < nr_tokens; n++)
- printf("Token %3u: '%*.*s'\n",
+ debug("Token %3u: '%*.*s'\n",
n,
(int)token_list[n].size, (int)token_list[n].size,
token_list[n].value);
@@ -542,6 +545,7 @@ int main(int argc, char **argv)
ssize_t readlen;
FILE *out, *hdr;
char *buffer, *p;
+ char *kbuild_verbose;
int fd;
if (argc != 4) {
@@ -550,6 +554,10 @@ int main(int argc, char **argv)
exit(2);
}
+ kbuild_verbose = getenv("KBUILD_VERBOSE");
+ if (kbuild_verbose)
+ verbose = atoi(kbuild_verbose);
+
filename = argv[1];
outputname = argv[2];
headername = argv[3];
@@ -748,11 +756,11 @@ static void build_type_list(void)
qsort(type_index, nr, sizeof(type_index[0]), type_index_compare);
- printf("Extracted %u types\n", nr_types);
+ debug("Extracted %u types\n", nr_types);
#if 0
for (n = 0; n < nr_types; n++) {
struct type *type = type_index[n];
- printf("- %*.*s\n",
+ debug("- %*.*s\n",
(int)type->name->size,
(int)type->name->size,
type->name->value);
@@ -793,7 +801,7 @@ static void parse(void)
} while (type++, !(type->flags & TYPE_STOP_MARKER));
- printf("Extracted %u actions\n", nr_actions);
+ debug("Extracted %u actions\n", nr_actions);
}
static struct element *element_list;
@@ -1284,7 +1292,7 @@ static void render(FILE *out, FILE *hdr)
}
/* We do two passes - the first one calculates all the offsets */
- printf("Pass 1\n");
+ debug("Pass 1\n");
nr_entries = 0;
root = &type_list[0];
render_element(NULL, root->element, NULL);
@@ -1295,7 +1303,7 @@ static void render(FILE *out, FILE *hdr)
e->flags &= ~ELEMENT_RENDERED;
/* And then we actually render */
- printf("Pass 2\n");
+ debug("Pass 2\n");
fprintf(out, "\n");
fprintf(out, "static const unsigned char %s_machine[] = {\n",
grammar_name);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index f0bb6d60c07b..d12435992dea 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -278,6 +278,7 @@ our $Attribute = qr{
__noreturn|
__used|
__cold|
+ __pure|
__noclone|
__deprecated|
__read_mostly|
@@ -298,6 +299,7 @@ our $Binary = qr{(?i)0b[01]+$Int_type?};
our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?};
our $Int = qr{[0-9]+$Int_type?};
our $Octal = qr{0[0-7]+$Int_type?};
+our $String = qr{"[X\t]*"};
our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
@@ -337,6 +339,11 @@ our $UTF8 = qr{
| $NON_ASCII_UTF8
}x;
+our $typeOtherOSTypedefs = qr{(?x:
+ u_(?:char|short|int|long) | # bsd
+ u(?:nchar|short|int|long) # sysv
+)};
+
our $typeTypedefs = qr{(?x:
(?:__)?(?:u|s|be|le)(?:8|16|32|64)|
atomic_t
@@ -473,6 +480,7 @@ sub build_types {
(?:$Modifier\s+|const\s+)*
(?:
(?:typeof|__typeof__)\s*\([^\)]*\)|
+ (?:$typeOtherOSTypedefs\b)|
(?:$typeTypedefs\b)|
(?:${all}\b)
)
@@ -490,6 +498,7 @@ sub build_types {
(?:
(?:typeof|__typeof__)\s*\([^\)]*\)|
(?:$typeTypedefs\b)|
+ (?:$typeOtherOSTypedefs\b)|
(?:${allWithAttr}\b)
)
(?:\s+$Modifier|\s+const)*
@@ -517,7 +526,7 @@ our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*};
our $balanced_parens = qr/(\((?:[^\(\)]++|(?-1))*\))/;
our $LvalOrFunc = qr{((?:[\&\*]\s*)?$Lval)\s*($balanced_parens{0,1})\s*};
-our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)};
+our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant|$String)};
our $declaration_macros = qr{(?x:
(?:$Storage\s+)?(?:[A-Z_][A-Z0-9]*_){0,2}(?:DEFINE|DECLARE)(?:_[A-Z0-9]+){1,2}\s*\(|
@@ -632,6 +641,8 @@ sub git_commit_info {
$output =~ s/^\s*//gm;
my @lines = split("\n", $output);
+ return ($id, $desc) if ($#lines < 0);
+
if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous\./) {
# Maybe one day convert this block of bash into something that returns
# all matching commit ids, but it's very slow...
@@ -2159,6 +2170,13 @@ sub process {
}
}
+# Check email subject for common tools that don't need to be mentioned
+ if ($in_header_lines &&
+ $line =~ /^Subject:.*\b(?:checkpatch|sparse|smatch)\b[^:]/i) {
+ WARN("EMAIL_SUBJECT",
+ "A patch subject line should describe the change not the tool that found it\n" . $herecurr);
+ }
+
# Check for old stable address
if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
ERROR("STABLE_ADDRESS",
@@ -2171,21 +2189,49 @@ sub process {
"Remove Gerrit Change-Id's before submitting upstream.\n" . $herecurr);
}
-# Check for improperly formed commit descriptions
- if ($in_commit_log &&
- $line =~ /\bcommit\s+[0-9a-f]{5,}/i &&
- !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ ||
- ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ &&
- defined $rawlines[$linenr] &&
- $rawlines[$linenr] =~ /^\s*\("/))) {
- $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i;
+# Check for git id commit length and improperly formed commit descriptions
+ if ($in_commit_log && $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i) {
my $init_char = $1;
my $orig_commit = lc($2);
- my $id = '01234567890ab';
- my $desc = 'commit description';
- ($id, $desc) = git_commit_info($orig_commit, $id, $desc);
- ERROR("GIT_COMMIT_ID",
- "Please use 12 or more chars for the git commit ID like: '${init_char}ommit $id (\"$desc\")'\n" . $herecurr);
+ my $short = 1;
+ my $long = 0;
+ my $case = 1;
+ my $space = 1;
+ my $hasdesc = 0;
+ my $hasparens = 0;
+ my $id = '0123456789ab';
+ my $orig_desc = "commit description";
+ my $description = "";
+
+ $short = 0 if ($line =~ /\bcommit\s+[0-9a-f]{12,40}/i);
+ $long = 1 if ($line =~ /\bcommit\s+[0-9a-f]{41,}/i);
+ $space = 0 if ($line =~ /\bcommit [0-9a-f]/i);
+ $case = 0 if ($line =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/);
+ if ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)"\)/i) {
+ $orig_desc = $1;
+ $hasparens = 1;
+ } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s*$/i &&
+ defined $rawlines[$linenr] &&
+ $rawlines[$linenr] =~ /^\s*\("([^"]+)"\)/) {
+ $orig_desc = $1;
+ $hasparens = 1;
+ } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("[^"]+$/i &&
+ defined $rawlines[$linenr] &&
+ $rawlines[$linenr] =~ /^\s*[^"]+"\)/) {
+ $line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)$/i;
+ $orig_desc = $1;
+ $rawlines[$linenr] =~ /^\s*([^"]+)"\)/;
+ $orig_desc .= " " . $1;
+ $hasparens = 1;
+ }
+
+ ($id, $description) = git_commit_info($orig_commit,
+ $id, $orig_desc);
+
+ if ($short || $long || $space || $case || ($orig_desc ne $description) || !$hasparens) {
+ ERROR("GIT_COMMIT_ID",
+ "Please use git commit description style 'commit <12+ chars of sha1> (\"<title line>\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herecurr);
+ }
}
# Check for added, moved or deleted files
@@ -2355,6 +2401,13 @@ sub process {
"Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
}
+# discourage the use of boolean for type definition attributes of Kconfig options
+ if ($realfile =~ /Kconfig/ &&
+ $line =~ /^\+\s*\bboolean\b/) {
+ WARN("CONFIG_TYPE_BOOLEAN",
+ "Use of boolean is deprecated, please use bool instead.\n" . $herecurr);
+ }
+
if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
($line =~ /\+(EXTRA_[A-Z]+FLAGS).*/)) {
my $flag = $1;
@@ -2499,7 +2552,7 @@ sub process {
}
}
- if ($line =~ /^\+.*(\w+\s*)?\(\s*$Type\s*\)[ \t]+(?!$Assignment|$Arithmetic|[,;\({\[\<\>])/ &&
+ if ($line =~ /^\+.*(\w+\s*)?\(\s*$Type\s*\)[ \t]+(?!$Assignment|$Arithmetic|[,;:\?\(\{\}\[\<\>]|&&|\|\||\\$)/ &&
(!defined($1) || $1 !~ /sizeof\s*/)) {
if (CHK("SPACING",
"No space is necessary after a cast\n" . $herecurr) &&
@@ -3124,6 +3177,7 @@ sub process {
$line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ &&
$line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ &&
$line !~ /\b$typeTypedefs\b/ &&
+ $line !~ /\b$typeOtherOSTypedefs\b/ &&
$line !~ /\b__bitwise(?:__|)\b/) {
WARN("NEW_TYPEDEFS",
"do not add new typedefs\n" . $herecurr);
@@ -3200,7 +3254,7 @@ sub process {
# check for uses of printk_ratelimit
if ($line =~ /\bprintk_ratelimit\s*\(/) {
WARN("PRINTK_RATELIMITED",
-"Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
+ "Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
}
# printk should use KERN_* levels. Note that follow on printk's on the
@@ -3646,7 +3700,22 @@ sub process {
$op eq '*' or $op eq '/' or
$op eq '%')
{
- if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
+ if ($check) {
+ if (defined $fix_elements[$n + 2] && $ctx !~ /[EW]x[EW]/) {
+ if (CHK("SPACING",
+ "spaces preferred around that '$op' $at\n" . $hereptr)) {
+ $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+ $fix_elements[$n + 2] =~ s/^\s+//;
+ $line_fixed = 1;
+ }
+ } elsif (!defined $fix_elements[$n + 2] && $ctx !~ /Wx[OE]/) {
+ if (CHK("SPACING",
+ "space preferred before that '$op' $at\n" . $hereptr)) {
+ $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]);
+ $line_fixed = 1;
+ }
+ }
+ } elsif ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
if (ERROR("SPACING",
"need consistent spacing around '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
@@ -4251,6 +4320,7 @@ sub process {
$ctx = $dstat;
$dstat =~ s/\\\n.//g;
+ $dstat =~ s/$;/ /g;
if ($dstat =~ /^\+\s*#\s*define\s+$Ident\s*${balanced_parens}\s*do\s*{(.*)\s*}\s*while\s*\(\s*0\s*\)\s*([;\s]*)\s*$/) {
my $stmts = $2;
@@ -4417,12 +4487,18 @@ sub process {
# check for unnecessary blank lines around braces
if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) {
- CHK("BRACES",
- "Blank lines aren't necessary before a close brace '}'\n" . $hereprev);
+ if (CHK("BRACES",
+ "Blank lines aren't necessary before a close brace '}'\n" . $hereprev) &&
+ $fix && $prevrawline =~ /^\+/) {
+ fix_delete_line($fixlinenr - 1, $prevrawline);
+ }
}
if (($rawline =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) {
- CHK("BRACES",
- "Blank lines aren't necessary after an open brace '{'\n" . $hereprev);
+ if (CHK("BRACES",
+ "Blank lines aren't necessary after an open brace '{'\n" . $hereprev) &&
+ $fix) {
+ fix_delete_line($fixlinenr, $rawline);
+ }
}
# no volatiles please
@@ -4545,7 +4621,7 @@ sub process {
}
# check for logging functions with KERN_<LEVEL>
- if ($line !~ /printk\s*\(/ &&
+ if ($line !~ /printk(?:_ratelimited|_once)?\s*\(/ &&
$line =~ /\b$logFunctions\s*\(.*\b(KERN_[A-Z]+)\b/) {
my $level = $1;
if (WARN("UNNECESSARY_KERN_LEVEL",
@@ -4804,7 +4880,8 @@ sub process {
# check for seq_printf uses that could be seq_puts
if ($sline =~ /\bseq_printf\s*\(.*"\s*\)\s*;\s*$/) {
my $fmt = get_quoted_string($line, $rawline);
- if ($fmt ne "" && $fmt !~ /[^\\]\%/) {
+ $fmt =~ s/%%//g;
+ if ($fmt !~ /%/) {
if (WARN("PREFER_SEQ_PUTS",
"Prefer seq_puts to seq_printf\n" . $herecurr) &&
$fix) {
@@ -5089,6 +5166,12 @@ sub process {
}
}
+# check for uses of __DATE__, __TIME__, __TIMESTAMP__
+ while ($line =~ /\b(__(?:DATE|TIME|TIMESTAMP)__)\b/g) {
+ ERROR("DATE_TIME",
+ "Use of the '$1' macro makes the build non-deterministic\n" . $herecurr);
+ }
+
# check for use of yield()
if ($line =~ /\byield\s*\(\s*\)/) {
WARN("YIELD",
@@ -5140,8 +5223,9 @@ sub process {
"please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
}
-# check for various ops structs, ensure they are const.
- my $struct_ops = qr{acpi_dock_ops|
+# check for various structs that are normally const (ops, kgdb, device_tree)
+ my $const_structs = qr{
+ acpi_dock_ops|
address_space_operations|
backlight_ops|
block_device_operations|
@@ -5164,6 +5248,7 @@ sub process {
mtrr_ops|
neigh_ops|
nlmsvc_binding|
+ of_device_id|
pci_raw_ops|
pipe_buf_operations|
platform_hibernation_ops|
@@ -5179,7 +5264,7 @@ sub process {
usb_mon_operations|
wd_ops}x;
if ($line !~ /\bconst\b/ &&
- $line =~ /\bstruct\s+($struct_ops)\b/) {
+ $line =~ /\bstruct\s+($const_structs)\b/) {
WARN("CONST_STRUCT",
"struct $1 should normally be const\n" .
$herecurr);
@@ -5204,6 +5289,13 @@ sub process {
"#define of '$1' is wrong - use Kconfig variables or standard guards instead\n" . $herecurr);
}
+# likely/unlikely comparisons similar to "(likely(foo) > 0)"
+ if ($^V && $^V ge 5.10.0 &&
+ $line =~ /\b((?:un)?likely)\s*\(\s*$FuncArg\s*\)\s*$Compare/) {
+ WARN("LIKELY_MISUSE",
+ "Using $1 should generally have parentheses around the comparison\n" . $herecurr);
+ }
+
# whine mightly about in_atomic
if ($line =~ /\bin_atomic\s*\(/) {
if ($realfile =~ m@^drivers/@) {
@@ -5255,6 +5347,9 @@ sub process {
length($val) ne 4)) {
ERROR("NON_OCTAL_PERMISSIONS",
"Use 4 digit octal (0777) not decimal permissions\n" . $herecurr);
+ } elsif ($val =~ /^$Octal$/ && (oct($val) & 02)) {
+ ERROR("EXPORTED_WORLD_WRITABLE",
+ "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
}
}
}
diff --git a/scripts/diffconfig b/scripts/diffconfig
index 6d672836e187..0db267d0adc9 100755
--- a/scripts/diffconfig
+++ b/scripts/diffconfig
@@ -28,7 +28,6 @@ If no config files are specified, .config and .config.old are used.
Example usage:
$ diffconfig .config config-with-some-changes
-EXT2_FS_XATTR n
--EXT2_FS_XIP n
CRAMFS n -> y
EXT2_FS y -> n
LOG_BUF_SHIFT 14 -> 16
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
index 0865b3e752be..73a2c7da0e55 100644
--- a/scripts/module-common.lds
+++ b/scripts/module-common.lds
@@ -6,14 +6,17 @@
SECTIONS {
/DISCARD/ : { *(.discard) }
- __ksymtab : { *(SORT(___ksymtab+*)) }
- __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
- __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
- __ksymtab_unused_gpl : { *(SORT(___ksymtab_unused_gpl+*)) }
- __ksymtab_gpl_future : { *(SORT(___ksymtab_gpl_future+*)) }
- __kcrctab : { *(SORT(___kcrctab+*)) }
- __kcrctab_gpl : { *(SORT(___kcrctab_gpl+*)) }
- __kcrctab_unused : { *(SORT(___kcrctab_unused+*)) }
- __kcrctab_unused_gpl : { *(SORT(___kcrctab_unused_gpl+*)) }
- __kcrctab_gpl_future : { *(SORT(___kcrctab_gpl_future+*)) }
+ __ksymtab 0 : { *(SORT(___ksymtab+*)) }
+ __ksymtab_gpl 0 : { *(SORT(___ksymtab_gpl+*)) }
+ __ksymtab_unused 0 : { *(SORT(___ksymtab_unused+*)) }
+ __ksymtab_unused_gpl 0 : { *(SORT(___ksymtab_unused_gpl+*)) }
+ __ksymtab_gpl_future 0 : { *(SORT(___ksymtab_gpl_future+*)) }
+ __kcrctab 0 : { *(SORT(___kcrctab+*)) }
+ __kcrctab_gpl 0 : { *(SORT(___kcrctab_gpl+*)) }
+ __kcrctab_unused 0 : { *(SORT(___kcrctab_unused+*)) }
+ __kcrctab_unused_gpl 0 : { *(SORT(___kcrctab_unused_gpl+*)) }
+ __kcrctab_gpl_future 0 : { *(SORT(___kcrctab_gpl_future+*)) }
+
+ . = ALIGN(8);
+ .init_array 0 : { *(SORT(.init_array.*)) *(.init_array) }
}
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 56ea99a12ab7..826470d7f000 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -242,8 +242,13 @@ if ($arch eq "x86_64") {
$cc .= " -m32";
} elsif ($arch eq "s390" && $bits == 64) {
- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
- $mcount_adjust = -14;
+ if ($cc =~ /-DCC_USING_HOTPATCH/) {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+ $mcount_adjust = 0;
+ } else {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
+ $mcount_adjust = -14;
+ }
$alignment = 8;
$type = ".quad";
$ld .= " -m elf64_s390";
@@ -255,7 +260,6 @@ if ($arch eq "x86_64") {
# force flags for this arch
$ld .= " -m shlelf_linux";
$objcopy .= " -O elf32-sh-linux";
- $cc .= " -m32";
} elsif ($arch eq "powerpc") {
$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
diff --git a/security/capability.c b/security/capability.c
index d68c57a62bcf..070dd46f62f4 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -12,6 +12,29 @@
#include <linux/security.h>
+static int cap_binder_set_context_mgr(struct task_struct *mgr)
+{
+ return 0;
+}
+
+static int cap_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static int cap_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static int cap_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to, struct file *file)
+{
+ return 0;
+}
+
static int cap_syslog(int type)
{
return 0;
@@ -930,6 +953,10 @@ static void cap_audit_rule_free(void *lsmrule)
void __init security_fixup_ops(struct security_operations *ops)
{
+ set_to_cap_if_null(ops, binder_set_context_mgr);
+ set_to_cap_if_null(ops, binder_transaction);
+ set_to_cap_if_null(ops, binder_transfer_binder);
+ set_to_cap_if_null(ops, binder_transfer_file);
set_to_cap_if_null(ops, ptrace_access_check);
set_to_cap_if_null(ops, ptrace_traceme);
set_to_cap_if_null(ops, capget);
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 57515bc915c0..df303346029b 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -126,7 +126,6 @@ config IMA_TRUSTED_KEYRING
bool "Require all keys on the .ima keyring be signed"
depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
depends on INTEGRITY_ASYMMETRIC_KEYS
- select KEYS_DEBUG_PROC_KEYS
default y
help
This option requires that all keys added to the .ima
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index a4f3f8c48d6e..72483b8f1be5 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -80,21 +80,3 @@ config ENCRYPTED_KEYS
Userspace only ever sees/stores encrypted blobs.
If you are unsure as to whether this is required, answer N.
-
-config KEYS_DEBUG_PROC_KEYS
- bool "Enable the /proc/keys file by which keys may be viewed"
- depends on KEYS
- help
- This option turns on support for the /proc/keys file - through which
- can be listed all the keys on the system that are viewable by the
- reading process.
-
- The only keys included in the list are those that grant View
- permission to the reading process whether or not it possesses them.
- Note that LSM security checks are still performed, and may further
- filter out keys that the current process is not authorised to view.
-
- Only key attributes are listed here; key payloads are not included in
- the resulting table.
-
- If you are unsure as to whether this is required, answer N.
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 972eeb336b81..f0611a6368cd 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -18,7 +18,6 @@
#include <asm/errno.h>
#include "internal.h"
-#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
static int proc_keys_open(struct inode *inode, struct file *file);
static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
@@ -38,7 +37,6 @@ static const struct file_operations proc_keys_fops = {
.llseek = seq_lseek,
.release = seq_release,
};
-#endif
static int proc_key_users_open(struct inode *inode, struct file *file);
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
@@ -67,11 +65,9 @@ static int __init key_proc_init(void)
{
struct proc_dir_entry *p;
-#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
p = proc_create("keys", 0, NULL, &proc_keys_fops);
if (!p)
panic("Cannot create /proc/keys\n");
-#endif
p = proc_create("key-users", 0, NULL, &proc_key_users_fops);
if (!p)
@@ -86,8 +82,6 @@ __initcall(key_proc_init);
* Implement "/proc/keys" to provide a list of the keys on the system that
* grant View permission to the caller.
*/
-#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-
static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
{
struct user_namespace *user_ns = seq_user_ns(p);
@@ -275,8 +269,6 @@ static int proc_keys_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
-
static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
while (n) {
diff --git a/security/security.c b/security/security.c
index 18b35c63fc0c..e81d5bbe7363 100644
--- a/security/security.c
+++ b/security/security.c
@@ -135,6 +135,29 @@ int __init register_security(struct security_operations *ops)
/* Security operations */
+int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+ return security_ops->binder_set_context_mgr(mgr);
+}
+
+int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ return security_ops->binder_transaction(from, to);
+}
+
+int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ return security_ops->binder_transfer_binder(from, to);
+}
+
+int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to, struct file *file)
+{
+ return security_ops->binder_transfer_file(from, to, file);
+}
+
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
#ifdef CONFIG_SECURITY_YAMA_STACKED
@@ -726,16 +749,15 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
return prot | PROT_EXEC;
/*
* ditto if it's not on noexec mount, except that on !MMU we need
- * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case
+ * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
*/
if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
#ifndef CONFIG_MMU
- unsigned long caps = 0;
- struct address_space *mapping = file->f_mapping;
- if (mapping && mapping->backing_dev_info)
- caps = mapping->backing_dev_info->capabilities;
- if (!(caps & BDI_CAP_EXEC_MAP))
- return prot;
+ if (file->f_op->mmap_capabilities) {
+ unsigned caps = file->f_op->mmap_capabilities(file);
+ if (!(caps & NOMMU_MAP_EXEC))
+ return prot;
+ }
#endif
return prot | PROT_EXEC;
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index a18f1fa6440b..afcc0aed9393 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -517,11 +517,6 @@ out:
return rc;
}
-static inline int avc_sidcmp(u32 x, u32 y)
-{
- return (x == y || x == SECSID_WILD || y == SECSID_WILD);
-}
-
/**
* avc_update_node Update an AVC entry
* @event : Updating event
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6da7532893a1..29c39e0b03ed 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -401,23 +401,14 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
- if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
- sbsec->behavior == SECURITY_FS_USE_TRANS ||
- sbsec->behavior == SECURITY_FS_USE_TASK)
- return 1;
-
- /* Special handling for sysfs. Is genfs but also has setxattr handler*/
- if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
- return 1;
-
- /*
- * Special handling for rootfs. Is genfs but supports
- * setting SELinux context on in-core inodes.
- */
- if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
- return 1;
-
- return 0;
+ return sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ sbsec->behavior == SECURITY_FS_USE_TASK ||
+ /* Special handling. Genfs but also in-core setxattr handler */
+ !strcmp(sb->s_type->name, "sysfs") ||
+ !strcmp(sb->s_type->name, "pstore") ||
+ !strcmp(sb->s_type->name, "debugfs") ||
+ !strcmp(sb->s_type->name, "rootfs");
}
static int sb_finish_set_opts(struct super_block *sb)
@@ -456,10 +447,6 @@ static int sb_finish_set_opts(struct super_block *sb)
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
sb->s_id, sb->s_type->name);
- else
- printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n",
- sb->s_id, sb->s_type->name,
- labeling_behaviors[sbsec->behavior-1]);
sbsec->flags |= SE_SBINITIALIZED;
if (selinux_is_sblabel_mnt(sb))
@@ -1933,6 +1920,74 @@ static inline u32 open_file_to_av(struct file *file)
/* Hook functions begin here. */
+static int selinux_binder_set_context_mgr(struct task_struct *mgr)
+{
+ u32 mysid = current_sid();
+ u32 mgrsid = task_sid(mgr);
+
+ return avc_has_perm(mysid, mgrsid, SECCLASS_BINDER,
+ BINDER__SET_CONTEXT_MGR, NULL);
+}
+
+static int selinux_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ u32 mysid = current_sid();
+ u32 fromsid = task_sid(from);
+ u32 tosid = task_sid(to);
+ int rc;
+
+ if (mysid != fromsid) {
+ rc = avc_has_perm(mysid, fromsid, SECCLASS_BINDER,
+ BINDER__IMPERSONATE, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__CALL,
+ NULL);
+}
+
+static int selinux_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ u32 fromsid = task_sid(from);
+ u32 tosid = task_sid(to);
+
+ return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER,
+ NULL);
+}
+
+static int selinux_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file)
+{
+ u32 sid = task_sid(to);
+ struct file_security_struct *fsec = file->f_security;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode_security_struct *isec = inode->i_security;
+ struct common_audit_data ad;
+ int rc;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = file->f_path;
+
+ if (sid != fsec->sid) {
+ rc = avc_has_perm(sid, fsec->sid,
+ SECCLASS_FD,
+ FD__USE,
+ &ad);
+ if (rc)
+ return rc;
+ }
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
+ &ad);
+}
+
static int selinux_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
@@ -5810,6 +5865,11 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
static struct security_operations selinux_ops = {
.name = "selinux",
+ .binder_set_context_mgr = selinux_binder_set_context_mgr,
+ .binder_transaction = selinux_binder_transaction,
+ .binder_transfer_binder = selinux_binder_transfer_binder,
+ .binder_transfer_file = selinux_binder_transfer_file,
+
.ptrace_access_check = selinux_ptrace_access_check,
.ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index be491a74c1ed..eccd61b3de8a 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -151,5 +151,7 @@ struct security_class_mapping secclass_map[] = {
{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
{ "tun_socket",
{ COMMON_SOCK_PERMS, "attach_queue", NULL } },
+ { "binder", { "impersonate", "call", "set_context_mgr", "transfer",
+ NULL } },
{ NULL }
};
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index bc2a586f095c..74aa224267c1 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -289,12 +289,16 @@ static int policydb_init(struct policydb *p)
goto out;
p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10));
- if (!p->filename_trans)
+ if (!p->filename_trans) {
+ rc = -ENOMEM;
goto out;
+ }
p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
- if (!p->range_tr)
+ if (!p->range_tr) {
+ rc = -ENOMEM;
goto out;
+ }
ebitmap_init(&p->filename_trans_ttypes);
ebitmap_init(&p->policycaps);
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
index b065f9789418..271adae81796 100644
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -28,3 +28,15 @@ config SECURITY_SMACK_BRINGUP
access rule set once the behavior is well understood.
This is a superior mechanism to the oft abused
"permissive" mode of other systems.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_NETFILTER
+ bool "Packet marking using secmarks for netfilter"
+ depends on SECURITY_SMACK
+ depends on NETWORK_SECMARK
+ depends on NETFILTER
+ default n
+ help
+ This enables security marking of network packets using
+ Smack labels.
+ If you are unsure how to answer this question, answer N.
diff --git a/security/smack/Makefile b/security/smack/Makefile
index 67a63aaec827..ee2ebd504541 100644
--- a/security/smack/Makefile
+++ b/security/smack/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_SECURITY_SMACK) := smack.o
smack-y := smack_lsm.o smack_access.o smackfs.o
+smack-$(CONFIG_SECURITY_SMACK_NETFILTER) += smack_netfilter.o
diff --git a/security/smack/smack.h b/security/smack/smack.h
index b828a379377c..67ccb7b2b89b 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -248,6 +248,7 @@ struct smack_known *smk_find_entry(const char *);
/*
* Shared data.
*/
+extern int smack_enabled;
extern int smack_cipso_direct;
extern int smack_cipso_mapped;
extern struct smack_known *smack_net_ambient;
@@ -298,6 +299,16 @@ static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
return tsp->smk_task;
}
+static inline struct smack_known *smk_of_task_struct(const struct task_struct *t)
+{
+ struct smack_known *skp;
+
+ rcu_read_lock();
+ skp = smk_of_task(__task_cred(t)->security);
+ rcu_read_unlock();
+ return skp;
+}
+
/*
* Present a pointer to the forked smack label entry in an task blob.
*/
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index f1b17a476e12..a0ccce4e46f8 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -43,8 +43,6 @@
#include <linux/binfmts.h>
#include "smack.h"
-#define task_security(task) (task_cred_xxx((task), security))
-
#define TRANS_TRUE "TRUE"
#define TRANS_TRUE_SIZE 4
@@ -52,8 +50,11 @@
#define SMK_RECEIVING 1
#define SMK_SENDING 2
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
LIST_HEAD(smk_ipv6_port_list);
+#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
static struct kmem_cache *smack_inode_cache;
+int smack_enabled;
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
static void smk_bu_mode(int mode, char *s)
@@ -120,7 +121,7 @@ static int smk_bu_current(char *note, struct smack_known *oskp,
static int smk_bu_task(struct task_struct *otp, int mode, int rc)
{
struct task_smack *tsp = current_security();
- struct task_smack *otsp = task_security(otp);
+ struct smack_known *smk_task = smk_of_task_struct(otp);
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (rc <= 0)
@@ -128,7 +129,7 @@ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
smk_bu_mode(mode, acc);
pr_info("Smack Bringup: (%s %s %s) %s to %s\n",
- tsp->smk_task->smk_known, otsp->smk_task->smk_known, acc,
+ tsp->smk_task->smk_known, smk_task->smk_known, acc,
current->comm, otp->comm);
return 0;
}
@@ -160,7 +161,7 @@ static int smk_bu_file(struct file *file, int mode, int rc)
{
struct task_smack *tsp = current_security();
struct smack_known *sskp = tsp->smk_task;
- struct inode *inode = file->f_inode;
+ struct inode *inode = file_inode(file);
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (rc <= 0)
@@ -168,7 +169,7 @@ static int smk_bu_file(struct file *file, int mode, int rc)
smk_bu_mode(mode, acc);
pr_info("Smack Bringup: (%s %s %s) file=(%s %ld %pD) %s\n",
- sskp->smk_known, (char *)file->f_security, acc,
+ sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
inode->i_sb->s_id, inode->i_ino, file,
current->comm);
return 0;
@@ -202,6 +203,7 @@ static int smk_bu_credfile(const struct cred *cred, struct file *file,
/**
* smk_fetch - Fetch the smack label from a file.
+ * @name: type of the label (attribute)
* @ip: a pointer to the inode
* @dp: a pointer to the dentry
*
@@ -254,7 +256,9 @@ struct inode_smack *new_inode_smack(struct smack_known *skp)
/**
* new_task_smack - allocate a task security blob
- * @smack: a pointer to the Smack label to use in the blob
+ * @task: a pointer to the Smack label for the running task
+ * @forked: a pointer to the Smack label for the forked task
+ * @gfp: type of the memory for the allocation
*
* Returns the new blob or NULL if there's no memory available
*/
@@ -277,8 +281,9 @@ static struct task_smack *new_task_smack(struct smack_known *task,
/**
* smk_copy_rules - copy a rule set
- * @nhead - new rules header pointer
- * @ohead - old rules header pointer
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
*
* Returns 0 on success, -ENOMEM on error
*/
@@ -345,7 +350,8 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
saip = &ad;
}
- tsp = task_security(tracer);
+ rcu_read_lock();
+ tsp = __task_cred(tracer)->security;
tracer_known = smk_of_task(tsp);
if ((mode & PTRACE_MODE_ATTACH) &&
@@ -365,11 +371,14 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
tracee_known->smk_known,
0, rc, saip);
+ rcu_read_unlock();
return rc;
}
/* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip);
+
+ rcu_read_unlock();
return rc;
}
@@ -396,7 +405,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
if (rc != 0)
return rc;
- skp = smk_of_task(task_security(ctp));
+ skp = smk_of_task_struct(ctp);
rc = smk_ptrace_rule_check(current, skp, mode, __func__);
return rc;
@@ -796,7 +805,7 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
if (name)
*name = XATTR_SMACK_SUFFIX;
- if (value) {
+ if (value && len) {
rcu_read_lock();
may = smk_access_entry(skp->smk_known, dsp->smk_known,
&skp->smk_rules);
@@ -817,10 +826,9 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
*value = kstrdup(isp->smk_known, GFP_NOFS);
if (*value == NULL)
return -ENOMEM;
- }
- if (len)
*len = strlen(isp->smk_known);
+ }
return 0;
}
@@ -1344,6 +1352,9 @@ static int smack_file_permission(struct file *file, int mask)
* The security blob for a file is a pointer to the master
* label list, so no allocation is done.
*
+ * f_security is the owner security information. It
+ * isn't used on file access checks, it's for send_sigio.
+ *
* Returns 0
*/
static int smack_file_alloc_security(struct file *file)
@@ -1381,17 +1392,18 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
{
int rc = 0;
struct smk_audit_info ad;
+ struct inode *inode = file_inode(file);
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
if (_IOC_DIR(cmd) & _IOC_WRITE) {
- rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
+ rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad);
rc = smk_bu_file(file, MAY_WRITE, rc);
}
if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) {
- rc = smk_curacc(file->f_security, MAY_READ, &ad);
+ rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
rc = smk_bu_file(file, MAY_READ, rc);
}
@@ -1409,10 +1421,11 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
{
struct smk_audit_info ad;
int rc;
+ struct inode *inode = file_inode(file);
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad);
rc = smk_bu_file(file, MAY_LOCK, rc);
return rc;
}
@@ -1434,7 +1447,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
{
struct smk_audit_info ad;
int rc = 0;
-
+ struct inode *inode = file_inode(file);
switch (cmd) {
case F_GETLK:
@@ -1443,14 +1456,14 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
case F_SETLKW:
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad);
rc = smk_bu_file(file, MAY_LOCK, rc);
break;
case F_SETOWN:
case F_SETSIG:
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
+ rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad);
rc = smk_bu_file(file, MAY_WRITE, rc);
break;
default:
@@ -1568,14 +1581,10 @@ static int smack_mmap_file(struct file *file,
* smack_file_set_fowner - set the file security blob value
* @file: object in question
*
- * Returns 0
- * Further research may be required on this one.
*/
static void smack_file_set_fowner(struct file *file)
{
- struct smack_known *skp = smk_of_current();
-
- file->f_security = skp;
+ file->f_security = smk_of_current();
}
/**
@@ -1627,6 +1636,7 @@ static int smack_file_receive(struct file *file)
int rc;
int may = 0;
struct smk_audit_info ad;
+ struct inode *inode = file_inode(file);
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
@@ -1638,7 +1648,7 @@ static int smack_file_receive(struct file *file)
if (file->f_mode & FMODE_WRITE)
may |= MAY_WRITE;
- rc = smk_curacc(file->f_security, may, &ad);
+ rc = smk_curacc(smk_of_inode(inode), may, &ad);
rc = smk_bu_file(file, may, rc);
return rc;
}
@@ -1658,21 +1668,17 @@ static int smack_file_receive(struct file *file)
static int smack_file_open(struct file *file, const struct cred *cred)
{
struct task_smack *tsp = cred->security;
- struct inode_smack *isp = file_inode(file)->i_security;
+ struct inode *inode = file_inode(file);
struct smk_audit_info ad;
int rc;
- if (smack_privileged(CAP_MAC_OVERRIDE)) {
- file->f_security = isp->smk_inode;
+ if (smack_privileged(CAP_MAC_OVERRIDE))
return 0;
- }
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- rc = smk_access(tsp->smk_task, isp->smk_inode, MAY_READ, &ad);
+ rc = smk_access(tsp->smk_task, smk_of_inode(inode), MAY_READ, &ad);
rc = smk_bu_credfile(cred, file, MAY_READ, rc);
- if (rc == 0)
- file->f_security = isp->smk_inode;
return rc;
}
@@ -1826,7 +1832,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
const char *caller)
{
struct smk_audit_info ad;
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);
int rc;
smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
@@ -1879,7 +1885,7 @@ static int smack_task_getsid(struct task_struct *p)
*/
static void smack_task_getsecid(struct task_struct *p, u32 *secid)
{
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);
*secid = skp->smk_secid;
}
@@ -1986,7 +1992,7 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
{
struct smk_audit_info ad;
struct smack_known *skp;
- struct smack_known *tkp = smk_of_task(task_security(p));
+ struct smack_known *tkp = smk_of_task_struct(p);
int rc;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
@@ -2040,7 +2046,7 @@ static int smack_task_wait(struct task_struct *p)
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
struct inode_smack *isp = inode->i_security;
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);
isp->smk_inode = skp;
}
@@ -2212,6 +2218,7 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
return smack_netlabel(sk, sk_lbl);
}
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
/**
* smk_ipv6_port_label - Smack port access table management
* @sock: socket
@@ -2361,6 +2368,7 @@ auditout:
rc = smk_bu_note("IPv6 port check", skp, object, MAY_WRITE, rc);
return rc;
}
+#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
/**
* smack_inode_setsecurity - set smack xattrs
@@ -2421,8 +2429,10 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
} else
return -EOPNOTSUPP;
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
if (sock->sk->sk_family == PF_INET6)
smk_ipv6_port_label(sock, NULL);
+#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
return 0;
}
@@ -2450,6 +2460,7 @@ static int smack_socket_post_create(struct socket *sock, int family,
return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
}
+#ifndef CONFIG_SECURITY_SMACK_NETFILTER
/**
* smack_socket_bind - record port binding information.
* @sock: the socket
@@ -2463,11 +2474,14 @@ static int smack_socket_post_create(struct socket *sock, int family,
static int smack_socket_bind(struct socket *sock, struct sockaddr *address,
int addrlen)
{
+#if IS_ENABLED(CONFIG_IPV6)
if (sock->sk != NULL && sock->sk->sk_family == PF_INET6)
smk_ipv6_port_label(sock, address);
+#endif
return 0;
}
+#endif /* !CONFIG_SECURITY_SMACK_NETFILTER */
/**
* smack_socket_connect - connect access check
@@ -2496,8 +2510,10 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
case PF_INET6:
if (addrlen < sizeof(struct sockaddr_in6))
return -EINVAL;
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
SMK_CONNECTING);
+#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
break;
}
return rc;
@@ -3033,7 +3049,8 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* of the superblock.
*/
if (opt_dentry->d_parent == opt_dentry) {
- if (sbp->s_magic == CGROUP_SUPER_MAGIC) {
+ switch (sbp->s_magic) {
+ case CGROUP_SUPER_MAGIC:
/*
* The cgroup filesystem is never mounted,
* so there's no opportunity to set the mount
@@ -3041,8 +3058,19 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
sbsp->smk_root = &smack_known_star;
sbsp->smk_default = &smack_known_star;
+ isp->smk_inode = sbsp->smk_root;
+ break;
+ case TMPFS_MAGIC:
+ /*
+ * What about shmem/tmpfs anonymous files with dentry
+ * obtained from d_alloc_pseudo()?
+ */
+ isp->smk_inode = smk_of_current();
+ break;
+ default:
+ isp->smk_inode = sbsp->smk_root;
+ break;
}
- isp->smk_inode = sbsp->smk_root;
isp->smk_flags |= SMK_INODE_INSTANT;
goto unlockandout;
}
@@ -3200,7 +3228,7 @@ unlockandout:
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);
char *cp;
int slen;
@@ -3297,7 +3325,7 @@ static int smack_unix_stream_connect(struct sock *sock,
if (!smack_privileged(CAP_MAC_OVERRIDE)) {
skp = ssp->smk_out;
- okp = osp->smk_out;
+ okp = osp->smk_in;
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
smk_ad_setfield_u_net_sk(&ad, other);
@@ -3305,7 +3333,9 @@ static int smack_unix_stream_connect(struct sock *sock,
rc = smk_access(skp, okp, MAY_WRITE, &ad);
rc = smk_bu_note("UDS connect", skp, okp, MAY_WRITE, rc);
if (rc == 0) {
- rc = smk_access(okp, skp, MAY_WRITE, NULL);
+ okp = osp->smk_out;
+ skp = ssp->smk_in;
+ rc = smk_access(okp, skp, MAY_WRITE, &ad);
rc = smk_bu_note("UDS connect", okp, skp,
MAY_WRITE, rc);
}
@@ -3366,7 +3396,9 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
+#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
int rc = 0;
/*
@@ -3380,7 +3412,9 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
rc = smack_netlabel_send(sock->sk, sip);
break;
case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
+#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
break;
}
return rc;
@@ -3471,6 +3505,7 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
return smack_net_ambient;
}
+#if IS_ENABLED(CONFIG_IPV6)
static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
{
u8 nexthdr;
@@ -3517,6 +3552,7 @@ static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
}
return proto;
}
+#endif /* CONFIG_IPV6 */
/**
* smack_socket_sock_rcv_skb - Smack packet delivery access check
@@ -3529,15 +3565,30 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
- struct smack_known *skp;
- struct sockaddr_in6 sadd;
+ struct smack_known *skp = NULL;
int rc = 0;
struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
struct lsm_network_audit net;
#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 sadd;
+ int proto;
+#endif /* CONFIG_IPV6 */
+
switch (sk->sk_family) {
case PF_INET:
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+ /*
+ * If there is a secmark use it rather than the CIPSO label.
+ * If there is no secmark fall back to CIPSO.
+ * The secmark is assumed to reflect policy better.
+ */
+ if (skb && skb->secmark != 0) {
+ skp = smack_from_secid(skb->secmark);
+ goto access_check;
+ }
+#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
/*
* Translate what netlabel gave us.
*/
@@ -3551,6 +3602,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
netlbl_secattr_destroy(&secattr);
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+access_check:
+#endif
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
ad.a.u.net->family = sk->sk_family;
@@ -3569,14 +3623,32 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (rc != 0)
netlbl_skbuff_err(skb, rc, 0);
break;
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
- rc = smk_skb_to_addr_ipv6(skb, &sadd);
- if (rc == IPPROTO_UDP || rc == IPPROTO_TCP)
- rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
+ proto = smk_skb_to_addr_ipv6(skb, &sadd);
+ if (proto != IPPROTO_UDP && proto != IPPROTO_TCP)
+ break;
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+ if (skb && skb->secmark != 0)
+ skp = smack_from_secid(skb->secmark);
else
- rc = 0;
+ skp = smack_net_ambient;
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->netif = skb->skb_iif;
+ ipv6_skb_to_auditdata(skb, &ad.a, NULL);
+#endif /* CONFIG_AUDIT */
+ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv6 delivery", skp, ssp->smk_in,
+ MAY_WRITE, rc);
+#else /* CONFIG_SECURITY_SMACK_NETFILTER */
+ rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
+#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
break;
+#endif /* CONFIG_IPV6 */
}
+
return rc;
}
@@ -3638,16 +3710,25 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
if (skb != NULL) {
if (skb->protocol == htons(ETH_P_IP))
family = PF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
family = PF_INET6;
+#endif /* CONFIG_IPV6 */
}
if (family == PF_UNSPEC && sock != NULL)
family = sock->sk->sk_family;
- if (family == PF_UNIX) {
+ switch (family) {
+ case PF_UNIX:
ssp = sock->sk->sk_security;
s = ssp->smk_out->smk_secid;
- } else if (family == PF_INET || family == PF_INET6) {
+ break;
+ case PF_INET:
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+ s = skb->secmark;
+ if (s != 0)
+ break;
+#endif
/*
* Translate what netlabel gave us.
*/
@@ -3660,6 +3741,14 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
s = skp->smk_secid;
}
netlbl_secattr_destroy(&secattr);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case PF_INET6:
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+ s = skb->secmark;
+#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
+ break;
+#endif /* CONFIG_IPV6 */
}
*secid = s;
if (s == 0)
@@ -3715,6 +3804,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct lsm_network_audit net;
#endif
+#if IS_ENABLED(CONFIG_IPV6)
if (family == PF_INET6) {
/*
* Handle mapped IPv4 packets arriving
@@ -3726,6 +3816,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
else
return 0;
}
+#endif /* CONFIG_IPV6 */
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
@@ -3834,11 +3925,11 @@ static void smack_key_free(struct key *key)
key->security = NULL;
}
-/*
+/**
* smack_key_permission - Smack access on a key
* @key_ref: gets to the object
* @cred: the credentials to use
- * @perm: unused
+ * @perm: requested key permissions
*
* Return 0 if the task has read and write to the object,
* an error code otherwise
@@ -4184,7 +4275,9 @@ struct security_operations smack_ops = {
.unix_may_send = smack_unix_may_send,
.socket_post_create = smack_socket_post_create,
+#ifndef CONFIG_SECURITY_SMACK_NETFILTER
.socket_bind = smack_socket_bind,
+#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
.socket_connect = smack_socket_connect,
.socket_sendmsg = smack_socket_sendmsg,
.socket_sock_rcv_skb = smack_socket_sock_rcv_skb,
@@ -4265,6 +4358,8 @@ static __init int smack_init(void)
if (!security_module_enable(&smack_ops))
return 0;
+ smack_enabled = 1;
+
smack_inode_cache = KMEM_CACHE(inode_smack, 0);
if (!smack_inode_cache)
return -ENOMEM;
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
new file mode 100644
index 000000000000..c952632afb0d
--- /dev/null
+++ b/security/smack/smack_netfilter.c
@@ -0,0 +1,96 @@
+/*
+ * Simplified MAC Kernel (smack) security module
+ *
+ * This file contains the Smack netfilter implementation
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * Copyright (C) 2014 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netdevice.h>
+#include "smack.h"
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct socket_smack *ssp;
+ struct smack_known *skp;
+
+ if (skb && skb->sk && skb->sk->sk_security) {
+ ssp = skb->sk->sk_security;
+ skp = ssp->smk_out;
+ skb->secmark = skp->smk_secid;
+ }
+
+ return NF_ACCEPT;
+}
+#endif /* IPV6 */
+
+static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct socket_smack *ssp;
+ struct smack_known *skp;
+
+ if (skb && skb->sk && skb->sk->sk_security) {
+ ssp = skb->sk->sk_security;
+ skp = ssp->smk_out;
+ skb->secmark = skp->smk_secid;
+ }
+
+ return NF_ACCEPT;
+}
+
+static struct nf_hook_ops smack_nf_ops[] = {
+ {
+ .hook = smack_ipv4_output,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_SELINUX_FIRST,
+ },
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ {
+ .hook = smack_ipv6_output,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_SELINUX_FIRST,
+ },
+#endif /* IPV6 */
+};
+
+static int __init smack_nf_ip_init(void)
+{
+ int err;
+
+ if (smack_enabled == 0)
+ return 0;
+
+ printk(KERN_DEBUG "Smack: Registering netfilter hooks\n");
+
+ err = nf_register_hooks(smack_nf_ops, ARRAY_SIZE(smack_nf_ops));
+ if (err)
+ pr_info("Smack: nf_register_hooks: error %d\n", err);
+
+ return 0;
+}
+
+__initcall(smack_nf_ip_init);
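
The two hooks in this new file only stamp skb->secmark with the sending socket's Smack label on locally generated IPv4/IPv6 packets; the smack_lsm.c hunks earlier in this patch read that mark back (for example the getpeersec_dgram change that takes skb->secmark under CONFIG_SECURITY_SMACK_NETFILTER). The fragment below is an illustration only, not part of the patch, and assumes the existing smack_from_secid() helper from smack_access.c keeps its current behaviour of mapping a secid back to its smack_known entry.

#include <linux/skbuff.h>
#include "smack.h"

/* Not part of the patch: map a secmark back to its Smack label. */
static struct smack_known *skb_secmark_to_smack(const struct sk_buff *skb)
{
	if (skb->secmark != 0)
		/* label stamped by the LOCAL_OUT hooks above */
		return smack_from_secid(skb->secmark);

	return NULL;	/* caller falls back to the netlabel/CIPSO path */
}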
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 8eb779b9d77f..604e718d68d3 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
select SECURITYFS
select SECURITY_PATH
select SECURITY_NETWORK
+ select SRCU
default n
help
This selects TOMOYO Linux, pathname-based access control.
diff --git a/sound/aoa/soundbus/i2sbus/control.c b/sound/aoa/soundbus/i2sbus/control.c
index 4dc9b49c02cf..f4495decc699 100644
--- a/sound/aoa/soundbus/i2sbus/control.c
+++ b/sound/aoa/soundbus/i2sbus/control.c
@@ -9,8 +9,8 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/prom.h>
#include <asm/macio.h>
#include <asm/pmac_feature.h>
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 4e2b4fbf2496..b9737fae656a 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -74,10 +74,9 @@ static void i2sbus_release_dev(struct device *dev)
int i;
i2sdev = container_of(dev, struct i2sbus_dev, sound.ofdev.dev);
-
- if (i2sdev->intfregs) iounmap(i2sdev->intfregs);
- if (i2sdev->out.dbdma) iounmap(i2sdev->out.dbdma);
- if (i2sdev->in.dbdma) iounmap(i2sdev->in.dbdma);
+ iounmap(i2sdev->intfregs);
+ iounmap(i2sdev->out.dbdma);
+ iounmap(i2sdev->in.dbdma);
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
release_and_free_resource(i2sdev->allocated_resource[i]);
free_dbdma_descriptor_ring(i2sdev, &i2sdev->out.dbdma_ring);
@@ -318,9 +317,9 @@ static int i2sbus_add_dev(struct macio_dev *macio,
free_irq(dev->interrupts[i], dev);
free_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring);
free_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring);
- if (dev->intfregs) iounmap(dev->intfregs);
- if (dev->out.dbdma) iounmap(dev->out.dbdma);
- if (dev->in.dbdma) iounmap(dev->in.dbdma);
+ iounmap(dev->intfregs);
+ iounmap(dev->out.dbdma);
+ iounmap(dev->in.dbdma);
for (i=0;i<3;i++)
release_and_free_resource(dev->allocated_resource[i]);
mutex_destroy(&dev->lock);
@@ -381,10 +380,8 @@ static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state)
list_for_each_entry(i2sdev, &control->list, item) {
/* Notify Alsa */
- if (i2sdev->sound.pcm) {
- /* Suspend PCM streams */
- snd_pcm_suspend_all(i2sdev->sound.pcm);
- }
+ /* Suspend PCM streams */
+ snd_pcm_suspend_all(i2sdev->sound.pcm);
/* Notify codecs */
list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
diff --git a/sound/aoa/soundbus/i2sbus/pcm.c b/sound/aoa/soundbus/i2sbus/pcm.c
index 7b74a4ba75f8..053b09c79053 100644
--- a/sound/aoa/soundbus/i2sbus/pcm.c
+++ b/sound/aoa/soundbus/i2sbus/pcm.c
@@ -6,7 +6,7 @@
* GPL v2, can be found in COPYING.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -968,7 +968,6 @@ i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card,
printk(KERN_DEBUG "i2sbus: failed to create pcm\n");
goto out_put_ci_module;
}
- dev->pcm->dev = &dev->ofdev.dev;
}
/* ALSA yet again sucks.
@@ -988,6 +987,8 @@ i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card,
goto out_put_ci_module;
snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_PLAYBACK,
&i2sbus_playback_ops);
+ dev->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].dev.parent =
+ &dev->ofdev.dev;
i2sdev->out.created = 1;
}
@@ -1003,6 +1004,8 @@ i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card,
goto out_put_ci_module;
snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_CAPTURE,
&i2sbus_record_ops);
+ dev->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].dev.parent =
+ &dev->ofdev.dev;
i2sdev->in.created = 1;
}
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 0e83a73efb16..4140b1b95054 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -889,8 +889,8 @@ static int aaci_probe_ac97(struct aaci *aaci)
static void aaci_free_card(struct snd_card *card)
{
struct aaci *aaci = card->private_data;
- if (aaci->base)
- iounmap(aaci->base);
+
+ iounmap(aaci->base);
}
static struct aaci *aaci_init_card(struct amba_device *dev)
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 4f6b14d704f3..cf4cedf2b420 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -22,6 +22,9 @@
#include <linux/gpio.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -34,10 +37,10 @@
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>
+#ifdef CONFIG_AVR32
#include <mach/cpu.h>
-
-#ifdef CONFIG_ARCH_AT91
-#include <mach/hardware.h>
+#else
+#define cpu_is_at32ap7000() 0
#endif
#include "ac97c.h"
@@ -902,6 +905,40 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
}
}
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ac97c_dt_ids[] = {
+ { .compatible = "atmel,at91sam9263-ac97c", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_ac97c_dt_ids);
+
+static struct ac97c_platform_data *atmel_ac97c_probe_dt(struct device *dev)
+{
+ struct ac97c_platform_data *pdata;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+
+ if (!node) {
+ dev_err(dev, "Device does not have associated DT data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->reset_pin = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
+
+ return pdata;
+}
+#else
+static struct ac97c_platform_data *atmel_ac97c_probe_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data defined\n");
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
static int atmel_ac97c_probe(struct platform_device *pdev)
{
struct snd_card *card;
@@ -922,10 +959,11 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
return -ENXIO;
}
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_dbg(&pdev->dev, "no platform data\n");
- return -ENXIO;
+ pdata = atmel_ac97c_probe_dt(&pdev->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
}
irq = platform_get_irq(pdev, 0);
@@ -1204,6 +1242,7 @@ static struct platform_driver atmel_ac97c_driver = {
.driver = {
.name = "atmel_ac97c",
.pm = ATMEL_AC97C_PM_OPS,
+ .of_match_table = of_match_ptr(atmel_ac97c_dt_ids),
},
};
module_platform_driver(atmel_ac97c_driver);
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 89028fab64fd..b123c42e7dc8 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -868,12 +868,12 @@ static int snd_compress_dev_register(struct snd_device *device)
return -EBADFD;
compr = device->device_data;
- sprintf(str, "comprC%iD%i", compr->card->number, compr->device);
pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
compr->direction);
/* register compressed device */
- ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
- compr->device, &snd_compr_file_ops, compr, str);
+ ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
+ compr->card, compr->device,
+ &snd_compr_file_ops, compr, &compr->dev);
if (ret < 0) {
pr_err("snd_register_device failed\n %d", ret);
return ret;
@@ -887,8 +887,16 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
struct snd_compr *compr;
compr = device->device_data;
- snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
- compr->device);
+ snd_unregister_device(&compr->dev);
+ return 0;
+}
+
+static int snd_compress_dev_free(struct snd_device *device)
+{
+ struct snd_compr *compr;
+
+ compr = device->device_data;
+ put_device(&compr->dev);
return 0;
}
@@ -903,7 +911,7 @@ int snd_compress_new(struct snd_card *card, int device,
int dirn, struct snd_compr *compr)
{
static struct snd_device_ops ops = {
- .dev_free = NULL,
+ .dev_free = snd_compress_dev_free,
.dev_register = snd_compress_dev_register,
.dev_disconnect = snd_compress_dev_disconnect,
};
@@ -911,6 +919,10 @@ int snd_compress_new(struct snd_card *card, int device,
compr->card = card;
compr->device = device;
compr->direction = dirn;
+
+ snd_device_initialize(&compr->dev, card);
+ dev_set_name(&compr->dev, "comprC%iD%i", card->number, device);
+
return snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
}
EXPORT_SYMBOL_GPL(snd_compress_new);
@@ -948,7 +960,7 @@ int snd_compress_register(struct snd_compr *device)
{
int retval;
- if (device->name == NULL || device->dev == NULL || device->ops == NULL)
+ if (device->name == NULL || device->ops == NULL)
return -EINVAL;
pr_debug("Registering compressed device %s\n", device->name);
diff --git a/sound/core/control.c b/sound/core/control.c
index bb96a467e88d..35324a8e83c8 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -50,7 +50,7 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
- int err;
+ int i, err;
err = nonseekable_open(inode, file);
if (err < 0)
@@ -79,8 +79,8 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
init_waitqueue_head(&ctl->change_sleep);
spin_lock_init(&ctl->read_lock);
ctl->card = card;
- ctl->prefer_pcm_subdevice = -1;
- ctl->prefer_rawmidi_subdevice = -1;
+ for (i = 0; i < SND_CTL_SUBDEV_ITEMS; i++)
+ ctl->preferred_subdevice[i] = -1;
ctl->pid = get_pid(task_pid(current));
file->private_data = ctl;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
@@ -373,6 +373,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ id = kcontrol->id;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
@@ -439,6 +440,7 @@ add:
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ id = kcontrol->id;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
@@ -1607,6 +1609,27 @@ static int snd_ctl_fasync(int fd, struct file * file, int on)
return fasync_helper(fd, file, on, &ctl->fasync);
}
+/* return the preferred subdevice number if already assigned;
+ * otherwise return -1
+ */
+int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type)
+{
+ struct snd_ctl_file *kctl;
+ int subdevice = -1;
+
+ read_lock(&card->ctl_files_rwlock);
+ list_for_each_entry(kctl, &card->ctl_files, list) {
+ if (kctl->pid == task_pid(current)) {
+ subdevice = kctl->preferred_subdevice[type];
+ if (subdevice != -1)
+ break;
+ }
+ }
+ read_unlock(&card->ctl_files_rwlock);
+ return subdevice;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_get_preferred_subdevice);
+
/*
* ioctl32 compat
*/
@@ -1639,19 +1662,9 @@ static const struct file_operations snd_ctl_f_ops =
static int snd_ctl_dev_register(struct snd_device *device)
{
struct snd_card *card = device->device_data;
- int err, cardnum;
- char name[16];
- if (snd_BUG_ON(!card))
- return -ENXIO;
- cardnum = card->number;
- if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
- return -ENXIO;
- sprintf(name, "controlC%i", cardnum);
- if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1,
- &snd_ctl_f_ops, card, name)) < 0)
- return err;
- return 0;
+ return snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1,
+ &snd_ctl_f_ops, card, &card->ctl_dev);
}
/*
@@ -1661,13 +1674,6 @@ static int snd_ctl_dev_disconnect(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_ctl_file *ctl;
- int err, cardnum;
-
- if (snd_BUG_ON(!card))
- return -ENXIO;
- cardnum = card->number;
- if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
- return -ENXIO;
read_lock(&card->ctl_files_rwlock);
list_for_each_entry(ctl, &card->ctl_files, list) {
@@ -1676,10 +1682,7 @@ static int snd_ctl_dev_disconnect(struct snd_device *device)
}
read_unlock(&card->ctl_files_rwlock);
- if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL,
- card, -1)) < 0)
- return err;
- return 0;
+ return snd_unregister_device(&card->ctl_dev);
}
/*
@@ -1696,6 +1699,7 @@ static int snd_ctl_dev_free(struct snd_device *device)
snd_ctl_remove(card, control);
}
up_write(&card->controls_rwsem);
+ put_device(&card->ctl_dev);
return 0;
}
@@ -1710,10 +1714,20 @@ int snd_ctl_create(struct snd_card *card)
.dev_register = snd_ctl_dev_register,
.dev_disconnect = snd_ctl_dev_disconnect,
};
+ int err;
if (snd_BUG_ON(!card))
return -ENXIO;
- return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
+ if (snd_BUG_ON(card->number < 0 || card->number >= SNDRV_CARDS))
+ return -ENXIO;
+
+ snd_device_initialize(&card->ctl_dev, card);
+ dev_set_name(&card->ctl_dev, "controlC%d", card->number);
+
+ err = snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
+ if (err < 0)
+ put_device(&card->ctl_dev);
+ return err;
}
/*
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 69459e5f712e..84244a5143cf 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -38,7 +38,6 @@ MODULE_LICENSE("GPL");
static LIST_HEAD(snd_hwdep_devices);
static DEFINE_MUTEX(register_mutex);
-static int snd_hwdep_free(struct snd_hwdep *hwdep);
static int snd_hwdep_dev_free(struct snd_device *device);
static int snd_hwdep_dev_register(struct snd_device *device);
static int snd_hwdep_dev_disconnect(struct snd_device *device);
@@ -345,6 +344,11 @@ static const struct file_operations snd_hwdep_f_ops =
.mmap = snd_hwdep_mmap,
};
+static void release_hwdep_device(struct device *dev)
+{
+ kfree(container_of(dev, struct snd_hwdep, dev));
+}
+
/**
* snd_hwdep_new - create a new hwdep instance
* @card: the card instance
@@ -378,48 +382,49 @@ int snd_hwdep_new(struct snd_card *card, char *id, int device,
dev_err(card->dev, "hwdep: cannot allocate\n");
return -ENOMEM;
}
+
+ init_waitqueue_head(&hwdep->open_wait);
+ mutex_init(&hwdep->open_mutex);
hwdep->card = card;
hwdep->device = device;
if (id)
strlcpy(hwdep->id, id, sizeof(hwdep->id));
+
+ snd_device_initialize(&hwdep->dev, card);
+ hwdep->dev.release = release_hwdep_device;
+ dev_set_name(&hwdep->dev, "hwC%iD%i", card->number, device);
#ifdef CONFIG_SND_OSSEMUL
hwdep->oss_type = -1;
#endif
- if ((err = snd_device_new(card, SNDRV_DEV_HWDEP, hwdep, &ops)) < 0) {
- snd_hwdep_free(hwdep);
+
+ err = snd_device_new(card, SNDRV_DEV_HWDEP, hwdep, &ops);
+ if (err < 0) {
+ put_device(&hwdep->dev);
return err;
}
- init_waitqueue_head(&hwdep->open_wait);
- mutex_init(&hwdep->open_mutex);
+
if (rhwdep)
*rhwdep = hwdep;
return 0;
}
EXPORT_SYMBOL(snd_hwdep_new);
-static int snd_hwdep_free(struct snd_hwdep *hwdep)
+static int snd_hwdep_dev_free(struct snd_device *device)
{
+ struct snd_hwdep *hwdep = device->device_data;
if (!hwdep)
return 0;
if (hwdep->private_free)
hwdep->private_free(hwdep);
- kfree(hwdep);
+ put_device(&hwdep->dev);
return 0;
}
-static int snd_hwdep_dev_free(struct snd_device *device)
-{
- struct snd_hwdep *hwdep = device->device_data;
- return snd_hwdep_free(hwdep);
-}
-
static int snd_hwdep_dev_register(struct snd_device *device)
{
struct snd_hwdep *hwdep = device->device_data;
struct snd_card *card = hwdep->card;
- struct device *dev;
int err;
- char name[32];
mutex_lock(&register_mutex);
if (snd_hwdep_search(card, hwdep->device)) {
@@ -427,53 +432,30 @@ static int snd_hwdep_dev_register(struct snd_device *device)
return -EBUSY;
}
list_add_tail(&hwdep->list, &snd_hwdep_devices);
- sprintf(name, "hwC%iD%i", hwdep->card->number, hwdep->device);
- dev = hwdep->dev;
- if (!dev)
- dev = snd_card_get_device_link(hwdep->card);
- err = snd_register_device_for_dev(SNDRV_DEVICE_TYPE_HWDEP,
- hwdep->card, hwdep->device,
- &snd_hwdep_f_ops, hwdep, name, dev);
+ err = snd_register_device(SNDRV_DEVICE_TYPE_HWDEP,
+ hwdep->card, hwdep->device,
+ &snd_hwdep_f_ops, hwdep, &hwdep->dev);
if (err < 0) {
- dev_err(dev,
- "unable to register hardware dependent device %i:%i\n",
- card->number, hwdep->device);
+ dev_err(&hwdep->dev, "unable to register\n");
list_del(&hwdep->list);
mutex_unlock(&register_mutex);
return err;
}
- if (hwdep->groups) {
- struct device *d = snd_get_device(SNDRV_DEVICE_TYPE_HWDEP,
- hwdep->card, hwdep->device);
- if (d) {
- if (hwdep->private_data)
- dev_set_drvdata(d, hwdep->private_data);
- err = sysfs_create_groups(&d->kobj, hwdep->groups);
- if (err < 0)
- dev_warn(dev,
- "hwdep %d:%d: cannot create sysfs groups\n",
- card->number, hwdep->device);
- put_device(d);
- }
- }
-
#ifdef CONFIG_SND_OSSEMUL
hwdep->ossreg = 0;
if (hwdep->oss_type >= 0) {
- if ((hwdep->oss_type == SNDRV_OSS_DEVICE_TYPE_DMFM) && (hwdep->device != 0)) {
- dev_warn(dev,
+ if (hwdep->oss_type == SNDRV_OSS_DEVICE_TYPE_DMFM &&
+ hwdep->device)
+ dev_warn(&hwdep->dev,
"only hwdep device 0 can be registered as OSS direct FM device!\n");
- } else {
- if (snd_register_oss_device(hwdep->oss_type,
- card, hwdep->device,
- &snd_hwdep_f_ops, hwdep) < 0) {
- dev_err(dev,
- "unable to register OSS compatibility device %i:%i\n",
- card->number, hwdep->device);
- } else
- hwdep->ossreg = 1;
- }
+ else if (snd_register_oss_device(hwdep->oss_type,
+ card, hwdep->device,
+ &snd_hwdep_f_ops, hwdep) < 0)
+ dev_warn(&hwdep->dev,
+ "unable to register OSS compatibility device\n");
+ else
+ hwdep->ossreg = 1;
}
#endif
mutex_unlock(&register_mutex);
@@ -497,7 +479,7 @@ static int snd_hwdep_dev_disconnect(struct snd_device *device)
if (hwdep->ossreg)
snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device);
#endif
- snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device);
+ snd_unregister_device(&hwdep->dev);
list_del_init(&hwdep->list);
mutex_unlock(&hwdep->open_mutex);
mutex_unlock(&register_mutex);
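
The hwdep conversion above is the template repeated for rawmidi and compress devices later in this diff: embed a struct device in the object, free the container from the device's release callback, and let put_device() replace the direct kfree(). A rough sketch of that shape, with invented names, assuming snd_device_initialize() is exported through <sound/core.h> by this series:

#include <linux/device.h>
#include <linux/slab.h>
#include <sound/core.h>

struct my_snd_obj {
	struct device dev;	/* embedded; owns the object's lifetime */
	/* ... driver fields ... */
};

static void my_snd_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_snd_obj, dev));
}

static struct my_snd_obj *my_snd_obj_new(struct snd_card *card, int device)
{
	struct my_snd_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	snd_device_initialize(&obj->dev, card);
	obj->dev.release = my_snd_obj_release;
	dev_set_name(&obj->dev, "myC%iD%i", card->number, device);
	return obj;	/* freed later via put_device(&obj->dev), not kfree() */
}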
diff --git a/sound/core/init.c b/sound/core/init.c
index 074875d68c15..35419054821c 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -157,8 +157,31 @@ static int get_slot_from_bitmask(int mask, int (*check)(struct module *, int),
return mask; /* unchanged */
}
+/* the default release callback set in snd_device_initialize() below;
+ * this is just a NOP for now, as almost all of the work is already done
+ * in the dev_free callback of the snd_device chain instead.
+ */
+static void default_release(struct device *dev)
+{
+}
+
+/**
+ * snd_device_initialize - Initialize struct device for sound devices
+ * @dev: device to initialize
+ * @card: card to assign, optional
+ */
+void snd_device_initialize(struct device *dev, struct snd_card *card)
+{
+ device_initialize(dev);
+ if (card)
+ dev->parent = &card->card_dev;
+ dev->class = sound_class;
+ dev->release = default_release;
+}
+EXPORT_SYMBOL_GPL(snd_device_initialize);
+
static int snd_card_do_free(struct snd_card *card);
-static const struct attribute_group *card_dev_attr_groups[];
+static const struct attribute_group card_dev_attr_group;
static void release_card_device(struct device *dev)
{
@@ -246,7 +269,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
card->card_dev.parent = parent;
card->card_dev.class = sound_class;
card->card_dev.release = release_card_device;
- card->card_dev.groups = card_dev_attr_groups;
+ card->card_dev.groups = card->dev_groups;
+ card->dev_groups[0] = &card_dev_attr_group;
err = kobject_set_name(&card->card_dev.kobj, "card%d", idx);
if (err < 0)
goto __error;
@@ -677,14 +701,32 @@ static struct attribute *card_dev_attrs[] = {
NULL
};
-static struct attribute_group card_dev_attr_group = {
+static const struct attribute_group card_dev_attr_group = {
.attrs = card_dev_attrs,
};
-static const struct attribute_group *card_dev_attr_groups[] = {
- &card_dev_attr_group,
- NULL
+/**
+ * snd_card_add_dev_attr - Append a new sysfs attribute group to card
+ * @card: card instance
+ * @group: attribute group to append
+ */
+int snd_card_add_dev_attr(struct snd_card *card,
+ const struct attribute_group *group)
+{
+ int i;
+
+ /* loop for (arraysize-1) here to keep NULL at the last entry */
+ for (i = 0; i < ARRAY_SIZE(card->dev_groups) - 1; i++) {
+ if (!card->dev_groups[i]) {
+ card->dev_groups[i] = group;
+ return 0;
+ }
+ }
+
+ dev_err(card->dev, "Too many groups assigned\n");
+ return -ENOSPC;
};
+EXPORT_SYMBOL_GPL(snd_card_add_dev_attr);
/**
* snd_card_register - register the soundcard
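
snd_card_add_dev_attr() gives drivers a hook to append extra sysfs groups to the card device before registration instead of poking card_dev.groups directly. A hedged sketch of a caller follows; the attribute name and value are invented for illustration.

#include <linux/device.h>
#include <linux/sysfs.h>
#include <sound/core.h>

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1.0\n");	/* placeholder value */
}
static DEVICE_ATTR_RO(firmware_version);

static struct attribute *my_card_attrs[] = {
	&dev_attr_firmware_version.attr,
	NULL,
};

static const struct attribute_group my_card_attr_group = {
	.attrs = my_card_attrs,
};

/* call after snd_card_new() and before snd_card_register() */
static int my_probe_attach_attrs(struct snd_card *card)
{
	return snd_card_add_dev_attr(card, &my_card_attr_group);
}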
diff --git a/sound/core/memory.c b/sound/core/memory.c
index 36c0f1a2e189..4cd664efad77 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -21,8 +21,8 @@
*/
#include <linux/export.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <sound/core.h>
/**
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index ada69d7a8d70..80423a4ccab6 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -719,7 +719,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
oss_buffer_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
- oss_buffer_size = 1 << ld2(oss_buffer_size);
+ oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
if (atomic_read(&substream->mmap_count)) {
if (oss_buffer_size > runtime->oss.mmap_bytes)
oss_buffer_size = runtime->oss.mmap_bytes;
@@ -755,14 +755,14 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
min_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
min_period_size *= oss_frame_size;
- min_period_size = 1 << (ld2(min_period_size - 1) + 1);
+ min_period_size = roundup_pow_of_two(min_period_size);
if (oss_period_size < min_period_size)
oss_period_size = min_period_size;
max_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
max_period_size *= oss_frame_size;
- max_period_size = 1 << ld2(max_period_size);
+ max_period_size = rounddown_pow_of_two(max_period_size);
if (oss_period_size > max_period_size)
oss_period_size = max_period_size;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index cfc56c806964..0345e53a340c 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -161,7 +161,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
if (get_user(val, (int __user *)arg))
return -EFAULT;
- control->prefer_pcm_subdevice = val;
+ control->preferred_subdevice[SND_CTL_SUBDEV_PCM] = val;
return 0;
}
}
@@ -673,6 +673,8 @@ static inline int snd_pcm_substream_proc_init(struct snd_pcm_substream *substrea
static inline int snd_pcm_substream_proc_done(struct snd_pcm_substream *substream) { return 0; }
#endif /* CONFIG_SND_VERBOSE_PROCFS */
+static const struct attribute_group *pcm_dev_attr_groups[];
+
/**
* snd_pcm_new_stream - create a new PCM stream
* @pcm: the pcm instance
@@ -698,7 +700,15 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
pstr->stream = stream;
pstr->pcm = pcm;
pstr->substream_count = substream_count;
- if (substream_count > 0 && !pcm->internal) {
+ if (!substream_count)
+ return 0;
+
+ snd_device_initialize(&pstr->dev, pcm->card);
+ pstr->dev.groups = pcm_dev_attr_groups;
+ dev_set_name(&pstr->dev, "pcmC%iD%i%c", pcm->card->number, pcm->device,
+ stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c');
+
+ if (!pcm->internal) {
err = snd_pcm_stream_proc_init(pstr);
if (err < 0) {
pcm_err(pcm, "Error in snd_pcm_stream_proc_init\n");
@@ -868,6 +878,8 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
kfree(setup);
}
#endif
+ if (pstr->substream_count)
+ put_device(&pstr->dev);
}
static int snd_pcm_free(struct snd_pcm *pcm)
@@ -901,9 +913,8 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
struct snd_pcm_str * pstr;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- struct snd_ctl_file *kctl;
struct snd_card *card;
- int prefer_subdevice = -1;
+ int prefer_subdevice;
size_t size;
if (snd_BUG_ON(!pcm || !rsubstream))
@@ -914,15 +925,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
return -ENODEV;
card = pcm->card;
- read_lock(&card->ctl_files_rwlock);
- list_for_each_entry(kctl, &card->ctl_files, list) {
- if (kctl->pid == task_pid(current)) {
- prefer_subdevice = kctl->prefer_pcm_subdevice;
- if (prefer_subdevice != -1)
- break;
- }
- }
- read_unlock(&card->ctl_files_rwlock);
+ prefer_subdevice = snd_ctl_get_preferred_subdevice(card, SND_CTL_SUBDEV_PCM);
switch (stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
@@ -1078,9 +1081,7 @@ static int snd_pcm_dev_register(struct snd_device *device)
int cidx, err;
struct snd_pcm_substream *substream;
struct snd_pcm_notify *notify;
- char str[16];
struct snd_pcm *pcm;
- struct device *dev;
if (snd_BUG_ON(!device || !device->device_data))
return -ENXIO;
@@ -1097,42 +1098,22 @@ static int snd_pcm_dev_register(struct snd_device *device)
continue;
switch (cidx) {
case SNDRV_PCM_STREAM_PLAYBACK:
- sprintf(str, "pcmC%iD%ip", pcm->card->number, pcm->device);
devtype = SNDRV_DEVICE_TYPE_PCM_PLAYBACK;
break;
case SNDRV_PCM_STREAM_CAPTURE:
- sprintf(str, "pcmC%iD%ic", pcm->card->number, pcm->device);
devtype = SNDRV_DEVICE_TYPE_PCM_CAPTURE;
break;
}
- /* device pointer to use, pcm->dev takes precedence if
- * it is assigned, otherwise fall back to card's device
- * if possible */
- dev = pcm->dev;
- if (!dev)
- dev = snd_card_get_device_link(pcm->card);
/* register pcm */
- err = snd_register_device_for_dev(devtype, pcm->card,
- pcm->device,
- &snd_pcm_f_ops[cidx],
- pcm, str, dev);
+ err = snd_register_device(devtype, pcm->card, pcm->device,
+ &snd_pcm_f_ops[cidx], pcm,
+ &pcm->streams[cidx].dev);
if (err < 0) {
list_del(&pcm->list);
mutex_unlock(&register_mutex);
return err;
}
- dev = snd_get_device(devtype, pcm->card, pcm->device);
- if (dev) {
- err = sysfs_create_groups(&dev->kobj,
- pcm_dev_attr_groups);
- if (err < 0)
- dev_warn(dev,
- "pcm %d:%d: cannot create sysfs groups\n",
- pcm->card->number, pcm->device);
- put_device(dev);
- }
-
for (substream = pcm->streams[cidx].substream; substream; substream = substream->next)
snd_pcm_timer_init(substream);
}
@@ -1149,7 +1130,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
struct snd_pcm *pcm = device->device_data;
struct snd_pcm_notify *notify;
struct snd_pcm_substream *substream;
- int cidx, devtype;
+ int cidx;
mutex_lock(&register_mutex);
if (list_empty(&pcm->list))
@@ -1172,16 +1153,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
notify->n_disconnect(pcm);
}
for (cidx = 0; cidx < 2; cidx++) {
- devtype = -1;
- switch (cidx) {
- case SNDRV_PCM_STREAM_PLAYBACK:
- devtype = SNDRV_DEVICE_TYPE_PCM_PLAYBACK;
- break;
- case SNDRV_PCM_STREAM_CAPTURE:
- devtype = SNDRV_DEVICE_TYPE_PCM_CAPTURE;
- break;
- }
- snd_unregister_device(devtype, pcm->card, pcm->device);
+ snd_unregister_device(&pcm->streams[cidx].dev);
if (pcm->streams[cidx].chmap_kctl) {
snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
pcm->streams[cidx].chmap_kctl = NULL;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index ec9e7866177f..ffd656012ab8 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1015,6 +1015,60 @@ int snd_interval_list(struct snd_interval *i, unsigned int count,
EXPORT_SYMBOL(snd_interval_list);
+/**
+ * snd_interval_ranges - refine the interval value from the list of ranges
+ * @i: the interval value to refine
+ * @count: the number of elements in the list of ranges
+ * @ranges: the ranges list
+ * @mask: the bit-mask to evaluate
+ *
+ * Refines the interval value from the list of ranges.
+ * When @mask is non-zero, only the elements whose corresponding bits
+ * are set in @mask are evaluated.
+ *
+ * Return: Positive if the value is changed, zero if it's not changed, or a
+ * negative error code.
+ */
+int snd_interval_ranges(struct snd_interval *i, unsigned int count,
+ const struct snd_interval *ranges, unsigned int mask)
+{
+ unsigned int k;
+ struct snd_interval range_union;
+ struct snd_interval range;
+
+ if (!count) {
+ snd_interval_none(i);
+ return -EINVAL;
+ }
+ snd_interval_any(&range_union);
+ range_union.min = UINT_MAX;
+ range_union.max = 0;
+ for (k = 0; k < count; k++) {
+ if (mask && !(mask & (1 << k)))
+ continue;
+ snd_interval_copy(&range, &ranges[k]);
+ if (snd_interval_refine(&range, i) < 0)
+ continue;
+ if (snd_interval_empty(&range))
+ continue;
+
+ if (range.min < range_union.min) {
+ range_union.min = range.min;
+ range_union.openmin = 1;
+ }
+ if (range.min == range_union.min && !range.openmin)
+ range_union.openmin = 0;
+ if (range.max > range_union.max) {
+ range_union.max = range.max;
+ range_union.openmax = 1;
+ }
+ if (range.max == range_union.max && !range.openmax)
+ range_union.openmax = 0;
+ }
+ return snd_interval_refine(i, &range_union);
+}
+EXPORT_SYMBOL(snd_interval_ranges);
+
static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
unsigned int n;
@@ -1221,6 +1275,37 @@ int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
+static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_pcm_hw_constraint_ranges *r = rule->private;
+ return snd_interval_ranges(hw_param_interval(params, rule->var),
+ r->count, r->ranges, r->mask);
+}
+
+
+/**
+ * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
+ * @runtime: PCM runtime instance
+ * @cond: condition bits
+ * @var: hw_params variable to apply the list of range constraints
+ * @r: ranges
+ *
+ * Apply the list of range constraints to an interval parameter.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
+ unsigned int cond,
+ snd_pcm_hw_param_t var,
+ const struct snd_pcm_hw_constraint_ranges *r)
+{
+ return snd_pcm_hw_rule_add(runtime, cond, var,
+ snd_pcm_hw_rule_ranges, (void *)r,
+ var, -1);
+}
+EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
+
static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
@@ -1299,8 +1384,14 @@ static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
int width = l & 0xffff;
unsigned int msbits = l >> 16;
struct snd_interval *i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
- if (snd_interval_single(i) && snd_interval_value(i) == width)
- params->msbits = msbits;
+
+ if (!snd_interval_single(i))
+ return 0;
+
+ if ((snd_interval_value(i) == width) ||
+ (width == 0 && snd_interval_value(i) > msbits))
+ params->msbits = min_not_zero(params->msbits, msbits);
+
return 0;
}
@@ -1311,6 +1402,11 @@ static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
* @width: sample bits width
* @msbits: msbits width
*
+ * This constraint will set the number of most significant bits (msbits) if a
+ * sample format with the specified width has been selected. If width is set
+ * to 0, the msbits will be set for any sample format with a width larger than
+ * the specified msbits.
+ *
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
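
snd_pcm_hw_constraint_ranges() lets a driver restrict a hw_params interval to several disjoint windows with one rule, and the relaxed snd_pcm_hw_constraint_msbits() (width 0) applies an msbits cap to every wider format. Below is a sketch of a driver-side use with invented values, assuming struct snd_pcm_hw_constraint_ranges carries the count/ranges/mask triple consumed by snd_pcm_hw_rule_ranges() above.

#include <linux/kernel.h>
#include <sound/pcm.h>

/* two disjoint buffer-size windows, values invented for illustration */
static const struct snd_interval my_buffer_ranges[] = {
	{ .min = 4096,  .max = 32768,  .integer = 1 },
	{ .min = 65536, .max = 131072, .integer = 1 },
};

static const struct snd_pcm_hw_constraint_ranges my_buffer_constraint = {
	.count	= ARRAY_SIZE(my_buffer_ranges),
	.ranges	= my_buffer_ranges,
	.mask	= 0,		/* 0 = evaluate every range */
};

static int my_pcm_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_ranges(substream->runtime, 0,
					    SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					    &my_buffer_constraint);
}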
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 54debc07f5cb..b45f6aa32264 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -19,7 +19,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 095d9572ad2b..b03a638b420c 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -26,6 +26,7 @@
#include <linux/time.h>
#include <linux/pm_qos.h>
#include <linux/aio.h>
+#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -34,7 +35,6 @@
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/minors.h>
-#include <asm/io.h>
/*
* Compatibility
@@ -420,7 +420,8 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
hw = &substream->runtime->hw;
if (!params->info) {
- params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
+ params->info = hw->info & ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
+ SNDRV_PCM_INFO_DRAIN_TRIGGER);
if (!hw_support_mmap(substream))
params->info &= ~(SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID);
@@ -719,8 +720,11 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
runtime->status->audio_tstamp;
goto _tstamp_end;
}
+ } else {
+ /* get tstamp only in fallback mode and only if enabled */
+ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+ snd_pcm_gettime(runtime, &status->tstamp);
}
- snd_pcm_gettime(runtime, &status->tstamp);
_tstamp_end:
status->appl_ptr = runtime->control->appl_ptr;
status->hw_ptr = runtime->status->hw_ptr;
@@ -806,7 +810,8 @@ static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
if (runtime->trigger_master == NULL)
return;
if (runtime->trigger_master == substream) {
- snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
+ if (!runtime->trigger_tstamp_latched)
+ snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
} else {
snd_pcm_trigger_tstamp(runtime->trigger_master);
runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
@@ -975,6 +980,7 @@ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
!snd_pcm_playback_data(substream))
return -EPIPE;
+ runtime->trigger_tstamp_latched = false;
runtime->trigger_master = substream;
return 0;
}
@@ -1566,6 +1572,13 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
snd_pcm_post_stop(substream, new_state);
}
}
+
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
+ runtime->trigger_master == substream &&
+ (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
+ return substream->ops->trigger(substream,
+ SNDRV_PCM_TRIGGER_DRAIN);
+
return 0;
}
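
The new SNDRV_PCM_INFO_DRAIN_TRIGGER flag is masked out of the info bits reported to user space and instead tells the core to call the driver's trigger callback with SNDRV_PCM_TRIGGER_DRAIN once draining starts. A hedged sketch of how a driver might opt in (function and comments are invented, not from the patch):

#include <sound/pcm.h>

/* in the driver's snd_pcm_hardware: .info |= SNDRV_PCM_INFO_DRAIN_TRIGGER */
static int my_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* start DMA as usual */
		return 0;
	case SNDRV_PCM_TRIGGER_DRAIN:
		/* push any partially filled period out to the hardware */
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		/* stop DMA */
		return 0;
	default:
		return -EINVAL;
	}
}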
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6fc71a4c8a51..b5a748596fc4 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -57,11 +57,11 @@ static LIST_HEAD(snd_rawmidi_devices);
static DEFINE_MUTEX(register_mutex);
#define rmidi_err(rmidi, fmt, args...) \
- dev_err((rmidi)->card->dev, fmt, ##args)
+ dev_err(&(rmidi)->dev, fmt, ##args)
#define rmidi_warn(rmidi, fmt, args...) \
- dev_warn((rmidi)->card->dev, fmt, ##args)
+ dev_warn(&(rmidi)->dev, fmt, ##args)
#define rmidi_dbg(rmidi, fmt, args...) \
- dev_dbg((rmidi)->card->dev, fmt, ##args)
+ dev_dbg(&(rmidi)->dev, fmt, ##args)
static struct snd_rawmidi *snd_rawmidi_search(struct snd_card *card, int device)
{
@@ -369,7 +369,6 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
struct snd_rawmidi *rmidi;
struct snd_rawmidi_file *rawmidi_file = NULL;
wait_queue_t wait;
- struct snd_ctl_file *kctl;
if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK))
return -EINVAL; /* invalid combination */
@@ -413,16 +412,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
init_waitqueue_entry(&wait, current);
add_wait_queue(&rmidi->open_wait, &wait);
while (1) {
- subdevice = -1;
- read_lock(&card->ctl_files_rwlock);
- list_for_each_entry(kctl, &card->ctl_files, list) {
- if (kctl->pid == task_pid(current)) {
- subdevice = kctl->prefer_rawmidi_subdevice;
- if (subdevice != -1)
- break;
- }
- }
- read_unlock(&card->ctl_files_rwlock);
+ subdevice = snd_ctl_get_preferred_subdevice(card, SND_CTL_SUBDEV_RAWMIDI);
err = rawmidi_open_priv(rmidi, subdevice, fflags, rawmidi_file);
if (err >= 0)
break;
@@ -862,7 +852,7 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
if (get_user(val, (int __user *)argp))
return -EFAULT;
- control->prefer_rawmidi_subdevice = val;
+ control->preferred_subdevice[SND_CTL_SUBDEV_RAWMIDI] = val;
return 0;
}
case SNDRV_CTL_IOCTL_RAWMIDI_INFO:
@@ -1453,6 +1443,11 @@ static int snd_rawmidi_alloc_substreams(struct snd_rawmidi *rmidi,
return 0;
}
+static void release_rawmidi_device(struct device *dev)
+{
+ kfree(container_of(dev, struct snd_rawmidi, dev));
+}
+
/**
* snd_rawmidi_new - create a rawmidi instance
* @card: the card instance
@@ -1497,6 +1492,11 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device,
if (id != NULL)
strlcpy(rmidi->id, id, sizeof(rmidi->id));
+
+ snd_device_initialize(&rmidi->dev, card);
+ rmidi->dev.release = release_rawmidi_device;
+ dev_set_name(&rmidi->dev, "midiC%iD%i", card->number, device);
+
if ((err = snd_rawmidi_alloc_substreams(rmidi,
&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT],
SNDRV_RAWMIDI_STREAM_INPUT,
@@ -1548,7 +1548,7 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi)
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]);
if (rmidi->private_free)
rmidi->private_free(rmidi);
- kfree(rmidi);
+ put_device(&rmidi->dev);
return 0;
}
@@ -1581,19 +1581,18 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
return -EBUSY;
}
list_add_tail(&rmidi->list, &snd_rawmidi_devices);
- sprintf(name, "midiC%iD%i", rmidi->card->number, rmidi->device);
- if ((err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
- rmidi->card, rmidi->device,
- &snd_rawmidi_f_ops, rmidi, name)) < 0) {
- rmidi_err(rmidi, "unable to register rawmidi device %i:%i\n",
- rmidi->card->number, rmidi->device);
+ err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
+ rmidi->card, rmidi->device,
+ &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
+ if (err < 0) {
+ rmidi_err(rmidi, "unable to register\n");
list_del(&rmidi->list);
mutex_unlock(&register_mutex);
return err;
}
if (rmidi->ops && rmidi->ops->dev_register &&
(err = rmidi->ops->dev_register(rmidi)) < 0) {
- snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
+ snd_unregister_device(&rmidi->dev);
list_del(&rmidi->list);
mutex_unlock(&register_mutex);
return err;
@@ -1681,7 +1680,7 @@ static int snd_rawmidi_dev_disconnect(struct snd_device *device)
rmidi->ossreg = 0;
}
#endif /* CONFIG_SND_OSSEMUL */
- snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
+ snd_unregister_device(&rmidi->dev);
mutex_unlock(&rmidi->open_mutex);
mutex_unlock(&register_mutex);
return 0;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 3a4569669efa..e79cc44b1394 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -237,8 +237,7 @@ snd_seq_oss_midi_check_exit_port(int client, int port)
spin_unlock_irqrestore(&register_lock, flags);
snd_use_lock_free(&mdev->use_lock);
snd_use_lock_sync(&mdev->use_lock);
- if (mdev->coder)
- snd_midi_event_free(mdev->coder);
+ snd_midi_event_free(mdev->coder);
kfree(mdev);
}
spin_lock_irqsave(&register_lock, flags);
@@ -265,8 +264,7 @@ snd_seq_oss_midi_clear_all(void)
spin_lock_irqsave(&register_lock, flags);
for (i = 0; i < max_midi_devs; i++) {
if ((mdev = midi_devs[i]) != NULL) {
- if (mdev->coder)
- snd_midi_event_free(mdev->coder);
+ snd_midi_event_free(mdev->coder);
kfree(mdev);
midi_devs[i] = NULL;
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 225c73152ee9..48287651ac77 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1133,7 +1133,7 @@ static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void __user
/* fill the info fields */
info.queues = SNDRV_SEQ_MAX_QUEUES;
info.clients = SNDRV_SEQ_MAX_CLIENTS;
- info.ports = 256; /* fixed limit */
+ info.ports = SNDRV_SEQ_MAX_PORTS;
info.channels = 256; /* fixed limit */
info.cur_clients = client_usage.cur;
info.cur_queues = snd_seq_queue_get_cur_queues();
@@ -1279,7 +1279,6 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
port->owner = callback->owner;
port->private_data = callback->private_data;
port->private_free = callback->private_free;
- port->callback_all = callback->callback_all;
port->event_input = callback->event_input;
port->c_src.open = callback->subscribe;
port->c_src.close = callback->unsubscribe;
@@ -2571,6 +2570,8 @@ static const struct file_operations snd_seq_f_ops =
.compat_ioctl = snd_seq_ioctl_compat,
};
+static struct device seq_dev;
+
/*
* register sequencer device
*/
@@ -2578,12 +2579,17 @@ int __init snd_sequencer_device_init(void)
{
int err;
+ snd_device_initialize(&seq_dev, NULL);
+ dev_set_name(&seq_dev, "seq");
+
if (mutex_lock_interruptible(&register_mutex))
return -ERESTARTSYS;
- if ((err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
- &snd_seq_f_ops, NULL, "seq")) < 0) {
+ err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
+ &snd_seq_f_ops, NULL, &seq_dev);
+ if (err < 0) {
mutex_unlock(&register_mutex);
+ put_device(&seq_dev);
return err;
}
@@ -2599,5 +2605,6 @@ int __init snd_sequencer_device_init(void)
*/
void __exit snd_sequencer_device_done(void)
{
- snd_unregister_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0);
+ snd_unregister_device(&seq_dev);
+ put_device(&seq_dev);
}
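
The sequencer conversion above shows the registration shape the whole series moves to: a pre-initialized struct device is handed to snd_register_device() in place of a name string, and teardown pairs snd_unregister_device() with a final put_device(). A condensed sketch of that pattern for a card-less ALSA character device; the device type, name, and file operations are placeholders.

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/minors.h>

static const struct file_operations my_f_ops = {
	.owner = THIS_MODULE,
};

static struct device my_dev;

static int __init my_chardev_init(void)
{
	int err;

	snd_device_initialize(&my_dev, NULL);	/* NULL card: hangs off the sound class */
	dev_set_name(&my_dev, "mydev");

	err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER /* placeholder type */,
				  NULL, 0, &my_f_ops, NULL, &my_dev);
	if (err < 0)
		put_device(&my_dev);	/* registration failed: drop the initial ref */
	return err;
}
module_init(my_chardev_init);

static void __exit my_chardev_exit(void)
{
	snd_unregister_device(&my_dev);	/* removes the char device */
	put_device(&my_dev);		/* releases the struct device */
}
module_exit(my_chardev_exit);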
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index ec667f158f19..5d905d90d504 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
static int my_client = -1;
/*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
- struct snd_seq_dummy_port *p;
- int i;
- struct snd_seq_event ev;
-
- p = private_data;
- memset(&ev, 0, sizeof(ev));
- if (p->duplex)
- ev.source.port = p->connect;
- else
- ev.source.port = p->port;
- ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
- ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
- for (i = 0; i < 16; i++) {
- ev.data.control.channel = i;
- ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
- ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
- }
- return 0;
-}
-
-/*
* event input callback - just redirect events to subscribers
*/
static int
@@ -175,7 +145,6 @@ create_port(int idx, int type)
| SNDRV_SEQ_PORT_TYPE_PORT;
memset(&pcb, 0, sizeof(pcb));
pcb.owner = THIS_MODULE;
- pcb.unuse = dummy_unuse;
pcb.event_input = dummy_input;
pcb.private_free = dummy_free;
pcb.private_data = rec;
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index a1fd77af6059..68fec776da26 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -268,8 +268,7 @@ static void snd_seq_midisynth_delete(struct seq_midisynth *msynth)
snd_seq_event_port_detach(msynth->seq_client, msynth->seq_port);
}
- if (msynth->parser)
- snd_midi_event_free(msynth->parser);
+ snd_midi_event_free(msynth->parser);
}
/* register new midi synth port */
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 794a341bf0e5..46ff593f618d 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -134,7 +134,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
if (snd_BUG_ON(!client))
return NULL;
- if (client->num_ports >= SNDRV_SEQ_MAX_PORTS - 1) {
+ if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) {
pr_warn("ALSA: seq: too many ports for client %d\n", client->number);
return NULL;
}
@@ -411,9 +411,6 @@ int snd_seq_get_port_info(struct snd_seq_client_port * port,
* invoked.
* This feature is useful if these callbacks are associated with
* initialization or termination of devices (see seq_midi.c).
- *
- * If callback_all option is set, the callback function is invoked
- * at each connection/disconnection.
*/
static int subscribe_port(struct snd_seq_client *client,
@@ -427,7 +424,7 @@ static int subscribe_port(struct snd_seq_client *client,
if (!try_module_get(port->owner))
return -EFAULT;
grp->count++;
- if (grp->open && (port->callback_all || grp->count == 1)) {
+ if (grp->open && grp->count == 1) {
err = grp->open(port->private_data, info);
if (err < 0) {
module_put(port->owner);
@@ -452,7 +449,7 @@ static int unsubscribe_port(struct snd_seq_client *client,
if (! grp->count)
return -EINVAL;
grp->count--;
- if (grp->close && (port->callback_all || grp->count == 0))
+ if (grp->close && grp->count == 0)
err = grp->close(port->private_data, info);
if (send_ack && client->type == USER_CLIENT)
snd_seq_client_notify_subscription(port->addr.client, port->addr.port,
diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
index 9d7117118ba4..26bd71f36c41 100644
--- a/sound/core/seq/seq_ports.h
+++ b/sound/core/seq/seq_ports.h
@@ -73,7 +73,6 @@ struct snd_seq_client_port {
int atomic, int hop);
void (*private_free)(void *private_data);
void *private_data;
- unsigned int callback_all : 1;
unsigned int closing : 1;
unsigned int timestamping: 1;
unsigned int time_real: 1;
diff --git a/sound/core/sound.c b/sound/core/sound.c
index f1333060bf1c..185cec01ee25 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -242,30 +242,30 @@ static int snd_kernel_minor(int type, struct snd_card *card, int dev)
#endif
/**
- * snd_register_device_for_dev - Register the ALSA device file for the card
+ * snd_register_device - Register the ALSA device file for the card
* @type: the device type, SNDRV_DEVICE_TYPE_XXX
* @card: the card instance
* @dev: the device index
* @f_ops: the file operations
* @private_data: user pointer for f_ops->open()
- * @name: the device file name
- * @device: the &struct device to link this new device to
+ * @device: the device to register
*
* Registers an ALSA device file for the given card.
* The operators have to be set in reg parameter.
*
* Return: Zero if successful, or a negative error code on failure.
*/
-int snd_register_device_for_dev(int type, struct snd_card *card, int dev,
- const struct file_operations *f_ops,
- void *private_data,
- const char *name, struct device *device)
+int snd_register_device(int type, struct snd_card *card, int dev,
+ const struct file_operations *f_ops,
+ void *private_data, struct device *device)
{
int minor;
+ int err = 0;
struct snd_minor *preg;
- if (snd_BUG_ON(!name))
+ if (snd_BUG_ON(!device))
return -EINVAL;
+
preg = kmalloc(sizeof *preg, GFP_KERNEL);
if (preg == NULL)
return -ENOMEM;
@@ -284,102 +284,56 @@ int snd_register_device_for_dev(int type, struct snd_card *card, int dev,
minor = -EBUSY;
#endif
if (minor < 0) {
- mutex_unlock(&sound_mutex);
- kfree(preg);
- return minor;
- }
- snd_minors[minor] = preg;
- preg->dev = device_create(sound_class, device, MKDEV(major, minor),
- private_data, "%s", name);
- if (IS_ERR(preg->dev)) {
- snd_minors[minor] = NULL;
- mutex_unlock(&sound_mutex);
- minor = PTR_ERR(preg->dev);
- kfree(preg);
- return minor;
+ err = minor;
+ goto error;
}
- mutex_unlock(&sound_mutex);
- return 0;
-}
-
-EXPORT_SYMBOL(snd_register_device_for_dev);
-
-/* find the matching minor record
- * return the index of snd_minor, or -1 if not found
- */
-static int find_snd_minor(int type, struct snd_card *card, int dev)
-{
- int cardnum, minor;
- struct snd_minor *mptr;
+ preg->dev = device;
+ device->devt = MKDEV(major, minor);
+ err = device_add(device);
+ if (err < 0)
+ goto error;
- cardnum = card ? card->number : -1;
- for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor)
- if ((mptr = snd_minors[minor]) != NULL &&
- mptr->type == type &&
- mptr->card == cardnum &&
- mptr->device == dev)
- return minor;
- return -1;
+ snd_minors[minor] = preg;
+ error:
+ mutex_unlock(&sound_mutex);
+ if (err < 0)
+ kfree(preg);
+ return err;
}
+EXPORT_SYMBOL(snd_register_device);
/**
* snd_unregister_device - unregister the device on the given card
- * @type: the device type, SNDRV_DEVICE_TYPE_XXX
- * @card: the card instance
- * @dev: the device index
+ * @dev: the device instance
*
* Unregisters the device file already registered via
* snd_register_device().
*
* Return: Zero if successful, or a negative error code on failure.
*/
-int snd_unregister_device(int type, struct snd_card *card, int dev)
+int snd_unregister_device(struct device *dev)
{
int minor;
+ struct snd_minor *preg;
mutex_lock(&sound_mutex);
- minor = find_snd_minor(type, card, dev);
- if (minor < 0) {
- mutex_unlock(&sound_mutex);
- return -EINVAL;
+ for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) {
+ preg = snd_minors[minor];
+ if (preg && preg->dev == dev) {
+ snd_minors[minor] = NULL;
+ device_del(dev);
+ kfree(preg);
+ break;
+ }
}
-
- device_destroy(sound_class, MKDEV(major, minor));
-
- kfree(snd_minors[minor]);
- snd_minors[minor] = NULL;
mutex_unlock(&sound_mutex);
+ if (minor >= ARRAY_SIZE(snd_minors))
+ return -ENOENT;
return 0;
}
-
EXPORT_SYMBOL(snd_unregister_device);
-/**
- * snd_get_device - get the assigned device to the given type and device number
- * @type: the device type, SNDRV_DEVICE_TYPE_XXX
- * @card:the card instance
- * @dev: the device index
- *
- * The caller needs to release it via put_device() after using it.
- */
-struct device *snd_get_device(int type, struct snd_card *card, int dev)
-{
- int minor;
- struct device *d = NULL;
-
- mutex_lock(&sound_mutex);
- minor = find_snd_minor(type, card, dev);
- if (minor >= 0) {
- d = snd_minors[minor]->dev;
- if (d)
- get_device(d);
- }
- mutex_unlock(&sound_mutex);
- return d;
-}
-EXPORT_SYMBOL(snd_get_device);
-
#ifdef CONFIG_PROC_FS
/*
* INFO PART
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 777a45e08e53..490b489d713d 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1030,9 +1030,7 @@ static int snd_timer_register_system(void)
snd_timer_free(timer);
return -ENOMEM;
}
- init_timer(&priv->tlist);
- priv->tlist.function = snd_timer_s_function;
- priv->tlist.data = (unsigned long) timer;
+ setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
@@ -1942,6 +1940,17 @@ static const struct file_operations snd_timer_f_ops =
.fasync = snd_timer_user_fasync,
};
+/* unregister the system timer */
+static void snd_timer_free_all(void)
+{
+ struct snd_timer *timer, *n;
+
+ list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
+ snd_timer_free(timer);
+}
+
+static struct device timer_dev;
+
/*
* ENTRY functions
*/
@@ -1950,30 +1959,39 @@ static int __init alsa_timer_init(void)
{
int err;
+ snd_device_initialize(&timer_dev, NULL);
+ dev_set_name(&timer_dev, "timer");
+
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
- if ((err = snd_timer_register_system()) < 0)
+ err = snd_timer_register_system();
+ if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
- if ((err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
- &snd_timer_f_ops, NULL, "timer")) < 0)
+ put_device(&timer_dev);
+ return err;
+ }
+
+ err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
+ &snd_timer_f_ops, NULL, &timer_dev);
+ if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
+ snd_timer_free_all();
+ put_device(&timer_dev);
+ return err;
+ }
+
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
- struct list_head *p, *n;
-
- snd_unregister_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0);
- /* unregister the system timer */
- list_for_each_safe(p, n, &snd_timer_list) {
- struct snd_timer *timer = list_entry(p, struct snd_timer, device_list);
- snd_timer_free(timer);
- }
+ snd_unregister_device(&timer_dev);
+ snd_timer_free_all();
+ put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 7ea53399404d..7f9126efc1e5 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -181,8 +181,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
}
tick = dpcm->period_size_frac - dpcm->irq_pos;
tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
- dpcm->timer.expires = jiffies + tick;
- add_timer(&dpcm->timer);
+ mod_timer(&dpcm->timer, jiffies + tick);
}
/* call in cable->lock */
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 5d0dfb787cec..d11baaf0f0b4 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -245,9 +245,8 @@ struct dummy_systimer_pcm {
static void dummy_systimer_rearm(struct dummy_systimer_pcm *dpcm)
{
- dpcm->timer.expires = jiffies +
- (dpcm->frac_period_rest + dpcm->rate - 1) / dpcm->rate;
- add_timer(&dpcm->timer);
+ mod_timer(&dpcm->timer, jiffies +
+ (dpcm->frac_period_rest + dpcm->rate - 1) / dpcm->rate);
}
static void dummy_systimer_update(struct dummy_systimer_pcm *dpcm)
@@ -340,9 +339,8 @@ static int dummy_systimer_create(struct snd_pcm_substream *substream)
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
- init_timer(&dpcm->timer);
- dpcm->timer.data = (unsigned long) dpcm;
- dpcm->timer.function = dummy_systimer_callback;
+ setup_timer(&dpcm->timer, dummy_systimer_callback,
+ (unsigned long) dpcm);
spin_lock_init(&dpcm->lock);
dpcm->substream = substream;
return 0;
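
The aloop and dummy hunks above, and the mpu401/mtpav/opl3 hunks that follow, all converge on the same 3.19-era timer idiom: setup_timer() replaces init_timer() plus manual field assignment, and mod_timer() replaces setting ->expires before add_timer(). A minimal sketch with an invented state structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_state {
	struct timer_list timer;
	/* ... */
};

static void my_timer_fn(unsigned long data)
{
	struct my_state *st = (struct my_state *)data;

	/* ... periodic work ... */
	mod_timer(&st->timer, jiffies + 1);	/* re-arm for the next tick */
}

static void my_state_start(struct my_state *st)
{
	setup_timer(&st->timer, my_timer_fn, (unsigned long)st);
	mod_timer(&st->timer, jiffies + msecs_to_jiffies(10));
}

/* teardown still uses del_timer_sync(&st->timer) */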
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index bcca825a1c8d..bdcb5721393b 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1094,8 +1094,7 @@ static int snd_ml403_ac97cr_free(struct snd_ml403_ac97cr *ml403_ac97cr)
if (ml403_ac97cr->capture_irq >= 0)
free_irq(ml403_ac97cr->capture_irq, ml403_ac97cr);
/* give back "port" */
- if (ml403_ac97cr->port != NULL)
- iounmap(ml403_ac97cr->port);
+ iounmap(ml403_ac97cr->port);
kfree(ml403_ac97cr);
PDEBUG(INIT_INFO, "free(): (done)\n");
return 0;
@@ -1238,14 +1237,11 @@ snd_ml403_ac97cr_mixer(struct snd_ml403_ac97cr *ml403_ac97cr)
}
static int
-snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device,
- struct snd_pcm **rpcm)
+snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(ml403_ac97cr->card, "ML403AC97CR/1", device, 1, 1,
&pcm);
if (err < 0)
@@ -1263,8 +1259,6 @@ snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device,
snd_dma_continuous_data(GFP_KERNEL),
64 * 1024,
128 * 1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -1298,7 +1292,7 @@ static int snd_ml403_ac97cr_probe(struct platform_device *pfdev)
return err;
}
PDEBUG(INIT_INFO, "probe(): mixer done\n");
- err = snd_ml403_ac97cr_pcm(ml403_ac97cr, 0, NULL);
+ err = snd_ml403_ac97cr_pcm(ml403_ac97cr, 0);
if (err < 0) {
snd_card_free(card);
return err;
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index e3a90d043f03..776596b5ee05 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -28,7 +28,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -176,8 +176,7 @@ static void snd_mpu401_uart_timer(unsigned long data)
spin_lock_irqsave(&mpu->timer_lock, flags);
/*mpu->mode |= MPU401_MODE_TIMER;*/
- mpu->timer.expires = 1 + jiffies;
- add_timer(&mpu->timer);
+ mod_timer(&mpu->timer, 1 + jiffies);
spin_unlock_irqrestore(&mpu->timer_lock, flags);
if (mpu->rmidi)
_snd_mpu401_uart_interrupt(mpu);
@@ -192,11 +191,9 @@ static void snd_mpu401_uart_add_timer (struct snd_mpu401 *mpu, int input)
spin_lock_irqsave (&mpu->timer_lock, flags);
if (mpu->timer_invoked == 0) {
- init_timer(&mpu->timer);
- mpu->timer.data = (unsigned long)mpu;
- mpu->timer.function = snd_mpu401_uart_timer;
- mpu->timer.expires = 1 + jiffies;
- add_timer(&mpu->timer);
+ setup_timer(&mpu->timer, snd_mpu401_uart_timer,
+ (unsigned long)mpu);
+ mod_timer(&mpu->timer, 1 + jiffies);
}
mpu->timer_invoked |= input ? MPU401_MODE_INPUT_TIMER :
MPU401_MODE_OUTPUT_TIMER;
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index 15769447688f..30e8a1d5bc87 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -414,8 +414,7 @@ static void snd_mtpav_output_timer(unsigned long data)
spin_lock_irqsave(&chip->spinlock, flags);
/* reprogram timer */
- chip->timer.expires = 1 + jiffies;
- add_timer(&chip->timer);
+ mod_timer(&chip->timer, 1 + jiffies);
/* process each port */
for (p = 0; p <= chip->num_ports * 2 + MTPAV_PIDX_BROADCAST; p++) {
struct mtpav_port *portp = &chip->ports[p];
@@ -428,8 +427,7 @@ static void snd_mtpav_output_timer(unsigned long data)
/* spinlock held! */
static void snd_mtpav_add_output_timer(struct mtpav *chip)
{
- chip->timer.expires = 1 + jiffies;
- add_timer(&chip->timer);
+ mod_timer(&chip->timer, 1 + jiffies);
}
/* spinlock held! */
@@ -704,15 +702,13 @@ static int snd_mtpav_probe(struct platform_device *dev)
mtp_card = card->private_data;
spin_lock_init(&mtp_card->spinlock);
- init_timer(&mtp_card->timer);
mtp_card->card = card;
mtp_card->irq = -1;
mtp_card->share_irq = 0;
mtp_card->inmidistate = 0;
mtp_card->outmidihwport = 0xffffffff;
- init_timer(&mtp_card->timer);
- mtp_card->timer.function = snd_mtpav_output_timer;
- mtp_card->timer.data = (unsigned long) mtp_card;
+ setup_timer(&mtp_card->timer, snd_mtpav_output_timer,
+ (unsigned long) mtp_card);
card->private_free = snd_mtpav_free;
diff --git a/sound/drivers/opl3/opl3_lib.c b/sound/drivers/opl3/opl3_lib.c
index f66af5884c40..369cef212ea9 100644
--- a/sound/drivers/opl3/opl3_lib.c
+++ b/sound/drivers/opl3/opl3_lib.c
@@ -24,7 +24,7 @@
*/
#include <sound/opl3.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index 6c6d09a51f42..f62780ed64ad 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -258,12 +258,10 @@ void snd_opl3_timer_func(unsigned long data)
spin_unlock_irqrestore(&opl3->voice_lock, flags);
spin_lock_irqsave(&opl3->sys_timer_lock, flags);
- if (again) {
- opl3->tlist.expires = jiffies + 1; /* invoke again */
- add_timer(&opl3->tlist);
- } else {
+ if (again)
+ mod_timer(&opl3->tlist, jiffies + 1); /* invoke again */
+ else
opl3->sys_timer_status = 0;
- }
spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
}
@@ -275,8 +273,7 @@ static void snd_opl3_start_timer(struct snd_opl3 *opl3)
unsigned long flags;
spin_lock_irqsave(&opl3->sys_timer_lock, flags);
if (! opl3->sys_timer_status) {
- opl3->tlist.expires = jiffies + 1;
- add_timer(&opl3->tlist);
+ mod_timer(&opl3->tlist, jiffies + 1);
opl3->sys_timer_status = 1;
}
spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
index 68399538e435..a9f618e06a22 100644
--- a/sound/drivers/opl3/opl3_seq.c
+++ b/sound/drivers/opl3/opl3_seq.c
@@ -247,9 +247,7 @@ static int snd_opl3_seq_new_device(struct snd_seq_device *dev)
}
/* setup system timer */
- init_timer(&opl3->tlist);
- opl3->tlist.function = snd_opl3_timer_func;
- opl3->tlist.data = (unsigned long) opl3;
+ setup_timer(&opl3->tlist, snd_opl3_timer_func, (unsigned long) opl3);
spin_lock_init(&opl3->sys_timer_lock);
opl3->sys_timer_status = 0;
diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
index b953fb4aa298..3b0ee42a5343 100644
--- a/sound/drivers/opl4/opl4_lib.c
+++ b/sound/drivers/opl4/opl4_lib.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_DESCRIPTION("OPL4 driver");
diff --git a/sound/drivers/opl4/opl4_synth.c b/sound/drivers/opl4/opl4_synth.c
index 4b91adc0238c..7bc1e58c95aa 100644
--- a/sound/drivers/opl4/opl4_synth.c
+++ b/sound/drivers/opl4/opl4_synth.c
@@ -33,7 +33,7 @@
#include "opl4_local.h"
#include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/asoundef.h>
/* GM2 controllers */
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 2adc7548ffca..d9647bd84d0f 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -13,7 +13,7 @@
#include <sound/pcm.h>
#include <linux/input.h>
#include <linux/delay.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include "pcsp_input.h"
#include "pcsp.h"
diff --git a/sound/drivers/pcsp/pcsp_input.c b/sound/drivers/pcsp/pcsp_input.c
index 0ecf8a453e01..bfc25811985f 100644
--- a/sound/drivers/pcsp/pcsp_input.c
+++ b/sound/drivers/pcsp/pcsp_input.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/input.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "pcsp.h"
#include "pcsp_input.h"
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 29ebaa4ec0fd..3689f5f6be64 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -10,8 +10,8 @@
#include <linux/gfp.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <sound/pcm.h>
-#include <asm/io.h>
#include "pcsp.h"
static bool nforce_wa;
diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c
index 13a34e3c6382..1927b89e1d1f 100644
--- a/sound/drivers/serial-u16550.c
+++ b/sound/drivers/serial-u16550.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
#include <sound/initval.h>
@@ -44,8 +45,6 @@
#include <linux/serial_reg.h>
#include <linux/jiffies.h>
-#include <asm/io.h>
-
MODULE_DESCRIPTION("MIDI serial u16550");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{ALSA, MIDI serial u16550}}");
@@ -174,9 +173,8 @@ static inline void snd_uart16550_add_timer(struct snd_uart16550 *uart)
{
if (!uart->timer_running) {
/* timer 38600bps * 10bit * 16byte */
- uart->buffer_timer.expires = jiffies + (HZ+255)/256;
+ mod_timer(&uart->buffer_timer, jiffies + (HZ + 255) / 256);
uart->timer_running = 1;
- add_timer(&uart->buffer_timer);
}
}
@@ -830,9 +828,8 @@ static int snd_uart16550_create(struct snd_card *card,
uart->prev_in = 0;
uart->rstatus = 0;
memset(uart->prev_status, 0x80, sizeof(unsigned char) * SNDRV_SERIAL_MAX_OUTS);
- init_timer(&uart->buffer_timer);
- uart->buffer_timer.function = snd_uart16550_buffer_timer;
- uart->buffer_timer.data = (unsigned long)uart;
+ setup_timer(&uart->buffer_timer, snd_uart16550_buffer_timer,
+ (unsigned long)uart);
uart->timer_running = 0;
/* Register device */
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index fc05a37fd017..289f041706cd 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -27,11 +27,11 @@
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/asoundef.h>
#include <sound/info.h>
-#include <asm/io.h>
#include <sound/vx_core.h>
#include "vx_cmd.h"
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 3badc70124ab..0d580186ef1a 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -21,7 +21,19 @@
#define CYCLES_PER_SECOND 8000
#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
-#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */
+/*
+ * Nominally 3125 bytes/second, but the MIDI port's clock might be
+ * 1% too slow, and the bus clock 100 ppm too fast.
+ */
+#define MIDI_BYTES_PER_SECOND 3093
+
+/*
+ * Several devices look only at the first eight data blocks.
+ * In any case, this is more than enough for the MIDI data rate.
+ */
+#define MAX_MIDI_RX_BLOCKS 8
+
+#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */
/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT 16
@@ -78,8 +90,6 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
s->callbacked = false;
s->sync_slave = NULL;
- s->rx_blocks_for_midi = UINT_MAX;
-
return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
@@ -222,6 +232,14 @@ sfc_found:
for (i = 0; i < pcm_channels; i++)
s->pcm_positions[i] = i;
s->midi_position = s->pcm_channels;
+
+ /*
+ * We do not know the actual MIDI FIFO size of most devices. Just
+ * assume two bytes, i.e., one byte can be received over the bus while
+ * the previous one is transmitted over MIDI.
+ * (The value here is adjusted for midi_ratelimit_per_packet().)
+ */
+ s->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
@@ -463,6 +481,36 @@ static void amdtp_fill_pcm_silence(struct amdtp_stream *s,
}
}
+/*
+ * To avoid sending MIDI bytes at too high a rate, assume that the receiving
+ * device has a FIFO, and track how much it is filled. This value increases
+ * by one whenever we send one byte in a packet, but the FIFO empties at
+ * a constant rate independent of our packet rate. One packet has syt_interval
+ * samples, so the number of bytes that empty out of the FIFO, per packet(!),
+ * is MIDI_BYTES_PER_SECOND * syt_interval / sample_rate. To avoid storing
+ * fractional values, the values in midi_fifo_used[] are measured in bytes
+ * multiplied by the sample rate.
+ */
+static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port)
+{
+ int used;
+
+ used = s->midi_fifo_used[port];
+ if (used == 0) /* common shortcut */
+ return true;
+
+ used -= MIDI_BYTES_PER_SECOND * s->syt_interval;
+ used = max(used, 0);
+ s->midi_fifo_used[port] = used;
+
+ return used < s->midi_fifo_limit;
+}
+
+static void midi_rate_use_one_byte(struct amdtp_stream *s, unsigned int port)
+{
+ s->midi_fifo_used[port] += amdtp_rate_table[s->sfc];
+}
+
static void amdtp_fill_midi(struct amdtp_stream *s,
__be32 *buffer, unsigned int frames)
{
@@ -470,16 +518,21 @@ static void amdtp_fill_midi(struct amdtp_stream *s,
u8 *b;
for (f = 0; f < frames; f++) {
- buffer[s->midi_position] = 0;
b = (u8 *)&buffer[s->midi_position];
port = (s->data_block_counter + f) % 8;
- if ((f >= s->rx_blocks_for_midi) ||
- (s->midi[port] == NULL) ||
- (snd_rawmidi_transmit(s->midi[port], b + 1, 1) <= 0))
- b[0] = 0x80;
- else
+ if (f < MAX_MIDI_RX_BLOCKS &&
+ midi_ratelimit_per_packet(s, port) &&
+ s->midi[port] != NULL &&
+ snd_rawmidi_transmit(s->midi[port], &b[1], 1) == 1) {
+ midi_rate_use_one_byte(s, port);
b[0] = 0x81;
+ } else {
+ b[0] = 0x80;
+ b[1] = 0;
+ }
+ b[2] = 0;
+ b[3] = 0;
buffer += s->data_block_quadlets;
}
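
The fixed-point bookkeeping in midi_ratelimit_per_packet() is easiest to check with concrete numbers. The sketch below replays the logic in user space for an assumed 44.1 kHz stream, where amdtp_rate_table[sfc] is 44100 and syt_interval is 8, so each non-empty packet drains MIDI_BYTES_PER_SECOND * 8 rate-scaled units for a given port:

#include <stdio.h>

#define MIDI_BYTES_PER_SECOND 3093

int main(void)
{
	const int rate = 44100;		/* assumed amdtp_rate_table[sfc] */
	const int syt_interval = 8;	/* data blocks per packet at 44.1 kHz */
	const int limit = rate - MIDI_BYTES_PER_SECOND * syt_interval + 1;
	int used = 0;			/* FIFO fill in bytes, scaled by rate */
	int packet, sent = 0;

	/* one port sees one data block per packet; ~1 s of non-empty packets */
	for (packet = 0; packet < rate / syt_interval; packet++) {
		used -= MIDI_BYTES_PER_SECOND * syt_interval;	/* drain */
		if (used < 0)
			used = 0;
		if (used < limit) {	/* midi_ratelimit_per_packet() == true */
			used += rate;	/* midi_rate_use_one_byte(): one byte */
			sent++;
		}
	}
	/* prints roughly 3060 here, just under MIDI_BYTES_PER_SECOND */
	printf("MIDI bytes sent in ~1 s: %d\n", sent);
	return 0;
}

Because the drain is clamped at an empty FIFO, the limiter can only undershoot the target rate, never exceed it, and used stays below limit + rate, well under two bytes' worth, matching the two-byte FIFO assumption described in amdtp_stream_set_parameters(). The label bytes written by amdtp_fill_midi() keep the existing convention visible in the hunk: 0x81 when one MIDI byte follows in b[1], 0x80 when the slot is empty.
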
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index e6e8926275b0..8a03a91e728b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -148,13 +148,12 @@ struct amdtp_stream {
bool double_pcm_frames;
struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
+ int midi_fifo_limit;
+ int midi_fifo_used[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
/* quirk: fixed interval of dbc between previous/current packets. */
unsigned int tx_dbc_interval;
- /* quirk: the first count of data blocks in an rx packet for MIDI */
- unsigned int rx_blocks_for_midi;
-
bool callbacked;
wait_queue_head_t callback_wait;
struct amdtp_stream *sync_slave;
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 1aab0a32870c..0ebcabfdc7ce 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -484,13 +484,6 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
amdtp_stream_destroy(&bebob->rx_stream);
destroy_both_connections(bebob);
}
- /*
- * The firmware for these devices ignore MIDI messages in more than
- * first 8 data blocks of an received AMDTP packet.
- */
- if (bebob->spec == &maudio_fw410_spec ||
- bebob->spec == &maudio_special_spec)
- bebob->rx_stream.rx_blocks_for_midi = 8;
end:
return err;
}
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index b985fc5ebdc6..4f440e163667 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -179,11 +179,6 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
destroy_stream(efw, &efw->tx_stream);
goto end;
}
- /*
- * Fireworks ignores MIDI messages in more than first 8 data
- * blocks of an received AMDTP packet.
- */
- efw->rx_stream.rx_blocks_for_midi = 8;
/* set IEC61883 compliant mode (actually not fully compliant...) */
err = snd_efw_command_set_tx_mode(efw, SND_EFW_TRANSPORT_MODE_IEC61883);
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 1a3a6fa27158..88844881cbff 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
static void snd_ak4113_free(struct ak4113 *chip)
{
- chip->init = 1; /* don't schedule new work */
- mb();
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
cancel_delayed_work_sync(&chip->work);
kfree(chip);
}
@@ -89,6 +88,8 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
chip->write = write;
chip->private_data = private_data;
INIT_DELAYED_WORK(&chip->work, ak4113_stats);
+ atomic_set(&chip->wq_processing, 0);
+ mutex_init(&chip->reinit_mutex);
for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
chip->regmap[reg] = pgm[reg];
@@ -139,13 +140,13 @@ static void ak4113_init_regs(struct ak4113 *chip)
void snd_ak4113_reinit(struct ak4113 *chip)
{
- chip->init = 1;
- mb();
- flush_delayed_work(&chip->work);
+ if (atomic_inc_return(&chip->wq_processing) == 1)
+ cancel_delayed_work_sync(&chip->work);
+ mutex_lock(&chip->reinit_mutex);
ak4113_init_regs(chip);
+ mutex_unlock(&chip->reinit_mutex);
/* bring up statistics / event queuing */
- chip->init = 0;
- if (chip->kctls[0])
+ if (atomic_dec_and_test(&chip->wq_processing))
schedule_delayed_work(&chip->work, HZ / 10);
}
EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
@@ -632,8 +633,25 @@ static void ak4113_stats(struct work_struct *work)
{
struct ak4113 *chip = container_of(work, struct ak4113, work.work);
- if (!chip->init)
+ if (atomic_inc_return(&chip->wq_processing) == 1)
snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
- schedule_delayed_work(&chip->work, HZ / 10);
+ if (atomic_dec_and_test(&chip->wq_processing))
+ schedule_delayed_work(&chip->work, HZ / 10);
+}
+
+#ifdef CONFIG_PM
+void snd_ak4113_suspend(struct ak4113 *chip)
+{
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
+ cancel_delayed_work_sync(&chip->work);
+}
+EXPORT_SYMBOL(snd_ak4113_suspend);
+
+void snd_ak4113_resume(struct ak4113 *chip)
+{
+ atomic_dec(&chip->wq_processing);
+ snd_ak4113_reinit(chip);
}
+EXPORT_SYMBOL(snd_ak4113_resume);
+#endif
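
The ak4113 change (and the matching ak4114 hunk below) replaces the chip->init flag plus mb() with an atomic counter: the first context to raise wq_processing above zero cancels the polling work, the register re-init is serialised by a new mutex, and the work is rescheduled only by whoever drops the counter back to zero. The new suspend/resume helpers reuse the same counter to park the worker across power transitions. A reduced sketch of the pattern, with hypothetical names:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_chip {				/* hypothetical codec state */
	atomic_t wq_processing;
	struct mutex reinit_mutex;
	struct delayed_work work;
};

static void foo_stats(struct work_struct *work)
{
	struct foo_chip *chip = container_of(work, struct foo_chip, work.work);

	if (atomic_inc_return(&chip->wq_processing) == 1) {
		/* ... poll the hardware, notify controls ... */
	}
	if (atomic_dec_and_test(&chip->wq_processing))
		schedule_delayed_work(&chip->work, HZ / 10);
}

static void foo_reinit(struct foo_chip *chip)
{
	if (atomic_inc_return(&chip->wq_processing) == 1)
		cancel_delayed_work_sync(&chip->work);
	mutex_lock(&chip->reinit_mutex);
	/* ... reprogram the registers ... */
	mutex_unlock(&chip->reinit_mutex);
	if (atomic_dec_and_test(&chip->wq_processing))
		schedule_delayed_work(&chip->work, HZ / 10);
}

static void foo_suspend(struct foo_chip *chip)
{
	atomic_inc(&chip->wq_processing);	/* keep the worker parked */
	cancel_delayed_work_sync(&chip->work);
}

static void foo_resume(struct foo_chip *chip)
{
	atomic_dec(&chip->wq_processing);
	foo_reinit(chip);			/* re-init and restart polling */
}

Creation mirrors the diff: INIT_DELAYED_WORK(), atomic_set(&chip->wq_processing, 0) and mutex_init(&chip->reinit_mutex) in the create path, and an unconditional atomic_inc() before cancel_delayed_work_sync() in the free path.
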
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index c7f56339415d..5a4cf3fab4ae 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
static void snd_ak4114_free(struct ak4114 *chip)
{
- chip->init = 1; /* don't schedule new work */
- mb();
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
cancel_delayed_work_sync(&chip->work);
kfree(chip);
}
@@ -100,6 +99,8 @@ int snd_ak4114_create(struct snd_card *card,
chip->write = write;
chip->private_data = private_data;
INIT_DELAYED_WORK(&chip->work, ak4114_stats);
+ atomic_set(&chip->wq_processing, 0);
+ mutex_init(&chip->reinit_mutex);
for (reg = 0; reg < 6; reg++)
chip->regmap[reg] = pgm[reg];
@@ -122,6 +123,7 @@ int snd_ak4114_create(struct snd_card *card,
snd_ak4114_free(chip);
return err < 0 ? err : -EIO;
}
+EXPORT_SYMBOL(snd_ak4114_create);
void snd_ak4114_reg_write(struct ak4114 *chip, unsigned char reg, unsigned char mask, unsigned char val)
{
@@ -131,6 +133,7 @@ void snd_ak4114_reg_write(struct ak4114 *chip, unsigned char reg, unsigned char
reg_write(chip, reg,
(chip->txcsb[reg-AK4114_REG_TXCSB0] & ~mask) | val);
}
+EXPORT_SYMBOL(snd_ak4114_reg_write);
static void ak4114_init_regs(struct ak4114 *chip)
{
@@ -152,15 +155,16 @@ static void ak4114_init_regs(struct ak4114 *chip)
void snd_ak4114_reinit(struct ak4114 *chip)
{
- chip->init = 1;
- mb();
- flush_delayed_work(&chip->work);
+ if (atomic_inc_return(&chip->wq_processing) == 1)
+ cancel_delayed_work_sync(&chip->work);
+ mutex_lock(&chip->reinit_mutex);
ak4114_init_regs(chip);
+ mutex_unlock(&chip->reinit_mutex);
/* bring up statistics / event queuing */
- chip->init = 0;
- if (chip->kctls[0])
+ if (atomic_dec_and_test(&chip->wq_processing))
schedule_delayed_work(&chip->work, HZ / 10);
}
+EXPORT_SYMBOL(snd_ak4114_reinit);
static unsigned int external_rate(unsigned char rcs1)
{
@@ -505,6 +509,7 @@ int snd_ak4114_build(struct ak4114 *ak4114,
schedule_delayed_work(&ak4114->work, HZ / 10);
return 0;
}
+EXPORT_SYMBOL(snd_ak4114_build);
/* notify kcontrols if any parameters are changed */
static void ak4114_notify(struct ak4114 *ak4114,
@@ -560,6 +565,7 @@ int snd_ak4114_external_rate(struct ak4114 *ak4114)
rcs1 = reg_read(ak4114, AK4114_REG_RCS1);
return external_rate(rcs1);
}
+EXPORT_SYMBOL(snd_ak4114_external_rate);
int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
{
@@ -607,20 +613,30 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
}
return res;
}
+EXPORT_SYMBOL(snd_ak4114_check_rate_and_errors);
static void ak4114_stats(struct work_struct *work)
{
struct ak4114 *chip = container_of(work, struct ak4114, work.work);
- if (!chip->init)
+ if (atomic_inc_return(&chip->wq_processing) == 1)
snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
+ if (atomic_dec_and_test(&chip->wq_processing))
+ schedule_delayed_work(&chip->work, HZ / 10);
+}
- schedule_delayed_work(&chip->work, HZ / 10);
+#ifdef CONFIG_PM
+void snd_ak4114_suspend(struct ak4114 *chip)
+{
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
+ cancel_delayed_work_sync(&chip->work);
}
+EXPORT_SYMBOL(snd_ak4114_suspend);
-EXPORT_SYMBOL(snd_ak4114_create);
-EXPORT_SYMBOL(snd_ak4114_reg_write);
-EXPORT_SYMBOL(snd_ak4114_reinit);
-EXPORT_SYMBOL(snd_ak4114_build);
-EXPORT_SYMBOL(snd_ak4114_external_rate);
-EXPORT_SYMBOL(snd_ak4114_check_rate_and_errors);
+void snd_ak4114_resume(struct ak4114 *chip)
+{
+ atomic_dec(&chip->wq_processing);
+ snd_ak4114_reinit(chip);
+}
+EXPORT_SYMBOL(snd_ak4114_resume);
+#endif
diff --git a/sound/i2c/other/ak4117.c b/sound/i2c/other/ak4117.c
index 88452e899bd9..48848909a5a9 100644
--- a/sound/i2c/other/ak4117.c
+++ b/sound/i2c/other/ak4117.c
@@ -91,9 +91,7 @@ int snd_ak4117_create(struct snd_card *card, ak4117_read_t *read, ak4117_write_t
chip->read = read;
chip->write = write;
chip->private_data = private_data;
- init_timer(&chip->timer);
- chip->timer.data = (unsigned long)chip;
- chip->timer.function = snd_ak4117_timer;
+ setup_timer(&chip->timer, snd_ak4117_timer, (unsigned long)chip);
for (reg = 0; reg < 5; reg++)
chip->regmap[reg] = pgm[reg];
@@ -139,8 +137,7 @@ void snd_ak4117_reinit(struct ak4117 *chip)
/* release powerdown, everything is initialized now */
reg_write(chip, AK4117_REG_PWRDN, old | AK4117_RST | AK4117_PWN);
chip->init = 0;
- chip->timer.expires = 1 + jiffies;
- add_timer(&chip->timer);
+ mod_timer(&chip->timer, 1 + jiffies);
}
static unsigned int external_rate(unsigned char rcs1)
@@ -540,8 +537,7 @@ static void snd_ak4117_timer(unsigned long data)
if (chip->init)
return;
snd_ak4117_check_rate_and_errors(chip, 0);
- chip->timer.expires = 1 + jiffies;
- add_timer(&chip->timer);
+ mod_timer(&chip->timer, 1 + jiffies);
}
EXPORT_SYMBOL(snd_ak4117_create);
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index 67dbfde837ab..c65731088aa2 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -21,7 +21,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index f481a41e027e..769226515f0d 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -142,7 +142,6 @@ static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
struct snd_card *card;
struct snd_ad1816a *chip;
struct snd_opl3 *opl3;
- struct snd_timer *timer;
error = snd_card_new(&pcard->card->dev,
index[dev], id[dev], THIS_MODULE,
@@ -172,7 +171,7 @@ static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
sprintf(card->longname, "%s, SS at 0x%lx, irq %d, dma %d&%d",
card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]);
- if ((error = snd_ad1816a_pcm(chip, 0, NULL)) < 0) {
+ if ((error = snd_ad1816a_pcm(chip, 0)) < 0) {
snd_card_free(card);
return error;
}
@@ -182,7 +181,7 @@ static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
return error;
}
- error = snd_ad1816a_timer(chip, 0, &timer);
+ error = snd_ad1816a_timer(chip, 0);
if (error < 0) {
snd_card_free(card);
return error;
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index 01a07986f4a3..5c815f5fb044 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -22,11 +22,11 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/tlv.h>
#include <sound/ad1816a.h>
-#include <asm/io.h>
#include <asm/dma.h>
static inline int snd_ad1816a_busy_wait(struct snd_ad1816a *chip)
@@ -675,7 +675,7 @@ static struct snd_pcm_ops snd_ad1816a_capture_ops = {
.pointer = snd_ad1816a_capture_pointer,
};
-int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm)
+int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device)
{
int error;
struct snd_pcm *pcm;
@@ -697,13 +697,10 @@ int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm)
64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
chip->pcm = pcm;
- if (rpcm)
- *rpcm = pcm;
return 0;
}
-int snd_ad1816a_timer(struct snd_ad1816a *chip, int device,
- struct snd_timer **rtimer)
+int snd_ad1816a_timer(struct snd_ad1816a *chip, int device)
{
struct snd_timer *timer;
struct snd_timer_id tid;
@@ -720,8 +717,6 @@ int snd_ad1816a_timer(struct snd_ad1816a *chip, int device,
timer->private_data = chip;
chip->timer = timer;
timer->hw = snd_ad1816a_timer_table;
- if (rtimer)
- *rtimer = timer;
return 0;
}
diff --git a/sound/isa/ad1848/ad1848.c b/sound/isa/ad1848/ad1848.c
index 093f22a464d7..f159da4ec890 100644
--- a/sound/isa/ad1848/ad1848.c
+++ b/sound/isa/ad1848/ad1848.c
@@ -88,7 +88,6 @@ static int snd_ad1848_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_wss *chip;
- struct snd_pcm *pcm;
int error;
error = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card);
@@ -103,7 +102,7 @@ static int snd_ad1848_probe(struct device *dev, unsigned int n)
card->private_data = chip;
- error = snd_wss_pcm(chip, 0, &pcm);
+ error = snd_wss_pcm(chip, 0);
if (error < 0)
goto out;
@@ -112,10 +111,10 @@ static int snd_ad1848_probe(struct device *dev, unsigned int n)
goto out;
strcpy(card->driver, "AD1848");
- strcpy(card->shortname, pcm->name);
+ strcpy(card->shortname, chip->pcm->name);
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
- pcm->name, chip->port, irq[n], dma1[n]);
+ chip->pcm->name, chip->port, irq[n], dma1[n]);
if (thinkpad[n])
strcat(card->longname, " [Thinkpad]");
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index 32d01525211d..bc9ea306ee02 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -233,7 +233,7 @@ static int snd_card_als100_probe(int dev,
irq[dev], dma8[dev], dma16[dev]);
}
- if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) {
+ if ((error = snd_sb16dsp_pcm(chip, 0)) < 0) {
snd_card_free(card);
return error;
}
diff --git a/sound/isa/azt2320.c b/sound/isa/azt2320.c
index 0ea75fc62072..fff186fa621e 100644
--- a/sound/isa/azt2320.c
+++ b/sound/isa/azt2320.c
@@ -29,7 +29,7 @@
activation method (full-duplex audio!).
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/time.h>
@@ -215,7 +215,7 @@ static int snd_card_azt2320_probe(int dev,
sprintf(card->longname, "%s, WSS at 0x%lx, irq %i, dma %i&%i",
card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]);
- error = snd_wss_pcm(chip, 0, NULL);
+ error = snd_wss_pcm(chip, 0);
if (error < 0) {
snd_card_free(card);
return error;
@@ -225,7 +225,7 @@ static int snd_card_azt2320_probe(int dev,
snd_card_free(card);
return error;
}
- error = snd_wss_timer(chip, 0, NULL);
+ error = snd_wss_timer(chip, 0);
if (error < 0) {
snd_card_free(card);
return error;
diff --git a/sound/isa/cmi8328.c b/sound/isa/cmi8328.c
index 4778852a1201..2c89d95da674 100644
--- a/sound/isa/cmi8328.c
+++ b/sound/isa/cmi8328.c
@@ -307,7 +307,7 @@ static int snd_cmi8328_probe(struct device *pdev, unsigned int ndev)
if (err < 0)
goto error;
- err = snd_wss_pcm(cmi->wss, 0, NULL);
+ err = snd_wss_pcm(cmi->wss, 0);
if (err < 0)
goto error;
@@ -318,7 +318,7 @@ static int snd_cmi8328_probe(struct device *pdev, unsigned int ndev)
if (err < 0)
goto error;
- if (snd_wss_timer(cmi->wss, 0, NULL) < 0)
+ if (snd_wss_timer(cmi->wss, 0) < 0)
snd_printk(KERN_WARNING "error initializing WSS timer\n");
if (mpuport[ndev] == SNDRV_AUTO_PORT) {
diff --git a/sound/isa/cs423x/cs4231.c b/sound/isa/cs423x/cs4231.c
index 7dba07a4343a..282cd75d2235 100644
--- a/sound/isa/cs423x/cs4231.c
+++ b/sound/isa/cs423x/cs4231.c
@@ -92,7 +92,6 @@ static int snd_cs4231_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_wss *chip;
- struct snd_pcm *pcm;
int error;
error = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card);
@@ -106,15 +105,15 @@ static int snd_cs4231_probe(struct device *dev, unsigned int n)
card->private_data = chip;
- error = snd_wss_pcm(chip, 0, &pcm);
+ error = snd_wss_pcm(chip, 0);
if (error < 0)
goto out;
strcpy(card->driver, "CS4231");
- strcpy(card->shortname, pcm->name);
+ strcpy(card->shortname, chip->pcm->name);
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
- pcm->name, chip->port, irq[n], dma1[n]);
+ chip->pcm->name, chip->port, irq[n], dma1[n]);
if (dma2[n] >= 0)
sprintf(card->longname + strlen(card->longname), "&%d", dma2[n]);
@@ -122,7 +121,7 @@ static int snd_cs4231_probe(struct device *dev, unsigned int n)
if (error < 0)
goto out;
- error = snd_wss_timer(chip, 0, NULL);
+ error = snd_wss_timer(chip, 0);
if (error < 0)
goto out;
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 750f51c904fc..9d7582c90a95 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -382,7 +382,6 @@ static int snd_cs423x_card_new(struct device *pdev, int dev,
static int snd_cs423x_probe(struct snd_card *card, int dev)
{
struct snd_card_cs4236 *acard;
- struct snd_pcm *pcm;
struct snd_wss *chip;
struct snd_opl3 *opl3;
int err;
@@ -404,7 +403,7 @@ static int snd_cs423x_probe(struct snd_card *card, int dev)
acard->chip = chip;
if (chip->hardware & WSS_HW_CS4236B_MASK) {
- err = snd_cs4236_pcm(chip, 0, &pcm);
+ err = snd_cs4236_pcm(chip, 0);
if (err < 0)
return err;
@@ -412,7 +411,7 @@ static int snd_cs423x_probe(struct snd_card *card, int dev)
if (err < 0)
return err;
} else {
- err = snd_wss_pcm(chip, 0, &pcm);
+ err = snd_wss_pcm(chip, 0);
if (err < 0)
return err;
@@ -420,17 +419,17 @@ static int snd_cs423x_probe(struct snd_card *card, int dev)
if (err < 0)
return err;
}
- strcpy(card->driver, pcm->name);
- strcpy(card->shortname, pcm->name);
+ strcpy(card->driver, chip->pcm->name);
+ strcpy(card->shortname, chip->pcm->name);
sprintf(card->longname, "%s at 0x%lx, irq %i, dma %i",
- pcm->name,
+ chip->pcm->name,
chip->port,
irq[dev],
dma1[dev]);
if (dma2[dev] >= 0)
sprintf(card->longname + strlen(card->longname), "&%d", dma2[dev]);
- err = snd_wss_timer(chip, 0, NULL);
+ err = snd_wss_timer(chip, 0);
if (err < 0)
return err;
diff --git a/sound/isa/cs423x/cs4236_lib.c b/sound/isa/cs423x/cs4236_lib.c
index c5adca300632..2b7cc596f4c6 100644
--- a/sound/isa/cs423x/cs4236_lib.c
+++ b/sound/isa/cs423x/cs4236_lib.c
@@ -79,7 +79,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/time.h>
@@ -376,17 +376,14 @@ int snd_cs4236_create(struct snd_card *card,
return 0;
}
-int snd_cs4236_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm)
+int snd_cs4236_pcm(struct snd_wss *chip, int device)
{
- struct snd_pcm *pcm;
int err;
- err = snd_wss_pcm(chip, device, &pcm);
+ err = snd_wss_pcm(chip, device);
if (err < 0)
return err;
- pcm->info_flags &= ~SNDRV_PCM_INFO_JOINT_DUPLEX;
- if (rpcm)
- *rpcm = pcm;
+ chip->pcm->info_flags &= ~SNDRV_PCM_INFO_JOINT_DUPLEX;
return 0;
}
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index 76001fe0579d..1901c2bb6c3b 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -138,10 +138,9 @@ static int snd_es1688_probe(struct snd_card *card, unsigned int n)
{
struct snd_es1688 *chip = card->private_data;
struct snd_opl3 *opl3;
- struct snd_pcm *pcm;
int error;
- error = snd_es1688_pcm(card, chip, 0, &pcm);
+ error = snd_es1688_pcm(card, chip, 0);
if (error < 0)
return error;
@@ -150,9 +149,9 @@ static int snd_es1688_probe(struct snd_card *card, unsigned int n)
return error;
strlcpy(card->driver, "ES1688", sizeof(card->driver));
- strlcpy(card->shortname, pcm->name, sizeof(card->shortname));
+ strlcpy(card->shortname, chip->pcm->name, sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %i, dma %i", pcm->name, chip->port,
+ "%s at 0x%lx, irq %i, dma %i", chip->pcm->name, chip->port,
chip->irq, chip->dma8);
if (fm_port[n] == SNDRV_AUTO_PORT)
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index b5450143407b..e2cf508841b1 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -25,11 +25,11 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/es1688.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <asm/dma.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
@@ -728,8 +728,7 @@ static struct snd_pcm_ops snd_es1688_capture_ops = {
.pointer = snd_es1688_capture_pointer,
};
-int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip,
- int device, struct snd_pcm **rpcm)
+int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device)
{
struct snd_pcm *pcm;
int err;
@@ -749,9 +748,6 @@ int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_isa_data(),
64*1024, 64*1024);
-
- if (rpcm)
- *rpcm = pcm;
return 0;
}
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index b481bb8c31bc..5094b62d8f77 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -84,8 +84,8 @@
#include <linux/isapnp.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -1687,16 +1687,13 @@ static struct snd_pcm_ops snd_es18xx_capture_ops = {
.pointer = snd_es18xx_capture_pointer,
};
-static int snd_es18xx_pcm(struct snd_card *card, int device,
- struct snd_pcm **rpcm)
+static int snd_es18xx_pcm(struct snd_card *card, int device)
{
struct snd_es18xx *chip = card->private_data;
struct snd_pcm *pcm;
char str[16];
int err;
- if (rpcm)
- *rpcm = NULL;
sprintf(str, "ES%x", chip->version);
if (chip->caps & ES18XX_PCM2)
err = snd_pcm_new(card, str, device, 2, 1, &pcm);
@@ -1722,9 +1719,6 @@ static int snd_es18xx_pcm(struct snd_card *card, int device,
snd_dma_isa_data(),
64*1024,
chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
-
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -2154,7 +2148,7 @@ static int snd_audiodrive_probe(struct snd_card *card, int dev)
chip->port,
irq[dev], dma1[dev]);
- err = snd_es18xx_pcm(card, 0, NULL);
+ err = snd_es18xx_pcm(card, 0);
if (err < 0)
return err;
diff --git a/sound/isa/galaxy/galaxy.c b/sound/isa/galaxy/galaxy.c
index 1eb2b1ec0fd9..32278847884f 100644
--- a/sound/isa/galaxy/galaxy.c
+++ b/sound/isa/galaxy/galaxy.c
@@ -569,7 +569,7 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
if (err < 0)
goto error;
- err = snd_wss_pcm(chip, 0, NULL);
+ err = snd_wss_pcm(chip, 0);
if (err < 0)
goto error;
@@ -577,7 +577,7 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
if (err < 0)
goto error;
- err = snd_wss_timer(chip, 0, NULL);
+ err = snd_wss_timer(chip, 0);
if (err < 0)
goto error;
diff --git a/sound/isa/gus/gus_instr.c b/sound/isa/gus/gus_instr.c
deleted file mode 100644
index 4dc9caf8ddcf..000000000000
--- a/sound/isa/gus/gus_instr.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Routines for Gravis UltraSound soundcards - Synthesizer
- * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/time.h>
-#include <sound/core.h>
-#include <sound/gus.h>
-
-/*
- *
- */
-
-int snd_gus_iwffff_put_sample(void *private_data, struct iwffff_wave *wave,
- char __user *data, long len, int atomic)
-{
- struct snd_gus_card *gus = private_data;
- struct snd_gf1_mem_block *block;
- int err;
-
- if (wave->format & IWFFFF_WAVE_ROM)
- return 0; /* it's probably ok - verify the address? */
- if (wave->format & IWFFFF_WAVE_STEREO)
- return -EINVAL; /* not supported */
- block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
- SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF,
- NULL, wave->size,
- wave->format & IWFFFF_WAVE_16BIT, 1,
- wave->share_id);
- if (block == NULL)
- return -ENOMEM;
- err = snd_gus_dram_write(gus, data,
- block->ptr, wave->size);
- if (err < 0) {
- snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0);
- snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block);
- snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1);
- return err;
- }
- wave->address.memory = block->ptr;
- return 0;
-}
-
-int snd_gus_iwffff_get_sample(void *private_data, struct iwffff_wave *wave,
- char __user *data, long len, int atomic)
-{
- struct snd_gus_card *gus = private_data;
-
- return snd_gus_dram_read(gus, data, wave->address.memory, wave->size,
- wave->format & IWFFFF_WAVE_ROM ? 1 : 0);
-}
-
-int snd_gus_iwffff_remove_sample(void *private_data, struct iwffff_wave *wave,
- int atomic)
-{
- struct snd_gus_card *gus = private_data;
-
- if (wave->format & IWFFFF_WAVE_ROM)
- return 0; /* it's probably ok - verify the address? */
- return snd_gf1_mem_free(&gus->gf1.mem_alloc, wave->address.memory);
-}
-
-/*
- *
- */
-
-int snd_gus_gf1_put_sample(void *private_data, struct gf1_wave *wave,
- char __user *data, long len, int atomic)
-{
- struct snd_gus_card *gus = private_data;
- struct snd_gf1_mem_block *block;
- int err;
-
- if (wave->format & GF1_WAVE_STEREO)
- return -EINVAL; /* not supported */
- block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
- SNDRV_GF1_MEM_OWNER_WAVE_GF1,
- NULL, wave->size,
- wave->format & GF1_WAVE_16BIT, 1,
- wave->share_id);
- if (block == NULL)
- return -ENOMEM;
- err = snd_gus_dram_write(gus, data,
- block->ptr, wave->size);
- if (err < 0) {
- snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0);
- snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block);
- snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1);
- return err;
- }
- wave->address.memory = block->ptr;
- return 0;
-}
-
-int snd_gus_gf1_get_sample(void *private_data, struct gf1_wave *wave,
- char __user *data, long len, int atomic)
-{
- struct snd_gus_card *gus = private_data;
-
- return snd_gus_dram_read(gus, data, wave->address.memory, wave->size, 0);
-}
-
-int snd_gus_gf1_remove_sample(void *private_data, struct gf1_wave *wave,
- int atomic)
-{
- struct snd_gus_card *gus = private_data;
-
- return snd_gf1_mem_free(&gus->gf1.mem_alloc, wave->address.memory);
-}
-
-/*
- *
- */
-
-int snd_gus_simple_put_sample(void *private_data, struct simple_instrument *instr,
- char __user *data, long len, int atomic)
-{
- struct snd_gus_card *gus = private_data;
- struct snd_gf1_mem_block *block;
- int err;
-
- if (instr->format & SIMPLE_WAVE_STEREO)
- return -EINVAL; /* not supported */
- block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
- SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE,
- NULL, instr->size,
- instr->format & SIMPLE_WAVE_16BIT, 1,
- instr->share_id);
- if (block == NULL)
- return -ENOMEM;
- err = snd_gus_dram_write(gus, data, block->ptr, instr->size);
- if (err < 0) {
- snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0);
- snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block);
- snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1);
- return err;
- }
- instr->address.memory = block->ptr;
- return 0;
-}
-
-int snd_gus_simple_get_sample(void *private_data, struct simple_instrument *instr,
- char __user *data, long len, int atomic)
-{
- struct snd_gus_card *gus = private_data;
-
- return snd_gus_dram_read(gus, data, instr->address.memory, instr->size, 0);
-}
-
-int snd_gus_simple_remove_sample(void *private_data, struct simple_instrument *instr,
- int atomic)
-{
- struct snd_gus_card *gus = private_data;
-
- return snd_gf1_mem_free(&gus->gf1.mem_alloc, instr->address.memory);
-}
diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
index 2dcf45bf7293..25f6788ccef3 100644
--- a/sound/isa/gus/gus_pcm.c
+++ b/sound/isa/gus/gus_pcm.c
@@ -849,7 +849,7 @@ static struct snd_pcm_ops snd_gf1_pcm_capture_ops = {
.pointer = snd_gf1_pcm_capture_pointer,
};
-int snd_gf1_pcm_new(struct snd_gus_card * gus, int pcm_dev, int control_index, struct snd_pcm ** rpcm)
+int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index)
{
struct snd_card *card;
struct snd_kcontrol *kctl;
@@ -857,8 +857,6 @@ int snd_gf1_pcm_new(struct snd_gus_card * gus, int pcm_dev, int control_index, s
struct snd_pcm_substream *substream;
int capture, err;
- if (rpcm)
- *rpcm = NULL;
card = gus->card;
capture = !gus->interwave && !gus->ess_flag && !gus->ace_flag ? 1 : 0;
err = snd_pcm_new(card,
@@ -903,8 +901,6 @@ int snd_gf1_pcm_new(struct snd_gus_card * gus, int pcm_dev, int control_index, s
return err;
kctl->id.index = control_index;
- if (rpcm)
- *rpcm = pcm;
return 0;
}
diff --git a/sound/isa/gus/gus_uart.c b/sound/isa/gus/gus_uart.c
index 21cc42e4c4be..3992912743f5 100644
--- a/sound/isa/gus/gus_uart.c
+++ b/sound/isa/gus/gus_uart.c
@@ -241,13 +241,11 @@ static struct snd_rawmidi_ops snd_gf1_uart_input =
.trigger = snd_gf1_uart_input_trigger,
};
-int snd_gf1_rawmidi_new(struct snd_gus_card * gus, int device, struct snd_rawmidi ** rrawmidi)
+int snd_gf1_rawmidi_new(struct snd_gus_card *gus, int device)
{
struct snd_rawmidi *rmidi;
int err;
- if (rrawmidi)
- *rrawmidi = NULL;
if ((err = snd_rawmidi_new(gus->card, "GF1", device, 1, 1, &rmidi)) < 0)
return err;
strcpy(rmidi->name, gus->interwave ? "AMD InterWave" : "GF1");
@@ -256,7 +254,5 @@ int snd_gf1_rawmidi_new(struct snd_gus_card * gus, int device, struct snd_rawmid
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = gus;
gus->midi_uart = rmidi;
- if (rrawmidi)
- *rrawmidi = rmidi;
return err;
}
diff --git a/sound/isa/gus/gusclassic.c b/sound/isa/gus/gusclassic.c
index 7ce29ffa1af9..f0019715d82e 100644
--- a/sound/isa/gus/gusclassic.c
+++ b/sound/isa/gus/gusclassic.c
@@ -181,12 +181,12 @@ static int snd_gusclassic_probe(struct device *dev, unsigned int n)
if (error < 0)
goto out;
- error = snd_gf1_pcm_new(gus, 0, 0, NULL);
+ error = snd_gf1_pcm_new(gus, 0, 0);
if (error < 0)
goto out;
if (!gus->ace_flag) {
- error = snd_gf1_rawmidi_new(gus, 0, NULL);
+ error = snd_gf1_rawmidi_new(gus, 0);
if (error < 0)
goto out;
}
diff --git a/sound/isa/gus/gusextreme.c b/sound/isa/gus/gusextreme.c
index 28a16936a397..693d95f46804 100644
--- a/sound/isa/gus/gusextreme.c
+++ b/sound/isa/gus/gusextreme.c
@@ -284,7 +284,7 @@ static int snd_gusextreme_probe(struct device *dev, unsigned int n)
}
gus->codec_flag = 1;
- error = snd_es1688_pcm(card, es1688, 0, NULL);
+ error = snd_es1688_pcm(card, es1688, 0);
if (error < 0)
goto out;
@@ -295,7 +295,7 @@ static int snd_gusextreme_probe(struct device *dev, unsigned int n)
snd_component_add(card, "ES1688");
if (pcm_channels[n] > 0) {
- error = snd_gf1_pcm_new(gus, 1, 1, NULL);
+ error = snd_gf1_pcm_new(gus, 1, 1);
if (error < 0)
goto out;
}
diff --git a/sound/isa/gus/gusmax.c b/sound/isa/gus/gusmax.c
index 39df36ca3acb..8216e8d8f017 100644
--- a/sound/isa/gus/gusmax.c
+++ b/sound/isa/gus/gusmax.c
@@ -309,7 +309,7 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
if (err < 0)
goto _err;
- err = snd_wss_pcm(wss, 0, NULL);
+ err = snd_wss_pcm(wss, 0);
if (err < 0)
goto _err;
@@ -317,19 +317,19 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
if (err < 0)
goto _err;
- err = snd_wss_timer(wss, 2, NULL);
+ err = snd_wss_timer(wss, 2);
if (err < 0)
goto _err;
if (pcm_channels[dev] > 0) {
- if ((err = snd_gf1_pcm_new(gus, 1, 1, NULL)) < 0)
+ if ((err = snd_gf1_pcm_new(gus, 1, 1)) < 0)
goto _err;
}
err = snd_gusmax_mixer(wss);
if (err < 0)
goto _err;
- err = snd_gf1_rawmidi_new(gus, 0, NULL);
+ err = snd_gf1_rawmidi_new(gus, 0);
if (err < 0)
goto _err;
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index ad55e5cb8e94..70d0040484c8 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -647,7 +647,6 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
#ifdef SNDRV_STB
struct snd_i2c_bus *i2c_bus;
#endif
- struct snd_pcm *pcm;
char *str;
int err;
@@ -695,14 +694,15 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
if (err < 0)
return err;
- err = snd_wss_pcm(wss, 0, &pcm);
+ err = snd_wss_pcm(wss, 0);
if (err < 0)
return err;
- sprintf(pcm->name + strlen(pcm->name), " rev %c", gus->revision + 'A');
- strcat(pcm->name, " (codec)");
+ sprintf(wss->pcm->name + strlen(wss->pcm->name), " rev %c",
+ gus->revision + 'A');
+ strcat(wss->pcm->name, " (codec)");
- err = snd_wss_timer(wss, 2, NULL);
+ err = snd_wss_timer(wss, 2);
if (err < 0)
return err;
@@ -711,7 +711,7 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
return err;
if (pcm_channels[dev] > 0) {
- err = snd_gf1_pcm_new(gus, 1, 1, NULL);
+ err = snd_gf1_pcm_new(gus, 1, 1);
if (err < 0)
return err;
}
@@ -740,7 +740,7 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
#endif
gus->uart_enable = midi[dev];
- if ((err = snd_gf1_rawmidi_new(gus, 0, NULL)) < 0)
+ if ((err = snd_gf1_rawmidi_new(gus, 0)) < 0)
return err;
#ifndef SNDRV_STB
diff --git a/sound/isa/msnd/msnd.c b/sound/isa/msnd/msnd.c
index 1cee18fb28a8..835d4aa26761 100644
--- a/sound/isa/msnd/msnd.c
+++ b/sound/isa/msnd/msnd.c
@@ -679,8 +679,7 @@ static struct snd_pcm_ops snd_msnd_capture_ops = {
};
-int snd_msnd_pcm(struct snd_card *card, int device,
- struct snd_pcm **rpcm)
+int snd_msnd_pcm(struct snd_card *card, int device)
{
struct snd_msnd *chip = card->private_data;
struct snd_pcm *pcm;
@@ -696,9 +695,6 @@ int snd_msnd_pcm(struct snd_card *card, int device,
pcm->private_data = chip;
strcpy(pcm->name, "Hurricane");
-
- if (rpcm)
- *rpcm = pcm;
return 0;
}
EXPORT_SYMBOL(snd_msnd_pcm);
diff --git a/sound/isa/msnd/msnd.h b/sound/isa/msnd/msnd.h
index dbac3a42347b..5f3c7dcd9f9d 100644
--- a/sound/isa/msnd/msnd.h
+++ b/sound/isa/msnd/msnd.h
@@ -297,7 +297,7 @@ int snd_msnd_disable_irq(struct snd_msnd *chip);
void snd_msnd_dsp_halt(struct snd_msnd *chip, struct file *file);
int snd_msnd_DAPQ(struct snd_msnd *chip, int start);
int snd_msnd_DARQ(struct snd_msnd *chip, int start);
-int snd_msnd_pcm(struct snd_card *card, int device, struct snd_pcm **rpcm);
+int snd_msnd_pcm(struct snd_card *card, int device);
int snd_msndmidi_new(struct snd_card *card, int device);
void snd_msndmidi_input_read(void *mpu);
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 5016bf957f51..4c072666115d 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -582,7 +582,7 @@ static int snd_msnd_attach(struct snd_card *card)
if (err < 0)
goto err_release_region;
- err = snd_msnd_pcm(card, 0, NULL);
+ err = snd_msnd_pcm(card, 0);
if (err < 0) {
printk(KERN_ERR LOGNAME ": error creating new PCM device\n");
goto err_release_region;
@@ -627,8 +627,7 @@ static int snd_msnd_attach(struct snd_card *card)
return 0;
err_release_region:
- if (chip->mappedbase)
- iounmap(chip->mappedbase);
+ iounmap(chip->mappedbase);
release_mem_region(chip->base, BUFFSIZE);
release_region(chip->io, DSP_NUMIO);
free_irq(chip->irq, chip);
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index a219bc37816b..ae133633a420 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -26,6 +26,7 @@
#include <linux/pm.h>
#include <linux/pnp.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/wss.h>
#include <sound/mpu401.h>
@@ -33,8 +34,6 @@
#include <sound/initval.h>
#include <sound/tlv.h>
-#include <asm/io.h>
-
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Yamaha OPL3SA2+");
MODULE_LICENSE("GPL");
@@ -684,7 +683,7 @@ static int snd_opl3sa2_probe(struct snd_card *card, int dev)
return err;
}
chip->wss = wss;
- err = snd_wss_pcm(wss, 0, NULL);
+ err = snd_wss_pcm(wss, 0);
if (err < 0)
return err;
err = snd_wss_mixer(wss);
@@ -693,7 +692,7 @@ static int snd_opl3sa2_probe(struct snd_card *card, int dev)
err = snd_opl3sa2_mixer(card);
if (err < 0)
return err;
- err = snd_wss_timer(wss, 0, NULL);
+ err = snd_wss_timer(wss, 0);
if (err < 0)
return err;
if (fm_port[dev] >= 0x340 && fm_port[dev] < 0x400) {
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index c2ca681ac51b..3a9067db1a84 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -29,7 +29,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include <sound/core.h>
#include <sound/wss.h>
@@ -1270,8 +1270,6 @@ static int snd_miro_probe(struct snd_card *card)
int error;
struct snd_miro *miro = card->private_data;
struct snd_wss *codec;
- struct snd_timer *timer;
- struct snd_pcm *pcm;
struct snd_rawmidi *rmidi;
if (!miro->res_mc_base) {
@@ -1310,7 +1308,7 @@ static int snd_miro_probe(struct snd_card *card)
if (error < 0)
return error;
- error = snd_wss_pcm(codec, 0, &pcm);
+ error = snd_wss_pcm(codec, 0);
if (error < 0)
return error;
@@ -1318,11 +1316,11 @@ static int snd_miro_probe(struct snd_card *card)
if (error < 0)
return error;
- error = snd_wss_timer(codec, 0, &timer);
+ error = snd_wss_timer(codec, 0);
if (error < 0)
return error;
- miro->pcm = pcm;
+ miro->pcm = codec->pcm;
error = snd_miro_mixer(card, miro);
if (error < 0)
@@ -1356,8 +1354,8 @@ static int snd_miro_probe(struct snd_card *card)
strcpy(card->driver, "miro");
sprintf(card->longname, "%s: OPTi%s, %s at 0x%lx, irq %d, dma %d&%d",
- card->shortname, miro->name, pcm->name, miro->wss_base + 4,
- miro->irq, miro->dma1, miro->dma2);
+ card->shortname, miro->name, codec->pcm->name,
+ miro->wss_base + 4, miro->irq, miro->dma1, miro->dma2);
if (mpu_port <= 0 || mpu_port == SNDRV_AUTO_PORT)
rmidi = NULL;
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index c9b582848603..0a5266003786 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -29,7 +29,7 @@
#include <linux/delay.h>
#include <linux/pnp.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include <sound/core.h>
#include <sound/tlv.h>
@@ -820,10 +820,6 @@ static int snd_opti9xx_probe(struct snd_card *card)
int xdma2;
struct snd_opti9xx *chip = card->private_data;
struct snd_wss *codec;
-#ifdef CS4231
- struct snd_timer *timer;
-#endif
- struct snd_pcm *pcm;
struct snd_rawmidi *rmidi;
struct snd_hwdep *synth;
@@ -855,7 +851,7 @@ static int snd_opti9xx_probe(struct snd_card *card)
if (error < 0)
return error;
chip->codec = codec;
- error = snd_wss_pcm(codec, 0, &pcm);
+ error = snd_wss_pcm(codec, 0);
if (error < 0)
return error;
error = snd_wss_mixer(codec);
@@ -867,7 +863,7 @@ static int snd_opti9xx_probe(struct snd_card *card)
return error;
#endif
#ifdef CS4231
- error = snd_wss_timer(codec, 0, &timer);
+ error = snd_wss_timer(codec, 0);
if (error < 0)
return error;
#endif
@@ -884,11 +880,12 @@ static int snd_opti9xx_probe(struct snd_card *card)
sprintf(card->shortname, "OPTi %s", card->driver);
#if defined(CS4231) || defined(OPTi93X)
sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d&%d",
- card->shortname, pcm->name,
+ card->shortname, codec->pcm->name,
chip->wss_base + 4, irq, dma1, xdma2);
#else
sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d",
- card->shortname, pcm->name, chip->wss_base + 4, irq, dma1);
+ card->shortname, codec->pcm->name, chip->wss_base + 4, irq,
+ dma1);
#endif /* CS4231 || OPTi93X */
if (mpu_port <= 0 || mpu_port == SNDRV_AUTO_PORT)
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 45fcdff611f9..94c411299e5a 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -26,11 +26,11 @@
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/emu8000.h>
#include <sound/emu8000_reg.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/init.h>
#include <sound/control.h>
#include <sound/initval.h>
@@ -378,13 +378,12 @@ init_arrays(struct snd_emu8000 *emu)
static void
size_dram(struct snd_emu8000 *emu)
{
- int i, size, detected_size;
+ int i, size;
if (emu->dram_checked)
return;
size = 0;
- detected_size = 0;
/* write out a magic number */
snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_WRITE);
@@ -392,10 +391,19 @@ size_dram(struct snd_emu8000 *emu)
EMU8000_SMALW_WRITE(emu, EMU8000_DRAM_OFFSET);
EMU8000_SMLD_WRITE(emu, UNIQUE_ID1);
snd_emu8000_init_fm(emu); /* This must really be here and not 2 lines back even */
+ snd_emu8000_write_wait(emu);
- while (size < EMU8000_MAX_DRAM) {
+ /*
+ * Detect first 512 KiB. If a write succeeds at the beginning of a
+ * 512 KiB page we assume that the whole page is there.
+ */
+ EMU8000_SMALR_WRITE(emu, EMU8000_DRAM_OFFSET);
+ EMU8000_SMLD_READ(emu); /* discard stale data */
+ if (EMU8000_SMLD_READ(emu) != UNIQUE_ID1)
+ goto skip_detect; /* No RAM */
+ snd_emu8000_read_wait(emu);
- size += 512 * 1024; /* increment 512kbytes */
+ for (size = 512 * 1024; size < EMU8000_MAX_DRAM; size += 512 * 1024) {
/* Write a unique data on the test address.
* if the address is out of range, the data is written on
@@ -431,18 +439,9 @@ size_dram(struct snd_emu8000 *emu)
snd_emu8000_read_wait(emu);
/* Otherwise, it's valid memory. */
- detected_size = size + 512 * 1024;
- }
-
- /* Distinguish 512 KiB from 0. */
- if (detected_size == 0) {
- snd_emu8000_read_wait(emu);
- EMU8000_SMALR_WRITE(emu, EMU8000_DRAM_OFFSET);
- EMU8000_SMLD_READ(emu); /* discard stale data */
- if (EMU8000_SMLD_READ(emu) == UNIQUE_ID1)
- detected_size = 512 * 1024;
}
+skip_detect:
/* wait until FULL bit in SMAxW register is false */
for (i = 0; i < 10000; i++) {
if ((EMU8000_SMALW_READ(emu) & 0x80000000) == 0)
@@ -454,10 +453,10 @@ size_dram(struct snd_emu8000 *emu)
snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_CLOSE);
snd_emu8000_dma_chan(emu, 1, EMU8000_RAM_CLOSE);
- snd_printdd("EMU8000 [0x%lx]: %d Kb on-board memory detected\n",
- emu->port1, detected_size/1024);
+ pr_info("EMU8000 [0x%lx]: %d KiB on-board DRAM detected\n",
+ emu->port1, size/1024);
- emu->mem_size = detected_size;
+ emu->mem_size = size;
emu->dram_checked = 1;
}
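
size_dram() now settles the no-DRAM case up front, reads UNIQUE_ID1 back from the start of DRAM, and lets the probe loop begin at 512 KiB, so the old detected_size bookkeeping and the separate 0-versus-512 KiB fixup disappear. The loop body itself is elided by the diff context; below is a rough, self-contained sketch of the usual probing idea, with stand-in constants and a toy memory model in place of the driver's register macros and wait sequences:

#include <stdio.h>
#include <stdint.h>

#define DRAM_OFFSET	0x200000		/* stand-in for EMU8000_DRAM_OFFSET */
#define MAX_DRAM	(28 * 1024 * 1024)	/* stand-in for EMU8000_MAX_DRAM */
#define ID1		0xa5a5			/* stand-in for UNIQUE_ID1 */
#define ID2		0x9999			/* any value different from ID1 */

/*
 * Toy model of the sound-memory accessors: pretend the board has 8 MiB of
 * 16-bit words and that out-of-range accesses wrap back around.
 */
#define FAKE_WORDS	(8 * 1024 * 1024 / 2)
static uint16_t fake_dram[FAKE_WORDS];

static uint16_t dram_read(unsigned int offset)
{
	return fake_dram[(offset / 2) % FAKE_WORDS];
}

static void dram_write(unsigned int offset, uint16_t value)
{
	fake_dram[(offset / 2) % FAKE_WORDS] = value;
}

/* returns the detected on-board DRAM size in bytes */
static unsigned int probe_dram_size(void)
{
	unsigned int size;

	dram_write(DRAM_OFFSET, ID1);
	if (dram_read(DRAM_OFFSET) != ID1)
		return 0;			/* no DRAM fitted at all */

	/* a good write at a 512 KiB boundary implies the whole page exists */
	for (size = 512 * 1024; size < MAX_DRAM; size += 512 * 1024) {
		dram_write(DRAM_OFFSET + size, ID2);
		/* write did not stick: past the end of DRAM */
		if (dram_read(DRAM_OFFSET + size) != ID2)
			break;
		/* write aliased back onto the first page: also the end */
		if (dram_read(DRAM_OFFSET) != ID1)
			break;
	}
	return size;
}

int main(void)
{
	printf("detected %u KiB\n", probe_dram_size() / 1024);	/* 8192 here */
	return 0;
}
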
diff --git a/sound/isa/sb/emu8000_patch.c b/sound/isa/sb/emu8000_patch.c
index c99c6078be33..71d13c0bb746 100644
--- a/sound/isa/sb/emu8000_patch.c
+++ b/sound/isa/sb/emu8000_patch.c
@@ -20,7 +20,7 @@
*/
#include "emu8000_local.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/moduleparam.h>
static int emu8000_reset_addr;
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index 2f85c66f8e38..250fd0006b53 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -207,8 +207,7 @@ static void emu8k_pcm_timer_func(unsigned long data)
rec->last_ptr = ptr;
/* reprogram timer */
- rec->timer.expires = jiffies + 1;
- add_timer(&rec->timer);
+ mod_timer(&rec->timer, jiffies + 1);
/* update period */
if (rec->period_pos >= (int)rec->period_size) {
@@ -240,9 +239,7 @@ static int emu8k_pcm_open(struct snd_pcm_substream *subs)
runtime->private_data = rec;
spin_lock_init(&rec->timer_lock);
- init_timer(&rec->timer);
- rec->timer.function = emu8k_pcm_timer_func;
- rec->timer.data = (unsigned long)rec;
+ setup_timer(&rec->timer, emu8k_pcm_timer_func, (unsigned long)rec);
runtime->hw = emu8k_pcm_hw;
runtime->hw.buffer_bytes_max = emu->mem_size - LOOP_BLANK_SIZE * 3;
@@ -359,8 +356,7 @@ static void start_voice(struct snd_emu8k_pcm *rec, int ch)
/* start timer */
spin_lock_irqsave(&rec->timer_lock, flags);
if (! rec->timer_running) {
- rec->timer.expires = jiffies + 1;
- add_timer(&rec->timer);
+ mod_timer(&rec->timer, jiffies + 1);
rec->timer_running = 1;
}
spin_unlock_irqrestore(&rec->timer_lock, flags);
diff --git a/sound/isa/sb/emu8000_synth.c b/sound/isa/sb/emu8000_synth.c
index 95b39beb61c1..72332dfada9a 100644
--- a/sound/isa/sb/emu8000_synth.c
+++ b/sound/isa/sb/emu8000_synth.c
@@ -103,8 +103,7 @@ static int snd_emu8000_delete_device(struct snd_seq_device *dev)
hw = dev->driver_data;
if (hw->pcm)
snd_device_free(dev->card, hw->pcm);
- if (hw->emu)
- snd_emux_free(hw->emu);
+ snd_emux_free(hw->emu);
snd_util_memhdr_free(hw->memhdr);
hw->emu = NULL;
hw->memhdr = NULL;
diff --git a/sound/isa/sb/jazz16.c b/sound/isa/sb/jazz16.c
index 90d2eba549e9..6b4884d052a5 100644
--- a/sound/isa/sb/jazz16.c
+++ b/sound/isa/sb/jazz16.c
@@ -297,7 +297,7 @@ static int snd_jazz16_probe(struct device *devptr, unsigned int dev)
"Media Vision Jazz16 at 0x%lx, irq %d, dma8 %d, dma16 %d",
port[dev], xirq, xdma8, xdma16);
- err = snd_sb8dsp_pcm(chip, 0, NULL);
+ err = snd_sb8dsp_pcm(chip, 0);
if (err < 0)
goto err_free;
err = snd_sbmixer_new(chip);
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 3f694543a7ea..4a7d7c89808f 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -374,7 +374,7 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
if (! is_isapnp_selected(dev) && (err = snd_sb16dsp_configure(chip)) < 0)
return err;
- if ((err = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0)
+ if ((err = snd_sb16dsp_pcm(chip, 0)) < 0)
return err;
strcpy(card->driver,
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 72b10f4f3e70..8b2d6c6bfe97 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -33,7 +33,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/time.h>
@@ -860,19 +860,18 @@ static struct snd_pcm_ops snd_sb16_capture_ops = {
.pointer = snd_sb16_capture_pointer,
};
-int snd_sb16dsp_pcm(struct snd_sb * chip, int device, struct snd_pcm ** rpcm)
+int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
{
struct snd_card *card = chip->card;
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(card, "SB16 DSP", device, 1, 1, &pcm)) < 0)
return err;
sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff);
pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
pcm->private_data = chip;
+ chip->pcm = pcm;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops);
@@ -885,9 +884,6 @@ int snd_sb16dsp_pcm(struct snd_sb * chip, int device, struct snd_pcm ** rpcm)
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_isa_data(),
64*1024, 128*1024);
-
- if (rpcm)
- *rpcm = pcm;
return 0;
}
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index 6c32b3aa34af..b8e2391c33ff 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -157,7 +157,7 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
goto _err;
}
- if ((err = snd_sb8dsp_pcm(chip, 0, NULL)) < 0)
+ if ((err = snd_sb8dsp_pcm(chip, 0)) < 0)
goto _err;
if ((err = snd_sbmixer_new(chip)) < 0)
@@ -182,7 +182,7 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
goto _err;
}
- if ((err = snd_sb8dsp_midi(chip, 0, NULL)) < 0)
+ if ((err = snd_sb8dsp_midi(chip, 0)) < 0)
goto _err;
strcpy(card->driver, chip->hardware == SB_HW_PRO ? "SB Pro" : "SB8");
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 24d4121ab0e0..9043397fe62f 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -30,7 +30,7 @@
* Cleaned up and rewrote lowlevel routines.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/time.h>
@@ -594,15 +594,13 @@ static struct snd_pcm_ops snd_sb8_capture_ops = {
.pointer = snd_sb8_capture_pointer,
};
-int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm)
+int snd_sb8dsp_pcm(struct snd_sb *chip, int device)
{
struct snd_card *card = chip->card;
struct snd_pcm *pcm;
int err;
size_t max_prealloc = 64 * 1024;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(card, "SB8 DSP", device, 1, 1, &pcm)) < 0)
return err;
sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff);
@@ -618,8 +616,6 @@ int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm)
snd_dma_isa_data(),
64*1024, max_prealloc);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
diff --git a/sound/isa/sb/sb8_midi.c b/sound/isa/sb/sb8_midi.c
index 988a8b73475f..d551c50e549f 100644
--- a/sound/isa/sb/sb8_midi.c
+++ b/sound/isa/sb/sb8_midi.c
@@ -26,7 +26,7 @@
* Added full duplex UART mode for DSP version 2.0 and later.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/sb.h>
@@ -216,8 +216,7 @@ static void snd_sb8dsp_midi_output_timer(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&chip->open_lock, flags);
- chip->midi_timer.expires = 1 + jiffies;
- add_timer(&chip->midi_timer);
+ mod_timer(&chip->midi_timer, 1 + jiffies);
spin_unlock_irqrestore(&chip->open_lock, flags);
snd_sb8dsp_midi_output_write(substream);
}
@@ -231,11 +230,10 @@ static void snd_sb8dsp_midi_output_trigger(struct snd_rawmidi_substream *substre
spin_lock_irqsave(&chip->open_lock, flags);
if (up) {
if (!(chip->open & SB_OPEN_MIDI_OUTPUT_TRIGGER)) {
- init_timer(&chip->midi_timer);
- chip->midi_timer.function = snd_sb8dsp_midi_output_timer;
- chip->midi_timer.data = (unsigned long) substream;
- chip->midi_timer.expires = 1 + jiffies;
- add_timer(&chip->midi_timer);
+ setup_timer(&chip->midi_timer,
+ snd_sb8dsp_midi_output_timer,
+ (unsigned long) substream);
+ mod_timer(&chip->midi_timer, 1 + jiffies);
chip->open |= SB_OPEN_MIDI_OUTPUT_TRIGGER;
}
} else {
@@ -263,13 +261,11 @@ static struct snd_rawmidi_ops snd_sb8dsp_midi_input =
.trigger = snd_sb8dsp_midi_input_trigger,
};
-int snd_sb8dsp_midi(struct snd_sb *chip, int device, struct snd_rawmidi ** rrawmidi)
+int snd_sb8dsp_midi(struct snd_sb *chip, int device)
{
struct snd_rawmidi *rmidi;
int err;
- if (rrawmidi)
- *rrawmidi = NULL;
if ((err = snd_rawmidi_new(chip->card, "SB8 MIDI", device, 1, 1, &rmidi)) < 0)
return err;
strcpy(rmidi->name, "SB8 MIDI");
@@ -280,7 +276,5 @@ int snd_sb8dsp_midi(struct snd_sb *chip, int device, struct snd_rawmidi ** rrawm
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = chip;
chip->rmidi = rmidi;
- if (rrawmidi)
- *rrawmidi = rmidi;
return 0;
}
diff --git a/sound/isa/sb/sb_common.c b/sound/isa/sb/sb_common.c
index f22b4480828e..787a4ade4afd 100644
--- a/sound/isa/sb/sb_common.c
+++ b/sound/isa/sb/sb_common.c
@@ -26,11 +26,11 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/sb.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <asm/dma.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
diff --git a/sound/isa/sb/sb_mixer.c b/sound/isa/sb/sb_mixer.c
index e403334a19ad..add1d3f99609 100644
--- a/sound/isa/sb/sb_mixer.c
+++ b/sound/isa/sb/sb_mixer.c
@@ -19,7 +19,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <sound/core.h>
diff --git a/sound/isa/sc6000.c b/sound/isa/sc6000.c
index 15a152eaa2e8..51cfa7615f72 100644
--- a/sound/isa/sc6000.c
+++ b/sound/isa/sc6000.c
@@ -625,7 +625,7 @@ static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
if (err < 0)
goto err_unmap2;
- err = snd_wss_pcm(chip, 0, NULL);
+ err = snd_wss_pcm(chip, 0);
if (err < 0) {
snd_printk(KERN_ERR PFX
"error creating new WSS PCM device\n");
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 44405df7d4be..7b248cdf06e2 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/isa.h>
#include <linux/delay.h>
#include <linux/firmware.h>
@@ -877,7 +878,6 @@ static int create_ad1845(struct snd_card *card, unsigned port,
codec_type, WSS_HWSHARE_DMA1, &chip);
if (!err) {
unsigned long flags;
- struct snd_pcm *pcm;
if (sscape->type != SSCAPE_VIVO) {
/*
@@ -893,7 +893,7 @@ static int create_ad1845(struct snd_card *card, unsigned port,
}
- err = snd_wss_pcm(chip, 0, &pcm);
+ err = snd_wss_pcm(chip, 0);
if (err < 0) {
snd_printk(KERN_ERR "sscape: No PCM device "
"for AD1845 chip\n");
@@ -907,7 +907,7 @@ static int create_ad1845(struct snd_card *card, unsigned port,
goto _error;
}
if (chip->hardware != WSS_HW_AD1848) {
- err = snd_wss_timer(chip, 0, NULL);
+ err = snd_wss_timer(chip, 0);
if (err < 0) {
snd_printk(KERN_ERR "sscape: No timer device "
"for AD1845 chip\n");
diff --git a/sound/isa/wavefront/wavefront.c b/sound/isa/wavefront/wavefront.c
index bfbf38cf9841..a0987a57c8a9 100644
--- a/sound/isa/wavefront/wavefront.c
+++ b/sound/isa/wavefront/wavefront.c
@@ -380,11 +380,11 @@ snd_wavefront_probe (struct snd_card *card, int dev)
return err;
}
- err = snd_wss_pcm(chip, 0, NULL);
+ err = snd_wss_pcm(chip, 0);
if (err < 0)
return err;
- err = snd_wss_timer(chip, 0, NULL);
+ err = snd_wss_timer(chip, 0);
if (err < 0)
return err;
diff --git a/sound/isa/wavefront/wavefront_fx.c b/sound/isa/wavefront/wavefront_fx.c
index b77883c7ee76..b5a19708473a 100644
--- a/sound/isa/wavefront/wavefront_fx.c
+++ b/sound/isa/wavefront/wavefront_fx.c
@@ -16,7 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
diff --git a/sound/isa/wavefront/wavefront_midi.c b/sound/isa/wavefront/wavefront_midi.c
index 7dc991682297..8a80fc6a616b 100644
--- a/sound/isa/wavefront/wavefront_midi.c
+++ b/sound/isa/wavefront/wavefront_midi.c
@@ -47,7 +47,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
@@ -356,8 +356,7 @@ static void snd_wavefront_midi_output_timer(unsigned long data)
unsigned long flags;
spin_lock_irqsave (&midi->virtual, flags);
- midi->timer.expires = 1 + jiffies;
- add_timer(&midi->timer);
+ mod_timer(&midi->timer, 1 + jiffies);
spin_unlock_irqrestore (&midi->virtual, flags);
snd_wavefront_midi_output_write(card);
}
@@ -384,11 +383,10 @@ static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *subs
if (up) {
if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) {
if (!midi->istimer) {
- init_timer(&midi->timer);
- midi->timer.function = snd_wavefront_midi_output_timer;
- midi->timer.data = (unsigned long) substream->rmidi->card->private_data;
- midi->timer.expires = 1 + jiffies;
- add_timer(&midi->timer);
+ setup_timer(&midi->timer,
+ snd_wavefront_midi_output_timer,
+ (unsigned long) substream->rmidi->card->private_data);
+ mod_timer(&midi->timer, 1 + jiffies);
}
midi->istimer++;
midi->mode[mpu] |= MPU401_MODE_OUTPUT_TRIGGER;
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index e5db001363ee..33f5ec14fcfa 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -20,7 +20,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 347bb1bda110..913b731d2236 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -31,12 +31,12 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/wss.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
@@ -1923,7 +1923,7 @@ static struct snd_pcm_ops snd_wss_capture_ops = {
.pointer = snd_wss_capture_pointer,
};
-int snd_wss_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm)
+int snd_wss_pcm(struct snd_wss *chip, int device)
{
struct snd_pcm *pcm;
int err;
@@ -1949,8 +1949,6 @@ int snd_wss_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm)
64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
chip->pcm = pcm;
- if (rpcm)
- *rpcm = pcm;
return 0;
}
EXPORT_SYMBOL(snd_wss_pcm);
@@ -1961,7 +1959,7 @@ static void snd_wss_timer_free(struct snd_timer *timer)
chip->timer = NULL;
}
-int snd_wss_timer(struct snd_wss *chip, int device, struct snd_timer **rtimer)
+int snd_wss_timer(struct snd_wss *chip, int device)
{
struct snd_timer *timer;
struct snd_timer_id tid;
@@ -1980,8 +1978,6 @@ int snd_wss_timer(struct snd_wss *chip, int device, struct snd_timer **rtimer)
timer->private_free = snd_wss_timer_free;
timer->hw = snd_wss_timer_table;
chip->timer = timer;
- if (rtimer)
- *rtimer = timer;
return 0;
}
EXPORT_SYMBOL(snd_wss_timer);
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 13c214466d3b..1c56bf58eff9 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -851,7 +851,7 @@ static int __init AtaIrqInit(void)
st_mfp.tim_dt_a = 1; /* Cause interrupt after first event. */
st_mfp.tim_ct_a = 8; /* Turn on event counting. */
/* Register interrupt handler. */
- if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound",
+ if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, 0, "DMA sound",
AtaInterrupt))
return 0;
st_mfp.int_en_a |= 0x20; /* Turn interrupt on. */
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index c23f9f95bfa5..a8ceef8d1a8d 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -675,7 +675,7 @@ static void dsp_write_flush(void)
timeout);
clear_bit(F_WRITEFLUSH, &dev.flags);
if (!signal_pending(current)) {
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(get_play_delay_jiffies(DAP_BUFF_SIZE));
}
clear_bit(F_WRITING, &dev.flags);
@@ -1288,7 +1288,7 @@ static int __init calibrate_adc(WORD srate)
& ~0x0001, dev.SMA + SMA_wCurrHostStatusFlags);
if (msnd_send_word(&dev, 0, 0, HDEXAR_CAL_A_TO_D) == 0 &&
chk_send_dsp_cmd(&dev, HDEX_AUX_REQ) == 0) {
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 3);
return 0;
}
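
Note: the two msnd_pinnacle hunks above (and the swarm_cs4297a hunk below)
replace direct writes to current->state with __set_current_state(), the
sanctioned helper for changing the task state around schedule_timeout().
Minimal sketch of the idiom:

	#include <linux/sched.h>

	static void wait_a_bit(unsigned long timeout_jiffies)
	{
		/* was: current->state = TASK_INTERRUPTIBLE; */
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(timeout_jiffies);
	}
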
diff --git a/sound/oss/pss.c b/sound/oss/pss.c
index ca0d6e9f49f5..81314f9e2ccb 100644
--- a/sound/oss/pss.c
+++ b/sound/oss/pss.c
@@ -1228,7 +1228,7 @@ static void __exit cleanup_pss(void)
{
if(!pss_no_sound)
{
- if(fw_load && pss_synth)
+ if (fw_load)
vfree(pss_synth);
if(pssmss)
unload_pss_mss(&cfg2);
diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
index a33e8ce8085b..213a416b6e0b 100644
--- a/sound/oss/swarm_cs4297a.c
+++ b/sound/oss/swarm_cs4297a.c
@@ -1654,7 +1654,7 @@ static int drain_dac(struct cs4297a_state *s, int nonblock)
s->dma_dac.hwptr = s->dma_dac.swptr = hwptr;
spin_unlock_irqrestore(&s->lock, flags);
remove_wait_queue(&s->dma_dac.wait, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
return 0;
}
diff --git a/sound/oss/trix.c b/sound/oss/trix.c
index 944e0c015485..3c494dc93b93 100644
--- a/sound/oss/trix.c
+++ b/sound/oss/trix.c
@@ -487,7 +487,7 @@ static int __init init_trix(void)
static void __exit cleanup_trix(void)
{
- if (fw_load && trix_boot)
+ if (fw_load)
vfree(trix_boot);
if (sb)
unload_trix_sb(&cfg2);
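
Note: the pss.c and trix.c cleanups drop the pointer test because vfree(),
like kfree(), is a no-op when passed NULL:

	/* before */
	if (fw_load && trix_boot)
		vfree(trix_boot);

	/* after: the NULL check is redundant */
	if (fw_load)
		vfree(trix_boot);
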
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index 29604a239c44..99b64cb3cef8 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -44,6 +44,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -52,7 +53,6 @@
#include <sound/initval.h>
#include <sound/info.h>
-#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
@@ -893,9 +893,7 @@ snd_harmony_free(struct snd_harmony *h)
if (h->irq >= 0)
free_irq(h->irq, h);
- if (h->iobase)
- iounmap(h->iobase);
-
+ iounmap(h->iobase);
kfree(h);
return 0;
}
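
Note: here and in several PCI drivers below, the "if (ptr) iounmap(ptr);"
guards are dropped; on these paths the mapping is either valid or NULL, and
iounmap() tolerates a NULL argument on the architectures involved. Trimmed
sketch of the resulting free path (hypothetical "foo" driver, fields assumed):

	#include <linux/io.h>
	#include <linux/interrupt.h>
	#include <linux/slab.h>

	static int foo_free(struct foo *chip)
	{
		if (chip->irq >= 0)
			free_irq(chip->irq, chip);
		iounmap(chip->iobase);	/* was: if (chip->iobase) iounmap(...) */
		kfree(chip);
		return 0;
	}
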
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 50dd0086cfb1..edfc1b8d553e 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -793,6 +793,15 @@ config SND_RME9652
To compile this driver as a module, choose M here: the module
will be called snd-rme9652.
+config SND_SE6X
+ tristate "Studio Evolution SE6X"
+ depends on SND_OXYGEN=n && SND_VIRTUOSO=n # PCI ID conflict
+ select SND_OXYGEN_LIB
+ select SND_PCM
+ select SND_MPU401_UART
+ help
+ Say Y or M here only if you actually have this sound card.
+
config SND_SIS7019
tristate "SiS 7019 Audio Accelerator"
depends on X86_32
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 1610c38337af..850a8c984c25 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -40,14 +40,13 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/ac97_codec.h>
-#include <asm/io.h>
-
#include "ad1889.h"
#include "ac97/ac97_id.h"
@@ -623,14 +622,11 @@ snd_ad1889_interrupt(int irq, void *dev_id)
}
static int
-snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm)
+snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device)
{
int err;
struct snd_pcm *pcm;
- if (rpcm)
- *rpcm = NULL;
-
err = snd_pcm_new(chip->card, chip->card->driver, device, 1, 1, &pcm);
if (err < 0)
return err;
@@ -658,9 +654,6 @@ snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm)
return err;
}
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
@@ -859,12 +852,9 @@ snd_ad1889_free(struct snd_ad1889 *chip)
free_irq(chip->irq, chip);
skip_hw:
- if (chip->iobase)
- iounmap(chip->iobase);
-
+ iounmap(chip->iobase);
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
-
kfree(chip);
return 0;
}
@@ -1016,7 +1006,7 @@ snd_ad1889_probe(struct pci_dev *pci,
if (err < 0)
goto free_and_ret;
- err = snd_ad1889_pcm_init(chip, 0, NULL);
+ err = snd_ad1889_pcm_init(chip, 0);
if (err < 0)
goto free_and_ret;
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index af89e42b2160..c8d499575c01 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -25,7 +25,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1873,7 +1873,6 @@ static int snd_ali_mixer(struct snd_ali *codec)
#ifdef CONFIG_PM_SLEEP
static int ali_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ali *chip = card->private_data;
struct snd_ali_image *im;
@@ -1914,16 +1913,11 @@ static int ali_suspend(struct device *dev)
outl(0xffffffff, ALI_REG(chip, ALI_STOP));
spin_unlock_irq(&chip->reg_lock);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int ali_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ali *chip = card->private_data;
struct snd_ali_image *im;
@@ -1933,15 +1927,6 @@ static int ali_resume(struct device *dev)
if (!im)
return 0;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < ALI_CHANNELS; i++) {
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index 7bb6ac565107..57e034f208dc 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -37,8 +37,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -728,35 +727,20 @@ static int snd_als300_create(struct snd_card *card,
#ifdef CONFIG_PM_SLEEP
static int snd_als300_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_als300 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_als300_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_als300 *chip = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_als300_init(chip);
snd_ac97_resume(chip->ac97);
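
Note: the suspend/resume hunks in these PCI drivers drop the manual
pci_save_state()/pci_set_power_state()/pci_enable_device() calls; with the
callbacks registered through dev_pm_ops (e.g. SIMPLE_DEV_PM_OPS), the PCI core
performs those standard power transitions itself. Trimmed sketch of what a
callback pair reduces to ("foo" and foo_hw_reinit() are hypothetical):

	static int foo_suspend(struct device *dev)
	{
		struct snd_card *card = dev_get_drvdata(dev);
		struct foo *chip = card->private_data;

		snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
		snd_pcm_suspend_all(chip->pcm);	/* device-specific work only */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct snd_card *card = dev_get_drvdata(dev);
		struct foo *chip = card->private_data;

		foo_hw_reinit(chip);
		snd_power_change_state(card, SNDRV_CTL_POWER_D0);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);
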
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index d3e6424ee656..a3dea464134d 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -65,7 +65,7 @@
* - power management? (card can do voice wakeup according to datasheet!!)
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/gameport.h>
@@ -988,7 +988,6 @@ static void snd_card_als4000_remove(struct pci_dev *pci)
#ifdef CONFIG_PM_SLEEP
static int snd_als4000_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_card_als4000 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
@@ -997,29 +996,15 @@ static int snd_als4000_suspend(struct device *dev)
snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_als4000_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_card_als4000 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_als4000_configure(chip);
snd_sbdsp_reset(chip);
snd_sbmixer_resume(chip);
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index e9273fb2a505..e5cd7be85355 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -540,9 +540,8 @@ static void snd_card_asihpi_pcm_timer_start(struct snd_pcm_substream *
expiry = HZ / 200;
expiry = max(expiry, 1); /* don't let it be zero! */
- dpcm->timer.expires = jiffies + expiry;
+ mod_timer(&dpcm->timer, jiffies + expiry);
dpcm->respawn_timer = 1;
- add_timer(&dpcm->timer);
}
static void snd_card_asihpi_pcm_timer_stop(struct snd_pcm_substream *substream)
@@ -1064,9 +1063,8 @@ static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
If internal and other stream playing, can't switch
*/
- init_timer(&dpcm->timer);
- dpcm->timer.data = (unsigned long) dpcm;
- dpcm->timer.function = snd_card_asihpi_timer_function;
+ setup_timer(&dpcm->timer, snd_card_asihpi_timer_function,
+ (unsigned long) dpcm);
dpcm->substream = substream;
runtime->private_data = dpcm;
runtime->private_free = snd_card_asihpi_runtime_free;
@@ -1246,9 +1244,8 @@ static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
if (err)
return -EIO;
- init_timer(&dpcm->timer);
- dpcm->timer.data = (unsigned long) dpcm;
- dpcm->timer.function = snd_card_asihpi_timer_function;
+ setup_timer(&dpcm->timer, snd_card_asihpi_timer_function,
+ (unsigned long) dpcm);
dpcm->substream = substream;
runtime->private_data = dpcm;
runtime->private_free = snd_card_asihpi_runtime_free;
@@ -2832,14 +2829,11 @@ static int snd_asihpi_hpi_ioctl(struct snd_hwdep *hw, struct file *file,
/* results in /dev/snd/hwC#D0 file for each card with index #
also /proc/asound/hwdep will contain '#-00: asihpi (HPI) for each card'
*/
-static int snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi,
- int device, struct snd_hwdep **rhwdep)
+static int snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi, int device)
{
struct snd_hwdep *hw;
int err;
- if (rhwdep)
- *rhwdep = NULL;
err = snd_hwdep_new(asihpi->card, "HPI", device, &hw);
if (err < 0)
return err;
@@ -2849,8 +2843,6 @@ static int snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi,
hw->ops.ioctl = snd_asihpi_hpi_ioctl;
hw->ops.release = snd_asihpi_hpi_release;
hw->private_data = asihpi;
- if (rhwdep)
- *rhwdep = hw;
return 0;
}
@@ -2993,7 +2985,7 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
/* always create, can be enabled or disabled dynamically
by enable_hwdep module param*/
- snd_asihpi_hpi_new(asihpi, 0, NULL);
+ snd_asihpi_hpi_new(asihpi, 0);
strcpy(card->driver, "ASIHPI");
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index 2414d7a2239d..2d6364825d4d 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -47,7 +47,7 @@
/* operational/messaging errors */
#define HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT 901
-
+#define HPI6000_ERROR_RESP_GET_LEN 902
#define HPI6000_ERROR_MSG_RESP_GET_RESP_ACK 903
#define HPI6000_ERROR_MSG_GET_ADR 904
#define HPI6000_ERROR_RESP_GET_ADR 905
@@ -1365,7 +1365,10 @@ static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
length = hpi_read_word(pdo, HPI_HIF_ADDR(length));
} while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout);
if (!timeout)
- length = sizeof(struct hpi_response);
+ return HPI6000_ERROR_RESP_GET_LEN;
+
+ if (length > phr->size)
+ return HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;
/* get the response */
p_data = (u32 *)phr;
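
Note: the hpi6000 hunk turns the old silent fallback into explicit error
returns and checks the DSP-reported response length against the caller's
buffer before the copy. Generic sketch of the idiom (names are placeholders,
not HPI API):

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	static int copy_response(void *dst, size_t dst_size,
				 const void *src, size_t reported_len)
	{
		if (reported_len > dst_size)
			return -EMSGSIZE;	/* device-reported size is untrusted */
		memcpy(dst, src, reported_len);
		return 0;
	}
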
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 6aa677e60555..6610bd096fc9 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -28,7 +28,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/module.h>
@@ -153,6 +153,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
goto out;
}
+ res_max_size = min_t(size_t, res_max_size, sizeof(*hr));
+
switch (hm->h.function) {
case HPI_SUBSYS_CREATE_ADAPTER:
case HPI_ADAPTER_DELETE:
@@ -539,10 +541,8 @@ void asihpi_adapter_remove(struct pci_dev *pci_dev)
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
/* unmap PCI memory space, mapped during device init. */
- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
- if (pci.ap_mem_base[idx])
- iounmap(pci.ap_mem_base[idx]);
- }
+ for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; ++idx)
+ iounmap(pci.ap_mem_base[idx]);
if (pa->irq)
free_irq(pa->irq, pa);
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 9c1c4452a8ee..d5f15c9bbeda 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -19,7 +19,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1474,7 +1474,6 @@ static int snd_atiixp_mixer_new(struct atiixp *chip, int clock,
*/
static int snd_atiixp_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp *chip = card->private_data;
int i;
@@ -1492,29 +1491,15 @@ static int snd_atiixp_suspend(struct device *dev)
snd_ac97_suspend(chip->ac97[i]);
snd_atiixp_aclink_down(chip);
snd_atiixp_chip_stop(chip);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_atiixp_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_atiixp_aclink_reset(chip);
snd_atiixp_chip_start(chip);
@@ -1585,8 +1570,7 @@ static int snd_atiixp_free(struct atiixp *chip)
__hw_end:
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->remap_addr)
- iounmap(chip->remap_addr);
+ iounmap(chip->remap_addr);
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip);
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index b2f63e0727de..0a38e08164ab 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -19,7 +19,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1120,7 +1120,6 @@ static int snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
*/
static int snd_atiixp_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp_modem *chip = card->private_data;
int i;
@@ -1132,29 +1131,15 @@ static int snd_atiixp_suspend(struct device *dev)
snd_ac97_suspend(chip->ac97[i]);
snd_atiixp_aclink_down(chip);
snd_atiixp_chip_stop(chip);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_atiixp_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp_modem *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_atiixp_aclink_reset(chip);
snd_atiixp_chip_start(chip);
@@ -1211,8 +1196,7 @@ static int snd_atiixp_free(struct atiixp_modem *chip)
__hw_end:
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->remap_addr)
- iounmap(chip->remap_addr);
+ iounmap(chip->remap_addr);
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip);
diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h
index 3a8fefefea77..bcc648bf6478 100644
--- a/sound/pci/au88x0/au88x0.h
+++ b/sound/pci/au88x0/au88x0.h
@@ -17,9 +17,8 @@
#ifndef __SOUND_AU88X0_H
#define __SOUND_AU88X0_H
-#ifdef __KERNEL__
#include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
@@ -27,7 +26,6 @@
#include <sound/hwdep.h>
#include <sound/ac97_codec.h>
#include <sound/tlv.h>
-#endif
#ifndef CHIP_AU8820
#include "au88x0_eq.h"
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index e1cf01949fda..8d2fee7b33bd 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -229,9 +229,7 @@ static int snd_aw2_dev_free(struct snd_device *device)
if (chip->irq >= 0)
free_irq(chip->irq, (void *)chip);
/* release the i/o ports & memory */
- if (chip->iobase_virt)
- iounmap(chip->iobase_virt);
-
+ iounmap(chip->iobase_virt);
pci_release_regions(chip->pci);
/* disable the PCI entry */
pci_disable_device(chip->pci);
diff --git a/sound/pci/aw2/aw2-saa7146.c b/sound/pci/aw2/aw2-saa7146.c
index 6d24e9536777..1d7890459334 100644
--- a/sound/pci/aw2/aw2-saa7146.c
+++ b/sound/pci/aw2/aw2-saa7146.c
@@ -27,7 +27,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index fdbb9c05c77b..a40a2b4c8fd7 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -179,7 +179,7 @@
* - use MMIO (memory-mapped I/O)? Slightly faster access, e.g. for gameport.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/bug.h> /* WARN_ONCE */
#include <linux/pci.h>
@@ -2694,7 +2694,6 @@ snd_azf3328_resume_ac97(const struct snd_azf3328 *chip)
static int
snd_azf3328_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_azf3328 *chip = card->private_data;
u16 *saved_regs_ctrl_u16;
@@ -2720,29 +2719,15 @@ snd_azf3328_suspend(struct device *dev)
ARRAY_SIZE(chip->saved_regs_mpu), chip->saved_regs_mpu);
snd_azf3328_suspend_regs(chip, chip->opl3_io,
ARRAY_SIZE(chip->saved_regs_opl3), chip->saved_regs_opl3);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int
snd_azf3328_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
const struct snd_azf3328 *chip = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_azf3328_resume_regs(chip, chip->saved_regs_game, chip->game_io,
ARRAY_SIZE(chip->saved_regs_game));
snd_azf3328_resume_regs(chip, chip->saved_regs_mpu, chip->mpu_io,
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 058b9973c09c..5925b7170e25 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitops.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -690,8 +690,7 @@ static int snd_bt87x_free(struct snd_bt87x *chip)
snd_bt87x_stop(chip);
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->mmio)
- iounmap(chip->mmio);
+ iounmap(chip->mmio);
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip);
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 96af33965b51..dd75b7536fa2 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1910,7 +1910,6 @@ static void snd_ca0106_remove(struct pci_dev *pci)
#ifdef CONFIG_PM_SLEEP
static int snd_ca0106_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ca0106 *chip = card->private_data;
int i;
@@ -1923,30 +1922,15 @@ static int snd_ca0106_suspend(struct device *dev)
snd_ca0106_mixer_suspend(chip);
ca0106_stop_chip(chip);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_ca0106_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ca0106 *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
-
- if (pci_enable_device(pci) < 0) {
- snd_card_disconnect(card);
- return -EIO;
- }
-
- pci_set_master(pci);
-
ca0106_init_chip(chip, 1);
if (chip->details->ac97)
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index 68c0eb0a2807..025805cba779 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -70,7 +70,7 @@
#include <sound/ac97_codec.h>
#include <sound/info.h>
#include <sound/tlv.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "ca0106.h"
diff --git a/sound/pci/ca0106/ca0106_proc.c b/sound/pci/ca0106/ca0106_proc.c
index 4f9c2821bb31..2c5c28adbefd 100644
--- a/sound/pci/ca0106/ca0106_proc.c
+++ b/sound/pci/ca0106/ca0106_proc.c
@@ -64,13 +64,13 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/info.h>
#include <sound/asoundef.h>
-#include <asm/io.h>
#include "ca0106.h"
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 85ed40339db9..1d0f2cad2f5a 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -20,7 +20,7 @@
/* Does not work. Warning may block system in capture mode */
/* #define USE_VAR48KRATE */
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -3347,7 +3347,6 @@ static unsigned char saved_mixers[] = {
static int snd_cmipci_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct cmipci *cm = card->private_data;
int i;
@@ -3366,29 +3365,15 @@ static int snd_cmipci_suspend(struct device *dev)
/* disable ints */
snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_cmipci_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct cmipci *cm = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
/* reset / initialize to a sane state */
snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0);
snd_cmipci_ch_reset(cm, CM_CH_PLAY);
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 4c49b5c8a7b3..c296fd0dbc9c 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -19,7 +19,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -973,14 +973,11 @@ static struct snd_pcm_ops snd_cs4281_capture_ops = {
.pointer = snd_cs4281_pointer,
};
-static int snd_cs4281_pcm(struct cs4281 *chip, int device,
- struct snd_pcm **rpcm)
+static int snd_cs4281_pcm(struct cs4281 *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(chip->card, "CS4281", device, 1, 1, &pcm);
if (err < 0)
return err;
@@ -996,8 +993,6 @@ static int snd_cs4281_pcm(struct cs4281 *chip, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 512*1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -1321,10 +1316,8 @@ static int snd_cs4281_free(struct cs4281 *chip)
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->ba0)
- iounmap(chip->ba0);
- if (chip->ba1)
- iounmap(chip->ba1);
+ iounmap(chip->ba0);
+ iounmap(chip->ba1);
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
@@ -1788,14 +1781,11 @@ static struct snd_rawmidi_ops snd_cs4281_midi_input =
.trigger = snd_cs4281_midi_input_trigger,
};
-static int snd_cs4281_midi(struct cs4281 *chip, int device,
- struct snd_rawmidi **rrawmidi)
+static int snd_cs4281_midi(struct cs4281 *chip, int device)
{
struct snd_rawmidi *rmidi;
int err;
- if (rrawmidi)
- *rrawmidi = NULL;
if ((err = snd_rawmidi_new(chip->card, "CS4281", device, 1, 1, &rmidi)) < 0)
return err;
strcpy(rmidi->name, "CS4281");
@@ -1804,8 +1794,6 @@ static int snd_cs4281_midi(struct cs4281 *chip, int device,
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = chip;
chip->rmidi = rmidi;
- if (rrawmidi)
- *rrawmidi = rmidi;
return 0;
}
@@ -1941,11 +1929,11 @@ static int snd_cs4281_probe(struct pci_dev *pci,
snd_card_free(card);
return err;
}
- if ((err = snd_cs4281_pcm(chip, 0, NULL)) < 0) {
+ if ((err = snd_cs4281_pcm(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_cs4281_midi(chip, 0, NULL)) < 0) {
+ if ((err = snd_cs4281_midi(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
@@ -2008,7 +1996,6 @@ static int saved_regs[SUSPEND_REGISTERS] = {
static int cs4281_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct cs4281 *chip = card->private_data;
u32 ulCLK;
@@ -2047,30 +2034,16 @@ static int cs4281_suspend(struct device *dev)
ulCLK = snd_cs4281_peekBA0(chip, BA0_CLKCR1);
ulCLK &= ~CLKCR1_CKRA;
snd_cs4281_pokeBA0(chip, BA0_CLKCR1, ulCLK);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int cs4281_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct cs4281 *chip = card->private_data;
unsigned int i;
u32 ulCLK;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
ulCLK = snd_cs4281_peekBA0(chip, BA0_CLKCR1);
ulCLK |= CLKCR1_CKRA;
snd_cs4281_pokeBA0(chip, BA0_CLKCR1, ulCLK);
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index 6a6858c07826..655fbea1692c 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -100,16 +100,16 @@ static int snd_card_cs46xx_probe(struct pci_dev *pci,
}
card->private_data = chip;
chip->accept_valid = mmap_valid[dev];
- if ((err = snd_cs46xx_pcm(chip, 0, NULL)) < 0) {
+ if ((err = snd_cs46xx_pcm(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
- if ((err = snd_cs46xx_pcm_rear(chip,1, NULL)) < 0) {
+ if ((err = snd_cs46xx_pcm_rear(chip, 1)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_cs46xx_pcm_iec958(chip,2,NULL)) < 0) {
+ if ((err = snd_cs46xx_pcm_iec958(chip, 2)) < 0) {
snd_card_free(card);
return err;
}
@@ -120,13 +120,13 @@ static int snd_card_cs46xx_probe(struct pci_dev *pci,
}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
if (chip->nr_ac97_codecs ==2) {
- if ((err = snd_cs46xx_pcm_center_lfe(chip,3,NULL)) < 0) {
+ if ((err = snd_cs46xx_pcm_center_lfe(chip, 3)) < 0) {
snd_card_free(card);
return err;
}
}
#endif
- if ((err = snd_cs46xx_midi(chip, 0, NULL)) < 0) {
+ if ((err = snd_cs46xx_midi(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/cs46xx/cs46xx.h b/sound/pci/cs46xx/cs46xx.h
index c49a082c378b..9c9f89a8be5f 100644
--- a/sound/pci/cs46xx/cs46xx.h
+++ b/sound/pci/cs46xx/cs46xx.h
@@ -1737,12 +1737,12 @@ int snd_cs46xx_create(struct snd_card *card,
struct snd_cs46xx **rcodec);
extern const struct dev_pm_ops snd_cs46xx_pm;
-int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm);
-int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm);
-int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm);
-int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm);
+int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device);
+int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device);
+int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device);
+int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device);
int snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device);
-int snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rmidi);
+int snd_cs46xx_midi(struct snd_cs46xx *chip, int device);
int snd_cs46xx_start_dsp(struct snd_cs46xx *chip);
int snd_cs46xx_gameport(struct snd_cs46xx *chip);
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 32b44f25b5c8..8d74004b1ed2 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -57,6 +57,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -65,8 +66,6 @@
#include <sound/pcm_params.h>
#include "cs46xx.h"
-#include <asm/io.h>
-
#include "cs46xx_lib.h"
#include "dsp_spos.h"
@@ -1778,13 +1777,11 @@ static struct snd_pcm_ops snd_cs46xx_capture_indirect_ops = {
#define MAX_PLAYBACK_CHANNELS 1
#endif
-int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm)
+int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(chip->card, "CS46xx", device, MAX_PLAYBACK_CHANNELS, 1, &pcm)) < 0)
return err;
@@ -1801,23 +1798,16 @@ int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm)
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
-int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device,
- struct snd_pcm **rpcm)
+int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(chip->card, "CS46xx - Rear", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0)
return err;
@@ -1833,21 +1823,14 @@ int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
-int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device,
- struct snd_pcm **rpcm)
+int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(chip->card, "CS46xx - Center LFE", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0)
return err;
@@ -1863,21 +1846,14 @@ int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
-int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device,
- struct snd_pcm **rpcm)
+int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(chip->card, "CS46xx - IEC958", device, 1, 0, &pcm)) < 0)
return err;
@@ -1893,9 +1869,6 @@ int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
#endif
@@ -2724,13 +2697,11 @@ static struct snd_rawmidi_ops snd_cs46xx_midi_input =
.trigger = snd_cs46xx_midi_input_trigger,
};
-int snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rrawmidi)
+int snd_cs46xx_midi(struct snd_cs46xx *chip, int device)
{
struct snd_rawmidi *rmidi;
int err;
- if (rrawmidi)
- *rrawmidi = NULL;
if ((err = snd_rawmidi_new(chip->card, "CS46XX", device, 1, 1, &rmidi)) < 0)
return err;
strcpy(rmidi->name, "CS46XX");
@@ -2739,8 +2710,6 @@ int snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rr
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = chip;
chip->rmidi = rmidi;
- if (rrawmidi)
- *rrawmidi = NULL;
return 0;
}
@@ -2979,8 +2948,8 @@ static int snd_cs46xx_free(struct snd_cs46xx *chip)
for (idx = 0; idx < 5; idx++) {
struct snd_cs46xx_region *region = &chip->region.idx[idx];
- if (region->remap_addr)
- iounmap(region->remap_addr);
+
+ iounmap(region->remap_addr);
release_and_free_resource(region->resource);
}
@@ -3804,7 +3773,6 @@ static unsigned int saved_regs[] = {
static int snd_cs46xx_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_cs46xx *chip = card->private_data;
int i, amp_saved;
@@ -3829,16 +3797,11 @@ static int snd_cs46xx_suspend(struct device *dev)
/* disable CLKRUN */
chip->active_ctrl(chip, -chip->amplifier);
chip->amplifier = amp_saved; /* restore the status */
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_cs46xx_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_cs46xx *chip = card->private_data;
int amp_saved;
@@ -3847,15 +3810,6 @@ static int snd_cs46xx_resume(struct device *dev)
#endif
unsigned int tmp;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
amp_saved = chip->amplifier;
chip->amplifier = 0;
chip->active_ctrl(chip, 1); /* force to on */
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 1c4a0fb3ffef..5c99efb004c0 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -20,7 +20,7 @@
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index 8284bc9b5858..2c90c0bded69 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -21,7 +21,7 @@
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>
diff --git a/sound/pci/cs5530.c b/sound/pci/cs5530.c
index b1025507a467..0a8cf94c4858 100644
--- a/sound/pci/cs5530.c
+++ b/sound/pci/cs5530.c
@@ -223,7 +223,7 @@ static int snd_cs5530_create(struct snd_card *card,
return err;
}
- err = snd_sb16dsp_pcm(chip->sb, 0, &chip->sb->pcm);
+ err = snd_sb16dsp_pcm(chip->sb, 0);
if (err < 0) {
dev_err(card->dev, "Could not create PCM\n");
snd_cs5530_free(chip);
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 16288e4d338a..802c33f1cc59 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -27,7 +27,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
diff --git a/sound/pci/cs5535audio/cs5535audio_pm.c b/sound/pci/cs5535audio/cs5535audio_pm.c
index 34cc60057d0c..06ac5d8da362 100644
--- a/sound/pci/cs5535audio/cs5535audio_pm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pm.c
@@ -57,7 +57,6 @@ static void snd_cs5535audio_stop_hardware(struct cs5535audio *cs5535au)
static int snd_cs5535audio_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct cs5535audio *cs5535au = card->private_data;
int i;
@@ -72,34 +71,17 @@ static int snd_cs5535audio_suspend(struct device *dev)
}
/* save important regs, then disable aclink in hw */
snd_cs5535audio_stop_hardware(cs5535au);
-
- if (pci_save_state(pci)) {
- dev_err(dev, "pci_save_state failed!\n");
- return -EIO;
- }
- pci_disable_device(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_cs5535audio_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct cs5535audio *cs5535au = card->private_data;
u32 tmp;
int timeout;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
/* set LNK_WRM_RST to reset AC link */
cs_writel(cs5535au, ACC_CODEC_CNTL, ACC_CODEC_CNTL_LNK_WRM_RST);
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index b425aa8ee578..1cac55fd1139 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1985,10 +1985,7 @@ static int hw_card_shutdown(struct hw *hw)
free_irq(hw->irq, hw);
hw->irq = -1;
-
- if (hw->mem_base)
- iounmap(hw->mem_base);
-
+ iounmap(hw->mem_base);
hw->mem_base = NULL;
if (hw->io_base)
@@ -2099,20 +2096,11 @@ static int hw_suspend(struct hw *hw)
pci_write_config_dword(pci, UAA_CFG_SPACE_FLAG, 0x0);
}
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
-
return 0;
}
static int hw_resume(struct hw *hw, struct card_conf *info)
{
- struct pci_dev *pci = hw->pci;
-
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
-
/* Re-initialize card hardware. */
return hw_card_init(hw, info);
}
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index 253899d13790..955ad871e9a8 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -2110,10 +2110,7 @@ static int hw_card_shutdown(struct hw *hw)
free_irq(hw->irq, hw);
hw->irq = -1;
-
- if (hw->mem_base)
- iounmap(hw->mem_base);
-
+ iounmap(hw->mem_base);
hw->mem_base = NULL;
if (hw->io_base)
@@ -2209,24 +2206,12 @@ static int hw_card_init(struct hw *hw, struct card_conf *info)
#ifdef CONFIG_PM_SLEEP
static int hw_suspend(struct hw *hw)
{
- struct pci_dev *pci = hw->pci;
-
hw_card_stop(hw);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
-
return 0;
}
static int hw_resume(struct hw *hw, struct card_conf *info)
{
- struct pci_dev *pci = hw->pci;
-
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
-
/* Re-initialize card hardware. */
return hw_card_init(hw, info);
}
diff --git a/sound/pci/echoaudio/darla20.c b/sound/pci/echoaudio/darla20.c
index 4632946205a8..c95da6301677 100644
--- a/sound/pci/echoaudio/darla20.c
+++ b/sound/pci/echoaudio/darla20.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -51,7 +52,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/darla24.c b/sound/pci/echoaudio/darla24.c
index f81c839cc887..3013b4daa19e 100644
--- a/sound/pci/echoaudio/darla24.c
+++ b/sound/pci/echoaudio/darla24.c
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -55,7 +56,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/echo3g.c b/sound/pci/echoaudio/echo3g.c
index 3a5346c33d76..1f34a07b0b19 100644
--- a/sound/pci/echoaudio/echo3g.c
+++ b/sound/pci/echoaudio/echo3g.c
@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -63,7 +64,6 @@
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 21228adaa70c..a962de03ebb6 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1872,12 +1872,8 @@ static int snd_echo_free(struct echoaudio *chip)
if (chip->comm_page)
snd_dma_free_pages(&chip->commpage_dma_buf);
- if (chip->dsp_registers)
- iounmap(chip->dsp_registers);
-
+ iounmap(chip->dsp_registers);
release_and_free_resource(chip->iores);
-
-
pci_disable_device(chip->pci);
/* release chip data */
@@ -2162,7 +2158,6 @@ ctl_error:
static int snd_echo_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct echoaudio *chip = dev_get_drvdata(dev);
snd_pcm_suspend_all(chip->analog_pcm);
@@ -2188,9 +2183,6 @@ static int snd_echo_suspend(struct device *dev)
chip->dsp_code = NULL;
free_irq(chip->irq, chip);
chip->irq = -1;
- pci_save_state(pci);
- pci_disable_device(pci);
-
return 0;
}
@@ -2204,7 +2196,6 @@ static int snd_echo_resume(struct device *dev)
u32 pipe_alloc_mask;
int err;
- pci_restore_state(pci);
commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
if (commpage_bak == NULL)
return -ENOMEM;
diff --git a/sound/pci/echoaudio/gina20.c b/sound/pci/echoaudio/gina20.c
index 9cb81c500824..4fa32a2e97db 100644
--- a/sound/pci/echoaudio/gina20.c
+++ b/sound/pci/echoaudio/gina20.c
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -55,7 +56,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/gina24.c b/sound/pci/echoaudio/gina24.c
index 35d3e6eac990..b1bcacaef257 100644
--- a/sound/pci/echoaudio/gina24.c
+++ b/sound/pci/echoaudio/gina24.c
@@ -53,6 +53,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -61,7 +62,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/indigo.c b/sound/pci/echoaudio/indigo.c
index 8d91842d1268..175af9b1435f 100644
--- a/sound/pci/echoaudio/indigo.c
+++ b/sound/pci/echoaudio/indigo.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -53,7 +54,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/indigodj.c b/sound/pci/echoaudio/indigodj.c
index 289cb969f5b9..8c60314e4901 100644
--- a/sound/pci/echoaudio/indigodj.c
+++ b/sound/pci/echoaudio/indigodj.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -53,7 +54,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/indigoio.c b/sound/pci/echoaudio/indigoio.c
index 405a3f2e496f..f7618edfd79c 100644
--- a/sound/pci/echoaudio/indigoio.c
+++ b/sound/pci/echoaudio/indigoio.c
@@ -46,6 +46,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -54,7 +55,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/layla20.c b/sound/pci/echoaudio/layla20.c
index b392dd776b71..12e5d2164dc4 100644
--- a/sound/pci/echoaudio/layla20.c
+++ b/sound/pci/echoaudio/layla20.c
@@ -52,6 +52,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -61,7 +62,6 @@
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/layla24.c b/sound/pci/echoaudio/layla24.c
index bc7f730b0ec6..6e4023728ef5 100644
--- a/sound/pci/echoaudio/layla24.c
+++ b/sound/pci/echoaudio/layla24.c
@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -63,7 +64,6 @@
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/mia.c b/sound/pci/echoaudio/mia.c
index 27a9a6e5db2d..2f7562f1aefb 100644
--- a/sound/pci/echoaudio/mia.c
+++ b/sound/pci/echoaudio/mia.c
@@ -53,6 +53,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -62,7 +63,6 @@
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/echoaudio/midi.c b/sound/pci/echoaudio/midi.c
index d913749d154a..a8fe58335ddc 100644
--- a/sound/pci/echoaudio/midi.c
+++ b/sound/pci/echoaudio/midi.c
@@ -257,9 +257,8 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream
spin_lock_irq(&chip->lock);
if (up) {
if (!chip->tinuse) {
- init_timer(&chip->timer);
- chip->timer.function = snd_echo_midi_output_write;
- chip->timer.data = (unsigned long)chip;
+ setup_timer(&chip->timer, snd_echo_midi_output_write,
+ (unsigned long)chip);
chip->tinuse = 1;
}
} else {
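
For context, the echoaudio MIDI hunk above is the standard conversion from open-coded timer initialization to the setup_timer() helper (the pre-4.15 timer API, where the callback receives an unsigned long cookie). A minimal stand-alone sketch, with hypothetical names, not taken from the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_chip {
	struct timer_list timer;
};

/* callback signature required by the unsigned-long-cookie timer API */
static void my_timer_fn(unsigned long data)
{
	struct my_chip *chip = (struct my_chip *)data;

	(void)chip;	/* ... poll the hardware here ... */
}

static void my_arm_timer(struct my_chip *chip)
{
	/* replaces: init_timer(); timer.function = ...; timer.data = ...; */
	setup_timer(&chip->timer, my_timer_fn, (unsigned long)chip);
	mod_timer(&chip->timer, jiffies + 1);
}
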
diff --git a/sound/pci/echoaudio/mona.c b/sound/pci/echoaudio/mona.c
index 3d13875c303d..34d499466393 100644
--- a/sound/pci/echoaudio/mona.c
+++ b/sound/pci/echoaudio/mona.c
@@ -51,6 +51,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -59,7 +60,6 @@
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 4c171636efcd..37d0220a094c 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -132,11 +132,11 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
goto error;
card->private_data = emu;
emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
- if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
+ if ((err = snd_emu10k1_pcm(emu, 0)) < 0)
goto error;
- if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
+ if ((err = snd_emu10k1_pcm_mic(emu, 1)) < 0)
goto error;
- if ((err = snd_emu10k1_pcm_efx(emu, 2, NULL)) < 0)
+ if ((err = snd_emu10k1_pcm_efx(emu, 2)) < 0)
goto error;
/* This stores the periods table. */
if (emu->card_capabilities->ca0151_chip) { /* P16V */
@@ -151,10 +151,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
if ((err = snd_emu10k1_timer(emu, 0)) < 0)
goto error;
- if ((err = snd_emu10k1_pcm_multi(emu, 3, NULL)) < 0)
+ if ((err = snd_emu10k1_pcm_multi(emu, 3)) < 0)
goto error;
if (emu->card_capabilities->ca0151_chip) { /* P16V */
- if ((err = snd_p16v_pcm(emu, 4, NULL)) < 0)
+ if ((err = snd_p16v_pcm(emu, 4)) < 0)
goto error;
}
if (emu->audigy) {
@@ -164,7 +164,7 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
if ((err = snd_emu10k1_midi(emu)) < 0)
goto error;
}
- if ((err = snd_emu10k1_fx8010_new(emu, 0, NULL)) < 0)
+ if ((err = snd_emu10k1_fx8010_new(emu, 0)) < 0)
goto error;
#ifdef ENABLE_SYNTH
if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
@@ -210,7 +210,6 @@ static void snd_card_emu10k1_remove(struct pci_dev *pci)
#ifdef CONFIG_PM_SLEEP
static int snd_emu10k1_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_emu10k1 *emu = card->private_data;
@@ -232,28 +231,14 @@ static int snd_emu10k1_suspend(struct device *dev)
snd_p16v_suspend(emu);
snd_emu10k1_done(emu);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_emu10k1_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_emu10k1 *emu = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_emu10k1_resume_init(emu);
snd_emu10k1_efx_resume(emu);
snd_ac97_resume(emu->ac97);
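
The suspend/resume hunks in this and the following drivers all drop the same boilerplate: when a PCI driver exposes its callbacks through dev_pm_ops, the PCI core saves and restores config space and handles the power-state transition itself, so pci_save_state(), pci_set_power_state() and the re-enable dance are redundant. A minimal sketch of the resulting shape (hypothetical driver, not from the patch):

#include <linux/device.h>
#include <linux/pm.h>

static int my_snd_suspend(struct device *dev)
{
	/* quiesce the codec/DMA only; no PCI state handling here */
	return 0;
}

static int my_snd_resume(struct device *dev)
{
	/* the PCI core has already restored and re-enabled the device */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_snd_pm, my_snd_suspend, my_snd_resume);

/* in the pci_driver definition:  .driver.pm = &my_snd_pm, */
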
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 15933f92f63a..6d1b98d14327 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -847,15 +847,13 @@ static const struct snd_pcm_chmap_elem clfe_map[] = {
{ }
};
-static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device, struct snd_pcm **rpcm)
+static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device)
{
struct snd_pcm *pcm;
const struct snd_pcm_chmap_elem *map = NULL;
int err;
int capture = 0;
- if (rpcm)
- *rpcm = NULL;
if (device == 0)
capture = 1;
@@ -896,15 +894,8 @@ static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device, struct snd_pcm **r
snd_dma_pci_data(emu->pci),
32*1024, 32*1024);
- err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
+ return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
1 << 2, NULL);
- if (err < 0)
- return err;
-
- if (rpcm)
- *rpcm = pcm;
-
- return 0;
}
static int snd_emu10k1x_create(struct snd_card *card,
@@ -1583,15 +1574,15 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
return err;
}
- if ((err = snd_emu10k1x_pcm(chip, 0, NULL)) < 0) {
+ if ((err = snd_emu10k1x_pcm(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_pcm(chip, 1, NULL)) < 0) {
+ if ((err = snd_emu10k1x_pcm(chip, 1)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_pcm(chip, 2, NULL)) < 0) {
+ if ((err = snd_emu10k1x_pcm(chip, 2)) < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index eb5c0aba41c1..56fc47bd6dba 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2641,14 +2641,11 @@ static int snd_emu10k1_fx8010_release(struct snd_hwdep * hw, struct file *file)
return 0;
}
-int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device,
- struct snd_hwdep **rhwdep)
+int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device)
{
struct snd_hwdep *hw;
int err;
- if (rhwdep)
- *rhwdep = NULL;
if ((err = snd_hwdep_new(emu->card, "FX8010", device, &hw)) < 0)
return err;
strcpy(hw->name, "EMU10K1 (FX8010)");
@@ -2657,8 +2654,6 @@ int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device,
hw->ops.ioctl = snd_emu10k1_fx8010_ioctl;
hw->ops.release = snd_emu10k1_fx8010_release;
hw->private_data = emu;
- if (rhwdep)
- *rhwdep = hw;
return 0;
}
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index f82481bd2542..0dc07385af0e 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1400,15 +1400,12 @@ static struct snd_pcm_ops snd_emu10k1_efx_playback_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
+int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(emu->card, "emu10k1", device, 32, 1, &pcm)) < 0)
return err;
@@ -1429,22 +1426,15 @@ int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
-int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device,
- struct snd_pcm **rpcm)
+int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(emu->card, "emu10k1", device, 1, 0, &pcm)) < 0)
return err;
@@ -1461,9 +1451,6 @@ int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device,
if ((err = snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(emu->pci), 64*1024, 64*1024)) < 0)
return err;
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
@@ -1479,15 +1466,11 @@ static struct snd_pcm_ops snd_emu10k1_capture_mic_ops = {
.pointer = snd_emu10k1_capture_pointer,
};
-int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device,
- struct snd_pcm **rpcm)
+int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(emu->card, "emu10k1 mic", device, 0, 1, &pcm)) < 0)
return err;
@@ -1501,8 +1484,6 @@ int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -1822,16 +1803,12 @@ static struct snd_pcm_ops snd_emu10k1_fx8010_playback_ops = {
.ack = snd_emu10k1_fx8010_playback_transfer,
};
-int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device,
- struct snd_pcm **rpcm)
+int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
{
struct snd_pcm *pcm;
struct snd_kcontrol *kctl;
int err;
- if (rpcm)
- *rpcm = NULL;
-
if ((err = snd_pcm_new(emu->card, "emu10k1 efx", device, 8, 1, &pcm)) < 0)
return err;
@@ -1843,8 +1820,6 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device,
pcm->info_flags = 0;
strcpy(pcm->name, "Multichannel Capture/PT Playback");
emu->pcm_efx = pcm;
- if (rpcm)
- *rpcm = pcm;
/* EFX capture - record the "FXBUS2" channels, by default we connect the EXTINs
* to these
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index 7ef3898a7806..3c60b433de9f 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -166,11 +166,8 @@ static struct snd_pcm_hardware snd_p16v_capture_hw = {
static void snd_p16v_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
struct snd_emu10k1_pcm *epcm = runtime->private_data;
-
- if (epcm) {
- /* dev_dbg(emu->card->dev, "epcm free: %p\n", epcm); */
- kfree(epcm);
- }
+
+ kfree(epcm);
}
/* open_playback callback */
@@ -640,7 +637,7 @@ int snd_p16v_free(struct snd_emu10k1 *chip)
return 0;
}
-int snd_p16v_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
+int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
@@ -649,8 +646,6 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
/* dev_dbg(emu->card->dev, "snd_p16v_pcm called. device=%d\n", device); */
emu->p16v_device_offset = device;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(emu->card, "p16v", device, 1, capture, &pcm)) < 0)
return err;
@@ -694,9 +689,6 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
*/
}
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
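
The emu10k1x, emufx, emupcm and p16v hunks above share one API cleanup: the optional struct snd_pcm ** (or struct snd_hwdep **) out-parameter is removed because every caller passed NULL and the created object is kept in the chip structure anyway. A sketch of the resulting constructor shape, with hypothetical names around the real ALSA calls:

#include <sound/core.h>
#include <sound/pcm.h>

struct my_chip {
	struct snd_card *card;
	struct snd_pcm *pcm;
};

/* old shape: int my_pcm_new(struct my_chip *chip, int device, struct snd_pcm **rpcm) */
static int my_pcm_new(struct my_chip *chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(chip->card, "my-chip", device, 1, 1, &pcm);
	if (err < 0)
		return err;

	pcm->private_data = chip;
	chip->pcm = pcm;	/* reachable via the chip, no out-parameter needed */
	return 0;
}
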
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index d94cb3ca7a64..0dc44ebb0032 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -26,7 +26,7 @@
* by Kurt J. Bosch
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1268,14 +1268,11 @@ static const struct snd_pcm_chmap_elem surround_map[] = {
{ }
};
-static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device,
- struct snd_pcm **rpcm)
+static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(ensoniq->card, CHIP_NAME "/1", device, 1, 1, &pcm);
if (err < 0)
return err;
@@ -1302,22 +1299,14 @@ static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device,
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps, 2, 0, NULL);
#endif
- if (err < 0)
- return err;
-
- if (rpcm)
- *rpcm = pcm;
- return 0;
+ return err;
}
-static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device,
- struct snd_pcm **rpcm)
+static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(ensoniq->card, CHIP_NAME "/2", device, 1, 0, &pcm);
if (err < 0)
return err;
@@ -1342,12 +1331,7 @@ static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device,
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
surround_map, 2, 0, NULL);
#endif
- if (err < 0)
- return err;
-
- if (rpcm)
- *rpcm = pcm;
- return 0;
+ return err;
}
/*
@@ -2049,7 +2033,6 @@ static void snd_ensoniq_chip_init(struct ensoniq *ensoniq)
#ifdef CONFIG_PM_SLEEP
static int snd_ensoniq_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct ensoniq *ensoniq = card->private_data;
@@ -2070,28 +2053,14 @@ static int snd_ensoniq_suspend(struct device *dev)
udelay(100);
snd_ak4531_suspend(ensoniq->u.es1370.ak4531);
#endif
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_ensoniq_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct ensoniq *ensoniq = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_ensoniq_chip_init(ensoniq);
#ifdef CHIP1371
@@ -2362,14 +2331,11 @@ static struct snd_rawmidi_ops snd_ensoniq_midi_input =
.trigger = snd_ensoniq_midi_input_trigger,
};
-static int snd_ensoniq_midi(struct ensoniq *ensoniq, int device,
- struct snd_rawmidi **rrawmidi)
+static int snd_ensoniq_midi(struct ensoniq *ensoniq, int device)
{
struct snd_rawmidi *rmidi;
int err;
- if (rrawmidi)
- *rrawmidi = NULL;
if ((err = snd_rawmidi_new(ensoniq->card, "ES1370/1", device, 1, 1, &rmidi)) < 0)
return err;
strcpy(rmidi->name, CHIP_NAME);
@@ -2379,8 +2345,6 @@ static int snd_ensoniq_midi(struct ensoniq *ensoniq, int device,
SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = ensoniq;
ensoniq->rmidi = rmidi;
- if (rrawmidi)
- *rrawmidi = rmidi;
return 0;
}
@@ -2462,15 +2426,15 @@ static int snd_audiopci_probe(struct pci_dev *pci,
return err;
}
#endif
- if ((err = snd_ensoniq_pcm(ensoniq, 0, NULL)) < 0) {
+ if ((err = snd_ensoniq_pcm(ensoniq, 0)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_ensoniq_pcm2(ensoniq, 1, NULL)) < 0) {
+ if ((err = snd_ensoniq_pcm2(ensoniq, 1)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_ensoniq_midi(ensoniq, 0, NULL)) < 0) {
+ if ((err = snd_ensoniq_midi(ensoniq, 0)) < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 0fc46eb4e251..e1858d9d23d8 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -55,6 +55,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
@@ -63,8 +64,6 @@
#include <sound/initval.h>
#include <sound/tlv.h>
-#include <asm/io.h>
-
MODULE_AUTHOR("Jaromir Koutek <miri@punknet.cz>");
MODULE_DESCRIPTION("ESS Solo-1");
MODULE_LICENSE("GPL");
@@ -1454,7 +1453,6 @@ static unsigned char saved_regs[SAVED_REG_SIZE+1] = {
static int es1938_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct es1938 *chip = card->private_data;
unsigned char *s, *d;
@@ -1471,9 +1469,6 @@ static int es1938_suspend(struct device *dev)
free_irq(chip->irq, chip);
chip->irq = -1;
}
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
@@ -1484,14 +1479,6 @@ static int es1938_resume(struct device *dev)
struct es1938 *chip = card->private_data;
unsigned char *s, *d;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
-
if (request_irq(pci->irq, snd_es1938_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(dev, "unable to grab IRQ %d, disabling device\n",
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 6039700f8579..059f3846d7b8 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -94,7 +94,7 @@
* places.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -2383,7 +2383,6 @@ static void snd_es1968_start_irq(struct es1968 *chip)
*/
static int es1968_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct es1968 *chip = card->private_data;
@@ -2396,16 +2395,11 @@ static int es1968_suspend(struct device *dev)
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
snd_es1968_bob_stop(chip);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int es1968_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct es1968 *chip = card->private_data;
struct esschan *es;
@@ -2413,16 +2407,6 @@ static int es1968_resume(struct device *dev)
if (! chip->do_pm)
return 0;
- /* restore all our config */
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_es1968_chip_init(chip);
/* need to restore the base pointers.. */
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index d167afffce5f..1fdd92b6f18f 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -2,8 +2,6 @@
* The driver for the ForteMedia FM801 based soundcards
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
- * Support FM only card by Andy Shevchenko <andy@smile.org.ua>
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -14,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/delay.h>
@@ -704,13 +698,11 @@ static struct snd_pcm_ops snd_fm801_capture_ops = {
.pointer = snd_fm801_capture_pointer,
};
-static int snd_fm801_pcm(struct fm801 *chip, int device, struct snd_pcm **rpcm)
+static int snd_fm801_pcm(struct fm801 *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(chip->card, "FM801", device, 1, 1, &pcm)) < 0)
return err;
@@ -726,16 +718,10 @@ static int snd_fm801_pcm(struct fm801 *chip, int device, struct snd_pcm **rpcm)
snd_dma_pci_data(chip->pci),
chip->multichannel ? 128*1024 : 64*1024, 128*1024);
- err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_alt_chmaps,
chip->multichannel ? 6 : 2, 0,
NULL);
- if (err < 0)
- return err;
-
- if (rpcm)
- *rpcm = pcm;
- return 0;
}
/*
@@ -1186,12 +1172,6 @@ static int snd_fm801_free(struct fm801 *chip)
v4l2_device_unregister(&chip->v4l2_dev);
}
#endif
- if (chip->irq >= 0)
- free_irq(chip->irq, chip);
- pci_release_regions(chip->pci);
- pci_disable_device(chip->pci);
-
- kfree(chip);
return 0;
}
@@ -1214,28 +1194,23 @@ static int snd_fm801_create(struct snd_card *card,
};
*rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ if ((err = pcim_enable_device(pci)) < 0)
return err;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (chip == NULL) {
- pci_disable_device(pci);
+ chip = devm_kzalloc(&pci->dev, sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
return -ENOMEM;
- }
spin_lock_init(&chip->reg_lock);
chip->card = card;
chip->pci = pci;
chip->irq = -1;
chip->tea575x_tuner = tea575x_tuner;
- if ((err = pci_request_regions(pci, "FM801")) < 0) {
- kfree(chip);
- pci_disable_device(pci);
+ if ((err = pci_request_regions(pci, "FM801")) < 0)
return err;
- }
chip->port = pci_resource_start(pci, 0);
if ((tea575x_tuner & TUNER_ONLY) == 0) {
- if (request_irq(pci->irq, snd_fm801_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, chip)) {
- dev_err(card->dev, "unable to grab IRQ %d\n", chip->irq);
+ if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip)) {
+ dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
snd_fm801_free(chip);
return -EBUSY;
}
@@ -1250,12 +1225,6 @@ static int snd_fm801_create(struct snd_card *card,
/* init might set tuner access method */
tea575x_tuner = chip->tea575x_tuner;
- if (chip->irq >= 0 && (tea575x_tuner & TUNER_ONLY)) {
- pci_clear_master(pci);
- free_irq(chip->irq, chip);
- chip->irq = -1;
- }
-
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
snd_fm801_free(chip);
return err;
@@ -1340,7 +1309,7 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
if (chip->tea575x_tuner & TUNER_ONLY)
goto __fm801_tuner_only;
- if ((err = snd_fm801_pcm(chip, 0, NULL)) < 0) {
+ if ((err = snd_fm801_pcm(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
@@ -1392,7 +1361,6 @@ static unsigned char saved_regs[] = {
static int snd_fm801_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct fm801 *chip = card->private_data;
int i;
@@ -1404,29 +1372,15 @@ static int snd_fm801_suspend(struct device *dev)
for (i = 0; i < ARRAY_SIZE(saved_regs); i++)
chip->saved_regs[i] = inw(chip->port + saved_regs[i]);
/* FIXME: tea575x suspend */
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_fm801_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct fm801 *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_fm801_chip_init(chip, 1);
snd_ac97_resume(chip->ac97);
snd_ac97_resume(chip->ac97_sec);
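
Beyond the same PM and PCM-constructor cleanups, the fm801 hunks convert the driver to managed (devres) resources: pcim_enable_device(), devm_kzalloc() and devm_request_irq() are released automatically when the device is unbound, which is why the explicit free_irq()/pci_release_regions()/kfree() teardown paths disappear from snd_fm801_free() and the error branches. A rough sketch of the probe pattern, hypothetical names only:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct my_chip {
	unsigned long port;
};

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct my_chip *chip;
	int err;

	err = pcim_enable_device(pci);		/* auto-disabled on unbind */
	if (err < 0)
		return err;

	chip = devm_kzalloc(&pci->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	err = pci_request_regions(pci, "my-chip");
	if (err < 0)		/* managed too, once pcim_enable_device() was used */
		return err;
	chip->port = pci_resource_start(pci, 0);

	return devm_request_irq(&pci->dev, pci->irq, my_interrupt,
				IRQF_SHARED, KBUILD_MODNAME, chip);
}
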
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index ebf4c2fb99df..7f0f2c5a4e97 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -107,6 +107,7 @@ config SND_HDA_PATCH_LOADER
config SND_HDA_CODEC_REALTEK
tristate "Build Realtek HD-audio codec support"
select SND_HDA_GENERIC
+ select INPUT
help
Say Y or M here to include Realtek HD-audio codec support in
snd-hda-intel driver, such as ALC880.
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 1ede82200ee5..3f8706bb3d16 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -409,10 +409,10 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
/*
* debug prints of the parsed results
*/
- codec_info(codec, "autoconfig: line_outs=%d (0x%x/0x%x/0x%x/0x%x/0x%x) type:%s\n",
- cfg->line_outs, cfg->line_out_pins[0], cfg->line_out_pins[1],
- cfg->line_out_pins[2], cfg->line_out_pins[3],
- cfg->line_out_pins[4],
+ codec_info(codec, "autoconfig for %s: line_outs=%d (0x%x/0x%x/0x%x/0x%x/0x%x) type:%s\n",
+ codec->chip_name, cfg->line_outs, cfg->line_out_pins[0],
+ cfg->line_out_pins[1], cfg->line_out_pins[2],
+ cfg->line_out_pins[3], cfg->line_out_pins[4],
cfg->line_out_type == AUTO_PIN_HP_OUT ? "hp" :
(cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ?
"speaker" : "line"));
@@ -920,6 +920,8 @@ void snd_hda_pick_pin_fixup(struct hda_codec *codec,
codec->fixup_id = pq->value;
#ifdef CONFIG_SND_DEBUG_VERBOSE
codec->fixup_name = pq->name;
+ codec_dbg(codec, "%s: picked fixup %s (pin match)\n",
+ codec->chip_name, codec->fixup_name);
#endif
codec->fixup_list = fixlist;
return;
@@ -960,6 +962,8 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
codec->fixup_list = NULL;
codec->fixup_name = NULL;
codec->fixup_id = HDA_FIXUP_ID_NO_FIXUP;
+ codec_dbg(codec, "%s: picked no fixup (nofixup specified)\n",
+ codec->chip_name);
return;
}
@@ -969,6 +973,8 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
codec->fixup_id = models->id;
codec->fixup_name = models->name;
codec->fixup_list = fixlist;
+ codec_dbg(codec, "%s: picked fixup %s (model specified)\n",
+ codec->chip_name, codec->fixup_name);
return;
}
models++;
@@ -980,6 +986,8 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
id = q->value;
#ifdef CONFIG_SND_DEBUG_VERBOSE
name = q->name;
+ codec_dbg(codec, "%s: picked fixup %s (PCI SSID%s)\n",
+ codec->chip_name, name, q->subdevice_mask ? "" : " - vendor generic");
#endif
}
}
@@ -992,6 +1000,8 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
id = q->value;
#ifdef CONFIG_SND_DEBUG_VERBOSE
name = q->name;
+ codec_dbg(codec, "%s: picked fixup %s (codec SSID)\n",
+ codec->chip_name, name);
#endif
break;
}
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 0cfc9c8c4b4e..dfcb5e929f9f 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -657,6 +657,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
if (start) {
azx_timecounter_init(substream, 0, 0);
+ snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
+ substream->runtime->trigger_tstamp_latched = true;
+
if (nsync > 1) {
cycle_t cycle_last;
@@ -939,7 +942,8 @@ static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
chip->card->dev,
size, MAX_PREALLOC_SIZE);
/* link to codec */
- pcm->dev = &codec->dev;
+ for (s = 0; s < 2; s++)
+ pcm->streams[s].dev.parent = &codec->dev;
return 0;
}
@@ -1993,4 +1997,4 @@ void azx_notifier_unregister(struct azx *chip)
EXPORT_SYMBOL_GPL(azx_notifier_unregister);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Common HDA driver funcitons");
+MODULE_DESCRIPTION("Common HDA driver functions");
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 014a7849e8fd..11b5a42b4ec8 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -109,7 +109,6 @@ int snd_hda_create_hwdep(struct hda_codec *codec)
hwdep->iface = SNDRV_HWDEP_IFACE_HDA;
hwdep->private_data = codec;
hwdep->exclusive = 1;
- hwdep->groups = snd_hda_dev_attr_groups;
hwdep->ops.open = hda_hwdep_open;
hwdep->ops.ioctl = hda_hwdep_ioctl;
@@ -118,7 +117,11 @@ int snd_hda_create_hwdep(struct hda_codec *codec)
#endif
/* link to codec */
- hwdep->dev = &codec->dev;
+ hwdep->dev.parent = &codec->dev;
+
+ /* for sysfs */
+ hwdep->dev.groups = snd_hda_dev_attr_groups;
+ dev_set_drvdata(&hwdep->dev, codec);
return 0;
}
diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c
index d4d0375ac181..714894527e06 100644
--- a/sound/pci/hda/hda_i915.c
+++ b/sound/pci/hda/hda_i915.c
@@ -18,10 +18,12 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/component.h>
+#include <drm/i915_component.h>
#include <sound/core.h>
-#include <drm/i915_powerwell.h>
#include "hda_priv.h"
-#include "hda_i915.h"
+#include "hda_intel.h"
/* Intel HSW/BDW display HDA controller Extended Mode registers.
* EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
@@ -31,32 +33,33 @@
#define AZX_REG_EM4 0x100c
#define AZX_REG_EM5 0x1010
-static int (*get_power)(void);
-static int (*put_power)(void);
-static int (*get_cdclk)(void);
-
-int hda_display_power(bool enable)
+int hda_display_power(struct hda_intel *hda, bool enable)
{
- if (!get_power || !put_power)
+ struct i915_audio_component *acomp = &hda->audio_component;
+
+ if (!acomp->ops)
return -ENODEV;
- pr_debug("HDA display power %s \n",
- enable ? "Enable" : "Disable");
+ dev_dbg(&hda->chip.pci->dev, "display power %s\n",
+ enable ? "enable" : "disable");
if (enable)
- return get_power();
+ acomp->ops->get_power(acomp->dev);
else
- return put_power();
+ acomp->ops->put_power(acomp->dev);
+
+ return 0;
}
-void haswell_set_bclk(struct azx *chip)
+void haswell_set_bclk(struct hda_intel *hda)
{
int cdclk_freq;
unsigned int bclk_m, bclk_n;
+ struct i915_audio_component *acomp = &hda->audio_component;
- if (!get_cdclk)
+ if (!acomp->ops)
return;
- cdclk_freq = get_cdclk();
+ cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
switch (cdclk_freq) {
case 337500:
bclk_m = 16;
@@ -80,51 +83,108 @@ void haswell_set_bclk(struct azx *chip)
break;
}
- azx_writew(chip, EM4, bclk_m);
- azx_writew(chip, EM5, bclk_n);
+ azx_writew(&hda->chip, EM4, bclk_m);
+ azx_writew(&hda->chip, EM5, bclk_n);
}
-
-int hda_i915_init(void)
+static int hda_component_master_bind(struct device *dev)
{
- int err = 0;
-
- get_power = symbol_request(i915_request_power_well);
- if (!get_power) {
- pr_warn("hda-i915: get_power symbol get fail\n");
- return -ENODEV;
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip = card->private_data;
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct i915_audio_component *acomp = &hda->audio_component;
+ int ret;
+
+ ret = component_bind_all(dev, acomp);
+ if (ret < 0)
+ return ret;
+
+ if (WARN_ON(!(acomp->dev && acomp->ops && acomp->ops->get_power &&
+ acomp->ops->put_power && acomp->ops->get_cdclk_freq))) {
+ ret = -EINVAL;
+ goto out_unbind;
}
- put_power = symbol_request(i915_release_power_well);
- if (!put_power) {
- symbol_put(i915_request_power_well);
- get_power = NULL;
- return -ENODEV;
+ /*
+ * Atm, we don't support dynamic unbinding initiated by the child
+ * component, so pin its containing module until we unbind.
+ */
+ if (!try_module_get(acomp->ops->owner)) {
+ ret = -ENODEV;
+ goto out_unbind;
}
- get_cdclk = symbol_request(i915_get_cdclk_freq);
- if (!get_cdclk) /* may have abnormal BCLK and audio playback rate */
- pr_warn("hda-i915: get_cdclk symbol get fail\n");
+ return 0;
- pr_debug("HDA driver get symbol successfully from i915 module\n");
+out_unbind:
+ component_unbind_all(dev, acomp);
- return err;
+ return ret;
}
-int hda_i915_exit(void)
+static void hda_component_master_unbind(struct device *dev)
{
- if (get_power) {
- symbol_put(i915_request_power_well);
- get_power = NULL;
- }
- if (put_power) {
- symbol_put(i915_release_power_well);
- put_power = NULL;
- }
- if (get_cdclk) {
- symbol_put(i915_get_cdclk_freq);
- get_cdclk = NULL;
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip = card->private_data;
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct i915_audio_component *acomp = &hda->audio_component;
+
+ module_put(acomp->ops->owner);
+ component_unbind_all(dev, acomp);
+ WARN_ON(acomp->ops || acomp->dev);
+}
+
+static const struct component_master_ops hda_component_master_ops = {
+ .bind = hda_component_master_bind,
+ .unbind = hda_component_master_unbind,
+};
+
+static int hda_component_master_match(struct device *dev, void *data)
+{
+ /* i915 is the only supported component */
+ return !strcmp(dev->driver->name, "i915");
+}
+
+int hda_i915_init(struct hda_intel *hda)
+{
+ struct component_match *match = NULL;
+ struct device *dev = &hda->chip.pci->dev;
+ struct i915_audio_component *acomp = &hda->audio_component;
+ int ret;
+
+ component_match_add(dev, &match, hda_component_master_match, hda);
+ ret = component_master_add_with_match(dev, &hda_component_master_ops,
+ match);
+ if (ret < 0)
+ goto out_err;
+
+ /*
+ * Atm, we don't support deferring the component binding, so make sure
+ * i915 is loaded and that the binding successfully completes.
+ */
+ request_module("i915");
+
+ if (!acomp->ops) {
+ ret = -ENODEV;
+ goto out_master_del;
}
+ dev_dbg(dev, "bound to i915 component master\n");
+
+ return 0;
+out_master_del:
+ component_master_del(dev, &hda_component_master_ops);
+out_err:
+ dev_err(dev, "failed to add i915 component master (%d)\n", ret);
+
+ return ret;
+}
+
+int hda_i915_exit(struct hda_intel *hda)
+{
+ struct device *dev = &hda->chip.pci->dev;
+
+ component_master_del(dev, &hda_component_master_ops);
+
return 0;
}
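
The rewrite of hda_i915.c above replaces ad-hoc symbol_request() lookups of i915 exports with the component framework: the HDA controller registers as a component master, i915 registers as a component, and the audio-component ops table is exchanged at bind time. A reduced skeleton of the master side, distilled from the hunks above with error handling trimmed and hypothetical names:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/string.h>

static int my_master_bind(struct device *dev)
{
	/* called once every matched component has registered */
	return component_bind_all(dev, NULL /* master's private data */);
}

static void my_master_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops my_master_ops = {
	.bind	= my_master_bind,
	.unbind	= my_master_unbind,
};

static int my_match(struct device *dev, void *data)
{
	return !strcmp(dev->driver->name, "i915");
}

static int my_register_master(struct device *dev)
{
	struct component_match *match = NULL;

	component_match_add(dev, &match, my_match, NULL);
	return component_master_add_with_match(dev, &my_master_ops, match);
}
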
diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h
deleted file mode 100644
index e6072c627583..000000000000
--- a/sound/pci/hda/hda_i915.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#ifndef __SOUND_HDA_I915_H
-#define __SOUND_HDA_I915_H
-
-#ifdef CONFIG_SND_HDA_I915
-int hda_display_power(bool enable);
-void haswell_set_bclk(struct azx *chip);
-int hda_i915_init(void);
-int hda_i915_exit(void);
-#else
-static inline int hda_display_power(bool enable) { return 0; }
-static inline void haswell_set_bclk(struct azx *chip) { return; }
-static inline int hda_i915_init(void)
-{
- return -ENODEV;
-}
-static inline int hda_i915_exit(void)
-{
- return 0;
-}
-#endif
-
-#endif
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index d426a0bd6a5f..36d2f20db7a4 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -63,7 +63,7 @@
#include "hda_codec.h"
#include "hda_controller.h"
#include "hda_priv.h"
-#include "hda_i915.h"
+#include "hda_intel.h"
/* position fix mode */
enum {
@@ -354,31 +354,6 @@ static char *driver_short_names[] = {
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
-struct hda_intel {
- struct azx chip;
-
- /* for pending irqs */
- struct work_struct irq_pending_work;
-
- /* sync probing */
- struct completion probe_wait;
- struct work_struct probe_work;
-
- /* card list (for power_save trigger) */
- struct list_head list;
-
- /* extra flags */
- unsigned int irq_pending_warned:1;
-
- /* VGA-switcheroo setup */
- unsigned int use_vga_switcheroo:1;
- unsigned int vga_switcheroo_registered:1;
- unsigned int init_failed:1; /* delayed init failed */
-
- /* secondary power domain for hdmi audio under vga device */
- struct dev_pm_domain hdmi_pm_domain;
-};
-
#ifdef CONFIG_X86
static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
{
@@ -795,7 +770,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
*/
static int azx_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
struct hda_intel *hda;
@@ -824,11 +798,8 @@ static int azx_suspend(struct device *dev)
if (chip->msi)
pci_disable_msi(chip->pci);
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- hda_display_power(false);
+ hda_display_power(hda, false);
return 0;
}
@@ -848,18 +819,9 @@ static int azx_resume(struct device *dev)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- hda_display_power(true);
- haswell_set_bclk(chip);
+ hda_display_power(hda, true);
+ haswell_set_bclk(hda);
}
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(chip->card->dev,
- "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
@@ -901,7 +863,7 @@ static int azx_runtime_suspend(struct device *dev)
azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- hda_display_power(false);
+ hda_display_power(hda, false);
return 0;
}
@@ -927,8 +889,8 @@ static int azx_runtime_resume(struct device *dev)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- hda_display_power(true);
- haswell_set_bclk(chip);
+ hda_display_power(hda, true);
+ haswell_set_bclk(hda);
}
/* Read STATESTS before controller reset */
@@ -1138,8 +1100,7 @@ static int azx_free(struct azx *chip)
free_irq(chip->irq, (void*)chip);
if (chip->msi)
pci_disable_msi(chip->pci);
- if (chip->remap_addr)
- iounmap(chip->remap_addr);
+ iounmap(chip->remap_addr);
azx_free_stream_pages(chip);
if (chip->region_requested)
@@ -1150,8 +1111,8 @@ static int azx_free(struct azx *chip)
release_firmware(chip->fw);
#endif
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- hda_display_power(false);
- hda_i915_exit();
+ hda_display_power(hda, false);
+ hda_i915_exit(hda);
}
kfree(hda);
@@ -1629,8 +1590,12 @@ static int azx_first_init(struct azx *chip)
/* initialize chip */
azx_init_pci(chip);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- haswell_set_bclk(chip);
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ struct hda_intel *hda;
+
+ hda = container_of(chip, struct hda_intel, chip);
+ haswell_set_bclk(hda);
+ }
azx_init_chip(chip, (probe_only[dev] & 2) == 0);
@@ -1910,13 +1875,10 @@ static int azx_probe_continue(struct azx *chip)
/* Request power well for Haswell HDA controller and codec */
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
#ifdef CONFIG_SND_HDA_I915
- err = hda_i915_init();
- if (err < 0) {
- dev_err(chip->card->dev,
- "Error request power-well from i915\n");
+ err = hda_i915_init(hda);
+ if (err < 0)
goto out_free;
- }
- err = hda_display_power(true);
+ err = hda_display_power(hda, true);
if (err < 0) {
dev_err(chip->card->dev,
"Cannot turn on display power on i915\n");
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
new file mode 100644
index 000000000000..348611835476
--- /dev/null
+++ b/sound/pci/hda/hda_intel.h
@@ -0,0 +1,71 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __SOUND_HDA_INTEL_H
+#define __SOUND_HDA_INTEL_H
+
+#include <drm/i915_component.h>
+#include "hda_priv.h"
+
+struct hda_intel {
+ struct azx chip;
+
+ /* for pending irqs */
+ struct work_struct irq_pending_work;
+
+ /* sync probing */
+ struct completion probe_wait;
+ struct work_struct probe_work;
+
+ /* card list (for power_save trigger) */
+ struct list_head list;
+
+ /* extra flags */
+ unsigned int irq_pending_warned:1;
+
+ /* VGA-switcheroo setup */
+ unsigned int use_vga_switcheroo:1;
+ unsigned int vga_switcheroo_registered:1;
+ unsigned int init_failed:1; /* delayed init failed */
+
+ /* secondary power domain for hdmi audio under vga device */
+ struct dev_pm_domain hdmi_pm_domain;
+
+ /* i915 component interface */
+ struct i915_audio_component audio_component;
+};
+
+#ifdef CONFIG_SND_HDA_I915
+int hda_display_power(struct hda_intel *hda, bool enable);
+void haswell_set_bclk(struct hda_intel *hda);
+int hda_i915_init(struct hda_intel *hda);
+int hda_i915_exit(struct hda_intel *hda);
+#else
+static inline int hda_display_power(struct hda_intel *hda, bool enable)
+{
+ return 0;
+}
+static inline void haswell_set_bclk(struct hda_intel *hda) { return; }
+static inline int hda_i915_init(struct hda_intel *hda)
+{
+ return -ENODEV;
+}
+static inline int hda_i915_exit(struct hda_intel *hda)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 166e3e84b963..daf458299753 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -15,7 +15,7 @@
#ifndef __SOUND_HDA_PRIV_H
#define __SOUND_HDA_PRIV_H
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index a9d78e275138..d285904cdb64 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -739,39 +739,6 @@ static int patch_ad1981(struct hda_codec *codec)
* E/F quad mic array
*/
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int ad198x_ch_mode_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_ch_mode_info(codec, uinfo, spec->channel_mode,
- spec->num_channel_mode);
-}
-
-static int ad198x_ch_mode_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_ch_mode_get(codec, ucontrol, spec->channel_mode,
- spec->num_channel_mode, spec->multiout.max_channels);
-}
-
-static int ad198x_ch_mode_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- int err = snd_hda_ch_mode_put(codec, ucontrol, spec->channel_mode,
- spec->num_channel_mode,
- &spec->multiout.max_channels);
- if (err >= 0 && spec->need_dac_fix)
- spec->multiout.num_dacs = spec->multiout.max_channels / 2;
- return err;
-}
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
static int ad1988_auto_smux_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 65f1f4e18ea5..ddb93083a2af 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/module.h>
+#include <linux/input.h>
#include <sound/core.h>
#include <sound/jack.h>
#include "hda_codec.h"
@@ -120,6 +121,7 @@ struct alc_spec {
hda_nid_t pll_nid;
unsigned int pll_coef_idx, pll_coef_bit;
unsigned int coef0;
+ struct input_dev *kb_dev;
};
/*
@@ -3472,6 +3474,79 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
}
}
+static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ struct hda_jack_callback *event)
+{
+ struct alc_spec *spec = codec->spec;
+
+ /* GPIO2 just toggles on a keypress/keyrelease cycle. Therefore
+ send both key on and key off event for every interrupt. */
+ input_report_key(spec->kb_dev, KEY_MICMUTE, 1);
+ input_sync(spec->kb_dev);
+ input_report_key(spec->kb_dev, KEY_MICMUTE, 0);
+ input_sync(spec->kb_dev);
+}
+
+static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ /* GPIO1 = set according to SKU external amp
+ GPIO2 = mic mute hotkey
+ GPIO3 = mute LED
+ GPIO4 = mic mute LED */
+ static const struct hda_verb gpio_init[] = {
+ { 0x01, AC_VERB_SET_GPIO_MASK, 0x1e },
+ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x1a },
+ { 0x01, AC_VERB_SET_GPIO_DATA, 0x02 },
+ {}
+ };
+
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->kb_dev = input_allocate_device();
+ if (!spec->kb_dev) {
+ codec_err(codec, "Out of memory (input_allocate_device)\n");
+ return;
+ }
+ spec->kb_dev->name = "Microphone Mute Button";
+ spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY);
+ spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);
+ if (input_register_device(spec->kb_dev)) {
+ codec_err(codec, "input_register_device failed\n");
+ input_free_device(spec->kb_dev);
+ spec->kb_dev = NULL;
+ return;
+ }
+
+ snd_hda_add_verbs(codec, gpio_init);
+ snd_hda_codec_write_cache(codec, codec->afg, 0,
+ AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x04);
+ snd_hda_jack_detect_enable_callback(codec, codec->afg,
+ gpio2_mic_hotkey_event);
+
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
+ spec->gpio_led = 0;
+ spec->mute_led_polarity = 0;
+ spec->gpio_mute_led_mask = 0x08;
+ spec->gpio_mic_led_mask = 0x10;
+ return;
+ }
+
+ if (!spec->kb_dev)
+ return;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PROBE:
+ spec->init_amp = ALC_INIT_DEFAULT;
+ break;
+ case HDA_FIXUP_ACT_FREE:
+ input_unregister_device(spec->kb_dev);
+ spec->kb_dev = NULL;
+ }
+}
+
static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -4341,6 +4416,8 @@ enum {
ALC282_FIXUP_ASPIRE_V5_PINS,
ALC280_FIXUP_HP_GPIO4,
ALC286_FIXUP_HP_GPIO_LED,
+ ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
+ ALC280_FIXUP_HP_DOCK_PINS,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -4814,6 +4891,21 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc286_fixup_hp_gpio_led,
},
+ [ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc280_fixup_hp_gpio2_mic_hotkey,
+ },
+ [ALC280_FIXUP_HP_DOCK_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x21011020 }, /* line-out */
+ { 0x1a, 0x01a1903c }, /* headset mic */
+ { 0x18, 0x2181103f }, /* line-in */
+ { },
+ },
+ .chained = true,
+ .chain_id = ALC280_FIXUP_HP_GPIO4
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -4843,6 +4935,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
/* ALC282 */
SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4856,6 +4949,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+ SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
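
The new ALC280 fixup above reports the GPIO2 mic-mute hotkey through a dedicated input device (hence the "select INPUT" added to the HDA Kconfig earlier in this series). It relies on the usual input-core allocate/register/report pattern; a minimal stand-alone sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/input.h>

static struct input_dev *my_register_micmute_key(void)
{
	struct input_dev *kb = input_allocate_device();

	if (!kb)
		return NULL;

	kb->name = "Microphone Mute Button";
	kb->evbit[0] = BIT_MASK(EV_KEY);
	kb->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);

	if (input_register_device(kb)) {
		input_free_device(kb);
		return NULL;
	}
	return kb;
}

/* the GPIO only toggles once per press/release cycle, so report both edges */
static void my_report_micmute(struct input_dev *kb)
{
	input_report_key(kb, KEY_MICMUTE, 1);
	input_sync(kb);
	input_report_key(kb, KEY_MICMUTE, 0);
	input_sync(kb);
}
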
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 605d14003d25..6d36c5b78805 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -99,6 +99,7 @@ enum {
STAC_HP_ENVY_BASS,
STAC_HP_BNB13_EQ,
STAC_HP_ENVY_TS_BASS,
+ STAC_92HD83XXX_GPIO10_EAPD,
STAC_92HD83XXX_MODELS
};
@@ -2141,6 +2142,19 @@ static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
spec->headset_jack = 1;
}
+static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir =
+ spec->gpio_data = 0x10;
+ spec->eapd_switch = 0;
+}
+
static const struct hda_verb hp_bnb13_eq_verbs[] = {
/* 44.1KHz base */
{ 0x22, 0x7A6, 0x3E },
@@ -2656,6 +2670,10 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
{}
},
},
+ [STAC_92HD83XXX_GPIO10_EAPD] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd83xxx_fixup_gpio10_eapd,
+ },
};
static const struct hda_model_fixup stac92hd83xxx_models[] = {
@@ -2861,6 +2879,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
"HP Mini", STAC_92HD83XXX_HP_LED),
SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
+ "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
{} /* terminator */
};
diff --git a/sound/pci/ice1712/ak4xxx.c b/sound/pci/ice1712/ak4xxx.c
index 3981823f9094..179ef7a5f0d1 100644
--- a/sound/pci/ice1712/ak4xxx.c
+++ b/sound/pci/ice1712/ak4xxx.c
@@ -21,7 +21,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index b039b46152c6..f7b1523e8a82 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -880,13 +880,11 @@ static struct snd_pcm_ops snd_ice1712_capture_ops = {
.pointer = snd_ice1712_capture_pointer,
};
-static int snd_ice1712_pcm(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
+static int snd_ice1712_pcm(struct snd_ice1712 *ice, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(ice->card, "ICE1712 consumer", device, 1, 1, &pcm);
if (err < 0)
return err;
@@ -902,22 +900,17 @@ static int snd_ice1712_pcm(struct snd_ice1712 *ice, int device, struct snd_pcm *
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(ice->pci), 64*1024, 64*1024);
- if (rpcm)
- *rpcm = pcm;
-
dev_warn(ice->card->dev,
"Consumer PCM code does not work well at the moment --jk\n");
return 0;
}
-static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
+static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(ice->card, "ICE1712 consumer (DS)", device, 6, 0, &pcm);
if (err < 0)
return err;
@@ -932,9 +925,6 @@ static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device, struct snd_pc
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(ice->pci), 64*1024, 128*1024);
- if (rpcm)
- *rpcm = pcm;
-
return 0;
}
@@ -1260,13 +1250,11 @@ static struct snd_pcm_ops snd_ice1712_capture_pro_ops = {
.pointer = snd_ice1712_capture_pro_pointer,
};
-static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
+static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
err = snd_pcm_new(ice->card, "ICE1712 multi", device, 1, 1, &pcm);
if (err < 0)
return err;
@@ -1282,8 +1270,6 @@ static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device, struct snd
snd_dma_pci_data(ice->pci), 256*1024, 256*1024);
ice->pcm_pro = pcm;
- if (rpcm)
- *rpcm = pcm;
if (ice->cs8427) {
/* assign channels to iec958 */
@@ -2691,14 +2677,14 @@ static int snd_ice1712_probe(struct pci_dev *pci,
c = &no_matched;
__found:
- err = snd_ice1712_pcm_profi(ice, pcm_dev++, NULL);
+ err = snd_ice1712_pcm_profi(ice, pcm_dev++);
if (err < 0) {
snd_card_free(card);
return err;
}
if (ice_has_con_ac97(ice)) {
- err = snd_ice1712_pcm(ice, pcm_dev++, NULL);
+ err = snd_ice1712_pcm(ice, pcm_dev++);
if (err < 0) {
snd_card_free(card);
return err;
@@ -2726,7 +2712,7 @@ static int snd_ice1712_probe(struct pci_dev *pci,
}
if (ice_has_con_ac97(ice)) {
- err = snd_ice1712_pcm_ds(ice, pcm_dev++, NULL);
+ err = snd_ice1712_pcm_ds(ice, pcm_dev++);
if (err < 0) {
snd_card_free(card);
return err;
@@ -2798,7 +2784,6 @@ static void snd_ice1712_remove(struct pci_dev *pci)
#ifdef CONFIG_PM_SLEEP
static int snd_ice1712_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ice1712 *ice = card->private_data;
@@ -2820,16 +2805,11 @@ static int snd_ice1712_suspend(struct device *dev)
if (ice->pm_suspend)
ice->pm_suspend(ice);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_ice1712_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ice1712 *ice = card->private_data;
int rate;
@@ -2837,16 +2817,6 @@ static int snd_ice1712_resume(struct device *dev)
if (!ice->pm_suspend_enabled)
return 0;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
-
- if (pci_enable_device(pci) < 0) {
- snd_card_disconnect(card);
- return -EIO;
- }
-
- pci_set_master(pci);
-
if (ice->cur_rate)
rate = ice->cur_rate;
else
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index d73da157ea14..0b22c00642bb 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -2798,7 +2798,6 @@ static void snd_vt1724_remove(struct pci_dev *pci)
#ifdef CONFIG_PM_SLEEP
static int snd_vt1724_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ice1712 *ice = card->private_data;
@@ -2821,32 +2820,17 @@ static int snd_vt1724_suspend(struct device *dev)
if (ice->pm_suspend)
ice->pm_suspend(ice);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_vt1724_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ice1712 *ice = card->private_data;
if (!ice->pm_suspend_enabled)
return 0;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
-
- if (pci_enable_device(pci) < 0) {
- snd_card_disconnect(card);
- return -EIO;
- }
-
- pci_set_master(pci);
-
snd_vt1724_chip_reset(ice);
if (snd_vt1724_chip_init(ice) < 0) {
diff --git a/sound/pci/ice1712/juli.c b/sound/pci/ice1712/juli.c
index a1536c1a7ed4..4f0213427152 100644
--- a/sound/pci/ice1712/juli.c
+++ b/sound/pci/ice1712/juli.c
@@ -491,15 +491,17 @@ static int juli_resume(struct snd_ice1712 *ice)
/* akm4358 un-reset, un-mute */
snd_akm4xxx_reset(ak, 0);
/* reinit ak4114 */
- snd_ak4114_reinit(spec->ak4114);
+ snd_ak4114_resume(spec->ak4114);
return 0;
}
static int juli_suspend(struct snd_ice1712 *ice)
{
struct snd_akm4xxx *ak = ice->akm;
+ struct juli_spec *spec = ice->spec;
/* akm4358 reset and soft-mute */
snd_akm4xxx_reset(ak, 1);
+ snd_ak4114_suspend(spec->ak4114);
return 0;
}
#endif
diff --git a/sound/pci/ice1712/wm8766.c b/sound/pci/ice1712/wm8766.c
index 21b373b2e260..f7ac8d5e862c 100644
--- a/sound/pci/ice1712/wm8766.c
+++ b/sound/pci/ice1712/wm8766.c
@@ -183,22 +183,6 @@ void snd_wm8766_set_if(struct snd_wm8766 *wm, u16 dac)
snd_wm8766_write(wm, WM8766_REG_IFCTRL, val | dac);
}
-void snd_wm8766_set_master_mode(struct snd_wm8766 *wm, u16 mode)
-{
- u16 val = wm->regs[WM8766_REG_DACCTRL3] & ~WM8766_DAC3_MSTR_MASK;
-
- mode &= WM8766_DAC3_MSTR_MASK;
- snd_wm8766_write(wm, WM8766_REG_DACCTRL3, val | mode);
-}
-
-void snd_wm8766_set_power(struct snd_wm8766 *wm, u16 power)
-{
- u16 val = wm->regs[WM8766_REG_DACCTRL3] & ~WM8766_DAC3_POWER_MASK;
-
- power &= WM8766_DAC3_POWER_MASK;
- snd_wm8766_write(wm, WM8766_REG_DACCTRL3, val | power);
-}
-
void snd_wm8766_volume_restore(struct snd_wm8766 *wm)
{
u16 val = wm->regs[WM8766_REG_DACR1];
diff --git a/sound/pci/ice1712/wm8766.h b/sound/pci/ice1712/wm8766.h
index c119f84bd2c2..18c8d9d47b38 100644
--- a/sound/pci/ice1712/wm8766.h
+++ b/sound/pci/ice1712/wm8766.h
@@ -155,8 +155,6 @@ struct snd_wm8766 {
void snd_wm8766_init(struct snd_wm8766 *wm);
void snd_wm8766_resume(struct snd_wm8766 *wm);
void snd_wm8766_set_if(struct snd_wm8766 *wm, u16 dac);
-void snd_wm8766_set_master_mode(struct snd_wm8766 *wm, u16 mode);
-void snd_wm8766_set_power(struct snd_wm8766 *wm, u16 power);
void snd_wm8766_volume_restore(struct snd_wm8766 *wm);
int snd_wm8766_build_controls(struct snd_wm8766 *wm);
diff --git a/sound/pci/ice1712/wm8776.c b/sound/pci/ice1712/wm8776.c
index e66c0da62014..ebd2fe4b4a57 100644
--- a/sound/pci/ice1712/wm8776.c
+++ b/sound/pci/ice1712/wm8776.c
@@ -452,21 +452,6 @@ void snd_wm8776_resume(struct snd_wm8776 *wm)
snd_wm8776_write(wm, i, wm->regs[i]);
}
-void snd_wm8776_set_dac_if(struct snd_wm8776 *wm, u16 dac)
-{
- snd_wm8776_write(wm, WM8776_REG_DACIFCTRL, dac);
-}
-
-void snd_wm8776_set_adc_if(struct snd_wm8776 *wm, u16 adc)
-{
- snd_wm8776_write(wm, WM8776_REG_ADCIFCTRL, adc);
-}
-
-void snd_wm8776_set_master_mode(struct snd_wm8776 *wm, u16 mode)
-{
- snd_wm8776_write(wm, WM8776_REG_MSTRCTRL, mode);
-}
-
void snd_wm8776_set_power(struct snd_wm8776 *wm, u16 power)
{
snd_wm8776_write(wm, WM8776_REG_PWRDOWN, power);
diff --git a/sound/pci/ice1712/wm8776.h b/sound/pci/ice1712/wm8776.h
index 93a2d6971154..42acef05540c 100644
--- a/sound/pci/ice1712/wm8776.h
+++ b/sound/pci/ice1712/wm8776.h
@@ -216,9 +216,6 @@ struct snd_wm8776 {
void snd_wm8776_init(struct snd_wm8776 *wm);
void snd_wm8776_resume(struct snd_wm8776 *wm);
-void snd_wm8776_set_dac_if(struct snd_wm8776 *wm, u16 dac);
-void snd_wm8776_set_adc_if(struct snd_wm8776 *wm, u16 adc);
-void snd_wm8776_set_master_mode(struct snd_wm8776 *wm, u16 mode);
void snd_wm8776_set_power(struct snd_wm8776 *wm, u16 power);
void snd_wm8776_volume_restore(struct snd_wm8776 *wm);
int snd_wm8776_build_controls(struct snd_wm8776 *wm);
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 4a28252a42b9..2c5484eeb963 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -26,7 +26,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -2654,7 +2654,6 @@ static int snd_intel8x0_free(struct intel8x0 *chip)
*/
static int intel8x0_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0 *chip = card->private_data;
int i;
@@ -2682,12 +2681,6 @@ static int intel8x0_suspend(struct device *dev)
free_irq(chip->irq, chip);
chip->irq = -1;
}
- pci_disable_device(pci);
- pci_save_state(pci);
- /* The call below may disable built-in speaker on some laptops
- * after S2RAM. So, don't touch it.
- */
- /* pci_set_power_state(pci, PCI_D3hot); */
return 0;
}
@@ -2698,14 +2691,6 @@ static int intel8x0_resume(struct device *dev)
struct intel8x0 *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
snd_intel8x0_chip_init(chip, 0);
if (request_irq(pci->irq, snd_intel8x0_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
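The intel8x0 hunks above (and the analogous ones in the drivers that follow) switch the include to <linux/io.h>, the preferred wrapper over <asm/io.h>, and drop the hand-rolled pci_save_state()/pci_set_power_state()/pci_enable_device() sequences: for drivers bound through dev_pm_ops, the PCI core performs the config-space save/restore and D-state transitions itself. A minimal sketch of the resulting callback shape, with hypothetical names:

#include <linux/device.h>
#include <linux/pm.h>
#include <sound/core.h>

/* Hypothetical driver: only chip-specific state is handled here; the PCI
 * bus PM code takes care of config space and power states around it.
 */
static int foo_suspend(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	/* stop DMA engines, save chip registers ... */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);

	/* re-init the chip, restore registers ... */
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);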
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 6b40235be13c..7577f31cd504 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -23,7 +23,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1023,7 +1023,6 @@ static int snd_intel8x0m_free(struct intel8x0m *chip)
*/
static int intel8x0m_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0m *chip = card->private_data;
int i;
@@ -1036,9 +1035,6 @@ static int intel8x0m_suspend(struct device *dev)
free_irq(chip->irq, chip);
chip->irq = -1;
}
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
@@ -1048,14 +1044,6 @@ static int intel8x0m_resume(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0m *chip = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
if (request_irq(pci->irq, snd_intel8x0m_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(dev, "unable to grab IRQ %d, disabling device\n",
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 59d21c9401d2..7acbc21d642a 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
@@ -36,8 +37,6 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
-#include <asm/io.h>
-
// ----------------------------------------------------------------------------
// Debug Stuff
// ----------------------------------------------------------------------------
@@ -585,8 +584,7 @@ static void snd_korg1212_SendStop(struct snd_korg1212 *korg1212)
korg1212->sharedBufferPtr->cardCommand = 0xffffffff;
/* program the timer */
korg1212->stop_pending_cnt = HZ;
- korg1212->timer.expires = jiffies + 1;
- add_timer(&korg1212->timer);
+ mod_timer(&korg1212->timer, jiffies + 1);
}
}
@@ -617,8 +615,7 @@ static void snd_korg1212_timer_func(unsigned long data)
} else {
if (--korg1212->stop_pending_cnt > 0) {
/* reprogram timer */
- korg1212->timer.expires = jiffies + 1;
- add_timer(&korg1212->timer);
+ mod_timer(&korg1212->timer, jiffies + 1);
} else {
snd_printd("korg1212_timer_func timeout\n");
korg1212->sharedBufferPtr->cardCommand = 0;
@@ -2172,9 +2169,8 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
init_waitqueue_head(&korg1212->wait);
spin_lock_init(&korg1212->lock);
mutex_init(&korg1212->open_mutex);
- init_timer(&korg1212->timer);
- korg1212->timer.function = snd_korg1212_timer_func;
- korg1212->timer.data = (unsigned long)korg1212;
+ setup_timer(&korg1212->timer, snd_korg1212_timer_func,
+ (unsigned long)korg1212);
korg1212->irq = -1;
korg1212->clkSource = K1212_CLKIDX_Local;
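The korg1212 hunks above collapse the open-coded timer setup into setup_timer() plus mod_timer(); mod_timer() also replaces the expires/add_timer() pair when re-arming, since it works whether or not the timer is already pending. A standalone sketch of the same conversion (hypothetical driver state, not the korg1212 code):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_chip {
	struct timer_list timer;
	int pending;
};

static void foo_timer_func(unsigned long data)
{
	struct foo_chip *chip = (struct foo_chip *)data;

	if (--chip->pending > 0)
		mod_timer(&chip->timer, jiffies + 1);	/* re-arm in one tick */
}

static void foo_chip_init(struct foo_chip *chip)
{
	/* replaces init_timer() + manual .function/.data assignments */
	setup_timer(&chip->timer, foo_timer_func, (unsigned long)chip);
}

static void foo_start(struct foo_chip *chip)
{
	chip->pending = HZ;
	/* replaces timer.expires = jiffies + 1; add_timer(&timer); */
	mod_timer(&chip->timer, jiffies + 1);
}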
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 4cf4be5ef82a..9ff600084973 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -551,10 +551,8 @@ static void lola_free(struct lola *chip)
lola_free_mixer(chip);
if (chip->irq >= 0)
free_irq(chip->irq, (void *)chip);
- if (chip->bar[0].remap_addr)
- iounmap(chip->bar[0].remap_addr);
- if (chip->bar[1].remap_addr)
- iounmap(chip->bar[1].remap_addr);
+ iounmap(chip->bar[0].remap_addr);
+ iounmap(chip->bar[1].remap_addr);
if (chip->rb.area)
snd_dma_free_pages(&chip->rb);
pci_release_regions(chip->pci);
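The lola hunk above, like several later ones (mixart, nm256, hdsp, hdspm, rme9652, sis7019, ymfpci, pmac), removes the NULL check in front of iounmap(); the cleanup relies on iounmap() tolerating a NULL argument the way kfree() does, so the guard is redundant. Sketch with hypothetical fields:

#include <linux/io.h>

struct foo_chip {
	void __iomem *bar0;
	void __iomem *bar1;	/* may never have been mapped */
};

static void foo_unmap(struct foo_chip *chip)
{
	/* unconditional: a NULL mapping is simply ignored */
	iounmap(chip->bar0);
	iounmap(chip->bar1);
}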
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 98823d11d485..9be660993bd0 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -31,7 +31,7 @@
#define CARD_NAME "ESS Maestro3/Allegro/Canyon3D-2"
#define DRIVER_NAME "Maestro3"
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -2395,7 +2395,6 @@ static int snd_m3_free(struct snd_m3 *chip)
#ifdef CONFIG_PM_SLEEP
static int m3_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_m3 *chip = card->private_data;
int i, dsp_index;
@@ -2421,16 +2420,11 @@ static int m3_suspend(struct device *dev)
for (i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++)
chip->suspend_mem[dsp_index++] =
snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA, i);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int m3_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_m3 *chip = card->private_data;
int i, dsp_index;
@@ -2438,15 +2432,6 @@ static int m3_resume(struct device *dev)
if (chip->suspend_mem == NULL)
return 0;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
/* first lets just bring everything back. .*/
snd_m3_outw(chip, 0, 0x54);
snd_m3_outw(chip, 0, 0x56);
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 1faf47e81570..c3a9f39f8d61 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1114,10 +1114,9 @@ static int snd_mixart_free(struct mixart_mgr *mgr)
}
/* release the i/o ports */
- for (i = 0; i < 2; i++) {
- if (mgr->mem[i].virt)
- iounmap(mgr->mem[i].virt);
- }
+ for (i = 0; i < 2; ++i)
+ iounmap(mgr->mem[i].virt);
+
pci_release_regions(mgr->pci);
/* free flowarray */
diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c
index fe80313674d9..dccf3db48fe0 100644
--- a/sound/pci/mixart/mixart_core.c
+++ b/sound/pci/mixart/mixart_core.c
@@ -23,8 +23,8 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <sound/core.h>
#include "mixart.h"
#include "mixart_hwdep.h"
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index 9996a4dead0f..5bfd3ac80db5 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -26,7 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include "mixart.h"
#include "mixart_mixer.h"
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 4e41a4e29a1e..4735e27cc773 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -24,7 +24,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1392,7 +1392,6 @@ snd_nm256_peek_for_sig(struct nm256 *chip)
*/
static int nm256_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct nm256 *chip = card->private_data;
@@ -1400,15 +1399,11 @@ static int nm256_suspend(struct device *dev)
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
chip->coeffs_current = 0;
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int nm256_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct nm256 *chip = card->private_data;
int i;
@@ -1416,15 +1411,6 @@ static int nm256_resume(struct device *dev)
/* Perform a full reset on the hardware */
chip->in_resume = 1;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_nm256_init_chip(chip);
/* restore ac97 */
@@ -1460,10 +1446,8 @@ static int snd_nm256_free(struct nm256 *chip)
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->cport)
- iounmap(chip->cport);
- if (chip->buffer)
- iounmap(chip->buffer);
+ iounmap(chip->cport);
+ iounmap(chip->buffer);
release_and_free_resource(chip->res_cport);
release_and_free_resource(chip->res_buffer);
diff --git a/sound/pci/oxygen/Makefile b/sound/pci/oxygen/Makefile
index 8f4c409f7e45..ab085d753661 100644
--- a/sound/pci/oxygen/Makefile
+++ b/sound/pci/oxygen/Makefile
@@ -1,8 +1,10 @@
snd-oxygen-lib-objs := oxygen_io.o oxygen_lib.o oxygen_mixer.o oxygen_pcm.o
snd-oxygen-objs := oxygen.o xonar_dg_mixer.o xonar_dg.o
+snd-se6x-objs := se6x.o
snd-virtuoso-objs := virtuoso.o xonar_lib.o \
xonar_pcm179x.o xonar_cs43xx.o xonar_wm87x6.o xonar_hdmi.o
obj-$(CONFIG_SND_OXYGEN_LIB) += snd-oxygen-lib.o
obj-$(CONFIG_SND_OXYGEN) += snd-oxygen.o
+obj-$(CONFIG_SND_SE6X) += snd-se6x.o
obj-$(CONFIG_SND_VIRTUOSO) += snd-virtuoso.o
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index c10ab077afd8..293d0b9a50c3 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -35,7 +35,7 @@
#define CAPTURE_1_FROM_SPDIF 0x0080
#define CAPTURE_2_FROM_I2S_2 0x0100
#define CAPTURE_2_FROM_AC97_1 0x0200
- /* CAPTURE_3_FROM_I2S_3 not implemented */
+#define CAPTURE_3_FROM_I2S_3 0x0400
#define MIDI_OUTPUT 0x0800
#define MIDI_INPUT 0x1000
#define AC97_CD_INPUT 0x2000
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index 4b8a32c37e31..c7851da37749 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -20,9 +20,9 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/mpu401.h>
-#include <asm/io.h>
#include "oxygen.h"
u8 oxygen_read8(struct oxygen *chip, unsigned int reg)
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index b67e30602473..ffff3b25fd73 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -319,11 +319,12 @@ static void oxygen_restore_eeprom(struct oxygen *chip,
static void configure_pcie_bridge(struct pci_dev *pci)
{
- enum { PEX811X, PI7C9X110 };
+ enum { PEX811X, PI7C9X110, XIO2001 };
static const struct pci_device_id bridge_ids[] = {
{ PCI_VDEVICE(PLX, 0x8111), .driver_data = PEX811X },
{ PCI_VDEVICE(PLX, 0x8112), .driver_data = PEX811X },
{ PCI_DEVICE(0x12d8, 0xe110), .driver_data = PI7C9X110 },
+ { PCI_VDEVICE(TI, 0x8240), .driver_data = XIO2001 },
{ }
};
struct pci_dev *bridge;
@@ -357,6 +358,14 @@ static void configure_pcie_bridge(struct pci_dev *pci)
tmp |= 1; /* park the PCI arbiter to the sound chip */
pci_write_config_dword(bridge, 0x40, tmp);
break;
+
+ case XIO2001: /* Texas Instruments XIO2001 PCIe/PCI bridge */
+ pci_read_config_dword(bridge, 0xe8, &tmp);
+ tmp &= ~0xf; /* request length limit: 64 bytes */
+ tmp &= ~(0xf << 8);
+ tmp |= 1 << 8; /* request count limit: one buffer */
+ pci_write_config_dword(bridge, 0xe8, tmp);
+ break;
}
}
@@ -441,9 +450,18 @@ static void oxygen_init(struct oxygen *chip)
oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
OXYGEN_I2S_MASTER |
OXYGEN_I2S_MUTE_MCLK);
- oxygen_write16(chip, OXYGEN_I2S_C_FORMAT,
- OXYGEN_I2S_MASTER |
- OXYGEN_I2S_MUTE_MCLK);
+ if (chip->model.device_config & CAPTURE_3_FROM_I2S_3)
+ oxygen_write16(chip, OXYGEN_I2S_C_FORMAT,
+ OXYGEN_RATE_48000 |
+ chip->model.adc_i2s_format |
+ OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+ OXYGEN_I2S_BITS_16 |
+ OXYGEN_I2S_MASTER |
+ OXYGEN_I2S_BCLK_64);
+ else
+ oxygen_write16(chip, OXYGEN_I2S_C_FORMAT,
+ OXYGEN_I2S_MASTER |
+ OXYGEN_I2S_MUTE_MCLK);
oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
OXYGEN_SPDIF_OUT_ENABLE |
OXYGEN_SPDIF_LOOPBACK);
@@ -728,7 +746,6 @@ EXPORT_SYMBOL(oxygen_pci_remove);
#ifdef CONFIG_PM_SLEEP
static int oxygen_pci_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct oxygen *chip = card->private_data;
unsigned int i, saved_interrupt_mask;
@@ -736,8 +753,7 @@ static int oxygen_pci_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < PCM_COUNT; ++i)
- if (chip->streams[i])
- snd_pcm_suspend(chip->streams[i]);
+ snd_pcm_suspend(chip->streams[i]);
if (chip->model.suspend)
chip->model.suspend(chip);
@@ -753,10 +769,6 @@ static int oxygen_pci_suspend(struct device *dev)
flush_work(&chip->spdif_input_bits_work);
flush_work(&chip->gpio_work);
chip->interrupt_mask = saved_interrupt_mask;
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
@@ -788,20 +800,10 @@ static void oxygen_restore_ac97(struct oxygen *chip, unsigned int codec)
static int oxygen_pci_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct oxygen *chip = card->private_data;
unsigned int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "cannot reenable device");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
for (i = 0; i < OXYGEN_IO_SIZE; ++i)
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 5988e044c519..6492bca8c70f 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -786,6 +786,9 @@ static const struct snd_kcontrol_new controls[] = {
.get = upmix_get,
.put = upmix_put,
},
+};
+
+static const struct snd_kcontrol_new spdif_output_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, SWITCH),
@@ -938,6 +941,33 @@ static const struct {
},
},
{
+ .pcm_dev = CAPTURE_3_FROM_I2S_3,
+ .controls = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Analog Input Monitor Playback Switch",
+ .index = 2,
+ .info = snd_ctl_boolean_mono_info,
+ .get = monitor_get,
+ .put = monitor_put,
+ .private_value = OXYGEN_ADC_MONITOR_C,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Analog Input Monitor Playback Volume",
+ .index = 2,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .info = monitor_volume_info,
+ .get = monitor_get,
+ .put = monitor_put,
+ .private_value = OXYGEN_ADC_MONITOR_C_HALF_VOL
+ | (1 << 8),
+ .tlv = { .p = monitor_db_scale, },
+ },
+ },
+ },
+ {
.pcm_dev = CAPTURE_1_FROM_SPDIF,
.controls = {
{
@@ -1073,6 +1103,12 @@ int oxygen_mixer_init(struct oxygen *chip)
err = add_controls(chip, controls, ARRAY_SIZE(controls));
if (err < 0)
return err;
+ if (chip->model.device_config & PLAYBACK_1_TO_SPDIF) {
+ err = add_controls(chip, spdif_output_controls,
+ ARRAY_SIZE(spdif_output_controls));
+ if (err < 0)
+ return err;
+ }
if (chip->model.device_config & CAPTURE_1_FROM_SPDIF) {
err = add_controls(chip, spdif_input_controls,
ARRAY_SIZE(spdif_input_controls));
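In the oxygen_mixer.c hunks above, the S/PDIF output controls move out of the always-registered array and are added only when the model's device_config carries PLAYBACK_1_TO_SPDIF. A rough sketch of that capability-gated registration pattern, with hypothetical names and flag:

#include <sound/core.h>
#include <sound/control.h>

#define FOO_HAS_SPDIF_OUT	0x01	/* hypothetical capability bit */

static int foo_add_controls(struct snd_card *card, void *chip,
			    const struct snd_kcontrol_new *tpl,
			    unsigned int count)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; ++i) {
		err = snd_ctl_add(card, snd_ctl_new1(&tpl[i], chip));
		if (err < 0)
			return err;
	}
	return 0;
}

static int foo_mixer_init(struct snd_card *card, void *chip,
			  unsigned int device_config,
			  const struct snd_kcontrol_new *spdif_out_ctls,
			  unsigned int n_spdif_out_ctls)
{
	if (!(device_config & FOO_HAS_SPDIF_OUT))
		return 0;	/* this model has no S/PDIF output */
	return foo_add_controls(card, chip, spdif_out_ctls, n_spdif_out_ctls);
}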
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index 02828240ba15..aa2ebd1d6d12 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -144,9 +144,11 @@ static int oxygen_open(struct snd_pcm_substream *substream,
runtime->hw = *oxygen_hardware[channel];
switch (channel) {
case PCM_C:
- runtime->hw.rates &= ~(SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_64000);
- runtime->hw.rate_min = 44100;
+ if (chip->model.device_config & CAPTURE_1_FROM_SPDIF) {
+ runtime->hw.rates &= ~(SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_64000);
+ runtime->hw.rate_min = 44100;
+ }
/* fall through */
case PCM_A:
case PCM_B:
@@ -430,17 +432,36 @@ static int oxygen_rec_c_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct oxygen *chip = snd_pcm_substream_chip(substream);
+ bool is_spdif;
int err;
err = oxygen_hw_params(substream, hw_params);
if (err < 0)
return err;
+ is_spdif = chip->model.device_config & CAPTURE_1_FROM_SPDIF;
+
spin_lock_irq(&chip->reg_lock);
oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
oxygen_format(hw_params) << OXYGEN_REC_FORMAT_C_SHIFT,
OXYGEN_REC_FORMAT_C_MASK);
+ if (!is_spdif)
+ oxygen_write16_masked(chip, OXYGEN_I2S_C_FORMAT,
+ oxygen_rate(hw_params) |
+ chip->model.adc_i2s_format |
+ get_mclk(chip, PCM_B, hw_params) |
+ oxygen_i2s_bits(hw_params),
+ OXYGEN_I2S_RATE_MASK |
+ OXYGEN_I2S_FORMAT_MASK |
+ OXYGEN_I2S_MCLK_MASK |
+ OXYGEN_I2S_BITS_MASK);
spin_unlock_irq(&chip->reg_lock);
+
+ if (!is_spdif) {
+ mutex_lock(&chip->mutex);
+ chip->model.set_adc_params(chip, hw_params);
+ mutex_unlock(&chip->mutex);
+ }
return 0;
}
@@ -676,11 +697,6 @@ static struct snd_pcm_ops oxygen_ac97_ops = {
.pointer = oxygen_pointer,
};
-static void oxygen_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
int oxygen_pcm_init(struct oxygen *chip)
{
struct snd_pcm *pcm;
@@ -705,7 +721,6 @@ int oxygen_pcm_init(struct oxygen *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_b_ops);
pcm->private_data = chip;
- pcm->private_free = oxygen_pcm_free;
strcpy(pcm->name, "Multichannel");
if (outs)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
@@ -734,7 +749,6 @@ int oxygen_pcm_init(struct oxygen *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_c_ops);
pcm->private_data = chip;
- pcm->private_free = oxygen_pcm_free;
strcpy(pcm->name, "Digital");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
@@ -765,12 +779,29 @@ int oxygen_pcm_init(struct oxygen *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_b_ops);
pcm->private_data = chip;
- pcm->private_free = oxygen_pcm_free;
strcpy(pcm->name, outs ? "Front Panel" : "Analog 2");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
+
+ ins = !!(chip->model.device_config & CAPTURE_3_FROM_I2S_3);
+ if (ins) {
+ err = snd_pcm_new(chip->card, "Analog3", 3, 0, ins, &pcm);
+ if (err < 0)
+ return err;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &oxygen_rec_c_ops);
+ oxygen_write8_masked(chip, OXYGEN_REC_ROUTING,
+ OXYGEN_REC_C_ROUTE_I2S_ADC_3,
+ OXYGEN_REC_C_ROUTE_MASK);
+ pcm->private_data = chip;
+ strcpy(pcm->name, "Analog 3");
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ DEFAULT_BUFFER_BYTES,
+ BUFFER_BYTES_MAX);
+ }
return 0;
}
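The oxygen_pcm.c hunks above delete the private_free hook whose only job was to call snd_pcm_lib_preallocate_free_for_all(); presumably the PCM core now releases the preallocated buffers itself when the PCM device is destroyed, so the hook is dead weight. A trimmed-down sketch of the resulting constructor (hypothetical names, not the oxygen code):

#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>

struct foo_chip {
	struct snd_card *card;
	struct pci_dev *pci;
};

static int foo_pcm_init(struct foo_chip *chip)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(chip->card, "FOO", 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	pcm->private_data = chip;
	/* no pcm->private_free needed just to free the preallocation */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64 * 1024, 256 * 1024);
	return 0;
}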
diff --git a/sound/pci/oxygen/se6x.c b/sound/pci/oxygen/se6x.c
new file mode 100644
index 000000000000..f70d514c1084
--- /dev/null
+++ b/sound/pci/oxygen/se6x.c
@@ -0,0 +1,160 @@
+/*
+ * C-Media CMI8787 driver for the Studio Evolution SE6X
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2.
+ *
+ * This driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * CMI8787:
+ *
+ * SPI -> microcontroller (not actually used)
+ * GPIO 0 -> do.
+ * GPIO 2 -> do.
+ *
+ * DAC0 -> both PCM1792A (L+R, each in mono mode)
+ * ADC1 <- 1st PCM1804
+ * ADC2 <- 2nd PCM1804
+ * ADC3 <- 3rd PCM1804
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include "oxygen.h"
+
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_DESCRIPTION("Studio Evolution SE6X driver");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("{{Studio Evolution,SE6X}}");
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "card index");
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string");
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "enable card");
+
+static const struct pci_device_id se6x_ids[] = {
+ { OXYGEN_PCI_SUBID(0x13f6, 0x8788) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, se6x_ids);
+
+static void se6x_init(struct oxygen *chip)
+{
+ oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, 0x005);
+
+ snd_component_add(chip->card, "PCM1792A");
+ snd_component_add(chip->card, "PCM1804");
+}
+
+static int se6x_control_filter(struct snd_kcontrol_new *template)
+{
+ /* no DAC volume/mute */
+ if (!strncmp(template->name, "Master Playback ", 16))
+ return 1;
+ return 0;
+}
+
+static void se6x_cleanup(struct oxygen *chip)
+{
+}
+
+static void set_pcm1792a_params(struct oxygen *chip,
+ struct snd_pcm_hw_params *params)
+{
+ /* nothing to do (the microcontroller monitors DAC_LRCK) */
+}
+
+static void set_pcm1804_params(struct oxygen *chip,
+ struct snd_pcm_hw_params *params)
+{
+}
+
+static unsigned int se6x_adjust_dac_routing(struct oxygen *chip,
+ unsigned int play_routing)
+{
+ /* route the same stereo pair to DAC0 and DAC1 */
+ return ( play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
+ ((play_routing << 2) & OXYGEN_PLAY_DAC1_SOURCE_MASK);
+}
+
+static const struct oxygen_model model_se6x = {
+ .shortname = "Studio Evolution SE6X",
+ .longname = "C-Media Oxygen HD Audio",
+ .chip = "CMI8787",
+ .init = se6x_init,
+ .control_filter = se6x_control_filter,
+ .cleanup = se6x_cleanup,
+ .set_dac_params = set_pcm1792a_params,
+ .set_adc_params = set_pcm1804_params,
+ .adjust_dac_routing = se6x_adjust_dac_routing,
+ .device_config = PLAYBACK_0_TO_I2S |
+ CAPTURE_0_FROM_I2S_1 |
+ CAPTURE_2_FROM_I2S_2 |
+ CAPTURE_3_FROM_I2S_3,
+ .dac_channels_pcm = 2,
+ .function_flags = OXYGEN_FUNCTION_SPI,
+ .dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+ .adc_mclks = OXYGEN_MCLKS(256, 256, 128),
+ .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+ .adc_i2s_format = OXYGEN_I2S_FORMAT_I2S,
+};
+
+static int se6x_get_model(struct oxygen *chip,
+ const struct pci_device_id *pci_id)
+{
+ chip->model = model_se6x;
+ return 0;
+}
+
+static int se6x_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ static int dev;
+ int err;
+
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+ if (!enable[dev]) {
+ ++dev;
+ return -ENOENT;
+ }
+ err = oxygen_pci_probe(pci, index[dev], id[dev], THIS_MODULE,
+ se6x_ids, se6x_get_model);
+ if (err >= 0)
+ ++dev;
+ return err;
+}
+
+static struct pci_driver se6x_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = se6x_ids,
+ .probe = se6x_probe,
+ .remove = oxygen_pci_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &oxygen_pci_pm,
+ },
+#endif
+ .shutdown = oxygen_pci_shutdown,
+};
+
+module_pci_driver(se6x_driver);
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index 181f7729d409..c5194f5b150a 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include "pcxhr.h"
#include "pcxhr_mixer.h"
diff --git a/sound/pci/pcxhr/pcxhr_hwdep.c b/sound/pci/pcxhr/pcxhr_hwdep.c
index 15a8ce5f1f48..80633055e17e 100644
--- a/sound/pci/pcxhr/pcxhr_hwdep.c
+++ b/sound/pci/pcxhr/pcxhr_hwdep.c
@@ -25,7 +25,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/hwdep.h>
#include "pcxhr.h"
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 6abc2ac8fffb..94639d6b5fb5 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -99,7 +99,7 @@
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -1153,7 +1153,6 @@ static void riptide_handleirq(unsigned long dev_id)
#ifdef CONFIG_PM_SLEEP
static int riptide_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_riptide *chip = card->private_data;
@@ -1161,27 +1160,14 @@ static int riptide_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int riptide_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_riptide *chip = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- printk(KERN_ERR "riptide: pci_enable_device failed, "
- "disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
snd_riptide_initialize(chip);
snd_ac97_resume(chip->ac97);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
@@ -1706,14 +1692,11 @@ static struct snd_pcm_ops snd_riptide_capture_ops = {
.pointer = snd_riptide_pointer,
};
-static int
-snd_riptide_pcm(struct snd_riptide *chip, int device, struct snd_pcm **rpcm)
+static int snd_riptide_pcm(struct snd_riptide *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err =
snd_pcm_new(chip->card, "RIPTIDE", device, PLAYBACK_SUBSTREAMS, 1,
&pcm)) < 0)
@@ -1729,8 +1712,6 @@ snd_riptide_pcm(struct snd_riptide *chip, int device, struct snd_pcm **rpcm)
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
64 * 1024, 128 * 1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -2030,32 +2011,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
static int dev;
struct gameport *gameport;
+ int ret;
if (dev >= SNDRV_CARDS)
return -ENODEV;
+
if (!enable[dev]) {
- dev++;
- return -ENOENT;
+ ret = -ENOENT;
+ goto inc_dev;
}
- if (!joystick_port[dev++])
- return 0;
+ if (!joystick_port[dev]) {
+ ret = 0;
+ goto inc_dev;
+ }
gameport = gameport_allocate_port();
- if (!gameport)
- return -ENOMEM;
+ if (!gameport) {
+ ret = -ENOMEM;
+ goto inc_dev;
+ }
if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
snd_printk(KERN_WARNING
"Riptide: cannot grab gameport 0x%x\n",
joystick_port[dev]);
gameport_free_port(gameport);
- return -EBUSY;
+ ret = -EBUSY;
+ goto inc_dev;
}
gameport->io = joystick_port[dev];
gameport_register_port(gameport);
pci_set_drvdata(pci, gameport);
- return 0;
+
+ ret = 0;
+inc_dev:
+ dev++;
+ return ret;
}
static void snd_riptide_joystick_remove(struct pci_dev *pci)
@@ -2092,7 +2084,7 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
if (err < 0)
goto error;
card->private_data = chip;
- err = snd_riptide_pcm(chip, 0, NULL);
+ err = snd_riptide_pcm(chip, 0);
if (err < 0)
goto error;
err = snd_riptide_mixer(chip);
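The riptide hunks above (and the trident and ymfpci ones later in this series) drop the struct snd_pcm **rpcm out-parameter: every caller either passed NULL or can reach the PCM through the chip structure, so the extra plumbing buys nothing. Sketch of the simplified constructor shape (hypothetical driver):

#include <sound/core.h>
#include <sound/pcm.h>

struct foo_chip {
	struct snd_card *card;
	struct snd_pcm *pcm;	/* the handle callers used to receive via rpcm */
};

/* was: static int foo_pcm_new(struct foo_chip *chip, int device,
 *                             struct snd_pcm **rpcm);
 */
static int foo_pcm_new(struct foo_chip *chip, int device)
{
	int err = snd_pcm_new(chip->card, "FOO", device, 1, 1, &chip->pcm);

	if (err < 0)
		return err;
	chip->pcm->private_data = chip;
	return 0;
}

static int foo_probe_pcms(struct foo_chip *chip)
{
	return foo_pcm_new(chip, 0);	/* was foo_pcm_new(chip, 0, NULL) */
}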
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 6c60dcd2e5a1..23d7f5d30c41 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -75,6 +75,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
@@ -85,8 +86,6 @@
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
-
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
@@ -632,7 +631,7 @@ snd_rme32_setframelog(struct rme32 * rme32, int n_channels, int is_playback)
}
}
-static int snd_rme32_setformat(struct rme32 * rme32, int format)
+static int snd_rme32_setformat(struct rme32 *rme32, snd_pcm_format_t format)
{
switch (format) {
case SNDRV_PCM_FORMAT_S16_LE:
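The rme32 hunk above (and the matching rme96 ones right after) change the format argument from a plain int to snd_pcm_format_t; that is a sparse __bitwise type, so taking it directly lets the SNDRV_PCM_FORMAT_* comparisons type-check cleanly. The helper below is a sketch of the signature in isolation, not the RME code:

#include <linux/errno.h>
#include <sound/pcm.h>

/* returns the sample width to program, or -EINVAL for unsupported formats */
static int foo_setformat(snd_pcm_format_t format)
{
	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		return 16;
	case SNDRV_PCM_FORMAT_S32_LE:
		return 32;
	default:
		return -EINVAL;
	}
}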
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 2f1a85185a16..2306ccf7281e 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
@@ -38,8 +39,6 @@
#include <sound/asoundef.h>
#include <sound/initval.h>
-#include <asm/io.h>
-
/* note, two last pcis should be equal, it is not a bug */
MODULE_AUTHOR("Anders Torger <torger@ludd.luth.se>");
@@ -922,8 +921,7 @@ snd_rme96_setframelog(struct rme96 *rme96,
}
static int
-snd_rme96_playback_setformat(struct rme96 *rme96,
- int format)
+snd_rme96_playback_setformat(struct rme96 *rme96, snd_pcm_format_t format)
{
switch (format) {
case SNDRV_PCM_FORMAT_S16_LE:
@@ -940,8 +938,7 @@ snd_rme96_playback_setformat(struct rme96 *rme96,
}
static int
-snd_rme96_capture_setformat(struct rme96 *rme96,
- int format)
+snd_rme96_capture_setformat(struct rme96 *rme96, snd_pcm_format_t format)
{
switch (format) {
case SNDRV_PCM_FORMAT_S16_LE:
@@ -2358,7 +2355,6 @@ snd_rme96_create_switches(struct snd_card *card,
static int rme96_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct rme96 *rme96 = card->private_data;
@@ -2381,26 +2377,14 @@ static int rme96_suspend(struct device *dev)
/* disable the DAC */
rme96->areg &= ~RME96_AR_DAC_EN;
writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
-
- pci_disable_device(pci);
- pci_save_state(pci);
-
return 0;
}
static int rme96_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct rme96 *rme96 = card->private_data;
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
-
/* reset playback and record buffer pointers */
writel(0, rme96->iobase + RME96_IO_SET_PLAY_POS
+ rme96->playback_pointer);
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index cf5a6c8b9a63..c19e021ccf66 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -42,7 +43,6 @@
#include <asm/byteorder.h>
#include <asm/current.h>
-#include <asm/io.h>
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
@@ -1428,10 +1428,8 @@ static void snd_hdsp_midi_output_timer(unsigned long data)
leaving istimer wherever it was set before.
*/
- if (hmidi->istimer) {
- hmidi->timer.expires = 1 + jiffies;
- add_timer(&hmidi->timer);
- }
+ if (hmidi->istimer)
+ mod_timer(&hmidi->timer, 1 + jiffies);
spin_unlock_irqrestore (&hmidi->lock, flags);
}
@@ -1445,11 +1443,9 @@ static void snd_hdsp_midi_output_trigger(struct snd_rawmidi_substream *substream
spin_lock_irqsave (&hmidi->lock, flags);
if (up) {
if (!hmidi->istimer) {
- init_timer(&hmidi->timer);
- hmidi->timer.function = snd_hdsp_midi_output_timer;
- hmidi->timer.data = (unsigned long) hmidi;
- hmidi->timer.expires = 1 + jiffies;
- add_timer(&hmidi->timer);
+ setup_timer(&hmidi->timer, snd_hdsp_midi_output_timer,
+ (unsigned long) hmidi);
+ mod_timer(&hmidi->timer, 1 + jiffies);
hmidi->istimer++;
}
} else {
@@ -5309,9 +5305,7 @@ static int snd_hdsp_free(struct hdsp *hdsp)
release_firmware(hdsp->firmware);
vfree(hdsp->fw_uploaded);
-
- if (hdsp->iobase)
- iounmap(hdsp->iobase);
+ iounmap(hdsp->iobase);
if (hdsp->port)
pci_release_regions(hdsp->pci);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 3342705a5715..2c363fdca9fd 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -136,7 +136,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/math64.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -1957,10 +1957,8 @@ static void snd_hdspm_midi_output_timer(unsigned long data)
leaving istimer wherever it was set before.
*/
- if (hmidi->istimer) {
- hmidi->timer.expires = 1 + jiffies;
- add_timer(&hmidi->timer);
- }
+ if (hmidi->istimer)
+ mod_timer(&hmidi->timer, 1 + jiffies);
spin_unlock_irqrestore (&hmidi->lock, flags);
}
@@ -1975,11 +1973,9 @@ snd_hdspm_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
spin_lock_irqsave (&hmidi->lock, flags);
if (up) {
if (!hmidi->istimer) {
- init_timer(&hmidi->timer);
- hmidi->timer.function = snd_hdspm_midi_output_timer;
- hmidi->timer.data = (unsigned long) hmidi;
- hmidi->timer.expires = 1 + jiffies;
- add_timer(&hmidi->timer);
+ setup_timer(&hmidi->timer, snd_hdspm_midi_output_timer,
+ (unsigned long) hmidi);
+ mod_timer(&hmidi->timer, 1 + jiffies);
hmidi->istimer++;
}
} else {
@@ -6965,9 +6961,7 @@ static int snd_hdspm_free(struct hdspm * hdspm)
free_irq(hdspm->irq, (void *) hdspm);
kfree(hdspm->mixer);
-
- if (hdspm->iobase)
- iounmap(hdspm->iobase);
+ iounmap(hdspm->iobase);
if (hdspm->port)
pci_release_regions(hdspm->pci);
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 6521521853b8..fdbc0aa2776a 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -34,7 +35,6 @@
#include <sound/initval.h>
#include <asm/current.h>
-#include <asm/io.h>
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
@@ -1756,8 +1756,7 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
if (rme9652->irq >= 0)
free_irq(rme9652->irq, (void *)rme9652);
- if (rme9652->iobase)
- iounmap(rme9652->iobase);
+ iounmap(rme9652->iobase);
if (rme9652->port)
pci_release_regions(rme9652->pci);
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 7f6a0a0d115a..efe669b80256 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1064,12 +1064,9 @@ static int sis_chip_free(struct sis7019 *sis)
if (sis->irq >= 0)
free_irq(sis->irq, sis);
- if (sis->ioaddr)
- iounmap(sis->ioaddr);
-
+ iounmap(sis->ioaddr);
pci_release_regions(sis->pci);
pci_disable_device(sis->pci);
-
sis_free_suspend(sis);
return 0;
}
@@ -1211,7 +1208,6 @@ static int sis_chip_init(struct sis7019 *sis)
#ifdef CONFIG_PM_SLEEP
static int sis_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct sis7019 *sis = card->private_data;
void __iomem *ioaddr = sis->ioaddr;
@@ -1240,9 +1236,6 @@ static int sis_suspend(struct device *dev)
ioaddr += 4096;
}
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
@@ -1254,14 +1247,6 @@ static int sis_resume(struct device *dev)
void __iomem *ioaddr = sis->ioaddr;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
-
- if (pci_enable_device(pci) < 0) {
- dev_err(&pci->dev, "unable to re-enable device\n");
- goto error;
- }
-
if (sis_chip_init(sis)) {
dev_err(&pci->dev, "unable to re-init controller\n");
goto error;
@@ -1284,7 +1269,6 @@ static int sis_resume(struct device *dev)
memset(sis->suspend_state[0], 0, 4096);
sis->irq = pci->irq;
- pci_set_master(pci);
if (sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT)
snd_ac97_resume(sis->ac97[0]);
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 313a7328bf9c..0f40624a4275 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -30,6 +30,7 @@
#include <linux/gameport.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -39,8 +40,6 @@
#include <sound/opl3.h>
#include <sound/initval.h>
-#include <asm/io.h>
-
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("S3 SonicVibes PCI");
MODULE_LICENSE("GPL");
@@ -880,8 +879,7 @@ static struct snd_pcm_ops snd_sonicvibes_capture_ops = {
.pointer = snd_sonicvibes_capture_pointer,
};
-static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device,
- struct snd_pcm **rpcm)
+static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device)
{
struct snd_pcm *pcm;
int err;
@@ -902,8 +900,6 @@ static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(sonic->pci), 64*1024, 128*1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -1491,7 +1487,7 @@ static int snd_sonic_probe(struct pci_dev *pci,
(unsigned long long)pci_resource_start(pci, 1),
sonic->irq);
- if ((err = snd_sonicvibes_pcm(sonic, 0, NULL)) < 0) {
+ if ((err = snd_sonicvibes_pcm(sonic, 0)) < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index a54cd6879b31..cedf13b64803 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -127,21 +127,21 @@ static int snd_trident_probe(struct pci_dev *pci,
sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d",
card->shortname, trident->port, trident->irq);
- if ((err = snd_trident_pcm(trident, pcm_dev++, NULL)) < 0) {
+ if ((err = snd_trident_pcm(trident, pcm_dev++)) < 0) {
snd_card_free(card);
return err;
}
switch (trident->device) {
case TRIDENT_DEVICE_ID_DX:
case TRIDENT_DEVICE_ID_NX:
- if ((err = snd_trident_foldback_pcm(trident, pcm_dev++, NULL)) < 0) {
+ if ((err = snd_trident_foldback_pcm(trident, pcm_dev++)) < 0) {
snd_card_free(card);
return err;
}
break;
}
if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018) {
- if ((err = snd_trident_spdif_pcm(trident, pcm_dev++, NULL)) < 0) {
+ if ((err = snd_trident_spdif_pcm(trident, pcm_dev++)) < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/trident/trident.h b/sound/pci/trident/trident.h
index 5f110eb56e47..9624e5937719 100644
--- a/sound/pci/trident/trident.h
+++ b/sound/pci/trident/trident.h
@@ -420,9 +420,9 @@ int snd_trident_create(struct snd_card *card,
struct snd_trident ** rtrident);
int snd_trident_create_gameport(struct snd_trident *trident);
-int snd_trident_pcm(struct snd_trident * trident, int device, struct snd_pcm **rpcm);
-int snd_trident_foldback_pcm(struct snd_trident * trident, int device, struct snd_pcm **rpcm);
-int snd_trident_spdif_pcm(struct snd_trident * trident, int device, struct snd_pcm **rpcm);
+int snd_trident_pcm(struct snd_trident *trident, int device);
+int snd_trident_foldback_pcm(struct snd_trident *trident, int device);
+int snd_trident_spdif_pcm(struct snd_trident *trident, int device);
int snd_trident_attach_synthesizer(struct snd_trident * trident);
struct snd_trident_voice *snd_trident_alloc_voice(struct snd_trident * trident, int type,
int client, int port);
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 57cd757acfe7..b72be035f785 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -36,6 +36,7 @@
#include <linux/gameport.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/info.h>
@@ -44,8 +45,6 @@
#include "trident.h"
#include <sound/asoundef.h>
-#include <asm/io.h>
-
static int snd_trident_pcm_mixer_build(struct snd_trident *trident,
struct snd_trident_voice * voice,
struct snd_pcm_substream *substream);
@@ -2172,14 +2171,11 @@ static struct snd_pcm_ops snd_trident_spdif_7018_ops = {
---------------------------------------------------------------------------*/
-int snd_trident_pcm(struct snd_trident *trident,
- int device, struct snd_pcm **rpcm)
+int snd_trident_pcm(struct snd_trident *trident, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, trident->ChanPCM, 1, &pcm)) < 0)
return err;
@@ -2214,8 +2210,6 @@ int snd_trident_pcm(struct snd_trident *trident,
snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
}
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -2230,16 +2224,13 @@ int snd_trident_pcm(struct snd_trident *trident,
---------------------------------------------------------------------------*/
-int snd_trident_foldback_pcm(struct snd_trident *trident,
- int device, struct snd_pcm **rpcm)
+int snd_trident_foldback_pcm(struct snd_trident *trident, int device)
{
struct snd_pcm *foldback;
int err;
int num_chan = 3;
struct snd_pcm_substream *substream;
- if (rpcm)
- *rpcm = NULL;
if (trident->device == TRIDENT_DEVICE_ID_NX)
num_chan = 4;
if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, 0, num_chan, &foldback)) < 0)
@@ -2271,8 +2262,6 @@ int snd_trident_foldback_pcm(struct snd_trident *trident,
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
- if (rpcm)
- *rpcm = foldback;
return 0;
}
@@ -2287,14 +2276,11 @@ int snd_trident_foldback_pcm(struct snd_trident *trident,
---------------------------------------------------------------------------*/
-int snd_trident_spdif_pcm(struct snd_trident *trident,
- int device, struct snd_pcm **rpcm)
+int snd_trident_spdif_pcm(struct snd_trident *trident, int device)
{
struct snd_pcm *spdif;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(trident->card, "trident_dx_nx IEC958", device, 1, 0, &spdif)) < 0)
return err;
@@ -2310,8 +2296,6 @@ int snd_trident_spdif_pcm(struct snd_trident *trident,
snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
- if (rpcm)
- *rpcm = spdif;
return 0;
}
@@ -3926,7 +3910,6 @@ static void snd_trident_clear_voices(struct snd_trident * trident, unsigned shor
#ifdef CONFIG_PM_SLEEP
static int snd_trident_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_trident *trident = card->private_data;
@@ -3938,28 +3921,14 @@ static int snd_trident_suspend(struct device *dev)
snd_ac97_suspend(trident->ac97);
snd_ac97_suspend(trident->ac97_sec);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_trident_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_trident *trident = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
switch (trident->device) {
case TRIDENT_DEVICE_ID_DX:
snd_trident_4d_dx_init(trident);
diff --git a/sound/pci/trident/trident_memory.c b/sound/pci/trident/trident_memory.c
index 04c474658e3c..b9ebb51893c5 100644
--- a/sound/pci/trident/trident_memory.c
+++ b/sound/pci/trident/trident_memory.c
@@ -23,7 +23,7 @@
*
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index e088467fb736..8622283e89f3 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -46,7 +46,7 @@
* - Optimize position calculation for the 823x chips.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -2271,7 +2271,6 @@ static int snd_via82xx_chip_init(struct via82xx *chip)
*/
static int snd_via82xx_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct via82xx *chip = card->private_data;
int i;
@@ -2291,28 +2290,15 @@ static int snd_via82xx_suspend(struct device *dev)
chip->capture_src_saved[1] = inb(chip->port + VIA_REG_CAPTURE_CHANNEL + 0x10);
}
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_via82xx_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct via82xx *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_via82xx_chip_init(chip);
if (chip->chip_type == TYPE_VIA686) {
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index fd46ffe12e4f..99b9137bc677 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -31,7 +31,7 @@
* modems.
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1031,7 +1031,6 @@ static int snd_via82xx_chip_init(struct via82xx_modem *chip)
*/
static int snd_via82xx_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct via82xx_modem *chip = card->private_data;
int i;
@@ -1043,29 +1042,15 @@ static int snd_via82xx_suspend(struct device *dev)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
synchronize_irq(chip->irq);
snd_ac97_suspend(chip->ac97);
-
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int snd_via82xx_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct via82xx_modem *chip = card->private_data;
int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
-
snd_via82xx_chip_init(chip);
snd_ac97_resume(chip->ac97);
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index c5a25e39e3a8..ecbaf473fc1e 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -259,32 +259,17 @@ static void snd_vx222_remove(struct pci_dev *pci)
#ifdef CONFIG_PM_SLEEP
static int snd_vx222_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_vx222 *vx = card->private_data;
- int err;
- err = snd_vx_suspend(&vx->core);
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
- return err;
+ return snd_vx_suspend(&vx->core);
}
static int snd_vx222_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_vx222 *vx = card->private_data;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
return snd_vx_resume(&vx->core);
}
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index 52c1a8d5b88a..af83b3b38052 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -24,11 +24,11 @@
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
-#include <asm/io.h>
#include "vx222.h"
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index 47a192369e8f..812e27a1bcbc 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -283,11 +283,11 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
card->shortname,
chip->reg_area_phys,
chip->irq);
- if ((err = snd_ymfpci_pcm(chip, 0, NULL)) < 0) {
+ if ((err = snd_ymfpci_pcm(chip, 0)) < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_ymfpci_pcm_spdif(chip, 1, NULL)) < 0) {
+ if ((err = snd_ymfpci_pcm_spdif(chip, 1)) < 0) {
snd_card_free(card);
return err;
}
@@ -297,12 +297,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
return err;
}
if (chip->ac97->ext_id & AC97_EI_SDAC) {
- err = snd_ymfpci_pcm_4ch(chip, 2, NULL);
+ err = snd_ymfpci_pcm_4ch(chip, 2);
if (err < 0) {
snd_card_free(card);
return err;
}
- err = snd_ymfpci_pcm2(chip, 3, NULL);
+ err = snd_ymfpci_pcm2(chip, 3);
if (err < 0) {
snd_card_free(card);
return err;
diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
index 4631a2348915..149d4cb46998 100644
--- a/sound/pci/ymfpci/ymfpci.h
+++ b/sound/pci/ymfpci/ymfpci.h
@@ -379,10 +379,10 @@ void snd_ymfpci_free_gameport(struct snd_ymfpci *chip);
extern const struct dev_pm_ops snd_ymfpci_pm;
-int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm);
-int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm);
-int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm);
-int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm);
+int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device);
+int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device);
+int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device);
+int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device);
int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch);
int snd_ymfpci_timer(struct snd_ymfpci *chip, int device);
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 81c916a5eb96..4c26076dbf78 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -36,7 +37,6 @@
#include <sound/asoundef.h>
#include <sound/mpu401.h>
-#include <asm/io.h>
#include <asm/byteorder.h>
/*
@@ -1145,13 +1145,11 @@ static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
.pointer = snd_ymfpci_capture_pointer,
};
-int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm)
+int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm)) < 0)
return err;
pcm->private_data = chip;
@@ -1167,14 +1165,8 @@ int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm)
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps, 2, 0, NULL);
- if (err < 0)
- return err;
-
- if (rpcm)
- *rpcm = pcm;
- return 0;
}
static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
@@ -1188,13 +1180,11 @@ static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
.pointer = snd_ymfpci_capture_pointer,
};
-int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm)
+int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm)) < 0)
return err;
pcm->private_data = chip;
@@ -1210,8 +1200,6 @@ int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm)
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -1226,14 +1214,11 @@ static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
.pointer = snd_ymfpci_playback_pointer,
};
-int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device,
- struct snd_pcm **rpcm)
+int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm)) < 0)
return err;
pcm->private_data = chip;
@@ -1248,8 +1233,6 @@ int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- if (rpcm)
- *rpcm = pcm;
return 0;
}
@@ -1272,14 +1255,11 @@ static const struct snd_pcm_chmap_elem surround_map[] = {
{ }
};
-int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device,
- struct snd_pcm **rpcm)
+int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device)
{
struct snd_pcm *pcm;
int err;
- if (rpcm)
- *rpcm = NULL;
if ((err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm)) < 0)
return err;
pcm->private_data = chip;
@@ -1294,14 +1274,8 @@ int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
- err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
surround_map, 2, 0, NULL);
- if (err < 0)
- return err;
-
- if (rpcm)
- *rpcm = pcm;
- return 0;
}
static int snd_ymfpci_spdif_default_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
@@ -2272,8 +2246,7 @@ static int snd_ymfpci_free(struct snd_ymfpci *chip)
release_and_free_resource(chip->mpu_res);
release_and_free_resource(chip->fm_res);
snd_ymfpci_free_gameport(chip);
- if (chip->reg_area_virt)
- iounmap(chip->reg_area_virt);
+ iounmap(chip->reg_area_virt);
if (chip->work_ptr.area)
snd_dma_free_pages(&chip->work_ptr);
@@ -2326,7 +2299,6 @@ static int saved_regs_index[] = {
static int snd_ymfpci_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ymfpci *chip = card->private_data;
unsigned int i;
@@ -2347,9 +2319,6 @@ static int snd_ymfpci_suspend(struct device *dev)
snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0);
snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0);
snd_ymfpci_disable_dsp(chip);
- pci_disable_device(pci);
- pci_save_state(pci);
- pci_set_power_state(pci, PCI_D3hot);
return 0;
}
@@ -2360,14 +2329,6 @@ static int snd_ymfpci_resume(struct device *dev)
struct snd_ymfpci *chip = card->private_data;
unsigned int i;
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- if (pci_enable_device(pci) < 0) {
- dev_err(dev, "pci_enable_device failed, disabling device\n");
- snd_card_disconnect(card);
- return -EIO;
- }
- pci_set_master(pci);
snd_ymfpci_aclink_reset(pci);
snd_ymfpci_codec_ready(chip, 0);
snd_ymfpci_download_image(chip);
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index 5fbf5db2543d..09da7b52bc2e 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -20,7 +20,7 @@
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/nvram.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/sound/ppc/beep.c b/sound/ppc/beep.c
index 0040f048221f..d3524f9fa05d 100644
--- a/sound/ppc/beep.c
+++ b/sound/ppc/beep.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/sound/ppc/burgundy.c b/sound/ppc/burgundy.c
index cb4f0a5e984e..b86159e04493 100644
--- a/sound/ppc/burgundy.c
+++ b/sound/ppc/burgundy.c
@@ -19,7 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <sound/core.h>
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 5a13b22748b2..13146d701413 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -20,7 +20,7 @@
*/
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -867,16 +867,11 @@ static int snd_pmac_free(struct snd_pmac *chip)
snd_pmac_dbdma_free(chip, &chip->capture.cmd);
snd_pmac_dbdma_free(chip, &chip->extra_dma);
snd_pmac_dbdma_free(chip, &emergency_dbdma);
- if (chip->macio_base)
- iounmap(chip->macio_base);
- if (chip->latch_base)
- iounmap(chip->latch_base);
- if (chip->awacs)
- iounmap(chip->awacs);
- if (chip->playback.dma)
- iounmap(chip->playback.dma);
- if (chip->capture.dma)
- iounmap(chip->capture.dma);
+ iounmap(chip->macio_base);
+ iounmap(chip->latch_base);
+ iounmap(chip->awacs);
+ iounmap(chip->playback.dma);
+ iounmap(chip->capture.dma);
if (chip->node) {
int i;
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 58f292a87f98..368242519279 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -1044,7 +1044,7 @@ static int snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
if (!the_card.null_buffer_start_vaddr) {
pr_info("%s: nullbuffer alloc failed\n", __func__);
ret = -ENOMEM;
- goto clean_preallocate;
+ goto clean_card;
}
pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
the_card.null_buffer_start_vaddr,
@@ -1066,8 +1066,6 @@ clean_dma_map:
PAGE_SIZE,
the_card.null_buffer_start_vaddr,
the_card.null_buffer_start_dma_addr);
-clean_preallocate:
- snd_pcm_lib_preallocate_free_for_all(the_card.pcm);
clean_card:
snd_card_free(the_card.card);
clean_irq:
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 24c8766a925d..c8fafba218a5 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -32,8 +32,8 @@
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/of_irq.h>
+#include <linux/io.h>
#include <sound/core.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index f44dda610ed2..ad3d9ae38034 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -35,12 +35,12 @@
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/info.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <mach/sysasic.h>
#include "aica.h"
@@ -343,11 +343,9 @@ static void spu_begin_dma(struct snd_pcm_substream *substream)
mod_timer(&dreamcastcard->timer, jiffies + 4);
return;
}
- init_timer(&(dreamcastcard->timer));
- dreamcastcard->timer.data = (unsigned long) substream;
- dreamcastcard->timer.function = aica_period_elapsed;
- dreamcastcard->timer.expires = jiffies + 4;
- add_timer(&(dreamcastcard->timer));
+ setup_timer(&dreamcastcard->timer, aica_period_elapsed,
+ (unsigned long) substream);
+ mod_timer(&dreamcastcard->timer, jiffies + 4);
}
static int snd_aicapcm_pcm_open(struct snd_pcm_substream
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 7d5d6444a837..dcc79aa0236b 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -55,6 +55,7 @@ source "sound/soc/spear/Kconfig"
source "sound/soc/tegra/Kconfig"
source "sound/soc/txx9/Kconfig"
source "sound/soc/ux500/Kconfig"
+source "sound/soc/xtensa/Kconfig"
# Supported codecs
source "sound/soc/codecs/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 865e090c8061..5b3c8f67c8db 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -36,3 +36,4 @@ obj-$(CONFIG_SND_SOC) += spear/
obj-$(CONFIG_SND_SOC) += tegra/
obj-$(CONFIG_SND_SOC) += txx9/
obj-$(CONFIG_SND_SOC) += ux500/
+obj-$(CONFIG_SND_SOC) += xtensa/
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index 7752860f7230..4c23381727a1 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
if (ret)
goto err_clk_disable;
+ return 0;
+
err_clk_disable:
clk_disable_unprepare(i2s->clk);
return ret;
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index fb3878312bf8..1579e994acf8 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -45,7 +45,7 @@ config SND_ATMEL_SOC_WM8904
config SND_AT91_SOC_SAM9X5_WM8731
tristate "SoC Audio support for WM8731-based at91sam9x5 board"
- depends on ATMEL_SSC && SND_ATMEL_SOC && SOC_AT91SAM9X5
+ depends on ARCH_AT91 && ATMEL_SSC && SND_ATMEL_SOC
select SND_ATMEL_SOC_SSC
select SND_ATMEL_SOC_DMA
select SND_SOC_WM8731
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index 33fb3bb133df..b8e7bad05eb1 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -105,13 +105,11 @@ static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
return ret;
}
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- slave_config->dst_addr = ssc->phybase + SSC_THR;
- slave_config->dst_maxburst = 1;
- } else {
- slave_config->src_addr = ssc->phybase + SSC_RHR;
- slave_config->src_maxburst = 1;
- }
+ slave_config->dst_addr = ssc->phybase + SSC_THR;
+ slave_config->dst_maxburst = 1;
+
+ slave_config->src_addr = ssc->phybase + SSC_RHR;
+ slave_config->src_maxburst = 1;
prtd->dma_intr_handler = atmel_pcm_dma_irq;
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 99ff35e2a25d..fb0b7e8b08ff 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -204,6 +204,13 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
ssc_readl(ssc_p->ssc->regs, SR));
+ /* Enable PMC peripheral clock for this SSC */
+ pr_debug("atmel_ssc_dai: Starting clock\n");
+ clk_enable(ssc_p->ssc->clk);
+
+ /* Reset the SSC to keep it at a clean status */
+ ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dir = 0;
dir_mask = SSC_DIR_MASK_PLAYBACK;
@@ -250,11 +257,6 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
dma_params = ssc_p->dma_params[dir];
if (dma_params != NULL) {
- ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
- pr_debug("atmel_ssc_shutdown: %s disabled SSC_SR=0x%08x\n",
- (dir ? "receive" : "transmit"),
- ssc_readl(ssc_p->ssc->regs, SR));
-
dma_params->ssc = NULL;
dma_params->substream = NULL;
ssc_p->dma_params[dir] = NULL;
@@ -266,10 +268,6 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
ssc_p->dir_mask &= ~dir_mask;
if (!ssc_p->dir_mask) {
if (ssc_p->initialized) {
- /* Shutdown the SSC clock. */
- pr_debug("atmel_ssc_dai: Stopping clock\n");
- clk_disable(ssc_p->ssc->clk);
-
free_irq(ssc_p->ssc->irq, ssc_p);
ssc_p->initialized = 0;
}
@@ -280,6 +278,10 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
}
spin_unlock_irq(&ssc_p->lock);
+
+ /* Shutdown the SSC clock. */
+ pr_debug("atmel_ssc_dai: Stopping clock\n");
+ clk_disable(ssc_p->ssc->clk);
}
@@ -348,7 +350,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
struct atmel_pcm_dma_params *dma_params;
int dir, channels, bits;
u32 tfmr, rfmr, tcmr, rcmr;
- int start_event;
int ret;
int fslen, fslen_ext;
@@ -451,25 +452,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
break;
case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
- /*
- * I2S format, CODEC supplies BCLK and LRC clocks.
- *
- * The SSC transmit clock is obtained from the BCLK signal on
- * on the TK line, and the SSC receive clock is
- * generated from the transmit clock.
- *
- * For single channel data, one sample is transferred
- * on the falling edge of the LRC clock.
- * For two channel data, one sample is
- * transferred on both edges of the LRC clock.
- */
- start_event = ((channels == 1)
- ? SSC_START_FALLING_RF
- : SSC_START_EDGE_RF);
-
+ /* I2S format, CODEC supplies BCLK and LRC clocks. */
rcmr = SSC_BF(RCMR_PERIOD, 0)
| SSC_BF(RCMR_STTDLY, START_DELAY)
- | SSC_BF(RCMR_START, start_event)
+ | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
@@ -478,14 +464,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
| SSC_BF(RFMR_FSLEN, 0)
- | SSC_BF(RFMR_DATNB, 0)
+ | SSC_BF(RFMR_DATNB, (channels - 1))
| SSC_BIT(RFMR_MSBF)
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
tcmr = SSC_BF(TCMR_PERIOD, 0)
| SSC_BF(TCMR_STTDLY, START_DELAY)
- | SSC_BF(TCMR_START, start_event)
+ | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_NONE)
| SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
@@ -495,7 +481,55 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
| SSC_BF(TFMR_FSDEN, 0)
| SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
| SSC_BF(TFMR_FSLEN, 0)
- | SSC_BF(TFMR_DATNB, 0)
+ | SSC_BF(TFMR_DATNB, (channels - 1))
+ | SSC_BIT(TFMR_MSBF)
+ | SSC_BF(TFMR_DATDEF, 0)
+ | SSC_BF(TFMR_DATLEN, (bits - 1));
+ break;
+
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFS:
+ /* I2S format, CODEC supplies BCLK, SSC supplies LRCLK. */
+ if (bits > 16 && !ssc->pdata->has_fslen_ext) {
+ dev_err(dai->dev,
+ "sample size %d is too large for SSC device\n",
+ bits);
+ return -EINVAL;
+ }
+
+ fslen_ext = (bits - 1) / 16;
+ fslen = (bits - 1) % 16;
+
+ rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+ | SSC_BF(RCMR_STTDLY, START_DELAY)
+ | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
+ | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
+ | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
+ SSC_CKS_PIN : SSC_CKS_CLOCK);
+
+ rfmr = SSC_BF(RFMR_FSLEN_EXT, fslen_ext)
+ | SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
+ | SSC_BF(RFMR_FSOS, SSC_FSOS_NEGATIVE)
+ | SSC_BF(RFMR_FSLEN, fslen)
+ | SSC_BF(RFMR_DATNB, (channels - 1))
+ | SSC_BIT(RFMR_MSBF)
+ | SSC_BF(RFMR_LOOP, 0)
+ | SSC_BF(RFMR_DATLEN, (bits - 1));
+
+ tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+ | SSC_BF(TCMR_STTDLY, START_DELAY)
+ | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
+ | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
+ | SSC_BF(TCMR_CKO, SSC_CKO_NONE)
+ | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
+ SSC_CKS_CLOCK : SSC_CKS_PIN);
+
+ tfmr = SSC_BF(TFMR_FSLEN_EXT, fslen_ext)
+ | SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_NEGATIVE)
+ | SSC_BF(TFMR_FSDEN, 0)
+ | SSC_BF(TFMR_FSOS, SSC_FSOS_NEGATIVE)
+ | SSC_BF(TFMR_FSLEN, fslen)
+ | SSC_BF(TFMR_DATNB, (channels - 1))
| SSC_BIT(TFMR_MSBF)
| SSC_BF(TFMR_DATDEF, 0)
| SSC_BF(TFMR_DATLEN, (bits - 1));
@@ -512,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
| SSC_BF(RCMR_STTDLY, 1)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
- | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, SSC_CKS_DIV);
@@ -527,7 +561,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
| SSC_BF(TCMR_STTDLY, 1)
| SSC_BF(TCMR_START, SSC_START_RISING_RF)
- | SSC_BF(TCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
| SSC_BF(TCMR_CKS, SSC_CKS_DIV);
@@ -545,10 +579,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
/*
* DSP/PCM Mode A format, CODEC supplies BCLK and LRC clocks.
*
- * The SSC transmit clock is obtained from the BCLK signal on
- * on the TK line, and the SSC receive clock is
- * generated from the transmit clock.
- *
* Data is transferred on first BCLK after LRC pulse rising
* edge.If stereo, the right channel data is contiguous with
* the left channel data.
@@ -556,7 +586,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rcmr = SSC_BF(RCMR_PERIOD, 0)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
- | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
SSC_CKS_PIN : SSC_CKS_CLOCK);
@@ -597,23 +627,17 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rcmr, rfmr, tcmr, tfmr);
if (!ssc_p->initialized) {
-
- /* Enable PMC peripheral clock for this SSC */
- pr_debug("atmel_ssc_dai: Starting clock\n");
- clk_enable(ssc_p->ssc->clk);
-
- /* Reset the SSC and its PDC registers */
- ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
-
- ssc_writel(ssc_p->ssc->regs, PDC_RPR, 0);
- ssc_writel(ssc_p->ssc->regs, PDC_RCR, 0);
- ssc_writel(ssc_p->ssc->regs, PDC_RNPR, 0);
- ssc_writel(ssc_p->ssc->regs, PDC_RNCR, 0);
-
- ssc_writel(ssc_p->ssc->regs, PDC_TPR, 0);
- ssc_writel(ssc_p->ssc->regs, PDC_TCR, 0);
- ssc_writel(ssc_p->ssc->regs, PDC_TNPR, 0);
- ssc_writel(ssc_p->ssc->regs, PDC_TNCR, 0);
+ if (!ssc_p->ssc->pdata->use_dma) {
+ ssc_writel(ssc_p->ssc->regs, PDC_RPR, 0);
+ ssc_writel(ssc_p->ssc->regs, PDC_RCR, 0);
+ ssc_writel(ssc_p->ssc->regs, PDC_RNPR, 0);
+ ssc_writel(ssc_p->ssc->regs, PDC_RNCR, 0);
+
+ ssc_writel(ssc_p->ssc->regs, PDC_TPR, 0);
+ ssc_writel(ssc_p->ssc->regs, PDC_TCR, 0);
+ ssc_writel(ssc_p->ssc->regs, PDC_TNPR, 0);
+ ssc_writel(ssc_p->ssc->regs, PDC_TNCR, 0);
+ }
ret = request_irq(ssc_p->ssc->irq, atmel_ssc_interrupt, 0,
ssc_p->name, ssc_p);
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 66b66d0e7514..f5ad214663f9 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -47,7 +47,6 @@
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include "../codecs/wm8731.h"
#include "atmel-pcm.h"
@@ -64,33 +63,6 @@
static struct clk *mclk;
-static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret;
-
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_ops at91sam9g20ek_ops = {
- .hw_params = at91sam9g20ek_hw_params,
-};
-
static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
struct snd_soc_dapm_context *dapm,
enum snd_soc_bias_level level)
@@ -173,7 +145,8 @@ static struct snd_soc_dai_link at91sam9g20ek_dai = {
.init = at91sam9g20ek_wm8731_init,
.platform_name = "at91rm9200_ssc.0",
.codec_name = "wm8731.0-001b",
- .ops = &at91sam9g20ek_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
};
static struct snd_soc_card snd_soc_at91sam9g20ek = {
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index a747ac0b399f..c75995f2779c 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -91,27 +91,12 @@ static int db1200_i2s_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret;
/* WM8731 has its own 12MHz crystal */
snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
12000000, SND_SOC_CLOCK_IN);
- /* codec is bitclock and lrclk master */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- goto out;
-
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_LEFT_J |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- goto out;
-
- ret = 0;
-out:
- return ret;
+ return 0;
}
static struct snd_soc_ops db1200_i2s_wm8731_ops = {
@@ -125,6 +110,8 @@ static struct snd_soc_dai_link db1200_i2s_dai = {
.cpu_dai_name = "au1xpsc_i2s.1",
.platform_name = "au1xpsc-pcm.1",
.codec_name = "wm8731.0-001b",
+ .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
.ops = &db1200_i2s_wm8731_ops,
};
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index b06b8d8128c6..dd94fea72d5d 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -315,11 +315,6 @@ static struct snd_pcm_ops au1xpsc_pcm_ops = {
.pointer = au1xpsc_pcm_pointer,
};
-static void au1xpsc_pcm_free_dma_buffers(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
@@ -335,7 +330,6 @@ static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
static struct snd_soc_platform_driver au1xpsc_soc_platform = {
.ops = &au1xpsc_pcm_ops,
.pcm_new = au1xpsc_pcm_new,
- .pcm_free = au1xpsc_pcm_free_dma_buffers,
};
static int au1xpsc_pcm_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 6ffaaff469c7..24cc7f40d87a 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -287,11 +287,6 @@ static struct snd_pcm_ops alchemy_pcm_ops = {
.pointer = alchemy_pcm_pointer,
};
-static void alchemy_pcm_free_dma_buffers(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int alchemy_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
@@ -305,7 +300,6 @@ static int alchemy_pcm_new(struct snd_soc_pcm_runtime *rtd)
static struct snd_soc_platform_driver alchemy_pcm_soc_platform = {
.ops = &alchemy_pcm_ops,
.pcm_new = alchemy_pcm_new,
- .pcm_free = alchemy_pcm_free_dma_buffers,
};
static int alchemy_pcm_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index a2bf27f4baab..a0f265327fdf 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -386,7 +386,7 @@ static int snd_soc_put_volsw_2r_out(struct snd_kcontrol *kcontrol,
static int pm860x_rsync_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
/*
* In order to avoid current on the load, mute power-on and power-off
@@ -403,7 +403,7 @@ static int pm860x_rsync_event(struct snd_soc_dapm_widget *w,
static int pm860x_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int dac = 0;
int data;
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 8349f982a586..064e6c18e109 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -69,6 +69,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98088 if I2C
select SND_SOC_MAX98090 if I2C
select SND_SOC_MAX98095 if I2C
+ select SND_SOC_MAX98357A
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9768 if I2C
select SND_SOC_MAX9877 if I2C
@@ -456,6 +457,9 @@ config SND_SOC_MAX98090
config SND_SOC_MAX98095
tristate
+config SND_SOC_MAX98357A
+ tristate
+
config SND_SOC_MAX9850
tristate
@@ -525,7 +529,7 @@ config SND_SOC_RT5677
config SND_SOC_RT5677_SPI
tristate
- default SND_SOC_RT5677
+ default SND_SOC_RT5677 && SPI
#Freescale sgtl5000 codec
config SND_SOC_SGTL5000
@@ -580,7 +584,9 @@ config SND_SOC_SSM4567
depends on I2C
config SND_SOC_STA32X
- tristate
+ tristate "STA326, STA328 and STA329 speaker amplifier"
+ depends on I2C
+ select REGMAP_I2C
config SND_SOC_STA350
tristate "STA350 speaker amplifier"
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index bbdfd1e1c182..69b8666d187a 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -64,6 +64,7 @@ snd-soc-max9768-objs := max9768.o
snd-soc-max98088-objs := max98088.o
snd-soc-max98090-objs := max98090.o
snd-soc-max98095-objs := max98095.o
+snd-soc-max98357a-objs := max98357a.o
snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
@@ -245,6 +246,7 @@ obj-$(CONFIG_SND_SOC_MAX9768) += snd-soc-max9768.o
obj-$(CONFIG_SND_SOC_MAX98088) += snd-soc-max98088.o
obj-$(CONFIG_SND_SOC_MAX98090) += snd-soc-max98090.o
obj-$(CONFIG_SND_SOC_MAX98095) += snd-soc-max98095.o
+obj-$(CONFIG_SND_SOC_MAX98357A) += snd-soc-max98357a.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 387530b0b0fd..17c953595660 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -333,8 +333,8 @@ static int ad193x_codec_probe(struct snd_soc_codec *codec)
regmap_write(ad193x->regmap, AD193X_DAC_CHNL_MUTE, 0x0);
/* de-emphasis: 48kHz, powedown dac */
regmap_write(ad193x->regmap, AD193X_DAC_CTRL2, 0x1A);
- /* powerdown dac, dac in tdm mode */
- regmap_write(ad193x->regmap, AD193X_DAC_CTRL0, 0x41);
+ /* dac in tdm mode */
+ regmap_write(ad193x->regmap, AD193X_DAC_CTRL0, 0x40);
/* high-pass filter enable */
regmap_write(ad193x->regmap, AD193X_ADC_CTRL0, 0x3);
/* sata delay=1, adc aux mode */
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 686cacb0e835..632e89f793a7 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -163,7 +163,7 @@ static const struct snd_kcontrol_new ak4671_snd_controls[] = {
static int ak4671_out2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index bdf8c5ac8ca4..0e357996864b 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -55,18 +55,20 @@ static inline int alc5623_reset(struct snd_soc_codec *codec)
static int amp_mixer_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
/* to power-on/off class-d amp generators/speaker */
/* need to write to 'index-46h' register : */
/* so write index num (here 0x46) to reg 0x6a */
/* and then 0xffff/0 to reg 0x6c */
- snd_soc_write(w->codec, ALC5623_HID_CTRL_INDEX, 0x46);
+ snd_soc_write(codec, ALC5623_HID_CTRL_INDEX, 0x46);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0xFFFF);
+ snd_soc_write(codec, ALC5623_HID_CTRL_DATA, 0xFFFF);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0);
+ snd_soc_write(codec, ALC5623_HID_CTRL_DATA, 0);
break;
}
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index d1fdbc266631..db3283abbe18 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -116,18 +116,20 @@ static inline int alc5632_reset(struct regmap *map)
static int amp_mixer_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
/* to power-on/off class-d amp generators/speaker */
/* need to write to 'index-46h' register : */
/* so write index num (here 0x46) to reg 0x6a */
/* and then 0xffff/0 to reg 0x6c */
- snd_soc_write(w->codec, ALC5632_HID_CTRL_INDEX, 0x46);
+ snd_soc_write(codec, ALC5632_HID_CTRL_INDEX, 0x46);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_write(w->codec, ALC5632_HID_CTRL_DATA, 0xFFFF);
+ snd_soc_write(codec, ALC5632_HID_CTRL_DATA, 0xFFFF);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_soc_write(w->codec, ALC5632_HID_CTRL_DATA, 0);
+ snd_soc_write(codec, ALC5632_HID_CTRL_DATA, 0);
break;
}
@@ -1066,7 +1068,7 @@ static int alc5632_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_device_alc5632 = {
+static const struct snd_soc_codec_driver soc_codec_device_alc5632 = {
.probe = alc5632_probe,
.resume = alc5632_resume,
.set_bias_level = alc5632_set_bias_level,
@@ -1080,7 +1082,7 @@ static struct snd_soc_codec_driver soc_codec_device_alc5632 = {
.num_dapm_routes = ARRAY_SIZE(alc5632_dapm_routes),
};
-static struct regmap_config alc5632_regmap = {
+static const struct regmap_config alc5632_regmap = {
.reg_bits = 8,
.val_bits = 16,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 9550d7433ad0..29202610dd0d 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -84,7 +84,7 @@ static int arizona_spk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
bool manual_ena = false;
@@ -692,7 +692,8 @@ static void arizona_in_set_vu(struct snd_soc_codec *codec, int ena)
int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
int event)
{
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
unsigned int reg;
if (w->shift % 2)
@@ -705,25 +706,25 @@ int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
priv->in_pending++;
break;
case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(w->codec, reg, ARIZONA_IN1L_MUTE, 0);
+ snd_soc_update_bits(codec, reg, ARIZONA_IN1L_MUTE, 0);
/* If this is the last input pending then allow VU */
priv->in_pending--;
if (priv->in_pending == 0) {
msleep(1);
- arizona_in_set_vu(w->codec, 1);
+ arizona_in_set_vu(codec, 1);
}
break;
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(w->codec, reg,
+ snd_soc_update_bits(codec, reg,
ARIZONA_IN1L_MUTE | ARIZONA_IN_VU,
ARIZONA_IN1L_MUTE | ARIZONA_IN_VU);
break;
case SND_SOC_DAPM_POST_PMD:
/* Disable volume updates if no inputs are enabled */
- reg = snd_soc_read(w->codec, ARIZONA_INPUT_ENABLES);
+ reg = snd_soc_read(codec, ARIZONA_INPUT_ENABLES);
if (reg == 0)
- arizona_in_set_vu(w->codec, 0);
+ arizona_in_set_vu(codec, 0);
}
return 0;
@@ -734,7 +735,25 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+
switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ switch (w->shift) {
+ case ARIZONA_OUT1L_ENA_SHIFT:
+ case ARIZONA_OUT1R_ENA_SHIFT:
+ case ARIZONA_OUT2L_ENA_SHIFT:
+ case ARIZONA_OUT2R_ENA_SHIFT:
+ case ARIZONA_OUT3L_ENA_SHIFT:
+ case ARIZONA_OUT3R_ENA_SHIFT:
+ priv->out_up_pending++;
+ priv->out_up_delay += 17;
+ break;
+ default:
+ break;
+ }
+ break;
case SND_SOC_DAPM_POST_PMU:
switch (w->shift) {
case ARIZONA_OUT1L_ENA_SHIFT:
@@ -743,13 +762,50 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
case ARIZONA_OUT2R_ENA_SHIFT:
case ARIZONA_OUT3L_ENA_SHIFT:
case ARIZONA_OUT3R_ENA_SHIFT:
- msleep(17);
+ priv->out_up_pending--;
+ if (!priv->out_up_pending) {
+ msleep(priv->out_up_delay);
+ priv->out_up_delay = 0;
+ }
break;
default:
break;
}
break;
+ case SND_SOC_DAPM_PRE_PMD:
+ switch (w->shift) {
+ case ARIZONA_OUT1L_ENA_SHIFT:
+ case ARIZONA_OUT1R_ENA_SHIFT:
+ case ARIZONA_OUT2L_ENA_SHIFT:
+ case ARIZONA_OUT2R_ENA_SHIFT:
+ case ARIZONA_OUT3L_ENA_SHIFT:
+ case ARIZONA_OUT3R_ENA_SHIFT:
+ priv->out_down_pending++;
+ priv->out_down_delay++;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ switch (w->shift) {
+ case ARIZONA_OUT1L_ENA_SHIFT:
+ case ARIZONA_OUT1R_ENA_SHIFT:
+ case ARIZONA_OUT2L_ENA_SHIFT:
+ case ARIZONA_OUT2R_ENA_SHIFT:
+ case ARIZONA_OUT3L_ENA_SHIFT:
+ case ARIZONA_OUT3R_ENA_SHIFT:
+ priv->out_down_pending--;
+ if (!priv->out_down_pending) {
+ msleep(priv->out_down_delay);
+ priv->out_down_delay = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
}
return 0;
@@ -760,7 +816,8 @@ int arizona_hp_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
struct arizona *arizona = priv->arizona;
unsigned int mask = 1 << w->shift;
unsigned int val;
@@ -772,6 +829,9 @@ int arizona_hp_ev(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_PRE_PMD:
val = 0;
break;
+ case SND_SOC_DAPM_PRE_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ return arizona_out_ev(w, kcontrol, event);
default:
return -EINVAL;
}
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 942cfb197b6d..11ff899b0272 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -77,6 +77,11 @@ struct arizona_priv {
int num_inputs;
unsigned int in_pending;
+ unsigned int out_up_pending;
+ unsigned int out_up_delay;
+ unsigned int out_down_pending;
+ unsigned int out_down_delay;
+
unsigned int spk_ena:2;
unsigned int spk_ena_pending:1;
};
diff --git a/sound/soc/codecs/bt-sco.c b/sound/soc/codecs/bt-sco.c
index 5075bf0a7276..e7238b8904bc 100644
--- a/sound/soc/codecs/bt-sco.c
+++ b/sound/soc/codecs/bt-sco.c
@@ -86,5 +86,5 @@ static struct platform_driver bt_sco_driver = {
module_platform_driver(bt_sco_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("ASoC generic bluethooth sco link driver");
+MODULE_DESCRIPTION("ASoC generic bluetooth sco link driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index ec55c590afd0..f2b8aad21274 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -264,7 +264,7 @@ static int cs35l32_codec_set_sysclk(struct snd_soc_codec *codec,
CS35L32_MCLK_DIV2_MASK | CS35L32_MCLK_RATIO_MASK, val);
}
-static struct snd_soc_codec_driver soc_codec_dev_cs35l32 = {
+static const struct snd_soc_codec_driver soc_codec_dev_cs35l32 = {
.set_sysclk = cs35l32_codec_set_sysclk,
.dapm_widgets = cs35l32_dapm_widgets,
@@ -288,7 +288,7 @@ static const struct reg_default cs35l32_monitor_patch[] = {
{ 0x00, 0x00 },
};
-static struct regmap_config cs35l32_regmap = {
+static const struct regmap_config cs35l32_regmap = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 35fbef743fbe..1589e7a881d8 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1103,7 +1103,7 @@ static int cs42l52_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_cs42l52 = {
+static const struct snd_soc_codec_driver soc_codec_dev_cs42l52 = {
.probe = cs42l52_probe,
.remove = cs42l52_remove,
.set_bias_level = cs42l52_set_bias_level,
@@ -1130,7 +1130,7 @@ static const struct reg_default cs42l52_threshold_patch[] = {
};
-static struct regmap_config cs42l52_regmap = {
+static const struct regmap_config cs42l52_regmap = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 2ddc7ac10ad7..cbc654fe48c7 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1164,7 +1164,7 @@ static int cs42l56_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_cs42l56 = {
+static const struct snd_soc_codec_driver soc_codec_dev_cs42l56 = {
.probe = cs42l56_probe,
.remove = cs42l56_remove,
.set_bias_level = cs42l56_set_bias_level,
@@ -1179,7 +1179,7 @@ static struct snd_soc_codec_driver soc_codec_dev_cs42l56 = {
.num_controls = ARRAY_SIZE(cs42l56_snd_controls),
};
-static struct regmap_config cs42l56_regmap = {
+static const struct regmap_config cs42l56_regmap = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 7c55537c69cf..8ecedba79606 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1347,7 +1347,7 @@ static int cs42l73_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_cs42l73 = {
+static const struct snd_soc_codec_driver soc_codec_dev_cs42l73 = {
.probe = cs42l73_probe,
.set_bias_level = cs42l73_set_bias_level,
.suspend_bias_off = true,
@@ -1361,7 +1361,7 @@ static struct snd_soc_codec_driver soc_codec_dev_cs42l73 = {
.num_controls = ARRAY_SIZE(cs42l73_snd_controls),
};
-static struct regmap_config cs42l73_regmap = {
+static const struct regmap_config cs42l73_regmap = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 61b2f9a2eef1..ffe96175a8a5 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -609,7 +609,7 @@ static const struct snd_kcontrol_new da732x_snd_controls[] = {
static int da732x_adc_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -663,7 +663,7 @@ static int da732x_adc_event(struct snd_soc_dapm_widget *w,
static int da732x_out_pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
new file mode 100644
index 000000000000..1806333ea29e
--- /dev/null
+++ b/sound/soc/codecs/max98357a.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * max98357a.c -- MAX98357A ALSA SoC Codec driver
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <sound/soc.h>
+
+#define DRV_NAME "max98357a"
+
+static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ gpiod_set_value(sdmode, 1);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ gpiod_set_value(sdmode, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("SDMode", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("Speaker"),
+};
+
+static const struct snd_soc_dapm_route max98357a_dapm_routes[] = {
+ {"Speaker", NULL, "SDMode"},
+};
+
+static int max98357a_codec_probe(struct snd_soc_codec *codec)
+{
+ struct gpio_desc *sdmode;
+
+ sdmode = devm_gpiod_get(codec->dev, "sdmode");
+ if (IS_ERR(sdmode)) {
+ dev_err(codec->dev, "%s() unable to get sdmode GPIO: %ld\n",
+ __func__, PTR_ERR(sdmode));
+ return PTR_ERR(sdmode);
+ }
+ gpiod_direction_output(sdmode, 0);
+ snd_soc_codec_set_drvdata(codec, sdmode);
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver max98357a_codec_driver = {
+ .probe = max98357a_codec_probe,
+ .dapm_widgets = max98357a_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98357a_dapm_widgets),
+ .dapm_routes = max98357a_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max98357a_dapm_routes),
+};
+
+static struct snd_soc_dai_ops max98357a_dai_ops = {
+ .trigger = max98357a_daiops_trigger,
+};
+
+static struct snd_soc_dai_driver max98357a_dai_driver = {
+ .name = DRV_NAME,
+ .playback = {
+ .stream_name = DRV_NAME "-playback",
+ .formats = SNDRV_PCM_FMTBIT_S16 |
+ SNDRV_PCM_FMTBIT_S24 |
+ SNDRV_PCM_FMTBIT_S32,
+ .rates = SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &max98357a_dai_ops,
+};
+
+static int max98357a_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
+ &max98357a_dai_driver, 1);
+ if (ret)
+ dev_err(&pdev->dev, "%s() error registering codec driver: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int max98357a_platform_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max98357a_device_id[] = {
+ { .compatible = "maxim," DRV_NAME, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max98357a_device_id);
+#endif
+
+static struct platform_driver max98357a_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(max98357a_device_id),
+ },
+ .probe = max98357a_platform_probe,
+ .remove = max98357a_platform_remove,
+};
+module_platform_driver(max98357a_platform_driver);
+
+MODULE_DESCRIPTION("Maxim MAX98357A Codec Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index c1e441c2c8af..2ffb9a0570dc 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -328,16 +328,16 @@ static int mc13783_set_tdm_slot_dac(struct snd_soc_dai *dai,
}
switch (rx_mask) {
- case 0xfffffffc:
+ case 0x03:
val |= SSI_NETWORK_DAC_RXSLOT_0_1;
break;
- case 0xfffffff3:
+ case 0x0c:
val |= SSI_NETWORK_DAC_RXSLOT_2_3;
break;
- case 0xffffffcf:
+ case 0x30:
val |= SSI_NETWORK_DAC_RXSLOT_4_5;
break;
- case 0xffffff3f:
+ case 0xc0:
val |= SSI_NETWORK_DAC_RXSLOT_6_7;
break;
default:
@@ -360,7 +360,7 @@ static int mc13783_set_tdm_slot_codec(struct snd_soc_dai *dai,
if (slots != 4)
return -EINVAL;
- if (tx_mask != 0xfffffffc)
+ if (tx_mask != 0x3)
return -EINVAL;
val |= (0x00 << 2); /* primary timeslot RX/TX(?) is 0 */
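
The hunk above moves mc13783 away from inverted tx_mask/rx_mask values to the positive slot-mask convention, where bit N set means TDM slot N is in use. As a purely illustrative, stand-alone sketch (user-space C, not driver code), the new case values are just the OR of the two slot bits in each pair, giving 0x03, 0x0c, 0x30 and 0xc0 for slot pairs 0+1, 2+3, 4+5 and 6+7:

	#include <stdio.h>

	/* Positive TDM slot masks: bit N set means slot N is active.
	 * The printed values match the new case labels in
	 * mc13783_set_tdm_slot_dac() above.
	 */
	static unsigned int slot_pair_mask(int first_slot)
	{
		return (1u << first_slot) | (1u << (first_slot + 1));
	}

	int main(void)
	{
		int first;

		for (first = 0; first < 8; first += 2)
			printf("slots %d+%d -> mask 0x%02x\n",
			       first, first + 1, slot_pair_mask(first));
		return 0;
	}
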
diff --git a/sound/soc/codecs/pcm3008.c b/sound/soc/codecs/pcm3008.c
index 7e73fa4b3183..8fb445f33f6f 100644
--- a/sound/soc/codecs/pcm3008.c
+++ b/sound/soc/codecs/pcm3008.c
@@ -32,7 +32,7 @@ static int pcm3008_dac_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct pcm3008_setup_data *setup = codec->dev->platform_data;
gpio_set_value_cansleep(setup->pdda_pin,
@@ -45,7 +45,7 @@ static int pcm3008_adc_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct pcm3008_setup_data *setup = codec->dev->platform_data;
gpio_set_value_cansleep(setup->pdad_pin,
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index d0547fa275fc..dcdfac0ffeb1 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -46,6 +46,8 @@ static int pcm512x_i2c_remove(struct i2c_client *i2c)
static const struct i2c_device_id pcm512x_i2c_id[] = {
{ "pcm5121", },
{ "pcm5122", },
+ { "pcm5141", },
+ { "pcm5142", },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcm512x_i2c_id);
@@ -53,6 +55,8 @@ MODULE_DEVICE_TABLE(i2c, pcm512x_i2c_id);
static const struct of_device_id pcm512x_of_match[] = {
{ .compatible = "ti,pcm5121", },
{ .compatible = "ti,pcm5122", },
+ { .compatible = "ti,pcm5141", },
+ { .compatible = "ti,pcm5142", },
{ }
};
MODULE_DEVICE_TABLE(of, pcm512x_of_match);
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index f297058c0038..7b64a9cef704 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -43,6 +43,8 @@ static int pcm512x_spi_remove(struct spi_device *spi)
static const struct spi_device_id pcm512x_spi_id[] = {
{ "pcm5121", },
{ "pcm5122", },
+ { "pcm5141", },
+ { "pcm5142", },
{ },
};
MODULE_DEVICE_TABLE(spi, pcm512x_spi_id);
@@ -50,6 +52,8 @@ MODULE_DEVICE_TABLE(spi, pcm512x_spi_id);
static const struct of_device_id pcm512x_of_match[] = {
{ .compatible = "ti,pcm5121", },
{ .compatible = "ti,pcm5122", },
+ { .compatible = "ti,pcm5141", },
+ { .compatible = "ti,pcm5142", },
{ }
};
MODULE_DEVICE_TABLE(of, pcm512x_of_match);
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index e5f2fb884bf3..9974f201a08f 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -21,12 +21,19 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/gcd.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
+#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include "pcm512x.h"
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+#define DIV_ROUND_CLOSEST_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+
#define PCM512x_NUM_SUPPLIES 3
static const char * const pcm512x_supply_names[PCM512x_NUM_SUPPLIES] = {
"AVDD",
@@ -39,6 +46,14 @@ struct pcm512x_priv {
struct clk *sclk;
struct regulator_bulk_data supplies[PCM512x_NUM_SUPPLIES];
struct notifier_block supply_nb[PCM512x_NUM_SUPPLIES];
+ int fmt;
+ int pll_in;
+ int pll_out;
+ int pll_r;
+ int pll_j;
+ int pll_d;
+ int pll_p;
+ unsigned long real_pll;
};
/*
@@ -69,6 +84,7 @@ static const struct reg_default pcm512x_reg_defaults[] = {
{ PCM512x_MUTE, 0x00 },
{ PCM512x_DSP, 0x00 },
{ PCM512x_PLL_REF, 0x00 },
+ { PCM512x_DAC_REF, 0x00 },
{ PCM512x_DAC_ROUTING, 0x11 },
{ PCM512x_DSP_PROGRAM, 0x01 },
{ PCM512x_CLKDET, 0x00 },
@@ -87,6 +103,25 @@ static const struct reg_default pcm512x_reg_defaults[] = {
{ PCM512x_ANALOG_GAIN_BOOST, 0x00 },
{ PCM512x_VCOM_CTRL_1, 0x00 },
{ PCM512x_VCOM_CTRL_2, 0x01 },
+ { PCM512x_BCLK_LRCLK_CFG, 0x00 },
+ { PCM512x_MASTER_MODE, 0x7c },
+ { PCM512x_GPIO_DACIN, 0x00 },
+ { PCM512x_GPIO_PLLIN, 0x00 },
+ { PCM512x_SYNCHRONIZE, 0x10 },
+ { PCM512x_PLL_COEFF_0, 0x00 },
+ { PCM512x_PLL_COEFF_1, 0x00 },
+ { PCM512x_PLL_COEFF_2, 0x00 },
+ { PCM512x_PLL_COEFF_3, 0x00 },
+ { PCM512x_PLL_COEFF_4, 0x00 },
+ { PCM512x_DSP_CLKDIV, 0x00 },
+ { PCM512x_DAC_CLKDIV, 0x00 },
+ { PCM512x_NCP_CLKDIV, 0x00 },
+ { PCM512x_OSR_CLKDIV, 0x00 },
+ { PCM512x_MASTER_CLKDIV_1, 0x00 },
+ { PCM512x_MASTER_CLKDIV_2, 0x00 },
+ { PCM512x_FS_SPEED_MODE, 0x00 },
+ { PCM512x_IDAC_1, 0x01 },
+ { PCM512x_IDAC_2, 0x00 },
};
static bool pcm512x_readable(struct device *dev, unsigned int reg)
@@ -103,6 +138,10 @@ static bool pcm512x_readable(struct device *dev, unsigned int reg)
case PCM512x_DSP_GPIO_INPUT:
case PCM512x_MASTER_MODE:
case PCM512x_PLL_REF:
+ case PCM512x_DAC_REF:
+ case PCM512x_GPIO_DACIN:
+ case PCM512x_GPIO_PLLIN:
+ case PCM512x_SYNCHRONIZE:
case PCM512x_PLL_COEFF_0:
case PCM512x_PLL_COEFF_1:
case PCM512x_PLL_COEFF_2:
@@ -143,6 +182,7 @@ static bool pcm512x_readable(struct device *dev, unsigned int reg)
case PCM512x_RATE_DET_2:
case PCM512x_RATE_DET_3:
case PCM512x_RATE_DET_4:
+ case PCM512x_CLOCK_STATUS:
case PCM512x_ANALOG_MUTE_DET:
case PCM512x_GPIN:
case PCM512x_DIGITAL_MUTE_DET:
@@ -154,6 +194,8 @@ static bool pcm512x_readable(struct device *dev, unsigned int reg)
case PCM512x_VCOM_CTRL_1:
case PCM512x_VCOM_CTRL_2:
case PCM512x_CRAM_CTRL:
+ case PCM512x_FLEX_A:
+ case PCM512x_FLEX_B:
return true;
default:
/* There are 256 raw register addresses */
@@ -170,6 +212,7 @@ static bool pcm512x_volatile(struct device *dev, unsigned int reg)
case PCM512x_RATE_DET_2:
case PCM512x_RATE_DET_3:
case PCM512x_RATE_DET_4:
+ case PCM512x_CLOCK_STATUS:
case PCM512x_ANALOG_MUTE_DET:
case PCM512x_GPIN:
case PCM512x_DIGITAL_MUTE_DET:
@@ -188,8 +231,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
static const char * const pcm512x_dsp_program_texts[] = {
"FIR interpolation with de-emphasis",
"Low latency IIR with de-emphasis",
- "Fixed process flow",
"High attenuation with de-emphasis",
+ "Fixed process flow",
"Ringing-less low latency FIR",
};
@@ -277,7 +320,7 @@ SOC_ENUM("Auto Mute Time Right", pcm512x_autom_r),
SOC_SINGLE("Auto Mute Mono Switch", PCM512x_DIGITAL_MUTE_3,
PCM512x_ACTL_SHIFT, 1, 0),
SOC_DOUBLE("Auto Mute Switch", PCM512x_DIGITAL_MUTE_3, PCM512x_AMLE_SHIFT,
- PCM512x_AMLR_SHIFT, 1, 0),
+ PCM512x_AMRE_SHIFT, 1, 0),
SOC_ENUM("Volume Ramp Down Rate", pcm512x_vndf),
SOC_ENUM("Volume Ramp Down Step", pcm512x_vnds),
@@ -303,6 +346,136 @@ static const struct snd_soc_dapm_route pcm512x_dapm_routes[] = {
{ "OUTR", NULL, "DACR" },
};
+static const u32 pcm512x_dai_rates[] = {
+ 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
+ 88200, 96000, 176400, 192000, 384000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_slave = {
+ .count = ARRAY_SIZE(pcm512x_dai_rates),
+ .list = pcm512x_dai_rates,
+};
+
+static int pcm512x_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval ranges[2];
+ int frame_size;
+
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0)
+ return frame_size;
+
+ switch (frame_size) {
+ case 32:
+ /* No hole when the frame size is 32. */
+ return 0;
+ case 48:
+ case 64:
+ /* There is only one hole in the range of supported
+ * rates, but it moves with the frame size.
+ */
+ memset(ranges, 0, sizeof(ranges));
+ ranges[0].min = 8000;
+ ranges[0].max = 25000000 / frame_size / 2;
+ ranges[1].min = DIV_ROUND_UP(16000000, frame_size);
+ ranges[1].max = 384000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snd_interval_ranges(hw_param_interval(params, rule->var),
+ ARRAY_SIZE(ranges), ranges, 0);
+}
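
The comment in pcm512x_hw_rule_rate() notes that for 48- and 64-bit frames there is a hole in the attainable sample rates that moves with the frame size, which appears to follow from having to place SCK, a power-of-two multiple of BCLK, inside the 16-25 MHz window used by pcm512x_find_sck() further down. A short stand-alone calculation (plain C, same arithmetic as the ranges[] assignments above, values for illustration only) makes the two intervals concrete:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Reproduce the two rate intervals built in pcm512x_hw_rule_rate()
	 * for the frame sizes that have a "hole" in the supported rates.
	 */
	int main(void)
	{
		static const unsigned int frame_sizes[] = { 48, 64 };
		int i;

		for (i = 0; i < 2; i++) {
			unsigned int fs = frame_sizes[i];

			printf("frame %u: %u..%u Hz and %u..%u Hz\n", fs,
			       8000u, 25000000u / fs / 2,
			       DIV_ROUND_UP(16000000u, fs), 384000u);
		}
		return 0;
	}
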
+
+static int pcm512x_dai_startup_master(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+ struct device *dev = dai->dev;
+ struct snd_pcm_hw_constraint_ratnums *constraints_no_pll;
+ struct snd_ratnum *rats_no_pll;
+
+ if (IS_ERR(pcm512x->sclk)) {
+ dev_err(dev, "Need SCLK for master mode: %ld\n",
+ PTR_ERR(pcm512x->sclk));
+ return PTR_ERR(pcm512x->sclk);
+ }
+
+ if (pcm512x->pll_out)
+ return snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ pcm512x_hw_rule_rate,
+ NULL,
+ SNDRV_PCM_HW_PARAM_FRAME_BITS,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+
+ constraints_no_pll = devm_kzalloc(dev, sizeof(*constraints_no_pll),
+ GFP_KERNEL);
+ if (!constraints_no_pll)
+ return -ENOMEM;
+ constraints_no_pll->nrats = 1;
+ rats_no_pll = devm_kzalloc(dev, sizeof(*rats_no_pll), GFP_KERNEL);
+ if (!rats_no_pll)
+ return -ENOMEM;
+ constraints_no_pll->rats = rats_no_pll;
+ rats_no_pll->num = clk_get_rate(pcm512x->sclk) / 64;
+ rats_no_pll->den_min = 1;
+ rats_no_pll->den_max = 128;
+ rats_no_pll->den_step = 1;
+
+ return snd_pcm_hw_constraint_ratnums(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ constraints_no_pll);
+}
+
+static int pcm512x_dai_startup_slave(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+ struct device *dev = dai->dev;
+ struct regmap *regmap = pcm512x->regmap;
+
+ if (IS_ERR(pcm512x->sclk)) {
+ dev_info(dev, "No SCLK, using BCLK: %ld\n",
+ PTR_ERR(pcm512x->sclk));
+
+ /* Disable reporting of missing SCLK as an error */
+ regmap_update_bits(regmap, PCM512x_ERROR_DETECT,
+ PCM512x_IDCH, PCM512x_IDCH);
+
+ /* Switch PLL input to BCLK */
+ regmap_update_bits(regmap, PCM512x_PLL_REF,
+ PCM512x_SREF, PCM512x_SREF_BCK);
+ }
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_slave);
+}
+
+static int pcm512x_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ switch (pcm512x->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ return pcm512x_dai_startup_master(substream, dai);
+
+ case SND_SOC_DAIFMT_CBS_CFS:
+ return pcm512x_dai_startup_slave(substream, dai);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int pcm512x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -340,17 +513,717 @@ static int pcm512x_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+static unsigned long pcm512x_find_sck(struct snd_soc_dai *dai,
+ unsigned long bclk_rate)
+{
+ struct device *dev = dai->dev;
+ unsigned long sck_rate;
+ int pow2;
+
+ /* 64 MHz <= pll_rate <= 100 MHz, VREF mode */
+ /* 16 MHz <= sck_rate <= 25 MHz, VREF mode */
+
+ /* select sck_rate as a multiple of bclk_rate but still with
+ * as many factors of 2 as possible, as that makes it easier
+ * to find a fast DAC rate
+ */
+ pow2 = 1 << fls((25000000 - 16000000) / bclk_rate);
+ for (; pow2; pow2 >>= 1) {
+ sck_rate = rounddown(25000000, bclk_rate * pow2);
+ if (sck_rate >= 16000000)
+ break;
+ }
+ if (!pow2) {
+ dev_err(dev, "Impossible to generate a suitable SCK\n");
+ return 0;
+ }
+
+ dev_dbg(dev, "sck_rate %lu\n", sck_rate);
+ return sck_rate;
+}
+
+/* pll_rate = pllin_rate * R * J.D / P
+ * 1 <= R <= 16
+ * 1 <= J <= 63
+ * 0 <= D <= 9999
+ * 1 <= P <= 15
+ * 64 MHz <= pll_rate <= 100 MHz
+ * if D == 0
+ * 1 MHz <= pllin_rate / P <= 20 MHz
+ * else if D > 0
+ * 6.667 MHz <= pllin_rate / P <= 20 MHz
+ * 4 <= J <= 11
+ * R = 1
+ */
+static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
+ unsigned long pllin_rate,
+ unsigned long pll_rate)
+{
+ struct device *dev = dai->dev;
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+ unsigned long common;
+ int R, J, D, P;
+ unsigned long K; /* 10000 * J.D */
+ unsigned long num;
+ unsigned long den;
+
+ common = gcd(pll_rate, pllin_rate);
+ dev_dbg(dev, "pll %lu pllin %lu common %lu\n",
+ pll_rate, pllin_rate, common);
+ num = pll_rate / common;
+ den = pllin_rate / common;
+
+ /* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
+ if (pllin_rate / den > 20000000 && num < 8) {
+ num *= 20000000 / (pllin_rate / den);
+ den *= 20000000 / (pllin_rate / den);
+ }
+ dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
+
+ P = den;
+ if (den <= 15 && num <= 16 * 63
+ && 1000000 <= pllin_rate / P && pllin_rate / P <= 20000000) {
+ /* Try the case with D = 0 */
+ D = 0;
+ /* factor 'num' into J and R, such that R <= 16 and J <= 63 */
+ for (R = 16; R; R--) {
+ if (num % R)
+ continue;
+ J = num / R;
+ if (J == 0 || J > 63)
+ continue;
+
+ dev_dbg(dev, "R * J / P = %d * %d / %d\n", R, J, P);
+ pcm512x->real_pll = pll_rate;
+ goto done;
+ }
+ /* no luck */
+ }
+
+ R = 1;
+
+ if (num > 0xffffffffUL / 10000)
+ goto fallback;
+
+ /* Try to find an exact pll_rate using the D > 0 case */
+ common = gcd(10000 * num, den);
+ num = 10000 * num / common;
+ den /= common;
+ dev_dbg(dev, "num %lu den %lu common %lu\n", num, den, common);
+
+ for (P = den; P <= 15; P++) {
+ if (pllin_rate / P < 6667000 || 200000000 < pllin_rate / P)
+ continue;
+ if (num * P % den)
+ continue;
+ K = num * P / den;
+ /* J == 12 is ok if D == 0 */
+ if (K < 40000 || K > 120000)
+ continue;
+
+ J = K / 10000;
+ D = K % 10000;
+ dev_dbg(dev, "J.D / P = %d.%04d / %d\n", J, D, P);
+ pcm512x->real_pll = pll_rate;
+ goto done;
+ }
+
+ /* Fall back to an approximate pll_rate */
+
+fallback:
+ /* find smallest possible P */
+ P = DIV_ROUND_UP(pllin_rate, 20000000);
+ if (!P)
+ P = 1;
+ else if (P > 15) {
+ dev_err(dev, "Need a slower clock as pll-input\n");
+ return -EINVAL;
+ }
+ if (pllin_rate / P < 6667000) {
+ dev_err(dev, "Need a faster clock as pll-input\n");
+ return -EINVAL;
+ }
+ K = DIV_ROUND_CLOSEST_ULL(10000ULL * pll_rate * P, pllin_rate);
+ if (K < 40000)
+ K = 40000;
+ /* J == 12 is ok if D == 0 */
+ if (K > 120000)
+ K = 120000;
+ J = K / 10000;
+ D = K % 10000;
+ dev_dbg(dev, "J.D / P ~ %d.%04d / %d\n", J, D, P);
+ pcm512x->real_pll = DIV_ROUND_DOWN_ULL((u64)K * pllin_rate, 10000 * P);
+
+done:
+ pcm512x->pll_r = R;
+ pcm512x->pll_j = J;
+ pcm512x->pll_d = D;
+ pcm512x->pll_p = P;
+ return 0;
+}
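
The comment block above pcm512x_find_pll_coeff() spells out the PLL constraints; the following stand-alone sketch walks only the D == 0 branch for one assumed master-mode case (48 kHz with 64-bit frames, so BCLK = 3.072 MHz feeding the PLL). For that input, pcm512x_find_sck() picks SCK = 24.576 MHz (the largest multiple of BCLK times a power of two that fits the 16-25 MHz window), pcm512x_set_dividers() below targets a PLL rate of 4 x SCK = 98.304 MHz, and the same gcd reduction used above yields R = 16, J = 2, D = 0, P = 1:

	#include <stdio.h>

	static unsigned long gcd_ul(unsigned long a, unsigned long b)
	{
		while (b) {
			unsigned long t = a % b;

			a = b;
			b = t;
		}
		return a;
	}

	/* Hypothetical case: BCLK 3.072 MHz as PLL input, SCK 24.576 MHz,
	 * PLL target 4 x SCK. Only the D == 0 search is reproduced here.
	 */
	int main(void)
	{
		unsigned long pllin = 3072000, pll = 4 * 24576000UL;
		unsigned long common = gcd_ul(pll, pllin);
		unsigned long num = pll / common;
		int R, J, P = (int)(pllin / common);

		for (R = 16; R; R--) {
			if (num % R)
				continue;
			J = num / R;
			if (J == 0 || J > 63)
				continue;
			printf("pll = pllin * %d * %d.0000 / %d = %lu Hz\n",
			       R, J, P, pllin * R * J / P);
			break;
		}
		return 0;
	}
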
+
+static unsigned long pcm512x_pllin_dac_rate(struct snd_soc_dai *dai,
+ unsigned long osr_rate,
+ unsigned long pllin_rate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+ unsigned long dac_rate;
+
+ if (!pcm512x->pll_out)
+ return 0; /* no PLL to bypass, force SCK as DAC input */
+
+ if (pllin_rate % osr_rate)
+ return 0; /* futile, quit early */
+
+ /* run DAC no faster than 6144000 Hz */
+ for (dac_rate = rounddown(6144000, osr_rate);
+ dac_rate;
+ dac_rate -= osr_rate) {
+
+ if (pllin_rate / dac_rate > 128)
+ return 0; /* DAC divider would be too big */
+
+ if (!(pllin_rate % dac_rate))
+ return dac_rate;
+
+ dac_rate -= osr_rate;
+ }
+
+ return 0;
+}
+
+static int pcm512x_set_dividers(struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *params)
+{
+ struct device *dev = dai->dev;
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+ unsigned long pllin_rate = 0;
+ unsigned long pll_rate;
+ unsigned long sck_rate;
+ unsigned long mck_rate;
+ unsigned long bclk_rate;
+ unsigned long sample_rate;
+ unsigned long osr_rate;
+ unsigned long dacsrc_rate;
+ int bclk_div;
+ int lrclk_div;
+ int dsp_div;
+ int dac_div;
+ unsigned long dac_rate;
+ int ncp_div;
+ int osr_div;
+ int ret;
+ int idac;
+ int fssp;
+ int gpio;
+
+ lrclk_div = snd_soc_params_to_frame_size(params);
+ if (lrclk_div == 0) {
+ dev_err(dev, "No LRCLK?\n");
+ return -EINVAL;
+ }
+
+ if (!pcm512x->pll_out) {
+ sck_rate = clk_get_rate(pcm512x->sclk);
+ bclk_div = params->rate_den * 64 / lrclk_div;
+ bclk_rate = DIV_ROUND_CLOSEST(sck_rate, bclk_div);
+
+ mck_rate = sck_rate;
+ } else {
+ ret = snd_soc_params_to_bclk(params);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find suitable BCLK: %d\n", ret);
+ return ret;
+ }
+ if (ret == 0) {
+ dev_err(dev, "No BCLK?\n");
+ return -EINVAL;
+ }
+ bclk_rate = ret;
+
+ pllin_rate = clk_get_rate(pcm512x->sclk);
+
+ sck_rate = pcm512x_find_sck(dai, bclk_rate);
+ if (!sck_rate)
+ return -EINVAL;
+ pll_rate = 4 * sck_rate;
+
+ ret = pcm512x_find_pll_coeff(dai, pllin_rate, pll_rate);
+ if (ret != 0)
+ return ret;
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_PLL_COEFF_0, pcm512x->pll_p - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write PLL P: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_PLL_COEFF_1, pcm512x->pll_j);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write PLL J: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_PLL_COEFF_2, pcm512x->pll_d >> 8);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write PLL D msb: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_PLL_COEFF_3, pcm512x->pll_d & 0xff);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write PLL D lsb: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_PLL_COEFF_4, pcm512x->pll_r - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write PLL R: %d\n", ret);
+ return ret;
+ }
+
+ mck_rate = pcm512x->real_pll;
+
+ bclk_div = DIV_ROUND_CLOSEST(sck_rate, bclk_rate);
+ }
+
+ if (bclk_div > 128) {
+ dev_err(dev, "Failed to find BCLK divider\n");
+ return -EINVAL;
+ }
+
+ /* the actual rate */
+ sample_rate = sck_rate / bclk_div / lrclk_div;
+ osr_rate = 16 * sample_rate;
+
+ /* run DSP no faster than 50 MHz */
+ dsp_div = mck_rate > 50000000 ? 2 : 1;
+
+ dac_rate = pcm512x_pllin_dac_rate(dai, osr_rate, pllin_rate);
+ if (dac_rate) {
+ /* the desired clock rate is "compatible" with the pll input
+ * clock, so use that clock as dac input instead of the pll
+ * output clock since the pll will introduce jitter and thus
+ * noise.
+ */
+ dev_dbg(dev, "using pll input as dac input\n");
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_DAC_REF,
+ PCM512x_SDAC, PCM512x_SDAC_GPIO);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to set gpio as dacref: %d\n", ret);
+ return ret;
+ }
+
+ gpio = PCM512x_GREF_GPIO1 + pcm512x->pll_in - 1;
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_DACIN,
+ PCM512x_GREF, gpio);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to set gpio %d as dacin: %d\n",
+ pcm512x->pll_in, ret);
+ return ret;
+ }
+
+ dacsrc_rate = pllin_rate;
+ } else {
+ /* run DAC no faster than 6144000 Hz */
+ unsigned long dac_mul = 6144000 / osr_rate;
+ unsigned long sck_mul = sck_rate / osr_rate;
+
+ for (; dac_mul; dac_mul--) {
+ if (!(sck_mul % dac_mul))
+ break;
+ }
+ if (!dac_mul) {
+ dev_err(dev, "Failed to find DAC rate\n");
+ return -EINVAL;
+ }
+
+ dac_rate = dac_mul * osr_rate;
+ dev_dbg(dev, "dac_rate %lu sample_rate %lu\n",
+ dac_rate, sample_rate);
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_DAC_REF,
+ PCM512x_SDAC, PCM512x_SDAC_SCK);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to set sck as dacref: %d\n", ret);
+ return ret;
+ }
+
+ dacsrc_rate = sck_rate;
+ }
+
+ dac_div = DIV_ROUND_CLOSEST(dacsrc_rate, dac_rate);
+ if (dac_div > 128) {
+ dev_err(dev, "Failed to find DAC divider\n");
+ return -EINVAL;
+ }
+
+ ncp_div = DIV_ROUND_CLOSEST(dacsrc_rate / dac_div, 1536000);
+ if (ncp_div > 128 || dacsrc_rate / dac_div / ncp_div > 2048000) {
+ /* run NCP no faster than 2048000 Hz; the reason for this limit is unclear */
+ ncp_div = DIV_ROUND_UP(dacsrc_rate / dac_div, 2048000);
+ if (ncp_div > 128) {
+ dev_err(dev, "Failed to find NCP divider\n");
+ return -EINVAL;
+ }
+ }
+
+ osr_div = DIV_ROUND_CLOSEST(dac_rate, osr_rate);
+ if (osr_div > 128) {
+ dev_err(dev, "Failed to find OSR divider\n");
+ return -EINVAL;
+ }
+
+ idac = mck_rate / (dsp_div * sample_rate);
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_DSP_CLKDIV, dsp_div - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write DSP divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_DAC_CLKDIV, dac_div - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write DAC divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_NCP_CLKDIV, ncp_div - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write NCP divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_OSR_CLKDIV, osr_div - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write OSR divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_MASTER_CLKDIV_1, bclk_div - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write BCLK divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap,
+ PCM512x_MASTER_CLKDIV_2, lrclk_div - 1);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write LRCLK divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_IDAC_1, idac >> 8);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write IDAC msb divider: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_IDAC_2, idac & 0xff);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write IDAC lsb divider: %d\n", ret);
+ return ret;
+ }
+
+ if (sample_rate <= 48000)
+ fssp = PCM512x_FSSP_48KHZ;
+ else if (sample_rate <= 96000)
+ fssp = PCM512x_FSSP_96KHZ;
+ else if (sample_rate <= 192000)
+ fssp = PCM512x_FSSP_192KHZ;
+ else
+ fssp = PCM512x_FSSP_384KHZ;
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_FS_SPEED_MODE,
+ PCM512x_FSSP, fssp);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set fs speed: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(codec->dev, "DSP divider %d\n", dsp_div);
+ dev_dbg(codec->dev, "DAC divider %d\n", dac_div);
+ dev_dbg(codec->dev, "NCP divider %d\n", ncp_div);
+ dev_dbg(codec->dev, "OSR divider %d\n", osr_div);
+ dev_dbg(codec->dev, "BCK divider %d\n", bclk_div);
+ dev_dbg(codec->dev, "LRCK divider %d\n", lrclk_div);
+ dev_dbg(codec->dev, "IDAC %d\n", idac);
+ dev_dbg(codec->dev, "1<<FSSP %d\n", 1 << fssp);
+
+ return 0;
+}
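
For reference, the divider chain computed above can be checked by hand. The following stand-alone sketch redoes the arithmetic for an assumed 24.576 MHz SCK and a 64fs frame at 48 kHz; the figures, the helper name and the program itself are illustrative only and are not part of the patch:

#include <stdio.h>

/* Rough re-derivation of the clock tree set up by pcm512x_set_dividers():
 * sample rate = SCK / bclk_div / lrclk_div, OSR clock = 16 * fs, and the
 * IDAC value counts DSP clocks per sample.
 */
int main(void)
{
	unsigned long sck_rate = 24576000;	/* assumed SCLK */
	int lrclk_div = 64;			/* frame size (64fs) */
	int bclk_div = 8;			/* assumed SCK / BCLK ratio */
	unsigned long bclk_rate = sck_rate / bclk_div;		/* 3.072 MHz */
	unsigned long sample_rate = bclk_rate / lrclk_div;	/* 48 kHz */
	unsigned long osr_rate = 16 * sample_rate;		/* 768 kHz */
	unsigned long mck_rate = sck_rate;	/* no PLL in this example */
	int dsp_div = mck_rate > 50000000 ? 2 : 1;	/* keep DSP <= 50 MHz */
	int idac = mck_rate / (dsp_div * sample_rate);	/* 512 DSP clocks/sample */

	printf("bclk %lu fs %lu osr %lu dsp_div %d idac %d\n",
	       bclk_rate, sample_rate, osr_rate, dsp_div, idac);
	return 0;
}
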
+
+static int pcm512x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+ int alen;
+ int gpio;
+ int clock_output;
+ int master_mode;
+ int ret;
+
+ dev_dbg(codec->dev, "hw_params %u Hz, %u channels\n",
+ params_rate(params),
+ params_channels(params));
+
+ switch (snd_pcm_format_width(params_format(params))) {
+ case 16:
+ alen = PCM512x_ALEN_16;
+ break;
+ case 20:
+ alen = PCM512x_ALEN_20;
+ break;
+ case 24:
+ alen = PCM512x_ALEN_24;
+ break;
+ case 32:
+ alen = PCM512x_ALEN_32;
+ break;
+ default:
+ dev_err(codec->dev, "Bad frame size: %d\n",
+ snd_pcm_format_width(params_format(params)));
+ return -EINVAL;
+ }
+
+ switch (pcm512x->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ ret = regmap_update_bits(pcm512x->regmap,
+ PCM512x_BCLK_LRCLK_CFG,
+ PCM512x_BCKP
+ | PCM512x_BCKO | PCM512x_LRKO,
+ 0);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable slave mode: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_ERROR_DETECT,
+ PCM512x_DCAS, 0);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable clock divider autoset: %d\n",
+ ret);
+ return ret;
+ }
+ return 0;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ clock_output = PCM512x_BCKO | PCM512x_LRKO;
+ master_mode = PCM512x_RLRK | PCM512x_RBCK;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ clock_output = PCM512x_BCKO;
+ master_mode = PCM512x_RBCK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_I2S_1,
+ PCM512x_ALEN, alen);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set frame size: %d\n", ret);
+ return ret;
+ }
+
+ if (pcm512x->pll_out) {
+ ret = regmap_write(pcm512x->regmap, PCM512x_FLEX_A, 0x11);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set FLEX_A: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(pcm512x->regmap, PCM512x_FLEX_B, 0xff);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set FLEX_B: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_ERROR_DETECT,
+ PCM512x_IDFS | PCM512x_IDBK
+ | PCM512x_IDSK | PCM512x_IDCH
+ | PCM512x_IDCM | PCM512x_DCAS
+ | PCM512x_IPLK,
+ PCM512x_IDFS | PCM512x_IDBK
+ | PCM512x_IDSK | PCM512x_IDCH
+ | PCM512x_DCAS);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to ignore auto-clock failures: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_ERROR_DETECT,
+ PCM512x_IDFS | PCM512x_IDBK
+ | PCM512x_IDSK | PCM512x_IDCH
+ | PCM512x_IDCM | PCM512x_DCAS
+ | PCM512x_IPLK,
+ PCM512x_IDFS | PCM512x_IDBK
+ | PCM512x_IDSK | PCM512x_IDCH
+ | PCM512x_DCAS | PCM512x_IPLK);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to ignore auto-clock failures: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_PLL_EN,
+ PCM512x_PLLE, 0);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to disable pll: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = pcm512x_set_dividers(dai, params);
+ if (ret != 0)
+ return ret;
+
+ if (pcm512x->pll_out) {
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_PLL_REF,
+ PCM512x_SREF, PCM512x_SREF_GPIO);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to set gpio as pllref: %d\n", ret);
+ return ret;
+ }
+
+ gpio = PCM512x_GREF_GPIO1 + pcm512x->pll_in - 1;
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_PLLIN,
+ PCM512x_GREF, gpio);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to set gpio %d as pllin: %d\n",
+ pcm512x->pll_in, ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_PLL_EN,
+ PCM512x_PLLE, PCM512x_PLLE);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable pll: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_BCLK_LRCLK_CFG,
+ PCM512x_BCKP | PCM512x_BCKO | PCM512x_LRKO,
+ clock_output);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable clock output: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_MASTER_MODE,
+ PCM512x_RLRK | PCM512x_RBCK,
+ master_mode);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable master mode: %d\n", ret);
+ return ret;
+ }
+
+ if (pcm512x->pll_out) {
+ gpio = PCM512x_G1OE << (pcm512x->pll_out - 1);
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_EN,
+ gpio, gpio);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable gpio %d: %d\n",
+ pcm512x->pll_out, ret);
+ return ret;
+ }
+
+ gpio = PCM512x_GPIO_OUTPUT_1 + pcm512x->pll_out - 1;
+ ret = regmap_update_bits(pcm512x->regmap, gpio,
+ PCM512x_GxSL, PCM512x_GxSL_PLLCK);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to output pll on %d: %d\n",
+ pcm512x->pll_out, ret);
+ return ret;
+ }
+
+ gpio = PCM512x_G1OE << (4 - 1);
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_EN,
+ gpio, gpio);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable gpio %d: %d\n",
+ 4, ret);
+ return ret;
+ }
+
+ gpio = PCM512x_GPIO_OUTPUT_1 + 4 - 1;
+ ret = regmap_update_bits(pcm512x->regmap, gpio,
+ PCM512x_GxSL, PCM512x_GxSL_PLLLK);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to output pll lock on %d: %d\n",
+ 4, ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE,
+ PCM512x_RQSY, PCM512x_RQSY_HALT);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to halt clocks: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE,
+ PCM512x_RQSY, PCM512x_RQSY_RESUME);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to resume clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pcm512x_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ pcm512x->fmt = fmt;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops pcm512x_dai_ops = {
+ .startup = pcm512x_dai_startup,
+ .hw_params = pcm512x_hw_params,
+ .set_fmt = pcm512x_set_fmt,
+};
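
The set_fmt callback above only caches the DAI format word; the codec-master paths in pcm512x_hw_params() are taken only if the machine driver asks for them. A minimal, hypothetical machine-driver fragment requesting codec-master I2S could look like the following (the function name is made up and the DAI-link plumbing is omitted):

#include <sound/soc.h>

/* Hypothetical machine-driver hw_params hook: ask for I2S with the codec
 * as bit/frame clock master, so that pcm512x_hw_params() programs the
 * dividers and enables BCLK/LRCLK output.
 */
static int example_machine_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	return snd_soc_dai_set_fmt(rtd->codec_dai,
				   SND_SOC_DAIFMT_I2S |
				   SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBM_CFM);
}
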
+
static struct snd_soc_dai_driver pcm512x_dai = {
.name = "pcm512x-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 384000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE
},
+ .ops = &pcm512x_dai_ops,
};
static struct snd_soc_codec_driver pcm512x_codec_driver = {
@@ -448,21 +1321,9 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
}
pcm512x->sclk = devm_clk_get(dev, NULL);
- if (IS_ERR(pcm512x->sclk)) {
- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_info(dev, "No SCLK, using BCLK: %ld\n",
- PTR_ERR(pcm512x->sclk));
-
- /* Disable reporting of missing SCLK as an error */
- regmap_update_bits(regmap, PCM512x_ERROR_DETECT,
- PCM512x_IDCH, PCM512x_IDCH);
-
- /* Switch PLL input to BCLK */
- regmap_update_bits(regmap, PCM512x_PLL_REF,
- PCM512x_SREF, PCM512x_SREF);
- } else {
+ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(pcm512x->sclk)) {
ret = clk_prepare_enable(pcm512x->sclk);
if (ret != 0) {
dev_err(dev, "Failed to enable SCLK: %d\n", ret);
@@ -483,6 +1344,43 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
pm_runtime_enable(dev);
pm_runtime_idle(dev);
+#ifdef CONFIG_OF
+ if (dev->of_node) {
+ const struct device_node *np = dev->of_node;
+ u32 val;
+
+ if (of_property_read_u32(np, "pll-in", &val) >= 0) {
+ if (val > 6) {
+ dev_err(dev, "Invalid pll-in\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+ pcm512x->pll_in = val;
+ }
+
+ if (of_property_read_u32(np, "pll-out", &val) >= 0) {
+ if (val > 6) {
+ dev_err(dev, "Invalid pll-out\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+ pcm512x->pll_out = val;
+ }
+
+ if (!pcm512x->pll_in != !pcm512x->pll_out) {
+ dev_err(dev,
+ "Error: both pll-in and pll-out, or none\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+ if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
+ dev_err(dev, "Error: pll-in == pll-out\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+ }
+#endif
+
ret = snd_soc_register_codec(dev, &pcm512x_codec_driver,
&pcm512x_dai, 1);
if (ret != 0) {
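
The pll-in/pll-out validation above relies on the "!pcm512x->pll_in != !pcm512x->pll_out" idiom, a boolean XOR: the properties are accepted only when both are present or both are absent, and they must not name the same GPIO. A self-contained sketch of the same checks, with hypothetical values:

#include <stdio.h>

/* Boolean-XOR check as used above: valid only when pll_in and pll_out are
 * either both zero (feature unused) or both non-zero and distinct.
 */
static int validate_pll_gpios(unsigned int pll_in, unsigned int pll_out)
{
	if (!pll_in != !pll_out)
		return -1;		/* exactly one was given: reject */
	if (pll_in && pll_in == pll_out)
		return -1;		/* same GPIO for input and output: reject */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       validate_pll_gpios(0, 0),	/* 0: neither given */
	       validate_pll_gpios(1, 4),	/* 0: both given, distinct */
	       validate_pll_gpios(3, 0));	/* -1: only pll-in given */
	return 0;
}
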
diff --git a/sound/soc/codecs/pcm512x.h b/sound/soc/codecs/pcm512x.h
index 6ee76aaca09a..b7c310207223 100644
--- a/sound/soc/codecs/pcm512x.h
+++ b/sound/soc/codecs/pcm512x.h
@@ -37,6 +37,10 @@
#define PCM512x_DSP_GPIO_INPUT (PCM512x_PAGE_BASE(0) + 10)
#define PCM512x_MASTER_MODE (PCM512x_PAGE_BASE(0) + 12)
#define PCM512x_PLL_REF (PCM512x_PAGE_BASE(0) + 13)
+#define PCM512x_DAC_REF (PCM512x_PAGE_BASE(0) + 14)
+#define PCM512x_GPIO_DACIN (PCM512x_PAGE_BASE(0) + 16)
+#define PCM512x_GPIO_PLLIN (PCM512x_PAGE_BASE(0) + 18)
+#define PCM512x_SYNCHRONIZE (PCM512x_PAGE_BASE(0) + 19)
#define PCM512x_PLL_COEFF_0 (PCM512x_PAGE_BASE(0) + 20)
#define PCM512x_PLL_COEFF_1 (PCM512x_PAGE_BASE(0) + 21)
#define PCM512x_PLL_COEFF_2 (PCM512x_PAGE_BASE(0) + 22)
@@ -77,6 +81,7 @@
#define PCM512x_RATE_DET_2 (PCM512x_PAGE_BASE(0) + 92)
#define PCM512x_RATE_DET_3 (PCM512x_PAGE_BASE(0) + 93)
#define PCM512x_RATE_DET_4 (PCM512x_PAGE_BASE(0) + 94)
+#define PCM512x_CLOCK_STATUS (PCM512x_PAGE_BASE(0) + 95)
#define PCM512x_ANALOG_MUTE_DET (PCM512x_PAGE_BASE(0) + 108)
#define PCM512x_GPIN (PCM512x_PAGE_BASE(0) + 119)
#define PCM512x_DIGITAL_MUTE_DET (PCM512x_PAGE_BASE(0) + 120)
@@ -91,7 +96,10 @@
#define PCM512x_CRAM_CTRL (PCM512x_PAGE_BASE(44) + 1)
-#define PCM512x_MAX_REGISTER (PCM512x_PAGE_BASE(44) + 1)
+#define PCM512x_FLEX_A (PCM512x_PAGE_BASE(253) + 63)
+#define PCM512x_FLEX_B (PCM512x_PAGE_BASE(253) + 64)
+
+#define PCM512x_MAX_REGISTER (PCM512x_PAGE_BASE(253) + 64)
/* Page 0, Register 1 - reset */
#define PCM512x_RSTR (1 << 0)
@@ -108,8 +116,8 @@
#define PCM512x_RQML_SHIFT 4
/* Page 0, Register 4 - PLL */
-#define PCM512x_PLCE (1 << 0)
-#define PCM512x_RLCE_SHIFT 0
+#define PCM512x_PLLE (1 << 0)
+#define PCM512x_PLLE_SHIFT 0
#define PCM512x_PLCK (1 << 4)
#define PCM512x_PLCK_SHIFT 4
@@ -119,8 +127,66 @@
#define PCM512x_DEMP (1 << 4)
#define PCM512x_DEMP_SHIFT 4
+/* Page 0, Register 8 - GPIO output enable */
+#define PCM512x_G1OE (1 << 0)
+#define PCM512x_G2OE (1 << 1)
+#define PCM512x_G3OE (1 << 2)
+#define PCM512x_G4OE (1 << 3)
+#define PCM512x_G5OE (1 << 4)
+#define PCM512x_G6OE (1 << 5)
+
+/* Page 0, Register 9 - BCK, LRCLK configuration */
+#define PCM512x_LRKO (1 << 0)
+#define PCM512x_LRKO_SHIFT 0
+#define PCM512x_BCKO (1 << 4)
+#define PCM512x_BCKO_SHIFT 4
+#define PCM512x_BCKP (1 << 5)
+#define PCM512x_BCKP_SHIFT 5
+
+/* Page 0, Register 12 - Master mode BCK, LRCLK reset */
+#define PCM512x_RLRK (1 << 0)
+#define PCM512x_RLRK_SHIFT 0
+#define PCM512x_RBCK (1 << 1)
+#define PCM512x_RBCK_SHIFT 1
+
/* Page 0, Register 13 - PLL reference */
-#define PCM512x_SREF (1 << 4)
+#define PCM512x_SREF (7 << 4)
+#define PCM512x_SREF_SHIFT 4
+#define PCM512x_SREF_SCK (0 << 4)
+#define PCM512x_SREF_BCK (1 << 4)
+#define PCM512x_SREF_GPIO (3 << 4)
+
+/* Page 0, Register 14 - DAC reference */
+#define PCM512x_SDAC (7 << 4)
+#define PCM512x_SDAC_SHIFT 4
+#define PCM512x_SDAC_MCK (0 << 4)
+#define PCM512x_SDAC_PLL (1 << 4)
+#define PCM512x_SDAC_SCK (3 << 4)
+#define PCM512x_SDAC_BCK (4 << 4)
+#define PCM512x_SDAC_GPIO (5 << 4)
+
+/* Page 0, Register 16, 18 - GPIO source for DAC, PLL */
+#define PCM512x_GREF (7 << 0)
+#define PCM512x_GREF_SHIFT 0
+#define PCM512x_GREF_GPIO1 (0 << 0)
+#define PCM512x_GREF_GPIO2 (1 << 0)
+#define PCM512x_GREF_GPIO3 (2 << 0)
+#define PCM512x_GREF_GPIO4 (3 << 0)
+#define PCM512x_GREF_GPIO5 (4 << 0)
+#define PCM512x_GREF_GPIO6 (5 << 0)
+
+/* Page 0, Register 19 - synchronize */
+#define PCM512x_RQSY (1 << 0)
+#define PCM512x_RQSY_RESUME (0 << 0)
+#define PCM512x_RQSY_HALT (1 << 0)
+
+/* Page 0, Register 34 - fs speed mode */
+#define PCM512x_FSSP (3 << 0)
+#define PCM512x_FSSP_SHIFT 0
+#define PCM512x_FSSP_48KHZ (0 << 0)
+#define PCM512x_FSSP_96KHZ (1 << 0)
+#define PCM512x_FSSP_192KHZ (2 << 0)
+#define PCM512x_FSSP_384KHZ (3 << 0)
/* Page 0, Register 37 - Error detection */
#define PCM512x_IPLK (1 << 0)
@@ -131,6 +197,20 @@
#define PCM512x_IDBK (1 << 5)
#define PCM512x_IDFS (1 << 6)
+/* Page 0, Register 40 - I2S configuration */
+#define PCM512x_ALEN (3 << 0)
+#define PCM512x_ALEN_SHIFT 0
+#define PCM512x_ALEN_16 (0 << 0)
+#define PCM512x_ALEN_20 (1 << 0)
+#define PCM512x_ALEN_24 (2 << 0)
+#define PCM512x_ALEN_32 (3 << 0)
+#define PCM512x_AFMT (3 << 4)
+#define PCM512x_AFMT_SHIFT 4
+#define PCM512x_AFMT_I2S (0 << 4)
+#define PCM512x_AFMT_DSP (1 << 4)
+#define PCM512x_AFMT_RTJ (2 << 4)
+#define PCM512x_AFMT_LTJ (3 << 4)
+
/* Page 0, Register 42 - DAC routing */
#define PCM512x_AUPR_SHIFT 0
#define PCM512x_AUPL_SHIFT 4
@@ -152,7 +232,26 @@
/* Page 0, Register 65 - Digital mute enables */
#define PCM512x_ACTL_SHIFT 2
#define PCM512x_AMLE_SHIFT 1
-#define PCM512x_AMLR_SHIFT 0
+#define PCM512x_AMRE_SHIFT 0
+
+/* Page 0, Register 80-85, GPIO output selection */
+#define PCM512x_GxSL (31 << 0)
+#define PCM512x_GxSL_SHIFT 0
+#define PCM512x_GxSL_OFF (0 << 0)
+#define PCM512x_GxSL_DSP (1 << 0)
+#define PCM512x_GxSL_REG (2 << 0)
+#define PCM512x_GxSL_AMUTB (3 << 0)
+#define PCM512x_GxSL_AMUTL (4 << 0)
+#define PCM512x_GxSL_AMUTR (5 << 0)
+#define PCM512x_GxSL_CLKI (6 << 0)
+#define PCM512x_GxSL_SDOUT (7 << 0)
+#define PCM512x_GxSL_ANMUL (8 << 0)
+#define PCM512x_GxSL_ANMUR (9 << 0)
+#define PCM512x_GxSL_PLLLK (10 << 0)
+#define PCM512x_GxSL_CPCLK (11 << 0)
+#define PCM512x_GxSL_UV0_7 (14 << 0)
+#define PCM512x_GxSL_UV0_3 (15 << 0)
+#define PCM512x_GxSL_PLLCK (16 << 0)
/* Page 1, Register 2 - analog volume control */
#define PCM512x_RAGN_SHIFT 0
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 2cd4fe463102..f374840a5a7c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -34,6 +34,7 @@
#include "rt286.h"
#define RT286_VENDOR_ID 0x10ec0286
+#define RT288_VENDOR_ID 0x10ec0288
struct rt286_priv {
struct regmap *regmap;
@@ -305,6 +306,8 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
*hp = false;
*mic = false;
+ if (!rt286->codec)
+ return -EINVAL;
if (rt286->pdata.cbj_en) {
regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf);
*hp = buf & 0x80000000;
@@ -403,7 +406,8 @@ EXPORT_SYMBOL_GPL(rt286_mic_detect);
static int is_mclk_mode(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct rt286_priv *rt286 = snd_soc_codec_get_drvdata(source->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt286_priv *rt286 = snd_soc_codec_get_drvdata(codec);
if (rt286->clk_id == RT286_SCLK_S_MCLK)
return 1;
@@ -417,6 +421,8 @@ static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
static const struct snd_kcontrol_new rt286_snd_controls[] = {
SOC_DOUBLE_R_TLV("DAC0 Playback Volume", RT286_DACL_GAIN,
RT286_DACR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+ SOC_DOUBLE_R("ADC0 Capture Switch", RT286_ADCL_GAIN,
+ RT286_ADCR_GAIN, 7, 1, 1),
SOC_DOUBLE_R_TLV("ADC0 Capture Volume", RT286_ADCL_GAIN,
RT286_ADCR_GAIN, 0, 0x7f, 0, out_vol_tlv),
SOC_SINGLE_TLV("AMIC Volume", RT286_MIC_GAIN,
@@ -500,7 +506,7 @@ SOC_DAPM_ENUM("SPO source", rt286_spo_enum);
static int rt286_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -522,7 +528,7 @@ static int rt286_spk_event(struct snd_soc_dapm_widget *w,
static int rt286_set_dmic1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -538,36 +544,10 @@ static int rt286_set_dmic1_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static int rt286_adc_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = w->codec;
- unsigned int nid;
-
- nid = (w->reg >> 20) & 0xff;
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec,
- VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
- 0x7080, 0x7000);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(codec,
- VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
- 0x7080, 0x7080);
- break;
- default:
- return 0;
- }
-
- return 0;
-}
-
static int rt286_vref_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -585,7 +565,7 @@ static int rt286_vref_event(struct snd_soc_dapm_widget *w,
static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -604,7 +584,7 @@ static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
static int rt286_mic1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -667,12 +647,10 @@ static const struct snd_soc_dapm_widget rt286_dapm_widgets[] = {
SND_SOC_DAPM_ADC("ADC 1", NULL, SND_SOC_NOPM, 0, 0),
/* ADC Mux */
- SND_SOC_DAPM_MUX_E("ADC 0 Mux", RT286_SET_POWER(RT286_ADC_IN1), 0, 1,
- &rt286_adc0_mux, rt286_adc_event, SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MUX_E("ADC 1 Mux", RT286_SET_POWER(RT286_ADC_IN2), 0, 1,
- &rt286_adc1_mux, rt286_adc_event, SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX("ADC 0 Mux", RT286_SET_POWER(RT286_ADC_IN1), 0, 1,
+ &rt286_adc0_mux),
+ SND_SOC_DAPM_MUX("ADC 1 Mux", RT286_SET_POWER(RT286_ADC_IN2), 0, 1,
+ &rt286_adc1_mux),
/* Audio Interface */
SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
@@ -861,10 +839,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
dev_dbg(codec->dev, "format val = 0x%x\n", val);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
- else
- snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
+ snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
+ snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
return 0;
}
@@ -1196,6 +1172,7 @@ static const struct regmap_config rt286_regmap = {
static const struct i2c_device_id rt286_i2c_id[] = {
{"rt286", 0},
+ {"rt288", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, rt286_i2c_id);
@@ -1216,6 +1193,17 @@ static struct dmi_system_id force_combo_jack_table[] = {
{ }
};
+static struct dmi_system_id dmi_dell_dino[] = {
+ {
+ .ident = "Dell Dino",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "0144P8")
+ }
+ },
+ { }
+};
+
static int rt286_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1238,7 +1226,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
regmap_read(rt286->regmap,
RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &ret);
- if (ret != RT286_VENDOR_ID) {
+ if (ret != RT286_VENDOR_ID && ret != RT288_VENDOR_ID) {
dev_err(&i2c->dev,
"Device with ID register %x is not rt286\n", ret);
return -ENODEV;
@@ -1251,7 +1239,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt286->pdata = *pdata;
- if (dmi_check_system(force_combo_jack_table))
+ if (dmi_check_system(force_combo_jack_table) ||
+ dmi_check_system(dmi_dell_dino))
rt286->pdata.cbj_en = true;
regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
@@ -1290,6 +1279,17 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
+ if (dmi_check_system(dmi_dell_dino)) {
+ regmap_update_bits(rt286->regmap,
+ RT286_SET_GPIO_MASK, 0x40, 0x40);
+ regmap_update_bits(rt286->regmap,
+ RT286_SET_GPIO_DIRECTION, 0x40, 0x40);
+ regmap_update_bits(rt286->regmap,
+ RT286_SET_GPIO_DATA, 0x40, 0x40);
+ regmap_update_bits(rt286->regmap,
+ RT286_GPIO_CTRL, 0xc, 0x8);
+ }
+
if (rt286->i2c->irq) {
ret = request_threaded_irq(rt286->i2c->irq, NULL, rt286_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "rt286", rt286);
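
The recurring change from w->codec to snd_soc_dapm_to_codec(w->dapm) in this driver (and in the rt5631/rt5640/rt5645/rt5651/rt5670 hunks below) is the same mechanical conversion everywhere: the codec is now reached through the widget's DAPM context rather than a widget field. A minimal event handler written against the new accessor, with a placeholder register, might look like this (illustrative only, not part of the patch):

#include <sound/soc.h>

/* Illustrative DAPM event handler using the DAPM-context accessor instead
 * of the removed w->codec field; register 0x10 and its bit are placeholders.
 */
static int example_widget_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* e.g. un-gate a clock after power up */
		snd_soc_update_bits(codec, 0x10, 0x1, 0x1);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		snd_soc_update_bits(codec, 0x10, 0x1, 0x0);
		break;
	default:
		break;
	}

	return 0;
}
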
diff --git a/sound/soc/codecs/rt286.h b/sound/soc/codecs/rt286.h
index b539b7320a79..7130edb152ef 100644
--- a/sound/soc/codecs/rt286.h
+++ b/sound/soc/codecs/rt286.h
@@ -117,6 +117,12 @@
VERB_CMD(AC_VERB_SET_COEF_INDEX, RT286_VENDOR_REGISTERS, 0)
#define RT286_PROC_COEF\
VERB_CMD(AC_VERB_SET_PROC_COEF, RT286_VENDOR_REGISTERS, 0)
+#define RT286_SET_GPIO_MASK\
+ VERB_CMD(AC_VERB_SET_GPIO_MASK, RT286_AUDIO_FUNCTION_GROUP, 0)
+#define RT286_SET_GPIO_DIRECTION\
+ VERB_CMD(AC_VERB_SET_GPIO_DIRECTION, RT286_AUDIO_FUNCTION_GROUP, 0)
+#define RT286_SET_GPIO_DATA\
+ VERB_CMD(AC_VERB_SET_GPIO_DATA, RT286_AUDIO_FUNCTION_GROUP, 0)
/* Index registers */
#define RT286_A_BIAS_CTRL1 0x01
@@ -131,6 +137,7 @@
#define RT286_POWER_CTRL3 0x0f
#define RT286_MIC1_DET_CTRL 0x19
#define RT286_MISC_CTRL1 0x20
+#define RT286_GPIO_CTRL 0x29
#define RT286_IRQ_CTRL 0x33
#define RT286_PLL_CTRL1 0x49
#define RT286_CBJ_CTRL1 0x4f
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 6d7b7ca7d530..c61852742ee3 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -287,70 +287,78 @@ static const struct snd_kcontrol_new rt5631_snd_controls[] = {
static int check_sysclk1_source(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_GLOBAL_CLK_CTRL);
+ reg = snd_soc_read(codec, RT5631_GLOBAL_CLK_CTRL);
return reg & RT5631_SYSCLK_SOUR_SEL_PLL;
}
static int check_dmic_used(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(source->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec);
return rt5631->dmic_used_flag;
}
static int check_dacl_to_outmixl(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_OUTMIXER_L_CTRL);
+ reg = snd_soc_read(codec, RT5631_OUTMIXER_L_CTRL);
return !(reg & RT5631_M_DAC_L_TO_OUTMIXER_L);
}
static int check_dacr_to_outmixr(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_OUTMIXER_R_CTRL);
+ reg = snd_soc_read(codec, RT5631_OUTMIXER_R_CTRL);
return !(reg & RT5631_M_DAC_R_TO_OUTMIXER_R);
}
static int check_dacl_to_spkmixl(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_SPK_MIXER_CTRL);
+ reg = snd_soc_read(codec, RT5631_SPK_MIXER_CTRL);
return !(reg & RT5631_M_DAC_L_TO_SPKMIXER_L);
}
static int check_dacr_to_spkmixr(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_SPK_MIXER_CTRL);
+ reg = snd_soc_read(codec, RT5631_SPK_MIXER_CTRL);
return !(reg & RT5631_M_DAC_R_TO_SPKMIXER_R);
}
static int check_adcl_select(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_ADC_REC_MIXER);
+ reg = snd_soc_read(codec, RT5631_ADC_REC_MIXER);
return !(reg & RT5631_M_MIC1_TO_RECMIXER_L);
}
static int check_adcr_select(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
- reg = snd_soc_read(source->codec, RT5631_ADC_REC_MIXER);
+ reg = snd_soc_read(codec, RT5631_ADC_REC_MIXER);
return !(reg & RT5631_M_MIC2_TO_RECMIXER_R);
}
@@ -556,7 +564,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
static int hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -590,7 +598,7 @@ static int hp_event(struct snd_soc_dapm_widget *w,
static int set_dmic_params(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec);
switch (rt5631->rx_rate) {
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index c3f2decd643c..178e55d4d481 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -458,7 +458,7 @@ static const struct snd_kcontrol_new rt5640_specific_snd_controls[] = {
static int set_dmic_clk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
int idx = -EINVAL;
@@ -475,9 +475,10 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int val;
- val = snd_soc_read(source->codec, RT5640_GLB_CLK);
+ val = snd_soc_read(codec, RT5640_GLB_CLK);
val &= RT5640_SCLK_SRC_MASK;
if (val == RT5640_SCLK_SRC_PLL1)
return 1;
@@ -963,7 +964,7 @@ static void rt5640_pmu_depop(struct snd_soc_codec *codec)
static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -987,7 +988,7 @@ static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -1003,7 +1004,7 @@ static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
static int rt5640_hp_post_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -2124,6 +2125,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match);
static struct acpi_device_id rt5640_acpi_match[] = {
{ "INT33CA", 0 },
{ "10EC5640", 0 },
+ { "10EC5642", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 27141e2df878..c9a4c5be083b 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -31,6 +31,7 @@
#include "rt5645.h"
#define RT5645_DEVICE_ID 0x6308
+#define RT5650_DEVICE_ID 0x6419
#define RT5645_PR_RANGE_BASE (0xff + 1)
#define RT5645_PR_SPACING 0x100
@@ -59,6 +60,10 @@ static const struct reg_default init_list[] = {
};
#define RT5645_INIT_REG_LEN ARRAY_SIZE(init_list)
+static const struct reg_default rt5650_init_list[] = {
+ {0xf6, 0x0100},
+};
+
static const struct reg_default rt5645_reg[] = {
{ 0x00, 0x0000 },
{ 0x01, 0xc8c8 },
@@ -86,6 +91,7 @@ static const struct reg_default rt5645_reg[] = {
{ 0x2a, 0x5656 },
{ 0x2b, 0x5454 },
{ 0x2c, 0xaaa0 },
+ { 0x2d, 0x0000 },
{ 0x2f, 0x1002 },
{ 0x31, 0x5000 },
{ 0x32, 0x0000 },
@@ -193,6 +199,8 @@ static const struct reg_default rt5645_reg[] = {
{ 0xdb, 0x0003 },
{ 0xdc, 0x0049 },
{ 0xdd, 0x001b },
+ { 0xdf, 0x0008 },
+ { 0xe0, 0x4000 },
{ 0xe6, 0x8000 },
{ 0xe7, 0x0200 },
{ 0xec, 0xb300 },
@@ -242,6 +250,7 @@ static bool rt5645_volatile_register(struct device *dev, unsigned int reg)
case RT5645_IRQ_CTRL3:
case RT5645_INT_IRQ_ST:
case RT5645_IL_CMD:
+ case RT5650_4BTN_IL_CMD1:
case RT5645_VENDOR_ID:
case RT5645_VENDOR_ID1:
case RT5645_VENDOR_ID2:
@@ -287,6 +296,7 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg)
case RT5645_STO_DAC_MIXER:
case RT5645_MONO_DAC_MIXER:
case RT5645_DIG_MIXER:
+ case RT5650_A_DAC_SOUR:
case RT5645_DIG_INF1_DATA:
case RT5645_PDM_OUT_CTRL:
case RT5645_REC_L1_MIXER:
@@ -378,6 +388,8 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg)
case RT5645_IL_CMD:
case RT5645_IL_CMD2:
case RT5645_IL_CMD3:
+ case RT5650_4BTN_IL_CMD1:
+ case RT5650_4BTN_IL_CMD2:
case RT5645_DRC1_HL_CTRL1:
case RT5645_DRC2_HL_CTRL1:
case RT5645_ADC_MONO_HP_CTRL1:
@@ -527,7 +539,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
static int set_dmic_clk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
int idx = -EINVAL;
@@ -544,9 +556,10 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int val;
- val = snd_soc_read(source->codec, RT5645_GLB_CLK);
+ val = snd_soc_read(codec, RT5645_GLB_CLK);
val &= RT5645_SCLK_SRC_MASK;
if (val == RT5645_SCLK_SRC_PLL1)
return 1;
@@ -557,6 +570,7 @@ static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
static int is_using_asrc(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg, shift, val;
switch (source->shift) {
@@ -588,7 +602,7 @@ static int is_using_asrc(struct snd_soc_dapm_widget *source,
return 0;
}
- val = (snd_soc_read(source->codec, reg) >> shift) & 0xf;
+ val = (snd_soc_read(codec, reg) >> shift) & 0xf;
switch (val) {
case 1:
case 2:
@@ -601,6 +615,87 @@ static int is_using_asrc(struct snd_soc_dapm_widget *source,
}
+/**
+ * rt5645_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @codec: SoC audio codec device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source
+ *
+ * The ASRC function handles asynchronous MCLK and LRCK. Since the RT5645
+ * only supports the standard 32fs and 64fs I2S formats, ASRC must be enabled
+ * to support special I2S clock formats such as Intel's 100fs (100 * sampling
+ * rate). The ASRC function tracks the I2S clock and generates a corresponding
+ * system clock for the codec. This function provides an API to select the
+ * clock source for a set of filters specified by the mask; the codec driver
+ * turns on ASRC for those filters if ASRC is selected as their clock source.
+ */
+int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src)
+{
+ unsigned int asrc2_mask = 0;
+ unsigned int asrc2_value = 0;
+ unsigned int asrc3_mask = 0;
+ unsigned int asrc3_value = 0;
+
+ switch (clk_src) {
+ case RT5645_CLK_SEL_SYS:
+ case RT5645_CLK_SEL_I2S1_ASRC:
+ case RT5645_CLK_SEL_I2S2_ASRC:
+ case RT5645_CLK_SEL_SYS2:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (filter_mask & RT5645_DA_STEREO_FILTER) {
+ asrc2_mask |= RT5645_DA_STO_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5645_DA_STO_CLK_SEL_MASK)
+ | (clk_src << RT5645_DA_STO_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5645_DA_MONO_L_FILTER) {
+ asrc2_mask |= RT5645_DA_MONOL_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5645_DA_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5645_DA_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5645_DA_MONO_R_FILTER) {
+ asrc2_mask |= RT5645_DA_MONOR_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5645_DA_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5645_DA_MONOR_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5645_AD_STEREO_FILTER) {
+ asrc2_mask |= RT5645_AD_STO1_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5645_AD_STO1_CLK_SEL_MASK)
+ | (clk_src << RT5645_AD_STO1_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5645_AD_MONO_L_FILTER) {
+ asrc3_mask |= RT5645_AD_MONOL_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5645_AD_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5645_AD_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5645_AD_MONO_R_FILTER) {
+ asrc3_mask |= RT5645_AD_MONOR_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5645_AD_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5645_AD_MONOR_CLK_SEL_SFT);
+ }
+
+ if (asrc2_mask)
+ snd_soc_update_bits(codec, RT5645_ASRC_2,
+ asrc2_mask, asrc2_value);
+
+ if (asrc3_mask)
+ snd_soc_update_bits(codec, RT5645_ASRC_3,
+ asrc3_mask, asrc3_value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5645_sel_asrc_clk_src);
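
The kerneldoc above describes a machine-driver-facing API. As a hypothetical usage example, a machine driver feeding the codec a non-standard bit clock could put the stereo DA and AD filters on the I2S1-tracking ASRC like this (the wrapper function is made up; the constants are the ones added by this patch):

#include <sound/soc.h>
#include "rt5645.h"

/* Hypothetical machine-driver helper: let the stereo DA/AD filters track
 * I2S1 via ASRC, e.g. when the SoC supplies a 100fs bit clock.
 */
static int example_rt5645_enable_asrc(struct snd_soc_codec *codec)
{
	return rt5645_sel_asrc_clk_src(codec,
				       RT5645_DA_STEREO_FILTER |
				       RT5645_AD_STEREO_FILTER,
				       RT5645_CLK_SEL_I2S1_ASRC);
}
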
+
/* Digital Mixer */
static const struct snd_kcontrol_new rt5645_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5645_STO1_ADC_MIXER,
@@ -1007,6 +1102,44 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5645_if1_adc_in_mux =
SOC_DAPM_ENUM("IF1 ADC IN source", rt5645_if1_adc_in_enum);
+/* MX-2d [3] [2] */
+static const char * const rt5650_a_dac1_src[] = {
+ "DAC1", "Stereo DAC Mixer"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5650_a_dac1_l_enum, RT5650_A_DAC_SOUR,
+ RT5650_A_DAC1_L_IN_SFT, rt5650_a_dac1_src);
+
+static const struct snd_kcontrol_new rt5650_a_dac1_l_mux =
+ SOC_DAPM_ENUM("A DAC1 L source", rt5650_a_dac1_l_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5650_a_dac1_r_enum, RT5650_A_DAC_SOUR,
+ RT5650_A_DAC1_R_IN_SFT, rt5650_a_dac1_src);
+
+static const struct snd_kcontrol_new rt5650_a_dac1_r_mux =
+ SOC_DAPM_ENUM("A DAC1 R source", rt5650_a_dac1_r_enum);
+
+/* MX-2d [1] [0] */
+static const char * const rt5650_a_dac2_src[] = {
+ "Stereo DAC Mixer", "Mono DAC Mixer"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5650_a_dac2_l_enum, RT5650_A_DAC_SOUR,
+ RT5650_A_DAC2_L_IN_SFT, rt5650_a_dac2_src);
+
+static const struct snd_kcontrol_new rt5650_a_dac2_l_mux =
+ SOC_DAPM_ENUM("A DAC2 L source", rt5650_a_dac2_l_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5650_a_dac2_r_enum, RT5650_A_DAC_SOUR,
+ RT5650_A_DAC2_R_IN_SFT, rt5650_a_dac2_src);
+
+static const struct snd_kcontrol_new rt5650_a_dac2_r_mux =
+ SOC_DAPM_ENUM("A DAC2 R source", rt5650_a_dac2_r_enum);
+
/* MX-2F [13:12] */
static const char * const rt5645_if2_adc_in_src[] = {
"IF_ADC1", "IF_ADC2", "VAD_ADC"
@@ -1144,18 +1277,23 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
static int rt5645_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
hp_amp_power(codec, 1);
/* headphone unmute sequence */
- snd_soc_update_bits(codec, RT5645_DEPOP_M3, RT5645_CP_FQ1_MASK |
- RT5645_CP_FQ2_MASK | RT5645_CP_FQ3_MASK,
- (RT5645_CP_FQ_192_KHZ << RT5645_CP_FQ1_SFT) |
- (RT5645_CP_FQ_12_KHZ << RT5645_CP_FQ2_SFT) |
- (RT5645_CP_FQ_192_KHZ << RT5645_CP_FQ3_SFT));
+ if (rt5645->codec_type == CODEC_TYPE_RT5650) {
+ snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737);
+ } else {
+ snd_soc_update_bits(codec, RT5645_DEPOP_M3,
+ RT5645_CP_FQ1_MASK | RT5645_CP_FQ2_MASK |
+ RT5645_CP_FQ3_MASK,
+ (RT5645_CP_FQ_192_KHZ << RT5645_CP_FQ1_SFT) |
+ (RT5645_CP_FQ_12_KHZ << RT5645_CP_FQ2_SFT) |
+ (RT5645_CP_FQ_192_KHZ << RT5645_CP_FQ3_SFT));
+ }
regmap_write(rt5645->regmap,
RT5645_PR_BASE + RT5645_MAMP_INT_REG2, 0xfc00);
snd_soc_update_bits(codec, RT5645_DEPOP_M1,
@@ -1175,12 +1313,16 @@ static int rt5645_hp_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_PRE_PMD:
/* headphone mute sequence */
- snd_soc_update_bits(codec, RT5645_DEPOP_M3,
- RT5645_CP_FQ1_MASK | RT5645_CP_FQ2_MASK |
- RT5645_CP_FQ3_MASK,
- (RT5645_CP_FQ_96_KHZ << RT5645_CP_FQ1_SFT) |
- (RT5645_CP_FQ_12_KHZ << RT5645_CP_FQ2_SFT) |
- (RT5645_CP_FQ_96_KHZ << RT5645_CP_FQ3_SFT));
+ if (rt5645->codec_type == CODEC_TYPE_RT5650) {
+ snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737);
+ } else {
+ snd_soc_update_bits(codec, RT5645_DEPOP_M3,
+ RT5645_CP_FQ1_MASK | RT5645_CP_FQ2_MASK |
+ RT5645_CP_FQ3_MASK,
+ (RT5645_CP_FQ_96_KHZ << RT5645_CP_FQ1_SFT) |
+ (RT5645_CP_FQ_12_KHZ << RT5645_CP_FQ2_SFT) |
+ (RT5645_CP_FQ_96_KHZ << RT5645_CP_FQ3_SFT));
+ }
regmap_write(rt5645->regmap,
RT5645_PR_BASE + RT5645_MAMP_INT_REG2, 0xfc00);
snd_soc_update_bits(codec, RT5645_DEPOP_M1,
@@ -1205,7 +1347,7 @@ static int rt5645_hp_event(struct snd_soc_dapm_widget *w,
static int rt5645_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -1232,7 +1374,7 @@ static int rt5645_spk_event(struct snd_soc_dapm_widget *w,
static int rt5645_lout_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -1262,7 +1404,7 @@ static int rt5645_lout_event(struct snd_soc_dapm_widget *w,
static int rt5645_bst2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -1574,6 +1716,17 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("SPOR"),
};
+static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("A DAC1 L Mux", SND_SOC_NOPM,
+ 0, 0, &rt5650_a_dac1_l_mux),
+ SND_SOC_DAPM_MUX("A DAC1 R Mux", SND_SOC_NOPM,
+ 0, 0, &rt5650_a_dac1_r_mux),
+ SND_SOC_DAPM_MUX("A DAC2 L Mux", SND_SOC_NOPM,
+ 0, 0, &rt5650_a_dac2_l_mux),
+ SND_SOC_DAPM_MUX("A DAC2 R Mux", SND_SOC_NOPM,
+ 0, 0, &rt5650_a_dac2_r_mux),
+};
+
static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
{ "adc stereo1 filter", NULL, "ADC STO1 ASRC", is_using_asrc },
{ "adc stereo2 filter", NULL, "ADC STO2 ASRC", is_using_asrc },
@@ -1779,13 +1932,9 @@ static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
{ "DAC MIXR", "DAC R2 Switch", "DAC R2 Volume" },
{ "DAC MIXR", "DAC L2 Switch", "DAC L2 Volume" },
- { "DAC L1", NULL, "Stereo DAC MIXL" },
{ "DAC L1", NULL, "PLL1", is_sys_clk_from_pll },
- { "DAC R1", NULL, "Stereo DAC MIXR" },
{ "DAC R1", NULL, "PLL1", is_sys_clk_from_pll },
- { "DAC L2", NULL, "Mono DAC MIXL" },
{ "DAC L2", NULL, "PLL1", is_sys_clk_from_pll },
- { "DAC R2", NULL, "Mono DAC MIXR" },
{ "DAC R2", NULL, "PLL1", is_sys_clk_from_pll },
{ "SPK MIXL", "BST1 Switch", "BST1" },
@@ -1874,6 +2023,30 @@ static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
{ "SPOR", NULL, "SPK amp" },
};
+static const struct snd_soc_dapm_route rt5650_specific_dapm_routes[] = {
+ { "A DAC1 L Mux", "DAC1", "DAC1 MIXL"},
+ { "A DAC1 L Mux", "Stereo DAC Mixer", "Stereo DAC MIXL"},
+ { "A DAC1 R Mux", "DAC1", "DAC1 MIXR"},
+ { "A DAC1 R Mux", "Stereo DAC Mixer", "Stereo DAC MIXR"},
+
+ { "A DAC2 L Mux", "Stereo DAC Mixer", "Stereo DAC MIXL"},
+ { "A DAC2 L Mux", "Mono DAC Mixer", "Mono DAC MIXL"},
+ { "A DAC2 R Mux", "Stereo DAC Mixer", "Stereo DAC MIXR"},
+ { "A DAC2 R Mux", "Mono DAC Mixer", "Mono DAC MIXR"},
+
+ { "DAC L1", NULL, "A DAC1 L Mux" },
+ { "DAC R1", NULL, "A DAC1 R Mux" },
+ { "DAC L2", NULL, "A DAC2 L Mux" },
+ { "DAC R2", NULL, "A DAC2 R Mux" },
+};
+
+static const struct snd_soc_dapm_route rt5645_specific_dapm_routes[] = {
+ { "DAC L1", NULL, "Stereo DAC MIXL" },
+ { "DAC R1", NULL, "Stereo DAC MIXR" },
+ { "DAC L2", NULL, "Mono DAC MIXL" },
+ { "DAC R2", NULL, "Mono DAC MIXR" },
+};
+
static int rt5645_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
@@ -2293,6 +2466,22 @@ static int rt5645_probe(struct snd_soc_codec *codec)
rt5645->codec = codec;
+ switch (rt5645->codec_type) {
+ case CODEC_TYPE_RT5645:
+ snd_soc_dapm_add_routes(&codec->dapm,
+ rt5645_specific_dapm_routes,
+ ARRAY_SIZE(rt5645_specific_dapm_routes));
+ break;
+ case CODEC_TYPE_RT5650:
+ snd_soc_dapm_new_controls(&codec->dapm,
+ rt5650_specific_dapm_widgets,
+ ARRAY_SIZE(rt5650_specific_dapm_widgets));
+ snd_soc_dapm_add_routes(&codec->dapm,
+ rt5650_specific_dapm_routes,
+ ARRAY_SIZE(rt5650_specific_dapm_routes));
+ break;
+ }
+
rt5645_set_bias_level(codec, SND_SOC_BIAS_OFF);
snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
@@ -2424,6 +2613,7 @@ static const struct regmap_config rt5645_regmap = {
static const struct i2c_device_id rt5645_i2c_id[] = {
{ "rt5645", 0 },
+ { "rt5650", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id);
@@ -2456,9 +2646,18 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
regmap_read(rt5645->regmap, RT5645_VENDOR_ID2, &val);
- if (val != RT5645_DEVICE_ID) {
+
+ switch (val) {
+ case RT5645_DEVICE_ID:
+ rt5645->codec_type = CODEC_TYPE_RT5645;
+ break;
+ case RT5650_DEVICE_ID:
+ rt5645->codec_type = CODEC_TYPE_RT5650;
+ break;
+ default:
dev_err(&i2c->dev,
- "Device with ID register %x is not rt5645\n", val);
+ "Device with ID register %x is not rt5645 or rt5650\n",
+ val);
return -ENODEV;
}
@@ -2469,6 +2668,14 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+ if (rt5645->codec_type == CODEC_TYPE_RT5650) {
+ ret = regmap_register_patch(rt5645->regmap, rt5650_init_list,
+ ARRAY_SIZE(rt5650_init_list));
+ if (ret != 0)
+ dev_warn(&i2c->dev, "Apply rt5650 patch failed: %d\n",
+ ret);
+ }
+
if (rt5645->pdata.in2_diff)
regmap_update_bits(rt5645->regmap, RT5645_IN2_CTRL,
RT5645_IN_DF2, RT5645_IN_DF2);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index a815e36a2bdb..dbfd98c22f4d 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -47,6 +47,7 @@
#define RT5645_STO_DAC_MIXER 0x2a
#define RT5645_MONO_DAC_MIXER 0x2b
#define RT5645_DIG_MIXER 0x2c
+#define RT5650_A_DAC_SOUR 0x2d
#define RT5645_DIG_INF1_DATA 0x2f
/* Mixer - PDM */
#define RT5645_PDM_OUT_CTRL 0x31
@@ -150,6 +151,8 @@
#define RT5645_IL_CMD 0xdb
#define RT5645_IL_CMD2 0xdc
#define RT5645_IL_CMD3 0xdd
+#define RT5650_4BTN_IL_CMD1 0xdf
+#define RT5650_4BTN_IL_CMD2 0xe0
#define RT5645_DRC1_HL_CTRL1 0xe7
#define RT5645_DRC2_HL_CTRL1 0xe9
#define RT5645_MUTI_DRC_CTRL1 0xea
@@ -472,6 +475,12 @@
#define RT5645_DAC_L2_DAC_R_VOL_MASK (0x1 << 4)
#define RT5645_DAC_L2_DAC_R_VOL_SFT 4
+/* Analog DAC1/2 Input Source Control (0x2d) */
+#define RT5650_A_DAC1_L_IN_SFT 3
+#define RT5650_A_DAC1_R_IN_SFT 2
+#define RT5650_A_DAC2_L_IN_SFT 1
+#define RT5650_A_DAC2_R_IN_SFT 0
+
/* Digital Interface Data Control (0x2f) */
#define RT5645_IF1_ADC2_IN_SEL (0x1 << 15)
#define RT5645_IF1_ADC2_IN_SFT 15
@@ -1111,50 +1120,27 @@
#define RT5645_DMIC_2_M_NOR (0x0 << 8)
#define RT5645_DMIC_2_M_ASYN (0x1 << 8)
+/* ASRC clock source selection (0x84, 0x85) */
+#define RT5645_CLK_SEL_SYS (0x0)
+#define RT5645_CLK_SEL_I2S1_ASRC (0x1)
+#define RT5645_CLK_SEL_I2S2_ASRC (0x2)
+#define RT5645_CLK_SEL_SYS2 (0x5)
+
/* ASRC Control 2 (0x84) */
-#define RT5645_MDA_L_M_MASK (0x1 << 15)
-#define RT5645_MDA_L_M_SFT 15
-#define RT5645_MDA_L_M_NOR (0x0 << 15)
-#define RT5645_MDA_L_M_ASYN (0x1 << 15)
-#define RT5645_MDA_R_M_MASK (0x1 << 14)
-#define RT5645_MDA_R_M_SFT 14
-#define RT5645_MDA_R_M_NOR (0x0 << 14)
-#define RT5645_MDA_R_M_ASYN (0x1 << 14)
-#define RT5645_MAD_L_M_MASK (0x1 << 13)
-#define RT5645_MAD_L_M_SFT 13
-#define RT5645_MAD_L_M_NOR (0x0 << 13)
-#define RT5645_MAD_L_M_ASYN (0x1 << 13)
-#define RT5645_MAD_R_M_MASK (0x1 << 12)
-#define RT5645_MAD_R_M_SFT 12
-#define RT5645_MAD_R_M_NOR (0x0 << 12)
-#define RT5645_MAD_R_M_ASYN (0x1 << 12)
-#define RT5645_ADC_M_MASK (0x1 << 11)
-#define RT5645_ADC_M_SFT 11
-#define RT5645_ADC_M_NOR (0x0 << 11)
-#define RT5645_ADC_M_ASYN (0x1 << 11)
-#define RT5645_STO_DAC_M_MASK (0x1 << 5)
-#define RT5645_STO_DAC_M_SFT 5
-#define RT5645_STO_DAC_M_NOR (0x0 << 5)
-#define RT5645_STO_DAC_M_ASYN (0x1 << 5)
-#define RT5645_I2S1_R_D_MASK (0x1 << 4)
-#define RT5645_I2S1_R_D_SFT 4
-#define RT5645_I2S1_R_D_DIS (0x0 << 4)
-#define RT5645_I2S1_R_D_EN (0x1 << 4)
-#define RT5645_I2S2_R_D_MASK (0x1 << 3)
-#define RT5645_I2S2_R_D_SFT 3
-#define RT5645_I2S2_R_D_DIS (0x0 << 3)
-#define RT5645_I2S2_R_D_EN (0x1 << 3)
-#define RT5645_PRE_SCLK_MASK (0x3)
-#define RT5645_PRE_SCLK_SFT 0
-#define RT5645_PRE_SCLK_512 (0x0)
-#define RT5645_PRE_SCLK_1024 (0x1)
-#define RT5645_PRE_SCLK_2048 (0x2)
+#define RT5645_DA_STO_CLK_SEL_MASK (0xf << 12)
+#define RT5645_DA_STO_CLK_SEL_SFT 12
+#define RT5645_DA_MONOL_CLK_SEL_MASK (0xf << 8)
+#define RT5645_DA_MONOL_CLK_SEL_SFT 8
+#define RT5645_DA_MONOR_CLK_SEL_MASK (0xf << 4)
+#define RT5645_DA_MONOR_CLK_SEL_SFT 4
+#define RT5645_AD_STO1_CLK_SEL_MASK (0xf << 0)
+#define RT5645_AD_STO1_CLK_SEL_SFT 0
/* ASRC Control 3 (0x85) */
-#define RT5645_I2S1_RATE_MASK (0xf << 12)
-#define RT5645_I2S1_RATE_SFT 12
-#define RT5645_I2S2_RATE_MASK (0xf << 8)
-#define RT5645_I2S2_RATE_SFT 8
+#define RT5645_AD_MONOL_CLK_SEL_MASK (0xf << 4)
+#define RT5645_AD_MONOL_CLK_SEL_SFT 4
+#define RT5645_AD_MONOR_CLK_SEL_MASK (0xf << 0)
+#define RT5645_AD_MONOR_CLK_SEL_SFT 0
/* ASRC Control 4 (0x89) */
#define RT5645_I2S1_PD_MASK (0x7 << 12)
@@ -2175,6 +2161,24 @@ enum {
RT5645_DMIC_DATA_GPIO11,
};
+enum {
+ CODEC_TYPE_RT5645,
+ CODEC_TYPE_RT5650,
+};
+
+/* filter mask */
+enum {
+ RT5645_DA_STEREO_FILTER = 0x1,
+ RT5645_DA_MONO_L_FILTER = (0x1 << 1),
+ RT5645_DA_MONO_R_FILTER = (0x1 << 2),
+ RT5645_AD_STEREO_FILTER = (0x1 << 3),
+ RT5645_AD_MONO_L_FILTER = (0x1 << 4),
+ RT5645_AD_MONO_R_FILTER = (0x1 << 5),
+};
+
+int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src);
+
struct rt5645_priv {
struct snd_soc_codec *codec;
struct rt5645_platform_data pdata;
@@ -2184,6 +2188,7 @@ struct rt5645_priv {
struct snd_soc_jack *mic_jack;
struct delayed_work jack_detect_work;
+ int codec_type;
int sysclk;
int sysclk_src;
int lrck[RT5645_AIFS];
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index bb0a3ab5416c..9f4c7be6d798 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -376,7 +376,7 @@ static const struct snd_kcontrol_new rt5651_snd_controls[] = {
static int set_dmic_clk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
int idx = -EINVAL;
@@ -394,9 +394,10 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
static int is_sysclk_from_pll(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int val;
- val = snd_soc_read(source->codec, RT5651_GLB_CLK);
+ val = snd_soc_read(codec, RT5651_GLB_CLK);
val &= RT5651_SCLK_SRC_MASK;
if (val == RT5651_SCLK_SRC_PLL1)
return 1;
@@ -731,7 +732,7 @@ static const struct snd_kcontrol_new rt5651_pdm_r_mux =
static int rt5651_amp_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -769,7 +770,7 @@ static int rt5651_amp_power_event(struct snd_soc_dapm_widget *w,
static int rt5651_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -813,7 +814,8 @@ static int rt5651_hp_event(struct snd_soc_dapm_widget *w,
static int rt5651_hp_post_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -833,7 +835,7 @@ static int rt5651_hp_post_event(struct snd_soc_dapm_widget *w,
static int rt5651_bst1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -856,7 +858,7 @@ static int rt5651_bst1_event(struct snd_soc_dapm_widget *w,
static int rt5651_bst2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -879,7 +881,7 @@ static int rt5651_bst2_event(struct snd_soc_dapm_widget *w,
static int rt5651_bst3_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 8a0833de1665..e1a4a45c57e2 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -14,10 +14,12 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/spi/spi.h>
+#include <linux/dmi.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -498,7 +500,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
static int set_dmic_clk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
int idx = -EINVAL;
@@ -515,9 +517,10 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int val;
- val = snd_soc_read(source->codec, RT5670_GLB_CLK);
+ val = snd_soc_read(codec, RT5670_GLB_CLK);
val &= RT5670_SCLK_SRC_MASK;
if (val == RT5670_SCLK_SRC_PLL1)
return 1;
@@ -528,6 +531,7 @@ static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
static int is_using_asrc(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg, shift, val;
switch (source->shift) {
@@ -563,7 +567,7 @@ static int is_using_asrc(struct snd_soc_dapm_widget *source,
return 0;
}
- val = (snd_soc_read(source->codec, reg) >> shift) & 0xf;
+ val = (snd_soc_read(codec, reg) >> shift) & 0xf;
switch (val) {
case 1:
case 2:
@@ -588,6 +592,89 @@ static int can_use_asrc(struct snd_soc_dapm_widget *source,
return 0;
}
+
+/**
+ * rt5670_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @codec: SoC audio codec device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source
+ *
+ * The ASRC function handles asynchronous MCLK and LRCK. Since the RT5670
+ * only supports the standard 32fs and 64fs I2S formats, ASRC must be enabled
+ * to support special I2S clock formats such as Intel's 100fs (100 * sampling
+ * rate). The ASRC function tracks the I2S clock and generates a corresponding
+ * system clock for the codec. This function provides an API to select the
+ * clock source for a set of filters specified by the mask; the codec driver
+ * turns on ASRC for those filters if ASRC is selected as their clock source.
+ */
+int rt5670_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src)
+{
+ unsigned int asrc2_mask = 0, asrc2_value = 0;
+ unsigned int asrc3_mask = 0, asrc3_value = 0;
+
+ if (clk_src > RT5670_CLK_SEL_SYS3)
+ return -EINVAL;
+
+ if (filter_mask & RT5670_DA_STEREO_FILTER) {
+ asrc2_mask |= RT5670_DA_STO_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5670_DA_STO_CLK_SEL_MASK)
+ | (clk_src << RT5670_DA_STO_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_DA_MONO_L_FILTER) {
+ asrc2_mask |= RT5670_DA_MONOL_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5670_DA_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5670_DA_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_DA_MONO_R_FILTER) {
+ asrc2_mask |= RT5670_DA_MONOR_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5670_DA_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5670_DA_MONOR_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_AD_STEREO_FILTER) {
+ asrc2_mask |= RT5670_AD_STO1_CLK_SEL_MASK;
+ asrc2_value = (asrc2_value & ~RT5670_AD_STO1_CLK_SEL_MASK)
+ | (clk_src << RT5670_AD_STO1_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_AD_MONO_L_FILTER) {
+ asrc3_mask |= RT5670_AD_MONOL_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5670_AD_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5670_AD_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_AD_MONO_R_FILTER) {
+ asrc3_mask |= RT5670_AD_MONOR_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5670_AD_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5670_AD_MONOR_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_UP_RATE_FILTER) {
+ asrc3_mask |= RT5670_UP_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5670_UP_CLK_SEL_MASK)
+ | (clk_src << RT5670_UP_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5670_DOWN_RATE_FILTER) {
+ asrc3_mask |= RT5670_DOWN_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5670_DOWN_CLK_SEL_MASK)
+ | (clk_src << RT5670_DOWN_CLK_SEL_SFT);
+ }
+
+ if (asrc2_mask)
+ snd_soc_update_bits(codec, RT5670_ASRC_2,
+ asrc2_mask, asrc2_value);
+
+ if (asrc3_mask)
+ snd_soc_update_bits(codec, RT5670_ASRC_3,
+ asrc3_mask, asrc3_value);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5670_sel_asrc_clk_src);
+
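+/*
+ * Usage sketch (illustrative only, not part of this driver): a machine
+ * driver would typically call this helper from its hw_params() callback,
+ * for example
+ *
+ *	rt5670_sel_asrc_clk_src(codec,
+ *				RT5670_DA_STEREO_FILTER |
+ *				RT5670_AD_STEREO_FILTER,
+ *				RT5670_CLK_SEL_I2S1_ASRC);
+ *
+ * so that the stereo DAC and ADC filters track the I2S1 ASRC clock.
+ */
+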
/* Digital Mixer */
static const struct snd_kcontrol_new rt5670_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5670_STO1_ADC_MIXER,
@@ -1146,7 +1233,7 @@ static const struct snd_kcontrol_new rt5670_vad_adc_mux =
static int rt5670_hp_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -1182,7 +1269,7 @@ static int rt5670_hp_power_event(struct snd_soc_dapm_widget *w,
static int rt5670_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -1232,7 +1319,7 @@ static int rt5670_hp_event(struct snd_soc_dapm_widget *w,
static int rt5670_bst1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -1255,7 +1342,7 @@ static int rt5670_bst1_event(struct snd_soc_dapm_widget *w,
static int rt5670_bst2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -2188,6 +2275,13 @@ static int rt5670_set_dai_sysclk(struct snd_soc_dai *dai,
if (freq == rt5670->sysclk && clk_id == rt5670->sysclk_src)
return 0;
+ if (rt5670->pdata.jd_mode) {
+ if (clk_id == RT5670_SCLK_S_PLL1)
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "PLL1");
+ else
+ snd_soc_dapm_disable_pin(&codec->dapm, "PLL1");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
switch (clk_id) {
case RT5670_SCLK_S_MCLK:
reg_val |= RT5670_SCLK_SRC_MCLK;
@@ -2522,6 +2616,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5670 = {
static const struct regmap_config rt5670_regmap = {
.reg_bits = 8,
.val_bits = 16,
+ .use_single_rw = true,
.max_register = RT5670_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5670_ranges) *
RT5670_PR_SPACING),
.volatile_reg = rt5670_volatile_register,
@@ -2549,6 +2644,17 @@ static struct acpi_device_id rt5670_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
#endif
+static const struct dmi_system_id dmi_platform_intel_braswell[] = {
+ {
+ .ident = "Intel Braswell",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Braswell CRB"),
+ },
+ },
+ {}
+};
+
static int rt5670_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -2568,6 +2674,12 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt5670->pdata = *pdata;
+ if (dmi_check_system(dmi_platform_intel_braswell)) {
+ rt5670->pdata.dmic_en = true;
+ rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
+ rt5670->pdata.jd_mode = 1;
+ }
+
rt5670->regmap = devm_regmap_init_i2c(i2c, &rt5670_regmap);
if (IS_ERR(rt5670->regmap)) {
ret = PTR_ERR(rt5670->regmap);
@@ -2609,6 +2721,10 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
}
if (rt5670->pdata.jd_mode) {
+ regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK,
+ RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK);
+ rt5670->sysclk = 0;
+ rt5670->sysclk_src = RT5670_SCLK_S_RCCLK;
regmap_update_bits(rt5670->regmap, RT5670_PWR_ANLG1,
RT5670_PWR_MB, RT5670_PWR_MB);
regmap_update_bits(rt5670->regmap, RT5670_PWR_ANLG2,
@@ -2716,18 +2832,26 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
}
+ pm_runtime_enable(&i2c->dev);
+ pm_request_idle(&i2c->dev);
+
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5670,
rt5670_dai, ARRAY_SIZE(rt5670_dai));
if (ret < 0)
goto err;
+ pm_runtime_put(&i2c->dev);
+
return 0;
err:
+ pm_runtime_disable(&i2c->dev);
+
return ret;
}
static int rt5670_i2c_remove(struct i2c_client *i2c)
{
+ pm_runtime_disable(&i2c->dev);
snd_soc_unregister_codec(&i2c->dev);
return 0;
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index d11b9c207e26..21f8e18c13c4 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -1023,50 +1023,33 @@
#define RT5670_DMIC_2_M_NOR (0x0 << 8)
#define RT5670_DMIC_2_M_ASYN (0x1 << 8)
+/* ASRC clock source selection (0x84, 0x85) */
+#define RT5670_CLK_SEL_SYS (0x0)
+#define RT5670_CLK_SEL_I2S1_ASRC (0x1)
+#define RT5670_CLK_SEL_I2S2_ASRC (0x2)
+#define RT5670_CLK_SEL_I2S3_ASRC (0x3)
+#define RT5670_CLK_SEL_SYS2 (0x5)
+#define RT5670_CLK_SEL_SYS3 (0x6)
+
/* ASRC Control 2 (0x84) */
-#define RT5670_MDA_L_M_MASK (0x1 << 15)
-#define RT5670_MDA_L_M_SFT 15
-#define RT5670_MDA_L_M_NOR (0x0 << 15)
-#define RT5670_MDA_L_M_ASYN (0x1 << 15)
-#define RT5670_MDA_R_M_MASK (0x1 << 14)
-#define RT5670_MDA_R_M_SFT 14
-#define RT5670_MDA_R_M_NOR (0x0 << 14)
-#define RT5670_MDA_R_M_ASYN (0x1 << 14)
-#define RT5670_MAD_L_M_MASK (0x1 << 13)
-#define RT5670_MAD_L_M_SFT 13
-#define RT5670_MAD_L_M_NOR (0x0 << 13)
-#define RT5670_MAD_L_M_ASYN (0x1 << 13)
-#define RT5670_MAD_R_M_MASK (0x1 << 12)
-#define RT5670_MAD_R_M_SFT 12
-#define RT5670_MAD_R_M_NOR (0x0 << 12)
-#define RT5670_MAD_R_M_ASYN (0x1 << 12)
-#define RT5670_ADC_M_MASK (0x1 << 11)
-#define RT5670_ADC_M_SFT 11
-#define RT5670_ADC_M_NOR (0x0 << 11)
-#define RT5670_ADC_M_ASYN (0x1 << 11)
-#define RT5670_STO_DAC_M_MASK (0x1 << 5)
-#define RT5670_STO_DAC_M_SFT 5
-#define RT5670_STO_DAC_M_NOR (0x0 << 5)
-#define RT5670_STO_DAC_M_ASYN (0x1 << 5)
-#define RT5670_I2S1_R_D_MASK (0x1 << 4)
-#define RT5670_I2S1_R_D_SFT 4
-#define RT5670_I2S1_R_D_DIS (0x0 << 4)
-#define RT5670_I2S1_R_D_EN (0x1 << 4)
-#define RT5670_I2S2_R_D_MASK (0x1 << 3)
-#define RT5670_I2S2_R_D_SFT 3
-#define RT5670_I2S2_R_D_DIS (0x0 << 3)
-#define RT5670_I2S2_R_D_EN (0x1 << 3)
-#define RT5670_PRE_SCLK_MASK (0x3)
-#define RT5670_PRE_SCLK_SFT 0
-#define RT5670_PRE_SCLK_512 (0x0)
-#define RT5670_PRE_SCLK_1024 (0x1)
-#define RT5670_PRE_SCLK_2048 (0x2)
+#define RT5670_DA_STO_CLK_SEL_MASK (0xf << 12)
+#define RT5670_DA_STO_CLK_SEL_SFT 12
+#define RT5670_DA_MONOL_CLK_SEL_MASK (0xf << 8)
+#define RT5670_DA_MONOL_CLK_SEL_SFT 8
+#define RT5670_DA_MONOR_CLK_SEL_MASK (0xf << 4)
+#define RT5670_DA_MONOR_CLK_SEL_SFT 4
+#define RT5670_AD_STO1_CLK_SEL_MASK (0xf << 0)
+#define RT5670_AD_STO1_CLK_SEL_SFT 0
/* ASRC Control 3 (0x85) */
-#define RT5670_I2S1_RATE_MASK (0xf << 12)
-#define RT5670_I2S1_RATE_SFT 12
-#define RT5670_I2S2_RATE_MASK (0xf << 8)
-#define RT5670_I2S2_RATE_SFT 8
+#define RT5670_UP_CLK_SEL_MASK (0xf << 12)
+#define RT5670_UP_CLK_SEL_SFT 12
+#define RT5670_DOWN_CLK_SEL_MASK (0xf << 8)
+#define RT5670_DOWN_CLK_SEL_SFT 8
+#define RT5670_AD_MONOL_CLK_SEL_MASK (0xf << 4)
+#define RT5670_AD_MONOL_CLK_SEL_SFT 4
+#define RT5670_AD_MONOR_CLK_SEL_MASK (0xf << 0)
+#define RT5670_AD_MONOR_CLK_SEL_SFT 0
/* ASRC Control 4 (0x89) */
#define RT5670_I2S1_PD_MASK (0x7 << 12)
@@ -1983,6 +1966,21 @@ enum {
RT5670_DMIC_DATA_GPIO5,
};
+/* filter mask */
+enum {
+ RT5670_DA_STEREO_FILTER = 0x1,
+ RT5670_DA_MONO_L_FILTER = (0x1 << 1),
+ RT5670_DA_MONO_R_FILTER = (0x1 << 2),
+ RT5670_AD_STEREO_FILTER = (0x1 << 3),
+ RT5670_AD_MONO_L_FILTER = (0x1 << 4),
+ RT5670_AD_MONO_R_FILTER = (0x1 << 5),
+ RT5670_UP_RATE_FILTER = (0x1 << 6),
+ RT5670_DOWN_RATE_FILTER = (0x1 << 7),
+};
+
+int rt5670_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src);
+
struct rt5670_priv {
struct snd_soc_codec *codec;
struct rt5670_platform_data pdata;
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index c0fbe1881439..5d0bb8748dd1 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -702,6 +702,9 @@ static int rt5677_set_dsp_vad(struct snd_soc_codec *codec, bool on)
static bool activity;
int ret;
+ if (!IS_ENABLED(CONFIG_SND_SOC_RT5677_SPI))
+ return -ENXIO;
+
if (on && !activity) {
activity = true;
@@ -896,7 +899,7 @@ static const struct snd_kcontrol_new rt5677_snd_controls[] = {
static int set_dmic_clk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
@@ -911,7 +914,8 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(source->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
unsigned int val;
regmap_read(rt5677->regmap, RT5677_GLB_CLK1, &val);
@@ -922,6 +926,101 @@ static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
return 0;
}
+static int is_using_asrc(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg, shift, val;
+
+ if (source->reg == RT5677_ASRC_1) {
+ switch (source->shift) {
+ case 12:
+ reg = RT5677_ASRC_4;
+ shift = 0;
+ break;
+ case 13:
+ reg = RT5677_ASRC_4;
+ shift = 4;
+ break;
+ case 14:
+ reg = RT5677_ASRC_4;
+ shift = 8;
+ break;
+ case 15:
+ reg = RT5677_ASRC_4;
+ shift = 12;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (source->shift) {
+ case 0:
+ reg = RT5677_ASRC_6;
+ shift = 8;
+ break;
+ case 1:
+ reg = RT5677_ASRC_6;
+ shift = 12;
+ break;
+ case 2:
+ reg = RT5677_ASRC_5;
+ shift = 0;
+ break;
+ case 3:
+ reg = RT5677_ASRC_5;
+ shift = 4;
+ break;
+ case 4:
+ reg = RT5677_ASRC_5;
+ shift = 8;
+ break;
+ case 5:
+ reg = RT5677_ASRC_5;
+ shift = 12;
+ break;
+ case 12:
+ reg = RT5677_ASRC_3;
+ shift = 0;
+ break;
+ case 13:
+ reg = RT5677_ASRC_3;
+ shift = 4;
+ break;
+ case 14:
+ reg = RT5677_ASRC_3;
+ shift = 12;
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ regmap_read(rt5677->regmap, reg, &val);
+ val = (val >> shift) & 0xf;
+
+ switch (val) {
+ case 1 ... 6:
+ return 1;
+ default:
+ return 0;
+ }
+
+}
+
+static int can_use_asrc(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+
+ if (rt5677->sysclk > rt5677->lrck[RT5677_AIF1] * 384)
+ return 1;
+
+ return 0;
+}
+
/* Digital Mixer */
static const struct snd_kcontrol_new rt5677_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5677_STO1_ADC_MIXER,
@@ -2031,7 +2130,7 @@ static const struct snd_kcontrol_new rt5677_if2_dac7_tdm_sel_mux =
static int rt5677_bst1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -2055,7 +2154,7 @@ static int rt5677_bst1_event(struct snd_soc_dapm_widget *w,
static int rt5677_bst2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -2079,14 +2178,18 @@ static int rt5677_bst2_event(struct snd_soc_dapm_widget *w,
static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
- case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_PRE_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
break;
+
default:
return 0;
}
@@ -2097,14 +2200,18 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
- case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_PRE_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
break;
+
default:
return 0;
}
@@ -2115,7 +2222,7 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
static int rt5677_set_micbias1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -2142,7 +2249,7 @@ static int rt5677_set_micbias1_event(struct snd_soc_dapm_widget *w,
static int rt5677_if1_adc_tdm_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
unsigned int value;
@@ -2165,7 +2272,7 @@ static int rt5677_if1_adc_tdm_event(struct snd_soc_dapm_widget *w,
static int rt5677_if2_adc_tdm_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
unsigned int value;
@@ -2188,7 +2295,7 @@ static int rt5677_if2_adc_tdm_event(struct snd_soc_dapm_widget *w,
static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -2212,9 +2319,50 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
- 0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
+ 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
- 0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU),
+ 0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU),
+
+ /* ASRC */
+ SND_SOC_DAPM_SUPPLY_S("I2S1 ASRC", 1, RT5677_ASRC_1, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S2 ASRC", 1, RT5677_ASRC_1, 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S3 ASRC", 1, RT5677_ASRC_1, 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S4 ASRC", 1, RT5677_ASRC_1, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC STO ASRC", 1, RT5677_ASRC_2, 14, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO2 L ASRC", 1, RT5677_ASRC_2, 13, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO2 R ASRC", 1, RT5677_ASRC_2, 12, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO3 L ASRC", 1, RT5677_ASRC_1, 15, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO3 R ASRC", 1, RT5677_ASRC_1, 14, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO4 L ASRC", 1, RT5677_ASRC_1, 13, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO4 R ASRC", 1, RT5677_ASRC_1, 12, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO1 ASRC", 1, RT5677_ASRC_2, 11, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO2 ASRC", 1, RT5677_ASRC_2, 10, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO3 ASRC", 1, RT5677_ASRC_2, 9, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO4 ASRC", 1, RT5677_ASRC_2, 8, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO L ASRC", 1, RT5677_ASRC_2, 7, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO R ASRC", 1, RT5677_ASRC_2, 6, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5677_ASRC_2, 5, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO2 ASRC", 1, RT5677_ASRC_2, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO3 ASRC", 1, RT5677_ASRC_2, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO4 ASRC", 1, RT5677_ASRC_2, 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC MONO L ASRC", 1, RT5677_ASRC_2, 1, 0, NULL,
+ 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC MONO R ASRC", 1, RT5677_ASRC_2, 0, 0, NULL,
+ 0),
/* Input Side */
/* micbias */
@@ -2646,10 +2794,18 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
/* DAC Mixer */
SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2,
RT5677_PWR_DAC_S1F_BIT, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("dac mono left filter", RT5677_PWR_DIG2,
+ SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2,
RT5677_PWR_DAC_M2F_L_BIT, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("dac mono right filter", RT5677_PWR_DIG2,
+ SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2,
RT5677_PWR_DAC_M2F_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2,
+ RT5677_PWR_DAC_M3F_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2,
+ RT5677_PWR_DAC_M3F_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2,
+ RT5677_PWR_DAC_M4F_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2,
+ RT5677_PWR_DAC_M4F_R_BIT, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)),
@@ -2722,6 +2878,31 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
+ { "Stereo1 DMIC Mux", NULL, "DMIC STO1 ASRC", can_use_asrc },
+ { "Stereo2 DMIC Mux", NULL, "DMIC STO2 ASRC", can_use_asrc },
+ { "Stereo3 DMIC Mux", NULL, "DMIC STO3 ASRC", can_use_asrc },
+ { "Stereo4 DMIC Mux", NULL, "DMIC STO4 ASRC", can_use_asrc },
+ { "Mono DMIC L Mux", NULL, "DMIC MONO L ASRC", can_use_asrc },
+ { "Mono DMIC R Mux", NULL, "DMIC MONO R ASRC", can_use_asrc },
+ { "I2S1", NULL, "I2S1 ASRC", can_use_asrc},
+ { "I2S2", NULL, "I2S2 ASRC", can_use_asrc},
+ { "I2S3", NULL, "I2S3 ASRC", can_use_asrc},
+ { "I2S4", NULL, "I2S4 ASRC", can_use_asrc},
+
+ { "dac stereo1 filter", NULL, "DAC STO ASRC", is_using_asrc },
+ { "dac mono2 left filter", NULL, "DAC MONO2 L ASRC", is_using_asrc },
+ { "dac mono2 right filter", NULL, "DAC MONO2 R ASRC", is_using_asrc },
+ { "dac mono3 left filter", NULL, "DAC MONO3 L ASRC", is_using_asrc },
+ { "dac mono3 right filter", NULL, "DAC MONO3 R ASRC", is_using_asrc },
+ { "dac mono4 left filter", NULL, "DAC MONO4 L ASRC", is_using_asrc },
+ { "dac mono4 right filter", NULL, "DAC MONO4 R ASRC", is_using_asrc },
+ { "adc stereo1 filter", NULL, "ADC STO1 ASRC", is_using_asrc },
+ { "adc stereo2 filter", NULL, "ADC STO2 ASRC", is_using_asrc },
+ { "adc stereo3 filter", NULL, "ADC STO3 ASRC", is_using_asrc },
+ { "adc stereo4 filter", NULL, "ADC STO4 ASRC", is_using_asrc },
+ { "adc mono left filter", NULL, "ADC MONO L ASRC", is_using_asrc },
+ { "adc mono right filter", NULL, "ADC MONO R ASRC", is_using_asrc },
+
{ "DMIC1", NULL, "DMIC L1" },
{ "DMIC1", NULL, "DMIC R1" },
{ "DMIC2", NULL, "DMIC L2" },
@@ -2852,8 +3033,6 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "Stereo1 ADC MIXL", NULL, "Sto1 ADC MIXL" },
{ "Stereo1 ADC MIXL", NULL, "adc stereo1 filter" },
- { "adc stereo1 filter", NULL, "PLL1", is_sys_clk_from_pll },
-
{ "Stereo1 ADC MIXR", NULL, "Sto1 ADC MIXR" },
{ "Stereo1 ADC MIXR", NULL, "adc stereo1 filter" },
{ "adc stereo1 filter", NULL, "PLL1", is_sys_clk_from_pll },
@@ -2874,8 +3053,6 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "Stereo2 ADC MIXL", NULL, "Stereo2 ADC LR Mux" },
{ "Stereo2 ADC MIXL", NULL, "adc stereo2 filter" },
- { "adc stereo2 filter", NULL, "PLL1", is_sys_clk_from_pll },
-
{ "Stereo2 ADC MIXR", NULL, "Sto2 ADC MIXR" },
{ "Stereo2 ADC MIXR", NULL, "adc stereo2 filter" },
{ "adc stereo2 filter", NULL, "PLL1", is_sys_clk_from_pll },
@@ -2890,8 +3067,6 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "Stereo3 ADC MIXL", NULL, "Sto3 ADC MIXL" },
{ "Stereo3 ADC MIXL", NULL, "adc stereo3 filter" },
- { "adc stereo3 filter", NULL, "PLL1", is_sys_clk_from_pll },
-
{ "Stereo3 ADC MIXR", NULL, "Sto3 ADC MIXR" },
{ "Stereo3 ADC MIXR", NULL, "adc stereo3 filter" },
{ "adc stereo3 filter", NULL, "PLL1", is_sys_clk_from_pll },
@@ -2906,8 +3081,6 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "Stereo4 ADC MIXL", NULL, "Sto4 ADC MIXL" },
{ "Stereo4 ADC MIXL", NULL, "adc stereo4 filter" },
- { "adc stereo4 filter", NULL, "PLL1", is_sys_clk_from_pll },
-
{ "Stereo4 ADC MIXR", NULL, "Sto4 ADC MIXR" },
{ "Stereo4 ADC MIXR", NULL, "adc stereo4 filter" },
{ "adc stereo4 filter", NULL, "PLL1", is_sys_clk_from_pll },
@@ -3456,10 +3629,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "DAC1 MIXL", "Stereo ADC Switch", "ADDA1 Mux" },
{ "DAC1 MIXL", "DAC1 Switch", "DAC1 Mux" },
- { "DAC1 MIXL", NULL, "dac stereo1 filter" },
{ "DAC1 MIXR", "Stereo ADC Switch", "ADDA1 Mux" },
{ "DAC1 MIXR", "DAC1 Switch", "DAC1 Mux" },
- { "DAC1 MIXR", NULL, "dac stereo1 filter" },
{ "DAC1 FS", NULL, "DAC1 MIXL" },
{ "DAC1 FS", NULL, "DAC1 MIXR" },
@@ -3526,35 +3697,46 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "Stereo DAC MIXR", "DAC2 R Switch", "DAC2 R Mux" },
{ "Stereo DAC MIXR", "DAC1 L Switch", "DAC1 MIXL" },
{ "Stereo DAC MIXR", NULL, "dac stereo1 filter" },
+ { "dac stereo1 filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "Mono DAC MIXL", "ST L Switch", "Sidetone Mux" },
{ "Mono DAC MIXL", "DAC1 L Switch", "DAC1 MIXL" },
{ "Mono DAC MIXL", "DAC2 L Switch", "DAC2 L Mux" },
{ "Mono DAC MIXL", "DAC2 R Switch", "DAC2 R Mux" },
- { "Mono DAC MIXL", NULL, "dac mono left filter" },
+ { "Mono DAC MIXL", NULL, "dac mono2 left filter" },
+ { "dac mono2 left filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "Mono DAC MIXR", "ST R Switch", "Sidetone Mux" },
{ "Mono DAC MIXR", "DAC1 R Switch", "DAC1 MIXR" },
{ "Mono DAC MIXR", "DAC2 R Switch", "DAC2 R Mux" },
{ "Mono DAC MIXR", "DAC2 L Switch", "DAC2 L Mux" },
- { "Mono DAC MIXR", NULL, "dac mono right filter" },
+ { "Mono DAC MIXR", NULL, "dac mono2 right filter" },
+ { "dac mono2 right filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "DD1 MIXL", "Sto DAC Mix L Switch", "Stereo DAC MIXL" },
{ "DD1 MIXL", "Mono DAC Mix L Switch", "Mono DAC MIXL" },
{ "DD1 MIXL", "DAC3 L Switch", "DAC3 L Mux" },
{ "DD1 MIXL", "DAC3 R Switch", "DAC3 R Mux" },
+ { "DD1 MIXL", NULL, "dac mono3 left filter" },
+ { "dac mono3 left filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "DD1 MIXR", "Sto DAC Mix R Switch", "Stereo DAC MIXR" },
{ "DD1 MIXR", "Mono DAC Mix R Switch", "Mono DAC MIXR" },
{ "DD1 MIXR", "DAC3 L Switch", "DAC3 L Mux" },
{ "DD1 MIXR", "DAC3 R Switch", "DAC3 R Mux" },
+ { "DD1 MIXR", NULL, "dac mono3 right filter" },
+ { "dac mono3 right filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "DD2 MIXL", "Sto DAC Mix L Switch", "Stereo DAC MIXL" },
{ "DD2 MIXL", "Mono DAC Mix L Switch", "Mono DAC MIXL" },
{ "DD2 MIXL", "DAC4 L Switch", "DAC4 L Mux" },
{ "DD2 MIXL", "DAC4 R Switch", "DAC4 R Mux" },
+ { "DD2 MIXL", NULL, "dac mono4 left filter" },
+ { "dac mono4 left filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "DD2 MIXR", "Sto DAC Mix R Switch", "Stereo DAC MIXR" },
{ "DD2 MIXR", "Mono DAC Mix R Switch", "Mono DAC MIXR" },
{ "DD2 MIXR", "DAC4 L Switch", "DAC4 L Mux" },
{ "DD2 MIXR", "DAC4 R Switch", "DAC4 R Mux" },
+ { "DD2 MIXR", NULL, "dac mono4 right filter" },
+ { "dac mono4 right filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "Stereo DAC MIX", NULL, "Stereo DAC MIXL" },
{ "Stereo DAC MIX", NULL, "Stereo DAC MIXR" },
@@ -3576,11 +3758,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "DAC3 SRC Mux", "DD MIX2L", "DD2 MIXL" },
{ "DAC 1", NULL, "DAC12 SRC Mux" },
- { "DAC 1", NULL, "PLL1", is_sys_clk_from_pll },
{ "DAC 2", NULL, "DAC12 SRC Mux" },
- { "DAC 2", NULL, "PLL1", is_sys_clk_from_pll },
{ "DAC 3", NULL, "DAC3 SRC Mux" },
- { "DAC 3", NULL, "PLL1", is_sys_clk_from_pll },
{ "PDM1 L Mux", "STO1 DAC MIX", "Stereo DAC MIXL" },
{ "PDM1 L Mux", "MONO DAC MIX", "Mono DAC MIXL" },
@@ -3927,7 +4106,8 @@ static int rt5677_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_codec *codec = dai->codec;
- unsigned int val = 0;
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val = 0, slot_width_25 = 0;
if (rx_mask || tx_mask)
val |= (1 << 12);
@@ -3951,6 +4131,8 @@ static int rt5677_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
case 20:
val |= (1 << 8);
break;
+ case 25:
+ slot_width_25 = 0x8080;
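+ /* fall through - 25-bit slots also use the 24-bit setting below */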
case 24:
val |= (2 << 8);
break;
@@ -3964,10 +4146,16 @@ static int rt5677_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
switch (dai->id) {
case RT5677_AIF1:
- snd_soc_update_bits(codec, RT5677_TDM1_CTRL1, 0x1f00, val);
+ regmap_update_bits(rt5677->regmap, RT5677_TDM1_CTRL1, 0x1f00,
+ val);
+ regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x8000,
+ slot_width_25);
break;
case RT5677_AIF2:
- snd_soc_update_bits(codec, RT5677_TDM2_CTRL1, 0x1f00, val);
+ regmap_update_bits(rt5677->regmap, RT5677_TDM2_CTRL1, 0x1f00,
+ val);
+ regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x80,
+ slot_width_25);
break;
default:
break;
@@ -4752,6 +4940,11 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
RT5677_GPIO5_DIR_OUT);
}
+ if (rt5677->pdata.micbias1_vdd_3v3)
+ regmap_update_bits(rt5677->regmap, RT5677_MICBIAS,
+ RT5677_MICBIAS1_CTRL_VDD_MASK,
+ RT5677_MICBIAS1_CTRL_VDD_3_3V);
+
rt5677_init_gpio(i2c);
rt5677_init_irq(i2c);
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 29cf7ce610f4..e182e6569bbd 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -155,18 +155,19 @@ struct sgtl5000_priv {
static int mic_bias_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/* change mic bias resistor */
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
+ snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
SGTL5000_BIAS_R_MASK,
sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
break;
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
+ snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
SGTL5000_BIAS_R_MASK, 0);
break;
}
@@ -181,11 +182,12 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
static int power_vag_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
break;
@@ -195,9 +197,9 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
* operational to prevent inadvertently starving the
* other one of them.
*/
- if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+ if ((snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER) &
mask) != mask) {
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_VAG_POWERUP, 0);
msleep(400);
}
@@ -483,21 +485,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* setting i2s data format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
- i2sctl |= SGTL5000_I2S_MODE_PCM;
+ i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
break;
case SND_SOC_DAIFMT_DSP_B:
- i2sctl |= SGTL5000_I2S_MODE_PCM;
+ i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
i2sctl |= SGTL5000_I2S_LRALIGN;
break;
case SND_SOC_DAIFMT_I2S:
- i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
break;
case SND_SOC_DAIFMT_RIGHT_J:
- i2sctl |= SGTL5000_I2S_MODE_RJ;
+ i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
i2sctl |= SGTL5000_I2S_LRPOL;
break;
case SND_SOC_DAIFMT_LEFT_J:
- i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
i2sctl |= SGTL5000_I2S_LRALIGN;
break;
default:
@@ -1462,6 +1464,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
if (ret)
return ret;
+ /* Need 8 clocks before I2C accesses */
+ udelay(1);
+
/* read chip information */
ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
if (ret)
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 1f451a1946eb..47b257e41809 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -233,16 +233,18 @@ static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
static int sn95031_vhs_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
if (SND_SOC_DAPM_EVENT_ON(event)) {
pr_debug("VHS SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
/* power up the rail */
- snd_soc_write(w->codec, SN95031_VHSP, 0x3D);
- snd_soc_write(w->codec, SN95031_VHSN, 0x3F);
+ snd_soc_write(codec, SN95031_VHSP, 0x3D);
+ snd_soc_write(codec, SN95031_VHSN, 0x3F);
msleep(1);
} else if (SND_SOC_DAPM_EVENT_OFF(event)) {
pr_debug("VHS SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
- snd_soc_write(w->codec, SN95031_VHSP, 0xC4);
- snd_soc_write(w->codec, SN95031_VHSN, 0x04);
+ snd_soc_write(codec, SN95031_VHSP, 0xC4);
+ snd_soc_write(codec, SN95031_VHSN, 0x04);
}
return 0;
}
@@ -250,14 +252,16 @@ static int sn95031_vhs_event(struct snd_soc_dapm_widget *w,
static int sn95031_vihf_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
if (SND_SOC_DAPM_EVENT_ON(event)) {
pr_debug("VIHF SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
/* power up the rail */
- snd_soc_write(w->codec, SN95031_VIHF, 0x27);
+ snd_soc_write(codec, SN95031_VIHF, 0x27);
msleep(1);
} else if (SND_SOC_DAPM_EVENT_OFF(event)) {
pr_debug("VIHF SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
- snd_soc_write(w->codec, SN95031_VIHF, 0x24);
+ snd_soc_write(codec, SN95031_VIHF, 0x24);
}
return 0;
}
@@ -265,6 +269,7 @@ static int sn95031_vihf_event(struct snd_soc_dapm_widget *w,
static int sn95031_dmic12_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
if (SND_SOC_DAPM_EVENT_ON(event)) {
@@ -273,15 +278,16 @@ static int sn95031_dmic12_event(struct snd_soc_dapm_widget *w,
data_dir = BIT(7);
}
/* program DMIC LDO, clock and set clock */
- snd_soc_update_bits(w->codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
- snd_soc_update_bits(w->codec, SN95031_DMICBUF0123, BIT(0), clk_dir);
- snd_soc_update_bits(w->codec, SN95031_DMICBUF0123, BIT(7), data_dir);
+ snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
+ snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(0), clk_dir);
+ snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(7), data_dir);
return 0;
}
static int sn95031_dmic34_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
if (SND_SOC_DAPM_EVENT_ON(event)) {
@@ -290,22 +296,23 @@ static int sn95031_dmic34_event(struct snd_soc_dapm_widget *w,
data_dir = BIT(1);
}
/* program DMIC LDO, clock and set clock */
- snd_soc_update_bits(w->codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
- snd_soc_update_bits(w->codec, SN95031_DMICBUF0123, BIT(2), clk_dir);
- snd_soc_update_bits(w->codec, SN95031_DMICBUF45, BIT(1), data_dir);
+ snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
+ snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(2), clk_dir);
+ snd_soc_update_bits(codec, SN95031_DMICBUF45, BIT(1), data_dir);
return 0;
}
static int sn95031_dmic56_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int ldo = 0;
if (SND_SOC_DAPM_EVENT_ON(event))
ldo = BIT(7)|BIT(6);
/* program DMIC LDO */
- snd_soc_update_bits(w->codec, SN95031_MICBIAS, BIT(7)|BIT(6), ldo);
+ snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(7)|BIT(6), ldo);
return 0;
}
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 7e18200dd6a9..3a1343fa109b 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -24,8 +24,11 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <sound/core.h>
@@ -102,6 +105,35 @@ static const struct reg_default sta32x_regs[] = {
{ 0x2c, 0x0c },
};
+static const struct regmap_range sta32x_write_regs_range[] = {
+ regmap_reg_range(STA32X_CONFA, STA32X_AUTO2),
+ regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2),
+};
+
+static const struct regmap_range sta32x_read_regs_range[] = {
+ regmap_reg_range(STA32X_CONFA, STA32X_AUTO2),
+ regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2),
+};
+
+static const struct regmap_range sta32x_volatile_regs_range[] = {
+ regmap_reg_range(STA32X_CFADDR2, STA32X_CFUD),
+};
+
+static const struct regmap_access_table sta32x_write_regs = {
+ .yes_ranges = sta32x_write_regs_range,
+ .n_yes_ranges = ARRAY_SIZE(sta32x_write_regs_range),
+};
+
+static const struct regmap_access_table sta32x_read_regs = {
+ .yes_ranges = sta32x_read_regs_range,
+ .n_yes_ranges = ARRAY_SIZE(sta32x_read_regs_range),
+};
+
+static const struct regmap_access_table sta32x_volatile_regs = {
+ .yes_ranges = sta32x_volatile_regs_range,
+ .n_yes_ranges = ARRAY_SIZE(sta32x_volatile_regs_range),
+};
+
/* regulator power supply names */
static const char *sta32x_supply_names[] = {
"Vdda", /* analog supply, 3.3VV */
@@ -122,6 +154,8 @@ struct sta32x_priv {
u32 coef_shadow[STA32X_COEF_COUNT];
struct delayed_work watchdog_work;
int shutdown;
+ struct gpio_desc *gpiod_nreset;
+ struct mutex coeff_lock;
};
static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -155,37 +189,32 @@ static const char *sta32x_limiter_release_rate[] = {
"0.5116", "0.1370", "0.0744", "0.0499", "0.0360", "0.0299",
"0.0264", "0.0208", "0.0198", "0.0172", "0.0147", "0.0137",
"0.0134", "0.0117", "0.0110", "0.0104" };
-
-static const unsigned int sta32x_limiter_ac_attack_tlv[] = {
- TLV_DB_RANGE_HEAD(2),
+static DECLARE_TLV_DB_RANGE(sta32x_limiter_ac_attack_tlv,
0, 7, TLV_DB_SCALE_ITEM(-1200, 200, 0),
8, 16, TLV_DB_SCALE_ITEM(300, 100, 0),
-};
+);
-static const unsigned int sta32x_limiter_ac_release_tlv[] = {
- TLV_DB_RANGE_HEAD(5),
+static DECLARE_TLV_DB_RANGE(sta32x_limiter_ac_release_tlv,
0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(-2900, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(-2000, 0, 0),
3, 8, TLV_DB_SCALE_ITEM(-1400, 200, 0),
8, 16, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+);
-static const unsigned int sta32x_limiter_drc_attack_tlv[] = {
- TLV_DB_RANGE_HEAD(3),
+static DECLARE_TLV_DB_RANGE(sta32x_limiter_drc_attack_tlv,
0, 7, TLV_DB_SCALE_ITEM(-3100, 200, 0),
8, 13, TLV_DB_SCALE_ITEM(-1600, 100, 0),
14, 16, TLV_DB_SCALE_ITEM(-1000, 300, 0),
-};
+);
-static const unsigned int sta32x_limiter_drc_release_tlv[] = {
- TLV_DB_RANGE_HEAD(5),
+static DECLARE_TLV_DB_RANGE(sta32x_limiter_drc_release_tlv,
0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0),
1, 2, TLV_DB_SCALE_ITEM(-3800, 200, 0),
3, 4, TLV_DB_SCALE_ITEM(-3300, 200, 0),
5, 12, TLV_DB_SCALE_ITEM(-3000, 200, 0),
13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
-};
+);
static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
@@ -244,29 +273,42 @@ static int sta32x_coefficient_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
int numcoef = kcontrol->private_value >> 16;
int index = kcontrol->private_value & 0xffff;
- unsigned int cfud;
- int i;
+ unsigned int cfud, val;
+ int i, ret = 0;
+
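+ /*
+ * Keep the whole read transaction atomic: the CFADDR2 write, the CFUD
+ * trigger and the sequential B1CF1.. reads below must not interleave
+ * with a concurrent coefficient access.
+ */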
+ mutex_lock(&sta32x->coeff_lock);
/* preserve reserved bits in STA32X_CFUD */
- cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
- /* chip documentation does not say if the bits are self clearing,
- * so do it explicitly */
- snd_soc_write(codec, STA32X_CFUD, cfud);
+ regmap_read(sta32x->regmap, STA32X_CFUD, &cfud);
+ cfud &= 0xf0;
+ /*
+ * chip documentation does not say if the bits are self clearing,
+ * so do it explicitly
+ */
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud);
- snd_soc_write(codec, STA32X_CFADDR2, index);
- if (numcoef == 1)
- snd_soc_write(codec, STA32X_CFUD, cfud | 0x04);
- else if (numcoef == 5)
- snd_soc_write(codec, STA32X_CFUD, cfud | 0x08);
- else
- return -EINVAL;
- for (i = 0; i < 3 * numcoef; i++)
- ucontrol->value.bytes.data[i] =
- snd_soc_read(codec, STA32X_B1CF1 + i);
+ regmap_write(sta32x->regmap, STA32X_CFADDR2, index);
+ if (numcoef == 1) {
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud | 0x04);
+ } else if (numcoef == 5) {
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud | 0x08);
+ } else {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
- return 0;
+ for (i = 0; i < 3 * numcoef; i++) {
+ regmap_read(sta32x->regmap, STA32X_B1CF1 + i, &val);
+ ucontrol->value.bytes.data[i] = val;
+ }
+
+exit_unlock:
+ mutex_unlock(&sta32x->coeff_lock);
+
+ return ret;
}
static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
@@ -280,24 +322,27 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
int i;
/* preserve reserved bits in STA32X_CFUD */
- cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
- /* chip documentation does not say if the bits are self clearing,
- * so do it explicitly */
- snd_soc_write(codec, STA32X_CFUD, cfud);
+ regmap_read(sta32x->regmap, STA32X_CFUD, &cfud);
+ cfud &= 0xf0;
+ /*
+ * chip documentation does not say if the bits are self clearing,
+ * so do it explicitly
+ */
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud);
- snd_soc_write(codec, STA32X_CFADDR2, index);
+ regmap_write(sta32x->regmap, STA32X_CFADDR2, index);
for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
sta32x->coef_shadow[index + i] =
(ucontrol->value.bytes.data[3 * i] << 16)
| (ucontrol->value.bytes.data[3 * i + 1] << 8)
| (ucontrol->value.bytes.data[3 * i + 2]);
for (i = 0; i < 3 * numcoef; i++)
- snd_soc_write(codec, STA32X_B1CF1 + i,
- ucontrol->value.bytes.data[i]);
+ regmap_write(sta32x->regmap, STA32X_B1CF1 + i,
+ ucontrol->value.bytes.data[i]);
if (numcoef == 1)
- snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud | 0x01);
else if (numcoef == 5)
- snd_soc_write(codec, STA32X_CFUD, cfud | 0x02);
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud | 0x02);
else
return -EINVAL;
@@ -311,20 +356,23 @@ static int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
int i;
/* preserve reserved bits in STA32X_CFUD */
- cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
+ regmap_read(sta32x->regmap, STA32X_CFUD, &cfud);
+ cfud &= 0xf0;
for (i = 0; i < STA32X_COEF_COUNT; i++) {
- snd_soc_write(codec, STA32X_CFADDR2, i);
- snd_soc_write(codec, STA32X_B1CF1,
- (sta32x->coef_shadow[i] >> 16) & 0xff);
- snd_soc_write(codec, STA32X_B1CF2,
- (sta32x->coef_shadow[i] >> 8) & 0xff);
- snd_soc_write(codec, STA32X_B1CF3,
- (sta32x->coef_shadow[i]) & 0xff);
- /* chip documentation does not say if the bits are
- * self-clearing, so do it explicitly */
- snd_soc_write(codec, STA32X_CFUD, cfud);
- snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+ regmap_write(sta32x->regmap, STA32X_CFADDR2, i);
+ regmap_write(sta32x->regmap, STA32X_B1CF1,
+ (sta32x->coef_shadow[i] >> 16) & 0xff);
+ regmap_write(sta32x->regmap, STA32X_B1CF2,
+ (sta32x->coef_shadow[i] >> 8) & 0xff);
+ regmap_write(sta32x->regmap, STA32X_B1CF3,
+ (sta32x->coef_shadow[i]) & 0xff);
+ /*
+ * chip documentation does not say if the bits are
+ * self-clearing, so do it explicitly
+ */
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud);
+ regmap_write(sta32x->regmap, STA32X_CFUD, cfud | 0x01);
}
return 0;
}
@@ -336,11 +384,11 @@ static int sta32x_cache_sync(struct snd_soc_codec *codec)
int rc;
/* mute during register sync */
- mute = snd_soc_read(codec, STA32X_MMUTE);
- snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
+ regmap_read(sta32x->regmap, STA32X_MMUTE, &mute);
+ regmap_write(sta32x->regmap, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
sta32x_sync_coef_shadow(codec);
rc = regcache_sync(sta32x->regmap);
- snd_soc_write(codec, STA32X_MMUTE, mute);
+ regmap_write(sta32x->regmap, STA32X_MMUTE, mute);
return rc;
}
@@ -508,17 +556,12 @@ static struct {
};
/* MCLK to fs clock ratios */
-static struct {
- int ratio;
- int mcs;
-} mclk_ratios[3][7] = {
- { { 768, 0 }, { 512, 1 }, { 384, 2 }, { 256, 3 },
- { 128, 4 }, { 576, 5 }, { 0, 0 } },
- { { 384, 2 }, { 256, 3 }, { 192, 4 }, { 128, 5 }, {64, 0 }, { 0, 0 } },
- { { 384, 2 }, { 256, 3 }, { 192, 4 }, { 128, 5 }, {64, 0 }, { 0, 0 } },
+static int mcs_ratio_table[3][7] = {
+ { 768, 512, 384, 256, 128, 576, 0 },
+ { 384, 256, 192, 128, 64, 0 },
+ { 384, 256, 192, 128, 64, 0 },
};
-
/**
* sta32x_set_dai_sysclk - configure MCLK
* @codec_dai: the codec DAI
@@ -543,46 +586,10 @@ static int sta32x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
{
struct snd_soc_codec *codec = codec_dai->codec;
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
- int i, j, ir, fs;
- unsigned int rates = 0;
- unsigned int rate_min = -1;
- unsigned int rate_max = 0;
- pr_debug("mclk=%u\n", freq);
+ dev_dbg(codec->dev, "mclk=%u\n", freq);
sta32x->mclk = freq;
- if (sta32x->mclk) {
- for (i = 0; i < ARRAY_SIZE(interpolation_ratios); i++) {
- ir = interpolation_ratios[i].ir;
- fs = interpolation_ratios[i].fs;
- for (j = 0; mclk_ratios[ir][j].ratio; j++) {
- if (mclk_ratios[ir][j].ratio * fs == freq) {
- rates |= snd_pcm_rate_to_rate_bit(fs);
- if (fs < rate_min)
- rate_min = fs;
- if (fs > rate_max)
- rate_max = fs;
- break;
- }
- }
- }
- /* FIXME: soc should support a rate list */
- rates &= ~SNDRV_PCM_RATE_KNOT;
-
- if (!rates) {
- dev_err(codec->dev, "could not find a valid sample rate\n");
- return -EINVAL;
- }
- } else {
- /* enable all possible rates */
- rates = STA32X_RATES;
- rate_min = 32000;
- rate_max = 192000;
- }
-
- codec_dai->driver->playback.rates = rates;
- codec_dai->driver->playback.rate_min = rate_min;
- codec_dai->driver->playback.rate_max = rate_max;
return 0;
}
@@ -599,10 +606,7 @@ static int sta32x_set_dai_fmt(struct snd_soc_dai *codec_dai,
{
struct snd_soc_codec *codec = codec_dai->codec;
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
- u8 confb = snd_soc_read(codec, STA32X_CONFB);
-
- pr_debug("\n");
- confb &= ~(STA32X_CONFB_C1IM | STA32X_CONFB_C2IM);
+ u8 confb = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
@@ -632,8 +636,8 @@ static int sta32x_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- snd_soc_write(codec, STA32X_CONFB, confb);
- return 0;
+ return regmap_update_bits(sta32x->regmap, STA32X_CONFB,
+ STA32X_CONFB_C1IM | STA32X_CONFB_C2IM, confb);
}
/**
@@ -651,39 +655,55 @@ static int sta32x_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_codec *codec = dai->codec;
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
- unsigned int rate;
- int i, mcs = -1, ir = -1;
- u8 confa, confb;
+ int i, mcs = -EINVAL, ir = -EINVAL;
+ unsigned int confa, confb;
+ unsigned int rate, ratio;
+ int ret;
+
+ if (!sta32x->mclk) {
+ dev_err(codec->dev,
+ "sta32x->mclk is unset. Unable to determine ratio\n");
+ return -EIO;
+ }
rate = params_rate(params);
- pr_debug("rate: %u\n", rate);
- for (i = 0; i < ARRAY_SIZE(interpolation_ratios); i++)
+ ratio = sta32x->mclk / rate;
+ dev_dbg(codec->dev, "rate: %u, ratio: %u\n", rate, ratio);
+
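+ /*
+ * Illustrative example (values assumed, not taken from this patch):
+ * MCLK = 12.288 MHz with a 48 kHz stream gives ratio = 256; the row of
+ * mcs_ratio_table selected by the interpolation ratio for this rate is
+ * then scanned for that ratio to pick the MCS index.
+ */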
+ for (i = 0; i < ARRAY_SIZE(interpolation_ratios); i++) {
if (interpolation_ratios[i].fs == rate) {
ir = interpolation_ratios[i].ir;
break;
}
- if (ir < 0)
+ }
+
+ if (ir < 0) {
+ dev_err(codec->dev, "Unsupported samplerate: %u\n", rate);
return -EINVAL;
- for (i = 0; mclk_ratios[ir][i].ratio; i++)
- if (mclk_ratios[ir][i].ratio * rate == sta32x->mclk) {
- mcs = mclk_ratios[ir][i].mcs;
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (mcs_ratio_table[ir][i] == ratio) {
+ mcs = i;
break;
}
- if (mcs < 0)
+ }
+
+ if (mcs < 0) {
+ dev_err(codec->dev, "Unresolvable ratio: %u\n", ratio);
return -EINVAL;
+ }
- confa = snd_soc_read(codec, STA32X_CONFA);
- confa &= ~(STA32X_CONFA_MCS_MASK | STA32X_CONFA_IR_MASK);
- confa |= (ir << STA32X_CONFA_IR_SHIFT) | (mcs << STA32X_CONFA_MCS_SHIFT);
+ confa = (ir << STA32X_CONFA_IR_SHIFT) |
+ (mcs << STA32X_CONFA_MCS_SHIFT);
+ confb = 0;
- confb = snd_soc_read(codec, STA32X_CONFB);
- confb &= ~(STA32X_CONFB_SAI_MASK | STA32X_CONFB_SAIFB);
switch (params_width(params)) {
case 24:
- pr_debug("24bit\n");
+ dev_dbg(codec->dev, "24bit\n");
/* fall through */
case 32:
- pr_debug("24bit or 32bit\n");
+ dev_dbg(codec->dev, "24bit or 32bit\n");
switch (sta32x->format) {
case SND_SOC_DAIFMT_I2S:
confb |= 0x0;
@@ -698,7 +718,7 @@ static int sta32x_hw_params(struct snd_pcm_substream *substream,
break;
case 20:
- pr_debug("20bit\n");
+ dev_dbg(codec->dev, "20bit\n");
switch (sta32x->format) {
case SND_SOC_DAIFMT_I2S:
confb |= 0x4;
@@ -713,7 +733,7 @@ static int sta32x_hw_params(struct snd_pcm_substream *substream,
break;
case 18:
- pr_debug("18bit\n");
+ dev_dbg(codec->dev, "18bit\n");
switch (sta32x->format) {
case SND_SOC_DAIFMT_I2S:
confb |= 0x8;
@@ -728,7 +748,7 @@ static int sta32x_hw_params(struct snd_pcm_substream *substream,
break;
case 16:
- pr_debug("16bit\n");
+ dev_dbg(codec->dev, "16bit\n");
switch (sta32x->format) {
case SND_SOC_DAIFMT_I2S:
confb |= 0x0;
@@ -746,8 +766,30 @@ static int sta32x_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- snd_soc_write(codec, STA32X_CONFA, confa);
- snd_soc_write(codec, STA32X_CONFB, confb);
+ ret = regmap_update_bits(sta32x->regmap, STA32X_CONFA,
+ STA32X_CONFA_MCS_MASK | STA32X_CONFA_IR_MASK,
+ confa);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sta32x->regmap, STA32X_CONFB,
+ STA32X_CONFB_SAI_MASK | STA32X_CONFB_SAIFB,
+ confb);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sta32x_startup_sequence(struct sta32x_priv *sta32x)
+{
+ if (sta32x->gpiod_nreset) {
+ gpiod_set_value(sta32x->gpiod_nreset, 0);
+ mdelay(1);
+ gpiod_set_value(sta32x->gpiod_nreset, 1);
+ mdelay(1);
+ }
+
return 0;
}
@@ -766,14 +808,14 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
int ret;
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
- pr_debug("level = %d\n", level);
+ dev_dbg(codec->dev, "level = %d\n", level);
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
/* Full power on */
- snd_soc_update_bits(codec, STA32X_CONFF,
+ regmap_update_bits(sta32x->regmap, STA32X_CONFF,
STA32X_CONFF_PWDN | STA32X_CONFF_EAPD,
STA32X_CONFF_PWDN | STA32X_CONFF_EAPD);
break;
@@ -788,25 +830,28 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
+ sta32x_startup_sequence(sta32x);
sta32x_cache_sync(codec);
sta32x_watchdog_start(sta32x);
}
- /* Power up to mute */
- /* FIXME */
- snd_soc_update_bits(codec, STA32X_CONFF,
- STA32X_CONFF_PWDN | STA32X_CONFF_EAPD,
- STA32X_CONFF_PWDN | STA32X_CONFF_EAPD);
+ /* Power down */
+ regmap_update_bits(sta32x->regmap, STA32X_CONFF,
+ STA32X_CONFF_PWDN | STA32X_CONFF_EAPD,
+ 0);
break;
case SND_SOC_BIAS_OFF:
/* The chip runs through the power down sequence for us. */
- snd_soc_update_bits(codec, STA32X_CONFF,
- STA32X_CONFF_PWDN | STA32X_CONFF_EAPD,
- STA32X_CONFF_PWDN);
+ regmap_update_bits(sta32x->regmap, STA32X_CONFF,
+ STA32X_CONFF_PWDN | STA32X_CONFF_EAPD, 0);
msleep(300);
sta32x_watchdog_stop(sta32x);
+
+ if (sta32x->gpiod_nreset)
+ gpiod_set_value(sta32x->gpiod_nreset, 0);
+
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies),
sta32x->supplies);
break;
@@ -822,7 +867,7 @@ static const struct snd_soc_dai_ops sta32x_dai_ops = {
};
static struct snd_soc_dai_driver sta32x_dai = {
- .name = "STA32X",
+ .name = "sta32x-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 2,
@@ -836,11 +881,8 @@ static struct snd_soc_dai_driver sta32x_dai = {
static int sta32x_probe(struct snd_soc_codec *codec)
{
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+ struct sta32x_platform_data *pdata = sta32x->pdata;
int i, ret = 0, thermal = 0;
-
- sta32x->codec = codec;
- sta32x->pdata = dev_get_platdata(codec->dev);
-
ret = regulator_bulk_enable(ARRAY_SIZE(sta32x->supplies),
sta32x->supplies);
if (ret != 0) {
@@ -848,50 +890,73 @@ static int sta32x_probe(struct snd_soc_codec *codec)
return ret;
}
- /* Chip documentation explicitly requires that the reset values
- * of reserved register bits are left untouched.
- * Write the register default value to cache for reserved registers,
- * so the write to the these registers are suppressed by the cache
- * restore code when it skips writes of default registers.
- */
- regcache_cache_only(sta32x->regmap, true);
- snd_soc_write(codec, STA32X_CONFC, 0xc2);
- snd_soc_write(codec, STA32X_CONFE, 0xc2);
- snd_soc_write(codec, STA32X_CONFF, 0x5c);
- snd_soc_write(codec, STA32X_MMUTE, 0x10);
- snd_soc_write(codec, STA32X_AUTO1, 0x60);
- snd_soc_write(codec, STA32X_AUTO3, 0x00);
- snd_soc_write(codec, STA32X_C3CFG, 0x40);
- regcache_cache_only(sta32x->regmap, false);
-
- /* set thermal warning adjustment and recovery */
- if (!(sta32x->pdata->thermal_conf & STA32X_THERMAL_ADJUSTMENT_ENABLE))
+ ret = sta32x_startup_sequence(sta32x);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to start up the device\n");
+ return ret;
+ }
+
+ /* CONFA */
+ if (!pdata->thermal_warning_recovery)
thermal |= STA32X_CONFA_TWAB;
- if (!(sta32x->pdata->thermal_conf & STA32X_THERMAL_RECOVERY_ENABLE))
+ if (!pdata->thermal_warning_adjustment)
thermal |= STA32X_CONFA_TWRB;
- snd_soc_update_bits(codec, STA32X_CONFA,
- STA32X_CONFA_TWAB | STA32X_CONFA_TWRB,
- thermal);
+ if (!pdata->fault_detect_recovery)
+ thermal |= STA32X_CONFA_FDRB;
+ regmap_update_bits(sta32x->regmap, STA32X_CONFA,
+ STA32X_CONFA_TWAB | STA32X_CONFA_TWRB |
+ STA32X_CONFA_FDRB,
+ thermal);
+
+ /* CONFC */
+ regmap_update_bits(sta32x->regmap, STA32X_CONFC,
+ STA32X_CONFC_CSZ_MASK,
+ pdata->drop_compensation_ns
+ << STA32X_CONFC_CSZ_SHIFT);
+
+ /* CONFE */
+ regmap_update_bits(sta32x->regmap, STA32X_CONFE,
+ STA32X_CONFE_MPCV,
+ pdata->max_power_use_mpcc ?
+ STA32X_CONFE_MPCV : 0);
+ regmap_update_bits(sta32x->regmap, STA32X_CONFE,
+ STA32X_CONFE_MPC,
+ pdata->max_power_correction ?
+ STA32X_CONFE_MPC : 0);
+ regmap_update_bits(sta32x->regmap, STA32X_CONFE,
+ STA32X_CONFE_AME,
+ pdata->am_reduction_mode ?
+ STA32X_CONFE_AME : 0);
+ regmap_update_bits(sta32x->regmap, STA32X_CONFE,
+ STA32X_CONFE_PWMS,
+ pdata->odd_pwm_speed_mode ?
+ STA32X_CONFE_PWMS : 0);
+
+ /* CONFF */
+ regmap_update_bits(sta32x->regmap, STA32X_CONFF,
+ STA32X_CONFF_IDE,
+ pdata->invalid_input_detect_mute ?
+ STA32X_CONFF_IDE : 0);
/* select output configuration */
- snd_soc_update_bits(codec, STA32X_CONFF,
- STA32X_CONFF_OCFG_MASK,
- sta32x->pdata->output_conf
- << STA32X_CONFF_OCFG_SHIFT);
+ regmap_update_bits(sta32x->regmap, STA32X_CONFF,
+ STA32X_CONFF_OCFG_MASK,
+ pdata->output_conf
+ << STA32X_CONFF_OCFG_SHIFT);
/* channel to output mapping */
- snd_soc_update_bits(codec, STA32X_C1CFG,
- STA32X_CxCFG_OM_MASK,
- sta32x->pdata->ch1_output_mapping
- << STA32X_CxCFG_OM_SHIFT);
- snd_soc_update_bits(codec, STA32X_C2CFG,
- STA32X_CxCFG_OM_MASK,
- sta32x->pdata->ch2_output_mapping
- << STA32X_CxCFG_OM_SHIFT);
- snd_soc_update_bits(codec, STA32X_C3CFG,
- STA32X_CxCFG_OM_MASK,
- sta32x->pdata->ch3_output_mapping
- << STA32X_CxCFG_OM_SHIFT);
+ regmap_update_bits(sta32x->regmap, STA32X_C1CFG,
+ STA32X_CxCFG_OM_MASK,
+ pdata->ch1_output_mapping
+ << STA32X_CxCFG_OM_SHIFT);
+ regmap_update_bits(sta32x->regmap, STA32X_C2CFG,
+ STA32X_CxCFG_OM_MASK,
+ pdata->ch2_output_mapping
+ << STA32X_CxCFG_OM_SHIFT);
+ regmap_update_bits(sta32x->regmap, STA32X_C3CFG,
+ STA32X_CxCFG_OM_MASK,
+ pdata->ch3_output_mapping
+ << STA32X_CxCFG_OM_SHIFT);
/* initialize coefficient shadow RAM with reset values */
for (i = 4; i <= 49; i += 5)
@@ -924,16 +989,6 @@ static int sta32x_remove(struct snd_soc_codec *codec)
return 0;
}
-static bool sta32x_reg_is_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case STA32X_CONFA ... STA32X_L2ATRT:
- case STA32X_MPCC1 ... STA32X_FDRC2:
- return 0;
- }
- return 1;
-}
-
static const struct snd_soc_codec_driver sta32x_codec = {
.probe = sta32x_probe,
.remove = sta32x_remove,
@@ -954,12 +1009,75 @@ static const struct regmap_config sta32x_regmap = {
.reg_defaults = sta32x_regs,
.num_reg_defaults = ARRAY_SIZE(sta32x_regs),
.cache_type = REGCACHE_RBTREE,
- .volatile_reg = sta32x_reg_is_volatile,
+ .wr_table = &sta32x_write_regs,
+ .rd_table = &sta32x_read_regs,
+ .volatile_table = &sta32x_volatile_regs,
};
+#ifdef CONFIG_OF
+static const struct of_device_id st32x_dt_ids[] = {
+ { .compatible = "st,sta32x", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, st32x_dt_ids);
+
+static int sta32x_probe_dt(struct device *dev, struct sta32x_priv *sta32x)
+{
+ struct device_node *np = dev->of_node;
+ struct sta32x_platform_data *pdata;
+ u16 tmp;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ of_property_read_u8(np, "st,output-conf",
+ &pdata->output_conf);
+ of_property_read_u8(np, "st,ch1-output-mapping",
+ &pdata->ch1_output_mapping);
+ of_property_read_u8(np, "st,ch2-output-mapping",
+ &pdata->ch2_output_mapping);
+ of_property_read_u8(np, "st,ch3-output-mapping",
+ &pdata->ch3_output_mapping);
+
+ if (of_get_property(np, "st,thermal-warning-recovery", NULL))
+ pdata->thermal_warning_recovery = 1;
+ if (of_get_property(np, "st,thermal-warning-adjustment", NULL))
+ pdata->thermal_warning_adjustment = 1;
+ if (of_get_property(np, "st,needs_esd_watchdog", NULL))
+ pdata->needs_esd_watchdog = 1;
+
+ tmp = 140;
+ of_property_read_u16(np, "st,drop-compensation-ns", &tmp);
+ pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20;
+
+ /* CONFE */
+ if (of_get_property(np, "st,max-power-use-mpcc", NULL))
+ pdata->max_power_use_mpcc = 1;
+
+ if (of_get_property(np, "st,max-power-correction", NULL))
+ pdata->max_power_correction = 1;
+
+ if (of_get_property(np, "st,am-reduction-mode", NULL))
+ pdata->am_reduction_mode = 1;
+
+ if (of_get_property(np, "st,odd-pwm-speed-mode", NULL))
+ pdata->odd_pwm_speed_mode = 1;
+
+ /* CONFF */
+ if (of_get_property(np, "st,invalid-input-detect-mute", NULL))
+ pdata->invalid_input_detect_mute = 1;
+
+ sta32x->pdata = pdata;
+
+ return 0;
+}
+#endif
+
static int sta32x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct device *dev = &i2c->dev;
struct sta32x_priv *sta32x;
int ret, i;
@@ -968,6 +1086,29 @@ static int sta32x_i2c_probe(struct i2c_client *i2c,
if (!sta32x)
return -ENOMEM;
+ mutex_init(&sta32x->coeff_lock);
+ sta32x->pdata = dev_get_platdata(dev);
+
+#ifdef CONFIG_OF
+ if (dev->of_node) {
+ ret = sta32x_probe_dt(dev, sta32x);
+ if (ret < 0)
+ return ret;
+ }
+#endif
+
+ /* GPIOs */
+ sta32x->gpiod_nreset = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(sta32x->gpiod_nreset)) {
+ ret = PTR_ERR(sta32x->gpiod_nreset);
+ if (ret != -ENOENT && ret != -ENOSYS)
+ return ret;
+
+ sta32x->gpiod_nreset = NULL;
+ } else {
+ gpiod_direction_output(sta32x->gpiod_nreset, 0);
+ }
+
/* regulators */
for (i = 0; i < ARRAY_SIZE(sta32x->supplies); i++)
sta32x->supplies[i].supply = sta32x_supply_names[i];
@@ -982,15 +1123,15 @@ static int sta32x_i2c_probe(struct i2c_client *i2c,
sta32x->regmap = devm_regmap_init_i2c(i2c, &sta32x_regmap);
if (IS_ERR(sta32x->regmap)) {
ret = PTR_ERR(sta32x->regmap);
- dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
+ dev_err(dev, "Failed to init regmap: %d\n", ret);
return ret;
}
i2c_set_clientdata(i2c, sta32x);
- ret = snd_soc_register_codec(&i2c->dev, &sta32x_codec, &sta32x_dai, 1);
- if (ret != 0)
- dev_err(&i2c->dev, "Failed to register codec (%d)\n", ret);
+ ret = snd_soc_register_codec(dev, &sta32x_codec, &sta32x_dai, 1);
+ if (ret < 0)
+ dev_err(dev, "Failed to register codec (%d)\n", ret);
return ret;
}
@@ -1013,6 +1154,7 @@ static struct i2c_driver sta32x_i2c_driver = {
.driver = {
.name = "sta32x",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st32x_dt_ids),
},
.probe = sta32x_i2c_probe,
.remove = sta32x_i2c_remove,
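
[Editor's sketch, not part of the patch] The sta32x hunks above drop the volatile_reg() callback in favour of regmap access tables (wr_table/rd_table/volatile_table) and route register writes through regmap_update_bits(). The following is a minimal sketch of that table-based pattern; the ex_* register names and ranges are hypothetical, not taken from the driver.

/* Illustrative sketch of regmap access tables; register layout is made up. */
#include <linux/kernel.h>
#include <linux/regmap.h>

#define EX_REG_FIRST	0x00
#define EX_REG_STATUS	0x2d	/* hypothetical volatile status register */
#define EX_REG_LAST	0x4f

static const struct regmap_range ex_write_ranges[] = {
	regmap_reg_range(EX_REG_FIRST, EX_REG_STATUS - 1),
	regmap_reg_range(EX_REG_STATUS + 1, EX_REG_LAST),
};

static const struct regmap_range ex_volatile_ranges[] = {
	regmap_reg_range(EX_REG_STATUS, EX_REG_STATUS),
};

static const struct regmap_access_table ex_write_table = {
	.yes_ranges	= ex_write_ranges,
	.n_yes_ranges	= ARRAY_SIZE(ex_write_ranges),
};

static const struct regmap_access_table ex_volatile_table = {
	.yes_ranges	= ex_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(ex_volatile_ranges),
};

static const struct regmap_config ex_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= EX_REG_LAST,
	.cache_type	= REGCACHE_RBTREE,
	.wr_table	= &ex_write_table,	/* writes allowed only in these ranges */
	.volatile_table	= &ex_volatile_table,	/* status register bypasses the cache */
};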
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
index d8e32a6262ee..d3191c983d71 100644
--- a/sound/soc/codecs/sta32x.h
+++ b/sound/soc/codecs/sta32x.h
@@ -131,7 +131,7 @@
#define STA32X_CONFF_OCFG_MASK 0x03
#define STA32X_CONFF_OCFG_SHIFT 0
#define STA32X_CONFF_IDE 0x04
-#define STA32X_CONFF_IDE_SHIFT 3
+#define STA32X_CONFF_IDE_SHIFT 2
#define STA32X_CONFF_BCLE 0x08
#define STA32X_CONFF_ECLE 0x20
#define STA32X_CONFF_PWDN 0x40
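
[Editor's sketch, not part of the patch] The header fix above brings STA32X_CONFF_IDE_SHIFT in line with the IDE mask 0x04, which is bit 2. A tiny standalone check of that mask/shift relationship (illustrative only, userspace C):

/* Illustrative: a mask of 0x04 corresponds to bit position 2, so the
 * matching shift is 2, not 3. */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x04;
	unsigned int shift = __builtin_ctz(mask);	/* count trailing zero bits */

	printf("mask 0x%02x -> shift %u\n", mask, shift);	/* prints: mask 0x04 -> shift 2 */
	return 0;
}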
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index dc3223d6eca1..c86dd9aae157 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -349,7 +349,8 @@ static int aic31xx_wait_bits(struct aic31xx_priv *aic31xx, unsigned int reg,
static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
unsigned int reg = AIC31XX_DACFLAG1;
unsigned int mask;
@@ -377,7 +378,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
reg = AIC31XX_ADCFLAG;
break;
default:
- dev_err(w->codec->dev, "Unknown widget '%s' calling %s\n",
+ dev_err(codec->dev, "Unknown widget '%s' calling %s\n",
w->name, __func__);
return -EINVAL;
}
@@ -388,7 +389,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
return aic31xx_wait_bits(aic31xx, reg, mask, 0, 5000, 100);
default:
- dev_dbg(w->codec->dev,
+ dev_dbg(codec->dev,
"Unhandled dapm widget event %d from %s\n",
event, w->name);
}
@@ -433,7 +434,7 @@ static const struct snd_kcontrol_new aic31xx_dapm_spr_switch =
static int mic_bias_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
switch (event) {
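
[Editor's sketch, not part of the patch] The aic31xx hunks above, like most of the codec changes that follow, replace the removed w->codec pointer with snd_soc_dapm_to_codec(w->dapm). A generic sketch of a DAPM event handler using that accessor; ex_priv and ex_power_event are hypothetical names, not from any of these drivers.

/* Sketch of the snd_soc_dapm_to_codec() accessor pattern. */
#include <sound/soc.h>
#include <sound/soc-dapm.h>

struct ex_priv {
	int powered;
};

static int ex_power_event(struct snd_soc_dapm_widget *w,
			  struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct ex_priv *priv = snd_soc_codec_get_drvdata(codec);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		priv->powered = 1;
		break;
	case SND_SOC_DAPM_POST_PMD:
		priv->powered = 0;
		break;
	default:
		dev_dbg(codec->dev, "unhandled event %d on %s\n",
			event, w->name);
	}

	return 0;
}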
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index b7ebce054b4e..51c4713ac6e3 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -87,6 +87,7 @@ struct aic3x_priv {
#define AIC3X_MODEL_3X 0
#define AIC3X_MODEL_33 1
#define AIC3X_MODEL_3007 2
+#define AIC3X_MODEL_3104 3
u16 model;
/* Selects the micbias voltage */
@@ -197,7 +198,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
static int mic_bias_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -316,52 +317,37 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
* only for swapped L-to-R and R-to-L routes. See below stereo controls
* for direct L-to-L and R-to-R routes.
*/
- SOC_SINGLE_TLV("Left Line Mixer Line2R Bypass Volume",
- LINE2R_2_LLOPM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Left Line Mixer PGAR Bypass Volume",
PGAR_2_LLOPM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Left Line Mixer DACR1 Playback Volume",
DACR1_2_LLOPM_VOL, 0, 118, 1, output_stage_tlv),
- SOC_SINGLE_TLV("Right Line Mixer Line2L Bypass Volume",
- LINE2L_2_RLOPM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Right Line Mixer PGAL Bypass Volume",
PGAL_2_RLOPM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Right Line Mixer DACL1 Playback Volume",
DACL1_2_RLOPM_VOL, 0, 118, 1, output_stage_tlv),
- SOC_SINGLE_TLV("Left HP Mixer Line2R Bypass Volume",
- LINE2R_2_HPLOUT_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Left HP Mixer PGAR Bypass Volume",
PGAR_2_HPLOUT_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Left HP Mixer DACR1 Playback Volume",
DACR1_2_HPLOUT_VOL, 0, 118, 1, output_stage_tlv),
- SOC_SINGLE_TLV("Right HP Mixer Line2L Bypass Volume",
- LINE2L_2_HPROUT_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Right HP Mixer PGAL Bypass Volume",
PGAL_2_HPROUT_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Right HP Mixer DACL1 Playback Volume",
DACL1_2_HPROUT_VOL, 0, 118, 1, output_stage_tlv),
- SOC_SINGLE_TLV("Left HPCOM Mixer Line2R Bypass Volume",
- LINE2R_2_HPLCOM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Left HPCOM Mixer PGAR Bypass Volume",
PGAR_2_HPLCOM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Left HPCOM Mixer DACR1 Playback Volume",
DACR1_2_HPLCOM_VOL, 0, 118, 1, output_stage_tlv),
- SOC_SINGLE_TLV("Right HPCOM Mixer Line2L Bypass Volume",
- LINE2L_2_HPRCOM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Right HPCOM Mixer PGAL Bypass Volume",
PGAL_2_HPRCOM_VOL, 0, 118, 1, output_stage_tlv),
SOC_SINGLE_TLV("Right HPCOM Mixer DACL1 Playback Volume",
DACL1_2_HPRCOM_VOL, 0, 118, 1, output_stage_tlv),
/* Stereo output controls for direct L-to-L and R-to-R routes */
- SOC_DOUBLE_R_TLV("Line Line2 Bypass Volume",
- LINE2L_2_LLOPM_VOL, LINE2R_2_RLOPM_VOL,
- 0, 118, 1, output_stage_tlv),
SOC_DOUBLE_R_TLV("Line PGA Bypass Volume",
PGAL_2_LLOPM_VOL, PGAR_2_RLOPM_VOL,
0, 118, 1, output_stage_tlv),
@@ -369,9 +355,6 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
DACL1_2_LLOPM_VOL, DACR1_2_RLOPM_VOL,
0, 118, 1, output_stage_tlv),
- SOC_DOUBLE_R_TLV("HP Line2 Bypass Volume",
- LINE2L_2_HPLOUT_VOL, LINE2R_2_HPROUT_VOL,
- 0, 118, 1, output_stage_tlv),
SOC_DOUBLE_R_TLV("HP PGA Bypass Volume",
PGAL_2_HPLOUT_VOL, PGAR_2_HPROUT_VOL,
0, 118, 1, output_stage_tlv),
@@ -379,9 +362,6 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
DACL1_2_HPLOUT_VOL, DACR1_2_HPROUT_VOL,
0, 118, 1, output_stage_tlv),
- SOC_DOUBLE_R_TLV("HPCOM Line2 Bypass Volume",
- LINE2L_2_HPLCOM_VOL, LINE2R_2_HPRCOM_VOL,
- 0, 118, 1, output_stage_tlv),
SOC_DOUBLE_R_TLV("HPCOM PGA Bypass Volume",
PGAL_2_HPLCOM_VOL, PGAR_2_HPRCOM_VOL,
0, 118, 1, output_stage_tlv),
@@ -424,6 +404,45 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
SOC_ENUM("Output Driver Ramp-up step", aic3x_rampup_step_enum),
};
+/* For other than tlv320aic3104 */
+static const struct snd_kcontrol_new aic3x_extra_snd_controls[] = {
+ /*
+ * Output controls that map to output mixer switches. Note these are
+ * only for swapped L-to-R and R-to-L routes. See below stereo controls
+ * for direct L-to-L and R-to-R routes.
+ */
+ SOC_SINGLE_TLV("Left Line Mixer Line2R Bypass Volume",
+ LINE2R_2_LLOPM_VOL, 0, 118, 1, output_stage_tlv),
+
+ SOC_SINGLE_TLV("Right Line Mixer Line2L Bypass Volume",
+ LINE2L_2_RLOPM_VOL, 0, 118, 1, output_stage_tlv),
+
+ SOC_SINGLE_TLV("Left HP Mixer Line2R Bypass Volume",
+ LINE2R_2_HPLOUT_VOL, 0, 118, 1, output_stage_tlv),
+
+ SOC_SINGLE_TLV("Right HP Mixer Line2L Bypass Volume",
+ LINE2L_2_HPROUT_VOL, 0, 118, 1, output_stage_tlv),
+
+ SOC_SINGLE_TLV("Left HPCOM Mixer Line2R Bypass Volume",
+ LINE2R_2_HPLCOM_VOL, 0, 118, 1, output_stage_tlv),
+
+ SOC_SINGLE_TLV("Right HPCOM Mixer Line2L Bypass Volume",
+ LINE2L_2_HPRCOM_VOL, 0, 118, 1, output_stage_tlv),
+
+ /* Stereo output controls for direct L-to-L and R-to-R routes */
+ SOC_DOUBLE_R_TLV("Line Line2 Bypass Volume",
+ LINE2L_2_LLOPM_VOL, LINE2R_2_RLOPM_VOL,
+ 0, 118, 1, output_stage_tlv),
+
+ SOC_DOUBLE_R_TLV("HP Line2 Bypass Volume",
+ LINE2L_2_HPLOUT_VOL, LINE2R_2_HPROUT_VOL,
+ 0, 118, 1, output_stage_tlv),
+
+ SOC_DOUBLE_R_TLV("HPCOM Line2 Bypass Volume",
+ LINE2L_2_HPLCOM_VOL, LINE2R_2_HPRCOM_VOL,
+ 0, 118, 1, output_stage_tlv),
+};
+
static const struct snd_kcontrol_new aic3x_mono_controls[] = {
SOC_DOUBLE_R_TLV("Mono Line2 Bypass Volume",
LINE2L_2_MONOLOPM_VOL, LINE2R_2_MONOLOPM_VOL,
@@ -464,22 +483,24 @@ SOC_DAPM_ENUM("Route", aic3x_right_hpcom_enum);
/* Left Line Mixer */
static const struct snd_kcontrol_new aic3x_left_line_mixer_controls[] = {
- SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_LLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAL Bypass Switch", PGAL_2_LLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACL1 Switch", DACL1_2_LLOPM_VOL, 7, 1, 0),
- SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_LLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAR Bypass Switch", PGAR_2_LLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACR1 Switch", DACR1_2_LLOPM_VOL, 7, 1, 0),
+ /* Not on tlv320aic3104 */
+ SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_LLOPM_VOL, 7, 1, 0),
+ SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_LLOPM_VOL, 7, 1, 0),
};
/* Right Line Mixer */
static const struct snd_kcontrol_new aic3x_right_line_mixer_controls[] = {
- SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_RLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAL Bypass Switch", PGAL_2_RLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACL1 Switch", DACL1_2_RLOPM_VOL, 7, 1, 0),
- SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_RLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAR Bypass Switch", PGAR_2_RLOPM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACR1 Switch", DACR1_2_RLOPM_VOL, 7, 1, 0),
+ /* Not on tlv320aic3104 */
+ SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_RLOPM_VOL, 7, 1, 0),
+ SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_RLOPM_VOL, 7, 1, 0),
};
/* Mono Mixer */
@@ -494,42 +515,46 @@ static const struct snd_kcontrol_new aic3x_mono_mixer_controls[] = {
/* Left HP Mixer */
static const struct snd_kcontrol_new aic3x_left_hp_mixer_controls[] = {
- SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPLOUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAL Bypass Switch", PGAL_2_HPLOUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACL1 Switch", DACL1_2_HPLOUT_VOL, 7, 1, 0),
- SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPLOUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAR Bypass Switch", PGAR_2_HPLOUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACR1 Switch", DACR1_2_HPLOUT_VOL, 7, 1, 0),
+ /* Not on tlv320aic3104 */
+ SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPLOUT_VOL, 7, 1, 0),
+ SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPLOUT_VOL, 7, 1, 0),
};
/* Right HP Mixer */
static const struct snd_kcontrol_new aic3x_right_hp_mixer_controls[] = {
- SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPROUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAL Bypass Switch", PGAL_2_HPROUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACL1 Switch", DACL1_2_HPROUT_VOL, 7, 1, 0),
- SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPROUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAR Bypass Switch", PGAR_2_HPROUT_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACR1 Switch", DACR1_2_HPROUT_VOL, 7, 1, 0),
+ /* Not on tlv320aic3104 */
+ SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPROUT_VOL, 7, 1, 0),
+ SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPROUT_VOL, 7, 1, 0),
};
/* Left HPCOM Mixer */
static const struct snd_kcontrol_new aic3x_left_hpcom_mixer_controls[] = {
- SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPLCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAL Bypass Switch", PGAL_2_HPLCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACL1 Switch", DACL1_2_HPLCOM_VOL, 7, 1, 0),
- SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPLCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAR Bypass Switch", PGAR_2_HPLCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACR1 Switch", DACR1_2_HPLCOM_VOL, 7, 1, 0),
+ /* Not on tlv320aic3104 */
+ SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPLCOM_VOL, 7, 1, 0),
+ SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPLCOM_VOL, 7, 1, 0),
};
/* Right HPCOM Mixer */
static const struct snd_kcontrol_new aic3x_right_hpcom_mixer_controls[] = {
- SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPRCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAL Bypass Switch", PGAL_2_HPRCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACL1 Switch", DACL1_2_HPRCOM_VOL, 7, 1, 0),
- SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPRCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("PGAR Bypass Switch", PGAR_2_HPRCOM_VOL, 7, 1, 0),
SOC_DAPM_SINGLE("DACR1 Switch", DACR1_2_HPRCOM_VOL, 7, 1, 0),
+ /* Not on tlv320aic3104 */
+ SOC_DAPM_SINGLE("Line2L Bypass Switch", LINE2L_2_HPRCOM_VOL, 7, 1, 0),
+ SOC_DAPM_SINGLE("Line2R Bypass Switch", LINE2R_2_HPRCOM_VOL, 7, 1, 0),
};
/* Left PGA Mixer */
@@ -550,6 +575,22 @@ static const struct snd_kcontrol_new aic3x_right_pga_mixer_controls[] = {
SOC_DAPM_SINGLE_AIC3X("Mic3R Switch", MIC3LR_2_RADC_CTRL, 0, 1, 1),
};
+/* Left PGA Mixer for tlv320aic3104 */
+static const struct snd_kcontrol_new aic3104_left_pga_mixer_controls[] = {
+ SOC_DAPM_SINGLE_AIC3X("Line1L Switch", LINE1L_2_LADC_CTRL, 3, 1, 1),
+ SOC_DAPM_SINGLE_AIC3X("Line1R Switch", LINE1R_2_LADC_CTRL, 3, 1, 1),
+ SOC_DAPM_SINGLE_AIC3X("Mic2L Switch", MIC3LR_2_LADC_CTRL, 4, 1, 1),
+ SOC_DAPM_SINGLE_AIC3X("Mic2R Switch", MIC3LR_2_LADC_CTRL, 0, 1, 1),
+};
+
+/* Right PGA Mixer for tlv320aic3104 */
+static const struct snd_kcontrol_new aic3104_right_pga_mixer_controls[] = {
+ SOC_DAPM_SINGLE_AIC3X("Line1R Switch", LINE1R_2_RADC_CTRL, 3, 1, 1),
+ SOC_DAPM_SINGLE_AIC3X("Line1L Switch", LINE1L_2_RADC_CTRL, 3, 1, 1),
+ SOC_DAPM_SINGLE_AIC3X("Mic2L Switch", MIC3LR_2_RADC_CTRL, 4, 1, 1),
+ SOC_DAPM_SINGLE_AIC3X("Mic2R Switch", MIC3LR_2_RADC_CTRL, 0, 1, 1),
+};
+
/* Left Line1 Mux */
static const struct snd_kcontrol_new aic3x_left_line1l_mux_controls =
SOC_DAPM_ENUM("Route", aic3x_line1l_2_l_enum);
@@ -593,26 +634,56 @@ static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
/* Inputs to Left ADC */
SND_SOC_DAPM_ADC("Left ADC", "Left Capture", LINE1L_2_LADC_CTRL, 2, 0),
- SND_SOC_DAPM_MIXER("Left PGA Mixer", SND_SOC_NOPM, 0, 0,
- &aic3x_left_pga_mixer_controls[0],
- ARRAY_SIZE(aic3x_left_pga_mixer_controls)),
SND_SOC_DAPM_MUX("Left Line1L Mux", SND_SOC_NOPM, 0, 0,
&aic3x_left_line1l_mux_controls),
SND_SOC_DAPM_MUX("Left Line1R Mux", SND_SOC_NOPM, 0, 0,
&aic3x_left_line1r_mux_controls),
- SND_SOC_DAPM_MUX("Left Line2L Mux", SND_SOC_NOPM, 0, 0,
- &aic3x_left_line2_mux_controls),
/* Inputs to Right ADC */
SND_SOC_DAPM_ADC("Right ADC", "Right Capture",
LINE1R_2_RADC_CTRL, 2, 0),
- SND_SOC_DAPM_MIXER("Right PGA Mixer", SND_SOC_NOPM, 0, 0,
- &aic3x_right_pga_mixer_controls[0],
- ARRAY_SIZE(aic3x_right_pga_mixer_controls)),
SND_SOC_DAPM_MUX("Right Line1L Mux", SND_SOC_NOPM, 0, 0,
&aic3x_right_line1l_mux_controls),
SND_SOC_DAPM_MUX("Right Line1R Mux", SND_SOC_NOPM, 0, 0,
&aic3x_right_line1r_mux_controls),
+
+ /* Mic Bias */
+ SND_SOC_DAPM_SUPPLY("Mic Bias", MICBIAS_CTRL, 6, 0,
+ mic_bias_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_OUTPUT("LLOUT"),
+ SND_SOC_DAPM_OUTPUT("RLOUT"),
+ SND_SOC_DAPM_OUTPUT("HPLOUT"),
+ SND_SOC_DAPM_OUTPUT("HPROUT"),
+ SND_SOC_DAPM_OUTPUT("HPLCOM"),
+ SND_SOC_DAPM_OUTPUT("HPRCOM"),
+
+ SND_SOC_DAPM_INPUT("LINE1L"),
+ SND_SOC_DAPM_INPUT("LINE1R"),
+
+ /*
+ * Virtual output pin to detection block inside codec. This can be
+ * used to keep codec bias on if gpio or detection features are needed.
+ * Force pin on or construct a path with an input jack and mic bias
+ * widgets.
+ */
+ SND_SOC_DAPM_OUTPUT("Detection"),
+};
+
+/* For other than tlv320aic3104 */
+static const struct snd_soc_dapm_widget aic3x_extra_dapm_widgets[] = {
+ /* Inputs to Left ADC */
+ SND_SOC_DAPM_MIXER("Left PGA Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_left_pga_mixer_controls[0],
+ ARRAY_SIZE(aic3x_left_pga_mixer_controls)),
+ SND_SOC_DAPM_MUX("Left Line2L Mux", SND_SOC_NOPM, 0, 0,
+ &aic3x_left_line2_mux_controls),
+
+ /* Inputs to Right ADC */
+ SND_SOC_DAPM_MIXER("Right PGA Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_right_pga_mixer_controls[0],
+ ARRAY_SIZE(aic3x_right_pga_mixer_controls)),
SND_SOC_DAPM_MUX("Right Line2R Mux", SND_SOC_NOPM, 0, 0,
&aic3x_right_line2_mux_controls),
@@ -637,11 +708,6 @@ static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
SND_SOC_DAPM_REG(snd_soc_dapm_micbias, "DMic Rate 32",
AIC3X_ASD_INTF_CTRLA, 0, 3, 3, 0),
- /* Mic Bias */
- SND_SOC_DAPM_SUPPLY("Mic Bias", MICBIAS_CTRL, 6, 0,
- mic_bias_event,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
-
/* Output mixers */
SND_SOC_DAPM_MIXER("Left Line Mixer", SND_SOC_NOPM, 0, 0,
&aic3x_left_line_mixer_controls[0],
@@ -662,27 +728,46 @@ static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
&aic3x_right_hpcom_mixer_controls[0],
ARRAY_SIZE(aic3x_right_hpcom_mixer_controls)),
- SND_SOC_DAPM_OUTPUT("LLOUT"),
- SND_SOC_DAPM_OUTPUT("RLOUT"),
- SND_SOC_DAPM_OUTPUT("HPLOUT"),
- SND_SOC_DAPM_OUTPUT("HPROUT"),
- SND_SOC_DAPM_OUTPUT("HPLCOM"),
- SND_SOC_DAPM_OUTPUT("HPRCOM"),
-
SND_SOC_DAPM_INPUT("MIC3L"),
SND_SOC_DAPM_INPUT("MIC3R"),
- SND_SOC_DAPM_INPUT("LINE1L"),
- SND_SOC_DAPM_INPUT("LINE1R"),
SND_SOC_DAPM_INPUT("LINE2L"),
SND_SOC_DAPM_INPUT("LINE2R"),
+};
- /*
- * Virtual output pin to detection block inside codec. This can be
- * used to keep codec bias on if gpio or detection features are needed.
- * Force pin on or construct a path with an input jack and mic bias
- * widgets.
- */
- SND_SOC_DAPM_OUTPUT("Detection"),
+/* For tlv320aic3104 */
+static const struct snd_soc_dapm_widget aic3104_extra_dapm_widgets[] = {
+ /* Inputs to Left ADC */
+ SND_SOC_DAPM_MIXER("Left PGA Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3104_left_pga_mixer_controls[0],
+ ARRAY_SIZE(aic3104_left_pga_mixer_controls)),
+
+ /* Inputs to Right ADC */
+ SND_SOC_DAPM_MIXER("Right PGA Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3104_right_pga_mixer_controls[0],
+ ARRAY_SIZE(aic3104_right_pga_mixer_controls)),
+
+ /* Output mixers */
+ SND_SOC_DAPM_MIXER("Left Line Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_left_line_mixer_controls[0],
+ ARRAY_SIZE(aic3x_left_line_mixer_controls) - 2),
+ SND_SOC_DAPM_MIXER("Right Line Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_right_line_mixer_controls[0],
+ ARRAY_SIZE(aic3x_right_line_mixer_controls) - 2),
+ SND_SOC_DAPM_MIXER("Left HP Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_left_hp_mixer_controls[0],
+ ARRAY_SIZE(aic3x_left_hp_mixer_controls) - 2),
+ SND_SOC_DAPM_MIXER("Right HP Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_right_hp_mixer_controls[0],
+ ARRAY_SIZE(aic3x_right_hp_mixer_controls) - 2),
+ SND_SOC_DAPM_MIXER("Left HPCOM Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_left_hpcom_mixer_controls[0],
+ ARRAY_SIZE(aic3x_left_hpcom_mixer_controls) - 2),
+ SND_SOC_DAPM_MIXER("Right HPCOM Mixer", SND_SOC_NOPM, 0, 0,
+ &aic3x_right_hpcom_mixer_controls[0],
+ ARRAY_SIZE(aic3x_right_hpcom_mixer_controls) - 2),
+
+ SND_SOC_DAPM_INPUT("MIC2L"),
+ SND_SOC_DAPM_INPUT("MIC2R"),
};
static const struct snd_soc_dapm_widget aic3x_dapm_mono_widgets[] = {
@@ -712,17 +797,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"Left Line1R Mux", "single-ended", "LINE1R"},
{"Left Line1R Mux", "differential", "LINE1R"},
- {"Left Line2L Mux", "single-ended", "LINE2L"},
- {"Left Line2L Mux", "differential", "LINE2L"},
-
{"Left PGA Mixer", "Line1L Switch", "Left Line1L Mux"},
{"Left PGA Mixer", "Line1R Switch", "Left Line1R Mux"},
- {"Left PGA Mixer", "Line2L Switch", "Left Line2L Mux"},
- {"Left PGA Mixer", "Mic3L Switch", "MIC3L"},
- {"Left PGA Mixer", "Mic3R Switch", "MIC3R"},
{"Left ADC", NULL, "Left PGA Mixer"},
- {"Left ADC", NULL, "GPIO1 dmic modclk"},
/* Right Input */
{"Right Line1R Mux", "single-ended", "LINE1R"},
@@ -730,25 +808,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"Right Line1L Mux", "single-ended", "LINE1L"},
{"Right Line1L Mux", "differential", "LINE1L"},
- {"Right Line2R Mux", "single-ended", "LINE2R"},
- {"Right Line2R Mux", "differential", "LINE2R"},
-
{"Right PGA Mixer", "Line1L Switch", "Right Line1L Mux"},
{"Right PGA Mixer", "Line1R Switch", "Right Line1R Mux"},
- {"Right PGA Mixer", "Line2R Switch", "Right Line2R Mux"},
- {"Right PGA Mixer", "Mic3L Switch", "MIC3L"},
- {"Right PGA Mixer", "Mic3R Switch", "MIC3R"},
{"Right ADC", NULL, "Right PGA Mixer"},
- {"Right ADC", NULL, "GPIO1 dmic modclk"},
-
- /*
- * Logical path between digital mic enable and GPIO1 modulator clock
- * output function
- */
- {"GPIO1 dmic modclk", NULL, "DMic Rate 128"},
- {"GPIO1 dmic modclk", NULL, "DMic Rate 64"},
- {"GPIO1 dmic modclk", NULL, "DMic Rate 32"},
/* Left DAC Output */
{"Left DAC Mux", "DAC_L1", "Left DAC"},
@@ -761,10 +824,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"Right DAC Mux", "DAC_R3", "Right DAC"},
/* Left Line Output */
- {"Left Line Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
{"Left Line Mixer", "PGAL Bypass Switch", "Left PGA Mixer"},
{"Left Line Mixer", "DACL1 Switch", "Left DAC Mux"},
- {"Left Line Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
{"Left Line Mixer", "PGAR Bypass Switch", "Right PGA Mixer"},
{"Left Line Mixer", "DACR1 Switch", "Right DAC Mux"},
@@ -773,10 +834,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"LLOUT", NULL, "Left Line Out"},
/* Right Line Output */
- {"Right Line Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
{"Right Line Mixer", "PGAL Bypass Switch", "Left PGA Mixer"},
{"Right Line Mixer", "DACL1 Switch", "Left DAC Mux"},
- {"Right Line Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
{"Right Line Mixer", "PGAR Bypass Switch", "Right PGA Mixer"},
{"Right Line Mixer", "DACR1 Switch", "Right DAC Mux"},
@@ -785,10 +844,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"RLOUT", NULL, "Right Line Out"},
/* Left HP Output */
- {"Left HP Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
{"Left HP Mixer", "PGAL Bypass Switch", "Left PGA Mixer"},
{"Left HP Mixer", "DACL1 Switch", "Left DAC Mux"},
- {"Left HP Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
{"Left HP Mixer", "PGAR Bypass Switch", "Right PGA Mixer"},
{"Left HP Mixer", "DACR1 Switch", "Right DAC Mux"},
@@ -797,10 +854,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"HPLOUT", NULL, "Left HP Out"},
/* Right HP Output */
- {"Right HP Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
{"Right HP Mixer", "PGAL Bypass Switch", "Left PGA Mixer"},
{"Right HP Mixer", "DACL1 Switch", "Left DAC Mux"},
- {"Right HP Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
{"Right HP Mixer", "PGAR Bypass Switch", "Right PGA Mixer"},
{"Right HP Mixer", "DACR1 Switch", "Right DAC Mux"},
@@ -809,10 +864,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"HPROUT", NULL, "Right HP Out"},
/* Left HPCOM Output */
- {"Left HPCOM Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
{"Left HPCOM Mixer", "PGAL Bypass Switch", "Left PGA Mixer"},
{"Left HPCOM Mixer", "DACL1 Switch", "Left DAC Mux"},
- {"Left HPCOM Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
{"Left HPCOM Mixer", "PGAR Bypass Switch", "Right PGA Mixer"},
{"Left HPCOM Mixer", "DACR1 Switch", "Right DAC Mux"},
@@ -823,10 +876,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"HPLCOM", NULL, "Left HP Com"},
/* Right HPCOM Output */
- {"Right HPCOM Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
{"Right HPCOM Mixer", "PGAL Bypass Switch", "Left PGA Mixer"},
{"Right HPCOM Mixer", "DACL1 Switch", "Left DAC Mux"},
- {"Right HPCOM Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
{"Right HPCOM Mixer", "PGAR Bypass Switch", "Right PGA Mixer"},
{"Right HPCOM Mixer", "DACR1 Switch", "Right DAC Mux"},
@@ -839,6 +890,72 @@ static const struct snd_soc_dapm_route intercon[] = {
{"HPRCOM", NULL, "Right HP Com"},
};
+/* For other than tlv320aic3104 */
+static const struct snd_soc_dapm_route intercon_extra[] = {
+ /* Left Input */
+ {"Left Line2L Mux", "single-ended", "LINE2L"},
+ {"Left Line2L Mux", "differential", "LINE2L"},
+
+ {"Left PGA Mixer", "Line2L Switch", "Left Line2L Mux"},
+ {"Left PGA Mixer", "Mic3L Switch", "MIC3L"},
+ {"Left PGA Mixer", "Mic3R Switch", "MIC3R"},
+
+ {"Left ADC", NULL, "GPIO1 dmic modclk"},
+
+ /* Right Input */
+ {"Right Line2R Mux", "single-ended", "LINE2R"},
+ {"Right Line2R Mux", "differential", "LINE2R"},
+
+ {"Right PGA Mixer", "Line2R Switch", "Right Line2R Mux"},
+ {"Right PGA Mixer", "Mic3L Switch", "MIC3L"},
+ {"Right PGA Mixer", "Mic3R Switch", "MIC3R"},
+
+ {"Right ADC", NULL, "GPIO1 dmic modclk"},
+
+ /*
+ * Logical path between digital mic enable and GPIO1 modulator clock
+ * output function
+ */
+ {"GPIO1 dmic modclk", NULL, "DMic Rate 128"},
+ {"GPIO1 dmic modclk", NULL, "DMic Rate 64"},
+ {"GPIO1 dmic modclk", NULL, "DMic Rate 32"},
+
+ /* Left Line Output */
+ {"Left Line Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
+ {"Left Line Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
+
+ /* Right Line Output */
+ {"Right Line Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
+ {"Right Line Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
+
+ /* Left HP Output */
+ {"Left HP Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
+ {"Left HP Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
+
+ /* Right HP Output */
+ {"Right HP Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
+ {"Right HP Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
+
+ /* Left HPCOM Output */
+ {"Left HPCOM Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
+ {"Left HPCOM Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
+
+ /* Right HPCOM Output */
+ {"Right HPCOM Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
+ {"Right HPCOM Mixer", "Line2R Bypass Switch", "Right Line2R Mux"},
+};
+
+/* For tlv320aic3104 */
+static const struct snd_soc_dapm_route intercon_extra_3104[] = {
+ /* Left Input */
+ {"Left PGA Mixer", "Mic2L Switch", "MIC2L"},
+ {"Left PGA Mixer", "Mic2R Switch", "MIC2R"},
+
+ /* Right Input */
+ {"Right PGA Mixer", "Mic2L Switch", "MIC2L"},
+ {"Right PGA Mixer", "Mic2R Switch", "MIC2R"},
+};
+
static const struct snd_soc_dapm_route intercon_mono[] = {
/* Mono Output */
{"Mono Mixer", "Line2L Bypass Switch", "Left Line2L Mux"},
@@ -867,17 +984,31 @@ static int aic3x_add_widgets(struct snd_soc_codec *codec)
switch (aic3x->model) {
case AIC3X_MODEL_3X:
case AIC3X_MODEL_33:
+ snd_soc_dapm_new_controls(dapm, aic3x_extra_dapm_widgets,
+ ARRAY_SIZE(aic3x_extra_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, intercon_extra,
+ ARRAY_SIZE(intercon_extra));
snd_soc_dapm_new_controls(dapm, aic3x_dapm_mono_widgets,
ARRAY_SIZE(aic3x_dapm_mono_widgets));
snd_soc_dapm_add_routes(dapm, intercon_mono,
ARRAY_SIZE(intercon_mono));
break;
case AIC3X_MODEL_3007:
+ snd_soc_dapm_new_controls(dapm, aic3x_extra_dapm_widgets,
+ ARRAY_SIZE(aic3x_extra_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, intercon_extra,
+ ARRAY_SIZE(intercon_extra));
snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets,
ARRAY_SIZE(aic3007_dapm_widgets));
snd_soc_dapm_add_routes(dapm, intercon_3007,
ARRAY_SIZE(intercon_3007));
break;
+ case AIC3X_MODEL_3104:
+ snd_soc_dapm_new_controls(dapm, aic3104_extra_dapm_widgets,
+ ARRAY_SIZE(aic3104_extra_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, intercon_extra_3104,
+ ARRAY_SIZE(intercon_extra_3104));
+ break;
}
return 0;
@@ -1046,7 +1177,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream,
delay += aic3x->tdm_delay;
/* Configure data delay */
- snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay);
+ snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
return 0;
}
@@ -1438,23 +1569,33 @@ static int aic3x_probe(struct snd_soc_codec *codec)
aic3x_init(codec);
if (aic3x->setup) {
- /* setup GPIO functions */
- snd_soc_write(codec, AIC3X_GPIO1_REG,
- (aic3x->setup->gpio_func[0] & 0xf) << 4);
- snd_soc_write(codec, AIC3X_GPIO2_REG,
- (aic3x->setup->gpio_func[1] & 0xf) << 4);
+ if (aic3x->model != AIC3X_MODEL_3104) {
+ /* setup GPIO functions */
+ snd_soc_write(codec, AIC3X_GPIO1_REG,
+ (aic3x->setup->gpio_func[0] & 0xf) << 4);
+ snd_soc_write(codec, AIC3X_GPIO2_REG,
+ (aic3x->setup->gpio_func[1] & 0xf) << 4);
+ } else {
+ dev_warn(codec->dev, "GPIO functionality is not supported on tlv320aic3104\n");
+ }
}
switch (aic3x->model) {
case AIC3X_MODEL_3X:
case AIC3X_MODEL_33:
+ snd_soc_add_codec_controls(codec, aic3x_extra_snd_controls,
+ ARRAY_SIZE(aic3x_extra_snd_controls));
snd_soc_add_codec_controls(codec, aic3x_mono_controls,
ARRAY_SIZE(aic3x_mono_controls));
break;
case AIC3X_MODEL_3007:
+ snd_soc_add_codec_controls(codec, aic3x_extra_snd_controls,
+ ARRAY_SIZE(aic3x_extra_snd_controls));
snd_soc_add_codec_controls(codec,
&aic3x_classd_amp_gain_ctrl, 1);
break;
+ case AIC3X_MODEL_3104:
+ break;
}
/* set mic bias voltage */
@@ -1522,6 +1663,7 @@ static const struct i2c_device_id aic3x_i2c_id[] = {
{ "tlv320aic33", AIC3X_MODEL_33 },
{ "tlv320aic3007", AIC3X_MODEL_3007 },
{ "tlv320aic3106", AIC3X_MODEL_3X },
+ { "tlv320aic3104", AIC3X_MODEL_3104 },
{ }
};
MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
@@ -1673,6 +1815,7 @@ static const struct of_device_id tlv320aic3x_of_match[] = {
{ .compatible = "ti,tlv320aic33" },
{ .compatible = "ti,tlv320aic3007" },
{ .compatible = "ti,tlv320aic3106" },
+ { .compatible = "ti,tlv320aic3104" },
{},
};
MODULE_DEVICE_TABLE(of, tlv320aic3x_of_match);
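
[Editor's sketch, not part of the patch] The aic3x changes above keep controls that do not exist on the tlv320aic3104 at the tail of the shared mixer arrays, so the 3104 widget set can register the same tables with two fewer entries (ARRAY_SIZE(...) - 2). A self-contained sketch of that "shared table, shorter tail" trick; the ex_* names and register addresses are hypothetical.

/* Sketch of sharing one control table between two device variants. */
#include <linux/kernel.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static const struct snd_kcontrol_new ex_mixer_controls[] = {
	SOC_DAPM_SINGLE("PGAL Bypass Switch", 0x10, 7, 1, 0),
	SOC_DAPM_SINGLE("DACL1 Switch",       0x11, 7, 1, 0),
	/* Not present on the reduced variant: keep these at the tail */
	SOC_DAPM_SINGLE("Line2L Bypass Switch", 0x12, 7, 1, 0),
	SOC_DAPM_SINGLE("Line2R Bypass Switch", 0x13, 7, 1, 0),
};

/* Full-featured variant: register every entry */
static const struct snd_soc_dapm_widget ex_full_widgets[] = {
	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
			   ex_mixer_controls,
			   ARRAY_SIZE(ex_mixer_controls)),
};

/* Reduced variant: drop the two trailing Line2 switches */
static const struct snd_soc_dapm_widget ex_reduced_widgets[] = {
	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
			   ex_mixer_controls,
			   ARRAY_SIZE(ex_mixer_controls) - 2),
};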
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 0fe2ced5b09f..4e3e607dec13 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -423,17 +423,18 @@ exit:
static int dac33_playback_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
if (likely(dac33->substream)) {
- dac33_calculate_times(dac33->substream, w->codec);
- dac33_prepare_chip(dac33->substream, w->codec);
+ dac33_calculate_times(dac33->substream, codec);
+ dac33_prepare_chip(dac33->substream, codec);
}
break;
case SND_SOC_DAPM_POST_PMD:
- dac33_disable_digital(w->codec);
+ dac33_disable_digital(codec);
break;
}
return 0;
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 1d1205702d23..9fd80ac1897f 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -20,6 +20,8 @@
#include <sound/jack.h>
#include <sound/soc.h>
+#include "ts3a227e.h"
+
struct ts3a227e {
struct regmap *regmap;
struct snd_soc_jack *jack;
@@ -79,6 +81,10 @@ static const int ts3a227e_buttons[] = {
/* TS3A227E_REG_SETTING_2 0x05 */
#define KP_ENABLE 0x04
+/* TS3A227E_REG_SETTING_3 0x06 */
+#define MICBIAS_SETTING_SFT (3)
+#define MICBIAS_SETTING_MASK (0x7 << MICBIAS_SETTING_SFT)
+
/* TS3A227E_REG_ACCESSORY_STATUS 0x0b */
#define TYPE_3_POLE 0x01
#define TYPE_4_POLE_OMTP 0x02
@@ -221,9 +227,9 @@ int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component);
snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
ts3a227e->jack = jack;
ts3a227e_jack_report(ts3a227e);
@@ -248,12 +254,28 @@ static const struct regmap_config ts3a227e_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(ts3a227e_reg_defaults),
};
+static int ts3a227e_parse_dt(struct ts3a227e *ts3a227e, struct device_node *np)
+{
+ u32 micbias;
+ int err;
+
+ err = of_property_read_u32(np, "ti,micbias", &micbias);
+ if (!err) {
+ regmap_update_bits(ts3a227e->regmap, TS3A227E_REG_SETTING_3,
+ MICBIAS_SETTING_MASK,
+ (micbias & 0x07) << MICBIAS_SETTING_SFT);
+ }
+
+ return 0;
+}
+
static int ts3a227e_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct ts3a227e *ts3a227e;
struct device *dev = &i2c->dev;
int ret;
+ unsigned int acc_reg;
ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
if (ts3a227e == NULL)
@@ -265,6 +287,14 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
if (IS_ERR(ts3a227e->regmap))
return PTR_ERR(ts3a227e->regmap);
+ if (dev->of_node) {
+ ret = ts3a227e_parse_dt(ts3a227e, dev->of_node);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ return ret;
+ }
+ }
+
ret = devm_request_threaded_irq(dev, i2c->irq, NULL, ts3a227e_interrupt,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"TS3A227E", ts3a227e);
@@ -283,6 +313,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
ADC_COMPLETE_INT_DISABLE);
+ /* Read jack status because chip might not trigger interrupt at boot. */
+ regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+ ts3a227e_new_jack_state(ts3a227e, acc_reg);
+ ts3a227e_jack_report(ts3a227e);
+
return 0;
}
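
[Editor's sketch, not part of the patch] The ts3a227e change reads an optional "ti,micbias" DT property, programs it into SETTING_3 with regmap_update_bits(), and reads the accessory status once at probe so a jack already present at boot is reported. A hedged sketch of the optional-property part; EX_REG_SETTING and the field layout are hypothetical.

/* Sketch: apply an optional u32 DT property via regmap_update_bits();
 * when the property is absent the chip default is left alone. */
#include <linux/of.h>
#include <linux/regmap.h>

#define EX_REG_SETTING		0x06
#define EX_MICBIAS_SHIFT	3
#define EX_MICBIAS_MASK		(0x7 << EX_MICBIAS_SHIFT)

static void ex_apply_micbias(struct regmap *regmap, struct device_node *np)
{
	u32 micbias;

	if (of_property_read_u32(np, "ti,micbias", &micbias))
		return;		/* property absent: keep the default */

	regmap_update_bits(regmap, EX_REG_SETTING, EX_MICBIAS_MASK,
			   (micbias & 0x7) << EX_MICBIAS_SHIFT);
}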
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 44af3188afb9..d04693e9cf9f 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -567,12 +567,13 @@ static const struct snd_kcontrol_new twl4030_dapm_dbypassv_control =
static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \
struct snd_kcontrol *kcontrol, int event) \
{ \
- struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec); \
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); \
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); \
\
switch (event) { \
case SND_SOC_DAPM_POST_PMU: \
twl4030->pin_name##_enabled = 1; \
- twl4030_write(w->codec, reg, twl4030_read(w->codec, reg)); \
+ twl4030_write(codec, reg, twl4030_read(codec, reg)); \
break; \
case SND_SOC_DAPM_POST_PMD: \
twl4030->pin_name##_enabled = 0; \
@@ -621,12 +622,14 @@ static void handsfree_ramp(struct snd_soc_codec *codec, int reg, int ramp)
static int handsfreelpga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- handsfree_ramp(w->codec, TWL4030_REG_HFL_CTL, 1);
+ handsfree_ramp(codec, TWL4030_REG_HFL_CTL, 1);
break;
case SND_SOC_DAPM_POST_PMD:
- handsfree_ramp(w->codec, TWL4030_REG_HFL_CTL, 0);
+ handsfree_ramp(codec, TWL4030_REG_HFL_CTL, 0);
break;
}
return 0;
@@ -635,12 +638,14 @@ static int handsfreelpga_event(struct snd_soc_dapm_widget *w,
static int handsfreerpga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- handsfree_ramp(w->codec, TWL4030_REG_HFR_CTL, 1);
+ handsfree_ramp(codec, TWL4030_REG_HFR_CTL, 1);
break;
case SND_SOC_DAPM_POST_PMD:
- handsfree_ramp(w->codec, TWL4030_REG_HFR_CTL, 0);
+ handsfree_ramp(codec, TWL4030_REG_HFR_CTL, 0);
break;
}
return 0;
@@ -649,19 +654,23 @@ static int handsfreerpga_event(struct snd_soc_dapm_widget *w,
static int vibramux_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- twl4030_write(w->codec, TWL4030_REG_VIBRA_SET, 0xff);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ twl4030_write(codec, TWL4030_REG_VIBRA_SET, 0xff);
return 0;
}
static int apll_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- twl4030_apll_enable(w->codec, 1);
+ twl4030_apll_enable(codec, 1);
break;
case SND_SOC_DAPM_POST_PMD:
- twl4030_apll_enable(w->codec, 0);
+ twl4030_apll_enable(codec, 0);
break;
}
return 0;
@@ -670,23 +679,24 @@ static int apll_event(struct snd_soc_dapm_widget *w,
static int aif_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u8 audio_if;
- audio_if = twl4030_read(w->codec, TWL4030_REG_AUDIO_IF);
+ audio_if = twl4030_read(codec, TWL4030_REG_AUDIO_IF);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Enable AIF */
/* enable the PLL before we use it to clock the DAI */
- twl4030_apll_enable(w->codec, 1);
+ twl4030_apll_enable(codec, 1);
- twl4030_write(w->codec, TWL4030_REG_AUDIO_IF,
+ twl4030_write(codec, TWL4030_REG_AUDIO_IF,
audio_if | TWL4030_AIF_EN);
break;
case SND_SOC_DAPM_POST_PMD:
/* disable the DAI before we stop it's source PLL */
- twl4030_write(w->codec, TWL4030_REG_AUDIO_IF,
+ twl4030_write(codec, TWL4030_REG_AUDIO_IF,
audio_if & ~TWL4030_AIF_EN);
- twl4030_apll_enable(w->codec, 0);
+ twl4030_apll_enable(codec, 0);
break;
}
return 0;
@@ -758,20 +768,21 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
static int headsetlpga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/* Do the ramp-up only once */
if (!twl4030->hsr_enabled)
- headset_ramp(w->codec, 1);
+ headset_ramp(codec, 1);
twl4030->hsl_enabled = 1;
break;
case SND_SOC_DAPM_POST_PMD:
/* Do the ramp-down only if both headsetL/R is disabled */
if (!twl4030->hsr_enabled)
- headset_ramp(w->codec, 0);
+ headset_ramp(codec, 0);
twl4030->hsl_enabled = 0;
break;
@@ -782,20 +793,21 @@ static int headsetlpga_event(struct snd_soc_dapm_widget *w,
static int headsetrpga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/* Do the ramp-up only once */
if (!twl4030->hsl_enabled)
- headset_ramp(w->codec, 1);
+ headset_ramp(codec, 1);
twl4030->hsr_enabled = 1;
break;
case SND_SOC_DAPM_POST_PMD:
/* Do the ramp-down only if both headsetL/R is disabled */
if (!twl4030->hsl_enabled)
- headset_ramp(w->codec, 0);
+ headset_ramp(codec, 0);
twl4030->hsr_enabled = 0;
break;
@@ -806,7 +818,8 @@ static int headsetrpga_event(struct snd_soc_dapm_widget *w,
static int digimic_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
struct twl4030_codec_data *pdata = twl4030->pdata;
if (pdata && pdata->digimic_delay)
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 90f47f988b3f..aeec27b6f1af 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -234,7 +234,7 @@ static int headset_power_mode(struct snd_soc_codec *codec, int high_perf)
static int twl6040_hs_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u8 hslctl, hsrctl;
/*
@@ -261,7 +261,7 @@ static int twl6040_hs_dac_event(struct snd_soc_dapm_widget *w,
static int twl6040_ep_drv_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int ret = 0;
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 34ef65c52a7d..8d9de49a5052 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -683,7 +683,7 @@ static const struct snd_kcontrol_new wm2000_controls[] = {
static int wm2000_anc_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
int ret;
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index b80970dc2d2f..ea09db585aa1 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -775,7 +775,8 @@ static int wm5100_out_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct wm5100_priv *wm5100 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wm5100_priv *wm5100 = snd_soc_codec_get_drvdata(codec);
switch (w->reg) {
case WM5100_CHANNEL_ENABLES_1:
@@ -839,7 +840,7 @@ static int wm5100_post_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm5100_priv *wm5100 = snd_soc_codec_get_drvdata(codec);
int ret;
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index f439ae052128..6d0fe0ac95a3 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -28,6 +28,7 @@
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/registers.h>
+#include <asm/unaligned.h>
#include "arizona.h"
#include "wm5102.h"
@@ -580,7 +581,7 @@ static const struct reg_default wm5102_sysclk_revb_patch[] = {
static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
struct regmap *regmap = arizona->regmap;
const struct reg_default *patch = NULL;
@@ -617,11 +618,10 @@ static int wm5102_out_comp_coeff_get(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
- uint16_t data;
mutex_lock(&arizona->dac_comp_lock);
- data = cpu_to_be16(arizona->dac_comp_coeff);
- memcpy(ucontrol->value.bytes.data, &data, sizeof(data));
+ put_unaligned_be16(arizona->dac_comp_coeff,
+ ucontrol->value.bytes.data);
mutex_unlock(&arizona->dac_comp_lock);
return 0;
@@ -1272,19 +1272,24 @@ SND_SOC_DAPM_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT2L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT2R", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT2R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
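
[Editor's sketch, not part of the patch] The wm5102 hunk replaces a cpu_to_be16() plus memcpy() pair with put_unaligned_be16(), which stores the value big-endian regardless of destination alignment. A small sketch showing the two equivalent forms; ex_store_be16 is a hypothetical helper.

/* Sketch: both forms store 0xABCD into buf as the bytes 0xAB, 0xCD. */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static void ex_store_be16(u8 *buf, u16 val)
{
	__be16 tmp;

	/* old form: convert, then copy the temporary */
	tmp = cpu_to_be16(val);
	memcpy(buf, &tmp, sizeof(tmp));

	/* new form: one helper, no temporary, unaligned-safe */
	put_unaligned_be16(val, buf);
}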
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 4456b38a3ef5..fbaeddb3e903 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -134,7 +134,7 @@ static const struct reg_default wm5110_sysclk_revd_patch[] = {
static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
struct regmap *regmap = arizona->regmap;
const struct reg_default *patch = NULL;
@@ -905,22 +905,28 @@ SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT2L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT2R", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT2R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3R", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT3R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 574579b98872..c81a9eab3e3e 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -259,7 +259,7 @@ static void wm8350_pga_work(struct work_struct *work)
static int pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out;
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 8ee446987aa9..b0d84e552fca 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -324,6 +324,7 @@ SOC_SINGLE("RIN34 Mute Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME,
static int outmixer_event (struct snd_soc_dapm_widget *w,
struct snd_kcontrol * kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
u32 reg_shift = mc->shift;
@@ -332,7 +333,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
switch (reg_shift) {
case WM8400_SPEAKER_MIXER | (WM8400_LDSPK << 8) :
- reg = snd_soc_read(w->codec, WM8400_OUTPUT_MIXER1);
+ reg = snd_soc_read(codec, WM8400_OUTPUT_MIXER1);
if (reg & WM8400_LDLO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 1 LDLO Set\n");
@@ -340,7 +341,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
}
break;
case WM8400_SPEAKER_MIXER | (WM8400_RDSPK << 8):
- reg = snd_soc_read(w->codec, WM8400_OUTPUT_MIXER2);
+ reg = snd_soc_read(codec, WM8400_OUTPUT_MIXER2);
if (reg & WM8400_RDRO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 2 RDRO Set\n");
@@ -348,7 +349,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
}
break;
case WM8400_OUTPUT_MIXER1 | (WM8400_LDLO << 8):
- reg = snd_soc_read(w->codec, WM8400_SPEAKER_MIXER);
+ reg = snd_soc_read(codec, WM8400_SPEAKER_MIXER);
if (reg & WM8400_LDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer LDSPK Set\n");
@@ -356,7 +357,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
}
break;
case WM8400_OUTPUT_MIXER2 | (WM8400_RDRO << 8):
- reg = snd_soc_read(w->codec, WM8400_SPEAKER_MIXER);
+ reg = snd_soc_read(codec, WM8400_SPEAKER_MIXER);
if (reg & WM8400_RDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer RDSPK Set\n");
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index b9211b42f6e9..098c143f44d6 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -217,7 +217,8 @@ SND_SOC_DAPM_INPUT("LLINEIN"),
static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
return wm8731->sysclk_type == WM8731_SYSCLK_XTAL;
}
@@ -717,6 +718,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
if (wm8731 == NULL)
return -ENOMEM;
+ mutex_init(&wm8731->lock);
+
wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
if (IS_ERR(wm8731->regmap)) {
ret = PTR_ERR(wm8731->regmap);
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index f6847fdd6ddd..eb0a1644ba11 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -323,7 +323,7 @@ static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("ROUT2"),
SND_SOC_DAPM_OUTPUT("MONO1"),
SND_SOC_DAPM_OUTPUT("OUT3"),
- SND_SOC_DAPM_OUTPUT("VREF"),
+ SND_SOC_DAPM_VMID("VREF"),
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT2"),
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index 180e7a098726..53e977da2f86 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -308,9 +308,7 @@ static const struct snd_soc_dapm_route wm8770_intercon[] = {
static int vout12supply_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec;
-
- codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -327,9 +325,7 @@ static int vout12supply_event(struct snd_soc_dapm_widget *w,
static int vout34supply_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec;
-
- codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 1315f7642503..b2b0e68f707e 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -648,7 +648,7 @@ static struct snd_soc_dai_driver wm8804_dai = {
.symmetric_rates = 1
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8804 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8804 = {
.probe = wm8804_probe,
.remove = wm8804_remove,
.set_bias_level = wm8804_set_bias_level,
@@ -664,7 +664,7 @@ static const struct of_device_id wm8804_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
-static struct regmap_config wm8804_regmap_config = {
+static const struct regmap_config wm8804_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 3a0d4b7d692f..2eb986c19b88 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -224,7 +224,7 @@ static void wm8900_reset(struct snd_soc_codec *codec)
static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 hpctl1 = snd_soc_read(codec, WM8900_REG_HPCTL1);
switch (event) {
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index cc6b0ef98a34..dde462c082be 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -260,7 +260,7 @@ static int wm8903_cp_event(struct snd_soc_dapm_widget *w,
static int wm8903_dcs_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
switch (event) {
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 4d2d2b1380d5..d3b3f57668cc 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -673,7 +673,7 @@ static int cp_event(struct snd_soc_dapm_widget *w,
static int sysclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -711,7 +711,7 @@ static int sysclk_event(struct snd_soc_dapm_widget *w,
static int out_pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int reg, val;
int dcs_mask;
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
{ "Right Capture PGA", NULL, "Right Capture Mux" },
{ "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
- { "AIFOUTL", "Left", "ADCL" },
- { "AIFOUTL", "Right", "ADCR" },
- { "AIFOUTR", "Left", "ADCL" },
- { "AIFOUTR", "Right", "ADCR" },
+ { "AIFOUTL Mux", "Left", "ADCL" },
+ { "AIFOUTL Mux", "Right", "ADCR" },
+ { "AIFOUTR Mux", "Left", "ADCL" },
+ { "AIFOUTR Mux", "Right", "ADCR" },
+
+ { "AIFOUTL", NULL, "AIFOUTL Mux" },
+ { "AIFOUTR", NULL, "AIFOUTR Mux" },
{ "ADCL", NULL, "CLK_DSP" },
{ "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
};
static const struct snd_soc_dapm_route dac_intercon[] = {
- { "DACL", "Right", "AIFINR" },
- { "DACL", "Left", "AIFINL" },
+ { "DACL Mux", "Left", "AIFINL" },
+ { "DACL Mux", "Right", "AIFINR" },
+
+ { "DACR Mux", "Left", "AIFINL" },
+ { "DACR Mux", "Right", "AIFINR" },
+
+ { "DACL", NULL, "DACL Mux" },
{ "DACL", NULL, "CLK_DSP" },
- { "DACR", "Right", "AIFINR" },
- { "DACR", "Left", "AIFINL" },
+ { "DACR", NULL, "DACR Mux" },
{ "DACR", NULL, "CLK_DSP" },
{ "Charge pump", NULL, "SYSCLK" },
@@ -2098,6 +2105,24 @@ static const struct regmap_config wm8904_regmap = {
.num_reg_defaults = ARRAY_SIZE(wm8904_reg_defaults),
};
+#ifdef CONFIG_OF
+static enum wm8904_type wm8904_data = WM8904;
+static enum wm8904_type wm8912_data = WM8912;
+
+static const struct of_device_id wm8904_of_match[] = {
+ {
+ .compatible = "wlf,wm8904",
+ .data = &wm8904_data,
+ }, {
+ .compatible = "wlf,wm8912",
+ .data = &wm8912_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, wm8904_of_match);
+#endif
+
static int wm8904_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -2125,7 +2150,17 @@ static int wm8904_i2c_probe(struct i2c_client *i2c,
return ret;
}
- wm8904->devtype = id->driver_data;
+ if (i2c->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(wm8904_of_match, i2c->dev.of_node);
+ if (match == NULL)
+ return -EINVAL;
+ wm8904->devtype = *((enum wm8904_type *)match->data);
+ } else {
+ wm8904->devtype = id->driver_data;
+ }
+
i2c_set_clientdata(i2c, wm8904);
wm8904->pdata = i2c->dev.platform_data;
@@ -2259,6 +2294,7 @@ static struct i2c_driver wm8904_i2c_driver = {
.driver = {
.name = "wm8904",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(wm8904_of_match),
},
.probe = wm8904_i2c_probe,
.remove = wm8904_i2c_remove,
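
[Editor's sketch, not part of the patch] The wm8904 change picks the device variant from the matched OF entry's .data when probed from the device tree, and falls back to the i2c_device_id otherwise. A sketch of that pattern with hypothetical compatibles and names.

/* Sketch: select per-compatible data at probe time. */
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>

enum ex_type { EX_TYPE_A, EX_TYPE_B };

static const enum ex_type ex_type_a = EX_TYPE_A;
static const enum ex_type ex_type_b = EX_TYPE_B;

static const struct of_device_id ex_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &ex_type_a },
	{ .compatible = "vendor,chip-b", .data = &ex_type_b },
	{ }
};
MODULE_DEVICE_TABLE(of, ex_of_match);

static enum ex_type ex_get_type(struct i2c_client *i2c,
				const struct i2c_device_id *id)
{
	if (i2c->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(ex_of_match, i2c->dev.of_node);
		if (match)
			return *(const enum ex_type *)match->data;
	}

	return id->driver_data;	/* legacy board-file probe path */
}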
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 1173f7fef5a7..1ab2d462afad 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -333,7 +333,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
static int wm8955_sysclk(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int ret = 0;
/* Always disable the clocks - if we're doing reconfiguration this
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 3cbc82b33292..c799cca5abeb 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -418,7 +418,7 @@ static void wm8958_dsp_apply(struct snd_soc_codec *codec, int path, int start)
int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int i;
switch (event) {
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 031a1ae71d94..cf8fecf97f2c 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -117,6 +118,7 @@ static bool wm8960_volatile(struct device *dev, unsigned int reg)
}
struct wm8960_priv {
+ struct clk *mclk;
struct regmap *regmap;
int (*set_bias_level)(struct snd_soc_codec *,
enum snd_soc_bias_level level);
@@ -556,7 +558,7 @@ static struct {
{ 22050, 2 },
{ 24000, 2 },
{ 16000, 3 },
- { 11250, 4 },
+ { 11025, 4 },
{ 12000, 4 },
{ 8000, 5 },
};
@@ -618,14 +620,38 @@ static int wm8960_set_bias_level_out3(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+ int ret;
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
- /* Set VMID to 2x50k */
- snd_soc_update_bits(codec, WM8960_POWER1, 0x180, 0x80);
+ switch (codec->dapm.bias_level) {
+ case SND_SOC_BIAS_STANDBY:
+ if (!IS_ERR(wm8960->mclk)) {
+ ret = clk_prepare_enable(wm8960->mclk);
+ if (ret) {
+ dev_err(codec->dev,
+ "Failed to enable MCLK: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* Set VMID to 2x50k */
+ snd_soc_update_bits(codec, WM8960_POWER1, 0x180, 0x80);
+ break;
+
+ case SND_SOC_BIAS_ON:
+ if (!IS_ERR(wm8960->mclk))
+ clk_disable_unprepare(wm8960->mclk);
+ break;
+
+ default:
+ break;
+ }
+
break;
case SND_SOC_BIAS_STANDBY:
@@ -674,7 +700,7 @@ static int wm8960_set_bias_level_capless(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
- int reg;
+ int reg, ret;
switch (level) {
case SND_SOC_BIAS_ON:
@@ -715,9 +741,22 @@ static int wm8960_set_bias_level_capless(struct snd_soc_codec *codec,
WM8960_VREF, WM8960_VREF);
msleep(100);
+
+ if (!IS_ERR(wm8960->mclk)) {
+ ret = clk_prepare_enable(wm8960->mclk);
+ if (ret) {
+ dev_err(codec->dev,
+ "Failed to enable MCLK: %d\n",
+ ret);
+ return ret;
+ }
+ }
break;
case SND_SOC_BIAS_ON:
+ if (!IS_ERR(wm8960->mclk))
+ clk_disable_unprepare(wm8960->mclk);
+
/* Enable anti-pop mode */
snd_soc_update_bits(codec, WM8960_APOP1,
WM8960_POBCTRL | WM8960_SOFT_ST |
@@ -1002,6 +1041,12 @@ static int wm8960_i2c_probe(struct i2c_client *i2c,
if (wm8960 == NULL)
return -ENOMEM;
+ wm8960->mclk = devm_clk_get(&i2c->dev, "mclk");
+ if (IS_ERR(wm8960->mclk)) {
+ if (PTR_ERR(wm8960->mclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ }
+
wm8960->regmap = devm_regmap_init_i2c(i2c, &wm8960_regmap);
if (IS_ERR(wm8960->regmap))
return PTR_ERR(wm8960->regmap);
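
Beyond the 11025 Hz sample-rate fix, the wm8960 hunks treat MCLK as an optional clock: devm_clk_get() failure is only fatal when it means probe deferral, and every enable/disable in the bias-level paths is guarded by IS_ERR(). A condensed sketch of that idiom, with assumed mychip_* names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

/* Probe time: tolerate a missing clock, but honour probe deferral. */
static int mychip_get_optional_mclk(struct device *dev, struct clk **mclk)
{
	*mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(*mclk) && PTR_ERR(*mclk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* clock exists but is not ready yet */
	return 0;			/* an absent clock is tolerated */
}

/* Bias-level transitions: only touch the clock if we really have one. */
static int mychip_enable_mclk(struct device *dev, struct clk *mclk)
{
	int ret;

	if (IS_ERR(mclk))
		return 0;		/* no MCLK wired up */

	ret = clk_prepare_enable(mclk);
	if (ret)
		dev_err(dev, "Failed to enable MCLK: %d\n", ret);
	return ret;
}

static void mychip_disable_mclk(struct clk *mclk)
{
	if (!IS_ERR(mclk))
		clk_disable_unprepare(mclk);
}
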
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index eeffd05384b4..95e2c1bfc809 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -194,7 +194,7 @@ static bool wm8961_readable(struct device *dev, unsigned int reg)
static int wm8961_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 hp_reg = snd_soc_read(codec, WM8961_ANALOGUE_HP_0);
u16 cp_reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_1);
u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
@@ -286,7 +286,7 @@ static int wm8961_hp_event(struct snd_soc_dapm_widget *w,
static int wm8961_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
u16 spk_reg = snd_soc_read(codec, WM8961_CLASS_D_CONTROL_1);
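
The one-line change repeated throughout these codec drivers replaces the removed w->codec back-pointer with a lookup through the widget's DAPM context. The general shape of a converted event callback, where mychip_priv and the mychip_* names are illustrative rather than taken from any patch here:

static int mychip_widget_event(struct snd_soc_dapm_widget *w,
			       struct snd_kcontrol *kcontrol, int event)
{
	/* Widgets no longer carry a codec pointer; derive it from the
	 * DAPM context the widget belongs to. */
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct mychip_priv *mychip = snd_soc_codec_get_drvdata(codec);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* power-up work using codec and mychip */
		break;
	case SND_SOC_DAPM_PRE_PMD:
		/* power-down work */
		break;
	default:
		break;
	}

	return 0;
}
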
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index d32d554f5b34..118b0034ba23 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1866,7 +1866,7 @@ static int cp_event(struct snd_soc_dapm_widget *w,
static int hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int timeout;
int reg;
int expected = (WM8962_DCS_STARTUP_DONE_HP1L |
@@ -1960,7 +1960,7 @@ static int hp_event(struct snd_soc_dapm_widget *w,
static int out_pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int reg;
switch (w->shift) {
@@ -1993,7 +1993,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
static int dsp2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
switch (event) {
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index e418199155a8..24968aa8618a 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -244,7 +244,7 @@ SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V, WM8988_ROUT2V,
static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2);
/* Use the DAC to gate LRC if active, otherwise use ADC */
@@ -813,7 +813,7 @@ static int wm8988_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
.probe = wm8988_probe,
.set_bias_level = wm8988_set_bias_level,
.suspend_bias_off = true,
@@ -826,7 +826,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
.num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes),
};
-static struct regmap_config wm8988_regmap = {
+static const struct regmap_config wm8988_regmap = {
.reg_bits = 7,
.val_bits = 9,
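
Constifying these descriptor tables is safe because both registration paths take const pointers (devm_regmap_init_i2c() accepts a const struct regmap_config *, and snd_soc_register_codec() a const struct snd_soc_codec_driver *), so the data can live in read-only memory. A minimal illustration; mychip_codec_probe and mychip_dai are assumed to exist elsewhere:

static const struct regmap_config mychip_regmap = {
	.reg_bits = 7,
	.val_bits = 9,
};

static const struct snd_soc_codec_driver soc_codec_dev_mychip = {
	.probe = mychip_codec_probe,		/* assumed callback */
};

static int mychip_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct regmap *regmap = devm_regmap_init_i2c(i2c, &mychip_regmap);

	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_mychip,
				      &mychip_dai, 1);	/* assumed DAI */
}
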
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 8a584229310a..c93bffcb3cfb 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -374,13 +374,14 @@ SOC_SINGLE("RIN34 Mute Switch", WM8990_RIGHT_LINE_INPUT_3_4_VOLUME,
static int outmixer_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u32 reg_shift = kcontrol->private_value & 0xfff;
int ret = 0;
u16 reg;
switch (reg_shift) {
case WM8990_SPEAKER_MIXER | (WM8990_LDSPK_BIT << 8) :
- reg = snd_soc_read(w->codec, WM8990_OUTPUT_MIXER1);
+ reg = snd_soc_read(codec, WM8990_OUTPUT_MIXER1);
if (reg & WM8990_LDLO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 1 LDLO Set\n");
@@ -388,7 +389,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
break;
case WM8990_SPEAKER_MIXER | (WM8990_RDSPK_BIT << 8):
- reg = snd_soc_read(w->codec, WM8990_OUTPUT_MIXER2);
+ reg = snd_soc_read(codec, WM8990_OUTPUT_MIXER2);
if (reg & WM8990_RDRO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 2 RDRO Set\n");
@@ -396,7 +397,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
break;
case WM8990_OUTPUT_MIXER1 | (WM8990_LDLO_BIT << 8):
- reg = snd_soc_read(w->codec, WM8990_SPEAKER_MIXER);
+ reg = snd_soc_read(codec, WM8990_SPEAKER_MIXER);
if (reg & WM8990_LDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer LDSPK Set\n");
@@ -404,7 +405,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
break;
case WM8990_OUTPUT_MIXER2 | (WM8990_RDRO_BIT << 8):
- reg = snd_soc_read(w->codec, WM8990_SPEAKER_MIXER);
+ reg = snd_soc_read(codec, WM8990_SPEAKER_MIXER);
if (reg & WM8990_RDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer RDSPK Set\n");
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index b0ac2c3e31b9..49df0dc607e6 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -382,13 +382,14 @@ static const struct snd_kcontrol_new wm8991_snd_controls[] = {
static int outmixer_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u32 reg_shift = kcontrol->private_value & 0xfff;
int ret = 0;
u16 reg;
switch (reg_shift) {
case WM8991_SPEAKER_MIXER | (WM8991_LDSPK_BIT << 8):
- reg = snd_soc_read(w->codec, WM8991_OUTPUT_MIXER1);
+ reg = snd_soc_read(codec, WM8991_OUTPUT_MIXER1);
if (reg & WM8991_LDLO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 1 LDLO Set\n");
@@ -397,7 +398,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
break;
case WM8991_SPEAKER_MIXER | (WM8991_RDSPK_BIT << 8):
- reg = snd_soc_read(w->codec, WM8991_OUTPUT_MIXER2);
+ reg = snd_soc_read(codec, WM8991_OUTPUT_MIXER2);
if (reg & WM8991_RDRO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 2 RDRO Set\n");
@@ -406,7 +407,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
break;
case WM8991_OUTPUT_MIXER1 | (WM8991_LDLO_BIT << 8):
- reg = snd_soc_read(w->codec, WM8991_SPEAKER_MIXER);
+ reg = snd_soc_read(codec, WM8991_SPEAKER_MIXER);
if (reg & WM8991_LDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer LDSPK Set\n");
@@ -415,7 +416,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
break;
case WM8991_OUTPUT_MIXER2 | (WM8991_RDRO_BIT << 8):
- reg = snd_soc_read(w->codec, WM8991_SPEAKER_MIXER);
+ reg = snd_soc_read(codec, WM8991_SPEAKER_MIXER);
if (reg & WM8991_RDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer RDSPK Set\n");
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 53c6fe359496..2e70a270eb28 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -810,7 +810,7 @@ SOC_SINGLE_TLV("EQ5 Volume", WM8993_EQ6, 0, 24, 0, eq_tlv),
static int clk_sys_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 1b97de2e4e67..4fbc7689339a 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -249,7 +249,8 @@ static int configure_clock(struct snd_soc_codec *codec)
static int check_clk_sys(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- int reg = snd_soc_read(source->codec, WM8994_CLOCKING_1);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ int reg = snd_soc_read(codec, WM8994_CLOCKING_1);
const char *clk;
/* Check what we're currently using for CLK_SYS */
@@ -806,7 +807,7 @@ static void active_dereference(struct snd_soc_codec *codec)
static int clk_sys_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -981,7 +982,7 @@ static void vmid_dereference(struct snd_soc_codec *codec)
static int vmid_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1037,7 +1038,7 @@ static bool wm8994_check_class_w_digital(struct snd_soc_codec *codec)
static int aif1clk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
@@ -1135,7 +1136,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
static int aif2clk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int i;
int dac;
int adc;
@@ -1220,7 +1221,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -1238,7 +1239,7 @@ static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -1256,7 +1257,7 @@ static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
static int late_enable_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -1289,7 +1290,7 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w,
static int late_disable_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -1331,7 +1332,7 @@ static int micbias_ev(struct snd_soc_dapm_widget *w,
static int dac_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int mask = 1 << w->shift;
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1372,7 +1373,7 @@ SOC_DAPM_SINGLE("DAC1 Switch", WM8994_SPEAKER_MIXER, 0, 1, 0),
static int post_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
dev_dbg(codec->dev, "SRC status: %x\n",
snd_soc_read(codec,
WM8994_RATE_STATUS));
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index c280f0a3a424..66103c2b012e 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -44,7 +44,7 @@ static const char *wm8995_supply_names[WM8995_NUM_SUPPLIES] = {
"MICVDD"
};
-static struct reg_default wm8995_reg_defaults[] = {
+static const struct reg_default wm8995_reg_defaults[] = {
{ 0, 0x8995 },
{ 5, 0x0100 },
{ 16, 0x000b },
@@ -534,10 +534,11 @@ static void wm8995_update_class_w(struct snd_soc_codec *codec)
static int check_clk_sys(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
unsigned int reg;
const char *clk;
- reg = snd_soc_read(source->codec, WM8995_CLOCKING_1);
+ reg = snd_soc_read(codec, WM8995_CLOCKING_1);
/* Check what we're currently using for CLK_SYS */
if (reg & WM8995_SYSCLK_SRC)
clk = "AIF2CLK";
@@ -560,9 +561,7 @@ static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
static int hp_supply_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec;
-
- codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -611,10 +610,9 @@ static void dc_servo_cmd(struct snd_soc_codec *codec,
static int hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int reg;
- codec = w->codec;
reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1);
switch (event) {
@@ -761,9 +759,7 @@ static int configure_clock(struct snd_soc_codec *codec)
static int clk_sys_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec;
-
- codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -2190,7 +2186,7 @@ static struct snd_soc_dai_driver wm8995_dai[] = {
}
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
.probe = wm8995_probe,
.remove = wm8995_remove,
.set_bias_level = wm8995_set_bias_level,
@@ -2204,7 +2200,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
.num_dapm_routes = ARRAY_SIZE(wm8995_intercon),
};
-static struct regmap_config wm8995_regmap = {
+static const struct regmap_config wm8995_regmap = {
.reg_bits = 16,
.val_bits = 16,
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index b1dcc11c1b23..dc92d5e4e942 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -599,7 +599,7 @@ static void wm8996_bg_disable(struct snd_soc_codec *codec)
static int bg_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int ret = 0;
switch (event) {
@@ -634,7 +634,8 @@ static int cp_event(struct snd_soc_dapm_widget *w,
static int rmv_short_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
/* Record which outputs we enabled */
switch (event) {
@@ -758,7 +759,8 @@ static void wm8996_seq_notifier(struct snd_soc_dapm_context *dapm,
static int dcs_start(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 7e8bfe27566b..a4d11770630c 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -84,7 +84,7 @@ static const struct reg_default wm8997_sysclk_reva_patch[] = {
static int wm8997_sysclk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
struct regmap *regmap = arizona->regmap;
const struct reg_default *patch = NULL;
@@ -610,13 +610,16 @@ SND_SOC_DAPM_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
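
The OUT1L/OUT1R/OUT3L widgets above now register for all four power edges, so arizona_hp_ev/arizona_out_ev can act both before and after each transition. A generic sketch of a PGA widget wired up that way (names are illustrative, not from the patch):

static int mychip_out_ev(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:	/* about to be powered up */
		break;
	case SND_SOC_DAPM_POST_PMU:	/* just powered up */
		break;
	case SND_SOC_DAPM_PRE_PMD:	/* about to be powered down */
		break;
	case SND_SOC_DAPM_POST_PMD:	/* just powered down */
		break;
	default:
		break;
	}
	return 0;
}

static const struct snd_soc_dapm_widget mychip_widgets[] = {
SND_SOC_DAPM_PGA_E("OUTL", SND_SOC_NOPM, 0, 0, NULL, 0, mychip_out_ev,
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
};
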
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index b1d946facd57..13a3f335ea5b 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -734,7 +734,7 @@ static int configure_clock(struct snd_soc_codec *codec)
static int clk_sys_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
/* This should be done on init() for bypass paths */
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 6ffe8dc4f3fa..60d243c904f5 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -254,7 +254,7 @@ SOC_SINGLE_TLV("MIXOUTR IN2B Volume", WM9090_OUTPUT_MIXER4, 0, 3, 1,
static int hp_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int reg = snd_soc_read(codec, WM9090_ANALOGUE_HP_0);
switch (event) {
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 3eddb18fefd1..5cc457ef8894 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
struct snd_ac97 *ac97;
int ret = 0;
- ac97 = snd_soc_new_ac97_codec(codec);
+ ac97 = snd_soc_alloc_ac97_codec(codec);
if (IS_ERR(ac97)) {
ret = PTR_ERR(ac97);
dev_err(codec->dev, "Failed to register AC97 codec\n");
return ret;
}
- snd_soc_codec_set_drvdata(codec, ac97);
-
ret = wm9705_reset(codec);
if (ret)
- goto reset_err;
+ goto err_put_device;
+
+ ret = device_add(&ac97->dev);
+ if (ret)
+ goto err_put_device;
+
+ snd_soc_codec_set_drvdata(codec, ac97);
return 0;
-reset_err:
- snd_soc_free_ac97_codec(ac97);
+err_put_device:
+ put_device(&ac97->dev);
return ret;
}
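
This and the wm9712/wm9713 conversions below split AC97 codec registration in two: snd_soc_alloc_ac97_codec() only allocates the device, the codec reset is attempted while nothing is registered yet, and device_add() publishes the device only on success; on any failure a single put_device() drops the allocation. The shape of the sequence, with an assumed mychip_reset():

static int mychip_soc_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;
	int ret;

	ac97 = snd_soc_alloc_ac97_codec(codec);	/* allocated, not yet added */
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	ret = mychip_reset(codec);		/* probe the AC97 link first */
	if (ret)
		goto err_put_device;

	ret = device_add(&ac97->dev);		/* publish only after reset */
	if (ret)
		goto err_put_device;

	snd_soc_codec_set_drvdata(codec, ac97);
	return 0;

err_put_device:
	put_device(&ac97->dev);			/* drops the allocation ref */
	return ret;
}
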
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index e04643d2bb24..9517571e820d 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- wm9712->ac97 = snd_soc_new_ac97_codec(codec);
+ wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
if (IS_ERR(wm9712->ac97)) {
ret = PTR_ERR(wm9712->ac97);
dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
ret = wm9712_reset(codec, 0);
if (ret < 0)
- goto reset_err;
+ goto err_put_device;
+
+ ret = device_add(&wm9712->ac97->dev);
+ if (ret)
+ goto err_put_device;
/* set alc mux to none */
ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
return 0;
-reset_err:
- snd_soc_free_ac97_codec(wm9712->ac97);
+err_put_device:
+ put_device(&wm9712->ac97->dev);
return ret;
}
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 71b9d5b0734d..68222917b396 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -217,7 +217,7 @@ SOC_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1),
static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 status, rate;
if (WARN_ON(event != SND_SOC_DAPM_PRE_PMD))
@@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
int ret = 0, reg;
- wm9713->ac97 = snd_soc_new_ac97_codec(codec);
+ wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
if (IS_ERR(wm9713->ac97))
return PTR_ERR(wm9713->ac97);
@@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
wm9713_reset(codec, 0);
ret = wm9713_reset(codec, 1);
if (ret < 0)
- goto reset_err;
+ goto err_put_device;
+
+ ret = device_add(&wm9713->ac97->dev);
+ if (ret)
+ goto err_put_device;
/* unmute the adc - move to kcontrol */
reg = ac97_read(codec, AC97_CD) & 0x7fff;
@@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
return 0;
-reset_err:
- snd_soc_free_ac97_codec(wm9713->ac97);
+err_put_device:
+ put_device(&wm9713->ac97->dev);
return ret;
}
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 720d6e852986..ff67b334065b 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1373,7 +1373,7 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
struct wm_adsp *dsp = &dsps[w->shift];
struct wm_adsp_alg_region *alg_region;
@@ -1605,7 +1605,7 @@ err:
int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
struct wm_adsp *dsp = &dsps[w->shift];
@@ -1626,7 +1626,7 @@ EXPORT_SYMBOL_GPL(wm_adsp2_early_event);
int wm_adsp2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
struct wm_adsp *dsp = &dsps[w->shift];
struct wm_adsp_alg_region *alg_region;
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 374537d5e179..8366e19657a7 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -500,7 +500,7 @@ SOC_SINGLE_TLV("LINEOUT2 Volume", WM8993_LINE_OUTPUTS_VOLUME, 0, 1, 1,
static int hp_supply_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -542,7 +542,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
static int hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
unsigned int reg = snd_soc_read(codec, WM8993_ANALOGUE_HP_0);
switch (event) {
@@ -594,7 +594,7 @@ static int hp_event(struct snd_soc_dapm_widget *w,
static int earpiece_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *control, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
u16 reg = snd_soc_read(codec, WM8993_ANTIPOP1) & ~WM8993_HPOUT2_IN_ENA;
switch (event) {
@@ -619,7 +619,7 @@ static int earpiece_event(struct snd_soc_dapm_widget *w,
static int lineout_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *control, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
bool *flag;
@@ -649,7 +649,7 @@ static int lineout_event(struct snd_soc_dapm_widget *w,
static int micbias_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
switch (w->shift) {
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 8e948c63f3d9..2b81ca418d2a 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -58,13 +58,12 @@ choice
depends on MACH_DAVINCI_DM365_EVM
config SND_DM365_AIC3X_CODEC
- bool "Audio Codec - AIC3101"
+ tristate "Audio Codec - AIC3101"
help
Say Y if you want to add support for AIC3101 audio codec
config SND_DM365_VOICE_CODEC
tristate "Voice Codec - CQ93VC"
- depends on SND_DAVINCI_SOC
select MFD_DAVINCI_VOICECODEC
select SND_DAVINCI_SOC_VCIF
select SND_SOC_CQ0093VC
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 158cb3d1db70..b6bb5947a8a8 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -14,7 +14,6 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/edma.h>
#include <linux/i2c.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
@@ -25,11 +24,6 @@
#include <asm/dma.h>
#include <asm/mach-types.h>
-#include <linux/edma.h>
-
-#include "davinci-pcm.h"
-#include "davinci-i2s.h"
-
struct snd_soc_card_drvdata_davinci {
struct clk *mclk;
unsigned sysclk;
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 30b94d4f9c5d..de3b155a5011 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -364,6 +364,20 @@ static irqreturn_t davinci_mcasp_rx_irq_handler(int irq, void *data)
return IRQ_RETVAL(handled_mask);
}
+static irqreturn_t davinci_mcasp_common_irq_handler(int irq, void *data)
+{
+ struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK])
+ ret = davinci_mcasp_tx_irq_handler(irq, data);
+
+ if (mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE])
+ ret |= davinci_mcasp_rx_irq_handler(irq, data);
+
+ return ret;
+}
+
static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
@@ -1313,16 +1327,19 @@ static struct davinci_mcasp_pdata *davinci_mcasp_set_pdata_from_of(
pdata->tx_dma_channel = dma_spec.args[0];
- ret = of_property_match_string(np, "dma-names", "rx");
- if (ret < 0)
- goto nodata;
+ /* RX is not valid in DIT mode */
+ if (pdata->op_mode != DAVINCI_MCASP_DIT_MODE) {
+ ret = of_property_match_string(np, "dma-names", "rx");
+ if (ret < 0)
+ goto nodata;
- ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", ret,
- &dma_spec);
- if (ret < 0)
- goto nodata;
+ ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", ret,
+ &dma_spec);
+ if (ret < 0)
+ goto nodata;
- pdata->rx_dma_channel = dma_spec.args[0];
+ pdata->rx_dma_channel = dma_spec.args[0];
+ }
ret = of_property_read_u32(np, "tx-num-evt", &val);
if (ret >= 0)
@@ -1441,6 +1458,23 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
mcasp->dev = &pdev->dev;
+ irq = platform_get_irq_byname(pdev, "common");
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common\n",
+ dev_name(&pdev->dev));
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ davinci_mcasp_common_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ irq_name, mcasp);
+ if (ret) {
+ dev_err(&pdev->dev, "common IRQ request failed\n");
+ goto err;
+ }
+
+ mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK] = XUNDRN;
+ mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE] = ROVRN;
+ }
+
irq = platform_get_irq_byname(pdev, "rx");
if (irq >= 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n",
@@ -1501,19 +1535,34 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
else
dma_data->filter_data = &dma_params->channel;
- dma_params = &mcasp->dma_params[SNDRV_PCM_STREAM_CAPTURE];
- dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
- dma_params->asp_chan_q = pdata->asp_chan_q;
- dma_params->ram_chan_q = pdata->ram_chan_q;
- dma_params->sram_pool = pdata->sram_pool;
- dma_params->sram_size = pdata->sram_size_capture;
- if (dat)
- dma_params->dma_addr = dat->start;
- else
- dma_params->dma_addr = mem->start + pdata->rx_dma_offset;
-
- /* Unconditional dmaengine stuff */
- dma_data->addr = dma_params->dma_addr;
+ /* RX is not valid in DIT mode */
+ if (mcasp->op_mode != DAVINCI_MCASP_DIT_MODE) {
+ dma_params = &mcasp->dma_params[SNDRV_PCM_STREAM_CAPTURE];
+ dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
+ dma_params->asp_chan_q = pdata->asp_chan_q;
+ dma_params->ram_chan_q = pdata->ram_chan_q;
+ dma_params->sram_pool = pdata->sram_pool;
+ dma_params->sram_size = pdata->sram_size_capture;
+ if (dat)
+ dma_params->dma_addr = dat->start;
+ else
+ dma_params->dma_addr = mem->start + pdata->rx_dma_offset;
+
+ /* Unconditional dmaengine stuff */
+ dma_data->addr = dma_params->dma_addr;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res)
+ dma_params->channel = res->start;
+ else
+ dma_params->channel = pdata->rx_dma_channel;
+
+ /* dmaengine filter data for DT and non-DT boot */
+ if (pdev->dev.of_node)
+ dma_data->filter_data = "rx";
+ else
+ dma_data->filter_data = &dma_params->channel;
+ }
if (mcasp->version < MCASP_VERSION_3) {
mcasp->fifo_base = DAVINCI_MCASP_V2_AFIFO_BASE;
@@ -1523,18 +1572,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
mcasp->fifo_base = DAVINCI_MCASP_V3_AFIFO_BASE;
}
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (res)
- dma_params->channel = res->start;
- else
- dma_params->channel = pdata->rx_dma_channel;
-
- /* dmaengine filter data for DT and non-DT boot */
- if (pdev->dev.of_node)
- dma_data->filter_data = "rx";
- else
- dma_data->filter_data = &dma_params->channel;
-
dev_set_drvdata(&pdev->dev, mcasp);
mcasp_reparent_fck(pdev);
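
davinci_mcasp_common_irq_handler() lets boards that expose a single combined McASP interrupt share it between directions: the handler fans out to the TX/RX handlers only for streams that are actually open, and the irqreturn_t values OR together cleanly because IRQ_NONE is 0. A reduced sketch of the dispatch and request side, with assumed mychip_* names:

static irqreturn_t mychip_common_irq(int irq, void *data)
{
	struct mychip *chip = data;
	irqreturn_t ret = IRQ_NONE;

	if (chip->substreams[SNDRV_PCM_STREAM_PLAYBACK])
		ret = mychip_tx_irq(irq, data);

	if (chip->substreams[SNDRV_PCM_STREAM_CAPTURE])
		ret |= mychip_rx_irq(irq, data);

	return ret;
}

static int mychip_request_common_irq(struct platform_device *pdev,
				     struct mychip *chip)
{
	int irq = platform_get_irq_byname(pdev, "common");

	if (irq < 0)
		return 0;	/* optional: fall back to separate tx/rx irqs */

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 mychip_common_irq,
					 IRQF_ONESHOT | IRQF_SHARED,
					 dev_name(&pdev->dev), chip);
}
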
diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
index e334900cf0b8..d50e08517dce 100644
--- a/sound/soc/dwc/Kconfig
+++ b/sound/soc/dwc/Kconfig
@@ -1,6 +1,7 @@
config SND_DESIGNWARE_I2S
tristate "Synopsys I2S Device Driver"
depends on CLKDEV_LOOKUP
+ select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for I2S driver for
Synopsys designware I2S device. The device supports up to
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 8d18bbda661b..a3e97b46b64e 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -22,6 +22,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
/* common register for all channel */
#define IER 0x000
@@ -54,9 +55,39 @@
#define I2S_COMP_VERSION 0x01F8
#define I2S_COMP_TYPE 0x01FC
+/*
+ * Component parameter register fields - define the I2S block's
+ * configuration.
+ */
+#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25)
+#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22)
+#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19)
+#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16)
+#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9)
+#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7)
+#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6)
+#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5)
+#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4)
+#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2)
+#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0)
+
+#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10)
+#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7)
+#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3)
+#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0)
+
+/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */
+#define COMP_MAX_WORDSIZE (1 << 3)
+#define COMP_MAX_DATA_WIDTH (1 << 2)
+
#define MAX_CHANNEL_NUM 8
#define MIN_CHANNEL_NUM 2
+union dw_i2s_snd_dma_data {
+ struct i2s_dma_data pd;
+ struct snd_dmaengine_dai_dma_data dt;
+};
+
struct dw_i2s_dev {
void __iomem *i2s_base;
struct clk *clk;
@@ -65,8 +96,8 @@ struct dw_i2s_dev {
struct device *dev;
/* data related to DMA transfers b/w i2s and DMAC */
- struct i2s_dma_data play_dma_data;
- struct i2s_dma_data capture_dma_data;
+ union dw_i2s_snd_dma_data play_dma_data;
+ union dw_i2s_snd_dma_data capture_dma_data;
struct i2s_clk_config_data config;
int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
};
@@ -153,7 +184,7 @@ static int dw_i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
- struct i2s_dma_data *dma_data = NULL;
+ union dw_i2s_snd_dma_data *dma_data = NULL;
if (!(dev->capability & DWC_I2S_RECORD) &&
(substream->stream == SNDRV_PCM_STREAM_CAPTURE))
@@ -242,13 +273,21 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
config->sample_rate = params_rate(params);
- if (!dev->i2s_clk_cfg)
- return -EINVAL;
+ if (dev->i2s_clk_cfg) {
+ ret = dev->i2s_clk_cfg(config);
+ if (ret < 0) {
+ dev_err(dev->dev, "runtime audio clk config fail\n");
+ return ret;
+ }
+ } else {
+ u32 bitclk = config->sample_rate * config->data_width * 2;
- ret = dev->i2s_clk_cfg(config);
- if (ret < 0) {
- dev_err(dev->dev, "runtime audio clk config fail\n");
- return ret;
+ ret = clk_set_rate(dev->clk, bitclk);
+ if (ret) {
+ dev_err(dev->dev, "Can't set I2S clock rate: %d\n",
+ ret);
+ return ret;
+ }
}
return 0;
@@ -335,20 +374,162 @@ static int dw_i2s_resume(struct snd_soc_dai *dai)
#define dw_i2s_resume NULL
#endif
+/*
+ * The following tables allow a direct lookup of various parameters
+ * defined in the I2S block's configuration in terms of sound system
+ * parameters. Each table is sized to the number of entries possible
+ * according to the number of configuration bits describing an I2S
+ * block parameter.
+ */
+
+/* Maximum bit resolution of a channel - not uniformly spaced */
+static const u32 fifo_width[COMP_MAX_WORDSIZE] = {
+ 12, 16, 20, 24, 32, 0, 0, 0
+};
+
+/* Width of (DMA) bus */
+static const u32 bus_widths[COMP_MAX_DATA_WIDTH] = {
+ DMA_SLAVE_BUSWIDTH_1_BYTE,
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_SLAVE_BUSWIDTH_UNDEFINED
+};
+
+/* PCM format to support channel resolution */
+static const u32 formats[COMP_MAX_WORDSIZE] = {
+ SNDRV_PCM_FMTBIT_S16_LE,
+ SNDRV_PCM_FMTBIT_S16_LE,
+ SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_FMTBIT_S32_LE,
+ 0,
+ 0,
+ 0
+};
+
+static int dw_configure_dai(struct dw_i2s_dev *dev,
+ struct snd_soc_dai_driver *dw_i2s_dai,
+ unsigned int rates)
+{
+ /*
+ * Read component parameter registers to extract
+ * the I2S block's configuration.
+ */
+ u32 comp1 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_1);
+ u32 comp2 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_2);
+ u32 idx;
+
+ if (COMP1_TX_ENABLED(comp1)) {
+ dev_dbg(dev->dev, " designware: play supported\n");
+ idx = COMP1_TX_WORDSIZE_0(comp1);
+ if (WARN_ON(idx >= ARRAY_SIZE(formats)))
+ return -EINVAL;
+ dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM;
+ dw_i2s_dai->playback.channels_max =
+ 1 << (COMP1_TX_CHANNELS(comp1) + 1);
+ dw_i2s_dai->playback.formats = formats[idx];
+ dw_i2s_dai->playback.rates = rates;
+ }
+
+ if (COMP1_RX_ENABLED(comp1)) {
+ dev_dbg(dev->dev, "designware: record supported\n");
+ idx = COMP2_RX_WORDSIZE_0(comp2);
+ if (WARN_ON(idx >= ARRAY_SIZE(formats)))
+ return -EINVAL;
+ dw_i2s_dai->capture.channels_min = MIN_CHANNEL_NUM;
+ dw_i2s_dai->capture.channels_max =
+ 1 << (COMP1_RX_CHANNELS(comp1) + 1);
+ dw_i2s_dai->capture.formats = formats[idx];
+ dw_i2s_dai->capture.rates = rates;
+ }
+
+ return 0;
+}
+
+static int dw_configure_dai_by_pd(struct dw_i2s_dev *dev,
+ struct snd_soc_dai_driver *dw_i2s_dai,
+ struct resource *res,
+ const struct i2s_platform_data *pdata)
+{
+ u32 comp1 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_1);
+ u32 idx = COMP1_APB_DATA_WIDTH(comp1);
+ int ret;
+
+ if (WARN_ON(idx >= ARRAY_SIZE(bus_widths)))
+ return -EINVAL;
+
+ ret = dw_configure_dai(dev, dw_i2s_dai, pdata->snd_rates);
+ if (ret < 0)
+ return ret;
+
+ /* Set DMA slaves info */
+ dev->play_dma_data.pd.data = pdata->play_dma_data;
+ dev->capture_dma_data.pd.data = pdata->capture_dma_data;
+ dev->play_dma_data.pd.addr = res->start + I2S_TXDMA;
+ dev->capture_dma_data.pd.addr = res->start + I2S_RXDMA;
+ dev->play_dma_data.pd.max_burst = 16;
+ dev->capture_dma_data.pd.max_burst = 16;
+ dev->play_dma_data.pd.addr_width = bus_widths[idx];
+ dev->capture_dma_data.pd.addr_width = bus_widths[idx];
+ dev->play_dma_data.pd.filter = pdata->filter;
+ dev->capture_dma_data.pd.filter = pdata->filter;
+
+ return 0;
+}
+
+static int dw_configure_dai_by_dt(struct dw_i2s_dev *dev,
+ struct snd_soc_dai_driver *dw_i2s_dai,
+ struct resource *res)
+{
+ u32 comp1 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_1);
+ u32 comp2 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_2);
+ u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1));
+ u32 idx = COMP1_APB_DATA_WIDTH(comp1);
+ u32 idx2;
+ int ret;
+
+ if (WARN_ON(idx >= ARRAY_SIZE(bus_widths)))
+ return -EINVAL;
+
+ ret = dw_configure_dai(dev, dw_i2s_dai, SNDRV_PCM_RATE_8000_192000);
+ if (ret < 0)
+ return ret;
+
+ if (COMP1_TX_ENABLED(comp1)) {
+ idx2 = COMP1_TX_WORDSIZE_0(comp1);
+
+ dev->capability |= DWC_I2S_PLAY;
+ dev->play_dma_data.dt.addr = res->start + I2S_TXDMA;
+ dev->play_dma_data.dt.addr_width = bus_widths[idx];
+ dev->play_dma_data.dt.chan_name = "TX";
+ dev->play_dma_data.dt.fifo_size = fifo_depth *
+ (fifo_width[idx2]) >> 8;
+ dev->play_dma_data.dt.maxburst = 16;
+ }
+ if (COMP1_RX_ENABLED(comp1)) {
+ idx2 = COMP2_RX_WORDSIZE_0(comp2);
+
+ dev->capability |= DWC_I2S_RECORD;
+ dev->capture_dma_data.dt.addr = res->start + I2S_RXDMA;
+ dev->capture_dma_data.dt.addr_width = bus_widths[idx];
+ dev->capture_dma_data.dt.chan_name = "RX";
+ dev->capture_dma_data.dt.fifo_size = fifo_depth *
+ (fifo_width[idx2] >> 8);
+ dev->capture_dma_data.dt.maxburst = 16;
+ }
+
+ return 0;
+
+}
+
static int dw_i2s_probe(struct platform_device *pdev)
{
const struct i2s_platform_data *pdata = pdev->dev.platform_data;
struct dw_i2s_dev *dev;
struct resource *res;
int ret;
- unsigned int cap;
struct snd_soc_dai_driver *dw_i2s_dai;
- if (!pdata) {
- dev_err(&pdev->dev, "Invalid platform data\n");
- return -EINVAL;
- }
-
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_warn(&pdev->dev, "kzalloc fail\n");
@@ -356,83 +537,67 @@ static int dw_i2s_probe(struct platform_device *pdev)
}
dw_i2s_dai = devm_kzalloc(&pdev->dev, sizeof(*dw_i2s_dai), GFP_KERNEL);
- if (!dw_i2s_dai) {
- dev_err(&pdev->dev, "mem allocation failed for dai driver\n");
+ if (!dw_i2s_dai)
return -ENOMEM;
- }
dw_i2s_dai->ops = &dw_i2s_dai_ops;
dw_i2s_dai->suspend = dw_i2s_suspend;
dw_i2s_dai->resume = dw_i2s_resume;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no i2s resource defined\n");
- return -ENODEV;
- }
-
dev->i2s_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dev->i2s_base)) {
- dev_err(&pdev->dev, "ioremap fail for i2s_region\n");
+ if (IS_ERR(dev->i2s_base))
return PTR_ERR(dev->i2s_base);
- }
- cap = pdata->cap;
- dev->capability = cap;
- dev->i2s_clk_cfg = pdata->i2s_clk_cfg;
+ dev->dev = &pdev->dev;
+ if (pdata) {
+ ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata);
+ if (ret < 0)
+ return ret;
+
+ dev->capability = pdata->cap;
+ dev->i2s_clk_cfg = pdata->i2s_clk_cfg;
+ if (!dev->i2s_clk_cfg) {
+ dev_err(&pdev->dev, "no clock configure method\n");
+ return -ENODEV;
+ }
- /* Set DMA slaves info */
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ } else {
+ ret = dw_configure_dai_by_dt(dev, dw_i2s_dai, res);
+ if (ret < 0)
+ return ret;
- dev->play_dma_data.data = pdata->play_dma_data;
- dev->capture_dma_data.data = pdata->capture_dma_data;
- dev->play_dma_data.addr = res->start + I2S_TXDMA;
- dev->capture_dma_data.addr = res->start + I2S_RXDMA;
- dev->play_dma_data.max_burst = 16;
- dev->capture_dma_data.max_burst = 16;
- dev->play_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- dev->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- dev->play_dma_data.filter = pdata->filter;
- dev->capture_dma_data.filter = pdata->filter;
-
- dev->clk = clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get(&pdev->dev, "i2sclk");
+ }
if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
+ return PTR_ERR(dev->clk);
- ret = clk_enable(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
if (ret < 0)
- goto err_clk_put;
-
- if (cap & DWC_I2S_PLAY) {
- dev_dbg(&pdev->dev, " designware: play supported\n");
- dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM;
- dw_i2s_dai->playback.channels_max = pdata->channel;
- dw_i2s_dai->playback.formats = pdata->snd_fmts;
- dw_i2s_dai->playback.rates = pdata->snd_rates;
- }
-
- if (cap & DWC_I2S_RECORD) {
- dev_dbg(&pdev->dev, "designware: record supported\n");
- dw_i2s_dai->capture.channels_min = MIN_CHANNEL_NUM;
- dw_i2s_dai->capture.channels_max = pdata->channel;
- dw_i2s_dai->capture.formats = pdata->snd_fmts;
- dw_i2s_dai->capture.rates = pdata->snd_rates;
- }
+ return ret;
- dev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, dev);
- ret = snd_soc_register_component(&pdev->dev, &dw_i2s_component,
+ ret = devm_snd_soc_register_component(&pdev->dev, &dw_i2s_component,
dw_i2s_dai, 1);
if (ret != 0) {
dev_err(&pdev->dev, "not able to register dai\n");
goto err_clk_disable;
}
+ if (!pdata) {
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not register PCM: %d\n", ret);
+ goto err_clk_disable;
+ }
+ }
+
return 0;
err_clk_disable:
- clk_disable(dev->clk);
-err_clk_put:
- clk_put(dev->clk);
+ clk_disable_unprepare(dev->clk);
return ret;
}
@@ -440,18 +605,26 @@ static int dw_i2s_remove(struct platform_device *pdev)
{
struct dw_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
- snd_soc_unregister_component(&pdev->dev);
-
- clk_put(dev->clk);
+ clk_disable_unprepare(dev->clk);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dw_i2s_of_match[] = {
+ { .compatible = "snps,designware-i2s", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dw_i2s_of_match);
+#endif
+
static struct platform_driver dw_i2s_driver = {
.probe = dw_i2s_probe,
.remove = dw_i2s_remove,
.driver = {
.name = "designware-i2s",
+ .of_match_table = of_match_ptr(dw_i2s_of_match),
},
};
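
With the COMP1/COMP2 decoding above, the driver can size its DAI capabilities from the hardware synthesis parameters instead of requiring platform data: GENMASK(h, l) builds the mask for bits h..l, the extracted word-size field indexes the formats[] table, and the channel count is 2^(field + 1). A condensed, illustrative view of what dw_configure_dai() derives (error handling and the rate setup omitted):

static void mychip_size_dai(struct dw_i2s_dev *dev,
			    struct snd_soc_dai_driver *dai)
{
	u32 comp1 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_1);
	u32 comp2 = i2s_read_reg(dev->i2s_base, I2S_COMP_PARAM_2);

	if (COMP1_TX_ENABLED(comp1)) {
		u32 idx = COMP1_TX_WORDSIZE_0(comp1);	/* 3-bit index */

		dai->playback.channels_min = MIN_CHANNEL_NUM;
		dai->playback.channels_max =
			1 << (COMP1_TX_CHANNELS(comp1) + 1);	/* 2..16 */
		dai->playback.formats = formats[idx];	/* e.g. 2 -> S24_LE */
	}

	if (COMP1_RX_ENABLED(comp1)) {
		u32 idx = COMP2_RX_WORDSIZE_0(comp2);

		dai->capture.channels_min = MIN_CHANNEL_NUM;
		dai->capture.channels_max =
			1 << (COMP1_RX_CHANNELS(comp1) + 1);
		dai->capture.formats = formats[idx];
	}
}
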
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 9ce70fc67b09..e1aa3834b101 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -42,25 +42,6 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret;
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
- /* fsl_ssi lacks the set_fmt ops. */
- if (ret && ret != -ENOTSUPP) {
- dev_err(cpu_dai->dev,
- "Failed to set the cpu dai format.\n");
- return ret;
- }
-
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
- if (ret) {
- dev_err(cpu_dai->dev,
- "Failed to set the codec format.\n");
- return ret;
- }
-
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
CODEC_CLOCK, SND_SOC_CLOCK_OUT);
if (ret) {
@@ -69,7 +50,7 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
+ snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 0);
ret = snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0,
SND_SOC_CLOCK_IN);
@@ -91,6 +72,8 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
.name = "tlv320aic23",
.stream_name = "TLV320AIC23",
.codec_dai_name = "tlv320aic23-hifi",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
.ops = &eukrea_tlv320_snd_ops,
};
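
Moving the format into the dai_link relies on the ASoC core applying snd_soc_dai_link.dai_fmt to both the CPU and codec DAIs at card initialisation, tolerating DAIs without a set_fmt op, so hw_params no longer needs explicit snd_soc_dai_set_fmt() calls or the -ENOTSUPP special case for fsl_ssi. A minimal link declaration in that style (illustrative names):

static struct snd_soc_dai_link mycard_dai_link = {
	.name		= "hifi",
	.stream_name	= "HiFi",
	.codec_dai_name	= "mycodec-hifi",
	.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			  SND_SOC_DAIFMT_CBM_CFM,
	.ops		= &mycard_snd_ops,	/* assumed ops table */
};
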
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 026a80117540..c068494bae30 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -818,7 +818,6 @@ static int fsl_asrc_probe(struct platform_device *pdev)
return -ENOMEM;
asrc_priv->pdev = pdev;
- strncpy(asrc_priv->name, np->name, sizeof(asrc_priv->name) - 1);
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -837,12 +836,12 @@ static int fsl_asrc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
return irq;
}
ret = devm_request_irq(&pdev->dev, irq, fsl_asrc_isr, 0,
- asrc_priv->name, asrc_priv);
+ dev_name(&pdev->dev), asrc_priv);
if (ret) {
dev_err(&pdev->dev, "failed to claim irq %u: %d\n", irq, ret);
return ret;
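
Dropping the cached copy of np->name works because dev_name(&pdev->dev) already yields a stable, unique string suitable for both the IRQ name and log messages; the same substitution is applied to fsl_esai, fsl_sai, fsl_spdif and fsl_ssi below. The idiom, sketched with an assumed mychip_isr():

static int mychip_request_irq(struct platform_device *pdev, void *priv)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, mychip_isr, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		dev_err(&pdev->dev, "failed to claim irq %u: %d\n", irq, ret);

	return ret;
}
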
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index a3f211f53c23..4aed63c4b431 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -433,7 +433,6 @@ struct fsl_asrc_pair {
* @channel_avail: non-occupied channel numbers
* @asrc_rate: default sample rate for ASoC Back-Ends
* @asrc_width: default sample width for ASoC Back-Ends
- * @name: driver name
*/
struct fsl_asrc {
struct snd_dmaengine_dai_dma_data dma_params_rx;
@@ -452,8 +451,6 @@ struct fsl_asrc {
int asrc_rate;
int asrc_width;
-
- char name[32];
};
extern struct snd_soc_platform_driver fsl_asrc_platform;
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 1c08ab13637c..5c7597191e3f 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -774,7 +774,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
return irq;
}
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 91a550f4a10d..5e793bbb6b02 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -302,7 +302,7 @@
#define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
#define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
#define ESAI_xCCR_xDC_SHIFT 9
-#define ESAI_xCCR_xDC_WIDTH 4
+#define ESAI_xCCR_xDC_WIDTH 5
#define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
#define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
#define ESAI_xCCR_xPSR_SHIFT 8
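
The xDC (frame rate divider) field of the ESAI xCCR registers is five bits wide, occupying bits 13:9, so the previous width of 4 lost the top bit of the divider. A small self-contained check of the mask arithmetic (values mirror the macros above):

#include <stdio.h>

#define xDC_SHIFT	9
#define xDC_WIDTH_OLD	4
#define xDC_WIDTH_NEW	5
#define xDC_MASK(w)	(((1 << (w)) - 1) << xDC_SHIFT)

int main(void)
{
	/* old mask 0x1e00 covers bits 12:9; new mask 0x3e00 covers 13:9 */
	printf("old mask 0x%04x, new mask 0x%04x\n",
	       xDC_MASK(xDC_WIDTH_OLD), xDC_MASK(xDC_WIDTH_NEW));

	/* A 32-slot network needs xDC = 31; with the old width bit 13 is
	 * masked off and the hardware would only see 15 (16 slots). */
	printf("32 slots -> field %d (old) vs %d (new)\n",
	       ((31 << xDC_SHIFT) & xDC_MASK(xDC_WIDTH_OLD)) >> xDC_SHIFT,
	       ((31 << xDC_SHIFT) & xDC_MASK(xDC_WIDTH_NEW)) >> xDC_SHIFT);
	return 0;
}
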
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 032d2d33619c..ec79c3d5e65e 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -612,7 +612,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
return irq;
}
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index af0429421fc8..75870c0ea2c9 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -90,7 +90,6 @@ struct spdif_mixer_control {
* @sysclk: system clock for rx clock rate measurement
* @dma_params_tx: DMA parameters for transmit channel
* @dma_params_rx: DMA parameters for receive channel
- * @name: driver name
*/
struct fsl_spdif_priv {
struct spdif_mixer_control fsl_spdif_control;
@@ -109,12 +108,8 @@ struct fsl_spdif_priv {
struct clk *sysclk;
struct snd_dmaengine_dai_dma_data dma_params_tx;
struct snd_dmaengine_dai_dma_data dma_params_rx;
-
- /* The name space will be allocated dynamically */
- char name[0];
};
-
/* DPLL locked and lock loss interrupt handler */
static void spdif_irq_dpll_lock(struct fsl_spdif_priv *spdif_priv)
{
@@ -1169,19 +1164,15 @@ static int fsl_spdif_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- spdif_priv = devm_kzalloc(&pdev->dev,
- sizeof(struct fsl_spdif_priv) + strlen(np->name) + 1,
- GFP_KERNEL);
+ spdif_priv = devm_kzalloc(&pdev->dev, sizeof(*spdif_priv), GFP_KERNEL);
if (!spdif_priv)
return -ENOMEM;
- strcpy(spdif_priv->name, np->name);
-
spdif_priv->pdev = pdev;
/* Initialize this copy of the CPU DAI driver structure */
memcpy(&spdif_priv->cpu_dai_drv, &fsl_spdif_dai, sizeof(fsl_spdif_dai));
- spdif_priv->cpu_dai_drv.name = spdif_priv->name;
+ spdif_priv->cpu_dai_drv.name = dev_name(&pdev->dev);
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1198,12 +1189,12 @@ static int fsl_spdif_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
return irq;
}
ret = devm_request_irq(&pdev->dev, irq, spdif_isr, 0,
- spdif_priv->name, spdif_priv);
+ dev_name(&pdev->dev), spdif_priv);
if (ret) {
dev_err(&pdev->dev, "could not claim irq %u\n", irq);
return ret;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index a65f17d57ffb..2595611e8a6d 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -160,7 +160,7 @@ struct fsl_ssi_soc_data {
*/
struct fsl_ssi_private {
struct regmap *regs;
- unsigned int irq;
+ int irq;
struct snd_soc_dai_driver cpu_dai_drv;
unsigned int dai_fmt;
@@ -992,8 +992,8 @@ static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN,
CCSR_SSI_SCR_SSIEN);
- regmap_write(regs, CCSR_SSI_STMSK, tx_mask);
- regmap_write(regs, CCSR_SSI_SRMSK, rx_mask);
+ regmap_write(regs, CCSR_SSI_STMSK, ~tx_mask);
+ regmap_write(regs, CCSR_SSI_SRMSK, ~rx_mask);
regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
@@ -1363,8 +1363,8 @@ static int fsl_ssi_probe(struct platform_device *pdev)
ssi_private->irq = platform_get_irq(pdev, 0);
if (!ssi_private->irq) {
- dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
- return -ENXIO;
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
+ return ssi_private->irq;
}
/* Are the RX and the TX clocks locked? */
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index 2ac7755da876..b9e42b503a37 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -86,33 +86,6 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
}
EXPORT_SYMBOL(fsl_asoc_get_dma_channel);
-/**
- * fsl_asoc_xlate_tdm_slot_mask - generate TDM slot TX/RX mask.
- *
- * @slots: Number of slots in use.
- * @tx_mask: bitmask representing active TX slots.
- * @rx_mask: bitmask representing active RX slots.
- *
- * This function used to generate the TDM slot TX/RX mask. And the TX/RX
- * mask will use a 0 bit for an active slot as default, and the default
- * active bits are at the LSB of the mask value.
- */
-int fsl_asoc_xlate_tdm_slot_mask(unsigned int slots,
- unsigned int *tx_mask,
- unsigned int *rx_mask)
-{
- if (!slots)
- return -EINVAL;
-
- if (tx_mask)
- *tx_mask = ~((1 << slots) - 1);
- if (rx_mask)
- *rx_mask = ~((1 << slots) - 1);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_asoc_xlate_tdm_slot_mask);
-
MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale ASoC utility code");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/fsl_utils.h b/sound/soc/fsl/fsl_utils.h
index df535db40313..1687b66ef18e 100644
--- a/sound/soc/fsl/fsl_utils.h
+++ b/sound/soc/fsl/fsl_utils.h
@@ -22,7 +22,4 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np, const char *name,
struct snd_soc_dai_link *dai,
unsigned int *dma_channel_id,
unsigned int *dma_id);
-int fsl_asoc_xlate_tdm_slot_mask(unsigned int slots,
- unsigned int *tx_mask,
- unsigned int *rx_mask);
#endif /* _FSL_UTILS_H */
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 6bf5bce01a92..9e6493d4e7ff 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -37,8 +37,7 @@ static int imx_mc13783_hifi_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
- ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xfffffffc, 0xfffffffc,
- 4, 16);
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 4, 16);
if (ret)
return ret;
@@ -46,7 +45,7 @@ static int imx_mc13783_hifi_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
- ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x0, 0xfffffffc, 2, 16);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 16);
if (ret)
return ret;
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index e94704f1b9ee..33da26a12457 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -60,6 +60,7 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
data->card.dev = &pdev->dev;
data->card.dai_link = &data->dai;
data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(&data->card, "model");
if (ret)
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index fa801e17c51e..461ce27b884f 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -74,8 +74,8 @@ static int imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
sccr |= SSI_STCCR_DC(slots - 1);
writel(sccr, ssi->base + SSI_SRCCR);
- writel(tx_mask, ssi->base + SSI_STMSK);
- writel(rx_mask, ssi->base + SSI_SRMSK);
+ writel(~tx_mask, ssi->base + SSI_STMSK);
+ writel(~rx_mask, ssi->base + SSI_SRMSK);
return 0;
}
@@ -340,7 +340,6 @@ static const struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = {
.set_fmt = imx_ssi_set_dai_fmt,
.set_clkdiv = imx_ssi_set_dai_clkdiv,
.set_sysclk = imx_ssi_set_dai_sysclk,
- .xlate_tdm_slot_mask = fsl_asoc_xlate_tdm_slot_mask,
.set_tdm_slot = imx_ssi_set_dai_tdm_slot,
.trigger = imx_ssi_trigger,
};
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 4caacb05a623..cd146d4fa805 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
if (ret)
goto clk_fail;
data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
data->card.dai_link = &data->dai;
data->card.dapm_widgets = imx_wm8962_dapm_widgets;
data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
diff --git a/sound/soc/fsl/mx27vis-aic32x4.c b/sound/soc/fsl/mx27vis-aic32x4.c
index b1ced7b8d80c..198eeb3f3f7a 100644
--- a/sound/soc/fsl/mx27vis-aic32x4.c
+++ b/sound/soc/fsl/mx27vis-aic32x4.c
@@ -55,16 +55,6 @@ static int mx27vis_aic32x4_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret;
- u32 dai_format;
-
- dai_format = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM;
-
- /* set codec DAI configuration */
- snd_soc_dai_set_fmt(codec_dai, dai_format);
-
- /* set cpu DAI configuration */
- snd_soc_dai_set_fmt(cpu_dai, dai_format);
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
25000000, SND_SOC_CLOCK_OUT);
@@ -164,6 +154,8 @@ static struct snd_soc_dai_link mx27vis_aic32x4_dai = {
.platform_name = "imx-ssi.0",
.codec_name = "tlv320aic32x4.0-0018",
.cpu_dai_name = "imx-ssi.0",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
.ops = &mx27vis_aic32x4_snd_ops,
};
diff --git a/sound/soc/fsl/wm1133-ev1.c b/sound/soc/fsl/wm1133-ev1.c
index 804749a6c61e..a958937ab405 100644
--- a/sound/soc/fsl/wm1133-ev1.c
+++ b/sound/soc/fsl/wm1133-ev1.c
@@ -87,7 +87,6 @@ static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
snd_pcm_format_t format = params_format(params);
unsigned int rate = params_rate(params);
unsigned int channels = params_channels(params);
- u32 dai_format;
/* find the correct audio parameters */
for (i = 0; i < ARRAY_SIZE(wm8350_audio); i++) {
@@ -104,22 +103,13 @@ static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
/* codec FLL input is 14.75 MHz from MCLK */
snd_soc_dai_set_pll(codec_dai, 0, 0, 14750000, wm8350_audio[i].sysclk);
- dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM;
-
- /* set codec DAI configuration */
- snd_soc_dai_set_fmt(codec_dai, dai_format);
-
- /* set cpu DAI configuration */
- snd_soc_dai_set_fmt(cpu_dai, dai_format);
-
/* TODO: The SSI driver should figure this out for us */
switch (channels) {
case 2:
- snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
+ snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 0);
break;
case 1:
- snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffe, 0xffffffe, 1, 0);
+ snd_soc_dai_set_tdm_slot(cpu_dai, 0x1, 0x1, 1, 0);
break;
default:
return -EINVAL;
@@ -244,6 +234,8 @@ static struct snd_soc_dai_link wm1133_ev1_dai = {
.init = wm1133_ev1_init,
.ops = &wm1133_ev1_ops,
.symmetric_rates = 1,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
};
static struct snd_soc_card wm1133_ev1 = {
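The mx27vis-aic32x4 and wm1133-ev1 hunks move the DAI format out of hw_params() and into the dai_link's .dai_fmt field, which the ASoC core applies to both ends of the link once at init time instead of on every stream start. A minimal sketch of the declarative form, with placeholder names:

/* Sketch: format declared on the link rather than set in hw_params(). */
static struct snd_soc_dai_link example_dai_link = {
	.name           = "example-link",        /* placeholders */
	.stream_name    = "example-hifi",
	.cpu_dai_name   = "example-cpu-dai",
	.codec_dai_name = "example-codec-dai",
	.codec_name     = "example-codec",
	.dai_fmt        = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			  SND_SOC_DAIFMT_CBM_CFM,
};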
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fb9240fdc9b7..f7c6734bd5da 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -39,6 +39,37 @@ struct simple_card_data {
#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + i)
#define simple_priv_to_props(priv, i) ((priv)->dai_props + i)
+static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_dai_props *dai_props =
+ &priv->dai_props[rtd - rtd->card->rtd];
+ int ret;
+
+ ret = clk_prepare_enable(dai_props->cpu_dai.clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dai_props->codec_dai.clk);
+ if (ret)
+ clk_disable_unprepare(dai_props->cpu_dai.clk);
+
+ return ret;
+}
+
+static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_dai_props *dai_props =
+ &priv->dai_props[rtd - rtd->card->rtd];
+
+ clk_disable_unprepare(dai_props->cpu_dai.clk);
+
+ clk_disable_unprepare(dai_props->codec_dai.clk);
+}
+
static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -58,6 +89,8 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
}
static struct snd_soc_ops asoc_simple_card_ops = {
+ .startup = asoc_simple_card_startup,
+ .shutdown = asoc_simple_card_shutdown,
.hw_params = asoc_simple_card_hw_params,
};
@@ -219,6 +252,7 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
}
dai->sysclk = clk_get_rate(clk);
+ dai->clk = clk;
} else if (!of_property_read_u32(np, "system-clock-frequency", &val)) {
dai->sysclk = val;
} else {
@@ -452,9 +486,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
}
/* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct platform_device *pdev)
+static int asoc_simple_card_unref(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
struct snd_soc_dai_link *dai_link;
int num_links;
@@ -556,7 +589,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
return ret;
err:
- asoc_simple_card_unref(pdev);
+ asoc_simple_card_unref(&priv->snd_card);
return ret;
}
@@ -572,7 +605,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
&simple_card_mic_jack_gpio);
- return asoc_simple_card_unref(pdev);
+ return asoc_simple_card_unref(card);
}
static const struct of_device_id asoc_simple_of_match[] = {
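The simple-card hunks keep the clock handle found during DT parsing (dai->clk = clk) and gate it per substream through the new startup/shutdown ops. A minimal sketch of the idiom, assuming the same pairing; a NULL clk (the "system-clock-frequency" case) should be a harmless no-op for clk_prepare_enable()/clk_disable_unprepare() in the common clk framework:

/* Sketch: per-stream clock gating as wired into asoc_simple_card_ops.
 * Assumes <linux/clk.h>; cpu_clk/codec_clk stand for dai_props->*.clk.
 */
static int example_startup(struct clk *cpu_clk, struct clk *codec_clk)
{
	int ret = clk_prepare_enable(cpu_clk);      /* NULL clk => returns 0 */
	if (ret)
		return ret;

	ret = clk_prepare_enable(codec_clk);
	if (ret)
		clk_disable_unprepare(cpu_clk);     /* unwind on failure */
	return ret;
}

static void example_shutdown(struct clk *cpu_clk, struct clk *codec_clk)
{
	clk_disable_unprepare(cpu_clk);
	clk_disable_unprepare(codec_clk);
}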
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f86de1211b96..ee03dbdda235 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -46,7 +46,7 @@ config SND_SOC_INTEL_BAYTRAIL
config SND_SOC_INTEL_HASWELL_MACH
tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
- depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \\
+ depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \
I2C_DESIGNWARE_PLATFORM
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
@@ -76,7 +76,7 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
config SND_SOC_INTEL_BROADWELL_MACH
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
- depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \\
+ depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \
I2C_DESIGNWARE_PLATFORM
select SND_SOC_INTEL_HASWELL
select SND_COMPRESS_OFFLOAD
@@ -110,3 +110,14 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
platforms with RT5672 audio codec.
Say Y if you have such a device
If unsure select "N".
+
+config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
+ tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645 codec"
+ depends on X86_INTEL_LPSS
+ select SND_SOC_RT5645
+ select SND_SST_MFLD_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with RT5645 audio codec.
+ If unsure select "N".
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index e928ec385300..a8e53c45c6b6 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -28,6 +28,7 @@ snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
snd-soc-sst-broadwell-objs := broadwell.o
snd-soc-sst-bytcr-dpcm-rt5640-objs := bytcr_dpcm_rt5640.o
snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
+snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-dpcm-rt5640.o
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
+obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o
# DSP driver
obj-$(CONFIG_SND_SST_IPC) += sst/
diff --git a/sound/soc/intel/broadwell.c b/sound/soc/intel/broadwell.c
index 7cf95d5d5d80..9cf7d01479ad 100644
--- a/sound/soc/intel/broadwell.c
+++ b/sound/soc/intel/broadwell.c
@@ -140,8 +140,6 @@ static struct snd_soc_ops broadwell_rt286_ops = {
static int broadwell_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
struct sst_pdata *pdata = dev_get_platdata(rtd->platform->dev);
struct sst_hsw *broadwell = pdata->dsp;
int ret;
@@ -155,14 +153,6 @@ static int broadwell_rtd_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- /* always connected - check HP for jack detect */
- snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
- snd_soc_dapm_enable_pin(dapm, "Speaker");
- snd_soc_dapm_enable_pin(dapm, "Mic Jack");
- snd_soc_dapm_enable_pin(dapm, "Line Jack");
- snd_soc_dapm_enable_pin(dapm, "DMIC1");
- snd_soc_dapm_enable_pin(dapm, "DMIC2");
-
return 0;
}
diff --git a/sound/soc/intel/byt-rt5640.c b/sound/soc/intel/byt-rt5640.c
index 0cba7830c5e9..354eaad886e1 100644
--- a/sound/soc/intel/byt-rt5640.c
+++ b/sound/soc/intel/byt-rt5640.c
@@ -132,7 +132,6 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
struct snd_soc_codec *codec = runtime->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_card *card = runtime->card;
const struct snd_soc_dapm_route *custom_map;
int num_routes;
@@ -161,7 +160,7 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
num_routes = ARRAY_SIZE(byt_rt5640_intmic_dmic1_map);
}
- ret = snd_soc_dapm_add_routes(dapm, custom_map, num_routes);
+ ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
if (ret)
return ret;
@@ -171,13 +170,8 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
return ret;
}
- snd_soc_dapm_ignore_suspend(dapm, "HPOL");
- snd_soc_dapm_ignore_suspend(dapm, "HPOR");
-
- snd_soc_dapm_ignore_suspend(dapm, "SPOLP");
- snd_soc_dapm_ignore_suspend(dapm, "SPOLN");
- snd_soc_dapm_ignore_suspend(dapm, "SPORP");
- snd_soc_dapm_ignore_suspend(dapm, "SPORN");
+ snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone");
+ snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
return ret;
}
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c
index eef0c56ec32e..59308629043e 100644
--- a/sound/soc/intel/bytcr_dpcm_rt5640.c
+++ b/sound/soc/intel/bytcr_dpcm_rt5640.c
@@ -215,7 +215,6 @@ static int snd_byt_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_byt_mc_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "bytt100_rt5640",
.pm = &snd_soc_pm_ops,
},
diff --git a/sound/soc/intel/cht_bsw_rt5645.c b/sound/soc/intel/cht_bsw_rt5645.c
new file mode 100644
index 000000000000..bd29617a9ab9
--- /dev/null
+++ b/sound/soc/intel/cht_bsw_rt5645.c
@@ -0,0 +1,326 @@
+/*
+ * cht-bsw-rt5645.c - ASoC machine driver for Intel Cherryview-based platforms
+ * Cherrytrail and Braswell, with RT5645 codec.
+ *
+ * Copyright (C) 2015 Intel Corp
+ * Author: Fang, Yang A <yang.a.fang@intel.com>
+ * N,Harshapriya <harshapriya.n@intel.com>
+ * This file is modified from cht_bsw_rt5672.c
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "../codecs/rt5645.h"
+#include "sst-atom-controls.h"
+
+#define CHT_PLAT_CLK_3_HZ 19200000
+#define CHT_CODEC_DAI "rt5645-aif1"
+
+struct cht_mc_private {
+ struct snd_soc_jack hp_jack;
+ struct snd_soc_jack mic_jack;
+};
+
+static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
+{
+ int i;
+
+ for (i = 0; i < card->num_rtd; i++) {
+ struct snd_soc_pcm_runtime *rtd;
+
+ rtd = card->rtd + i;
+ if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI,
+ strlen(CHT_CODEC_DAI)))
+ return rtd->codec_dai;
+ }
+ return NULL;
+}
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret;
+
+ codec_dai = cht_get_codec_dai(card);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
+ return -EIO;
+ }
+
+ if (!SND_SOC_DAPM_EVENT_OFF(event))
+ return 0;
+
+ /* Set codec sysclk source to its internal clock because codec PLL will
+ * be off when idle and MCLK will also be off by ACPI when codec is
+ * runtime suspended. Codec needs clock for jack detection and button
+ * press.
+ */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_RCCLK,
+ 0, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route cht_audio_map[] = {
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN1N", NULL, "Headset Mic"},
+ {"DMIC L1", NULL, "Int Mic"},
+ {"DMIC R1", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"Ext Spk", NULL, "SPOL"},
+ {"Ext Spk", NULL, "SPOR"},
+ {"AIF1 Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Tx", NULL, "codec_out0"},
+ {"ssp2 Tx", NULL, "codec_out1"},
+ {"codec_in0", NULL, "ssp2 Rx" },
+ {"codec_in1", NULL, "ssp2 Rx" },
+ {"ssp2 Rx", NULL, "AIF1 Capture"},
+ {"Headphone", NULL, "Platform Clock"},
+ {"Headset Mic", NULL, "Platform Clock"},
+ {"Int Mic", NULL, "Platform Clock"},
+ {"Ext Spk", NULL, "Platform Clock"},
+};
+
+static const struct snd_kcontrol_new cht_mc_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
+};
+
+static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ /* set codec PLL source to the 19.2MHz platform clock (MCLK) */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5645_PLL1_S_MCLK,
+ CHT_PLAT_CLK_3_HZ, params_rate(params) * 512);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_PLL1,
+ params_rate(params) * 512, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ int ret;
+ struct snd_soc_codec *codec = runtime->codec;
+ struct snd_soc_dai *codec_dai = runtime->codec_dai;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
+
+ /* Select clk_i2s1_asrc as ASRC clock source */
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_DA_STEREO_FILTER |
+ RT5645_DA_MONO_L_FILTER |
+ RT5645_DA_MONO_R_FILTER |
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+
+ /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
+ if (ret < 0) {
+ dev_err(runtime->dev, "can't set codec TDM slot %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_jack_new(codec, "Headphone Jack",
+ SND_JACK_HEADPHONE,
+ &ctx->hp_jack);
+ if (ret) {
+ dev_err(runtime->dev, "HP jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_jack_new(codec, "Mic Jack",
+ SND_JACK_MICROPHONE,
+ &ctx->mic_jack);
+ if (ret) {
+ dev_err(runtime->dev, "Mic jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ rt5645_set_jack_detect(codec, &ctx->hp_jack, &ctx->mic_jack);
+
+ return ret;
+}
+
+static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /* The DSP will convert the FE rate to 48 kHz, stereo, 24 bits */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP2 to 24-bit */
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+}
+
+static unsigned int rates_48000[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+ .count = ARRAY_SIZE(rates_48000),
+ .list = rates_48000,
+};
+
+static int cht_aif1_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_48000);
+}
+
+static struct snd_soc_ops cht_aif1_ops = {
+ .startup = cht_aif1_startup,
+};
+
+static struct snd_soc_ops cht_be_ssp2_ops = {
+ .hw_params = cht_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link cht_dailink[] = {
+ [MERR_DPCM_AUDIO] = {
+ .name = "Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "media-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &cht_aif1_ops,
+ },
+ [MERR_DPCM_COMPR] = {
+ .name = "Compressed Port",
+ .stream_name = "Compress",
+ .cpu_dai_name = "compress-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ },
+ /* CODEC<->CODEC link */
+ /* back ends */
+ {
+ .name = "SSP2-Codec",
+ .be_id = 1,
+ .cpu_dai_name = "ssp2-port",
+ .platform_name = "sst-mfld-platform",
+ .no_pcm = 1,
+ .codec_dai_name = "rt5645-aif1",
+ .codec_name = "i2c-10EC5645:00",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .init = cht_codec_init,
+ .be_hw_params_fixup = cht_codec_fixup,
+ .ignore_suspend = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &cht_be_ssp2_ops,
+ },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_cht = {
+ .name = "chtrt5645",
+ .dai_link = cht_dailink,
+ .num_links = ARRAY_SIZE(cht_dailink),
+ .dapm_widgets = cht_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cht_dapm_widgets),
+ .dapm_routes = cht_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cht_audio_map),
+ .controls = cht_mc_controls,
+ .num_controls = ARRAY_SIZE(cht_mc_controls),
+};
+
+static int snd_cht_mc_probe(struct platform_device *pdev)
+{
+ int ret_val = 0;
+ struct cht_mc_private *drv;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
+ if (!drv)
+ return -ENOMEM;
+
+ snd_soc_card_cht.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
+ ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
+ if (ret_val) {
+ dev_err(&pdev->dev,
+ "snd_soc_register_card failed %d\n", ret_val);
+ return ret_val;
+ }
+ platform_set_drvdata(pdev, &snd_soc_card_cht);
+ return ret_val;
+}
+
+static struct platform_driver snd_cht_mc_driver = {
+ .driver = {
+ .name = "cht-bsw-rt5645",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = snd_cht_mc_probe,
+};
+
+module_platform_driver(snd_cht_mc_driver)
+
+MODULE_DESCRIPTION("ASoC Intel(R) Braswell Machine driver");
+MODULE_AUTHOR("Fang, Yang A,N,Harshapriya");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cht-bsw-rt5645");
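In the new machine driver above, cht_aif1_hw_params() clocks the codec from PLL1, fed by the fixed 19.2 MHz platform clock, and requests a system clock of 512 * fs. A quick worked check of the numbers for a 48 kHz stream (the only rate the front end allows via constraints_48000), error handling omitted in this sketch:

/* Sketch: the clock chain cht_aif1_hw_params() sets up for rate = 48000. */
unsigned int rate   = 48000;            /* params_rate(params)             */
unsigned int sysclk = rate * 512;       /* 24576000 Hz requested from PLL1 */

snd_soc_dai_set_pll(codec_dai, 0, RT5645_PLL1_S_MCLK,
		    CHT_PLAT_CLK_3_HZ /* 19200000 */, sysclk);
snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_PLL1, sysclk, SND_SOC_CLOCK_IN);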
diff --git a/sound/soc/intel/cht_bsw_rt5672.c b/sound/soc/intel/cht_bsw_rt5672.c
index 9b8b561171b7..ff016621583a 100644
--- a/sound/soc/intel/cht_bsw_rt5672.c
+++ b/sound/soc/intel/cht_bsw_rt5672.c
@@ -140,6 +140,7 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
struct snd_soc_dai *codec_dai = runtime->codec_dai;
+ struct snd_soc_codec *codec = codec_dai->codec;
/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
@@ -148,6 +149,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
return ret;
}
+ /* Select codec ASRC clock source to track I2S1 clock, because codec
+ * is in slave mode and 100fs I2S format (BCLK = 100 * LRCLK) cannot
+ * be supported by RT5672. Otherwise, ASRC will be disabled and cause
+ * noise.
+ */
+ rt5670_sel_asrc_clk_src(codec,
+ RT5670_DA_STEREO_FILTER
+ | RT5670_DA_MONO_L_FILTER
+ | RT5670_DA_MONO_R_FILTER
+ | RT5670_AD_STEREO_FILTER
+ | RT5670_AD_MONO_L_FILTER
+ | RT5670_AD_MONO_R_FILTER,
+ RT5670_CLK_SEL_I2S1_ASRC);
return 0;
}
@@ -270,7 +284,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_cht_mc_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "cht-bsw-rt5672",
.pm = &snd_soc_pm_ops,
},
diff --git a/sound/soc/intel/sst-baytrail-pcm.c b/sound/soc/intel/sst-baytrail-pcm.c
index 3bb6288d8b4d..224c49c9f135 100644
--- a/sound/soc/intel/sst-baytrail-pcm.c
+++ b/sound/soc/intel/sst-baytrail-pcm.c
@@ -320,11 +320,6 @@ static struct snd_pcm_ops sst_byt_pcm_ops = {
.mmap = sst_byt_pcm_mmap,
};
-static void sst_byt_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int sst_byt_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
@@ -403,7 +398,6 @@ static struct snd_soc_platform_driver byt_soc_platform = {
.remove = sst_byt_pcm_remove,
.ops = &sst_byt_pcm_ops,
.pcm_new = sst_byt_pcm_new,
- .pcm_free = sst_byt_pcm_free,
};
static const struct snd_soc_component_driver byt_dai_component = {
diff --git a/sound/soc/intel/sst-dsp.c b/sound/soc/intel/sst-dsp.c
index 86e410845670..64e94212d2d2 100644
--- a/sound/soc/intel/sst-dsp.c
+++ b/sound/soc/intel/sst-dsp.c
@@ -410,8 +410,7 @@ void sst_dsp_free(struct sst_dsp *sst)
if (sst->ops->free)
sst->ops->free(sst);
- if (sst->dma)
- sst_dma_free(sst->dma);
+ sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index ef2e8b5766a1..5f71ef607a57 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -497,6 +497,7 @@ struct sst_module *sst_module_new(struct sst_fw *sst_fw,
sst_module->sst_fw = sst_fw;
sst_module->scratch_size = template->scratch_size;
sst_module->persistent_size = template->persistent_size;
+ sst_module->entry = template->entry;
INIT_LIST_HEAD(&sst_module->block_list);
INIT_LIST_HEAD(&sst_module->runtime_list);
@@ -706,6 +707,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
struct list_head *block_list)
{
struct sst_mem_block *block, *tmp;
+ struct sst_block_allocator ba_tmp = *ba;
u32 end = ba->offset + ba->size, block_end;
int err;
@@ -730,9 +732,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
if (ba->offset >= block->offset && ba->offset < block_end) {
/* align ba to block boundary */
- ba->size -= block_end - ba->offset;
- ba->offset = block_end;
- err = block_alloc_contiguous(dsp, ba, block_list);
+ ba_tmp.size -= block_end - ba->offset;
+ ba_tmp.offset = block_end;
+ err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
if (err < 0)
return -ENOMEM;
@@ -767,10 +769,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
list_move(&block->list, &dsp->used_block_list);
list_add(&block->module_list, block_list);
/* align ba to block boundary */
- ba->size -= block_end - ba->offset;
- ba->offset = block_end;
+ ba_tmp.size -= block_end - ba->offset;
+ ba_tmp.offset = block_end;
- err = block_alloc_contiguous(dsp, ba, block_list);
+ err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
if (err < 0)
return -ENOMEM;
@@ -789,6 +791,7 @@ int sst_module_alloc_blocks(struct sst_module *module)
struct sst_block_allocator ba;
int ret;
+ memset(&ba, 0, sizeof(ba));
ba.size = module->size;
ba.type = module->type;
ba.offset = module->offset;
@@ -862,6 +865,7 @@ int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
if (module->persistent_size == 0)
return 0;
+ memset(&ba, 0, sizeof(ba));
ba.size = module->persistent_size;
ba.type = SST_MEM_DRAM;
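The block_alloc_fixed() change above is subtle: the function used to shrink the caller's allocator in place while walking the block list, so a later pass (or the error path) saw a mutated request. Copying it into ba_tmp keeps ba describing the original region, and the added memset()s ensure no stale fields leak into the on-stack allocator structs. A compressed sketch of the pattern, under those assumptions:

/* Sketch: allocate against a scratch copy, leave the request untouched. */
struct sst_block_allocator ba_tmp = *ba;        /* snapshot the request   */

ba_tmp.size  -= block_end - ba->offset;         /* trim at block boundary */
ba_tmp.offset = block_end;
err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
/* ba->offset and ba->size still describe the caller's full region here. */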
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c
index 57039b00efc2..c42ffae5fe9f 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/sst-haswell-dsp.c
@@ -306,7 +306,7 @@ static void hsw_reset(struct sst_dsp *sst)
static int hsw_set_dsp_D0(struct sst_dsp *sst)
{
int tries = 10;
- u32 reg;
+ u32 reg, fw_dump_bit;
/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
@@ -368,7 +368,9 @@ finish:
can't be accessed, please enable each block before accessing. */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+ /* for D0, always enable the block(DSRAM[0]) used for FW dump */
+ fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
+ writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);
/* disable DMA finish function for SSP0 & SSP1 */
@@ -491,6 +493,7 @@ static const struct sst_sram_shift sram_shift[] = {
{SST_DEV_ID_LYNX_POINT, 6, 16}, /* lp */
{SST_DEV_ID_WILDCAT_POINT, 2, 12}, /* wpt */
};
+
static u32 hsw_block_get_bit(struct sst_mem_block *block)
{
u32 bit = 0, shift = 0, index;
@@ -587,7 +590,9 @@ static int hsw_block_disable(struct sst_mem_block *block)
val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
bit = hsw_block_get_bit(block);
- writel(val | bit, sst->addr.pci_cfg + SST_VDRTCTL0);
+ /* don't disable DSRAM[0]; keep it always enabled for FW dump */
+ if (bit != (1 << SST_VDRTCL0_DSRAMPGE_SHIFT))
+ writel(val | bit, sst->addr.pci_cfg + SST_VDRTCTL0);
/* wait 18 DSP clock ticks */
udelay(10);
@@ -612,7 +617,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
const struct sst_adsp_memregion *region;
struct device *dev;
int ret = -ENODEV, i, j, region_count;
- u32 offset, size;
+ u32 offset, size, fw_dump_bit;
dev = sst->dma_dev;
@@ -669,9 +674,11 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
}
}
+ /* always enable the block(DSRAM[0]) used for FW dump */
+ fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
/* set default power gating control, enable power gating control for all blocks. that is,
can't be accessed, please enable each block before accessing. */
- writel(0xffffffff, sst->addr.pci_cfg + SST_VDRTCTL0);
+ writel(0xffffffff & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);
return 0;
}
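The sst-haswell-dsp hunks keep one SRAM block, DSRAM[0], permanently powered because the firmware dump lives there: its power-gating bit is masked out of every VDRTCTL0 write, and hsw_block_disable() refuses to gate it. The masking, pulled together in one place as a sketch:

/* Sketch: gate all SRAM blocks except DSRAM[0], which holds the FW dump. */
u32 fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
u32 val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);

val |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;  /* gate everything */
val &= ~fw_dump_bit;                                           /* ...but block 0  */
writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);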
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 3f8c48231364..394af5684c05 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
+#include <sound/asound.h>
#include "sst-haswell-ipc.h"
#include "sst-dsp.h"
@@ -94,6 +95,8 @@
/* Mailbox */
#define IPC_MAX_MAILBOX_BYTES 256
+#define INVALID_STREAM_HW_ID 0xffffffff
+
/* Global Message - Types and Replies */
enum ipc_glb_type {
IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
@@ -240,6 +243,9 @@ struct sst_hsw_stream {
u32 (*notify_position)(struct sst_hsw_stream *stream, void *data);
void *pdata;
+ /* record the fw read position when playback */
+ snd_pcm_uframes_t old_position;
+ bool play_silence;
struct list_head node;
};
@@ -275,7 +281,6 @@ struct sst_hsw {
/* FW config */
struct sst_hsw_ipc_fw_ready fw_ready;
struct sst_hsw_ipc_fw_version version;
- struct sst_module *scratch;
bool fw_done;
struct sst_fw *sst_fw;
@@ -337,12 +342,6 @@ static inline u32 msg_get_stage_type(u32 msg)
return (msg & IPC_STG_TYPE_MASK) >> IPC_STG_TYPE_SHIFT;
}
-static inline u32 msg_set_stage_type(u32 msg, u32 type)
-{
- return (msg & ~IPC_STG_TYPE_MASK) +
- (type << IPC_STG_TYPE_SHIFT);
-}
-
static inline u32 msg_get_stream_id(u32 msg)
{
return (msg & IPC_STR_ID_MASK) >> IPC_STR_ID_SHIFT;
@@ -651,11 +650,11 @@ static void hsw_notification_work(struct work_struct *work)
}
/* tell DSP that notification has been handled */
- sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD,
+ sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
/* unmask busy interrupt */
- sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
+ sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
}
static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
@@ -969,45 +968,6 @@ int sst_hsw_fw_get_version(struct sst_hsw *hsw,
}
/* Mixer Controls */
-int sst_hsw_stream_mute(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
- u32 stage_id, u32 channel)
-{
- int ret;
-
- ret = sst_hsw_stream_get_volume(hsw, stream, stage_id, channel,
- &stream->mute_volume[channel]);
- if (ret < 0)
- return ret;
-
- ret = sst_hsw_stream_set_volume(hsw, stream, stage_id, channel, 0);
- if (ret < 0) {
- dev_err(hsw->dev, "error: can't unmute stream %d channel %d\n",
- stream->reply.stream_hw_id, channel);
- return ret;
- }
-
- stream->mute[channel] = 1;
- return 0;
-}
-
-int sst_hsw_stream_unmute(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
- u32 stage_id, u32 channel)
-
-{
- int ret;
-
- stream->mute[channel] = 0;
- ret = sst_hsw_stream_set_volume(hsw, stream, stage_id, channel,
- stream->mute_volume[channel]);
- if (ret < 0) {
- dev_err(hsw->dev, "error: can't unmute stream %d channel %d\n",
- stream->reply.stream_hw_id, channel);
- return ret;
- }
-
- return 0;
-}
-
int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
u32 stage_id, u32 channel, u32 *volume)
{
@@ -1021,17 +981,6 @@ int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream
return 0;
}
-int sst_hsw_stream_set_volume_curve(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u64 curve_duration,
- enum sst_hsw_volume_curve curve)
-{
- /* curve duration in steps of 100ns */
- stream->vol_req.curve_duration = curve_duration;
- stream->vol_req.curve_type = curve;
-
- return 0;
-}
-
/* stream volume */
int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 stage_id, u32 channel, u32 volume)
@@ -1083,42 +1032,6 @@ int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
return 0;
}
-int sst_hsw_mixer_mute(struct sst_hsw *hsw, u32 stage_id, u32 channel)
-{
- int ret;
-
- ret = sst_hsw_mixer_get_volume(hsw, stage_id, channel,
- &hsw->mute_volume[channel]);
- if (ret < 0)
- return ret;
-
- ret = sst_hsw_mixer_set_volume(hsw, stage_id, channel, 0);
- if (ret < 0) {
- dev_err(hsw->dev, "error: failed to unmute mixer channel %d\n",
- channel);
- return ret;
- }
-
- hsw->mute[channel] = 1;
- return 0;
-}
-
-int sst_hsw_mixer_unmute(struct sst_hsw *hsw, u32 stage_id, u32 channel)
-{
- int ret;
-
- ret = sst_hsw_mixer_set_volume(hsw, stage_id, channel,
- hsw->mixer_info.volume_register_address[channel]);
- if (ret < 0) {
- dev_err(hsw->dev, "error: failed to unmute mixer channel %d\n",
- channel);
- return ret;
- }
-
- hsw->mute[channel] = 0;
- return 0;
-}
-
int sst_hsw_mixer_get_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
u32 *volume)
{
@@ -1132,16 +1045,6 @@ int sst_hsw_mixer_get_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
return 0;
}
-int sst_hsw_mixer_set_volume_curve(struct sst_hsw *hsw,
- u64 curve_duration, enum sst_hsw_volume_curve curve)
-{
- /* curve duration in steps of 100ns */
- hsw->curve_duration = curve_duration;
- hsw->curve_type = curve;
-
- return 0;
-}
-
/* global mixer volume */
int sst_hsw_mixer_set_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
u32 volume)
@@ -1208,6 +1111,7 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
return NULL;
spin_lock_irqsave(&sst->spinlock, flags);
+ stream->reply.stream_hw_id = INVALID_STREAM_HW_ID;
list_add(&stream->node, &hsw->stream_list);
stream->notify_position = notify_position;
stream->pdata = data;
@@ -1228,6 +1132,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
struct sst_dsp *sst = hsw->dsp;
unsigned long flags;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
+ return 0;
+ }
+
/* dont free DSP streams that are not commited */
if (!stream->commited)
goto out;
@@ -1415,6 +1324,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
u32 header;
int ret;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
+ return 0;
+ }
+
+ if (stream->commited) {
+ dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
+ return 0;
+ }
+
trace_ipc_request("stream alloc", stream->host_id);
header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1432,50 +1351,32 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
return 0;
}
-/* Stream Information - these calls could be inline but we want the IPC
- ABI to be opaque to client PCM drivers to cope with any future ABI changes */
-int sst_hsw_stream_get_hw_id(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream)
-{
- return stream->reply.stream_hw_id;
-}
-
-int sst_hsw_stream_get_mixer_id(struct sst_hsw *hsw,
+snd_pcm_uframes_t sst_hsw_stream_get_old_position(struct sst_hsw *hsw,
struct sst_hsw_stream *stream)
{
- return stream->reply.mixer_hw_id;
+ return stream->old_position;
}
-u32 sst_hsw_stream_get_read_reg(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream)
+void sst_hsw_stream_set_old_position(struct sst_hsw *hsw,
+ struct sst_hsw_stream *stream, snd_pcm_uframes_t val)
{
- return stream->reply.read_position_register_address;
+ stream->old_position = val;
}
-u32 sst_hsw_stream_get_pointer_reg(struct sst_hsw *hsw,
+bool sst_hsw_stream_get_silence_start(struct sst_hsw *hsw,
struct sst_hsw_stream *stream)
{
- return stream->reply.presentation_position_register_address;
+ return stream->play_silence;
}
-u32 sst_hsw_stream_get_peak_reg(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 channel)
+void sst_hsw_stream_set_silence_start(struct sst_hsw *hsw,
+ struct sst_hsw_stream *stream, bool val)
{
- if (channel >= 2)
- return 0;
-
- return stream->reply.peak_meter_register_address[channel];
-}
-
-u32 sst_hsw_stream_get_vol_reg(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 channel)
-{
- if (channel >= 2)
- return 0;
-
- return stream->reply.volume_register_address[channel];
+ stream->play_silence = val;
}
+/* Stream Information - these calls could be inline but we want the IPC
+ ABI to be opaque to client PCM drivers to cope with any future ABI changes */
int sst_hsw_mixer_get_info(struct sst_hsw *hsw)
{
struct sst_hsw_ipc_stream_info_reply *reply;
@@ -1519,6 +1420,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
{
int ret;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
+ return 0;
+ }
+
trace_ipc_request("stream pause", stream->reply.stream_hw_id);
ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1441,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
{
int ret;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
+ return 0;
+ }
+
trace_ipc_request("stream resume", stream->reply.stream_hw_id);
ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1461,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
{
int ret, tries = 10;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
+ return 0;
+ }
+
/* dont reset streams that are not commited */
if (!stream->commited)
return 0;
@@ -1598,30 +1514,6 @@ u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
return ppos;
}
-int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 stage_id, u32 position)
-{
- u32 header;
- int ret;
-
- trace_stream_write_position(stream->reply.stream_hw_id, position);
-
- header = IPC_GLB_TYPE(IPC_GLB_STREAM_MESSAGE) |
- IPC_STR_TYPE(IPC_STR_STAGE_MESSAGE);
- header |= (stream->reply.stream_hw_id << IPC_STR_ID_SHIFT);
- header |= (IPC_STG_SET_WRITE_POSITION << IPC_STG_TYPE_SHIFT);
- header |= (stage_id << IPC_STG_ID_SHIFT);
- stream->wpos.position = position;
-
- ret = ipc_tx_message_nowait(hsw, header, &stream->wpos,
- sizeof(stream->wpos));
- if (ret < 0)
- dev_err(hsw->dev, "error: stream %d set position %d failed\n",
- stream->reply.stream_hw_id, position);
-
- return ret;
-}
-
/* physical BE config */
int sst_hsw_device_set_config(struct sst_hsw *hsw,
enum sst_hsw_device_id dev, enum sst_hsw_device_mclk mclk,
@@ -2102,7 +1994,6 @@ void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata)
dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
hsw->dx_context, hsw->dx_context_paddr);
sst_dsp_free(hsw->dsp);
- kfree(hsw->scratch);
kthread_stop(hsw->tx_thread);
kfree(hsw->msg);
}
diff --git a/sound/soc/intel/sst-haswell-ipc.h b/sound/soc/intel/sst-haswell-ipc.h
index 138e894ab413..858096041cb1 100644
--- a/sound/soc/intel/sst-haswell-ipc.h
+++ b/sound/soc/intel/sst-haswell-ipc.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <sound/asound.h>
#define SST_HSW_NO_CHANNELS 4
#define SST_HSW_MAX_DX_REGIONS 14
@@ -376,32 +377,17 @@ int sst_hsw_fw_get_version(struct sst_hsw *hsw,
u32 create_channel_map(enum sst_hsw_channel_config config);
/* Stream Mixer Controls - */
-int sst_hsw_stream_mute(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
- u32 stage_id, u32 channel);
-int sst_hsw_stream_unmute(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
- u32 stage_id, u32 channel);
-
int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 stage_id, u32 channel, u32 volume);
int sst_hsw_stream_get_volume(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 stage_id, u32 channel, u32 *volume);
-int sst_hsw_stream_set_volume_curve(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u64 curve_duration,
- enum sst_hsw_volume_curve curve);
-
/* Global Mixer Controls - */
-int sst_hsw_mixer_mute(struct sst_hsw *hsw, u32 stage_id, u32 channel);
-int sst_hsw_mixer_unmute(struct sst_hsw *hsw, u32 stage_id, u32 channel);
-
int sst_hsw_mixer_set_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
u32 volume);
int sst_hsw_mixer_get_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
u32 *volume);
-int sst_hsw_mixer_set_volume_curve(struct sst_hsw *hsw,
- u64 curve_duration, enum sst_hsw_volume_curve curve);
-
/* Stream API */
struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
u32 (*get_write_position)(struct sst_hsw_stream *stream, void *data),
@@ -440,18 +426,14 @@ int sst_hsw_stream_set_pmemory_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 offset, u32 size);
int sst_hsw_stream_set_smemory_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 offset, u32 size);
-int sst_hsw_stream_get_hw_id(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream);
-int sst_hsw_stream_get_mixer_id(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream);
-u32 sst_hsw_stream_get_read_reg(struct sst_hsw *hsw,
+snd_pcm_uframes_t sst_hsw_stream_get_old_position(struct sst_hsw *hsw,
struct sst_hsw_stream *stream);
-u32 sst_hsw_stream_get_pointer_reg(struct sst_hsw *hsw,
+void sst_hsw_stream_set_old_position(struct sst_hsw *hsw,
+ struct sst_hsw_stream *stream, snd_pcm_uframes_t val);
+bool sst_hsw_stream_get_silence_start(struct sst_hsw *hsw,
struct sst_hsw_stream *stream);
-u32 sst_hsw_stream_get_peak_reg(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 channel);
-u32 sst_hsw_stream_get_vol_reg(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 channel);
+void sst_hsw_stream_set_silence_start(struct sst_hsw *hsw,
+ struct sst_hsw_stream *stream, bool val);
int sst_hsw_mixer_get_info(struct sst_hsw *hsw);
/* Stream ALSA trigger operations */
@@ -466,8 +448,6 @@ int sst_hsw_stream_get_read_pos(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 *position);
int sst_hsw_stream_get_write_pos(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 *position);
-int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 stage_id, u32 position);
u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
struct sst_hsw_stream *stream);
u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
@@ -481,8 +461,6 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw,
/* DX Config */
int sst_hsw_dx_set_state(struct sst_hsw *hsw,
enum sst_hsw_dx_state state, struct sst_hsw_ipc_dx_reply *dx);
-int sst_hsw_dx_get_state(struct sst_hsw *hsw, u32 item,
- u32 *offset, u32 *size, u32 *source);
/* init */
int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata);
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index 619525200705..d6fa9d5514e1 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -36,6 +36,11 @@
#define HSW_PCM_COUNT 6
#define HSW_VOLUME_MAX 0x7FFFFFFF /* 0dB */
+#define SST_OLD_POSITION(d, r, o) ((d) + \
+ frames_to_bytes(r, o))
+#define SST_SAMPLES(r, x) (bytes_to_samples(r, \
+ frames_to_bytes(r, (x))))
+
/* simple volume table */
static const u32 volume_map[] = {
HSW_VOLUME_MAX >> 30,
@@ -78,7 +83,6 @@ static const u32 volume_map[] = {
#define HSW_PCM_DAI_ID_OFFLOAD0 1
#define HSW_PCM_DAI_ID_OFFLOAD1 2
#define HSW_PCM_DAI_ID_LOOPBACK 3
-#define HSW_PCM_DAI_ID_CAPTURE 4
static const struct snd_pcm_hardware hsw_pcm_hardware = {
@@ -99,6 +103,7 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
struct hsw_pcm_module_map {
int dai_id;
+ int stream;
enum sst_hsw_module_id mod_id;
};
@@ -119,8 +124,9 @@ struct hsw_pcm_data {
};
enum hsw_pm_state {
- HSW_PM_STATE_D3 = 0,
- HSW_PM_STATE_D0 = 1,
+ HSW_PM_STATE_D0 = 0,
+ HSW_PM_STATE_RTD3 = 1,
+ HSW_PM_STATE_D3 = 2,
};
/* private data for the driver */
@@ -135,7 +141,17 @@ struct hsw_priv_data {
struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
/* DAI data */
- struct hsw_pcm_data pcm[HSW_PCM_COUNT];
+ struct hsw_pcm_data pcm[HSW_PCM_COUNT][2];
+};
+
+
+/* static mappings between PCMs and modules - may be dynamic in future */
+static struct hsw_pcm_module_map mod_map[] = {
+ {HSW_PCM_DAI_ID_SYSTEM, 0, SST_HSW_MODULE_PCM_SYSTEM},
+ {HSW_PCM_DAI_ID_OFFLOAD0, 0, SST_HSW_MODULE_PCM},
+ {HSW_PCM_DAI_ID_OFFLOAD1, 0, SST_HSW_MODULE_PCM},
+ {HSW_PCM_DAI_ID_LOOPBACK, 1, SST_HSW_MODULE_PCM_REFERENCE},
+ {HSW_PCM_DAI_ID_SYSTEM, 1, SST_HSW_MODULE_PCM_CAPTURE},
};
static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data);
@@ -168,9 +184,14 @@ static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(platform);
- struct hsw_pcm_data *pcm_data = &pdata->pcm[mc->reg];
+ struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
+ int dai, stream;
+
+ dai = mod_map[mc->reg].dai_id;
+ stream = mod_map[mc->reg].stream;
+ pcm_data = &pdata->pcm[dai][stream];
mutex_lock(&pcm_data->mutex);
pm_runtime_get_sync(pdata->dev);
@@ -212,9 +233,14 @@ static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(platform);
- struct hsw_pcm_data *pcm_data = &pdata->pcm[mc->reg];
+ struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
+ int dai, stream;
+
+ dai = mod_map[mc->reg].dai_id;
+ stream = mod_map[mc->reg].stream;
+ pcm_data = &pdata->pcm[dai][stream];
mutex_lock(&pcm_data->mutex);
pm_runtime_get_sync(pdata->dev);
@@ -309,7 +335,7 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = {
ARRAY_SIZE(volume_map) - 1, 0,
hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
/* Mic Capture volume */
- SOC_DOUBLE_EXT_TLV("Mic Capture Volume", 0, 0, 8,
+ SOC_DOUBLE_EXT_TLV("Mic Capture Volume", 4, 0, 8,
ARRAY_SIZE(volume_map) - 1, 0,
hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
};
@@ -353,7 +379,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
- struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
+ struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
struct sst_module *module_data;
struct sst_dsp *dsp;
@@ -362,7 +388,10 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
enum sst_hsw_stream_path_id path_id;
u32 rate, bits, map, pages, module_id;
u8 channels;
- int ret;
+ int ret, dai;
+
+ dai = mod_map[rtd->cpu_dai->id].dai_id;
+ pcm_data = &pdata->pcm[dai][substream->stream];
/* check if we are being called a subsequent time */
if (pcm_data->allocated) {
@@ -552,20 +581,35 @@ static int hsw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
- struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
+ struct hsw_pcm_data *pcm_data;
+ struct sst_hsw_stream *sst_stream;
struct sst_hsw *hsw = pdata->hsw;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_uframes_t pos;
+ int dai;
+
+ dai = mod_map[rtd->cpu_dai->id].dai_id;
+ pcm_data = &pdata->pcm[dai][substream->stream];
+ sst_stream = pcm_data->stream;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ sst_hsw_stream_set_silence_start(hsw, sst_stream, false);
sst_hsw_stream_resume(hsw, pcm_data->stream, 0);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ sst_hsw_stream_set_silence_start(hsw, sst_stream, false);
sst_hsw_stream_pause(hsw, pcm_data->stream, 0);
break;
+ case SNDRV_PCM_TRIGGER_DRAIN:
+ pos = runtime->control->appl_ptr % runtime->buffer_size;
+ sst_hsw_stream_set_old_position(hsw, pcm_data->stream, pos);
+ sst_hsw_stream_set_silence_start(hsw, sst_stream, true);
+ break;
default:
break;
}
@@ -579,13 +623,62 @@ static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data)
struct snd_pcm_substream *substream = pcm_data->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct hsw_priv_data *pdata =
+ snd_soc_platform_get_drvdata(rtd->platform);
+ struct sst_hsw *hsw = pdata->hsw;
u32 pos;
+ snd_pcm_uframes_t position = bytes_to_frames(runtime,
+ sst_hsw_get_dsp_position(hsw, pcm_data->stream));
+ unsigned char *dma_area = runtime->dma_area;
+ snd_pcm_uframes_t dma_frames =
+ bytes_to_frames(runtime, runtime->dma_bytes);
+ snd_pcm_uframes_t old_position;
+ ssize_t samples;
pos = frames_to_bytes(runtime,
(runtime->control->appl_ptr % runtime->buffer_size));
dev_vdbg(rtd->dev, "PCM: App pointer %d bytes\n", pos);
+ /* The SST firmware does not know where to stop the DMA,
+ * so the driver has to clear the data that has already been consumed.
+ */
+ if (dma_area == NULL || dma_frames <= 0
+ || (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ || !sst_hsw_stream_get_silence_start(hsw, stream)) {
+ snd_pcm_period_elapsed(substream);
+ return pos;
+ }
+
+ old_position = sst_hsw_stream_get_old_position(hsw, stream);
+ if (position > old_position) {
+ if (position < dma_frames) {
+ samples = SST_SAMPLES(runtime, position - old_position);
+ snd_pcm_format_set_silence(runtime->format,
+ SST_OLD_POSITION(dma_area,
+ runtime, old_position),
+ samples);
+ } else
+ dev_err(rtd->dev, "PCM: position is wrong\n");
+ } else {
+ if (old_position < dma_frames) {
+ samples = SST_SAMPLES(runtime,
+ dma_frames - old_position);
+ snd_pcm_format_set_silence(runtime->format,
+ SST_OLD_POSITION(dma_area,
+ runtime, old_position),
+ samples);
+ } else
+ dev_err(rtd->dev, "PCM: dma_bytes is wrong\n");
+ if (position < dma_frames) {
+ samples = SST_SAMPLES(runtime, position);
+ snd_pcm_format_set_silence(runtime->format,
+ dma_area, samples);
+ } else
+ dev_err(rtd->dev, "PCM: position is wrong\n");
+ }
+ sst_hsw_stream_set_old_position(hsw, stream, position);
+
/* let alsa know we have play a period */
snd_pcm_period_elapsed(substream);
return pos;
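The drain handling added to hsw_notify_pointer() exists because the SST firmware keeps the DMA running past the data the application actually wrote, so the driver silences the part of the ring buffer the DSP has already consumed; when the read position wraps, the clear is split into two spans. A condensed sketch of the two cases, using the SST_SAMPLES()/SST_OLD_POSITION() helpers defined earlier in this file (old and pos stand in for old_position and position, in frames):

/* Sketch: regions cleared after each position update while draining.
 * No wrap: silence [old, pos).  Wrap: silence [old, dma_frames) and [0, pos).
 */
if (pos > old) {
	snd_pcm_format_set_silence(runtime->format,
		SST_OLD_POSITION(dma_area, runtime, old),
		SST_SAMPLES(runtime, pos - old));
} else {
	snd_pcm_format_set_silence(runtime->format,
		SST_OLD_POSITION(dma_area, runtime, old),
		SST_SAMPLES(runtime, dma_frames - old));
	snd_pcm_format_set_silence(runtime->format,
		dma_area,
		SST_SAMPLES(runtime, pos));
}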
@@ -597,11 +690,16 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
- struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
+ struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
snd_pcm_uframes_t offset;
uint64_t ppos;
- u32 position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
+ u32 position;
+ int dai;
+
+ dai = mod_map[rtd->cpu_dai->id].dai_id;
+ pcm_data = &pdata->pcm[dai][substream->stream];
+ position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
offset = bytes_to_frames(runtime, position);
ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
@@ -618,8 +716,10 @@ static int hsw_pcm_open(struct snd_pcm_substream *substream)
snd_soc_platform_get_drvdata(rtd->platform);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
+ int dai;
- pcm_data = &pdata->pcm[rtd->cpu_dai->id];
+ dai = mod_map[rtd->cpu_dai->id].dai_id;
+ pcm_data = &pdata->pcm[dai][substream->stream];
mutex_lock(&pcm_data->mutex);
pm_runtime_get_sync(pdata->dev);
@@ -648,9 +748,12 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
- struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
+ struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
- int ret;
+ int ret, dai;
+
+ dai = mod_map[rtd->cpu_dai->id].dai_id;
+ pcm_data = &pdata->pcm[dai][substream->stream];
mutex_lock(&pcm_data->mutex);
ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
@@ -685,15 +788,6 @@ static struct snd_pcm_ops hsw_pcm_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-/* static mappings between PCMs and modules - may be dynamic in future */
-static struct hsw_pcm_module_map mod_map[] = {
- {HSW_PCM_DAI_ID_SYSTEM, SST_HSW_MODULE_PCM_SYSTEM},
- {HSW_PCM_DAI_ID_OFFLOAD0, SST_HSW_MODULE_PCM},
- {HSW_PCM_DAI_ID_OFFLOAD1, SST_HSW_MODULE_PCM},
- {HSW_PCM_DAI_ID_LOOPBACK, SST_HSW_MODULE_PCM_REFERENCE},
- {HSW_PCM_DAI_ID_CAPTURE, SST_HSW_MODULE_PCM_CAPTURE},
-};
-
static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
{
struct sst_hsw *hsw = pdata->hsw;
@@ -701,7 +795,7 @@ static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
int i;
for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
- pcm_data = &pdata->pcm[i];
+ pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
/* create new runtime module, use same offset if recreated */
pcm_data->runtime = sst_hsw_runtime_module_create(hsw,
@@ -716,7 +810,7 @@ static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
err:
for (--i; i >= 0; i--) {
- pcm_data = &pdata->pcm[i];
+ pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
sst_hsw_runtime_module_free(pcm_data->runtime);
}
@@ -729,17 +823,12 @@ static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
int i;
for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
- pcm_data = &pdata->pcm[i];
+ pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
sst_hsw_runtime_module_free(pcm_data->runtime);
}
}
-static void hsw_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
@@ -762,7 +851,10 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
return ret;
}
}
- priv_data->pcm[rtd->cpu_dai->id].hsw_pcm = pcm;
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
+ priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_PLAYBACK].hsw_pcm = pcm;
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream)
+ priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_CAPTURE].hsw_pcm = pcm;
return ret;
}
@@ -871,10 +963,9 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
/* allocate DSP buffer page tables */
for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
- mutex_init(&priv_data->pcm[i].mutex);
-
/* playback */
if (hsw_dais[i].playback.channels_min) {
+ mutex_init(&priv_data->pcm[i][SNDRV_PCM_STREAM_PLAYBACK].mutex);
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
PAGE_SIZE, &priv_data->dmab[i][0]);
if (ret < 0)
@@ -883,6 +974,7 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
/* capture */
if (hsw_dais[i].capture.channels_min) {
+ mutex_init(&priv_data->pcm[i][SNDRV_PCM_STREAM_CAPTURE].mutex);
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
PAGE_SIZE, &priv_data->dmab[i][1]);
if (ret < 0)
@@ -936,7 +1028,6 @@ static struct snd_soc_platform_driver hsw_soc_platform = {
.remove = hsw_pcm_remove,
.ops = &hsw_pcm_ops,
.pcm_new = hsw_pcm_new,
- .pcm_free = hsw_pcm_free,
};
static const struct snd_soc_component_driver hsw_dai_component = {
@@ -1010,12 +1101,12 @@ static int hsw_pcm_runtime_suspend(struct device *dev)
struct hsw_priv_data *pdata = dev_get_drvdata(dev);
struct sst_hsw *hsw = pdata->hsw;
- if (pdata->pm_state == HSW_PM_STATE_D3)
+ if (pdata->pm_state >= HSW_PM_STATE_RTD3)
return 0;
sst_hsw_dsp_runtime_suspend(hsw);
sst_hsw_dsp_runtime_sleep(hsw);
- pdata->pm_state = HSW_PM_STATE_D3;
+ pdata->pm_state = HSW_PM_STATE_RTD3;
return 0;
}
@@ -1026,7 +1117,7 @@ static int hsw_pcm_runtime_resume(struct device *dev)
struct sst_hsw *hsw = pdata->hsw;
int ret;
- if (pdata->pm_state == HSW_PM_STATE_D0)
+ if (pdata->pm_state != HSW_PM_STATE_RTD3)
return 0;
ret = sst_hsw_dsp_load(hsw);
@@ -1066,7 +1157,7 @@ static void hsw_pcm_complete(struct device *dev)
struct hsw_pcm_data *pcm_data;
int i, err;
- if (pdata->pm_state == HSW_PM_STATE_D0)
+ if (pdata->pm_state != HSW_PM_STATE_D3)
return;
err = sst_hsw_dsp_load(hsw);
@@ -1081,8 +1172,8 @@ static void hsw_pcm_complete(struct device *dev)
return;
}
- for (i = 0; i < HSW_PCM_DAI_ID_CAPTURE + 1; i++) {
- pcm_data = &pdata->pcm[i];
+ for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
+ pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
if (!pcm_data->substream)
continue;
@@ -1114,41 +1205,42 @@ static int hsw_pcm_prepare(struct device *dev)
if (pdata->pm_state == HSW_PM_STATE_D3)
return 0;
- /* suspend all active streams */
- for (i = 0; i < HSW_PCM_DAI_ID_CAPTURE + 1; i++) {
- pcm_data = &pdata->pcm[i];
+ else if (pdata->pm_state == HSW_PM_STATE_D0) {
+ /* suspend all active streams */
+ for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
+ pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
+
+ if (!pcm_data->substream)
+ continue;
+ dev_dbg(dev, "suspending pcm %d\n", i);
+ snd_pcm_suspend_all(pcm_data->hsw_pcm);
+
+ /* We need to wait until the DSP FW stops the streams */
+ msleep(2);
+ }
- if (!pcm_data->substream)
- continue;
- dev_dbg(dev, "suspending pcm %d\n", i);
- snd_pcm_suspend_all(pcm_data->hsw_pcm);
+ /* preserve persistent memory */
+ for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
+ pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
+
+ if (!pcm_data->substream)
+ continue;
- /* We need to wait until the DSP FW stops the streams */
- msleep(2);
+ dev_dbg(dev, "saving context pcm %d\n", i);
+ err = sst_module_runtime_save(pcm_data->runtime,
+ &pcm_data->context);
+ if (err < 0)
+ dev_err(dev, "failed to save context for PCM %d\n", i);
+ }
+ /* enter D3 state and stall */
+ sst_hsw_dsp_runtime_suspend(hsw);
+ /* put the DSP to sleep */
+ sst_hsw_dsp_runtime_sleep(hsw);
}
snd_soc_suspend(pdata->soc_card->dev);
snd_soc_poweroff(pdata->soc_card->dev);
- /* enter D3 state and stall */
- sst_hsw_dsp_runtime_suspend(hsw);
-
- /* preserve persistent memory */
- for (i = 0; i < HSW_PCM_DAI_ID_CAPTURE + 1; i++) {
- pcm_data = &pdata->pcm[i];
-
- if (!pcm_data->substream)
- continue;
-
- dev_dbg(dev, "saving context pcm %d\n", i);
- err = sst_module_runtime_save(pcm_data->runtime,
- &pcm_data->context);
- if (err < 0)
- dev_err(dev, "failed to save context for PCM %d\n", i);
- }
-
- /* put the DSP to sleep */
- sst_hsw_dsp_runtime_sleep(hsw);
pdata->pm_state = HSW_PM_STATE_D3;
return 0;
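
The haswell hunks above replace the old two-state D0/D3 bookkeeping with a third runtime state (HSW_PM_STATE_RTD3) and only suspend streams and save module contexts when the device is still in D0. A minimal standalone sketch of the guard logic, assuming the enum is declared in ascending order D0 < RTD3 < D3 (the `>=` comparison in hsw_pcm_runtime_suspend only makes sense under some such ordering; the real values live in sst-haswell-pcm.c):

	#include <stdio.h>

	/* Assumed ordering for illustration only */
	enum hsw_pm_state { HSW_PM_STATE_D0, HSW_PM_STATE_RTD3, HSW_PM_STATE_D3 };

	/* Runtime suspend: nothing to do if we already left D0 (RTD3 or D3) */
	static int runtime_suspend(enum hsw_pm_state *s)
	{
		if (*s >= HSW_PM_STATE_RTD3)
			return 0;
		*s = HSW_PM_STATE_RTD3;		/* put the DSP to sleep */
		return 0;
	}

	/* Runtime resume: only undo a runtime (RTD3) suspend, never a full D3 */
	static int runtime_resume(enum hsw_pm_state *s)
	{
		if (*s != HSW_PM_STATE_RTD3)
			return 0;
		*s = HSW_PM_STATE_D0;		/* reload firmware, restore runtimes */
		return 0;
	}

	int main(void)
	{
		enum hsw_pm_state s = HSW_PM_STATE_D0;
		runtime_suspend(&s);
		runtime_resume(&s);
		printf("state=%d\n", s);
		return 0;
	}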
diff --git a/sound/soc/intel/sst-mfld-platform-pcm.c b/sound/soc/intel/sst-mfld-platform-pcm.c
index a1a8d9d91539..7523cbef8780 100644
--- a/sound/soc/intel/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/sst-mfld-platform-pcm.c
@@ -643,12 +643,6 @@ static struct snd_pcm_ops sst_platform_ops = {
.pointer = sst_platform_pcm_pointer,
};
-static void sst_pcm_free(struct snd_pcm *pcm)
-{
- dev_dbg(pcm->dev, "sst_pcm_free called\n");
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
@@ -679,7 +673,6 @@ static struct snd_soc_platform_driver sst_soc_platform_drv = {
.ops = &sst_platform_ops,
.compr_ops = &sst_platform_compr_ops,
.pcm_new = sst_pcm_new,
- .pcm_free = sst_pcm_free,
};
static const struct snd_soc_component_driver sst_component = {
diff --git a/sound/soc/intel/sst/sst.h b/sound/soc/intel/sst/sst.h
index 7f4bbfcbc6f5..562bc483d6b7 100644
--- a/sound/soc/intel/sst/sst.h
+++ b/sound/soc/intel/sst/sst.h
@@ -58,6 +58,7 @@ enum sst_algo_ops {
#define SST_BLOCK_TIMEOUT 1000
#define FW_SIGNATURE_SIZE 4
+#define FW_NAME_SIZE 32
/* stream states */
enum sst_stream_states {
@@ -426,7 +427,7 @@ struct intel_sst_drv {
* Holder for firmware name. Due to async call it needs to be
* persistent till worker thread gets called
*/
- char firmware_name[20];
+ char firmware_name[FW_NAME_SIZE];
};
/* misc definitions */
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 2ac72eb5e75d..b782dfdcdbba 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -47,7 +47,7 @@ struct sst_machines {
char board[32];
char machine[32];
void (*machine_quirk)(void);
- char firmware[32];
+ char firmware[FW_NAME_SIZE];
struct sst_platform_info *pdata;
};
@@ -245,7 +245,7 @@ static struct sst_machines *sst_acpi_find_machine(
return NULL;
}
-int sst_acpi_probe(struct platform_device *pdev)
+static int sst_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret = 0;
@@ -332,7 +332,7 @@ do_sst_cleanup:
* This function is called by OS when a device is unloaded
* This frees the interrupt etc
*/
-int sst_acpi_remove(struct platform_device *pdev)
+static int sst_acpi_remove(struct platform_device *pdev)
{
struct intel_sst_drv *ctx;
@@ -350,7 +350,9 @@ static struct sst_machines sst_acpi_bytcr[] = {
/* Cherryview-based platforms: CherryTrail and Braswell */
static struct sst_machines sst_acpi_chv[] = {
- {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin",
+ {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
+ &chv_platform_data },
+ {"10EC5645", "cht-bsw", "cht-bsw-rt5645", NULL, "intel/fw_sst_22a8.bin",
&chv_platform_data },
{},
};
@@ -366,7 +368,6 @@ MODULE_DEVICE_TABLE(acpi, sst_acpi_ids);
static struct platform_driver sst_acpi_driver = {
.driver = {
.name = "intel_sst_acpi",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(sst_acpi_ids),
.pm = &intel_sst_pm,
},
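
The firmware_name buffer grows from 20 bytes to FW_NAME_SIZE (32) in the same series that prefixes the Cherryview firmware with the "intel/" directory. A quick standalone check of why the old size no longer fits (not driver code, just the arithmetic):

	#include <stdio.h>
	#include <string.h>

	#define FW_NAME_SIZE 32

	int main(void)
	{
		const char *old_name = "fw_sst_22a8.bin";        /* 15 chars + NUL = 16, fits in 20 */
		const char *new_name = "intel/fw_sst_22a8.bin";  /* 21 chars + NUL = 22, needs more than 20 */
		char buf[FW_NAME_SIZE];

		snprintf(buf, sizeof(buf), "%s", new_name);
		printf("%zu %zu -> %s\n", strlen(old_name), strlen(new_name), buf);
		return 0;
	}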
diff --git a/sound/soc/intel/sst/sst_loader.c b/sound/soc/intel/sst/sst_loader.c
index b580f96e25e5..7888cd707853 100644
--- a/sound/soc/intel/sst/sst_loader.c
+++ b/sound/soc/intel/sst/sst_loader.c
@@ -324,8 +324,7 @@ void sst_firmware_load_cb(const struct firmware *fw, void *context)
if (ctx->sst_state != SST_RESET ||
ctx->fw_in_mem != NULL) {
- if (fw != NULL)
- release_firmware(fw);
+ release_firmware(fw);
mutex_unlock(&ctx->sst_lock);
return;
}
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index d3d45c6f064f..07f77815a586 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -14,6 +14,8 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -83,6 +85,8 @@
#define JZ_AIC_I2S_STATUS_BUSY BIT(2)
#define JZ_AIC_CLK_DIV_MASK 0xf
+#define I2SDIV_DV_SHIFT 8
+#define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
struct jz4740_i2s {
struct resource *mem;
@@ -237,10 +241,14 @@ static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
{
struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int sample_size;
- uint32_t ctrl;
+ uint32_t ctrl, div_reg;
+ int div;
ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
+ div_reg = jz4740_i2s_read(i2s, JZ_REG_AIC_CLK_DIV);
+ div = clk_get_rate(i2s->clk_i2s) / (64 * params_rate(params));
+
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
sample_size = 0;
@@ -264,7 +272,10 @@ static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET;
}
+ div_reg &= ~I2SDIV_DV_MASK;
+ div_reg |= (div - 1) << I2SDIV_DV_SHIFT;
jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
+ jz4740_i2s_write(i2s, JZ_REG_AIC_CLK_DIV, div_reg);
return 0;
}
@@ -415,6 +426,13 @@ static const struct snd_soc_component_driver jz4740_i2s_component = {
.name = "jz4740-i2s",
};
+#ifdef CONFIG_OF
+static const struct of_device_id jz4740_of_matches[] = {
+ { .compatible = "ingenic,jz4740-i2s" },
+ { /* sentinel */ }
+};
+#endif
+
static int jz4740_i2s_dev_probe(struct platform_device *pdev)
{
struct jz4740_i2s *i2s;
@@ -455,6 +473,7 @@ static struct platform_driver jz4740_i2s_driver = {
.probe = jz4740_i2s_dev_probe,
.driver = {
.name = "jz4740-i2s",
+ .of_match_table = of_match_ptr(jz4740_of_matches)
},
};
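
The jz4740 change above derives the bit-clock divider from the I2S clock and the requested rate (div = clk / (64 * rate)) and writes div - 1 into the I2SDIV field. A worked example of the arithmetic, with an assumed 12.288 MHz i2s clock:

	#include <stdio.h>

	#define I2SDIV_DV_SHIFT 8
	#define I2SDIV_DV_MASK  (0xf << I2SDIV_DV_SHIFT)

	int main(void)
	{
		unsigned long clk_rate = 12288000;		/* assumed i2s clock rate */
		unsigned int rate = 48000;			/* requested sample rate */
		unsigned int div = clk_rate / (64 * rate);	/* 12288000 / 3072000 = 4 */
		unsigned int div_reg = 0;

		div_reg &= ~I2SDIV_DV_MASK;
		div_reg |= (div - 1) << I2SDIV_DV_SHIFT;	/* field holds div - 1 = 3 */
		printf("div=%u reg=0x%x\n", div, div_reg);
		return 0;
	}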
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index d9865082160c..c866ade28ad0 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -710,7 +710,7 @@ static int mxs_saif_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct resource *iores;
struct mxs_saif *saif;
- int ret = 0;
+ int irq, ret = 0;
struct device_node *master;
if (!np)
@@ -763,16 +763,16 @@ static int mxs_saif_probe(struct platform_device *pdev)
if (IS_ERR(saif->base))
return PTR_ERR(saif->base);
- saif->irq = platform_get_irq(pdev, 0);
- if (saif->irq < 0) {
- ret = saif->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
dev_err(&pdev->dev, "failed to get irq resource: %d\n",
ret);
return ret;
}
saif->dev = &pdev->dev;
- ret = devm_request_irq(&pdev->dev, saif->irq, mxs_saif_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, mxs_saif_irq, 0,
dev_name(&pdev->dev), saif);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
diff --git a/sound/soc/mxs/mxs-saif.h b/sound/soc/mxs/mxs-saif.h
index fbaf7badfdfb..9a4c0b291b9e 100644
--- a/sound/soc/mxs/mxs-saif.h
+++ b/sound/soc/mxs/mxs-saif.h
@@ -116,7 +116,6 @@ struct mxs_saif {
unsigned int mclk;
unsigned int mclk_in_use;
void __iomem *base;
- int irq;
unsigned int id;
unsigned int master_id;
unsigned int cur_rate;
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 6f1916b71815..6e6fce6a14ba 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -36,7 +36,7 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int rate = params_rate(params);
- u32 dai_format, mclk;
+ u32 mclk;
int ret;
/* sgtl5000 does not support 512*rate when in 96000 fs */
@@ -65,26 +65,6 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- /* set codec to slave mode */
- dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS;
-
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, dai_format);
- if (ret) {
- dev_err(codec_dai->dev, "Failed to set dai format to %08x\n",
- dai_format);
- return ret;
- }
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, dai_format);
- if (ret) {
- dev_err(cpu_dai->dev, "Failed to set dai format to %08x\n",
- dai_format);
- return ret;
- }
-
return 0;
}
@@ -92,17 +72,22 @@ static struct snd_soc_ops mxs_sgtl5000_hifi_ops = {
.hw_params = mxs_sgtl5000_hw_params,
};
+#define MXS_SGTL5000_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+ SND_SOC_DAIFMT_CBS_CFS)
+
static struct snd_soc_dai_link mxs_sgtl5000_dai[] = {
{
.name = "HiFi Tx",
.stream_name = "HiFi Playback",
.codec_dai_name = "sgtl5000",
+ .dai_fmt = MXS_SGTL5000_DAI_FMT,
.ops = &mxs_sgtl5000_hifi_ops,
.playback_only = true,
}, {
.name = "HiFi Rx",
.stream_name = "HiFi Capture",
.codec_dai_name = "sgtl5000",
+ .dai_fmt = MXS_SGTL5000_DAI_FMT,
.ops = &mxs_sgtl5000_hifi_ops,
.capture_only = true,
},
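
Several board drivers in this series (mxs-sgtl5000 above; ams-delta, raumfeld, zylonite and h1940 below) drop per-stream snd_soc_dai_set_fmt() calls from hw_params and instead declare the format once on the DAI link, letting the ASoC core apply it to both CPU and codec DAIs when the link is initialised. A kernel-style fragment of the pattern, not a complete machine driver and with illustrative names:

	#include <sound/soc.h>

	#define EXAMPLE_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
				 SND_SOC_DAIFMT_CBS_CFS)

	static struct snd_soc_dai_link example_dai_link = {
		.name		= "Example HiFi",
		.stream_name	= "Example Playback",
		.codec_dai_name	= "sgtl5000",
		/* Applied by the core at link init; no set_fmt calls in hw_params */
		.dai_fmt	= EXAMPLE_DAI_FMT,
	};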
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index b779a3d9b5dd..b809fa909e4d 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -306,11 +306,6 @@ static struct snd_pcm_ops nuc900_dma_ops = {
.mmap = nuc900_dma_mmap,
};
-static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
@@ -330,7 +325,6 @@ static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
static struct snd_soc_platform_driver nuc900_soc_platform = {
.ops = &nuc900_dma_ops,
.pcm_new = nuc900_dma_new,
- .pcm_free = nuc900_dma_free_dma_buffers,
};
static int nuc900_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 4c6afb75eea6..706613077c15 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -412,21 +412,7 @@ static struct tty_ldisc_ops cx81801_ops = {
* over the modem port.
*/
-static int ams_delta_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
-
- /* Set cpu DAI configuration */
- return snd_soc_dai_set_fmt(rtd->cpu_dai,
- SND_SOC_DAIFMT_DSP_A |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
-}
-
-static struct snd_soc_ops ams_delta_ops = {
- .hw_params = ams_delta_hw_params,
-};
+static struct snd_soc_ops ams_delta_ops;
/* Digital mute implemented using modem/CPU multiplexer.
@@ -546,6 +532,8 @@ static struct snd_soc_dai_link ams_delta_dai_link = {
.platform_name = "omap-mcbsp.1",
.codec_name = "cx20442-codec",
.ops = &ams_delta_ops,
+ .dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
};
/* Audio card driver */
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 3f9ac7dbdc80..ccfb41c22e53 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -393,7 +393,6 @@ static int omap_hdmi_audio_remove(struct platform_device *pdev)
static struct platform_driver hdmi_audio_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = omap_hdmi_audio_probe,
.remove = omap_hdmi_audio_remove,
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 8b79cafab1e2..c7eb9dd67f60 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_CBM_CFS:
/* McBSP slave. FS clock as output */
regs->srgr2 |= FSGM;
- regs->pcr0 |= FSXM;
+ regs->pcr0 |= FSXM | FSRM;
break;
case SND_SOC_DAIFMT_CBM_CFM:
/* McBSP slave */
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
index 5e551c762b7a..fb1f6bb87cd4 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/omap/omap-twl4030.c
@@ -53,11 +53,7 @@ static int omap_twl4030_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_card *card = rtd->card;
unsigned int fmt;
- int ret;
switch (params_channels(params)) {
case 2: /* Stereo I2S mode */
@@ -74,21 +70,7 @@ static int omap_twl4030_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- /* Set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, fmt);
- if (ret < 0) {
- dev_err(card->dev, "can't set codec DAI configuration\n");
- return ret;
- }
-
- /* Set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret < 0) {
- dev_err(card->dev, "can't set cpu DAI configuration\n");
- return ret;
- }
-
- return 0;
+ return snd_soc_runtime_set_dai_fmt(rtd, fmt);
}
static struct snd_soc_ops omap_twl4030_ops = {
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 04896d6252a2..7f299357c2d2 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -250,14 +250,14 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"FM Transmitter", NULL, "LLOUT"},
{"FM Transmitter", NULL, "RLOUT"},
- {"DMic Rate 64", NULL, "Mic Bias"},
- {"Mic Bias", NULL, "DMic"},
+ {"DMic Rate 64", NULL, "DMic"},
+ {"DMic", NULL, "Mic Bias"},
{"b LINE2R", NULL, "MONO_LOUT"},
{"Earphone", NULL, "b HPLOUT"},
- {"LINE1L", NULL, "b Mic Bias"},
- {"b Mic Bias", NULL, "HS Mic"}
+ {"LINE1L", NULL, "HS Mic"},
+ {"HS Mic", NULL, "b Mic Bias"},
};
static const char * const spk_function[] = {"Off", "On"};
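
The rx51 route fixes above are easier to follow once the snd_soc_dapm_route convention is kept in mind: each entry is {sink, control, source}, so power flows from the rightmost name to the leftmost. A small illustrative fragment mirroring the corrected entries:

	#include <sound/soc-dapm.h>

	static const struct snd_soc_dapm_route example_routes[] = {
		/* {sink, control, source}: bias powers the mic, mic feeds the input pin */
		{ "DMic",   NULL, "Mic Bias" },
		{ "LINE1L", NULL, "HS Mic" },
	};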
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 2434b6d61675..39cea80846c3 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -140,7 +140,7 @@ config SND_PXA910_SOC
Marvell PXA910 reference platform.
config SND_SOC_TTC_DKB
- bool "SoC Audio support for TTC DKB"
+ tristate "SoC Audio support for TTC DKB"
depends on SND_PXA910_SOC && MACH_TTC_DKB && I2C=y
select PXA_SSP
select SND_PXA_SOC_SSP
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index b7cd0a71fd70..3580d10c9f28 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -259,20 +259,6 @@ static const struct snd_kcontrol_new wm8731_corgi_controls[] = {
corgi_set_spk),
};
-/*
- * Logic for a wm8731 as connected on a Sharp SL-C7x0 Device
- */
-static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- snd_soc_dapm_nc_pin(dapm, "LLINEIN");
- snd_soc_dapm_nc_pin(dapm, "RLINEIN");
-
- return 0;
-}
-
/* corgi digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link corgi_dai = {
.name = "WM8731",
@@ -281,7 +267,6 @@ static struct snd_soc_dai_link corgi_dai = {
.codec_dai_name = "wm8731-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm8731.0-001b",
- .init = corgi_wm8731_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &corgi_ops,
@@ -300,6 +285,7 @@ static struct snd_soc_card corgi = {
.num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets),
.dapm_routes = corgi_audio_map,
.num_dapm_routes = ARRAY_SIZE(corgi_audio_map),
+ .fully_routed = true,
};
static int corgi_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 7c691aae8af2..d72e124a3676 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -88,24 +88,6 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"Mic Amp", NULL, "Mic (Internal)"},
};
-static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- snd_soc_dapm_nc_pin(dapm, "HPOUTL");
- snd_soc_dapm_nc_pin(dapm, "HPOUTR");
- snd_soc_dapm_nc_pin(dapm, "PHONE");
- snd_soc_dapm_nc_pin(dapm, "LINEINL");
- snd_soc_dapm_nc_pin(dapm, "LINEINR");
- snd_soc_dapm_nc_pin(dapm, "CDINL");
- snd_soc_dapm_nc_pin(dapm, "CDINR");
- snd_soc_dapm_nc_pin(dapm, "PCBEEP");
- snd_soc_dapm_nc_pin(dapm, "MIC2");
-
- return 0;
-}
-
static struct snd_soc_dai_link e740_dai[] = {
{
.name = "AC97",
@@ -114,7 +96,6 @@ static struct snd_soc_dai_link e740_dai[] = {
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
- .init = e740_ac97_init,
},
{
.name = "AC97 Aux",
@@ -136,6 +117,7 @@ static struct snd_soc_card e740 = {
.num_dapm_widgets = ARRAY_SIZE(e740_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
};
static struct gpio e740_audio_gpios[] = {
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 30544b65b5a8..48f2d7c2e68c 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -70,24 +70,6 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"MIC1", NULL, "Mic (Internal)"},
};
-static int e750_ac97_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- snd_soc_dapm_nc_pin(dapm, "LOUT");
- snd_soc_dapm_nc_pin(dapm, "ROUT");
- snd_soc_dapm_nc_pin(dapm, "PHONE");
- snd_soc_dapm_nc_pin(dapm, "LINEINL");
- snd_soc_dapm_nc_pin(dapm, "LINEINR");
- snd_soc_dapm_nc_pin(dapm, "CDINL");
- snd_soc_dapm_nc_pin(dapm, "CDINR");
- snd_soc_dapm_nc_pin(dapm, "PCBEEP");
- snd_soc_dapm_nc_pin(dapm, "MIC2");
-
- return 0;
-}
-
static struct snd_soc_dai_link e750_dai[] = {
{
.name = "AC97",
@@ -96,7 +78,6 @@ static struct snd_soc_dai_link e750_dai[] = {
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
- .init = e750_ac97_init,
/* use ops to check startup state */
},
{
@@ -119,6 +100,7 @@ static struct snd_soc_card e750 = {
.num_dapm_widgets = ARRAY_SIZE(e750_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
};
static struct gpio e750_audio_gpios[] = {
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index ce26551052a3..73eb5ddf9753 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -127,15 +127,8 @@ static const struct snd_soc_dapm_route hx4700_audio_map[] = {
static int hx4700_ak4641_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
int err;
- /* NC codec pins */
- /* FIXME: is anything connected here? */
- snd_soc_dapm_nc_pin(dapm, "MOUT1");
- snd_soc_dapm_nc_pin(dapm, "MICEXT");
- snd_soc_dapm_nc_pin(dapm, "AUX");
-
/* Jack detection API stuff */
err = snd_soc_jack_new(codec, "Headphone Jack",
SND_JACK_HEADPHONE, &hs_jack);
@@ -184,6 +177,7 @@ static struct snd_soc_card snd_soc_card_hx4700 = {
.num_dapm_widgets = ARRAY_SIZE(hx4700_dapm_widgets),
.dapm_routes = hx4700_audio_map,
.num_dapm_routes = ARRAY_SIZE(hx4700_audio_map),
+ .fully_routed = true,
};
static struct gpio hx4700_audio_gpios[] = {
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 259e048681c0..241d0be42d7a 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -391,25 +391,6 @@ static const struct snd_kcontrol_new uda1380_magician_controls[] = {
magician_get_input, magician_set_input),
};
-/*
- * Logic for a uda1380 as connected on a HTC Magician
- */
-static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- /* NC codec pins */
- snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
- snd_soc_dapm_nc_pin(dapm, "VOUTRHP");
-
- /* FIXME: is anything connected here? */
- snd_soc_dapm_nc_pin(dapm, "VINL");
- snd_soc_dapm_nc_pin(dapm, "VINR");
-
- return 0;
-}
-
/* magician digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link magician_dai[] = {
{
@@ -419,7 +400,6 @@ static struct snd_soc_dai_link magician_dai[] = {
.codec_dai_name = "uda1380-hifi-playback",
.platform_name = "pxa-pcm-audio",
.codec_name = "uda1380-codec.0-0018",
- .init = magician_uda1380_init,
.ops = &magician_playback_ops,
},
{
@@ -446,6 +426,7 @@ static struct snd_soc_card snd_soc_card_magician = {
.num_dapm_widgets = ARRAY_SIZE(uda1380_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
};
static struct platform_device *magician_snd_device;
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 396dbd51a64f..a9615a574546 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -81,7 +81,7 @@ static int rear_amp_power(struct snd_soc_codec *codec, int power)
static int rear_amp_event(struct snd_soc_dapm_widget *widget,
struct snd_kcontrol *kctl, int event)
{
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = widget->dapm->card->rtd[0].codec;
return rear_amp_power(codec, SND_SOC_DAPM_EVENT_ON(event));
}
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 1eebca2f0a97..910336c5ebeb 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -68,7 +68,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"Ext. Speaker", NULL, "ROUT2"},
/* mic connected to MIC1 */
- {"Ext. Microphone", NULL, "MIC1"},
+ {"MIC1", NULL, "Ext. Microphone"},
};
static struct snd_soc_card palm27x_asoc;
@@ -76,18 +76,8 @@ static struct snd_soc_card palm27x_asoc;
static int palm27x_ac97_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
int err;
- /* not connected pins */
- snd_soc_dapm_nc_pin(dapm, "OUT3");
- snd_soc_dapm_nc_pin(dapm, "MONOOUT");
- snd_soc_dapm_nc_pin(dapm, "LINEINL");
- snd_soc_dapm_nc_pin(dapm, "LINEINR");
- snd_soc_dapm_nc_pin(dapm, "PCBEEP");
- snd_soc_dapm_nc_pin(dapm, "PHONE");
- snd_soc_dapm_nc_pin(dapm, "MIC2");
-
/* Jack detection API stuff */
err = snd_soc_jack_new(codec, "Headphone Jack",
SND_JACK_HEADPHONE, &hs_jack);
@@ -133,7 +123,8 @@ static struct snd_soc_card palm27x_asoc = {
.dapm_widgets = palm27x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(palm27x_dapm_widgets),
.dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map)
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
};
static int palm27x_asoc_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 083706595495..552b763005ed 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -88,7 +88,7 @@ static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- unsigned int fmt, clk = 0;
+ unsigned int clk = 0;
int ret = 0;
switch (params_rate(params)) {
@@ -112,15 +112,6 @@ static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- fmt = SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS;
-
- /* setup the CODEC DAI */
- ret = snd_soc_dai_set_fmt(codec_dai, fmt);
- if (ret < 0)
- return ret;
-
ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk, 0);
if (ret < 0)
return ret;
@@ -130,10 +121,6 @@ static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret < 0)
- return ret;
-
ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4);
if (ret < 0)
return ret;
@@ -169,9 +156,8 @@ static int raumfeld_ak4104_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int fmt, ret = 0, clk = 0;
+ int ret = 0, clk = 0;
switch (params_rate(params)) {
case 44100:
@@ -194,22 +180,11 @@ static int raumfeld_ak4104_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF;
-
- /* setup the CODEC DAI */
- ret = snd_soc_dai_set_fmt(codec_dai, fmt | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
/* setup the CPU DAI */
ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk);
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4);
if (ret < 0)
return ret;
@@ -233,6 +208,9 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
.platform_name = "pxa-pcm-audio", \
.codec_dai_name = "cs4270-hifi", \
.codec_name = "cs4270.0-0048", \
+ .dai_fmt = SND_SOC_DAIFMT_I2S | \
+ SND_SOC_DAIFMT_NB_NF | \
+ SND_SOC_DAIFMT_CBS_CFS, \
.ops = &raumfeld_cs4270_ops, \
}
@@ -243,6 +221,9 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
.cpu_dai_name = "pxa-ssp-dai.1", \
.codec_dai_name = "ak4104-hifi", \
.platform_name = "pxa-pcm-audio", \
+ .dai_fmt = SND_SOC_DAIFMT_I2S | \
+ SND_SOC_DAIFMT_NB_NF | \
+ SND_SOC_DAIFMT_CBS_CFS, \
.ops = &raumfeld_ak4104_ops, \
.codec_name = "spi0.0", \
}
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index d7d5fb20ea6f..461123ad5ff2 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -256,26 +256,6 @@ static const struct snd_kcontrol_new wm8750_spitz_controls[] = {
spitz_set_spk),
};
-/*
- * Logic for a wm8750 as connected on a Sharp SL-Cxx00 Device
- */
-static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- /* NC codec pins */
- snd_soc_dapm_nc_pin(dapm, "RINPUT1");
- snd_soc_dapm_nc_pin(dapm, "LINPUT2");
- snd_soc_dapm_nc_pin(dapm, "RINPUT2");
- snd_soc_dapm_nc_pin(dapm, "LINPUT3");
- snd_soc_dapm_nc_pin(dapm, "RINPUT3");
- snd_soc_dapm_nc_pin(dapm, "OUT3");
- snd_soc_dapm_nc_pin(dapm, "MONO1");
-
- return 0;
-}
-
/* spitz digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link spitz_dai = {
.name = "wm8750",
@@ -284,7 +264,6 @@ static struct snd_soc_dai_link spitz_dai = {
.codec_dai_name = "wm8750-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm8750.0-001b",
- .init = spitz_wm8750_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &spitz_ops,
@@ -303,6 +282,7 @@ static struct snd_soc_card snd_soc_spitz = {
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = spitz_audio_map,
.num_dapm_routes = ARRAY_SIZE(spitz_audio_map),
+ .fully_routed = true,
};
static int spitz_probe(struct platform_device *pdev)
@@ -352,7 +332,6 @@ static int spitz_remove(struct platform_device *pdev)
static struct platform_driver spitz_driver = {
.driver = {
.name = "spitz-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = spitz_probe,
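
The pxa machine drivers above (corgi, e740, e750, hx4700, magician, palm27x, spitz) all drop init callbacks full of snd_soc_dapm_nc_pin() calls and set .fully_routed on the card instead; with that flag the ASoC core is expected to mark any codec pin with no route in the card's DAPM map as not connected. A sketch of the resulting card, with illustrative names and only the fields relevant here:

	#include <linux/module.h>
	#include <sound/soc.h>

	static const struct snd_soc_dapm_route example_map[] = {
		{ "Ext Spk", NULL, "ROUT2" },
	};

	static struct snd_soc_card example_card = {
		.name		 = "example-audio",
		.owner		 = THIS_MODULE,
		.dapm_routes	 = example_map,
		.num_dapm_routes = ARRAY_SIZE(example_map),
		/* Pins absent from the route map are treated as not connected */
		.fully_routed	 = true,
	};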
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index e3d7257ad09c..5001dbb9b257 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -76,10 +76,6 @@ static const struct snd_soc_dapm_route ttc_audio_map[] = {
static int ttc_pm860x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
- snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
/* Headset jack detection */
snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index 23bf991e95d5..8f301c72ee5e 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -130,16 +130,6 @@ static int zylonite_voice_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
return 0;
}
@@ -172,6 +162,8 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.platform_name = "pxa-pcm-audio",
.cpu_dai_name = "pxa-ssp-dai.2",
.codec_dai_name = "wm9713-voice",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &zylonite_voice_ops,
},
};
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 13d8507333b8..acb5be53bfb4 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -247,6 +247,10 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(i2s->regmap, I2S_TXCR, I2S_TXCR_VDW_MASK, val);
regmap_update_bits(i2s->regmap, I2S_RXCR, I2S_RXCR_VDW_MASK, val);
+ regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
+ I2S_DMACR_TDL(16));
+ regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_RDL_MASK,
+ I2S_DMACR_RDL(16));
return 0;
}
@@ -335,6 +339,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
SNDRV_PCM_FMTBIT_S24_LE),
},
.ops = &rockchip_i2s_dai_ops,
+ .symmetric_rates = 1,
};
static const struct snd_soc_component_driver rockchip_i2s_component = {
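
The rockchip change sets the transmit and receive DMA trigger levels and flags the DAI with symmetric_rates, which asks the core to constrain a capture stream to the rate already in use for playback (and vice versa) on the same DAI. A kernel-style sketch of the flag, with an illustrative DAI definition:

	#include <sound/soc.h>

	static struct snd_soc_dai_driver example_i2s_dai = {
		.playback = {
			.channels_min = 2,
			.channels_max = 2,
			.rates	      = SNDRV_PCM_RATE_8000_192000,
			.formats      = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.capture = {
			.channels_min = 2,
			.channels_max = 2,
			.rates	      = SNDRV_PCM_RATE_8000_192000,
			.formats      = SNDRV_PCM_FMTBIT_S16_LE,
		},
		/* Playback and capture must run at the same sample rate */
		.symmetric_rates = 1,
	};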
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index fc67f97f19f6..3cebf6ca03df 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -54,7 +54,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
- depends on REGMAP_I2C
+ depends on I2C
select SND_SOC_WM8580
select SND_SAMSUNG_I2S
help
@@ -146,17 +146,6 @@ config SND_SOC_SMARTQ
select SND_SAMSUNG_I2S
select SND_SOC_WM8750
-config SND_SOC_GONI_AQUILA_WM8994
- tristate "SoC I2S Audio support for AQUILA/GONI - WM8994"
- depends on SND_SOC_SAMSUNG && (MACH_GONI || MACH_AQUILA)
- depends on I2C=y
- select SND_SAMSUNG_I2S
- select MFD_WM8994
- select SND_SOC_WM8994
- help
- Say Y if you want to add support for SoC audio on goni or aquila
- with the WM8994.
-
config SND_SOC_SAMSUNG_SMDK_SPDIF
tristate "SoC S/PDIF Audio support for SMDK"
depends on SND_SOC_SAMSUNG
@@ -167,7 +156,7 @@ config SND_SOC_SAMSUNG_SMDK_SPDIF
config SND_SOC_SMDK_WM8580_PCM
tristate "SoC PCM Audio support for WM8580 on SMDK"
depends on SND_SOC_SAMSUNG && (MACH_SMDKV210 || MACH_SMDKC110)
- depends on REGMAP_I2C
+ depends on I2C
select SND_SOC_WM8580
select SND_SAMSUNG_PCM
help
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index 31e3dba7e3b5..052fe71be518 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -35,7 +35,6 @@ snd-soc-smdk-wm8994-objs := smdk_wm8994.o
snd-soc-snow-objs := snow.o
snd-soc-smdk-wm9713-objs := smdk_wm9713.o
snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
-snd-soc-goni-wm8994-objs := goni_wm8994.o
snd-soc-smdk-spdif-objs := smdk_spdif.o
snd-soc-smdk-wm8580pcm-objs := smdk_wm8580pcm.o
snd-soc-smdk-wm8994pcm-objs := smdk_wm8994pcm.o
@@ -63,7 +62,6 @@ obj-$(CONFIG_SND_SOC_SNOW) += snd-soc-snow.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o
obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o
-obj-$(CONFIG_SND_SOC_GONI_AQUILA_WM8994) += snd-soc-goni-wm8994.o
obj-$(CONFIG_SND_SOC_SMDK_WM8580_PCM) += snd-soc-smdk-wm8580pcm.o
obj-$(CONFIG_SND_SOC_SMDK_WM8994_PCM) += snd-soc-smdk-wm8994pcm.o
obj-$(CONFIG_SND_SOC_SPEYSIDE) += snd-soc-speyside.o
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
index 1e2b61ca8db2..8bf2e2c4bafb 100644
--- a/sound/soc/samsung/arndale_rt5631.c
+++ b/sound/soc/samsung/arndale_rt5631.c
@@ -135,7 +135,6 @@ MODULE_DEVICE_TABLE(of, samsung_arndale_rt5631_of_match);
static struct platform_driver arndale_audio_driver = {
.driver = {
.name = "arndale-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
},
diff --git a/sound/soc/samsung/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
deleted file mode 100644
index 3b527dcfc0aa..000000000000
--- a/sound/soc/samsung/goni_wm8994.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * goni_wm8994.c
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-
-#include <asm/mach-types.h>
-#include <mach/gpio-samsung.h>
-
-#include "../codecs/wm8994.h"
-
-#define MACHINE_NAME 0
-#define CPU_VOICE_DAI 1
-
-static const char *aquila_str[] = {
- [MACHINE_NAME] = "aquila",
- [CPU_VOICE_DAI] = "aquila-voice-dai",
-};
-
-static struct snd_soc_card goni;
-static struct platform_device *goni_snd_device;
-
-/* 3.5 pie jack */
-static struct snd_soc_jack jack;
-
-/* 3.5 pie jack detection DAPM pins */
-static struct snd_soc_jack_pin jack_pins[] = {
- {
- .pin = "Headset Mic",
- .mask = SND_JACK_MICROPHONE,
- }, {
- .pin = "Headset Stereophone",
- .mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL |
- SND_JACK_AVOUT,
- },
-};
-
-/* 3.5 pie jack detection gpios */
-static struct snd_soc_jack_gpio jack_gpios[] = {
- {
- .gpio = S5PV210_GPH0(6),
- .name = "DET_3.5",
- .report = SND_JACK_HEADSET | SND_JACK_MECHANICAL |
- SND_JACK_AVOUT,
- .debounce_time = 200,
- },
-};
-
-static const struct snd_soc_dapm_widget goni_dapm_widgets[] = {
- SND_SOC_DAPM_SPK("Ext Left Spk", NULL),
- SND_SOC_DAPM_SPK("Ext Right Spk", NULL),
- SND_SOC_DAPM_SPK("Ext Rcv", NULL),
- SND_SOC_DAPM_HP("Headset Stereophone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_MIC("Main Mic", NULL),
- SND_SOC_DAPM_MIC("2nd Mic", NULL),
- SND_SOC_DAPM_LINE("Radio In", NULL),
-};
-
-static const struct snd_soc_dapm_route goni_dapm_routes[] = {
- {"Ext Left Spk", NULL, "SPKOUTLP"},
- {"Ext Left Spk", NULL, "SPKOUTLN"},
-
- {"Ext Right Spk", NULL, "SPKOUTRP"},
- {"Ext Right Spk", NULL, "SPKOUTRN"},
-
- {"Ext Rcv", NULL, "HPOUT2N"},
- {"Ext Rcv", NULL, "HPOUT2P"},
-
- {"Headset Stereophone", NULL, "HPOUT1L"},
- {"Headset Stereophone", NULL, "HPOUT1R"},
-
- {"IN1RN", NULL, "Headset Mic"},
- {"IN1RP", NULL, "Headset Mic"},
-
- {"IN1RN", NULL, "2nd Mic"},
- {"IN1RP", NULL, "2nd Mic"},
-
- {"IN1LN", NULL, "Main Mic"},
- {"IN1LP", NULL, "Main Mic"},
-
- {"IN2LN", NULL, "Radio In"},
- {"IN2RN", NULL, "Radio In"},
-};
-
-static int goni_wm8994_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret;
-
- /* set endpoints to not connected */
- snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
- snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
- snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
- snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
- snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
- snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
-
- if (machine_is_aquila()) {
- snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
- snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
- }
-
- /* Headset jack detection */
- ret = snd_soc_jack_new(codec, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
- &jack);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int goni_hifi_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- unsigned int pll_out = 24000000;
- int ret = 0;
-
- /* set the cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- /* set the codec FLL */
- ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out,
- params_rate(params) * 256);
- if (ret < 0)
- return ret;
-
- /* set the codec system clock */
- ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
- params_rate(params) * 256, SND_SOC_CLOCK_IN);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_ops goni_hifi_ops = {
- .hw_params = goni_hifi_hw_params,
-};
-
-static int goni_voice_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- unsigned int pll_out = 24000000;
- int ret = 0;
-
- if (params_rate(params) != 8000)
- return -EINVAL;
-
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
- SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- /* set the codec FLL */
- ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out,
- params_rate(params) * 256);
- if (ret < 0)
- return ret;
-
- /* set the codec system clock */
- ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2,
- params_rate(params) * 256, SND_SOC_CLOCK_IN);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_dai_driver voice_dai = {
- .name = "goni-voice-dai",
- .id = 0,
- .playback = {
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
- .capture = {
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static const struct snd_soc_component_driver voice_component = {
- .name = "goni-voice",
-};
-
-static struct snd_soc_ops goni_voice_ops = {
- .hw_params = goni_voice_hw_params,
-};
-
-static struct snd_soc_dai_link goni_dai[] = {
-{
- .name = "WM8994",
- .stream_name = "WM8994 HiFi",
- .cpu_dai_name = "samsung-i2s.0",
- .codec_dai_name = "wm8994-aif1",
- .platform_name = "samsung-i2s.0",
- .codec_name = "wm8994-codec.0-001a",
- .init = goni_wm8994_init,
- .ops = &goni_hifi_ops,
-}, {
- .name = "WM8994 Voice",
- .stream_name = "Voice",
- .cpu_dai_name = "goni-voice-dai",
- .codec_dai_name = "wm8994-aif2",
- .codec_name = "wm8994-codec.0-001a",
- .ops = &goni_voice_ops,
-},
-};
-
-static struct snd_soc_card goni = {
- .name = "goni",
- .owner = THIS_MODULE,
- .dai_link = goni_dai,
- .num_links = ARRAY_SIZE(goni_dai),
-
- .dapm_widgets = goni_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(goni_dapm_widgets),
- .dapm_routes = goni_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(goni_dapm_routes),
-};
-
-static int __init goni_init(void)
-{
- int ret;
-
- if (machine_is_aquila()) {
- voice_dai.name = aquila_str[CPU_VOICE_DAI];
- goni_dai[1].cpu_dai_name = aquila_str[CPU_VOICE_DAI];
- goni.name = aquila_str[MACHINE_NAME];
- } else if (!machine_is_goni())
- return -ENODEV;
-
- goni_snd_device = platform_device_alloc("soc-audio", -1);
- if (!goni_snd_device)
- return -ENOMEM;
-
- /* register voice DAI here */
- ret = devm_snd_soc_register_component(&goni_snd_device->dev,
- &voice_component, &voice_dai, 1);
- if (ret) {
- platform_device_put(goni_snd_device);
- return ret;
- }
-
- platform_set_drvdata(goni_snd_device, &goni);
- ret = platform_device_add(goni_snd_device);
-
- if (ret)
- platform_device_put(goni_snd_device);
-
- return ret;
-}
-
-static void __exit goni_exit(void)
-{
- platform_device_unregister(goni_snd_device);
-}
-
-module_init(goni_init);
-module_exit(goni_exit);
-
-/* Module information */
-MODULE_DESCRIPTION("ALSA SoC WM8994 GONI(S5PV210)");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index f2d7980d7ddc..59b044255b78 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -76,7 +76,6 @@ static int h1940_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
int div;
int ret;
unsigned int rate = params_rate(params);
@@ -95,18 +94,6 @@ static int h1940_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
/* select clock source */
ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate,
SND_SOC_CLOCK_OUT);
@@ -207,6 +194,8 @@ static struct snd_soc_dai_link h1940_uda1380_dai[] = {
.init = h1940_uda1380_init,
.platform_name = "s3c24xx-iis",
.codec_name = "uda1380-codec.0-001a",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &h1940_ops,
},
};
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index b5a80c528d86..b92ab40d2be6 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -10,9 +10,11 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/sound/samsung-i2s.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -59,10 +61,8 @@ struct samsung_i2s_dai_data {
struct i2s_dai {
/* Platform device for this DAI */
struct platform_device *pdev;
- /* IOREMAP'd SFRs */
+ /* Memory mapped SFR region */
void __iomem *addr;
- /* Physical base address of SFRs */
- u32 base;
/* Rate of RCLK source clock */
unsigned long rclk_srcrate;
/* Frame Clock */
@@ -83,8 +83,6 @@ struct i2s_dai {
#define DAI_OPENED (1 << 0) /* Dai is opened */
#define DAI_MANAGER (1 << 1) /* Dai is the manager */
unsigned mode;
- /* CDCLK pin direction: 0 - input, 1 - output */
- unsigned int cdclk_out:1;
/* Driver for this DAI */
struct snd_soc_dai_driver i2s_dai_drv;
/* DMA parameters */
@@ -95,8 +93,15 @@ struct i2s_dai {
u32 suspend_i2smod;
u32 suspend_i2scon;
u32 suspend_i2spsr;
- unsigned long gpios[7]; /* i2s gpio line numbers */
const struct samsung_i2s_variant_regs *variant_regs;
+
+ /* Spinlock protecting access to the device's registers */
+ spinlock_t spinlock;
+ spinlock_t *lock;
+
+ /* Below fields are only valid if this is the primary FIFO */
+ struct clk *clk_table[3];
+ struct clk_onecell_data clk_data;
};
/* Lock for cross i/f checks */
@@ -133,10 +138,16 @@ static inline bool tx_active(struct i2s_dai *i2s)
return active ? true : false;
}
+/* Return pointer to the other DAI */
+static inline struct i2s_dai *get_other_dai(struct i2s_dai *i2s)
+{
+ return i2s->pri_dai ? : i2s->sec_dai;
+}
+
/* If the other interface of the controller is transmitting data */
static inline bool other_tx_active(struct i2s_dai *i2s)
{
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ struct i2s_dai *other = get_other_dai(i2s);
return tx_active(other);
}
@@ -163,7 +174,7 @@ static inline bool rx_active(struct i2s_dai *i2s)
/* If the other interface of the controller is receiving data */
static inline bool other_rx_active(struct i2s_dai *i2s)
{
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ struct i2s_dai *other = get_other_dai(i2s);
return rx_active(other);
}
@@ -464,18 +475,23 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int rfs, int dir)
{
struct i2s_dai *i2s = to_info(dai);
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
- u32 mod = readl(i2s->addr + I2SMOD);
+ struct i2s_dai *other = get_other_dai(i2s);
const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
+ u32 mod, mask, val = 0;
+
+ spin_lock(i2s->lock);
+ mod = readl(i2s->addr + I2SMOD);
+ spin_unlock(i2s->lock);
switch (clk_id) {
case SAMSUNG_I2S_OPCLK:
- mod &= ~MOD_OPCLK_MASK;
- mod |= dir;
+ mask = MOD_OPCLK_MASK;
+ val = dir;
break;
case SAMSUNG_I2S_CDCLK:
+ mask = 1 << i2s_regs->cdclkcon_off;
/* Shouldn't matter in GATING(CLOCK_IN) mode */
if (dir == SND_SOC_CLOCK_IN)
rfs = 0;
@@ -492,15 +508,15 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
}
if (dir == SND_SOC_CLOCK_IN)
- mod |= 1 << i2s_regs->cdclkcon_off;
- else
- mod &= ~(1 << i2s_regs->cdclkcon_off);
+ val = 1 << i2s_regs->cdclkcon_off;
i2s->rfs = rfs;
break;
case SAMSUNG_I2S_RCLKSRC_0: /* clock corresponding to IISMOD[10] := 0 */
case SAMSUNG_I2S_RCLKSRC_1: /* clock corresponding to IISMOD[10] := 1 */
+ mask = 1 << i2s_regs->rclksrc_off;
+
if ((i2s->quirks & QUIRK_NO_MUXPSR)
|| (clk_id == SAMSUNG_I2S_RCLKSRC_0))
clk_id = 0;
@@ -550,18 +566,19 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
return 0;
}
- if (clk_id == 0)
- mod &= ~(1 << i2s_regs->rclksrc_off);
- else
- mod |= 1 << i2s_regs->rclksrc_off;
-
+ if (clk_id == 1)
+ val = 1 << i2s_regs->rclksrc_off;
break;
default:
dev_err(&i2s->pdev->dev, "We don't serve that!\n");
return -EINVAL;
}
+ spin_lock(i2s->lock);
+ mod = readl(i2s->addr + I2SMOD);
+ mod = (mod & ~mask) | val;
writel(mod, i2s->addr + I2SMOD);
+ spin_unlock(i2s->lock);
return 0;
}
@@ -570,9 +587,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct i2s_dai *i2s = to_info(dai);
- u32 mod = readl(i2s->addr + I2SMOD);
int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
- u32 tmp = 0;
+ u32 mod, tmp = 0;
lrp_shift = i2s->variant_regs->lrp_off;
sdf_shift = i2s->variant_regs->sdf_off;
@@ -632,12 +648,15 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
+ spin_lock(i2s->lock);
+ mod = readl(i2s->addr + I2SMOD);
/*
* Don't change the I2S mode if any controller is active on this
* channel.
*/
if (any_active(i2s) &&
((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
+ spin_unlock(i2s->lock);
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
@@ -646,6 +665,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
mod &= ~(sdf_mask | lrp_rlow | mod_slave);
mod |= tmp;
writel(mod, i2s->addr + I2SMOD);
+ spin_unlock(i2s->lock);
return 0;
}
@@ -654,16 +674,16 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = to_info(dai);
- u32 mod = readl(i2s->addr + I2SMOD);
+ u32 mod, mask = 0, val = 0;
if (!is_secondary(i2s))
- mod &= ~(MOD_DC2_EN | MOD_DC1_EN);
+ mask |= (MOD_DC2_EN | MOD_DC1_EN);
switch (params_channels(params)) {
case 6:
- mod |= MOD_DC2_EN;
+ val |= MOD_DC2_EN;
case 4:
- mod |= MOD_DC1_EN;
+ val |= MOD_DC1_EN;
break;
case 2:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -685,44 +705,49 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
}
if (is_secondary(i2s))
- mod &= ~MOD_BLCS_MASK;
+ mask |= MOD_BLCS_MASK;
else
- mod &= ~MOD_BLCP_MASK;
+ mask |= MOD_BLCP_MASK;
if (is_manager(i2s))
- mod &= ~MOD_BLC_MASK;
+ mask |= MOD_BLC_MASK;
switch (params_width(params)) {
case 8:
if (is_secondary(i2s))
- mod |= MOD_BLCS_8BIT;
+ val |= MOD_BLCS_8BIT;
else
- mod |= MOD_BLCP_8BIT;
+ val |= MOD_BLCP_8BIT;
if (is_manager(i2s))
- mod |= MOD_BLC_8BIT;
+ val |= MOD_BLC_8BIT;
break;
case 16:
if (is_secondary(i2s))
- mod |= MOD_BLCS_16BIT;
+ val |= MOD_BLCS_16BIT;
else
- mod |= MOD_BLCP_16BIT;
+ val |= MOD_BLCP_16BIT;
if (is_manager(i2s))
- mod |= MOD_BLC_16BIT;
+ val |= MOD_BLC_16BIT;
break;
case 24:
if (is_secondary(i2s))
- mod |= MOD_BLCS_24BIT;
+ val |= MOD_BLCS_24BIT;
else
- mod |= MOD_BLCP_24BIT;
+ val |= MOD_BLCP_24BIT;
if (is_manager(i2s))
- mod |= MOD_BLC_24BIT;
+ val |= MOD_BLC_24BIT;
break;
default:
dev_err(&i2s->pdev->dev, "Format(%d) not supported\n",
params_format(params));
return -EINVAL;
}
+
+ spin_lock(i2s->lock);
+ mod = readl(i2s->addr + I2SMOD);
+ mod = (mod & ~mask) | val;
writel(mod, i2s->addr + I2SMOD);
+ spin_unlock(i2s->lock);
samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
@@ -736,7 +761,7 @@ static int i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = to_info(dai);
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
spin_lock_irqsave(&lock, flags);
@@ -753,9 +778,6 @@ static int i2s_startup(struct snd_pcm_substream *substream,
spin_unlock_irqrestore(&lock, flags);
- if (!is_opened(other) && i2s->cdclk_out)
- i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
- 0, SND_SOC_CLOCK_OUT);
return 0;
}
@@ -763,38 +785,27 @@ static void i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = to_info(dai);
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
- const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
spin_lock_irqsave(&lock, flags);
i2s->mode &= ~DAI_OPENED;
i2s->mode &= ~DAI_MANAGER;
- if (is_opened(other)) {
+ if (is_opened(other))
other->mode |= DAI_MANAGER;
- } else {
- u32 mod = readl(i2s->addr + I2SMOD);
- i2s->cdclk_out = !(mod & (1 << i2s_regs->cdclkcon_off));
- if (other)
- other->cdclk_out = i2s->cdclk_out;
- }
+
/* Reset any constraint on RFS and BFS */
i2s->rfs = 0;
i2s->bfs = 0;
spin_unlock_irqrestore(&lock, flags);
-
- /* Gate CDCLK by default */
- if (!is_opened(other))
- i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
- 0, SND_SOC_CLOCK_IN);
}
static int config_setup(struct i2s_dai *i2s)
{
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ struct i2s_dai *other = get_other_dai(i2s);
unsigned rfs, bfs, blc;
u32 psr;
@@ -864,10 +875,10 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- local_irq_save(flags);
+ spin_lock_irqsave(i2s->lock, flags);
if (config_setup(i2s)) {
- local_irq_restore(flags);
+ spin_unlock_irqrestore(i2s->lock, flags);
return -EINVAL;
}
@@ -876,12 +887,12 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
else
i2s_txctrl(i2s, 1);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(i2s->lock, flags);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- local_irq_save(flags);
+ spin_lock_irqsave(i2s->lock, flags);
if (capture) {
i2s_rxctrl(i2s, 0);
@@ -891,7 +902,7 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
i2s_fifo(i2s, FIC_TXFLUSH);
}
- local_irq_restore(flags);
+ spin_unlock_irqrestore(i2s->lock, flags);
break;
}
@@ -902,7 +913,7 @@ static int i2s_set_clkdiv(struct snd_soc_dai *dai,
int div_id, int div)
{
struct i2s_dai *i2s = to_info(dai);
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ struct i2s_dai *other = get_other_dai(i2s);
switch (div_id) {
case SAMSUNG_I2S_DIV_BCLK:
@@ -971,58 +982,36 @@ static int i2s_resume(struct snd_soc_dai *dai)
static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = to_info(dai);
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
- int ret;
+ struct i2s_dai *other = get_other_dai(i2s);
+ unsigned long flags;
- if (other && other->clk) { /* If this is probe on secondary */
+ if (is_secondary(i2s)) { /* If this is probe on the secondary DAI */
samsung_asoc_init_dma_data(dai, &other->sec_dai->dma_playback,
NULL);
- goto probe_exit;
- }
-
- i2s->addr = ioremap(i2s->base, 0x100);
- if (i2s->addr == NULL) {
- dev_err(&i2s->pdev->dev, "cannot ioremap registers\n");
- return -ENXIO;
- }
-
- i2s->clk = clk_get(&i2s->pdev->dev, "iis");
- if (IS_ERR(i2s->clk)) {
- dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n");
- iounmap(i2s->addr);
- return PTR_ERR(i2s->clk);
- }
-
- ret = clk_prepare_enable(i2s->clk);
- if (ret != 0) {
- dev_err(&i2s->pdev->dev, "failed to enable clock: %d\n", ret);
- return ret;
- }
-
- samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
-
- if (other) {
- other->addr = i2s->addr;
- other->clk = i2s->clk;
- }
+ } else {
+ samsung_asoc_init_dma_data(dai, &i2s->dma_playback,
+ &i2s->dma_capture);
- if (i2s->quirks & QUIRK_NEED_RSTCLR)
- writel(CON_RSTCLR, i2s->addr + I2SCON);
+ if (i2s->quirks & QUIRK_NEED_RSTCLR)
+ writel(CON_RSTCLR, i2s->addr + I2SCON);
- if (i2s->quirks & QUIRK_SUPPORTS_IDMA)
- idma_reg_addr_init(i2s->addr,
+ if (i2s->quirks & QUIRK_SUPPORTS_IDMA)
+ idma_reg_addr_init(i2s->addr,
i2s->sec_dai->idma_playback.dma_addr);
+ }
-probe_exit:
/* Reset any constraint on RFS and BFS */
i2s->rfs = 0;
i2s->bfs = 0;
i2s->rclk_srcrate = 0;
+
+ spin_lock_irqsave(i2s->lock, flags);
i2s_txctrl(i2s, 0);
i2s_rxctrl(i2s, 0);
i2s_fifo(i2s, FIC_TXFLUSH);
i2s_fifo(other, FIC_TXFLUSH);
i2s_fifo(i2s, FIC_RXFLUSH);
+ spin_unlock_irqrestore(i2s->lock, flags);
/* Gate CDCLK by default */
if (!is_opened(other))
@@ -1035,21 +1024,15 @@ probe_exit:
static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
- struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
-
- if (!other || !other->clk) {
- if (i2s->quirks & QUIRK_NEED_RSTCLR)
+ if (!is_secondary(i2s)) {
+ if (i2s->quirks & QUIRK_NEED_RSTCLR) {
+ spin_lock(i2s->lock);
writel(0, i2s->addr + I2SCON);
-
- clk_disable_unprepare(i2s->clk);
- clk_put(i2s->clk);
-
- iounmap(i2s->addr);
+ spin_unlock(i2s->lock);
+ }
}
- i2s->clk = NULL;
-
return 0;
}
@@ -1124,15 +1107,14 @@ static const struct of_device_id exynos_i2s_match[];
static inline const struct samsung_i2s_dai_data *samsung_i2s_get_driver_data(
struct platform_device *pdev)
{
-#ifdef CONFIG_OF
- if (pdev->dev.of_node) {
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(exynos_i2s_match, pdev->dev.of_node);
- return match->data;
- } else
-#endif
+ return match ? match->data : NULL;
+ } else {
return (struct samsung_i2s_dai_data *)
platform_get_device_id(pdev)->driver_data;
+ }
}
#ifdef CONFIG_PM
@@ -1155,6 +1137,87 @@ static int i2s_runtime_resume(struct device *dev)
}
#endif /* CONFIG_PM */
+static void i2s_unregister_clocks(struct i2s_dai *i2s)
+{
+ int i;
+
+ for (i = 0; i < i2s->clk_data.clk_num; i++) {
+ if (!IS_ERR(i2s->clk_table[i]))
+ clk_unregister(i2s->clk_table[i]);
+ }
+}
+
+static void i2s_unregister_clock_provider(struct platform_device *pdev)
+{
+ struct i2s_dai *i2s = dev_get_drvdata(&pdev->dev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+ i2s_unregister_clocks(i2s);
+}
+
+static int i2s_register_clock_provider(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct i2s_dai *i2s = dev_get_drvdata(dev);
+ const char *clk_name[2] = { "i2s_opclk0", "i2s_opclk1" };
+ const char *p_names[2] = { NULL };
+ const struct samsung_i2s_variant_regs *reg_info = i2s->variant_regs;
+ struct clk *rclksrc;
+ int ret, i;
+
+ /* Register the clock provider only if it's expected in the DTB */
+ if (!of_find_property(dev->of_node, "#clock-cells", NULL))
+ return 0;
+
+ /* Get the RCLKSRC mux clock parent clock names */
+ for (i = 0; i < ARRAY_SIZE(p_names); i++) {
+ rclksrc = clk_get(dev, clk_name[i]);
+ if (IS_ERR(rclksrc))
+ continue;
+ p_names[i] = __clk_get_name(rclksrc);
+ clk_put(rclksrc);
+ }
+
+ if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
+ /* Activate the prescaler */
+ u32 val = readl(i2s->addr + I2SPSR);
+ writel(val | PSR_PSREN, i2s->addr + I2SPSR);
+
+ i2s->clk_table[CLK_I2S_RCLK_SRC] = clk_register_mux(NULL,
+ "i2s_rclksrc", p_names, ARRAY_SIZE(p_names),
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ i2s->addr + I2SMOD, reg_info->rclksrc_off,
+ 1, 0, i2s->lock);
+
+ i2s->clk_table[CLK_I2S_RCLK_PSR] = clk_register_divider(NULL,
+ "i2s_presc", "i2s_rclksrc",
+ CLK_SET_RATE_PARENT,
+ i2s->addr + I2SPSR, 8, 6, 0, i2s->lock);
+
+ p_names[0] = "i2s_presc";
+ i2s->clk_data.clk_num = 2;
+ }
+ of_property_read_string_index(dev->of_node,
+ "clock-output-names", 0, &clk_name[0]);
+
+ i2s->clk_table[CLK_I2S_CDCLK] = clk_register_gate(NULL, clk_name[0],
+ p_names[0], CLK_SET_RATE_PARENT,
+ i2s->addr + I2SMOD, reg_info->cdclkcon_off,
+ CLK_GATE_SET_TO_DISABLE, i2s->lock);
+
+ i2s->clk_data.clk_num += 1;
+ i2s->clk_data.clks = i2s->clk_table;
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
+ &i2s->clk_data);
+ if (ret < 0) {
+ dev_err(dev, "failed to add clock provider: %d\n", ret);
+ i2s_unregister_clocks(i2s);
+ }
+
+ return ret;
+}
+
static int samsung_i2s_probe(struct platform_device *pdev)
{
struct i2s_dai *pri_dai, *sec_dai = NULL;
@@ -1164,7 +1227,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
u32 regs_base, quirks = 0, idma_addr = 0;
struct device_node *np = pdev->dev.of_node;
const struct samsung_i2s_dai_data *i2s_dai_data;
- int ret = 0;
+ int ret;
	/* Call during Secondary interface registration */
i2s_dai_data = samsung_i2s_get_driver_data(pdev);
@@ -1175,11 +1238,13 @@ static int samsung_i2s_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to get drvdata\n");
return -EFAULT;
}
- devm_snd_soc_register_component(&sec_dai->pdev->dev,
+ ret = devm_snd_soc_register_component(&sec_dai->pdev->dev,
&samsung_i2s_component,
&sec_dai->i2s_dai_drv, 1);
- samsung_asoc_dma_platform_register(&pdev->dev);
- return 0;
+ if (ret != 0)
+ return ret;
+
+ return samsung_asoc_dma_platform_register(&pdev->dev);
}
pri_dai = i2s_alloc_dai(pdev, false);
@@ -1188,6 +1253,9 @@ static int samsung_i2s_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ spin_lock_init(&pri_dai->spinlock);
+ pri_dai->lock = &pri_dai->spinlock;
+
if (!np) {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
@@ -1229,25 +1297,29 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
- return -ENXIO;
- }
+ pri_dai->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pri_dai->addr))
+ return PTR_ERR(pri_dai->addr);
- if (!request_mem_region(res->start, resource_size(res),
- "samsung-i2s")) {
- dev_err(&pdev->dev, "Unable to request SFR region\n");
- return -EBUSY;
- }
regs_base = res->start;
+ pri_dai->clk = devm_clk_get(&pdev->dev, "iis");
+ if (IS_ERR(pri_dai->clk)) {
+ dev_err(&pdev->dev, "Failed to get iis clock\n");
+ return PTR_ERR(pri_dai->clk);
+ }
+
+ ret = clk_prepare_enable(pri_dai->clk);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
pri_dai->dma_playback.ch_name = "tx";
pri_dai->dma_capture.ch_name = "rx";
pri_dai->dma_playback.dma_size = 4;
pri_dai->dma_capture.dma_size = 4;
- pri_dai->base = regs_base;
pri_dai->quirks = quirks;
pri_dai->variant_regs = i2s_dai_data->i2s_variant_regs;
@@ -1258,10 +1330,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
sec_dai = i2s_alloc_dai(pdev, true);
if (!sec_dai) {
dev_err(&pdev->dev, "Unable to alloc I2S_sec\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
+ sec_dai->lock = &pri_dai->spinlock;
sec_dai->variant_regs = pri_dai->variant_regs;
sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
sec_dai->dma_playback.ch_name = "tx-sec";
@@ -1273,7 +1345,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
sec_dai->dma_playback.dma_size = 4;
- sec_dai->base = regs_base;
+ sec_dai->addr = pri_dai->addr;
+ sec_dai->clk = pri_dai->clk;
sec_dai->quirks = quirks;
sec_dai->idma_playback.dma_addr = idma_addr;
sec_dai->pri_dai = pri_dai;
@@ -1282,8 +1355,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
dev_err(&pdev->dev, "Unable to configure gpio\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
devm_snd_soc_register_component(&pri_dai->pdev->dev,
@@ -1292,32 +1364,30 @@ static int samsung_i2s_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- samsung_asoc_dma_platform_register(&pdev->dev);
-
- return 0;
-err:
- if (res)
- release_mem_region(regs_base, resource_size(res));
+ ret = samsung_asoc_dma_platform_register(&pdev->dev);
+ if (ret != 0)
+ return ret;
- return ret;
+ return i2s_register_clock_provider(pdev);
}
static int samsung_i2s_remove(struct platform_device *pdev)
{
struct i2s_dai *i2s, *other;
- struct resource *res;
i2s = dev_get_drvdata(&pdev->dev);
- other = i2s->pri_dai ? : i2s->sec_dai;
+ other = get_other_dai(i2s);
if (other) {
other->pri_dai = NULL;
other->sec_dai = NULL;
} else {
pm_runtime_disable(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
+ }
+
+ if (!is_secondary(i2s)) {
+ i2s_unregister_clock_provider(pdev);
+ clk_disable_unprepare(i2s->clk);
}
i2s->pri_dai = NULL;
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index b5f6abd9d221..7fcb51faa2a0 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -61,20 +61,6 @@ static int jive_hw_params(struct snd_pcm_substream *substream,
s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
s3c_i2sv2_get_clock(cpu_dai));
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
SND_SOC_CLOCK_IN);
@@ -97,22 +83,6 @@ static struct snd_soc_ops jive_ops = {
.hw_params = jive_hw_params,
};
-static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- /* These endpoints are not being used. */
- snd_soc_dapm_nc_pin(dapm, "LINPUT2");
- snd_soc_dapm_nc_pin(dapm, "RINPUT2");
- snd_soc_dapm_nc_pin(dapm, "LINPUT3");
- snd_soc_dapm_nc_pin(dapm, "RINPUT3");
- snd_soc_dapm_nc_pin(dapm, "OUT3");
- snd_soc_dapm_nc_pin(dapm, "MONO");
-
- return 0;
-}
-
static struct snd_soc_dai_link jive_dai = {
.name = "wm8750",
.stream_name = "WM8750",
@@ -120,7 +90,8 @@ static struct snd_soc_dai_link jive_dai = {
.codec_dai_name = "wm8750-hifi",
.platform_name = "s3c2412-i2s",
.codec_name = "wm8750.0-001a",
- .init = jive_wm8750_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &jive_ops,
};
@@ -135,6 +106,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
};
static struct platform_device *jive_snd_device;
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index 9b4a09f14b6c..65602b935377 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -70,20 +70,6 @@ static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
break;
}
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai,
- SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai,
- SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
SND_SOC_CLOCK_IN);
@@ -151,13 +137,6 @@ static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
- /* todo: gg check mode (DSP_B) against CSR datasheet */
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK, 12288000,
SND_SOC_CLOCK_IN);
@@ -300,6 +279,8 @@ static struct snd_soc_dai_link neo1973_dai[] = {
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "wm8753-hifi",
.codec_name = "wm8753.0-001a",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
.init = neo1973_wm8753_init,
.ops = &neo1973_hifi_ops,
},
@@ -309,6 +290,8 @@ static struct snd_soc_dai_link neo1973_dai[] = {
.cpu_dai_name = "bt-sco-pcm",
.codec_dai_name = "wm8753-voice",
.codec_name = "wm8753.0-001a",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &neo1973_voice_ops,
},
};
diff --git a/sound/soc/samsung/odroidx2_max98090.c b/sound/soc/samsung/odroidx2_max98090.c
index fa4f1d2f69bf..596f1180a369 100644
--- a/sound/soc/samsung/odroidx2_max98090.c
+++ b/sound/soc/samsung/odroidx2_max98090.c
@@ -21,6 +21,8 @@ struct odroidx2_drv_data {
/* The I2S CDCLK output clock frequency for the MAX98090 codec */
#define MAX98090_MCLK 19200000
+static struct snd_soc_dai_link odroidx2_dai[];
+
static int odroidx2_late_probe(struct snd_soc_card *card)
{
struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
@@ -29,7 +31,9 @@ static int odroidx2_late_probe(struct snd_soc_card *card)
ret = snd_soc_dai_set_sysclk(codec_dai, 0, MAX98090_MCLK,
SND_SOC_CLOCK_IN);
- if (ret < 0)
+
+ if (ret < 0 || of_find_property(odroidx2_dai[0].codec_of_node,
+ "clocks", NULL))
return ret;
/* Set the cpu DAI configuration in order to use CDCLK */
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index 37688ebbb2b4..873f2cb4bebe 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -89,6 +89,8 @@ static struct snd_soc_dai_link rx1950_uda1380_dai[] = {
.init = rx1950_uda1380_init,
.platform_name = "s3c24xx-iis",
.codec_name = "uda1380-codec.0-001a",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &rx1950_ops,
},
};
@@ -154,7 +156,6 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
int div;
int ret;
unsigned int rate = params_rate(params);
@@ -181,18 +182,6 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
/* select clock source */
ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source, rate,
SND_SOC_CLOCK_OUT);
diff --git a/sound/soc/samsung/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
index 2c015f62ead6..dcc008d1e1ab 100644
--- a/sound/soc/samsung/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -169,24 +169,6 @@ static int simtec_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret;
- /* Set the CODEC as the bus clock master, I2S */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
- if (ret) {
- pr_err("%s: failed set cpu dai format\n", __func__);
- return ret;
- }
-
- /* Set the CODEC as the bus clock master */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
- if (ret) {
- pr_err("%s: failed set codec dai format\n", __func__);
- return ret;
- }
-
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
CODEC_CLOCK, SND_SOC_CLOCK_IN);
if (ret) {
@@ -320,6 +302,8 @@ int simtec_audio_core_probe(struct platform_device *pdev,
int ret;
card->dai_link->ops = &simtec_snd_ops;
+ card->dai_link->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM;
pdata = pdev->dev.platform_data;
if (!pdata) {
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 9c6f7db56f60..50849e137fc0 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -173,16 +173,6 @@ static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source , clk,
SND_SOC_CLOCK_IN);
if (ret < 0)
@@ -223,6 +213,8 @@ static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
.codec_name = "uda134x-codec",
.codec_dai_name = "uda134x-hifi",
.cpu_dai_name = "s3c24xx-iis",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &s3c24xx_uda134x_ops,
.platform_name = "s3c24xx-iis",
};
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
index 9b0ffacab790..8291d2a5f152 100644
--- a/sound/soc/samsung/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -56,20 +56,6 @@ static int smartq_hifi_hw_params(struct snd_pcm_substream *substream,
break;
}
- /* set codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /* set cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
/* Use PCLK for I2S signal generation */
ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
0, SND_SOC_CLOCK_IN);
@@ -199,6 +185,8 @@ static struct snd_soc_dai_link smartq_dai[] = {
.platform_name = "samsung-i2s.0",
.codec_name = "wm8750.0-0x1a",
.init = smartq_wm8987_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &smartq_hifi_ops,
},
};
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index b1a519f83b29..548bfd993788 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -32,7 +32,6 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
unsigned int pll_out;
int bfs, rfs, ret;
@@ -77,20 +76,6 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
}
pll_out = params_rate(params) * rfs;
- /* Set the Codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- /* Set the AP DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
/* Set WM8580 to drive MCLK from its PLLA */
ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
WM8580_CLKSRC_PLLA);
@@ -151,13 +136,10 @@ static const struct snd_soc_dapm_route smdk_wm8580_audio_map[] = {
static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
/* Enabling the microphone requires the fitting of a 0R
* resistor to connect the line from the microphone jack.
*/
- snd_soc_dapm_disable_pin(dapm, "MicIn");
+ snd_soc_dapm_disable_pin(&rtd->card->dapm, "MicIn");
return 0;
}
@@ -168,6 +150,9 @@ enum {
SEC_PLAYBACK,
};
+#define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+ SND_SOC_DAIFMT_CBM_CFM)
+
static struct snd_soc_dai_link smdk_dai[] = {
[PRI_PLAYBACK] = { /* Primary Playback i/f */
.name = "WM8580 PAIF RX",
@@ -176,6 +161,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8580-hifi-playback",
.platform_name = "samsung-i2s.0",
.codec_name = "wm8580.0-001b",
+ .dai_fmt = SMDK_DAI_FMT,
.ops = &smdk_ops,
},
[PRI_CAPTURE] = { /* Primary Capture i/f */
@@ -185,6 +171,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8580-hifi-capture",
.platform_name = "samsung-i2s.0",
.codec_name = "wm8580.0-001b",
+ .dai_fmt = SMDK_DAI_FMT,
.init = smdk_wm8580_init_paiftx,
.ops = &smdk_ops,
},
@@ -195,6 +182,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8580-hifi-playback",
.platform_name = "samsung-i2s-sec",
.codec_name = "wm8580.0-001b",
+ .dai_fmt = SMDK_DAI_FMT,
.ops = &smdk_ops,
},
};
diff --git a/sound/soc/samsung/smdk_wm8580pcm.c b/sound/soc/samsung/smdk_wm8580pcm.c
index 05c609c62de9..6deec5234c92 100644
--- a/sound/soc/samsung/smdk_wm8580pcm.c
+++ b/sound/soc/samsung/smdk_wm8580pcm.c
@@ -62,20 +62,6 @@ static int smdk_wm8580_pcm_hw_params(struct snd_pcm_substream *substream,
rfs = mclk_freq / params_rate(params) / 2;
- /* Set the codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B
- | SND_SOC_DAIFMT_IB_NF
- | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /* Set the cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_B
- | SND_SOC_DAIFMT_IB_NF
- | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
if (mclk_freq == xtal_freq) {
ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_MCLK,
mclk_freq, SND_SOC_CLOCK_IN);
@@ -121,6 +107,9 @@ static struct snd_soc_ops smdk_wm8580_pcm_ops = {
.hw_params = smdk_wm8580_pcm_hw_params,
};
+#define SMDK_DAI_FMT (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF | \
+ SND_SOC_DAIFMT_CBS_CFS)
+
static struct snd_soc_dai_link smdk_dai[] = {
{
.name = "WM8580 PAIF PCM RX",
@@ -129,6 +118,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8580-hifi-playback",
.platform_name = "samsung-audio",
.codec_name = "wm8580.0-001b",
+ .dai_fmt = SMDK_DAI_FMT,
.ops = &smdk_wm8580_pcm_ops,
}, {
.name = "WM8580 PAIF PCM TX",
@@ -137,6 +127,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8580-hifi-capture",
.platform_name = "samsung-pcm.0",
.codec_name = "wm8580.0-001b",
+ .dai_fmt = SMDK_DAI_FMT,
.ops = &smdk_wm8580_pcm_ops,
},
};
diff --git a/sound/soc/samsung/smdk_wm8994pcm.c b/sound/soc/samsung/smdk_wm8994pcm.c
index c470e8eed6e1..b1c89ec2d999 100644
--- a/sound/soc/samsung/smdk_wm8994pcm.c
+++ b/sound/soc/samsung/smdk_wm8994pcm.c
@@ -68,20 +68,6 @@ static int smdk_wm8994_pcm_hw_params(struct snd_pcm_substream *substream,
mclk_freq = params_rate(params) * rfs;
- /* Set the codec DAI configuration */
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B
- | SND_SOC_DAIFMT_IB_NF
- | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /* Set the cpu DAI configuration */
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_B
- | SND_SOC_DAIFMT_IB_NF
- | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
mclk_freq, SND_SOC_CLOCK_IN);
if (ret < 0)
@@ -118,6 +104,8 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8994-aif1",
.platform_name = "samsung-pcm.0",
.codec_name = "wm8994-codec",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &smdk_wm8994_pcm_ops,
},
};
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index a5b2c4ea90d9..fd11404a3bc7 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -305,11 +305,6 @@ static struct snd_pcm_ops camelot_pcm_ops = {
.pointer = camelot_pos,
};
-static void camelot_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
@@ -328,7 +323,6 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
static struct snd_soc_platform_driver sh7760_soc_platform = {
.ops = &camelot_pcm_ops,
.pcm_new = camelot_pcm_new,
- .pcm_free = camelot_pcm_free,
};
static int sh7760_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 8869971d7884..b87b22e88e43 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -820,12 +820,9 @@ static int fsi_clk_enable(struct device *dev,
return ret;
}
- if (clock->xck)
- clk_enable(clock->xck);
- if (clock->ick)
- clk_enable(clock->ick);
- if (clock->div)
- clk_enable(clock->div);
+ clk_enable(clock->xck);
+ clk_enable(clock->ick);
+ clk_enable(clock->div);
clock->count++;
}
@@ -1765,11 +1762,6 @@ static struct snd_pcm_ops fsi_pcm_ops = {
#define PREALLOC_BUFFER (32 * 1024)
#define PREALLOC_BUFFER_MAX (32 * 1024)
-static void fsi_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int fsi_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
return snd_pcm_lib_preallocate_pages_for_all(
@@ -1821,7 +1813,6 @@ static struct snd_soc_dai_driver fsi_soc_dai[] = {
static struct snd_soc_platform_driver fsi_soc_platform = {
.ops = &fsi_pcm_ops,
.pcm_new = fsi_pcm_new,
- .pcm_free = fsi_pcm_free,
};
static const struct snd_soc_component_driver fsi_soc_component = {
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index c58c2529f103..82f582344fe7 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -63,16 +63,6 @@ static int migor_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_NB_IF |
- SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai, SND_SOC_DAIFMT_NB_IF |
- SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
codec_freq = rate * 512;
/*
* This propagates the parent frequency change to children and
@@ -144,6 +134,8 @@ static struct snd_soc_dai_link migor_dai = {
.codec_dai_name = "wm8978-hifi",
.platform_name = "siu-pcm-audio",
.codec_name = "wm8978.0-001a",
+ .dai_fmt = SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_CBS_CFS,
.ops = &migor_dai_ops,
};
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 14d1a7193469..7ac35c9d1cb8 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -57,8 +57,7 @@ static u32 rsnd_adg_ssi_ws_timing_gen2(struct rsnd_dai_stream *io)
return (0x6 + ws) << 8;
}
-int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_dai *rdai,
- struct rsnd_mod *mod,
+int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
int id = rsnd_mod_id(mod);
@@ -75,12 +74,11 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_dai *rdai,
return 0;
}
-static int rsnd_adg_set_src_timsel_gen2(struct rsnd_dai *rdai,
- struct rsnd_mod *mod,
+static int rsnd_adg_set_src_timsel_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
u32 timsel)
{
- int is_play = rsnd_dai_is_play(rdai, io);
+ int is_play = rsnd_io_is_play(io);
int id = rsnd_mod_id(mod);
int shift = (id % 2) ? 16 : 0;
u32 mask, ws;
@@ -122,7 +120,6 @@ static int rsnd_adg_set_src_timsel_gen2(struct rsnd_dai *rdai,
}
int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct rsnd_dai_stream *io,
unsigned int src_rate,
unsigned int dst_rate)
@@ -178,7 +175,7 @@ int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod,
return -EIO;
}
- ret = rsnd_adg_set_src_timsel_gen2(rdai, mod, io, val);
+ ret = rsnd_adg_set_src_timsel_gen2(mod, io, val);
if (ret < 0) {
dev_err(dev, "timsel error\n");
return ret;
@@ -190,12 +187,11 @@ int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod,
}
int rsnd_adg_set_convert_timing_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct rsnd_dai_stream *io)
{
u32 val = rsnd_adg_ssi_ws_timing_gen2(io);
- return rsnd_adg_set_src_timsel_gen2(rdai, mod, io, val);
+ return rsnd_adg_set_src_timsel_gen2(mod, io, val);
}
int rsnd_adg_set_convert_clk_gen1(struct rsnd_priv *priv,
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 75308bbc2ce8..1b53605f7154 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -149,16 +149,16 @@ char *rsnd_mod_dma_name(struct rsnd_mod *mod)
return mod->ops->dma_name(mod);
}
-void rsnd_mod_init(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
+void rsnd_mod_init(struct rsnd_mod *mod,
struct rsnd_mod_ops *ops,
+ struct clk *clk,
enum rsnd_mod_type type,
int id)
{
- mod->priv = priv;
mod->id = id;
mod->ops = ops;
mod->type = type;
+ mod->clk = clk;
}
/*
@@ -412,7 +412,7 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod)
/*
* rsnd_dai functions
*/
-#define __rsnd_mod_call(mod, func, rdai...) \
+#define __rsnd_mod_call(mod, func, param...) \
({ \
struct rsnd_priv *priv = rsnd_mod_to_priv(mod); \
struct device *dev = rsnd_priv_to_dev(priv); \
@@ -422,18 +422,18 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod)
if ((mod->status & mask) == call) { \
dev_dbg(dev, "%s[%d] %s\n", \
rsnd_mod_name(mod), rsnd_mod_id(mod), #func); \
- ret = (mod)->ops->func(mod, rdai); \
+ ret = (mod)->ops->func(mod, param); \
mod->status = (mod->status & ~mask) | (~call & mask); \
} \
ret; \
})
-#define rsnd_mod_call(mod, func, rdai...) \
+#define rsnd_mod_call(mod, func, param...) \
(!(mod) ? -ENODEV : \
!((mod)->ops->func) ? 0 : \
- __rsnd_mod_call(mod, func, rdai))
+ __rsnd_mod_call(mod, func, param))
-#define rsnd_dai_call(fn, io, rdai...) \
+#define rsnd_dai_call(fn, io, param...) \
({ \
struct rsnd_mod *mod; \
int ret = 0, i; \
@@ -441,7 +441,7 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod)
mod = (io)->mod[i]; \
if (!mod) \
continue; \
- ret = rsnd_mod_call(mod, fn, rdai); \
+ ret = rsnd_mod_call(mod, fn, param); \
if (ret < 0) \
break; \
} \
@@ -477,17 +477,7 @@ static void rsnd_dai_disconnect(struct rsnd_mod *mod,
io->mod[mod->type] = NULL;
}
-int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai)
-{
- int id = rdai - priv->rdai;
-
- if ((id < 0) || (id >= rsnd_rdai_nr(priv)))
- return -EINVAL;
-
- return id;
-}
-
-struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id)
+struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id)
{
if ((id < 0) || (id >= rsnd_rdai_nr(priv)))
return NULL;
@@ -499,12 +489,7 @@ static struct rsnd_dai *rsnd_dai_to_rdai(struct snd_soc_dai *dai)
{
struct rsnd_priv *priv = snd_soc_dai_get_drvdata(dai);
- return rsnd_dai_get(priv, dai->id);
-}
-
-int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io)
-{
- return &rdai->playback == io;
+ return rsnd_rdai_get(priv, dai->id);
}
/*
@@ -598,20 +583,20 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
if (ret < 0)
goto dai_trigger_end;
- ret = rsnd_dai_call(init, io, rdai);
+ ret = rsnd_dai_call(init, io, priv);
if (ret < 0)
goto dai_trigger_end;
- ret = rsnd_dai_call(start, io, rdai);
+ ret = rsnd_dai_call(start, io, priv);
if (ret < 0)
goto dai_trigger_end;
break;
case SNDRV_PCM_TRIGGER_STOP:
- ret = rsnd_dai_call(stop, io, rdai);
+ ret = rsnd_dai_call(stop, io, priv);
if (ret < 0)
goto dai_trigger_end;
- ret = rsnd_dai_call(quit, io, rdai);
+ ret = rsnd_dai_call(quit, io, priv);
if (ret < 0)
goto dai_trigger_end;
@@ -873,15 +858,15 @@ static int rsnd_dai_probe(struct platform_device *pdev,
priv->rdai = rdai;
for (i = 0; i < dai_nr; i++) {
- rdai[i].info = &info->dai_info[i];
- pmod = rdai[i].info->playback.ssi;
- cmod = rdai[i].info->capture.ssi;
+ pmod = info->dai_info[i].playback.ssi;
+ cmod = info->dai_info[i].capture.ssi;
/*
* init rsnd_dai
*/
snprintf(rdai[i].name, RSND_DAI_NAME_SIZE, "rsnd-dai.%d", i);
+ rdai[i].priv = priv;
/*
* init snd_soc_dai_driver
@@ -895,6 +880,7 @@ static int rsnd_dai_probe(struct platform_device *pdev,
drv[i].playback.channels_max = 2;
rdai[i].playback.info = &info->dai_info[i].playback;
+ rdai[i].playback.rdai = rdai + i;
rsnd_path_init(priv, &rdai[i], &rdai[i].playback);
}
if (cmod) {
@@ -904,6 +890,7 @@ static int rsnd_dai_probe(struct platform_device *pdev,
drv[i].capture.channels_max = 2;
rdai[i].capture.info = &info->dai_info[i].capture;
+ rdai[i].capture.rdai = rdai + i;
rsnd_path_init(priv, &rdai[i], &rdai[i].capture);
}
@@ -1037,7 +1024,6 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl,
}
static int __rsnd_kctrl_new(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
struct rsnd_kctrl_cfg *cfg,
@@ -1060,16 +1046,24 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
return -ENOMEM;
ret = snd_ctl_add(card, kctrl);
- if (ret < 0)
+ if (ret < 0) {
+ snd_ctl_free_one(kctrl);
return ret;
+ }
cfg->update = update;
+ cfg->card = card;
+ cfg->kctrl = kctrl;
return 0;
}
+void _rsnd_kctrl_remove(struct rsnd_kctrl_cfg *cfg)
+{
+ snd_ctl_remove(cfg->card, cfg->kctrl);
+}
+
int rsnd_kctrl_new_m(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
void (*update)(struct rsnd_mod *mod),
@@ -1079,11 +1073,10 @@ int rsnd_kctrl_new_m(struct rsnd_mod *mod,
_cfg->cfg.max = max;
_cfg->cfg.size = RSND_DVC_CHANNELS;
_cfg->cfg.val = _cfg->val;
- return __rsnd_kctrl_new(mod, rdai, rtd, name, &_cfg->cfg, update);
+ return __rsnd_kctrl_new(mod, rtd, name, &_cfg->cfg, update);
}
int rsnd_kctrl_new_s(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
void (*update)(struct rsnd_mod *mod),
@@ -1093,11 +1086,10 @@ int rsnd_kctrl_new_s(struct rsnd_mod *mod,
_cfg->cfg.max = max;
_cfg->cfg.size = 1;
_cfg->cfg.val = &_cfg->val;
- return __rsnd_kctrl_new(mod, rdai, rtd, name, &_cfg->cfg, update);
+ return __rsnd_kctrl_new(mod, rtd, name, &_cfg->cfg, update);
}
int rsnd_kctrl_new_e(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
struct rsnd_kctrl_cfg_s *_cfg,
@@ -1109,7 +1101,7 @@ int rsnd_kctrl_new_e(struct rsnd_mod *mod,
_cfg->cfg.size = 1;
_cfg->cfg.val = &_cfg->val;
_cfg->cfg.texts = texts;
- return __rsnd_kctrl_new(mod, rdai, rtd, name, &_cfg->cfg, update);
+ return __rsnd_kctrl_new(mod, rtd, name, &_cfg->cfg, update);
}
/*
@@ -1125,11 +1117,11 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
int ret;
- ret = rsnd_dai_call(pcm_new, &rdai->playback, rdai, rtd);
+ ret = rsnd_dai_call(pcm_new, &rdai->playback, rtd);
if (ret)
return ret;
- ret = rsnd_dai_call(pcm_new, &rdai->capture, rdai, rtd);
+ ret = rsnd_dai_call(pcm_new, &rdai->capture, rtd);
if (ret)
return ret;
@@ -1140,15 +1132,9 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
}
-static void rsnd_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static struct snd_soc_platform_driver rsnd_soc_platform = {
.ops = &rsnd_pcm_ops,
.pcm_new = rsnd_pcm_new,
- .pcm_free = rsnd_pcm_free,
};
static const struct snd_soc_component_driver rsnd_soc_component = {
@@ -1156,13 +1142,11 @@ static const struct snd_soc_component_driver rsnd_soc_component = {
};
static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
- struct rsnd_dai *rdai,
- int is_play)
+ struct rsnd_dai_stream *io)
{
- struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
int ret;
- ret = rsnd_dai_call(probe, io, rdai);
+ ret = rsnd_dai_call(probe, io, priv);
if (ret == -EAGAIN) {
/*
* Fallback to PIO mode
@@ -1175,7 +1159,7 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
* rsnd_dma_init()
* rsnd_ssi_fallback()
*/
- rsnd_dai_call(remove, io, rdai);
+ rsnd_dai_call(remove, io, priv);
/*
* remove SRC/DVC from DAI,
@@ -1186,13 +1170,13 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
/*
* fallback
*/
- rsnd_dai_call(fallback, io, rdai);
+ rsnd_dai_call(fallback, io, priv);
/*
* retry to "probe".
* DAI has SSI which is PIO mode only now.
*/
- ret = rsnd_dai_call(probe, io, rdai);
+ ret = rsnd_dai_call(probe, io, priv);
}
return ret;
@@ -1259,11 +1243,11 @@ static int rsnd_probe(struct platform_device *pdev)
}
for_each_rsnd_dai(rdai, priv, i) {
- ret = rsnd_rdai_continuance_probe(priv, rdai, 1);
+ ret = rsnd_rdai_continuance_probe(priv, &rdai->playback);
if (ret)
goto exit_snd_probe;
- ret = rsnd_rdai_continuance_probe(priv, rdai, 0);
+ ret = rsnd_rdai_continuance_probe(priv, &rdai->capture);
if (ret)
goto exit_snd_probe;
}
@@ -1295,8 +1279,8 @@ exit_snd_soc:
snd_soc_unregister_platform(dev);
exit_snd_probe:
for_each_rsnd_dai(rdai, priv, i) {
- rsnd_dai_call(remove, &rdai->playback, rdai);
- rsnd_dai_call(remove, &rdai->capture, rdai);
+ rsnd_dai_call(remove, &rdai->playback, priv);
+ rsnd_dai_call(remove, &rdai->capture, priv);
}
return ret;
@@ -1311,10 +1295,13 @@ static int rsnd_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
for_each_rsnd_dai(rdai, priv, i) {
- ret |= rsnd_dai_call(remove, &rdai->playback, rdai);
- ret |= rsnd_dai_call(remove, &rdai->capture, rdai);
+ ret |= rsnd_dai_call(remove, &rdai->playback, priv);
+ ret |= rsnd_dai_call(remove, &rdai->capture, priv);
}
+ snd_soc_unregister_component(&pdev->dev);
+ snd_soc_unregister_platform(&pdev->dev);
+
return ret;
}
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 5380a4827ba7..d7f9ed959c4e 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -17,7 +17,6 @@
struct rsnd_dvc {
struct rsnd_dvc_platform_info *info; /* rcar_snd.h */
struct rsnd_mod mod;
- struct clk *clk;
struct rsnd_kctrl_cfg_m volume;
struct rsnd_kctrl_cfg_m mute;
struct rsnd_kctrl_cfg_s ren; /* Ramp Enable */
@@ -118,9 +117,8 @@ static void rsnd_dvc_volume_update(struct rsnd_mod *mod)
}
static int rsnd_dvc_probe_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
@@ -129,12 +127,24 @@ static int rsnd_dvc_probe_gen2(struct rsnd_mod *mod,
return 0;
}
+static int rsnd_dvc_remove_gen2(struct rsnd_mod *mod,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
+
+ rsnd_kctrl_remove(dvc->volume);
+ rsnd_kctrl_remove(dvc->mute);
+ rsnd_kctrl_remove(dvc->ren);
+ rsnd_kctrl_remove(dvc->rup);
+ rsnd_kctrl_remove(dvc->rdown);
+
+ return 0;
+}
+
static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_dvc *dvc = rsnd_mod_to_dvc(dvc_mod);
struct rsnd_dai_stream *io = rsnd_mod_to_io(dvc_mod);
- struct rsnd_priv *priv = rsnd_mod_to_priv(dvc_mod);
struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
struct device *dev = rsnd_priv_to_dev(priv);
int dvc_id = rsnd_mod_id(dvc_mod);
@@ -153,7 +163,7 @@ static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
return -EINVAL;
}
- clk_prepare_enable(dvc->clk);
+ rsnd_mod_hw_start(dvc_mod);
/*
* fixme
@@ -173,23 +183,21 @@ static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
rsnd_mod_write(dvc_mod, DVC_DVUIR, 0);
- rsnd_adg_set_cmd_timsel_gen2(rdai, dvc_mod, io);
+ rsnd_adg_set_cmd_timsel_gen2(dvc_mod, io);
return 0;
}
static int rsnd_dvc_quit(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
-
- clk_disable_unprepare(dvc->clk);
+ rsnd_mod_hw_stop(mod);
return 0;
}
static int rsnd_dvc_start(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
rsnd_mod_write(mod, CMD_CTRL, 0x10);
@@ -197,7 +205,7 @@ static int rsnd_dvc_start(struct rsnd_mod *mod,
}
static int rsnd_dvc_stop(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
rsnd_mod_write(mod, CMD_CTRL, 0);
@@ -205,16 +213,16 @@ static int rsnd_dvc_stop(struct rsnd_mod *mod,
}
static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd)
{
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
+ int is_play = rsnd_io_is_play(io);
int ret;
/* Volume */
- ret = rsnd_kctrl_new_m(mod, rdai, rtd,
- rsnd_dai_is_play(rdai, io) ?
+ ret = rsnd_kctrl_new_m(mod, rtd,
+ is_play ?
"DVC Out Playback Volume" : "DVC In Capture Volume",
rsnd_dvc_volume_update,
&dvc->volume, 0x00800000 - 1);
@@ -222,8 +230,8 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
return ret;
/* Mute */
- ret = rsnd_kctrl_new_m(mod, rdai, rtd,
- rsnd_dai_is_play(rdai, io) ?
+ ret = rsnd_kctrl_new_m(mod, rtd,
+ is_play ?
"DVC Out Mute Switch" : "DVC In Mute Switch",
rsnd_dvc_volume_update,
&dvc->mute, 1);
@@ -231,16 +239,16 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
return ret;
/* Ramp */
- ret = rsnd_kctrl_new_s(mod, rdai, rtd,
- rsnd_dai_is_play(rdai, io) ?
+ ret = rsnd_kctrl_new_s(mod, rtd,
+ is_play ?
"DVC Out Ramp Switch" : "DVC In Ramp Switch",
rsnd_dvc_volume_update,
&dvc->ren, 1);
if (ret < 0)
return ret;
- ret = rsnd_kctrl_new_e(mod, rdai, rtd,
- rsnd_dai_is_play(rdai, io) ?
+ ret = rsnd_kctrl_new_e(mod, rtd,
+ is_play ?
"DVC Out Ramp Up Rate" : "DVC In Ramp Up Rate",
&dvc->rup,
rsnd_dvc_volume_update,
@@ -248,8 +256,8 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
if (ret < 0)
return ret;
- ret = rsnd_kctrl_new_e(mod, rdai, rtd,
- rsnd_dai_is_play(rdai, io) ?
+ ret = rsnd_kctrl_new_e(mod, rtd,
+ is_play ?
"DVC Out Ramp Down Rate" : "DVC In Ramp Down Rate",
&dvc->rdown,
rsnd_dvc_volume_update,
@@ -264,6 +272,7 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
static struct rsnd_mod_ops rsnd_dvc_ops = {
.name = DVC_NAME,
.probe = rsnd_dvc_probe_gen2,
+ .remove = rsnd_dvc_remove_gen2,
.init = rsnd_dvc_init,
.quit = rsnd_dvc_quit,
.start = rsnd_dvc_start,
@@ -356,9 +365,9 @@ int rsnd_dvc_probe(struct platform_device *pdev,
return PTR_ERR(clk);
dvc->info = &info->dvc_info[i];
- dvc->clk = clk;
- rsnd_mod_init(priv, &dvc->mod, &rsnd_dvc_ops, RSND_MOD_DVC, i);
+ rsnd_mod_init(&dvc->mod, &rsnd_dvc_ops,
+ clk, RSND_MOD_DVC, i);
dev_dbg(dev, "CMD%d probed\n", i);
}
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 87a6f2d62775..de0685f2abae 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -309,8 +309,13 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
RSND_GEN_M_REG(SRC_BUSIF_MODE, 0x0, 0x20),
RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20),
RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20),
+ RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20),
RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20),
RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20),
+ RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8),
+ RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc),
+ RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0),
+ RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4),
RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40),
RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40),
RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40),
@@ -403,6 +408,16 @@ static int rsnd_gen1_probe(struct platform_device *pdev,
RSND_GEN_M_REG(SRC_IFSVR, 0x220, 0x40),
RSND_GEN_M_REG(SRC_SRCCR, 0x224, 0x40),
RSND_GEN_M_REG(SRC_MNFSR, 0x228, 0x40),
+ /*
+ * ADD US
+ *
+ * SRC_STATUS
+ * SRC_INT_EN
+ * SCU_SYS_STATUS0
+ * SCU_SYS_STATUS1
+ * SCU_SYS_INT_EN0
+ * SCU_SYS_INT_EN1
+ */
};
struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 5826c8abf794..e7914bd610e2 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -44,6 +44,8 @@ enum rsnd_reg {
RSND_REG_SRC_IFSCR,
RSND_REG_SRC_IFSVR,
RSND_REG_SRC_SRCCR,
+ RSND_REG_SCU_SYS_STATUS0,
+ RSND_REG_SCU_SYS_INT_EN0,
RSND_REG_CMD_ROUTE_SLCT,
RSND_REG_DVC_SWRSR,
RSND_REG_DVC_DVUIR,
@@ -94,6 +96,9 @@ enum rsnd_reg {
RSND_REG_SHARE23,
RSND_REG_SHARE24,
RSND_REG_SHARE25,
+ RSND_REG_SHARE26,
+ RSND_REG_SHARE27,
+ RSND_REG_SHARE28,
RSND_REG_MAX,
};
@@ -135,6 +140,9 @@ enum rsnd_reg {
#define RSND_REG_DVC_VRCTR RSND_REG_SHARE23
#define RSND_REG_DVC_VRPDR RSND_REG_SHARE24
#define RSND_REG_DVC_VRDBR RSND_REG_SHARE25
+#define RSND_REG_SCU_SYS_STATUS1 RSND_REG_SHARE26
+#define RSND_REG_SCU_SYS_INT_EN1 RSND_REG_SHARE27
+#define RSND_REG_SRC_INT_ENABLE0 RSND_REG_SHARE28
struct rsnd_of_data;
struct rsnd_priv;
@@ -182,9 +190,9 @@ void rsnd_dma_quit(struct rsnd_priv *priv,
* R-Car sound mod
*/
enum rsnd_mod_type {
- RSND_MOD_SRC = 0,
+ RSND_MOD_DVC = 0,
+ RSND_MOD_SRC,
RSND_MOD_SSI,
- RSND_MOD_DVC,
RSND_MOD_MAX,
};
@@ -192,32 +200,31 @@ struct rsnd_mod_ops {
char *name;
char* (*dma_name)(struct rsnd_mod *mod);
int (*probe)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
int (*remove)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
int (*init)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
int (*quit)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
int (*start)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
int (*stop)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
int (*pcm_new)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd);
int (*fallback)(struct rsnd_mod *mod,
- struct rsnd_dai *rdai);
+ struct rsnd_priv *priv);
};
struct rsnd_dai_stream;
struct rsnd_mod {
int id;
enum rsnd_mod_type type;
- struct rsnd_priv *priv;
struct rsnd_mod_ops *ops;
struct rsnd_dma dma;
struct rsnd_dai_stream *io;
+ struct clk *clk;
u32 status;
};
/*
@@ -248,15 +255,17 @@ struct rsnd_mod {
#define __rsnd_mod_call_pcm_new 0
#define __rsnd_mod_call_fallback 0
-#define rsnd_mod_to_priv(mod) ((mod)->priv)
+#define rsnd_mod_to_priv(mod) (rsnd_io_to_priv(rsnd_mod_to_io(mod)))
#define rsnd_mod_to_dma(mod) (&(mod)->dma)
#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma)
#define rsnd_mod_to_io(mod) ((mod)->io)
#define rsnd_mod_id(mod) ((mod)->id)
+#define rsnd_mod_hw_start(mod) clk_prepare_enable((mod)->clk)
+#define rsnd_mod_hw_stop(mod) clk_disable_unprepare((mod)->clk)
-void rsnd_mod_init(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
+void rsnd_mod_init(struct rsnd_mod *mod,
struct rsnd_mod_ops *ops,
+ struct clk *clk,
enum rsnd_mod_type type,
int id);
char *rsnd_mod_name(struct rsnd_mod *mod);
@@ -270,6 +279,7 @@ struct rsnd_dai_stream {
struct snd_pcm_substream *substream;
struct rsnd_mod *mod[RSND_MOD_MAX];
struct rsnd_dai_path_info *info; /* rcar_snd.h */
+ struct rsnd_dai *rdai;
int byte_pos;
int period_pos;
int byte_per_period;
@@ -278,12 +288,18 @@ struct rsnd_dai_stream {
#define rsnd_io_to_mod_ssi(io) ((io)->mod[RSND_MOD_SSI])
#define rsnd_io_to_mod_src(io) ((io)->mod[RSND_MOD_SRC])
#define rsnd_io_to_mod_dvc(io) ((io)->mod[RSND_MOD_DVC])
+#define rsnd_io_to_rdai(io) ((io)->rdai)
+#define rsnd_io_to_priv(io) (rsnd_rdai_to_priv(rsnd_io_to_rdai(io)))
+#define rsnd_io_is_play(io) (&rsnd_io_to_rdai(io)->playback == io)
+#define rsnd_io_to_runtime(io) ((io)->substream ? \
+ (io)->substream->runtime : NULL)
+
struct rsnd_dai {
char name[RSND_DAI_NAME_SIZE];
- struct rsnd_dai_platform_info *info; /* rcar_snd.h */
struct rsnd_dai_stream playback;
struct rsnd_dai_stream capture;
+ struct rsnd_priv *priv;
unsigned int clk_master:1;
unsigned int bit_clk_inv:1;
@@ -293,22 +309,18 @@ struct rsnd_dai {
};
#define rsnd_rdai_nr(priv) ((priv)->rdai_nr)
+#define rsnd_rdai_is_clk_master(rdai) ((rdai)->clk_master)
+#define rsnd_rdai_to_priv(rdai) ((rdai)->priv)
#define for_each_rsnd_dai(rdai, priv, i) \
for (i = 0; \
(i < rsnd_rdai_nr(priv)) && \
- ((rdai) = rsnd_dai_get(priv, i)); \
+ ((rdai) = rsnd_rdai_get(priv, i)); \
i++)
-struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id);
-int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io);
-int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai);
-#define rsnd_dai_get_platform_info(rdai) ((rdai)->info)
-#define rsnd_io_to_runtime(io) ((io)->substream ? \
- (io)->substream->runtime : NULL)
+struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id);
void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int cnt);
int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
-#define rsnd_dai_is_clk_master(rdai) ((rdai)->clk_master)
/*
* R-Car Gen1/Gen2
@@ -339,15 +351,12 @@ int rsnd_adg_set_convert_clk_gen1(struct rsnd_priv *priv,
unsigned int src_rate,
unsigned int dst_rate);
int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct rsnd_dai_stream *io,
unsigned int src_rate,
unsigned int dst_rate);
int rsnd_adg_set_convert_timing_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct rsnd_dai_stream *io);
-int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_dai *rdai,
- struct rsnd_mod *mod,
+int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io);
/*
@@ -427,6 +436,8 @@ struct rsnd_kctrl_cfg {
u32 *val;
const char * const *texts;
void (*update)(struct rsnd_mod *mod);
+ struct snd_card *card;
+ struct snd_kcontrol *kctrl;
};
#define RSND_DVC_CHANNELS 2
@@ -440,22 +451,22 @@ struct rsnd_kctrl_cfg_s {
u32 val;
};
+void _rsnd_kctrl_remove(struct rsnd_kctrl_cfg *cfg);
+#define rsnd_kctrl_remove(_cfg) _rsnd_kctrl_remove(&((_cfg).cfg))
+
int rsnd_kctrl_new_m(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
void (*update)(struct rsnd_mod *mod),
struct rsnd_kctrl_cfg_m *_cfg,
u32 max);
int rsnd_kctrl_new_s(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
void (*update)(struct rsnd_mod *mod),
struct rsnd_kctrl_cfg_s *_cfg,
u32 max);
int rsnd_kctrl_new_e(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
struct rsnd_kctrl_cfg_s *_cfg,
@@ -474,14 +485,10 @@ unsigned int rsnd_src_get_ssi_rate(struct rsnd_priv *priv,
struct rsnd_dai_stream *io,
struct snd_pcm_runtime *runtime);
int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai,
int use_busif);
-int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai);
-int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai);
-int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai);
+int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod);
+int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod);
+int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod);
#define rsnd_src_nr(priv) ((priv)->src_nr)
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index eede3ac6eed2..81c182b4bad5 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -12,10 +12,17 @@
#define SRC_NAME "src"
+/* SRCx_STATUS */
+#define OUF_SRCO ((1 << 12) | (1 << 13))
+#define OUF_SRCI ((1 << 9) | (1 << 8))
+
+/* SCU_SYSTEM_STATUS0/1 */
+#define OUF_SRC(id) ((1 << (id + 16)) | (1 << id))
+
struct rsnd_src {
struct rsnd_src_platform_info *info; /* rcar_snd.h */
struct rsnd_mod mod;
- struct clk *clk;
+ int err;
};
#define RSND_SRC_NAME_SIZE 16
@@ -107,10 +114,10 @@ struct rsnd_src {
* Gen1/Gen2 common functions
*/
int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai,
int use_busif)
{
struct rsnd_dai_stream *io = rsnd_mod_to_io(ssi_mod);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
int ssi_id = rsnd_mod_id(ssi_mod);
@@ -140,7 +147,7 @@ int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
if (shift >= 0)
rsnd_mod_bset(ssi_mod, SSI_MODE1,
0x3 << shift,
- rsnd_dai_is_clk_master(rdai) ?
+ rsnd_rdai_is_clk_master(rdai) ?
0x2 << shift : 0x1 << shift);
}
@@ -174,8 +181,7 @@ int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
return 0;
}
-int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai)
+int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod)
{
/*
* DMA settings for SSIU
@@ -185,8 +191,7 @@ int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
return 0;
}
-int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai)
+int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
@@ -202,8 +207,7 @@ int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod,
return 0;
}
-int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai)
+int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
@@ -240,8 +244,7 @@ unsigned int rsnd_src_get_ssi_rate(struct rsnd_priv *priv,
return rate;
}
-static int rsnd_src_set_convert_rate(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_set_convert_rate(struct rsnd_mod *mod)
{
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -273,12 +276,13 @@ static int rsnd_src_set_convert_rate(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_src_init(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_init(struct rsnd_mod *mod)
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
- clk_prepare_enable(src->clk);
+ rsnd_mod_hw_start(mod);
+
+ src->err = 0;
/*
* Initialize the operation of the SRC internal circuits
@@ -290,11 +294,16 @@ static int rsnd_src_init(struct rsnd_mod *mod,
}
static int rsnd_src_quit(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ rsnd_mod_hw_stop(mod);
- clk_disable_unprepare(src->clk);
+ if (src->err)
+ dev_warn(dev, "%s[%d] under/over flow err = %d\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod), src->err);
return 0;
}
@@ -319,8 +328,7 @@ static int rsnd_src_stop(struct rsnd_mod *mod)
/*
* Gen1 functions
*/
-static int rsnd_src_set_route_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_set_route_gen1(struct rsnd_mod *mod)
{
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
struct src_route_config {
@@ -348,7 +356,7 @@ static int rsnd_src_set_route_gen1(struct rsnd_mod *mod,
/*
* SRC_ROUTE_SELECT
*/
- val = rsnd_dai_is_play(rdai, io) ? 0x1 : 0x2;
+ val = rsnd_io_is_play(io) ? 0x1 : 0x2;
val = val << routes[id].shift;
mask = routes[id].mask << routes[id].shift;
@@ -357,8 +365,7 @@ static int rsnd_src_set_route_gen1(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_src_set_convert_timing_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_set_convert_timing_gen1(struct rsnd_mod *mod)
{
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
@@ -416,13 +423,12 @@ static int rsnd_src_set_convert_timing_gen1(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod)
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
int ret;
- ret = rsnd_src_set_convert_rate(mod, rdai);
+ ret = rsnd_src_set_convert_rate(mod);
if (ret < 0)
return ret;
@@ -443,9 +449,8 @@ static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod,
}
static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
dev_dbg(dev, "%s[%d] (Gen1) is probed\n",
@@ -455,23 +460,23 @@ static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
}
static int rsnd_src_init_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
int ret;
- ret = rsnd_src_init(mod, rdai);
+ ret = rsnd_src_init(mod);
if (ret < 0)
return ret;
- ret = rsnd_src_set_route_gen1(mod, rdai);
+ ret = rsnd_src_set_route_gen1(mod);
if (ret < 0)
return ret;
- ret = rsnd_src_set_convert_rate_gen1(mod, rdai);
+ ret = rsnd_src_set_convert_rate_gen1(mod);
if (ret < 0)
return ret;
- ret = rsnd_src_set_convert_timing_gen1(mod, rdai);
+ ret = rsnd_src_set_convert_timing_gen1(mod);
if (ret < 0)
return ret;
@@ -479,7 +484,7 @@ static int rsnd_src_init_gen1(struct rsnd_mod *mod,
}
static int rsnd_src_start_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
int id = rsnd_mod_id(mod);
@@ -489,7 +494,7 @@ static int rsnd_src_start_gen1(struct rsnd_mod *mod,
}
static int rsnd_src_stop_gen1(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
int id = rsnd_mod_id(mod);
@@ -510,8 +515,111 @@ static struct rsnd_mod_ops rsnd_src_gen1_ops = {
/*
* Gen2 functions
*/
-static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+#define rsnd_src_irq_enable_gen2(mod) rsnd_src_irq_ctrol_gen2(mod, 1)
+#define rsnd_src_irq_disable_gen2(mod) rsnd_src_irq_ctrol_gen2(mod, 0)
+static void rsnd_src_irq_ctrol_gen2(struct rsnd_mod *mod, int enable)
+{
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
+ u32 sys_int_val, int_val, sys_int_mask;
+ int irq = src->info->irq;
+ int id = rsnd_mod_id(mod);
+
+ sys_int_val =
+ sys_int_mask = OUF_SRC(id);
+ int_val = 0x3300;
+
+ /*
+ * IRQ is not supported on non-DT
+ * see
+ * rsnd_src_probe_gen2()
+ */
+ if ((irq <= 0) || !enable) {
+ sys_int_val = 0;
+ int_val = 0;
+ }
+
+ rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val);
+ rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val);
+ rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val);
+}
+
+static void rsnd_src_error_clear_gen2(struct rsnd_mod *mod)
+{
+ u32 val = OUF_SRC(rsnd_mod_id(mod));
+
+ rsnd_mod_bset(mod, SCU_SYS_STATUS0, val, val);
+ rsnd_mod_bset(mod, SCU_SYS_STATUS1, val, val);
+}
+
+static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod)
+{
+ u32 val = OUF_SRC(rsnd_mod_id(mod));
+ bool ret = false;
+
+ if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val) ||
+ (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val)) {
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
+
+ src->err++;
+ ret = true;
+ }
+
+	/* clear error status */
+ rsnd_src_error_clear_gen2(mod);
+
+ return ret;
+}
+
+static int _rsnd_src_start_gen2(struct rsnd_mod *mod)
+{
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11;
+
+ rsnd_mod_write(mod, SRC_CTRL, val);
+
+ rsnd_src_error_clear_gen2(mod);
+
+ rsnd_src_start(mod);
+
+ rsnd_src_irq_enable_gen2(mod);
+
+ return 0;
+}
+
+static int _rsnd_src_stop_gen2(struct rsnd_mod *mod)
+{
+ rsnd_src_irq_disable_gen2(mod);
+
+ rsnd_mod_write(mod, SRC_CTRL, 0);
+
+ rsnd_src_error_record_gen2(mod);
+
+ return rsnd_src_stop(mod);
+}
+
+static irqreturn_t rsnd_src_interrupt_gen2(int irq, void *data)
+{
+ struct rsnd_mod *mod = data;
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+
+ if (!io)
+ return IRQ_NONE;
+
+ if (rsnd_src_error_record_gen2(mod)) {
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ _rsnd_src_stop_gen2(mod);
+ _rsnd_src_start_gen2(mod);
+
+ dev_dbg(dev, "%s[%d] restart\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
@@ -535,7 +643,7 @@ static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod,
return -EINVAL;
}
- ret = rsnd_src_set_convert_rate(mod, rdai);
+ ret = rsnd_src_set_convert_rate(mod);
if (ret < 0)
return ret;
@@ -563,8 +671,7 @@ static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_src_set_convert_timing_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_set_convert_timing_gen2(struct rsnd_mod *mod)
{
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -573,59 +680,78 @@ static int rsnd_src_set_convert_timing_gen2(struct rsnd_mod *mod,
int ret;
if (convert_rate)
- ret = rsnd_adg_set_convert_clk_gen2(mod, rdai, io,
+ ret = rsnd_adg_set_convert_clk_gen2(mod, io,
runtime->rate,
convert_rate);
else
- ret = rsnd_adg_set_convert_timing_gen2(mod, rdai, io);
+ ret = rsnd_adg_set_convert_timing_gen2(mod, io);
return ret;
}
static int rsnd_src_probe_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct rsnd_src *src = rsnd_mod_to_src(mod);
struct device *dev = rsnd_priv_to_dev(priv);
+ int irq = src->info->irq;
int ret;
+ if (irq > 0) {
+ /*
+ * IRQ is not supported on non-DT
+ * see
+ * rsnd_src_irq_enable_gen2()
+ */
+ ret = devm_request_irq(dev, irq,
+ rsnd_src_interrupt_gen2,
+ IRQF_SHARED,
+ dev_name(dev), mod);
+ if (ret)
+ goto rsnd_src_probe_gen2_fail;
+ }
+
ret = rsnd_dma_init(priv,
rsnd_mod_to_dma(mod),
rsnd_info_is_playback(priv, src),
src->info->dma_id);
- if (ret < 0)
- dev_err(dev, "%s[%d] (Gen2) failed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
- else
- dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
+ if (ret)
+ goto rsnd_src_probe_gen2_fail;
+
+ dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ return ret;
+
+rsnd_src_probe_gen2_fail:
+ dev_err(dev, "%s[%d] (Gen2) failed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
static int rsnd_src_remove_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- rsnd_dma_quit(rsnd_mod_to_priv(mod), rsnd_mod_to_dma(mod));
+ rsnd_dma_quit(priv, rsnd_mod_to_dma(mod));
return 0;
}
static int rsnd_src_init_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
int ret;
- ret = rsnd_src_init(mod, rdai);
+ ret = rsnd_src_init(mod);
if (ret < 0)
return ret;
- ret = rsnd_src_set_convert_rate_gen2(mod, rdai);
+ ret = rsnd_src_set_convert_rate_gen2(mod);
if (ret < 0)
return ret;
- ret = rsnd_src_set_convert_timing_gen2(mod, rdai);
+ ret = rsnd_src_set_convert_timing_gen2(mod);
if (ret < 0)
return ret;
@@ -633,29 +759,23 @@ static int rsnd_src_init_gen2(struct rsnd_mod *mod,
}
static int rsnd_src_start_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
- struct rsnd_src *src = rsnd_mod_to_src(mod);
- u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11;
-
- rsnd_dma_start(rsnd_mod_to_dma(&src->mod));
+ rsnd_dma_start(rsnd_mod_to_dma(mod));
- rsnd_mod_write(mod, SRC_CTRL, val);
-
- return rsnd_src_start(mod);
+ return _rsnd_src_start_gen2(mod);
}
static int rsnd_src_stop_gen2(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_src *src = rsnd_mod_to_src(mod);
+ int ret;
- rsnd_mod_write(mod, SRC_CTRL, 0);
+ ret = _rsnd_src_stop_gen2(mod);
- rsnd_dma_stop(rsnd_mod_to_dma(&src->mod));
+ rsnd_dma_stop(rsnd_mod_to_dma(mod));
- return rsnd_src_stop(mod);
+ return ret;
}
static struct rsnd_mod_ops rsnd_src_gen2_ops = {
@@ -681,10 +801,11 @@ static void rsnd_of_parse_src(struct platform_device *pdev,
struct rsnd_priv *priv)
{
struct device_node *src_node;
+ struct device_node *np;
struct rcar_snd_info *info = rsnd_priv_to_info(priv);
struct rsnd_src_platform_info *src_info;
struct device *dev = &pdev->dev;
- int nr;
+ int nr, i;
if (!of_data)
return;
@@ -708,6 +829,13 @@ static void rsnd_of_parse_src(struct platform_device *pdev,
info->src_info = src_info;
info->src_info_nr = nr;
+ i = 0;
+ for_each_child_of_node(src_node, np) {
+ src_info[i].irq = irq_of_parse_and_map(np, 0);
+
+ i++;
+ }
+
rsnd_of_parse_src_end:
of_node_put(src_node);
}
@@ -761,9 +889,8 @@ int rsnd_src_probe(struct platform_device *pdev,
return PTR_ERR(clk);
src->info = &info->src_info[i];
- src->clk = clk;
- rsnd_mod_init(priv, &src->mod, ops, RSND_MOD_SRC, i);
+ rsnd_mod_init(&src->mod, ops, clk, RSND_MOD_SRC, i);
dev_dbg(dev, "SRC%d probed\n", i);
}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 3844fbef4664..9e7b627c08e2 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -60,17 +60,14 @@
#define SSI_NAME "ssi"
struct rsnd_ssi {
- struct clk *clk;
struct rsnd_ssi_platform_info *info; /* rcar_snd.h */
struct rsnd_ssi *parent;
struct rsnd_mod mod;
- struct rsnd_dai *rdai;
u32 cr_own;
u32 cr_clk;
int err;
unsigned int usrcnt;
- unsigned int rate;
};
#define for_each_rsnd_ssi(pos, priv, i) \
@@ -128,7 +125,7 @@ static void rsnd_ssi_status_check(struct rsnd_mod *mod,
static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
struct rsnd_dai_stream *io)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct device *dev = rsnd_priv_to_dev(priv);
int i, j, ret;
@@ -157,7 +154,6 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
ret = rsnd_adg_ssi_clk_try_start(&ssi->mod, main_rate);
if (0 == ret) {
- ssi->rate = rate;
ssi->cr_clk = FORCE | SWL_32 |
SCKD | SWSD | CKDV(j);
@@ -176,26 +172,25 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
static void rsnd_ssi_master_clk_stop(struct rsnd_ssi *ssi)
{
- ssi->rate = 0;
ssi->cr_clk = 0;
rsnd_adg_ssi_clk_stop(&ssi->mod);
}
static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
- struct rsnd_dai *rdai,
struct rsnd_dai_stream *io)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
struct device *dev = rsnd_priv_to_dev(priv);
u32 cr_mode;
u32 cr;
if (0 == ssi->usrcnt) {
- clk_prepare_enable(ssi->clk);
+ rsnd_mod_hw_start(&ssi->mod);
- if (rsnd_dai_is_clk_master(rdai)) {
+ if (rsnd_rdai_is_clk_master(rdai)) {
if (rsnd_ssi_clk_from_parent(ssi))
- rsnd_ssi_hw_start(ssi->parent, rdai, io);
+ rsnd_ssi_hw_start(ssi->parent, io);
else
rsnd_ssi_master_clk_start(ssi, io);
}
@@ -214,7 +209,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
rsnd_mod_write(&ssi->mod, SSICR, cr);
/* enable WS continue */
- if (rsnd_dai_is_clk_master(rdai))
+ if (rsnd_rdai_is_clk_master(rdai))
rsnd_mod_write(&ssi->mod, SSIWSR, CONT);
/* clear error status */
@@ -226,10 +221,11 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
rsnd_mod_name(&ssi->mod), rsnd_mod_id(&ssi->mod));
}
-static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
- struct rsnd_dai *rdai)
+static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(&ssi->mod);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
struct device *dev = rsnd_priv_to_dev(priv);
u32 cr;
@@ -256,14 +252,14 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
rsnd_mod_write(&ssi->mod, SSICR, cr); /* disabled all */
rsnd_ssi_status_check(&ssi->mod, IIRQ);
- if (rsnd_dai_is_clk_master(rdai)) {
+ if (rsnd_rdai_is_clk_master(rdai)) {
if (rsnd_ssi_clk_from_parent(ssi))
- rsnd_ssi_hw_stop(ssi->parent, rdai);
+ rsnd_ssi_hw_stop(ssi->parent);
else
rsnd_ssi_master_clk_stop(ssi);
}
- clk_disable_unprepare(ssi->clk);
+ rsnd_mod_hw_stop(&ssi->mod);
}
dev_dbg(dev, "%s[%d] hw stopped\n",
@@ -274,10 +270,11 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
* SSI mod common functions
*/
static int rsnd_ssi_init(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 cr;
@@ -311,13 +308,12 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
cr |= SDTA;
if (rdai->sys_delay)
cr |= DEL;
- if (rsnd_dai_is_play(rdai, io))
+ if (rsnd_io_is_play(io))
cr |= TRMD;
/*
* set ssi parameter
*/
- ssi->rdai = rdai;
ssi->cr_own = cr;
ssi->err = -1; /* ignore 1st error */
@@ -325,16 +321,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
}
static int rsnd_ssi_quit(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
if (ssi->err > 0)
- dev_warn(dev, "ssi under/over flow err = %d\n", ssi->err);
+ dev_warn(dev, "%s[%d] under/over flow err = %d\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod), ssi->err);
- ssi->rdai = NULL;
ssi->cr_own = 0;
ssi->err = 0;
@@ -353,32 +348,32 @@ static void rsnd_ssi_record_error(struct rsnd_ssi *ssi, u32 status)
}
static int rsnd_ssi_start(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
- rsnd_src_ssiu_start(mod, rdai, rsnd_ssi_use_busif(mod));
+ rsnd_src_ssiu_start(mod, rsnd_ssi_use_busif(mod));
- rsnd_ssi_hw_start(ssi, rdai, io);
+ rsnd_ssi_hw_start(ssi, io);
- rsnd_src_ssi_irq_enable(mod, rdai);
+ rsnd_src_ssi_irq_enable(mod);
return 0;
}
static int rsnd_ssi_stop(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- rsnd_src_ssi_irq_disable(mod, rdai);
+ rsnd_src_ssi_irq_disable(mod);
rsnd_ssi_record_error(ssi, rsnd_mod_read(mod, SSISR));
- rsnd_ssi_hw_stop(ssi, rdai);
+ rsnd_ssi_hw_stop(ssi);
- rsnd_src_ssiu_stop(mod, rdai);
+ rsnd_src_ssiu_stop(mod);
return 0;
}
@@ -386,16 +381,17 @@ static int rsnd_ssi_stop(struct rsnd_mod *mod,
static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
{
struct rsnd_ssi *ssi = data;
- struct rsnd_dai *rdai = ssi->rdai;
struct rsnd_mod *mod = &ssi->mod;
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ int is_dma = rsnd_ssi_is_dma_mode(mod);
u32 status = rsnd_mod_read(mod, SSISR);
if (!io)
return IRQ_NONE;
/* PIO only */
- if (status & DIRQ) {
+ if (!is_dma && (status & DIRQ)) {
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 *buf = (u32 *)(runtime->dma_area +
rsnd_dai_pointer_offset(io, 0));
@@ -405,7 +401,7 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
* directly as 32bit data
* see rsnd_ssi_init()
*/
- if (rsnd_dai_is_play(rdai, io))
+ if (rsnd_io_is_play(io))
rsnd_mod_write(mod, SSITDR, *buf);
else
*buf = rsnd_mod_read(mod, SSIRDR);
@@ -415,14 +411,13 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
/* PIO / DMA */
if (status & (UIRQ | OIRQ)) {
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
/*
* restart SSI
*/
- rsnd_ssi_stop(mod, rdai);
- rsnd_ssi_start(mod, rdai);
+ rsnd_ssi_stop(mod, priv);
+ rsnd_ssi_start(mod, priv);
dev_dbg(dev, "%s[%d] restart\n",
rsnd_mod_name(mod), rsnd_mod_id(mod));
@@ -437,9 +432,8 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
* SSI PIO
*/
static int rsnd_ssi_pio_probe(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
int ret;
@@ -468,9 +462,8 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
};
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct device *dev = rsnd_priv_to_dev(priv);
int dma_id = ssi->info->dma_id;
@@ -503,14 +496,13 @@ rsnd_ssi_dma_probe_fail:
}
static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct device *dev = rsnd_priv_to_dev(priv);
int irq = ssi->info->irq;
- rsnd_dma_quit(rsnd_mod_to_priv(mod), rsnd_mod_to_dma(mod));
+ rsnd_dma_quit(priv, rsnd_mod_to_dma(mod));
/* PIO will request IRQ again */
devm_free_irq(dev, irq, ssi);
@@ -519,9 +511,8 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
}
static int rsnd_ssi_fallback(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
/*
@@ -540,25 +531,25 @@ static int rsnd_ssi_fallback(struct rsnd_mod *mod,
}
static int rsnd_ssi_dma_start(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
- rsnd_ssi_start(mod, rdai);
-
rsnd_dma_start(dma);
+ rsnd_ssi_start(mod, priv);
+
return 0;
}
static int rsnd_ssi_dma_stop(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+ struct rsnd_priv *priv)
{
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
- rsnd_dma_stop(dma);
+ rsnd_ssi_stop(mod, priv);
- rsnd_ssi_stop(mod, rdai);
+ rsnd_dma_stop(dma);
return 0;
}
@@ -734,7 +725,6 @@ int rsnd_ssi_probe(struct platform_device *pdev,
return PTR_ERR(clk);
ssi->info = pinfo;
- ssi->clk = clk;
ops = &rsnd_ssi_non_ops;
if (pinfo->dma_id > 0)
@@ -742,7 +732,7 @@ int rsnd_ssi_probe(struct platform_device *pdev,
else if (rsnd_ssi_pio_available(ssi))
ops = &rsnd_ssi_pio_ops;
- rsnd_mod_init(priv, &ssi->mod, ops, RSND_MOD_SSI, i);
+ rsnd_mod_init(&ssi->mod, ops, clk, RSND_MOD_SSI, i);
rsnd_ssi_parent_clk_setup(priv, ssi);
}
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 32eb6da2d2bd..82902f56e82f 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -589,7 +589,6 @@ static void siu_pcm_free(struct snd_pcm *pcm)
tasklet_kill(&port_info->playback.tasklet);
siu_free_port(port_info);
- snd_pcm_lib_preallocate_free_for_all(pcm);
dev_dbg(pcm->card->dev, "%s\n", __func__);
}
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 2e10e9a38376..08d7259bbaab 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev)
}
/**
- * snd_soc_new_ac97_codec - initailise AC97 device
- * @codec: audio codec
+ * snd_soc_alloc_ac97_codec() - Allocate a new AC'97 device
+ * @codec: The CODEC for which to create the AC'97 device
*
- * Initialises AC97 codec resources for use by ad-hoc devices only.
+ * Allocates a new snd_ac97 device and initializes it, but does not yet register
+ * it. The caller is responsible for either calling device_add(&ac97->dev) to
+ * register the device, or calling put_device(&ac97->dev) to free the device.
+ *
+ * Returns: A snd_ac97 device, or an ERR_PTR in case of an error.
*/
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec)
{
struct snd_ac97 *ac97;
- int ret;
ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
if (ac97 == NULL)
@@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
codec->component.card->snd_card->number, 0,
codec->component.name);
- ret = device_register(&ac97->dev);
+ device_initialize(&ac97->dev);
+
+ return ac97;
+}
+EXPORT_SYMBOL(snd_soc_alloc_ac97_codec);
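The point of the new helper is visible in the refactored snd_soc_new_ac97_codec() just below: allocation and registration become two separate steps, so a CODEC driver can probe the bus before committing to the device. A minimal caller sketch (illustrative only; example_codec_probe() and bus_probe_chip() are placeholders, not real kernel API):

	static int example_codec_probe(struct snd_soc_codec *codec)
	{
		struct snd_ac97 *ac97;
		int ret;

		ac97 = snd_soc_alloc_ac97_codec(codec);
		if (IS_ERR(ac97))
			return PTR_ERR(ac97);

		ret = bus_probe_chip(ac97);	/* placeholder: verify the chip answers */
		if (ret) {
			put_device(&ac97->dev);	/* never registered, so just drop it */
			return ret;
		}

		return device_add(&ac97->dev);	/* commit the allocated device */
	}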
+
+/**
+ * snd_soc_new_ac97_codec - initialise AC97 device
+ * @codec: audio codec
+ *
+ * Initialises AC97 codec resources for use by ad-hoc devices only.
+ */
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+{
+ struct snd_ac97 *ac97;
+ int ret;
+
+ ac97 = snd_soc_alloc_ac97_codec(codec);
+ if (IS_ERR(ac97))
+ return ac97;
+
+ ret = device_add(&ac97->dev);
if (ret) {
put_device(&ac97->dev);
return ERR_PTR(ret);
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 590a82f01d0b..025c38fbe3c0 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
rtd->dai_link->stream_name);
ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
- 1, 0, &be_pcm);
+ rtd->dai_link->dpcm_playback,
+ rtd->dai_link->dpcm_capture, &be_pcm);
if (ret < 0) {
dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
rtd->pcm = be_pcm;
rtd->fe_compr = 1;
- be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
- be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+ if (rtd->dai_link->dpcm_playback)
+ be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+ else if (rtd->dai_link->dpcm_capture)
+ be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
} else
memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 2c62620abca6..30579ca5bacb 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -191,6 +191,39 @@ static ssize_t pmdown_time_set(struct device *dev,
static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set);
+static struct attribute *soc_dev_attrs[] = {
+ &dev_attr_codec_reg.attr,
+ &dev_attr_pmdown_time.attr,
+ NULL
+};
+
+static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_pmdown_time.attr)
+ return attr->mode; /* always visible */
+ return rtd->codec ? attr->mode : 0; /* enabled only with codec */
+}
+
+static const struct attribute_group soc_dapm_dev_group = {
+ .attrs = soc_dapm_dev_attrs,
+ .is_visible = soc_dev_attr_is_visible,
+};
+
+static const struct attribute_group soc_dev_group = {
+ .attrs = soc_dev_attrs,
+ .is_visible = soc_dev_attr_is_visible,
+};
+
+static const struct attribute_group *soc_dev_attr_groups[] = {
+ &soc_dapm_dev_group,
+ &soc_dev_group,
+ NULL
+};
+
#ifdef CONFIG_DEBUG_FS
static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -949,8 +982,6 @@ static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
/* unregister the rtd device */
if (rtd->dev_registered) {
- device_remove_file(rtd->dev, &dev_attr_pmdown_time);
- device_remove_file(rtd->dev, &dev_attr_codec_reg);
device_unregister(rtd->dev);
rtd->dev_registered = 0;
}
@@ -1120,6 +1151,7 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd,
device_initialize(rtd->dev);
rtd->dev->parent = rtd->card->dev;
rtd->dev->release = rtd_release;
+ rtd->dev->groups = soc_dev_attr_groups;
dev_set_name(rtd->dev, "%s", name);
dev_set_drvdata(rtd->dev, rtd);
mutex_init(&rtd->pcm_mutex);
@@ -1136,23 +1168,6 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd,
return ret;
}
rtd->dev_registered = 1;
-
- if (rtd->codec) {
- /* add DAPM sysfs entries for this codec */
- ret = snd_soc_dapm_sys_add(rtd->dev);
- if (ret < 0)
- dev_err(rtd->dev,
- "ASoC: failed to add codec dapm sysfs entries: %d\n",
- ret);
-
- /* add codec sysfs entries */
- ret = device_create_file(rtd->dev, &dev_attr_codec_reg);
- if (ret < 0)
- dev_err(rtd->dev,
- "ASoC: failed to add codec sysfs files: %d\n",
- ret);
- }
-
return 0;
}
@@ -1308,11 +1323,6 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
}
#endif
- ret = device_create_file(rtd->dev, &dev_attr_pmdown_time);
- if (ret < 0)
- dev_warn(rtd->dev, "ASoC: failed to add pmdown_time sysfs: %d\n",
- ret);
-
if (cpu_dai->driver->compress_dai) {
/*create compress_device"*/
ret = soc_new_compress(rtd, num);
@@ -1427,11 +1437,76 @@ static int snd_soc_init_codec_cache(struct snd_soc_codec *codec)
return 0;
}
+/**
+ * snd_soc_runtime_set_dai_fmt() - Change DAI link format for an ASoC runtime
+ * @rtd: The runtime for which the DAI link format should be changed
+ * @dai_fmt: The new DAI link format
+ *
+ * This function updates the DAI link format for all DAIs connected to the DAI
+ * link for the specified runtime.
+ *
+ * Note: For setups with a static format, set the dai_fmt field in the
+ * corresponding snd_soc_dai_link struct instead of using this function.
+ *
+ * Returns 0 on success, otherwise a negative error code.
+ */
+int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
+ unsigned int dai_fmt)
+{
+ struct snd_soc_dai **codec_dais = rtd->codec_dais;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ struct snd_soc_dai *codec_dai = codec_dais[i];
+
+ ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
+ if (ret != 0 && ret != -ENOTSUPP) {
+ dev_warn(codec_dai->dev,
+ "ASoC: Failed to set DAI format: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
+ if (cpu_dai->codec) {
+ unsigned int inv_dai_fmt;
+
+ inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
+ switch (dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ break;
+ }
+
+ dai_fmt = inv_dai_fmt;
+ }
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt);
+ if (ret != 0 && ret != -ENOTSUPP) {
+ dev_warn(cpu_dai->dev,
+ "ASoC: Failed to set DAI format: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt);
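The mop500_ab8500 hunk further down in this diff is the first user; the same pattern applies to any machine driver that previously called snd_soc_dai_set_fmt() by hand for each DAI. A minimal sketch, assuming the runtime and format word are already available in a hw_params callback:

	static int example_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		unsigned int fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBS_CFS;

		/* Sets the format on every CODEC DAI and on the CPU DAI of this
		 * link, flipping master/slave polarity for CODEC<->CODEC links
		 * as the function above does.
		 */
		return snd_soc_runtime_set_dai_fmt(rtd, fmt);
	}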
+
static int snd_soc_instantiate_card(struct snd_soc_card *card)
{
struct snd_soc_codec *codec;
- struct snd_soc_dai_link *dai_link;
- int ret, i, order, dai_fmt;
+ int ret, i, order;
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
@@ -1542,60 +1617,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
card->num_dapm_routes);
for (i = 0; i < card->num_links; i++) {
- struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
- dai_link = &card->dai_link[i];
- dai_fmt = dai_link->dai_fmt;
-
- if (dai_fmt) {
- struct snd_soc_dai **codec_dais = rtd->codec_dais;
- int j;
-
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = codec_dais[j];
-
- ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(codec_dai->dev,
- "ASoC: Failed to set DAI format: %d\n",
- ret);
- }
- }
-
- /* If this is a regular CPU link there will be a platform */
- if (dai_fmt &&
- (dai_link->platform_name || dai_link->platform_of_node)) {
- ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai,
- dai_fmt);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(card->rtd[i].cpu_dai->dev,
- "ASoC: Failed to set DAI format: %d\n",
- ret);
- } else if (dai_fmt) {
- /* Flip the polarity for the "CPU" end */
- dai_fmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
- switch (dai_link->dai_fmt &
- SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
- break;
- case SND_SOC_DAIFMT_CBS_CFS:
- dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
- break;
- }
-
- ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai,
- dai_fmt);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(card->rtd[i].cpu_dai->dev,
- "ASoC: Failed to set DAI format: %d\n",
- ret);
- }
+ if (card->dai_link[i].dai_fmt)
+ snd_soc_runtime_set_dai_fmt(&card->rtd[i],
+ card->dai_link[i].dai_fmt);
}
snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
@@ -1626,9 +1650,6 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
}
- if (card->fully_routed)
- snd_soc_dapm_auto_nc_pins(card);
-
snd_soc_dapm_new_widgets(card);
ret = snd_card_register(card->snd_card);
@@ -2119,15 +2140,27 @@ static int snd_soc_xlate_tdm_slot_mask(unsigned int slots,
}
/**
- * snd_soc_dai_set_tdm_slot - configure DAI TDM.
- * @dai: DAI
+ * snd_soc_dai_set_tdm_slot() - Configures a DAI for TDM operation
+ * @dai: The DAI to configure
* @tx_mask: bitmask representing active TX slots.
* @rx_mask: bitmask representing active RX slots.
* @slots: Number of slots in use.
* @slot_width: Width in bits for each slot.
*
- * Configures a DAI for TDM operation. Both mask and slots are codec and DAI
- * specific.
+ * This function configures the specified DAI for TDM operation. @slots contains
+ * the total number of slots of the TDM stream and @slot_width the width of each
+ * slot in bit clock cycles. @tx_mask and @rx_mask are bitmasks specifying the
+ * active slots of the TDM stream for the specified DAI, i.e. which slots the
+ * DAI should write to or read from. If a bit is set the corresponding slot is
+ * active, if a bit is cleared the corresponding slot is inactive. Bit 0 maps to
+ * the first slot, bit 1 to the second slot and so on. The first active slot
+ * maps to the first channel of the DAI, the second active slot to the second
+ * channel and so on.
+ *
+ * TDM mode can be disabled by passing 0 for @slots. In this case @tx_mask,
+ * @rx_mask and @slot_width will be ignored.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
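Reading the updated kernel-doc literally, a DAI on a 4-slot TDM bus with 32-bit slots that transmits on slots 0 and 1 and receives on slots 2 and 3 would be set up roughly as follows (sketch only; ret and codec_dai are assumed to come from the machine driver's own code):

	/* 4 slots of 32 bit clocks each; TX mask 0x3 = slots 0/1, RX mask 0xc = slots 2/3 */
	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0xc, 4, 32);
	if (ret < 0)
		return ret;

Passing 0 for @slots later would disable TDM mode again, with the masks and slot width ignored as documented above.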
@@ -2386,8 +2419,8 @@ int snd_soc_unregister_card(struct snd_soc_card *card)
card->instantiated = false;
snd_soc_dapm_shutdown(card);
soc_cleanup_card_resources(card);
+ dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
}
- dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
return 0;
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c5136bb1f982..b6f88202b8c9 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -517,8 +517,8 @@ static int soc_dapm_update_bits(struct snd_soc_dapm_context *dapm,
{
if (!dapm->component)
return -EIO;
- return snd_soc_component_update_bits_async(dapm->component, reg,
- mask, value);
+ return snd_soc_component_update_bits(dapm->component, reg,
+ mask, value);
}
static int soc_dapm_test_bits(struct snd_soc_dapm_context *dapm,
@@ -2127,15 +2127,10 @@ static ssize_t dapm_widget_show(struct device *dev,
static DEVICE_ATTR(dapm_widget, 0444, dapm_widget_show, NULL);
-int snd_soc_dapm_sys_add(struct device *dev)
-{
- return device_create_file(dev, &dev_attr_dapm_widget);
-}
-
-static void snd_soc_dapm_sys_remove(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_dapm_widget);
-}
+struct attribute *soc_dapm_dev_attrs[] = {
+ &dev_attr_dapm_widget.attr,
+ NULL
+};
static void dapm_free_path(struct snd_soc_dapm_path *path)
{
@@ -2279,6 +2274,9 @@ static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
switch (w->id) {
case snd_soc_dapm_input:
+ /* On a fully routed card an input is never a source */
+ if (w->dapm->card->fully_routed)
+ break;
w->is_source = 1;
list_for_each_entry(p, &w->sources, list_sink) {
if (p->source->id == snd_soc_dapm_micbias ||
@@ -2291,6 +2289,9 @@ static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
}
break;
case snd_soc_dapm_output:
+ /* On a fully routed card an output is never a sink */
+ if (w->dapm->card->fully_routed)
+ break;
w->is_sink = 1;
list_for_each_entry(p, &w->sinks, list_source) {
if (p->sink->id == snd_soc_dapm_spk ||
@@ -3085,16 +3086,24 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
switch (w->id) {
case snd_soc_dapm_mic:
- case snd_soc_dapm_input:
w->is_source = 1;
w->power_check = dapm_generic_check_power;
break;
+ case snd_soc_dapm_input:
+ if (!dapm->card->fully_routed)
+ w->is_source = 1;
+ w->power_check = dapm_generic_check_power;
+ break;
case snd_soc_dapm_spk:
case snd_soc_dapm_hp:
- case snd_soc_dapm_output:
w->is_sink = 1;
w->power_check = dapm_generic_check_power;
break;
+ case snd_soc_dapm_output:
+ if (!dapm->card->fully_routed)
+ w->is_sink = 1;
+ w->power_check = dapm_generic_check_power;
+ break;
case snd_soc_dapm_vmid:
case snd_soc_dapm_siggen:
w->is_source = 1;
@@ -3130,8 +3139,6 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
}
w->dapm = dapm;
- if (dapm->component)
- w->codec = dapm->component->codec;
INIT_LIST_HEAD(&w->sources);
INIT_LIST_HEAD(&w->sinks);
INIT_LIST_HEAD(&w->list);
@@ -3809,93 +3816,6 @@ int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
/**
- * dapm_is_external_path() - Checks if a path is a external path
- * @card: The card the path belongs to
- * @path: The path to check
- *
- * Returns true if the path is either between two different DAPM contexts or
- * between two external pins of the same DAPM context. Otherwise returns
- * false.
- */
-static bool dapm_is_external_path(struct snd_soc_card *card,
- struct snd_soc_dapm_path *path)
-{
- dev_dbg(card->dev,
- "... Path %s(id:%d dapm:%p) - %s(id:%d dapm:%p)\n",
- path->source->name, path->source->id, path->source->dapm,
- path->sink->name, path->sink->id, path->sink->dapm);
-
- /* Connection between two different DAPM contexts */
- if (path->source->dapm != path->sink->dapm)
- return true;
-
- /* Loopback connection from external pin to external pin */
- if (path->sink->id == snd_soc_dapm_input) {
- switch (path->source->id) {
- case snd_soc_dapm_output:
- case snd_soc_dapm_micbias:
- return true;
- default:
- break;
- }
- }
-
- return false;
-}
-
-static bool snd_soc_dapm_widget_in_card_paths(struct snd_soc_card *card,
- struct snd_soc_dapm_widget *w)
-{
- struct snd_soc_dapm_path *p;
-
- list_for_each_entry(p, &w->sources, list_sink) {
- if (dapm_is_external_path(card, p))
- return true;
- }
-
- list_for_each_entry(p, &w->sinks, list_source) {
- if (dapm_is_external_path(card, p))
- return true;
- }
-
- return false;
-}
-
-/**
- * snd_soc_dapm_auto_nc_pins - call snd_soc_dapm_nc_pin for unused pins
- * @card: The card whose pins should be processed
- *
- * Automatically call snd_soc_dapm_nc_pin() for any external pins in the card
- * which are unused. Pins are used if they are connected externally to a
- * component, whether that be to some other device, or a loop-back connection to
- * the component itself.
- */
-void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card)
-{
- struct snd_soc_dapm_widget *w;
-
- dev_dbg(card->dev, "ASoC: Auto NC: DAPMs: card:%p\n", &card->dapm);
-
- list_for_each_entry(w, &card->widgets, list) {
- switch (w->id) {
- case snd_soc_dapm_input:
- case snd_soc_dapm_output:
- case snd_soc_dapm_micbias:
- dev_dbg(card->dev, "ASoC: Auto NC: Checking widget %s\n",
- w->name);
- if (!snd_soc_dapm_widget_in_card_paths(card, w)) {
- dev_dbg(card->dev,
- "... Not in map; disabling\n");
- snd_soc_dapm_nc_pin(w->dapm, w->name);
- }
- break;
- default:
- break;
- }
- }
-}
-
-/**
* snd_soc_dapm_free - free dapm resources
* @dapm: DAPM context
*
@@ -3903,7 +3823,6 @@ void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card)
*/
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm)
{
- snd_soc_dapm_sys_remove(dapm->dev);
dapm_debugfs_cleanup(dapm);
dapm_free_widgets(dapm);
list_del(&dapm->list);
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
index 057e5ef7dcce..a57921eeee81 100644
--- a/sound/soc/soc-devres.c
+++ b/sound/soc/soc-devres.c
@@ -60,7 +60,7 @@ static void devm_platform_release(struct device *dev, void *res)
/**
* devm_snd_soc_register_platform - resource managed platform registration
* @dev: Device used to manage platform
- * @platform: platform to register
+ * @platform_drv: platform to register
*
* Register a platform driver with automatic unregistration when the device is
* unregistered.
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index b329b84bc5af..4864392bfcba 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -200,11 +200,6 @@ static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
return snd_dmaengine_pcm_open(substream, chan);
}
-static void dmaengine_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static struct dma_chan *dmaengine_pcm_compat_request_channel(
struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
@@ -283,8 +278,7 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
if (!pcm->chan[i]) {
dev_err(rtd->platform->dev,
"Missing dma channel for stream: %d\n", i);
- ret = -EINVAL;
- goto err_free;
+ return -EINVAL;
}
ret = snd_pcm_lib_preallocate_pages(substream,
@@ -293,7 +287,7 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
prealloc_buffer_size,
max_buffer_size);
if (ret)
- goto err_free;
+ return ret;
/*
* This will only return false if we know for sure that at least
@@ -307,10 +301,6 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
return 0;
-
-err_free:
- dmaengine_pcm_free(rtd->pcm);
- return ret;
}
static snd_pcm_uframes_t dmaengine_pcm_pointer(
@@ -341,7 +331,6 @@ static const struct snd_soc_platform_driver dmaengine_pcm_platform = {
},
.ops = &dmaengine_pcm_ops,
.pcm_new = dmaengine_pcm_new,
- .pcm_free = dmaengine_pcm_free,
};
static const char * const dmaengine_pcm_dma_channel_names[] = {
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index eb87d96e2cf0..6b0136e7cb88 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -301,34 +301,18 @@ static bool soc_pcm_has_symmetry(struct snd_pcm_substream *substream)
return symmetry;
}
-/*
- * List of sample sizes that might go over the bus for parameter
- * application. There ought to be a wildcard sample size for things
- * like the DAC/ADC resolution to use but there isn't right now.
- */
-static int sample_sizes[] = {
- 24, 32,
-};
-
static void soc_pcm_set_msb(struct snd_pcm_substream *substream, int bits)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int ret, i;
+ int ret;
if (!bits)
return;
- for (i = 0; i < ARRAY_SIZE(sample_sizes); i++) {
- if (bits >= sample_sizes[i])
- continue;
-
- ret = snd_pcm_hw_constraint_msbits(substream->runtime, 0,
- sample_sizes[i], bits);
- if (ret != 0)
- dev_warn(rtd->dev,
- "ASoC: Failed to set MSB %d/%d: %d\n",
- bits, sample_sizes[i], ret);
- }
+ ret = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 0, bits);
+ if (ret != 0)
+ dev_warn(rtd->dev, "ASoC: Failed to set MSB %d: %d\n",
+ bits, ret);
}
static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
@@ -746,7 +730,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
codec_dai);
if (ret < 0) {
dev_err(codec_dai->dev,
- "ASoC: DAI prepare error: %d\n", ret);
+ "ASoC: codec DAI prepare error: %d\n",
+ ret);
goto out;
}
}
@@ -755,8 +740,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops && cpu_dai->driver->ops->prepare) {
ret = cpu_dai->driver->ops->prepare(substream, cpu_dai);
if (ret < 0) {
- dev_err(cpu_dai->dev, "ASoC: DAI prepare error: %d\n",
- ret);
+ dev_err(cpu_dai->dev,
+ "ASoC: cpu DAI prepare error: %d\n", ret);
goto out;
}
}
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index 31198cf7f88d..a6768f832c6f 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -128,3 +128,13 @@ config SND_SOC_TEGRA_MAX98090
help
Say Y or M here if you want to add support for SoC audio on Tegra
boards using the MAX98090 codec, such as Venice2.
+
+config SND_SOC_TEGRA_RT5677
+ tristate "SoC Audio support for Tegra boards using a RT5677 codec"
+ depends on SND_SOC_TEGRA && I2C && GPIOLIB
+ select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
+ select SND_SOC_RT5677
+ help
+ Say Y or M here if you want to add support for SoC audio on Tegra
+ boards using the RT5677 codec, such as Ryu.
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
index 5ae588cd96c4..9171655ad843 100644
--- a/sound/soc/tegra/Makefile
+++ b/sound/soc/tegra/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SND_SOC_TEGRA30_I2S) += snd-soc-tegra30-i2s.o
# Tegra machine Support
snd-soc-tegra-rt5640-objs := tegra_rt5640.o
+snd-soc-tegra-rt5677-objs := tegra_rt5677.o
snd-soc-tegra-wm8753-objs := tegra_wm8753.o
snd-soc-tegra-wm8903-objs := tegra_wm8903.o
snd-soc-tegra-wm9712-objs := tegra_wm9712.o
@@ -27,6 +28,7 @@ snd-soc-tegra-alc5632-objs := tegra_alc5632.o
snd-soc-tegra-max98090-objs := tegra_max98090.o
obj-$(CONFIG_SND_SOC_TEGRA_RT5640) += snd-soc-tegra-rt5640.o
+obj-$(CONFIG_SND_SOC_TEGRA_RT5677) += snd-soc-tegra-rt5677.o
obj-$(CONFIG_SND_SOC_TEGRA_WM8753) += snd-soc-tegra-wm8753.o
obj-$(CONFIG_SND_SOC_TEGRA_WM8903) += snd-soc-tegra-wm8903.o
obj-$(CONFIG_SND_SOC_TEGRA_WM9712) += snd-soc-tegra-wm9712.o
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
new file mode 100644
index 000000000000..e4cf978a6e3a
--- /dev/null
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -0,0 +1,347 @@
+/*
+ * tegra_rt5677.c - Tegra machine ASoC driver for boards using RT5677 codec.
+ *
+ * Copyright (c) 2014, The Chromium OS Authors. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (C) 2010-2012 - NVIDIA, Inc.
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
+ * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/rt5677.h"
+
+#include "tegra_asoc_utils.h"
+
+#define DRV_NAME "tegra-snd-rt5677"
+
+struct tegra_rt5677 {
+ struct tegra_asoc_utils_data util_data;
+ int gpio_hp_det;
+ int gpio_hp_en;
+ int gpio_mic_present;
+ int gpio_dmic_clk_en;
+};
+
+static int tegra_rt5677_asoc_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, err;
+
+ srate = params_rate(params);
+ mclk = 256 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_MCLK, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+ return 0;
+}
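For a concrete data point (editor's arithmetic, not from the patch): at srate = 48000 the code above requests an MCLK of 256 * 48000 = 12288000 Hz (12.288 MHz) from tegra_asoc_utils and then tells the RT5677 to take that MCLK as its system clock input.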
+
+static int tegra_rt5677_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
+
+ if (!gpio_is_valid(machine->gpio_hp_en))
+ return 0;
+
+ gpio_set_value_cansleep(machine->gpio_hp_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static struct snd_soc_ops tegra_rt5677_ops = {
+ .hw_params = tegra_rt5677_asoc_hw_params,
+};
+
+static struct snd_soc_jack tegra_rt5677_hp_jack;
+
+static struct snd_soc_jack_pin tegra_rt5677_hp_jack_pins = {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+};
+static struct snd_soc_jack_gpio tegra_rt5677_hp_jack_gpio = {
+ .name = "Headphone detection",
+ .report = SND_JACK_HEADPHONE,
+ .debounce_time = 150,
+};
+
+static struct snd_soc_jack tegra_rt5677_mic_jack;
+
+static struct snd_soc_jack_pin tegra_rt5677_mic_jack_pins = {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+};
+
+static struct snd_soc_jack_gpio tegra_rt5677_mic_jack_gpio = {
+ .name = "Headset Mic detection",
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 150,
+ .invert = 1
+};
+
+static const struct snd_soc_dapm_widget tegra_rt5677_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_HP("Headphone", tegra_rt5677_event_hp),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Internal Mic 1", NULL),
+ SND_SOC_DAPM_MIC("Internal Mic 2", NULL),
+};
+
+static const struct snd_kcontrol_new tegra_rt5677_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
+ SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
+};
+
+static int tegra_rt5677_asoc_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(rtd->card);
+
+ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+ &tegra_rt5677_hp_jack);
+ snd_soc_jack_add_pins(&tegra_rt5677_hp_jack, 1,
+ &tegra_rt5677_hp_jack_pins);
+
+ if (gpio_is_valid(machine->gpio_hp_det)) {
+ tegra_rt5677_hp_jack_gpio.gpio = machine->gpio_hp_det;
+ snd_soc_jack_add_gpios(&tegra_rt5677_hp_jack, 1,
+ &tegra_rt5677_hp_jack_gpio);
+ }
+
+
+ snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
+ &tegra_rt5677_mic_jack);
+ snd_soc_jack_add_pins(&tegra_rt5677_mic_jack, 1,
+ &tegra_rt5677_mic_jack_pins);
+
+ if (gpio_is_valid(machine->gpio_mic_present)) {
+ tegra_rt5677_mic_jack_gpio.gpio = machine->gpio_mic_present;
+ snd_soc_jack_add_gpios(&tegra_rt5677_mic_jack, 1,
+ &tegra_rt5677_mic_jack_gpio);
+ }
+
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+
+ return 0;
+}
+
+static int tegra_rt5677_card_remove(struct snd_soc_card *card)
+{
+ struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
+
+ if (gpio_is_valid(machine->gpio_hp_det)) {
+ snd_soc_jack_free_gpios(&tegra_rt5677_hp_jack, 1,
+ &tegra_rt5677_hp_jack_gpio);
+ }
+
+ if (gpio_is_valid(machine->gpio_mic_present)) {
+ snd_soc_jack_free_gpios(&tegra_rt5677_mic_jack, 1,
+ &tegra_rt5677_mic_jack_gpio);
+ }
+
+ return 0;
+}
+
+static struct snd_soc_dai_link tegra_rt5677_dai = {
+ .name = "RT5677",
+ .stream_name = "RT5677 PCM",
+ .codec_dai_name = "rt5677-aif1",
+ .init = tegra_rt5677_asoc_init,
+ .ops = &tegra_rt5677_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_tegra_rt5677 = {
+ .name = "tegra-rt5677",
+ .owner = THIS_MODULE,
+ .remove = tegra_rt5677_card_remove,
+ .dai_link = &tegra_rt5677_dai,
+ .num_links = 1,
+ .controls = tegra_rt5677_controls,
+ .num_controls = ARRAY_SIZE(tegra_rt5677_controls),
+ .dapm_widgets = tegra_rt5677_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tegra_rt5677_dapm_widgets),
+ .fully_routed = true,
+};
+
+static int tegra_rt5677_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct snd_soc_card *card = &snd_soc_tegra_rt5677;
+ struct tegra_rt5677 *machine;
+ int ret;
+
+ machine = devm_kzalloc(&pdev->dev,
+ sizeof(struct tegra_rt5677), GFP_KERNEL);
+ if (!machine)
+ return -ENOMEM;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+ machine->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
+ if (machine->gpio_hp_det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ machine->gpio_mic_present = of_get_named_gpio(np,
+ "nvidia,mic-present-gpios", 0);
+ if (machine->gpio_mic_present == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ machine->gpio_hp_en = of_get_named_gpio(np, "nvidia,hp-en-gpios", 0);
+ if (machine->gpio_hp_en == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(machine->gpio_hp_en)) {
+ ret = devm_gpio_request_one(&pdev->dev, machine->gpio_hp_en,
+ GPIOF_OUT_INIT_LOW, "hp_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get hp_en gpio\n");
+ return ret;
+ }
+ }
+
+ machine->gpio_dmic_clk_en = of_get_named_gpio(np,
+ "nvidia,dmic-clk-en-gpios", 0);
+ if (machine->gpio_dmic_clk_en == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(machine->gpio_dmic_clk_en)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ machine->gpio_dmic_clk_en,
+ GPIOF_OUT_INIT_HIGH, "dmic_clk_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get dmic_clk_en gpio\n");
+ return ret;
+ }
+ }
+
+ ret = snd_soc_of_parse_card_name(card, "nvidia,model");
+ if (ret)
+ goto err;
+
+ ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
+ if (ret)
+ goto err;
+
+ tegra_rt5677_dai.codec_of_node = of_parse_phandle(np,
+ "nvidia,audio-codec", 0);
+ if (!tegra_rt5677_dai.codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'nvidia,audio-codec' missing or invalid\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tegra_rt5677_dai.cpu_of_node = of_parse_phandle(np,
+ "nvidia,i2s-controller", 0);
+ if (!tegra_rt5677_dai.cpu_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'nvidia,i2s-controller' missing or invalid\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ tegra_rt5677_dai.platform_of_node = tegra_rt5677_dai.cpu_of_node;
+
+ ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
+ if (ret)
+ goto err;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
+ goto err_fini_utils;
+ }
+
+ return 0;
+
+err_fini_utils:
+ tegra_asoc_utils_fini(&machine->util_data);
+err:
+ return ret;
+}
+
+static int tegra_rt5677_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
+
+ snd_soc_unregister_card(card);
+
+ tegra_asoc_utils_fini(&machine->util_data);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_rt5677_of_match[] = {
+ { .compatible = "nvidia,tegra-audio-rt5677", },
+ {},
+};
+
+static struct platform_driver tegra_rt5677_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = tegra_rt5677_of_match,
+ },
+ .probe = tegra_rt5677_probe,
+ .remove = tegra_rt5677_remove,
+};
+module_platform_driver(tegra_rt5677_driver);
+
+MODULE_AUTHOR("Anatol Pomozov <anatol@google.com>");
+MODULE_DESCRIPTION("Tegra+RT5677 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra_rt5677_of_match);
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 070e44e251ce..88eacfd83da6 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -282,11 +282,6 @@ static struct snd_pcm_ops txx9aclc_pcm_ops = {
.pointer = txx9aclc_pcm_pointer,
};
-static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
@@ -412,7 +407,6 @@ static struct snd_soc_platform_driver txx9aclc_soc_platform = {
.remove = txx9aclc_pcm_remove,
.ops = &txx9aclc_pcm_ops,
.pcm_new = txx9aclc_pcm_new,
- .pcm_free = txx9aclc_pcm_free_dma_buffers,
};
static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/ux500/mop500_ab8500.c b/sound/soc/ux500/mop500_ab8500.c
index be4f1ac7cd5e..aa65370db82a 100644
--- a/sound/soc/ux500/mop500_ab8500.c
+++ b/sound/soc/ux500/mop500_ab8500.c
@@ -290,21 +290,9 @@ static int mop500_ab8500_hw_params(struct snd_pcm_substream *substream,
SND_SOC_DAIFMT_GATED;
}
- ret = snd_soc_dai_set_fmt(codec_dai, fmt);
- if (ret < 0) {
- dev_err(dev,
- "%s: ERROR: snd_soc_dai_set_fmt failed for codec_dai (ret = %d)!\n",
- __func__, ret);
- return ret;
- }
-
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret < 0) {
- dev_err(dev,
- "%s: ERROR: snd_soc_dai_set_fmt failed for cpu_dai (ret = %d)!\n",
- __func__, ret);
+ ret = snd_soc_runtime_set_dai_fmt(rtd, fmt);
+ if (ret)
return ret;
- }
/* Setup TDM-slots */
diff --git a/sound/soc/xtensa/Kconfig b/sound/soc/xtensa/Kconfig
new file mode 100644
index 000000000000..c201beb36de6
--- /dev/null
+++ b/sound/soc/xtensa/Kconfig
@@ -0,0 +1,7 @@
+config SND_SOC_XTFPGA_I2S
+ tristate "XTFPGA I2S master"
+ select REGMAP_MMIO
+ help
+ Say Y or M if you want to add support for codecs attached to the
+ I2S interface on the XTFPGA daughter board. You will also need to select
+ the drivers for the rest of the XTFPGA audio subsystem.
diff --git a/sound/soc/xtensa/Makefile b/sound/soc/xtensa/Makefile
new file mode 100644
index 000000000000..15efbf914226
--- /dev/null
+++ b/sound/soc/xtensa/Makefile
@@ -0,0 +1,3 @@
+snd-soc-xtfpga-i2s-objs := xtfpga-i2s.o
+
+obj-$(CONFIG_SND_SOC_XTFPGA_I2S) += snd-soc-xtfpga-i2s.o
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
new file mode 100644
index 000000000000..1cfb19e12949
--- /dev/null
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -0,0 +1,675 @@
+/*
+ * Xtfpga I2S controller driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define DRV_NAME "xtfpga-i2s"
+
+#define XTFPGA_I2S_VERSION 0x00
+#define XTFPGA_I2S_CONFIG 0x04
+#define XTFPGA_I2S_INT_MASK 0x08
+#define XTFPGA_I2S_INT_STATUS 0x0c
+#define XTFPGA_I2S_CHAN0_DATA 0x10
+#define XTFPGA_I2S_CHAN1_DATA 0x14
+#define XTFPGA_I2S_CHAN2_DATA 0x18
+#define XTFPGA_I2S_CHAN3_DATA 0x1c
+
+#define XTFPGA_I2S_CONFIG_TX_ENABLE 0x1
+#define XTFPGA_I2S_CONFIG_INT_ENABLE 0x2
+#define XTFPGA_I2S_CONFIG_LEFT 0x4
+#define XTFPGA_I2S_CONFIG_RATIO_BASE 8
+#define XTFPGA_I2S_CONFIG_RATIO_MASK 0x0000ff00
+#define XTFPGA_I2S_CONFIG_RES_BASE 16
+#define XTFPGA_I2S_CONFIG_RES_MASK 0x003f0000
+#define XTFPGA_I2S_CONFIG_LEVEL_BASE 24
+#define XTFPGA_I2S_CONFIG_LEVEL_MASK 0x0f000000
+#define XTFPGA_I2S_CONFIG_CHANNEL_BASE 28
+
+#define XTFPGA_I2S_INT_UNDERRUN 0x1
+#define XTFPGA_I2S_INT_LEVEL 0x2
+#define XTFPGA_I2S_INT_VALID 0x3
+
+#define XTFPGA_I2S_FIFO_SIZE 8192
+
+/*
+ * I2S controller operation:
+ *
+ * Enabling TX: output 1 period of zeros (starting with left channel)
+ * and then queued data.
+ *
+ * Level status and interrupt: whenever FIFO level is below FIFO trigger,
+ * level status is 1 and an IRQ is asserted (if enabled).
+ *
+ * Underrun status and interrupt: whenever FIFO is empty, underrun status
+ * is 1 and an IRQ is asserted (if enabled).
+ */
+struct xtfpga_i2s {
+ struct device *dev;
+ struct clk *clk;
+ struct regmap *regmap;
+ void __iomem *regs;
+
+ /* current playback substream. NULL if not playing.
+ *
+ * Access to that field is synchronized between the interrupt handler
+ * and userspace through RCU.
+ *
+ * The interrupt handler (threaded part) does PIO on substream data in an RCU
+ * read-side critical section. The trigger callback sets and clears the
+ * pointer with rcu_assign_pointer when playback is started and stopped.
+ * When userspace is about to free the playback stream in the pcm_close
+ * callback, it synchronizes with the interrupt handler by means of a
+ * synchronize_rcu call.
+ */
+ struct snd_pcm_substream *tx_substream;
+ unsigned (*tx_fn)(struct xtfpga_i2s *i2s,
+ struct snd_pcm_runtime *runtime,
+ unsigned tx_ptr);
+ unsigned tx_ptr; /* next frame index in the sample buffer */
+
+ /* current fifo level estimate.
+ * Doesn't have to be perfectly accurate, but must not be less than
+ * the actual FIFO level in order to avoid a stall on a push attempt.
+ */
+ unsigned tx_fifo_level;
+
+ /* FIFO level at which level interrupt occurs */
+ unsigned tx_fifo_low;
+
+ /* maximal FIFO level */
+ unsigned tx_fifo_high;
+};
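The synchronization scheme described in the tx_substream comment is the classic RCU pointer-publication pattern; a self-contained toy version (illustrative only; struct foo and the function names are placeholders, not part of this driver) looks like this:

	#include <linux/printk.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo { int val; };
	static struct foo __rcu *shared;	/* plays the role of tx_substream */

	static void publish(struct foo *f)	/* cf. trigger(START) */
	{
		rcu_assign_pointer(shared, f);
	}

	static void reader(void)		/* cf. the threaded IRQ handler below */
	{
		struct foo *f;

		rcu_read_lock();
		f = rcu_dereference(shared);
		if (f)
			pr_debug("val=%d\n", f->val);	/* safe while the read lock is held */
		rcu_read_unlock();
	}

	static void writer_stop_and_free(struct foo *f)	/* cf. trigger(STOP) plus pcm_close */
	{
		rcu_assign_pointer(shared, NULL);
		synchronize_rcu();		/* wait out any reader() still running */
		kfree(f);
	}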
+
+static bool xtfpga_i2s_wr_reg(struct device *dev, unsigned int reg)
+{
+ return reg >= XTFPGA_I2S_CONFIG;
+}
+
+static bool xtfpga_i2s_rd_reg(struct device *dev, unsigned int reg)
+{
+ return reg < XTFPGA_I2S_CHAN0_DATA;
+}
+
+static bool xtfpga_i2s_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == XTFPGA_I2S_INT_STATUS;
+}
+
+static const struct regmap_config xtfpga_i2s_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = XTFPGA_I2S_CHAN3_DATA,
+ .writeable_reg = xtfpga_i2s_wr_reg,
+ .readable_reg = xtfpga_i2s_rd_reg,
+ .volatile_reg = xtfpga_i2s_volatile_reg,
+ .cache_type = REGCACHE_FLAT,
+};
+
+/* Generate functions that do PIO from TX DMA area to FIFO for all supported
+ * stream formats.
+ * Functions will be called xtfpga_pcm_tx_<channels>x<sample bits>, e.g.
+ * xtfpga_pcm_tx_2x16 for 16-bit stereo.
+ *
+ * FIFO consists of 32-bit words, one word per channel, always 2 channels.
+ * If the I2S interface is configured with a smaller sample resolution, only
+ * the least significant bits of each word are used.
+ */
+#define xtfpga_pcm_tx_fn(channels, sample_bits) \
+static unsigned xtfpga_pcm_tx_##channels##x##sample_bits( \
+ struct xtfpga_i2s *i2s, struct snd_pcm_runtime *runtime, \
+ unsigned tx_ptr) \
+{ \
+ const u##sample_bits (*p)[channels] = \
+ (void *)runtime->dma_area; \
+\
+ for (; i2s->tx_fifo_level < i2s->tx_fifo_high; \
+ i2s->tx_fifo_level += 2) { \
+ iowrite32(p[tx_ptr][0], \
+ i2s->regs + XTFPGA_I2S_CHAN0_DATA); \
+ iowrite32(p[tx_ptr][channels - 1], \
+ i2s->regs + XTFPGA_I2S_CHAN0_DATA); \
+ if (++tx_ptr >= runtime->buffer_size) \
+ tx_ptr = 0; \
+ } \
+ return tx_ptr; \
+}
+
+xtfpga_pcm_tx_fn(1, 16)
+xtfpga_pcm_tx_fn(2, 16)
+xtfpga_pcm_tx_fn(1, 32)
+xtfpga_pcm_tx_fn(2, 32)
+
+#undef xtfpga_pcm_tx_fn
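For reference, hand-expanding the macro above with channels = 2 and sample_bits = 16 (whitespace aside) gives the function that the hw_params callback later selects for 16-bit stereo:

	static unsigned xtfpga_pcm_tx_2x16(
		struct xtfpga_i2s *i2s, struct snd_pcm_runtime *runtime,
		unsigned tx_ptr)
	{
		const u16 (*p)[2] = (void *)runtime->dma_area;

		for (; i2s->tx_fifo_level < i2s->tx_fifo_high;
		     i2s->tx_fifo_level += 2) {
			iowrite32(p[tx_ptr][0],
				  i2s->regs + XTFPGA_I2S_CHAN0_DATA);
			iowrite32(p[tx_ptr][1],
				  i2s->regs + XTFPGA_I2S_CHAN0_DATA);
			if (++tx_ptr >= runtime->buffer_size)
				tx_ptr = 0;
		}
		return tx_ptr;
	}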
+
+static bool xtfpga_pcm_push_tx(struct xtfpga_i2s *i2s)
+{
+ struct snd_pcm_substream *tx_substream;
+ bool tx_active;
+
+ rcu_read_lock();
+ tx_substream = rcu_dereference(i2s->tx_substream);
+ tx_active = tx_substream && snd_pcm_running(tx_substream);
+ if (tx_active) {
+ unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr);
+ unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime,
+ tx_ptr);
+
+ cmpxchg(&i2s->tx_ptr, tx_ptr, new_tx_ptr);
+ }
+ rcu_read_unlock();
+
+ return tx_active;
+}
+
+static void xtfpga_pcm_refill_fifo(struct xtfpga_i2s *i2s)
+{
+ unsigned int_status;
+ unsigned i;
+
+ regmap_read(i2s->regmap, XTFPGA_I2S_INT_STATUS,
+ &int_status);
+
+ for (i = 0; i < 2; ++i) {
+ bool tx_active = xtfpga_pcm_push_tx(i2s);
+
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_STATUS,
+ XTFPGA_I2S_INT_VALID);
+ if (tx_active)
+ regmap_read(i2s->regmap, XTFPGA_I2S_INT_STATUS,
+ &int_status);
+
+ if (!tx_active ||
+ !(int_status & XTFPGA_I2S_INT_LEVEL))
+ break;
+
+ /* After the push the level IRQ is still asserted, which
+ * means the FIFO level is below tx_fifo_low. Estimate
+ * it as tx_fifo_low.
+ */
+ i2s->tx_fifo_level = i2s->tx_fifo_low;
+ }
+
+ if (!(int_status & XTFPGA_I2S_INT_LEVEL))
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_MASK,
+ XTFPGA_I2S_INT_VALID);
+ else if (!(int_status & XTFPGA_I2S_INT_UNDERRUN))
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_MASK,
+ XTFPGA_I2S_INT_UNDERRUN);
+
+ if (!(int_status & XTFPGA_I2S_INT_UNDERRUN))
+ regmap_update_bits(i2s->regmap, XTFPGA_I2S_CONFIG,
+ XTFPGA_I2S_CONFIG_INT_ENABLE |
+ XTFPGA_I2S_CONFIG_TX_ENABLE,
+ XTFPGA_I2S_CONFIG_INT_ENABLE |
+ XTFPGA_I2S_CONFIG_TX_ENABLE);
+ else
+ regmap_update_bits(i2s->regmap, XTFPGA_I2S_CONFIG,
+ XTFPGA_I2S_CONFIG_INT_ENABLE |
+ XTFPGA_I2S_CONFIG_TX_ENABLE, 0);
+}
+
+static irqreturn_t xtfpga_i2s_threaded_irq_handler(int irq, void *dev_id)
+{
+ struct xtfpga_i2s *i2s = dev_id;
+ struct snd_pcm_substream *tx_substream;
+ unsigned config, int_status, int_mask;
+
+ regmap_read(i2s->regmap, XTFPGA_I2S_CONFIG, &config);
+ regmap_read(i2s->regmap, XTFPGA_I2S_INT_MASK, &int_mask);
+ regmap_read(i2s->regmap, XTFPGA_I2S_INT_STATUS, &int_status);
+
+ if (!(config & XTFPGA_I2S_CONFIG_INT_ENABLE) ||
+ !(int_status & int_mask & XTFPGA_I2S_INT_VALID))
+ return IRQ_NONE;
+
+ /* Update FIFO level estimate in accordance with interrupt status
+ * register.
+ */
+ if (int_status & XTFPGA_I2S_INT_UNDERRUN) {
+ i2s->tx_fifo_level = 0;
+ regmap_update_bits(i2s->regmap, XTFPGA_I2S_CONFIG,
+ XTFPGA_I2S_CONFIG_TX_ENABLE, 0);
+ } else {
+ /* The FIFO isn't empty, but is below tx_fifo_low. Estimate
+ * it as tx_fifo_low.
+ */
+ i2s->tx_fifo_level = i2s->tx_fifo_low;
+ }
+
+ rcu_read_lock();
+ tx_substream = rcu_dereference(i2s->tx_substream);
+
+ if (tx_substream && snd_pcm_running(tx_substream)) {
+ snd_pcm_period_elapsed(tx_substream);
+ if (int_status & XTFPGA_I2S_INT_UNDERRUN)
+ dev_dbg_ratelimited(i2s->dev, "%s: underrun\n",
+ __func__);
+ }
+ rcu_read_unlock();
+
+ /* Refill FIFO, update allowed IRQ reasons, enable IRQ if FIFO is
+ * not empty.
+ */
+ xtfpga_pcm_refill_fifo(i2s);
+
+ return IRQ_HANDLED;
+}
+
+static int xtfpga_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct xtfpga_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ snd_soc_dai_set_dma_data(dai, substream, i2s);
+ return 0;
+}
+
+static int xtfpga_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct xtfpga_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ unsigned srate = params_rate(params);
+ unsigned channels = params_channels(params);
+ unsigned period_size = params_period_size(params);
+ unsigned sample_size = snd_pcm_format_width(params_format(params));
+ unsigned freq, ratio, level;
+ int err;
+
+ regmap_update_bits(i2s->regmap, XTFPGA_I2S_CONFIG,
+ XTFPGA_I2S_CONFIG_RES_MASK,
+ sample_size << XTFPGA_I2S_CONFIG_RES_BASE);
+
+ freq = 256 * srate;
+ err = clk_set_rate(i2s->clk, freq);
+ if (err < 0)
+ return err;
+
+ /* ratio field of the config register controls MCLK->I2S clock
+ * derivation: I2S clock = MCLK / (2 * (ratio + 2)).
+ *
+ * So with MCLK = 256 * sample rate, ratio is 0 for 32-bit stereo
+ * and 2 for 16-bit stereo.
+ */
+ ratio = (freq - (srate * sample_size * 8)) /
+ (srate * sample_size * 4);
+
+ regmap_update_bits(i2s->regmap, XTFPGA_I2S_CONFIG,
+ XTFPGA_I2S_CONFIG_RATIO_MASK,
+ ratio << XTFPGA_I2S_CONFIG_RATIO_BASE);
+
+ i2s->tx_fifo_low = XTFPGA_I2S_FIFO_SIZE / 2;
+
+ /* period_size * 2: FIFO always gets 2 samples per frame */
+ for (level = 1;
+ i2s->tx_fifo_low / 2 >= period_size * 2 &&
+ level < (XTFPGA_I2S_CONFIG_LEVEL_MASK >>
+ XTFPGA_I2S_CONFIG_LEVEL_BASE); ++level)
+ i2s->tx_fifo_low /= 2;
+
+ i2s->tx_fifo_high = 2 * i2s->tx_fifo_low;
+
+ regmap_update_bits(i2s->regmap, XTFPGA_I2S_CONFIG,
+ XTFPGA_I2S_CONFIG_LEVEL_MASK,
+ level << XTFPGA_I2S_CONFIG_LEVEL_BASE);
+
+ dev_dbg(i2s->dev,
+ "%s srate: %u, channels: %u, sample_size: %u, period_size: %u\n",
+ __func__, srate, channels, sample_size, period_size);
+ dev_dbg(i2s->dev, "%s freq: %u, ratio: %u, level: %u\n",
+ __func__, freq, ratio, level);
+
+ return 0;
+}
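The ratio computed in xtfpga_i2s_hw_params() above reduces to 64 / sample_size - 2 once MCLK = 256 * srate and the stereo bit clock carries 2 * sample_size bits per frame. A minimal user-space sketch of just that arithmetic (illustrative only, not driver code):

#include <assert.h>

/* Same arithmetic as in xtfpga_i2s_hw_params() above:
 * I2S bit clock = MCLK / (2 * (ratio + 2)), MCLK = 256 * srate,
 * bit clock = srate * sample_size * 2 (stereo), hence
 * ratio = 64 / sample_size - 2.
 */
static unsigned xtfpga_ratio(unsigned srate, unsigned sample_size)
{
	unsigned freq = 256 * srate;

	return (freq - srate * sample_size * 8) / (srate * sample_size * 4);
}

int main(void)
{
	assert(xtfpga_ratio(48000, 32) == 0);	/* 32-bit stereo */
	assert(xtfpga_ratio(44100, 16) == 2);	/* 16-bit stereo */
	return 0;
}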
+
+static int xtfpga_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF)
+ return -EINVAL;
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
+ return -EINVAL;
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_I2S)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* PCM */
+
+static const struct snd_pcm_hardware xtfpga_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .period_bytes_min = 2,
+ .period_bytes_max = XTFPGA_I2S_FIFO_SIZE / 2 * 8,
+ .periods_min = 2,
+ .periods_max = XTFPGA_I2S_FIFO_SIZE * 8 / 2,
+ .buffer_bytes_max = XTFPGA_I2S_FIFO_SIZE * 8,
+ .fifo_size = 16,
+};
+
+static int xtfpga_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ void *p;
+
+ snd_soc_set_runtime_hwparams(substream, &xtfpga_pcm_hardware);
+ p = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ runtime->private_data = p;
+
+ return 0;
+}
+
+static int xtfpga_pcm_close(struct snd_pcm_substream *substream)
+{
+ synchronize_rcu();
+ return 0;
+}
+
+static int xtfpga_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ int ret;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct xtfpga_i2s *i2s = runtime->private_data;
+ unsigned channels = params_channels(hw_params);
+
+ switch (channels) {
+ case 1:
+ case 2:
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+
+ switch (params_format(hw_params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ i2s->tx_fn = (channels == 1) ?
+ xtfpga_pcm_tx_1x16 :
+ xtfpga_pcm_tx_2x16;
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ i2s->tx_fn = (channels == 1) ?
+ xtfpga_pcm_tx_1x32 :
+ xtfpga_pcm_tx_2x32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+ return ret;
+}
+
+static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct xtfpga_i2s *i2s = runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ACCESS_ONCE(i2s->tx_ptr) = 0;
+ rcu_assign_pointer(i2s->tx_substream, substream);
+ xtfpga_pcm_refill_fifo(i2s);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ rcu_assign_pointer(i2s->tx_substream, NULL);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct xtfpga_i2s *i2s = runtime->private_data;
+ snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr);
+
+ return pos < runtime->buffer_size ? pos : 0;
+}
+
+static int xtfpga_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ size_t size = xtfpga_pcm_hardware.buffer_bytes_max;
+
+ return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+ card->dev, size, size);
+}
+
+static void xtfpga_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static const struct snd_pcm_ops xtfpga_pcm_ops = {
+ .open = xtfpga_pcm_open,
+ .close = xtfpga_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = xtfpga_pcm_hw_params,
+ .trigger = xtfpga_pcm_trigger,
+ .pointer = xtfpga_pcm_pointer,
+};
+
+static const struct snd_soc_platform_driver xtfpga_soc_platform = {
+ .pcm_new = xtfpga_pcm_new,
+ .pcm_free = xtfpga_pcm_free,
+ .ops = &xtfpga_pcm_ops,
+};
+
+static const struct snd_soc_component_driver xtfpga_i2s_component = {
+ .name = DRV_NAME,
+};
+
+static const struct snd_soc_dai_ops xtfpga_i2s_dai_ops = {
+ .startup = xtfpga_i2s_startup,
+ .hw_params = xtfpga_i2s_hw_params,
+ .set_fmt = xtfpga_i2s_set_fmt,
+};
+
+static struct snd_soc_dai_driver xtfpga_i2s_dai[] = {
+ {
+ .name = "xtfpga-i2s",
+ .id = 0,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &xtfpga_i2s_dai_ops,
+ },
+};
+
+static int xtfpga_i2s_runtime_suspend(struct device *dev)
+{
+ struct xtfpga_i2s *i2s = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(i2s->clk);
+ return 0;
+}
+
+static int xtfpga_i2s_runtime_resume(struct device *dev)
+{
+ struct xtfpga_i2s *i2s = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2s->clk);
+ if (ret) {
+ dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int xtfpga_i2s_probe(struct platform_device *pdev)
+{
+ struct xtfpga_i2s *i2s;
+ struct resource *mem;
+ int err, irq;
+
+ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ if (!i2s) {
+ err = -ENOMEM;
+ goto err;
+ }
+ platform_set_drvdata(pdev, i2s);
+ i2s->dev = &pdev->dev;
+ dev_dbg(&pdev->dev, "dev: %p, i2s: %p\n", &pdev->dev, i2s);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2s->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(i2s->regs)) {
+ err = PTR_ERR(i2s->regs);
+ goto err;
+ }
+
+ i2s->regmap = devm_regmap_init_mmio(&pdev->dev, i2s->regs,
+ &xtfpga_i2s_regmap_config);
+ if (IS_ERR(i2s->regmap)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ err = PTR_ERR(i2s->regmap);
+ goto err;
+ }
+
+ i2s->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2s->clk)) {
+ dev_err(&pdev->dev, "couldn't get clock\n");
+ err = PTR_ERR(i2s->clk);
+ goto err;
+ }
+
+ regmap_write(i2s->regmap, XTFPGA_I2S_CONFIG,
+ (0x1 << XTFPGA_I2S_CONFIG_CHANNEL_BASE));
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_STATUS, XTFPGA_I2S_INT_VALID);
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_MASK, XTFPGA_I2S_INT_UNDERRUN);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ err = irq;
+ goto err;
+ }
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ xtfpga_i2s_threaded_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ pdev->name, i2s);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err;
+ }
+
+ err = snd_soc_register_platform(&pdev->dev, &xtfpga_soc_platform);
+ if (err < 0) {
+ dev_err(&pdev->dev, "couldn't register platform\n");
+ goto err;
+ }
+ err = devm_snd_soc_register_component(&pdev->dev,
+ &xtfpga_i2s_component,
+ xtfpga_i2s_dai,
+ ARRAY_SIZE(xtfpga_i2s_dai));
+ if (err < 0) {
+ dev_err(&pdev->dev, "couldn't register component\n");
+ goto err_unregister_platform;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ err = xtfpga_i2s_runtime_resume(&pdev->dev);
+ if (err)
+ goto err_pm_disable;
+ }
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+err_unregister_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+err:
+ dev_err(&pdev->dev, "%s: err = %d\n", __func__, err);
+ return err;
+}
+
+static int xtfpga_i2s_remove(struct platform_device *pdev)
+{
+ struct xtfpga_i2s *i2s = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_platform(&pdev->dev);
+ if (i2s->regmap && !IS_ERR(i2s->regmap)) {
+ regmap_write(i2s->regmap, XTFPGA_I2S_CONFIG, 0);
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_MASK, 0);
+ regmap_write(i2s->regmap, XTFPGA_I2S_INT_STATUS,
+ XTFPGA_I2S_INT_VALID);
+ }
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ xtfpga_i2s_runtime_suspend(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xtfpga_i2s_of_match[] = {
+ { .compatible = "cdns,xtfpga-i2s", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xtfpga_i2s_of_match);
+#endif
+
+static const struct dev_pm_ops xtfpga_i2s_pm_ops = {
+ SET_RUNTIME_PM_OPS(xtfpga_i2s_runtime_suspend,
+ xtfpga_i2s_runtime_resume, NULL)
+};
+
+static struct platform_driver xtfpga_i2s_driver = {
+ .probe = xtfpga_i2s_probe,
+ .remove = xtfpga_i2s_remove,
+ .driver = {
+ .name = "xtfpga-i2s",
+ .of_match_table = of_match_ptr(xtfpga_i2s_of_match),
+ .pm = &xtfpga_i2s_pm_ops,
+ },
+};
+
+module_platform_driver(xtfpga_i2s_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("xtfpga I2S controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 86280d63b76d..1b1a89e80d13 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -37,6 +37,7 @@
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -44,7 +45,6 @@
#include <sound/control.h>
#include <sound/initval.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index 93522072bc87..49195325fdf6 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -53,9 +53,7 @@ int snd_emux_new(struct snd_emux **remu)
emu->max_voices = 0;
emu->use_time = 0;
- init_timer(&emu->tlist);
- emu->tlist.function = snd_emux_timer_callback;
- emu->tlist.data = (unsigned long)emu;
+ setup_timer(&emu->tlist, snd_emux_timer_callback, (unsigned long)emu);
emu->timer_active = 0;
*remu = emu;
@@ -160,12 +158,8 @@ int snd_emux_free(struct snd_emux *emu)
snd_emux_detach_seq_oss(emu);
#endif
snd_emux_detach_seq(emu);
-
snd_emux_delete_hwdep(emu);
-
- if (emu->sflist)
- snd_sf_free(emu->sflist);
-
+ snd_sf_free(emu->sflist);
kfree(emu->voices);
kfree(emu->name);
kfree(emu);
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index 5ae1eae9f6db..e557946718a9 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -21,7 +21,7 @@
#include <sound/core.h>
#include <sound/hwdep.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "emux_voice.h"
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index 319754cf6208..ab37add269ae 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -26,7 +26,7 @@
#ifdef CONFIG_SND_SEQUENCER_OSS
#include <linux/export.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <sound/core.h>
#include "emux_voice.h"
#include <sound/asoundef.h>
diff --git a/sound/synth/emux/emux_synth.c b/sound/synth/emux/emux_synth.c
index 9a38de459acb..599551b5af44 100644
--- a/sound/synth/emux/emux_synth.c
+++ b/sound/synth/emux/emux_synth.c
@@ -186,8 +186,7 @@ snd_emux_note_off(void *p, int note, int vel, struct snd_midi_channel *chan)
*/
vp->state = SNDRV_EMUX_ST_PENDING;
if (! emu->timer_active) {
- emu->tlist.expires = jiffies + 1;
- add_timer(&emu->tlist);
+ mod_timer(&emu->tlist, jiffies + 1);
emu->timer_active = 1;
}
} else
@@ -223,8 +222,7 @@ void snd_emux_timer_callback(unsigned long data)
}
}
if (do_again) {
- emu->tlist.expires = jiffies + 1;
- add_timer(&emu->tlist);
+ mod_timer(&emu->tlist, jiffies + 1);
emu->timer_active = 1;
} else
emu->timer_active = 0;
diff --git a/sound/synth/emux/soundfont.c b/sound/synth/emux/soundfont.c
index 78683b2064f7..31a4ea94830e 100644
--- a/sound/synth/emux/soundfont.c
+++ b/sound/synth/emux/soundfont.c
@@ -25,7 +25,7 @@
* of doing things so that the old sfxload utility can be used.
* Everything may change when there is an alsa way of doing things.
*/
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <sound/core.h>
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index d393153c474f..a452ad7cec40 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -160,5 +160,7 @@ config SND_BCD2000
To compile this driver as a module, choose M here: the module
will be called snd-bcd2000.
+source "sound/usb/line6/Kconfig"
+
endif # SND_USB
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index bcee4060fd18..2d2d122b069f 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_SND_USB_USX2Y) += snd-usbmidi-lib.o
obj-$(CONFIG_SND_USB_US122L) += snd-usbmidi-lib.o
obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/
+obj-$(CONFIG_SND_USB_LINE6) += line6/
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 97acb906acc2..ef580b43f1e3 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -153,6 +153,8 @@ struct snd_usb_substream {
int channel;
int byte_idx;
} dsd_dop;
+
+ bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
};
struct snd_usb_stream {
diff --git a/sound/usb/line6/Kconfig b/sound/usb/line6/Kconfig
new file mode 100644
index 000000000000..f4585d378ef3
--- /dev/null
+++ b/sound/usb/line6/Kconfig
@@ -0,0 +1,42 @@
+config SND_USB_LINE6
+ tristate
+ select SND_RAWMIDI
+ select SND_PCM
+
+config SND_USB_POD
+ tristate "Line 6 POD USB support"
+ select SND_USB_LINE6
+ help
+ This is a driver for PODxt and other similar devices,
+ supporting the following features:
+ * Reading/writing individual parameters
+ * Reading/writing complete channel, effects setup, and amp
+ setup data
+ * Channel switching
+ * Virtual MIDI interface
+ * Tuner access
+ * Playback/capture/mixer device for any ALSA-compatible PCM
+ audio application
+ * Signal routing (record clean/processed guitar signal,
+ re-amping)
+
+config SND_USB_PODHD
+ tristate "Line 6 POD HD300/400/500 USB support"
+ select SND_USB_LINE6
+ help
+ This is a driver for POD HD300, 400 and 500 devices.
+
+config SND_USB_TONEPORT
+ tristate "TonePort GX, UX1 and UX2 USB support"
+ select SND_USB_LINE6
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ This is a driver for TonePort GX, UX1 and UX2 devices.
+
+config SND_USB_VARIAX
+ tristate "Variax Workbench USB support"
+ select SND_USB_LINE6
+ help
+ This is a driver for the Variax Workbench device.
+
diff --git a/sound/usb/line6/Makefile b/sound/usb/line6/Makefile
new file mode 100644
index 000000000000..b8b3b2a543d8
--- /dev/null
+++ b/sound/usb/line6/Makefile
@@ -0,0 +1,18 @@
+snd-usb-line6-y := \
+ capture.o \
+ driver.o \
+ midi.o \
+ midibuf.o \
+ pcm.o \
+ playback.o
+
+snd-usb-pod-y := pod.o
+snd-usb-podhd-y := podhd.o
+snd-usb-toneport-y := toneport.o
+snd-usb-variax-y := variax.o
+
+obj-$(CONFIG_SND_USB_LINE6) += snd-usb-line6.o
+obj-$(CONFIG_SND_USB_POD) += snd-usb-pod.o
+obj-$(CONFIG_SND_USB_PODHD) += snd-usb-podhd.o
+obj-$(CONFIG_SND_USB_TONEPORT) += snd-usb-toneport.o
+obj-$(CONFIG_SND_USB_VARIAX) += snd-usb-variax.o
diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c
new file mode 100644
index 000000000000..f518fbbe88de
--- /dev/null
+++ b/sound/usb/line6/capture.c
@@ -0,0 +1,275 @@
+/*
+ * Line 6 Linux USB driver
+ *
+ * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "capture.h"
+#include "driver.h"
+#include "pcm.h"
+
+/*
+ Find a free URB and submit it.
+ must be called in line6pcm->in.lock context
+*/
+static int submit_audio_in_urb(struct snd_line6_pcm *line6pcm)
+{
+ int index;
+ int i, urb_size;
+ int ret;
+ struct urb *urb_in;
+
+ index =
+ find_first_zero_bit(&line6pcm->in.active_urbs, LINE6_ISO_BUFFERS);
+
+ if (index < 0 || index >= LINE6_ISO_BUFFERS) {
+ dev_err(line6pcm->line6->ifcdev, "no free URB found\n");
+ return -EINVAL;
+ }
+
+ urb_in = line6pcm->in.urbs[index];
+ urb_size = 0;
+
+ for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
+ struct usb_iso_packet_descriptor *fin =
+ &urb_in->iso_frame_desc[i];
+ fin->offset = urb_size;
+ fin->length = line6pcm->max_packet_size;
+ urb_size += line6pcm->max_packet_size;
+ }
+
+ urb_in->transfer_buffer =
+ line6pcm->in.buffer +
+ index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
+ urb_in->transfer_buffer_length = urb_size;
+ urb_in->context = line6pcm;
+
+ ret = usb_submit_urb(urb_in, GFP_ATOMIC);
+
+ if (ret == 0)
+ set_bit(index, &line6pcm->in.active_urbs);
+ else
+ dev_err(line6pcm->line6->ifcdev,
+ "URB in #%d submission failed (%d)\n", index, ret);
+
+ return 0;
+}
+
+/*
+ Submit all currently available capture URBs.
+ must be called in line6pcm->in.lock context
+*/
+int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
+ ret = submit_audio_in_urb(line6pcm);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ Copy data into ALSA capture buffer.
+*/
+void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
+{
+ struct snd_pcm_substream *substream =
+ get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
+ int frames = fsize / bytes_per_frame;
+
+ if (runtime == NULL)
+ return;
+
+ if (line6pcm->in.pos_done + frames > runtime->buffer_size) {
+ /*
+ The transferred area goes over buffer boundary,
+ copy two separate chunks.
+ */
+ int len;
+
+ len = runtime->buffer_size - line6pcm->in.pos_done;
+
+ if (len > 0) {
+ memcpy(runtime->dma_area +
+ line6pcm->in.pos_done * bytes_per_frame, fbuf,
+ len * bytes_per_frame);
+ memcpy(runtime->dma_area, fbuf + len * bytes_per_frame,
+ (frames - len) * bytes_per_frame);
+ } else {
+ /* this is somewhat paranoid */
+ dev_err(line6pcm->line6->ifcdev,
+ "driver bug: len = %d\n", len);
+ }
+ } else {
+ /* copy single chunk */
+ memcpy(runtime->dma_area +
+ line6pcm->in.pos_done * bytes_per_frame, fbuf, fsize);
+ }
+
+ line6pcm->in.pos_done += frames;
+ if (line6pcm->in.pos_done >= runtime->buffer_size)
+ line6pcm->in.pos_done -= runtime->buffer_size;
+}
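A concrete case of the split copy above: with buffer_size = 1024 frames, pos_done = 1000 and a 64-frame chunk, 24 frames go to the tail of the DMA area and the remaining 40 wrap to its start. The same logic in isolation (a sketch with illustrative names, no locking):

#include <assert.h>
#include <string.h>

/* Split copy into a ring buffer of 'buffer_size' frames, mirroring
 * line6_capture_copy() above.
 */
void ring_copy(char *dma_area, unsigned int buffer_size, unsigned int pos,
	       const char *src, unsigned int frames,
	       unsigned int bytes_per_frame)
{
	if (pos + frames > buffer_size) {
		unsigned int len = buffer_size - pos;	/* frames until wrap */

		memcpy(dma_area + pos * bytes_per_frame, src,
		       len * bytes_per_frame);
		memcpy(dma_area, src + len * bytes_per_frame,
		       (frames - len) * bytes_per_frame);
	} else {
		memcpy(dma_area + pos * bytes_per_frame, src,
		       frames * bytes_per_frame);
	}
}

int main(void)
{
	static char dma[1024], src[64] = { [0] = 1, [63] = 2 };

	ring_copy(dma, 1024, 1000, src, 64, 1);	/* 24 at the tail, 40 wrapped */
	assert(dma[1000] == 1 && dma[39] == 2);
	return 0;
}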
+
+void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
+{
+ struct snd_pcm_substream *substream =
+ get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);
+
+ line6pcm->in.bytes += length;
+ if (line6pcm->in.bytes >= line6pcm->in.period) {
+ line6pcm->in.bytes %= line6pcm->in.period;
+ spin_unlock(&line6pcm->in.lock);
+ snd_pcm_period_elapsed(substream);
+ spin_lock(&line6pcm->in.lock);
+ }
+}
+
+/*
+ * Callback for completed capture URB.
+ */
+static void audio_in_callback(struct urb *urb)
+{
+ int i, index, length = 0, shutdown = 0;
+ unsigned long flags;
+
+ struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context;
+
+ line6pcm->in.last_frame = urb->start_frame;
+
+ /* find index of URB */
+ for (index = 0; index < LINE6_ISO_BUFFERS; ++index)
+ if (urb == line6pcm->in.urbs[index])
+ break;
+
+ spin_lock_irqsave(&line6pcm->in.lock, flags);
+
+ for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
+ char *fbuf;
+ int fsize;
+ struct usb_iso_packet_descriptor *fin = &urb->iso_frame_desc[i];
+
+ if (fin->status == -EXDEV) {
+ shutdown = 1;
+ break;
+ }
+
+ fbuf = urb->transfer_buffer + fin->offset;
+ fsize = fin->actual_length;
+
+ if (fsize > line6pcm->max_packet_size) {
+ dev_err(line6pcm->line6->ifcdev,
+ "driver and/or device bug: packet too large (%d > %d)\n",
+ fsize, line6pcm->max_packet_size);
+ }
+
+ length += fsize;
+
+ /* the following assumes LINE6_ISO_PACKETS == 1: */
+ line6pcm->prev_fbuf = fbuf;
+ line6pcm->prev_fsize = fsize;
+
+ if (!test_bit(LINE6_STREAM_IMPULSE, &line6pcm->in.running) &&
+ test_bit(LINE6_STREAM_PCM, &line6pcm->in.running) &&
+ fsize > 0)
+ line6_capture_copy(line6pcm, fbuf, fsize);
+ }
+
+ clear_bit(index, &line6pcm->in.active_urbs);
+
+ if (test_and_clear_bit(index, &line6pcm->in.unlink_urbs))
+ shutdown = 1;
+
+ if (!shutdown) {
+ submit_audio_in_urb(line6pcm);
+
+ if (!test_bit(LINE6_STREAM_IMPULSE, &line6pcm->in.running) &&
+ test_bit(LINE6_STREAM_PCM, &line6pcm->in.running))
+ line6_capture_check_period(line6pcm, length);
+ }
+
+ spin_unlock_irqrestore(&line6pcm->in.lock, flags);
+}
+
+/* open capture callback */
+static int snd_line6_capture_open(struct snd_pcm_substream *substream)
+{
+ int err;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ err = snd_pcm_hw_constraint_ratdens(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &line6pcm->properties->rates);
+ if (err < 0)
+ return err;
+
+ runtime->hw = line6pcm->properties->capture_hw;
+ return 0;
+}
+
+/* close capture callback */
+static int snd_line6_capture_close(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+/* capture operators */
+struct snd_pcm_ops snd_line6_capture_ops = {
+ .open = snd_line6_capture_open,
+ .close = snd_line6_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_line6_hw_params,
+ .hw_free = snd_line6_hw_free,
+ .prepare = snd_line6_prepare,
+ .trigger = snd_line6_trigger,
+ .pointer = snd_line6_pointer,
+};
+
+int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm)
+{
+ struct usb_line6 *line6 = line6pcm->line6;
+ int i;
+
+ /* create audio URBs and fill in constant values: */
+ for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
+ struct urb *urb;
+
+ /* URB for audio in: */
+ urb = line6pcm->in.urbs[i] =
+ usb_alloc_urb(LINE6_ISO_PACKETS, GFP_KERNEL);
+
+ if (urb == NULL)
+ return -ENOMEM;
+
+ urb->dev = line6->usbdev;
+ urb->pipe =
+ usb_rcvisocpipe(line6->usbdev,
+ line6->properties->ep_audio_r &
+ USB_ENDPOINT_NUMBER_MASK);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->start_frame = -1;
+ urb->number_of_packets = LINE6_ISO_PACKETS;
+ urb->interval = LINE6_ISO_INTERVAL;
+ urb->error_count = 0;
+ urb->complete = audio_in_callback;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/line6/capture.h b/sound/usb/line6/capture.h
index 4157bcb598a9..890b21bff18c 100644
--- a/drivers/staging/line6/capture.h
+++ b/sound/usb/line6/capture.h
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -24,12 +24,6 @@ extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
int length);
extern int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm);
-extern void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
-extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
-extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
- *line6pcm);
-extern void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm);
-extern int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
new file mode 100644
index 000000000000..99b63a7902f3
--- /dev/null
+++ b/sound/usb/line6/driver.c
@@ -0,0 +1,666 @@
+/*
+ * Line 6 Linux USB driver
+ *
+ * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+
+#include "capture.h"
+#include "driver.h"
+#include "midi.h"
+#include "playback.h"
+
+#define DRIVER_AUTHOR "Markus Grabner <grabner@icg.tugraz.at>"
+#define DRIVER_DESC "Line 6 USB Driver"
+
+/*
+ This is Line 6's MIDI manufacturer ID.
+*/
+const unsigned char line6_midi_id[] = {
+ 0x00, 0x01, 0x0c
+};
+EXPORT_SYMBOL_GPL(line6_midi_id);
+
+/*
+ Code to request version of POD, Variax interface
+ (and maybe other devices).
+*/
+static const char line6_request_version[] = {
+ 0xf0, 0x7e, 0x7f, 0x06, 0x01, 0xf7
+};
+
+/*
+ Class for asynchronous messages.
+*/
+struct message {
+ struct usb_line6 *line6;
+ const char *buffer;
+ int size;
+ int done;
+};
+
+/*
+ Forward declarations.
+*/
+static void line6_data_received(struct urb *urb);
+static int line6_send_raw_message_async_part(struct message *msg,
+ struct urb *urb);
+
+/*
+ Start to listen on endpoint.
+*/
+static int line6_start_listen(struct usb_line6 *line6)
+{
+ int err;
+
+ usb_fill_int_urb(line6->urb_listen, line6->usbdev,
+ usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r),
+ line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
+ line6_data_received, line6, line6->interval);
+ line6->urb_listen->actual_length = 0;
+ err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
+ return err;
+}
+
+/*
+ Stop listening on endpoint.
+*/
+static void line6_stop_listen(struct usb_line6 *line6)
+{
+ usb_kill_urb(line6->urb_listen);
+}
+
+/*
+ Send raw message in pieces of wMaxPacketSize bytes.
+*/
+static int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
+ int size)
+{
+ int i, done = 0;
+
+ for (i = 0; i < size; i += line6->max_packet_size) {
+ int partial;
+ const char *frag_buf = buffer + i;
+ int frag_size = min(line6->max_packet_size, size - i);
+ int retval;
+
+ retval = usb_interrupt_msg(line6->usbdev,
+ usb_sndintpipe(line6->usbdev,
+ line6->properties->ep_ctrl_w),
+ (char *)frag_buf, frag_size,
+ &partial, LINE6_TIMEOUT * HZ);
+
+ if (retval) {
+ dev_err(line6->ifcdev,
+ "usb_interrupt_msg failed (%d)\n", retval);
+ break;
+ }
+
+ done += frag_size;
+ }
+
+ return done;
+}
+
+/*
+ Notification of completion of asynchronous request transmission.
+*/
+static void line6_async_request_sent(struct urb *urb)
+{
+ struct message *msg = (struct message *)urb->context;
+
+ if (msg->done >= msg->size) {
+ usb_free_urb(urb);
+ kfree(msg);
+ } else
+ line6_send_raw_message_async_part(msg, urb);
+}
+
+/*
+ Asynchronously send part of a raw message.
+*/
+static int line6_send_raw_message_async_part(struct message *msg,
+ struct urb *urb)
+{
+ int retval;
+ struct usb_line6 *line6 = msg->line6;
+ int done = msg->done;
+ int bytes = min(msg->size - done, line6->max_packet_size);
+
+ usb_fill_int_urb(urb, line6->usbdev,
+ usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w),
+ (char *)msg->buffer + done, bytes,
+ line6_async_request_sent, msg, line6->interval);
+
+ msg->done += bytes;
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (retval < 0) {
+ dev_err(line6->ifcdev, "%s: usb_submit_urb failed (%d)\n",
+ __func__, retval);
+ usb_free_urb(urb);
+ kfree(msg);
+ return retval;
+ }
+
+ return 0;
+}
+
+/*
+ Setup and start timer.
+*/
+void line6_start_timer(struct timer_list *timer, unsigned long msecs,
+ void (*function)(unsigned long), unsigned long data)
+{
+ setup_timer(timer, function, data);
+ mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL_GPL(line6_start_timer);
+
+/*
+ Asynchronously send raw message.
+*/
+int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
+ int size)
+{
+ struct message *msg;
+ struct urb *urb;
+
+ /* create message: */
+ msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ /* create URB: */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+
+ if (urb == NULL) {
+ kfree(msg);
+ return -ENOMEM;
+ }
+
+ /* set message data: */
+ msg->line6 = line6;
+ msg->buffer = buffer;
+ msg->size = size;
+ msg->done = 0;
+
+ /* start sending: */
+ return line6_send_raw_message_async_part(msg, urb);
+}
+EXPORT_SYMBOL_GPL(line6_send_raw_message_async);
+
+/*
+ Send asynchronous device version request.
+*/
+int line6_version_request_async(struct usb_line6 *line6)
+{
+ char *buffer;
+ int retval;
+
+ buffer = kmemdup(line6_request_version,
+ sizeof(line6_request_version), GFP_ATOMIC);
+ if (buffer == NULL)
+ return -ENOMEM;
+
+ retval = line6_send_raw_message_async(line6, buffer,
+ sizeof(line6_request_version));
+ kfree(buffer);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(line6_version_request_async);
+
+/*
+ Send sysex message in pieces of wMaxPacketSize bytes.
+*/
+int line6_send_sysex_message(struct usb_line6 *line6, const char *buffer,
+ int size)
+{
+ return line6_send_raw_message(line6, buffer,
+ size + SYSEX_EXTRA_SIZE) -
+ SYSEX_EXTRA_SIZE;
+}
+EXPORT_SYMBOL_GPL(line6_send_sysex_message);
+
+/*
+ Allocate buffer for sysex message and prepare header.
+ @param code1, code2 sysex message code bytes
+ @param size number of bytes between code and sysex end
+*/
+char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2,
+ int size)
+{
+ char *buffer = kmalloc(size + SYSEX_EXTRA_SIZE, GFP_ATOMIC);
+
+ if (!buffer)
+ return NULL;
+
+ buffer[0] = LINE6_SYSEX_BEGIN;
+ memcpy(buffer + 1, line6_midi_id, sizeof(line6_midi_id));
+ buffer[sizeof(line6_midi_id) + 1] = code1;
+ buffer[sizeof(line6_midi_id) + 2] = code2;
+ buffer[sizeof(line6_midi_id) + 3 + size] = LINE6_SYSEX_END;
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(line6_alloc_sysex_buffer);
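For a one-byte payload the buffer built above is size + SYSEX_EXTRA_SIZE = 8 bytes long. Assuming the conventional MIDI sysex delimiters 0xf0/0xf7 for LINE6_SYSEX_BEGIN/END (and made-up code and payload values), the layout is:

static const unsigned char example_sysex[8] = {
	0xf0,			/* LINE6_SYSEX_BEGIN */
	0x00, 0x01, 0x0c,	/* line6_midi_id */
	0x72, 0x01,		/* code1, code2 */
	0x42,			/* payload byte at SYSEX_DATA_OFS (6), filled by the caller */
	0xf7,			/* LINE6_SYSEX_END */
};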
+
+/*
+ Notification of data received from the Line 6 device.
+*/
+static void line6_data_received(struct urb *urb)
+{
+ struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
+ struct midi_buffer *mb = &line6->line6midi->midibuf_in;
+ int done;
+
+ if (urb->status == -ESHUTDOWN)
+ return;
+
+ done =
+ line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
+
+ if (done < urb->actual_length) {
+ line6_midibuf_ignore(mb, done);
+ dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
+ done, urb->actual_length);
+ }
+
+ for (;;) {
+ done =
+ line6_midibuf_read(mb, line6->buffer_message,
+ LINE6_MESSAGE_MAXLEN);
+
+ if (done == 0)
+ break;
+
+ line6->message_length = done;
+ line6_midi_receive(line6, line6->buffer_message, done);
+
+ if (line6->process_message)
+ line6->process_message(line6);
+ }
+
+ line6_start_listen(line6);
+}
+
+#define LINE6_READ_WRITE_STATUS_DELAY 2 /* milliseconds */
+#define LINE6_READ_WRITE_MAX_RETRIES 50
+
+/*
+ Read data from device.
+*/
+int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ size_t datalen)
+{
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+ unsigned char len;
+ unsigned count;
+
+ /* issue the read request: */
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ (datalen << 8) | 0x21, address,
+ NULL, 0, LINE6_TIMEOUT * HZ);
+
+ if (ret < 0) {
+ dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
+ return ret;
+ }
+
+ /* Wait for data length. We'll get 0xff until length arrives. */
+ for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
+ mdelay(LINE6_READ_WRITE_STATUS_DELAY);
+
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+ 0x0012, 0x0000, &len, 1,
+ LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receive length failed (error %d)\n", ret);
+ return ret;
+ }
+
+ if (len != 0xff)
+ break;
+ }
+
+ if (len == 0xff) {
+ dev_err(line6->ifcdev, "read failed after %d retries\n",
+ count);
+ return -EIO;
+ } else if (len != datalen) {
+ /* should be equal or something went wrong */
+ dev_err(line6->ifcdev,
+ "length mismatch (expected %d, got %d)\n",
+ (int)datalen, (int)len);
+ return -EIO;
+ }
+
+ /* receive the result: */
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x0013, 0x0000, data, datalen,
+ LINE6_TIMEOUT * HZ);
+
+ if (ret < 0) {
+ dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(line6_read_data);
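line6_read_data() above follows a three-step protocol: issue the vendor read request, poll a status byte until it is no longer 0xff (bounded by LINE6_READ_WRITE_MAX_RETRIES), then fetch the result. A condensed sketch of the poll step, where read_status() is a hypothetical stand-in for the usb_control_msg() status read:

#include <errno.h>

int poll_until_ready(int (*read_status)(unsigned char *out),
		     unsigned char *status, int max_retries)
{
	int count, ret;

	for (count = 0; count < max_retries; count++) {
		/* the driver sleeps LINE6_READ_WRITE_STATUS_DELAY ms here */
		ret = read_status(status);
		if (ret < 0)
			return ret;		/* transport error */
		if (*status != 0xff)
			return 0;		/* device produced a result */
	}
	return -EIO;				/* still busy, give up */
}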
+
+/*
+ Write data to device.
+*/
+int line6_write_data(struct usb_line6 *line6, int address, void *data,
+ size_t datalen)
+{
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+ unsigned char status;
+ int count;
+
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 0x0022, address, data, datalen,
+ LINE6_TIMEOUT * HZ);
+
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "write request failed (error %d)\n", ret);
+ return ret;
+ }
+
+ for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
+ mdelay(LINE6_READ_WRITE_STATUS_DELAY);
+
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+ 0x0012, 0x0000,
+ &status, 1, LINE6_TIMEOUT * HZ);
+
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receiving status failed (error %d)\n", ret);
+ return ret;
+ }
+
+ if (status != 0xff)
+ break;
+ }
+
+ if (status == 0xff) {
+ dev_err(line6->ifcdev, "write failed after %d retries\n",
+ count);
+ return -EIO;
+ } else if (status != 0) {
+ dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(line6_write_data);
+
+/*
+ Read Line 6 device serial number.
+ (POD, TonePort, GuitarPort)
+*/
+int line6_read_serial_number(struct usb_line6 *line6, u32 *serial_number)
+{
+ return line6_read_data(line6, 0x80d0, serial_number,
+ sizeof(*serial_number));
+}
+EXPORT_SYMBOL_GPL(line6_read_serial_number);
+
+/*
+ Card destructor.
+*/
+static void line6_destruct(struct snd_card *card)
+{
+ struct usb_line6 *line6 = card->private_data;
+ struct usb_device *usbdev = line6->usbdev;
+
+ /* free buffer memory first: */
+ kfree(line6->buffer_message);
+ kfree(line6->buffer_listen);
+
+ /* then free URBs: */
+ usb_free_urb(line6->urb_listen);
+
+ /* decrement reference counters: */
+ usb_put_dev(usbdev);
+}
+
+/* get data from endpoint descriptor (see usb_maxpacket): */
+static void line6_get_interval(struct usb_line6 *line6)
+{
+ struct usb_device *usbdev = line6->usbdev;
+ struct usb_host_endpoint *ep;
+ unsigned pipe = usb_rcvintpipe(usbdev, line6->properties->ep_ctrl_r);
+ unsigned epnum = usb_pipeendpoint(pipe);
+
+ ep = usbdev->ep_in[epnum];
+ if (ep) {
+ line6->interval = ep->desc.bInterval;
+ line6->max_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+ } else {
+ dev_err(line6->ifcdev,
+ "endpoint not available, using fallback values");
+ line6->interval = LINE6_FALLBACK_INTERVAL;
+ line6->max_packet_size = LINE6_FALLBACK_MAXPACKETSIZE;
+ }
+}
+
+static int line6_init_cap_control(struct usb_line6 *line6)
+{
+ int ret;
+
+ /* initialize USB buffers: */
+ line6->buffer_listen = kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
+ if (!line6->buffer_listen)
+ return -ENOMEM;
+
+ line6->buffer_message = kmalloc(LINE6_MESSAGE_MAXLEN, GFP_KERNEL);
+ if (!line6->buffer_message)
+ return -ENOMEM;
+
+ line6->urb_listen = usb_alloc_urb(0, GFP_KERNEL);
+ if (!line6->urb_listen)
+ return -ENOMEM;
+
+ ret = line6_start_listen(line6);
+ if (ret < 0) {
+ dev_err(line6->ifcdev, "cannot start listening: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ Probe USB device.
+*/
+int line6_probe(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ const char *driver_name,
+ const struct line6_properties *properties,
+ int (*private_init)(struct usb_line6 *, const struct usb_device_id *id),
+ size_t data_size)
+{
+ struct usb_device *usbdev = interface_to_usbdev(interface);
+ struct snd_card *card;
+ struct usb_line6 *line6;
+ int interface_number;
+ int ret;
+
+ if (WARN_ON(data_size < sizeof(*line6)))
+ return -EINVAL;
+
+ /* we don't handle multiple configurations */
+ if (usbdev->descriptor.bNumConfigurations != 1)
+ return -ENODEV;
+
+ ret = snd_card_new(&interface->dev,
+ SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, data_size, &card);
+ if (ret < 0)
+ return ret;
+
+ /* store basic data: */
+ line6 = card->private_data;
+ line6->card = card;
+ line6->properties = properties;
+ line6->usbdev = usbdev;
+ line6->ifcdev = &interface->dev;
+
+ strcpy(card->id, properties->id);
+ strcpy(card->driver, driver_name);
+ strcpy(card->shortname, properties->name);
+ sprintf(card->longname, "Line 6 %s at USB %s", properties->name,
+ dev_name(line6->ifcdev));
+ card->private_free = line6_destruct;
+
+ usb_set_intfdata(interface, line6);
+
+ /* increment reference counters: */
+ usb_get_dev(usbdev);
+
+ /* initialize device info: */
+ dev_info(&interface->dev, "Line 6 %s found\n", properties->name);
+
+ /* query interface number */
+ interface_number = interface->cur_altsetting->desc.bInterfaceNumber;
+
+ ret = usb_set_interface(usbdev, interface_number,
+ properties->altsetting);
+ if (ret < 0) {
+ dev_err(&interface->dev, "set_interface failed\n");
+ goto error;
+ }
+
+ line6_get_interval(line6);
+
+ if (properties->capabilities & LINE6_CAP_CONTROL) {
+ ret = line6_init_cap_control(line6);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* initialize device data based on device: */
+ ret = private_init(line6, id);
+ if (ret < 0)
+ goto error;
+
+ /* creation of additional special files should go here */
+
+ dev_info(&interface->dev, "Line 6 %s now attached\n",
+ properties->name);
+
+ return 0;
+
+ error:
+ if (line6->disconnect)
+ line6->disconnect(line6);
+ snd_card_free(card);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(line6_probe);
+
+/*
+ Line 6 device disconnected.
+*/
+void line6_disconnect(struct usb_interface *interface)
+{
+ struct usb_line6 *line6 = usb_get_intfdata(interface);
+ struct usb_device *usbdev = interface_to_usbdev(interface);
+
+ if (!line6)
+ return;
+
+ if (WARN_ON(usbdev != line6->usbdev))
+ return;
+
+ if (line6->urb_listen != NULL)
+ line6_stop_listen(line6);
+
+ snd_card_disconnect(line6->card);
+ if (line6->line6pcm)
+ line6_pcm_disconnect(line6->line6pcm);
+ if (line6->disconnect)
+ line6->disconnect(line6);
+
+ dev_info(&interface->dev, "Line 6 %s now disconnected\n",
+ line6->properties->name);
+
+ /* make sure the device isn't destructed twice: */
+ usb_set_intfdata(interface, NULL);
+
+ snd_card_free_when_closed(line6->card);
+}
+EXPORT_SYMBOL_GPL(line6_disconnect);
+
+#ifdef CONFIG_PM
+
+/*
+ Suspend Line 6 device.
+*/
+int line6_suspend(struct usb_interface *interface, pm_message_t message)
+{
+ struct usb_line6 *line6 = usb_get_intfdata(interface);
+ struct snd_line6_pcm *line6pcm = line6->line6pcm;
+
+ snd_power_change_state(line6->card, SNDRV_CTL_POWER_D3hot);
+
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL)
+ line6_stop_listen(line6);
+
+ if (line6pcm != NULL) {
+ snd_pcm_suspend_all(line6pcm->pcm);
+ line6pcm->flags = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(line6_suspend);
+
+/*
+ Resume Line 6 device.
+*/
+int line6_resume(struct usb_interface *interface)
+{
+ struct usb_line6 *line6 = usb_get_intfdata(interface);
+
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL)
+ line6_start_listen(line6);
+
+ snd_power_change_state(line6->card, SNDRV_CTL_POWER_D0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(line6_resume);
+
+#endif /* CONFIG_PM */
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/line6/driver.h b/sound/usb/line6/driver.h
index 16e3fc2f1f15..5d20294d64f4 100644
--- a/drivers/staging/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -18,14 +18,18 @@
#include "midi.h"
-#define DRIVER_NAME "line6usb"
+#define USB_INTERVALS_PER_SECOND 1000
+
+/* Fallback USB interval and max packet size values */
+#define LINE6_FALLBACK_INTERVAL 10
+#define LINE6_FALLBACK_MAXPACKETSIZE 16
#define LINE6_TIMEOUT 1
#define LINE6_BUFSIZE_LISTEN 32
#define LINE6_MESSAGE_MAXLEN 256
/*
- Line6 MIDI control commands
+ Line 6 MIDI control commands
*/
#define LINE6_PARAM_CHANGE 0xb0
#define LINE6_PROGRAM_CHANGE 0xc0
@@ -48,17 +52,6 @@
#define LINE6_CHANNEL_MASK 0x0f
-#define MISSING_CASE \
- pr_err("line6usb driver bug: missing case in %s:%d\n", \
- __FILE__, __LINE__)
-
-#define CHECK_RETURN(x) \
-do { \
- err = x; \
- if (err < 0) \
- return err; \
-} while (0)
-
#define CHECK_STARTUP_PROGRESS(x, n) \
do { \
if ((x) >= (n)) \
@@ -71,145 +64,118 @@ extern const unsigned char line6_midi_id[3];
static const int SYSEX_DATA_OFS = sizeof(line6_midi_id) + 3;
static const int SYSEX_EXTRA_SIZE = sizeof(line6_midi_id) + 4;
-/**
- Common properties of Line6 devices.
+/*
+ Common properties of Line 6 devices.
*/
struct line6_properties {
- /**
- Bit identifying this device in the line6usb driver.
- */
- int device_bit;
-
- /**
- Card id string (maximum 16 characters).
- This can be used to address the device in ALSA programs as
- "default:CARD=<id>"
- */
+ /* Card id string (maximum 16 characters).
+ * This can be used to address the device in ALSA programs as
+ * "default:CARD=<id>"
+ */
const char *id;
- /**
- Card short name (maximum 32 characters).
- */
+ /* Card short name (maximum 32 characters) */
const char *name;
- /**
- Bit vector defining this device's capabilities in the
- line6usb driver.
- */
+ /* Bit vector defining this device's capabilities in line6usb driver */
int capabilities;
+
+ int altsetting;
+
+ unsigned ep_ctrl_r;
+ unsigned ep_ctrl_w;
+ unsigned ep_audio_r;
+ unsigned ep_audio_w;
};
-/**
- Common data shared by all Line6 devices.
+/* Capability bits */
+enum {
+ /* device supports setting parameters via USB */
+ LINE6_CAP_CONTROL = 1 << 0,
+ /* device supports PCM input/output via USB */
+ LINE6_CAP_PCM = 1 << 1,
+ /* device supports hardware monitoring */
+ LINE6_CAP_HWMON = 1 << 2,
+};
+
+/*
+ Common data shared by all Line 6 devices.
Corresponds to a pair of USB endpoints.
*/
struct usb_line6 {
- /**
- USB device.
- */
+ /* USB device */
struct usb_device *usbdev;
- /**
- Product id.
- */
- int product;
-
- /**
- Properties.
- */
+ /* Properties */
const struct line6_properties *properties;
- /**
- Interface number.
- */
- int interface_number;
-
- /**
- Interval (ms).
- */
+ /* Interval (ms) */
int interval;
- /**
- Maximum size of USB packet.
- */
+ /* Maximum size of USB packet */
int max_packet_size;
- /**
- Device representing the USB interface.
- */
+ /* Device representing the USB interface */
struct device *ifcdev;
- /**
- Line6 sound card data structure.
- Each device has at least MIDI or PCM.
- */
+ /* Line 6 sound card data structure.
+ * Each device has at least MIDI or PCM.
+ */
struct snd_card *card;
- /**
- Line6 PCM device data structure.
- */
+ /* Line 6 PCM device data structure */
struct snd_line6_pcm *line6pcm;
- /**
- Line6 MIDI device data structure.
- */
+ /* Line 6 MIDI device data structure */
struct snd_line6_midi *line6midi;
- /**
- USB endpoint for listening to control commands.
- */
- int ep_control_read;
-
- /**
- USB endpoint for writing control commands.
- */
- int ep_control_write;
-
- /**
- URB for listening to PODxt Pro control endpoint.
- */
+ /* URB for listening to PODxt Pro control endpoint */
struct urb *urb_listen;
- /**
- Buffer for listening to PODxt Pro control endpoint.
- */
+ /* Buffer for listening to PODxt Pro control endpoint */
unsigned char *buffer_listen;
- /**
- Buffer for message to be processed.
- */
+ /* Buffer for message to be processed */
unsigned char *buffer_message;
- /**
- Length of message to be processed.
- */
+ /* Length of message to be processed */
int message_length;
+
+ void (*process_message)(struct usb_line6 *);
+ void (*disconnect)(struct usb_line6 *line6);
};
extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
int code2, int size);
-extern ssize_t line6_nop_read(struct device *dev,
- struct device_attribute *attr, char *buf);
extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
size_t datalen);
extern int line6_read_serial_number(struct usb_line6 *line6,
- int *serial_number);
-extern int line6_send_program(struct usb_line6 *line6, u8 value);
-extern int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
- int size);
+ u32 *serial_number);
extern int line6_send_raw_message_async(struct usb_line6 *line6,
const char *buffer, int size);
extern int line6_send_sysex_message(struct usb_line6 *line6,
const char *buffer, int size);
extern ssize_t line6_set_raw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
-extern void line6_start_timer(struct timer_list *timer, unsigned int msecs,
+extern void line6_start_timer(struct timer_list *timer, unsigned long msecs,
void (*function)(unsigned long),
unsigned long data);
-extern int line6_transmit_parameter(struct usb_line6 *line6, int param,
- u8 value);
extern int line6_version_request_async(struct usb_line6 *line6);
extern int line6_write_data(struct usb_line6 *line6, int address, void *data,
size_t datalen);
+int line6_probe(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ const char *driver_name,
+ const struct line6_properties *properties,
+ int (*private_init)(struct usb_line6 *, const struct usb_device_id *id),
+ size_t data_size);
+
+void line6_disconnect(struct usb_interface *interface);
+
+#ifdef CONFIG_PM
+int line6_suspend(struct usb_interface *interface, pm_message_t message);
+int line6_resume(struct usb_interface *interface);
+#endif
+
#endif
diff --git a/drivers/staging/line6/midi.c b/sound/usb/line6/midi.c
index 1ac343b649c1..cebea9b7f769 100644
--- a/drivers/staging/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -11,14 +11,12 @@
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/export.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
-#include "audio.h"
#include "driver.h"
#include "midi.h"
-#include "pod.h"
-#include "usbdefs.h"
#define line6_rawmidi_substream_midi(substream) \
((struct snd_line6_midi *)((substream)->rmidi->private_data))
@@ -46,12 +44,9 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
line6_rawmidi_substream_midi(substream)->line6;
struct snd_line6_midi *line6midi = line6->line6midi;
struct midi_buffer *mb = &line6midi->midibuf_out;
- unsigned long flags;
unsigned char chunk[LINE6_FALLBACK_MAXPACKETSIZE];
int req, done;
- spin_lock_irqsave(&line6->line6midi->midi_transmit_lock, flags);
-
for (;;) {
req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
done = snd_rawmidi_transmit_peek(substream, chunk, req);
@@ -72,8 +67,6 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
send_midi_async(line6, chunk, done);
}
-
- spin_unlock_irqrestore(&line6->line6midi->midi_transmit_lock, flags);
}
/*
@@ -93,7 +86,7 @@ static void midi_sent(struct urb *urb)
if (status == -ESHUTDOWN)
return;
- spin_lock_irqsave(&line6->line6midi->send_urb_lock, flags);
+ spin_lock_irqsave(&line6->line6midi->lock, flags);
num = --line6->line6midi->num_active_send_urbs;
if (num == 0) {
@@ -104,12 +97,12 @@ static void midi_sent(struct urb *urb)
if (num == 0)
wake_up(&line6->line6midi->send_wait);
- spin_unlock_irqrestore(&line6->line6midi->send_urb_lock, flags);
+ spin_unlock_irqrestore(&line6->line6midi->lock, flags);
}
/*
Send an asynchronous MIDI message.
- Assumes that line6->line6midi->send_urb_lock is held
+ Assumes that line6->line6midi->lock is held
(i.e., this function is serialized).
*/
static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
@@ -121,22 +114,19 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb == NULL) {
- dev_err(line6->ifcdev, "Out of memory\n");
+ if (urb == NULL)
return -ENOMEM;
- }
transfer_buffer = kmemdup(data, length, GFP_ATOMIC);
if (transfer_buffer == NULL) {
usb_free_urb(urb);
- dev_err(line6->ifcdev, "Out of memory\n");
return -ENOMEM;
}
usb_fill_int_urb(urb, line6->usbdev,
usb_sndbulkpipe(line6->usbdev,
- line6->ep_control_write),
+ line6->properties->ep_ctrl_w),
transfer_buffer, length, midi_sent, line6,
line6->interval);
urb->actual_length = 0;
@@ -170,12 +160,12 @@ static void line6_midi_output_trigger(struct snd_rawmidi_substream *substream,
line6_rawmidi_substream_midi(substream)->line6;
line6->line6midi->substream_transmit = substream;
- spin_lock_irqsave(&line6->line6midi->send_urb_lock, flags);
+ spin_lock_irqsave(&line6->line6midi->lock, flags);
if (line6->line6midi->num_active_send_urbs == 0)
line6_midi_transmit(substream);
- spin_unlock_irqrestore(&line6->line6midi->send_urb_lock, flags);
+ spin_unlock_irqrestore(&line6->line6midi->lock, flags);
}
static void line6_midi_output_drain(struct snd_rawmidi_substream *substream)
@@ -223,28 +213,20 @@ static struct snd_rawmidi_ops line6_midi_input_ops = {
.trigger = line6_midi_input_trigger,
};
-/*
- Cleanup the Line6 MIDI device.
-*/
-static void line6_cleanup_midi(struct snd_rawmidi *rmidi)
-{
-}
-
/* Create a MIDI device */
-static int snd_line6_new_midi(struct snd_line6_midi *line6midi)
+static int snd_line6_new_midi(struct usb_line6 *line6,
+ struct snd_rawmidi **rmidi_ret)
{
struct snd_rawmidi *rmidi;
int err;
- err = snd_rawmidi_new(line6midi->line6->card, "Line6 MIDI", 0, 1, 1,
- &rmidi);
+ err = snd_rawmidi_new(line6->card, "Line 6 MIDI", 0, 1, 1, rmidi_ret);
if (err < 0)
return err;
- rmidi->private_data = line6midi;
- rmidi->private_free = line6_cleanup_midi;
- strcpy(rmidi->id, line6midi->line6->properties->id);
- strcpy(rmidi->name, line6midi->line6->properties->name);
+ rmidi = *rmidi_ret;
+ strcpy(rmidi->id, line6->properties->id);
+ strcpy(rmidi->name, line6->properties->name);
rmidi->info_flags =
SNDRV_RAWMIDI_INFO_OUTPUT |
@@ -258,64 +240,53 @@ static int snd_line6_new_midi(struct snd_line6_midi *line6midi)
}
/* MIDI device destructor */
-static int snd_line6_midi_free(struct snd_device *device)
+static void snd_line6_midi_free(struct snd_rawmidi *rmidi)
{
- struct snd_line6_midi *line6midi = device->device_data;
+ struct snd_line6_midi *line6midi = rmidi->private_data;
line6_midibuf_destroy(&line6midi->midibuf_in);
line6_midibuf_destroy(&line6midi->midibuf_out);
- return 0;
+ kfree(line6midi);
}
/*
- Initialize the Line6 MIDI subsystem.
+ Initialize the Line 6 MIDI subsystem.
*/
int line6_init_midi(struct usb_line6 *line6)
{
- static struct snd_device_ops midi_ops = {
- .dev_free = snd_line6_midi_free,
- };
-
int err;
+ struct snd_rawmidi *rmidi;
struct snd_line6_midi *line6midi;
- if (!(line6->properties->capabilities & LINE6_BIT_CONTROL)) {
+ if (!(line6->properties->capabilities & LINE6_CAP_CONTROL)) {
/* skip MIDI initialization and report success */
return 0;
}
- line6midi = kzalloc(sizeof(struct snd_line6_midi), GFP_KERNEL);
+ err = snd_line6_new_midi(line6, &rmidi);
+ if (err < 0)
+ return err;
- if (line6midi == NULL)
+ line6midi = kzalloc(sizeof(struct snd_line6_midi), GFP_KERNEL);
+ if (!line6midi)
return -ENOMEM;
- err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0);
- if (err < 0) {
- kfree(line6midi);
- return err;
- }
-
- err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1);
- if (err < 0) {
- kfree(line6midi->midibuf_in.buf);
- kfree(line6midi);
- return err;
- }
+ rmidi->private_data = line6midi;
+ rmidi->private_free = snd_line6_midi_free;
+ init_waitqueue_head(&line6midi->send_wait);
+ spin_lock_init(&line6midi->lock);
line6midi->line6 = line6;
- line6->line6midi = line6midi;
- err = snd_device_new(line6->card, SNDRV_DEV_RAWMIDI, line6midi,
- &midi_ops);
+ err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0);
if (err < 0)
return err;
- err = snd_line6_new_midi(line6midi);
+ err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1);
if (err < 0)
return err;
- init_waitqueue_head(&line6midi->send_wait);
- spin_lock_init(&line6midi->send_urb_lock);
- spin_lock_init(&line6midi->midi_transmit_lock);
+ line6->line6midi = line6midi;
return 0;
}
+EXPORT_SYMBOL_GPL(line6_init_midi);
diff --git a/drivers/staging/line6/midi.h b/sound/usb/line6/midi.h
index 78f903fb4d41..cf82d69e2747 100644
--- a/drivers/staging/line6/midi.h
+++ b/sound/usb/line6/midi.h
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -19,49 +19,28 @@
#define MIDI_BUFFER_SIZE 1024
struct snd_line6_midi {
- /**
- Pointer back to the Line6 driver data structure.
- */
+ /* Pointer back to the Line 6 driver data structure */
struct usb_line6 *line6;
- /**
- MIDI substream for receiving (or NULL if not active).
- */
+ /* MIDI substream for receiving (or NULL if not active) */
struct snd_rawmidi_substream *substream_receive;
- /**
- MIDI substream for transmitting (or NULL if not active).
- */
+ /* MIDI substream for transmitting (or NULL if not active) */
struct snd_rawmidi_substream *substream_transmit;
- /**
- Number of currently active MIDI send URBs.
- */
+ /* Number of currently active MIDI send URBs */
int num_active_send_urbs;
- /**
- Spin lock to protect updates of send_urb.
- */
- spinlock_t send_urb_lock;
+ /* Spin lock to protect MIDI buffer handling */
+ spinlock_t lock;
- /**
- Spin lock to protect MIDI buffer handling.
- */
- spinlock_t midi_transmit_lock;
-
- /**
- Wait queue for MIDI transmission.
- */
+ /* Wait queue for MIDI transmission */
wait_queue_head_t send_wait;
- /**
- Buffer for incoming MIDI stream.
- */
+ /* Buffer for incoming MIDI stream */
struct midi_buffer midibuf_in;
- /**
- Buffer for outgoing MIDI stream.
- */
+ /* Buffer for outgoing MIDI stream */
struct midi_buffer midibuf_out;
};
diff --git a/drivers/staging/line6/midibuf.c b/sound/usb/line6/midibuf.c
index 1ff856989fd6..36a610ba342e 100644
--- a/drivers/staging/line6/midibuf.c
+++ b/sound/usb/line6/midibuf.c
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -26,7 +26,7 @@ static int midibuf_message_length(unsigned char code)
} else {
/*
Note that according to the MIDI specification 0xf2 is
- the "Song Position Pointer", but this is used by Line6
+ the "Song Position Pointer", but this is used by Line 6
to send sysex messages to the host.
*/
static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
@@ -67,13 +67,6 @@ int line6_midibuf_init(struct midi_buffer *this, int size, int split)
return 0;
}
-void line6_midibuf_status(struct midi_buffer *this)
-{
- pr_debug("midibuf size=%d split=%d pos_read=%d pos_write=%d full=%d command_prev=%02x\n",
- this->size, this->split, this->pos_read, this->pos_write,
- this->full, this->command_prev);
-}
-
int line6_midibuf_bytes_free(struct midi_buffer *this)
{
return
@@ -252,17 +245,6 @@ int line6_midibuf_ignore(struct midi_buffer *this, int length)
return length;
}
-int line6_midibuf_skip_message(struct midi_buffer *this, unsigned short mask)
-{
- int cmd = this->command_prev;
-
- if ((cmd >= 0x80) && (cmd < 0xf0))
- if ((mask & (1 << (cmd & 0x0f))) == 0)
- return 1;
-
- return 0;
-}
-
void line6_midibuf_destroy(struct midi_buffer *this)
{
kfree(this->buf);
diff --git a/drivers/staging/line6/midibuf.h b/sound/usb/line6/midibuf.h
index 707482b940e4..6ea21ffb6627 100644
--- a/drivers/staging/line6/midibuf.h
+++ b/sound/usb/line6/midibuf.h
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -29,9 +29,6 @@ extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
int length);
extern void line6_midibuf_reset(struct midi_buffer *mb);
-extern int line6_midibuf_skip_message(struct midi_buffer *mb,
- unsigned short mask);
-extern void line6_midibuf_status(struct midi_buffer *mb);
extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
int length);
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
new file mode 100644
index 000000000000..8461d6bf992f
--- /dev/null
+++ b/sound/usb/line6/pcm.c
@@ -0,0 +1,588 @@
+/*
+ * Line 6 Linux USB driver
+ *
+ * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "capture.h"
+#include "driver.h"
+#include "playback.h"
+
+/* impulse response volume controls */
+static int snd_line6_impulse_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 255;
+ return 0;
+}
+
+static int snd_line6_impulse_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = line6pcm->impulse_volume;
+ return 0;
+}
+
+static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+ int value = ucontrol->value.integer.value[0];
+ int err;
+
+ if (line6pcm->impulse_volume == value)
+ return 0;
+
+ line6pcm->impulse_volume = value;
+ if (value > 0) {
+ err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
+ if (err < 0) {
+ line6pcm->impulse_volume = 0;
+ line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
+ return err;
+ }
+ } else {
+ line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
+ }
+ return 1;
+}
+
+/* impulse response period controls */
+static int snd_line6_impulse_period_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 2000;
+ return 0;
+}
+
+static int snd_line6_impulse_period_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = line6pcm->impulse_period;
+ return 0;
+}
+
+static int snd_line6_impulse_period_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+ int value = ucontrol->value.integer.value[0];
+
+ if (line6pcm->impulse_period == value)
+ return 0;
+
+ line6pcm->impulse_period = value;
+ return 1;
+}
+
+/*
+ Unlink all currently active URBs.
+*/
+static void line6_unlink_audio_urbs(struct snd_line6_pcm *line6pcm,
+ struct line6_pcm_stream *pcms)
+{
+ int i;
+
+ for (i = 0; i < LINE6_ISO_BUFFERS; i++) {
+ if (test_bit(i, &pcms->active_urbs)) {
+ if (!test_and_set_bit(i, &pcms->unlink_urbs))
+ usb_unlink_urb(pcms->urbs[i]);
+ }
+ }
+}
+
+/*
+ Wait until unlinking of all currently active URBs has been finished.
+*/
+static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
+ struct line6_pcm_stream *pcms)
+{
+ int timeout = HZ;
+ int i;
+ int alive;
+
+ do {
+ alive = 0;
+ for (i = 0; i < LINE6_ISO_BUFFERS; i++) {
+ if (test_bit(i, &pcms->active_urbs))
+ alive++;
+ }
+ if (!alive)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ } while (--timeout > 0);
+ if (alive)
+ dev_err(line6pcm->line6->ifcdev,
+ "timeout: still %d active urbs..\n", alive);
+}
+
+static inline struct line6_pcm_stream *
+get_stream(struct snd_line6_pcm *line6pcm, int direction)
+{
+ return (direction == SNDRV_PCM_STREAM_PLAYBACK) ?
+ &line6pcm->out : &line6pcm->in;
+}
+
+/* allocate a buffer if not opened yet;
+ * call this with line6pcm->state_mutex held
+ */
+static int line6_buffer_acquire(struct snd_line6_pcm *line6pcm,
+ struct line6_pcm_stream *pstr, int type)
+{
+ /* Invoked multiple times in a row so allocate once only */
+ if (!test_and_set_bit(type, &pstr->opened) && !pstr->buffer) {
+ pstr->buffer = kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+ if (!pstr->buffer)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* free a buffer if all streams are closed;
+ * call this with line6pcm->state_mutex held
+ */
+static void line6_buffer_release(struct snd_line6_pcm *line6pcm,
+ struct line6_pcm_stream *pstr, int type)
+{
+
+ clear_bit(type, &pstr->opened);
+ if (!pstr->opened) {
+ line6_wait_clear_audio_urbs(line6pcm, pstr);
+ kfree(pstr->buffer);
+ pstr->buffer = NULL;
+ }
+}
+
+/* start a PCM stream */
+static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
+ int type)
+{
+ unsigned long flags;
+ struct line6_pcm_stream *pstr = get_stream(line6pcm, direction);
+ int ret = 0;
+
+ spin_lock_irqsave(&pstr->lock, flags);
+ if (!test_and_set_bit(type, &pstr->running)) {
+ if (pstr->active_urbs || pstr->unlink_urbs) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ pstr->count = 0;
+ /* Submit all currently available URBs */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = line6_submit_audio_out_all_urbs(line6pcm);
+ else
+ ret = line6_submit_audio_in_all_urbs(line6pcm);
+ }
+ error:
+ if (ret < 0)
+ clear_bit(type, &pstr->running);
+ spin_unlock_irqrestore(&pstr->lock, flags);
+ return ret;
+}
+
+/* stop a PCM stream; this doesn't sync with the unlinked URBs */
+static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
+ int type)
+{
+ unsigned long flags;
+ struct line6_pcm_stream *pstr = get_stream(line6pcm, direction);
+
+ spin_lock_irqsave(&pstr->lock, flags);
+ clear_bit(type, &pstr->running);
+ if (!pstr->running) {
+ line6_unlink_audio_urbs(line6pcm, pstr);
+ if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ line6pcm->prev_fbuf = NULL;
+ line6pcm->prev_fsize = 0;
+ }
+ }
+ spin_unlock_irqrestore(&pstr->lock, flags);
+}
+
+/* common PCM trigger callback */
+int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ struct snd_pcm_substream *s;
+ int err;
+
+ clear_bit(LINE6_FLAG_PREPARED, &line6pcm->flags);
+
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (s->pcm->card != substream->pcm->card)
+ continue;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ err = line6_stream_start(line6pcm, s->stream,
+ LINE6_STREAM_PCM);
+ if (err < 0)
+ return err;
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ line6_stream_stop(line6pcm, s->stream,
+ LINE6_STREAM_PCM);
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ return -EINVAL;
+ set_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags);
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ return -EINVAL;
+ clear_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* common PCM pointer callback */
+snd_pcm_uframes_t snd_line6_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream);
+
+ return pstr->pos_done;
+}
+
+/* Acquire and start duplex streams:
+ * type is either LINE6_STREAM_IMPULSE or LINE6_STREAM_MONITOR
+ */
+int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type)
+{
+ struct line6_pcm_stream *pstr;
+ int ret = 0, dir;
+
+ mutex_lock(&line6pcm->state_mutex);
+ for (dir = 0; dir < 2; dir++) {
+ pstr = get_stream(line6pcm, dir);
+ ret = line6_buffer_acquire(line6pcm, pstr, type);
+ if (ret < 0)
+ goto error;
+ if (!pstr->running)
+ line6_wait_clear_audio_urbs(line6pcm, pstr);
+ }
+ for (dir = 0; dir < 2; dir++) {
+ ret = line6_stream_start(line6pcm, dir, type);
+ if (ret < 0)
+ goto error;
+ }
+ error:
+ mutex_unlock(&line6pcm->state_mutex);
+ if (ret < 0)
+ line6_pcm_release(line6pcm, type);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(line6_pcm_acquire);
+
+/* Stop and release duplex streams */
+void line6_pcm_release(struct snd_line6_pcm *line6pcm, int type)
+{
+ struct line6_pcm_stream *pstr;
+ int dir;
+
+ mutex_lock(&line6pcm->state_mutex);
+ for (dir = 0; dir < 2; dir++)
+ line6_stream_stop(line6pcm, dir, type);
+ for (dir = 0; dir < 2; dir++) {
+ pstr = get_stream(line6pcm, dir);
+ line6_buffer_release(line6pcm, pstr, type);
+ }
+ mutex_unlock(&line6pcm->state_mutex);
+}
+EXPORT_SYMBOL_GPL(line6_pcm_release);
+
+/* common PCM hw_params callback */
+int snd_line6_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ int ret;
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream);
+
+ mutex_lock(&line6pcm->state_mutex);
+ ret = line6_buffer_acquire(line6pcm, pstr, LINE6_STREAM_PCM);
+ if (ret < 0)
+ goto error;
+
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0) {
+ line6_buffer_release(line6pcm, pstr, LINE6_STREAM_PCM);
+ goto error;
+ }
+
+ pstr->period = params_period_bytes(hw_params);
+ error:
+ mutex_unlock(&line6pcm->state_mutex);
+ return ret;
+}
+
+/* common PCM hw_free callback */
+int snd_line6_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream);
+
+ mutex_lock(&line6pcm->state_mutex);
+ line6_buffer_release(line6pcm, pstr, LINE6_STREAM_PCM);
+ mutex_unlock(&line6pcm->state_mutex);
+ return snd_pcm_lib_free_pages(substream);
+}
+
+
+/* control info callback */
+static int snd_line6_control_playback_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 256;
+ return 0;
+}
+
+/* control get callback */
+static int snd_line6_control_playback_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int i;
+ struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+
+ for (i = 0; i < 2; i++)
+ ucontrol->value.integer.value[i] = line6pcm->volume_playback[i];
+
+ return 0;
+}
+
+/* control put callback */
+static int snd_line6_control_playback_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int i, changed = 0;
+ struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+
+ for (i = 0; i < 2; i++)
+ if (line6pcm->volume_playback[i] !=
+ ucontrol->value.integer.value[i]) {
+ line6pcm->volume_playback[i] =
+ ucontrol->value.integer.value[i];
+ changed = 1;
+ }
+
+ return changed;
+}
+
+/* control definition */
+static struct snd_kcontrol_new line6_controls[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Volume",
+ .info = snd_line6_control_playback_info,
+ .get = snd_line6_control_playback_get,
+ .put = snd_line6_control_playback_put
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Impulse Response Volume",
+ .info = snd_line6_impulse_volume_info,
+ .get = snd_line6_impulse_volume_get,
+ .put = snd_line6_impulse_volume_put
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Impulse Response Period",
+ .info = snd_line6_impulse_period_info,
+ .get = snd_line6_impulse_period_get,
+ .put = snd_line6_impulse_period_put
+ },
+};
+
+/*
+ Cleanup the PCM device.
+*/
+static void cleanup_urbs(struct line6_pcm_stream *pcms)
+{
+ int i;
+
+ for (i = 0; i < LINE6_ISO_BUFFERS; i++) {
+ if (pcms->urbs[i]) {
+ usb_kill_urb(pcms->urbs[i]);
+ usb_free_urb(pcms->urbs[i]);
+ }
+ }
+}
+
+static void line6_cleanup_pcm(struct snd_pcm *pcm)
+{
+ struct snd_line6_pcm *line6pcm = snd_pcm_chip(pcm);
+
+ cleanup_urbs(&line6pcm->out);
+ cleanup_urbs(&line6pcm->in);
+ kfree(line6pcm);
+}
+
+/* create a PCM device */
+static int snd_line6_new_pcm(struct usb_line6 *line6, struct snd_pcm **pcm_ret)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ err = snd_pcm_new(line6->card, (char *)line6->properties->name,
+ 0, 1, 1, pcm_ret);
+ if (err < 0)
+ return err;
+ pcm = *pcm_ret;
+ strcpy(pcm->name, line6->properties->name);
+
+ /* set operators */
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_line6_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_line6_capture_ops);
+
+ /* pre-allocation of buffers */
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data
+ (GFP_KERNEL), 64 * 1024,
+ 128 * 1024);
+ return 0;
+}
+
+/*
+ Sync with PCM stream stops.
+*/
+void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm)
+{
+ line6_unlink_audio_urbs(line6pcm, &line6pcm->out);
+ line6_unlink_audio_urbs(line6pcm, &line6pcm->in);
+ line6_wait_clear_audio_urbs(line6pcm, &line6pcm->out);
+ line6_wait_clear_audio_urbs(line6pcm, &line6pcm->in);
+}
+
+/*
+ Create and register the PCM device and mixer entries.
+ Create URBs for playback and capture.
+*/
+int line6_init_pcm(struct usb_line6 *line6,
+ struct line6_pcm_properties *properties)
+{
+ int i, err;
+ unsigned ep_read = line6->properties->ep_audio_r;
+ unsigned ep_write = line6->properties->ep_audio_w;
+ struct snd_pcm *pcm;
+ struct snd_line6_pcm *line6pcm;
+
+ if (!(line6->properties->capabilities & LINE6_CAP_PCM))
+ return 0; /* skip PCM initialization and report success */
+
+ err = snd_line6_new_pcm(line6, &pcm);
+ if (err < 0)
+ return err;
+
+ line6pcm = kzalloc(sizeof(*line6pcm), GFP_KERNEL);
+ if (!line6pcm)
+ return -ENOMEM;
+
+ mutex_init(&line6pcm->state_mutex);
+ line6pcm->pcm = pcm;
+ line6pcm->properties = properties;
+ line6pcm->volume_playback[0] = line6pcm->volume_playback[1] = 255;
+ line6pcm->volume_monitor = 255;
+ line6pcm->line6 = line6;
+
+ /* Read and write buffers are sized identically, so choose minimum */
+ line6pcm->max_packet_size = min(
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0),
+ usb_maxpacket(line6->usbdev,
+ usb_sndisocpipe(line6->usbdev, ep_write), 1));
+
+ spin_lock_init(&line6pcm->out.lock);
+ spin_lock_init(&line6pcm->in.lock);
+ line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+ line6->line6pcm = line6pcm;
+
+ pcm->private_data = line6pcm;
+ pcm->private_free = line6_cleanup_pcm;
+
+ err = line6_create_audio_out_urbs(line6pcm);
+ if (err < 0)
+ return err;
+
+ err = line6_create_audio_in_urbs(line6pcm);
+ if (err < 0)
+ return err;
+
+ /* mixer: */
+ for (i = 0; i < ARRAY_SIZE(line6_controls); i++) {
+ err = snd_ctl_add(line6->card,
+ snd_ctl_new1(&line6_controls[i], line6pcm));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(line6_init_pcm);
+
+/* prepare pcm callback */
+int snd_line6_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream);
+
+ mutex_lock(&line6pcm->state_mutex);
+ if (!pstr->running)
+ line6_wait_clear_audio_urbs(line6pcm, pstr);
+
+ if (!test_and_set_bit(LINE6_FLAG_PREPARED, &line6pcm->flags)) {
+ line6pcm->out.count = 0;
+ line6pcm->out.pos = 0;
+ line6pcm->out.pos_done = 0;
+ line6pcm->out.bytes = 0;
+ line6pcm->in.count = 0;
+ line6pcm->in.pos_done = 0;
+ line6pcm->in.bytes = 0;
+ }
+
+ mutex_unlock(&line6pcm->state_mutex);
+ return 0;
+}
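
The impulse-volume control above shows the intended usage pattern of the new reference-counted API: a feature that needs the duplex audio path acquires its LINE6_STREAM_* type and releases it when done, and the buffers/URBs are only torn down once no type holds them. A minimal sketch of that pairing for a monitor-style feature (the helper name is hypothetical, not part of this patch):

/* Hypothetical helper mirroring the impulse/monitor control callbacks:
 * acquire the shared duplex streams when enabling, release on disable
 * or after a failed acquire.  Assumes the declarations from pcm.h. */
static int example_set_monitor(struct snd_line6_pcm *line6pcm, bool enable)
{
	int err;

	if (enable) {
		err = line6_pcm_acquire(line6pcm, LINE6_STREAM_MONITOR);
		if (err < 0) {
			line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR);
			return err;
		}
	} else {
		line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR);
	}
	return 0;
}
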
diff --git a/sound/usb/line6/pcm.h b/sound/usb/line6/pcm.h
new file mode 100644
index 000000000000..508410adbd51
--- /dev/null
+++ b/sound/usb/line6/pcm.h
@@ -0,0 +1,197 @@
+/*
+ * Line 6 Linux USB driver
+ *
+ * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+/*
+ PCM interface to POD series devices.
+*/
+
+#ifndef PCM_H
+#define PCM_H
+
+#include <sound/pcm.h>
+
+#include "driver.h"
+
+/* number of URBs */
+#define LINE6_ISO_BUFFERS 2
+
+/*
+ number of USB frames per URB
+ The Line 6 Windows driver always transmits two frames per packet, but
+ the Linux driver performs significantly better (i.e., lower latency)
+ with only one frame per packet.
+*/
+#define LINE6_ISO_PACKETS 1
+
+/* in a "full speed" device (such as the PODxt Pro) this means 1ms */
+#define LINE6_ISO_INTERVAL 1
+
+#define LINE6_IMPULSE_DEFAULT_PERIOD 100
+
+/*
+ Get substream from Line 6 PCM data structure
+*/
+#define get_substream(line6pcm, stream) \
+ (line6pcm->pcm->streams[stream].substream)
+
+/*
+ PCM mode bits.
+
+ There are several features of the Line 6 USB driver which require PCM
+ data to be exchanged with the device:
+ *) PCM playback and capture via ALSA
+ *) software monitoring (for devices without hardware monitoring)
+ *) optional impulse response measurement
+ However, from the device's point of view, there is just a single
+ capture and playback stream, which must be shared between these
+ subsystems. It is therefore necessary to maintain the state of the
+ subsystems with respect to PCM usage.
+
+ We define two bit flags, "opened" and "running", for each playback
+ or capture stream. Both can contain the bit flag corresponding to
+ LINE6_STREAM_* type,
+ LINE6_STREAM_PCM = ALSA PCM playback or capture
+ LINE6_STREAM_MONITOR = software monitoring
+ LINE6_STREAM_IMPULSE = optional impulse response measurement
+ The opened flag indicates whether the buffer is allocated while
+ the running flag indicates whether the stream is running.
+
+ For monitor or impulse operations, the driver needs to call
+ line6_pcm_acquire() or line6_pcm_release() with the appropriate
+ LINE6_STREAM_* flag.
+*/
+
+/* stream types */
+enum {
+ LINE6_STREAM_PCM,
+ LINE6_STREAM_MONITOR,
+ LINE6_STREAM_IMPULSE,
+};
+
+/* misc bit flags for PCM operation */
+enum {
+ LINE6_FLAG_PAUSE_PLAYBACK,
+ LINE6_FLAG_PREPARED,
+};
+
+struct line6_pcm_properties {
+ struct snd_pcm_hardware playback_hw, capture_hw;
+ struct snd_pcm_hw_constraint_ratdens rates;
+ int bytes_per_frame;
+};
+
+struct line6_pcm_stream {
+ /* allocated URBs */
+ struct urb *urbs[LINE6_ISO_BUFFERS];
+
+ /* Temporary buffer;
+ * Since the packet size is not known in advance, this buffer is
+ * large enough to store maximum size packets.
+ */
+ unsigned char *buffer;
+
+ /* Free frame position in the buffer. */
+ snd_pcm_uframes_t pos;
+
+ /* Count processed bytes;
+ * This is modulo period size (to determine when a period is finished).
+ */
+ unsigned bytes;
+
+ /* Counter to create desired sample rate */
+ unsigned count;
+
+ /* period size in bytes */
+ unsigned period;
+
+ /* Processed frame position in the buffer;
+ * The contents of the ring buffer have been consumed by the USB
+ * subsystem (i.e., sent to the USB device) up to this position.
+ */
+ snd_pcm_uframes_t pos_done;
+
+ /* Bit mask of active URBs */
+ unsigned long active_urbs;
+
+ /* Bit mask of URBs currently being unlinked */
+ unsigned long unlink_urbs;
+
+ /* Spin lock to protect updates of the buffer positions (not contents)
+ */
+ spinlock_t lock;
+
+ /* Bit flags for operational stream types */
+ unsigned long opened;
+
+ /* Bit flags for running stream types */
+ unsigned long running;
+
+ int last_frame;
+};
+
+struct snd_line6_pcm {
+ /* Pointer back to the Line 6 driver data structure */
+ struct usb_line6 *line6;
+
+ /* Properties. */
+ struct line6_pcm_properties *properties;
+
+ /* ALSA pcm stream */
+ struct snd_pcm *pcm;
+
+ /* Protects state changes of the in/out streams */
+ struct mutex state_mutex;
+
+ /* Capture and playback streams */
+ struct line6_pcm_stream in;
+ struct line6_pcm_stream out;
+
+ /* Previously captured frame (for software monitoring) */
+ unsigned char *prev_fbuf;
+
+ /* Size of previously captured frame (for software monitoring) */
+ int prev_fsize;
+
+ /* Maximum size of USB packet */
+ int max_packet_size;
+
+ /* PCM playback volume (left and right) */
+ int volume_playback[2];
+
+ /* PCM monitor volume */
+ int volume_monitor;
+
+ /* Volume of impulse response test signal (if zero, test is disabled) */
+ int impulse_volume;
+
+ /* Period of impulse response test signal */
+ int impulse_period;
+
+ /* Counter for impulse response test signal */
+ int impulse_count;
+
+ /* Several status bits (see LINE6_FLAG_*) */
+ unsigned long flags;
+};
+
+extern int line6_init_pcm(struct usb_line6 *line6,
+ struct line6_pcm_properties *properties);
+extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd);
+extern int snd_line6_prepare(struct snd_pcm_substream *substream);
+extern int snd_line6_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params);
+extern int snd_line6_hw_free(struct snd_pcm_substream *substream);
+extern snd_pcm_uframes_t snd_line6_pointer(struct snd_pcm_substream *substream);
+extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
+extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type);
+extern void line6_pcm_release(struct snd_line6_pcm *line6pcm, int type);
+
+#endif
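
The "PCM mode bits" comment above is the heart of the rewritten state handling: each LINE6_STREAM_* value is a bit index into the per-direction "opened" (buffer allocated) and "running" (URBs submitted) masks, so several subsystems can share one physical stream. A small illustrative sketch of reading those masks (the debug helper is hypothetical, not part of the patch):

/* Hypothetical debug helper; only illustrates the bit-mask bookkeeping
 * described in the "PCM mode bits" comment block above. */
static void example_dump_stream_state(struct line6_pcm_stream *pstr)
{
	pr_debug("opened=%#lx running=%#lx pcm=%d monitor=%d impulse=%d\n",
		 pstr->opened, pstr->running,
		 test_bit(LINE6_STREAM_PCM, &pstr->running),
		 test_bit(LINE6_STREAM_MONITOR, &pstr->running),
		 test_bit(LINE6_STREAM_IMPULSE, &pstr->running));
}
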
diff --git a/drivers/staging/line6/playback.c b/sound/usb/line6/playback.c
index 2ca8900e68c3..05dee690f487 100644
--- a/drivers/staging/line6/playback.c
+++ b/sound/usb/line6/playback.c
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -14,11 +14,9 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
-#include "audio.h"
#include "capture.h"
#include "driver.h"
#include "pcm.h"
-#include "pod.h"
#include "playback.h"
/*
@@ -33,13 +31,16 @@ static void change_volume(struct urb *urb_out, int volume[],
return; /* maximum volume - no change */
if (bytes_per_frame == 4) {
- short *p, *buf_end;
+ __le16 *p, *buf_end;
- p = (short *)urb_out->transfer_buffer;
+ p = (__le16 *)urb_out->transfer_buffer;
buf_end = p + urb_out->transfer_buffer_length / sizeof(*p);
for (; p < buf_end; ++p) {
- *p = (*p * volume[chn & 1]) >> 8;
+ short pv = le16_to_cpu(*p);
+ int val = (pv * volume[chn & 1]) >> 8;
+ pv = clamp(val, -0x8000, 0x7fff);
+ *p = cpu_to_le16(pv);
++chn;
}
} else if (bytes_per_frame == 6) {
@@ -53,6 +54,7 @@ static void change_volume(struct urb *urb_out, int volume[],
val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16);
val = (val * volume[chn & 1]) >> 8;
+ val = clamp(val, -0x800000, 0x7fffff);
p[0] = val;
p[1] = val >> 8;
p[2] = val >> 16;
@@ -61,8 +63,6 @@ static void change_volume(struct urb *urb_out, int volume[],
}
}
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
-
/*
Create signal for impulse response test.
*/
@@ -106,8 +106,6 @@ static void create_impulse_test_signal(struct snd_line6_pcm *line6pcm,
}
}
-#endif
-
/*
Add signal to buffer for software monitoring.
*/
@@ -118,14 +116,19 @@ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal,
return; /* zero volume - no change */
if (bytes_per_frame == 4) {
- short *pi, *po, *buf_end;
+ __le16 *pi, *po, *buf_end;
- pi = (short *)signal;
- po = (short *)urb_out->transfer_buffer;
+ pi = (__le16 *)signal;
+ po = (__le16 *)urb_out->transfer_buffer;
buf_end = po + urb_out->transfer_buffer_length / sizeof(*po);
- for (; po < buf_end; ++pi, ++po)
- *po += (*pi * volume) >> 8;
+ for (; po < buf_end; ++pi, ++po) {
+ short pov = le16_to_cpu(*po);
+ short piv = le16_to_cpu(*pi);
+ int val = pov + ((piv * volume) >> 8);
+ pov = clamp(val, -0x8000, 0x7fff);
+ *po = cpu_to_le16(pov);
+ }
}
/*
@@ -136,32 +139,30 @@ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal,
/*
Find a free URB, prepare audio data, and submit URB.
+ Must be called with line6pcm->out.lock held.
*/
static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
{
int index;
- unsigned long flags;
int i, urb_size, urb_frames;
int ret;
const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
const int frame_increment =
- line6pcm->properties->snd_line6_rates.rats[0].num_min;
+ line6pcm->properties->rates.rats[0].num_min;
const int frame_factor =
- line6pcm->properties->snd_line6_rates.rats[0].den *
- (USB_INTERVALS_PER_SECOND / LINE6_ISO_INTERVAL);
+ line6pcm->properties->rates.rats[0].den *
+ (USB_INTERVALS_PER_SECOND / LINE6_ISO_INTERVAL);
struct urb *urb_out;
- spin_lock_irqsave(&line6pcm->lock_audio_out, flags);
index =
- find_first_zero_bit(&line6pcm->active_urb_out, LINE6_ISO_BUFFERS);
+ find_first_zero_bit(&line6pcm->out.active_urbs, LINE6_ISO_BUFFERS);
if (index < 0 || index >= LINE6_ISO_BUFFERS) {
- spin_unlock_irqrestore(&line6pcm->lock_audio_out, flags);
dev_err(line6pcm->line6->ifcdev, "no free URB found\n");
return -EINVAL;
}
- urb_out = line6pcm->urb_audio_out[index];
+ urb_out = line6pcm->out.urbs[index];
urb_size = 0;
for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
@@ -170,15 +171,13 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
struct usb_iso_packet_descriptor *fout =
&urb_out->iso_frame_desc[i];
- if (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM)
- fsize = line6pcm->prev_fsize;
-
+ fsize = line6pcm->prev_fsize;
if (fsize == 0) {
int n;
- line6pcm->count_out += frame_increment;
- n = line6pcm->count_out / frame_factor;
- line6pcm->count_out -= n * frame_factor;
+ line6pcm->out.count += frame_increment;
+ n = line6pcm->out.count / frame_factor;
+ line6pcm->out.count -= n * frame_factor;
fsize = n * bytes_per_frame;
}
@@ -189,36 +188,35 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (urb_size == 0) {
/* can't determine URB size */
- spin_unlock_irqrestore(&line6pcm->lock_audio_out, flags);
dev_err(line6pcm->line6->ifcdev, "driver bug: urb_size = 0\n");
return -EINVAL;
}
urb_frames = urb_size / bytes_per_frame;
urb_out->transfer_buffer =
- line6pcm->buffer_out +
+ line6pcm->out.buffer +
index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags) &&
- !test_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running) &&
+ !test_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime;
- if (line6pcm->pos_out + urb_frames > runtime->buffer_size) {
+ if (line6pcm->out.pos + urb_frames > runtime->buffer_size) {
/*
The transferred area goes over buffer boundary,
copy the data to the temp buffer.
*/
int len;
- len = runtime->buffer_size - line6pcm->pos_out;
+ len = runtime->buffer_size - line6pcm->out.pos;
if (len > 0) {
memcpy(urb_out->transfer_buffer,
runtime->dma_area +
- line6pcm->pos_out * bytes_per_frame,
+ line6pcm->out.pos * bytes_per_frame,
len * bytes_per_frame);
memcpy(urb_out->transfer_buffer +
len * bytes_per_frame, runtime->dma_area,
@@ -229,27 +227,27 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
} else {
memcpy(urb_out->transfer_buffer,
runtime->dma_area +
- line6pcm->pos_out * bytes_per_frame,
+ line6pcm->out.pos * bytes_per_frame,
urb_out->transfer_buffer_length);
}
- line6pcm->pos_out += urb_frames;
- if (line6pcm->pos_out >= runtime->buffer_size)
- line6pcm->pos_out -= runtime->buffer_size;
+ line6pcm->out.pos += urb_frames;
+ if (line6pcm->out.pos >= runtime->buffer_size)
+ line6pcm->out.pos -= runtime->buffer_size;
+
+ change_volume(urb_out, line6pcm->volume_playback,
+ bytes_per_frame);
} else {
memset(urb_out->transfer_buffer, 0,
urb_out->transfer_buffer_length);
}
- change_volume(urb_out, line6pcm->volume_playback, bytes_per_frame);
-
- if (line6pcm->prev_fbuf != NULL) {
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (line6pcm->flags & LINE6_BITS_PCM_IMPULSE) {
+ spin_lock_nested(&line6pcm->in.lock, SINGLE_DEPTH_NESTING);
+ if (line6pcm->prev_fbuf) {
+ if (test_bit(LINE6_STREAM_IMPULSE, &line6pcm->out.running)) {
create_impulse_test_signal(line6pcm, urb_out,
bytes_per_frame);
- if (line6pcm->flags &
- LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
+ if (test_bit(LINE6_STREAM_PCM, &line6pcm->in.running)) {
line6_capture_copy(line6pcm,
urb_out->transfer_buffer,
urb_out->
@@ -258,104 +256,43 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_out->transfer_buffer_length);
}
} else {
-#endif
- if (!
- (line6pcm->line6->
- properties->capabilities & LINE6_BIT_HWMON)
- && (line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM)
- && (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM))
+ if (!(line6pcm->line6->properties->capabilities & LINE6_CAP_HWMON)
+ && line6pcm->out.running && line6pcm->in.running)
add_monitor_signal(urb_out, line6pcm->prev_fbuf,
line6pcm->volume_monitor,
bytes_per_frame);
-#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
}
-#endif
+ line6pcm->prev_fbuf = NULL;
+ line6pcm->prev_fsize = 0;
}
+ spin_unlock(&line6pcm->in.lock);
ret = usb_submit_urb(urb_out, GFP_ATOMIC);
if (ret == 0)
- set_bit(index, &line6pcm->active_urb_out);
+ set_bit(index, &line6pcm->out.active_urbs);
else
dev_err(line6pcm->line6->ifcdev,
"URB out #%d submission failed (%d)\n", index, ret);
- spin_unlock_irqrestore(&line6pcm->lock_audio_out, flags);
return 0;
}
/*
Submit all currently available playback URBs.
-*/
+ Must be called with line6pcm->out.lock held.
+ */
int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm)
{
- int ret, i;
+ int ret = 0, i;
for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
ret = submit_audio_out_urb(line6pcm);
if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-/*
- Unlink all currently active playback URBs.
-*/
-void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm)
-{
- unsigned int i;
-
- for (i = LINE6_ISO_BUFFERS; i--;) {
- if (test_bit(i, &line6pcm->active_urb_out)) {
- if (!test_and_set_bit(i, &line6pcm->unlink_urb_out)) {
- struct urb *u = line6pcm->urb_audio_out[i];
-
- usb_unlink_urb(u);
- }
- }
- }
-}
-
-/*
- Wait until unlinking of all currently active playback URBs has been
- finished.
-*/
-void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
-{
- int timeout = HZ;
- unsigned int i;
- int alive;
-
- do {
- alive = 0;
- for (i = LINE6_ISO_BUFFERS; i--;) {
- if (test_bit(i, &line6pcm->active_urb_out))
- alive++;
- }
- if (!alive)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- } while (--timeout > 0);
- if (alive)
- snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
-}
-
-/*
- Unlink all currently active playback URBs, and wait for finishing.
-*/
-void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
-{
- line6_unlink_audio_out_urbs(line6pcm);
- line6_wait_clear_audio_out_urbs(line6pcm);
-}
+ }
-void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
-{
- kfree(line6pcm->buffer_out);
- line6pcm->buffer_out = NULL;
+ return ret;
}
/*
@@ -373,56 +310,56 @@ static void audio_out_callback(struct urb *urb)
memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
#endif
- line6pcm->last_frame_out = urb->start_frame;
+ line6pcm->out.last_frame = urb->start_frame;
/* find index of URB */
- for (index = LINE6_ISO_BUFFERS; index--;)
- if (urb == line6pcm->urb_audio_out[index])
+ for (index = 0; index < LINE6_ISO_BUFFERS; index++)
+ if (urb == line6pcm->out.urbs[index])
break;
- if (index < 0)
+ if (index >= LINE6_ISO_BUFFERS)
return; /* URB has been unlinked asynchronously */
- for (i = LINE6_ISO_PACKETS; i--;)
+ for (i = 0; i < LINE6_ISO_PACKETS; i++)
length += urb->iso_frame_desc[i].length;
- spin_lock_irqsave(&line6pcm->lock_audio_out, flags);
+ spin_lock_irqsave(&line6pcm->out.lock, flags);
- if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
+ if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running)) {
struct snd_pcm_runtime *runtime = substream->runtime;
- line6pcm->pos_out_done +=
+ line6pcm->out.pos_done +=
length / line6pcm->properties->bytes_per_frame;
- if (line6pcm->pos_out_done >= runtime->buffer_size)
- line6pcm->pos_out_done -= runtime->buffer_size;
+ if (line6pcm->out.pos_done >= runtime->buffer_size)
+ line6pcm->out.pos_done -= runtime->buffer_size;
}
- clear_bit(index, &line6pcm->active_urb_out);
+ clear_bit(index, &line6pcm->out.active_urbs);
- for (i = LINE6_ISO_PACKETS; i--;)
+ for (i = 0; i < LINE6_ISO_PACKETS; i++)
if (urb->iso_frame_desc[i].status == -EXDEV) {
shutdown = 1;
break;
}
- if (test_and_clear_bit(index, &line6pcm->unlink_urb_out))
+ if (test_and_clear_bit(index, &line6pcm->out.unlink_urbs))
shutdown = 1;
- spin_unlock_irqrestore(&line6pcm->lock_audio_out, flags);
-
if (!shutdown) {
submit_audio_out_urb(line6pcm);
- if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM,
- &line6pcm->flags)) {
- line6pcm->bytes_out += length;
- if (line6pcm->bytes_out >= line6pcm->period_out) {
- line6pcm->bytes_out %= line6pcm->period_out;
+ if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running)) {
+ line6pcm->out.bytes += length;
+ if (line6pcm->out.bytes >= line6pcm->out.period) {
+ line6pcm->out.bytes %= line6pcm->out.period;
+ spin_unlock(&line6pcm->out.lock);
snd_pcm_period_elapsed(substream);
+ spin_lock(&line6pcm->out.lock);
}
}
}
+ spin_unlock_irqrestore(&line6pcm->out.lock, flags);
}
/* open playback callback */
@@ -433,12 +370,11 @@ static int snd_line6_playback_open(struct snd_pcm_substream *substream)
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
err = snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- (&line6pcm->
- properties->snd_line6_rates));
+ &line6pcm->properties->rates);
if (err < 0)
return err;
- runtime->hw = line6pcm->properties->snd_line6_playback_hw;
+ runtime->hw = line6pcm->properties->playback_hw;
return 0;
}
@@ -448,118 +384,21 @@ static int snd_line6_playback_close(struct snd_pcm_substream *substream)
return 0;
}
-/* hw_params playback callback */
-static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int ret;
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- /* -- Florian Demski [FD] */
- /* don't ask me why, but this fixes the bug on my machine */
- if (line6pcm == NULL) {
- if (substream->pcm == NULL)
- return -ENOMEM;
- if (substream->pcm->private_data == NULL)
- return -ENOMEM;
- substream->private_data = substream->pcm->private_data;
- line6pcm = snd_pcm_substream_chip(substream);
- }
- /* -- [FD] end */
-
- ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
-
- if (ret < 0)
- return ret;
-
- ret = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (ret < 0) {
- line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
- return ret;
- }
-
- line6pcm->period_out = params_period_bytes(hw_params);
- return 0;
-}
-
-/* hw_free playback callback */
-static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
- return snd_pcm_lib_free_pages(substream);
-}
-
-/* trigger playback callback */
-int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
-{
- int err;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
-#ifdef CONFIG_PM
- case SNDRV_PCM_TRIGGER_RESUME:
-#endif
- err = line6_pcm_acquire(line6pcm,
- LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
-
- if (err < 0)
- return err;
-
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
-#ifdef CONFIG_PM
- case SNDRV_PCM_TRIGGER_SUSPEND:
-#endif
- err = line6_pcm_release(line6pcm,
- LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
-
- if (err < 0)
- return err;
-
- break;
-
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- set_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
- break;
-
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- clear_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* playback pointer callback */
-static snd_pcm_uframes_t
-snd_line6_playback_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- return line6pcm->pos_out_done;
-}
-
/* playback operators */
struct snd_pcm_ops snd_line6_playback_ops = {
.open = snd_line6_playback_open,
.close = snd_line6_playback_close,
.ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_line6_playback_hw_params,
- .hw_free = snd_line6_playback_hw_free,
+ .hw_params = snd_line6_hw_params,
+ .hw_free = snd_line6_hw_free,
.prepare = snd_line6_prepare,
.trigger = snd_line6_trigger,
- .pointer = snd_line6_playback_pointer,
+ .pointer = snd_line6_pointer,
};
int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
+ struct usb_line6 *line6 = line6pcm->line6;
int i;
/* create audio URBs and fill in constant values: */
@@ -567,18 +406,16 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm)
struct urb *urb;
/* URB for audio out: */
- urb = line6pcm->urb_audio_out[i] =
+ urb = line6pcm->out.urbs[i] =
usb_alloc_urb(LINE6_ISO_PACKETS, GFP_KERNEL);
- if (urb == NULL) {
- dev_err(line6pcm->line6->ifcdev, "Out of memory\n");
+ if (urb == NULL)
return -ENOMEM;
- }
- urb->dev = line6pcm->line6->usbdev;
+ urb->dev = line6->usbdev;
urb->pipe =
- usb_sndisocpipe(line6pcm->line6->usbdev,
- line6pcm->ep_audio_write &
+ usb_sndisocpipe(line6->usbdev,
+ line6->properties->ep_audio_w &
USB_ENDPOINT_NUMBER_MASK);
urb->transfer_flags = URB_ISO_ASAP;
urb->start_frame = -1;
diff --git a/drivers/staging/line6/playback.h b/sound/usb/line6/playback.h
index 743bd6f74c57..51fce29e8726 100644
--- a/drivers/staging/line6/playback.h
+++ b/sound/usb/line6/playback.h
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -30,12 +30,6 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
-extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
-extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
-extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
- *line6pcm);
-extern void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm);
-extern int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
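
change_volume() and add_monitor_signal() in playback.c apply the same 8.8 fixed-point gain: a volume of 256 (0x100) means unity ("maximum volume - no change") and the scaled product is clamped to the sample range. A worked sketch of that scaling for 16-bit samples (the helper name is illustrative only):

/* 8.8 fixed-point gain as used by change_volume()/add_monitor_signal():
 * volume 256 is unity; the product is clamped to the signed 16-bit range. */
static inline short example_scale_s16(short sample, int volume)
{
	int val = (sample * volume) >> 8;

	return clamp(val, -0x8000, 0x7fff);
}
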
diff --git a/drivers/staging/line6/pod.c b/sound/usb/line6/pod.c
index 44f4b2f98570..daf81d169a42 100644
--- a/drivers/staging/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -11,13 +11,76 @@
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include <sound/core.h>
#include <sound/control.h>
-#include "audio.h"
#include "capture.h"
#include "driver.h"
#include "playback.h"
-#include "pod.h"
+
+/*
+ Locate name in binary program dump
+*/
+#define POD_NAME_OFFSET 0
+#define POD_NAME_LENGTH 16
+
+/*
+ Other constants
+*/
+#define POD_CONTROL_SIZE 0x80
+#define POD_BUFSIZE_DUMPREQ 7
+#define POD_STARTUP_DELAY 1000
+
+/*
+ Stages of POD startup procedure
+*/
+enum {
+ POD_STARTUP_INIT = 1,
+ POD_STARTUP_VERSIONREQ,
+ POD_STARTUP_WORKQUEUE,
+ POD_STARTUP_SETUP,
+ POD_STARTUP_LAST = POD_STARTUP_SETUP - 1
+};
+
+enum {
+ LINE6_BASSPODXT,
+ LINE6_BASSPODXTLIVE,
+ LINE6_BASSPODXTPRO,
+ LINE6_POCKETPOD,
+ LINE6_PODXT,
+ LINE6_PODXTLIVE_POD,
+ LINE6_PODXTPRO,
+};
+
+struct usb_line6_pod {
+ /* Generic Line 6 USB data */
+ struct usb_line6 line6;
+
+ /* Instrument monitor level */
+ int monitor_level;
+
+ /* Timer for device initialization */
+ struct timer_list startup_timer;
+
+ /* Work handler for device initialization */
+ struct work_struct startup_work;
+
+ /* Current progress in startup procedure */
+ int startup_progress;
+
+ /* Serial number of device */
+ u32 serial_number;
+
+ /* Firmware version (x 100) */
+ int firmware_version;
+
+ /* Device ID */
+ int device_id;
+};
#define POD_SYSEX_CODE 3
#define POD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
@@ -66,15 +129,12 @@ static struct snd_ratden pod_ratden = {
};
static struct line6_pcm_properties pod_pcm_properties = {
- .snd_line6_playback_hw = {
+ .playback_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
-#ifdef CONFIG_PM
- SNDRV_PCM_INFO_RESUME |
-#endif
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_S24_3LE,
.rates = SNDRV_PCM_RATE_KNOT,
@@ -87,14 +147,11 @@ static struct line6_pcm_properties pod_pcm_properties = {
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 1024},
- .snd_line6_capture_hw = {
+ .capture_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
-#ifdef CONFIG_PM
- SNDRV_PCM_INFO_RESUME |
-#endif
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_S24_3LE,
.rates = SNDRV_PCM_RATE_KNOT,
@@ -107,7 +164,7 @@ static struct line6_pcm_properties pod_pcm_properties = {
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 1024},
- .snd_line6_rates = {
+ .rates = {
.nrats = 1,
.rats = &pod_ratden},
.bytes_per_frame = POD_BYTES_PER_FRAME
@@ -131,8 +188,9 @@ static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
/*
Process a completely received message.
*/
-void line6_pod_process_message(struct usb_line6_pod *pod)
+static void line6_pod_process_message(struct usb_line6 *line6)
{
+ struct usb_line6_pod *pod = (struct usb_line6_pod *) line6;
const unsigned char *buf = pod->line6.buffer_message;
if (memcmp(buf, pod_version_header, sizeof(pod_version_header)) == 0) {
@@ -159,15 +217,6 @@ void line6_pod_process_message(struct usb_line6_pod *pod)
}
/*
- Transmit PODxt Pro control parameter.
-*/
-void line6_pod_transmit_parameter(struct usb_line6_pod *pod, int param,
- u8 value)
-{
- line6_transmit_parameter(&pod->line6, param, value);
-}
-
-/*
Send system parameter (from integer).
*/
static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
@@ -198,7 +247,7 @@ static ssize_t serial_number_show(struct device *dev,
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
- return sprintf(buf, "%d\n", pod->serial_number);
+ return sprintf(buf, "%u\n", pod->serial_number);
}
/*
@@ -273,7 +322,7 @@ static void pod_startup4(struct work_struct *work)
line6_read_serial_number(&pod->line6, &pod->serial_number);
/* ALSA audio interface: */
- line6_register_audio(line6);
+ snd_card_register(line6->card);
}
/* POD special files: */
@@ -281,6 +330,18 @@ static DEVICE_ATTR_RO(device_id);
static DEVICE_ATTR_RO(firmware_version);
static DEVICE_ATTR_RO(serial_number);
+static struct attribute *pod_dev_attrs[] = {
+ &dev_attr_device_id.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_serial_number.attr,
+ NULL
+};
+
+static const struct attribute_group pod_dev_attr_group = {
+ .name = "pod",
+ .attrs = pod_dev_attrs,
+};
+
/* control info callback */
static int snd_pod_control_monitor_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -331,55 +392,33 @@ static struct snd_kcontrol_new pod_control_monitor = {
};
/*
- POD destructor.
+ POD device disconnected.
*/
-static void pod_destruct(struct usb_interface *interface)
+static void line6_pod_disconnect(struct usb_line6 *line6)
{
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct usb_line6_pod *pod = (struct usb_line6_pod *)line6;
- if (pod == NULL)
- return;
- line6_cleanup_audio(&pod->line6);
-
- del_timer(&pod->startup_timer);
+ del_timer_sync(&pod->startup_timer);
cancel_work_sync(&pod->startup_work);
}
/*
- Create sysfs entries.
-*/
-static int pod_create_files2(struct device *dev)
-{
- int err;
-
- CHECK_RETURN(device_create_file(dev, &dev_attr_device_id));
- CHECK_RETURN(device_create_file(dev, &dev_attr_firmware_version));
- CHECK_RETURN(device_create_file(dev, &dev_attr_serial_number));
- return 0;
-}
-
-/*
Try to init POD device.
*/
-static int pod_try_init(struct usb_interface *interface,
- struct usb_line6_pod *pod)
+static int pod_init(struct usb_line6 *line6,
+ const struct usb_device_id *id)
{
int err;
- struct usb_line6 *line6 = &pod->line6;
+ struct usb_line6_pod *pod = (struct usb_line6_pod *) line6;
+
+ line6->process_message = line6_pod_process_message;
+ line6->disconnect = line6_pod_disconnect;
init_timer(&pod->startup_timer);
INIT_WORK(&pod->startup_work, pod_startup4);
- if ((interface == NULL) || (pod == NULL))
- return -ENODEV;
-
/* create sysfs entries: */
- err = pod_create_files2(&interface->dev);
- if (err < 0)
- return err;
-
- /* initialize audio system: */
- err = line6_init_audio(line6);
+ err = snd_card_add_dev_attr(line6->card, &pod_dev_attr_group);
if (err < 0)
return err;
@@ -405,7 +444,7 @@ static int pod_try_init(struct usb_interface *interface,
handler.
*/
- if (pod->line6.properties->capabilities & LINE6_BIT_CONTROL) {
+ if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
pod->monitor_level = POD_SYSTEM_INVALID;
/* initiate startup procedure: */
@@ -415,44 +454,131 @@ static int pod_try_init(struct usb_interface *interface,
return 0;
}
-/*
- Init POD device (and clean up in case of failure).
-*/
-int line6_pod_init(struct usb_interface *interface, struct usb_line6_pod *pod)
-{
- int err = pod_try_init(interface, pod);
-
- if (err < 0)
- pod_destruct(interface);
+#define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod)
+#define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n)
+
+/* table of devices that work with this driver */
+static const struct usb_device_id pod_id_table[] = {
+ { LINE6_DEVICE(0x4250), .driver_info = LINE6_BASSPODXT },
+ { LINE6_DEVICE(0x4642), .driver_info = LINE6_BASSPODXTLIVE },
+ { LINE6_DEVICE(0x4252), .driver_info = LINE6_BASSPODXTPRO },
+ { LINE6_IF_NUM(0x5051, 1), .driver_info = LINE6_POCKETPOD },
+ { LINE6_DEVICE(0x5044), .driver_info = LINE6_PODXT },
+ { LINE6_IF_NUM(0x4650, 0), .driver_info = LINE6_PODXTLIVE_POD },
+ { LINE6_DEVICE(0x5050), .driver_info = LINE6_PODXTPRO },
+ {}
+};
- return err;
-}
+MODULE_DEVICE_TABLE(usb, pod_id_table);
+
+static const struct line6_properties pod_properties_table[] = {
+ [LINE6_BASSPODXT] = {
+ .id = "BassPODxt",
+ .name = "BassPODxt",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 5,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_BASSPODXTLIVE] = {
+ .id = "BassPODxtLive",
+ .name = "BassPODxt Live",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_BASSPODXTPRO] = {
+ .id = "BassPODxtPro",
+ .name = "BassPODxt Pro",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 5,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_POCKETPOD] = {
+ .id = "PocketPOD",
+ .name = "Pocket POD",
+ .capabilities = LINE6_CAP_CONTROL,
+ .altsetting = 0,
+ .ep_ctrl_r = 0x82,
+ .ep_ctrl_w = 0x02,
+ /* no audio channel */
+ },
+ [LINE6_PODXT] = {
+ .id = "PODxt",
+ .name = "PODxt",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 5,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODXTLIVE_POD] = {
+ .id = "PODxtLive",
+ .name = "PODxt Live",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODXTPRO] = {
+ .id = "PODxtPro",
+ .name = "PODxt Pro",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 5,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+};
/*
- POD device disconnected.
+ Probe USB device.
*/
-void line6_pod_disconnect(struct usb_interface *interface)
+static int pod_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
- struct usb_line6_pod *pod;
-
- if (interface == NULL)
- return;
- pod = usb_get_intfdata(interface);
-
- if (pod != NULL) {
- struct snd_line6_pcm *line6pcm = pod->line6.line6pcm;
- struct device *dev = &interface->dev;
+ return line6_probe(interface, id, "Line6-POD",
+ &pod_properties_table[id->driver_info],
+ pod_init, sizeof(struct usb_line6_pod));
+}
- if (line6pcm != NULL)
- line6_pcm_disconnect(line6pcm);
+static struct usb_driver pod_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = pod_probe,
+ .disconnect = line6_disconnect,
+#ifdef CONFIG_PM
+ .suspend = line6_suspend,
+ .resume = line6_resume,
+ .reset_resume = line6_resume,
+#endif
+ .id_table = pod_id_table,
+};
- if (dev != NULL) {
- /* remove sysfs entries: */
- device_remove_file(dev, &dev_attr_device_id);
- device_remove_file(dev, &dev_attr_firmware_version);
- device_remove_file(dev, &dev_attr_serial_number);
- }
- }
+module_usb_driver(pod_driver);
- pod_destruct(interface);
-}
+MODULE_DESCRIPTION("Line 6 POD USB driver");
+MODULE_LICENSE("GPL");
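
Under the new self-contained probe model, supporting another device of this family is mostly a matter of table entries fed to line6_probe(). A hypothetical sketch for an imaginary PODxt variant follows; the product ID 0x9999 and every field value are invented, and in a real patch the entries would extend the existing device-type enum, pod_id_table and pod_properties_table in pod.c:

/* Everything here is made up for illustration; only the structure
 * mirrors the tables above. */
enum { LINE6_PODXT_EXAMPLE = LINE6_PODXTPRO + 1 };

static const struct usb_device_id example_id_table[] = {
	{ LINE6_DEVICE(0x9999), .driver_info = LINE6_PODXT_EXAMPLE },
	{}
};

static const struct line6_properties example_properties = {
	.id = "PODxtExample",
	.name = "PODxt Example",
	.capabilities = LINE6_CAP_CONTROL
		| LINE6_CAP_PCM
		| LINE6_CAP_HWMON,
	.altsetting = 5,
	.ep_ctrl_r = 0x84,
	.ep_ctrl_w = 0x03,
	.ep_audio_r = 0x82,
	.ep_audio_w = 0x01,
};
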
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
new file mode 100644
index 000000000000..63dcaef41ac3
--- /dev/null
+++ b/sound/usb/line6/podhd.c
@@ -0,0 +1,192 @@
+/*
+ * Line 6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "driver.h"
+#include "pcm.h"
+
+enum {
+ LINE6_PODHD300,
+ LINE6_PODHD400,
+ LINE6_PODHD500_0,
+ LINE6_PODHD500_1,
+};
+
+#define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
+
+static struct snd_ratden podhd_ratden = {
+ .num_min = 48000,
+ .num_max = 48000,
+ .num_step = 1,
+ .den = 1,
+};
+
+static struct line6_pcm_properties podhd_pcm_properties = {
+ .playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .rates = {
+ .nrats = 1,
+ .rats = &podhd_ratden},
+ .bytes_per_frame = PODHD_BYTES_PER_FRAME
+};
+
+/*
+ Try to init POD HD device.
+*/
+static int podhd_init(struct usb_line6 *line6,
+ const struct usb_device_id *id)
+{
+ int err;
+
+ /* initialize MIDI subsystem: */
+ err = line6_init_midi(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize PCM subsystem: */
+ err = line6_init_pcm(line6, &podhd_pcm_properties);
+ if (err < 0)
+ return err;
+
+ /* register USB audio system: */
+ return snd_card_register(line6->card);
+}
+
+#define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod)
+#define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n)
+
+/* table of devices that work with this driver */
+static const struct usb_device_id podhd_id_table[] = {
+ { LINE6_DEVICE(0x5057), .driver_info = LINE6_PODHD300 },
+ { LINE6_DEVICE(0x5058), .driver_info = LINE6_PODHD400 },
+ { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 },
+ { LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, podhd_id_table);
+
+static const struct line6_properties podhd_properties_table[] = {
+ [LINE6_PODHD300] = {
+ .id = "PODHD300",
+ .name = "POD HD300",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 5,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODHD400] = {
+ .id = "PODHD400",
+ .name = "POD HD400",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 5,
+ .ep_ctrl_r = 0x84,
+ .ep_ctrl_w = 0x03,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODHD500_0] = {
+ .id = "PODHD500",
+ .name = "POD HD500",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ep_audio_r = 0x86,
+ .ep_audio_w = 0x02,
+ },
+ [LINE6_PODHD500_1] = {
+ .id = "PODHD500",
+ .name = "POD HD500",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ep_audio_r = 0x86,
+ .ep_audio_w = 0x02,
+ },
+};
+
+/*
+ Probe USB device.
+*/
+static int podhd_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ return line6_probe(interface, id, "Line6-PODHD",
+ &podhd_properties_table[id->driver_info],
+ podhd_init, sizeof(struct usb_line6));
+}
+
+static struct usb_driver podhd_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = podhd_probe,
+ .disconnect = line6_disconnect,
+#ifdef CONFIG_PM
+ .suspend = line6_suspend,
+ .resume = line6_resume,
+ .reset_resume = line6_resume,
+#endif
+ .id_table = podhd_id_table,
+};
+
+module_usb_driver(podhd_driver);
+
+MODULE_DESCRIPTION("Line 6 PODHD USB driver");
+MODULE_LICENSE("GPL");
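
podhd.c pins the PCM rate to 48 kHz through a one-entry snd_ratden table; the .rates field of line6_pcm_properties is what the open callbacks feed to snd_pcm_hw_constraint_ratdens(), as snd_line6_playback_open() in playback.c shows. A minimal sketch of that constraint step (the function name is illustrative only):

/* Sketch: apply a fixed-rate ratden table such as podhd_ratden, the
 * same call the common open callbacks make on each substream. */
static int example_apply_rate_constraint(struct snd_pcm_substream *substream,
					 struct line6_pcm_properties *props)
{
	return snd_pcm_hw_constraint_ratdens(substream->runtime, 0,
					     SNDRV_PCM_HW_PARAM_RATE,
					     &props->rates);
}
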
diff --git a/drivers/staging/line6/toneport.c b/sound/usb/line6/toneport.c
index 69437158d383..6d4c50c9b17d 100644
--- a/drivers/staging/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
* Emil Myhrman (emil.myhrman@gmail.com)
@@ -11,13 +11,58 @@
*/
#include <linux/wait.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <sound/core.h>
#include <sound/control.h>
-#include "audio.h"
#include "capture.h"
#include "driver.h"
#include "playback.h"
-#include "toneport.h"
+
+enum line6_device_type {
+ LINE6_GUITARPORT,
+ LINE6_PODSTUDIO_GX,
+ LINE6_PODSTUDIO_UX1,
+ LINE6_PODSTUDIO_UX2,
+ LINE6_TONEPORT_GX,
+ LINE6_TONEPORT_UX1,
+ LINE6_TONEPORT_UX2,
+};
+
+struct usb_line6_toneport;
+
+struct toneport_led {
+ struct led_classdev dev;
+ char name[64];
+ struct usb_line6_toneport *toneport;
+ bool registered;
+};
+
+struct usb_line6_toneport {
+ /* Generic Line 6 USB data */
+ struct usb_line6 line6;
+
+ /* Source selector */
+ int source;
+
+ /* Serial number of device */
+ u32 serial_number;
+
+ /* Firmware version (x 100) */
+ u8 firmware_version;
+
+ /* Timer for delayed PCM startup */
+ struct timer_list timer;
+
+ /* Device type */
+ enum line6_device_type type;
+
+ /* LED instances */
+ struct toneport_led leds[2];
+};
static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2);
@@ -31,15 +76,12 @@ static struct snd_ratden toneport_ratden = {
};
static struct line6_pcm_properties toneport_pcm_properties = {
- .snd_line6_playback_hw = {
+ .playback_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
-#ifdef CONFIG_PM
- SNDRV_PCM_INFO_RESUME |
-#endif
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_KNOT,
@@ -52,14 +94,11 @@ static struct line6_pcm_properties toneport_pcm_properties = {
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 1024},
- .snd_line6_capture_hw = {
+ .capture_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
-#ifdef CONFIG_PM
- SNDRV_PCM_INFO_RESUME |
-#endif
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_KNOT,
@@ -72,21 +111,12 @@ static struct line6_pcm_properties toneport_pcm_properties = {
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 1024},
- .snd_line6_rates = {
+ .rates = {
.nrats = 1,
.rats = &toneport_ratden},
.bytes_per_frame = 4
};
-/*
- For the led on Guitarport.
- Brightness goes from 0x00 to 0x26. Set a value above this to have led
- blink.
- (void cmd_0x02(byte red, byte green)
-*/
-static int led_red = 0x00;
-static int led_green = 0x26;
-
static const struct {
const char *name;
int code;
@@ -97,62 +127,6 @@ static const struct {
{"Inst & Mic", 0x0901}
};
-static bool toneport_has_led(short product)
-{
- return
- (product == LINE6_DEVID_GUITARPORT) ||
- (product == LINE6_DEVID_TONEPORT_GX);
- /* add your device here if you are missing support for the LEDs */
-}
-
-static void toneport_update_led(struct device *dev)
-{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_toneport *tp = usb_get_intfdata(interface);
- struct usb_line6 *line6;
-
- if (!tp)
- return;
-
- line6 = &tp->line6;
- if (line6)
- toneport_send_cmd(line6->usbdev, (led_red << 8) | 0x0002,
- led_green);
-}
-
-static ssize_t toneport_set_led_red(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int retval;
-
- retval = kstrtoint(buf, 10, &led_red);
- if (retval)
- return retval;
-
- toneport_update_led(dev);
- return count;
-}
-
-static ssize_t toneport_set_led_green(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int retval;
-
- retval = kstrtoint(buf, 10, &led_green);
- if (retval)
- return retval;
-
- toneport_update_led(dev);
- return count;
-}
-
-static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read,
- toneport_set_led_red);
-static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read,
- toneport_set_led_green);
-
static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
{
int ret;
@@ -195,16 +169,23 @@ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
+ int err;
if (ucontrol->value.integer.value[0] == line6pcm->volume_monitor)
return 0;
line6pcm->volume_monitor = ucontrol->value.integer.value[0];
- if (line6pcm->volume_monitor > 0)
- line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_MONITOR);
- else
- line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
+ if (line6pcm->volume_monitor > 0) {
+ err = line6_pcm_acquire(line6pcm, LINE6_STREAM_MONITOR);
+ if (err < 0) {
+ line6pcm->volume_monitor = 0;
+ line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR);
+ return err;
+ }
+ } else {
+ line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR);
+ }
return 1;
}
@@ -265,7 +246,7 @@ static void toneport_start_pcm(unsigned long arg)
struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
struct usb_line6 *line6 = &toneport->line6;
- line6_pcm_acquire(line6->line6pcm, LINE6_BITS_PCM_MONITOR);
+ line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR);
}
/* control definition */
@@ -291,15 +272,94 @@ static struct snd_kcontrol_new toneport_control_source = {
};
/*
- Toneport destructor.
+ For the led on Guitarport.
+ Brightness goes from 0x00 to 0x26. Set a value above this to have led
+ blink.
+ (void cmd_0x02(byte red, byte green)
*/
-static void toneport_destruct(struct usb_interface *interface)
+
+static bool toneport_has_led(struct usb_line6_toneport *toneport)
+{
+ switch (toneport->type) {
+ case LINE6_GUITARPORT:
+ case LINE6_TONEPORT_GX:
+ /* add your device here if you are missing support for the LEDs */
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static const char * const led_colors[2] = { "red", "green" };
+static const int led_init_vals[2] = { 0x00, 0x26 };
+
+static void toneport_update_led(struct usb_line6_toneport *toneport)
+{
+ toneport_send_cmd(toneport->line6.usbdev,
+ (toneport->leds[0].dev.brightness << 8) | 0x0002,
+ toneport->leds[1].dev.brightness);
+}
+
+static void toneport_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct toneport_led *leds =
+ container_of(led_cdev, struct toneport_led, dev);
+ toneport_update_led(leds->toneport);
+}
+
+static int toneport_init_leds(struct usb_line6_toneport *toneport)
{
- struct usb_line6_toneport *toneport = usb_get_intfdata(interface);
+ struct device *dev = &toneport->line6.usbdev->dev;
+ int i, err;
+
+ for (i = 0; i < 2; i++) {
+ struct toneport_led *led = &toneport->leds[i];
+ struct led_classdev *leddev = &led->dev;
+
+ led->toneport = toneport;
+ snprintf(led->name, sizeof(led->name), "%s::%s",
+ dev_name(dev), led_colors[i]);
+ leddev->name = led->name;
+ leddev->brightness = led_init_vals[i];
+ leddev->max_brightness = 0x26;
+ leddev->brightness_set = toneport_led_brightness_set;
+ err = led_classdev_register(dev, leddev);
+ if (err)
+ return err;
+ led->registered = true;
+ }
- if (toneport == NULL)
- return;
- line6_cleanup_audio(&toneport->line6);
+ return 0;
+}
+
+static void toneport_remove_leds(struct usb_line6_toneport *toneport)
+{
+ struct toneport_led *led;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ led = &toneport->leds[i];
+ if (!led->registered)
+ break;
+ led_classdev_unregister(&led->dev);
+ led->registered = false;
+ }
+}
+
+static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
+{
+ switch (toneport->type) {
+ case LINE6_TONEPORT_UX1:
+ case LINE6_TONEPORT_UX2:
+ case LINE6_PODSTUDIO_UX1:
+ case LINE6_PODSTUDIO_UX2:
+ return true;
+
+ default:
+ return false;
+ }
}
/*
@@ -310,7 +370,6 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
int ticks;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
- u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
/* sync time on device with host: */
ticks = (int)get_seconds();
@@ -320,38 +379,46 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
toneport_send_cmd(usbdev, 0x0301, 0x0000);
/* initialize source select: */
- switch (le16_to_cpu(usbdev->descriptor.idProduct)) {
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
+ if (toneport_has_source_select(toneport))
toneport_send_cmd(usbdev,
toneport_source_info[toneport->source].code,
0x0000);
- }
- if (toneport_has_led(idProduct))
- toneport_update_led(&usbdev->dev);
+ if (toneport_has_led(toneport))
+ toneport_update_led(toneport);
+
+ mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
}
/*
+ Toneport device disconnected.
+*/
+static void line6_toneport_disconnect(struct usb_line6 *line6)
+{
+ struct usb_line6_toneport *toneport =
+ (struct usb_line6_toneport *)line6;
+
+ del_timer_sync(&toneport->timer);
+
+ if (toneport_has_led(toneport))
+ toneport_remove_leds(toneport);
+}
+
+
+/*
Try to init Toneport device.
*/
-static int toneport_try_init(struct usb_interface *interface,
- struct usb_line6_toneport *toneport)
+static int toneport_init(struct usb_line6 *line6,
+ const struct usb_device_id *id)
{
int err;
- struct usb_line6 *line6 = &toneport->line6;
- struct usb_device *usbdev = line6->usbdev;
- u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
+ struct usb_line6_toneport *toneport = (struct usb_line6_toneport *) line6;
- if ((interface == NULL) || (toneport == NULL))
- return -ENODEV;
+ toneport->type = id->driver_info;
+ setup_timer(&toneport->timer, toneport_start_pcm,
+ (unsigned long)toneport);
- /* initialize audio system: */
- err = line6_init_audio(line6);
- if (err < 0)
- return err;
+ line6->disconnect = line6_toneport_disconnect;
/* initialize PCM subsystem: */
err = line6_init_pcm(line6, &toneport_pcm_properties);
@@ -366,11 +433,7 @@ static int toneport_try_init(struct usb_interface *interface,
return err;
/* register source select control: */
- switch (le16_to_cpu(usbdev->descriptor.idProduct)) {
- case LINE6_DEVID_TONEPORT_UX1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_PODSTUDIO_UX1:
- case LINE6_DEVID_PODSTUDIO_UX2:
+ if (toneport_has_source_select(toneport)) {
err =
snd_ctl_add(line6->card,
snd_ctl_new1(&toneport_control_source,
@@ -379,82 +442,139 @@ static int toneport_try_init(struct usb_interface *interface,
return err;
}
- /* register audio system: */
- err = line6_register_audio(line6);
- if (err < 0)
- return err;
-
line6_read_serial_number(line6, &toneport->serial_number);
line6_read_data(line6, 0x80c2, &toneport->firmware_version, 1);
- if (toneport_has_led(idProduct)) {
- CHECK_RETURN(device_create_file
- (&interface->dev, &dev_attr_led_red));
- CHECK_RETURN(device_create_file
- (&interface->dev, &dev_attr_led_green));
+ if (toneport_has_led(toneport)) {
+ err = toneport_init_leds(toneport);
+ if (err < 0)
+ return err;
}
toneport_setup(toneport);
- init_timer(&toneport->timer);
- toneport->timer.expires = jiffies + TONEPORT_PCM_DELAY * HZ;
- toneport->timer.function = toneport_start_pcm;
- toneport->timer.data = (unsigned long)toneport;
- add_timer(&toneport->timer);
-
- return 0;
+ /* register audio system: */
+ return snd_card_register(line6->card);
}
+#ifdef CONFIG_PM
/*
- Init Toneport device (and clean up in case of failure).
+ Resume Toneport device after reset.
*/
-int line6_toneport_init(struct usb_interface *interface,
- struct usb_line6_toneport *toneport)
+static int toneport_reset_resume(struct usb_interface *interface)
{
- int err = toneport_try_init(interface, toneport);
+ toneport_setup(usb_get_intfdata(interface));
+ return line6_resume(interface);
+}
+#endif
- if (err < 0)
- toneport_destruct(interface);
+#define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod)
+#define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n)
+
+/* table of devices that work with this driver */
+static const struct usb_device_id toneport_id_table[] = {
+ { LINE6_DEVICE(0x4750), .driver_info = LINE6_GUITARPORT },
+ { LINE6_DEVICE(0x4153), .driver_info = LINE6_PODSTUDIO_GX },
+ { LINE6_DEVICE(0x4150), .driver_info = LINE6_PODSTUDIO_UX1 },
+ { LINE6_IF_NUM(0x4151, 0), .driver_info = LINE6_PODSTUDIO_UX2 },
+ { LINE6_DEVICE(0x4147), .driver_info = LINE6_TONEPORT_GX },
+ { LINE6_DEVICE(0x4141), .driver_info = LINE6_TONEPORT_UX1 },
+ { LINE6_IF_NUM(0x4142, 0), .driver_info = LINE6_TONEPORT_UX2 },
+ {}
+};
- return err;
-}
+MODULE_DEVICE_TABLE(usb, toneport_id_table);
+
+static const struct line6_properties toneport_properties_table[] = {
+ [LINE6_GUITARPORT] = {
+ .id = "GuitarPort",
+ .name = "GuitarPort",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* 1..4 seem to be ok */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODSTUDIO_GX] = {
+ .id = "PODStudioGX",
+ .name = "POD Studio GX",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* 1..4 seem to be ok */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODSTUDIO_UX1] = {
+ .id = "PODStudioUX1",
+ .name = "POD Studio UX1",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* 1..4 seem to be ok */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_PODSTUDIO_UX2] = {
+ .id = "PODStudioUX2",
+ .name = "POD Studio UX2",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* defaults to 44.1kHz, 16-bit */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_TONEPORT_GX] = {
+ .id = "TonePortGX",
+ .name = "TonePort GX",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* 1..4 seem to be ok */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_TONEPORT_UX1] = {
+ .id = "TonePortUX1",
+ .name = "TonePort UX1",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* 1..4 seem to be ok */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_TONEPORT_UX2] = {
+ .id = "TonePortUX2",
+ .name = "TonePort UX2",
+ .capabilities = LINE6_CAP_PCM,
+ .altsetting = 2, /* defaults to 44.1kHz, 16-bit */
+ /* no control channel */
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+};
/*
- Resume Toneport device after reset.
+ Probe USB device.
*/
-void line6_toneport_reset_resume(struct usb_line6_toneport *toneport)
+static int toneport_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
- toneport_setup(toneport);
+ return line6_probe(interface, id, "Line6-TonePort",
+ &toneport_properties_table[id->driver_info],
+ toneport_init, sizeof(struct usb_line6_toneport));
}
-/*
- Toneport device disconnected.
-*/
-void line6_toneport_disconnect(struct usb_interface *interface)
-{
- struct usb_line6_toneport *toneport;
- u16 idProduct;
-
- if (interface == NULL)
- return;
-
- toneport = usb_get_intfdata(interface);
- del_timer_sync(&toneport->timer);
- idProduct = le16_to_cpu(toneport->line6.usbdev->descriptor.idProduct);
-
- if (toneport_has_led(idProduct)) {
- device_remove_file(&interface->dev, &dev_attr_led_red);
- device_remove_file(&interface->dev, &dev_attr_led_green);
- }
-
- if (toneport != NULL) {
- struct snd_line6_pcm *line6pcm = toneport->line6.line6pcm;
+static struct usb_driver toneport_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = toneport_probe,
+ .disconnect = line6_disconnect,
+#ifdef CONFIG_PM
+ .suspend = line6_suspend,
+ .resume = line6_resume,
+ .reset_resume = toneport_reset_resume,
+#endif
+ .id_table = toneport_id_table,
+};
- if (line6pcm != NULL) {
- line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
- line6_pcm_disconnect(line6pcm);
- }
- }
+module_usb_driver(toneport_driver);
- toneport_destruct(interface);
-}
+MODULE_DESCRIPTION("TonePort USB driver");
+MODULE_LICENSE("GPL");
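
The hunk above replaces the ad-hoc led_red/led_green sysfs attributes with LED class devices, which show up as /sys/class/leds/<devname>::red and <devname>::green. As a rough user-space illustration of driving that interface (the "1-1.2" device name is hypothetical; the real one comes from dev_name() at probe time):

#include <stdio.h>

int main(void)
{
        /* Path is hypothetical; the actual name is assigned at probe time. */
        FILE *f = fopen("/sys/class/leds/1-1.2::red/brightness", "w");

        if (!f)
                return 1;
        /* Per the driver comment: valid brightness is 0x00..0x26. */
        fprintf(f, "%d\n", 0x13);
        fclose(f);
        return 0;
}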
diff --git a/drivers/staging/line6/variax.c b/sound/usb/line6/variax.c
index ae2be99f9a92..ddc23ddf0750 100644
--- a/drivers/staging/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -1,5 +1,5 @@
/*
- * Line6 Linux USB driver - 0.9.1beta
+ * Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
@@ -10,10 +10,53 @@
*/
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/usb.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <sound/core.h>
-#include "audio.h"
#include "driver.h"
-#include "variax.h"
+
+#define VARIAX_STARTUP_DELAY1 1000
+#define VARIAX_STARTUP_DELAY3 100
+#define VARIAX_STARTUP_DELAY4 100
+
+/*
+ Stages of Variax startup procedure
+*/
+enum {
+ VARIAX_STARTUP_INIT = 1,
+ VARIAX_STARTUP_VERSIONREQ,
+ VARIAX_STARTUP_WAIT,
+ VARIAX_STARTUP_ACTIVATE,
+ VARIAX_STARTUP_WORKQUEUE,
+ VARIAX_STARTUP_SETUP,
+ VARIAX_STARTUP_LAST = VARIAX_STARTUP_SETUP - 1
+};
+
+enum {
+ LINE6_PODXTLIVE_VARIAX,
+ LINE6_VARIAX
+};
+
+struct usb_line6_variax {
+ /* Generic Line 6 USB data */
+ struct usb_line6 line6;
+
+ /* Buffer for activation code */
+ unsigned char *buffer_activate;
+
+ /* Handler for device initialization */
+ struct work_struct startup_work;
+
+ /* Timers for device initialization */
+ struct timer_list startup_timer1;
+ struct timer_list startup_timer2;
+
+ /* Current progress in startup procedure */
+ int startup_progress;
+};
#define VARIAX_OFFSET_ACTIVATE 7
@@ -124,14 +167,15 @@ static void variax_startup6(struct work_struct *work)
CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_SETUP);
/* ALSA audio interface: */
- line6_register_audio(&variax->line6);
+ snd_card_register(variax->line6.card);
}
/*
Process a completely received message.
*/
-void line6_variax_process_message(struct usb_line6_variax *variax)
+static void line6_variax_process_message(struct usb_line6 *line6)
{
+ struct usb_line6_variax *variax = (struct usb_line6_variax *) line6;
const unsigned char *buf = variax->line6.buffer_message;
switch (buf[0]) {
@@ -155,13 +199,9 @@ void line6_variax_process_message(struct usb_line6_variax *variax)
/*
Variax destructor.
*/
-static void variax_destruct(struct usb_interface *interface)
+static void line6_variax_disconnect(struct usb_line6 *line6)
{
- struct usb_line6_variax *variax = usb_get_intfdata(interface);
-
- if (variax == NULL)
- return;
- line6_cleanup_audio(&variax->line6);
+ struct usb_line6_variax *variax = (struct usb_line6_variax *)line6;
del_timer(&variax->startup_timer1);
del_timer(&variax->startup_timer2);
@@ -173,31 +213,25 @@ static void variax_destruct(struct usb_interface *interface)
/*
Try to init workbench device.
*/
-static int variax_try_init(struct usb_interface *interface,
- struct usb_line6_variax *variax)
+static int variax_init(struct usb_line6 *line6,
+ const struct usb_device_id *id)
{
+ struct usb_line6_variax *variax = (struct usb_line6_variax *) line6;
int err;
+ line6->process_message = line6_variax_process_message;
+ line6->disconnect = line6_variax_disconnect;
+
init_timer(&variax->startup_timer1);
init_timer(&variax->startup_timer2);
INIT_WORK(&variax->startup_work, variax_startup6);
- if ((interface == NULL) || (variax == NULL))
- return -ENODEV;
-
/* initialize USB buffers: */
variax->buffer_activate = kmemdup(variax_activate,
sizeof(variax_activate), GFP_KERNEL);
- if (variax->buffer_activate == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (variax->buffer_activate == NULL)
return -ENOMEM;
- }
-
- /* initialize audio system: */
- err = line6_init_audio(&variax->line6);
- if (err < 0)
- return err;
/* initialize MIDI subsystem: */
err = line6_init_midi(&variax->line6);
@@ -209,27 +243,64 @@ static int variax_try_init(struct usb_interface *interface,
return 0;
}
-/*
- Init workbench device (and clean up in case of failure).
-*/
-int line6_variax_init(struct usb_interface *interface,
- struct usb_line6_variax *variax)
-{
- int err = variax_try_init(interface, variax);
+#define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod)
+#define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n)
- if (err < 0)
- variax_destruct(interface);
+/* table of devices that work with this driver */
+static const struct usb_device_id variax_id_table[] = {
+ { LINE6_IF_NUM(0x4650, 1), .driver_info = LINE6_PODXTLIVE_VARIAX },
+ { LINE6_DEVICE(0x534d), .driver_info = LINE6_VARIAX },
+ {}
+};
- return err;
-}
+MODULE_DEVICE_TABLE(usb, variax_id_table);
+
+static const struct line6_properties variax_properties_table[] = {
+ [LINE6_PODXTLIVE_VARIAX] = {
+ .id = "PODxtLive",
+ .name = "PODxt Live",
+ .capabilities = LINE6_CAP_CONTROL,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x86,
+ .ep_ctrl_w = 0x05,
+ .ep_audio_r = 0x82,
+ .ep_audio_w = 0x01,
+ },
+ [LINE6_VARIAX] = {
+ .id = "Variax",
+ .name = "Variax Workbench",
+ .capabilities = LINE6_CAP_CONTROL,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x82,
+ .ep_ctrl_w = 0x01,
+ /* no audio channel */
+ }
+};
/*
- Workbench device disconnected.
+ Probe USB device.
*/
-void line6_variax_disconnect(struct usb_interface *interface)
+static int variax_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
- if (interface == NULL)
- return;
-
- variax_destruct(interface);
+ return line6_probe(interface, id, "Line6-Variax",
+ &variax_properties_table[id->driver_info],
+ variax_init, sizeof(struct usb_line6_variax));
}
+
+static struct usb_driver variax_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = variax_probe,
+ .disconnect = line6_disconnect,
+#ifdef CONFIG_PM
+ .suspend = line6_suspend,
+ .resume = line6_resume,
+ .reset_resume = line6_resume,
+#endif
+ .id_table = variax_id_table,
+};
+
+module_usb_driver(variax_driver);
+
+MODULE_DESCRIPTION("Vairax Workbench USB driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 5bfb695547f8..417ebb11cf48 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -2292,14 +2292,13 @@ int snd_usbmidi_create(struct snd_card *card,
umidi->iface = iface;
umidi->quirk = quirk;
umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
- init_timer(&umidi->error_timer);
spin_lock_init(&umidi->disc_lock);
init_rwsem(&umidi->disc_rwsem);
mutex_init(&umidi->mutex);
umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
le16_to_cpu(umidi->dev->descriptor.idProduct));
- umidi->error_timer.function = snd_usbmidi_error_timer;
- umidi->error_timer.data = (unsigned long)umidi;
+ setup_timer(&umidi->error_timer, snd_usbmidi_error_timer,
+ (unsigned long)umidi);
/* detect the endpoint(s) to use */
memset(endpoints, 0, sizeof(endpoints));
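
This hunk, like the toneport change above, folds the open-coded init_timer()/function/data assignments into setup_timer(). A minimal sketch of the equivalence, with illustrative names that are not taken from either driver:

#include <linux/timer.h>

struct my_ctx {
        struct timer_list timer;
};

static void my_timer_fn(unsigned long data)
{
        /* callback; 'data' carries the context pointer, as in the patches */
}

static void my_ctx_init(struct my_ctx *ctx)
{
        /*
         * Old open-coded form:
         *   init_timer(&ctx->timer);
         *   ctx->timer.function = my_timer_fn;
         *   ctx->timer.data = (unsigned long)ctx;
         * Equivalent helper used by the conversions above:
         */
        setup_timer(&ctx->timer, my_timer_fn, (unsigned long)ctx);
}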
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 41650d5b93b7..3e2ef61c627b 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -913,6 +913,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 0d8aba5fe1a8..b4ef410e5a98 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1464,6 +1464,14 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
subs->last_frame_number = usb_get_current_frame_number(subs->dev);
subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
+ if (subs->trigger_tstamp_pending_update) {
+ /* this is the first actual URB submitted,
+ * update trigger timestamp to reflect actual start time
+ */
+ snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
+ subs->trigger_tstamp_pending_update = false;
+ }
+
spin_unlock_irqrestore(&subs->lock, flags);
urb->transfer_buffer_length = bytes;
if (period_elapsed)
@@ -1550,6 +1558,7 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
+ subs->trigger_tstamp_pending_update = true;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
subs->data_endpoint->prepare_data_urb = prepare_playback_urb;
subs->data_endpoint->retire_data_urb = retire_playback_urb;
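
The playback change above only marks the trigger timestamp as pending at SNDRV_PCM_TRIGGER_START and takes it when the first URB is actually prepared, so it reflects the real stream start. A rough sketch of that deferral pattern, with made-up names:

#include <stdbool.h>
#include <time.h>

struct stream {
        bool trigger_tstamp_pending;
        struct timespec trigger_tstamp;
};

static void on_trigger_start(struct stream *s)
{
        s->trigger_tstamp_pending = true;       /* defer the timestamp */
}

static void on_first_urb(struct stream *s)
{
        if (s->trigger_tstamp_pending) {
                /* stamp now, when data actually starts flowing */
                clock_gettime(CLOCK_MONOTONIC, &s->trigger_tstamp);
                s->trigger_tstamp_pending = false;
        }
}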
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 0a598af9b38b..67d476548dcf 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2486,6 +2486,28 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+{
+ /* Akai MPC Element */
+ USB_DEVICE(0x09e8, 0x0021),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = & (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_MIDI_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
/* TerraTec devices */
{
USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
diff --git a/sound/usb/usx2y/usb_stream.h b/sound/usb/usx2y/usb_stream.h
index 4dd74ab1e9cc..90369001eab6 100644
--- a/sound/usb/usx2y/usb_stream.h
+++ b/sound/usb/usx2y/usb_stream.h
@@ -1,76 +1,7 @@
-/*
- * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+#ifndef __USB_STREAM_H
+#define __USB_STREAM_H
-#define USB_STREAM_INTERFACE_VERSION 2
-
-#define SNDRV_USB_STREAM_IOCTL_SET_PARAMS \
- _IOW('H', 0x90, struct usb_stream_config)
-
-struct usb_stream_packet {
- unsigned offset;
- unsigned length;
-};
-
-
-struct usb_stream_config {
- unsigned version;
- unsigned sample_rate;
- unsigned period_frames;
- unsigned frame_size;
-};
-
-struct usb_stream {
- struct usb_stream_config cfg;
- unsigned read_size;
- unsigned write_size;
-
- int period_size;
-
- unsigned state;
-
- int idle_insize;
- int idle_outsize;
- int sync_packet;
- unsigned insize_done;
- unsigned periods_done;
- unsigned periods_polled;
-
- struct usb_stream_packet outpacket[2];
- unsigned inpackets;
- unsigned inpacket_head;
- unsigned inpacket_split;
- unsigned inpacket_split_at;
- unsigned next_inpacket_split;
- unsigned next_inpacket_split_at;
- struct usb_stream_packet inpacket[0];
-};
-
-enum usb_stream_state {
- usb_stream_invalid,
- usb_stream_stopped,
- usb_stream_sync0,
- usb_stream_sync1,
- usb_stream_ready,
- usb_stream_running,
- usb_stream_xrun,
-};
-
-#if __KERNEL__
+#include <uapi/sound/usb_stream.h>
#define USB_STREAM_NURBS 4
#define USB_STREAM_URBDEPTH 4
@@ -108,5 +39,4 @@ void usb_stream_free(struct usb_stream_kernel *);
int usb_stream_start(struct usb_stream_kernel *);
void usb_stream_stop(struct usb_stream_kernel *);
-
-#endif
+#endif /* __USB_STREAM_H */
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index bd22f786a60c..99ffe61051a7 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -5,9 +5,9 @@ PTHREAD_LIBS = -lpthread
WARNINGS = -Wall -Wextra
CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS)
-all: hv_kvp_daemon hv_vss_daemon
+all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
%: %.c
$(CC) $(CFLAGS) -o $@ $^
clean:
- $(RM) hv_kvp_daemon hv_vss_daemon
+ $(RM) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index f437d739f37d..9445d8f264a4 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -43,15 +43,9 @@ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
int error = HV_E_FAIL;
char *q, *p;
- /*
- * If possile append a path seperator to the path.
- */
- if (strlen((char *)smsg->path_name) < (W_MAX_PATH - 2))
- strcat((char *)smsg->path_name, "/");
-
p = (char *)smsg->path_name;
snprintf(target_fname, sizeof(target_fname), "%s/%s",
- (char *)smsg->path_name, smsg->file_name);
+ (char *)smsg->path_name, (char *)smsg->file_name);
syslog(LOG_INFO, "Target file name: %s", target_fname);
/*
@@ -137,7 +131,7 @@ void print_usage(char *argv[])
int main(int argc, char *argv[])
{
- int fd, fcopy_fd, len;
+ int fcopy_fd, len;
int error;
int daemonize = 1, long_index = 0, opt;
int version = FCOPY_CURRENT_VERSION;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 6a6432a20a1d..408bb076a234 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -147,7 +147,6 @@ static void kvp_release_lock(int pool)
static void kvp_update_file(int pool)
{
FILE *filep;
- size_t bytes_written;
/*
* We are going to write our in-memory registry out to
@@ -163,8 +162,7 @@ static void kvp_update_file(int pool)
exit(EXIT_FAILURE);
}
- bytes_written = fwrite(kvp_file_info[pool].records,
- sizeof(struct kvp_record),
+ fwrite(kvp_file_info[pool].records, sizeof(struct kvp_record),
kvp_file_info[pool].num_records, filep);
if (ferror(filep) || fclose(filep)) {
@@ -310,7 +308,7 @@ static int kvp_file_init(void)
return 0;
}
-static int kvp_key_delete(int pool, const char *key, int key_size)
+static int kvp_key_delete(int pool, const __u8 *key, int key_size)
{
int i;
int j, k;
@@ -353,8 +351,8 @@ static int kvp_key_delete(int pool, const char *key, int key_size)
return 1;
}
-static int kvp_key_add_or_modify(int pool, const char *key, int key_size, const char *value,
- int value_size)
+static int kvp_key_add_or_modify(int pool, const __u8 *key, int key_size,
+ const __u8 *value, int value_size)
{
int i;
int num_records;
@@ -407,7 +405,7 @@ static int kvp_key_add_or_modify(int pool, const char *key, int key_size, const
return 0;
}
-static int kvp_get_value(int pool, const char *key, int key_size, char *value,
+static int kvp_get_value(int pool, const __u8 *key, int key_size, __u8 *value,
int value_size)
{
int i;
@@ -439,8 +437,8 @@ static int kvp_get_value(int pool, const char *key, int key_size, char *value,
return 1;
}
-static int kvp_pool_enumerate(int pool, int index, char *key, int key_size,
- char *value, int value_size)
+static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size,
+ __u8 *value, int value_size)
{
struct kvp_record *record;
@@ -661,7 +659,7 @@ static char *kvp_if_name_to_mac(char *if_name)
char *p, *x;
char buf[256];
char addr_file[256];
- int i;
+ unsigned int i;
char *mac_addr = NULL;
snprintf(addr_file, sizeof(addr_file), "%s%s%s", "/sys/class/net/",
@@ -700,7 +698,7 @@ static char *kvp_mac_to_if_name(char *mac)
char buf[256];
char *kvp_net_dir = "/sys/class/net/";
char dev_id[256];
- int i;
+ unsigned int i;
dir = opendir(kvp_net_dir);
if (dir == NULL)
@@ -750,7 +748,7 @@ static char *kvp_mac_to_if_name(char *mac)
static void kvp_process_ipconfig_file(char *cmd,
- char *config_buf, int len,
+ char *config_buf, unsigned int len,
int element_size, int offset)
{
char buf[256];
@@ -768,7 +766,7 @@ static void kvp_process_ipconfig_file(char *cmd,
if (offset == 0)
memset(config_buf, 0, len);
while ((p = fgets(buf, sizeof(buf), file)) != NULL) {
- if ((len - strlen(config_buf)) < (element_size + 1))
+ if (len < strlen(config_buf) + element_size + 1)
break;
x = strchr(p, '\n');
@@ -916,7 +914,7 @@ static int kvp_process_ip_address(void *addrp,
static int
kvp_get_ip_info(int family, char *if_name, int op,
- void *out_buffer, int length)
+ void *out_buffer, unsigned int length)
{
struct ifaddrs *ifap;
struct ifaddrs *curp;
@@ -1019,8 +1017,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
weight += hweight32(&w[i]);
sprintf(cidr_mask, "/%d", weight);
- if ((length - sn_offset) <
- (strlen(cidr_mask) + 1))
+ if (length < sn_offset + strlen(cidr_mask) + 1)
goto gather_ipaddr;
if (sn_offset == 0)
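
The rewritten comparisons in kvp_process_ipconfig_file() and kvp_get_ip_info() avoid subtracting a string length from an unsigned limit, which would wrap around instead of going negative and defeat the bounds check. A small stand-alone illustration with arbitrary values:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char config_buf[8] = "1234567";         /* strlen() == 7 */
        unsigned int len = 4;                   /* limit smaller than content */
        unsigned int element_size = 2;

        /* Old style: 4 - 7 wraps to a huge unsigned value, check passes. */
        printf("wrapped difference: %zu\n", len - strlen(config_buf));

        /* New style: pure addition on the right, check works as intended. */
        if (len < strlen(config_buf) + element_size + 1)
                printf("new check correctly refuses to append\n");
        return 0;
}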
@@ -1308,16 +1305,17 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
if (error)
goto setval_error;
+ /*
+ * The dhcp_enabled flag is only for IPv4. In the case the host only
+ * injects an IPv6 address, the flag is true, but we still need to
+ * proceed to parse and pass the IPv6 information to the
+ * distro-specific script hv_set_ifconfig.
+ */
if (new_val->dhcp_enabled) {
error = kvp_write_file(file, "BOOTPROTO", "", "dhcp");
if (error)
goto setval_error;
- /*
- * We are done!.
- */
- goto setval_done;
-
} else {
error = kvp_write_file(file, "BOOTPROTO", "", "none");
if (error)
@@ -1345,7 +1343,6 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
if (error)
goto setval_error;
-setval_done:
fclose(file);
/*
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 6eedba1f7732..653d1bad77de 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -22,6 +22,8 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <asm-generic/bitops/hweight.h>
+
#include <asm-generic/bitops/atomic.h>
#endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/arch_hweight.h b/tools/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..318bb2b202b0
--- /dev/null
+++ b/tools/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/arch_hweight.h"
diff --git a/tools/include/asm-generic/bitops/const_hweight.h b/tools/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..0afd644aff83
--- /dev/null
+++ b/tools/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/const_hweight.h"
diff --git a/tools/include/asm-generic/bitops/hweight.h b/tools/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000000..290120c01a8e
--- /dev/null
+++ b/tools/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,7 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 26005a15e7e2..5ad9ee1dd7f6 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -1,9 +1,9 @@
#ifndef _TOOLS_LINUX_BITOPS_H_
#define _TOOLS_LINUX_BITOPS_H_
+#include <asm/types.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
-#include <asm/hweight.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
@@ -19,6 +19,11 @@
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
/*
* Include this here because some architectures need generic_ffs/fls in
* scope
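
The __sw_hweight*() declarations above back the generic hweight ("Hamming weight", i.e. population count) helpers that tools code now pulls in through asm-generic/bitops/hweight.h. A stand-alone reference version, for illustration only:

#include <stdio.h>

static unsigned int hweight32_ref(unsigned int w)
{
        unsigned int count = 0;

        while (w) {
                w &= w - 1;     /* clear the lowest set bit */
                count++;
        }
        return count;
}

int main(void)
{
        printf("%u\n", hweight32_ref(0xf0f0)); /* prints 8 */
        return 0;
}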
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index a74fba6d7743..d2b18e887071 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
@@ -67,7 +68,7 @@ int debugfs_valid_mountpoint(const char *debugfs)
if (statfs(debugfs, &st_fs) < 0)
return -ENOENT;
- else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+ else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
return -ENOENT;
return 0;
@@ -98,3 +99,45 @@ char *debugfs_mount(const char *mountpoint)
out:
return debugfs_mountpoint;
}
+
+int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
+{
+ char sbuf[128];
+
+ switch (err) {
+ case ENOENT:
+ if (debugfs_found) {
+ snprintf(buf, size,
+ "Error:\tFile %s/%s not found.\n"
+ "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
+ debugfs_mountpoint, filename);
+ break;
+ }
+ snprintf(buf, size, "%s",
+ "Error:\tUnable to find debugfs\n"
+ "Hint:\tWas your kernel compiled with debugfs support?\n"
+ "Hint:\tIs the debugfs filesystem mounted?\n"
+ "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+ break;
+ case EACCES:
+ snprintf(buf, size,
+ "Error:\tNo permissions to read %s/%s\n"
+ "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+ debugfs_mountpoint, filename, debugfs_mountpoint);
+ break;
+ default:
+ snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+ break;
+ }
+
+ return 0;
+}
+
+int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
+{
+ char path[PATH_MAX];
+
+ snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
+
+ return debugfs__strerror_open(err, buf, size, path);
+}
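
A rough sketch of how a perf tool might use the helper added above; the error value and the sched/sched_switch tracepoint name are examples only:

#include <errno.h>
#include <stdio.h>
#include "debugfs.h"    /* tools/lib/api/fs/debugfs.h */

int main(void)
{
        char msg[512];

        /* e.g. opening tracing/events/sched/sched_switch failed with EACCES */
        debugfs__strerror_open_tp(EACCES, msg, sizeof(msg),
                                  "sched", "sched_switch");
        fprintf(stderr, "%s\n", msg);
        return 0;
}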
diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h
index f19d3df9609d..0739881a9897 100644
--- a/tools/lib/api/fs/debugfs.h
+++ b/tools/lib/api/fs/debugfs.h
@@ -26,4 +26,7 @@ char *debugfs_mount(const char *mountpoint);
extern char debugfs_mountpoint[];
+int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
+int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
+
#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 65d9be3f9887..128ef6332a6b 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -79,7 +79,7 @@ static int fs__valid_mount(const char *fs, long magic)
if (statfs(fs, &st_fs) < 0)
return -ENOENT;
- else if (st_fs.f_type != magic)
+ else if ((long)st_fs.f_type != magic)
return -ENOENT;
return 0;
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
new file mode 100644
index 000000000000..cc0e7a9f99e3
--- /dev/null
+++ b/tools/lib/lockdep/.gitignore
@@ -0,0 +1 @@
+liblockdep.so.*
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 52f9279c6c13..4b866c54f624 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -104,7 +104,7 @@ N =
export Q VERBOSE
-INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
+INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index cf3a44bf1ec3..afe20ed9fac8 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -32,6 +32,7 @@
#include <stdint.h>
#include <limits.h>
+#include <netinet/ip6.h>
#include "event-parse.h"
#include "event-utils.h"
@@ -4149,6 +4150,324 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
}
+static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
+{
+ const char *fmt;
+
+ if (i == 'i')
+ fmt = "%03d.%03d.%03d.%03d";
+ else
+ fmt = "%d.%d.%d.%d";
+
+ trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
+}
+
+static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+ return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
+ (unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
+}
+
+static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
+}
+
+static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
+{
+ int i, j, range;
+ unsigned char zerolength[8];
+ int longest = 1;
+ int colonpos = -1;
+ uint16_t word;
+ uint8_t hi, lo;
+ bool needcolon = false;
+ bool useIPv4;
+ struct in6_addr in6;
+
+ memcpy(&in6, addr, sizeof(struct in6_addr));
+
+ useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
+
+ memset(zerolength, 0, sizeof(zerolength));
+
+ if (useIPv4)
+ range = 6;
+ else
+ range = 8;
+
+ /* find position of longest 0 run */
+ for (i = 0; i < range; i++) {
+ for (j = i; j < range; j++) {
+ if (in6.s6_addr16[j] != 0)
+ break;
+ zerolength[i]++;
+ }
+ }
+ for (i = 0; i < range; i++) {
+ if (zerolength[i] > longest) {
+ longest = zerolength[i];
+ colonpos = i;
+ }
+ }
+ if (longest == 1) /* don't compress a single 0 */
+ colonpos = -1;
+
+ /* emit address */
+ for (i = 0; i < range; i++) {
+ if (i == colonpos) {
+ if (needcolon || i == 0)
+ trace_seq_printf(s, ":");
+ trace_seq_printf(s, ":");
+ needcolon = false;
+ i += longest - 1;
+ continue;
+ }
+ if (needcolon) {
+ trace_seq_printf(s, ":");
+ needcolon = false;
+ }
+ /* hex u16 without leading 0s */
+ word = ntohs(in6.s6_addr16[i]);
+ hi = word >> 8;
+ lo = word & 0xff;
+ if (hi)
+ trace_seq_printf(s, "%x%02x", hi, lo);
+ else
+ trace_seq_printf(s, "%x", lo);
+
+ needcolon = true;
+ }
+
+ if (useIPv4) {
+ if (needcolon)
+ trace_seq_printf(s, ":");
+ print_ip4_addr(s, 'I', &in6.s6_addr[12]);
+ }
+
+ return;
+}
+
+static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
+{
+ int j;
+
+ for (j = 0; j < 16; j += 2) {
+ trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]);
+ if (i == 'I' && j < 14)
+ trace_seq_printf(s, ":");
+ }
+}
+
+/*
+ * %pi4 print an IPv4 address with leading zeros
+ * %pI4 print an IPv4 address without leading zeros
+ * %pi6 print an IPv6 address without colons
+ * %pI6 print an IPv6 address with colons
+ * %pI6c print an IPv6 address in compressed form with colons
+ * %pISpc print an IP address based on sockaddr; p adds port.
+ */
+static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
+ void *data, int size, struct event_format *event,
+ struct print_arg *arg)
+{
+ unsigned char *buf;
+
+ if (arg->type == PRINT_FUNC) {
+ process_defined_func(s, data, size, event, arg);
+ return 0;
+ }
+
+ if (arg->type != PRINT_FIELD) {
+ trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+ return 0;
+ }
+
+ if (!arg->field.field) {
+ arg->field.field =
+ pevent_find_any_field(event, arg->field.name);
+ if (!arg->field.field) {
+ do_warning("%s: field %s not found",
+ __func__, arg->field.name);
+ return 0;
+ }
+ }
+
+ buf = data + arg->field.field->offset;
+
+ if (arg->field.field->size != 4) {
+ trace_seq_printf(s, "INVALIDIPv4");
+ return 0;
+ }
+ print_ip4_addr(s, i, buf);
+
+ return 0;
+}
+
+static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
+ void *data, int size, struct event_format *event,
+ struct print_arg *arg)
+{
+ char have_c = 0;
+ unsigned char *buf;
+ int rc = 0;
+
+ /* pI6c */
+ if (i == 'I' && *ptr == 'c') {
+ have_c = 1;
+ ptr++;
+ rc++;
+ }
+
+ if (arg->type == PRINT_FUNC) {
+ process_defined_func(s, data, size, event, arg);
+ return rc;
+ }
+
+ if (arg->type != PRINT_FIELD) {
+ trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+ return rc;
+ }
+
+ if (!arg->field.field) {
+ arg->field.field =
+ pevent_find_any_field(event, arg->field.name);
+ if (!arg->field.field) {
+ do_warning("%s: field %s not found",
+ __func__, arg->field.name);
+ return rc;
+ }
+ }
+
+ buf = data + arg->field.field->offset;
+
+ if (arg->field.field->size != 16) {
+ trace_seq_printf(s, "INVALIDIPv6");
+ return rc;
+ }
+
+ if (have_c)
+ print_ip6c_addr(s, buf);
+ else
+ print_ip6_addr(s, i, buf);
+
+ return rc;
+}
+
+static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
+ void *data, int size, struct event_format *event,
+ struct print_arg *arg)
+{
+ char have_c = 0, have_p = 0;
+ unsigned char *buf;
+ struct sockaddr_storage *sa;
+ int rc = 0;
+
+ /* pISpc */
+ if (i == 'I') {
+ if (*ptr == 'p') {
+ have_p = 1;
+ ptr++;
+ rc++;
+ }
+ if (*ptr == 'c') {
+ have_c = 1;
+ ptr++;
+ rc++;
+ }
+ }
+
+ if (arg->type == PRINT_FUNC) {
+ process_defined_func(s, data, size, event, arg);
+ return rc;
+ }
+
+ if (arg->type != PRINT_FIELD) {
+ trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+ return rc;
+ }
+
+ if (!arg->field.field) {
+ arg->field.field =
+ pevent_find_any_field(event, arg->field.name);
+ if (!arg->field.field) {
+ do_warning("%s: field %s not found",
+ __func__, arg->field.name);
+ return rc;
+ }
+ }
+
+ sa = (struct sockaddr_storage *) (data + arg->field.field->offset);
+
+ if (sa->ss_family == AF_INET) {
+ struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
+
+ if (arg->field.field->size < sizeof(struct sockaddr_in)) {
+ trace_seq_printf(s, "INVALIDIPv4");
+ return rc;
+ }
+
+ print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
+ if (have_p)
+ trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
+
+
+ } else if (sa->ss_family == AF_INET6) {
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
+
+ if (arg->field.field->size < sizeof(struct sockaddr_in6)) {
+ trace_seq_printf(s, "INVALIDIPv6");
+ return rc;
+ }
+
+ if (have_p)
+ trace_seq_printf(s, "[");
+
+ buf = (unsigned char *) &sa6->sin6_addr;
+ if (have_c)
+ print_ip6c_addr(s, buf);
+ else
+ print_ip6_addr(s, i, buf);
+
+ if (have_p)
+ trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port));
+ }
+
+ return rc;
+}
+
+static int print_ip_arg(struct trace_seq *s, const char *ptr,
+ void *data, int size, struct event_format *event,
+ struct print_arg *arg)
+{
+ char i = *ptr; /* 'i' or 'I' */
+ char ver;
+ int rc = 0;
+
+ ptr++;
+ rc++;
+
+ ver = *ptr;
+ ptr++;
+ rc++;
+
+ switch (ver) {
+ case '4':
+ rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
+ break;
+ case '6':
+ rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
+ break;
+ case 'S':
+ rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
+ break;
+ default:
+ return 0;
+ }
+
+ return rc;
+}
+
static int is_printable_array(char *p, unsigned int len)
{
unsigned int i;
@@ -4337,6 +4656,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
ptr++;
arg = arg->next;
break;
+ } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+ int n;
+
+ n = print_ip_arg(s, ptr+1, data, size, event, arg);
+ if (n > 0) {
+ ptr += n;
+ arg = arg->next;
+ break;
+ }
}
/* fall through */
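
The handlers added above teach libtraceevent the kernel's %p[iI] pointer extensions for IP addresses. The renderings below follow from the code; the addresses themselves are examples:

/*
 *   %pi4   192.168.000.001                          (leading zeros)
 *   %pI4   192.168.0.1
 *   %pi6   20010db8000000000000000000000001         (no colons)
 *   %pI6   2001:0db8:0000:0000:0000:0000:0000:0001  (full groups)
 *   %pI6c  2001:db8::1                              (compressed)
 *   %pIS   address taken from a struct sockaddr; 'p' appends the
 *          port, 'c' compresses an IPv6 address as above
 */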
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index fd77d81ea748..0294c57b1f5e 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -38,7 +38,7 @@ OPTIONS
--remove=::
Remove specified file from the cache.
-M::
---missing=::
+--missing=::
List missing build ids in the cache for the specified file.
-u::
--update::
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index cbb4f743d921..3e2aec94f806 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -89,6 +89,19 @@ raw encoding of 0x1A8 can be used:
You should refer to the processor specific documentation for getting these
details. Some of them are referenced in the SEE ALSO section below.
+PARAMETERIZED EVENTS
+--------------------
+
+Some pmu events listed by 'perf-list' will be displayed with '?' in them. For
+example:
+
+ hv_gpci/dtbp_ptitc,phys_processor_idx=?/
+
+This means that when provided as an event, a value for '?' must
+also be supplied. For example:
+
+ perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
+
OPTIONS
-------
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 1d78a4064da4..43310d8661fe 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -12,11 +12,12 @@ SYNOPSIS
DESCRIPTION
-----------
-"perf mem -t <TYPE> record" runs a command and gathers memory operation data
+"perf mem record" runs a command and gathers memory operation data
from it, into perf.data. Perf record options are accepted and are passed through.
-"perf mem -t <TYPE> report" displays the result. It invokes perf report with the
-right set of options to display a memory access profile.
+"perf mem report" displays the result. It invokes perf report with the
+right set of options to display a memory access profile. By default, loads
+and stores are sampled. Use the -t option to limit to loads or stores.
Note that on Intel systems the memory latency reported is the use-latency,
not the pure load (or store latency). Use latency includes any pipeline
@@ -29,7 +30,7 @@ OPTIONS
-t::
--type=::
- Select the memory operation type: load or store (default: load)
+ Select the memory operation type: load or store (default: load,store)
-D::
--dump-raw-samples=::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index af9a54ece024..31e977459c51 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,12 +33,27 @@ OPTIONS
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
- - a hardware breakpoint event in the form of '\mem:addr[:access]'
+ - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
+ 'param1', 'param2', etc are defined as formats for the PMU in
+ /sys/bus/event_sources/devices/<pmu>/format/*.
+
+ - a symbolically formed event like 'pmu/config=M,config1=N,config3=K/'
+
+ where M, N, K are numbers (in decimal, hex, octal format). Acceptable
+ values for each of 'config', 'config1' and 'config2' are defined by
+ corresponding entries in /sys/bus/event_sources/devices/<pmu>/format/*
+
+ - a hardware breakpoint event in the form of '\mem:addr[/len][:access]'
where addr is the address in memory you want to break in.
Access is the memory access type (read, write, execute) it can
- be passed as follows: '\mem:addr[:[r][w][x]]'.
+ be passed as follows: '\mem:addr[:[r][w][x]]'. len is the range,
+ number of bytes from specified addr, which the breakpoint will cover.
If you want to profile read-write accesses in 0x1000, just set
'mem:0x1000:rw'.
+ If you want to profile write accesses in [0x1000~1008), just set
+ 'mem:0x1000/8:w'.
--filter=<filter>::
Event filter.
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 21494806c0ab..a21eec05bc42 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -125,46 +125,46 @@ OPTIONS
is equivalent to:
perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
-
+
i.e., the specified fields apply to all event types if the type string
is not given.
-
+
The arguments are processed in the order received. A later usage can
reset a prior request. e.g.:
-
+
-f trace: -f comm,tid,time,ip,sym
-
+
The first -f suppresses trace events (field list is ""), but then the
second invocation sets the fields to comm,tid,time,ip,sym. In this case a
warning is given to the user:
-
+
"Overriding previous field request for all events."
-
+
Alternatively, consider the order:
-
+
-f comm,tid,time,ip,sym -f trace:
-
+
The first -f sets the fields for all events and the second -f
suppresses trace events. The user is given a warning message about
the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields.
-
+
For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is
ignored for that type. For example:
-
+
$ perf script -f comm,tid,trace
'trace' not valid for hardware events. Ignoring.
'trace' not valid for software events. Ignoring.
-
+
Alternatively, if the type is given an invalid field is specified it
is an error. For example:
-
+
perf script -v -f sw:comm,tid,trace
'trace' not valid for software events.
-
+
At this point usage is displayed, and perf-script exits.
-
+
Finally, a user may not set fields to none for all event types.
i.e., -f "" is not allowed.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 29ee857c09c6..04e150d83e7d 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -25,10 +25,22 @@ OPTIONS
-e::
--event=::
- Select the PMU event. Selection can be a symbolic event name
- (use 'perf list' to list all events) or a raw PMU
- event (eventsel+umask) in the form of rNNN where NNN is a
- hexadecimal event descriptor.
+ Select the PMU event. Selection can be:
+
+ - a symbolic event name (use 'perf list' to list all events)
+
+ - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
+ hexadecimal event descriptor.
+
+ - a symbolically formed event like 'pmu/param1=0x3,param2/' where
+ param1 and param2 are defined as formats for the PMU in
+ /sys/bus/event_sources/devices/<pmu>/format/*
+
+ - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
+ where M, N, K are numbers (in decimal, hex, octal format).
+ Acceptable values for each of 'config', 'config1' and 'config2'
+ parameters are defined by corresponding entries in
+ /sys/bus/event_sources/devices/<pmu>/format/*
-i::
--no-inherit::
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 83e2887f91a3..fbbfdc39271d 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -6,12 +6,15 @@ tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
tools/include/asm/bug.h
+tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
+tools/include/asm-generic/bitops/const_hweight.h
tools/include/asm-generic/bitops/__ffs.h
tools/include/asm-generic/bitops/__fls.h
tools/include/asm-generic/bitops/find.h
tools/include/asm-generic/bitops/fls64.h
tools/include/asm-generic/bitops/fls.h
+tools/include/asm-generic/bitops/hweight.h
tools/include/asm-generic/bitops.h
tools/include/linux/bitops.h
tools/include/linux/compiler.h
@@ -19,6 +22,8 @@ tools/include/linux/export.h
tools/include/linux/hash.h
tools/include/linux/log2.h
tools/include/linux/types.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/fls64.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
@@ -29,6 +34,7 @@ include/linux/list.h
include/linux/hash.h
include/linux/stringify.h
lib/find_next_bit.c
+lib/hweight.c
lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 67a03a825b3c..aa6a50447c32 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -232,12 +232,15 @@ LIB_H += ../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += ../include/linux/bitops.h
+LIB_H += ../include/asm-generic/bitops/arch_hweight.h
LIB_H += ../include/asm-generic/bitops/atomic.h
+LIB_H += ../include/asm-generic/bitops/const_hweight.h
LIB_H += ../include/asm-generic/bitops/find.h
LIB_H += ../include/asm-generic/bitops/fls64.h
LIB_H += ../include/asm-generic/bitops/fls.h
LIB_H += ../include/asm-generic/bitops/__ffs.h
LIB_H += ../include/asm-generic/bitops/__fls.h
+LIB_H += ../include/asm-generic/bitops/hweight.h
LIB_H += ../include/asm-generic/bitops.h
LIB_H += ../include/linux/compiler.h
LIB_H += ../include/linux/log2.h
@@ -255,7 +258,6 @@ LIB_H += util/include/linux/linkage.h
LIB_H += util/include/asm/asm-offsets.h
LIB_H += ../include/asm/bug.h
LIB_H += util/include/asm/byteorder.h
-LIB_H += util/include/asm/hweight.h
LIB_H += util/include/asm/swab.h
LIB_H += util/include/asm/system.h
LIB_H += util/include/asm/uaccess.h
@@ -462,10 +464,12 @@ BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
# Benchmark modules
BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-ifeq ($(RAW_ARCH),x86_64)
+ifeq ($(ARCH), x86)
+ifeq ($(IS_64_BIT), 1)
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
endif
+endif
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
@@ -743,6 +747,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 3bb50eac5542..0c370f81e002 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -103,7 +103,7 @@ static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
return NULL;
}
- result = dwarf_cfi_addrframe(cfi, pc, &frame);
+ result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
if (result) {
pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
return NULL;
@@ -128,7 +128,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
return NULL;
}
- result = dwarf_cfi_addrframe(cfi, pc, &frame);
+ result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
if (result) {
pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
return NULL;
@@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
* yet used)
* -1 in case of errors
*/
-static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
+static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
{
int rc = -1;
Dwfl *dwfl;
@@ -155,6 +155,7 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
Dwarf_Addr start = pc;
Dwarf_Addr end = pc;
bool signalp;
+ const char *exec_file = dso->long_name;
dwfl = dso->dwfl;
@@ -165,8 +166,10 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
return -1;
}
- if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) {
- pr_debug("dwfl_report_offline() failed %s\n",
+ mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1,
+ map_start, false);
+ if (!mod) {
+ pr_debug("dwfl_report_elf() failed %s\n",
dwarf_errmsg(-1));
/*
* We normally cache the DWARF debug info and never
@@ -256,10 +259,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
return skip_slot;
}
- rc = check_return_addr(dso, ip);
+ rc = check_return_addr(dso, al.map->start, ip);
- pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
- dso->long_name, chain->nr, ip, rc);
+ pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
+ dso->long_name, al.sym->name, ip, rc);
if (rc == 0) {
/*
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index 71f2844cf97f..7ed22ff1e1ac 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -68,4 +68,17 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
val, opflags);
}
+#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
+#include <pthread.h>
+static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
+ size_t cpusetsize,
+ cpu_set_t *cpuset)
+{
+ attr = attr;
+ cpusetsize = cpusetsize;
+ cpuset = cpuset;
+ return 0;
+}
+#endif
+
#endif /* _FUTEX_H */
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 07a8d7646a15..005cc283790c 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -19,12 +19,12 @@
#include <stdlib.h>
#include <signal.h>
#include <sys/wait.h>
-#include <linux/unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <sys/syscall.h>
#include <pthread.h>
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 77d5cae54c6a..50e6b66aea1f 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -236,10 +236,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
if (errno == ENOENT)
return false;
- pr_warning("Problems with %s file, consider removing it from the cache\n",
+ pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
} else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
- pr_warning("Problems with %s file, consider removing it from the cache\n",
+ pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1fd96c13f199..74aada554b12 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -390,6 +390,15 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
}
}
+static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt)
+{
+ struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt);
+ void *ptr = dfmt - dfmt->idx;
+ struct data__file *d = container_of(ptr, struct data__file, fmt);
+
+ return d;
+}
+
static struct hist_entry*
get_pair_data(struct hist_entry *he, struct data__file *d)
{
@@ -407,8 +416,7 @@ get_pair_data(struct hist_entry *he, struct data__file *d)
static struct hist_entry*
get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
{
- void *ptr = dfmt - dfmt->idx;
- struct data__file *d = container_of(ptr, struct data__file, fmt);
+ struct data__file *d = fmt_to_data_file(&dfmt->fmt);
return get_pair_data(he, d);
}
@@ -430,7 +438,7 @@ static void hists__baseline_only(struct hists *hists)
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
rb_erase(&he->rb_node_in, root);
- hist_entry__free(he);
+ hist_entry__delete(he);
}
}
}
@@ -448,26 +456,30 @@ static void hists__precompute(struct hists *hists)
next = rb_first(root);
while (next != NULL) {
struct hist_entry *he, *pair;
+ struct data__file *d;
+ int i;
he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
- pair = get_pair_data(he, &data__files[sort_compute]);
- if (!pair)
- continue;
+ data__for_each_file_new(i, d) {
+ pair = get_pair_data(he, d);
+ if (!pair)
+ continue;
- switch (compute) {
- case COMPUTE_DELTA:
- compute_delta(he, pair);
- break;
- case COMPUTE_RATIO:
- compute_ratio(he, pair);
- break;
- case COMPUTE_WEIGHTED_DIFF:
- compute_wdiff(he, pair);
- break;
- default:
- BUG_ON(1);
+ switch (compute) {
+ case COMPUTE_DELTA:
+ compute_delta(he, pair);
+ break;
+ case COMPUTE_RATIO:
+ compute_ratio(he, pair);
+ break;
+ case COMPUTE_WEIGHTED_DIFF:
+ compute_wdiff(he, pair);
+ break;
+ default:
+ BUG_ON(1);
+ }
}
}
}
@@ -517,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
static int64_t
hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
- int c)
+ int c, int sort_idx)
{
bool pairs_left = hist_entry__has_pairs(left);
bool pairs_right = hist_entry__has_pairs(right);
@@ -529,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
if (!pairs_left || !pairs_right)
return pairs_left ? -1 : 1;
- p_left = get_pair_data(left, &data__files[sort_compute]);
- p_right = get_pair_data(right, &data__files[sort_compute]);
+ p_left = get_pair_data(left, &data__files[sort_idx]);
+ p_right = get_pair_data(right, &data__files[sort_idx]);
if (!p_left && !p_right)
return 0;
@@ -546,90 +558,102 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
}
static int64_t
-hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
+ int c, int sort_idx)
+{
+ struct hist_entry *p_right, *p_left;
+
+ p_left = get_pair_data(left, &data__files[sort_idx]);
+ p_right = get_pair_data(right, &data__files[sort_idx]);
+
+ if (!p_left && !p_right)
+ return 0;
+
+ if (!p_left || !p_right)
+ return p_left ? -1 : 1;
+
+ if (c != COMPUTE_DELTA) {
+ /*
+ * The delta can be computed without the baseline, but
+ * others are not. Put those entries which have no
+ * values below.
+ */
+ if (left->dummy && right->dummy)
+ return 0;
+
+ if (left->dummy || right->dummy)
+ return left->dummy ? 1 : -1;
+ }
+
+ return __hist_entry__cmp_compute(p_left, p_right, c);
+}
+
+static int64_t
+hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left __maybe_unused,
struct hist_entry *right __maybe_unused)
{
return 0;
}
static int64_t
-hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
{
- if (sort_compute)
- return 0;
-
if (left->stat.period == right->stat.period)
return 0;
return left->stat.period > right->stat.period ? 1 : -1;
}
static int64_t
-hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
+ struct hist_entry *left, struct hist_entry *right)
{
- return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+ struct data__file *d = fmt_to_data_file(fmt);
+
+ return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
}
static int64_t
-hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
+ struct hist_entry *left, struct hist_entry *right)
{
- return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+ struct data__file *d = fmt_to_data_file(fmt);
+
+ return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
}
static int64_t
-hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
+ struct hist_entry *left, struct hist_entry *right)
{
- return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+ struct data__file *d = fmt_to_data_file(fmt);
+
+ return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
}
-static void insert_hist_entry_by_compute(struct rb_root *root,
- struct hist_entry *he,
- int c)
+static int64_t
+hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
- if (hist_entry__cmp_compute(he, iter, c) < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
+ sort_compute);
}
-static void hists__compute_resort(struct hists *hists)
+static int64_t
+hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
{
- struct rb_root *root;
- struct rb_node *next;
-
- if (sort__need_collapse)
- root = &hists->entries_collapsed;
- else
- root = hists->entries_in;
-
- hists->entries = RB_ROOT;
- next = rb_first(root);
-
- hists__reset_stats(hists);
- hists__reset_col_len(hists);
-
- while (next != NULL) {
- struct hist_entry *he;
-
- he = rb_entry(next, struct hist_entry, rb_node_in);
- next = rb_next(&he->rb_node_in);
-
- insert_hist_entry_by_compute(&hists->entries, he, compute);
- hists__inc_stats(hists, he);
+ return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
+ sort_compute);
+}
- if (!he->filtered)
- hists__calc_col_len(hists, he);
- }
+static int64_t
+hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
+ sort_compute);
}
static void hists__process(struct hists *hists)
@@ -637,12 +661,8 @@ static void hists__process(struct hists *hists)
if (show_baseline_only)
hists__baseline_only(hists);
- if (sort_compute) {
- hists__precompute(hists);
- hists__compute_resort(hists);
- } else {
- hists__output_resort(hists, NULL);
- }
+ hists__precompute(hists);
+ hists__output_resort(hists, NULL);
hists__fprintf(hists, true, 0, 0, 0, stdout);
}
@@ -841,7 +861,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
char pfmt[20] = " ";
if (!pair)
- goto dummy_print;
+ goto no_print;
switch (comparison_method) {
case COMPUTE_DELTA:
@@ -850,8 +870,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
else
diff = compute_delta(he, pair);
- if (fabs(diff) < 0.01)
- goto dummy_print;
scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
return percent_color_snprintf(hpp->buf, hpp->size,
pfmt, diff);
@@ -883,6 +901,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
}
dummy_print:
return scnprintf(hpp->buf, hpp->size, "%*s",
+ dfmt->header_width, "N/A");
+no_print:
+ return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, pfmt);
}
@@ -932,14 +953,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
else
diff = compute_delta(he, pair);
- if (fabs(diff) >= 0.01)
- scnprintf(buf, size, "%+4.2F%%", diff);
+ scnprintf(buf, size, "%+4.2F%%", diff);
break;
case PERF_HPP_DIFF__RATIO:
/* No point for ratio number if we are dummy.. */
- if (he->dummy)
+ if (he->dummy) {
+ scnprintf(buf, size, "N/A");
break;
+ }
if (pair->diff.computed)
ratio = pair->diff.period_ratio;
@@ -952,8 +974,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
case PERF_HPP_DIFF__WEIGHTED_DIFF:
/* No point for wdiff number if we are dummy.. */
- if (he->dummy)
+ if (he->dummy) {
+ scnprintf(buf, size, "N/A");
break;
+ }
if (pair->diff.computed)
wdiff = pair->diff.wdiff;
@@ -1105,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx)
perf_hpp__register_sort_field(fmt);
}
-static void ui_init(void)
+static int ui_init(void)
{
struct data__file *d;
+ struct perf_hpp_fmt *fmt;
int i;
data__for_each_file(i, d) {
@@ -1137,6 +1162,46 @@ static void ui_init(void)
data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
PERF_HPP_DIFF__PERIOD_BASELINE);
}
+
+ if (!sort_compute)
+ return 0;
+
+ /*
+ * Prepend an fmt to sort on columns at 'sort_compute' first.
+ * This fmt is added only to the sort list but not to the
+ * output fields list.
+ *
+ * Note that this column (data) can be compared twice - one
+ * for this 'sort_compute' fmt and another for the normal
+	 * diff_hpp_fmt.  But it shouldn't be a problem as most entries
+	 * will be sorted out by the first try or the baseline, and
+	 * comparing is not a costly operation.
+ */
+ fmt = zalloc(sizeof(*fmt));
+ if (fmt == NULL) {
+ pr_err("Memory allocation failed\n");
+ return -1;
+ }
+
+ fmt->cmp = hist_entry__cmp_nop;
+ fmt->collapse = hist_entry__cmp_nop;
+
+ switch (compute) {
+ case COMPUTE_DELTA:
+ fmt->sort = hist_entry__cmp_delta_idx;
+ break;
+ case COMPUTE_RATIO:
+ fmt->sort = hist_entry__cmp_ratio_idx;
+ break;
+ case COMPUTE_WEIGHTED_DIFF:
+ fmt->sort = hist_entry__cmp_wdiff_idx;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ list_add(&fmt->sort_list, &perf_hpp__sort_list);
+ return 0;
}
static int data_init(int argc, const char **argv)
@@ -1202,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
if (data_init(argc, argv) < 0)
return -1;
- ui_init();
+ if (ui_init() < 0)
+ return -1;
sort__mode = SORT_MODE__DIFF;
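
fmt_to_data_file() above is the usual container_of() walk done twice: step back from the per-column diff_hpp_fmt to the start of the embedded fmt[] array (dfmt - dfmt->idx), then recover the enclosing data__file. A self-contained illustration of the same layout; the struct and function names here are hypothetical stand-ins, not perf's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct column {            /* plays the role of diff_hpp_fmt */
        int idx;               /* position inside the fmt[] array */
    };

    struct data_file {         /* plays the role of data__file */
        const char *name;
        struct column fmt[4];
    };

    static struct data_file *column_to_file(struct column *col)
    {
        struct column *first = col - col->idx;   /* back to fmt[0] */
        return container_of(first, struct data_file, fmt);
    }

    int main(void)
    {
        struct data_file d = { .name = "perf.data.old" };
        int i;

        for (i = 0; i < 4; i++)
            d.fmt[i].idx = i;

        /* Any column leads back to its owning file. */
        printf("%s\n", column_to_file(&d.fmt[2])->name);
        return 0;
    }
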
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 84df2deed988..a13641e066f5 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -343,6 +343,7 @@ static int __cmd_inject(struct perf_inject *inject)
int ret = -EINVAL;
struct perf_session *session = inject->session;
struct perf_data_file *file_out = &inject->output;
+ int fd = perf_data_file__fd(file_out);
signal(SIGINT, sig_handler);
@@ -376,7 +377,7 @@ static int __cmd_inject(struct perf_inject *inject)
}
if (!file_out->is_pipe)
- lseek(file_out->fd, session->header.data_offset, SEEK_SET);
+ lseek(fd, session->header.data_offset, SEEK_SET);
ret = perf_session__process_events(session, &inject->tool);
@@ -385,7 +386,7 @@ static int __cmd_inject(struct perf_inject *inject)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
session->header.data_size = inject->bytes_written;
- perf_session__write_header(session, session->evlist, file_out->fd, true);
+ perf_session__write_header(session, session->evlist, fd, true);
}
return ret;
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 24db6ffe2957..9b5663950a4d 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -7,44 +7,47 @@
#include "util/session.h"
#include "util/data.h"
-#define MEM_OPERATION_LOAD "load"
-#define MEM_OPERATION_STORE "store"
-
-static const char *mem_operation = MEM_OPERATION_LOAD;
+#define MEM_OPERATION_LOAD 0x1
+#define MEM_OPERATION_STORE 0x2
struct perf_mem {
struct perf_tool tool;
char const *input_name;
bool hide_unresolved;
bool dump_raw;
+ int operation;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
int rec_argc, i = 0, j;
const char **rec_argv;
- char event[64];
int ret;
- rec_argc = argc + 4;
+ rec_argc = argc + 7; /* max number of arguments */
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
- rec_argv[i++] = strdup("record");
- if (!strcmp(mem_operation, MEM_OPERATION_LOAD))
- rec_argv[i++] = strdup("-W");
- rec_argv[i++] = strdup("-d");
- rec_argv[i++] = strdup("-e");
+ rec_argv[i++] = "record";
- if (strcmp(mem_operation, MEM_OPERATION_LOAD))
- sprintf(event, "cpu/mem-stores/pp");
- else
- sprintf(event, "cpu/mem-loads/pp");
+ if (mem->operation & MEM_OPERATION_LOAD)
+ rec_argv[i++] = "-W";
+
+ rec_argv[i++] = "-d";
+
+ if (mem->operation & MEM_OPERATION_LOAD) {
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = "cpu/mem-loads/pp";
+ }
+
+ if (mem->operation & MEM_OPERATION_STORE) {
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = "cpu/mem-stores/pp";
+ }
- rec_argv[i++] = strdup(event);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
@@ -162,17 +165,17 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
if (!rep_argv)
return -1;
- rep_argv[i++] = strdup("report");
- rep_argv[i++] = strdup("--mem-mode");
- rep_argv[i++] = strdup("-n"); /* display number of samples */
+ rep_argv[i++] = "report";
+ rep_argv[i++] = "--mem-mode";
+ rep_argv[i++] = "-n"; /* display number of samples */
/*
* there is no weight (cost) associated with stores, so don't print
* the column
*/
- if (strcmp(mem_operation, MEM_OPERATION_LOAD))
- rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr,"
- "dso_daddr,tlb,locked");
+ if (!(mem->operation & MEM_OPERATION_LOAD))
+ rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
+ "dso_daddr,tlb,locked";
for (j = 1; j < argc; j++, i++)
rep_argv[i] = argv[j];
@@ -182,6 +185,75 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
return ret;
}
+struct mem_mode {
+ const char *name;
+ int mode;
+};
+
+#define MEM_OPT(n, m) \
+ { .name = n, .mode = (m) }
+
+#define MEM_END { .name = NULL }
+
+static const struct mem_mode mem_modes[] = {
+ MEM_OPT("load", MEM_OPERATION_LOAD),
+ MEM_OPT("store", MEM_OPERATION_STORE),
+ MEM_END
+};
+
+static int
+parse_mem_ops(const struct option *opt, const char *str, int unset)
+{
+ int *mode = (int *)opt->value;
+ const struct mem_mode *m;
+ char *s, *os = NULL, *p;
+ int ret = -1;
+
+ if (unset)
+ return 0;
+
+ /* str may be NULL in case no arg is passed to -t */
+ if (str) {
+ /* because str is read-only */
+ s = os = strdup(str);
+ if (!s)
+ return -1;
+
+ /* reset mode */
+ *mode = 0;
+
+ for (;;) {
+ p = strchr(s, ',');
+ if (p)
+ *p = '\0';
+
+ for (m = mem_modes; m->name; m++) {
+ if (!strcasecmp(s, m->name))
+ break;
+ }
+ if (!m->name) {
+ fprintf(stderr, "unknown sampling op %s,"
+ " check man page\n", s);
+ goto error;
+ }
+
+ *mode |= m->mode;
+
+ if (!p)
+ break;
+
+ s = p + 1;
+ }
+ }
+ ret = 0;
+
+ if (*mode == 0)
+ *mode = MEM_OPERATION_LOAD;
+error:
+ free(os);
+ return ret;
+}
+
int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct stat st;
@@ -197,10 +269,15 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
.ordered_events = true,
},
.input_name = "perf.data",
+ /*
+		 * default to both load and store sampling
+ */
+ .operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
};
const struct option mem_options[] = {
- OPT_STRING('t', "type", &mem_operation,
- "type", "memory operations(load/store)"),
+ OPT_CALLBACK('t', "type", &mem.operation,
+		   "type", "memory operations (load, store). Default: load,store",
+ parse_mem_ops),
OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
"dump raw samples in ASCII"),
OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
@@ -225,7 +302,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation))
+ if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
usage_with_options(mem_usage, mem_options);
if (!mem.input_name || !strlen(mem.input_name)) {
@@ -236,7 +313,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
}
if (!strncmp(argv[0], "rec", 3))
- return __cmd_record(argc, argv);
+ return __cmd_record(argc, argv, &mem);
else if (!strncmp(argv[0], "rep", 3))
return report_events(argc, argv, &mem);
else
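
parse_mem_ops() above turns a comma-separated "-t load,store" argument into a bitmask: duplicate the read-only string, split on ',', match each token case-insensitively against a small name table, and OR in the flag. A standalone sketch of that loop; the table, flag values and function name are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>   /* strcasecmp */

    #define OP_LOAD  0x1
    #define OP_STORE 0x2

    static const struct { const char *name; int mode; } modes[] = {
        { "load",  OP_LOAD  },
        { "store", OP_STORE },
        { NULL, 0 }
    };

    /* Returns a bitmask of OP_* flags, or -1 on an unknown token. */
    static int parse_ops(const char *str)
    {
        char *copy, *s, *p;
        int mask = 0, i;

        s = copy = strdup(str);        /* str itself is read-only */
        if (!copy)
            return -1;

        while (s) {
            p = strchr(s, ',');
            if (p)
                *p = '\0';

            for (i = 0; modes[i].name; i++)
                if (!strcasecmp(s, modes[i].name))
                    break;
            if (!modes[i].name) {
                mask = -1;             /* unknown sampling op */
                break;
            }
            mask |= modes[i].mode;
            s = p ? p + 1 : NULL;
        }
        free(copy);
        return mask;
    }

    int main(void)
    {
        printf("%#x\n", parse_ops("load,store"));   /* 0x3 */
        printf("%#x\n", parse_ops("Store"));        /* 0x2 */
        return 0;
    }
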
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 8648c6d3003d..404ab3434052 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -190,16 +190,30 @@ out:
return rc;
}
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct record *rec = container_of(tool, struct record, tool);
+
+ rec->samples++;
+
+ return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
+}
+
static int process_buildids(struct record *rec)
{
struct perf_data_file *file = &rec->file;
struct perf_session *session = rec->session;
- u64 start = session->header.data_offset;
- u64 size = lseek(file->fd, 0, SEEK_CUR);
+ u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
if (size == 0)
return 0;
+ file->size = size;
+
/*
* During this process, it'll load kernel map and replace the
* dso->long_name to a real pathname it found. In this case
@@ -211,9 +225,7 @@ static int process_buildids(struct record *rec)
*/
symbol_conf.ignore_vmlinux_buildid = true;
- return __perf_session__process_events(session, start,
- size - start,
- size, &build_id__mark_dso_hit_ops);
+ return perf_session__process_events(session, &rec->tool);
}
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
@@ -322,6 +334,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data_file *file = &rec->file;
struct perf_session *session;
bool disabled = false, draining = false;
+ int fd;
rec->progname = argv[0];
@@ -336,6 +349,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
return -1;
}
+ fd = perf_data_file__fd(file);
rec->session = session;
record__init_features(rec);
@@ -360,12 +374,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
if (file->is_pipe) {
- err = perf_header__write_pipe(file->fd);
+ err = perf_header__write_pipe(fd);
if (err < 0)
goto out_child;
} else {
- err = perf_session__write_header(session, rec->evlist,
- file->fd, false);
+ err = perf_session__write_header(session, rec->evlist, fd, false);
if (err < 0)
goto out_child;
}
@@ -397,7 +410,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
- err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
+ err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
@@ -504,19 +517,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
- if (!quiet) {
+ if (!quiet)
fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
- /*
- * Approximate RIP event size: 24 bytes.
- */
- fprintf(stderr,
- "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
- (double)rec->bytes_written / 1024.0 / 1024.0,
- file->path,
- rec->bytes_written / 24);
- }
-
out_child:
if (forks) {
int exit_status;
@@ -535,13 +538,29 @@ out_child:
} else
status = err;
+ /* this will be recalculated during process_buildids() */
+ rec->samples = 0;
+
if (!err && !file->is_pipe) {
rec->session->header.data_size += rec->bytes_written;
if (!rec->no_buildid)
process_buildids(rec);
- perf_session__write_header(rec->session, rec->evlist,
- file->fd, true);
+ perf_session__write_header(rec->session, rec->evlist, fd, true);
+ }
+
+ if (!err && !quiet) {
+ char samples[128];
+
+ if (rec->samples)
+ scnprintf(samples, sizeof(samples),
+ " (%" PRIu64 " samples)", rec->samples);
+ else
+ samples[0] = '\0';
+
+ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
+ perf_data_file__size(file) / 1024.0 / 1024.0,
+ file->path, samples);
}
out_delete_session:
@@ -720,6 +739,13 @@ static struct record record = {
.default_per_cpu = true,
},
},
+ .tool = {
+ .sample = process_sample_event,
+ .fork = perf_event__process_fork,
+ .comm = perf_event__process_comm,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ },
};
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
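
The record changes above replace the old bytes_written / 24 guess with an exact count of PERF_RECORD_SAMPLEs gathered while the build-id pass re-reads the data file, and they only print the count when one exists. The conditional-suffix formatting at the end is just this pattern (a trivial sketch with made-up numbers; snprintf stands in for perf's scnprintf):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Print "... wrote X MB path (N samples)" but drop the sample
     * count entirely when none were recorded. */
    static void print_summary(double bytes, const char *path, uint64_t nr_samples)
    {
        char samples[128];

        if (nr_samples)
            snprintf(samples, sizeof(samples),
                     " (%" PRIu64 " samples)", nr_samples);
        else
            samples[0] = '\0';

        printf("[ perf record: Captured and wrote %.3f MB %s%s ]\n",
               bytes / 1024.0 / 1024.0, path, samples);
    }

    int main(void)
    {
        print_summary(3275161.0, "perf.data", 1234);
        print_summary(512.0, "perf.data", 0);
        return 0;
    }
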
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 072ae8ad67fc..2f91094e228b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -86,17 +86,6 @@ static int report__config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
-static void report__inc_stats(struct report *rep, struct hist_entry *he)
-{
- /*
- * The @he is either of a newly created one or an existing one
- * merging current sample. We only want to count a new one so
- * checking ->nr_events being 1.
- */
- if (he->stat.nr_events == 1)
- rep->nr_entries++;
-}
-
static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct addr_location *al, bool single,
void *arg)
@@ -108,8 +97,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct mem_info *mi;
struct branch_info *bi;
- report__inc_stats(rep, he);
-
if (!ui__has_annotation())
return 0;
@@ -499,6 +486,9 @@ static int __cmd_report(struct report *rep)
report__warn_kptr_restrict(rep);
+ evlist__for_each(session->evlist, pos)
+ rep->nr_entries += evsel__hists(pos)->nr_entries;
+
if (use_browser == 0) {
if (verbose > 3)
perf_session__fprintf(session, stdout);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 891086376381..e598e4e98170 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1730,7 +1730,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
"detailed run - start a lot of events"),
OPT_BOOLEAN('S', "sync", &sync_run,
"call sync() before starting a run"),
- OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
+ OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 961cea183a83..c4c7eac69de4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -66,7 +66,6 @@
#include <sys/utsname.h>
#include <sys/mman.h>
-#include <linux/unistd.h>
#include <linux/types.h>
static volatile int done;
@@ -166,7 +165,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
err ? "[unknown]" : uts.release, perf_version_string);
if (use_browser <= 0)
sleep(5);
-
+
map->erange_warned = true;
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index badfabc6a01f..7e935f1083ec 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -929,66 +929,66 @@ static struct syscall_fmt {
.arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
{ .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
{ .name = "close", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
{ .name = "connect", .errmsg = true, },
{ .name = "dup", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "dup2", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "dup3", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
{ .name = "eventfd2", .errmsg = true,
.arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
{ .name = "faccessat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "fadvise64", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fallocate", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchdir", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchmod", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchmodat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "fchown", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchownat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "fcntl", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[1] = SCA_STRARRAY, /* cmd */ },
.arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
{ .name = "fdatasync", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "flock", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[1] = SCA_FLOCK, /* cmd */ }, },
{ .name = "fsetxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fstat", .errmsg = true, .alias = "newfstat",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fstatat", .errmsg = true, .alias = "newfstatat",
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "fstatfs", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fsync", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "ftruncate", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "futex", .errmsg = true,
.arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
{ .name = "futimesat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "getdents", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "getdents64", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "ioctl", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
@@ -1002,7 +1002,7 @@ static struct syscall_fmt {
{ .name = "kill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "linkat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "lseek", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[2] = SCA_STRARRAY, /* whence */ },
@@ -1012,9 +1012,9 @@ static struct syscall_fmt {
.arg_scnprintf = { [0] = SCA_HEX, /* start */
[2] = SCA_MADV_BHV, /* behavior */ }, },
{ .name = "mkdirat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "mknodat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "mlock", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mlockall", .errmsg = true,
@@ -1036,9 +1036,9 @@ static struct syscall_fmt {
{ .name = "munmap", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "name_to_handle_at", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "newfstatat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "open", .errmsg = true,
.arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "open_by_handle_at", .errmsg = true,
@@ -1052,20 +1052,20 @@ static struct syscall_fmt {
{ .name = "poll", .errmsg = true, .timeout = true, },
{ .name = "ppoll", .errmsg = true, .timeout = true, },
{ .name = "pread", .errmsg = true, .alias = "pread64",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "preadv", .errmsg = true, .alias = "pread",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
{ .name = "pwrite", .errmsg = true, .alias = "pwrite64",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "pwritev", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "read", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "readlinkat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "readv", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "recvfrom", .errmsg = true,
.arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "recvmmsg", .errmsg = true,
@@ -1073,7 +1073,7 @@ static struct syscall_fmt {
{ .name = "recvmsg", .errmsg = true,
.arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "renameat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "rt_sigaction", .errmsg = true,
.arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
{ .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@@ -1091,7 +1091,7 @@ static struct syscall_fmt {
{ .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
{ .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "shutdown", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "socket", .errmsg = true,
.arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
[1] = SCA_SK_TYPE, /* type */ },
@@ -1102,7 +1102,7 @@ static struct syscall_fmt {
.arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
{ .name = "stat", .errmsg = true, .alias = "newstat", },
{ .name = "symlinkat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "tgkill", .errmsg = true,
.arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
{ .name = "tkill", .errmsg = true,
@@ -1113,9 +1113,9 @@ static struct syscall_fmt {
{ .name = "utimensat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
{ .name = "write", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "writev", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1191,7 +1191,7 @@ static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
if (thread__priv(thread) == NULL)
thread__set_priv(thread, thread_trace__new());
-
+
if (thread__priv(thread) == NULL)
goto fail;
@@ -2056,23 +2056,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->trace_syscalls &&
perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
trace__sys_exit))
- goto out_error_tp;
+ goto out_error_raw_syscalls;
if (trace->trace_syscalls)
perf_evlist__add_vfs_getname(evlist);
if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
- perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ))
- goto out_error_tp;
+ perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
+ goto out_error_mem;
+ }
if ((trace->trace_pgfaults & TRACE_PFMIN) &&
perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
- goto out_error_tp;
+ goto out_error_mem;
if (trace->sched &&
- perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
- trace__sched_stat_runtime))
- goto out_error_tp;
+ perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
+ trace__sched_stat_runtime))
+ goto out_error_sched_stat_runtime;
err = perf_evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
@@ -2202,8 +2203,12 @@ out:
{
char errbuf[BUFSIZ];
-out_error_tp:
- perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
+out_error_sched_stat_runtime:
+ debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
+ goto out_error;
+
+out_error_raw_syscalls:
+ debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
goto out_error;
out_error_mmap:
@@ -2217,6 +2222,9 @@ out_error:
fprintf(trace->output, "%s\n", errbuf);
goto out_delete_evlist;
}
+out_error_mem:
+ fprintf(trace->output, "Not enough memory to run!\n");
+ goto out_delete_evlist;
}
static int trace__replay(struct trace *trace)
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 5d4b039fe1ed..cc224080b525 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -20,7 +20,7 @@ NO_PERF_REGS := 1
# Additional ARCH settings for x86
ifeq ($(ARCH),x86)
- ifeq (${IS_X86_64}, 1)
+ ifeq (${IS_64_BIT}, 1)
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
@@ -198,6 +198,7 @@ CORE_FEATURE_TESTS = \
libpython-version \
libslang \
libunwind \
+ pthread-attr-setaffinity-np \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
@@ -226,6 +227,7 @@ VF_FEATURE_TESTS = \
libelf-getphdrnum \
libelf-mmap \
libpython-version \
+ pthread-attr-setaffinity-np \
stackprotector-all \
timerfd \
libunwind-debug-frame \
@@ -301,6 +303,10 @@ ifeq ($(feature-sync-compare-and-swap), 1)
CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
endif
+ifeq ($(feature-pthread-attr-setaffinity-np), 1)
+ CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
+endif
+
ifndef NO_BIONIC
$(call feature_check,bionic)
ifeq ($(feature-bionic), 1)
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index 851cd0172a76..ff95a68741d1 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -1,7 +1,7 @@
uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
@@ -9,23 +9,23 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/tile.*/tile/ )
# Additional ARCH settings for x86
-ifeq ($(ARCH),i386)
- override ARCH := x86
+ifeq ($(RAW_ARCH),i386)
+ ARCH ?= x86
endif
-ifeq ($(ARCH),x86_64)
- override ARCH := x86
- IS_X86_64 := 0
- ifeq (, $(findstring m32,$(CFLAGS)))
- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
- RAW_ARCH := x86_64
+ifeq ($(RAW_ARCH),x86_64)
+ ARCH ?= x86
+
+ ifneq (, $(findstring m32,$(CFLAGS)))
+ RAW_ARCH := x86_32
endif
endif
-ifeq (${IS_X86_64}, 1)
+ARCH ?= $(RAW_ARCH)
+
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
IS_64_BIT := 1
-else ifeq ($(ARCH),x86)
- IS_64_BIT := 0
else
- IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ IS_64_BIT := 0
endif
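
The Makefile.arch rework above stops special-casing x86 and asks the compiler directly whether the target is 64-bit: piping the token __LP64__ through "$(CC) -E" prints 1 whenever the data model is LP64 (the token comes back unexpanded otherwise). The same probe, seen from C (nothing perf-specific here):

    #include <stdio.h>

    int main(void)
    {
    #ifdef __LP64__
        /* long and pointers are 64-bit: the Makefile sets IS_64_BIT := 1 */
        printf("__LP64__ = %d (64-bit build)\n", __LP64__);
    #else
        printf("__LP64__ undefined (32-bit build)\n");
    #endif
        printf("sizeof(long) = %zu, sizeof(void *) = %zu\n",
               sizeof(long), sizeof(void *));
        return 0;
    }
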
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 53f19b5dbc37..42ac05aaf8ac 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -25,6 +25,7 @@ FILES= \
test-libslang.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
+ test-pthread-attr-setaffinity-np.bin \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
@@ -47,6 +48,9 @@ test-all.bin:
test-hello.bin:
$(BUILD)
+test-pthread-attr-setaffinity-np.bin:
+ $(BUILD) -Werror -lpthread
+
test-stackprotector-all.bin:
$(BUILD) -Werror -fstack-protector-all
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index 652e0098eba6..6d4d09323922 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -97,6 +97,10 @@
# include "test-zlib.c"
#undef main
+#define main main_test_pthread_attr_setaffinity_np
+# include "test-pthread_attr_setaffinity_np.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -121,6 +125,7 @@ int main(int argc, char *argv[])
main_test_libdw_dwarf_unwind();
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
+ main_test_pthread_attr_setaffinity_np();
return 0;
}
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
new file mode 100644
index 000000000000..0a0d3ecb4e8a
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
@@ -0,0 +1,14 @@
+#include <stdint.h>
+#include <pthread.h>
+
+int main(void)
+{
+ int ret = 0;
+ pthread_attr_t thread_attr;
+
+ pthread_attr_init(&thread_attr);
+	/* don't care about exact args, just the API itself in libpthread */
+ ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+
+ return ret;
+}
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index a3b13d7dc1d4..6ef68165c9db 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -6,7 +6,6 @@
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
-#include <asm/unistd.h>
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index 790ceba6ad3f..28431d1bbcf5 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -5,7 +5,10 @@
* ANY CHANGES MADE HERE WILL BE LOST!
*
*/
-
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
#line 1 "Context.xs"
/*
* Context.xs. XS interfaces for perf script.
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index c9b4b6269b51..1091bd47adfd 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -104,7 +104,6 @@ class Event(dict):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
-
# Test file description needs to have following sections:
# [config]
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index ab28cca2cb97..0bf06bec68c7 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -11,6 +11,9 @@
#include "thread.h"
#include "callchain.h"
+/* For bsearch. We try to unwind functions in a shared object. */
+#include <stdlib.h>
+
static int mmap_handler(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -28,7 +31,7 @@ static int init_live_machine(struct machine *machine)
mmap_handler, machine, true);
}
-#define MAX_STACK 6
+#define MAX_STACK 8
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
@@ -37,6 +40,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
static const char *funcs[MAX_STACK] = {
"test__arch_unwind_sample",
"unwind_thread",
+ "compare",
+ "bsearch",
"krava_3",
"krava_2",
"krava_1",
@@ -88,10 +93,37 @@ static int unwind_thread(struct thread *thread)
return err;
}
+static int global_unwind_retval = -INT_MAX;
+
+__attribute__ ((noinline))
+static int compare(void *p1, void *p2)
+{
+ /* Any possible value should be 'thread' */
+ struct thread *thread = *(struct thread **)p1;
+
+ if (global_unwind_retval == -INT_MAX)
+ global_unwind_retval = unwind_thread(thread);
+
+ return p1 - p2;
+}
+
__attribute__ ((noinline))
static int krava_3(struct thread *thread)
{
- return unwind_thread(thread);
+ struct thread *array[2] = {thread, thread};
+ void *fp = &bsearch;
+ /*
+ * make _bsearch a volatile function pointer to
+ * prevent potential optimization, which may expand
+ * bsearch and call compare directly from this function,
+ * instead of libc shared object.
+	 * instead of from the libc shared object.
+ void *(*volatile _bsearch)(void *, void *, size_t,
+ size_t, int (*)(void *, void *));
+
+ _bsearch = fp;
+ _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
+ return global_unwind_retval;
}
__attribute__ ((noinline))
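
The dwarf-unwind test above now routes the call chain through libc's bsearch() so the unwinder has to cross a shared-object boundary, and it hides bsearch behind a volatile function pointer so the compiler cannot expand it and call compare() directly. A standalone sketch of the same trick, written against bsearch's real prototype rather than the test's local one:

    #include <stdio.h>
    #include <stdlib.h>

    static int compare(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;
        /* A real test would start its unwind from here. */
        return (x > y) - (x < y);
    }

    int main(void)
    {
        int keys[] = { 1, 3, 5, 7, 9 };
        int key = 7;

        /* volatile: the call must go through the pointer, i.e. into libc,
         * instead of being expanded or devirtualized at compile time. */
        void *(*volatile my_bsearch)(const void *, const void *, size_t,
                                     size_t,
                                     int (*)(const void *, const void *)) = bsearch;

        int *hit = my_bsearch(&key, keys, 5, sizeof(int), compare);
        printf("found %d\n", hit ? *hit : -1);
        return 0;
    }
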
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 8d110dec393e..18619966454c 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -140,7 +140,7 @@ static void del_hist_entries(struct hists *hists)
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase(node, root_out);
rb_erase(&he->rb_node_in, root_in);
- hist_entry__free(he);
+ hist_entry__delete(he);
}
}
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index f5547610da02..b52c9faea224 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -106,7 +106,7 @@ static void del_hist_entries(struct hists *hists)
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase(node, root_out);
rb_erase(&he->rb_node_in, root_in);
- hist_entry__free(he);
+ hist_entry__delete(he);
}
}
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 69a71ff84e01..75709d2b17b4 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -222,7 +222,6 @@ tarpkg:
@cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1
-
all: $(run) $(run_O) tarpkg
@echo OK
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7f2f51f93619..1cdab0ce00e2 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1145,6 +1145,49 @@ static int test__pinned_group(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_breakpoint_len(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
+ evsel->attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
+ evsel->attr.bp_len);
+
+ return 0;
+}
+
+static int test__checkevent_breakpoint_len_w(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
+ evsel->attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
+ evsel->attr.bp_len);
+
+ return 0;
+}
+
+static int
+test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_breakpoint_rw(evlist);
+}
+
static int count_tracepoints(void)
{
char events_path[PATH_MAX];
@@ -1420,6 +1463,21 @@ static struct evlist_test test__events[] = {
.check = test__pinned_group,
.id = 41,
},
+ {
+ .name = "mem:0/1",
+ .check = test__checkevent_breakpoint_len,
+ .id = 42,
+ },
+ {
+ .name = "mem:0/2:w",
+ .check = test__checkevent_breakpoint_len_w,
+ .id = 43,
+ },
+ {
+ .name = "mem:0/4:rw:u",
+ .check = test__checkevent_breakpoint_len_rw_modifier,
+ .id = 44
+ },
#if defined(__s390x__)
{
.name = "kvm-s390:kvm_s390_create_vm",
@@ -1471,7 +1529,7 @@ static int test_event(struct evlist_test *e)
} else {
ret = e->check(evlist);
}
-
+
perf_evlist__delete(evlist);
return ret;
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 4908c648a597..30c02181e78b 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -110,7 +110,7 @@ static bool samples_same(const struct perf_sample *s1,
if (type & PERF_SAMPLE_STACK_USER) {
COMP(user_stack.size);
- if (memcmp(s1->user_stack.data, s1->user_stack.data,
+ if (memcmp(s1->user_stack.data, s2->user_stack.data,
s1->user_stack.size)) {
pr_debug("Samples differ at 'user_stack'\n");
return false;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1e0a2fd80115..9d32e3c0cfee 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -517,7 +517,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
}
annotate_browser__set_top(browser, dl, idx);
-
+
return true;
}
@@ -867,7 +867,6 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
++browser->nr_jumps;
}
-
}
static inline int width_jumps(int n)
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 482adae3cc44..25d608394d74 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -285,7 +285,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
}
#define __HPP_SORT_FN(_type, _field) \
-static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort(a, b, he_get_##_field); \
}
@@ -312,7 +313,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
}
#define __HPP_SORT_ACC_FN(_type, _field) \
-static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort_acc(a, b, he_get_acc_##_field); \
}
@@ -331,7 +333,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
}
#define __HPP_SORT_RAW_FN(_type, _field) \
-static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort(a, b, he_get_raw_##_field); \
}
@@ -361,7 +364,8 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period)
HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
-static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
+static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *a __maybe_unused,
struct hist_entry *b __maybe_unused)
{
return 0;
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h
index f34f89eb607c..717d39d3052b 100644
--- a/tools/perf/ui/progress.h
+++ b/tools/perf/ui/progress.h
@@ -4,12 +4,12 @@
#include <linux/types.h>
void ui_progress__finish(void);
-
+
struct ui_progress {
const char *title;
u64 curr, next, step, total;
};
-
+
void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
void ui_progress__update(struct ui_progress *p, u64 adv);
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 1c8b9afd5d6e..88f5143a5981 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -9,6 +9,7 @@
#include "../libslang.h"
char ui_helpline__last_msg[1024];
+bool tui_helpline__set;
static void tui_helpline__pop(void)
{
@@ -35,6 +36,8 @@ static int tui_helpline__show(const char *format, va_list ap)
sizeof(ui_helpline__last_msg) - backlog, format, ap);
backlog += ret;
+ tui_helpline__set = true;
+
if (ui_helpline__last_msg[backlog - 1] == '\n') {
ui_helpline__puts(ui_helpline__last_msg);
SLsmg_refresh();
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 3c38f25b1695..b77e1d771363 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -17,6 +17,7 @@
static volatile int ui__need_resize;
extern struct perf_error_ops perf_tui_eops;
+extern bool tui_helpline__set;
extern void hist_browser__init_hpp(void);
@@ -159,7 +160,7 @@ out:
void ui__exit(bool wait_for_ok)
{
- if (wait_for_ok)
+ if (wait_for_ok && tui_helpline__set)
ui__question_window("Fatal Error",
ui_helpline__last_msg,
"Press any key...", 0);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 79999ceaf2be..61bf9128e1f2 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
goto out_free_ops;
ops->locked.ins = ins__find(name);
+ free(name);
+
if (ops->locked.ins == NULL)
goto out_free_ops;
if (!ops->locked.ins->ops)
return 0;
- if (ops->locked.ins->ops->parse)
- ops->locked.ins->ops->parse(ops->locked.ops);
+ if (ops->locked.ins->ops->parse &&
+ ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+ goto out_free_ops;
return 0;
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
static void lock__delete(struct ins_operands *ops)
{
+ struct ins *ins = ops->locked.ins;
+
+ if (ins && ins->ops->free)
+ ins->ops->free(ops->locked.ops);
+ else
+ ins__delete(ops->locked.ops);
+
zfree(&ops->locked.ops);
zfree(&ops->target.raw);
zfree(&ops->target.name);
@@ -229,7 +239,7 @@ static int mov__parse(struct ins_operands *ops)
*s = '\0';
ops->source.raw = strdup(ops->raw);
*s = ',';
-
+
if (ops->source.raw == NULL)
return -1;
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
if (!dl->ins->ops)
return;
- if (dl->ins->ops->parse)
- dl->ins->ops->parse(&dl->ops);
+ if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+ dl->ins = NULL;
}
static int disasm_line__parse(char *line, char **namep, char **rawp)
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 0784a9420528..cadbdc90a5cb 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -116,11 +116,6 @@ struct annotation {
struct annotated_source *src;
};
-struct sannotation {
- struct annotation annotation;
- struct symbol symbol;
-};
-
static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
{
return (((void *)&notes->src->histograms) +
@@ -129,8 +124,7 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i
static inline struct annotation *symbol__annotation(struct symbol *sym)
{
- struct sannotation *a = container_of(sym, struct sannotation, symbol);
- return &a->annotation;
+ return (void *)sym - symbol_conf.priv_size;
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
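
symbol__annotation() above drops the wrapper struct because perf lays out symbol_conf.priv_size bytes of per-tool private data immediately before each struct symbol, so the annotation is reached by stepping back that many bytes. A minimal sketch of that allocation layout; the types and helpers here are hypothetical, not perf's:

    #include <stdio.h>
    #include <stdlib.h>

    struct note   { int hits; };          /* plays the role of struct annotation */
    struct symbol { char name[32]; };     /* trimmed-down struct symbol */

    static size_t priv_size = sizeof(struct note);   /* symbol_conf.priv_size */

    /* Allocate the private area and the symbol in one block,
     * and hand back a pointer to the symbol part only. */
    static struct symbol *symbol_new(const char *name)
    {
        void *blob = calloc(1, priv_size + sizeof(struct symbol));
        struct symbol *sym;

        if (!blob)
            return NULL;
        sym = (struct symbol *)((char *)blob + priv_size);
        snprintf(sym->name, sizeof(sym->name), "%s", name);
        return sym;
    }

    static struct note *symbol_note(struct symbol *sym)
    {
        /* mirrors: return (void *)sym - symbol_conf.priv_size; */
        return (struct note *)((char *)sym - priv_size);
    }

    int main(void)
    {
        struct symbol *sym = symbol_new("main");

        if (!sym)
            return 1;
        symbol_note(sym)->hits = 42;
        printf("%s: %d hits\n", sym->name, symbol_note(sym)->hits);
        free((char *)sym - priv_size);   /* free the original allocation */
        return 0;
    }
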
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 5cf9e1b5989d..d04d770d90f6 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -71,7 +71,9 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
extern char *perf_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
+#ifndef __UCLIBC__
/* Matches the libc/libbsd function attribute so we declare this unconditionally: */
extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index f4654183d391..55355b3d4f85 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -5,132 +5,6 @@
int perf_use_color_default = -1;
-static int parse_color(const char *name, int len)
-{
- static const char * const color_names[] = {
- "normal", "black", "red", "green", "yellow",
- "blue", "magenta", "cyan", "white"
- };
- char *end;
- int i;
-
- for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
- const char *str = color_names[i];
- if (!strncasecmp(name, str, len) && !str[len])
- return i - 1;
- }
- i = strtol(name, &end, 10);
- if (end - name == len && i >= -1 && i <= 255)
- return i;
- return -2;
-}
-
-static int parse_attr(const char *name, int len)
-{
- static const int attr_values[] = { 1, 2, 4, 5, 7 };
- static const char * const attr_names[] = {
- "bold", "dim", "ul", "blink", "reverse"
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
- const char *str = attr_names[i];
- if (!strncasecmp(name, str, len) && !str[len])
- return attr_values[i];
- }
- return -1;
-}
-
-void color_parse(const char *value, const char *var, char *dst)
-{
- color_parse_mem(value, strlen(value), var, dst);
-}
-
-void color_parse_mem(const char *value, int value_len, const char *var,
- char *dst)
-{
- const char *ptr = value;
- int len = value_len;
- int attr = -1;
- int fg = -2;
- int bg = -2;
-
- if (!strncasecmp(value, "reset", len)) {
- strcpy(dst, PERF_COLOR_RESET);
- return;
- }
-
- /* [fg [bg]] [attr] */
- while (len > 0) {
- const char *word = ptr;
- int val, wordlen = 0;
-
- while (len > 0 && !isspace(word[wordlen])) {
- wordlen++;
- len--;
- }
-
- ptr = word + wordlen;
- while (len > 0 && isspace(*ptr)) {
- ptr++;
- len--;
- }
-
- val = parse_color(word, wordlen);
- if (val >= -1) {
- if (fg == -2) {
- fg = val;
- continue;
- }
- if (bg == -2) {
- bg = val;
- continue;
- }
- goto bad;
- }
- val = parse_attr(word, wordlen);
- if (val < 0 || attr != -1)
- goto bad;
- attr = val;
- }
-
- if (attr >= 0 || fg >= 0 || bg >= 0) {
- int sep = 0;
-
- *dst++ = '\033';
- *dst++ = '[';
- if (attr >= 0) {
- *dst++ = '0' + attr;
- sep++;
- }
- if (fg >= 0) {
- if (sep++)
- *dst++ = ';';
- if (fg < 8) {
- *dst++ = '3';
- *dst++ = '0' + fg;
- } else {
- dst += sprintf(dst, "38;5;%d", fg);
- }
- }
- if (bg >= 0) {
- if (sep++)
- *dst++ = ';';
- if (bg < 8) {
- *dst++ = '4';
- *dst++ = '0' + bg;
- } else {
- dst += sprintf(dst, "48;5;%d", bg);
- }
- }
- *dst++ = 'm';
- }
- *dst = 0;
- return;
-bad:
- die("bad color value '%.*s' for variable '%s'", value_len, value, var);
-}
-
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
if (value) {
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 0a594b8a0c26..38146f922c54 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -30,8 +30,6 @@ extern int perf_use_color_default;
int perf_color_default_config(const char *var, const char *value, void *cb);
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
-void color_parse(const char *value, const char *var, char *dst);
-void color_parse_mem(const char *value, int len, const char *var, char *dst);
int color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args);
int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 45be944d450a..c2f7d3b90966 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -532,12 +532,8 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
break;
cache_offset = offset & DSO__DATA_CACHE_MASK;
- ret = -EINVAL;
- if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
- break;
-
- ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
+ ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
if (ret <= 0)
break;
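The dso_cache__read change above replaces a seek-plus-read pair with a single pread(), which also leaves the shared file descriptor's offset alone. A minimal standalone sketch of the two patterns, using an arbitrary readable file and made-up sizes purely for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define CACHE_SIZE 4096

int main(void)
{
	char buf[CACHE_SIZE];
	off_t cache_offset = 8192;            /* arbitrary example offset */
	int fd = open("/bin/true", O_RDONLY); /* any readable file will do */
	ssize_t ret;

	if (fd < 0)
		return 1;

	/* old pattern: two syscalls, and it moves the shared fd offset */
	if (lseek(fd, cache_offset, SEEK_SET) != -1)
		ret = read(fd, buf, sizeof(buf));

	/* new pattern: one syscall, fd offset left untouched */
	ret = pread(fd, buf, sizeof(buf), cache_offset);
	printf("pread returned %zd bytes\n", ret);

	close(fd);
	return 0;
}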
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 3782c82c6e44..ced92841ff97 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -139,6 +139,7 @@ struct dso {
u32 status_seen;
size_t file_size;
struct list_head open_entry;
+ u64 frame_offset;
} data;
union { /* Tool specific area */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index cbab1fb77b1d..28b8ce86bf12 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1436,33 +1436,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
return printed + fprintf(fp, "\n");
}
-int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
- int err, char *buf, size_t size)
-{
- char sbuf[128];
-
- switch (err) {
- case ENOENT:
- scnprintf(buf, size, "%s",
- "Error:\tUnable to find debugfs\n"
- "Hint:\tWas your kernel was compiled with debugfs support?\n"
- "Hint:\tIs the debugfs filesystem mounted?\n"
- "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
- break;
- case EACCES:
- scnprintf(buf, size,
- "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
- "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
- debugfs_mountpoint, debugfs_mountpoint);
- break;
- default:
- scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
- break;
- }
-
- return 0;
-}
-
int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
int err, char *buf, size_t size)
{
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0ba93f67ab94..c94a9e03ecf1 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -183,7 +183,6 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
-int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1e90c8557ede..ea51a90e20a0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -709,6 +709,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
if (opts->sample_weight)
perf_evsel__set_sample_bit(evsel, WEIGHT);
+ attr->task = track;
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
@@ -797,6 +798,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
+ if (ncpus == 0 || nthreads == 0)
+ return 0;
+
if (evsel->system_wide)
nthreads = 1;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b20e40c74468..1f407f7352a7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2237,6 +2237,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
* - unique number to identify actual perf.data files
* - encode endianness of file
*/
+ ph->version = PERF_HEADER_VERSION_2;
/* check magic number with one endianness */
if (magic == __perf_magic2)
@@ -2247,7 +2248,6 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
return -1;
ph->needs_swap = true;
- ph->version = PERF_HEADER_VERSION_2;
return 0;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 182395546ddc..70b48a65064c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -241,6 +241,20 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
return he->stat.period == 0;
}
+static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
+{
+ rb_erase(&he->rb_node, &hists->entries);
+
+ if (sort__need_collapse)
+ rb_erase(&he->rb_node_in, &hists->entries_collapsed);
+
+ --hists->nr_entries;
+ if (!he->filtered)
+ --hists->nr_non_filtered_entries;
+
+ hist_entry__delete(he);
+}
+
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
struct rb_node *next = rb_first(&hists->entries);
@@ -258,16 +272,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
(zap_kernel && n->level != '.') ||
hists__decay_entry(hists, n)) &&
!n->used) {
- rb_erase(&n->rb_node, &hists->entries);
-
- if (sort__need_collapse)
- rb_erase(&n->rb_node_in, &hists->entries_collapsed);
-
- --hists->nr_entries;
- if (!n->filtered)
- --hists->nr_non_filtered_entries;
-
- hist_entry__free(n);
+ hists__delete_entry(hists, n);
}
}
}
@@ -281,16 +286,7 @@ void hists__delete_entries(struct hists *hists)
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- rb_erase(&n->rb_node, &hists->entries);
-
- if (sort__need_collapse)
- rb_erase(&n->rb_node_in, &hists->entries_collapsed);
-
- --hists->nr_entries;
- if (!n->filtered)
- --hists->nr_non_filtered_entries;
-
- hist_entry__free(n);
+ hists__delete_entry(hists, n);
}
}
@@ -433,6 +429,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
if (!he)
return NULL;
+ hists->nr_entries++;
+
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
@@ -915,7 +913,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
if (perf_hpp__should_skip(fmt))
continue;
- cmp = fmt->cmp(left, right);
+ cmp = fmt->cmp(fmt, left, right);
if (cmp)
break;
}
@@ -933,7 +931,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
if (perf_hpp__should_skip(fmt))
continue;
- cmp = fmt->collapse(left, right);
+ cmp = fmt->collapse(fmt, left, right);
if (cmp)
break;
}
@@ -941,7 +939,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
return cmp;
}
-void hist_entry__free(struct hist_entry *he)
+void hist_entry__delete(struct hist_entry *he)
{
zfree(&he->branch_info);
zfree(&he->mem_info);
@@ -981,7 +979,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
iter->callchain,
he->callchain);
}
- hist_entry__free(he);
+ hist_entry__delete(he);
return false;
}
@@ -1063,7 +1061,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
if (perf_hpp__should_skip(fmt))
continue;
- cmp = fmt->sort(a, b);
+ cmp = fmt->sort(fmt, a, b);
if (cmp)
break;
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 46bd50344f85..2b690d028907 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -119,7 +119,7 @@ int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
-void hist_entry__free(struct hist_entry *);
+void hist_entry__delete(struct hist_entry *he);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
@@ -195,9 +195,12 @@ struct perf_hpp_fmt {
struct hist_entry *he);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
- int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b);
- int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b);
- int64_t (*sort)(struct hist_entry *a, struct hist_entry *b);
+ int64_t (*cmp)(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b);
+ int64_t (*collapse)(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b);
+ int64_t (*sort)(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b);
struct list_head list;
struct list_head sort_list;
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
deleted file mode 100644
index 5c1d0d099f0d..000000000000
--- a/tools/perf/util/hweight.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <linux/bitops.h>
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-unsigned int hweight32(unsigned int w)
-{
- unsigned int res = w - ((w >> 1) & 0x55555555);
- res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
- res = (res + (res >> 4)) & 0x0F0F0F0F;
- res = res + (res >> 8);
- return (res + (res >> 16)) & 0x000000FF;
-}
-
-unsigned long hweight64(__u64 w)
-{
-#if BITS_PER_LONG == 32
- return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
-#elif BITS_PER_LONG == 64
- __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
- res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
- res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
- res = res + (res >> 8);
- res = res + (res >> 16);
- return (res + (res >> 32)) & 0x00000000000000FFul;
-#endif
-}
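The deleted file is the classic parallel popcount; perf now picks the shared implementation up from tools/lib/hweight.c instead (see the python-ext-sources hunk below). As a quick sanity sketch, the 32-bit variant can be checked against GCC's __builtin_popcount (assuming a GCC or Clang toolchain):

#include <stdio.h>

static unsigned int hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);        /* 2-bit sums */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);  /* 4-bit sums */
	res = (res + (res >> 4)) & 0x0F0F0F0F;                 /* 8-bit sums */
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

int main(void)
{
	unsigned int samples[] = { 0x0, 0x1, 0xF0F0F0F0, 0xFFFFFFFF };
	int i;

	for (i = 0; i < 4; i++)
		printf("hweight32(0x%08x) = %u (builtin says %d)\n",
		       samples[i], hweight32(samples[i]),
		       __builtin_popcount(samples[i]));
	return 0;
}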
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
deleted file mode 100644
index 36cf26d434a5..000000000000
--- a/tools/perf/util/include/asm/hweight.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef PERF_HWEIGHT_H
-#define PERF_HWEIGHT_H
-
-#include <linux/types.h>
-unsigned int hweight32(unsigned int w);
-unsigned long hweight64(__u64 w);
-
-#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 94de3e48b490..1bca3a9f2b16 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -389,7 +389,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
rb_insert_color(&th->rb_node, &machine->threads);
- machine->last_match = th;
/*
* We have to initialize map_groups separately
@@ -400,9 +399,12 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
* leader and that would screw up the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
+ rb_erase(&th->rb_node, &machine->threads);
thread__delete(th);
return NULL;
}
+
+ machine->last_match = th;
}
return th;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 6951a9d42339..0e42438b1e59 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -116,6 +116,22 @@ struct thread;
#define map__for_each_symbol(map, pos, n) \
dso__for_each_symbol(map->dso, pos, n, map->type)
+/* map__for_each_symbol_by_name - iterate over the symbols in the given map
+ * that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \
+ for (pos = map__find_symbol_by_name(map, sym_name, filter); \
+ pos && strcmp(pos->name, sym_name) == 0; \
+ pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos) \
+ __map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
void map__init(struct map *map, enum map_type type,
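The iterator starts at the first symbol whose name matches and keeps going while the name still compares equal, which is how find_probe_functions further down in this patch collects duplicate local symbols. The same walk over a plain name-sorted array, as a self-contained sketch with invented sample data (the real code walks an rb-tree, not an array):

#include <stdio.h>
#include <string.h>

struct sym { const char *name; unsigned long addr; };

/* name-sorted, with duplicate local symbols; invented sample data */
static struct sym syms[] = {
	{ "bar", 0x1000 },
	{ "foo", 0x2000 },
	{ "foo", 0x3000 },
	{ "foo", 0x4000 },
	{ "zap", 0x5000 },
};

int main(void)
{
	const char *want = "foo";
	int n = sizeof(syms) / sizeof(syms[0]);
	int i, found = 0;

	/* find the first entry with the wanted name ... */
	for (i = 0; i < n && strcmp(syms[i].name, want) < 0; i++)
		;
	/* ... then visit every duplicate, like map__for_each_symbol_by_name */
	for (; i < n && !strcmp(syms[i].name, want); i++) {
		printf("%s at 0x%lx\n", syms[i].name, syms[i].addr);
		found++;
	}
	printf("%d matches\n", found);
	return 0;
}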
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 77b43fe43d55..7f8ec6ce2823 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -526,7 +526,7 @@ do { \
}
int parse_events_add_breakpoint(struct list_head *list, int *idx,
- void *ptr, char *type)
+ void *ptr, char *type, u64 len)
{
struct perf_event_attr attr;
@@ -536,14 +536,15 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
if (parse_breakpoint_type(type, &attr))
return -EINVAL;
- /*
- * We should find a nice way to override the access length
- * Provide some defaults for now
- */
- if (attr.bp_type == HW_BREAKPOINT_X)
- attr.bp_len = sizeof(long);
- else
- attr.bp_len = HW_BREAKPOINT_LEN_4;
+ /* Provide some defaults if len is not specified */
+ if (!len) {
+ if (attr.bp_type == HW_BREAKPOINT_X)
+ len = sizeof(long);
+ else
+ len = HW_BREAKPOINT_LEN_4;
+ }
+
+ attr.bp_len = len;
attr.type = PERF_TYPE_BREAKPOINT;
attr.sample_period = 1;
@@ -1121,7 +1122,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
return;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
- if (subsys_glob != NULL &&
+ if (subsys_glob != NULL &&
!strglobmatch(sys_dirent.d_name, subsys_glob))
continue;
@@ -1132,7 +1133,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
continue;
for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
- if (event_glob != NULL &&
+ if (event_glob != NULL &&
!strglobmatch(evt_dirent.d_name, event_glob))
continue;
@@ -1305,7 +1306,7 @@ static void print_symbol_events(const char *event_glob, unsigned type,
for (i = 0; i < max; i++, syms++) {
- if (event_glob != NULL &&
+ if (event_glob != NULL &&
!(strglobmatch(syms->symbol, event_glob) ||
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
@@ -1366,7 +1367,7 @@ void print_events(const char *event_glob, bool name_only)
printf("\n");
printf(" %-50s [%s]\n",
- "mem:<addr>[:access]",
+ "mem:<addr>[/len][:access]",
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
printf("\n");
}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index db2cf78ff0f3..ff6e1fa4111e 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -71,6 +71,7 @@ struct parse_events_term {
int type_val;
int type_term;
struct list_head list;
+ bool used;
};
struct parse_events_evlist {
@@ -104,7 +105,7 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2);
int parse_events_add_breakpoint(struct list_head *list, int *idx,
- void *ptr, char *type);
+ void *ptr, char *type, u64 len);
int parse_events_add_pmu(struct list_head *list, int *idx,
char *pmu , struct list_head *head_config);
enum perf_pmu_event_symbol_type
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 906630bbf8eb..94eacb6c1ef7 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -159,6 +159,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
<mem>{
{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
: { return ':'; }
+"/" { return '/'; }
{num_dec} { return value(yyscanner, 10); }
{num_hex} { return value(yyscanner, 16); }
/*
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 93c4c9fbc922..72def077dbbf 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -326,6 +326,28 @@ PE_NAME_CACHE_TYPE
}
event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+ (void *) $2, $6, $4));
+ $$ = list;
+}
+|
+PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+ (void *) $2, NULL, $4));
+ $$ = list;
+}
+|
PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_evlist *data = _data;
@@ -333,7 +355,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
- (void *) $2, $4));
+ (void *) $2, $4, 0));
$$ = list;
}
|
@@ -344,7 +366,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
- (void *) $2, NULL));
+ (void *) $2, NULL, 0));
$$ = list;
}
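With the grammar additions above, a breakpoint event can carry an explicit access length, e.g. mem:<addr>/<len>[:access]; when the length is omitted, parse_events_add_breakpoint falls back to the old defaults. A small standalone sketch of that default rule (the HW_BREAKPOINT_* values below are placeholders standing in for the uapi definitions):

#include <stdio.h>

/* placeholder values; the real ones live in uapi/linux/hw_breakpoint.h */
#define HW_BREAKPOINT_X     4
#define HW_BREAKPOINT_W     2
#define HW_BREAKPOINT_LEN_4 4

static unsigned long bp_len(int bp_type, unsigned long len)
{
	/* provide some defaults if len is not specified, as in the patch */
	if (!len)
		len = (bp_type == HW_BREAKPOINT_X) ? sizeof(long)  /* host word size */
						   : HW_BREAKPOINT_LEN_4;
	return len;
}

int main(void)
{
	printf("mem:ADDR:w   -> bp_len %lu\n", bp_len(HW_BREAKPOINT_W, 0));
	printf("mem:ADDR/8:w -> bp_len %lu\n", bp_len(HW_BREAKPOINT_W, 8));
	printf("mem:ADDR:x   -> bp_len %lu\n", bp_len(HW_BREAKPOINT_X, 0));
	return 0;
}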
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index f62dee7bd924..4a015f77e2b5 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -46,7 +46,7 @@ static int get_value(struct parse_opt_ctx_t *p,
return opterror(opt, "is not usable", flags);
if (opt->flags & PARSE_OPT_EXCLUSIVE) {
- if (p->excl_opt) {
+ if (p->excl_opt && p->excl_opt != opt) {
char msg[128];
if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 5c9c4947cfb4..48411674da0f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -551,31 +551,68 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
}
/*
+ * Term is a string term, and might be a param-term. Try to look up its value
+ * in the remaining terms.
+ * - We have a term like "base-or-format-term=param-term",
+ * - We need to find the value supplied for "param-term" (with param-term named
+ * in a config string) later on in the term list.
+ */
+static int pmu_resolve_param_term(struct parse_events_term *term,
+ struct list_head *head_terms,
+ __u64 *value)
+{
+ struct parse_events_term *t;
+
+ list_for_each_entry(t, head_terms, list) {
+ if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ if (!strcmp(t->config, term->config)) {
+ t->used = true;
+ *value = t->val.num;
+ return 0;
+ }
+ }
+ }
+
+ if (verbose)
+ printf("Required parameter '%s' not specified\n", term->config);
+
+ return -1;
+}
+
+/*
* Setup one of config[12] attr members based on the
* user input data - term parameter.
*/
static int pmu_config_term(struct list_head *formats,
struct perf_event_attr *attr,
struct parse_events_term *term,
+ struct list_head *head_terms,
bool zero)
{
struct perf_pmu_format *format;
__u64 *vp;
+ __u64 val;
+
+ /*
+ * If this is a parameter we've already used for parameterized-eval,
+ * skip it in normal eval.
+ */
+ if (term->used)
+ return 0;
/*
- * Support only for hardcoded and numnerial terms.
* Hardcoded terms should be already in, so nothing
* to be done for them.
*/
if (parse_events__is_hardcoded_term(term))
return 0;
- if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
- return -EINVAL;
-
format = pmu_find_format(formats, term->config);
- if (!format)
+ if (!format) {
+ if (verbose)
+ printf("Invalid event/parameter '%s'\n", term->config);
return -EINVAL;
+ }
switch (format->value) {
case PERF_PMU_FORMAT_VALUE_CONFIG:
@@ -592,11 +629,25 @@ static int pmu_config_term(struct list_head *formats,
}
/*
- * XXX If we ever decide to go with string values for
- * non-hardcoded terms, here's the place to translate
- * them into value.
+ * Either directly use a numeric term, or try to translate string terms
+ * using event parameters.
*/
- pmu_format_value(format->bits, term->val.num, vp, zero);
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+ val = term->val.num;
+ else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+ if (strcmp(term->val.str, "?")) {
+ if (verbose)
+ pr_info("Invalid sysfs entry %s=%s\n",
+ term->config, term->val.str);
+ return -EINVAL;
+ }
+
+ if (pmu_resolve_param_term(term, head_terms, &val))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ pmu_format_value(format->bits, val, vp, zero);
return 0;
}
@@ -607,9 +658,10 @@ int perf_pmu__config_terms(struct list_head *formats,
{
struct parse_events_term *term;
- list_for_each_entry(term, head_terms, list)
- if (pmu_config_term(formats, attr, term, zero))
+ list_for_each_entry(term, head_terms, list) {
+ if (pmu_config_term(formats, attr, term, head_terms, zero))
return -EINVAL;
+ }
return 0;
}
@@ -767,10 +819,36 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
set_bit(b, bits);
}
+static int sub_non_neg(int a, int b)
+{
+ if (b > a)
+ return 0;
+ return a - b;
+}
+
static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
struct perf_pmu_alias *alias)
{
- snprintf(buf, len, "%s/%s/", pmu->name, alias->name);
+ struct parse_events_term *term;
+ int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
+
+ list_for_each_entry(term, &alias->terms, list) {
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+ used += snprintf(buf + used, sub_non_neg(len, used),
+ ",%s=%s", term->config,
+ term->val.str);
+ }
+
+ if (sub_non_neg(len, used) > 0) {
+ buf[used] = '/';
+ used++;
+ }
+ if (sub_non_neg(len, used) > 0) {
+ buf[used] = '\0';
+ used++;
+ } else
+ buf[len - 1] = '\0';
+
return buf;
}
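To make the parameterized-event path concrete: a sysfs alias may contain a term such as core=?, and pmu_resolve_param_term fills that slot from a numeric term the user supplies alongside it, failing if none is given. A self-contained sketch of that lookup over a simplified term list (struct term here is an invented stand-in for parse_events_term):

#include <stdio.h>
#include <string.h>

struct term {             /* simplified stand-in for parse_events_term */
	const char *config;
	const char *str;  /* "?" marks a parameter still to be resolved */
	long long num;    /* valid when str == NULL */
};

static int resolve_param(const struct term *param,
			 const struct term *terms, int nr, long long *value)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* only numeric terms can supply a parameter value */
		if (terms[i].str == NULL &&
		    !strcmp(terms[i].config, param->config)) {
			*value = terms[i].num;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	/* as if the user typed pmu/some_alias,core=1/ and the alias had core=? */
	struct term terms[] = {
		{ "domain", NULL, 0x2 },
		{ "core",   "?",  0   },   /* from the sysfs alias */
		{ "core",   NULL, 1   },   /* supplied on the command line */
	};
	long long val;

	if (!resolve_param(&terms[1], terms, 3, &val))
		printf("core resolved to %lld\n", val);
	else
		printf("required parameter not specified\n");
	return 0;
}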
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 7f9b8632e433..919937eb0be2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
}
for (i = 0; i < ntevs; i++) {
- if (tevs[i].point.address) {
+ if (tevs[i].point.address && !tevs[i].point.retprobe) {
tmp = strdup(reloc_sym->name);
if (!tmp)
return -ENOMEM;
@@ -2052,9 +2052,11 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
pr_debug("Writing event: %s\n", buf);
if (!probe_event_dry_run) {
ret = write(fd, buf, strlen(buf));
- if (ret <= 0)
+ if (ret <= 0) {
+ ret = -errno;
pr_warning("Failed to write event: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
+ }
}
free(buf);
return ret;
@@ -2191,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
return ret;
}
-static char *looking_function_name;
-static int num_matched_functions;
-
-static int probe_function_filter(struct map *map __maybe_unused,
- struct symbol *sym)
+static int find_probe_functions(struct map *map, char *name)
{
- if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
- strcmp(looking_function_name, sym->name) == 0) {
- num_matched_functions++;
- return 0;
+ int found = 0;
+ struct symbol *sym;
+
+ map__for_each_symbol_by_name(map, name, sym) {
+ if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
+ found++;
}
- return 1;
+
+ return found;
}
#define strdup_or_goto(str, label) \
@@ -2220,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
struct kmap *kmap = NULL;
struct ref_reloc_sym *reloc_sym = NULL;
struct symbol *sym;
- struct rb_node *nd;
struct probe_trace_event *tev;
struct perf_probe_point *pp = &pev->point;
struct probe_trace_point *tp;
+ int num_matched_functions;
int ret, i;
/* Init maps of given executable or kernel */
@@ -2240,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
* Load matched symbols: Since the different local symbols may have
* same name but different addresses, this lists all the symbols.
*/
- num_matched_functions = 0;
- looking_function_name = pp->function;
- ret = map__load(map, probe_function_filter);
- if (ret || num_matched_functions == 0) {
+ num_matched_functions = find_probe_functions(map, pp->function);
+ if (num_matched_functions == 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
target ? : "kernel");
ret = -ENOENT;
@@ -2255,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
goto out;
}
- if (!pev->uprobes) {
+ if (!pev->uprobes && !pp->retprobe) {
kmap = map__kmap(map);
reloc_sym = kmap->ref_reloc_sym;
if (!reloc_sym) {
@@ -2273,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
ret = 0;
- map__for_each_symbol(map, sym, nd) {
+
+ map__for_each_symbol_by_name(map, pp->function, sym) {
tev = (*tevs) + ret;
tp = &tev->point;
if (ret == num_matched_functions) {
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 16a475a7d492..6c6a6953fa93 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,7 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
-util/hweight.c
+../../lib/hweight.c
util/thread_map.c
util/util.c
util/xyarray.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 3dda85ca50c1..d906d0ad5d40 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -768,7 +768,7 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
Py_DECREF(file);
goto free_list;
}
-
+
Py_DECREF(file);
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index d808a328f4dc..0c815a40a6e8 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -89,7 +89,7 @@ static void handler_call_die(const char *handler_name)
/*
* Insert val into into the dictionary and decrement the reference counter.
- * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
* steal a reference, as opposed to PyTuple_SetItem().
*/
static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5f0e05a76c05..0baf75f12b7c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -274,7 +274,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->id_index == NULL)
tool->id_index = process_id_index_stub;
}
-
+
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
@@ -1251,9 +1251,9 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
-int __perf_session__process_events(struct perf_session *session,
- u64 data_offset, u64 data_size,
- u64 file_size, struct perf_tool *tool)
+static int __perf_session__process_events(struct perf_session *session,
+ u64 data_offset, u64 data_size,
+ u64 file_size, struct perf_tool *tool)
{
int fd = perf_data_file__fd(session->file);
u64 head, page_offset, file_offset, file_pos, size;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index dc26ebf60fe4..6d663dc76404 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -49,9 +49,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
union perf_event **event_ptr,
struct perf_sample *sample);
-int __perf_session__process_events(struct perf_session *session,
- u64 data_offset, u64 data_size, u64 size,
- struct perf_tool *tool);
int perf_session__process_events(struct perf_session *session,
struct perf_tool *tool);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 9139dda9f9a3..7a39c1ed8d37 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1304,6 +1304,37 @@ static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
+static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ struct hpp_sort_entry *hse;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ return hse->se->se_cmp(a, b);
+}
+
+static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ struct hpp_sort_entry *hse;
+ int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
+ return collapse_fn(a, b);
+}
+
+static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ struct hpp_sort_entry *hse;
+ int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
+ return sort_fn(a, b);
+}
+
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
{
@@ -1322,9 +1353,9 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
hse->hpp.entry = __sort__hpp_entry;
hse->hpp.color = NULL;
- hse->hpp.cmp = sd->entry->se_cmp;
- hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
- hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
+ hse->hpp.cmp = __sort__hpp_cmp;
+ hse->hpp.collapse = __sort__hpp_collapse;
+ hse->hpp.sort = __sort__hpp_sort;
INIT_LIST_HEAD(&hse->hpp.list);
INIT_LIST_HEAD(&hse->hpp.sort_list);
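The wrappers above recover the enclosing hpp_sort_entry from the embedded perf_hpp_fmt with container_of before delegating to the old two-argument callbacks. The idiom in isolation, as a standalone sketch with invented stand-in types:

#include <stddef.h>
#include <stdio.h>

/* same trick as the kernel macro, reproduced here so the sketch is standalone */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fmt {                       /* stand-in for perf_hpp_fmt */
	int (*cmp)(struct fmt *fmt, int a, int b);
};

struct sort_entry {                /* stand-in for hpp_sort_entry */
	struct fmt hpp;            /* embedded, like hse->hpp */
	const char *name;
};

static int my_cmp(struct fmt *fmt, int a, int b)
{
	/* recover the enclosing entry from the embedded fmt, as __sort__hpp_cmp does */
	struct sort_entry *se = container_of(fmt, struct sort_entry, hpp);

	printf("comparing via %s\n", se->name);
	return a - b;
}

int main(void)
{
	struct sort_entry se = { .hpp = { .cmp = my_cmp }, .name = "overhead" };

	return se.hpp.cmp(&se.hpp, 1, 2) < 0 ? 0 : 1;
}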
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 06fcd1bf98b6..b24f9d8727a8 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -574,13 +574,16 @@ static int decompress_kmodule(struct dso *dso, const char *name,
const char *ext = strrchr(name, '.');
char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
- if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
- type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) ||
- type != dso->symtab_type)
+ if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
+ type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
+ type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
return -1;
- if (!ext || !is_supported_compression(ext + 1))
- return -1;
+ if (!ext || !is_supported_compression(ext + 1)) {
+ ext = strrchr(dso->name, '.');
+ if (!ext || !is_supported_compression(ext + 1))
+ return -1;
+ }
fd = mkstemp(tmpbuf);
if (fd < 0)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c24c5b83156c..a69066865a55 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
const char *name)
{
struct rb_node *n;
+ struct symbol_name_rb_node *s;
if (symbols == NULL)
return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
n = symbols->rb_node;
while (n) {
- struct symbol_name_rb_node *s;
int cmp;
s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
else if (cmp > 0)
n = n->rb_right;
else
- return &s->sym;
+ break;
}
- return NULL;
+ if (n == NULL)
+ return NULL;
+
+ /* return first symbol that has same name (if any) */
+ for (n = rb_prev(n); n; n = rb_prev(n)) {
+ struct symbol_name_rb_node *tmp;
+
+ tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
+ if (strcmp(tmp->sym.name, s->sym.name))
+ break;
+
+ s = tmp;
+ }
+
+ return &s->sym;
}
struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
return symbols__next(sym);
}
+struct symbol *symbol__next_by_name(struct symbol *sym)
+{
+ struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
+ struct rb_node *n = rb_next(&s->rb_node);
+
+ return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+}
+
+/*
+ * Returns the first symbol that matches @name.
+ */
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name)
{
@@ -660,7 +685,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
struct machine *machine = kmaps->machine;
struct map *curr_map = map;
struct symbol *pos;
- int count = 0, moved = 0;
+ int count = 0, moved = 0;
struct rb_root *root = &dso->symbols[map->type];
struct rb_node *next = rb_first(root);
int kernel_range = 0;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 9d602e9c6f59..1650dcb3a67b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
+struct symbol *symbol__next_by_name(struct symbol *sym);
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
struct symbol *dso__next_symbol(struct symbol *sym);
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 371219a6daf1..e3c40a520a25 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -185,6 +185,28 @@ static u64 elf_section_offset(int fd, const char *name)
return offset;
}
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int elf_is_exec(int fd, const char *name)
+{
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ int retval = 0;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return 0;
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out;
+
+ retval = (ehdr.e_type == ET_EXEC);
+
+out:
+ elf_end(elf);
+ pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
+ return retval;
+}
+#endif
+
struct table_entry {
u32 start_ip_offset;
u32 fde_offset;
@@ -244,14 +266,17 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
u64 *fde_count)
{
int ret = -EINVAL, fd;
- u64 offset;
+ u64 offset = dso->data.frame_offset;
- fd = dso__data_fd(dso, machine);
- if (fd < 0)
- return -EINVAL;
+ if (offset == 0) {
+ fd = dso__data_fd(dso, machine);
+ if (fd < 0)
+ return -EINVAL;
- /* Check the .eh_frame section for unwinding info */
- offset = elf_section_offset(fd, ".eh_frame_hdr");
+ /* Check the .eh_frame section for unwinding info */
+ offset = elf_section_offset(fd, ".eh_frame_hdr");
+ dso->data.frame_offset = offset;
+ }
if (offset)
ret = unwind_spec_ehframe(dso, machine, offset,
@@ -265,14 +290,20 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
static int read_unwind_spec_debug_frame(struct dso *dso,
struct machine *machine, u64 *offset)
{
- int fd = dso__data_fd(dso, machine);
+ int fd;
+ u64 ofs = dso->data.frame_offset;
- if (fd < 0)
- return -EINVAL;
+ if (ofs == 0) {
+ fd = dso__data_fd(dso, machine);
+ if (fd < 0)
+ return -EINVAL;
- /* Check the .debug_frame section for unwinding info */
- *offset = elf_section_offset(fd, ".debug_frame");
+ /* Check the .debug_frame section for unwinding info */
+ ofs = elf_section_offset(fd, ".debug_frame");
+ dso->data.frame_offset = ofs;
+ }
+ *offset = ofs;
if (*offset)
return 0;
@@ -322,8 +353,12 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
#ifndef NO_LIBUNWIND_DEBUG_FRAME
/* Check the .debug_frame section for unwinding info */
if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+ int fd = dso__data_fd(map->dso, ui->machine);
+ int is_exec = elf_is_exec(fd, map->dso->name);
+ unw_word_t base = is_exec ? 0 : map->start;
+
memset(&di, 0, sizeof(di));
- if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+ if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
map->start, map->end))
return dwarf_search_unwind_table(as, ip, &di, pi,
need_unwind_info, arg);
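Both unwind helpers above now remember the section offset in the new dso->data.frame_offset field, treating zero as "not looked up yet" so the ELF section search happens at most once per DSO (a missing section keeps the offset at zero, so that case is simply retried). The caching idiom on its own, with a stub standing in for elf_section_offset:

#include <stdio.h>

typedef unsigned long long u64;

struct dso_data { u64 frame_offset; };   /* mirrors the new field in dso.h */

static int lookups;                      /* counts how often we really search */

static u64 elf_section_offset_stub(const char *name)
{
	lookups++;                       /* pretend this is the expensive ELF walk */
	(void)name;
	return 0x1f40;                   /* made-up offset */
}

static u64 get_frame_offset(struct dso_data *data)
{
	if (data->frame_offset == 0)     /* 0 doubles as "never looked up" */
		data->frame_offset = elf_section_offset_stub(".eh_frame_hdr");
	return data->frame_offset;
}

int main(void)
{
	struct dso_data data = { 0 };

	get_frame_offset(&data);
	get_frame_offset(&data);
	printf("offset 0x%llx after %d lookup(s)\n", data.frame_offset, lookups);
	return 0;
}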
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index f4b953354ff7..eec688041500 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 2f0f34a36db4..5da129e10aa2 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
index c13ff9c51d74..b51e40a9a120 100644
--- a/tools/power/acpi/os_specific/service_layers/oslibcfs.c
+++ b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 0dc2485dedf5..92f1fd700344 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index 733f9e490fc4..e153fcb12b1a 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 99b47b6194a3..3853a7350440 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 7ccb073f8316..6858c0893c91 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index a2d37d610639..84bdef0136cb 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 24d32968802d..c736adf5fb55 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index d470046a6d81..8f2fe168228e 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 853b4da22c3e..d0ba6535f5af 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 2e2ba2efa0d9..3ed7c0476d48 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c
$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
$(ECHO) " CC " $@
- $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
+ $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@
$(QUIET) $(STRIPCMD) $@
$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 56bfb523c5bb..feea7ad9500b 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -9,40 +9,50 @@ turbostat \- Report processor frequency and idle statistics
.br
.B turbostat
.RB [ Options ]
-.RB [ "\-i interval_sec" ]
+.RB [ "\--interval seconds" ]
.SH DESCRIPTION
\fBturbostat \fP reports processor topology, frequency,
-idle power-state statistics, temperature and power on modern X86 processors.
-Either \fBcommand\fP is forked and statistics are printed
-upon its completion, or statistics are printed periodically.
-
-\fBturbostat \fP
-must be run on root, and
-minimally requires that the processor
-supports an "invariant" TSC, plus the APERF and MPERF MSRs.
-Additional information is reported depending on hardware counter support.
-
+idle power-state statistics, temperature and power on X86 processors.
+There are two ways to invoke turbostat.
+The first method is to supply a
+\fBcommand\fP, which is forked and statistics are printed
+upon its completion.
+The second method is to omit the command,
+and turbostat displays statistics every 5 seconds.
+The 5-second interval can be changed using the --interval option.
+
+Some information is not available on older processors.
.SS Options
-The \fB-p\fP option limits output to the 1st thread in 1st core of each package.
+\fB--Counter MSR#\fP shows the delta of the specified 64-bit MSR counter.
+.PP
+\fB--counter MSR#\fP shows the delta of the specified 32-bit MSR counter.
+.PP
+\fB--Dump\fP displays the raw counter values.
+.PP
+\fB--debug\fP displays additional system configuration information. Invoking this parameter
+more than once may also enable internal turbostat debug information.
+.PP
+\fB--interval seconds\fP overrides the default 5-second measurement interval.
+.PP
+\fB--help\fP displays usage for the most common parameters.
.PP
-The \fB-P\fP option limits output to the 1st thread in each Package.
+\fB--Joules\fP displays energy in Joules, rather than dividing Joules by time to print power in Watts.
.PP
-The \fB-S\fP option limits output to a 1-line System Summary for each interval.
+\fB--MSR MSR#\fP shows the specified 64-bit MSR value.
.PP
-The \fB-v\fP option increases verbosity.
+\fB--msr MSR#\fP shows the specified 32-bit MSR value.
.PP
-The \fB-c MSR#\fP option includes the delta of the specified 32-bit MSR counter.
+\fB--Package\fP limits output to the system summary plus the 1st thread in each Package.
.PP
-The \fB-C MSR#\fP option includes the delta of the specified 64-bit MSR counter.
+\fB--processor\fP limits output to the system summary plus the 1st thread in each processor of each package, i.e. it skips hyper-threaded siblings.
.PP
-The \fB-m MSR#\fP option includes the the specified 32-bit MSR value.
+\fB--Summary\fP limits output to a 1-line System Summary for each interval.
.PP
-The \fB-M MSR#\fP option includes the the specified 64-bit MSR value.
+\fB--TCC temperature\fP sets the Thermal Control Circuit temperature for systems which do not export that value. This is used for making sense of the Digital Thermal Sensor outputs, as they return degrees Celsius below the TCC activation temperature.
.PP
-The \fB-i interval_sec\fP option prints statistics every \fiinterval_sec\fP seconds.
-The default is 5 seconds.
+\fB--version\fP displays the version.
.PP
-The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
+The \fBcommand\fP parameter forks \fBcommand\fP, and upon its exit,
displays the statistics gathered since it was forked.
.PP
.SH FIELD DESCRIPTIONS
@@ -52,7 +62,7 @@ displays the statistics gathered since it was forked.
\fBCPU\fP Linux CPU (logical processor) number.
Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
\fBAVG_MHz\fP number of cycles executed divided by time elapsed.
-\fB%Buzy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state.
+\fB%Busy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state.
\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state).
\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.
@@ -68,7 +78,7 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T
.fi
.PP
.SH EXAMPLE
-Without any parameters, turbostat prints out counters ever 5 seconds.
+Without any parameters, turbostat displays statistics every 5 seconds.
(override interval with "-i sec" option, or specify a command
for turbostat to fork).
@@ -91,19 +101,19 @@ Subsequent rows show per-CPU statistics.
3 3 3 0.20 1596 3492 0 0.44 0.00 99.37 0.00 23
3 7 5 0.31 1596 3492 0 0.33
.fi
-.SH VERBOSE EXAMPLE
-The "-v" option adds verbosity to the output:
+.SH DEBUG EXAMPLE
+The "--debug" option prints additional system information before measurements:
.nf
-[root@ivy]# turbostat -v
-turbostat v3.0 November 23, 2012 - Len Brown <lenb@kernel.org>
+turbostat version 4.0 10-Feb, 2015 - Len Brown <lenb@kernel.org>
CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9)
CPUID(6): APERF, DTS, PTM, EPB
-RAPL: 851 sec. Joule Counter Range
+RAPL: 851 sec. Joule Counter Range, at 77 Watts
cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300
16 * 100 = 1600 MHz max efficiency
35 * 100 = 3500 MHz TSC frequency
-cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6-noret)
+cpu0: MSR_IA32_POWER_CTL: 0x0014005d (C1E auto-promotion: DISabled)
+cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6n)
cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
37 * 100 = 3700 MHz max turbo 4 active cores
38 * 100 = 3800 MHz max turbo 3 active cores
@@ -112,9 +122,9 @@ cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced)
cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.)
cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.)
-cpu0: MSR_PKG_POWER_LIMIT: 0x830000148268 (UNlocked)
+cpu0: MSR_PKG_POWER_LIMIT: 0x30000148268 (UNlocked)
cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled)
-cpu0: PKG Limit #2: ENabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
+cpu0: PKG Limit #2: DISabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
cpu0: MSR_PP0_POLICY: 0
cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
@@ -123,19 +133,20 @@ cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C)
cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C)
-cpu0: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1)
-cpu1: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1)
-cpu2: MSR_IA32_THERM_STATUS: 0x88540000 (21 C +/- 1)
+cpu0: MSR_IA32_THERM_STATUS: 0x88580000 (17 C +/- 1)
+cpu1: MSR_IA32_THERM_STATUS: 0x885a0000 (15 C +/- 1)
+cpu2: MSR_IA32_THERM_STATUS: 0x88570000 (18 C +/- 1)
cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
...
.fi
The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
-available at the minimum package voltage. The \fBTSC frequency\fP is the nominal
-maximum frequency of the processor if turbo-mode were not available. This frequency
+available at the minimum package voltage. The \fBTSC frequency\fP is the base
+frequency of the processor -- this should match the brand string
+in /proc/cpuinfo. This base frequency
should be sustainable on all CPUs indefinitely, given nominal power and cooling.
The remaining rows show what maximum turbo frequency is possible
-depending on the number of idle cores. Note that this information is
-not available on all processors.
+depending on the number of idle cores. Note that not all information is
+available on all processors.
.SH FORK EXAMPLE
If turbostat is invoked with a command, it will fork that command
and output the statistics gathered when the command exits.
@@ -176,6 +187,11 @@ not including any non-busy idle time.
.B "turbostat "
must be run as root.
+Alternatively, non-root users can be enabled to run turbostat this way:
+
+# setcap cap_sys_rawio=ep ./turbostat
+
+# chmod +r /dev/cpu/*/msr
.B "turbostat "
reads hardware counters, but doesn't write them.
@@ -184,15 +200,33 @@ multiple invocations of itself.
\fBturbostat \fP
may work poorly on Linux-2.6.20 through 2.6.29,
-as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF
+as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF MSRs
in those kernels.
-If the TSC column does not make sense, then
-the other numbers will also make no sense.
-Turbostat is lightweight, and its data collection is not atomic.
-These issues are usually caused by an extremely short measurement
-interval (much less than 1 second), or system activity that prevents
-turbostat from being able to run on all CPUS to quickly collect data.
+AVG_MHz = APERF_delta/measurement_interval. This is the actual
+number of elapsed cycles divided by the entire sample interval --
+including idle time. Note that this calculation is resilient
+to systems lacking a non-stop TSC.
+
+TSC_MHz = TSC_delta/measurement_interval.
+On a system with an invariant TSC, this value will be constant
+and will closely match the base frequency value shown
+in the brand string in /proc/cpuinfo. On a system where
+the TSC stops in idle, TSC_MHz will drop
+below the processor's base frequency.
+
+%Busy = MPERF_delta/TSC_delta
+
+Bzy_MHz = TSC_delta/measurement_interval * APERF_delta/MPERF_delta
+
+Note that these calculations depend on TSC_delta, so they
+are not reliable during intervals when TSC_MHz is not running at the base frequency.
+
+Turbostat data collection is not atomic.
+Extremely short measurement intervals (much less than 1 second),
+or system activity that prevents turbostat from being able
+to run on all CPUs to quickly collect data, will result in
+inconsistent results.
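A compact restatement of those derived columns, using the same arithmetic as format_counters() in the turbostat.c hunk further down; the per-interval delta values are invented for illustration:

#include <stdio.h>

int main(void)
{
	/* invented per-interval deltas for one CPU */
	double aperf = 1.54e9, mperf = 1.4e9, tsc = 17.5e9;
	double interval = 5.0;       /* seconds */
	double units = 1000000.0;    /* counts -> MHz */

	printf("Avg_MHz = %8.0f\n", aperf / units / interval);
	printf("%%Busy   = %8.2f\n", 100.0 * mperf / tsc);
	printf("Bzy_MHz = %8.0f\n", tsc / units * aperf / mperf / interval);
	printf("TSC_MHz = %8.0f\n", tsc / units / interval);
	return 0;
}

Bzy_MHz scales the TSC-derived base frequency by APERF/MPERF, which is why it is only meaningful while the CPU is actually retiring instructions.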
The APERF, MPERF MSRs are defined to count non-halted cycles.
Although it is not guaranteed by the architecture, turbostat assumes
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 5b1b807265a1..2d089cac8580 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -33,24 +33,29 @@
#include <signal.h>
#include <sys/time.h>
#include <stdlib.h>
+#include <getopt.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#include <sched.h>
#include <cpuid.h>
+#include <linux/capability.h>
+#include <errno.h>
char *proc_stat = "/proc/stat";
-unsigned int interval_sec = 5; /* set with -i interval_sec */
-unsigned int verbose; /* set with -v */
-unsigned int rapl_verbose; /* set with -R */
-unsigned int rapl_joules; /* set with -J */
-unsigned int thermal_verbose; /* set with -T */
-unsigned int summary_only; /* set with -S */
-unsigned int dump_only; /* set with -s */
+unsigned int interval_sec = 5;
+unsigned int debug;
+unsigned int rapl_joules;
+unsigned int summary_only;
+unsigned int dump_only;
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
+unsigned int do_pc2;
+unsigned int do_pc3;
+unsigned int do_pc6;
+unsigned int do_pc7;
unsigned int do_c8_c9_c10;
unsigned int do_slm_cstates;
unsigned int use_c1_residency_msr;
@@ -59,8 +64,8 @@ unsigned int has_epb;
unsigned int units = 1000000; /* MHz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
-unsigned int do_nehalem_platform_info;
-unsigned int do_nehalem_turbo_ratio_limit;
+unsigned int do_nhm_platform_info;
+unsigned int do_nhm_turbo_ratio_limit;
unsigned int do_ivt_turbo_ratio_limit;
unsigned int extra_msr_offset32;
unsigned int extra_msr_offset64;
@@ -81,6 +86,9 @@ unsigned int tcc_activation_temp;
unsigned int tcc_activation_temp_override;
double rapl_power_units, rapl_energy_units, rapl_time_units;
double rapl_joule_counter_range;
+unsigned int do_core_perf_limit_reasons;
+unsigned int do_gfx_perf_limit_reasons;
+unsigned int do_ring_perf_limit_reasons;
#define RAPL_PKG (1 << 0)
/* 0x610 MSR_PKG_POWER_LIMIT */
@@ -251,15 +259,13 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
sprintf(pathname, "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDONLY);
if (fd < 0)
- return -1;
+ err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
retval = pread(fd, msr, sizeof *msr, offset);
close(fd);
- if (retval != sizeof *msr) {
- fprintf(stderr, "%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset);
- return -1;
- }
+ if (retval != sizeof *msr)
+ err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
return 0;
}
@@ -281,7 +287,7 @@ void print_header(void)
outp += sprintf(outp, " CPU");
if (has_aperf)
outp += sprintf(outp, " Avg_MHz");
- if (do_nhm_cstates)
+ if (has_aperf)
outp += sprintf(outp, " %%Busy");
if (has_aperf)
outp += sprintf(outp, " Bzy_MHz");
@@ -310,13 +316,13 @@ void print_header(void)
if (do_ptm)
outp += sprintf(outp, " PkgTmp");
- if (do_snb_cstates)
+ if (do_pc2)
outp += sprintf(outp, " Pkg%%pc2");
- if (do_nhm_cstates && !do_slm_cstates)
+ if (do_pc3)
outp += sprintf(outp, " Pkg%%pc3");
- if (do_nhm_cstates && !do_slm_cstates)
+ if (do_pc6)
outp += sprintf(outp, " Pkg%%pc6");
- if (do_snb_cstates)
+ if (do_pc7)
outp += sprintf(outp, " Pkg%%pc7");
if (do_c8_c9_c10) {
outp += sprintf(outp, " Pkg%%pc8");
@@ -337,7 +343,7 @@ void print_header(void)
outp += sprintf(outp, " PKG_%%");
if (do_rapl & RAPL_DRAM_PERF_STATUS)
outp += sprintf(outp, " RAM_%%");
- } else {
+ } else if (do_rapl && rapl_joules) {
if (do_rapl & RAPL_PKG)
outp += sprintf(outp, " Pkg_J");
if (do_rapl & RAPL_CORES)
@@ -391,9 +397,12 @@ int dump_counters(struct thread_data *t, struct core_data *c,
if (p) {
outp += sprintf(outp, "package: %d\n", p->package_id);
outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
- outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
- outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
- outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
+ if (do_pc3)
+ outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
+ if (do_pc6)
+ outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
+ if (do_pc7)
+ outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
@@ -457,25 +466,25 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, "%8d", t->cpu_id);
}
- /* AvgMHz */
+ /* Avg_MHz */
if (has_aperf)
outp += sprintf(outp, "%8.0f",
1.0 / units * t->aperf / interval_float);
- /* %c0 */
- if (do_nhm_cstates) {
+ /* %Busy */
+ if (has_aperf) {
if (!skip_c0)
outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
else
outp += sprintf(outp, "********");
}
- /* BzyMHz */
+ /* Bzy_MHz */
if (has_aperf)
outp += sprintf(outp, "%8.0f",
1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
- /* TSC */
+ /* TSC_MHz */
outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
/* SMI */
@@ -525,13 +534,13 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (do_ptm)
outp += sprintf(outp, "%8d", p->pkg_temp_c);
- if (do_snb_cstates)
+ if (do_pc2)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
- if (do_nhm_cstates && !do_slm_cstates)
+ if (do_pc3)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
- if (do_nhm_cstates && !do_slm_cstates)
+ if (do_pc6)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
- if (do_snb_cstates)
+ if (do_pc7)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
if (do_c8_c9_c10) {
outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
@@ -561,7 +570,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
if (do_rapl & RAPL_DRAM_PERF_STATUS)
outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
- } else {
+ } else if (do_rapl && rapl_joules) {
if (do_rapl & RAPL_PKG)
outp += sprintf(outp, fmt8,
p->energy_pkg * rapl_energy_units);
@@ -578,8 +587,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
if (do_rapl & RAPL_DRAM_PERF_STATUS)
outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
- outp += sprintf(outp, fmt8, interval_float);
+ outp += sprintf(outp, fmt8, interval_float);
}
done:
outp += sprintf(outp, "\n");
@@ -628,9 +637,12 @@ void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
old->pc2 = new->pc2 - old->pc2;
- old->pc3 = new->pc3 - old->pc3;
- old->pc6 = new->pc6 - old->pc6;
- old->pc7 = new->pc7 - old->pc7;
+ if (do_pc3)
+ old->pc3 = new->pc3 - old->pc3;
+ if (do_pc6)
+ old->pc6 = new->pc6 - old->pc6;
+ if (do_pc7)
+ old->pc7 = new->pc7 - old->pc7;
old->pc8 = new->pc8 - old->pc8;
old->pc9 = new->pc9 - old->pc9;
old->pc10 = new->pc10 - old->pc10;
@@ -670,24 +682,26 @@ delta_thread(struct thread_data *new, struct thread_data *old,
old->c1 = new->c1 - old->c1;
- if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
- old->aperf = new->aperf - old->aperf;
- old->mperf = new->mperf - old->mperf;
- } else {
+ if (has_aperf) {
+ if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
+ old->aperf = new->aperf - old->aperf;
+ old->mperf = new->mperf - old->mperf;
+ } else {
- if (!aperf_mperf_unstable) {
- fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
- fprintf(stderr, "* Frequency results do not cover entire interval *\n");
- fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+ if (!aperf_mperf_unstable) {
+ fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+ fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+ fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
- aperf_mperf_unstable = 1;
+ aperf_mperf_unstable = 1;
+ }
+ /*
+ * mperf delta is likely a huge "positive" number
+ * can not use it for calculating c0 time
+ */
+ skip_c0 = 1;
+ skip_c1 = 1;
}
- /*
- * mperf delta is likely a huge "positive" number
- * can not use it for calculating c0 time
- */
- skip_c0 = 1;
- skip_c1 = 1;
}
@@ -712,7 +726,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
}
if (old->mperf == 0) {
- if (verbose > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
+ if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
old->mperf = 1; /* divide by 0 protection */
}
@@ -769,9 +783,12 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->core_temp_c = 0;
p->pc2 = 0;
- p->pc3 = 0;
- p->pc6 = 0;
- p->pc7 = 0;
+ if (do_pc3)
+ p->pc3 = 0;
+ if (do_pc6)
+ p->pc6 = 0;
+ if (do_pc7)
+ p->pc7 = 0;
p->pc8 = 0;
p->pc9 = 0;
p->pc10 = 0;
@@ -810,9 +827,12 @@ int sum_counters(struct thread_data *t, struct core_data *c,
return 0;
average.packages.pc2 += p->pc2;
- average.packages.pc3 += p->pc3;
- average.packages.pc6 += p->pc6;
- average.packages.pc7 += p->pc7;
+ if (do_pc3)
+ average.packages.pc3 += p->pc3;
+ if (do_pc6)
+ average.packages.pc6 += p->pc6;
+ if (do_pc7)
+ average.packages.pc7 += p->pc7;
average.packages.pc8 += p->pc8;
average.packages.pc9 += p->pc9;
average.packages.pc10 += p->pc10;
@@ -854,9 +874,12 @@ void compute_average(struct thread_data *t, struct core_data *c,
average.cores.c7 /= topo.num_cores;
average.packages.pc2 /= topo.num_packages;
- average.packages.pc3 /= topo.num_packages;
- average.packages.pc6 /= topo.num_packages;
- average.packages.pc7 /= topo.num_packages;
+ if (do_pc3)
+ average.packages.pc3 /= topo.num_packages;
+ if (do_pc6)
+ average.packages.pc6 /= topo.num_packages;
+ if (do_pc7)
+ average.packages.pc7 /= topo.num_packages;
average.packages.pc8 /= topo.num_packages;
average.packages.pc9 /= topo.num_packages;
@@ -956,18 +979,18 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
- if (do_nhm_cstates && !do_slm_cstates) {
+ if (do_pc3)
if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
return -9;
+ if (do_pc6)
if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
return -10;
- }
- if (do_snb_cstates) {
+ if (do_pc2)
if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
return -11;
+ if (do_pc7)
if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
return -12;
- }
if (do_c8_c9_c10) {
if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
return -13;
@@ -1014,12 +1037,43 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return 0;
}
+/*
+ * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
+ * If you change the values, note they are used both in comparisons
+ * (>= PCL__7) and to index pkg_cstate_limit_strings[].
+ */
+
+#define PCLUKN 0 /* Unknown */
+#define PCLRSV 1 /* Reserved */
+#define PCL__0 2 /* PC0 */
+#define PCL__1 3 /* PC1 */
+#define PCL__2 4 /* PC2 */
+#define PCL__3 5 /* PC3 */
+#define PCL__4 6 /* PC4 */
+#define PCL__6 7 /* PC6 */
+#define PCL_6N 8 /* PC6 No Retention */
+#define PCL_6R 9 /* PC6 Retention */
+#define PCL__7 10 /* PC7 */
+#define PCL_7S 11 /* PC7 Shrink */
+#define PCLUNL 12 /* Unlimited */
+
+int pkg_cstate_limit = PCLUKN;
+char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
+ "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "unlimited"};
+
+int nhm_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL};
+int snb_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL};
+int hsw_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCLRSV, PCLUNL};
+int slv_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7};
+int amt_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
+int phi_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL};
+
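The tables just added reduce the 3-bit pkg-cstate-limit field at the bottom of MSR_PKG_CST_CONFIG_CONTROL to a model-independent PCL_* code, which later feeds both the >= comparisons and the string lookup. A self-contained sketch of the same decode, with an invented MSR value for illustration:

#include <stdio.h>

/* Same encoding idea as above, shown in isolation with a made-up MSR value. */
enum { PCLUKN, PCLRSV, PCL__0, PCL__1, PCL__2, PCL__3, PCL__4,
       PCL__6, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLUNL };

static const char *pcl_str[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
	"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "unlimited" };

static const int snb_limits[8] = { PCL__0, PCL__2, PCL_6N, PCL_6R,
				   PCL__7, PCL_7S, PCLRSV, PCLUNL };

int main(void)
{
	unsigned long long msr = 0x1e008403;	/* invented example value */
	int limit = snb_limits[msr & 0x7];

	printf("pkg-cstate-limit=%llu: %s\n", msr & 0x7, pcl_str[limit]);
	return 0;
}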
void print_verbose_header(void)
{
unsigned long long msr;
unsigned int ratio;
- if (!do_nehalem_platform_info)
+ if (!do_nhm_platform_info)
return;
get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
@@ -1093,46 +1147,16 @@ print_nhm_turbo_ratio_limits:
fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
- fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ",
+ fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
(msr & (1 << 15)) ? "" : "UN",
- (unsigned int)msr & 7);
-
-
- switch(msr & 0x7) {
- case 0:
- fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
- break;
- case 1:
- fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
- break;
- case 2:
- fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
- break;
- case 3:
- fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
- break;
- case 4:
- fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
- break;
- case 5:
- fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
- break;
- case 6:
- fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
- break;
- case 7:
- fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
- break;
- default:
- fprintf(stderr, "invalid");
- }
- fprintf(stderr, ")\n");
+ (unsigned int)msr & 7,
+ pkg_cstate_limit_strings[pkg_cstate_limit]);
- if (!do_nehalem_turbo_ratio_limit)
+ if (!do_nhm_turbo_ratio_limit)
return;
get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
@@ -1178,6 +1202,7 @@ print_nhm_turbo_ratio_limits:
if (ratio)
fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
ratio, bclk, ratio * bclk);
+
}
void free_all_buffers(void)
@@ -1458,18 +1483,66 @@ void check_dev_msr()
struct stat sb;
if (stat("/dev/cpu/0/msr", &sb))
- err(-5, "no /dev/cpu/0/msr\n"
- "Try \"# modprobe msr\"");
+ err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
}
-void check_super_user()
+void check_permissions()
{
- if (getuid() != 0)
- errx(-6, "must be root");
+ struct __user_cap_header_struct cap_header_data;
+ cap_user_header_t cap_header = &cap_header_data;
+ struct __user_cap_data_struct cap_data_data;
+ cap_user_data_t cap_data = &cap_data_data;
+ extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
+ int do_exit = 0;
+
+ /* check for CAP_SYS_RAWIO */
+ cap_header->pid = getpid();
+ cap_header->version = _LINUX_CAPABILITY_VERSION;
+ if (capget(cap_header, cap_data) < 0)
+ err(-6, "capget(2) failed");
+
+ if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
+ do_exit++;
+ warnx("capget(CAP_SYS_RAWIO) failed,"
+ " try \"# setcap cap_sys_rawio=ep %s\"", progname);
+ }
+
+ /* test file permissions */
+ if (euidaccess("/dev/cpu/0/msr", R_OK)) {
+ do_exit++;
+ warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
+ }
+
+ /* if all else fails, tell them to be root */
+ if (do_exit)
+ if (getuid() != 0)
+ warnx("... or simply run as root");
+
+ if (do_exit)
+ exit(-6);
}
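check_permissions() reports the missing capability and the file permission as separate hints. For a quick manual check, the simplest probe is to attempt the same read turbostat will issue later; a hedged sketch (MSR 0x10 is the TSC, and the msr driver additionally checks CAP_SYS_RAWIO even when the file mode allows reads):

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

/* Sketch: probe readability of CPU0's MSR interface before starting a run. */
int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("/dev/cpu/0/msr");	/* missing module, mode, or capability */
		return 1;
	}
	if (pread(fd, &val, sizeof(val), 0x10 /* IA32_TIME_STAMP_COUNTER */) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("TSC: %llu\n", (unsigned long long)val);
	close(fd);
	return 0;
}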
-int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
+/*
+ * NHM adds support for additional MSRs:
+ *
+ * MSR_SMI_COUNT 0x00000034
+ *
+ * MSR_NHM_PLATFORM_INFO 0x000000ce
+ * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
+ *
+ * MSR_PKG_C3_RESIDENCY 0x000003f8
+ * MSR_PKG_C6_RESIDENCY 0x000003f9
+ * MSR_CORE_C3_RESIDENCY 0x000003fc
+ * MSR_CORE_C6_RESIDENCY 0x000003fd
+ *
+ * Side effect:
+ * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
+ */
+int probe_nhm_msrs(unsigned int family, unsigned int model)
{
+ unsigned long long msr;
+ int *pkg_cstate_limits;
+
if (!genuine_intel)
return 0;
@@ -1482,24 +1555,54 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
case 0x1F: /* Core i7 and i5 Processor - Nehalem */
case 0x25: /* Westmere Client - Clarkdale, Arrandale */
case 0x2C: /* Westmere EP - Gulftown */
+ case 0x2E: /* Nehalem-EX Xeon - Beckton */
+ case 0x2F: /* Westmere-EX Xeon - Eagleton */
+ pkg_cstate_limits = nhm_pkg_cstate_limits;
+ break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
case 0x3A: /* IVB */
case 0x3E: /* IVB Xeon */
+ pkg_cstate_limits = snb_pkg_cstate_limits;
+ break;
case 0x3C: /* HSW */
case 0x3F: /* HSX */
case 0x45: /* HSW */
case 0x46: /* HSW */
- case 0x37: /* BYT */
- case 0x4D: /* AVN */
case 0x3D: /* BDW */
+ case 0x47: /* BDW */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
- return 1;
+ pkg_cstate_limits = hsw_pkg_cstate_limits;
+ break;
+ case 0x37: /* BYT */
+ case 0x4D: /* AVN */
+ pkg_cstate_limits = slv_pkg_cstate_limits;
+ break;
+ case 0x4C: /* AMT */
+ pkg_cstate_limits = amt_pkg_cstate_limits;
+ break;
+ case 0x57: /* PHI */
+ pkg_cstate_limits = phi_pkg_cstate_limits;
+ break;
+ default:
+ return 0;
+ }
+ get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+
+ pkg_cstate_limit = pkg_cstate_limits[msr & 0x7];
+
+ return 1;
+}
+int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+ switch (model) {
+ /* Nehalem compatible, but do not include turbo-ratio limit support */
case 0x2E: /* Nehalem-EX Xeon - Beckton */
case 0x2F: /* Westmere-EX Xeon - Eagleton */
- default:
return 0;
+ default:
+ return 1;
}
}
int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
@@ -1564,6 +1667,103 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return 0;
}
+/*
+ * print_perf_limit()
+ */
+int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ unsigned long long msr;
+ int cpu;
+
+ cpu = t->cpu_id;
+
+ /* per-package */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
+ if (cpu_migrate(cpu)) {
+ fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
+ return -1;
+ }
+
+ if (do_core_perf_limit_reasons) {
+ get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
+ fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
+ fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
+ (msr & 1 << 0) ? "PROCHOT, " : "",
+ (msr & 1 << 1) ? "ThermStatus, " : "",
+ (msr & 1 << 2) ? "bit2, " : "",
+ (msr & 1 << 4) ? "Graphics, " : "",
+ (msr & 1 << 5) ? "Auto-HWP, " : "",
+ (msr & 1 << 6) ? "VR-Therm, " : "",
+ (msr & 1 << 8) ? "Amps, " : "",
+ (msr & 1 << 9) ? "CorePwr, " : "",
+ (msr & 1 << 10) ? "PkgPwrL1, " : "",
+ (msr & 1 << 11) ? "PkgPwrL2, " : "",
+ (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
+ (msr & 1 << 13) ? "Transitions, " : "",
+ (msr & 1 << 14) ? "bit14, " : "",
+ (msr & 1 << 15) ? "bit15, " : "");
+ fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
+ (msr & 1 << 16) ? "PROCHOT, " : "",
+ (msr & 1 << 17) ? "ThermStatus, " : "",
+ (msr & 1 << 18) ? "bit18, " : "",
+ (msr & 1 << 20) ? "Graphics, " : "",
+ (msr & 1 << 21) ? "Auto-HWP, " : "",
+ (msr & 1 << 22) ? "VR-Therm, " : "",
+ (msr & 1 << 24) ? "Amps, " : "",
+ (msr & 1 << 25) ? "CorePwr, " : "",
+ (msr & 1 << 26) ? "PkgPwrL1, " : "",
+ (msr & 1 << 27) ? "PkgPwrL2, " : "",
+ (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
+ (msr & 1 << 29) ? "Transitions, " : "",
+ (msr & 1 << 30) ? "bit30, " : "",
+ (msr & 1 << 31) ? "bit31, " : "");
+
+ }
+ if (do_gfx_perf_limit_reasons) {
+ get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
+ fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
+ fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
+ (msr & 1 << 0) ? "PROCHOT, " : "",
+ (msr & 1 << 1) ? "ThermStatus, " : "",
+ (msr & 1 << 4) ? "Graphics, " : "",
+ (msr & 1 << 6) ? "VR-Therm, " : "",
+ (msr & 1 << 8) ? "Amps, " : "",
+ (msr & 1 << 9) ? "GFXPwr, " : "",
+ (msr & 1 << 10) ? "PkgPwrL1, " : "",
+ (msr & 1 << 11) ? "PkgPwrL2, " : "");
+ fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
+ (msr & 1 << 16) ? "PROCHOT, " : "",
+ (msr & 1 << 17) ? "ThermStatus, " : "",
+ (msr & 1 << 20) ? "Graphics, " : "",
+ (msr & 1 << 22) ? "VR-Therm, " : "",
+ (msr & 1 << 24) ? "Amps, " : "",
+ (msr & 1 << 25) ? "GFXPwr, " : "",
+ (msr & 1 << 26) ? "PkgPwrL1, " : "",
+ (msr & 1 << 27) ? "PkgPwrL2, " : "");
+ }
+ if (do_ring_perf_limit_reasons) {
+ get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
+ fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
+ fprintf(stderr, " (Active: %s%s%s%s%s%s)",
+ (msr & 1 << 0) ? "PROCHOT, " : "",
+ (msr & 1 << 1) ? "ThermStatus, " : "",
+ (msr & 1 << 6) ? "VR-Therm, " : "",
+ (msr & 1 << 8) ? "Amps, " : "",
+ (msr & 1 << 10) ? "PkgPwrL1, " : "",
+ (msr & 1 << 11) ? "PkgPwrL2, " : "");
+ fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
+ (msr & 1 << 16) ? "PROCHOT, " : "",
+ (msr & 1 << 17) ? "ThermStatus, " : "",
+ (msr & 1 << 22) ? "VR-Therm, " : "",
+ (msr & 1 << 24) ? "Amps, " : "",
+ (msr & 1 << 26) ? "PkgPwrL1, " : "",
+ (msr & 1 << 27) ? "PkgPwrL2, " : "");
+ }
+ return 0;
+}
+
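The format strings in print_perf_limit() spell out each bit by hand; the same Active/Logged layout can also be decoded from a small table, as sketched below with an invented register value (only the subset of bit names common to all three registers above is shown):

#include <stdio.h>

/*
 * Sketch: table-driven decode of a *_PERF_LIMIT_REASONS-style register.
 * Bits 0..15 are the "Active" flags, bits 16..31 the sticky "Logged" copies.
 */
static const struct { int bit; const char *name; } reasons[] = {
	{ 0, "PROCHOT" }, { 1, "ThermStatus" }, { 6, "VR-Therm" },
	{ 8, "Amps" }, { 10, "PkgPwrL1" }, { 11, "PkgPwrL2" },
};

static void decode(const char *tag, unsigned int half)
{
	unsigned int i;

	printf("%s:", tag);
	for (i = 0; i < sizeof(reasons) / sizeof(reasons[0]); i++)
		if (half & (1u << reasons[i].bit))
			printf(" %s", reasons[i].name);
	printf("\n");
}

int main(void)
{
	unsigned long long msr = 0x04010001ULL;	/* invented example value */

	decode("Active", msr & 0xffff);
	decode("Logged", msr >> 16);
	return 0;
}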
#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
@@ -1609,6 +1809,7 @@ void rapl_probe(unsigned int family, unsigned int model)
case 0x45: /* HSW */
case 0x46: /* HSW */
case 0x3D: /* BDW */
+ case 0x47: /* BDW */
do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
break;
case 0x3F: /* HSX */
@@ -1647,12 +1848,33 @@ void rapl_probe(unsigned int family, unsigned int model)
tdp = get_tdp(model);
rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
- if (verbose)
+ if (debug)
fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
return;
}
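The rapl_joule_counter_range computation above estimates how long the 32-bit energy counter can run at TDP before wrapping. A worked example under assumed values (1/2^14 J energy units and a 130 W TDP, which comes out at a bit over half an hour):

#include <stdio.h>

/*
 * Sketch: lifetime of a 32-bit RAPL energy counter before it wraps.
 * The energy unit and TDP below are plausible assumptions, not probed values.
 */
int main(void)
{
	double energy_unit = 1.0 / (1 << 14);	/* joules per count, ~61 uJ */
	double tdp = 130.0;			/* watts */
	double range = 0xFFFFFFFF * energy_unit / tdp;

	printf("RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", range, tdp);
	return 0;
}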
+void perf_limit_reasons_probe(unsigned int family, unsigned int model)
+{
+ if (!genuine_intel)
+ return;
+
+ if (family != 6)
+ return;
+
+ switch (model) {
+ case 0x3C: /* HSW */
+ case 0x45: /* HSW */
+ case 0x46: /* HSW */
+ do_gfx_perf_limit_reasons = 1;
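+ /* fall through: Haswell client parts also get the core and ring flags */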
+ case 0x3F: /* HSX */
+ do_core_perf_limit_reasons = 1;
+ do_ring_perf_limit_reasons = 1;
+ default:
+ return;
+ }
+}
+
int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
unsigned long long msr;
@@ -1751,7 +1973,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
return -1;
- if (verbose) {
+ if (debug) {
fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
rapl_power_units, rapl_energy_units, rapl_time_units);
@@ -1808,7 +2030,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
print_power_limit_msr(cpu, msr, "DRAM Limit");
}
if (do_rapl & RAPL_CORE_POLICY) {
- if (verbose) {
+ if (debug) {
if (get_msr(cpu, MSR_PP0_POLICY, &msr))
return -7;
@@ -1816,7 +2038,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
}
}
if (do_rapl & RAPL_CORES) {
- if (verbose) {
+ if (debug) {
if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
return -9;
@@ -1826,7 +2048,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
}
}
if (do_rapl & RAPL_GFX) {
- if (verbose) {
+ if (debug) {
if (get_msr(cpu, MSR_PP1_POLICY, &msr))
return -8;
@@ -1842,8 +2064,15 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return 0;
}
+/*
+ * SNB adds support for additional MSRs:
+ *
+ * MSR_PKG_C7_RESIDENCY 0x000003fa
+ * MSR_CORE_C7_RESIDENCY 0x000003fe
+ * MSR_PKG_C2_RESIDENCY 0x0000060d
+ */
-int is_snb(unsigned int family, unsigned int model)
+int has_snb_msrs(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return 0;
@@ -1858,6 +2087,7 @@ int is_snb(unsigned int family, unsigned int model)
case 0x45: /* HSW */
case 0x46: /* HSW */
case 0x3D: /* BDW */
+ case 0x47: /* BDW */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
return 1;
@@ -1865,7 +2095,14 @@ int is_snb(unsigned int family, unsigned int model)
return 0;
}
-int has_c8_c9_c10(unsigned int family, unsigned int model)
+/*
+ * HSW adds support for additional MSRs:
+ *
+ * MSR_PKG_C8_RESIDENCY 0x00000630
+ * MSR_PKG_C9_RESIDENCY 0x00000631
+ * MSR_PKG_C10_RESIDENCY 0x00000632
+ */
+int has_hsw_msrs(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return 0;
@@ -1917,7 +2154,7 @@ double slm_bclk(void)
double discover_bclk(unsigned int family, unsigned int model)
{
- if (is_snb(family, model))
+ if (has_snb_msrs(family, model))
return 100.00;
else if (is_slm(family, model))
return slm_bclk();
@@ -1965,7 +2202,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
}
/* Temperature Target MSR is Nehalem and newer only */
- if (!do_nehalem_platform_info)
+ if (!do_nhm_platform_info)
goto guess;
if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
@@ -1973,7 +2210,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
target_c_local = (msr >> 16) & 0xFF;
- if (verbose)
+ if (debug)
fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
cpu, msr, target_c_local);
@@ -2003,7 +2240,7 @@ void check_cpuid()
if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
genuine_intel = 1;
- if (verbose)
+ if (debug)
fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
(char *)&ebx, (char *)&edx, (char *)&ecx);
@@ -2014,7 +2251,7 @@ void check_cpuid()
if (family == 6 || family == 0xf)
model += ((fms >> 16) & 0xf) << 4;
- if (verbose)
+ if (debug)
fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
max_level, family, model, stepping, family, model, stepping);
@@ -2029,18 +2266,15 @@ void check_cpuid()
ebx = ecx = edx = 0;
__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
- if (max_level < 0x80000007)
- errx(1, "CPUID: no invariant TSC (max_level 0x%x)", max_level);
+ if (max_level >= 0x80000007) {
- /*
- * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
- * this check is valid for both Intel and AMD
- */
- __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
- has_invariant_tsc = edx & (1 << 8);
-
- if (!has_invariant_tsc)
- errx(1, "No invariant TSC");
+ /*
+ * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+ * this check is valid for both Intel and AMD
+ */
+ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+ has_invariant_tsc = edx & (1 << 8);
+ }
/*
* APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
@@ -2053,36 +2287,51 @@ void check_cpuid()
do_ptm = eax & (1 << 6);
has_epb = ecx & (1 << 3);
- if (verbose)
- fprintf(stderr, "CPUID(6): %s%s%s%s\n",
- has_aperf ? "APERF" : "No APERF!",
- do_dts ? ", DTS" : "",
- do_ptm ? ", PTM": "",
- has_epb ? ", EPB": "");
-
- if (!has_aperf)
- errx(-1, "No APERF");
-
- do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
- do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
- do_smi = do_nhm_cstates;
- do_snb_cstates = is_snb(family, model);
- do_c8_c9_c10 = has_c8_c9_c10(family, model);
+ if (debug)
+ fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
+ has_aperf ? "" : "No ",
+ do_dts ? "" : "No ",
+ do_ptm ? "" : "No ",
+ has_epb ? "" : "No ");
+
+ do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
+ do_snb_cstates = has_snb_msrs(family, model);
+ do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
+ do_pc3 = (pkg_cstate_limit >= PCL__3);
+ do_pc6 = (pkg_cstate_limit >= PCL__6);
+ do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
+ do_c8_c9_c10 = has_hsw_msrs(family, model);
do_slm_cstates = is_slm(family, model);
bclk = discover_bclk(family, model);
- do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
+ do_nhm_turbo_ratio_limit = do_nhm_platform_info && has_nhm_turbo_ratio_limit(family, model);
do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
rapl_probe(family, model);
+ perf_limit_reasons_probe(family, model);
return;
}
-void usage()
+void help()
{
- errx(1, "%s: [-v][-R][-T][-p|-P|-S][-c MSR#][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
- progname);
+ fprintf(stderr,
+ "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
+ "\n"
+ "Turbostat forks the specified COMMAND and prints statistics\n"
+ "when COMMAND completes.\n"
+ "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
+ "to print statistics, until interrupted.\n"
+ "--debug run in \"debug\" mode\n"
+ "--interval sec Override default 5-second measurement interval\n"
+ "--help print this help message\n"
+ "--counter msr print 32-bit counter at address \"msr\"\n"
+ "--Counter msr print 64-bit Counter at address \"msr\"\n"
+ "--msr msr print 32-bit value at address \"msr\"\n"
+ "--MSR msr print 64-bit Value at address \"msr\"\n"
+ "--version print version information\n"
+ "\n"
+ "For more help, run \"man turbostat\"\n");
}
@@ -2121,7 +2370,7 @@ void topology_probe()
if (!summary_only && topo.num_cpus > 1)
show_cpu = 1;
- if (verbose > 1)
+ if (debug > 1)
fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
@@ -2156,7 +2405,7 @@ void topology_probe()
int siblings;
if (cpu_is_not_present(i)) {
- if (verbose > 1)
+ if (debug > 1)
fprintf(stderr, "cpu%d NOT PRESENT\n", i);
continue;
}
@@ -2171,26 +2420,26 @@ void topology_probe()
siblings = get_num_ht_siblings(i);
if (siblings > max_siblings)
max_siblings = siblings;
- if (verbose > 1)
+ if (debug > 1)
fprintf(stderr, "cpu %d pkg %d core %d\n",
i, cpus[i].physical_package_id, cpus[i].core_id);
}
topo.num_cores_per_pkg = max_core_id + 1;
- if (verbose > 1)
+ if (debug > 1)
fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
max_core_id, topo.num_cores_per_pkg);
if (!summary_only && topo.num_cores_per_pkg > 1)
show_core = 1;
topo.num_packages = max_package_id + 1;
- if (verbose > 1)
+ if (debug > 1)
fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
max_package_id, topo.num_packages);
if (!summary_only && topo.num_packages > 1)
show_pkg = 1;
topo.num_threads_per_core = max_siblings;
- if (verbose > 1)
+ if (debug > 1)
fprintf(stderr, "max_siblings %d\n", max_siblings);
free(cpus);
@@ -2299,25 +2548,27 @@ void setup_all_buffers(void)
void turbostat_init()
{
- check_cpuid();
-
check_dev_msr();
- check_super_user();
+ check_permissions();
+ check_cpuid();
setup_all_buffers();
- if (verbose)
+ if (debug)
print_verbose_header();
- if (verbose)
+ if (debug)
for_all_cpus(print_epb, ODD_COUNTERS);
- if (verbose)
+ if (debug)
+ for_all_cpus(print_perf_limit, ODD_COUNTERS);
+
+ if (debug)
for_all_cpus(print_rapl, ODD_COUNTERS);
for_all_cpus(set_temperature_target, ODD_COUNTERS);
- if (verbose)
+ if (debug)
for_all_cpus(print_thermal, ODD_COUNTERS);
}
@@ -2382,56 +2633,82 @@ int get_and_dump_counters(void)
return status;
}
+void print_version() {
+ fprintf(stderr, "turbostat version 4.1 10-Feb, 2015"
+ " - Len Brown <lenb@kernel.org>\n");
+}
+
void cmdline(int argc, char **argv)
{
int opt;
+ int option_index = 0;
+ static struct option long_options[] = {
+ {"Counter", required_argument, 0, 'C'},
+ {"counter", required_argument, 0, 'c'},
+ {"Dump", no_argument, 0, 'D'},
+ {"debug", no_argument, 0, 'd'},
+ {"interval", required_argument, 0, 'i'},
+ {"help", no_argument, 0, 'h'},
+ {"Joules", no_argument, 0, 'J'},
+ {"MSR", required_argument, 0, 'M'},
+ {"msr", required_argument, 0, 'm'},
+ {"Package", no_argument, 0, 'p'},
+ {"processor", no_argument, 0, 'p'},
+ {"Summary", no_argument, 0, 'S'},
+ {"TCC", required_argument, 0, 'T'},
+ {"version", no_argument, 0, 'v' },
+ {0, 0, 0, 0 }
+ };
progname = argv[0];
- while ((opt = getopt(argc, argv, "+pPsSvi:c:C:m:M:RJT:")) != -1) {
+ while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
+ long_options, &option_index)) != -1) {
switch (opt) {
- case 'p':
- show_core_only++;
+ case 'C':
+ sscanf(optarg, "%x", &extra_delta_offset64);
break;
- case 'P':
- show_pkg_only++;
+ case 'c':
+ sscanf(optarg, "%x", &extra_delta_offset32);
break;
- case 's':
+ case 'D':
dump_only++;
break;
- case 'S':
- summary_only++;
- break;
- case 'v':
- verbose++;
+ case 'd':
+ debug++;
break;
+ case 'h':
+ default:
+ help();
+ exit(1);
case 'i':
interval_sec = atoi(optarg);
break;
- case 'c':
- sscanf(optarg, "%x", &extra_delta_offset32);
+ case 'J':
+ rapl_joules++;
break;
- case 'C':
- sscanf(optarg, "%x", &extra_delta_offset64);
+ case 'M':
+ sscanf(optarg, "%x", &extra_msr_offset64);
break;
case 'm':
sscanf(optarg, "%x", &extra_msr_offset32);
break;
- case 'M':
- sscanf(optarg, "%x", &extra_msr_offset64);
+ case 'P':
+ show_pkg_only++;
break;
- case 'R':
- rapl_verbose++;
+ case 'p':
+ show_core_only++;
+ break;
+ case 'S':
+ summary_only++;
break;
case 'T':
tcc_activation_temp_override = atoi(optarg);
break;
- case 'J':
- rapl_joules++;
+ case 'v':
+ print_version();
+ exit(0);
break;
-
- default:
- usage();
}
}
}
@@ -2440,9 +2717,8 @@ int main(int argc, char **argv)
{
cmdline(argc, argv);
- if (verbose)
- fprintf(stderr, "turbostat v3.7 Feb 6, 2014"
- " - Len Brown <lenb@kernel.org>\n");
+ if (debug)
+ print_version();
turbostat_init();
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index b9cd036f0442..d08e214ec6e7 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -178,6 +178,7 @@ my $checkout;
my $localversion;
my $iteration = 0;
my $successes = 0;
+my $stty_orig;
my $bisect_good;
my $bisect_bad;
@@ -197,6 +198,11 @@ my $patchcheck_start;
my $patchcheck_cherry;
my $patchcheck_end;
+my $build_time;
+my $install_time;
+my $reboot_time;
+my $test_time;
+
# set when a test is something other that just building or install
# which would require more options.
my $buildonly = 1;
@@ -554,6 +560,66 @@ sub get_mandatory_config {
}
}
+sub show_time {
+ my ($time) = @_;
+
+ my $hours = 0;
+ my $minutes = 0;
+
+ if ($time > 3600) {
+ $hours = int($time / 3600);
+ $time -= $hours * 3600;
+ }
+ if ($time > 60) {
+ $minutes = int($time / 60);
+ $time -= $minutes * 60;
+ }
+
+ if ($hours > 0) {
+ doprint "$hours hour";
+ doprint "s" if ($hours > 1);
+ doprint " ";
+ }
+
+ if ($minutes > 0) {
+ doprint "$minutes minute";
+ doprint "s" if ($minutes > 1);
+ doprint " ";
+ }
+
+ doprint "$time second";
+ doprint "s" if ($time != 1);
+}
+
+sub print_times {
+ doprint "\n";
+ if ($build_time) {
+ doprint "Build time: ";
+ show_time($build_time);
+ doprint "\n";
+ }
+ if ($install_time) {
+ doprint "Install time: ";
+ show_time($install_time);
+ doprint "\n";
+ }
+ if ($reboot_time) {
+ doprint "Reboot time: ";
+ show_time($reboot_time);
+ doprint "\n";
+ }
+ if ($test_time) {
+ doprint "Test time: ";
+ show_time($test_time);
+ doprint "\n";
+ }
+ # reset for iterations like bisect
+ $build_time = 0;
+ $install_time = 0;
+ $reboot_time = 0;
+ $test_time = 0;
+}
+
sub get_mandatory_configs {
get_mandatory_config("MACHINE");
get_mandatory_config("BUILD_DIR");
@@ -1341,23 +1407,83 @@ sub dodie {
print " See $opt{LOG_FILE} for more info.\n";
}
+ if ($monitor_cnt) {
+ # restore terminal settings
+ system("stty $stty_orig");
+ }
+
die @_, "\n";
}
+sub create_pty {
+ my ($ptm, $pts) = @_;
+ my $tmp;
+ my $TIOCSPTLCK = 0x40045431;
+ my $TIOCGPTN = 0x80045430;
+
+ sysopen($ptm, "/dev/ptmx", O_RDWR | O_NONBLOCK) or
+ dodie "Cant open /dev/ptmx";
+
+ # unlockpt()
+ $tmp = pack("i", 0);
+ ioctl($ptm, $TIOCSPTLCK, $tmp) or
+ dodie "ioctl TIOCSPTLCK for /dev/ptmx failed";
+
+ # ptsname()
+ ioctl($ptm, $TIOCGPTN, $tmp) or
+ dodie "ioctl TIOCGPTN for /dev/ptmx failed";
+ $tmp = unpack("i", $tmp);
+
+ sysopen($pts, "/dev/pts/$tmp", O_RDWR | O_NONBLOCK) or
+ dodie "Can't open /dev/pts/$tmp";
+}
+
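create_pty() spells out the TIOCSPTLCK and TIOCGPTN ioctls because Perl has no pty helpers in core; in C the same sequence is just the standard wrappers, roughly:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: the C equivalent of ktest's create_pty() using the libc helpers. */
int main(void)
{
	int ptm = posix_openpt(O_RDWR | O_NONBLOCK);

	if (ptm < 0 || grantpt(ptm) || unlockpt(ptm)) {	/* unlockpt() == TIOCSPTLCK */
		perror("posix_openpt");
		return 1;
	}
	printf("slave side: %s\n", ptsname(ptm));	/* ptsname() == TIOCGPTN */
	return 0;
}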
+sub exec_console {
+ my ($ptm, $pts) = @_;
+
+ close($ptm);
+
+ close(\*STDIN);
+ close(\*STDOUT);
+ close(\*STDERR);
+
+ open(\*STDIN, '<&', $pts);
+ open(\*STDOUT, '>&', $pts);
+ open(\*STDERR, '>&', $pts);
+
+ close($pts);
+
+ exec $console or
+ die "Can't open console $console";
+}
+
sub open_console {
- my ($fp) = @_;
+ my ($ptm) = @_;
+ my $pts = \*PTSFD;
+ my $pid;
- my $flags;
+ # save terminal settings
+ $stty_orig = `stty -g`;
- my $pid = open($fp, "$console|") or
- dodie "Can't open console $console";
+ # place terminal in cbreak mode so that stdin can be read one character at
+ # a time without having to wait for a newline
+ system("stty -icanon -echo -icrnl");
- $flags = fcntl($fp, F_GETFL, 0) or
- dodie "Can't get flags for the socket: $!";
- $flags = fcntl($fp, F_SETFL, $flags | O_NONBLOCK) or
- dodie "Can't set flags for the socket: $!";
+ create_pty($ptm, $pts);
+
+ $pid = fork;
+
+ if (!$pid) {
+ # child
+ exec_console($ptm, $pts)
+ }
+
+ # parent
+ close($pts);
return $pid;
+
+ open(PTSFD, "Stop perl from warning about single use of PTSFD");
}
sub close_console {
@@ -1368,6 +1494,9 @@ sub close_console {
print "closing!\n";
close($fp);
+
+ # restore terminal settings
+ system("stty $stty_orig");
}
sub start_monitor {
@@ -1519,6 +1648,8 @@ sub fail {
$name = " ($test_name)";
}
+ print_times;
+
doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n";
@@ -1534,10 +1665,14 @@ sub fail {
sub run_command {
my ($command, $redirect) = @_;
+ my $start_time;
+ my $end_time;
my $dolog = 0;
my $dord = 0;
my $pid;
+ $start_time = time;
+
$command =~ s/\$SSH_USER/$ssh_user/g;
$command =~ s/\$MACHINE/$machine/g;
@@ -1570,6 +1705,15 @@ sub run_command {
close(LOG) if ($dolog);
close(RD) if ($dord);
+ $end_time = time;
+ my $delta = $end_time - $start_time;
+
+ if ($delta == 1) {
+ doprint "[1 second] ";
+ } else {
+ doprint "[$delta seconds] ";
+ }
+
if ($failed) {
doprint "FAILED!\n";
} else {
@@ -1694,7 +1838,9 @@ sub wait_for_input
{
my ($fp, $time) = @_;
my $rin;
- my $ready;
+ my $rout;
+ my $nr;
+ my $buf;
my $line;
my $ch;
@@ -1704,21 +1850,36 @@ sub wait_for_input
$rin = '';
vec($rin, fileno($fp), 1) = 1;
- ($ready, $time) = select($rin, undef, undef, $time);
+ vec($rin, fileno(\*STDIN), 1) = 1;
- $line = "";
+ while (1) {
+ $nr = select($rout=$rin, undef, undef, $time);
- # try to read one char at a time
- while (sysread $fp, $ch, 1) {
- $line .= $ch;
- last if ($ch eq "\n");
- }
+ if ($nr <= 0) {
+ return undef;
+ }
- if (!length($line)) {
- return undef;
- }
+ # copy data from stdin to the console
+ if (vec($rout, fileno(\*STDIN), 1) == 1) {
+ sysread(\*STDIN, $buf, 1000);
+ syswrite($fp, $buf, 1000);
+ next;
+ }
- return $line;
+ $line = "";
+
+ # try to read one char at a time
+ while (sysread $fp, $ch, 1) {
+ $line .= $ch;
+ last if ($ch eq "\n");
+ }
+
+ if (!length($line)) {
+ return undef;
+ }
+
+ return $line;
+ }
}
sub reboot_to {
@@ -1766,6 +1927,8 @@ sub monitor {
my $skip_call_trace = 0;
my $loops;
+ my $start_time = time;
+
wait_for_monitor 5;
my $line;
@@ -1890,6 +2053,9 @@ sub monitor {
}
}
+ my $end_time = time;
+ $reboot_time = $end_time - $start_time;
+
close(DMESG);
if ($bug) {
@@ -1938,6 +2104,8 @@ sub install {
return if ($no_install);
+ my $start_time = time;
+
if (defined($pre_install)) {
my $cp_pre_install = eval_kernel_version $pre_install;
run_command "$cp_pre_install" or
@@ -1969,6 +2137,8 @@ sub install {
if (!$install_mods) {
do_post_install;
doprint "No modules needed\n";
+ my $end_time = time;
+ $install_time = $end_time - $start_time;
return;
}
@@ -1996,6 +2166,9 @@ sub install {
run_ssh "rm -f /tmp/$modtar";
do_post_install;
+
+ my $end_time = time;
+ $install_time = $end_time - $start_time;
}
sub get_version {
@@ -2008,7 +2181,7 @@ sub get_version {
$have_version = 1;
}
-sub start_monitor_and_boot {
+sub start_monitor_and_install {
# Make sure the stable kernel has finished booting
# Install bisects, don't need console
@@ -2208,6 +2381,8 @@ sub build {
unlink $buildlog;
+ my $start_time = time;
+
# Failed builds should not reboot the target
my $save_no_reboot = $no_reboot;
$no_reboot = 1;
@@ -2293,6 +2468,9 @@ sub build {
$no_reboot = $save_no_reboot;
+ my $end_time = time;
+ $build_time = $end_time - $start_time;
+
return 1;
}
@@ -2323,6 +2501,8 @@ sub success {
$name = " ($test_name)";
}
+ print_times;
+
doprint "\n\n*******************************************\n";
doprint "*******************************************\n";
doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n";
@@ -2383,6 +2563,8 @@ sub do_run_test {
my $bug = 0;
my $bug_ignored = 0;
+ my $start_time = time;
+
wait_for_monitor 1;
doprint "run test $run_test\n";
@@ -2449,6 +2631,9 @@ sub do_run_test {
waitpid $child_pid, 0;
$child_exit = $?;
+ my $end_time = time;
+ $test_time = $end_time - $start_time;
+
if (!$bug && $in_bisect) {
if (defined($bisect_ret_good)) {
if ($child_exit == $bisect_ret_good) {
@@ -2549,7 +2734,7 @@ sub run_bisect_test {
dodie "Failed on build" if $failed;
# Now boot the box
- start_monitor_and_boot or $failed = 1;
+ start_monitor_and_install or $failed = 1;
if ($type ne "boot") {
if ($failed && $bisect_skip) {
@@ -2755,6 +2940,7 @@ sub bisect {
do {
$result = run_bisect $type;
$test = run_git_bisect "git bisect $result";
+ print_times;
} while ($test);
run_command "git bisect log" or
@@ -3168,6 +3354,7 @@ sub config_bisect {
do {
$ret = run_config_bisect \%good_configs, \%bad_configs;
+ print_times;
} while (!$ret);
return $ret if ($ret < 0);
@@ -3260,7 +3447,7 @@ sub patchcheck {
my $sha1 = $item;
$sha1 =~ s/^([[:xdigit:]]+).*/$1/;
- doprint "\nProcessing commit $item\n\n";
+ doprint "\nProcessing commit \"$item\"\n\n";
run_command "git checkout $sha1" or
die "Failed to checkout $sha1";
@@ -3291,16 +3478,18 @@ sub patchcheck {
my $failed = 0;
- start_monitor_and_boot or $failed = 1;
+ start_monitor_and_install or $failed = 1;
if (!$failed && $type ne "boot"){
do_run_test or $failed = 1;
}
end_monitor;
- return 0 if ($failed);
-
+ if ($failed) {
+ print_times;
+ return 0;
+ }
patchcheck_reboot;
-
+ print_times;
}
$in_patchcheck = 0;
success $i;
@@ -3753,7 +3942,7 @@ sub make_min_config {
my $failed = 0;
build "oldconfig" or $failed = 1;
if (!$failed) {
- start_monitor_and_boot or $failed = 1;
+ start_monitor_and_install or $failed = 1;
if ($type eq "test" && !$failed) {
do_run_test or $failed = 1;
@@ -4000,6 +4189,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$iteration = $i;
+ $build_time = 0;
+ $install_time = 0;
+ $reboot_time = 0;
+ $test_time = 0;
+
undef %force_config;
my $makecmd = set_test_option("MAKE_CMD", $i);
@@ -4157,15 +4351,20 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
if ($test_type ne "build") {
my $failed = 0;
- start_monitor_and_boot or $failed = 1;
+ start_monitor_and_install or $failed = 1;
if (!$failed && $test_type ne "boot" && defined($run_test)) {
do_run_test or $failed = 1;
}
end_monitor;
- next if ($failed);
+ if ($failed) {
+ print_times;
+ next;
+ }
}
+ print_times;
+
success $i;
}
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index d273624c93a6..e238c9559caf 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
}
static int check_execveat_invoked_rc(int fd, const char *path, int flags,
- int expected_rc)
+ int expected_rc, int expected_rc2)
{
int status;
int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
child, status);
return 1;
}
- if (WEXITSTATUS(status) != expected_rc) {
- printf("[FAIL] (child %d exited with %d not %d)\n",
- child, WEXITSTATUS(status), expected_rc);
+ if ((WEXITSTATUS(status) != expected_rc) &&
+ (WEXITSTATUS(status) != expected_rc2)) {
+ printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+ child, WEXITSTATUS(status), expected_rc, expected_rc2);
return 1;
}
printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
static int check_execveat(int fd, const char *path, int flags)
{
- return check_execveat_invoked_rc(fd, path, flags, 99);
+ return check_execveat_invoked_rc(fd, path, flags, 99, 99);
}
static char *concat(const char *left, const char *right)
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
* Execute as a long pathname relative to ".". If this is a script,
* the interpreter will launch but fail to open the script because its
* name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+ *
+ * The failure code is usually 127 (POSIX: "If a command is not found,
+ * the exit status shall be 127."), but some systems give 126 (POSIX:
+ * "If the command name is found, but it is not an executable utility,
+ * the exit status shall be 126."), so allow either.
*/
if (is_script)
- fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+ fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+ 127, 126);
else
fail += check_execveat(dot_dfd, longpath, 0);
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 94dae65eea41..8519e9ee97e3 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
{
struct mq_attr attr;
char *option, *next_option;
- int i, cpu;
+ int i, cpu, rc;
struct sigaction sa;
poptContext popt_context;
- char rc;
void *retval;
main_thread = pthread_self();
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index f6ff90a76bd7..1d5e7ad2c460 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR
export CC CFLAGS
-TARGETS = pmu copyloops mm tm primitives
+TARGETS = pmu copyloops mm tm primitives stringloops
endif
diff --git a/tools/testing/selftests/powerpc/copyloops/.gitignore b/tools/testing/selftests/powerpc/copyloops/.gitignore
new file mode 100644
index 000000000000..25a192f62c4d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/.gitignore
@@ -0,0 +1,4 @@
+copyuser_64
+copyuser_power7
+memcpy_64
+memcpy_power7
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
new file mode 100644
index 000000000000..b43ade0ec861
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -0,0 +1,3 @@
+hugetlb_vs_thp_test
+subpage_prot
+tempfile
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 357ccbd6bad9..a14c538dd7f8 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -1,9 +1,9 @@
noarg:
$(MAKE) -C ../
-PROGS := hugetlb_vs_thp_test
+PROGS := hugetlb_vs_thp_test subpage_prot
-all: $(PROGS)
+all: $(PROGS) tempfile
$(PROGS): ../harness.c
@@ -12,7 +12,10 @@ run_tests: all
./$$PROG; \
done;
+tempfile:
+ dd if=/dev/zero of=tempfile bs=64k count=1
+
clean:
- rm -f $(PROGS)
+ rm -f $(PROGS) tempfile
.PHONY: all run_tests clean
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
new file mode 100644
index 000000000000..440180ff8089
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/ptrace.h>
+#include <sys/syscall.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+char *file_name;
+
+int in_test;
+volatile int faulted;
+volatile void *dar;
+int errors;
+
+static void segv(int signum, siginfo_t *info, void *ctxt_v)
+{
+ ucontext_t *ctxt = (ucontext_t *)ctxt_v;
+ struct pt_regs *regs = ctxt->uc_mcontext.regs;
+
+ if (!in_test) {
+ fprintf(stderr, "Segfault outside of test !\n");
+ exit(1);
+ }
+
+ faulted = 1;
+ dar = (void *)regs->dar;
+ regs->nip += 4;
+}
+
+static inline void do_read(const volatile void *addr)
+{
+ int ret;
+
+ asm volatile("lwz %0,0(%1); twi 0,%0,0; isync;\n"
+ : "=r" (ret) : "r" (addr) : "memory");
+}
+
+static inline void do_write(const volatile void *addr)
+{
+ int val = 0x1234567;
+
+ asm volatile("stw %0,0(%1); sync; \n"
+ : : "r" (val), "r" (addr) : "memory");
+}
+
+static inline void check_faulted(void *addr, long page, long subpage, int write)
+{
+ int want_fault = (subpage == ((page + 3) % 16));
+
+ if (write)
+ want_fault |= (subpage == ((page + 1) % 16));
+
+ if (faulted != want_fault) {
+ printf("Failed at 0x%p (p=%ld,sp=%ld,w=%d), want=%s, got=%s !\n",
+ addr, page, subpage, write,
+ want_fault ? "fault" : "pass",
+ faulted ? "fault" : "pass");
+ ++errors;
+ }
+
+ if (faulted) {
+ if (dar != addr) {
+ printf("Fault expected at 0x%p and happened at 0x%p !\n",
+ addr, dar);
+ }
+ faulted = 0;
+ asm volatile("sync" : : : "memory");
+ }
+}
+
+static int run_test(void *addr, unsigned long size)
+{
+ unsigned int *map;
+ long i, j, pages, err;
+
+ pages = size / 0x10000;
+ map = malloc(pages * 4);
+ assert(map);
+
+ /*
+ * for each page, mark subpage (i + 1) % 16 read-only and subpage
+ * (i + 3) % 16 inaccessible
+ */
+ for (i = 0; i < pages; i++) {
+ map[i] = (0x40000000 >> (((i + 1) * 2) % 32)) |
+ (0xc0000000 >> (((i + 3) * 2) % 32));
+ }
+
+ err = syscall(__NR_subpage_prot, addr, size, map);
+ if (err) {
+ perror("subpage_perm");
+ return 1;
+ }
+ free(map);
+
+ in_test = 1;
+ errors = 0;
+ for (i = 0; i < pages; i++) {
+ for (j = 0; j < 16; j++, addr += 0x1000) {
+ do_read(addr);
+ check_faulted(addr, i, j, 0);
+ do_write(addr);
+ check_faulted(addr, i, j, 1);
+ }
+ }
+
+ in_test = 0;
+ if (errors) {
+ printf("%d errors detected\n", errors);
+ return 1;
+ }
+
+ return 0;
+}
+
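For reference, the map[] encoding built in run_test() packs sixteen 2-bit protection fields per 64K page, subpage 0 in the top bits. A small standalone check of what page 0 receives, assuming a field value of 01 means read-only and 11 means no access:

#include <stdio.h>

/* Sketch: decode the map word that run_test() above builds for page 0. */
int main(void)
{
	long i = 0;
	unsigned int word = (0x40000000 >> (((i + 1) * 2) % 32)) |
			    (0xc0000000 >> (((i + 3) * 2) % 32));
	int sp;

	for (sp = 0; sp < 16; sp++) {
		int prot = (word >> (30 - 2 * sp)) & 3;

		if (prot)
			printf("subpage %2d: %s\n", sp,
			       prot == 1 ? "read-only" : "no access");
	}
	return 0;
}

That prints subpage 1 as read-only and subpage 3 as inaccessible, which is exactly what check_faulted() expects for page 0.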
+int test_anon(void)
+{
+ unsigned long align;
+ struct sigaction act = {
+ .sa_sigaction = segv,
+ .sa_flags = SA_SIGINFO
+ };
+ void *mallocblock;
+ unsigned long mallocsize;
+
+ if (getpagesize() != 0x10000) {
+ fprintf(stderr, "Kernel page size must be 64K!\n");
+ return 1;
+ }
+
+ sigaction(SIGSEGV, &act, NULL);
+
+ mallocsize = 4 * 16 * 1024 * 1024;
+
+ FAIL_IF(posix_memalign(&mallocblock, 64 * 1024, mallocsize));
+
+ align = (unsigned long)mallocblock;
+ if (align & 0xffff)
+ align = (align | 0xffff) + 1;
+
+ mallocblock = (void *)align;
+
+ printf("allocated malloc block of 0x%lx bytes at 0x%p\n",
+ mallocsize, mallocblock);
+
+ printf("testing malloc block...\n");
+
+ return run_test(mallocblock, mallocsize);
+}
+
+int test_file(void)
+{
+ struct sigaction act = {
+ .sa_sigaction = segv,
+ .sa_flags = SA_SIGINFO
+ };
+ void *fileblock;
+ off_t filesize;
+ int fd;
+
+ fd = open(file_name, O_RDWR);
+ if (fd == -1) {
+ perror("failed to open file");
+ return 1;
+ }
+ sigaction(SIGSEGV, &act, NULL);
+
+ filesize = lseek(fd, 0, SEEK_END);
+ if (filesize & 0xffff)
+ filesize &= ~0xfffful;
+
+ fileblock = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (fileblock == MAP_FAILED) {
+ perror("failed to map file");
+ return 1;
+ }
+ printf("allocated %s for 0x%lx bytes at 0x%p\n",
+ file_name, filesize, fileblock);
+
+ printf("testing file map...\n");
+
+ return run_test(fileblock, filesize);
+}
+
+int main(int argc, char *argv[])
+{
+ test_harness(test_anon, "subpage_prot_anon");
+
+ if (argc > 1)
+ file_name = argv[1];
+ else
+ file_name = "tempfile";
+
+ test_harness(test_file, "subpage_prot_file");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/.gitignore b/tools/testing/selftests/powerpc/pmu/.gitignore
new file mode 100644
index 000000000000..e748f336eed3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/.gitignore
@@ -0,0 +1,3 @@
+count_instructions
+l3_bank_test
+per_event_excludes
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
new file mode 100644
index 000000000000..42bddbed8b64
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -0,0 +1,22 @@
+reg_access_test
+event_attributes_test
+cycles_test
+cycles_with_freeze_test
+pmc56_overflow_test
+ebb_vs_cpu_event_test
+cpu_event_vs_ebb_test
+cpu_event_pinned_vs_ebb_test
+task_event_vs_ebb_test
+task_event_pinned_vs_ebb_test
+multi_ebb_procs_test
+multi_counter_test
+pmae_handling_test
+close_clears_pmcc_test
+instruction_count_test
+fork_cleanup_test
+ebb_on_child_test
+ebb_on_willing_child_test
+back_to_back_ebbs_test
+lost_exception_test
+no_handler_test
+cycles_with_mmcr2_test
diff --git a/tools/testing/selftests/powerpc/primitives/.gitignore b/tools/testing/selftests/powerpc/primitives/.gitignore
new file mode 100644
index 000000000000..4cc4e31bed1d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/.gitignore
@@ -0,0 +1 @@
+load_unaligned_zeropad
diff --git a/tools/testing/selftests/powerpc/stringloops/.gitignore b/tools/testing/selftests/powerpc/stringloops/.gitignore
new file mode 100644
index 000000000000..0b43da74ee46
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/.gitignore
@@ -0,0 +1 @@
+memcmp
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile
new file mode 100644
index 000000000000..506d77346477
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/Makefile
@@ -0,0 +1,20 @@
+# The loops are all 64-bit code
+CFLAGS += -m64
+CFLAGS += -I$(CURDIR)
+
+PROGS := memcmp
+EXTRA_SOURCES := memcmp_64.S ../harness.c
+
+all: $(PROGS)
+
+$(PROGS): $(EXTRA_SOURCES)
+
+run_tests: all
+ @-for PROG in $(PROGS); do \
+ ./$$PROG; \
+ done;
+
+clean:
+ rm -f $(PROGS) *.o
+
+.PHONY: all run_tests clean
diff --git a/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h
new file mode 100644
index 000000000000..11bece87e880
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h
@@ -0,0 +1,7 @@
+#include <ppc-asm.h>
+
+#ifndef r1
+#define r1 sp
+#endif
+
+#define _GLOBAL(A) FUNC_START(test_ ## A)
diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c
new file mode 100644
index 000000000000..17417dd70708
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c
@@ -0,0 +1,103 @@
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+#include "../utils.h"
+
+#define SIZE 256
+#define ITERATIONS 10000
+
+int test_memcmp(const void *s1, const void *s2, size_t n);
+
+/* test all offsets and lengths */
+static void test_one(char *s1, char *s2)
+{
+ unsigned long offset, size;
+
+ for (offset = 0; offset < SIZE; offset++) {
+ for (size = 0; size < (SIZE-offset); size++) {
+ int x, y;
+ unsigned long i;
+
+ y = memcmp(s1+offset, s2+offset, size);
+ x = test_memcmp(s1+offset, s2+offset, size);
+
+ if (((x ^ y) < 0) && /* Trick to compare sign */
+ ((x | y) != 0)) { /* check for zero */
+ printf("memcmp returned %d, should have returned %d (offset %ld size %ld)\n", x, y, offset, size);
+
+ for (i = offset; i < offset+size; i++)
+ printf("%02x ", s1[i]);
+ printf("\n");
+
+ for (i = offset; i < offset+size; i++)
+ printf("%02x ", s2[i]);
+ printf("\n");
+ abort();
+ }
+ }
+ }
+}
+
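The (x ^ y) < 0 test in test_one() is the usual XOR trick for asking whether two signed results disagree in sign; a few illustrative cases:

#include <stdio.h>

/*
 * Sketch: the sign trick used by test_one() above.  (x ^ y) is negative
 * exactly when the sign bits of x and y differ, i.e. when the two memcmp()
 * implementations disagree about the ordering.
 */
int main(void)
{
	int pairs[][2] = { { 3, 200 }, { -1, -42 }, { 5, -5 }, { 0, -7 } };
	unsigned int i;

	for (i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++) {
		int x = pairs[i][0], y = pairs[i][1];

		printf("x=%3d y=%3d -> %s\n", x, y,
		       ((x ^ y) < 0) ? "signs differ" : "signs agree");
	}
	return 0;
}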
+static int testcase(void)
+{
+ char *s1;
+ char *s2;
+ unsigned long i;
+
+ s1 = memalign(128, SIZE);
+ if (!s1) {
+ perror("memalign");
+ exit(1);
+ }
+
+ s2 = memalign(128, SIZE);
+ if (!s2) {
+ perror("memalign");
+ exit(1);
+ }
+
+ srandom(1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ unsigned long j;
+ unsigned long change;
+
+ for (j = 0; j < SIZE; j++)
+ s1[j] = random();
+
+ memcpy(s2, s1, SIZE);
+
+ /* change one byte */
+ change = random() % SIZE;
+ s2[change] = random() & 0xff;
+
+ test_one(s1, s2);
+ }
+
+ srandom(1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ unsigned long j;
+ unsigned long change;
+
+ for (j = 0; j < SIZE; j++)
+ s1[j] = random();
+
+ memcpy(s2, s1, SIZE);
+
+ /* change multiple bytes, 1/8 of total */
+ for (j = 0; j < SIZE / 8; j++) {
+ change = random() % SIZE;
+ s2[change] = random() & 0xff;
+ }
+
+ test_one(s1, s2);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(testcase, "memcmp");
+}
diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp_64.S b/tools/testing/selftests/powerpc/stringloops/memcmp_64.S
new file mode 120000
index 000000000000..9bc87e438ae9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/memcmp_64.S
@@ -0,0 +1 @@
+../../../../../arch/powerpc/lib/memcmp_64.S \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
new file mode 100644
index 000000000000..33d02cc54a3e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -0,0 +1 @@
+tm-resched-dscr
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index abe14b7f36e9..bb99cde3f5f9 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -24,7 +24,7 @@
ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
idlecpus=`mpstat | tail -1 | \
- awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'`
+ awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
BEGIN {
cpus2use = idlecpus;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index d6cc07fc137f..559e01ac86be 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -30,6 +30,7 @@ else
echo Unreadable results directory: $i
exit 1
fi
+. tools/testing/selftests/rcutorture/bin/functions.sh
configfile=`echo $i | sed -e 's/^.*\///'`
ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
title="$title ($ngpsps per second)"
fi
echo $title
+ nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
+ if test -z "$nclosecalls"
+ then
+ exit 0
+ fi
+ if test "$nclosecalls" -eq 0
+ then
+ exit 0
+ fi
+ # Compute number of close calls per tenth of an hour
+ nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
+ if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
+ then
+ print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+ else
+ print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+ fi
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 8ca9f21f2efc..5236e073919d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -8,9 +8,9 @@
#
# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
#
-# qemu-args defaults to "-nographic", along with arguments specifying the
-# number of CPUs and other options generated from
-# the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
+# arguments specifying the number of CPUs and other
+# options generated from the underlying CPU architecture.
# boot_args defaults to value returned by the per_version_boot_params
# shell function.
#
@@ -138,7 +138,7 @@ then
fi
# Generate -smp qemu argument.
-qemu_args="-nographic $qemu_args"
+qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
cpu_count=`configNR_CPUS.sh $config_template`
cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
touch $resdir/buildonly
exit 0
fi
+echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
qemu_pid=$!
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 499d1e598e42..a6b57622c2e5 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -26,12 +26,15 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-T=$1
+F=$1
title=$2
+T=/tmp/parse-build.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
. functions.sh
-if grep -q CC < $T
+if grep -q CC < $F
then
:
else
@@ -39,18 +42,21 @@ else
exit 1
fi
-if grep -q "error:" < $T
+if grep -q "error:" < $F
then
print_bug $title build errors:
- grep "error:" < $T
+ grep "error:" < $F
exit 2
fi
-exit 0
-if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+grep warning: < $F > $T/warnings
+grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
+grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
+cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
+if test -s $T/rcuwarnings
then
print_warning $title build errors:
- egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+ cat $T/rcuwarnings
exit 2
fi
exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index f962ba4cf68b..d8f35cf116be 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
then
print_warning Console output contains nul bytes, old qemu still running?
fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
if test -s $T
then
print_warning Assertion failure in $file $title
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 4c4b1f631ecf..077828c889f1 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
all: $(BINARIES)
%: %.c
- $(CC) $(CFLAGS) -o $@ $^
+ $(CC) $(CFLAGS) -o $@ $^ -lrt
run_tests: all
@/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)
diff --git a/tools/usb/ffs-aio-example/multibuff/host_app/test.c b/tools/usb/ffs-aio-example/multibuff/host_app/test.c
index daa3abe6bebd..2cbcce6e8dd7 100644
--- a/tools/usb/ffs-aio-example/multibuff/host_app/test.c
+++ b/tools/usb/ffs-aio-example/multibuff/host_app/test.c
@@ -33,11 +33,6 @@
#define VENDOR 0x1d6b
#define PRODUCT 0x0105
-/* endpoints indexes */
-
-#define EP_BULK_IN (1 | LIBUSB_ENDPOINT_IN)
-#define EP_BULK_OUT (2 | LIBUSB_ENDPOINT_OUT)
-
#define BUF_LEN 8192
/*
@@ -159,14 +154,21 @@ void test_exit(struct test_state *state)
int main(void)
{
struct test_state state;
+ struct libusb_config_descriptor *conf;
+ struct libusb_interface_descriptor const *iface;
+ unsigned char addr;
if (test_init(&state))
return 1;
+ libusb_get_config_descriptor(state.found, 0, &conf);
+ iface = &conf->interface[0].altsetting[0];
+ addr = iface->endpoint[0].bEndpointAddress;
+
while (1) {
static unsigned char buffer[BUF_LEN];
int bytes;
- libusb_bulk_transfer(state.handle, EP_BULK_IN, buffer, BUF_LEN,
+ libusb_bulk_transfer(state.handle, addr, buffer, BUF_LEN,
&bytes, 500);
}
test_exit(&state);
diff --git a/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c b/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c
index adc310a6d489..1f44a29818bf 100644
--- a/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c
+++ b/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c
@@ -103,12 +103,14 @@ static const struct {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = 1 | USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = htole16(512),
},
.bulk_source = {
.bLength = sizeof(descriptors.hs_descs.bulk_source),
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = 2 | USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = htole16(512),
},
},
};
diff --git a/tools/usb/ffs-aio-example/simple/host_app/test.c b/tools/usb/ffs-aio-example/simple/host_app/test.c
index acd6332811f3..aed86ffff280 100644
--- a/tools/usb/ffs-aio-example/simple/host_app/test.c
+++ b/tools/usb/ffs-aio-example/simple/host_app/test.c
@@ -33,11 +33,6 @@
#define VENDOR 0x1d6b
#define PRODUCT 0x0105
-/* endpoints indexes */
-
-#define EP_BULK_IN (1 | LIBUSB_ENDPOINT_IN)
-#define EP_BULK_OUT (2 | LIBUSB_ENDPOINT_OUT)
-
#define BUF_LEN 8192
/*
@@ -159,16 +154,24 @@ void test_exit(struct test_state *state)
int main(void)
{
struct test_state state;
+ struct libusb_config_descriptor *conf;
+ struct libusb_interface_descriptor const *iface;
+ unsigned char in_addr, out_addr;
if (test_init(&state))
return 1;
+ libusb_get_config_descriptor(state.found, 0, &conf);
+ iface = &conf->interface[0].altsetting[0];
+ in_addr = iface->endpoint[0].bEndpointAddress;
+ out_addr = iface->endpoint[1].bEndpointAddress;
+
while (1) {
static unsigned char buffer[BUF_LEN];
int bytes;
- libusb_bulk_transfer(state.handle, EP_BULK_IN, buffer, BUF_LEN,
+ libusb_bulk_transfer(state.handle, in_addr, buffer, BUF_LEN,
&bytes, 500);
- libusb_bulk_transfer(state.handle, EP_BULK_OUT, buffer, BUF_LEN,
+ libusb_bulk_transfer(state.handle, out_addr, buffer, BUF_LEN,
&bytes, 500);
}
test_exit(&state);
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 264fbc297e0b..8bdf16b8ba60 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -133,6 +133,7 @@ static const char * const page_flag_names[] = {
[KPF_KSM] = "x:ksm",
[KPF_THP] = "t:thp",
[KPF_BALLOON] = "o:balloon",
+ [KPF_ZERO_PAGE] = "z:zero_page",
[KPF_RESERVED] = "r:reserved",
[KPF_MLOCKED] = "m:mlocked",
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index fc0c5e603eb4..e2c876d5a03b 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -37,3 +37,13 @@ config HAVE_KVM_CPU_RELAX_INTERCEPT
config KVM_VFIO
bool
+
+config HAVE_KVM_ARCH_TLB_FLUSH_ALL
+ bool
+
+config KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ bool
+
+config KVM_COMPAT
+ def_bool y
+ depends on COMPAT && !S390
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 1c0772b340d8..6e54f3542126 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -152,7 +152,8 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
return;
}
- ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+ ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
+ &timecounter->frac);
timer_arm(timer, ns);
}
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
new file mode 100644
index 000000000000..19c6210f02cf
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -0,0 +1,847 @@
+/*
+ * Contains GICv2-specific emulation code that was previously in vgic.c.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+#define GICC_ARCH_VERSION_V2 0x2
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
+{
+ return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
+}
+
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+ u32 word_offset = offset & 3;
+
+ switch (offset & ~3) {
+ case 0: /* GICD_CTLR */
+ reg = vcpu->kvm->arch.vgic.enabled;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vcpu->kvm->arch.vgic.enabled = reg & 1;
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ break;
+
+ case 4: /* GICD_TYPER */
+ reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+ reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+
+ case 8: /* GICD_IIDR */
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+}
+
+static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+#define GICD_ITARGETSR_SIZE 32
+#define GICD_CPUTARGETS_BITS 8
+#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
+static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+ u32 val = 0;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+ val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
+
+ return val;
+}
+
+static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i, c;
+ unsigned long *bmap;
+ u32 target;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ /*
+ * Pick the LSB in each byte. This ensures we target exactly
+ * one vcpu per IRQ. If the byte is null, assume we target
+ * CPU0.
+ */
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
+ int shift = i * GICD_CPUTARGETS_BITS;
+
+ target = ffs((val >> shift) & 0xffU);
+ target = target ? (target - 1) : 0;
+ dist->irq_spi_cpu[irq + i] = target;
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
+ if (c == target)
+ set_bit(irq + i, bmap);
+ else
+ clear_bit(irq + i, bmap);
+ }
+ }
+}
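For illustration, a minimal standalone sketch (plain userspace C, not kernel code) of the byte-wise selection rule described in the comment above: each ITARGETSR byte is reduced to its least significant set bit, and an all-zero byte falls back to CPU0. The example value 0x00080401 is made up.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int val = 0x00080401;	/* bytes: 0x01, 0x04, 0x08, 0x00 */
	int i;

	for (i = 0; i < 4; i++) {
		int target = ffs((val >> (i * 8)) & 0xffU);

		target = target ? target - 1 : 0;	/* zero byte -> CPU0 */
		printf("byte %d -> vCPU %d\n", i, target);
	}
	return 0;	/* prints vCPUs 0, 2, 3, 0 */
}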
+
+static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+	/* We treat the banked interrupt targets as read-only */
+ if (offset < 32) {
+ u32 roreg;
+
+ roreg = 1 << vcpu->vcpu_id;
+ roreg |= roreg << 8;
+ roreg |= roreg << 16;
+
+ vgic_reg_access(mmio, &roreg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_dispatch_sgi(vcpu, reg);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3);
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg = 0;
+
+ /* Copy source SGIs from distributor side */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
+
+ reg |= ((u32)sources) << (8 * (sgi - min_sgi));
+ }
+
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
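A small userspace sketch of the packing performed above: the per-SGI source-CPU bytes for four consecutive SGIs end up in one 32-bit word, lowest-numbered SGI in the least significant byte. The source masks are made-up example data.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t sources[4] = { 0x01, 0x00, 0x06, 0x80 };	/* e.g. SGI8..SGI11 */
	uint32_t reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (uint32_t)sources[i] << (8 * i);

	printf("pending-SGI word = 0x%08x\n", (unsigned int)reg);	/* 0x80060001 */
	return 0;
}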
+
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, bool set)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3);
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg;
+ bool updated = false;
+
+ reg = mmio_data_read(mmio, ~0);
+
+ /* Clear pending SGIs on the distributor */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 mask = reg >> (8 * (sgi - min_sgi));
+ u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
+
+ if (set) {
+ if ((*src & mask) != mask)
+ updated = true;
+ *src |= mask;
+ } else {
+ if (*src & mask)
+ updated = true;
+ *src &= ~mask;
+ }
+ }
+
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+
+ return updated;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
+}
+
+static const struct kvm_mmio_range vgic_dist_ranges[] = {
+ {
+ .base = GIC_DIST_CTRL,
+ .len = 12,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc,
+ },
+ {
+ .base = GIC_DIST_IGROUP,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ENABLE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg,
+ },
+ {
+ .base = GIC_DIST_ENABLE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_PRI,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg,
+ },
+ {
+ .base = GIC_DIST_TARGET,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_target_reg,
+ },
+ {
+ .base = GIC_DIST_CONFIG,
+ .len = VGIC_MAX_IRQS / 4,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg,
+ },
+ {
+ .base = GIC_DIST_SOFTINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_sgi_reg,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_CLEAR,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_clear,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_SET,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_set,
+ },
+ {}
+};
+
+static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
+
+ if (!is_in_range(mmio->phys_addr, mmio->len, base,
+ KVM_VGIC_V2_DIST_SIZE))
+ return false;
+
+ /* GICv2 does not support accesses wider than 32 bits */
+ if (mmio->len > 4) {
+ kvm_inject_dabt(vcpu, mmio->phys_addr);
+ return true;
+ }
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
+}
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int nrcpus = atomic_read(&kvm->online_vcpus);
+ u8 target_cpus;
+ int sgi, mode, c, vcpu_id;
+
+ vcpu_id = vcpu->vcpu_id;
+
+ sgi = reg & 0xf;
+ target_cpus = (reg >> 16) & 0xff;
+ mode = (reg >> 24) & 3;
+
+ switch (mode) {
+ case 0:
+ if (!target_cpus)
+ return;
+ break;
+
+ case 1:
+ target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+ break;
+
+ case 2:
+ target_cpus = 1 << vcpu_id;
+ break;
+ }
+
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (target_cpus & 1) {
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set_pending(vcpu, sgi);
+ *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n",
+ sgi, vcpu_id, c);
+ }
+
+ target_cpus >>= 1;
+ }
+}
+
+static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long sources;
+ int vcpu_id = vcpu->vcpu_id;
+ int c;
+
+ sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
+
+ for_each_set_bit(c, &sources, dist->nr_cpus) {
+ if (vgic_queue_irq(vcpu, c, irq))
+ clear_bit(c, &sources);
+ }
+
+ *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
+
+ /*
+ * If the sources bitmap has been cleared it means that we
+ * could queue all the SGIs onto link registers (see the
+ * clear_bit above), and therefore we are done with them in
+ * our emulated gic and can get rid of them.
+ */
+ if (!sources) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
+ * @kvm: pointer to the kvm struct
+ *
+ * Map the virtual CPU interface into the VM before running any VCPUs. We
+ * can't do this at creation time, because user space must first set the
+ * virtual CPU interface address in the guest physical address space.
+ */
+static int vgic_v2_map_resources(struct kvm *kvm,
+ const struct vgic_params *params)
+{
+ int ret = 0;
+
+ if (!irqchip_in_kernel(kvm))
+ return 0;
+
+ mutex_lock(&kvm->lock);
+
+ if (vgic_ready(kvm))
+ goto out;
+
+ if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
+ kvm_err("Need to set vgic cpu and dist addresses first\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * Initialize the vgic if this hasn't already been done on demand by
+ * accessing the vgic state from userspace.
+ */
+ ret = vgic_init(kvm);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ goto out;
+ }
+
+ ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
+ params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
+ true);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ goto out;
+ }
+
+ kvm->arch.vgic.ready = true;
+out:
+ if (ret)
+ kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
+}
+
+static int vgic_v2_init_model(struct kvm *kvm)
+{
+ int i;
+
+ for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ return 0;
+}
+
+void vgic_v2_init_emulation(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
+ dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
+ dist->vm_ops.init_model = vgic_v2_init_model;
+ dist->vm_ops.map_resources = vgic_v2_map_resources;
+
+ kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
+}
+
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ bool updated = false;
+ struct vgic_vmcr vmcr;
+ u32 *vmcr_field;
+ u32 reg;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+
+ switch (offset & ~0x3) {
+ case GIC_CPU_CTRL:
+ vmcr_field = &vmcr.ctlr;
+ break;
+ case GIC_CPU_PRIMASK:
+ vmcr_field = &vmcr.pmr;
+ break;
+ case GIC_CPU_BINPOINT:
+ vmcr_field = &vmcr.bpr;
+ break;
+ case GIC_CPU_ALIAS_BINPOINT:
+ vmcr_field = &vmcr.abpr;
+ break;
+ default:
+ BUG();
+ }
+
+ if (!mmio->is_write) {
+ reg = *vmcr_field;
+ mmio_data_write(mmio, ~0, reg);
+ } else {
+ reg = mmio_data_read(mmio, ~0);
+ if (reg != *vmcr_field) {
+ *vmcr_field = reg;
+ vgic_set_vmcr(vcpu, &vmcr);
+ updated = true;
+ }
+ }
+ return updated;
+}
+
+static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+}
+
+static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ if (mmio->is_write)
+ return false;
+
+ /* GICC_IIDR */
+ reg = (PRODUCT_ID_KVM << 20) |
+ (GICC_ARCH_VERSION_V2 << 16) |
+ (IMPLEMENTER_ARM << 0);
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+/*
+ * CPU Interface Registers - these are not accessed by the VM, but by user
+ * space for saving and restoring VGIC state.
+ */
+static const struct kvm_mmio_range vgic_cpu_ranges[] = {
+ {
+ .base = GIC_CPU_CTRL,
+ .len = 12,
+ .handle_mmio = handle_cpu_mmio_misc,
+ },
+ {
+ .base = GIC_CPU_ALIAS_BINPOINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_abpr,
+ },
+ {
+ .base = GIC_CPU_ACTIVEPRIO,
+ .len = 16,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_CPU_IDENT,
+ .len = 4,
+ .handle_mmio = handle_cpu_mmio_ident,
+ },
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u32 *reg, bool is_write)
+{
+ const struct kvm_mmio_range *r = NULL, *ranges;
+ phys_addr_t offset;
+ int ret, cpuid, c;
+ struct kvm_vcpu *vcpu, *tmp_vcpu;
+ struct vgic_dist *vgic;
+ struct kvm_exit_mmio mmio;
+
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+ KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+ mutex_lock(&dev->kvm->lock);
+
+ ret = vgic_init(dev->kvm);
+ if (ret)
+ goto out;
+
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ vgic = &dev->kvm->arch.vgic;
+
+ mmio.len = 4;
+ mmio.is_write = is_write;
+ if (is_write)
+ mmio_data_write(&mmio, ~0, *reg);
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ mmio.phys_addr = vgic->vgic_dist_base + offset;
+ ranges = vgic_dist_ranges;
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ mmio.phys_addr = vgic->vgic_cpu_base + offset;
+ ranges = vgic_cpu_ranges;
+ break;
+ default:
+ BUG();
+ }
+ r = vgic_find_range(ranges, &mmio, offset);
+
+ if (unlikely(!r || !r->handle_mmio)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ spin_lock(&vgic->lock);
+
+ /*
+ * Ensure that no other VCPU is running by checking the vcpu->cpu
+	 * field. If no other VCPUs are running we can safely access the VGIC
+	 * state, because even if another VCPU is run after this point, that
+ * VCPU will not touch the vgic state, because it will block on
+ * getting the vgic->lock in kvm_vgic_sync_hwstate().
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+ if (unlikely(tmp_vcpu->cpu != -1)) {
+ ret = -EBUSY;
+ goto out_vgic_unlock;
+ }
+ }
+
+ /*
+ * Move all pending IRQs from the LRs on all VCPUs so the pending
+ * state can be properly represented in the register state accessible
+ * through this API.
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+ vgic_unqueue_irqs(tmp_vcpu);
+
+ offset -= r->base;
+ r->handle_mmio(vcpu, &mmio, offset);
+
+ if (!is_write)
+ *reg = mmio_data_read(&mmio, ~0);
+
+ ret = 0;
+out_vgic_unlock:
+ spin_unlock(&vgic->lock);
+out:
+ mutex_unlock(&dev->kvm->lock);
+ return ret;
+}
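For context, a hedged userspace sketch of how a VMM might drive the access path above to read one GICC register, assuming the usual attr encoding (register offset in the low 32 bits, CPU id shifted up by 32) and an ARM build where <linux/kvm.h> pulls in the vgic attribute groups; vgic_fd is assumed to come from an earlier KVM_CREATE_DEVICE call.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read one GICC register of the given vCPU; returns the ioctl result. */
static int read_gicc_reg(int vgic_fd, uint32_t cpuid, uint32_t offset,
			 uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
		.attr	= ((uint64_t)cpuid << 32) | offset,	/* assumed shift of 32 */
		.addr	= (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}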
+
+static int vgic_v2_create(struct kvm_device *dev, u32 type)
+{
+ return kvm_vgic_create(dev->kvm, type);
+}
+
+static void vgic_v2_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+static int vgic_v2_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg;
+
+ if (get_user(reg, uaddr))
+ return -EFAULT;
+
+ return vgic_attr_regs_access(dev, attr, &reg, true);
+ }
+
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v2_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_get_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg = 0;
+
+ ret = vgic_attr_regs_access(dev, attr, &reg, false);
+ if (ret)
+ return ret;
+ return put_user(reg, uaddr);
+ }
+
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v2_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ phys_addr_t offset;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_dist_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_cpu_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ return 0;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+ .name = "kvm-arm-vgic-v2",
+ .create = vgic_v2_create,
+ .destroy = vgic_v2_destroy,
+ .set_attr = vgic_v2_set_attr,
+ .get_attr = vgic_v2_get_attr,
+ .has_attr = vgic_v2_has_attr,
+};
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 2935405ad22f..a0a7b5d1a070 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -229,12 +229,16 @@ int vgic_v2_probe(struct device_node *vgic_node,
goto out_unmap;
}
+ vgic->can_emulate_gicv2 = true;
+ kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
+
vgic->vcpu_base = vcpu_res.start;
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vctrl_res.start, vgic->maint_irq);
vgic->type = VGIC_V2;
+ vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
*ops = &vgic_v2_ops;
*params = vgic;
goto out;
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
new file mode 100644
index 000000000000..b3f154631515
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -0,0 +1,1036 @@
+/*
+ * GICv3 distributor and redistributor emulation
+ *
+ * GICv3 emulation is currently only supported on a GICv3 host (because
+ * we rely on the hardware's CPU interface virtualization support), but
+ * supports both hardware with or without the optional GICv2 backwards
+ * compatibility features.
+ *
+ * Limitations of the emulation:
+ * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
+ * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
+ * - We do not support the message based interrupts (MBIs) triggered by
+ * writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
+ * - We do not support the (optional) backwards compatibility feature.
+ * GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
+ *   the compatibility feature, you can use a GICv2 in the guest, though.
+ * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
+ * - Priorities are not emulated (same as the GICv2 emulation). Linux
+ * as a guest is fine with this, because it does not use priorities.
+ * - We only support Group1 interrupts. Again Linux uses only those.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg = 0xffffffff;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg = 0;
+
+ /*
+ * Force ARE and DS to 1, the guest cannot change this.
+ * For the time being we only support Group1 interrupts.
+ */
+ if (vcpu->kvm->arch.vgic.enabled)
+ reg = GICD_CTLR_ENABLE_SS_G1;
+ reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ if (reg & GICD_CTLR_ENABLE_SS_G0)
+ kvm_info("guest tried to enable unsupported Group0 interrupts\n");
+ vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * As this implementation does not provide compatibility
+ * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
+ * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
+ * We also report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
+ */
+#define INTERRUPT_ID_BITS 10
+static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+
+ reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;
+
+ reg |= (INTERRUPT_ID_BITS - 1) << 19;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
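A quick numeric check of the value built above, assuming a guest configured with 256 interrupts: the low field becomes 256/32 - 1 = 7 and the ID-bits field encodes 10 bits of interrupt ID as 10 - 1 = 9.

#include <stdio.h>

int main(void)
{
	unsigned int nr_irqs = 256;		/* assumed guest configuration */
	unsigned int reg;

	reg = ((nr_irqs < 1024 ? nr_irqs : 1024) >> 5) - 1;
	reg |= (10 - 1) << 19;			/* INTERRUPT_ID_BITS - 1 */
	printf("GICD_TYPER = 0x%08x\n", reg);	/* 0x00480007 */
	return 0;
}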
+
+static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+/*
+ * We use a compressed version of the MPIDR (the four 8-bit affinity levels
+ * packed into one 32-bit word) when we store the target MPIDR written by
+ * the guest.
+ */
+static u32 compress_mpidr(unsigned long mpidr)
+{
+ u32 ret;
+
+ ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;
+
+ return ret;
+}
+
+static unsigned long uncompress_mpidr(u32 value)
+{
+ unsigned long mpidr;
+
+ mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
+ mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);
+
+ return mpidr;
+}
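A standalone round-trip sketch of this packing, written against the arm64 field layout (Aff0..Aff2 in bits [23:0], Aff3 in bits [39:32]) rather than the kernel macros; the MPIDR value is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

static uint32_t compress(uint64_t mpidr)
{
	return (mpidr & 0xff) |
	       (((mpidr >> 8) & 0xff) << 8) |
	       (((mpidr >> 16) & 0xff) << 16) |
	       (((mpidr >> 32) & 0xff) << 24);
}

static uint64_t uncompress(uint32_t v)
{
	return (uint64_t)(v & 0xff) |
	       ((uint64_t)((v >> 8) & 0xff) << 8) |
	       ((uint64_t)((v >> 16) & 0xff) << 16) |
	       ((uint64_t)((v >> 24) & 0xff) << 32);
}

int main(void)
{
	uint64_t mpidr = 0x0100000203ULL;	/* Aff3=1 Aff2=0 Aff1=2 Aff0=3 */

	printf("compressed: 0x%08x\n", (unsigned int)compress(mpidr));	/* 0x01000203 */
	printf("round trip: 0x%010llx\n",
	       (unsigned long long)uncompress(compress(mpidr)));
	return 0;
}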
+
+/*
+ * Look up the given MPIDR value to get the vcpu_id (if there is one)
+ * and store that in the irq_spi_cpu[] array.
+ * This limits the number of VCPUs to 255 for now; extending the data
+ * type (or storing kvm_vcpu pointers) would lift the limit.
+ * Store the original MPIDR value in an extra array to support read-as-written.
+ * Unallocated MPIDRs are translated to a special value and caught
+ * before any array accesses.
+ */
+static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int spi;
+ u32 reg;
+ int vcpu_id;
+ unsigned long *bmap, mpidr;
+
+ /*
+ * The upper 32 bits of each 64 bit register are zero,
+ * as we don't support Aff3.
+ */
+ if ((offset & 4)) {
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ /* This region only covers SPIs, so no handling of private IRQs here. */
+ spi = offset / 8;
+
+ /* get the stored MPIDR for this IRQ */
+ mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
+ reg = mpidr;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+
+ if (!mmio->is_write)
+ return false;
+
+ /*
+ * Now clear the currently assigned vCPU from the map, making room
+ * for the new one to be written below
+ */
+ vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ __clear_bit(spi, bmap);
+ }
+
+ dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
+ vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);
+
+ /*
+ * The spec says that non-existent MPIDR values should not be
+	 * forwarded to any existing (v)CPU, but should be able to become
+ * pending anyway. We simply keep the irq_spi_target[] array empty, so
+ * the interrupt will never be injected.
+ * irq_spi_cpu[irq] gets a magic value in this case.
+ */
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ dist->irq_spi_cpu[spi] = vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ __set_bit(spi, bmap);
+ } else {
+ dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
+ }
+
+ vgic_update_state(kvm);
+
+ return true;
+}
+
+/*
+ * We should be careful about promising too much when a guest reads
+ * this register. Don't claim to be like any hardware implementation,
+ * but just report the GIC as version 3 - which is what a Linux guest
+ * would check.
+ */
+static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg = 0;
+
+ switch (offset + GICD_IDREGS) {
+ case GICD_PIDR2:
+ reg = 0x3b;
+ break;
+ }
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
+ {
+ .base = GICD_CTLR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_ctlr,
+ },
+ {
+ .base = GICD_TYPER,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_typer,
+ },
+ {
+ .base = GICD_IIDR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_iidr,
+ },
+ {
+ /* this register is optional, it is RAZ/WI if not implemented */
+ .base = GICD_STATUSR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this write only register is WI when TYPER.MBIS=0 */
+ .base = GICD_SETSPI_NSR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this write only register is WI when TYPER.MBIS=0 */
+ .base = GICD_CLRSPI_NSR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_SETSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_CLRSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IGROUPR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_rao_wi,
+ },
+ {
+ .base = GICD_ISENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_dist,
+ },
+ {
+ .base = GICD_ICENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_dist,
+ },
+ {
+ .base = GICD_ISPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_dist,
+ },
+ {
+ .base = GICD_ICPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_dist,
+ },
+ {
+ .base = GICD_ISACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IPRIORITYR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_dist,
+ },
+ {
+ /* TARGETSRn is RES0 when ARE=1 */
+ .base = GICD_ITARGETSR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICFGR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_dist,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_IGRPMODR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_NSACR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when ARE=1 */
+ .base = GICD_SGIR,
+ .len = 0x04,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when ARE=1 */
+ .base = GICD_CPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when ARE=1 */
+ .base = GICD_SPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IROUTER + 0x100,
+ .len = 0x1ee0,
+ .bits_per_irq = 64,
+ .handle_mmio = handle_mmio_route_reg,
+ },
+ {
+ .base = GICD_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+ u32 *reg;
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ redist_vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ redist_vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
+ {
+ .base = GICR_IGROUPR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_rao_wi,
+ },
+ {
+ .base = GICR_ISENABLER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_redist,
+ },
+ {
+ .base = GICR_ICENABLER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_redist,
+ },
+ {
+ .base = GICR_ISPENDR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_redist,
+ },
+ {
+ .base = GICR_ICPENDR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_redist,
+ },
+ {
+ .base = GICR_ISACTIVER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_ICACTIVER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IPRIORITYR0,
+ .len = 0x20,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_redist,
+ },
+ {
+ .base = GICR_ICFGR0,
+ .len = 0x08,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_redist,
+ },
+ {
+ .base = GICR_IGRPMODR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_NSACR,
+ .len = 0x04,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {},
+};
+
+static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ /* since we don't support LPIs, this register is zero for now */
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+ u64 mpidr;
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+ int target_vcpu_id = redist_vcpu->vcpu_id;
+
+ /* the upper 32 bits contain the affinity value */
+ if ((offset & ~3) == 4) {
+ mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
+ reg = compress_mpidr(mpidr);
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = redist_vcpu->vcpu_id << 8;
+ if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+ reg |= GICR_TYPER_LAST;
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static const struct kvm_mmio_range vgic_redist_ranges[] = {
+ {
+ .base = GICR_CTLR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_ctlr_redist,
+ },
+ {
+ .base = GICR_TYPER,
+ .len = 0x08,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_typer_redist,
+ },
+ {
+ .base = GICR_IIDR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_iidr,
+ },
+ {
+ .base = GICR_WAKER,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+/*
+ * This function splits accesses between the distributor and the two
+ * redistributor parts (private/SPI). As each redistributor is accessible
+ * from any CPU, we have to determine the affected VCPU by taking the faulting
+ * address into account. We then pass this VCPU to the handler function via
+ * the private parameter.
+ */
+#define SGI_BASE_OFFSET SZ_64K
+static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long dbase = dist->vgic_dist_base;
+ unsigned long rdbase = dist->vgic_redist_base;
+ int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ int vcpu_id;
+ const struct kvm_mmio_range *mmio_range;
+
+ if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_v3_dist_ranges, dbase);
+ }
+
+ if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
+ GIC_V3_REDIST_SIZE * nrcpus))
+ return false;
+
+ vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
+ rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
+ mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
+
+ if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
+ rdbase += SGI_BASE_OFFSET;
+ mmio_range = vgic_redist_sgi_ranges;
+ } else {
+ mmio_range = vgic_redist_ranges;
+ }
+ return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
+}
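To make the address split concrete, a userspace sketch of the decode for a single fault, assuming the 128 KiB per-vCPU redistributor stride used here (a 64 KiB RD frame followed by a 64 KiB SGI frame); the base and fault addresses are invented.

#include <stdio.h>
#include <stdint.h>

#define REDIST_STRIDE	0x20000ULL	/* 2 x 64 KiB per vCPU */
#define SGI_OFFSET	0x10000ULL	/* second 64 KiB frame */

int main(void)
{
	uint64_t rdbase = 0x08080000ULL;	/* hypothetical GICR base */
	uint64_t fault  = 0x080b0000ULL;	/* faulting guest address */
	uint64_t off    = fault - rdbase;
	unsigned int vcpu_id = off / REDIST_STRIDE;
	uint64_t frame_off = off % REDIST_STRIDE;
	int sgi_frame = frame_off >= SGI_OFFSET;

	printf("vCPU %u, %s frame, offset 0x%llx\n", vcpu_id,
	       sgi_frame ? "SGI" : "RD",
	       (unsigned long long)(sgi_frame ? frame_off - SGI_OFFSET
					      : frame_off));
	return 0;	/* -> vCPU 1, SGI frame, offset 0x0 */
}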
+
+static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ if (vgic_queue_irq(vcpu, 0, irq)) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+static int vgic_v3_map_resources(struct kvm *kvm,
+ const struct vgic_params *params)
+{
+ int ret = 0;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ if (!irqchip_in_kernel(kvm))
+ return 0;
+
+ mutex_lock(&kvm->lock);
+
+ if (vgic_ready(kvm))
+ goto out;
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * For a VGICv3 we require the userland to explicitly initialize
+ * the VGIC before we need to use it.
+ */
+ if (!vgic_initialized(kvm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ kvm->arch.vgic.ready = true;
+out:
+ if (ret)
+ kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int vgic_v3_init_model(struct kvm *kvm)
+{
+ int i;
+ u32 mpidr;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
+
+ dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
+ GFP_KERNEL);
+
+ if (!dist->irq_spi_mpidr)
+ return -ENOMEM;
+
+ /* Initialize the target VCPUs for each IRQ to VCPU 0 */
+ mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
+ dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
+ dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
+ vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
+ }
+
+ return 0;
+}
+
+/* GICv3 does not keep track of SGI sources anymore. */
+static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
+{
+}
+
+void vgic_v3_init_emulation(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
+ dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
+ dist->vm_ops.init_model = vgic_v3_init_model;
+ dist->vm_ops.map_resources = vgic_v3_map_resources;
+
+ kvm->arch.max_vcpus = KVM_MAX_VCPUS;
+}
+
+/*
+ * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
+ * generation register ICC_SGI1R_EL1) with a given VCPU.
+ * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
+ * return -1.
+ */
+static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
+{
+ unsigned long affinity;
+ int level0;
+
+ /*
+ * Split the current VCPU's MPIDR into affinity level 0 and the
+ * rest as this is what we have to compare against.
+ */
+ affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+ level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
+ affinity &= ~MPIDR_LEVEL_MASK;
+
+ /* bail out if the upper three levels don't match */
+ if (sgi_aff != affinity)
+ return -1;
+
+ /* Is this VCPU's bit set in the mask ? */
+ if (!(sgi_cpu_mask & BIT(level0)))
+ return -1;
+
+ return level0;
+}
+
+#define SGI_AFFINITY_LEVEL(reg, level) \
+ ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
+ >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+
+/**
+ * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
+ * @vcpu: The VCPU requesting a SGI
+ * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
+ *
+ * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
+ * This will trap in sys_regs.c and call this function.
+ * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
+ * target processors as well as a bitmask of 16 Aff0 CPUs.
+ * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
+ * check for matching ones. If this bit is set, we signal all VCPUs except
+ * the calling one.
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *c_vcpu;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u16 target_cpus;
+ u64 mpidr;
+ int sgi, c;
+ int vcpu_id = vcpu->vcpu_id;
+ bool broadcast;
+ int updated = 0;
+
+ sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
+ broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+ target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+ mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+ mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+ mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+
+ /*
+ * We take the dist lock here, because we come from the sysregs
+ * code path and not from the MMIO one (which already takes the lock).
+ */
+ spin_lock(&dist->lock);
+
+ /*
+ * We iterate over all VCPUs to find the MPIDRs matching the request.
+	 * If we have handled one CPU, we clear its bit to detect early
+	 * if we are already finished. This avoids iterating through all
+	 * VCPUs when most of the time we just signal a single VCPU.
+ */
+ kvm_for_each_vcpu(c, c_vcpu, kvm) {
+
+ /* Exit early if we have dealt with all requested CPUs */
+ if (!broadcast && target_cpus == 0)
+ break;
+
+ /* Don't signal the calling VCPU */
+ if (broadcast && c == vcpu_id)
+ continue;
+
+ if (!broadcast) {
+ int level0;
+
+ level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+ if (level0 == -1)
+ continue;
+
+ /* remove this matching VCPU from the mask */
+ target_cpus &= ~BIT(level0);
+ }
+
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set_pending(c_vcpu, sgi);
+ updated = 1;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+ }
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+ spin_unlock(&dist->lock);
+ if (updated)
+ vgic_kick_vcpus(vcpu->kvm);
+}
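For reference, a userspace sketch that decodes an ICC_SGI1R_EL1 value the way the function above does, using the architectural field layout (TargetList [15:0], Aff1 [23:16], INTID [27:24], Aff2 [39:32], IRM [40], Aff3 [55:48]); the register value is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* SGI 5 to cluster <0.0.1>, Aff0 targets 0 and 2 */
	uint64_t reg = (5ULL << 24) | (1ULL << 16) | 0x5;
	unsigned int sgi     = (reg >> 24) & 0xf;
	unsigned int aff1    = (reg >> 16) & 0xff;
	unsigned int aff2    = (reg >> 32) & 0xff;
	unsigned int aff3    = (reg >> 48) & 0xff;
	unsigned int irm     = (reg >> 40) & 0x1;
	unsigned int targets = reg & 0xffff;

	printf("SGI%u to <%u.%u.%u>, Aff0 mask 0x%x, broadcast=%u\n",
	       sgi, aff3, aff2, aff1, targets, irm);
	return 0;	/* SGI5 to <0.0.1>, Aff0 mask 0x5, broadcast=0 */
}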
+
+static int vgic_v3_create(struct kvm_device *dev, u32 type)
+{
+ return kvm_vgic_create(dev->kvm, type);
+}
+
+static void vgic_v3_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+static int vgic_v3_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v3_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_get_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v3_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return -ENXIO;
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ return 0;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v3_ops = {
+ .name = "kvm-arm-vgic-v3",
+ .create = vgic_v3_create,
+ .destroy = vgic_v3_destroy,
+ .set_attr = vgic_v3_set_attr,
+ .get_attr = vgic_v3_get_attr,
+ .has_attr = vgic_v3_has_attr,
+};
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 1c2c8eef0599..3a62d8a9a2c6 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -34,6 +34,7 @@
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
/*
* LRs are stored in reverse order in memory. make sure we index them
@@ -48,12 +49,17 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
struct vgic_lr lr_desc;
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
- lr_desc.irq = val & GICH_LR_VIRTUALID;
- if (lr_desc.irq <= 15)
- lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
else
- lr_desc.source = 0;
- lr_desc.state = 0;
+ lr_desc.irq = val & GICH_LR_VIRTUALID;
+
+ lr_desc.source = 0;
+ if (lr_desc.irq <= 15 &&
+ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+
+ lr_desc.state = 0;
if (val & ICH_LR_PENDING_BIT)
lr_desc.state |= LR_STATE_PENDING;
@@ -68,8 +74,20 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr lr_desc)
{
- u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
- lr_desc.irq);
+ u64 lr_val;
+
+ lr_val = lr_desc.irq;
+
+ /*
+ * Currently all guest IRQs are Group1, as Group0 would result
+ * in a FIQ in the guest, which it wouldn't expect.
+ * Eventually we want to make this configurable, so we may revisit
+ * this in the future.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ lr_val |= ICH_LR_GROUP;
+ else
+ lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
if (lr_desc.state & LR_STATE_PENDING)
lr_val |= ICH_LR_PENDING_BIT;
@@ -145,15 +163,27 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
+ struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
* anyway.
*/
- vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+ vgic_v3->vgic_vmcr = 0;
+
+ /*
+	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
+ * way, so we force SRE to 1 to demonstrate this to the guest.
+ * This goes with the spec allowing the value to be RAO/WI.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+ else
+ vgic_v3->vgic_sre = 0;
/* Get the show on the road... */
- vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
+ vgic_v3->vgic_hcr = ICH_HCR_EN;
}
static const struct vgic_ops vgic_v3_ops = {
@@ -205,35 +235,37 @@ int vgic_v3_probe(struct device_node *vgic_node,
* maximum of 16 list registers. Just ignore bit 4...
*/
vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+ vgic->can_emulate_gicv2 = false;
if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
gicv_idx = 1;
gicv_idx += 3; /* Also skip GICD, GICC, GICH */
if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
- kvm_err("Cannot obtain GICV region\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!PAGE_ALIGNED(vcpu_res.start)) {
- kvm_err("GICV physical address 0x%llx not page aligned\n",
+ kvm_info("GICv3: no GICV resource entry\n");
+ vgic->vcpu_base = 0;
+ } else if (!PAGE_ALIGNED(vcpu_res.start)) {
+ pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)vcpu_res.start);
- ret = -ENXIO;
- goto out;
- }
-
- if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
- kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+ vgic->vcpu_base = 0;
+ } else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+ pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
(unsigned long long)resource_size(&vcpu_res),
PAGE_SIZE);
- ret = -ENXIO;
- goto out;
+ vgic->vcpu_base = 0;
+ } else {
+ vgic->vcpu_base = vcpu_res.start;
+ vgic->can_emulate_gicv2 = true;
+ kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V2);
}
+ if (vgic->vcpu_base == 0)
+ kvm_info("disabling GICv2 emulation\n");
+ kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
- vgic->vcpu_base = vcpu_res.start;
vgic->vctrl_base = NULL;
vgic->type = VGIC_V3;
+ vgic->max_gic_vcpus = KVM_MAX_VCPUS;
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vcpu_res.start, vgic->maint_irq);
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 03affc7bf453..0cc6ab6005a0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -75,37 +75,31 @@
* inactive as long as the external input line is held high.
*/
-#define VGIC_ADDR_UNDEF (-1)
-#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
-
-#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
-#define IMPLEMENTER_ARM 0x43b
-#define GICC_ARCH_VERSION_V2 0x2
-
-#define ACCESS_READ_VALUE (1 << 0)
-#define ACCESS_READ_RAZ (0 << 0)
-#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
-#define ACCESS_WRITE_IGNORED (0 << 1)
-#define ACCESS_WRITE_SETBIT (1 << 1)
-#define ACCESS_WRITE_CLEARBIT (2 << 1)
-#define ACCESS_WRITE_VALUE (3 << 1)
-#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
-
-static int vgic_init(struct kvm *kvm);
+#include "vgic.h"
+
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
-static void vgic_update_state(struct kvm *kvm);
-static void vgic_kick_vcpus(struct kvm *kvm);
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
+static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
+}
+
+static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
+}
+
+int kvm_vgic_map_resources(struct kvm *kvm)
+{
+ return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
+}
+
/*
* struct vgic_bitmap contains a bitmap made of unsigned longs, but
* extracts u32s out of them.
@@ -160,8 +154,7 @@ static unsigned long *u64_to_bitmask(u64 *val)
return (unsigned long *)val;
}
-static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
- int cpuid, u32 offset)
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
offset >>= 2;
if (!offset)
@@ -179,8 +172,8 @@ static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}
-static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val)
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val)
{
unsigned long *reg;
@@ -202,7 +195,7 @@ static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
return x->private + cpuid;
}
-static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
return x->shared;
}
@@ -229,7 +222,7 @@ static void vgic_free_bytemap(struct vgic_bytemap *b)
b->shared = NULL;
}
-static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
u32 *reg;
@@ -326,14 +319,14 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}
-static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}
-static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -349,7 +342,7 @@ static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
vcpu->arch.vgic_cpu.pending_shared);
}
-static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
if (irq < VGIC_NR_PRIVATE_IRQS)
clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
@@ -363,16 +356,6 @@ static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
-static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
-{
- return le32_to_cpu(*((u32 *)mmio->data)) & mask;
-}
-
-static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
-{
- *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
-}
-
/**
* vgic_reg_access - access vgic register
* @mmio: pointer to the data describing the mmio access
@@ -384,8 +367,8 @@ static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
* modes defined for vgic register access
* (read,raz,write-ignored,setbit,clearbit,write)
*/
-static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
- phys_addr_t offset, int mode)
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+ phys_addr_t offset, int mode)
{
int word_offset = (offset & 3) * 8;
u32 mask = (1UL << (mmio->len * 8)) - 1;
@@ -434,107 +417,58 @@ static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
}
}
-static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- u32 word_offset = offset & 3;
-
- switch (offset & ~3) {
- case 0: /* GICD_CTLR */
- reg = vcpu->kvm->arch.vgic.enabled;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vcpu->kvm->arch.vgic.enabled = reg & 1;
- vgic_update_state(vcpu->kvm);
- return true;
- }
- break;
-
- case 4: /* GICD_TYPER */
- reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
- reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
-
- case 8: /* GICD_IIDR */
- reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
- }
-
- return false;
-}
-
-static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
vgic_reg_access(mmio, NULL, offset,
ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
return false;
}
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
- if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
+ u32 *reg;
+ int mode = ACCESS_READ_VALUE | access;
+ struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
-static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
- if (offset < 4) /* Force SGI enabled */
- *reg |= 0xffff;
- vgic_retire_disabled_irqs(vcpu);
- vgic_update_state(vcpu->kvm);
+ if (access & ACCESS_WRITE_CLEARBIT) {
+ if (offset < 4) /* Force SGI enabled */
+ *reg |= 0xffff;
+ vgic_retire_disabled_irqs(target_vcpu);
+ }
+ vgic_update_state(kvm);
return true;
}
return false;
}
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_set_pending_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id)
{
u32 *reg, orig;
u32 level_mask;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
+ struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
level_mask = (~(*reg));
/* Mark both level and edge triggered irqs as pending */
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
orig = *reg;
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
/* Set the soft-pending flag only for level-triggered irqs */
reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+ vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
*reg &= level_mask;
/* Ignore writes to SGIs */
@@ -543,31 +477,30 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
*reg |= orig & 0xffff;
}
- vgic_update_state(vcpu->kvm);
+ vgic_update_state(kvm);
return true;
}
return false;
}
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_clear_pending_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id)
{
u32 *level_active;
u32 *reg, orig;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
+ struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
orig = *reg;
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
/* Re-set level triggered level-active interrupts */
level_active = vgic_bitmap_get_reg(&dist->irq_level,
- vcpu->vcpu_id, offset);
- reg = vgic_bitmap_get_reg(&dist->irq_pending,
- vcpu->vcpu_id, offset);
+ vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
*reg |= *level_active;
/* Ignore writes to SGIs */
@@ -578,101 +511,12 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
/* Clear soft-pending flags */
reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
- vgic_update_state(vcpu->kvm);
+ vgic_update_state(kvm);
return true;
}
-
- return false;
-}
-
-static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-#define GICD_ITARGETSR_SIZE 32
-#define GICD_CPUTARGETS_BITS 8
-#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
-static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int i;
- u32 val = 0;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
- val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
-
- return val;
-}
-
-static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i, c;
- unsigned long *bmap;
- u32 target;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- /*
- * Pick the LSB in each byte. This ensures we target exactly
- * one vcpu per IRQ. If the byte is null, assume we target
- * CPU0.
- */
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
- int shift = i * GICD_CPUTARGETS_BITS;
- target = ffs((val >> shift) & 0xffU);
- target = target ? (target - 1) : 0;
- dist->irq_spi_cpu[irq + i] = target;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
- if (c == target)
- set_bit(irq + i, bmap);
- else
- clear_bit(irq + i, bmap);
- }
- }
-}
-
-static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- /* We treat the banked interrupts targets as read-only */
- if (offset < 32) {
- u32 roreg = 1 << vcpu->vcpu_id;
- roreg |= roreg << 8;
- roreg |= roreg << 16;
-
- vgic_reg_access(mmio, &roreg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
return false;
}
@@ -711,14 +555,10 @@ static u16 vgic_cfg_compress(u32 val)
* LSB is always 0. As such, we only keep the upper bit, and use the
* two above functions to compress/expand the bits
*/
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
u32 val;
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
if (offset & 4)
val = *reg >> 16;
@@ -747,21 +587,6 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
return false;
}
-static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_dispatch_sgi(vcpu, reg);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
/**
* vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -774,11 +599,9 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
* to the distributor but the active state stays in the LRs, because we don't
* track the active state on the distributor side.
*/
-static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- int vcpu_id = vcpu->vcpu_id;
int i;
for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
@@ -805,7 +628,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
*/
vgic_dist_irq_set_pending(vcpu, lr.irq);
if (lr.irq < VGIC_NR_SGIS)
- *vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
+ add_sgi_source(vcpu, lr.irq, lr.source);
lr.state &= ~LR_STATE_PENDING;
vgic_set_lr(vcpu, i, lr);
@@ -824,188 +647,12 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
}
}
-/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
-static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg = 0;
-
- /* Copy source SGIs from distributor side */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- int shift = 8 * (sgi - min_sgi);
- reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
- }
-
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, bool set)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg;
- bool updated = false;
-
- reg = mmio_data_read(mmio, ~0);
-
- /* Clear pending SGIs on the distributor */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- u8 mask = reg >> (8 * (sgi - min_sgi));
- u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
- if (set) {
- if ((*src & mask) != mask)
- updated = true;
- *src |= mask;
- } else {
- if (*src & mask)
- updated = true;
- *src &= ~mask;
- }
- }
-
- if (updated)
- vgic_update_state(vcpu->kvm);
-
- return updated;
-}
-
-static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
-}
-
-static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
-}
-
-/*
- * I would have liked to use the kvm_bus_io_*() API instead, but it
- * cannot cope with banked registers (only the VM pointer is passed
- * around, and we need the vcpu). One of these days, someone please
- * fix it!
- */
-struct mmio_range {
- phys_addr_t base;
- unsigned long len;
- int bits_per_irq;
- bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-};
-
-static const struct mmio_range vgic_dist_ranges[] = {
- {
- .base = GIC_DIST_CTRL,
- .len = 12,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_misc,
- },
- {
- .base = GIC_DIST_IGROUP,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ENABLE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg,
- },
- {
- .base = GIC_DIST_ENABLE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg,
- },
- {
- .base = GIC_DIST_PENDING_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg,
- },
- {
- .base = GIC_DIST_PENDING_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg,
- },
- {
- .base = GIC_DIST_ACTIVE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ACTIVE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_PRI,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg,
- },
- {
- .base = GIC_DIST_TARGET,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_target_reg,
- },
- {
- .base = GIC_DIST_CONFIG,
- .len = VGIC_MAX_IRQS / 4,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg,
- },
- {
- .base = GIC_DIST_SOFTINT,
- .len = 4,
- .handle_mmio = handle_mmio_sgi_reg,
- },
- {
- .base = GIC_DIST_SGI_PENDING_CLEAR,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_clear,
- },
- {
- .base = GIC_DIST_SGI_PENDING_SET,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_set,
- },
- {}
-};
-
-static const
-struct mmio_range *find_matching_range(const struct mmio_range *ranges,
+const
+struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- const struct mmio_range *r = ranges;
+ const struct kvm_mmio_range *r = ranges;
while (r->len) {
if (offset >= r->base &&
@@ -1018,7 +665,7 @@ struct mmio_range *find_matching_range(const struct mmio_range *ranges,
}
static bool vgic_validate_access(const struct vgic_dist *dist,
- const struct mmio_range *range,
+ const struct kvm_mmio_range *range,
unsigned long offset)
{
int irq;
@@ -1033,37 +680,76 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
return true;
}
+/*
+ * Call the respective handler function for the given range.
+ * We split up any 64 bit accesses into two consecutive 32 bit
+ * handler calls and merge the result afterwards.
+ * We do this in a little endian fashion regardless of the host's
+ * or guest's endianness, because the GIC is always LE and the rest of
+ * the code (vgic_reg_access) also puts it in LE fashion already.
+ * At this point we have already identified the handle function, so
+ * range points to that one entry and offset is relative to this.
+ */
+static bool call_range_handler(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ unsigned long offset,
+ const struct kvm_mmio_range *range)
+{
+ u32 *data32 = (void *)mmio->data;
+ struct kvm_exit_mmio mmio32;
+ bool ret;
+
+ if (likely(mmio->len <= 4))
+ return range->handle_mmio(vcpu, mmio, offset);
+
+ /*
+ * Any access bigger than 4 bytes (that we currently handle in KVM)
+ * is actually 8 bytes long, caused by a 64-bit access.
+ */
+
+ mmio32.len = 4;
+ mmio32.is_write = mmio->is_write;
+ mmio32.private = mmio->private;
+
+ mmio32.phys_addr = mmio->phys_addr + 4;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[1];
+ ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
+ if (!mmio->is_write)
+ data32[1] = *(u32 *)mmio32.data;
+
+ mmio32.phys_addr = mmio->phys_addr;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[0];
+ ret |= range->handle_mmio(vcpu, &mmio32, offset);
+ if (!mmio->is_write)
+ data32[0] = *(u32 *)mmio32.data;
+
+ return ret;
+}
+
/**
- * vgic_handle_mmio - handle an in-kernel MMIO access
+ * vgic_handle_mmio_range - handle an in-kernel MMIO access
* @vcpu: pointer to the vcpu performing the access
* @run: pointer to the kvm_run structure
* @mmio: pointer to the data describing the access
+ * @ranges: array of MMIO ranges in a given region
+ * @mmio_base: base address of that region
*
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
+ * returns true if the MMIO access could be performed
*/
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct kvm_mmio_range *ranges,
+ unsigned long mmio_base)
{
- const struct mmio_range *range;
+ const struct kvm_mmio_range *range;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long base = dist->vgic_dist_base;
bool updated_state;
unsigned long offset;
- if (!irqchip_in_kernel(vcpu->kvm) ||
- mmio->phys_addr < base ||
- (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
- return false;
-
- /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
- if (mmio->len > 4) {
- kvm_inject_dabt(vcpu, mmio->phys_addr);
- return true;
- }
-
- offset = mmio->phys_addr - base;
- range = find_matching_range(vgic_dist_ranges, mmio, offset);
+ offset = mmio->phys_addr - mmio_base;
+ range = vgic_find_range(ranges, mmio, offset);
if (unlikely(!range || !range->handle_mmio)) {
pr_warn("Unhandled access %d %08llx %d\n",
mmio->is_write, mmio->phys_addr, mmio->len);
@@ -1071,12 +757,12 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
spin_lock(&vcpu->kvm->arch.vgic.lock);
- offset = mmio->phys_addr - range->base - base;
+ offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
- updated_state = range->handle_mmio(vcpu, mmio, offset);
+ updated_state = call_range_handler(vcpu, mmio, offset, range);
} else {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ if (!mmio->is_write)
+ memset(mmio->data, 0, mmio->len);
updated_state = false;
}
spin_unlock(&vcpu->kvm->arch.vgic.lock);
@@ -1089,50 +775,28 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
-{
- return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
-}
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+/**
+ * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
+ * @vcpu: pointer to the vcpu performing the access
+ * @run: pointer to the kvm_run structure
+ * @mmio: pointer to the data describing the access
+ *
+ * returns true if the MMIO access has been performed in kernel space,
+ * and false if it needs to be emulated in user space.
+ * Calls the actual handling routine for the selected VGIC model.
+ */
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
{
- struct kvm *kvm = vcpu->kvm;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- u8 target_cpus;
- int sgi, mode, c, vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
-
- sgi = reg & 0xf;
- target_cpus = (reg >> 16) & 0xff;
- mode = (reg >> 24) & 3;
-
- switch (mode) {
- case 0:
- if (!target_cpus)
- return;
- break;
-
- case 1:
- target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
- break;
-
- case 2:
- target_cpus = 1 << vcpu_id;
- break;
- }
-
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (target_cpus & 1) {
- /* Flag the SGI as pending */
- vgic_dist_irq_set_pending(vcpu, sgi);
- *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
- kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
- }
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return false;
- target_cpus >>= 1;
- }
+ /*
+ * This will currently call either vgic_v2_handle_mmio() or
+ * vgic_v3_handle_mmio(), which in turn will call
+ * vgic_handle_mmio_range() defined above.
+ */
+ return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
@@ -1173,7 +837,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
* Update the interrupt state and determine which CPUs have pending
* interrupts. Must be called with distributor lock held.
*/
-static void vgic_update_state(struct kvm *kvm)
+void vgic_update_state(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1234,12 +898,12 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
vgic_ops->disable_underflow(vcpu);
}
-static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->get_vmcr(vcpu, vmcr);
}
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->set_vmcr(vcpu, vmcr);
}
@@ -1288,8 +952,9 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
/*
* Queue an interrupt to a CPU virtual interface. Return true on success,
* or false if it wasn't possible to queue it.
+ * sgi_source must be zero for any non-SGI interrupts.
*/
-static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -1338,37 +1003,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return true;
}
-static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long sources;
- int vcpu_id = vcpu->vcpu_id;
- int c;
-
- sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
-
- for_each_set_bit(c, &sources, dist->nr_cpus) {
- if (vgic_queue_irq(vcpu, c, irq))
- clear_bit(c, &sources);
- }
-
- *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
-
- /*
- * If the sources bitmap has been cleared it means that we
- * could queue all the SGIs onto link registers (see the
- * clear_bit above), and therefore we are done with them in
- * our emulated gic and can get rid of them.
- */
- if (!sources) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- return true;
- }
-
- return false;
-}
-
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
if (!vgic_can_sample_irq(vcpu, irq))
@@ -1413,7 +1047,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
/* SGIs */
for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
- if (!vgic_queue_sgi(vcpu, i))
+ if (!queue_sgi(vcpu, i))
overflow = 1;
}
@@ -1575,7 +1209,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
-static void vgic_kick_vcpus(struct kvm *kvm)
+void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int c;
@@ -1615,7 +1249,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
struct kvm_vcpu *vcpu;
int edge_triggered, level_triggered;
int enabled;
- bool ret = true;
+ bool ret = true, can_inject = true;
spin_lock(&dist->lock);
@@ -1630,6 +1264,11 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
+ if (cpuid == VCPU_NOT_ALLOCATED) {
+ /* Pretend we use CPU0, and prevent injection */
+ cpuid = 0;
+ can_inject = false;
+ }
vcpu = kvm_get_vcpu(kvm, cpuid);
}
@@ -1652,7 +1291,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
enabled = vgic_irq_is_enabled(vcpu, irq_num);
- if (!enabled) {
+ if (!enabled || !can_inject) {
ret = false;
goto out;
}
@@ -1698,6 +1337,16 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
int vcpu_id;
if (unlikely(!vgic_initialized(kvm))) {
+ /*
+ * We only provide the automatic initialization of the VGIC
+ * for the legacy case of a GICv2. Any other type must
+ * be explicitly initialized once set up via the respective
+ * KVM device call.
+ */
+ if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
+ ret = -EBUSY;
+ goto out;
+ }
mutex_lock(&kvm->lock);
ret = vgic_init(kvm);
mutex_unlock(&kvm->lock);
@@ -1762,6 +1411,17 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
return 0;
}
+/**
+ * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
+ *
+ * The host's GIC naturally limits the maximum number of VCPUs a guest
+ * can use.
+ */
+int kvm_vgic_get_max_vcpus(void)
+{
+ return vgic->max_gic_vcpus;
+}
+
void kvm_vgic_destroy(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1784,6 +1444,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
}
kfree(dist->irq_sgi_sources);
kfree(dist->irq_spi_cpu);
+ kfree(dist->irq_spi_mpidr);
kfree(dist->irq_spi_target);
kfree(dist->irq_pending_on_cpu);
dist->irq_sgi_sources = NULL;
@@ -1797,7 +1458,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
* Allocate and initialize the various data structures. Must be called
* with kvm->lock held!
*/
-static int vgic_init(struct kvm *kvm)
+int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1809,7 +1470,7 @@ static int vgic_init(struct kvm *kvm)
nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
if (!nr_cpus) /* No vcpus? Can't be good... */
- return -EINVAL;
+ return -ENODEV;
/*
* If nobody configured the number of interrupts, use the
@@ -1852,8 +1513,9 @@ static int vgic_init(struct kvm *kvm)
if (ret)
goto out;
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
+ ret = kvm->arch.vgic.vm_ops.init_model(kvm);
+ if (ret)
+ goto out;
kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
@@ -1882,72 +1544,49 @@ out:
return ret;
}
-/**
- * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
- * @kvm: pointer to the kvm struct
- *
- * Map the virtual CPU interface into the VM before running any VCPUs. We
- * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space.
- */
-int kvm_vgic_map_resources(struct kvm *kvm)
+static int init_vgic_model(struct kvm *kvm, int type)
{
- int ret = 0;
-
- if (!irqchip_in_kernel(kvm))
- return 0;
-
- mutex_lock(&kvm->lock);
-
- if (vgic_ready(kvm))
- goto out;
-
- if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
- kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
- }
-
- /*
- * Initialize the vgic if this hasn't already been done on demand by
- * accessing the vgic state from userspace.
- */
- ret = vgic_init(kvm);
- if (ret) {
- kvm_err("Unable to allocate maps\n");
- goto out;
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ vgic_v2_init_emulation(kvm);
+ break;
+#ifdef CONFIG_ARM_GIC_V3
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ vgic_v3_init_emulation(kvm);
+ break;
+#endif
+ default:
+ return -ENODEV;
}
- ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
- vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
- true);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
- }
+ if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
+ return -E2BIG;
- kvm->arch.vgic.ready = true;
-out:
- if (ret)
- kvm_vgic_destroy(kvm);
- mutex_unlock(&kvm->lock);
- return ret;
+ return 0;
}
-int kvm_vgic_create(struct kvm *kvm)
+int kvm_vgic_create(struct kvm *kvm, u32 type)
{
int i, vcpu_lock_idx = -1, ret;
struct kvm_vcpu *vcpu;
mutex_lock(&kvm->lock);
- if (kvm->arch.vgic.vctrl_base) {
+ if (irqchip_in_kernel(kvm)) {
ret = -EEXIST;
goto out;
}
/*
+ * This function is also called by the KVM_CREATE_IRQCHIP handler,
+ * which has not yet had a chance to check the availability of the GICv2
+ * emulation. So check this here again. KVM_CREATE_DEVICE does
+ * the proper checks already.
+ */
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2)
+ return -ENODEV;
+
+ /*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
* vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
* that no other VCPUs are run while we create the vgic.
@@ -1965,11 +1604,17 @@ int kvm_vgic_create(struct kvm *kvm)
}
ret = 0;
+ ret = init_vgic_model(kvm, type);
+ if (ret)
+ goto out_unlock;
+
spin_lock_init(&kvm->arch.vgic.lock);
kvm->arch.vgic.in_kernel = true;
+ kvm->arch.vgic.vgic_model = type;
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
out_unlock:
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
@@ -2022,7 +1667,7 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
/**
* kvm_vgic_addr - set or get vgic VM base addresses
* @kvm: pointer to the vm struct
- * @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
+ * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
* @addr: pointer to address value
* @write: if true set the address in the VM address space, if false read the
* address
@@ -2036,216 +1681,64 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
int r = 0;
struct vgic_dist *vgic = &kvm->arch.vgic;
+ int type_needed;
+ phys_addr_t *addr_ptr, block_size;
+ phys_addr_t alignment;
mutex_lock(&kvm->lock);
switch (type) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
- if (write) {
- r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
- *addr, KVM_VGIC_V2_DIST_SIZE);
- } else {
- *addr = vgic->vgic_dist_base;
- }
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_dist_base;
+ block_size = KVM_VGIC_V2_DIST_SIZE;
+ alignment = SZ_4K;
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
- if (write) {
- r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
- *addr, KVM_VGIC_V2_CPU_SIZE);
- } else {
- *addr = vgic->vgic_cpu_base;
- }
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_cpu_base;
+ block_size = KVM_VGIC_V2_CPU_SIZE;
+ alignment = SZ_4K;
break;
- default:
- r = -ENODEV;
- }
-
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- bool updated = false;
- struct vgic_vmcr vmcr;
- u32 *vmcr_field;
- u32 reg;
-
- vgic_get_vmcr(vcpu, &vmcr);
-
- switch (offset & ~0x3) {
- case GIC_CPU_CTRL:
- vmcr_field = &vmcr.ctlr;
- break;
- case GIC_CPU_PRIMASK:
- vmcr_field = &vmcr.pmr;
+#ifdef CONFIG_ARM_GIC_V3
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_dist_base;
+ block_size = KVM_VGIC_V3_DIST_SIZE;
+ alignment = SZ_64K;
break;
- case GIC_CPU_BINPOINT:
- vmcr_field = &vmcr.bpr;
- break;
- case GIC_CPU_ALIAS_BINPOINT:
- vmcr_field = &vmcr.abpr;
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_redist_base;
+ block_size = KVM_VGIC_V3_REDIST_SIZE;
+ alignment = SZ_64K;
break;
+#endif
default:
- BUG();
- }
-
- if (!mmio->is_write) {
- reg = *vmcr_field;
- mmio_data_write(mmio, ~0, reg);
- } else {
- reg = mmio_data_read(mmio, ~0);
- if (reg != *vmcr_field) {
- *vmcr_field = reg;
- vgic_set_vmcr(vcpu, &vmcr);
- updated = true;
- }
- }
- return updated;
-}
-
-static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
-}
-
-static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- if (mmio->is_write)
- return false;
-
- /* GICC_IIDR */
- reg = (PRODUCT_ID_KVM << 20) |
- (GICC_ARCH_VERSION_V2 << 16) |
- (IMPLEMENTER_ARM << 0);
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-/*
- * CPU Interface Register accesses - these are not accessed by the VM, but by
- * user space for saving and restoring VGIC state.
- */
-static const struct mmio_range vgic_cpu_ranges[] = {
- {
- .base = GIC_CPU_CTRL,
- .len = 12,
- .handle_mmio = handle_cpu_mmio_misc,
- },
- {
- .base = GIC_CPU_ALIAS_BINPOINT,
- .len = 4,
- .handle_mmio = handle_mmio_abpr,
- },
- {
- .base = GIC_CPU_ACTIVEPRIO,
- .len = 16,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_CPU_IDENT,
- .len = 4,
- .handle_mmio = handle_cpu_mmio_ident,
- },
-};
-
-static int vgic_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u32 *reg, bool is_write)
-{
- const struct mmio_range *r = NULL, *ranges;
- phys_addr_t offset;
- int ret, cpuid, c;
- struct kvm_vcpu *vcpu, *tmp_vcpu;
- struct vgic_dist *vgic;
- struct kvm_exit_mmio mmio;
-
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
- KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-
- mutex_lock(&dev->kvm->lock);
-
- ret = vgic_init(dev->kvm);
- if (ret)
- goto out;
-
- if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
- ret = -EINVAL;
+ r = -ENODEV;
goto out;
}
- vcpu = kvm_get_vcpu(dev->kvm, cpuid);
- vgic = &dev->kvm->arch.vgic;
-
- mmio.len = 4;
- mmio.is_write = is_write;
- if (is_write)
- mmio_data_write(&mmio, ~0, *reg);
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- mmio.phys_addr = vgic->vgic_dist_base + offset;
- ranges = vgic_dist_ranges;
- break;
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- mmio.phys_addr = vgic->vgic_cpu_base + offset;
- ranges = vgic_cpu_ranges;
- break;
- default:
- BUG();
- }
- r = find_matching_range(ranges, &mmio, offset);
-
- if (unlikely(!r || !r->handle_mmio)) {
- ret = -ENXIO;
+ if (vgic->vgic_model != type_needed) {
+ r = -ENODEV;
goto out;
}
-
- spin_lock(&vgic->lock);
-
- /*
- * Ensure that no other VCPU is running by checking the vcpu->cpu
- * field. If no other VPCUs are running we can safely access the VGIC
- * state, because even if another VPU is run after this point, that
- * VCPU will not touch the vgic state, because it will block on
- * getting the vgic->lock in kvm_vgic_sync_hwstate().
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
- if (unlikely(tmp_vcpu->cpu != -1)) {
- ret = -EBUSY;
- goto out_vgic_unlock;
- }
+ if (write) {
+ if (!IS_ALIGNED(*addr, alignment))
+ r = -EINVAL;
+ else
+ r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
+ block_size);
+ } else {
+ *addr = *addr_ptr;
}
- /*
- * Move all pending IRQs from the LRs on all VCPUs so the pending
- * state can be properly represented in the register state accessible
- * through this API.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
- vgic_unqueue_irqs(tmp_vcpu);
-
- offset -= r->base;
- r->handle_mmio(vcpu, &mmio, offset);
-
- if (!is_write)
- *reg = mmio_data_read(&mmio, ~0);
-
- ret = 0;
-out_vgic_unlock:
- spin_unlock(&vgic->lock);
out:
- mutex_unlock(&dev->kvm->lock);
- return ret;
+ mutex_unlock(&kvm->lock);
+ return r;
}
-static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
@@ -2261,17 +1754,6 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
r = kvm_vgic_addr(dev->kvm, type, &addr, true);
return (r == -ENODEV) ? -ENXIO : r;
}
-
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg;
-
- if (get_user(reg, uaddr))
- return -EFAULT;
-
- return vgic_attr_regs_access(dev, attr, &reg, true);
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 val;
@@ -2302,13 +1784,20 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return ret;
}
-
+ case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ r = vgic_init(dev->kvm);
+ return r;
+ }
+ break;
+ }
}
return -ENXIO;
}
-static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = -ENXIO;
@@ -2326,20 +1815,9 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -EFAULT;
break;
}
-
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg = 0;
-
- r = vgic_attr_regs_access(dev, attr, &reg, false);
- if (r)
- return r;
- r = put_user(reg, uaddr);
- break;
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
break;
}
@@ -2349,61 +1827,17 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return r;
}
-static int vgic_has_attr_regs(const struct mmio_range *ranges,
- phys_addr_t offset)
+int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
{
struct kvm_exit_mmio dev_attr_mmio;
dev_attr_mmio.len = 4;
- if (find_matching_range(ranges, &dev_attr_mmio, offset))
+ if (vgic_find_range(ranges, &dev_attr_mmio, offset))
return 0;
else
return -ENXIO;
}
-static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-{
- phys_addr_t offset;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR:
- switch (attr->attr) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- return 0;
- }
- break;
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_dist_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_cpu_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
- return 0;
- }
- return -ENXIO;
-}
-
-static void vgic_destroy(struct kvm_device *dev)
-{
- kfree(dev);
-}
-
-static int vgic_create(struct kvm_device *dev, u32 type)
-{
- return kvm_vgic_create(dev->kvm);
-}
-
-static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
- .name = "kvm-arm-vgic",
- .create = vgic_create,
- .destroy = vgic_destroy,
- .set_attr = vgic_set_attr,
- .get_attr = vgic_get_attr,
- .has_attr = vgic_has_attr,
-};
-
static void vgic_init_maintenance_interrupt(void *info)
{
enable_percpu_irq(vgic->maint_irq, 0);
@@ -2474,8 +1908,7 @@ int kvm_vgic_hyp_init(void)
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
- return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
- KVM_DEV_TYPE_ARM_VGIC_V2);
+ return 0;
out_free_irq:
free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
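To illustrate the userspace side of the new GICv3 device type, here is a minimal sketch (not part of this patch): the guest addresses are made up, error handling is omitted, and it assumes an existing VM file descriptor plus the standard KVM device API (KVM_CREATE_DEVICE / KVM_SET_DEVICE_ATTR) together with the KVM_VGIC_V3_ADDR_TYPE_* and KVM_DEV_ARM_VGIC_CTRL_INIT symbols referenced in the patch.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Illustrative only: create a GICv3 device, set its (64K-aligned)
 * base addresses and trigger the explicit init added by this patch. */
static void setup_vgic_v3(int vm_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
	uint64_t dist_base   = 0x08000000;	/* made-up guest addresses */
	uint64_t redist_base = 0x080a0000;
	struct kvm_device_attr dist = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
		.addr  = (uint64_t)(unsigned long)&dist_base,
	};
	struct kvm_device_attr redist = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V3_ADDR_TYPE_REDIST,
		.addr  = (uint64_t)(unsigned long)&redist_base,
	};
	struct kvm_device_attr init = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
	};

	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	/* cd.fd holds the device fd */
	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &dist);
	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &redist);
	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &init);	/* explicit init for non-GICv2 models */
}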
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
new file mode 100644
index 000000000000..1e83bdf5f499
--- /dev/null
+++ b/virt/kvm/arm/vgic.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012-2014 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from virt/kvm/arm/vgic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_VGIC_H__
+#define __KVM_VGIC_H__
+
+#define VGIC_ADDR_UNDEF (-1)
+#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+
+#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
+#define IMPLEMENTER_ARM 0x43b
+
+#define ACCESS_READ_VALUE (1 << 0)
+#define ACCESS_READ_RAZ (0 << 0)
+#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
+#define ACCESS_WRITE_IGNORED (0 << 1)
+#define ACCESS_WRITE_SETBIT (1 << 1)
+#define ACCESS_WRITE_CLEARBIT (2 << 1)
+#define ACCESS_WRITE_VALUE (3 << 1)
+#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
+
+#define VCPU_NOT_ALLOCATED ((u8)-1)
+
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
+
+void vgic_update_state(struct kvm *kvm);
+int vgic_init_common_maps(struct kvm *kvm);
+
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
+
+void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq);
+void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq);
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val);
+
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
+
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+ phys_addr_t offset, int mode);
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+static inline
+u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
+{
+ return le32_to_cpu(*((u32 *)mmio->data)) & mask;
+}
+
+static inline
+void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
+{
+ *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
+}
+
+struct kvm_mmio_range {
+ phys_addr_t base;
+ unsigned long len;
+ int bits_per_irq;
+ bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+};
+
+static inline bool is_in_range(phys_addr_t addr, unsigned long len,
+ phys_addr_t baseaddr, unsigned long size)
+{
+ return (addr >= baseaddr) && (addr + len <= baseaddr + size);
+}
+
+const
+struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct kvm_mmio_range *ranges,
+ unsigned long mmio_base);
+
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access);
+
+bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+void vgic_kick_vcpus(struct kvm *kvm);
+
+int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset);
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+
+int vgic_init(struct kvm *kvm);
+void vgic_v2_init_emulation(struct kvm *kvm);
+void vgic_v3_init_emulation(struct kvm *kvm);
+
+#endif
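As a rough illustration of how the helpers exported by this new header fit together, the sketch below shows a hypothetical model-specific register table dispatching through vgic_find_range()/vgic_handle_mmio_range(); the table contents, handler name and region size are invented for the example and do not come from this patch.

/*
 * Hypothetical example: a model-specific file could describe one
 * register region with a kvm_mmio_range table and let
 * vgic_handle_mmio_range() do the lookup, locking and 64-bit splitting.
 */
static bool handle_mmio_example_reg(struct kvm_vcpu *vcpu,
				    struct kvm_exit_mmio *mmio,
				    phys_addr_t offset)
{
	/* The example register behaves as read-as-zero/write-ignored. */
	return handle_mmio_raz_wi(vcpu, mmio, offset);
}

static const struct kvm_mmio_range example_ranges[] = {
	{
		.base		= 0x000,	/* offset within the region */
		.len		= 4,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_example_reg,
	},
	{},	/* len == 0 terminates the vgic_find_range() walk */
};

static bool example_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

	if (!is_in_range(mmio->phys_addr, mmio->len, base, SZ_4K))
		return false;

	return vgic_handle_mmio_range(vcpu, run, mmio, example_ranges, base);
}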
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 5ff7f7f2689a..44660aee335f 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -80,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
might_sleep();
- kvm_get_user_page_io(NULL, mm, addr, 1, NULL);
+ get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL);
kvm_async_page_present_sync(vcpu, apf);
spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cc6e2e19982..a1093700f3a4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,6 +66,9 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+unsigned int halt_poll_ns = 0;
+module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+
/*
* Ordering of locks:
*
@@ -89,7 +92,7 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#endif
@@ -176,6 +179,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
return called;
}
+#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
long dirty_count = kvm->tlbs_dirty;
@@ -186,6 +190,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
+#endif
void kvm_reload_remote_mmus(struct kvm *kvm)
{
@@ -673,6 +678,7 @@ static void update_memslots(struct kvm_memslots *slots,
if (!new->npages) {
WARN_ON(!mslots[i].npages);
new->base_gfn = 0;
+ new->flags = 0;
if (mslots[i].npages)
slots->used_slots--;
} else {
@@ -993,6 +999,86 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+/**
+ * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
+ * are dirty write protect them for next write.
+ * @kvm: pointer to kvm instance
+ * @log: slot id and address to which we copy the log
+ * @is_dirty: flag set if any page is dirty
+ *
+ * We need to keep in mind that VCPU threads can write to the bitmap
+ * concurrently. So, to avoid losing track of dirty pages we keep the
+ * following order:
+ *
+ * 1. Take a snapshot of the bit and clear it if needed.
+ * 2. Write protect the corresponding page.
+ * 3. Copy the snapshot to the userspace.
+ * 4. Upon return caller flushes TLB's if needed.
+ *
+ * Between 2 and 4, the guest may write to the page using the remaining TLB
+ * entry. This is not a problem because the page is reported dirty using
+ * the snapshot taken before and step 4 ensures that writes done after
+ * exiting to userspace will be logged for the next call.
+ *
+ */
+int kvm_get_dirty_log_protect(struct kvm *kvm,
+ struct kvm_dirty_log *log, bool *is_dirty)
+{
+ struct kvm_memory_slot *memslot;
+ int r, i;
+ unsigned long n;
+ unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap_buffer;
+
+ r = -EINVAL;
+ if (log->slot >= KVM_USER_MEM_SLOTS)
+ goto out;
+
+ memslot = id_to_memslot(kvm->memslots, log->slot);
+
+ dirty_bitmap = memslot->dirty_bitmap;
+ r = -ENOENT;
+ if (!dirty_bitmap)
+ goto out;
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+
+ dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+ memset(dirty_bitmap_buffer, 0, n);
+
+ spin_lock(&kvm->mmu_lock);
+ *is_dirty = false;
+ for (i = 0; i < n / sizeof(long); i++) {
+ unsigned long mask;
+ gfn_t offset;
+
+ if (!dirty_bitmap[i])
+ continue;
+
+ *is_dirty = true;
+
+ mask = xchg(&dirty_bitmap[i], 0);
+ dirty_bitmap_buffer[i] = mask;
+
+ offset = i * BITS_PER_LONG;
+ kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
+ mask);
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+
+ r = -EFAULT;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+ goto out;
+
+ r = 0;
+out:
+ return r;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+#endif
+
bool kvm_largepages_enabled(void)
{
return largepages_enabled;
@@ -1128,43 +1214,6 @@ static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}
-int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, bool write_fault,
- struct page **pagep)
-{
- int npages;
- int locked = 1;
- int flags = FOLL_TOUCH | FOLL_HWPOISON |
- (pagep ? FOLL_GET : 0) |
- (write_fault ? FOLL_WRITE : 0);
-
- /*
- * If retrying the fault, we get here *not* having allowed the filemap
- * to wait on the page lock. We should now allow waiting on the IO with
- * the mmap semaphore released.
- */
- down_read(&mm->mmap_sem);
- npages = __get_user_pages(tsk, mm, addr, 1, flags, pagep, NULL,
- &locked);
- if (!locked) {
- VM_BUG_ON(npages);
-
- if (!pagep)
- return 0;
-
- /*
- * The previous call has now waited on the IO. Now we can
- * retry and complete. Pass TRIED to ensure we do not re
- * schedule async IO (see e.g. filemap_fault).
- */
- down_read(&mm->mmap_sem);
- npages = __get_user_pages(tsk, mm, addr, 1, flags | FOLL_TRIED,
- pagep, NULL, NULL);
- }
- up_read(&mm->mmap_sem);
- return npages;
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1227,15 +1276,10 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
npages = get_user_page_nowait(current, current->mm,
addr, write_fault, page);
up_read(&current->mm->mmap_sem);
- } else {
- /*
- * By now we have tried gup_fast, and possibly async_pf, and we
- * are certainly not atomic. Time to retry the gup, allowing
- * mmap semaphore to be relinquished in the case of IO.
- */
- npages = kvm_get_user_page_io(current, current->mm, addr,
- write_fault, page);
- }
+ } else
+ npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
+ write_fault, 0, page,
+ FOLL_TOUCH|FOLL_HWPOISON);
if (npages != 1)
return npages;
@@ -1593,6 +1637,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
return 0;
}
+EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
@@ -1729,29 +1774,60 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
+static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+ if (kvm_arch_vcpu_runnable(vcpu)) {
+ kvm_make_request(KVM_REQ_UNHALT, vcpu);
+ return -EINTR;
+ }
+ if (kvm_cpu_has_pending_timer(vcpu))
+ return -EINTR;
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* The vCPU has executed a HLT instruction with in-kernel mode enabled.
*/
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
+ ktime_t start, cur;
DEFINE_WAIT(wait);
+ bool waited = false;
+
+ start = cur = ktime_get();
+ if (halt_poll_ns) {
+ ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+ do {
+ /*
+ * This sets KVM_REQ_UNHALT if an interrupt
+ * arrives.
+ */
+ if (kvm_vcpu_check_block(vcpu) < 0) {
+ ++vcpu->stat.halt_successful_poll;
+ goto out;
+ }
+ cur = ktime_get();
+ } while (single_task_running() && ktime_before(cur, stop));
+ }
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
- if (kvm_arch_vcpu_runnable(vcpu)) {
- kvm_make_request(KVM_REQ_UNHALT, vcpu);
- break;
- }
- if (kvm_cpu_has_pending_timer(vcpu))
- break;
- if (signal_pending(current))
+ if (kvm_vcpu_check_block(vcpu) < 0)
break;
+ waited = true;
schedule();
}
finish_wait(&vcpu->wq, &wait);
+ cur = ktime_get();
+
+out:
+ trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
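Since halt_poll_ns is declared above with module_param(..., S_IRUGO | S_IWUSR), the polling window can presumably be tuned at runtime through the usual module parameter interface (for example /sys/module/kvm/parameters/halt_poll_ns) as well as at load time; leaving it at 0 keeps the previous behaviour of blocking immediately.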
@@ -1934,7 +2010,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
.compat_ioctl = kvm_vcpu_compat_ioctl,
#endif
.mmap = kvm_vcpu_mmap,
@@ -2224,7 +2300,7 @@ out:
return r;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2316,7 +2392,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
.compat_ioctl = kvm_device_ioctl,
#endif
.release = kvm_device_release,
@@ -2603,7 +2679,7 @@ out:
return r;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
__u32 slot;
__u32 padding1;
@@ -2650,7 +2726,7 @@ out:
static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
.compat_ioctl = kvm_vm_compat_ioctl,
#endif
.llseek = noop_llseek,